-rw-r--r--.gitignore1
-rw-r--r--Documentation/.gitignore7
-rw-r--r--Documentation/ABI/obsolete/sysfs-class-rfkill29
-rw-r--r--Documentation/ABI/stable/sysfs-class-rfkill67
-rw-r--r--Documentation/ABI/testing/sysfs-bus-pci40
-rw-r--r--Documentation/ABI/testing/sysfs-devices-memory2
-rw-r--r--Documentation/ABI/testing/sysfs-driver-hid-picolcd43
-rw-r--r--Documentation/ABI/testing/sysfs-driver-hid-roccat-kone111
-rw-r--r--Documentation/ABI/testing/sysfs-wacom10
-rw-r--r--Documentation/Changes2
-rw-r--r--Documentation/DMA-API-HOWTO.txt2
-rw-r--r--Documentation/DocBook/Makefile2
-rw-r--r--Documentation/DocBook/drm.tmpl839
-rw-r--r--Documentation/DocBook/kgdb.tmpl692
-rw-r--r--Documentation/DocBook/libata.tmpl8
-rw-r--r--Documentation/DocBook/media-entities.tmpl11
-rw-r--r--Documentation/DocBook/v4l/compat.xml126
-rw-r--r--Documentation/DocBook/v4l/controls.xml36
-rw-r--r--Documentation/DocBook/v4l/dev-event.xml31
-rw-r--r--Documentation/DocBook/v4l/io.xml18
-rw-r--r--Documentation/DocBook/v4l/pixfmt.xml12
-rw-r--r--Documentation/DocBook/v4l/v4l2.xml3
-rw-r--r--Documentation/DocBook/v4l/videodev2.h.xml10
-rw-r--r--Documentation/DocBook/v4l/vidioc-dqevent.xml131
-rw-r--r--Documentation/DocBook/v4l/vidioc-qbuf.xml14
-rw-r--r--Documentation/DocBook/v4l/vidioc-queryctrl.xml2
-rw-r--r--Documentation/DocBook/v4l/vidioc-reqbufs.xml2
-rw-r--r--Documentation/DocBook/v4l/vidioc-subscribe-event.xml133
-rw-r--r--Documentation/DocBook/writing-an-alsa-driver.tmpl27
-rw-r--r--Documentation/PCI/pci-error-recovery.txt4
-rw-r--r--Documentation/PCI/pcieaer-howto.txt29
-rw-r--r--Documentation/RCU/stallwarn.txt94
-rw-r--r--Documentation/RCU/torture.txt10
-rw-r--r--Documentation/RCU/trace.txt35
-rw-r--r--Documentation/Smack.txt2
-rw-r--r--Documentation/SubmitChecklist12
-rw-r--r--Documentation/acpi/apei/einj.txt49
-rw-r--r--Documentation/arm/00-INDEX2
-rw-r--r--Documentation/arm/SA1100/ADSBitsy2
-rw-r--r--Documentation/arm/SPEAr/overview.txt60
-rw-r--r--Documentation/arm/Sharp-LH/ADC-LH7-Touchscreen2
-rw-r--r--Documentation/atomic_ops.txt2
-rw-r--r--Documentation/blackfin/bfin-gpio-notes.txt2
-rw-r--r--Documentation/cachetlb.txt6
-rw-r--r--Documentation/cgroups/blkio-controller.txt151
-rw-r--r--Documentation/cgroups/cgroups.txt2
-rw-r--r--Documentation/cgroups/cpusets.txt38
-rw-r--r--Documentation/cgroups/memcg_test.txt2
-rw-r--r--Documentation/cgroups/memory.txt2
-rw-r--r--Documentation/connector/connector.txt2
-rw-r--r--Documentation/cpu-hotplug.txt2
-rw-r--r--Documentation/credentials.txt14
-rw-r--r--Documentation/development-process/2.Process29
-rw-r--r--Documentation/development-process/7.AdvancedTopics2
-rw-r--r--Documentation/device-mapper/dm-flakey.txt18
-rw-r--r--Documentation/dvb/ci.txt2
-rw-r--r--Documentation/dvb/contributors.txt2
-rw-r--r--Documentation/edac.txt152
-rw-r--r--Documentation/feature-removal-schedule.txt103
-rw-r--r--Documentation/filesystems/Locking4
-rw-r--r--Documentation/filesystems/autofs4-mount-control.txt2
-rw-r--r--Documentation/filesystems/ceph.txt2
-rw-r--r--Documentation/filesystems/dlmfs.txt2
-rw-r--r--Documentation/filesystems/ext3.txt15
-rw-r--r--Documentation/filesystems/fiemap.txt12
-rw-r--r--Documentation/filesystems/fuse.txt4
-rw-r--r--Documentation/filesystems/gfs2.txt12
-rw-r--r--Documentation/filesystems/hpfs.txt2
-rw-r--r--Documentation/filesystems/logfs.txt8
-rw-r--r--Documentation/filesystems/nfs/rpc-cache.txt2
-rw-r--r--Documentation/filesystems/nilfs2.txt4
-rw-r--r--Documentation/filesystems/ocfs2.txt7
-rw-r--r--Documentation/filesystems/proc.txt10
-rw-r--r--Documentation/filesystems/smbfs.txt2
-rw-r--r--Documentation/filesystems/vfs.txt2
-rw-r--r--Documentation/hwmon/lm637
-rw-r--r--Documentation/hwmon/lm852
-rw-r--r--Documentation/hwmon/sysfs-interface13
-rw-r--r--Documentation/i2c/busses/i2c-i8018
-rw-r--r--Documentation/input/joystick.txt2
-rw-r--r--Documentation/intel_txt.txt18
-rw-r--r--Documentation/kbuild/kbuild.txt6
-rw-r--r--Documentation/kbuild/kconfig-language.txt2
-rw-r--r--Documentation/kbuild/kconfig.txt2
-rw-r--r--Documentation/kbuild/makefiles.txt2
-rw-r--r--Documentation/kernel-docs.txt10
-rw-r--r--Documentation/kernel-parameters.txt72
-rw-r--r--Documentation/kprobes.txt12
-rw-r--r--Documentation/kvm/api.txt208
-rw-r--r--Documentation/kvm/mmu.txt302
-rw-r--r--Documentation/laptops/laptop-mode.txt2
-rw-r--r--Documentation/lguest/lguest.c2
-rw-r--r--Documentation/md.txt2
-rw-r--r--Documentation/mutex-design.txt4
-rw-r--r--Documentation/netlabel/lsm_interface.txt2
-rw-r--r--Documentation/networking/caif/Linux-CAIF.txt212
-rw-r--r--Documentation/networking/caif/README109
-rw-r--r--Documentation/networking/ifenslave.c2
-rw-r--r--Documentation/networking/l2tp.txt247
-rw-r--r--Documentation/networking/packet_mmap.txt4
-rw-r--r--Documentation/networking/x25-iface.txt16
-rw-r--r--Documentation/pcmcia/driver-changes.txt13
-rw-r--r--Documentation/power/devices.txt847
-rw-r--r--Documentation/power/pm_qos_interface.txt48
-rw-r--r--Documentation/power/regulator/consumer.txt10
-rw-r--r--Documentation/power/regulator/machine.txt2
-rw-r--r--Documentation/power/regulator/overview.txt6
-rw-r--r--Documentation/power/userland-swsusp.txt4
-rw-r--r--Documentation/powerpc/booting-without-of.txt2
-rw-r--r--Documentation/powerpc/dts-bindings/4xx/reboot.txt18
-rw-r--r--Documentation/powerpc/dts-bindings/xilinx.txt2
-rw-r--r--Documentation/powerpc/phyp-assisted-dump.txt2
-rw-r--r--Documentation/rbtree.txt58
-rw-r--r--Documentation/rfkill.txt44
-rw-r--r--Documentation/rt-mutex-design.txt2
-rw-r--r--Documentation/scheduler/sched-design-CFS.txt54
-rw-r--r--Documentation/scheduler/sched-rt-group.txt20
-rw-r--r--Documentation/scsi/ChangeLog.lpfc4
-rw-r--r--Documentation/scsi/FlashPoint.txt2
-rw-r--r--Documentation/scsi/dtc3x80.txt2
-rw-r--r--Documentation/scsi/ncr53c8xx.txt2
-rw-r--r--Documentation/scsi/osst.txt2
-rw-r--r--Documentation/scsi/scsi_fc_transport.txt4
-rw-r--r--Documentation/scsi/sym53c8xx_2.txt2
-rw-r--r--Documentation/sound/alsa/ALSA-Configuration.txt31
-rw-r--r--Documentation/sound/alsa/HD-Audio.txt4
-rw-r--r--Documentation/sound/alsa/soc/dapm.txt4
-rw-r--r--Documentation/sound/alsa/soc/machine.txt2
-rw-r--r--Documentation/sound/alsa/soc/overview.txt2
-rw-r--r--Documentation/sparse.txt4
-rw-r--r--Documentation/sysfs-rules.txt2
-rw-r--r--Documentation/trace/events.txt11
-rw-r--r--Documentation/trace/ftrace.txt50
-rw-r--r--Documentation/trace/kprobetrace.txt4
-rw-r--r--Documentation/usb/WUSB-Design-overview.txt2
-rw-r--r--Documentation/video4linux/CARDLIST.bttv2
-rw-r--r--Documentation/video4linux/CARDLIST.cx881
-rw-r--r--Documentation/video4linux/CARDLIST.em28xx4
-rw-r--r--Documentation/video4linux/CARDLIST.saa71343
-rw-r--r--Documentation/video4linux/extract_xc3028.pl817
-rw-r--r--Documentation/video4linux/gspca.txt5
-rw-r--r--Documentation/video4linux/sh_mobile_ceu_camera.txt80
-rw-r--r--Documentation/video4linux/v4l2-framework.txt143
-rw-r--r--Documentation/vm/numa_memory_policy.txt4
-rw-r--r--Documentation/w1/w1.generic2
-rw-r--r--Documentation/watchdog/00-INDEX5
-rw-r--r--Documentation/watchdog/watchdog-parameters.txt390
-rw-r--r--Documentation/watchdog/wdt.txt15
-rw-r--r--MAINTAINERS170
-rw-r--r--Makefile65
-rw-r--r--arch/Kconfig20
-rw-r--r--arch/alpha/Kconfig3
-rw-r--r--arch/alpha/include/asm/bitops.h18
-rw-r--r--arch/alpha/kernel/time.c101
-rw-r--r--arch/arm/Kconfig152
-rw-r--r--arch/arm/Makefile10
-rw-r--r--arch/arm/boot/compressed/Makefile1
-rw-r--r--arch/arm/boot/compressed/decompress.c4
-rw-r--r--arch/arm/boot/compressed/piggy.lzma.S6
-rw-r--r--arch/arm/common/Kconfig6
-rw-r--r--arch/arm/common/Makefile3
-rw-r--r--arch/arm/common/clkdev.c7
-rw-r--r--arch/arm/common/icst.c100
-rw-r--r--arch/arm/common/icst307.c161
-rw-r--r--arch/arm/common/icst525.c160
-rw-r--r--arch/arm/common/vic.c107
-rw-r--r--arch/arm/configs/am3517_evm_defconfig144
-rw-r--r--arch/arm/configs/ams_delta_defconfig10
-rw-r--r--arch/arm/configs/cns3420vb_defconfig831
-rw-r--r--arch/arm/configs/devkit8000_defconfig157
-rw-r--r--arch/arm/configs/mx51_defconfig17
-rw-r--r--arch/arm/configs/omap3_defconfig34
-rw-r--r--arch/arm/configs/omap3_evm_defconfig51
-rw-r--r--arch/arm/configs/rx51_defconfig39
-rw-r--r--arch/arm/configs/spear300_defconfig773
-rw-r--r--arch/arm/configs/spear310_defconfig775
-rw-r--r--arch/arm/configs/spear320_defconfig775
-rw-r--r--arch/arm/configs/spear600_defconfig760
-rw-r--r--arch/arm/configs/stamp9g20_defconfig1456
-rw-r--r--arch/arm/include/asm/cacheflush.h4
-rw-r--r--arch/arm/include/asm/hardware/arm_timer.h39
-rw-r--r--arch/arm/include/asm/hardware/icst.h59
-rw-r--r--arch/arm/include/asm/hardware/icst307.h38
-rw-r--r--arch/arm/include/asm/hardware/icst525.h36
-rw-r--r--arch/arm/include/asm/hardware/sp810.h59
-rw-r--r--arch/arm/include/asm/ioctls.h3
-rw-r--r--arch/arm/include/asm/kmap_types.h1
-rw-r--r--arch/arm/include/asm/mach/pci.h11
-rw-r--r--arch/arm/include/asm/mach/time.h2
-rw-r--r--arch/arm/include/asm/pci.h15
-rw-r--r--arch/arm/include/asm/perf_event.h17
-rw-r--r--arch/arm/include/asm/pmu.h32
-rw-r--r--arch/arm/include/asm/scatterlist.h20
-rw-r--r--arch/arm/include/asm/tlbflush.h29
-rw-r--r--arch/arm/kernel/bios32.c3
-rw-r--r--arch/arm/kernel/dma.c36
-rw-r--r--arch/arm/kernel/kgdb.c5
-rw-r--r--arch/arm/kernel/perf_event.c928
-rw-r--r--arch/arm/kernel/pmu.c127
-rw-r--r--arch/arm/kernel/time.c70
-rw-r--r--arch/arm/lib/clear_user.S1
-rw-r--r--arch/arm/lib/copy_to_user.S1
-rw-r--r--arch/arm/mach-at91/Kconfig14
-rw-r--r--arch/arm/mach-at91/Makefile2
-rw-r--r--arch/arm/mach-at91/board-stamp9g20.c315
-rw-r--r--arch/arm/mach-at91/include/mach/board.h8
-rw-r--r--arch/arm/mach-at91/include/mach/cpu.h4
-rw-r--r--arch/arm/mach-at91/include/mach/system.h7
-rw-r--r--arch/arm/mach-bcmring/arch.c16
-rw-r--r--arch/arm/mach-clps711x/mm.c1
-rw-r--r--arch/arm/mach-cns3xxx/Kconfig12
-rw-r--r--arch/arm/mach-cns3xxx/Makefile2
-rw-r--r--arch/arm/mach-cns3xxx/Makefile.boot3
-rw-r--r--arch/arm/mach-cns3xxx/cns3420vb.c148
-rw-r--r--arch/arm/mach-cns3xxx/core.c249
-rw-r--r--arch/arm/mach-cns3xxx/core.h23
-rw-r--r--arch/arm/mach-cns3xxx/include/mach/cns3xxx.h635
-rw-r--r--arch/arm/mach-cns3xxx/include/mach/debug-macro.S21
-rw-r--r--arch/arm/mach-cns3xxx/include/mach/entry-macro.S82
-rw-r--r--arch/arm/mach-cns3xxx/include/mach/hardware.h22
-rw-r--r--arch/arm/mach-cns3xxx/include/mach/io.h17
-rw-r--r--arch/arm/mach-cns3xxx/include/mach/irqs.h24
-rw-r--r--arch/arm/mach-cns3xxx/include/mach/memory.h26
-rw-r--r--arch/arm/mach-cns3xxx/include/mach/system.h29
-rw-r--r--arch/arm/mach-cns3xxx/include/mach/timex.h12
-rw-r--r--arch/arm/mach-cns3xxx/include/mach/uncompress.h55
-rw-r--r--arch/arm/mach-cns3xxx/include/mach/vmalloc.h11
-rw-r--r--arch/arm/mach-cns3xxx/pm.c86
-rw-r--r--arch/arm/mach-davinci/Kconfig2
-rw-r--r--arch/arm/mach-davinci/board-da830-evm.c44
-rw-r--r--arch/arm/mach-davinci/board-da850-evm.c39
-rw-r--r--arch/arm/mach-davinci/board-dm355-evm.c11
-rw-r--r--arch/arm/mach-davinci/board-dm355-leopard.c11
-rw-r--r--arch/arm/mach-davinci/board-dm365-evm.c9
-rw-r--r--arch/arm/mach-davinci/board-dm644x-evm.c53
-rw-r--r--arch/arm/mach-davinci/board-dm646x-evm.c13
-rw-r--r--arch/arm/mach-davinci/board-neuros-osd2.c47
-rw-r--r--arch/arm/mach-davinci/board-sffsdr.c13
-rw-r--r--arch/arm/mach-davinci/cdce949.c1
-rw-r--r--arch/arm/mach-davinci/clock.c22
-rw-r--r--arch/arm/mach-davinci/clock.h9
-rw-r--r--arch/arm/mach-davinci/common.c3
-rw-r--r--arch/arm/mach-davinci/cp_intc.c6
-rw-r--r--arch/arm/mach-davinci/da830.c3
-rw-r--r--arch/arm/mach-davinci/da850.c3
-rw-r--r--arch/arm/mach-davinci/devices-da8xx.c4
-rw-r--r--arch/arm/mach-davinci/devices.c46
-rw-r--r--arch/arm/mach-davinci/dm355.c9
-rw-r--r--arch/arm/mach-davinci/dm365.c10
-rw-r--r--arch/arm/mach-davinci/dm644x.c9
-rw-r--r--arch/arm/mach-davinci/dm646x.c41
-rw-r--r--arch/arm/mach-davinci/dma.c182
-rw-r--r--arch/arm/mach-davinci/gpio.c154
-rw-r--r--arch/arm/mach-davinci/include/mach/common.h11
-rw-r--r--arch/arm/mach-davinci/include/mach/cp_intc.h2
-rw-r--r--arch/arm/mach-davinci/include/mach/cputype.h8
-rw-r--r--arch/arm/mach-davinci/include/mach/da8xx.h29
-rw-r--r--arch/arm/mach-davinci/include/mach/dm355.h3
-rw-r--r--arch/arm/mach-davinci/include/mach/dm365.h4
-rw-r--r--arch/arm/mach-davinci/include/mach/dm644x.h6
-rw-r--r--arch/arm/mach-davinci/include/mach/dm646x.h4
-rw-r--r--arch/arm/mach-davinci/include/mach/gpio.h72
-rw-r--r--arch/arm/mach-davinci/include/mach/irqs.h97
-rw-r--r--arch/arm/mach-davinci/include/mach/mux.h277
-rw-r--r--arch/arm/mach-davinci/include/mach/psc.h55
-rw-r--r--arch/arm/mach-davinci/include/mach/serial.h1
-rw-r--r--arch/arm/mach-davinci/include/mach/system.h5
-rw-r--r--arch/arm/mach-davinci/mux.c2
-rw-r--r--arch/arm/mach-davinci/mux.h2
-rw-r--r--arch/arm/mach-davinci/psc.c3
-rw-r--r--arch/arm/mach-davinci/serial.c34
-rw-r--r--arch/arm/mach-davinci/time.c15
-rw-r--r--arch/arm/mach-ep93xx/adssphere.c2
-rw-r--r--arch/arm/mach-ep93xx/core.c71
-rw-r--r--arch/arm/mach-ep93xx/edb93xx.c4
-rw-r--r--arch/arm/mach-ep93xx/gesbc9312.c2
-rw-r--r--arch/arm/mach-ep93xx/include/mach/platform.h8
-rw-r--r--arch/arm/mach-ep93xx/include/mach/ts72xx.h19
-rw-r--r--arch/arm/mach-ep93xx/micro9.c2
-rw-r--r--arch/arm/mach-ep93xx/simone.c6
-rw-r--r--arch/arm/mach-ep93xx/snappercl15.c6
-rw-r--r--arch/arm/mach-ep93xx/ts72xx.c190
-rw-r--r--arch/arm/mach-integrator/Kconfig1
-rw-r--r--arch/arm/mach-integrator/Makefile2
-rw-r--r--arch/arm/mach-integrator/common.h2
-rw-r--r--arch/arm/mach-integrator/core.c127
-rw-r--r--arch/arm/mach-integrator/cpu.c53
-rw-r--r--arch/arm/mach-integrator/impd1.c43
-rw-r--r--arch/arm/mach-integrator/include/mach/clkdev.h7
-rw-r--r--arch/arm/mach-integrator/include/mach/entry-macro.S1
-rw-r--r--arch/arm/mach-integrator/include/mach/hardware.h17
-rw-r--r--arch/arm/mach-integrator/include/mach/platform.h54
-rw-r--r--arch/arm/mach-integrator/integrator_ap.c166
-rw-r--r--arch/arm/mach-integrator/integrator_cp.c88
-rw-r--r--arch/arm/mach-integrator/leds.c1
-rw-r--r--arch/arm/mach-integrator/pci_v3.c7
-rw-r--r--arch/arm/mach-iop32x/n2100.c2
-rw-r--r--arch/arm/mach-ixp4xx/common.c1
-rw-r--r--arch/arm/mach-msm/Kconfig28
-rw-r--r--arch/arm/mach-msm/Makefile7
-rw-r--r--arch/arm/mach-msm/acpuclock-arm11.c526
-rw-r--r--arch/arm/mach-msm/acpuclock.h32
-rw-r--r--arch/arm/mach-msm/board-halibut.c15
-rw-r--r--arch/arm/mach-msm/board-mahimahi.c87
-rw-r--r--arch/arm/mach-msm/board-msm7x27.c179
-rw-r--r--arch/arm/mach-msm/board-msm7x30.c196
-rw-r--r--arch/arm/mach-msm/board-qsd8x50.c176
-rw-r--r--arch/arm/mach-msm/board-sapphire.c118
-rw-r--r--arch/arm/mach-msm/board-trout.c (renamed from arch/arm/mach-msm/board-dream.c)6
-rw-r--r--arch/arm/mach-msm/board-trout.h (renamed from arch/arm/mach-msm/board-dream.h)0
-rw-r--r--arch/arm/mach-msm/devices.c78
-rw-r--r--arch/arm/mach-msm/dma.c28
-rw-r--r--arch/arm/mach-msm/include/mach/board.h11
-rw-r--r--arch/arm/mach-msm/include/mach/dma.h2
-rw-r--r--arch/arm/mach-msm/include/mach/msm_fb.h147
-rw-r--r--arch/arm/mach-msm/include/mach/msm_iomap.h6
-rw-r--r--arch/arm/mach-msm/include/mach/msm_smd.h109
-rw-r--r--arch/arm/mach-msm/include/mach/system.h5
-rw-r--r--arch/arm/mach-msm/include/mach/vreg.h2
-rw-r--r--arch/arm/mach-msm/io.c3
-rw-r--r--arch/arm/mach-msm/irq.c4
-rw-r--r--arch/arm/mach-msm/last_radio_log.c82
-rw-r--r--arch/arm/mach-msm/proc_comm.c26
-rw-r--r--arch/arm/mach-msm/proc_comm.h105
-rw-r--r--arch/arm/mach-msm/smd.c1046
-rw-r--r--arch/arm/mach-msm/smd_debug.c315
-rw-r--r--arch/arm/mach-msm/smd_private.h403
-rw-r--r--arch/arm/mach-msm/vreg.c154
-rw-r--r--arch/arm/mach-mx3/Kconfig1
-rw-r--r--arch/arm/mach-mx3/devices.c19
-rw-r--r--arch/arm/mach-mx3/devices.h1
-rw-r--r--arch/arm/mach-mx3/mach-mx31lilly.c145
-rw-r--r--arch/arm/mach-mx3/mx31lite-db.c1
-rw-r--r--arch/arm/mach-mx3/mx31moboard-smartbot.c8
-rw-r--r--arch/arm/mach-mx5/board-mx51_babbage.c155
-rw-r--r--arch/arm/mach-mx5/clock-mx51.c45
-rw-r--r--arch/arm/mach-mx5/devices.c94
-rw-r--r--arch/arm/mach-mx5/devices.h3
-rw-r--r--arch/arm/mach-nomadik/Kconfig1
-rw-r--r--arch/arm/mach-nomadik/Makefile2
-rw-r--r--arch/arm/mach-nomadik/board-nhk8815.c10
-rw-r--r--arch/arm/mach-nomadik/clock.c38
-rw-r--r--arch/arm/mach-nomadik/clock.h1
-rw-r--r--arch/arm/mach-nomadik/cpu-8815.c83
-rw-r--r--arch/arm/mach-nomadik/include/mach/gpio.h67
-rw-r--r--arch/arm/mach-omap1/Kconfig10
-rw-r--r--arch/arm/mach-omap1/Makefile1
-rw-r--r--arch/arm/mach-omap1/ams-delta-fiq-handler.S278
-rw-r--r--arch/arm/mach-omap1/ams-delta-fiq.c155
-rw-r--r--arch/arm/mach-omap1/board-ams-delta.c18
-rw-r--r--arch/arm/mach-omap1/board-htcherald.c1
-rw-r--r--arch/arm/mach-omap1/include/mach/ams-delta-fiq.h79
-rw-r--r--arch/arm/mach-omap1/include/mach/debug-macro.S21
-rw-r--r--arch/arm/mach-omap2/Kconfig1
-rw-r--r--arch/arm/mach-omap2/Makefile1
-rw-r--r--arch/arm/mach-omap2/board-2430sdp.c21
-rw-r--r--arch/arm/mach-omap2/board-am3517evm.c146
-rw-r--r--arch/arm/mach-omap2/board-cm-t35.c1
-rw-r--r--arch/arm/mach-omap2/board-devkit8000.c155
-rw-r--r--arch/arm/mach-omap2/board-omap3beagle.c101
-rw-r--r--arch/arm/mach-omap2/board-overo.c14
-rw-r--r--arch/arm/mach-omap2/board-rx51-peripherals.c120
-rw-r--r--arch/arm/mach-omap2/board-rx51-video.c107
-rw-r--r--arch/arm/mach-omap2/board-rx51.c2
-rw-r--r--arch/arm/mach-omap2/board-zoom-debugboard.c2
-rw-r--r--arch/arm/mach-omap2/board-zoom2.c4
-rw-r--r--arch/arm/mach-omap2/board-zoom3.c4
-rw-r--r--arch/arm/mach-omap2/clock2420_data.c2
-rw-r--r--arch/arm/mach-omap2/clock2430_data.c2
-rw-r--r--arch/arm/mach-omap2/clock3xxx_data.c6
-rw-r--r--arch/arm/mach-omap2/devices.c120
-rw-r--r--arch/arm/mach-omap2/include/mach/am35xx.h20
-rw-r--r--arch/arm/mach-omap2/include/mach/debug-macro.S15
-rw-r--r--arch/arm/mach-omap2/io.c9
-rw-r--r--arch/arm/mach-omap2/mux34xx.c86
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c1
-rw-r--r--arch/arm/mach-pxa/icontrol.c9
-rw-r--r--arch/arm/mach-pxa/zeus.c4
-rw-r--r--arch/arm/mach-realview/Makefile2
-rw-r--r--arch/arm/mach-realview/clock.c64
-rw-r--r--arch/arm/mach-realview/clock.h19
-rw-r--r--arch/arm/mach-realview/core.c194
-rw-r--r--arch/arm/mach-realview/include/mach/clkdev.h9
-rw-r--r--arch/arm/mach-realview/include/mach/irqs-pb1176.h1
-rw-r--r--arch/arm/mach-realview/include/mach/irqs-pba8.h8
-rw-r--r--arch/arm/mach-realview/include/mach/irqs-pbx.h14
-rw-r--r--arch/arm/mach-realview/include/mach/platform.h20
-rw-r--r--arch/arm/mach-realview/realview_eb.c34
-rw-r--r--arch/arm/mach-realview/realview_pb1176.c17
-rw-r--r--arch/arm/mach-realview/realview_pb11mp.c34
-rw-r--r--arch/arm/mach-realview/realview_pba8.c17
-rw-r--r--arch/arm/mach-realview/realview_pbx.c32
-rw-r--r--arch/arm/mach-s3c2410/Kconfig1
-rw-r--r--arch/arm/mach-s3c2410/h1940-bluetooth.c32
-rw-r--r--arch/arm/mach-s3c2410/include/mach/dma.h2
-rw-r--r--arch/arm/mach-s3c2410/include/mach/gpio-fns.h47
-rw-r--r--arch/arm/mach-s3c2410/include/mach/gpio-nrs.h12
-rw-r--r--arch/arm/mach-s3c2410/include/mach/irqs.h25
-rw-r--r--arch/arm/mach-s3c2410/include/mach/map.h5
-rw-r--r--arch/arm/mach-s3c2410/include/mach/regs-clock.h2
-rw-r--r--arch/arm/mach-s3c2410/include/mach/regs-dsc.h36
-rw-r--r--arch/arm/mach-s3c2410/include/mach/regs-gpio.h45
-rw-r--r--arch/arm/mach-s3c2410/include/mach/regs-gpioj.h36
-rw-r--r--arch/arm/mach-s3c2410/include/mach/regs-irq.h10
-rw-r--r--arch/arm/mach-s3c2410/include/mach/regs-s3c2416-mem.h30
-rw-r--r--arch/arm/mach-s3c2410/include/mach/regs-s3c2416.h24
-rw-r--r--arch/arm/mach-s3c2410/include/mach/uncompress.h4
-rw-r--r--arch/arm/mach-s3c2410/mach-amlm5900.c5
-rw-r--r--arch/arm/mach-s3c2410/mach-bast.c9
-rw-r--r--arch/arm/mach-s3c2410/mach-h1940.c25
-rw-r--r--arch/arm/mach-s3c2410/mach-n30.c7
-rw-r--r--arch/arm/mach-s3c2410/mach-qt2410.c10
-rw-r--r--arch/arm/mach-s3c2410/mach-vr1000.c5
-rw-r--r--arch/arm/mach-s3c2410/s3c2410.c8
-rw-r--r--arch/arm/mach-s3c2412/Kconfig3
-rw-r--r--arch/arm/mach-s3c2412/dma.c3
-rw-r--r--arch/arm/mach-s3c2412/mach-jive.c26
-rw-r--r--arch/arm/mach-s3c2412/mach-smdk2413.c8
-rw-r--r--arch/arm/mach-s3c2416/Kconfig38
-rw-r--r--arch/arm/mach-s3c2416/Makefile19
-rw-r--r--arch/arm/mach-s3c2416/clock.c135
-rw-r--r--arch/arm/mach-s3c2416/irq.c254
-rw-r--r--arch/arm/mach-s3c2416/mach-smdk2416.c150
-rw-r--r--arch/arm/mach-s3c2416/s3c2416.c128
-rw-r--r--arch/arm/mach-s3c2440/Kconfig1
-rw-r--r--arch/arm/mach-s3c2440/mach-mini2440.c23
-rw-r--r--arch/arm/mach-s3c2440/mach-nexcoder.c9
-rw-r--r--arch/arm/mach-s3c2440/mach-osiris.c5
-rw-r--r--arch/arm/mach-s3c2440/s3c2440.c8
-rw-r--r--arch/arm/mach-s3c2443/Kconfig1
-rw-r--r--arch/arm/mach-s3c2443/clock.c481
-rw-r--r--arch/arm/mach-s3c64xx/Makefile1
-rw-r--r--arch/arm/mach-s3c64xx/clock.c10
-rw-r--r--arch/arm/mach-s3c64xx/dev-ts.c61
-rw-r--r--arch/arm/mach-s3c64xx/gpiolib.c6
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/pll.h35
-rw-r--r--arch/arm/mach-s3c64xx/mach-smdk6410.c5
-rw-r--r--arch/arm/mach-s5p6440/gpio.c5
-rw-r--r--arch/arm/mach-s5p6440/include/mach/pwm-clock.h24
-rw-r--r--arch/arm/mach-s5p6442/include/mach/pwm-clock.h21
-rw-r--r--arch/arm/mach-s5pv210/include/mach/pwm-clock.h21
-rw-r--r--arch/arm/mach-shmobile/board-ap4evb.c106
-rw-r--r--arch/arm/mach-shmobile/board-g4evm.c127
-rw-r--r--arch/arm/mach-shmobile/include/mach/irqs.h6
-rw-r--r--arch/arm/mach-shmobile/include/mach/memory.h3
-rw-r--r--arch/arm/mach-shmobile/include/mach/vmalloc.h3
-rw-r--r--arch/arm/mach-shmobile/intc-sh7367.c178
-rw-r--r--arch/arm/mach-shmobile/intc-sh7372.c12
-rw-r--r--arch/arm/mach-shmobile/intc-sh7377.c300
-rw-r--r--arch/arm/mach-shmobile/setup-sh7372.c47
-rw-r--r--arch/arm/mach-spear3xx/Kconfig33
-rw-r--r--arch/arm/mach-spear3xx/Kconfig30017
-rw-r--r--arch/arm/mach-spear3xx/Kconfig31017
-rw-r--r--arch/arm/mach-spear3xx/Kconfig32017
-rw-r--r--arch/arm/mach-spear3xx/Makefile26
-rw-r--r--arch/arm/mach-spear3xx/Makefile.boot3
-rw-r--r--arch/arm/mach-spear3xx/clock.c389
-rw-r--r--arch/arm/mach-spear3xx/include/mach/clkdev.h19
-rw-r--r--arch/arm/mach-spear3xx/include/mach/debug-macro.S14
-rw-r--r--arch/arm/mach-spear3xx/include/mach/entry-macro.S46
-rw-r--r--arch/arm/mach-spear3xx/include/mach/generic.h205
-rw-r--r--arch/arm/mach-spear3xx/include/mach/gpio.h19
-rw-r--r--arch/arm/mach-spear3xx/include/mach/hardware.h20
-rw-r--r--arch/arm/mach-spear3xx/include/mach/io.h19
-rw-r--r--arch/arm/mach-spear3xx/include/mach/irqs.h152
-rw-r--r--arch/arm/mach-spear3xx/include/mach/memory.h19
-rw-r--r--arch/arm/mach-spear3xx/include/mach/misc_regs.h163
-rw-r--r--arch/arm/mach-spear3xx/include/mach/spear.h144
-rw-r--r--arch/arm/mach-spear3xx/include/mach/spear300.h83
-rw-r--r--arch/arm/mach-spear3xx/include/mach/spear310.h70
-rw-r--r--arch/arm/mach-spear3xx/include/mach/spear320.h96
-rw-r--r--arch/arm/mach-spear3xx/include/mach/system.h19
-rw-r--r--arch/arm/mach-spear3xx/include/mach/timex.h19
-rw-r--r--arch/arm/mach-spear3xx/include/mach/uncompress.h19
-rw-r--r--arch/arm/mach-spear3xx/include/mach/vmalloc.h19
-rw-r--r--arch/arm/mach-spear3xx/spear300.c468
-rw-r--r--arch/arm/mach-spear3xx/spear300_evb.c77
-rw-r--r--arch/arm/mach-spear3xx/spear310.c302
-rw-r--r--arch/arm/mach-spear3xx/spear310_evb.c84
-rw-r--r--arch/arm/mach-spear3xx/spear320.c549
-rw-r--r--arch/arm/mach-spear3xx/spear320_evb.c81
-rw-r--r--arch/arm/mach-spear3xx/spear3xx.c548
-rw-r--r--arch/arm/mach-spear6xx/Kconfig20
-rw-r--r--arch/arm/mach-spear6xx/Kconfig60017
-rw-r--r--arch/arm/mach-spear6xx/Makefile12
-rw-r--r--arch/arm/mach-spear6xx/Makefile.boot3
-rw-r--r--arch/arm/mach-spear6xx/clock.c483
-rw-r--r--arch/arm/mach-spear6xx/include/mach/clkdev.h19
-rw-r--r--arch/arm/mach-spear6xx/include/mach/debug-macro.S14
-rw-r--r--arch/arm/mach-spear6xx/include/mach/entry-macro.S55
-rw-r--r--arch/arm/mach-spear6xx/include/mach/generic.h45
-rw-r--r--arch/arm/mach-spear6xx/include/mach/gpio.h19
-rw-r--r--arch/arm/mach-spear6xx/include/mach/hardware.h21
-rw-r--r--arch/arm/mach-spear6xx/include/mach/io.h20
-rw-r--r--arch/arm/mach-spear6xx/include/mach/irqs.h97
-rw-r--r--arch/arm/mach-spear6xx/include/mach/memory.h19
-rw-r--r--arch/arm/mach-spear6xx/include/mach/misc_regs.h173
-rw-r--r--arch/arm/mach-spear6xx/include/mach/spear.h173
-rw-r--r--arch/arm/mach-spear6xx/include/mach/spear600.h21
-rw-r--r--arch/arm/mach-spear6xx/include/mach/system.h19
-rw-r--r--arch/arm/mach-spear6xx/include/mach/timex.h19
-rw-r--r--arch/arm/mach-spear6xx/include/mach/uncompress.h19
-rw-r--r--arch/arm/mach-spear6xx/include/mach/vmalloc.h19
-rw-r--r--arch/arm/mach-spear6xx/spear600.c25
-rw-r--r--arch/arm/mach-spear6xx/spear600_evb.c51
-rw-r--r--arch/arm/mach-spear6xx/spear6xx.c158
-rw-r--r--arch/arm/mach-u300/i2c.c57
-rw-r--r--arch/arm/mach-u300/include/mach/coh901318.h21
-rw-r--r--arch/arm/mach-u300/include/mach/irqs.h7
-rw-r--r--arch/arm/mach-u300/mmc.c3
-rw-r--r--arch/arm/mach-ux500/Kconfig41
-rw-r--r--arch/arm/mach-ux500/Makefile6
-rw-r--r--arch/arm/mach-ux500/board-mop500.c134
-rw-r--r--arch/arm/mach-ux500/board-u5500.c41
-rw-r--r--arch/arm/mach-ux500/clock.c501
-rw-r--r--arch/arm/mach-ux500/clock.h125
-rw-r--r--arch/arm/mach-ux500/cpu-db5500.c50
-rw-r--r--arch/arm/mach-ux500/cpu-db8500.c (renamed from arch/arm/mach-ux500/cpu-u8500.c)55
-rw-r--r--arch/arm/mach-ux500/cpu.c99
-rw-r--r--arch/arm/mach-ux500/devices-db5500.c46
-rw-r--r--arch/arm/mach-ux500/devices-db8500.c107
-rw-r--r--arch/arm/mach-ux500/devices.c88
-rw-r--r--arch/arm/mach-ux500/include/mach/db5500-regs.h103
-rw-r--r--arch/arm/mach-ux500/include/mach/db8500-regs.h135
-rw-r--r--arch/arm/mach-ux500/include/mach/debug-macro.S12
-rw-r--r--arch/arm/mach-ux500/include/mach/devices.h29
-rw-r--r--arch/arm/mach-ux500/include/mach/entry-macro.S2
-rw-r--r--arch/arm/mach-ux500/include/mach/gpio.h50
-rw-r--r--arch/arm/mach-ux500/include/mach/hardware.h197
-rw-r--r--arch/arm/mach-ux500/include/mach/irqs.h9
-rw-r--r--arch/arm/mach-ux500/include/mach/setup.h24
-rw-r--r--arch/arm/mach-ux500/platsmp.c13
-rw-r--r--arch/arm/mach-versatile/Makefile2
-rw-r--r--arch/arm/mach-versatile/clock.c65
-rw-r--r--arch/arm/mach-versatile/clock.h20
-rw-r--r--arch/arm/mach-versatile/core.c194
-rw-r--r--arch/arm/mach-versatile/include/mach/clkdev.h9
-rw-r--r--arch/arm/mach-versatile/include/mach/entry-macro.S1
-rw-r--r--arch/arm/mach-versatile/include/mach/hardware.h3
-rw-r--r--arch/arm/mach-versatile/include/mach/platform.h26
-rw-r--r--arch/arm/mach-vexpress/Kconfig9
-rw-r--r--arch/arm/mach-vexpress/Makefile8
-rw-r--r--arch/arm/mach-vexpress/Makefile.boot3
-rw-r--r--arch/arm/mach-vexpress/core.h26
-rw-r--r--arch/arm/mach-vexpress/ct-ca9x4.c250
-rw-r--r--arch/arm/mach-vexpress/headsmp.S39
-rw-r--r--arch/arm/mach-vexpress/include/mach/clkdev.h15
-rw-r--r--arch/arm/mach-vexpress/include/mach/ct-ca9x4.h47
-rw-r--r--arch/arm/mach-vexpress/include/mach/debug-macro.S23
-rw-r--r--arch/arm/mach-vexpress/include/mach/entry-macro.S67
-rw-r--r--arch/arm/mach-vexpress/include/mach/hardware.h1
-rw-r--r--arch/arm/mach-vexpress/include/mach/io.h28
-rw-r--r--arch/arm/mach-vexpress/include/mach/irqs.h4
-rw-r--r--arch/arm/mach-vexpress/include/mach/memory.h25
-rw-r--r--arch/arm/mach-vexpress/include/mach/motherboard.h121
-rw-r--r--arch/arm/mach-vexpress/include/mach/smp.h21
-rw-r--r--arch/arm/mach-vexpress/include/mach/system.h37
-rw-r--r--arch/arm/mach-vexpress/include/mach/timex.h23
-rw-r--r--arch/arm/mach-vexpress/include/mach/uncompress.h52
-rw-r--r--arch/arm/mach-vexpress/include/mach/vmalloc.h21
-rw-r--r--arch/arm/mach-vexpress/localtimer.c26
-rw-r--r--arch/arm/mach-vexpress/platsmp.c190
-rw-r--r--arch/arm/mach-vexpress/v2m.c361
-rw-r--r--arch/arm/mm/Kconfig5
-rw-r--r--arch/arm/mm/abort-ev7.S21
-rw-r--r--arch/arm/mm/alignment.c53
-rw-r--r--arch/arm/mm/cache-v6.S17
-rw-r--r--arch/arm/mm/cache-v7.S4
-rw-r--r--arch/arm/mm/copypage-fa.c2
-rw-r--r--arch/arm/mm/fault-armv.c2
-rw-r--r--arch/arm/mm/init.c34
-rw-r--r--arch/arm/mm/ioremap.c6
-rw-r--r--arch/arm/mm/mm.h3
-rw-r--r--arch/arm/mm/mmu.c39
-rw-r--r--arch/arm/mm/nommu.c13
-rw-r--r--arch/arm/mm/tlb-v7.S8
-rw-r--r--arch/arm/oprofile/Makefile7
-rw-r--r--arch/arm/oprofile/backtrace.c83
-rw-r--r--arch/arm/oprofile/common.c375
-rw-r--r--arch/arm/oprofile/op_arm_model.h35
-rw-r--r--arch/arm/oprofile/op_counter.h27
-rw-r--r--arch/arm/oprofile/op_model_arm11_core.c162
-rw-r--r--arch/arm/oprofile/op_model_arm11_core.h45
-rw-r--r--arch/arm/oprofile/op_model_mpcore.c306
-rw-r--r--arch/arm/oprofile/op_model_mpcore.h61
-rw-r--r--arch/arm/oprofile/op_model_v6.c78
-rw-r--r--arch/arm/oprofile/op_model_v7.c415
-rw-r--r--arch/arm/oprofile/op_model_v7.h103
-rw-r--r--arch/arm/oprofile/op_model_xscale.c444
-rw-r--r--arch/arm/plat-iop/Makefile2
-rw-r--r--arch/arm/plat-iop/pmu.c40
-rw-r--r--arch/arm/plat-mxc/ehci.c100
-rw-r--r--arch/arm/plat-mxc/gpio.c5
-rw-r--r--arch/arm/plat-mxc/include/mach/iomux-mx51.h33
-rw-r--r--arch/arm/plat-mxc/include/mach/mxc_ehci.h14
-rw-r--r--arch/arm/plat-mxc/time.c46
-rw-r--r--arch/arm/plat-mxc/tzic.c4
-rw-r--r--arch/arm/plat-nomadik/Kconfig5
-rw-r--r--arch/arm/plat-nomadik/Makefile1
-rw-r--r--arch/arm/plat-nomadik/gpio.c (renamed from arch/arm/mach-nomadik/gpio.c)188
-rw-r--r--arch/arm/plat-nomadik/include/plat/gpio.h70
-rw-r--r--arch/arm/plat-nomadik/include/plat/ste_dma40.h239
-rw-r--r--arch/arm/plat-nomadik/timer.c125
-rw-r--r--arch/arm/plat-omap/i2c.c39
-rw-r--r--arch/arm/plat-omap/include/plat/irqs.h4
-rw-r--r--arch/arm/plat-omap/include/plat/mcbsp.h6
-rw-r--r--arch/arm/plat-omap/include/plat/multi.h16
-rw-r--r--arch/arm/plat-omap/include/plat/omap34xx.h5
-rw-r--r--arch/arm/plat-omap/include/plat/serial.h16
-rw-r--r--arch/arm/plat-omap/include/plat/uncompress.h44
-rw-r--r--arch/arm/plat-omap/mcbsp.c55
-rw-r--r--arch/arm/plat-omap/sram.c16
-rw-r--r--arch/arm/plat-pxa/Makefile2
-rw-r--r--arch/arm/plat-pxa/pmu.c33
-rw-r--r--arch/arm/plat-s3c24xx/Kconfig7
-rw-r--r--arch/arm/plat-s3c24xx/Makefile1
-rw-r--r--arch/arm/plat-s3c24xx/common-smdk.c9
-rw-r--r--arch/arm/plat-s3c24xx/cpu.c21
-rw-r--r--arch/arm/plat-s3c24xx/devs.c14
-rw-r--r--arch/arm/plat-s3c24xx/gpio.c144
-rw-r--r--arch/arm/plat-s3c24xx/gpiolib.c60
-rw-r--r--arch/arm/plat-s3c24xx/include/plat/pll.h25
-rw-r--r--arch/arm/plat-s3c24xx/include/plat/s3c2416.h31
-rw-r--r--arch/arm/plat-s3c24xx/include/plat/s3c2443.h19
-rw-r--r--arch/arm/plat-s3c24xx/pm.c9
-rw-r--r--arch/arm/plat-s3c24xx/s3c2410-clock.c15
-rw-r--r--arch/arm/plat-s3c24xx/s3c2443-clock.c472
-rw-r--r--arch/arm/plat-s3c24xx/setup-i2c.c5
-rw-r--r--arch/arm/plat-s3c24xx/spi-bus0-gpe11_12_13.c16
-rw-r--r--arch/arm/plat-s3c24xx/spi-bus1-gpd8_9_10.c16
-rw-r--r--arch/arm/plat-s3c24xx/spi-bus1-gpg5_6_7.c16
-rw-r--r--arch/arm/plat-s5p/clock.c5
-rw-r--r--arch/arm/plat-s5p/include/plat/s5p-clock.h1
-rw-r--r--arch/arm/plat-samsung/Kconfig6
-rw-r--r--arch/arm/plat-samsung/clock.c15
-rw-r--r--arch/arm/plat-samsung/gpio-config.c103
-rw-r--r--arch/arm/plat-samsung/include/plat/clock.h1
-rw-r--r--arch/arm/plat-samsung/include/plat/cpu.h1
-rw-r--r--arch/arm/plat-samsung/include/plat/gpio-cfg-helpers.h58
-rw-r--r--arch/arm/plat-samsung/include/plat/gpio-cfg.h11
-rw-r--r--arch/arm/plat-samsung/include/plat/gpio-core.h5
-rw-r--r--arch/arm/plat-samsung/include/plat/pll6553x.h51
-rw-r--r--arch/arm/plat-samsung/include/plat/ts.h (renamed from arch/arm/mach-s3c2410/include/mach/ts.h)2
-rw-r--r--arch/arm/plat-spear/Kconfig31
-rw-r--r--arch/arm/plat-spear/Makefile8
-rw-r--r--arch/arm/plat-spear/clock.c435
-rw-r--r--arch/arm/plat-spear/include/plat/clkdev.h20
-rw-r--r--arch/arm/plat-spear/include/plat/clock.h126
-rw-r--r--arch/arm/plat-spear/include/plat/debug-macro.S38
-rw-r--r--arch/arm/plat-spear/include/plat/gpio.h24
-rw-r--r--arch/arm/plat-spear/include/plat/io.h22
-rw-r--r--arch/arm/plat-spear/include/plat/memory.h20
-rw-r--r--arch/arm/plat-spear/include/plat/padmux.h92
-rw-r--r--arch/arm/plat-spear/include/plat/shirq.h73
-rw-r--r--arch/arm/plat-spear/include/plat/system.h41
-rw-r--r--arch/arm/plat-spear/include/plat/timex.h19
-rw-r--r--arch/arm/plat-spear/include/plat/uncompress.h43
-rw-r--r--arch/arm/plat-spear/include/plat/vmalloc.h19
-rw-r--r--arch/arm/plat-spear/padmux.c164
-rw-r--r--arch/arm/plat-spear/shirq.c118
-rw-r--r--arch/arm/plat-spear/time.c292
-rw-r--r--arch/arm/plat-versatile/Makefile4
-rw-r--r--arch/arm/plat-versatile/clock.c (renamed from arch/arm/mach-integrator/clock.c)43
-rw-r--r--arch/arm/plat-versatile/include/plat/clock.h15
-rw-r--r--arch/arm/plat-versatile/include/plat/timer-sp.h2
-rw-r--r--arch/arm/plat-versatile/sched-clock.c53
-rw-r--r--arch/arm/plat-versatile/timer-sp.c156
-rw-r--r--arch/avr32/kernel/time.c12
-rw-r--r--arch/blackfin/kernel/kgdb.c5
-rw-r--r--arch/blackfin/kernel/time-ts.c13
-rw-r--r--arch/blackfin/kernel/time.c39
-rw-r--r--arch/cris/Kconfig3
-rw-r--r--arch/cris/arch-v10/drivers/ds1302.c20
-rw-r--r--arch/cris/arch-v10/drivers/pcf8563.c19
-rw-r--r--arch/cris/arch-v10/kernel/time.c37
-rw-r--r--arch/cris/arch-v32/drivers/i2c.c22
-rw-r--r--arch/cris/arch-v32/drivers/pcf8563.c21
-rw-r--r--arch/cris/arch-v32/kernel/time.c40
-rw-r--r--arch/cris/kernel/time.c20
-rw-r--r--arch/frv/kernel/break.S4
-rw-r--r--arch/frv/kernel/entry.S2
-rw-r--r--arch/frv/kernel/head.S2
-rw-r--r--arch/frv/kernel/time.c34
-rw-r--r--arch/frv/kernel/vmlinux.lds.S10
-rw-r--r--arch/frv/mm/tlb-miss.S2
-rw-r--r--arch/h8300/boot/compressed/head.S2
-rw-r--r--arch/h8300/boot/compressed/vmlinux.lds2
-rw-r--r--arch/h8300/kernel/time.c10
-rw-r--r--arch/ia64/hp/common/sba_iommu.c8
-rw-r--r--arch/ia64/include/asm/asmmacro.h12
-rw-r--r--arch/ia64/include/asm/bitops.h11
-rw-r--r--arch/ia64/include/asm/cache.h2
-rw-r--r--arch/ia64/include/asm/mmzone.h4
-rw-r--r--arch/ia64/include/asm/percpu.h2
-rw-r--r--arch/ia64/kernel/Makefile.gate2
-rw-r--r--arch/ia64/kernel/acpi.c8
-rw-r--r--arch/ia64/kernel/cpufreq/acpi-cpufreq.c10
-rw-r--r--arch/ia64/kernel/entry.S8
-rw-r--r--arch/ia64/kernel/gate-data.S2
-rw-r--r--arch/ia64/kernel/gate.S8
-rw-r--r--arch/ia64/kernel/gate.lds.S10
-rw-r--r--arch/ia64/kernel/init_task.c2
-rw-r--r--arch/ia64/kernel/irq_ia64.c9
-rw-r--r--arch/ia64/kernel/ivt.S2
-rw-r--r--arch/ia64/kernel/minstate.h4
-rw-r--r--arch/ia64/kernel/paravirtentry.S2
-rw-r--r--arch/ia64/kernel/process.c2
-rw-r--r--arch/ia64/kernel/salinfo.c5
-rw-r--r--arch/ia64/kernel/time.c12
-rw-r--r--arch/ia64/kernel/topology.c4
-rw-r--r--arch/ia64/kernel/unaligned.c24
-rw-r--r--arch/ia64/kernel/vmlinux.lds.S28
-rw-r--r--arch/ia64/kvm/kvm-ia64.c8
-rw-r--r--arch/ia64/kvm/vmm.c2
-rw-r--r--arch/ia64/kvm/vmm_ivt.S2
-rw-r--r--arch/ia64/mm/fault.c13
-rw-r--r--arch/ia64/pci/pci.c5
-rw-r--r--arch/ia64/scripts/unwcheck.py2
-rw-r--r--arch/ia64/sn/kernel/sn2/sn_hwperf.c4
-rw-r--r--arch/ia64/xen/gate-data.S2
-rw-r--r--arch/ia64/xen/xensetup.S2
-rw-r--r--arch/m32r/kernel/time.c47
-rw-r--r--arch/m68k/include/asm/m520xsim.h4
-rw-r--r--arch/m68k/include/asm/m523xsim.h8
-rw-r--r--arch/m68k/include/asm/m5249sim.h6
-rw-r--r--arch/m68k/include/asm/m527xsim.h10
-rw-r--r--arch/m68k/include/asm/m528xsim.h67
-rw-r--r--arch/m68k/include/asm/m532xsim.h1
-rw-r--r--arch/m68k/include/asm/mcfqspi.h64
-rw-r--r--arch/m68k/kernel/time.c11
-rw-r--r--arch/m68knommu/Kconfig4
-rw-r--r--arch/m68knommu/kernel/vmlinux.lds.S4
-rw-r--r--arch/m68knommu/platform/520x/config.c149
-rw-r--r--arch/m68knommu/platform/523x/config.c170
-rw-r--r--arch/m68knommu/platform/5249/config.c167
-rw-r--r--arch/m68knommu/platform/5272/intc.c39
-rw-r--r--arch/m68knommu/platform/527x/config.c182
-rw-r--r--arch/m68knommu/platform/528x/config.c137
-rw-r--r--arch/m68knommu/platform/532x/config.c124
-rw-r--r--arch/m68knommu/platform/68328/ints.c8
-rw-r--r--arch/m68knommu/platform/68360/head-ram.S2
-rw-r--r--arch/m68knommu/platform/68360/head-rom.S2
-rw-r--r--arch/m68knommu/platform/68360/ints.c8
-rw-r--r--arch/m68knommu/platform/coldfire/intc-2.c76
-rw-r--r--arch/m68knommu/platform/coldfire/intc-simr.c68
-rw-r--r--arch/m68knommu/platform/coldfire/intc.c59
-rw-r--r--arch/microblaze/include/asm/system.h11
-rw-r--r--arch/microblaze/kernel/entry-nommu.S2
-rw-r--r--arch/microblaze/kernel/microblaze_ksyms.c10
-rw-r--r--arch/microblaze/kernel/of_device.c1
-rw-r--r--arch/mips/Kconfig2
-rw-r--r--arch/mips/alchemy/common/dbdma.c101
-rw-r--r--arch/mips/alchemy/common/irq.c174
-rw-r--r--arch/mips/alchemy/common/power.c16
-rw-r--r--arch/mips/alchemy/devboards/pb1000/board_setup.c16
-rw-r--r--arch/mips/alchemy/devboards/pb1100/board_setup.c5
-rw-r--r--arch/mips/alchemy/devboards/pb1200/board_setup.c6
-rw-r--r--arch/mips/alchemy/devboards/pb1500/board_setup.c5
-rw-r--r--arch/mips/alchemy/devboards/pb1550/board_setup.c5
-rw-r--r--arch/mips/ar7/platform.c17
-rw-r--r--arch/mips/bcm63xx/gpio.c6
-rw-r--r--arch/mips/configs/ar7_defconfig196
-rw-r--r--arch/mips/configs/bcm47xx_defconfig973
-rw-r--r--arch/mips/configs/mtx1_defconfig2404
-rw-r--r--arch/mips/configs/rb532_defconfig521
-rw-r--r--arch/mips/include/asm/i8253.h2
-rw-r--r--arch/mips/include/asm/kgdb.h2
-rw-r--r--arch/mips/include/asm/mach-au1x00/au1000.h34
-rw-r--r--arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h4
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h4
-rw-r--r--arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h2
-rw-r--r--arch/mips/include/asm/mach-loongson/gpio.h35
-rw-r--r--arch/mips/kernel/cpu-probe.c30
-rw-r--r--arch/mips/kernel/cpufreq/loongson2_cpufreq.c4
-rw-r--r--arch/mips/kernel/i8253.c14
-rw-r--r--arch/mips/kernel/kgdb.c27
-rw-r--r--arch/mips/kernel/mips-mt-fpaff.c4
-rw-r--r--arch/mips/kernel/setup.c21
-rw-r--r--arch/mips/kernel/traps.c15
-rw-r--r--arch/mips/lasat/image/head.S2
-rw-r--r--arch/mips/lasat/image/romscript.normal2
-rw-r--r--arch/mips/loongson/common/Makefile1
-rw-r--r--arch/mips/loongson/common/gpio.c139
-rw-r--r--arch/mips/math-emu/cp1emu.c11
-rw-r--r--arch/mips/oprofile/op_model_loongson2.c60
-rw-r--r--arch/mips/powertv/asic/prealloc-calliope.c673
-rw-r--r--arch/mips/powertv/asic/prealloc-cronus.c668
-rw-r--r--arch/mips/powertv/asic/prealloc-cronuslite.c302
-rw-r--r--arch/mips/powertv/asic/prealloc-zeus.c505
-rw-r--r--arch/mips/powertv/asic/prealloc.h70
-rw-r--r--arch/mips/sibyte/bcm1480/irq.c2
-rw-r--r--arch/mips/sibyte/sb1250/irq.c2
-rw-r--r--arch/mips/sibyte/swarm/platform.c54
-rw-r--r--arch/mn10300/Kconfig3
-rw-r--r--arch/mn10300/kernel/rtc.c27
-rw-r--r--arch/mn10300/kernel/time.c4
-rw-r--r--arch/parisc/include/asm/cache.h2
-rw-r--r--arch/parisc/include/asm/system.h2
-rw-r--r--arch/parisc/kernel/head.S2
-rw-r--r--arch/parisc/kernel/init_task.c6
-rw-r--r--arch/parisc/kernel/vmlinux.lds.S12
-rw-r--r--arch/powerpc/Kconfig1
-rw-r--r--arch/powerpc/Kconfig.debug12
-rw-r--r--arch/powerpc/Makefile5
-rw-r--r--arch/powerpc/boot/Makefile9
-rw-r--r--arch/powerpc/boot/dts/iss4xx-mpic.dts155
-rw-r--r--arch/powerpc/boot/dts/iss4xx.dts116
-rw-r--r--arch/powerpc/boot/dts/katmai.dts1
-rw-r--r--arch/powerpc/boot/dts/redwood.dts122
-rw-r--r--arch/powerpc/boot/treeboot-iss4xx.c56
-rwxr-xr-xarch/powerpc/boot/wrapper9
-rw-r--r--arch/powerpc/configs/44x/iss476-smp_defconfig1026
-rw-r--r--arch/powerpc/include/asm/asm-compat.h2
-rw-r--r--arch/powerpc/include/asm/cache.h8
-rw-r--r--arch/powerpc/include/asm/cputable.h5
-rw-r--r--arch/powerpc/include/asm/hvcall.h1
-rw-r--r--arch/powerpc/include/asm/kmap_types.h1
-rw-r--r--arch/powerpc/include/asm/kvm.h10
-rw-r--r--arch/powerpc/include/asm/kvm_asm.h2
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h157
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_32.h42
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_64.h28
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_asm.h (renamed from arch/powerpc/include/asm/kvm_book3s_64_asm.h)25
-rw-r--r--arch/powerpc/include/asm/kvm_booke.h96
-rw-r--r--arch/powerpc/include/asm/kvm_fpu.h85
-rw-r--r--arch/powerpc/include/asm/kvm_host.h38
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h97
-rw-r--r--arch/powerpc/include/asm/mmu-44x.h51
-rw-r--r--arch/powerpc/include/asm/mmu.h1
-rw-r--r--arch/powerpc/include/asm/mmu_context.h2
-rw-r--r--arch/powerpc/include/asm/mmzone.h2
-rw-r--r--arch/powerpc/include/asm/mpic.h3
-rw-r--r--arch/powerpc/include/asm/paca.h10
-rw-r--r--arch/powerpc/include/asm/page_64.h8
-rw-r--r--arch/powerpc/include/asm/parport.h11
-rw-r--r--arch/powerpc/include/asm/pgalloc-64.h6
-rw-r--r--arch/powerpc/include/asm/pgtable-ppc32.h2
-rw-r--r--arch/powerpc/include/asm/processor.h3
-rw-r--r--arch/powerpc/include/asm/ptrace.h64
-rw-r--r--arch/powerpc/include/asm/reg.h14
-rw-r--r--arch/powerpc/include/asm/reg_booke.h24
-rw-r--r--arch/powerpc/include/asm/smp.h18
-rw-r--r--arch/powerpc/include/asm/topology.h16
-rw-r--r--arch/powerpc/kernel/asm-offsets.c102
-rw-r--r--arch/powerpc/kernel/cputable.c29
-rw-r--r--arch/powerpc/kernel/entry_32.S5
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S13
-rw-r--r--arch/powerpc/kernel/head_32.S14
-rw-r--r--arch/powerpc/kernel/head_44x.S828
-rw-r--r--arch/powerpc/kernel/head_64.S4
-rw-r--r--arch/powerpc/kernel/head_8xx.S70
-rw-r--r--arch/powerpc/kernel/head_booke.h4
-rw-r--r--arch/powerpc/kernel/ibmebus.c5
-rw-r--r--arch/powerpc/kernel/irq.c17
-rw-r--r--arch/powerpc/kernel/kgdb.c11
-rw-r--r--arch/powerpc/kernel/kprobes.c3
-rw-r--r--arch/powerpc/kernel/lparcfg.c12
-rw-r--r--arch/powerpc/kernel/misc_32.S9
-rw-r--r--arch/powerpc/kernel/of_device.c1
-rw-r--r--arch/powerpc/kernel/pci-common.c3
-rw-r--r--arch/powerpc/kernel/ppc_ksyms.c4
-rw-r--r--arch/powerpc/kernel/process.c3
-rw-r--r--arch/powerpc/kernel/ptrace.c103
-rw-r--r--arch/powerpc/kernel/rtas.c15
-rw-r--r--arch/powerpc/kernel/rtasd.c10
-rw-r--r--arch/powerpc/kernel/setup-common.c86
-rw-r--r--arch/powerpc/kernel/smp.c67
-rw-r--r--arch/powerpc/kernel/traps.c47
-rw-r--r--arch/powerpc/kernel/vio.c52
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S10
-rw-r--r--arch/powerpc/kvm/44x.c2
-rw-r--r--arch/powerpc/kvm/Kconfig24
-rw-r--r--arch/powerpc/kvm/Makefile20
-rw-r--r--arch/powerpc/kvm/book3s.c493
-rw-r--r--arch/powerpc/kvm/book3s_32_mmu.c54
-rw-r--r--arch/powerpc/kvm/book3s_32_mmu_host.c483
-rw-r--r--arch/powerpc/kvm/book3s_32_sr.S143
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu.c36
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_host.c102
-rw-r--r--arch/powerpc/kvm/book3s_64_slb.S183
-rw-r--r--arch/powerpc/kvm/book3s_emulate.c (renamed from arch/powerpc/kvm/book3s_64_emulate.c)245
-rw-r--r--arch/powerpc/kvm/book3s_exports.c (renamed from arch/powerpc/kvm/book3s_64_exports.c)0
-rw-r--r--arch/powerpc/kvm/book3s_interrupts.S (renamed from arch/powerpc/kvm/book3s_64_interrupts.S)204
-rw-r--r--arch/powerpc/kvm/book3s_paired_singles.c1289
-rw-r--r--arch/powerpc/kvm/book3s_rmhandlers.S (renamed from arch/powerpc/kvm/book3s_64_rmhandlers.S)135
-rw-r--r--arch/powerpc/kvm/book3s_segment.S259
-rw-r--r--arch/powerpc/kvm/booke.c6
-rw-r--r--arch/powerpc/kvm/e500.c2
-rw-r--r--arch/powerpc/kvm/emulate.c55
-rw-r--r--arch/powerpc/kvm/fpu.S273
-rw-r--r--arch/powerpc/kvm/powerpc.c110
-rw-r--r--arch/powerpc/lib/string.S4
-rw-r--r--arch/powerpc/mm/44x_mmu.c144
-rw-r--r--arch/powerpc/mm/fault.c19
-rw-r--r--arch/powerpc/mm/init_64.c43
-rw-r--r--arch/powerpc/mm/mmu_context_hash32.c29
-rw-r--r--arch/powerpc/mm/mmu_context_nohash.c8
-rw-r--r--arch/powerpc/mm/mmu_decl.h7
-rw-r--r--arch/powerpc/mm/numa.c58
-rw-r--r--arch/powerpc/mm/pgtable_32.c12
-rw-r--r--arch/powerpc/mm/pgtable_64.c8
-rw-r--r--arch/powerpc/mm/tlb_nohash_low.S118
-rw-r--r--arch/powerpc/platforms/44x/Kconfig20
-rw-r--r--arch/powerpc/platforms/44x/Makefile1
-rw-r--r--arch/powerpc/platforms/44x/iss4xx.c167
-rw-r--r--arch/powerpc/platforms/512x/Kconfig13
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype5
-rw-r--r--arch/powerpc/platforms/cell/cbe_cpufreq.c2
-rw-r--r--arch/powerpc/platforms/embedded6xx/wii.c4
-rw-r--r--arch/powerpc/platforms/iseries/pci.c10
-rw-r--r--arch/powerpc/platforms/iseries/smp.c2
-rw-r--r--arch/powerpc/platforms/pasemi/cpufreq.c2
-rw-r--r--arch/powerpc/platforms/powermac/cpufreq_64.c2
-rw-r--r--arch/powerpc/platforms/powermac/low_i2c.c7
-rw-r--r--arch/powerpc/platforms/powermac/setup.c2
-rw-r--r--arch/powerpc/platforms/powermac/smp.c4
-rw-r--r--arch/powerpc/platforms/ps3/system-bus.c1
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c8
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-cpu.c67
-rw-r--r--arch/powerpc/platforms/pseries/plpar_wrappers.h8
-rw-r--r--arch/powerpc/platforms/pseries/smp.c42
-rw-r--r--arch/powerpc/platforms/pseries/xics.c38
-rw-r--r--arch/powerpc/sysdev/mpic.c72
-rw-r--r--arch/powerpc/sysdev/ppc4xx_pci.c119
-rw-r--r--arch/powerpc/sysdev/ppc4xx_pci.h58
-rw-r--r--arch/powerpc/sysdev/ppc4xx_soc.c24
-rw-r--r--arch/s390/Makefile6
-rw-r--r--arch/s390/hypfs/inode.c2
-rw-r--r--arch/s390/include/asm/cache.h2
-rw-r--r--arch/s390/include/asm/lowcore.h48
-rw-r--r--arch/s390/include/asm/system.h5
-rw-r--r--arch/s390/include/asm/topology.h2
-rw-r--r--arch/s390/kernel/asm-offsets.c1
-rw-r--r--arch/s390/kernel/debug.c1
-rw-r--r--arch/s390/kernel/entry.S268
-rw-r--r--arch/s390/kernel/entry64.S291
-rw-r--r--arch/s390/kernel/head.S4
-rw-r--r--arch/s390/kernel/processor.c37
-rw-r--r--arch/s390/kernel/setup.c24
-rw-r--r--arch/s390/kernel/swsusp_asm64.S2
-rw-r--r--arch/s390/kernel/time.c1
-rw-r--r--arch/s390/kernel/topology.c7
-rw-r--r--arch/s390/kernel/vdso.c4
-rw-r--r--arch/s390/kvm/kvm-s390.c6
-rw-r--r--arch/s390/kvm/kvm-s390.h2
-rw-r--r--arch/sh/Kconfig29
-rw-r--r--arch/sh/Makefile1
-rw-r--r--arch/sh/boards/board-urquell.c3
-rw-r--r--arch/sh/boards/mach-sdk7786/setup.c17
-rw-r--r--arch/sh/boards/mach-x3proto/setup.c7
-rw-r--r--arch/sh/boot/compressed/Makefile6
-rw-r--r--arch/sh/boot/compressed/head_32.S4
-rw-r--r--arch/sh/boot/compressed/vmlinux.scr2
-rw-r--r--arch/sh/configs/sdk7786_defconfig636
-rw-r--r--arch/sh/include/asm/cache.h6
-rw-r--r--arch/sh/include/asm/clkdev.h35
-rw-r--r--arch/sh/include/asm/clock.h18
-rw-r--r--arch/sh/include/asm/dmaengine.h63
-rw-r--r--arch/sh/include/asm/hw_breakpoint.h11
-rw-r--r--arch/sh/include/asm/hwblk.h12
-rw-r--r--arch/sh/include/asm/irq.h19
-rw-r--r--arch/sh/include/asm/page.h13
-rw-r--r--arch/sh/include/asm/processor.h7
-rw-r--r--arch/sh/include/asm/processor_32.h2
-rw-r--r--arch/sh/include/asm/siu.h8
-rw-r--r--arch/sh/include/asm/smp-ops.h51
-rw-r--r--arch/sh/include/asm/smp.h40
-rw-r--r--arch/sh/include/cpu-sh4/cpu/dma-register.h3
-rw-r--r--arch/sh/include/cpu-sh4/cpu/mmu_context.h18
-rw-r--r--arch/sh/include/mach-sdk7786/mach/fpga.h9
-rw-r--r--arch/sh/kernel/Makefile2
-rw-r--r--arch/sh/kernel/clkdev.c169
-rw-r--r--arch/sh/kernel/cpu/clock-cpg.c5
-rw-r--r--arch/sh/kernel/cpu/clock.c64
-rw-r--r--arch/sh/kernel/cpu/hwblk.c5
-rw-r--r--arch/sh/kernel/cpu/init.c24
-rw-r--r--arch/sh/kernel/cpu/sh2/probe.c5
-rw-r--r--arch/sh/kernel/cpu/sh2/setup-sh7619.c6
-rw-r--r--arch/sh/kernel/cpu/sh2a/probe.c4
-rw-r--r--arch/sh/kernel/cpu/sh2a/setup-mxg.c9
-rw-r--r--arch/sh/kernel/cpu/sh2a/setup-sh7201.c9
-rw-r--r--arch/sh/kernel/cpu/sh2a/setup-sh7203.c12
-rw-r--r--arch/sh/kernel/cpu/sh2a/setup-sh7206.c15
-rw-r--r--arch/sh/kernel/cpu/sh3/probe.c4
-rw-r--r--arch/sh/kernel/cpu/sh3/setup-sh7705.c9
-rw-r--r--arch/sh/kernel/cpu/sh3/setup-sh770x.c9
-rw-r--r--arch/sh/kernel/cpu/sh3/setup-sh7710.c9
-rw-r--r--arch/sh/kernel/cpu/sh3/setup-sh7720.c24
-rw-r--r--arch/sh/kernel/cpu/sh4/probe.c4
-rw-r--r--arch/sh/kernel/cpu/sh4/setup-sh4-202.c9
-rw-r--r--arch/sh/kernel/cpu/sh4/setup-sh7750.c15
-rw-r--r--arch/sh/kernel/cpu/sh4/setup-sh7760.c9
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7343.c30
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7366.c28
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7722.c130
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7723.c232
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7724.c257
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7785.c51
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7786.c80
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7343.c16
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7366.c13
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7722.c21
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7723.c27
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7724.c81
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7757.c6
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7763.c18
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7770.c27
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7780.c24
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7785.c30
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7786.c157
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-shx3.c18
-rw-r--r--arch/sh/kernel/cpu/sh4a/smp-shx3.c74
-rw-r--r--arch/sh/kernel/cpu/sh5/probe.c4
-rw-r--r--arch/sh/kernel/cpu/sh5/setup-sh5.c9
-rw-r--r--arch/sh/kernel/crash_dump.c20
-rw-r--r--arch/sh/kernel/dwarf.c1
-rw-r--r--arch/sh/kernel/head_32.S7
-rw-r--r--arch/sh/kernel/hw_breakpoint.c39
-rw-r--r--arch/sh/kernel/idle.c8
-rw-r--r--arch/sh/kernel/irq.c91
-rw-r--r--arch/sh/kernel/kgdb.c14
-rw-r--r--arch/sh/kernel/localtimer.c6
-rw-r--r--arch/sh/kernel/machine_kexec.c3
-rw-r--r--arch/sh/kernel/process.c2
-rw-r--r--arch/sh/kernel/ptrace_32.c2
-rw-r--r--arch/sh/kernel/setup.c100
-rw-r--r--arch/sh/kernel/smp.c160
-rw-r--r--arch/sh/kernel/topology.c6
-rw-r--r--arch/sh/mm/Makefile9
-rw-r--r--arch/sh/mm/cache-shx3.c44
-rw-r--r--arch/sh/mm/cache.c7
-rw-r--r--arch/sh/mm/fault_32.c14
-rw-r--r--arch/sh/mm/pmb.c2
-rw-r--r--arch/sh/mm/tlb-debugfs.c179
-rw-r--r--arch/sh/mm/tlbflush_64.c20
-rw-r--r--arch/sparc/Kconfig2
-rw-r--r--arch/sparc/boot/btfixupprep.c2
-rw-r--r--arch/sparc/include/asm/bitops_64.h11
-rw-r--r--arch/sparc/include/asm/cache.h2
-rw-r--r--arch/sparc/kernel/kgdb_32.c6
-rw-r--r--arch/sparc/kernel/kgdb_64.c6
-rw-r--r--arch/sparc/kernel/of_device_32.c1
-rw-r--r--arch/sparc/kernel/of_device_64.c1
-rw-r--r--arch/sparc/kernel/pci.c1
-rw-r--r--arch/sparc/kernel/time_32.c18
-rw-r--r--arch/um/drivers/hostaudio_kern.c10
-rw-r--r--arch/um/drivers/line.c1
-rw-r--r--arch/um/include/asm/system.h3
-rw-r--r--arch/um/kernel/dyn.lds.S2
-rw-r--r--arch/um/kernel/init_task.c2
-rw-r--r--arch/um/kernel/skas/syscall.c4
-rw-r--r--arch/um/kernel/uml.lds.S2
-rw-r--r--arch/um/sys-i386/asm/elf.h2
-rw-r--r--arch/um/sys-x86_64/asm/elf.h2
-rw-r--r--arch/um/sys-x86_64/signal.c3
-rw-r--r--arch/x86/.gitignore3
-rw-r--r--arch/x86/Kconfig61
-rw-r--r--arch/x86/Kconfig.cpu24
-rw-r--r--arch/x86/Kconfig.debug11
-rw-r--r--arch/x86/boot/compressed/mkpiggy.c2
-rw-r--r--arch/x86/boot/compressed/vmlinux.lds.S4
-rw-r--r--arch/x86/crypto/aesni-intel_asm.S115
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c130
-rw-r--r--arch/x86/ia32/ia32entry.S2
-rw-r--r--arch/x86/ia32/sys_ia32.c9
-rw-r--r--arch/x86/include/asm/alternative-asm.h4
-rw-r--r--arch/x86/include/asm/alternative.h20
-rw-r--r--arch/x86/include/asm/apic.h13
-rw-r--r--arch/x86/include/asm/arch_hweight.h59
-rw-r--r--arch/x86/include/asm/atomic.h23
-rw-r--r--arch/x86/include/asm/atomic64_32.h278
-rw-r--r--arch/x86/include/asm/atomic64_64.h23
-rw-r--r--arch/x86/include/asm/bitops.h4
-rw-r--r--arch/x86/include/asm/boot.h2
-rw-r--r--arch/x86/include/asm/cache.h2
-rw-r--r--arch/x86/include/asm/cacheflush.h46
-rw-r--r--arch/x86/include/asm/cmpxchg_32.h3
-rw-r--r--arch/x86/include/asm/cpufeature.h1
-rw-r--r--arch/x86/include/asm/ds.h302
-rw-r--r--arch/x86/include/asm/e820.h7
-rw-r--r--arch/x86/include/asm/hardirq.h2
-rw-r--r--arch/x86/include/asm/hpet.h1
-rw-r--r--arch/x86/include/asm/hw_breakpoint.h10
-rw-r--r--arch/x86/include/asm/hyperv.h11
-rw-r--r--arch/x86/include/asm/hypervisor.h27
-rw-r--r--arch/x86/include/asm/i387.h135
-rw-r--r--arch/x86/include/asm/i8253.h2
-rw-r--r--arch/x86/include/asm/insn.h2
-rw-r--r--arch/x86/include/asm/inst.h96
-rw-r--r--arch/x86/include/asm/io_apic.h13
-rw-r--r--arch/x86/include/asm/k8.h5
-rw-r--r--arch/x86/include/asm/kgdb.h3
-rw-r--r--arch/x86/include/asm/kprobes.h2
-rw-r--r--arch/x86/include/asm/kvm.h17
-rw-r--r--arch/x86/include/asm/kvm_emulate.h46
-rw-r--r--arch/x86/include/asm/kvm_host.h79
-rw-r--r--arch/x86/include/asm/mce.h8
-rw-r--r--arch/x86/include/asm/mpspec.h10
-rw-r--r--arch/x86/include/asm/mshyperv.h14
-rw-r--r--arch/x86/include/asm/msr-index.h15
-rw-r--r--arch/x86/include/asm/nmi.h2
-rw-r--r--arch/x86/include/asm/pci_x86.h4
-rw-r--r--arch/x86/include/asm/percpu.h26
-rw-r--r--arch/x86/include/asm/perf_event.h76
-rw-r--r--arch/x86/include/asm/perf_event_p4.h794
-rw-r--r--arch/x86/include/asm/processor.h47
-rw-r--r--arch/x86/include/asm/ptrace-abi.h57
-rw-r--r--arch/x86/include/asm/ptrace.h6
-rw-r--r--arch/x86/include/asm/rdc321x_defs.h12
-rw-r--r--arch/x86/include/asm/svm.h9
-rw-r--r--arch/x86/include/asm/sys_ia32.h3
-rw-r--r--arch/x86/include/asm/thread_info.h9
-rw-r--r--arch/x86/include/asm/traps.h2
-rw-r--r--arch/x86/include/asm/unistd_32.h4
-rw-r--r--arch/x86/include/asm/unistd_64.h4
-rw-r--r--arch/x86/include/asm/uv/uv_bau.h247
-rw-r--r--arch/x86/include/asm/uv/uv_hub.h2
-rw-r--r--arch/x86/include/asm/uv/uv_mmrs.h528
-rw-r--r--arch/x86/include/asm/vmware.h27
-rw-r--r--arch/x86/include/asm/vmx.h12
-rw-r--r--arch/x86/include/asm/xsave.h7
-rw-r--r--arch/x86/kernel/Makefile2
-rw-r--r--arch/x86/kernel/acpi/boot.c137
-rw-r--r--arch/x86/kernel/acpi/wakeup_32.S2
-rw-r--r--arch/x86/kernel/alternative.c47
-rw-r--r--arch/x86/kernel/apic/Makefile7
-rw-r--r--arch/x86/kernel/apic/es7000_32.c19
-rw-r--r--arch/x86/kernel/apic/hw_nmi.c127
-rw-r--r--arch/x86/kernel/apic/io_apic.c99
-rw-r--r--arch/x86/kernel/apic/nmi.c7
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c3
-rw-r--r--arch/x86/kernel/apm_32.c4
-rw-r--r--arch/x86/kernel/cpu/Makefile2
-rw-r--r--arch/x86/kernel/cpu/addon_cpuid_features.c14
-rw-r--r--arch/x86/kernel/cpu/bugs.c2
-rw-r--r--arch/x86/kernel/cpu/common.c34
-rw-r--r--arch/x86/kernel/cpu/cpufreq/Makefile4
-rw-r--r--arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c44
-rw-r--r--arch/x86/kernel/cpu/cpufreq/mperf.c51
-rw-r--r--arch/x86/kernel/cpu/cpufreq/mperf.h9
-rw-r--r--arch/x86/kernel/cpu/cpufreq/powernow-k8.c171
-rw-r--r--arch/x86/kernel/cpu/cpufreq/powernow-k8.h2
-rw-r--r--arch/x86/kernel/cpu/hypervisor.c52
-rw-r--r--arch/x86/kernel/cpu/intel.c8
-rw-r--r--arch/x86/kernel/cpu/intel_cacheinfo.c181
-rw-r--r--arch/x86/kernel/cpu/mcheck/Makefile2
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-apei.c138
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-internal.h23
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c93
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c55
-rw-r--r--arch/x86/kernel/cpu/perf_event.c815
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd.c46
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c357
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_ds.c641
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_lbr.c218
-rw-r--r--arch/x86/kernel/cpu/perf_event_p4.c841
-rw-r--r--arch/x86/kernel/cpu/perf_event_p6.c31
-rw-r--r--arch/x86/kernel/cpu/vmware.c38
-rw-r--r--arch/x86/kernel/ds.c1437
-rw-r--r--arch/x86/kernel/ds_selftest.c408
-rw-r--r--arch/x86/kernel/ds_selftest.h15
-rw-r--r--arch/x86/kernel/dumpstack.c5
-rw-r--r--arch/x86/kernel/early_printk.c8
-rw-r--r--arch/x86/kernel/entry_32.S19
-rw-r--r--arch/x86/kernel/hpet.c29
-rw-r--r--arch/x86/kernel/hw_breakpoint.c41
-rw-r--r--arch/x86/kernel/i387.c107
-rw-r--r--arch/x86/kernel/i8253.c14
-rw-r--r--arch/x86/kernel/init_task.c2
-rw-r--r--arch/x86/kernel/irqinit.c2
-rw-r--r--arch/x86/kernel/kgdb.c103
-rw-r--r--arch/x86/kernel/kprobes.c43
-rw-r--r--arch/x86/kernel/microcode_core.c4
-rw-r--r--arch/x86/kernel/microcode_intel.c22
-rw-r--r--arch/x86/kernel/mpparse.c25
-rw-r--r--arch/x86/kernel/process.c39
-rw-r--r--arch/x86/kernel/process_32.c10
-rw-r--r--arch/x86/kernel/process_64.c10
-rw-r--r--arch/x86/kernel/ptrace.c384
-rw-r--r--arch/x86/kernel/quirks.c5
-rw-r--r--arch/x86/kernel/setup.c1
-rw-r--r--arch/x86/kernel/setup_percpu.c2
-rw-r--r--arch/x86/kernel/sfi.c4
-rw-r--r--arch/x86/kernel/step.c46
-rw-r--r--arch/x86/kernel/syscall_table_32.S2
-rw-r--r--arch/x86/kernel/tboot.c20
-rw-r--r--arch/x86/kernel/tlb_uv.c1280
-rw-r--r--arch/x86/kernel/traps.c204
-rw-r--r--arch/x86/kernel/uv_irq.c12
-rw-r--r--arch/x86/kernel/vmlinux.lds.S4
-rw-r--r--arch/x86/kernel/x8664_ksyms_64.c1
-rw-r--r--arch/x86/kernel/xsave.c8
-rw-r--r--arch/x86/kvm/emulate.c1247
-rw-r--r--arch/x86/kvm/i8259.c53
-rw-r--r--arch/x86/kvm/irq.h1
-rw-r--r--arch/x86/kvm/kvm_timer.h4
-rw-r--r--arch/x86/kvm/mmu.c215
-rw-r--r--arch/x86/kvm/mmutrace.h84
-rw-r--r--arch/x86/kvm/paging_tmpl.h46
-rw-r--r--arch/x86/kvm/svm.c941
-rw-r--r--arch/x86/kvm/timer.c3
-rw-r--r--arch/x86/kvm/trace.h165
-rw-r--r--arch/x86/kvm/vmx.c351
-rw-r--r--arch/x86/kvm/x86.c1548
-rw-r--r--arch/x86/kvm/x86.h10
-rw-r--r--arch/x86/lib/Makefile5
-rw-r--r--arch/x86/lib/atomic64_32.c273
-rw-r--r--arch/x86/lib/atomic64_386_32.S174
-rw-r--r--arch/x86/lib/atomic64_cx8_32.S224
-rw-r--r--arch/x86/math-emu/fpu_aux.c6
-rw-r--r--arch/x86/math-emu/fpu_entry.c4
-rw-r--r--arch/x86/math-emu/fpu_system.h2
-rw-r--r--arch/x86/mm/Makefile1
-rw-r--r--arch/x86/mm/pageattr.c53
-rw-r--r--arch/x86/mm/pat.c239
-rw-r--r--arch/x86/mm/pat_internal.h46
-rw-r--r--arch/x86/mm/pat_rbtree.c273
-rw-r--r--arch/x86/mm/srat_64.c51
-rw-r--r--arch/x86/oprofile/op_model_ppro.c4
-rw-r--r--arch/x86/pci/acpi.c5
-rw-r--r--arch/x86/pci/common.c2
-rw-r--r--arch/x86/pci/direct.c16
-rw-r--r--arch/x86/pci/irq.c9
-rw-r--r--arch/x86/pci/legacy.c42
-rw-r--r--arch/x86/pci/mmconfig_32.c8
-rw-r--r--arch/x86/pci/numaq_32.c8
-rw-r--r--arch/x86/pci/pcbios.c8
-rw-r--r--arch/x86/xen/time.c6
-rw-r--r--arch/xtensa/kernel/time.c5
-rw-r--r--block/Kconfig23
-rw-r--r--block/Kconfig.iosched16
-rw-r--r--block/Makefile2
-rw-r--r--block/blk-barrier.c147
-rw-r--r--block/blk-cgroup.c791
-rw-r--r--block/blk-cgroup.h178
-rw-r--r--block/blk-core.c46
-rw-r--r--block/blk-lib.c233
-rw-r--r--block/cfq-iosched.c80
-rw-r--r--block/elevator.c11
-rw-r--r--block/genhd.c1
-rw-r--r--block/ioctl.c2
-rw-r--r--crypto/Kconfig2
-rw-r--r--crypto/algapi.c2
-rw-r--r--crypto/internal.h2
-rw-r--r--crypto/pcrypt.c11
-rw-r--r--crypto/tcrypt.c7
-rw-r--r--crypto/tcrypt.h29
-rw-r--r--crypto/testmgr.h64
-rw-r--r--crypto/vmac.c75
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/acpi/Kconfig9
-rw-r--r--drivers/acpi/Makefile5
-rw-r--r--drivers/acpi/acpica/Makefile4
-rw-r--r--drivers/acpi/acpica/acevents.h51
-rw-r--r--drivers/acpi/acpica/acglobal.h25
-rw-r--r--drivers/acpi/acpica/acinterp.h9
-rw-r--r--drivers/acpi/acpica/aclocal.h19
-rw-r--r--drivers/acpi/acpica/actables.h4
-rw-r--r--drivers/acpi/acpica/dsfield.c2
-rw-r--r--drivers/acpi/acpica/dsmethod.c2
-rw-r--r--drivers/acpi/acpica/dsmthdat.c10
-rw-r--r--drivers/acpi/acpica/dsobject.c14
-rw-r--r--drivers/acpi/acpica/dsopcode.c13
-rw-r--r--drivers/acpi/acpica/dswexec.c6
-rw-r--r--drivers/acpi/acpica/dswstate.c10
-rw-r--r--drivers/acpi/acpica/evevent.c2
-rw-r--r--drivers/acpi/acpica/evgpe.c167
-rw-r--r--drivers/acpi/acpica/evgpeblk.c766
-rw-r--r--drivers/acpi/acpica/evgpeinit.c653
-rw-r--r--drivers/acpi/acpica/evgpeutil.c337
-rw-r--r--drivers/acpi/acpica/evmisc.c2
-rw-r--r--drivers/acpi/acpica/evxface.c24
-rw-r--r--drivers/acpi/acpica/evxfevnt.c224
-rw-r--r--drivers/acpi/acpica/exconfig.c21
-rw-r--r--drivers/acpi/acpica/exconvrt.c4
-rw-r--r--drivers/acpi/acpica/excreate.c4
-rw-r--r--drivers/acpi/acpica/exdebug.c261
-rw-r--r--drivers/acpi/acpica/exfield.c2
-rw-r--r--drivers/acpi/acpica/exfldio.c16
-rw-r--r--drivers/acpi/acpica/exmisc.c8
-rw-r--r--drivers/acpi/acpica/exmutex.c46
-rw-r--r--drivers/acpi/acpica/exnames.c4
-rw-r--r--drivers/acpi/acpica/exoparg1.c18
-rw-r--r--drivers/acpi/acpica/exoparg2.c37
-rw-r--r--drivers/acpi/acpica/exoparg3.c4
-rw-r--r--drivers/acpi/acpica/exoparg6.c4
-rw-r--r--drivers/acpi/acpica/exprep.c4
-rw-r--r--drivers/acpi/acpica/exregion.c17
-rw-r--r--drivers/acpi/acpica/exresnte.c4
-rw-r--r--drivers/acpi/acpica/exresolv.c11
-rw-r--r--drivers/acpi/acpica/exresop.c8
-rw-r--r--drivers/acpi/acpica/exstore.c218
-rw-r--r--drivers/acpi/acpica/exsystem.c10
-rw-r--r--drivers/acpi/acpica/hwacpi.c20
-rw-r--r--drivers/acpi/acpica/hwregs.c6
-rw-r--r--drivers/acpi/acpica/hwsleep.c2
-rw-r--r--drivers/acpi/acpica/hwvalid.c2
-rw-r--r--drivers/acpi/acpica/nsaccess.c2
-rw-r--r--drivers/acpi/acpica/nsdump.c4
-rw-r--r--drivers/acpi/acpica/nsnames.c2
-rw-r--r--drivers/acpi/acpica/nssearch.c2
-rw-r--r--drivers/acpi/acpica/nsutils.c4
-rw-r--r--drivers/acpi/acpica/psargs.c4
-rw-r--r--drivers/acpi/acpica/psloop.c3
-rw-r--r--drivers/acpi/acpica/psxface.c5
-rw-r--r--drivers/acpi/acpica/rscreate.c14
-rw-r--r--drivers/acpi/acpica/rslist.c6
-rw-r--r--drivers/acpi/acpica/rsmisc.c4
-rw-r--r--drivers/acpi/acpica/tbfadt.c16
-rw-r--r--drivers/acpi/acpica/tbfind.c2
-rw-r--r--drivers/acpi/acpica/tbinstal.c69
-rw-r--r--drivers/acpi/acpica/tbutils.c101
-rw-r--r--drivers/acpi/acpica/tbxface.c80
-rw-r--r--drivers/acpi/acpica/tbxfroot.c6
-rw-r--r--drivers/acpi/acpica/utalloc.c2
-rw-r--r--drivers/acpi/acpica/utcopy.c14
-rw-r--r--drivers/acpi/acpica/utdelete.c6
-rw-r--r--drivers/acpi/acpica/uteval.c2
-rw-r--r--drivers/acpi/acpica/utglobal.c1
-rw-r--r--drivers/acpi/acpica/utmisc.c6
-rw-r--r--drivers/acpi/acpica/utmutex.c4
-rw-r--r--drivers/acpi/acpica/utobject.c8
-rw-r--r--drivers/acpi/apei/Kconfig30
-rw-r--r--drivers/acpi/apei/Makefile5
-rw-r--r--drivers/acpi/apei/apei-base.c594
-rw-r--r--drivers/acpi/apei/apei-internal.h114
-rw-r--r--drivers/acpi/apei/cper.c84
-rw-r--r--drivers/acpi/apei/einj.c485
-rw-r--r--drivers/acpi/apei/erst.c852
-rw-r--r--drivers/acpi/apei/ghes.c425
-rw-r--r--drivers/acpi/apei/hest.c173
-rw-r--r--drivers/acpi/atomicio.c361
-rw-r--r--drivers/acpi/bus.c37
-rw-r--r--drivers/acpi/debug.c32
-rw-r--r--drivers/acpi/hed.c113
-rw-r--r--drivers/acpi/hest.c139
-rw-r--r--drivers/acpi/osl.c13
-rw-r--r--drivers/acpi/pci_irq.c8
-rw-r--r--drivers/acpi/pci_root.c67
-rw-r--r--drivers/acpi/power.c1
-rw-r--r--drivers/acpi/processor_idle.c2
-rw-r--r--drivers/acpi/scan.c2
-rw-r--r--drivers/acpi/sleep.c238
-rw-r--r--drivers/acpi/system.c5
-rw-r--r--drivers/acpi/video.c118
-rw-r--r--drivers/ata/Kconfig8
-rw-r--r--drivers/ata/Makefile3
-rw-r--r--drivers/ata/ahci.c2542
-rw-r--r--drivers/ata/ahci.h333
-rw-r--r--drivers/ata/ahci_platform.c192
-rw-r--r--drivers/ata/ata_piix.c2
-rw-r--r--drivers/ata/libahci.c2110
-rw-r--r--drivers/ata/libata-core.c37
-rw-r--r--drivers/ata/libata-pmp.c32
-rw-r--r--drivers/ata/libata-sff.c94
-rw-r--r--drivers/ata/pata_mpc52xx.c78
-rw-r--r--drivers/ata/pata_pcmcia.c47
-rw-r--r--drivers/atm/atmtcp.c6
-rw-r--r--drivers/atm/eni.c2
-rw-r--r--drivers/atm/he.c4
-rw-r--r--drivers/base/cpu.c2
-rw-r--r--drivers/base/firmware_class.c26
-rw-r--r--drivers/base/platform.c26
-rw-r--r--drivers/base/power/runtime.c10
-rw-r--r--drivers/base/power/sysfs.c65
-rw-r--r--drivers/block/Kconfig22
-rw-r--r--drivers/block/aoe/aoecmd.c8
-rw-r--r--drivers/block/cciss.c3
-rw-r--r--drivers/block/drbd/drbd_int.h3
-rw-r--r--drivers/block/drbd/drbd_receiver.c3
-rw-r--r--drivers/block/hd.c4
-rw-r--r--drivers/block/virtio_blk.c46
-rw-r--r--drivers/bluetooth/bluecard_cs.c11
-rw-r--r--drivers/bluetooth/bt3c_cs.c11
-rw-r--r--drivers/bluetooth/btmrvl_drv.h8
-rw-r--r--drivers/bluetooth/btmrvl_main.c92
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c7
-rw-r--r--drivers/bluetooth/btuart_cs.c11
-rw-r--r--drivers/bluetooth/dtl1_cs.c11
-rw-r--r--drivers/bluetooth/hci_h4.c2
-rw-r--r--drivers/bluetooth/hci_ll.c8
-rw-r--r--drivers/bluetooth/hci_vhci.c2
-rw-r--r--drivers/char/agp/agp.h80
-rw-r--r--drivers/char/agp/ali-agp.c1
-rw-r--r--drivers/char/agp/amd-k7-agp.c9
-rw-r--r--drivers/char/agp/amd64-agp.c1
-rw-r--r--drivers/char/agp/ati-agp.c8
-rw-r--r--drivers/char/agp/efficeon-agp.c1
-rw-r--r--drivers/char/agp/generic.c2
-rw-r--r--drivers/char/agp/intel-agp.c1883
-rw-r--r--drivers/char/agp/intel-agp.h239
-rw-r--r--drivers/char/agp/intel-gtt.c1516
-rw-r--r--drivers/char/agp/nvidia-agp.c1
-rw-r--r--drivers/char/agp/sis-agp.c1
-rw-r--r--drivers/char/agp/uninorth-agp.c16
-rw-r--r--drivers/char/agp/via-agp.c2
-rw-r--r--drivers/char/bsr.c2
-rw-r--r--drivers/char/hvc_iucv.c9
-rw-r--r--drivers/char/hw_random/virtio-rng.c6
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c42
-rw-r--r--drivers/char/keyboard.c325
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c9
-rw-r--r--drivers/char/pcmcia/cm4040_cs.c5
-rw-r--r--drivers/char/pcmcia/ipwireless/main.c19
-rw-r--r--drivers/char/pcmcia/ipwireless/main.h1
-rw-r--r--drivers/char/pcmcia/ipwireless/tty.c19
-rw-r--r--drivers/char/pcmcia/ipwireless/tty.h3
-rw-r--r--drivers/char/pcmcia/synclink_cs.c22
-rw-r--r--drivers/char/sysrq.c245
-rw-r--r--drivers/char/tpm/tpm.c47
-rw-r--r--drivers/char/tty_io.c1
-rw-r--r--drivers/char/virtio_console.c700
-rw-r--r--drivers/clocksource/cs5535-clockevt.c8
-rw-r--r--drivers/clocksource/sh_cmt.c45
-rw-r--r--drivers/clocksource/sh_mtu2.c37
-rw-r--r--drivers/clocksource/sh_tmu.c41
-rw-r--r--drivers/cpufreq/cpufreq.c50
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c48
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c148
-rw-r--r--drivers/cpuidle/governors/ladder.c2
-rw-r--r--drivers/cpuidle/governors/menu.c2
-rw-r--r--drivers/crypto/Kconfig9
-rw-r--r--drivers/crypto/Makefile2
-rw-r--r--drivers/crypto/geode-aes.c36
-rw-r--r--drivers/crypto/mv_cesa.c688
-rw-r--r--drivers/crypto/mv_cesa.h40
-rw-r--r--drivers/crypto/omap-sham.c1259
-rw-r--r--drivers/dma/Kconfig16
-rw-r--r--drivers/dma/Makefile2
-rw-r--r--drivers/dma/at_hdmac.c34
-rw-r--r--drivers/dma/coh901318.c262
-rw-r--r--drivers/dma/dmaengine.c4
-rw-r--r--drivers/dma/dw_dmac.c23
-rw-r--r--drivers/dma/fsldma.c28
-rw-r--r--drivers/dma/ioat/dma.c12
-rw-r--r--drivers/dma/ioat/dma.h18
-rw-r--r--drivers/dma/ioat/dma_v2.c2
-rw-r--r--drivers/dma/ioat/dma_v3.c20
-rw-r--r--drivers/dma/iop-adma.c39
-rw-r--r--drivers/dma/ipu/ipu_idmac.c32
-rw-r--r--drivers/dma/mpc512x_dma.c13
-rw-r--r--drivers/dma/mv_xor.c25
-rw-r--r--drivers/dma/ppc4xx/adma.c19
-rw-r--r--drivers/dma/shdma.c59
-rw-r--r--drivers/dma/shdma.h4
-rw-r--r--drivers/dma/ste_dma40.c2718
-rw-r--r--drivers/dma/ste_dma40_ll.c454
-rw-r--r--drivers/dma/ste_dma40_ll.h354
-rw-r--r--drivers/dma/timb_dma.c859
-rw-r--r--drivers/dma/txx9dmac.c22
-rw-r--r--drivers/edac/Kconfig13
-rw-r--r--drivers/edac/Makefile2
-rw-r--r--drivers/edac/edac_core.h23
-rw-r--r--drivers/edac/edac_mc_sysfs.c175
-rw-r--r--drivers/edac/edac_mce.c61
-rw-r--r--drivers/edac/i7core_edac.c2021
-rw-r--r--drivers/firewire/core-card.c11
-rw-r--r--drivers/firewire/core-cdev.c8
-rw-r--r--drivers/firewire/core-transaction.c26
-rw-r--r--drivers/firewire/core.h5
-rw-r--r--drivers/firewire/ohci.c190
-rw-r--r--drivers/firewire/ohci.h10
-rw-r--r--drivers/firmware/Kconfig9
-rw-r--r--drivers/firmware/Makefile1
-rw-r--r--drivers/firmware/iscsi_boot_sysfs.c481
-rw-r--r--drivers/firmware/iscsi_ibft.c726
-rw-r--r--drivers/firmware/iscsi_ibft_find.c56
-rw-r--r--drivers/gpio/Kconfig25
-rw-r--r--drivers/gpio/Makefile5
-rw-r--r--drivers/gpio/janz-ttl.c258
-rw-r--r--drivers/gpio/rdc321x-gpio.c246
-rw-r--r--drivers/gpio/tc35892-gpio.c381
-rw-r--r--drivers/gpio/wm8994-gpio.c12
-rw-r--r--drivers/gpu/drm/Kconfig1
-rw-r--r--drivers/gpu/drm/drm_bufs.c2
-rw-r--r--drivers/gpu/drm/drm_crtc.c9
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c395
-rw-r--r--drivers/gpu/drm/drm_edid.c789
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c913
-rw-r--r--drivers/gpu/drm/drm_gem.c49
-rw-r--r--drivers/gpu/drm/drm_modes.c105
-rw-r--r--drivers/gpu/drm/drm_sysfs.c2
-rw-r--r--drivers/gpu/drm/i915/dvo.h10
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7017.c46
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7xxx.c44
-rw-r--r--drivers/gpu/drm/i915/dvo_ivch.c21
-rw-r--r--drivers/gpu/drm/i915/dvo_sil164.c38
-rw-r--r--drivers/gpu/drm/i915/dvo_tfp410.c32
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c15
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c6
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c29
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h28
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c80
-rw-r--r--drivers/gpu/drm/i915/i915_gem_debug.c2
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c22
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h138
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c41
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h86
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c88
-rw-r--r--drivers/gpu/drm/i915/intel_display.c990
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c254
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h31
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c103
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c204
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c70
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c111
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c21
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c8
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c933
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c185
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c114
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c41
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fb.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c198
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c5
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c16
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c3
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c16
-rw-r--r--drivers/gpu/drm/radeon/atombios.h80
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c8
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c1494
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h556
-rw-r--r--drivers/gpu/drm/radeon/r100.c253
-rw-r--r--drivers/gpu/drm/radeon/r100d.h128
-rw-r--r--drivers/gpu/drm/radeon/r300.c148
-rw-r--r--drivers/gpu/drm/radeon/r300d.h47
-rw-r--r--drivers/gpu/drm/radeon/r420.c4
-rw-r--r--drivers/gpu/drm/radeon/r520.c5
-rw-r--r--drivers/gpu/drm/radeon/r600.c192
-rw-r--r--drivers/gpu/drm/radeon/r600_audio.c58
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c3
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c65
-rw-r--r--drivers/gpu/drm/radeon/r600_reg.h57
-rw-r--r--drivers/gpu/drm/radeon/radeon.h128
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c56
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h26
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c44
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c50
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c33
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c42
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c39
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c352
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c107
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h24
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c32
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_state.c19
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c116
-rw-r--r--drivers/gpu/drm/radeon/rs400.c6
-rw-r--r--drivers/gpu/drm/radeon/rs600.c77
-rw-r--r--drivers/gpu/drm/radeon/rs600d.h46
-rw-r--r--drivers/gpu/drm/radeon/rs690.c6
-rw-r--r--drivers/gpu/drm/radeon/rv515.c96
-rw-r--r--drivers/gpu/drm/radeon/rv515d.h46
-rw-r--r--drivers/gpu/drm/radeon/rv770.c21
-rw-r--r--drivers/gpu/drm/ttm/Makefile2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c84
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c122
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c41
-rw-r--r--drivers/gpu/drm/ttm/ttm_memory.c7
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c845
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c44
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c50
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c2
-rw-r--r--drivers/hid/Kconfig86
-rw-r--r--drivers/hid/Makefile4
-rw-r--r--drivers/hid/hid-3m-pct.c31
-rw-r--r--drivers/hid/hid-cando.c272
-rw-r--r--drivers/hid/hid-cherry.c1
-rw-r--r--drivers/hid/hid-core.c57
-rw-r--r--drivers/hid/hid-egalax.c281
-rw-r--r--drivers/hid/hid-ids.h19
-rw-r--r--drivers/hid/hid-lg.c9
-rw-r--r--drivers/hid/hid-ntrig.c590
-rw-r--r--drivers/hid/hid-picolcd.c2631
-rw-r--r--drivers/hid/hid-roccat-kone.c1007
-rw-r--r--drivers/hid/hid-roccat-kone.h214
-rw-r--r--drivers/hid/hid-sony.c2
-rw-r--r--drivers/hid/hid-topseed.c38
-rw-r--r--drivers/hid/hid-wacom.c230
-rw-r--r--drivers/hid/hidraw.c50
-rw-r--r--drivers/hid/usbhid/hid-core.c46
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hid/usbhid/hiddev.c19
-rw-r--r--drivers/hid/usbhid/usbkbd.c1
-rw-r--r--drivers/hwmon/Kconfig21
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/adm1031.c68
-rw-r--r--drivers/hwmon/applesmc.c247
-rw-r--r--drivers/hwmon/asc7621.c63
-rw-r--r--drivers/hwmon/asus_atk0110.c7
-rw-r--r--drivers/hwmon/emc1403.c344
-rw-r--r--drivers/hwmon/f71882fg.c170
-rw-r--r--drivers/hwmon/lm63.c16
-rw-r--r--drivers/hwmon/lm90.c3
-rw-r--r--drivers/hwmon/tmp401.c255
-rw-r--r--drivers/i2c/algos/i2c-algo-pca.c36
-rw-r--r--drivers/i2c/busses/Kconfig4
-rw-r--r--drivers/i2c/busses/i2c-ali1535.c2
-rw-r--r--drivers/i2c/busses/i2c-ali15x3.c2
-rw-r--r--drivers/i2c/busses/i2c-amd756.c2
-rw-r--r--drivers/i2c/busses/i2c-amd8111.c2
-rw-r--r--drivers/i2c/busses/i2c-at91.c3
-rw-r--r--drivers/i2c/busses/i2c-bfin-twi.c168
-rw-r--r--drivers/i2c/busses/i2c-cpm.c2
-rw-r--r--drivers/i2c/busses/i2c-elektor.c2
-rw-r--r--drivers/i2c/busses/i2c-gpio.c2
-rw-r--r--drivers/i2c/busses/i2c-hydra.c2
-rw-r--r--drivers/i2c/busses/i2c-i801.c59
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c16
-rw-r--r--drivers/i2c/busses/i2c-iop3xx.c3
-rw-r--r--drivers/i2c/busses/i2c-mpc.c6
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c3
-rw-r--r--drivers/i2c/busses/i2c-nforce2.c2
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c6
-rw-r--r--drivers/i2c/busses/i2c-ocores.c2
-rw-r--r--drivers/i2c/busses/i2c-omap.c265
-rw-r--r--drivers/i2c/busses/i2c-parport-light.c2
-rw-r--r--drivers/i2c/busses/i2c-parport.c2
-rw-r--r--drivers/i2c/busses/i2c-pasemi.c2
-rw-r--r--drivers/i2c/busses/i2c-pca-isa.c2
-rw-r--r--drivers/i2c/busses/i2c-pca-platform.c2
-rw-r--r--drivers/i2c/busses/i2c-piix4.c2
-rw-r--r--drivers/i2c/busses/i2c-pmcmsp.c2
-rw-r--r--drivers/i2c/busses/i2c-pxa.c2
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c21
-rw-r--r--drivers/i2c/busses/i2c-s6000.c2
-rw-r--r--drivers/i2c/busses/i2c-sh7760.c2
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c121
-rw-r--r--drivers/i2c/busses/i2c-sibyte.c2
-rw-r--r--drivers/i2c/busses/i2c-simtec.c3
-rw-r--r--drivers/i2c/busses/i2c-sis5595.c2
-rw-r--r--drivers/i2c/busses/i2c-sis630.c2
-rw-r--r--drivers/i2c/busses/i2c-sis96x.c2
-rw-r--r--drivers/i2c/busses/i2c-stub.c9
-rw-r--r--drivers/i2c/busses/i2c-versatile.c3
-rw-r--r--drivers/i2c/busses/i2c-via.c2
-rw-r--r--drivers/i2c/busses/i2c-viapro.c2
-rw-r--r--drivers/i2c/busses/scx200_acb.c4
-rw-r--r--drivers/i2c/busses/scx200_i2c.c2
-rw-r--r--drivers/i2c/i2c-core.c259
-rw-r--r--drivers/i2c/i2c-dev.c36
-rw-r--r--drivers/ide/cmd640.c6
-rw-r--r--drivers/ide/ide-cs.c20
-rw-r--r--drivers/ide/ide-dma.c11
-rw-r--r--drivers/ide/ide.c20
-rw-r--r--drivers/ide/ide_platform.c1
-rw-r--r--drivers/ide/pdc202xx_old.c5
-rw-r--r--drivers/ieee1394/dv1394.c11
-rw-r--r--drivers/ieee1394/raw1394.c3
-rw-r--r--drivers/ieee1394/video1394.c5
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/infiniband/Makefile1
-rw-r--r--drivers/infiniband/core/cma.c70
-rw-r--r--drivers/infiniband/core/ucm.c3
-rw-r--r--drivers/infiniband/core/ucma.c4
-rw-r--r--drivers/infiniband/core/user_mad.c12
-rw-r--r--drivers/infiniband/core/uverbs_main.c12
-rw-r--r--drivers/infiniband/hw/amso1100/c2.h2
-rw-r--r--drivers/infiniband/hw/amso1100/c2_alloc.c4
-rw-r--r--drivers/infiniband/hw/amso1100/c2_cq.c4
-rw-r--r--drivers/infiniband/hw/amso1100/c2_mq.h2
-rw-r--r--drivers/infiniband/hw/amso1100/c2_provider.h2
-rw-r--r--drivers/infiniband/hw/amso1100/c2_rnic.c12
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.c12
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.h2
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_wr.h4
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c133
-rw-r--r--drivers/infiniband/hw/cxgb4/Kconfig18
-rw-r--r--drivers/infiniband/hw/cxgb4/Makefile5
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c2374
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c882
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c520
-rw-r--r--drivers/infiniband/hw/cxgb4/ev.c193
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h745
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c811
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c518
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c1577
-rw-r--r--drivers/infiniband/hw/cxgb4/resource.c417
-rw-r--r--drivers/infiniband/hw/cxgb4/t4.h550
-rw-r--r--drivers/infiniband/hw/cxgb4/t4fw_ri_api.h829
-rw-r--r--drivers/infiniband/hw/cxgb4/user.h66
-rw-r--r--drivers/infiniband/hw/ipath/ipath_iba6110.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_iba6120.c4
-rw-r--r--drivers/infiniband/hw/ipath/ipath_iba7220.c2
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c8
-rw-r--r--drivers/infiniband/hw/mlx4/main.c1
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c50
-rw-r--r--drivers/infiniband/hw/mthca/mthca_allocator.c8
-rw-r--r--drivers/infiniband/hw/mthca/mthca_eq.c6
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.h2
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c12
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c23
-rw-r--r--drivers/infiniband/hw/nes/nes_utils.c10
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ethtool.c20
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c15
-rw-r--r--drivers/input/evdev.c39
-rw-r--r--drivers/input/gameport/gameport.c4
-rw-r--r--drivers/input/input.c263
-rw-r--r--drivers/input/joystick/analog.c4
-rw-r--r--drivers/input/joystick/iforce/iforce-main.c6
-rw-r--r--drivers/input/joystick/iforce/iforce-usb.c1
-rw-r--r--drivers/input/keyboard/Kconfig64
-rw-r--r--drivers/input/keyboard/Makefile4
-rw-r--r--drivers/input/keyboard/corgikbd.c414
-rw-r--r--drivers/input/keyboard/lm8323.c6
-rw-r--r--drivers/input/keyboard/spitzkbd.c496
-rw-r--r--drivers/input/keyboard/tca6416-keypad.c349
-rw-r--r--drivers/input/keyboard/tosakbd.c431
-rw-r--r--drivers/input/misc/Kconfig40
-rw-r--r--drivers/input/misc/Makefile4
-rw-r--r--drivers/input/misc/ad714x-i2c.c140
-rw-r--r--drivers/input/misc/ad714x-spi.c103
-rw-r--r--drivers/input/misc/ad714x.c1347
-rw-r--r--drivers/input/misc/ad714x.h26
-rw-r--r--drivers/input/misc/ati_remote2.c26
-rw-r--r--drivers/input/misc/pcf8574_keypad.c227
-rw-r--r--drivers/input/misc/pcspkr.c6
-rw-r--r--drivers/input/mouse/psmouse-base.c14
-rw-r--r--drivers/input/serio/Kconfig16
-rw-r--r--drivers/input/serio/Makefile1
-rw-r--r--drivers/input/serio/ams_delta_serio.c177
-rw-r--r--drivers/input/tablet/acecad.c86
-rw-r--r--drivers/input/tablet/kbtab.c53
-rw-r--r--drivers/input/tablet/wacom.h36
-rw-r--r--drivers/input/tablet/wacom_sys.c316
-rw-r--r--drivers/input/tablet/wacom_wac.c1168
-rw-r--r--drivers/input/tablet/wacom_wac.h13
-rw-r--r--drivers/input/touchscreen/Kconfig41
-rw-r--r--drivers/input/touchscreen/Makefile3
-rw-r--r--drivers/input/touchscreen/corgi_ts.c385
-rw-r--r--drivers/input/touchscreen/hampshire.c205
-rw-r--r--drivers/input/touchscreen/s3c2410_ts.c2
-rw-r--r--drivers/input/touchscreen/tps6507x-ts.c400
-rw-r--r--drivers/input/touchscreen/tsc2007.c2
-rw-r--r--drivers/isdn/hardware/avm/avm_cs.c76
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c2
-rw-r--r--drivers/isdn/hisax/avma1_cs.c63
-rw-r--r--drivers/isdn/hisax/elsa_cs.c40
-rw-r--r--drivers/isdn/hisax/hfc4s8s_l1.c2
-rw-r--r--drivers/isdn/hisax/hisax_fcpcipnp.c3
-rw-r--r--drivers/isdn/hisax/sedlbauer_cs.c60
-rw-r--r--drivers/isdn/hisax/teles_cs.c50
-rw-r--r--drivers/isdn/i4l/isdn_x25iface.c17
-rw-r--r--drivers/leds/leds-88pm860x.c11
-rw-r--r--drivers/macintosh/macio-adb.c1
-rw-r--r--drivers/macintosh/macio_asic.c5
-rw-r--r--drivers/macintosh/smu.c4
-rw-r--r--drivers/macintosh/therm_adt746x.c2
-rw-r--r--drivers/macintosh/windfarm_pm81.c8
-rw-r--r--drivers/macintosh/windfarm_pm91.c9
-rw-r--r--drivers/md/Kconfig10
-rw-r--r--drivers/md/Makefile1
-rw-r--r--drivers/md/bitmap.c41
-rw-r--r--drivers/md/bitmap.h2
-rw-r--r--drivers/md/dm-flakey.c214
-rw-r--r--drivers/md/dm-ioctl.c3
-rw-r--r--drivers/md/faulty.c9
-rw-r--r--drivers/md/linear.c35
-rw-r--r--drivers/md/md.c491
-rw-r--r--drivers/md/md.h10
-rw-r--r--drivers/md/multipath.c13
-rw-r--r--drivers/md/raid0.c251
-rw-r--r--drivers/md/raid0.h3
-rw-r--r--drivers/md/raid1.c109
-rw-r--r--drivers/md/raid10.c299
-rw-r--r--drivers/md/raid10.h12
-rw-r--r--drivers/md/raid5.c231
-rw-r--r--drivers/media/IR/Kconfig59
-rw-r--r--drivers/media/IR/Makefile14
-rw-r--r--drivers/media/IR/imon.c2397
-rw-r--r--drivers/media/IR/ir-core-priv.h126
-rw-r--r--drivers/media/IR/ir-functions.c1
-rw-r--r--drivers/media/IR/ir-jvc-decoder.c320
-rw-r--r--drivers/media/IR/ir-keymaps.c3494
-rw-r--r--drivers/media/IR/ir-keytable.c687
-rw-r--r--drivers/media/IR/ir-nec-decoder.c328
-rw-r--r--drivers/media/IR/ir-raw-event.c251
-rw-r--r--drivers/media/IR/ir-rc5-decoder.c324
-rw-r--r--drivers/media/IR/ir-rc6-decoder.c419
-rw-r--r--drivers/media/IR/ir-sony-decoder.c312
-rw-r--r--drivers/media/IR/ir-sysfs.c202
-rw-r--r--drivers/media/IR/keymaps/Kconfig15
-rw-r--r--drivers/media/IR/keymaps/Makefile67
-rw-r--r--drivers/media/IR/keymaps/rc-adstech-dvb-t-pci.c89
-rw-r--r--drivers/media/IR/keymaps/rc-apac-viewcomp.c80
-rw-r--r--drivers/media/IR/keymaps/rc-asus-pc39.c91
-rw-r--r--drivers/media/IR/keymaps/rc-ati-tv-wonder-hd-600.c69
-rw-r--r--drivers/media/IR/keymaps/rc-avermedia-a16d.c75
-rw-r--r--drivers/media/IR/keymaps/rc-avermedia-cardbus.c97
-rw-r--r--drivers/media/IR/keymaps/rc-avermedia-dvbt.c78
-rw-r--r--drivers/media/IR/keymaps/rc-avermedia-m135a-rm-jx.c90
-rw-r--r--drivers/media/IR/keymaps/rc-avermedia.c86
-rw-r--r--drivers/media/IR/keymaps/rc-avertv-303.c85
-rw-r--r--drivers/media/IR/keymaps/rc-behold-columbus.c108
-rw-r--r--drivers/media/IR/keymaps/rc-behold.c141
-rw-r--r--drivers/media/IR/keymaps/rc-budget-ci-old.c92
-rw-r--r--drivers/media/IR/keymaps/rc-cinergy-1400.c84
-rw-r--r--drivers/media/IR/keymaps/rc-cinergy.c78
-rw-r--r--drivers/media/IR/keymaps/rc-dm1105-nec.c76
-rw-r--r--drivers/media/IR/keymaps/rc-dntv-live-dvb-t.c78
-rw-r--r--drivers/media/IR/keymaps/rc-dntv-live-dvbt-pro.c97
-rw-r--r--drivers/media/IR/keymaps/rc-em-terratec.c69
-rw-r--r--drivers/media/IR/keymaps/rc-empty.c44
-rw-r--r--drivers/media/IR/keymaps/rc-encore-enltv-fm53.c81
-rw-r--r--drivers/media/IR/keymaps/rc-encore-enltv.c112
-rw-r--r--drivers/media/IR/keymaps/rc-encore-enltv2.c90
-rw-r--r--drivers/media/IR/keymaps/rc-evga-indtube.c61
-rw-r--r--drivers/media/IR/keymaps/rc-eztv.c96
-rw-r--r--drivers/media/IR/keymaps/rc-flydvb.c77
-rw-r--r--drivers/media/IR/keymaps/rc-flyvideo.c70
-rw-r--r--drivers/media/IR/keymaps/rc-fusionhdtv-mce.c98
-rw-r--r--drivers/media/IR/keymaps/rc-gadmei-rm008z.c81
-rw-r--r--drivers/media/IR/keymaps/rc-genius-tvgo-a11mce.c84
-rw-r--r--drivers/media/IR/keymaps/rc-gotview7135.c79
-rw-r--r--drivers/media/IR/keymaps/rc-hauppauge-new.c100
-rw-r--r--drivers/media/IR/keymaps/rc-imon-mce.c142
-rw-r--r--drivers/media/IR/keymaps/rc-imon-pad.c156
-rw-r--r--drivers/media/IR/keymaps/rc-iodata-bctv7e.c88
-rw-r--r--drivers/media/IR/keymaps/rc-kaiomy.c87
-rw-r--r--drivers/media/IR/keymaps/rc-kworld-315u.c83
-rw-r--r--drivers/media/IR/keymaps/rc-kworld-plus-tv-analog.c99
-rw-r--r--drivers/media/IR/keymaps/rc-manli.c135
-rw-r--r--drivers/media/IR/keymaps/rc-msi-tvanywhere-plus.c123
-rw-r--r--drivers/media/IR/keymaps/rc-msi-tvanywhere.c69
-rw-r--r--drivers/media/IR/keymaps/rc-nebula.c96
-rw-r--r--drivers/media/IR/keymaps/rc-nec-terratec-cinergy-xs.c105
-rw-r--r--drivers/media/IR/keymaps/rc-norwood.c85
-rw-r--r--drivers/media/IR/keymaps/rc-npgtech.c80
-rw-r--r--drivers/media/IR/keymaps/rc-pctv-sedna.c80
-rw-r--r--drivers/media/IR/keymaps/rc-pinnacle-color.c94
-rw-r--r--drivers/media/IR/keymaps/rc-pinnacle-grey.c89
-rw-r--r--drivers/media/IR/keymaps/rc-pinnacle-pctv-hd.c73
-rw-r--r--drivers/media/IR/keymaps/rc-pixelview-mk12.c83
-rw-r--r--drivers/media/IR/keymaps/rc-pixelview-new.c83
-rw-r--r--drivers/media/IR/keymaps/rc-pixelview.c82
-rw-r--r--drivers/media/IR/keymaps/rc-powercolor-real-angel.c81
-rw-r--r--drivers/media/IR/keymaps/rc-proteus-2309.c69
-rw-r--r--drivers/media/IR/keymaps/rc-purpletv.c81
-rw-r--r--drivers/media/IR/keymaps/rc-pv951.c78
-rw-r--r--drivers/media/IR/keymaps/rc-rc5-hauppauge-new.c103
-rw-r--r--drivers/media/IR/keymaps/rc-rc5-tv.c81
-rw-r--r--drivers/media/IR/keymaps/rc-real-audio-220-32-keys.c78
-rw-r--r--drivers/media/IR/keymaps/rc-tbs-nec.c73
-rw-r--r--drivers/media/IR/keymaps/rc-terratec-cinergy-xs.c92
-rw-r--r--drivers/media/IR/keymaps/rc-tevii-nec.c88
-rw-r--r--drivers/media/IR/keymaps/rc-tt-1500.c82
-rw-r--r--drivers/media/IR/keymaps/rc-videomate-s350.c85
-rw-r--r--drivers/media/IR/keymaps/rc-videomate-tv-pvr.c87
-rw-r--r--drivers/media/IR/keymaps/rc-winfast-usbii-deluxe.c82
-rw-r--r--drivers/media/IR/keymaps/rc-winfast.c102
-rw-r--r--drivers/media/IR/rc-map.c83
-rw-r--r--drivers/media/common/tuners/tuner-xc2028.c25
-rw-r--r--drivers/media/common/tuners/tuner-xc2028.h2
-rw-r--r--drivers/media/dvb/bt8xx/dst.c2
-rw-r--r--drivers/media/dvb/dm1105/dm1105.c25
-rw-r--r--drivers/media/dvb/dvb-core/dvb_demux.c11
-rw-r--r--drivers/media/dvb/dvb-core/dvb_net.c14
-rw-r--r--drivers/media/dvb/dvb-usb/a800.c6
-rw-r--r--drivers/media/dvb/dvb-usb/af9005-remote.c16
-rw-r--r--drivers/media/dvb/dvb-usb/af9005.c8
-rw-r--r--drivers/media/dvb/dvb-usb/af9005.h4
-rw-r--r--drivers/media/dvb/dvb-usb/af9015.c43
-rw-r--r--drivers/media/dvb/dvb-usb/af9015.h18
-rw-r--r--drivers/media/dvb/dvb-usb/anysee.c6
-rw-r--r--drivers/media/dvb/dvb-usb/az6027.c59
-rw-r--r--drivers/media/dvb/dvb-usb/cinergyT2-core.c6
-rw-r--r--drivers/media/dvb/dvb-usb/cxusb.c51
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_devices.c100
-rw-r--r--drivers/media/dvb/dvb-usb/dibusb-common.c4
-rw-r--r--drivers/media/dvb/dvb-usb/dibusb-mb.c8
-rw-r--r--drivers/media/dvb/dvb-usb/dibusb-mc.c2
-rw-r--r--drivers/media/dvb/dvb-usb/dibusb.h2
-rw-r--r--drivers/media/dvb/dvb-usb/digitv.c6
-rw-r--r--drivers/media/dvb/dvb-usb/dtt200u.c18
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-ids.h9
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-urb.c2
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb.h7
-rw-r--r--drivers/media/dvb/dvb-usb/dw2102.c44
-rw-r--r--drivers/media/dvb/dvb-usb/friio-fe.c2
-rw-r--r--drivers/media/dvb/dvb-usb/gp8psk.c4
-rw-r--r--drivers/media/dvb/dvb-usb/m920x.c18
-rw-r--r--drivers/media/dvb/dvb-usb/nova-t-usb2.c18
-rw-r--r--drivers/media/dvb/dvb-usb/opera1.c16
-rw-r--r--drivers/media/dvb/dvb-usb/vp702x.c12
-rw-r--r--drivers/media/dvb/dvb-usb/vp7045.c12
-rw-r--r--drivers/media/dvb/firewire/firedtv-avc.c2
-rw-r--r--drivers/media/dvb/frontends/atbm8830_priv.h2
-rw-r--r--drivers/media/dvb/frontends/au8522_decoder.c7
-rw-r--r--drivers/media/dvb/frontends/au8522_dig.c26
-rw-r--r--drivers/media/dvb/frontends/au8522_priv.h5
-rw-r--r--drivers/media/dvb/frontends/dib3000mc.c35
-rw-r--r--drivers/media/dvb/frontends/dib7000p.c36
-rw-r--r--drivers/media/dvb/frontends/dib8000.h1
-rw-r--r--drivers/media/dvb/frontends/ds3000.c2
-rw-r--r--drivers/media/dvb/frontends/stv0900_core.c16
-rw-r--r--drivers/media/dvb/frontends/stv090x.c94
-rw-r--r--drivers/media/dvb/frontends/stv090x.h1
-rw-r--r--drivers/media/dvb/frontends/stv6110x.c24
-rw-r--r--drivers/media/dvb/frontends/stv6110x.h1
-rw-r--r--drivers/media/dvb/mantis/mantis_input.c4
-rw-r--r--drivers/media/dvb/mantis/mantis_vp1041.c10
-rw-r--r--drivers/media/dvb/ngene/ngene-core.c53
-rw-r--r--drivers/media/dvb/pt1/pt1.c271
-rw-r--r--drivers/media/dvb/pt1/va1j5jf8007s.c32
-rw-r--r--drivers/media/dvb/pt1/va1j5jf8007s.h8
-rw-r--r--drivers/media/dvb/pt1/va1j5jf8007t.c31
-rw-r--r--drivers/media/dvb/pt1/va1j5jf8007t.h8
-rw-r--r--drivers/media/dvb/ttpci/budget-ci.c52
-rw-r--r--drivers/media/dvb/ttpci/budget.c1
-rw-r--r--drivers/media/video/Kconfig52
-rw-r--r--drivers/media/video/Makefile14
-rw-r--r--drivers/media/video/ak881x.c368
-rw-r--r--drivers/media/video/arv.c524
-rw-r--r--drivers/media/video/au0828/au0828-video.c2
-rw-r--r--drivers/media/video/bt8xx/bttv-cards.c4
-rw-r--r--drivers/media/video/bt8xx/bttv-driver.c4
-rw-r--r--drivers/media/video/bt8xx/bttv-input.c30
-rw-r--r--drivers/media/video/bw-qcam.c452
-rw-r--r--drivers/media/video/c-qcam.c483
-rw-r--r--drivers/media/video/cx18/cx18-av-core.c54
-rw-r--r--drivers/media/video/cx18/cx18-av-core.h24
-rw-r--r--drivers/media/video/cx18/cx18-av-vbi.c42
-rw-r--r--drivers/media/video/cx18/cx18-cards.c4
-rw-r--r--drivers/media/video/cx18/cx18-cards.h4
-rw-r--r--drivers/media/video/cx18/cx18-i2c.c2
-rw-r--r--drivers/media/video/cx18/cx18-ioctl.c6
-rw-r--r--drivers/media/video/cx18/cx18-streams.c5
-rw-r--r--drivers/media/video/cx18/cx18-vbi.c2
-rw-r--r--drivers/media/video/cx231xx/cx231xx-audio.c2
-rw-r--r--drivers/media/video/cx231xx/cx231xx-core.c13
-rw-r--r--drivers/media/video/cx231xx/cx231xx-input.c52
-rw-r--r--drivers/media/video/cx231xx/cx231xx-video.c9
-rw-r--r--drivers/media/video/cx231xx/cx231xx.h2
-rw-r--r--drivers/media/video/cx2341x.c14
-rw-r--r--drivers/media/video/cx23885/cimax2.c12
-rw-r--r--drivers/media/video/cx23885/cx23885-417.c2
-rw-r--r--drivers/media/video/cx23885/cx23885-dvb.c16
-rw-r--r--drivers/media/video/cx23885/cx23885-input.c8
-rw-r--r--drivers/media/video/cx23885/cx23885-video.c4
-rw-r--r--drivers/media/video/cx25840/cx25840-core.c17
-rw-r--r--drivers/media/video/cx25840/cx25840-core.h5
-rw-r--r--drivers/media/video/cx25840/cx25840-vbi.c40
-rw-r--r--drivers/media/video/cx88/cx88-core.c3
-rw-r--r--drivers/media/video/cx88/cx88-dvb.c2
-rw-r--r--drivers/media/video/cx88/cx88-input.c149
-rw-r--r--drivers/media/video/cx88/cx88-mpeg.c11
-rw-r--r--drivers/media/video/cx88/cx88-video.c20
-rw-r--r--drivers/media/video/cx88/cx88.h8
-rw-r--r--drivers/media/video/davinci/dm644x_ccdc.c131
-rw-r--r--drivers/media/video/davinci/dm644x_ccdc_regs.h10
-rw-r--r--drivers/media/video/davinci/isif_regs.h2
-rw-r--r--drivers/media/video/davinci/vpfe_capture.c81
-rw-r--r--drivers/media/video/davinci/vpif_display.c2
-rw-r--r--drivers/media/video/em28xx/em28xx-audio.c2
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c56
-rw-r--r--drivers/media/video/em28xx/em28xx-core.c25
-rw-r--r--drivers/media/video/em28xx/em28xx-dvb.c11
-rw-r--r--drivers/media/video/em28xx/em28xx-input.c29
-rw-r--r--drivers/media/video/em28xx/em28xx-video.c65
-rw-r--r--drivers/media/video/em28xx/em28xx.h7
-rw-r--r--drivers/media/video/et61x251/et61x251_core.c6
-rw-r--r--drivers/media/video/font.h407
-rw-r--r--drivers/media/video/gspca/Kconfig6
-rw-r--r--drivers/media/video/gspca/cpia1.c36
-rw-r--r--drivers/media/video/gspca/gspca.c124
-rw-r--r--drivers/media/video/gspca/gspca.h6
-rw-r--r--drivers/media/video/gspca/ov534.c563
-rw-r--r--drivers/media/video/gspca/pac207.c4
-rw-r--r--drivers/media/video/gspca/sn9c2028.c2
-rw-r--r--drivers/media/video/gspca/sn9c20x.c238
-rw-r--r--drivers/media/video/gspca/sonixj.c585
-rw-r--r--drivers/media/video/gspca/spca561.c58
-rw-r--r--drivers/media/video/gspca/t613.c172
-rw-r--r--drivers/media/video/gspca/vc032x.c747
-rw-r--r--drivers/media/video/gspca/zc3xx.c95
-rw-r--r--drivers/media/video/ir-kbd-i2c.c28
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.c5
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.h10
-rw-r--r--drivers/media/video/ivtv/ivtv-fileops.c53
-rw-r--r--drivers/media/video/ivtv/ivtv-i2c.c6
-rw-r--r--drivers/media/video/ivtv/ivtv-ioctl.c116
-rw-r--r--drivers/media/video/ivtv/ivtv-irq.c23
-rw-r--r--drivers/media/video/ivtv/ivtv-streams.c17
-rw-r--r--drivers/media/video/ivtv/ivtv-vbi.c18
-rw-r--r--drivers/media/video/ivtv/ivtvfb.c2
-rw-r--r--drivers/media/video/mem2mem_testdev.c1049
-rw-r--r--drivers/media/video/meye.c78
-rw-r--r--drivers/media/video/meye.h10
-rw-r--r--drivers/media/video/mt9t031.c66
-rw-r--r--drivers/media/video/mt9v022.c2
-rw-r--r--drivers/media/video/mx1_camera.c4
-rw-r--r--drivers/media/video/mx3_camera.c4
-rw-r--r--drivers/media/video/omap/Kconfig11
-rw-r--r--drivers/media/video/omap/Makefile7
-rw-r--r--drivers/media/video/omap/omap_vout.c2643
-rw-r--r--drivers/media/video/omap/omap_voutdef.h147
-rw-r--r--drivers/media/video/omap/omap_voutlib.c293
-rw-r--r--drivers/media/video/omap/omap_voutlib.h34
-rw-r--r--drivers/media/video/omap24xxcam.c4
-rw-r--r--drivers/media/video/ov511.c9
-rw-r--r--drivers/media/video/ov7670.c286
-rw-r--r--drivers/media/video/ov9640.c6
-rw-r--r--drivers/media/video/pms.c18
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw.c2
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-v4l2.c22
-rw-r--r--drivers/media/video/pwc/Kconfig2
-rw-r--r--drivers/media/video/pxa_camera.c4
-rw-r--r--drivers/media/video/rj54n1cb0c.c18
-rw-r--r--drivers/media/video/s2255drv.c1160
-rw-r--r--drivers/media/video/saa7115.c74
-rw-r--r--drivers/media/video/saa7127.c31
-rw-r--r--drivers/media/video/saa7134/saa7134-alsa.c2
-rw-r--r--drivers/media/video/saa7134/saa7134-cards.c111
-rw-r--r--drivers/media/video/saa7134/saa7134-core.c22
-rw-r--r--drivers/media/video/saa7134/saa7134-dvb.c9
-rw-r--r--drivers/media/video/saa7134/saa7134-input.c283
-rw-r--r--drivers/media/video/saa7134/saa7134-reg.h24
-rw-r--r--drivers/media/video/saa7134/saa7134-video.c11
-rw-r--r--drivers/media/video/saa7134/saa7134.h7
-rw-r--r--drivers/media/video/sh_mobile_ceu_camera.c613
-rw-r--r--drivers/media/video/sh_vou.c1476
-rw-r--r--drivers/media/video/sn9c102/sn9c102_core.c6
-rw-r--r--drivers/media/video/sn9c102/sn9c102_devtable.h18
-rw-r--r--drivers/media/video/sn9c102/sn9c102_hv7131d.c2
-rw-r--r--drivers/media/video/soc_camera.c21
-rw-r--r--drivers/media/video/tlg2300/pd-dvb.c6
-rw-r--r--drivers/media/video/tlg2300/pd-main.c7
-rw-r--r--drivers/media/video/tlg2300/pd-radio.c21
-rw-r--r--drivers/media/video/tlg2300/pd-video.c18
-rw-r--r--drivers/media/video/tvp514x.c13
-rw-r--r--drivers/media/video/tvp5150.c67
-rw-r--r--drivers/media/video/usbvideo/quickcam_messenger.c3
-rw-r--r--drivers/media/video/usbvision/usbvision-video.c77
-rw-r--r--drivers/media/video/usbvision/usbvision.h1
-rw-r--r--drivers/media/video/uvc/uvc_ctrl.c50
-rw-r--r--drivers/media/video/uvc/uvc_driver.c25
-rw-r--r--drivers/media/video/uvc/uvc_queue.c8
-rw-r--r--drivers/media/video/uvc/uvcvideo.h4
-rw-r--r--drivers/media/video/v4l2-common.c79
-rw-r--r--drivers/media/video/v4l2-compat-ioctl32.c3
-rw-r--r--drivers/media/video/v4l2-dev.c4
-rw-r--r--drivers/media/video/v4l2-device.c11
-rw-r--r--drivers/media/video/v4l2-event.c292
-rw-r--r--drivers/media/video/v4l2-fh.c79
-rw-r--r--drivers/media/video/v4l2-ioctl.c103
-rw-r--r--drivers/media/video/v4l2-mem2mem.c633
-rw-r--r--drivers/media/video/videobuf-core.c244
-rw-r--r--drivers/media/video/videobuf-dma-contig.c115
-rw-r--r--drivers/media/video/videobuf-dma-sg.c383
-rw-r--r--drivers/media/video/videobuf-dvb.c2
-rw-r--r--drivers/media/video/videobuf-vmalloc.c183
-rw-r--r--drivers/media/video/vivi.c805
-rw-r--r--drivers/media/video/w9966.c1149
-rw-r--r--drivers/media/video/zc0301/zc0301_core.c6
-rw-r--r--drivers/media/video/zc0301/zc0301_sensor.h2
-rw-r--r--drivers/media/video/zoran/zoran.h24
-rw-r--r--drivers/media/video/zoran/zoran_driver.c16
-rw-r--r--drivers/media/video/zr364xx.c4
-rw-r--r--drivers/message/fusion/mptbase.c180
-rw-r--r--drivers/message/fusion/mptbase.h5
-rw-r--r--drivers/message/fusion/mptctl.c181
-rw-r--r--drivers/message/fusion/mptfc.c22
-rw-r--r--drivers/message/fusion/mptsas.c55
-rw-r--r--drivers/message/fusion/mptsas.h2
-rw-r--r--drivers/message/fusion/mptscsih.c27
-rw-r--r--drivers/message/fusion/mptspi.c10
-rw-r--r--drivers/message/i2o/i2o_config.c16
-rw-r--r--drivers/mfd/88pm860x-core.c42
-rw-r--r--drivers/mfd/88pm860x-i2c.c1
-rw-r--r--drivers/mfd/Kconfig86
-rw-r--r--drivers/mfd/Makefile11
-rw-r--r--drivers/mfd/ab3100-core.c99
-rw-r--r--drivers/mfd/ab3100-otp.c13
-rw-r--r--drivers/mfd/ab3550-core.c1401
-rw-r--r--drivers/mfd/abx500-core.c157
-rw-r--r--drivers/mfd/da903x.c1
-rw-r--r--drivers/mfd/davinci_voicecodec.c190
-rw-r--r--drivers/mfd/janz-cmodio.c304
-rw-r--r--drivers/mfd/max8925-core.c7
-rw-r--r--drivers/mfd/menelaus.c3
-rw-r--r--drivers/mfd/mfd-core.c2
-rw-r--r--drivers/mfd/pcf50633-core.c5
-rw-r--r--drivers/mfd/rdc321x-southbridge.c123
-rw-r--r--drivers/mfd/t7l66xb.c3
-rw-r--r--drivers/mfd/tc35892.c347
-rw-r--r--drivers/mfd/timberdale.c138
-rw-r--r--drivers/mfd/timberdale.h16
-rw-r--r--drivers/mfd/tps65010.c2
-rw-r--r--drivers/mfd/tps6507x.c159
-rw-r--r--drivers/mfd/twl-core.c4
-rw-r--r--drivers/mfd/wm831x-core.c109
-rw-r--r--drivers/mfd/wm831x-irq.c48
-rw-r--r--drivers/mfd/wm8350-core.c4
-rw-r--r--drivers/mfd/wm8350-i2c.c6
-rw-r--r--drivers/mfd/wm8400-core.c4
-rw-r--r--drivers/mfd/wm8994-core.c43
-rw-r--r--drivers/mfd/wm8994-irq.c310
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/eeprom/at24.c59
-rw-r--r--drivers/misc/hdpuftrs/Makefile1
-rw-r--r--drivers/misc/hdpuftrs/hdpu_cpustate.c256
-rw-r--r--drivers/misc/hdpuftrs/hdpu_nexus.c149
-rw-r--r--drivers/misc/lkdtm.c4
-rw-r--r--drivers/misc/vmware_balloon.c4
-rw-r--r--drivers/mmc/core/sdio.c6
-rw-r--r--drivers/mmc/host/atmel-mci.c2
-rw-r--r--drivers/mmc/host/mmci.c19
-rw-r--r--drivers/mmc/host/mmci.h6
-rw-r--r--drivers/mmc/host/msm_sdcc.c472
-rw-r--r--drivers/mmc/host/msm_sdcc.h15
-rw-r--r--drivers/mmc/host/mxcmmc.c103
-rw-r--r--drivers/mtd/Kconfig13
-rw-r--r--drivers/mtd/Makefile1
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c137
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c198
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0020.c136
-rw-r--r--drivers/mtd/chips/fwh_lock.h6
-rw-r--r--drivers/mtd/chips/gen_probe.c3
-rw-r--r--drivers/mtd/devices/Makefile2
-rw-r--r--drivers/mtd/devices/block2mtd.c4
-rw-r--r--drivers/mtd/devices/pmc551.c4
-rw-r--r--drivers/mtd/devices/sst25l.c11
-rw-r--r--drivers/mtd/ftl.c1
-rw-r--r--drivers/mtd/inftlcore.c1
-rw-r--r--drivers/mtd/inftlmount.c7
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c79
-rw-r--r--drivers/mtd/maps/bfin-async-flash.c16
-rw-r--r--drivers/mtd/maps/ceiva.c2
-rw-r--r--drivers/mtd/maps/ixp4xx.c4
-rw-r--r--drivers/mtd/maps/pcmciamtd.c5
-rw-r--r--drivers/mtd/maps/physmap.c7
-rw-r--r--drivers/mtd/maps/physmap_of.c2
-rw-r--r--drivers/mtd/maps/pismo.c8
-rw-r--r--drivers/mtd/mtd_blkdevs.c335
-rw-r--r--drivers/mtd/mtdblock.c72
-rw-r--r--drivers/mtd/mtdblock_ro.c4
-rw-r--r--drivers/mtd/mtdchar.c12
-rw-r--r--drivers/mtd/mtdcore.c285
-rw-r--r--drivers/mtd/mtdcore.h7
-rw-r--r--drivers/mtd/mtdoops.c5
-rw-r--r--drivers/mtd/mtdsuper.c18
-rw-r--r--drivers/mtd/nand/Kconfig52
-rw-r--r--drivers/mtd/nand/Makefile9
-rw-r--r--drivers/mtd/nand/alauda.c2
-rw-r--r--drivers/mtd/nand/atmel_nand.c2
-rw-r--r--drivers/mtd/nand/au1550nd.c12
-rw-r--r--drivers/mtd/nand/bcm_umi_nand.c3
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c29
-rw-r--r--drivers/mtd/nand/cafe_nand.c4
-rw-r--r--drivers/mtd/nand/davinci_nand.c6
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c4
-rw-r--r--drivers/mtd/nand/fsl_upm.c9
-rw-r--r--drivers/mtd/nand/gpio.c12
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c917
-rw-r--r--drivers/mtd/nand/mxc_nand.c147
-rw-r--r--drivers/mtd/nand/nand_base.c312
-rw-r--r--drivers/mtd/nand/nand_bbt.c26
-rw-r--r--drivers/mtd/nand/nand_bcm_umi.h71
-rw-r--r--drivers/mtd/nand/nandsim.c17
-rw-r--r--drivers/mtd/nand/nomadik_nand.c6
-rw-r--r--drivers/mtd/nand/nuc900_nand.c (renamed from drivers/mtd/nand/w90p910_nand.c)144
-rw-r--r--drivers/mtd/nand/omap2.c16
-rw-r--r--drivers/mtd/nand/orion_nand.c10
-rw-r--r--drivers/mtd/nand/pasemi_nand.c2
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c11
-rw-r--r--drivers/mtd/nand/r852.c1140
-rw-r--r--drivers/mtd/nand/r852.h163
-rw-r--r--drivers/mtd/nand/s3c2410.c5
-rw-r--r--drivers/mtd/nand/sh_flctl.c2
-rw-r--r--drivers/mtd/nand/sm_common.c143
-rw-r--r--drivers/mtd/nand/sm_common.h61
-rw-r--r--drivers/mtd/nand/socrates_nand.c4
-rw-r--r--drivers/mtd/nand/tmio_nand.c14
-rw-r--r--drivers/mtd/nand/ts7250.c207
-rw-r--r--drivers/mtd/nand/txx9ndfmc.c2
-rw-r--r--drivers/mtd/nftlcore.c1
-rw-r--r--drivers/mtd/onenand/omap2.c12
-rw-r--r--drivers/mtd/rfd_ftl.c1
-rw-r--r--drivers/mtd/sm_ftl.c1284
-rw-r--r--drivers/mtd/sm_ftl.h94
-rw-r--r--drivers/mtd/ssfdc.c1
-rw-r--r--drivers/mtd/ubi/Kconfig2
-rw-r--r--drivers/mtd/ubi/build.c60
-rw-r--r--drivers/mtd/ubi/io.c6
-rw-r--r--drivers/mtd/ubi/kapi.c6
-rw-r--r--drivers/mtd/ubi/scan.c4
-rw-r--r--drivers/mtd/ubi/ubi.h2
-rw-r--r--drivers/mtd/ubi/vtbl.c4
-rw-r--r--drivers/mtd/ubi/wl.c2
-rw-r--r--drivers/net/3c501.c1
-rw-r--r--drivers/net/3c503.c42
-rw-r--r--drivers/net/3c505.c14
-rw-r--r--drivers/net/3c507.c4
-rw-r--r--drivers/net/3c509.c3
-rw-r--r--drivers/net/3c515.c4
-rw-r--r--drivers/net/3c523.c11
-rw-r--r--drivers/net/3c527.c6
-rw-r--r--drivers/net/3c59x.c8
-rw-r--r--drivers/net/7990.c11
-rw-r--r--drivers/net/8139cp.c7
-rw-r--r--drivers/net/8139too.c8
-rw-r--r--drivers/net/82596.c9
-rw-r--r--drivers/net/Kconfig53
-rw-r--r--drivers/net/Makefile4
-rw-r--r--drivers/net/a2065.c9
-rw-r--r--drivers/net/acenic.c42
-rw-r--r--drivers/net/acenic.h6
-rw-r--r--drivers/net/amd8111e.c8
-rw-r--r--drivers/net/appletalk/cops.c5
-rw-r--r--drivers/net/arcnet/arcnet.c1
-rw-r--r--drivers/net/ariadne.c2
-rw-r--r--drivers/net/arm/am79c961a.c7
-rw-r--r--drivers/net/arm/at91_ether.c7
-rw-r--r--drivers/net/arm/ep93xx_eth.c2
-rw-r--r--drivers/net/arm/ether1.c1
-rw-r--r--drivers/net/arm/ether3.c1
-rw-r--r--drivers/net/arm/ixp4xx_eth.c9
-rw-r--r--drivers/net/arm/ks8695net.c13
-rw-r--r--drivers/net/arm/w90p910_ether.c7
-rw-r--r--drivers/net/at1700.c9
-rw-r--r--drivers/net/atarilance.c5
-rw-r--r--drivers/net/atl1c/atl1c_main.c7
-rw-r--r--drivers/net/atl1e/atl1e_main.c9
-rw-r--r--drivers/net/atlx/atl1.c4
-rw-r--r--drivers/net/atlx/atl2.c8
-rw-r--r--drivers/net/atlx/atlx.c6
-rw-r--r--drivers/net/atp.c9
-rw-r--r--drivers/net/au1000_eth.c262
-rw-r--r--drivers/net/au1000_eth.h4
-rw-r--r--drivers/net/b44.c8
-rw-r--r--drivers/net/bcm63xx_enet.c14
-rw-r--r--drivers/net/benet/be.h11
-rw-r--r--drivers/net/benet/be_cmds.c14
-rw-r--r--drivers/net/benet/be_cmds.h2
-rw-r--r--drivers/net/benet/be_ethtool.c2
-rw-r--r--drivers/net/benet/be_hw.h3
-rw-r--r--drivers/net/benet/be_main.c305
-rw-r--r--drivers/net/bfin_mac.c9
-rw-r--r--drivers/net/bmac.c12
-rw-r--r--drivers/net/bnx2.c67
-rw-r--r--drivers/net/bnx2.h7
-rw-r--r--drivers/net/bnx2x.h66
-rw-r--r--drivers/net/bnx2x_hsi.h2
-rw-r--r--drivers/net/bnx2x_link.c12
-rw-r--r--drivers/net/bnx2x_main.c1874
-rw-r--r--drivers/net/bnx2x_reg.h27
-rw-r--r--drivers/net/bonding/bond_ipv6.c9
-rw-r--r--drivers/net/bonding/bond_main.c275
-rw-r--r--drivers/net/bonding/bonding.h2
-rw-r--r--drivers/net/caif/Kconfig17
-rw-r--r--drivers/net/caif/Makefile12
-rw-r--r--drivers/net/caif/caif_serial.c449
-rw-r--r--drivers/net/can/Kconfig10
-rw-r--r--drivers/net/can/Makefile1
-rw-r--r--drivers/net/can/at91_can.c4
-rw-r--r--drivers/net/can/bfin_can.c3
-rw-r--r--drivers/net/can/janz-ican3.c1830
-rw-r--r--drivers/net/can/mcp251x.c16
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c1
-rw-r--r--drivers/net/can/mscan/mscan.c1
-rw-r--r--drivers/net/can/sja1000/Kconfig4
-rw-r--r--drivers/net/can/sja1000/ems_pci.c1
-rw-r--r--drivers/net/can/sja1000/kvaser_pci.c1
-rw-r--r--drivers/net/can/sja1000/plx_pci.c154
-rw-r--r--drivers/net/can/sja1000/sja1000.c3
-rw-r--r--drivers/net/can/sja1000/sja1000_isa.c1
-rw-r--r--drivers/net/can/sja1000/sja1000_of_platform.c1
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c45
-rw-r--r--drivers/net/can/ti_hecc.c1
-rw-r--r--drivers/net/cassini.c13
-rw-r--r--drivers/net/chelsio/pm3393.c7
-rw-r--r--drivers/net/chelsio/sge.c58
-rw-r--r--drivers/net/cpmac.c17
-rw-r--r--drivers/net/cris/eth_v10.c8
-rw-r--r--drivers/net/cs89x0.c1
-rw-r--r--drivers/net/cxgb3/sge.c20
-rw-r--r--drivers/net/cxgb3/xgmac.c8
-rw-r--r--drivers/net/cxgb4/cxgb4.h4
-rw-r--r--drivers/net/cxgb4/cxgb4_main.c23
-rw-r--r--drivers/net/cxgb4/sge.c11
-rw-r--r--drivers/net/cxgb4/t4_hw.c86
-rw-r--r--drivers/net/cxgb4/t4_msg.h1
-rw-r--r--drivers/net/davinci_emac.c8
-rw-r--r--drivers/net/de600.c4
-rw-r--r--drivers/net/de620.c1
-rw-r--r--drivers/net/declance.c9
-rw-r--r--drivers/net/defxx.c6
-rw-r--r--drivers/net/depca.c9
-rw-r--r--drivers/net/dl2k.c6
-rw-r--r--drivers/net/dm9000.c12
-rw-r--r--drivers/net/dnet.c4
-rw-r--r--drivers/net/e100.c190
-rw-r--r--drivers/net/e1000/e1000.h37
-rw-r--r--drivers/net/e1000/e1000_ethtool.c85
-rw-r--r--drivers/net/e1000/e1000_hw.c356
-rw-r--r--drivers/net/e1000/e1000_hw.h1
-rw-r--r--drivers/net/e1000/e1000_main.c378
-rw-r--r--drivers/net/e1000/e1000_osdep.h14
-rw-r--r--drivers/net/e1000/e1000_param.c104
-rw-r--r--drivers/net/e1000e/82571.c6
-rw-r--r--drivers/net/e1000e/defines.h2
-rw-r--r--drivers/net/e1000e/e1000.h26
-rw-r--r--drivers/net/e1000e/ethtool.c39
-rw-r--r--drivers/net/e1000e/ich8lan.c36
-rw-r--r--drivers/net/e1000e/lib.c21
-rw-r--r--drivers/net/e1000e/netdev.c772
-rw-r--r--drivers/net/e1000e/param.c21
-rw-r--r--drivers/net/eepro.c13
-rw-r--r--drivers/net/eexpress.c10
-rw-r--r--drivers/net/ehea/ehea.h2
-rw-r--r--drivers/net/ehea/ehea_main.c68
-rw-r--r--drivers/net/ehea/ehea_qmr.c43
-rw-r--r--drivers/net/ehea/ehea_qmr.h14
-rw-r--r--drivers/net/enc28j60.c2
-rw-r--r--drivers/net/enic/cq_enet_desc.h12
-rw-r--r--drivers/net/enic/enic.h4
-rw-r--r--drivers/net/enic/enic_main.c11
-rw-r--r--drivers/net/enic/vnic_dev.c52
-rw-r--r--drivers/net/enic/vnic_dev.h3
-rw-r--r--drivers/net/enic/vnic_rq.c4
-rw-r--r--drivers/net/enic/vnic_wq.c4
-rw-r--r--drivers/net/epic100.c9
-rw-r--r--drivers/net/eth16i.c5
-rw-r--r--drivers/net/ethoc.c8
-rw-r--r--drivers/net/ewrk3.c12
-rw-r--r--drivers/net/fealnx.c9
-rw-r--r--drivers/net/fec.c1140
-rw-r--r--drivers/net/fec_mpc52xx.c8
-rw-r--r--drivers/net/forcedeth.c251
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c2
-rw-r--r--drivers/net/fs_enet/mac-fcc.c6
-rw-r--r--drivers/net/fs_enet/mac-fec.c6
-rw-r--r--drivers/net/fs_enet/mac-scc.c6
-rw-r--r--drivers/net/gianfar.c197
-rw-r--r--drivers/net/gianfar.h8
-rw-r--r--drivers/net/greth.c7
-rw-r--r--drivers/net/hamachi.c8
-rw-r--r--drivers/net/hamradio/baycom_ser_fdx.c2
-rw-r--r--drivers/net/hp100.c16
-rw-r--r--drivers/net/ibm_newemac/core.c12
-rw-r--r--drivers/net/ibmlana.c8
-rw-r--r--drivers/net/ibmveth.c8
-rw-r--r--drivers/net/ifb.c1
-rw-r--r--drivers/net/igb/e1000_82575.c33
-rw-r--r--drivers/net/igb/e1000_82575.h9
-rw-r--r--drivers/net/igb/e1000_defines.h5
-rw-r--r--drivers/net/igb/e1000_hw.h17
-rw-r--r--drivers/net/igb/e1000_mac.c27
-rw-r--r--drivers/net/igb/igb.h9
-rw-r--r--drivers/net/igb/igb_ethtool.c58
-rw-r--r--drivers/net/igb/igb_main.c610
-rw-r--r--drivers/net/igbvf/netdev.c88
-rw-r--r--drivers/net/ioc3-eth.c7
-rw-r--r--drivers/net/ipg.c11
-rw-r--r--drivers/net/ipg.h109
-rw-r--r--drivers/net/irda/Kconfig6
-rw-r--r--drivers/net/irda/Makefile1
-rw-r--r--drivers/net/irda/ali-ircc.c32
-rw-r--r--drivers/net/irda/au1k_ir.c1
-rw-r--r--drivers/net/irda/donauboe.c2
-rw-r--r--drivers/net/irda/irda-usb.c11
-rw-r--r--drivers/net/irda/pxaficp_ir.c1
-rw-r--r--drivers/net/irda/sa1100_ir.c2
-rw-r--r--drivers/net/irda/sh_irda.c865
-rw-r--r--drivers/net/irda/sh_sir.c12
-rw-r--r--drivers/net/irda/sir_dev.c1
-rw-r--r--drivers/net/irda/smsc-ircc2.c2
-rw-r--r--drivers/net/irda/vlsi_ir.c5
-rw-r--r--drivers/net/irda/w83977af_ir.c2
-rw-r--r--drivers/net/iseries_veth.c6
-rw-r--r--drivers/net/ixgb/ixgb.h8
-rw-r--r--drivers/net/ixgb/ixgb_ee.c14
-rw-r--r--drivers/net/ixgb/ixgb_hw.c147
-rw-r--r--drivers/net/ixgb/ixgb_hw.h12
-rw-r--r--drivers/net/ixgb/ixgb_main.c107
-rw-r--r--drivers/net/ixgb/ixgb_osdep.h16
-rw-r--r--drivers/net/ixgb/ixgb_param.c31
-rw-r--r--drivers/net/ixgbe/ixgbe.h3
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c7
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c16
-rw-r--r--drivers/net/ixgbe/ixgbe_common.h5
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c138
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c602
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.c4
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.c137
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.h8
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h8
-rw-r--r--drivers/net/ixgbevf/defines.h12
-rw-r--r--drivers/net/ixgbevf/ixgbevf_main.c121
-rw-r--r--drivers/net/ixgbevf/vf.c27
-rw-r--r--drivers/net/ixgbevf/vf.h4
-rw-r--r--drivers/net/ixp2000/ixpdev.c2
-rw-r--r--drivers/net/jme.c6
-rw-r--r--drivers/net/korina.c12
-rw-r--r--drivers/net/ks8842.c61
-rw-r--r--drivers/net/ks8851.c448
-rw-r--r--drivers/net/ks8851.h14
-rw-r--r--drivers/net/ks8851_mll.c63
-rw-r--r--drivers/net/ksz884x.c83
-rw-r--r--drivers/net/lance.c4
-rw-r--r--drivers/net/lib82596.c9
-rw-r--r--drivers/net/lib8390.c21
-rw-r--r--drivers/net/ll_temac.h14
-rw-r--r--drivers/net/ll_temac_main.c159
-rw-r--r--drivers/net/lp486e.c8
-rw-r--r--drivers/net/mac89x0.c1
-rw-r--r--drivers/net/macb.c9
-rw-r--r--drivers/net/mace.c6
-rw-r--r--drivers/net/macmace.c7
-rw-r--r--drivers/net/macvlan.c13
-rw-r--r--drivers/net/macvtap.c46
-rw-r--r--drivers/net/meth.c2
-rw-r--r--drivers/net/mlx4/en_netdev.c53
-rw-r--r--drivers/net/mlx4/mlx4_en.h3
-rw-r--r--drivers/net/mv643xx_eth.c7
-rw-r--r--drivers/net/myri10ge/myri10ge.c108
-rw-r--r--drivers/net/natsemi.c10
-rw-r--r--drivers/net/netconsole.c15
-rw-r--r--drivers/net/netx-eth.c1
-rw-r--r--drivers/net/netxen/netxen_nic.h1
-rw-r--r--drivers/net/netxen/netxen_nic_ethtool.c6
-rw-r--r--drivers/net/netxen/netxen_nic_hw.c12
-rw-r--r--drivers/net/netxen/netxen_nic_init.c164
-rw-r--r--drivers/net/netxen/netxen_nic_main.c29
-rw-r--r--drivers/net/ni5010.c3
-rw-r--r--drivers/net/ni52.c13
-rw-r--r--drivers/net/ni65.c5
-rw-r--r--drivers/net/niu.c57
-rw-r--r--drivers/net/niu.h7
-rw-r--r--drivers/net/octeon/octeon_mgmt.c65
-rw-r--r--drivers/net/pci-skeleton.c7
-rw-r--r--drivers/net/pcmcia/3c574_cs.c19
-rw-r--r--drivers/net/pcmcia/3c589_cs.c303
-rw-r--r--drivers/net/pcmcia/axnet_cs.c29
-rw-r--r--drivers/net/pcmcia/com20020_cs.c29
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c25
-rw-r--r--drivers/net/pcmcia/ibmtr_cs.c16
-rw-r--r--drivers/net/pcmcia/nmclan_cs.c24
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c16
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c25
-rw-r--r--drivers/net/pcmcia/xirc2ps_cs.c45
-rw-r--r--drivers/net/pcnet32.c12
-rw-r--r--drivers/net/phy/bcm63xx.c8
-rw-r--r--drivers/net/phy/broadcom.c16
-rw-r--r--drivers/net/phy/cicada.c8
-rw-r--r--drivers/net/phy/davicom.c9
-rw-r--r--drivers/net/phy/et1011c.c7
-rw-r--r--drivers/net/phy/icplus.c7
-rw-r--r--drivers/net/phy/lxt.c8
-rw-r--r--drivers/net/phy/marvell.c13
-rw-r--r--drivers/net/phy/mdio-bitbang.c60
-rw-r--r--drivers/net/phy/mdio_bus.c4
-rw-r--r--drivers/net/phy/micrel.c10
-rw-r--r--drivers/net/phy/national.c7
-rw-r--r--drivers/net/phy/phy_device.c12
-rw-r--r--drivers/net/phy/qsemi.c7
-rw-r--r--drivers/net/phy/realtek.c7
-rw-r--r--drivers/net/phy/smsc.c11
-rw-r--r--drivers/net/phy/ste10Xp.c8
-rw-r--r--drivers/net/phy/vitesse.c8
-rw-r--r--drivers/net/plip.c1
-rw-r--r--drivers/net/ppp_generic.c19
-rw-r--r--drivers/net/pppoe.c8
-rw-r--r--drivers/net/pppol2tp.c2680
-rw-r--r--drivers/net/ps3_gelic_net.c13
-rw-r--r--drivers/net/ps3_gelic_wireless.c71
-rw-r--r--drivers/net/qla3xxx.c66
-rw-r--r--drivers/net/qla3xxx.h8
-rw-r--r--drivers/net/qlcnic/qlcnic.h25
-rw-r--r--drivers/net/qlcnic/qlcnic_ethtool.c24
-rw-r--r--drivers/net/qlcnic/qlcnic_hdr.h37
-rw-r--r--drivers/net/qlcnic/qlcnic_hw.c131
-rw-r--r--drivers/net/qlcnic/qlcnic_init.c49
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c197
-rw-r--r--drivers/net/qlge/qlge.h8
-rw-r--r--drivers/net/qlge/qlge_dbg.c4
-rw-r--r--drivers/net/qlge/qlge_main.c64
-rw-r--r--drivers/net/r6040.c38
-rw-r--r--drivers/net/r8169.c158
-rw-r--r--drivers/net/rrunner.c1
-rw-r--r--drivers/net/s2io.c12
-rw-r--r--drivers/net/s6gmac.c3
-rw-r--r--drivers/net/sb1250-mac.c275
-rw-r--r--drivers/net/sc92031.c8
-rw-r--r--drivers/net/seeq8005.c3
-rw-r--r--drivers/net/sfc/efx.c137
-rw-r--r--drivers/net/sfc/efx.h4
-rw-r--r--drivers/net/sfc/ethtool.c6
-rw-r--r--drivers/net/sfc/falcon.c16
-rw-r--r--drivers/net/sfc/falcon_xmac.c22
-rw-r--r--drivers/net/sfc/mcdi.c32
-rw-r--r--drivers/net/sfc/mcdi_mac.c25
-rw-r--r--drivers/net/sfc/mcdi_pcol.h71
-rw-r--r--drivers/net/sfc/mcdi_phy.c152
-rw-r--r--drivers/net/sfc/net_driver.h76
-rw-r--r--drivers/net/sfc/nic.c114
-rw-r--r--drivers/net/sfc/nic.h3
-rw-r--r--drivers/net/sfc/selftest.c8
-rw-r--r--drivers/net/sfc/selftest.h4
-rw-r--r--drivers/net/sfc/siena.c19
-rw-r--r--drivers/net/sfc/tx.c61
-rw-r--r--drivers/net/sfc/workarounds.h2
-rw-r--r--drivers/net/sgiseeq.c6
-rw-r--r--drivers/net/sh_eth.c2
-rw-r--r--drivers/net/sis190.c6
-rw-r--r--drivers/net/sis900.c18
-rw-r--r--drivers/net/skfp/fplustm.c2
-rw-r--r--drivers/net/skfp/pcmplc.c4
-rw-r--r--drivers/net/skfp/skfddi.c13
-rw-r--r--drivers/net/skfp/srf.c2
-rw-r--r--drivers/net/skge.c14
-rw-r--r--drivers/net/sky2.c160
-rw-r--r--drivers/net/sky2.h41
-rw-r--r--drivers/net/slip.c2
-rw-r--r--drivers/net/smc911x.c21
-rw-r--r--drivers/net/smc9194.c58
-rw-r--r--drivers/net/smc91x.c12
-rw-r--r--drivers/net/smsc911x.c11
-rw-r--r--drivers/net/smsc9420.c8
-rw-r--r--drivers/net/sonic.c10
-rw-r--r--drivers/net/spider_net.c6
-rw-r--r--drivers/net/starfire.c14
-rw-r--r--drivers/net/stmmac/Makefile2
-rw-r--r--drivers/net/stmmac/common.h21
-rw-r--r--drivers/net/stmmac/dwmac100.c538
-rw-r--r--drivers/net/stmmac/dwmac100.h5
-rw-r--r--drivers/net/stmmac/dwmac1000.h12
-rw-r--r--drivers/net/stmmac/dwmac1000_core.c33
-rw-r--r--drivers/net/stmmac/dwmac1000_dma.c336
-rw-r--r--drivers/net/stmmac/dwmac100_core.c201
-rw-r--r--drivers/net/stmmac/dwmac100_dma.c138
-rw-r--r--drivers/net/stmmac/dwmac_dma.h1
-rw-r--r--drivers/net/stmmac/dwmac_lib.c7
-rw-r--r--drivers/net/stmmac/enh_desc.c342
-rw-r--r--drivers/net/stmmac/norm_desc.c240
-rw-r--r--drivers/net/stmmac/stmmac.h10
-rw-r--r--drivers/net/stmmac/stmmac_main.c10
-rw-r--r--drivers/net/sun3_82586.c13
-rw-r--r--drivers/net/sun3lance.c8
-rw-r--r--drivers/net/sunbmac.c8
-rw-r--r--drivers/net/sundance.c9
-rw-r--r--drivers/net/sungem.c8
-rw-r--r--drivers/net/sunhme.c15
-rw-r--r--drivers/net/sunlance.c9
-rw-r--r--drivers/net/sunqe.c7
-rw-r--r--drivers/net/sunvnet.c9
-rw-r--r--drivers/net/tc35815.c8
-rw-r--r--drivers/net/tehuti.c10
-rw-r--r--drivers/net/tg3.c839
-rw-r--r--drivers/net/tg3.h17
-rw-r--r--drivers/net/tlan.c13
-rw-r--r--drivers/net/tokenring/3c359.c112
-rw-r--r--drivers/net/tokenring/ibmtr.c13
-rw-r--r--drivers/net/tokenring/lanstreamer.c58
-rw-r--r--drivers/net/tokenring/olympic.c74
-rw-r--r--drivers/net/tokenring/smctr.c2
-rw-r--r--drivers/net/tokenring/tms380tr.c18
-rw-r--r--drivers/net/tsi108_eth.c16
-rw-r--r--drivers/net/tulip/de2104x.c13
-rw-r--r--drivers/net/tulip/de4x5.c19
-rw-r--r--drivers/net/tulip/dmfe.c16
-rw-r--r--drivers/net/tulip/pnic.c2
-rw-r--r--drivers/net/tulip/tulip_core.c31
-rw-r--r--drivers/net/tulip/uli526x.c10
-rw-r--r--drivers/net/tulip/winbond-840.c16
-rw-r--r--drivers/net/tulip/xircom_cb.c6
-rw-r--r--drivers/net/tun.c52
-rw-r--r--drivers/net/typhoon.c8
-rw-r--r--drivers/net/ucc_geth.c12
-rw-r--r--drivers/net/usb/asix.c16
-rw-r--r--drivers/net/usb/catc.c6
-rw-r--r--drivers/net/usb/cdc_ether.c113
-rw-r--r--drivers/net/usb/dm9601.c6
-rw-r--r--drivers/net/usb/hso.c2
-rw-r--r--drivers/net/usb/kaweth.c1
-rw-r--r--drivers/net/usb/mcs7830.c6
-rw-r--r--drivers/net/usb/pegasus.c3
-rw-r--r--drivers/net/usb/smsc75xx.c6
-rw-r--r--drivers/net/usb/smsc95xx.c6
-rw-r--r--drivers/net/usb/usbnet.c15
-rw-r--r--drivers/net/vbus-enet.c1560
-rw-r--r--drivers/net/veth.c1
-rw-r--r--drivers/net/via-rhine.c10
-rw-r--r--drivers/net/via-velocity.c121
-rw-r--r--drivers/net/via-velocity.h77
-rw-r--r--drivers/net/virtio_net.c96
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c8
-rw-r--r--drivers/net/vxge/vxge-config.c24
-rw-r--r--drivers/net/vxge/vxge-config.h34
-rw-r--r--drivers/net/vxge/vxge-ethtool.c5
-rw-r--r--drivers/net/vxge/vxge-main.c241
-rw-r--r--drivers/net/vxge/vxge-main.h6
-rw-r--r--drivers/net/vxge/vxge-traffic.c53
-rw-r--r--drivers/net/vxge/vxge-traffic.h50
-rw-r--r--drivers/net/vxge/vxge-version.h4
-rw-r--r--drivers/net/wan/cycx_x25.c13
-rw-r--r--drivers/net/wan/dscc4.c2
-rw-r--r--drivers/net/wan/hd64570.c1
-rw-r--r--drivers/net/wan/hd64572.c1
-rw-r--r--drivers/net/wan/hdlc_x25.c12
-rw-r--r--drivers/net/wan/ixp4xx_hss.c1
-rw-r--r--drivers/net/wan/lapbether.c12
-rw-r--r--drivers/net/wan/lmc/lmc_main.c6
-rw-r--r--drivers/net/wan/pc300_drv.c5
-rw-r--r--drivers/net/wan/pc300_tty.c2
-rw-r--r--drivers/net/wan/sdla.c2
-rw-r--r--drivers/net/wan/wanxl.c1
-rw-r--r--drivers/net/wan/x25_asy.c12
-rw-r--r--drivers/net/wimax/i2400m/tx.c2
-rw-r--r--drivers/net/wireless/Kconfig92
-rw-r--r--drivers/net/wireless/adm8211.c12
-rw-r--r--drivers/net/wireless/airo.c37
-rw-r--r--drivers/net/wireless/airo_cs.c72
-rw-r--r--drivers/net/wireless/at76c50x-usb.c1
-rw-r--r--drivers/net/wireless/ath/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ar9170/ar9170.h52
-rw-r--r--drivers/net/wireless/ath/ar9170/cmd.h2
-rw-r--r--drivers/net/wireless/ath/ar9170/eeprom.h4
-rw-r--r--drivers/net/wireless/ath/ar9170/hw.h1
-rw-r--r--drivers/net/wireless/ath/ar9170/main.c587
-rw-r--r--drivers/net/wireless/ath/ar9170/usb.c26
-rw-r--r--drivers/net/wireless/ath/ar9170/usb.h1
-rw-r--r--drivers/net/wireless/ath/ath.h27
-rw-r--r--drivers/net/wireless/ath/ath5k/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.c744
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.h104
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h313
-rw-r--r--drivers/net/wireless/ath/ath5k/attach.c7
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c329
-rw-r--r--drivers/net/wireless/ath/ath5k/base.h39
-rw-r--r--drivers/net/wireless/ath/ath5k/caps.c9
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c382
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.h4
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.c19
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.h35
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.h88
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c379
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c75
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c17
-rw-r--r--drivers/net/wireless/ath/ath5k/reg.h42
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c38
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig21
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile26
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c217
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_initvals.h742
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c1374
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9001_initvals.h1254
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c1000
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c598
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_initvals.h (renamed from drivers/net/wireless/ath/ath9k/initvals.h)2292
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c480
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c535
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.h572
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c803
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c1860
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h323
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c205
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_initvals.h1784
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c614
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.h120
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c1134
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h847
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h28
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c40
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c1023
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.h19
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c392
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h17
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c61
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h13
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h25
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c19
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c15
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c17
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c1008
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.h104
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h465
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c255
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c827
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c1771
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c708
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c477
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.h246
-rw-r--r--drivers/net/wireless/ath/ath9k/hw-ops.h280
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c1913
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h275
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c86
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c571
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h93
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c158
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/phy.c978
-rw-r--r--drivers/net/wireless/ath/ath9k/phy.h596
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c21
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.h11
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c533
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h183
-rw-r--r--drivers/net/wireless/ath/ath9k/virtual.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c336
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.h139
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c591
-rw-r--r--drivers/net/wireless/ath/debug.h1
-rw-r--r--drivers/net/wireless/ath/hw.c4
-rw-r--r--drivers/net/wireless/ath/regd.c3
-rw-r--r--drivers/net/wireless/atmel.c1
-rw-r--r--drivers/net/wireless/atmel_cs.c70
-rw-r--r--drivers/net/wireless/b43/b43.h1
-rw-r--r--drivers/net/wireless/b43/main.c26
-rw-r--r--drivers/net/wireless/b43/pcmcia.c5
-rw-r--r--drivers/net/wireless/b43/phy_n.c479
-rw-r--r--drivers/net/wireless/b43/phy_n.h21
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c22
-rw-r--r--drivers/net/wireless/b43/tables_nphy.h37
-rw-r--r--drivers/net/wireless/b43/xmit.c1
-rw-r--r--drivers/net/wireless/b43legacy/main.c21
-rw-r--r--drivers/net/wireless/b43legacy/xmit.c1
-rw-r--r--drivers/net/wireless/hostap/hostap_cs.c38
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c59
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c188
-rw-r--r--drivers/net/wireless/ipw2x00/libipw.h14
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_module.c13
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c102
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c500
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h60
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-hw.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-rs.c91
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c361
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.h26
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965-hw.h24
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c198
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000-hw.h33
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c1493
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c331
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c850
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h56
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c276
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hw.h118
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ict.c308
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-lib.c1530
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c206
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.h8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c1340
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ucode.c425
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c1020
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.h181
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-calib.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h120
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c1020
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h136
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c912
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h288
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h53
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-helpers.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c15
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h94
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c826
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c523
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c901
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.h76
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c1074
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c408
-rw-r--r--drivers/net/wireless/iwmc3200wifi/Kconfig9
-rw-r--r--drivers/net/wireless/iwmc3200wifi/Makefile5
-rw-r--r--drivers/net/wireless/iwmc3200wifi/bus.h2
-rw-r--r--drivers/net/wireless/iwmc3200wifi/cfg80211.c17
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.c14
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.h1
-rw-r--r--drivers/net/wireless/iwmc3200wifi/debug.h7
-rw-r--r--drivers/net/wireless/iwmc3200wifi/debugfs.c123
-rw-r--r--drivers/net/wireless/iwmc3200wifi/hal.c15
-rw-r--r--drivers/net/wireless/iwmc3200wifi/hal.h5
-rw-r--r--drivers/net/wireless/iwmc3200wifi/iwm.h3
-rw-r--r--drivers/net/wireless/iwmc3200wifi/main.c9
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c79
-rw-r--r--drivers/net/wireless/iwmc3200wifi/sdio.c17
-rw-r--r--drivers/net/wireless/iwmc3200wifi/trace.c3
-rw-r--r--drivers/net/wireless/iwmc3200wifi/trace.h283
-rw-r--r--drivers/net/wireless/iwmc3200wifi/tx.c12
-rw-r--r--drivers/net/wireless/iwmc3200wifi/umac.h2
-rw-r--r--drivers/net/wireless/libertas/assoc.c22
-rw-r--r--drivers/net/wireless/libertas/cfg.c1
-rw-r--r--drivers/net/wireless/libertas/debugfs.c2
-rw-r--r--drivers/net/wireless/libertas/dev.h1
-rw-r--r--drivers/net/wireless/libertas/if_cs.c21
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c159
-rw-r--r--drivers/net/wireless/libertas/if_usb.c3
-rw-r--r--drivers/net/wireless/libertas/main.c15
-rw-r--r--drivers/net/wireless/libertas/rx.c50
-rw-r--r--drivers/net/wireless/libertas/tx.c2
-rw-r--r--drivers/net/wireless/libertas/wext.c4
-rw-r--r--drivers/net/wireless/libertas_tf/cmd.c203
-rw-r--r--drivers/net/wireless/libertas_tf/deb_defs.h104
-rw-r--r--drivers/net/wireless/libertas_tf/if_usb.c254
-rw-r--r--drivers/net/wireless/libertas_tf/libertas_tf.h2
-rw-r--r--drivers/net/wireless/libertas_tf/main.c105
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c96
-rw-r--r--drivers/net/wireless/mwl8k.c30
-rw-r--r--drivers/net/wireless/orinoco/Kconfig20
-rw-r--r--drivers/net/wireless/orinoco/Makefile4
-rw-r--r--drivers/net/wireless/orinoco/airport.c8
-rw-r--r--drivers/net/wireless/orinoco/cfg.c91
-rw-r--r--drivers/net/wireless/orinoco/fw.c10
-rw-r--r--drivers/net/wireless/orinoco/hermes.c286
-rw-r--r--drivers/net/wireless/orinoco/hermes.h62
-rw-r--r--drivers/net/wireless/orinoco/hermes_dld.c243
-rw-r--r--drivers/net/wireless/orinoco/hw.c102
-rw-r--r--drivers/net/wireless/orinoco/hw.h1
-rw-r--r--drivers/net/wireless/orinoco/main.c305
-rw-r--r--drivers/net/wireless/orinoco/main.h12
-rw-r--r--drivers/net/wireless/orinoco/orinoco.h38
-rw-r--r--drivers/net/wireless/orinoco/orinoco_cs.c110
-rw-r--r--drivers/net/wireless/orinoco/orinoco_nortel.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_pci.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_plx.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_tmd.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_usb.c1795
-rw-r--r--drivers/net/wireless/orinoco/scan.c4
-rw-r--r--drivers/net/wireless/orinoco/spectrum_cs.c32
-rw-r--r--drivers/net/wireless/orinoco/wext.c273
-rw-r--r--drivers/net/wireless/p54/main.c2
-rw-r--r--drivers/net/wireless/p54/p54pci.c10
-rw-r--r--drivers/net/wireless/p54/txrx.c3
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.c16
-rw-r--r--drivers/net/wireless/prism54/islpci_eth.c10
-rw-r--r--drivers/net/wireless/prism54/islpci_mgt.c8
-rw-r--r--drivers/net/wireless/prism54/oid_mgt.c2
-rw-r--r--drivers/net/wireless/ray_cs.c252
-rw-r--r--drivers/net/wireless/ray_cs.h1
-rw-r--r--drivers/net/wireless/rndis_wlan.c374
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig4
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c30
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c28
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c77
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h119
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c676
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c304
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c289
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.h40
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h35
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00crypto.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00ht.c17
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c32
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h8
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00reg.h10
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c60
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c59
-rw-r--r--drivers/net/wireless/rtl818x/Kconfig88
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180.h11
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_dev.c115
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_dev.c14
-rw-r--r--drivers/net/wireless/wl12xx/Kconfig24
-rw-r--r--drivers/net/wireless/wl12xx/Makefile6
-rw-r--r--drivers/net/wireless/wl12xx/wl1251.h2
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_boot.c3
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_io.h20
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_main.c73
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_ps.c8
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_reg.h7
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_rx.c6
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_sdio.c144
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_spi.c2
-rw-r--r--drivers/net/wireless/wl12xx/wl1271.h63
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.c179
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.h157
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_boot.c46
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_boot.h10
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.c337
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.h27
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_conf.h488
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_debugfs.c12
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.c69
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.h8
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_init.c57
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_io.c87
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_io.h139
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_main.c1272
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_ps.c7
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_rx.c94
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_rx.h2
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_sdio.c291
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_spi.c315
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_spi.h96
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_testmode.c1
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_tx.c133
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_tx.h9
-rw-r--r--drivers/net/wireless/wl3501.h1
-rw-r--r--drivers/net/wireless/wl3501_cs.c78
-rw-r--r--drivers/net/wireless/zd1201.c9
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c13
-rw-r--r--drivers/net/xilinx_emaclite.c10
-rw-r--r--drivers/net/yellowfin.c12
-rw-r--r--drivers/net/znet.c1
-rw-r--r--drivers/of/fdt.c15
-rw-r--r--drivers/of/of_i2c.c1
-rw-r--r--drivers/of/of_mdio.c1
-rw-r--r--drivers/of/of_spi.c1
-rw-r--r--drivers/of/platform.c7
-rw-r--r--drivers/parport/parport_cs.c13
-rw-r--r--drivers/pci/Kconfig2
-rw-r--r--drivers/pci/access.c34
-rw-r--r--drivers/pci/dmar.c18
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c2
-rw-r--r--drivers/pci/intel-iommu.c80
-rw-r--r--drivers/pci/intr_remapping.c6
-rw-r--r--drivers/pci/pci-sysfs.c44
-rw-r--r--drivers/pci/pci.c4
-rw-r--r--drivers/pci/pci.h2
-rw-r--r--drivers/pci/pcie/aer/aer_inject.c2
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c179
-rw-r--r--drivers/pci/pcie/aer/aerdrv.h23
-rw-r--r--drivers/pci/pcie/aer/aerdrv_acpi.c77
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c566
-rw-r--r--drivers/pci/probe.c8
-rw-r--r--drivers/pci/slot.c48
-rw-r--r--drivers/pcmcia/Kconfig22
-rw-r--r--drivers/pcmcia/Makefile9
-rw-r--r--drivers/pcmcia/bfin_cf_pcmcia.c2
-rw-r--r--drivers/pcmcia/cardbus.c1
-rw-r--r--drivers/pcmcia/cistpl.c121
-rw-r--r--drivers/pcmcia/cs.c1
-rw-r--r--drivers/pcmcia/cs_internal.h22
-rw-r--r--drivers/pcmcia/ds.c34
-rw-r--r--drivers/pcmcia/omap_cf.c2
-rw-r--r--drivers/pcmcia/pcmcia_cis.c356
-rw-r--r--drivers/pcmcia/pcmcia_ioctl.c23
-rw-r--r--drivers/pcmcia/pcmcia_resource.c634
-rw-r--r--drivers/pcmcia/rsrc_iodyn.c172
-rw-r--r--drivers/pcmcia/rsrc_mgr.c112
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c164
-rw-r--r--drivers/pcmcia/yenta_socket.c7
-rw-r--r--drivers/power/Kconfig10
-rw-r--r--drivers/power/Makefile1
-rw-r--r--drivers/power/ds2782_battery.c194
-rw-r--r--drivers/power/pda_power.c10
-rw-r--r--drivers/power/tosa_battery.c4
-rw-r--r--drivers/power/wm831x_power.c33
-rw-r--r--drivers/power/wm97xx_battery.c3
-rw-r--r--drivers/power/z2_battery.c328
-rw-r--r--drivers/ps3/ps3-sys-manager.c2
-rw-r--r--drivers/regulator/88pm8607.c533
-rw-r--r--drivers/regulator/ab3100.c35
-rw-r--r--drivers/regulator/core.c75
-rw-r--r--drivers/regulator/mc13783-regulator.c6
-rw-r--r--drivers/regulator/tps6507x-regulator.c373
-rw-r--r--drivers/rtc/Kconfig10
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/rtc-ab3100.c41
-rw-r--r--drivers/rtc/rtc-cmos.c83
-rw-r--r--drivers/rtc/rtc-davinci.c674
-rw-r--r--drivers/rtc/rtc-omap.c12
-rw-r--r--drivers/rtc/rtc-rx8581.c6
-rw-r--r--drivers/rtc/rtc-stk17ta8.c4
-rw-r--r--drivers/s390/block/dasd.c39
-rw-r--r--drivers/s390/block/dasd_3990_erp.c20
-rw-r--r--drivers/s390/block/dasd_alias.c133
-rw-r--r--drivers/s390/block/dasd_devmap.c172
-rw-r--r--drivers/s390/block/dasd_eckd.c116
-rw-r--r--drivers/s390/block/dasd_eckd.h2
-rw-r--r--drivers/s390/block/dasd_int.h49
-rw-r--r--drivers/s390/char/Kconfig3
-rw-r--r--drivers/s390/char/fs3270.c1
-rw-r--r--drivers/s390/char/sclp_cpi_sys.c2
-rw-r--r--drivers/s390/char/vmcp.c38
-rw-r--r--drivers/s390/char/zcore.c4
-rw-r--r--drivers/s390/cio/chsc_sch.c1
-rw-r--r--drivers/s390/cio/css.c8
-rw-r--r--drivers/s390/crypto/zcrypt_api.c2
-rw-r--r--drivers/s390/net/qeth_l2_main.c5
-rw-r--r--drivers/s390/net/qeth_l3_main.c2
-rw-r--r--drivers/s390/scsi/zfcp_aux.c7
-rw-r--r--drivers/s390/scsi/zfcp_cfdc.c1
-rw-r--r--drivers/s390/scsi/zfcp_def.h19
-rw-r--r--drivers/s390/scsi/zfcp_erp.c2
-rw-r--r--drivers/s390/scsi/zfcp_ext.h6
-rw-r--r--drivers/s390/scsi/zfcp_fc.c4
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c246
-rw-r--r--drivers/s390/scsi/zfcp_fsf.h11
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c108
-rw-r--r--drivers/s390/scsi/zfcp_qdio.h104
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c23
-rw-r--r--drivers/scsi/3w-9xxx.c24
-rw-r--r--drivers/scsi/3w-9xxx.h9
-rw-r--r--drivers/scsi/3w-xxxx.c23
-rw-r--r--drivers/scsi/3w-xxxx.h8
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/a2091.c309
-rw-r--r--drivers/scsi/a2091.h42
-rw-r--r--drivers/scsi/a3000.c285
-rw-r--r--drivers/scsi/a3000.h46
-rw-r--r--drivers/scsi/bfa/bfa_cb_ioim_macros.h29
-rw-r--r--drivers/scsi/bfa/bfa_ioim.c22
-rw-r--r--drivers/scsi/bfa/bfa_os_inc.h23
-rw-r--r--drivers/scsi/bfa/bfad.c23
-rw-r--r--drivers/scsi/bfa/bfad_attr.c201
-rw-r--r--drivers/scsi/bfa/bfad_drv.h4
-rw-r--r--drivers/scsi/bfa/bfad_im.c67
-rw-r--r--drivers/scsi/bfa/bfad_im.h6
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_cee.h2
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_status.h4
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c11
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_init.c4
-rw-r--r--drivers/scsi/device_handler/scsi_dh_emc.c2
-rw-r--r--drivers/scsi/fcoe/fcoe.c127
-rw-r--r--drivers/scsi/fcoe/libfcoe.c95
-rw-r--r--drivers/scsi/fnic/fnic.h4
-rw-r--r--drivers/scsi/fnic/fnic_fcs.c2
-rw-r--r--drivers/scsi/fnic/fnic_main.c2
-rw-r--r--drivers/scsi/gdth.c2
-rw-r--r--drivers/scsi/gvp11.c565
-rw-r--r--drivers/scsi/gvp11.h36
-rw-r--r--drivers/scsi/hpsa.c8
-rw-r--r--drivers/scsi/hpsa_cmd.h15
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c8
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h1
-rw-r--r--drivers/scsi/iscsi_tcp.c10
-rw-r--r--drivers/scsi/iscsi_tcp.h1
-rw-r--r--drivers/scsi/libfc/fc_disc.c6
-rw-r--r--drivers/scsi/libfc/fc_exch.c45
-rw-r--r--drivers/scsi/libfc/fc_fcp.c93
-rw-r--r--drivers/scsi/libfc/fc_libfc.h6
-rw-r--r--drivers/scsi/libfc/fc_lport.c31
-rw-r--r--drivers/scsi/libfc/fc_npiv.c3
-rw-r--r--drivers/scsi/libfc/fc_rport.c195
-rw-r--r--drivers/scsi/libiscsi_tcp.c2
-rw-r--r--drivers/scsi/lpfc/lpfc.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c20
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c498
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c24
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c79
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h190
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h60
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c213
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c69
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c99
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c149
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c269
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h5
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c4
-rw-r--r--drivers/scsi/mpt2sas/Kconfig2
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2.h2
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h2
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_history.txt2
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_init.h2
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_ioc.h2
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_tool.h2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c90
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h36
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_config.c8
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.c46
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.h2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_debug.h2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c1055
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_transport.c89
-rw-r--r--drivers/scsi/mvme147.c169
-rw-r--r--drivers/scsi/mvme147.h4
-rw-r--r--drivers/scsi/mvsas/mv_64xx.c25
-rw-r--r--drivers/scsi/mvsas/mv_94xx.c10
-rw-r--r--drivers/scsi/mvsas/mv_init.c19
-rw-r--r--drivers/scsi/mvsas/mv_sas.c201
-rw-r--r--drivers/scsi/mvsas/mv_sas.h11
-rw-r--r--drivers/scsi/pcmcia/aha152x_stub.c9
-rw-r--r--drivers/scsi/pcmcia/fdomain_stub.c9
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.c23
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.h1
-rw-r--r--drivers/scsi/pcmcia/qlogic_stub.c13
-rw-r--r--drivers/scsi/pcmcia/sym53c500_cs.c9
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c1
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c4
-rw-r--r--drivers/scsi/pmcraid.c6
-rw-r--r--drivers/scsi/qla2xxx/Makefile3
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c743
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c1208
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.h135
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h186
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h59
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h102
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c403
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h5
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c74
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c42
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c256
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c3635
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h888
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c372
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c149
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h48
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h46
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h8
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c374
-rw-r--r--drivers/scsi/qla4xxx/ql4_iocb.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c23
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c337
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c135
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h3
-rw-r--r--drivers/scsi/scsi.c6
-rw-r--r--drivers/scsi/scsi_debug.c89
-rw-r--r--drivers/scsi/scsi_error.c19
-rw-r--r--drivers/scsi/scsi_scan.c3
-rw-r--r--drivers/scsi/scsi_sysfs.c7
-rw-r--r--drivers/scsi/scsi_trace.c284
-rw-r--r--drivers/scsi/scsi_transport_fc.c30
-rw-r--r--drivers/scsi/sd.c25
-rw-r--r--drivers/scsi/wd33c93.c6
-rw-r--r--drivers/scsi/wd33c93.h1
-rw-r--r--drivers/serial/8250.c4
-rw-r--r--drivers/serial/Kconfig7
-rw-r--r--drivers/serial/amba-pl011.c6
-rw-r--r--drivers/serial/atmel_serial.c207
-rw-r--r--drivers/serial/imx.c10
-rw-r--r--drivers/serial/kgdboc.c94
-rw-r--r--drivers/serial/mpc52xx_uart.c147
-rw-r--r--drivers/serial/mpsc.c1
-rw-r--r--drivers/serial/serial_cs.c36
-rw-r--r--drivers/serial/sh-sci.c197
-rw-r--r--drivers/serial/sunzilog.c50
-rw-r--r--drivers/sh/Kconfig24
-rw-r--r--drivers/sh/intc.c333
-rw-r--r--drivers/spi/spi_mpc8xxx.c2
-rw-r--r--drivers/ssb/driver_chipcommon.c3
-rw-r--r--drivers/ssb/main.c5
-rw-r--r--drivers/ssb/pci.c14
-rw-r--r--drivers/ssb/sprom.c14
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/arlan/arlan-main.c9
-rw-r--r--drivers/staging/comedi/drivers/cb_das16_cs.c45
-rw-r--r--drivers/staging/comedi/drivers/das08_cs.c35
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_700.c42
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_dio24.c42
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_cs.c42
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_cs.c19
-rw-r--r--drivers/staging/comedi/drivers/quatech_daqp_cs.c45
-rw-r--r--drivers/staging/cx25821/cx25821-alsa.c103
-rw-r--r--drivers/staging/cx25821/cx25821-audio-upstream.c146
-rw-r--r--drivers/staging/cx25821/cx25821-audups11.c78
-rw-r--r--drivers/staging/cx25821/cx25821-cards.c4
-rw-r--r--drivers/staging/cx25821/cx25821-core.c160
-rw-r--r--drivers/staging/cx25821/cx25821-gpio.c25
-rw-r--r--drivers/staging/cx25821/cx25821-i2c.c14
-rw-r--r--drivers/staging/cx25821/cx25821-medusa-video.c207
-rw-r--r--drivers/staging/cx25821/cx25821-video-upstream.c144
-rw-r--r--drivers/staging/cx25821/cx25821-video.c134
-rw-r--r--drivers/staging/cx25821/cx25821-video.h78
-rw-r--r--drivers/staging/cx25821/cx25821-video0.c82
-rw-r--r--drivers/staging/cx25821/cx25821-video1.c82
-rw-r--r--drivers/staging/cx25821/cx25821-video2.c82
-rw-r--r--drivers/staging/cx25821/cx25821-video3.c82
-rw-r--r--drivers/staging/cx25821/cx25821-video4.c82
-rw-r--r--drivers/staging/cx25821/cx25821-video5.c82
-rw-r--r--drivers/staging/cx25821/cx25821-video6.c82
-rw-r--r--drivers/staging/cx25821/cx25821-video7.c82
-rw-r--r--drivers/staging/cx25821/cx25821-videoioctl.c78
-rw-r--r--drivers/staging/cx25821/cx25821-vidups10.c78
-rw-r--r--drivers/staging/cx25821/cx25821-vidups9.c78
-rw-r--r--drivers/staging/et131x/et131x_netdev.c6
-rw-r--r--drivers/staging/netwave/netwave_cs.c9
-rw-r--r--drivers/staging/rtl8187se/r8180_core.c6
-rw-r--r--drivers/staging/rtl8192e/r8192E_core.c6
-rw-r--r--drivers/staging/rtl8192su/r8192U_core.c6
-rw-r--r--drivers/staging/slicoss/slicoss.c6
-rw-r--r--drivers/staging/tm6000/Kconfig32
-rw-r--r--drivers/staging/tm6000/Makefile17
-rw-r--r--drivers/staging/tm6000/README21
-rw-r--r--drivers/staging/tm6000/tm6000-alsa.c415
-rw-r--r--drivers/staging/tm6000/tm6000-cards.c974
-rw-r--r--drivers/staging/tm6000/tm6000-core.c603
-rw-r--r--drivers/staging/tm6000/tm6000-dvb.c347
-rw-r--r--drivers/staging/tm6000/tm6000-i2c.c370
-rw-r--r--drivers/staging/tm6000/tm6000-regs.h541
-rw-r--r--drivers/staging/tm6000/tm6000-stds.c873
-rw-r--r--drivers/staging/tm6000/tm6000-usb-isoc.h53
-rw-r--r--drivers/staging/tm6000/tm6000-video.c1557
-rw-r--r--drivers/staging/tm6000/tm6000.h301
-rw-r--r--drivers/staging/vt6655/device_main.c6
-rw-r--r--drivers/staging/vt6655/rxtx.c14
-rw-r--r--drivers/staging/vt6656/main_usb.c6
-rw-r--r--drivers/staging/vt6656/rxtx.c6
-rw-r--r--drivers/staging/wavelan/wavelan.c10
-rw-r--r--drivers/staging/wavelan/wavelan_cs.c27
-rw-r--r--drivers/staging/winbond/wbusb.c6
-rw-r--r--drivers/staging/wlags49_h2/wl_cs.c9
-rw-r--r--drivers/staging/wlags49_h2/wl_netdev.c12
-rw-r--r--drivers/telephony/ixj_pcmcia.c3
-rw-r--r--drivers/usb/atm/ueagle-atm.c2
-rw-r--r--drivers/usb/early/ehci-dbgp.c120
-rw-r--r--drivers/usb/gadget/at91_udc.c7
-rw-r--r--drivers/usb/host/ehci-mxc.c4
-rw-r--r--drivers/usb/host/ehci-omap.c2
-rw-r--r--drivers/usb/host/sl811_cs.c28
-rw-r--r--drivers/usb/wusbcore/wa-xfer.c2
-rw-r--r--drivers/uwb/i1480/i1480u-wlp/lc.c16
-rw-r--r--drivers/uwb/wlp/messages.c40
-rw-r--r--drivers/uwb/wlp/sysfs.c46
-rw-r--r--drivers/uwb/wlp/wlp-lc.c12
-rw-r--r--drivers/vbus/Kconfig25
-rw-r--r--drivers/vbus/Makefile6
-rw-r--r--drivers/vbus/bus-proxy.c248
-rw-r--r--drivers/vbus/pci-bridge.c1016
-rw-r--r--drivers/vhost/net.c4
-rw-r--r--drivers/vhost/vhost.c11
-rw-r--r--drivers/video/Kconfig17
-rw-r--r--drivers/video/backlight/Kconfig79
-rw-r--r--drivers/video/backlight/adx_bl.c4
-rw-r--r--drivers/video/backlight/mbp_nvidia_bl.c45
-rw-r--r--drivers/video/cobalt_lcdfb.c2
-rw-r--r--drivers/video/mx3fb.c3
-rw-r--r--drivers/video/omap2/displays/Kconfig8
-rw-r--r--drivers/video/omap2/displays/Makefile1
-rw-r--r--drivers/video/omap2/displays/panel-acx565akm.c819
-rw-r--r--drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c78
-rw-r--r--drivers/video/omap2/displays/panel-taal.c143
-rw-r--r--drivers/video/omap2/dss/Kconfig6
-rw-r--r--drivers/video/omap2/dss/Makefile3
-rw-r--r--drivers/video/omap2/dss/core.c4
-rw-r--r--drivers/video/omap2/dss/display.c9
-rw-r--r--drivers/video/omap2/dss/dss.c24
-rw-r--r--drivers/video/omap2/dss/manager.c21
-rw-r--r--drivers/video/omap2/dss/sdi.c26
-rw-r--r--drivers/video/omap2/dss/venc.c15
-rw-r--r--drivers/video/omap2/omapfb/omapfb-ioctl.c5
-rw-r--r--drivers/video/omap2/omapfb/omapfb-sysfs.c25
-rw-r--r--drivers/video/tdfxfb.c4
-rw-r--r--drivers/video/uvesafb.c7
-rw-r--r--drivers/video/via/Makefile4
-rw-r--r--drivers/video/via/accel.c137
-rw-r--r--drivers/video/via/accel.h40
-rw-r--r--drivers/video/via/chip.h8
-rw-r--r--drivers/video/via/dvi.c37
-rw-r--r--drivers/video/via/global.h1
-rw-r--r--drivers/video/via/hw.c307
-rw-r--r--drivers/video/via/hw.h20
-rw-r--r--drivers/video/via/ioctl.h2
-rw-r--r--drivers/video/via/lcd.c31
-rw-r--r--drivers/video/via/lcd.h2
-rw-r--r--drivers/video/via/share.h20
-rw-r--r--drivers/video/via/via-core.c668
-rw-r--r--drivers/video/via/via-gpio.c285
-rw-r--r--drivers/video/via/via_i2c.c232
-rw-r--r--drivers/video/via/via_modesetting.c126
-rw-r--r--drivers/video/via/via_modesetting.h38
-rw-r--r--drivers/video/via/via_utility.c1
-rw-r--r--drivers/video/via/viafbdev.c181
-rw-r--r--drivers/video/via/viafbdev.h14
-rw-r--r--drivers/video/via/viamode.c15
-rw-r--r--drivers/video/via/vt1636.c34
-rw-r--r--drivers/video/via/vt1636.h2
-rw-r--r--drivers/video/vt8623fb.c2
-rw-r--r--drivers/virtio/virtio_balloon.c17
-rw-r--r--drivers/virtio/virtio_pci.c2
-rw-r--r--drivers/virtio/virtio_ring.c44
-rw-r--r--drivers/watchdog/bfin_wdt.c19
-rw-r--r--drivers/watchdog/booke_wdt.c6
-rw-r--r--drivers/watchdog/eurotechwdt.c1
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c2
-rw-r--r--drivers/watchdog/pc87413_wdt.c9
-rw-r--r--drivers/watchdog/pnx833x_wdt.c11
-rw-r--r--drivers/watchdog/rdc321x_wdt.c53
-rw-r--r--drivers/watchdog/s3c2410_wdt.c6
-rw-r--r--drivers/watchdog/shwdt.c2
-rw-r--r--drivers/watchdog/wdt.c2
-rw-r--r--drivers/watchdog/wdt977.c2
-rw-r--r--drivers/xen/manage.c14
-rw-r--r--fs/9p/v9fs_vfs.h2
-rw-r--r--fs/9p/vfs_dir.c8
-rw-r--r--fs/9p/vfs_file.c11
-rw-r--r--fs/9p/vfs_inode.c78
-rw-r--r--fs/9p/vfs_super.c55
-rw-r--r--fs/autofs4/root.c5
-rw-r--r--fs/block_dev.c257
-rw-r--r--fs/btrfs/extent-tree.c2
-rw-r--r--fs/cachefiles/internal.h1
-rw-r--r--fs/cachefiles/namei.c98
-rw-r--r--fs/ceph/addr.c11
-rw-r--r--fs/ceph/auth.c9
-rw-r--r--fs/ceph/auth.h2
-rw-r--r--fs/ceph/auth_none.c1
-rw-r--r--fs/ceph/auth_x.c19
-rw-r--r--fs/ceph/caps.c21
-rw-r--r--fs/ceph/dir.c30
-rw-r--r--fs/ceph/export.c14
-rw-r--r--fs/ceph/file.c8
-rw-r--r--fs/ceph/inode.c81
-rw-r--r--fs/ceph/ioctl.c2
-rw-r--r--fs/ceph/mds_client.c75
-rw-r--r--fs/ceph/messenger.c58
-rw-r--r--fs/ceph/messenger.h5
-rw-r--r--fs/ceph/mon_client.c187
-rw-r--r--fs/ceph/mon_client.h12
-rw-r--r--fs/ceph/msgpool.c180
-rw-r--r--fs/ceph/msgpool.h12
-rw-r--r--fs/ceph/osd_client.c94
-rw-r--r--fs/ceph/pagelist.c2
-rw-r--r--fs/ceph/snap.c2
-rw-r--r--fs/ceph/super.c119
-rw-r--r--fs/ceph/super.h25
-rw-r--r--fs/ceph/xattr.c14
-rw-r--r--fs/cifs/asn1.c103
-rw-r--r--fs/cifs/cifs_debug.c48
-rw-r--r--fs/cifs/cifs_debug.h42
-rw-r--r--fs/cifs/cifs_dfs_ref.c34
-rw-r--r--fs/cifs/cifs_spnego.c6
-rw-r--r--fs/cifs/cifs_unicode.c5
-rw-r--r--fs/cifs/cifsacl.c76
-rw-r--r--fs/cifs/cifsencrypt.c10
-rw-r--r--fs/cifs/cifsfs.c163
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/cifsglob.h9
-rw-r--r--fs/cifs/cifsproto.h29
-rw-r--r--fs/cifs/cifssmb.c437
-rw-r--r--fs/cifs/connect.c639
-rw-r--r--fs/cifs/dir.c90
-rw-r--r--fs/cifs/dns_resolve.c16
-rw-r--r--fs/cifs/export.c2
-rw-r--r--fs/cifs/file.c222
-rw-r--r--fs/cifs/inode.c104
-rw-r--r--fs/cifs/ioctl.c10
-rw-r--r--fs/cifs/link.c10
-rw-r--r--fs/cifs/misc.c81
-rw-r--r--fs/cifs/netmisc.c16
-rw-r--r--fs/cifs/readdir.c85
-rw-r--r--fs/cifs/sess.c81
-rw-r--r--fs/cifs/transport.c92
-rw-r--r--fs/cifs/xattr.c40
-rw-r--r--fs/compat.c5
-rw-r--r--fs/dlm/lock.c5
-rw-r--r--fs/dlm/user.c88
-rw-r--r--fs/ecryptfs/main.c28
-rw-r--r--fs/exec.c4
-rw-r--r--fs/exofs/dir.c2
-rw-r--r--fs/exofs/inode.c30
-rw-r--r--fs/ext2/balloc.c6
-rw-r--r--fs/ext2/ialloc.c9
-rw-r--r--fs/ext2/inode.c5
-rw-r--r--fs/ext2/super.c99
-rw-r--r--fs/ext2/xattr.c2
-rw-r--r--fs/ext3/balloc.c6
-rw-r--r--fs/ext3/fsync.c23
-rw-r--r--fs/ext3/super.c77
-rw-r--r--fs/ext4/extents.c82
-rw-r--r--fs/ext4/fsync.c8
-rw-r--r--fs/ext4/ialloc.c85
-rw-r--r--fs/ext4/inode.c12
-rw-r--r--fs/ext4/ioctl.c2
-rw-r--r--fs/ext4/mballoc.c34
-rw-r--r--fs/ext4/move_extent.c1
-rw-r--r--fs/ext4/resize.c3
-rw-r--r--fs/ext4/super.c20
-rw-r--r--fs/ext4/symlink.c2
-rw-r--r--fs/fat/dir.c17
-rw-r--r--fs/fat/inode.c6
-rw-r--r--fs/fcntl.c66
-rw-r--r--fs/gfs2/aops.c8
-rw-r--r--fs/gfs2/bmap.c17
-rw-r--r--fs/gfs2/dir.c2
-rw-r--r--fs/gfs2/export.c2
-rw-r--r--fs/gfs2/glock.c3
-rw-r--r--fs/gfs2/incore.h11
-rw-r--r--fs/gfs2/inode.c101
-rw-r--r--fs/gfs2/inode.h5
-rw-r--r--fs/gfs2/log.c158
-rw-r--r--fs/gfs2/log.h1
-rw-r--r--fs/gfs2/lops.c2
-rw-r--r--fs/gfs2/main.c2
-rw-r--r--fs/gfs2/meta_io.c1
-rw-r--r--fs/gfs2/ops_fstype.c19
-rw-r--r--fs/gfs2/quota.c114
-rw-r--r--fs/gfs2/rgrp.c63
-rw-r--r--fs/gfs2/super.c11
-rw-r--r--fs/gfs2/sys.c6
-rw-r--r--fs/gfs2/trans.c18
-rw-r--r--fs/inode.c10
-rw-r--r--fs/jbd/commit.c8
-rw-r--r--fs/jbd/journal.c33
-rw-r--r--fs/jbd2/checkpoint.c3
-rw-r--r--fs/jbd2/commit.c6
-rw-r--r--fs/jbd2/journal.c2
-rw-r--r--fs/jbd2/transaction.c5
-rw-r--r--fs/jffs2/fs.c10
-rw-r--r--fs/jffs2/nodelist.h8
-rw-r--r--fs/jfs/jfs_dmap.c2
-rw-r--r--fs/libfs.c35
-rw-r--r--fs/logfs/dev_bdev.c6
-rw-r--r--fs/logfs/dev_mtd.c26
-rw-r--r--fs/logfs/dir.c2
-rw-r--r--fs/logfs/file.c16
-rw-r--r--fs/logfs/gc.c49
-rw-r--r--fs/logfs/inode.c6
-rw-r--r--fs/logfs/journal.c7
-rw-r--r--fs/logfs/logfs.h15
-rw-r--r--fs/logfs/logfs_abi.h10
-rw-r--r--fs/logfs/readwrite.c19
-rw-r--r--fs/logfs/segment.c7
-rw-r--r--fs/logfs/super.c8
-rw-r--r--fs/namei.c65
-rw-r--r--fs/namespace.c18
-rw-r--r--fs/nfs/callback.c11
-rw-r--r--fs/nfs/client.c55
-rw-r--r--fs/nfs/dir.c62
-rw-r--r--fs/nfs/file.c15
-rw-r--r--fs/nfs/getroot.c191
-rw-r--r--fs/nfs/inode.c58
-rw-r--r--fs/nfs/internal.h4
-rw-r--r--fs/nfs/namespace.c20
-rw-r--r--fs/nfs/nfs3acl.c23
-rw-r--r--fs/nfs/nfs3proc.c128
-rw-r--r--fs/nfs/nfs3xdr.c2
-rw-r--r--fs/nfs/nfs4_fs.h4
-rw-r--r--fs/nfs/nfs4namespace.c12
-rw-r--r--fs/nfs/nfs4proc.c99
-rw-r--r--fs/nfs/nfs4state.c15
-rw-r--r--fs/nfs/nfs4xdr.c22
-rw-r--r--fs/nfs/nfsroot.c14
-rw-r--r--fs/nfs/proc.c144
-rw-r--r--fs/nfs/super.c129
-rw-r--r--fs/nfs/unlink.c4
-rw-r--r--fs/nfsd/export.c44
-rw-r--r--fs/nfsd/nfs4callback.c129
-rw-r--r--fs/nfsd/nfs4proc.c45
-rw-r--r--fs/nfsd/nfs4state.c169
-rw-r--r--fs/nfsd/nfs4xdr.c14
-rw-r--r--fs/nfsd/nfsctl.c64
-rw-r--r--fs/nfsd/nfsd.h6
-rw-r--r--fs/nfsd/state.h30
-rw-r--r--fs/nfsd/vfs.c12
-rw-r--r--fs/nfsd/vfs.h1
-rw-r--r--fs/nfsd/xdr4.h5
-rw-r--r--fs/nilfs2/alloc.c154
-rw-r--r--fs/nilfs2/alloc.h7
-rw-r--r--fs/nilfs2/btree.c91
-rw-r--r--fs/nilfs2/btree.h23
-rw-r--r--fs/nilfs2/inode.c4
-rw-r--r--fs/nilfs2/recovery.c2
-rw-r--r--fs/nilfs2/segbuf.c70
-rw-r--r--fs/nilfs2/segbuf.h10
-rw-r--r--fs/nilfs2/segment.c157
-rw-r--r--fs/nilfs2/segment.h6
-rw-r--r--fs/nilfs2/super.c218
-rw-r--r--fs/nilfs2/the_nilfs.c14
-rw-r--r--fs/notify/Kconfig1
-rw-r--r--fs/notify/Makefile4
-rw-r--r--fs/notify/dnotify/dnotify.c198
-rw-r--r--fs/notify/fanotify/Kconfig26
-rw-r--r--fs/notify/fanotify/Makefile1
-rw-r--r--fs/notify/fanotify/fanotify.c255
-rw-r--r--fs/notify/fanotify/fanotify_user.c777
-rw-r--r--fs/notify/fsnotify.c141
-rw-r--r--fs/notify/fsnotify.h33
-rw-r--r--fs/notify/group.c178
-rw-r--r--fs/notify/inode_mark.c309
-rw-r--r--fs/notify/inotify/Kconfig16
-rw-r--r--fs/notify/inotify/Makefile1
-rw-r--r--fs/notify/inotify/inotify.c933
-rw-r--r--fs/notify/inotify/inotify.h7
-rw-r--r--fs/notify/inotify/inotify_fsnotify.c114
-rw-r--r--fs/notify/inotify/inotify_user.c305
-rw-r--r--fs/notify/mark.c325
-rw-r--r--fs/notify/notification.c180
-rw-r--r--fs/notify/vfsmount_mark.c175
-rw-r--r--fs/ocfs2/Makefile1
-rw-r--r--fs/ocfs2/alloc.c196
-rw-r--r--fs/ocfs2/aops.c3
-rw-r--r--fs/ocfs2/cluster/masklog.c1
-rw-r--r--fs/ocfs2/cluster/masklog.h1
-rw-r--r--fs/ocfs2/cluster/tcp.c3
-rw-r--r--fs/ocfs2/dir.c53
-rw-r--r--fs/ocfs2/dlm/dlmast.c4
-rw-r--r--fs/ocfs2/dlm/dlmcommon.h2
-rw-r--r--fs/ocfs2/dlm/dlmconvert.c4
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c25
-rw-r--r--fs/ocfs2/dlm/dlmlock.c4
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c12
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c27
-rw-r--r--fs/ocfs2/dlm/dlmunlock.c3
-rw-r--r--fs/ocfs2/file.c22
-rw-r--r--fs/ocfs2/inode.c22
-rw-r--r--fs/ocfs2/inode.h2
-rw-r--r--fs/ocfs2/journal.c26
-rw-r--r--fs/ocfs2/journal.h3
-rw-r--r--fs/ocfs2/localalloc.c271
-rw-r--r--fs/ocfs2/localalloc.h3
-rw-r--r--fs/ocfs2/namei.c62
-rw-r--r--fs/ocfs2/ocfs2.h15
-rw-r--r--fs/ocfs2/ocfs2_fs.h8
-rw-r--r--fs/ocfs2/quota_global.c4
-rw-r--r--fs/ocfs2/quota_local.c50
-rw-r--r--fs/ocfs2/refcounttree.c22
-rw-r--r--fs/ocfs2/reservations.c846
-rw-r--r--fs/ocfs2/reservations.h159
-rw-r--r--fs/ocfs2/resize.c13
-rw-r--r--fs/ocfs2/suballoc.c75
-rw-r--r--fs/ocfs2/suballoc.h2
-rw-r--r--fs/ocfs2/super.c65
-rw-r--r--fs/ocfs2/xattr.c72
-rw-r--r--fs/omfs/inode.c1
-rw-r--r--fs/open.c3
-rw-r--r--fs/quota/dquot.c211
-rw-r--r--fs/quota/quota.c63
-rw-r--r--fs/quota/quota_tree.c4
-rw-r--r--fs/quota/quota_v1.c4
-rw-r--r--fs/read_write.c8
-rw-r--r--fs/reiserfs/file.c3
-rw-r--r--fs/ubifs/io.c1
-rw-r--r--fs/ufs/inode.c2
-rw-r--r--fs/ufs/namei.c2
-rw-r--r--fs/ufs/symlink.c8
-rw-r--r--fs/ufs/truncate.c2
-rw-r--r--fs/ufs/ufs.h2
-rw-r--r--fs/xfs/linux-2.6/xfs_aops.c231
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.c27
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.h2
-rw-r--r--fs/xfs/linux-2.6/xfs_file.c2
-rw-r--r--fs/xfs/linux-2.6/xfs_ioctl.c4
-rw-r--r--fs/xfs/linux-2.6/xfs_ioctl32.c4
-rw-r--r--fs/xfs/linux-2.6/xfs_iops.c5
-rw-r--r--fs/xfs/linux-2.6/xfs_quotaops.c8
-rw-r--r--fs/xfs/linux-2.6/xfs_super.c13
-rw-r--r--fs/xfs/linux-2.6/xfs_sync.c91
-rw-r--r--fs/xfs/linux-2.6/xfs_trace.c4
-rw-r--r--fs/xfs/linux-2.6/xfs_trace.h150
-rw-r--r--fs/xfs/quota/xfs_dquot.c193
-rw-r--r--fs/xfs/quota/xfs_dquot.h35
-rw-r--r--fs/xfs/quota/xfs_dquot_item.c30
-rw-r--r--fs/xfs/quota/xfs_qm.c609
-rw-r--r--fs/xfs/quota/xfs_qm.h23
-rw-r--r--fs/xfs/quota/xfs_qm_stats.c2
-rw-r--r--fs/xfs/quota/xfs_qm_syscalls.c162
-rw-r--r--fs/xfs/quota/xfs_quota_priv.h102
-rw-r--r--fs/xfs/quota/xfs_trans_dquot.c29
-rw-r--r--fs/xfs/xfs_bmap.c2
-rw-r--r--fs/xfs/xfs_buf_item.c55
-rw-r--r--fs/xfs/xfs_buf_item.h2
-rw-r--r--fs/xfs/xfs_error.c30
-rw-r--r--fs/xfs/xfs_error.h9
-rw-r--r--fs/xfs/xfs_extfree_item.c18
-rw-r--r--fs/xfs/xfs_inode.c2
-rw-r--r--fs/xfs/xfs_inode_item.c21
-rw-r--r--fs/xfs/xfs_iomap.c123
-rw-r--r--fs/xfs/xfs_iomap.h47
-rw-r--r--fs/xfs/xfs_log.c702
-rw-r--r--fs/xfs/xfs_log.h13
-rw-r--r--fs/xfs/xfs_log_priv.h12
-rw-r--r--fs/xfs/xfs_log_recover.c311
-rw-r--r--fs/xfs/xfs_mount.c7
-rw-r--r--fs/xfs/xfs_quota.h3
-rw-r--r--fs/xfs/xfs_trans.c760
-rw-r--r--fs/xfs/xfs_trans.h14
-rw-r--r--fs/xfs/xfs_trans_buf.c187
-rw-r--r--include/acpi/acexcep.h2
-rw-r--r--include/acpi/acoutput.h2
-rw-r--r--include/acpi/acpi_bus.h2
-rw-r--r--include/acpi/acpi_drivers.h3
-rw-r--r--include/acpi/acpi_hest.h12
-rw-r--r--include/acpi/acpiosxf.h4
-rw-r--r--include/acpi/acpixf.h43
-rw-r--r--include/acpi/actbl2.h27
-rw-r--r--include/acpi/actypes.h44
-rw-r--r--include/acpi/apei.h34
-rw-r--r--include/acpi/atomicio.h10
-rw-r--r--include/acpi/hed.h18
-rw-r--r--include/acpi/video.h16
-rw-r--r--include/asm-generic/bitops/arch_hweight.h25
-rw-r--r--include/asm-generic/bitops/const_hweight.h42
-rw-r--r--include/asm-generic/bitops/hweight.h8
-rw-r--r--include/asm-generic/fcntl.h8
-rw-r--r--include/asm-generic/kmap_types.h3
-rw-r--r--include/asm-generic/percpu.h10
-rw-r--r--include/asm-generic/vmlinux.lds.h38
-rw-r--r--include/drm/drmP.h3
-rw-r--r--include/drm/drm_crtc.h24
-rw-r--r--include/drm/drm_crtc_helper.h6
-rw-r--r--include/drm/drm_edid.h3
-rw-r--r--include/drm/drm_fb_helper.h76
-rw-r--r--include/drm/ttm/ttm_bo_api.h29
-rw-r--r--include/drm/ttm/ttm_bo_driver.h57
-rw-r--r--include/drm/ttm/ttm_page_alloc.h74
-rw-r--r--include/linux/Kbuild6
-rw-r--r--include/linux/acpi.h7
-rw-r--r--include/linux/ahci_platform.h29
-rw-r--r--include/linux/amba/dma.h49
-rw-r--r--include/linux/amba/mmci.h23
-rw-r--r--include/linux/backing-dev.h3
-rw-r--r--include/linux/bitops.h30
-rw-r--r--include/linux/blkdev.h67
-rw-r--r--include/linux/cache.h2
-rw-r--r--include/linux/caif/caif_socket.h165
-rw-r--r--include/linux/caif/if_caif.h34
-rw-r--r--include/linux/can/dev.h1
-rw-r--r--include/linux/can/platform/mcp251x.h4
-rw-r--r--include/linux/clocksource.h19
-rw-r--r--include/linux/cper.h314
-rw-r--r--include/linux/cpufreq.h35
-rw-r--r--include/linux/cpuset.h16
-rw-r--r--include/linux/dcbnl.h2
-rw-r--r--include/linux/device.h12
-rw-r--r--include/linux/dmaengine.h66
-rw-r--r--include/linux/dnotify.h1
-rw-r--r--include/linux/dqblk_xfs.h9
-rw-r--r--include/linux/ds2782_battery.h8
-rw-r--r--include/linux/edac_mce.h31
-rw-r--r--include/linux/elevator.h6
-rw-r--r--include/linux/ethtool.h116
-rw-r--r--include/linux/ext2_fs_sb.h9
-rw-r--r--include/linux/fanotify.h105
-rw-r--r--include/linux/filter.h3
-rw-r--r--include/linux/firewire.h2
-rw-r--r--include/linux/firmware.h1
-rw-r--r--include/linux/fs.h31
-rw-r--r--include/linux/fsnotify.h164
-rw-r--r--include/linux/fsnotify_backend.h169
-rw-r--r--include/linux/ftrace.h61
-rw-r--r--include/linux/ftrace_event.h3
-rw-r--r--include/linux/genetlink.h8
-rw-r--r--include/linux/hdpu_features.h26
-rw-r--r--include/linux/hid.h10
-rw-r--r--include/linux/hrtimer.h2
-rw-r--r--include/linux/hw_breakpoint.h25
-rw-r--r--include/linux/i2c-omap.h9
-rw-r--r--include/linux/i2c.h4
-rw-r--r--include/linux/i2c/twl.h14
-rw-r--r--include/linux/ide.h4
-rw-r--r--include/linux/ieee80211.h4
-rw-r--r--include/linux/if.h2
-rw-r--r--include/linux/if_arp.h1
-rw-r--r--include/linux/if_ether.h1
-rw-r--r--include/linux/if_link.h33
-rw-r--r--include/linux/if_packet.h1
-rw-r--r--include/linux/if_pppol2tp.h16
-rw-r--r--include/linux/if_pppox.h9
-rw-r--r--include/linux/if_tun.h2
-rw-r--r--include/linux/if_x25.h26
-rw-r--r--include/linux/in6.h5
-rw-r--r--include/linux/init.h2
-rw-r--r--include/linux/init_task.h2
-rw-r--r--include/linux/inotify.h175
-rw-r--r--include/linux/input.h41
-rw-r--r--include/linux/input/ad714x.h63
-rw-r--r--include/linux/input/tps6507x-ts.h24
-rw-r--r--include/linux/interrupt.h32
-rw-r--r--include/linux/ioport.h4
-rw-r--r--include/linux/ioq.h414
-rw-r--r--include/linux/ipv6.h17
-rw-r--r--include/linux/irq.h1
-rw-r--r--include/linux/isapnp.h8
-rw-r--r--include/linux/iscsi_boot_sysfs.h123
-rw-r--r--include/linux/iscsi_ibft.h12
-rw-r--r--include/linux/jbd.h3
-rw-r--r--include/linux/jffs2.h4
-rw-r--r--include/linux/kdb.h117
-rw-r--r--include/linux/kernel.h17
-rw-r--r--include/linux/kfifo.h4
-rw-r--r--include/linux/kgdb.h55
-rw-r--r--include/linux/kobj_map.h9
-rw-r--r--include/linux/kref.h2
-rw-r--r--include/linux/ks8842.h34
-rw-r--r--include/linux/kvm.h26
-rw-r--r--include/linux/kvm_host.h16
-rw-r--r--include/linux/l2tp.h163
-rw-r--r--include/linux/libata.h12
-rw-r--r--include/linux/linkage.h8
-rw-r--r--include/linux/lsm_audit.h2
-rw-r--r--include/linux/meye.h12
-rw-r--r--include/linux/mfd/88pm860x.h7
-rw-r--r--include/linux/mfd/abx500.h (renamed from include/linux/mfd/ab3100.h)134
-rw-r--r--include/linux/mfd/davinci_voicecodec.h126
-rw-r--r--include/linux/mfd/janz.h54
-rw-r--r--include/linux/mfd/rdc321x.h26
-rw-r--r--include/linux/mfd/tc35892.h132
-rw-r--r--include/linux/mfd/tps6507x.h169
-rw-r--r--include/linux/mfd/wm831x/core.h5
-rw-r--r--include/linux/mfd/wm8350/audio.h2
-rw-r--r--include/linux/mfd/wm8994/core.h53
-rw-r--r--include/linux/mfd/wm8994/pdata.h1
-rw-r--r--include/linux/mlx4/device.h4
-rw-r--r--include/linux/mlx4/qp.h7
-rw-r--r--include/linux/mm.h4
-rw-r--r--include/linux/mmc/host.h3
-rw-r--r--include/linux/mmc/sdio.h2
-rw-r--r--include/linux/mod_devicetable.h33
-rw-r--r--include/linux/moduleparam.h282
-rw-r--r--include/linux/mount.h6
-rw-r--r--include/linux/mroute.h20
-rw-r--r--include/linux/msm_mdp.h78
-rw-r--r--include/linux/mtd/blktrans.h15
-rw-r--r--include/linux/mtd/cfi.h14
-rw-r--r--include/linux/mtd/flashchip.h4
-rw-r--r--include/linux/mtd/mtd.h8
-rw-r--r--include/linux/mtd/mtdram.h2
-rw-r--r--include/linux/mtd/nand.h21
-rw-r--r--include/linux/mtd/sh_flctl.h5
-rw-r--r--include/linux/net.h14
-rw-r--r--include/linux/netdevice.h287
-rw-r--r--include/linux/netfilter/Kbuild1
-rw-r--r--include/linux/netfilter/nf_conntrack_common.h1
-rw-r--r--include/linux/netfilter/nf_conntrack_tuple_common.h3
-rw-r--r--include/linux/netfilter/x_tables.h50
-rw-r--r--include/linux/netfilter/xt_CONNMARK.h22
-rw-r--r--include/linux/netfilter/xt_MARK.h6
-rw-r--r--include/linux/netfilter/xt_TEE.h12
-rw-r--r--include/linux/netfilter/xt_connmark.h11
-rw-r--r--include/linux/netfilter/xt_mark.h4
-rw-r--r--include/linux/netfilter/xt_recent.h7
-rw-r--r--include/linux/netfilter_bridge.h29
-rw-r--r--include/linux/netfilter_ipv6/ip6_tables.h4
-rw-r--r--include/linux/netpoll.h13
-rw-r--r--include/linux/nfs_fs.h14
-rw-r--r--include/linux/nfs_fs_sb.h1
-rw-r--r--include/linux/nfs_xdr.h7
-rw-r--r--include/linux/nfsd/nfsfh.h6
-rw-r--r--include/linux/nilfs2_fs.h17
-rw-r--r--include/linux/nl80211.h76
-rw-r--r--include/linux/nmi.h13
-rw-r--r--include/linux/notifier.h10
-rw-r--r--include/linux/of_device.h2
-rw-r--r--include/linux/of_fdt.h4
-rw-r--r--include/linux/of_platform.h2
-rw-r--r--include/linux/pci.h14
-rw-r--r--include/linux/pci_ids.h39
-rw-r--r--include/linux/pci_regs.h6
-rw-r--r--include/linux/pda_power.h2
-rw-r--r--include/linux/percpu-defs.h4
-rw-r--r--include/linux/perf_event.h83
-rw-r--r--include/linux/phy.h13
-rw-r--r--include/linux/pm_qos_params.h14
-rw-r--r--include/linux/pm_runtime.h7
-rw-r--r--include/linux/pm_wakeup.h38
-rw-r--r--include/linux/ppp_channel.h3
-rw-r--r--include/linux/ptrace.h12
-rw-r--r--include/linux/quota.h48
-rw-r--r--include/linux/quotaops.h6
-rw-r--r--include/linux/rbtree.h5
-rw-r--r--include/linux/rculist.h42
-rw-r--r--include/linux/rcupdate.h42
-rw-r--r--include/linux/rcutiny.h31
-rw-r--r--include/linux/rcutree.h7
-rw-r--r--include/linux/regulator/machine.h9
-rw-r--r--include/linux/ring_buffer.h4
-rw-r--r--include/linux/rtc-v3020.h2
-rw-r--r--include/linux/rtnetlink.h6
-rw-r--r--include/linux/sched.h79
-rw-r--r--include/linux/security.h181
-rw-r--r--include/linux/serial_core.h1
-rw-r--r--include/linux/serial_sci.h6
-rw-r--r--include/linux/serio.h1
-rw-r--r--include/linux/sh_dma.h102
-rw-r--r--include/linux/sh_intc.h26
-rw-r--r--include/linux/shm_signal.h189
-rw-r--r--include/linux/skbuff.h25
-rw-r--r--include/linux/snmp.h2
-rw-r--r--include/linux/socket.h5
-rw-r--r--include/linux/spi/wl12xx.h2
-rw-r--r--include/linux/spinlock.h2
-rw-r--r--include/linux/srcu.h6
-rw-r--r--include/linux/ssb/ssb.h4
-rw-r--r--include/linux/ssb/ssb_driver_chipcommon.h15
-rw-r--r--include/linux/ssb/ssb_regs.h239
-rw-r--r--include/linux/stmmac.h1
-rw-r--r--include/linux/stop_machine.h122
-rw-r--r--include/linux/sunrpc/gss_api.h2
-rw-r--r--include/linux/sunrpc/gss_krb5.h183
-rw-r--r--include/linux/sunrpc/xprt.h3
-rw-r--r--include/linux/syscalls.h5
-rw-r--r--include/linux/sysrq.h23
-rw-r--r--include/linux/tca6416_keypad.h34
-rw-r--r--include/linux/tick.h5
-rw-r--r--include/linux/timb_dma.h55
-rw-r--r--include/linux/time.h1
-rw-r--r--include/linux/timer.h10
-rw-r--r--include/linux/timex.h5
-rw-r--r--include/linux/tipc.h30
-rw-r--r--include/linux/tracepoint.h114
-rw-r--r--include/linux/tty.h4
-rw-r--r--include/linux/usb/audio-v2.h378
-rw-r--r--include/linux/usb/audio.h227
-rw-r--r--include/linux/uuid.h70
-rw-r--r--include/linux/vbus_driver.h83
-rw-r--r--include/linux/vbus_pci.h145
-rw-r--r--include/linux/venet.h133
-rw-r--r--include/linux/via-core.h219
-rw-r--r--include/linux/via-gpio.h14
-rw-r--r--include/linux/via_i2c.h (renamed from drivers/video/via/via_i2c.h)24
-rw-r--r--include/linux/videodev2.h75
-rw-r--r--include/linux/virtio.h55
-rw-r--r--include/linux/virtio_blk.h5
-rw-r--r--include/linux/virtio_console.h25
-rw-r--r--include/linux/wireless.h4
-rw-r--r--include/linux/wlp.h2
-rw-r--r--include/linux/writeback.h3
-rw-r--r--include/linux/z2_battery.h17
-rw-r--r--include/media/ak881x.h25
-rw-r--r--include/media/davinci/vpfe_capture.h2
-rw-r--r--include/media/ir-common.h74
-rw-r--r--include/media/ir-core.h142
-rw-r--r--include/media/ir-kbd-i2c.h6
-rw-r--r--include/media/rc-map.h121
-rw-r--r--include/media/sh_vou.h34
-rw-r--r--include/media/soc_camera.h12
-rw-r--r--include/media/v4l2-chip-ident.h127
-rw-r--r--include/media/v4l2-common.h19
-rw-r--r--include/media/v4l2-dev.h5
-rw-r--r--include/media/v4l2-event.h67
-rw-r--r--include/media/v4l2-fh.h65
-rw-r--r--include/media/v4l2-ioctl.h7
-rw-r--r--include/media/v4l2-mem2mem.h201
-rw-r--r--include/media/v4l2-subdev.h68
-rw-r--r--include/media/videobuf-core.h35
-rw-r--r--include/media/videobuf-dma-sg.h26
-rw-r--r--include/media/videobuf-vmalloc.h12
-rw-r--r--include/net/9p/9p.h33
-rw-r--r--include/net/9p/client.h2
-rw-r--r--include/net/af_unix.h20
-rw-r--r--include/net/bluetooth/hci_core.h6
-rw-r--r--include/net/bluetooth/l2cap.h41
-rw-r--r--include/net/caif/caif_dev.h103
-rw-r--r--include/net/caif/caif_device.h55
-rw-r--r--include/net/caif/caif_layer.h283
-rw-r--r--include/net/caif/cfcnfg.h140
-rw-r--r--include/net/caif/cfctrl.h139
-rw-r--r--include/net/caif/cffrml.h16
-rw-r--r--include/net/caif/cfmuxl.h22
-rw-r--r--include/net/caif/cfpkt.h274
-rw-r--r--include/net/caif/cfserl.h12
-rw-r--r--include/net/caif/cfsrvl.h56
-rw-r--r--include/net/cfg80211.h47
-rw-r--r--include/net/dn_fib.h4
-rw-r--r--include/net/dst.h15
-rw-r--r--include/net/fib_rules.h3
-rw-r--r--include/net/flow.h23
-rw-r--r--include/net/icmp.h11
-rw-r--r--include/net/if_inet6.h13
-rw-r--r--include/net/inet6_connection_sock.h2
-rw-r--r--include/net/inet_connection_sock.h5
-rw-r--r--include/net/inet_sock.h1
-rw-r--r--include/net/inet_timewait_sock.h4
-rw-r--r--include/net/ip.h3
-rw-r--r--include/net/ip6_fib.h29
-rw-r--r--include/net/ip6_route.h4
-rw-r--r--include/net/ipv6.h10
-rw-r--r--include/net/iw_handler.h2
-rw-r--r--include/net/mac80211.h110
-rw-r--r--include/net/mld.h75
-rw-r--r--include/net/neighbour.h14
-rw-r--r--include/net/netns/generic.h9
-rw-r--r--include/net/netns/ipv4.h15
-rw-r--r--include/net/pkt_sched.h2
-rw-r--r--include/net/raw.h13
-rw-r--r--include/net/sch_generic.h1
-rw-r--r--include/net/sctp/sctp.h4
-rw-r--r--include/net/sctp/sm.h3
-rw-r--r--include/net/sctp/structs.h69
-rw-r--r--include/net/snmp.h31
-rw-r--r--include/net/sock.h171
-rw-r--r--include/net/tcp.h13
-rw-r--r--include/net/transp_v6.h3
-rw-r--r--include/net/x25device.h1
-rw-r--r--include/net/xfrm.h19
-rw-r--r--include/pcmcia/cs.h19
-rw-r--r--include/pcmcia/ds.h29
-rw-r--r--include/pcmcia/mem_op.h116
-rw-r--r--include/pcmcia/ss.h7
-rw-r--r--include/rdma/ib_verbs.h7
-rw-r--r--include/scsi/Kbuild1
-rw-r--r--include/scsi/fc/fc_fcp.h1
-rw-r--r--include/scsi/libfc.h26
-rw-r--r--include/scsi/libfcoe.h10
-rw-r--r--include/scsi/scsi.h3
-rw-r--r--include/scsi/scsi_transport_fc.h2
-rw-r--r--include/sound/asound.h20
-rw-r--r--include/sound/es1688.h11
-rw-r--r--include/sound/info.h24
-rw-r--r--include/sound/jack.h8
-rw-r--r--include/sound/pcm.h3
-rw-r--r--include/sound/soc-dai.h7
-rw-r--r--include/sound/soc-dapm.h8
-rw-r--r--include/sound/soc.h61
-rw-r--r--include/sound/tlv320aic3x.h17
-rw-r--r--include/sound/tlv320dac33-plat.h1
-rw-r--r--include/sound/uda134x.h1
-rw-r--r--include/sound/version.h2
-rw-r--r--include/sound/wm8903.h249
-rw-r--r--include/sound/wm8904.h110
-rw-r--r--include/sound/wm8960.h24
-rw-r--r--include/sound/wm9090.h28
-rw-r--r--include/trace/define_trace.h5
-rw-r--r--include/trace/events/kvm.h1
-rw-r--r--include/trace/events/lock.h55
-rw-r--r--include/trace/events/napi.h10
-rw-r--r--include/trace/events/sched.h32
-rw-r--r--include/trace/events/scsi.h345
-rw-r--r--include/trace/ftrace.h26
-rw-r--r--init/Kconfig53
-rw-r--r--init/main.c10
-rw-r--r--ipc/mqueue.c74
-rw-r--r--kernel/Makefile10
-rw-r--r--kernel/acct.c20
-rw-r--r--kernel/audit.c1
-rw-r--r--kernel/audit.h26
-rw-r--r--kernel/audit_tree.c233
-rw-r--r--kernel/audit_watch.c295
-rw-r--r--kernel/auditfilter.c39
-rw-r--r--kernel/auditsc.c10
-rw-r--r--kernel/capability.c1
-rw-r--r--kernel/cgroup.c2
-rw-r--r--kernel/cgroup_freezer.c21
-rw-r--r--kernel/cpu.c26
-rw-r--r--kernel/cpuset.c67
-rw-r--r--kernel/cred-internals.h21
-rw-r--r--kernel/cred.c5
-rw-r--r--kernel/debug/Makefile6
-rw-r--r--kernel/debug/debug_core.c982
-rw-r--r--kernel/debug/debug_core.h81
-rw-r--r--kernel/debug/gdbstub.c1017
-rw-r--r--kernel/debug/kdb/.gitignore1
-rw-r--r--kernel/debug/kdb/Makefile25
-rw-r--r--kernel/debug/kdb/kdb_bp.c564
-rw-r--r--kernel/debug/kdb/kdb_bt.c210
-rw-r--r--kernel/debug/kdb/kdb_cmds35
-rw-r--r--kernel/debug/kdb/kdb_debugger.c169
-rw-r--r--kernel/debug/kdb/kdb_io.c826
-rw-r--r--kernel/debug/kdb/kdb_keyboard.c212
-rw-r--r--kernel/debug/kdb/kdb_main.c2851
-rw-r--r--kernel/debug/kdb/kdb_private.h300
-rw-r--r--kernel/debug/kdb/kdb_support.c927
-rw-r--r--kernel/exit.c1
-rw-r--r--kernel/fork.c3
-rw-r--r--kernel/groups.c6
-rw-r--r--kernel/hrtimer.c67
-rw-r--r--kernel/hw_breakpoint.c196
-rw-r--r--kernel/irq/handle.c3
-rw-r--r--kernel/irq/manage.c89
-rw-r--r--kernel/irq/proc.c62
-rw-r--r--kernel/kallsyms.c21
-rw-r--r--kernel/kgdb.c1764
-rw-r--r--kernel/kprobes.c132
-rw-r--r--kernel/lockdep.c88
-rw-r--r--kernel/lockdep_internals.h72
-rw-r--r--kernel/lockdep_proc.c58
-rw-r--r--kernel/module.c79
-rw-r--r--kernel/nmi_watchdog.c259
-rw-r--r--kernel/padata.c56
-rw-r--r--kernel/params.c233
-rw-r--r--kernel/perf_event.c332
-rw-r--r--kernel/pm_qos_params.c214
-rw-r--r--kernel/posix-cpu-timers.c298
-rw-r--r--kernel/power/Makefile3
-rw-r--r--kernel/power/block_io.c103
-rw-r--r--kernel/power/power.h27
-rw-r--r--kernel/power/snapshot.c145
-rw-r--r--kernel/power/swap.c333
-rw-r--r--kernel/power/user.c37
-rw-r--r--kernel/printk.c25
-rw-r--r--kernel/ptrace.c1
-rw-r--r--kernel/rcupdate.c19
-rw-r--r--kernel/rcutiny.c29
-rw-r--r--kernel/rcutiny_plugin.h39
-rw-r--r--kernel/rcutorture.c2
-rw-r--r--kernel/rcutree.c127
-rw-r--r--kernel/rcutree.h2
-rw-r--r--kernel/rcutree_plugin.h67
-rw-r--r--kernel/rcutree_trace.c4
-rw-r--r--kernel/resource.c16
-rw-r--r--kernel/sched.c775
-rw-r--r--kernel/sched_clock.c1
-rw-r--r--kernel/sched_debug.c108
-rw-r--r--kernel/sched_fair.c350
-rw-r--r--kernel/sched_features.h55
-rw-r--r--kernel/sched_idletask.c8
-rw-r--r--kernel/sched_rt.c15
-rw-r--r--kernel/signal.c40
-rw-r--r--kernel/softirq.c2
-rw-r--r--kernel/stop_machine.c534
-rw-r--r--kernel/sys.c31
-rw-r--r--kernel/sys_ni.c4
-rw-r--r--kernel/sysctl.c45
-rw-r--r--kernel/sysctl_binary.c1
-rw-r--r--kernel/time.c11
-rw-r--r--kernel/time/clocksource.c48
-rw-r--r--kernel/time/ntp.c2
-rw-r--r--kernel/time/tick-sched.c84
-rw-r--r--kernel/time/timekeeping.c35
-rw-r--r--kernel/time/timer_list.c1
-rw-r--r--kernel/timer.c137
-rw-r--r--kernel/trace/Kconfig11
-rw-r--r--kernel/trace/Makefile1
-rw-r--r--kernel/trace/ftrace.c33
-rw-r--r--kernel/trace/ring_buffer.c78
-rw-r--r--kernel/trace/ring_buffer_benchmark.c3
-rw-r--r--kernel/trace/trace.c99
-rw-r--r--kernel/trace/trace.h47
-rw-r--r--kernel/trace/trace_entries.h12
-rw-r--r--kernel/trace/trace_events_filter.c2
-rw-r--r--kernel/trace/trace_functions_graph.c164
-rw-r--r--kernel/trace/trace_hw_branches.c312
-rw-r--r--kernel/trace/trace_irqsoff.c271
-rw-r--r--kernel/trace/trace_kprobe.c535
-rw-r--r--kernel/trace/trace_ksym.c26
-rw-r--r--kernel/trace/trace_output.c18
-rw-r--r--kernel/trace/trace_sched_switch.c5
-rw-r--r--kernel/trace/trace_sched_wakeup.c5
-rw-r--r--kernel/trace/trace_selftest.c62
-rw-r--r--kernel/user.c11
-rw-r--r--lib/Kconfig21
-rw-r--r--lib/Kconfig.debug33
-rw-r--r--lib/Kconfig.kgdb24
-rw-r--r--lib/Makefile9
-rw-r--r--lib/atomic64.c4
-rw-r--r--lib/atomic64_test.c164
-rw-r--r--lib/debugobjects.c4
-rw-r--r--lib/hweight.c19
-rw-r--r--lib/idr.c2
-rw-r--r--lib/ioq.c304
-rw-r--r--lib/rbtree.c48
-rw-r--r--lib/shm_signal.c196
-rw-r--r--lib/uuid.c53
-rw-r--r--mm/memcontrol.c2
-rw-r--r--mm/mlock.c41
-rw-r--r--mm/page-writeback.c34
-rw-r--r--mm/page_alloc.c2
-rw-r--r--mm/slab.c174
-rw-r--r--mm/slub.c38
-rw-r--r--mm/swapfile.c9
-rw-r--r--net/802/garp.c4
-rw-r--r--net/8021q/vlan.c8
-rw-r--r--net/8021q/vlan_dev.c14
-rw-r--r--net/9p/client.c70
-rw-r--r--net/9p/protocol.c6
-rw-r--r--net/9p/trans_virtio.c6
-rw-r--r--net/Kconfig8
-rw-r--r--net/Makefile2
-rw-r--r--net/appletalk/ddp.c2
-rw-r--r--net/atm/common.c30
-rw-r--r--net/atm/proc.c10
-rw-r--r--net/atm/signaling.c2
-rw-r--r--net/atm/svc.c62
-rw-r--r--net/ax25/af_ax25.c8
-rw-r--r--net/bluetooth/Kconfig13
-rw-r--r--net/bluetooth/af_bluetooth.c6
-rw-r--r--net/bluetooth/bnep/core.c8
-rw-r--r--net/bluetooth/bnep/netdev.c20
-rw-r--r--net/bluetooth/cmtp/cmtp.h2
-rw-r--r--net/bluetooth/cmtp/core.c4
-rw-r--r--net/bluetooth/hci_core.c27
-rw-r--r--net/bluetooth/hci_sysfs.c34
-rw-r--r--net/bluetooth/hidp/core.c10
-rw-r--r--net/bluetooth/hidp/hidp.h4
-rw-r--r--net/bluetooth/l2cap.c1112
-rw-r--r--net/bluetooth/rfcomm/sock.c8
-rw-r--r--net/bluetooth/sco.c30
-rw-r--r--net/bridge/Kconfig6
-rw-r--r--net/bridge/br_device.c112
-rw-r--r--net/bridge/br_forward.c51
-rw-r--r--net/bridge/br_if.c31
-rw-r--r--net/bridge/br_input.c12
-rw-r--r--net/bridge/br_multicast.c658
-rw-r--r--net/bridge/br_netfilter.c263
-rw-r--r--net/bridge/br_notify.c4
-rw-r--r--net/bridge/br_private.h26
-rw-r--r--net/bridge/br_stp_bpdu.c2
-rw-r--r--net/bridge/netfilter/ebt_802_3.c6
-rw-r--r--net/bridge/netfilter/ebt_among.c25
-rw-r--r--net/bridge/netfilter/ebt_arp.c8
-rw-r--r--net/bridge/netfilter/ebt_arpreply.c8
-rw-r--r--net/bridge/netfilter/ebt_dnat.c10
-rw-r--r--net/bridge/netfilter/ebt_ip.c16
-rw-r--r--net/bridge/netfilter/ebt_ip6.c37
-rw-r--r--net/bridge/netfilter/ebt_limit.c9
-rw-r--r--net/bridge/netfilter/ebt_log.c8
-rw-r--r--net/bridge/netfilter/ebt_mark.c10
-rw-r--r--net/bridge/netfilter/ebt_mark_m.c10
-rw-r--r--net/bridge/netfilter/ebt_nflog.c6
-rw-r--r--net/bridge/netfilter/ebt_pkttype.c6
-rw-r--r--net/bridge/netfilter/ebt_redirect.c10
-rw-r--r--net/bridge/netfilter/ebt_snat.c10
-rw-r--r--net/bridge/netfilter/ebt_stp.c8
-rw-r--r--net/bridge/netfilter/ebt_ulog.c36
-rw-r--r--net/bridge/netfilter/ebt_vlan.c52
-rw-r--r--net/bridge/netfilter/ebtables.c23
-rw-r--r--net/caif/Kconfig48
-rw-r--r--net/caif/Makefile26
-rw-r--r--net/caif/caif_config_util.c87
-rw-r--r--net/caif/caif_dev.c418
-rw-r--r--net/caif/caif_socket.c1252
-rw-r--r--net/caif/cfcnfg.c471
-rw-r--r--net/caif/cfctrl.c693
-rw-r--r--net/caif/cfdbgl.c40
-rw-r--r--net/caif/cfdgml.c108
-rw-r--r--net/caif/cffrml.c151
-rw-r--r--net/caif/cfmuxl.c251
-rw-r--r--net/caif/cfpkt_skbuff.c571
-rw-r--r--net/caif/cfrfml.c108
-rw-r--r--net/caif/cfserl.c192
-rw-r--r--net/caif/cfsrvl.c192
-rw-r--r--net/caif/cfutill.c115
-rw-r--r--net/caif/cfveil.c107
-rw-r--r--net/caif/cfvidl.c65
-rw-r--r--net/caif/chnl_net.c467
-rw-r--r--net/core/Makefile2
-rw-r--r--net/core/datagram.c21
-rw-r--r--net/core/dev.c1333
-rw-r--r--net/core/dev_addr_lists.c741
-rw-r--r--net/core/dev_mcast.c232
-rw-r--r--net/core/dst.c45
-rw-r--r--net/core/ethtool.c148
-rw-r--r--net/core/fib_rules.c31
-rw-r--r--net/core/filter.c7
-rw-r--r--net/core/flow.c405
-rw-r--r--net/core/net-sysfs.c318
-rw-r--r--net/core/net_namespace.c95
-rw-r--r--net/core/netpoll.c26
-rw-r--r--net/core/pktgen.c58
-rw-r--r--net/core/rtnetlink.c69
-rw-r--r--net/core/skbuff.c31
-rw-r--r--net/core/sock.c71
-rw-r--r--net/core/stream.c22
-rw-r--r--net/core/sysctl_net_core.c68
-rw-r--r--net/dccp/ccids/ccid3.c2
-rw-r--r--net/dccp/dccp.h4
-rw-r--r--net/dccp/input.c2
-rw-r--r--net/dccp/ipv4.c2
-rw-r--r--net/dccp/ipv6.c7
-rw-r--r--net/dccp/output.c18
-rw-r--r--net/dccp/proto.c2
-rw-r--r--net/dccp/timer.c4
-rw-r--r--net/decnet/af_decnet.c32
-rw-r--r--net/decnet/dn_dev.c12
-rw-r--r--net/decnet/dn_neigh.c9
-rw-r--r--net/decnet/dn_nsp_in.c3
-rw-r--r--net/decnet/dn_route.c28
-rw-r--r--net/decnet/dn_rules.c22
-rw-r--r--net/dsa/slave.c14
-rw-r--r--net/ethernet/eth.c4
-rw-r--r--net/ipv4/Kconfig22
-rw-r--r--net/ipv4/af_inet.c49
-rw-r--r--net/ipv4/arp.c6
-rw-r--r--net/ipv4/devinet.c4
-rw-r--r--net/ipv4/fib_rules.c22
-rw-r--r--net/ipv4/icmp.c5
-rw-r--r--net/ipv4/igmp.c4
-rw-r--r--net/ipv4/inet_connection_sock.c4
-rw-r--r--net/ipv4/ip_forward.c4
-rw-r--r--net/ipv4/ip_input.c4
-rw-r--r--net/ipv4/ip_output.c26
-rw-r--r--net/ipv4/ip_sockglue.c20
-rw-r--r--net/ipv4/ipconfig.c2
-rw-r--r--net/ipv4/ipmr.c917
-rw-r--r--net/ipv4/netfilter/arp_tables.c26
-rw-r--r--net/ipv4/netfilter/arpt_mangle.c2
-rw-r--r--net/ipv4/netfilter/ip_queue.c3
-rw-r--r--net/ipv4/netfilter/ip_tables.c127
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c75
-rw-r--r--net/ipv4/netfilter/ipt_ECN.c21
-rw-r--r--net/ipv4/netfilter/ipt_LOG.c17
-rw-r--r--net/ipv4/netfilter/ipt_MASQUERADE.c16
-rw-r--r--net/ipv4/netfilter/ipt_NETMAP.c14
-rw-r--r--net/ipv4/netfilter/ipt_REDIRECT.c14
-rw-r--r--net/ipv4/netfilter/ipt_REJECT.c17
-rw-r--r--net/ipv4/netfilter/ipt_ULOG.c45
-rw-r--r--net/ipv4/netfilter/ipt_addrtype.c24
-rw-r--r--net/ipv4/netfilter/ipt_ah.c24
-rw-r--r--net/ipv4/netfilter/ipt_ecn.c15
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c7
-rw-r--r--net/ipv4/netfilter/nf_nat_h323.c1
-rw-r--r--net/ipv4/netfilter/nf_nat_rule.c17
-rw-r--r--net/ipv4/netfilter/nf_nat_standalone.c3
-rw-r--r--net/ipv4/netfilter/nf_nat_tftp.c1
-rw-r--r--net/ipv4/proc.c1
-rw-r--r--net/ipv4/raw.c6
-rw-r--r--net/ipv4/route.c137
-rw-r--r--net/ipv4/tcp.c19
-rw-r--r--net/ipv4/tcp_input.c7
-rw-r--r--net/ipv4/tcp_ipv4.c37
-rw-r--r--net/ipv4/tcp_minisocks.c1
-rw-r--r--net/ipv4/tcp_output.c14
-rw-r--r--net/ipv4/tcp_timer.c8
-rw-r--r--net/ipv4/udp.c37
-rw-r--r--net/ipv4/xfrm4_input.c2
-rw-r--r--net/ipv4/xfrm4_output.c2
-rw-r--r--net/ipv4/xfrm4_policy.c22
-rw-r--r--net/ipv6/addrconf.c805
-rw-r--r--net/ipv6/af_inet6.c3
-rw-r--r--net/ipv6/datagram.c116
-rw-r--r--net/ipv6/fib6_rules.c3
-rw-r--r--net/ipv6/icmp.c7
-rw-r--r--net/ipv6/inet6_connection_sock.c4
-rw-r--r--net/ipv6/ip6_fib.c16
-rw-r--r--net/ipv6/ip6_flowlabel.c3
-rw-r--r--net/ipv6/ip6_input.c4
-rw-r--r--net/ipv6/ip6_output.c101
-rw-r--r--net/ipv6/ip6mr.c2
-rw-r--r--net/ipv6/ipv6_sockglue.c86
-rw-r--r--net/ipv6/mcast.c143
-rw-r--r--net/ipv6/ndisc.c4
-rw-r--r--net/ipv6/netfilter.c25
-rw-r--r--net/ipv6/netfilter/ip6_queue.c3
-rw-r--r--net/ipv6/netfilter/ip6_tables.c109
-rw-r--r--net/ipv6/netfilter/ip6t_LOG.c18
-rw-r--r--net/ipv6/netfilter/ip6t_REJECT.c30
-rw-r--r--net/ipv6/netfilter/ip6t_ah.c12
-rw-r--r--net/ipv6/netfilter/ip6t_frag.c12
-rw-r--r--net/ipv6/netfilter/ip6t_hbh.c27
-rw-r--r--net/ipv6/netfilter/ip6t_ipv6header.c6
-rw-r--r--net/ipv6/netfilter/ip6t_mh.c15
-rw-r--r--net/ipv6/netfilter/ip6t_rt.c14
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c2
-rw-r--r--net/ipv6/proc.c1
-rw-r--r--net/ipv6/raw.c18
-rw-r--r--net/ipv6/tcp_ipv6.c64
-rw-r--r--net/ipv6/udp.c41
-rw-r--r--net/ipv6/xfrm6_input.c2
-rw-r--r--net/ipv6/xfrm6_output.c4
-rw-r--r--net/ipv6/xfrm6_policy.c31
-rw-r--r--net/irda/af_irda.c14
-rw-r--r--net/irda/ircomm/ircomm_param.c2
-rw-r--r--net/iucv/af_iucv.c21
-rw-r--r--net/l2tp/Kconfig107
-rw-r--r--net/l2tp/Makefile12
-rw-r--r--net/l2tp/l2tp_core.c1666
-rw-r--r--net/l2tp/l2tp_core.h304
-rw-r--r--net/l2tp/l2tp_debugfs.c341
-rw-r--r--net/l2tp/l2tp_eth.c334
-rw-r--r--net/l2tp/l2tp_ip.c679
-rw-r--r--net/l2tp/l2tp_netlink.c840
-rw-r--r--net/l2tp/l2tp_ppp.c1837
-rw-r--r--net/llc/af_llc.c12
-rw-r--r--net/llc/llc_core.c6
-rw-r--r--net/llc/llc_sap.c2
-rw-r--r--net/mac80211/Kconfig17
-rw-r--r--net/mac80211/Makefile3
-rw-r--r--net/mac80211/agg-rx.c80
-rw-r--r--net/mac80211/agg-tx.c16
-rw-r--r--net/mac80211/cfg.c122
-rw-r--r--net/mac80211/chan.c127
-rw-r--r--net/mac80211/debugfs_netdev.c12
-rw-r--r--net/mac80211/debugfs_sta.c79
-rw-r--r--net/mac80211/driver-ops.h22
-rw-r--r--net/mac80211/driver-trace.h284
-rw-r--r--net/mac80211/ht.c3
-rw-r--r--net/mac80211/ibss.c46
-rw-r--r--net/mac80211/ieee80211_i.h48
-rw-r--r--net/mac80211/iface.c124
-rw-r--r--net/mac80211/key.c1
-rw-r--r--net/mac80211/main.c28
-rw-r--r--net/mac80211/mesh.c4
-rw-r--r--net/mac80211/mesh.h2
-rw-r--r--net/mac80211/mesh_hwmp.c4
-rw-r--r--net/mac80211/mesh_plink.c2
-rw-r--r--net/mac80211/mlme.c256
-rw-r--r--net/mac80211/pm.c2
-rw-r--r--net/mac80211/rate.c2
-rw-r--r--net/mac80211/rc80211_minstrel.c2
-rw-r--r--net/mac80211/rc80211_minstrel.h11
-rw-r--r--net/mac80211/rc80211_minstrel_debugfs.c41
-rw-r--r--net/mac80211/rx.c102
-rw-r--r--net/mac80211/scan.c126
-rw-r--r--net/mac80211/sta_info.c105
-rw-r--r--net/mac80211/sta_info.h12
-rw-r--r--net/mac80211/status.c21
-rw-r--r--net/mac80211/tx.c26
-rw-r--r--net/mac80211/util.c36
-rw-r--r--net/mac80211/work.c35
-rw-r--r--net/netfilter/Kconfig131
-rw-r--r--net/netfilter/Makefile9
-rw-r--r--net/netfilter/ipvs/ip_vs_ftp.c10
-rw-r--r--net/netfilter/ipvs/ip_vs_proto.c28
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_ah_esp.c14
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c16
-rw-r--r--net/netfilter/nf_conntrack_core.c4
-rw-r--r--net/netfilter/nf_conntrack_ecache.c12
-rw-r--r--net/netfilter/nf_conntrack_h323_main.c3
-rw-r--r--net/netfilter/nf_conntrack_netlink.c17
-rw-r--r--net/netfilter/nf_conntrack_proto.c8
-rw-r--r--net/netfilter/nf_conntrack_standalone.c7
-rw-r--r--net/netfilter/nf_log.c6
-rw-r--r--net/netfilter/nfnetlink.c3
-rw-r--r--net/netfilter/nfnetlink_queue.c3
-rw-r--r--net/netfilter/x_tables.c128
-rw-r--r--net/netfilter/xt_CONNMARK.c113
-rw-r--r--net/netfilter/xt_CONNSECMARK.c27
-rw-r--r--net/netfilter/xt_CT.c23
-rw-r--r--net/netfilter/xt_DSCP.c10
-rw-r--r--net/netfilter/xt_HL.c26
-rw-r--r--net/netfilter/xt_LED.c91
-rw-r--r--net/netfilter/xt_MARK.c56
-rw-r--r--net/netfilter/xt_NFLOG.c8
-rw-r--r--net/netfilter/xt_NFQUEUE.c48
-rw-r--r--net/netfilter/xt_RATEEST.c18
-rw-r--r--net/netfilter/xt_SECMARK.c46
-rw-r--r--net/netfilter/xt_TCPMSS.c37
-rw-r--r--net/netfilter/xt_TCPOPTSTRIP.c3
-rw-r--r--net/netfilter/xt_TEE.c309
-rw-r--r--net/netfilter/xt_TPROXY.c10
-rw-r--r--net/netfilter/xt_cluster.c19
-rw-r--r--net/netfilter/xt_connbytes.c20
-rw-r--r--net/netfilter/xt_connlimit.c18
-rw-r--r--net/netfilter/xt_connmark.c102
-rw-r--r--net/netfilter/xt_conntrack.c17
-rw-r--r--net/netfilter/xt_dccp.c12
-rw-r--r--net/netfilter/xt_dscp.c12
-rw-r--r--net/netfilter/xt_esp.c24
-rw-r--r--net/netfilter/xt_hashlimit.c342
-rw-r--r--net/netfilter/xt_helper.c16
-rw-r--r--net/netfilter/xt_hl.c12
-rw-r--r--net/netfilter/xt_iprange.c1
-rw-r--r--net/netfilter/xt_limit.c13
-rw-r--r--net/netfilter/xt_mac.c21
-rw-r--r--net/netfilter/xt_mark.c35
-rw-r--r--net/netfilter/xt_multiport.c95
-rw-r--r--net/netfilter/xt_osf.c8
-rw-r--r--net/netfilter/xt_physdev.c16
-rw-r--r--net/netfilter/xt_policy.c29
-rw-r--r--net/netfilter/xt_quota.c8
-rw-r--r--net/netfilter/xt_rateest.c8
-rw-r--r--net/netfilter/xt_recent.c185
-rw-r--r--net/netfilter/xt_sctp.c51
-rw-r--r--net/netfilter/xt_socket.c5
-rw-r--r--net/netfilter/xt_state.c48
-rw-r--r--net/netfilter/xt_statistic.c12
-rw-r--r--net/netfilter/xt_string.c66
-rw-r--r--net/netfilter/xt_tcpudp.c24
-rw-r--r--net/netfilter/xt_time.c13
-rw-r--r--net/netfilter/xt_u32.c3
-rw-r--r--net/netlink/af_netlink.c2
-rw-r--r--net/netlink/genetlink.c6
-rw-r--r--net/netrom/af_netrom.c8
-rw-r--r--net/packet/af_packet.c69
-rw-r--r--net/phonet/pep.c8
-rw-r--r--net/phonet/pn_dev.c23
-rw-r--r--net/phonet/socket.c2
-rw-r--r--net/rds/af_rds.c11
-rw-r--r--net/rds/cong.c2
-rw-r--r--net/rds/ib_cm.c3
-rw-r--r--net/rds/ib_rdma.c5
-rw-r--r--net/rds/ib_recv.c4
-rw-r--r--net/rds/ib_send.c20
-rw-r--r--net/rds/iw_cm.c4
-rw-r--r--net/rds/iw_recv.c4
-rw-r--r--net/rds/iw_send.c3
-rw-r--r--net/rds/loop.c7
-rw-r--r--net/rds/rdma.c4
-rw-r--r--net/rds/rdma_transport.c5
-rw-r--r--net/rds/rds.h4
-rw-r--r--net/rds/recv.c2
-rw-r--r--net/rds/send.c40
-rw-r--r--net/rds/tcp_recv.c1
-rw-r--r--net/rds/tcp_send.c4
-rw-r--r--net/rds/threads.c2
-rw-r--r--net/rfkill/core.c53
-rw-r--r--net/rose/af_rose.c8
-rw-r--r--net/rxrpc/af_rxrpc.c12
-rw-r--r--net/rxrpc/ar-recvmsg.c6
-rw-r--r--net/sched/act_api.c45
-rw-r--r--net/sched/act_ipt.c4
-rw-r--r--net/sched/cls_api.c30
-rw-r--r--net/sched/cls_u32.c4
-rw-r--r--net/sched/sch_api.c112
-rw-r--r--net/sched/sch_generic.c17
-rw-r--r--net/sched/sch_sfq.c10
-rw-r--r--net/sctp/Kconfig12
-rw-r--r--net/sctp/Makefile3
-rw-r--r--net/sctp/associola.c13
-rw-r--r--net/sctp/chunk.c4
-rw-r--r--net/sctp/endpointola.c2
-rw-r--r--net/sctp/input.c22
-rw-r--r--net/sctp/ipv6.c27
-rw-r--r--net/sctp/output.c27
-rw-r--r--net/sctp/outqueue.c94
-rw-r--r--net/sctp/probe.c214
-rw-r--r--net/sctp/protocol.c9
-rw-r--r--net/sctp/sm_make_chunk.c24
-rw-r--r--net/sctp/sm_sideeffect.c43
-rw-r--r--net/sctp/socket.c39
-rw-r--r--net/sctp/transport.c63
-rw-r--r--net/socket.c121
-rw-r--r--net/sunrpc/auth_gss/Makefile2
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c22
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_crypto.c697
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_keys.c335
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_mech.c582
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_seal.c155
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_seqnum.c83
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_unseal.c113
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_wrap.c404
-rw-r--r--net/sunrpc/auth_gss/gss_mech_switch.c14
-rw-r--r--net/sunrpc/auth_gss/gss_spkm3_token.c2
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c15
-rw-r--r--net/sunrpc/bc_svc.c2
-rw-r--r--net/sunrpc/cache.c45
-rw-r--r--net/sunrpc/clnt.c19
-rw-r--r--net/sunrpc/sched.c13
-rw-r--r--net/sunrpc/svc_xprt.c6
-rw-r--r--net/sunrpc/svcsock.c39
-rw-r--r--net/sunrpc/xdr.c1
-rw-r--r--net/sunrpc/xprt.c42
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_recvfrom.c3
-rw-r--r--net/sunrpc/xprtrdma/transport.c29
-rw-r--r--net/sunrpc/xprtsock.c58
-rw-r--r--net/tipc/bcast.c35
-rw-r--r--net/tipc/core.c2
-rw-r--r--net/tipc/link.c19
-rw-r--r--net/tipc/net.c4
-rw-r--r--net/tipc/node.c2
-rw-r--r--net/tipc/socket.c26
-rw-r--r--net/tipc/subscr.c15
-rw-r--r--net/unix/af_unix.c25
-rw-r--r--net/unix/garbage.c13
-rw-r--r--net/wimax/op-reset.c2
-rw-r--r--net/wimax/op-rfkill.c2
-rw-r--r--net/wimax/op-state-get.c2
-rw-r--r--net/wireless/chan.c56
-rw-r--r--net/wireless/core.c3
-rw-r--r--net/wireless/core.h27
-rw-r--r--net/wireless/ibss.c5
-rw-r--r--net/wireless/mlme.c52
-rw-r--r--net/wireless/nl80211.c328
-rw-r--r--net/wireless/nl80211.h6
-rw-r--r--net/wireless/reg.c6
-rw-r--r--net/wireless/sme.c36
-rw-r--r--net/wireless/util.c24
-rw-r--r--net/wireless/wext-compat.c15
-rw-r--r--net/wireless/wext-core.c134
-rw-r--r--net/wireless/wext-sme.c2
-rw-r--r--net/x25/af_x25.c8
-rw-r--r--net/x25/x25_dev.c36
-rw-r--r--net/xfrm/xfrm_hash.h3
-rw-r--r--net/xfrm/xfrm_policy.c847
-rw-r--r--net/xfrm/xfrm_state.c5
-rw-r--r--net/xfrm/xfrm_user.c10
-rw-r--r--scripts/Makefile.build2
-rw-r--r--scripts/Makefile.lib6
-rwxr-xr-xscripts/checkincludes.pl24
-rwxr-xr-xscripts/checkstack.pl16
-rwxr-xr-xscripts/checkversion.pl23
-rwxr-xr-xscripts/decodecode48
-rw-r--r--scripts/export_report.pl37
-rw-r--r--scripts/gen_initramfs_list.sh3
-rw-r--r--scripts/genksyms/genksyms.c4
-rwxr-xr-xscripts/headerdep.pl3
-rw-r--r--scripts/headers_check.pl11
-rw-r--r--scripts/headers_install.pl19
-rw-r--r--scripts/kallsyms.c6
-rw-r--r--scripts/kconfig/Makefile16
-rw-r--r--scripts/kconfig/expr.c27
-rw-r--r--scripts/kconfig/expr.h5
-rw-r--r--scripts/kconfig/gconf.c113
-rw-r--r--scripts/kconfig/gconf.glade26
-rw-r--r--scripts/kconfig/lkc.h7
-rw-r--r--scripts/kconfig/lkc_proto.h6
-rw-r--r--scripts/kconfig/lxdialog/inputbox.c4
-rw-r--r--scripts/kconfig/lxdialog/menubox.c22
-rw-r--r--scripts/kconfig/mconf.c36
-rw-r--r--scripts/kconfig/menu.c28
-rw-r--r--scripts/kconfig/nconf.c1568
-rw-r--r--scripts/kconfig/nconf.gui.c617
-rw-r--r--scripts/kconfig/nconf.h95
-rw-r--r--scripts/kconfig/symbol.c30
-rw-r--r--scripts/kconfig/util.c4
-rw-r--r--scripts/kconfig/zconf.tab.c_shipped25
-rw-r--r--scripts/kconfig/zconf.y25
-rw-r--r--scripts/markup_oops.pl54
-rwxr-xr-xscripts/mkcompile_h5
-rw-r--r--scripts/mod/file2alias.c43
-rw-r--r--scripts/mod/modpost.c165
-rwxr-xr-xscripts/namespace.pl65
-rw-r--r--scripts/package/builddeb2
-rwxr-xr-xscripts/package/mkspec4
-rw-r--r--scripts/profile2linkerlist.pl8
-rw-r--r--scripts/rt-tester/rt-tester.py2
-rw-r--r--scripts/selinux/genheaders/genheaders.c2
-rwxr-xr-xscripts/show_delta2
-rwxr-xr-xscripts/tags.sh45
-rw-r--r--security/capability.c74
-rw-r--r--security/commoncap.c6
-rw-r--r--security/device_cgroup.c2
-rw-r--r--security/integrity/ima/ima.h2
-rw-r--r--security/integrity/ima/ima_audit.c2
-rw-r--r--security/integrity/ima/ima_crypto.c4
-rw-r--r--security/integrity/ima/ima_fs.c38
-rw-r--r--security/integrity/ima/ima_iint.c6
-rw-r--r--security/integrity/ima/ima_init.c2
-rw-r--r--security/integrity/ima/ima_main.c2
-rw-r--r--security/integrity/ima/ima_policy.c107
-rw-r--r--security/integrity/ima/ima_queue.c4
-rw-r--r--security/keys/internal.h11
-rw-r--r--security/keys/key.c47
-rw-r--r--security/keys/keyctl.c65
-rw-r--r--security/keys/keyring.c273
-rw-r--r--security/keys/permission.c2
-rw-r--r--security/keys/proc.c2
-rw-r--r--security/keys/request_key.c55
-rw-r--r--security/lsm_audit.c2
-rw-r--r--security/min_addr.c2
-rw-r--r--security/security.c87
-rw-r--r--security/selinux/avc.c3
-rw-r--r--security/selinux/hooks.c66
-rw-r--r--security/selinux/include/initial_sid_to_string.h2
-rw-r--r--security/selinux/include/netlabel.h8
-rw-r--r--security/selinux/netlabel.c14
-rw-r--r--security/selinux/netlink.c1
-rw-r--r--security/selinux/nlmsgtab.c1
-rw-r--r--security/selinux/selinuxfs.c44
-rw-r--r--security/selinux/ss/mls.c2
-rw-r--r--security/selinux/ss/policydb.c6
-rw-r--r--security/selinux/ss/services.c49
-rw-r--r--security/smack/smack_lsm.c11
-rw-r--r--security/tomoyo/common.c63
-rw-r--r--security/tomoyo/common.h27
-rw-r--r--security/tomoyo/domain.c143
-rw-r--r--security/tomoyo/file.c201
-rw-r--r--security/tomoyo/gc.c7
-rw-r--r--security/tomoyo/realpath.c30
-rw-r--r--sound/aoa/core/gpio-pmf.c9
-rw-r--r--sound/atmel/Kconfig2
-rw-r--r--sound/atmel/ac97c.c355
-rw-r--r--sound/core/control.c5
-rw-r--r--sound/core/info.c74
-rw-r--r--sound/core/jack.c71
-rw-r--r--sound/core/oss/mixer_oss.c5
-rw-r--r--sound/core/oss/pcm_oss.c5
-rw-r--r--sound/core/pcm.c3
-rw-r--r--sound/core/pcm_native.c63
-rw-r--r--sound/core/rawmidi.c5
-rw-r--r--sound/core/seq/seq_clientmgr.c6
-rw-r--r--sound/core/sound.c73
-rw-r--r--sound/core/timer.c6
-rw-r--r--sound/drivers/opl4/opl4_proc.c83
-rw-r--r--sound/drivers/pcsp/pcsp.h2
-rw-r--r--sound/drivers/pcsp/pcsp_input.c4
-rw-r--r--sound/drivers/pcsp/pcsp_lib.c12
-rw-r--r--sound/i2c/i2c.c18
-rw-r--r--sound/isa/Kconfig16
-rw-r--r--sound/isa/es1688/es1688.c214
-rw-r--r--sound/isa/es1688/es1688_lib.c47
-rw-r--r--sound/isa/gus/gus_mem_proc.c48
-rw-r--r--sound/isa/gus/gusextreme.c26
-rw-r--r--sound/isa/sb/Makefile2
-rw-r--r--sound/isa/sb/es968.c248
-rw-r--r--sound/pci/Kconfig32
-rw-r--r--sound/pci/Makefile1
-rw-r--r--sound/pci/asihpi/Makefile5
-rw-r--r--sound/pci/asihpi/asihpi.c3002
-rw-r--r--sound/pci/asihpi/hpi.h2001
-rw-r--r--sound/pci/asihpi/hpi6000.c1841
-rw-r--r--sound/pci/asihpi/hpi6000.h70
-rw-r--r--sound/pci/asihpi/hpi6205.c2332
-rw-r--r--sound/pci/asihpi/hpi6205.h93
-rw-r--r--sound/pci/asihpi/hpi_internal.h1641
-rw-r--r--sound/pci/asihpi/hpicmn.c643
-rw-r--r--sound/pci/asihpi/hpicmn.h64
-rw-r--r--sound/pci/asihpi/hpidebug.c225
-rw-r--r--sound/pci/asihpi/hpidebug.h385
-rw-r--r--sound/pci/asihpi/hpidspcd.c172
-rw-r--r--sound/pci/asihpi/hpidspcd.h104
-rw-r--r--sound/pci/asihpi/hpifunc.c3864
-rw-r--r--sound/pci/asihpi/hpimsginit.c130
-rw-r--r--sound/pci/asihpi/hpimsginit.h40
-rw-r--r--sound/pci/asihpi/hpimsgx.c907
-rw-r--r--sound/pci/asihpi/hpimsgx.h36
-rw-r--r--sound/pci/asihpi/hpioctl.c484
-rw-r--r--sound/pci/asihpi/hpioctl.h38
-rw-r--r--sound/pci/asihpi/hpios.c114
-rw-r--r--sound/pci/asihpi/hpios.h178
-rw-r--r--sound/pci/asihpi/hpipcida.h37
-rw-r--r--sound/pci/cs4281.c40
-rw-r--r--sound/pci/cs46xx/cs46xx_lib.c19
-rw-r--r--sound/pci/emu10k1/emu10k1_main.c2
-rw-r--r--sound/pci/emu10k1/emuproc.c51
-rw-r--r--sound/pci/es1968.c129
-rw-r--r--sound/pci/hda/hda_codec.c76
-rw-r--r--sound/pci/hda/hda_codec.h3
-rw-r--r--sound/pci/hda/hda_intel.c109
-rw-r--r--sound/pci/hda/hda_local.h2
-rw-r--r--sound/pci/hda/patch_analog.c21
-rw-r--r--sound/pci/hda/patch_conexant.c165
-rw-r--r--sound/pci/hda/patch_realtek.c237
-rw-r--r--sound/pci/hda/patch_sigmatel.c11
-rw-r--r--sound/pci/ice1712/aureon.c89
-rw-r--r--sound/pci/maestro3.c139
-rw-r--r--sound/pci/mixart/mixart.c79
-rw-r--r--sound/pcmcia/pdaudiocf/pdaudiocf.c12
-rw-r--r--sound/pcmcia/pdaudiocf/pdaudiocf.h1
-rw-r--r--sound/pcmcia/vx/vxpocket.c10
-rw-r--r--sound/pcmcia/vx/vxpocket.h1
-rw-r--r--sound/ppc/tumbler.c12
-rw-r--r--sound/soc/atmel/atmel-pcm.c14
-rw-r--r--sound/soc/blackfin/Kconfig9
-rw-r--r--sound/soc/blackfin/Makefile4
-rw-r--r--sound/soc/blackfin/bf5xx-ad193x.c (renamed from sound/soc/blackfin/bf5xx-ad1938.c)66
-rw-r--r--sound/soc/blackfin/bf5xx-sport.h28
-rw-r--r--sound/soc/codecs/Kconfig16
-rw-r--r--sound/soc/codecs/Makefile10
-rw-r--r--sound/soc/codecs/ad1836.c2
-rw-r--r--sound/soc/codecs/ad1938.c522
-rw-r--r--sound/soc/codecs/ad1938.h100
-rw-r--r--sound/soc/codecs/ad193x.c547
-rw-r--r--sound/soc/codecs/ad193x.h81
-rw-r--r--sound/soc/codecs/ak4104.c2
-rw-r--r--sound/soc/codecs/ak4535.c11
-rw-r--r--sound/soc/codecs/ak4642.c177
-rw-r--r--sound/soc/codecs/ak4671.c2
-rw-r--r--sound/soc/codecs/cq93vc.c299
-rw-r--r--sound/soc/codecs/cq93vc.h29
-rw-r--r--sound/soc/codecs/cs4270.c20
-rw-r--r--sound/soc/codecs/cx20442.c2
-rw-r--r--sound/soc/codecs/da7210.c157
-rw-r--r--sound/soc/codecs/ssm2602.c17
-rw-r--r--sound/soc/codecs/stac9766.c5
-rw-r--r--sound/soc/codecs/tlv320aic23.c1
-rw-r--r--sound/soc/codecs/tlv320aic26.c14
-rw-r--r--sound/soc/codecs/tlv320aic3x.c118
-rw-r--r--sound/soc/codecs/tlv320dac33.c540
-rw-r--r--sound/soc/codecs/tpa6130a2.c47
-rw-r--r--sound/soc/codecs/twl4030.c191
-rw-r--r--sound/soc/codecs/twl6040.c1227
-rw-r--r--sound/soc/codecs/twl6040.h141
-rw-r--r--sound/soc/codecs/uda134x.c29
-rw-r--r--sound/soc/codecs/uda1380.c5
-rw-r--r--sound/soc/codecs/wm8350.c103
-rw-r--r--sound/soc/codecs/wm8350.h3
-rw-r--r--sound/soc/codecs/wm8400.c16
-rw-r--r--sound/soc/codecs/wm8510.c2
-rw-r--r--sound/soc/codecs/wm8523.c10
-rw-r--r--sound/soc/codecs/wm8580.c4
-rw-r--r--sound/soc/codecs/wm8711.c8
-rw-r--r--sound/soc/codecs/wm8728.c2
-rw-r--r--sound/soc/codecs/wm8731.c66
-rw-r--r--sound/soc/codecs/wm8750.c347
-rw-r--r--sound/soc/codecs/wm8753.c8
-rw-r--r--sound/soc/codecs/wm8776.c6
-rw-r--r--sound/soc/codecs/wm8900.c10
-rw-r--r--sound/soc/codecs/wm8903.c217
-rw-r--r--sound/soc/codecs/wm8903.h221
-rw-r--r--sound/soc/codecs/wm8904.c59
-rw-r--r--sound/soc/codecs/wm8904.h97
-rw-r--r--sound/soc/codecs/wm8940.c5
-rw-r--r--sound/soc/codecs/wm8955.c16
-rw-r--r--sound/soc/codecs/wm8960.c215
-rw-r--r--sound/soc/codecs/wm8960.h11
-rw-r--r--sound/soc/codecs/wm8961.c6
-rw-r--r--sound/soc/codecs/wm8971.c12
-rw-r--r--sound/soc/codecs/wm8974.c6
-rw-r--r--sound/soc/codecs/wm8978.c12
-rw-r--r--sound/soc/codecs/wm8988.c8
-rw-r--r--sound/soc/codecs/wm8990.c8
-rw-r--r--sound/soc/codecs/wm8993.c24
-rw-r--r--sound/soc/codecs/wm8994.c264
-rw-r--r--sound/soc/codecs/wm8994.h8
-rw-r--r--sound/soc/codecs/wm9081.c18
-rw-r--r--sound/soc/codecs/wm9090.c773
-rw-r--r--sound/soc/codecs/wm9090.h715
-rw-r--r--sound/soc/codecs/wm9712.c3
-rw-r--r--sound/soc/codecs/wm9713.c16
-rw-r--r--sound/soc/codecs/wm_hubs.c18
-rw-r--r--sound/soc/davinci/Kconfig27
-rw-r--r--sound/soc/davinci/Makefile2
-rw-r--r--sound/soc/davinci/davinci-evm.c61
-rw-r--r--sound/soc/davinci/davinci-vcif.c274
-rw-r--r--sound/soc/davinci/davinci-vcif.h28
-rw-r--r--sound/soc/imx/Kconfig8
-rw-r--r--sound/soc/imx/Makefile3
-rw-r--r--sound/soc/imx/wm1133-ev1.c308
-rw-r--r--sound/soc/omap/Kconfig10
-rw-r--r--sound/soc/omap/Makefile2
-rw-r--r--sound/soc/omap/mcpdm.c548
-rw-r--r--sound/soc/omap/omap-mcbsp.c38
-rw-r--r--sound/soc/omap/omap3pandora.c2
-rw-r--r--sound/soc/omap/rx51.c294
-rw-r--r--sound/soc/omap/zoom2.c3
-rw-r--r--sound/soc/pxa/Kconfig8
-rw-r--r--sound/soc/pxa/Makefile2
-rw-r--r--sound/soc/pxa/spitz.c43
-rw-r--r--sound/soc/pxa/z2.c246
-rw-r--r--sound/soc/s3c24xx/Kconfig12
-rw-r--r--sound/soc/s3c24xx/Makefile2
-rw-r--r--sound/soc/s3c24xx/jive_wm8750.c7
-rw-r--r--sound/soc/s3c24xx/neo1973_gta02_wm8753.c8
-rw-r--r--sound/soc/s3c24xx/regs-i2s-v2.h (renamed from arch/arm/plat-samsung/include/plat/regs-s3c2412-iis.h)51
-rw-r--r--sound/soc/s3c24xx/s3c-i2s-v2.c205
-rw-r--r--sound/soc/s3c24xx/s3c-i2s-v2.h15
-rw-r--r--sound/soc/s3c24xx/s3c2412-i2s.c77
-rw-r--r--sound/soc/s3c24xx/s3c2412-i2s.h6
-rw-r--r--sound/soc/s3c24xx/s3c64xx-i2s-v4.c209
-rw-r--r--sound/soc/s3c24xx/s3c64xx-i2s.c69
-rw-r--r--sound/soc/s3c24xx/s3c64xx-i2s.h18
-rw-r--r--sound/soc/s3c24xx/smdk64xx_wm8580.c6
-rw-r--r--sound/soc/sh/Kconfig3
-rw-r--r--sound/soc/sh/fsi-ak4642.c14
-rw-r--r--sound/soc/sh/fsi.c191
-rw-r--r--sound/soc/soc-cache.c134
-rw-r--r--sound/soc/soc-core.c255
-rw-r--r--sound/soc/soc-dapm.c217
-rw-r--r--sound/soc/soc-jack.c43
-rw-r--r--sound/soc/txx9/txx9aclc.c6
-rw-r--r--sound/usb/Kconfig4
-rw-r--r--sound/usb/Makefile26
-rw-r--r--sound/usb/caiaq/control.c99
-rw-r--r--sound/usb/caiaq/device.c8
-rw-r--r--sound/usb/caiaq/device.h24
-rw-r--r--sound/usb/caiaq/input.c163
-rw-r--r--sound/usb/card.c652
-rw-r--r--sound/usb/card.h105
-rw-r--r--sound/usb/debug.h15
-rw-r--r--sound/usb/endpoint.c362
-rw-r--r--sound/usb/endpoint.h11
-rw-r--r--sound/usb/format.c432
-rw-r--r--sound/usb/format.h8
-rw-r--r--sound/usb/helper.c113
-rw-r--r--sound/usb/helper.h32
-rw-r--r--sound/usb/midi.c (renamed from sound/usb/usbmidi.c)3
-rw-r--r--sound/usb/midi.h48
-rw-r--r--sound/usb/misc/Makefile2
-rw-r--r--sound/usb/misc/ua101.c (renamed from sound/usb/ua101.c)3
-rw-r--r--sound/usb/mixer.c (renamed from sound/usb/usbmixer.c)933
-rw-r--r--sound/usb/mixer.h55
-rw-r--r--sound/usb/mixer_maps.c (renamed from sound/usb/usbmixer_maps.c)4
-rw-r--r--sound/usb/mixer_quirks.c412
-rw-r--r--sound/usb/mixer_quirks.h13
-rw-r--r--sound/usb/pcm.c935
-rw-r--r--sound/usb/pcm.h14
-rw-r--r--sound/usb/proc.c168
-rw-r--r--sound/usb/proc.h8
-rw-r--r--sound/usb/quirks-table.h (renamed from sound/usb/usbquirks.h)68
-rw-r--r--sound/usb/quirks.c594
-rw-r--r--sound/usb/quirks.h23
-rw-r--r--sound/usb/urb.c995
-rw-r--r--sound/usb/urb.h21
-rw-r--r--sound/usb/usbaudio.c4050
-rw-r--r--sound/usb/usbaudio.h93
-rw-r--r--sound/usb/usx2y/us122l.c1
-rw-r--r--sound/usb/usx2y/usbusx2y.h1
-rw-r--r--tools/perf/Documentation/perf-annotate.txt2
-rw-r--r--tools/perf/Documentation/perf-bench.txt8
-rw-r--r--tools/perf/Documentation/perf-buildid-cache.txt4
-rw-r--r--tools/perf/Documentation/perf-diff.txt2
-rw-r--r--tools/perf/Documentation/perf-inject.txt35
-rw-r--r--tools/perf/Documentation/perf-kmem.txt2
-rw-r--r--tools/perf/Documentation/perf-kvm.txt68
-rw-r--r--tools/perf/Documentation/perf-list.txt33
-rw-r--r--tools/perf/Documentation/perf-probe.txt21
-rw-r--r--tools/perf/Documentation/perf-record.txt4
-rw-r--r--tools/perf/Documentation/perf-sched.txt4
-rw-r--r--tools/perf/Documentation/perf-test.txt22
-rw-r--r--tools/perf/Documentation/perf-trace-perl.txt6
-rw-r--r--tools/perf/Documentation/perf-trace-python.txt12
-rw-r--r--tools/perf/Documentation/perf-trace.txt2
-rw-r--r--tools/perf/Makefile376
-rw-r--r--tools/perf/arch/powerpc/Makefile4
-rw-r--r--tools/perf/arch/powerpc/util/dwarf-regs.c88
-rw-r--r--tools/perf/arch/x86/Makefile4
-rw-r--r--tools/perf/arch/x86/util/dwarf-regs.c75
-rw-r--r--tools/perf/bench/mem-memcpy.c3
-rw-r--r--tools/perf/bench/sched-messaging.c4
-rw-r--r--tools/perf/bench/sched-pipe.c2
-rw-r--r--tools/perf/builtin-annotate.c79
-rw-r--r--tools/perf/builtin-buildid-cache.c2
-rw-r--r--tools/perf/builtin-buildid-list.c8
-rw-r--r--tools/perf/builtin-diff.c74
-rw-r--r--tools/perf/builtin-help.c2
-rw-r--r--tools/perf/builtin-inject.c228
-rw-r--r--tools/perf/builtin-kmem.c22
-rw-r--r--tools/perf/builtin-kvm.c144
-rw-r--r--tools/perf/builtin-lock.c694
-rw-r--r--tools/perf/builtin-probe.c286
-rw-r--r--tools/perf/builtin-record.c530
-rw-r--r--tools/perf/builtin-report.c190
-rw-r--r--tools/perf/builtin-sched.c18
-rw-r--r--tools/perf/builtin-stat.c155
-rw-r--r--tools/perf/builtin-test.c281
-rw-r--r--tools/perf/builtin-timechart.c119
-rw-r--r--tools/perf/builtin-top.c291
-rw-r--r--tools/perf/builtin-trace.c97
-rw-r--r--tools/perf/builtin.h3
-rw-r--r--tools/perf/command-list.txt3
-rw-r--r--tools/perf/perf-archive.sh3
-rw-r--r--tools/perf/perf.c8
-rw-r--r--tools/perf/perf.h8
-rw-r--r--tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm6
-rw-r--r--tools/perf/scripts/perl/bin/check-perf-trace-record2
-rw-r--r--tools/perf/scripts/perl/bin/failed-syscalls-record2
-rw-r--r--tools/perf/scripts/perl/bin/failed-syscalls-report8
-rw-r--r--tools/perf/scripts/perl/bin/rw-by-file-record3
-rw-r--r--tools/perf/scripts/perl/bin/rw-by-file-report8
-rw-r--r--tools/perf/scripts/perl/bin/rw-by-pid-record2
-rw-r--r--tools/perf/scripts/perl/bin/rw-by-pid-report2
-rw-r--r--tools/perf/scripts/perl/bin/rwtop-record2
-rw-r--r--tools/perf/scripts/perl/bin/rwtop-report23
-rw-r--r--tools/perf/scripts/perl/bin/wakeup-latency-record2
-rw-r--r--tools/perf/scripts/perl/bin/wakeup-latency-report2
-rw-r--r--tools/perf/scripts/perl/bin/workqueue-stats-record2
-rw-r--r--tools/perf/scripts/perl/bin/workqueue-stats-report2
-rw-r--r--tools/perf/scripts/perl/failed-syscalls.pl6
-rw-r--r--tools/perf/scripts/perl/rw-by-pid.pl60
-rw-r--r--tools/perf/scripts/perl/rwtop.pl199
-rw-r--r--tools/perf/scripts/perl/wakeup-latency.pl12
-rw-r--r--tools/perf/scripts/perl/workqueue-stats.pl12
-rw-r--r--tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py3
-rw-r--r--tools/perf/scripts/python/bin/failed-syscalls-by-pid-record2
-rw-r--r--tools/perf/scripts/python/bin/failed-syscalls-by-pid-report8
-rw-r--r--tools/perf/scripts/python/bin/sctop-record2
-rw-r--r--tools/perf/scripts/python/bin/sctop-report24
-rw-r--r--tools/perf/scripts/python/bin/syscall-counts-by-pid-record2
-rw-r--r--tools/perf/scripts/python/bin/syscall-counts-by-pid-report8
-rw-r--r--tools/perf/scripts/python/bin/syscall-counts-record2
-rw-r--r--tools/perf/scripts/python/bin/syscall-counts-report8
-rw-r--r--tools/perf/scripts/python/sctop.py78
-rwxr-xr-xtools/perf/util/PERF-VERSION-GEN6
-rw-r--r--tools/perf/util/bitmap.c21
-rw-r--r--tools/perf/util/build-id.c2
-rw-r--r--tools/perf/util/cache.h14
-rw-r--r--tools/perf/util/callchain.c121
-rw-r--r--tools/perf/util/callchain.h10
-rw-r--r--tools/perf/util/color.c48
-rw-r--r--tools/perf/util/color.h4
-rw-r--r--tools/perf/util/debug.c8
-rw-r--r--tools/perf/util/debug.h30
-rw-r--r--tools/perf/util/event.c351
-rw-r--r--tools/perf/util/event.h64
-rw-r--r--tools/perf/util/header.c489
-rw-r--r--tools/perf/util/header.h39
-rw-r--r--tools/perf/util/hist.c318
-rw-r--r--tools/perf/util/hist.h40
-rw-r--r--tools/perf/util/hweight.c31
-rw-r--r--tools/perf/util/include/asm/bitops.h18
-rw-r--r--tools/perf/util/include/asm/hweight.h8
-rw-r--r--tools/perf/util/include/dwarf-regs.h8
-rw-r--r--tools/perf/util/include/linux/bitmap.h38
-rw-r--r--tools/perf/util/include/linux/bitops.h20
-rw-r--r--tools/perf/util/include/linux/compiler.h2
-rw-r--r--tools/perf/util/include/linux/kernel.h9
-rw-r--r--tools/perf/util/map.c409
-rw-r--r--tools/perf/util/map.h131
-rw-r--r--tools/perf/util/newt.c725
-rw-r--r--tools/perf/util/parse-events.c44
-rw-r--r--tools/perf/util/parse-events.h1
-rw-r--r--tools/perf/util/parse-options.c7
-rw-r--r--tools/perf/util/parse-options.h4
-rw-r--r--tools/perf/util/probe-event.c1580
-rw-r--r--tools/perf/util/probe-event.h130
-rw-r--r--tools/perf/util/probe-finder.c1046
-rw-r--r--tools/perf/util/probe-finder.h67
-rw-r--r--tools/perf/util/scripting-engines/trace-event-perl.c3
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c4
-rw-r--r--tools/perf/util/session.c550
-rw-r--r--tools/perf/util/session.h133
-rw-r--r--tools/perf/util/sort.c145
-rw-r--r--tools/perf/util/sort.h30
-rw-r--r--tools/perf/util/string.c45
-rw-r--r--tools/perf/util/string.h18
-rw-r--r--tools/perf/util/symbol.c564
-rw-r--r--tools/perf/util/symbol.h80
-rw-r--r--tools/perf/util/thread.c242
-rw-r--r--tools/perf/util/thread.h53
-rw-r--r--tools/perf/util/trace-event-info.c35
-rw-r--r--tools/perf/util/trace-event-parse.c120
-rw-r--r--tools/perf/util/trace-event-read.c116
-rw-r--r--tools/perf/util/trace-event.h8
-rw-r--r--tools/perf/util/util.h21
-rw-r--r--virt/kvm/assigned-dev.c8
-rw-r--r--virt/kvm/coalesced_mmio.c6
-rw-r--r--virt/kvm/ioapic.c30
-rw-r--r--virt/kvm/ioapic.h2
-rw-r--r--virt/kvm/iommu.c4
-rw-r--r--virt/kvm/kvm_main.c63
4906 files changed, 349372 insertions, 125226 deletions
diff --git a/.gitignore b/.gitignore
index a2939fc10b22..8faa6c02b39e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -28,6 +28,7 @@ modules.builtin
*.gz
*.bz2
*.lzma
+*.lzo
*.patch
*.gcno
diff --git a/Documentation/.gitignore b/Documentation/.gitignore
new file mode 100644
index 000000000000..bcd907b4141f
--- /dev/null
+++ b/Documentation/.gitignore
@@ -0,0 +1,7 @@
+filesystems/dnotify_test
+laptops/dslm
+timers/hpet_example
+vm/hugepage-mmap
+vm/hugepage-shm
+vm/map_hugetlb
+
diff --git a/Documentation/ABI/obsolete/sysfs-class-rfkill b/Documentation/ABI/obsolete/sysfs-class-rfkill
new file mode 100644
index 000000000000..4201d5b05515
--- /dev/null
+++ b/Documentation/ABI/obsolete/sysfs-class-rfkill
@@ -0,0 +1,29 @@
+rfkill - radio frequency (RF) connector kill switch support
+
+For details on this subsystem, look at Documentation/rfkill.txt.
+
+What: /sys/class/rfkill/rfkill[0-9]+/state
+Date: 09-Jul-2007
+KernelVersion v2.6.22
+Contact: linux-wireless@vger.kernel.org
+Description: Current state of the transmitter.
+ This file is deprecated and scheduled to be removed in 2014,
+ because it is not possible to express the 'soft and hard block'
+ state of the rfkill driver.
+Values: A numeric value.
+ 0: RFKILL_STATE_SOFT_BLOCKED
+ transmitter is turned off by software
+ 1: RFKILL_STATE_UNBLOCKED
+ transmitter is (potentially) active
+ 2: RFKILL_STATE_HARD_BLOCKED
+ transmitter is forced off by something outside of
+ the driver's control.
+
+What: /sys/class/rfkill/rfkill[0-9]+/claim
+Date: 09-Jul-2007
+KernelVersion v2.6.22
+Contact: linux-wireless@vger.kernel.org
+Description: This file is deprecated because there is no longer a way to
+ claim control over just a single rfkill instance.
+ This file is scheduled to be removed in 2012.
+Values: 0: Kernel handles events
diff --git a/Documentation/ABI/stable/sysfs-class-rfkill b/Documentation/ABI/stable/sysfs-class-rfkill
new file mode 100644
index 000000000000..097f522c33bb
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-class-rfkill
@@ -0,0 +1,67 @@
+rfkill - radio frequency (RF) connector kill switch support
+
+For details on this subsystem, look at Documentation/rfkill.txt.
+
+For the deprecated /sys/class/rfkill/*/state and
+/sys/class/rfkill/*/claim knobs of this interface look in
+Documentation/ABI/obsolete/sysfs-class-rfkill.
+
+What: /sys/class/rfkill
+Date: 09-Jul-2007
+KernelVersion: v2.6.22
+Contact: linux-wireless@vger.kernel.org,
+Description: The rfkill class subsystem folder.
+ Each registered rfkill driver is represented by an rfkillX
+ subfolder (X being an integer > 0).
+
+
+What: /sys/class/rfkill/rfkill[0-9]+/name
+Date: 09-Jul-2007
+KernelVersion v2.6.22
+Contact: linux-wireless@vger.kernel.org
+Description: Name assigned by driver to this key (interface or driver name).
+Values: arbitrary string.
+
+
+What: /sys/class/rfkill/rfkill[0-9]+/type
+Date: 09-Jul-2007
+KernelVersion v2.6.22
+Contact: linux-wireless@vger.kernel.org
+Description: Driver type string ("wlan", "bluetooth", etc).
+Values: See include/linux/rfkill.h.
+
+
+What: /sys/class/rfkill/rfkill[0-9]+/persistent
+Date: 09-Jul-2007
+KernelVersion v2.6.22
+Contact: linux-wireless@vger.kernel.org
+Description: Whether the soft blocked state is initialised from non-volatile
+ storage at startup.
+Values: A numeric value.
+ 0: false
+ 1: true
+
+
+What: /sys/class/rfkill/rfkill[0-9]+/hard
+Date: 12-March-2010
+KernelVersion v2.6.34
+Contact: linux-wireless@vger.kernel.org
+Description: Current hardblock state. This file is read only.
+Values: A numeric value.
+ 0: inactive
+ The transmitter is (potentially) active.
+ 1: active
+ The transmitter is forced off by something outside of
+ the driver's control.
+
+
+What: /sys/class/rfkill/rfkill[0-9]+/soft
+Date: 12-March-2010
+KernelVersion v2.6.34
+Contact: linux-wireless@vger.kernel.org
+Description: Current softblock state. This file is read and write.
+Values: A numeric value.
+ 0: inactive
+ The transmitter is (potentially) active.
+ 1: active
+ The transmitter is turned off by software.
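As a minimal sketch of how the hard and soft attributes documented above can be consumed from userspace (the instance name rfkill0 is an assumption and most error handling is omitted; real code should enumerate /sys/class/rfkill/):

#include <stdio.h>

int main(void)
{
        int hard = -1;
        FILE *f = fopen("/sys/class/rfkill/rfkill0/hard", "r");

        if (f) {
                /* 0: transmitter usable, 1: forced off outside driver control */
                fscanf(f, "%d", &hard);
                fclose(f);
        }

        f = fopen("/sys/class/rfkill/rfkill0/soft", "w");
        if (f) {
                /* writing 1 soft-blocks the transmitter, writing 0 unblocks it */
                fputs("1\n", f);
                fclose(f);
        }

        printf("hard block state: %d\n", hard);
        return 0;
}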
diff --git a/Documentation/ABI/testing/sysfs-bus-pci b/Documentation/ABI/testing/sysfs-bus-pci
index 25be3250f7d6..428676cfa61e 100644
--- a/Documentation/ABI/testing/sysfs-bus-pci
+++ b/Documentation/ABI/testing/sysfs-bus-pci
@@ -133,6 +133,46 @@ Description:
The symbolic link points to the PCI device sysfs entry of the
Physical Function this device associates with.
+
+What: /sys/bus/pci/slots/...
+Date: April 2005 (possibly older)
+KernelVersion: 2.6.12 (possibly older)
+Contact: linux-pci@vger.kernel.org
+Description:
+ When the appropriate driver is loaded, it will create a
+ directory per claimed physical PCI slot in
+ /sys/bus/pci/slots/. The names of these directories are
+ specific to the driver, which is in turn specific to the
+ platform, but in general they should match the label on the
+ machine's physical chassis.
+
+ The drivers that can create slot directories include the
+ PCI hotplug drivers, and as of 2.6.27, the pci_slot driver.
+
+ The slot directories contain, at a minimum, a file named
+ 'address' which contains the PCI bus:device:function tuple.
+ Other files may appear as well, but are specific to the
+ driver.
+
+What: /sys/bus/pci/slots/.../function[0-7]
+Date: March 2010
+KernelVersion: 2.6.35
+Contact: linux-pci@vger.kernel.org
+Description:
+ If PCI slot directories (as described above) are created,
+ and the physical slot is actually populated with a device,
+ symbolic links in the slot directory pointing to the
+ device's PCI functions are created as well.
+
+What: /sys/bus/pci/devices/.../slot
+Date: March 2010
+KernelVersion: 2.6.35
+Contact: linux-pci@vger.kernel.org
+Description:
+ If PCI slot directories (as described above) are created,
+ a symbolic link pointing to the slot directory will be
+ created as well.
+
What: /sys/bus/pci/slots/.../module
Date: June 2009
Contact: linux-pci@vger.kernel.org
diff --git a/Documentation/ABI/testing/sysfs-devices-memory b/Documentation/ABI/testing/sysfs-devices-memory
index bf1627b02a03..aba7d989208c 100644
--- a/Documentation/ABI/testing/sysfs-devices-memory
+++ b/Documentation/ABI/testing/sysfs-devices-memory
@@ -43,7 +43,7 @@ Date: September 2008
Contact: Badari Pulavarty <pbadari@us.ibm.com>
Description:
The file /sys/devices/system/memory/memoryX/state
- is read-write. When read, it's contents show the
+ is read-write. When read, its contents show the
online/offline state of the memory section. When written,
 root can toggle the online/offline state of a removable
memory section (see removable file description above)
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-picolcd b/Documentation/ABI/testing/sysfs-driver-hid-picolcd
new file mode 100644
index 000000000000..08579e7e1e89
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-hid-picolcd
@@ -0,0 +1,43 @@
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/operation_mode
+Date: March 2010
+Contact: Bruno Prémont <bonbons@linux-vserver.org>
+Description: Make it possible to switch the PicoLCD device between LCD
+ (firmware) and bootloader (flasher) operation modes.
+
+ Reading: returns list of available modes, the active mode being
+ enclosed in brackets ('[' and ']')
+
+ Writing: causes operation mode switch. Permitted values are
+ the non-active mode names listed when read.
+
+ Note: when switching modes, the current PicoLCD HID device gets
+ disconnected and reconnects after the delay given by the
+ operation_mode_delay attribute.
+
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/operation_mode_delay
+Date: April 2010
+Contact: Bruno Prémont <bonbons@linux-vserver.org>
+Description: Delay the PicoLCD waits before restarting in the new mode after
+ operation_mode has been changed.
+
+ Reading/Writing: It is expressed in ms and permitted range is
+ 0..30000ms.
+
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/fb_update_rate
+Date: March 2010
+Contact: Bruno Prémont <bonbons@linux-vserver.org>
+Description: Make it possible to adjust defio refresh rate.
+
+ Reading: returns list of available refresh rates (expressed in Hz),
+ the active refresh rate being enclosed in brackets ('[' and ']')
+
+ Writing: accepts new refresh rate expressed in integer Hz
+ within permitted rates.
+
+ Note: As the device can barely do 2 complete refreshes a second,
+ it only makes sense to adjust this value if only one or two
+ tiles get changed and it is not appropriate to expect the application
+ to flush its tiny changes explicitly at higher than the default rate.
+
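The bracketed-active-entry convention used by operation_mode and fb_update_rate above can be handled with a few lines of C; this is only an illustrative sketch, and the helper name and buffer handling are not part of the interface:

#include <string.h>

/*
 * Extract the active entry from a reading such as "[LCD] bootloader".
 * Returns 0 on success, -1 if no bracketed entry fits in 'out'.
 */
static int active_entry(const char *line, char *out, size_t outlen)
{
        const char *open = strchr(line, '[');
        const char *close = open ? strchr(open, ']') : NULL;
        size_t len;

        if (!open || !close)
                return -1;
        len = close - open - 1;
        if (len + 1 > outlen)
                return -1;
        memcpy(out, open + 1, len);
        out[len] = '\0';
        return 0;
}

For example, active_entry("[LCD] bootloader", buf, sizeof(buf)) leaves "LCD" in buf; writing the other, non-active name back to operation_mode triggers the mode switch described above.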
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-roccat-kone b/Documentation/ABI/testing/sysfs-driver-hid-roccat-kone
new file mode 100644
index 000000000000..88340a23ce91
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-hid-roccat-kone
@@ -0,0 +1,111 @@
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/actual_dpi
+Date: March 2010
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: It is possible to switch the dpi setting of the mouse with the
+ press of a button.
+ When read, this file returns the raw number of the actual dpi
+ setting reported by the mouse. This number has to be further
+ processed to obtain the real dpi value.
+
+ VALUE DPI
+ 1 800
+ 2 1200
+ 3 1600
+ 4 2000
+ 5 2400
+ 6 3200
+
+ This file is readonly.
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/actual_profile
+Date: March 2010
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When read, this file returns the number of the actual profile.
+ This file is readonly.
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/firmware_version
+Date: March 2010
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When read, this file returns the raw integer version number of the
+ firmware reported by the mouse. Using the integer value eases
+ further usage in other programs. To obtain the real version
+ number, the decimal point has to be shifted 2 positions to the
+ left. E.g. a returned value of 138 means 1.38.
+ This file is readonly.
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/kone_driver_version
+Date: March 2010
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When read, this file returns the driver version.
+ The format of the string is "v<major>.<minor>.<patchlevel>".
+ This attribute is used by the userland tools to find the sysfs-
+ paths of installed kone-mice and determine the capabilities of
+ the driver. Versions of this driver for old kernels replace
+ usbhid instead of generic-usb. The way to scan for this file
+ has been chosen to provide a consistent way for all supported
+ kernel versions.
+ This file is readonly.
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/profile[1-5]
+Date: March 2010
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: The mouse can store 5 profiles which can be switched by the
+ press of a button. A profile holds information like button
+ mappings, sensitivity, the colors of the 5 LEDs and light
+ effects.
+ When read, these files return the respective profile. The
+ returned data is 975 bytes in size.
+ When written, this file lets one write the respective profile
+ data back to the mouse. The data has to be 975 bytes long.
+ The mouse will reject invalid data; however, the profile number
+ stored in the profile data does not need to match the number of
+ the profile slot it is written to.
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/settings
+Date: March 2010
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When read, this file returns the settings stored in the mouse.
+ The data is 36 bytes in size and holds information like the
+ startup_profile, tcu state and calibration_data.
+ When written, this file lets one write settings back to the mouse.
+ The data has to be 36 bytes long. The mouse will reject invalid
+ data.
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/startup_profile
+Date: March 2010
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: The integer value of this attribute ranges from 1 to 5.
+ When read, this attribute returns the number of the profile
+ that's active when the mouse is powered on.
+ When written, this file sets the number of the startup profile
+ and the mouse activates this profile immediately.
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/tcu
+Date: March 2010
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: The mouse has a "Tracking Control Unit" which lets the user
+ calibrate the laser power to fit the mousepad surface.
+ When read, this file returns the current state of the TCU,
+ where 0 means off and 1 means on.
+ Writing 0 in this file will switch the TCU off.
+ Writing 1 in this file will start the calibration which takes
+ around 6 seconds to complete and activates the TCU.
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/weight
+Date: March 2010
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: The mouse can be equipped with one of four supplied weights,
+ ranging from 5 to 20 grams, which are recognized by the mouse
+ and whose value can be read out. When read, this file returns the
+ raw value reported by the mouse, which eases further processing
+ in other software.
+ The values map to the weights as follows:
+
+ VALUE WEIGHT
+ 0 none
+ 1 5g
+ 2 10g
+ 3 15g
+ 4 20g
+
+ This file is readonly.
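Both actual_dpi and firmware_version above return raw integers that need the trivial conversions spelled out in their descriptions; a small sketch of those conversions (the lookup table simply mirrors the VALUE/DPI list, and the divide-by-100 follows the 138 -> 1.38 example; the function names are invented):

/* Raw actual_dpi value -> real dpi, per the table above (index 0 unused). */
static const int kone_dpi_table[] = { 0, 800, 1200, 1600, 2000, 2400, 3200 };

static int kone_raw_to_dpi(int raw)
{
        if (raw < 1 || raw > 6)
                return -1;
        return kone_dpi_table[raw];
}

/* Raw firmware_version -> human readable version, e.g. 138 -> 1.38 */
static double kone_raw_to_firmware(int raw)
{
        return raw / 100.0;
}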
diff --git a/Documentation/ABI/testing/sysfs-wacom b/Documentation/ABI/testing/sysfs-wacom
new file mode 100644
index 000000000000..1517976e25c4
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-wacom
@@ -0,0 +1,10 @@
+What: /sys/class/hidraw/hidraw*/device/speed
+Date: April 2010
+KernelVersion: 2.6.35
+Contact: linux-bluetooth@vger.kernel.org
+Description:
+ The /sys/class/hidraw/hidraw*/device/speed file controls
+ the reporting speed of the Wacom Bluetooth tablet. Reading from
+ this file returns 1 if the tablet reports in high speed mode
+ and 0 otherwise. Writing one of these values to this file
+ switches the reporting speed.
diff --git a/Documentation/Changes b/Documentation/Changes
index f08b313cd235..eca9f6e6fbe6 100644
--- a/Documentation/Changes
+++ b/Documentation/Changes
@@ -49,7 +49,7 @@ o oprofile 0.9 # oprofiled --version
o udev 081 # udevinfo -V
o grub 0.93 # grub --version
o mcelog 0.6
-o iptables 1.4.1 # iptables -V
+o iptables 1.4.2 # iptables -V
Kernel compilation
diff --git a/Documentation/DMA-API-HOWTO.txt b/Documentation/DMA-API-HOWTO.txt
index 52618ab069ad..2e435adfbd6b 100644
--- a/Documentation/DMA-API-HOWTO.txt
+++ b/Documentation/DMA-API-HOWTO.txt
@@ -742,7 +742,7 @@ failure can be determined by:
Closing
-This document, and the API itself, would not be in it's current
+This document, and the API itself, would not be in its current
form without the feedback and suggestions from numerous individuals.
We would like to specifically mention, in no particular order, the
following people:
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index 325cfd1d6d99..c7e5dc7e8cb3 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -14,7 +14,7 @@ DOCBOOKS := z8530book.xml mcabook.xml device-drivers.xml \
genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
mac80211.xml debugobjects.xml sh.xml regulator.xml \
alsa-driver-api.xml writing-an-alsa-driver.xml \
- tracepoint.xml media.xml
+ tracepoint.xml media.xml drm.xml
###
# The build process is as follows (targets):
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
new file mode 100644
index 000000000000..7583dc7cf64d
--- /dev/null
+++ b/Documentation/DocBook/drm.tmpl
@@ -0,0 +1,839 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="drmDevelopersGuide">
+ <bookinfo>
+ <title>Linux DRM Developer's Guide</title>
+
+ <copyright>
+ <year>2008-2009</year>
+ <holder>
+ Intel Corporation (Jesse Barnes &lt;jesse.barnes@intel.com&gt;)
+ </holder>
+ </copyright>
+
+ <legalnotice>
+ <para>
+ The contents of this file may be used under the terms of the GNU
+ General Public License version 2 (the "GPL") as distributed in
+ the kernel source COPYING file.
+ </para>
+ </legalnotice>
+ </bookinfo>
+
+<toc></toc>
+
+ <!-- Introduction -->
+
+ <chapter id="drmIntroduction">
+ <title>Introduction</title>
+ <para>
+ The Linux DRM layer contains code intended to support the needs
+ of complex graphics devices, usually containing programmable
+ pipelines well suited to 3D graphics acceleration. Graphics
+ drivers in the kernel can make use of DRM functions to make
+ tasks like memory management, interrupt handling and DMA easier,
+ and provide a uniform interface to applications.
+ </para>
+ <para>
+ A note on versions: this guide covers features found in the DRM
+ tree, including the TTM memory manager, output configuration and
+ mode setting, and the new vblank internals, in addition to all
+ the regular features found in current kernels.
+ </para>
+ <para>
+ [Insert diagram of typical DRM stack here]
+ </para>
+ </chapter>
+
+ <!-- Internals -->
+
+ <chapter id="drmInternals">
+ <title>DRM Internals</title>
+ <para>
+ This chapter documents DRM internals relevant to driver authors
+ and developers working to add support for the latest features to
+ existing drivers.
+ </para>
+ <para>
+ First, we'll go over some typical driver initialization
+ requirements, like setting up command buffers, creating an
+ initial output configuration, and initializing core services.
+ Subsequent sections will cover core internals in more detail,
+ providing implementation notes and examples.
+ </para>
+ <para>
+ The DRM layer provides several services to graphics drivers,
+ many of them driven by the application interfaces it provides
+ through libdrm, the library that wraps most of the DRM ioctls.
+ These include vblank event handling, memory
+ management, output management, framebuffer management, command
+ submission &amp; fencing, suspend/resume support, and DMA
+ services.
+ </para>
+ <para>
+ The core of every DRM driver is struct drm_device. Drivers
+ will typically statically initialize a drm_driver structure,
+ then pass it to drm_init() at load time.
+ </para>
+
+ <!-- Internals: driver init -->
+
+ <sect1>
+ <title>Driver initialization</title>
+ <para>
+ Before calling the DRM initialization routines, the driver must
+ first create and fill out a struct drm_driver structure.
+ </para>
+ <programlisting>
+ static struct drm_driver driver = {
+ /* don't use mtrr's here, the Xserver or user space app should
+ * deal with them for intel hardware.
+ */
+ .driver_features =
+ DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_MODESET,
+ .load = i915_driver_load,
+ .unload = i915_driver_unload,
+ .firstopen = i915_driver_firstopen,
+ .lastclose = i915_driver_lastclose,
+ .preclose = i915_driver_preclose,
+ .save = i915_save,
+ .restore = i915_restore,
+ .device_is_agp = i915_driver_device_is_agp,
+ .get_vblank_counter = i915_get_vblank_counter,
+ .enable_vblank = i915_enable_vblank,
+ .disable_vblank = i915_disable_vblank,
+ .irq_preinstall = i915_driver_irq_preinstall,
+ .irq_postinstall = i915_driver_irq_postinstall,
+ .irq_uninstall = i915_driver_irq_uninstall,
+ .irq_handler = i915_driver_irq_handler,
+ .reclaim_buffers = drm_core_reclaim_buffers,
+ .get_map_ofs = drm_core_get_map_ofs,
+ .get_reg_ofs = drm_core_get_reg_ofs,
+ .fb_probe = intelfb_probe,
+ .fb_remove = intelfb_remove,
+ .fb_resize = intelfb_resize,
+ .master_create = i915_master_create,
+ .master_destroy = i915_master_destroy,
+#if defined(CONFIG_DEBUG_FS)
+ .debugfs_init = i915_debugfs_init,
+ .debugfs_cleanup = i915_debugfs_cleanup,
+#endif
+ .gem_init_object = i915_gem_init_object,
+ .gem_free_object = i915_gem_free_object,
+ .gem_vm_ops = &amp;i915_gem_vm_ops,
+ .ioctls = i915_ioctls,
+ .fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = i915_compat_ioctl,
+#endif
+ },
+ .pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = probe,
+ .remove = __devexit_p(drm_cleanup_pci),
+ },
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+ };
+ </programlisting>
+ <para>
+ In the example above, taken from the i915 DRM driver, the driver
+ sets several flags indicating what core features it supports.
+ We'll go over the individual callbacks in later sections. Since
+ flags indicate which features your driver supports to the DRM
+ core, you need to set most of them prior to calling drm_init(). Some,
+ like DRIVER_MODESET, can be set later based on user supplied parameters,
+ but that's the exception rather than the rule.
+ </para>
+ <variablelist>
+ <title>Driver flags</title>
+ <varlistentry>
+ <term>DRIVER_USE_AGP</term>
+ <listitem><para>
+ Driver uses AGP interface
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRIVER_REQUIRE_AGP</term>
+ <listitem><para>
+ Driver needs AGP interface to function.
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRIVER_USE_MTRR</term>
+ <listitem>
+ <para>
+ Driver uses MTRR interface for mapping memory. Deprecated.
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRIVER_PCI_DMA</term>
+ <listitem><para>
+ Driver is capable of PCI DMA. Deprecated.
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRIVER_SG</term>
+ <listitem><para>
+ Driver can perform scatter/gather DMA. Deprecated.
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRIVER_HAVE_DMA</term>
+ <listitem><para>Driver supports DMA. Deprecated.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRIVER_HAVE_IRQ</term><term>DRIVER_IRQ_SHARED</term>
+ <listitem>
+ <para>
+ DRIVER_HAVE_IRQ indicates whether the driver has an IRQ
+ handler; DRIVER_IRQ_SHARED indicates whether the device &amp;
+ handler support shared IRQs (note that this is required of
+ PCI drivers).
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRIVER_DMA_QUEUE</term>
+ <listitem>
+ <para>
+ If the driver queues DMA requests and completes them
+ asynchronously, this flag should be set. Deprecated.
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRIVER_FB_DMA</term>
+ <listitem>
+ <para>
+ Driver supports DMA to/from the framebuffer. Deprecated.
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRIVER_MODESET</term>
+ <listitem>
+ <para>
+ Driver supports mode setting interfaces.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ <para>
+ In this specific case, the driver requires AGP and supports
+ IRQs. DMA, as we'll see, is handled by device specific ioctls.
+ It also supports the kernel mode setting APIs, though
+ unlike the actual i915 driver source, this example unconditionally
+ exports KMS capability.
+ </para>
+ </sect1>
+
+ <!-- Internals: driver load -->
+
+ <sect1>
+ <title>Driver load</title>
+ <para>
+ In the previous section, we saw what a typical drm_driver
+ structure might look like. One of the more important fields in
+ the structure is the hook for the load function.
+ </para>
+ <programlisting>
+ static struct drm_driver driver = {
+ ...
+ .load = i915_driver_load,
+ ...
+ };
+ </programlisting>
+ <para>
+ The load function has many responsibilities: allocating a driver
+ private structure, specifying supported performance counters,
+ configuring the device (e.g. mapping registers &amp; command
+ buffers), initializing the memory manager, and setting up the
+ initial output configuration.
+ </para>
+ <para>
+ Note that the tasks performed at driver load time must not
+ conflict with DRM client requirements. For instance, if user
+ level mode setting drivers are in use, it would be problematic
+ to perform output discovery &amp; configuration at load time.
+ Likewise, if pre-memory management aware user level drivers are
+ in use, memory management and command buffer setup may need to
+ be omitted. These requirements are driver specific, and care
+ needs to be taken to keep both old and new applications and
+ libraries working. The i915 driver supports the "modeset"
+ module parameter to control whether advanced features are
+ enabled at load time or in legacy fashion. If compatibility is
+ a concern (e.g. with drivers converted over to the new interfaces
+ from the old ones), care must be taken to prevent incompatible
+ device initialization and control with the currently active
+ userspace drivers.
+ </para>
+
+ <sect2>
+ <title>Driver private &amp; performance counters</title>
+ <para>
+ The driver private hangs off the main drm_device structure and
+ can be used for tracking various device specific bits of
+ information, like register offsets, command buffer status,
+ register state for suspend/resume, etc. At load time, a
+ driver can simply allocate one and set drm_device.dev_priv
+ appropriately; at unload the driver can free it and set
+ drm_device.dev_priv to NULL.
+ </para>
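+	<para>
+	  As a rough sketch (the my_driver_load and my_drv_private names are
+	  hypothetical, not part of the DRM API), the load hook might do this:
+	</para>
+	<programlisting>
+ static int my_driver_load(struct drm_device *dev, unsigned long flags)
+ {
+     struct my_drv_private *dev_priv;
+
+     /* Allocate the driver private and hang it off the drm_device;
+      * the unload hook frees it and clears the pointer again. */
+     dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
+     if (!dev_priv)
+         return -ENOMEM;
+     dev->dev_private = dev_priv;
+
+     return 0;
+ }
+	</programlisting>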
+ <para>
+ The DRM supports several counters which can be used for rough
+ performance characterization. Note that the DRM stat counter
+ system is not often used by applications, and supporting
+ additional counters is completely optional.
+ </para>
+ <para>
+ These interfaces are deprecated and should not be used. If performance
+ monitoring is desired, the developer should investigate and
+ potentially enhance the kernel perf and tracing infrastructure to export
+ GPU related performance information to performance monitoring
+ tools and applications.
+ </para>
+ </sect2>
+
+ <sect2>
+ <title>Configuring the device</title>
+ <para>
+ Obviously, device configuration will be device specific.
+ However, there are several common operations: finding a
+ device's PCI resources, mapping them, and potentially setting
+ up an IRQ handler.
+ </para>
+ <para>
+ Finding &amp; mapping resources is fairly straightforward. The
+ DRM wrapper functions, drm_get_resource_start() and
+ drm_get_resource_len() can be used to find BARs on the given
+ drm_device struct. Once those values have been retrieved, the
+ driver load function can call drm_addmap() to create a new
+ mapping for the BAR in question. Note you'll probably want a
+ drm_local_map_t in your driver private structure to track any
+ mappings you create.
+<!-- !Fdrivers/gpu/drm/drm_bufs.c drm_get_resource_* -->
+<!-- !Finclude/drm/drmP.h drm_local_map_t -->
+ </para>
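+	<para>
+	  For example (a sketch only; BAR 0 is assumed to hold the MMIO
+	  registers, and mmio_map is a hypothetical drm_local_map_t pointer
+	  in the driver private):
+	</para>
+	<programlisting>
+ unsigned long base, size;
+ int ret;
+
+ base = drm_get_resource_start(dev, 0);    /* BAR 0 assumed to be MMIO */
+ size = drm_get_resource_len(dev, 0);
+ ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
+                  _DRM_KERNEL | _DRM_DRIVER, &amp;dev_priv->mmio_map);
+ if (ret)
+     return ret;
+	</programlisting>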
+ <para>
+ If compatibility with other operating systems isn't a concern
+ (DRM drivers can run under various BSD variants and OpenSolaris),
+ native Linux calls can be used for the above, e.g. pci_resource_*
+ and iomap*/iounmap. See the Linux device driver book for more
+ info.
+ </para>
+ <para>
+ Once you have a register map, you can use the DRM_READn() and
+ DRM_WRITEn() macros to access the registers on your device, or
+ use driver specific versions to offset into your MMIO space
+ relative to a driver specific base pointer (see I915_READ for
+ example).
+ </para>
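+	<para>
+	  For instance, using the map created above (MY_REG and MY_ENABLE_BIT
+	  are hypothetical register definitions, not real hardware bits):
+	</para>
+	<programlisting>
+ u32 tmp;
+
+ tmp = DRM_READ32(dev_priv->mmio_map, MY_REG);
+ DRM_WRITE32(dev_priv->mmio_map, MY_REG, tmp | MY_ENABLE_BIT);
+	</programlisting>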
+ <para>
+ If your device supports interrupt generation, you may want to
+ set up an interrupt handler at driver load time as well. This
+ is done using the drm_irq_install() function. If your device
+ supports vertical blank interrupts, it should call
+ drm_vblank_init() to initialize the core vblank handling code before
+ enabling interrupts on your device. This ensures the vblank related
+ structures are allocated and allows the core to handle vblank events.
+ </para>
+<!--!Fdrivers/char/drm/drm_irq.c drm_irq_install-->
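+	<para>
+	  A load-time sketch might look like this (the CRTC count of 2 is
+	  only an example; use whatever your device actually provides):
+	</para>
+	<programlisting>
+ ret = drm_vblank_init(dev, 2);    /* number of CRTCs */
+ if (ret)
+     return ret;
+
+ ret = drm_irq_install(dev);
+ if (ret)
+     return ret;
+	</programlisting>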
+ <para>
+ Once your interrupt handler is registered (it'll use your
+ drm_driver.irq_handler as the actual interrupt handling
+ function), you can safely enable interrupts on your device,
+ assuming any other state your interrupt handler uses is also
+ initialized.
+ </para>
+ <para>
+ Another task that may be necessary during configuration is
+ mapping the video BIOS. On many devices, the VBIOS describes
+ device configuration, LCD panel timings (if any), and contains
+ flags indicating device state. Mapping the BIOS can be done
+ using the pci_map_rom() call, a convenience function that
+ takes care of mapping the actual ROM, whether it has been
+ shadowed into memory (typically at address 0xc0000) or exists
+ on the PCI device in the ROM BAR. Once you've mapped the ROM
+ and extracted any necessary information, be sure to unmap it;
+ on many devices the ROM address decoder is
+ shared with other BARs, so leaving it mapped can cause
+ undesired behavior like hangs or memory corruption.
+<!--!Fdrivers/pci/rom.c pci_map_rom-->
+ </para>
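+	<para>
+	  A sketch of mapping, parsing and releasing the ROM:
+	</para>
+	<programlisting>
+ size_t size;
+ void __iomem *rom;
+
+ rom = pci_map_rom(dev->pdev, &amp;size);
+ if (rom) {
+     /* ... extract panel timings and other VBIOS data here ... */
+     pci_unmap_rom(dev->pdev, rom);
+ }
+	</programlisting>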
+ </sect2>
+
+ <sect2>
+ <title>Memory manager initialization</title>
+ <para>
+ In order to allocate command buffers, cursor memory, scanout
+ buffers, etc., as well as support the latest features provided
+ by packages like Mesa and the X.Org X server, your driver
+ should support a memory manager.
+ </para>
+ <para>
+ If your driver supports memory management (it should!), you'll
+ need to set that up at load time as well. How you initialize
+ it depends on which memory manager you're using, TTM or GEM.
+ </para>
+ <sect3>
+ <title>TTM initialization</title>
+ <para>
+ TTM (for Translation Table Manager) manages video memory and
+ aperture space for graphics devices. TTM supports both UMA devices
+ and devices with dedicated video RAM (VRAM), i.e. most discrete
+ graphics devices. If your device has dedicated RAM, supporting
+ TTM is desirable. TTM also integrates tightly with your
+ driver specific buffer execution function. See the radeon
+ driver for examples.
+ </para>
+ <para>
+ The core TTM structure is the ttm_bo_driver struct. It contains
+ several fields with function pointers for initializing the TTM,
+ allocating and freeing memory, waiting for command completion
+ and fence synchronization, and memory migration. See the
+ radeon_ttm.c file for an example of usage.
+ </para>
+ <para>
+ The ttm_global_reference structure is made up of several fields:
+ </para>
+ <programlisting>
+ struct ttm_global_reference {
+ enum ttm_global_types global_type;
+ size_t size;
+ void *object;
+ int (*init) (struct ttm_global_reference *);
+ void (*release) (struct ttm_global_reference *);
+ };
+ </programlisting>
+ <para>
+ There should be one global reference structure for your memory
+ manager as a whole, and there will be others for each object
+ created by the memory manager at runtime. Your global TTM should
+ have a type of TTM_GLOBAL_TTM_MEM. The size field for the global
+ object should be sizeof(struct ttm_mem_global), and the init and
+ release hooks should point at your driver specific init and
+ release routines, which will probably eventually call
+ ttm_mem_global_init and ttm_mem_global_release respectively.
+ </para>
+ <para>
+ Once your global TTM accounting structure is set up and initialized
+ (done by calling ttm_global_item_ref on the global object you
+ just created), you'll need to create a buffer object TTM to
+ provide a pool for buffer object allocation by clients and the
+ kernel itself. The type of this object should be TTM_GLOBAL_TTM_BO,
+ and its size should be sizeof(struct ttm_bo_global). Again,
+ driver specific init and release functions can be provided,
+ likely eventually calling ttm_bo_global_init and
+ ttm_bo_global_release, respectively. Also like the previous
+ object, ttm_global_item_ref is used to create an initial reference
+ count for the TTM, which will call your initialization function.
+ </para>
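+	  <para>
+	    Putting that together, the accounting object setup might look
+	    roughly like this (mem_global_ref and the my_ttm_* routines are
+	    hypothetical driver specific names; the init and release hooks
+	    are assumed to call ttm_mem_global_init and
+	    ttm_mem_global_release as described above):
+	  </para>
+	  <programlisting>
+ struct ttm_global_reference *global_ref = &amp;dev_priv->mem_global_ref;
+
+ global_ref->global_type = TTM_GLOBAL_TTM_MEM;
+ global_ref->size = sizeof(struct ttm_mem_global);
+ global_ref->object = NULL;
+ global_ref->init = &amp;my_ttm_mem_global_init;
+ global_ref->release = &amp;my_ttm_mem_global_release;
+ ret = ttm_global_item_ref(global_ref);
+ if (ret)
+     return ret;
+	  </programlisting>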
+ </sect3>
+ <sect3>
+ <title>GEM initialization</title>
+ <para>
+ GEM is an alternative to TTM, designed specifically for UMA
+ devices. It has simpler initialization and execution requirements
+ than TTM, but has no VRAM management capability. Core GEM
+ initialization is comprised of a basic drm_mm_init call to create
+ a GTT DRM MM object, which provides an address space pool for
+ object allocation. In a KMS configuration, the driver will
+ need to allocate and initialize a command ring buffer following
+ basic GEM initialization. Most UMA devices have a so-called
+ "stolen" memory region, which provides space for the initial
+ framebuffer and large, contiguous memory regions required by the
+ device. This space is not typically managed by GEM, and must
+ be initialized separately into its own DRM MM object.
+ </para>
+ <para>
+ Initialization will be driver specific, and will depend on
+ the architecture of the device. In the case of Intel
+ integrated graphics chips like 965GM, GEM initialization can
+ be done by calling the internal GEM init function,
+ i915_gem_do_init(). Since the 965GM is a UMA device
+ (i.e. it doesn't have dedicated VRAM), GEM will manage
+ making regular RAM available for GPU operations. Memory set
+ aside by the BIOS (called "stolen" memory by the i915
+ driver) will be managed by the DRM memrange allocator; the
+ rest of the aperture will be managed by GEM.
+ <programlisting>
+ /* Basic memrange allocator for stolen space (aka vram) */
+ drm_memrange_init(&amp;dev_priv->vram, 0, prealloc_size);
+ /* Let GEM Manage from end of prealloc space to end of aperture */
+ i915_gem_do_init(dev, prealloc_size, agp_size);
+ </programlisting>
+<!--!Edrivers/char/drm/drm_memrange.c-->
+ </para>
+ <para>
+ Once the memory manager has been set up, we can allocate the
+ command buffer. In the i915 case, this is also done with a
+ GEM function, i915_gem_init_ringbuffer().
+ </para>
+ </sect3>
+ </sect2>
+
+ <sect2>
+ <title>Output configuration</title>
+ <para>
+ The final initialization task is output configuration. This involves
+ finding and initializing the CRTCs, encoders and connectors
+ for your device, creating an initial configuration and
+ registering a framebuffer console driver.
+ </para>
+ <sect3>
+ <title>Output discovery and initialization</title>
+ <para>
+ Several core functions exist to create CRTCs, encoders and
+ connectors, namely drm_crtc_init(), drm_connector_init() and
+ drm_encoder_init(), along with several "helper" functions to
+ perform common tasks.
+ </para>
+ <para>
+ Connectors should be registered with sysfs once they've been
+ detected and initialized, using the
+ drm_sysfs_connector_add() function. Likewise, when they're
+ removed from the system, they should be destroyed with
+ drm_sysfs_connector_remove().
+ </para>
+ <programlisting>
+<![CDATA[
+void intel_crt_init(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ struct intel_output *intel_output;
+
+ intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
+ if (!intel_output)
+ return;
+
+ connector = &intel_output->base;
+ drm_connector_init(dev, &intel_output->base,
+ &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+ drm_encoder_init(dev, &intel_output->enc, &intel_crt_enc_funcs,
+ DRM_MODE_ENCODER_DAC);
+
+ drm_mode_connector_attach_encoder(&intel_output->base,
+ &intel_output->enc);
+
+ /* Set up the DDC bus. */
+ intel_output->ddc_bus = intel_i2c_create(dev, GPIOA, "CRTDDC_A");
+ if (!intel_output->ddc_bus) {
+ dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
+ "failed.\n");
+ return;
+ }
+
+ intel_output->type = INTEL_OUTPUT_ANALOG;
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ drm_encoder_helper_add(&intel_output->enc, &intel_crt_helper_funcs);
+ drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
+
+ drm_sysfs_connector_add(connector);
+}
+]]>
+ </programlisting>
+ <para>
+ In the example above (again, taken from the i915 driver), a
+ CRT connector and encoder combination is created. A device
+ specific i2c bus is also created, for fetching EDID data and
+ performing monitor detection. Once the process is complete,
+ the new connector is registered with sysfs, to make its
+ properties available to applications.
+ </para>
+ <sect4>
+ <title>Helper functions and core functions</title>
+ <para>
+ Since many PC-class graphics devices have similar display output
+ designs, the DRM provides a set of helper functions to make
+ output management easier. The core helper routines handle
+ encoder re-routing and disabling of unused functions following
+ mode set. Using the helpers is optional, but recommended for
+ devices with PC-style architectures (i.e. a set of display planes
+ for feeding pixels to encoders which are in turn routed to
+ connectors). Devices with more complex requirements needing
+ finer grained management can opt to use the core callbacks
+ directly.
+ </para>
+ <para>
+ [Insert typical diagram here.] [Insert OMAP style config here.]
+ </para>
+ </sect4>
+ <para>
+ For each encoder, CRTC and connector, several functions must
+ be provided, depending on the object type. Encoder objects
+ should provide a DPMS (basically on/off) function, mode fixup
+ (for converting requested modes into native hardware timings),
+ and prepare, set and commit functions for use by the core DRM
+ helper functions. Connector helpers need to provide mode fetch and
+ validity functions as well as an encoder matching function for
+ returning an ideal encoder for a given connector. The core
+ connector functions include a DPMS callback, (deprecated)
+ save/restore routines, detection, mode probing, property handling,
+ and cleanup functions.
+ </para>
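+	<para>
+	  As a sketch, the helper vtables an output driver fills in might
+	  look like the following (the my_* routines are placeholders for
+	  your driver's implementations):
+	</para>
+	<programlisting>
+ static const struct drm_encoder_helper_funcs my_encoder_helper_funcs = {
+     .dpms = my_encoder_dpms,
+     .mode_fixup = my_encoder_mode_fixup,
+     .prepare = my_encoder_prepare,
+     .mode_set = my_encoder_mode_set,
+     .commit = my_encoder_commit,
+ };
+
+ static const struct drm_connector_helper_funcs my_connector_helper_funcs = {
+     .get_modes = my_connector_get_modes,
+     .mode_valid = my_connector_mode_valid,
+     .best_encoder = my_connector_best_encoder,
+ };
+	</programlisting>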
+<!--!Edrivers/char/drm/drm_crtc.h-->
+<!--!Edrivers/char/drm/drm_crtc.c-->
+<!--!Edrivers/char/drm/drm_crtc_helper.c-->
+ </sect3>
+ </sect2>
+ </sect1>
+
+ <!-- Internals: vblank handling -->
+
+ <sect1>
+ <title>VBlank event handling</title>
+ <para>
+ The DRM core exposes two vertical blank related ioctls:
+ DRM_IOCTL_WAIT_VBLANK and DRM_IOCTL_MODESET_CTL.
+<!--!Edrivers/char/drm/drm_irq.c-->
+ </para>
+ <para>
+ DRM_IOCTL_WAIT_VBLANK takes a struct drm_wait_vblank structure
+ as its argument, and is used to block or request a signal when a
+ specified vblank event occurs.
+ </para>
+ <para>
+ DRM_IOCTL_MODESET_CTL should be called by application level
+ drivers before and after mode setting, since on many devices the
+ vertical blank counter will be reset at that time. Internally,
+ the DRM snapshots the last vblank count when the ioctl is called
+ with the _DRM_PRE_MODESET command so that the counter won't go
+ backwards (which is dealt with when _DRM_POST_MODESET is used).
+ </para>
+ <para>
+ To support the functions above, the DRM core provides several
+ helper functions for tracking vertical blank counters, and
+ requires drivers to provide several callbacks:
+ get_vblank_counter(), enable_vblank() and disable_vblank(). The
+ core uses get_vblank_counter() to keep the counter accurate
+ across interrupt disable periods. It should return the current
+ vertical blank event count, which is often tracked in a device
+ register. The enable and disable vblank callbacks should enable
+ and disable vertical blank interrupts, respectively. In the
+ absence of DRM clients waiting on vblank events, the core DRM
+ code will use the disable_vblank() function to disable
+ interrupts, which saves power. They'll be re-enabled again when
+ a client calls the vblank wait ioctl above.
+ </para>
+ <para>
+ Devices that don't provide a count register can simply use an
+ internal atomic counter incremented on every vertical blank
+ interrupt, and can make their enable and disable vblank
+ functions into no-ops.
+ </para>
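+    <para>
+      For example (a sketch; the my_* names are hypothetical, and the
+      driver's interrupt handler is assumed to increment the counter and
+      call drm_handle_vblank() on each vertical blank interrupt):
+    </para>
+    <programlisting>
+ static atomic_t my_vblank_count = ATOMIC_INIT(0);
+
+ u32 my_get_vblank_counter(struct drm_device *dev, int crtc)
+ {
+     return atomic_read(&amp;my_vblank_count);
+ }
+
+ int my_enable_vblank(struct drm_device *dev, int crtc)
+ {
+     return 0;    /* vblank interrupts stay enabled on this device */
+ }
+
+ void my_disable_vblank(struct drm_device *dev, int crtc)
+ {
+     /* nothing to do */
+ }
+    </programlisting>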
+ </sect1>
+
+ <sect1>
+ <title>Memory management</title>
+ <para>
+ The memory manager lies at the heart of many DRM operations, and
+ is also required to support advanced client features like OpenGL
+ pbuffers. The DRM currently contains two memory managers, TTM
+ and GEM.
+ </para>
+
+ <sect2>
+ <title>The Translation Table Manager (TTM)</title>
+ <para>
+ TTM was developed by Tungsten Graphics, primarily by Thomas
+ Hellström, and is intended to be a flexible, high performance
+ graphics memory manager.
+ </para>
+ <para>
+ Drivers wishing to support TTM must fill out a drm_bo_driver
+ structure.
+ </para>
+ <para>
+ TTM design background and information belongs here.
+ </para>
+ </sect2>
+
+ <sect2>
+ <title>The Graphics Execution Manager (GEM)</title>
+ <para>
+ GEM is an Intel project, authored by Eric Anholt and Keith
+ Packard. It provides simpler interfaces than TTM, and is well
+ suited for UMA devices.
+ </para>
+ <para>
+ GEM-enabled drivers must provide gem_init_object() and
+ gem_free_object() callbacks to support the core memory
+ allocation routines. They should also provide several driver
+ specific ioctls to support command execution, pinning, buffer
+ read &amp; write, mapping, and domain ownership transfers.
+ </para>
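+      <para>
+        A skeletal pair of object callbacks (my_* names are placeholders)
+        could look like:
+      </para>
+      <programlisting>
+ int my_gem_init_object(struct drm_gem_object *obj)
+ {
+     /* Allocate any per-object driver private state here. */
+     return 0;
+ }
+
+ void my_gem_free_object(struct drm_gem_object *obj)
+ {
+     /* Release per-object driver state and backing storage here. */
+ }
+      </programlisting>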
+ <para>
+ On a fundamental level, GEM involves several operations: memory
+ allocation and freeing, command execution, and aperture management
+ at command execution time. Buffer object allocation is relatively
+ straightforward and largely provided by Linux's shmem layer, which
+ provides memory to back each object. When mapped into the GTT
+ or used in a command buffer, the backing pages for an object are
+ flushed to memory and marked write combined so as to be coherent
+ with the GPU. Likewise, when the GPU finishes rendering to an object,
+ if the CPU accesses it, it must be made coherent with the CPU's view
+ of memory, usually involving GPU cache flushing of various kinds.
+ This core CPU&lt;-&gt;GPU coherency management is provided by the GEM
+ set domain function, which evaluates an object's current domain and
+ performs any necessary flushing or synchronization to put the object
+ into the desired coherency domain (note that the object may be busy,
+ i.e. an active render target; in that case the set domain function
+ will block the client and wait for rendering to complete before
+ performing any necessary flushing operations).
+ </para>
+ <para>
+ Perhaps the most important GEM function is providing a command
+ execution interface to clients. Client programs construct command
+ buffers containing references to previously allocated memory objects
+ and submit them to GEM. At that point, GEM will take care to bind
+ all the objects into the GTT, execute the buffer, and provide
+ necessary synchronization between clients accessing the same buffers.
+ This often involves evicting some objects from the GTT and re-binding
+ others (a fairly expensive operation), and providing relocation
+ support which hides fixed GTT offsets from clients. Clients must
+ take care not to submit command buffers that reference more objects
+ than can fit in the GTT or GEM will reject them and no rendering
+ will occur. Similarly, if several objects in the buffer require
+ fence registers to be allocated for correct rendering (e.g. 2D blits
+ on pre-965 chips), care must be taken not to require more fence
+ registers than are available to the client. Such resource management
+ should be abstracted from the client in libdrm.
+ </para>
+ </sect2>
+
+ </sect1>
+
+ <!-- Output management -->
+ <sect1>
+ <title>Output management</title>
+ <para>
+ At the core of the DRM output management code is a set of
+ structures representing CRTCs, encoders and connectors.
+ </para>
+ <para>
+ A CRTC is an abstraction representing a part of the chip that
+ contains a pointer to a scanout buffer. Therefore, the number
+ of CRTCs available determines how many independent scanout
+ buffers can be active at any given time. The CRTC structure
+ contains several fields to support this: a pointer to some video
+ memory, a display mode, and an (x, y) offset into the video
+ memory to support panning or configurations where one piece of
+ video memory spans multiple CRTCs.
+ </para>
+ <para>
+ An encoder takes pixel data from a CRTC and converts it to a
+ format suitable for any attached connectors. On some devices,
+ it may be possible to have a CRTC send data to more than one
+ encoder. In that case, both encoders would receive data from
+ the same scanout buffer, resulting in a "cloned" display
+ configuration across the connectors attached to each encoder.
+ </para>
+ <para>
+ A connector is the final destination for pixel data on a device,
+ and usually connects directly to an external display device like
+ a monitor or laptop panel. A connector can only be attached to
+ one encoder at a time. The connector is also the structure
+ where information about the attached display is kept, so it
+ contains fields for display data, EDID data, DPMS &amp;
+ connection status, and information about modes supported on the
+ attached displays.
+ </para>
+<!--!Edrivers/char/drm/drm_crtc.c-->
+ </sect1>
+
+ <sect1>
+ <title>Framebuffer management</title>
+ <para>
+ In order to set a mode on a given CRTC, encoder and connector
+ configuration, clients need to provide a framebuffer object which
+ will provide a source of pixels for the CRTC to deliver to the encoder(s)
+ and ultimately the connector(s) in the configuration. A framebuffer
+ is fundamentally a driver specific memory object, made into an opaque
+ handle by the DRM addfb function. Once an fb has been created this
+ way it can be passed to the KMS mode setting routines for use in
+ a configuration.
+ </para>
+ </sect1>
+
+ <sect1>
+ <title>Command submission &amp; fencing</title>
+ <para>
+ This should cover a few device specific command submission
+ implementations.
+ </para>
+ </sect1>
+
+ <sect1>
+ <title>Suspend/resume</title>
+ <para>
+ The DRM core provides some suspend/resume code, but drivers
+ wanting full suspend/resume support should provide save() and
+ restore() functions. These will be called at suspend,
+ hibernate, or resume time, and should perform any state save or
+ restore required by your device across suspend or hibernate
+ states.
+ </para>
+ </sect1>
+
+ <sect1>
+ <title>DMA services</title>
+ <para>
+ This should cover how DMA mapping etc. is supported by the core.
+ These functions are deprecated and should not be used.
+ </para>
+ </sect1>
+ </chapter>
+
+ <!-- External interfaces -->
+
+ <chapter id="drmExternals">
+ <title>Userland interfaces</title>
+ <para>
+ The DRM core exports several interfaces to applications,
+ generally intended to be used through corresponding libdrm
+ wrapper functions. In addition, drivers export device specific
+ interfaces for use by userspace drivers &amp; device aware
+ applications through ioctls and sysfs files.
+ </para>
+ <para>
+ External interfaces include: memory mapping, context management,
+ DMA operations, AGP management, vblank control, fence
+ management, memory management, and output management.
+ </para>
+ <para>
+ Cover generic ioctls and sysfs layout here. Only need high
+ level info, since man pages will cover the rest.
+ </para>
+ </chapter>
+
+ <!-- API reference -->
+
+ <appendix id="drmDriverApi">
+ <title>DRM Driver API</title>
+ <para>
+ Include auto-generated API reference here (need to reference it
+ from paragraphs above too).
+ </para>
+ </appendix>
+
+</book>
diff --git a/Documentation/DocBook/kgdb.tmpl b/Documentation/DocBook/kgdb.tmpl
index 5cff41a5fa7c..55f12ac37acd 100644
--- a/Documentation/DocBook/kgdb.tmpl
+++ b/Documentation/DocBook/kgdb.tmpl
@@ -4,7 +4,7 @@
<book id="kgdbOnLinux">
<bookinfo>
- <title>Using kgdb and the kgdb Internals</title>
+ <title>Using kgdb, kdb and the kernel debugger internals</title>
<authorgroup>
<author>
@@ -17,33 +17,8 @@
</affiliation>
</author>
</authorgroup>
-
- <authorgroup>
- <author>
- <firstname>Tom</firstname>
- <surname>Rini</surname>
- <affiliation>
- <address>
- <email>trini@kernel.crashing.org</email>
- </address>
- </affiliation>
- </author>
- </authorgroup>
-
- <authorgroup>
- <author>
- <firstname>Amit S.</firstname>
- <surname>Kale</surname>
- <affiliation>
- <address>
- <email>amitkale@linsyssoft.com</email>
- </address>
- </affiliation>
- </author>
- </authorgroup>
-
<copyright>
- <year>2008</year>
+ <year>2008,2010</year>
<holder>Wind River Systems, Inc.</holder>
</copyright>
<copyright>
@@ -69,41 +44,76 @@
<chapter id="Introduction">
<title>Introduction</title>
<para>
- kgdb is a source level debugger for linux kernel. It is used along
- with gdb to debug a linux kernel. The expectation is that gdb can
- be used to "break in" to the kernel to inspect memory, variables
- and look through call stack information similar to what an
- application developer would use gdb for. It is possible to place
- breakpoints in kernel code and perform some limited execution
- stepping.
+ The kernel has two different debugger front ends (kdb and kgdb)
+ which interface to the debug core. It is possible to use either
+ of the debugger front ends and dynamically transition between them
+ if you configure the kernel properly at compile and runtime.
+ </para>
+ <para>
+ Kdb is a simplistic shell-style interface which you can use on a
+ system console with a keyboard or serial console. You can use it
+ to inspect memory, registers, process lists, dmesg, and even set
+ breakpoints to stop in a certain location. Kdb is not a source
+ level debugger, although you can set breakpoints and execute some
+ basic kernel run control. Kdb is mainly aimed at doing some
+ analysis to aid in development or diagnosing kernel problems. You
+ can access some symbols by name in kernel built-ins or in kernel
+ modules if the code was built
+ with <symbol>CONFIG_KALLSYMS</symbol>.
+ </para>
+ <para>
+ Kgdb is intended to be used as a source level debugger for the
+ Linux kernel. It is used along with gdb to debug a Linux kernel.
+ The expectation is that gdb can be used to "break in" to the
+ kernel to inspect memory, variables and look through call stack
+ information similar to the way an application developer would use
+ gdb to debug an application. It is possible to place breakpoints
+ in kernel code and perform some limited execution stepping.
</para>
<para>
- Two machines are required for using kgdb. One of these machines is a
- development machine and the other is a test machine. The kernel
- to be debugged runs on the test machine. The development machine
- runs an instance of gdb against the vmlinux file which contains
- the symbols (not boot image such as bzImage, zImage, uImage...).
- In gdb the developer specifies the connection parameters and
- connects to kgdb. The type of connection a developer makes with
- gdb depends on the availability of kgdb I/O modules compiled as
- builtin's or kernel modules in the test machine's kernel.
+ Two machines are required for using kgdb. One of these machines is
+ a development machine and the other is the target machine. The
+ kernel to be debugged runs on the target machine. The development
+ machine runs an instance of gdb against the vmlinux file which
+ contains the symbols (not boot image such as bzImage, zImage,
+ uImage...). In gdb the developer specifies the connection
+ parameters and connects to kgdb. The type of connection a
+ developer makes with gdb depends on the availability of kgdb I/O
+ modules compiled as built-ins or loadable kernel modules in the target
+ machine's kernel.
</para>
</chapter>
<chapter id="CompilingAKernel">
- <title>Compiling a kernel</title>
+ <title>Compiling a kernel</title>
+ <para>
+ <itemizedlist>
+ <listitem><para>In order to enable compilation of kdb, you must first enable kgdb.</para></listitem>
+ <listitem><para>The kgdb test compile options are described in the kgdb test suite chapter.</para></listitem>
+ </itemizedlist>
+ </para>
+ <sect1 id="CompileKGDB">
+ <title>Kernel config options for kgdb</title>
<para>
To enable <symbol>CONFIG_KGDB</symbol> you should first turn on
"Prompt for development and/or incomplete code/drivers"
(CONFIG_EXPERIMENTAL) in "General setup", then under the
- "Kernel debugging" select "KGDB: kernel debugging with remote gdb".
+ "Kernel debugging" select "KGDB: kernel debugger".
+ </para>
+ <para>
+ While it is not a hard requirement that you have symbols in your
+ vmlinux file, gdb tends not to be very useful without the symbolic
+ data, so you will want to turn
+ on <symbol>CONFIG_DEBUG_INFO</symbol> which is called "Compile the
+ kernel with debug info" in the config menu.
</para>
<para>
It is advised, but not required that you turn on the
- CONFIG_FRAME_POINTER kernel option. This option inserts code to
- into the compiled executable which saves the frame information in
- registers or on the stack at different points which will allow a
- debugger such as gdb to more accurately construct stack back traces
- while debugging the kernel.
+ <symbol>CONFIG_FRAME_POINTER</symbol> kernel option which is called "Compile the
+ kernel with frame pointers" in the config menu. This option
+ inserts code into the compiled executable which saves the frame
+ information in registers or on the stack at different points which
+ allows a debugger such as gdb to more accurately construct
+ stack back traces while debugging the kernel.
</para>
<para>
If the architecture that you are using supports the kernel option
@@ -116,38 +126,160 @@
this option.
</para>
<para>
- Next you should choose one of more I/O drivers to interconnect debugging
- host and debugged target. Early boot debugging requires a KGDB
- I/O driver that supports early debugging and the driver must be
- built into the kernel directly. Kgdb I/O driver configuration
- takes place via kernel or module parameters, see following
- chapter.
+ Next you should choose one or more I/O drivers to interconnect
+ debugging host and debugged target. Early boot debugging requires
+ a KGDB I/O driver that supports early debugging and the driver
+ must be built into the kernel directly. Kgdb I/O driver
+ configuration takes place via kernel or module parameters which
+ you can learn more about in the section that describes the
+ parameter "kgdboc".
</para>
- <para>
- The kgdb test compile options are described in the kgdb test suite chapter.
+ <para>Here is an example set of .config symbols to enable or
+ disable for kgdb:
+ <itemizedlist>
+ <listitem><para># CONFIG_DEBUG_RODATA is not set</para></listitem>
+ <listitem><para>CONFIG_FRAME_POINTER=y</para></listitem>
+ <listitem><para>CONFIG_KGDB=y</para></listitem>
+ <listitem><para>CONFIG_KGDB_SERIAL_CONSOLE=y</para></listitem>
+ </itemizedlist>
</para>
-
+ </sect1>
+ <sect1 id="CompileKDB">
+ <title>Kernel config options for kdb</title>
+ <para>Kdb is quite a bit more complex than the simple gdbstub
+ sitting on top of the kernel's debug core. Kdb must implement a
+ shell, and also adds some helper functions in other parts of the
+ kernel, responsible for printing out interesting data such as what
+ you would see if you ran "lsmod", or "ps". In order to build kdb
+ into the kernel you follow the same steps as you would for kgdb.
+ </para>
+ <para>The main config option for kdb
+ is <symbol>CONFIG_KGDB_KDB</symbol> which is called "KGDB_KDB:
+ include kdb frontend for kgdb" in the config menu. In theory you
+ would have already also selected an I/O driver such as the
+ CONFIG_KGDB_SERIAL_CONSOLE interface if you plan on using kdb on a
+ serial port, when you were configuring kgdb.
+ </para>
+ <para>If you want to use a PS/2-style keyboard with kdb, you would
+ select CONFIG_KDB_KEYBOARD which is called "KGDB_KDB: keyboard as
+ input device" in the config menu. The CONFIG_KDB_KEYBOARD option
+ is not used for anything in the gdb interface to kgdb. The
+ CONFIG_KDB_KEYBOARD option only works with kdb.
+ </para>
+ <para>Here is an example set of .config symbols to enable/disable kdb:
+ <itemizedlist>
+ <listitem><para># CONFIG_DEBUG_RODATA is not set</para></listitem>
+ <listitem><para>CONFIG_FRAME_POINTER=y</para></listitem>
+ <listitem><para>CONFIG_KGDB=y</para></listitem>
+ <listitem><para>CONFIG_KGDB_SERIAL_CONSOLE=y</para></listitem>
+ <listitem><para>CONFIG_KGDB_KDB=y</para></listitem>
+ <listitem><para>CONFIG_KDB_KEYBOARD=y</para></listitem>
+ </itemizedlist>
+ </para>
+ </sect1>
</chapter>
- <chapter id="EnableKGDB">
- <title>Enable kgdb for debugging</title>
- <para>
- In order to use kgdb you must activate it by passing configuration
- information to one of the kgdb I/O drivers. If you do not pass any
- configuration information kgdb will not do anything at all. Kgdb
- will only actively hook up to the kernel trap hooks if a kgdb I/O
- driver is loaded and configured. If you unconfigure a kgdb I/O
- driver, kgdb will unregister all the kernel hook points.
+ <chapter id="kgdbKernelArgs">
+ <title>Kernel Debugger Boot Arguments</title>
+ <para>This section describes the various runtime kernel
+ parameters that affect the configuration of the kernel debugger.
+ The following chapter covers using kdb and kgdb as well as
+ provides some examples of the configuration parameters.</para>
+ <sect1 id="kgdboc">
+ <title>Kernel parameter: kgdboc</title>
+ <para>The name of the kgdboc driver was originally an abbreviation
+ for "kgdb over console". Today it is the primary mechanism
+ to configure how to communicate from gdb to kgdb as well as the
+ devices you want to use to interact with the kdb shell.
+ </para>
+ <para>For kgdb/gdb, kgdboc is designed to work with a single serial
+ port. It is intended to cover the circumstance where you want to
+ use a serial console as your primary console as well as using it to
+ perform kernel debugging. It is also possible to use kgdb on a
+ serial port which is not designated as a system console. Kgdboc
+ may be configured as a kernel built-in or a kernel loadable module.
+ You can only make use of <constant>kgdbwait</constant> and early
+ debugging if you build kgdboc into the kernel as a built-in.
</para>
+ <sect2 id="kgdbocArgs">
+ <title>kgdboc arguments</title>
+ <para>Usage: <constant>kgdboc=[kbd][[,]serial_device][,baud]</constant></para>
+ <sect3 id="kgdbocArgs1">
+ <title>Using loadable module or built-in</title>
<para>
- All drivers can be reconfigured at run time, if
- <symbol>CONFIG_SYSFS</symbol> and <symbol>CONFIG_MODULES</symbol>
- are enabled, by echo'ing a new config string to
- <constant>/sys/module/&lt;driver&gt;/parameter/&lt;option&gt;</constant>.
- The driver can be unconfigured by passing an empty string. You cannot
- change the configuration while the debugger is attached. Make sure
- to detach the debugger with the <constant>detach</constant> command
- prior to trying unconfigure a kgdb I/O driver.
+ <orderedlist>
+ <listitem><para>As a kernel built-in:</para>
+ <para>Use the kernel boot argument: <constant>kgdboc=&lt;tty-device&gt;,[baud]</constant></para></listitem>
+ <listitem>
+ <para>As a kernel loadable module:</para>
+ <para>Use the command: <constant>modprobe kgdboc kgdboc=&lt;tty-device&gt;,[baud]</constant></para>
+ <para>Here are two examples of how you might format the kgdboc
+ string. The first is for an x86 target using the first serial port.
+ The second example is for the ARM Versatile AB using the second
+ serial port.
+ <orderedlist>
+ <listitem><para><constant>kgdboc=ttyS0,115200</constant></para></listitem>
+ <listitem><para><constant>kgdboc=ttyAMA1,115200</constant></para></listitem>
+ </orderedlist>
</para>
+ </listitem>
+ </orderedlist></para>
+ </sect3>
+ <sect3 id="kgdbocArgs2">
+ <title>Configure kgdboc at runtime with sysfs</title>
+ <para>At run time you can enable or disable kgdboc by echoing
+ parameters into sysfs. Here are two examples:</para>
+ <orderedlist>
+ <listitem><para>Enable kgdboc on ttyS0</para>
+ <para><constant>echo ttyS0 &gt; /sys/module/kgdboc/parameters/kgdboc</constant></para></listitem>
+ <listitem><para>Disable kgdboc</para>
+ <para><constant>echo "" &gt; /sys/module/kgdboc/parameters/kgdboc</constant></para></listitem>
+ </orderedlist>
+ <para>NOTE: You do not need to specify the baud if you are
+ configuring the console on a tty which is already configured or
+ open.</para>
+ </sect3>
+ <sect3 id="kgdbocArgs3">
+ <title>More examples</title>
+ <para>You can configure kgdboc to use the keyboard and/or a serial device,
+ depending on whether you are using kdb and/or kgdb, in one of the
+ following scenarios.
+ <orderedlist>
+ <listitem><para>kdb and kgdb over only a serial port</para>
+ <para><constant>kgdboc=&lt;serial_device&gt;[,baud]</constant></para>
+ <para>Example: <constant>kgdboc=ttyS0,115200</constant></para>
+ </listitem>
+ <listitem><para>kdb and kgdb with keyboard and a serial port</para>
+ <para><constant>kgdboc=kbd,&lt;serial_device&gt;[,baud]</constant></para>
+ <para>Example: <constant>kgdboc=kbd,ttyS0,115200</constant></para>
+ </listitem>
+ <listitem><para>kdb with a keyboard</para>
+ <para><constant>kgdboc=kbd</constant></para>
+ </listitem>
+ </orderedlist>
+ </para>
+ </sect3>
+ <para>NOTE: Kgdboc does not support interrupting the target via the
+ gdb remote protocol. You must manually send a sysrq-g unless you
+ have a proxy that splits console output to a terminal program.
+ A console proxy has a separate TCP port for the debugger and a separate
+ TCP port for the "human" console. The proxy can take care of sending
+ the sysrq-g for you.
+ </para>
+ <para>When using kgdboc with no debugger proxy, you can end up
+ connecting the debugger at one of two entry points. If an
+ exception occurs after you have loaded kgdboc, a message should
+ print on the console stating it is waiting for the debugger. In
+ this case you disconnect your terminal program and then connect the
+ debugger in its place. If you want to interrupt the target system
+ and forcibly enter a debug session you have to issue a Sysrq
+ sequence and then type the letter <constant>g</constant>. Then
+ you disconnect the terminal session and connect gdb. Your options
+ if you don't like this are to hack gdb to send the sysrq-g for you
+ as well as on the initial connect, or to use a debugger proxy that
+ allows an unmodified gdb to do the debugging.
+ </para>
+ </sect2>
+ </sect1>
<sect1 id="kgdbwait">
<title>Kernel parameter: kgdbwait</title>
<para>
@@ -162,103 +294,204 @@
</para>
<para>
The kernel will stop and wait as early as the I/O driver and
- architecture will allow when you use this option. If you build the
- kgdb I/O driver as a kernel module kgdbwait will not do anything.
+ architecture allows when you use this option. If you build the
+ kgdb I/O driver as a loadable kernel module kgdbwait will not do
+ anything.
</para>
</sect1>
- <sect1 id="kgdboc">
- <title>Kernel parameter: kgdboc</title>
- <para>
- The kgdboc driver was originally an abbreviation meant to stand for
- "kgdb over console". Kgdboc is designed to work with a single
- serial port. It was meant to cover the circumstance
- where you wanted to use a serial console as your primary console as
- well as using it to perform kernel debugging. Of course you can
- also use kgdboc without assigning a console to the same port.
+ <sect1 id="kgdbcon">
+ <title>Kernel parameter: kgdbcon</title>
+ <para> The kgdbcon feature allows you to see printk() messages
+ inside gdb while gdb is connected to the kernel. Kdb does not make
+ use of the kgdbcon feature.
+ </para>
+ <para>Kgdb supports using the gdb serial protocol to send console
+ messages to the debugger when the debugger is connected and running.
+ There are two ways to activate this feature.
+ <orderedlist>
+ <listitem><para>Activate with the kernel command line option:</para>
+ <para><constant>kgdbcon</constant></para>
+ </listitem>
+ <listitem><para>Use sysfs before configuring an I/O driver</para>
+ <para>
+ <constant>echo 1 &gt; /sys/module/kgdb/parameters/kgdb_use_con</constant>
+ </para>
+ <para>
+ NOTE: If you do this after you configure the kgdb I/O driver, the
+ setting will not take effect until the next point the I/O is
+ reconfigured.
+ </para>
+ </listitem>
+ </orderedlist>
+ </para>
+ <para>IMPORTANT NOTE: You cannot use kgdboc + kgdbcon on a tty that is an
+ active system console. An example incorrect usage is <constant>console=ttyS0,115200 kgdboc=ttyS0 kgdbcon</constant>
+ </para>
+ <para>It is possible to use this option with kgdboc on a tty that is not a system console.
+ </para>
- <sect2 id="UsingKgdboc">
- <title>Using kgdboc</title>
- <para>
- You can configure kgdboc via sysfs or a module or kernel boot line
- parameter depending on if you build with CONFIG_KGDBOC as a module
- or built-in.
- <orderedlist>
- <listitem><para>From the module load or build-in</para>
- <para><constant>kgdboc=&lt;tty-device&gt;,[baud]</constant></para>
+ </sect1>
+ </chapter>
+ <chapter id="usingKDB">
+ <title>Using kdb</title>
<para>
- The example here would be if your console port was typically ttyS0, you would use something like <constant>kgdboc=ttyS0,115200</constant> or on the ARM Versatile AB you would likely use <constant>kgdboc=ttyAMA0,115200</constant>
+ </para>
+ <sect1 id="quickKDBserial">
+ <title>Quick start for kdb on a serial port</title>
+ <para>This is a quick example of how to use kdb.</para>
+ <para><orderedlist>
+ <listitem><para>Boot kernel with arguments:
+ <itemizedlist>
+ <listitem><para><constant>console=ttyS0,115200 kgdboc=ttyS0,115200</constant></para></listitem>
+ </itemizedlist></para>
+ <para>OR</para>
+ <para>Configure kgdboc after the kernel booted; assuming you are using a serial port console:
+ <itemizedlist>
+ <listitem><para><constant>echo ttyS0 &gt; /sys/module/kgdboc/parameters/kgdboc</constant></para></listitem>
+ </itemizedlist>
</para>
</listitem>
- <listitem><para>From sysfs</para>
- <para><constant>echo ttyS0 &gt; /sys/module/kgdboc/parameters/kgdboc</constant></para>
+ <listitem><para>Enter the kernel debugger manually or by waiting for an oops or fault. There are several ways you can enter the kernel debugger manually; all involve using the sysrq-g, which means you must have enabled CONFIG_MAGIC_SYSRQ=y in your kernel config.</para>
+ <itemizedlist>
+ <listitem><para>When logged in as root or with a super user session you can run:</para>
+ <para><constant>echo g &gt; /proc/sysrq-trigger</constant></para></listitem>
+ <listitem><para>Example using minicom 2.2</para>
+ <para>Press: <constant>Control-a</constant></para>
+ <para>Press: <constant>f</constant></para>
+ <para>Press: <constant>g</constant></para>
</listitem>
- </orderedlist>
- </para>
- <para>
- NOTE: Kgdboc does not support interrupting the target via the
- gdb remote protocol. You must manually send a sysrq-g unless you
- have a proxy that splits console output to a terminal problem and
- has a separate port for the debugger to connect to that sends the
- sysrq-g for you.
+ <listitem><para>When you have telneted to a terminal server that supports sending a remote break</para>
+ <para>Press: <constant>Control-]</constant></para>
+ <para>Type in:<constant>send break</constant></para>
+ <para>Press: <constant>Enter</constant></para>
+ <para>Press: <constant>g</constant></para>
+ </listitem>
+ </itemizedlist>
+ </listitem>
+ <listitem><para>From the kdb prompt you can run the "help" command to see a complete list of the commands that are available.</para>
+ <para>Some useful commands in kdb include:
+ <itemizedlist>
+ <listitem><para>lsmod -- Shows where kernel modules are loaded</para></listitem>
+ <listitem><para>ps -- Displays only the active processes</para></listitem>
+ <listitem><para>ps A -- Shows all the processes</para></listitem>
+ <listitem><para>summary -- Shows kernel version info and memory usage</para></listitem>
+ <listitem><para>bt -- Get a backtrace of the current process using dump_stack()</para></listitem>
+ <listitem><para>dmesg -- View the kernel syslog buffer</para></listitem>
+ <listitem><para>go -- Continue the system</para></listitem>
+ </itemizedlist>
</para>
- <para>When using kgdboc with no debugger proxy, you can end up
- connecting the debugger for one of two entry points. If an
- exception occurs after you have loaded kgdboc a message should print
- on the console stating it is waiting for the debugger. In case you
- disconnect your terminal program and then connect the debugger in
- its place. If you want to interrupt the target system and forcibly
- enter a debug session you have to issue a Sysrq sequence and then
- type the letter <constant>g</constant>. Then you disconnect the
- terminal session and connect gdb. Your options if you don't like
- this are to hack gdb to send the sysrq-g for you as well as on the
- initial connect, or to use a debugger proxy that allows an
- unmodified gdb to do the debugging.
+ </listitem>
+ <listitem>
+ <para>When you are done using kdb you need to consider rebooting the
+ system or using the "go" command to resume normal kernel
+ execution. If you have paused the kernel for a lengthy period of
+ time, applications that rely on timely networking or anything to do
+ with real wall clock time could be adversely affected, so you
+ should take this into consideration when using the kernel
+ debugger.</para>
+ </listitem>
+ </orderedlist></para>
+ </sect1>
+ <sect1 id="quickKDBkeyboard">
+ <title>Quick start for kdb using a keyboard connected console</title>
+ <para>This is a quick example of how to use kdb with a keyboard.</para>
+ <para><orderedlist>
+ <listitem><para>Boot kernel with arguments:
+ <itemizedlist>
+ <listitem><para><constant>kgdboc=kbd</constant></para></listitem>
+ </itemizedlist></para>
+ <para>OR</para>
+ <para>Configure kgdboc after the kernel booted:
+ <itemizedlist>
+ <listitem><para><constant>echo kbd &gt; /sys/module/kgdboc/parameters/kgdboc</constant></para></listitem>
+ </itemizedlist>
</para>
- </sect2>
+ </listitem>
+ <listitem><para>Enter the kernel debugger manually or by waiting for an oops or fault. There are several ways you can enter the kernel debugger manually; all involve using the sysrq-g, which means you must have enabled CONFIG_MAGIC_SYSRQ=y in your kernel config.</para>
+ <itemizedlist>
+ <listitem><para>When logged in as root or with a super user session you can run:</para>
+ <para><constant>echo g &gt; /proc/sysrq-trigger</constant></para></listitem>
+ <listitem><para>Example using a laptop keyboard</para>
+ <para>Press and hold down: <constant>Alt</constant></para>
+ <para>Press and hold down: <constant>Fn</constant></para>
+ <para>Press and release the key with the label: <constant>SysRq</constant></para>
+ <para>Release: <constant>Fn</constant></para>
+ <para>Press and release: <constant>g</constant></para>
+ <para>Release: <constant>Alt</constant></para>
+ </listitem>
+ <listitem><para>Example using a PS/2 101-key keyboard</para>
+ <para>Press and hold down: <constant>Alt</constant></para>
+ <para>Press and release the key with the label: <constant>SysRq</constant></para>
+ <para>Press and release: <constant>g</constant></para>
+ <para>Release: <constant>Alt</constant></para>
+ </listitem>
+ </itemizedlist>
+ </listitem>
+ <listitem>
+ <para>Now type in a kdb command such as "help", "dmesg", "bt" or "go" to continue kernel execution.</para>
+ </listitem>
+ </orderedlist></para>
</sect1>
- <sect1 id="kgdbcon">
- <title>Kernel parameter: kgdbcon</title>
- <para>
- Kgdb supports using the gdb serial protocol to send console messages
- to the debugger when the debugger is connected and running. There
- are two ways to activate this feature.
+ </chapter>
+ <chapter id="EnableKGDB">
+ <title>Using kgdb / gdb</title>
+ <para>In order to use kgdb you must activate it by passing
+ configuration information to one of the kgdb I/O drivers. If you
+ do not pass any configuration information kgdb will not do anything
+ at all. Kgdb will only actively hook up to the kernel trap hooks
+ if a kgdb I/O driver is loaded and configured. If you unconfigure
+ a kgdb I/O driver, kgdb will unregister all the kernel hook points.
+ </para>
+ <para> All kgdb I/O drivers can be reconfigured at run time, if
+ <symbol>CONFIG_SYSFS</symbol> and <symbol>CONFIG_MODULES</symbol>
+ are enabled, by echo'ing a new config string to
+ <constant>/sys/module/&lt;driver&gt;/parameter/&lt;option&gt;</constant>.
+ The driver can be unconfigured by passing an empty string. You cannot
+ change the configuration while the debugger is attached. Make sure
+ to detach the debugger with the <constant>detach</constant> command
+ prior to trying to unconfigure a kgdb I/O driver.
+ </para>
+ <sect1 id="ConnectingGDB">
+ <title>Connecting with gdb to a serial port</title>
<orderedlist>
- <listitem><para>Activate with the kernel command line option:</para>
- <para><constant>kgdbcon</constant></para>
+ <listitem><para>Configure kgdboc</para>
+ <para>Boot kernel with arguments:
+ <itemizedlist>
+ <listitem><para><constant>kgdboc=ttyS0,115200</constant></para></listitem>
+ </itemizedlist></para>
+ <para>OR</para>
+ <para>Configure kgdboc after the kernel booted:
+ <itemizedlist>
+ <listitem><para><constant>echo ttyS0 &gt; /sys/module/kgdboc/parameters/kgdboc</constant></para></listitem>
+ </itemizedlist></para>
</listitem>
- <listitem><para>Use sysfs before configuring an io driver</para>
- <para>
- <constant>echo 1 &gt; /sys/module/kgdb/parameters/kgdb_use_con</constant>
- </para>
- <para>
- NOTE: If you do this after you configure the kgdb I/O driver, the
- setting will not take effect until the next point the I/O is
- reconfigured.
- </para>
+ <listitem>
+ <para>Stop kernel execution (break into the debugger)</para>
+ <para>In order to connect to gdb via kgdboc, the kernel must
+ first be stopped. There are several ways to stop the kernel which
+ include using kgdbwait as a boot argument, via a sysrq-g, or running
+ the kernel until it takes an exception where it waits for the
+ debugger to attach.
+ <itemizedlist>
+ <listitem><para>When logged in as root or with a super user session you can run:</para>
+ <para><constant>echo g &gt; /proc/sysrq-trigger</constant></para></listitem>
+ <listitem><para>Example using minicom 2.2</para>
+ <para>Press: <constant>Control-a</constant></para>
+ <para>Press: <constant>f</constant></para>
+ <para>Press: <constant>g</constant></para>
</listitem>
- </orderedlist>
- </para>
- <para>
- IMPORTANT NOTE: Using this option with kgdb over the console
- (kgdboc) is not supported.
+ <listitem><para>When you have telneted to a terminal server that supports sending a remote break</para>
+ <para>Press: <constant>Control-]</constant></para>
+ <para>Type in:<constant>send break</constant></para>
+ <para>Press: <constant>Enter</constant></para>
+ <para>Press: <constant>g</constant></para>
+ </listitem>
+ </itemizedlist>
</para>
- </sect1>
- </chapter>
- <chapter id="ConnectingGDB">
- <title>Connecting gdb</title>
- <para>
- If you are using kgdboc, you need to have used kgdbwait as a boot
- argument, issued a sysrq-g, or the system you are going to debug
- has already taken an exception and is waiting for the debugger to
- attach before you can connect gdb.
- </para>
- <para>
- If you are not using different kgdb I/O driver other than kgdboc,
- you should be able to connect and the target will automatically
- respond.
- </para>
+ </listitem>
+ <listitem>
+ <para>Connect from gdb</para>
<para>
- Example (using a serial port):
+ Example (using a directly connected port):
</para>
<programlisting>
% gdb ./vmlinux
@@ -266,7 +499,7 @@
(gdb) target remote /dev/ttyS0
</programlisting>
<para>
- Example (kgdb to a terminal server on tcp port 2012):
+ Example (kgdb to a terminal server on TCP port 2012):
</para>
<programlisting>
% gdb ./vmlinux
@@ -283,6 +516,83 @@
communications. You do this prior to issuing the <constant>target
remote</constant> command by typing in: <constant>set debug remote 1</constant>
</para>
+ </listitem>
+ </orderedlist>
+ <para>Remember if you continue in gdb, and need to "break in" again,
+ you need to issue another sysrq-g. It is easy to create a simple
+ entry point by putting a breakpoint at <constant>sys_sync</constant>
+ and then you can run "sync" from a shell or script to break into the
+ debugger.</para>
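+ <para>For example, a possible session (sketch):</para>
+ <programlisting>
+ (gdb) break sys_sync
+ (gdb) continue
+ </programlisting>
+ <para>Then run <constant>sync</constant> from a shell on the target to hit the
+ breakpoint and drop back into the debugger.</para>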
+ </sect1>
+ </chapter>
+ <chapter id="switchKdbKgdb">
+ <title>kgdb and kdb interoperability</title>
+ <para>It is possible to transition between kdb and kgdb dynamically.
+ The debug core will remember which you used the last time and
+ automatically start in the same mode.</para>
+ <sect1>
+ <title>Switching between kdb and kgdb</title>
+ <sect2>
+ <title>Switching from kgdb to kdb</title>
+ <para>
+ There are two ways to switch from kgdb to kdb: you can use gdb to
+ issue a maintenance packet, or you can blindly type the command $3#33.
+ Whenever the kernel debugger stops in kgdb mode it will print the
+ message <constant>KGDB or $3#33 for KDB</constant>. It is important
+ to note that you have to type the sequence correctly in one pass.
+ You cannot type a backspace or delete because kgdb will interpret
+ that as part of the debug stream.
+ <orderedlist>
+ <listitem><para>Change from kgdb to kdb by blindly typing:</para>
+ <para><constant>$3#33</constant></para></listitem>
+ <listitem><para>Change from kgdb to kdb with gdb</para>
+ <para><constant>maintenance packet 3</constant></para>
+ <para>NOTE: Now you must kill gdb. Typically you press control-z and
+ issue the command: kill -9 %</para></listitem>
+ </orderedlist>
+ </para>
+ </sect2>
+ <sect2>
+ <title>Change from kdb to kgdb</title>
+ <para>There are two ways you can change from kdb to kgdb. You can
+ manually enter kgdb mode by issuing the kgdb command from the kdb
+ shell prompt, or you can connect gdb while the kdb shell prompt is
+ active. The kdb shell looks for the typical first commands that gdb
+ would issue with the gdb remote protocol and if it sees one of those
+ commands it automatically changes into kgdb mode.</para>
+ <orderedlist>
+ <listitem><para>From kdb issue the command:</para>
+ <para><constant>kgdb</constant></para>
+ <para>Now disconnect your terminal program and connect gdb in its place</para></listitem>
+ <listitem><para>At the kdb prompt, disconnect the terminal program and connect gdb in its place.</para></listitem>
+ </orderedlist>
+ </sect2>
+ </sect1>
+ <sect1>
+ <title>Running kdb commands from gdb</title>
+ <para>It is possible to run a limited set of kdb commands from gdb,
+ using the gdb monitor command. You don't want to execute any of the
+ run control or breakpoint operations, because it can disrupt the
+ state of the kernel debugger. You should be using gdb for
+ breakpoints and run control operations if you have gdb connected.
+ The more useful commands to run are things like lsmod, dmesg, ps or
+ possibly some of the memory information commands. To see all the kdb
+ commands you can run <constant>monitor help</constant>.</para>
+ <para>Example:
+ <informalexample><programlisting>
+(gdb) monitor ps
+1 idle process (state I) and
+27 sleeping system daemon (state M) processes suppressed,
+use 'ps A' to see all.
+Task Addr Pid Parent [*] cpu State Thread Command
+
+0xc78291d0 1 0 0 0 S 0xc7829404 init
+0xc7954150 942 1 0 0 S 0xc7954384 dropbear
+0xc78789c0 944 1 0 0 S 0xc7878bf4 sh
+(gdb)
+ </programlisting></informalexample>
+ </para>
+ </sect1>
</chapter>
<chapter id="KGDBTestSuite">
<title>kgdb Test Suite</title>
@@ -309,34 +619,36 @@
</para>
</chapter>
<chapter id="CommonBackEndReq">
- <title>KGDB Internals</title>
+ <title>Kernel Debugger Internals</title>
<sect1 id="kgdbArchitecture">
<title>Architecture Specifics</title>
<para>
- Kgdb is organized into three basic components:
+ The kernel debugger is organized into a number of components:
<orderedlist>
- <listitem><para>kgdb core</para>
+ <listitem><para>The debug core</para>
<para>
- The kgdb core is found in kernel/kgdb.c. It contains:
+ The debug core is found in kernel/debug/debug_core.c. It contains:
<itemizedlist>
- <listitem><para>All the logic to implement the gdb serial protocol</para></listitem>
- <listitem><para>A generic OS exception handler which includes sync'ing the processors into a stopped state on an multi cpu system.</para></listitem>
+ <listitem><para>A generic OS exception handler which includes
+ sync'ing the processors into a stopped state on a multi-CPU
+ system.</para></listitem>
<listitem><para>The API to talk to the kgdb I/O drivers</para></listitem>
- <listitem><para>The API to make calls to the arch specific kgdb implementation</para></listitem>
+ <listitem><para>The API to make calls to the arch-specific kgdb implementation</para></listitem>
<listitem><para>The logic to perform safe memory reads and writes to memory while using the debugger</para></listitem>
<listitem><para>A full implementation for software breakpoints unless overridden by the arch</para></listitem>
+ <listitem><para>The API to invoke either the kdb or kgdb frontend to the debug core.</para></listitem>
</itemizedlist>
</para>
</listitem>
- <listitem><para>kgdb arch specific implementation</para>
+ <listitem><para>kgdb arch-specific implementation</para>
<para>
This implementation is generally found in arch/*/kernel/kgdb.c.
As an example, arch/x86/kernel/kgdb.c contains the specifics to
implement HW breakpoint as well as the initialization to
dynamically register and unregister for the trap handlers on
- this architecture. The arch specific portion implements:
+ this architecture. The arch-specific portion implements:
<itemizedlist>
- <listitem><para>contains an arch specific trap catcher which
+ <listitem><para>contains an arch-specific trap catcher which
+ invokes kgdb_handle_exception() to start kgdb doing its
+ work</para></listitem>
<listitem><para>translation between the gdb-specific packet format and pt_regs</para></listitem>
@@ -347,11 +659,35 @@
</itemizedlist>
</para>
</listitem>
+ <listitem><para>gdbstub frontend (aka kgdb)</para>
+ <para>The gdbstub is located in kernel/debug/gdbstub.c. It contains:</para>
+ <itemizedlist>
+ <listitem><para>All the logic to implement the gdb serial protocol</para></listitem>
+ </itemizedlist>
+ </listitem>
+ <listitem><para>kdb frontend</para>
+ <para>The kdb debugger shell is broken down into a number of
+ components. The kdb core is located in kernel/debug/kdb. There
+ are a number of helper functions in some of the other kernel
+ components to make it possible for kdb to examine and report
+ information about the kernel without taking locks that could
+ cause a kernel deadlock. The kdb core implements the following functionality.</para>
+ <itemizedlist>
+ <listitem><para>A simple shell</para></listitem>
+ <listitem><para>The kdb core command set</para></listitem>
+ <listitem><para>A registration API to register additional kdb shell commands.</para>
+ <para>A good example of a self-contained kdb module is the "ftdump" command for dumping the ftrace buffer. See: kernel/trace/trace_kdb.c</para></listitem>
+ <listitem><para>The implementation for kdb_printf() which
+ emits messages directly to I/O drivers, bypassing the kernel
+ log.</para></listitem>
+ <listitem><para>SW / HW breakpoint management for the kdb shell</para></listitem>
+ </itemizedlist>
+ </listitem>
<listitem><para>kgdb I/O driver</para>
<para>
- Each kgdb I/O driver has to provide an implemenation for the following:
+ Each kgdb I/O driver has to provide an implementation for the following:
<itemizedlist>
- <listitem><para>configuration via builtin or module</para></listitem>
+ <listitem><para>configuration via built-in or module</para></listitem>
<listitem><para>dynamic configuration and kgdb hook registration calls</para></listitem>
<listitem><para>read and write character interface</para></listitem>
<listitem><para>A cleanup handler for unconfiguring from the kgdb core</para></listitem>
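      <para>
      As a rough illustration of the requirements listed above, the following is a
      minimal, hedged sketch of a kgdb I/O driver built as a module.  It assumes the
      struct kgdb_io and kgdb_register_io_module()/kgdb_unregister_io_module()
      interface from include/linux/kgdb.h; the my_serial_* polled helpers are
      hypothetical placeholders, not a real driver.
      <informalexample><programlisting>
<![CDATA[
#include <linux/kgdb.h>
#include <linux/module.h>

/* Hypothetical polled helpers provided by the underlying hardware driver. */
extern int my_serial_poll_get_char(void);
extern void my_serial_poll_put_char(u8 c);

static int my_dbg_read_char(void)
{
	return my_serial_poll_get_char();	/* blocking, polled read */
}

static void my_dbg_write_char(u8 c)
{
	my_serial_poll_put_char(c);		/* polled write */
}

static struct kgdb_io my_dbg_io_ops = {
	.name		= "my_dbg_io",
	.read_char	= my_dbg_read_char,
	.write_char	= my_dbg_write_char,
};

static int __init my_dbg_init(void)
{
	/* Hook this I/O driver into the debug core. */
	return kgdb_register_io_module(&my_dbg_io_ops);
}

static void __exit my_dbg_exit(void)
{
	/* Cleanup handler: unconfigure from the debug core. */
	kgdb_unregister_io_module(&my_dbg_io_ops);
}

module_init(my_dbg_init);
module_exit(my_dbg_exit);
MODULE_LICENSE("GPL");
]]>
      </programlisting></informalexample>
      </para>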
@@ -416,15 +752,15 @@
underlying low-level hardware driver having "polling hooks" to
which the tty driver is attached. In the initial
implementation of kgdboc, the serial_core was changed to expose a
- low level uart hook for doing polled mode reading and writing of a
+ low level UART hook for doing polled mode reading and writing of a
single character while in an atomic context. When kgdb makes an I/O
request to the debugger, kgdboc invokes a call back in the serial
- core which in turn uses the call back in the uart driver. It is
- certainly possible to extend kgdboc to work with non-uart based
+ core which in turn uses the call back in the UART driver. It is
+ certainly possible to extend kgdboc to work with non-UART based
consoles in the future.
</para>
<para>
- When using kgdboc with a uart, the uart driver must implement two callbacks in the <constant>struct uart_ops</constant>. Example from drivers/8250.c:<programlisting>
+ When using kgdboc with a UART, the UART driver must implement two callbacks in the <constant>struct uart_ops</constant>. Example from drivers/8250.c:<programlisting>
#ifdef CONFIG_CONSOLE_POLL
.poll_get_char = serial8250_get_poll_char,
.poll_put_char = serial8250_put_poll_char,
@@ -434,7 +770,7 @@
<constant>#ifdef CONFIG_CONSOLE_POLL</constant>, as shown above.
Keep in mind that polling hooks have to be implemented in such a way
that they can be called from an atomic context and have to restore
- the state of the uart chip on return such that the system can return
+ the state of the UART chip on return such that the system can return
to normal when the debugger detaches. You need to be very careful
with any kind of lock you consider, because failing here is most
likely going to mean pressing the reset button.
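      <para>
      To make these constraints concrete, here is a hedged sketch of the two polling
      hooks for an imaginary UART; the myuart_* register helpers and MYUART_* status
      bits are invented for illustration and do not belong to any real driver.  Both
      hooks busy-wait rather than sleep, so they can be called from an atomic
      context, and neither changes the chip configuration, so nothing needs to be
      restored when the debugger detaches.
      <informalexample><programlisting>
<![CDATA[
#ifdef CONFIG_CONSOLE_POLL
static int myuart_poll_get_char(struct uart_port *port)
{
	/* Busy-wait until the receive register holds a character. */
	while (!(myuart_read_status(port) & MYUART_RX_READY))
		cpu_relax();
	return myuart_read_rx(port);
}

static void myuart_poll_put_char(struct uart_port *port, unsigned char c)
{
	/* Busy-wait for room in the transmitter, then send the character. */
	while (!(myuart_read_status(port) & MYUART_TX_EMPTY))
		cpu_relax();
	myuart_write_tx(port, c);
}
#endif /* CONFIG_CONSOLE_POLL */
]]>
      </programlisting></informalexample>
      These would then be wired into the driver's struct uart_ops in the same way
      as the 8250 example above.
      </para>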
@@ -453,6 +789,10 @@
<itemizedlist>
<listitem><para>Jason Wessel<email>jason.wessel@windriver.com</email></para></listitem>
</itemizedlist>
+ In Jan 2010 this document was updated to include kdb.
+ <itemizedlist>
+ <listitem><para>Jason Wessel<email>jason.wessel@windriver.com</email></para></listitem>
+ </itemizedlist>
</para>
</chapter>
</book>
diff --git a/Documentation/DocBook/libata.tmpl b/Documentation/DocBook/libata.tmpl
index ff3e5bec1c24..2595594c1fb8 100644
--- a/Documentation/DocBook/libata.tmpl
+++ b/Documentation/DocBook/libata.tmpl
@@ -81,16 +81,14 @@ void (*port_disable) (struct ata_port *);
</programlisting>
<para>
- Called from ata_bus_probe() and ata_bus_reset() error paths,
- as well as when unregistering from the SCSI module (rmmod, hot
- unplug).
+ Called from ata_bus_probe() error path, as well as when
+ unregistering from the SCSI module (rmmod, hot unplug).
This function should do whatever needs to be done to take the
port out of use. In most cases, ata_port_disable() can be used
as this hook.
</para>
<para>
Called from ata_bus_probe() on a failed probe.
- Called from ata_bus_reset() on a failed bus reset.
Called from ata_scsi_release().
</para>
@@ -477,7 +475,7 @@ void (*host_stop) (struct ata_host_set *host_set);
allocates space for a legacy IDE PRD table and returns.
</para>
<para>
- ->port_stop() is called after ->host_stop(). It's sole function
+ ->port_stop() is called after ->host_stop(). Its sole function
is to release DMA/memory resources, now that they are no longer
actively being used. Many drivers also free driver-private
data from port at this time.
diff --git a/Documentation/DocBook/media-entities.tmpl b/Documentation/DocBook/media-entities.tmpl
index c725cb852c54..5d4d40f429a5 100644
--- a/Documentation/DocBook/media-entities.tmpl
+++ b/Documentation/DocBook/media-entities.tmpl
@@ -17,6 +17,7 @@
<!ENTITY VIDIOC-DBG-G-REGISTER "<link linkend='vidioc-dbg-g-register'><constant>VIDIOC_DBG_G_REGISTER</constant></link>">
<!ENTITY VIDIOC-DBG-S-REGISTER "<link linkend='vidioc-dbg-g-register'><constant>VIDIOC_DBG_S_REGISTER</constant></link>">
<!ENTITY VIDIOC-DQBUF "<link linkend='vidioc-qbuf'><constant>VIDIOC_DQBUF</constant></link>">
+<!ENTITY VIDIOC-DQEVENT "<link linkend='vidioc-dqevent'><constant>VIDIOC_DQEVENT</constant></link>">
<!ENTITY VIDIOC-ENCODER-CMD "<link linkend='vidioc-encoder-cmd'><constant>VIDIOC_ENCODER_CMD</constant></link>">
<!ENTITY VIDIOC-ENUMAUDIO "<link linkend='vidioc-enumaudio'><constant>VIDIOC_ENUMAUDIO</constant></link>">
<!ENTITY VIDIOC-ENUMAUDOUT "<link linkend='vidioc-enumaudioout'><constant>VIDIOC_ENUMAUDOUT</constant></link>">
@@ -60,6 +61,7 @@
<!ENTITY VIDIOC-REQBUFS "<link linkend='vidioc-reqbufs'><constant>VIDIOC_REQBUFS</constant></link>">
<!ENTITY VIDIOC-STREAMOFF "<link linkend='vidioc-streamon'><constant>VIDIOC_STREAMOFF</constant></link>">
<!ENTITY VIDIOC-STREAMON "<link linkend='vidioc-streamon'><constant>VIDIOC_STREAMON</constant></link>">
+<!ENTITY VIDIOC-SUBSCRIBE-EVENT "<link linkend='vidioc-subscribe-event'><constant>VIDIOC_SUBSCRIBE_EVENT</constant></link>">
<!ENTITY VIDIOC-S-AUDIO "<link linkend='vidioc-g-audio'><constant>VIDIOC_S_AUDIO</constant></link>">
<!ENTITY VIDIOC-S-AUDOUT "<link linkend='vidioc-g-audioout'><constant>VIDIOC_S_AUDOUT</constant></link>">
<!ENTITY VIDIOC-S-CROP "<link linkend='vidioc-g-crop'><constant>VIDIOC_S_CROP</constant></link>">
@@ -83,6 +85,7 @@
<!ENTITY VIDIOC-TRY-ENCODER-CMD "<link linkend='vidioc-encoder-cmd'><constant>VIDIOC_TRY_ENCODER_CMD</constant></link>">
<!ENTITY VIDIOC-TRY-EXT-CTRLS "<link linkend='vidioc-g-ext-ctrls'><constant>VIDIOC_TRY_EXT_CTRLS</constant></link>">
<!ENTITY VIDIOC-TRY-FMT "<link linkend='vidioc-g-fmt'><constant>VIDIOC_TRY_FMT</constant></link>">
+<!ENTITY VIDIOC-UNSUBSCRIBE-EVENT "<link linkend='vidioc-subscribe-event'><constant>VIDIOC_UNSUBSCRIBE_EVENT</constant></link>">
<!-- Types -->
<!ENTITY v4l2-std-id "<link linkend='v4l2-std-id'>v4l2_std_id</link>">
@@ -141,6 +144,9 @@
<!ENTITY v4l2-enc-idx "struct&nbsp;<link linkend='v4l2-enc-idx'>v4l2_enc_idx</link>">
<!ENTITY v4l2-enc-idx-entry "struct&nbsp;<link linkend='v4l2-enc-idx-entry'>v4l2_enc_idx_entry</link>">
<!ENTITY v4l2-encoder-cmd "struct&nbsp;<link linkend='v4l2-encoder-cmd'>v4l2_encoder_cmd</link>">
+<!ENTITY v4l2-event "struct&nbsp;<link linkend='v4l2-event'>v4l2_event</link>">
+<!ENTITY v4l2-event-subscription "struct&nbsp;<link linkend='v4l2-event-subscription'>v4l2_event_subscription</link>">
+<!ENTITY v4l2-event-vsync "struct&nbsp;<link linkend='v4l2-event-vsync'>v4l2_event_vsync</link>">
<!ENTITY v4l2-ext-control "struct&nbsp;<link linkend='v4l2-ext-control'>v4l2_ext_control</link>">
<!ENTITY v4l2-ext-controls "struct&nbsp;<link linkend='v4l2-ext-controls'>v4l2_ext_controls</link>">
<!ENTITY v4l2-fmtdesc "struct&nbsp;<link linkend='v4l2-fmtdesc'>v4l2_fmtdesc</link>">
@@ -200,6 +206,7 @@
<!ENTITY sub-controls SYSTEM "v4l/controls.xml">
<!ENTITY sub-dev-capture SYSTEM "v4l/dev-capture.xml">
<!ENTITY sub-dev-codec SYSTEM "v4l/dev-codec.xml">
+<!ENTITY sub-dev-event SYSTEM "v4l/dev-event.xml">
<!ENTITY sub-dev-effect SYSTEM "v4l/dev-effect.xml">
<!ENTITY sub-dev-osd SYSTEM "v4l/dev-osd.xml">
<!ENTITY sub-dev-output SYSTEM "v4l/dev-output.xml">
@@ -292,6 +299,8 @@
<!ENTITY sub-v4l2grab-c SYSTEM "v4l/v4l2grab.c.xml">
<!ENTITY sub-videodev2-h SYSTEM "v4l/videodev2.h.xml">
<!ENTITY sub-v4l2 SYSTEM "v4l/v4l2.xml">
+<!ENTITY sub-dqevent SYSTEM "v4l/vidioc-dqevent.xml">
+<!ENTITY sub-subscribe-event SYSTEM "v4l/vidioc-subscribe-event.xml">
<!ENTITY sub-intro SYSTEM "dvb/intro.xml">
<!ENTITY sub-frontend SYSTEM "dvb/frontend.xml">
<!ENTITY sub-dvbproperty SYSTEM "dvb/dvbproperty.xml">
@@ -381,3 +390,5 @@
<!ENTITY reqbufs SYSTEM "v4l/vidioc-reqbufs.xml">
<!ENTITY s-hw-freq-seek SYSTEM "v4l/vidioc-s-hw-freq-seek.xml">
<!ENTITY streamon SYSTEM "v4l/vidioc-streamon.xml">
+<!ENTITY dqevent SYSTEM "v4l/vidioc-dqevent.xml">
+<!ENTITY subscribe_event SYSTEM "v4l/vidioc-subscribe-event.xml">
diff --git a/Documentation/DocBook/v4l/compat.xml b/Documentation/DocBook/v4l/compat.xml
index b9dbdf9e6d29..b42b935913cd 100644
--- a/Documentation/DocBook/v4l/compat.xml
+++ b/Documentation/DocBook/v4l/compat.xml
@@ -2332,15 +2332,26 @@ more information.</para>
</listitem>
</orderedlist>
</section>
- </section>
+ <section>
+ <title>V4L2 in Linux 2.6.34</title>
+ <orderedlist>
+ <listitem>
+ <para>Added
+<constant>V4L2_CID_IRIS_ABSOLUTE</constant> and
+<constant>V4L2_CID_IRIS_RELATIVE</constant> controls to the
+ <link linkend="camera-controls">Camera controls class</link>.
+ </para>
+ </listitem>
+ </orderedlist>
+ </section>
- <section id="other">
- <title>Relation of V4L2 to other Linux multimedia APIs</title>
+ <section id="other">
+ <title>Relation of V4L2 to other Linux multimedia APIs</title>
- <section id="xvideo">
- <title>X Video Extension</title>
+ <section id="xvideo">
+ <title>X Video Extension</title>
- <para>The X Video Extension (abbreviated XVideo or just Xv) is
+ <para>The X Video Extension (abbreviated XVideo or just Xv) is
an extension of the X Window system, implemented for example by the
XFree86 project. Its scope is similar to V4L2, an API to video capture
and output devices for X clients. Xv allows applications to display
@@ -2351,7 +2362,7 @@ capture or output still images in XPixmaps<footnote>
extension available across many operating systems and
architectures.</para>
- <para>Because the driver is embedded into the X server Xv has a
+ <para>Because the driver is embedded into the X server Xv has a
number of advantages over the V4L2 <link linkend="overlay">video
overlay interface</link>. The driver can easily determine the overlay
target, &ie; visible graphics memory or off-screen buffers for a
@@ -2360,16 +2371,16 @@ overlay, scaling or color-keying, or the clipping functions of the
video capture hardware, always in sync with drawing operations or
windows moving or changing their stacking order.</para>
- <para>To combine the advantages of Xv and V4L a special Xv
+ <para>To combine the advantages of Xv and V4L a special Xv
driver exists in XFree86 and XOrg, just programming any overlay capable
Video4Linux device it finds. To enable it
<filename>/etc/X11/XF86Config</filename> must contain these lines:</para>
- <para><screen>
+ <para><screen>
Section "Module"
Load "v4l"
EndSection</screen></para>
- <para>As of XFree86 4.2 this driver still supports only V4L
+ <para>As of XFree86 4.2 this driver still supports only V4L
ioctls, however it should work just fine with all V4L2 devices through
the V4L2 backward-compatibility layer. Since V4L2 permits multiple
opens it is possible (if supported by the V4L2 driver) to capture
@@ -2377,83 +2388,84 @@ video while an X client requested video overlay. Restrictions of
simultaneous capturing and overlay are discussed in <xref
linkend="overlay" /> apply.</para>
- <para>Only marginally related to V4L2, XFree86 extended Xv to
+ <para>Only marginally related to V4L2, XFree86 extended Xv to
support hardware YUV to RGB conversion and scaling for faster video
playback, and added an interface to MPEG-2 decoding hardware. This API
is useful to display images captured with V4L2 devices.</para>
- </section>
+ </section>
- <section>
- <title>Digital Video</title>
+ <section>
+ <title>Digital Video</title>
- <para>V4L2 does not support digital terrestrial, cable or
+ <para>V4L2 does not support digital terrestrial, cable or
satellite broadcast. A separate project aiming at digital receivers
exists. You can find its homepage at <ulink
url="http://linuxtv.org">http://linuxtv.org</ulink>. The Linux DVB API
has no connection to the V4L2 API except that drivers for hybrid
hardware may support both.</para>
- </section>
+ </section>
- <section>
- <title>Audio Interfaces</title>
+ <section>
+ <title>Audio Interfaces</title>
- <para>[to do - OSS/ALSA]</para>
+ <para>[to do - OSS/ALSA]</para>
+ </section>
</section>
- </section>
- <section id="experimental">
- <title>Experimental API Elements</title>
+ <section id="experimental">
+ <title>Experimental API Elements</title>
- <para>The following V4L2 API elements are currently experimental
+ <para>The following V4L2 API elements are currently experimental
and may change in the future.</para>
- <itemizedlist>
- <listitem>
- <para>Video Output Overlay (OSD) Interface, <xref
+ <itemizedlist>
+ <listitem>
+ <para>Video Output Overlay (OSD) Interface, <xref
linkend="osd" />.</para>
- </listitem>
+ </listitem>
<listitem>
- <para><constant>V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY</constant>,
+ <para><constant>V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY</constant>,
&v4l2-buf-type;, <xref linkend="v4l2-buf-type" />.</para>
- </listitem>
- <listitem>
- <para><constant>V4L2_CAP_VIDEO_OUTPUT_OVERLAY</constant>,
+ </listitem>
+ <listitem>
+ <para><constant>V4L2_CAP_VIDEO_OUTPUT_OVERLAY</constant>,
&VIDIOC-QUERYCAP; ioctl, <xref linkend="device-capabilities" />.</para>
- </listitem>
- <listitem>
- <para>&VIDIOC-ENUM-FRAMESIZES; and
+ </listitem>
+ <listitem>
+ <para>&VIDIOC-ENUM-FRAMESIZES; and
&VIDIOC-ENUM-FRAMEINTERVALS; ioctls.</para>
- </listitem>
- <listitem>
- <para>&VIDIOC-G-ENC-INDEX; ioctl.</para>
- </listitem>
- <listitem>
- <para>&VIDIOC-ENCODER-CMD; and &VIDIOC-TRY-ENCODER-CMD;
+ </listitem>
+ <listitem>
+ <para>&VIDIOC-G-ENC-INDEX; ioctl.</para>
+ </listitem>
+ <listitem>
+ <para>&VIDIOC-ENCODER-CMD; and &VIDIOC-TRY-ENCODER-CMD;
ioctls.</para>
- </listitem>
- <listitem>
- <para>&VIDIOC-DBG-G-REGISTER; and &VIDIOC-DBG-S-REGISTER;
+ </listitem>
+ <listitem>
+ <para>&VIDIOC-DBG-G-REGISTER; and &VIDIOC-DBG-S-REGISTER;
ioctls.</para>
- </listitem>
- <listitem>
- <para>&VIDIOC-DBG-G-CHIP-IDENT; ioctl.</para>
- </listitem>
- </itemizedlist>
- </section>
+ </listitem>
+ <listitem>
+ <para>&VIDIOC-DBG-G-CHIP-IDENT; ioctl.</para>
+ </listitem>
+ </itemizedlist>
+ </section>
- <section id="obsolete">
- <title>Obsolete API Elements</title>
+ <section id="obsolete">
+ <title>Obsolete API Elements</title>
- <para>The following V4L2 API elements were superseded by new
+ <para>The following V4L2 API elements were superseded by new
interfaces and should not be implemented in new drivers.</para>
- <itemizedlist>
- <listitem>
- <para><constant>VIDIOC_G_MPEGCOMP</constant> and
+ <itemizedlist>
+ <listitem>
+ <para><constant>VIDIOC_G_MPEGCOMP</constant> and
<constant>VIDIOC_S_MPEGCOMP</constant> ioctls. Use Extended Controls,
<xref linkend="extended-controls" />.</para>
- </listitem>
- </itemizedlist>
+ </listitem>
+ </itemizedlist>
+ </section>
</section>
<!--
diff --git a/Documentation/DocBook/v4l/controls.xml b/Documentation/DocBook/v4l/controls.xml
index f46450610412..8408caaee276 100644
--- a/Documentation/DocBook/v4l/controls.xml
+++ b/Documentation/DocBook/v4l/controls.xml
@@ -267,6 +267,12 @@ minimum value disables backlight compensation.</entry>
<entry>Chroma automatic gain control.</entry>
</row>
<row>
+ <entry><constant>V4L2_CID_CHROMA_GAIN</constant></entry>
+ <entry>integer</entry>
+ <entry>Adjusts the Chroma gain control (for use when chroma AGC
+ is disabled).</entry>
+ </row>
+ <row>
<entry><constant>V4L2_CID_COLOR_KILLER</constant></entry>
<entry>boolean</entry>
<entry>Enable the color killer (&ie; force a black &amp; white image in case of a weak video signal).</entry>
@@ -277,8 +283,15 @@ minimum value disables backlight compensation.</entry>
<entry>Selects a color effect. Possible values for
<constant>enum v4l2_colorfx</constant> are:
<constant>V4L2_COLORFX_NONE</constant> (0),
-<constant>V4L2_COLORFX_BW</constant> (1) and
-<constant>V4L2_COLORFX_SEPIA</constant> (2).</entry>
+<constant>V4L2_COLORFX_BW</constant> (1),
+<constant>V4L2_COLORFX_SEPIA</constant> (2),
+<constant>V4L2_COLORFX_NEGATIVE</constant> (3),
+<constant>V4L2_COLORFX_EMBOSS</constant> (4),
+<constant>V4L2_COLORFX_SKETCH</constant> (5),
+<constant>V4L2_COLORFX_SKY_BLUE</constant> (6),
+<constant>V4L2_COLORFX_GRASS_GREEN</constant> (7),
+<constant>V4L2_COLORFX_SKIN_WHITEN</constant> (8) and
+<constant>V4L2_COLORFX_VIVID</constant> (9).</entry>
</row>
<row>
<entry><constant>V4L2_CID_ROTATE</constant></entry>
@@ -1825,6 +1838,25 @@ wide-angle direction. The zoom speed unit is driver-specific.</entry>
<row><entry></entry></row>
<row>
+ <entry spanname="id"><constant>V4L2_CID_IRIS_ABSOLUTE</constant>&nbsp;</entry>
+ <entry>integer</entry>
+ </row><row><entry spanname="descr">This control sets the
+camera's aperture to the specified value. The unit is undefined.
+Larger values open the iris wider, smaller values close it.</entry>
+ </row>
+ <row><entry></entry></row>
+
+ <row>
+ <entry spanname="id"><constant>V4L2_CID_IRIS_RELATIVE</constant>&nbsp;</entry>
+ <entry>integer</entry>
+ </row><row><entry spanname="descr">This control modifies the
+camera's aperture by the specified amount. The unit is undefined.
+Positive values open the iris one step further, negative values close
+it one step further. This is a write-only control.</entry>
+ </row>
+ <row><entry></entry></row>
+
+ <row>
<entry spanname="id"><constant>V4L2_CID_PRIVACY</constant>&nbsp;</entry>
<entry>boolean</entry>
</row><row><entry spanname="descr">Prevent video from being acquired
diff --git a/Documentation/DocBook/v4l/dev-event.xml b/Documentation/DocBook/v4l/dev-event.xml
new file mode 100644
index 000000000000..be5a98fb4fab
--- /dev/null
+++ b/Documentation/DocBook/v4l/dev-event.xml
@@ -0,0 +1,31 @@
+ <title>Event Interface</title>
+
+ <para>The V4L2 event interface provides a means for the user to be
+ notified immediately of certain conditions taking place on a device.
+ This might include start-of-frame or loss-of-signal events, for
+ example.
+ </para>
+
+ <para>To receive events, the user must first subscribe to the events of
+ interest using the &VIDIOC-SUBSCRIBE-EVENT; ioctl. Once an event type is
+ subscribed, events of that type can be dequeued using the
+ &VIDIOC-DQEVENT; ioctl. Events may be unsubscribed using the
+ VIDIOC_UNSUBSCRIBE_EVENT ioctl. The special event type V4L2_EVENT_ALL may
+ be used to unsubscribe from all the events the driver supports.</para>
+
+ <para>The event subscriptions and event queues are specific to file
+ handles. Subscribing an event on one file handle does not affect
+ other file handles.
+ </para>
+
+ <para>Pending events can be detected with the select or poll system
+ calls on video devices. V4L2 events are signalled as POLLPRI events by the
+ poll system call and as exceptions by the select system call.</para>
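  <para>For illustration only, here is a hedged userspace sketch of the flow just
  described: subscribe to V4L2_EVENT_VSYNC, wait for POLLPRI with poll(), dequeue
  one event, then unsubscribe from everything.  It assumes a linux/videodev2.h that
  already carries the event definitions added by this patch; <structfield>fd</structfield>
  is an already-open video device.
  <programlisting>
<![CDATA[
#include <poll.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int wait_for_vsync(int fd)
{
	struct v4l2_event_subscription sub;
	struct v4l2_event ev;
	struct pollfd pfd = { .fd = fd, .events = POLLPRI };

	memset(&sub, 0, sizeof(sub));
	sub.type = V4L2_EVENT_VSYNC;
	if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0)
		return -1;

	/* A pending event is signalled as POLLPRI. */
	if (poll(&pfd, 1, -1) < 0 || !(pfd.revents & POLLPRI))
		return -1;

	memset(&ev, 0, sizeof(ev));
	if (ioctl(fd, VIDIOC_DQEVENT, &ev) < 0)
		return -1;
	/* ev.u.vsync.field now holds the upcoming field. */

	sub.type = V4L2_EVENT_ALL;	/* unsubscribe all events at once */
	return ioctl(fd, VIDIOC_UNSUBSCRIBE_EVENT, &sub);
}
]]>
  </programlisting>
  </para>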
+
+ <!--
+Local Variables:
+mode: sgml
+sgml-parent-document: "v4l2.sgml"
+indent-tabs-mode: nil
+End:
+ -->
diff --git a/Documentation/DocBook/v4l/io.xml b/Documentation/DocBook/v4l/io.xml
index e870330cbf77..d424886beda0 100644
--- a/Documentation/DocBook/v4l/io.xml
+++ b/Documentation/DocBook/v4l/io.xml
@@ -702,6 +702,16 @@ They can be both cleared however, then the buffer is in "dequeued"
state, in the application domain to say so.</entry>
</row>
<row>
+ <entry><constant>V4L2_BUF_FLAG_ERROR</constant></entry>
+ <entry>0x0040</entry>
+ <entry>When this flag is set, the buffer has been dequeued
+ successfully, although the data might have been corrupted.
+ This is recoverable; streaming may continue as normal and
+ the buffer may be reused normally.
+ Drivers set this flag when the <constant>VIDIOC_DQBUF</constant>
+ ioctl is called.</entry>
+ </row>
+ <row>
<entry><constant>V4L2_BUF_FLAG_KEYFRAME</constant></entry>
<entry>0x0008</entry>
<entry>Drivers set or clear this flag when calling the
@@ -918,8 +928,8 @@ order</emphasis>.</para>
<para>When the driver provides or accepts images field by field
rather than interleaved, it is also important applications understand
-how the fields combine to frames. We distinguish between top and
-bottom fields, the <emphasis>spatial order</emphasis>: The first line
+how the fields combine to frames. We distinguish between top (aka odd) and
+bottom (aka even) fields, the <emphasis>spatial order</emphasis>: The first line
of the top field is the first line of an interlaced frame, the first
line of the bottom field is the second line of that frame.</para>
@@ -972,12 +982,12 @@ between <constant>V4L2_FIELD_TOP</constant> and
<row>
<entry><constant>V4L2_FIELD_TOP</constant></entry>
<entry>2</entry>
- <entry>Images consist of the top field only.</entry>
+ <entry>Images consist of the top (aka odd) field only.</entry>
</row>
<row>
<entry><constant>V4L2_FIELD_BOTTOM</constant></entry>
<entry>3</entry>
- <entry>Images consist of the bottom field only.
+ <entry>Images consist of the bottom (aka even) field only.
Applications may wish to prevent a device from capturing interlaced
images because they will have "comb" or "feathering" artefacts around
moving objects.</entry>
diff --git a/Documentation/DocBook/v4l/pixfmt.xml b/Documentation/DocBook/v4l/pixfmt.xml
index 885968d6a2fc..c4ad0a8e42dc 100644
--- a/Documentation/DocBook/v4l/pixfmt.xml
+++ b/Documentation/DocBook/v4l/pixfmt.xml
@@ -792,6 +792,18 @@ http://www.thedirks.org/winnov/</ulink></para></entry>
<entry>'YYUV'</entry>
<entry>unknown</entry>
</row>
+ <row id="V4L2-PIX-FMT-Y4">
+ <entry><constant>V4L2_PIX_FMT_Y4</constant></entry>
+ <entry>'Y04 '</entry>
+ <entry>Old 4-bit greyscale format. Only the least significant 4 bits of each byte are used,
+the other bits are set to 0.</entry>
+ </row>
+ <row id="V4L2-PIX-FMT-Y6">
+ <entry><constant>V4L2_PIX_FMT_Y6</constant></entry>
+ <entry>'Y06 '</entry>
+ <entry>Old 6-bit greyscale format. Only the least significant 6 bits of each byte are used,
+the other bits are set to 0.</entry>
+ </row>
</tbody>
</tgroup>
</table>
diff --git a/Documentation/DocBook/v4l/v4l2.xml b/Documentation/DocBook/v4l/v4l2.xml
index 060105af49e5..9737243377a3 100644
--- a/Documentation/DocBook/v4l/v4l2.xml
+++ b/Documentation/DocBook/v4l/v4l2.xml
@@ -401,6 +401,7 @@ and discussions on the V4L mailing list.</revremark>
<section id="ttx"> &sub-dev-teletext; </section>
<section id="radio"> &sub-dev-radio; </section>
<section id="rds"> &sub-dev-rds; </section>
+ <section id="event"> &sub-dev-event; </section>
</chapter>
<chapter id="driver">
@@ -426,6 +427,7 @@ and discussions on the V4L mailing list.</revremark>
&sub-cropcap;
&sub-dbg-g-chip-ident;
&sub-dbg-g-register;
+ &sub-dqevent;
&sub-encoder-cmd;
&sub-enumaudio;
&sub-enumaudioout;
@@ -467,6 +469,7 @@ and discussions on the V4L mailing list.</revremark>
&sub-reqbufs;
&sub-s-hw-freq-seek;
&sub-streamon;
+ &sub-subscribe-event;
<!-- End of ioctls. -->
&sub-mmap;
&sub-munmap;
diff --git a/Documentation/DocBook/v4l/videodev2.h.xml b/Documentation/DocBook/v4l/videodev2.h.xml
index 068325940658..865b06d9e679 100644
--- a/Documentation/DocBook/v4l/videodev2.h.xml
+++ b/Documentation/DocBook/v4l/videodev2.h.xml
@@ -1018,6 +1018,13 @@ enum <link linkend="v4l2-colorfx">v4l2_colorfx</link> {
V4L2_COLORFX_NONE = 0,
V4L2_COLORFX_BW = 1,
V4L2_COLORFX_SEPIA = 2,
+ V4L2_COLORFX_NEGATIVE = 3,
+ V4L2_COLORFX_EMBOSS = 4,
+ V4L2_COLORFX_SKETCH = 5,
+ V4L2_COLORFX_SKY_BLUE = 6,
+ V4L2_COLORFX_GRASS_GREEN = 7,
+ V4L2_COLORFX_SKIN_WHITEN = 8,
+ V4L2_COLORFX_VIVID = 9,
};
#define V4L2_CID_AUTOBRIGHTNESS (V4L2_CID_BASE+32)
#define V4L2_CID_BAND_STOP_FILTER (V4L2_CID_BASE+33)
@@ -1271,6 +1278,9 @@ enum <link linkend="v4l2-exposure-auto-type">v4l2_exposure_auto_type</link> {
#define V4L2_CID_PRIVACY (V4L2_CID_CAMERA_CLASS_BASE+16)
+#define V4L2_CID_IRIS_ABSOLUTE (V4L2_CID_CAMERA_CLASS_BASE+17)
+#define V4L2_CID_IRIS_RELATIVE (V4L2_CID_CAMERA_CLASS_BASE+18)
+
/* FM Modulator class control IDs */
#define V4L2_CID_FM_TX_CLASS_BASE (V4L2_CTRL_CLASS_FM_TX | 0x900)
#define V4L2_CID_FM_TX_CLASS (V4L2_CTRL_CLASS_FM_TX | 1)
diff --git a/Documentation/DocBook/v4l/vidioc-dqevent.xml b/Documentation/DocBook/v4l/vidioc-dqevent.xml
new file mode 100644
index 000000000000..4e0a7cc30812
--- /dev/null
+++ b/Documentation/DocBook/v4l/vidioc-dqevent.xml
@@ -0,0 +1,131 @@
+<refentry id="vidioc-dqevent">
+ <refmeta>
+ <refentrytitle>ioctl VIDIOC_DQEVENT</refentrytitle>
+ &manvol;
+ </refmeta>
+
+ <refnamediv>
+ <refname>VIDIOC_DQEVENT</refname>
+ <refpurpose>Dequeue event</refpurpose>
+ </refnamediv>
+
+ <refsynopsisdiv>
+ <funcsynopsis>
+ <funcprototype>
+ <funcdef>int <function>ioctl</function></funcdef>
+ <paramdef>int <parameter>fd</parameter></paramdef>
+ <paramdef>int <parameter>request</parameter></paramdef>
+ <paramdef>struct v4l2_event
+*<parameter>argp</parameter></paramdef>
+ </funcprototype>
+ </funcsynopsis>
+ </refsynopsisdiv>
+
+ <refsect1>
+ <title>Arguments</title>
+
+ <variablelist>
+ <varlistentry>
+ <term><parameter>fd</parameter></term>
+ <listitem>
+ <para>&fd;</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><parameter>request</parameter></term>
+ <listitem>
+ <para>VIDIOC_DQEVENT</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><parameter>argp</parameter></term>
+ <listitem>
+ <para></para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </refsect1>
+
+ <refsect1>
+ <title>Description</title>
+
+ <para>Dequeue an event from a video device. No input is required
+ for this ioctl. All the fields of the &v4l2-event; structure are
+ filled by the driver. The file handle will also receive exceptions,
+ which the application may detect by using, e.g., the select system
+ call.</para>
+
+ <table frame="none" pgwide="1" id="v4l2-event">
+ <title>struct <structname>v4l2_event</structname></title>
+ <tgroup cols="4">
+ &cs-str;
+ <tbody valign="top">
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>type</structfield></entry>
+ <entry></entry>
+ <entry>Type of the event.</entry>
+ </row>
+ <row>
+ <entry>union</entry>
+ <entry><structfield>u</structfield></entry>
+ <entry></entry>
+ <entry></entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry>&v4l2-event-vsync;</entry>
+ <entry><structfield>vsync</structfield></entry>
+ <entry>Event data for event V4L2_EVENT_VSYNC.
+ </entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry>__u8</entry>
+ <entry><structfield>data</structfield>[64]</entry>
+ <entry>Event data. Defined by the event type. The union
+ should be used to define easily accessible types for
+ events.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>pending</structfield></entry>
+ <entry></entry>
+ <entry>Number of pending events excluding this one.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>sequence</structfield></entry>
+ <entry></entry>
+ <entry>Event sequence number. The sequence number is
+ incremented for every subscribed event that takes place.
+ If sequence numbers are not contiguous it means that
+ events have been lost.
+ </entry>
+ </row>
+ <row>
+ <entry>struct timespec</entry>
+ <entry><structfield>timestamp</structfield></entry>
+ <entry></entry>
+ <entry>Event timestamp.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>reserved</structfield>[9]</entry>
+ <entry></entry>
+ <entry>Reserved for future extensions. Drivers must set
+ the array to zero.</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+
+ </refsect1>
+</refentry>
+<!--
+Local Variables:
+mode: sgml
+sgml-parent-document: "v4l2.sgml"
+indent-tabs-mode: nil
+End:
+-->
diff --git a/Documentation/DocBook/v4l/vidioc-qbuf.xml b/Documentation/DocBook/v4l/vidioc-qbuf.xml
index b843bd7b3897..ab691ebf3b93 100644
--- a/Documentation/DocBook/v4l/vidioc-qbuf.xml
+++ b/Documentation/DocBook/v4l/vidioc-qbuf.xml
@@ -111,7 +111,11 @@ from the driver's outgoing queue. They just set the
and <structfield>reserved</structfield>
fields of a &v4l2-buffer; as above, when <constant>VIDIOC_DQBUF</constant>
is called with a pointer to this structure the driver fills the
-remaining fields or returns an error code.</para>
+remaining fields or returns an error code. The driver may also set
+<constant>V4L2_BUF_FLAG_ERROR</constant> in the <structfield>flags</structfield>
+field. It indicates a non-critical (recoverable) streaming error. In such a case
+the application may continue as normal, but should be aware that data in the
+dequeued buffer might be corrupted.</para>
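    <para>As a hedged sketch of how an application might act on this flag (the
capture buffer type and MMAP memory below are assumptions made for the example,
not requirements of the API):
    <programlisting>
<![CDATA[
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Returns 1 for a good frame, 0 for a recoverable error, -1 otherwise. */
static int dequeue_one(int fd, struct v4l2_buffer *buf)
{
	memset(buf, 0, sizeof(*buf));
	buf->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf->memory = V4L2_MEMORY_MMAP;

	if (ioctl(fd, VIDIOC_DQBUF, buf) < 0)
		return -1;			/* fatal or unknown error */

	if (buf->flags & V4L2_BUF_FLAG_ERROR) {
		/* Data may be corrupted: drop the frame but keep streaming. */
		ioctl(fd, VIDIOC_QBUF, buf);
		return 0;
	}
	return 1;
}
]]>
    </programlisting>
    </para>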
<para>By default <constant>VIDIOC_DQBUF</constant> blocks when no
buffer is in the outgoing queue. When the
@@ -158,7 +162,13 @@ enqueue a user pointer buffer.</para>
<para><constant>VIDIOC_DQBUF</constant> failed due to an
internal error. Can also indicate temporary problems like signal
loss. Note the driver might dequeue an (empty) buffer despite
-returning an error, or even stop capturing.</para>
+returning an error, or even stop capturing. Reusing such a buffer may be unsafe
+though and its details (e.g. <structfield>index</structfield>) may not be
+returned either. It is recommended that drivers indicate recoverable errors
+by setting the <constant>V4L2_BUF_FLAG_ERROR</constant> flag and returning 0 instead.
+In that case the application should be able to safely reuse the buffer and
+continue streaming.
+ </para>
</listitem>
</varlistentry>
</variablelist>
diff --git a/Documentation/DocBook/v4l/vidioc-queryctrl.xml b/Documentation/DocBook/v4l/vidioc-queryctrl.xml
index 4876ff1a1a04..8e0e055ac934 100644
--- a/Documentation/DocBook/v4l/vidioc-queryctrl.xml
+++ b/Documentation/DocBook/v4l/vidioc-queryctrl.xml
@@ -325,7 +325,7 @@ should be part of the control documentation.</entry>
<entry>n/a</entry>
<entry>This is not a control. When
<constant>VIDIOC_QUERYCTRL</constant> is called with a control ID
-equal to a control class code (see <xref linkend="ctrl-class" />), the
+equal to a control class code (see <xref linkend="ctrl-class" />) + 1, the
ioctl returns the name of the control class and this control type.
Older drivers which do not support this feature return an
&EINVAL;.</entry>
diff --git a/Documentation/DocBook/v4l/vidioc-reqbufs.xml b/Documentation/DocBook/v4l/vidioc-reqbufs.xml
index 1c0816372074..69800ae23348 100644
--- a/Documentation/DocBook/v4l/vidioc-reqbufs.xml
+++ b/Documentation/DocBook/v4l/vidioc-reqbufs.xml
@@ -61,7 +61,7 @@ fields of the <structname>v4l2_requestbuffers</structname> structure.
They set the <structfield>type</structfield> field to the respective
stream or buffer type, the <structfield>count</structfield> field to
the desired number of buffers, <structfield>memory</structfield>
-must be set to the requested I/O method and the reserved array
+must be set to the requested I/O method and the <structfield>reserved</structfield> array
must be zeroed. When the ioctl
is called with a pointer to this structure the driver will attempt to allocate
the requested number of buffers and it stores the actual number
diff --git a/Documentation/DocBook/v4l/vidioc-subscribe-event.xml b/Documentation/DocBook/v4l/vidioc-subscribe-event.xml
new file mode 100644
index 000000000000..8b501791aa68
--- /dev/null
+++ b/Documentation/DocBook/v4l/vidioc-subscribe-event.xml
@@ -0,0 +1,133 @@
+<refentry id="vidioc-subscribe-event">
+ <refmeta>
+ <refentrytitle>ioctl VIDIOC_SUBSCRIBE_EVENT, VIDIOC_UNSUBSCRIBE_EVENT</refentrytitle>
+ &manvol;
+ </refmeta>
+
+ <refnamediv>
+ <refname>VIDIOC_SUBSCRIBE_EVENT, VIDIOC_UNSUBSCRIBE_EVENT</refname>
+ <refpurpose>Subscribe or unsubscribe event</refpurpose>
+ </refnamediv>
+
+ <refsynopsisdiv>
+ <funcsynopsis>
+ <funcprototype>
+ <funcdef>int <function>ioctl</function></funcdef>
+ <paramdef>int <parameter>fd</parameter></paramdef>
+ <paramdef>int <parameter>request</parameter></paramdef>
+ <paramdef>struct v4l2_event_subscription
+*<parameter>argp</parameter></paramdef>
+ </funcprototype>
+ </funcsynopsis>
+ </refsynopsisdiv>
+
+ <refsect1>
+ <title>Arguments</title>
+
+ <variablelist>
+ <varlistentry>
+ <term><parameter>fd</parameter></term>
+ <listitem>
+ <para>&fd;</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><parameter>request</parameter></term>
+ <listitem>
+ <para>VIDIOC_SUBSCRIBE_EVENT, VIDIOC_UNSUBSCRIBE_EVENT</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><parameter>argp</parameter></term>
+ <listitem>
+ <para></para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </refsect1>
+
+ <refsect1>
+ <title>Description</title>
+
+ <para>Subscribe or unsubscribe V4L2 event. Subscribed events are
+ dequeued by using the &VIDIOC-DQEVENT; ioctl.</para>
+
+ <table frame="none" pgwide="1" id="v4l2-event-subscription">
+ <title>struct <structname>v4l2_event_subscription</structname></title>
+ <tgroup cols="3">
+ &cs-str;
+ <tbody valign="top">
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>type</structfield></entry>
+ <entry>Type of the event.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>reserved</structfield>[7]</entry>
+ <entry>Reserved for future extensions. Drivers and applications
+ must set the array to zero.</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+
+ <table frame="none" pgwide="1" id="event-type">
+ <title>Event Types</title>
+ <tgroup cols="3">
+ &cs-def;
+ <tbody valign="top">
+ <row>
+ <entry><constant>V4L2_EVENT_ALL</constant></entry>
+ <entry>0</entry>
+ <entry>All events. V4L2_EVENT_ALL is valid only for
+ VIDIOC_UNSUBSCRIBE_EVENT for unsubscribing all events at once.
+ </entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_EVENT_VSYNC</constant></entry>
+ <entry>1</entry>
+ <entry>This event is triggered on the vertical sync.
+ This event has &v4l2-event-vsync; associated with it.
+ </entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_EVENT_EOS</constant></entry>
+ <entry>2</entry>
+ <entry>This event is triggered when the end of a stream is reached.
+ This is typically used with MPEG decoders to report to the application
+ when the last of the MPEG stream has been decoded.
+ </entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_EVENT_PRIVATE_START</constant></entry>
+ <entry>0x08000000</entry>
+ <entry>Base event number for driver-private events.</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+
+ <table frame="none" pgwide="1" id="v4l2-event-vsync">
+ <title>struct <structname>v4l2_event_vsync</structname></title>
+ <tgroup cols="3">
+ &cs-str;
+ <tbody valign="top">
+ <row>
+ <entry>__u8</entry>
+ <entry><structfield>field</structfield></entry>
+ <entry>The upcoming field. See &v4l2-field;.</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+
+ </refsect1>
+</refentry>
+<!--
+Local Variables:
+mode: sgml
+sgml-parent-document: "v4l2.sgml"
+indent-tabs-mode: nil
+End:
+-->
diff --git a/Documentation/DocBook/writing-an-alsa-driver.tmpl b/Documentation/DocBook/writing-an-alsa-driver.tmpl
index 0d0f7b4d4b1a..0ba149de2608 100644
--- a/Documentation/DocBook/writing-an-alsa-driver.tmpl
+++ b/Documentation/DocBook/writing-an-alsa-driver.tmpl
@@ -5518,34 +5518,41 @@ struct _snd_pcm_runtime {
]]>
</programlisting>
</informalexample>
+
+ For raw data, the <structfield>size</structfield> field must be
+ set properly. This specifies the maximum size of the proc file access.
</para>
<para>
- The callback is much more complicated than the text-file
- version. You need to use a low-level I/O functions such as
+ The read/write callbacks of raw mode are more direct than the text-mode
+ versions. You need to use low-level I/O functions such as
<function>copy_from/to_user()</function> to transfer the
data.
<informalexample>
<programlisting>
<![CDATA[
- static long my_file_io_read(struct snd_info_entry *entry,
+ static ssize_t my_file_io_read(struct snd_info_entry *entry,
void *file_private_data,
struct file *file,
char *buf,
- unsigned long count,
- unsigned long pos)
+ size_t count,
+ loff_t pos)
{
- long size = count;
- if (pos + size > local_max_size)
- size = local_max_size - pos;
- if (copy_to_user(buf, local_data + pos, size))
+ if (copy_to_user(buf, local_data + pos, count))
return -EFAULT;
- return size;
+ return count;
}
]]>
</programlisting>
</informalexample>
+
+ If the size of the info entry has been set up properly,
+ <structfield>count</structfield> and <structfield>pos</structfield> are
+ guaranteed to fall within 0 and the given size.
+ You don't have to check the range in the callbacks unless some
+ other condition requires it.
+
</para>
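      <para>
      Putting the pieces above together, here is a hedged sketch of creating the
      raw-data entry and setting its size.  It assumes the snd_card_proc_new()
      helper described earlier in this document; my_file_io_read() is the callback
      from the example above, and local_max_size stands in for whatever limit the
      driver chooses.
      <informalexample><programlisting>
<![CDATA[
static struct snd_info_entry_ops my_file_io_ops = {
	.read = my_file_io_read,	/* the callback shown above */
};

static int my_proc_init(struct snd_card *card)
{
	struct snd_info_entry *entry;
	int err;

	err = snd_card_proc_new(card, "my-file", &entry);
	if (err < 0)
		return err;
	entry->content = SNDRV_INFO_CONTENT_DATA;
	entry->c.ops = &my_file_io_ops;
	entry->size = local_max_size;	/* maximum size of the proc file access */
	return 0;
}
]]>
      </programlisting></informalexample>
      </para>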
</chapter>
diff --git a/Documentation/PCI/pci-error-recovery.txt b/Documentation/PCI/pci-error-recovery.txt
index e83f2ea76415..898ded24510d 100644
--- a/Documentation/PCI/pci-error-recovery.txt
+++ b/Documentation/PCI/pci-error-recovery.txt
@@ -216,7 +216,7 @@ The driver should return one of the following result codes:
- PCI_ERS_RESULT_NEED_RESET
Driver returns this if it thinks the device is not
- recoverable in it's current state and it needs a slot
+ recoverable in its current state and it needs a slot
reset to proceed.
- PCI_ERS_RESULT_DISCONNECT
@@ -241,7 +241,7 @@ in working condition.
The driver is not supposed to restart normal driver I/O operations
at this point. It should limit itself to "probing" the device to
-check it's recoverability status. If all is right, then the platform
+check its recoverability status. If all is right, then the platform
will call resume() once all drivers have ack'd link_reset().
Result codes:
diff --git a/Documentation/PCI/pcieaer-howto.txt b/Documentation/PCI/pcieaer-howto.txt
index be21001ab144..26d3d945c3c2 100644
--- a/Documentation/PCI/pcieaer-howto.txt
+++ b/Documentation/PCI/pcieaer-howto.txt
@@ -13,7 +13,7 @@ Reporting (AER) driver and provides information on how to use it, as
well as how to enable the drivers of endpoint devices to conform with
PCI Express AER driver.
-1.2 Copyright © Intel Corporation 2006.
+1.2 Copyright (C) Intel Corporation 2006.
1.3 What is the PCI Express AER Driver?
@@ -71,15 +71,11 @@ console. If it's a correctable error, it is outputed as a warning.
Otherwise, it is printed as an error. So users could choose different
log level to filter out correctable error messages.
-Below shows an example.
-+------ PCI-Express Device Error -----+
-Error Severity : Uncorrected (Fatal)
-PCIE Bus Error type : Transaction Layer
-Unsupported Request : First
-Requester ID : 0500
-VendorID=8086h, DeviceID=0329h, Bus=05h, Device=00h, Function=00h
-TLB Header:
-04000001 00200a03 05010000 00050100
+Below shows an example:
+0000:50:00.0: PCIe Bus Error: severity=Uncorrected (Fatal), type=Transaction Layer, id=0500(Requester ID)
+0000:50:00.0: device [8086:0329] error status/mask=00100000/00000000
+0000:50:00.0: [20] Unsupported Request (First)
+0000:50:00.0: TLP Header: 04000001 00200a03 05010000 00050100
In the example, 'Requester ID' means the ID of the device that sends
the error message to the root port. Please refer to the PCI Express specs for
@@ -112,7 +108,7 @@ but the PCI Express link itself is fully functional. Fatal errors, on
the other hand, cause the link to be unreliable.
When AER is enabled, a PCI Express device will automatically send an
-error message to the PCIE root port above it when the device captures
+error message to the PCIe root port above it when the device captures
an error. The Root Port, upon receiving an error reporting message,
internally processes and logs the error message in its PCI Express
capability structure. Error information being logged includes storing
@@ -198,8 +194,9 @@ to reset link, AER port service driver is required to provide the
function to reset link. Firstly, kernel looks for if the upstream
component has an aer driver. If it has, kernel uses the reset_link
callback of the aer driver. If the upstream component has no aer driver
-and the port is downstream port, we will use the aer driver of the
-root port who reports the AER error. As for upstream ports,
+and the port is a downstream port, we will perform a hot reset as the
+default by setting the Secondary Bus Reset bit of the Bridge Control
+register associated with the downstream port. As for upstream ports,
they should provide their own aer service drivers with reset_link
function. If error_detected returns PCI_ERS_RESULT_CAN_RECOVER and
reset_link returns PCI_ERS_RESULT_RECOVERED, the error handling goes
@@ -253,11 +250,11 @@ cleanup uncorrectable status register. Pls. refer to section 3.3.
4. Software error injection
-Debugging PCIE AER error recovery code is quite difficult because it
+Debugging PCIe AER error recovery code is quite difficult because it
is hard to trigger real hardware errors. Software based error
-injection can be used to fake various kinds of PCIE errors.
+injection can be used to fake various kinds of PCIe errors.
-First you should enable PCIE AER software error injection in kernel
+First you should enable PCIe AER software error injection in kernel
configuration; that is, the following item should be in your .config.
CONFIG_PCIEAER_INJECT=y or CONFIG_PCIEAER_INJECT=m
diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt
index 1423d2570d78..44c6dcc93d6d 100644
--- a/Documentation/RCU/stallwarn.txt
+++ b/Documentation/RCU/stallwarn.txt
@@ -3,35 +3,79 @@ Using RCU's CPU Stall Detector
The CONFIG_RCU_CPU_STALL_DETECTOR kernel config parameter enables
RCU's CPU stall detector, which detects conditions that unduly delay
RCU grace periods. The stall detector's idea of what constitutes
-"unduly delayed" is controlled by a pair of C preprocessor macros:
+"unduly delayed" is controlled by a set of C preprocessor macros:
RCU_SECONDS_TILL_STALL_CHECK
This macro defines the period of time that RCU will wait from
the beginning of a grace period until it issues an RCU CPU
- stall warning. It is normally ten seconds.
+ stall warning. This time period is normally ten seconds.
RCU_SECONDS_TILL_STALL_RECHECK
This macro defines the period of time that RCU will wait after
- issuing a stall warning until it issues another stall warning.
- It is normally set to thirty seconds.
+ issuing a stall warning until it issues another stall warning
+ for the same stall. This time period is normally set to thirty
+ seconds.
RCU_STALL_RAT_DELAY
- The CPU stall detector tries to make the offending CPU rat on itself,
- as this often gives better-quality stack traces. However, if
- the offending CPU does not detect its own stall in the number
- of jiffies specified by RCU_STALL_RAT_DELAY, then other CPUs will
- complain. This is normally set to two jiffies.
+ The CPU stall detector tries to make the offending CPU print its
+ own warnings, as this often gives better-quality stack traces.
+ However, if the offending CPU does not detect its own stall in
+ the number of jiffies specified by RCU_STALL_RAT_DELAY, then
+ some other CPU will complain. This delay is normally set to
+ two jiffies.
-The following problems can result in an RCU CPU stall warning:
+When a CPU detects that it is stalling, it will print a message similar
+to the following:
+
+INFO: rcu_sched_state detected stall on CPU 5 (t=2500 jiffies)
+
+This message indicates that CPU 5 detected that it was causing a stall,
+and that the stall was affecting RCU-sched. This message will normally be
+followed by a stack dump of the offending CPU. On TREE_RCU kernel builds,
+RCU and RCU-sched are implemented by the same underlying mechanism,
+while on TREE_PREEMPT_RCU kernel builds, RCU is instead implemented
+by rcu_preempt_state.
+
+On the other hand, if the offending CPU fails to print out a stall-warning
+message quickly enough, some other CPU will print a message similar to
+the following:
+
+INFO: rcu_bh_state detected stalls on CPUs/tasks: { 3 5 } (detected by 2, 2502 jiffies)
+
+This message indicates that CPU 2 detected that CPUs 3 and 5 were both
+causing stalls, and that the stall was affecting RCU-bh. This message
+will normally be followed by stack dumps for each CPU. Please note that
+TREE_PREEMPT_RCU builds can be stalled by tasks as well as by CPUs,
+and that the tasks will be indicated by PID, for example, "P3421".
+It is even possible for a rcu_preempt_state stall to be caused by both
+CPUs -and- tasks, in which case the offending CPUs and tasks will all
+be called out in the list.
+
+Finally, if the grace period ends just as the stall warning starts
+printing, there will be a spurious stall-warning message:
+
+INFO: rcu_bh_state detected stalls on CPUs/tasks: { } (detected by 4, 2502 jiffies)
+
+This is rare, but does happen from time to time in real life.
+
+So your kernel printed an RCU CPU stall warning. The next question is
+"What caused it?" The following problems can result in RCU CPU stall
+warnings:
o A CPU looping in an RCU read-side critical section.
-o A CPU looping with interrupts disabled.
+o A CPU looping with interrupts disabled. This condition can
+ result in RCU-sched and RCU-bh stalls.
-o A CPU looping with preemption disabled.
+o A CPU looping with preemption disabled. This condition can
+ result in RCU-sched stalls and, if ksoftirqd is in use, RCU-bh
+ stalls.
+
+o A CPU looping with bottom halves disabled. This condition can
+ result in RCU-sched and RCU-bh stalls.
o For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the kernel
without invoking schedule().
@@ -39,20 +83,24 @@ o For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the kernel
o A bug in the RCU implementation.
o A hardware failure. This is quite unlikely, but has occurred
- at least once in a former life. A CPU failed in a running system,
+ at least once in real life. A CPU failed in a running system,
becoming unresponsive, but not causing an immediate crash.
This resulted in a series of RCU CPU stall warnings, eventually
leading to the realization that the CPU had failed.
-The RCU, RCU-sched, and RCU-bh implementations have CPU stall warning.
-SRCU does not do so directly, but its calls to synchronize_sched() will
-result in RCU-sched detecting any CPU stalls that might be occurring.
-
-To diagnose the cause of the stall, inspect the stack traces. The offending
-function will usually be near the top of the stack. If you have a series
-of stall warnings from a single extended stall, comparing the stack traces
-can often help determine where the stall is occurring, which will usually
-be in the function nearest the top of the stack that stays the same from
-trace to trace.
+The RCU, RCU-sched, and RCU-bh implementations have CPU stall
+warnings. SRCU does not have its own CPU stall warnings, but its
+calls to synchronize_sched() will result in RCU-sched detecting
+RCU-sched-related CPU stalls. Please note that RCU only detects
+CPU stalls when there is a grace period in progress. No grace period,
+no CPU stall warnings.
+
+To diagnose the cause of the stall, inspect the stack traces.
+The offending function will usually be near the top of the stack.
+If you have a series of stall warnings from a single extended stall,
+comparing the stack traces can often help determine where the stall
+is occurring, which will usually be in the function nearest the top of
+that portion of the stack which remains the same from trace to trace.
+If you can reliably trigger the stall, ftrace can be quite helpful.
RCU bugs can often be debugged with the help of CONFIG_RCU_TRACE.
diff --git a/Documentation/RCU/torture.txt b/Documentation/RCU/torture.txt
index 0e50bc2aa1e2..5d9016795fd8 100644
--- a/Documentation/RCU/torture.txt
+++ b/Documentation/RCU/torture.txt
@@ -182,16 +182,6 @@ Similarly, sched_expedited RCU provides the following:
sched_expedited-torture: Reader Pipe: 12660320201 95875 0 0 0 0 0 0 0 0 0
sched_expedited-torture: Reader Batch: 12660424885 0 0 0 0 0 0 0 0 0 0
sched_expedited-torture: Free-Block Circulation: 1090795 1090795 1090794 1090793 1090792 1090791 1090790 1090789 1090788 1090787 0
- state: -1 / 0:0 3:0 4:0
-
-As before, the first four lines are similar to those for RCU.
-The last line shows the task-migration state. The first number is
--1 if synchronize_sched_expedited() is idle, -2 if in the process of
-posting wakeups to the migration kthreads, and N when waiting on CPU N.
-Each of the colon-separated fields following the "/" is a CPU:state pair.
-Valid states are "0" for idle, "1" for waiting for quiescent state,
-"2" for passed through quiescent state, and "3" when a race with a
-CPU-hotplug event forces use of the synchronize_sched() primitive.
USAGE
diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt
index 8608fd85e921..efd8cc95c06b 100644
--- a/Documentation/RCU/trace.txt
+++ b/Documentation/RCU/trace.txt
@@ -256,23 +256,23 @@ o Each element of the form "1/1 0:127 ^0" represents one struct
The output of "cat rcu/rcu_pending" looks as follows:
rcu_sched:
- 0 np=255892 qsp=53936 cbr=0 cng=14417 gpc=10033 gps=24320 nf=6445 nn=146741
- 1 np=261224 qsp=54638 cbr=0 cng=25723 gpc=16310 gps=2849 nf=5912 nn=155792
- 2 np=237496 qsp=49664 cbr=0 cng=2762 gpc=45478 gps=1762 nf=1201 nn=136629
- 3 np=236249 qsp=48766 cbr=0 cng=286 gpc=48049 gps=1218 nf=207 nn=137723
- 4 np=221310 qsp=46850 cbr=0 cng=26 gpc=43161 gps=4634 nf=3529 nn=123110
- 5 np=237332 qsp=48449 cbr=0 cng=54 gpc=47920 gps=3252 nf=201 nn=137456
- 6 np=219995 qsp=46718 cbr=0 cng=50 gpc=42098 gps=6093 nf=4202 nn=120834
- 7 np=249893 qsp=49390 cbr=0 cng=72 gpc=38400 gps=17102 nf=41 nn=144888
+ 0 np=255892 qsp=53936 rpq=85 cbr=0 cng=14417 gpc=10033 gps=24320 nf=6445 nn=146741
+ 1 np=261224 qsp=54638 rpq=33 cbr=0 cng=25723 gpc=16310 gps=2849 nf=5912 nn=155792
+ 2 np=237496 qsp=49664 rpq=23 cbr=0 cng=2762 gpc=45478 gps=1762 nf=1201 nn=136629
+ 3 np=236249 qsp=48766 rpq=98 cbr=0 cng=286 gpc=48049 gps=1218 nf=207 nn=137723
+ 4 np=221310 qsp=46850 rpq=7 cbr=0 cng=26 gpc=43161 gps=4634 nf=3529 nn=123110
+ 5 np=237332 qsp=48449 rpq=9 cbr=0 cng=54 gpc=47920 gps=3252 nf=201 nn=137456
+ 6 np=219995 qsp=46718 rpq=12 cbr=0 cng=50 gpc=42098 gps=6093 nf=4202 nn=120834
+ 7 np=249893 qsp=49390 rpq=42 cbr=0 cng=72 gpc=38400 gps=17102 nf=41 nn=144888
rcu_bh:
- 0 np=146741 qsp=1419 cbr=0 cng=6 gpc=0 gps=0 nf=2 nn=145314
- 1 np=155792 qsp=12597 cbr=0 cng=0 gpc=4 gps=8 nf=3 nn=143180
- 2 np=136629 qsp=18680 cbr=0 cng=0 gpc=7 gps=6 nf=0 nn=117936
- 3 np=137723 qsp=2843 cbr=0 cng=0 gpc=10 gps=7 nf=0 nn=134863
- 4 np=123110 qsp=12433 cbr=0 cng=0 gpc=4 gps=2 nf=0 nn=110671
- 5 np=137456 qsp=4210 cbr=0 cng=0 gpc=6 gps=5 nf=0 nn=133235
- 6 np=120834 qsp=9902 cbr=0 cng=0 gpc=6 gps=3 nf=2 nn=110921
- 7 np=144888 qsp=26336 cbr=0 cng=0 gpc=8 gps=2 nf=0 nn=118542
+ 0 np=146741 qsp=1419 rpq=6 cbr=0 cng=6 gpc=0 gps=0 nf=2 nn=145314
+ 1 np=155792 qsp=12597 rpq=3 cbr=0 cng=0 gpc=4 gps=8 nf=3 nn=143180
+ 2 np=136629 qsp=18680 rpq=1 cbr=0 cng=0 gpc=7 gps=6 nf=0 nn=117936
+ 3 np=137723 qsp=2843 rpq=0 cbr=0 cng=0 gpc=10 gps=7 nf=0 nn=134863
+ 4 np=123110 qsp=12433 rpq=0 cbr=0 cng=0 gpc=4 gps=2 nf=0 nn=110671
+ 5 np=137456 qsp=4210 rpq=1 cbr=0 cng=0 gpc=6 gps=5 nf=0 nn=133235
+ 6 np=120834 qsp=9902 rpq=2 cbr=0 cng=0 gpc=6 gps=3 nf=2 nn=110921
+ 7 np=144888 qsp=26336 rpq=0 cbr=0 cng=0 gpc=8 gps=2 nf=0 nn=118542
As always, this is once again split into "rcu_sched" and "rcu_bh"
portions, with CONFIG_TREE_PREEMPT_RCU kernels having an additional
@@ -284,6 +284,9 @@ o "np" is the number of times that __rcu_pending() has been invoked
o "qsp" is the number of times that the RCU was waiting for a
quiescent state from this CPU.
+o "rpq" is the number of times that the CPU had passed through
+ a quiescent state, but not yet reported it to RCU.
+
o "cbr" is the number of times that this CPU had RCU callbacks
that had passed through a grace period, and were thus ready
to be invoked.
diff --git a/Documentation/Smack.txt b/Documentation/Smack.txt
index 34614b4c708e..e9dab41c0fe0 100644
--- a/Documentation/Smack.txt
+++ b/Documentation/Smack.txt
@@ -73,7 +73,7 @@ NOTE: Smack labels are limited to 23 characters. The attr command
If you don't do anything special all users will get the floor ("_")
label when they log in. If you do want to log in via the hacked ssh
at other labels use the attr command to set the smack value on the
-home directory and it's contents.
+home directory and its contents.
You can add access rules in /etc/smack/accesses. They take the form:
diff --git a/Documentation/SubmitChecklist b/Documentation/SubmitChecklist
index 8916ca48bc95..da0382daa395 100644
--- a/Documentation/SubmitChecklist
+++ b/Documentation/SubmitChecklist
@@ -18,6 +18,8 @@ kernel patches.
2b: Passes allnoconfig, allmodconfig
+2c: Builds successfully when using O=builddir
+
3: Builds on multiple CPU architectures by using local cross-compile tools
or some other build farm.
@@ -95,3 +97,13 @@ kernel patches.
25: If any ioctl's are added by the patch, then also update
Documentation/ioctl/ioctl-number.txt.
+
+26: If your modified source code depends on or uses any of the kernel
+ APIs or features that are related to the following kconfig symbols,
+ then test multiple builds with the related kconfig symbols disabled
+ and/or =m (if that option is available) [not all of these at the
+ same time, just various/random combinations of them]:
+
+ CONFIG_SMP, CONFIG_SYSFS, CONFIG_PROC_FS, CONFIG_INPUT, CONFIG_PCI,
+ CONFIG_BLOCK, CONFIG_PM, CONFIG_HOTPLUG, CONFIG_MAGIC_SYSRQ,
+ CONFIG_NET, CONFIG_INET=n (but latter with CONFIG_NET=y)
diff --git a/Documentation/acpi/apei/einj.txt b/Documentation/acpi/apei/einj.txt
new file mode 100644
index 000000000000..438ef33c8fd0
--- /dev/null
+++ b/Documentation/acpi/apei/einj.txt
@@ -0,0 +1,49 @@
+ APEI Error INJection
+ ~~~~~~~~~~~~~~~~~~~~
+
+EINJ provides a hardware error injection mechanism. It is very useful
+for debugging and testing other APEI and RAS features.
+
+To use EINJ, make sure the following are enabled in your kernel
+configuration:
+
+CONFIG_DEBUG_FS
+CONFIG_ACPI_APEI
+CONFIG_ACPI_APEI_EINJ
+
+The user interface of EINJ is in the debug file system, under the
+directory apei/einj. The following files are provided.
+
+- available_error_type
+  Reading this file returns the error injection capability of the
+  platform, that is, which error types are supported. The error type
+  definitions are as follows: the left field is the error type value,
+  the right field is the error description.
+
+ 0x00000001 Processor Correctable
+ 0x00000002 Processor Uncorrectable non-fatal
+ 0x00000004 Processor Uncorrectable fatal
+ 0x00000008 Memory Correctable
+ 0x00000010 Memory Uncorrectable non-fatal
+ 0x00000020 Memory Uncorrectable fatal
+ 0x00000040 PCI Express Correctable
+ 0x00000080 PCI Express Uncorrectable fatal
+ 0x00000100 PCI Express Uncorrectable non-fatal
+ 0x00000200 Platform Correctable
+ 0x00000400 Platform Uncorrectable non-fatal
+ 0x00000800 Platform Uncorrectable fatal
+
+  The format of the file contents is as above, except that only the
+  available error type lines are present.
+
+- error_type
+ This file is used to set the error type value. The error type value
+  is defined in the "available_error_type" description.
+
+- error_inject
+ Write any integer to this file to trigger the error
+  injection. Before doing this, please specify all necessary error
+  parameters; a short example session is sketched below.
+
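+A minimal illustrative session, assuming debugfs is mounted at
+/sys/kernel/debug and that the platform reports support for memory
+errors (the values shown are only an example):
+
+  # cd /sys/kernel/debug/apei/einj
+  # cat available_error_type
+  0x00000008	Memory Correctable
+  0x00000010	Memory Uncorrectable non-fatal
+  # echo 0x8 > error_type
+  # echo 1 > error_inject
+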
+For more information about EINJ, please refer to ACPI specification
+version 4.0, section 17.5.
diff --git a/Documentation/arm/00-INDEX b/Documentation/arm/00-INDEX
index 82e418d648d0..7f5fc3ba9c91 100644
--- a/Documentation/arm/00-INDEX
+++ b/Documentation/arm/00-INDEX
@@ -20,6 +20,8 @@ Samsung-S3C24XX
- S3C24XX ARM Linux Overview
Sharp-LH
- Linux on Sharp LH79524 and LH7A40X System On a Chip (SOC)
+SPEAr
+ - ST SPEAr platform Linux Overview
VFP/
- Release notes for Linux Kernel Vector Floating Point support code
empeg/
diff --git a/Documentation/arm/SA1100/ADSBitsy b/Documentation/arm/SA1100/ADSBitsy
index 7197a9e958ee..f9f62e8c0719 100644
--- a/Documentation/arm/SA1100/ADSBitsy
+++ b/Documentation/arm/SA1100/ADSBitsy
@@ -32,7 +32,7 @@ Notes:
- The flash on board is divided into 3 partitions.
You should be careful to use flash on board.
- It's partition is different from GraphicsClient Plus and GraphicsMaster
+ Its partition is different from GraphicsClient Plus and GraphicsMaster
- 16bpp mode requires a different cable than what ships with the board.
Contact ADS or look through the manual to wire your own. Currently,
diff --git a/Documentation/arm/SPEAr/overview.txt b/Documentation/arm/SPEAr/overview.txt
new file mode 100644
index 000000000000..253a35c6f782
--- /dev/null
+++ b/Documentation/arm/SPEAr/overview.txt
@@ -0,0 +1,60 @@
+ SPEAr ARM Linux Overview
+ ==========================
+
+Introduction
+------------
+
+ SPEAr (Structured Processor Enhanced Architecture).
+ weblink : http://www.st.com/spear
+
+  The ST Microelectronics SPEAr range of ARM9/CortexA9 System-on-Chip CPUs is
+  supported by the 'spear' platform of ARM Linux. Currently SPEAr300,
+ SPEAr310, SPEAr320 and SPEAr600 SOCs are supported. Support for the SPEAr13XX
+ series is in progress.
+
+ Hierarchy in SPEAr is as follows:
+
+ SPEAr (Platform)
+ - SPEAr3XX (3XX SOC series, based on ARM9)
+ - SPEAr300 (SOC)
+ - SPEAr300_EVB (Evaluation Board)
+ - SPEAr310 (SOC)
+ - SPEAr310_EVB (Evaluation Board)
+ - SPEAr320 (SOC)
+ - SPEAr320_EVB (Evaluation Board)
+ - SPEAr6XX (6XX SOC series, based on ARM9)
+ - SPEAr600 (SOC)
+ - SPEAr600_EVB (Evaluation Board)
+ - SPEAr13XX (13XX SOC series, based on ARM CORTEXA9)
+ - SPEAr1300 (SOC)
+
+ Configuration
+ -------------
+
+ A generic configuration is provided for each machine, and can be used as the
+ default by
+ make spear600_defconfig
+ make spear300_defconfig
+ make spear310_defconfig
+ make spear320_defconfig
+
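+  As an illustration, a configure-and-build sequence for one of these
+  boards might look like the following (the cross compiler prefix is only
+  an example and depends on the local toolchain):
+
+	make ARCH=arm spear300_defconfig
+	make ARCH=arm CROSS_COMPILE=arm-linux-gnueabi-
+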
+ Layout
+ ------
+
+ The common files for multiple machine families (SPEAr3XX, SPEAr6XX and
+ SPEAr13XX) are located in the platform code contained in arch/arm/plat-spear
+ with headers in plat/.
+
+  Each machine series has a directory named arch/arm/mach-spear followed by
+  the series name, e.g. mach-spear3xx, mach-spear6xx and mach-spear13xx.
+
+  The common file for machines of the spear3xx family is
+  mach-spear3xx/spear3xx.c, and for spear6xx it is mach-spear6xx/spear6xx.c.
+  The mach-spear* directories also contain SoC/machine specific files, like
+  spear300.c, spear310.c, spear320.c and spear600.c, as well as board
+  specific files for each machine type.
+
+
+ Document Author
+ ---------------
+
+ Viresh Kumar, (c) 2010 ST Microelectronics
diff --git a/Documentation/arm/Sharp-LH/ADC-LH7-Touchscreen b/Documentation/arm/Sharp-LH/ADC-LH7-Touchscreen
index 1e6a23fdf2fc..dc460f055647 100644
--- a/Documentation/arm/Sharp-LH/ADC-LH7-Touchscreen
+++ b/Documentation/arm/Sharp-LH/ADC-LH7-Touchscreen
@@ -7,7 +7,7 @@ The driver only implements a four-wire touch panel protocol.
The touchscreen driver is maintenance free except for the pen-down or
touch threshold. Some resistive displays and board combinations may
-require tuning of this threshold. The driver exposes some of it's
+require tuning of this threshold. The driver exposes some of its
internal state in the sys filesystem. If the kernel is configured
with it, CONFIG_SYSFS, and sysfs is mounted at /sys, there will be a
directory
diff --git a/Documentation/atomic_ops.txt b/Documentation/atomic_ops.txt
index 396bec3b74ed..ac4d47187122 100644
--- a/Documentation/atomic_ops.txt
+++ b/Documentation/atomic_ops.txt
@@ -320,7 +320,7 @@ counter decrement would not become globally visible until the
obj->active update does.
As a historical note, 32-bit Sparc used to only allow usage of
-24-bits of it's atomic_t type. This was because it used 8 bits
+24-bits of its atomic_t type. This was because it used 8 bits
as a spinlock for SMP safety. Sparc32 lacked a "compare and swap"
type instruction. However, 32-bit Sparc has since been moved over
to a "hash table of spinlocks" scheme, that allows the full 32-bit
diff --git a/Documentation/blackfin/bfin-gpio-notes.txt b/Documentation/blackfin/bfin-gpio-notes.txt
index 9898c7ded7d3..f731c1e56475 100644
--- a/Documentation/blackfin/bfin-gpio-notes.txt
+++ b/Documentation/blackfin/bfin-gpio-notes.txt
@@ -43,7 +43,7 @@
void bfin_gpio_irq_free(unsigned gpio);
The request functions will record the function state for a certain pin,
- the free functions will clear it's function state.
+ the free functions will clear its function state.
Once a pin is requested, it can't be requested again before it is freed by
previous caller, otherwise kernel will dump stacks, and the request
function fail.
diff --git a/Documentation/cachetlb.txt b/Documentation/cachetlb.txt
index 2b5f823abd03..9164ae3b83bc 100644
--- a/Documentation/cachetlb.txt
+++ b/Documentation/cachetlb.txt
@@ -5,7 +5,7 @@
This document describes the cache/tlb flushing interfaces called
by the Linux VM subsystem. It enumerates over each interface,
-describes it's intended purpose, and what side effect is expected
+describes its intended purpose, and what side effect is expected
after the interface is invoked.
The side effects described below are stated for a uniprocessor
@@ -231,7 +231,7 @@ require a whole different set of interfaces to handle properly.
The biggest problem is that of virtual aliasing in the data cache
of a processor.
-Is your port susceptible to virtual aliasing in it's D-cache?
+Is your port susceptible to virtual aliasing in its D-cache?
Well, if your D-cache is virtually indexed, is larger in size than
PAGE_SIZE, and does not prevent multiple cache lines for the same
physical address from existing at once, you have this problem.
@@ -249,7 +249,7 @@ one way to solve this (in particular SPARC_FLAG_MMAPSHARED).
Next, you have to solve the D-cache aliasing issue for all
other cases. Please keep in mind that fact that, for a given page
mapped into some user address space, there is always at least one more
-mapping, that of the kernel in it's linear mapping starting at
+mapping, that of the kernel in its linear mapping starting at
PAGE_OFFSET. So immediately, once the first user maps a given
physical page into its address space, by implication the D-cache
aliasing problem has the potential to exist since the kernel already
diff --git a/Documentation/cgroups/blkio-controller.txt b/Documentation/cgroups/blkio-controller.txt
index 630879cd9a42..48e0b21b0059 100644
--- a/Documentation/cgroups/blkio-controller.txt
+++ b/Documentation/cgroups/blkio-controller.txt
@@ -17,6 +17,9 @@ HOWTO
You can do a very simple testing of running two dd threads in two different
cgroups. Here is what you can do.
+- Enable Block IO controller
+ CONFIG_BLK_CGROUP=y
+
- Enable group scheduling in CFQ
CONFIG_CFQ_GROUP_IOSCHED=y
@@ -54,32 +57,52 @@ cgroups. Here is what you can do.
Various user visible config options
===================================
-CONFIG_CFQ_GROUP_IOSCHED
- - Enables group scheduling in CFQ. Currently only 1 level of group
- creation is allowed.
-
-CONFIG_DEBUG_CFQ_IOSCHED
- - Enables some debugging messages in blktrace. Also creates extra
- cgroup file blkio.dequeue.
-
-Config options selected automatically
-=====================================
-These config options are not user visible and are selected/deselected
-automatically based on IO scheduler configuration.
-
CONFIG_BLK_CGROUP
- - Block IO controller. Selected by CONFIG_CFQ_GROUP_IOSCHED.
+ - Block IO controller.
CONFIG_DEBUG_BLK_CGROUP
- - Debug help. Selected by CONFIG_DEBUG_CFQ_IOSCHED.
+	- Debug help. Right now some additional stats files show up in the
+	  cgroup if this option is enabled.
+
+CONFIG_CFQ_GROUP_IOSCHED
+ - Enables group scheduling in CFQ. Currently only 1 level of group
+ creation is allowed.
Details of cgroup files
=======================
- blkio.weight
- - Specifies per cgroup weight.
-
+	  - Specifies per cgroup weight. This is the default weight of the
+	    group on all devices, unless overridden by a per-device rule
+	    (see blkio.weight_device).
Currently allowed range of weights is from 100 to 1000.
+- blkio.weight_device
+ - One can specify per cgroup per device rules using this interface.
+ These rules override the default value of group weight as specified
+ by blkio.weight.
+
+	  Following is the format:
+
+	  # echo dev_maj:dev_minor weight > /path/to/cgroup/blkio.weight_device
+ Configure weight=300 on /dev/sdb (8:16) in this cgroup
+ # echo 8:16 300 > blkio.weight_device
+ # cat blkio.weight_device
+ dev weight
+ 8:16 300
+
+ Configure weight=500 on /dev/sda (8:0) in this cgroup
+ # echo 8:0 500 > blkio.weight_device
+ # cat blkio.weight_device
+ dev weight
+ 8:0 500
+ 8:16 300
+
+ Remove specific weight for /dev/sda in this cgroup
+ # echo 8:0 0 > blkio.weight_device
+ # cat blkio.weight_device
+ dev weight
+ 8:16 300
+
- blkio.time
- disk time allocated to cgroup per device in milliseconds. First
two fields specify the major and minor number of the device and
@@ -92,13 +115,105 @@ Details of cgroup files
third field specifies the number of sectors transferred by the
group to/from the device.
+- blkio.io_service_bytes
+ - Number of bytes transferred to/from the disk by the group. These
+ are further divided by the type of operation - read or write, sync
+ or async. First two fields specify the major and minor number of the
+ device, third field specifies the operation type and the fourth field
+ specifies the number of bytes.
+
+- blkio.io_serviced
+ - Number of IOs completed to/from the disk by the group. These
+ are further divided by the type of operation - read or write, sync
+ or async. First two fields specify the major and minor number of the
+ device, third field specifies the operation type and the fourth field
+ specifies the number of IOs.
+
+- blkio.io_service_time
+ - Total amount of time between request dispatch and request completion
+ for the IOs done by this cgroup. This is in nanoseconds to make it
+ meaningful for flash devices too. For devices with queue depth of 1,
+ this time represents the actual service time. When queue_depth > 1,
+ that is no longer true as requests may be served out of order. This
+ may cause the service time for a given IO to include the service time
+ of multiple IOs when served out of order which may result in total
+ io_service_time > actual time elapsed. This time is further divided by
+ the type of operation - read or write, sync or async. First two fields
+ specify the major and minor number of the device, third field
+ specifies the operation type and the fourth field specifies the
+ io_service_time in ns.
+
+- blkio.io_wait_time
+ - Total amount of time the IOs for this cgroup spent waiting in the
+ scheduler queues for service. This can be greater than the total time
+ elapsed since it is cumulative io_wait_time for all IOs. It is not a
+ measure of total time the cgroup spent waiting but rather a measure of
+ the wait_time for its individual IOs. For devices with queue_depth > 1
+ this metric does not include the time spent waiting for service once
+ the IO is dispatched to the device but till it actually gets serviced
+ (there might be a time lag here due to re-ordering of requests by the
+ device). This is in nanoseconds to make it meaningful for flash
+ devices too. This time is further divided by the type of operation -
+ read or write, sync or async. First two fields specify the major and
+ minor number of the device, third field specifies the operation type
+ and the fourth field specifies the io_wait_time in ns.
+
+- blkio.io_merged
+ - Total number of bios/requests merged into requests belonging to this
+ cgroup. This is further divided by the type of operation - read or
+ write, sync or async.
+
+- blkio.io_queued
+ - Total number of requests queued up at any given instant for this
+ cgroup. This is further divided by the type of operation - read or
+ write, sync or async.
+
+- blkio.avg_queue_size
+ - Debugging aid only enabled if CONFIG_DEBUG_BLK_CGROUP=y.
+ The average queue size for this cgroup over the entire time of this
+ cgroup's existence. Queue size samples are taken each time one of the
+ queues of this cgroup gets a timeslice.
+
+- blkio.group_wait_time
+ - Debugging aid only enabled if CONFIG_DEBUG_BLK_CGROUP=y.
+ This is the amount of time the cgroup had to wait since it became busy
+ (i.e., went from 0 to 1 request queued) to get a timeslice for one of
+ its queues. This is different from the io_wait_time which is the
+ cumulative total of the amount of time spent by each IO in that cgroup
+ waiting in the scheduler queue. This is in nanoseconds. If this is
+ read when the cgroup is in a waiting (for timeslice) state, the stat
+ will only report the group_wait_time accumulated till the last time it
+ got a timeslice and will not include the current delta.
+
+- blkio.empty_time
+ - Debugging aid only enabled if CONFIG_DEBUG_BLK_CGROUP=y.
+ This is the amount of time a cgroup spends without any pending
+ requests when not being served, i.e., it does not include any time
+ spent idling for one of the queues of the cgroup. This is in
+ nanoseconds. If this is read when the cgroup is in an empty state,
+ the stat will only report the empty_time accumulated till the last
+ time it had a pending request and will not include the current delta.
+
+- blkio.idle_time
+ - Debugging aid only enabled if CONFIG_DEBUG_BLK_CGROUP=y.
+ This is the amount of time spent by the IO scheduler idling for a
+	  given cgroup in anticipation of a better request than the existing ones
+ from other queues/cgroups. This is in nanoseconds. If this is read
+ when the cgroup is in an idling state, the stat will only report the
+ idle_time accumulated till the last idle period and will not include
+ the current delta.
+
- blkio.dequeue
- - Debugging aid only enabled if CONFIG_DEBUG_CFQ_IOSCHED=y. This
+ - Debugging aid only enabled if CONFIG_DEBUG_BLK_CGROUP=y. This
	  gives the statistics about how many times a group was dequeued
from service tree of the device. First two fields specify the major
and minor number of the device and third field specifies the number
of times a group was dequeued from a particular device.
+- blkio.reset_stats
+ - Writing an int to this file will result in resetting all the stats
+ for that cgroup.
+
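+  For example, a short session reading one of the stat files and then
+  clearing the counters might look like this (device numbers and counts
+  are purely illustrative):
+
+	# cat blkio.io_serviced
+	8:16 Read 105
+	8:16 Write 37
+	8:16 Sync 92
+	8:16 Async 50
+	8:16 Total 142
+	# echo 1 > blkio.reset_stats
+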
CFQ sysfs tunable
=================
/sys/block/<disk>/queue/iosched/group_isolation
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index a1ca5924faff..57444c2609fc 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -572,7 +572,7 @@ void cancel_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
Called when a task attach operation has failed after can_attach() has succeeded.
A subsystem whose can_attach() has some side-effects should provide this
-function, so that the subsytem can implement a rollback. If not, not necessary.
+function, so that the subsystem can implement a rollback. If not, it is not necessary.
This will be called only about subsystems whose can_attach() operation have
succeeded.
diff --git a/Documentation/cgroups/cpusets.txt b/Documentation/cgroups/cpusets.txt
index 4160df82b3f5..51682ab2dd1a 100644
--- a/Documentation/cgroups/cpusets.txt
+++ b/Documentation/cgroups/cpusets.txt
@@ -42,7 +42,7 @@ Nodes to a set of tasks. In this document "Memory Node" refers to
an on-line node that contains memory.
Cpusets constrain the CPU and Memory placement of tasks to only
-the resources within a tasks current cpuset. They form a nested
+the resources within a task's current cpuset. They form a nested
hierarchy visible in a virtual file system. These are the essential
hooks, beyond what is already present, required to manage dynamic
job placement on large systems.
@@ -53,11 +53,11 @@ Documentation/cgroups/cgroups.txt.
Requests by a task, using the sched_setaffinity(2) system call to
include CPUs in its CPU affinity mask, and using the mbind(2) and
set_mempolicy(2) system calls to include Memory Nodes in its memory
-policy, are both filtered through that tasks cpuset, filtering out any
+policy, are both filtered through that task's cpuset, filtering out any
CPUs or Memory Nodes not in that cpuset. The scheduler will not
schedule a task on a CPU that is not allowed in its cpus_allowed
vector, and the kernel page allocator will not allocate a page on a
-node that is not allowed in the requesting tasks mems_allowed vector.
+node that is not allowed in the requesting task's mems_allowed vector.
User level code may create and destroy cpusets by name in the cgroup
virtual file system, manage the attributes and permissions of these
@@ -121,9 +121,9 @@ Cpusets extends these two mechanisms as follows:
- Each task in the system is attached to a cpuset, via a pointer
in the task structure to a reference counted cgroup structure.
- Calls to sched_setaffinity are filtered to just those CPUs
- allowed in that tasks cpuset.
+ allowed in that task's cpuset.
- Calls to mbind and set_mempolicy are filtered to just
- those Memory Nodes allowed in that tasks cpuset.
+ those Memory Nodes allowed in that task's cpuset.
- The root cpuset contains all the systems CPUs and Memory
Nodes.
- For any cpuset, one can define child cpusets containing a subset
@@ -141,11 +141,11 @@ into the rest of the kernel, none in performance critical paths:
- in init/main.c, to initialize the root cpuset at system boot.
- in fork and exit, to attach and detach a task from its cpuset.
- in sched_setaffinity, to mask the requested CPUs by what's
- allowed in that tasks cpuset.
+ allowed in that task's cpuset.
- in sched.c migrate_live_tasks(), to keep migrating tasks within
the CPUs allowed by their cpuset, if possible.
- in the mbind and set_mempolicy system calls, to mask the requested
- Memory Nodes by what's allowed in that tasks cpuset.
+ Memory Nodes by what's allowed in that task's cpuset.
- in page_alloc.c, to restrict memory to allowed nodes.
- in vmscan.c, to restrict page recovery to the current cpuset.
@@ -155,7 +155,7 @@ new system calls are added for cpusets - all support for querying and
modifying cpusets is via this cpuset file system.
The /proc/<pid>/status file for each task has four added lines,
-displaying the tasks cpus_allowed (on which CPUs it may be scheduled)
+displaying the task's cpus_allowed (on which CPUs it may be scheduled)
and mems_allowed (on which Memory Nodes it may obtain memory),
in the two formats seen in the following example:
@@ -323,17 +323,17 @@ stack segment pages of a task.
By default, both kinds of memory spreading are off, and memory
pages are allocated on the node local to where the task is running,
-except perhaps as modified by the tasks NUMA mempolicy or cpuset
+except perhaps as modified by the task's NUMA mempolicy or cpuset
configuration, so long as sufficient free memory pages are available.
When new cpusets are created, they inherit the memory spread settings
of their parent.
Setting memory spreading causes allocations for the affected page
-or slab caches to ignore the tasks NUMA mempolicy and be spread
+or slab caches to ignore the task's NUMA mempolicy and be spread
instead. Tasks using mbind() or set_mempolicy() calls to set NUMA
mempolicies will not notice any change in these calls as a result of
-their containing tasks memory spread settings. If memory spreading
+their containing task's memory spread settings. If memory spreading
is turned off, then the currently specified NUMA mempolicy once again
applies to memory page allocations.
@@ -357,7 +357,7 @@ pages from the node returned by cpuset_mem_spread_node().
The cpuset_mem_spread_node() routine is also simple. It uses the
value of a per-task rotor cpuset_mem_spread_rotor to select the next
-node in the current tasks mems_allowed to prefer for the allocation.
+node in the current task's mems_allowed to prefer for the allocation.
This memory placement policy is also known (in other contexts) as
round-robin or interleave.
@@ -594,7 +594,7 @@ is attached, is subtle.
If a cpuset has its Memory Nodes modified, then for each task attached
to that cpuset, the next time that the kernel attempts to allocate
a page of memory for that task, the kernel will notice the change
-in the tasks cpuset, and update its per-task memory placement to
+in the task's cpuset, and update its per-task memory placement to
remain within the new cpusets memory placement. If the task was using
mempolicy MPOL_BIND, and the nodes to which it was bound overlap with
its new cpuset, then the task will continue to use whatever subset
@@ -603,13 +603,13 @@ was using MPOL_BIND and now none of its MPOL_BIND nodes are allowed
in the new cpuset, then the task will be essentially treated as if it
was MPOL_BIND bound to the new cpuset (even though its NUMA placement,
as queried by get_mempolicy(), doesn't change). If a task is moved
-from one cpuset to another, then the kernel will adjust the tasks
+from one cpuset to another, then the kernel will adjust the task's
memory placement, as above, the next time that the kernel attempts
to allocate a page of memory for that task.
If a cpuset has its 'cpuset.cpus' modified, then each task in that cpuset
will have its allowed CPU placement changed immediately. Similarly,
-if a tasks pid is written to another cpusets 'cpuset.tasks' file, then its
+if a task's pid is written to another cpusets 'cpuset.tasks' file, then its
allowed CPU placement is changed immediately. If such a task had been
bound to some subset of its cpuset using the sched_setaffinity() call,
the task will be allowed to run on any CPU allowed in its new cpuset,
@@ -626,16 +626,16 @@ cpusets memory placement policy 'cpuset.mems' subsequently changes.
If the cpuset flag file 'cpuset.memory_migrate' is set true, then when
tasks are attached to that cpuset, any pages that task had
allocated to it on nodes in its previous cpuset are migrated
-to the tasks new cpuset. The relative placement of the page within
+to the task's new cpuset. The relative placement of the page within
the cpuset is preserved during these migration operations if possible.
For example if the page was on the second valid node of the prior cpuset
then the page will be placed on the second valid node of the new cpuset.
-Also if 'cpuset.memory_migrate' is set true, then if that cpusets
+Also if 'cpuset.memory_migrate' is set true, then if that cpuset's
'cpuset.mems' file is modified, pages allocated to tasks in that
cpuset, that were on nodes in the previous setting of 'cpuset.mems',
will be moved to nodes in the new setting of 'mems.'
-Pages that were not in the tasks prior cpuset, or in the cpusets
+Pages that were not in the task's prior cpuset, or in the cpuset's
prior 'cpuset.mems' setting, will not be moved.
There is an exception to the above. If hotplug functionality is used
@@ -655,7 +655,7 @@ There is a second exception to the above. GFP_ATOMIC requests are
kernel internal allocations that must be satisfied, immediately.
The kernel may drop some request, in rare cases even panic, if a
GFP_ATOMIC alloc fails. If the request cannot be satisfied within
-the current tasks cpuset, then we relax the cpuset, and look for
+the current task's cpuset, then we relax the cpuset, and look for
memory anywhere we can find it. It's better to violate the cpuset
than stress the kernel.
diff --git a/Documentation/cgroups/memcg_test.txt b/Documentation/cgroups/memcg_test.txt
index f7f68b2ac199..b7eececfb195 100644
--- a/Documentation/cgroups/memcg_test.txt
+++ b/Documentation/cgroups/memcg_test.txt
@@ -244,7 +244,7 @@ Under below explanation, we assume CONFIG_MEM_RES_CTRL_SWAP=y.
we have to check if OLDPAGE/NEWPAGE is a valid page after commit().
8. LRU
- Each memcg has its own private LRU. Now, it's handling is under global
+ Each memcg has its own private LRU. Now, its handling is under global
VM's control (means that it's handled under global zone->lru_lock).
Almost all routines around memcg's LRU is called by global LRU's
list management functions under zone->lru_lock().
diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
index 3a6aecd078ba..6cab1f29da4c 100644
--- a/Documentation/cgroups/memory.txt
+++ b/Documentation/cgroups/memory.txt
@@ -263,7 +263,7 @@ some of the pages cached in the cgroup (page cache pages).
4.2 Task migration
-When a task migrates from one cgroup to another, it's charge is not
+When a task migrates from one cgroup to another, its charge is not
carried forward by default. The pages allocated from the original cgroup still
remain charged to it, the charge is dropped when the page is freed or
reclaimed.
diff --git a/Documentation/connector/connector.txt b/Documentation/connector/connector.txt
index 78c9466a9aa8..e5c5f5e6ab70 100644
--- a/Documentation/connector/connector.txt
+++ b/Documentation/connector/connector.txt
@@ -88,7 +88,7 @@ int cn_netlink_send(struct cn_msg *msg, u32 __groups, int gfp_mask);
int gfp_mask - GFP mask.
Note: When registering new callback user, connector core assigns
- netlink group to the user which is equal to it's id.idx.
+ netlink group to the user which is equal to its id.idx.
/*****************************************/
Protocol description.
diff --git a/Documentation/cpu-hotplug.txt b/Documentation/cpu-hotplug.txt
index a99d7031cdf9..45d5a217484f 100644
--- a/Documentation/cpu-hotplug.txt
+++ b/Documentation/cpu-hotplug.txt
@@ -2,7 +2,7 @@
Maintainers:
CPU Hotplug Core:
- Rusty Russell <rusty@rustycorp.com.au>
+ Rusty Russell <rusty@rustcorp.com.au>
Srivatsa Vaddagiri <vatsa@in.ibm.com>
i386:
Zwane Mwaikambo <zwane@arm.linux.org.uk>
diff --git a/Documentation/credentials.txt b/Documentation/credentials.txt
index df03169782ea..a2db35287003 100644
--- a/Documentation/credentials.txt
+++ b/Documentation/credentials.txt
@@ -408,9 +408,6 @@ This should be used inside the RCU read lock, as in the following example:
...
}
-A function need not get RCU read lock to use __task_cred() if it is holding a
-spinlock at the time as this implicitly holds the RCU read lock.
-
Should it be necessary to hold another task's credentials for a long period of
time, and possibly to sleep whilst doing so, then the caller should get a
reference on them using:
@@ -426,17 +423,16 @@ credentials, hiding the RCU magic from the caller:
uid_t task_uid(task) Task's real UID
uid_t task_euid(task) Task's effective UID
-If the caller is holding a spinlock or the RCU read lock at the time anyway,
-then:
+If the caller is holding the RCU read lock at the time anyway, then:
__task_cred(task)->uid
__task_cred(task)->euid
should be used instead. Similarly, if multiple aspects of a task's credentials
-need to be accessed, RCU read lock or a spinlock should be used, __task_cred()
-called, the result stored in a temporary pointer and then the credential
-aspects called from that before dropping the lock. This prevents the
-potentially expensive RCU magic from being invoked multiple times.
+need to be accessed, RCU read lock should be used, __task_cred() called, the
+result stored in a temporary pointer and then the credential aspects called
+from that before dropping the lock. This prevents the potentially expensive
+RCU magic from being invoked multiple times.
Should some other single aspect of another task's credentials need to be
accessed, then this can be used:
diff --git a/Documentation/development-process/2.Process b/Documentation/development-process/2.Process
index d750321acd5a..97726eba6102 100644
--- a/Documentation/development-process/2.Process
+++ b/Documentation/development-process/2.Process
@@ -151,7 +151,7 @@ The stages that a patch goes through are, generally:
well.
- Wider review. When the patch is getting close to ready for mainline
- inclusion, it will be accepted by a relevant subsystem maintainer -
+ inclusion, it should be accepted by a relevant subsystem maintainer -
though this acceptance is not a guarantee that the patch will make it
all the way to the mainline. The patch will show up in the maintainer's
subsystem tree and into the staging trees (described below). When the
@@ -159,6 +159,15 @@ The stages that a patch goes through are, generally:
the discovery of any problems resulting from the integration of this
patch with work being done by others.
+- Please note that most maintainers also have day jobs, so merging
+ your patch may not be their highest priority. If your patch is
+ getting feedback about changes that are needed, you should either
+ make those changes or justify why they should not be made. If your
+ patch has no review complaints but is not being merged by its
+ appropriate subsystem or driver maintainer, you should be persistent
+ in updating the patch to the current kernel so that it applies cleanly
+ and keep sending it for review and merging.
+
- Merging into the mainline. Eventually, a successful patch will be
merged into the mainline repository managed by Linus Torvalds. More
comments and/or problems may surface at this time; it is important that
@@ -258,12 +267,8 @@ an appropriate subsystem tree or be sent directly to Linus. In a typical
development cycle, approximately 10% of the patches going into the mainline
get there via -mm.
-The current -mm patch can always be found from the front page of
-
- http://kernel.org/
-
-Those who want to see the current state of -mm can get the "-mm of the
-moment" tree, found at:
+The current -mm patch is available in the "mmotm" (-mm of the moment)
+directory at:
http://userweb.kernel.org/~akpm/mmotm/
@@ -298,6 +303,12 @@ volatility of linux-next tends to make it a difficult development target.
See http://lwn.net/Articles/289013/ for more information on this topic, and
stay tuned; much is still in flux where linux-next is involved.
+Besides the mmotm and linux-next trees, the kernel source tree now contains
+the drivers/staging/ directory and many sub-directories for drivers or
+filesystems that are on their way to being added to the kernel tree
+proper, but they remain in drivers/staging/ while they still need more
+work.
+
2.5: TOOLS
@@ -319,9 +330,9 @@ developers; even if they do not use it for their own work, they'll need git
to keep up with what other developers (and the mainline) are doing.
Git is now packaged by almost all Linux distributions. There is a home
-page at
+page at:
- http://git.or.cz/
+ http://git-scm.com/
That page has pointers to documentation and tutorials. One should be
aware, in particular, of the Kernel Hacker's Guide to git, which has
diff --git a/Documentation/development-process/7.AdvancedTopics b/Documentation/development-process/7.AdvancedTopics
index a2cf74093aa1..837179447e17 100644
--- a/Documentation/development-process/7.AdvancedTopics
+++ b/Documentation/development-process/7.AdvancedTopics
@@ -25,7 +25,7 @@ long document in its own right. Instead, the focus here will be on how git
fits into the kernel development process in particular. Developers who
wish to come up to speed with git will find more information at:
- http://git.or.cz/
+ http://git-scm.com/
http://www.kernel.org/pub/software/scm/git/docs/user-manual.html
diff --git a/Documentation/device-mapper/dm-flakey.txt b/Documentation/device-mapper/dm-flakey.txt
new file mode 100644
index 000000000000..115cccaa17ef
--- /dev/null
+++ b/Documentation/device-mapper/dm-flakey.txt
@@ -0,0 +1,18 @@
+dm-flakey
+=========
+
+This target is the same as the linear target except that it returns I/O
+errors periodically. It's been found useful in simulating failing
+devices for testing purposes.
+
+Starting from the time the table is loaded, the device is available for
+<up interval> seconds, then returns errors for <down interval> seconds,
+and then this cycle repeats.
+
+Parameters: <dev path> <offset> <up interval> <down interval>
+ <dev path>: Full pathname to the underlying block-device, or a
+ "major:minor" device-number.
+ <offset>: Starting sector within the device.
+ <up interval>: Number of seconds device is available.
+ <down interval>: Number of seconds device returns errors.
+
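+As an illustration (the device name and size are hypothetical), a table
+line covering the whole of /dev/sdb1 that is healthy for 60 seconds and
+then returns errors for 10 seconds could be loaded with dmsetup:
+
+  # dmsetup create flaky0 --table \
+	"0 $(blockdev --getsz /dev/sdb1) flakey /dev/sdb1 0 60 10"
+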
diff --git a/Documentation/dvb/ci.txt b/Documentation/dvb/ci.txt
index 2ecd834585e6..4a0c2b56e690 100644
--- a/Documentation/dvb/ci.txt
+++ b/Documentation/dvb/ci.txt
@@ -41,7 +41,7 @@ This application requires the following to function properly as of now.
* Cards that fall in this category
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-At present the cards that fall in this category are the Twinhan and it's
+At present the cards that fall in this category are the Twinhan and its
clones, these cards are available as VVMER, Tomato, Hercules, Orange and
so on.
diff --git a/Documentation/dvb/contributors.txt b/Documentation/dvb/contributors.txt
index 4865addebe1c..47c30098dab6 100644
--- a/Documentation/dvb/contributors.txt
+++ b/Documentation/dvb/contributors.txt
@@ -1,7 +1,7 @@
Thanks go to the following people for patches and contributions:
Michael Hunold <m.hunold@gmx.de>
- for the initial saa7146 driver and it's recent overhaul
+ for the initial saa7146 driver and its recent overhaul
Christian Theiss
for his work on the initial Linux DVB driver
diff --git a/Documentation/edac.txt b/Documentation/edac.txt
index 79c533223762..0b875e8da969 100644
--- a/Documentation/edac.txt
+++ b/Documentation/edac.txt
@@ -6,6 +6,8 @@ Written by Doug Thompson <dougthompson@xmission.com>
7 Dec 2005
17 Jul 2007 Updated
+(c) Mauro Carvalho Chehab <mchehab@redhat.com>
+05 Aug 2009 Nehalem interface
EDAC is maintained and written by:
@@ -717,3 +719,153 @@ unique drivers for their hardware systems.
The 'test_device_edac' sample driver is located at the
bluesmoke.sourceforge.net project site for EDAC.
+=======================================================================
+NEHALEM USAGE OF EDAC APIs
+
+This chapter documents some EXPERIMENTAL mappings of the EDAC API used to
+handle the Nehalem EDAC driver. They will likely be changed in future
+versions of the driver.
+
+Due to the way Nehalem exports Memory Controller data, some adjustments
+were made in the i7core_edac driver. This chapter covers those differences.
+
+1) On Nehalem, there is one Memory Controller per QuickPath Interconnect
+   (QPI). In the driver, the term "socket" means one QPI. This is
+   associated with a physical CPU socket.
+
+   Each MC has 3 physical read channels, 3 physical write channels and
+   3 logical channels. The driver currently sees them as just 3 channels.
+   Each channel can have up to 3 DIMMs.
+
+   The minimum known unit is the DIMM. There is no information about csrows.
+   As the minimum unit the EDAC API maps is the csrow, the driver sequentially
+   maps each channel/DIMM pair to a different csrow.
+
+   For example, supposing the following layout:
+ Ch0 phy rd0, wr0 (0x063f4031): 2 ranks, UDIMMs
+ dimm 0 1024 Mb offset: 0, bank: 8, rank: 1, row: 0x4000, col: 0x400
+ dimm 1 1024 Mb offset: 4, bank: 8, rank: 1, row: 0x4000, col: 0x400
+ Ch1 phy rd1, wr1 (0x063f4031): 2 ranks, UDIMMs
+ dimm 0 1024 Mb offset: 0, bank: 8, rank: 1, row: 0x4000, col: 0x400
+ Ch2 phy rd3, wr3 (0x063f4031): 2 ranks, UDIMMs
+ dimm 0 1024 Mb offset: 0, bank: 8, rank: 1, row: 0x4000, col: 0x400
+ The driver will map it as:
+ csrow0: channel 0, dimm0
+ csrow1: channel 0, dimm1
+ csrow2: channel 1, dimm0
+ csrow3: channel 2, dimm0
+
+   In other words, the driver exports one
+   DIMM per csrow.
+
+ Each QPI is exported as a different memory controller.
+
+2) Nehalem MC has the ability to generate errors. The driver implements this
+   functionality via some error injection nodes:
+
+ For injecting a memory error, there are some sysfs nodes, under
+ /sys/devices/system/edac/mc/mc?/:
+
+ inject_addrmatch/*:
+ Controls the error injection mask register. It is possible to specify
+ several characteristics of the address to match an error code:
+ dimm = the affected dimm. Numbers are relative to a channel;
+ rank = the memory rank;
+ channel = the channel that will generate an error;
+ bank = the affected bank;
+ page = the page address;
+ column (or col) = the address column.
+ each of the above values can be set to "any" to match any valid value.
+
+ At driver init, all values are set to any.
+
+ For example, to generate an error at rank 1 of dimm 2, for any channel,
+ any bank, any page, any column:
+ echo 2 >/sys/devices/system/edac/mc/mc0/inject_addrmatch/dimm
+ echo 1 >/sys/devices/system/edac/mc/mc0/inject_addrmatch/rank
+
+ To return to the default behaviour of matching any, you can do:
+ echo any >/sys/devices/system/edac/mc/mc0/inject_addrmatch/dimm
+ echo any >/sys/devices/system/edac/mc/mc0/inject_addrmatch/rank
+
+ inject_eccmask:
+	specifies which bits will be affected by the injected error,
+
+ inject_section:
+ specifies what ECC cache section will get the error:
+ 3 for both
+ 2 for the highest
+ 1 for the lowest
+
+ inject_type:
+ specifies the type of error, being a combination of the following bits:
+ bit 0 - repeat
+ bit 1 - ecc
+ bit 2 - parity
+
+	inject_enable starts the error generation when a value other than 0
+	is written.
+
+	All injection variables can be read; root permission is needed to write them.
+
+ Datasheet states that the error will only be generated after a write on an
+ address that matches inject_addrmatch. It seems, however, that reading will
+ also produce an error.
+
+ For example, the following code will generate an error for any write access
+ at socket 0, on any DIMM/address on channel 2:
+
+ echo 2 >/sys/devices/system/edac/mc/mc0/inject_addrmatch/channel
+ echo 2 >/sys/devices/system/edac/mc/mc0/inject_type
+ echo 64 >/sys/devices/system/edac/mc/mc0/inject_eccmask
+ echo 3 >/sys/devices/system/edac/mc/mc0/inject_section
+ echo 1 >/sys/devices/system/edac/mc/mc0/inject_enable
+ dd if=/dev/mem of=/dev/null seek=16k bs=4k count=1 >& /dev/null
+
+   For socket 1, replace "mc0" with "mc1" in the above commands.
+
+ The generated error message will look like:
+
+ EDAC MC0: UE row 0, channel-a= 0 channel-b= 0 labels "-": NON_FATAL (addr = 0x0075b980, socket=0, Dimm=0, Channel=2, syndrome=0x00000040, count=1, Err=8c0000400001009f:4000080482 (read error: read ECC error))
+
+3) Nehalem specific Corrected Error memory counters
+
+   Nehalem has some registers to count memory errors. The driver uses those
+   registers to report Corrected Errors on devices with Registered DIMMs.
+
+   However, those counters don't work with Unregistered DIMMs. As the chipset
+   offers some counters that also work with UDIMMs (but with a coarser
+   granularity than the default ones), the driver exposes those registers for
+   UDIMM memories.
+
+ They can be read by looking at the contents of all_channel_counts/
+
+ $ for i in /sys/devices/system/edac/mc/mc0/all_channel_counts/*; do echo $i; cat $i; done
+ /sys/devices/system/edac/mc/mc0/all_channel_counts/udimm0
+ 0
+ /sys/devices/system/edac/mc/mc0/all_channel_counts/udimm1
+ 0
+ /sys/devices/system/edac/mc/mc0/all_channel_counts/udimm2
+ 0
+
+   What happens here is that errors on different csrows but with the same
+   dimm number will increment the same counter.
+ So, in this memory mapping:
+ csrow0: channel 0, dimm0
+ csrow1: channel 0, dimm1
+ csrow2: channel 1, dimm0
+ csrow3: channel 2, dimm0
+ The hardware will increment udimm0 for an error at the first dimm at either
+ csrow0, csrow2 or csrow3;
+ The hardware will increment udimm1 for an error at the second dimm at either
+ csrow0, csrow2 or csrow3;
+ The hardware will increment udimm2 for an error at the third dimm at either
+ csrow0, csrow2 or csrow3;
+
+4) Standard error counters
+
+   The standard error counters are generated when an mcelog error is received
+   by the driver. Since, with UDIMMs, this is counted by software, it is
+   possible that some errors could be lost. With RDIMMs, the counters display
+   the contents of the hardware registers.
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 05df0b7514b6..a3f57cda7cc7 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -241,16 +241,6 @@ Who: Thomas Gleixner <tglx@linutronix.de>
---------------------------
-What (Why):
- - xt_recent: the old ipt_recent proc dir
- (superseded by /proc/net/xt_recent)
-
-When: January 2009 or Linux 2.7.0, whichever comes first
-Why: Superseded by newer revisions or modules
-Who: Jan Engelhardt <jengelh@computergmbh.de>
-
----------------------------
-
What: GPIO autorequest on gpio_direction_{input,output}() in gpiolib
When: February 2010
Why: All callers should use explicit gpio_request()/gpio_free().
@@ -377,24 +367,6 @@ When: 2.6.33
Why: Should be implemented in userspace, policy daemon.
Who: Johannes Berg <johannes@sipsolutions.net>
----------------------------
-
-What: CONFIG_INOTIFY
-When: 2.6.33
-Why: last user (audit) will be converted to the newer more generic
- and more easily maintained fsnotify subsystem
-Who: Eric Paris <eparis@redhat.com>
-
-----------------------------
-
-What: lock_policy_rwsem_* and unlock_policy_rwsem_* will not be
- exported interface anymore.
-When: 2.6.33
-Why: cpu_policy_rwsem has a new cleaner definition making it local to
- cpufreq core and contained inside cpufreq.c. Other dependent
- drivers should not use it in order to safely avoid lockdep issues.
-Who: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
-
----------------------------
What: sound-slot/service-* module aliases and related clutters in
@@ -520,17 +492,6 @@ Who: Hans de Goede <hdegoede@redhat.com>
----------------------------
-What: corgikbd, spitzkbd, tosakbd driver
-When: 2.6.35
-Files: drivers/input/keyboard/{corgi,spitz,tosa}kbd.c
-Why: We now have a generic GPIO based matrix keyboard driver that
- are fully capable of handling all the keys on these devices.
- The original drivers manipulate the GPIO registers directly
- and so are difficult to maintain.
-Who: Eric Miao <eric.y.miao@gmail.com>
-
-----------------------------
-
What: corgi_ssp and corgi_ts driver
When: 2.6.35
Files: arch/arm/mach-pxa/corgi_ssp.c, drivers/input/touchscreen/corgi_ts.c
@@ -543,6 +504,24 @@ Who: Eric Miao <eric.y.miao@gmail.com>
----------------------------
+What: sysfs-class-rfkill state file
+When: Feb 2014
+Files: net/rfkill/core.c
+Why: Documented as obsolete since Feb 2010. This file is limited to 3
+ states while the rfkill drivers can have 4 states.
+Who: anybody or Florian Mickler <florian@mickler.org>
+
+----------------------------
+
+What: sysfs-class-rfkill claim file
+When: Feb 2012
+Files: net/rfkill/core.c
+Why:	It has not been possible to claim an rfkill driver since 2007. This is
+	documented as obsolete since Feb 2010.
+Who: anybody or Florian Mickler <florian@mickler.org>
+
+----------------------------
+
What: capifs
When: February 2011
Files: drivers/isdn/capi/capifs.*
@@ -564,6 +543,16 @@ Who: Avi Kivity <avi@redhat.com>
----------------------------
+What: xtime, wall_to_monotonic
+When: 2.6.36+
+Files: kernel/time/timekeeping.c include/linux/time.h
+Why: Cleaning up timekeeping internal values. Please use
+ existing timekeeping accessor functions to access
+ the equivalent functionality.
+Who: John Stultz <johnstul@us.ibm.com>
+
+----------------------------
+
What: KVM kernel-allocated memory slots
When: July 2010
Why: Since 2.6.25, kvm supports user-allocated memory slots, which are
@@ -592,6 +581,13 @@ Who: Len Brown <len.brown@intel.com>
----------------------------
+What: IRQF_DISABLED
+When: 2.6.36
+Why: The flag is a NOOP as we run interrupt handlers with interrupts disabled
+Who: Thomas Gleixner <tglx@linutronix.de>
+
+----------------------------
+
What: video4linux /dev/vtx teletext API support
When: 2.6.35
Files: drivers/media/video/saa5246a.c drivers/media/video/saa5249.c
@@ -612,3 +608,32 @@ Why: The vtx device nodes have been superseded by vbi device nodes
provided by the vtx API, then that functionality should be build
around the sliced VBI API instead.
Who: Hans Verkuil <hverkuil@xs4all.nl>
+
+----------------------------
+
+What: iwlwifi 50XX module parameters
+When: 2.6.40
+Why:	The "..50" module parameters were used to configure 5000 series and
+	up devices; a different set of module parameters with the same
+	functionality is also available for 4965. Consolidate both sets into
+	a single place in drivers/net/wireless/iwlwifi/iwl-agn.c.
+
+Who: Wey-Yi Guy <wey-yi.w.guy@intel.com>
+
+----------------------------
+
+What: iwl4965 alias support
+When: 2.6.40
+Why:	Internal alias support has been present in module-init-tools for some
+	time, so the MODULE_ALIAS("iwl4965") boilerplate aliases can be removed
+	with no impact.
+
+Who: Wey-Yi Guy <wey-yi.w.guy@intel.com>
+
+---------------------------
+
+What: xt_NOTRACK
+Files: net/netfilter/xt_NOTRACK.c
+When: April 2011
+Why: Superseded by xt_CT
+Who: Netfilter developer team <netfilter-devel@vger.kernel.org>
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 06bbbed71206..af1608070cd5 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -178,7 +178,7 @@ prototypes:
locking rules:
All except set_page_dirty may block
- BKL PageLocked(page) i_sem
+ BKL PageLocked(page) i_mutex
writepage: no yes, unlocks (see below)
readpage: no yes, unlocks
sync_page: no maybe
@@ -429,7 +429,7 @@ check_flags: no
implementations. If your fs is not using generic_file_llseek, you
need to acquire and release the appropriate locks in your ->llseek().
For many filesystems, it is probably safe to acquire the inode
-semaphore. Note some filesystems (i.e. remote ones) provide no
+mutex. Note some filesystems (i.e. remote ones) provide no
protection for i_size so you will need to use the BKL.
Note: ext2_release() was *the* source of contention on fs-intensive
diff --git a/Documentation/filesystems/autofs4-mount-control.txt b/Documentation/filesystems/autofs4-mount-control.txt
index 8f78ded4b648..51986bf08a4d 100644
--- a/Documentation/filesystems/autofs4-mount-control.txt
+++ b/Documentation/filesystems/autofs4-mount-control.txt
@@ -146,7 +146,7 @@ found to be inadequate, in this case. The Generic Netlink system was
used for this as raw Netlink would lead to a significant increase in
complexity. There's no question that the Generic Netlink system is an
elegant solution for common case ioctl functions but it's not a complete
-replacement probably because it's primary purpose in life is to be a
+replacement probably because its primary purpose in life is to be a
message bus implementation rather than specifically an ioctl replacement.
While it would be possible to work around this there is one concern
that lead to the decision to not use it. This is that the autofs
diff --git a/Documentation/filesystems/ceph.txt b/Documentation/filesystems/ceph.txt
index 0660c9f5deef..763d8ebbbebd 100644
--- a/Documentation/filesystems/ceph.txt
+++ b/Documentation/filesystems/ceph.txt
@@ -90,7 +90,7 @@ Mount Options
Specify the IP and/or port the client should bind to locally.
There is normally not much reason to do this. If the IP is not
specified, the client's IP address is determined by looking at the
- address it's connection to the monitor originates from.
+ address its connection to the monitor originates from.
wsize=X
Specify the maximum write size in bytes. By default there is no
diff --git a/Documentation/filesystems/dlmfs.txt b/Documentation/filesystems/dlmfs.txt
index c50bbb2d52b4..1b528b2ad809 100644
--- a/Documentation/filesystems/dlmfs.txt
+++ b/Documentation/filesystems/dlmfs.txt
@@ -47,7 +47,7 @@ You'll want to start heartbeating on a volume which all the nodes in
your lockspace can access. The easiest way to do this is via
ocfs2_hb_ctl (distributed with ocfs2-tools). Right now it requires
that an OCFS2 file system be in place so that it can automatically
-find it's heartbeat area, though it will eventually support heartbeat
+find its heartbeat area, though it will eventually support heartbeat
against raw disks.
Please see the ocfs2_hb_ctl and mkfs.ocfs2 manual pages distributed
diff --git a/Documentation/filesystems/ext3.txt b/Documentation/filesystems/ext3.txt
index 867c5b50cb42..272f80d5f966 100644
--- a/Documentation/filesystems/ext3.txt
+++ b/Documentation/filesystems/ext3.txt
@@ -59,8 +59,19 @@ commit=nrsec (*) Ext3 can be told to sync all its data and metadata
Setting it to very large values will improve
performance.
-barrier=1 This enables/disables barriers. barrier=0 disables
- it, barrier=1 enables it.
+barrier=<0(*)|1> This enables/disables the use of write barriers in
+barrier the jbd code. barrier=0 disables, barrier=1 enables.
+nobarrier (*) This also requires an IO stack which can support
+ barriers, and if jbd gets an error on a barrier
+ write, it will disable again with a warning.
+ Write barriers enforce proper on-disk ordering
+ of journal commits, making volatile disk write caches
+ safe to use, at some performance penalty. If
+ your disks are battery-backed in one way or another,
+ disabling barriers may safely improve performance.
+ The mount options "barrier" and "nobarrier" can
+ also be used to enable or disable barriers, for
+ consistency with other ext3 mount options.
orlov (*) This enables the new Orlov block allocator. It is
enabled by default.
diff --git a/Documentation/filesystems/fiemap.txt b/Documentation/filesystems/fiemap.txt
index 606233cd4618..1b805a0efbb0 100644
--- a/Documentation/filesystems/fiemap.txt
+++ b/Documentation/filesystems/fiemap.txt
@@ -38,7 +38,7 @@ flags, it will return EBADR and the contents of fm_flags will contain
the set of flags which caused the error. If the kernel is compatible
with all flags passed, the contents of fm_flags will be unmodified.
It is up to userspace to determine whether rejection of a particular
-flag is fatal to it's operation. This scheme is intended to allow the
+flag is fatal to its operation. This scheme is intended to allow the
fiemap interface to grow in the future but without losing
compatibility with old software.
@@ -56,7 +56,7 @@ If this flag is set, the kernel will sync the file before mapping extents.
* FIEMAP_FLAG_XATTR
If this flag is set, the extents returned will describe the inodes
-extended attribute lookup tree, instead of it's data tree.
+extended attribute lookup tree, instead of its data tree.
Extent Mapping
@@ -89,7 +89,7 @@ struct fiemap_extent {
};
All offsets and lengths are in bytes and mirror those on disk. It is valid
-for an extents logical offset to start before the request or it's logical
+for an extent's logical offset to start before the request or its logical
length to extend past the request. Unless FIEMAP_EXTENT_NOT_ALIGNED is
returned, fe_logical, fe_physical, and fe_length will be aligned to the
block size of the file system. With the exception of extents flagged as
@@ -125,7 +125,7 @@ been allocated for the file yet.
* FIEMAP_EXTENT_DELALLOC
- This will also set FIEMAP_EXTENT_UNKNOWN.
-Delayed allocation - while there is data for this extent, it's
+Delayed allocation - while there is data for this extent, its
physical location has not been allocated yet.
* FIEMAP_EXTENT_ENCODED
@@ -159,7 +159,7 @@ Data is located within a meta data block.
Data is packed into a block with data from other files.
* FIEMAP_EXTENT_UNWRITTEN
-Unwritten extent - the extent is allocated but it's data has not been
+Unwritten extent - the extent is allocated but its data has not been
initialized. This indicates the extent's data will be all zero if read
through the filesystem but the contents are undefined if read directly from
the device.
@@ -176,7 +176,7 @@ VFS -> File System Implementation
File systems wishing to support fiemap must implement a ->fiemap callback on
their inode_operations structure. The fs ->fiemap call is responsible for
-defining it's set of supported fiemap flags, and calling a helper function on
+defining its set of supported fiemap flags, and calling a helper function on
each discovered extent:
struct inode_operations {
diff --git a/Documentation/filesystems/fuse.txt b/Documentation/filesystems/fuse.txt
index 397a41adb4c3..13af4a49e7db 100644
--- a/Documentation/filesystems/fuse.txt
+++ b/Documentation/filesystems/fuse.txt
@@ -91,7 +91,7 @@ Mount options
'default_permissions'
By default FUSE doesn't check file access permissions, the
- filesystem is free to implement it's access policy or leave it to
+ filesystem is free to implement its access policy or leave it to
the underlying file access mechanism (e.g. in case of network
filesystems). This option enables permission checking, restricting
access based on file mode. It is usually useful together with the
@@ -171,7 +171,7 @@ or may honor them by sending a reply to the _original_ request, with
the error set to EINTR.
It is also possible that there's a race between processing the
-original request and it's INTERRUPT request. There are two possibilities:
+original request and its INTERRUPT request. There are two possibilities:
1) The INTERRUPT request is processed before the original request is
processed
diff --git a/Documentation/filesystems/gfs2.txt b/Documentation/filesystems/gfs2.txt
index 5e3ab8f3beff..0b59c0200912 100644
--- a/Documentation/filesystems/gfs2.txt
+++ b/Documentation/filesystems/gfs2.txt
@@ -1,7 +1,7 @@
Global File System
------------------
-http://sources.redhat.com/cluster/
+http://sources.redhat.com/cluster/wiki/
GFS is a cluster file system. It allows a cluster of computers to
simultaneously use a block device that is shared between them (with FC,
@@ -36,11 +36,11 @@ GFS2 is not on-disk compatible with previous versions of GFS, but it
is pretty close.
The following man pages can be found at the URL above:
- fsck.gfs2 to repair a filesystem
- gfs2_grow to expand a filesystem online
- gfs2_jadd to add journals to a filesystem online
- gfs2_tool to manipulate, examine and tune a filesystem
+ fsck.gfs2 to repair a filesystem
+ gfs2_grow to expand a filesystem online
+ gfs2_jadd to add journals to a filesystem online
+ gfs2_tool to manipulate, examine and tune a filesystem
gfs2_quota to examine and change quota values in a filesystem
gfs2_convert to convert a gfs filesystem to gfs2 in-place
mount.gfs2 to help mount(8) mount a filesystem
- mkfs.gfs2 to make a filesystem
+ mkfs.gfs2 to make a filesystem
diff --git a/Documentation/filesystems/hpfs.txt b/Documentation/filesystems/hpfs.txt
index fa45c3baed98..74630bd504fb 100644
--- a/Documentation/filesystems/hpfs.txt
+++ b/Documentation/filesystems/hpfs.txt
@@ -103,7 +103,7 @@ to analyze or change OS2SYS.INI.
Codepages
HPFS can contain several uppercasing tables for several codepages and each
-file has a pointer to codepage it's name is in. However OS/2 was created in
+file has a pointer to codepage its name is in. However OS/2 was created in
America where people don't care much about codepages and so multiple codepages
support is quite buggy. I have Czech OS/2 working in codepage 852 on my disk.
Once I booted English OS/2 working in cp 850 and I created a file on my 852
diff --git a/Documentation/filesystems/logfs.txt b/Documentation/filesystems/logfs.txt
index e64c94ba401a..bca42c22a143 100644
--- a/Documentation/filesystems/logfs.txt
+++ b/Documentation/filesystems/logfs.txt
@@ -59,7 +59,7 @@ Levels
------
Garbage collection (GC) may fail if all data is written
-indiscriminately. One requirement of GC is that data is seperated
+indiscriminately. One requirement of GC is that data is separated
roughly according to the distance between the tree root and the data.
Effectively that means all file data is on level 0, indirect blocks
are on levels 1, 2, 3 4 or 5 for 1x, 2x, 3x, 4x or 5x indirect blocks,
@@ -67,7 +67,7 @@ respectively. Inode file data is on level 6 for the inodes and 7-11
for indirect blocks.
Each segment contains objects of a single level only. As a result,
-each level requires its own seperate segment to be open for writing.
+each level requires its own separate segment to be open for writing.
Inode File
----------
@@ -106,9 +106,9 @@ Vim
---
By cleverly predicting the life time of data, it is possible to
-seperate long-living data from short-living data and thereby reduce
+separate long-living data from short-living data and thereby reduce
the GC overhead later. Each type of distinct life expectancy (vim) can
-have a seperate segment open for writing. Each (level, vim) tupel can
+have a separate segment open for writing. Each (level, vim) tuple can
be open just once. If an open segment with unknown vim is encountered
at mount time, it is closed and ignored henceforth.
diff --git a/Documentation/filesystems/nfs/rpc-cache.txt b/Documentation/filesystems/nfs/rpc-cache.txt
index 8a382bea6808..ebcaaee21616 100644
--- a/Documentation/filesystems/nfs/rpc-cache.txt
+++ b/Documentation/filesystems/nfs/rpc-cache.txt
@@ -185,7 +185,7 @@ failed lookup meant a definite 'no'.
request/response format
-----------------------
-While each cache is free to use it's own format for requests
+While each cache is free to use its own format for requests
and responses over channel, the following is recommended as
appropriate and support routines are available to help:
Each request or response record should be printable ASCII
diff --git a/Documentation/filesystems/nilfs2.txt b/Documentation/filesystems/nilfs2.txt
index cf6d0d85ca82..d3e7673995eb 100644
--- a/Documentation/filesystems/nilfs2.txt
+++ b/Documentation/filesystems/nilfs2.txt
@@ -50,8 +50,8 @@ NILFS2 supports the following mount options:
(*) == default
nobarrier Disables barriers.
-errors=continue(*) Keep going on a filesystem error.
-errors=remount-ro Remount the filesystem read-only on an error.
+errors=continue Keep going on a filesystem error.
+errors=remount-ro(*) Remount the filesystem read-only on an error.
errors=panic Panic and halt the machine if an error occurs.
cp=n Specify the checkpoint-number of the snapshot to be
mounted. Checkpoints and snapshots are listed by lscp
diff --git a/Documentation/filesystems/ocfs2.txt b/Documentation/filesystems/ocfs2.txt
index c58b9f5ba002..1f7ae144f6d8 100644
--- a/Documentation/filesystems/ocfs2.txt
+++ b/Documentation/filesystems/ocfs2.txt
@@ -80,3 +80,10 @@ user_xattr (*) Enables Extended User Attributes.
nouser_xattr Disables Extended User Attributes.
acl Enables POSIX Access Control Lists support.
noacl (*) Disables POSIX Access Control Lists support.
+resv_level=2 (*) Set how aggressive allocation reservations will be.
+ Valid values are between 0 (reservations off) and 8
+ (maximum space for reservations).
+dir_resv_level= (*) By default, directory reservations will scale with file
+ reservations - users should rarely need to change this
+ value. If allocation reservations are turned off, this
+ option will have no effect.
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index a4f30faa4f1f..01247de8eab4 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -305,7 +305,7 @@ Table 1-4: Contents of the stat files (as of 2.6.30-rc7)
cgtime guest time of the task children in jiffies
..............................................................................
-The /proc/PID/map file containing the currently mapped memory regions and
+The /proc/PID/maps file contains the currently mapped memory regions and
their access permissions.
The format is:
@@ -566,6 +566,10 @@ The default_smp_affinity mask applies to all non-active IRQs, which are the
IRQs which have not yet been allocated/activated, and hence which lack a
/proc/irq/[0-9]* directory.
+The node file on an SMP system shows the node to which the device using the IRQ
+reports itself as being attached. This hardware locality information does not
+include information about any possible driver locality preference.
+
prof_cpu_mask specifies which CPUs are to be profiled by the system wide
profiler. Default value is ffffffff (all cpus).
@@ -965,7 +969,7 @@ your system and how much traffic was routed over those devices:
...] 1375103 17405 0 0 0 0 0 0
...] 1703981 5535 0 0 0 3 0 0
-In addition, each Channel Bond interface has it's own directory. For
+In addition, each Channel Bond interface has its own directory. For
example, the bond0 device will have a directory called /proc/net/bond0/.
It will contain information that is specific to that bond, such as the
current slaves of the bond, the link status of the slaves, and how
@@ -1362,7 +1366,7 @@ been accounted as having caused 1MB of write.
In other words: The number of bytes which this process caused to not happen,
by truncating pagecache. A task can cause "negative" IO too. If this task
truncates some dirty pagecache, some IO which another task has been accounted
-for (in it's write_bytes) will not be happening. We _could_ just subtract that
+for (in its write_bytes) will not be happening. We _could_ just subtract that
from the truncating task's write_bytes, but there is information loss in doing
that.
diff --git a/Documentation/filesystems/smbfs.txt b/Documentation/filesystems/smbfs.txt
index f673ef0de0f7..194fb0decd2c 100644
--- a/Documentation/filesystems/smbfs.txt
+++ b/Documentation/filesystems/smbfs.txt
@@ -3,6 +3,6 @@ protocol used by Windows for Workgroups, Windows 95 and Windows NT.
Smbfs was inspired by Samba, the program written by Andrew Tridgell
that turns any Unix host into a file server for DOS or Windows clients.
-Smbfs is a SMB client, but uses parts of samba for it's operation. For
+Smbfs is a SMB client, but uses parts of samba for its operation. For
more info on samba, including documentation, please go to
http://www.samba.org/ and then on to your nearest mirror.
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 3de2f32edd90..b66858538df5 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -72,7 +72,7 @@ structure (this is the kernel-side implementation of file
descriptors). The freshly allocated file structure is initialized with
a pointer to the dentry and a set of file operation member functions.
These are taken from the inode data. The open() file method is then
-called so the specific filesystem implementation can do it's work. You
+called so the specific filesystem implementation can do its work. You
can see that this is another switch performed by the VFS. The file
structure is placed into the file descriptor table for the process.
diff --git a/Documentation/hwmon/lm63 b/Documentation/hwmon/lm63
index 31660bf97979..b9843eab1afb 100644
--- a/Documentation/hwmon/lm63
+++ b/Documentation/hwmon/lm63
@@ -7,6 +7,11 @@ Supported chips:
Addresses scanned: I2C 0x4c
Datasheet: Publicly available at the National Semiconductor website
http://www.national.com/pf/LM/LM63.html
+ * National Semiconductor LM64
+ Prefix: 'lm64'
+ Addresses scanned: I2C 0x18 and 0x4e
+ Datasheet: Publicly available at the National Semiconductor website
+ http://www.national.com/pf/LM/LM64.html
Author: Jean Delvare <khali@linux-fr.org>
@@ -55,3 +60,5 @@ The lm63 driver will not update its values more frequently than every
second; reading them more often will do no harm, but will return 'old'
values.
+The LM64 is effectively an LM63 with GPIO lines. The driver does not
+support these GPIO lines at present.
diff --git a/Documentation/hwmon/lm85 b/Documentation/hwmon/lm85
index a13680871bc7..a76aefeeb68a 100644
--- a/Documentation/hwmon/lm85
+++ b/Documentation/hwmon/lm85
@@ -157,7 +157,7 @@ temperature configuration points:
There are three PWM outputs. The LM85 datasheet suggests that the
pwm3 output control both fan3 and fan4. Each PWM can be individually
-configured and assigned to a zone for it's control value. Each PWM can be
+configured and assigned to a zone for its control value. Each PWM can be
configured individually according to the following options.
* pwm#_auto_pwm_min - this specifies the PWM value for temp#_auto_temp_off
diff --git a/Documentation/hwmon/sysfs-interface b/Documentation/hwmon/sysfs-interface
index 3de6b0bcb147..d4e2917c6f18 100644
--- a/Documentation/hwmon/sysfs-interface
+++ b/Documentation/hwmon/sysfs-interface
@@ -80,9 +80,9 @@ All entries (except name) are optional, and should only be created in a
given driver if the chip has the feature.
-********
-* Name *
-********
+*********************
+* Global attributes *
+*********************
name The chip name.
This should be a short, lowercase string, not containing
@@ -91,6 +91,13 @@ name The chip name.
I2C devices get this attribute created automatically.
RO
+update_rate The rate at which the chip will update readings.
+ Unit: millisecond
+ RW
+ Some devices have a variable update rate. This attribute
+ can be used to change the update rate to the desired
+ frequency.
+
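For illustration only (a sketch, not part of the patch; the hwmon0/device
path is an assumption and varies per system), a small C program could set
the attribute described above to a 500 ms interval:

    #include <stdio.h>

    /* Sketch: request a 500 ms update interval from a hypothetical chip
     * exposed as hwmon0. Error handling kept minimal on purpose. */
    int main(void)
    {
            FILE *f = fopen("/sys/class/hwmon/hwmon0/device/update_rate", "w");

            if (!f)
                    return 1;
            fprintf(f, "500\n");
            return fclose(f) ? 1 : 0;
    }
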
************
* Voltages *
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index e1bb5b261693..e307914a3eda 100644
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -27,7 +27,13 @@ Authors:
Module Parameters
-----------------
-None.
+* disable_features (bit vector)
+Disable selected features normally supported by the device. This makes it
+possible to work around driver or hardware bugs if the feature in
+question doesn't work as intended for whatever reason. Bit values:
+ 1 disable SMBus PEC
+ 2 disable the block buffer
+ 8 disable the I2C block read functionality
Description
diff --git a/Documentation/input/joystick.txt b/Documentation/input/joystick.txt
index 154d767b2acb..8007b7ca87bf 100644
--- a/Documentation/input/joystick.txt
+++ b/Documentation/input/joystick.txt
@@ -402,7 +402,7 @@ for the port of the SoundFusion is supported by the cs461x.c module.
~~~~~~~~~~~~~~~~~~~~~~~~
The Live! has a special PCI gameport, which, although it doesn't provide
any "Enhanced" stuff like 4DWave and friends, is quite a bit faster than
-it's ISA counterparts. It also requires special support, hence the
+its ISA counterparts. It also requires special support, hence the
emu10k1-gp.c module for it instead of the normal ns558.c one.
3.15 SoundBlaster 64 and 128 - ES1370 and ES1371, ESS Solo1 and S3 SonicVibes
diff --git a/Documentation/intel_txt.txt b/Documentation/intel_txt.txt
index f40a1f030019..5dc59b04a71f 100644
--- a/Documentation/intel_txt.txt
+++ b/Documentation/intel_txt.txt
@@ -126,7 +126,7 @@ o Tboot then applies an (optional) user-defined launch policy to
o Tboot adjusts the e820 table provided by the bootloader to reserve
its own location in memory as well as to reserve certain other
TXT-related regions.
-o As part of it's launch, tboot DMA protects all of RAM (using the
+o As part of its launch, tboot DMA protects all of RAM (using the
VT-d PMRs). Thus, the kernel must be booted with 'intel_iommu=on'
in order to remove this blanket protection and use VT-d's
page-level protection.
@@ -161,13 +161,15 @@ o In order to put a system into any of the sleep states after a TXT
has been restored, it will restore the TPM PCRs and then
transfer control back to the kernel's S3 resume vector.
In order to preserve system integrity across S3, the kernel
- provides tboot with a set of memory ranges (kernel
- code/data/bss, S3 resume code, and AP trampoline) that tboot
- will calculate a MAC (message authentication code) over and then
- seal with the TPM. On resume and once the measured environment
- has been re-established, tboot will re-calculate the MAC and
- verify it against the sealed value. Tboot's policy determines
- what happens if the verification fails.
+ provides tboot with a set of memory ranges (RAM and RESERVED_KERN
+ in the e820 table, but not any memory that BIOS might alter over
+ the S3 transition) that tboot will calculate a MAC (message
+ authentication code) over and then seal with the TPM. On resume
+ and once the measured environment has been re-established, tboot
+ will re-calculate the MAC and verify it against the sealed value.
+ Tboot's policy determines what happens if the verification fails.
+ Note that tboot changeset (c/s) 194, which contains the new MAC
+ code, supports this.
That's pretty much it for TXT support.
diff --git a/Documentation/kbuild/kbuild.txt b/Documentation/kbuild/kbuild.txt
index 6f8c1cabbc5d..634c625da8ce 100644
--- a/Documentation/kbuild/kbuild.txt
+++ b/Documentation/kbuild/kbuild.txt
@@ -65,7 +65,7 @@ CROSS_COMPILE
Specify an optional fixed part of the binutils filename.
CROSS_COMPILE can be a part of the filename or the full path.
-CROSS_COMPILE is also used for ccache is some setups.
+CROSS_COMPILE is also used for ccache in some setups.
CF
--------------------------------------------------
@@ -162,3 +162,7 @@ For tags/TAGS/cscope targets, you can specify more than one arch
to be included in the databases, separated by blank space. E.g.:
$ make ALLSOURCE_ARCHS="x86 mips arm" tags
+
+To get all available archs you can also specify all. E.g.:
+
+ $ make ALLSOURCE_ARCHS=all tags
diff --git a/Documentation/kbuild/kconfig-language.txt b/Documentation/kbuild/kconfig-language.txt
index c412c245848f..b472e4e0ba67 100644
--- a/Documentation/kbuild/kconfig-language.txt
+++ b/Documentation/kbuild/kconfig-language.txt
@@ -181,7 +181,7 @@ Expressions are listed in decreasing order of precedence.
(7) Returns the result of max(/expr/, /expr/).
An expression can have a value of 'n', 'm' or 'y' (or 0, 1, 2
-respectively for calculations). A menu entry becomes visible when it's
+respectively for calculations). A menu entry becomes visible when its
expression evaluates to 'm' or 'y'.
There are two types of symbols: constant and non-constant symbols.
diff --git a/Documentation/kbuild/kconfig.txt b/Documentation/kbuild/kconfig.txt
index 49efae703979..b2cb16ebcb16 100644
--- a/Documentation/kbuild/kconfig.txt
+++ b/Documentation/kbuild/kconfig.txt
@@ -96,7 +96,7 @@ Environment variables for 'silentoldconfig'
KCONFIG_NOSILENTUPDATE
--------------------------------------------------
If this variable has a non-blank value, it prevents silent kernel
-config udpates (requires explicit updates).
+config updates (requires explicit updates).
KCONFIG_AUTOCONFIG
--------------------------------------------------
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
index 71c602d61680..01351554df60 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -320,7 +320,7 @@ more details, with real examples.
subdir-ccflags-y, subdir-asflags-y
The two flags listed above are similar to ccflags-y and asflags-y.
The difference is that the subdir- variants has effect for the kbuild
- file where tey are present and all subdirectories.
+ file where they are present and all subdirectories.
Options specified using subdir-* are added to the commandline before
the options specified using the non-subdir variants.
diff --git a/Documentation/kernel-docs.txt b/Documentation/kernel-docs.txt
index 28cdc2af2131..ec8d31ee12e0 100644
--- a/Documentation/kernel-docs.txt
+++ b/Documentation/kernel-docs.txt
@@ -116,7 +116,7 @@
Author: Ingo Molnar, Gadi Oxman and Miguel de Icaza.
URL: http://www.linuxjournal.com/article.php?sid=2391
Keywords: RAID, MD driver.
- Description: Linux Journal Kernel Korner article. Here is it's
+ Description: Linux Journal Kernel Korner article. Here is its
abstract: "A description of the implementation of the RAID-1,
RAID-4 and RAID-5 personalities of the MD device driver in the
Linux kernel, providing users with high performance and reliable,
@@ -127,7 +127,7 @@
URL: http://www.linuxjournal.com/article.php?sid=1219
Keywords: device driver, module, loading/unloading modules,
allocating resources.
- Description: Linux Journal Kernel Korner article. Here is it's
+ Description: Linux Journal Kernel Korner article. Here is its
abstract: "This is the first of a series of four articles
co-authored by Alessandro Rubini and Georg Zezchwitz which present
a practical approach to writing Linux device drivers as kernel
@@ -141,7 +141,7 @@
Keywords: character driver, init_module, clean_up module,
autodetection, mayor number, minor number, file operations,
open(), close().
- Description: Linux Journal Kernel Korner article. Here is it's
+ Description: Linux Journal Kernel Korner article. Here is its
abstract: "This article, the second of four, introduces part of
the actual code to create custom module implementing a character
device driver. It describes the code for module initialization and
@@ -152,7 +152,7 @@
URL: http://www.linuxjournal.com/article.php?sid=1221
Keywords: read(), write(), select(), ioctl(), blocking/non
blocking mode, interrupt handler.
- Description: Linux Journal Kernel Korner article. Here is it's
+ Description: Linux Journal Kernel Korner article. Here is its
abstract: "This article, the third of four on writing character
device drivers, introduces concepts of reading, writing, and using
ioctl-calls".
@@ -161,7 +161,7 @@
Author: Alessandro Rubini and Georg v. Zezschwitz.
URL: http://www.linuxjournal.com/article.php?sid=1222
Keywords: interrupts, irqs, DMA, bottom halves, task queues.
- Description: Linux Journal Kernel Korner article. Here is it's
+ Description: Linux Journal Kernel Korner article. Here is its
abstract: "This is the fourth in a series of articles about
writing character device drivers as loadable kernel modules. This
month, we further investigate the field of interrupt handling.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 839b21b0699a..ae48b3cad776 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -58,6 +58,7 @@ parameter is applicable:
ISAPNP ISA PnP code is enabled.
ISDN Appropriate ISDN support is enabled.
JOY Appropriate joystick support is enabled.
+ KGDB Kernel debugger support is enabled.
KVM Kernel Virtual Machine support is enabled.
LIBATA Libata driver is enabled
LP Printer support is enabled.
@@ -99,6 +100,7 @@ parameter is applicable:
SWSUSP Software suspend (hibernation) is enabled.
SUSPEND System suspend states are enabled.
FTRACE Function tracing enabled.
+ TPM TPM drivers are enabled.
TS Appropriate touchscreen support is enabled.
UMS USB Mass Storage support is enabled.
USB USB support is enabled.
@@ -151,6 +153,7 @@ and is between 256 and 4096 characters. It is defined in the file
strict -- Be less tolerant of platforms that are not
strictly ACPI specification compliant.
rsdt -- prefer RSDT over (default) XSDT
+ copy_dsdt -- copy DSDT to memory
See also Documentation/power/pm.txt, pci=noacpi
@@ -287,9 +290,6 @@ and is between 256 and 4096 characters. It is defined in the file
advansys= [HW,SCSI]
See header of drivers/scsi/advansys.c.
- advwdt= [HW,WDT] Advantech WDT
- Format: <iostart>,<iostop>
-
aedsp16= [HW,OSS] Audio Excel DSP 16
Format: <io>,<irq>,<dma>,<mss_io>,<mpu_io>,<mpu_irq>
See also header of sound/oss/aedsp16.c.
@@ -708,6 +708,12 @@ and is between 256 and 4096 characters. It is defined in the file
The VGA output is eventually overwritten by the real
console.
+ ekgdboc= [X86,KGDB] Allow early kernel console debugging
+ ekgdboc=kbd
+
+ This is designed to be used in conjunction with
+ the boot argument: earlyprintk=vga
+
eata= [HW,SCSI]
edd= [EDD]
@@ -750,13 +756,14 @@ and is between 256 and 4096 characters. It is defined in the file
Default value is 0.
Value can be changed at runtime via /selinux/enforce.
+ erst_disable [ACPI]
+ Disable Error Record Serialization Table (ERST)
+ support.
+
ether= [HW,NET] Ethernet cards parameters
This option is obsoleted by the "netdev=" option, which
has equivalent usage. See its documentation for details.
- eurwdt= [HW,WDT] Eurotech CPU-1220/1410 onboard watchdog.
- Format: <io>[,<irq>]
-
failslab=
fail_page_alloc=
fail_make_request=[KNL]
@@ -784,8 +791,12 @@ and is between 256 and 4096 characters. It is defined in the file
as early as possible in order to facilitate early
boot debugging.
- ftrace_dump_on_oops
+ ftrace_dump_on_oops[=orig_cpu]
[FTRACE] will dump the trace buffers on oops.
+ If no parameter is passed, ftrace will dump
+ buffers of all CPUs, but if you pass orig_cpu, it will
+ dump only the buffer of the CPU that triggered the
+ oops.
ftrace_filter=[function-list]
[FTRACE] Limit the functions traced by the function
@@ -843,6 +854,11 @@ and is between 256 and 4096 characters. It is defined in the file
hd= [EIDE] (E)IDE hard drive subsystem geometry
Format: <cyl>,<head>,<sect>
+ hest_disable [ACPI]
+ Disable Hardware Error Source Table (HEST) support;
+ the corresponding firmware-first mode error processing
+ logic will be disabled.
+
highmem=nn[KMG] [KNL,BOOT] forces the highmem zone to have an exact
size of <nn>. This works even on boxes that have no
highmem otherwise. This also works to reduce highmem
@@ -1112,10 +1128,26 @@ and is between 256 and 4096 characters. It is defined in the file
use the HighMem zone if it exists, and the Normal
zone if it does not.
- kgdboc= [HW] kgdb over consoles.
- Requires a tty driver that supports console polling.
- (only serial supported for now)
- Format: <serial_device>[,baud]
+ kgdbdbgp= [KGDB,HW] kgdb over EHCI usb debug port.
+ Format: <Controller#>[,poll interval]
+ The controller # is the number of the ehci usb debug
+ port as it is probed via PCI. The poll interval is
+ optional and is the number of seconds between
+ each poll cycle to the debug port in case you need
+ the functionality for interrupting the kernel with
+ gdb or control-c on the dbgp connection. When
+ not using this parameter you use sysrq-g to break into
+ the kernel debugger.
+
+ kgdboc= [KGDB,HW] kgdb over consoles.
+ Requires a tty driver that supports console polling,
+ or a supported polling keyboard driver (non-usb).
+ Serial only format: <serial_device>[,baud]
+ keyboard only format: kbd
+ keyboard and serial format: kbd,<serial_device>[,baud]
+
+ kgdbwait [KGDB] Stop kernel execution and enter the
+ kernel debugger at the earliest opportunity.
kmac= [MIPS] korina ethernet MAC address.
Configure the RouterBoard 532 series on-chip
@@ -2236,9 +2268,6 @@ and is between 256 and 4096 characters. It is defined in the file
sched_debug [KNL] Enables verbose scheduler debug messages.
- sc1200wdt= [HW,WDT] SC1200 WDT (watchdog) driver
- Format: <io>[,<timeout>[,<isapnp>]]
-
scsi_debug_*= [SCSI]
See drivers/scsi/scsi_debug.c.
@@ -2610,6 +2639,15 @@ and is between 256 and 4096 characters. It is defined in the file
tp720= [HW,PS2]
+ tpm_suspend_pcr=[HW,TPM]
+ Format: integer pcr id
+ Specify that at suspend time, the tpm driver
+ should extend the specified pcr with zeros,
+ as a workaround for some chips which fail to
+ flush the last written pcr on TPM_SaveState.
+ This will guarantee that all the other pcrs
+ are saved.
+
trace_buf_size=nn[KMG]
[FTRACE] will set tracing buffer size.
@@ -2818,8 +2856,10 @@ and is between 256 and 4096 characters. It is defined in the file
wd7000= [HW,SCSI]
See header of drivers/scsi/wd7000.c.
- wdt= [WDT] Watchdog
- See Documentation/watchdog/wdt.txt.
+ watchdog timers [HW,WDT] For information on watchdog timers,
+ see Documentation/watchdog/watchdog-parameters.txt
+ or other driver-specific files in the
+ Documentation/watchdog/ directory.
x2apic_phys [X86-64,APIC] Use x2apic physical mode instead of
default x2apic cluster mode on platforms
diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt
index 2f9115c0ae62..6653017680dd 100644
--- a/Documentation/kprobes.txt
+++ b/Documentation/kprobes.txt
@@ -165,8 +165,8 @@ the user entry_handler invocation is also skipped.
1.4 How Does Jump Optimization Work?
-If you configured your kernel with CONFIG_OPTPROBES=y (currently
-this option is supported on x86/x86-64, non-preemptive kernel) and
+If your kernel is built with CONFIG_OPTPROBES=y (currently this flag
+is automatically set to 'y' on x86/x86-64 non-preemptive kernels) and
the "debug.kprobes_optimization" kernel parameter is set to 1 (see
sysctl(8)), Kprobes tries to reduce probe-hit overhead by using a jump
instruction instead of a breakpoint instruction at each probepoint.
@@ -271,8 +271,6 @@ tweak the kernel's execution path, you need to suppress optimization,
using one of the following techniques:
- Specify an empty function for the kprobe's post_handler or break_handler.
or
-- Config CONFIG_OPTPROBES=n.
- or
- Execute 'sysctl -w debug.kprobes_optimization=n'
2. Architectures Supported
@@ -307,10 +305,6 @@ it useful to "Compile the kernel with debug info" (CONFIG_DEBUG_INFO),
so you can use "objdump -d -l vmlinux" to see the source-to-object
code mapping.
-If you want to reduce probing overhead, set "Kprobes jump optimization
-support" (CONFIG_OPTPROBES) to "y". You can find this option under the
-"Kprobes" line.
-
4. API Reference
The Kprobes API includes a "register" function and an "unregister"
@@ -332,7 +326,7 @@ occurs during execution of kp->pre_handler or kp->post_handler,
or during single-stepping of the probed instruction, Kprobes calls
kp->fault_handler. Any or all handlers can be NULL. If kp->flags
is set KPROBE_FLAG_DISABLED, that kp will be registered but disabled,
-so, it's handlers aren't hit until calling enable_kprobe(kp).
+so, its handlers aren't hit until calling enable_kprobe(kp).
NOTE:
1. With the introduction of the "symbol_name" field to struct kprobe,
diff --git a/Documentation/kvm/api.txt b/Documentation/kvm/api.txt
index c6416a398163..a237518e51b9 100644
--- a/Documentation/kvm/api.txt
+++ b/Documentation/kvm/api.txt
@@ -656,6 +656,7 @@ struct kvm_clock_data {
4.29 KVM_GET_VCPU_EVENTS
Capability: KVM_CAP_VCPU_EVENTS
+Extended by: KVM_CAP_INTR_SHADOW
Architectures: x86
Type: vm ioctl
Parameters: struct kvm_vcpu_event (out)
@@ -676,7 +677,7 @@ struct kvm_vcpu_events {
__u8 injected;
__u8 nr;
__u8 soft;
- __u8 pad;
+ __u8 shadow;
} interrupt;
struct {
__u8 injected;
@@ -688,9 +689,13 @@ struct kvm_vcpu_events {
__u32 flags;
};
+KVM_VCPUEVENT_VALID_SHADOW may be set in the flags field to signal that
+interrupt.shadow contains a valid state. Otherwise, this field is undefined.
+
4.30 KVM_SET_VCPU_EVENTS
Capability: KVM_CAP_VCPU_EVENTS
+Extended by: KVM_CAP_INTR_SHADOW
Architectures: x86
Type: vm ioctl
Parameters: struct kvm_vcpu_event (in)
@@ -709,6 +714,183 @@ current in-kernel state. The bits are:
KVM_VCPUEVENT_VALID_NMI_PENDING - transfer nmi.pending to the kernel
KVM_VCPUEVENT_VALID_SIPI_VECTOR - transfer sipi_vector
+If KVM_CAP_INTR_SHADOW is available, KVM_VCPUEVENT_VALID_SHADOW can be set in
+the flags field to signal that interrupt.shadow contains a valid state and
+shall be written into the VCPU.
+
+4.32 KVM_GET_DEBUGREGS
+
+Capability: KVM_CAP_DEBUGREGS
+Architectures: x86
+Type: vm ioctl
+Parameters: struct kvm_debugregs (out)
+Returns: 0 on success, -1 on error
+
+Reads debug registers from the vcpu.
+
+struct kvm_debugregs {
+ __u64 db[4];
+ __u64 dr6;
+ __u64 dr7;
+ __u64 flags;
+ __u64 reserved[9];
+};
+
+4.33 KVM_SET_DEBUGREGS
+
+Capability: KVM_CAP_DEBUGREGS
+Architectures: x86
+Type: vm ioctl
+Parameters: struct kvm_debugregs (in)
+Returns: 0 on success, -1 on error
+
+Writes debug registers into the vcpu.
+
+See KVM_GET_DEBUGREGS for the data structure. The flags field is currently
+unused and must be cleared on entry.
+
+4.34 KVM_SET_USER_MEMORY_REGION
+
+Capability: KVM_CAP_USER_MEM
+Architectures: all
+Type: vm ioctl
+Parameters: struct kvm_userspace_memory_region (in)
+Returns: 0 on success, -1 on error
+
+struct kvm_userspace_memory_region {
+ __u32 slot;
+ __u32 flags;
+ __u64 guest_phys_addr;
+ __u64 memory_size; /* bytes */
+ __u64 userspace_addr; /* start of the userspace allocated memory */
+};
+
+/* for kvm_memory_region::flags */
+#define KVM_MEM_LOG_DIRTY_PAGES 1UL
+
+This ioctl allows the user to create or modify a guest physical memory
+slot. When changing an existing slot, it may be moved in the guest
+physical memory space, or its flags may be modified. It may not be
+resized. Slots may not overlap in guest physical address space.
+
+Memory for the region is taken starting at the address denoted by the
+field userspace_addr, which must point at user addressable memory for
+the entire memory slot size. Any object may back this memory, including
+anonymous memory, ordinary files, and hugetlbfs.
+
+It is recommended that the lower 21 bits of guest_phys_addr and userspace_addr
+be identical. This allows large pages in the guest to be backed by large
+pages in the host.
+
+The flags field supports just one flag, KVM_MEM_LOG_DIRTY_PAGES, which
+instructs kvm to keep track of writes to memory within the slot. See
+the KVM_GET_DIRTY_LOG ioctl.
+
+When the KVM_CAP_SYNC_MMU capability is available, changes in the backing of the memory
+region are automatically reflected into the guest. For example, an mmap()
+that affects the region will be made visible immediately. Another example
+is madvise(MADV_DONTNEED).
+
+It is recommended to use this API instead of the KVM_SET_MEMORY_REGION ioctl.
+The KVM_SET_MEMORY_REGION ioctl does not allow fine-grained control over
+memory allocation and is deprecated.
+
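For illustration only (a sketch, not part of the patch above): userspace code
along these lines could create such a slot, assuming vm_fd was obtained from
KVM_CREATE_VM:

    #include <err.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <linux/kvm.h>

    /* Sketch: back a 64MB slot at guest physical address 0 with anonymous
     * memory and enable dirty-page tracking on it. */
    static void add_slot(int vm_fd)
    {
            const __u64 size = 64 << 20;
            void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            struct kvm_userspace_memory_region region = {
                    .slot            = 0,
                    .flags           = KVM_MEM_LOG_DIRTY_PAGES,
                    .guest_phys_addr = 0,
                    .memory_size     = size,
                    .userspace_addr  = (__u64)(unsigned long)mem,
            };

            if (mem == MAP_FAILED)
                    err(1, "mmap");
            if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region) < 0)
                    err(1, "KVM_SET_USER_MEMORY_REGION");
    }
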
+4.35 KVM_SET_TSS_ADDR
+
+Capability: KVM_CAP_SET_TSS_ADDR
+Architectures: x86
+Type: vm ioctl
+Parameters: unsigned long tss_address (in)
+Returns: 0 on success, -1 on error
+
+This ioctl defines the physical address of a three-page region in the guest
+physical address space. The region must be within the first 4GB of the
+guest physical address space and must not conflict with any memory slot
+or any mmio address. The guest may malfunction if it accesses this memory
+region.
+
+This ioctl is required on Intel-based hosts. This is needed on Intel hardware
+because of a quirk in the virtualization implementation (see the internals
+documentation when it pops into existence).
+
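A minimal usage sketch (the address below is an arbitrary example in the
first 4GB; any area that does not overlap memory slots or mmio works, and
vm_fd is again assumed to come from KVM_CREATE_VM; same includes as the
sketch above):

    /* Sketch: reserve the three-page region just below 4GB. */
    static void set_tss(int vm_fd)
    {
            if (ioctl(vm_fd, KVM_SET_TSS_ADDR, 0xfffbd000UL) < 0)
                    err(1, "KVM_SET_TSS_ADDR");
    }
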
+4.36 KVM_ENABLE_CAP
+
+Capability: KVM_CAP_ENABLE_CAP
+Architectures: ppc
+Type: vcpu ioctl
+Parameters: struct kvm_enable_cap (in)
+Returns: 0 on success; -1 on error
+
+Not all extensions are enabled by default. Using this ioctl the application
+can enable an extension, making it available to the guest.
+
+On systems that do not support this ioctl, it always fails. On systems that
+do support it, it only works for extensions that are supported for enablement.
+
+To check if a capability can be enabled, the KVM_CHECK_EXTENSION ioctl should
+be used.
+
+struct kvm_enable_cap {
+ /* in */
+ __u32 cap;
+
+The capability that is supposed to get enabled.
+
+ __u32 flags;
+
+A bitfield indicating future enhancements. Has to be 0 for now.
+
+ __u64 args[4];
+
+Arguments for enabling a feature. If a feature needs initial values to
+function properly, this is the place to put them.
+
+ __u8 pad[64];
+};
+
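As a hedged sketch (not part of the patch), enabling a capability from
userspace could look like the following; KVM_CAP_PPC_OSI is used purely as an
example and should be probed with KVM_CHECK_EXTENSION first, and vcpu_fd is
assumed to come from KVM_CREATE_VCPU (same includes as the earlier sketch,
plus <string.h>):

    /* Sketch: enable the ppc OSI hypercall extension for one vcpu. */
    static void enable_osi(int vcpu_fd)
    {
            struct kvm_enable_cap cap;

            memset(&cap, 0, sizeof(cap));   /* flags and args must be zero */
            cap.cap = KVM_CAP_PPC_OSI;
            if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap) < 0)
                    err(1, "KVM_ENABLE_CAP");
    }
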
+4.37 KVM_GET_MP_STATE
+
+Capability: KVM_CAP_MP_STATE
+Architectures: x86, ia64
+Type: vcpu ioctl
+Parameters: struct kvm_mp_state (out)
+Returns: 0 on success; -1 on error
+
+struct kvm_mp_state {
+ __u32 mp_state;
+};
+
+Returns the vcpu's current "multiprocessing state" (though also valid on
+uniprocessor guests).
+
+Possible values are:
+
+ - KVM_MP_STATE_RUNNABLE: the vcpu is currently running
+ - KVM_MP_STATE_UNINITIALIZED: the vcpu is an application processor (AP)
+ which has not yet received an INIT signal
+ - KVM_MP_STATE_INIT_RECEIVED: the vcpu has received an INIT signal, and is
+ now ready for a SIPI
+ - KVM_MP_STATE_HALTED: the vcpu has executed a HLT instruction and
+ is waiting for an interrupt
+ - KVM_MP_STATE_SIPI_RECEIVED: the vcpu has just received a SIPI (vector
+ accessible via KVM_GET_VCPU_EVENTS)
+
+This ioctl is only useful after KVM_CREATE_IRQCHIP. Without an in-kernel
+irqchip, the multiprocessing state must be maintained by userspace.
+
+4.38 KVM_SET_MP_STATE
+
+Capability: KVM_CAP_MP_STATE
+Architectures: x86, ia64
+Type: vcpu ioctl
+Parameters: struct kvm_mp_state (in)
+Returns: 0 on success; -1 on error
+
+Sets the vcpu's current "multiprocessing state"; see KVM_GET_MP_STATE for
+arguments.
+
+This ioctl is only useful after KVM_CREATE_IRQCHIP. Without an in-kernel
+irqchip, the multiprocessing state must be maintained by userspace.
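An illustrative sketch of both ioctls together (vcpu_fd assumed to come from
KVM_CREATE_VCPU, an in-kernel irqchip assumed to exist, and the same includes
as the earlier sketches):

    /* Sketch: read a vcpu's multiprocessing state and, unless it is still
     * uninitialized, force it back to the runnable state. */
    static void kick_runnable(int vcpu_fd)
    {
            struct kvm_mp_state mp;

            if (ioctl(vcpu_fd, KVM_GET_MP_STATE, &mp) < 0)
                    err(1, "KVM_GET_MP_STATE");
            if (mp.mp_state != KVM_MP_STATE_UNINITIALIZED) {
                    mp.mp_state = KVM_MP_STATE_RUNNABLE;
                    if (ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp) < 0)
                            err(1, "KVM_SET_MP_STATE");
            }
    }
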
5. The kvm_run structure
@@ -820,6 +1002,13 @@ executed a memory-mapped I/O instruction which could not be satisfied
by kvm. The 'data' member contains the written data if 'is_write' is
true, and should be filled by application code otherwise.
+NOTE: For KVM_EXIT_IO, KVM_EXIT_MMIO and KVM_EXIT_OSI, the corresponding
+operations are complete (and guest state is consistent) only after userspace
+has re-entered the kernel with KVM_RUN. The kernel side will first finish
+incomplete operations and then check for pending signals. Userspace
+can re-enter the guest with an unmasked signal pending to complete
+pending operations.
+
/* KVM_EXIT_HYPERCALL */
struct {
__u64 nr;
@@ -829,7 +1018,9 @@ true, and should be filled by application code otherwise.
__u32 pad;
} hypercall;
-Unused.
+Unused. This was once used for 'hypercall to userspace'. To implement
+such functionality, use KVM_EXIT_IO (x86) or KVM_EXIT_MMIO (all except s390).
+Note KVM_EXIT_IO is significantly faster than KVM_EXIT_MMIO.
/* KVM_EXIT_TPR_ACCESS */
struct {
@@ -870,6 +1061,19 @@ s390 specific.
powerpc specific.
+ /* KVM_EXIT_OSI */
+ struct {
+ __u64 gprs[32];
+ } osi;
+
+MOL uses a special hypercall interface it calls 'OSI'. To enable it, we catch
+hypercalls and exit with this exit struct that contains all the guest gprs.
+
+If exit_reason is KVM_EXIT_OSI, then the vcpu has triggered such a hypercall.
+Userspace can now handle the hypercall and when it's done modify the gprs as
+necessary. Upon guest entry all guest GPRs will then be replaced by the values
+in this struct.
+
/* Fix the size of the union. */
char padding[256];
};
diff --git a/Documentation/kvm/mmu.txt b/Documentation/kvm/mmu.txt
new file mode 100644
index 000000000000..0cc28fb84f4f
--- /dev/null
+++ b/Documentation/kvm/mmu.txt
@@ -0,0 +1,302 @@
+The x86 kvm shadow mmu
+======================
+
+The mmu (in arch/x86/kvm, files mmu.[ch] and paging_tmpl.h) is responsible
+for presenting a standard x86 mmu to the guest, while translating guest
+physical addresses to host physical addresses.
+
+The mmu code attempts to satisfy the following requirements:
+
+- correctness: the guest should not be able to determine that it is running
+ on an emulated mmu except for timing (we attempt to comply
+ with the specification, not emulate the characteristics of
+ a particular implementation such as tlb size)
+- security: the guest must not be able to touch host memory not assigned
+ to it
+- performance: minimize the performance penalty imposed by the mmu
+- scaling: need to scale to large memory and large vcpu guests
+- hardware: support the full range of x86 virtualization hardware
+- integration: Linux memory management code must be in control of guest memory
+ so that swapping, page migration, page merging, transparent
+ hugepages, and similar features work without change
+- dirty tracking: report writes to guest memory to enable live migration
+ and framebuffer-based displays
+- footprint: keep the amount of pinned kernel memory low (most memory
+ should be shrinkable)
+- reliability: avoid multipage or GFP_ATOMIC allocations
+
+Acronyms
+========
+
+pfn host page frame number
+hpa host physical address
+hva host virtual address
+gfn guest frame number
+gpa guest physical address
+gva guest virtual address
+ngpa nested guest physical address
+ngva nested guest virtual address
+pte page table entry (used also to refer generically to paging structure
+ entries)
+gpte guest pte (referring to gfns)
+spte shadow pte (referring to pfns)
+tdp two dimensional paging (vendor neutral term for NPT and EPT)
+
+Virtual and real hardware supported
+===================================
+
+The mmu supports first-generation mmu hardware, which allows an atomic switch
+of the current paging mode and cr3 during guest entry, as well as
+two-dimensional paging (AMD's NPT and Intel's EPT). The emulated hardware
+it exposes is the traditional 2/3/4 level x86 mmu, with support for global
+pages, pae, pse, pse36, cr0.wp, and 1GB pages. Work is in progress to support
+exposing NPT capable hardware on NPT capable hosts.
+
+Translation
+===========
+
+The primary job of the mmu is to program the processor's mmu to translate
+addresses for the guest. Different translations are required at different
+times:
+
+- when guest paging is disabled, we translate guest physical addresses to
+ host physical addresses (gpa->hpa)
+- when guest paging is enabled, we translate guest virtual addresses, to
+ guest physical addresses, to host physical addresses (gva->gpa->hpa)
+- when the guest launches a guest of its own, we translate nested guest
+ virtual addresses, to nested guest physical addresses, to guest physical
+ addresses, to host physical addresses (ngva->ngpa->gpa->hpa)
+
+The primary challenge is to encode between 1 and 3 translations into hardware
+that supports only 1 (traditional) or 2 (tdp) translations. When the
+number of required translations matches the hardware, the mmu operates in
+direct mode; otherwise it operates in shadow mode (see below).
+
+Memory
+======
+
+Guest memory (gpa) is part of the user address space of the process that is
+using kvm. Userspace defines the translation between guest addresses and user
+addresses (gpa->hva); note that two gpas may alias to the same hva, but not
+vice versa.
+
+These hvas may be backed using any method available to the host: anonymous
+memory, file backed memory, and device memory. Memory might be paged by the
+host at any time.
+
+Events
+======
+
+The mmu is driven by events, some from the guest, some from the host.
+
+Guest generated events:
+- writes to control registers (especially cr3)
+- invlpg/invlpga instruction execution
+- access to missing or protected translations
+
+Host generated events:
+- changes in the gpa->hpa translation (either through gpa->hva changes or
+ through hva->hpa changes)
+- memory pressure (the shrinker)
+
+Shadow pages
+============
+
+The principal data structure is the shadow page, 'struct kvm_mmu_page'. A
+shadow page contains 512 sptes, which can be either leaf or nonleaf sptes. A
+shadow page may contain a mix of leaf and nonleaf sptes.
+
+A nonleaf spte allows the hardware mmu to reach the leaf pages and
+is not related to a translation directly. It points to other shadow pages.
+
+A leaf spte corresponds to either one or two translations encoded into
+one paging structure entry. These are always the lowest level of the
+translation stack, with optional higher level translations left to NPT/EPT.
+Leaf ptes point at guest pages.
+
+The following table shows translations encoded by leaf ptes, with higher-level
+translations in parentheses:
+
+ Non-nested guests:
+ nonpaging: gpa->hpa
+ paging: gva->gpa->hpa
+ paging, tdp: (gva->)gpa->hpa
+ Nested guests:
+ non-tdp: ngva->gpa->hpa (*)
+ tdp: (ngva->)ngpa->gpa->hpa
+
+(*) the guest hypervisor will encode the ngva->gpa translation into its page
+ tables if npt is not present
+
+Shadow pages contain the following information:
+ role.level:
+ The level in the shadow paging hierarchy that this shadow page belongs to.
+ 1=4k sptes, 2=2M sptes, 3=1G sptes, etc.
+ role.direct:
+ If set, leaf sptes reachable from this page are for a linear range.
+ Examples include real mode translation, large guest pages backed by small
+ host pages, and gpa->hpa translations when NPT or EPT is active.
+ The linear range starts at (gfn << PAGE_SHIFT) and its size is determined
+ by role.level (2MB for first level, 1GB for second level, 0.5TB for third
+ level, 256TB for fourth level)
+ If clear, this page corresponds to a guest page table denoted by the gfn
+ field.
+ role.quadrant:
+ When role.cr4_pae=0, the guest uses 32-bit gptes while the host uses 64-bit
+ sptes. That means a guest page table contains more ptes than the host,
+ so multiple shadow pages are needed to shadow one guest page.
+ For first-level shadow pages, role.quadrant can be 0 or 1 and denotes the
+ first or second 512-gpte block in the guest page table. For second-level
+ page tables, each 32-bit gpte is converted to two 64-bit sptes
+ (since each first-level guest page is shadowed by two first-level
+ shadow pages) so role.quadrant takes values in the range 0..3. Each
+ quadrant maps 1GB virtual address space.
+ role.access:
+ Inherited guest access permissions in the form uwx. Note execute
+ permission is positive, not negative.
+ role.invalid:
+ The page is invalid and should not be used. It is a root page that is
+ currently pinned (by a cpu hardware register pointing to it); once it is
+ unpinned it will be destroyed.
+ role.cr4_pae:
+ Contains the value of cr4.pae for which the page is valid (e.g. whether
+ 32-bit or 64-bit gptes are in use).
+ role.cr4_nxe:
+ Contains the value of efer.nxe for which the page is valid.
+ gfn:
+ Either the guest page table containing the translations shadowed by this
+ page, or the base page frame for linear translations. See role.direct.
+ spt:
+ A pageful of 64-bit sptes containing the translations for this page.
+ Accessed by both kvm and hardware.
+ The page pointed to by spt will have its page->private pointing back
+ at the shadow page structure.
+ sptes in spt point either at guest pages, or at lower-level shadow pages.
+ Specifically, if sp1 and sp2 are shadow pages, then sp1->spt[n] may point
+ at __pa(sp2->spt). sp2 will point back at sp1 through parent_pte.
+ The spt array forms a DAG structure with the shadow page as a node, and
+ guest pages as leaves.
+ gfns:
+ An array of 512 guest frame numbers, one for each present pte. Used to
+ perform a reverse map from a pte to a gfn.
+ slot_bitmap:
+ A bitmap containing one bit per memory slot. If the page contains a pte
+ mapping a page from memory slot n, then bit n of slot_bitmap will be set
+ (if a page is aliased among several slots, then it is not guaranteed that
+ all slots will be marked).
+ Used during dirty logging to avoid scanning a shadow page if none of its
+ pages need tracking.
+ root_count:
+ A counter keeping track of how many hardware registers (guest cr3 or
+ pdptrs) are now pointing at the page. While this counter is nonzero, the
+ page cannot be destroyed. See role.invalid.
+ multimapped:
+ Whether there exist multiple sptes pointing at this page.
+ parent_pte/parent_ptes:
+ If multimapped is zero, parent_pte points at the single spte that points at
+ this page's spt. Otherwise, parent_ptes points at a data structure
+ with a list of parent_ptes.
+ unsync:
+ If true, then the translations in this page may not match the guest's
+ translation. This is equivalent to the state of the tlb when a pte is
+ changed but before the tlb entry is flushed. Accordingly, unsync ptes
+ are synchronized when the guest executes invlpg or flushes its tlb by
+ other means. Valid for leaf pages.
+ unsync_children:
+ How many sptes in the page point at pages that are unsync (or have
+ unsynchronized children).
+ unsync_child_bitmap:
+ A bitmap indicating which sptes in spt point (directly or indirectly) at
+ pages that may be unsynchronized. Used to quickly locate all unsynchronized
+ pages reachable from a given page.
+
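To make the field list above more concrete, the role bits pack naturally into
a small bitfield union; the sketch below is illustrative only, with field
names and widths approximated rather than copied from the kernel headers:

    /* Sketch only: an approximation of how the role bits can be packed. */
    union mmu_page_role_sketch {
            unsigned word;
            struct {
                    unsigned level:4;     /* shadow paging level, 1 = 4k sptes */
                    unsigned cr4_pae:1;   /* 32-bit vs 64-bit gptes */
                    unsigned quadrant:2;  /* which gpte block is shadowed */
                    unsigned direct:1;    /* linear range vs guest page table */
                    unsigned access:3;    /* inherited uwx permissions */
                    unsigned invalid:1;   /* pinned root awaiting destruction */
                    unsigned nxe:1;       /* efer.nxe at shadow time */
            };
    };
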
+Reverse map
+===========
+
+The mmu maintains a reverse mapping whereby all ptes mapping a page can be
+reached given its gfn. This is used, for example, when swapping out a page.
+
+Synchronized and unsynchronized pages
+=====================================
+
+The guest uses two events to synchronize its tlb and page tables: tlb flushes
+and page invalidations (invlpg).
+
+A tlb flush means that we need to synchronize all sptes reachable from the
+guest's cr3. This is expensive, so we keep all guest page tables write
+protected, and synchronize sptes to gptes when a gpte is written.
+
+A special case is when a guest page table is reachable from the current
+guest cr3. In this case, the guest is obliged to issue an invlpg instruction
+before using the translation. We take advantage of that by removing write
+protection from the guest page, and allowing the guest to modify it freely.
+We synchronize modified gptes when the guest invokes invlpg. This reduces
+the amount of emulation we have to do when the guest modifies multiple gptes,
+or when a guest page is no longer used as a page table and is used for
+random guest data.
+
+As a side effect we have to resynchronize all reachable unsynchronized shadow
+pages on a tlb flush.
+
+
+Reaction to events
+==================
+
+- guest page fault (or npt page fault, or ept violation)
+
+This is the most complicated event. The cause of a page fault can be:
+
+ - a true guest fault (the guest translation won't allow the access) (*)
+ - access to a missing translation
+ - access to a protected translation
+ - when logging dirty pages, memory is write protected
+ - synchronized shadow pages are write protected (*)
+ - access to untranslatable memory (mmio)
+
+ (*) not applicable in direct mode
+
+Handling a page fault is performed as follows:
+
+ - if needed, walk the guest page tables to determine the guest translation
+ (gva->gpa or ngpa->gpa)
+ - if permissions are insufficient, reflect the fault back to the guest
+ - determine the host page
+ - if this is an mmio request, there is no host page; call the emulator
+ to emulate the instruction instead
+ - walk the shadow page table to find the spte for the translation,
+ instantiating missing intermediate page tables as necessary
+ - try to unsynchronize the page
+ - if successful, we can let the guest continue and modify the gpte
+ - emulate the instruction
+ - if failed, unshadow the page and let the guest continue
+ - update any translations that were modified by the instruction
+
+invlpg handling:
+
+ - walk the shadow page hierarchy and drop affected translations
+ - try to reinstantiate the indicated translation in the hope that the
+ guest will use it in the near future
+
+Guest control register updates:
+
+- mov to cr3
+ - look up new shadow roots
+ - synchronize newly reachable shadow pages
+
+- mov to cr0/cr4/efer
+ - set up mmu context for new paging mode
+ - look up new shadow roots
+ - synchronize newly reachable shadow pages
+
+Host translation updates:
+
+ - mmu notifier called with updated hva
+ - look up affected sptes through reverse map
+ - drop (or update) translations
+
+Further reading
+===============
+
+- NPT presentation from KVM Forum 2008
+ http://www.linux-kvm.org/wiki/images/c/c8/KvmForum2008%24kdf2008_21.pdf
+
diff --git a/Documentation/laptops/laptop-mode.txt b/Documentation/laptops/laptop-mode.txt
index 2c3c35093023..0bf25eebce94 100644
--- a/Documentation/laptops/laptop-mode.txt
+++ b/Documentation/laptops/laptop-mode.txt
@@ -207,7 +207,7 @@ Tips & Tricks
* Drew Scott Daniels observed: "I don't know why, but when I decrease the number
of colours that my display uses it consumes less battery power. I've seen
this on powerbooks too. I hope that this is a piece of information that
- might be useful to the Laptop Mode patch or it's users."
+ might be useful to the Laptop Mode patch or its users."
* In syslog.conf, you can prefix entries with a dash ``-'' to omit syncing the
file after every logging. When you're using laptop-mode and your disk doesn't
diff --git a/Documentation/lguest/lguest.c b/Documentation/lguest/lguest.c
index 3119f5db75bd..e9ce3c554514 100644
--- a/Documentation/lguest/lguest.c
+++ b/Documentation/lguest/lguest.c
@@ -263,7 +263,7 @@ static u8 *get_feature_bits(struct device *dev)
* Launcher virtual with an offset.
*
* This can be tough to get your head around, but usually it just means that we
- * use these trivial conversion functions when the Guest gives us it's
+ * use these trivial conversion functions when the Guest gives us its
* "physical" addresses:
*/
static void *from_guest_phys(unsigned long addr)
diff --git a/Documentation/md.txt b/Documentation/md.txt
index 188f4768f1d5..e4e893ef3e01 100644
--- a/Documentation/md.txt
+++ b/Documentation/md.txt
@@ -136,7 +136,7 @@ raid_disks != 0.
Then uninitialized devices can be added with ADD_NEW_DISK. The
structure passed to ADD_NEW_DISK must specify the state of the device
-and it's role in the array.
+and its role in the array.
Once started with RUN_ARRAY, uninitialized spares can be added with
HOT_ADD_DISK.
diff --git a/Documentation/mutex-design.txt b/Documentation/mutex-design.txt
index aa60d1f627e5..c91ccc0720fa 100644
--- a/Documentation/mutex-design.txt
+++ b/Documentation/mutex-design.txt
@@ -66,14 +66,14 @@ of advantages of mutexes:
c0377ccb <mutex_lock>:
c0377ccb: f0 ff 08 lock decl (%eax)
- c0377cce: 78 0e js c0377cde <.text.lock.mutex>
+ c0377cce: 78 0e js c0377cde <.text..lock.mutex>
c0377cd0: c3 ret
the unlocking fastpath is equally tight:
c0377cd1 <mutex_unlock>:
c0377cd1: f0 ff 00 lock incl (%eax)
- c0377cd4: 7e 0f jle c0377ce5 <.text.lock.mutex+0x7>
+ c0377cd4: 7e 0f jle c0377ce5 <.text..lock.mutex+0x7>
c0377cd6: c3 ret
- 'struct mutex' semantics are well-defined and are enforced if
diff --git a/Documentation/netlabel/lsm_interface.txt b/Documentation/netlabel/lsm_interface.txt
index 98dd9f7430f2..638c74f7de7f 100644
--- a/Documentation/netlabel/lsm_interface.txt
+++ b/Documentation/netlabel/lsm_interface.txt
@@ -38,7 +38,7 @@ Depending on the exact configuration, translation between the network packet
label and the internal LSM security identifier can be time consuming. The
NetLabel label mapping cache is a caching mechanism which can be used to
sidestep much of this overhead once a mapping has been established. Once the
-LSM has received a packet, used NetLabel to decode it's security attributes,
+LSM has received a packet, used NetLabel to decode its security attributes,
and translated the security attributes into a LSM internal identifier the LSM
can use the NetLabel caching functions to associate the LSM internal
identifier with the network packet's label. This means that in the future
diff --git a/Documentation/networking/caif/Linux-CAIF.txt b/Documentation/networking/caif/Linux-CAIF.txt
new file mode 100644
index 000000000000..7fe7a9a33a4f
--- /dev/null
+++ b/Documentation/networking/caif/Linux-CAIF.txt
@@ -0,0 +1,212 @@
+Linux CAIF
+===========
+copyright (C) ST-Ericsson AB 2010
+Author: Sjur Brendeland/ sjur.brandeland@stericsson.com
+License terms: GNU General Public License (GPL) version 2
+
+
+Introduction
+------------
+CAIF is a MUX protocol used by ST-Ericsson cellular modems for
+communication between Modem and host. The host processes can open virtual AT
+channels, initiate GPRS Data connections, Video channels and Utility Channels.
+The Utility Channels are general purpose pipes between modem and host.
+
+ST-Ericsson modems support a number of transports between modem
+and host. Currently, UART and Loopback are available for Linux.
+
+
+Architecture:
+------------
+The implementation of CAIF is divided into:
+* CAIF Socket Layer, Kernel API, and Net Device.
+* CAIF Core Protocol Implementation
+* CAIF Link Layer, implemented as NET devices.
+
+
+ RTNL
+ !
+ ! +------+ +------+ +------+
+ ! +------+! +------+! +------+!
+ ! ! Sock !! !Kernel!! ! Net !!
+ ! ! API !+ ! API !+ ! Dev !+ <- CAIF Client APIs
+ ! +------+ +------! +------+
+ ! ! ! !
+ ! +----------!----------+
+ ! +------+ <- CAIF Protocol Implementation
+ +-------> ! CAIF !
+ ! Core !
+ +------+
+ +--------!--------+
+ ! !
+ +------+ +-----+
+ ! ! ! TTY ! <- Link Layer (Net Devices)
+ +------+ +-----+
+
+
+Using the Kernel API
+----------------------
+The Kernel API is used for accessing CAIF channels from the
+kernel.
+The user of the API has to implement two callbacks: one for receive and
+one for control.
+The receive callback delivers a CAIF packet as an SKB. The control
+callback notifies of channel initialization complete, and of
+flow-on/flow-off.
+
+
+ struct caif_device caif_dev = {
+ .caif_config = {
+ .name = "MYDEV"
+ .type = CAIF_CHTY_AT
+ }
+ .receive_cb = my_receive,
+ .control_cb = my_control,
+ };
+ caif_add_device(&caif_dev);
+ caif_transmit(&caif_dev, skb);
+
+See caif_kernel.h for details about the CAIF kernel API.
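
A rough sketch of the two callbacks mentioned above; the prototypes are
assumptions made for illustration, so check caif_kernel.h for the real
signatures:

    /* Assumed prototypes -- illustration only, not taken from caif_kernel.h. */
    static void my_receive(struct caif_device *dev, struct sk_buff *skb)
    {
            /* the CAIF packet arrives as an SKB; consume or queue it here */
            kfree_skb(skb);
    }

    static void my_control(struct caif_device *dev, int event)
    {
            /* notified of channel-initialization-complete and flow-on/flow-off */
    }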
+
+
+I M P L E M E N T A T I O N
+===========================
+===========================
+
+CAIF Core Protocol Layer
+=========================================
+
+CAIF Core layer implements the CAIF protocol as defined by ST-Ericsson.
+It implements the CAIF protocol stack in a layered approach, where
+each layer described in the specification is implemented as a separate layer.
+The architecture is inspired by the design patterns "Protocol Layer" and
+"Protocol Packet".
+
+== CAIF structure ==
+The Core CAIF implementation contains:
+ - Simple implementation of CAIF.
+ - Layered architecture (a la Streams), each layer in the CAIF
+ specification is implemented in a separate c-file.
+ - Clients must implement PHY layer to access physical HW
+ with receive and transmit functions.
+ - Clients must call configuration function to add PHY layer.
+ - Clients must implement CAIF layer to consume/produce
+ CAIF payload with receive and transmit functions.
+ - Clients must call configuration function to add and connect the
+ Client layer.
+ - When receiving / transmitting CAIF Packets (cfpkt), ownership is passed
+ to the called function (except for framing layers' receive functions
+ or if a transmit function returns an error, in which case the caller
+ must free the packet).
+
+Layered Architecture
+--------------------
+The CAIF protocol can be divided into two parts: Support functions and Protocol
+Implementation. The support functions include:
+
+ - CFPKT CAIF Packet. Implementation of CAIF Protocol Packet. The
+ CAIF Packet has functions for creating, destroying and adding content
+ and for adding/extracting header and trailers to protocol packets.
+
+ - CFLST CAIF list implementation.
+
+ - CFGLUE CAIF Glue. Contains OS Specifics, such as memory
+ allocation, endianness, etc.
+
+The CAIF Protocol implementation contains:
+
+ - CFCNFG CAIF Configuration layer. Configures the CAIF Protocol
+ Stack and provides a Client interface for adding Link-Layer and
+ Driver interfaces on top of the CAIF Stack.
+
+ - CFCTRL CAIF Control layer. Encodes and Decodes control messages
+ such as enumeration and channel setup. Also matches request and
+ response messages.
+
+ - CFSERVL General CAIF Service Layer functionality; handles flow
+ control and remote shutdown requests.
+
+ - CFVEI CAIF VEI layer. Handles CAIF AT Channels on VEI (Virtual
+ External Interface). This layer encodes/decodes VEI frames.
+
+ - CFDGML CAIF Datagram layer. Handles CAIF Datagram layer (IP
+ traffic), encodes/decodes Datagram frames.
+
+ - CFMUX CAIF Mux layer. Handles multiplexing between multiple
+ physical bearers and multiple channels such as VEI, Datagram, etc.
+ The MUX keeps track of the existing CAIF Channels and
+ Physical Instances and selects the appropriate instance based
+ on Channel-Id and Physical-ID.
+
+ - CFFRML CAIF Framing layer. Handles Framing i.e. Frame length
+ and frame checksum.
+
+ - CFSERL CAIF Serial layer. Handles concatenation/split of frames
+ into CAIF Frames with correct length.
+
+
+
+ +---------+
+ | Config |
+ | CFCNFG |
+ +---------+
+ !
+ +---------+ +---------+ +---------+
+ | AT | | Control | | Datagram|
+ | CFVEIL | | CFCTRL | | CFDGML |
+ +---------+ +---------+ +---------+
+ \_____________!______________/
+ !
+ +---------+
+ | MUX |
+ | |
+ +---------+
+ _____!_____
+ / \
+ +---------+ +---------+
+ | CFFRML | | CFFRML |
+ | Framing | | Framing |
+ +---------+ +---------+
+ ! !
+ +---------+ +---------+
+ | | | Serial |
+ | | | CFSERL |
+ +---------+ +---------+
+
+
+In this layered approach the following "rules" apply.
+ - All layers embed the same structure "struct cflayer"
+ - A layer does not depend on any other layer's private data.
+ - Layers are stacked by setting the pointers
+ layer->up , layer->dn
+ - In order to send data upwards, each layer should do
+ layer->up->receive(layer->up, packet);
+ - In order to send data downwards, each layer should do
+ layer->dn->transmit(layer->dn, packet);
+
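A sketch of those rules in code; the member and callback names follow the
rules as written above rather than a verified header:

    /* Signatures assumed from the rules above -- illustration only. */
    struct my_layer {
            struct cflayer layer;   /* every layer embeds the same structure */
    };

    static int my_layer_receive(struct cflayer *layr, struct cfpkt *pkt)
    {
            /* send data upwards */
            return layr->up->receive(layr->up, pkt);
    }

    static int my_layer_transmit(struct cflayer *layr, struct cfpkt *pkt)
    {
            /* send data downwards */
            return layr->dn->transmit(layr->dn, pkt);
    }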
+
+Linux Driver Implementation
+===========================
+
+Linux GPRS Net Device and CAIF socket are implemented on top of the
+CAIF Core protocol. The Net device and CAIF socket have an instance of
+'struct cflayer', just like the CAIF Core protocol stack.
+Net device and Socket implement the 'receive()' function defined by
+'struct cflayer', just like the rest of the CAIF stack. In this way, packet
+transmit and receive are handled just as in the rest of the stack: the
+'dn->transmit()' function is called in order to transmit data.
+
+The layer on top of the CAIF Core implementation is
+sometimes referred to as the "Client layer".
+
+
+Configuration of Link Layer
+---------------------------
+The Link Layer is implemented as Linux net devices (struct net_device).
+Payload handling and registration is done using standard Linux mechanisms.
+
+The CAIF Protocol relies on a loss-less link layer without implementing
+retransmission. This implies that packet drops must not happen.
+Therefore a flow-control mechanism is implemented where the physical
+interface can initiate flow stop for all CAIF Channels.
diff --git a/Documentation/networking/caif/README b/Documentation/networking/caif/README
new file mode 100644
index 000000000000..757ccfaa1385
--- /dev/null
+++ b/Documentation/networking/caif/README
@@ -0,0 +1,109 @@
+Copyright (C) ST-Ericsson AB 2010
+Author: Sjur Brendeland/ sjur.brandeland@stericsson.com
+License terms: GNU General Public License (GPL) version 2
+---------------------------------------------------------
+
+=== Start ===
+If you have compiled CAIF as modules, do:
+
+$ modprobe crc_ccitt
+$ modprobe caif
+$ modprobe caif_socket
+$ modprobe chnl_net
+
+
+=== Preparing the setup with a STE modem ===
+
+If you are working on integration of CAIF you should make sure
+that the kernel is built with module support.
+
+There are some things that need to be tweaked to get the host TTY correctly
+set up to talk to the modem.
+Since the CAIF stack is running in the kernel and we want to use the existing
+TTY, we are installing our physical serial driver as a line discipline above
+the TTY device.
+
+To achieve this we need to install the N_CAIF ldisc from user space.
+The benefit is that we can hook up to any TTY.
+
+The use of the start-of-frame extension (STX) must also be enabled via the
+module parameter "ser_use_stx".
+
+A frame checksum is normally used on the UART; this can be controlled with the
+module parameter "ser_use_fcs".
+
+$ modprobe caif_serial ser_ttyname=/dev/ttyS0 ser_use_stx=yes
+$ ifconfig caif_ttyS0 up
+
+PLEASE NOTE: There is a limitation in Android shell.
+ It only accepts one argument to insmod/modprobe!
+
+=== Troubleshooting ===
+
+There are debugfs parameters provided for serial communication.
+/sys/kernel/debug/caif_serial/<tty-name>/
+
+* ser_state: Prints the bit-mask status where
+ - 0x02 means SENDING, this is a transient state.
+ - 0x10 means FLOW_OFF_SENT, i.e. the previous frame has not been sent
+ and is blocking further send operation. Flow OFF has been propagated
+ to all CAIF Channels using this TTY.
+
+* tty_status: Prints the bit-mask tty status information
+ - 0x01 - tty->warned is on.
+ - 0x02 - tty->low_latency is on.
+ - 0x04 - tty->packet is on.
+ - 0x08 - tty->flow_stopped is on.
+ - 0x10 - tty->hw_stopped is on.
+ - 0x20 - tty->stopped is on.
+
+* last_tx_msg: Binary blob containing the last transmitted frame.
+ This can be printed with
+ $ od --format=x1 /sys/kernel/debug/caif_serial/<tty>/last_tx_msg
+ The first two tx messages sent look like this. Note: The initial
+ byte 02 is the start-of-frame extension (STX) used for re-syncing
+ upon errors.
+
+ - Enumeration:
+   0000000 02 05 00 00 03 01 d2 02
+   Fields, in order: STX(1), Length(2), Control Channel(1),
+   Command:Enumeration(1), Link-ID(1), Checksum(2)
+ - Channel Setup:
+   0000000 02 07 00 00 00 21 a1 00 48 df
+   Fields, in order: STX(1), Length(2), Control Channel(1),
+   Command:Channel Setup(1), Channel Type(1), Priority and Link-ID(1),
+   Endpoint(1), Checksum(2)
+
+* last_rx_msg: Prints the last received frame.
+ The RX messages for LinkSetup look almost identical but they have the
+ bit 0x20 set in the command bit, and Channel Setup has added one byte
+ before Checksum containing Channel ID.
+ NOTE: Several CAIF Messages might be concatenated. The maximum debug
+ buffer size is 128 bytes.
+
+== Error Scenarios:
+- last_tx_msg contains a channel setup message and last_rx_msg is empty ->
+ The host seems to be able to send over the UART, at least the CAIF ldisc gets
+ notified that sending is completed.
+
+- last_tx_msg contains an enumeration message and last_rx_msg is empty ->
+ The host is not able to send the message over the UART; the tty has not been
+ able to complete the transmit operation.
+
+- if /sys/kernel/debug/caif_serial/<tty>/tty_status is non-zero there
+ might be problems transmitting over the UART.
+ E.g. if the host and modem wiring is not correct, you will typically see
+ tty_status = 0x10 (hw_stopped) and ser_state = 0x10 (FLOW_OFF_SENT).
+ You will probably see the enumeration message in last_tx_msg
+ and an empty last_rx_msg.
diff --git a/Documentation/networking/ifenslave.c b/Documentation/networking/ifenslave.c
index 1b96ccda3836..2bac9618c345 100644
--- a/Documentation/networking/ifenslave.c
+++ b/Documentation/networking/ifenslave.c
@@ -756,7 +756,7 @@ static int enslave(char *master_ifname, char *slave_ifname)
*/
if (abi_ver < 1) {
/* For old ABI, the master needs to be
- * down before setting it's hwaddr
+ * down before setting its hwaddr
*/
res = set_if_down(master_ifname, master_flags.ifr_flags);
if (res) {
diff --git a/Documentation/networking/l2tp.txt b/Documentation/networking/l2tp.txt
index 63214b280e00..e7bf3979facb 100644
--- a/Documentation/networking/l2tp.txt
+++ b/Documentation/networking/l2tp.txt
@@ -1,44 +1,95 @@
-This brief document describes how to use the kernel's PPPoL2TP driver
-to provide L2TP functionality. L2TP is a protocol that tunnels one or
-more PPP sessions over a UDP tunnel. It is commonly used for VPNs
+This document describes how to use the kernel's L2TP drivers to
+provide L2TP functionality. L2TP is a protocol that tunnels one or
+more sessions over an IP tunnel. It is commonly used for VPNs
(L2TP/IPSec) and by ISPs to tunnel subscriber PPP sessions over an IP
-network infrastructure.
+network infrastructure. With L2TPv3, it is also useful as a Layer-2
+tunneling infrastructure.
+
+Features
+========
+
+L2TPv2 (PPP over L2TP (UDP tunnels)).
+L2TPv3 ethernet pseudowires.
+L2TPv3 PPP pseudowires.
+L2TPv3 IP encapsulation.
+Netlink sockets for L2TPv3 configuration management.
+
+History
+=======
+
+The original pppol2tp driver was introduced in 2.6.23 and provided
+L2TPv2 functionality (rfc2661). L2TPv2 is used to tunnel one or more PPP
+sessions over a UDP tunnel.
+
+L2TPv3 (rfc3931) changes the protocol to allow different frame types
+to be passed over an L2TP tunnel by moving the PPP-specific parts of
+the protocol out of the core L2TP packet headers. Each frame type is
+known as a pseudowire type. Ethernet, PPP, HDLC, Frame Relay and ATM
+pseudowires for L2TP are defined in separate RFC standards. Another
+change for L2TPv3 is that it can be carried directly over IP with no
+UDP header (UDP is optional). It is also possible to create static
+unmanaged L2TPv3 tunnels manually without a control protocol
+(userspace daemon) to manage them.
+
+To support L2TPv3, the original pppol2tp driver was split up to
+separate the L2TP and PPP functionality. Existing L2TPv2 userspace
+apps should be unaffected as the original pppol2tp sockets API is
+retained. L2TPv3, however, uses netlink to manage L2TPv3 tunnels and
+sessions.
Design
======
-The PPPoL2TP driver, drivers/net/pppol2tp.c, provides a mechanism by
-which PPP frames carried through an L2TP session are passed through
-the kernel's PPP subsystem. The standard PPP daemon, pppd, handles all
-PPP interaction with the peer. PPP network interfaces are created for
-each local PPP endpoint.
-
-The L2TP protocol http://www.faqs.org/rfcs/rfc2661.html defines L2TP
-control and data frames. L2TP control frames carry messages between
-L2TP clients/servers and are used to setup / teardown tunnels and
-sessions. An L2TP client or server is implemented in userspace and
-will use a regular UDP socket per tunnel. L2TP data frames carry PPP
-frames, which may be PPP control or PPP data. The kernel's PPP
+The L2TP protocol separates control and data frames. The L2TP kernel
+drivers handle only L2TP data frames; control frames are always
+handled by userspace. L2TP control frames carry messages between L2TP
+clients/servers and are used to setup / teardown tunnels and
+sessions. An L2TP client or server is implemented in userspace.
+
+Each L2TP tunnel is implemented using a UDP or L2TPIP socket; L2TPIP
+provides L2TPv3 IP encapsulation (no UDP) and is implemented using a
+new l2tpip socket family. The tunnel socket is typically created by
+userspace, though for unmanaged L2TPv3 tunnels, the socket can also be
+created by the kernel. Each L2TP session (pseudowire) gets a network
+interface instance. In the case of PPP, these interfaces are created
+indirectly by pppd using a pppol2tp socket. In the case of ethernet,
+the netdevice is created upon a netlink request to create an L2TPv3
+ethernet pseudowire.
+
+For PPP, the PPPoL2TP driver, net/l2tp/l2tp_ppp.c, provides a
+mechanism by which PPP frames carried through an L2TP session are
+passed through the kernel's PPP subsystem. The standard PPP daemon,
+pppd, handles all PPP interaction with the peer. PPP network
+interfaces are created for each local PPP endpoint. The kernel's PPP
subsystem arranges for PPP control frames to be delivered to pppd,
while data frames are forwarded as usual.
+For ethernet, the L2TPETH driver, net/l2tp/l2tp_eth.c, implements a
+netdevice driver, managing virtual ethernet devices, one per
+pseudowire. These interfaces can be managed using standard Linux tools
+such as "ip" and "ifconfig". If only IP frames are passed over the
+tunnel, the interface can be given the IP addresses of itself and its
+peer. If non-IP frames are to be passed over the tunnel, the interface
+can be added to a bridge using brctl. All L2TP datapath protocol
+functions are handled by the L2TP core driver.
+
Each tunnel and session within a tunnel is assigned a unique tunnel_id
and session_id. These ids are carried in the L2TP header of every
-control and data packet. The pppol2tp driver uses them to lookup
-internal tunnel and/or session contexts. Zero tunnel / session ids are
-treated specially - zero ids are never assigned to tunnels or sessions
-in the network. In the driver, the tunnel context keeps a pointer to
-the tunnel UDP socket. The session context keeps a pointer to the
-PPPoL2TP socket, as well as other data that lets the driver interface
-to the kernel PPP subsystem.
-
-Note that the pppol2tp kernel driver handles only L2TP data frames;
-L2TP control frames are simply passed up to userspace in the UDP
-tunnel socket. The kernel handles all datapath aspects of the
-protocol, including data packet resequencing (if enabled).
-
-There are a number of requirements on the userspace L2TP daemon in
-order to use the pppol2tp driver.
+control and data packet. (Actually, in L2TPv3, the tunnel_id isn't
+present in data frames - it is inferred from the IP connection on
+which the packet was received.) The L2TP driver uses the ids to lookup
+internal tunnel and/or session contexts to determine how to handle the
+packet. Zero tunnel / session ids are treated specially - zero ids are
+never assigned to tunnels or sessions in the network. In the driver,
+the tunnel context keeps a reference to the tunnel UDP or L2TPIP
+socket. The session context holds data that lets the driver interface
+to the kernel's network frame type subsystems, i.e. PPP, ethernet.
+
+Userspace Programming
+=====================
+
+For L2TPv2, there are a number of requirements on the userspace L2TP
+daemon in order to use the pppol2tp driver.
1. Use a UDP socket per tunnel.
@@ -86,6 +137,35 @@ In addition to the standard PPP ioctls, a PPPIOCGL2TPSTATS is provided
to retrieve tunnel and session statistics from the kernel using the
PPPoX socket of the appropriate tunnel or session.
+For L2TPv3, userspace must use the netlink API defined in
+include/linux/l2tp.h to manage tunnel and session contexts. The
+general procedure to create a new L2TP tunnel with one session is:-
+
+1. Open a GENL socket using L2TP_GENL_NAME for configuring the kernel
+ using netlink.
+
+2. Create a UDP or L2TPIP socket for the tunnel.
+
+3. Create a new L2TP tunnel using a L2TP_CMD_TUNNEL_CREATE
+ request. Set attributes according to desired tunnel parameters,
+ referencing the UDP or L2TPIP socket created in the previous step.
+
+4. Create a new L2TP session in the tunnel using a
+ L2TP_CMD_SESSION_CREATE request.
+
+The tunnel and all of its sessions are closed when the tunnel socket
+is closed. The netlink API may also be used to delete sessions and
+tunnels. Configuration and status info may be set or read using netlink.
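+
+A hedged sketch of steps 1 and 3 using the libnl-3 generic netlink library is
+shown below; it assumes an already created tunnel UDP socket 'tfd', omits all
+error handling and cleanup, and the function name is made up:
+
+#include <stdint.h>
+#include <netlink/netlink.h>
+#include <netlink/genl/genl.h>
+#include <netlink/genl/ctrl.h>
+#include <linux/l2tp.h>
+
+static int create_l2tpv3_tunnel(int tfd, uint32_t tid, uint32_t peer_tid)
+{
+        struct nl_sock *sk = nl_socket_alloc();
+        struct nl_msg *msg = nlmsg_alloc();
+        int family;
+
+        /* step 1: open a GENL socket and resolve the l2tp family */
+        genl_connect(sk);
+        family = genl_ctrl_resolve(sk, L2TP_GENL_NAME);
+
+        /* step 3: build and send an L2TP_CMD_TUNNEL_CREATE request */
+        genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0,
+                    NLM_F_REQUEST | NLM_F_ACK,
+                    L2TP_CMD_TUNNEL_CREATE, L2TP_GENL_VERSION);
+        nla_put_u8(msg, L2TP_ATTR_PROTO_VERSION, 3);
+        nla_put_u16(msg, L2TP_ATTR_ENCAP_TYPE, L2TP_ENCAPTYPE_UDP);
+        nla_put_u32(msg, L2TP_ATTR_CONN_ID, tid);
+        nla_put_u32(msg, L2TP_ATTR_PEER_CONN_ID, peer_tid);
+        nla_put_u32(msg, L2TP_ATTR_FD, tfd);    /* the tunnel UDP socket */
+
+        nl_send_auto(sk, msg);
+        return nl_wait_for_ack(sk);
+}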
+
+The L2TP driver also supports static (unmanaged) L2TPv3 tunnels. These
+are where there is no L2TP control message exchange with the peer to
+setup the tunnel; the tunnel is configured manually at each end of the
+tunnel. There is no need for an L2TP userspace application in this
+case -- the tunnel socket is created by the kernel and configured
+using parameters sent in the L2TP_CMD_TUNNEL_CREATE netlink
+request. The "ip" utility of iproute2 has commands for managing static
+L2TPv3 tunnels; do "ip l2tp help" for more information.
+
Debugging
=========
@@ -102,6 +182,69 @@ PPPOL2TP_MSG_CONTROL userspace - kernel interface
PPPOL2TP_MSG_SEQ sequence numbers handling
PPPOL2TP_MSG_DATA data packets
+If enabled, files under a l2tp debugfs directory can be used to dump
+kernel state about L2TP tunnels and sessions. To access it, the
+debugfs filesystem must first be mounted.
+
+# mount -t debugfs debugfs /debug
+
+Files under the l2tp directory can then be accessed.
+
+# cat /debug/l2tp/tunnels
+
+The debugfs files should not be used by applications to obtain L2TP
+state information because the file format is subject to change. They are
+implemented to provide extra debug information to help diagnose
+problems. Users should use the netlink API.
+
+/proc/net/pppol2tp is also provided for backwards compatibility with
+the original pppol2tp driver. It lists information about L2TPv2
+tunnels and sessions only. Its use is discouraged.
+
+Unmanaged L2TPv3 Tunnels
+========================
+
+Some commercial L2TP products support unmanaged L2TPv3 ethernet
+tunnels, where there is no L2TP control protocol; tunnels are
+configured at each side manually. New commands are available in
+iproute2's ip utility to support this.
+
+To create an L2TPv3 ethernet pseudowire between local host 192.168.1.1
+and peer 192.168.1.2, using IP addresses 10.5.1.1 and 10.5.1.2 for the
+tunnel endpoints:-
+
+# modprobe l2tp_eth
+# modprobe l2tp_netlink
+
+# ip l2tp add tunnel tunnel_id 1 peer_tunnel_id 1 udp_sport 5000 \
+ udp_dport 5000 encap udp local 192.168.1.1 remote 192.168.1.2
+# ip l2tp add session tunnel_id 1 session_id 1 peer_session_id 1
+# ifconfig -a
+# ip addr add 10.5.1.2/32 peer 10.5.1.1/32 dev l2tpeth0
+# ifconfig l2tpeth0 up
+
+Choose IP addresses to be the address of a local IP interface and that
+of the remote system. The IP addresses of the l2tpeth0 interface can be
+anything suitable.
+
+Repeat the above at the peer, with ports, tunnel/session ids and IP
+addresses reversed. The tunnel and session IDs can be any non-zero
+32-bit number, but the values must be reversed at the peer.
+
+Host 1                     Host 2
+udp_sport=5000             udp_sport=5001
+udp_dport=5001             udp_dport=5000
+tunnel_id=42               tunnel_id=45
+peer_tunnel_id=45          peer_tunnel_id=42
+session_id=128             session_id=5196755
+peer_session_id=5196755    peer_session_id=128
+
+When done at both ends of the tunnel, it should be possible to send
+data over the network, e.g.
+
+# ping 10.5.1.1
+
+
Sample Userspace Code
=====================
@@ -158,12 +301,48 @@ Sample Userspace Code
}
return 0;
+Internal Implementation
+=======================
+
+The driver keeps a struct l2tp_tunnel context per L2TP tunnel and a
+struct l2tp_session context for each session. The l2tp_tunnel is
+always associated with a UDP or L2TP/IP socket and keeps a list of
+sessions in the tunnel. The l2tp_session context keeps kernel state
+about the session. It has private data which is used for data specific
+to the session type. With L2TPv2, the session always carried PPP
+traffic. With L2TPv3, the session can also carry ethernet frames
+(ethernet pseudowire) or other data types such as ATM, HDLC or Frame
+Relay.
+
+When a tunnel is first opened, the reference count on the socket is
+increased using sock_hold(). This ensures that the kernel socket
+cannot be removed while L2TP's data structures reference it.
+
+Some L2TP sessions also have a socket (PPP pseudowires) while others
+do not (ethernet pseudowires). We can't use the socket reference count
+as the reference count for session contexts. The L2TP implementation
+therefore has its own internal reference counts on the session
+contexts.
+
+To Do
+=====
+
+Add L2TP tunnel switching support. This would route tunneled traffic
+from one L2TP tunnel into another. Specified in
+http://tools.ietf.org/html/draft-ietf-l2tpext-tunnel-switching-08
+
+Add L2TPv3 VLAN pseudowire support.
+
+Add L2TPv3 IP pseudowire support.
+
+Add L2TPv3 ATM pseudowire support.
+
Miscellaneous
-============
+=============
-The PPPoL2TP driver was developed as part of the OpenL2TP project by
+The L2TP drivers were developed as part of the OpenL2TP project by
Katalix Systems Ltd. OpenL2TP is a full-featured L2TP client / server,
designed from the ground up to have the L2TP datapath in the
kernel. The project also implemented the pppol2tp plugin for pppd
which allows pppd to use the kernel driver. Details can be found at
-http://openl2tp.sourceforge.net.
+http://www.openl2tp.org.
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
index 09ab0d290326..98f71a5cef00 100644
--- a/Documentation/networking/packet_mmap.txt
+++ b/Documentation/networking/packet_mmap.txt
@@ -100,7 +100,7 @@ by the kernel.
The destruction of the socket and all associated resources
is done by a simple call to close(fd).
-Next I will describe PACKET_MMAP settings and it's constraints,
+Next I will describe PACKET_MMAP settings and its constraints,
also the mapping of the circular buffer in the user process and
the use of this buffer.
@@ -432,7 +432,7 @@ TP_STATUS_LOSING : indicates there were packet drops from last time
the PACKET_STATISTICS option.
TP_STATUS_CSUMNOTREADY: currently it's used for outgoing IP packets which
- it's checksum will be done in hardware. So while
+ its checksum will be done in hardware. So while
reading the packet we should not try to check the
checksum.
diff --git a/Documentation/networking/x25-iface.txt b/Documentation/networking/x25-iface.txt
index 975cc87ebdd1..78f662ee0622 100644
--- a/Documentation/networking/x25-iface.txt
+++ b/Documentation/networking/x25-iface.txt
@@ -20,23 +20,23 @@ the rest of the skbuff, if any more information does exist.
Packet Layer to Device Driver
-----------------------------
-First Byte = 0x00
+First Byte = 0x00 (X25_IFACE_DATA)
This indicates that the rest of the skbuff contains data to be transmitted
over the LAPB link. The LAPB link should already exist before any data is
passed down.
-First Byte = 0x01
+First Byte = 0x01 (X25_IFACE_CONNECT)
Establish the LAPB link. If the link is already established then the connect
confirmation message should be returned as soon as possible.
-First Byte = 0x02
+First Byte = 0x02 (X25_IFACE_DISCONNECT)
Terminate the LAPB link. If it is already disconnected then the disconnect
confirmation message should be returned as soon as possible.
-First Byte = 0x03
+First Byte = 0x03 (X25_IFACE_PARAMS)
LAPB parameters. To be defined.
@@ -44,22 +44,22 @@ LAPB parameters. To be defined.
Device Driver to Packet Layer
-----------------------------
-First Byte = 0x00
+First Byte = 0x00 (X25_IFACE_DATA)
This indicates that the rest of the skbuff contains data that has been
received over the LAPB link.
-First Byte = 0x01
+First Byte = 0x01 (X25_IFACE_CONNECT)
LAPB link has been established. The same message is used for both a LAPB
link connect_confirmation and a connect_indication.
-First Byte = 0x02
+First Byte = 0x02 (X25_IFACE_DISCONNECT)
LAPB link has been terminated. This same message is used for both a LAPB
link disconnect_confirmation and a disconnect_indication.
-First Byte = 0x03
+First Byte = 0x03 (X25_IFACE_PARAMS)
LAPB parameters. To be defined.
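+
+A hedged sketch (not part of the original text) of how a device driver might
+tag an incoming LAPB frame before handing it to the packet layer; the function
+name is illustrative:
+
+#include <linux/if_x25.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+
+static int example_data_indication(struct net_device *dev, struct sk_buff *skb)
+{
+        unsigned char *ptr = skb_push(skb, 1);  /* room for the pseudo header */
+
+        *ptr = X25_IFACE_DATA;                  /* "data received" indication */
+        skb->dev = dev;
+        skb->protocol = htons(ETH_P_X25);
+        return netif_rx(skb);
+}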
diff --git a/Documentation/pcmcia/driver-changes.txt b/Documentation/pcmcia/driver-changes.txt
index 446f43b309df..61bc4e943116 100644
--- a/Documentation/pcmcia/driver-changes.txt
+++ b/Documentation/pcmcia/driver-changes.txt
@@ -1,4 +1,17 @@
This file details changes in 2.6 which affect PCMCIA card driver authors:
+* No dev_node_t (as of 2.6.35)
+ There is no more need to fill out a "dev_node_t" structure.
+
+* New IRQ request rules (as of 2.6.35)
+ Instead of the old pcmcia_request_irq() interface, drivers may now
+ choose between:
+ - calling request_irq/free_irq directly. Use the IRQ from *p_dev->irq.
+ - use pcmcia_request_irq(p_dev, handler_t); the PCMCIA core will
+ clean up automatically on calls to pcmcia_disable_device() or
+ device ejection.
+ - drivers still not capable of IRQF_SHARED (or not telling us so) may
+ use the deprecated pcmcia_request_exclusive_irq() for the time
+ being; they might receive a shared IRQ nonetheless.
* no cs_error / CS_CHECK / CONFIG_PCMCIA_DEBUG (as of 2.6.33)
Instead of the cs_error() callback or the CS_CHECK() macro, please use
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
index c9abbd86bc18..57080cd74575 100644
--- a/Documentation/power/devices.txt
+++ b/Documentation/power/devices.txt
@@ -1,7 +1,13 @@
+Device Power Management
+
+Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+Copyright (c) 2010 Alan Stern <stern@rowland.harvard.edu>
+
+
Most of the code in Linux is device drivers, so most of the Linux power
-management code is also driver-specific. Most drivers will do very little;
-others, especially for platforms with small batteries (like cell phones),
-will do a lot.
+management (PM) code is also driver-specific. Most drivers will do very
+little; others, especially for platforms with small batteries (like cell
+phones), will do a lot.
This writeup gives an overview of how drivers interact with system-wide
power management goals, emphasizing the models and interfaces that are
@@ -15,9 +21,10 @@ Drivers will use one or both of these models to put devices into low-power
states:
System Sleep model:
- Drivers can enter low power states as part of entering system-wide
- low-power states like "suspend-to-ram", or (mostly for systems with
- disks) "hibernate" (suspend-to-disk).
+ Drivers can enter low-power states as part of entering system-wide
+ low-power states like "suspend" (also known as "suspend-to-RAM"), or
+ (mostly for systems with disks) "hibernation" (also known as
+ "suspend-to-disk").
This is something that device, bus, and class drivers collaborate on
by implementing various role-specific suspend and resume methods to
@@ -25,33 +32,41 @@ states:
them without loss of data.
Some drivers can manage hardware wakeup events, which make the system
- leave that low-power state. This feature may be disabled using the
- relevant /sys/devices/.../power/wakeup file; enabling it may cost some
- power usage, but let the whole system enter low power states more often.
+ leave the low-power state. This feature may be enabled or disabled
+ using the relevant /sys/devices/.../power/wakeup file (for Ethernet
+ drivers the ioctl interface used by ethtool may also be used for this
+ purpose); enabling it may cost some power usage, but let the whole
+ system enter low-power states more often.
Runtime Power Management model:
- Drivers may also enter low power states while the system is running,
- independently of other power management activity. Upstream drivers
- will normally not know (or care) if the device is in some low power
- state when issuing requests; the driver will auto-resume anything
- that's needed when it gets a request.
-
- This doesn't have, or need much infrastructure; it's just something you
- should do when writing your drivers. For example, clk_disable() unused
- clocks as part of minimizing power drain for currently-unused hardware.
- Of course, sometimes clusters of drivers will collaborate with each
- other, which could involve task-specific power management.
-
-There's not a lot to be said about those low power states except that they
-are very system-specific, and often device-specific. Also, that if enough
-drivers put themselves into low power states (at "runtime"), the effect may be
-the same as entering some system-wide low-power state (system sleep) ... and
-that synergies exist, so that several drivers using runtime pm might put the
-system into a state where even deeper power saving options are available.
-
-Most suspended devices will have quiesced all I/O: no more DMA or irqs, no
-more data read or written, and requests from upstream drivers are no longer
-accepted. A given bus or platform may have different requirements though.
+ Devices may also be put into low-power states while the system is
+ running, independently of other power management activity in principle.
+ However, devices are not generally independent of each other (for
+ example, a parent device cannot be suspended unless all of its child
+ devices have been suspended). Moreover, depending on the bus type the
+ device is on, it may be necessary to carry out some bus-specific
+ operations on the device for this purpose. Devices put into low power
+ states at run time may require special handling during system-wide power
+ transitions (suspend or hibernation).
+
+ For these reasons not only the device driver itself, but also the
+ appropriate subsystem (bus type, device type or device class) driver and
+ the PM core are involved in runtime power management. As in the system
+ sleep power management case, they need to collaborate by implementing
+ various role-specific suspend and resume methods, so that the hardware
+ is cleanly powered down and reactivated without data or service loss.
+
+There's not a lot to be said about those low-power states except that they are
+very system-specific, and often device-specific. Also, that if enough devices
+have been put into low-power states (at runtime), the effect may be very similar
+to entering some system-wide low-power state (system sleep) ... and that
+synergies exist, so that several drivers using runtime PM might put the system
+into a state where even deeper power saving options are available.
+
+Most suspended devices will have quiesced all I/O: no more DMA or IRQs (except
+for wakeup events), no more data read or written, and requests from upstream
+drivers are no longer accepted. A given bus or platform may have different
+requirements though.
Examples of hardware wakeup events include an alarm from a real time clock,
network wake-on-LAN packets, keyboard or mouse activity, and media insertion
@@ -60,129 +75,152 @@ or removal (for PCMCIA, MMC/SD, USB, and so on).
Interfaces for Entering System Sleep States
===========================================
-Most of the programming interfaces a device driver needs to know about
-relate to that first model: entering a system-wide low power state,
-rather than just minimizing power consumption by one device.
-
-
-Bus Driver Methods
-------------------
-The core methods to suspend and resume devices reside in struct bus_type.
-These are mostly of interest to people writing infrastructure for busses
-like PCI or USB, or because they define the primitives that device drivers
-may need to apply in domain-specific ways to their devices:
-
-struct bus_type {
- ...
- int (*suspend)(struct device *dev, pm_message_t state);
- int (*resume)(struct device *dev);
+There are programming interfaces provided for subsystems (bus type, device type,
+device class) and device drivers to allow them to participate in the power
+management of devices they are concerned with. These interfaces cover both
+system sleep and runtime power management.
+
+
+Device Power Management Operations
+----------------------------------
+Device power management operations, at the subsystem level as well as at the
+device driver level, are implemented by defining and populating objects of type
+struct dev_pm_ops:
+
+struct dev_pm_ops {
+ int (*prepare)(struct device *dev);
+ void (*complete)(struct device *dev);
+ int (*suspend)(struct device *dev);
+ int (*resume)(struct device *dev);
+ int (*freeze)(struct device *dev);
+ int (*thaw)(struct device *dev);
+ int (*poweroff)(struct device *dev);
+ int (*restore)(struct device *dev);
+ int (*suspend_noirq)(struct device *dev);
+ int (*resume_noirq)(struct device *dev);
+ int (*freeze_noirq)(struct device *dev);
+ int (*thaw_noirq)(struct device *dev);
+ int (*poweroff_noirq)(struct device *dev);
+ int (*restore_noirq)(struct device *dev);
+ int (*runtime_suspend)(struct device *dev);
+ int (*runtime_resume)(struct device *dev);
+ int (*runtime_idle)(struct device *dev);
};
-Bus drivers implement those methods as appropriate for the hardware and
-the drivers using it; PCI works differently from USB, and so on. Not many
-people write bus drivers; most driver code is a "device driver" that
-builds on top of bus-specific framework code.
+This structure is defined in include/linux/pm.h and the methods included in it
+are also described in that file. Their roles will be explained in what follows.
+For now, it should be sufficient to remember that the last three methods are
+specific to runtime power management while the remaining ones are used during
+system-wide power transitions.
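+
+For illustration only (not taken from this document), a driver that only needs
+basic system sleep support might populate the structure as in the following
+sketch; the "foo" names are placeholders:
+
+#include <linux/device.h>
+#include <linux/pm.h>
+
+static int foo_suspend(struct device *dev)
+{
+        /* quiesce the device, save state, optionally enter a low-power state */
+        return 0;
+}
+
+static int foo_resume(struct device *dev)
+{
+        /* restore state and reactivate the device */
+        return 0;
+}
+
+static const struct dev_pm_ops foo_pm_ops = {
+        .suspend = foo_suspend,
+        .resume  = foo_resume,
+        /* callbacks left unset are simply not invoked for this device */
+};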
-For more information on these driver calls, see the description later;
-they are called in phases for every device, respecting the parent-child
-sequencing in the driver model tree. Note that as this is being written,
-only the suspend() and resume() are widely available; not many bus drivers
-leverage all of those phases, or pass them down to lower driver levels.
+There also is a deprecated "old" or "legacy" interface for power management
+operations available at least for some subsystems. This approach does not use
+struct dev_pm_ops objects and it is suitable only for implementing system sleep
+power management methods. Therefore it is not described in this document, so
+please refer directly to the source code for more information about it.
-/sys/devices/.../power/wakeup files
------------------------------------
-All devices in the driver model have two flags to control handling of
-wakeup events, which are hardware signals that can force the device and/or
-system out of a low power state. These are initialized by bus or device
-driver code using device_init_wakeup(dev,can_wakeup).
+Subsystem-Level Methods
+-----------------------
+The core methods to suspend and resume devices reside in struct dev_pm_ops
+pointed to by the pm member of struct bus_type, struct device_type and
+struct class. They are mostly of interest to the people writing infrastructure
+for buses, like PCI or USB, or device type and device class drivers.
-The "can_wakeup" flag just records whether the device (and its driver) can
-physically support wakeup events. When that flag is clear, the sysfs
-"wakeup" file is empty, and device_may_wakeup() returns false.
+Bus drivers implement these methods as appropriate for the hardware and the
+drivers using it; PCI works differently from USB, and so on. Not many people
+write subsystem-level drivers; most driver code is a "device driver" that builds
+on top of bus-specific framework code.
-For devices that can issue wakeup events, a separate flag controls whether
-that device should try to use its wakeup mechanism. The initial value of
-device_may_wakeup() will be true, so that the device's "wakeup" file holds
-the value "enabled". Userspace can change that to "disabled" so that
-device_may_wakeup() returns false; or change it back to "enabled" (so that
-it returns true again).
+For more information on these driver calls, see the description later;
+they are called in phases for every device, respecting the parent-child
+sequencing in the driver model tree.
-EXAMPLE: PCI Device Driver Methods
+/sys/devices/.../power/wakeup files
-----------------------------------
-PCI framework software calls these methods when the PCI device driver bound
-to a device device has provided them:
-
-struct pci_driver {
- ...
- int (*suspend)(struct pci_device *pdev, pm_message_t state);
- int (*suspend_late)(struct pci_device *pdev, pm_message_t state);
+All devices in the driver model have two flags to control handling of wakeup
+events (hardware signals that can force the device and/or system out of a low
+power state). These flags are initialized by bus or device driver code using
+device_set_wakeup_capable() and device_set_wakeup_enable(), defined in
+include/linux/pm_wakeup.h.
- int (*resume_early)(struct pci_device *pdev);
- int (*resume)(struct pci_device *pdev);
-};
-
-Drivers will implement those methods, and call PCI-specific procedures
-like pci_set_power_state(), pci_enable_wake(), pci_save_state(), and
-pci_restore_state() to manage PCI-specific mechanisms. (PCI config space
-could be saved during driver probe, if it weren't for the fact that some
-systems rely on userspace tweaking using setpci.) Devices are suspended
-before their bridges enter low power states, and likewise bridges resume
-before their devices.
-
-
-Upper Layers of Driver Stacks
------------------------------
-Device drivers generally have at least two interfaces, and the methods
-sketched above are the ones which apply to the lower level (nearer PCI, USB,
-or other bus hardware). The network and block layers are examples of upper
-level interfaces, as is a character device talking to userspace.
-
-Power management requests normally need to flow through those upper levels,
-which often use domain-oriented requests like "blank that screen". In
-some cases those upper levels will have power management intelligence that
-relates to end-user activity, or other devices that work in cooperation.
-
-When those interfaces are structured using class interfaces, there is a
-standard way to have the upper layer stop issuing requests to a given
-class device (and restart later):
-
-struct class {
- ...
- int (*suspend)(struct device *dev, pm_message_t state);
- int (*resume)(struct device *dev);
-};
-
-Those calls are issued in specific phases of the process by which the
-system enters a low power "suspend" state, or resumes from it.
-
-
-Calling Drivers to Enter System Sleep States
-============================================
-When the system enters a low power state, each device's driver is asked
-to suspend the device by putting it into state compatible with the target
+The "can_wakeup" flag just records whether the device (and its driver) can
+physically support wakeup events. The device_set_wakeup_capable() routine
+affects this flag. The "should_wakeup" flag controls whether the device should
+try to use its wakeup mechanism. device_set_wakeup_enable() affects this flag;
+for the most part drivers should not change its value. The initial value of
+should_wakeup is supposed to be false for the majority of devices; the major
+exceptions are power buttons, keyboards, and Ethernet adapters whose WoL
+(wake-on-LAN) feature has been set up with ethtool.
+
+Whether or not a device is capable of issuing wakeup events is a hardware
+matter, and the kernel is responsible for keeping track of it. By contrast,
+whether or not a wakeup-capable device should issue wakeup events is a policy
+decision, and it is managed by user space through a sysfs attribute: the
+power/wakeup file. User space can write the strings "enabled" or "disabled" to
+set or clear the should_wakeup flag, respectively. Reads from the file will
+return the corresponding string if can_wakeup is true, but if can_wakeup is
+false then reads will return an empty string, to indicate that the device
+doesn't support wakeup events. (But even though the file appears empty, writes
+will still affect the should_wakeup flag.)
+
+The device_may_wakeup() routine returns true only if both flags are set.
+Drivers should check this routine when putting devices in a low-power state
+during a system sleep transition, to see whether or not to enable the devices'
+wakeup mechanisms. However for runtime power management, wakeup events should
+be enabled whenever the device and driver both support them, regardless of the
+should_wakeup flag.
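+
+A hedged sketch of how a driver might use these calls (the "mydev" names and
+the IRQ bookkeeping are placeholders, not from this document):
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/pm_wakeup.h>
+
+struct mydev_priv {
+        int irq;
+};
+
+/* probe time: record that the hardware can generate wakeup signals */
+static void mydev_declare_wakeup(struct device *dev)
+{
+        device_set_wakeup_capable(dev, true);
+}
+
+/* system suspend: arm the wakeup IRQ only if both flags are set */
+static int mydev_suspend(struct device *dev)
+{
+        struct mydev_priv *priv = dev_get_drvdata(dev);
+
+        if (device_may_wakeup(dev))
+                enable_irq_wake(priv->irq);
+        return 0;
+}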
+
+
+/sys/devices/.../power/control files
+------------------------------------
+Each device in the driver model has a flag to control whether it is subject to
+runtime power management. This flag, called runtime_auto, is initialized by the
+bus type (or generally subsystem) code using pm_runtime_allow() or
+pm_runtime_forbid(); the default is to allow runtime power management.
+
+The setting can be adjusted by user space by writing either "on" or "auto" to
+the device's power/control sysfs file. Writing "auto" calls pm_runtime_allow(),
+setting the flag and allowing the device to be runtime power-managed by its
+driver. Writing "on" calls pm_runtime_forbid(), clearing the flag, returning
+the device to full power if it was in a low-power state, and preventing the
+device from being runtime power-managed. User space can check the current value
+of the runtime_auto flag by reading the file.
+
+The device's runtime_auto flag has no effect on the handling of system-wide
+power transitions. In particular, the device can (and in the majority of cases
+should and will) be put into a low-power state during a system-wide transition
+to a sleep state even though its runtime_auto flag is clear.
+
+For more information about the runtime power management framework, refer to
+Documentation/power/runtime_pm.txt.
+
+
+Calling Drivers to Enter and Leave System Sleep States
+======================================================
+When the system goes into a sleep state, each device's driver is asked to
+suspend the device by putting it into a state compatible with the target
system state. That's usually some version of "off", but the details are
system-specific. Also, wakeup-enabled devices will usually stay partly
functional in order to wake the system.
-When the system leaves that low power state, the device's driver is asked
-to resume it. The suspend and resume operations always go together, and
-both are multi-phase operations.
+When the system leaves that low-power state, the device's driver is asked to
+resume it by returning it to full power. The suspend and resume operations
+always go together, and both are multi-phase operations.
-For simple drivers, suspend might quiesce the device using the class code
-and then turn its hardware as "off" as possible with late_suspend. The
+For simple drivers, suspend might quiesce the device using class code
+and then turn its hardware as "off" as possible during suspend_noirq. The
matching resume calls would then completely reinitialize the hardware
before reactivating its class I/O queues.
-More power-aware drivers drivers will use more than one device low power
-state, either at runtime or during system sleep states, and might trigger
-system wakeup events.
+More power-aware drivers might prepare the devices for triggering system wakeup
+events.
Call Sequence Guarantees
------------------------
-To ensure that bridges and similar links needed to talk to a device are
+To ensure that bridges and similar links needing to talk to a device are
available when the device is suspended or resumed, the device tree is
walked in a bottom-up order to suspend devices. A top-down order is
used to resume those devices.
@@ -194,67 +232,310 @@ its parent; and can't be removed or suspended after that parent.
The policy is that the device tree should match hardware bus topology.
(Or at least the control bus, for devices which use multiple busses.)
In particular, this means that a device registration may fail if the parent of
-the device is suspending (ie. has been chosen by the PM core as the next
+the device is suspending (i.e. has been chosen by the PM core as the next
device to suspend) or has already suspended, as well as after all of the other
devices have been suspended. Device drivers must be prepared to cope with such
situations.
-Suspending Devices
-------------------
-Suspending a given device is done in several phases. Suspending the
-system always includes every phase, executing calls for every device
-before the next phase begins. Not all busses or classes support all
-these callbacks; and not all drivers use all the callbacks.
+System Power Management Phases
+------------------------------
+Suspending or resuming the system is done in several phases. Different phases
+are used for standby or memory sleep states ("suspend-to-RAM") and the
+hibernation state ("suspend-to-disk"). Each phase involves executing callbacks
+for every device before the next phase begins. Not all busses or classes
+support all these callbacks and not all drivers use all the callbacks. The
+various phases always run after tasks have been frozen and before they are
+unfrozen. Furthermore, the *_noirq phases run at a time when IRQ handlers have
+been disabled (except for those marked with the IRQ_WAKEUP flag).
-The phases are seen by driver notifications issued in this order:
+Most phases use bus, type, and class callbacks (that is, methods defined in
+dev->bus->pm, dev->type->pm, and dev->class->pm). The prepare and complete
+phases are exceptions; they use only bus callbacks. When multiple callbacks
+are used in a phase, they are invoked in the order: <class, type, bus> during
+power-down transitions and in the opposite order during power-up transitions.
+For example, during the suspend phase the PM core invokes
- 1 class.suspend(dev, message) is called after tasks are frozen, for
- devices associated with a class that has such a method. This
- method may sleep.
+ dev->class->pm.suspend(dev);
+ dev->type->pm.suspend(dev);
+ dev->bus->pm.suspend(dev);
- Since I/O activity usually comes from such higher layers, this is
- a good place to quiesce all drivers of a given type (and keep such
- code out of those drivers).
+before moving on to the next device, whereas during the resume phase the core
+invokes
- 2 bus.suspend(dev, message) is called next. This method may sleep,
- and is often morphed into a device driver call with bus-specific
- parameters and/or rules.
+ dev->bus->pm.resume(dev);
+ dev->type->pm.resume(dev);
+ dev->class->pm.resume(dev);
- This call should handle parts of device suspend logic that require
- sleeping. It probably does work to quiesce the device which hasn't
- been abstracted into class.suspend().
+These callbacks may in turn invoke device- or driver-specific methods stored in
+dev->driver->pm, but they don't have to.
-The pm_message_t parameter is currently used to refine those semantics
-(described later).
-At the end of those phases, drivers should normally have stopped all I/O
-transactions (DMA, IRQs), saved enough state that they can re-initialize
-or restore previous state (as needed by the hardware), and placed the
-device into a low-power state. On many platforms they will also use
-clk_disable() to gate off one or more clock sources; sometimes they will
-also switch off power supplies, or reduce voltages. Drivers which have
-runtime PM support may already have performed some or all of the steps
-needed to prepare for the upcoming system sleep state.
+Entering System Suspend
+-----------------------
+When the system goes into the standby or memory sleep state, the phases are:
+
+ prepare, suspend, suspend_noirq.
+
+ 1. The prepare phase is meant to prevent races by preventing new devices
+ from being registered; the PM core would never know that all the
+ children of a device had been suspended if new children could be
+ registered at will. (By contrast, devices may be unregistered at any
+ time.) Unlike the other suspend-related phases, during the prepare
+ phase the device tree is traversed top-down.
+
+ The prepare phase uses only a bus callback. After the callback method
+ returns, no new children may be registered below the device. The method
+ may also prepare the device or driver in some way for the upcoming
+ system power transition, but it should not put the device into a
+ low-power state.
+
+ 2. The suspend methods should quiesce the device to stop it from performing
+ I/O. They also may save the device registers and put it into the
+ appropriate low-power state, depending on the bus type the device is on,
+ and they may enable wakeup events.
+
+ 3. The suspend_noirq phase occurs after IRQ handlers have been disabled,
+ which means that the driver's interrupt handler will not be called while
+ the callback method is running. The methods should save the values of
+ the device's registers that weren't saved previously and finally put the
+ device into the appropriate low-power state.
+
+ The majority of subsystems and device drivers need not implement this
+ callback. However, bus types allowing devices to share interrupt
+ vectors, like PCI, generally need it; otherwise a driver might encounter
+ an error during the suspend phase by fielding a shared interrupt
+ generated by some other device after its own device had been set to low
+ power.
+
+At the end of these phases, drivers should have stopped all I/O transactions
+(DMA, IRQs), saved enough state that they can re-initialize or restore previous
+state (as needed by the hardware), and placed the device into a low-power state.
+On many platforms they will gate off one or more clock sources; sometimes they
+will also switch off power supplies or reduce voltages. (Drivers supporting
+runtime PM may already have performed some or all of these steps.)
+
+If device_may_wakeup(dev) returns true, the device should be prepared for
+generating hardware wakeup signals to trigger a system wakeup event when the
+system is in the sleep state. For example, enable_irq_wake() might identify
+GPIO signals hooked up to a switch or other external hardware, and
+pci_enable_wake() does something similar for the PCI PME signal.
+
+If any of these callbacks returns an error, the system won't enter the desired
+low-power state. Instead the PM core will unwind its actions by resuming all
+the devices that were suspended.
+
+
+Leaving System Suspend
+----------------------
+When resuming from standby or memory sleep, the phases are:
+
+ resume_noirq, resume, complete.
+
+ 1. The resume_noirq callback methods should perform any actions needed
+ before the driver's interrupt handlers are invoked. This generally
+ means undoing the actions of the suspend_noirq phase. If the bus type
+ permits devices to share interrupt vectors, like PCI, the method should
+ bring the device and its driver into a state in which the driver can
+ recognize if the device is the source of incoming interrupts, if any,
+ and handle them correctly.
+
+ For example, the PCI bus type's ->pm.resume_noirq() puts the device into
+ the full-power state (D0 in the PCI terminology) and restores the
+ standard configuration registers of the device. Then it calls the
+ device driver's ->pm.resume_noirq() method to perform device-specific
+ actions.
+
+ 2. The resume methods should bring the device back to its operating
+ state, so that it can perform normal I/O. This generally involves
+ undoing the actions of the suspend phase.
+
+ 3. The complete phase uses only a bus callback. The method should undo the
+ actions of the prepare phase. Note, however, that new children may be
+ registered below the device as soon as the resume callbacks occur; it's
+ not necessary to wait until the complete phase.
+
+At the end of these phases, drivers should be as functional as they were before
+suspending: I/O can be performed using DMA and IRQs, and the relevant clocks are
+gated on. Even if the device was in a low-power state before the system sleep
+because of runtime power management, afterwards it should be back in its
+full-power state. There are multiple reasons why it's best to do this; they are
+discussed in more detail in Documentation/power/runtime_pm.txt.
-When any driver sees that its device_can_wakeup(dev), it should make sure
-to use the relevant hardware signals to trigger a system wakeup event.
-For example, enable_irq_wake() might identify GPIO signals hooked up to
-a switch or other external hardware, and pci_enable_wake() does something
-similar for PCI's PME# signal.
+However, the details here may again be platform-specific. For example,
+some systems support multiple "run" states, and the mode in effect at
+the end of resume might not be the one which preceded suspension.
+That means availability of certain clocks or power supplies changed,
+which could easily affect how a driver works.
+
+Drivers need to be able to handle hardware which has been reset since the
+suspend methods were called, for example by complete reinitialization.
+This may be the hardest part, and the one most protected by NDA'd documents
+and chip errata. It's simplest if the hardware state hasn't changed since
+the suspend was carried out, but that can't be guaranteed (in fact, it usually
+is not the case).
+
+Drivers must also be prepared to notice that the device has been removed
+while the system was powered down, whenever that's physically possible.
+PCMCIA, MMC, USB, Firewire, SCSI, and even IDE are common examples of busses
+where common Linux platforms will see such removal. Details of how drivers
+will notice and handle such removals are currently bus-specific, and often
+involve a separate thread.
+
+These callbacks may return an error value, but the PM core will ignore such
+errors since there's nothing it can do about them other than printing them in
+the system log.
+
+
+Entering Hibernation
+--------------------
+Hibernating the system is more complicated than putting it into the standby or
+memory sleep state, because it involves creating and saving a system image.
+Therefore there are more phases for hibernation, with a different set of
+callbacks. These phases always run after tasks have been frozen and memory has
+been freed.
+
+The general procedure for hibernation is to quiesce all devices (freeze), create
+an image of the system memory while everything is stable, reactivate all
+devices (thaw), write the image to permanent storage, and finally shut down the
+system (poweroff). The phases used to accomplish this are:
+
+ prepare, freeze, freeze_noirq, thaw_noirq, thaw, complete,
+ prepare, poweroff, poweroff_noirq
+
+ 1. The prepare phase is discussed in the "Entering System Suspend" section
+ above.
+
+ 2. The freeze methods should quiesce the device so that it doesn't generate
+ IRQs or DMA, and they may need to save the values of device registers.
+ However the device does not have to be put in a low-power state, and to
+ save time it's best not to do so. Also, the device should not be
+ prepared to generate wakeup events.
+
+ 3. The freeze_noirq phase is analogous to the suspend_noirq phase discussed
+ above, except again that the device should not be put in a low-power
+ state and should not be allowed to generate wakeup events.
+
+At this point the system image is created. All devices should be inactive and
+the contents of memory should remain undisturbed while this happens, so that the
+image forms an atomic snapshot of the system state.
+
+ 4. The thaw_noirq phase is analogous to the resume_noirq phase discussed
+ above. The main difference is that its methods can assume the device is
+ in the same state as at the end of the freeze_noirq phase.
+
+ 5. The thaw phase is analogous to the resume phase discussed above. Its
+ methods should bring the device back to an operating state, so that it
+ can be used for saving the image if necessary.
+
+ 6. The complete phase is discussed in the "Leaving System Suspend" section
+ above.
+
+At this point the system image is saved, and the devices then need to be
+prepared for the upcoming system shutdown. This is much like suspending them
+before putting the system into the standby or memory sleep state, and the phases
+are similar.
+
+ 7. The prepare phase is discussed above.
+
+ 8. The poweroff phase is analogous to the suspend phase.
+
+ 9. The poweroff_noirq phase is analogous to the suspend_noirq phase.
+
+The poweroff and poweroff_noirq callbacks should do essentially the same things
+as the suspend and suspend_noirq callbacks. The only notable difference is that
+they need not store the device register values, because the registers should
+already have been stored during the freeze or freeze_noirq phases.
+
+
+Leaving Hibernation
+-------------------
+Resuming from hibernation is, again, more complicated than resuming from a sleep
+state in which the contents of main memory are preserved, because it requires
+a system image to be loaded into memory and the pre-hibernation memory contents
+to be restored before control can be passed back to the image kernel.
+
+Although in principle, the image might be loaded into memory and the
+pre-hibernation memory contents restored by the boot loader, in practice this
+can't be done because boot loaders aren't smart enough and there is no
+established protocol for passing the necessary information. So instead, the
+boot loader loads a fresh instance of the kernel, called the boot kernel, into
+memory and passes control to it in the usual way. Then the boot kernel reads
+the system image, restores the pre-hibernation memory contents, and passes
+control to the image kernel. Thus two different kernels are involved in
+resuming from hibernation. In fact, the boot kernel may be completely different
+from the image kernel: a different configuration and even a different version.
+This has important consequences for device drivers and their subsystems.
+
+To be able to load the system image into memory, the boot kernel needs to
+include at least a subset of device drivers allowing it to access the storage
+medium containing the image, although it doesn't need to include all of the
+drivers present in the image kernel. After the image has been loaded, the
+devices managed by the boot kernel need to be prepared for passing control back
+to the image kernel. This is very similar to the initial steps involved in
+creating a system image, and it is accomplished in the same way, using prepare,
+freeze, and freeze_noirq phases. However the devices affected by these phases
+are only those having drivers in the boot kernel; other devices will still be in
+whatever state the boot loader left them.
+
+Should the restoration of the pre-hibernation memory contents fail, the boot
+kernel would go through the "thawing" procedure described above, using the
+thaw_noirq, thaw, and complete phases, and then continue running normally. This
+happens only rarely. Most often the pre-hibernation memory contents are
+restored successfully and control is passed to the image kernel, which then
+becomes responsible for bringing the system back to the working state.
+
+To achieve this, the image kernel must restore the devices' pre-hibernation
+functionality. The operation is much like waking up from the memory sleep
+state, although it involves different phases:
+
+ restore_noirq, restore, complete
+
+ 1. The restore_noirq phase is analogous to the resume_noirq phase.
+
+ 2. The restore phase is analogous to the resume phase.
+
+ 3. The complete phase is discussed above.
+
+The main difference from resume[_noirq] is that restore[_noirq] must assume the
+device has been accessed and reconfigured by the boot loader or the boot kernel.
+Consequently the state of the device may be different from the state remembered
+from the freeze and freeze_noirq phases. The device may even need to be reset
+and completely re-initialized. In many cases this difference doesn't matter, so
+the resume[_noirq] and restore[_noirq] method pointers can be set to the same
+routines. Nevertheless, different callback pointers are used in case there is a
+situation where it actually matters.
-If a driver (or bus, or class) fails it suspend method, the system won't
-enter the desired low power state; it will resume all the devices it's
-suspended so far.
-Note that drivers may need to perform different actions based on the target
-system lowpower/sleep state. At this writing, there are only platform
-specific APIs through which drivers could determine those target states.
+System Devices
+--------------
+System devices (sysdevs) follow a slightly different API, which can be found in
+
+ include/linux/sysdev.h
+ drivers/base/sys.c
+
+System devices will be suspended with interrupts disabled, and after all other
+devices have been suspended. On resume, they will be resumed before any other
+devices, and also with interrupts disabled. These things occur in special
+"sysdev_driver" phases, which affect only system devices.
+
+Thus, after the suspend_noirq (or freeze_noirq or poweroff_noirq) phase, when
+the non-boot CPUs are all offline and IRQs are disabled on the remaining online
+CPU, then a sysdev_driver.suspend phase is carried out, and the system enters a
+sleep state (or a system image is created). During resume (or after the image
+has been created or loaded) a sysdev_driver.resume phase is carried out, IRQs
+are enabled on the only online CPU, the non-boot CPUs are enabled, and the
+resume_noirq (or thaw_noirq or restore_noirq) phase begins.
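+
+As a hedged sketch of what this looks like for a driver (assuming the
+suspend/resume member signatures declared in include/linux/sysdev.h; the
+foo_ names are made up for illustration):
+
+	static int foo_sysdev_suspend(struct sys_device *dev, pm_message_t state)
+	{
+		/* runs with only one CPU online and IRQs disabled */
+		return 0;
+	}
+
+	static int foo_sysdev_resume(struct sys_device *dev)
+	{
+		/* runs before IRQs are re-enabled and other CPUs come back */
+		return 0;
+	}
+
+	static struct sysdev_driver foo_sysdev_driver = {
+		.suspend = foo_sysdev_suspend,
+		.resume  = foo_sysdev_resume,
+	};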
+
+Code to actually enter and exit the system-wide low power state sometimes
+involves hardware details that are only known to the boot firmware, and
+may leave a CPU running software (from SRAM or flash memory) that monitors
+the system and manages its wakeup sequence.
Device Low Power (suspend) States
---------------------------------
-Device low-power states aren't very standard. One device might only handle
+Device low-power states aren't standard. One device might only handle
"on" and "off, while another might support a dozen different versions of
"on" (how many engines are active?), plus a state that gets back to "on"
faster than from a full "off".
@@ -265,7 +546,7 @@ PCI device may not perform DMA or issue IRQs, and any wakeup events it
issues would be issued through the PME# bus signal. Plus, there are
several PCI-standard device states, some of which are optional.
-In contrast, integrated system-on-chip processors often use irqs as the
+In contrast, integrated system-on-chip processors often use IRQs as the
wakeup event sources (so drivers would call enable_irq_wake) and might
be able to treat DMA completion as a wakeup event (sometimes DMA can stay
active too, it'd only be the CPU and some peripherals that sleep).
@@ -284,120 +565,17 @@ ways; the aforementioned LCD might be active in one product's "standby",
but a different product using the same SOC might work differently.
-Meaning of pm_message_t.event
------------------------------
-Parameters to suspend calls include the device affected and a message of
-type pm_message_t, which has one field: the event. If driver does not
-recognize the event code, suspend calls may abort the request and return
-a negative errno. However, most drivers will be fine if they implement
-PM_EVENT_SUSPEND semantics for all messages.
+Power Management Notifiers
+--------------------------
+There are some operations that cannot be carried out by the power management
+callbacks discussed above, because the callbacks occur too late or too early.
+To handle these cases, subsystems and device drivers may register power
+management notifiers that are called before tasks are frozen and after they have
+been thawed. Generally speaking, the PM notifiers are suitable for performing
+actions that either require user space to be available, or at least won't
+interfere with user space.
-The event codes are used to refine the goal of suspending the device, and
-mostly matter when creating or resuming system memory image snapshots, as
-used with suspend-to-disk:
-
- PM_EVENT_SUSPEND -- quiesce the driver and put hardware into a low-power
- state. When used with system sleep states like "suspend-to-RAM" or
- "standby", the upcoming resume() call will often be able to rely on
- state kept in hardware, or issue system wakeup events.
-
- PM_EVENT_HIBERNATE -- Put hardware into a low-power state and enable wakeup
- events as appropriate. It is only used with hibernation
- (suspend-to-disk) and few devices are able to wake up the system from
- this state; most are completely powered off.
-
- PM_EVENT_FREEZE -- quiesce the driver, but don't necessarily change into
- any low power mode. A system snapshot is about to be taken, often
- followed by a call to the driver's resume() method. Neither wakeup
- events nor DMA are allowed.
-
- PM_EVENT_PRETHAW -- quiesce the driver, knowing that the upcoming resume()
- will restore a suspend-to-disk snapshot from a different kernel image.
- Drivers that are smart enough to look at their hardware state during
- resume() processing need that state to be correct ... a PRETHAW could
- be used to invalidate that state (by resetting the device), like a
- shutdown() invocation would before a kexec() or system halt. Other
- drivers might handle this the same way as PM_EVENT_FREEZE. Neither
- wakeup events nor DMA are allowed.
-
-To enter "standby" (ACPI S1) or "Suspend to RAM" (STR, ACPI S3) states, or
-the similarly named APM states, only PM_EVENT_SUSPEND is used; the other event
-codes are used for hibernation ("Suspend to Disk", STD, ACPI S4).
-
-There's also PM_EVENT_ON, a value which never appears as a suspend event
-but is sometimes used to record the "not suspended" device state.
-
-
-Resuming Devices
-----------------
-Resuming is done in multiple phases, much like suspending, with all
-devices processing each phase's calls before the next phase begins.
-
-The phases are seen by driver notifications issued in this order:
-
- 1 bus.resume(dev) reverses the effects of bus.suspend(). This may
- be morphed into a device driver call with bus-specific parameters;
- implementations may sleep.
-
- 2 class.resume(dev) is called for devices associated with a class
- that has such a method. Implementations may sleep.
-
- This reverses the effects of class.suspend(), and would usually
- reactivate the device's I/O queue.
-
-At the end of those phases, drivers should normally be as functional as
-they were before suspending: I/O can be performed using DMA and IRQs, and
-the relevant clocks are gated on. The device need not be "fully on"; it
-might be in a runtime lowpower/suspend state that acts as if it were.
-
-However, the details here may again be platform-specific. For example,
-some systems support multiple "run" states, and the mode in effect at
-the end of resume() might not be the one which preceded suspension.
-That means availability of certain clocks or power supplies changed,
-which could easily affect how a driver works.
-
-
-Drivers need to be able to handle hardware which has been reset since the
-suspend methods were called, for example by complete reinitialization.
-This may be the hardest part, and the one most protected by NDA'd documents
-and chip errata. It's simplest if the hardware state hasn't changed since
-the suspend() was called, but that can't always be guaranteed.
-
-Drivers must also be prepared to notice that the device has been removed
-while the system was powered off, whenever that's physically possible.
-PCMCIA, MMC, USB, Firewire, SCSI, and even IDE are common examples of busses
-where common Linux platforms will see such removal. Details of how drivers
-will notice and handle such removals are currently bus-specific, and often
-involve a separate thread.
-
-
-Note that the bus-specific runtime PM wakeup mechanism can exist, and might
-be defined to share some of the same driver code as for system wakeup. For
-example, a bus-specific device driver's resume() method might be used there,
-so it wouldn't only be called from bus.resume() during system-wide wakeup.
-See bus-specific information about how runtime wakeup events are handled.
-
-
-System Devices
---------------
-System devices follow a slightly different API, which can be found in
-
- include/linux/sysdev.h
- drivers/base/sys.c
-
-System devices will only be suspended with interrupts disabled, and after
-all other devices have been suspended. On resume, they will be resumed
-before any other devices, and also with interrupts disabled.
-
-That is, IRQs are disabled, the suspend_late() phase begins, then the
-sysdev_driver.suspend() phase, and the system enters a sleep state. Then
-the sysdev_driver.resume() phase begins, followed by the resume_early()
-phase, after which IRQs are enabled.
-
-Code to actually enter and exit the system-wide low power state sometimes
-involves hardware details that are only known to the boot firmware, and
-may leave a CPU running software (from SRAM or flash memory) that monitors
-the system and manages its wakeup sequence.
+For details refer to Documentation/power/notifiers.txt.
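+
+As a minimal illustrative sketch (the foo_ names are hypothetical; the event
+codes and registration call are the ones described in that document), a
+driver might register a notifier like this:
+
+	static int foo_pm_notify(struct notifier_block *nb,
+				 unsigned long action, void *data)
+	{
+		switch (action) {
+		case PM_HIBERNATION_PREPARE:
+			/* e.g. load firmware while user space is still available */
+			break;
+		case PM_POST_HIBERNATION:
+			/* undo the preparation above */
+			break;
+		}
+		return NOTIFY_DONE;
+	}
+
+	static struct notifier_block foo_pm_nb = {
+		.notifier_call = foo_pm_notify,
+	};
+
+	/* in the driver's init path */
+	register_pm_notifier(&foo_pm_nb);
+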
Runtime Power Management
@@ -407,82 +585,23 @@ running. This feature is useful for devices that are not being used, and
can offer significant power savings on a running system. These devices
often support a range of runtime power states, which might use names such
as "off", "sleep", "idle", "active", and so on. Those states will in some
-cases (like PCI) be partially constrained by a bus the device uses, and will
+cases (like PCI) be partially constrained by the bus the device uses, and will
usually include hardware states that are also used in system sleep states.
-However, note that if a driver puts a device into a runtime low power state
-and the system then goes into a system-wide sleep state, it normally ought
-to resume into that runtime low power state rather than "full on". Such
-distinctions would be part of the driver-internal state machine for that
-hardware; the whole point of runtime power management is to be sure that
-drivers are decoupled in that way from the state machine governing phases
-of the system-wide power/sleep state transitions.
-
-
-Power Saving Techniques
------------------------
-Normally runtime power management is handled by the drivers without specific
-userspace or kernel intervention, by device-aware use of techniques like:
-
- Using information provided by other system layers
- - stay deeply "off" except between open() and close()
- - if transceiver/PHY indicates "nobody connected", stay "off"
- - application protocols may include power commands or hints
-
- Using fewer CPU cycles
- - using DMA instead of PIO
- - removing timers, or making them lower frequency
- - shortening "hot" code paths
- - eliminating cache misses
- - (sometimes) offloading work to device firmware
-
- Reducing other resource costs
- - gating off unused clocks in software (or hardware)
- - switching off unused power supplies
- - eliminating (or delaying/merging) IRQs
- - tuning DMA to use word and/or burst modes
-
- Using device-specific low power states
- - using lower voltages
- - avoiding needless DMA transfers
-
-Read your hardware documentation carefully to see the opportunities that
-may be available. If you can, measure the actual power usage and check
-it against the budget established for your project.
-
-
-Examples: USB hosts, system timer, system CPU
-----------------------------------------------
-USB host controllers make interesting, if complex, examples. In many cases
-these have no work to do: no USB devices are connected, or all of them are
-in the USB "suspend" state. Linux host controller drivers can then disable
-periodic DMA transfers that would otherwise be a constant power drain on the
-memory subsystem, and enter a suspend state. In power-aware controllers,
-entering that suspend state may disable the clock used with USB signaling,
-saving a certain amount of power.
-
-The controller will be woken from that state (with an IRQ) by changes to the
-signal state on the data lines of a given port, for example by an existing
-peripheral requesting "remote wakeup" or by plugging a new peripheral. The
-same wakeup mechanism usually works from "standby" sleep states, and on some
-systems also from "suspend to RAM" (or even "suspend to disk") states.
-(Except that ACPI may be involved instead of normal IRQs, on some hardware.)
-
-System devices like timers and CPUs may have special roles in the platform
-power management scheme. For example, system timers using a "dynamic tick"
-approach don't just save CPU cycles (by eliminating needless timer IRQs),
-but they may also open the door to using lower power CPU "idle" states that
-cost more than a jiffie to enter and exit. On x86 systems these are states
-like "C3"; note that periodic DMA transfers from a USB host controller will
-also prevent entry to a C3 state, much like a periodic timer IRQ.
-
-That kind of runtime mechanism interaction is common. "System On Chip" (SOC)
-processors often have low power idle modes that can't be entered unless
-certain medium-speed clocks (often 12 or 48 MHz) are gated off. When the
-drivers gate those clocks effectively, then the system idle task may be able
-to use the lower power idle modes and thereby increase battery life.
-
-If the CPU can have a "cpufreq" driver, there also may be opportunities
-to shift to lower voltage settings and reduce the power cost of executing
-a given number of instructions. (Without voltage adjustment, it's rare
-for cpufreq to save much power; the cost-per-instruction must go down.)
+A system-wide power transition can be started while some devices are in low
+power states due to runtime power management. The system sleep PM callbacks
+should recognize such situations and react to them appropriately, but the
+necessary actions are subsystem-specific.
+
+In some cases the decision may be made at the subsystem level while in other
+cases the device driver may be left to decide. In some cases it may be
+desirable to leave a suspended device in that state during a system-wide power
+transition, but in other cases the device must be put back into the full-power
+state temporarily, for example so that its system wakeup capability can be
+disabled. This all depends on the hardware and the design of the subsystem and
+device driver in question.
+
+During system-wide resume from a sleep state it's best to put devices into the
+full-power state, as explained in Documentation/power/runtime_pm.txt. Refer to
+that document for more information regarding this particular issue as well as
+for information on the device runtime power management framework in general.
diff --git a/Documentation/power/pm_qos_interface.txt b/Documentation/power/pm_qos_interface.txt
index c40866e8b957..bfed898a03fc 100644
--- a/Documentation/power/pm_qos_interface.txt
+++ b/Documentation/power/pm_qos_interface.txt
@@ -18,44 +18,46 @@ and pm_qos_params.h. This is done because having the available parameters
being runtime configurable or changeable from a driver was seen as too easy to
abuse.
-For each parameter a list of performance requirements is maintained along with
+For each parameter a list of performance requests is maintained along with
an aggregated target value. The aggregated target value is updated with
-changes to the requirement list or elements of the list. Typically the
-aggregated target value is simply the max or min of the requirement values held
+changes to the request list or elements of the list. Typically the
+aggregated target value is simply the max or min of the request values held
in the parameter list elements.
From kernel mode the use of this interface is simple:
-pm_qos_add_requirement(param_id, name, target_value):
-Will insert a named element in the list for that identified PM_QOS parameter
-with the target value. Upon change to this list the new target is recomputed
-and any registered notifiers are called only if the target value is now
-different.
-pm_qos_update_requirement(param_id, name, new_target_value):
-Will search the list identified by the param_id for the named list element and
-then update its target value, calling the notification tree if the aggregated
-target is changed. with that name is already registered.
+handle = pm_qos_add_request(param_class, target_value):
+Will insert an element into the list for that identified PM_QOS class with the
+target value. Upon change to this list the new target is recomputed and any
+registered notifiers are called only if the target value is now different.
+Clients of pm_qos need to save the returned handle.
-pm_qos_remove_requirement(param_id, name):
-Will search the identified list for the named element and remove it, after
-removal it will update the aggregate target and call the notification tree if
-the target was changed as a result of removing the named requirement.
+void pm_qos_update_request(handle, new_target_value):
+Will update the list element pointed to by the handle with the new target value
+and recompute the new aggregated target, calling the notification tree if the
+target is changed.
+
+void pm_qos_remove_request(handle):
+Will remove the element. After removal it will update the aggregate target and
+call the notification tree if the target was changed as a result of removing
+the request.
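+
+A minimal kernel-mode sketch based on the three calls above (the handle type
+is the one returned by pm_qos_add_request() as declared in pm_qos_params.h,
+shown here only schematically; PM_QOS_CPU_DMA_LATENCY is one of the existing
+classes and the microsecond values are arbitrary):
+
+	handle = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY, 50);	/* 50 usec */
+	/* ... latency-sensitive phase begins ... */
+	pm_qos_update_request(handle, 20);	/* tighten the request to 20 usec */
+	/* ... latency-sensitive phase ends ... */
+	pm_qos_remove_request(handle);		/* drop the request entirely */
+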
From user mode:
-Only processes can register a pm_qos requirement. To provide for automatic
-cleanup for process the interface requires the process to register its
-parameter requirements in the following way:
+Only processes can register a pm_qos request. To provide for automatic
+cleanup of a process, the interface requires the process to register its
+parameter requests in the following way:
To register the default pm_qos target for the specific parameter, the process
must open one of /dev/[cpu_dma_latency, network_latency, network_throughput]
As long as the device node is held open that process has a registered
-requirement on the parameter. The name of the requirement is "process_<PID>"
-derived from the current->pid from within the open system call.
+request on the parameter.
-To change the requested target value the process needs to write a s32 value to
-the open device node. This translates to a pm_qos_update_requirement call.
+To change the requested target value the process needs to write an s32 value to
+the open device node. Alternatively the user mode program could write a hex
+string for the value using the 10 character long format, e.g. "0x12345678".
+This translates to a pm_qos_update_request call.
To remove the user mode request for a target value simply close the device
node.
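+
+A hypothetical user-space sketch of the behaviour described above: the request
+is held for as long as the file descriptor stays open (error handling omitted
+for brevity):
+
+	#include <fcntl.h>
+	#include <stdint.h>
+	#include <unistd.h>
+
+	int main(void)
+	{
+		int32_t target = 20;			/* microseconds */
+		int fd = open("/dev/cpu_dma_latency", O_WRONLY);
+
+		write(fd, &target, sizeof(target));	/* register the request */
+		/* ... latency-sensitive work while fd remains open ... */
+		close(fd);				/* the request is removed */
+		return 0;
+	}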
diff --git a/Documentation/power/regulator/consumer.txt b/Documentation/power/regulator/consumer.txt
index cdebb5145c25..55c4175d8099 100644
--- a/Documentation/power/regulator/consumer.txt
+++ b/Documentation/power/regulator/consumer.txt
@@ -8,11 +8,11 @@ Please see overview.txt for a description of the terms used in this text.
1. Consumer Regulator Access (static & dynamic drivers)
=======================================================
-A consumer driver can get access to it's supply regulator by calling :-
+A consumer driver can get access to its supply regulator by calling :-
regulator = regulator_get(dev, "Vcc");
-The consumer passes in it's struct device pointer and power supply ID. The core
+The consumer passes in its struct device pointer and power supply ID. The core
then finds the correct regulator by consulting a machine specific lookup table.
If the lookup is successful then this call will return a pointer to the struct
regulator that supplies this consumer.
@@ -34,7 +34,7 @@ usually be called in your device drivers probe() and remove() respectively.
2. Regulator Output Enable & Disable (static & dynamic drivers)
====================================================================
-A consumer can enable it's power supply by calling:-
+A consumer can enable its power supply by calling:-
int regulator_enable(regulator);
@@ -49,7 +49,7 @@ int regulator_is_enabled(regulator);
This will return > zero when the regulator is enabled.
-A consumer can disable it's supply when no longer needed by calling :-
+A consumer can disable its supply when no longer needed by calling :-
int regulator_disable(regulator);
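+
+Putting the calls above together, a hedged consumer sketch ("Vcc" is the
+supply name used earlier in this document, dev is the consumer's struct
+device pointer, and error handling is abbreviated):
+
+	struct regulator *reg;
+
+	reg = regulator_get(dev, "Vcc");
+	if (IS_ERR(reg))
+		return PTR_ERR(reg);
+
+	regulator_enable(reg);		/* consumer needs power */
+	/* ... use the device ... */
+	regulator_disable(reg);		/* supply no longer needed */
+	regulator_put(reg);		/* usually from remove() */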
@@ -140,7 +140,7 @@ by calling :-
int regulator_set_optimum_mode(struct regulator *regulator, int load_uA);
This will cause the core to recalculate the total load on the regulator (based
-on all it's consumers) and change operating mode (if necessary and permitted)
+on all its consumers) and change operating mode (if necessary and permitted)
to best match the current operating load.
The load_uA value can be determined from the consumer's datasheet, e.g. most
diff --git a/Documentation/power/regulator/machine.txt b/Documentation/power/regulator/machine.txt
index 63728fed620b..bdec39b9bd75 100644
--- a/Documentation/power/regulator/machine.txt
+++ b/Documentation/power/regulator/machine.txt
@@ -52,7 +52,7 @@ static struct regulator_init_data regulator1_data = {
};
Regulator-1 supplies power to Regulator-2. This relationship must be registered
-with the core so that Regulator-1 is also enabled when Consumer A enables it's
+with the core so that Regulator-1 is also enabled when Consumer A enables its
supply (Regulator-2). The supply regulator is set by the supply_regulator_dev
field below:-
diff --git a/Documentation/power/regulator/overview.txt b/Documentation/power/regulator/overview.txt
index ffd185bb6054..9363e056188a 100644
--- a/Documentation/power/regulator/overview.txt
+++ b/Documentation/power/regulator/overview.txt
@@ -35,16 +35,16 @@ Some terms used in this document:-
o Consumer - Electronic device that is supplied power by a regulator.
Consumers can be classified into two types:-
- Static: consumer does not change it's supply voltage or
+ Static: consumer does not change its supply voltage or
current limit. It only needs to enable or disable its
- power supply. It's supply voltage is set by the hardware,
+ power supply. Its supply voltage is set by the hardware,
bootloader, firmware or kernel board initialisation code.
Dynamic: consumer needs to change its supply voltage or
current limit to meet operation demands.
- o Power Domain - Electronic circuit that is supplied it's input power by the
+ o Power Domain - Electronic circuit that is supplied its input power by the
output power of a regulator, switch or by another power
domain.
diff --git a/Documentation/power/userland-swsusp.txt b/Documentation/power/userland-swsusp.txt
index b967cd9137d6..81680f9f5909 100644
--- a/Documentation/power/userland-swsusp.txt
+++ b/Documentation/power/userland-swsusp.txt
@@ -24,6 +24,10 @@ assumed to be in the resume mode. The device cannot be open for simultaneous
reading and writing. It is also impossible to have the device open more than
once at a time.
+Even opening the device has side effects. Data structures are
+allocated, and PM_HIBERNATION_PREPARE / PM_RESTORE_PREPARE chains are
+called.
+
The ioctl() commands recognized by the device are:
SNAPSHOT_FREEZE - freeze user space processes (the current process is
diff --git a/Documentation/powerpc/booting-without-of.txt b/Documentation/powerpc/booting-without-of.txt
index 79f533f38c61..46d22105aa07 100644
--- a/Documentation/powerpc/booting-without-of.txt
+++ b/Documentation/powerpc/booting-without-of.txt
@@ -1289,7 +1289,7 @@ link between a device node and its interrupt parent in
the interrupt tree. The value of interrupt-parent is the
phandle of the parent node.
-If the interrupt-parent property is not defined for a node, it's
+If the interrupt-parent property is not defined for a node, its
interrupt parent is assumed to be an ancestor in the node's
_device tree_ hierarchy.
diff --git a/Documentation/powerpc/dts-bindings/4xx/reboot.txt b/Documentation/powerpc/dts-bindings/4xx/reboot.txt
new file mode 100644
index 000000000000..d7217260589c
--- /dev/null
+++ b/Documentation/powerpc/dts-bindings/4xx/reboot.txt
@@ -0,0 +1,18 @@
+Reboot property to control system reboot on PPC4xx systems:
+
+By setting "reset_type" to one of the following values, the default
+software reset mechanism may be overridden. The possible values of
+"reset_type" are:
+
+ 1 - PPC4xx core reset
+ 2 - PPC4xx chip reset
+ 3 - PPC4xx system reset (default)
+
+Example:
+
+ cpu@0 {
+ device_type = "cpu";
+ model = "PowerPC,440SPe";
+ ...
+ reset-type = <2>; /* Use chip-reset */
+ };
diff --git a/Documentation/powerpc/dts-bindings/xilinx.txt b/Documentation/powerpc/dts-bindings/xilinx.txt
index ea68046bb9cb..299d0923537b 100644
--- a/Documentation/powerpc/dts-bindings/xilinx.txt
+++ b/Documentation/powerpc/dts-bindings/xilinx.txt
@@ -11,7 +11,7 @@
control how the core is synthesized. Historically, the EDK tool would
extract the device parameters relevant to device drivers and copy them
into an 'xparameters.h' in the form of #define symbols. This tells the
- device drivers how the IP cores are configured, but it requres the kernel
+ device drivers how the IP cores are configured, but it requires the kernel
to be recompiled every time the FPGA bitstream is resynthesized.
The new approach is to export the parameters into the device tree and
diff --git a/Documentation/powerpc/phyp-assisted-dump.txt b/Documentation/powerpc/phyp-assisted-dump.txt
index c4682b982a2e..ad340205d96a 100644
--- a/Documentation/powerpc/phyp-assisted-dump.txt
+++ b/Documentation/powerpc/phyp-assisted-dump.txt
@@ -19,7 +19,7 @@ dump offers several strong, practical advantages:
immediately available to the system for normal use.
-- After the dump is completed, no further reboots are
required; the system will be fully usable, and running
- in it's normal, production mode on it normal kernel.
+ in its normal, production mode on its normal kernel.
The above can only be accomplished by coordination with,
and assistance from the hypervisor. The procedure is
diff --git a/Documentation/rbtree.txt b/Documentation/rbtree.txt
index aae8355d3166..221f38be98f4 100644
--- a/Documentation/rbtree.txt
+++ b/Documentation/rbtree.txt
@@ -190,3 +190,61 @@ Example:
for (node = rb_first(&mytree); node; node = rb_next(node))
printk("key=%s\n", rb_entry(node, struct mytype, node)->keystring);
+Support for Augmented rbtrees
+-----------------------------
+
+An augmented rbtree is an rbtree with "some" additional data stored in each
+node.  This data can be used to add new functionality to the rbtree.
+Augmented rbtree is an optional feature built on top of the basic rbtree
+infrastructure.  An rbtree user who wants this feature will have the augment
+callback function in rb_root initialized.
+
+This callback function will be called from rbtree core routines whenever
+a node has a change in one or both of its children. It is the responsibility
+of the callback function to recalculate the additional data that is in the
+rb node using new children information. Note that if this new additional
+data affects the parent node's additional data, then the callback function has
+to handle it and do the recursive updates.
+
+
+Interval tree is an example of augmented rb tree. Reference -
+"Introduction to Algorithms" by Cormen, Leiserson, Rivest and Stein.
+More details about interval trees:
+
+Classical rbtree has a single key and it cannot be directly used to store
+interval ranges like [lo:hi] and do a quick lookup for any overlap with a new
+lo:hi or to find whether there is an exact match for a new lo:hi.
+
+However, rbtree can be augmented to store such interval ranges in a structured
+way making it possible to do efficient lookup and exact match.
+
+This "extra information" stored in each node is the maximum hi
+(max_hi) value among all the nodes that are its descendants.  This
+information can be maintained at each node just by looking at the node
+and its immediate children.  This will then be used in an O(log n) lookup
+for the lowest match (lowest start address among all possible matches)
+with something like:
+
+find_lowest_match(lo, hi, node)
+{
+ lowest_match = NULL;
+ while (node) {
+ if (max_hi(node->left) > lo) {
+ // Lowest overlap if any must be on left side
+ node = node->left;
+ } else if (overlap(lo, hi, node)) {
+ lowest_match = node;
+ break;
+ } else if (lo > node->lo) {
+ // Lowest overlap if any must be on right side
+ node = node->right;
+ } else {
+ break;
+ }
+ }
+ return lowest_match;
+}
+
+Finding an exact match is done by first finding the lowest match and then
+following successor nodes looking for an exact match, until the start of a
+node is beyond the hi value we are looking for.
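+
+The lookup above assumes max_hi is kept up to date.  A self-contained sketch
+(illustrative types only, not the kernel rbtree API) of the per-node
+recomputation that the augment callback is responsible for:
+
+	struct itree_node {
+		long lo, hi;		/* interval stored in this node */
+		long max_hi;		/* augmented data: max hi in this subtree */
+		struct itree_node *left, *right;
+	};
+
+	/* recompute a node's max_hi from the node and its immediate children;
+	 * the caller repeats this towards the root while the value changes */
+	static void itree_update_max_hi(struct itree_node *node)
+	{
+		long m = node->hi;
+
+		if (node->left && node->left->max_hi > m)
+			m = node->left->max_hi;
+		if (node->right && node->right->max_hi > m)
+			m = node->right->max_hi;
+		node->max_hi = m;
+	}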
diff --git a/Documentation/rfkill.txt b/Documentation/rfkill.txt
index b4860509c319..83668e5dd17f 100644
--- a/Documentation/rfkill.txt
+++ b/Documentation/rfkill.txt
@@ -99,37 +99,15 @@ system. Also, it is possible to switch all rfkill drivers (or all drivers of
a specified type) into a state which also updates the default state for
hotplugged devices.
-After an application opens /dev/rfkill, it can read the current state of
-all devices, and afterwards can poll the descriptor for hotplug or state
-change events.
-
-Applications must ignore operations (the "op" field) they do not handle,
-this allows the API to be extended in the future.
-
-Additionally, each rfkill device is registered in sysfs and there has the
-following attributes:
-
- name: Name assigned by driver to this key (interface or driver name).
- type: Driver type string ("wlan", "bluetooth", etc).
- persistent: Whether the soft blocked state is initialised from
- non-volatile storage at startup.
- state: Current state of the transmitter
- 0: RFKILL_STATE_SOFT_BLOCKED
- transmitter is turned off by software
- 1: RFKILL_STATE_UNBLOCKED
- transmitter is (potentially) active
- 2: RFKILL_STATE_HARD_BLOCKED
- transmitter is forced off by something outside of
- the driver's control.
- This file is deprecated because it can only properly show
- three of the four possible states, soft-and-hard-blocked is
- missing.
- claim: 0: Kernel handles events
- This file is deprecated because there no longer is a way to
- claim just control over a single rfkill instance.
-
-rfkill devices also issue uevents (with an action of "change"), with the
-following environment variables set:
+After an application opens /dev/rfkill, it can read the current state of all
+devices. Changes can be obtained either by polling the descriptor for
+hotplug or state change events or by listening for uevents emitted by the
+rfkill core framework.
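+
+As a hedged user-space sketch (assuming struct rfkill_event and its
+idx/type/op/soft/hard members from <linux/rfkill.h>), an application might
+simply read events in a loop, ignoring operations it does not handle:
+
+	#include <linux/rfkill.h>
+	#include <fcntl.h>
+	#include <stdio.h>
+	#include <unistd.h>
+
+	int main(void)
+	{
+		struct rfkill_event ev;
+		int fd = open("/dev/rfkill", O_RDONLY);
+
+		/* the first reads report the current state of each device,
+		 * later reads block until something changes */
+		while (read(fd, &ev, sizeof(ev)) > 0)
+			printf("idx=%u type=%u op=%u soft=%u hard=%u\n",
+			       ev.idx, ev.type, ev.op, ev.soft, ev.hard);
+
+		close(fd);
+		return 0;
+	}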
+
+Additionally, each rfkill device is registered in sysfs and emits uevents.
+
+rfkill devices issue uevents (with an action of "change"), with the following
+environment variables set:
RFKILL_NAME
RFKILL_STATE
@@ -137,3 +115,7 @@ RFKILL_TYPE
The contents of these variables corresponds to the "name", "state" and
"type" sysfs files explained above.
+
+
+For further details consult Documentation/ABI/stable/dev-rfkill and
+Documentation/ABI/stable/sysfs-class-rfkill.
diff --git a/Documentation/rt-mutex-design.txt b/Documentation/rt-mutex-design.txt
index 4b736d24da7a..8df0b782c4d7 100644
--- a/Documentation/rt-mutex-design.txt
+++ b/Documentation/rt-mutex-design.txt
@@ -657,7 +657,7 @@ here.
The waiter structure has a "task" field that points to the task that is blocked
on the mutex. This field can be NULL the first time it goes through the loop
-or if the task is a pending owner and had it's mutex stolen. If the "task"
+or if the task is a pending owner and had its mutex stolen. If the "task"
field is NULL then we need to set up the accounting for it.
Task blocks on mutex
diff --git a/Documentation/scheduler/sched-design-CFS.txt b/Documentation/scheduler/sched-design-CFS.txt
index 6f33593e59e2..8239ebbcddce 100644
--- a/Documentation/scheduler/sched-design-CFS.txt
+++ b/Documentation/scheduler/sched-design-CFS.txt
@@ -211,7 +211,7 @@ provide fair CPU time to each such task group. For example, it may be
desirable to first provide fair CPU time to each user on the system and then to
each task belonging to a user.
-CONFIG_GROUP_SCHED strives to achieve exactly that. It lets tasks to be
+CONFIG_CGROUP_SCHED strives to achieve exactly that. It lets tasks be
grouped and divides CPU time fairly among such groups.
CONFIG_RT_GROUP_SCHED permits to group real-time (i.e., SCHED_FIFO and
@@ -220,38 +220,11 @@ SCHED_RR) tasks.
CONFIG_FAIR_GROUP_SCHED permits to group CFS (i.e., SCHED_NORMAL and
SCHED_BATCH) tasks.
-At present, there are two (mutually exclusive) mechanisms to group tasks for
-CPU bandwidth control purposes:
-
- - Based on user id (CONFIG_USER_SCHED)
-
- With this option, tasks are grouped according to their user id.
-
- - Based on "cgroup" pseudo filesystem (CONFIG_CGROUP_SCHED)
-
- This options needs CONFIG_CGROUPS to be defined, and lets the administrator
+ These options need CONFIG_CGROUPS to be defined, and let the administrator
create arbitrary groups of tasks, using the "cgroup" pseudo filesystem. See
Documentation/cgroups/cgroups.txt for more information about this filesystem.
-Only one of these options to group tasks can be chosen and not both.
-
-When CONFIG_USER_SCHED is defined, a directory is created in sysfs for each new
-user and a "cpu_share" file is added in that directory.
-
- # cd /sys/kernel/uids
- # cat 512/cpu_share # Display user 512's CPU share
- 1024
- # echo 2048 > 512/cpu_share # Modify user 512's CPU share
- # cat 512/cpu_share # Display user 512's CPU share
- 2048
- #
-
-CPU bandwidth between two users is divided in the ratio of their CPU shares.
-For example: if you would like user "root" to get twice the bandwidth of user
-"guest," then set the cpu_share for both the users such that "root"'s cpu_share
-is twice "guest"'s cpu_share.
-
-When CONFIG_CGROUP_SCHED is defined, a "cpu.shares" file is created for each
+When CONFIG_FAIR_GROUP_SCHED is defined, a "cpu.shares" file is created for each
group created using the pseudo filesystem. See example steps below to create
task groups and modify their CPU share using the "cgroups" pseudo filesystem.
@@ -273,24 +246,3 @@ task groups and modify their CPU share using the "cgroups" pseudo filesystem.
# #Launch gmplayer (or your favourite movie player)
# echo <movie_player_pid> > multimedia/tasks
-
-8. Implementation note: user namespaces
-
-User namespaces are intended to be hierarchical. But they are currently
-only partially implemented. Each of those has ramifications for CFS.
-
-First, since user namespaces are hierarchical, the /sys/kernel/uids
-presentation is inadequate. Eventually we will likely want to use sysfs
-tagging to provide private views of /sys/kernel/uids within each user
-namespace.
-
-Second, the hierarchical nature is intended to support completely
-unprivileged use of user namespaces. So if using user groups, then
-we want the users in a user namespace to be children of the user
-who created it.
-
-That is currently unimplemented. So instead, every user in a new
-user namespace will receive 1024 shares just like any user in the
-initial user namespace. Note that at the moment creation of a new
-user namespace requires each of CAP_SYS_ADMIN, CAP_SETUID, and
-CAP_SETGID.
diff --git a/Documentation/scheduler/sched-rt-group.txt b/Documentation/scheduler/sched-rt-group.txt
index 86eabe6c3419..605b0d40329d 100644
--- a/Documentation/scheduler/sched-rt-group.txt
+++ b/Documentation/scheduler/sched-rt-group.txt
@@ -126,23 +126,12 @@ priority!
2.3 Basis for grouping tasks
----------------------------
-There are two compile-time settings for allocating CPU bandwidth. These are
-configured using the "Basis for grouping tasks" multiple choice menu under
-General setup > Group CPU Scheduler:
-
-a. CONFIG_USER_SCHED (aka "Basis for grouping tasks" = "user id")
-
-This lets you use the virtual files under
-"/sys/kernel/uids/<uid>/cpu_rt_runtime_us" to control he CPU time reserved for
-each user .
-
-The other option is:
-
-.o CONFIG_CGROUP_SCHED (aka "Basis for grouping tasks" = "Control groups")
+Enabling CONFIG_RT_GROUP_SCHED lets you explicitly allocate real
+CPU bandwidth to task groups.
This uses the /cgroup virtual file system and
"/cgroup/<cgroup>/cpu.rt_runtime_us" to control the CPU time reserved for each
-control group instead.
+control group.
For more information on working with control groups, you should read
Documentation/cgroups/cgroups.txt as well.
@@ -161,8 +150,7 @@ For now, this can be simplified to just the following (but see Future plans):
===============
There is work in progress to make the scheduling period for each group
-("/sys/kernel/uids/<uid>/cpu_rt_period_us" or
-"/cgroup/<cgroup>/cpu.rt_period_us" respectively) configurable as well.
+("/cgroup/<cgroup>/cpu.rt_period_us") configurable as well.
The constraint on the period is that a subgroup must have a smaller or
equal period to its parent. But realistically its not very useful _yet_
diff --git a/Documentation/scsi/ChangeLog.lpfc b/Documentation/scsi/ChangeLog.lpfc
index 2ffc1148eb95..e759e92e286d 100644
--- a/Documentation/scsi/ChangeLog.lpfc
+++ b/Documentation/scsi/ChangeLog.lpfc
@@ -707,7 +707,7 @@ Changes from 20040920 to 20041018
* Integrate patches from Christoph Hellwig: two new helpers common
to lpfc_sli_resume_iocb and lpfc_sli_issue_iocb - singificant
cleanup of those two functions - the unused SLI_IOCB_USE_TXQ is
- gone - lpfc_sli_issue_iocb_wait loses it's flags argument
+ gone - lpfc_sli_issue_iocb_wait loses its flags argument
totally.
* Fix in lpfc_sli.c: we can not store a 5 bit value in a 4-bit
field.
@@ -1028,7 +1028,7 @@ Changes from 20040614 to 20040709
* Remove the need for buf_tmo.
* Changed ULP_BDE64 to struct ulp_bde64.
* Changed ULP_BDE to struct ulp_bde.
- * Cleanup lpfc_os_return_scsi_cmd() and it's call path.
+ * Cleanup lpfc_os_return_scsi_cmd() and its call path.
* Removed lpfc_no_device_delay.
* Consolidating lpfc_hba_put_event() into lpfc_put_event().
* Removed following attributes and their functionality:
diff --git a/Documentation/scsi/FlashPoint.txt b/Documentation/scsi/FlashPoint.txt
index d5acaa300a46..1540a92f6d2b 100644
--- a/Documentation/scsi/FlashPoint.txt
+++ b/Documentation/scsi/FlashPoint.txt
@@ -71,7 +71,7 @@ peters@mylex.com
Ever since its introduction last October, the BusLogic FlashPoint LT has
been problematic for members of the Linux community, in that no Linux
-drivers have been available for this new Ultra SCSI product. Despite it's
+drivers have been available for this new Ultra SCSI product. Despite its
officially being positioned as a desktop workstation product, and not being
particularly well suited for a high performance multitasking operating
system like Linux, the FlashPoint LT has been touted by computer system
diff --git a/Documentation/scsi/dtc3x80.txt b/Documentation/scsi/dtc3x80.txt
index e8ae6230ab3e..1d7af9f9a8ed 100644
--- a/Documentation/scsi/dtc3x80.txt
+++ b/Documentation/scsi/dtc3x80.txt
@@ -12,7 +12,7 @@ The 3180 does not. Otherwise, they are identical.
The DTC3x80 does not support DMA but it does have Pseudo-DMA which is
supported by the driver.
-It's DTC406 scsi chip is supposedly compatible with the NCR 53C400.
+Its DTC406 scsi chip is supposedly compatible with the NCR 53C400.
It is memory mapped, uses an IRQ, but no dma or io-port. There is
internal DMA, between SCSI bus and an on-chip 128-byte buffer. Double
buffering is done automagically by the chip. Data is transferred
diff --git a/Documentation/scsi/ncr53c8xx.txt b/Documentation/scsi/ncr53c8xx.txt
index 08e2b4d04aab..cda5f8fa2c66 100644
--- a/Documentation/scsi/ncr53c8xx.txt
+++ b/Documentation/scsi/ncr53c8xx.txt
@@ -1479,7 +1479,7 @@ Wide16 SCSI.
Enabling serial NVRAM support enables detection of the serial NVRAM included
on Symbios and some Symbios compatible host adaptors, and Tekram boards. The
serial NVRAM is used by Symbios and Tekram to hold set up parameters for the
-host adaptor and it's attached drives.
+host adaptor and its attached drives.
The Symbios NVRAM also holds data on the boot order of host adaptors in a
system with more than one host adaptor. This enables the order of scanning
diff --git a/Documentation/scsi/osst.txt b/Documentation/scsi/osst.txt
index f536907e241d..2b21890bc983 100644
--- a/Documentation/scsi/osst.txt
+++ b/Documentation/scsi/osst.txt
@@ -40,7 +40,7 @@ behavior looks very much the same as st to the userspace applications.
History
-------
-In the first place, osst shared it's identity very much with st. That meant
+In the first place, osst shared its identity very much with st. That meant
that it used the same kernel structures and the same device node as st.
So you could only have either of them being present in the kernel. This has
been fixed by registering an own device, now.
diff --git a/Documentation/scsi/scsi_fc_transport.txt b/Documentation/scsi/scsi_fc_transport.txt
index aec6549ab097..e00192de4d1c 100644
--- a/Documentation/scsi/scsi_fc_transport.txt
+++ b/Documentation/scsi/scsi_fc_transport.txt
@@ -70,7 +70,7 @@ Overview:
up to an administrative entity controlling the vport. For example,
if vports are to be associated with virtual machines, a XEN mgmt
utility would be responsible for creating wwpn/wwnn's for the vport,
- using it's own naming authority and OUI. (Note: it already does this
+ using its own naming authority and OUI. (Note: it already does this
for virtual MAC addresses).
@@ -81,7 +81,7 @@ Device Trees and Vport Objects:
with rports and scsi target objects underneath it. Currently the FC
transport creates the vport object and places it under the scsi_host
object corresponding to the physical adapter. The LLDD will allocate
- a new scsi_host for the vport and link it's object under the vport.
+ a new scsi_host for the vport and link its object under the vport.
The remainder of the tree under the vports scsi_host is the same
as the non-NPIV case. The transport is written currently to easily
allow the parent of the vport to be something other than the scsi_host.
diff --git a/Documentation/scsi/sym53c8xx_2.txt b/Documentation/scsi/sym53c8xx_2.txt
index eb9a7b905b64..6f63b7989679 100644
--- a/Documentation/scsi/sym53c8xx_2.txt
+++ b/Documentation/scsi/sym53c8xx_2.txt
@@ -687,7 +687,7 @@ maintain the driver code.
Enabling serial NVRAM support enables detection of the serial NVRAM included
on Symbios and some Symbios compatible host adaptors, and Tekram boards. The
serial NVRAM is used by Symbios and Tekram to hold set up parameters for the
-host adaptor and it's attached drives.
+host adaptor and its attached drives.
The Symbios NVRAM also holds data on the boot order of host adaptors in a
system with more than one host adaptor. This information is no longer used
diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt
index bfcbbf88c44d..2075bbb8b3e2 100644
--- a/Documentation/sound/alsa/ALSA-Configuration.txt
+++ b/Documentation/sound/alsa/ALSA-Configuration.txt
@@ -227,6 +227,16 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
The power-management is supported.
+ Module snd-asihpi
+ -----------------
+
+ Module for AudioScience ASI soundcards
+
+ enable_hpi_hwdep - enable HPI hwdep for AudioScience soundcard
+
+ This module supports multiple cards.
+ The driver requires firmware loader support in the kernel.
+
Module snd-atiixp
-----------------
@@ -622,28 +632,23 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
The power-management is supported.
- Module snd-es968
- ----------------
-
- Module for sound cards based on ESS ES968 chip (PnP only).
-
- This module supports multiple cards, PnP and autoprobe.
-
- The power-management is supported.
-
Module snd-es1688
-----------------
Module for ESS AudioDrive ES-1688 and ES-688 sound cards.
- port - port # for ES-1688 chip (0x220,0x240,0x260)
- fm_port - port # for OPL3 (option; share the same port as default)
+ isapnp - ISA PnP detection - 0 = disable, 1 = enable (default)
mpu_port - port # for MPU-401 port (0x300,0x310,0x320,0x330), -1 = disable (default)
- irq - IRQ # for ES-1688 chip (5,7,9,10)
mpu_irq - IRQ # for MPU-401 port (5,7,9,10)
+ fm_port - port # for OPL3 (option; share the same port as default)
+
+ with isapnp=0, the following additional options are available:
+ port - port # for ES-1688 chip (0x220,0x240,0x260)
+ irq - IRQ # for ES-1688 chip (5,7,9,10)
dma8 - DMA # for ES-1688 chip (0,1,3)
- This module supports multiple cards and autoprobe (without MPU-401 port).
+ This module supports multiple cards and autoprobe (without MPU-401 port)
+ and PnP with the ES968 chip.
Module snd-es18xx
-----------------
diff --git a/Documentation/sound/alsa/HD-Audio.txt b/Documentation/sound/alsa/HD-Audio.txt
index 98d14cb8a85d..bdafdbd32561 100644
--- a/Documentation/sound/alsa/HD-Audio.txt
+++ b/Documentation/sound/alsa/HD-Audio.txt
@@ -204,7 +204,6 @@ generic parser regardless of the codec. Usually the codec-specific
parser is much better than the generic parser (as now). Thus this
option is more about the debugging purpose.
-
Speaker and Headphone Output
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
One of the most frequent (and obvious) bugs with HD-audio is the
@@ -600,6 +599,9 @@ probing, the proc file is available, so you can get the raw codec
information before modified by the driver. Of course, the driver
isn't usable with `probe_only=1`. But you can continue the
configuration via hwdep sysfs file if hda-reconfig option is enabled.
+Using `probe_only` mask 2 skips the reset of HDA codecs (use
+`probe_only=3` as module option). The hwdep interface can be used
+to determine the BIOS codec initialization.
hda-verb
diff --git a/Documentation/sound/alsa/soc/dapm.txt b/Documentation/sound/alsa/soc/dapm.txt
index 9ac842be9b4f..05bf5a0eee41 100644
--- a/Documentation/sound/alsa/soc/dapm.txt
+++ b/Documentation/sound/alsa/soc/dapm.txt
@@ -188,8 +188,8 @@ The WM8731 output mixer has 3 inputs (sources)
3. Mic Sidetone Input
Each input in this example has a kcontrol associated with it (defined in example
-above) and is connected to the output mixer via it's kcontrol name. We can now
-connect the destination widget (wrt audio signal) with it's source widgets.
+above) and is connected to the output mixer via its kcontrol name. We can now
+connect the destination widget (wrt audio signal) with its source widgets.
/* output mixer */
{"Output Mixer", "Line Bypass Switch", "Line Input"},
diff --git a/Documentation/sound/alsa/soc/machine.txt b/Documentation/sound/alsa/soc/machine.txt
index bab7711ce963..2524c75557df 100644
--- a/Documentation/sound/alsa/soc/machine.txt
+++ b/Documentation/sound/alsa/soc/machine.txt
@@ -67,7 +67,7 @@ static struct snd_soc_dai_link corgi_dai = {
.ops = &corgi_ops,
};
-struct snd_soc_card then sets up the machine with it's DAIs. e.g.
+struct snd_soc_card then sets up the machine with its DAIs. e.g.
/* corgi audio machine driver */
static struct snd_soc_card snd_soc_corgi = {
diff --git a/Documentation/sound/alsa/soc/overview.txt b/Documentation/sound/alsa/soc/overview.txt
index 1e4c6d3655f2..138ac88c1461 100644
--- a/Documentation/sound/alsa/soc/overview.txt
+++ b/Documentation/sound/alsa/soc/overview.txt
@@ -33,7 +33,7 @@ features :-
and machines.
* Easy I2S/PCM audio interface setup between codec and SoC. Each SoC
- interface and codec registers it's audio interface capabilities with the
+ interface and codec registers its audio interface capabilities with the
core and are subsequently matched and configured when the application
hardware parameters are known.
diff --git a/Documentation/sparse.txt b/Documentation/sparse.txt
index 34c76a55bc04..9b659c79a547 100644
--- a/Documentation/sparse.txt
+++ b/Documentation/sparse.txt
@@ -54,12 +54,12 @@ Getting sparse
~~~~~~~~~~~~~~
You can get latest released versions from the Sparse homepage at
-http://www.kernel.org/pub/linux/kernel/people/josh/sparse/
+https://sparse.wiki.kernel.org/index.php/Main_Page
Alternatively, you can get snapshots of the latest development version
of sparse using git to clone..
- git://git.kernel.org/pub/scm/linux/kernel/git/josh/sparse.git
+ git://git.kernel.org/pub/scm/devel/sparse/sparse.git
DaveJ has hourly generated tarballs of the git tree available at..
diff --git a/Documentation/sysfs-rules.txt b/Documentation/sysfs-rules.txt
index 5d8bc2cd250c..c1a1fd636bf9 100644
--- a/Documentation/sysfs-rules.txt
+++ b/Documentation/sysfs-rules.txt
@@ -125,7 +125,7 @@ versions of the sysfs interface.
- Block
The converted block subsystem at /sys/class/block or
/sys/subsystem/block will contain the links for disks and partitions
- at the same level, never in a hierarchy. Assuming the block subsytem to
+ at the same level, never in a hierarchy. Assuming the block subsystem to
contain only disks and not partition devices in the same flat list is
a bug in the application.
diff --git a/Documentation/trace/events.txt b/Documentation/trace/events.txt
index 02ac6ed38b2d..09bd8e902989 100644
--- a/Documentation/trace/events.txt
+++ b/Documentation/trace/events.txt
@@ -90,7 +90,8 @@ In order to facilitate early boot debugging, use boot option:
trace_event=[event-list]
-The format of this boot option is the same as described in section 2.1.
+event-list is a comma separated list of events. See section 2.1 for event
+format.
3. Defining an event-enabled tracepoint
=======================================
@@ -238,7 +239,7 @@ subsystem's filter file.
For convenience, filters for every event in a subsystem can be set or
cleared as a group by writing a filter expression into the filter file
-at the root of the subsytem. Note however, that if a filter for any
+at the root of the subsystem. Note however, that if a filter for any
event within the subsystem lacks a field specified in the subsystem
filter, or if the filter can't be applied for any other reason, the
filter for that event will retain its previous setting. This can
@@ -250,7 +251,7 @@ fields can be guaranteed to propagate successfully to all events.
Here are a few subsystem filter examples that also illustrate the
above points:
-Clear the filters on all events in the sched subsytem:
+Clear the filters on all events in the sched subsystem:
# cd /sys/kernel/debug/tracing/events/sched
# echo 0 > filter
@@ -260,7 +261,7 @@ none
none
Set a filter using only common fields for all events in the sched
-subsytem (all events end up with the same filter):
+subsystem (all events end up with the same filter):
# cd /sys/kernel/debug/tracing/events/sched
# echo common_pid == 0 > filter
@@ -270,7 +271,7 @@ common_pid == 0
common_pid == 0
Attempt to set a filter using a non-common field for all events in the
-sched subsytem (all events but those that have a prev_pid field retain
+sched subsystem (all events but those that have a prev_pid field retain
their old filters):
# cd /sys/kernel/debug/tracing/events/sched
diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt
index 03485bfbd797..557c1edeccaf 100644
--- a/Documentation/trace/ftrace.txt
+++ b/Documentation/trace/ftrace.txt
@@ -155,6 +155,9 @@ of ftrace. Here is a list of some of the key files:
to be traced. Echoing names of functions into this file
will limit the trace to only those functions.
+ This interface also allows for commands to be used. See the
+ "Filter commands" section for more details.
+
set_ftrace_notrace:
This has an effect opposite to that of
@@ -1337,12 +1340,14 @@ ftrace_dump_on_oops must be set. To set ftrace_dump_on_oops, one
can either use the sysctl function or set it via the proc system
interface.
- sysctl kernel.ftrace_dump_on_oops=1
+ sysctl kernel.ftrace_dump_on_oops=n
or
- echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
+ echo n > /proc/sys/kernel/ftrace_dump_on_oops
+If n = 1, ftrace will dump the buffers of all CPUs; if n = 2, ftrace will
+only dump the buffer of the CPU that triggered the oops.
Here's an example of such a dump after a null pointer
dereference in a kernel module:
@@ -1822,6 +1827,47 @@ this special filter via:
echo > set_graph_function
+Filter commands
+---------------
+
+A few commands are supported by the set_ftrace_filter interface.
+Trace commands have the following format:
+
+<function>:<command>:<parameter>
+
+The following commands are supported:
+
+- mod
+ This command enables function filtering per module. The
+ parameter defines the module. For example, if only the write*
+ functions in the ext3 module are desired, run:
+
+ echo 'write*:mod:ext3' > set_ftrace_filter
+
+ This command interacts with the filter in the same way as
+ filtering based on function names. Thus, adding more functions
+ in a different module is accomplished by appending (>>) to the
+ filter file. Remove specific module functions by prepending
+ '!':
+
+ echo '!writeback*:mod:ext3' >> set_ftrace_filter
+
+- traceon/traceoff
+ These commands turn tracing on and off when the specified
+ functions are hit. The parameter determines how many times the
+ tracing system is turned on and off. If unspecified, there is
+ no limit. For example, to disable tracing when a schedule bug
+ is hit the first 5 times, run:
+
+ echo '__schedule_bug:traceoff:5' > set_ftrace_filter
+
+ These commands are cumulative whether or not they are appended
+ to set_ftrace_filter. To remove a command, prepend it by '!'
+ and drop the parameter:
+
+ echo '!__schedule_bug:traceoff' > set_ftrace_filter
+
+
trace_pipe
----------
diff --git a/Documentation/trace/kprobetrace.txt b/Documentation/trace/kprobetrace.txt
index a9100b28eb84..ec94748ae65b 100644
--- a/Documentation/trace/kprobetrace.txt
+++ b/Documentation/trace/kprobetrace.txt
@@ -40,7 +40,9 @@ Synopsis of kprobe_events
$stack : Fetch stack address.
$retval : Fetch return value.(*)
+|-offs(FETCHARG) : Fetch memory at FETCHARG +|- offs address.(**)
- NAME=FETCHARG: Set NAME as the argument name of FETCHARG.
+ NAME=FETCHARG : Set NAME as the argument name of FETCHARG.
+ FETCHARG:TYPE : Set TYPE as the type of FETCHARG. Currently, basic types
+ (u8/u16/u32/u64/s8/s16/s32/s64) are supported.
(*) only for return probe.
(**) this is useful for fetching a field of data structures.
diff --git a/Documentation/usb/WUSB-Design-overview.txt b/Documentation/usb/WUSB-Design-overview.txt
index c480e9c32dbd..4c5e37939344 100644
--- a/Documentation/usb/WUSB-Design-overview.txt
+++ b/Documentation/usb/WUSB-Design-overview.txt
@@ -381,7 +381,7 @@ descriptor that gives us the status of the transfer, its identification
we issue another URB to read into the destination buffer the chunk of
data coming out of the remote endpoint. Done, wait for the next guy. The
callbacks for the URBs issued from here are the ones that will declare
-the xfer complete at some point and call it's callback.
+the xfer complete at some point and call its callback.
Seems simple, but the implementation is not trivial.
diff --git a/Documentation/video4linux/CARDLIST.bttv b/Documentation/video4linux/CARDLIST.bttv
index f11c583295e9..4739d5684305 100644
--- a/Documentation/video4linux/CARDLIST.bttv
+++ b/Documentation/video4linux/CARDLIST.bttv
@@ -100,7 +100,7 @@
99 -> AD-TVK503
100 -> Hercules Smart TV Stereo
101 -> Pace TV & Radio Card
-102 -> IVC-200 [0000:a155,0001:a155,0002:a155,0003:a155,0100:a155,0101:a155,0102:a155,0103:a155]
+102 -> IVC-200 [0000:a155,0001:a155,0002:a155,0003:a155,0100:a155,0101:a155,0102:a155,0103:a155,0800:a155,0801:a155,0802:a155,0803:a155]
103 -> Grand X-Guard / Trust 814PCI [0304:0102]
104 -> Nebula Electronics DigiTV [0071:0101]
105 -> ProVideo PV143 [aa00:1430,aa00:1431,aa00:1432,aa00:1433,aa03:1433]
diff --git a/Documentation/video4linux/CARDLIST.cx88 b/Documentation/video4linux/CARDLIST.cx88
index 7ec3c4e4b60f..f2510541373b 100644
--- a/Documentation/video4linux/CARDLIST.cx88
+++ b/Documentation/video4linux/CARDLIST.cx88
@@ -82,3 +82,4 @@
81 -> Leadtek WinFast DTV1800 Hybrid [107d:6654]
82 -> WinFast DTV2000 H rev. J [107d:6f2b]
83 -> Prof 7301 DVB-S/S2 [b034:3034]
+ 84 -> Samsung SMT 7020 DVB-S [18ac:dc00,18ac:dccd]
diff --git a/Documentation/video4linux/CARDLIST.em28xx b/Documentation/video4linux/CARDLIST.em28xx
index 0c166ff003a0..3a623aaeae5f 100644
--- a/Documentation/video4linux/CARDLIST.em28xx
+++ b/Documentation/video4linux/CARDLIST.em28xx
@@ -1,5 +1,5 @@
0 -> Unknown EM2800 video grabber (em2800) [eb1a:2800]
- 1 -> Unknown EM2750/28xx video grabber (em2820/em2840) [eb1a:2710,eb1a:2820,eb1a:2821,eb1a:2860,eb1a:2861,eb1a:2862,eb1a:2870,eb1a:2881,eb1a:2883,eb1a:2868]
+ 1 -> Unknown EM2750/28xx video grabber (em2820/em2840) [eb1a:2710,eb1a:2820,eb1a:2821,eb1a:2860,eb1a:2861,eb1a:2862,eb1a:2863,eb1a:2870,eb1a:2881,eb1a:2883,eb1a:2868]
2 -> Terratec Cinergy 250 USB (em2820/em2840) [0ccd:0036]
3 -> Pinnacle PCTV USB 2 (em2820/em2840) [2304:0208]
4 -> Hauppauge WinTV USB 2 (em2820/em2840) [2040:4200,2040:4201]
@@ -27,6 +27,7 @@
26 -> Hercules Smart TV USB 2.0 (em2820/em2840)
27 -> Pinnacle PCTV USB 2 (Philips FM1216ME) (em2820/em2840)
28 -> Leadtek Winfast USB II Deluxe (em2820/em2840)
+ 29 -> EM2860/TVP5150 Reference Design (em2860)
30 -> Videology 20K14XUSB USB2.0 (em2820/em2840)
31 -> Usbgear VD204v9 (em2821)
32 -> Supercomp USB 2.0 TV (em2821)
@@ -70,3 +71,4 @@
72 -> Gadmei UTV330+ (em2861)
73 -> Reddo DVB-C USB TV Box (em2870)
74 -> Actionmaster/LinXcel/Digitus VC211A (em2800)
+ 75 -> Dikom DK300 (em2882)
diff --git a/Documentation/video4linux/CARDLIST.saa7134 b/Documentation/video4linux/CARDLIST.saa7134
index b4a767060ed7..070f2576707e 100644
--- a/Documentation/video4linux/CARDLIST.saa7134
+++ b/Documentation/video4linux/CARDLIST.saa7134
@@ -175,3 +175,6 @@
174 -> Asus Europa Hybrid OEM [1043:4847]
175 -> Leadtek Winfast DTV1000S [107d:6655]
176 -> Beholder BeholdTV 505 RDS [0000:5051]
+177 -> Hawell HW-404M7
+179 -> Beholder BeholdTV H7 [5ace:7190]
+180 -> Beholder BeholdTV A7 [5ace:7090]
diff --git a/Documentation/video4linux/extract_xc3028.pl b/Documentation/video4linux/extract_xc3028.pl
index 2cb816047fc1..47877deae6d7 100644
--- a/Documentation/video4linux/extract_xc3028.pl
+++ b/Documentation/video4linux/extract_xc3028.pl
@@ -5,12 +5,18 @@
#
# In order to use, you need to:
# 1) Download the windows driver with something like:
+# Version 2.4
+# wget http://www.twinhan.com/files/AW/BDA T/20080303_V1.0.6.7.zip
+# or wget http://www.stefanringel.de/pub/20080303_V1.0.6.7.zip
+# Version 2.7
# wget http://www.steventoth.net/linux/xc5000/HVR-12x0-14x0-17x0_1_25_25271_WHQL.zip
-# 2) Extract the file hcw85bda.sys from the zip into the current dir:
+# 2) Extract the files from the zip into the current dir:
+# unzip -j 20080303_V1.0.6.7.zip 20080303_v1.0.6.7/UDXTTM6000.sys
# unzip -j HVR-12x0-14x0-17x0_1_25_25271_WHQL.zip Driver85/hcw85bda.sys
# 3) run the script:
# ./extract_xc3028.pl
-# 4) copy the generated file:
+# 4) copy the generated files:
+# cp xc3028-v24.fw /lib/firmware
# cp xc3028-v27.fw /lib/firmware
#use strict;
@@ -135,7 +141,7 @@ sub write_hunk_fix_endian($$)
}
}
-sub main_firmware($$$$)
+sub main_firmware_24($$$$)
{
my $out;
my $j=0;
@@ -146,8 +152,774 @@ sub main_firmware($$$$)
for ($j = length($name); $j <32; $j++) {
$name = $name.chr(0);
+ }
+
+ open OUTFILE, ">$outfile";
+ syswrite(OUTFILE, $name);
+ write_le16($version);
+ write_le16($nr_desc);
+
+ #
+ # Firmware 0, type: BASE FW F8MHZ (0x00000003), id: (0000000000000000), size: 6635
+ #
+
+ write_le32(0x00000003); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le32(6635); # Size
+ write_hunk_fix_endian(257752, 6635);
+
+ #
+ # Firmware 1, type: BASE FW F8MHZ MTS (0x00000007), id: (0000000000000000), size: 6635
+ #
+
+ write_le32(0x00000007); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le32(6635); # Size
+ write_hunk_fix_endian(264392, 6635);
+
+ #
+ # Firmware 2, type: BASE FW FM (0x00000401), id: (0000000000000000), size: 6525
+ #
+
+ write_le32(0x00000401); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le32(6525); # Size
+ write_hunk_fix_endian(271040, 6525);
+
+ #
+ # Firmware 3, type: BASE FW FM INPUT1 (0x00000c01), id: (0000000000000000), size: 6539
+ #
+
+ write_le32(0x00000c01); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le32(6539); # Size
+ write_hunk_fix_endian(277568, 6539);
+
+ #
+ # Firmware 4, type: BASE FW (0x00000001), id: (0000000000000000), size: 6633
+ #
+
+ write_le32(0x00000001); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le32(6633); # Size
+ write_hunk_fix_endian(284120, 6633);
+
+ #
+ # Firmware 5, type: BASE FW MTS (0x00000005), id: (0000000000000000), size: 6617
+ #
+
+ write_le32(0x00000005); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le32(6617); # Size
+ write_hunk_fix_endian(290760, 6617);
+
+ #
+ # Firmware 6, type: STD FW (0x00000000), id: PAL/BG A2/A (0000000100000007), size: 161
+ #
+
+ write_le32(0x00000000); # Type
+ write_le64(0x00000001, 0x00000007); # ID
+ write_le32(161); # Size
+ write_hunk_fix_endian(297384, 161);
+
+ #
+ # Firmware 7, type: STD FW MTS (0x00000004), id: PAL/BG A2/A (0000000100000007), size: 169
+ #
+
+ write_le32(0x00000004); # Type
+ write_le64(0x00000001, 0x00000007); # ID
+ write_le32(169); # Size
+ write_hunk_fix_endian(297552, 169);
+
+ #
+ # Firmware 8, type: STD FW (0x00000000), id: PAL/BG A2/B (0000000200000007), size: 161
+ #
+
+ write_le32(0x00000000); # Type
+ write_le64(0x00000002, 0x00000007); # ID
+ write_le32(161); # Size
+ write_hunk_fix_endian(297728, 161);
+
+ #
+ # Firmware 9, type: STD FW MTS (0x00000004), id: PAL/BG A2/B (0000000200000007), size: 169
+ #
+
+ write_le32(0x00000004); # Type
+ write_le64(0x00000002, 0x00000007); # ID
+ write_le32(169); # Size
+ write_hunk_fix_endian(297896, 169);
+
+ #
+ # Firmware 10, type: STD FW (0x00000000), id: PAL/BG NICAM/A (0000000400000007), size: 161
+ #
+
+ write_le32(0x00000000); # Type
+ write_le64(0x00000004, 0x00000007); # ID
+ write_le32(161); # Size
+ write_hunk_fix_endian(298072, 161);
+
+ #
+ # Firmware 11, type: STD FW MTS (0x00000004), id: PAL/BG NICAM/A (0000000400000007), size: 169
+ #
+
+ write_le32(0x00000004); # Type
+ write_le64(0x00000004, 0x00000007); # ID
+ write_le32(169); # Size
+ write_hunk_fix_endian(298240, 169);
+
+ #
+ # Firmware 12, type: STD FW (0x00000000), id: PAL/BG NICAM/B (0000000800000007), size: 161
+ #
+
+ write_le32(0x00000000); # Type
+ write_le64(0x00000008, 0x00000007); # ID
+ write_le32(161); # Size
+ write_hunk_fix_endian(298416, 161);
+
+ #
+ # Firmware 13, type: STD FW MTS (0x00000004), id: PAL/BG NICAM/B (0000000800000007), size: 169
+ #
+
+ write_le32(0x00000004); # Type
+ write_le64(0x00000008, 0x00000007); # ID
+ write_le32(169); # Size
+ write_hunk_fix_endian(298584, 169);
+
+ #
+ # Firmware 14, type: STD FW (0x00000000), id: PAL/DK A2 (00000003000000e0), size: 161
+ #
+
+ write_le32(0x00000000); # Type
+ write_le64(0x00000003, 0x000000e0); # ID
+ write_le32(161); # Size
+ write_hunk_fix_endian(298760, 161);
+
+ #
+ # Firmware 15, type: STD FW MTS (0x00000004), id: PAL/DK A2 (00000003000000e0), size: 169
+ #
+
+ write_le32(0x00000004); # Type
+ write_le64(0x00000003, 0x000000e0); # ID
+ write_le32(169); # Size
+ write_hunk_fix_endian(298928, 169);
+
+ #
+ # Firmware 16, type: STD FW (0x00000000), id: PAL/DK NICAM (0000000c000000e0), size: 161
+ #
+
+ write_le32(0x00000000); # Type
+ write_le64(0x0000000c, 0x000000e0); # ID
+ write_le32(161); # Size
+ write_hunk_fix_endian(299104, 161);
+
+ #
+ # Firmware 17, type: STD FW MTS (0x00000004), id: PAL/DK NICAM (0000000c000000e0), size: 169
+ #
+
+ write_le32(0x00000004); # Type
+ write_le64(0x0000000c, 0x000000e0); # ID
+ write_le32(169); # Size
+ write_hunk_fix_endian(299272, 169);
+
+ #
+ # Firmware 18, type: STD FW (0x00000000), id: SECAM/K1 (0000000000200000), size: 161
+ #
+
+ write_le32(0x00000000); # Type
+ write_le64(0x00000000, 0x00200000); # ID
+ write_le32(161); # Size
+ write_hunk_fix_endian(299448, 161);
+
+ #
+ # Firmware 19, type: STD FW MTS (0x00000004), id: SECAM/K1 (0000000000200000), size: 169
+ #
+
+ write_le32(0x00000004); # Type
+ write_le64(0x00000000, 0x00200000); # ID
+ write_le32(169); # Size
+ write_hunk_fix_endian(299616, 169);
+
+ #
+ # Firmware 20, type: STD FW (0x00000000), id: SECAM/K3 (0000000004000000), size: 161
+ #
+
+ write_le32(0x00000000); # Type
+ write_le64(0x00000000, 0x04000000); # ID
+ write_le32(161); # Size
+ write_hunk_fix_endian(299792, 161);
+
+ #
+ # Firmware 21, type: STD FW MTS (0x00000004), id: SECAM/K3 (0000000004000000), size: 169
+ #
+
+ write_le32(0x00000004); # Type
+ write_le64(0x00000000, 0x04000000); # ID
+ write_le32(169); # Size
+ write_hunk_fix_endian(299960, 169);
+
+ #
+ # Firmware 22, type: STD FW D2633 DTV6 ATSC (0x00010030), id: (0000000000000000), size: 149
+ #
+
+ write_le32(0x00010030); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le32(149); # Size
+ write_hunk_fix_endian(300136, 149);
+
+ #
+ # Firmware 23, type: STD FW D2620 DTV6 QAM (0x00000068), id: (0000000000000000), size: 149
+ #
+
+ write_le32(0x00000068); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le32(149); # Size
+ write_hunk_fix_endian(300296, 149);
+
+ #
+ # Firmware 24, type: STD FW D2633 DTV6 QAM (0x00000070), id: (0000000000000000), size: 149
+ #
+
+ write_le32(0x00000070); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le32(149); # Size
+ write_hunk_fix_endian(300448, 149);
+
+ #
+ # Firmware 25, type: STD FW D2620 DTV7 (0x00000088), id: (0000000000000000), size: 149
+ #
+
+ write_le32(0x00000088); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le32(149); # Size
+ write_hunk_fix_endian(300608, 149);
+
+ #
+ # Firmware 26, type: STD FW D2633 DTV7 (0x00000090), id: (0000000000000000), size: 149
+ #
+
+ write_le32(0x00000090); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le32(149); # Size
+ write_hunk_fix_endian(300760, 149);
+
+ #
+ # Firmware 27, type: STD FW D2620 DTV78 (0x00000108), id: (0000000000000000), size: 149
+ #
+
+ write_le32(0x00000108); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le32(149); # Size
+ write_hunk_fix_endian(300920, 149);
+
+ #
+ # Firmware 28, type: STD FW D2633 DTV78 (0x00000110), id: (0000000000000000), size: 149
+ #
+
+ write_le32(0x00000110); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le32(149); # Size
+ write_hunk_fix_endian(301072, 149);
+
+ #
+ # Firmware 29, type: STD FW D2620 DTV8 (0x00000208), id: (0000000000000000), size: 149
+ #
+
+ write_le32(0x00000208); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le32(149); # Size
+ write_hunk_fix_endian(301232, 149);
+
+ #
+ # Firmware 30, type: STD FW D2633 DTV8 (0x00000210), id: (0000000000000000), size: 149
+ #
+
+ write_le32(0x00000210); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le32(149); # Size
+ write_hunk_fix_endian(301384, 149);
+
+ #
+ # Firmware 31, type: STD FW FM (0x00000400), id: (0000000000000000), size: 135
+ #
+
+ write_le32(0x00000400); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le32(135); # Size
+ write_hunk_fix_endian(301554, 135);
+
+ #
+ # Firmware 32, type: STD FW (0x00000000), id: PAL/I (0000000000000010), size: 161
+ #
+
+ write_le32(0x00000000); # Type
+ write_le64(0x00000000, 0x00000010); # ID
+ write_le32(161); # Size
+ write_hunk_fix_endian(301688, 161);
+
+ #
+ # Firmware 33, type: STD FW MTS (0x00000004), id: PAL/I (0000000000000010), size: 169
+ #
+
+ write_le32(0x00000004); # Type
+ write_le64(0x00000000, 0x00000010); # ID
+ write_le32(169); # Size
+ write_hunk_fix_endian(301856, 169);
+
+ #
+ # Firmware 34, type: STD FW (0x00000000), id: SECAM/L AM (0000001000400000), size: 169
+ #
+
+ #
+ # Firmware 35, type: STD FW (0x00000000), id: SECAM/L NICAM (0000000c00400000), size: 161
+ #
+
+ write_le32(0x00000000); # Type
+ write_le64(0x0000000c, 0x00400000); # ID
+ write_le32(161); # Size
+ write_hunk_fix_endian(302032, 161);
+
+ #
+ # Firmware 36, type: STD FW (0x00000000), id: SECAM/Lc (0000000000800000), size: 161
+ #
+
+ write_le32(0x00000000); # Type
+ write_le64(0x00000000, 0x00800000); # ID
+ write_le32(161); # Size
+ write_hunk_fix_endian(302200, 161);
+
+ #
+ # Firmware 37, type: STD FW (0x00000000), id: NTSC/M Kr (0000000000008000), size: 161
+ #
+
+ write_le32(0x00000000); # Type
+ write_le64(0x00000000, 0x00008000); # ID
+ write_le32(161); # Size
+ write_hunk_fix_endian(302368, 161);
+
+ #
+ # Firmware 38, type: STD FW LCD (0x00001000), id: NTSC/M Kr (0000000000008000), size: 161
+ #
+
+ write_le32(0x00001000); # Type
+ write_le64(0x00000000, 0x00008000); # ID
+ write_le32(161); # Size
+ write_hunk_fix_endian(302536, 161);
+
+ #
+ # Firmware 39, type: STD FW LCD NOGD (0x00003000), id: NTSC/M Kr (0000000000008000), size: 161
+ #
+
+ write_le32(0x00003000); # Type
+ write_le64(0x00000000, 0x00008000); # ID
+ write_le32(161); # Size
+ write_hunk_fix_endian(302704, 161);
+
+ #
+ # Firmware 40, type: STD FW MTS (0x00000004), id: NTSC/M Kr (0000000000008000), size: 169
+ #
+
+ write_le32(0x00000004); # Type
+ write_le64(0x00000000, 0x00008000); # ID
+ write_le32(169); # Size
+ write_hunk_fix_endian(302872, 169);
+
+ #
+ # Firmware 41, type: STD FW (0x00000000), id: NTSC PAL/M PAL/N (000000000000b700), size: 161
+ #
+
+ write_le32(0x00000000); # Type
+ write_le64(0x00000000, 0x0000b700); # ID
+ write_le32(161); # Size
+ write_hunk_fix_endian(303048, 161);
+
+ #
+ # Firmware 42, type: STD FW LCD (0x00001000), id: NTSC PAL/M PAL/N (000000000000b700), size: 161
+ #
+
+ write_le32(0x00001000); # Type
+ write_le64(0x00000000, 0x0000b700); # ID
+ write_le32(161); # Size
+ write_hunk_fix_endian(303216, 161);
+
+ #
+ # Firmware 43, type: STD FW LCD NOGD (0x00003000), id: NTSC PAL/M PAL/N (000000000000b700), size: 161
+ #
+
+ write_le32(0x00003000); # Type
+ write_le64(0x00000000, 0x0000b700); # ID
+ write_le32(161); # Size
+ write_hunk_fix_endian(303384, 161);
+
+ #
+ # Firmware 44, type: STD FW (0x00000000), id: NTSC/M Jp (0000000000002000), size: 161
+ #
+
+ write_le32(0x00000000); # Type
+ write_le64(0x00000000, 0x00002000); # ID
+ write_le32(161); # Size
+ write_hunk_fix_endian(303552, 161);
+
+ #
+ # Firmware 45, type: STD FW MTS (0x00000004), id: NTSC PAL/M PAL/N (000000000000b700), size: 169
+ #
+
+ write_le32(0x00000004); # Type
+ write_le64(0x00000000, 0x0000b700); # ID
+ write_le32(169); # Size
+ write_hunk_fix_endian(303720, 169);
+
+ #
+ # Firmware 46, type: STD FW MTS LCD (0x00001004), id: NTSC PAL/M PAL/N (000000000000b700), size: 169
+ #
+
+ write_le32(0x00001004); # Type
+ write_le64(0x00000000, 0x0000b700); # ID
+ write_le32(169); # Size
+ write_hunk_fix_endian(303896, 169);
+
+ #
+ # Firmware 47, type: STD FW MTS LCD NOGD (0x00003004), id: NTSC PAL/M PAL/N (000000000000b700), size: 169
+ #
+
+ write_le32(0x00003004); # Type
+ write_le64(0x00000000, 0x0000b700); # ID
+ write_le32(169); # Size
+ write_hunk_fix_endian(304072, 169);
+
+ #
+ # Firmware 48, type: SCODE FW HAS IF (0x60000000), IF = 3.28 MHz id: (0000000000000000), size: 192
+ #
+
+ write_le32(0x60000000); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le16(3280); # IF
+ write_le32(192); # Size
+ write_hunk(309048, 192);
+
+ #
+ # Firmware 49, type: SCODE FW HAS IF (0x60000000), IF = 3.30 MHz id: (0000000000000000), size: 192
+ #
+
+# write_le32(0x60000000); # Type
+# write_le64(0x00000000, 0x00000000); # ID
+# write_le16(3300); # IF
+# write_le32(192); # Size
+# write_hunk(304440, 192);
+
+ #
+ # Firmware 50, type: SCODE FW HAS IF (0x60000000), IF = 3.44 MHz id: (0000000000000000), size: 192
+ #
+
+ write_le32(0x60000000); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le16(3440); # IF
+ write_le32(192); # Size
+ write_hunk(309432, 192);
+
+ #
+ # Firmware 51, type: SCODE FW HAS IF (0x60000000), IF = 3.46 MHz id: (0000000000000000), size: 192
+ #
+
+ write_le32(0x60000000); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le16(3460); # IF
+ write_le32(192); # Size
+ write_hunk(309624, 192);
+
+ #
+ # Firmware 52, type: SCODE FW DTV6 ATSC OREN36 HAS IF (0x60210020), IF = 3.80 MHz id: (0000000000000000), size: 192
+ #
+
+ write_le32(0x60210020); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le16(3800); # IF
+ write_le32(192); # Size
+ write_hunk(306936, 192);
+
+ #
+ # Firmware 53, type: SCODE FW HAS IF (0x60000000), IF = 4.00 MHz id: (0000000000000000), size: 192
+ #
+
+ write_le32(0x60000000); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le16(4000); # IF
+ write_le32(192); # Size
+ write_hunk(309240, 192);
+
+ #
+ # Firmware 54, type: SCODE FW DTV6 ATSC TOYOTA388 HAS IF (0x60410020), IF = 4.08 MHz id: (0000000000000000), size: 192
+ #
+
+ write_le32(0x60410020); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le16(4080); # IF
+ write_le32(192); # Size
+ write_hunk(307128, 192);
+
+ #
+ # Firmware 55, type: SCODE FW HAS IF (0x60000000), IF = 4.20 MHz id: (0000000000000000), size: 192
+ #
+
+ write_le32(0x60000000); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le16(4200); # IF
+ write_le32(192); # Size
+ write_hunk(308856, 192);
+
+ #
+ # Firmware 56, type: SCODE FW MONO HAS IF (0x60008000), IF = 4.32 MHz id: NTSC/M Kr (0000000000008000), size: 192
+ #
+
+ write_le32(0x60008000); # Type
+ write_le64(0x00000000, 0x00008000); # ID
+ write_le16(4320); # IF
+ write_le32(192); # Size
+ write_hunk(305208, 192);
+
+ #
+ # Firmware 57, type: SCODE FW HAS IF (0x60000000), IF = 4.45 MHz id: (0000000000000000), size: 192
+ #
+
+ write_le32(0x60000000); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le16(4450); # IF
+ write_le32(192); # Size
+ write_hunk(309816, 192);
+
+ #
+ # Firmware 58, type: SCODE FW MTS LCD NOGD MONO IF HAS IF (0x6002b004), IF = 4.50 MHz id: NTSC PAL/M PAL/N (000000000000b700), size: 192
+ #
+
+ write_le32(0x6002b004); # Type
+ write_le64(0x00000000, 0x0000b700); # ID
+ write_le16(4500); # IF
+ write_le32(192); # Size
+ write_hunk(304824, 192);
+
+ #
+ # Firmware 59, type: SCODE FW LCD NOGD IF HAS IF (0x60023000), IF = 4.60 MHz id: NTSC/M Kr (0000000000008000), size: 192
+ #
+
+ write_le32(0x60023000); # Type
+ write_le64(0x00000000, 0x00008000); # ID
+ write_le16(4600); # IF
+ write_le32(192); # Size
+ write_hunk(305016, 192);
+
+ #
+ # Firmware 60, type: SCODE FW DTV6 QAM DTV7 DTV78 DTV8 ZARLINK456 HAS IF (0x620003e0), IF = 4.76 MHz id: (0000000000000000), size: 192
+ #
+
+ write_le32(0x620003e0); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le16(4760); # IF
+ write_le32(192); # Size
+ write_hunk(304440, 192);
+
+ #
+ # Firmware 61, type: SCODE FW HAS IF (0x60000000), IF = 4.94 MHz id: (0000000000000000), size: 192
+ #
+
+ write_le32(0x60000000); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le16(4940); # IF
+ write_le32(192); # Size
+ write_hunk(308664, 192);
+
+ #
+ # Firmware 62, type: SCODE FW HAS IF (0x60000000), IF = 5.26 MHz id: (0000000000000000), size: 192
+ #
+
+ write_le32(0x60000000); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le16(5260); # IF
+ write_le32(192); # Size
+ write_hunk(307704, 192);
+
+ #
+ # Firmware 63, type: SCODE FW MONO HAS IF (0x60008000), IF = 5.32 MHz id: PAL/BG A2 NICAM (0000000f00000007), size: 192
+ #
+
+ write_le32(0x60008000); # Type
+ write_le64(0x0000000f, 0x00000007); # ID
+ write_le16(5320); # IF
+ write_le32(192); # Size
+ write_hunk(307896, 192);
+
+ #
+ # Firmware 64, type: SCODE FW DTV7 DTV78 DTV8 DIBCOM52 CHINA HAS IF (0x65000380), IF = 5.40 MHz id: (0000000000000000), size: 192
+ #
+
+ write_le32(0x65000380); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le16(5400); # IF
+ write_le32(192); # Size
+ write_hunk(304248, 192);
+
+ #
+ # Firmware 65, type: SCODE FW DTV6 ATSC OREN538 HAS IF (0x60110020), IF = 5.58 MHz id: (0000000000000000), size: 192
+ #
+
+ write_le32(0x60110020); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le16(5580); # IF
+ write_le32(192); # Size
+ write_hunk(306744, 192);
+
+ #
+ # Firmware 66, type: SCODE FW HAS IF (0x60000000), IF = 5.64 MHz id: PAL/BG A2 (0000000300000007), size: 192
+ #
+
+ write_le32(0x60000000); # Type
+ write_le64(0x00000003, 0x00000007); # ID
+ write_le16(5640); # IF
+ write_le32(192); # Size
+ write_hunk(305592, 192);
+
+ #
+ # Firmware 67, type: SCODE FW HAS IF (0x60000000), IF = 5.74 MHz id: PAL/BG NICAM (0000000c00000007), size: 192
+ #
+
+ write_le32(0x60000000); # Type
+ write_le64(0x0000000c, 0x00000007); # ID
+ write_le16(5740); # IF
+ write_le32(192); # Size
+ write_hunk(305784, 192);
+
+ #
+ # Firmware 68, type: SCODE FW HAS IF (0x60000000), IF = 5.90 MHz id: (0000000000000000), size: 192
+ #
+
+ write_le32(0x60000000); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le16(5900); # IF
+ write_le32(192); # Size
+ write_hunk(307512, 192);
+
+ #
+ # Firmware 69, type: SCODE FW MONO HAS IF (0x60008000), IF = 6.00 MHz id: PAL/DK PAL/I SECAM/K3 SECAM/L SECAM/Lc NICAM (0000000c04c000f0), size: 192
+ #
+
+ write_le32(0x60008000); # Type
+ write_le64(0x0000000c, 0x04c000f0); # ID
+ write_le16(6000); # IF
+ write_le32(192); # Size
+ write_hunk(305576, 192);
+
+ #
+ # Firmware 70, type: SCODE FW DTV6 QAM ATSC LG60 F6MHZ HAS IF (0x68050060), IF = 6.20 MHz id: (0000000000000000), size: 192
+ #
+
+ write_le32(0x68050060); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le16(6200); # IF
+ write_le32(192); # Size
+ write_hunk(306552, 192);
+
+ #
+ # Firmware 71, type: SCODE FW HAS IF (0x60000000), IF = 6.24 MHz id: PAL/I (0000000000000010), size: 192
+ #
+
+ write_le32(0x60000000); # Type
+ write_le64(0x00000000, 0x00000010); # ID
+ write_le16(6240); # IF
+ write_le32(192); # Size
+ write_hunk(305400, 192);
+
+ #
+ # Firmware 72, type: SCODE FW MONO HAS IF (0x60008000), IF = 6.32 MHz id: SECAM/K1 (0000000000200000), size: 192
+ #
+
+ write_le32(0x60008000); # Type
+ write_le64(0x00000000, 0x00200000); # ID
+ write_le16(6320); # IF
+ write_le32(192); # Size
+ write_hunk(308472, 192);
+
+ #
+ # Firmware 73, type: SCODE FW HAS IF (0x60000000), IF = 6.34 MHz id: SECAM/K1 (0000000000200000), size: 192
+ #
+
+ write_le32(0x60000000); # Type
+ write_le64(0x00000000, 0x00200000); # ID
+ write_le16(6340); # IF
+ write_le32(192); # Size
+ write_hunk(306360, 192);
+
+ #
+ # Firmware 74, type: SCODE FW MONO HAS IF (0x60008000), IF = 6.50 MHz id: PAL/DK SECAM/K3 SECAM/L NICAM (0000000c044000e0), size: 192
+ #
+
+ write_le32(0x60008000); # Type
+ write_le64(0x0000000c, 0x044000e0); # ID
+ write_le16(6500); # IF
+ write_le32(192); # Size
+ write_hunk(308280, 192);
+
+ #
+ # Firmware 75, type: SCODE FW DTV6 ATSC ATI638 HAS IF (0x60090020), IF = 6.58 MHz id: (0000000000000000), size: 192
+ #
+
+ write_le32(0x60090020); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le16(6580); # IF
+ write_le32(192); # Size
+ write_hunk(304632, 192);
+
+ #
+ # Firmware 76, type: SCODE FW HAS IF (0x60000000), IF = 6.60 MHz id: PAL/DK A2 (00000003000000e0), size: 192
+ #
+
+ write_le32(0x60000000); # Type
+ write_le64(0x00000003, 0x000000e0); # ID
+ write_le16(6600); # IF
+ write_le32(192); # Size
+ write_hunk(306168, 192);
+
+ #
+ # Firmware 77, type: SCODE FW MONO HAS IF (0x60008000), IF = 6.68 MHz id: PAL/DK A2 (00000003000000e0), size: 192
+ #
+
+ write_le32(0x60008000); # Type
+ write_le64(0x00000003, 0x000000e0); # ID
+ write_le16(6680); # IF
+ write_le32(192); # Size
+ write_hunk(308088, 192);
+
+ #
+ # Firmware 78, type: SCODE FW DTV6 ATSC TOYOTA794 HAS IF (0x60810020), IF = 8.14 MHz id: (0000000000000000), size: 192
+ #
+
+ write_le32(0x60810020); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le16(8140); # IF
+ write_le32(192); # Size
+ write_hunk(307320, 192);
+
+ #
+ # Firmware 79, type: SCODE FW HAS IF (0x60000000), IF = 8.20 MHz id: (0000000000000000), size: 192
+ #
+
+# write_le32(0x60000000); # Type
+# write_le64(0x00000000, 0x00000000); # ID
+# write_le16(8200); # IF
+# write_le32(192); # Size
+# write_hunk(308088, 192);
}
+sub main_firmware_27($$$$)
+{
+ my $out;
+ my $j=0;
+ my $outfile = shift;
+ my $name = shift;
+ my $version = shift;
+ my $nr_desc = shift;
+
+ for ($j = length($name); $j <32; $j++) {
+ $name = $name.chr(0);
+ }
+
open OUTFILE, ">$outfile";
syswrite(OUTFILE, $name);
write_le16($version);
@@ -906,20 +1678,39 @@ sub main_firmware($$$$)
write_hunk(812856, 192);
}
+
sub extract_firmware {
- my $sourcefile = "hcw85bda.sys";
- my $hash = "0e44dbf63bb0169d57446aec21881ff2";
- my $outfile = "xc3028-v27.fw";
- my $name = "xc2028 firmware";
- my $version = 519;
- my $nr_desc = 80;
+ my $sourcefile_24 = "UDXTTM6000.sys";
+ my $hash_24 = "cb9deb5508a5e150af2880f5b0066d78";
+ my $outfile_24 = "xc3028-v24.fw";
+ my $name_24 = "xc2028 firmware";
+ my $version_24 = 516;
+ my $nr_desc_24 = 77;
+ my $out;
+
+ my $sourcefile_27 = "hcw85bda.sys";
+ my $hash_27 = "0e44dbf63bb0169d57446aec21881ff2";
+ my $outfile_27 = "xc3028-v27.fw";
+ my $name_27 = "xc2028 firmware";
+ my $version_27 = 519;
+ my $nr_desc_27 = 80;
my $out;
- verify($sourcefile, $hash);
+ if (-e $sourcefile_24) {
+ verify($sourcefile_24, $hash_24);
+
+ open INFILE, "<$sourcefile_24";
+ main_firmware_24($outfile_24, $name_24, $version_24, $nr_desc_24);
+ close INFILE;
+ }
- open INFILE, "<$sourcefile";
- main_firmware($outfile, $name, $version, $nr_desc);
- close INFILE;
+ if (-e $sourcefile_27) {
+ verify($sourcefile_27, $hash_27);
+
+ open INFILE, "<$sourcefile_27";
+ main_firmware_27($outfile_27, $name_27, $version_27, $nr_desc_27);
+ close INFILE;
+ }
}
extract_firmware;
diff --git a/Documentation/video4linux/gspca.txt b/Documentation/video4linux/gspca.txt
index 181b9e6fd984..8f3f5d33327c 100644
--- a/Documentation/video4linux/gspca.txt
+++ b/Documentation/video4linux/gspca.txt
@@ -50,6 +50,8 @@ zc3xx 0458:700f Genius VideoCam Web V2
sonixj 0458:7025 Genius Eye 311Q
sn9c20x 0458:7029 Genius Look 320s
sonixj 0458:702e Genius Slim 310 NB
+sn9c20x 0458:704a Genius Slim 1320
+sn9c20x 0458:704c Genius i-Look 1321
sn9c20x 045e:00f4 LifeCam VX-6000 (SN9C20x + OV9650)
sonixj 045e:00f5 MicroSoft VX3000
sonixj 045e:00f7 MicroSoft VX1000
@@ -305,12 +307,14 @@ sonixj 0c45:6138 Sn9c120 Mo4000
sonixj 0c45:613a Microdia Sonix PC Camera
sonixj 0c45:613b Surfer SN-206
sonixj 0c45:613c Sonix Pccam168
+sonixj 0c45:6142 Hama PC-Webcam AC-150
sonixj 0c45:6143 Sonix Pccam168
sonixj 0c45:6148 Digitus DA-70811/ZSMC USB PC Camera ZS211/Microdia
sonixj 0c45:614a Frontech E-Ccam (JIL-2225)
sn9c20x 0c45:6240 PC Camera (SN9C201 + MT9M001)
sn9c20x 0c45:6242 PC Camera (SN9C201 + MT9M111)
sn9c20x 0c45:6248 PC Camera (SN9C201 + OV9655)
+sn9c20x 0c45:624c PC Camera (SN9C201 + MT9M112)
sn9c20x 0c45:624e PC Camera (SN9C201 + SOI968)
sn9c20x 0c45:624f PC Camera (SN9C201 + OV9650)
sn9c20x 0c45:6251 PC Camera (SN9C201 + OV9650)
@@ -323,6 +327,7 @@ sn9c20x 0c45:627f PC Camera (SN9C201 + OV9650)
sn9c20x 0c45:6280 PC Camera (SN9C202 + MT9M001)
sn9c20x 0c45:6282 PC Camera (SN9C202 + MT9M111)
sn9c20x 0c45:6288 PC Camera (SN9C202 + OV9655)
+sn9c20x 0c45:628c PC Camera (SN9C201 + MT9M112)
sn9c20x 0c45:628e PC Camera (SN9C202 + SOI968)
sn9c20x 0c45:628f PC Camera (SN9C202 + OV9650)
sn9c20x 0c45:62a0 PC Camera (SN9C202 + OV7670)
diff --git a/Documentation/video4linux/sh_mobile_ceu_camera.txt b/Documentation/video4linux/sh_mobile_ceu_camera.txt
index 2ae16349a78d..cb47e723af74 100644
--- a/Documentation/video4linux/sh_mobile_ceu_camera.txt
+++ b/Documentation/video4linux/sh_mobile_ceu_camera.txt
@@ -17,18 +17,18 @@ Generic scaling / cropping scheme
-2-- -\
| --\
| --\
-+-5-- -\ -- -3--
-| ---\
-| --- -4-- -\
-| -\
-| - -6--
++-5-- . -- -3-- -\
+| `... -\
+| `... -4-- . - -7..
+| `.
+| `. .6--
|
-| - -6'-
-| -/
-| --- -4'- -/
-| ---/
-+-5'- -/
-| -- -3'-
+| . .6'-
+| .´
+| ... -4'- .´
+| ...´ - -7'.
++-5'- .´ -/
+| -- -3'- -/
| --/
| --/
-2'- -/
@@ -36,7 +36,11 @@ Generic scaling / cropping scheme
|
-1'-
-Produced by user requests:
+In the above chart, minuses and slashes represent "real" data amounts, while
+points and accents represent "useful" data, basically the CEU scaled and
+cropped output, mapped back onto the client's source plane.
+
+Such a configuration can be produced by user requests:
S_CROP(left / top = (5) - (1), width / height = (5') - (5))
S_FMT(width / height = (6') - (6))
@@ -106,52 +110,30 @@ window:
S_CROP
------
-If old scale applied to new crop is invalid produce nearest new scale possible
-
-1. Calculate current combined scales.
-
- scale_comb = (((4') - (4)) / ((6') - (6))) * (((2') - (2)) / ((3') - (3)))
-
-2. Apply iterative sensor S_CROP for new input window.
-
-3. If old combined scales applied to new crop produce an impossible user window,
-adjust scales to produce nearest possible window.
-
- width_u_out = ((5') - (5)) / scale_comb
+The API at http://v4l2spec.bytesex.org/spec/x1904.htm says:
- if (width_u_out > max)
- scale_comb = ((5') - (5)) / max;
- else if (width_u_out < min)
- scale_comb = ((5') - (5)) / min;
+"...specification does not define an origin or units. However by convention
+drivers should horizontally count unscaled samples relative to 0H."
-4. Issue G_CROP to retrieve actual input window.
+We choose to follow this advice and interpret cropping units as client input
+pixels.
-5. Using actual input window and calculated combined scales calculate sensor
-target output window.
-
- width_s_out = ((3') - (3)) = ((2') - (2)) / scale_comb
-
-6. Apply iterative S_FMT for new sensor target output window.
-
-7. Issue G_FMT to retrieve the actual sensor output window.
-
-8. Calculate sensor scales.
-
- scale_s = ((3') - (3)) / ((2') - (2))
+Cropping is performed in the following 6 steps:
-9. Calculate sensor output subwindow to be cropped on CEU by applying sensor
-scales to the requested window.
+1. Request exactly the user rectangle from the sensor.
- width_ceu = ((5') - (5)) / scale_s
+2. If the result is smaller, iterate until a larger one is obtained. Result:
+ sensor cropped to 2 : 2', target crop 5 : 5', current output format 6' - 6.
-10. Use CEU cropping for above calculated window.
+3. In the previous step the sensor has tried to preserve its output frame as
+ well as possible, but it could have changed. Retrieve it again.
-11. Calculate CEU scales from sensor scales from results of (10) and user window
-from (3)
+4. Sensor scaled to 3 : 3'. Sensor's scale is (2' - 2) / (3' - 3). Calculate
+ intermediate window: 4' - 4 = (5' - 5) * (3' - 3) / (2' - 2)
- scale_ceu = calc_scale(((5') - (5)), &width_u_out)
+5. Calculate and apply host scale = (6' - 6) / (4' - 4)
-12. Apply CEU scales.
+6. Calculate and apply host crop: 6 - 7 = (5 - 2) * (6' - 6) / (5' - 5)
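+
+For example, with made-up numbers: a sensor crop (2 : 2') of 2592 pixels, a
+sensor output (3 : 3') of 1296 pixels, a user crop (5 : 5') of 640 pixels and
+a user format (6 : 6') of 160 pixels give a sensor scale of 2592 / 1296 = 2,
+an intermediate window (4 : 4') of 640 * 1296 / 2592 = 320 pixels and a host
+scale of 160 / 320 = 1/2. If the user crop starts 512 client pixels into the
+sensor crop, the host crop offset becomes 512 * 160 / 640 = 128 pixels.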
--
Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
diff --git a/Documentation/video4linux/v4l2-framework.txt b/Documentation/video4linux/v4l2-framework.txt
index 5155700c206b..e831aaca66f8 100644
--- a/Documentation/video4linux/v4l2-framework.txt
+++ b/Documentation/video4linux/v4l2-framework.txt
@@ -545,12 +545,11 @@ unregister them:
This will remove the device nodes from sysfs (causing udev to remove them
from /dev).
-After video_unregister_device() returns no new opens can be done.
-
-However, in the case of USB devices some application might still have one
-of these device nodes open. You should block all new accesses to read,
-write, poll, etc. except possibly for certain ioctl operations like
-queueing buffers.
+After video_unregister_device() returns, no new opens can be done. However,
+in the case of USB devices some application might still have one of these
+device nodes open. So after unregistering, all file operations will return
+an error as well, except for the ioctl and unlocked_ioctl file operations:
+those will still be passed on, since some buffer ioctls may still be needed.
When the last user of the video device node exits, then the vdev->release()
callback is called and you can do the final cleanup there.
@@ -609,3 +608,135 @@ scatter/gather method (videobuf-dma-sg), DMA with linear access
Please see Documentation/video4linux/videobuf for more information on how
to use the videobuf layer.
+
+struct v4l2_fh
+--------------
+
+struct v4l2_fh provides a way to easily keep file handle specific data
+that is used by the V4L2 framework. Using v4l2_fh is optional for
+drivers.
+
+The users of v4l2_fh (in the V4L2 framework, not the driver) know
+whether a driver uses v4l2_fh as its file->private_data pointer by
+testing the V4L2_FL_USES_V4L2_FH bit in video_device->flags.
+
+Useful functions:
+
+- v4l2_fh_init()
+
+ Initialise the file handle. This *MUST* be performed in the driver's
+ v4l2_file_operations->open() handler.
+
+- v4l2_fh_add()
+
+ Add a v4l2_fh to the video_device's file handle list. May be called after
+ initialising the file handle.
+
+- v4l2_fh_del()
+
+ Unassociate the file handle from the video_device. The file handle
+ exit function may now be called.
+
+- v4l2_fh_exit()
+
+ Uninitialise the file handle. After uninitialisation the v4l2_fh
+ memory can be freed.
+
+struct v4l2_fh is allocated as part of the driver's own file handle
+structure and is set to file->private_data in the driver's open
+function. Drivers can extract their own file handle structure by
+using the container_of macro. Example:
+
+struct my_fh {
+ int blah;
+ struct v4l2_fh fh;
+};
+
+...
+
+int my_open(struct file *file)
+{
+ struct my_fh *my_fh;
+ struct video_device *vfd;
+ int ret;
+
+ ...
+
+ ret = v4l2_fh_init(&my_fh->fh, vfd);
+ if (ret)
+ return ret;
+
+ v4l2_fh_add(&my_fh->fh);
+
+ file->private_data = &my_fh->fh;
+
+ ...
+}
+
+int my_release(struct file *file)
+{
+ struct v4l2_fh *fh = file->private_data;
+ struct my_fh *my_fh = container_of(fh, struct my_fh, fh);
+
+ ...
+}
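+
+In my_release() the elided part would typically undo the open sequence; a
+sketch only, assuming my_fh itself was allocated dynamically in my_open():
+
+	v4l2_fh_del(&my_fh->fh);	/* unassociate from the video_device */
+	v4l2_fh_exit(&my_fh->fh);	/* uninitialise the file handle */
+	kfree(my_fh);			/* its memory may now be freed */
+	return 0;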
+
+V4L2 events
+-----------
+
+The V4L2 events provide a generic way to pass events to user space.
+The driver must use v4l2_fh to be able to support V4L2 events.
+
+Useful functions:
+
+- v4l2_event_alloc()
+
+ To use events, the driver must allocate events for the file handle. By
+ calling the function more than once, the driver can ensure that at least n
+ events in total have been allocated. The function may not be called in
+ atomic context.
+
+- v4l2_event_queue()
+
+ Queue events to video device. The driver's only responsibility is to fill
+ in the type and the data fields. The other fields will be filled in by
+ V4L2.
+
+- v4l2_event_subscribe()
+
+ The video_device->ioctl_ops->vidioc_subscribe_event handler must check that
+ the driver is able to produce events with the specified event id. Then it
+ calls v4l2_event_subscribe() to subscribe to the event.
+
+- v4l2_event_unsubscribe()
+
+ vidioc_unsubscribe_event in struct v4l2_ioctl_ops. A driver may call
+ v4l2_event_unsubscribe() directly unless it wants to be involved in the
+ unsubscription process.
+
+ The special type V4L2_EVENT_ALL may be used to unsubscribe all events.
+ Drivers may want to handle this in a special way.
+
+- v4l2_event_pending()
+
+ Returns the number of pending events. Useful when implementing poll.
+
+Drivers do not initialise events directly. The events are initialised
+through v4l2_fh_init() if video_device->ioctl_ops->vidioc_subscribe_event is
+non-NULL. This *MUST* be performed in the driver's
+v4l2_file_operations->open() handler.
+
+Events are delivered to user space through the poll system call. The driver
+can use the v4l2_fh->events->wait wait_queue_head_t as the argument for
+poll_wait().
+
+There are standard and private events. New standard events must use the
+smallest available event type. Drivers must allocate their private events
+from their own class, starting from the class base. The class base is
+V4L2_EVENT_PRIVATE_START + n * 1000 where n is the lowest available number.
+The first event type in the class is reserved for future use, so the first
+available event type is 'class base + 1'.
+
+An example of how the V4L2 events may be used can be found in the OMAP
+3 ISP driver, available at <URL:http://gitorious.org/omap3camera> as of
+this writing.
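+
+A rough example of how these pieces might fit together is sketched below.
+The event type MY_EVENT_X, the function names and the numbers chosen for
+the class base are made up for illustration; the calls follow the
+descriptions above.
+
+#define MY_EVENT_CLASS_BASE	(V4L2_EVENT_PRIVATE_START + 0 * 1000)
+#define MY_EVENT_X		(MY_EVENT_CLASS_BASE + 1)  /* base itself is reserved */
+
+static int my_subscribe_event(struct v4l2_fh *fh,
+			      struct v4l2_event_subscription *sub)
+{
+	int ret;
+
+	if (sub->type != MY_EVENT_X)	/* only events this driver can produce */
+		return -EINVAL;
+
+	/* make sure at least a few events are allocated for this handle */
+	ret = v4l2_event_alloc(fh, 8);
+	if (ret)
+		return ret;
+
+	return v4l2_event_subscribe(fh, sub);
+}
+
+static unsigned int my_poll(struct file *file, poll_table *wait)
+{
+	struct v4l2_fh *fh = file->private_data;
+
+	poll_wait(file, &fh->events->wait, wait);
+	return v4l2_event_pending(fh) ? POLLPRI : 0;
+}
+
+static void my_report_event(struct video_device *vdev)
+{
+	struct v4l2_event ev;
+
+	memset(&ev, 0, sizeof(ev));
+	ev.type = MY_EVENT_X;		/* the driver fills in type and data */
+	v4l2_event_queue(vdev, &ev);
+}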
diff --git a/Documentation/vm/numa_memory_policy.txt b/Documentation/vm/numa_memory_policy.txt
index be45dbb9d7f2..6690fc34ef6d 100644
--- a/Documentation/vm/numa_memory_policy.txt
+++ b/Documentation/vm/numa_memory_policy.txt
@@ -45,7 +45,7 @@ most general to most specific:
to establish the task policy for a child task exec()'d from an
executable image that has no awareness of memory policy. See the
MEMORY POLICY APIS section, below, for an overview of the system call
- that a task may use to set/change it's task/process policy.
+ that a task may use to set/change its task/process policy.
In a multi-threaded task, task policies apply only to the thread
[Linux kernel task] that installs the policy and any threads
@@ -301,7 +301,7 @@ decrement this reference count, respectively. mpol_put() will only free
the structure back to the mempolicy kmem cache when the reference count
goes to zero.
-When a new memory policy is allocated, it's reference count is initialized
+When a new memory policy is allocated, its reference count is initialized
to '1', representing the reference held by the task that is installing the
new policy. When a pointer to a memory policy structure is stored in another
structure, another reference is added, as the task's reference will be dropped
diff --git a/Documentation/w1/w1.generic b/Documentation/w1/w1.generic
index e3333eec4320..212f4ac31c01 100644
--- a/Documentation/w1/w1.generic
+++ b/Documentation/w1/w1.generic
@@ -25,7 +25,7 @@ When a w1 master driver registers with the w1 subsystem, the following occurs:
- sysfs entries for that w1 master are created
- the w1 bus is periodically searched for new slave devices
-When a device is found on the bus, w1 core checks if driver for it's family is
+When a device is found on the bus, w1 core checks if driver for its family is
loaded. If so, the family driver is attached to the slave.
If there is no driver for the family, default one is assigned, which allows to perform
almost any kind of operations. Each logical operation is a transaction
diff --git a/Documentation/watchdog/00-INDEX b/Documentation/watchdog/00-INDEX
index c3ea47e507fe..ee994513a9b1 100644
--- a/Documentation/watchdog/00-INDEX
+++ b/Documentation/watchdog/00-INDEX
@@ -1,10 +1,15 @@
00-INDEX
- this file.
+hpwdt.txt
+ - information on the HP iLO2 NMI watchdog
pcwd-watchdog.txt
- documentation for Berkshire Products PC Watchdog ISA cards.
src/
- directory holding watchdog related example programs.
watchdog-api.txt
- description of the Linux Watchdog driver API.
+watchdog-parameters.txt
+ - information on driver parameters (for drivers other than
+ the ones that have driver-specific files here)
wdt.txt
- description of the Watchdog Timer Interfaces for Linux.
diff --git a/Documentation/watchdog/watchdog-parameters.txt b/Documentation/watchdog/watchdog-parameters.txt
new file mode 100644
index 000000000000..41c95cc1dc1f
--- /dev/null
+++ b/Documentation/watchdog/watchdog-parameters.txt
@@ -0,0 +1,390 @@
+This file provides information on the module parameters of many of
+the Linux watchdog drivers. Watchdog driver parameter specs should
+be listed here unless the driver has its own driver-specific information
+file.
+
+
+See Documentation/kernel-parameters.txt for information on
+providing kernel parameters for builtin drivers versus loadable
+modules.
+
+
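+For example, the softdog driver listed below could be loaded as a module
+with (values chosen purely for illustration):
+	modprobe softdog soft_margin=120 nowayout=1
+or, when built into the kernel, configured from the kernel command line with:
+	softdog.soft_margin=120 softdog.nowayout=1
+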
+-------------------------------------------------
+acquirewdt:
+wdt_stop: Acquire WDT 'stop' io port (default 0x43)
+wdt_start: Acquire WDT 'start' io port (default 0x443)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+advantechwdt:
+wdt_stop: Advantech WDT 'stop' io port (default 0x443)
+wdt_start: Advantech WDT 'start' io port (default 0x443)
+timeout: Watchdog timeout in seconds. 1<= timeout <=63, default=60.
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+alim1535_wdt:
+timeout: Watchdog timeout in seconds. (0 < timeout < 18000, default=60)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+alim7101_wdt:
+timeout: Watchdog timeout in seconds. (1<=timeout<=3600, default=30)
+use_gpio: Use the gpio watchdog (required by old cobalt boards).
+ default=0/off/no
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+ar7_wdt:
+margin: Watchdog margin in seconds (default=60)
+nowayout: Disable watchdog shutdown on close
+ (default=kernel config parameter)
+-------------------------------------------------
+at32ap700x_wdt:
+timeout: Timeout value. Limited to be 1 or 2 seconds. (default=2)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+at91rm9200_wdt:
+wdt_time: Watchdog time in seconds. (default=5)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+at91sam9_wdt:
+heartbeat: Watchdog heartbeats in seconds. (default = 15)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+bcm47xx_wdt:
+wdt_time: Watchdog time in seconds. (default=30)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+bfin_wdt:
+timeout: Watchdog timeout in seconds. (1<=timeout<=((2^32)/SCLK), default=20)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+coh901327_wdt:
+margin: Watchdog margin in seconds (default 60s)
+-------------------------------------------------
+cpu5wdt:
+port: base address of watchdog card, default is 0x91
+verbose: be verbose, default is 0 (no)
+ticks: count down ticks, default is 10000
+-------------------------------------------------
+cpwd:
+wd0_timeout: Default watchdog0 timeout in 1/10secs
+wd1_timeout: Default watchdog1 timeout in 1/10secs
+wd2_timeout: Default watchdog2 timeout in 1/10secs
+-------------------------------------------------
+davinci_wdt:
+heartbeat: Watchdog heartbeat period in seconds from 1 to 600, default 60
+-------------------------------------------------
+ep93xx_wdt:
+nowayout: Watchdog cannot be stopped once started
+timeout: Watchdog timeout in seconds. (1<=timeout<=3600, default=TBD)
+-------------------------------------------------
+eurotechwdt:
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+io: Eurotech WDT io port (default=0x3f0)
+irq: Eurotech WDT irq (default=10)
+ev: Eurotech WDT event type (default is `int')
+-------------------------------------------------
+gef_wdt:
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+geodewdt:
+timeout: Watchdog timeout in seconds. 1<= timeout <=131, default=60.
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+i6300esb:
+heartbeat: Watchdog heartbeat in seconds. (1<heartbeat<2046, default=30)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+iTCO_wdt:
+heartbeat: Watchdog heartbeat in seconds.
+ (2<heartbeat<39 (TCO v1) or 613 (TCO v2), default=30)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+iTCO_vendor_support:
+vendorsupport: iTCO vendor specific support mode, default=0 (none),
+ 1=SuperMicro Pent3, 2=SuperMicro Pent4+, 911=Broken SMI BIOS
+-------------------------------------------------
+ib700wdt:
+timeout: Watchdog timeout in seconds. 0<= timeout <=30, default=30.
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+ibmasr:
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+indydog:
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+iop_wdt:
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+it8712f_wdt:
+margin: Watchdog margin in seconds (default 60)
+nowayout: Disable watchdog shutdown on close
+ (default=kernel config parameter)
+-------------------------------------------------
+it87_wdt:
+nogameport: Forbid the activation of game port, default=0
+exclusive: Watchdog exclusive device open, default=1
+timeout: Watchdog timeout in seconds, default=60
+testmode: Watchdog test mode (1 = no reboot), default=0
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+ixp2000_wdt:
+heartbeat: Watchdog heartbeat in seconds (default 60s)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+ixp4xx_wdt:
+heartbeat: Watchdog heartbeat in seconds (default 60s)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+ks8695_wdt:
+wdt_time: Watchdog time in seconds. (default=5)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+machzwd:
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+action: after watchdog resets, generate:
+ 0 = RESET(*) 1 = SMI 2 = NMI 3 = SCI
+-------------------------------------------------
+max63xx_wdt:
+heartbeat: Watchdog heartbeat period in seconds from 1 to 60, default 60
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+nodelay: Force selection of a timeout setting without initial delay
+ (max6373/74 only, default=0)
+-------------------------------------------------
+mixcomwd:
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+mpc8xxx_wdt:
+timeout: Watchdog timeout in ticks. (0<timeout<65536, default=65535)
+reset: Watchdog Interrupt/Reset Mode. 0 = interrupt, 1 = reset
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+mpcore_wdt:
+mpcore_margin: MPcore timer margin in seconds.
+ (0 < mpcore_margin < 65536, default=60)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+mpcore_noboot: MPcore watchdog action, set to 1 to ignore reboots,
+ 0 to reboot (default=0)
+-------------------------------------------------
+mv64x60_wdt:
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+nuc900_wdt:
+heartbeat: Watchdog heartbeats in seconds.
+ (default = 15)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+omap_wdt:
+timer_margin: initial watchdog timeout (in seconds)
+-------------------------------------------------
+orion_wdt:
+heartbeat: Initial watchdog heartbeat in seconds
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+pc87413_wdt:
+io: pc87413 WDT I/O port (default: io).
+timeout: Watchdog timeout in minutes (default=timeout).
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+pika_wdt:
+heartbeat: Watchdog heartbeats in seconds. (default = 15)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+pnx4008_wdt:
+heartbeat: Watchdog heartbeat period in seconds from 1 to 60, default 19
+nowayout: Set to 1 to keep watchdog running after device release
+-------------------------------------------------
+pnx833x_wdt:
+timeout: Watchdog timeout in ticks of the 68MHz clock, default=2040000000 (30 seconds)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+start_enabled: Watchdog is started on module insertion (default=1)
+-------------------------------------------------
+rc32434_wdt:
+timeout: Watchdog timeout value, in seconds (default=20)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+riowd:
+riowd_timeout: Watchdog timeout in minutes (default=1)
+-------------------------------------------------
+s3c2410_wdt:
+tmr_margin: Watchdog tmr_margin in seconds. (default=15)
+tmr_atboot: Watchdog is started at boot time if set to 1, default=0
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+soft_noboot: Watchdog action, set to 1 to ignore reboots, 0 to reboot
+debug: Watchdog debug, set to >1 for debug, (default 0)
+-------------------------------------------------
+sa1100_wdt:
+margin: Watchdog margin in seconds (default 60s)
+-------------------------------------------------
+sb_wdog:
+timeout: Watchdog timeout in microseconds (max/default 8388607 or 8.3ish secs)
+-------------------------------------------------
+sbc60xxwdt:
+wdt_stop: SBC60xx WDT 'stop' io port (default 0x45)
+wdt_start: SBC60xx WDT 'start' io port (default 0x443)
+timeout: Watchdog timeout in seconds. (1<=timeout<=3600, default=30)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+sbc7240_wdt:
+timeout: Watchdog timeout in seconds. (1<=timeout<=255, default=30)
+nowayout: Disable watchdog when closing device file
+-------------------------------------------------
+sbc8360:
+timeout: Index into timeout table (0-63) (default=27 (60s))
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+sbc_epx_c3:
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+sbc_fitpc2_wdt:
+margin: Watchdog margin in seconds (default 60s)
+nowayout: Watchdog cannot be stopped once started
+-------------------------------------------------
+sc1200wdt:
+isapnp: When set to 0 driver ISA PnP support will be disabled (default=1)
+io: io port
+timeout: range is 0-255 minutes, default is 1
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+sc520_wdt:
+timeout: Watchdog timeout in seconds. (1 <= timeout <= 3600, default=30)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+sch311x_wdt:
+force_id: Override the detected device ID
+therm_trip: Should a ThermTrip trigger the reset generator
+timeout: Watchdog timeout in seconds. 1<= timeout <=15300, default=60
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+scx200_wdt:
+margin: Watchdog margin in seconds
+nowayout: Disable watchdog shutdown on close
+-------------------------------------------------
+shwdt:
+clock_division_ratio: Clock division ratio. Valid ranges are from 0x5 (1.31ms)
+ to 0x7 (5.25ms). (default=7)
+heartbeat: Watchdog heartbeat in seconds. (1 <= heartbeat <= 3600, default=30)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+smsc37b787_wdt:
+timeout: range is 1-255 units, default is 60
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+softdog:
+soft_margin: Watchdog soft_margin in seconds.
+ (0 < soft_margin < 65536, default=60)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+soft_noboot: Softdog action, set to 1 to ignore reboots, 0 to reboot
+ (default=0)
+-------------------------------------------------
+stmp3xxx_wdt:
+heartbeat: Watchdog heartbeat period in seconds from 1 to 4194304, default 19
+-------------------------------------------------
+ts72xx_wdt:
+timeout: Watchdog timeout in seconds. (1 <= timeout <= 8, default=8)
+nowayout: Disable watchdog shutdown on close
+-------------------------------------------------
+twl4030_wdt:
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+txx9wdt:
+timeout: Watchdog timeout in seconds. (0<timeout<N, default=60)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+w83627hf_wdt:
+wdt_io: w83627hf/thf WDT io port (default 0x2E)
+timeout: Watchdog timeout in seconds. 1 <= timeout <= 255, default=60.
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+w83697hf_wdt:
+wdt_io: w83697hf/hg WDT io port (default 0x2e, 0 = autodetect)
+timeout: Watchdog timeout in seconds. 1<= timeout <=255 (default=60)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+early_disable: Watchdog gets disabled at boot time (default=1)
+-------------------------------------------------
+w83697ug_wdt:
+wdt_io: w83697ug/uf WDT io port (default 0x2e)
+timeout: Watchdog timeout in seconds. 1<= timeout <=255 (default=60)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+w83877f_wdt:
+timeout: Watchdog timeout in seconds. (1<=timeout<=3600, default=30)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+w83977f_wdt:
+timeout: Watchdog timeout in seconds (15..7635, default=45)
+testmode: Watchdog testmode (1 = no reboot), default=0
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+wafer5823wdt:
+timeout: Watchdog timeout in seconds. 1 <= timeout <= 255, default=60.
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+wdt285:
+soft_margin: Watchdog timeout in seconds (default=60)
+-------------------------------------------------
+wdt977:
+timeout: Watchdog timeout in seconds (60..15300, default=60)
+testmode: Watchdog testmode (1 = no reboot), default=0
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+wm831x_wdt:
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+wm8350_wdt:
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
diff --git a/Documentation/watchdog/wdt.txt b/Documentation/watchdog/wdt.txt
index 03fd756d976d..061c2e35384f 100644
--- a/Documentation/watchdog/wdt.txt
+++ b/Documentation/watchdog/wdt.txt
@@ -14,14 +14,22 @@ reboot will depend on the state of the machines and interrupts. The hardware
boards physically pull the machine down off their own onboard timers and
will reboot from almost anything.
-A second temperature monitoring interface is available on the WDT501P cards
+A second temperature monitoring interface is available on the WDT501P cards.
This provides /dev/temperature. This is the machine internal temperature in
degrees Fahrenheit. Each read returns a single byte giving the temperature.
The third interface logs kernel messages on additional alert events.
-The wdt card cannot be safely probed for. Instead you need to pass
-wdt=ioaddr,irq as a boot parameter - eg "wdt=0x240,11".
+The ICS ISA-bus wdt card cannot be safely probed for. Instead you need to
+pass IO address and IRQ boot parameters. E.g.:
+ wdt.io=0x240 wdt.irq=11
+
+Other "wdt" driver parameters are:
+ heartbeat Watchdog heartbeat in seconds (default 60)
+ nowayout Watchdog cannot be stopped once started (kernel
+ build parameter)
+ tachometer WDT501-P Fan Tachometer support (0=disable, default=0)
+ type WDT501-P Card type (500 or 501, default=500)
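+
+When the driver is built as a module, the same parameters are passed without
+the "wdt." prefix, e.g. (illustrative values):
+	modprobe wdt io=0x240 irq=11 heartbeat=60
+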
Features
--------
@@ -40,4 +48,3 @@ Minor numbers are however allocated for it.
Example Watchdog Driver: see Documentation/watchdog/src/watchdog-simple.c
-
diff --git a/MAINTAINERS b/MAINTAINERS
index a4035d7266bf..9a4564961915 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -131,19 +131,12 @@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/typhoon*
-3W-9XXX SATA-RAID CONTROLLER DRIVER
-M: Adam Radford <linuxraid@amcc.com>
+3WARE SAS/SATA-RAID SCSI DRIVERS (3W-XXXX, 3W-9XXX, 3W-SAS)
+M: Adam Radford <linuxraid@lsi.com>
L: linux-scsi@vger.kernel.org
-W: http://www.amcc.com
+W: http://www.lsi.com
S: Supported
-F: drivers/scsi/3w-9xxx*
-
-3W-XXXX ATA-RAID CONTROLLER DRIVER
-M: Adam Radford <linuxraid@amcc.com>
-L: linux-scsi@vger.kernel.org
-W: http://www.amcc.com
-S: Supported
-F: drivers/scsi/3w-xxxx*
+F: drivers/scsi/3w-*
53C700 AND 53C700-66 SCSI DRIVER
M: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
@@ -586,6 +579,12 @@ F: drivers/mtd/nand/bcm_umi_bch.c
F: drivers/mtd/nand/bcm_umi_hamming.c
F: drivers/mtd/nand/nand_bcm_umi.h
+ARM/CAVIUM NETWORKS CNS3XXX MACHINE SUPPORT
+M: Anton Vorontsov <avorontsov@mvista.com>
+S: Maintained
+F: arch/arm/mach-cns3xxx/
+T: git git://git.infradead.org/users/cbou/linux-cns3xxx.git
+
ARM/CIRRUS LOGIC EP93XX ARM ARCHITECTURE
M: Hartley Sweeten <hsweeten@visionengravers.com>
M: Ryan Mallon <ryan@bluewatersys.com>
@@ -814,6 +813,7 @@ ARM/QUALCOMM MSM MACHINE SUPPORT
M: David Brown <davidb@codeaurora.org>
M: Daniel Walker <dwalker@codeaurora.org>
M: Bryan Huntsman <bryanh@codeaurora.org>
+L: linux-arm-msm@vger.kernel.org
F: arch/arm/mach-msm/
F: drivers/video/msm/
F: drivers/mmc/host/msm_sdcc.c
@@ -949,8 +949,9 @@ ARM/SHMOBILE ARM ARCHITECTURE
M: Paul Mundt <lethal@linux-sh.org>
M: Magnus Damm <magnus.damm@gmail.com>
L: linux-sh@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/lethal/genesis-2.6.git
W: http://oss.renesas.com
+Q: http://patchwork.kernel.org/project/linux-sh/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/lethal/genesis-2.6.git
S: Supported
F: arch/arm/mach-shmobile/
F: drivers/sh/
@@ -1501,9 +1502,10 @@ M: Andy Whitcroft <apw@canonical.com>
S: Supported
F: scripts/checkpatch.pl
-CISCO 10G ETHERNET DRIVER
+CISCO VIC ETHERNET NIC DRIVER
M: Scott Feldman <scofeldm@cisco.com>
-M: Joe Eykholt <jeykholt@cisco.com>
+M: Vasanthy Kolluri <vkolluri@cisco.com>
+M: Roopa Prabhu <roprabhu@cisco.com>
S: Supported
F: drivers/net/enic/
@@ -1729,6 +1731,20 @@ W: http://www.openfabrics.org
S: Supported
F: drivers/infiniband/hw/cxgb3/
+CXGB4 ETHERNET DRIVER (CXGB4)
+M: Dimitris Michailidis <dm@chelsio.com>
+L: netdev@vger.kernel.org
+W: http://www.chelsio.com
+S: Supported
+F: drivers/net/cxgb4/
+
+CXGB4 IWARP RNIC DRIVER (IW_CXGB4)
+M: Steve Wise <swise@chelsio.com>
+L: linux-rdma@vger.kernel.org
+W: http://www.openfabrics.org
+S: Supported
+F: drivers/infiniband/hw/cxgb4/
+
CYBERPRO FB DRIVER
M: Russell King <linux@arm.linux.org.uk>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -2667,16 +2683,12 @@ F: Documentation/timers/hpet.txt
F: drivers/char/hpet.c
F: include/linux/hpet.h
-HPET: i386
-M: "Venkatesh Pallipadi (Venki)" <venkatesh.pallipadi@intel.com>
+HPET: x86
+M: "Venkatesh Pallipadi (Venki)" <venki@google.com>
S: Maintained
F: arch/x86/kernel/hpet.c
F: arch/x86/include/asm/hpet.h
-HPET: x86_64
-M: Vojtech Pavlik <vojtech@suse.cz>
-S: Maintained
-
HPET: ACPI
M: Bob Picco <bob.picco@hp.com>
S: Maintained
@@ -2953,6 +2965,17 @@ S: Odd Fixes
F: Documentation/networking/README.ipw2200
F: drivers/net/wireless/ipw2x00/ipw2200.*
+INTEL(R) TRUSTED EXECUTION TECHNOLOGY (TXT)
+M: Joseph Cihula <joseph.cihula@intel.com>
+M: Shane Wang <shane.wang@intel.com>
+L: tboot-devel@lists.sourceforge.net
+W: http://tboot.sourceforge.net
+T: Mercurial http://www.bughost.org/repos.hg/tboot.hg
+S: Supported
+F: Documentation/intel_txt.txt
+F: include/linux/tboot.h
+F: arch/x86/kernel/tboot.c
+
INTEL WIRELESS WIMAX CONNECTION 2400
M: Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
M: linux-wimax@intel.com
@@ -2994,6 +3017,12 @@ L: linux-mips@linux-mips.org
S: Maintained
F: drivers/serial/ioc3_serial.c
+IOQ LIBRARY
+M: Gregory Haskins <ghaskins@novell.com>
+S: Maintained
+F: include/linux/ioq.h
+F: lib/ioq.c
+
IP MASQUERADING
M: Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar>
S: Maintained
@@ -3002,10 +3031,9 @@ F: net/ipv4/netfilter/ipt_MASQUERADE.c
IP1000A 10/100/1000 GIGABIT ETHERNET DRIVER
M: Francois Romieu <romieu@fr.zoreil.com>
M: Sorbica Shieh <sorbica@icplus.com.tw>
-M: Jesse Huang <jesse@icplus.com.tw>
L: netdev@vger.kernel.org
S: Maintained
-F: drivers/net/ipg.c
+F: drivers/net/ipg.*
IPATH DRIVER
M: Ralph Campbell <infinipath@qlogic.com>
@@ -3188,7 +3216,7 @@ L: autofs@linux.kernel.org
S: Maintained
F: fs/autofs4/
-KERNEL BUILD
+KERNEL BUILD + files below scripts/ (unless maintained elsewhere)
M: Michal Marek <mmarek@suse.cz>
T: git git://repo.or.cz/linux-kbuild.git for-next
T: git git://repo.or.cz/linux-kbuild.git for-linus
@@ -3197,6 +3225,9 @@ S: Maintained
F: Documentation/kbuild/
F: Makefile
F: scripts/Makefile.*
+F: scripts/basic/
+F: scripts/mk*
+F: scripts/package/
KERNEL JANITORS
L: kernel-janitors@vger.kernel.org
@@ -3284,15 +3315,17 @@ F: include/linux/key-type.h
F: include/keys/
F: security/keys/
-KGDB
+KGDB / KDB /debug_core
M: Jason Wessel <jason.wessel@windriver.com>
+W: http://kgdb.wiki.kernel.org/
L: kgdb-bugreport@lists.sourceforge.net
S: Maintained
F: Documentation/DocBook/kgdb.tmpl
F: drivers/misc/kgdbts.c
F: drivers/serial/kgdboc.c
+F: include/linux/kdb.h
F: include/linux/kgdb.h
-F: kernel/kgdb.c
+F: kernel/debug/
KMEMCHECK
M: Vegard Nossum <vegardno@ifi.uio.no>
@@ -3852,7 +3885,6 @@ M: Ramkrishna Vepa <ram.vepa@neterion.com>
M: Rastapur Santosh <santosh.rastapur@neterion.com>
M: Sivakumar Subramani <sivakumar.subramani@neterion.com>
M: Sreenivasa Honnur <sreenivasa.honnur@neterion.com>
-M: Anil Murthy <anil.murthy@neterion.com>
L: netdev@vger.kernel.org
W: http://trac.neterion.com/cgi-bin/trac.cgi/wiki/Linux?Anonymous
W: http://trac.neterion.com/cgi-bin/trac.cgi/wiki/X3100Linux?Anonymous
@@ -3957,6 +3989,7 @@ F: net/rfkill/
F: net/wireless/
F: include/net/ieee80211*
F: include/linux/wireless.h
+F: include/linux/iw_handler.h
F: drivers/net/wireless/
NETWORKING DRIVERS
@@ -4354,13 +4387,13 @@ M: Paul Mackerras <paulus@samba.org>
M: Ingo Molnar <mingo@elte.hu>
M: Arnaldo Carvalho de Melo <acme@redhat.com>
S: Supported
-F: kernel/perf_event.c
+F: kernel/perf_event*.c
F: include/linux/perf_event.h
-F: arch/*/kernel/perf_event.c
-F: arch/*/kernel/*/perf_event.c
-F: arch/*/kernel/*/*/perf_event.c
+F: arch/*/kernel/perf_event*.c
+F: arch/*/kernel/*/perf_event*.c
+F: arch/*/kernel/*/*/perf_event*.c
F: arch/*/include/asm/perf_event.h
-F: arch/*/lib/perf_event.c
+F: arch/*/lib/perf_event*.c
F: arch/*/kernel/perf_callchain.c
F: tools/perf/
@@ -4578,6 +4611,14 @@ S: Supported
F: Documentation/scsi/LICENSE.qla2xxx
F: drivers/scsi/qla2xxx/
+QLOGIC QLA4XXX iSCSI DRIVER
+M: Ravi Anand <ravi.anand@qlogic.com>
+M: Vikas Chaudhary <vikas.chaudhary@qlogic.com>
+M: iscsi-driver@qlogic.com
+L: linux-scsi@vger.kernel.org
+S: Supported
+F: drivers/scsi/qla4xxx/
+
QLOGIC QLA3XXX NETWORK DRIVER
M: Ron Mercer <ron.mercer@qlogic.com>
M: linux-driver@qlogic.com
@@ -4717,6 +4758,12 @@ S: Maintained
F: Documentation/rfkill.txt
F: net/rfkill/
+RICOH SMARTMEDIA/XD DRIVER
+M: Maxim Levitsky <maximlevitsky@gmail.com>
+S: Maintained
+F: drivers/mtd/nand/r852.c
+F: drivers/mtd/nand/r852.h
+
RISCOM8 DRIVER
S: Orphan
F: Documentation/serial/riscom8.txt
@@ -5038,6 +5085,12 @@ F: drivers/serial/serial_lh7a40x.c
F: drivers/usb/gadget/lh7a40*
F: drivers/usb/host/ohci-lh7a40*
+SHM-SIGNAL LIBRARY
+M: Gregory Haskins <ghaskins@novell.com>
+S: Maintained
+F: include/linux/shm_signal.h
+F: lib/shm_signal.c
+
SIMPLE FIRMWARE INTERFACE (SFI)
M: Len Brown <lenb@kernel.org>
L: sfi-devel@simplefirmware.org
@@ -5253,6 +5306,46 @@ F: drivers/serial/sunsu.c
F: drivers/serial/sunzilog.c
F: drivers/serial/sunzilog.h
+SPEAR PLATFORM SUPPORT
+M: Viresh Kumar <viresh.kumar@st.com>
+W: http://www.st.com/spear
+S: Maintained
+F: arch/arm/plat-spear/
+
+SPEAR3XX MACHINE SUPPORT
+M: Viresh Kumar <viresh.kumar@st.com>
+W: http://www.st.com/spear
+S: Maintained
+F: arch/arm/mach-spear3xx/
+
+SPEAR6XX MACHINE SUPPORT
+M: Rajeev Kumar <rajeev-dlh.kumar@st.com>
+W: http://www.st.com/spear
+S: Maintained
+F: arch/arm/mach-spear6xx/
+
+SPEAR CLOCK FRAMEWORK SUPPORT
+M: Viresh Kumar <viresh.kumar@st.com>
+W: http://www.st.com/spear
+S: Maintained
+F: arch/arm/mach-spear*/clock.c
+F: arch/arm/mach-spear*/include/mach/clkdev.h
+F: arch/arm/plat-spear/clock.c
+F: arch/arm/plat-spear/include/plat/clock.h and clkdev.h
+
+SPEAR PAD MULTIPLEXING SUPPORT
+M: Viresh Kumar <viresh.kumar@st.com>
+W: http://www.st.com/spear
+S: Maintained
+F: arch/arm/plat-spear/include/plat/padmux.h
+F: arch/arm/plat-spear/padmux.c
+F: arch/arm/mach-spear*/spear*xx.c
+F: arch/arm/mach-spear*/include/mach/generic.h
+F: arch/arm/mach-spear3xx/spear3*0.c
+F: arch/arm/mach-spear3xx/spear3*0_evb.c
+F: arch/arm/mach-spear6xx/spear600.c
+F: arch/arm/mach-spear6xx/spear600_evb.c
+
SPECIALIX IO8+ MULTIPORT SERIAL CARD DRIVER
M: Roger Wolff <R.E.Wolff@BitWizard.nl>
S: Supported
@@ -5881,7 +5974,7 @@ M: Laurent Pinchart <laurent.pinchart@skynet.be>
L: linux-uvc-devel@lists.berlios.de (subscribers-only)
L: linux-media@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
-W: http://linux-uvc.berlios.de
+W: http://www.ideasonboard.org/uvc/
S: Maintained
F: drivers/media/video/uvc/
@@ -5969,6 +6062,19 @@ S: Maintained
F: Documentation/fb/uvesafb.txt
F: drivers/video/uvesafb.*
+VBUS
+M: Gregory Haskins <ghaskins@novell.com>
+S: Maintained
+F: include/linux/vbus*
+F: drivers/vbus/*
+
+VBUS ETHERNET DRIVER
+M: Gregory Haskins <ghaskins@novell.com>
+S: Maintained
+W: http://developer.novell.com/wiki/index.php/AlacrityVM
+F: include/linux/venet.h
+F: drivers/net/vbus-enet.c
+
VFAT/FAT/MSDOS FILESYSTEM
M: OGAWA Hirofumi <hirofumi@mail.parknet.co.jp>
S: Maintained
diff --git a/Makefile b/Makefile
index 701bc65b3952..89b99f54001d 100644
--- a/Makefile
+++ b/Makefile
@@ -183,11 +183,14 @@ SUBARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
# CROSS_COMPILE can be set on the command line
# make CROSS_COMPILE=ia64-linux-
# Alternatively CROSS_COMPILE can be set in the environment.
+# A third alternative is to store a setting in .config so that plain
+# "make" in the configured kernel build directory always uses that.
# Default value for CROSS_COMPILE is not to prefix executables
# Note: Some architectures assign CROSS_COMPILE in their arch/*/Makefile
export KBUILD_BUILDHOST := $(SUBARCH)
ARCH ?= $(SUBARCH)
CROSS_COMPILE ?=
+CROSS_COMPILE ?= $(CONFIG_CROSS_COMPILE:"%"=%)
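# Example of the .config method mentioned above (the prefix is hypothetical):
#   CONFIG_CROSS_COMPILE="arm-linux-"
# The $(CONFIG_CROSS_COMPILE:"%"=%) reference strips the surrounding quotes,
# so a plain "make" then behaves like "make CROSS_COMPILE=arm-linux-".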
# Architecture as present in compile.h
UTS_MACHINE := $(ARCH)
@@ -576,9 +579,6 @@ KBUILD_CFLAGS += $(call cc-option,-Wno-pointer-sign,)
# disable invalid "can't wrap" optimizations for signed / pointers
KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)
-# revert to pre-gcc-4.4 behaviour of .eh_frame
-KBUILD_CFLAGS += $(call cc-option,-fno-dwarf2-cfi-asm)
-
# conserve stack if available
KBUILD_CFLAGS += $(call cc-option,-fconserve-stack)
@@ -882,9 +882,6 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
PHONY += $(vmlinux-dirs)
$(vmlinux-dirs): prepare scripts
$(Q)$(MAKE) $(build)=$@
-ifdef CONFIG_MODULES
- $(Q)$(MAKE) $(modbuiltin)=$@
-endif
# Build the kernel release string
#
@@ -907,14 +904,19 @@ endif
# $(localver)
# localversion* (files without backups, containing '~')
# $(CONFIG_LOCALVERSION) (from kernel config setting)
-# $(localver-auto) (only if CONFIG_LOCALVERSION_AUTO is set)
-# ./scripts/setlocalversion (SCM tag, if one exists)
-# $(LOCALVERSION) (from make command line if provided)
+# $(LOCALVERSION) (from make command line, if provided)
+# $(localver-extra)
+# $(scm-identifier) (unique SCM tag, if one exists)
+# ./scripts/setlocalversion (only with CONFIG_LOCALVERSION_AUTO)
+# .scmversion (only with CONFIG_LOCALVERSION_AUTO)
+# + (only without CONFIG_LOCALVERSION_AUTO
+# and without LOCALVERSION= and
+# repository is at non-tagged commit)
#
-# Note how the final $(localver-auto) string is included *only* if the
-# kernel config option CONFIG_LOCALVERSION_AUTO is selected. Also, at the
-# moment, only git is supported but other SCMs can edit the script
-# scripts/setlocalversion and add the appropriate checks as needed.
+# For kernels without CONFIG_LOCALVERSION_AUTO compiled from an SCM that has
+# been revised beyond a tagged commit, `+' is appended to the version string
+# when not overridden by using "make LOCALVERSION=". This indicates that the
+# kernel is not a vanilla release version and has been modified.
pattern = ".*/localversion[^~]*"
string = $(shell cat /dev/null \
@@ -923,26 +925,32 @@ string = $(shell cat /dev/null \
localver = $(subst $(space),, $(string) \
$(patsubst "%",%,$(CONFIG_LOCALVERSION)))
-# If CONFIG_LOCALVERSION_AUTO is set scripts/setlocalversion is called
-# and if the SCM is know a tag from the SCM is appended.
-# The appended tag is determined by the SCM used.
+# scripts/setlocalversion is called to create a unique identifier if the source
+# is managed by a known SCM and the repository has been revised since the last
+# tagged (release) commit. The format of the identifier is determined by the
+# SCM's implementation.
#
# .scmversion is used when generating rpm packages so we do not lose
# the version information from the SCM when we do the build of the kernel
# from the copied source
-ifdef CONFIG_LOCALVERSION_AUTO
-
ifeq ($(wildcard .scmversion),)
- _localver-auto = $(shell $(CONFIG_SHELL) \
+ scm-identifier = $(shell $(CONFIG_SHELL) \
$(srctree)/scripts/setlocalversion $(srctree))
else
- _localver-auto = $(shell cat .scmversion 2> /dev/null)
+ scm-identifier = $(shell cat .scmversion 2> /dev/null)
endif
- localver-auto = $(LOCALVERSION)$(_localver-auto)
+ifdef CONFIG_LOCALVERSION_AUTO
+ localver-extra = $(scm-identifier)
+else
+ ifneq ($(scm-identifier),)
+ ifeq ($(LOCALVERSION),)
+ localver-extra = +
+ endif
+ endif
endif
-localver-full = $(localver)$(localver-auto)
+localver-full = $(localver)$(LOCALVERSION)$(localver-extra)
# Store (new) KERNELRELEASE string in include/config/kernel.release
kernelrelease = $(KERNELVERSION)$(localver-full)
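# Worked example of the composition above (all values made up): with
# KERNELVERSION = 2.6.34-rc5, CONFIG_LOCALVERSION="-test", no LOCALVERSION=
# on the command line, CONFIG_LOCALVERSION_AUTO disabled and the repository
# carrying commits beyond the last tag, scm-identifier is non-empty, so
# localver-extra = "+" and kernelrelease = 2.6.34-rc5-test+.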
@@ -1089,11 +1097,16 @@ all: modules
PHONY += modules
modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
$(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
- $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.builtin) > $(objtree)/modules.builtin
@$(kecho) ' Building modules, stage 2.';
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.fwinst obj=firmware __fw_modbuild
+modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
+ $(Q)$(AWK) '!x[$$0]++' $^ > $(objtree)/modules.builtin
+
+%/modules.builtin: include/config/auto.conf
+ $(Q)$(MAKE) $(modbuiltin)=$*
+
# Target to prepare building external modules
PHONY += modules_prepare
@@ -1104,7 +1117,7 @@ PHONY += modules_install
modules_install: _modinst_ _modinst_post
PHONY += _modinst_
-_modinst_:
+_modinst_: modules.builtin
@if [ -z "`$(DEPMOD) -V 2>/dev/null | grep module-init-tools`" ]; then \
echo "Warning: you may need to install module-init-tools"; \
echo "See http://www.codemonkey.org.uk/docs/post-halloween-2.6.txt";\
@@ -1247,7 +1260,9 @@ help:
@echo ' firmware_install- Install all firmware to INSTALL_FW_PATH'
@echo ' (default: $$(INSTALL_MOD_PATH)/lib/firmware)'
@echo ' dir/ - Build all files in dir and below'
- @echo ' dir/file.[ois] - Build specified target only'
+ @echo ' dir/file.[oisS] - Build specified target only'
+ @echo ' dir/file.lst - Build specified mixed source/assembly target only'
+ @echo ' (requires a recent binutils and recent build (System.map))'
@echo ' dir/file.ko - Build module including final link'
@echo ' modules_prepare - Set up for building external modules'
@echo ' tags/TAGS - Generate tags file for editors'
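For example (arbitrary file chosen only for illustration), after a full build
	make kernel/sched.lst
produces an interleaved source/assembly listing of kernel/sched.o using System.map.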
diff --git a/arch/Kconfig b/arch/Kconfig
index e5eb1337a537..acda512da2e2 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -42,15 +42,10 @@ config KPROBES
If in doubt, say "N".
config OPTPROBES
- bool "Kprobes jump optimization support (EXPERIMENTAL)"
- default y
- depends on KPROBES
+ def_bool y
+ depends on KPROBES && HAVE_OPTPROBES
depends on !PREEMPT
- depends on HAVE_OPTPROBES
select KALLSYMS_ALL
- help
- This option will allow kprobes to optimize breakpoint to
- a jump for reducing its overhead.
config HAVE_EFFICIENT_UNALIGNED_ACCESS
bool
@@ -142,6 +137,17 @@ config HAVE_HW_BREAKPOINT
bool
depends on PERF_EVENTS
+config HAVE_MIXED_BREAKPOINTS_REGS
+ bool
+ depends on HAVE_HW_BREAKPOINT
+ help
+ Depending on the arch implementation of hardware breakpoints,
+ some of them have separate registers for data and instruction
+ breakpoint addresses, while others have mixed registers that store
+ the address and define the access type in a control register.
+ Select this option if your arch implements breakpoints in the
+ latter fashion.
+
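To illustrate the distinction drawn in the HAVE_MIXED_BREAKPOINTS_REGS help text above, a purely illustrative C sketch (not any architecture's actual register layout):

	/* "Separate registers" style: dedicated data and instruction slots. */
	struct split_bp_regs {
		unsigned long data_addr[4];
		unsigned long instr_addr[4];
	};

	/* "Mixed registers" style: a shared address slot plus a control
	 * register whose bits encode the read/write/execute access type. */
	struct mixed_bp_regs {
		unsigned long addr[4];
		unsigned long ctrl[4];
	};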
config HAVE_USER_RETURN_NOTIFIER
bool
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 75291fdd379f..b7193986cbf9 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -55,6 +55,9 @@ config ARCH_USES_GETTIMEOFFSET
bool
default y
+config GENERIC_CMOS_UPDATE
+ def_bool y
+
config ZONE_DMA
bool
default y
diff --git a/arch/alpha/include/asm/bitops.h b/arch/alpha/include/asm/bitops.h
index 15f3ae25c511..296da1d5ed57 100644
--- a/arch/alpha/include/asm/bitops.h
+++ b/arch/alpha/include/asm/bitops.h
@@ -405,29 +405,31 @@ static inline int fls(int x)
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
/* Whee. EV67 can calculate it directly. */
-static inline unsigned long hweight64(unsigned long w)
+static inline unsigned long __arch_hweight64(unsigned long w)
{
return __kernel_ctpop(w);
}
-static inline unsigned int hweight32(unsigned int w)
+static inline unsigned int __arch_hweight32(unsigned int w)
{
- return hweight64(w);
+ return __arch_hweight64(w);
}
-static inline unsigned int hweight16(unsigned int w)
+static inline unsigned int __arch_hweight16(unsigned int w)
{
- return hweight64(w & 0xffff);
+ return __arch_hweight64(w & 0xffff);
}
-static inline unsigned int hweight8(unsigned int w)
+static inline unsigned int __arch_hweight8(unsigned int w)
{
- return hweight64(w & 0xff);
+ return __arch_hweight64(w & 0xff);
}
#else
-#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/arch_hweight.h>
#endif
+#include <asm-generic/bitops/const_hweight.h>
+
#endif /* __KERNEL__ */
#include <asm-generic/bitops/find.h>
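A sketch of what the split means for callers, assuming the generic const_hweight wrapper included above maps hweightN() onto either the compile-time constant path or __arch_hweightN():

	#include <linux/types.h>
	#include <linux/bitops.h>

	static unsigned int count_pending(u32 pending)
	{
		/* Variable argument: resolves to __arch_hweight32(), ctpop on EV67. */
		return hweight32(pending);
	}

	static unsigned int nibble_bits(void)
	{
		/* Constant argument: __builtin_constant_p() picks the compile-time path. */
		return hweight32(0xfU);	/* == 4 */
	}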
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index 5d0826654c61..5465e932e568 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -75,8 +75,6 @@ static struct {
__u32 last_time;
/* ticks/cycle * 2^48 */
unsigned long scaled_ticks_per_cycle;
- /* last time the CMOS clock got updated */
- time_t last_rtc_update;
/* partial unused tick */
unsigned long partial_tick;
} state;
@@ -91,6 +89,52 @@ static inline __u32 rpcc(void)
return result;
}
+int update_persistent_clock(struct timespec now)
+{
+ return set_rtc_mmss(now.tv_sec);
+}
+
+void read_persistent_clock(struct timespec *ts)
+{
+ unsigned int year, mon, day, hour, min, sec, epoch;
+
+ sec = CMOS_READ(RTC_SECONDS);
+ min = CMOS_READ(RTC_MINUTES);
+ hour = CMOS_READ(RTC_HOURS);
+ day = CMOS_READ(RTC_DAY_OF_MONTH);
+ mon = CMOS_READ(RTC_MONTH);
+ year = CMOS_READ(RTC_YEAR);
+
+ if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+ sec = bcd2bin(sec);
+ min = bcd2bin(min);
+ hour = bcd2bin(hour);
+ day = bcd2bin(day);
+ mon = bcd2bin(mon);
+ year = bcd2bin(year);
+ }
+
+ /* PC-like is standard; used for year >= 70 */
+ epoch = 1900;
+ if (year < 20)
+ epoch = 2000;
+ else if (year >= 20 && year < 48)
+ /* NT epoch */
+ epoch = 1980;
+ else if (year >= 48 && year < 70)
+ /* Digital UNIX epoch */
+ epoch = 1952;
+
+ printk(KERN_INFO "Using epoch = %d\n", epoch);
+
+ if ((year += epoch) < 1970)
+ year += 100;
+
+ ts->tv_sec = mktime(year, mon, day, hour, min, sec);
+}
+
+
+
/*
* timer_interrupt() needs to keep up the real-time clock,
* as well as call the "do_timer()" routine every clocktick
@@ -123,19 +167,6 @@ irqreturn_t timer_interrupt(int irq, void *dev)
if (nticks)
do_timer(nticks);
- /*
- * If we have an externally synchronized Linux clock, then update
- * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
- * called as close as possible to 500 ms before the new second starts.
- */
- if (ntp_synced()
- && xtime.tv_sec > state.last_rtc_update + 660
- && xtime.tv_nsec >= 500000 - ((unsigned) TICK_SIZE) / 2
- && xtime.tv_nsec <= 500000 + ((unsigned) TICK_SIZE) / 2) {
- int tmp = set_rtc_mmss(xtime.tv_sec);
- state.last_rtc_update = xtime.tv_sec - (tmp ? 600 : 0);
- }
-
write_sequnlock(&xtime_lock);
#ifndef CONFIG_SMP
@@ -304,7 +335,7 @@ rpcc_after_update_in_progress(void)
void __init
time_init(void)
{
- unsigned int year, mon, day, hour, min, sec, cc1, cc2, epoch;
+ unsigned int cc1, cc2;
unsigned long cycle_freq, tolerance;
long diff;
@@ -348,43 +379,6 @@ time_init(void)
bogomips yet, but this is close on a 500MHz box. */
__delay(1000000);
- sec = CMOS_READ(RTC_SECONDS);
- min = CMOS_READ(RTC_MINUTES);
- hour = CMOS_READ(RTC_HOURS);
- day = CMOS_READ(RTC_DAY_OF_MONTH);
- mon = CMOS_READ(RTC_MONTH);
- year = CMOS_READ(RTC_YEAR);
-
- if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
- sec = bcd2bin(sec);
- min = bcd2bin(min);
- hour = bcd2bin(hour);
- day = bcd2bin(day);
- mon = bcd2bin(mon);
- year = bcd2bin(year);
- }
-
- /* PC-like is standard; used for year >= 70 */
- epoch = 1900;
- if (year < 20)
- epoch = 2000;
- else if (year >= 20 && year < 48)
- /* NT epoch */
- epoch = 1980;
- else if (year >= 48 && year < 70)
- /* Digital UNIX epoch */
- epoch = 1952;
-
- printk(KERN_INFO "Using epoch = %d\n", epoch);
-
- if ((year += epoch) < 1970)
- year += 100;
-
- xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
- xtime.tv_nsec = 0;
-
- wall_to_monotonic.tv_sec -= xtime.tv_sec;
- wall_to_monotonic.tv_nsec = 0;
if (HZ > (1<<16)) {
extern void __you_loose (void);
@@ -394,7 +388,6 @@ time_init(void)
state.last_time = cc1;
state.scaled_ticks_per_cycle
= ((unsigned long) HZ << FIX_SHIFT) / cycle_freq;
- state.last_rtc_update = 0;
state.partial_tick = 0L;
/* Startup the timer source. */
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 92622eb5cc0d..cb8b58c21397 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -13,7 +13,7 @@ config ARM
select RTC_LIB
select SYS_SUPPORTS_APM_EMULATION
select GENERIC_ATOMIC64 if (!CPU_32v6K)
- select HAVE_OPROFILE
+ select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
select HAVE_ARCH_KGDB
select HAVE_KPROBES if (!XIP_KERNEL)
select HAVE_KRETPROBES if (HAVE_KPROBES)
@@ -21,6 +21,7 @@ config ARM
select HAVE_GENERIC_DMA_COHERENT
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZO
+ select HAVE_KERNEL_LZMA
select HAVE_PERF_EVENTS
select PERF_USE_VMALLOC
help
@@ -42,6 +43,11 @@ config GENERIC_GPIO
config GENERIC_TIME
bool
+ default y
+
+config ARCH_USES_GETTIMEOFFSET
+ bool
+ default n
config GENERIC_CLOCKEVENTS
bool
@@ -175,28 +181,6 @@ config ARM_L1_CACHE_SHIFT_6
help
Setting ARM L1 cache line size to 64 Bytes.
-if OPROFILE
-
-config OPROFILE_ARMV6
- def_bool y
- depends on CPU_V6 && !SMP
- select OPROFILE_ARM11_CORE
-
-config OPROFILE_MPCORE
- def_bool y
- depends on CPU_V6 && SMP
- select OPROFILE_ARM11_CORE
-
-config OPROFILE_ARM11_CORE
- bool
-
-config OPROFILE_ARMV7
- def_bool y
- depends on CPU_V7 && !SMP
- bool
-
-endif
-
config VECTORS_BASE
hex
default 0xffff0000 if MMU || CPU_HIGH_VECTOR
@@ -231,6 +215,7 @@ config ARCH_AAEC2000
select CPU_ARM920T
select ARM_AMBA
select HAVE_CLK
+ select ARCH_USES_GETTIMEOFFSET
help
This enables support for systems based on the Agilent AAEC-2000
@@ -238,21 +223,24 @@ config ARCH_INTEGRATOR
bool "ARM Ltd. Integrator family"
select ARM_AMBA
select ARCH_HAS_CPUFREQ
- select HAVE_CLK
select COMMON_CLKDEV
- select ICST525
+ select ICST
+ select GENERIC_TIME
+ select GENERIC_CLOCKEVENTS
+ select PLAT_VERSATILE
help
Support for ARM's Integrator platform.
config ARCH_REALVIEW
bool "ARM Ltd. RealView family"
select ARM_AMBA
- select HAVE_CLK
select COMMON_CLKDEV
- select ICST307
+ select ICST
select GENERIC_TIME
select GENERIC_CLOCKEVENTS
select ARCH_WANT_OPTIONAL_GPIOLIB
+ select PLAT_VERSATILE
+ select ARM_TIMER_SP804
select GPIO_PL061 if GPIOLIB
help
This enables support for ARM Ltd RealView boards.
@@ -261,20 +249,36 @@ config ARCH_VERSATILE
bool "ARM Ltd. Versatile family"
select ARM_AMBA
select ARM_VIC
- select HAVE_CLK
select COMMON_CLKDEV
- select ICST307
+ select ICST
select GENERIC_TIME
select GENERIC_CLOCKEVENTS
select ARCH_WANT_OPTIONAL_GPIOLIB
+ select PLAT_VERSATILE
+ select ARM_TIMER_SP804
help
This enables support for ARM Ltd Versatile board.
+config ARCH_VEXPRESS
+ bool "ARM Ltd. Versatile Express family"
+ select ARCH_WANT_OPTIONAL_GPIOLIB
+ select ARM_AMBA
+ select ARM_TIMER_SP804
+ select COMMON_CLKDEV
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_TIME
+ select HAVE_CLK
+ select ICST
+ select PLAT_VERSATILE
+ help
+ This enables support for the ARM Ltd Versatile Express boards.
+
config ARCH_AT91
bool "Atmel AT91"
select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
select HAVE_CLK
+ select ARCH_USES_GETTIMEOFFSET
help
This enables support for systems based on the Atmel AT91RM9200,
AT91SAM9 and AT91CAP9 processors.
@@ -294,14 +298,25 @@ config ARCH_BCMRING
config ARCH_CLPS711X
bool "Cirrus Logic CLPS711x/EP721x-based"
select CPU_ARM720T
+ select ARCH_USES_GETTIMEOFFSET
help
Support for Cirrus Logic 711x/721x based boards.
+config ARCH_CNS3XXX
+ bool "Cavium Networks CNS3XXX family"
+ select CPU_V6
+ select GENERIC_TIME
+ select GENERIC_CLOCKEVENTS
+ select ARM_GIC
+ help
+ Support for Cavium Networks CNS3XXX platform.
+
config ARCH_GEMINI
bool "Cortina Systems Gemini"
select CPU_FA526
select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
+ select ARCH_USES_GETTIMEOFFSET
help
Support for the Cortina Systems Gemini family SoCs
@@ -310,6 +325,7 @@ config ARCH_EBSA110
select CPU_SA110
select ISA
select NO_IOPORT
+ select ARCH_USES_GETTIMEOFFSET
help
This is an evaluation board for the StrongARM processor available
from Digital. It has limited hardware on-board, including an
@@ -322,10 +338,10 @@ config ARCH_EP93XX
select ARM_AMBA
select ARM_VIC
select GENERIC_GPIO
- select HAVE_CLK
select COMMON_CLKDEV
select ARCH_REQUIRE_GPIOLIB
select ARCH_HAS_HOLES_MEMORYMODEL
+ select ARCH_USES_GETTIMEOFFSET
help
This enables support for the Cirrus EP93xx series of CPUs.
@@ -333,6 +349,7 @@ config ARCH_FOOTBRIDGE
bool "FootBridge"
select CPU_SA110
select FOOTBRIDGE
+ select ARCH_USES_GETTIMEOFFSET
help
Support for systems based on the DC21285 companion chip
("FootBridge"), such as the Simtec CATS and the Rebel NetWinder.
@@ -342,7 +359,6 @@ config ARCH_MXC
select GENERIC_TIME
select GENERIC_CLOCKEVENTS
select ARCH_REQUIRE_GPIOLIB
- select HAVE_CLK
select COMMON_CLKDEV
help
Support for Freescale MXC/iMX-based family of processors
@@ -350,7 +366,6 @@ config ARCH_MXC
config ARCH_STMP3XXX
bool "Freescale STMP3xxx"
select CPU_ARM926T
- select HAVE_CLK
select COMMON_CLKDEV
select ARCH_REQUIRE_GPIOLIB
select GENERIC_TIME
@@ -373,6 +388,7 @@ config ARCH_H720X
bool "Hynix HMS720x-based"
select CPU_ARM720T
select ISA_DMA_API
+ select ARCH_USES_GETTIMEOFFSET
help
This enables support for systems based on the Hynix HMS720x
@@ -415,6 +431,7 @@ config ARCH_IXP23XX
depends on MMU
select CPU_XSC3
select PCI
+ select ARCH_USES_GETTIMEOFFSET
help
Support for Intel's IXP23xx (XScale) family of processors.
@@ -423,6 +440,7 @@ config ARCH_IXP2000
depends on MMU
select CPU_XSCALE
select PCI
+ select ARCH_USES_GETTIMEOFFSET
help
Support for Intel's IXP2400/2800 (XScale) family of processors.
@@ -441,6 +459,7 @@ config ARCH_L7200
bool "LinkUp-L7200"
select CPU_ARM720T
select FIQ
+ select ARCH_USES_GETTIMEOFFSET
help
Say Y here if you intend to run this kernel on a LinkUp Systems
L7200 Software Development Board which uses an ARM720T processor.
@@ -517,7 +536,6 @@ config ARCH_MMP
depends on MMU
select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
- select HAVE_CLK
select COMMON_CLKDEV
select GENERIC_TIME
select GENERIC_CLOCKEVENTS
@@ -531,6 +549,7 @@ config ARCH_KS8695
select CPU_ARM922T
select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
+ select ARCH_USES_GETTIMEOFFSET
help
Support for Micrel/Kendin KS8695 "Centaur" (ARM922T) based
System-on-Chip devices.
@@ -553,7 +572,6 @@ config ARCH_W90X900
select CPU_ARM926T
select ARCH_REQUIRE_GPIOLIB
select GENERIC_GPIO
- select HAVE_CLK
select COMMON_CLKDEV
select GENERIC_TIME
select GENERIC_CLOCKEVENTS
@@ -569,7 +587,6 @@ config ARCH_W90X900
config ARCH_NUC93X
bool "Nuvoton NUC93X CPU"
select CPU_ARM926T
- select HAVE_CLK
select COMMON_CLKDEV
help
Support for Nuvoton (Winbond logic dept.) NUC93X MCU. The NUC93X is a
@@ -578,8 +595,8 @@ config ARCH_NUC93X
config ARCH_PNX4008
bool "Philips Nexperia PNX4008 Mobile"
select CPU_ARM926T
- select HAVE_CLK
select COMMON_CLKDEV
+ select ARCH_USES_GETTIMEOFFSET
help
This enables support for Philips PNX4008 mobile platform.
@@ -589,7 +606,6 @@ config ARCH_PXA
select ARCH_MTD_XIP
select ARCH_HAS_CPUFREQ
select GENERIC_GPIO
- select HAVE_CLK
select COMMON_CLKDEV
select ARCH_REQUIRE_GPIOLIB
select GENERIC_TIME
@@ -601,14 +617,15 @@ config ARCH_PXA
config ARCH_MSM
bool "Qualcomm MSM"
- select CPU_V6
+ select HAVE_CLK
select GENERIC_TIME
select GENERIC_CLOCKEVENTS
help
- Support for Qualcomm MSM7K based systems. This runs on the ARM11
- apps processor of the MSM7K and depends on a shared memory
- interface to the ARM9 modem processor which runs the baseband stack
- and controls some vital subsystems (clock and power control, etc).
+ Support for Qualcomm MSM/QSD based systems. This runs on the
+ apps processor of the MSM/QSD and depends on a shared memory
+ interface to the modem processor which runs the baseband
+ stack and controls some vital subsystems
+ (clock and power control, etc).
config ARCH_SHMOBILE
bool "Renesas SH-Mobile"
@@ -625,6 +642,7 @@ config ARCH_RPC
select ISA_DMA_API
select NO_IOPORT
select ARCH_SPARSEMEM_ENABLE
+ select ARCH_USES_GETTIMEOFFSET
help
On the Acorn Risc-PC, Linux can support the internal IDE disk and
CD-ROM interface, serial and parallel port, and the floppy drive.
@@ -647,15 +665,20 @@ config ARCH_SA1100
Support for StrongARM 11x0 based boards.
config ARCH_S3C2410
- bool "Samsung S3C2410, S3C2412, S3C2413, S3C2440, S3C2442, S3C2443"
+ bool "Samsung S3C2410, S3C2412, S3C2413, S3C2416, S3C2440, S3C2442, S3C2443, S3C2450"
select GENERIC_GPIO
select ARCH_HAS_CPUFREQ
select HAVE_CLK
+ select ARCH_USES_GETTIMEOFFSET
help
Samsung S3C2410X CPU based systems, such as the Simtec Electronics
BAST (<http://www.simtec.co.uk/products/EB110ITX/>), the IPAQ 1940 or
the Samsung SMDK2410 development board (and derivatives).
+ Note, the S3C2416 and the S3C2450 are so close that they even share
+ the same SoC ID code. This means that there is no separate machine
+ directory (no arch/arm/mach-s3c2450) as the S3C2416 was first.
+
config ARCH_S3C64XX
bool "Samsung S3C64XX"
select PLAT_SAMSUNG
@@ -664,6 +687,7 @@ config ARCH_S3C64XX
select ARM_VIC
select HAVE_CLK
select NO_IOPORT
+ select ARCH_USES_GETTIMEOFFSET
select ARCH_HAS_CPUFREQ
select ARCH_REQUIRE_GPIOLIB
select SAMSUNG_CLKSRC
@@ -720,6 +744,7 @@ config ARCH_SHARK
select ISA_DMA
select ZONE_DMA
select PCI
+ select ARCH_USES_GETTIMEOFFSET
help
Support for the StrongARM based Digital DNARD machine, also known
as "Shark" (<http://www.shark-linux.de/shark.html>).
@@ -729,6 +754,7 @@ config ARCH_LH7A40X
select CPU_ARM922T
select ARCH_DISCONTIGMEM_ENABLE if !LH7A40X_CONTIGMEM
select ARCH_SPARSEMEM_ENABLE if !LH7A40X_CONTIGMEM
+ select ARCH_USES_GETTIMEOFFSET
help
Say Y here for systems based on one of the Sharp LH7A40X
System on a Chip processors. These CPUs include an ARM922T
@@ -744,7 +770,6 @@ config ARCH_U300
select ARM_VIC
select GENERIC_TIME
select GENERIC_CLOCKEVENTS
- select HAVE_CLK
select COMMON_CLKDEV
select GENERIC_GPIO
help
@@ -757,6 +782,7 @@ config ARCH_U8500
select GENERIC_TIME
select GENERIC_CLOCKEVENTS
select COMMON_CLKDEV
+ select ARCH_REQUIRE_GPIOLIB
help
Support for ST-Ericsson's Ux500 architecture
@@ -765,7 +791,6 @@ config ARCH_NOMADIK
select ARM_AMBA
select ARM_VIC
select CPU_ARM926T
- select HAVE_CLK
select COMMON_CLKDEV
select GENERIC_TIME
select GENERIC_CLOCKEVENTS
@@ -776,12 +801,10 @@ config ARCH_NOMADIK
config ARCH_DAVINCI
bool "TI DaVinci"
- select CPU_ARM926T
select GENERIC_TIME
select GENERIC_CLOCKEVENTS
select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
- select HAVE_CLK
select ZONE_DMA
select HAVE_IDE
select COMMON_CLKDEV
@@ -802,6 +825,18 @@ config ARCH_OMAP
help
Support for TI's OMAP platform (OMAP1 and OMAP2).
+config PLAT_SPEAR
+ bool "ST SPEAr"
+ select ARM_AMBA
+ select ARCH_REQUIRE_GPIOLIB
+ select COMMON_CLKDEV
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_GPIO
+ select GENERIC_TIME
+ select HAVE_CLK
+ help
+ Support for ST's SPEAr platform (SPEAr3xx, SPEAr6xx and SPEAr13xx).
+
endchoice
#
@@ -817,6 +852,8 @@ source "arch/arm/mach-bcmring/Kconfig"
source "arch/arm/mach-clps711x/Kconfig"
+source "arch/arm/mach-cns3xxx/Kconfig"
+
source "arch/arm/mach-davinci/Kconfig"
source "arch/arm/mach-dove/Kconfig"
@@ -887,11 +924,13 @@ source "arch/arm/plat-samsung/Kconfig"
source "arch/arm/plat-s3c24xx/Kconfig"
source "arch/arm/plat-s5p/Kconfig"
source "arch/arm/plat-s5pc1xx/Kconfig"
+source "arch/arm/plat-spear/Kconfig"
if ARCH_S3C2410
source "arch/arm/mach-s3c2400/Kconfig"
source "arch/arm/mach-s3c2410/Kconfig"
source "arch/arm/mach-s3c2412/Kconfig"
+source "arch/arm/mach-s3c2416/Kconfig"
source "arch/arm/mach-s3c2440/Kconfig"
source "arch/arm/mach-s3c2443/Kconfig"
endif
@@ -920,6 +959,8 @@ source "arch/arm/mach-ux500/Kconfig"
source "arch/arm/mach-versatile/Kconfig"
+source "arch/arm/mach-vexpress/Kconfig"
+
source "arch/arm/mach-w90x900/Kconfig"
# Definitions to make life easier
@@ -937,6 +978,12 @@ config PLAT_ORION
config PLAT_PXA
bool
+config PLAT_VERSATILE
+ bool
+
+config ARM_TIMER_SP804
+ bool
+
source arch/arm/mm/Kconfig
config IWMMXT
@@ -1065,6 +1112,10 @@ config PCI
your box. Other bus systems are ISA, EISA, MicroChannel (MCA) or
VESA. If you have PCI, say Y, otherwise N.
+config PCI_DOMAINS
+ bool
+ depends on PCI
+
config PCI_SYSCALL
def_bool PCI
@@ -1093,10 +1144,11 @@ source "kernel/time/Kconfig"
config SMP
bool "Symmetric Multi-Processing (EXPERIMENTAL)"
depends on EXPERIMENTAL && (REALVIEW_EB_ARM11MP || REALVIEW_EB_A9MP ||\
- MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_OMAP4 || ARCH_U8500)
+ MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_OMAP4 ||\
+ ARCH_U8500 || ARCH_VEXPRESS_CA9X4)
depends on GENERIC_CLOCKEVENTS
select USE_GENERIC_SMP_HELPERS
- select HAVE_ARM_SCU if (ARCH_REALVIEW || ARCH_OMAP4 || ARCH_U8500)
+ select HAVE_ARM_SCU if (ARCH_REALVIEW || ARCH_OMAP4 || ARCH_U8500 || ARCH_VEXPRESS_CA9X4)
help
This enables support for systems with more than one CPU. If you have
a system with only one CPU, like most personal computers, say N. If
@@ -1277,7 +1329,7 @@ config HIGHPTE
config HW_PERF_EVENTS
bool "Enable hardware performance counter support for perf events"
- depends on PERF_EVENTS && CPU_HAS_PMU && (CPU_V6 || CPU_V7)
+ depends on PERF_EVENTS && CPU_HAS_PMU
default y
help
Enable hardware performance counter support for perf events. If
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index ed820e737a8a..d5af3b024300 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -121,6 +121,7 @@ machine-$(CONFIG_ARCH_AAEC2000) := aaec2000
machine-$(CONFIG_ARCH_AT91) := at91
machine-$(CONFIG_ARCH_BCMRING) := bcmring
machine-$(CONFIG_ARCH_CLPS711X) := clps711x
+machine-$(CONFIG_ARCH_CNS3XXX) := cns3xxx
machine-$(CONFIG_ARCH_DAVINCI) := davinci
machine-$(CONFIG_ARCH_DOVE) := dove
machine-$(CONFIG_ARCH_EBSA110) := ebsa110
@@ -160,7 +161,7 @@ machine-$(CONFIG_ARCH_PNX4008) := pnx4008
machine-$(CONFIG_ARCH_PXA) := pxa
machine-$(CONFIG_ARCH_REALVIEW) := realview
machine-$(CONFIG_ARCH_RPC) := rpc
-machine-$(CONFIG_ARCH_S3C2410) := s3c2410 s3c2400 s3c2412 s3c2440 s3c2443
+machine-$(CONFIG_ARCH_S3C2410) := s3c2410 s3c2400 s3c2412 s3c2416 s3c2440 s3c2443
machine-$(CONFIG_ARCH_S3C24A0) := s3c24a0
machine-$(CONFIG_ARCH_S3C64XX) := s3c64xx
machine-$(CONFIG_ARCH_S5P6440) := s5p6440
@@ -175,9 +176,14 @@ machine-$(CONFIG_ARCH_STMP37XX) := stmp37xx
machine-$(CONFIG_ARCH_U300) := u300
machine-$(CONFIG_ARCH_U8500) := ux500
machine-$(CONFIG_ARCH_VERSATILE) := versatile
+machine-$(CONFIG_ARCH_VEXPRESS) := vexpress
machine-$(CONFIG_ARCH_W90X900) := w90x900
machine-$(CONFIG_ARCH_NUC93X) := nuc93x
machine-$(CONFIG_FOOTBRIDGE) := footbridge
+machine-$(CONFIG_MACH_SPEAR300) := spear3xx
+machine-$(CONFIG_MACH_SPEAR310) := spear3xx
+machine-$(CONFIG_MACH_SPEAR320) := spear3xx
+machine-$(CONFIG_MACH_SPEAR600) := spear6xx
# Platform directory name. This list is sorted alphanumerically
# by CONFIG_* macro name.
@@ -192,6 +198,8 @@ plat-$(CONFIG_PLAT_PXA) := pxa
plat-$(CONFIG_PLAT_S3C24XX) := s3c24xx samsung
plat-$(CONFIG_PLAT_S5PC1XX) := s5pc1xx samsung
plat-$(CONFIG_PLAT_S5P) := s5p samsung
+plat-$(CONFIG_PLAT_SPEAR) := spear
+plat-$(CONFIG_PLAT_VERSATILE) := versatile
ifeq ($(CONFIG_ARCH_EBSA110),y)
# This is what happens if you forget the IOCS16 line.
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 97c89e7de7d3..53faa9063a03 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -65,6 +65,7 @@ SEDFLAGS = s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/
suffix_$(CONFIG_KERNEL_GZIP) = gzip
suffix_$(CONFIG_KERNEL_LZO) = lzo
+suffix_$(CONFIG_KERNEL_LZMA) = lzma
targets := vmlinux vmlinux.lds \
piggy.$(suffix_y) piggy.$(suffix_y).o \
diff --git a/arch/arm/boot/compressed/decompress.c b/arch/arm/boot/compressed/decompress.c
index 9c097073ce4c..4c72a97bc3e1 100644
--- a/arch/arm/boot/compressed/decompress.c
+++ b/arch/arm/boot/compressed/decompress.c
@@ -40,6 +40,10 @@ extern void error(char *);
#include "../../../../lib/decompress_unlzo.c"
#endif
+#ifdef CONFIG_KERNEL_LZMA
+#include "../../../../lib/decompress_unlzma.c"
+#endif
+
void do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x))
{
decompress(input, len, NULL, NULL, output, NULL, error);
diff --git a/arch/arm/boot/compressed/piggy.lzma.S b/arch/arm/boot/compressed/piggy.lzma.S
new file mode 100644
index 000000000000..d7e69cffbc0a
--- /dev/null
+++ b/arch/arm/boot/compressed/piggy.lzma.S
@@ -0,0 +1,6 @@
+ .section .piggydata,#alloc
+ .globl input_data
+input_data:
+ .incbin "arch/arm/boot/compressed/piggy.lzma"
+ .globl input_data_end
+input_data_end:
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index 4efbb9df0444..eb99891297cc 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -12,10 +12,7 @@ config ARM_VIC_NR
The maximum number of VICs available in the system, for
power management.
-config ICST525
- bool
-
-config ICST307
+config ICST
bool
config SA1111
@@ -40,3 +37,4 @@ config SHARP_SCOOP
config COMMON_CLKDEV
bool
+ select HAVE_CLK
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index 76be7ff2a7ca..5e8ad0d6c917 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -4,8 +4,7 @@
obj-$(CONFIG_ARM_GIC) += gic.o
obj-$(CONFIG_ARM_VIC) += vic.o
-obj-$(CONFIG_ICST525) += icst525.o
-obj-$(CONFIG_ICST307) += icst307.o
+obj-$(CONFIG_ICST) += icst.o
obj-$(CONFIG_SA1111) += sa1111.o
obj-$(CONFIG_PCI_HOST_VIA82C505) += via82c505.o
obj-$(CONFIG_DMABOUNCE) += dmabounce.o
diff --git a/arch/arm/common/clkdev.c b/arch/arm/common/clkdev.c
index dba4c1da63ed..e2b2bb66e094 100644
--- a/arch/arm/common/clkdev.c
+++ b/arch/arm/common/clkdev.c
@@ -53,12 +53,13 @@ static struct clk *clk_find(const char *dev_id, const char *con_id)
continue;
match += 1;
}
- if (match == 0)
- continue;
if (match > best) {
clk = p->clk;
- best = match;
+ if (match != 3)
+ best = match;
+ else
+ break;
}
}
return clk;
diff --git a/arch/arm/common/icst.c b/arch/arm/common/icst.c
new file mode 100644
index 000000000000..9a7f09cff300
--- /dev/null
+++ b/arch/arm/common/icst.c
@@ -0,0 +1,100 @@
+/*
+ * linux/arch/arm/common/icst.c
+ *
+ * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Support functions for calculating clocks/divisors for the ICST307/ICST525
+ * clock generators. See http://www.icst.com/ for more information
+ * on these devices.
+ *
+ * The ICST307 and ICST525 are handled almost identically; only their
+ * s2div and idx2s tables differ, and those are supplied by the caller
+ * through struct icst_params.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include <asm/hardware/icst.h>
+
+/*
+ * Divisors for each OD setting.
+ */
+const unsigned char icst307_s2div[8] = { 10, 2, 8, 4, 5, 7, 3, 6 };
+const unsigned char icst525_s2div[8] = { 10, 2, 8, 4, 5, 7, 9, 6 };
+EXPORT_SYMBOL(icst307_s2div);
+EXPORT_SYMBOL(icst525_s2div);
+
+unsigned long icst_hz(const struct icst_params *p, struct icst_vco vco)
+{
+ return p->ref * 2 * (vco.v + 8) / ((vco.r + 2) * p->s2div[vco.s]);
+}
+
+EXPORT_SYMBOL(icst_hz);
+
+/*
+ * Ascending divisor S values.
+ */
+const unsigned char icst307_idx2s[8] = { 1, 6, 3, 4, 7, 5, 2, 0 };
+const unsigned char icst525_idx2s[8] = { 1, 3, 4, 7, 5, 2, 6, 0 };
+EXPORT_SYMBOL(icst307_idx2s);
+EXPORT_SYMBOL(icst525_idx2s);
+
+struct icst_vco
+icst_hz_to_vco(const struct icst_params *p, unsigned long freq)
+{
+ struct icst_vco vco = { .s = 1, .v = p->vd_max, .r = p->rd_max };
+ unsigned long f;
+ unsigned int i = 0, rd, best = (unsigned int)-1;
+
+ /*
+ * First, find the PLL output divisor such
+ * that the PLL output is within spec.
+ */
+ do {
+ f = freq * p->s2div[p->idx2s[i]];
+
+ if (f > p->vco_min && f <= p->vco_max)
+ break;
+ } while (++i < 8);
+
+ if (i >= 8)
+ return vco;
+
+ vco.s = p->idx2s[i];
+
+ /*
+ * Now find the closest divisor combination
+ * which gives a PLL output of 'f'.
+ */
+ for (rd = p->rd_min; rd <= p->rd_max; rd++) {
+ unsigned long fref_div, f_pll;
+ unsigned int vd;
+ int f_diff;
+
+ fref_div = (2 * p->ref) / rd;
+
+ vd = (f + fref_div / 2) / fref_div;
+ if (vd < p->vd_min || vd > p->vd_max)
+ continue;
+
+ f_pll = fref_div * vd;
+ f_diff = f_pll - f;
+ if (f_diff < 0)
+ f_diff = -f_diff;
+
+ if ((unsigned)f_diff < best) {
+ vco.v = vd - 8;
+ vco.r = rd - 2;
+ if (f_diff == 0)
+ break;
+ best = f_diff;
+ }
+ }
+
+ return vco;
+}
+
+EXPORT_SYMBOL(icst_hz_to_vco);
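A hedged sketch of how a platform might drive the consolidated API; every numeric value below is made up for illustration and does not come from any board file:

	#include <asm/hardware/icst.h>

	static const struct icst_params example_params = {
		.ref		= 24000000,	/* 24 MHz reference, made up */
		.vco_max	= 200000000,
		.vco_min	= 6000000,
		.vd_min		= 4,
		.vd_max		= 511,
		.rd_min		= 1,
		.rd_max		= 127,
		.s2div		= icst307_s2div,
		.idx2s		= icst307_idx2s,
	};

	static unsigned long example_set_rate(unsigned long hz)
	{
		struct icst_vco vco = icst_hz_to_vco(&example_params, hz);

		/*
		 * vco.v, vco.r and vco.s would be programmed into the clock
		 * generator here.  icst_hz() reports the rate actually hit,
		 * e.g. v = 12, r = 1 and a divisor of 2 give
		 * 24000000 * 2 * 20 / (3 * 2) = 160 MHz.
		 */
		return icst_hz(&example_params, vco);
	}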
diff --git a/arch/arm/common/icst307.c b/arch/arm/common/icst307.c
deleted file mode 100644
index 6d094c157540..000000000000
--- a/arch/arm/common/icst307.c
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * linux/arch/arm/common/icst307.c
- *
- * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Support functions for calculating clocks/divisors for the ICST307
- * clock generators. See http://www.icst.com/ for more information
- * on these devices.
- *
- * This is an almost identical implementation to the ICST525 clock generator.
- * The s2div and idx2s files are different
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-
-#include <asm/hardware/icst307.h>
-
-/*
- * Divisors for each OD setting.
- */
-static unsigned char s2div[8] = { 10, 2, 8, 4, 5, 7, 3, 6 };
-
-unsigned long icst307_khz(const struct icst307_params *p, struct icst307_vco vco)
-{
- return p->ref * 2 * (vco.v + 8) / ((vco.r + 2) * s2div[vco.s]);
-}
-
-EXPORT_SYMBOL(icst307_khz);
-
-/*
- * Ascending divisor S values.
- */
-static unsigned char idx2s[8] = { 1, 6, 3, 4, 7, 5, 2, 0 };
-
-struct icst307_vco
-icst307_khz_to_vco(const struct icst307_params *p, unsigned long freq)
-{
- struct icst307_vco vco = { .s = 1, .v = p->vd_max, .r = p->rd_max };
- unsigned long f;
- unsigned int i = 0, rd, best = (unsigned int)-1;
-
- /*
- * First, find the PLL output divisor such
- * that the PLL output is within spec.
- */
- do {
- f = freq * s2div[idx2s[i]];
-
- /*
- * f must be between 6MHz and 200MHz (3.3 or 5V)
- */
- if (f > 6000 && f <= p->vco_max)
- break;
- } while (i < ARRAY_SIZE(idx2s));
-
- if (i >= ARRAY_SIZE(idx2s))
- return vco;
-
- vco.s = idx2s[i];
-
- /*
- * Now find the closest divisor combination
- * which gives a PLL output of 'f'.
- */
- for (rd = p->rd_min; rd <= p->rd_max; rd++) {
- unsigned long fref_div, f_pll;
- unsigned int vd;
- int f_diff;
-
- fref_div = (2 * p->ref) / rd;
-
- vd = (f + fref_div / 2) / fref_div;
- if (vd < p->vd_min || vd > p->vd_max)
- continue;
-
- f_pll = fref_div * vd;
- f_diff = f_pll - f;
- if (f_diff < 0)
- f_diff = -f_diff;
-
- if ((unsigned)f_diff < best) {
- vco.v = vd - 8;
- vco.r = rd - 2;
- if (f_diff == 0)
- break;
- best = f_diff;
- }
- }
-
- return vco;
-}
-
-EXPORT_SYMBOL(icst307_khz_to_vco);
-
-struct icst307_vco
-icst307_ps_to_vco(const struct icst307_params *p, unsigned long period)
-{
- struct icst307_vco vco = { .s = 1, .v = p->vd_max, .r = p->rd_max };
- unsigned long f, ps;
- unsigned int i = 0, rd, best = (unsigned int)-1;
-
- ps = 1000000000UL / p->vco_max;
-
- /*
- * First, find the PLL output divisor such
- * that the PLL output is within spec.
- */
- do {
- f = period / s2div[idx2s[i]];
-
- /*
- * f must be between 6MHz and 200MHz (3.3 or 5V)
- */
- if (f >= ps && f < 1000000000UL / 6000 + 1)
- break;
- } while (i < ARRAY_SIZE(idx2s));
-
- if (i >= ARRAY_SIZE(idx2s))
- return vco;
-
- vco.s = idx2s[i];
-
- ps = 500000000UL / p->ref;
-
- /*
- * Now find the closest divisor combination
- * which gives a PLL output of 'f'.
- */
- for (rd = p->rd_min; rd <= p->rd_max; rd++) {
- unsigned long f_in_div, f_pll;
- unsigned int vd;
- int f_diff;
-
- f_in_div = ps * rd;
-
- vd = (f_in_div + f / 2) / f;
- if (vd < p->vd_min || vd > p->vd_max)
- continue;
-
- f_pll = (f_in_div + vd / 2) / vd;
- f_diff = f_pll - f;
- if (f_diff < 0)
- f_diff = -f_diff;
-
- if ((unsigned)f_diff < best) {
- vco.v = vd - 8;
- vco.r = rd - 2;
- if (f_diff == 0)
- break;
- best = f_diff;
- }
- }
-
- return vco;
-}
-
-EXPORT_SYMBOL(icst307_ps_to_vco);
diff --git a/arch/arm/common/icst525.c b/arch/arm/common/icst525.c
deleted file mode 100644
index 3d377c5bdef6..000000000000
--- a/arch/arm/common/icst525.c
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * linux/arch/arm/common/icst525.c
- *
- * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Support functions for calculating clocks/divisors for the ICST525
- * clock generators. See http://www.icst.com/ for more information
- * on these devices.
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-
-#include <asm/hardware/icst525.h>
-
-/*
- * Divisors for each OD setting.
- */
-static unsigned char s2div[8] = { 10, 2, 8, 4, 5, 7, 9, 6 };
-
-unsigned long icst525_khz(const struct icst525_params *p, struct icst525_vco vco)
-{
- return p->ref * 2 * (vco.v + 8) / ((vco.r + 2) * s2div[vco.s]);
-}
-
-EXPORT_SYMBOL(icst525_khz);
-
-/*
- * Ascending divisor S values.
- */
-static unsigned char idx2s[] = { 1, 3, 4, 7, 5, 2, 6, 0 };
-
-struct icst525_vco
-icst525_khz_to_vco(const struct icst525_params *p, unsigned long freq)
-{
- struct icst525_vco vco = { .s = 1, .v = p->vd_max, .r = p->rd_max };
- unsigned long f;
- unsigned int i = 0, rd, best = (unsigned int)-1;
-
- /*
- * First, find the PLL output divisor such
- * that the PLL output is within spec.
- */
- do {
- f = freq * s2div[idx2s[i]];
-
- /*
- * f must be between 10MHz and
- * 320MHz (5V) or 200MHz (3V)
- */
- if (f > 10000 && f <= p->vco_max)
- break;
- } while (i < ARRAY_SIZE(idx2s));
-
- if (i >= ARRAY_SIZE(idx2s))
- return vco;
-
- vco.s = idx2s[i];
-
- /*
- * Now find the closest divisor combination
- * which gives a PLL output of 'f'.
- */
- for (rd = p->rd_min; rd <= p->rd_max; rd++) {
- unsigned long fref_div, f_pll;
- unsigned int vd;
- int f_diff;
-
- fref_div = (2 * p->ref) / rd;
-
- vd = (f + fref_div / 2) / fref_div;
- if (vd < p->vd_min || vd > p->vd_max)
- continue;
-
- f_pll = fref_div * vd;
- f_diff = f_pll - f;
- if (f_diff < 0)
- f_diff = -f_diff;
-
- if ((unsigned)f_diff < best) {
- vco.v = vd - 8;
- vco.r = rd - 2;
- if (f_diff == 0)
- break;
- best = f_diff;
- }
- }
-
- return vco;
-}
-
-EXPORT_SYMBOL(icst525_khz_to_vco);
-
-struct icst525_vco
-icst525_ps_to_vco(const struct icst525_params *p, unsigned long period)
-{
- struct icst525_vco vco = { .s = 1, .v = p->vd_max, .r = p->rd_max };
- unsigned long f, ps;
- unsigned int i = 0, rd, best = (unsigned int)-1;
-
- ps = 1000000000UL / p->vco_max;
-
- /*
- * First, find the PLL output divisor such
- * that the PLL output is within spec.
- */
- do {
- f = period / s2div[idx2s[i]];
-
- /*
- * f must be between 10MHz and
- * 320MHz (5V) or 200MHz (3V)
- */
- if (f >= ps && f < 100000)
- break;
- } while (i < ARRAY_SIZE(idx2s));
-
- if (i >= ARRAY_SIZE(idx2s))
- return vco;
-
- vco.s = idx2s[i];
-
- ps = 500000000UL / p->ref;
-
- /*
- * Now find the closest divisor combination
- * which gives a PLL output of 'f'.
- */
- for (rd = p->rd_min; rd <= p->rd_max; rd++) {
- unsigned long f_in_div, f_pll;
- unsigned int vd;
- int f_diff;
-
- f_in_div = ps * rd;
-
- vd = (f_in_div + f / 2) / f;
- if (vd < p->vd_min || vd > p->vd_max)
- continue;
-
- f_pll = (f_in_div + vd / 2) / vd;
- f_diff = f_pll - f;
- if (f_diff < 0)
- f_diff = -f_diff;
-
- if ((unsigned)f_diff < best) {
- vco.v = vd - 8;
- vco.r = rd - 2;
- if (f_diff == 0)
- break;
- best = f_diff;
- }
- }
-
- return vco;
-}
-
-EXPORT_SYMBOL(icst525_ps_to_vco);
diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c
index 1cf999ade4bc..ba65f6eedca6 100644
--- a/arch/arm/common/vic.c
+++ b/arch/arm/common/vic.c
@@ -266,13 +266,53 @@ static int vic_set_wake(unsigned int irq, unsigned int on)
#endif /* CONFIG_PM */
static struct irq_chip vic_chip = {
- .name = "VIC",
- .ack = vic_ack_irq,
- .mask = vic_mask_irq,
- .unmask = vic_unmask_irq,
- .set_wake = vic_set_wake,
+ .name = "VIC",
+ .ack = vic_ack_irq,
+ .mask = vic_mask_irq,
+ .unmask = vic_unmask_irq,
+ .set_wake = vic_set_wake,
};
+static void __init vic_disable(void __iomem *base)
+{
+ writel(0, base + VIC_INT_SELECT);
+ writel(0, base + VIC_INT_ENABLE);
+ writel(~0, base + VIC_INT_ENABLE_CLEAR);
+ writel(0, base + VIC_IRQ_STATUS);
+ writel(0, base + VIC_ITCR);
+ writel(~0, base + VIC_INT_SOFT_CLEAR);
+}
+
+static void __init vic_clear_interrupts(void __iomem *base)
+{
+ unsigned int i;
+
+ writel(0, base + VIC_PL190_VECT_ADDR);
+ for (i = 0; i < 19; i++) {
+ unsigned int value;
+
+ value = readl(base + VIC_PL190_VECT_ADDR);
+ writel(value, base + VIC_PL190_VECT_ADDR);
+ }
+}
+
+static void __init vic_set_irq_sources(void __iomem *base,
+ unsigned int irq_start, u32 vic_sources)
+{
+ unsigned int i;
+
+ for (i = 0; i < 32; i++) {
+ if (vic_sources & (1 << i)) {
+ unsigned int irq = irq_start + i;
+
+ set_irq_chip(irq, &vic_chip);
+ set_irq_chip_data(irq, base);
+ set_irq_handler(irq, handle_level_irq);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ }
+ }
+}
+
/*
* The PL190 cell from ARM has been modified by ST to handle 64 interrupts.
* The original cell has 32 interrupts, while the modified one has 64,
@@ -287,13 +327,7 @@ static void __init vic_init_st(void __iomem *base, unsigned int irq_start,
int vic_2nd_block = ((unsigned long)base & ~PAGE_MASK) != 0;
/* Disable all interrupts initially. */
-
- writel(0, base + VIC_INT_SELECT);
- writel(0, base + VIC_INT_ENABLE);
- writel(~0, base + VIC_INT_ENABLE_CLEAR);
- writel(0, base + VIC_IRQ_STATUS);
- writel(0, base + VIC_ITCR);
- writel(~0, base + VIC_INT_SOFT_CLEAR);
+ vic_disable(base);
/*
* Make sure we clear all existing interrupts. The vector registers
@@ -302,13 +336,8 @@ static void __init vic_init_st(void __iomem *base, unsigned int irq_start,
* the second base address, which is 0x20 in the page
*/
if (vic_2nd_block) {
- writel(0, base + VIC_PL190_VECT_ADDR);
- for (i = 0; i < 19; i++) {
- unsigned int value;
+ vic_clear_interrupts(base);
- value = readl(base + VIC_PL190_VECT_ADDR);
- writel(value, base + VIC_PL190_VECT_ADDR);
- }
/* ST has 16 vectors as well, but we don't enable them for now */
for (i = 0; i < 16; i++) {
void __iomem *reg = base + VIC_VECT_CNTL0 + (i * 4);
@@ -318,16 +347,7 @@ static void __init vic_init_st(void __iomem *base, unsigned int irq_start,
writel(32, base + VIC_PL190_DEF_VECT_ADDR);
}
- for (i = 0; i < 32; i++) {
- if (vic_sources & (1 << i)) {
- unsigned int irq = irq_start + i;
-
- set_irq_chip(irq, &vic_chip);
- set_irq_chip_data(irq, base);
- set_irq_handler(irq, handle_level_irq);
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
- }
- }
+ vic_set_irq_sources(base, irq_start, vic_sources);
}
/**
@@ -365,37 +385,14 @@ void __init vic_init(void __iomem *base, unsigned int irq_start,
}
/* Disable all interrupts initially. */
+ vic_disable(base);
- writel(0, base + VIC_INT_SELECT);
- writel(0, base + VIC_INT_ENABLE);
- writel(~0, base + VIC_INT_ENABLE_CLEAR);
- writel(0, base + VIC_IRQ_STATUS);
- writel(0, base + VIC_ITCR);
- writel(~0, base + VIC_INT_SOFT_CLEAR);
-
- /*
- * Make sure we clear all existing interrupts
- */
- writel(0, base + VIC_PL190_VECT_ADDR);
- for (i = 0; i < 19; i++) {
- unsigned int value;
-
- value = readl(base + VIC_PL190_VECT_ADDR);
- writel(value, base + VIC_PL190_VECT_ADDR);
- }
+ /* Make sure we clear all existing interrupts */
+ vic_clear_interrupts(base);
vic_init2(base);
- for (i = 0; i < 32; i++) {
- if (vic_sources & (1 << i)) {
- unsigned int irq = irq_start + i;
-
- set_irq_chip(irq, &vic_chip);
- set_irq_chip_data(irq, base);
- set_irq_handler(irq, handle_level_irq);
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
- }
- }
+ vic_set_irq_sources(base, irq_start, vic_sources);
vic_pm_register(base, irq_start, resume_sources);
}
diff --git a/arch/arm/configs/am3517_evm_defconfig b/arch/arm/configs/am3517_evm_defconfig
index 66a10b50d938..e4f4fb522bac 100644
--- a/arch/arm/configs/am3517_evm_defconfig
+++ b/arch/arm/configs/am3517_evm_defconfig
@@ -422,15 +422,29 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
#
# CONFIG_NET_PKTGEN is not set
# CONFIG_HAMRADIO is not set
-# CONFIG_CAN is not set
+CONFIG_CAN=y
+CONFIG_CAN_RAW=y
+CONFIG_CAN_BCM=y
+
+#
+# CAN Device Drivers
+#
+CONFIG_CAN_VCAN=y
+CONFIG_CAN_DEV=y
+CONFIG_CAN_CALC_BITTIMING=y
+CONFIG_CAN_TI_HECC=y
+# CONFIG_CAN_SJA1000 is not set
+
+#
+# CAN USB interfaces
+#
+# CONFIG_CAN_EMS_USB is not set
+CONFIG_CAN_DEBUG_DEVICES=y
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
-CONFIG_CFG80211_DEFAULT_PS_VALUE=0
-# CONFIG_WIRELESS_OLD_REGULATORY is not set
-# CONFIG_WIRELESS_EXT is not set
# CONFIG_LIB80211 is not set
#
@@ -517,7 +531,75 @@ CONFIG_SCSI_LOWLEVEL=y
# CONFIG_SCSI_OSD_INITIATOR is not set
# CONFIG_ATA is not set
# CONFIG_MD is not set
-# CONFIG_NETDEVICES is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+CONFIG_NET_ETHERNET=y
+# CONFIG_MII is not set
+# CONFIG_AX88796 is not set
+# CONFIG_SMC91X is not set
+CONFIG_TI_DAVINCI_EMAC=y
+# CONFIG_DM9000 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMC911X is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851_MLL is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
# CONFIG_ISDN is not set
# CONFIG_PHONE is not set
@@ -692,7 +774,57 @@ CONFIG_SSB_POSSIBLE=y
#
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
-# CONFIG_FB is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_ARMCLCD is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_FB_OMAP_BOOTLOADER_INIT is not set
+CONFIG_OMAP2_VRAM=y
+CONFIG_OMAP2_VRFB=y
+CONFIG_OMAP2_DSS=y
+CONFIG_OMAP2_VRAM_SIZE=4
+CONFIG_OMAP2_DSS_DEBUG_SUPPORT=y
+# CONFIG_OMAP2_DSS_RFBI is not set
+CONFIG_OMAP2_DSS_VENC=y
+# CONFIG_OMAP2_DSS_SDI is not set
+# CONFIG_OMAP2_DSS_DSI is not set
+# CONFIG_OMAP2_DSS_FAKE_VSYNC is not set
+CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=4
+CONFIG_FB_OMAP2=y
+CONFIG_FB_OMAP2_DEBUG_SUPPORT=y
+# CONFIG_FB_OMAP2_FORCE_AUTO_UPDATE is not set
+CONFIG_FB_OMAP2_NUM_FBS=3
+
+#
+# OMAP2/3 Display Device Drivers
+#
+CONFIG_PANEL_GENERIC=y
+# CONFIG_PANEL_SHARP_LS037V7DW01 is not set
+CONFIG_PANEL_SHARP_LQ043T1DG01=y
# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
#
diff --git a/arch/arm/configs/ams_delta_defconfig b/arch/arm/configs/ams_delta_defconfig
index 3b3a3775bbf4..6d8a0c891f80 100644
--- a/arch/arm/configs/ams_delta_defconfig
+++ b/arch/arm/configs/ams_delta_defconfig
@@ -47,6 +47,7 @@ CONFIG_SYSVIPC_SYSCTL=y
# CONFIG_TASKSTATS is not set
# CONFIG_UTS_NS is not set
# CONFIG_AUDIT is not set
+CONFIG_TREE_PREEMPT_RCU=y
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED=y
@@ -95,9 +96,8 @@ CONFIG_KMOD=y
# Block layer
#
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
#
# IO Schedulers
@@ -699,6 +699,7 @@ CONFIG_SERIO=y
CONFIG_SERIO_SERPORT=y
CONFIG_SERIO_LIBPS2=y
# CONFIG_SERIO_RAW is not set
+CONFIG_SERIO_AMS_DELTA=y
# CONFIG_GAMEPORT is not set
#
@@ -835,7 +836,8 @@ CONFIG_DAB=y
#
# Graphics support
#
-# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
#
# Display device support
@@ -1283,7 +1285,7 @@ CONFIG_DEBUG_PREEMPT=y
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
-CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_BUGVERBOSE is not set
# CONFIG_DEBUG_INFO is not set
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_LIST is not set
diff --git a/arch/arm/configs/cns3420vb_defconfig b/arch/arm/configs/cns3420vb_defconfig
new file mode 100644
index 000000000000..d5c088149e46
--- /dev/null
+++ b/arch/arm/configs/cns3420vb_defconfig
@@ -0,0 +1,831 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.34-rc6
+# Sun May 2 21:58:08 2010
+#
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_HAVE_PROC_CPU=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_OPROFILE_ARMV6=y
+CONFIG_OPROFILE_ARM11_CORE=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_BSD_PROCESS_ACCT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TINY_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
+# CONFIG_CGROUP_DEBUG is not set
+# CONFIG_CGROUP_NS is not set
+# CONFIG_CGROUP_FREEZER is not set
+# CONFIG_CGROUP_DEVICE is not set
+# CONFIG_CPUSETS is not set
+# CONFIG_CGROUP_CPUACCT is not set
+# CONFIG_RESOURCE_COUNTERS is not set
+# CONFIG_CGROUP_SCHED is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_RELAY=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_RD_LZO=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+# CONFIG_EMBEDDED is not set
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
+
+#
+# Kernel Performance Events And Counters
+#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_COMPAT_BRK=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_SLOW_WORK=y
+# CONFIG_SLOW_WORK_DEBUG is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=m
+# CONFIG_CFQ_GROUP_IOSCHED is not set
+CONFIG_DEFAULT_DEADLINE=y
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="deadline"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
+# CONFIG_FREEZER is not set
+
+#
+# System Type
+#
+CONFIG_MMU=y
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_BCMRING is not set
+# CONFIG_ARCH_CLPS711X is not set
+CONFIG_ARCH_CNS3XXX=y
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_STMP3XXX is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_DOVE is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_NUC93X is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_SHMOBILE is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5P6440 is not set
+# CONFIG_ARCH_S5P6442 is not set
+# CONFIG_ARCH_S5PC1XX is not set
+# CONFIG_ARCH_S5PV210 is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_U8500 is not set
+# CONFIG_ARCH_NOMADIK is not set
+# CONFIG_ARCH_DAVINCI is not set
+# CONFIG_ARCH_OMAP is not set
+
+#
+# CNS3XXX platform type
+#
+CONFIG_MACH_CNS3420VB=y
+
+#
+# Processor Type
+#
+CONFIG_CPU_V6=y
+# CONFIG_CPU_32v6K is not set
+CONFIG_CPU_32v6=y
+CONFIG_CPU_ABRT_EV6=y
+CONFIG_CPU_PABRT_V6=y
+CONFIG_CPU_CACHE_V6=y
+CONFIG_CPU_CACHE_VIPT=y
+CONFIG_CPU_COPY_V6=y
+CONFIG_CPU_TLB_V6=y
+CONFIG_CPU_HAS_ASID=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_BPREDICT_DISABLE is not set
+CONFIG_ARM_L1_CACHE_SHIFT=5
+CONFIG_CPU_HAS_PMU=y
+# CONFIG_ARM_ERRATA_411920 is not set
+CONFIG_ARM_GIC=y
+
+#
+# Bus support
+#
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_HZ=100
+CONFIG_AEABI=y
+CONFIG_OABI_COMPAT=y
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+# CONFIG_HIGHMEM is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="console=ttyS0,38400 mem=128M root=/dev/mmcblk0p1 ro rootwait"
+# CONFIG_XIP_KERNEL is not set
+# CONFIG_KEXEC is not set
+
+#
+# CPU Power Management
+#
+# CONFIG_CPU_IDLE is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+# CONFIG_FPE_NWFPE is not set
+# CONFIG_FPE_FASTFPE is not set
+# CONFIG_VFP is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Power management options
+#
+# CONFIG_PM is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+# CONFIG_NET is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_SYS_HYPERVISOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AFS_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_AMDSTD=y
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+CONFIG_MTD_PHYSMAP=y
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
+# CONFIG_MTD_ARM_INTEGRATOR is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_NAND is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=20000
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_MISC_DEVICES is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=y
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+# CONFIG_CHR_DEV_SCH is not set
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+CONFIG_SCSI_LOWLEVEL=y
+# CONFIG_LIBFC is not set
+# CONFIG_LIBFCOE is not set
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+CONFIG_ATA=y
+# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_ATA_VERBOSE_ERROR=y
+# CONFIG_SATA_PMP is not set
+# CONFIG_ATA_SFF is not set
+# CONFIG_MD is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+# CONFIG_INPUT_SPARSEKMAP is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=4
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+# CONFIG_SERIAL_8250_EXTENDED is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=16
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_I2C is not set
+# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_SOUND is not set
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+# CONFIG_EXT2_FS_POSIX_ACL is not set
+# CONFIG_EXT2_FS_SECURITY is not set
+# CONFIG_EXT2_FS_XIP is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4_FS is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+CONFIG_AUTOFS4_FS=y
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+CONFIG_FSCACHE=y
+# CONFIG_FSCACHE_STATS is not set
+# CONFIG_FSCACHE_HISTOGRAM is not set
+# CONFIG_FSCACHE_DEBUG is not set
+# CONFIG_FSCACHE_OBJECT_LIST is not set
+# CONFIG_CACHEFILES is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+# CONFIG_LOGFS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_NLS is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_DEBUG_KERNEL is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_FRAME_POINTER=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_LKDTM is not set
+# CONFIG_LATENCYTOP is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_RING_BUFFER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
+CONFIG_TRACING_SUPPORT=y
+# CONFIG_FTRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_ARM_UNWIND is not set
+# CONFIG_DEBUG_USER is not set
+# CONFIG_OC_ETM is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
+# CONFIG_CRYPTO is not set
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+CONFIG_CRC_CCITT=y
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_DECOMPRESS_BZIP2=y
+CONFIG_DECOMPRESS_LZMA=y
+CONFIG_DECOMPRESS_LZO=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/arm/configs/devkit8000_defconfig b/arch/arm/configs/devkit8000_defconfig
index 61a817e8cf81..c7a68202fa3f 100644
--- a/arch/arm/configs/devkit8000_defconfig
+++ b/arch/arm/configs/devkit8000_defconfig
@@ -1,13 +1,14 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc6
-# Thu Feb 4 15:42:56 2010
+# Linux kernel version: 2.6.34-rc2
+# Wed Mar 24 13:27:25 2010
#
CONFIG_ARM=y
CONFIG_SYS_SUPPORTS_APM_EMULATION=y
CONFIG_GENERIC_GPIO=y
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_HAVE_PROC_CPU=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_STACKTRACE_SUPPORT=y
CONFIG_HAVE_LATENCYTOP_SUPPORT=y
@@ -19,7 +20,9 @@ CONFIG_RWSEM_GENERIC_SPINLOCK=y
CONFIG_ARCH_HAS_CPUFREQ=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_ARM_L1_CACHE_SHIFT_6=y
CONFIG_VECTORS_BASE=0xffff0000
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -60,11 +63,6 @@ CONFIG_RCU_FANOUT=32
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_GROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-# CONFIG_RT_GROUP_SCHED is not set
-CONFIG_USER_SCHED=y
-# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
@@ -96,10 +94,14 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
#
# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
@@ -170,7 +172,7 @@ CONFIG_INLINE_WRITE_UNLOCK=y
CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
# CONFIG_MUTEX_SPIN_ON_OWNER is not set
-# CONFIG_FREEZER is not set
+CONFIG_FREEZER=y
#
# System Type
@@ -181,6 +183,7 @@ CONFIG_MMU=y
# CONFIG_ARCH_REALVIEW is not set
# CONFIG_ARCH_VERSATILE is not set
# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_BCMRING is not set
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_GEMINI is not set
# CONFIG_ARCH_EBSA110 is not set
@@ -190,7 +193,6 @@ CONFIG_MMU=y
# CONFIG_ARCH_STMP3XXX is not set
# CONFIG_ARCH_NETX is not set
# CONFIG_ARCH_H720X is not set
-# CONFIG_ARCH_NOMADIK is not set
# CONFIG_ARCH_IOP13XX is not set
# CONFIG_ARCH_IOP32X is not set
# CONFIG_ARCH_IOP33X is not set
@@ -207,21 +209,26 @@ CONFIG_MMU=y
# CONFIG_ARCH_KS8695 is not set
# CONFIG_ARCH_NS9XXX is not set
# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_NUC93X is not set
# CONFIG_ARCH_PNX4008 is not set
# CONFIG_ARCH_PXA is not set
# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_SHMOBILE is not set
# CONFIG_ARCH_RPC is not set
# CONFIG_ARCH_SA1100 is not set
# CONFIG_ARCH_S3C2410 is not set
# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5P6440 is not set
+# CONFIG_ARCH_S5P6442 is not set
# CONFIG_ARCH_S5PC1XX is not set
+# CONFIG_ARCH_S5PV210 is not set
# CONFIG_ARCH_SHARK is not set
# CONFIG_ARCH_LH7A40X is not set
# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_U8500 is not set
+# CONFIG_ARCH_NOMADIK is not set
# CONFIG_ARCH_DAVINCI is not set
CONFIG_ARCH_OMAP=y
-# CONFIG_ARCH_BCMRING is not set
-# CONFIG_ARCH_U8500 is not set
#
# TI OMAP Implementations
@@ -237,16 +244,20 @@ CONFIG_ARCH_OMAP3=y
# OMAP Feature Selections
#
# CONFIG_OMAP_RESET_CLOCKS is not set
-# CONFIG_OMAP_MUX is not set
+CONFIG_OMAP_MUX=y
+# CONFIG_OMAP_MUX_DEBUG is not set
+CONFIG_OMAP_MUX_WARNINGS=y
CONFIG_OMAP_MCBSP=y
# CONFIG_OMAP_MBOX_FWK is not set
# CONFIG_OMAP_MPU_TIMER is not set
CONFIG_OMAP_32K_TIMER=y
+# CONFIG_OMAP3_L2_AUX_SECURE_SAVE_RESTORE is not set
CONFIG_OMAP_32K_TIMER_HZ=128
CONFIG_OMAP_DM_TIMER=y
# CONFIG_OMAP_PM_NONE is not set
CONFIG_OMAP_PM_NOOP=y
CONFIG_ARCH_OMAP3430=y
+CONFIG_OMAP_PACKAGE_CUS=y
#
# OMAP Board Type
@@ -295,6 +306,7 @@ CONFIG_ARM_THUMB=y
# CONFIG_CPU_BPREDICT_DISABLE is not set
CONFIG_HAS_TLS_REG=y
CONFIG_ARM_L1_CACHE_SHIFT=6
+CONFIG_CPU_HAS_PMU=y
# CONFIG_ARM_ERRATA_430973 is not set
# CONFIG_ARM_ERRATA_458693 is not set
# CONFIG_ARM_ERRATA_460075 is not set
@@ -387,7 +399,14 @@ CONFIG_HAVE_AOUT=y
#
# Power management options
#
-# CONFIG_PM is not set
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+# CONFIG_APM_EMULATION is not set
+# CONFIG_PM_RUNTIME is not set
+CONFIG_PM_OPS=y
CONFIG_ARCH_SUSPEND_POSSIBLE=y
CONFIG_NET=y
@@ -395,7 +414,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
@@ -666,6 +684,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -717,6 +736,7 @@ CONFIG_NET_ETHERNET=y
CONFIG_MII=y
# CONFIG_AX88796 is not set
# CONFIG_SMC91X is not set
+# CONFIG_TI_DAVINCI_EMAC is not set
CONFIG_DM9000=y
CONFIG_DM9000_DEBUGLEVEL=4
CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL=y
@@ -863,6 +883,7 @@ CONFIG_SERIAL_8250_RSA=y
# CONFIG_SERIAL_MAX3100 is not set
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
@@ -891,6 +912,7 @@ CONFIG_I2C_HELPER_AUTO=y
# CONFIG_I2C_OCORES is not set
CONFIG_I2C_OMAP=y
# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
#
# External I2C/SMBus adapter drivers
@@ -904,15 +926,9 @@ CONFIG_I2C_OMAP=y
#
# CONFIG_I2C_PCA_PLATFORM is not set
# CONFIG_I2C_STUB is not set
-
-#
-# Miscellaneous I2C Chip support
-#
-# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
CONFIG_SPI=y
# CONFIG_SPI_DEBUG is not set
CONFIG_SPI_MASTER=y
@@ -944,10 +960,12 @@ CONFIG_GPIOLIB=y
#
# Memory mapped GPIO expanders:
#
+# CONFIG_GPIO_IT8761E is not set
#
# I2C GPIO expanders:
#
+# CONFIG_GPIO_MAX7300 is not set
# CONFIG_GPIO_MAX732X is not set
# CONFIG_GPIO_PCA953X is not set
# CONFIG_GPIO_PCF857X is not set
@@ -984,10 +1002,12 @@ CONFIG_SSB_POSSIBLE=y
# Multifunction device drivers
#
CONFIG_MFD_CORE=y
+# CONFIG_MFD_88PM860X is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_MFD_ASIC3 is not set
# CONFIG_HTC_EGPIO is not set
# CONFIG_HTC_PASIC3 is not set
+# CONFIG_HTC_I2CPLD is not set
# CONFIG_TPS65010 is not set
CONFIG_TWL4030_CORE=y
CONFIG_TWL4030_POWER=y
@@ -998,22 +1018,25 @@ CONFIG_TWL4030_CODEC=y
# CONFIG_MFD_TC6393XB is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
# CONFIG_MFD_PCF50633 is not set
# CONFIG_MFD_MC13783 is not set
# CONFIG_AB3100_CORE is not set
# CONFIG_EZX_PCAP is not set
-# CONFIG_MFD_88PM8607 is not set
# CONFIG_AB4500_CORE is not set
CONFIG_REGULATOR=y
# CONFIG_REGULATOR_DEBUG is not set
+# CONFIG_REGULATOR_DUMMY is not set
# CONFIG_REGULATOR_FIXED_VOLTAGE is not set
# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
# CONFIG_REGULATOR_BQ24022 is not set
# CONFIG_REGULATOR_MAX1586 is not set
+# CONFIG_REGULATOR_MAX8649 is not set
# CONFIG_REGULATOR_MAX8660 is not set
CONFIG_REGULATOR_TWL4030=y
# CONFIG_REGULATOR_LP3971 is not set
@@ -1072,7 +1095,6 @@ CONFIG_OMAP2_DSS_VENC=y
CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=0
CONFIG_FB_OMAP2=y
CONFIG_FB_OMAP2_DEBUG_SUPPORT=y
-# CONFIG_FB_OMAP2_FORCE_AUTO_UPDATE is not set
CONFIG_FB_OMAP2_NUM_FBS=3
#
@@ -1080,7 +1102,9 @@ CONFIG_FB_OMAP2_NUM_FBS=3
#
CONFIG_PANEL_GENERIC=y
# CONFIG_PANEL_SHARP_LS037V7DW01 is not set
-CONFIG_PANEL_INNOLUX_AT070TN83=y
+# CONFIG_PANEL_SHARP_LQ043T1DG01 is not set
+# CONFIG_PANEL_TOPPOLY_TDO35S is not set
+# CONFIG_PANEL_TPO_TD043MTEA1 is not set
# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
#
@@ -1136,6 +1160,7 @@ CONFIG_SND_ARM=y
CONFIG_SND_SPI=y
CONFIG_SND_USB=y
# CONFIG_SND_USB_AUDIO is not set
+# CONFIG_SND_USB_UA101 is not set
# CONFIG_SND_USB_CAIAQ is not set
CONFIG_SND_SOC=y
CONFIG_SND_OMAP_SOC=y
@@ -1147,42 +1172,44 @@ CONFIG_SND_SOC_TWL4030=y
# CONFIG_SOUND_PRIME is not set
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
-CONFIG_HIDRAW=y
+# CONFIG_HIDRAW is not set
#
# USB Input Devices
#
CONFIG_USB_HID=y
# CONFIG_HID_PID is not set
-CONFIG_USB_HIDDEV=y
+# CONFIG_USB_HIDDEV is not set
#
# Special HID drivers
#
-CONFIG_HID_A4TECH=y
-CONFIG_HID_APPLE=y
-CONFIG_HID_BELKIN=y
-CONFIG_HID_CHERRY=y
-CONFIG_HID_CHICONY=y
-CONFIG_HID_CYPRESS=y
+# CONFIG_HID_3M_PCT is not set
+# CONFIG_HID_A4TECH is not set
+# CONFIG_HID_APPLE is not set
+# CONFIG_HID_BELKIN is not set
+# CONFIG_HID_CHERRY is not set
+# CONFIG_HID_CHICONY is not set
+# CONFIG_HID_CYPRESS is not set
# CONFIG_HID_DRAGONRISE is not set
-CONFIG_HID_EZKEY=y
+# CONFIG_HID_EZKEY is not set
# CONFIG_HID_KYE is not set
-CONFIG_HID_GYRATION=y
+# CONFIG_HID_GYRATION is not set
# CONFIG_HID_TWINHAN is not set
# CONFIG_HID_KENSINGTON is not set
-CONFIG_HID_LOGITECH=y
-# CONFIG_LOGITECH_FF is not set
-# CONFIG_LOGIRUMBLEPAD2_FF is not set
-CONFIG_HID_MICROSOFT=y
-CONFIG_HID_MONTEREY=y
+# CONFIG_HID_LOGITECH is not set
+# CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MOSART is not set
+# CONFIG_HID_MONTEREY is not set
# CONFIG_HID_NTRIG is not set
-CONFIG_HID_PANTHERLORD=y
-# CONFIG_PANTHERLORD_FF is not set
-CONFIG_HID_PETALYNX=y
-CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SONY=y
-CONFIG_HID_SUNPLUS=y
+# CONFIG_HID_ORTEK is not set
+# CONFIG_HID_PANTHERLORD is not set
+# CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_QUANTA is not set
+# CONFIG_HID_SAMSUNG is not set
+# CONFIG_HID_SONY is not set
+# CONFIG_HID_STANTUM is not set
+# CONFIG_HID_SUNPLUS is not set
# CONFIG_HID_GREENASIA is not set
# CONFIG_HID_SMARTJOYPLUS is not set
# CONFIG_HID_TOPSEED is not set
@@ -1193,7 +1220,7 @@ CONFIG_USB_ARCH_HAS_HCD=y
CONFIG_USB_ARCH_HAS_OHCI=y
CONFIG_USB_ARCH_HAS_EHCI=y
CONFIG_USB=y
-# CONFIG_USB_DEBUG is not set
+CONFIG_USB_DEBUG=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
#
@@ -1202,7 +1229,7 @@ CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
# CONFIG_USB_DEVICEFS is not set
# CONFIG_USB_DEVICE_CLASS is not set
# CONFIG_USB_DYNAMIC_MINORS is not set
-# CONFIG_USB_OTG is not set
+CONFIG_USB_OTG=y
# CONFIG_USB_OTG_WHITELIST is not set
# CONFIG_USB_OTG_BLACKLIST_HUB is not set
CONFIG_USB_MON=y
@@ -1230,15 +1257,15 @@ CONFIG_USB_MUSB_SOC=y
#
# OMAP 343x high speed USB support
#
-CONFIG_USB_MUSB_HOST=y
+# CONFIG_USB_MUSB_HOST is not set
# CONFIG_USB_MUSB_PERIPHERAL is not set
-# CONFIG_USB_MUSB_OTG is not set
-# CONFIG_USB_GADGET_MUSB_HDRC is not set
+CONFIG_USB_MUSB_OTG=y
+CONFIG_USB_GADGET_MUSB_HDRC=y
CONFIG_USB_MUSB_HDRC_HCD=y
# CONFIG_MUSB_PIO_ONLY is not set
CONFIG_USB_INVENTRA_DMA=y
# CONFIG_USB_TI_CPPI_DMA is not set
-# CONFIG_USB_MUSB_DEBUG is not set
+CONFIG_USB_MUSB_DEBUG=y
#
# USB Device Class drivers
@@ -1291,7 +1318,6 @@ CONFIG_USB_STORAGE=m
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
-# CONFIG_USB_BERRY_CHARGE is not set
# CONFIG_USB_LED is not set
# CONFIG_USB_CYPRESS_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set
@@ -1304,9 +1330,8 @@ CONFIG_USB_STORAGE=m
# CONFIG_USB_IOWARRIOR is not set
# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
-# CONFIG_USB_VST is not set
CONFIG_USB_GADGET=y
-# CONFIG_USB_GADGET_DEBUG is not set
+CONFIG_USB_GADGET_DEBUG=y
# CONFIG_USB_GADGET_DEBUG_FILES is not set
CONFIG_USB_GADGET_VBUS_DRAW=2
CONFIG_USB_GADGET_SELECTED=y
@@ -1314,8 +1339,7 @@ CONFIG_USB_GADGET_SELECTED=y
# CONFIG_USB_GADGET_ATMEL_USBA is not set
# CONFIG_USB_GADGET_FSL_USB2 is not set
# CONFIG_USB_GADGET_LH7A40X is not set
-CONFIG_USB_GADGET_OMAP=y
-CONFIG_USB_OMAP=y
+# CONFIG_USB_GADGET_OMAP is not set
# CONFIG_USB_GADGET_PXA25X is not set
# CONFIG_USB_GADGET_R8A66597 is not set
# CONFIG_USB_GADGET_PXA27X is not set
@@ -1330,19 +1354,20 @@ CONFIG_USB_OMAP=y
# CONFIG_USB_GADGET_GOKU is not set
# CONFIG_USB_GADGET_LANGWELL is not set
# CONFIG_USB_GADGET_DUMMY_HCD is not set
-# CONFIG_USB_GADGET_DUALSPEED is not set
+CONFIG_USB_GADGET_DUALSPEED=y
# CONFIG_USB_ZERO is not set
-CONFIG_USB_AUDIO=m
-CONFIG_USB_ETH=m
-CONFIG_USB_ETH_RNDIS=y
-CONFIG_USB_ETH_EEM=y
-CONFIG_USB_GADGETFS=m
+# CONFIG_USB_AUDIO is not set
+CONFIG_USB_ETH=y
+# CONFIG_USB_ETH_RNDIS is not set
+# CONFIG_USB_ETH_EEM is not set
+# CONFIG_USB_GADGETFS is not set
# CONFIG_USB_FILE_STORAGE is not set
# CONFIG_USB_MASS_STORAGE is not set
-CONFIG_USB_G_SERIAL=m
+# CONFIG_USB_G_SERIAL is not set
# CONFIG_USB_MIDI_GADGET is not set
-CONFIG_USB_G_PRINTER=m
+# CONFIG_USB_G_PRINTER is not set
# CONFIG_USB_CDC_COMPOSITE is not set
+# CONFIG_USB_G_NOKIA is not set
# CONFIG_USB_G_MULTI is not set
#
@@ -1373,8 +1398,6 @@ CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=m
# CONFIG_MMC_OMAP is not set
CONFIG_MMC_OMAP_HS=y
-# CONFIG_MMC_AT91 is not set
-# CONFIG_MMC_ATMELMCI is not set
CONFIG_MMC_SPI=m
# CONFIG_MEMSTICK is not set
CONFIG_NEW_LEDS=y
@@ -1392,11 +1415,11 @@ CONFIG_LEDS_GPIO_PLATFORM=y
# CONFIG_LEDS_REGULATOR is not set
# CONFIG_LEDS_BD2802 is not set
# CONFIG_LEDS_LT3593 is not set
+CONFIG_LEDS_TRIGGERS=y
#
# LED Triggers
#
-CONFIG_LEDS_TRIGGERS=y
# CONFIG_LEDS_TRIGGER_TIMER is not set
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
@@ -1580,6 +1603,7 @@ CONFIG_UBIFS_FS=y
CONFIG_UBIFS_FS_LZO=y
CONFIG_UBIFS_FS_ZLIB=y
# CONFIG_UBIFS_FS_DEBUG is not set
+# CONFIG_LOGFS is not set
CONFIG_CRAMFS=y
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -1606,6 +1630,7 @@ CONFIG_SUNRPC_GSS=y
CONFIG_RPCSEC_GSS_KRB5=y
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
diff --git a/arch/arm/configs/mx51_defconfig b/arch/arm/configs/mx51_defconfig
index c88e9527a8ec..a708fd6d6ffe 100644
--- a/arch/arm/configs/mx51_defconfig
+++ b/arch/arm/configs/mx51_defconfig
@@ -809,7 +809,22 @@ CONFIG_SSB_POSSIBLE=y
CONFIG_DUMMY_CONSOLE=y
# CONFIG_SOUND is not set
# CONFIG_HID_SUPPORT is not set
-# CONFIG_USB_SUPPORT is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+CONFIG_USB_ARCH_HAS_EHCI=y
+CONFIG_USB=y
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_EHCI_MXC=y
+
+
CONFIG_MMC=y
# CONFIG_MMC_DEBUG is not set
# CONFIG_MMC_UNSAFE_RESUME is not set
diff --git a/arch/arm/configs/omap3_defconfig b/arch/arm/configs/omap3_defconfig
index d6ad92177324..1efb833147c2 100644
--- a/arch/arm/configs/omap3_defconfig
+++ b/arch/arm/configs/omap3_defconfig
@@ -264,7 +264,7 @@ CONFIG_MACH_OMAP_GENERIC=y
# OMAP Core Type
#
CONFIG_ARCH_OMAP2420=y
-# CONFIG_ARCH_OMAP2430 is not set
+CONFIG_ARCH_OMAP2430=y
CONFIG_ARCH_OMAP3430=y
CONFIG_OMAP_PACKAGE_CBB=y
CONFIG_OMAP_PACKAGE_CUS=y
@@ -276,8 +276,9 @@ CONFIG_OMAP_PACKAGE_CBP=y
CONFIG_MACH_OMAP2_TUSB6010=y
CONFIG_MACH_OMAP_H4=y
CONFIG_MACH_OMAP_APOLLON=y
-# CONFIG_MACH_OMAP_2430SDP is not set
+CONFIG_MACH_OMAP_2430SDP=y
CONFIG_MACH_OMAP3_BEAGLE=y
+CONFIG_MACH_DEVKIT8000=y
CONFIG_MACH_OMAP_LDP=y
CONFIG_MACH_OVERO=y
CONFIG_MACH_OMAP3EVM=y
@@ -390,7 +391,7 @@ CONFIG_ALIGNMENT_TRAP=y
#
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="root=/dev/nfs nfsroot=192.168.0.1:/home/user/buildroot ip=192.168.0.2:192.168.0.1:192.168.0.1:255.255.255.0:tgt:eth0:off rw console=ttyS2,115200n8"
+CONFIG_CMDLINE="root=/dev/mmcblk0p2 rootwait console=ttyS2,115200"
# CONFIG_XIP_KERNEL is not set
CONFIG_KEXEC=y
CONFIG_ATAGS_PROC=y
@@ -443,7 +444,7 @@ CONFIG_BINFMT_MISC=y
#
CONFIG_PM=y
CONFIG_PM_DEBUG=y
-CONFIG_PM_VERBOSE=y
+# CONFIG_PM_VERBOSE is not set
CONFIG_CAN_PM_TRACE=y
CONFIG_PM_SLEEP=y
CONFIG_SUSPEND=y
@@ -1262,7 +1263,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_LIS3_I2C is not set
# CONFIG_THERMAL is not set
CONFIG_WATCHDOG=y
-CONFIG_WATCHDOG_NOWAYOUT=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
#
# Watchdog Device Drivers
@@ -1291,9 +1292,9 @@ CONFIG_MFD_CORE=y
# CONFIG_HTC_EGPIO is not set
# CONFIG_HTC_PASIC3 is not set
# CONFIG_TPS65010 is not set
-# CONFIG_MENELAUS is not set
+CONFIG_MENELAUS=y
CONFIG_TWL4030_CORE=y
-# CONFIG_TWL4030_POWER is not set
+CONFIG_TWL4030_POWER=y
CONFIG_TWL4030_CODEC=y
# CONFIG_MFD_TMIO is not set
# CONFIG_MFD_T7L66XB is not set
@@ -1312,7 +1313,7 @@ CONFIG_TWL4030_CODEC=y
# CONFIG_AB4500_CORE is not set
CONFIG_REGULATOR=y
# CONFIG_REGULATOR_DEBUG is not set
-CONFIG_REGULATOR_FIXED_VOLTAGE=y
+# CONFIG_REGULATOR_FIXED_VOLTAGE is not set
# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
# CONFIG_REGULATOR_BQ24022 is not set
@@ -1320,8 +1321,8 @@ CONFIG_REGULATOR_FIXED_VOLTAGE=y
# CONFIG_REGULATOR_MAX8660 is not set
CONFIG_REGULATOR_TWL4030=y
# CONFIG_REGULATOR_LP3971 is not set
-# CONFIG_REGULATOR_TPS65023 is not set
-# CONFIG_REGULATOR_TPS6507X is not set
+CONFIG_REGULATOR_TPS65023=y
+CONFIG_REGULATOR_TPS6507X=y
# CONFIG_MEDIA_SUPPORT is not set
#
@@ -1358,16 +1359,8 @@ CONFIG_FB_TILEBLITTING=y
# CONFIG_FB_METRONOME is not set
# CONFIG_FB_MB862XX is not set
# CONFIG_FB_BROADSHEET is not set
-CONFIG_FB_OMAP=y
+# CONFIG_FB_OMAP is not set
CONFIG_FB_OMAP_LCD_VGA=y
-# CONFIG_FB_OMAP_031M3R is not set
-# CONFIG_FB_OMAP_048M3R is not set
-CONFIG_FB_OMAP_079M3R=y
-# CONFIG_FB_OMAP_092M9R is not set
-# CONFIG_FB_OMAP_LCDC_EXTERNAL is not set
-# CONFIG_FB_OMAP_LCD_MIPID is not set
-# CONFIG_FB_OMAP_BOOTLOADER_INIT is not set
-CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE=2
# CONFIG_OMAP2_DSS is not set
CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_LCD_CLASS_DEVICE=y
@@ -1686,7 +1679,7 @@ CONFIG_SDIO_UART=y
# MMC/SD/SDIO Host Controller Drivers
#
# CONFIG_MMC_SDHCI is not set
-# CONFIG_MMC_OMAP is not set
+CONFIG_MMC_OMAP=y
CONFIG_MMC_OMAP_HS=y
# CONFIG_MMC_AT91 is not set
# CONFIG_MMC_ATMELMCI is not set
@@ -1751,6 +1744,7 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_PCF8583 is not set
# CONFIG_RTC_DRV_M41T80 is not set
# CONFIG_RTC_DRV_BQ32K is not set
+CONFIG_RTC_DRV_TWL92330=y
CONFIG_RTC_DRV_TWL4030=y
# CONFIG_RTC_DRV_S35390A is not set
# CONFIG_RTC_DRV_FM3130 is not set
diff --git a/arch/arm/configs/omap3_evm_defconfig b/arch/arm/configs/omap3_evm_defconfig
index a6dd6d1af806..b02e371b0997 100644
--- a/arch/arm/configs/omap3_evm_defconfig
+++ b/arch/arm/configs/omap3_evm_defconfig
@@ -911,7 +911,56 @@ CONFIG_DAB=y
#
# CONFIG_VGASTATE is not set
CONFIG_VIDEO_OUTPUT_CONTROL=m
-# CONFIG_FB is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_FB_OMAP_BOOTLOADER_INIT is not set
+CONFIG_OMAP2_VRAM=y
+CONFIG_OMAP2_VRFB=y
+CONFIG_OMAP2_DSS=y
+CONFIG_OMAP2_VRAM_SIZE=4
+# CONFIG_OMAP2_DSS_DEBUG_SUPPORT is not set
+# CONFIG_OMAP2_DSS_RFBI is not set
+CONFIG_OMAP2_DSS_VENC=y
+# CONFIG_OMAP2_DSS_SDI is not set
+# CONFIG_OMAP2_DSS_DSI is not set
+# CONFIG_OMAP2_DSS_FAKE_VSYNC is not set
+CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=4
+CONFIG_FB_OMAP2=y
+# CONFIG_FB_OMAP2_DEBUG_SUPPORT is not set
+# CONFIG_FB_OMAP2_FORCE_AUTO_UPDATE is not set
+CONFIG_FB_OMAP2_NUM_FBS=3
+
+#
+# OMAP2/3 Display Device Drivers
+#
+CONFIG_PANEL_GENERIC=y
+# CONFIG_PANEL_SAMSUNG_LTE430WQ_F0C is not set
+CONFIG_PANEL_SHARP_LS037V7DW01=y
# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
#
diff --git a/arch/arm/configs/rx51_defconfig b/arch/arm/configs/rx51_defconfig
index 473f9e13f08b..56d4928cd4c9 100644
--- a/arch/arm/configs/rx51_defconfig
+++ b/arch/arm/configs/rx51_defconfig
@@ -784,6 +784,7 @@ CONFIG_INPUT_KEYBOARD=y
# CONFIG_KEYBOARD_NEWTON is not set
# CONFIG_KEYBOARD_STOWAWAY is not set
CONFIG_KEYBOARD_GPIO=m
+CONFIG_KEYBOARD_TWL4030=y
# CONFIG_INPUT_MOUSE is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TABLET is not set
@@ -809,6 +810,7 @@ CONFIG_INPUT_MISC=y
# CONFIG_INPUT_POWERMATE is not set
# CONFIG_INPUT_YEALINK is not set
# CONFIG_INPUT_CM109 is not set
+CONFIG_INPUT_TWL4030_PWRBUTTON=y
CONFIG_INPUT_UINPUT=m
#
@@ -1110,7 +1112,40 @@ CONFIG_RADIO_ADAPTERS=y
#
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
-# CONFIG_FB is not set
+CONFIG_FB=y
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+
+# Frame buffer hardware drivers
+#
+CONFIG_OMAP2_VRAM=y
+CONFIG_OMAP2_VRFB=y
+CONFIG_OMAP2_DSS=y
+CONFIG_OMAP2_VRAM_SIZE=0
+# CONFIG_OMAP2_DSS_DEBUG_SUPPORT is not set
+# CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS is not set
+# CONFIG_OMAP2_DSS_DPI is not set
+# CONFIG_OMAP2_DSS_RFBI is not set
+# CONFIG_OMAP2_DSS_VENC is not set
+CONFIG_OMAP2_DSS_SDI=y
+# CONFIG_OMAP2_DSS_DSI is not set
+# CONFIG_OMAP2_DSS_FAKE_VSYNC is not set
+CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=0
+CONFIG_FB_OMAP2=y
+CONFIG_FB_OMAP2_DEBUG_SUPPORT=y
+CONFIG_FB_OMAP2_NUM_FBS=3
+
+#
+# OMAP2/3 Display Device Drivers
+#
+# CONFIG_PANEL_GENERIC is not set
+# CONFIG_PANEL_SHARP_LS037V7DW01 is not set
+# CONFIG_PANEL_SHARP_LQ043T1DG01 is not set
+# CONFIG_PANEL_TOPPOLY_TDO35S is not set
+# CONFIG_PANEL_TPO_TD043MTEA1 is not set
+CONFIG_PANEL_ACX565AKM=y
+
# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
#
@@ -1127,6 +1162,8 @@ CONFIG_DISPLAY_SUPPORT=y
#
# CONFIG_VGA_CONSOLE is not set
CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
CONFIG_SOUND=y
# CONFIG_SOUND_OSS_CORE is not set
CONFIG_SND=y
diff --git a/arch/arm/configs/spear300_defconfig b/arch/arm/configs/spear300_defconfig
new file mode 100644
index 000000000000..35e64d1cb750
--- /dev/null
+++ b/arch/arm/configs/spear300_defconfig
@@ -0,0 +1,773 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.32
+# Tue Mar 23 14:36:23 2010
+#
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=17
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
+# CONFIG_RELAY is not set
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+# CONFIG_EMBEDDED is not set
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+CONFIG_KALLSYMS_EXTRA_PASS=y
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+CONFIG_COMPAT_BRK=y
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+# CONFIG_SLOW_WORK is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_MODVERSIONS=y
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+CONFIG_BLK_DEV_BSG=y
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_FREEZER is not set
+
+#
+# System Type
+#
+CONFIG_MMU=y
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_STMP3XXX is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_NOMADIK is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5PC1XX is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_DAVINCI is not set
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_ARCH_BCMRING is not set
+CONFIG_PLAT_SPEAR=y
+CONFIG_ARCH_SPEAR3XX=y
+# CONFIG_ARCH_SPEAR6XX is not set
+CONFIG_MACH_SPEAR300=y
+# CONFIG_MACH_SPEAR310 is not set
+# CONFIG_MACH_SPEAR320 is not set
+CONFIG_BOARD_SPEAR300_EVB=y
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_ARM926T=y
+CONFIG_CPU_32v5=y
+CONFIG_CPU_ABRT_EV5TJ=y
+CONFIG_CPU_PABRT_LEGACY=y
+CONFIG_CPU_CACHE_VIVT=y
+CONFIG_CPU_COPY_V4WB=y
+CONFIG_CPU_TLB_V4WBI=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
+# CONFIG_CPU_CACHE_ROUND_ROBIN is not set
+CONFIG_ARM_L1_CACHE_SHIFT=5
+CONFIG_ARM_VIC=y
+CONFIG_ARM_VIC_NR=2
+CONFIG_COMMON_CLKDEV=y
+
+#
+# Bus support
+#
+CONFIG_ARM_AMBA=y
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_HZ=100
+# CONFIG_AEABI is not set
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+# CONFIG_HIGHMEM is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4096
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0
+CONFIG_ZBOOT_ROM_BSS=0
+CONFIG_CMDLINE=""
+# CONFIG_XIP_KERNEL is not set
+# CONFIG_KEXEC is not set
+
+#
+# CPU Power Management
+#
+# CONFIG_CPU_IDLE is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+# CONFIG_FPE_NWFPE is not set
+# CONFIG_FPE_FASTFPE is not set
+# CONFIG_VFP is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+CONFIG_BINFMT_MISC=y
+
+#
+# Power management options
+#
+# CONFIG_PM is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+# CONFIG_NET is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_MTD is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=16384
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_93CX6 is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+CONFIG_INPUT_FF_MEMLESS=y
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_AMBAKMI is not set
+# CONFIG_SERIO_RAW is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_AMBA_PL010 is not set
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_R3964 is not set
+CONFIG_RAW_DRIVER=y
+CONFIG_MAX_RAW_DEVS=8192
+# CONFIG_TCG_TPM is not set
+# CONFIG_I2C is not set
+# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_SYSFS=y
+
+#
+# Memory mapped GPIO expanders:
+#
+CONFIG_GPIO_PL061=y
+
+#
+# I2C GPIO expanders:
+#
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+
+#
+# AC97 GPIO expanders:
+#
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_ASIC3 is not set
+# CONFIG_HTC_EGPIO is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_MFD_TC6393XB is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_SOUND is not set
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+# CONFIG_EXT2_FS_POSIX_ACL is not set
+CONFIG_EXT2_FS_SECURITY=y
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+CONFIG_EXT3_FS_SECURITY=y
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+# CONFIG_INOTIFY is not set
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+CONFIG_AUTOFS4_FS=m
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+CONFIG_NLS_ASCII=m
+# CONFIG_NLS_ISO8859_1 is not set
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+CONFIG_DEBUG_SPINLOCK=y
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+CONFIG_DEBUG_SPINLOCK_SLEEP=y
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+CONFIG_DEBUG_MEMORY_INIT=y
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_RCU_CPU_STALL_DETECTOR=y
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_DEBUG_USER is not set
+# CONFIG_DEBUG_ERRORS is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_LL is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_CRYPTO is not set
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_GENERIC_FIND_LAST_BIT=y
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+# CONFIG_CRC32 is not set
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_DECOMPRESS_BZIP2=y
+CONFIG_DECOMPRESS_LZMA=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
diff --git a/arch/arm/configs/spear310_defconfig b/arch/arm/configs/spear310_defconfig
new file mode 100644
index 000000000000..cbbfd290bba8
--- /dev/null
+++ b/arch/arm/configs/spear310_defconfig
@@ -0,0 +1,775 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.32
+# Tue Mar 23 14:37:01 2010
+#
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=17
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
+# CONFIG_RELAY is not set
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+# CONFIG_EMBEDDED is not set
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+CONFIG_KALLSYMS_EXTRA_PASS=y
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+CONFIG_COMPAT_BRK=y
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+# CONFIG_SLOW_WORK is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_MODVERSIONS=y
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+CONFIG_BLK_DEV_BSG=y
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_FREEZER is not set
+
+#
+# System Type
+#
+CONFIG_MMU=y
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_STMP3XXX is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_NOMADIK is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5PC1XX is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_DAVINCI is not set
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_ARCH_BCMRING is not set
+CONFIG_PLAT_SPEAR=y
+CONFIG_ARCH_SPEAR3XX=y
+# CONFIG_ARCH_SPEAR6XX is not set
+# CONFIG_MACH_SPEAR300 is not set
+CONFIG_MACH_SPEAR310=y
+# CONFIG_MACH_SPEAR320 is not set
+# CONFIG_BOARD_SPEAR300_EVB is not set
+CONFIG_BOARD_SPEAR310_EVB=y
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_ARM926T=y
+CONFIG_CPU_32v5=y
+CONFIG_CPU_ABRT_EV5TJ=y
+CONFIG_CPU_PABRT_LEGACY=y
+CONFIG_CPU_CACHE_VIVT=y
+CONFIG_CPU_COPY_V4WB=y
+CONFIG_CPU_TLB_V4WBI=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
+# CONFIG_CPU_CACHE_ROUND_ROBIN is not set
+CONFIG_ARM_L1_CACHE_SHIFT=5
+CONFIG_ARM_VIC=y
+CONFIG_ARM_VIC_NR=2
+CONFIG_COMMON_CLKDEV=y
+
+#
+# Bus support
+#
+CONFIG_ARM_AMBA=y
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_HZ=100
+# CONFIG_AEABI is not set
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+# CONFIG_HIGHMEM is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4096
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0
+CONFIG_ZBOOT_ROM_BSS=0
+CONFIG_CMDLINE=""
+# CONFIG_XIP_KERNEL is not set
+# CONFIG_KEXEC is not set
+
+#
+# CPU Power Management
+#
+# CONFIG_CPU_IDLE is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+# CONFIG_FPE_NWFPE is not set
+# CONFIG_FPE_FASTFPE is not set
+# CONFIG_VFP is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+CONFIG_BINFMT_MISC=y
+
+#
+# Power management options
+#
+# CONFIG_PM is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+# CONFIG_NET is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_MTD is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=16384
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_MG_DISK is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_93CX6 is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+CONFIG_INPUT_FF_MEMLESS=y
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_AMBAKMI is not set
+# CONFIG_SERIO_RAW is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_AMBA_PL010 is not set
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_R3964 is not set
+CONFIG_RAW_DRIVER=y
+CONFIG_MAX_RAW_DEVS=8192
+# CONFIG_TCG_TPM is not set
+# CONFIG_I2C is not set
+# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_SYSFS=y
+
+#
+# Memory mapped GPIO expanders:
+#
+CONFIG_GPIO_PL061=y
+
+#
+# I2C GPIO expanders:
+#
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+
+#
+# AC97 GPIO expanders:
+#
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_ASIC3 is not set
+# CONFIG_HTC_EGPIO is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_MFD_TC6393XB is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_SOUND is not set
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+# CONFIG_EXT2_FS_POSIX_ACL is not set
+CONFIG_EXT2_FS_SECURITY=y
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+CONFIG_EXT3_FS_SECURITY=y
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+# CONFIG_INOTIFY is not set
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+CONFIG_AUTOFS4_FS=m
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+CONFIG_NLS_ASCII=m
+# CONFIG_NLS_ISO8859_1 is not set
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+CONFIG_DEBUG_SPINLOCK=y
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+CONFIG_DEBUG_SPINLOCK_SLEEP=y
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+CONFIG_DEBUG_MEMORY_INIT=y
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_RCU_CPU_STALL_DETECTOR=y
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_DEBUG_USER is not set
+# CONFIG_DEBUG_ERRORS is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_LL is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_CRYPTO is not set
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_GENERIC_FIND_LAST_BIT=y
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+# CONFIG_CRC32 is not set
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_DECOMPRESS_BZIP2=y
+CONFIG_DECOMPRESS_LZMA=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
diff --git a/arch/arm/configs/spear320_defconfig b/arch/arm/configs/spear320_defconfig
new file mode 100644
index 000000000000..2ae3c110a21a
--- /dev/null
+++ b/arch/arm/configs/spear320_defconfig
@@ -0,0 +1,775 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.32
+# Tue Mar 23 14:37:12 2010
+#
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=17
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
+# CONFIG_RELAY is not set
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+# CONFIG_EMBEDDED is not set
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+CONFIG_KALLSYMS_EXTRA_PASS=y
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+CONFIG_COMPAT_BRK=y
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+# CONFIG_SLOW_WORK is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_MODVERSIONS=y
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+CONFIG_BLK_DEV_BSG=y
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_FREEZER is not set
+
+#
+# System Type
+#
+CONFIG_MMU=y
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_STMP3XXX is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_NOMADIK is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5PC1XX is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_DAVINCI is not set
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_ARCH_BCMRING is not set
+CONFIG_PLAT_SPEAR=y
+CONFIG_ARCH_SPEAR3XX=y
+# CONFIG_ARCH_SPEAR6XX is not set
+# CONFIG_MACH_SPEAR300 is not set
+# CONFIG_MACH_SPEAR310 is not set
+CONFIG_MACH_SPEAR320=y
+# CONFIG_BOARD_SPEAR300_EVB is not set
+CONFIG_BOARD_SPEAR320_EVB=y
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_ARM926T=y
+CONFIG_CPU_32v5=y
+CONFIG_CPU_ABRT_EV5TJ=y
+CONFIG_CPU_PABRT_LEGACY=y
+CONFIG_CPU_CACHE_VIVT=y
+CONFIG_CPU_COPY_V4WB=y
+CONFIG_CPU_TLB_V4WBI=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
+# CONFIG_CPU_CACHE_ROUND_ROBIN is not set
+CONFIG_ARM_L1_CACHE_SHIFT=5
+CONFIG_ARM_VIC=y
+CONFIG_ARM_VIC_NR=2
+CONFIG_COMMON_CLKDEV=y
+
+#
+# Bus support
+#
+CONFIG_ARM_AMBA=y
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_HZ=100
+# CONFIG_AEABI is not set
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+# CONFIG_HIGHMEM is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4096
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0
+CONFIG_ZBOOT_ROM_BSS=0
+CONFIG_CMDLINE=""
+# CONFIG_XIP_KERNEL is not set
+# CONFIG_KEXEC is not set
+
+#
+# CPU Power Management
+#
+# CONFIG_CPU_IDLE is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+# CONFIG_FPE_NWFPE is not set
+# CONFIG_FPE_FASTFPE is not set
+# CONFIG_VFP is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+CONFIG_BINFMT_MISC=y
+
+#
+# Power management options
+#
+# CONFIG_PM is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+# CONFIG_NET is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_MTD is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=16384
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_MG_DISK is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_93CX6 is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+CONFIG_INPUT_FF_MEMLESS=y
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_AMBAKMI is not set
+# CONFIG_SERIO_RAW is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_AMBA_PL010 is not set
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_R3964 is not set
+CONFIG_RAW_DRIVER=y
+CONFIG_MAX_RAW_DEVS=8192
+# CONFIG_TCG_TPM is not set
+# CONFIG_I2C is not set
+# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_SYSFS=y
+
+#
+# Memory mapped GPIO expanders:
+#
+CONFIG_GPIO_PL061=y
+
+#
+# I2C GPIO expanders:
+#
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+
+#
+# AC97 GPIO expanders:
+#
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_ASIC3 is not set
+# CONFIG_HTC_EGPIO is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_MFD_TC6393XB is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_SOUND is not set
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+# CONFIG_EXT2_FS_POSIX_ACL is not set
+CONFIG_EXT2_FS_SECURITY=y
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+CONFIG_EXT3_FS_SECURITY=y
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+# CONFIG_INOTIFY is not set
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+CONFIG_AUTOFS4_FS=m
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+CONFIG_NLS_ASCII=m
+# CONFIG_NLS_ISO8859_1 is not set
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+CONFIG_DEBUG_SPINLOCK=y
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+CONFIG_DEBUG_SPINLOCK_SLEEP=y
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+CONFIG_DEBUG_MEMORY_INIT=y
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_RCU_CPU_STALL_DETECTOR=y
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_DEBUG_USER is not set
+# CONFIG_DEBUG_ERRORS is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_LL is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_CRYPTO is not set
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_GENERIC_FIND_LAST_BIT=y
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+# CONFIG_CRC32 is not set
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_DECOMPRESS_BZIP2=y
+CONFIG_DECOMPRESS_LZMA=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
diff --git a/arch/arm/configs/spear600_defconfig b/arch/arm/configs/spear600_defconfig
new file mode 100644
index 000000000000..c85a02924ec5
--- /dev/null
+++ b/arch/arm/configs/spear600_defconfig
@@ -0,0 +1,760 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.32
+# Tue Mar 23 14:37:26 2010
+#
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=17
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
+# CONFIG_RELAY is not set
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+# CONFIG_EMBEDDED is not set
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+CONFIG_KALLSYMS_EXTRA_PASS=y
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+CONFIG_COMPAT_BRK=y
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+# CONFIG_SLOW_WORK is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_MODVERSIONS=y
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+CONFIG_BLK_DEV_BSG=y
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_FREEZER is not set
+
+#
+# System Type
+#
+CONFIG_MMU=y
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_STMP3XXX is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_NOMADIK is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5PC1XX is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_DAVINCI is not set
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_ARCH_BCMRING is not set
+CONFIG_PLAT_SPEAR=y
+# CONFIG_ARCH_SPEAR3XX is not set
+CONFIG_ARCH_SPEAR6XX=y
+# CONFIG_MACH_SPEAR300 is not set
+# CONFIG_MACH_SPEAR310 is not set
+# CONFIG_MACH_SPEAR320 is not set
+# CONFIG_BOARD_SPEAR300_EVB is not set
+CONFIG_MACH_SPEAR600=y
+CONFIG_BOARD_SPEAR600_EVB=y
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_ARM926T=y
+CONFIG_CPU_32v5=y
+CONFIG_CPU_ABRT_EV5TJ=y
+CONFIG_CPU_PABRT_LEGACY=y
+CONFIG_CPU_CACHE_VIVT=y
+CONFIG_CPU_COPY_V4WB=y
+CONFIG_CPU_TLB_V4WBI=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
+# CONFIG_CPU_CACHE_ROUND_ROBIN is not set
+CONFIG_ARM_L1_CACHE_SHIFT=5
+CONFIG_ARM_VIC=y
+CONFIG_ARM_VIC_NR=2
+CONFIG_COMMON_CLKDEV=y
+
+#
+# Bus support
+#
+CONFIG_ARM_AMBA=y
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_HZ=100
+# CONFIG_AEABI is not set
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+# CONFIG_HIGHMEM is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4096
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0
+CONFIG_ZBOOT_ROM_BSS=0
+CONFIG_CMDLINE=""
+# CONFIG_XIP_KERNEL is not set
+# CONFIG_KEXEC is not set
+
+#
+# CPU Power Management
+#
+# CONFIG_CPU_IDLE is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+# CONFIG_FPE_NWFPE is not set
+# CONFIG_FPE_FASTFPE is not set
+# CONFIG_VFP is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+CONFIG_BINFMT_MISC=y
+# CONFIG_ARTHUR is not set
+
+#
+# Power management options
+#
+# CONFIG_PM is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+# CONFIG_NET is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_MTD is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=16384
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_93CX6 is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+CONFIG_INPUT_FF_MEMLESS=y
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_AMBAKMI is not set
+# CONFIG_SERIO_RAW is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_AMBA_PL010 is not set
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_R3964 is not set
+CONFIG_RAW_DRIVER=y
+CONFIG_MAX_RAW_DEVS=8192
+# CONFIG_TCG_TPM is not set
+# CONFIG_I2C is not set
+# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_SYSFS=y
+
+#
+# Memory mapped GPIO expanders:
+#
+CONFIG_GPIO_PL061=y
+
+#
+# I2C GPIO expanders:
+#
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+
+#
+# AC97 GPIO expanders:
+#
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+# CONFIG_EXT2_FS_POSIX_ACL is not set
+CONFIG_EXT2_FS_SECURITY=y
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+CONFIG_EXT3_FS_SECURITY=y
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+# CONFIG_INOTIFY is not set
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+CONFIG_AUTOFS4_FS=m
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+CONFIG_NLS_ASCII=m
+# CONFIG_NLS_ISO8859_1 is not set
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+CONFIG_DEBUG_SPINLOCK=y
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+CONFIG_DEBUG_SPINLOCK_SLEEP=y
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+CONFIG_DEBUG_MEMORY_INIT=y
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_RCU_CPU_STALL_DETECTOR=y
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_DEBUG_USER is not set
+# CONFIG_DEBUG_ERRORS is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_LL is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_CRYPTO is not set
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_GENERIC_FIND_LAST_BIT=y
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+# CONFIG_CRC32 is not set
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_DECOMPRESS_BZIP2=y
+CONFIG_DECOMPRESS_LZMA=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
diff --git a/arch/arm/configs/stamp9g20_defconfig b/arch/arm/configs/stamp9g20_defconfig
new file mode 100644
index 000000000000..06a8293c61ca
--- /dev/null
+++ b/arch/arm/configs/stamp9g20_defconfig
@@ -0,0 +1,1456 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.34-rc1
+# Wed Mar 17 16:38:03 2010
+#
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_HAVE_PROC_CPU=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_LOCK_KERNEL=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+# CONFIG_TREE_RCU is not set
+CONFIG_TREE_PREEMPT_RCU=y
+# CONFIG_TINY_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_CGROUPS is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
+# CONFIG_RELAY is not set
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_RD_LZO=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+# CONFIG_EMBEDDED is not set
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
+
+#
+# Kernel Performance Events And Counters
+#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_COMPAT_BRK=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_SLOW_WORK is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_SPIN_UNLOCK is not set
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_READ_UNLOCK is not set
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+# CONFIG_INLINE_READ_UNLOCK_IRQ is not set
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_WRITE_UNLOCK is not set
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
+CONFIG_FREEZER=y
+
+#
+# System Type
+#
+CONFIG_MMU=y
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+CONFIG_ARCH_AT91=y
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_STMP3XXX is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_NOMADIK is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_DOVE is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_NUC93X is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5PC1XX is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_DAVINCI is not set
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_ARCH_BCMRING is not set
+# CONFIG_ARCH_U8500 is not set
+CONFIG_HAVE_AT91_USART3=y
+CONFIG_HAVE_AT91_USART4=y
+CONFIG_HAVE_AT91_USART5=y
+
+#
+# Atmel AT91 System-on-Chip
+#
+# CONFIG_ARCH_AT91RM9200 is not set
+# CONFIG_ARCH_AT91SAM9260 is not set
+# CONFIG_ARCH_AT91SAM9261 is not set
+# CONFIG_ARCH_AT91SAM9G10 is not set
+# CONFIG_ARCH_AT91SAM9263 is not set
+# CONFIG_ARCH_AT91SAM9RL is not set
+CONFIG_ARCH_AT91SAM9G20=y
+# CONFIG_ARCH_AT91SAM9G45 is not set
+# CONFIG_ARCH_AT91CAP9 is not set
+# CONFIG_ARCH_AT572D940HF is not set
+# CONFIG_ARCH_AT91X40 is not set
+CONFIG_AT91_PMC_UNIT=y
+
+#
+# AT91SAM9G20 Board Type
+#
+# CONFIG_MACH_AT91SAM9G20EK is not set
+# CONFIG_MACH_AT91SAM9G20EK_2MMC is not set
+# CONFIG_MACH_CPU9G20 is not set
+CONFIG_MACH_PORTUXG20=y
+CONFIG_MACH_STAMP9G20=y
+
+#
+# AT91 Board Options
+#
+
+#
+# AT91 Feature Selections
+#
+CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
+CONFIG_AT91_SLOW_CLOCK=y
+CONFIG_AT91_TIMER_HZ=100
+CONFIG_AT91_EARLY_DBGU=y
+# CONFIG_AT91_EARLY_USART0 is not set
+# CONFIG_AT91_EARLY_USART1 is not set
+# CONFIG_AT91_EARLY_USART2 is not set
+# CONFIG_AT91_EARLY_USART3 is not set
+# CONFIG_AT91_EARLY_USART4 is not set
+# CONFIG_AT91_EARLY_USART5 is not set
+
+#
+# Processor Type
+#
+CONFIG_CPU_ARM926T=y
+CONFIG_CPU_32v5=y
+CONFIG_CPU_ABRT_EV5TJ=y
+CONFIG_CPU_PABRT_LEGACY=y
+CONFIG_CPU_CACHE_VIVT=y
+CONFIG_CPU_COPY_V4WB=y
+CONFIG_CPU_TLB_V4WBI=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
+# CONFIG_CPU_CACHE_ROUND_ROBIN is not set
+CONFIG_ARM_L1_CACHE_SHIFT=5
+
+#
+# Bus support
+#
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+# CONFIG_PREEMPT_NONE is not set
+# CONFIG_PREEMPT_VOLUNTARY is not set
+CONFIG_PREEMPT=y
+CONFIG_HZ=100
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+# CONFIG_HIGHMEM is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=999999
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+# CONFIG_LEDS is not set
+CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="mem=64M console=ttyS0,115200 initrd=0x21100000,3145728 root=/dev/ram0 rw"
+# CONFIG_XIP_KERNEL is not set
+CONFIG_KEXEC=y
+CONFIG_ATAGS_PROC=y
+
+#
+# CPU Power Management
+#
+CONFIG_CPU_IDLE=y
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_CPU_IDLE_GOV_MENU=y
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+# CONFIG_VFP is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Power management options
+#
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+# CONFIG_APM_EMULATION is not set
+# CONFIG_PM_RUNTIME is not set
+CONFIG_PM_OPS=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+# CONFIG_WIRELESS is not set
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AFS_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+# CONFIG_MTD_CFI is not set
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+CONFIG_MTD_DATAFLASH=y
+# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set
+# CONFIG_MTD_DATAFLASH_OTP is not set
+# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SST25L is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+CONFIG_MTD_NAND=y
+# CONFIG_MTD_NAND_VERIFY_WRITE is not set
+# CONFIG_MTD_NAND_ECC_SMC is not set
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
+# CONFIG_MTD_NAND_GPIO is not set
+CONFIG_MTD_NAND_IDS=y
+# CONFIG_MTD_NAND_DISKONCHIP is not set
+CONFIG_MTD_NAND_ATMEL=y
+# CONFIG_MTD_NAND_ATMEL_ECC_HW is not set
+CONFIG_MTD_NAND_ATMEL_ECC_SOFT=y
+# CONFIG_MTD_NAND_ATMEL_ECC_NONE is not set
+# CONFIG_MTD_NAND_NANDSIM is not set
+# CONFIG_MTD_NAND_PLATFORM is not set
+# CONFIG_MTD_ALAUDA is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=8192
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_MG_DISK is not set
+# CONFIG_MISC_DEVICES is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+# CONFIG_CHR_DEV_SCH is not set
+CONFIG_SCSI_MULTI_LUN=y
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+# CONFIG_SCSI_LOWLEVEL is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+CONFIG_NET_ETHERNET=y
+# CONFIG_MII is not set
+CONFIG_MACB=y
+# CONFIG_AX88796 is not set
+# CONFIG_SMC91X is not set
+# CONFIG_DM9000 is not set
+# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMC911X is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
+# CONFIG_KS8851_MLL is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+# CONFIG_INPUT_SPARSEKMAP is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=320
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=240
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_ATMEL=y
+CONFIG_SERIAL_ATMEL_CONSOLE=y
+CONFIG_SERIAL_ATMEL_PDC=y
+# CONFIG_SERIAL_ATMEL_TTYAT is not set
+# CONFIG_SERIAL_MAX3100 is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_DESIGNWARE is not set
+CONFIG_I2C_GPIO=y
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+CONFIG_SPI=y
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+CONFIG_SPI_ATMEL=y
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
+# CONFIG_SPI_XILINX is not set
+# CONFIG_SPI_DESIGNWARE is not set
+
+#
+# SPI Protocol Masters
+#
+CONFIG_SPI_SPIDEV=y
+# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+
+#
+# Memory mapped GPIO expanders:
+#
+# CONFIG_GPIO_IT8761E is not set
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX7300 is not set
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_ADP5588 is not set
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+
+#
+# AC97 GPIO expanders:
+#
+CONFIG_W1=y
+
+#
+# 1-wire Bus Masters
+#
+# CONFIG_W1_MASTER_DS2490 is not set
+# CONFIG_W1_MASTER_DS2482 is not set
+# CONFIG_W1_MASTER_DS1WM is not set
+CONFIG_W1_MASTER_GPIO=y
+
+#
+# 1-wire Slaves
+#
+CONFIG_W1_SLAVE_THERM=y
+# CONFIG_W1_SLAVE_SMEM is not set
+CONFIG_W1_SLAVE_DS2431=y
+# CONFIG_W1_SLAVE_DS2433 is not set
+# CONFIG_W1_SLAVE_DS2760 is not set
+# CONFIG_W1_SLAVE_BQ27000 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_MAX63XX_WATCHDOG is not set
+CONFIG_AT91SAM9X_WATCHDOG=y
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_88PM860X is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_ASIC3 is not set
+# CONFIG_HTC_EGPIO is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_HTC_I2CPLD is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TWL4030_CORE is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_MFD_TC6393XB is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_AB4500_CORE is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_SOUND is not set
+# CONFIG_HID_SUPPORT is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_DEVICE_CLASS is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG is not set
+CONFIG_USB_MON=y
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
+# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
+# CONFIG_USB_MUSB_HDRC is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+
+#
+# also be needed; see USB_STORAGE Help for more info
+#
+CONFIG_USB_STORAGE=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+# CONFIG_USB_LIBUSUAL is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+
+#
+# USB port drivers
+#
+# CONFIG_USB_SERIAL is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_ISIGHTFW is not set
+CONFIG_USB_GADGET=m
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_SELECTED=y
+CONFIG_USB_GADGET_AT91=y
+CONFIG_USB_AT91=m
+# CONFIG_USB_GADGET_ATMEL_USBA is not set
+# CONFIG_USB_GADGET_FSL_USB2 is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_R8A66597 is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C_HSOTG is not set
+# CONFIG_USB_GADGET_IMX is not set
+# CONFIG_USB_GADGET_S3C2410 is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LANGWELL is not set
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+# CONFIG_USB_GADGET_DUALSPEED is not set
+CONFIG_USB_ZERO=m
+# CONFIG_USB_AUDIO is not set
+CONFIG_USB_ETH=m
+CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_ETH_EEM is not set
+# CONFIG_USB_GADGETFS is not set
+CONFIG_USB_FILE_STORAGE=m
+# CONFIG_USB_FILE_STORAGE_TEST is not set
+# CONFIG_USB_MASS_STORAGE is not set
+CONFIG_USB_G_SERIAL=m
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
+# CONFIG_USB_G_NOKIA is not set
+# CONFIG_USB_G_MULTI is not set
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_USB_ULPI is not set
+# CONFIG_NOP_USB_XCEIV is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_AT91 is not set
+CONFIG_MMC_ATMELMCI=y
+# CONFIG_MMC_SPI is not set
+# CONFIG_MEMSTICK is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_PCA9532 is not set
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_GPIO_PLATFORM=y
+# CONFIG_LEDS_LP3944 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_BD2802 is not set
+# CONFIG_LEDS_LT3593 is not set
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_GPIO is not set
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_BQ32K is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+CONFIG_RTC_DRV_AT91SAM9=y
+CONFIG_RTC_DRV_AT91SAM9_RTT=0
+CONFIG_RTC_DRV_AT91SAM9_GPBR=0
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+# CONFIG_MSDOS_FS is not set
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+CONFIG_JFFS2_SUMMARY=y
+# CONFIG_JFFS2_FS_XATTR is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_LOGFS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+CONFIG_NLS_CODEPAGE_850=y
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+CONFIG_NLS_ISO8859_15=y
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+CONFIG_NLS_UTF8=y
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_DEBUG_KERNEL is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_FRAME_POINTER=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_LATENCYTOP is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+# CONFIG_FTRACE is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_ARM_UNWIND is not set
+# CONFIG_DEBUG_USER is not set
+# CONFIG_OC_ETM is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
+# CONFIG_CRYPTO is not set
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_DECOMPRESS_BZIP2=y
+CONFIG_DECOMPRESS_LZMA=y
+CONFIG_DECOMPRESS_LZO=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
+CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 0d08d4170b64..4656a24058d2 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -371,6 +371,10 @@ static inline void __flush_icache_all(void)
#ifdef CONFIG_ARM_ERRATA_411920
extern void v6_icache_inval_all(void);
v6_icache_inval_all();
+#elif defined(CONFIG_SMP) && __LINUX_ARM_ARCH__ >= 7
+ asm("mcr p15, 0, %0, c7, c1, 0 @ invalidate I-cache inner shareable\n"
+ :
+ : "r" (0));
#else
asm("mcr p15, 0, %0, c7, c5, 0 @ invalidate I-cache\n"
:
diff --git a/arch/arm/include/asm/hardware/arm_timer.h b/arch/arm/include/asm/hardware/arm_timer.h
index 04be3bdf46b8..c0f4e7bf22de 100644
--- a/arch/arm/include/asm/hardware/arm_timer.h
+++ b/arch/arm/include/asm/hardware/arm_timer.h
@@ -1,21 +1,30 @@
#ifndef __ASM_ARM_HARDWARE_ARM_TIMER_H
#define __ASM_ARM_HARDWARE_ARM_TIMER_H
-#define TIMER_LOAD 0x00
-#define TIMER_VALUE 0x04
-#define TIMER_CTRL 0x08
-#define TIMER_CTRL_ONESHOT (1 << 0)
-#define TIMER_CTRL_32BIT (1 << 1)
-#define TIMER_CTRL_DIV1 (0 << 2)
-#define TIMER_CTRL_DIV16 (1 << 2)
-#define TIMER_CTRL_DIV256 (2 << 2)
-#define TIMER_CTRL_IE (1 << 5) /* Interrupt Enable (versatile only) */
-#define TIMER_CTRL_PERIODIC (1 << 6)
-#define TIMER_CTRL_ENABLE (1 << 7)
+/*
+ * ARM timer implementation, found in Integrator, Versatile and Realview
+ * platforms. Not all platforms support all registers and bits in these
+ * registers, so we mark them with A for Integrator AP, C for Integrator
+ * CP, V for Versatile and R for Realview.
+ *
+ * Integrator AP has 16-bit timers, Integrator CP, Versatile and Realview
+ * can have 16-bit or 32-bit selectable via a bit in the control register.
+ */
+#define TIMER_LOAD 0x00 /* ACVR rw */
+#define TIMER_VALUE 0x04 /* ACVR ro */
+#define TIMER_CTRL 0x08 /* ACVR rw */
+#define TIMER_CTRL_ONESHOT (1 << 0) /* CVR */
+#define TIMER_CTRL_32BIT (1 << 1) /* CVR */
+#define TIMER_CTRL_DIV1 (0 << 2) /* ACVR */
+#define TIMER_CTRL_DIV16 (1 << 2) /* ACVR */
+#define TIMER_CTRL_DIV256 (2 << 2) /* ACVR */
+#define TIMER_CTRL_IE (1 << 5) /* VR */
+#define TIMER_CTRL_PERIODIC (1 << 6) /* ACVR */
+#define TIMER_CTRL_ENABLE (1 << 7) /* ACVR */
-#define TIMER_INTCLR 0x0c
-#define TIMER_RIS 0x10
-#define TIMER_MIS 0x14
-#define TIMER_BGLOAD 0x18
+#define TIMER_INTCLR 0x0c /* ACVR wo */
+#define TIMER_RIS 0x10 /* CVR ro */
+#define TIMER_MIS 0x14 /* CVR ro */
+#define TIMER_BGLOAD 0x18 /* CVR rw */
#endif
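
For orientation only (not part of the patch): with the register-availability markings above, a Versatile-style timer setup could program a 32-bit periodic tick roughly as follows. The function name, the base pointer and the reload value are assumed for illustration, and TIMER_CTRL_IE applies only to the V/R variants as noted.

#include <linux/io.h>

/* Illustrative sketch: start a 32-bit periodic timer with interrupts enabled. */
static void example_arm_timer_start_periodic(void __iomem *base, u32 reload)
{
        writel(0, base + TIMER_CTRL);           /* stop the timer while reprogramming */
        writel(reload, base + TIMER_LOAD);      /* initial count, reloaded each period */
        writel(1, base + TIMER_INTCLR);         /* any write clears a pending interrupt */
        writel(TIMER_CTRL_32BIT | TIMER_CTRL_PERIODIC | TIMER_CTRL_DIV1 |
               TIMER_CTRL_IE | TIMER_CTRL_ENABLE, base + TIMER_CTRL);
}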
diff --git a/arch/arm/include/asm/hardware/icst.h b/arch/arm/include/asm/hardware/icst.h
new file mode 100644
index 000000000000..10382a3dcec9
--- /dev/null
+++ b/arch/arm/include/asm/hardware/icst.h
@@ -0,0 +1,59 @@
+/*
+ * arch/arm/include/asm/hardware/icst.h
+ *
+ * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Support functions for calculating clocks/divisors for the ICST
+ * clock generators. See http://www.icst.com/ for more information
+ * on these devices.
+ */
+#ifndef ASMARM_HARDWARE_ICST_H
+#define ASMARM_HARDWARE_ICST_H
+
+struct icst_params {
+ unsigned long ref;
+ unsigned long vco_max; /* inclusive */
+ unsigned long vco_min; /* exclusive */
+ unsigned short vd_min; /* inclusive */
+ unsigned short vd_max; /* inclusive */
+ unsigned char rd_min; /* inclusive */
+ unsigned char rd_max; /* inclusive */
+ const unsigned char *s2div; /* chip specific s2div array */
+ const unsigned char *idx2s; /* chip specific idx2s array */
+};
+
+struct icst_vco {
+ unsigned short v;
+ unsigned char r;
+ unsigned char s;
+};
+
+unsigned long icst_hz(const struct icst_params *p, struct icst_vco vco);
+struct icst_vco icst_hz_to_vco(const struct icst_params *p, unsigned long freq);
+
+/*
+ * ICST307 VCO frequency must be between 6MHz and 200MHz (3.3 or 5V).
+ * This frequency is pre-output divider.
+ */
+#define ICST307_VCO_MIN 6000000
+#define ICST307_VCO_MAX 200000000
+
+extern const unsigned char icst307_s2div[];
+extern const unsigned char icst307_idx2s[];
+
+/*
+ * ICST525 VCO frequency must be between 10MHz and 200MHz (3V) or 320MHz (5V).
+ * This frequency is pre-output divider.
+ */
+#define ICST525_VCO_MIN 10000000
+#define ICST525_VCO_MAX_3V 200000000
+#define ICST525_VCO_MAX_5V 320000000
+
+extern const unsigned char icst525_s2div[];
+extern const unsigned char icst525_idx2s[];
+
+#endif
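
Purely as an illustration of the new interface (not part of the patch), a clock driver could round a requested rate to what the VCO can actually produce. Every numeric value below is a placeholder rather than data for any real board:

#include <asm/hardware/icst.h>

static const struct icst_params example_params = {
        .ref     = 24000000,            /* placeholder reference clock, in Hz */
        .vco_min = ICST307_VCO_MIN,
        .vco_max = ICST307_VCO_MAX,
        .vd_min  = 4,                   /* placeholder divider limits */
        .vd_max  = 511,
        .rd_min  = 1,
        .rd_max  = 127,
        .s2div   = icst307_s2div,
        .idx2s   = icst307_idx2s,
};

static unsigned long example_round_rate(unsigned long hz)
{
        struct icst_vco vco = icst_hz_to_vco(&example_params, hz);

        return icst_hz(&example_params, vco);   /* closest rate the VCO can generate */
}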
diff --git a/arch/arm/include/asm/hardware/icst307.h b/arch/arm/include/asm/hardware/icst307.h
deleted file mode 100644
index 554f128a1046..000000000000
--- a/arch/arm/include/asm/hardware/icst307.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * arch/arm/include/asm/hardware/icst307.h
- *
- * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Support functions for calculating clocks/divisors for the ICS307
- * clock generators. See http://www.icst.com/ for more information
- * on these devices.
- *
- * This file is similar to the icst525.h file
- */
-#ifndef ASMARM_HARDWARE_ICST307_H
-#define ASMARM_HARDWARE_ICST307_H
-
-struct icst307_params {
- unsigned long ref;
- unsigned long vco_max; /* inclusive */
- unsigned short vd_min; /* inclusive */
- unsigned short vd_max; /* inclusive */
- unsigned char rd_min; /* inclusive */
- unsigned char rd_max; /* inclusive */
-};
-
-struct icst307_vco {
- unsigned short v;
- unsigned char r;
- unsigned char s;
-};
-
-unsigned long icst307_khz(const struct icst307_params *p, struct icst307_vco vco);
-struct icst307_vco icst307_khz_to_vco(const struct icst307_params *p, unsigned long freq);
-struct icst307_vco icst307_ps_to_vco(const struct icst307_params *p, unsigned long period);
-
-#endif
diff --git a/arch/arm/include/asm/hardware/icst525.h b/arch/arm/include/asm/hardware/icst525.h
deleted file mode 100644
index 58f0dc43e2ed..000000000000
--- a/arch/arm/include/asm/hardware/icst525.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * arch/arm/include/asm/hardware/icst525.h
- *
- * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Support functions for calculating clocks/divisors for the ICST525
- * clock generators. See http://www.icst.com/ for more information
- * on these devices.
- */
-#ifndef ASMARM_HARDWARE_ICST525_H
-#define ASMARM_HARDWARE_ICST525_H
-
-struct icst525_params {
- unsigned long ref;
- unsigned long vco_max; /* inclusive */
- unsigned short vd_min; /* inclusive */
- unsigned short vd_max; /* inclusive */
- unsigned char rd_min; /* inclusive */
- unsigned char rd_max; /* inclusive */
-};
-
-struct icst525_vco {
- unsigned short v;
- unsigned char r;
- unsigned char s;
-};
-
-unsigned long icst525_khz(const struct icst525_params *p, struct icst525_vco vco);
-struct icst525_vco icst525_khz_to_vco(const struct icst525_params *p, unsigned long freq);
-struct icst525_vco icst525_ps_to_vco(const struct icst525_params *p, unsigned long period);
-
-#endif
diff --git a/arch/arm/include/asm/hardware/sp810.h b/arch/arm/include/asm/hardware/sp810.h
new file mode 100644
index 000000000000..a101f10bb5b1
--- /dev/null
+++ b/arch/arm/include/asm/hardware/sp810.h
@@ -0,0 +1,59 @@
+/*
+ * arch/arm/include/asm/hardware/sp810.h
+ *
+ * ARM PrimeXsys System Controller SP810 header file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __ASM_ARM_SP810_H
+#define __ASM_ARM_SP810_H
+
+#include <linux/io.h>
+
+/* sysctl registers offset */
+#define SCCTRL 0x000
+#define SCSYSSTAT 0x004
+#define SCIMCTRL 0x008
+#define SCIMSTAT 0x00C
+#define SCXTALCTRL 0x010
+#define SCPLLCTRL 0x014
+#define SCPLLFCTRL 0x018
+#define SCPERCTRL0 0x01C
+#define SCPERCTRL1 0x020
+#define SCPEREN 0x024
+#define SCPERDIS 0x028
+#define SCPERCLKEN 0x02C
+#define SCPERSTAT 0x030
+#define SCSYSID0 0xEE0
+#define SCSYSID1 0xEE4
+#define SCSYSID2 0xEE8
+#define SCSYSID3 0xEEC
+#define SCITCR 0xF00
+#define SCITIR0 0xF04
+#define SCITIR1 0xF08
+#define SCITOR 0xF0C
+#define SCCNTCTRL 0xF10
+#define SCCNTDATA 0xF14
+#define SCCNTSTEP 0xF18
+#define SCPERIPHID0 0xFE0
+#define SCPERIPHID1 0xFE4
+#define SCPERIPHID2 0xFE8
+#define SCPERIPHID3 0xFEC
+#define SCPCELLID0 0xFF0
+#define SCPCELLID1 0xFF4
+#define SCPCELLID2 0xFF8
+#define SCPCELLID3 0xFFC
+
+static inline void sysctl_soft_reset(void __iomem *base)
+{
+ /* writing any value to SCSYSSTAT reg will reset system */
+ writel(0, base + SCSYSSTAT);
+}
+
+#endif /* __ASM_ARM_SP810_H */
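
A minimal usage sketch, not part of the patch: a board's restart path could remap the system controller (the physical address here is made up) and use the helper above to trigger a reset.

#include <asm/hardware/sp810.h>

#define EXAMPLE_SP810_PHYS      0x10000000      /* placeholder; use the board's real address */

static void example_soft_restart(void)
{
        void __iomem *base = ioremap(EXAMPLE_SP810_PHYS, 0x1000);

        if (base)
                sysctl_soft_reset(base);        /* any write to SCSYSSTAT resets the system */
}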
diff --git a/arch/arm/include/asm/ioctls.h b/arch/arm/include/asm/ioctls.h
index a91d8a1523cf..7f0b6d13296a 100644
--- a/arch/arm/include/asm/ioctls.h
+++ b/arch/arm/include/asm/ioctls.h
@@ -53,6 +53,9 @@
#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
+#define TIOCGRS485 0x542E
+#define TIOCSRS485 0x542F
+
#define FIONCLEX 0x5450 /* these numbers need to be adjusted. */
#define FIOCLEX 0x5451
#define FIOASYNC 0x5452
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
index c4b2ea3fbe42..e51b1e81df05 100644
--- a/arch/arm/include/asm/kmap_types.h
+++ b/arch/arm/include/asm/kmap_types.h
@@ -20,6 +20,7 @@ enum km_type {
KM_SOFTIRQ1,
KM_L1_CACHE,
KM_L2_CACHE,
+ KM_KDB,
KM_TYPE_NR
};
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index a38bdc7afa34..52f0da1e97df 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -8,10 +8,16 @@
* published by the Free Software Foundation.
*/
+#ifndef __ASM_MACH_PCI_H
+#define __ASM_MACH_PCI_H
+
struct pci_sys_data;
struct pci_bus;
struct hw_pci {
+#ifdef CONFIG_PCI_DOMAINS
+ int domain;
+#endif
struct list_head buses;
int nr_controllers;
int (*setup)(int nr, struct pci_sys_data *);
@@ -26,6 +32,9 @@ struct hw_pci {
* Per-controller structure
*/
struct pci_sys_data {
+#ifdef CONFIG_PCI_DOMAINS
+ int domain;
+#endif
struct list_head node;
int busnr; /* primary bus number */
u64 mem_offset; /* bus->cpu memory mapping offset */
@@ -70,3 +79,5 @@ extern int pci_v3_setup(int nr, struct pci_sys_data *);
extern struct pci_bus *pci_v3_scan_bus(int nr, struct pci_sys_data *);
extern void pci_v3_preinit(void);
extern void pci_v3_postinit(void);
+
+#endif /* __ASM_MACH_PCI_H */
diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h
index 8bffc3ff3acf..35d408f6dccf 100644
--- a/arch/arm/include/asm/mach/time.h
+++ b/arch/arm/include/asm/mach/time.h
@@ -38,7 +38,7 @@ struct sys_timer {
void (*init)(void);
void (*suspend)(void);
void (*resume)(void);
-#ifndef CONFIG_GENERIC_TIME
+#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
unsigned long (*offset)(void);
#endif
};
diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h
index 47980118d0a5..92e2a833693d 100644
--- a/arch/arm/include/asm/pci.h
+++ b/arch/arm/include/asm/pci.h
@@ -4,8 +4,23 @@
#ifdef __KERNEL__
#include <asm-generic/pci-dma-compat.h>
+#include <asm/mach/pci.h> /* for pci_sys_data */
#include <mach/hardware.h> /* for PCIBIOS_MIN_* */
+#ifdef CONFIG_PCI_DOMAINS
+static inline int pci_domain_nr(struct pci_bus *bus)
+{
+ struct pci_sys_data *root = bus->sysdata;
+
+ return root->domain;
+}
+
+static inline int pci_proc_domain(struct pci_bus *bus)
+{
+ return pci_domain_nr(bus);
+}
+#endif /* CONFIG_PCI_DOMAINS */
+
#ifdef CONFIG_PCI_HOST_ITE8152
/* ITE bridge requires setting latency timer to avoid early bus access
 termination by PIC bus master devices
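
For context only (not from this patch), a host bridge driver on a multi-domain system would set the new field when registering its controller; the example_* callbacks and the init hook are placeholders for the usual setup/scan code, and the domain value is intended to end up in pci_sys_data (see pci_domain_nr() above):

#include <linux/init.h>
#include <asm/mach/pci.h>

static int example_pci_setup(int nr, struct pci_sys_data *sys);
static struct pci_bus *example_pci_scan(int nr, struct pci_sys_data *sys);

static struct hw_pci example_pci __initdata = {
#ifdef CONFIG_PCI_DOMAINS
        .domain         = 0,            /* reported back by pci_domain_nr() */
#endif
        .nr_controllers = 1,
        .setup          = example_pci_setup,
        .scan           = example_pci_scan,
};

static int __init example_pci_init(void)
{
        pci_common_init(&example_pci);
        return 0;
}
subsys_initcall(example_pci_init);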
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index 49e3049aba32..48837e6d8887 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -28,4 +28,21 @@ set_perf_event_pending(void)
* same indexes here for consistency. */
#define PERF_EVENT_INDEX_OFFSET 1
+/* ARM perf PMU IDs for use by internal perf clients. */
+enum arm_perf_pmu_ids {
+ ARM_PERF_PMU_ID_XSCALE1 = 0,
+ ARM_PERF_PMU_ID_XSCALE2,
+ ARM_PERF_PMU_ID_V6,
+ ARM_PERF_PMU_ID_V6MP,
+ ARM_PERF_PMU_ID_CA8,
+ ARM_PERF_PMU_ID_CA9,
+ ARM_NUM_PMU_IDS,
+};
+
+extern enum arm_perf_pmu_ids
+armpmu_get_pmu_id(void);
+
+extern int
+armpmu_get_max_events(void);
+
#endif /* __ARM_PERF_EVENT_H__ */
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 2829b9f981a1..8ccea012722c 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -12,33 +12,33 @@
#ifndef __ARM_PMU_H__
#define __ARM_PMU_H__
-#ifdef CONFIG_CPU_HAS_PMU
-
-struct pmu_irqs {
- const int *irqs;
- int num_irqs;
+enum arm_pmu_type {
+ ARM_PMU_DEVICE_CPU = 0,
+ ARM_NUM_PMU_DEVICES,
};
+#ifdef CONFIG_CPU_HAS_PMU
+
/**
* reserve_pmu() - reserve the hardware performance counters
*
* Reserve the hardware performance counters in the system for exclusive use.
- * The 'struct pmu_irqs' for the system is returned on success, ERR_PTR()
+ * The platform_device for the system is returned on success, ERR_PTR()
* encoded error on failure.
*/
-extern const struct pmu_irqs *
-reserve_pmu(void);
+extern struct platform_device *
+reserve_pmu(enum arm_pmu_type device);
/**
* release_pmu() - Relinquish control of the performance counters
*
* Release the performance counters and allow someone else to use them.
* Callers must have disabled the counters and released IRQs before calling
- * this. The 'struct pmu_irqs' returned from reserve_pmu() must be passed as
+ * this. The platform_device returned from reserve_pmu() must be passed as
* a cookie.
*/
extern int
-release_pmu(const struct pmu_irqs *irqs);
+release_pmu(struct platform_device *pdev);
/**
* init_pmu() - Initialise the PMU.
@@ -48,24 +48,26 @@ release_pmu(const struct pmu_irqs *irqs);
* the actual hardware initialisation.
*/
extern int
-init_pmu(void);
+init_pmu(enum arm_pmu_type device);
#else /* CONFIG_CPU_HAS_PMU */
-static inline const struct pmu_irqs *
-reserve_pmu(void)
+#include <linux/err.h>
+
+static inline struct platform_device *
+reserve_pmu(enum arm_pmu_type device)
{
return ERR_PTR(-ENODEV);
}
static inline int
-release_pmu(const struct pmu_irqs *irqs)
+release_pmu(struct platform_device *pdev)
{
return -ENODEV;
}
static inline int
-init_pmu(void)
+init_pmu(enum arm_pmu_type device)
{
return -ENODEV;
}
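
A sketch of the reserve/init/release protocol documented in the kerneldoc above, using the new platform_device-based interface; error unwinding is trimmed and example_pmu_handler is a placeholder.

#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <asm/pmu.h>

static irqreturn_t example_pmu_handler(int irq, void *dev)
{
	return IRQ_HANDLED;			/* placeholder overflow handler */
}

static int example_claim_cpu_pmu(void)
{
	struct platform_device *pdev;
	int i, irq, err = 0;

	pdev = reserve_pmu(ARM_PMU_DEVICE_CPU);	/* take exclusive ownership */
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	init_pmu(ARM_PMU_DEVICE_CPU);		/* set per-CPU IRQ affinities */

	for (i = 0; i < pdev->num_resources; ++i) {
		irq = platform_get_irq(pdev, i);
		if (irq < 0)
			continue;
		err = request_irq(irq, example_pmu_handler, IRQF_DISABLED,
				  "example-pmu", NULL);
		if (err)
			break;
	}

	/* On teardown: free_irq() each line, then release_pmu(pdev). */
	return err;
}
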
diff --git a/arch/arm/include/asm/scatterlist.h b/arch/arm/include/asm/scatterlist.h
index ca0a37d03400..bcda59f39941 100644
--- a/arch/arm/include/asm/scatterlist.h
+++ b/arch/arm/include/asm/scatterlist.h
@@ -4,24 +4,8 @@
#include <asm/memory.h>
#include <asm/types.h>
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
- unsigned long sg_magic;
-#endif
- unsigned long page_link;
- unsigned int offset; /* buffer offset */
- dma_addr_t dma_address; /* dma address */
- unsigned int length; /* length */
-};
+#include <asm-generic/scatterlist.h>
-/*
- * These macros should be used after a pci_map_sg call has been done
- * to get bus addresses of each of the SG entries and their lengths.
- * You should only work with the number of sg entries pci_map_sg
- * returns, or alternatively stop on the first sg_dma_len(sg) which
- * is 0.
- */
-#define sg_dma_address(sg) ((sg)->dma_address)
-#define sg_dma_len(sg) ((sg)->length)
+#undef ARCH_HAS_SG_CHAIN
#endif /* _ASMARM_SCATTERLIST_H */
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index e085e2c545eb..bd863d8608cd 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -46,6 +46,9 @@
#define TLB_V7_UIS_FULL (1 << 20)
#define TLB_V7_UIS_ASID (1 << 21)
+/* Inner Shareable BTB operation (ARMv7 MP extensions) */
+#define TLB_V7_IS_BTB (1 << 22)
+
#define TLB_L2CLEAN_FR (1 << 29) /* Feroceon */
#define TLB_DCLEAN (1 << 30)
#define TLB_WB (1 << 31)
@@ -183,7 +186,7 @@
#endif
#ifdef CONFIG_SMP
-#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \
+#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_V7_IS_BTB | \
TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID)
#else
#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \
@@ -339,6 +342,12 @@ static inline void local_flush_tlb_all(void)
dsb();
isb();
}
+ if (tlb_flag(TLB_V7_IS_BTB)) {
+ /* flush the branch target cache */
+ asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
+ dsb();
+ isb();
+ }
}
static inline void local_flush_tlb_mm(struct mm_struct *mm)
@@ -376,6 +385,12 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
dsb();
}
+ if (tlb_flag(TLB_V7_IS_BTB)) {
+ /* flush the branch target cache */
+ asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
+ dsb();
+ isb();
+ }
}
static inline void
@@ -416,6 +431,12 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
dsb();
}
+ if (tlb_flag(TLB_V7_IS_BTB)) {
+ /* flush the branch target cache */
+ asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
+ dsb();
+ isb();
+ }
}
static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
@@ -454,6 +475,12 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
dsb();
isb();
}
+ if (tlb_flag(TLB_V7_IS_BTB)) {
+ /* flush the branch target cache */
+ asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
+ dsb();
+ isb();
+ }
}
/*
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index bd397e0b663e..c6273a3bfc25 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -527,6 +527,9 @@ static void __init pcibios_init_hw(struct hw_pci *hw)
if (!sys)
panic("PCI: unable to allocate sys data!");
+#ifdef CONFIG_PCI_DOMAINS
+ sys->domain = hw->domain;
+#endif
sys->hw = hw;
sys->busnr = busnr;
sys->swizzle = hw->swizzle;
diff --git a/arch/arm/kernel/dma.c b/arch/arm/kernel/dma.c
index 7d5b9fb01e71..2c4a185f92cd 100644
--- a/arch/arm/kernel/dma.c
+++ b/arch/arm/kernel/dma.c
@@ -16,6 +16,8 @@
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
#include <asm/dma.h>
@@ -264,3 +266,37 @@ int get_dma_residue(unsigned int chan)
return ret;
}
EXPORT_SYMBOL(get_dma_residue);
+
+#ifdef CONFIG_PROC_FS
+static int proc_dma_show(struct seq_file *m, void *v)
+{
+ int i;
+
+ for (i = 0 ; i < MAX_DMA_CHANNELS ; i++) {
+ dma_t *dma = dma_channel(i);
+ if (dma && dma->lock)
+ seq_printf(m, "%2d: %s\n", i, dma->device_id);
+ }
+ return 0;
+}
+
+static int proc_dma_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, proc_dma_show, NULL);
+}
+
+static const struct file_operations proc_dma_operations = {
+ .open = proc_dma_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init proc_dma_init(void)
+{
+ proc_create("dma", 0, NULL, &proc_dma_operations);
+ return 0;
+}
+
+__initcall(proc_dma_init);
+#endif
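
For illustration (not part of the patch): the listing above prints one line per claimed channel using the device_id string passed to request_dma(), so a driver appears under whatever name it registered. example_claim_channel and the "example-device" string are placeholders.

#include <asm/dma.h>

static int example_claim_channel(unsigned int chan)
{
	int err = request_dma(chan, "example-device");

	if (err)
		return err;

	/* "cat /proc/dma" now includes a line such as " 2: example-device". */
	return 0;
}
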
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index a5b846b9895d..c868a8864117 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -98,6 +98,11 @@ sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
gdb_regs[_CPSR] = thread_regs->ARM_cpsr;
}
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+ regs->ARM_pc = pc;
+}
+
static int compiled_break;
int kgdb_arch_handle_exception(int exception_vector, int signo,
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 9e70f2053f9a..c45768614c8a 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -16,7 +16,9 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/perf_event.h>
+#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
@@ -26,7 +28,7 @@
#include <asm/pmu.h>
#include <asm/stacktrace.h>
-static const struct pmu_irqs *pmu_irqs;
+static struct platform_device *pmu_device;
/*
* Hardware lock to serialize accesses to PMU registers. Needed for the
@@ -67,8 +69,18 @@ struct cpu_hw_events {
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
+/* PMU names. */
+static const char *arm_pmu_names[] = {
+ [ARM_PERF_PMU_ID_XSCALE1] = "xscale1",
+ [ARM_PERF_PMU_ID_XSCALE2] = "xscale2",
+ [ARM_PERF_PMU_ID_V6] = "v6",
+ [ARM_PERF_PMU_ID_V6MP] = "v6mpcore",
+ [ARM_PERF_PMU_ID_CA8] = "ARMv7 Cortex-A8",
+ [ARM_PERF_PMU_ID_CA9] = "ARMv7 Cortex-A9",
+};
+
struct arm_pmu {
- char *name;
+ enum arm_perf_pmu_ids id;
irqreturn_t (*handle_irq)(int irq_num, void *dev);
void (*enable)(struct hw_perf_event *evt, int idx);
void (*disable)(struct hw_perf_event *evt, int idx);
@@ -87,6 +99,30 @@ struct arm_pmu {
/* Set at runtime when we know what CPU type we are. */
static const struct arm_pmu *armpmu;
+enum arm_perf_pmu_ids
+armpmu_get_pmu_id(void)
+{
+ int id = -ENODEV;
+
+ if (armpmu != NULL)
+ id = armpmu->id;
+
+ return id;
+}
+EXPORT_SYMBOL_GPL(armpmu_get_pmu_id);
+
+int
+armpmu_get_max_events(void)
+{
+ int max_events = 0;
+
+ if (armpmu != NULL)
+ max_events = armpmu->num_events;
+
+ return max_events;
+}
+EXPORT_SYMBOL_GPL(armpmu_get_max_events);
+
#define HW_OP_UNSUPPORTED 0xFFFF
#define C(_x) \
@@ -314,38 +350,44 @@ validate_group(struct perf_event *event)
static int
armpmu_reserve_hardware(void)
{
- int i;
- int err;
+ int i, err = -ENODEV, irq;
- pmu_irqs = reserve_pmu();
- if (IS_ERR(pmu_irqs)) {
+ pmu_device = reserve_pmu(ARM_PMU_DEVICE_CPU);
+ if (IS_ERR(pmu_device)) {
pr_warning("unable to reserve pmu\n");
- return PTR_ERR(pmu_irqs);
+ return PTR_ERR(pmu_device);
}
- init_pmu();
+ init_pmu(ARM_PMU_DEVICE_CPU);
- if (pmu_irqs->num_irqs < 1) {
+ if (pmu_device->num_resources < 1) {
pr_err("no irqs for PMUs defined\n");
return -ENODEV;
}
- for (i = 0; i < pmu_irqs->num_irqs; ++i) {
- err = request_irq(pmu_irqs->irqs[i], armpmu->handle_irq,
+ for (i = 0; i < pmu_device->num_resources; ++i) {
+ irq = platform_get_irq(pmu_device, i);
+ if (irq < 0)
+ continue;
+
+ err = request_irq(irq, armpmu->handle_irq,
IRQF_DISABLED | IRQF_NOBALANCING,
"armpmu", NULL);
if (err) {
- pr_warning("unable to request IRQ%d for ARM "
- "perf counters\n", pmu_irqs->irqs[i]);
+ pr_warning("unable to request IRQ%d for ARM perf "
+ "counters\n", irq);
break;
}
}
if (err) {
- for (i = i - 1; i >= 0; --i)
- free_irq(pmu_irqs->irqs[i], NULL);
- release_pmu(pmu_irqs);
- pmu_irqs = NULL;
+ for (i = i - 1; i >= 0; --i) {
+ irq = platform_get_irq(pmu_device, i);
+ if (irq >= 0)
+ free_irq(irq, NULL);
+ }
+ release_pmu(pmu_device);
+ pmu_device = NULL;
}
return err;
@@ -354,14 +396,17 @@ armpmu_reserve_hardware(void)
static void
armpmu_release_hardware(void)
{
- int i;
+ int i, irq;
- for (i = pmu_irqs->num_irqs - 1; i >= 0; --i)
- free_irq(pmu_irqs->irqs[i], NULL);
+ for (i = pmu_device->num_resources - 1; i >= 0; --i) {
+ irq = platform_get_irq(pmu_device, i);
+ if (irq >= 0)
+ free_irq(irq, NULL);
+ }
armpmu->stop();
- release_pmu(pmu_irqs);
- pmu_irqs = NULL;
+ release_pmu(pmu_device);
+ pmu_device = NULL;
}
static atomic_t active_events = ATOMIC_INIT(0);
@@ -1144,7 +1189,7 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
}
static const struct arm_pmu armv6pmu = {
- .name = "v6",
+ .id = ARM_PERF_PMU_ID_V6,
.handle_irq = armv6pmu_handle_irq,
.enable = armv6pmu_enable_event,
.disable = armv6pmu_disable_event,
@@ -1167,7 +1212,7 @@ static const struct arm_pmu armv6pmu = {
* reset the period and enable the interrupt reporting.
*/
static const struct arm_pmu armv6mpcore_pmu = {
- .name = "v6mpcore",
+ .id = ARM_PERF_PMU_ID_V6MP,
.handle_irq = armv6pmu_handle_irq,
.enable = armv6pmu_enable_event,
.disable = armv6mpcore_pmu_disable_event,
@@ -1197,10 +1242,6 @@ static const struct arm_pmu armv6mpcore_pmu = {
* counter and all 4 performance counters together can be reset separately.
*/
-#define ARMV7_PMU_CORTEX_A8_NAME "ARMv7 Cortex-A8"
-
-#define ARMV7_PMU_CORTEX_A9_NAME "ARMv7 Cortex-A9"
-
/* Common ARMv7 event types */
enum armv7_perf_types {
ARMV7_PERFCTR_PMNC_SW_INCR = 0x00,
@@ -2079,6 +2120,803 @@ static u32 __init armv7_reset_read_pmnc(void)
return nb_cnt + 1;
}
+/*
+ * ARMv5 [xscale] Performance counter handling code.
+ *
+ * Based on xscale OProfile code.
+ *
+ * There are two variants of the xscale PMU that we support:
+ * - xscale1pmu: 2 event counters and a cycle counter
+ * - xscale2pmu: 4 event counters and a cycle counter
+ * The two variants share event definitions, but have different
+ * PMU structures.
+ */
+
+enum xscale_perf_types {
+ XSCALE_PERFCTR_ICACHE_MISS = 0x00,
+ XSCALE_PERFCTR_ICACHE_NO_DELIVER = 0x01,
+ XSCALE_PERFCTR_DATA_STALL = 0x02,
+ XSCALE_PERFCTR_ITLB_MISS = 0x03,
+ XSCALE_PERFCTR_DTLB_MISS = 0x04,
+ XSCALE_PERFCTR_BRANCH = 0x05,
+ XSCALE_PERFCTR_BRANCH_MISS = 0x06,
+ XSCALE_PERFCTR_INSTRUCTION = 0x07,
+ XSCALE_PERFCTR_DCACHE_FULL_STALL = 0x08,
+ XSCALE_PERFCTR_DCACHE_FULL_STALL_CONTIG = 0x09,
+ XSCALE_PERFCTR_DCACHE_ACCESS = 0x0A,
+ XSCALE_PERFCTR_DCACHE_MISS = 0x0B,
+ XSCALE_PERFCTR_DCACHE_WRITE_BACK = 0x0C,
+ XSCALE_PERFCTR_PC_CHANGED = 0x0D,
+ XSCALE_PERFCTR_BCU_REQUEST = 0x10,
+ XSCALE_PERFCTR_BCU_FULL = 0x11,
+ XSCALE_PERFCTR_BCU_DRAIN = 0x12,
+ XSCALE_PERFCTR_BCU_ECC_NO_ELOG = 0x14,
+ XSCALE_PERFCTR_BCU_1_BIT_ERR = 0x15,
+ XSCALE_PERFCTR_RMW = 0x16,
+ /* XSCALE_PERFCTR_CCNT is not hardware defined */
+ XSCALE_PERFCTR_CCNT = 0xFE,
+ XSCALE_PERFCTR_UNUSED = 0xFF,
+};
+
+enum xscale_counters {
+ XSCALE_CYCLE_COUNTER = 1,
+ XSCALE_COUNTER0,
+ XSCALE_COUNTER1,
+ XSCALE_COUNTER2,
+ XSCALE_COUNTER3,
+};
+
+static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = XSCALE_PERFCTR_CCNT,
+ [PERF_COUNT_HW_INSTRUCTIONS] = XSCALE_PERFCTR_INSTRUCTION,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XSCALE_PERFCTR_BRANCH,
+ [PERF_COUNT_HW_BRANCH_MISSES] = XSCALE_PERFCTR_BRANCH_MISS,
+ [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
+};
+
+static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ [C(L1D)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS,
+ [C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS,
+ [C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(BPU)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+};
+
+#define XSCALE_PMU_ENABLE 0x001
+#define XSCALE_PMN_RESET 0x002
+#define XSCALE_CCNT_RESET 0x004
+#define XSCALE_PMU_RESET (XSCALE_CCNT_RESET | XSCALE_PMN_RESET)
+#define XSCALE_PMU_CNT64 0x008
+
+static inline int
+xscalepmu_event_map(int config)
+{
+ int mapping = xscale_perf_map[config];
+ if (HW_OP_UNSUPPORTED == mapping)
+ mapping = -EOPNOTSUPP;
+ return mapping;
+}
+
+static u64
+xscalepmu_raw_event(u64 config)
+{
+ return config & 0xff;
+}
+
+#define XSCALE1_OVERFLOWED_MASK 0x700
+#define XSCALE1_CCOUNT_OVERFLOW 0x400
+#define XSCALE1_COUNT0_OVERFLOW 0x100
+#define XSCALE1_COUNT1_OVERFLOW 0x200
+#define XSCALE1_CCOUNT_INT_EN 0x040
+#define XSCALE1_COUNT0_INT_EN 0x010
+#define XSCALE1_COUNT1_INT_EN 0x020
+#define XSCALE1_COUNT0_EVT_SHFT 12
+#define XSCALE1_COUNT0_EVT_MASK (0xff << XSCALE1_COUNT0_EVT_SHFT)
+#define XSCALE1_COUNT1_EVT_SHFT 20
+#define XSCALE1_COUNT1_EVT_MASK (0xff << XSCALE1_COUNT1_EVT_SHFT)
+
+static inline u32
+xscale1pmu_read_pmnc(void)
+{
+ u32 val;
+ asm volatile("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
+ return val;
+}
+
+static inline void
+xscale1pmu_write_pmnc(u32 val)
+{
+ /* upper 4 bits and bits 7, 11 are write-as-0 */
+ val &= 0xffff77f;
+ asm volatile("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
+}
+
+static inline int
+xscale1_pmnc_counter_has_overflowed(unsigned long pmnc,
+ enum xscale_counters counter)
+{
+ int ret = 0;
+
+ switch (counter) {
+ case XSCALE_CYCLE_COUNTER:
+ ret = pmnc & XSCALE1_CCOUNT_OVERFLOW;
+ break;
+ case XSCALE_COUNTER0:
+ ret = pmnc & XSCALE1_COUNT0_OVERFLOW;
+ break;
+ case XSCALE_COUNTER1:
+ ret = pmnc & XSCALE1_COUNT1_OVERFLOW;
+ break;
+ default:
+ WARN_ONCE(1, "invalid counter number (%d)\n", counter);
+ }
+
+ return ret;
+}
+
+static irqreturn_t
+xscale1pmu_handle_irq(int irq_num, void *dev)
+{
+ unsigned long pmnc;
+ struct perf_sample_data data;
+ struct cpu_hw_events *cpuc;
+ struct pt_regs *regs;
+ int idx;
+
+ /*
+ * NOTE: there's an A stepping erratum that states if an overflow
+ * bit already exists and another occurs, the previous
+ * Overflow bit gets cleared. There's no workaround.
+ * Fixed in B stepping or later.
+ */
+ pmnc = xscale1pmu_read_pmnc();
+
+ /*
+ * Write the value back to clear the overflow flags. Overflow
+ * flags remain in pmnc for use below. We also disable the PMU
+ * while we process the interrupt.
+ */
+ xscale1pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);
+
+ if (!(pmnc & XSCALE1_OVERFLOWED_MASK))
+ return IRQ_NONE;
+
+ regs = get_irq_regs();
+
+ perf_sample_data_init(&data, 0);
+
+ cpuc = &__get_cpu_var(cpu_hw_events);
+ for (idx = 0; idx <= armpmu->num_events; ++idx) {
+ struct perf_event *event = cpuc->events[idx];
+ struct hw_perf_event *hwc;
+
+ if (!test_bit(idx, cpuc->active_mask))
+ continue;
+
+ if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
+ continue;
+
+ hwc = &event->hw;
+ armpmu_event_update(event, hwc, idx);
+ data.period = event->hw.last_period;
+ if (!armpmu_event_set_period(event, hwc, idx))
+ continue;
+
+ if (perf_event_overflow(event, 0, &data, regs))
+ armpmu->disable(hwc, idx);
+ }
+
+ perf_event_do_pending();
+
+ /*
+ * Re-enable the PMU.
+ */
+ pmnc = xscale1pmu_read_pmnc() | XSCALE_PMU_ENABLE;
+ xscale1pmu_write_pmnc(pmnc);
+
+ return IRQ_HANDLED;
+}
+
+static void
+xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
+{
+ unsigned long val, mask, evt, flags;
+
+ switch (idx) {
+ case XSCALE_CYCLE_COUNTER:
+ mask = 0;
+ evt = XSCALE1_CCOUNT_INT_EN;
+ break;
+ case XSCALE_COUNTER0:
+ mask = XSCALE1_COUNT0_EVT_MASK;
+ evt = (hwc->config_base << XSCALE1_COUNT0_EVT_SHFT) |
+ XSCALE1_COUNT0_INT_EN;
+ break;
+ case XSCALE_COUNTER1:
+ mask = XSCALE1_COUNT1_EVT_MASK;
+ evt = (hwc->config_base << XSCALE1_COUNT1_EVT_SHFT) |
+ XSCALE1_COUNT1_INT_EN;
+ break;
+ default:
+ WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+ return;
+ }
+
+ spin_lock_irqsave(&pmu_lock, flags);
+ val = xscale1pmu_read_pmnc();
+ val &= ~mask;
+ val |= evt;
+ xscale1pmu_write_pmnc(val);
+ spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static void
+xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
+{
+ unsigned long val, mask, evt, flags;
+
+ switch (idx) {
+ case XSCALE_CYCLE_COUNTER:
+ mask = XSCALE1_CCOUNT_INT_EN;
+ evt = 0;
+ break;
+ case XSCALE_COUNTER0:
+ mask = XSCALE1_COUNT0_INT_EN | XSCALE1_COUNT0_EVT_MASK;
+ evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT0_EVT_SHFT;
+ break;
+ case XSCALE_COUNTER1:
+ mask = XSCALE1_COUNT1_INT_EN | XSCALE1_COUNT1_EVT_MASK;
+ evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT1_EVT_SHFT;
+ break;
+ default:
+ WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+ return;
+ }
+
+ spin_lock_irqsave(&pmu_lock, flags);
+ val = xscale1pmu_read_pmnc();
+ val &= ~mask;
+ val |= evt;
+ xscale1pmu_write_pmnc(val);
+ spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static int
+xscale1pmu_get_event_idx(struct cpu_hw_events *cpuc,
+ struct hw_perf_event *event)
+{
+ if (XSCALE_PERFCTR_CCNT == event->config_base) {
+ if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask))
+ return -EAGAIN;
+
+ return XSCALE_CYCLE_COUNTER;
+ } else {
+ if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask)) {
+ return XSCALE_COUNTER1;
+ }
+
+ if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask)) {
+ return XSCALE_COUNTER0;
+ }
+
+ return -EAGAIN;
+ }
+}
+
+static void
+xscale1pmu_start(void)
+{
+ unsigned long flags, val;
+
+ spin_lock_irqsave(&pmu_lock, flags);
+ val = xscale1pmu_read_pmnc();
+ val |= XSCALE_PMU_ENABLE;
+ xscale1pmu_write_pmnc(val);
+ spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static void
+xscale1pmu_stop(void)
+{
+ unsigned long flags, val;
+
+ spin_lock_irqsave(&pmu_lock, flags);
+ val = xscale1pmu_read_pmnc();
+ val &= ~XSCALE_PMU_ENABLE;
+ xscale1pmu_write_pmnc(val);
+ spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static inline u32
+xscale1pmu_read_counter(int counter)
+{
+ u32 val = 0;
+
+ switch (counter) {
+ case XSCALE_CYCLE_COUNTER:
+ asm volatile("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
+ break;
+ case XSCALE_COUNTER0:
+ asm volatile("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
+ break;
+ case XSCALE_COUNTER1:
+ asm volatile("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
+ break;
+ }
+
+ return val;
+}
+
+static inline void
+xscale1pmu_write_counter(int counter, u32 val)
+{
+ switch (counter) {
+ case XSCALE_CYCLE_COUNTER:
+ asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
+ break;
+ case XSCALE_COUNTER0:
+ asm volatile("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
+ break;
+ case XSCALE_COUNTER1:
+ asm volatile("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
+ break;
+ }
+}
+
+static const struct arm_pmu xscale1pmu = {
+ .id = ARM_PERF_PMU_ID_XSCALE1,
+ .handle_irq = xscale1pmu_handle_irq,
+ .enable = xscale1pmu_enable_event,
+ .disable = xscale1pmu_disable_event,
+ .event_map = xscalepmu_event_map,
+ .raw_event = xscalepmu_raw_event,
+ .read_counter = xscale1pmu_read_counter,
+ .write_counter = xscale1pmu_write_counter,
+ .get_event_idx = xscale1pmu_get_event_idx,
+ .start = xscale1pmu_start,
+ .stop = xscale1pmu_stop,
+ .num_events = 3,
+ .max_period = (1LLU << 32) - 1,
+};
+
+#define XSCALE2_OVERFLOWED_MASK 0x01f
+#define XSCALE2_CCOUNT_OVERFLOW 0x001
+#define XSCALE2_COUNT0_OVERFLOW 0x002
+#define XSCALE2_COUNT1_OVERFLOW 0x004
+#define XSCALE2_COUNT2_OVERFLOW 0x008
+#define XSCALE2_COUNT3_OVERFLOW 0x010
+#define XSCALE2_CCOUNT_INT_EN 0x001
+#define XSCALE2_COUNT0_INT_EN 0x002
+#define XSCALE2_COUNT1_INT_EN 0x004
+#define XSCALE2_COUNT2_INT_EN 0x008
+#define XSCALE2_COUNT3_INT_EN 0x010
+#define XSCALE2_COUNT0_EVT_SHFT 0
+#define XSCALE2_COUNT0_EVT_MASK (0xff << XSCALE2_COUNT0_EVT_SHFT)
+#define XSCALE2_COUNT1_EVT_SHFT 8
+#define XSCALE2_COUNT1_EVT_MASK (0xff << XSCALE2_COUNT1_EVT_SHFT)
+#define XSCALE2_COUNT2_EVT_SHFT 16
+#define XSCALE2_COUNT2_EVT_MASK (0xff << XSCALE2_COUNT2_EVT_SHFT)
+#define XSCALE2_COUNT3_EVT_SHFT 24
+#define XSCALE2_COUNT3_EVT_MASK (0xff << XSCALE2_COUNT3_EVT_SHFT)
+
+static inline u32
+xscale2pmu_read_pmnc(void)
+{
+ u32 val;
+ asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (val));
+ /* bits 1-2 and 4-23 are read-unpredictable */
+ return val & 0xff000009;
+}
+
+static inline void
+xscale2pmu_write_pmnc(u32 val)
+{
+ /* bits 4-23 are write-as-0, 24-31 are write ignored */
+ val &= 0xf;
+ asm volatile("mcr p14, 0, %0, c0, c1, 0" : : "r" (val));
+}
+
+static inline u32
+xscale2pmu_read_overflow_flags(void)
+{
+ u32 val;
+ asm volatile("mrc p14, 0, %0, c5, c1, 0" : "=r" (val));
+ return val;
+}
+
+static inline void
+xscale2pmu_write_overflow_flags(u32 val)
+{
+ asm volatile("mcr p14, 0, %0, c5, c1, 0" : : "r" (val));
+}
+
+static inline u32
+xscale2pmu_read_event_select(void)
+{
+ u32 val;
+ asm volatile("mrc p14, 0, %0, c8, c1, 0" : "=r" (val));
+ return val;
+}
+
+static inline void
+xscale2pmu_write_event_select(u32 val)
+{
+ asm volatile("mcr p14, 0, %0, c8, c1, 0" : : "r"(val));
+}
+
+static inline u32
+xscale2pmu_read_int_enable(void)
+{
+ u32 val;
+ asm volatile("mrc p14, 0, %0, c4, c1, 0" : "=r" (val));
+ return val;
+}
+
+static void
+xscale2pmu_write_int_enable(u32 val)
+{
+ asm volatile("mcr p14, 0, %0, c4, c1, 0" : : "r" (val));
+}
+
+static inline int
+xscale2_pmnc_counter_has_overflowed(unsigned long of_flags,
+ enum xscale_counters counter)
+{
+ int ret = 0;
+
+ switch (counter) {
+ case XSCALE_CYCLE_COUNTER:
+ ret = of_flags & XSCALE2_CCOUNT_OVERFLOW;
+ break;
+ case XSCALE_COUNTER0:
+ ret = of_flags & XSCALE2_COUNT0_OVERFLOW;
+ break;
+ case XSCALE_COUNTER1:
+ ret = of_flags & XSCALE2_COUNT1_OVERFLOW;
+ break;
+ case XSCALE_COUNTER2:
+ ret = of_flags & XSCALE2_COUNT2_OVERFLOW;
+ break;
+ case XSCALE_COUNTER3:
+ ret = of_flags & XSCALE2_COUNT3_OVERFLOW;
+ break;
+ default:
+ WARN_ONCE(1, "invalid counter number (%d)\n", counter);
+ }
+
+ return ret;
+}
+
+static irqreturn_t
+xscale2pmu_handle_irq(int irq_num, void *dev)
+{
+ unsigned long pmnc, of_flags;
+ struct perf_sample_data data;
+ struct cpu_hw_events *cpuc;
+ struct pt_regs *regs;
+ int idx;
+
+ /* Disable the PMU. */
+ pmnc = xscale2pmu_read_pmnc();
+ xscale2pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);
+
+ /* Check the overflow flag register. */
+ of_flags = xscale2pmu_read_overflow_flags();
+ if (!(of_flags & XSCALE2_OVERFLOWED_MASK))
+ return IRQ_NONE;
+
+ /* Clear the overflow bits. */
+ xscale2pmu_write_overflow_flags(of_flags);
+
+ regs = get_irq_regs();
+
+ perf_sample_data_init(&data, 0);
+
+ cpuc = &__get_cpu_var(cpu_hw_events);
+ for (idx = 0; idx <= armpmu->num_events; ++idx) {
+ struct perf_event *event = cpuc->events[idx];
+ struct hw_perf_event *hwc;
+
+ if (!test_bit(idx, cpuc->active_mask))
+ continue;
+
+ if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
+ continue;
+
+ hwc = &event->hw;
+ armpmu_event_update(event, hwc, idx);
+ data.period = event->hw.last_period;
+ if (!armpmu_event_set_period(event, hwc, idx))
+ continue;
+
+ if (perf_event_overflow(event, 0, &data, regs))
+ armpmu->disable(hwc, idx);
+ }
+
+ perf_event_do_pending();
+
+ /*
+ * Re-enable the PMU.
+ */
+ pmnc = xscale2pmu_read_pmnc() | XSCALE_PMU_ENABLE;
+ xscale2pmu_write_pmnc(pmnc);
+
+ return IRQ_HANDLED;
+}
+
+static void
+xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
+{
+ unsigned long flags, ien, evtsel;
+
+ ien = xscale2pmu_read_int_enable();
+ evtsel = xscale2pmu_read_event_select();
+
+ switch (idx) {
+ case XSCALE_CYCLE_COUNTER:
+ ien |= XSCALE2_CCOUNT_INT_EN;
+ break;
+ case XSCALE_COUNTER0:
+ ien |= XSCALE2_COUNT0_INT_EN;
+ evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
+ evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT;
+ break;
+ case XSCALE_COUNTER1:
+ ien |= XSCALE2_COUNT1_INT_EN;
+ evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
+ evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT;
+ break;
+ case XSCALE_COUNTER2:
+ ien |= XSCALE2_COUNT2_INT_EN;
+ evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
+ evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT;
+ break;
+ case XSCALE_COUNTER3:
+ ien |= XSCALE2_COUNT3_INT_EN;
+ evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
+ evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT;
+ break;
+ default:
+ WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+ return;
+ }
+
+ spin_lock_irqsave(&pmu_lock, flags);
+ xscale2pmu_write_event_select(evtsel);
+ xscale2pmu_write_int_enable(ien);
+ spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static void
+xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
+{
+ unsigned long flags, ien, evtsel;
+
+ ien = xscale2pmu_read_int_enable();
+ evtsel = xscale2pmu_read_event_select();
+
+ switch (idx) {
+ case XSCALE_CYCLE_COUNTER:
+ ien &= ~XSCALE2_CCOUNT_INT_EN;
+ break;
+ case XSCALE_COUNTER0:
+ ien &= ~XSCALE2_COUNT0_INT_EN;
+ evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
+ evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
+ break;
+ case XSCALE_COUNTER1:
+ ien &= ~XSCALE2_COUNT1_INT_EN;
+ evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
+ evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
+ break;
+ case XSCALE_COUNTER2:
+ ien &= ~XSCALE2_COUNT2_INT_EN;
+ evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
+ evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
+ break;
+ case XSCALE_COUNTER3:
+ ien &= ~XSCALE2_COUNT3_INT_EN;
+ evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
+ evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
+ break;
+ default:
+ WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+ return;
+ }
+
+ spin_lock_irqsave(&pmu_lock, flags);
+ xscale2pmu_write_event_select(evtsel);
+ xscale2pmu_write_int_enable(ien);
+ spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static int
+xscale2pmu_get_event_idx(struct cpu_hw_events *cpuc,
+ struct hw_perf_event *event)
+{
+ int idx = xscale1pmu_get_event_idx(cpuc, event);
+ if (idx >= 0)
+ goto out;
+
+ if (!test_and_set_bit(XSCALE_COUNTER3, cpuc->used_mask))
+ idx = XSCALE_COUNTER3;
+ else if (!test_and_set_bit(XSCALE_COUNTER2, cpuc->used_mask))
+ idx = XSCALE_COUNTER2;
+out:
+ return idx;
+}
+
+static void
+xscale2pmu_start(void)
+{
+ unsigned long flags, val;
+
+ spin_lock_irqsave(&pmu_lock, flags);
+ val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
+ val |= XSCALE_PMU_ENABLE;
+ xscale2pmu_write_pmnc(val);
+ spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static void
+xscale2pmu_stop(void)
+{
+ unsigned long flags, val;
+
+ spin_lock_irqsave(&pmu_lock, flags);
+ val = xscale2pmu_read_pmnc();
+ val &= ~XSCALE_PMU_ENABLE;
+ xscale2pmu_write_pmnc(val);
+ spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static inline u32
+xscale2pmu_read_counter(int counter)
+{
+ u32 val = 0;
+
+ switch (counter) {
+ case XSCALE_CYCLE_COUNTER:
+ asm volatile("mrc p14, 0, %0, c1, c1, 0" : "=r" (val));
+ break;
+ case XSCALE_COUNTER0:
+ asm volatile("mrc p14, 0, %0, c0, c2, 0" : "=r" (val));
+ break;
+ case XSCALE_COUNTER1:
+ asm volatile("mrc p14, 0, %0, c1, c2, 0" : "=r" (val));
+ break;
+ case XSCALE_COUNTER2:
+ asm volatile("mrc p14, 0, %0, c2, c2, 0" : "=r" (val));
+ break;
+ case XSCALE_COUNTER3:
+ asm volatile("mrc p14, 0, %0, c3, c2, 0" : "=r" (val));
+ break;
+ }
+
+ return val;
+}
+
+static inline void
+xscale2pmu_write_counter(int counter, u32 val)
+{
+ switch (counter) {
+ case XSCALE_CYCLE_COUNTER:
+ asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
+ break;
+ case XSCALE_COUNTER0:
+ asm volatile("mcr p14, 0, %0, c0, c2, 0" : : "r" (val));
+ break;
+ case XSCALE_COUNTER1:
+ asm volatile("mcr p14, 0, %0, c1, c2, 0" : : "r" (val));
+ break;
+ case XSCALE_COUNTER2:
+ asm volatile("mcr p14, 0, %0, c2, c2, 0" : : "r" (val));
+ break;
+ case XSCALE_COUNTER3:
+ asm volatile("mcr p14, 0, %0, c3, c2, 0" : : "r" (val));
+ break;
+ }
+}
+
+static const struct arm_pmu xscale2pmu = {
+ .id = ARM_PERF_PMU_ID_XSCALE2,
+ .handle_irq = xscale2pmu_handle_irq,
+ .enable = xscale2pmu_enable_event,
+ .disable = xscale2pmu_disable_event,
+ .event_map = xscalepmu_event_map,
+ .raw_event = xscalepmu_raw_event,
+ .read_counter = xscale2pmu_read_counter,
+ .write_counter = xscale2pmu_write_counter,
+ .get_event_idx = xscale2pmu_get_event_idx,
+ .start = xscale2pmu_start,
+ .stop = xscale2pmu_stop,
+ .num_events = 5,
+ .max_period = (1LLU << 32) - 1,
+};
+
static int __init
init_hw_perf_events(void)
{
@@ -2086,7 +2924,7 @@ init_hw_perf_events(void)
unsigned long implementor = (cpuid & 0xFF000000) >> 24;
unsigned long part_number = (cpuid & 0xFFF0);
- /* We only support ARM CPUs implemented by ARM at the moment. */
+ /* ARM Ltd CPUs. */
if (0x41 == implementor) {
switch (part_number) {
case 0xB360: /* ARM1136 */
@@ -2105,7 +2943,7 @@ init_hw_perf_events(void)
perf_max_events = armv6mpcore_pmu.num_events;
break;
case 0xC080: /* Cortex-A8 */
- armv7pmu.name = ARMV7_PMU_CORTEX_A8_NAME;
+ armv7pmu.id = ARM_PERF_PMU_ID_CA8;
memcpy(armpmu_perf_cache_map, armv7_a8_perf_cache_map,
sizeof(armv7_a8_perf_cache_map));
armv7pmu.event_map = armv7_a8_pmu_event_map;
@@ -2117,7 +2955,7 @@ init_hw_perf_events(void)
perf_max_events = armv7pmu.num_events;
break;
case 0xC090: /* Cortex-A9 */
- armv7pmu.name = ARMV7_PMU_CORTEX_A9_NAME;
+ armv7pmu.id = ARM_PERF_PMU_ID_CA9;
memcpy(armpmu_perf_cache_map, armv7_a9_perf_cache_map,
sizeof(armv7_a9_perf_cache_map));
armv7pmu.event_map = armv7_a9_pmu_event_map;
@@ -2128,15 +2966,33 @@ init_hw_perf_events(void)
armv7pmu.num_events = armv7_reset_read_pmnc();
perf_max_events = armv7pmu.num_events;
break;
- default:
- pr_info("no hardware support available\n");
- perf_max_events = -1;
+ }
+ /* Intel CPUs [xscale]. */
+ } else if (0x69 == implementor) {
+ part_number = (cpuid >> 13) & 0x7;
+ switch (part_number) {
+ case 1:
+ armpmu = &xscale1pmu;
+ memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
+ sizeof(xscale_perf_cache_map));
+ perf_max_events = xscale1pmu.num_events;
+ break;
+ case 2:
+ armpmu = &xscale2pmu;
+ memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
+ sizeof(xscale_perf_cache_map));
+ perf_max_events = xscale2pmu.num_events;
+ break;
}
}
- if (armpmu)
+ if (armpmu) {
pr_info("enabled with %s PMU driver, %d counters available\n",
- armpmu->name, armpmu->num_events);
+ arm_pmu_names[armpmu->id], armpmu->num_events);
+ } else {
+ pr_info("no hardware support available\n");
+ perf_max_events = -1;
+ }
return 0;
}
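
A hedged sketch of the main ID register decode that the probe above relies on; read_cpuid_id() is the existing accessor, and the field layout shown (implementor in bits [31:24], XScale core generation in bits [15:13]) matches the switch statements in this hunk. example_identify_pmu is a placeholder.

#include <linux/kernel.h>
#include <asm/cputype.h>

static void example_identify_pmu(void)
{
	unsigned long cpuid = read_cpuid_id();
	unsigned long implementor = (cpuid >> 24) & 0xff;

	if (implementor == 0x41) {			/* ARM Ltd */
		unsigned long part_number = cpuid & 0xfff0;
		pr_info("ARM part 0x%04lx\n", part_number);	/* e.g. 0xc080 for Cortex-A8 */
	} else if (implementor == 0x69) {		/* Intel */
		unsigned long generation = (cpuid >> 13) & 0x7;	/* 1 = xscale1, 2 = xscale2 */
		pr_info("XScale generation %lu\n", generation);
	}
}
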
diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c
index a124312e343f..b8af96ea62e6 100644
--- a/arch/arm/kernel/pmu.c
+++ b/arch/arm/kernel/pmu.c
@@ -2,6 +2,7 @@
* linux/arch/arm/kernel/pmu.c
*
* Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
+ * Copyright (C) 2010 ARM Ltd, Will Deacon
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -9,65 +10,78 @@
*
*/
+#define pr_fmt(fmt) "PMU: " fmt
+
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
#include <asm/pmu.h>
-/*
- * Define the IRQs for the system. We could use something like a platform
- * device but that seems fairly heavyweight for this. Also, the performance
- * counters can't be removed or hotplugged.
- *
- * Ordering is important: init_pmu() will use the ordering to set the affinity
- * to the corresponding core. e.g. the first interrupt will go to cpu 0, the
- * second goes to cpu 1 etc.
- */
-static const int irqs[] = {
-#if defined(CONFIG_ARCH_OMAP2)
- 3,
-#elif defined(CONFIG_ARCH_BCMRING)
- IRQ_PMUIRQ,
-#elif defined(CONFIG_MACH_REALVIEW_EB)
- IRQ_EB11MP_PMU_CPU0,
- IRQ_EB11MP_PMU_CPU1,
- IRQ_EB11MP_PMU_CPU2,
- IRQ_EB11MP_PMU_CPU3,
-#elif defined(CONFIG_ARCH_OMAP3)
- INT_34XX_BENCH_MPU_EMUL,
-#elif defined(CONFIG_ARCH_IOP32X)
- IRQ_IOP32X_CORE_PMU,
-#elif defined(CONFIG_ARCH_IOP33X)
- IRQ_IOP33X_CORE_PMU,
-#elif defined(CONFIG_ARCH_PXA)
- IRQ_PMU,
-#endif
-};
+static volatile long pmu_lock;
+
+static struct platform_device *pmu_devices[ARM_NUM_PMU_DEVICES];
+
+static int __devinit pmu_device_probe(struct platform_device *pdev)
+{
+
+ if (pdev->id < 0 || pdev->id >= ARM_NUM_PMU_DEVICES) {
+ pr_warning("received registration request for unknown "
+ "device %d\n", pdev->id);
+ return -EINVAL;
+ }
+
+ if (pmu_devices[pdev->id])
+ pr_warning("registering new PMU device type %d overwrites "
+ "previous registration!\n", pdev->id);
+ else
+ pr_info("registered new PMU device of type %d\n",
+ pdev->id);
-static const struct pmu_irqs pmu_irqs = {
- .irqs = irqs,
- .num_irqs = ARRAY_SIZE(irqs),
+ pmu_devices[pdev->id] = pdev;
+ return 0;
+}
+
+static struct platform_driver pmu_driver = {
+ .driver = {
+ .name = "arm-pmu",
+ },
+ .probe = pmu_device_probe,
};
-static volatile long pmu_lock;
+static int __init register_pmu_driver(void)
+{
+ return platform_driver_register(&pmu_driver);
+}
+device_initcall(register_pmu_driver);
-const struct pmu_irqs *
-reserve_pmu(void)
+struct platform_device *
+reserve_pmu(enum arm_pmu_type device)
{
- return test_and_set_bit_lock(0, &pmu_lock) ? ERR_PTR(-EBUSY) :
- &pmu_irqs;
+ struct platform_device *pdev;
+
+ if (test_and_set_bit_lock(device, &pmu_lock)) {
+ pdev = ERR_PTR(-EBUSY);
+ } else if (pmu_devices[device] == NULL) {
+ clear_bit_unlock(device, &pmu_lock);
+ pdev = ERR_PTR(-ENODEV);
+ } else {
+ pdev = pmu_devices[device];
+ }
+
+ return pdev;
}
EXPORT_SYMBOL_GPL(reserve_pmu);
int
-release_pmu(const struct pmu_irqs *irqs)
+release_pmu(struct platform_device *pdev)
{
- if (WARN_ON(irqs != &pmu_irqs))
+ if (WARN_ON(pdev != pmu_devices[pdev->id]))
return -EINVAL;
- clear_bit_unlock(0, &pmu_lock);
+ clear_bit_unlock(pdev->id, &pmu_lock);
return 0;
}
EXPORT_SYMBOL_GPL(release_pmu);
@@ -87,17 +101,42 @@ set_irq_affinity(int irq,
#endif
}
-int
-init_pmu(void)
+static int
+init_cpu_pmu(void)
{
int i, err = 0;
+ struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU];
+
+ if (!pdev) {
+ err = -ENODEV;
+ goto out;
+ }
- for (i = 0; i < pmu_irqs.num_irqs; ++i) {
- err = set_irq_affinity(pmu_irqs.irqs[i], i);
+ for (i = 0; i < pdev->num_resources; ++i) {
+ err = set_irq_affinity(platform_get_irq(pdev, i), i);
if (err)
break;
}
+out:
+ return err;
+}
+
+int
+init_pmu(enum arm_pmu_type device)
+{
+ int err = 0;
+
+ switch (device) {
+ case ARM_PMU_DEVICE_CPU:
+ err = init_cpu_pmu();
+ break;
+ default:
+ pr_warning("attempt to initialise unknown device %d\n",
+ device);
+ err = -EINVAL;
+ }
+
return err;
}
EXPORT_SYMBOL_GPL(init_pmu);
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 28753805d2d1..38c261f9951c 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -72,12 +72,15 @@ unsigned long profile_pc(struct pt_regs *regs)
EXPORT_SYMBOL(profile_pc);
#endif
-#ifndef CONFIG_GENERIC_TIME
-static unsigned long dummy_gettimeoffset(void)
+#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
+u32 arch_gettimeoffset(void)
{
+ if (system_timer->offset != NULL)
+ return system_timer->offset() * 1000;
+
return 0;
}
-#endif
+#endif /* CONFIG_ARCH_USES_GETTIMEOFFSET */
#ifdef CONFIG_LEDS_TIMER
static inline void do_leds(void)
@@ -93,63 +96,6 @@ static inline void do_leds(void)
#define do_leds()
#endif
-#ifndef CONFIG_GENERIC_TIME
-void do_gettimeofday(struct timeval *tv)
-{
- unsigned long flags;
- unsigned long seq;
- unsigned long usec, sec;
-
- do {
- seq = read_seqbegin_irqsave(&xtime_lock, flags);
- usec = system_timer->offset();
- sec = xtime.tv_sec;
- usec += xtime.tv_nsec / 1000;
- } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
-
- /* usec may have gone up a lot: be safe */
- while (usec >= 1000000) {
- usec -= 1000000;
- sec++;
- }
-
- tv->tv_sec = sec;
- tv->tv_usec = usec;
-}
-
-EXPORT_SYMBOL(do_gettimeofday);
-
-int do_settimeofday(struct timespec *tv)
-{
- time_t wtm_sec, sec = tv->tv_sec;
- long wtm_nsec, nsec = tv->tv_nsec;
-
- if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
- return -EINVAL;
-
- write_seqlock_irq(&xtime_lock);
- /*
- * This is revolting. We need to set "xtime" correctly. However, the
- * value in this location is the value at the most recent update of
- * wall time. Discover what correction gettimeofday() would have
- * done, and then undo it!
- */
- nsec -= system_timer->offset() * NSEC_PER_USEC;
-
- wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
- wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
-
- set_normalized_timespec(&xtime, sec, nsec);
- set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
-
- ntp_clear();
- write_sequnlock_irq(&xtime_lock);
- clock_was_set();
- return 0;
-}
-
-EXPORT_SYMBOL(do_settimeofday);
-#endif /* !CONFIG_GENERIC_TIME */
#ifndef CONFIG_GENERIC_CLOCKEVENTS
/*
@@ -214,10 +160,6 @@ device_initcall(timer_init_sysfs);
void __init time_init(void)
{
-#ifndef CONFIG_GENERIC_TIME
- if (system_timer->offset == NULL)
- system_timer->offset = dummy_gettimeoffset;
-#endif
system_timer->init();
}
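
A minimal machine-side sketch (hypothetical names): a platform that selects ARCH_USES_GETTIMEOFFSET keeps supplying a sys_timer .offset callback returning microseconds since the last tick, which the new arch_gettimeoffset() above scales to nanoseconds.

#include <linux/init.h>
#include <asm/mach/time.h>

static unsigned long example_gettimeoffset(void)
{
	/* Read a free-running hardware counter here; microseconds expected. */
	return 0;
}

static void __init example_timer_init(void)
{
	/* Program the tick hardware; omitted in this sketch. */
}

struct sys_timer example_timer = {
	.init	= example_timer_init,
#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
	.offset	= example_gettimeoffset,
#endif
};
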
diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
index 5e3f99620c04..14a0d988c82c 100644
--- a/arch/arm/lib/clear_user.S
+++ b/arch/arm/lib/clear_user.S
@@ -45,6 +45,7 @@ USER( strnebt r2, [r0])
mov r0, #0
ldmfd sp!, {r1, pc}
ENDPROC(__clear_user)
+ENDPROC(__clear_user_std)
.pushsection .fixup,"ax"
.align 0
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
index 027b69bdbad1..d066df686e17 100644
--- a/arch/arm/lib/copy_to_user.S
+++ b/arch/arm/lib/copy_to_user.S
@@ -93,6 +93,7 @@ WEAK(__copy_to_user)
#include "copy_template.S"
ENDPROC(__copy_to_user)
+ENDPROC(__copy_to_user_std)
.pushsection .fixup,"ax"
.align 0
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index 2db43a5ddd9b..103976411a67 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -97,6 +97,7 @@ config ARCH_AT572D940HF
config ARCH_AT91X40
bool "AT91x40"
+ select ARCH_USES_GETTIMEOFFSET
endchoice
@@ -360,6 +361,19 @@ config MACH_CPU9G20
Select this if you are using a Eukrea Electromatique's
CPU9G20 Board <http://www.eukrea.com/>
+config MACH_PORTUXG20
+ bool "taskit PortuxG20"
+ help
+ Select this if you are using taskit's PortuxG20.
+ <http://www.taskit.de/en/>
+
+config MACH_STAMP9G20
+ bool "taskit Stamp9G20 CPU module"
+ help
+ Select this if you are using taskit's Stamp9G20 CPU module on its
+ evaluation board.
+ <http://www.taskit.de/en/>
+
endif
# ----------------------------------------------------------
diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile
index d4004557532a..c1f821e58222 100644
--- a/arch/arm/mach-at91/Makefile
+++ b/arch/arm/mach-at91/Makefile
@@ -63,6 +63,8 @@ obj-$(CONFIG_MACH_AT91SAM9RLEK) += board-sam9rlek.o
obj-$(CONFIG_MACH_AT91SAM9G20EK) += board-sam9g20ek.o
obj-$(CONFIG_MACH_AT91SAM9G20EK_2MMC) += board-sam9g20ek-2slot-mmc.o
obj-$(CONFIG_MACH_CPU9G20) += board-cpu9krea.o
+obj-$(CONFIG_MACH_STAMP9G20) += board-stamp9g20.o
+obj-$(CONFIG_MACH_PORTUXG20) += board-stamp9g20.o
# AT91SAM9G45 board-specific support
obj-$(CONFIG_MACH_AT91SAM9G45EKES) += board-sam9m10g45ek.o
diff --git a/arch/arm/mach-at91/board-stamp9g20.c b/arch/arm/mach-at91/board-stamp9g20.c
new file mode 100644
index 000000000000..87958274290f
--- /dev/null
+++ b/arch/arm/mach-at91/board-stamp9g20.c
@@ -0,0 +1,315 @@
+/*
+ * Copyright (C) 2010 Christian Glindkamp <christian.glindkamp@taskit.de>
+ * taskit GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/mm.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/w1-gpio.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+
+#include <mach/board.h>
+#include <mach/at91sam9_smc.h>
+
+#include "sam9_smc.h"
+#include "generic.h"
+
+
+static void __init portuxg20_map_io(void)
+{
+ /* Initialize processor: 18.432 MHz crystal */
+ at91sam9260_initialize(18432000);
+
+ /* DBGU on ttyS0. (Rx & Tx only) */
+ at91_register_uart(0, 0, 0);
+
+ /* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */
+ at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS
+ | ATMEL_UART_DTR | ATMEL_UART_DSR
+ | ATMEL_UART_DCD | ATMEL_UART_RI);
+
+ /* USART1 on ttyS2. (Rx, Tx, CTS, RTS) */
+ at91_register_uart(AT91SAM9260_ID_US1, 2, ATMEL_UART_CTS | ATMEL_UART_RTS);
+
+ /* USART2 on ttyS3. (Rx, Tx, CTS, RTS) */
+ at91_register_uart(AT91SAM9260_ID_US2, 3, ATMEL_UART_CTS | ATMEL_UART_RTS);
+
+ /* USART4 on ttyS5. (Rx, Tx only) */
+ at91_register_uart(AT91SAM9260_ID_US4, 5, 0);
+
+ /* USART5 on ttyS6. (Rx, Tx only) */
+ at91_register_uart(AT91SAM9260_ID_US5, 6, 0);
+
+ /* set serial console to ttyS0 (ie, DBGU) */
+ at91_set_serial_console(0);
+}
+
+static void __init stamp9g20_map_io(void)
+{
+ /* Initialize processor: 18.432 MHz crystal */
+ at91sam9260_initialize(18432000);
+
+ /* DBGU on ttyS0. (Rx & Tx only) */
+ at91_register_uart(0, 0, 0);
+
+ /* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */
+ at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS
+ | ATMEL_UART_DTR | ATMEL_UART_DSR
+ | ATMEL_UART_DCD | ATMEL_UART_RI);
+
+ /* set serial console to ttyS0 (ie, DBGU) */
+ at91_set_serial_console(0);
+}
+
+static void __init init_irq(void)
+{
+ at91sam9260_init_interrupts(NULL);
+}
+
+
+/*
+ * NAND flash
+ */
+static struct atmel_nand_data __initdata nand_data = {
+ .ale = 21,
+ .cle = 22,
+ .rdy_pin = AT91_PIN_PC13,
+ .enable_pin = AT91_PIN_PC14,
+ .bus_width_16 = 0,
+};
+
+static struct sam9_smc_config __initdata nand_smc_config = {
+ .ncs_read_setup = 0,
+ .nrd_setup = 2,
+ .ncs_write_setup = 0,
+ .nwe_setup = 2,
+
+ .ncs_read_pulse = 4,
+ .nrd_pulse = 4,
+ .ncs_write_pulse = 4,
+ .nwe_pulse = 4,
+
+ .read_cycle = 7,
+ .write_cycle = 7,
+
+ .mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_DISABLE | AT91_SMC_DBW_8,
+ .tdf_cycles = 3,
+};
+
+static void __init add_device_nand(void)
+{
+ /* configure chip-select 3 (NAND) */
+ sam9_smc_configure(3, &nand_smc_config);
+
+ at91_add_device_nand(&nand_data);
+}
+
+
+/*
+ * MCI (SD/MMC)
+ * det_pin, wp_pin and vcc_pin are not connected
+ */
+#if defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_ATMELMCI_MODULE)
+static struct mci_platform_data __initdata mmc_data = {
+ .slot[0] = {
+ .bus_width = 4,
+ },
+};
+#else
+static struct at91_mmc_data __initdata mmc_data = {
+ .slot_b = 0,
+ .wire4 = 1,
+};
+#endif
+
+
+/*
+ * USB Host port
+ */
+static struct at91_usbh_data __initdata usbh_data = {
+ .ports = 2,
+};
+
+
+/*
+ * USB Device port
+ */
+static struct at91_udc_data __initdata portuxg20_udc_data = {
+ .vbus_pin = AT91_PIN_PC7,
+ .pullup_pin = 0, /* pull-up driven by UDC */
+};
+
+static struct at91_udc_data __initdata stamp9g20_udc_data = {
+ .vbus_pin = AT91_PIN_PA22,
+ .pullup_pin = 0, /* pull-up driven by UDC */
+};
+
+
+/*
+ * MACB Ethernet device
+ */
+static struct at91_eth_data __initdata macb_data = {
+ .phy_irq_pin = AT91_PIN_PA28,
+ .is_rmii = 1,
+};
+
+
+/*
+ * LEDs
+ */
+static struct gpio_led portuxg20_leds[] = {
+ {
+ .name = "LED2",
+ .gpio = AT91_PIN_PC5,
+ .default_trigger = "none",
+ }, {
+ .name = "LED3",
+ .gpio = AT91_PIN_PC4,
+ .default_trigger = "none",
+ }, {
+ .name = "LED4",
+ .gpio = AT91_PIN_PC10,
+ .default_trigger = "heartbeat",
+ }
+};
+
+static struct gpio_led stamp9g20_leds[] = {
+ {
+ .name = "D8",
+ .gpio = AT91_PIN_PB18,
+ .active_low = 1,
+ .default_trigger = "none",
+ }, {
+ .name = "D9",
+ .gpio = AT91_PIN_PB19,
+ .active_low = 1,
+ .default_trigger = "none",
+ }, {
+ .name = "D10",
+ .gpio = AT91_PIN_PB20,
+ .active_low = 1,
+ .default_trigger = "heartbeat",
+ }
+};
+
+
+/*
+ * SPI devices
+ */
+static struct spi_board_info portuxg20_spi_devices[] = {
+ {
+ .modalias = "spidev",
+ .chip_select = 0,
+ .max_speed_hz = 1 * 1000 * 1000,
+ .bus_num = 0,
+ }, {
+ .modalias = "spidev",
+ .chip_select = 0,
+ .max_speed_hz = 1 * 1000 * 1000,
+ .bus_num = 1,
+ },
+};
+
+
+/*
+ * Dallas 1-Wire
+ */
+static struct w1_gpio_platform_data w1_gpio_pdata = {
+ .pin = AT91_PIN_PA29,
+ .is_open_drain = 1,
+};
+
+static struct platform_device w1_device = {
+ .name = "w1-gpio",
+ .id = -1,
+ .dev.platform_data = &w1_gpio_pdata,
+};
+
+void add_w1(void)
+{
+ at91_set_GPIO_periph(w1_gpio_pdata.pin, 1);
+ at91_set_multi_drive(w1_gpio_pdata.pin, 1);
+ platform_device_register(&w1_device);
+}
+
+
+static void __init generic_board_init(void)
+{
+ /* Serial */
+ at91_add_device_serial();
+ /* NAND */
+ add_device_nand();
+ /* MMC */
+#if defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_ATMELMCI_MODULE)
+ at91_add_device_mci(0, &mmc_data);
+#else
+ at91_add_device_mmc(0, &mmc_data);
+#endif
+ /* USB Host */
+ at91_add_device_usbh(&usbh_data);
+ /* Ethernet */
+ at91_add_device_eth(&macb_data);
+ /* I2C */
+ at91_add_device_i2c(NULL, 0);
+ /* W1 */
+ add_w1();
+}
+
+static void __init portuxg20_board_init(void)
+{
+ generic_board_init();
+ /* SPI */
+ at91_add_device_spi(portuxg20_spi_devices, ARRAY_SIZE(portuxg20_spi_devices));
+ /* USB Device */
+ at91_add_device_udc(&portuxg20_udc_data);
+ /* LEDs */
+ at91_gpio_leds(portuxg20_leds, ARRAY_SIZE(portuxg20_leds));
+}
+
+static void __init stamp9g20_board_init(void)
+{
+ generic_board_init();
+ /* USB Device */
+ at91_add_device_udc(&stamp9g20_udc_data);
+ /* LEDs */
+ at91_gpio_leds(stamp9g20_leds, ARRAY_SIZE(stamp9g20_leds));
+}
+
+MACHINE_START(PORTUXG20, "taskit PortuxG20")
+ /* Maintainer: taskit GmbH */
+ .phys_io = AT91_BASE_SYS,
+ .io_pg_offst = (AT91_VA_BASE_SYS >> 18) & 0xfffc,
+ .boot_params = AT91_SDRAM_BASE + 0x100,
+ .timer = &at91sam926x_timer,
+ .map_io = portuxg20_map_io,
+ .init_irq = init_irq,
+ .init_machine = portuxg20_board_init,
+MACHINE_END
+
+MACHINE_START(STAMP9G20, "taskit Stamp9G20")
+ /* Maintainer: taskit GmbH */
+ .phys_io = AT91_BASE_SYS,
+ .io_pg_offst = (AT91_VA_BASE_SYS >> 18) & 0xfffc,
+ .boot_params = AT91_SDRAM_BASE + 0x100,
+ .timer = &at91sam926x_timer,
+ .map_io = stamp9g20_map_io,
+ .init_irq = init_irq,
+ .init_machine = stamp9g20_board_init,
+MACHINE_END
diff --git a/arch/arm/mach-at91/include/mach/board.h b/arch/arm/mach-at91/include/mach/board.h
index ceaec6c16eb2..df2ed848c9f8 100644
--- a/arch/arm/mach-at91/include/mach/board.h
+++ b/arch/arm/mach-at91/include/mach/board.h
@@ -39,6 +39,7 @@
#include <linux/usb/atmel_usba_udc.h>
#include <linux/atmel-mci.h>
#include <sound/atmel-ac97c.h>
+#include <linux/serial.h>
/* USB Device */
struct at91_udc_data {
@@ -143,9 +144,10 @@ extern struct platform_device *atmel_default_console_device;
extern void __init __deprecated at91_init_serial(struct at91_uart_config *config);
struct atmel_uart_data {
- short use_dma_tx; /* use transmit DMA? */
- short use_dma_rx; /* use receive DMA? */
- void __iomem *regs; /* virtual base address, if any */
+ short use_dma_tx; /* use transmit DMA? */
+ short use_dma_rx; /* use receive DMA? */
+ void __iomem *regs; /* virt. base address, if any */
+ struct serial_rs485 rs485; /* rs485 settings */
};
extern void __init at91_add_device_serial(void);
diff --git a/arch/arm/mach-at91/include/mach/cpu.h b/arch/arm/mach-at91/include/mach/cpu.h
index 5a0650101d45..833659d1200a 100644
--- a/arch/arm/mach-at91/include/mach/cpu.h
+++ b/arch/arm/mach-at91/include/mach/cpu.h
@@ -21,7 +21,7 @@
#define ARCH_ID_AT91SAM9260 0x019803a0
#define ARCH_ID_AT91SAM9261 0x019703a0
#define ARCH_ID_AT91SAM9263 0x019607a0
-#define ARCH_ID_AT91SAM9G10 0x819903a0
+#define ARCH_ID_AT91SAM9G10 0x019903a0
#define ARCH_ID_AT91SAM9G20 0x019905a0
#define ARCH_ID_AT91SAM9RL64 0x019b03a0
#define ARCH_ID_AT91SAM9G45 0x819b05a0
@@ -108,7 +108,7 @@ static inline unsigned long at91cap9_rev_identify(void)
#endif
#ifdef CONFIG_ARCH_AT91SAM9G10
-#define cpu_is_at91sam9g10() (at91_cpu_identify() == ARCH_ID_AT91SAM9G10)
+#define cpu_is_at91sam9g10() ((at91_cpu_identify() & ~AT91_CIDR_EXT) == ARCH_ID_AT91SAM9G10)
#else
#define cpu_is_at91sam9g10() (0)
#endif
diff --git a/arch/arm/mach-at91/include/mach/system.h b/arch/arm/mach-at91/include/mach/system.h
index 5268af3933c2..c80e090b3670 100644
--- a/arch/arm/mach-at91/include/mach/system.h
+++ b/arch/arm/mach-at91/include/mach/system.h
@@ -24,21 +24,24 @@
#include <mach/hardware.h>
#include <mach/at91_st.h>
#include <mach/at91_dbgu.h>
+#include <mach/at91_pmc.h>
static inline void arch_idle(void)
{
+#ifndef CONFIG_DEBUG_KERNEL
/*
* Disable the processor clock. The processor will be automatically
* re-enabled by an interrupt or by a reset.
*/
-// at91_sys_write(AT91_PMC_SCDR, AT91_PMC_PCK);
-
+ at91_sys_write(AT91_PMC_SCDR, AT91_PMC_PCK);
+#else
/*
* Set the processor (CP15) into 'Wait for Interrupt' mode.
* Unlike disabling the processor clock via the PMC (above)
* this allows the processor to be woken via JTAG.
*/
cpu_do_idle();
+#endif
}
void (*at91_arch_reset)(void);
diff --git a/arch/arm/mach-bcmring/arch.c b/arch/arm/mach-bcmring/arch.c
index 53dd2a9eecf9..2f139196d63d 100644
--- a/arch/arm/mach-bcmring/arch.c
+++ b/arch/arm/mach-bcmring/arch.c
@@ -29,6 +29,7 @@
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/mach/time.h>
+#include <asm/pmu.h>
#include <asm/mach/arch.h>
#include <mach/dma.h>
@@ -85,8 +86,23 @@ static struct platform_device nand_device = {
.num_resources = ARRAY_SIZE(nand_resource),
};
+static struct resource pmu_resource = {
+ .start = IRQ_PMUIRQ,
+ .end = IRQ_PMUIRQ,
+ .flags = IORESOURCE_IRQ,
+};
+
+static struct platform_device pmu_device = {
+ .name = "arm-pmu",
+ .id = ARM_PMU_DEVICE_CPU,
+ .resource = &pmu_resource,
+ .num_resources = 1,
+};
+
+
static struct platform_device *devices[] __initdata = {
&nand_device,
+ &pmu_device,
};
/****************************************************************************
diff --git a/arch/arm/mach-clps711x/mm.c b/arch/arm/mach-clps711x/mm.c
index a7b4591205a3..986592176767 100644
--- a/arch/arm/mach-clps711x/mm.c
+++ b/arch/arm/mach-clps711x/mm.c
@@ -22,7 +22,6 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
#include <asm/sizes.h>
#include <mach/hardware.h>
diff --git a/arch/arm/mach-cns3xxx/Kconfig b/arch/arm/mach-cns3xxx/Kconfig
new file mode 100644
index 000000000000..9ebfcc46feb1
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/Kconfig
@@ -0,0 +1,12 @@
+menu "CNS3XXX platform type"
+ depends on ARCH_CNS3XXX
+
+config MACH_CNS3420VB
+ bool "Support for CNS3420 Validation Board"
+ help
+ Include support for the Cavium Networks CNS3420 MPCore Platform
+ Baseboard.
+ This is a platform with an on-board ARM11 MPCore and has support
+ for USB, USB-OTG, MMC/SD/SDIO, SATA, PCI-E, etc.
+
+endmenu
diff --git a/arch/arm/mach-cns3xxx/Makefile b/arch/arm/mach-cns3xxx/Makefile
new file mode 100644
index 000000000000..427507a2d696
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_ARCH_CNS3XXX) += core.o pm.o
+obj-$(CONFIG_MACH_CNS3420VB) += cns3420vb.o
diff --git a/arch/arm/mach-cns3xxx/Makefile.boot b/arch/arm/mach-cns3xxx/Makefile.boot
new file mode 100644
index 000000000000..777012865220
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/Makefile.boot
@@ -0,0 +1,3 @@
+ zreladdr-y := 0x00008000
+params_phys-y := 0x00000100
+initrd_phys-y := 0x00C00000
diff --git a/arch/arm/mach-cns3xxx/cns3420vb.c b/arch/arm/mach-cns3xxx/cns3420vb.c
new file mode 100644
index 000000000000..2e30c8288740
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/cns3420vb.c
@@ -0,0 +1,148 @@
+/*
+ * Cavium Networks CNS3420 Validation Board
+ *
+ * Copyright 2000 Deep Blue Solutions Ltd
+ * Copyright 2008 ARM Limited
+ * Copyright 2008 Cavium Networks
+ * Scott Shu
+ * Copyright 2010 MontaVista Software, LLC.
+ * Anton Vorontsov <avorontsov@mvista.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/io.h>
+#include <linux/serial_core.h>
+#include <linux/serial_8250.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/physmap.h>
+#include <linux/mtd/partitions.h>
+#include <asm/setup.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/time.h>
+#include <mach/hardware.h>
+#include <mach/cns3xxx.h>
+#include <mach/irqs.h>
+#include "core.h"
+
+/*
+ * NOR Flash
+ */
+static struct mtd_partition cns3420_nor_partitions[] = {
+ {
+ .name = "uboot",
+ .size = 0x00040000,
+ .offset = 0,
+ .mask_flags = MTD_WRITEABLE,
+ }, {
+ .name = "kernel",
+ .size = 0x004C0000,
+ .offset = MTDPART_OFS_APPEND,
+ }, {
+ .name = "filesystem",
+ .size = 0x7000000,
+ .offset = MTDPART_OFS_APPEND,
+ }, {
+ .name = "filesystem2",
+ .size = 0x0AE0000,
+ .offset = MTDPART_OFS_APPEND,
+ }, {
+ .name = "ubootenv",
+ .size = MTDPART_SIZ_FULL,
+ .offset = MTDPART_OFS_APPEND,
+ },
+};
+
+static struct physmap_flash_data cns3420_nor_pdata = {
+ .width = 2,
+ .parts = cns3420_nor_partitions,
+ .nr_parts = ARRAY_SIZE(cns3420_nor_partitions),
+};
+
+static struct resource cns3420_nor_res = {
+ .start = CNS3XXX_FLASH_BASE,
+ .end = CNS3XXX_FLASH_BASE + SZ_128M - 1,
+ .flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
+};
+
+static struct platform_device cns3420_nor_pdev = {
+ .name = "physmap-flash",
+ .id = 0,
+ .resource = &cns3420_nor_res,
+ .num_resources = 1,
+ .dev = {
+ .platform_data = &cns3420_nor_pdata,
+ },
+};
+
+/*
+ * UART
+ */
+static void __init cns3420_early_serial_setup(void)
+{
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+ static struct uart_port cns3420_serial_port = {
+ .membase = (void __iomem *)CNS3XXX_UART0_BASE_VIRT,
+ .mapbase = CNS3XXX_UART0_BASE,
+ .irq = IRQ_CNS3XXX_UART0,
+ .iotype = UPIO_MEM,
+ .flags = UPF_BOOT_AUTOCONF | UPF_FIXED_TYPE,
+ .regshift = 2,
+ .uartclk = 24000000,
+ .line = 0,
+ .type = PORT_16550A,
+ .fifosize = 16,
+ };
+
+ early_serial_setup(&cns3420_serial_port);
+#endif
+}
+
+/*
+ * Initialization
+ */
+static struct platform_device *cns3420_pdevs[] __initdata = {
+ &cns3420_nor_pdev,
+};
+
+static void __init cns3420_init(void)
+{
+ platform_add_devices(cns3420_pdevs, ARRAY_SIZE(cns3420_pdevs));
+
+ pm_power_off = cns3xxx_power_off;
+}
+
+static struct map_desc cns3420_io_desc[] __initdata = {
+ {
+ .virtual = CNS3XXX_UART0_BASE_VIRT,
+ .pfn = __phys_to_pfn(CNS3XXX_UART0_BASE),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ },
+};
+
+static void __init cns3420_map_io(void)
+{
+ cns3xxx_map_io();
+ iotable_init(cns3420_io_desc, ARRAY_SIZE(cns3420_io_desc));
+
+ cns3420_early_serial_setup();
+}
+
+MACHINE_START(CNS3420VB, "Cavium Networks CNS3420 Validation Board")
+ .phys_io = CNS3XXX_UART0_BASE,
+ .io_pg_offst = (CNS3XXX_UART0_BASE_VIRT >> 18) & 0xfffc,
+ .boot_params = 0x00000100,
+ .map_io = cns3420_map_io,
+ .init_irq = cns3xxx_init_irq,
+ .timer = &cns3xxx_timer,
+ .init_machine = cns3420_init,
+MACHINE_END
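
The five MTD partitions above are stacked with MTDPART_OFS_APPEND inside the SZ_128M physmap window. As an illustrative check (not part of the patch), a small standalone program can reproduce the absolute offsets the MTD core will assign:

/* Standalone sketch: compute the offsets MTDPART_OFS_APPEND yields for
 * the cns3420 NOR layout above.  Sizes are copied from the patch; the
 * 0x8000000 total matches the SZ_128M resource window. */
#include <stdio.h>

int main(void)
{
	const char *names[] = { "uboot", "kernel", "filesystem",
				"filesystem2", "ubootenv" };
	unsigned long sizes[] = { 0x00040000, 0x004C0000, 0x7000000,
				  0x0AE0000, 0 };	/* 0: rest of the chip */
	unsigned long total = 0x8000000, offset = 0;
	int i;

	for (i = 0; i < 5; i++) {
		unsigned long size = sizes[i] ? sizes[i] : total - offset;
		printf("%-12s offset 0x%08lx size 0x%08lx\n",
		       names[i], offset, size);
		offset += size;
	}
	return 0;
}

This places "ubootenv" at 0x7FE0000, taking the remaining 128 KiB of the chip.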
diff --git a/arch/arm/mach-cns3xxx/core.c b/arch/arm/mach-cns3xxx/core.c
new file mode 100644
index 000000000000..9ca4d581016f
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/core.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright 1999 - 2003 ARM Limited
+ * Copyright 2000 Deep Blue Solutions Ltd
+ * Copyright 2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/clockchips.h>
+#include <linux/io.h>
+#include <asm/mach/map.h>
+#include <asm/mach/time.h>
+#include <asm/mach/irq.h>
+#include <asm/hardware/gic.h>
+#include <mach/cns3xxx.h>
+#include "core.h"
+
+static struct map_desc cns3xxx_io_desc[] __initdata = {
+ {
+ .virtual = CNS3XXX_TC11MP_TWD_BASE_VIRT,
+ .pfn = __phys_to_pfn(CNS3XXX_TC11MP_TWD_BASE),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT,
+ .pfn = __phys_to_pfn(CNS3XXX_TC11MP_GIC_CPU_BASE),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT,
+ .pfn = __phys_to_pfn(CNS3XXX_TC11MP_GIC_DIST_BASE),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = CNS3XXX_TIMER1_2_3_BASE_VIRT,
+ .pfn = __phys_to_pfn(CNS3XXX_TIMER1_2_3_BASE),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = CNS3XXX_GPIOA_BASE_VIRT,
+ .pfn = __phys_to_pfn(CNS3XXX_GPIOA_BASE),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = CNS3XXX_GPIOB_BASE_VIRT,
+ .pfn = __phys_to_pfn(CNS3XXX_GPIOB_BASE),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = CNS3XXX_MISC_BASE_VIRT,
+ .pfn = __phys_to_pfn(CNS3XXX_MISC_BASE),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = CNS3XXX_PM_BASE_VIRT,
+ .pfn = __phys_to_pfn(CNS3XXX_PM_BASE),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ },
+};
+
+void __init cns3xxx_map_io(void)
+{
+ iotable_init(cns3xxx_io_desc, ARRAY_SIZE(cns3xxx_io_desc));
+}
+
+/* used by entry-macro.S */
+void __iomem *gic_cpu_base_addr;
+
+void __init cns3xxx_init_irq(void)
+{
+ gic_cpu_base_addr = __io(CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT);
+ gic_dist_init(0, __io(CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT), 29);
+ gic_cpu_init(0, gic_cpu_base_addr);
+}
+
+void cns3xxx_power_off(void)
+{
+ u32 __iomem *pm_base = __io(CNS3XXX_PM_BASE_VIRT);
+ u32 clkctrl;
+
+ printk(KERN_INFO "powering system down...\n");
+
+ clkctrl = readl(pm_base + PM_SYS_CLK_CTRL_OFFSET);
+ clkctrl &= 0xfffff1ff;
+ clkctrl |= (0x5 << 9); /* Hibernate */
+ writel(clkctrl, pm_base + PM_SYS_CLK_CTRL_OFFSET);
+
+}
+
+/*
+ * Timer
+ */
+static void __iomem *cns3xxx_tmr1;
+
+static void cns3xxx_timer_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *clk)
+{
+ unsigned long ctrl = readl(cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET);
+ int pclk = cns3xxx_cpu_clock() / 8;
+ int reload;
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ reload = pclk * 20 / (3 * HZ) * 0x25000;
+ writel(reload, cns3xxx_tmr1 + TIMER1_AUTO_RELOAD_OFFSET);
+ ctrl |= (1 << 0) | (1 << 2) | (1 << 9);
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ /* period set, and timer enabled in 'next_event' hook */
+ ctrl |= (1 << 2) | (1 << 9);
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ default:
+ ctrl = 0;
+ }
+
+ writel(ctrl, cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET);
+}
+
+static int cns3xxx_timer_set_next_event(unsigned long evt,
+ struct clock_event_device *unused)
+{
+ unsigned long ctrl = readl(cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET);
+
+ writel(evt, cns3xxx_tmr1 + TIMER1_AUTO_RELOAD_OFFSET);
+ writel(ctrl | (1 << 0), cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET);
+
+ return 0;
+}
+
+static struct clock_event_device cns3xxx_tmr1_clockevent = {
+ .name = "cns3xxx timer1",
+ .shift = 8,
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .set_mode = cns3xxx_timer_set_mode,
+ .set_next_event = cns3xxx_timer_set_next_event,
+ .rating = 350,
+ .cpumask = cpu_all_mask,
+};
+
+static void __init cns3xxx_clockevents_init(unsigned int timer_irq)
+{
+ cns3xxx_tmr1_clockevent.irq = timer_irq;
+ cns3xxx_tmr1_clockevent.mult =
+ div_sc((cns3xxx_cpu_clock() >> 3) * 1000000, NSEC_PER_SEC,
+ cns3xxx_tmr1_clockevent.shift);
+ cns3xxx_tmr1_clockevent.max_delta_ns =
+ clockevent_delta2ns(0xffffffff, &cns3xxx_tmr1_clockevent);
+ cns3xxx_tmr1_clockevent.min_delta_ns =
+ clockevent_delta2ns(0xf, &cns3xxx_tmr1_clockevent);
+
+ clockevents_register_device(&cns3xxx_tmr1_clockevent);
+}
+
+/*
+ * IRQ handler for the timer
+ */
+static irqreturn_t cns3xxx_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = &cns3xxx_tmr1_clockevent;
+ u32 __iomem *stat = cns3xxx_tmr1 + TIMER1_2_INTERRUPT_STATUS_OFFSET;
+ u32 val;
+
+ /* Clear the interrupt */
+ val = readl(stat);
+ writel(val & ~(1 << 2), stat);
+
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction cns3xxx_timer_irq = {
+ .name = "timer",
+ .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .handler = cns3xxx_timer_interrupt,
+};
+
+/*
+ * Set up the clock source and clock events devices
+ */
+static void __init __cns3xxx_timer_init(unsigned int timer_irq)
+{
+ u32 val;
+ u32 irq_mask;
+
+ /*
+ * Initialise to a known state (all timers off)
+ */
+
+ /* disable timer1 and timer2 */
+ writel(0, cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET);
+ /* stop free running timer3 */
+ writel(0, cns3xxx_tmr1 + TIMER_FREERUN_CONTROL_OFFSET);
+
+ /* timer1 */
+ writel(0x5C800, cns3xxx_tmr1 + TIMER1_COUNTER_OFFSET);
+ writel(0x5C800, cns3xxx_tmr1 + TIMER1_AUTO_RELOAD_OFFSET);
+
+ writel(0, cns3xxx_tmr1 + TIMER1_MATCH_V1_OFFSET);
+ writel(0, cns3xxx_tmr1 + TIMER1_MATCH_V2_OFFSET);
+
+ /* mask the timer1 match interrupts, unmask the timer1 overflow interrupt */
+ irq_mask = readl(cns3xxx_tmr1 + TIMER1_2_INTERRUPT_MASK_OFFSET);
+ irq_mask &= ~(1 << 2);
+ irq_mask |= 0x03;
+ writel(irq_mask, cns3xxx_tmr1 + TIMER1_2_INTERRUPT_MASK_OFFSET);
+
+ /* down counter */
+ val = readl(cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET);
+ val |= (1 << 9);
+ writel(val, cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET);
+
+ /* timer2 */
+ writel(0, cns3xxx_tmr1 + TIMER2_MATCH_V1_OFFSET);
+ writel(0, cns3xxx_tmr1 + TIMER2_MATCH_V2_OFFSET);
+
+ /* mask irq */
+ irq_mask = readl(cns3xxx_tmr1 + TIMER1_2_INTERRUPT_MASK_OFFSET);
+ irq_mask |= ((1 << 3) | (1 << 4) | (1 << 5));
+ writel(irq_mask, cns3xxx_tmr1 + TIMER1_2_INTERRUPT_MASK_OFFSET);
+
+ /* down counter */
+ val = readl(cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET);
+ val |= (1 << 10);
+ writel(val, cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET);
+
+ /* Make irqs happen for the system timer */
+ setup_irq(timer_irq, &cns3xxx_timer_irq);
+
+ cns3xxx_clockevents_init(timer_irq);
+}
+
+static void __init cns3xxx_timer_init(void)
+{
+ cns3xxx_tmr1 = __io(CNS3XXX_TIMER1_2_3_BASE_VIRT);
+
+ __cns3xxx_timer_init(IRQ_CNS3XXX_TIMER0);
+}
+
+struct sys_timer cns3xxx_timer = {
+ .init = cns3xxx_timer_init,
+};
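
cns3xxx_clockevents_init() above derives the clockevent mult from the timer input clock (the CPU clock divided by 8, approximated in whole MHz) with a shift of 8. A standalone sketch of that arithmetic, assuming cns3xxx_cpu_clock() returns 500; the numbers are illustrative, not taken from the patch:

/* Sketch of the mult/shift conversion used by the clockevent above.
 * Assumes a 500 MHz CPU clock, so the driver treats the timer input
 * as (500 >> 3) = 62 MHz. */
#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* Truncating scaled division, like the kernel's div_sc() helper. */
static uint32_t div_sc(uint64_t ticks, uint64_t nsec, int shift)
{
	return (uint32_t)((ticks << shift) / nsec);
}

int main(void)
{
	int shift = 8;
	uint64_t timer_hz = (500 >> 3) * 1000000ULL;
	uint32_t mult = div_sc(timer_hz, NSEC_PER_SEC, shift);
	uint64_t ticks = 62500;	/* roughly 1 ms worth of timer ticks */

	/* clockevent_delta2ns() style conversion: ns = (ticks << shift) / mult */
	printf("mult=%u, %llu ticks ~= %llu ns\n", mult,
	       (unsigned long long)ticks,
	       (unsigned long long)((ticks << shift) / mult));
	return 0;
}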
diff --git a/arch/arm/mach-cns3xxx/core.h b/arch/arm/mach-cns3xxx/core.h
new file mode 100644
index 000000000000..6b33ec11346e
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/core.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2000 Deep Blue Solutions Ltd
+ * Copyright 2004 ARM Limited
+ * Copyright 2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __CNS3XXX_CORE_H
+#define __CNS3XXX_CORE_H
+
+extern void __iomem *gic_cpu_base_addr;
+extern struct sys_timer cns3xxx_timer;
+
+void __init cns3xxx_map_io(void);
+void __init cns3xxx_init_irq(void);
+void cns3xxx_power_off(void);
+void cns3xxx_pwr_power_up(unsigned int block);
+void cns3xxx_pwr_power_down(unsigned int block);
+
+#endif /* __CNS3XXX_CORE_H */
diff --git a/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h b/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h
new file mode 100644
index 000000000000..8a2f5a21d4ee
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h
@@ -0,0 +1,635 @@
+/*
+ * Copyright 2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MACH_BOARD_CNS3XXX_H
+#define __MACH_BOARD_CNS3XXX_H
+
+/*
+ * Memory map
+ */
+#define CNS3XXX_FLASH_BASE 0x10000000 /* Flash/SRAM Memory Bank 0 */
+#define CNS3XXX_FLASH_SIZE SZ_256M
+
+#define CNS3XXX_DDR2SDRAM_BASE 0x20000000 /* DDR2 SDRAM Memory */
+
+#define CNS3XXX_SPI_FLASH_BASE 0x60000000 /* SPI Serial Flash Memory */
+
+#define CNS3XXX_SWITCH_BASE 0x70000000 /* Switch and HNAT Control */
+#define CNS3XXX_SWITCH_BASE_VIRT 0xFFF00000
+
+#define CNS3XXX_PPE_BASE 0x70001000 /* HNAT */
+#define CNS3XXX_PPE_BASE_VIRT 0xFFF50000
+
+#define CNS3XXX_EMBEDDED_SRAM_BASE 0x70002000 /* HNAT Embedded SRAM */
+#define CNS3XXX_EMBEDDED_SRAM_BASE_VIRT 0xFFF60000
+
+#define CNS3XXX_SSP_BASE 0x71000000 /* Synchronous Serial Port - SPI/PCM/I2C */
+#define CNS3XXX_SSP_BASE_VIRT 0xFFF01000
+
+#define CNS3XXX_DMC_BASE 0x72000000 /* DMC Control (DDR2 SDRAM) */
+#define CNS3XXX_DMC_BASE_VIRT 0xFFF02000
+
+#define CNS3XXX_SMC_BASE 0x73000000 /* SMC Control */
+#define CNS3XXX_SMC_BASE_VIRT 0xFFF03000
+
+#define SMC_MEMC_STATUS_OFFSET 0x000
+#define SMC_MEMIF_CFG_OFFSET 0x004
+#define SMC_MEMC_CFG_SET_OFFSET 0x008
+#define SMC_MEMC_CFG_CLR_OFFSET 0x00C
+#define SMC_DIRECT_CMD_OFFSET 0x010
+#define SMC_SET_CYCLES_OFFSET 0x014
+#define SMC_SET_OPMODE_OFFSET 0x018
+#define SMC_REFRESH_PERIOD_0_OFFSET 0x020
+#define SMC_REFRESH_PERIOD_1_OFFSET 0x024
+#define SMC_SRAM_CYCLES0_0_OFFSET 0x100
+#define SMC_NAND_CYCLES0_0_OFFSET 0x100
+#define SMC_OPMODE0_0_OFFSET 0x104
+#define SMC_SRAM_CYCLES0_1_OFFSET 0x120
+#define SMC_NAND_CYCLES0_1_OFFSET 0x120
+#define SMC_OPMODE0_1_OFFSET 0x124
+#define SMC_USER_STATUS_OFFSET 0x200
+#define SMC_USER_CONFIG_OFFSET 0x204
+#define SMC_ECC_STATUS_OFFSET 0x300
+#define SMC_ECC_MEMCFG_OFFSET 0x304
+#define SMC_ECC_MEMCOMMAND1_OFFSET 0x308
+#define SMC_ECC_MEMCOMMAND2_OFFSET 0x30C
+#define SMC_ECC_ADDR0_OFFSET 0x310
+#define SMC_ECC_ADDR1_OFFSET 0x314
+#define SMC_ECC_VALUE0_OFFSET 0x318
+#define SMC_ECC_VALUE1_OFFSET 0x31C
+#define SMC_ECC_VALUE2_OFFSET 0x320
+#define SMC_ECC_VALUE3_OFFSET 0x324
+#define SMC_PERIPH_ID_0_OFFSET 0xFE0
+#define SMC_PERIPH_ID_1_OFFSET 0xFE4
+#define SMC_PERIPH_ID_2_OFFSET 0xFE8
+#define SMC_PERIPH_ID_3_OFFSET 0xFEC
+#define SMC_PCELL_ID_0_OFFSET 0xFF0
+#define SMC_PCELL_ID_1_OFFSET 0xFF4
+#define SMC_PCELL_ID_2_OFFSET 0xFF8
+#define SMC_PCELL_ID_3_OFFSET 0xFFC
+
+#define CNS3XXX_GPIOA_BASE 0x74000000 /* GPIO port A */
+#define CNS3XXX_GPIOA_BASE_VIRT 0xFFF04000
+
+#define CNS3XXX_GPIOB_BASE 0x74800000 /* GPIO port B */
+#define CNS3XXX_GPIOB_BASE_VIRT 0xFFF05000
+
+#define CNS3XXX_RTC_BASE 0x75000000 /* Real Time Clock */
+#define CNS3XXX_RTC_BASE_VIRT 0xFFF06000
+
+#define RTC_SEC_OFFSET 0x00
+#define RTC_MIN_OFFSET 0x04
+#define RTC_HOUR_OFFSET 0x08
+#define RTC_DAY_OFFSET 0x0C
+#define RTC_SEC_ALM_OFFSET 0x10
+#define RTC_MIN_ALM_OFFSET 0x14
+#define RTC_HOUR_ALM_OFFSET 0x18
+#define RTC_REC_OFFSET 0x1C
+#define RTC_CTRL_OFFSET 0x20
+#define RTC_INTR_STS_OFFSET 0x34
+
+#define CNS3XXX_MISC_BASE 0x76000000 /* Misc Control */
+#define CNS3XXX_MISC_BASE_VIRT 0xFFF07000 /* Misc Control */
+
+#define CNS3XXX_PM_BASE 0x77000000 /* Power Management Control */
+#define CNS3XXX_PM_BASE_VIRT 0xFFF08000
+
+#define PM_CLK_GATE_OFFSET 0x00
+#define PM_SOFT_RST_OFFSET 0x04
+#define PM_HS_CFG_OFFSET 0x08
+#define PM_CACTIVE_STA_OFFSET 0x0C
+#define PM_PWR_STA_OFFSET 0x10
+#define PM_SYS_CLK_CTRL_OFFSET 0x14
+#define PM_PLL_LCD_I2S_CTRL_OFFSET 0x18
+#define PM_PLL_HM_PD_OFFSET 0x1C
+
+#define CNS3XXX_UART0_BASE 0x78000000 /* UART 0 */
+#define CNS3XXX_UART0_BASE_VIRT 0xFFF09000
+
+#define CNS3XXX_UART1_BASE 0x78400000 /* UART 1 */
+#define CNS3XXX_UART1_BASE_VIRT 0xFFF0A000
+
+#define CNS3XXX_UART2_BASE 0x78800000 /* UART 2 */
+#define CNS3XXX_UART2_BASE_VIRT 0xFFF0B000
+
+#define CNS3XXX_DMAC_BASE 0x79000000 /* Generic DMA Control */
+#define CNS3XXX_DMAC_BASE_VIRT 0xFFF0D000
+
+#define CNS3XXX_CORESIGHT_BASE 0x7A000000 /* CoreSight */
+#define CNS3XXX_CORESIGHT_BASE_VIRT 0xFFF0E000
+
+#define CNS3XXX_CRYPTO_BASE 0x7B000000 /* Crypto */
+#define CNS3XXX_CRYPTO_BASE_VIRT 0xFFF0F000
+
+#define CNS3XXX_I2S_BASE 0x7C000000 /* I2S */
+#define CNS3XXX_I2S_BASE_VIRT 0xFFF10000
+
+#define CNS3XXX_TIMER1_2_3_BASE 0x7C800000 /* Timer */
+#define CNS3XXX_TIMER1_2_3_BASE_VIRT 0xFFF10800
+
+#define TIMER1_COUNTER_OFFSET 0x00
+#define TIMER1_AUTO_RELOAD_OFFSET 0x04
+#define TIMER1_MATCH_V1_OFFSET 0x08
+#define TIMER1_MATCH_V2_OFFSET 0x0C
+
+#define TIMER2_COUNTER_OFFSET 0x10
+#define TIMER2_AUTO_RELOAD_OFFSET 0x14
+#define TIMER2_MATCH_V1_OFFSET 0x18
+#define TIMER2_MATCH_V2_OFFSET 0x1C
+
+#define TIMER1_2_CONTROL_OFFSET 0x30
+#define TIMER1_2_INTERRUPT_STATUS_OFFSET 0x34
+#define TIMER1_2_INTERRUPT_MASK_OFFSET 0x38
+
+#define TIMER_FREERUN_OFFSET 0x40
+#define TIMER_FREERUN_CONTROL_OFFSET 0x44
+
+#define CNS3XXX_HCIE_BASE 0x7D000000 /* HCIE Control */
+#define CNS3XXX_HCIE_BASE_VIRT 0xFFF30000
+
+#define CNS3XXX_RAID_BASE 0x7E000000 /* RAID Control */
+#define CNS3XXX_RAID_BASE_VIRT 0xFFF12000
+
+#define CNS3XXX_AXI_IXC_BASE 0x7F000000 /* AXI IXC */
+#define CNS3XXX_AXI_IXC_BASE_VIRT 0xFFF13000
+
+#define CNS3XXX_CLCD_BASE 0x80000000 /* LCD Control */
+#define CNS3XXX_CLCD_BASE_VIRT 0xFFF14000
+
+#define CNS3XXX_USBOTG_BASE 0x81000000 /* USB OTG Control */
+#define CNS3XXX_USBOTG_BASE_VIRT 0xFFF15000
+
+#define CNS3XXX_USB_BASE 0x82000000 /* USB Host Control */
+#define CNS3XXX_USB_BASE_VIRT 0xFFF16000
+
+#define CNS3XXX_SATA2_BASE 0x83000000 /* SATA */
+#define CNS3XXX_SATA2_SIZE SZ_16M
+#define CNS3XXX_SATA2_BASE_VIRT 0xFFF17000
+
+#define CNS3XXX_CAMERA_BASE 0x84000000 /* Camera Interface */
+#define CNS3XXX_CAMERA_BASE_VIRT 0xFFF18000
+
+#define CNS3XXX_SDIO_BASE 0x85000000 /* SDIO */
+#define CNS3XXX_SDIO_BASE_VIRT 0xFFF19000
+
+#define CNS3XXX_I2S_TDM_BASE 0x86000000 /* I2S TDM */
+#define CNS3XXX_I2S_TDM_BASE_VIRT 0xFFF1A000
+
+#define CNS3XXX_2DG_BASE 0x87000000 /* 2D Graphic Control */
+#define CNS3XXX_2DG_BASE_VIRT 0xFFF1B000
+
+#define CNS3XXX_USB_OHCI_BASE 0x88000000 /* USB OHCI */
+#define CNS3XXX_USB_OHCI_BASE_VIRT 0xFFF1C000
+
+#define CNS3XXX_L2C_BASE 0x92000000 /* L2 Cache Control */
+#define CNS3XXX_L2C_BASE_VIRT 0xFFF27000
+
+#define CNS3XXX_PCIE0_MEM_BASE 0xA0000000 /* PCIe Port 0 IO/Memory Space */
+#define CNS3XXX_PCIE0_MEM_BASE_VIRT 0xE0000000
+
+#define CNS3XXX_PCIE0_HOST_BASE 0xAB000000 /* PCIe Port 0 RC Base */
+#define CNS3XXX_PCIE0_HOST_BASE_VIRT 0xE1000000
+
+#define CNS3XXX_PCIE0_IO_BASE 0xAC000000 /* PCIe Port 0 */
+#define CNS3XXX_PCIE0_IO_BASE_VIRT 0xE2000000
+
+#define CNS3XXX_PCIE0_CFG0_BASE 0xAD000000 /* PCIe Port 0 CFG Type 0 */
+#define CNS3XXX_PCIE0_CFG0_BASE_VIRT 0xE3000000
+
+#define CNS3XXX_PCIE0_CFG1_BASE 0xAE000000 /* PCIe Port 0 CFG Type 1 */
+#define CNS3XXX_PCIE0_CFG1_BASE_VIRT 0xE4000000
+
+#define CNS3XXX_PCIE0_MSG_BASE 0xAF000000 /* PCIe Port 0 Message Space */
+#define CNS3XXX_PCIE0_MSG_BASE_VIRT 0xE5000000
+
+#define CNS3XXX_PCIE1_MEM_BASE 0xB0000000 /* PCIe Port 1 IO/Memory Space */
+#define CNS3XXX_PCIE1_MEM_BASE_VIRT 0xE8000000
+
+#define CNS3XXX_PCIE1_HOST_BASE 0xBB000000 /* PCIe Port 1 RC Base */
+#define CNS3XXX_PCIE1_HOST_BASE_VIRT 0xE9000000
+
+#define CNS3XXX_PCIE1_IO_BASE 0xBC000000 /* PCIe Port 1 */
+#define CNS3XXX_PCIE1_IO_BASE_VIRT 0xEA000000
+
+#define CNS3XXX_PCIE1_CFG0_BASE 0xBD000000 /* PCIe Port 1 CFG Type 0 */
+#define CNS3XXX_PCIE1_CFG0_BASE_VIRT 0xEB000000
+
+#define CNS3XXX_PCIE1_CFG1_BASE 0xBE000000 /* PCIe Port 1 CFG Type 1 */
+#define CNS3XXX_PCIE1_CFG1_BASE_VIRT 0xEC000000
+
+#define CNS3XXX_PCIE1_MSG_BASE 0xBF000000 /* PCIe Port 1 Message Space */
+#define CNS3XXX_PCIE1_MSG_BASE_VIRT 0xED000000
+
+/*
+ * Testchip peripheral and fpga gic regions
+ */
+#define CNS3XXX_TC11MP_SCU_BASE 0x90000000 /* IRQ, Test chip */
+#define CNS3XXX_TC11MP_SCU_BASE_VIRT 0xFF000000
+
+#define CNS3XXX_TC11MP_GIC_CPU_BASE 0x90000100 /* Test chip interrupt controller CPU interface */
+#define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT 0xFF000100
+
+#define CNS3XXX_TC11MP_TWD_BASE 0x90000600
+#define CNS3XXX_TC11MP_TWD_BASE_VIRT 0xFF000600
+
+#define CNS3XXX_TC11MP_GIC_DIST_BASE 0x90001000 /* Test chip interrupt controller distributor */
+#define CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT 0xFF001000
+
+#define CNS3XXX_TC11MP_L220_BASE 0x92002000 /* L220 registers */
+#define CNS3XXX_TC11MP_L220_BASE_VIRT 0xFF002000
+
+/*
+ * Misc block
+ */
+#define MISC_MEM_MAP(offs) (void __iomem *)(CNS3XXX_MISC_BASE_VIRT + (offs))
+#define MISC_MEM_MAP_VALUE(offset) (*((volatile unsigned int *)(CNS3XXX_MISC_BASE_VIRT + (offset))))
+
+#define MISC_MEMORY_REMAP_REG MISC_MEM_MAP_VALUE(0x00)
+#define MISC_CHIP_CONFIG_REG MISC_MEM_MAP_VALUE(0x04)
+#define MISC_DEBUG_PROBE_DATA_REG MISC_MEM_MAP_VALUE(0x08)
+#define MISC_DEBUG_PROBE_SELECTION_REG MISC_MEM_MAP_VALUE(0x0C)
+#define MISC_IO_PIN_FUNC_SELECTION_REG MISC_MEM_MAP_VALUE(0x10)
+#define MISC_GPIOA_PIN_ENABLE_REG MISC_MEM_MAP_VALUE(0x14)
+#define MISC_GPIOB_PIN_ENABLE_REG MISC_MEM_MAP_VALUE(0x18)
+#define MISC_IO_PAD_DRIVE_STRENGTH_CTRL_A MISC_MEM_MAP_VALUE(0x1C)
+#define MISC_IO_PAD_DRIVE_STRENGTH_CTRL_B MISC_MEM_MAP_VALUE(0x20)
+#define MISC_GPIOA_15_0_PULL_CTRL_REG MISC_MEM_MAP_VALUE(0x24)
+#define MISC_GPIOA_16_31_PULL_CTRL_REG MISC_MEM_MAP_VALUE(0x28)
+#define MISC_GPIOB_15_0_PULL_CTRL_REG MISC_MEM_MAP_VALUE(0x2C)
+#define MISC_GPIOB_16_31_PULL_CTRL_REG MISC_MEM_MAP_VALUE(0x30)
+#define MISC_IO_PULL_CTRL_REG MISC_MEM_MAP_VALUE(0x34)
+#define MISC_E_FUSE_31_0_REG MISC_MEM_MAP_VALUE(0x40)
+#define MISC_E_FUSE_63_32_REG MISC_MEM_MAP_VALUE(0x44)
+#define MISC_E_FUSE_95_64_REG MISC_MEM_MAP_VALUE(0x48)
+#define MISC_E_FUSE_127_96_REG MISC_MEM_MAP_VALUE(0x4C)
+#define MISC_SOFTWARE_TEST_1_REG MISC_MEM_MAP_VALUE(0x50)
+#define MISC_SOFTWARE_TEST_2_REG MISC_MEM_MAP_VALUE(0x54)
+
+#define MISC_SATA_POWER_MODE MISC_MEM_MAP_VALUE(0x310)
+
+#define MISC_USB_CFG_REG MISC_MEM_MAP_VALUE(0x800)
+#define MISC_USB_STS_REG MISC_MEM_MAP_VALUE(0x804)
+#define MISC_USBPHY00_CFG_REG MISC_MEM_MAP_VALUE(0x808)
+#define MISC_USBPHY01_CFG_REG MISC_MEM_MAP_VALUE(0x80c)
+#define MISC_USBPHY10_CFG_REG MISC_MEM_MAP_VALUE(0x810)
+#define MISC_USBPHY11_CFG_REG MISC_MEM_MAP_VALUE(0x814)
+
+#define MISC_PCIEPHY_CMCTL(x) MISC_MEM_MAP(0x900 + (x) * 0x004)
+#define MISC_PCIEPHY_CTL(x) MISC_MEM_MAP(0x940 + (x) * 0x100)
+#define MISC_PCIE_AXIS_AWMISC(x) MISC_MEM_MAP(0x944 + (x) * 0x100)
+#define MISC_PCIE_AXIS_ARMISC(x) MISC_MEM_MAP(0x948 + (x) * 0x100)
+#define MISC_PCIE_AXIS_RMISC(x) MISC_MEM_MAP(0x94C + (x) * 0x100)
+#define MISC_PCIE_AXIS_BMISC(x) MISC_MEM_MAP(0x950 + (x) * 0x100)
+#define MISC_PCIE_AXIM_RMISC(x) MISC_MEM_MAP(0x954 + (x) * 0x100)
+#define MISC_PCIE_AXIM_BMISC(x) MISC_MEM_MAP(0x958 + (x) * 0x100)
+#define MISC_PCIE_CTRL(x) MISC_MEM_MAP(0x95C + (x) * 0x100)
+#define MISC_PCIE_PM_DEBUG(x) MISC_MEM_MAP(0x960 + (x) * 0x100)
+#define MISC_PCIE_RFC_DEBUG(x) MISC_MEM_MAP(0x964 + (x) * 0x100)
+#define MISC_PCIE_CXPL_DEBUGL(x) MISC_MEM_MAP(0x968 + (x) * 0x100)
+#define MISC_PCIE_CXPL_DEBUGH(x) MISC_MEM_MAP(0x96C + (x) * 0x100)
+#define MISC_PCIE_DIAG_DEBUGH(x) MISC_MEM_MAP(0x970 + (x) * 0x100)
+#define MISC_PCIE_W1CLR(x) MISC_MEM_MAP(0x974 + (x) * 0x100)
+#define MISC_PCIE_INT_MASK(x) MISC_MEM_MAP(0x978 + (x) * 0x100)
+#define MISC_PCIE_INT_STATUS(x) MISC_MEM_MAP(0x97C + (x) * 0x100)
+
+/*
+ * Power management and clock control
+ */
+#define PMU_REG_VALUE(offset) (*((volatile unsigned int *)(CNS3XXX_PM_BASE_VIRT + (offset))))
+
+#define PM_CLK_GATE_REG PMU_REG_VALUE(0x000)
+#define PM_SOFT_RST_REG PMU_REG_VALUE(0x004)
+#define PM_HS_CFG_REG PMU_REG_VALUE(0x008)
+#define PM_CACTIVE_STA_REG PMU_REG_VALUE(0x00C)
+#define PM_PWR_STA_REG PMU_REG_VALUE(0x010)
+#define PM_CLK_CTRL_REG PMU_REG_VALUE(0x014)
+#define PM_PLL_LCD_I2S_CTRL_REG PMU_REG_VALUE(0x018)
+#define PM_PLL_HM_PD_CTRL_REG PMU_REG_VALUE(0x01C)
+#define PM_REGULAT_CTRL_REG PMU_REG_VALUE(0x020)
+#define PM_WDT_CTRL_REG PMU_REG_VALUE(0x024)
+#define PM_WU_CTRL0_REG PMU_REG_VALUE(0x028)
+#define PM_WU_CTRL1_REG PMU_REG_VALUE(0x02C)
+#define PM_CSR_REG PMU_REG_VALUE(0x030)
+
+/* PM_CLK_GATE_REG */
+#define PM_CLK_GATE_REG_OFFSET_SDIO (25)
+#define PM_CLK_GATE_REG_OFFSET_GPU (24)
+#define PM_CLK_GATE_REG_OFFSET_CIM (23)
+#define PM_CLK_GATE_REG_OFFSET_LCDC (22)
+#define PM_CLK_GATE_REG_OFFSET_I2S (21)
+#define PM_CLK_GATE_REG_OFFSET_RAID (20)
+#define PM_CLK_GATE_REG_OFFSET_SATA (19)
+#define PM_CLK_GATE_REG_OFFSET_PCIE(x) (17 + (x))
+#define PM_CLK_GATE_REG_OFFSET_USB_HOST (16)
+#define PM_CLK_GATE_REG_OFFSET_USB_OTG (15)
+#define PM_CLK_GATE_REG_OFFSET_TIMER (14)
+#define PM_CLK_GATE_REG_OFFSET_CRYPTO (13)
+#define PM_CLK_GATE_REG_OFFSET_HCIE (12)
+#define PM_CLK_GATE_REG_OFFSET_SWITCH (11)
+#define PM_CLK_GATE_REG_OFFSET_GPIO (10)
+#define PM_CLK_GATE_REG_OFFSET_UART3 (9)
+#define PM_CLK_GATE_REG_OFFSET_UART2 (8)
+#define PM_CLK_GATE_REG_OFFSET_UART1 (7)
+#define PM_CLK_GATE_REG_OFFSET_RTC (5)
+#define PM_CLK_GATE_REG_OFFSET_GDMA (4)
+#define PM_CLK_GATE_REG_OFFSET_SPI_PCM_I2C (3)
+#define PM_CLK_GATE_REG_OFFSET_SMC_NFI (1)
+#define PM_CLK_GATE_REG_MASK (0x03FFFFBA)
+
+/* PM_SOFT_RST_REG */
+#define PM_SOFT_RST_REG_OFFST_WARM_RST_FLAG (31)
+#define PM_SOFT_RST_REG_OFFST_CPU1 (29)
+#define PM_SOFT_RST_REG_OFFST_CPU0 (28)
+#define PM_SOFT_RST_REG_OFFST_SDIO (25)
+#define PM_SOFT_RST_REG_OFFST_GPU (24)
+#define PM_SOFT_RST_REG_OFFST_CIM (23)
+#define PM_SOFT_RST_REG_OFFST_LCDC (22)
+#define PM_SOFT_RST_REG_OFFST_I2S (21)
+#define PM_SOFT_RST_REG_OFFST_RAID (20)
+#define PM_SOFT_RST_REG_OFFST_SATA (19)
+#define PM_SOFT_RST_REG_OFFST_PCIE(x) (17 + (x))
+#define PM_SOFT_RST_REG_OFFST_USB_HOST (16)
+#define PM_SOFT_RST_REG_OFFST_USB_OTG (15)
+#define PM_SOFT_RST_REG_OFFST_TIMER (14)
+#define PM_SOFT_RST_REG_OFFST_CRYPTO (13)
+#define PM_SOFT_RST_REG_OFFST_HCIE (12)
+#define PM_SOFT_RST_REG_OFFST_SWITCH (11)
+#define PM_SOFT_RST_REG_OFFST_GPIO (10)
+#define PM_SOFT_RST_REG_OFFST_UART3 (9)
+#define PM_SOFT_RST_REG_OFFST_UART2 (8)
+#define PM_SOFT_RST_REG_OFFST_UART1 (7)
+#define PM_SOFT_RST_REG_OFFST_RTC (5)
+#define PM_SOFT_RST_REG_OFFST_GDMA (4)
+#define PM_SOFT_RST_REG_OFFST_SPI_PCM_I2C (3)
+#define PM_SOFT_RST_REG_OFFST_DMC (2)
+#define PM_SOFT_RST_REG_OFFST_SMC_NFI (1)
+#define PM_SOFT_RST_REG_OFFST_GLOBAL (0)
+#define PM_SOFT_RST_REG_MASK (0xF3FFFFBF)
+
+/* PM_HS_CFG_REG */
+#define PM_HS_CFG_REG_OFFSET_SDIO (25)
+#define PM_HS_CFG_REG_OFFSET_GPU (24)
+#define PM_HS_CFG_REG_OFFSET_CIM (23)
+#define PM_HS_CFG_REG_OFFSET_LCDC (22)
+#define PM_HS_CFG_REG_OFFSET_I2S (21)
+#define PM_HS_CFG_REG_OFFSET_RAID (20)
+#define PM_HS_CFG_REG_OFFSET_SATA (19)
+#define PM_HS_CFG_REG_OFFSET_PCIE1 (18)
+#define PM_HS_CFG_REG_OFFSET_PCIE0 (17)
+#define PM_HS_CFG_REG_OFFSET_USB_HOST (16)
+#define PM_HS_CFG_REG_OFFSET_USB_OTG (15)
+#define PM_HS_CFG_REG_OFFSET_TIMER (14)
+#define PM_HS_CFG_REG_OFFSET_CRYPTO (13)
+#define PM_HS_CFG_REG_OFFSET_HCIE (12)
+#define PM_HS_CFG_REG_OFFSET_SWITCH (11)
+#define PM_HS_CFG_REG_OFFSET_GPIO (10)
+#define PM_HS_CFG_REG_OFFSET_UART3 (9)
+#define PM_HS_CFG_REG_OFFSET_UART2 (8)
+#define PM_HS_CFG_REG_OFFSET_UART1 (7)
+#define PM_HS_CFG_REG_OFFSET_RTC (5)
+#define PM_HS_CFG_REG_OFFSET_GDMA (4)
+#define PM_HS_CFG_REG_OFFSET_SPI_PCM_I2S (3)
+#define PM_HS_CFG_REG_OFFSET_DMC (2)
+#define PM_HS_CFG_REG_OFFSET_SMC_NFI (1)
+#define PM_HS_CFG_REG_MASK (0x03FFFFBE)
+#define PM_HS_CFG_REG_MASK_SUPPORT (0x01100806)
+
+/* PM_CACTIVE_STA_REG */
+#define PM_CACTIVE_STA_REG_OFFSET_SDIO (25)
+#define PM_CACTIVE_STA_REG_OFFSET_GPU (24)
+#define PM_CACTIVE_STA_REG_OFFSET_CIM (23)
+#define PM_CACTIVE_STA_REG_OFFSET_LCDC (22)
+#define PM_CACTIVE_STA_REG_OFFSET_I2S (21)
+#define PM_CACTIVE_STA_REG_OFFSET_RAID (20)
+#define PM_CACTIVE_STA_REG_OFFSET_SATA (19)
+#define PM_CACTIVE_STA_REG_OFFSET_PCIE1 (18)
+#define PM_CACTIVE_STA_REG_OFFSET_PCIE0 (17)
+#define PM_CACTIVE_STA_REG_OFFSET_USB_HOST (16)
+#define PM_CACTIVE_STA_REG_OFFSET_USB_OTG (15)
+#define PM_CACTIVE_STA_REG_OFFSET_TIMER (14)
+#define PM_CACTIVE_STA_REG_OFFSET_CRYPTO (13)
+#define PM_CACTIVE_STA_REG_OFFSET_HCIE (12)
+#define PM_CACTIVE_STA_REG_OFFSET_SWITCH (11)
+#define PM_CACTIVE_STA_REG_OFFSET_GPIO (10)
+#define PM_CACTIVE_STA_REG_OFFSET_UART3 (9)
+#define PM_CACTIVE_STA_REG_OFFSET_UART2 (8)
+#define PM_CACTIVE_STA_REG_OFFSET_UART1 (7)
+#define PM_CACTIVE_STA_REG_OFFSET_RTC (5)
+#define PM_CACTIVE_STA_REG_OFFSET_GDMA (4)
+#define PM_CACTIVE_STA_REG_OFFSET_SPI_PCM_I2S (3)
+#define PM_CACTIVE_STA_REG_OFFSET_DMC (2)
+#define PM_CACTIVE_STA_REG_OFFSET_SMC_NFI (1)
+#define PM_CACTIVE_STA_REG_MASK (0x03FFFFBE)
+
+/* PM_PWR_STA_REG */
+#define PM_PWR_STA_REG_REG_OFFSET_SDIO (25)
+#define PM_PWR_STA_REG_REG_OFFSET_GPU (24)
+#define PM_PWR_STA_REG_REG_OFFSET_CIM (23)
+#define PM_PWR_STA_REG_REG_OFFSET_LCDC (22)
+#define PM_PWR_STA_REG_REG_OFFSET_I2S (21)
+#define PM_PWR_STA_REG_REG_OFFSET_RAID (20)
+#define PM_PWR_STA_REG_REG_OFFSET_SATA (19)
+#define PM_PWR_STA_REG_REG_OFFSET_PCIE1 (18)
+#define PM_PWR_STA_REG_REG_OFFSET_PCIE0 (17)
+#define PM_PWR_STA_REG_REG_OFFSET_USB_HOST (16)
+#define PM_PWR_STA_REG_REG_OFFSET_USB_OTG (15)
+#define PM_PWR_STA_REG_REG_OFFSET_TIMER (14)
+#define PM_PWR_STA_REG_REG_OFFSET_CRYPTO (13)
+#define PM_PWR_STA_REG_REG_OFFSET_HCIE (12)
+#define PM_PWR_STA_REG_REG_OFFSET_SWITCH (11)
+#define PM_PWR_STA_REG_REG_OFFSET_GPIO (10)
+#define PM_PWR_STA_REG_REG_OFFSET_UART3 (9)
+#define PM_PWR_STA_REG_REG_OFFSET_UART2 (8)
+#define PM_PWR_STA_REG_REG_OFFSET_UART1 (7)
+#define PM_PWR_STA_REG_REG_OFFSET_RTC (5)
+#define PM_PWR_STA_REG_REG_OFFSET_GDMA (4)
+#define PM_PWR_STA_REG_REG_OFFSET_SPI_PCM_I2S (3)
+#define PM_PWR_STA_REG_REG_OFFSET_DMC (2)
+#define PM_PWR_STA_REG_REG_OFFSET_SMC_NFI (1)
+#define PM_PWR_STA_REG_REG_MASK (0x03FFFFBE)
+
+/* PM_CLK_CTRL_REG */
+#define PM_CLK_CTRL_REG_OFFSET_I2S_MCLK (31)
+#define PM_CLK_CTRL_REG_OFFSET_DDR2_CHG_EN (30)
+#define PM_CLK_CTRL_REG_OFFSET_PCIE_REF1_EN (29)
+#define PM_CLK_CTRL_REG_OFFSET_PCIE_REF0_EN (28)
+#define PM_CLK_CTRL_REG_OFFSET_TIMER_SIM_MODE (27)
+#define PM_CLK_CTRL_REG_OFFSET_I2SCLK_DIV (24)
+#define PM_CLK_CTRL_REG_OFFSET_I2SCLK_SEL (22)
+#define PM_CLK_CTRL_REG_OFFSET_CLKOUT_DIV (20)
+#define PM_CLK_CTRL_REG_OFFSET_CLKOUT_SEL (16)
+#define PM_CLK_CTRL_REG_OFFSET_MDC_DIV (14)
+#define PM_CLK_CTRL_REG_OFFSET_CRYPTO_CLK_SEL (12)
+#define PM_CLK_CTRL_REG_OFFSET_CPU_PWR_MODE (9)
+#define PM_CLK_CTRL_REG_OFFSET_PLL_DDR2_SEL (7)
+#define PM_CLK_CTRL_REG_OFFSET_DIV_IMMEDIATE (6)
+#define PM_CLK_CTRL_REG_OFFSET_CPU_CLK_DIV (4)
+#define PM_CLK_CTRL_REG_OFFSET_PLL_CPU_SEL (0)
+
+#define PM_CPU_CLK_DIV(DIV) { \
+ PM_CLK_CTRL_REG &= ~((0x3) << PM_CLK_CTRL_REG_OFFSET_CPU_CLK_DIV); \
+ PM_CLK_CTRL_REG |= (((DIV)&0x3) << PM_CLK_CTRL_REG_OFFSET_CPU_CLK_DIV); \
+}
+
+#define PM_PLL_CPU_SEL(CPU) { \
+ PM_CLK_CTRL_REG &= ~((0xF) << PM_CLK_CTRL_REG_OFFSET_PLL_CPU_SEL); \
+ PM_CLK_CTRL_REG |= (((CPU)&0xF) << PM_CLK_CTRL_REG_OFFSET_PLL_CPU_SEL); \
+}
+
+/* PM_PLL_LCD_I2S_CTRL_REG */
+#define PM_PLL_LCD_I2S_CTRL_REG_OFFSET_MCLK_SMC_DIV (22)
+#define PM_PLL_LCD_I2S_CTRL_REG_OFFSET_R_SEL (17)
+#define PM_PLL_LCD_I2S_CTRL_REG_OFFSET_PLL_LCD_P (11)
+#define PM_PLL_LCD_I2S_CTRL_REG_OFFSET_PLL_LCD_M (3)
+#define PM_PLL_LCD_I2S_CTRL_REG_OFFSET_PLL_LCD_S (0)
+
+/* PM_PLL_HM_PD_CTRL_REG */
+#define PM_PLL_HM_PD_CTRL_REG_OFFSET_SATA_PHY1 (11)
+#define PM_PLL_HM_PD_CTRL_REG_OFFSET_SATA_PHY0 (10)
+#define PM_PLL_HM_PD_CTRL_REG_OFFSET_PLL_I2SCD (6)
+#define PM_PLL_HM_PD_CTRL_REG_OFFSET_PLL_I2S (5)
+#define PM_PLL_HM_PD_CTRL_REG_OFFSET_PLL_LCD (4)
+#define PM_PLL_HM_PD_CTRL_REG_OFFSET_PLL_USB (3)
+#define PM_PLL_HM_PD_CTRL_REG_OFFSET_PLL_RGMII (2)
+#define PM_PLL_HM_PD_CTRL_REG_MASK (0x00000C7C)
+
+/* PM_WDT_CTRL_REG */
+#define PM_WDT_CTRL_REG_OFFSET_RESET_CPU_ONLY (0)
+
+/* PM_CSR_REG - Clock Scaling Register */
+#define PM_CSR_REG_OFFSET_CSR_EN (30)
+#define PM_CSR_REG_OFFSET_CSR_NUM (0)
+
+#define CNS3XXX_PWR_CLK_EN(BLOCK) (0x1<<PM_CLK_GATE_REG_OFFSET_##BLOCK)
+
+/* Software reset */
+#define CNS3XXX_PWR_SOFTWARE_RST(BLOCK) (0x1<<PM_SOFT_RST_REG_OFFST_##BLOCK)
+
+/*
+ * The CNS3XXX supports several power saving modes:
+ * DFS, IDLE, HALT, DOZE, SLEEP and Hibernate.
+ */
+#define CNS3XXX_PWR_CPU_MODE_DFS (0)
+#define CNS3XXX_PWR_CPU_MODE_IDLE (1)
+#define CNS3XXX_PWR_CPU_MODE_HALT (2)
+#define CNS3XXX_PWR_CPU_MODE_DOZE (3)
+#define CNS3XXX_PWR_CPU_MODE_SLEEP (4)
+#define CNS3XXX_PWR_CPU_MODE_HIBERNATE (5)
+
+#define CNS3XXX_PWR_PLL(BLOCK) (0x1<<PM_PLL_HM_PD_CTRL_REG_OFFSET_##BLOCK)
+#define CNS3XXX_PWR_PLL_ALL PM_PLL_HM_PD_CTRL_REG_MASK
+
+/* Change CPU frequency and divider */
+#define CNS3XXX_PWR_PLL_CPU_300MHZ (0)
+#define CNS3XXX_PWR_PLL_CPU_333MHZ (1)
+#define CNS3XXX_PWR_PLL_CPU_366MHZ (2)
+#define CNS3XXX_PWR_PLL_CPU_400MHZ (3)
+#define CNS3XXX_PWR_PLL_CPU_433MHZ (4)
+#define CNS3XXX_PWR_PLL_CPU_466MHZ (5)
+#define CNS3XXX_PWR_PLL_CPU_500MHZ (6)
+#define CNS3XXX_PWR_PLL_CPU_533MHZ (7)
+#define CNS3XXX_PWR_PLL_CPU_566MHZ (8)
+#define CNS3XXX_PWR_PLL_CPU_600MHZ (9)
+#define CNS3XXX_PWR_PLL_CPU_633MHZ (10)
+#define CNS3XXX_PWR_PLL_CPU_666MHZ (11)
+#define CNS3XXX_PWR_PLL_CPU_700MHZ (12)
+
+#define CNS3XXX_PWR_CPU_CLK_DIV_BY1 (0)
+#define CNS3XXX_PWR_CPU_CLK_DIV_BY2 (1)
+#define CNS3XXX_PWR_CPU_CLK_DIV_BY4 (2)
+
+/* Change DDR2 frequency */
+#define CNS3XXX_PWR_PLL_DDR2_200MHZ (0)
+#define CNS3XXX_PWR_PLL_DDR2_266MHZ (1)
+#define CNS3XXX_PWR_PLL_DDR2_333MHZ (2)
+#define CNS3XXX_PWR_PLL_DDR2_400MHZ (3)
+
+void cns3xxx_pwr_soft_rst(unsigned int block);
+void cns3xxx_pwr_clk_en(unsigned int block);
+int cns3xxx_cpu_clock(void);
+
+/*
+ * ARM11 MPCore interrupt sources (primary GIC)
+ */
+#define IRQ_CNS3XXX_PMU (IRQ_TC11MP_GIC_START + 0)
+#define IRQ_CNS3XXX_SDIO (IRQ_TC11MP_GIC_START + 1)
+#define IRQ_CNS3XXX_L2CC (IRQ_TC11MP_GIC_START + 2)
+#define IRQ_CNS3XXX_RTC (IRQ_TC11MP_GIC_START + 3)
+#define IRQ_CNS3XXX_I2S (IRQ_TC11MP_GIC_START + 4)
+#define IRQ_CNS3XXX_PCM (IRQ_TC11MP_GIC_START + 5)
+#define IRQ_CNS3XXX_SPI (IRQ_TC11MP_GIC_START + 6)
+#define IRQ_CNS3XXX_I2C (IRQ_TC11MP_GIC_START + 7)
+#define IRQ_CNS3XXX_CIM (IRQ_TC11MP_GIC_START + 8)
+#define IRQ_CNS3XXX_GPU (IRQ_TC11MP_GIC_START + 9)
+#define IRQ_CNS3XXX_LCD (IRQ_TC11MP_GIC_START + 10)
+#define IRQ_CNS3XXX_GPIOA (IRQ_TC11MP_GIC_START + 11)
+#define IRQ_CNS3XXX_GPIOB (IRQ_TC11MP_GIC_START + 12)
+#define IRQ_CNS3XXX_UART0 (IRQ_TC11MP_GIC_START + 13)
+#define IRQ_CNS3XXX_UART1 (IRQ_TC11MP_GIC_START + 14)
+#define IRQ_CNS3XXX_UART2 (IRQ_TC11MP_GIC_START + 15)
+#define IRQ_CNS3XXX_ARM11 (IRQ_TC11MP_GIC_START + 16)
+
+#define IRQ_CNS3XXX_SW_STATUS (IRQ_TC11MP_GIC_START + 17)
+#define IRQ_CNS3XXX_SW_R0TXC (IRQ_TC11MP_GIC_START + 18)
+#define IRQ_CNS3XXX_SW_R0RXC (IRQ_TC11MP_GIC_START + 19)
+#define IRQ_CNS3XXX_SW_R0QE (IRQ_TC11MP_GIC_START + 20)
+#define IRQ_CNS3XXX_SW_R0QF (IRQ_TC11MP_GIC_START + 21)
+#define IRQ_CNS3XXX_SW_R1TXC (IRQ_TC11MP_GIC_START + 22)
+#define IRQ_CNS3XXX_SW_R1RXC (IRQ_TC11MP_GIC_START + 23)
+#define IRQ_CNS3XXX_SW_R1QE (IRQ_TC11MP_GIC_START + 24)
+#define IRQ_CNS3XXX_SW_R1QF (IRQ_TC11MP_GIC_START + 25)
+#define IRQ_CNS3XXX_SW_PPE (IRQ_TC11MP_GIC_START + 26)
+
+#define IRQ_CNS3XXX_CRYPTO (IRQ_TC11MP_GIC_START + 27)
+#define IRQ_CNS3XXX_HCIE (IRQ_TC11MP_GIC_START + 28)
+#define IRQ_CNS3XXX_PCIE0_DEVICE (IRQ_TC11MP_GIC_START + 29)
+#define IRQ_CNS3XXX_PCIE1_DEVICE (IRQ_TC11MP_GIC_START + 30)
+#define IRQ_CNS3XXX_USB_OTG (IRQ_TC11MP_GIC_START + 31)
+#define IRQ_CNS3XXX_USB_EHCI (IRQ_TC11MP_GIC_START + 32)
+#define IRQ_CNS3XXX_SATA (IRQ_TC11MP_GIC_START + 33)
+#define IRQ_CNS3XXX_RAID (IRQ_TC11MP_GIC_START + 34)
+#define IRQ_CNS3XXX_SMC (IRQ_TC11MP_GIC_START + 35)
+
+#define IRQ_CNS3XXX_DMAC_ABORT (IRQ_TC11MP_GIC_START + 36)
+#define IRQ_CNS3XXX_DMAC0 (IRQ_TC11MP_GIC_START + 37)
+#define IRQ_CNS3XXX_DMAC1 (IRQ_TC11MP_GIC_START + 38)
+#define IRQ_CNS3XXX_DMAC2 (IRQ_TC11MP_GIC_START + 39)
+#define IRQ_CNS3XXX_DMAC3 (IRQ_TC11MP_GIC_START + 40)
+#define IRQ_CNS3XXX_DMAC4 (IRQ_TC11MP_GIC_START + 41)
+#define IRQ_CNS3XXX_DMAC5 (IRQ_TC11MP_GIC_START + 42)
+#define IRQ_CNS3XXX_DMAC6 (IRQ_TC11MP_GIC_START + 43)
+#define IRQ_CNS3XXX_DMAC7 (IRQ_TC11MP_GIC_START + 44)
+#define IRQ_CNS3XXX_DMAC8 (IRQ_TC11MP_GIC_START + 45)
+#define IRQ_CNS3XXX_DMAC9 (IRQ_TC11MP_GIC_START + 46)
+#define IRQ_CNS3XXX_DMAC10 (IRQ_TC11MP_GIC_START + 47)
+#define IRQ_CNS3XXX_DMAC11 (IRQ_TC11MP_GIC_START + 48)
+#define IRQ_CNS3XXX_DMAC12 (IRQ_TC11MP_GIC_START + 49)
+#define IRQ_CNS3XXX_DMAC13 (IRQ_TC11MP_GIC_START + 50)
+#define IRQ_CNS3XXX_DMAC14 (IRQ_TC11MP_GIC_START + 51)
+#define IRQ_CNS3XXX_DMAC15 (IRQ_TC11MP_GIC_START + 52)
+#define IRQ_CNS3XXX_DMAC16 (IRQ_TC11MP_GIC_START + 53)
+#define IRQ_CNS3XXX_DMAC17 (IRQ_TC11MP_GIC_START + 54)
+
+#define IRQ_CNS3XXX_PCIE0_RC (IRQ_TC11MP_GIC_START + 55)
+#define IRQ_CNS3XXX_PCIE1_RC (IRQ_TC11MP_GIC_START + 56)
+#define IRQ_CNS3XXX_TIMER0 (IRQ_TC11MP_GIC_START + 57)
+#define IRQ_CNS3XXX_TIMER1 (IRQ_TC11MP_GIC_START + 58)
+#define IRQ_CNS3XXX_USB_OHCI (IRQ_TC11MP_GIC_START + 59)
+#define IRQ_CNS3XXX_TIMER2 (IRQ_TC11MP_GIC_START + 60)
+#define IRQ_CNS3XXX_EXTERNAL_PIN0 (IRQ_TC11MP_GIC_START + 61)
+#define IRQ_CNS3XXX_EXTERNAL_PIN1 (IRQ_TC11MP_GIC_START + 62)
+#define IRQ_CNS3XXX_EXTERNAL_PIN2 (IRQ_TC11MP_GIC_START + 63)
+
+#define NR_IRQS_CNS3XXX (IRQ_TC11MP_GIC_START + 64)
+
+#if !defined(NR_IRQS) || (NR_IRQS < NR_IRQS_CNS3XXX)
+#undef NR_IRQS
+#define NR_IRQS NR_IRQS_CNS3XXX
+#endif
+
+#endif /* __MACH_BOARD_CNS3XXX_H */
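
The PM_PLL_CPU_SEL()/PM_CPU_CLK_DIV() macros and the CNS3XXX_PWR_* tables above write the CPU PLL selector (bits 0-3), the post-divider (bits 4-5) and the CPU power mode (bits 9-11) of PM_CLK_CTRL_REG. A hedged usage sketch, assuming the PM block is already mapped so the PMU_REG_VALUE() dereferences are valid; the function names are illustrative only:

/* Illustrative only: program a 250 MHz core clock (500 MHz PLL, divide
 * by 2) and request hibernate mode via the CPU power-mode field that
 * cns3xxx_power_off() also uses. */
static void example_set_cpu_clock(void)
{
	PM_PLL_CPU_SEL(CNS3XXX_PWR_PLL_CPU_500MHZ);	/* PLL_CPU_SEL = 6 */
	PM_CPU_CLK_DIV(CNS3XXX_PWR_CPU_CLK_DIV_BY2);	/* CPU_CLK_DIV = 1 */
}

static void example_request_hibernate(void)
{
	PM_CLK_CTRL_REG &= ~(0x7 << PM_CLK_CTRL_REG_OFFSET_CPU_PWR_MODE);
	PM_CLK_CTRL_REG |= CNS3XXX_PWR_CPU_MODE_HIBERNATE <<
			   PM_CLK_CTRL_REG_OFFSET_CPU_PWR_MODE;
}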
diff --git a/arch/arm/mach-cns3xxx/include/mach/debug-macro.S b/arch/arm/mach-cns3xxx/include/mach/debug-macro.S
new file mode 100644
index 000000000000..d16ce7eb00e9
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/include/mach/debug-macro.S
@@ -0,0 +1,21 @@
+/*
+ * Debugging macro include header
+ *
+ * Copyright 1994-1999 Russell King
+ * Copyright 2008 Cavium Networks
+ * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ */
+
+ .macro addruart,rx
+ mrc p15, 0, \rx, c1, c0
+ tst \rx, #1 @ MMU enabled?
+ moveq \rx, #0x10000000
+ movne \rx, #0xf0000000 @ virtual base
+ orr \rx, \rx, #0x00009000
+ .endm
+
+#include <asm/hardware/debug-pl01x.S>
diff --git a/arch/arm/mach-cns3xxx/include/mach/entry-macro.S b/arch/arm/mach-cns3xxx/include/mach/entry-macro.S
new file mode 100644
index 000000000000..5e1c5545680f
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/include/mach/entry-macro.S
@@ -0,0 +1,82 @@
+/*
+ * Low-level IRQ helper macros for Cavium Networks platforms
+ *
+ * Copyright 2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <mach/hardware.h>
+#include <asm/hardware/gic.h>
+
+ .macro disable_fiq
+ .endm
+
+ .macro get_irqnr_preamble, base, tmp
+ ldr \base, =gic_cpu_base_addr
+ ldr \base, [\base]
+ .endm
+
+ .macro arch_ret_to_user, tmp1, tmp2
+ .endm
+
+ /*
+ * The interrupt numbering scheme is defined in the
+ * interrupt controller spec. To wit:
+ *
+ * Interrupts 0-15 are IPI
+ * 16-28 are reserved
+ * 29-31 are local. We allow 30 to be used for the watchdog.
+ * 32-1020 are global
+ * 1021-1022 are reserved
+ * 1023 is "spurious" (no interrupt)
+ *
+ * For now, we ignore all local interrupts so only return an interrupt if it's
+ * between 30 and 1020. The test_for_ipi routine below will pick up on IPIs.
+ *
+ * A simple read from the controller will tell us the number of the highest
+ * priority enabled interrupt. We then just need to check whether it is in the
+ * valid range for an IRQ (30-1020 inclusive).
+ */
+
+ .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
+
+ ldr \irqstat, [\base, #GIC_CPU_INTACK] /* bits 12-10 = src CPU, 9-0 = int # */
+
+ ldr \tmp, =1021
+
+ bic \irqnr, \irqstat, #0x1c00
+
+ cmp \irqnr, #29
+ cmpcc \irqnr, \irqnr
+ cmpne \irqnr, \tmp
+ cmpcs \irqnr, \irqnr
+
+ .endm
+
+ /* We assume that irqstat (the raw value of the IRQ acknowledge
+ * register) is preserved from the macro above.
+ * If there is an IPI, we immediately signal end of interrupt on the
+ * controller, since this requires the original irqstat value which
+ * we won't easily be able to recreate later.
+ */
+
+ .macro test_for_ipi, irqnr, irqstat, base, tmp
+ bic \irqnr, \irqstat, #0x1c00
+ cmp \irqnr, #16
+ strcc \irqstat, [\base, #GIC_CPU_EOI]
+ cmpcs \irqnr, \irqnr
+ .endm
+
+ /* As above, this assumes that irqstat and base are preserved. */
+
+ .macro test_for_ltirq, irqnr, irqstat, base, tmp
+ bic \irqnr, \irqstat, #0x1c00
+ mov \tmp, #0
+ cmp \irqnr, #29
+ moveq \tmp, #1
+ streq \irqstat, [\base, #GIC_CPU_EOI]
+ cmp \tmp, #0
+ .endm
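
The conditional-compare sequence in get_irqnr_and_base above accepts only interrupt IDs in the 30-1020 range after masking off the source-CPU bits. Expressed as C, the same decode looks roughly like this (a sketch, not code from the patch):

/* C sketch of the decode done by get_irqnr_and_base: strip bits 12-10
 * (source CPU) from the acknowledge value and keep only IDs 30..1020;
 * IPIs, local interrupts and spurious IDs are reported as "no IRQ". */
#include <stdint.h>

static int gic_decode_ack(uint32_t irqstat)
{
	uint32_t irqnr = irqstat & ~0x1c00u;

	if (irqnr < 30 || irqnr > 1020)
		return -1;	/* IPI/local/spurious: handled elsewhere */
	return (int)irqnr;
}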
diff --git a/arch/arm/mach-cns3xxx/include/mach/hardware.h b/arch/arm/mach-cns3xxx/include/mach/hardware.h
new file mode 100644
index 000000000000..57e09836f9d7
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/include/mach/hardware.h
@@ -0,0 +1,22 @@
+/*
+ * This file contains the hardware definitions of the Cavium Networks boards.
+ *
+ * Copyright 2003 ARM Limited.
+ * Copyright 2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MACH_HARDWARE_H
+#define __MACH_HARDWARE_H
+
+#include <asm/sizes.h>
+
+/* PCI I/O and memory space definitions for this platform */
+#define PCIBIOS_MIN_IO 0x00000000
+#define PCIBIOS_MIN_MEM 0x00000000
+#define pcibios_assign_all_busses() 1
+
+#endif
diff --git a/arch/arm/mach-cns3xxx/include/mach/io.h b/arch/arm/mach-cns3xxx/include/mach/io.h
new file mode 100644
index 000000000000..33b6fc1ece7c
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/include/mach/io.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright 2008 Cavium Networks
+ * Copyright 2003 ARM Limited
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ */
+#ifndef __MACH_IO_H
+#define __MACH_IO_H
+
+#define IO_SPACE_LIMIT 0xffffffff
+
+#define __io(a) __typesafe_io(a)
+#define __mem_pci(a) (a)
+
+#endif
diff --git a/arch/arm/mach-cns3xxx/include/mach/irqs.h b/arch/arm/mach-cns3xxx/include/mach/irqs.h
new file mode 100644
index 000000000000..2ab96f8085c8
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/include/mach/irqs.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2000 Deep Blue Solutions Ltd.
+ * Copyright 2003 ARM Limited
+ * Copyright 2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MACH_IRQS_H
+#define __MACH_IRQS_H
+
+#define IRQ_LOCALTIMER 29
+#define IRQ_LOCALWDOG 30
+#define IRQ_TC11MP_GIC_START 32
+
+#include <mach/cns3xxx.h>
+
+#ifndef NR_IRQS
+#error "NR_IRQS not defined by the board-specific files"
+#endif
+
+#endif
diff --git a/arch/arm/mach-cns3xxx/include/mach/memory.h b/arch/arm/mach-cns3xxx/include/mach/memory.h
new file mode 100644
index 000000000000..3b6b769b7a27
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/include/mach/memory.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2003 ARM Limited
+ * Copyright 2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MACH_MEMORY_H
+#define __MACH_MEMORY_H
+
+/*
+ * Physical DRAM offset.
+ */
+#define PHYS_OFFSET UL(0x00000000)
+
+#define __phys_to_bus(x) ((x) + PHYS_OFFSET)
+#define __bus_to_phys(x) ((x) - PHYS_OFFSET)
+
+#define __virt_to_bus(v) __phys_to_bus(__virt_to_phys(v))
+#define __bus_to_virt(b) __phys_to_virt(__bus_to_phys(b))
+#define __pfn_to_bus(p) __phys_to_bus(__pfn_to_phys(p))
+#define __bus_to_pfn(b) __phys_to_pfn(__bus_to_phys(b))
+
+#endif
diff --git a/arch/arm/mach-cns3xxx/include/mach/system.h b/arch/arm/mach-cns3xxx/include/mach/system.h
new file mode 100644
index 000000000000..58bb03ae3cf4
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/include/mach/system.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2000 Deep Blue Solutions Ltd
+ * Copyright 2003 ARM Limited
+ * Copyright 2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MACH_SYSTEM_H
+#define __MACH_SYSTEM_H
+
+#include <linux/io.h>
+#include <asm/proc-fns.h>
+#include <mach/hardware.h>
+
+static inline void arch_idle(void)
+{
+ /*
+ * This should do all the clock switching
+ * and wait for interrupt tricks
+ */
+ cpu_do_idle();
+}
+
+void arch_reset(char mode, const char *cmd);
+
+#endif
diff --git a/arch/arm/mach-cns3xxx/include/mach/timex.h b/arch/arm/mach-cns3xxx/include/mach/timex.h
new file mode 100644
index 000000000000..1fd04217cacb
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/include/mach/timex.h
@@ -0,0 +1,12 @@
+/*
+ * Cavium Networks architecture timex specifications
+ *
+ * Copyright 2003 ARM Limited
+ * Copyright 2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#define CLOCK_TICK_RATE (50000000 / 16)
diff --git a/arch/arm/mach-cns3xxx/include/mach/uncompress.h b/arch/arm/mach-cns3xxx/include/mach/uncompress.h
new file mode 100644
index 000000000000..de8ead9b91f7
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/include/mach/uncompress.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2003 ARM Limited
+ * Copyright 2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/mach-types.h>
+#include <mach/hardware.h>
+#include <mach/cns3xxx.h>
+
+#define AMBA_UART_DR(base) (*(volatile unsigned char *)((base) + 0x00))
+#define AMBA_UART_LCRH(base) (*(volatile unsigned char *)((base) + 0x2c))
+#define AMBA_UART_CR(base) (*(volatile unsigned char *)((base) + 0x30))
+#define AMBA_UART_FR(base) (*(volatile unsigned char *)((base) + 0x18))
+
+/*
+ * Return the UART base address
+ */
+static inline unsigned long get_uart_base(void)
+{
+ if (machine_is_cns3420vb())
+ return CNS3XXX_UART0_BASE;
+ else
+ return 0;
+}
+
+/*
+ * This does not append a newline
+ */
+static inline void putc(int c)
+{
+ unsigned long base = get_uart_base();
+
+ while (AMBA_UART_FR(base) & (1 << 5))
+ barrier();
+
+ AMBA_UART_DR(base) = c;
+}
+
+static inline void flush(void)
+{
+ unsigned long base = get_uart_base();
+
+ while (AMBA_UART_FR(base) & (1 << 3))
+ barrier();
+}
+
+/*
+ * nothing to do
+ */
+#define arch_decomp_setup()
+#define arch_decomp_wdog()
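
putc() above spins while bit 5 of the PL011 flag register (transmit FIFO full) is set, and flush() waits for bit 3 (UART busy) to clear. A hypothetical string helper built on them might look like the sketch below; the name and CR-LF handling are assumptions, not part of the patch:

/* Hypothetical helper built on the putc()/flush() above: emit a string,
 * sending a carriage return before each newline, then drain the UART. */
static inline void example_putstr(const char *s)
{
	while (*s) {
		if (*s == '\n')
			putc('\r');
		putc(*s++);
	}
	flush();
}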
diff --git a/arch/arm/mach-cns3xxx/include/mach/vmalloc.h b/arch/arm/mach-cns3xxx/include/mach/vmalloc.h
new file mode 100644
index 000000000000..4d381ec05278
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/include/mach/vmalloc.h
@@ -0,0 +1,11 @@
+/*
+ * Copyright 2000 Russell King.
+ * Copyright 2003 ARM Limited
+ * Copyright 2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#define VMALLOC_END 0xd8000000
diff --git a/arch/arm/mach-cns3xxx/pm.c b/arch/arm/mach-cns3xxx/pm.c
new file mode 100644
index 000000000000..725e1a4fc231
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/pm.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <mach/system.h>
+#include <mach/cns3xxx.h>
+
+void cns3xxx_pwr_clk_en(unsigned int block)
+{
+ PM_CLK_GATE_REG |= (block & PM_CLK_GATE_REG_MASK);
+}
+
+void cns3xxx_pwr_power_up(unsigned int block)
+{
+ PM_PLL_HM_PD_CTRL_REG &= ~(block & CNS3XXX_PWR_PLL_ALL);
+
+ /* Wait 300us for the PLL output clock to lock. */
+ udelay(300);
+};
+
+void cns3xxx_pwr_power_down(unsigned int block)
+{
+ /* write '1' to power down */
+ PM_PLL_HM_PD_CTRL_REG |= (block & CNS3XXX_PWR_PLL_ALL);
+};
+
+static void cns3xxx_pwr_soft_rst_force(unsigned int block)
+{
+ /*
+ * bits 0, 28 and 29 are reset by programming them low;
+ * the others are reset by programming low and then high
+ */
+ if (block & 0x30000001) {
+ PM_SOFT_RST_REG &= ~(block & PM_SOFT_RST_REG_MASK);
+ } else {
+ PM_SOFT_RST_REG &= ~(block & PM_SOFT_RST_REG_MASK);
+ PM_SOFT_RST_REG |= (block & PM_SOFT_RST_REG_MASK);
+ }
+}
+
+void cns3xxx_pwr_soft_rst(unsigned int block)
+{
+ static unsigned int soft_reset;
+
+ if (soft_reset & block) {
+ /* SPI/I2C/GPIO use the same block, reset once. */
+ return;
+ } else {
+ soft_reset |= block;
+ }
+ cns3xxx_pwr_soft_rst_force(block);
+}
+
+void arch_reset(char mode, const char *cmd)
+{
+ /*
+ * To reset, assert the global software reset bit
+ * in the power management block.
+ */
+ cns3xxx_pwr_soft_rst(CNS3XXX_PWR_SOFTWARE_RST(GLOBAL));
+}
+
+/*
+ * cns3xxx_cpu_clock - return the CPU/L2 clock in MHz
+ * aclk: cpu clock/2
+ * hclk: cpu clock/4
+ * pclk: cpu clock/8
+ */
+int cns3xxx_cpu_clock(void)
+{
+ int cpu;
+ int cpu_sel;
+ int div_sel;
+
+ cpu_sel = (PM_CLK_CTRL_REG >> PM_CLK_CTRL_REG_OFFSET_PLL_CPU_SEL) & 0xf;
+ div_sel = (PM_CLK_CTRL_REG >> PM_CLK_CTRL_REG_OFFSET_CPU_CLK_DIV) & 0x3;
+
+ cpu = (300 + ((cpu_sel / 3) * 100) + ((cpu_sel % 3) * 33)) >> div_sel;
+
+ return cpu;
+}
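
The expression in cns3xxx_cpu_clock() above rebuilds the PLL table from cns3xxx.h: 300 MHz, plus 100 MHz per group of three PLL_CPU_SEL steps, plus 33 MHz per remaining step, shifted right by the divider field. A standalone check of a few settings (illustrative values):

/* Standalone sketch of the cns3xxx_cpu_clock() arithmetic above. */
#include <stdio.h>

static int cpu_clock_mhz(int cpu_sel, int div_sel)
{
	return (300 + ((cpu_sel / 3) * 100) + ((cpu_sel % 3) * 33)) >> div_sel;
}

int main(void)
{
	printf("%d MHz\n", cpu_clock_mhz(6, 0));	/* 500MHZ setting, /1 -> 500 */
	printf("%d MHz\n", cpu_clock_mhz(6, 1));	/* 500MHZ setting, /2 -> 250 */
	printf("%d MHz\n", cpu_clock_mhz(4, 0));	/* 433MHZ setting, /1 -> 433 */
	return 0;
}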
diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig
index 0ebe185610bf..0316e201ada0 100644
--- a/arch/arm/mach-davinci/Kconfig
+++ b/arch/arm/mach-davinci/Kconfig
@@ -7,6 +7,7 @@ config CP_INTC
bool
config ARCH_DAVINCI_DMx
+ select CPU_ARM926T
bool
menu "TI DaVinci Implementations"
@@ -41,6 +42,7 @@ config ARCH_DAVINCI_DA850
select ARCH_HAS_CPUFREQ
config ARCH_DAVINCI_DA8XX
+ select CPU_ARM926T
bool
config ARCH_DAVINCI_DM365
diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c
index dc19870b23cd..8f079d2f1af9 100644
--- a/arch/arm/mach-davinci/board-da830-evm.c
+++ b/arch/arm/mach-davinci/board-da830-evm.c
@@ -33,9 +33,6 @@
#define DA830_EVM_PHY_MASK 0x0
#define DA830_EVM_MDIO_FREQUENCY 2200000 /* PHY bus frequency */
-#define DA830_EMIF25_ASYNC_DATA_CE3_BASE 0x62000000
-#define DA830_EMIF25_CONTROL_BASE 0x68000000
-
/*
* USB1 VBUS is controlled by GPIO1[15], over-current is reported on GPIO2[4].
*/
@@ -157,7 +154,7 @@ static __init void da830_evm_usb_init(void)
__func__, ret);
}
- ret = da8xx_pinmux_setup(da830_evm_usb11_pins);
+ ret = davinci_cfg_reg_list(da830_evm_usb11_pins);
if (ret) {
pr_warning("%s: USB 1.1 PinMux setup failed: %d\n",
__func__, ret);
@@ -229,15 +226,22 @@ static const short da830_evm_mmc_sd_pins[] = {
};
#define DA830_MMCSD_WP_PIN GPIO_TO_PIN(2, 1)
+#define DA830_MMCSD_CD_PIN GPIO_TO_PIN(2, 2)
static int da830_evm_mmc_get_ro(int index)
{
return gpio_get_value(DA830_MMCSD_WP_PIN);
}
+static int da830_evm_mmc_get_cd(int index)
+{
+ return !gpio_get_value(DA830_MMCSD_CD_PIN);
+}
+
static struct davinci_mmc_config da830_evm_mmc_config = {
.get_ro = da830_evm_mmc_get_ro,
- .wires = 4,
+ .get_cd = da830_evm_mmc_get_cd,
+ .wires = 8,
.max_freq = 50000000,
.caps = MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED,
.version = MMC_CTLR_VERSION_2,
@@ -247,7 +251,7 @@ static inline void da830_evm_init_mmc(void)
{
int ret;
- ret = da8xx_pinmux_setup(da830_evm_mmc_sd_pins);
+ ret = davinci_cfg_reg_list(da830_evm_mmc_sd_pins);
if (ret) {
pr_warning("da830_evm_init: mmc/sd mux setup failed: %d\n",
ret);
@@ -262,6 +266,14 @@ static inline void da830_evm_init_mmc(void)
}
gpio_direction_input(DA830_MMCSD_WP_PIN);
+ ret = gpio_request(DA830_MMCSD_CD_PIN, "MMC CD");
+ if (ret) {
+ pr_warning("da830_evm_init: can not open GPIO %d\n",
+ DA830_MMCSD_CD_PIN);
+ return;
+ }
+ gpio_direction_input(DA830_MMCSD_CD_PIN);
+
ret = da8xx_register_mmcsd0(&da830_evm_mmc_config);
if (ret) {
pr_warning("da830_evm_init: mmc/sd registration failed: %d\n",
@@ -360,13 +372,13 @@ static struct davinci_nand_pdata da830_evm_nand_pdata = {
static struct resource da830_evm_nand_resources[] = {
[0] = { /* First memory resource is NAND I/O window */
- .start = DA830_EMIF25_ASYNC_DATA_CE3_BASE,
- .end = DA830_EMIF25_ASYNC_DATA_CE3_BASE + PAGE_SIZE - 1,
+ .start = DA8XX_AEMIF_CS3_BASE,
+ .end = DA8XX_AEMIF_CS3_BASE + PAGE_SIZE - 1,
.flags = IORESOURCE_MEM,
},
[1] = { /* Second memory resource is AEMIF control registers */
- .start = DA830_EMIF25_CONTROL_BASE,
- .end = DA830_EMIF25_CONTROL_BASE + SZ_32K - 1,
+ .start = DA8XX_AEMIF_CTL_BASE,
+ .end = DA8XX_AEMIF_CTL_BASE + SZ_32K - 1,
.flags = IORESOURCE_MEM,
},
};
@@ -392,7 +404,7 @@ static inline void da830_evm_init_nand(int mux_mode)
return;
}
- ret = da8xx_pinmux_setup(da830_evm_emif25_pins);
+ ret = davinci_cfg_reg_list(da830_evm_emif25_pins);
if (ret)
pr_warning("da830_evm_init: emif25 mux setup failed: %d\n",
ret);
@@ -412,7 +424,7 @@ static inline void da830_evm_init_lcdc(int mux_mode)
{
int ret;
- ret = da8xx_pinmux_setup(da830_lcdcntl_pins);
+ ret = davinci_cfg_reg_list(da830_lcdcntl_pins);
if (ret)
pr_warning("da830_evm_init: lcdcntl mux setup failed: %d\n",
ret);
@@ -492,7 +504,7 @@ static __init void da830_evm_init(void)
pr_warning("da830_evm_init: edma registration failed: %d\n",
ret);
- ret = da8xx_pinmux_setup(da830_i2c0_pins);
+ ret = davinci_cfg_reg_list(da830_i2c0_pins);
if (ret)
pr_warning("da830_evm_init: i2c0 mux setup failed: %d\n",
ret);
@@ -508,7 +520,7 @@ static __init void da830_evm_init(void)
soc_info->emac_pdata->mdio_max_freq = DA830_EVM_MDIO_FREQUENCY;
soc_info->emac_pdata->rmii_en = 1;
- ret = da8xx_pinmux_setup(da830_cpgmac_pins);
+ ret = davinci_cfg_reg_list(da830_cpgmac_pins);
if (ret)
pr_warning("da830_evm_init: cpgmac mux setup failed: %d\n",
ret);
@@ -527,7 +539,7 @@ static __init void da830_evm_init(void)
i2c_register_board_info(1, da830_evm_i2c_devices,
ARRAY_SIZE(da830_evm_i2c_devices));
- ret = da8xx_pinmux_setup(da830_evm_mcasp1_pins);
+ ret = davinci_cfg_reg_list(da830_evm_mcasp1_pins);
if (ret)
pr_warning("da830_evm_init: mcasp1 mux setup failed: %d\n",
ret);
@@ -554,7 +566,7 @@ static __init void da830_evm_irq_init(void)
struct davinci_soc_info *soc_info = &davinci_soc_info;
cp_intc_init((void __iomem *)DA8XX_CP_INTC_VIRT, DA830_N_CP_INTC_IRQ,
- soc_info->intc_irq_prios);
+ soc_info->intc_irq_prios, NULL);
}
static void __init da830_evm_map_io(void)
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index 411284d0b0fa..0c7afacd4098 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -17,6 +17,7 @@
#include <linux/i2c.h>
#include <linux/i2c/at24.h>
#include <linux/i2c/pca953x.h>
+#include <linux/mfd/tps6507x.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
@@ -24,6 +25,8 @@
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/regulator/machine.h>
+#include <linux/mfd/tps6507x.h>
+#include <linux/input/tps6507x-ts.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -206,12 +209,12 @@ static __init void da850_evm_setup_nor_nand(void)
int ret = 0;
if (ui_card_detected & !HAS_MMC) {
- ret = da8xx_pinmux_setup(da850_nand_pins);
+ ret = davinci_cfg_reg_list(da850_nand_pins);
if (ret)
pr_warning("da850_evm_init: nand mux setup failed: "
"%d\n", ret);
- ret = da8xx_pinmux_setup(da850_nor_pins);
+ ret = davinci_cfg_reg_list(da850_nor_pins);
if (ret)
pr_warning("da850_evm_init: nor mux setup failed: %d\n",
ret);
@@ -533,10 +536,24 @@ struct regulator_init_data tps65070_regulator_data[] = {
},
};
+static struct touchscreen_init_data tps6507x_touchscreen_data = {
+ .poll_period = 30, /* ms between touch samples */
+ .min_pressure = 0x30, /* minimum pressure to trigger touch */
+ .vref = 0, /* turn off vref when not using A/D */
+ .vendor = 0, /* /sys/class/input/input?/id/vendor */
+ .product = 65070, /* /sys/class/input/input?/id/product */
+ .version = 0x100, /* /sys/class/input/input?/id/version */
+};
+
+static struct tps6507x_board tps_board = {
+ .tps6507x_pmic_init_data = &tps65070_regulator_data[0],
+ .tps6507x_ts_init_data = &tps6507x_touchscreen_data,
+};
+
static struct i2c_board_info __initdata da850evm_tps65070_info[] = {
{
I2C_BOARD_INFO("tps6507x", 0x48),
- .platform_data = &tps65070_regulator_data[0],
+ .platform_data = &tps_board,
},
};
@@ -568,12 +585,12 @@ static int __init da850_evm_config_emac(void)
if (rmii_en) {
val |= BIT(8);
- ret = da8xx_pinmux_setup(da850_rmii_pins);
+ ret = davinci_cfg_reg_list(da850_rmii_pins);
pr_info("EMAC: RMII PHY configured, MII PHY will not be"
" functional\n");
} else {
val &= ~BIT(8);
- ret = da8xx_pinmux_setup(da850_cpgmac_pins);
+ ret = davinci_cfg_reg_list(da850_cpgmac_pins);
pr_info("EMAC: MII PHY configured, RMII PHY will not be"
" functional\n");
}
@@ -626,7 +643,7 @@ static __init void da850_evm_init(void)
pr_warning("da850_evm_init: edma registration failed: %d\n",
ret);
- ret = da8xx_pinmux_setup(da850_i2c0_pins);
+ ret = davinci_cfg_reg_list(da850_i2c0_pins);
if (ret)
pr_warning("da850_evm_init: i2c0 mux setup failed: %d\n",
ret);
@@ -643,7 +660,7 @@ static __init void da850_evm_init(void)
ret);
if (HAS_MMC) {
- ret = da8xx_pinmux_setup(da850_mmcsd0_pins);
+ ret = davinci_cfg_reg_list(da850_mmcsd0_pins);
if (ret)
pr_warning("da850_evm_init: mmcsd0 mux setup failed:"
" %d\n", ret);
@@ -679,20 +696,20 @@ static __init void da850_evm_init(void)
__raw_writel(0, IO_ADDRESS(DA8XX_UART1_BASE) + 0x30);
__raw_writel(0, IO_ADDRESS(DA8XX_UART0_BASE) + 0x30);
- ret = da8xx_pinmux_setup(da850_mcasp_pins);
+ ret = davinci_cfg_reg_list(da850_mcasp_pins);
if (ret)
pr_warning("da850_evm_init: mcasp mux setup failed: %d\n",
ret);
da8xx_register_mcasp(0, &da850_evm_snd_data);
- ret = da8xx_pinmux_setup(da850_lcdcntl_pins);
+ ret = davinci_cfg_reg_list(da850_lcdcntl_pins);
if (ret)
pr_warning("da850_evm_init: lcdcntl mux setup failed: %d\n",
ret);
/* Handle board specific muxing for LCD here */
- ret = da8xx_pinmux_setup(da850_evm_lcdc_pins);
+ ret = davinci_cfg_reg_list(da850_evm_lcdc_pins);
if (ret)
pr_warning("da850_evm_init: evm specific lcd mux setup "
"failed: %d\n", ret);
@@ -741,7 +758,7 @@ static __init void da850_evm_irq_init(void)
struct davinci_soc_info *soc_info = &davinci_soc_info;
cp_intc_init((void __iomem *)DA8XX_CP_INTC_VIRT, DA850_N_CP_INTC_IRQ,
- soc_info->intc_irq_prios);
+ soc_info->intc_irq_prios, NULL);
}
static void __init da850_evm_map_io(void)
diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c
index aa48e3f69715..a0ad7d9f5c85 100644
--- a/arch/arm/mach-davinci/board-dm355-evm.c
+++ b/arch/arm/mach-davinci/board-dm355-evm.c
@@ -33,9 +33,6 @@
#include <mach/mmc.h>
#include <mach/usb.h>
-#define DAVINCI_ASYNC_EMIF_CONTROL_BASE 0x01e10000
-#define DAVINCI_ASYNC_EMIF_DATA_CE0_BASE 0x02000000
-
/* NOTE: this is geared for the standard config, with a socketed
* 2 GByte Micron NAND (MT29F16G08FAA) using 128KB sectors. If you
* swap chips, maybe with a different block size, partitioning may
@@ -86,12 +83,12 @@ static struct davinci_nand_pdata davinci_nand_data = {
static struct resource davinci_nand_resources[] = {
{
- .start = DAVINCI_ASYNC_EMIF_DATA_CE0_BASE,
- .end = DAVINCI_ASYNC_EMIF_DATA_CE0_BASE + SZ_32M - 1,
+ .start = DM355_ASYNC_EMIF_DATA_CE0_BASE,
+ .end = DM355_ASYNC_EMIF_DATA_CE0_BASE + SZ_32M - 1,
.flags = IORESOURCE_MEM,
}, {
- .start = DAVINCI_ASYNC_EMIF_CONTROL_BASE,
- .end = DAVINCI_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
+ .start = DM355_ASYNC_EMIF_CONTROL_BASE,
+ .end = DM355_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
};
diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c
index 21f32eb41e8c..c3d5a70a7f38 100644
--- a/arch/arm/mach-davinci/board-dm355-leopard.c
+++ b/arch/arm/mach-davinci/board-dm355-leopard.c
@@ -30,9 +30,6 @@
#include <mach/mmc.h>
#include <mach/usb.h>
-#define DAVINCI_ASYNC_EMIF_CONTROL_BASE 0x01e10000
-#define DAVINCI_ASYNC_EMIF_DATA_CE0_BASE 0x02000000
-
/* NOTE: this is geared for the standard config, with a socketed
* 2 GByte Micron NAND (MT29F16G08FAA) using 128KB sectors. If you
* swap chips, maybe with a different block size, partitioning may
@@ -82,12 +79,12 @@ static struct davinci_nand_pdata davinci_nand_data = {
static struct resource davinci_nand_resources[] = {
{
- .start = DAVINCI_ASYNC_EMIF_DATA_CE0_BASE,
- .end = DAVINCI_ASYNC_EMIF_DATA_CE0_BASE + SZ_32M - 1,
+ .start = DM355_ASYNC_EMIF_DATA_CE0_BASE,
+ .end = DM355_ASYNC_EMIF_DATA_CE0_BASE + SZ_32M - 1,
.flags = IORESOURCE_MEM,
}, {
- .start = DAVINCI_ASYNC_EMIF_CONTROL_BASE,
- .end = DAVINCI_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
+ .start = DM355_ASYNC_EMIF_CONTROL_BASE,
+ .end = DM355_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
};
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index df4ab2105869..6b230bd2f1a9 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -54,11 +54,6 @@ static inline int have_tvp7002(void)
return 0;
}
-
-#define DM365_ASYNC_EMIF_CONTROL_BASE 0x01d10000
-#define DM365_ASYNC_EMIF_DATA_CE0_BASE 0x02000000
-#define DM365_ASYNC_EMIF_DATA_CE1_BASE 0x04000000
-
#define DM365_EVM_PHY_MASK (0x2)
#define DM365_EVM_MDIO_FREQUENCY (2200000) /* PHY bus frequency */
@@ -605,7 +600,11 @@ static __init void dm365_evm_init(void)
/* maybe setup mmc1/etc ... _after_ mmc0 */
evm_init_cpld();
+#ifdef CONFIG_SND_DM365_AIC3X_CODEC
dm365_init_asp(&dm365_evm_snd_data);
+#elif defined(CONFIG_SND_DM365_VOICE_CODEC)
+ dm365_init_vc(&dm365_evm_snd_data);
+#endif
dm365_init_rtc();
dm365_init_ks(&dm365evm_ks_data);
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index 976e11b7fa4a..73c0b04a75ff 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -41,14 +41,6 @@
#define DM644X_EVM_PHY_MASK (0x2)
#define DM644X_EVM_MDIO_FREQUENCY (2200000) /* PHY bus frequency */
-#define DAVINCI_CFC_ATA_BASE 0x01C66000
-
-#define DAVINCI_ASYNC_EMIF_CONTROL_BASE 0x01e00000
-#define DAVINCI_ASYNC_EMIF_DATA_CE0_BASE 0x02000000
-#define DAVINCI_ASYNC_EMIF_DATA_CE1_BASE 0x04000000
-#define DAVINCI_ASYNC_EMIF_DATA_CE2_BASE 0x06000000
-#define DAVINCI_ASYNC_EMIF_DATA_CE3_BASE 0x08000000
-
#define LXT971_PHY_ID (0x001378e2)
#define LXT971_PHY_MASK (0xfffffff0)
@@ -92,8 +84,8 @@ static struct physmap_flash_data davinci_evm_norflash_data = {
/* NOTE: CFI probe will correctly detect flash part as 32M, but EMIF
* limits addresses to 16M, so using addresses past 16M will wrap */
static struct resource davinci_evm_norflash_resource = {
- .start = DAVINCI_ASYNC_EMIF_DATA_CE0_BASE,
- .end = DAVINCI_ASYNC_EMIF_DATA_CE0_BASE + SZ_16M - 1,
+ .start = DM644X_ASYNC_EMIF_DATA_CE0_BASE,
+ .end = DM644X_ASYNC_EMIF_DATA_CE0_BASE + SZ_16M - 1,
.flags = IORESOURCE_MEM,
};
@@ -111,7 +103,7 @@ static struct platform_device davinci_evm_norflash_device = {
* It may be used instead of the (default) NOR chip to boot, using TI's
* tools to install the secondary boot loader (UBL) and U-Boot.
*/
-struct mtd_partition davinci_evm_nandflash_partition[] = {
+static struct mtd_partition davinci_evm_nandflash_partition[] = {
/* Bootloader layout depends on whose u-boot is installed, but we
* can hide all the details.
* - block 0 for u-boot environment ... in mainline u-boot
@@ -154,12 +146,12 @@ static struct davinci_nand_pdata davinci_evm_nandflash_data = {
static struct resource davinci_evm_nandflash_resource[] = {
{
- .start = DAVINCI_ASYNC_EMIF_DATA_CE0_BASE,
- .end = DAVINCI_ASYNC_EMIF_DATA_CE0_BASE + SZ_16M - 1,
+ .start = DM644X_ASYNC_EMIF_DATA_CE0_BASE,
+ .end = DM644X_ASYNC_EMIF_DATA_CE0_BASE + SZ_16M - 1,
.flags = IORESOURCE_MEM,
}, {
- .start = DAVINCI_ASYNC_EMIF_CONTROL_BASE,
- .end = DAVINCI_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
+ .start = DM644X_ASYNC_EMIF_CONTROL_BASE,
+ .end = DM644X_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
};
@@ -258,32 +250,6 @@ static struct platform_device rtc_dev = {
.id = -1,
};
-static struct resource ide_resources[] = {
- {
- .start = DAVINCI_CFC_ATA_BASE,
- .end = DAVINCI_CFC_ATA_BASE + 0x7ff,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = IRQ_IDE,
- .end = IRQ_IDE,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static u64 ide_dma_mask = DMA_BIT_MASK(32);
-
-static struct platform_device ide_dev = {
- .name = "palm_bk3710",
- .id = -1,
- .resource = ide_resources,
- .num_resources = ARRAY_SIZE(ide_resources),
- .dev = {
- .dma_mask = &ide_dma_mask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- },
-};
-
static struct snd_platform_data dm644x_evm_snd_data;
/*----------------------------------------------------------------------*/
@@ -704,10 +670,7 @@ static __init void davinci_evm_init(void)
pr_warning("WARNING: both IDE and Flash are "
"enabled, but they share AEMIF pins.\n"
"\tDisable IDE for NAND/NOR support.\n");
- davinci_cfg_reg(DM644X_HPIEN_DISABLE);
- davinci_cfg_reg(DM644X_ATAEN);
- davinci_cfg_reg(DM644X_HDIREN);
- platform_device_register(&ide_dev);
+ davinci_init_ide();
} else if (HAS_NAND || HAS_NOR) {
davinci_cfg_reg(DM644X_HPIEN_DISABLE);
davinci_cfg_reg(DM644X_ATAEN_DISABLE);
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index 5ba3cb2daaa0..e2ac06fac02a 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -80,17 +80,14 @@ static struct davinci_nand_pdata davinci_nand_data = {
.options = 0,
};
-#define DAVINCI_ASYNC_EMIF_CONTROL_BASE 0x20008000
-#define DAVINCI_ASYNC_EMIF_DATA_CE0_BASE 0x42000000
-
static struct resource davinci_nand_resources[] = {
{
- .start = DAVINCI_ASYNC_EMIF_DATA_CE0_BASE,
- .end = DAVINCI_ASYNC_EMIF_DATA_CE0_BASE + SZ_32M - 1,
+ .start = DM646X_ASYNC_EMIF_CS2_SPACE_BASE,
+ .end = DM646X_ASYNC_EMIF_CS2_SPACE_BASE + SZ_32M - 1,
.flags = IORESOURCE_MEM,
}, {
- .start = DAVINCI_ASYNC_EMIF_CONTROL_BASE,
- .end = DAVINCI_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
+ .start = DM646X_ASYNC_EMIF_CONTROL_BASE,
+ .end = DM646X_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
};
@@ -736,7 +733,7 @@ static __init void evm_init(void)
platform_device_register(&davinci_nand_device);
if (HAS_ATA)
- dm646x_init_ide();
+ davinci_init_ide();
soc_info->emac_pdata->phy_mask = DM646X_EVM_PHY_MASK;
soc_info->emac_pdata->mdio_max_freq = DM646X_EVM_MDIO_FREQUENCY;
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c
index bd9ca079b69d..875770cdea0a 100644
--- a/arch/arm/mach-davinci/board-neuros-osd2.c
+++ b/arch/arm/mach-davinci/board-neuros-osd2.c
@@ -31,6 +31,7 @@
#include <asm/mach/arch.h>
#include <mach/dm644x.h>
+#include <mach/common.h>
#include <mach/i2c.h>
#include <mach/serial.h>
#include <mach/mux.h>
@@ -41,11 +42,6 @@
#define NEUROS_OSD2_PHY_MASK 0x2
#define NEUROS_OSD2_MDIO_FREQUENCY 2200000 /* PHY bus frequency */
-#define DAVINCI_CFC_ATA_BASE 0x01C66000
-
-#define DAVINCI_ASYNC_EMIF_CONTROL_BASE 0x01e00000
-#define DAVINCI_ASYNC_EMIF_DATA_CE0_BASE 0x02000000
-
#define LXT971_PHY_ID 0x001378e2
#define LXT971_PHY_MASK 0xfffffff0
@@ -60,7 +56,7 @@
#define NAND_BLOCK_SIZE SZ_128K
-struct mtd_partition davinci_ntosd2_nandflash_partition[] = {
+static struct mtd_partition davinci_ntosd2_nandflash_partition[] = {
{
/* UBL (a few copies) plus U-Boot */
.name = "bootloader",
@@ -98,12 +94,12 @@ static struct davinci_nand_pdata davinci_ntosd2_nandflash_data = {
static struct resource davinci_ntosd2_nandflash_resource[] = {
{
- .start = DAVINCI_ASYNC_EMIF_DATA_CE0_BASE,
- .end = DAVINCI_ASYNC_EMIF_DATA_CE0_BASE + SZ_16M - 1,
+ .start = DM644X_ASYNC_EMIF_DATA_CE0_BASE,
+ .end = DM644X_ASYNC_EMIF_DATA_CE0_BASE + SZ_16M - 1,
.flags = IORESOURCE_MEM,
}, {
- .start = DAVINCI_ASYNC_EMIF_CONTROL_BASE,
- .end = DAVINCI_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
+ .start = DM644X_ASYNC_EMIF_CONTROL_BASE,
+ .end = DM644X_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
};
@@ -130,32 +126,6 @@ static struct platform_device davinci_fb_device = {
.num_resources = 0,
};
-static struct resource ide_resources[] = {
- {
- .start = DAVINCI_CFC_ATA_BASE,
- .end = DAVINCI_CFC_ATA_BASE + 0x7ff,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = IRQ_IDE,
- .end = IRQ_IDE,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static u64 ide_dma_mask = DMA_BIT_MASK(32);
-
-static struct platform_device ide_dev = {
- .name = "palm_bk3710",
- .id = -1,
- .resource = ide_resources,
- .num_resources = ARRAY_SIZE(ide_resources),
- .dev = {
- .dma_mask = &ide_dma_mask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- },
-};
-
static struct snd_platform_data dm644x_ntosd2_snd_data;
static struct gpio_led ntosd2_leds[] = {
@@ -259,10 +229,7 @@ static __init void davinci_ntosd2_init(void)
pr_warning("WARNING: both IDE and Flash are "
"enabled, but they share AEMIF pins.\n"
"\tDisable IDE for NAND/NOR support.\n");
- davinci_cfg_reg(DM644X_HPIEN_DISABLE);
- davinci_cfg_reg(DM644X_ATAEN);
- davinci_cfg_reg(DM644X_HDIREN);
- platform_device_register(&ide_dev);
+ davinci_init_ide();
} else if (HAS_NAND) {
davinci_cfg_reg(DM644X_HPIEN_DISABLE);
davinci_cfg_reg(DM644X_ATAEN_DISABLE);
diff --git a/arch/arm/mach-davinci/board-sffsdr.c b/arch/arm/mach-davinci/board-sffsdr.c
index 08d373bfcc8a..1ed0662cc0e4 100644
--- a/arch/arm/mach-davinci/board-sffsdr.c
+++ b/arch/arm/mach-davinci/board-sffsdr.c
@@ -45,10 +45,7 @@
#define SFFSDR_PHY_MASK (0x2)
#define SFFSDR_MDIO_FREQUENCY (2200000) /* PHY bus frequency */
-#define DAVINCI_ASYNC_EMIF_CONTROL_BASE 0x01e00000
-#define DAVINCI_ASYNC_EMIF_DATA_CE0_BASE 0x02000000
-
-struct mtd_partition davinci_sffsdr_nandflash_partition[] = {
+static struct mtd_partition davinci_sffsdr_nandflash_partition[] = {
/* U-Boot Environment: Block 0
* UBL: Block 1
* U-Boot: Blocks 6-7 (256 kb)
@@ -76,12 +73,12 @@ static struct flash_platform_data davinci_sffsdr_nandflash_data = {
static struct resource davinci_sffsdr_nandflash_resource[] = {
{
- .start = DAVINCI_ASYNC_EMIF_DATA_CE0_BASE,
- .end = DAVINCI_ASYNC_EMIF_DATA_CE0_BASE + SZ_16M - 1,
+ .start = DM644X_ASYNC_EMIF_DATA_CE0_BASE,
+ .end = DM644X_ASYNC_EMIF_DATA_CE0_BASE + SZ_16M - 1,
.flags = IORESOURCE_MEM,
}, {
- .start = DAVINCI_ASYNC_EMIF_CONTROL_BASE,
- .end = DAVINCI_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
+ .start = DM644X_ASYNC_EMIF_CONTROL_BASE,
+ .end = DM644X_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
};
diff --git a/arch/arm/mach-davinci/cdce949.c b/arch/arm/mach-davinci/cdce949.c
index aec375690543..ba8b12b2913b 100644
--- a/arch/arm/mach-davinci/cdce949.c
+++ b/arch/arm/mach-davinci/cdce949.c
@@ -19,6 +19,7 @@
#include <linux/i2c.h>
#include <mach/clock.h>
+#include <mach/cdce949.h>
#include "clock.h"
diff --git a/arch/arm/mach-davinci/clock.c b/arch/arm/mach-davinci/clock.c
index bf6218ee94e1..868cb7693499 100644
--- a/arch/arm/mach-davinci/clock.c
+++ b/arch/arm/mach-davinci/clock.c
@@ -22,6 +22,7 @@
#include <mach/hardware.h>
+#include <mach/clock.h>
#include <mach/psc.h>
#include <mach/cputype.h>
#include "clock.h"
@@ -42,7 +43,8 @@ static void __clk_enable(struct clk *clk)
if (clk->parent)
__clk_enable(clk->parent);
if (clk->usecount++ == 0 && (clk->flags & CLK_PSC))
- davinci_psc_config(psc_domain(clk), clk->gpsc, clk->lpsc, 1);
+ davinci_psc_config(psc_domain(clk), clk->gpsc, clk->lpsc,
+ PSC_STATE_ENABLE);
}
static void __clk_disable(struct clk *clk)
@@ -51,7 +53,9 @@ static void __clk_disable(struct clk *clk)
return;
if (--clk->usecount == 0 && !(clk->flags & CLK_PLL) &&
(clk->flags & CLK_PSC))
- davinci_psc_config(psc_domain(clk), clk->gpsc, clk->lpsc, 0);
+ davinci_psc_config(psc_domain(clk), clk->gpsc, clk->lpsc,
+ (clk->flags & PSC_SWRSTDISABLE) ?
+ PSC_STATE_SWRSTDISABLE : PSC_STATE_DISABLE);
if (clk->parent)
__clk_disable(clk->parent);
}
@@ -233,7 +237,10 @@ static int __init clk_disable_unused(void)
continue;
pr_info("Clocks: disable unused %s\n", ck->name);
- davinci_psc_config(psc_domain(ck), ck->gpsc, ck->lpsc, 0);
+
+ davinci_psc_config(psc_domain(ck), ck->gpsc, ck->lpsc,
+ (ck->flags & PSC_SWRSTDISABLE) ?
+ PSC_STATE_SWRSTDISABLE : PSC_STATE_DISABLE);
}
spin_unlock_irq(&clockfw_lock);
@@ -272,7 +279,7 @@ static unsigned long clk_sysclk_recalc(struct clk *clk)
v = __raw_readl(pll->base + clk->div_reg);
if (v & PLLDIV_EN) {
- plldiv = (v & PLLDIV_RATIO_MASK) + 1;
+ plldiv = (v & pll->div_ratio_mask) + 1;
if (plldiv)
rate /= plldiv;
}
@@ -312,7 +319,7 @@ static unsigned long clk_pllclk_recalc(struct clk *clk)
if (pll->flags & PLL_HAS_PREDIV) {
prediv = __raw_readl(pll->base + PREDIV);
if (prediv & PLLDIV_EN)
- prediv = (prediv & PLLDIV_RATIO_MASK) + 1;
+ prediv = (prediv & pll->div_ratio_mask) + 1;
else
prediv = 1;
}
@@ -324,7 +331,7 @@ static unsigned long clk_pllclk_recalc(struct clk *clk)
if (pll->flags & PLL_HAS_POSTDIV) {
postdiv = __raw_readl(pll->base + POSTDIV);
if (postdiv & PLLDIV_EN)
- postdiv = (postdiv & PLLDIV_RATIO_MASK) + 1;
+ postdiv = (postdiv & pll->div_ratio_mask) + 1;
else
postdiv = 1;
}
@@ -451,6 +458,9 @@ int __init davinci_clk_init(struct clk_lookup *clocks)
clk->recalc = clk_leafclk_recalc;
}
+ if (clk->pll_data && !clk->pll_data->div_ratio_mask)
+ clk->pll_data->div_ratio_mask = PLLDIV_RATIO_MASK;
+
if (clk->recalc)
clk->rate = clk->recalc(clk);
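davinci_psc_config() now takes an explicit target state rather than a 0/1 enable flag, and a clock can request the SwRstDisable state on shutdown by setting the new PSC_SWRSTDISABLE flag. A minimal sketch of such a clock definition; the lpsc number below is a placeholder, not taken from any real SoC table:

	/* Sketch only: the lpsc value is a placeholder. */
	static struct clk example_swrst_clk = {
		.name  = "example",
		.lpsc  = 1,
		.flags = CLK_PSC | PSC_SWRSTDISABLE, /* disable goes to SwRstDisable */
	};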
diff --git a/arch/arm/mach-davinci/clock.h b/arch/arm/mach-davinci/clock.h
index aa0a61150325..01e36483ac3d 100644
--- a/arch/arm/mach-davinci/clock.h
+++ b/arch/arm/mach-davinci/clock.h
@@ -76,6 +76,7 @@ struct pll_data {
u32 num;
u32 flags;
u32 input_rate;
+ u32 div_ratio_mask;
};
#define PLL_HAS_PREDIV 0x01
#define PLL_HAS_POSTDIV 0x02
@@ -101,10 +102,11 @@ struct clk {
/* Clock flags: SoC-specific flags start at BIT(16) */
#define ALWAYS_ENABLED BIT(1)
-#define CLK_PSC BIT(2)
-#define PSC_DSP BIT(3) /* PSC uses DSP domain, not ARM */
+#define CLK_PSC BIT(2)
+#define PSC_DSP BIT(3) /* PSC uses DSP domain, not ARM */
#define CLK_PLL BIT(4) /* PLL-derived clock */
-#define PRE_PLL BIT(5) /* source is before PLL mult/div */
+#define PRE_PLL BIT(5) /* source is before PLL mult/div */
+#define PSC_SWRSTDISABLE BIT(6) /* Disable state is SwRstDisable */
#define CLK(dev, con, ck) \
{ \
@@ -118,6 +120,7 @@ int davinci_set_pllrate(struct pll_data *pll, unsigned int prediv,
unsigned int mult, unsigned int postdiv);
extern struct platform_device davinci_wdt_device;
+extern void davinci_watchdog_reset(struct platform_device *);
#endif
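The new per-PLL div_ratio_mask lets a controller with a narrower divider ratio field override the default PLLDIV_RATIO_MASK that davinci_clk_init() fills in when the field is left zero. A minimal sketch, with an illustrative mask value only:

	/* Sketch: the mask value is illustrative, not taken from a real PLL. */
	static struct pll_data example_pll = {
		.num            = 1,
		.div_ratio_mask = 0x1f, /* recalc uses this instead of PLLDIV_RATIO_MASK */
	};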
diff --git a/arch/arm/mach-davinci/common.c b/arch/arm/mach-davinci/common.c
index 94f27cbcd55a..f8221c5ee380 100644
--- a/arch/arm/mach-davinci/common.c
+++ b/arch/arm/mach-davinci/common.c
@@ -77,6 +77,9 @@ void __init davinci_common_init(struct davinci_soc_info *soc_info)
local_flush_tlb_all();
flush_cache_all();
+ if (!davinci_soc_info.reset)
+ davinci_soc_info.reset = davinci_watchdog_reset;
+
/*
* We want to check CPU revision early for cpu_is_xxxx() macros.
* IO space mapping must be initialized before we can do that.
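davinci_common_init() now installs davinci_watchdog_reset() as the default reset hook whenever a SoC does not provide its own, so restart code can simply call through davinci_soc_info. A minimal sketch of such a caller; the wrapper name is invented for illustration and the actual restart plumbing is not part of this patch:

	/* Illustrative wrapper only. */
	static void example_restart(void)
	{
		struct davinci_soc_info *soc_info = &davinci_soc_info;

		if (soc_info->reset)
			soc_info->reset(soc_info->reset_device);
	}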
diff --git a/arch/arm/mach-davinci/cp_intc.c b/arch/arm/mach-davinci/cp_intc.c
index 37311d1830eb..2a8d26ee4bbf 100644
--- a/arch/arm/mach-davinci/cp_intc.c
+++ b/arch/arm/mach-davinci/cp_intc.c
@@ -101,7 +101,7 @@ static struct irq_chip cp_intc_irq_chip = {
};
void __init cp_intc_init(void __iomem *base, unsigned short num_irq,
- u8 *irq_prio)
+ u8 *irq_prio, u32 *host_map)
{
unsigned num_reg = BITS_TO_LONGS(num_irq);
int i;
@@ -157,6 +157,10 @@ void __init cp_intc_init(void __iomem *base, unsigned short num_irq,
cp_intc_write(0x0f0f0f0f, CP_INTC_CHAN_MAP(i));
}
+ if (host_map)
+ for (i = 0; host_map[i] != -1; i++)
+ cp_intc_write(host_map[i], CP_INTC_HOST_MAP(i));
+
/* Set up genirq dispatching for cp_intc */
for (i = 0; i < num_irq; i++) {
set_irq_chip(i, &cp_intc_irq_chip);
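cp_intc_init() now accepts an optional host interrupt map: when non-NULL, entries are written to successive CP_INTC_HOST_MAP registers until a -1 terminator is found. A minimal sketch of a terminated map and the matching call; the register values are placeholders, not taken from any board:

	/* Placeholder map values for illustration; a real map is SoC specific. */
	static u32 example_host_map[] = { 0x01010101, 0x01010101, -1 };

	cp_intc_init((void __iomem *)DA8XX_CP_INTC_VIRT, DA850_N_CP_INTC_IRQ,
		     soc_info->intc_irq_prios, example_host_map);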
diff --git a/arch/arm/mach-davinci/da830.c b/arch/arm/mach-davinci/da830.c
index 122e61a9f505..83879f894a21 100644
--- a/arch/arm/mach-davinci/da830.c
+++ b/arch/arm/mach-davinci/da830.c
@@ -19,6 +19,7 @@
#include <mach/common.h>
#include <mach/time.h>
#include <mach/da8xx.h>
+#include <mach/gpio.h>
#include "clock.h"
#include "mux.h"
@@ -1199,11 +1200,13 @@ static struct davinci_soc_info davinci_soc_info_da830 = {
.intc_irq_prios = da830_default_priorities,
.intc_irq_num = DA830_N_CP_INTC_IRQ,
.timer_info = &da830_timer_info,
+ .gpio_type = GPIO_TYPE_DAVINCI,
.gpio_base = IO_ADDRESS(DA8XX_GPIO_BASE),
.gpio_num = 128,
.gpio_irq = IRQ_DA8XX_GPIO0,
.serial_dev = &da8xx_serial_device,
.emac_pdata = &da8xx_emac_pdata,
+ .reset_device = &da8xx_wdt_device,
};
void __init da830_init(void)
diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
index d0fd7566712a..2b6dd59d72f2 100644
--- a/arch/arm/mach-davinci/da850.c
+++ b/arch/arm/mach-davinci/da850.c
@@ -27,6 +27,7 @@
#include <mach/da8xx.h>
#include <mach/cpufreq.h>
#include <mach/pm.h>
+#include <mach/gpio.h>
#include "clock.h"
#include "mux.h"
@@ -1084,6 +1085,7 @@ static struct davinci_soc_info davinci_soc_info_da850 = {
.intc_irq_prios = da850_default_priorities,
.intc_irq_num = DA850_N_CP_INTC_IRQ,
.timer_info = &da850_timer_info,
+ .gpio_type = GPIO_TYPE_DAVINCI,
.gpio_base = IO_ADDRESS(DA8XX_GPIO_BASE),
.gpio_num = 144,
.gpio_irq = IRQ_DA8XX_GPIO0,
@@ -1091,6 +1093,7 @@ static struct davinci_soc_info davinci_soc_info_da850 = {
.emac_pdata = &da8xx_emac_pdata,
.sram_dma = DA8XX_ARM_RAM_BASE,
.sram_len = SZ_8K,
+ .reset_device = &da8xx_wdt_device,
};
void __init da850_init(void)
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index 0a96791d3b0f..67a6fbcb2e09 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -326,7 +326,7 @@ static struct resource da8xx_watchdog_resources[] = {
},
};
-struct platform_device davinci_wdt_device = {
+struct platform_device da8xx_wdt_device = {
.name = "watchdog",
.id = -1,
.num_resources = ARRAY_SIZE(da8xx_watchdog_resources),
@@ -335,7 +335,7 @@ struct platform_device davinci_wdt_device = {
int __init da8xx_register_watchdog(void)
{
- return platform_device_register(&davinci_wdt_device);
+ return platform_device_register(&da8xx_wdt_device);
}
static struct resource da8xx_emac_resources[] = {
diff --git a/arch/arm/mach-davinci/devices.c b/arch/arm/mach-davinci/devices.c
index 147949650c25..d9c82ee434e0 100644
--- a/arch/arm/mach-davinci/devices.c
+++ b/arch/arm/mach-davinci/devices.c
@@ -23,7 +23,10 @@
#include <mach/mmc.h>
#include <mach/time.h>
+#include "clock.h"
+
#define DAVINCI_I2C_BASE 0x01C21000
+#define DAVINCI_ATA_BASE 0x01C66000
#define DAVINCI_MMCSD0_BASE 0x01E10000
#define DM355_MMCSD0_BASE 0x01E11000
#define DM355_MMCSD1_BASE 0x01E00000
@@ -58,6 +61,49 @@ void __init davinci_init_i2c(struct davinci_i2c_platform_data *pdata)
(void) platform_device_register(&davinci_i2c_device);
}
+static struct resource ide_resources[] = {
+ {
+ .start = DAVINCI_ATA_BASE,
+ .end = DAVINCI_ATA_BASE + 0x7ff,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = IRQ_IDE,
+ .end = IRQ_IDE,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static u64 ide_dma_mask = DMA_BIT_MASK(32);
+
+static struct platform_device ide_device = {
+ .name = "palm_bk3710",
+ .id = -1,
+ .resource = ide_resources,
+ .num_resources = ARRAY_SIZE(ide_resources),
+ .dev = {
+ .dma_mask = &ide_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+void __init davinci_init_ide(void)
+{
+ if (cpu_is_davinci_dm644x()) {
+ davinci_cfg_reg(DM644X_HPIEN_DISABLE);
+ davinci_cfg_reg(DM644X_ATAEN);
+ davinci_cfg_reg(DM644X_HDIREN);
+ } else if (cpu_is_davinci_dm646x()) {
+ /* IRQ_DM646X_IDE is the same as IRQ_IDE */
+ davinci_cfg_reg(DM646X_ATAEN);
+ } else {
+ WARN_ON(1);
+ return;
+ }
+
+ platform_device_register(&ide_device);
+}
+
#if defined(CONFIG_MMC_DAVINCI) || defined(CONFIG_MMC_DAVINCI_MODULE)
static u64 mmcsd0_dma_mask = DMA_BIT_MASK(32);
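With the palm_bk3710 registration centralized in davinci_init_ide(), board files no longer carry their own ide_resources/ide_dev copies; they perform the usual pin-conflict checks and call the helper, which warns and bails out on SoCs other than DM644x/DM646x. The board-side usage reduces to the pattern already visible in the EVM hunks above:

	/* Board-file sketch, mirroring the dm644x/dm646x EVM changes above. */
	if (HAS_ATA)
		davinci_init_ide();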
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index 3dc0a88712eb..0c62a68a90a3 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -450,11 +450,6 @@ void __init dm355_init_spi0(unsigned chipselect_mask,
/*----------------------------------------------------------------------*/
-#define PINMUX0 0x00
-#define PINMUX1 0x04
-#define PINMUX2 0x08
-#define PINMUX3 0x0c
-#define PINMUX4 0x10
#define INTMUX 0x18
#define EVTMUX 0x1c
@@ -798,7 +793,7 @@ static void __iomem *dm355_psc_bases[] = {
* T1_BOT: Timer 1, bottom: (used by DSP in TI DSPLink code)
* T1_TOP: Timer 1, top : <unused>
*/
-struct davinci_timer_info dm355_timer_info = {
+static struct davinci_timer_info dm355_timer_info = {
.timers = davinci_timer_instance,
.clockevent_id = T0_BOT,
.clocksource_id = T0_TOP,
@@ -859,12 +854,14 @@ static struct davinci_soc_info davinci_soc_info_dm355 = {
.intc_irq_prios = dm355_default_priorities,
.intc_irq_num = DAVINCI_N_AINTC_IRQ,
.timer_info = &dm355_timer_info,
+ .gpio_type = GPIO_TYPE_DAVINCI,
.gpio_base = IO_ADDRESS(DAVINCI_GPIO_BASE),
.gpio_num = 104,
.gpio_irq = IRQ_DM355_GPIOBNK0,
.serial_dev = &dm355_serial_device,
.sram_dma = 0x00010000,
.sram_len = SZ_32K,
+ .reset_device = &davinci_wdt_device,
};
void __init dm355_init_asp1(u32 evt_enable, struct snd_platform_data *pdata)
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index 0d6ee583f65c..ed7645088052 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -467,11 +467,6 @@ static struct clk_lookup dm365_clks[] = {
/*----------------------------------------------------------------------*/
-#define PINMUX0 0x00
-#define PINMUX1 0x04
-#define PINMUX2 0x08
-#define PINMUX3 0x0c
-#define PINMUX4 0x10
#define INTMUX 0x18
#define EVTMUX 0x1c
@@ -576,6 +571,7 @@ MUX_CFG(DM365, SPI4_SDENA1, 4, 16, 3, 2, false)
MUX_CFG(DM365, GPIO20, 3, 21, 3, 0, false)
MUX_CFG(DM365, GPIO33, 4, 12, 3, 0, false)
MUX_CFG(DM365, GPIO40, 4, 26, 3, 0, false)
+MUX_CFG(DM365, GPIO64_57, 2, 6, 1, 0, false)
MUX_CFG(DM365, VOUT_FIELD, 1, 18, 3, 1, false)
MUX_CFG(DM365, VOUT_FIELD_G81, 1, 18, 3, 0, false)
@@ -1010,7 +1006,7 @@ static void __iomem *dm365_psc_bases[] = {
IO_ADDRESS(DAVINCI_PWR_SLEEP_CNTRL_BASE),
};
-struct davinci_timer_info dm365_timer_info = {
+static struct davinci_timer_info dm365_timer_info = {
.timers = davinci_timer_instance,
.clockevent_id = T0_BOT,
.clocksource_id = T0_TOP,
@@ -1063,6 +1059,7 @@ static struct davinci_soc_info davinci_soc_info_dm365 = {
.intc_irq_prios = dm365_default_priorities,
.intc_irq_num = DAVINCI_N_AINTC_IRQ,
.timer_info = &dm365_timer_info,
+ .gpio_type = GPIO_TYPE_DAVINCI,
.gpio_base = IO_ADDRESS(DAVINCI_GPIO_BASE),
.gpio_num = 104,
.gpio_irq = IRQ_DM365_GPIO0,
@@ -1071,6 +1068,7 @@ static struct davinci_soc_info davinci_soc_info_dm365 = {
.emac_pdata = &dm365_emac_pdata,
.sram_dma = 0x00010000,
.sram_len = SZ_32K,
+ .reset_device = &davinci_wdt_device,
};
void __init dm365_init_asp(struct snd_platform_data *pdata)
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index 2f2ae8bc77bb..7f36c22a2684 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -277,7 +277,7 @@ static struct clk timer2_clk = {
.usecount = 1, /* REVISIT: why can't this be disabled? */
};
-struct clk_lookup dm644x_clks[] = {
+static struct clk_lookup dm644x_clks[] = {
CLK(NULL, "ref", &ref_clk),
CLK(NULL, "pll1", &pll1_clk),
CLK(NULL, "pll1_sysclk1", &pll1_sysclk1),
@@ -350,9 +350,6 @@ static struct platform_device dm644x_emac_device = {
.resource = dm644x_emac_resources,
};
-#define PINMUX0 0x00
-#define PINMUX1 0x04
-
/*
* Device specific mux setup
*
@@ -687,7 +684,7 @@ static void __iomem *dm644x_psc_bases[] = {
* T1_BOT: Timer 1, bottom: (used by DSP in TI DSPLink code)
* T1_TOP: Timer 1, top : <unused>
*/
-struct davinci_timer_info dm644x_timer_info = {
+static struct davinci_timer_info dm644x_timer_info = {
.timers = davinci_timer_instance,
.clockevent_id = T0_BOT,
.clocksource_id = T0_TOP,
@@ -748,6 +745,7 @@ static struct davinci_soc_info davinci_soc_info_dm644x = {
.intc_irq_prios = dm644x_default_priorities,
.intc_irq_num = DAVINCI_N_AINTC_IRQ,
.timer_info = &dm644x_timer_info,
+ .gpio_type = GPIO_TYPE_DAVINCI,
.gpio_base = IO_ADDRESS(DAVINCI_GPIO_BASE),
.gpio_num = 71,
.gpio_irq = IRQ_GPIOBNK0,
@@ -755,6 +753,7 @@ static struct davinci_soc_info davinci_soc_info_dm644x = {
.emac_pdata = &dm644x_emac_pdata,
.sram_dma = 0x00008000,
.sram_len = SZ_16K,
+ .reset_device = &davinci_wdt_device,
};
void __init dm644x_init_asp(struct snd_platform_data *pdata)
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index 893baf4ad37d..8dd0afee7593 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -311,7 +311,7 @@ static struct clk vpif1_clk = {
.flags = ALWAYS_ENABLED,
};
-struct clk_lookup dm646x_clks[] = {
+static struct clk_lookup dm646x_clks[] = {
CLK(NULL, "ref", &ref_clk),
CLK(NULL, "aux", &aux_clkin),
CLK(NULL, "pll1", &pll1_clk),
@@ -401,9 +401,6 @@ static struct platform_device dm646x_emac_device = {
.resource = dm646x_emac_resources,
};
-#define PINMUX0 0x00
-#define PINMUX1 0x04
-
/*
* Device specific mux setup
*
@@ -596,32 +593,6 @@ static struct platform_device dm646x_edma_device = {
.resource = edma_resources,
};
-static struct resource ide_resources[] = {
- {
- .start = DM646X_ATA_REG_BASE,
- .end = DM646X_ATA_REG_BASE + 0x7ff,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = IRQ_DM646X_IDE,
- .end = IRQ_DM646X_IDE,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static u64 ide_dma_mask = DMA_BIT_MASK(32);
-
-static struct platform_device ide_dev = {
- .name = "palm_bk3710",
- .id = -1,
- .resource = ide_resources,
- .num_resources = ARRAY_SIZE(ide_resources),
- .dev = {
- .dma_mask = &ide_dma_mask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- },
-};
-
static struct resource dm646x_mcasp0_resources[] = {
{
.name = "mcasp0",
@@ -797,7 +768,7 @@ static void __iomem *dm646x_psc_bases[] = {
* T1_BOT: Timer 1, bottom: (used by DSP in TI DSPLink code)
* T1_TOP: Timer 1, top : <unused>
*/
-struct davinci_timer_info dm646x_timer_info = {
+static struct davinci_timer_info dm646x_timer_info = {
.timers = davinci_timer_instance,
.clockevent_id = T0_BOT,
.clocksource_id = T0_TOP,
@@ -858,6 +829,7 @@ static struct davinci_soc_info davinci_soc_info_dm646x = {
.intc_irq_prios = dm646x_default_priorities,
.intc_irq_num = DAVINCI_N_AINTC_IRQ,
.timer_info = &dm646x_timer_info,
+ .gpio_type = GPIO_TYPE_DAVINCI,
.gpio_base = IO_ADDRESS(DAVINCI_GPIO_BASE),
.gpio_num = 43, /* Only 33 usable */
.gpio_irq = IRQ_DM646X_GPIOBNK0,
@@ -865,14 +837,9 @@ static struct davinci_soc_info davinci_soc_info_dm646x = {
.emac_pdata = &dm646x_emac_pdata,
.sram_dma = 0x10010000,
.sram_len = SZ_32K,
+ .reset_device = &davinci_wdt_device,
};
-void __init dm646x_init_ide()
-{
- davinci_cfg_reg(DM646X_ATAEN);
- platform_device_register(&ide_dev);
-}
-
void __init dm646x_init_mcasp0(struct snd_platform_data *pdata)
{
dm646x_mcasp0_device.dev.platform_data = pdata;
diff --git a/arch/arm/mach-davinci/dma.c b/arch/arm/mach-davinci/dma.c
index 53137387aee1..cc5fcdad9b92 100644
--- a/arch/arm/mach-davinci/dma.c
+++ b/arch/arm/mach-davinci/dma.c
@@ -243,7 +243,7 @@ struct edma {
} intr_data[EDMA_MAX_DMACH];
};
-static struct edma *edma_info[EDMA_MAX_CC];
+static struct edma *edma_cc[EDMA_MAX_CC];
static int arch_num_cc;
/* dummy param set used to (re)initialize parameter RAM slots */
@@ -261,7 +261,7 @@ static void map_dmach_queue(unsigned ctlr, unsigned ch_no,
/* default to low priority queue */
if (queue_no == EVENTQ_DEFAULT)
- queue_no = edma_info[ctlr]->default_queue;
+ queue_no = edma_cc[ctlr]->default_queue;
queue_no &= 7;
edma_modify_array(ctlr, EDMA_DMAQNUM, (ch_no >> 3),
@@ -310,13 +310,12 @@ setup_dma_interrupt(unsigned lch,
ctlr = EDMA_CTLR(lch);
lch = EDMA_CHAN_SLOT(lch);
- if (!callback) {
+ if (!callback)
edma_shadow0_write_array(ctlr, SH_IECR, lch >> 5,
(1 << (lch & 0x1f)));
- }
- edma_info[ctlr]->intr_data[lch].callback = callback;
- edma_info[ctlr]->intr_data[lch].data = data;
+ edma_cc[ctlr]->intr_data[lch].callback = callback;
+ edma_cc[ctlr]->intr_data[lch].data = data;
if (callback) {
edma_shadow0_write_array(ctlr, SH_ICR, lch >> 5,
@@ -328,11 +327,10 @@ setup_dma_interrupt(unsigned lch,
static int irq2ctlr(int irq)
{
- if (irq >= edma_info[0]->irq_res_start &&
- irq <= edma_info[0]->irq_res_end)
+ if (irq >= edma_cc[0]->irq_res_start && irq <= edma_cc[0]->irq_res_end)
return 0;
- else if (irq >= edma_info[1]->irq_res_start &&
- irq <= edma_info[1]->irq_res_end)
+ else if (irq >= edma_cc[1]->irq_res_start &&
+ irq <= edma_cc[1]->irq_res_end)
return 1;
return -1;
@@ -359,9 +357,11 @@ static irqreturn_t dma_irq_handler(int irq, void *data)
while (1) {
int j;
- if (edma_shadow0_read_array(ctlr, SH_IPR, 0))
+ if (edma_shadow0_read_array(ctlr, SH_IPR, 0) &
+ edma_shadow0_read_array(ctlr, SH_IER, 0))
j = 0;
- else if (edma_shadow0_read_array(ctlr, SH_IPR, 1))
+ else if (edma_shadow0_read_array(ctlr, SH_IPR, 1) &
+ edma_shadow0_read_array(ctlr, SH_IER, 1))
j = 1;
else
break;
@@ -369,17 +369,17 @@ static irqreturn_t dma_irq_handler(int irq, void *data)
edma_shadow0_read_array(ctlr, SH_IPR, j));
for (i = 0; i < 32; i++) {
int k = (j << 5) + i;
- if (edma_shadow0_read_array(ctlr, SH_IPR, j) &
- (1 << i)) {
+ if ((edma_shadow0_read_array(ctlr, SH_IPR, j) & BIT(i))
+ && (edma_shadow0_read_array(ctlr,
+ SH_IER, j) & BIT(i))) {
/* Clear the corresponding IPR bits */
edma_shadow0_write_array(ctlr, SH_ICR, j,
(1 << i));
- if (edma_info[ctlr]->intr_data[k].callback) {
- edma_info[ctlr]->intr_data[k].callback(
+ if (edma_cc[ctlr]->intr_data[k].callback)
+ edma_cc[ctlr]->intr_data[k].callback(
k, DMA_COMPLETE,
- edma_info[ctlr]->intr_data[k].
+ edma_cc[ctlr]->intr_data[k].
data);
- }
}
}
cnt++;
@@ -430,12 +430,12 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
/* Clear any SER */
edma_shadow0_write_array(ctlr, SH_SECR,
j, (1 << i));
- if (edma_info[ctlr]->intr_data[k].
+ if (edma_cc[ctlr]->intr_data[k].
callback) {
- edma_info[ctlr]->intr_data[k].
+ edma_cc[ctlr]->intr_data[k].
callback(k,
DMA_CC_ERROR,
- edma_info[ctlr]->intr_data
+ edma_cc[ctlr]->intr_data
[k].data);
}
}
@@ -471,9 +471,8 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0)
&& (edma_read_array(ctlr, EDMA_EMR, 1) == 0)
&& (edma_read(ctlr, EDMA_QEMR) == 0)
- && (edma_read(ctlr, EDMA_CCERR) == 0)) {
+ && (edma_read(ctlr, EDMA_CCERR) == 0))
break;
- }
cnt++;
if (cnt > 10)
break;
@@ -511,9 +510,9 @@ static int reserve_contiguous_slots(int ctlr, unsigned int id,
int stop_slot = start_slot;
DECLARE_BITMAP(tmp_inuse, EDMA_MAX_PARAMENTRY);
- for (i = start_slot; i < edma_info[ctlr]->num_slots; ++i) {
+ for (i = start_slot; i < edma_cc[ctlr]->num_slots; ++i) {
j = EDMA_CHAN_SLOT(i);
- if (!test_and_set_bit(j, edma_info[ctlr]->edma_inuse)) {
+ if (!test_and_set_bit(j, edma_cc[ctlr]->edma_inuse)) {
/* Record our current beginning slot */
if (count == num_slots)
stop_slot = i;
@@ -529,8 +528,9 @@ static int reserve_contiguous_slots(int ctlr, unsigned int id,
if (id == EDMA_CONT_PARAMS_FIXED_EXACT) {
stop_slot = i;
break;
- } else
+ } else {
count = num_slots;
+ }
}
}
@@ -540,12 +540,12 @@ static int reserve_contiguous_slots(int ctlr, unsigned int id,
* of contiguous parameter RAM slots but do not find the exact number
* requested as we may reach the total number of parameter RAM slots
*/
- if (i == edma_info[ctlr]->num_slots)
+ if (i == edma_cc[ctlr]->num_slots)
stop_slot = i;
for (j = start_slot; j < stop_slot; j++)
if (test_bit(j, tmp_inuse))
- clear_bit(j, edma_info[ctlr]->edma_inuse);
+ clear_bit(j, edma_cc[ctlr]->edma_inuse);
if (count)
return -EBUSY;
@@ -567,7 +567,7 @@ static int prepare_unused_channel_list(struct device *dev, void *data)
(int)pdev->resource[i].start >= 0) {
ctlr = EDMA_CTLR(pdev->resource[i].start);
clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start),
- edma_info[ctlr]->edma_unused);
+ edma_cc[ctlr]->edma_unused);
}
}
@@ -641,14 +641,13 @@ int edma_alloc_channel(int channel,
for (i = 0; i < arch_num_cc; i++) {
channel = 0;
for (;;) {
- channel = find_next_bit(edma_info[i]->
- edma_unused,
- edma_info[i]->num_channels,
+ channel = find_next_bit(edma_cc[i]->edma_unused,
+ edma_cc[i]->num_channels,
channel);
- if (channel == edma_info[i]->num_channels)
+ if (channel == edma_cc[i]->num_channels)
break;
if (!test_and_set_bit(channel,
- edma_info[i]->edma_inuse)) {
+ edma_cc[i]->edma_inuse)) {
done = 1;
ctlr = i;
break;
@@ -660,9 +659,9 @@ int edma_alloc_channel(int channel,
}
if (!done)
return -ENOMEM;
- } else if (channel >= edma_info[ctlr]->num_channels) {
+ } else if (channel >= edma_cc[ctlr]->num_channels) {
return -EINVAL;
- } else if (test_and_set_bit(channel, edma_info[ctlr]->edma_inuse)) {
+ } else if (test_and_set_bit(channel, edma_cc[ctlr]->edma_inuse)) {
return -EBUSY;
}
@@ -703,7 +702,7 @@ void edma_free_channel(unsigned channel)
ctlr = EDMA_CTLR(channel);
channel = EDMA_CHAN_SLOT(channel);
- if (channel >= edma_info[ctlr]->num_channels)
+ if (channel >= edma_cc[ctlr]->num_channels)
return;
setup_dma_interrupt(channel, NULL, NULL);
@@ -711,7 +710,7 @@ void edma_free_channel(unsigned channel)
memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
&dummy_paramset, PARM_SIZE);
- clear_bit(channel, edma_info[ctlr]->edma_inuse);
+ clear_bit(channel, edma_cc[ctlr]->edma_inuse);
}
EXPORT_SYMBOL(edma_free_channel);
@@ -735,20 +734,19 @@ int edma_alloc_slot(unsigned ctlr, int slot)
slot = EDMA_CHAN_SLOT(slot);
if (slot < 0) {
- slot = edma_info[ctlr]->num_channels;
+ slot = edma_cc[ctlr]->num_channels;
for (;;) {
- slot = find_next_zero_bit(edma_info[ctlr]->edma_inuse,
- edma_info[ctlr]->num_slots, slot);
- if (slot == edma_info[ctlr]->num_slots)
+ slot = find_next_zero_bit(edma_cc[ctlr]->edma_inuse,
+ edma_cc[ctlr]->num_slots, slot);
+ if (slot == edma_cc[ctlr]->num_slots)
return -ENOMEM;
- if (!test_and_set_bit(slot,
- edma_info[ctlr]->edma_inuse))
+ if (!test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse))
break;
}
- } else if (slot < edma_info[ctlr]->num_channels ||
- slot >= edma_info[ctlr]->num_slots) {
+ } else if (slot < edma_cc[ctlr]->num_channels ||
+ slot >= edma_cc[ctlr]->num_slots) {
return -EINVAL;
- } else if (test_and_set_bit(slot, edma_info[ctlr]->edma_inuse)) {
+ } else if (test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse)) {
return -EBUSY;
}
@@ -774,13 +772,13 @@ void edma_free_slot(unsigned slot)
ctlr = EDMA_CTLR(slot);
slot = EDMA_CHAN_SLOT(slot);
- if (slot < edma_info[ctlr]->num_channels ||
- slot >= edma_info[ctlr]->num_slots)
+ if (slot < edma_cc[ctlr]->num_channels ||
+ slot >= edma_cc[ctlr]->num_slots)
return;
memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
&dummy_paramset, PARM_SIZE);
- clear_bit(slot, edma_info[ctlr]->edma_inuse);
+ clear_bit(slot, edma_cc[ctlr]->edma_inuse);
}
EXPORT_SYMBOL(edma_free_slot);
@@ -818,8 +816,8 @@ int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count)
* of slots
*/
if ((id != EDMA_CONT_PARAMS_ANY) &&
- (slot < edma_info[ctlr]->num_channels ||
- slot >= edma_info[ctlr]->num_slots))
+ (slot < edma_cc[ctlr]->num_channels ||
+ slot >= edma_cc[ctlr]->num_slots))
return -EINVAL;
/*
@@ -828,13 +826,13 @@ int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count)
* channels
*/
if (count < 1 || count >
- (edma_info[ctlr]->num_slots - edma_info[ctlr]->num_channels))
+ (edma_cc[ctlr]->num_slots - edma_cc[ctlr]->num_channels))
return -EINVAL;
switch (id) {
case EDMA_CONT_PARAMS_ANY:
return reserve_contiguous_slots(ctlr, id, count,
- edma_info[ctlr]->num_channels);
+ edma_cc[ctlr]->num_channels);
case EDMA_CONT_PARAMS_FIXED_EXACT:
case EDMA_CONT_PARAMS_FIXED_NOT_EXACT:
return reserve_contiguous_slots(ctlr, id, count, slot);
@@ -866,8 +864,8 @@ int edma_free_cont_slots(unsigned slot, int count)
ctlr = EDMA_CTLR(slot);
slot = EDMA_CHAN_SLOT(slot);
- if (slot < edma_info[ctlr]->num_channels ||
- slot >= edma_info[ctlr]->num_slots ||
+ if (slot < edma_cc[ctlr]->num_channels ||
+ slot >= edma_cc[ctlr]->num_slots ||
count < 1)
return -EINVAL;
@@ -877,7 +875,7 @@ int edma_free_cont_slots(unsigned slot, int count)
memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot_to_free),
&dummy_paramset, PARM_SIZE);
- clear_bit(slot_to_free, edma_info[ctlr]->edma_inuse);
+ clear_bit(slot_to_free, edma_cc[ctlr]->edma_inuse);
}
return 0;
@@ -907,7 +905,7 @@ void edma_set_src(unsigned slot, dma_addr_t src_port,
ctlr = EDMA_CTLR(slot);
slot = EDMA_CHAN_SLOT(slot);
- if (slot < edma_info[ctlr]->num_slots) {
+ if (slot < edma_cc[ctlr]->num_slots) {
unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);
if (mode) {
@@ -945,7 +943,7 @@ void edma_set_dest(unsigned slot, dma_addr_t dest_port,
ctlr = EDMA_CTLR(slot);
slot = EDMA_CHAN_SLOT(slot);
- if (slot < edma_info[ctlr]->num_slots) {
+ if (slot < edma_cc[ctlr]->num_slots) {
unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);
if (mode) {
@@ -1005,7 +1003,7 @@ void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx)
ctlr = EDMA_CTLR(slot);
slot = EDMA_CHAN_SLOT(slot);
- if (slot < edma_info[ctlr]->num_slots) {
+ if (slot < edma_cc[ctlr]->num_slots) {
edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
0xffff0000, src_bidx);
edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
@@ -1031,7 +1029,7 @@ void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx)
ctlr = EDMA_CTLR(slot);
slot = EDMA_CHAN_SLOT(slot);
- if (slot < edma_info[ctlr]->num_slots) {
+ if (slot < edma_cc[ctlr]->num_slots) {
edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
0x0000ffff, dest_bidx << 16);
edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
@@ -1078,7 +1076,7 @@ void edma_set_transfer_params(unsigned slot,
ctlr = EDMA_CTLR(slot);
slot = EDMA_CHAN_SLOT(slot);
- if (slot < edma_info[ctlr]->num_slots) {
+ if (slot < edma_cc[ctlr]->num_slots) {
edma_parm_modify(ctlr, PARM_LINK_BCNTRLD, slot,
0x0000ffff, bcnt_rld << 16);
if (sync_mode == ASYNC)
@@ -1108,9 +1106,9 @@ void edma_link(unsigned from, unsigned to)
ctlr_to = EDMA_CTLR(to);
to = EDMA_CHAN_SLOT(to);
- if (from >= edma_info[ctlr_from]->num_slots)
+ if (from >= edma_cc[ctlr_from]->num_slots)
return;
- if (to >= edma_info[ctlr_to]->num_slots)
+ if (to >= edma_cc[ctlr_to]->num_slots)
return;
edma_parm_modify(ctlr_from, PARM_LINK_BCNTRLD, from, 0xffff0000,
PARM_OFFSET(to));
@@ -1131,7 +1129,7 @@ void edma_unlink(unsigned from)
ctlr = EDMA_CTLR(from);
from = EDMA_CHAN_SLOT(from);
- if (from >= edma_info[ctlr]->num_slots)
+ if (from >= edma_cc[ctlr]->num_slots)
return;
edma_parm_or(ctlr, PARM_LINK_BCNTRLD, from, 0xffff);
}
@@ -1158,7 +1156,7 @@ void edma_write_slot(unsigned slot, const struct edmacc_param *param)
ctlr = EDMA_CTLR(slot);
slot = EDMA_CHAN_SLOT(slot);
- if (slot >= edma_info[ctlr]->num_slots)
+ if (slot >= edma_cc[ctlr]->num_slots)
return;
memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot), param,
PARM_SIZE);
@@ -1180,7 +1178,7 @@ void edma_read_slot(unsigned slot, struct edmacc_param *param)
ctlr = EDMA_CTLR(slot);
slot = EDMA_CHAN_SLOT(slot);
- if (slot >= edma_info[ctlr]->num_slots)
+ if (slot >= edma_cc[ctlr]->num_slots)
return;
memcpy_fromio(param, edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
PARM_SIZE);
@@ -1205,7 +1203,7 @@ void edma_pause(unsigned channel)
ctlr = EDMA_CTLR(channel);
channel = EDMA_CHAN_SLOT(channel);
- if (channel < edma_info[ctlr]->num_channels) {
+ if (channel < edma_cc[ctlr]->num_channels) {
unsigned int mask = (1 << (channel & 0x1f));
edma_shadow0_write_array(ctlr, SH_EECR, channel >> 5, mask);
@@ -1226,7 +1224,7 @@ void edma_resume(unsigned channel)
ctlr = EDMA_CTLR(channel);
channel = EDMA_CHAN_SLOT(channel);
- if (channel < edma_info[ctlr]->num_channels) {
+ if (channel < edma_cc[ctlr]->num_channels) {
unsigned int mask = (1 << (channel & 0x1f));
edma_shadow0_write_array(ctlr, SH_EESR, channel >> 5, mask);
@@ -1252,12 +1250,12 @@ int edma_start(unsigned channel)
ctlr = EDMA_CTLR(channel);
channel = EDMA_CHAN_SLOT(channel);
- if (channel < edma_info[ctlr]->num_channels) {
+ if (channel < edma_cc[ctlr]->num_channels) {
int j = channel >> 5;
unsigned int mask = (1 << (channel & 0x1f));
/* EDMA channels without event association */
- if (test_bit(channel, edma_info[ctlr]->edma_unused)) {
+ if (test_bit(channel, edma_cc[ctlr]->edma_unused)) {
pr_debug("EDMA: ESR%d %08x\n", j,
edma_shadow0_read_array(ctlr, SH_ESR, j));
edma_shadow0_write_array(ctlr, SH_ESR, j, mask);
@@ -1298,7 +1296,7 @@ void edma_stop(unsigned channel)
ctlr = EDMA_CTLR(channel);
channel = EDMA_CHAN_SLOT(channel);
- if (channel < edma_info[ctlr]->num_channels) {
+ if (channel < edma_cc[ctlr]->num_channels) {
int j = channel >> 5;
unsigned int mask = (1 << (channel & 0x1f));
@@ -1337,7 +1335,7 @@ void edma_clean_channel(unsigned channel)
ctlr = EDMA_CTLR(channel);
channel = EDMA_CHAN_SLOT(channel);
- if (channel < edma_info[ctlr]->num_channels) {
+ if (channel < edma_cc[ctlr]->num_channels) {
int j = (channel >> 5);
unsigned int mask = 1 << (channel & 0x1f);
@@ -1365,7 +1363,7 @@ void edma_clear_event(unsigned channel)
ctlr = EDMA_CTLR(channel);
channel = EDMA_CHAN_SLOT(channel);
- if (channel >= edma_info[ctlr]->num_channels)
+ if (channel >= edma_cc[ctlr]->num_channels)
return;
if (channel < 32)
edma_write(ctlr, EDMA_ECR, 1 << channel);
@@ -1402,8 +1400,9 @@ static int __init edma_probe(struct platform_device *pdev)
break;
else
return -ENODEV;
- } else
+ } else {
found = 1;
+ }
len[j] = resource_size(r[j]);
@@ -1420,38 +1419,37 @@ static int __init edma_probe(struct platform_device *pdev)
goto fail1;
}
- edma_info[j] = kmalloc(sizeof(struct edma), GFP_KERNEL);
- if (!edma_info[j]) {
+ edma_cc[j] = kmalloc(sizeof(struct edma), GFP_KERNEL);
+ if (!edma_cc[j]) {
status = -ENOMEM;
goto fail1;
}
- memset(edma_info[j], 0, sizeof(struct edma));
+ memset(edma_cc[j], 0, sizeof(struct edma));
- edma_info[j]->num_channels = min_t(unsigned, info[j].n_channel,
+ edma_cc[j]->num_channels = min_t(unsigned, info[j].n_channel,
EDMA_MAX_DMACH);
- edma_info[j]->num_slots = min_t(unsigned, info[j].n_slot,
+ edma_cc[j]->num_slots = min_t(unsigned, info[j].n_slot,
EDMA_MAX_PARAMENTRY);
- edma_info[j]->num_cc = min_t(unsigned, info[j].n_cc,
- EDMA_MAX_CC);
+ edma_cc[j]->num_cc = min_t(unsigned, info[j].n_cc, EDMA_MAX_CC);
- edma_info[j]->default_queue = info[j].default_queue;
- if (!edma_info[j]->default_queue)
- edma_info[j]->default_queue = EVENTQ_1;
+ edma_cc[j]->default_queue = info[j].default_queue;
+ if (!edma_cc[j]->default_queue)
+ edma_cc[j]->default_queue = EVENTQ_1;
dev_dbg(&pdev->dev, "DMA REG BASE ADDR=%p\n",
edmacc_regs_base[j]);
- for (i = 0; i < edma_info[j]->num_slots; i++)
+ for (i = 0; i < edma_cc[j]->num_slots; i++)
memcpy_toio(edmacc_regs_base[j] + PARM_OFFSET(i),
&dummy_paramset, PARM_SIZE);
/* Mark all channels as unused */
- memset(edma_info[j]->edma_unused, 0xff,
- sizeof(edma_info[j]->edma_unused));
+ memset(edma_cc[j]->edma_unused, 0xff,
+ sizeof(edma_cc[j]->edma_unused));
sprintf(irq_name, "edma%d", j);
irq[j] = platform_get_irq_byname(pdev, irq_name);
- edma_info[j]->irq_res_start = irq[j];
+ edma_cc[j]->irq_res_start = irq[j];
status = request_irq(irq[j], dma_irq_handler, 0, "edma",
&pdev->dev);
if (status < 0) {
@@ -1462,7 +1460,7 @@ static int __init edma_probe(struct platform_device *pdev)
sprintf(irq_name, "edma%d_err", j);
err_irq[j] = platform_get_irq_byname(pdev, irq_name);
- edma_info[j]->irq_res_end = err_irq[j];
+ edma_cc[j]->irq_res_end = err_irq[j];
status = request_irq(err_irq[j], dma_ccerr_handler, 0,
"edma_error", &pdev->dev);
if (status < 0) {
@@ -1475,7 +1473,7 @@ static int __init edma_probe(struct platform_device *pdev)
* specified. This way, long transfers on the low priority queue
* started by the codec engine will not cause audio defects.
*/
- for (i = 0; i < edma_info[j]->num_channels; i++)
+ for (i = 0; i < edma_cc[j]->num_channels; i++)
map_dmach_queue(j, i, EVENTQ_1);
queue_tc_mapping = info[j].queue_tc_mapping;
@@ -1538,7 +1536,7 @@ fail1:
release_mem_region(r[i]->start, len[i]);
if (edmacc_regs_base[i])
iounmap(edmacc_regs_base[i]);
- kfree(edma_info[i]);
+ kfree(edma_cc[i]);
}
return status;
}
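In dma_irq_handler() the pending bits (IPR) are now masked with the enabled bits (IER) before a callback is invoked, so events on disabled channels no longer reach the intr_data callbacks. The per-word test amounts to the following sketch:

	/* Sketch of the check performed for each 32-channel word j. */
	u32 pending = edma_shadow0_read_array(ctlr, SH_IPR, j) &
		      edma_shadow0_read_array(ctlr, SH_IER, j);

	if (pending & BIT(i)) {
		edma_shadow0_write_array(ctlr, SH_ICR, j, BIT(i)); /* ack the event */
		if (edma_cc[ctlr]->intr_data[k].callback)
			edma_cc[ctlr]->intr_data[k].callback(k, DMA_COMPLETE,
					edma_cc[ctlr]->intr_data[k].data);
	}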
diff --git a/arch/arm/mach-davinci/gpio.c b/arch/arm/mach-davinci/gpio.c
index 744755b53236..2efb4468ebd0 100644
--- a/arch/arm/mach-davinci/gpio.c
+++ b/arch/arm/mach-davinci/gpio.c
@@ -20,46 +20,92 @@
#include <asm/mach/irq.h>
-static DEFINE_SPINLOCK(gpio_lock);
-
-struct davinci_gpio {
- struct gpio_chip chip;
- struct gpio_controller *__iomem regs;
- int irq_base;
+struct davinci_gpio_regs {
+ u32 dir;
+ u32 out_data;
+ u32 set_data;
+ u32 clr_data;
+ u32 in_data;
+ u32 set_rising;
+ u32 clr_rising;
+ u32 set_falling;
+ u32 clr_falling;
+ u32 intstat;
};
-static struct davinci_gpio chips[DIV_ROUND_UP(DAVINCI_N_GPIO, 32)];
+#define chip2controller(chip) \
+ container_of(chip, struct davinci_gpio_controller, chip)
+
+static struct davinci_gpio_controller chips[DIV_ROUND_UP(DAVINCI_N_GPIO, 32)];
+
+static struct davinci_gpio_regs __iomem __init *gpio2regs(unsigned gpio)
+{
+ void __iomem *ptr;
+ void __iomem *base = davinci_soc_info.gpio_base;
+
+ if (gpio < 32 * 1)
+ ptr = base + 0x10;
+ else if (gpio < 32 * 2)
+ ptr = base + 0x38;
+ else if (gpio < 32 * 3)
+ ptr = base + 0x60;
+ else if (gpio < 32 * 4)
+ ptr = base + 0x88;
+ else if (gpio < 32 * 5)
+ ptr = base + 0xb0;
+ else
+ ptr = NULL;
+ return ptr;
+}
-/* create a non-inlined version */
-static struct gpio_controller __iomem * __init gpio2controller(unsigned gpio)
+static inline struct davinci_gpio_regs __iomem *irq2regs(int irq)
{
- return __gpio_to_controller(gpio);
+ struct davinci_gpio_regs __iomem *g;
+
+ g = (__force struct davinci_gpio_regs __iomem *)get_irq_chip_data(irq);
+
+ return g;
}
static int __init davinci_gpio_irq_setup(void);
/*--------------------------------------------------------------------------*/
-/*
- * board setup code *MUST* set PINMUX0 and PINMUX1 as
- * needed, and enable the GPIO clock.
- */
-
-static int davinci_direction_in(struct gpio_chip *chip, unsigned offset)
+/* board setup code *MUST* setup pinmux and enable the GPIO clock. */
+static inline int __davinci_direction(struct gpio_chip *chip,
+ unsigned offset, bool out, int value)
{
- struct davinci_gpio *d = container_of(chip, struct davinci_gpio, chip);
- struct gpio_controller *__iomem g = d->regs;
+ struct davinci_gpio_controller *d = chip2controller(chip);
+ struct davinci_gpio_regs __iomem *g = d->regs;
+ unsigned long flags;
u32 temp;
+ u32 mask = 1 << offset;
- spin_lock(&gpio_lock);
+ spin_lock_irqsave(&d->lock, flags);
temp = __raw_readl(&g->dir);
- temp |= (1 << offset);
+ if (out) {
+ temp &= ~mask;
+ __raw_writel(mask, value ? &g->set_data : &g->clr_data);
+ } else {
+ temp |= mask;
+ }
__raw_writel(temp, &g->dir);
- spin_unlock(&gpio_lock);
+ spin_unlock_irqrestore(&d->lock, flags);
return 0;
}
+static int davinci_direction_in(struct gpio_chip *chip, unsigned offset)
+{
+ return __davinci_direction(chip, offset, false, 0);
+}
+
+static int
+davinci_direction_out(struct gpio_chip *chip, unsigned offset, int value)
+{
+ return __davinci_direction(chip, offset, true, value);
+}
+
/*
* Read the pin's value (works even if it's set up as output);
* returns zero/nonzero.
@@ -69,37 +115,20 @@ static int davinci_direction_in(struct gpio_chip *chip, unsigned offset)
*/
static int davinci_gpio_get(struct gpio_chip *chip, unsigned offset)
{
- struct davinci_gpio *d = container_of(chip, struct davinci_gpio, chip);
- struct gpio_controller *__iomem g = d->regs;
+ struct davinci_gpio_controller *d = chip2controller(chip);
+ struct davinci_gpio_regs __iomem *g = d->regs;
return (1 << offset) & __raw_readl(&g->in_data);
}
-static int
-davinci_direction_out(struct gpio_chip *chip, unsigned offset, int value)
-{
- struct davinci_gpio *d = container_of(chip, struct davinci_gpio, chip);
- struct gpio_controller *__iomem g = d->regs;
- u32 temp;
- u32 mask = 1 << offset;
-
- spin_lock(&gpio_lock);
- temp = __raw_readl(&g->dir);
- temp &= ~mask;
- __raw_writel(mask, value ? &g->set_data : &g->clr_data);
- __raw_writel(temp, &g->dir);
- spin_unlock(&gpio_lock);
- return 0;
-}
-
/*
* Assuming the pin is muxed as a gpio output, set its output value.
*/
static void
davinci_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
- struct davinci_gpio *d = container_of(chip, struct davinci_gpio, chip);
- struct gpio_controller *__iomem g = d->regs;
+ struct davinci_gpio_controller *d = chip2controller(chip);
+ struct davinci_gpio_regs __iomem *g = d->regs;
__raw_writel((1 << offset), value ? &g->set_data : &g->clr_data);
}
@@ -109,6 +138,10 @@ static int __init davinci_gpio_setup(void)
int i, base;
unsigned ngpio;
struct davinci_soc_info *soc_info = &davinci_soc_info;
+ struct davinci_gpio_regs *regs;
+
+ if (soc_info->gpio_type != GPIO_TYPE_DAVINCI)
+ return 0;
/*
* The gpio banks conceptually expose a segmented bitmap,
@@ -137,11 +170,20 @@ static int __init davinci_gpio_setup(void)
if (chips[i].chip.ngpio > 32)
chips[i].chip.ngpio = 32;
- chips[i].regs = gpio2controller(base);
+ spin_lock_init(&chips[i].lock);
+
+ regs = gpio2regs(base);
+ chips[i].regs = regs;
+ chips[i].set_data = &regs->set_data;
+ chips[i].clr_data = &regs->clr_data;
+ chips[i].in_data = &regs->in_data;
gpiochip_add(&chips[i].chip);
}
+ soc_info->gpio_ctlrs = chips;
+ soc_info->gpio_ctlrs_num = DIV_ROUND_UP(ngpio, 32);
+
davinci_gpio_irq_setup();
return 0;
}
@@ -161,7 +203,7 @@ pure_initcall(davinci_gpio_setup);
static void gpio_irq_disable(unsigned irq)
{
- struct gpio_controller *__iomem g = get_irq_chip_data(irq);
+ struct davinci_gpio_regs __iomem *g = irq2regs(irq);
u32 mask = (u32) get_irq_data(irq);
__raw_writel(mask, &g->clr_falling);
@@ -170,7 +212,7 @@ static void gpio_irq_disable(unsigned irq)
static void gpio_irq_enable(unsigned irq)
{
- struct gpio_controller *__iomem g = get_irq_chip_data(irq);
+ struct davinci_gpio_regs __iomem *g = irq2regs(irq);
u32 mask = (u32) get_irq_data(irq);
unsigned status = irq_desc[irq].status;
@@ -186,7 +228,7 @@ static void gpio_irq_enable(unsigned irq)
static int gpio_irq_type(unsigned irq, unsigned trigger)
{
- struct gpio_controller *__iomem g = get_irq_chip_data(irq);
+ struct davinci_gpio_regs __iomem *g = irq2regs(irq);
u32 mask = (u32) get_irq_data(irq);
if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
@@ -215,7 +257,7 @@ static struct irq_chip gpio_irqchip = {
static void
gpio_irq_handler(unsigned irq, struct irq_desc *desc)
{
- struct gpio_controller *__iomem g = get_irq_chip_data(irq);
+ struct davinci_gpio_regs __iomem *g = irq2regs(irq);
u32 mask = 0xffff;
/* we only care about one bank */
@@ -253,7 +295,7 @@ gpio_irq_handler(unsigned irq, struct irq_desc *desc)
static int gpio_to_irq_banked(struct gpio_chip *chip, unsigned offset)
{
- struct davinci_gpio *d = container_of(chip, struct davinci_gpio, chip);
+ struct davinci_gpio_controller *d = chip2controller(chip);
if (d->irq_base >= 0)
return d->irq_base + offset;
@@ -276,7 +318,7 @@ static int gpio_to_irq_unbanked(struct gpio_chip *chip, unsigned offset)
static int gpio_irq_type_unbanked(unsigned irq, unsigned trigger)
{
- struct gpio_controller *__iomem g = get_irq_chip_data(irq);
+ struct davinci_gpio_regs __iomem *g = irq2regs(irq);
u32 mask = (u32) get_irq_data(irq);
if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
@@ -305,7 +347,7 @@ static int __init davinci_gpio_irq_setup(void)
u32 binten = 0;
unsigned ngpio, bank_irq;
struct davinci_soc_info *soc_info = &davinci_soc_info;
- struct gpio_controller *__iomem g;
+ struct davinci_gpio_regs __iomem *g;
ngpio = soc_info->gpio_num;
@@ -354,7 +396,7 @@ static int __init davinci_gpio_irq_setup(void)
gpio_irqchip_unbanked.set_type = gpio_irq_type_unbanked;
/* default trigger: both edges */
- g = gpio2controller(0);
+ g = gpio2regs(0);
__raw_writel(~0, &g->set_falling);
__raw_writel(~0, &g->set_rising);
@@ -362,7 +404,7 @@ static int __init davinci_gpio_irq_setup(void)
for (gpio = 0; gpio < soc_info->gpio_unbanked; gpio++, irq++) {
set_irq_chip(irq, &gpio_irqchip_unbanked);
set_irq_data(irq, (void *) __gpio_mask(gpio));
- set_irq_chip_data(irq, g);
+ set_irq_chip_data(irq, (__force void *) g);
irq_desc[irq].status |= IRQ_TYPE_EDGE_BOTH;
}
@@ -379,18 +421,18 @@ static int __init davinci_gpio_irq_setup(void)
unsigned i;
/* disabled by default, enabled only as needed */
- g = gpio2controller(gpio);
+ g = gpio2regs(gpio);
__raw_writel(~0, &g->clr_falling);
__raw_writel(~0, &g->clr_rising);
/* set up all irqs in this bank */
set_irq_chained_handler(bank_irq, gpio_irq_handler);
- set_irq_chip_data(bank_irq, g);
- set_irq_data(bank_irq, (void *)irq);
+ set_irq_chip_data(bank_irq, (__force void *) g);
+ set_irq_data(bank_irq, (void *) irq);
for (i = 0; i < 16 && gpio < ngpio; i++, irq++, gpio++) {
set_irq_chip(irq, &gpio_irqchip);
- set_irq_chip_data(irq, g);
+ set_irq_chip_data(irq, (__force void *) g);
set_irq_data(irq, (void *) __gpio_mask(gpio));
set_irq_handler(irq, handle_simple_irq);
set_irq_flags(irq, IRQF_VALID);
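gpio2regs() replaces the old gpio2controller() lookup: the per-bank register blocks start at base + 0x10 and repeat every 0x28 bytes, which is all the if/else ladder encodes (plus returning NULL above 160 GPIOs). An equivalent one-line formulation, shown purely as a sketch for gpio < 160:

	/* Equivalent to the ladder in gpio2regs() for gpio < 160; illustrative only. */
	static inline void __iomem *example_gpio2regs(unsigned gpio, void __iomem *base)
	{
		return base + 0x10 + 0x28 * (gpio / 32);
	}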
diff --git a/arch/arm/mach-davinci/include/mach/common.h b/arch/arm/mach-davinci/include/mach/common.h
index 50a955f05ef9..a58bd88ba02d 100644
--- a/arch/arm/mach-davinci/include/mach/common.h
+++ b/arch/arm/mach-davinci/include/mach/common.h
@@ -12,6 +12,9 @@
#ifndef __ARCH_ARM_MACH_DAVINCI_COMMON_H
#define __ARCH_ARM_MACH_DAVINCI_COMMON_H
+#include <linux/compiler.h>
+#include <linux/types.h>
+
struct sys_timer;
extern struct sys_timer davinci_timer;
@@ -34,6 +37,8 @@ struct davinci_timer_info {
unsigned int clocksource_id;
};
+struct davinci_gpio_controller;
+
/* SoC specific init support */
struct davinci_soc_info {
struct map_desc *io_desc;
@@ -54,19 +59,25 @@ struct davinci_soc_info {
u8 *intc_irq_prios;
unsigned long intc_irq_num;
struct davinci_timer_info *timer_info;
+ int gpio_type;
void __iomem *gpio_base;
unsigned gpio_num;
unsigned gpio_irq;
unsigned gpio_unbanked;
+ struct davinci_gpio_controller *gpio_ctlrs;
+ int gpio_ctlrs_num;
struct platform_device *serial_dev;
struct emac_platform_data *emac_pdata;
dma_addr_t sram_dma;
unsigned sram_len;
+ struct platform_device *reset_device;
+ void (*reset)(struct platform_device *);
};
extern struct davinci_soc_info davinci_soc_info;
extern void davinci_common_init(struct davinci_soc_info *soc_info);
+extern void davinci_init_ide(void);
/* standard place to map on-chip SRAMs; they *may* support DMA */
#define SRAM_VIRT 0xfffe0000
diff --git a/arch/arm/mach-davinci/include/mach/cp_intc.h b/arch/arm/mach-davinci/include/mach/cp_intc.h
index c4d27eec8064..121b114df755 100644
--- a/arch/arm/mach-davinci/include/mach/cp_intc.h
+++ b/arch/arm/mach-davinci/include/mach/cp_intc.h
@@ -52,6 +52,6 @@
#define CP_INTC_VECTOR_ADDR(n) (0x2000 + (n << 2))
void __init cp_intc_init(void __iomem *base, unsigned short num_irq,
- u8 *irq_prio);
+ u8 *irq_prio, u32 *host_map);
#endif /* __ASM_HARDWARE_CP_INTC_H */
diff --git a/arch/arm/mach-davinci/include/mach/cputype.h b/arch/arm/mach-davinci/include/mach/cputype.h
index 189b1ff13642..cea6b8972043 100644
--- a/arch/arm/mach-davinci/include/mach/cputype.h
+++ b/arch/arm/mach-davinci/include/mach/cputype.h
@@ -33,6 +33,7 @@ struct davinci_id {
#define DAVINCI_CPU_ID_DM365 0x03650000
#define DAVINCI_CPU_ID_DA830 0x08300000
#define DAVINCI_CPU_ID_DA850 0x08500000
+#define DAVINCI_CPU_ID_TNETV107X 0x0b8a0000
#define IS_DAVINCI_CPU(type, id) \
static inline int is_davinci_ ##type(void) \
@@ -46,6 +47,7 @@ IS_DAVINCI_CPU(dm355, DAVINCI_CPU_ID_DM355)
IS_DAVINCI_CPU(dm365, DAVINCI_CPU_ID_DM365)
IS_DAVINCI_CPU(da830, DAVINCI_CPU_ID_DA830)
IS_DAVINCI_CPU(da850, DAVINCI_CPU_ID_DA850)
+IS_DAVINCI_CPU(tnetv107x, DAVINCI_CPU_ID_TNETV107X)
#ifdef CONFIG_ARCH_DAVINCI_DM644x
#define cpu_is_davinci_dm644x() is_davinci_dm644x()
@@ -83,4 +85,10 @@ IS_DAVINCI_CPU(da850, DAVINCI_CPU_ID_DA850)
#define cpu_is_davinci_da850() 0
#endif
+#ifdef CONFIG_ARCH_DAVINCI_TNETV107X
+#define cpu_is_davinci_tnetv107x() is_davinci_tnetv107x()
+#else
+#define cpu_is_davinci_tnetv107x() 0
+#endif
+
#endif
diff --git a/arch/arm/mach-davinci/include/mach/da8xx.h b/arch/arm/mach-davinci/include/mach/da8xx.h
index 03acfd39042b..1b31a9aa8fba 100644
--- a/arch/arm/mach-davinci/include/mach/da8xx.h
+++ b/arch/arm/mach-davinci/include/mach/da8xx.h
@@ -64,27 +64,6 @@ extern void __iomem *da8xx_syscfg1_base;
#define DA8XX_DDR2_CTL_BASE 0xb0000000
#define DA8XX_ARM_RAM_BASE 0xffff0000
-#define PINMUX0 0x00
-#define PINMUX1 0x04
-#define PINMUX2 0x08
-#define PINMUX3 0x0c
-#define PINMUX4 0x10
-#define PINMUX5 0x14
-#define PINMUX6 0x18
-#define PINMUX7 0x1c
-#define PINMUX8 0x20
-#define PINMUX9 0x24
-#define PINMUX10 0x28
-#define PINMUX11 0x2c
-#define PINMUX12 0x30
-#define PINMUX13 0x34
-#define PINMUX14 0x38
-#define PINMUX15 0x3c
-#define PINMUX16 0x40
-#define PINMUX17 0x44
-#define PINMUX18 0x48
-#define PINMUX19 0x4c
-
void __init da830_init(void);
void __init da850_init(void);
@@ -108,6 +87,8 @@ extern struct emac_platform_data da8xx_emac_pdata;
extern struct da8xx_lcdc_platform_data sharp_lcd035q3dg01_pdata;
extern struct da8xx_lcdc_platform_data sharp_lk043t1dg01_pdata;
+extern struct platform_device da8xx_wdt_device;
+
extern const short da830_emif25_pins[];
extern const short da830_spi0_pins[];
extern const short da830_spi1_pins[];
@@ -146,10 +127,4 @@ extern const short da850_mmcsd0_pins[];
extern const short da850_nand_pins[];
extern const short da850_nor_pins[];
-#ifdef CONFIG_DAVINCI_MUX
-int da8xx_pinmux_setup(const short pins[]);
-#else
-static inline int da8xx_pinmux_setup(const short pins[]) { return 0; }
-#endif
-
#endif /* __ASM_ARCH_DAVINCI_DA8XX_H */
diff --git a/arch/arm/mach-davinci/include/mach/dm355.h b/arch/arm/mach-davinci/include/mach/dm355.h
index 85536d8e8336..36dff4a0ce3f 100644
--- a/arch/arm/mach-davinci/include/mach/dm355.h
+++ b/arch/arm/mach-davinci/include/mach/dm355.h
@@ -15,6 +15,9 @@
#include <mach/asp.h>
#include <media/davinci/vpfe_capture.h>
+#define DM355_ASYNC_EMIF_CONTROL_BASE 0x01E10000
+#define DM355_ASYNC_EMIF_DATA_CE0_BASE 0x02000000
+
#define ASP1_TX_EVT_EN 1
#define ASP1_RX_EVT_EN 2
diff --git a/arch/arm/mach-davinci/include/mach/dm365.h b/arch/arm/mach-davinci/include/mach/dm365.h
index 3a37b5a6983c..ea5df3b49ec4 100644
--- a/arch/arm/mach-davinci/include/mach/dm365.h
+++ b/arch/arm/mach-davinci/include/mach/dm365.h
@@ -36,6 +36,10 @@
#define DAVINCI_DMA_VC_TX 2
#define DAVINCI_DMA_VC_RX 3
+#define DM365_ASYNC_EMIF_CONTROL_BASE 0x01D10000
+#define DM365_ASYNC_EMIF_DATA_CE0_BASE 0x02000000
+#define DM365_ASYNC_EMIF_DATA_CE1_BASE 0x04000000
+
void __init dm365_init(void);
void __init dm365_init_asp(struct snd_platform_data *pdata);
void __init dm365_init_vc(struct snd_platform_data *pdata);
diff --git a/arch/arm/mach-davinci/include/mach/dm644x.h b/arch/arm/mach-davinci/include/mach/dm644x.h
index 1a8b09ccc3c8..6fca568a0fd2 100644
--- a/arch/arm/mach-davinci/include/mach/dm644x.h
+++ b/arch/arm/mach-davinci/include/mach/dm644x.h
@@ -34,6 +34,12 @@
#define DM644X_EMAC_MDIO_OFFSET (0x4000)
#define DM644X_EMAC_CNTRL_RAM_SIZE (0x2000)
+#define DM644X_ASYNC_EMIF_CONTROL_BASE 0x01E00000
+#define DM644X_ASYNC_EMIF_DATA_CE0_BASE 0x02000000
+#define DM644X_ASYNC_EMIF_DATA_CE1_BASE 0x04000000
+#define DM644X_ASYNC_EMIF_DATA_CE2_BASE 0x06000000
+#define DM644X_ASYNC_EMIF_DATA_CE3_BASE 0x08000000
+
void __init dm644x_init(void);
void __init dm644x_init_asp(struct snd_platform_data *pdata);
void dm644x_set_vpfe_config(struct vpfe_config *cfg);
diff --git a/arch/arm/mach-davinci/include/mach/dm646x.h b/arch/arm/mach-davinci/include/mach/dm646x.h
index 846da98b619a..add6f794a362 100644
--- a/arch/arm/mach-davinci/include/mach/dm646x.h
+++ b/arch/arm/mach-davinci/include/mach/dm646x.h
@@ -25,10 +25,10 @@
#define DM646X_EMAC_MDIO_OFFSET (0x4000)
#define DM646X_EMAC_CNTRL_RAM_SIZE (0x2000)
-#define DM646X_ATA_REG_BASE (0x01C66000)
+#define DM646X_ASYNC_EMIF_CONTROL_BASE 0x20008000
+#define DM646X_ASYNC_EMIF_CS2_SPACE_BASE 0x42000000
void __init dm646x_init(void);
-void __init dm646x_init_ide(void);
void __init dm646x_init_mcasp0(struct snd_platform_data *pdata);
void __init dm646x_init_mcasp1(struct snd_platform_data *pdata);
void __init dm646x_board_setup_refclk(struct clk *clk);
diff --git a/arch/arm/mach-davinci/include/mach/gpio.h b/arch/arm/mach-davinci/include/mach/gpio.h
index f3b8ef878158..504cc180a60b 100644
--- a/arch/arm/mach-davinci/include/mach/gpio.h
+++ b/arch/arm/mach-davinci/include/mach/gpio.h
@@ -14,6 +14,8 @@
#define __DAVINCI_GPIO_H
#include <linux/io.h>
+#include <linux/spinlock.h>
+
#include <asm-generic/gpio.h>
#include <mach/irqs.h>
@@ -21,6 +23,10 @@
#define DAVINCI_GPIO_BASE 0x01C67000
+enum davinci_gpio_type {
+ GPIO_TYPE_DAVINCI = 0,
+};
+
/*
* basic gpio routines
*
@@ -45,17 +51,14 @@
/* Convert GPIO signal to GPIO pin number */
#define GPIO_TO_PIN(bank, gpio) (16 * (bank) + (gpio))
-struct gpio_controller {
- u32 dir;
- u32 out_data;
- u32 set_data;
- u32 clr_data;
- u32 in_data;
- u32 set_rising;
- u32 clr_rising;
- u32 set_falling;
- u32 clr_falling;
- u32 intstat;
+struct davinci_gpio_controller {
+ struct gpio_chip chip;
+ int irq_base;
+ spinlock_t lock;
+ void __iomem *regs;
+ void __iomem *set_data;
+ void __iomem *clr_data;
+ void __iomem *in_data;
};
/* The __gpio_to_controller() and __gpio_mask() functions inline to constants
@@ -67,25 +70,16 @@ struct gpio_controller {
*
* These are NOT part of the cross-platform GPIO interface
*/
-static inline struct gpio_controller *__iomem
+static inline struct davinci_gpio_controller *
__gpio_to_controller(unsigned gpio)
{
- void *__iomem ptr;
- void __iomem *base = davinci_soc_info.gpio_base;
-
- if (gpio < 32 * 1)
- ptr = base + 0x10;
- else if (gpio < 32 * 2)
- ptr = base + 0x38;
- else if (gpio < 32 * 3)
- ptr = base + 0x60;
- else if (gpio < 32 * 4)
- ptr = base + 0x88;
- else if (gpio < 32 * 5)
- ptr = base + 0xb0;
- else
- ptr = NULL;
- return ptr;
+ struct davinci_gpio_controller *ctlrs = davinci_soc_info.gpio_ctlrs;
+ int index = gpio / 32;
+
+ if (!ctlrs || index >= davinci_soc_info.gpio_ctlrs_num)
+ return NULL;
+
+ return ctlrs + index;
}
static inline u32 __gpio_mask(unsigned gpio)
@@ -101,16 +95,16 @@ static inline u32 __gpio_mask(unsigned gpio)
*/
static inline void gpio_set_value(unsigned gpio, int value)
{
- if (__builtin_constant_p(value) && gpio < DAVINCI_N_GPIO) {
- struct gpio_controller *__iomem g;
- u32 mask;
+ if (__builtin_constant_p(value) && gpio < davinci_soc_info.gpio_num) {
+ struct davinci_gpio_controller *ctlr;
+ u32 mask;
- g = __gpio_to_controller(gpio);
+ ctlr = __gpio_to_controller(gpio);
mask = __gpio_mask(gpio);
if (value)
- __raw_writel(mask, &g->set_data);
+ __raw_writel(mask, ctlr->set_data);
else
- __raw_writel(mask, &g->clr_data);
+ __raw_writel(mask, ctlr->clr_data);
return;
}
@@ -128,18 +122,18 @@ static inline void gpio_set_value(unsigned gpio, int value)
*/
static inline int gpio_get_value(unsigned gpio)
{
- struct gpio_controller *__iomem g;
+ struct davinci_gpio_controller *ctlr;
- if (!__builtin_constant_p(gpio) || gpio >= DAVINCI_N_GPIO)
+ if (!__builtin_constant_p(gpio) || gpio >= davinci_soc_info.gpio_num)
return __gpio_get_value(gpio);
- g = __gpio_to_controller(gpio);
- return __gpio_mask(gpio) & __raw_readl(&g->in_data);
+ ctlr = __gpio_to_controller(gpio);
+ return __gpio_mask(gpio) & __raw_readl(ctlr->in_data);
}
static inline int gpio_cansleep(unsigned gpio)
{
- if (__builtin_constant_p(gpio) && gpio < DAVINCI_N_GPIO)
+ if (__builtin_constant_p(gpio) && gpio < davinci_soc_info.gpio_num)
return 0;
else
return __gpio_cansleep(gpio);
diff --git a/arch/arm/mach-davinci/include/mach/irqs.h b/arch/arm/mach-davinci/include/mach/irqs.h
index 354af71798dc..ec76c7775c2e 100644
--- a/arch/arm/mach-davinci/include/mach/irqs.h
+++ b/arch/arm/mach-davinci/include/mach/irqs.h
@@ -401,6 +401,103 @@
#define DA850_N_CP_INTC_IRQ 101
+
+/* TNETV107X specific interrupts */
+#define IRQ_TNETV107X_TDM1_TXDMA 0
+#define IRQ_TNETV107X_EXT_INT_0 1
+#define IRQ_TNETV107X_EXT_INT_1 2
+#define IRQ_TNETV107X_GPIO_INT12 3
+#define IRQ_TNETV107X_GPIO_INT13 4
+#define IRQ_TNETV107X_TIMER_0_TINT12 5
+#define IRQ_TNETV107X_TIMER_1_TINT12 6
+#define IRQ_TNETV107X_UART0 7
+#define IRQ_TNETV107X_TDM1_RXDMA 8
+#define IRQ_TNETV107X_MCDMA_INT0 9
+#define IRQ_TNETV107X_MCDMA_INT1 10
+#define IRQ_TNETV107X_TPCC 11
+#define IRQ_TNETV107X_TPCC_INT0 12
+#define IRQ_TNETV107X_TPCC_INT1 13
+#define IRQ_TNETV107X_TPCC_INT2 14
+#define IRQ_TNETV107X_TPCC_INT3 15
+#define IRQ_TNETV107X_TPTC0 16
+#define IRQ_TNETV107X_TPTC1 17
+#define IRQ_TNETV107X_TIMER_0_TINT34 18
+#define IRQ_TNETV107X_ETHSS 19
+#define IRQ_TNETV107X_TIMER_1_TINT34 20
+#define IRQ_TNETV107X_DSP2ARM_INT0 21
+#define IRQ_TNETV107X_DSP2ARM_INT1 22
+#define IRQ_TNETV107X_ARM_NPMUIRQ 23
+#define IRQ_TNETV107X_USB1 24
+#define IRQ_TNETV107X_VLYNQ 25
+#define IRQ_TNETV107X_UART0_DMATX 26
+#define IRQ_TNETV107X_UART0_DMARX 27
+#define IRQ_TNETV107X_TDM1_TXMCSP 28
+#define IRQ_TNETV107X_SSP 29
+#define IRQ_TNETV107X_MCDMA_INT2 30
+#define IRQ_TNETV107X_MCDMA_INT3 31
+#define IRQ_TNETV107X_TDM_CODECIF_EOT 32
+#define IRQ_TNETV107X_IMCOP_SQR_ARM 33
+#define IRQ_TNETV107X_USB0 34
+#define IRQ_TNETV107X_USB_CDMA 35
+#define IRQ_TNETV107X_LCD 36
+#define IRQ_TNETV107X_KEYPAD 37
+#define IRQ_TNETV107X_KEYPAD_FREE 38
+#define IRQ_TNETV107X_RNG 39
+#define IRQ_TNETV107X_PKA 40
+#define IRQ_TNETV107X_TDM0_TXDMA 41
+#define IRQ_TNETV107X_TDM0_RXDMA 42
+#define IRQ_TNETV107X_TDM0_TXMCSP 43
+#define IRQ_TNETV107X_TDM0_RXMCSP 44
+#define IRQ_TNETV107X_TDM1_RXMCSP 45
+#define IRQ_TNETV107X_SDIO1 46
+#define IRQ_TNETV107X_SDIO0 47
+#define IRQ_TNETV107X_TSC 48
+#define IRQ_TNETV107X_TS 49
+#define IRQ_TNETV107X_UART1 50
+#define IRQ_TNETV107X_MBX_LITE 51
+#define IRQ_TNETV107X_GPIO_INT00 52
+#define IRQ_TNETV107X_GPIO_INT01 53
+#define IRQ_TNETV107X_GPIO_INT02 54
+#define IRQ_TNETV107X_GPIO_INT03 55
+#define IRQ_TNETV107X_UART2 56
+#define IRQ_TNETV107X_UART2_DMATX 57
+#define IRQ_TNETV107X_UART2_DMARX 58
+#define IRQ_TNETV107X_IMCOP_IMX 59
+#define IRQ_TNETV107X_IMCOP_VLCD 60
+#define IRQ_TNETV107X_AES 61
+#define IRQ_TNETV107X_DES 62
+#define IRQ_TNETV107X_SHAMD5 63
+#define IRQ_TNETV107X_TPCC_ERR 68
+#define IRQ_TNETV107X_TPCC_PROT 69
+#define IRQ_TNETV107X_TPTC0_ERR 70
+#define IRQ_TNETV107X_TPTC1_ERR 71
+#define IRQ_TNETV107X_UART0_ERR 72
+#define IRQ_TNETV107X_UART1_ERR 73
+#define IRQ_TNETV107X_AEMIF_ERR 74
+#define IRQ_TNETV107X_DDR_ERR 75
+#define IRQ_TNETV107X_WDTARM_INT0 76
+#define IRQ_TNETV107X_MCDMA_ERR 77
+#define IRQ_TNETV107X_GPIO_ERR 78
+#define IRQ_TNETV107X_MPU_ADDR 79
+#define IRQ_TNETV107X_MPU_PROT 80
+#define IRQ_TNETV107X_IOPU_ADDR 81
+#define IRQ_TNETV107X_IOPU_PROT 82
+#define IRQ_TNETV107X_KEYPAD_ADDR_ERR 83
+#define IRQ_TNETV107X_WDT0_ADDR_ERR 84
+#define IRQ_TNETV107X_WDT1_ADDR_ERR 85
+#define IRQ_TNETV107X_CLKCTL_ADDR_ERR 86
+#define IRQ_TNETV107X_PLL_UNLOCK 87
+#define IRQ_TNETV107X_WDTDSP_INT0 88
+#define IRQ_TNETV107X_SEC_CTRL_VIOLATION 89
+#define IRQ_TNETV107X_KEY_MNG_VIOLATION 90
+#define IRQ_TNETV107X_PBIST_CPU 91
+#define IRQ_TNETV107X_WDTARM 92
+#define IRQ_TNETV107X_PSC 93
+#define IRQ_TNETV107X_MMC0 94
+#define IRQ_TNETV107X_MMC1 95
+
+#define TNETV107X_N_CP_INTC_IRQ 96
+
/* da850 currently has the most gpio pins (144) */
#define DAVINCI_N_GPIO 144
/* da850 currently has the most irqs so use DA850_N_CP_INTC_IRQ */
diff --git a/arch/arm/mach-davinci/include/mach/mux.h b/arch/arm/mach-davinci/include/mach/mux.h
index 2a68c1d8a24b..743dbd2e0ea3 100644
--- a/arch/arm/mach-davinci/include/mach/mux.h
+++ b/arch/arm/mach-davinci/include/mach/mux.h
@@ -291,6 +291,7 @@ enum davinci_dm365_index {
DM365_GPIO20,
DM365_GPIO33,
DM365_GPIO40,
+ DM365_GPIO64_57,
/* Video */
DM365_VOUT_FIELD,
@@ -904,12 +905,288 @@ enum davinci_da850_index {
DA850_RTC_ALARM,
};
+enum davinci_tnetv107x_index {
+ TNETV107X_ASR_A00,
+ TNETV107X_GPIO32,
+ TNETV107X_ASR_A01,
+ TNETV107X_GPIO33,
+ TNETV107X_ASR_A02,
+ TNETV107X_GPIO34,
+ TNETV107X_ASR_A03,
+ TNETV107X_GPIO35,
+ TNETV107X_ASR_A04,
+ TNETV107X_GPIO36,
+ TNETV107X_ASR_A05,
+ TNETV107X_GPIO37,
+ TNETV107X_ASR_A06,
+ TNETV107X_GPIO38,
+ TNETV107X_ASR_A07,
+ TNETV107X_GPIO39,
+ TNETV107X_ASR_A08,
+ TNETV107X_GPIO40,
+ TNETV107X_ASR_A09,
+ TNETV107X_GPIO41,
+ TNETV107X_ASR_A10,
+ TNETV107X_GPIO42,
+ TNETV107X_ASR_A11,
+ TNETV107X_BOOT_STRP_0,
+ TNETV107X_ASR_A12,
+ TNETV107X_BOOT_STRP_1,
+ TNETV107X_ASR_A13,
+ TNETV107X_GPIO43,
+ TNETV107X_ASR_A14,
+ TNETV107X_GPIO44,
+ TNETV107X_ASR_A15,
+ TNETV107X_GPIO45,
+ TNETV107X_ASR_A16,
+ TNETV107X_GPIO46,
+ TNETV107X_ASR_A17,
+ TNETV107X_GPIO47,
+ TNETV107X_ASR_A18,
+ TNETV107X_GPIO48,
+ TNETV107X_SDIO1_DATA3_0,
+ TNETV107X_ASR_A19,
+ TNETV107X_GPIO49,
+ TNETV107X_SDIO1_DATA2_0,
+ TNETV107X_ASR_A20,
+ TNETV107X_GPIO50,
+ TNETV107X_SDIO1_DATA1_0,
+ TNETV107X_ASR_A21,
+ TNETV107X_GPIO51,
+ TNETV107X_SDIO1_DATA0_0,
+ TNETV107X_ASR_A22,
+ TNETV107X_GPIO52,
+ TNETV107X_SDIO1_CMD_0,
+ TNETV107X_ASR_A23,
+ TNETV107X_GPIO53,
+ TNETV107X_SDIO1_CLK_0,
+ TNETV107X_ASR_BA_1,
+ TNETV107X_GPIO54,
+ TNETV107X_SYS_PLL_CLK,
+ TNETV107X_ASR_CS0,
+ TNETV107X_ASR_CS1,
+ TNETV107X_ASR_CS2,
+ TNETV107X_TDM_PLL_CLK,
+ TNETV107X_ASR_CS3,
+ TNETV107X_ETH_PHY_CLK,
+ TNETV107X_ASR_D00,
+ TNETV107X_GPIO55,
+ TNETV107X_ASR_D01,
+ TNETV107X_GPIO56,
+ TNETV107X_ASR_D02,
+ TNETV107X_GPIO57,
+ TNETV107X_ASR_D03,
+ TNETV107X_GPIO58,
+ TNETV107X_ASR_D04,
+ TNETV107X_GPIO59_0,
+ TNETV107X_ASR_D05,
+ TNETV107X_GPIO60_0,
+ TNETV107X_ASR_D06,
+ TNETV107X_GPIO61_0,
+ TNETV107X_ASR_D07,
+ TNETV107X_GPIO62_0,
+ TNETV107X_ASR_D08,
+ TNETV107X_GPIO63_0,
+ TNETV107X_ASR_D09,
+ TNETV107X_GPIO64_0,
+ TNETV107X_ASR_D10,
+ TNETV107X_SDIO1_DATA3_1,
+ TNETV107X_ASR_D11,
+ TNETV107X_SDIO1_DATA2_1,
+ TNETV107X_ASR_D12,
+ TNETV107X_SDIO1_DATA1_1,
+ TNETV107X_ASR_D13,
+ TNETV107X_SDIO1_DATA0_1,
+ TNETV107X_ASR_D14,
+ TNETV107X_SDIO1_CMD_1,
+ TNETV107X_ASR_D15,
+ TNETV107X_SDIO1_CLK_1,
+ TNETV107X_ASR_OE,
+ TNETV107X_BOOT_STRP_2,
+ TNETV107X_ASR_RNW,
+ TNETV107X_GPIO29_0,
+ TNETV107X_ASR_WAIT,
+ TNETV107X_GPIO30_0,
+ TNETV107X_ASR_WE,
+ TNETV107X_BOOT_STRP_3,
+ TNETV107X_ASR_WE_DQM0,
+ TNETV107X_GPIO31,
+ TNETV107X_LCD_PD17_0,
+ TNETV107X_ASR_WE_DQM1,
+ TNETV107X_ASR_BA0_0,
+ TNETV107X_VLYNQ_CLK,
+ TNETV107X_GPIO14,
+ TNETV107X_LCD_PD19_0,
+ TNETV107X_VLYNQ_RXD0,
+ TNETV107X_GPIO15,
+ TNETV107X_LCD_PD20_0,
+ TNETV107X_VLYNQ_RXD1,
+ TNETV107X_GPIO16,
+ TNETV107X_LCD_PD21_0,
+ TNETV107X_VLYNQ_TXD0,
+ TNETV107X_GPIO17,
+ TNETV107X_LCD_PD22_0,
+ TNETV107X_VLYNQ_TXD1,
+ TNETV107X_GPIO18,
+ TNETV107X_LCD_PD23_0,
+ TNETV107X_SDIO0_CLK,
+ TNETV107X_GPIO19,
+ TNETV107X_SDIO0_CMD,
+ TNETV107X_GPIO20,
+ TNETV107X_SDIO0_DATA0,
+ TNETV107X_GPIO21,
+ TNETV107X_SDIO0_DATA1,
+ TNETV107X_GPIO22,
+ TNETV107X_SDIO0_DATA2,
+ TNETV107X_GPIO23,
+ TNETV107X_SDIO0_DATA3,
+ TNETV107X_GPIO24,
+ TNETV107X_EMU0,
+ TNETV107X_EMU1,
+ TNETV107X_RTCK,
+ TNETV107X_TRST_N,
+ TNETV107X_TCK,
+ TNETV107X_TDI,
+ TNETV107X_TDO,
+ TNETV107X_TMS,
+ TNETV107X_TDM1_CLK,
+ TNETV107X_TDM1_RX,
+ TNETV107X_TDM1_TX,
+ TNETV107X_TDM1_FS,
+ TNETV107X_KEYPAD_R0,
+ TNETV107X_KEYPAD_R1,
+ TNETV107X_KEYPAD_R2,
+ TNETV107X_KEYPAD_R3,
+ TNETV107X_KEYPAD_R4,
+ TNETV107X_KEYPAD_R5,
+ TNETV107X_KEYPAD_R6,
+ TNETV107X_GPIO12,
+ TNETV107X_KEYPAD_R7,
+ TNETV107X_GPIO10,
+ TNETV107X_KEYPAD_C0,
+ TNETV107X_KEYPAD_C1,
+ TNETV107X_KEYPAD_C2,
+ TNETV107X_KEYPAD_C3,
+ TNETV107X_KEYPAD_C4,
+ TNETV107X_KEYPAD_C5,
+ TNETV107X_KEYPAD_C6,
+ TNETV107X_GPIO13,
+ TNETV107X_TEST_CLK_IN,
+ TNETV107X_KEYPAD_C7,
+ TNETV107X_GPIO11,
+ TNETV107X_SSP0_0,
+ TNETV107X_SCC_DCLK,
+ TNETV107X_LCD_PD20_1,
+ TNETV107X_SSP0_1,
+ TNETV107X_SCC_CS_N,
+ TNETV107X_LCD_PD21_1,
+ TNETV107X_SSP0_2,
+ TNETV107X_SCC_D,
+ TNETV107X_LCD_PD22_1,
+ TNETV107X_SSP0_3,
+ TNETV107X_SCC_RESETN,
+ TNETV107X_LCD_PD23_1,
+ TNETV107X_SSP1_0,
+ TNETV107X_GPIO25,
+ TNETV107X_UART2_CTS,
+ TNETV107X_SSP1_1,
+ TNETV107X_GPIO26,
+ TNETV107X_UART2_RD,
+ TNETV107X_SSP1_2,
+ TNETV107X_GPIO27,
+ TNETV107X_UART2_RTS,
+ TNETV107X_SSP1_3,
+ TNETV107X_GPIO28,
+ TNETV107X_UART2_TD,
+ TNETV107X_UART0_CTS,
+ TNETV107X_UART0_RD,
+ TNETV107X_UART0_RTS,
+ TNETV107X_UART0_TD,
+ TNETV107X_UART1_RD,
+ TNETV107X_UART1_TD,
+ TNETV107X_LCD_AC_NCS,
+ TNETV107X_LCD_HSYNC_RNW,
+ TNETV107X_LCD_VSYNC_A0,
+ TNETV107X_LCD_MCLK,
+ TNETV107X_LCD_PD16_0,
+ TNETV107X_LCD_PCLK_E,
+ TNETV107X_LCD_PD00,
+ TNETV107X_LCD_PD01,
+ TNETV107X_LCD_PD02,
+ TNETV107X_LCD_PD03,
+ TNETV107X_LCD_PD04,
+ TNETV107X_LCD_PD05,
+ TNETV107X_LCD_PD06,
+ TNETV107X_LCD_PD07,
+ TNETV107X_LCD_PD08,
+ TNETV107X_GPIO59_1,
+ TNETV107X_LCD_PD09,
+ TNETV107X_GPIO60_1,
+ TNETV107X_LCD_PD10,
+ TNETV107X_ASR_BA0_1,
+ TNETV107X_GPIO61_1,
+ TNETV107X_LCD_PD11,
+ TNETV107X_GPIO62_1,
+ TNETV107X_LCD_PD12,
+ TNETV107X_GPIO63_1,
+ TNETV107X_LCD_PD13,
+ TNETV107X_GPIO64_1,
+ TNETV107X_LCD_PD14,
+ TNETV107X_GPIO29_1,
+ TNETV107X_LCD_PD15,
+ TNETV107X_GPIO30_1,
+ TNETV107X_EINT0,
+ TNETV107X_GPIO08,
+ TNETV107X_EINT1,
+ TNETV107X_GPIO09,
+ TNETV107X_GPIO00,
+ TNETV107X_LCD_PD20_2,
+ TNETV107X_TDM_CLK_IN_2,
+ TNETV107X_GPIO01,
+ TNETV107X_LCD_PD21_2,
+ TNETV107X_24M_CLK_OUT_1,
+ TNETV107X_GPIO02,
+ TNETV107X_LCD_PD22_2,
+ TNETV107X_GPIO03,
+ TNETV107X_LCD_PD23_2,
+ TNETV107X_GPIO04,
+ TNETV107X_LCD_PD16_1,
+ TNETV107X_USB0_RXERR,
+ TNETV107X_GPIO05,
+ TNETV107X_LCD_PD17_1,
+ TNETV107X_TDM_CLK_IN_1,
+ TNETV107X_GPIO06,
+ TNETV107X_LCD_PD18,
+ TNETV107X_24M_CLK_OUT_2,
+ TNETV107X_GPIO07,
+ TNETV107X_LCD_PD19_1,
+ TNETV107X_USB1_RXERR,
+ TNETV107X_ETH_PLL_CLK,
+ TNETV107X_MDIO,
+ TNETV107X_MDC,
+ TNETV107X_AIC_MUTE_STAT_N,
+ TNETV107X_TDM0_CLK,
+ TNETV107X_AIC_HNS_EN_N,
+ TNETV107X_TDM0_FS,
+ TNETV107X_AIC_HDS_EN_STAT_N,
+ TNETV107X_TDM0_TX,
+ TNETV107X_AIC_HNF_EN_STAT_N,
+ TNETV107X_TDM0_RX,
+};
+
+#define PINMUX(x) (4 * (x))
+
#ifdef CONFIG_DAVINCI_MUX
/* setup pin muxing */
extern int davinci_cfg_reg(unsigned long reg_cfg);
+extern int davinci_cfg_reg_list(const short pins[]);
#else
/* boot loader does it all (no warnings from CONFIG_DAVINCI_MUX_WARNINGS) */
static inline int davinci_cfg_reg(unsigned long reg_cfg) { return 0; }
+static inline int davinci_cfg_reg_list(const short pins[])
+{
+ return 0;
+}
#endif
#endif /* __INC_MACH_MUX_H */
diff --git a/arch/arm/mach-davinci/include/mach/psc.h b/arch/arm/mach-davinci/include/mach/psc.h
index 651f6d8158fa..983da6e4554c 100644
--- a/arch/arm/mach-davinci/include/mach/psc.h
+++ b/arch/arm/mach-davinci/include/mach/psc.h
@@ -180,6 +180,53 @@
#define DA8XX_LPSC1_CR_P3_SS 26
#define DA8XX_LPSC1_L3_CBA_RAM 31
+/* TNETV107X LPSC Assignments */
+#define TNETV107X_LPSC_ARM 0
+#define TNETV107X_LPSC_GEM 1
+#define TNETV107X_LPSC_DDR2_PHY 2
+#define TNETV107X_LPSC_TPCC 3
+#define TNETV107X_LPSC_TPTC0 4
+#define TNETV107X_LPSC_TPTC1 5
+#define TNETV107X_LPSC_RAM 6
+#define TNETV107X_LPSC_MBX_LITE 7
+#define TNETV107X_LPSC_LCD 8
+#define TNETV107X_LPSC_ETHSS 9
+#define TNETV107X_LPSC_AEMIF 10
+#define TNETV107X_LPSC_CHIP_CFG 11
+#define TNETV107X_LPSC_TSC 12
+#define TNETV107X_LPSC_ROM 13
+#define TNETV107X_LPSC_UART2 14
+#define TNETV107X_LPSC_PKTSEC 15
+#define TNETV107X_LPSC_SECCTL 16
+#define TNETV107X_LPSC_KEYMGR 17
+#define TNETV107X_LPSC_KEYPAD 18
+#define TNETV107X_LPSC_GPIO 19
+#define TNETV107X_LPSC_MDIO 20
+#define TNETV107X_LPSC_SDIO0 21
+#define TNETV107X_LPSC_UART0 22
+#define TNETV107X_LPSC_UART1 23
+#define TNETV107X_LPSC_TIMER0 24
+#define TNETV107X_LPSC_TIMER1 25
+#define TNETV107X_LPSC_WDT_ARM 26
+#define TNETV107X_LPSC_WDT_DSP 27
+#define TNETV107X_LPSC_SSP 28
+#define TNETV107X_LPSC_TDM0 29
+#define TNETV107X_LPSC_VLYNQ 30
+#define TNETV107X_LPSC_MCDMA 31
+#define TNETV107X_LPSC_USB0 32
+#define TNETV107X_LPSC_TDM1 33
+#define TNETV107X_LPSC_DEBUGSS 34
+#define TNETV107X_LPSC_ETHSS_RGMII 35
+#define TNETV107X_LPSC_SYSTEM 36
+#define TNETV107X_LPSC_IMCOP 37
+#define TNETV107X_LPSC_SPARE 38
+#define TNETV107X_LPSC_SDIO1 39
+#define TNETV107X_LPSC_USB1 40
+#define TNETV107X_LPSC_USBSS 41
+#define TNETV107X_LPSC_DDR2_EMIF1_VRST 42
+#define TNETV107X_LPSC_DDR2_EMIF2_VCTL_RST 43
+#define TNETV107X_LPSC_MAX 44
+
/* PSC register offsets */
#define EPCPR 0x070
#define PTCMD 0x120
@@ -189,13 +236,19 @@
#define MDSTAT 0x800
#define MDCTL 0xA00
+/* PSC module states */
+#define PSC_STATE_SWRSTDISABLE 0
+#define PSC_STATE_SYNCRST 1
+#define PSC_STATE_DISABLE 2
+#define PSC_STATE_ENABLE 3
+
#define MDSTAT_STATE_MASK 0x1f
#ifndef __ASSEMBLER__
extern int davinci_psc_is_clk_active(unsigned int ctlr, unsigned int id);
extern void davinci_psc_config(unsigned int domain, unsigned int ctlr,
- unsigned int id, char enable);
+ unsigned int id, u32 next_state);
#endif
diff --git a/arch/arm/mach-davinci/include/mach/serial.h b/arch/arm/mach-davinci/include/mach/serial.h
index a584697a9e70..f6c4f34909a2 100644
--- a/arch/arm/mach-davinci/include/mach/serial.h
+++ b/arch/arm/mach-davinci/include/mach/serial.h
@@ -13,7 +13,6 @@
#include <mach/hardware.h>
-#define DAVINCI_MAX_NR_UARTS 3
#define DAVINCI_UART0_BASE (IO_PHYS + 0x20000)
#define DAVINCI_UART1_BASE (IO_PHYS + 0x20400)
#define DAVINCI_UART2_BASE (IO_PHYS + 0x20800)
diff --git a/arch/arm/mach-davinci/include/mach/system.h b/arch/arm/mach-davinci/include/mach/system.h
index 5a7d7581b8ce..e65629c20769 100644
--- a/arch/arm/mach-davinci/include/mach/system.h
+++ b/arch/arm/mach-davinci/include/mach/system.h
@@ -11,7 +11,7 @@
#ifndef __ASM_ARCH_SYSTEM_H
#define __ASM_ARCH_SYSTEM_H
-extern void davinci_watchdog_reset(void);
+#include <mach/common.h>
static inline void arch_idle(void)
{
@@ -20,7 +20,8 @@ static inline void arch_idle(void)
static inline void arch_reset(char mode, const char *cmd)
{
- davinci_watchdog_reset();
+ if (davinci_soc_info.reset)
+ davinci_soc_info.reset(davinci_soc_info.reset_device);
}
#endif /* __ASM_ARCH_SYSTEM_H */
diff --git a/arch/arm/mach-davinci/mux.c b/arch/arm/mach-davinci/mux.c
index f757e83415f3..e9d530a8f79f 100644
--- a/arch/arm/mach-davinci/mux.c
+++ b/arch/arm/mach-davinci/mux.c
@@ -91,7 +91,7 @@ int __init_or_module davinci_cfg_reg(const unsigned long index)
}
EXPORT_SYMBOL(davinci_cfg_reg);
-int da8xx_pinmux_setup(const short pins[])
+int __init_or_module davinci_cfg_reg_list(const short pins[])
{
int i, error = -EINVAL;
diff --git a/arch/arm/mach-davinci/mux.h b/arch/arm/mach-davinci/mux.h
index adc869413371..5aad1e7dd210 100644
--- a/arch/arm/mach-davinci/mux.h
+++ b/arch/arm/mach-davinci/mux.h
@@ -20,7 +20,7 @@
.name = #desc, \
.debug = dbg, \
.mux_reg_name = "PINMUX"#muxreg, \
- .mux_reg = PINMUX##muxreg, \
+ .mux_reg = PINMUX(muxreg), \
.mask_offset = mode_offset, \
.mask = mode_mask, \
.mode = mux_mode, \
diff --git a/arch/arm/mach-davinci/psc.c b/arch/arm/mach-davinci/psc.c
index adf6b5c7f1e5..d7cb438c4df6 100644
--- a/arch/arm/mach-davinci/psc.c
+++ b/arch/arm/mach-davinci/psc.c
@@ -47,12 +47,11 @@ int __init davinci_psc_is_clk_active(unsigned int ctlr, unsigned int id)
/* Enable or disable a PSC domain */
void davinci_psc_config(unsigned int domain, unsigned int ctlr,
- unsigned int id, char enable)
+ unsigned int id, u32 next_state)
{
u32 epcpr, ptcmd, ptstat, pdstat, pdctl1, mdstat, mdctl;
void __iomem *psc_base;
struct davinci_soc_info *soc_info = &davinci_soc_info;
- u32 next_state = enable ? 0x3 : 0x2; /* 0x3 enables, 0x2 disables */
if (!soc_info->psc_bases || (ctlr >= soc_info->psc_bases_num)) {
pr_warning("PSC: Bad psc data: 0x%x[%d]\n",
diff --git a/arch/arm/mach-davinci/serial.c b/arch/arm/mach-davinci/serial.c
index 7ce5ba086575..1875740fe27c 100644
--- a/arch/arm/mach-davinci/serial.c
+++ b/arch/arm/mach-davinci/serial.c
@@ -35,14 +35,20 @@ static inline unsigned int serial_read_reg(struct plat_serial8250_port *up,
int offset)
{
offset <<= up->regshift;
- return (unsigned int)__raw_readl(IO_ADDRESS(up->mapbase) + offset);
+
+ WARN_ONCE(!up->membase, "unmapped read: uart[%d]\n", offset);
+
+ return (unsigned int)__raw_readl(up->membase + offset);
}
static inline void serial_write_reg(struct plat_serial8250_port *p, int offset,
int value)
{
offset <<= p->regshift;
- __raw_writel(value, IO_ADDRESS(p->mapbase) + offset);
+
+ WARN_ONCE(!p->membase, "unmapped write: uart[%d]\n", offset);
+
+ __raw_writel(value, p->membase + offset);
}
static void __init davinci_serial_reset(struct plat_serial8250_port *p)
@@ -77,20 +83,32 @@ int __init davinci_serial_init(struct davinci_uart_config *info)
* Make sure the serial ports are muxed on at this point.
* You have to mux them off in device drivers later on if not needed.
*/
- for (i = 0; i < DAVINCI_MAX_NR_UARTS; i++, p++) {
+ for (i = 0; p->flags; i++, p++) {
if (!(info->enabled_uarts & (1 << i)))
continue;
sprintf(name, "uart%d", i);
uart_clk = clk_get(dev, name);
- if (IS_ERR(uart_clk))
+ if (IS_ERR(uart_clk)) {
printk(KERN_ERR "%s:%d: failed to get UART%d clock\n",
__func__, __LINE__, i);
- else {
- clk_enable(uart_clk);
- p->uartclk = clk_get_rate(uart_clk);
- davinci_serial_reset(p);
+ continue;
}
+
+ clk_enable(uart_clk);
+ p->uartclk = clk_get_rate(uart_clk);
+
+ if (!p->membase && p->mapbase) {
+ p->membase = ioremap(p->mapbase, SZ_4K);
+
+ if (p->membase)
+ p->flags &= ~UPF_IOREMAP;
+ else
+ pr_err("uart regs ioremap failed\n");
+ }
+
+ if (p->membase && p->type != PORT_AR7)
+ davinci_serial_reset(p);
}
return platform_device_register(soc_info->serial_dev);
diff --git a/arch/arm/mach-davinci/time.c b/arch/arm/mach-davinci/time.c
index 9e0b106b4f5f..e5c598a387be 100644
--- a/arch/arm/mach-davinci/time.c
+++ b/arch/arm/mach-davinci/time.c
@@ -361,13 +361,13 @@ static void __init davinci_timer_init(void)
}
}
- /* init timer hw */
- timer_init();
-
timer_clk = clk_get(NULL, "timer0");
BUG_ON(IS_ERR(timer_clk));
clk_enable(timer_clk);
+ /* init timer hw */
+ timer_init();
+
davinci_clock_tick_rate = clk_get_rate(timer_clk);
/* setup clocksource */
@@ -399,13 +399,16 @@ struct sys_timer davinci_timer = {
/* reset board using watchdog timer */
-void davinci_watchdog_reset(void)
+void davinci_watchdog_reset(struct platform_device *pdev)
{
u32 tgcr, wdtcr;
- struct platform_device *pdev = &davinci_wdt_device;
- void __iomem *base = IO_ADDRESS(pdev->resource[0].start);
+ void __iomem *base;
struct clk *wd_clk;
+ base = ioremap(pdev->resource[0].start, SZ_4K);
+ if (WARN_ON(!base))
+ return;
+
wd_clk = clk_get(&pdev->dev, NULL);
if (WARN_ON(IS_ERR(wd_clk)))
return;
diff --git a/arch/arm/mach-ep93xx/adssphere.c b/arch/arm/mach-ep93xx/adssphere.c
index caf6d5154aec..3a1a855bfdca 100644
--- a/arch/arm/mach-ep93xx/adssphere.c
+++ b/arch/arm/mach-ep93xx/adssphere.c
@@ -41,7 +41,7 @@ static struct platform_device adssphere_flash = {
.resource = &adssphere_flash_resource,
};
-static struct ep93xx_eth_data adssphere_eth_data = {
+static struct ep93xx_eth_data __initdata adssphere_eth_data = {
.phy_id = 1,
};
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index 90fb591cbffa..8d3f77e9fa86 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -222,6 +222,20 @@ void ep93xx_devcfg_set_clear(unsigned int set_bits, unsigned int clear_bits)
}
EXPORT_SYMBOL(ep93xx_devcfg_set_clear);
+/**
+ * ep93xx_chip_revision() - returns the EP93xx chip revision
+ *
+ * See <mach/platform.h> for more information.
+ */
+unsigned int ep93xx_chip_revision(void)
+{
+ unsigned int v;
+
+ v = __raw_readl(EP93XX_SYSCON_SYSCFG);
+ v &= EP93XX_SYSCON_SYSCFG_REV_MASK;
+ v >>= EP93XX_SYSCON_SYSCFG_REV_SHIFT;
+ return v;
+}
/*************************************************************************
* EP93xx peripheral handling
@@ -330,6 +344,10 @@ static struct platform_device ep93xx_ohci_device = {
.resource = ep93xx_ohci_resources,
};
+
+/*************************************************************************
+ * EP93xx ethernet peripheral handling
+ *************************************************************************/
static struct ep93xx_eth_data ep93xx_eth_data;
static struct resource ep93xx_eth_resource[] = {
@@ -354,6 +372,12 @@ static struct platform_device ep93xx_eth_device = {
.resource = ep93xx_eth_resource,
};
+/**
+ * ep93xx_register_eth - Register the built-in ethernet platform device.
+ * @data: platform specific ethernet configuration (__initdata)
+ * @copy_addr: flag indicating that the MAC address should be copied
+ * from the IndAd registers (as programmed by the bootloader)
+ */
void __init ep93xx_register_eth(struct ep93xx_eth_data *data, int copy_addr)
{
if (copy_addr)
@@ -370,11 +394,19 @@ void __init ep93xx_register_eth(struct ep93xx_eth_data *data, int copy_addr)
static struct i2c_gpio_platform_data ep93xx_i2c_data;
static struct platform_device ep93xx_i2c_device = {
- .name = "i2c-gpio",
- .id = 0,
- .dev.platform_data = &ep93xx_i2c_data,
+ .name = "i2c-gpio",
+ .id = 0,
+ .dev = {
+ .platform_data = &ep93xx_i2c_data,
+ },
};
+/**
+ * ep93xx_register_i2c - Register the i2c platform device.
+ * @data: platform specific i2c-gpio configuration (__initdata)
+ * @devices: platform specific i2c bus device information (__initdata)
+ * @num: the number of devices on the i2c bus
+ */
void __init ep93xx_register_i2c(struct i2c_gpio_platform_data *data,
struct i2c_board_info *devices, int num)
{
@@ -404,11 +436,11 @@ void __init ep93xx_register_i2c(struct i2c_gpio_platform_data *data,
*************************************************************************/
static struct gpio_led ep93xx_led_pins[] = {
{
- .name = "platform:grled",
- .gpio = EP93XX_GPIO_LINE_GRLED,
+ .name = "platform:grled",
+ .gpio = EP93XX_GPIO_LINE_GRLED,
}, {
- .name = "platform:rdled",
- .gpio = EP93XX_GPIO_LINE_RDLED,
+ .name = "platform:rdled",
+ .gpio = EP93XX_GPIO_LINE_RDLED,
},
};
@@ -528,7 +560,7 @@ static struct platform_device ep93xx_fb_device = {
.name = "ep93xx-fb",
.id = -1,
.dev = {
- .platform_data = &ep93xxfb_data,
+ .platform_data = &ep93xxfb_data,
.coherent_dma_mask = DMA_BIT_MASK(32),
.dma_mask = &ep93xx_fb_device.dev.coherent_dma_mask,
},
@@ -536,6 +568,10 @@ static struct platform_device ep93xx_fb_device = {
.resource = ep93xx_fb_resource,
};
+/**
+ * ep93xx_register_fb - Register the framebuffer platform device.
+ * @data: platform specific framebuffer configuration (__initdata)
+ */
void __init ep93xx_register_fb(struct ep93xxfb_mach_info *data)
{
ep93xxfb_data = *data;
@@ -546,6 +582,8 @@ void __init ep93xx_register_fb(struct ep93xxfb_mach_info *data)
/*************************************************************************
* EP93xx matrix keypad peripheral handling
*************************************************************************/
+static struct ep93xx_keypad_platform_data ep93xx_keypad_data;
+
static struct resource ep93xx_keypad_resource[] = {
{
.start = EP93XX_KEY_MATRIX_PHYS_BASE,
@@ -559,15 +597,22 @@ static struct resource ep93xx_keypad_resource[] = {
};
static struct platform_device ep93xx_keypad_device = {
- .name = "ep93xx-keypad",
- .id = -1,
- .num_resources = ARRAY_SIZE(ep93xx_keypad_resource),
- .resource = ep93xx_keypad_resource,
+ .name = "ep93xx-keypad",
+ .id = -1,
+ .dev = {
+ .platform_data = &ep93xx_keypad_data,
+ },
+ .num_resources = ARRAY_SIZE(ep93xx_keypad_resource),
+ .resource = ep93xx_keypad_resource,
};
+/**
+ * ep93xx_register_keypad - Register the keypad platform device.
+ * @data: platform specific keypad configuration (__initdata)
+ */
void __init ep93xx_register_keypad(struct ep93xx_keypad_platform_data *data)
{
- ep93xx_keypad_device.dev.platform_data = data;
+ ep93xx_keypad_data = *data;
platform_device_register(&ep93xx_keypad_device);
}
diff --git a/arch/arm/mach-ep93xx/edb93xx.c b/arch/arm/mach-ep93xx/edb93xx.c
index d22d67ac8b99..3884182cd362 100644
--- a/arch/arm/mach-ep93xx/edb93xx.c
+++ b/arch/arm/mach-ep93xx/edb93xx.c
@@ -74,7 +74,7 @@ static void __init edb93xx_register_flash(void)
}
}
-static struct ep93xx_eth_data edb93xx_eth_data = {
+static struct ep93xx_eth_data __initdata edb93xx_eth_data = {
.phy_id = 1,
};
@@ -82,7 +82,7 @@ static struct ep93xx_eth_data edb93xx_eth_data = {
/*************************************************************************
* EDB93xx i2c peripheral handling
*************************************************************************/
-static struct i2c_gpio_platform_data edb93xx_i2c_gpio_data = {
+static struct i2c_gpio_platform_data __initdata edb93xx_i2c_gpio_data = {
.sda_pin = EP93XX_GPIO_LINE_EEDAT,
.sda_is_open_drain = 0,
.scl_pin = EP93XX_GPIO_LINE_EECLK,
diff --git a/arch/arm/mach-ep93xx/gesbc9312.c b/arch/arm/mach-ep93xx/gesbc9312.c
index 3da7ca816d19..a809618e9f05 100644
--- a/arch/arm/mach-ep93xx/gesbc9312.c
+++ b/arch/arm/mach-ep93xx/gesbc9312.c
@@ -41,7 +41,7 @@ static struct platform_device gesbc9312_flash = {
.resource = &gesbc9312_flash_resource,
};
-static struct ep93xx_eth_data gesbc9312_eth_data = {
+static struct ep93xx_eth_data __initdata gesbc9312_eth_data = {
.phy_id = 1,
};
diff --git a/arch/arm/mach-ep93xx/include/mach/platform.h b/arch/arm/mach-ep93xx/include/mach/platform.h
index c6dc14dbca18..b663390b4d87 100644
--- a/arch/arm/mach-ep93xx/include/mach/platform.h
+++ b/arch/arm/mach-ep93xx/include/mach/platform.h
@@ -33,6 +33,14 @@ static inline void ep93xx_devcfg_clear_bits(unsigned int bits)
ep93xx_devcfg_set_clear(0x00, bits);
}
+#define EP93XX_CHIP_REV_D0 3
+#define EP93XX_CHIP_REV_D1 4
+#define EP93XX_CHIP_REV_E0 5
+#define EP93XX_CHIP_REV_E1 6
+#define EP93XX_CHIP_REV_E2 7
+
+unsigned int ep93xx_chip_revision(void);
+
void ep93xx_register_eth(struct ep93xx_eth_data *data, int copy_addr);
void ep93xx_register_i2c(struct i2c_gpio_platform_data *data,
struct i2c_board_info *devices, int num);
diff --git a/arch/arm/mach-ep93xx/include/mach/ts72xx.h b/arch/arm/mach-ep93xx/include/mach/ts72xx.h
index 93107d88ff3a..0eabec62cd9d 100644
--- a/arch/arm/mach-ep93xx/include/mach/ts72xx.h
+++ b/arch/arm/mach-ep93xx/include/mach/ts72xx.h
@@ -9,9 +9,6 @@
* febff000 22000000 4K model number register
* febfe000 22400000 4K options register
* febfd000 22800000 4K options register #2
- * febfc000 [67]0000000 4K NAND data register
- * febfb000 [67]0400000 4K NAND control register
- * febfa000 [67]0800000 4K NAND busy register
* febf9000 10800000 4K TS-5620 RTC index register
* febf8000 11700000 4K TS-5620 RTC data register
*/
@@ -41,22 +38,6 @@
#define TS72XX_OPTIONS2_TS9420_BOOT 0x02
-#define TS72XX_NAND1_DATA_PHYS_BASE 0x60000000
-#define TS72XX_NAND2_DATA_PHYS_BASE 0x70000000
-#define TS72XX_NAND_DATA_VIRT_BASE 0xfebfc000
-#define TS72XX_NAND_DATA_SIZE 0x00001000
-
-#define TS72XX_NAND1_CONTROL_PHYS_BASE 0x60400000
-#define TS72XX_NAND2_CONTROL_PHYS_BASE 0x70400000
-#define TS72XX_NAND_CONTROL_VIRT_BASE 0xfebfb000
-#define TS72XX_NAND_CONTROL_SIZE 0x00001000
-
-#define TS72XX_NAND1_BUSY_PHYS_BASE 0x60800000
-#define TS72XX_NAND2_BUSY_PHYS_BASE 0x70800000
-#define TS72XX_NAND_BUSY_VIRT_BASE 0xfebfa000
-#define TS72XX_NAND_BUSY_SIZE 0x00001000
-
-
#define TS72XX_RTC_INDEX_VIRT_BASE 0xfebf9000
#define TS72XX_RTC_INDEX_PHYS_BASE 0x10800000
#define TS72XX_RTC_INDEX_SIZE 0x00001000
diff --git a/arch/arm/mach-ep93xx/micro9.c b/arch/arm/mach-ep93xx/micro9.c
index c33360e82868..1cc911b4efa6 100644
--- a/arch/arm/mach-ep93xx/micro9.c
+++ b/arch/arm/mach-ep93xx/micro9.c
@@ -80,7 +80,7 @@ static void __init micro9_register_flash(void)
/*************************************************************************
* Micro9 Ethernet
*************************************************************************/
-static struct ep93xx_eth_data micro9_eth_data = {
+static struct ep93xx_eth_data __initdata micro9_eth_data = {
.phy_id = 0x1f,
};
diff --git a/arch/arm/mach-ep93xx/simone.c b/arch/arm/mach-ep93xx/simone.c
index cd93990f1b99..388aec95f60e 100644
--- a/arch/arm/mach-ep93xx/simone.c
+++ b/arch/arm/mach-ep93xx/simone.c
@@ -49,17 +49,17 @@ static struct platform_device simone_flash = {
},
};
-static struct ep93xx_eth_data simone_eth_data = {
+static struct ep93xx_eth_data __initdata simone_eth_data = {
.phy_id = 1,
};
-static struct ep93xxfb_mach_info simone_fb_info = {
+static struct ep93xxfb_mach_info __initdata simone_fb_info = {
.num_modes = EP93XXFB_USE_MODEDB,
.bpp = 16,
.flags = EP93XXFB_USE_SDCSN0 | EP93XXFB_PCLK_FALLING,
};
-static struct i2c_gpio_platform_data simone_i2c_gpio_data = {
+static struct i2c_gpio_platform_data __initdata simone_i2c_gpio_data = {
.sda_pin = EP93XX_GPIO_LINE_EEDAT,
.sda_is_open_drain = 0,
.scl_pin = EP93XX_GPIO_LINE_EECLK,
diff --git a/arch/arm/mach-ep93xx/snappercl15.c b/arch/arm/mach-ep93xx/snappercl15.c
index 51134b0382ca..38deaee40397 100644
--- a/arch/arm/mach-ep93xx/snappercl15.c
+++ b/arch/arm/mach-ep93xx/snappercl15.c
@@ -125,11 +125,11 @@ static struct platform_device snappercl15_nand_device = {
.num_resources = ARRAY_SIZE(snappercl15_nand_resource),
};
-static struct ep93xx_eth_data snappercl15_eth_data = {
+static struct ep93xx_eth_data __initdata snappercl15_eth_data = {
.phy_id = 1,
};
-static struct i2c_gpio_platform_data snappercl15_i2c_gpio_data = {
+static struct i2c_gpio_platform_data __initdata snappercl15_i2c_gpio_data = {
.sda_pin = EP93XX_GPIO_LINE_EEDAT,
.sda_is_open_drain = 0,
.scl_pin = EP93XX_GPIO_LINE_EECLK,
@@ -145,7 +145,7 @@ static struct i2c_board_info __initdata snappercl15_i2c_data[] = {
},
};
-static struct ep93xxfb_mach_info snappercl15_fb_info = {
+static struct ep93xxfb_mach_info __initdata snappercl15_fb_info = {
.num_modes = EP93XXFB_USE_MODEDB,
.bpp = 16,
};
diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c
index fac1ec7a60fb..ae7319e588c7 100644
--- a/arch/arm/mach-ep93xx/ts72xx.c
+++ b/arch/arm/mach-ep93xx/ts72xx.c
@@ -10,12 +10,16 @@
* your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/m48t86.h>
#include <linux/mtd/physmap.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
#include <mach/hardware.h>
#include <mach/ts72xx.h>
@@ -54,92 +58,162 @@ static struct map_desc ts72xx_io_desc[] __initdata = {
}
};
-static struct map_desc ts72xx_nand_io_desc[] __initdata = {
- {
- .virtual = TS72XX_NAND_DATA_VIRT_BASE,
- .pfn = __phys_to_pfn(TS72XX_NAND1_DATA_PHYS_BASE),
- .length = TS72XX_NAND_DATA_SIZE,
- .type = MT_DEVICE,
- }, {
- .virtual = TS72XX_NAND_CONTROL_VIRT_BASE,
- .pfn = __phys_to_pfn(TS72XX_NAND1_CONTROL_PHYS_BASE),
- .length = TS72XX_NAND_CONTROL_SIZE,
- .type = MT_DEVICE,
- }, {
- .virtual = TS72XX_NAND_BUSY_VIRT_BASE,
- .pfn = __phys_to_pfn(TS72XX_NAND1_BUSY_PHYS_BASE),
- .length = TS72XX_NAND_BUSY_SIZE,
- .type = MT_DEVICE,
+static void __init ts72xx_map_io(void)
+{
+ ep93xx_map_io();
+ iotable_init(ts72xx_io_desc, ARRAY_SIZE(ts72xx_io_desc));
+}
+
+
+/*************************************************************************
+ * NAND flash
+ *************************************************************************/
+#define TS72XX_NAND_CONTROL_ADDR_LINE 22 /* 0xN0400000 */
+#define TS72XX_NAND_BUSY_ADDR_LINE 23 /* 0xN0800000 */
+
+static void ts72xx_nand_hwcontrol(struct mtd_info *mtd,
+ int cmd, unsigned int ctrl)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ void __iomem *addr = chip->IO_ADDR_R;
+ unsigned char bits;
+
+ addr += (1 << TS72XX_NAND_CONTROL_ADDR_LINE);
+
+ bits = __raw_readb(addr) & ~0x07;
+ bits |= (ctrl & NAND_NCE) << 2; /* bit 0 -> bit 2 */
+ bits |= (ctrl & NAND_CLE); /* bit 1 -> bit 1 */
+ bits |= (ctrl & NAND_ALE) >> 2; /* bit 2 -> bit 0 */
+
+ __raw_writeb(bits, addr);
}
-};
-static struct map_desc ts72xx_alternate_nand_io_desc[] __initdata = {
+ if (cmd != NAND_CMD_NONE)
+ __raw_writeb(cmd, chip->IO_ADDR_W);
+}
+
+static int ts72xx_nand_device_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ void __iomem *addr = chip->IO_ADDR_R;
+
+ addr += (1 << TS72XX_NAND_BUSY_ADDR_LINE);
+
+ return !!(__raw_readb(addr) & 0x20);
+}
+
+static const char *ts72xx_nand_part_probes[] = { "cmdlinepart", NULL };
+
+#define TS72XX_BOOTROM_PART_SIZE (SZ_16K)
+#define TS72XX_REDBOOT_PART_SIZE (SZ_2M + SZ_1M)
+
+static struct mtd_partition ts72xx_nand_parts[] = {
{
- .virtual = TS72XX_NAND_DATA_VIRT_BASE,
- .pfn = __phys_to_pfn(TS72XX_NAND2_DATA_PHYS_BASE),
- .length = TS72XX_NAND_DATA_SIZE,
- .type = MT_DEVICE,
+ .name = "TS-BOOTROM",
+ .offset = 0,
+ .size = TS72XX_BOOTROM_PART_SIZE,
+ .mask_flags = MTD_WRITEABLE, /* force read-only */
}, {
- .virtual = TS72XX_NAND_CONTROL_VIRT_BASE,
- .pfn = __phys_to_pfn(TS72XX_NAND2_CONTROL_PHYS_BASE),
- .length = TS72XX_NAND_CONTROL_SIZE,
- .type = MT_DEVICE,
+ .name = "Linux",
+ .offset = MTDPART_OFS_APPEND,
+ .size = 0, /* filled in later */
}, {
- .virtual = TS72XX_NAND_BUSY_VIRT_BASE,
- .pfn = __phys_to_pfn(TS72XX_NAND2_BUSY_PHYS_BASE),
- .length = TS72XX_NAND_BUSY_SIZE,
- .type = MT_DEVICE,
- }
+ .name = "RedBoot",
+ .offset = MTDPART_OFS_APPEND,
+ .size = MTDPART_SIZ_FULL,
+ .mask_flags = MTD_WRITEABLE, /* force read-only */
+ },
};
-static void __init ts72xx_map_io(void)
+static void ts72xx_nand_set_parts(uint64_t size,
+ struct platform_nand_chip *chip)
{
- ep93xx_map_io();
- iotable_init(ts72xx_io_desc, ARRAY_SIZE(ts72xx_io_desc));
+ /* Factory TS-72xx boards only come with 32MiB or 128MiB NAND options */
+ if (size == SZ_32M || size == SZ_128M) {
+ /* Set the "Linux" partition size */
+ ts72xx_nand_parts[1].size = size - TS72XX_REDBOOT_PART_SIZE;
- /*
- * The TS-7200 has NOR flash, the other models have NAND flash.
- */
- if (!board_is_ts7200()) {
- if (is_ts9420_installed()) {
- iotable_init(ts72xx_alternate_nand_io_desc,
- ARRAY_SIZE(ts72xx_alternate_nand_io_desc));
- } else {
- iotable_init(ts72xx_nand_io_desc,
- ARRAY_SIZE(ts72xx_nand_io_desc));
- }
+ chip->partitions = ts72xx_nand_parts;
+ chip->nr_partitions = ARRAY_SIZE(ts72xx_nand_parts);
+ } else {
+ pr_warning("Unknown nand disk size:%lluMiB\n", size >> 20);
}
}
+static struct platform_nand_data ts72xx_nand_data = {
+ .chip = {
+ .nr_chips = 1,
+ .chip_offset = 0,
+ .chip_delay = 15,
+ .part_probe_types = ts72xx_nand_part_probes,
+ .set_parts = ts72xx_nand_set_parts,
+ },
+ .ctrl = {
+ .cmd_ctrl = ts72xx_nand_hwcontrol,
+ .dev_ready = ts72xx_nand_device_ready,
+ },
+};
+
+static struct resource ts72xx_nand_resource[] = {
+ {
+ .start = 0, /* filled in later */
+ .end = 0, /* filled in later */
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device ts72xx_nand_flash = {
+ .name = "gen_nand",
+ .id = -1,
+ .dev.platform_data = &ts72xx_nand_data,
+ .resource = ts72xx_nand_resource,
+ .num_resources = ARRAY_SIZE(ts72xx_nand_resource),
+};
+
+
/*************************************************************************
* NOR flash (TS-7200 only)
*************************************************************************/
-static struct physmap_flash_data ts72xx_flash_data = {
+static struct physmap_flash_data ts72xx_nor_data = {
.width = 2,
};
-static struct resource ts72xx_flash_resource = {
+static struct resource ts72xx_nor_resource = {
.start = EP93XX_CS6_PHYS_BASE,
.end = EP93XX_CS6_PHYS_BASE + SZ_16M - 1,
.flags = IORESOURCE_MEM,
};
-static struct platform_device ts72xx_flash = {
- .name = "physmap-flash",
- .id = 0,
- .dev = {
- .platform_data = &ts72xx_flash_data,
- },
- .num_resources = 1,
- .resource = &ts72xx_flash_resource,
+static struct platform_device ts72xx_nor_flash = {
+ .name = "physmap-flash",
+ .id = 0,
+ .dev.platform_data = &ts72xx_nor_data,
+ .resource = &ts72xx_nor_resource,
+ .num_resources = 1,
};
static void __init ts72xx_register_flash(void)
{
- if (board_is_ts7200())
- platform_device_register(&ts72xx_flash);
+ if (board_is_ts7200()) {
+ platform_device_register(&ts72xx_nor_flash);
+ } else {
+ resource_size_t start;
+
+ if (is_ts9420_installed())
+ start = EP93XX_CS7_PHYS_BASE;
+ else
+ start = EP93XX_CS6_PHYS_BASE;
+
+ ts72xx_nand_resource[0].start = start;
+ ts72xx_nand_resource[0].end = start + SZ_16M - 1;
+
+ platform_device_register(&ts72xx_nand_flash);
+ }
}
+
static unsigned char ts72xx_rtc_readbyte(unsigned long addr)
{
__raw_writeb(addr, TS72XX_RTC_INDEX_VIRT_BASE);
@@ -186,7 +260,7 @@ static struct platform_device ts72xx_wdt_device = {
.resource = ts72xx_wdt_resources,
};
-static struct ep93xx_eth_data ts72xx_eth_data = {
+static struct ep93xx_eth_data __initdata ts72xx_eth_data = {
.phy_id = 1,
};
diff --git a/arch/arm/mach-integrator/Kconfig b/arch/arm/mach-integrator/Kconfig
index df97d16390e3..27db275b367c 100644
--- a/arch/arm/mach-integrator/Kconfig
+++ b/arch/arm/mach-integrator/Kconfig
@@ -11,6 +11,7 @@ config ARCH_INTEGRATOR_AP
config ARCH_INTEGRATOR_CP
bool "Support Integrator/CP platform"
select ARCH_CINTEGRATOR
+ select ARM_TIMER_SP804
help
Include support for the ARM(R) Integrator CP platform.
diff --git a/arch/arm/mach-integrator/Makefile b/arch/arm/mach-integrator/Makefile
index 6a5ef8d30b10..ebeef966e1f5 100644
--- a/arch/arm/mach-integrator/Makefile
+++ b/arch/arm/mach-integrator/Makefile
@@ -4,7 +4,7 @@
# Object file lists.
-obj-y := clock.o core.o lm.o
+obj-y := core.o lm.o
obj-$(CONFIG_ARCH_INTEGRATOR_AP) += integrator_ap.o
obj-$(CONFIG_ARCH_INTEGRATOR_CP) += integrator_cp.o
diff --git a/arch/arm/mach-integrator/common.h b/arch/arm/mach-integrator/common.h
deleted file mode 100644
index 609c49de3d47..000000000000
--- a/arch/arm/mach-integrator/common.h
+++ /dev/null
@@ -1,2 +0,0 @@
-extern void integrator_time_init(unsigned long, unsigned int);
-extern unsigned long integrator_gettimeoffset(void);
diff --git a/arch/arm/mach-integrator/core.c b/arch/arm/mach-integrator/core.c
index 8b390e36ba69..b02cfc06e0ae 100644
--- a/arch/arm/mach-integrator/core.c
+++ b/arch/arm/mach-integrator/core.c
@@ -24,15 +24,13 @@
#include <asm/clkdev.h>
#include <mach/clkdev.h>
#include <mach/hardware.h>
+#include <mach/platform.h>
#include <asm/irq.h>
-#include <asm/hardware/arm_timer.h>
#include <mach/cm.h>
#include <asm/system.h>
#include <asm/leds.h>
#include <asm/mach/time.h>
-#include "common.h"
-
static struct amba_pl010_data integrator_uart_data;
static struct amba_device rtc_device = {
@@ -163,8 +161,8 @@ arch_initcall(integrator_init);
* UART0 7 6
* UART1 5 4
*/
-#define SC_CTRLC (IO_ADDRESS(INTEGRATOR_SC_BASE) + INTEGRATOR_SC_CTRLC_OFFSET)
-#define SC_CTRLS (IO_ADDRESS(INTEGRATOR_SC_BASE) + INTEGRATOR_SC_CTRLS_OFFSET)
+#define SC_CTRLC IO_ADDRESS(INTEGRATOR_SC_CTRLC)
+#define SC_CTRLS IO_ADDRESS(INTEGRATOR_SC_CTRLS)
static void integrator_uart_set_mctrl(struct amba_device *dev, void __iomem *base, unsigned int mctrl)
{
@@ -196,7 +194,7 @@ static struct amba_pl010_data integrator_uart_data = {
.set_mctrl = integrator_uart_set_mctrl,
};
-#define CM_CTRL IO_ADDRESS(INTEGRATOR_HDR_BASE) + INTEGRATOR_HDR_CTRL_OFFSET
+#define CM_CTRL IO_ADDRESS(INTEGRATOR_HDR_CTRL)
static DEFINE_SPINLOCK(cm_lock);
@@ -217,120 +215,3 @@ void cm_control(u32 mask, u32 set)
}
EXPORT_SYMBOL(cm_control);
-
-/*
- * Where is the timer (VA)?
- */
-#define TIMER0_VA_BASE (IO_ADDRESS(INTEGRATOR_CT_BASE)+0x00000000)
-#define TIMER1_VA_BASE (IO_ADDRESS(INTEGRATOR_CT_BASE)+0x00000100)
-#define TIMER2_VA_BASE (IO_ADDRESS(INTEGRATOR_CT_BASE)+0x00000200)
-#define VA_IC_BASE IO_ADDRESS(INTEGRATOR_IC_BASE)
-
-/*
- * How long is the timer interval?
- */
-#define TIMER_INTERVAL (TICKS_PER_uSEC * mSEC_10)
-#if TIMER_INTERVAL >= 0x100000
-#define TICKS2USECS(x) (256 * (x) / TICKS_PER_uSEC)
-#elif TIMER_INTERVAL >= 0x10000
-#define TICKS2USECS(x) (16 * (x) / TICKS_PER_uSEC)
-#else
-#define TICKS2USECS(x) ((x) / TICKS_PER_uSEC)
-#endif
-
-static unsigned long timer_reload;
-
-/*
- * Returns number of ms since last clock interrupt. Note that interrupts
- * will have been disabled by do_gettimeoffset()
- */
-unsigned long integrator_gettimeoffset(void)
-{
- unsigned long ticks1, ticks2, status;
-
- /*
- * Get the current number of ticks. Note that there is a race
- * condition between us reading the timer and checking for
- * an interrupt. We get around this by ensuring that the
- * counter has not reloaded between our two reads.
- */
- ticks2 = readl(TIMER1_VA_BASE + TIMER_VALUE) & 0xffff;
- do {
- ticks1 = ticks2;
- status = __raw_readl(VA_IC_BASE + IRQ_RAW_STATUS);
- ticks2 = readl(TIMER1_VA_BASE + TIMER_VALUE) & 0xffff;
- } while (ticks2 > ticks1);
-
- /*
- * Number of ticks since last interrupt.
- */
- ticks1 = timer_reload - ticks2;
-
- /*
- * Interrupt pending? If so, we've reloaded once already.
- */
- if (status & (1 << IRQ_TIMERINT1))
- ticks1 += timer_reload;
-
- /*
- * Convert the ticks to usecs
- */
- return TICKS2USECS(ticks1);
-}
-
-/*
- * IRQ handler for the timer
- */
-static irqreturn_t
-integrator_timer_interrupt(int irq, void *dev_id)
-{
- /*
- * clear the interrupt
- */
- writel(1, TIMER1_VA_BASE + TIMER_INTCLR);
-
- timer_tick();
-
- return IRQ_HANDLED;
-}
-
-static struct irqaction integrator_timer_irq = {
- .name = "Integrator Timer Tick",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
- .handler = integrator_timer_interrupt,
-};
-
-/*
- * Set up timer interrupt, and return the current time in seconds.
- */
-void __init integrator_time_init(unsigned long reload, unsigned int ctrl)
-{
- unsigned int timer_ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC;
-
- timer_reload = reload;
- timer_ctrl |= ctrl;
-
- if (timer_reload > 0x100000) {
- timer_reload >>= 8;
- timer_ctrl |= TIMER_CTRL_DIV256;
- } else if (timer_reload > 0x010000) {
- timer_reload >>= 4;
- timer_ctrl |= TIMER_CTRL_DIV16;
- }
-
- /*
- * Initialise to a known state (all timers off)
- */
- writel(0, TIMER0_VA_BASE + TIMER_CTRL);
- writel(0, TIMER1_VA_BASE + TIMER_CTRL);
- writel(0, TIMER2_VA_BASE + TIMER_CTRL);
-
- writel(timer_reload, TIMER1_VA_BASE + TIMER_LOAD);
- writel(timer_reload, TIMER1_VA_BASE + TIMER_VALUE);
- writel(timer_ctrl, TIMER1_VA_BASE + TIMER_CTRL);
-
- /*
- * Make irqs happen for the system timer
- */
- setup_irq(IRQ_TIMERINT1, &integrator_timer_irq);
-}
diff --git a/arch/arm/mach-integrator/cpu.c b/arch/arm/mach-integrator/cpu.c
index f77f20255045..a3fbcb3adc29 100644
--- a/arch/arm/mach-integrator/cpu.c
+++ b/arch/arm/mach-integrator/cpu.c
@@ -19,32 +19,39 @@
#include <linux/io.h>
#include <mach/hardware.h>
+#include <mach/platform.h>
#include <asm/mach-types.h>
-#include <asm/hardware/icst525.h>
+#include <asm/hardware/icst.h>
static struct cpufreq_driver integrator_driver;
-#define CM_ID (IO_ADDRESS(INTEGRATOR_HDR_BASE)+INTEGRATOR_HDR_ID_OFFSET)
-#define CM_OSC (IO_ADDRESS(INTEGRATOR_HDR_BASE)+INTEGRATOR_HDR_OSC_OFFSET)
-#define CM_STAT (IO_ADDRESS(INTEGRATOR_HDR_BASE)+INTEGRATOR_HDR_STAT_OFFSET)
-#define CM_LOCK (IO_ADDRESS(INTEGRATOR_HDR_BASE)+INTEGRATOR_HDR_LOCK_OFFSET)
+#define CM_ID IO_ADDRESS(INTEGRATOR_HDR_ID)
+#define CM_OSC IO_ADDRESS(INTEGRATOR_HDR_OSC)
+#define CM_STAT IO_ADDRESS(INTEGRATOR_HDR_STAT)
+#define CM_LOCK IO_ADDRESS(INTEGRATOR_HDR_LOCK)
-static const struct icst525_params lclk_params = {
- .ref = 24000,
- .vco_max = 320000,
+static const struct icst_params lclk_params = {
+ .ref = 24000000,
+ .vco_max = ICST525_VCO_MAX_5V,
+ .vco_min = ICST525_VCO_MIN,
.vd_min = 8,
.vd_max = 132,
.rd_min = 24,
.rd_max = 24,
+ .s2div = icst525_s2div,
+ .idx2s = icst525_idx2s,
};
-static const struct icst525_params cclk_params = {
- .ref = 24000,
- .vco_max = 320000,
+static const struct icst_params cclk_params = {
+ .ref = 24000000,
+ .vco_max = ICST525_VCO_MAX_5V,
+ .vco_min = ICST525_VCO_MIN,
.vd_min = 12,
.vd_max = 160,
.rd_min = 24,
.rd_max = 24,
+ .s2div = icst525_s2div,
+ .idx2s = icst525_idx2s,
};
/*
@@ -52,17 +59,17 @@ static const struct icst525_params cclk_params = {
*/
static int integrator_verify_policy(struct cpufreq_policy *policy)
{
- struct icst525_vco vco;
+ struct icst_vco vco;
cpufreq_verify_within_limits(policy,
policy->cpuinfo.min_freq,
policy->cpuinfo.max_freq);
- vco = icst525_khz_to_vco(&cclk_params, policy->max);
- policy->max = icst525_khz(&cclk_params, vco);
+ vco = icst_hz_to_vco(&cclk_params, policy->max * 1000);
+ policy->max = icst_hz(&cclk_params, vco) / 1000;
- vco = icst525_khz_to_vco(&cclk_params, policy->min);
- policy->min = icst525_khz(&cclk_params, vco);
+ vco = icst_hz_to_vco(&cclk_params, policy->min * 1000);
+ policy->min = icst_hz(&cclk_params, vco) / 1000;
cpufreq_verify_within_limits(policy,
policy->cpuinfo.min_freq,
@@ -78,7 +85,7 @@ static int integrator_set_target(struct cpufreq_policy *policy,
{
cpumask_t cpus_allowed;
int cpu = policy->cpu;
- struct icst525_vco vco;
+ struct icst_vco vco;
struct cpufreq_freqs freqs;
u_int cm_osc;
@@ -104,17 +111,17 @@ static int integrator_set_target(struct cpufreq_policy *policy,
}
vco.v = cm_osc & 255;
vco.r = 22;
- freqs.old = icst525_khz(&cclk_params, vco);
+ freqs.old = icst_hz(&cclk_params, vco) / 1000;
- /* icst525_khz_to_vco rounds down -- so we need the next
+ /* icst_hz_to_vco rounds down -- so we need the next
* larger freq in case of CPUFREQ_RELATION_L.
*/
if (relation == CPUFREQ_RELATION_L)
target_freq += 999;
if (target_freq > policy->max)
target_freq = policy->max;
- vco = icst525_khz_to_vco(&cclk_params, target_freq);
- freqs.new = icst525_khz(&cclk_params, vco);
+ vco = icst_hz_to_vco(&cclk_params, target_freq * 1000);
+ freqs.new = icst_hz(&cclk_params, vco) / 1000;
freqs.cpu = policy->cpu;
@@ -154,7 +161,7 @@ static unsigned int integrator_get(unsigned int cpu)
cpumask_t cpus_allowed;
unsigned int current_freq;
u_int cm_osc;
- struct icst525_vco vco;
+ struct icst_vco vco;
cpus_allowed = current->cpus_allowed;
@@ -172,7 +179,7 @@ static unsigned int integrator_get(unsigned int cpu)
vco.v = cm_osc & 255;
vco.r = 22;
- current_freq = icst525_khz(&cclk_params, vco); /* current freq */
+ current_freq = icst_hz(&cclk_params, vco) / 1000; /* current freq */
set_cpus_allowed(current, cpus_allowed);
diff --git a/arch/arm/mach-integrator/impd1.c b/arch/arm/mach-integrator/impd1.c
index 41b10725cef7..fd684bf205e5 100644
--- a/arch/arm/mach-integrator/impd1.c
+++ b/arch/arm/mach-integrator/impd1.c
@@ -25,7 +25,7 @@
#include <asm/clkdev.h>
#include <mach/clkdev.h>
-#include <asm/hardware/icst525.h>
+#include <asm/hardware/icst.h>
#include <mach/lm.h>
#include <mach/impd1.h>
#include <asm/sizes.h>
@@ -41,32 +41,25 @@ struct impd1_module {
struct clk_lookup *clks[3];
};
-static const struct icst525_params impd1_vco_params = {
- .ref = 24000, /* 24 MHz */
- .vco_max = 200000, /* 200 MHz */
+static const struct icst_params impd1_vco_params = {
+ .ref = 24000000, /* 24 MHz */
+ .vco_max = ICST525_VCO_MAX_3V,
+ .vco_min = ICST525_VCO_MIN,
.vd_min = 12,
.vd_max = 519,
.rd_min = 3,
.rd_max = 120,
+ .s2div = icst525_s2div,
+ .idx2s = icst525_idx2s,
};
-static void impd1_setvco(struct clk *clk, struct icst525_vco vco)
+static void impd1_setvco(struct clk *clk, struct icst_vco vco)
{
struct impd1_module *impd1 = clk->data;
- int vconr = clk - impd1->vcos;
- u32 val;
-
- val = vco.v | (vco.r << 9) | (vco.s << 16);
+ u32 val = vco.v | (vco.r << 9) | (vco.s << 16);
writel(0xa05f, impd1->base + IMPD1_LOCK);
- switch (vconr) {
- case 0:
- writel(val, impd1->base + IMPD1_OSC1);
- break;
- case 1:
- writel(val, impd1->base + IMPD1_OSC2);
- break;
- }
+ writel(val, clk->vcoreg);
writel(0, impd1->base + IMPD1_LOCK);
#ifdef DEBUG
@@ -74,11 +67,17 @@ static void impd1_setvco(struct clk *clk, struct icst525_vco vco)
vco.r = (val >> 9) & 0x7f;
vco.s = (val >> 16) & 7;
- pr_debug("IM-PD1: VCO%d clock is %ld kHz\n",
- vconr, icst525_khz(&impd1_vco_params, vco));
+ pr_debug("IM-PD1: VCO%d clock is %ld Hz\n",
+ vconr, icst525_hz(&impd1_vco_params, vco));
#endif
}
+static const struct clk_ops impd1_clk_ops = {
+ .round = icst_clk_round,
+ .set = icst_clk_set,
+ .setvco = impd1_setvco,
+};
+
void impd1_tweak_control(struct device *dev, u32 mask, u32 val)
{
struct impd1_module *impd1 = dev_get_drvdata(dev);
@@ -374,11 +373,13 @@ static int impd1_probe(struct lm_device *dev)
(unsigned long)dev->resource.start);
for (i = 0; i < ARRAY_SIZE(impd1->vcos); i++) {
+ impd1->vcos[i].ops = &impd1_clk_ops,
impd1->vcos[i].owner = THIS_MODULE,
impd1->vcos[i].params = &impd1_vco_params,
- impd1->vcos[i].data = impd1,
- impd1->vcos[i].setvco = impd1_setvco;
+ impd1->vcos[i].data = impd1;
}
+ impd1->vcos[0].vcoreg = impd1->base + IMPD1_OSC1;
+ impd1->vcos[1].vcoreg = impd1->base + IMPD1_OSC2;
impd1->clks[0] = clkdev_alloc(&impd1->vcos[0], NULL, "lm%x:01000",
dev->id);
diff --git a/arch/arm/mach-integrator/include/mach/clkdev.h b/arch/arm/mach-integrator/include/mach/clkdev.h
index 9293e410832a..bfe07679faec 100644
--- a/arch/arm/mach-integrator/include/mach/clkdev.h
+++ b/arch/arm/mach-integrator/include/mach/clkdev.h
@@ -2,14 +2,15 @@
#define __ASM_MACH_CLKDEV_H
#include <linux/module.h>
-#include <asm/hardware/icst525.h>
+#include <plat/clock.h>
struct clk {
unsigned long rate;
+ const struct clk_ops *ops;
struct module *owner;
- const struct icst525_params *params;
+ const struct icst_params *params;
+ void __iomem *vcoreg;
void *data;
- void (*setvco)(struct clk *, struct icst525_vco vco);
};
static inline int __clk_get(struct clk *clk)
diff --git a/arch/arm/mach-integrator/include/mach/entry-macro.S b/arch/arm/mach-integrator/include/mach/entry-macro.S
index 7649c57acb53..3d029c9f3ef6 100644
--- a/arch/arm/mach-integrator/include/mach/entry-macro.S
+++ b/arch/arm/mach-integrator/include/mach/entry-macro.S
@@ -8,6 +8,7 @@
* warranty of any kind, whether express or implied.
*/
#include <mach/hardware.h>
+#include <mach/platform.h>
#include <mach/irqs.h>
.macro disable_fiq
diff --git a/arch/arm/mach-integrator/include/mach/hardware.h b/arch/arm/mach-integrator/include/mach/hardware.h
index d795642fad22..8e26360ce9a3 100644
--- a/arch/arm/mach-integrator/include/mach/hardware.h
+++ b/arch/arm/mach-integrator/include/mach/hardware.h
@@ -23,7 +23,6 @@
#define __ASM_ARCH_HARDWARE_H
#include <asm/sizes.h>
-#include <mach/platform.h>
/*
* Where in virtual memory the IO devices (timers, system controllers
@@ -36,17 +35,19 @@
#define PCIO_BASE PCI_IO_VADDR
#define PCIMEM_BASE PCI_MEMORY_VADDR
-#ifdef CONFIG_MMU
-/* macro to get at IO space when running virtually */
-#define IO_ADDRESS(x) (((x) >> 4) + IO_BASE)
-#else
-#define IO_ADDRESS(x) (x)
-#endif
-
#define pcibios_assign_all_busses() 1
#define PCIBIOS_MIN_IO 0x6000
#define PCIBIOS_MIN_MEM 0x00100000
+/* macro to get at IO space when running virtually */
+#ifdef CONFIG_MMU
+#define IO_ADDRESS(x) (((x) & 0x000fffff) | (((x) >> 4) & 0x0ff00000) | IO_BASE)
+#else
+#define IO_ADDRESS(x) (x)
+#endif
+
+#define __io_address(n) ((void __iomem *)IO_ADDRESS(n))
+
#endif
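
The replacement IO_ADDRESS() macro above no longer simply shifts the physical address down by 4 bits: it keeps the low 20 bits, folds the upper byte down by 4 bits, and offsets the result into the static IO window. A small user-space sketch (not from the kernel tree) shows the resulting PA-to-VA translation; IO_BASE is assumed here to be 0xf0000000, which is consistent with the f1600000/16000000-style mappings listed in the integrator_cp.c comment later in this patch.

/*
 * Sketch (not kernel code): what the reworked IO_ADDRESS() computes.
 * Low 20 bits of the PA are preserved, the next byte is shifted down by
 * 4 bits, and the result is placed in the assumed 0xf0000000 IO window.
 */
#include <stdio.h>

#define IO_BASE		0xf0000000u	/* assumed value */
#define IO_ADDRESS(x)	(((x) & 0x000fffffu) | (((x) >> 4) & 0x0ff00000u) | IO_BASE)

int main(void)
{
	unsigned int phys[] = { 0x16000000u, 0x1b000000u, 0xc9000000u, 0xcb000000u };
	unsigned int i;

	for (i = 0; i < sizeof(phys) / sizeof(phys[0]); i++)
		printf("PA 0x%08x -> VA 0x%08x\n", phys[i], IO_ADDRESS(phys[i]));

	return 0;	/* expect f1600000, f1b00000, fc900000, fcb00000 */
}
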
diff --git a/arch/arm/mach-integrator/include/mach/platform.h b/arch/arm/mach-integrator/include/mach/platform.h
index e00a2624f269..5e6ea5cfea6e 100644
--- a/arch/arm/mach-integrator/include/mach/platform.h
+++ b/arch/arm/mach-integrator/include/mach/platform.h
@@ -23,9 +23,6 @@
*
* Integrator address map
*
- * NOTE: This is a multi-hosted header file for use with uHAL and
- * supported debuggers.
- *
* ***********************************************************************/
#ifndef __address_h
@@ -290,12 +287,14 @@
#define INTEGRATOR_DBG_LEDS (INTEGRATOR_DBG_BASE + INTEGRATOR_DBG_LEDS_OFFSET)
#define INTEGRATOR_DBG_SWITCH (INTEGRATOR_DBG_BASE + INTEGRATOR_DBG_SWITCH_OFFSET)
+#define INTEGRATOR_AP_GPIO_BASE 0x1B000000 /* GPIO */
-#if defined(CONFIG_ARCH_INTEGRATOR_AP)
-#define INTEGRATOR_GPIO_BASE 0x1B000000 /* GPIO */
-#elif defined(CONFIG_ARCH_INTEGRATOR_CP)
-#define INTEGRATOR_GPIO_BASE 0xC9000000 /* GPIO */
-#endif
+#define INTEGRATOR_CP_MMC_BASE 0x1C000000 /* MMC */
+#define INTEGRATOR_CP_AACI_BASE 0x1D000000 /* AACI */
+#define INTEGRATOR_CP_ETH_BASE 0xC8000000 /* Ethernet */
+#define INTEGRATOR_CP_GPIO_BASE 0xC9000000 /* GPIO */
+#define INTEGRATOR_CP_SIC_BASE 0xCA000000 /* SIC */
+#define INTEGRATOR_CP_CTL_BASE 0xCB000000 /* CP system control */
/* ------------------------------------------------------------------------
* KMI keyboard/mouse definitions
@@ -328,20 +327,6 @@
*/
#define PHYS_PCI_V3_BASE 0x62000000
-#define PCI_DRAMSIZE INTEGRATOR_SSRAM_SIZE
-
-/* 'export' these to UHAL */
-#define UHAL_PCI_IO PCI_IO_BASE
-#define UHAL_PCI_MEM PCI_MEM_BASE
-#define UHAL_PCI_ALLOC_IO_BASE 0x00004000
-#define UHAL_PCI_ALLOC_MEM_BASE PCI_MEM_BASE
-#define UHAL_PCI_MAX_SLOT 20
-
-/* ========================================================================
- * Start of uHAL definitions
- * ========================================================================
- */
-
/* ------------------------------------------------------------------------
* Integrator Interrupt Controllers
* ------------------------------------------------------------------------
@@ -389,7 +374,7 @@
*/
/* ------------------------------------------------------------------------
- * LED's - The header LED is not accessible via the uHAL API
+ * LED's
* ------------------------------------------------------------------------
*
*/
@@ -402,34 +387,18 @@
#define LED_BANK INTEGRATOR_DBG_LEDS
/*
- * Memory definitions - run uHAL out of SSRAM.
- *
- */
-#define uHAL_MEMORY_SIZE INTEGRATOR_SSRAM_SIZE
-
-/*
- * Clean base - dummy
- *
- */
-#define CLEAN_BASE INTEGRATOR_BOOT_ROM_HI
-
-/*
* Timer definitions
*
* Only use timer 1 & 2
* (both run at 24MHz and will need the clock divider set to 16).
*
- * Timer 0 runs at bus frequency and therefore could vary and currently
- * uHAL can't handle that.
- *
+ * Timer 0 runs at bus frequency
*/
#define INTEGRATOR_TIMER0_BASE INTEGRATOR_CT_BASE
#define INTEGRATOR_TIMER1_BASE (INTEGRATOR_CT_BASE + 0x100)
#define INTEGRATOR_TIMER2_BASE (INTEGRATOR_CT_BASE + 0x200)
-#define MAX_TIMER 2
-#define MAX_PERIOD 699050
#define TICKS_PER_uSEC 24
/*
@@ -437,14 +406,9 @@
*
*/
#define mSEC_1 1000
-#define mSEC_5 (mSEC_1 * 5)
#define mSEC_10 (mSEC_1 * 10)
-#define mSEC_25 (mSEC_1 * 25)
-#define SEC_1 (mSEC_1 * 1000)
#define INTEGRATOR_CSR_BASE 0x10000000
#define INTEGRATOR_CSR_SIZE 0x10000000
#endif
-
-/* END */
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c
index 8138a7e24562..227cf4d05088 100644
--- a/arch/arm/mach-integrator/integrator_ap.c
+++ b/arch/arm/mach-integrator/integrator_ap.c
@@ -27,9 +27,14 @@
#include <linux/sysdev.h>
#include <linux/amba/bus.h>
#include <linux/amba/kmi.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
#include <mach/hardware.h>
+#include <mach/platform.h>
+#include <asm/hardware/arm_timer.h>
#include <asm/irq.h>
#include <asm/setup.h>
#include <asm/param.h> /* HZ */
@@ -43,8 +48,6 @@
#include <asm/mach/map.h>
#include <asm/mach/time.h>
-#include "common.h"
-
/*
* All IO addresses are mapped onto VA 0xFFFx.xxxx, where x.xxxx
* is the (PA >> 12).
@@ -55,7 +58,7 @@
#define VA_IC_BASE IO_ADDRESS(INTEGRATOR_IC_BASE)
#define VA_SC_BASE IO_ADDRESS(INTEGRATOR_SC_BASE)
#define VA_EBI_BASE IO_ADDRESS(INTEGRATOR_EBI_BASE)
-#define VA_CMIC_BASE IO_ADDRESS(INTEGRATOR_HDR_BASE) + INTEGRATOR_HDR_IC_OFFSET
+#define VA_CMIC_BASE IO_ADDRESS(INTEGRATOR_HDR_IC)
/*
* Logical Physical
@@ -117,8 +120,8 @@ static struct map_desc ap_io_desc[] __initdata = {
.length = SZ_4K,
.type = MT_DEVICE
}, {
- .virtual = IO_ADDRESS(INTEGRATOR_GPIO_BASE),
- .pfn = __phys_to_pfn(INTEGRATOR_GPIO_BASE),
+ .virtual = IO_ADDRESS(INTEGRATOR_AP_GPIO_BASE),
+ .pfn = __phys_to_pfn(INTEGRATOR_AP_GPIO_BASE),
.length = SZ_4K,
.type = MT_DEVICE
}, {
@@ -334,14 +337,163 @@ static void __init ap_init(void)
}
}
+/*
+ * Where is the timer (VA)?
+ */
+#define TIMER0_VA_BASE IO_ADDRESS(INTEGRATOR_TIMER0_BASE)
+#define TIMER1_VA_BASE IO_ADDRESS(INTEGRATOR_TIMER1_BASE)
+#define TIMER2_VA_BASE IO_ADDRESS(INTEGRATOR_TIMER2_BASE)
+
+/*
+ * How long is the timer interval?
+ */
+#define TIMER_INTERVAL (TICKS_PER_uSEC * mSEC_10)
+#if TIMER_INTERVAL >= 0x100000
+#define TICKS2USECS(x) (256 * (x) / TICKS_PER_uSEC)
+#elif TIMER_INTERVAL >= 0x10000
+#define TICKS2USECS(x) (16 * (x) / TICKS_PER_uSEC)
+#else
+#define TICKS2USECS(x) ((x) / TICKS_PER_uSEC)
+#endif
+
+static unsigned long timer_reload;
+
+static void __iomem * const clksrc_base = (void __iomem *)TIMER2_VA_BASE;
+
+static cycle_t timersp_read(struct clocksource *cs)
+{
+ return ~(readl(clksrc_base + TIMER_VALUE) & 0xffff);
+}
+
+static struct clocksource clocksource_timersp = {
+ .name = "timer2",
+ .rating = 200,
+ .read = timersp_read,
+ .mask = CLOCKSOURCE_MASK(16),
+ .shift = 16,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static void integrator_clocksource_init(u32 khz)
+{
+ struct clocksource *cs = &clocksource_timersp;
+ void __iomem *base = clksrc_base;
+ u32 ctrl = TIMER_CTRL_ENABLE;
+
+ if (khz >= 1500) {
+ khz /= 16;
+ ctrl = TIMER_CTRL_DIV16;
+ }
+
+ writel(ctrl, base + TIMER_CTRL);
+ writel(0xffff, base + TIMER_LOAD);
+
+ cs->mult = clocksource_khz2mult(khz, cs->shift);
+ clocksource_register(cs);
+}
+
+static void __iomem * const clkevt_base = (void __iomem *)TIMER1_VA_BASE;
+
+/*
+ * IRQ handler for the timer
+ */
+static irqreturn_t integrator_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+
+ /* clear the interrupt */
+ writel(1, clkevt_base + TIMER_INTCLR);
+
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static void clkevt_set_mode(enum clock_event_mode mode, struct clock_event_device *evt)
+{
+ u32 ctrl = readl(clkevt_base + TIMER_CTRL) & ~TIMER_CTRL_ENABLE;
+
+ BUG_ON(mode == CLOCK_EVT_MODE_ONESHOT);
+
+ if (mode == CLOCK_EVT_MODE_PERIODIC) {
+ writel(ctrl, clkevt_base + TIMER_CTRL);
+ writel(timer_reload, clkevt_base + TIMER_LOAD);
+ ctrl |= TIMER_CTRL_PERIODIC | TIMER_CTRL_ENABLE;
+ }
+
+ writel(ctrl, clkevt_base + TIMER_CTRL);
+}
+
+static int clkevt_set_next_event(unsigned long next, struct clock_event_device *evt)
+{
+ unsigned long ctrl = readl(clkevt_base + TIMER_CTRL);
+
+ writel(ctrl & ~TIMER_CTRL_ENABLE, clkevt_base + TIMER_CTRL);
+ writel(next, clkevt_base + TIMER_LOAD);
+ writel(ctrl | TIMER_CTRL_ENABLE, clkevt_base + TIMER_CTRL);
+
+ return 0;
+}
+
+static struct clock_event_device integrator_clockevent = {
+ .name = "timer1",
+ .shift = 34,
+ .features = CLOCK_EVT_FEAT_PERIODIC,
+ .set_mode = clkevt_set_mode,
+ .set_next_event = clkevt_set_next_event,
+ .rating = 300,
+ .cpumask = cpu_all_mask,
+};
+
+static struct irqaction integrator_timer_irq = {
+ .name = "timer",
+ .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .handler = integrator_timer_interrupt,
+ .dev_id = &integrator_clockevent,
+};
+
+static void integrator_clockevent_init(u32 khz)
+{
+ struct clock_event_device *evt = &integrator_clockevent;
+ unsigned int ctrl = 0;
+
+ if (khz * 1000 > 0x100000 * HZ) {
+ khz /= 256;
+ ctrl |= TIMER_CTRL_DIV256;
+ } else if (khz * 1000 > 0x10000 * HZ) {
+ khz /= 16;
+ ctrl |= TIMER_CTRL_DIV16;
+ }
+
+ timer_reload = khz * 1000 / HZ;
+ writel(ctrl, clkevt_base + TIMER_CTRL);
+
+ evt->irq = IRQ_TIMERINT1;
+ evt->mult = div_sc(khz, NSEC_PER_MSEC, evt->shift);
+ evt->max_delta_ns = clockevent_delta2ns(0xffff, evt);
+ evt->min_delta_ns = clockevent_delta2ns(0xf, evt);
+
+ setup_irq(IRQ_TIMERINT1, &integrator_timer_irq);
+ clockevents_register_device(evt);
+}
+
+/*
+ * Set up timer(s).
+ */
static void __init ap_init_timer(void)
{
- integrator_time_init(1000000 * TICKS_PER_uSEC / HZ, 0);
+ u32 khz = TICKS_PER_uSEC * 1000;
+
+ writel(0, TIMER0_VA_BASE + TIMER_CTRL);
+ writel(0, TIMER1_VA_BASE + TIMER_CTRL);
+ writel(0, TIMER2_VA_BASE + TIMER_CTRL);
+
+ integrator_clocksource_init(khz);
+ integrator_clockevent_init(khz);
}
static struct sys_timer ap_timer = {
.init = ap_init_timer,
- .offset = integrator_gettimeoffset,
};
MACHINE_START(INTEGRATOR, "ARM-Integrator")
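
The new integrator_clockevent_init() above picks an SP804-style prescaler so that one jiffy's worth of ticks fits the 16-bit reload register, then derives timer_reload from the (possibly divided) rate. The sketch below is outside the patch and only walks through that selection for the Integrator/AP's 24 MHz timer clock with HZ=100; the TIMER_CTRL_DIV* values are illustrative stand-ins for the arm_timer.h definitions.

/*
 * Sketch (not kernel code): prescaler/reload selection as done by
 * integrator_clockevent_init().  24 MHz and HZ=100 select divide-by-16
 * and a reload of 15000 ticks, which fits the 16-bit counter.
 */
#include <stdio.h>

#define HZ			100
#define TIMER_CTRL_DIV16	(1 << 2)	/* illustrative stand-in */
#define TIMER_CTRL_DIV256	(2 << 2)	/* illustrative stand-in */

int main(void)
{
	unsigned int khz = 24 * 1000;	/* TICKS_PER_uSEC * 1000 */
	unsigned int ctrl = 0;

	if (khz * 1000 > 0x100000 * HZ) {
		khz /= 256;
		ctrl |= TIMER_CTRL_DIV256;
	} else if (khz * 1000 > 0x10000 * HZ) {
		khz /= 16;
		ctrl |= TIMER_CTRL_DIV16;
	}

	printf("ctrl=0x%x, timer_reload=%u ticks per jiffy\n",
	       ctrl, khz * 1000 / HZ);
	return 0;
}
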
diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c
index 15e6cc5a352f..cde57b2b83b5 100644
--- a/arch/arm/mach-integrator/integrator_cp.c
+++ b/arch/arm/mach-integrator/integrator_cp.c
@@ -25,10 +25,12 @@
#include <asm/clkdev.h>
#include <mach/clkdev.h>
#include <mach/hardware.h>
+#include <mach/platform.h>
#include <asm/irq.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
-#include <asm/hardware/icst525.h>
+#include <asm/hardware/arm_timer.h>
+#include <asm/hardware/icst.h>
#include <mach/cm.h>
#include <mach/lm.h>
@@ -39,24 +41,20 @@
#include <asm/mach/map.h>
#include <asm/mach/time.h>
-#include "common.h"
-
-#define INTCP_PA_MMC_BASE 0x1c000000
-#define INTCP_PA_AACI_BASE 0x1d000000
+#include <plat/timer-sp.h>
#define INTCP_PA_FLASH_BASE 0x24000000
#define INTCP_FLASH_SIZE SZ_32M
#define INTCP_PA_CLCD_BASE 0xc0000000
-#define INTCP_VA_CIC_BASE IO_ADDRESS(INTEGRATOR_HDR_BASE) + 0x40
+#define INTCP_VA_CIC_BASE IO_ADDRESS(INTEGRATOR_HDR_BASE + 0x40)
#define INTCP_VA_PIC_BASE IO_ADDRESS(INTEGRATOR_IC_BASE)
-#define INTCP_VA_SIC_BASE IO_ADDRESS(0xca000000)
+#define INTCP_VA_SIC_BASE IO_ADDRESS(INTEGRATOR_CP_SIC_BASE)
-#define INTCP_PA_ETH_BASE 0xc8000000
#define INTCP_ETH_SIZE 0x10
-#define INTCP_VA_CTRL_BASE IO_ADDRESS(0xcb000000)
+#define INTCP_VA_CTRL_BASE IO_ADDRESS(INTEGRATOR_CP_CTL_BASE)
#define INTCP_FLASHPROG 0x04
#define CINTEGRATOR_FLASHPROG_FLVPPEN (1 << 0)
#define CINTEGRATOR_FLASHPROG_FLWREN (1 << 1)
@@ -71,7 +69,9 @@
* f1600000 16000000 UART 0
* f1700000 17000000 UART 1
* f1a00000 1a000000 Debug LEDs
- * f1b00000 1b000000 GPIO
+ * fc900000 c9000000 GPIO
+ * fca00000 ca000000 SIC
+ * fcb00000 cb000000 CP system control
*/
static struct map_desc intcp_io_desc[] __initdata = {
@@ -116,18 +116,18 @@ static struct map_desc intcp_io_desc[] __initdata = {
.length = SZ_4K,
.type = MT_DEVICE
}, {
- .virtual = IO_ADDRESS(INTEGRATOR_GPIO_BASE),
- .pfn = __phys_to_pfn(INTEGRATOR_GPIO_BASE),
+ .virtual = IO_ADDRESS(INTEGRATOR_CP_GPIO_BASE),
+ .pfn = __phys_to_pfn(INTEGRATOR_CP_GPIO_BASE),
.length = SZ_4K,
.type = MT_DEVICE
}, {
- .virtual = IO_ADDRESS(0xca000000),
- .pfn = __phys_to_pfn(0xca000000),
+ .virtual = IO_ADDRESS(INTEGRATOR_CP_SIC_BASE),
+ .pfn = __phys_to_pfn(INTEGRATOR_CP_SIC_BASE),
.length = SZ_4K,
.type = MT_DEVICE
}, {
- .virtual = IO_ADDRESS(0xcb000000),
- .pfn = __phys_to_pfn(0xcb000000),
+ .virtual = IO_ADDRESS(INTEGRATOR_CP_CTL_BASE),
+ .pfn = __phys_to_pfn(INTEGRATOR_CP_CTL_BASE),
.length = SZ_4K,
.type = MT_DEVICE
}
@@ -266,33 +266,43 @@ static void __init intcp_init_irq(void)
/*
* Clock handling
*/
-#define CM_LOCK (IO_ADDRESS(INTEGRATOR_HDR_BASE)+INTEGRATOR_HDR_LOCK_OFFSET)
-#define CM_AUXOSC (IO_ADDRESS(INTEGRATOR_HDR_BASE)+0x1c)
+#define CM_LOCK (__io_address(INTEGRATOR_HDR_BASE)+INTEGRATOR_HDR_LOCK_OFFSET)
+#define CM_AUXOSC (__io_address(INTEGRATOR_HDR_BASE)+0x1c)
-static const struct icst525_params cp_auxvco_params = {
- .ref = 24000,
- .vco_max = 320000,
+static const struct icst_params cp_auxvco_params = {
+ .ref = 24000000,
+ .vco_max = ICST525_VCO_MAX_5V,
+ .vco_min = ICST525_VCO_MIN,
.vd_min = 8,
.vd_max = 263,
.rd_min = 3,
.rd_max = 65,
+ .s2div = icst525_s2div,
+ .idx2s = icst525_idx2s,
};
-static void cp_auxvco_set(struct clk *clk, struct icst525_vco vco)
+static void cp_auxvco_set(struct clk *clk, struct icst_vco vco)
{
u32 val;
- val = readl(CM_AUXOSC) & ~0x7ffff;
+ val = readl(clk->vcoreg) & ~0x7ffff;
val |= vco.v | (vco.r << 9) | (vco.s << 16);
writel(0xa05f, CM_LOCK);
- writel(val, CM_AUXOSC);
+ writel(val, clk->vcoreg);
writel(0, CM_LOCK);
}
+static const struct clk_ops cp_auxclk_ops = {
+ .round = icst_clk_round,
+ .set = icst_clk_set,
+ .setvco = cp_auxvco_set,
+};
+
static struct clk cp_auxclk = {
+ .ops = &cp_auxclk_ops,
.params = &cp_auxvco_params,
- .setvco = cp_auxvco_set,
+ .vcoreg = CM_AUXOSC,
};
static struct clk_lookup cp_lookups[] = {
@@ -363,8 +373,8 @@ static struct platform_device intcp_flash_device = {
static struct resource smc91x_resources[] = {
[0] = {
- .start = INTCP_PA_ETH_BASE,
- .end = INTCP_PA_ETH_BASE + INTCP_ETH_SIZE - 1,
+ .start = INTEGRATOR_CP_ETH_BASE,
+ .end = INTEGRATOR_CP_ETH_BASE + INTCP_ETH_SIZE - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -394,8 +404,8 @@ static struct platform_device *intcp_devs[] __initdata = {
*/
static unsigned int mmc_status(struct device *dev)
{
- unsigned int status = readl(IO_ADDRESS(0xca000000) + 4);
- writel(8, IO_ADDRESS(0xcb000000) + 8);
+ unsigned int status = readl(IO_ADDRESS(0xca000000 + 4));
+ writel(8, IO_ADDRESS(INTEGRATOR_CP_CTL_BASE + 8));
return status & 8;
}
@@ -413,8 +423,8 @@ static struct amba_device mmc_device = {
.platform_data = &mmc_data,
},
.res = {
- .start = INTCP_PA_MMC_BASE,
- .end = INTCP_PA_MMC_BASE + SZ_4K - 1,
+ .start = INTEGRATOR_CP_MMC_BASE,
+ .end = INTEGRATOR_CP_MMC_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
.irq = { IRQ_CP_MMCIINT0, IRQ_CP_MMCIINT1 },
@@ -426,8 +436,8 @@ static struct amba_device aaci_device = {
.init_name = "mb:1d",
},
.res = {
- .start = INTCP_PA_AACI_BASE,
- .end = INTCP_PA_AACI_BASE + SZ_4K - 1,
+ .start = INTEGRATOR_CP_AACI_BASE,
+ .end = INTEGRATOR_CP_AACI_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
.irq = { IRQ_CP_AACIINT, NO_IRQ },
@@ -567,16 +577,22 @@ static void __init intcp_init(void)
}
}
-#define TIMER_CTRL_IE (1 << 5) /* Interrupt Enable */
+#define TIMER0_VA_BASE __io_address(INTEGRATOR_TIMER0_BASE)
+#define TIMER1_VA_BASE __io_address(INTEGRATOR_TIMER1_BASE)
+#define TIMER2_VA_BASE __io_address(INTEGRATOR_TIMER2_BASE)
static void __init intcp_timer_init(void)
{
- integrator_time_init(1000000 / HZ, TIMER_CTRL_IE);
+ writel(0, TIMER0_VA_BASE + TIMER_CTRL);
+ writel(0, TIMER1_VA_BASE + TIMER_CTRL);
+ writel(0, TIMER2_VA_BASE + TIMER_CTRL);
+
+ sp804_clocksource_init(TIMER2_VA_BASE);
+ sp804_clockevents_init(TIMER1_VA_BASE, IRQ_TIMERINT1);
}
static struct sys_timer cp_timer = {
.init = intcp_timer_init,
- .offset = integrator_gettimeoffset,
};
MACHINE_START(CINTEGRATOR, "ARM-IntegratorCP")
diff --git a/arch/arm/mach-integrator/leds.c b/arch/arm/mach-integrator/leds.c
index 8dcc823f4135..28be186adb89 100644
--- a/arch/arm/mach-integrator/leds.c
+++ b/arch/arm/mach-integrator/leds.c
@@ -27,6 +27,7 @@
#include <linux/io.h>
#include <mach/hardware.h>
+#include <mach/platform.h>
#include <asm/leds.h>
#include <asm/system.h>
#include <asm/mach-types.h>
diff --git a/arch/arm/mach-integrator/pci_v3.c b/arch/arm/mach-integrator/pci_v3.c
index ffbd349363af..9cef0590d5aa 100644
--- a/arch/arm/mach-integrator/pci_v3.c
+++ b/arch/arm/mach-integrator/pci_v3.c
@@ -29,6 +29,7 @@
#include <linux/io.h>
#include <mach/hardware.h>
+#include <mach/platform.h>
#include <asm/irq.h>
#include <asm/signal.h>
#include <asm/system.h>
@@ -389,9 +390,9 @@ static int __init pci_v3_setup_resources(struct resource **resource)
* means I can't get additional information on the reason for the pm2fb
* problems. I suppose I'll just have to mind-meld with the machine. ;)
*/
-#define SC_PCI (IO_ADDRESS(INTEGRATOR_SC_BASE) + INTEGRATOR_SC_PCIENABLE_OFFSET)
-#define SC_LBFADDR (IO_ADDRESS(INTEGRATOR_SC_BASE) + 0x20)
-#define SC_LBFCODE (IO_ADDRESS(INTEGRATOR_SC_BASE) + 0x24)
+#define SC_PCI IO_ADDRESS(INTEGRATOR_SC_PCIENABLE)
+#define SC_LBFADDR IO_ADDRESS(INTEGRATOR_SC_BASE + 0x20)
+#define SC_LBFCODE IO_ADDRESS(INTEGRATOR_SC_BASE + 0x24)
static int
v3_pci_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c
index 5d99039286eb..f108a31afc2b 100644
--- a/arch/arm/mach-iop32x/n2100.c
+++ b/arch/arm/mach-iop32x/n2100.c
@@ -176,7 +176,7 @@ static struct plat_serial8250_port n2100_serial_port[] = {
.mapbase = N2100_UART,
.membase = (char *)N2100_UART,
.irq = 0,
- .flags = UPF_SKIP_TEST,
+ .flags = UPF_SKIP_TEST | UPF_AUTO_IRQ | UPF_SHARE_IRQ,
.iotype = UPIO_MEM,
.regshift = 0,
.uartclk = 1843200,
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 71728d36d501..0bce09799d18 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -21,7 +21,6 @@
#include <linux/tty.h>
#include <linux/platform_device.h>
#include <linux/serial_core.h>
-#include <linux/bootmem.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/time.h>
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index f780086befd7..a25193ad21f2 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -1,5 +1,22 @@
if ARCH_MSM
+choice
+ prompt "Qualcomm MSM SoC Type"
+ default ARCH_MSM7X00A
+
+config ARCH_MSM7X00A
+ bool "MSM7x00A / MSM7x01A"
+ select ARCH_MSM_ARM11
+ select MSM_SMD
+ select MSM_SMD_PKG3
+ select CPU_V6
+
+endchoice
+
+config ARCH_MSM_ARM11
+ bool
+
+
comment "MSM Board Type"
depends on ARCH_MSM
@@ -28,16 +45,25 @@ choice
endchoice
config MACH_HALIBUT
- depends on ARCH_MSM
+ depends on ARCH_MSM7X00A
+ select CPU_V6
default y
bool "Halibut Board (QCT SURF7201A)"
help
Support for the Qualcomm SURF7201A eval board.
config MACH_TROUT
+ depends on ARCH_MSM7X00A
+ select CPU_V6
default y
bool "HTC Dream (aka trout)"
help
Support for the HTC Dream, T-Mobile G1, Android ADP1 devices.
+config MSM_SMD_PKG3
+ bool
+
+config MSM_SMD
+ bool
+
endif
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 91e6f5c95dc1..147339c87277 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -2,8 +2,11 @@ obj-y += io.o idle.o irq.o timer.o dma.o
obj-y += devices.o
obj-y += proc_comm.o
obj-y += vreg.o
+obj-y += acpuclock-arm11.o
obj-y += clock.o clock-7x01a.o
-obj-$(CONFIG_MACH_HALIBUT) += board-halibut.o
+obj-$(CONFIG_MSM_SMD) += smd.o smd_debug.o
+obj-$(CONFIG_MSM_SMD) += last_radio_log.o
-obj-$(CONFIG_MACH_TROUT) += board-dream.o
+obj-$(CONFIG_MACH_TROUT) += board-trout.o
+obj-$(CONFIG_MACH_HALIBUT) += board-halibut.o
diff --git a/arch/arm/mach-msm/acpuclock-arm11.c b/arch/arm/mach-msm/acpuclock-arm11.c
new file mode 100644
index 000000000000..af5e85b91d02
--- /dev/null
+++ b/arch/arm/mach-msm/acpuclock-arm11.c
@@ -0,0 +1,526 @@
+/* arch/arm/mach-msm/acpuclock.c
+ *
+ * MSM architecture clock driver
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007 QUALCOMM Incorporated
+ * Author: San Mehat <san@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+#include <mach/board.h>
+#include <mach/msm_iomap.h>
+
+#include "proc_comm.h"
+#include "acpuclock.h"
+
+
+#define A11S_CLK_CNTL_ADDR (MSM_CSR_BASE + 0x100)
+#define A11S_CLK_SEL_ADDR (MSM_CSR_BASE + 0x104)
+#define A11S_VDD_SVS_PLEVEL_ADDR (MSM_CSR_BASE + 0x124)
+
+/*
+ * ARM11 clock configuration for specific ACPU speeds
+ */
+
+#define ACPU_PLL_TCXO -1
+#define ACPU_PLL_0 0
+#define ACPU_PLL_1 1
+#define ACPU_PLL_2 2
+#define ACPU_PLL_3 3
+
+#define PERF_SWITCH_DEBUG 0
+#define PERF_SWITCH_STEP_DEBUG 0
+
+struct clock_state
+{
+ struct clkctl_acpu_speed *current_speed;
+ struct mutex lock;
+ uint32_t acpu_switch_time_us;
+ uint32_t max_speed_delta_khz;
+ uint32_t vdd_switch_time_us;
+ unsigned long power_collapse_khz;
+ unsigned long wait_for_irq_khz;
+};
+
+static struct clk *ebi1_clk;
+static struct clock_state drv_state = { 0 };
+
+static void __init acpuclk_init(void);
+
+/* MSM7201A Levels 3-6 all correspond to 1.2V, level 7 corresponds to 1.325V. */
+enum {
+ VDD_0 = 0,
+ VDD_1 = 1,
+ VDD_2 = 2,
+ VDD_3 = 3,
+ VDD_4 = 3,
+ VDD_5 = 3,
+ VDD_6 = 3,
+ VDD_7 = 7,
+ VDD_END
+};
+
+struct clkctl_acpu_speed {
+ unsigned int a11clk_khz;
+ int pll;
+ unsigned int a11clk_src_sel;
+ unsigned int a11clk_src_div;
+ unsigned int ahbclk_khz;
+ unsigned int ahbclk_div;
+ int vdd;
+ unsigned int axiclk_khz;
+ unsigned long lpj; /* loops_per_jiffy */
+/* Index in acpu_freq_tbl[] for steppings. */
+ short down;
+ short up;
+};
+
+/*
+ * ACPU speed table. Complete table is shown but certain speeds are commented
+ * out to optimize speed switching. Initialize loops_per_jiffy to 0.
+ *
+ * Table stepping up/down is optimized for 256 MHz jumps while staying on the
+ * same PLL.
+ */
+#if (0)
+static struct clkctl_acpu_speed acpu_freq_tbl[] = {
+ { 19200, ACPU_PLL_TCXO, 0, 0, 19200, 0, VDD_0, 30720, 0, 0, 8 },
+ { 61440, ACPU_PLL_0, 4, 3, 61440, 0, VDD_0, 30720, 0, 0, 8 },
+ { 81920, ACPU_PLL_0, 4, 2, 40960, 1, VDD_0, 61440, 0, 0, 8 },
+ { 96000, ACPU_PLL_1, 1, 7, 48000, 1, VDD_0, 61440, 0, 0, 9 },
+ { 122880, ACPU_PLL_0, 4, 1, 61440, 1, VDD_3, 61440, 0, 0, 8 },
+ { 128000, ACPU_PLL_1, 1, 5, 64000, 1, VDD_3, 61440, 0, 0, 12 },
+ { 176000, ACPU_PLL_2, 2, 5, 88000, 1, VDD_3, 61440, 0, 0, 11 },
+ { 192000, ACPU_PLL_1, 1, 3, 64000, 2, VDD_3, 61440, 0, 0, 12 },
+ { 245760, ACPU_PLL_0, 4, 0, 81920, 2, VDD_4, 61440, 0, 0, 12 },
+ { 256000, ACPU_PLL_1, 1, 2, 128000, 2, VDD_5, 128000, 0, 0, 12 },
+ { 264000, ACPU_PLL_2, 2, 3, 88000, 2, VDD_5, 128000, 0, 6, 13 },
+ { 352000, ACPU_PLL_2, 2, 2, 88000, 3, VDD_5, 128000, 0, 6, 13 },
+ { 384000, ACPU_PLL_1, 1, 1, 128000, 2, VDD_6, 128000, 0, 5, -1 },
+ { 528000, ACPU_PLL_2, 2, 1, 132000, 3, VDD_7, 128000, 0, 11, -1 },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+};
+#else /* Table of freq we currently use. */
+static struct clkctl_acpu_speed acpu_freq_tbl[] = {
+ { 19200, ACPU_PLL_TCXO, 0, 0, 19200, 0, VDD_0, 30720, 0, 0, 4 },
+ { 122880, ACPU_PLL_0, 4, 1, 61440, 1, VDD_3, 61440, 0, 0, 4 },
+ { 128000, ACPU_PLL_1, 1, 5, 64000, 1, VDD_3, 61440, 0, 0, 6 },
+ { 176000, ACPU_PLL_2, 2, 5, 88000, 1, VDD_3, 61440, 0, 0, 5 },
+ { 245760, ACPU_PLL_0, 4, 0, 81920, 2, VDD_4, 61440, 0, 0, 5 },
+ { 352000, ACPU_PLL_2, 2, 2, 88000, 3, VDD_5, 128000, 0, 3, 7 },
+ { 384000, ACPU_PLL_1, 1, 1, 128000, 2, VDD_6, 128000, 0, 2, -1 },
+ { 528000, ACPU_PLL_2, 2, 1, 132000, 3, VDD_7, 128000, 0, 5, -1 },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+};
+#endif
+
+
+#ifdef CONFIG_CPU_FREQ_TABLE
+static struct cpufreq_frequency_table freq_table[] = {
+ { 0, 122880 },
+ { 1, 128000 },
+ { 2, 245760 },
+ { 3, 384000 },
+ { 4, 528000 },
+ { 5, CPUFREQ_TABLE_END },
+};
+#endif
+
+static int pc_pll_request(unsigned id, unsigned on)
+{
+ int res;
+ on = !!on;
+
+#if PERF_SWITCH_DEBUG
+ if (on)
+ printk(KERN_DEBUG "Enabling PLL %d\n", id);
+ else
+ printk(KERN_DEBUG "Disabling PLL %d\n", id);
+#endif
+
+ res = msm_proc_comm(PCOM_CLKCTL_RPC_PLL_REQUEST, &id, &on);
+ if (res < 0)
+ return res;
+
+#if PERF_SWITCH_DEBUG
+ if (on)
+ printk(KERN_DEBUG "PLL %d enabled\n", id);
+ else
+ printk(KERN_DEBUG "PLL %d disabled\n", id);
+#endif
+ return res;
+}
+
+
+/*----------------------------------------------------------------------------
+ * ARM11 'owned' clock control
+ *---------------------------------------------------------------------------*/
+
+unsigned long acpuclk_power_collapse(void) {
+ int ret = acpuclk_get_rate();
+ ret *= 1000;
+ if (ret > drv_state.power_collapse_khz)
+ acpuclk_set_rate(drv_state.power_collapse_khz, 1);
+ return ret;
+}
+
+unsigned long acpuclk_get_wfi_rate(void)
+{
+ return drv_state.wait_for_irq_khz;
+}
+
+unsigned long acpuclk_wait_for_irq(void) {
+ int ret = acpuclk_get_rate();
+ ret *= 1000;
+ if (ret > drv_state.wait_for_irq_khz)
+ acpuclk_set_rate(drv_state.wait_for_irq_khz, 1);
+ return ret;
+}
+
+static int acpuclk_set_vdd_level(int vdd)
+{
+ uint32_t current_vdd;
+
+ current_vdd = readl(A11S_VDD_SVS_PLEVEL_ADDR) & 0x07;
+
+#if PERF_SWITCH_DEBUG
+ printk(KERN_DEBUG "acpuclock: Switching VDD from %u -> %d\n",
+ current_vdd, vdd);
+#endif
+ writel((1 << 7) | (vdd << 3), A11S_VDD_SVS_PLEVEL_ADDR);
+ udelay(drv_state.vdd_switch_time_us);
+ if ((readl(A11S_VDD_SVS_PLEVEL_ADDR) & 0x7) != vdd) {
+#if PERF_SWITCH_DEBUG
+ printk(KERN_ERR "acpuclock: VDD set failed\n");
+#endif
+ return -EIO;
+ }
+
+#if PERF_SWITCH_DEBUG
+ printk(KERN_DEBUG "acpuclock: VDD switched\n");
+#endif
+ return 0;
+}
+
+/* Set proper dividers for the given clock speed. */
+static void acpuclk_set_div(const struct clkctl_acpu_speed *hunt_s) {
+ uint32_t reg_clkctl, reg_clksel, clk_div;
+
+ /* AHB_CLK_DIV */
+ clk_div = (readl(A11S_CLK_SEL_ADDR) >> 1) & 0x03;
+ /*
+ * If the new clock divider is higher than the previous, then
+ * program the divider before switching the clock
+ */
+ if (hunt_s->ahbclk_div > clk_div) {
+ reg_clksel = readl(A11S_CLK_SEL_ADDR);
+ reg_clksel &= ~(0x3 << 1);
+ reg_clksel |= (hunt_s->ahbclk_div << 1);
+ writel(reg_clksel, A11S_CLK_SEL_ADDR);
+ }
+ if ((readl(A11S_CLK_SEL_ADDR) & 0x01) == 0) {
+ /* SRC0 */
+
+ /* Program clock source */
+ reg_clkctl = readl(A11S_CLK_CNTL_ADDR);
+ reg_clkctl &= ~(0x07 << 4);
+ reg_clkctl |= (hunt_s->a11clk_src_sel << 4);
+ writel(reg_clkctl, A11S_CLK_CNTL_ADDR);
+
+ /* Program clock divider */
+ reg_clkctl = readl(A11S_CLK_CNTL_ADDR);
+ reg_clkctl &= ~0xf;
+ reg_clkctl |= hunt_s->a11clk_src_div;
+ writel(reg_clkctl, A11S_CLK_CNTL_ADDR);
+
+ /* Program clock source selection */
+ reg_clksel = readl(A11S_CLK_SEL_ADDR);
+ reg_clksel |= 1; /* CLK_SEL_SRC1NO == SRC1 */
+ writel(reg_clksel, A11S_CLK_SEL_ADDR);
+ } else {
+ /* SRC1 */
+
+ /* Program clock source */
+ reg_clkctl = readl(A11S_CLK_CNTL_ADDR);
+ reg_clkctl &= ~(0x07 << 12);
+ reg_clkctl |= (hunt_s->a11clk_src_sel << 12);
+ writel(reg_clkctl, A11S_CLK_CNTL_ADDR);
+
+ /* Program clock divider */
+ reg_clkctl = readl(A11S_CLK_CNTL_ADDR);
+ reg_clkctl &= ~(0xf << 8);
+ reg_clkctl |= (hunt_s->a11clk_src_div << 8);
+ writel(reg_clkctl, A11S_CLK_CNTL_ADDR);
+
+ /* Program clock source selection */
+ reg_clksel = readl(A11S_CLK_SEL_ADDR);
+ reg_clksel &= ~1; /* CLK_SEL_SRC1NO == SRC0 */
+ writel(reg_clksel, A11S_CLK_SEL_ADDR);
+ }
+
+ /*
+ * If the new clock divider is lower than the previous, then
+ * program the divider after switching the clock
+ */
+ if (hunt_s->ahbclk_div < clk_div) {
+ reg_clksel = readl(A11S_CLK_SEL_ADDR);
+ reg_clksel &= ~(0x3 << 1);
+ reg_clksel |= (hunt_s->ahbclk_div << 1);
+ writel(reg_clksel, A11S_CLK_SEL_ADDR);
+ }
+}
+
+int acpuclk_set_rate(unsigned long rate, int for_power_collapse)
+{
+ uint32_t reg_clkctl;
+ struct clkctl_acpu_speed *cur_s, *tgt_s, *strt_s;
+ int rc = 0;
+ unsigned int plls_enabled = 0, pll;
+
+ strt_s = cur_s = drv_state.current_speed;
+
+ WARN_ONCE(cur_s == NULL, "acpuclk_set_rate: not initialized\n");
+ if (cur_s == NULL)
+ return -ENOENT;
+
+ if (rate == (cur_s->a11clk_khz * 1000))
+ return 0;
+
+ for (tgt_s = acpu_freq_tbl; tgt_s->a11clk_khz != 0; tgt_s++) {
+ if (tgt_s->a11clk_khz == (rate / 1000))
+ break;
+ }
+
+ if (tgt_s->a11clk_khz == 0)
+ return -EINVAL;
+
+ /* Choose the highest speed at or below 'rate' with the same PLL. */
+ if (for_power_collapse && tgt_s->a11clk_khz < cur_s->a11clk_khz) {
+ while (tgt_s->pll != ACPU_PLL_TCXO && tgt_s->pll != cur_s->pll)
+ tgt_s--;
+ }
+
+ if (strt_s->pll != ACPU_PLL_TCXO)
+ plls_enabled |= 1 << strt_s->pll;
+
+ if (!for_power_collapse) {
+ mutex_lock(&drv_state.lock);
+ if (strt_s->pll != tgt_s->pll && tgt_s->pll != ACPU_PLL_TCXO) {
+ rc = pc_pll_request(tgt_s->pll, 1);
+ if (rc < 0) {
+ pr_err("PLL%d enable failed (%d)\n",
+ tgt_s->pll, rc);
+ goto out;
+ }
+ plls_enabled |= 1 << tgt_s->pll;
+ }
+ /* Increase VDD if needed. */
+ if (tgt_s->vdd > cur_s->vdd) {
+ if ((rc = acpuclk_set_vdd_level(tgt_s->vdd)) < 0) {
+ printk(KERN_ERR "Unable to switch ACPU vdd\n");
+ goto out;
+ }
+ }
+ }
+
+ /* Set wait states for the CPU between frequency changes */
+ reg_clkctl = readl(A11S_CLK_CNTL_ADDR);
+ reg_clkctl |= (100 << 16); /* set WT_ST_CNT */
+ writel(reg_clkctl, A11S_CLK_CNTL_ADDR);
+
+#if PERF_SWITCH_DEBUG
+ printk(KERN_INFO "acpuclock: Switching from ACPU rate %u -> %u\n",
+ strt_s->a11clk_khz * 1000, tgt_s->a11clk_khz * 1000);
+#endif
+
+ while (cur_s != tgt_s) {
+ /*
+ * Always jump to the target freq if within 256 MHz, regardless of
+ * PLL. If the difference is greater, use the predefined
+ * steppings in the table.
+ */
+ int d = abs((int)(cur_s->a11clk_khz - tgt_s->a11clk_khz));
+ if (d > drv_state.max_speed_delta_khz) {
+ /* Step up or down depending on target vs current. */
+ int clk_index = tgt_s->a11clk_khz > cur_s->a11clk_khz ?
+ cur_s->up : cur_s->down;
+ if (clk_index < 0) { /* This should not happen. */
+ printk(KERN_ERR "cur:%u target: %u\n",
+ cur_s->a11clk_khz, tgt_s->a11clk_khz);
+ rc = -EINVAL;
+ goto out;
+ }
+ cur_s = &acpu_freq_tbl[clk_index];
+ } else {
+ cur_s = tgt_s;
+ }
+#if PERF_SWITCH_STEP_DEBUG
+ printk(KERN_DEBUG "%s: STEP khz = %u, pll = %d\n",
+ __FUNCTION__, cur_s->a11clk_khz, cur_s->pll);
+#endif
+ if (!for_power_collapse && cur_s->pll != ACPU_PLL_TCXO
+ && !(plls_enabled & (1 << cur_s->pll))) {
+ rc = pc_pll_request(cur_s->pll, 1);
+ if (rc < 0) {
+ pr_err("PLL%d enable failed (%d)\n",
+ cur_s->pll, rc);
+ goto out;
+ }
+ plls_enabled |= 1 << cur_s->pll;
+ }
+
+ acpuclk_set_div(cur_s);
+ drv_state.current_speed = cur_s;
+ /* Re-adjust lpj for the new clock speed. */
+ loops_per_jiffy = cur_s->lpj;
+ udelay(drv_state.acpu_switch_time_us);
+ }
+
+ /* Nothing else to do for power collapse. */
+ if (for_power_collapse)
+ return 0;
+
+ /* Disable PLLs we are not using anymore. */
+ plls_enabled &= ~(1 << tgt_s->pll);
+ for (pll = ACPU_PLL_0; pll <= ACPU_PLL_2; pll++)
+ if (plls_enabled & (1 << pll)) {
+ rc = pc_pll_request(pll, 0);
+ if (rc < 0) {
+ pr_err("PLL%d disable failed (%d)\n", pll, rc);
+ goto out;
+ }
+ }
+
+ /* Change the AXI bus frequency if we can. */
+ if (strt_s->axiclk_khz != tgt_s->axiclk_khz) {
+ rc = clk_set_rate(ebi1_clk, tgt_s->axiclk_khz * 1000);
+ if (rc < 0)
+ pr_err("Setting AXI min rate failed!\n");
+ }
+
+ /* Drop VDD level if we can. */
+ if (tgt_s->vdd < strt_s->vdd) {
+ if (acpuclk_set_vdd_level(tgt_s->vdd) < 0)
+ printk(KERN_ERR "acpuclock: Unable to drop ACPU vdd\n");
+ }
+
+#if PERF_SWITCH_DEBUG
+ printk(KERN_DEBUG "%s: ACPU speed change complete\n", __FUNCTION__);
+#endif
+out:
+ if (!for_power_collapse)
+ mutex_unlock(&drv_state.lock);
+ return rc;
+}
+
+static void __init acpuclk_init(void)
+{
+ struct clkctl_acpu_speed *speed;
+ uint32_t div, sel;
+ int rc;
+
+ /*
+ * Determine the rate of ACPU clock
+ */
+
+ if (!(readl(A11S_CLK_SEL_ADDR) & 0x01)) { /* CLK_SEL_SRC1N0 */
+ /* CLK_SRC0_SEL */
+ sel = (readl(A11S_CLK_CNTL_ADDR) >> 12) & 0x7;
+ /* CLK_SRC0_DIV */
+ div = (readl(A11S_CLK_CNTL_ADDR) >> 8) & 0x0f;
+ } else {
+ /* CLK_SRC1_SEL */
+ sel = (readl(A11S_CLK_CNTL_ADDR) >> 4) & 0x07;
+ /* CLK_SRC1_DIV */
+ div = readl(A11S_CLK_CNTL_ADDR) & 0x0f;
+ }
+
+ for (speed = acpu_freq_tbl; speed->a11clk_khz != 0; speed++) {
+ if (speed->a11clk_src_sel == sel
+ && (speed->a11clk_src_div == div))
+ break;
+ }
+ if (speed->a11clk_khz == 0) {
+ printk(KERN_WARNING "Warning - ACPU clock reports invalid speed\n");
+ return;
+ }
+
+ drv_state.current_speed = speed;
+
+ rc = clk_set_rate(ebi1_clk, speed->axiclk_khz * 1000);
+ if (rc < 0)
+ pr_err("Setting AXI min rate failed!\n");
+
+ printk(KERN_INFO "ACPU running at %d KHz\n", speed->a11clk_khz);
+}
+
+unsigned long acpuclk_get_rate(void)
+{
+ WARN_ONCE(drv_state.current_speed == NULL,
+ "acpuclk_get_rate: not initialized\n");
+ if (drv_state.current_speed)
+ return drv_state.current_speed->a11clk_khz;
+ else
+ return 0;
+}
+
+uint32_t acpuclk_get_switch_time(void)
+{
+ return drv_state.acpu_switch_time_us;
+}
+
+/*----------------------------------------------------------------------------
+ * Clock driver initialization
+ *---------------------------------------------------------------------------*/
+
+/* Initialize the lpj field in the acpu_freq_tbl. */
+static void __init lpj_init(void)
+{
+ int i;
+ const struct clkctl_acpu_speed *base_clk = drv_state.current_speed;
+ for (i = 0; acpu_freq_tbl[i].a11clk_khz; i++) {
+ acpu_freq_tbl[i].lpj = cpufreq_scale(loops_per_jiffy,
+ base_clk->a11clk_khz,
+ acpu_freq_tbl[i].a11clk_khz);
+ }
+}
+
+void __init msm_acpu_clock_init(struct msm_acpu_clock_platform_data *clkdata)
+{
+ pr_info("acpu_clock_init()\n");
+
+ ebi1_clk = clk_get(NULL, "ebi1_clk");
+
+ mutex_init(&drv_state.lock);
+ drv_state.acpu_switch_time_us = clkdata->acpu_switch_time_us;
+ drv_state.max_speed_delta_khz = clkdata->max_speed_delta_khz;
+ drv_state.vdd_switch_time_us = clkdata->vdd_switch_time_us;
+ drv_state.power_collapse_khz = clkdata->power_collapse_khz;
+ drv_state.wait_for_irq_khz = clkdata->wait_for_irq_khz;
+ acpuclk_init();
+ lpj_init();
+#ifdef CONFIG_CPU_FREQ_TABLE
+ cpufreq_frequency_table_get_attr(freq_table, smp_processor_id());
+#endif
+}
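
The core of acpuclk_set_rate() above is a table walk: whenever the gap between the current and target ACPU frequency exceeds max_speed_delta_khz, it hops along the per-entry up/down indices instead of jumping directly, so each individual switch stays within a safe delta. A trimmed, stand-alone sketch of that walk follows; it is not the driver itself, and the table contents and the 256000 kHz delta are illustrative.

/*
 * Sketch (not kernel code): frequency stepping via up/down indices, in
 * the style of acpuclk_set_rate().  Steps from 19200 kHz to 528000 kHz
 * in hops no larger than max_delta_khz.
 */
#include <stdio.h>
#include <stdlib.h>

struct speed {
	unsigned int khz;
	short down, up;		/* table indices, -1 = no step available */
};

static const struct speed tbl[] = {
	{  19200, -1,  2 },	/* [0] TCXO */
	{ 122880, -1,  2 },	/* [1] */
	{ 245760, -1,  3 },	/* [2] */
	{ 352000,  2,  4 },	/* [3] */
	{ 528000,  3, -1 },	/* [4] */
};

int main(void)
{
	const unsigned int max_delta_khz = 256000;	/* illustrative */
	const struct speed *cur = &tbl[0], *tgt = &tbl[4];

	while (cur != tgt) {
		int d = abs((int)(cur->khz - tgt->khz));

		if (d > (int)max_delta_khz) {
			short idx = tgt->khz > cur->khz ? cur->up : cur->down;

			if (idx < 0)
				return 1;	/* no stepping path */
			cur = &tbl[idx];
		} else {
			cur = tgt;
		}
		printf("step to %u kHz\n", cur->khz);
	}
	return 0;
}
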
diff --git a/arch/arm/mach-msm/acpuclock.h b/arch/arm/mach-msm/acpuclock.h
new file mode 100644
index 000000000000..415de2eb9a5e
--- /dev/null
+++ b/arch/arm/mach-msm/acpuclock.h
@@ -0,0 +1,32 @@
+/* arch/arm/mach-msm/acpuclock.h
+ *
+ * MSM architecture clock driver header
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007 QUALCOMM Incorporated
+ * Author: San Mehat <san@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_ACPUCLOCK_H
+#define __ARCH_ARM_MACH_MSM_ACPUCLOCK_H
+
+int acpuclk_set_rate(unsigned long rate, int for_power_collapse);
+unsigned long acpuclk_get_rate(void);
+uint32_t acpuclk_get_switch_time(void);
+unsigned long acpuclk_wait_for_irq(void);
+unsigned long acpuclk_power_collapse(void);
+unsigned long acpuclk_get_wfi_rate(void);
+
+
+#endif
+
diff --git a/arch/arm/mach-msm/board-halibut.c b/arch/arm/mach-msm/board-halibut.c
index e61967dde9a1..acc22886e340 100644
--- a/arch/arm/mach-msm/board-halibut.c
+++ b/arch/arm/mach-msm/board-halibut.c
@@ -26,6 +26,7 @@
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/flash.h>
+#include <asm/setup.h>
#include <mach/irqs.h>
#include <mach/board.h>
@@ -77,6 +78,15 @@ static void __init halibut_init(void)
platform_add_devices(devices, ARRAY_SIZE(devices));
}
+static void __init halibut_fixup(struct machine_desc *desc, struct tag *tags,
+ char **cmdline, struct meminfo *mi)
+{
+ mi->nr_banks = 1;
+ mi->bank[0].start = PHYS_OFFSET;
+ mi->bank[0].node = PHYS_TO_NID(PHYS_OFFSET);
+ mi->bank[0].size = (101*1024*1024);
+}
+
static void __init halibut_map_io(void)
{
msm_map_common_io();
@@ -84,7 +94,12 @@ static void __init halibut_map_io(void)
}
MACHINE_START(HALIBUT, "Halibut Board (QCT SURF7200A)")
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
.boot_params = 0x10000100,
+ .fixup = halibut_fixup,
.map_io = halibut_map_io,
.init_irq = halibut_init_irq,
.init_machine = halibut_init,
diff --git a/arch/arm/mach-msm/board-mahimahi.c b/arch/arm/mach-msm/board-mahimahi.c
new file mode 100644
index 000000000000..bcbefdfe7b5e
--- /dev/null
+++ b/arch/arm/mach-msm/board-mahimahi.c
@@ -0,0 +1,87 @@
+/* linux/arch/arm/mach-msm/board-mahimahi.c
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Copyright (C) 2009 HTC Corporation.
+ * Author: Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/setup.h>
+
+#include <mach/board.h>
+#include <mach/hardware.h>
+#include <mach/system.h>
+
+#include "board-mahimahi.h"
+#include "devices.h"
+#include "proc_comm.h"
+
+static uint debug_uart;
+
+module_param_named(debug_uart, debug_uart, uint, 0);
+
+static struct platform_device *devices[] __initdata = {
+#if !defined(CONFIG_MSM_SERIAL_DEBUGGER)
+ &msm_device_uart1,
+#endif
+ &msm_device_uart_dm1,
+ &msm_device_nand,
+};
+
+static void __init mahimahi_init(void)
+{
+ platform_add_devices(devices, ARRAY_SIZE(devices));
+}
+
+static void __init mahimahi_fixup(struct machine_desc *desc, struct tag *tags,
+ char **cmdline, struct meminfo *mi)
+{
+ mi->nr_banks = 2;
+ mi->bank[0].start = PHYS_OFFSET;
+ mi->bank[0].node = PHYS_TO_NID(PHYS_OFFSET);
+ mi->bank[0].size = (219*1024*1024);
+ mi->bank[1].start = MSM_HIGHMEM_BASE;
+ mi->bank[1].node = PHYS_TO_NID(MSM_HIGHMEM_BASE);
+ mi->bank[1].size = MSM_HIGHMEM_SIZE;
+}
+
+static void __init mahimahi_map_io(void)
+{
+ msm_map_common_io();
+ msm_clock_init();
+}
+
+extern struct sys_timer msm_timer;
+
+MACHINE_START(MAHIMAHI, "mahimahi")
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
+ .boot_params = 0x20000100,
+ .fixup = mahimahi_fixup,
+ .map_io = mahimahi_map_io,
+ .init_irq = msm_init_irq,
+ .init_machine = mahimahi_init,
+ .timer = &msm_timer,
+MACHINE_END
diff --git a/arch/arm/mach-msm/board-msm7x27.c b/arch/arm/mach-msm/board-msm7x27.c
new file mode 100644
index 000000000000..cccb9f3c9d01
--- /dev/null
+++ b/arch/arm/mach-msm/board-msm7x27.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/input.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/bootmem.h>
+#include <linux/power_supply.h>
+
+#include <mach/hardware.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/flash.h>
+#include <asm/setup.h>
+#ifdef CONFIG_CACHE_L2X0
+#include <asm/hardware/cache-l2x0.h>
+#endif
+
+#include <mach/vreg.h>
+#include <mach/mpp.h>
+#include <mach/gpio.h>
+#include <mach/board.h>
+#include <mach/msm_iomap.h>
+
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+
+#include "devices.h"
+#include "socinfo.h"
+#include "clock.h"
+
+static struct resource smc91x_resources[] = {
+ [0] = {
+ .start = 0x9C004300,
+ .end = 0x9C0043ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = MSM_GPIO_TO_INT(132),
+ .end = MSM_GPIO_TO_INT(132),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device smc91x_device = {
+ .name = "smc91x",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(smc91x_resources),
+ .resource = smc91x_resources,
+};
+
+static struct platform_device *devices[] __initdata = {
+ &msm_device_uart3,
+ &msm_device_smd,
+ &msm_device_dmov,
+ &msm_device_nand,
+ &smc91x_device,
+};
+
+extern struct sys_timer msm_timer;
+
+static void __init msm7x2x_init_irq(void)
+{
+ msm_init_irq();
+}
+
+static void __init msm7x2x_init(void)
+{
+ if (socinfo_init() < 0)
+ BUG();
+
+ if (machine_is_msm7x25_ffa() || machine_is_msm7x27_ffa()) {
+ smc91x_resources[0].start = 0x98000300;
+ smc91x_resources[0].end = 0x980003ff;
+ smc91x_resources[1].start = MSM_GPIO_TO_INT(85);
+ smc91x_resources[1].end = MSM_GPIO_TO_INT(85);
+ if (gpio_tlmm_config(GPIO_CFG(85, 0,
+ GPIO_INPUT,
+ GPIO_PULL_DOWN,
+ GPIO_2MA),
+ GPIO_ENABLE)) {
+ printk(KERN_ERR
+ "%s: Err: Config GPIO-85 INT\n",
+ __func__);
+ }
+ }
+
+ platform_add_devices(devices, ARRAY_SIZE(devices));
+}
+
+static void __init msm7x2x_map_io(void)
+{
+ msm_map_common_io();
+ /* Technically dependent on the SoC but using machine_is
+ * macros since socinfo is not available this early and there
+ * are plans to restructure the code which will eliminate the
+ * need for socinfo.
+ */
+ if (machine_is_msm7x27_surf() || machine_is_msm7x27_ffa())
+ msm_clock_init(msm_clocks_7x27, msm_num_clocks_7x27);
+
+ if (machine_is_msm7x25_surf() || machine_is_msm7x25_ffa())
+ msm_clock_init(msm_clocks_7x25, msm_num_clocks_7x25);
+
+#ifdef CONFIG_CACHE_L2X0
+ if (machine_is_msm7x27_surf() || machine_is_msm7x27_ffa()) {
+ /* 7x27 has 256KB L2 cache:
+ 64KB/Way and 4-Way Associativity;
+ R/W latency: 3 cycles;
+ evmon/parity/share disabled. */
+ l2x0_init(MSM_L2CC_BASE, 0x00068012, 0xfe000000);
+ }
+#endif
+}
+
+MACHINE_START(MSM7X27_SURF, "QCT MSM7x27 SURF")
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
+ .boot_params = PHYS_OFFSET + 0x100,
+ .map_io = msm7x2x_map_io,
+ .init_irq = msm7x2x_init_irq,
+ .init_machine = msm7x2x_init,
+ .timer = &msm_timer,
+MACHINE_END
+
+MACHINE_START(MSM7X27_FFA, "QCT MSM7x27 FFA")
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
+ .boot_params = PHYS_OFFSET + 0x100,
+ .map_io = msm7x2x_map_io,
+ .init_irq = msm7x2x_init_irq,
+ .init_machine = msm7x2x_init,
+ .timer = &msm_timer,
+MACHINE_END
+
+MACHINE_START(MSM7X25_SURF, "QCT MSM7x25 SURF")
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
+ .boot_params = PHYS_OFFSET + 0x100,
+ .map_io = msm7x2x_map_io,
+ .init_irq = msm7x2x_init_irq,
+ .init_machine = msm7x2x_init,
+ .timer = &msm_timer,
+MACHINE_END
+
+MACHINE_START(MSM7X25_FFA, "QCT MSM7x25 FFA")
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
+ .boot_params = PHYS_OFFSET + 0x100,
+ .map_io = msm7x2x_map_io,
+ .init_irq = msm7x2x_init_irq,
+ .init_machine = msm7x2x_init,
+ .timer = &msm_timer,
+MACHINE_END
diff --git a/arch/arm/mach-msm/board-msm7x30.c b/arch/arm/mach-msm/board-msm7x30.c
new file mode 100644
index 000000000000..af132ebd86c5
--- /dev/null
+++ b/arch/arm/mach-msm/board-msm7x30.c
@@ -0,0 +1,196 @@
+/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/bootmem.h>
+#include <linux/io.h>
+#include <linux/smsc911x.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/setup.h>
+
+#include <mach/gpio.h>
+#include <mach/board.h>
+#include <mach/camera.h>
+#include <mach/memory.h>
+#include <mach/msm_iomap.h>
+#include <mach/msm_hsusb.h>
+#include <mach/dma.h>
+
+#include <mach/vreg.h>
+
+#include "devices.h"
+#include "timer.h"
+#include "socinfo.h"
+
+static struct resource smc91x_resources[] = {
+ [0] = {
+ .start = 0x8A000300,
+ .end = 0x8A0003ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = MSM_GPIO_TO_INT(156),
+ .end = MSM_GPIO_TO_INT(156),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device smc91x_device = {
+ .name = "smc91x",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(smc91x_resources),
+ .resource = smc91x_resources,
+};
+
+static struct smsc911x_platform_config smsc911x_config = {
+ .phy_interface = PHY_INTERFACE_MODE_MII,
+ .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
+ .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL,
+ .flags = SMSC911X_USE_32BIT,
+};
+
+static struct resource smsc911x_resources[] = {
+ [0] = {
+ .start = 0x8D000000,
+ .end = 0x8D000100,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = MSM_GPIO_TO_INT(88),
+ .end = MSM_GPIO_TO_INT(88),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device smsc911x_device = {
+ .name = "smsc911x",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(smsc911x_resources),
+ .resource = smsc911x_resources,
+ .dev = {
+ .platform_data = &smsc911x_config,
+ },
+};
+
+static struct msm_gpio smsc911x_gpios[] = {
+ { GPIO_CFG(172, 2, GPIO_OUTPUT, GPIO_PULL_DOWN, GPIO_2MA), "ebi2_addr6" },
+ { GPIO_CFG(173, 2, GPIO_OUTPUT, GPIO_PULL_DOWN, GPIO_2MA), "ebi2_addr5" },
+ { GPIO_CFG(174, 2, GPIO_OUTPUT, GPIO_PULL_DOWN, GPIO_2MA), "ebi2_addr4" },
+ { GPIO_CFG(175, 2, GPIO_OUTPUT, GPIO_PULL_DOWN, GPIO_2MA), "ebi2_addr3" },
+ { GPIO_CFG(176, 2, GPIO_OUTPUT, GPIO_PULL_DOWN, GPIO_2MA), "ebi2_addr2" },
+ { GPIO_CFG(177, 2, GPIO_OUTPUT, GPIO_PULL_DOWN, GPIO_2MA), "ebi2_addr1" },
+ { GPIO_CFG(178, 2, GPIO_OUTPUT, GPIO_PULL_DOWN, GPIO_2MA), "ebi2_addr0" },
+ { GPIO_CFG(88, 2, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), "smsc911x_irq" },
+};
+
+static void msm7x30_cfg_smsc911x(void)
+{
+ int rc;
+
+ rc = msm_gpios_request_enable(smsc911x_gpios,
+ ARRAY_SIZE(smsc911x_gpios));
+ if (rc)
+ pr_err("%s: unable to enable gpios\n", __func__);
+}
+
+static struct platform_device *devices[] __initdata = {
+ &msm_device_smd,
+ &msm_device_dmov,
+ &smc91x_device,
+ &smsc911x_device,
+ &msm_device_nand,
+ &msm_device_uart_dm1,
+};
+
+static void __init msm7x30_init_irq(void)
+{
+ msm_init_irq();
+}
+
+static void __init msm_7x30_init_nand(void)
+{
+ int rc;
+ /* Enable GPIO 86 & 115 */
+ rc = msm_gpios_request_enable(msm_nand_ebi2_cfg_data,
+ ARRAY_SIZE(msm_nand_ebi2_cfg_data));
+ if (rc)
+ printk(KERN_ERR "%s: Failed to enable GPIO 86 & 115\n",
+ __func__);
+}
+
+static void __init msm7x30_init(void)
+{
+ if (socinfo_init() < 0)
+ printk(KERN_ERR "%s: socinfo_init() failed!\n",
+ __func__);
+ msm_acpu_clock_init(&msm7x30_clock_data);
+ if (machine_is_msm7x30_surf() || machine_is_msm7x30_fluid())
+ msm7x30_cfg_smsc911x();
+ platform_add_devices(devices, ARRAY_SIZE(devices));
+ msm_7x30_init_nand();
+ buses_init();
+}
+
+static void __init msm7x30_map_io(void)
+{
+ msm_shared_ram_phys = 0x00000000;
+ msm_map_msm7x30_io();
+ msm7x30_allocate_memory_regions();
+ msm_clock_init(msm_clocks_7x30, msm_num_clocks_7x30);
+}
+
+MACHINE_START(MSM7X30_SURF, "QCT MSM7X30 SURF")
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
+ .boot_params = PHYS_OFFSET + 0x100,
+ .map_io = msm7x30_map_io,
+ .init_irq = msm7x30_init_irq,
+ .init_machine = msm7x30_init,
+ .timer = &msm_timer,
+MACHINE_END
+
+MACHINE_START(MSM7X30_FFA, "QCT MSM7X30 FFA")
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
+ .boot_params = PHYS_OFFSET + 0x100,
+ .map_io = msm7x30_map_io,
+ .init_irq = msm7x30_init_irq,
+ .init_machine = msm7x30_init,
+ .timer = &msm_timer,
+MACHINE_END
+
+MACHINE_START(MSM7X30_FLUID, "QCT MSM7X30 FLUID")
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
+ .boot_params = PHYS_OFFSET + 0x100,
+ .map_io = msm7x30_map_io,
+ .init_irq = msm7x30_init_irq,
+ .init_machine = msm7x30_init,
+ .timer = &msm_timer,
+MACHINE_END
diff --git a/arch/arm/mach-msm/board-qsd8x50.c b/arch/arm/mach-msm/board-qsd8x50.c
new file mode 100644
index 000000000000..a5e862c6ef7b
--- /dev/null
+++ b/arch/arm/mach-msm/board-qsd8x50.c
@@ -0,0 +1,176 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/bootmem.h>
+#include <linux/delay.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/io.h>
+#include <asm/setup.h>
+
+#include <mach/vreg.h>
+#include <mach/mpp.h>
+#include <mach/gpio.h>
+#include <mach/board.h>
+#include <mach/dma.h>
+#include <mach/memory.h>
+
+#include "devices.h"
+#include "timer.h"
+#include "socinfo.h"
+#include "proc_comm.h"
+
+#define MSM_SHARED_RAM_PHYS (MSM_SMI_BASE + 0x00100000)
+
+static struct resource smc91x_resources[] = {
+ [0] = {
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device smc91x_device = {
+ .name = "smc91x",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(smc91x_resources),
+ .resource = smc91x_resources,
+};
+
+static struct platform_device *devices[] __initdata = {
+ &smc91x_device,
+ &msm_device_smd,
+ &msm_device_dmov,
+ &msm_device_nand,
+};
+
+static void __init qsd8x50_init_irq(void)
+{
+ msm_init_irq();
+}
+
+static void __init qsd8x50_init_host(void)
+{
+ if (machine_is_qsd8x50_ffa() || machine_is_qsd8x50a_ffa())
+ return;
+
+ vreg_usb = vreg_get(NULL, "boost");
+
+ if (IS_ERR(vreg_usb)) {
+ printk(KERN_ERR "%s: vreg get failed (%ld)\n",
+ __func__, PTR_ERR(vreg_usb));
+ return;
+ }
+
+ platform_device_register(&msm_device_hsusb_otg);
+}
+
+static void __init qsd8x50_cfg_smc91x(void)
+{
+ int rc = 0;
+
+ if (machine_is_qsd8x50_surf() || machine_is_qsd8x50a_surf()) {
+ smc91x_resources[0].start = 0x70000300;
+ smc91x_resources[0].end = 0x700003ff;
+ smc91x_resources[1].start = MSM_GPIO_TO_INT(156);
+ smc91x_resources[1].end = MSM_GPIO_TO_INT(156);
+ } else if (machine_is_qsd8x50_ffa() || machine_is_qsd8x50a_ffa()) {
+ smc91x_resources[0].start = 0x84000300;
+ smc91x_resources[0].end = 0x840003ff;
+ smc91x_resources[1].start = MSM_GPIO_TO_INT(87);
+ smc91x_resources[1].end = MSM_GPIO_TO_INT(87);
+
+ rc = gpio_tlmm_config(GPIO_CFG(87, 0, GPIO_INPUT,
+ GPIO_PULL_DOWN, GPIO_2MA),
+ GPIO_ENABLE);
+ if (rc) {
+ printk(KERN_ERR "%s: gpio_tlmm_config=%d\n",
+ __func__, rc);
+ }
+ } else
+ printk(KERN_ERR "%s: invalid machine type\n", __func__);
+}
+
+static void __init qsd8x50_init(void)
+{
+ if (socinfo_init() < 0)
+ printk(KERN_ERR "%s: socinfo_init() failed!\n",
+ __func__);
+ qsd8x50_cfg_smc91x();
+ platform_add_devices(devices, ARRAY_SIZE(devices));
+}
+
+static void __init qsd8x50_map_io(void)
+{
+ msm_shared_ram_phys = MSM_SHARED_RAM_PHYS;
+ msm_map_qsd8x50_io();
+ msm_clock_init(msm_clocks_8x50, msm_num_clocks_8x50);
+}
+
+MACHINE_START(QSD8X50_SURF, "QCT QSD8X50 SURF")
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
+ .boot_params = PHYS_OFFSET + 0x100,
+ .map_io = qsd8x50_map_io,
+ .init_irq = qsd8x50_init_irq,
+ .init_machine = qsd8x50_init,
+ .timer = &msm_timer,
+MACHINE_END
+
+MACHINE_START(QSD8X50_FFA, "QCT QSD8X50 FFA")
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
+ .boot_params = PHYS_OFFSET + 0x100,
+ .map_io = qsd8x50_map_io,
+ .init_irq = qsd8x50_init_irq,
+ .init_machine = qsd8x50_init,
+ .timer = &msm_timer,
+MACHINE_END
+
+MACHINE_START(QSD8X50A_SURF, "QCT QSD8X50A SURF")
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
+ .boot_params = PHYS_OFFSET + 0x100,
+ .map_io = qsd8x50_map_io,
+ .init_irq = qsd8x50_init_irq,
+ .init_machine = qsd8x50_init,
+ .timer = &msm_timer,
+MACHINE_END
+
+MACHINE_START(QSD8X50A_FFA, "QCT QSD8X50A FFA")
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
+ .boot_params = PHYS_OFFSET + 0x100,
+ .map_io = qsd8x50_map_io,
+ .init_irq = qsd8x50_init_irq,
+ .init_machine = qsd8x50_init,
+ .timer = &msm_timer,
+MACHINE_END
diff --git a/arch/arm/mach-msm/board-sapphire.c b/arch/arm/mach-msm/board-sapphire.c
new file mode 100644
index 000000000000..2bc1b9d5623e
--- /dev/null
+++ b/arch/arm/mach-msm/board-sapphire.c
@@ -0,0 +1,118 @@
+/* linux/arch/arm/mach-msm/board-sapphire.c
+ * Copyright (C) 2007-2009 HTC Corporation.
+ * Author: Thomas Tsai <thomas_tsai@htc.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+*/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/sysdev.h>
+
+#include <linux/delay.h>
+
+#include <asm/gpio.h>
+#include <mach/hardware.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/flash.h>
+#include <asm/system.h>
+#include <mach/system.h>
+#include <mach/vreg.h>
+#include <mach/board.h>
+
+#include <asm/io.h>
+#include <asm/delay.h>
+#include <asm/setup.h>
+
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+
+#include "gpio_chip.h"
+#include "board-sapphire.h"
+#include "proc_comm.h"
+#include "devices.h"
+
+void msm_init_irq(void);
+void msm_init_gpio(void);
+
+static struct platform_device *devices[] __initdata = {
+ &msm_device_smd,
+ &msm_device_dmov,
+ &msm_device_nand,
+ &msm_device_uart1,
+ &msm_device_uart3,
+};
+
+extern struct sys_timer msm_timer;
+
+static void __init sapphire_init_irq(void)
+{
+ msm_init_irq();
+}
+
+static void __init sapphire_init(void)
+{
+ platform_add_devices(devices, ARRAY_SIZE(devices));
+}
+
+static struct map_desc sapphire_io_desc[] __initdata = {
+ {
+ .virtual = SAPPHIRE_CPLD_BASE,
+ .pfn = __phys_to_pfn(SAPPHIRE_CPLD_START),
+ .length = SAPPHIRE_CPLD_SIZE,
+ .type = MT_DEVICE_NONSHARED
+ }
+};
+
+static void __init sapphire_fixup(struct machine_desc *desc, struct tag *tags,
+ char **cmdline, struct meminfo *mi)
+{
+ int smi_sz = parse_tag_smi((const struct tag *)tags);
+
+ mi->nr_banks = 1;
+ mi->bank[0].start = PHYS_OFFSET;
+ mi->bank[0].node = PHYS_TO_NID(PHYS_OFFSET);
+ if (smi_sz == 32) {
+ mi->bank[0].size = (84*1024*1024);
+ } else if (smi_sz == 64) {
+ mi->bank[0].size = (101*1024*1024);
+ } else {
+		/* Fall back to the default when the SMI size is unknown */
+ smi_sz = 64;
+ mi->bank[0].size = (101*1024*1024);
+ }
+}
+
+static void __init sapphire_map_io(void)
+{
+ msm_map_common_io();
+ iotable_init(sapphire_io_desc, ARRAY_SIZE(sapphire_io_desc));
+ msm_clock_init();
+}
+
+MACHINE_START(SAPPHIRE, "sapphire")
+/* Maintainer: Brian Swetland <swetland@google.com> */
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
+ .boot_params = PHYS_OFFSET + 0x100,
+ .fixup = sapphire_fixup,
+ .map_io = sapphire_map_io,
+ .init_irq = sapphire_init_irq,
+ .init_machine = sapphire_init,
+ .timer = &msm_timer,
+MACHINE_END
diff --git a/arch/arm/mach-msm/board-dream.c b/arch/arm/mach-msm/board-trout.c
index 21afa8513168..b88aec269df5 100644
--- a/arch/arm/mach-msm/board-dream.c
+++ b/arch/arm/mach-msm/board-trout.c
@@ -1,4 +1,4 @@
-/* linux/arch/arm/mach-msm/board-dream.c
+/* linux/arch/arm/mach-msm/board-trout.c
*
* Copyright (C) 2009 Google, Inc.
* Author: Brian Swetland <swetland@google.com>
@@ -28,7 +28,7 @@
#include <mach/msm_iomap.h>
#include "devices.h"
-#include "board-dream.h"
+#include "board-trout.h"
static struct platform_device *devices[] __initdata = {
&msm_device_uart3,
@@ -82,8 +82,10 @@ static void __init trout_map_io(void)
}
MACHINE_START(TROUT, "HTC Dream")
+#ifdef CONFIG_MSM_DEBUG_UART
.phys_io = MSM_DEBUG_UART_PHYS,
.io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
.boot_params = 0x10000100,
.fixup = trout_fixup,
.map_io = trout_map_io,
diff --git a/arch/arm/mach-msm/board-dream.h b/arch/arm/mach-msm/board-trout.h
index 4f345a5a0a61..4f345a5a0a61 100644
--- a/arch/arm/mach-msm/board-dream.h
+++ b/arch/arm/mach-msm/board-trout.h
diff --git a/arch/arm/mach-msm/devices.c b/arch/arm/mach-msm/devices.c
index 31b6b30e98bf..982f1da60160 100644
--- a/arch/arm/mach-msm/devices.c
+++ b/arch/arm/mach-msm/devices.c
@@ -24,6 +24,8 @@
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
+#include <mach/mmc.h>
+
static struct resource resources_uart1[] = {
{
.start = INT_UART1,
@@ -163,8 +165,19 @@ static struct resource resources_sdc1[] = {
},
{
.start = INT_SDC1_0,
+ .end = INT_SDC1_0,
+ .flags = IORESOURCE_IRQ,
+ .name = "cmd_irq",
+ },
+ {
+ .start = INT_SDC1_1,
.end = INT_SDC1_1,
.flags = IORESOURCE_IRQ,
+ .name = "pio_irq",
+ },
+ {
+ .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED,
+ .name = "status_irq"
},
{
.start = 8,
@@ -181,8 +194,19 @@ static struct resource resources_sdc2[] = {
},
{
.start = INT_SDC2_0,
+ .end = INT_SDC2_0,
+ .flags = IORESOURCE_IRQ,
+ .name = "cmd_irq",
+ },
+ {
+ .start = INT_SDC2_1,
.end = INT_SDC2_1,
.flags = IORESOURCE_IRQ,
+ .name = "pio_irq",
+ },
+ {
+ .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED,
+ .name = "status_irq"
},
{
.start = 8,
@@ -199,8 +223,19 @@ static struct resource resources_sdc3[] = {
},
{
.start = INT_SDC3_0,
+ .end = INT_SDC3_0,
+ .flags = IORESOURCE_IRQ,
+ .name = "cmd_irq",
+ },
+ {
+ .start = INT_SDC3_1,
.end = INT_SDC3_1,
.flags = IORESOURCE_IRQ,
+ .name = "pio_irq",
+ },
+ {
+ .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED,
+ .name = "status_irq"
},
{
.start = 8,
@@ -217,8 +252,19 @@ static struct resource resources_sdc4[] = {
},
{
.start = INT_SDC4_0,
+ .end = INT_SDC4_0,
+ .flags = IORESOURCE_IRQ,
+ .name = "cmd_irq",
+ },
+ {
+ .start = INT_SDC4_1,
.end = INT_SDC4_1,
.flags = IORESOURCE_IRQ,
+ .name = "pio_irq",
+ },
+ {
+ .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED,
+ .name = "status_irq"
},
{
.start = 8,
@@ -266,3 +312,35 @@ struct platform_device msm_device_sdc4 = {
.coherent_dma_mask = 0xffffffff,
},
};
+
+static struct platform_device *msm_sdcc_devices[] __initdata = {
+ &msm_device_sdc1,
+ &msm_device_sdc2,
+ &msm_device_sdc3,
+ &msm_device_sdc4,
+};
+
+int __init msm_add_sdcc(unsigned int controller, struct mmc_platform_data *plat,
+ unsigned int stat_irq, unsigned long stat_irq_flags)
+{
+ struct platform_device *pdev;
+ struct resource *res;
+
+ if (controller < 1 || controller > 4)
+ return -EINVAL;
+
+ pdev = msm_sdcc_devices[controller-1];
+ pdev->dev.platform_data = plat;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "status_irq");
+ if (!res)
+ return -EINVAL;
+ else if (stat_irq) {
+ res->start = res->end = stat_irq;
+ res->flags &= ~IORESOURCE_DISABLED;
+ res->flags |= stat_irq_flags;
+ }
+
+ return platform_device_register(pdev);
+}
+
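
For illustration only (this sketch is not part of the patch), a board file could hand its SDCC platform data and card-detect interrupt to the new msm_add_sdcc() helper roughly as follows; the ocr_mask field, the GPIO number and the trigger flag are assumptions:

    #include <linux/ioport.h>
    #include <linux/mmc/host.h>
    #include <mach/mmc.h>

    static struct mmc_platform_data example_sdc1_data = {
    	.ocr_mask = MMC_VDD_28_29,	/* assumed field and value */
    };

    static void __init example_init_mmc(void)
    {
    	/* controller 1: the trigger flags are ORed into the new
    	 * "status_irq" resource before the device is registered */
    	msm_add_sdcc(1, &example_sdc1_data, MSM_GPIO_TO_INT(37),
    		     IORESOURCE_IRQ_LOWEDGE);
    }
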
diff --git a/arch/arm/mach-msm/dma.c b/arch/arm/mach-msm/dma.c
index f5420f9585c5..d029d1f5f9e2 100644
--- a/arch/arm/mach-msm/dma.c
+++ b/arch/arm/mach-msm/dma.c
@@ -13,6 +13,8 @@
*
*/
+#include <linux/clk.h>
+#include <linux/err.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <mach/dma.h>
@@ -26,6 +28,7 @@ enum {
};
static DEFINE_SPINLOCK(msm_dmov_lock);
+static struct clk *msm_dmov_clk;
static unsigned int channel_active;
static struct list_head ready_commands[MSM_DMOV_CHANNEL_COUNT];
static struct list_head active_commands[MSM_DMOV_CHANNEL_COUNT];
@@ -54,6 +57,9 @@ void msm_dmov_enqueue_cmd(unsigned id, struct msm_dmov_cmd *cmd)
unsigned int status;
spin_lock_irqsave(&msm_dmov_lock, irq_flags);
+ if (!channel_active)
+ clk_enable(msm_dmov_clk);
+ dsb();
status = readl(DMOV_STATUS(id));
if (list_empty(&ready_commands[id]) &&
(status & DMOV_STATUS_CMD_PTR_RDY)) {
@@ -63,6 +69,8 @@ void msm_dmov_enqueue_cmd(unsigned id, struct msm_dmov_cmd *cmd)
writel(DMOV_CONFIG_IRQ_EN, DMOV_CONFIG(id));
}
#endif
+ if (cmd->execute_func)
+ cmd->execute_func(cmd);
PRINT_IO("msm_dmov_enqueue_cmd(%d), start command, status %x\n", id, status);
list_add_tail(&cmd->list, &active_commands[id]);
if (!channel_active)
@@ -70,6 +78,8 @@ void msm_dmov_enqueue_cmd(unsigned id, struct msm_dmov_cmd *cmd)
channel_active |= 1U << id;
writel(cmd->cmdptr, DMOV_CMD_PTR(id));
} else {
+ if (!channel_active)
+ clk_disable(msm_dmov_clk);
if (list_empty(&active_commands[id]))
PRINT_ERROR("msm_dmov_enqueue_cmd(%d), error datamover stalled, status %x\n", id, status);
@@ -108,6 +118,7 @@ int msm_dmov_exec_cmd(unsigned id, unsigned int cmdptr)
cmd.dmov_cmd.cmdptr = cmdptr;
cmd.dmov_cmd.complete_func = dmov_exec_cmdptr_complete_func;
+ cmd.dmov_cmd.execute_func = NULL;
cmd.id = id;
init_completion(&cmd.complete);
@@ -165,6 +176,7 @@ static irqreturn_t msm_datamover_irq_handler(int irq, void *dev_id)
"for %p, result %x\n", id, cmd, ch_result);
if (cmd) {
list_del(&cmd->list);
+ dsb();
cmd->complete_func(cmd, ch_result, NULL);
}
}
@@ -181,6 +193,7 @@ static irqreturn_t msm_datamover_irq_handler(int irq, void *dev_id)
PRINT_FLOW("msm_datamover_irq_handler id %d, flush, result %x, flush0 %x\n", id, ch_result, errdata.flush[0]);
if (cmd) {
list_del(&cmd->list);
+ dsb();
cmd->complete_func(cmd, ch_result, &errdata);
}
}
@@ -198,6 +211,7 @@ static irqreturn_t msm_datamover_irq_handler(int irq, void *dev_id)
PRINT_ERROR("msm_datamover_irq_handler id %d, error, result %x, flush0 %x\n", id, ch_result, errdata.flush[0]);
if (cmd) {
list_del(&cmd->list);
+ dsb();
cmd->complete_func(cmd, ch_result, &errdata);
}
/* this does not seem to work, once we get an error */
@@ -210,6 +224,8 @@ static irqreturn_t msm_datamover_irq_handler(int irq, void *dev_id)
cmd = list_entry(ready_commands[id].next, typeof(*cmd), list);
list_del(&cmd->list);
list_add_tail(&cmd->list, &active_commands[id]);
+ if (cmd->execute_func)
+ cmd->execute_func(cmd);
PRINT_FLOW("msm_datamover_irq_handler id %d, start command\n", id);
writel(cmd->cmdptr, DMOV_CMD_PTR(id));
}
@@ -219,8 +235,10 @@ static irqreturn_t msm_datamover_irq_handler(int irq, void *dev_id)
PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
}
- if (!channel_active)
- disable_irq(INT_ADM_AARM);
+ if (!channel_active) {
+ disable_irq_nosync(INT_ADM_AARM);
+ clk_disable(msm_dmov_clk);
+ }
spin_unlock_irqrestore(&msm_dmov_lock, irq_flags);
return IRQ_HANDLED;
@@ -230,11 +248,17 @@ static int __init msm_init_datamover(void)
{
int i;
int ret;
+ struct clk *clk;
+
for (i = 0; i < MSM_DMOV_CHANNEL_COUNT; i++) {
INIT_LIST_HEAD(&ready_commands[i]);
INIT_LIST_HEAD(&active_commands[i]);
writel(DMOV_CONFIG_IRQ_EN | DMOV_CONFIG_FORCE_TOP_PTR_RSLT | DMOV_CONFIG_FORCE_FLUSH_RSLT, DMOV_CONFIG(i));
}
+ clk = clk_get(NULL, "adm_clk");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ msm_dmov_clk = clk;
ret = request_irq(INT_ADM_AARM, msm_datamover_irq_handler, 0, "msmdatamover", NULL);
if (ret)
return ret;
diff --git a/arch/arm/mach-msm/include/mach/board.h b/arch/arm/mach-msm/include/mach/board.h
index 264d62e519f3..7933641c4761 100644
--- a/arch/arm/mach-msm/include/mach/board.h
+++ b/arch/arm/mach-msm/include/mach/board.h
@@ -21,12 +21,16 @@
/* platform device data structures */
-struct msm_mddi_platform_data
+struct msm_acpu_clock_platform_data
{
- void (*panel_power)(int on);
- unsigned has_vsync_irq:1;
+ uint32_t acpu_switch_time_us;
+ uint32_t max_speed_delta_khz;
+ uint32_t vdd_switch_time_us;
+ unsigned long power_collapse_khz;
+ unsigned long wait_for_irq_khz;
};
+
/* common init routines for use by arch/arm/mach-msm/board-*.c */
void __init msm_add_devices(void);
@@ -34,5 +38,6 @@ void __init msm_map_common_io(void);
void __init msm_init_irq(void);
void __init msm_init_gpio(void);
void __init msm_clock_init(void);
+void __init msm_acpu_clock_init(struct msm_acpu_clock_platform_data *);
#endif
diff --git a/arch/arm/mach-msm/include/mach/dma.h b/arch/arm/mach-msm/include/mach/dma.h
index 5ab5bdffab07..78b0ffdf27e8 100644
--- a/arch/arm/mach-msm/include/mach/dma.h
+++ b/arch/arm/mach-msm/include/mach/dma.h
@@ -28,6 +28,8 @@ struct msm_dmov_cmd {
void (*complete_func)(struct msm_dmov_cmd *cmd,
unsigned int result,
struct msm_dmov_errdata *err);
+ void (*execute_func)(struct msm_dmov_cmd *cmd);
+ void *data;
};
void msm_dmov_enqueue_cmd(unsigned id, struct msm_dmov_cmd *cmd);
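
A hypothetical data-mover client using the new execute_func/data members might be wired up as below (the names and the embedding struct are illustrative, not taken from this patch); execute_func is invoked with the dmov spinlock held just before the command pointer is written, and complete_func is called from the ADM interrupt handler:

    #include <linux/types.h>
    #include <mach/dma.h>

    struct example_req {
    	struct msm_dmov_cmd dmov;
    	u32 cmdptr;			/* dma-visible command pointer */
    };

    static void example_execute(struct msm_dmov_cmd *cmd)
    {
    	/* runs with the dmov spinlock held, immediately before the
    	 * command pointer is written to the data mover */
    }

    static void example_complete(struct msm_dmov_cmd *cmd, unsigned int result,
    			     struct msm_dmov_errdata *err)
    {
    	struct example_req *req = cmd->data;
    	/* completion runs from the ADM interrupt handler */
    	(void)req;
    }

    static void example_submit(struct example_req *req, unsigned channel)
    {
    	req->dmov.cmdptr = req->cmdptr;
    	req->dmov.complete_func = example_complete;
    	req->dmov.execute_func = example_execute;
    	req->dmov.data = req;
    	msm_dmov_enqueue_cmd(channel, &req->dmov);
    }
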
diff --git a/arch/arm/mach-msm/include/mach/msm_fb.h b/arch/arm/mach-msm/include/mach/msm_fb.h
new file mode 100644
index 000000000000..1f4fc81b3d8f
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/msm_fb.h
@@ -0,0 +1,147 @@
+/* arch/arm/mach-msm/include/mach/msm_fb.h
+ *
+ * Internal shared definitions for various MSM framebuffer parts.
+ *
+ * Copyright (C) 2007 Google Incorporated
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_FB_H_
+#define _MSM_FB_H_
+
+#include <linux/device.h>
+
+struct mddi_info;
+
+struct msm_fb_data {
+ int xres; /* x resolution in pixels */
+ int yres; /* y resolution in pixels */
+	int width;	/* display width in mm */
+ int height; /* display height in mm */
+ unsigned output_format;
+};
+
+struct msmfb_callback {
+ void (*func)(struct msmfb_callback *);
+};
+
+enum {
+ MSM_MDDI_PMDH_INTERFACE,
+ MSM_MDDI_EMDH_INTERFACE,
+ MSM_EBI2_INTERFACE,
+};
+
+#define MSMFB_CAP_PARTIAL_UPDATES (1 << 0)
+
+struct msm_panel_data {
+ /* turns off the fb memory */
+ int (*suspend)(struct msm_panel_data *);
+ /* turns on the fb memory */
+ int (*resume)(struct msm_panel_data *);
+ /* turns off the panel */
+ int (*blank)(struct msm_panel_data *);
+ /* turns on the panel */
+ int (*unblank)(struct msm_panel_data *);
+ void (*wait_vsync)(struct msm_panel_data *);
+ void (*request_vsync)(struct msm_panel_data *, struct msmfb_callback *);
+ void (*clear_vsync)(struct msm_panel_data *);
+ /* from the enum above */
+ unsigned interface_type;
+ /* data to be passed to the fb driver */
+ struct msm_fb_data *fb_data;
+
+ /* capabilities supported by the panel */
+ uint32_t caps;
+};
+
+struct msm_mddi_client_data {
+ void (*suspend)(struct msm_mddi_client_data *);
+ void (*resume)(struct msm_mddi_client_data *);
+ void (*activate_link)(struct msm_mddi_client_data *);
+ void (*remote_write)(struct msm_mddi_client_data *, uint32_t val,
+ uint32_t reg);
+ uint32_t (*remote_read)(struct msm_mddi_client_data *, uint32_t reg);
+ void (*auto_hibernate)(struct msm_mddi_client_data *, int);
+ /* custom data that needs to be passed from the board file to a
+ * particular client */
+ void *private_client_data;
+ struct resource *fb_resource;
+ /* from the list above */
+ unsigned interface_type;
+};
+
+struct msm_mddi_platform_data {
+ unsigned int clk_rate;
+ void (*power_client)(struct msm_mddi_client_data *, int on);
+
+ /* fixup the mfr name, product id */
+ void (*fixup)(uint16_t *mfr_name, uint16_t *product_id);
+
+ struct resource *fb_resource; /*optional*/
+ /* number of clients in the list that follows */
+ int num_clients;
+ /* array of client information of clients */
+ struct {
+ unsigned product_id; /* mfr id in top 16 bits, product id
+ * in lower 16 bits
+ */
+ char *name; /* the device name will be the platform
+ * device name registered for the client,
+ * it should match the name of the associated
+ * driver
+ */
+ unsigned id; /* id for mddi client device node, will also
+ * be used as device id of panel devices; if
+ * the client device has multiple panels,
+ * space must be left here for them
+ */
+ void *client_data; /* required private client data */
+ unsigned int clk_rate; /* optional: if the client requires a
+ * different mddi clk rate
+ */
+ } client_platform_data[];
+};
+
+struct mdp_blit_req;
+struct fb_info;
+struct mdp_device {
+ struct device dev;
+ void (*dma)(struct mdp_device *mpd, uint32_t addr,
+ uint32_t stride, uint32_t w, uint32_t h, uint32_t x,
+ uint32_t y, struct msmfb_callback *callback, int interface);
+ void (*dma_wait)(struct mdp_device *mdp);
+ int (*blit)(struct mdp_device *mdp, struct fb_info *fb,
+ struct mdp_blit_req *req);
+ void (*set_grp_disp)(struct mdp_device *mdp, uint32_t disp_id);
+};
+
+struct class_interface;
+int register_mdp_client(struct class_interface *class_intf);
+
+/**** private client data structs go below this line ***/
+
+struct msm_mddi_bridge_platform_data {
+ /* from board file */
+ int (*init)(struct msm_mddi_bridge_platform_data *,
+ struct msm_mddi_client_data *);
+ int (*uninit)(struct msm_mddi_bridge_platform_data *,
+ struct msm_mddi_client_data *);
+ /* passed to panel for use by the fb driver */
+ int (*blank)(struct msm_mddi_bridge_platform_data *,
+ struct msm_mddi_client_data *);
+ int (*unblank)(struct msm_mddi_bridge_platform_data *,
+ struct msm_mddi_client_data *);
+ struct msm_fb_data fb_data;
+};
+
+#endif
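
As a rough, hypothetical example of how a panel driver could populate the structures declared above (the resolution, physical size and capability values are placeholders):

    #include <mach/msm_fb.h>

    static struct msm_fb_data example_fb_data = {
    	.xres		= 480,
    	.yres		= 320,
    	.width		= 45,	/* mm */
    	.height		= 67,	/* mm */
    	.output_format	= 0,
    };

    static int example_blank(struct msm_panel_data *panel)
    {
    	/* turn the panel off */
    	return 0;
    }

    static int example_unblank(struct msm_panel_data *panel)
    {
    	/* turn the panel on */
    	return 0;
    }

    static struct msm_panel_data example_panel = {
    	.blank		= example_blank,
    	.unblank	= example_unblank,
    	.interface_type	= MSM_MDDI_PMDH_INTERFACE,
    	.fb_data	= &example_fb_data,
    	.caps		= MSMFB_CAP_PARTIAL_UPDATES,
    };
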
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap.h b/arch/arm/mach-msm/include/mach/msm_iomap.h
index 9dae1a98c77a..ce1effed5894 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap.h
@@ -130,4 +130,10 @@
#define MSM_AD5_SIZE (SZ_1M*13)
+#if defined(CONFIG_ARCH_MSM7X30)
+#define MSM_GCC_BASE IOMEM(0xF8009000)
+#define MSM_GCC_PHYS 0xC0182000
+#define MSM_GCC_SIZE SZ_4K
+#endif
+
#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_smd.h b/arch/arm/mach-msm/include/mach/msm_smd.h
new file mode 100644
index 000000000000..029463ec8756
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/msm_smd.h
@@ -0,0 +1,109 @@
+/* linux/include/asm-arm/arch-msm/msm_smd.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ASM_ARCH_MSM_SMD_H
+#define __ASM_ARCH_MSM_SMD_H
+
+typedef struct smd_channel smd_channel_t;
+
+extern int (*msm_check_for_modem_crash)(void);
+
+/* warning: notify() may be called before open returns */
+int smd_open(const char *name, smd_channel_t **ch, void *priv,
+ void (*notify)(void *priv, unsigned event));
+
+#define SMD_EVENT_DATA 1
+#define SMD_EVENT_OPEN 2
+#define SMD_EVENT_CLOSE 3
+
+int smd_close(smd_channel_t *ch);
+
+/* passing a null pointer for data reads and discards */
+int smd_read(smd_channel_t *ch, void *data, int len);
+
+/* Write to stream channels may do a partial write and return
+** the length actually written.
+** Write to packet channels will never do a partial write --
+** it will return the requested length written or an error.
+*/
+int smd_write(smd_channel_t *ch, const void *data, int len);
+int smd_write_atomic(smd_channel_t *ch, const void *data, int len);
+
+int smd_write_avail(smd_channel_t *ch);
+int smd_read_avail(smd_channel_t *ch);
+
+/* Returns the total size of the current packet being read.
+** Returns 0 if no packets are available or this is a stream channel.
+*/
+int smd_cur_packet_size(smd_channel_t *ch);
+
+/* used for tty unthrottling and the like -- causes the notify()
+** callback to be called from the same lock context as is used
+** when it is called from channel updates
+*/
+void smd_kick(smd_channel_t *ch);
+
+
+#if 0
+/* these are interruptible waits which will block you until the specified
+** number of bytes are readable or writable.
+*/
+int smd_wait_until_readable(smd_channel_t *ch, int bytes);
+int smd_wait_until_writable(smd_channel_t *ch, int bytes);
+#endif
+
+typedef enum {
+ SMD_PORT_DS = 0,
+ SMD_PORT_DIAG,
+ SMD_PORT_RPC_CALL,
+ SMD_PORT_RPC_REPLY,
+ SMD_PORT_BT,
+ SMD_PORT_CONTROL,
+ SMD_PORT_MEMCPY_SPARE1,
+ SMD_PORT_DATA1,
+ SMD_PORT_DATA2,
+ SMD_PORT_DATA3,
+ SMD_PORT_DATA4,
+ SMD_PORT_DATA5,
+ SMD_PORT_DATA6,
+ SMD_PORT_DATA7,
+ SMD_PORT_DATA8,
+ SMD_PORT_DATA9,
+ SMD_PORT_DATA10,
+ SMD_PORT_DATA11,
+ SMD_PORT_DATA12,
+ SMD_PORT_DATA13,
+ SMD_PORT_DATA14,
+ SMD_PORT_DATA15,
+ SMD_PORT_DATA16,
+ SMD_PORT_DATA17,
+ SMD_PORT_DATA18,
+ SMD_PORT_DATA19,
+ SMD_PORT_DATA20,
+ SMD_PORT_GPS_NMEA,
+ SMD_PORT_BRIDGE_1,
+ SMD_PORT_BRIDGE_2,
+ SMD_PORT_BRIDGE_3,
+ SMD_PORT_BRIDGE_4,
+ SMD_PORT_BRIDGE_5,
+ SMD_PORT_LOOPBACK,
+ SMD_PORT_CS_APPS_MODEM,
+ SMD_PORT_CS_APPS_DSP,
+ SMD_PORT_CS_MODEM_DSP,
+ SMD_NUM_PORTS,
+} smd_port_id_type;
+
+#endif
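
A minimal, hypothetical SMD client built on this interface might look like the sketch below; the channel name and buffer handling are placeholders, and since notify() can fire before smd_open() returns and runs in atomic context, real drivers typically defer heavier work to a workqueue:

    #include <linux/kernel.h>
    #include <mach/msm_smd.h>

    static smd_channel_t *example_ch;

    static void example_notify(void *priv, unsigned event)
    {
    	char buf[64];
    	int avail;

    	if (event != SMD_EVENT_DATA)
    		return;

    	while ((avail = smd_read_avail(example_ch)) > 0) {
    		int n = min(avail, (int)sizeof(buf));

    		if (smd_read(example_ch, buf, n) != n)
    			break;
    		/* consume buf[0..n) here */
    	}
    }

    static int example_open(void)
    {
    	return smd_open("SMD_DATA1", &example_ch, NULL, example_notify);
    }
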
diff --git a/arch/arm/mach-msm/include/mach/system.h b/arch/arm/mach-msm/include/mach/system.h
index 574ccc493daf..d2e83f42ba16 100644
--- a/arch/arm/mach-msm/include/mach/system.h
+++ b/arch/arm/mach-msm/include/mach/system.h
@@ -21,3 +21,8 @@ static inline void arch_reset(char mode, const char *cmd)
{
for (;;) ; /* depends on IPC w/ other core */
}
+
+/* low level hardware reset hook -- for example, hitting the
+ * PSHOLD line on the PMIC to hard reset the system
+ */
+extern void (*msm_hw_reset_hook)(void);
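
A board file could install the hook along these lines (purely illustrative; the PS_HOLD GPIO number is a placeholder):

    #include <linux/init.h>
    #include <linux/gpio.h>
    #include <mach/system.h>

    #define EXAMPLE_PSHOLD_GPIO	25	/* placeholder */

    static void example_hw_reset(void)
    {
    	/* drop PS_HOLD so the PMIC hard-resets the SoC */
    	gpio_set_value(EXAMPLE_PSHOLD_GPIO, 0);
    }

    static void __init example_board_init(void)
    {
    	msm_hw_reset_hook = example_hw_reset;
    }
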
diff --git a/arch/arm/mach-msm/include/mach/vreg.h b/arch/arm/mach-msm/include/mach/vreg.h
index 9f9e25cb718e..6626e7864e28 100644
--- a/arch/arm/mach-msm/include/mach/vreg.h
+++ b/arch/arm/mach-msm/include/mach/vreg.h
@@ -23,7 +23,7 @@ struct vreg *vreg_get(struct device *dev, const char *id);
void vreg_put(struct vreg *vreg);
int vreg_enable(struct vreg *vreg);
-void vreg_disable(struct vreg *vreg);
+int vreg_disable(struct vreg *vreg);
int vreg_set_level(struct vreg *vreg, unsigned mv);
#endif
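
Typical regulator bring-up with this interface might look like the following sketch; the regulator name and millivolt level are examples, and the ERR_PTR-style error return from vreg_get() is an assumption rather than something this hunk shows:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <mach/vreg.h>

    static int example_power_on(struct device *dev)
    {
    	struct vreg *v;
    	int rc;

    	v = vreg_get(dev, "gp2");	/* name is an example */
    	if (IS_ERR(v))			/* assumes ERR_PTR on failure */
    		return PTR_ERR(v);

    	rc = vreg_set_level(v, 2850);	/* millivolts */
    	if (!rc)
    		rc = vreg_enable(v);
    	return rc;
    }
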
diff --git a/arch/arm/mach-msm/io.c b/arch/arm/mach-msm/io.c
index 05f96b780aa6..fde5e58b51c3 100644
--- a/arch/arm/mach-msm/io.c
+++ b/arch/arm/mach-msm/io.c
@@ -45,6 +45,9 @@ static struct map_desc msm_io_desc[] __initdata = {
#ifdef CONFIG_MSM_DEBUG_UART
MSM_DEVICE(DEBUG_UART),
#endif
+#ifdef CONFIG_ARCH_MSM7X30
+ MSM_DEVICE(GCC),
+#endif
{
.virtual = (unsigned long) MSM_SHARED_RAM_BASE,
.pfn = __phys_to_pfn(MSM_SHARED_RAM_PHYS),
diff --git a/arch/arm/mach-msm/irq.c b/arch/arm/mach-msm/irq.c
index 69ca0dd79bdf..6c8d5f8caef3 100644
--- a/arch/arm/mach-msm/irq.c
+++ b/arch/arm/mach-msm/irq.c
@@ -101,11 +101,11 @@ static int msm_irq_set_type(unsigned int irq, unsigned int flow_type)
if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
writel(readl(treg) | b, treg);
- set_irq_handler(irq, handle_edge_irq);
+ irq_desc[irq].handle_irq = handle_edge_irq;
}
if (flow_type & (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW)) {
writel(readl(treg) & (~b), treg);
- set_irq_handler(irq, handle_level_irq);
+ irq_desc[irq].handle_irq = handle_level_irq;
}
return 0;
}
diff --git a/arch/arm/mach-msm/last_radio_log.c b/arch/arm/mach-msm/last_radio_log.c
new file mode 100644
index 000000000000..b64ba5a98686
--- /dev/null
+++ b/arch/arm/mach-msm/last_radio_log.c
@@ -0,0 +1,82 @@
+/* arch/arm/mach-msm/last_radio_log.c
+ *
+ * Extract the log from a modem crash through SMEM
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/uaccess.h>
+
+#include "smd_private.h"
+
+static void *radio_log_base;
+static size_t radio_log_size;
+
+extern void *smem_item(unsigned id, unsigned *size);
+
+static ssize_t last_radio_log_read(struct file *file, char __user *buf,
+ size_t len, loff_t *offset)
+{
+ loff_t pos = *offset;
+ ssize_t count;
+
+ if (pos >= radio_log_size)
+ return 0;
+
+ count = min(len, (size_t)(radio_log_size - pos));
+ if (copy_to_user(buf, radio_log_base + pos, count)) {
+ pr_err("%s: copy to user failed\n", __func__);
+ return -EFAULT;
+ }
+
+ *offset += count;
+ return count;
+}
+
+static struct file_operations last_radio_log_fops = {
+ .read = last_radio_log_read
+};
+
+void msm_init_last_radio_log(struct module *owner)
+{
+ struct proc_dir_entry *entry;
+
+ if (last_radio_log_fops.owner) {
+ pr_err("%s: already claimed\n", __func__);
+ return;
+ }
+
+ radio_log_base = smem_item(SMEM_CLKREGIM_BSP, &radio_log_size);
+ if (!radio_log_base) {
+ pr_err("%s: could not retrieve SMEM_CLKREGIM_BSP\n", __func__);
+ return;
+ }
+
+ entry = create_proc_entry("last_radio_log", S_IFREG | S_IRUGO, NULL);
+ if (!entry) {
+ pr_err("%s: could not create proc entry for radio log\n",
+ __func__);
+ return;
+ }
+
+ pr_err("%s: last radio log is %d bytes long\n", __func__,
+ radio_log_size);
+ last_radio_log_fops.owner = owner;
+ entry->proc_fops = &last_radio_log_fops;
+ entry->size = radio_log_size;
+}
+EXPORT_SYMBOL(msm_init_last_radio_log);
diff --git a/arch/arm/mach-msm/proc_comm.c b/arch/arm/mach-msm/proc_comm.c
index 915ee704ed3c..67e701c7f183 100644
--- a/arch/arm/mach-msm/proc_comm.c
+++ b/arch/arm/mach-msm/proc_comm.c
@@ -23,11 +23,18 @@
#include "proc_comm.h"
-#define MSM_A2M_INT(n) (MSM_CSR_BASE + 0x400 + (n) * 4)
+static inline void msm_a2m_int(uint32_t irq)
+{
+#if defined(CONFIG_ARCH_MSM7X30)
+ writel(1 << irq, MSM_GCC_BASE + 0x8);
+#else
+ writel(1, MSM_CSR_BASE + 0x400 + (irq * 4));
+#endif
+}
static inline void notify_other_proc_comm(void)
{
- writel(1, MSM_A2M_INT(6));
+ msm_a2m_int(6);
}
#define APP_COMMAND 0x00
@@ -107,4 +114,17 @@ int msm_proc_comm(unsigned cmd, unsigned *data1, unsigned *data2)
return ret;
}
-
+/*
+ * We need to wait for the ARM9 to at least partially boot
+ * up before we can continue. Since the ARM9 does resource
+ * allocation, if we don't wait we could end up crashing or in
+ * an unknown state. This function should be called early to
+ * wait on the ARM9.
+ */
+void __init proc_comm_boot_wait(void)
+{
+ void __iomem *base = MSM_SHARED_RAM_BASE;
+
+ proc_comm_wait_for(base + MDM_STATUS, PCOM_READY);
+
+}
diff --git a/arch/arm/mach-msm/proc_comm.h b/arch/arm/mach-msm/proc_comm.h
index 834760f25692..12da4cacd4a8 100644
--- a/arch/arm/mach-msm/proc_comm.h
+++ b/arch/arm/mach-msm/proc_comm.h
@@ -16,6 +16,8 @@
#ifndef _ARCH_ARM_MACH_MSM_PROC_COMM_H_
#define _ARCH_ARM_MACH_MSM_PROC_COMM_H_
+#include <linux/init.h>
+
enum {
PCOM_CMD_IDLE = 0x0,
PCOM_CMD_DONE,
@@ -62,19 +64,104 @@ enum {
PCOM_RESET_CHIP_IMM,
PCOM_PM_VID_EN,
PCOM_VREG_PULLDOWN,
+ PCOM_GET_MODEM_VERSION,
+ PCOM_CLK_REGIME_SEC_RESET,
+ PCOM_CLK_REGIME_SEC_RESET_ASSERT,
+ PCOM_CLK_REGIME_SEC_RESET_DEASSERT,
+ PCOM_CLK_REGIME_SEC_PLL_REQUEST_WRP,
+ PCOM_CLK_REGIME_SEC_ENABLE,
+ PCOM_CLK_REGIME_SEC_DISABLE,
+ PCOM_CLK_REGIME_SEC_IS_ON,
+ PCOM_CLK_REGIME_SEC_SEL_CLK_INV,
+ PCOM_CLK_REGIME_SEC_SEL_CLK_SRC,
+ PCOM_CLK_REGIME_SEC_SEL_CLK_DIV,
+ PCOM_CLK_REGIME_SEC_ICODEC_CLK_ENABLE,
+ PCOM_CLK_REGIME_SEC_ICODEC_CLK_DISABLE,
+ PCOM_CLK_REGIME_SEC_SEL_SPEED,
+ PCOM_CLK_REGIME_SEC_CONFIG_GP_CLK_WRP,
+ PCOM_CLK_REGIME_SEC_CONFIG_MDH_CLK_WRP,
+ PCOM_CLK_REGIME_SEC_USB_XTAL_ON,
+ PCOM_CLK_REGIME_SEC_USB_XTAL_OFF,
+ PCOM_CLK_REGIME_SEC_SET_QDSP_DME_MODE,
+ PCOM_CLK_REGIME_SEC_SWITCH_ADSP_CLK,
+ PCOM_CLK_REGIME_SEC_GET_MAX_ADSP_CLK_KHZ,
+ PCOM_CLK_REGIME_SEC_GET_I2C_CLK_KHZ,
+ PCOM_CLK_REGIME_SEC_MSM_GET_CLK_FREQ_KHZ,
+ PCOM_CLK_REGIME_SEC_SEL_VFE_SRC,
+ PCOM_CLK_REGIME_SEC_MSM_SEL_CAMCLK,
+ PCOM_CLK_REGIME_SEC_MSM_SEL_LCDCLK,
+ PCOM_CLK_REGIME_SEC_VFE_RAIL_OFF,
+ PCOM_CLK_REGIME_SEC_VFE_RAIL_ON,
+ PCOM_CLK_REGIME_SEC_GRP_RAIL_OFF,
+ PCOM_CLK_REGIME_SEC_GRP_RAIL_ON,
+ PCOM_CLK_REGIME_SEC_VDC_RAIL_OFF,
+ PCOM_CLK_REGIME_SEC_VDC_RAIL_ON,
+ PCOM_CLK_REGIME_SEC_LCD_CTRL,
+ PCOM_CLK_REGIME_SEC_REGISTER_FOR_CPU_RESOURCE,
+ PCOM_CLK_REGIME_SEC_DEREGISTER_FOR_CPU_RESOURCE,
+ PCOM_CLK_REGIME_SEC_RESOURCE_REQUEST_WRP,
+ PCOM_CLK_REGIME_MSM_SEC_SEL_CLK_OWNER,
+ PCOM_CLK_REGIME_SEC_DEVMAN_REQUEST_WRP,
+ PCOM_GPIO_CONFIG,
+ PCOM_GPIO_CONFIGURE_GROUP,
+ PCOM_GPIO_TLMM_SET_PORT,
+ PCOM_GPIO_TLMM_CONFIG_EX,
+ PCOM_SET_FTM_BOOT_COUNT,
+ PCOM_RESERVED0,
+ PCOM_RESERVED1,
+ PCOM_CUSTOMER_CMD1,
+ PCOM_CUSTOMER_CMD2,
+ PCOM_CUSTOMER_CMD3,
+ PCOM_CLK_REGIME_ENTER_APPSBL_CHG_MODE,
+ PCOM_CLK_REGIME_EXIT_APPSBL_CHG_MODE,
+ PCOM_CLK_REGIME_SEC_RAIL_DISABLE,
+ PCOM_CLK_REGIME_SEC_RAIL_ENABLE,
+ PCOM_CLK_REGIME_SEC_RAIL_CONTROL,
+ PCOM_SET_SW_WATCHDOG_STATE,
+ PCOM_PM_MPP_CONFIG_DIGITAL_INPUT,
+ PCOM_PM_MPP_CONFIG_I_SINK,
+ PCOM_RESERVED_101,
+ PCOM_MSM_HSUSB_PHY_RESET,
+ PCOM_GET_BATT_MV_LEVEL,
+ PCOM_CHG_USB_IS_PC_CONNECTED,
+ PCOM_CHG_USB_IS_CHARGER_CONNECTED,
+ PCOM_CHG_USB_IS_DISCONNECTED,
+ PCOM_CHG_USB_IS_AVAILABLE,
+ PCOM_CLK_REGIME_SEC_MSM_SEL_FREQ,
+ PCOM_CLK_REGIME_SEC_SET_PCLK_AXI_POLICY,
+ PCOM_CLKCTL_RPC_RESET_ASSERT,
+ PCOM_CLKCTL_RPC_RESET_DEASSERT,
+ PCOM_CLKCTL_RPC_RAIL_ON,
+ PCOM_CLKCTL_RPC_RAIL_OFF,
+ PCOM_CLKCTL_RPC_RAIL_ENABLE,
+ PCOM_CLKCTL_RPC_RAIL_DISABLE,
+ PCOM_CLKCTL_RPC_RAIL_CONTROL,
+ PCOM_CLKCTL_RPC_MIN_MSMC1,
PCOM_NUM_CMDS,
};
enum {
- PCOM_INVALID_STATUS = 0x0,
- PCOM_READY,
- PCOM_CMD_RUNNING,
- PCOM_CMD_SUCCESS,
- PCOM_CMD_FAIL,
+ PCOM_INVALID_STATUS = 0x0,
+ PCOM_READY,
+ PCOM_CMD_RUNNING,
+ PCOM_CMD_SUCCESS,
+ PCOM_CMD_FAIL,
+ PCOM_CMD_FAIL_FALSE_RETURNED,
+ PCOM_CMD_FAIL_CMD_OUT_OF_BOUNDS_SERVER,
+ PCOM_CMD_FAIL_CMD_OUT_OF_BOUNDS_CLIENT,
+ PCOM_CMD_FAIL_CMD_UNREGISTERED,
+ PCOM_CMD_FAIL_CMD_LOCKED,
+ PCOM_CMD_FAIL_SERVER_NOT_YET_READY,
+ PCOM_CMD_FAIL_BAD_DESTINATION,
+ PCOM_CMD_FAIL_SERVER_RESET,
+ PCOM_CMD_FAIL_SMSM_NOT_INIT,
+ PCOM_CMD_FAIL_PROC_COMM_BUSY,
+ PCOM_CMD_FAIL_PROC_COMM_NOT_INIT,
+
};
/* List of VREGs that support the Pull Down Resistor setting. */
-enum {
+enum vreg_pdown_id {
PM_VREG_PDOWN_MSMA_ID,
PM_VREG_PDOWN_MSMP_ID,
PM_VREG_PDOWN_MSME1_ID, /* Not supported in Panoramix */
@@ -131,6 +218,11 @@ enum {
PM_VREG_PDOWN_XO_ID = PM_VREG_PDOWN_TCXO_ID
};
+enum {
+ PCOM_CLKRGM_APPS_RESET_USB_PHY = 34,
+ PCOM_CLKRGM_APPS_RESET_USBH = 37,
+};
+
/* gpio info for PCOM_RPC_GPIO_TLMM_CONFIG_EX */
#define GPIO_ENABLE 0
@@ -161,5 +253,6 @@ enum {
(((drvstr) & 0xF) << 17))
int msm_proc_comm(unsigned cmd, unsigned *data1, unsigned *data2);
+void __init proc_comm_boot_wait(void);
#endif
diff --git a/arch/arm/mach-msm/smd.c b/arch/arm/mach-msm/smd.c
new file mode 100644
index 000000000000..cf11d414b425
--- /dev/null
+++ b/arch/arm/mach-msm/smd.c
@@ -0,0 +1,1046 @@
+/* arch/arm/mach-msm/smd.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+
+#include <mach/msm_smd.h>
+#include <mach/system.h>
+
+#include "smd_private.h"
+#include "proc_comm.h"
+
+#if defined(CONFIG_ARCH_QSD8X50)
+#define CONFIG_QDSP6 1
+#endif
+
+void (*msm_hw_reset_hook)(void);
+
+#define MODULE_NAME "msm_smd"
+
+enum {
+ MSM_SMD_DEBUG = 1U << 0,
+ MSM_SMSM_DEBUG = 1U << 0,
+};
+
+static int msm_smd_debug_mask;
+
+struct shared_info {
+ int ready;
+ unsigned state;
+};
+
+static unsigned dummy_state[SMSM_STATE_COUNT];
+
+static struct shared_info smd_info = {
+ .state = (unsigned) &dummy_state,
+};
+
+module_param_named(debug_mask, msm_smd_debug_mask,
+ int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+static unsigned last_heap_free = 0xffffffff;
+
+static inline void notify_other_smsm(void)
+{
+ msm_a2m_int(5);
+#ifdef CONFIG_QDSP6
+ msm_a2m_int(8);
+#endif
+}
+
+static inline void notify_modem_smd(void)
+{
+ msm_a2m_int(0);
+}
+
+static inline void notify_dsp_smd(void)
+{
+ msm_a2m_int(8);
+}
+
+static void smd_diag(void)
+{
+ char *x;
+
+ x = smem_find(ID_DIAG_ERR_MSG, SZ_DIAG_ERR_MSG);
+ if (x != 0) {
+ x[SZ_DIAG_ERR_MSG - 1] = 0;
+ pr_info("smem: DIAG '%s'\n", x);
+ }
+}
+
+/* call when SMSM_RESET flag is set in the A9's smsm_state */
+static void handle_modem_crash(void)
+{
+ pr_err("ARM9 has CRASHED\n");
+ smd_diag();
+
+ /* hard reboot if possible */
+ if (msm_hw_reset_hook)
+ msm_hw_reset_hook();
+
+ /* in this case the modem or watchdog should reboot us */
+ for (;;)
+ ;
+}
+
+uint32_t raw_smsm_get_state(enum smsm_state_item item)
+{
+ return readl(smd_info.state + item * 4);
+}
+
+static int check_for_modem_crash(void)
+{
+ if (raw_smsm_get_state(SMSM_STATE_MODEM) & SMSM_RESET) {
+ handle_modem_crash();
+ return -1;
+ }
+ return 0;
+}
+
+/* the spinlock is used to synchronize between the
+ * irq handler and code that mutates the channel
+ * list or fiddles with channel state
+ */
+DEFINE_SPINLOCK(smd_lock);
+DEFINE_SPINLOCK(smem_lock);
+
+/* the mutex is used during open() and close()
+ * operations to avoid races while creating or
+ * destroying smd_channel structures
+ */
+static DEFINE_MUTEX(smd_creation_mutex);
+
+static int smd_initialized;
+
+LIST_HEAD(smd_ch_closed_list);
+LIST_HEAD(smd_ch_list_modem);
+LIST_HEAD(smd_ch_list_dsp);
+
+static unsigned char smd_ch_allocated[64];
+static struct work_struct probe_work;
+
+/* how many bytes are available for reading */
+static int smd_stream_read_avail(struct smd_channel *ch)
+{
+ return (ch->recv->head - ch->recv->tail) & ch->fifo_mask;
+}
+
+/* how many bytes we are free to write */
+static int smd_stream_write_avail(struct smd_channel *ch)
+{
+ return ch->fifo_mask -
+ ((ch->send->head - ch->send->tail) & ch->fifo_mask);
+}
+
+static int smd_packet_read_avail(struct smd_channel *ch)
+{
+ if (ch->current_packet) {
+ int n = smd_stream_read_avail(ch);
+ if (n > ch->current_packet)
+ n = ch->current_packet;
+ return n;
+ } else {
+ return 0;
+ }
+}
+
+static int smd_packet_write_avail(struct smd_channel *ch)
+{
+ int n = smd_stream_write_avail(ch);
+ return n > SMD_HEADER_SIZE ? n - SMD_HEADER_SIZE : 0;
+}
+
+static int ch_is_open(struct smd_channel *ch)
+{
+ return (ch->recv->state == SMD_SS_OPENED) &&
+ (ch->send->state == SMD_SS_OPENED);
+}
+
+/* provide a pointer and length to readable data in the fifo */
+static unsigned ch_read_buffer(struct smd_channel *ch, void **ptr)
+{
+ unsigned head = ch->recv->head;
+ unsigned tail = ch->recv->tail;
+ *ptr = (void *) (ch->recv_data + tail);
+
+ if (tail <= head)
+ return head - tail;
+ else
+ return ch->fifo_size - tail;
+}
+
+/* advance the fifo read pointer after data from ch_read_buffer is consumed */
+static void ch_read_done(struct smd_channel *ch, unsigned count)
+{
+ BUG_ON(count > smd_stream_read_avail(ch));
+ ch->recv->tail = (ch->recv->tail + count) & ch->fifo_mask;
+ ch->send->fTAIL = 1;
+}
+
+/* basic read interface to ch_read_{buffer,done} used
+ * by smd_*_read() and update_packet_state()
+ * will read-and-discard if the _data pointer is null
+ */
+static int ch_read(struct smd_channel *ch, void *_data, int len)
+{
+ void *ptr;
+ unsigned n;
+ unsigned char *data = _data;
+ int orig_len = len;
+
+ while (len > 0) {
+ n = ch_read_buffer(ch, &ptr);
+ if (n == 0)
+ break;
+
+ if (n > len)
+ n = len;
+ if (_data)
+ memcpy(data, ptr, n);
+
+ data += n;
+ len -= n;
+ ch_read_done(ch, n);
+ }
+
+ return orig_len - len;
+}
+
+static void update_stream_state(struct smd_channel *ch)
+{
+ /* streams have no special state requiring updating */
+}
+
+static void update_packet_state(struct smd_channel *ch)
+{
+ unsigned hdr[5];
+ int r;
+
+ /* can't do anything if we're in the middle of a packet */
+ if (ch->current_packet != 0)
+ return;
+
+ /* don't bother unless we can get the full header */
+ if (smd_stream_read_avail(ch) < SMD_HEADER_SIZE)
+ return;
+
+ r = ch_read(ch, hdr, SMD_HEADER_SIZE);
+ BUG_ON(r != SMD_HEADER_SIZE);
+
+ ch->current_packet = hdr[0];
+}
+
+/* provide a pointer and length to next free space in the fifo */
+static unsigned ch_write_buffer(struct smd_channel *ch, void **ptr)
+{
+ unsigned head = ch->send->head;
+ unsigned tail = ch->send->tail;
+ *ptr = (void *) (ch->send_data + head);
+
+ if (head < tail) {
+ return tail - head - 1;
+ } else {
+ if (tail == 0)
+ return ch->fifo_size - head - 1;
+ else
+ return ch->fifo_size - head;
+ }
+}
+
+/* advance the fifo write pointer after free space
+ * from ch_write_buffer is filled
+ */
+static void ch_write_done(struct smd_channel *ch, unsigned count)
+{
+ BUG_ON(count > smd_stream_write_avail(ch));
+ ch->send->head = (ch->send->head + count) & ch->fifo_mask;
+ ch->send->fHEAD = 1;
+}
+
+static void ch_set_state(struct smd_channel *ch, unsigned n)
+{
+ if (n == SMD_SS_OPENED) {
+ ch->send->fDSR = 1;
+ ch->send->fCTS = 1;
+ ch->send->fCD = 1;
+ } else {
+ ch->send->fDSR = 0;
+ ch->send->fCTS = 0;
+ ch->send->fCD = 0;
+ }
+ ch->send->state = n;
+ ch->send->fSTATE = 1;
+ ch->notify_other_cpu();
+}
+
+static void do_smd_probe(void)
+{
+ struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
+ if (shared->heap_info.free_offset != last_heap_free) {
+ last_heap_free = shared->heap_info.free_offset;
+ schedule_work(&probe_work);
+ }
+}
+
+static void smd_state_change(struct smd_channel *ch,
+ unsigned last, unsigned next)
+{
+ ch->last_state = next;
+
+ pr_info("SMD: ch %d %d -> %d\n", ch->n, last, next);
+
+ switch (next) {
+ case SMD_SS_OPENING:
+ ch->recv->tail = 0;
+ case SMD_SS_OPENED:
+ if (ch->send->state != SMD_SS_OPENED)
+ ch_set_state(ch, SMD_SS_OPENED);
+ ch->notify(ch->priv, SMD_EVENT_OPEN);
+ break;
+ case SMD_SS_FLUSHING:
+ case SMD_SS_RESET:
+ /* we should force them to close? */
+ default:
+ ch->notify(ch->priv, SMD_EVENT_CLOSE);
+ }
+}
+
+static void handle_smd_irq(struct list_head *list, void (*notify)(void))
+{
+ unsigned long flags;
+ struct smd_channel *ch;
+ int do_notify = 0;
+ unsigned ch_flags;
+ unsigned tmp;
+
+ spin_lock_irqsave(&smd_lock, flags);
+ list_for_each_entry(ch, list, ch_list) {
+ ch_flags = 0;
+ if (ch_is_open(ch)) {
+ if (ch->recv->fHEAD) {
+ ch->recv->fHEAD = 0;
+ ch_flags |= 1;
+ do_notify |= 1;
+ }
+ if (ch->recv->fTAIL) {
+ ch->recv->fTAIL = 0;
+ ch_flags |= 2;
+ do_notify |= 1;
+ }
+ if (ch->recv->fSTATE) {
+ ch->recv->fSTATE = 0;
+ ch_flags |= 4;
+ do_notify |= 1;
+ }
+ }
+ tmp = ch->recv->state;
+ if (tmp != ch->last_state)
+ smd_state_change(ch, ch->last_state, tmp);
+ if (ch_flags) {
+ ch->update_state(ch);
+ ch->notify(ch->priv, SMD_EVENT_DATA);
+ }
+ }
+ if (do_notify)
+ notify();
+ spin_unlock_irqrestore(&smd_lock, flags);
+ do_smd_probe();
+}
+
+static irqreturn_t smd_modem_irq_handler(int irq, void *data)
+{
+ handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
+ return IRQ_HANDLED;
+}
+
+#if defined(CONFIG_QDSP6)
+static irqreturn_t smd_dsp_irq_handler(int irq, void *data)
+{
+ handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
+ return IRQ_HANDLED;
+}
+#endif
+
+static void smd_fake_irq_handler(unsigned long arg)
+{
+ handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
+ handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
+}
+
+static DECLARE_TASKLET(smd_fake_irq_tasklet, smd_fake_irq_handler, 0);
+
+static inline int smd_need_int(struct smd_channel *ch)
+{
+ if (ch_is_open(ch)) {
+ if (ch->recv->fHEAD || ch->recv->fTAIL || ch->recv->fSTATE)
+ return 1;
+ if (ch->recv->state != ch->last_state)
+ return 1;
+ }
+ return 0;
+}
+
+void smd_sleep_exit(void)
+{
+ unsigned long flags;
+ struct smd_channel *ch;
+ int need_int = 0;
+
+ spin_lock_irqsave(&smd_lock, flags);
+ list_for_each_entry(ch, &smd_ch_list_modem, ch_list) {
+ if (smd_need_int(ch)) {
+ need_int = 1;
+ break;
+ }
+ }
+ list_for_each_entry(ch, &smd_ch_list_dsp, ch_list) {
+ if (smd_need_int(ch)) {
+ need_int = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&smd_lock, flags);
+ do_smd_probe();
+
+ if (need_int) {
+ if (msm_smd_debug_mask & MSM_SMD_DEBUG)
+ pr_info("smd_sleep_exit need interrupt\n");
+ tasklet_schedule(&smd_fake_irq_tasklet);
+ }
+}
+
+
+void smd_kick(smd_channel_t *ch)
+{
+ unsigned long flags;
+ unsigned tmp;
+
+ spin_lock_irqsave(&smd_lock, flags);
+ ch->update_state(ch);
+ tmp = ch->recv->state;
+ if (tmp != ch->last_state) {
+ ch->last_state = tmp;
+ if (tmp == SMD_SS_OPENED)
+ ch->notify(ch->priv, SMD_EVENT_OPEN);
+ else
+ ch->notify(ch->priv, SMD_EVENT_CLOSE);
+ }
+ ch->notify(ch->priv, SMD_EVENT_DATA);
+ ch->notify_other_cpu();
+ spin_unlock_irqrestore(&smd_lock, flags);
+}
+
+static int smd_is_packet(int chn, unsigned type)
+{
+ type &= SMD_KIND_MASK;
+ if (type == SMD_KIND_PACKET)
+ return 1;
+ if (type == SMD_KIND_STREAM)
+ return 0;
+
+ /* older AMSS reports SMD_KIND_UNKNOWN always */
+ if ((chn > 4) || (chn == 1))
+ return 1;
+ else
+ return 0;
+}
+
+static int smd_stream_write(smd_channel_t *ch, const void *_data, int len)
+{
+ void *ptr;
+ const unsigned char *buf = _data;
+ unsigned xfer;
+ int orig_len = len;
+
+ if (len < 0)
+ return -EINVAL;
+
+ while ((xfer = ch_write_buffer(ch, &ptr)) != 0) {
+ if (!ch_is_open(ch))
+ break;
+ if (xfer > len)
+ xfer = len;
+ memcpy(ptr, buf, xfer);
+ ch_write_done(ch, xfer);
+ len -= xfer;
+ buf += xfer;
+ if (len == 0)
+ break;
+ }
+
+ ch->notify_other_cpu();
+
+ return orig_len - len;
+}
+
+static int smd_packet_write(smd_channel_t *ch, const void *_data, int len)
+{
+ unsigned hdr[5];
+
+ if (len < 0)
+ return -EINVAL;
+
+ if (smd_stream_write_avail(ch) < (len + SMD_HEADER_SIZE))
+ return -ENOMEM;
+
+ hdr[0] = len;
+ hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;
+
+ smd_stream_write(ch, hdr, sizeof(hdr));
+ smd_stream_write(ch, _data, len);
+
+ return len;
+}
+
+static int smd_stream_read(smd_channel_t *ch, void *data, int len)
+{
+ int r;
+
+ if (len < 0)
+ return -EINVAL;
+
+ r = ch_read(ch, data, len);
+ if (r > 0)
+ ch->notify_other_cpu();
+
+ return r;
+}
+
+static int smd_packet_read(smd_channel_t *ch, void *data, int len)
+{
+ unsigned long flags;
+ int r;
+
+ if (len < 0)
+ return -EINVAL;
+
+ if (len > ch->current_packet)
+ len = ch->current_packet;
+
+ r = ch_read(ch, data, len);
+ if (r > 0)
+ ch->notify_other_cpu();
+
+ spin_lock_irqsave(&smd_lock, flags);
+ ch->current_packet -= r;
+ update_packet_state(ch);
+ spin_unlock_irqrestore(&smd_lock, flags);
+
+ return r;
+}
+
+static int smd_alloc_channel(const char *name, uint32_t cid, uint32_t type)
+{
+ struct smd_channel *ch;
+
+ ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
+ if (ch == 0) {
+ pr_err("smd_alloc_channel() out of memory\n");
+ return -1;
+ }
+ ch->n = cid;
+
+ if (_smd_alloc_channel(ch)) {
+ kfree(ch);
+ return -1;
+ }
+
+ ch->fifo_mask = ch->fifo_size - 1;
+ ch->type = type;
+
+ if ((type & SMD_TYPE_MASK) == SMD_TYPE_APPS_MODEM)
+ ch->notify_other_cpu = notify_modem_smd;
+ else
+ ch->notify_other_cpu = notify_dsp_smd;
+
+ if (smd_is_packet(cid, type)) {
+ ch->read = smd_packet_read;
+ ch->write = smd_packet_write;
+ ch->read_avail = smd_packet_read_avail;
+ ch->write_avail = smd_packet_write_avail;
+ ch->update_state = update_packet_state;
+ } else {
+ ch->read = smd_stream_read;
+ ch->write = smd_stream_write;
+ ch->read_avail = smd_stream_read_avail;
+ ch->write_avail = smd_stream_write_avail;
+ ch->update_state = update_stream_state;
+ }
+
+ if ((type & 0xff) == 0)
+ memcpy(ch->name, "SMD_", 4);
+ else
+ memcpy(ch->name, "DSP_", 4);
+ memcpy(ch->name + 4, name, 20);
+ ch->name[23] = 0;
+ ch->pdev.name = ch->name;
+ ch->pdev.id = -1;
+
+ pr_info("smd_alloc_channel() cid=%02d size=%05d '%s'\n",
+ ch->n, ch->fifo_size, ch->name);
+
+ mutex_lock(&smd_creation_mutex);
+ list_add(&ch->ch_list, &smd_ch_closed_list);
+ mutex_unlock(&smd_creation_mutex);
+
+ platform_device_register(&ch->pdev);
+ return 0;
+}
+
+static void smd_channel_probe_worker(struct work_struct *work)
+{
+ struct smd_alloc_elm *shared;
+ unsigned ctype;
+ unsigned type;
+ unsigned n;
+
+ shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
+ if (!shared) {
+ pr_err("smd: cannot find allocation table\n");
+ return;
+ }
+ for (n = 0; n < 64; n++) {
+ if (smd_ch_allocated[n])
+ continue;
+ if (!shared[n].ref_count)
+ continue;
+ if (!shared[n].name[0])
+ continue;
+ ctype = shared[n].ctype;
+ type = ctype & SMD_TYPE_MASK;
+
+		/* DAL channels are streams, but neither the modem,
+ * nor the DSP correctly indicate this. Fixup manually.
+ */
+ if (!memcmp(shared[n].name, "DAL", 3))
+ ctype = (ctype & (~SMD_KIND_MASK)) | SMD_KIND_STREAM;
+
+ type = shared[n].ctype & SMD_TYPE_MASK;
+ if ((type == SMD_TYPE_APPS_MODEM) ||
+ (type == SMD_TYPE_APPS_DSP))
+ if (!smd_alloc_channel(shared[n].name, shared[n].cid, ctype))
+ smd_ch_allocated[n] = 1;
+ }
+}
+
+static void do_nothing_notify(void *priv, unsigned flags)
+{
+}
+
+struct smd_channel *smd_get_channel(const char *name)
+{
+ struct smd_channel *ch;
+
+ mutex_lock(&smd_creation_mutex);
+ list_for_each_entry(ch, &smd_ch_closed_list, ch_list) {
+ if (!strcmp(name, ch->name)) {
+ list_del(&ch->ch_list);
+ mutex_unlock(&smd_creation_mutex);
+ return ch;
+ }
+ }
+ mutex_unlock(&smd_creation_mutex);
+
+ return NULL;
+}
+
+int smd_open(const char *name, smd_channel_t **_ch,
+ void *priv, void (*notify)(void *, unsigned))
+{
+ struct smd_channel *ch;
+ unsigned long flags;
+
+ if (smd_initialized == 0) {
+ pr_info("smd_open() before smd_init()\n");
+ return -ENODEV;
+ }
+
+ ch = smd_get_channel(name);
+ if (!ch)
+ return -ENODEV;
+
+ if (notify == 0)
+ notify = do_nothing_notify;
+
+ ch->notify = notify;
+ ch->current_packet = 0;
+ ch->last_state = SMD_SS_CLOSED;
+ ch->priv = priv;
+
+ *_ch = ch;
+
+ spin_lock_irqsave(&smd_lock, flags);
+
+ if ((ch->type & SMD_TYPE_MASK) == SMD_TYPE_APPS_MODEM)
+ list_add(&ch->ch_list, &smd_ch_list_modem);
+ else
+ list_add(&ch->ch_list, &smd_ch_list_dsp);
+
+ /* If the remote side is CLOSING, we need to get it to
+ * move to OPENING (which we'll do by moving from CLOSED to
+ * OPENING) and then get it to move from OPENING to
+ * OPENED (by doing the same state change ourselves).
+ *
+ * Otherwise, it should be OPENING and we can move directly
+ * to OPENED so that it will follow.
+ */
+ if (ch->recv->state == SMD_SS_CLOSING) {
+ ch->send->head = 0;
+ ch_set_state(ch, SMD_SS_OPENING);
+ } else {
+ ch_set_state(ch, SMD_SS_OPENED);
+ }
+ spin_unlock_irqrestore(&smd_lock, flags);
+ smd_kick(ch);
+
+ return 0;
+}
+
+int smd_close(smd_channel_t *ch)
+{
+ unsigned long flags;
+
+ pr_info("smd_close(%p)\n", ch);
+
+ if (ch == 0)
+ return -1;
+
+ spin_lock_irqsave(&smd_lock, flags);
+ ch->notify = do_nothing_notify;
+ list_del(&ch->ch_list);
+ ch_set_state(ch, SMD_SS_CLOSED);
+ spin_unlock_irqrestore(&smd_lock, flags);
+
+ mutex_lock(&smd_creation_mutex);
+ list_add(&ch->ch_list, &smd_ch_closed_list);
+ mutex_unlock(&smd_creation_mutex);
+
+ return 0;
+}
+
+int smd_read(smd_channel_t *ch, void *data, int len)
+{
+ return ch->read(ch, data, len);
+}
+
+int smd_write(smd_channel_t *ch, const void *data, int len)
+{
+ return ch->write(ch, data, len);
+}
+
+int smd_write_atomic(smd_channel_t *ch, const void *data, int len)
+{
+ unsigned long flags;
+ int res;
+ spin_lock_irqsave(&smd_lock, flags);
+ res = ch->write(ch, data, len);
+ spin_unlock_irqrestore(&smd_lock, flags);
+ return res;
+}
+
+int smd_read_avail(smd_channel_t *ch)
+{
+ return ch->read_avail(ch);
+}
+
+int smd_write_avail(smd_channel_t *ch)
+{
+ return ch->write_avail(ch);
+}
+
+int smd_wait_until_readable(smd_channel_t *ch, int bytes)
+{
+ return -1;
+}
+
+int smd_wait_until_writable(smd_channel_t *ch, int bytes)
+{
+ return -1;
+}
+
+int smd_cur_packet_size(smd_channel_t *ch)
+{
+ return ch->current_packet;
+}
+
+
+/* ------------------------------------------------------------------------- */
+
+void *smem_alloc(unsigned id, unsigned size)
+{
+ return smem_find(id, size);
+}
+
+void *smem_item(unsigned id, unsigned *size)
+{
+ struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
+ struct smem_heap_entry *toc = shared->heap_toc;
+
+ if (id >= SMEM_NUM_ITEMS)
+ return 0;
+
+ if (toc[id].allocated) {
+ *size = toc[id].size;
+ return (void *) (MSM_SHARED_RAM_BASE + toc[id].offset);
+ } else {
+ *size = 0;
+ }
+
+ return 0;
+}
+
+void *smem_find(unsigned id, unsigned size_in)
+{
+ unsigned size;
+ void *ptr;
+
+ ptr = smem_item(id, &size);
+ if (!ptr)
+ return 0;
+
+ size_in = ALIGN(size_in, 8);
+ if (size_in != size) {
+ pr_err("smem_find(%d, %d): wrong size %d\n",
+ id, size_in, size);
+ return 0;
+ }
+
+ return ptr;
+}
+
+static irqreturn_t smsm_irq_handler(int irq, void *data)
+{
+ unsigned long flags;
+ unsigned apps, modm;
+
+ spin_lock_irqsave(&smem_lock, flags);
+
+ apps = raw_smsm_get_state(SMSM_STATE_APPS);
+ modm = raw_smsm_get_state(SMSM_STATE_MODEM);
+
+ if (msm_smd_debug_mask & MSM_SMSM_DEBUG)
+ pr_info("<SM %08x %08x>\n", apps, modm);
+ if (modm & SMSM_RESET)
+ handle_modem_crash();
+
+ do_smd_probe();
+
+ spin_unlock_irqrestore(&smem_lock, flags);
+ return IRQ_HANDLED;
+}
+
+int smsm_change_state(enum smsm_state_item item,
+ uint32_t clear_mask, uint32_t set_mask)
+{
+ unsigned long addr = smd_info.state + item * 4;
+ unsigned long flags;
+ unsigned state;
+
+ if (!smd_info.ready)
+ return -EIO;
+
+ spin_lock_irqsave(&smem_lock, flags);
+
+ if (raw_smsm_get_state(SMSM_STATE_MODEM) & SMSM_RESET)
+ handle_modem_crash();
+
+ state = (readl(addr) & ~clear_mask) | set_mask;
+ writel(state, addr);
+
+ if (msm_smd_debug_mask & MSM_SMSM_DEBUG)
+ pr_info("smsm_change_state %d %x\n", item, state);
+ notify_other_smsm();
+
+ spin_unlock_irqrestore(&smem_lock, flags);
+
+ return 0;
+}
+
+uint32_t smsm_get_state(enum smsm_state_item item)
+{
+ unsigned long flags;
+ uint32_t rv;
+
+ spin_lock_irqsave(&smem_lock, flags);
+
+ rv = readl(smd_info.state + item * 4);
+
+ if (item == SMSM_STATE_MODEM && (rv & SMSM_RESET))
+ handle_modem_crash();
+
+ spin_unlock_irqrestore(&smem_lock, flags);
+
+ return rv;
+}
+
+#ifdef CONFIG_ARCH_MSM_SCORPION
+
+int smsm_set_sleep_duration(uint32_t delay)
+{
+ struct msm_dem_slave_data *ptr;
+
+ ptr = smem_find(SMEM_APPS_DEM_SLAVE_DATA, sizeof(*ptr));
+ if (ptr == NULL) {
+ pr_err("smsm_set_sleep_duration <SM NO APPS_DEM_SLAVE_DATA>\n");
+ return -EIO;
+ }
+ if (msm_smd_debug_mask & MSM_SMSM_DEBUG)
+ pr_info("smsm_set_sleep_duration %d -> %d\n",
+ ptr->sleep_time, delay);
+ ptr->sleep_time = delay;
+ return 0;
+}
+
+#else
+
+int smsm_set_sleep_duration(uint32_t delay)
+{
+ uint32_t *ptr;
+
+ ptr = smem_find(SMEM_SMSM_SLEEP_DELAY, sizeof(*ptr));
+ if (ptr == NULL) {
+ pr_err("smsm_set_sleep_duration <SM NO SLEEP_DELAY>\n");
+ return -EIO;
+ }
+ if (msm_smd_debug_mask & MSM_SMSM_DEBUG)
+ pr_info("smsm_set_sleep_duration %d -> %d\n",
+ *ptr, delay);
+ *ptr = delay;
+ return 0;
+}
+
+#endif
+
+int smd_core_init(void)
+{
+ int r;
+ pr_info("smd_core_init()\n");
+
+ /* wait for essential items to be initialized */
+ for (;;) {
+ unsigned size;
+ void *state;
+ state = smem_item(SMEM_SMSM_SHARED_STATE, &size);
+ if (size == SMSM_V1_SIZE || size == SMSM_V2_SIZE) {
+ smd_info.state = (unsigned)state;
+ break;
+ }
+ }
+
+ smd_info.ready = 1;
+
+ r = request_irq(INT_A9_M2A_0, smd_modem_irq_handler,
+ IRQF_TRIGGER_RISING, "smd_dev", 0);
+ if (r < 0)
+ return r;
+ r = enable_irq_wake(INT_A9_M2A_0);
+ if (r < 0)
+ pr_err("smd_core_init: enable_irq_wake failed for A9_M2A_0\n");
+
+ r = request_irq(INT_A9_M2A_5, smsm_irq_handler,
+ IRQF_TRIGGER_RISING, "smsm_dev", 0);
+ if (r < 0) {
+ free_irq(INT_A9_M2A_0, 0);
+ return r;
+ }
+ r = enable_irq_wake(INT_A9_M2A_5);
+ if (r < 0)
+ pr_err("smd_core_init: enable_irq_wake failed for A9_M2A_5\n");
+
+#if defined(CONFIG_QDSP6)
+ r = request_irq(INT_ADSP_A11, smd_dsp_irq_handler,
+ IRQF_TRIGGER_RISING, "smd_dsp", 0);
+ if (r < 0) {
+ free_irq(INT_A9_M2A_0, 0);
+ free_irq(INT_A9_M2A_5, 0);
+ return r;
+ }
+#endif
+
+ /* check for any SMD channels that may already exist */
+ do_smd_probe();
+
+ /* indicate that we're up and running */
+ smsm_change_state(SMSM_STATE_APPS,
+ ~0, SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT | SMSM_RUN);
+#ifdef CONFIG_ARCH_MSM_SCORPION
+ smsm_change_state(SMSM_STATE_APPS_DEM, ~0, 0);
+#endif
+
+ pr_info("smd_core_init() done\n");
+
+ return 0;
+}
+
+static int __init msm_smd_probe(struct platform_device *pdev)
+{
+ pr_info("smd_init()\n");
+
+ /*
+ * If we haven't waited for the ARM9 to boot up till now,
+ * then we need to wait here. Otherwise this should just
+ * return immediately.
+ */
+ proc_comm_boot_wait();
+
+ INIT_WORK(&probe_work, smd_channel_probe_worker);
+
+ if (smd_core_init()) {
+ pr_err("smd_core_init() failed\n");
+ return -1;
+ }
+
+ do_smd_probe();
+
+ msm_check_for_modem_crash = check_for_modem_crash;
+
+ msm_init_last_radio_log(THIS_MODULE);
+
+ smd_initialized = 1;
+
+ return 0;
+}
+
+static struct platform_driver msm_smd_driver = {
+ .probe = msm_smd_probe,
+ .driver = {
+ .name = MODULE_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init msm_smd_init(void)
+{
+ return platform_driver_register(&msm_smd_driver);
+}
+
+module_init(msm_smd_init);
+
+MODULE_DESCRIPTION("MSM Shared Memory Core");
+MODULE_AUTHOR("Brian Swetland <swetland@google.com>");
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/mach-msm/smd_debug.c b/arch/arm/mach-msm/smd_debug.c
new file mode 100644
index 000000000000..3b2dd717b788
--- /dev/null
+++ b/arch/arm/mach-msm/smd_debug.c
@@ -0,0 +1,315 @@
+/* arch/arm/mach-msm/smd_debug.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/list.h>
+
+#include <mach/msm_iomap.h>
+
+#include "smd_private.h"
+
+#if defined(CONFIG_DEBUG_FS)
+
+static char *chstate(unsigned n)
+{
+ switch (n) {
+ case SMD_SS_CLOSED:
+ return "CLOSED";
+ case SMD_SS_OPENING:
+ return "OPENING";
+ case SMD_SS_OPENED:
+ return "OPENED";
+ case SMD_SS_FLUSHING:
+ return "FLUSHING";
+ case SMD_SS_CLOSING:
+ return "CLOSING";
+ case SMD_SS_RESET:
+ return "RESET";
+ case SMD_SS_RESET_OPENING:
+ return "ROPENING";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+
+static int dump_ch(char *buf, int max, struct smd_channel *ch)
+{
+ volatile struct smd_half_channel *s = ch->send;
+ volatile struct smd_half_channel *r = ch->recv;
+
+ return scnprintf(
+ buf, max,
+ "ch%02d:"
+ " %8s(%05d/%05d) %c%c%c%c%c%c%c <->"
+ " %8s(%05d/%05d) %c%c%c%c%c%c%c '%s'\n", ch->n,
+ chstate(s->state), s->tail, s->head,
+ s->fDSR ? 'D' : 'd',
+ s->fCTS ? 'C' : 'c',
+ s->fCD ? 'C' : 'c',
+ s->fRI ? 'I' : 'i',
+ s->fHEAD ? 'W' : 'w',
+ s->fTAIL ? 'R' : 'r',
+ s->fSTATE ? 'S' : 's',
+ chstate(r->state), r->tail, r->head,
+ r->fDSR ? 'D' : 'd',
+ r->fCTS ? 'R' : 'r',
+ r->fCD ? 'C' : 'c',
+ r->fRI ? 'I' : 'i',
+ r->fHEAD ? 'W' : 'w',
+ r->fTAIL ? 'R' : 'r',
+ r->fSTATE ? 'S' : 's',
+ ch->name
+ );
+}
+
+static int debug_read_stat(char *buf, int max)
+{
+ char *msg;
+ int i = 0;
+
+ msg = smem_find(ID_DIAG_ERR_MSG, SZ_DIAG_ERR_MSG);
+
+ if (raw_smsm_get_state(SMSM_STATE_MODEM) & SMSM_RESET)
+ i += scnprintf(buf + i, max - i,
+ "smsm: ARM9 HAS CRASHED\n");
+
+ i += scnprintf(buf + i, max - i, "smsm: a9: %08x a11: %08x\n",
+ raw_smsm_get_state(SMSM_STATE_MODEM),
+ raw_smsm_get_state(SMSM_STATE_APPS));
+#ifdef CONFIG_ARCH_MSM_SCORPION
+ i += scnprintf(buf + i, max - i, "smsm dem: apps: %08x modem: %08x "
+ "qdsp6: %08x power: %08x time: %08x\n",
+ raw_smsm_get_state(SMSM_STATE_APPS_DEM),
+ raw_smsm_get_state(SMSM_STATE_MODEM_DEM),
+ raw_smsm_get_state(SMSM_STATE_QDSP6_DEM),
+ raw_smsm_get_state(SMSM_STATE_POWER_MASTER_DEM),
+ raw_smsm_get_state(SMSM_STATE_TIME_MASTER_DEM));
+#endif
+ if (msg) {
+ msg[SZ_DIAG_ERR_MSG - 1] = 0;
+ i += scnprintf(buf + i, max - i, "diag: '%s'\n", msg);
+ }
+ return i;
+}
+
+static int debug_read_mem(char *buf, int max)
+{
+ unsigned n;
+ struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
+ struct smem_heap_entry *toc = shared->heap_toc;
+ int i = 0;
+
+ i += scnprintf(buf + i, max - i,
+ "heap: init=%d free=%d remain=%d\n",
+ shared->heap_info.initialized,
+ shared->heap_info.free_offset,
+ shared->heap_info.heap_remaining);
+
+ for (n = 0; n < SMEM_NUM_ITEMS; n++) {
+ if (toc[n].allocated == 0)
+ continue;
+ i += scnprintf(buf + i, max - i,
+ "%04d: offset %08x size %08x\n",
+ n, toc[n].offset, toc[n].size);
+ }
+ return i;
+}
+
+static int debug_read_ch(char *buf, int max)
+{
+ struct smd_channel *ch;
+ unsigned long flags;
+ int i = 0;
+
+ spin_lock_irqsave(&smd_lock, flags);
+ list_for_each_entry(ch, &smd_ch_list_dsp, ch_list)
+ i += dump_ch(buf + i, max - i, ch);
+ list_for_each_entry(ch, &smd_ch_list_modem, ch_list)
+ i += dump_ch(buf + i, max - i, ch);
+ list_for_each_entry(ch, &smd_ch_closed_list, ch_list)
+ i += dump_ch(buf + i, max - i, ch);
+ spin_unlock_irqrestore(&smd_lock, flags);
+
+ return i;
+}
+
+static int debug_read_version(char *buf, int max)
+{
+ struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
+ unsigned version = shared->version[VERSION_MODEM];
+ return sprintf(buf, "%d.%d\n", version >> 16, version & 0xffff);
+}
+
+static int debug_read_build_id(char *buf, int max)
+{
+ unsigned size;
+ void *data;
+
+ data = smem_item(SMEM_HW_SW_BUILD_ID, &size);
+ if (!data)
+ return 0;
+
+ if (size >= max)
+ size = max;
+ memcpy(buf, data, size);
+
+ return size;
+}
+
+static int debug_read_alloc_tbl(char *buf, int max)
+{
+ struct smd_alloc_elm *shared;
+ int n, i = 0;
+
+ shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
+
+ for (n = 0; n < 64; n++) {
+ if (shared[n].ref_count == 0)
+ continue;
+ i += scnprintf(buf + i, max - i,
+ "%03d: %-20s cid=%02d type=%03d "
+ "kind=%02d ref_count=%d\n",
+ n, shared[n].name, shared[n].cid,
+ shared[n].ctype & 0xff,
+ (shared[n].ctype >> 8) & 0xf,
+ shared[n].ref_count);
+ }
+
+ return i;
+}
+
+#define DEBUG_BUFMAX 4096
+static char debug_buffer[DEBUG_BUFMAX];
+
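+/*
+ * Each read regenerates the shared static debug_buffer by calling the fill
+ * callback that debug_create() stored in the file's i_private.
+ */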
+static ssize_t debug_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int (*fill)(char *buf, int max) = file->private_data;
+ int bsize = fill(debug_buffer, DEBUG_BUFMAX);
+ return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static const struct file_operations debug_ops = {
+ .read = debug_read,
+ .open = debug_open,
+};
+
+static void debug_create(const char *name, mode_t mode,
+ struct dentry *dent,
+ int (*fill)(char *buf, int max))
+{
+ debugfs_create_file(name, mode, dent, fill, &debug_ops);
+}
+
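+/* Exposes ch, stat, mem, version, tbl and build under <debugfs>/smd/. */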
+static int smd_debugfs_init(void)
+{
+ struct dentry *dent;
+
+ dent = debugfs_create_dir("smd", 0);
+ if (IS_ERR(dent))
+ return 1;
+
+ debug_create("ch", 0444, dent, debug_read_ch);
+ debug_create("stat", 0444, dent, debug_read_stat);
+ debug_create("mem", 0444, dent, debug_read_mem);
+ debug_create("version", 0444, dent, debug_read_version);
+ debug_create("tbl", 0444, dent, debug_read_alloc_tbl);
+ debug_create("build", 0444, dent, debug_read_build_id);
+
+ return 0;
+}
+
+late_initcall(smd_debugfs_init);
+#endif
+
+
+#define MAX_NUM_SLEEP_CLIENTS 64
+#define MAX_SLEEP_NAME_LEN 8
+
+#define NUM_GPIO_INT_REGISTERS 6
+#define GPIO_SMEM_NUM_GROUPS 2
+#define GPIO_SMEM_MAX_PC_INTERRUPTS 8
+
+struct tramp_gpio_save {
+ unsigned int enable;
+ unsigned int detect;
+ unsigned int polarity;
+};
+
+struct tramp_gpio_smem {
+ uint16_t num_fired[GPIO_SMEM_NUM_GROUPS];
+ uint16_t fired[GPIO_SMEM_NUM_GROUPS][GPIO_SMEM_MAX_PC_INTERRUPTS];
+ uint32_t enabled[NUM_GPIO_INT_REGISTERS];
+ uint32_t detection[NUM_GPIO_INT_REGISTERS];
+ uint32_t polarity[NUM_GPIO_INT_REGISTERS];
+};
+
+
+void smsm_print_sleep_info(void)
+{
+ unsigned long flags;
+ uint32_t *ptr;
+ struct tramp_gpio_smem *gpio;
+ struct smsm_interrupt_info *int_info;
+
+
+ spin_lock_irqsave(&smem_lock, flags);
+
+ ptr = smem_alloc(SMEM_SMSM_SLEEP_DELAY, sizeof(*ptr));
+ if (ptr)
+ pr_info("SMEM_SMSM_SLEEP_DELAY: %x\n", *ptr);
+
+ ptr = smem_alloc(SMEM_SMSM_LIMIT_SLEEP, sizeof(*ptr));
+ if (ptr)
+ pr_info("SMEM_SMSM_LIMIT_SLEEP: %x\n", *ptr);
+
+ ptr = smem_alloc(SMEM_SLEEP_POWER_COLLAPSE_DISABLED, sizeof(*ptr));
+ if (ptr)
+ pr_info("SMEM_SLEEP_POWER_COLLAPSE_DISABLED: %x\n", *ptr);
+
+#ifndef CONFIG_ARCH_MSM_SCORPION
+ int_info = smem_alloc(SMEM_SMSM_INT_INFO, sizeof(*int_info));
+ if (int_info)
+ pr_info("SMEM_SMSM_INT_INFO %x %x %x\n",
+ int_info->interrupt_mask,
+ int_info->pending_interrupts,
+ int_info->wakeup_reason);
+
+ gpio = smem_alloc(SMEM_GPIO_INT, sizeof(*gpio));
+ if (gpio) {
+ int i;
+ for (i = 0; i < NUM_GPIO_INT_REGISTERS; i++)
+ pr_info("SMEM_GPIO_INT: %d: e %x d %x p %x\n",
+ i, gpio->enabled[i], gpio->detection[i],
+ gpio->polarity[i]);
+
+ for (i = 0; i < GPIO_SMEM_NUM_GROUPS; i++)
+ pr_info("SMEM_GPIO_INT: %d: f %d: %d %d...\n",
+ i, gpio->num_fired[i], gpio->fired[i][0],
+ gpio->fired[i][1]);
+ }
+#else
+#endif
+ spin_unlock_irqrestore(&smem_lock, flags);
+}
+
diff --git a/arch/arm/mach-msm/smd_private.h b/arch/arm/mach-msm/smd_private.h
new file mode 100644
index 000000000000..727bfe68aa9b
--- /dev/null
+++ b/arch/arm/mach-msm/smd_private.h
@@ -0,0 +1,403 @@
+/* arch/arm/mach-msm/smd_private.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007 QUALCOMM Incorporated
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _ARCH_ARM_MACH_MSM_MSM_SMD_PRIVATE_H_
+#define _ARCH_ARM_MACH_MSM_MSM_SMD_PRIVATE_H_
+
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/io.h>
+
+#include <mach/msm_iomap.h>
+
+struct smem_heap_info {
+ unsigned initialized;
+ unsigned free_offset;
+ unsigned heap_remaining;
+ unsigned reserved;
+};
+
+struct smem_heap_entry {
+ unsigned allocated;
+ unsigned offset;
+ unsigned size;
+ unsigned reserved;
+};
+
+struct smem_proc_comm {
+ unsigned command;
+ unsigned status;
+ unsigned data1;
+ unsigned data2;
+};
+
+#define PC_APPS 0
+#define PC_MODEM 1
+
+#define VERSION_SMD 0
+#define VERSION_QDSP6 4
+#define VERSION_APPS_SBL 6
+#define VERSION_MODEM_SBL 7
+#define VERSION_APPS 8
+#define VERSION_MODEM 9
+
+struct smem_shared {
+ struct smem_proc_comm proc_comm[4];
+ unsigned version[32];
+ struct smem_heap_info heap_info;
+ struct smem_heap_entry heap_toc[512];
+};
+
+#define SMSM_V1_SIZE (sizeof(unsigned) * 8)
+#define SMSM_V2_SIZE (sizeof(unsigned) * 4)
+
+#ifdef CONFIG_MSM_SMD_PKG3
+struct smsm_interrupt_info {
+ uint32_t interrupt_mask;
+ uint32_t pending_interrupts;
+ uint32_t wakeup_reason;
+};
+#else
+#define DEM_MAX_PORT_NAME_LEN (20)
+struct msm_dem_slave_data {
+ uint32_t sleep_time;
+ uint32_t interrupt_mask;
+ uint32_t resources_used;
+ uint32_t reserved1;
+
+ uint32_t wakeup_reason;
+ uint32_t pending_interrupts;
+ uint32_t rpc_prog;
+ uint32_t rpc_proc;
+ char smd_port_name[DEM_MAX_PORT_NAME_LEN];
+ uint32_t reserved2;
+};
+#endif
+
+#define SZ_DIAG_ERR_MSG 0xC8
+#define ID_DIAG_ERR_MSG SMEM_DIAG_ERR_MESSAGE
+#define ID_SMD_CHANNELS SMEM_SMD_BASE_ID
+#define ID_SHARED_STATE SMEM_SMSM_SHARED_STATE
+#define ID_CH_ALLOC_TBL SMEM_CHANNEL_ALLOC_TBL
+
+#define SMSM_INIT 0x00000001
+#define SMSM_SMDINIT 0x00000008
+#define SMSM_RPCINIT 0x00000020
+#define SMSM_RESET 0x00000040
+#define SMSM_RSA 0x00000080
+#define SMSM_RUN 0x00000100
+#define SMSM_PWRC 0x00000200
+#define SMSM_TIMEWAIT 0x00000400
+#define SMSM_TIMEINIT 0x00000800
+#define SMSM_PWRC_EARLY_EXIT 0x00001000
+#define SMSM_WFPI 0x00002000
+#define SMSM_SLEEP 0x00004000
+#define SMSM_SLEEPEXIT 0x00008000
+#define SMSM_APPS_REBOOT 0x00020000
+#define SMSM_SYSTEM_POWER_DOWN 0x00040000
+#define SMSM_SYSTEM_REBOOT 0x00080000
+#define SMSM_SYSTEM_DOWNLOAD 0x00100000
+#define SMSM_PWRC_SUSPEND 0x00200000
+#define SMSM_APPS_SHUTDOWN 0x00400000
+#define SMSM_SMD_LOOPBACK 0x00800000
+#define SMSM_RUN_QUIET 0x01000000
+#define SMSM_MODEM_WAIT 0x02000000
+#define SMSM_MODEM_BREAK 0x04000000
+#define SMSM_MODEM_CONTINUE 0x08000000
+#define SMSM_UNKNOWN 0x80000000
+
+#define SMSM_WKUP_REASON_RPC 0x00000001
+#define SMSM_WKUP_REASON_INT 0x00000002
+#define SMSM_WKUP_REASON_GPIO 0x00000004
+#define SMSM_WKUP_REASON_TIMER 0x00000008
+#define SMSM_WKUP_REASON_ALARM 0x00000010
+#define SMSM_WKUP_REASON_RESET 0x00000020
+
+#ifdef CONFIG_ARCH_MSM7X00A
+enum smsm_state_item {
+ SMSM_STATE_APPS = 1,
+ SMSM_STATE_MODEM = 3,
+ SMSM_STATE_COUNT,
+};
+#else
+enum smsm_state_item {
+ SMSM_STATE_APPS,
+ SMSM_STATE_MODEM,
+ SMSM_STATE_HEXAGON,
+ SMSM_STATE_APPS_DEM,
+ SMSM_STATE_MODEM_DEM,
+ SMSM_STATE_QDSP6_DEM,
+ SMSM_STATE_POWER_MASTER_DEM,
+ SMSM_STATE_TIME_MASTER_DEM,
+ SMSM_STATE_COUNT,
+};
+#endif
+
+void *smem_alloc(unsigned id, unsigned size);
+int smsm_change_state(enum smsm_state_item item, uint32_t clear_mask, uint32_t set_mask);
+uint32_t smsm_get_state(enum smsm_state_item item);
+int smsm_set_sleep_duration(uint32_t delay);
+void smsm_print_sleep_info(void);
+
+#define SMEM_NUM_SMD_CHANNELS 64
+
+typedef enum {
+ /* fixed items */
+ SMEM_PROC_COMM = 0,
+ SMEM_HEAP_INFO,
+ SMEM_ALLOCATION_TABLE,
+ SMEM_VERSION_INFO,
+ SMEM_HW_RESET_DETECT,
+ SMEM_AARM_WARM_BOOT,
+ SMEM_DIAG_ERR_MESSAGE,
+ SMEM_SPINLOCK_ARRAY,
+ SMEM_MEMORY_BARRIER_LOCATION,
+
+ /* dynamic items */
+ SMEM_AARM_PARTITION_TABLE,
+ SMEM_AARM_BAD_BLOCK_TABLE,
+ SMEM_RESERVE_BAD_BLOCKS,
+ SMEM_WM_UUID,
+ SMEM_CHANNEL_ALLOC_TBL,
+ SMEM_SMD_BASE_ID,
+ SMEM_SMEM_LOG_IDX = SMEM_SMD_BASE_ID + SMEM_NUM_SMD_CHANNELS,
+ SMEM_SMEM_LOG_EVENTS,
+ SMEM_SMEM_STATIC_LOG_IDX,
+ SMEM_SMEM_STATIC_LOG_EVENTS,
+ SMEM_SMEM_SLOW_CLOCK_SYNC,
+ SMEM_SMEM_SLOW_CLOCK_VALUE,
+ SMEM_BIO_LED_BUF,
+ SMEM_SMSM_SHARED_STATE,
+ SMEM_SMSM_INT_INFO,
+ SMEM_SMSM_SLEEP_DELAY,
+ SMEM_SMSM_LIMIT_SLEEP,
+ SMEM_SLEEP_POWER_COLLAPSE_DISABLED,
+ SMEM_KEYPAD_KEYS_PRESSED,
+ SMEM_KEYPAD_STATE_UPDATED,
+ SMEM_KEYPAD_STATE_IDX,
+ SMEM_GPIO_INT,
+ SMEM_MDDI_LCD_IDX,
+ SMEM_MDDI_HOST_DRIVER_STATE,
+ SMEM_MDDI_LCD_DISP_STATE,
+ SMEM_LCD_CUR_PANEL,
+ SMEM_MARM_BOOT_SEGMENT_INFO,
+ SMEM_AARM_BOOT_SEGMENT_INFO,
+ SMEM_SLEEP_STATIC,
+ SMEM_SCORPION_FREQUENCY,
+ SMEM_SMD_PROFILES,
+ SMEM_TSSC_BUSY,
+ SMEM_HS_SUSPEND_FILTER_INFO,
+ SMEM_BATT_INFO,
+ SMEM_APPS_BOOT_MODE,
+ SMEM_VERSION_FIRST,
+ SMEM_VERSION_LAST = SMEM_VERSION_FIRST + 24,
+ SMEM_OSS_RRCASN1_BUF1,
+ SMEM_OSS_RRCASN1_BUF2,
+ SMEM_ID_VENDOR0,
+ SMEM_ID_VENDOR1,
+ SMEM_ID_VENDOR2,
+ SMEM_HW_SW_BUILD_ID,
+ SMEM_SMD_BLOCK_PORT_BASE_ID,
+ SMEM_SMD_BLOCK_PORT_PROC0_HEAP = SMEM_SMD_BLOCK_PORT_BASE_ID + SMEM_NUM_SMD_CHANNELS,
+ SMEM_SMD_BLOCK_PORT_PROC1_HEAP = SMEM_SMD_BLOCK_PORT_PROC0_HEAP + SMEM_NUM_SMD_CHANNELS,
+ SMEM_I2C_MUTEX = SMEM_SMD_BLOCK_PORT_PROC1_HEAP + SMEM_NUM_SMD_CHANNELS,
+ SMEM_SCLK_CONVERSION,
+ SMEM_SMD_SMSM_INTR_MUX,
+ SMEM_SMSM_CPU_INTR_MASK,
+ SMEM_APPS_DEM_SLAVE_DATA,
+ SMEM_QDSP6_DEM_SLAVE_DATA,
+ SMEM_CLKREGIM_BSP,
+ SMEM_CLKREGIM_SOURCES,
+ SMEM_SMD_FIFO_BASE_ID,
+ SMEM_USABLE_RAM_PARTITION_TABLE = SMEM_SMD_FIFO_BASE_ID + SMEM_NUM_SMD_CHANNELS,
+ SMEM_POWER_ON_STATUS_INFO,
+ SMEM_DAL_AREA,
+ SMEM_SMEM_LOG_POWER_IDX,
+ SMEM_SMEM_LOG_POWER_WRAP,
+ SMEM_SMEM_LOG_POWER_EVENTS,
+ SMEM_ERR_CRASH_LOG,
+ SMEM_ERR_F3_TRACE_LOG,
+ SMEM_NUM_ITEMS,
+} smem_mem_type;
+
+
+#define SMD_SS_CLOSED 0x00000000
+#define SMD_SS_OPENING 0x00000001
+#define SMD_SS_OPENED 0x00000002
+#define SMD_SS_FLUSHING 0x00000003
+#define SMD_SS_CLOSING 0x00000004
+#define SMD_SS_RESET 0x00000005
+#define SMD_SS_RESET_OPENING 0x00000006
+
+#define SMD_BUF_SIZE 8192
+#define SMD_CHANNELS 64
+
+#define SMD_HEADER_SIZE 20
+
+struct smd_alloc_elm {
+ char name[20];
+ uint32_t cid;
+ uint32_t ctype;
+ uint32_t ref_count;
+};
+
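+/*
+ * One direction of an SMD channel, kept in shared memory: 'state' holds an
+ * SMD_SS_* value, the f* bytes are signal/event flags, and head/tail index
+ * the channel's FIFO.
+ */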
+struct smd_half_channel {
+ unsigned state;
+ unsigned char fDSR;
+ unsigned char fCTS;
+ unsigned char fCD;
+ unsigned char fRI;
+ unsigned char fHEAD;
+ unsigned char fTAIL;
+ unsigned char fSTATE;
+ unsigned char fUNUSED;
+ unsigned tail;
+ unsigned head;
+} __attribute__(( aligned(4), packed ));
+
+/* Only used on SMD package v3 on msm7201a */
+struct smd_shared_v1 {
+ struct smd_half_channel ch0;
+ unsigned char data0[SMD_BUF_SIZE];
+ struct smd_half_channel ch1;
+ unsigned char data1[SMD_BUF_SIZE];
+};
+
+/* Used on SMD package v4 */
+struct smd_shared_v2 {
+ struct smd_half_channel ch0;
+ struct smd_half_channel ch1;
+};
+
+struct smd_channel {
+ volatile struct smd_half_channel *send;
+ volatile struct smd_half_channel *recv;
+ unsigned char *send_data;
+ unsigned char *recv_data;
+
+ unsigned fifo_mask;
+ unsigned fifo_size;
+ unsigned current_packet;
+ unsigned n;
+
+ struct list_head ch_list;
+
+ void *priv;
+ void (*notify)(void *priv, unsigned flags);
+
+ int (*read)(struct smd_channel *ch, void *data, int len);
+ int (*write)(struct smd_channel *ch, const void *data, int len);
+ int (*read_avail)(struct smd_channel *ch);
+ int (*write_avail)(struct smd_channel *ch);
+
+ void (*update_state)(struct smd_channel *ch);
+ unsigned last_state;
+ void (*notify_other_cpu)(void);
+ unsigned type;
+
+ char name[32];
+ struct platform_device pdev;
+};
+
+#define SMD_TYPE_MASK 0x0FF
+#define SMD_TYPE_APPS_MODEM 0x000
+#define SMD_TYPE_APPS_DSP 0x001
+#define SMD_TYPE_MODEM_DSP 0x002
+
+#define SMD_KIND_MASK 0xF00
+#define SMD_KIND_UNKNOWN 0x000
+#define SMD_KIND_STREAM 0x100
+#define SMD_KIND_PACKET 0x200
+
+extern struct list_head smd_ch_closed_list;
+extern struct list_head smd_ch_list_modem;
+extern struct list_head smd_ch_list_dsp;
+
+extern spinlock_t smd_lock;
+extern spinlock_t smem_lock;
+
+void *smem_find(unsigned id, unsigned size);
+void *smem_item(unsigned id, unsigned *size);
+uint32_t raw_smsm_get_state(enum smsm_state_item item);
+
+extern void msm_init_last_radio_log(struct module *);
+
+#ifdef CONFIG_MSM_SMD_PKG3
+/*
+ * This allocator assumes an SMD Package v3 which only exists on
+ * MSM7x00 SoC's.
+ */
+static inline int _smd_alloc_channel(struct smd_channel *ch)
+{
+ struct smd_shared_v1 *shared1;
+
+ shared1 = smem_alloc(ID_SMD_CHANNELS + ch->n, sizeof(*shared1));
+ if (!shared1) {
+ pr_err("smd_alloc_channel() cid %d does not exist\n", ch->n);
+ return -1;
+ }
+ ch->send = &shared1->ch0;
+ ch->recv = &shared1->ch1;
+ ch->send_data = shared1->data0;
+ ch->recv_data = shared1->data1;
+ ch->fifo_size = SMD_BUF_SIZE;
+ return 0;
+}
+#else
+/*
+ * This allocator assumes an SMD Package v4, the most common
+ * and the default.
+ */
+static inline int _smd_alloc_channel(struct smd_channel *ch)
+{
+ struct smd_shared_v2 *shared2;
+ void *buffer;
+ unsigned buffer_sz;
+
+ shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n, sizeof(*shared2));
+ buffer = smem_item(SMEM_SMD_FIFO_BASE_ID + ch->n, &buffer_sz);
+
+ if (!buffer)
+ return -1;
+
+ /* buffer must be a power-of-two size */
+ if (buffer_sz & (buffer_sz - 1))
+ return -1;
+
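+ /* Split the shared buffer in half: send FIFO first, receive FIFO second. */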
+ buffer_sz /= 2;
+ ch->send = &shared2->ch0;
+ ch->recv = &shared2->ch1;
+ ch->send_data = buffer;
+ ch->recv_data = buffer + buffer_sz;
+ ch->fifo_size = buffer_sz;
+ return 0;
+}
+#endif /* CONFIG_MSM_SMD_PKG3 */
+
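+/*
+ * Raise the outgoing apps-to-modem ("a2m") interrupt.  MSM7x30 uses a bit
+ * in the GCC block; earlier SoCs write the per-interrupt CSR register.
+ */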
+#if defined(CONFIG_ARCH_MSM7X30)
+static inline void msm_a2m_int(uint32_t irq)
+{
+ writel(1 << irq, MSM_GCC_BASE + 0x8);
+}
+#else
+static inline void msm_a2m_int(uint32_t irq)
+{
+ writel(1, MSM_CSR_BASE + 0x400 + (irq * 4));
+}
+#endif /* CONFIG_ARCH_MSM7X30 */
+
+
+#endif
diff --git a/arch/arm/mach-msm/vreg.c b/arch/arm/mach-msm/vreg.c
index fcb0b9f25684..a9103bc6615f 100644
--- a/arch/arm/mach-msm/vreg.c
+++ b/arch/arm/mach-msm/vreg.c
@@ -1,6 +1,7 @@
/* arch/arm/mach-msm/vreg.c
*
* Copyright (C) 2008 Google, Inc.
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
* Author: Brian Swetland <swetland@google.com>
*
* This software is licensed under the terms of the GNU General Public
@@ -18,6 +19,7 @@
#include <linux/device.h>
#include <linux/init.h>
#include <linux/debugfs.h>
+#include <linux/string.h>
#include <mach/vreg.h>
#include "proc_comm.h"
@@ -25,42 +27,62 @@
struct vreg {
const char *name;
unsigned id;
+ int status;
+ unsigned refcnt;
};
-#define VREG(_name, _id) { .name = _name, .id = _id, }
+#define VREG(_name, _id, _status, _refcnt) \
+ { .name = _name, .id = _id, .status = _status, .refcnt = _refcnt }
static struct vreg vregs[] = {
- VREG("msma", 0),
- VREG("msmp", 1),
- VREG("msme1", 2),
- VREG("msmc1", 3),
- VREG("msmc2", 4),
- VREG("gp3", 5),
- VREG("msme2", 6),
- VREG("gp4", 7),
- VREG("gp1", 8),
- VREG("tcxo", 9),
- VREG("pa", 10),
- VREG("rftx", 11),
- VREG("rfrx1", 12),
- VREG("rfrx2", 13),
- VREG("synt", 14),
- VREG("wlan", 15),
- VREG("usb", 16),
- VREG("boost", 17),
- VREG("mmc", 18),
- VREG("ruim", 19),
- VREG("msmc0", 20),
- VREG("gp2", 21),
- VREG("gp5", 22),
- VREG("gp6", 23),
- VREG("rf", 24),
- VREG("rf_vco", 26),
- VREG("mpll", 27),
- VREG("s2", 28),
- VREG("s3", 29),
- VREG("rfubm", 30),
- VREG("ncp", 31),
+ VREG("msma", 0, 0, 0),
+ VREG("msmp", 1, 0, 0),
+ VREG("msme1", 2, 0, 0),
+ VREG("msmc1", 3, 0, 0),
+ VREG("msmc2", 4, 0, 0),
+ VREG("gp3", 5, 0, 0),
+ VREG("msme2", 6, 0, 0),
+ VREG("gp4", 7, 0, 0),
+ VREG("gp1", 8, 0, 0),
+ VREG("tcxo", 9, 0, 0),
+ VREG("pa", 10, 0, 0),
+ VREG("rftx", 11, 0, 0),
+ VREG("rfrx1", 12, 0, 0),
+ VREG("rfrx2", 13, 0, 0),
+ VREG("synt", 14, 0, 0),
+ VREG("wlan", 15, 0, 0),
+ VREG("usb", 16, 0, 0),
+ VREG("boost", 17, 0, 0),
+ VREG("mmc", 18, 0, 0),
+ VREG("ruim", 19, 0, 0),
+ VREG("msmc0", 20, 0, 0),
+ VREG("gp2", 21, 0, 0),
+ VREG("gp5", 22, 0, 0),
+ VREG("gp6", 23, 0, 0),
+ VREG("rf", 24, 0, 0),
+ VREG("rf_vco", 26, 0, 0),
+ VREG("mpll", 27, 0, 0),
+ VREG("s2", 28, 0, 0),
+ VREG("s3", 29, 0, 0),
+ VREG("rfubm", 30, 0, 0),
+ VREG("ncp", 31, 0, 0),
+ VREG("gp7", 32, 0, 0),
+ VREG("gp8", 33, 0, 0),
+ VREG("gp9", 34, 0, 0),
+ VREG("gp10", 35, 0, 0),
+ VREG("gp11", 36, 0, 0),
+ VREG("gp12", 37, 0, 0),
+ VREG("gp13", 38, 0, 0),
+ VREG("gp14", 39, 0, 0),
+ VREG("gp15", 40, 0, 0),
+ VREG("gp16", 41, 0, 0),
+ VREG("gp17", 42, 0, 0),
+ VREG("s4", 43, 0, 0),
+ VREG("usb2", 44, 0, 0),
+ VREG("wlan2", 45, 0, 0),
+ VREG("xo_out", 46, 0, 0),
+ VREG("lvsw0", 47, 0, 0),
+ VREG("lvsw1", 48, 0, 0),
};
struct vreg *vreg_get(struct device *dev, const char *id)
@@ -70,7 +92,7 @@ struct vreg *vreg_get(struct device *dev, const char *id)
if (!strcmp(vregs[n].name, id))
return vregs + n;
}
- return 0;
+ return ERR_PTR(-ENOENT);
}
void vreg_put(struct vreg *vreg)
@@ -81,20 +103,39 @@ int vreg_enable(struct vreg *vreg)
{
unsigned id = vreg->id;
unsigned enable = 1;
- return msm_proc_comm(PCOM_VREG_SWITCH, &id, &enable);
+
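+ /*
+  * Only the first enable (and, in vreg_disable(), the last disable) actually
+  * issues the PCOM_VREG_SWITCH call; nested calls just adjust the refcount.
+  */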
+ if (vreg->refcnt == 0)
+ vreg->status = msm_proc_comm(PCOM_VREG_SWITCH, &id, &enable);
+
+ if ((vreg->refcnt < UINT_MAX) && (!vreg->status))
+ vreg->refcnt++;
+
+ return vreg->status;
}
-void vreg_disable(struct vreg *vreg)
+int vreg_disable(struct vreg *vreg)
{
unsigned id = vreg->id;
unsigned enable = 0;
- msm_proc_comm(PCOM_VREG_SWITCH, &id, &enable);
+
+ if (!vreg->refcnt)
+ return 0;
+
+ if (vreg->refcnt == 1)
+ vreg->status = msm_proc_comm(PCOM_VREG_SWITCH, &id, &enable);
+
+ if (!vreg->status)
+ vreg->refcnt--;
+
+ return vreg->status;
}
int vreg_set_level(struct vreg *vreg, unsigned mv)
{
unsigned id = vreg->id;
- return msm_proc_comm(PCOM_VREG_SET_LEVEL, &id, &mv);
+
+ vreg->status = msm_proc_comm(PCOM_VREG_SET_LEVEL, &id, &mv);
+ return vreg->status;
}
#if defined(CONFIG_DEBUG_FS)
@@ -118,24 +159,59 @@ static int vreg_debug_set(void *data, u64 val)
static int vreg_debug_get(void *data, u64 *val)
{
- return -ENOSYS;
+ struct vreg *vreg = data;
+
+ if (!vreg->status)
+ *val = 0;
+ else
+ *val = 1;
+
+ return 0;
+}
+
+static int vreg_debug_count_set(void *data, u64 val)
+{
+ struct vreg *vreg = data;
+ if (val > UINT_MAX)
+ val = UINT_MAX;
+ vreg->refcnt = val;
+ return 0;
+}
+
+static int vreg_debug_count_get(void *data, u64 *val)
+{
+ struct vreg *vreg = data;
+
+ *val = vreg->refcnt;
+
+ return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(vreg_fops, vreg_debug_get, vreg_debug_set, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(vreg_count_fops, vreg_debug_count_get,
+ vreg_debug_count_set, "%llu\n");
static int __init vreg_debug_init(void)
{
struct dentry *dent;
int n;
+ char name[32];
+ const char *refcnt_name = "_refcnt";
dent = debugfs_create_dir("vreg", 0);
if (IS_ERR(dent))
return 0;
- for (n = 0; n < ARRAY_SIZE(vregs); n++)
+ for (n = 0; n < ARRAY_SIZE(vregs); n++) {
(void) debugfs_create_file(vregs[n].name, 0644,
dent, vregs + n, &vreg_fops);
+ strlcpy(name, vregs[n].name, sizeof(name));
+ strlcat(name, refcnt_name, sizeof(name));
+ (void) debugfs_create_file(name, 0644,
+ dent, vregs + n, &vreg_count_fops);
+ }
+
return 0;
}
diff --git a/arch/arm/mach-mx3/Kconfig b/arch/arm/mach-mx3/Kconfig
index 170f68e46dd5..344753fdf25e 100644
--- a/arch/arm/mach-mx3/Kconfig
+++ b/arch/arm/mach-mx3/Kconfig
@@ -82,6 +82,7 @@ config MACH_MX31MOBOARD
config MACH_MX31LILLY
bool "Support MX31 LILLY-1131 platforms (INCO startec)"
select ARCH_MX31
+ select MXC_ULPI if USB_ULPI
help
Include support for mx31 based LILLY1131 modules. This includes
specific configurations for the board and its peripherals.
diff --git a/arch/arm/mach-mx3/devices.c b/arch/arm/mach-mx3/devices.c
index f8911154a9fa..1ffed283d6cd 100644
--- a/arch/arm/mach-mx3/devices.c
+++ b/arch/arm/mach-mx3/devices.c
@@ -588,6 +588,25 @@ struct platform_device imx_wdt_device0 = {
.resource = imx_wdt_resources,
};
+static struct resource imx_rtc_resources[] = {
+ {
+ .start = MX31_RTC_BASE_ADDR,
+ .end = MX31_RTC_BASE_ADDR + 0x3fff,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MX31_INT_RTC,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device imx_rtc_device0 = {
+ .name = "mxc_rtc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(imx_rtc_resources),
+ .resource = imx_rtc_resources,
+};
+
static int __init mx3_devices_init(void)
{
if (cpu_is_mx31()) {
diff --git a/arch/arm/mach-mx3/devices.h b/arch/arm/mach-mx3/devices.h
index 4f77eb501274..b1687ad72052 100644
--- a/arch/arm/mach-mx3/devices.h
+++ b/arch/arm/mach-mx3/devices.h
@@ -27,3 +27,4 @@ extern struct platform_device imx_ssi_device0;
extern struct platform_device imx_ssi_device1;
extern struct platform_device imx_ssi_device1;
extern struct platform_device imx_wdt_device0;
+extern struct platform_device imx_rtc_device0;
diff --git a/arch/arm/mach-mx3/mach-mx31lilly.c b/arch/arm/mach-mx3/mach-mx31lilly.c
index 80847b04c063..d3d5877c750e 100644
--- a/arch/arm/mach-mx3/mach-mx31lilly.c
+++ b/arch/arm/mach-mx3/mach-mx31lilly.c
@@ -27,12 +27,15 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/clk.h>
+#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/smsc911x.h>
#include <linux/mtd/physmap.h>
#include <linux/spi/spi.h>
#include <linux/mfd/mc13783.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/ulpi.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -44,6 +47,8 @@
#include <mach/iomux-mx3.h>
#include <mach/board-mx31lilly.h>
#include <mach/spi.h>
+#include <mach/mxc_ehci.h>
+#include <mach/ulpi.h>
#include "devices.h"
@@ -108,6 +113,137 @@ static struct platform_device physmap_flash_device = {
.num_resources = 1,
};
+/* USB */
+
+#define USB_PAD_CFG (PAD_CTL_DRV_MAX | PAD_CTL_SRE_FAST | PAD_CTL_HYS_CMOS | \
+ PAD_CTL_ODE_CMOS | PAD_CTL_100K_PU)
+
+static int usbotg_init(struct platform_device *pdev)
+{
+ unsigned int pins[] = {
+ MX31_PIN_USBOTG_DATA0__USBOTG_DATA0,
+ MX31_PIN_USBOTG_DATA1__USBOTG_DATA1,
+ MX31_PIN_USBOTG_DATA2__USBOTG_DATA2,
+ MX31_PIN_USBOTG_DATA3__USBOTG_DATA3,
+ MX31_PIN_USBOTG_DATA4__USBOTG_DATA4,
+ MX31_PIN_USBOTG_DATA5__USBOTG_DATA5,
+ MX31_PIN_USBOTG_DATA6__USBOTG_DATA6,
+ MX31_PIN_USBOTG_DATA7__USBOTG_DATA7,
+ MX31_PIN_USBOTG_CLK__USBOTG_CLK,
+ MX31_PIN_USBOTG_DIR__USBOTG_DIR,
+ MX31_PIN_USBOTG_NXT__USBOTG_NXT,
+ MX31_PIN_USBOTG_STP__USBOTG_STP,
+ };
+
+ mxc_iomux_setup_multiple_pins(pins, ARRAY_SIZE(pins), "USB OTG");
+
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA0, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA1, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA2, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA3, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA4, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA5, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA6, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA7, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_CLK, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_DIR, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_NXT, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_STP, USB_PAD_CFG);
+
+ mxc_iomux_set_gpr(MUX_PGP_USB_4WIRE, true);
+ mxc_iomux_set_gpr(MUX_PGP_USB_COMMON, true);
+
+ /* chip select */
+ mxc_iomux_alloc_pin(IOMUX_MODE(MX31_PIN_DTR_DCE2, IOMUX_CONFIG_GPIO),
+ "USBOTG_CS");
+ gpio_request(IOMUX_TO_GPIO(MX31_PIN_DTR_DCE2), "USBH1 CS");
+ gpio_direction_output(IOMUX_TO_GPIO(MX31_PIN_DTR_DCE2), 0);
+
+ return 0;
+}
+
+static int usbh1_init(struct platform_device *pdev)
+{
+ int pins[] = {
+ MX31_PIN_CSPI1_MOSI__USBH1_RXDM,
+ MX31_PIN_CSPI1_MISO__USBH1_RXDP,
+ MX31_PIN_CSPI1_SS0__USBH1_TXDM,
+ MX31_PIN_CSPI1_SS1__USBH1_TXDP,
+ MX31_PIN_CSPI1_SS2__USBH1_RCV,
+ MX31_PIN_CSPI1_SCLK__USBH1_OEB,
+ MX31_PIN_CSPI1_SPI_RDY__USBH1_FS,
+ };
+
+ mxc_iomux_setup_multiple_pins(pins, ARRAY_SIZE(pins), "USB H1");
+
+ mxc_iomux_set_pad(MX31_PIN_CSPI1_MOSI, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_CSPI1_MISO, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_CSPI1_SS0, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_CSPI1_SS1, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_CSPI1_SS2, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_CSPI1_SCLK, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_CSPI1_SPI_RDY, USB_PAD_CFG);
+
+ mxc_iomux_set_gpr(MUX_PGP_USB_SUSPEND, true);
+
+ return 0;
+}
+
+static int usbh2_init(struct platform_device *pdev)
+{
+ int pins[] = {
+ MX31_PIN_USBH2_DATA0__USBH2_DATA0,
+ MX31_PIN_USBH2_DATA1__USBH2_DATA1,
+ MX31_PIN_USBH2_CLK__USBH2_CLK,
+ MX31_PIN_USBH2_DIR__USBH2_DIR,
+ MX31_PIN_USBH2_NXT__USBH2_NXT,
+ MX31_PIN_USBH2_STP__USBH2_STP,
+ };
+
+ mxc_iomux_setup_multiple_pins(pins, ARRAY_SIZE(pins), "USB H2");
+
+ mxc_iomux_set_pad(MX31_PIN_USBH2_CLK, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBH2_DIR, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBH2_NXT, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBH2_STP, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBH2_DATA0, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBH2_DATA1, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_SRXD6, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_STXD6, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_SFS3, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_SCK3, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_SRXD3, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_STXD3, USB_PAD_CFG);
+
+ mxc_iomux_set_gpr(MUX_PGP_UH2, true);
+
+ /* chip select */
+ mxc_iomux_alloc_pin(IOMUX_MODE(MX31_PIN_DTR_DCE1, IOMUX_CONFIG_GPIO),
+ "USBH2_CS");
+ gpio_request(IOMUX_TO_GPIO(MX31_PIN_DTR_DCE1), "USBH2 CS");
+ gpio_direction_output(IOMUX_TO_GPIO(MX31_PIN_DTR_DCE1), 0);
+
+ return 0;
+}
+
+static struct mxc_usbh_platform_data usbotg_pdata = {
+ .init = usbotg_init,
+ .portsc = MXC_EHCI_MODE_ULPI | MXC_EHCI_UTMI_8BIT,
+ .flags = MXC_EHCI_POWER_PINS_ENABLED,
+};
+
+static struct mxc_usbh_platform_data usbh1_pdata = {
+ .init = usbh1_init,
+ .portsc = MXC_EHCI_MODE_UTMI | MXC_EHCI_SERIAL,
+ .flags = MXC_EHCI_POWER_PINS_ENABLED | MXC_EHCI_INTERFACE_SINGLE_UNI,
+};
+
+static struct mxc_usbh_platform_data usbh2_pdata = {
+ .init = usbh2_init,
+ .portsc = MXC_EHCI_MODE_ULPI | MXC_EHCI_UTMI_8BIT,
+ .flags = MXC_EHCI_POWER_PINS_ENABLED,
+};
+
static struct platform_device *devices[] __initdata = {
&smsc91x_device,
&physmap_flash_device,
@@ -183,6 +319,15 @@ static void __init mx31lilly_board_init(void)
spi_register_board_info(&mc13783_dev, 1);
platform_add_devices(devices, ARRAY_SIZE(devices));
+
+ /* USB */
+ usbotg_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
+ USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
+ usbh2_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
+ USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
+
+ mxc_register_device(&mxc_usbh1, &usbh1_pdata);
+ mxc_register_device(&mxc_usbh2, &usbh2_pdata);
}
static void __init mx31lilly_timer_init(void)
diff --git a/arch/arm/mach-mx3/mx31lite-db.c b/arch/arm/mach-mx3/mx31lite-db.c
index 093c595ca581..5f05bfbec380 100644
--- a/arch/arm/mach-mx3/mx31lite-db.c
+++ b/arch/arm/mach-mx3/mx31lite-db.c
@@ -206,5 +206,6 @@ void __init mx31lite_db_init(void)
mxc_register_device(&mxc_spi_device0, &spi0_pdata);
platform_device_register(&litekit_led_device);
mxc_register_device(&imx_wdt_device0, NULL);
+ mxc_register_device(&imx_rtc_device0, NULL);
}
diff --git a/arch/arm/mach-mx3/mx31moboard-smartbot.c b/arch/arm/mach-mx3/mx31moboard-smartbot.c
index 52a69fc8b14f..83d2b9f42cec 100644
--- a/arch/arm/mach-mx3/mx31moboard-smartbot.c
+++ b/arch/arm/mach-mx3/mx31moboard-smartbot.c
@@ -119,7 +119,7 @@ static int __init smartbot_cam_init(void)
#define POWER_EN IOMUX_TO_GPIO(MX31_PIN_DTR_DCE1)
#define DSPIC_RST_B IOMUX_TO_GPIO(MX31_PIN_DSR_DCE1)
#define TRSLAT_RST_B IOMUX_TO_GPIO(MX31_PIN_RI_DCE1)
-#define SEL3 IOMUX_TO_GPIO(MX31_PIN_DCD_DCE1)
+#define TRSLAT_SRC_CHOICE IOMUX_TO_GPIO(MX31_PIN_DCD_DCE1)
static void smartbot_resets_init(void)
{
@@ -138,9 +138,9 @@ static void smartbot_resets_init(void)
gpio_export(TRSLAT_RST_B, false);
}
- if (!gpio_request(SEL3, "sel3")) {
- gpio_direction_input(SEL3);
- gpio_export(SEL3, true);
+ if (!gpio_request(TRSLAT_SRC_CHOICE, "translator-src-choice")) {
+ gpio_direction_output(TRSLAT_SRC_CHOICE, 0);
+ gpio_export(TRSLAT_SRC_CHOICE, false);
}
}
/*
diff --git a/arch/arm/mach-mx5/board-mx51_babbage.c b/arch/arm/mach-mx5/board-mx51_babbage.c
index ee67a71db80d..dacf506f18ba 100644
--- a/arch/arm/mach-mx5/board-mx51_babbage.c
+++ b/arch/arm/mach-mx5/board-mx51_babbage.c
@@ -12,11 +12,16 @@
#include <linux/init.h>
#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/fsl_devices.h>
#include <mach/common.h>
#include <mach/hardware.h>
#include <mach/imx-uart.h>
#include <mach/iomux-mx51.h>
+#include <mach/mxc_ehci.h>
#include <asm/irq.h>
#include <asm/setup.h>
@@ -26,6 +31,17 @@
#include "devices.h"
+#define BABBAGE_USB_HUB_RESET (0*32 + 7) /* GPIO_1_7 */
+#define BABBAGE_USBH1_STP (0*32 + 27) /* GPIO_1_27 */
+
+/* USB_CTRL_1 */
+#define MX51_USB_CTRL_1_OFFSET 0x10
+#define MX51_USB_CTRL_UH1_EXT_CLK_EN (1 << 25)
+
+#define MX51_USB_PLLDIV_12_MHZ 0x00
+#define MX51_USB_PLL_DIV_19_2_MHZ 0x01
+#define MX51_USB_PLL_DIV_24_MHZ 0x02
+
static struct platform_device *devices[] __initdata = {
&mxc_fec_device,
};
@@ -46,6 +62,22 @@ static struct pad_desc mx51babbage_pads[] = {
MX51_PAD_EIM_D26__UART3_TXD,
MX51_PAD_EIM_D27__UART3_RTS,
MX51_PAD_EIM_D24__UART3_CTS,
+
+ /* USB HOST1 */
+ MX51_PAD_USBH1_CLK__USBH1_CLK,
+ MX51_PAD_USBH1_DIR__USBH1_DIR,
+ MX51_PAD_USBH1_NXT__USBH1_NXT,
+ MX51_PAD_USBH1_DATA0__USBH1_DATA0,
+ MX51_PAD_USBH1_DATA1__USBH1_DATA1,
+ MX51_PAD_USBH1_DATA2__USBH1_DATA2,
+ MX51_PAD_USBH1_DATA3__USBH1_DATA3,
+ MX51_PAD_USBH1_DATA4__USBH1_DATA4,
+ MX51_PAD_USBH1_DATA5__USBH1_DATA5,
+ MX51_PAD_USBH1_DATA6__USBH1_DATA6,
+ MX51_PAD_USBH1_DATA7__USBH1_DATA7,
+
+ /* USB HUB reset line */
+ MX51_PAD_GPIO_1_7__GPIO1_7,
};
/* Serial ports */
@@ -66,15 +98,138 @@ static inline void mxc_init_imx_uart(void)
}
#endif /* SERIAL_IMX */
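+/*
+ * Drive USBH1_STP high for 100ms as a plain GPIO; mxc_board_init() later
+ * switches the pad back to its USB function.
+ */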
+static int gpio_usbh1_active(void)
+{
+ struct pad_desc usbh1stp_gpio = MX51_PAD_USBH1_STP__GPIO_1_27;
+ int ret;
+
+ /* Set USBH1_STP to GPIO and toggle it */
+ mxc_iomux_v3_setup_pad(&usbh1stp_gpio);
+ ret = gpio_request(BABBAGE_USBH1_STP, "usbh1_stp");
+
+ if (ret) {
+ pr_debug("failed to get MX51_PAD_USBH1_STP__GPIO_1_27: %d\n", ret);
+ return ret;
+ }
+ gpio_direction_output(BABBAGE_USBH1_STP, 0);
+ gpio_set_value(BABBAGE_USBH1_STP, 1);
+ msleep(100);
+ gpio_free(BABBAGE_USBH1_STP);
+ return 0;
+}
+
+static inline void babbage_usbhub_reset(void)
+{
+ int ret;
+
+ /* Bring USB hub out of reset */
+ ret = gpio_request(BABBAGE_USB_HUB_RESET, "GPIO1_7");
+ if (ret) {
+ printk(KERN_ERR "failed to get GPIO_USB_HUB_RESET: %d\n", ret);
+ return;
+ }
+ gpio_direction_output(BABBAGE_USB_HUB_RESET, 0);
+
+ /* USB HUB RESET - De-assert USB HUB RESET_N */
+ msleep(1);
+ gpio_set_value(BABBAGE_USB_HUB_RESET, 0);
+ msleep(1);
+ gpio_set_value(BABBAGE_USB_HUB_RESET, 1);
+}
+
+/*
+ * This function is board specific, as the bit mask for the PLL divider
+ * differs between Freescale SoCs; a common bitmask is therefore not
+ * possible and cannot be placed in plat-mxc/ehci.c.
+ */
+static int initialize_otg_port(struct platform_device *pdev)
+{
+ u32 v;
+ void __iomem *usb_base;
+ void __iomem *usbother_base;
+
+ usb_base = ioremap(MX51_OTG_BASE_ADDR, SZ_4K);
+ usbother_base = usb_base + MX5_USBOTHER_REGS_OFFSET;
+
+ /* Set the PHY clock to 19.2MHz */
+ v = __raw_readl(usbother_base + MXC_USB_PHY_CTR_FUNC2_OFFSET);
+ v &= ~MX5_USB_UTMI_PHYCTRL1_PLLDIV_MASK;
+ v |= MX51_USB_PLL_DIV_19_2_MHZ;
+ __raw_writel(v, usbother_base + MXC_USB_PHY_CTR_FUNC2_OFFSET);
+ iounmap(usb_base);
+ return 0;
+}
+
+static int initialize_usbh1_port(struct platform_device *pdev)
+{
+ u32 v;
+ void __iomem *usb_base;
+ void __iomem *usbother_base;
+
+ usb_base = ioremap(MX51_OTG_BASE_ADDR, SZ_4K);
+ usbother_base = usb_base + MX5_USBOTHER_REGS_OFFSET;
+
+ /* The clock for the USBH1 ULPI port will come externally from the PHY. */
+ v = __raw_readl(usbother_base + MX51_USB_CTRL_1_OFFSET);
+ __raw_writel(v | MX51_USB_CTRL_UH1_EXT_CLK_EN, usbother_base + MX51_USB_CTRL_1_OFFSET);
+ iounmap(usb_base);
+ return 0;
+}
+
+static struct mxc_usbh_platform_data dr_utmi_config = {
+ .init = initialize_otg_port,
+ .portsc = MXC_EHCI_UTMI_16BIT,
+ .flags = MXC_EHCI_INTERNAL_PHY,
+};
+
+static struct fsl_usb2_platform_data usb_pdata = {
+ .operating_mode = FSL_USB2_DR_DEVICE,
+ .phy_mode = FSL_USB2_PHY_UTMI_WIDE,
+};
+
+static struct mxc_usbh_platform_data usbh1_config = {
+ .init = initialize_usbh1_port,
+ .portsc = MXC_EHCI_MODE_ULPI,
+ .flags = (MXC_EHCI_POWER_PINS_ENABLED | MXC_EHCI_ITC_NO_THRESHOLD),
+};
+
+static int otg_mode_host;
+
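+/* Parsed from the kernel command line: "otg_mode=host" or "otg_mode=device" (default: device). */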
+static int __init babbage_otg_mode(char *options)
+{
+ if (!strcmp(options, "host"))
+ otg_mode_host = 1;
+ else if (!strcmp(options, "device"))
+ otg_mode_host = 0;
+ else
+ pr_info("otg_mode neither \"host\" nor \"device\". "
+ "Defaulting to device\n");
+ return 0;
+}
+__setup("otg_mode=", babbage_otg_mode);
+
/*
* Board specific initialization.
*/
static void __init mxc_board_init(void)
{
+ struct pad_desc usbh1stp = MX51_PAD_USBH1_STP__USBH1_STP;
+
mxc_iomux_v3_setup_multiple_pads(mx51babbage_pads,
ARRAY_SIZE(mx51babbage_pads));
mxc_init_imx_uart();
platform_add_devices(devices, ARRAY_SIZE(devices));
+
+ if (otg_mode_host)
+ mxc_register_device(&mxc_usbdr_host_device, &dr_utmi_config);
+ else {
+ initialize_otg_port(NULL);
+ mxc_register_device(&mxc_usbdr_udc_device, &usb_pdata);
+ }
+
+ gpio_usbh1_active();
+ mxc_register_device(&mxc_usbh1_device, &usbh1_config);
+ /* Set USBH1_STP back to its USB function */
+ mxc_iomux_v3_setup_pad(&usbh1stp);
+ babbage_usbhub_reset();
}
static void __init mx51_babbage_timer_init(void)
diff --git a/arch/arm/mach-mx5/clock-mx51.c b/arch/arm/mach-mx5/clock-mx51.c
index 1ee6ce4087b8..d9f612d3370e 100644
--- a/arch/arm/mach-mx5/clock-mx51.c
+++ b/arch/arm/mach-mx5/clock-mx51.c
@@ -37,6 +37,7 @@ static struct clk lp_apm_clk;
static struct clk periph_apm_clk;
static struct clk ahb_clk;
static struct clk ipg_clk;
+static struct clk usboh3_clk;
#define MAX_DPLL_WAIT_TRIES 1000 /* 1000 * udelay(1) = 1ms */
@@ -570,6 +571,35 @@ static int _clk_uart_set_parent(struct clk *clk, struct clk *parent)
return 0;
}
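+/*
+ * usboh3 rate = parent / ((PRED + 1) * (PODF + 1)); both divider fields are
+ * read from the CSCDR1 register.
+ */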
+static unsigned long clk_usboh3_get_rate(struct clk *clk)
+{
+ u32 reg, prediv, podf;
+ unsigned long parent_rate;
+
+ parent_rate = clk_get_rate(clk->parent);
+
+ reg = __raw_readl(MXC_CCM_CSCDR1);
+ prediv = ((reg & MXC_CCM_CSCDR1_USBOH3_CLK_PRED_MASK) >>
+ MXC_CCM_CSCDR1_USBOH3_CLK_PRED_OFFSET) + 1;
+ podf = ((reg & MXC_CCM_CSCDR1_USBOH3_CLK_PODF_MASK) >>
+ MXC_CCM_CSCDR1_USBOH3_CLK_PODF_OFFSET) + 1;
+
+ return parent_rate / (prediv * podf);
+}
+
+static int _clk_usboh3_set_parent(struct clk *clk, struct clk *parent)
+{
+ u32 reg, mux;
+
+ mux = _get_mux(parent, &pll1_sw_clk, &pll2_sw_clk, &pll3_sw_clk,
+ &lp_apm_clk);
+ reg = __raw_readl(MXC_CCM_CSCMR1) & ~MXC_CCM_CSCMR1_USBOH3_CLK_SEL_MASK;
+ reg |= mux << MXC_CCM_CSCMR1_USBOH3_CLK_SEL_OFFSET;
+ __raw_writel(reg, MXC_CCM_CSCMR1);
+
+ return 0;
+}
+
static unsigned long get_high_reference_clock_rate(struct clk *clk)
{
return external_high_reference;
@@ -691,6 +721,12 @@ static struct clk uart_root_clk = {
.set_parent = _clk_uart_set_parent,
};
+static struct clk usboh3_clk = {
+ .parent = &pll2_sw_clk,
+ .get_rate = clk_usboh3_get_rate,
+ .set_parent = _clk_usboh3_set_parent,
+};
+
static struct clk ahb_max_clk = {
.parent = &ahb_clk,
.enable_reg = MXC_CCM_CCGR0,
@@ -779,6 +815,12 @@ static struct clk_lookup lookups[] = {
_REGISTER_CLOCK("imx-uart.2", NULL, uart3_clk)
_REGISTER_CLOCK(NULL, "gpt", gpt_clk)
_REGISTER_CLOCK("fec.0", NULL, fec_clk)
+ _REGISTER_CLOCK("mxc-ehci.0", "usb", usboh3_clk)
+ _REGISTER_CLOCK("mxc-ehci.0", "usb_ahb", ahb_clk)
+ _REGISTER_CLOCK("mxc-ehci.1", "usb", usboh3_clk)
+ _REGISTER_CLOCK("mxc-ehci.1", "usb_ahb", ahb_clk)
+ _REGISTER_CLOCK("fsl-usb2-udc", "usb", usboh3_clk)
+ _REGISTER_CLOCK("fsl-usb2-udc", "usb_ahb", ahb_clk)
};
static void clk_tree_init(void)
@@ -819,6 +861,9 @@ int __init mx51_clocks_init(unsigned long ckil, unsigned long osc,
clk_enable(&cpu_clk);
clk_enable(&main_bus_clk);
+ /* set the usboh3_clk parent to pll2_sw_clk */
+ clk_set_parent(&usboh3_clk, &pll2_sw_clk);
+
/* System timer */
mxc_timer_init(&gpt_clk, MX51_IO_ADDRESS(MX51_GPT1_BASE_ADDR),
MX51_MXC_INT_GPT);
diff --git a/arch/arm/mach-mx5/devices.c b/arch/arm/mach-mx5/devices.c
index d6fd3961ade9..23850e637dc3 100644
--- a/arch/arm/mach-mx5/devices.c
+++ b/arch/arm/mach-mx5/devices.c
@@ -1,5 +1,6 @@
/*
* Copyright 2009 Amit Kucheria <amit.kucheria@canonical.com>
+ * Copyright (C) 2010 Freescale Semiconductor, Inc.
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
@@ -10,8 +11,11 @@
*/
#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/gpio.h>
#include <mach/hardware.h>
#include <mach/imx-uart.h>
+#include <mach/irqs.h>
static struct resource uart0[] = {
{
@@ -89,8 +93,94 @@ struct platform_device mxc_fec_device = {
.resource = mxc_fec_resources,
};
-/* Dummy definition to allow compiling in AVIC and TZIC simultaneously */
+static u64 usb_dma_mask = DMA_BIT_MASK(32);
+
+static struct resource usbotg_resources[] = {
+ {
+ .start = MX51_OTG_BASE_ADDR,
+ .end = MX51_OTG_BASE_ADDR + 0x1ff,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MX51_MXC_INT_USB_OTG,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+/* OTG gadget device */
+struct platform_device mxc_usbdr_udc_device = {
+ .name = "fsl-usb2-udc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(usbotg_resources),
+ .resource = usbotg_resources,
+ .dev = {
+ .dma_mask = &usb_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+struct platform_device mxc_usbdr_host_device = {
+ .name = "mxc-ehci",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(usbotg_resources),
+ .resource = usbotg_resources,
+ .dev = {
+ .dma_mask = &usb_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+static struct resource usbh1_resources[] = {
+ {
+ .start = MX51_OTG_BASE_ADDR + 0x200,
+ .end = MX51_OTG_BASE_ADDR + 0x200 + 0x1ff,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MX51_MXC_INT_USB_H1,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device mxc_usbh1_device = {
+ .name = "mxc-ehci",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(usbh1_resources),
+ .resource = usbh1_resources,
+ .dev = {
+ .dma_mask = &usb_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+static struct mxc_gpio_port mxc_gpio_ports[] = {
+ {
+ .chip.label = "gpio-0",
+ .base = MX51_IO_ADDRESS(MX51_GPIO1_BASE_ADDR),
+ .irq = MX51_MXC_INT_GPIO1_LOW,
+ .virtual_irq_start = MXC_GPIO_IRQ_START
+ },
+ {
+ .chip.label = "gpio-1",
+ .base = MX51_IO_ADDRESS(MX51_GPIO2_BASE_ADDR),
+ .irq = MX51_MXC_INT_GPIO2_LOW,
+ .virtual_irq_start = MXC_GPIO_IRQ_START + 32 * 1
+ },
+ {
+ .chip.label = "gpio-2",
+ .base = MX51_IO_ADDRESS(MX51_GPIO3_BASE_ADDR),
+ .irq = MX51_MXC_INT_GPIO3_LOW,
+ .virtual_irq_start = MXC_GPIO_IRQ_START + 32 * 2
+ },
+ {
+ .chip.label = "gpio-3",
+ .base = MX51_IO_ADDRESS(MX51_GPIO4_BASE_ADDR),
+ .irq = MX51_MXC_INT_GPIO4_LOW,
+ .virtual_irq_start = MXC_GPIO_IRQ_START + 32 * 3
+ },
+};
+
int __init mxc_register_gpios(void)
{
- return 0;
+ return mxc_gpio_init(mxc_gpio_ports, ARRAY_SIZE(mxc_gpio_ports));
}
diff --git a/arch/arm/mach-mx5/devices.h b/arch/arm/mach-mx5/devices.h
index f339ab8c19be..0494d6bbcc5f 100644
--- a/arch/arm/mach-mx5/devices.h
+++ b/arch/arm/mach-mx5/devices.h
@@ -2,3 +2,6 @@ extern struct platform_device mxc_uart_device0;
extern struct platform_device mxc_uart_device1;
extern struct platform_device mxc_uart_device2;
extern struct platform_device mxc_fec_device;
+extern struct platform_device mxc_usbdr_host_device;
+extern struct platform_device mxc_usbh1_device;
+extern struct platform_device mxc_usbdr_udc_device;
diff --git a/arch/arm/mach-nomadik/Kconfig b/arch/arm/mach-nomadik/Kconfig
index 3c5e0f522e9c..71f3ea623974 100644
--- a/arch/arm/mach-nomadik/Kconfig
+++ b/arch/arm/mach-nomadik/Kconfig
@@ -6,6 +6,7 @@ config MACH_NOMADIK_8815NHK
bool "ST 8815 Nomadik Hardware Kit (evaluation board)"
select NOMADIK_8815
select HAS_MTU
+ select NOMADIK_GPIO
endmenu
diff --git a/arch/arm/mach-nomadik/Makefile b/arch/arm/mach-nomadik/Makefile
index 36f67fb207d2..a6bbd1a7b4e7 100644
--- a/arch/arm/mach-nomadik/Makefile
+++ b/arch/arm/mach-nomadik/Makefile
@@ -7,7 +7,7 @@
# Object file lists.
-obj-y += clock.o gpio.o
+obj-y += clock.o
# Cpu revision
obj-$(CONFIG_NOMADIK_8815) += cpu-8815.o
diff --git a/arch/arm/mach-nomadik/board-nhk8815.c b/arch/arm/mach-nomadik/board-nhk8815.c
index ab3712c86d2b..841d459ad59d 100644
--- a/arch/arm/mach-nomadik/board-nhk8815.c
+++ b/arch/arm/mach-nomadik/board-nhk8815.c
@@ -32,7 +32,6 @@
#include <mach/setup.h>
#include <mach/nand.h>
#include <mach/fsmc.h>
-#include "clock.h"
/* Initial value for SRC control register: all timers use MXTAL/8 source */
#define SRC_CR_INIT_MASK 0x00007fff
@@ -202,11 +201,6 @@ static struct amba_device *amba_devs[] __initdata = {
&uart1_device,
};
-/* We have a fixed clock alone, by now */
-static struct clk nhk8815_clk_48 = {
- .rate = 48*1000*1000,
-};
-
static struct resource nhk8815_eth_resources[] = {
{
.name = "smc91x-regs",
@@ -276,10 +270,8 @@ static void __init nhk8815_platform_init(void)
platform_add_devices(nhk8815_platform_devices,
ARRAY_SIZE(nhk8815_platform_devices));
- for (i = 0; i < ARRAY_SIZE(amba_devs); i++) {
- nmdk_clk_create(&nhk8815_clk_48, amba_devs[i]->dev.init_name);
+ for (i = 0; i < ARRAY_SIZE(amba_devs); i++)
amba_device_register(amba_devs[i], &iomem_resource);
- }
}
MACHINE_START(NOMADIK, "NHK8815")
diff --git a/arch/arm/mach-nomadik/clock.c b/arch/arm/mach-nomadik/clock.c
index 9f92502a0083..60f5bee09f2e 100644
--- a/arch/arm/mach-nomadik/clock.c
+++ b/arch/arm/mach-nomadik/clock.c
@@ -32,14 +32,36 @@ void clk_disable(struct clk *clk)
}
EXPORT_SYMBOL(clk_disable);
-/* Create a clock structure with the given name */
-int nmdk_clk_create(struct clk *clk, const char *dev_id)
-{
- struct clk_lookup *clkdev;
+/* We have a fixed clock alone, for now */
+static struct clk clk_48 = {
+ .rate = 48 * 1000 * 1000,
+};
+
+/*
+ * Catch-all default clock to satisfy drivers using the clk API. We don't
+ * model the actual hardware clocks yet.
+ */
+static struct clk clk_default;
- clkdev = clkdev_alloc(clk, NULL, dev_id);
- if (!clkdev)
- return -ENOMEM;
- clkdev_add(clkdev);
+#define CLK(_clk, dev) \
+ { \
+ .clk = _clk, \
+ .dev_id = dev, \
+ }
+
+static struct clk_lookup lookups[] = {
+ CLK(&clk_48, "uart0"),
+ CLK(&clk_48, "uart1"),
+ CLK(&clk_default, "gpio.0"),
+ CLK(&clk_default, "gpio.1"),
+ CLK(&clk_default, "gpio.2"),
+ CLK(&clk_default, "gpio.3"),
+};
+
+static int __init clk_init(void)
+{
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
return 0;
}
+
+arch_initcall(clk_init);
diff --git a/arch/arm/mach-nomadik/clock.h b/arch/arm/mach-nomadik/clock.h
index 235faec7f627..5563985a2cc7 100644
--- a/arch/arm/mach-nomadik/clock.h
+++ b/arch/arm/mach-nomadik/clock.h
@@ -11,4 +11,3 @@
struct clk {
unsigned long rate;
};
-extern int nmdk_clk_create(struct clk *clk, const char *dev_id);
diff --git a/arch/arm/mach-nomadik/cpu-8815.c b/arch/arm/mach-nomadik/cpu-8815.c
index 9bf33b30a025..91c3c901b469 100644
--- a/arch/arm/mach-nomadik/cpu-8815.c
+++ b/arch/arm/mach-nomadik/cpu-8815.c
@@ -20,6 +20,7 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/amba/bus.h>
+#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <mach/hardware.h>
@@ -30,60 +31,66 @@
#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
+#define __MEM_4K_RESOURCE(x) \
+ .res = {.start = (x), .end = (x) + SZ_4K - 1, .flags = IORESOURCE_MEM}
+
/* The 8815 has 4 GPIO blocks, let's register them immediately */
+
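+/* Each block gets a 4K MEM resource plus its IRQ, wrapped in a "gpio" platform device. */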
+#define GPIO_RESOURCE(block) \
+ { \
+ .start = NOMADIK_GPIO##block##_BASE, \
+ .end = NOMADIK_GPIO##block##_BASE + SZ_4K - 1, \
+ .flags = IORESOURCE_MEM, \
+ }, \
+ { \
+ .start = IRQ_GPIO##block, \
+ .end = IRQ_GPIO##block, \
+ .flags = IORESOURCE_IRQ, \
+ }
+
+#define GPIO_DEVICE(block) \
+ { \
+ .name = "gpio", \
+ .id = block, \
+ .num_resources = 2, \
+ .resource = &cpu8815_gpio_resources[block * 2], \
+ .dev = { \
+ .platform_data = &cpu8815_gpio[block], \
+ }, \
+ }
+
static struct nmk_gpio_platform_data cpu8815_gpio[] = {
{
.name = "GPIO-0-31",
.first_gpio = 0,
.first_irq = NOMADIK_GPIO_TO_IRQ(0),
- .parent_irq = IRQ_GPIO0,
}, {
.name = "GPIO-32-63",
.first_gpio = 32,
.first_irq = NOMADIK_GPIO_TO_IRQ(32),
- .parent_irq = IRQ_GPIO1,
}, {
.name = "GPIO-64-95",
.first_gpio = 64,
.first_irq = NOMADIK_GPIO_TO_IRQ(64),
- .parent_irq = IRQ_GPIO2,
}, {
.name = "GPIO-96-127", /* 124..127 not routed to pin */
.first_gpio = 96,
.first_irq = NOMADIK_GPIO_TO_IRQ(96),
- .parent_irq = IRQ_GPIO3,
}
};
-#define __MEM_4K_RESOURCE(x) \
- .res = {.start = (x), .end = (x) + SZ_4K - 1, .flags = IORESOURCE_MEM}
+static struct resource cpu8815_gpio_resources[] = {
+ GPIO_RESOURCE(0),
+ GPIO_RESOURCE(1),
+ GPIO_RESOURCE(2),
+ GPIO_RESOURCE(3),
+};
-static struct amba_device cpu8815_amba_gpio[] = {
- {
- .dev = {
- .init_name = "gpio0",
- .platform_data = cpu8815_gpio + 0,
- },
- __MEM_4K_RESOURCE(NOMADIK_GPIO0_BASE),
- }, {
- .dev = {
- .init_name = "gpio1",
- .platform_data = cpu8815_gpio + 1,
- },
- __MEM_4K_RESOURCE(NOMADIK_GPIO1_BASE),
- }, {
- .dev = {
- .init_name = "gpio2",
- .platform_data = cpu8815_gpio + 2,
- },
- __MEM_4K_RESOURCE(NOMADIK_GPIO2_BASE),
- }, {
- .dev = {
- .init_name = "gpio3",
- .platform_data = cpu8815_gpio + 3,
- },
- __MEM_4K_RESOURCE(NOMADIK_GPIO3_BASE),
- },
+static struct platform_device cpu8815_platform_gpio[] = {
+ GPIO_DEVICE(0),
+ GPIO_DEVICE(1),
+ GPIO_DEVICE(2),
+ GPIO_DEVICE(3),
};
static struct amba_device cpu8815_amba_rng = {
@@ -93,11 +100,14 @@ static struct amba_device cpu8815_amba_rng = {
__MEM_4K_RESOURCE(NOMADIK_RNG_BASE),
};
+static struct platform_device *platform_devs[] __initdata = {
+ cpu8815_platform_gpio + 0,
+ cpu8815_platform_gpio + 1,
+ cpu8815_platform_gpio + 2,
+ cpu8815_platform_gpio + 3,
+};
+
static struct amba_device *amba_devs[] __initdata = {
- cpu8815_amba_gpio + 0,
- cpu8815_amba_gpio + 1,
- cpu8815_amba_gpio + 2,
- cpu8815_amba_gpio + 3,
&cpu8815_amba_rng
};
@@ -105,6 +115,7 @@ static int __init cpu8815_init(void)
{
int i;
+ platform_add_devices(platform_devs, ARRAY_SIZE(platform_devs));
for (i = 0; i < ARRAY_SIZE(amba_devs); i++)
amba_device_register(amba_devs[i], &iomem_resource);
return 0;
diff --git a/arch/arm/mach-nomadik/include/mach/gpio.h b/arch/arm/mach-nomadik/include/mach/gpio.h
index 61577c9f9a7d..7a81a0420343 100644
--- a/arch/arm/mach-nomadik/include/mach/gpio.h
+++ b/arch/arm/mach-nomadik/include/mach/gpio.h
@@ -1,71 +1,6 @@
-/*
- * Structures and registers for GPIO access in the Nomadik SoC
- *
- * Copyright (C) 2008 STMicroelectronics
- * Author: Prafulla WADASKAR <prafulla.wadaskar@st.com>
- * Copyright (C) 2009 Alessandro Rubini <rubini@unipv.it>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
#ifndef __ASM_ARCH_GPIO_H
#define __ASM_ARCH_GPIO_H
-#include <asm-generic/gpio.h>
-
-/*
- * These currently cause a function call to happen, they may be optimized
- * if needed by adding cpu-specific defines to identify blocks
- * (see mach-pxa/include/mach/gpio.h as an example using GPLR etc)
- */
-#define gpio_get_value __gpio_get_value
-#define gpio_set_value __gpio_set_value
-#define gpio_cansleep __gpio_cansleep
-#define gpio_to_irq __gpio_to_irq
-
-/*
- * "nmk_gpio" and "NMK_GPIO" stand for "Nomadik GPIO", leaving
- * the "gpio" namespace for generic and cross-machine functions
- */
-
-/* Register in the logic block */
-#define NMK_GPIO_DAT 0x00
-#define NMK_GPIO_DATS 0x04
-#define NMK_GPIO_DATC 0x08
-#define NMK_GPIO_PDIS 0x0c
-#define NMK_GPIO_DIR 0x10
-#define NMK_GPIO_DIRS 0x14
-#define NMK_GPIO_DIRC 0x18
-#define NMK_GPIO_SLPC 0x1c
-#define NMK_GPIO_AFSLA 0x20
-#define NMK_GPIO_AFSLB 0x24
-
-#define NMK_GPIO_RIMSC 0x40
-#define NMK_GPIO_FIMSC 0x44
-#define NMK_GPIO_IS 0x48
-#define NMK_GPIO_IC 0x4c
-#define NMK_GPIO_RWIMSC 0x50
-#define NMK_GPIO_FWIMSC 0x54
-#define NMK_GPIO_WKS 0x58
-
-/* Alternate functions: function C is set in hw by setting both A and B */
-#define NMK_GPIO_ALT_GPIO 0
-#define NMK_GPIO_ALT_A 1
-#define NMK_GPIO_ALT_B 2
-#define NMK_GPIO_ALT_C (NMK_GPIO_ALT_A | NMK_GPIO_ALT_B)
-
-extern int nmk_gpio_set_mode(int gpio, int gpio_mode);
-extern int nmk_gpio_get_mode(int gpio);
-
-/*
- * Platform data to register a block: only the initial gpio/irq number.
- */
-struct nmk_gpio_platform_data {
- char *name;
- int first_gpio;
- int first_irq;
- int parent_irq;
-};
+#include <plat/gpio.h>
#endif /* __ASM_ARCH_GPIO_H */
diff --git a/arch/arm/mach-omap1/Kconfig b/arch/arm/mach-omap1/Kconfig
index 27f489747bbd..b18d7c28ab7a 100644
--- a/arch/arm/mach-omap1/Kconfig
+++ b/arch/arm/mach-omap1/Kconfig
@@ -152,6 +152,16 @@ config MACH_AMS_DELTA
Support for the Amstrad E3 (codename Delta) videophone. Say Y here
if you have such a device.
+config AMS_DELTA_FIQ
+ bool "Fast Interrupt Request (FIQ) support for the E3"
+ depends on MACH_AMS_DELTA
+ select FIQ
+ help
+ Provide a FIQ handler for the E3.
+ This allows for fast handling of interrupts generated
+ by the clock line of the E3 mailboard (or a PS/2 keyboard)
+ connected to the GPIO-based external keyboard port.
+
config MACH_OMAP_GENERIC
bool "Generic OMAP board"
depends on ARCH_OMAP1 && (ARCH_OMAP15XX || ARCH_OMAP16XX)
diff --git a/arch/arm/mach-omap1/Makefile b/arch/arm/mach-omap1/Makefile
index b6a537c875b8..ea231c7a550a 100644
--- a/arch/arm/mach-omap1/Makefile
+++ b/arch/arm/mach-omap1/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_MACH_OMAP_PALMZ71) += board-palmz71.o
obj-$(CONFIG_MACH_OMAP_PALMTT) += board-palmtt.o
obj-$(CONFIG_MACH_NOKIA770) += board-nokia770.o
obj-$(CONFIG_MACH_AMS_DELTA) += board-ams-delta.o
+obj-$(CONFIG_AMS_DELTA_FIQ) += ams-delta-fiq.o ams-delta-fiq-handler.o
obj-$(CONFIG_MACH_SX1) += board-sx1.o board-sx1-mmc.o
obj-$(CONFIG_MACH_HERALD) += board-htcherald.o
diff --git a/arch/arm/mach-omap1/ams-delta-fiq-handler.S b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
new file mode 100644
index 000000000000..927d5a181760
--- /dev/null
+++ b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
@@ -0,0 +1,278 @@
+/*
+ * linux/arch/arm/mach-omap1/ams-delta-fiq-handler.S
+ *
+ * Based on linux/arch/arm/lib/floppydma.S
+ * Renamed and modified to work with 2.6 kernel by Matt Callow
+ * Copyright (C) 1995, 1996 Russell King
+ * Copyright (C) 2004 Pete Trapps
+ * Copyright (C) 2006 Matt Callow
+ * Copyright (C) 2010 Janusz Krzysztofik
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+
+#include <plat/io.h>
+#include <plat/board-ams-delta.h>
+
+#include <mach/ams-delta-fiq.h>
+
+/*
+ * GPIO-related definitions, copied from arch/arm/plat-omap/gpio.c.
+ * Unfortunately, those were not placed in a separate header file.
+ */
+#define OMAP1510_GPIO_BASE 0xFFFCE000
+#define OMAP1510_GPIO_DATA_INPUT 0x00
+#define OMAP1510_GPIO_DATA_OUTPUT 0x04
+#define OMAP1510_GPIO_DIR_CONTROL 0x08
+#define OMAP1510_GPIO_INT_CONTROL 0x0c
+#define OMAP1510_GPIO_INT_MASK 0x10
+#define OMAP1510_GPIO_INT_STATUS 0x14
+#define OMAP1510_GPIO_PIN_CONTROL 0x18
+
+/* GPIO register bitmasks */
+#define KEYBRD_DATA_MASK (0x1 << AMS_DELTA_GPIO_PIN_KEYBRD_DATA)
+#define KEYBRD_CLK_MASK (0x1 << AMS_DELTA_GPIO_PIN_KEYBRD_CLK)
+#define MODEM_IRQ_MASK (0x1 << AMS_DELTA_GPIO_PIN_MODEM_IRQ)
+#define HOOK_SWITCH_MASK (0x1 << AMS_DELTA_GPIO_PIN_HOOK_SWITCH)
+#define OTHERS_MASK (MODEM_IRQ_MASK | HOOK_SWITCH_MASK)
+
+/* IRQ handler register bitmasks */
+#define DEFERRED_FIQ_MASK (0x1 << (INT_DEFERRED_FIQ % IH2_BASE))
+#define GPIO_BANK1_MASK (0x1 << INT_GPIO_BANK1)
+
+/* Driver buffer byte offsets */
+#define BUF_MASK (FIQ_MASK * 4)
+#define BUF_STATE (FIQ_STATE * 4)
+#define BUF_KEYS_CNT (FIQ_KEYS_CNT * 4)
+#define BUF_TAIL_OFFSET (FIQ_TAIL_OFFSET * 4)
+#define BUF_HEAD_OFFSET (FIQ_HEAD_OFFSET * 4)
+#define BUF_BUF_LEN (FIQ_BUF_LEN * 4)
+#define BUF_KEY (FIQ_KEY * 4)
+#define BUF_MISSED_KEYS (FIQ_MISSED_KEYS * 4)
+#define BUF_BUFFER_START (FIQ_BUFFER_START * 4)
+#define BUF_GPIO_INT_MASK (FIQ_GPIO_INT_MASK * 4)
+#define BUF_KEYS_HICNT (FIQ_KEYS_HICNT * 4)
+#define BUF_IRQ_PEND (FIQ_IRQ_PEND * 4)
+#define BUF_SIR_CODE_L1 (FIQ_SIR_CODE_L1 * 4)
+#define BUF_SIR_CODE_L2 (IRQ_SIR_CODE_L2 * 4)
+#define BUF_CNT_INT_00 (FIQ_CNT_INT_00 * 4)
+#define BUF_CNT_INT_KEY (FIQ_CNT_INT_KEY * 4)
+#define BUF_CNT_INT_MDM (FIQ_CNT_INT_MDM * 4)
+#define BUF_CNT_INT_03 (FIQ_CNT_INT_03 * 4)
+#define BUF_CNT_INT_HSW (FIQ_CNT_INT_HSW * 4)
+#define BUF_CNT_INT_05 (FIQ_CNT_INT_05 * 4)
+#define BUF_CNT_INT_06 (FIQ_CNT_INT_06 * 4)
+#define BUF_CNT_INT_07 (FIQ_CNT_INT_07 * 4)
+#define BUF_CNT_INT_08 (FIQ_CNT_INT_08 * 4)
+#define BUF_CNT_INT_09 (FIQ_CNT_INT_09 * 4)
+#define BUF_CNT_INT_10 (FIQ_CNT_INT_10 * 4)
+#define BUF_CNT_INT_11 (FIQ_CNT_INT_11 * 4)
+#define BUF_CNT_INT_12 (FIQ_CNT_INT_12 * 4)
+#define BUF_CNT_INT_13 (FIQ_CNT_INT_13 * 4)
+#define BUF_CNT_INT_14 (FIQ_CNT_INT_14 * 4)
+#define BUF_CNT_INT_15 (FIQ_CNT_INT_15 * 4)
+#define BUF_CIRC_BUFF (FIQ_CIRC_BUFF * 4)
+
+
+/*
+ * Register usage
+ * r8 - temporary
+ * r9 - the driver buffer
+ * r10 - temporary
+ * r11 - interrupts mask
+ * r12 - base pointers
+ * r13 - interrupts status
+ */
+
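+/*
+ * Flow: read the level-1 interrupt controller; GPIO bank 1 sources
+ * (keyboard clock, hook switch, modem) are handled here in FIQ mode, then
+ * the deferred_fiq interrupt is raised so follow-up work runs in normal
+ * IRQ context.  Any other FIQ source is masked as spurious.
+ */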
+ .text
+
+ .global qwerty_fiqin_end
+
+ENTRY(qwerty_fiqin_start)
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ FIQ interrupt handler
+ ldr r12, omap_ih1_base @ set pointer to level1 handler
+
+ ldr r11, [r12, #IRQ_MIR_REG_OFFSET] @ fetch interrupts mask
+
+ ldr r13, [r12, #IRQ_ITR_REG_OFFSET] @ fetch interrupts status
+ bics r13, r13, r11 @ clear masked - any left?
+ beq exit @ none - spurious FIQ? exit
+
+ ldr r10, [r12, #IRQ_SIR_FIQ_REG_OFFSET] @ get requested interrupt number
+
+ mov r8, #2 @ reset FIQ agreement
+ str r8, [r12, #IRQ_CONTROL_REG_OFFSET]
+
+ cmp r10, #INT_GPIO_BANK1 @ is it GPIO bank interrupt?
+ beq gpio @ yes - process it
+
+ mov r8, #1
+ orr r8, r11, r8, lsl r10 @ mask spurious interrupt
+ str r8, [r12, #IRQ_MIR_REG_OFFSET]
+exit:
+ subs pc, lr, #4 @ return from FIQ
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+
+
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@
+gpio: @ GPIO bank interrupt handler
+ ldr r12, omap1510_gpio_base @ set base pointer to GPIO bank
+
+ ldr r11, [r12, #OMAP1510_GPIO_INT_MASK] @ fetch GPIO interrupts mask
+restart:
+ ldr r13, [r12, #OMAP1510_GPIO_INT_STATUS] @ fetch status bits
+ bics r13, r13, r11 @ clear masked - any left?
+ beq exit @ no - spurious interrupt? exit
+
+ orr r11, r11, r13 @ mask all requested interrupts
+ str r11, [r12, #OMAP1510_GPIO_INT_MASK]
+
+ ands r10, r13, #KEYBRD_CLK_MASK @ extract keyboard status - set?
+ beq hksw @ no - try next source
+
+
+ @@@@@@@@@@@@@@@@@@@@@@
+ @ Keyboard clock FIQ mode interrupt handler
+ @ r10 now contains KEYBRD_CLK_MASK, use it
+ str r10, [r12, #OMAP1510_GPIO_INT_STATUS] @ ack the interrupt
+ bic r11, r11, r10 @ unmask it
+ str r11, [r12, #OMAP1510_GPIO_INT_MASK]
+
+ @ Process keyboard data
+ ldr r8, [r12, #OMAP1510_GPIO_DATA_INPUT] @ fetch GPIO input
+
+ ldr r10, [r9, #BUF_STATE] @ fetch kbd interface state
+ cmp r10, #0 @ are we expecting start bit?
+ bne data @ no - go to data processing
+
+ ands r8, r8, #KEYBRD_DATA_MASK @ check start bit - detected?
+ beq hksw @ no - try next source
+
+ @ r8 contains KEYBRD_DATA_MASK, use it
+ str r8, [r9, #BUF_STATE] @ enter data processing state
+ @ r10 already contains 0, reuse it
+ str r10, [r9, #BUF_KEY] @ clear keycode
+ mov r10, #2 @ reset input bit mask
+ str r10, [r9, #BUF_MASK]
+
+ @ Mask other GPIO line interrupts till key done
+ str r11, [r9, #BUF_GPIO_INT_MASK] @ save mask for later restore
+ mvn r11, #KEYBRD_CLK_MASK @ prepare all except kbd mask
+ str r11, [r12, #OMAP1510_GPIO_INT_MASK] @ store into the mask register
+
+ b restart @ restart
+
+data: ldr r10, [r9, #BUF_MASK] @ fetch current input bit mask
+
+ @ r8 still contains GPIO input bits
+ ands r8, r8, #KEYBRD_DATA_MASK @ is keyboard data line low?
+ ldreq r8, [r9, #BUF_KEY] @ yes - fetch collected so far,
+ orreq r8, r8, r10 @ set 1 at current mask position
+ streq r8, [r9, #BUF_KEY] @ and save back
+
+ mov r10, r10, lsl #1 @ shift mask left
+ bics r10, r10, #0x800 @ have we got all the bits?
+ strne r10, [r9, #BUF_MASK] @ not yet - store the mask
+ bne restart @ and restart
+
+ @ r10 already contains 0, reuse it
+ str r10, [r9, #BUF_STATE] @ reset state to start
+
+ @ Key done - restore interrupt mask
+ ldr r10, [r9, #BUF_GPIO_INT_MASK] @ fetch saved mask
+ and r11, r11, r10 @ unmask all saved as unmasked
+ str r11, [r12, #OMAP1510_GPIO_INT_MASK] @ restore into the mask register
+
+ @ Try appending the keycode to the circular buffer
+ ldr r10, [r9, #BUF_KEYS_CNT] @ get saved keystrokes count
+ ldr r8, [r9, #BUF_BUF_LEN] @ get buffer size
+ cmp r10, r8 @ is buffer full?
+ beq hksw @ yes - key lost, next source
+
+ add r10, r10, #1 @ increment keystrokes counter
+ str r10, [r9, #BUF_KEYS_CNT]
+
+ ldr r10, [r9, #BUF_TAIL_OFFSET] @ get buffer tail offset
+ @ r8 already contains buffer size
+ cmp r10, r8 @ end of buffer?
+ moveq r10, #0 @ yes - rewind to buffer start
+
+ ldr r12, [r9, #BUF_BUFFER_START] @ get buffer start address
+ add r12, r12, r10, LSL #2 @ calculate buffer tail address
+ ldr r8, [r9, #BUF_KEY] @ get last keycode
+ str r8, [r12] @ append it to the buffer tail
+
+ add r10, r10, #1 @ increment buffer tail offset
+ str r10, [r9, #BUF_TAIL_OFFSET]
+
+ ldr r10, [r9, #BUF_CNT_INT_KEY] @ increment interrupts counter
+ add r10, r10, #1
+ str r10, [r9, #BUF_CNT_INT_KEY]
+ @@@@@@@@@@@@@@@@@@@@@@@@
+
+
+hksw: @ Is hook switch interrupt requested?
+ tst r13, #HOOK_SWITCH_MASK @ is hook switch status bit set?
+ beq mdm @ no - try next source
+
+
+ @@@@@@@@@@@@@@@@@@@@@@@@
+ @ Hook switch interrupt FIQ mode simple handler
+
+ @ Don't toggle active edge, the switch always bounces
+
+ @ Increment hook switch interrupt counter
+ ldr r10, [r9, #BUF_CNT_INT_HSW]
+ add r10, r10, #1
+ str r10, [r9, #BUF_CNT_INT_HSW]
+ @@@@@@@@@@@@@@@@@@@@@@@@
+
+
+mdm: @ Is it a modem interrupt?
+ tst r13, #MODEM_IRQ_MASK @ is modem status bit set?
+ beq irq @ no - check for next interrupt
+
+
+ @@@@@@@@@@@@@@@@@@@@@@@@
+ @ Modem FIQ mode interrupt handler stub
+
+ @ Increment modem interrupt counter
+ ldr r10, [r9, #BUF_CNT_INT_MDM]
+ add r10, r10, #1
+ str r10, [r9, #BUF_CNT_INT_MDM]
+ @@@@@@@@@@@@@@@@@@@@@@@@
+
+
+irq: @ Place deferred_fiq interrupt request
+ ldr r12, deferred_fiq_ih_base @ set pointer to IRQ handler
+ mov r10, #DEFERRED_FIQ_MASK @ set deferred_fiq bit
+ str r10, [r12, #IRQ_ISR_REG_OFFSET] @ place it in the ISR register
+
+ ldr r12, omap1510_gpio_base @ set pointer back to GPIO bank
+ b restart @ check for next GPIO interrupt
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@
+
+
+/*
+ * Virtual addresses for IO
+ */
+omap_ih1_base:
+ .word OMAP1_IO_ADDRESS(OMAP_IH1_BASE)
+deferred_fiq_ih_base:
+ .word OMAP1_IO_ADDRESS(DEFERRED_FIQ_IH_BASE)
+omap1510_gpio_base:
+ .word OMAP1_IO_ADDRESS(OMAP1510_GPIO_BASE)
+qwerty_fiqin_end:
+
+/*
+ * Check the size of the FIQ handler;
+ * it is copied to 0xffff001c and cannot extend beyond 0xffff0200
+ */
+.if (qwerty_fiqin_end - qwerty_fiqin_start) > (0x200 - 0x1c)
+ .err
+.endif
diff --git a/arch/arm/mach-omap1/ams-delta-fiq.c b/arch/arm/mach-omap1/ams-delta-fiq.c
new file mode 100644
index 000000000000..6c994e2d8879
--- /dev/null
+++ b/arch/arm/mach-omap1/ams-delta-fiq.c
@@ -0,0 +1,155 @@
+/*
+ * Amstrad E3 FIQ handling
+ *
+ * Copyright (C) 2009 Janusz Krzysztofik
+ * Copyright (c) 2006 Matt Callow
+ * Copyright (c) 2004 Amstrad Plc
+ * Copyright (C) 2001 RidgeRun, Inc.
+ *
+ * Parts of this code are taken from linux/arch/arm/mach-omap/irq.c
+ * in the MontaVista 2.4 kernel (and the Amstrad changes therein)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/io.h>
+
+#include <plat/board-ams-delta.h>
+
+#include <asm/fiq.h>
+#include <mach/ams-delta-fiq.h>
+
+static struct fiq_handler fh = {
+ .name = "ams-delta-fiq"
+};
+
+/*
+ * This buffer is shared between FIQ and IRQ contexts.
+ * The FIQ and IRQ isrs can both read and write it.
+ * It is structured as a header section of several 32-bit slots,
+ * followed by the circular buffer where the FIQ isr stores
+ * keystrokes received from the qwerty keyboard.
+ * See ams-delta-fiq.h for details of offsets.
+ */
+unsigned int fiq_buffer[1024];
+EXPORT_SYMBOL(fiq_buffer);
+
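+/* Per-GPIO count of interrupts already passed on to their IRQ mode handlers */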
+static unsigned int irq_counter[16];
+
+static irqreturn_t deferred_fiq(int irq, void *dev_id)
+{
+ struct irq_desc *irq_desc;
+ struct irq_chip *irq_chip = NULL;
+ int gpio, irq_num, fiq_count;
+
+ irq_desc = irq_to_desc(IH_GPIO_BASE);
+ if (irq_desc)
+ irq_chip = irq_desc->chip;
+
+ /*
+ * For each handled GPIO interrupt, keep calling its interrupt handler
+ * until the IRQ counter catches up with the FIQ-incremented interrupt counter.
+ */
+ for (gpio = AMS_DELTA_GPIO_PIN_KEYBRD_CLK;
+ gpio <= AMS_DELTA_GPIO_PIN_HOOK_SWITCH; gpio++) {
+ irq_num = gpio_to_irq(gpio);
+ fiq_count = fiq_buffer[FIQ_CNT_INT_00 + gpio];
+
+ while (irq_counter[gpio] < fiq_count) {
+ if (gpio != AMS_DELTA_GPIO_PIN_KEYBRD_CLK) {
+ /*
+ * It looks like handle_edge_irq(), which
+ * OMAP GPIO edge interrupts default to,
+ * expects the interrupt to be unmasked already.
+ */
+ if (irq_chip && irq_chip->unmask)
+ irq_chip->unmask(irq_num);
+ }
+ generic_handle_irq(irq_num);
+
+ irq_counter[gpio]++;
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+void __init ams_delta_init_fiq(void)
+{
+ void *fiqhandler_start;
+ unsigned int fiqhandler_length;
+ struct pt_regs FIQ_regs;
+ unsigned long val, offset;
+ int i, retval;
+
+ fiqhandler_start = &qwerty_fiqin_start;
+ fiqhandler_length = &qwerty_fiqin_end - &qwerty_fiqin_start;
+ pr_info("Installing fiq handler from %p, length 0x%x\n",
+ fiqhandler_start, fiqhandler_length);
+
+ retval = claim_fiq(&fh);
+ if (retval) {
+ pr_err("ams_delta_init_fiq(): couldn't claim FIQ, ret=%d\n",
+ retval);
+ return;
+ }
+
+ retval = request_irq(INT_DEFERRED_FIQ, deferred_fiq,
+ IRQ_TYPE_EDGE_RISING, "deferred_fiq", 0);
+ if (retval < 0) {
+ pr_err("Failed to get deferred_fiq IRQ, ret=%d\n", retval);
+ release_fiq(&fh);
+ return;
+ }
+ /*
+ * Since no set_type() method is provided by OMAP irq chip,
+ * switch to edge triggered interrupt type manually.
+ */
+ offset = IRQ_ILR0_REG_OFFSET + INT_DEFERRED_FIQ * 0x4;
+ val = omap_readl(DEFERRED_FIQ_IH_BASE + offset) & ~(1 << 1);
+ omap_writel(val, DEFERRED_FIQ_IH_BASE + offset);
+
+ set_fiq_handler(fiqhandler_start, fiqhandler_length);
+
+ /*
+ * Initialise the buffer which is shared
+ * between FIQ mode and IRQ mode
+ */
+ fiq_buffer[FIQ_GPIO_INT_MASK] = 0;
+ fiq_buffer[FIQ_MASK] = 0;
+ fiq_buffer[FIQ_STATE] = 0;
+ fiq_buffer[FIQ_KEY] = 0;
+ fiq_buffer[FIQ_KEYS_CNT] = 0;
+ fiq_buffer[FIQ_KEYS_HICNT] = 0;
+ fiq_buffer[FIQ_TAIL_OFFSET] = 0;
+ fiq_buffer[FIQ_HEAD_OFFSET] = 0;
+ fiq_buffer[FIQ_BUF_LEN] = 256;
+ fiq_buffer[FIQ_MISSED_KEYS] = 0;
+ fiq_buffer[FIQ_BUFFER_START] =
+ (unsigned int) &fiq_buffer[FIQ_CIRC_BUFF];
+
+ for (i = FIQ_CNT_INT_00; i <= FIQ_CNT_INT_15; i++)
+ fiq_buffer[i] = 0;
+
+ /*
+ * FIQ mode r9 always points to the fiq_buffer, because the FIQ isr
+ * will run in an unpredictable context. The fiq_buffer is the FIQ isr's
+ * only means of communication with the IRQ level and other kernel
+ * context code.
+ */
+ FIQ_regs.ARM_r9 = (unsigned int)fiq_buffer;
+ set_fiq_regs(&FIQ_regs);
+
+ pr_info("request_fiq(): fiq_buffer = %p\n", fiq_buffer);
+
+ /*
+ * Redirect GPIO interrupts to FIQ
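+ * (setting bit 0 of the ILR below routes the GPIO bank interrupt to FIQ instead of IRQ)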
+ */
+ offset = IRQ_ILR0_REG_OFFSET + INT_GPIO_BANK1 * 0x4;
+ val = omap_readl(OMAP_IH1_BASE + offset) | 1;
+ omap_writel(val, OMAP_IH1_BASE + offset);
+}
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index 7fc11c34b696..fdd1dd53fa9c 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -33,6 +33,8 @@
#include <plat/board.h>
#include <plat/common.h>
+#include <mach/ams-delta-fiq.h>
+
static u8 ams_delta_latch1_reg;
static u16 ams_delta_latch2_reg;
@@ -236,6 +238,10 @@ static void __init ams_delta_init(void)
omap_usb_init(&ams_delta_usb_config);
platform_add_devices(ams_delta_devices, ARRAY_SIZE(ams_delta_devices));
+#ifdef CONFIG_AMS_DELTA_FIQ
+ ams_delta_init_fiq();
+#endif
+
omap_writew(omap_readw(ARM_RSTCT1) | 0x0004, ARM_RSTCT1);
}
@@ -263,8 +269,18 @@ static struct platform_device ams_delta_modem_device = {
static int __init ams_delta_modem_init(void)
{
+ int err;
+
omap_cfg_reg(M14_1510_GPIO2);
- ams_delta_modem_ports[0].irq = gpio_to_irq(2);
+ ams_delta_modem_ports[0].irq =
+ gpio_to_irq(AMS_DELTA_GPIO_PIN_MODEM_IRQ);
+
+ err = gpio_request(AMS_DELTA_GPIO_PIN_MODEM_IRQ, "modem");
+ if (err) {
+ pr_err("Couldn't request gpio pin for modem\n");
+ return err;
+ }
+ gpio_direction_input(AMS_DELTA_GPIO_PIN_MODEM_IRQ);
ams_delta_latch2_write(
AMS_DELTA_LATCH2_MODEM_NRESET | AMS_DELTA_LATCH2_MODEM_CODEC,
diff --git a/arch/arm/mach-omap1/board-htcherald.c b/arch/arm/mach-omap1/board-htcherald.c
index e36639f66150..8e313b4b99a9 100644
--- a/arch/arm/mach-omap1/board-htcherald.c
+++ b/arch/arm/mach-omap1/board-htcherald.c
@@ -28,7 +28,6 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/input.h>
-#include <linux/bootmem.h>
#include <linux/io.h>
#include <linux/gpio.h>
diff --git a/arch/arm/mach-omap1/include/mach/ams-delta-fiq.h b/arch/arm/mach-omap1/include/mach/ams-delta-fiq.h
new file mode 100644
index 000000000000..7a2df29400ca
--- /dev/null
+++ b/arch/arm/mach-omap1/include/mach/ams-delta-fiq.h
@@ -0,0 +1,79 @@
+/*
+ * arch/arm/mach-omap1/include/mach/ams-delta-fiq.h
+ *
+ * Taken from the original Amstrad modifications to fiq.h
+ *
+ * Copyright (c) 2004 Amstrad Plc
+ * Copyright (c) 2006 Matt Callow
+ * Copyright (c) 2010 Janusz Krzysztofik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __AMS_DELTA_FIQ_H
+#define __AMS_DELTA_FIQ_H
+
+#include <plat/irqs.h>
+
+/*
+ * Interrupt number used for passing control from FIQ to IRQ.
+ * IRQ12, described as reserved, has been selected.
+ */
+#define INT_DEFERRED_FIQ INT_1510_RES12
+/*
+ * Base address of the interrupt handler that INT_DEFERRED_FIQ belongs to.
+ */
+#if (INT_DEFERRED_FIQ < IH2_BASE)
+#define DEFERRED_FIQ_IH_BASE OMAP_IH1_BASE
+#else
+#define DEFERRED_FIQ_IH_BASE OMAP_IH2_BASE
+#endif
+
+/*
+ * These are the offsets from the beginning of the fiq_buffer. They are put here
+ * since the buffer and header need to be accessed by drivers servicing devices
+ * which generate GPIO interrupts - e.g. keyboard, modem, hook switch.
+ */
+#define FIQ_MASK 0
+#define FIQ_STATE 1
+#define FIQ_KEYS_CNT 2
+#define FIQ_TAIL_OFFSET 3
+#define FIQ_HEAD_OFFSET 4
+#define FIQ_BUF_LEN 5
+#define FIQ_KEY 6
+#define FIQ_MISSED_KEYS 7
+#define FIQ_BUFFER_START 8
+#define FIQ_GPIO_INT_MASK 9
+#define FIQ_KEYS_HICNT 10
+#define FIQ_IRQ_PEND 11
+#define FIQ_SIR_CODE_L1 12
+#define IRQ_SIR_CODE_L2 13
+
+#define FIQ_CNT_INT_00 14
+#define FIQ_CNT_INT_KEY 15
+#define FIQ_CNT_INT_MDM 16
+#define FIQ_CNT_INT_03 17
+#define FIQ_CNT_INT_HSW 18
+#define FIQ_CNT_INT_05 19
+#define FIQ_CNT_INT_06 20
+#define FIQ_CNT_INT_07 21
+#define FIQ_CNT_INT_08 22
+#define FIQ_CNT_INT_09 23
+#define FIQ_CNT_INT_10 24
+#define FIQ_CNT_INT_11 25
+#define FIQ_CNT_INT_12 26
+#define FIQ_CNT_INT_13 27
+#define FIQ_CNT_INT_14 28
+#define FIQ_CNT_INT_15 29
+
+#define FIQ_CIRC_BUFF 30 /* Start of circular buffer */
+
+#ifndef __ASSEMBLER__
+extern unsigned int fiq_buffer[];
+extern unsigned char qwerty_fiqin_start, qwerty_fiqin_end;
+
+extern void __init ams_delta_init_fiq(void);
+#endif
+
+#endif
diff --git a/arch/arm/mach-omap1/include/mach/debug-macro.S b/arch/arm/mach-omap1/include/mach/debug-macro.S
index b6d9584544b4..e8a8cf36b7f0 100644
--- a/arch/arm/mach-omap1/include/mach/debug-macro.S
+++ b/arch/arm/mach-omap1/include/mach/debug-macro.S
@@ -13,6 +13,8 @@
#include <linux/serial_reg.h>
+#include <asm/memory.h>
+
#include <plat/serial.h>
.pushsection .data
@@ -37,23 +39,12 @@ omap_uart_virt: .word 0x0
cmp \rx, #0 @ is port configured?
bne 99f @ already configured
- /* Check 7XX UART1 scratchpad register for uart to use */
+ /* Check the debug UART configuration set in uncompress.h */
mrc p15, 0, \rx, c1, c0
tst \rx, #1 @ MMU enabled?
- moveq \rx, #0xff000000 @ physical base address
- movne \rx, #0xfe000000 @ virtual base
- orr \rx, \rx, #0x00fb0000 @ OMAP1UART1
- ldrb \rx, [\rx, #(UART_SCR << OMAP7XX_PORT_SHIFT)]
- cmp \rx, #0 @ anything in 7XX scratchpad?
- bne 10f @ found 7XX uart
-
- /* Check 15xx/16xx UART1 scratchpad register for uart to use */
- mrc p15, 0, \rx, c1, c0
- tst \rx, #1 @ MMU enabled?
- moveq \rx, #0xff000000 @ physical base address
- movne \rx, #0xfe000000 @ virtual base
- orr \rx, \rx, #0x00fb0000 @ OMAP1UART1
- ldrb \rx, [\rx, #(UART_SCR << OMAP_PORT_SHIFT)]
+ ldreq \rx, =OMAP_UART_INFO
+ ldrne \rx, =__phys_to_virt(OMAP_UART_INFO)
+ ldr \rx, [\rx, #0]
/* Select the UART to use based on the UART1 scratchpad value */
10: cmp \rx, #0 @ no port configured?
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 2455dcc744a0..99abac2a8c53 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -10,6 +10,7 @@ config ARCH_OMAP2420
config ARCH_OMAP2430
bool "OMAP2430 support"
depends on ARCH_OMAP2
+ select ARCH_OMAP_OTG
config ARCH_OMAP3430
bool "OMAP3430 support"
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 4b9fc57770db..b03cbb434a31 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -122,6 +122,7 @@ obj-$(CONFIG_MACH_NOKIA_N8X0) += board-n8x0.o
obj-$(CONFIG_MACH_NOKIA_RX51) += board-rx51.o \
board-rx51-sdram.o \
board-rx51-peripherals.o \
+ board-rx51-video.o \
hsmmc.o
obj-$(CONFIG_MACH_OMAP_ZOOM2) += board-zoom2.o \
board-zoom-peripherals.o \
diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c
index 01d113ff9fcf..a11a575745e4 100644
--- a/arch/arm/mach-omap2/board-2430sdp.c
+++ b/arch/arm/mach-omap2/board-2430sdp.c
@@ -174,9 +174,18 @@ static struct i2c_board_info __initdata sdp2430_i2c_boardinfo[] = {
},
};
+static struct i2c_board_info __initdata sdp2430_i2c1_boardinfo[] = {
+ {
+ I2C_BOARD_INFO("isp1301_omap", 0x2D),
+ .flags = I2C_CLIENT_WAKE,
+ .irq = OMAP_GPIO_IRQ(78),
+ },
+};
+
static int __init omap2430_i2c_init(void)
{
- omap_register_i2c_bus(1, 400, NULL, 0);
+ omap_register_i2c_bus(1, 100, sdp2430_i2c1_boardinfo,
+ ARRAY_SIZE(sdp2430_i2c1_boardinfo));
omap_register_i2c_bus(2, 2600, sdp2430_i2c_boardinfo,
ARRAY_SIZE(sdp2430_i2c_boardinfo));
return 0;
@@ -198,6 +207,15 @@ static struct omap_musb_board_data musb_board_data = {
.mode = MUSB_OTG,
.power = 100,
};
+static struct omap_usb_config sdp2430_usb_config __initdata = {
+ .otg = 1,
+#ifdef CONFIG_USB_GADGET_OMAP
+ .hmc_mode = 0x0,
+#elif defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
+ .hmc_mode = 0x1,
+#endif
+ .pins[0] = 3,
+};
static void __init omap_2430sdp_init(void)
{
@@ -208,6 +226,7 @@ static void __init omap_2430sdp_init(void)
platform_add_devices(sdp2430_devices, ARRAY_SIZE(sdp2430_devices));
omap_serial_init();
omap2_hsmmc_init(mmc);
+ omap_usb_init(&sdp2430_usb_config);
usb_musb_init(&musb_board_data);
board_smc91x_init();
diff --git a/arch/arm/mach-omap2/board-am3517evm.c b/arch/arm/mach-omap2/board-am3517evm.c
index c1c4389fbd8f..19b9e415710d 100644
--- a/arch/arm/mach-omap2/board-am3517evm.c
+++ b/arch/arm/mach-omap2/board-am3517evm.c
@@ -21,6 +21,8 @@
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/i2c/pca953x.h>
+#include <linux/can/platform/ti_hecc.h>
+#include <linux/davinci_emac.h>
#include <mach/hardware.h>
#include <mach/am35xx.h>
@@ -30,11 +32,106 @@
#include <plat/board.h>
#include <plat/common.h>
+#include <plat/control.h>
#include <plat/usb.h>
#include <plat/display.h>
#include "mux.h"
+#define AM35XX_EVM_PHY_MASK (0xF)
+#define AM35XX_EVM_MDIO_FREQUENCY (1000000)
+
+static struct emac_platform_data am3517_evm_emac_pdata = {
+ .phy_mask = AM35XX_EVM_PHY_MASK,
+ .mdio_max_freq = AM35XX_EVM_MDIO_FREQUENCY,
+ .rmii_en = 1,
+};
+
+static struct resource am3517_emac_resources[] = {
+ {
+ .start = AM35XX_IPSS_EMAC_BASE,
+ .end = AM35XX_IPSS_EMAC_BASE + 0x3FFFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = INT_35XX_EMAC_C0_RXTHRESH_IRQ,
+ .end = INT_35XX_EMAC_C0_RXTHRESH_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = INT_35XX_EMAC_C0_RX_PULSE_IRQ,
+ .end = INT_35XX_EMAC_C0_RX_PULSE_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = INT_35XX_EMAC_C0_TX_PULSE_IRQ,
+ .end = INT_35XX_EMAC_C0_TX_PULSE_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = INT_35XX_EMAC_C0_MISC_PULSE_IRQ,
+ .end = INT_35XX_EMAC_C0_MISC_PULSE_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device am3517_emac_device = {
+ .name = "davinci_emac",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(am3517_emac_resources),
+ .resource = am3517_emac_resources,
+};
+
+static void am3517_enable_ethernet_int(void)
+{
+ u32 regval;
+
+ regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
+ regval = (regval | AM35XX_CPGMAC_C0_RX_PULSE_CLR |
+ AM35XX_CPGMAC_C0_TX_PULSE_CLR |
+ AM35XX_CPGMAC_C0_MISC_PULSE_CLR |
+ AM35XX_CPGMAC_C0_RX_THRESH_CLR);
+ omap_ctrl_writel(regval, AM35XX_CONTROL_LVL_INTR_CLEAR);
+ regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
+}
+
+static void am3517_disable_ethernet_int(void)
+{
+ u32 regval;
+
+ regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
+ regval = (regval | AM35XX_CPGMAC_C0_RX_PULSE_CLR |
+ AM35XX_CPGMAC_C0_TX_PULSE_CLR);
+ omap_ctrl_writel(regval, AM35XX_CONTROL_LVL_INTR_CLEAR);
+ regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
+}
+
+void am3517_evm_ethernet_init(struct emac_platform_data *pdata)
+{
+ unsigned int regval;
+
+ pdata->ctrl_reg_offset = AM35XX_EMAC_CNTRL_OFFSET;
+ pdata->ctrl_mod_reg_offset = AM35XX_EMAC_CNTRL_MOD_OFFSET;
+ pdata->ctrl_ram_offset = AM35XX_EMAC_CNTRL_RAM_OFFSET;
+ pdata->mdio_reg_offset = AM35XX_EMAC_MDIO_OFFSET;
+ pdata->ctrl_ram_size = AM35XX_EMAC_CNTRL_RAM_SIZE;
+ pdata->version = EMAC_VERSION_2;
+ pdata->hw_ram_addr = AM35XX_EMAC_HW_RAM_ADDR;
+ pdata->interrupt_enable = am3517_enable_ethernet_int;
+ pdata->interrupt_disable = am3517_disable_ethernet_int;
+ am3517_emac_device.dev.platform_data = pdata;
+ platform_device_register(&am3517_emac_device);
+
+ regval = omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET);
+ regval = regval & (~(AM35XX_CPGMACSS_SW_RST));
+ omap_ctrl_writel(regval, AM35XX_CONTROL_IP_SW_RESET);
+ regval = omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET);
+
+ return;
+}
+
+
+
#define LCD_PANEL_PWR 176
#define LCD_PANEL_BKLIGHT_PWR 182
#define LCD_PANEL_PWM 181
@@ -119,6 +216,8 @@ static int __init am3517_evm_i2c_init(void)
static int lcd_enabled;
static int dvi_enabled;
+#if defined(CONFIG_PANEL_SHARP_LQ043T1DG01) || \
+ defined(CONFIG_PANEL_SHARP_LQ043T1DG01_MODULE)
static void __init am3517_evm_display_init(void)
{
int r;
@@ -162,6 +261,9 @@ err_2:
err_1:
gpio_free(LCD_PANEL_BKLIGHT_PWR);
}
+#else
+static void __init am3517_evm_display_init(void) {}
+#endif
static int am3517_evm_panel_enable_lcd(struct omap_dss_device *dssdev)
{
@@ -275,7 +377,12 @@ static void __init am3517_evm_init_irq(void)
static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
.port_mode[0] = EHCI_HCD_OMAP_MODE_PHY,
+#if defined(CONFIG_PANEL_SHARP_LQ043T1DG01) || \
+ defined(CONFIG_PANEL_SHARP_LQ043T1DG01_MODULE)
+ .port_mode[1] = EHCI_HCD_OMAP_MODE_UNKNOWN,
+#else
.port_mode[1] = EHCI_HCD_OMAP_MODE_PHY,
+#endif
.port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN,
.phy_reset = true,
@@ -292,6 +399,42 @@ static struct omap_board_mux board_mux[] __initdata = {
#define board_mux NULL
#endif
+
+static struct resource am3517_hecc_resources[] = {
+ {
+ .start = AM35XX_IPSS_HECC_BASE,
+ .end = AM35XX_IPSS_HECC_BASE + 0x3FFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = INT_35XX_HECC0_IRQ,
+ .end = INT_35XX_HECC0_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device am3517_hecc_device = {
+ .name = "ti_hecc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(am3517_hecc_resources),
+ .resource = am3517_hecc_resources,
+};
+
+static struct ti_hecc_platform_data am3517_evm_hecc_pdata = {
+ .scc_hecc_offset = AM35XX_HECC_SCC_HECC_OFFSET,
+ .scc_ram_offset = AM35XX_HECC_SCC_RAM_OFFSET,
+ .hecc_ram_offset = AM35XX_HECC_RAM_OFFSET,
+ .mbx_offset = AM35XX_HECC_MBOX_OFFSET,
+ .int_line = AM35XX_HECC_INT_LINE,
+ .version = AM35XX_HECC_VERSION,
+};
+
+static void am3517_evm_hecc_init(struct ti_hecc_platform_data *pdata)
+{
+ am3517_hecc_device.dev.platform_data = pdata;
+ platform_device_register(&am3517_hecc_device);
+}
+
static void __init am3517_evm_init(void)
{
omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
@@ -305,6 +448,7 @@ static void __init am3517_evm_init(void)
/* Configure GPIO for EHCI port */
omap_mux_init_gpio(57, OMAP_PIN_OUTPUT);
usb_ehci_init(&ehci_pdata);
+ am3517_evm_hecc_init(&am3517_evm_hecc_pdata);
/* DSS */
am3517_evm_display_init();
@@ -313,6 +457,8 @@ static void __init am3517_evm_init(void)
i2c_register_board_info(1, am3517evm_i2c_boardinfo,
ARRAY_SIZE(am3517evm_i2c_boardinfo));
+ /* Ethernet */
+ am3517_evm_ethernet_init(&am3517_evm_emac_pdata);
}
static void __init am3517_evm_map_io(void)
diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c
index 2de4f79f03a0..e679a2cc86c3 100644
--- a/arch/arm/mach-omap2/board-cm-t35.c
+++ b/arch/arm/mach-omap2/board-cm-t35.c
@@ -45,6 +45,7 @@
#include <plat/gpmc.h>
#include <plat/usb.h>
#include <plat/display.h>
+#include <plat/mcspi.h>
#include <mach/hardware.h>
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
index 47e3af2166d4..77022b588816 100644
--- a/arch/arm/mach-omap2/board-devkit8000.c
+++ b/arch/arm/mach-omap2/board-devkit8000.c
@@ -633,8 +633,163 @@ static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
.reset_gpio_port[2] = -EINVAL
};
+static struct omap_board_mux board_mux[] __initdata = {
+ /* nCS and IRQ for Devkit8000 ethernet */
+ OMAP3_MUX(GPMC_NCS6, OMAP_MUX_MODE0),
+ OMAP3_MUX(ETK_D11, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP),
+
+ /* McSPI 2*/
+ OMAP3_MUX(MCSPI2_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(MCSPI2_SIMO, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(MCSPI2_SOMI, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(MCSPI2_CS0, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(MCSPI2_CS1, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+
+ /* PENDOWN GPIO */
+ OMAP3_MUX(ETK_D13, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
+
+ /* mUSB */
+ OMAP3_MUX(HSUSB0_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(HSUSB0_STP, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(HSUSB0_DIR, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(HSUSB0_NXT, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(HSUSB0_DATA0, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(HSUSB0_DATA1, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(HSUSB0_DATA2, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(HSUSB0_DATA3, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(HSUSB0_DATA4, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(HSUSB0_DATA5, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(HSUSB0_DATA6, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(HSUSB0_DATA7, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+
+ /* USB 1 */
+ OMAP3_MUX(ETK_CTL, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
+ OMAP3_MUX(ETK_CLK, OMAP_MUX_MODE3 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(ETK_D8, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
+ OMAP3_MUX(ETK_D9, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
+ OMAP3_MUX(ETK_D0, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
+ OMAP3_MUX(ETK_D1, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
+ OMAP3_MUX(ETK_D2, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
+ OMAP3_MUX(ETK_D3, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
+ OMAP3_MUX(ETK_D4, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
+ OMAP3_MUX(ETK_D5, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
+ OMAP3_MUX(ETK_D6, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
+ OMAP3_MUX(ETK_D7, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
+
+ /* MMC 1 */
+ OMAP3_MUX(SDMMC1_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(SDMMC1_CMD, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(SDMMC1_DAT0, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(SDMMC1_DAT1, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(SDMMC1_DAT2, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(SDMMC1_DAT3, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(SDMMC1_DAT4, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(SDMMC1_DAT5, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(SDMMC1_DAT6, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(SDMMC1_DAT7, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+
+ /* McBSP 2 */
+ OMAP3_MUX(MCBSP2_FSX, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(MCBSP2_CLKX, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(MCBSP2_DR, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(MCBSP2_DX, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+
+ /* I2C 1 */
+ OMAP3_MUX(I2C1_SCL, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(I2C1_SDA, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+
+ /* I2C 2 */
+ OMAP3_MUX(I2C2_SCL, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(I2C2_SDA, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+
+ /* I2C 3 */
+ OMAP3_MUX(I2C3_SCL, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(I2C3_SDA, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+
+ /* I2C 4 */
+ OMAP3_MUX(I2C4_SCL, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(I2C4_SDA, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+
+ /* serial ports */
+ OMAP3_MUX(MCBSP3_CLKX, OMAP_MUX_MODE1 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(MCBSP3_FSX, OMAP_MUX_MODE1 | OMAP_PIN_INPUT),
+ OMAP3_MUX(UART1_TX, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(UART1_RX, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+
+ /* DSS */
+ OMAP3_MUX(DSS_PCLK, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_HSYNC, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_VSYNC, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_ACBIAS, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA0, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA1, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA2, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA3, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA4, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA5, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA6, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA7, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA8, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA9, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA10, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA11, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA12, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA13, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA14, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA15, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA16, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA17, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA18, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA19, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA20, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA21, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA22, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA23, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+
+ /* expansion port */
+ /* McSPI 1 */
+ OMAP3_MUX(MCSPI1_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(MCSPI1_SIMO, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(MCSPI1_SOMI, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(MCSPI1_CS0, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLDOWN),
+ OMAP3_MUX(MCSPI1_CS3, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLDOWN),
+
+ /* HDQ */
+ OMAP3_MUX(HDQ_SIO, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+
+ /* McSPI4 */
+ OMAP3_MUX(MCBSP1_CLKR, OMAP_MUX_MODE1 | OMAP_PIN_INPUT),
+ OMAP3_MUX(MCBSP1_DX, OMAP_MUX_MODE1 | OMAP_PIN_INPUT),
+ OMAP3_MUX(MCBSP1_DR, OMAP_MUX_MODE1 | OMAP_PIN_INPUT),
+ OMAP3_MUX(MCBSP1_FSX, OMAP_MUX_MODE1 | OMAP_PIN_INPUT_PULLUP),
+
+ /* MMC 2 */
+ OMAP3_MUX(SDMMC2_DAT4, OMAP_MUX_MODE1 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(SDMMC2_DAT5, OMAP_MUX_MODE1 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(SDMMC2_DAT6, OMAP_MUX_MODE1 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(SDMMC2_DAT7, OMAP_MUX_MODE1 | OMAP_PIN_INPUT),
+
+ /* I2C3 */
+ OMAP3_MUX(I2C3_SCL, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(I2C3_SDA, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+
+ OMAP3_MUX(MCBSP1_CLKX, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(MCBSP_CLKS, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(MCBSP1_FSR, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
+
+ OMAP3_MUX(GPMC_NCS7, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(GPMC_NCS3, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
+
+ /* TPS IRQ */
+ OMAP3_MUX(SYS_NIRQ, OMAP_MUX_MODE0 | OMAP_WAKEUP_EN | \
+ OMAP_PIN_INPUT_PULLUP),
+
+ { .reg_offset = OMAP_MUX_TERMINATOR },
+};
+
static void __init devkit8000_init(void)
{
+ omap3_mux_init(board_mux, OMAP_PACKAGE_CUS);
omap_serial_init();
omap_dm9000_init();
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c
index 962d377970e9..69b154cdc75d 100644
--- a/arch/arm/mach-omap2/board-omap3beagle.c
+++ b/arch/arm/mach-omap2/board-omap3beagle.c
@@ -39,6 +39,7 @@
#include <plat/board.h>
#include <plat/common.h>
+#include <plat/display.h>
#include <plat/gpmc.h>
#include <plat/nand.h>
#include <plat/usb.h>
@@ -106,6 +107,77 @@ static struct platform_device omap3beagle_nand_device = {
.resource = &omap3beagle_nand_resource,
};
+/* DSS */
+
+static int beagle_enable_dvi(struct omap_dss_device *dssdev)
+{
+ if (gpio_is_valid(dssdev->reset_gpio))
+ gpio_set_value(dssdev->reset_gpio, 1);
+
+ return 0;
+}
+
+static void beagle_disable_dvi(struct omap_dss_device *dssdev)
+{
+ if (gpio_is_valid(dssdev->reset_gpio))
+ gpio_set_value(dssdev->reset_gpio, 0);
+}
+
+static struct omap_dss_device beagle_dvi_device = {
+ .type = OMAP_DISPLAY_TYPE_DPI,
+ .name = "dvi",
+ .driver_name = "generic_panel",
+ .phy.dpi.data_lines = 24,
+ .reset_gpio = 170,
+ .platform_enable = beagle_enable_dvi,
+ .platform_disable = beagle_disable_dvi,
+};
+
+static struct omap_dss_device beagle_tv_device = {
+ .name = "tv",
+ .driver_name = "venc",
+ .type = OMAP_DISPLAY_TYPE_VENC,
+ .phy.venc.type = OMAP_DSS_VENC_TYPE_SVIDEO,
+};
+
+static struct omap_dss_device *beagle_dss_devices[] = {
+ &beagle_dvi_device,
+ &beagle_tv_device,
+};
+
+static struct omap_dss_board_info beagle_dss_data = {
+ .num_devices = ARRAY_SIZE(beagle_dss_devices),
+ .devices = beagle_dss_devices,
+ .default_device = &beagle_dvi_device,
+};
+
+static struct platform_device beagle_dss_device = {
+ .name = "omapdss",
+ .id = -1,
+ .dev = {
+ .platform_data = &beagle_dss_data,
+ },
+};
+
+static struct regulator_consumer_supply beagle_vdac_supply =
+ REGULATOR_SUPPLY("vdda_dac", "omapdss");
+
+static struct regulator_consumer_supply beagle_vdvi_supply =
+ REGULATOR_SUPPLY("vdds_dsi", "omapdss");
+
+static void __init beagle_display_init(void)
+{
+ int r;
+
+ r = gpio_request(beagle_dvi_device.reset_gpio, "DVI reset");
+ if (r < 0) {
+ printk(KERN_ERR "Unable to get DVI reset GPIO\n");
+ return;
+ }
+
+ gpio_direction_output(beagle_dvi_device.reset_gpio, 0);
+}
+
#include "sdram-micron-mt46h32m32lf-6.h"
static struct omap2_hsmmc_info mmc[] = {
@@ -117,15 +189,6 @@ static struct omap2_hsmmc_info mmc[] = {
{} /* Terminator */
};
-static struct platform_device omap3_beagle_lcd_device = {
- .name = "omap3beagle_lcd",
- .id = -1,
-};
-
-static struct omap_lcd_config omap3_beagle_lcd_config __initdata = {
- .ctrl_name = "internal",
-};
-
static struct regulator_consumer_supply beagle_vmmc1_supply = {
.supply = "vmmc",
};
@@ -181,16 +244,6 @@ static struct twl4030_gpio_platform_data beagle_gpio_data = {
.setup = beagle_twl_gpio_setup,
};
-static struct regulator_consumer_supply beagle_vdac_supply = {
- .supply = "vdac",
- .dev = &omap3_beagle_lcd_device.dev,
-};
-
-static struct regulator_consumer_supply beagle_vdvi_supply = {
- .supply = "vdvi",
- .dev = &omap3_beagle_lcd_device.dev,
-};
-
/* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
static struct regulator_init_data beagle_vmmc1 = {
.constraints = {
@@ -349,14 +402,8 @@ static struct platform_device keys_gpio = {
},
};
-static struct omap_board_config_kernel omap3_beagle_config[] __initdata = {
- { OMAP_TAG_LCD, &omap3_beagle_lcd_config },
-};
-
static void __init omap3_beagle_init_irq(void)
{
- omap_board_config = omap3_beagle_config;
- omap_board_config_size = ARRAY_SIZE(omap3_beagle_config);
omap2_init_common_hw(mt46h32m32lf6_sdrc_params,
mt46h32m32lf6_sdrc_params);
omap_init_irq();
@@ -367,9 +414,9 @@ static void __init omap3_beagle_init_irq(void)
}
static struct platform_device *omap3_beagle_devices[] __initdata = {
- &omap3_beagle_lcd_device,
&leds_gpio,
&keys_gpio,
+ &beagle_dss_device,
};
static void __init omap3beagle_flash_init(void)
@@ -456,6 +503,8 @@ static void __init omap3_beagle_init(void)
/* Ensure SDRC pins are mux'd for self-refresh */
omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT);
+
+ beagle_display_init();
}
static void __init omap3_beagle_map_io(void)
diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c
index 8848c7c5ce48..79ac41400c21 100644
--- a/arch/arm/mach-omap2/board-overo.c
+++ b/arch/arm/mach-omap2/board-overo.c
@@ -63,6 +63,8 @@
#define OVERO_SMSC911X_CS 5
#define OVERO_SMSC911X_GPIO 176
+#define OVERO_SMSC911X2_CS 4
+#define OVERO_SMSC911X2_GPIO 65
#if defined(CONFIG_TOUCHSCREEN_ADS7846) || \
defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
@@ -137,6 +139,16 @@ static struct resource overo_smsc911x_resources[] = {
},
};
+static struct resource overo_smsc911x2_resources[] = {
+ {
+ .name = "smsc911x2-memory",
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
+ },
+};
+
static struct smsc911x_platform_config overo_smsc911x_config = {
.irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
.irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
@@ -146,7 +158,7 @@ static struct smsc911x_platform_config overo_smsc911x_config = {
static struct platform_device overo_smsc911x_device = {
.name = "smsc911x",
- .id = -1,
+ .id = 0,
.num_resources = ARRAY_SIZE(overo_smsc911x_resources),
.resource = overo_smsc911x_resources,
.dev = {
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index 4377a4cf36eb..abdf321c2d41 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -45,6 +45,8 @@
/* list all spi devices here */
enum {
RX51_SPI_WL1251,
+ RX51_SPI_MIPID, /* LCD panel */
+ RX51_SPI_TSC2005, /* Touch Controller */
};
static struct wl12xx_platform_data wl1251_pdata;
@@ -54,6 +56,16 @@ static struct omap2_mcspi_device_config wl1251_mcspi_config = {
.single_channel = 1,
};
+static struct omap2_mcspi_device_config mipid_mcspi_config = {
+ .turbo_mode = 0,
+ .single_channel = 1,
+};
+
+static struct omap2_mcspi_device_config tsc2005_mcspi_config = {
+ .turbo_mode = 0,
+ .single_channel = 1,
+};
+
static struct spi_board_info rx51_peripherals_spi_board_info[] __initdata = {
[RX51_SPI_WL1251] = {
.modalias = "wl1251",
@@ -64,6 +76,22 @@ static struct spi_board_info rx51_peripherals_spi_board_info[] __initdata = {
.controller_data = &wl1251_mcspi_config,
.platform_data = &wl1251_pdata,
},
+ [RX51_SPI_MIPID] = {
+ .modalias = "acx565akm",
+ .bus_num = 1,
+ .chip_select = 2,
+ .max_speed_hz = 6000000,
+ .controller_data = &mipid_mcspi_config,
+ },
+ [RX51_SPI_TSC2005] = {
+ .modalias = "tsc2005",
+ .bus_num = 1,
+ .chip_select = 0,
+ /* .irq = OMAP_GPIO_IRQ(RX51_TSC2005_IRQ_GPIO),*/
+ .max_speed_hz = 6000000,
+ .controller_data = &tsc2005_mcspi_config,
+ /* .platform_data = &tsc2005_config,*/
+ },
};
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
@@ -277,7 +305,7 @@ static struct regulator_consumer_supply rx51_vmmc1_supply = {
.dev_name = "mmci-omap-hs.0",
};
-static struct regulator_consumer_supply rx51_vmmc2_supply = {
+static struct regulator_consumer_supply rx51_vaux3_supply = {
.supply = "vmmc",
.dev_name = "mmci-omap-hs.1",
};
@@ -287,6 +315,48 @@ static struct regulator_consumer_supply rx51_vsim_supply = {
.dev_name = "mmci-omap-hs.1",
};
+static struct regulator_consumer_supply rx51_vmmc2_supplies[] = {
+ /* tlv320aic3x analog supplies */
+ {
+ .supply = "AVDD",
+ .dev_name = "2-0018",
+ },
+ {
+ .supply = "DRVDD",
+ .dev_name = "2-0018",
+ },
+ /* Keep vmmc as last item. It is not iterated for newer boards */
+ {
+ .supply = "vmmc",
+ .dev_name = "mmci-omap-hs.1",
+ },
+};
+
+static struct regulator_consumer_supply rx51_vio_supplies[] = {
+ /* tlv320aic3x digital supplies */
+ {
+ .supply = "IOVDD",
+ .dev_name = "2-0018"
+ },
+ {
+ .supply = "DVDD",
+ .dev_name = "2-0018"
+ },
+};
+
+#if defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
+extern struct platform_device rx51_display_device;
+#endif
+
+static struct regulator_consumer_supply rx51_vaux1_consumers[] = {
+#if defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
+ {
+ .supply = "vdds_sdi",
+ .dev = &rx51_display_device.dev,
+ },
+#endif
+};
+
static struct regulator_init_data rx51_vaux1 = {
.constraints = {
.name = "V28",
@@ -297,6 +367,8 @@ static struct regulator_init_data rx51_vaux1 = {
.valid_ops_mask = REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
+ .num_consumer_supplies = ARRAY_SIZE(rx51_vaux1_consumers),
+ .consumer_supplies = rx51_vaux1_consumers,
};
static struct regulator_init_data rx51_vaux2 = {
@@ -338,7 +410,7 @@ static struct regulator_init_data rx51_vaux3_mmc = {
| REGULATOR_CHANGE_STATUS,
},
.num_consumer_supplies = 1,
- .consumer_supplies = &rx51_vmmc2_supply,
+ .consumer_supplies = &rx51_vaux3_supply,
};
static struct regulator_init_data rx51_vaux4 = {
@@ -370,9 +442,9 @@ static struct regulator_init_data rx51_vmmc1 = {
static struct regulator_init_data rx51_vmmc2 = {
.constraints = {
- .name = "VMMC2_30",
- .min_uV = 1850000,
- .max_uV = 3150000,
+ .name = "V28_A",
+ .min_uV = 2800000,
+ .max_uV = 3000000,
.apply_uV = true,
.valid_modes_mask = REGULATOR_MODE_NORMAL
| REGULATOR_MODE_STANDBY,
@@ -380,8 +452,8 @@ static struct regulator_init_data rx51_vmmc2 = {
| REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &rx51_vmmc2_supply,
+ .num_consumer_supplies = ARRAY_SIZE(rx51_vmmc2_supplies),
+ .consumer_supplies = rx51_vmmc2_supplies,
};
static struct regulator_init_data rx51_vsim = {
@@ -411,6 +483,20 @@ static struct regulator_init_data rx51_vdac = {
},
};
+static struct regulator_init_data rx51_vio = {
+ .constraints = {
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
+ | REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(rx51_vio_supplies),
+ .consumer_supplies = rx51_vio_supplies,
+};
+
static int rx51_twlgpio_setup(struct device *dev, unsigned gpio, unsigned n)
{
/* FIXME this gpio setup is just a placeholder for now */
@@ -618,6 +704,7 @@ static struct twl4030_platform_data rx51_twldata __initdata = {
.vmmc1 = &rx51_vmmc1,
.vsim = &rx51_vsim,
.vdac = &rx51_vdac,
+ .vio = &rx51_vio,
};
static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_1[] = {
@@ -629,18 +716,27 @@ static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_1[] = {
},
};
+static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_2[] = {
+ {
+ I2C_BOARD_INFO("tlv320aic3x", 0x18),
+ },
+};
+
static int __init rx51_i2c_init(void)
{
if ((system_rev >= SYSTEM_REV_S_USES_VAUX3 && system_rev < 0x100) ||
- system_rev >= SYSTEM_REV_B_USES_VAUX3)
+ system_rev >= SYSTEM_REV_B_USES_VAUX3) {
rx51_twldata.vaux3 = &rx51_vaux3_mmc;
- else {
+ /* Only older boards use VMMC2 for internal MMC */
+ rx51_vmmc2.num_consumer_supplies--;
+ } else {
rx51_twldata.vaux3 = &rx51_vaux3_cam;
- rx51_twldata.vmmc2 = &rx51_vmmc2;
}
+ rx51_twldata.vmmc2 = &rx51_vmmc2;
omap_register_i2c_bus(1, 2200, rx51_peripherals_i2c_board_info_1,
- ARRAY_SIZE(rx51_peripherals_i2c_board_info_1));
- omap_register_i2c_bus(2, 100, NULL, 0);
+ ARRAY_SIZE(rx51_peripherals_i2c_board_info_1));
+ omap_register_i2c_bus(2, 100, rx51_peripherals_i2c_board_info_2,
+ ARRAY_SIZE(rx51_peripherals_i2c_board_info_2));
omap_register_i2c_bus(3, 400, NULL, 0);
return 0;
}
diff --git a/arch/arm/mach-omap2/board-rx51-video.c b/arch/arm/mach-omap2/board-rx51-video.c
new file mode 100644
index 000000000000..e4a9d4c5e6d5
--- /dev/null
+++ b/arch/arm/mach-omap2/board-rx51-video.c
@@ -0,0 +1,107 @@
+/*
+ * linux/arch/arm/mach-omap2/board-rx51-video.c
+ *
+ * Copyright (C) 2010 Nokia
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/mm.h>
+
+#include <asm/mach-types.h>
+#include <plat/mux.h>
+#include <plat/display.h>
+#include <plat/vram.h>
+#include <plat/mcspi.h>
+
+#include "mux.h"
+
+#define RX51_LCD_RESET_GPIO 90
+
+#if defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
+
+static int rx51_lcd_enable(struct omap_dss_device *dssdev)
+{
+ gpio_set_value(dssdev->reset_gpio, 1);
+ return 0;
+}
+
+static void rx51_lcd_disable(struct omap_dss_device *dssdev)
+{
+ gpio_set_value(dssdev->reset_gpio, 0);
+}
+
+static struct omap_dss_device rx51_lcd_device = {
+ .name = "lcd",
+ .driver_name = "panel-acx565akm",
+ .type = OMAP_DISPLAY_TYPE_SDI,
+ .phy.sdi.datapairs = 2,
+ .reset_gpio = RX51_LCD_RESET_GPIO,
+ .platform_enable = rx51_lcd_enable,
+ .platform_disable = rx51_lcd_disable,
+};
+
+static struct omap_dss_device *rx51_dss_devices[] = {
+ &rx51_lcd_device,
+};
+
+static struct omap_dss_board_info rx51_dss_board_info = {
+ .num_devices = ARRAY_SIZE(rx51_dss_devices),
+ .devices = rx51_dss_devices,
+ .default_device = &rx51_lcd_device,
+};
+
+struct platform_device rx51_display_device = {
+ .name = "omapdss",
+ .id = -1,
+ .dev = {
+ .platform_data = &rx51_dss_board_info,
+ },
+};
+
+static struct platform_device *rx51_video_devices[] __initdata = {
+ &rx51_display_device,
+};
+
+static int __init rx51_video_init(void)
+{
+ if (!machine_is_nokia_rx51())
+ return 0;
+
+ if (omap_mux_init_gpio(RX51_LCD_RESET_GPIO, OMAP_PIN_OUTPUT)) {
+ pr_err("%s cannot configure MUX for LCD RESET\n", __func__);
+ return 0;
+ }
+
+ if (gpio_request(RX51_LCD_RESET_GPIO, "LCD ACX565AKM reset")) {
+ pr_err("%s failed to get LCD Reset GPIO\n", __func__);
+ return 0;
+ }
+
+ gpio_direction_output(RX51_LCD_RESET_GPIO, 1);
+
+ platform_add_devices(rx51_video_devices,
+ ARRAY_SIZE(rx51_video_devices));
+ return 0;
+}
+
+subsys_initcall(rx51_video_init);
+
+void __init rx51_video_mem_init(void)
+{
+ /*
+ * GFX 864x480x32bpp
+ * VID1/2 1280x720x32bpp double buffered
+ */
+ omap_vram_set_sdram_vram(PAGE_ALIGN(864 * 480 * 4) +
+ 2 * PAGE_ALIGN(1280 * 720 * 4 * 2), 0);
+}
+
+#endif /* defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE) */
diff --git a/arch/arm/mach-omap2/board-rx51.c b/arch/arm/mach-omap2/board-rx51.c
index b155c366c650..1b86b5bb87a2 100644
--- a/arch/arm/mach-omap2/board-rx51.c
+++ b/arch/arm/mach-omap2/board-rx51.c
@@ -36,6 +36,7 @@
#define RX51_GPIO_SLEEP_IND 162
struct omap_sdrc_params *rx51_get_sdram_timings(void);
+extern void rx51_video_mem_init(void);
static struct gpio_led gpio_leds[] = {
{
@@ -143,6 +144,7 @@ static void __init rx51_init(void)
static void __init rx51_map_io(void)
{
omap2_set_globals_343x();
+ rx51_video_mem_init();
omap34xx_map_common_io();
}
diff --git a/arch/arm/mach-omap2/board-zoom-debugboard.c b/arch/arm/mach-omap2/board-zoom-debugboard.c
index e15d2e87cfc1..1d7f827b0408 100644
--- a/arch/arm/mach-omap2/board-zoom-debugboard.c
+++ b/arch/arm/mach-omap2/board-zoom-debugboard.c
@@ -82,7 +82,7 @@ static inline void __init zoom_init_smsc911x(void)
static struct plat_serial8250_port serial_platform_data[] = {
{
- .mapbase = 0x10000000,
+ .mapbase = ZOOM_UART_BASE,
.irq = OMAP_GPIO_IRQ(102),
.flags = UPF_BOOT_AUTOCONF|UPF_IOREMAP|UPF_SHARE_IRQ,
.irqflags = IRQF_SHARED | IRQF_TRIGGER_RISING,
diff --git a/arch/arm/mach-omap2/board-zoom2.c b/arch/arm/mach-omap2/board-zoom2.c
index 9a26f84b1141..803ef14cbf2d 100644
--- a/arch/arm/mach-omap2/board-zoom2.c
+++ b/arch/arm/mach-omap2/board-zoom2.c
@@ -91,8 +91,8 @@ static void __init omap_zoom2_map_io(void)
}
MACHINE_START(OMAP_ZOOM2, "OMAP Zoom2 board")
- .phys_io = 0x48000000,
- .io_pg_offst = ((0xfa000000) >> 18) & 0xfffc,
+ .phys_io = ZOOM_UART_BASE,
+ .io_pg_offst = (ZOOM_UART_VIRT >> 18) & 0xfffc,
.boot_params = 0x80000100,
.map_io = omap_zoom2_map_io,
.init_irq = omap_zoom2_init_irq,
diff --git a/arch/arm/mach-omap2/board-zoom3.c b/arch/arm/mach-omap2/board-zoom3.c
index cd3e40cf3ac1..33147042485f 100644
--- a/arch/arm/mach-omap2/board-zoom3.c
+++ b/arch/arm/mach-omap2/board-zoom3.c
@@ -73,8 +73,8 @@ static void __init omap_zoom_init(void)
}
MACHINE_START(OMAP_ZOOM3, "OMAP Zoom3 board")
- .phys_io = 0x48000000,
- .io_pg_offst = ((0xfa000000) >> 18) & 0xfffc,
+ .phys_io = ZOOM_UART_BASE,
+ .io_pg_offst = (ZOOM_UART_VIRT >> 18) & 0xfffc,
.boot_params = 0x80000100,
.map_io = omap_zoom_map_io,
.init_irq = omap_zoom_init_irq,
diff --git a/arch/arm/mach-omap2/clock2420_data.c b/arch/arm/mach-omap2/clock2420_data.c
index d932b142d0b6..1820a556361b 100644
--- a/arch/arm/mach-omap2/clock2420_data.c
+++ b/arch/arm/mach-omap2/clock2420_data.c
@@ -1836,7 +1836,7 @@ static struct omap_clk omap2420_clks[] = {
CLK(NULL, "vlynq_ick", &vlynq_ick, CK_242X),
CLK(NULL, "vlynq_fck", &vlynq_fck, CK_242X),
CLK(NULL, "des_ick", &des_ick, CK_242X),
- CLK(NULL, "sha_ick", &sha_ick, CK_242X),
+ CLK("omap-sham", "ick", &sha_ick, CK_242X),
CLK("omap_rng", "ick", &rng_ick, CK_242X),
CLK(NULL, "aes_ick", &aes_ick, CK_242X),
CLK(NULL, "pka_ick", &pka_ick, CK_242X),
diff --git a/arch/arm/mach-omap2/clock2430_data.c b/arch/arm/mach-omap2/clock2430_data.c
index 0438b6e4f51a..5884ac681c4a 100644
--- a/arch/arm/mach-omap2/clock2430_data.c
+++ b/arch/arm/mach-omap2/clock2430_data.c
@@ -1924,7 +1924,7 @@ static struct omap_clk omap2430_clks[] = {
CLK(NULL, "sdma_ick", &sdma_ick, CK_243X),
CLK(NULL, "sdrc_ick", &sdrc_ick, CK_243X),
CLK(NULL, "des_ick", &des_ick, CK_243X),
- CLK(NULL, "sha_ick", &sha_ick, CK_243X),
+ CLK("omap-sham", "ick", &sha_ick, CK_243X),
CLK("omap_rng", "ick", &rng_ick, CK_243X),
CLK(NULL, "aes_ick", &aes_ick, CK_243X),
CLK(NULL, "pka_ick", &pka_ick, CK_243X),
diff --git a/arch/arm/mach-omap2/clock3xxx_data.c b/arch/arm/mach-omap2/clock3xxx_data.c
index 9cba5560519b..b0787a405e28 100644
--- a/arch/arm/mach-omap2/clock3xxx_data.c
+++ b/arch/arm/mach-omap2/clock3xxx_data.c
@@ -3360,7 +3360,7 @@ static struct omap_clk omap3xxx_clks[] = {
CLK("mmci-omap-hs.2", "ick", &mmchs3_ick, CK_3430ES2 | CK_AM35XX),
CLK(NULL, "icr_ick", &icr_ick, CK_343X),
CLK(NULL, "aes2_ick", &aes2_ick, CK_343X),
- CLK(NULL, "sha12_ick", &sha12_ick, CK_343X),
+ CLK("omap-sham", "ick", &sha12_ick, CK_343X),
CLK(NULL, "des2_ick", &des2_ick, CK_343X),
CLK("mmci-omap-hs.1", "ick", &mmchs2_ick, CK_3XXX),
CLK("mmci-omap-hs.0", "ick", &mmchs1_ick, CK_3XXX),
@@ -3472,8 +3472,8 @@ static struct omap_clk omap3xxx_clks[] = {
CLK(NULL, "ipss_ick", &ipss_ick, CK_AM35XX),
CLK(NULL, "rmii_ck", &rmii_ck, CK_AM35XX),
CLK(NULL, "pclk_ck", &pclk_ck, CK_AM35XX),
- CLK("davinci_emac", "ick", &emac_ick, CK_AM35XX),
- CLK("davinci_emac", "fck", &emac_fck, CK_AM35XX),
+ CLK("davinci_emac", "emac_clk", &emac_ick, CK_AM35XX),
+ CLK("davinci_emac", "phy_clk", &emac_fck, CK_AM35XX),
CLK("vpfe-capture", "master", &vpfe_ick, CK_AM35XX),
CLK("vpfe-capture", "slave", &vpfe_fck, CK_AM35XX),
CLK("musb_hdrc", "ick", &hsotgusb_ick_am35xx, CK_AM35XX),
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 2271b9bd1f50..08452f36fd68 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -17,8 +17,10 @@
#include <linux/clk.h>
#include <mach/hardware.h>
+#include <mach/irqs.h>
#include <asm/mach-types.h>
#include <asm/mach/map.h>
+#include <asm/pmu.h>
#include <plat/control.h>
#include <plat/tc.h>
@@ -26,6 +28,7 @@
#include <plat/mux.h>
#include <mach/gpio.h>
#include <plat/mmc.h>
+#include <plat/dma.h>
#include "mux.h"
@@ -453,8 +456,41 @@ static void omap_init_mcspi(void)
static inline void omap_init_mcspi(void) {}
#endif
-#ifdef CONFIG_OMAP_SHA1_MD5
-static struct resource sha1_md5_resources[] = {
+static struct resource omap2_pmu_resource = {
+ .start = 3,
+ .end = 3,
+ .flags = IORESOURCE_IRQ,
+};
+
+static struct resource omap3_pmu_resource = {
+ .start = INT_34XX_BENCH_MPU_EMUL,
+ .end = INT_34XX_BENCH_MPU_EMUL,
+ .flags = IORESOURCE_IRQ,
+};
+
+static struct platform_device omap_pmu_device = {
+ .name = "arm-pmu",
+ .id = ARM_PMU_DEVICE_CPU,
+ .num_resources = 1,
+};
+
+static void omap_init_pmu(void)
+{
+ if (cpu_is_omap24xx())
+ omap_pmu_device.resource = &omap2_pmu_resource;
+ else if (cpu_is_omap34xx())
+ omap_pmu_device.resource = &omap3_pmu_resource;
+ else
+ return;
+
+ platform_device_register(&omap_pmu_device);
+}
+
+
+#if defined(CONFIG_CRYPTO_DEV_OMAP_SHAM) || defined(CONFIG_CRYPTO_DEV_OMAP_SHAM_MODULE)
+
+#ifdef CONFIG_ARCH_OMAP24XX
+static struct resource omap2_sham_resources[] = {
{
.start = OMAP24XX_SEC_SHA1MD5_BASE,
.end = OMAP24XX_SEC_SHA1MD5_BASE + 0x64,
@@ -465,20 +501,55 @@ static struct resource sha1_md5_resources[] = {
.flags = IORESOURCE_IRQ,
}
};
+static int omap2_sham_resources_sz = ARRAY_SIZE(omap2_sham_resources);
+#else
+#define omap2_sham_resources NULL
+#define omap2_sham_resources_sz 0
+#endif
+
+#ifdef CONFIG_ARCH_OMAP34XX
+static struct resource omap3_sham_resources[] = {
+ {
+ .start = OMAP34XX_SEC_SHA1MD5_BASE,
+ .end = OMAP34XX_SEC_SHA1MD5_BASE + 0x64,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = INT_34XX_SHA1MD52_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = OMAP34XX_DMA_SHA1MD5_RX,
+ .flags = IORESOURCE_DMA,
+ }
+};
+static int omap3_sham_resources_sz = ARRAY_SIZE(omap3_sham_resources);
+#else
+#define omap3_sham_resources NULL
+#define omap3_sham_resources_sz 0
+#endif
-static struct platform_device sha1_md5_device = {
- .name = "OMAP SHA1/MD5",
+static struct platform_device sham_device = {
+ .name = "omap-sham",
.id = -1,
- .num_resources = ARRAY_SIZE(sha1_md5_resources),
- .resource = sha1_md5_resources,
};
-static void omap_init_sha1_md5(void)
+static void omap_init_sham(void)
{
- platform_device_register(&sha1_md5_device);
+ if (cpu_is_omap24xx()) {
+ sham_device.resource = omap2_sham_resources;
+ sham_device.num_resources = omap2_sham_resources_sz;
+ } else if (cpu_is_omap34xx()) {
+ sham_device.resource = omap3_sham_resources;
+ sham_device.num_resources = omap3_sham_resources_sz;
+ } else {
+ pr_err("%s: platform not supported\n", __func__);
+ return;
+ }
+ platform_device_register(&sham_device);
}
#else
-static inline void omap_init_sha1_md5(void) { }
+static inline void omap_init_sham(void) { }
#endif
/*-------------------------------------------------------------------------*/
@@ -786,6 +857,33 @@ static inline void omap_hdq_init(void)
static inline void omap_hdq_init(void) {}
#endif
+/*---------------------------------------------------------------------------*/
+
+#if defined(CONFIG_VIDEO_OMAP2_VOUT) || \
+ defined(CONFIG_VIDEO_OMAP2_VOUT_MODULE)
+#if defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
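+/* The DSS provides three overlay pipelines; omap_vout gets the ones not used as framebuffers */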
+static struct resource omap_vout_resource[3 - CONFIG_FB_OMAP2_NUM_FBS] = {
+};
+#else
+static struct resource omap_vout_resource[2] = {
+};
+#endif
+
+static struct platform_device omap_vout_device = {
+ .name = "omap_vout",
+ .num_resources = ARRAY_SIZE(omap_vout_resource),
+ .resource = &omap_vout_resource[0],
+ .id = -1,
+};
+static void omap_init_vout(void)
+{
+ if (platform_device_register(&omap_vout_device) < 0)
+ printk(KERN_ERR "Unable to register OMAP-VOUT device\n");
+}
+#else
+static inline void omap_init_vout(void) {}
+#endif
+
/*-------------------------------------------------------------------------*/
static int __init omap2_init_devices(void)
@@ -797,9 +895,11 @@ static int __init omap2_init_devices(void)
omap_init_camera();
omap_init_mbox();
omap_init_mcspi();
+ omap_init_pmu();
omap_hdq_init();
omap_init_sti();
- omap_init_sha1_md5();
+ omap_init_sham();
+ omap_init_vout();
return 0;
}
diff --git a/arch/arm/mach-omap2/include/mach/am35xx.h b/arch/arm/mach-omap2/include/mach/am35xx.h
index a705f946fc46..f1e13d1ca5e7 100644
--- a/arch/arm/mach-omap2/include/mach/am35xx.h
+++ b/arch/arm/mach-omap2/include/mach/am35xx.h
@@ -23,4 +23,22 @@
#define AM35XX_IPSS_HECC_BASE 0x5C050000
#define AM35XX_IPSS_VPFE_BASE 0x5C060000
-#endif /* __ASM_ARCH_AM35XX_H */
+
+/* HECC module specific offset definitions */
+#define AM35XX_HECC_SCC_HECC_OFFSET (0x0)
+#define AM35XX_HECC_SCC_RAM_OFFSET (0x3000)
+#define AM35XX_HECC_RAM_OFFSET (0x3000)
+#define AM35XX_HECC_MBOX_OFFSET (0x2000)
+#define AM35XX_HECC_INT_LINE (0x0)
+#define AM35XX_HECC_VERSION (0x1)
+
+#define AM35XX_EMAC_CNTRL_OFFSET (0x10000)
+#define AM35XX_EMAC_CNTRL_MOD_OFFSET (0x0)
+#define AM35XX_EMAC_CNTRL_RAM_OFFSET (0x20000)
+#define AM35XX_EMAC_MDIO_OFFSET (0x30000)
+#define AM35XX_EMAC_CNTRL_RAM_SIZE (0x2000)
+#define AM35XX_EMAC_RAM_ADDR (AM35XX_IPSS_EMAC_BASE + \
+ AM35XX_EMAC_CNTRL_RAM_OFFSET)
+#define AM35XX_EMAC_HW_RAM_ADDR (0x01E20000)
+
+#endif /* __ASM_ARCH_AM35XX_H */
diff --git a/arch/arm/mach-omap2/include/mach/debug-macro.S b/arch/arm/mach-omap2/include/mach/debug-macro.S
index 4a63a2ea484d..35b24409a0c8 100644
--- a/arch/arm/mach-omap2/include/mach/debug-macro.S
+++ b/arch/arm/mach-omap2/include/mach/debug-macro.S
@@ -13,6 +13,8 @@
#include <linux/serial_reg.h>
+#include <asm/memory.h>
+
#include <plat/serial.h>
#define UART_OFFSET(addr) ((addr) & 0x00ffffff)
@@ -40,13 +42,12 @@ omap_uart_lsr: .word 0
cmp \rx, #0 @ is port configured?
bne 99f @ already configured
- /* Check UART1 scratchpad register for uart to use */
+ /* Check the debug UART configuration set in uncompress.h */
mrc p15, 0, \rx, c1, c0
tst \rx, #1 @ MMU enabled?
- moveq \rx, #0x48000000 @ physical base address
- movne \rx, #0xfa000000 @ virtual base
- orr \rx, \rx, #0x0006a000 @ uart1 on omap2/3/4
- ldrb \rx, [\rx, #(UART_SCR << OMAP_PORT_SHIFT)] @ scratchpad
+ ldreq \rx, =OMAP_UART_INFO
+ ldrne \rx, =__phys_to_virt(OMAP_UART_INFO)
+ ldr \rx, [\rx, #0]
/* Select the UART to use based on the UART1 scratchpad value */
cmp \rx, #0 @ no port configured?
@@ -87,10 +88,10 @@ omap_uart_lsr: .word 0
b 98f
44: mov \rx, #UART_OFFSET(OMAP4_UART4_BASE)
b 98f
-95: mov \rx, #ZOOM_UART_BASE
+95: ldr \rx, =ZOOM_UART_BASE
ldr \tmp, =omap_uart_phys
str \rx, [\tmp, #0]
- mov \rx, #ZOOM_UART_VIRT
+ ldr \rx, =ZOOM_UART_VIRT
ldr \tmp, =omap_uart_virt
str \rx, [\tmp, #0]
mov \rx, #(UART_LSR << ZOOM_PORT_SHIFT)
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 87f676acf61d..3cfb425ea67e 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -166,6 +166,15 @@ static struct map_desc omap34xx_io_desc[] __initdata = {
.length = L4_EMU_34XX_SIZE,
.type = MT_DEVICE
},
+#if defined(CONFIG_DEBUG_LL) && \
+ (defined(CONFIG_MACH_OMAP_ZOOM2) || defined(CONFIG_MACH_OMAP_ZOOM3))
+ {
+ .virtual = ZOOM_UART_VIRT,
+ .pfn = __phys_to_pfn(ZOOM_UART_BASE),
+ .length = SZ_1M,
+ .type = MT_DEVICE
+ },
+#endif
};
#endif
#ifdef CONFIG_ARCH_OMAP4
diff --git a/arch/arm/mach-omap2/mux34xx.c b/arch/arm/mach-omap2/mux34xx.c
index 07aa7b3c95f7..2ff4dce95ee8 100644
--- a/arch/arm/mach-omap2/mux34xx.c
+++ b/arch/arm/mach-omap2/mux34xx.c
@@ -1901,26 +1901,15 @@ struct omap_ball __initdata omap36xx_cbp_ball[] = {
_OMAP3_BALLENTRY(GPMC_A8, "m3", "ab18"),
_OMAP3_BALLENTRY(GPMC_A9, "l3", "ac19"),
_OMAP3_BALLENTRY(GPMC_CLK, "t4", "w2"),
- _OMAP3_BALLENTRY(GPMC_D0, "k1", "m2"),
- _OMAP3_BALLENTRY(GPMC_D1, "l1", "m1"),
_OMAP3_BALLENTRY(GPMC_D10, "p1", "ab4"),
_OMAP3_BALLENTRY(GPMC_D11, "r1", "ac4"),
_OMAP3_BALLENTRY(GPMC_D12, "r2", "ab6"),
_OMAP3_BALLENTRY(GPMC_D13, "t2", "ac6"),
_OMAP3_BALLENTRY(GPMC_D14, "w1", "ab7"),
_OMAP3_BALLENTRY(GPMC_D15, "y1", "ac7"),
- _OMAP3_BALLENTRY(GPMC_D2, "l2", "n2"),
- _OMAP3_BALLENTRY(GPMC_D3, "p2", "n1"),
- _OMAP3_BALLENTRY(GPMC_D4, "t1", "r2"),
- _OMAP3_BALLENTRY(GPMC_D5, "v1", "r1"),
- _OMAP3_BALLENTRY(GPMC_D6, "v2", "t2"),
- _OMAP3_BALLENTRY(GPMC_D7, "w2", "t1"),
- _OMAP3_BALLENTRY(GPMC_D8, "h2", "ab3"),
_OMAP3_BALLENTRY(GPMC_D9, "k2", "ac3"),
- _OMAP3_BALLENTRY(GPMC_NADV_ALE, "f3", "w1"),
_OMAP3_BALLENTRY(GPMC_NBE0_CLE, "g3", "ac12"),
_OMAP3_BALLENTRY(GPMC_NBE1, "u3", NULL),
- _OMAP3_BALLENTRY(GPMC_NCS0, "g4", "y2"),
_OMAP3_BALLENTRY(GPMC_NCS1, "h3", "y1"),
_OMAP3_BALLENTRY(GPMC_NCS2, "v8", NULL),
_OMAP3_BALLENTRY(GPMC_NCS3, "u8", NULL),
@@ -1928,10 +1917,7 @@ struct omap_ball __initdata omap36xx_cbp_ball[] = {
_OMAP3_BALLENTRY(GPMC_NCS5, "r8", NULL),
_OMAP3_BALLENTRY(GPMC_NCS6, "p8", NULL),
_OMAP3_BALLENTRY(GPMC_NCS7, "n8", NULL),
- _OMAP3_BALLENTRY(GPMC_NOE, "g2", "v2"),
- _OMAP3_BALLENTRY(GPMC_NWE, "f4", "v1"),
_OMAP3_BALLENTRY(GPMC_NWP, "h1", "ab10"),
- _OMAP3_BALLENTRY(GPMC_WAIT0, "m8", "ab12"),
_OMAP3_BALLENTRY(GPMC_WAIT1, "l8", "ac10"),
_OMAP3_BALLENTRY(GPMC_WAIT2, "k8", NULL),
_OMAP3_BALLENTRY(GPMC_WAIT3, "j8", NULL),
@@ -1948,8 +1934,6 @@ struct omap_ball __initdata omap36xx_cbp_ball[] = {
_OMAP3_BALLENTRY(HSUSB0_DIR, "r28", NULL),
_OMAP3_BALLENTRY(HSUSB0_NXT, "t26", NULL),
_OMAP3_BALLENTRY(HSUSB0_STP, "t25", NULL),
- _OMAP3_BALLENTRY(I2C1_SCL, "k21", NULL),
- _OMAP3_BALLENTRY(I2C1_SDA, "j21", NULL),
_OMAP3_BALLENTRY(I2C2_SCL, "af15", NULL),
_OMAP3_BALLENTRY(I2C2_SDA, "ae15", NULL),
_OMAP3_BALLENTRY(I2C3_SCL, "af14", NULL),
@@ -1958,11 +1942,6 @@ struct omap_ball __initdata omap36xx_cbp_ball[] = {
_OMAP3_BALLENTRY(I2C4_SDA, "ae26", NULL),
_OMAP3_BALLENTRY(JTAG_EMU0, "aa11", NULL),
_OMAP3_BALLENTRY(JTAG_EMU1, "aa10", NULL),
- _OMAP3_BALLENTRY(JTAG_RTCK, "aa12", NULL),
- _OMAP3_BALLENTRY(JTAG_TCK, "aa13", NULL),
- _OMAP3_BALLENTRY(JTAG_TDI, "aa20", NULL),
- _OMAP3_BALLENTRY(JTAG_TDO, "aa19", NULL),
- _OMAP3_BALLENTRY(JTAG_TMS_TMSC, "aa18", NULL),
_OMAP3_BALLENTRY(MCBSP1_CLKR, "y21", NULL),
_OMAP3_BALLENTRY(MCBSP1_CLKX, "w21", NULL),
_OMAP3_BALLENTRY(MCBSP1_DR, "u21", NULL),
@@ -2010,77 +1989,12 @@ struct omap_ball __initdata omap36xx_cbp_ball[] = {
_OMAP3_BALLENTRY(SDMMC2_DAT5, "ah3", NULL),
_OMAP3_BALLENTRY(SDMMC2_DAT6, "af3", NULL),
_OMAP3_BALLENTRY(SDMMC2_DAT7, "ae3", NULL),
- _OMAP3_BALLENTRY(SDRC_A0, NULL, "n22"),
- _OMAP3_BALLENTRY(SDRC_A1, NULL, "n23"),
- _OMAP3_BALLENTRY(SDRC_A10, NULL, "v22"),
- _OMAP3_BALLENTRY(SDRC_A11, NULL, "v23"),
- _OMAP3_BALLENTRY(SDRC_A12, NULL, "w22"),
- _OMAP3_BALLENTRY(SDRC_A13, NULL, "w23"),
- _OMAP3_BALLENTRY(SDRC_A14, NULL, "y22"),
- _OMAP3_BALLENTRY(SDRC_A2, NULL, "p22"),
- _OMAP3_BALLENTRY(SDRC_A3, NULL, "p23"),
- _OMAP3_BALLENTRY(SDRC_A4, NULL, "r22"),
- _OMAP3_BALLENTRY(SDRC_A5, NULL, "r23"),
- _OMAP3_BALLENTRY(SDRC_A6, NULL, "t22"),
- _OMAP3_BALLENTRY(SDRC_A7, NULL, "t23"),
- _OMAP3_BALLENTRY(SDRC_A8, NULL, "u22"),
- _OMAP3_BALLENTRY(SDRC_A9, NULL, "u23"),
- _OMAP3_BALLENTRY(SDRC_BA0, "h9", "ab21"),
- _OMAP3_BALLENTRY(SDRC_BA1, "h10", "ac21"),
_OMAP3_BALLENTRY(SDRC_CKE0, "h16", "j22"),
_OMAP3_BALLENTRY(SDRC_CKE1, "h17", "j23"),
- _OMAP3_BALLENTRY(SDRC_CLK, "a13", "a11"),
- _OMAP3_BALLENTRY(SDRC_D0, NULL, "j2"),
- _OMAP3_BALLENTRY(SDRC_D1, NULL, "j1"),
- _OMAP3_BALLENTRY(SDRC_D10, "c15", "b14"),
- _OMAP3_BALLENTRY(SDRC_D11, "b16", "a14"),
- _OMAP3_BALLENTRY(SDRC_D12, "d17", "b16"),
- _OMAP3_BALLENTRY(SDRC_D13, "c17", "a16"),
- _OMAP3_BALLENTRY(SDRC_D14, "b17", "b19"),
- _OMAP3_BALLENTRY(SDRC_D15, "d18", "a19"),
- _OMAP3_BALLENTRY(SDRC_D16, NULL, "b3"),
- _OMAP3_BALLENTRY(SDRC_D17, NULL, "a3"),
- _OMAP3_BALLENTRY(SDRC_D18, NULL, "b5"),
- _OMAP3_BALLENTRY(SDRC_D19, NULL, "a5"),
- _OMAP3_BALLENTRY(SDRC_D2, NULL, "g2"),
- _OMAP3_BALLENTRY(SDRC_D20, NULL, "b8"),
- _OMAP3_BALLENTRY(SDRC_D21, NULL, "a8"),
- _OMAP3_BALLENTRY(SDRC_D22, NULL, "b9"),
- _OMAP3_BALLENTRY(SDRC_D23, NULL, "a9"),
- _OMAP3_BALLENTRY(SDRC_D24, NULL, "b21"),
- _OMAP3_BALLENTRY(SDRC_D25, NULL, "a21"),
- _OMAP3_BALLENTRY(SDRC_D26, NULL, "d22"),
- _OMAP3_BALLENTRY(SDRC_D27, NULL, "d23"),
- _OMAP3_BALLENTRY(SDRC_D28, NULL, "e22"),
- _OMAP3_BALLENTRY(SDRC_D29, NULL, "e23"),
- _OMAP3_BALLENTRY(SDRC_D3, NULL, "g1"),
- _OMAP3_BALLENTRY(SDRC_D30, NULL, "g22"),
- _OMAP3_BALLENTRY(SDRC_D31, NULL, "g23"),
- _OMAP3_BALLENTRY(SDRC_D4, NULL, "f2"),
- _OMAP3_BALLENTRY(SDRC_D5, NULL, "f1"),
- _OMAP3_BALLENTRY(SDRC_D6, NULL, "d2"),
- _OMAP3_BALLENTRY(SDRC_D7, NULL, "d1"),
- _OMAP3_BALLENTRY(SDRC_D8, "c14", "b13"),
- _OMAP3_BALLENTRY(SDRC_D9, "b14", "a13"),
- _OMAP3_BALLENTRY(SDRC_DM0, NULL, "c1"),
- _OMAP3_BALLENTRY(SDRC_DM1, "a16", "a17"),
- _OMAP3_BALLENTRY(SDRC_DM2, NULL, "a6"),
- _OMAP3_BALLENTRY(SDRC_DM3, NULL, "a20"),
- _OMAP3_BALLENTRY(SDRC_DQS0, NULL, "c2"),
- _OMAP3_BALLENTRY(SDRC_DQS1, "a17", "b17"),
- _OMAP3_BALLENTRY(SDRC_DQS2, NULL, "b6"),
- _OMAP3_BALLENTRY(SDRC_DQS3, NULL, "b20"),
- _OMAP3_BALLENTRY(SDRC_NCAS, "h13", "l22"),
- _OMAP3_BALLENTRY(SDRC_NCLK, "a14", "b11"),
- _OMAP3_BALLENTRY(SDRC_NCS0, "h11", "m22"),
- _OMAP3_BALLENTRY(SDRC_NCS1, "h12", "m23"),
- _OMAP3_BALLENTRY(SDRC_NRAS, "h14", "l23"),
- _OMAP3_BALLENTRY(SDRC_NWE, "h15", "k23"),
_OMAP3_BALLENTRY(SIM_CLK, "p26", NULL),
_OMAP3_BALLENTRY(SIM_IO, "p27", NULL),
_OMAP3_BALLENTRY(SIM_PWRCTRL, "r27", NULL),
_OMAP3_BALLENTRY(SIM_RST, "r25", NULL),
- _OMAP3_BALLENTRY(SYS_32K, "ae25", NULL),
_OMAP3_BALLENTRY(SYS_BOOT0, "ah26", NULL),
_OMAP3_BALLENTRY(SYS_BOOT1, "ag26", NULL),
_OMAP3_BALLENTRY(SYS_BOOT2, "ae14", NULL),
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index e436dcb19795..2c12e8cd7183 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -43,7 +43,6 @@
#include <linux/err.h>
#include <linux/list.h>
#include <linux/mutex.h>
-#include <linux/bootmem.h>
#include <plat/common.h>
#include <plat/cpu.h>
diff --git a/arch/arm/mach-pxa/icontrol.c b/arch/arm/mach-pxa/icontrol.c
index 771137fc1a82..5ccb0ceff6c4 100644
--- a/arch/arm/mach-pxa/icontrol.c
+++ b/arch/arm/mach-pxa/icontrol.c
@@ -73,7 +73,6 @@ static struct pxa2xx_spi_chip mcp251x_chip_info4 = {
static struct mcp251x_platform_data mcp251x_info = {
.oscillator_frequency = 16E6,
- .model = CAN_MCP251X_MCP2515,
.board_specific_setup = NULL,
.power_enable = NULL,
.transceiver_enable = NULL
@@ -81,7 +80,7 @@ static struct mcp251x_platform_data mcp251x_info = {
static struct spi_board_info mcp251x_board_info[] = {
{
- .modalias = "mcp251x",
+ .modalias = "mcp2515",
.max_speed_hz = 6500000,
.bus_num = 3,
.chip_select = 0,
@@ -90,7 +89,7 @@ static struct spi_board_info mcp251x_board_info[] = {
.irq = gpio_to_irq(ICONTROL_MCP251x_nIRQ1)
},
{
- .modalias = "mcp251x",
+ .modalias = "mcp2515",
.max_speed_hz = 6500000,
.bus_num = 3,
.chip_select = 1,
@@ -99,7 +98,7 @@ static struct spi_board_info mcp251x_board_info[] = {
.irq = gpio_to_irq(ICONTROL_MCP251x_nIRQ2)
},
{
- .modalias = "mcp251x",
+ .modalias = "mcp2515",
.max_speed_hz = 6500000,
.bus_num = 4,
.chip_select = 0,
@@ -108,7 +107,7 @@ static struct spi_board_info mcp251x_board_info[] = {
.irq = gpio_to_irq(ICONTROL_MCP251x_nIRQ3)
},
{
- .modalias = "mcp251x",
+ .modalias = "mcp2515",
.max_speed_hz = 6500000,
.bus_num = 4,
.chip_select = 1,
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index 39896d883584..dbd256966379 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -414,15 +414,13 @@ static int zeus_mcp2515_transceiver_enable(int enable)
static struct mcp251x_platform_data zeus_mcp2515_pdata = {
.oscillator_frequency = 16*1000*1000,
- .model = CAN_MCP251X_MCP2515,
.board_specific_setup = zeus_mcp2515_setup,
- .transceiver_enable = zeus_mcp2515_transceiver_enable,
.power_enable = zeus_mcp2515_transceiver_enable,
};
static struct spi_board_info zeus_spi_board_info[] = {
[0] = {
- .modalias = "mcp251x",
+ .modalias = "mcp2515",
.platform_data = &zeus_mcp2515_pdata,
.irq = gpio_to_irq(ZEUS_CAN_GPIO),
.max_speed_hz = 1*1000*1000,
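Dropping the .model field suggests the driver now derives the chip variant from the modalias: "mcp2515" would be matched against an spi_device_id table, with the model carried in driver_data. A hedged sketch of such a table (not taken from this diff; the "mcp2510" entry and enum values are assumptions):

#include <linux/module.h>
#include <linux/spi/spi.h>

static const struct spi_device_id mcp251x_id_table[] = {
	{ "mcp2510", CAN_MCP251X_MCP2510 },	/* assumed */
	{ "mcp2515", CAN_MCP251X_MCP2515 },	/* matches the board entries above */
	{ }
};
MODULE_DEVICE_TABLE(spi, mcp251x_id_table);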
diff --git a/arch/arm/mach-realview/Makefile b/arch/arm/mach-realview/Makefile
index e704edb733c0..a01b76b7c956 100644
--- a/arch/arm/mach-realview/Makefile
+++ b/arch/arm/mach-realview/Makefile
@@ -2,7 +2,7 @@
# Makefile for the linux kernel.
#
-obj-y := core.o clock.o
+obj-y := core.o
obj-$(CONFIG_MACH_REALVIEW_EB) += realview_eb.o
obj-$(CONFIG_MACH_REALVIEW_PB11MP) += realview_pb11mp.o
obj-$(CONFIG_MACH_REALVIEW_PB1176) += realview_pb1176.o
diff --git a/arch/arm/mach-realview/clock.c b/arch/arm/mach-realview/clock.c
deleted file mode 100644
index a7043115de72..000000000000
--- a/arch/arm/mach-realview/clock.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * linux/arch/arm/mach-realview/clock.c
- *
- * Copyright (C) 2004 ARM Limited.
- * Written by Deep Blue Solutions Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/list.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/string.h>
-#include <linux/clk.h>
-#include <linux/mutex.h>
-
-#include <asm/hardware/icst307.h>
-
-#include "clock.h"
-
-int clk_enable(struct clk *clk)
-{
- return 0;
-}
-EXPORT_SYMBOL(clk_enable);
-
-void clk_disable(struct clk *clk)
-{
-}
-EXPORT_SYMBOL(clk_disable);
-
-unsigned long clk_get_rate(struct clk *clk)
-{
- return clk->rate;
-}
-EXPORT_SYMBOL(clk_get_rate);
-
-long clk_round_rate(struct clk *clk, unsigned long rate)
-{
- struct icst307_vco vco;
- vco = icst307_khz_to_vco(clk->params, rate / 1000);
- return icst307_khz(clk->params, vco) * 1000;
-}
-EXPORT_SYMBOL(clk_round_rate);
-
-int clk_set_rate(struct clk *clk, unsigned long rate)
-{
- int ret = -EIO;
-
- if (clk->setvco) {
- struct icst307_vco vco;
-
- vco = icst307_khz_to_vco(clk->params, rate / 1000);
- clk->rate = icst307_khz(clk->params, vco) * 1000;
- clk->setvco(clk, vco);
- ret = 0;
- }
- return ret;
-}
-EXPORT_SYMBOL(clk_set_rate);
diff --git a/arch/arm/mach-realview/clock.h b/arch/arm/mach-realview/clock.h
deleted file mode 100644
index ebbb0f06b600..000000000000
--- a/arch/arm/mach-realview/clock.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * linux/arch/arm/mach-realview/clock.h
- *
- * Copyright (C) 2004 ARM Limited.
- * Written by Deep Blue Solutions Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-struct module;
-struct icst307_params;
-
-struct clk {
- unsigned long rate;
- const struct icst307_params *params;
- void *data;
- void (*setvco)(struct clk *, struct icst307_vco vco);
-};
diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c
index d5a95738f85b..595be19f8ad5 100644
--- a/arch/arm/mach-realview/core.c
+++ b/arch/arm/mach-realview/core.c
@@ -25,8 +25,6 @@
#include <linux/interrupt.h>
#include <linux/amba/bus.h>
#include <linux/amba/clcd.h>
-#include <linux/clocksource.h>
-#include <linux/clockchips.h>
#include <linux/io.h>
#include <linux/smsc911x.h>
#include <linux/ata_platform.h>
@@ -40,7 +38,7 @@
#include <asm/leds.h>
#include <asm/mach-types.h>
#include <asm/hardware/arm_timer.h>
-#include <asm/hardware/icst307.h>
+#include <asm/hardware/icst.h>
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
@@ -49,13 +47,12 @@
#include <asm/hardware/gic.h>
+#include <mach/clkdev.h>
#include <mach/platform.h>
#include <mach/irqs.h>
+#include <plat/timer-sp.h>
#include "core.h"
-#include "clock.h"
-
-#define REALVIEW_REFCOUNTER (__io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_24MHz_OFFSET)
/* used by entry-macro.S and platsmp.c */
void __iomem *gic_cpu_base_addr;
@@ -79,20 +76,6 @@ void __init realview_adjust_zones(int node, unsigned long *size,
}
#endif
-/*
- * This is the RealView sched_clock implementation. This has
- * a resolution of 41.7ns, and a maximum value of about 179s.
- */
-unsigned long long sched_clock(void)
-{
- unsigned long long v;
-
- v = (unsigned long long)readl(REALVIEW_REFCOUNTER) * 125;
- do_div(v, 3);
-
- return v;
-}
-
#define REALVIEW_FLASHCTRL (__io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_FLASH_OFFSET)
@@ -274,37 +257,40 @@ struct mmci_platform_data realview_mmc1_plat_data = {
/*
* Clock handling
*/
-static const struct icst307_params realview_oscvco_params = {
- .ref = 24000,
- .vco_max = 200000,
+static const struct icst_params realview_oscvco_params = {
+ .ref = 24000000,
+ .vco_max = ICST307_VCO_MAX,
+ .vco_min = ICST307_VCO_MIN,
.vd_min = 4 + 8,
.vd_max = 511 + 8,
.rd_min = 1 + 2,
.rd_max = 127 + 2,
+ .s2div = icst307_s2div,
+ .idx2s = icst307_idx2s,
};
-static void realview_oscvco_set(struct clk *clk, struct icst307_vco vco)
+static void realview_oscvco_set(struct clk *clk, struct icst_vco vco)
{
void __iomem *sys_lock = __io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_LOCK_OFFSET;
- void __iomem *sys_osc;
u32 val;
- if (machine_is_realview_pb1176())
- sys_osc = __io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_OSC0_OFFSET;
- else
- sys_osc = __io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_OSC4_OFFSET;
-
- val = readl(sys_osc) & ~0x7ffff;
+ val = readl(clk->vcoreg) & ~0x7ffff;
val |= vco.v | (vco.r << 9) | (vco.s << 16);
writel(0xa05f, sys_lock);
- writel(val, sys_osc);
+ writel(val, clk->vcoreg);
writel(0, sys_lock);
}
+static const struct clk_ops oscvco_clk_ops = {
+ .round = icst_clk_round,
+ .set = icst_clk_set,
+ .setvco = realview_oscvco_set,
+};
+
static struct clk oscvco_clk = {
+ .ops = &oscvco_clk_ops,
.params = &realview_oscvco_params,
- .setvco = realview_oscvco_set,
};
/*
@@ -347,7 +333,13 @@ static struct clk_lookup lookups[] = {
static int __init clk_init(void)
{
+ if (machine_is_realview_pb1176())
+ oscvco_clk.vcoreg = __io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_OSC0_OFFSET;
+ else
+ oscvco_clk.vcoreg = __io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_OSC4_OFFSET;
+
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
return 0;
}
arch_initcall(clk_init);
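With the board-local clock.c gone, struct clk carries an ops pointer and the shared plat/clock.c implementation is expected to dispatch through it. A minimal sketch of that dispatch, assuming the common code looks roughly like this (names mirror the removed file, not the actual shared source):

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret = -EIO;

	if (clk->ops && clk->ops->set)
		ret = clk->ops->set(clk, rate);	/* icst_clk_set() for oscvco_clk */

	return ret;
}
EXPORT_SYMBOL(clk_set_rate);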
@@ -644,133 +636,6 @@ void __iomem *timer2_va_base;
void __iomem *timer3_va_base;
/*
- * How long is the timer interval?
- */
-#define TIMER_INTERVAL (TICKS_PER_uSEC * mSEC_10)
-#if TIMER_INTERVAL >= 0x100000
-#define TIMER_RELOAD (TIMER_INTERVAL >> 8)
-#define TIMER_DIVISOR (TIMER_CTRL_DIV256)
-#define TICKS2USECS(x) (256 * (x) / TICKS_PER_uSEC)
-#elif TIMER_INTERVAL >= 0x10000
-#define TIMER_RELOAD (TIMER_INTERVAL >> 4) /* Divide by 16 */
-#define TIMER_DIVISOR (TIMER_CTRL_DIV16)
-#define TICKS2USECS(x) (16 * (x) / TICKS_PER_uSEC)
-#else
-#define TIMER_RELOAD (TIMER_INTERVAL)
-#define TIMER_DIVISOR (TIMER_CTRL_DIV1)
-#define TICKS2USECS(x) ((x) / TICKS_PER_uSEC)
-#endif
-
-static void timer_set_mode(enum clock_event_mode mode,
- struct clock_event_device *clk)
-{
- unsigned long ctrl;
-
- switch(mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- writel(TIMER_RELOAD, timer0_va_base + TIMER_LOAD);
-
- ctrl = TIMER_CTRL_PERIODIC;
- ctrl |= TIMER_CTRL_32BIT | TIMER_CTRL_IE | TIMER_CTRL_ENABLE;
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- /* period set, and timer enabled in 'next_event' hook */
- ctrl = TIMER_CTRL_ONESHOT;
- ctrl |= TIMER_CTRL_32BIT | TIMER_CTRL_IE;
- break;
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- default:
- ctrl = 0;
- }
-
- writel(ctrl, timer0_va_base + TIMER_CTRL);
-}
-
-static int timer_set_next_event(unsigned long evt,
- struct clock_event_device *unused)
-{
- unsigned long ctrl = readl(timer0_va_base + TIMER_CTRL);
-
- writel(evt, timer0_va_base + TIMER_LOAD);
- writel(ctrl | TIMER_CTRL_ENABLE, timer0_va_base + TIMER_CTRL);
-
- return 0;
-}
-
-static struct clock_event_device timer0_clockevent = {
- .name = "timer0",
- .shift = 32,
- .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .set_mode = timer_set_mode,
- .set_next_event = timer_set_next_event,
- .rating = 300,
- .cpumask = cpu_all_mask,
-};
-
-static void __init realview_clockevents_init(unsigned int timer_irq)
-{
- timer0_clockevent.irq = timer_irq;
- timer0_clockevent.mult =
- div_sc(1000000, NSEC_PER_SEC, timer0_clockevent.shift);
- timer0_clockevent.max_delta_ns =
- clockevent_delta2ns(0xffffffff, &timer0_clockevent);
- timer0_clockevent.min_delta_ns =
- clockevent_delta2ns(0xf, &timer0_clockevent);
-
- clockevents_register_device(&timer0_clockevent);
-}
-
-/*
- * IRQ handler for the timer
- */
-static irqreturn_t realview_timer_interrupt(int irq, void *dev_id)
-{
- struct clock_event_device *evt = &timer0_clockevent;
-
- /* clear the interrupt */
- writel(1, timer0_va_base + TIMER_INTCLR);
-
- evt->event_handler(evt);
-
- return IRQ_HANDLED;
-}
-
-static struct irqaction realview_timer_irq = {
- .name = "RealView Timer Tick",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
- .handler = realview_timer_interrupt,
-};
-
-static cycle_t realview_get_cycles(struct clocksource *cs)
-{
- return ~readl(timer3_va_base + TIMER_VALUE);
-}
-
-static struct clocksource clocksource_realview = {
- .name = "timer3",
- .rating = 200,
- .read = realview_get_cycles,
- .mask = CLOCKSOURCE_MASK(32),
- .shift = 20,
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-static void __init realview_clocksource_init(void)
-{
- /* setup timer 0 as free-running clocksource */
- writel(0, timer3_va_base + TIMER_CTRL);
- writel(0xffffffff, timer3_va_base + TIMER_LOAD);
- writel(0xffffffff, timer3_va_base + TIMER_VALUE);
- writel(TIMER_CTRL_32BIT | TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC,
- timer3_va_base + TIMER_CTRL);
-
- clocksource_realview.mult =
- clocksource_khz2mult(1000, clocksource_realview.shift);
- clocksource_register(&clocksource_realview);
-}
-
-/*
* Set up the clock source and clock events devices
*/
void __init realview_timer_init(unsigned int timer_irq)
@@ -797,13 +662,8 @@ void __init realview_timer_init(unsigned int timer_irq)
writel(0, timer2_va_base + TIMER_CTRL);
writel(0, timer3_va_base + TIMER_CTRL);
- /*
- * Make irqs happen for the system timer
- */
- setup_irq(timer_irq, &realview_timer_irq);
-
- realview_clocksource_init();
- realview_clockevents_init(timer_irq);
+ sp804_clocksource_init(timer3_va_base);
+ sp804_clockevents_init(timer0_va_base, timer_irq);
}
/*
diff --git a/arch/arm/mach-realview/include/mach/clkdev.h b/arch/arm/mach-realview/include/mach/clkdev.h
index 04b37a89801c..e58d0771b64e 100644
--- a/arch/arm/mach-realview/include/mach/clkdev.h
+++ b/arch/arm/mach-realview/include/mach/clkdev.h
@@ -1,6 +1,15 @@
#ifndef __ASM_MACH_CLKDEV_H
#define __ASM_MACH_CLKDEV_H
+#include <plat/clock.h>
+
+struct clk {
+ unsigned long rate;
+ const struct clk_ops *ops;
+ const struct icst_params *params;
+ void __iomem *vcoreg;
+};
+
#define __clk_get(clk) ({ 1; })
#define __clk_put(clk) do { } while (0)
diff --git a/arch/arm/mach-realview/include/mach/irqs-pb1176.h b/arch/arm/mach-realview/include/mach/irqs-pb1176.h
index 2410d4f8ddd3..830055bb8628 100644
--- a/arch/arm/mach-realview/include/mach/irqs-pb1176.h
+++ b/arch/arm/mach-realview/include/mach/irqs-pb1176.h
@@ -31,6 +31,7 @@
#define IRQ_DC1176_SOFTINT (IRQ_DC1176_GIC_START + 1) /* Software interrupt */
#define IRQ_DC1176_COMMRx (IRQ_DC1176_GIC_START + 2) /* Debug Comm Rx interrupt */
#define IRQ_DC1176_COMMTx (IRQ_DC1176_GIC_START + 3) /* Debug Comm Tx interrupt */
+#define IRQ_DC1176_CORE_PMU (IRQ_DC1176_GIC_START + 7) /* Core PMU interrupt */
#define IRQ_DC1176_TIMER0 (IRQ_DC1176_GIC_START + 8) /* Timer 0 */
#define IRQ_DC1176_TIMER1 (IRQ_DC1176_GIC_START + 9) /* Timer 1 */
#define IRQ_DC1176_TIMER2 (IRQ_DC1176_GIC_START + 10) /* Timer 2 */
diff --git a/arch/arm/mach-realview/include/mach/irqs-pba8.h b/arch/arm/mach-realview/include/mach/irqs-pba8.h
index 86792a9f2ab6..4a88a4edb651 100644
--- a/arch/arm/mach-realview/include/mach/irqs-pba8.h
+++ b/arch/arm/mach-realview/include/mach/irqs-pba8.h
@@ -23,12 +23,6 @@
#define IRQ_PBA8_GIC_START 32
-/* L220
-#define IRQ_PBA8_L220_EVENT (IRQ_PBA8_GIC_START + 29)
-#define IRQ_PBA8_L220_SLAVE (IRQ_PBA8_GIC_START + 30)
-#define IRQ_PBA8_L220_DECODE (IRQ_PBA8_GIC_START + 31)
-*/
-
/*
* PB-A8 on-board gic irq sources
*/
@@ -65,6 +59,8 @@
#define IRQ_PBA8_TSPEN (IRQ_PBA8_GIC_START + 30) /* Touchscreen pen */
#define IRQ_PBA8_TSKPAD (IRQ_PBA8_GIC_START + 31) /* Touchscreen keypad */
+#define IRQ_PBA8_PMU (IRQ_PBA8_GIC_START + 47) /* Cortex-A8 PMU */
+
/* ... */
#define IRQ_PBA8_PCI0 (IRQ_PBA8_GIC_START + 50)
#define IRQ_PBA8_PCI1 (IRQ_PBA8_GIC_START + 51)
diff --git a/arch/arm/mach-realview/include/mach/irqs-pbx.h b/arch/arm/mach-realview/include/mach/irqs-pbx.h
index deaad4302b17..206a3001f46b 100644
--- a/arch/arm/mach-realview/include/mach/irqs-pbx.h
+++ b/arch/arm/mach-realview/include/mach/irqs-pbx.h
@@ -22,12 +22,6 @@
#define IRQ_PBX_GIC_START 32
-/* L220
-#define IRQ_PBX_L220_EVENT (IRQ_PBX_GIC_START + 29)
-#define IRQ_PBX_L220_SLAVE (IRQ_PBX_GIC_START + 30)
-#define IRQ_PBX_L220_DECODE (IRQ_PBX_GIC_START + 31)
-*/
-
/*
* PBX on-board gic irq sources
*/
@@ -77,10 +71,10 @@
#define IRQ_PBX_TIMER4_5 (IRQ_PBX_GIC_START + 41) /* Timer 0/1 (default timer) */
#define IRQ_PBX_TIMER6_7 (IRQ_PBX_GIC_START + 42) /* Timer 2/3 */
/* ... */
-#define IRQ_PBX_PMU_CPU3 (IRQ_PBX_GIC_START + 44) /* CPU PMU Interrupts */
-#define IRQ_PBX_PMU_CPU2 (IRQ_PBX_GIC_START + 45)
-#define IRQ_PBX_PMU_CPU1 (IRQ_PBX_GIC_START + 46)
-#define IRQ_PBX_PMU_CPU0 (IRQ_PBX_GIC_START + 47)
+#define IRQ_PBX_PMU_CPU0 (IRQ_PBX_GIC_START + 44) /* CPU PMU Interrupts */
+#define IRQ_PBX_PMU_CPU1 (IRQ_PBX_GIC_START + 45)
+#define IRQ_PBX_PMU_CPU2 (IRQ_PBX_GIC_START + 46)
+#define IRQ_PBX_PMU_CPU3 (IRQ_PBX_GIC_START + 47)
/* ... */
#define IRQ_PBX_PCI0 (IRQ_PBX_GIC_START + 50)
diff --git a/arch/arm/mach-realview/include/mach/platform.h b/arch/arm/mach-realview/include/mach/platform.h
index 86c0c4435a46..1b77a27badaf 100644
--- a/arch/arm/mach-realview/include/mach/platform.h
+++ b/arch/arm/mach-realview/include/mach/platform.h
@@ -231,12 +231,6 @@
#define REALVIEW_INTREG_OFFSET 0x8 /* Interrupt control */
#define REALVIEW_DECODE_OFFSET 0xC /* Fitted logic modules */
-/*
- * Clean base - dummy
- *
- */
-#define CLEAN_BASE REALVIEW_BOOT_ROM_HI
-
/*
* System controller bit assignment
*/
@@ -249,20 +243,6 @@
#define REALVIEW_TIMER4_EnSel 21
-#define MAX_TIMER 2
-#define MAX_PERIOD 699050
-#define TICKS_PER_uSEC 1
-
-/*
- * These are useconds NOT ticks.
- *
- */
-#define mSEC_1 1000
-#define mSEC_5 (mSEC_1 * 5)
-#define mSEC_10 (mSEC_1 * 10)
-#define mSEC_25 (mSEC_1 * 25)
-#define SEC_1 (mSEC_1 * 1000)
-
#define REALVIEW_CSR_BASE 0x10000000
#define REALVIEW_CSR_SIZE 0x10000000
diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c
index 7d857d300558..422ccd70d5f5 100644
--- a/arch/arm/mach-realview/realview_eb.c
+++ b/arch/arm/mach-realview/realview_eb.c
@@ -31,8 +31,8 @@
#include <asm/irq.h>
#include <asm/leds.h>
#include <asm/mach-types.h>
+#include <asm/pmu.h>
#include <asm/hardware/gic.h>
-#include <asm/hardware/icst307.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/localtimer.h>
@@ -44,7 +44,6 @@
#include <mach/irqs.h>
#include "core.h"
-#include "clock.h"
static struct map_desc realview_eb_io_desc[] __initdata = {
{
@@ -294,6 +293,36 @@ static struct resource realview_eb_isp1761_resources[] = {
},
};
+static struct resource pmu_resources[] = {
+ [0] = {
+ .start = IRQ_EB11MP_PMU_CPU0,
+ .end = IRQ_EB11MP_PMU_CPU0,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = IRQ_EB11MP_PMU_CPU1,
+ .end = IRQ_EB11MP_PMU_CPU1,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ .start = IRQ_EB11MP_PMU_CPU2,
+ .end = IRQ_EB11MP_PMU_CPU2,
+ .flags = IORESOURCE_IRQ,
+ },
+ [3] = {
+ .start = IRQ_EB11MP_PMU_CPU3,
+ .end = IRQ_EB11MP_PMU_CPU3,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device pmu_device = {
+ .name = "arm-pmu",
+ .id = ARM_PMU_DEVICE_CPU,
+ .num_resources = ARRAY_SIZE(pmu_resources),
+ .resource = pmu_resources,
+};
+
static void __init gic_init_irq(void)
{
if (core_tile_eb11mp() || core_tile_a9mp()) {
@@ -407,6 +436,7 @@ static void __init realview_eb_init(void)
* Bits: .... ...0 0111 1001 0000 .... .... .... */
l2x0_init(__io_address(REALVIEW_EB11MP_L220_BASE), 0x00790000, 0xfe000fff);
#endif
+ platform_device_register(&pmu_device);
}
realview_flash_register(&realview_eb_flash_resource, 1);
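The new "arm-pmu" device simply hands one IRQ resource per CPU to the PMU/perf driver. A hedged sketch of how a driver might walk the IRQ resources registered above, using the standard platform API (illustrative only):

#include <linux/ioport.h>
#include <linux/platform_device.h>

static int pmu_report_irqs(struct platform_device *pdev)
{
	int i;

	for (i = 0; i < pdev->num_resources; i++) {
		struct resource *r =
			platform_get_resource(pdev, IORESOURCE_IRQ, i);

		if (r)
			dev_info(&pdev->dev, "CPU%d PMU IRQ %d\n",
				 i, (int)r->start);
	}

	return 0;
}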
diff --git a/arch/arm/mach-realview/realview_pb1176.c b/arch/arm/mach-realview/realview_pb1176.c
index 44392e51dd50..96568ebfa2bb 100644
--- a/arch/arm/mach-realview/realview_pb1176.c
+++ b/arch/arm/mach-realview/realview_pb1176.c
@@ -31,8 +31,8 @@
#include <asm/irq.h>
#include <asm/leds.h>
#include <asm/mach-types.h>
+#include <asm/pmu.h>
#include <asm/hardware/gic.h>
-#include <asm/hardware/icst307.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/mach/arch.h>
@@ -44,7 +44,6 @@
#include <mach/irqs.h>
#include "core.h"
-#include "clock.h"
static struct map_desc realview_pb1176_io_desc[] __initdata = {
{
@@ -263,6 +262,19 @@ static struct resource realview_pb1176_isp1761_resources[] = {
},
};
+static struct resource pmu_resource = {
+ .start = IRQ_DC1176_CORE_PMU,
+ .end = IRQ_DC1176_CORE_PMU,
+ .flags = IORESOURCE_IRQ,
+};
+
+static struct platform_device pmu_device = {
+ .name = "arm-pmu",
+ .id = ARM_PMU_DEVICE_CPU,
+ .num_resources = 1,
+ .resource = &pmu_resource,
+};
+
static void __init gic_init_irq(void)
{
/* ARM1176 DevChip GIC, primary */
@@ -324,6 +336,7 @@ static void __init realview_pb1176_init(void)
realview_eth_register(NULL, realview_pb1176_smsc911x_resources);
platform_device_register(&realview_i2c_device);
realview_usb_register(realview_pb1176_isp1761_resources);
+ platform_device_register(&pmu_device);
for (i = 0; i < ARRAY_SIZE(amba_devs); i++) {
struct amba_device *d = amba_devs[i];
diff --git a/arch/arm/mach-realview/realview_pb11mp.c b/arch/arm/mach-realview/realview_pb11mp.c
index 3e02731af959..7fbefbbebaf0 100644
--- a/arch/arm/mach-realview/realview_pb11mp.c
+++ b/arch/arm/mach-realview/realview_pb11mp.c
@@ -31,8 +31,8 @@
#include <asm/irq.h>
#include <asm/leds.h>
#include <asm/mach-types.h>
+#include <asm/pmu.h>
#include <asm/hardware/gic.h>
-#include <asm/hardware/icst307.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/localtimer.h>
@@ -45,7 +45,6 @@
#include <mach/irqs.h>
#include "core.h"
-#include "clock.h"
static struct map_desc realview_pb11mp_io_desc[] __initdata = {
{
@@ -260,6 +259,36 @@ static struct resource realview_pb11mp_isp1761_resources[] = {
},
};
+static struct resource pmu_resources[] = {
+ [0] = {
+ .start = IRQ_TC11MP_PMU_CPU0,
+ .end = IRQ_TC11MP_PMU_CPU0,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = IRQ_TC11MP_PMU_CPU1,
+ .end = IRQ_TC11MP_PMU_CPU1,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ .start = IRQ_TC11MP_PMU_CPU2,
+ .end = IRQ_TC11MP_PMU_CPU2,
+ .flags = IORESOURCE_IRQ,
+ },
+ [3] = {
+ .start = IRQ_TC11MP_PMU_CPU3,
+ .end = IRQ_TC11MP_PMU_CPU3,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device pmu_device = {
+ .name = "arm-pmu",
+ .id = ARM_PMU_DEVICE_CPU,
+ .num_resources = ARRAY_SIZE(pmu_resources),
+ .resource = pmu_resources,
+};
+
static void __init gic_init_irq(void)
{
unsigned int pldctrl;
@@ -329,6 +358,7 @@ static void __init realview_pb11mp_init(void)
platform_device_register(&realview_i2c_device);
platform_device_register(&realview_cf_device);
realview_usb_register(realview_pb11mp_isp1761_resources);
+ platform_device_register(&pmu_device);
for (i = 0; i < ARRAY_SIZE(amba_devs); i++) {
struct amba_device *d = amba_devs[i];
diff --git a/arch/arm/mach-realview/realview_pba8.c b/arch/arm/mach-realview/realview_pba8.c
index fe4e25c4201a..d3c113b3dfce 100644
--- a/arch/arm/mach-realview/realview_pba8.c
+++ b/arch/arm/mach-realview/realview_pba8.c
@@ -30,8 +30,8 @@
#include <asm/irq.h>
#include <asm/leds.h>
#include <asm/mach-types.h>
+#include <asm/pmu.h>
#include <asm/hardware/gic.h>
-#include <asm/hardware/icst307.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -42,7 +42,6 @@
#include <mach/irqs.h>
#include "core.h"
-#include "clock.h"
static struct map_desc realview_pba8_io_desc[] __initdata = {
{
@@ -250,6 +249,19 @@ static struct resource realview_pba8_isp1761_resources[] = {
},
};
+static struct resource pmu_resource = {
+ .start = IRQ_PBA8_PMU,
+ .end = IRQ_PBA8_PMU,
+ .flags = IORESOURCE_IRQ,
+};
+
+static struct platform_device pmu_device = {
+ .name = "arm-pmu",
+ .id = ARM_PMU_DEVICE_CPU,
+ .num_resources = 1,
+ .resource = &pmu_resource,
+};
+
static void __init gic_init_irq(void)
{
/* ARM PB-A8 on-board GIC */
@@ -296,6 +308,7 @@ static void __init realview_pba8_init(void)
platform_device_register(&realview_i2c_device);
platform_device_register(&realview_cf_device);
realview_usb_register(realview_pba8_isp1761_resources);
+ platform_device_register(&pmu_device);
for (i = 0; i < ARRAY_SIZE(amba_devs); i++) {
struct amba_device *d = amba_devs[i];
diff --git a/arch/arm/mach-realview/realview_pbx.c b/arch/arm/mach-realview/realview_pbx.c
index d94857eb0690..a235ba30996b 100644
--- a/arch/arm/mach-realview/realview_pbx.c
+++ b/arch/arm/mach-realview/realview_pbx.c
@@ -29,6 +29,7 @@
#include <asm/irq.h>
#include <asm/leds.h>
#include <asm/mach-types.h>
+#include <asm/pmu.h>
#include <asm/smp_twd.h>
#include <asm/hardware/gic.h>
#include <asm/hardware/cache-l2x0.h>
@@ -270,6 +271,36 @@ static struct resource realview_pbx_isp1761_resources[] = {
},
};
+static struct resource pmu_resources[] = {
+ [0] = {
+ .start = IRQ_PBX_PMU_CPU0,
+ .end = IRQ_PBX_PMU_CPU0,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = IRQ_PBX_PMU_CPU1,
+ .end = IRQ_PBX_PMU_CPU1,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ .start = IRQ_PBX_PMU_CPU2,
+ .end = IRQ_PBX_PMU_CPU2,
+ .flags = IORESOURCE_IRQ,
+ },
+ [3] = {
+ .start = IRQ_PBX_PMU_CPU3,
+ .end = IRQ_PBX_PMU_CPU3,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device pmu_device = {
+ .name = "arm-pmu",
+ .id = ARM_PMU_DEVICE_CPU,
+ .num_resources = ARRAY_SIZE(pmu_resources),
+ .resource = pmu_resources,
+};
+
static void __init gic_init_irq(void)
{
/* ARM PBX on-board GIC */
@@ -354,6 +385,7 @@ static void __init realview_pbx_init(void)
/* 16KB way size, 8-way associativity, parity disabled
* Bits: .. 0 0 0 0 1 00 1 0 1 001 0 000 0 .... .... .... */
l2x0_init(l2x0_base, 0x02520000, 0xc0000fff);
+ platform_device_register(&pmu_device);
}
#endif
diff --git a/arch/arm/mach-s3c2410/Kconfig b/arch/arm/mach-s3c2410/Kconfig
index 554731868b07..9e5e96f12d86 100644
--- a/arch/arm/mach-s3c2410/Kconfig
+++ b/arch/arm/mach-s3c2410/Kconfig
@@ -6,6 +6,7 @@ config CPU_S3C2410
bool
depends on ARCH_S3C2410
select CPU_ARM920T
+ select S3C_GPIO_PULL_UP
select S3C2410_CLOCK
select S3C2410_GPIO
select CPU_LLSERIAL_S3C2410
diff --git a/arch/arm/mach-s3c2410/h1940-bluetooth.c b/arch/arm/mach-s3c2410/h1940-bluetooth.c
index a3f3c7b1ca38..8cdeb14af592 100644
--- a/arch/arm/mach-s3c2410/h1940-bluetooth.c
+++ b/arch/arm/mach-s3c2410/h1940-bluetooth.c
@@ -33,14 +33,15 @@ static void h1940bt_enable(int on)
h1940_latch_control(0, H1940_LATCH_BLUETOOTH_POWER);
/* Reset the chip */
mdelay(10);
- s3c2410_gpio_setpin(S3C2410_GPH(1), 1);
+
+ gpio_set_value(S3C2410_GPH(1), 1);
mdelay(10);
- s3c2410_gpio_setpin(S3C2410_GPH(1), 0);
+ gpio_set_value(S3C2410_GPH(1), 0);
}
else {
- s3c2410_gpio_setpin(S3C2410_GPH(1), 1);
+ gpio_set_value(S3C2410_GPH(1), 1);
mdelay(10);
- s3c2410_gpio_setpin(S3C2410_GPH(1), 0);
+ gpio_set_value(S3C2410_GPH(1), 0);
mdelay(10);
h1940_latch_control(H1940_LATCH_BLUETOOTH_POWER, 0);
}
@@ -61,15 +62,21 @@ static int __devinit h1940bt_probe(struct platform_device *pdev)
struct rfkill *rfk;
int ret = 0;
+ ret = gpio_request(S3C2410_GPH(1), dev_name(&pdev->dev));
+ if (ret) {
+ dev_err(&pdev->dev, "could not get GPH1\n");
+ return ret;
+ }
+
/* Configures BT serial port GPIOs */
- s3c2410_gpio_cfgpin(S3C2410_GPH(0), S3C2410_GPH0_nCTS0);
- s3c2410_gpio_pullup(S3C2410_GPH(0), 1);
- s3c2410_gpio_cfgpin(S3C2410_GPH(1), S3C2410_GPIO_OUTPUT);
- s3c2410_gpio_pullup(S3C2410_GPH(1), 1);
- s3c2410_gpio_cfgpin(S3C2410_GPH(2), S3C2410_GPH2_TXD0);
- s3c2410_gpio_pullup(S3C2410_GPH(2), 1);
- s3c2410_gpio_cfgpin(S3C2410_GPH(3), S3C2410_GPH3_RXD0);
- s3c2410_gpio_pullup(S3C2410_GPH(3), 1);
+ s3c_gpio_cfgpin(S3C2410_GPH(0), S3C2410_GPH0_nCTS0);
+ s3c_gpio_cfgpull(S3C2410_GPH(0), S3C_GPIO_PULL_NONE);
+ s3c_gpio_cfgpin(S3C2410_GPH(1), S3C2410_GPIO_OUTPUT);
+ s3c_gpio_cfgpull(S3C2410_GPH(1), S3C_GPIO_PULL_NONE);
+ s3c_gpio_cfgpin(S3C2410_GPH(2), S3C2410_GPH2_TXD0);
+ s3c_gpio_cfgpull(S3C2410_GPH(2), S3C_GPIO_PULL_NONE);
+ s3c_gpio_cfgpin(S3C2410_GPH(3), S3C2410_GPH3_RXD0);
+ s3c_gpio_cfgpull(S3C2410_GPH(3), S3C_GPIO_PULL_NONE);
rfk = rfkill_alloc(DRV_NAME, &pdev->dev, RFKILL_TYPE_BLUETOOTH,
@@ -100,6 +107,7 @@ static int h1940bt_remove(struct platform_device *pdev)
struct rfkill *rfk = platform_get_drvdata(pdev);
platform_set_drvdata(pdev, NULL);
+ gpio_free(S3C2410_GPH(1));
if (rfk) {
rfkill_unregister(rfk);
diff --git a/arch/arm/mach-s3c2410/include/mach/dma.h b/arch/arm/mach-s3c2410/include/mach/dma.h
index 08ac5f96c012..cf68136cc668 100644
--- a/arch/arm/mach-s3c2410/include/mach/dma.h
+++ b/arch/arm/mach-s3c2410/include/mach/dma.h
@@ -54,7 +54,7 @@ enum dma_ch {
#define DMACH_LOW_LEVEL (1<<28) /* use this to specify hardware ch no */
/* we have 4 dma channels */
-#ifndef CONFIG_CPU_S3C2443
+#if !defined(CONFIG_CPU_S3C2443) && !defined(CONFIG_CPU_S3C2416)
#define S3C_DMA_CHANNELS (4)
#else
#define S3C_DMA_CHANNELS (6)
diff --git a/arch/arm/mach-s3c2410/include/mach/gpio-fns.h b/arch/arm/mach-s3c2410/include/mach/gpio-fns.h
index 035a493952db..f453c4f2cb8e 100644
--- a/arch/arm/mach-s3c2410/include/mach/gpio-fns.h
+++ b/arch/arm/mach-s3c2410/include/mach/gpio-fns.h
@@ -10,14 +10,28 @@
* published by the Free Software Foundation.
*/
+#ifndef __MACH_GPIO_FNS_H
+#define __MACH_GPIO_FNS_H __FILE__
+
/* These functions are in the to-be-removed category and it is strongly
* encouraged not to use these in new code. They will be marked deprecated
* very soon.
*
* Most of the functionality can be either replaced by the gpiocfg calls
* for the s3c platform or by the generic GPIOlib API.
+ *
+ * As of 2.6.35-rc, these will be removed, with the few drivers using them
+ * either replaced or given a wrapper until the calls can be removed.
*/
+#include <plat/gpio-cfg.h>
+
+static inline void s3c2410_gpio_cfgpin(unsigned int pin, unsigned int cfg)
+{
+ /* 1:1 mapping between cfgpin and setcfg calls at the moment */
+ s3c_gpio_cfgpin(pin, cfg);
+}
+
/* external functions for GPIO support
*
* These allow various different clients to access the same GPIO
@@ -25,17 +39,6 @@
* GPIO register, then it is safe to ioremap/__raw_{read|write} to it.
*/
-/* s3c2410_gpio_cfgpin
- *
- * set the configuration of the given pin to the value passed.
- *
- * eg:
- * s3c2410_gpio_cfgpin(S3C2410_GPA(0), S3C2410_GPA0_ADDR0);
- * s3c2410_gpio_cfgpin(S3C2410_GPE(8), S3C2410_GPE8_SDDAT1);
-*/
-
-extern void s3c2410_gpio_cfgpin(unsigned int pin, unsigned int function);
-
extern unsigned int s3c2410_gpio_getcfg(unsigned int pin);
/* s3c2410_gpio_getirq
@@ -73,6 +76,14 @@ extern int s3c2410_gpio_irqfilter(unsigned int pin, unsigned int on,
/* s3c2410_gpio_pullup
*
+ * This call should be replaced with s3c_gpio_setpull().
+ *
+ * As a note, there is currently no distinction between pull-up and pull-down
+ * in the s3c24xx series devices with only an on/off configuration.
+ */
+
+/* s3c2410_gpio_pullup
+ *
* configure the pull-up control on the given pin
*
* to = 1 => disable the pull-up
@@ -86,18 +97,8 @@ extern int s3c2410_gpio_irqfilter(unsigned int pin, unsigned int on,
extern void s3c2410_gpio_pullup(unsigned int pin, unsigned int to);
-/* s3c2410_gpio_getpull
- *
- * Read the state of the pull-up on a given pin
- *
- * return:
- * < 0 => error code
- * 0 => enabled
- * 1 => disabled
-*/
-
-extern int s3c2410_gpio_getpull(unsigned int pin);
-
extern void s3c2410_gpio_setpin(unsigned int pin, unsigned int to);
extern unsigned int s3c2410_gpio_getpin(unsigned int pin);
+
+#endif /* __MACH_GPIO_FNS_H */
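For drivers still on the legacy calls, the conversion pattern used throughout this series maps one-to-one onto the gpiocfg helpers. A minimal sketch, using only pin and function constants from these headers:

#include <plat/gpio-cfg.h>

static void example_cfg_uart_cts(void)
{
	/* old: s3c2410_gpio_cfgpin(S3C2410_GPH(0), S3C2410_GPH0_nCTS0);
	 *      s3c2410_gpio_pullup(S3C2410_GPH(0), 1);   (1 = pull-up off)
	 */
	s3c_gpio_cfgpin(S3C2410_GPH(0), S3C2410_GPH0_nCTS0);
	s3c_gpio_setpull(S3C2410_GPH(0), S3C_GPIO_PULL_NONE);
}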
diff --git a/arch/arm/mach-s3c2410/include/mach/gpio-nrs.h b/arch/arm/mach-s3c2410/include/mach/gpio-nrs.h
index 2edbb9c88ab3..f3182ff847cb 100644
--- a/arch/arm/mach-s3c2410/include/mach/gpio-nrs.h
+++ b/arch/arm/mach-s3c2410/include/mach/gpio-nrs.h
@@ -34,6 +34,10 @@
#define S3C2410_GPIO_F_NR (32)
#define S3C2410_GPIO_G_NR (32)
#define S3C2410_GPIO_H_NR (32)
+#define S3C2410_GPIO_J_NR (32) /* technically 16. */
+#define S3C2410_GPIO_K_NR (32) /* technically 16. */
+#define S3C2410_GPIO_L_NR (32) /* technically 15. */
+#define S3C2410_GPIO_M_NR (32) /* technically 2. */
#if CONFIG_S3C_GPIO_SPACE != 0
#error CONFIG_S3C_GPIO_SPACE cannot be zero at the moment
@@ -53,6 +57,10 @@ enum s3c_gpio_number {
S3C2410_GPIO_F_START = S3C2410_GPIO_NEXT(S3C2410_GPIO_E),
S3C2410_GPIO_G_START = S3C2410_GPIO_NEXT(S3C2410_GPIO_F),
S3C2410_GPIO_H_START = S3C2410_GPIO_NEXT(S3C2410_GPIO_G),
+ S3C2410_GPIO_J_START = S3C2410_GPIO_NEXT(S3C2410_GPIO_H),
+ S3C2410_GPIO_K_START = S3C2410_GPIO_NEXT(S3C2410_GPIO_J),
+ S3C2410_GPIO_L_START = S3C2410_GPIO_NEXT(S3C2410_GPIO_K),
+ S3C2410_GPIO_M_START = S3C2410_GPIO_NEXT(S3C2410_GPIO_L),
};
#endif /* __ASSEMBLY__ */
@@ -67,6 +75,10 @@ enum s3c_gpio_number {
#define S3C2410_GPF(_nr) (S3C2410_GPIO_F_START + (_nr))
#define S3C2410_GPG(_nr) (S3C2410_GPIO_G_START + (_nr))
#define S3C2410_GPH(_nr) (S3C2410_GPIO_H_START + (_nr))
+#define S3C2410_GPJ(_nr) (S3C2410_GPIO_J_START + (_nr))
+#define S3C2410_GPK(_nr) (S3C2410_GPIO_K_START + (_nr))
+#define S3C2410_GPL(_nr) (S3C2410_GPIO_L_START + (_nr))
+#define S3C2410_GPM(_nr) (S3C2410_GPIO_M_START + (_nr))
/* compatibility until drivers can be modified */
diff --git a/arch/arm/mach-s3c2410/include/mach/irqs.h b/arch/arm/mach-s3c2410/include/mach/irqs.h
index 6c12c6312ad8..877c15e1b154 100644
--- a/arch/arm/mach-s3c2410/include/mach/irqs.h
+++ b/arch/arm/mach-s3c2410/include/mach/irqs.h
@@ -115,6 +115,26 @@
#define IRQ_S3C2412_SDI S3C2410_IRQSUB(13)
#define IRQ_S3C2412_CF S3C2410_IRQSUB(14)
+
+#define IRQ_S3C2416_EINT8t15 S3C2410_IRQ(5)
+#define IRQ_S3C2416_DMA S3C2410_IRQ(17)
+#define IRQ_S3C2416_UART3 S3C2410_IRQ(18)
+#define IRQ_S3C2416_SDI1 S3C2410_IRQ(20)
+#define IRQ_S3C2416_SDI0 S3C2410_IRQ(21)
+
+#define IRQ_S3C2416_LCD2 S3C2410_IRQSUB(15)
+#define IRQ_S3C2416_LCD3 S3C2410_IRQSUB(16)
+#define IRQ_S3C2416_LCD4 S3C2410_IRQSUB(17)
+#define IRQ_S3C2416_DMA0 S3C2410_IRQSUB(18)
+#define IRQ_S3C2416_DMA1 S3C2410_IRQSUB(19)
+#define IRQ_S3C2416_DMA2 S3C2410_IRQSUB(20)
+#define IRQ_S3C2416_DMA3 S3C2410_IRQSUB(21)
+#define IRQ_S3C2416_DMA4 S3C2410_IRQSUB(22)
+#define IRQ_S3C2416_DMA5 S3C2410_IRQSUB(23)
+#define IRQ_S32416_WDT S3C2410_IRQSUB(27)
+#define IRQ_S32416_AC97 S3C2410_IRQSUB(28)
+
+
/* extra irqs for s3c2440 */
#define IRQ_S3C2440_CAM_C S3C2410_IRQSUB(11) /* S3C2443 too */
@@ -130,7 +150,10 @@
#define IRQ_S3C2443_HSMMC S3C2410_IRQ(20) /* IRQ_SDI */
#define IRQ_S3C2443_NAND S3C2410_IRQ(24) /* reserved */
+#define IRQ_S3C2416_HSMMC0 S3C2410_IRQ(21) /* S3C2416/S3C2450 */
+
#define IRQ_HSMMC0 IRQ_S3C2443_HSMMC
+#define IRQ_HSMMC1 IRQ_S3C2416_HSMMC0
#define IRQ_S3C2443_LCD1 S3C2410_IRQSUB(14)
#define IRQ_S3C2443_LCD2 S3C2410_IRQSUB(15)
@@ -152,7 +175,7 @@
#define IRQ_S3C2443_WDT S3C2410_IRQSUB(27)
#define IRQ_S3C2443_AC97 S3C2410_IRQSUB(28)
-#ifdef CONFIG_CPU_S3C2443
+#if defined(CONFIG_CPU_S3C2443) || defined(CONFIG_CPU_S3C2416)
#define NR_IRQS (IRQ_S3C2443_AC97+1)
#else
#define NR_IRQS (IRQ_S3C2440_AC97+1)
diff --git a/arch/arm/mach-s3c2410/include/mach/map.h b/arch/arm/mach-s3c2410/include/mach/map.h
index b049e61460b6..f07d68066d7e 100644
--- a/arch/arm/mach-s3c2410/include/mach/map.h
+++ b/arch/arm/mach-s3c2410/include/mach/map.h
@@ -63,9 +63,9 @@
#define S3C2440_PA_AC97 (0x5B000000)
#define S3C2440_SZ_AC97 SZ_1M
-/* S3C2443 High-speed SD/MMC */
+/* S3C2443/S3C2416 High-speed SD/MMC */
#define S3C2443_PA_HSMMC (0x4A800000)
-#define S3C2443_SZ_HSMMC (256)
+#define S3C2416_PA_HSMMC0 (0x4AC00000)
/* S3C2412 memory and IO controls */
#define S3C2412_PA_SSMC (0x4F000000)
@@ -110,6 +110,7 @@
#define S3C_PA_UART S3C24XX_PA_UART
#define S3C_PA_USBHOST S3C2410_PA_USBHOST
#define S3C_PA_HSMMC0 S3C2443_PA_HSMMC
+#define S3C_PA_HSMMC1 S3C2416_PA_HSMMC0
#define S3C_PA_NAND S3C24XX_PA_NAND
#endif /* __ASM_ARCH_MAP_H */
diff --git a/arch/arm/mach-s3c2410/include/mach/regs-clock.h b/arch/arm/mach-s3c2410/include/mach/regs-clock.h
index 9a0d169be137..3415b60082d7 100644
--- a/arch/arm/mach-s3c2410/include/mach/regs-clock.h
+++ b/arch/arm/mach-s3c2410/include/mach/regs-clock.h
@@ -161,4 +161,6 @@
#endif /* CONFIG_CPU_S3C2412 | CONFIG_CPU_S3C2413 */
+#define S3C2416_CLKDIV2 S3C2410_CLKREG(0x28)
+
#endif /* __ASM_ARM_REGS_CLOCK */
diff --git a/arch/arm/mach-s3c2410/include/mach/regs-dsc.h b/arch/arm/mach-s3c2410/include/mach/regs-dsc.h
index 3c3853cd3cf7..98fd4a05587c 100644
--- a/arch/arm/mach-s3c2410/include/mach/regs-dsc.h
+++ b/arch/arm/mach-s3c2410/include/mach/regs-dsc.h
@@ -19,6 +19,42 @@
#define S3C2412_DSC1 S3C2410_GPIOREG(0xe0)
#endif
+#if defined(CONFIG_CPU_S3C2416)
+#define S3C2416_DSC0 S3C2410_GPIOREG(0xc0)
+#define S3C2416_DSC1 S3C2410_GPIOREG(0xc4)
+#define S3C2416_DSC2 S3C2410_GPIOREG(0xc8)
+#define S3C2416_DSC3 S3C2410_GPIOREG(0x110)
+
+#define S3C2416_SELECT_DSC0 (0 << 30)
+#define S3C2416_SELECT_DSC1 (1 << 30)
+#define S3C2416_SELECT_DSC2 (2 << 30)
+#define S3C2416_SELECT_DSC3 (3 << 30)
+
+#define S3C2416_DSC_GETSHIFT(x) (x & 30)
+
+#define S3C2416_DSC0_CF (S3C2416_SELECT_DSC0 | 28)
+#define S3C2416_DSC0_CF_5mA (0 << 28)
+#define S3C2416_DSC0_CF_10mA (1 << 28)
+#define S3C2416_DSC0_CF_15mA (2 << 28)
+#define S3C2416_DSC0_CF_21mA (3 << 28)
+#define S3C2416_DSC0_CF_MASK (3 << 28)
+
+#define S3C2416_DSC0_nRBE (S3C2416_SELECT_DSC0 | 26)
+#define S3C2416_DSC0_nRBE_5mA (0 << 26)
+#define S3C2416_DSC0_nRBE_10mA (1 << 26)
+#define S3C2416_DSC0_nRBE_15mA (2 << 26)
+#define S3C2416_DSC0_nRBE_21mA (3 << 26)
+#define S3C2416_DSC0_nRBE_MASK (3 << 26)
+
+#define S3C2416_DSC0_nROE (S3C2416_SELECT_DSC0 | 24)
+#define S3C2416_DSC0_nROE_5mA (0 << 24)
+#define S3C2416_DSC0_nROE_10mA (1 << 24)
+#define S3C2416_DSC0_nROE_15mA (2 << 24)
+#define S3C2416_DSC0_nROE_21mA (3 << 24)
+#define S3C2416_DSC0_nROE_MASK (3 << 24)
+
+#endif
+
#if defined(CONFIG_CPU_S3C244X)
#define S3C2440_DSC0 S3C2410_GPIOREG(0xc4)
diff --git a/arch/arm/mach-s3c2410/include/mach/regs-gpio.h b/arch/arm/mach-s3c2410/include/mach/regs-gpio.h
index fd672f330bf2..a6384239eddf 100644
--- a/arch/arm/mach-s3c2410/include/mach/regs-gpio.h
+++ b/arch/arm/mach-s3c2410/include/mach/regs-gpio.h
@@ -610,35 +610,73 @@
#define S3C2410_GPHUP S3C2410_GPIOREG(0x78)
#define S3C2410_GPH0_nCTS0 (0x02 << 0)
+#define S3C2416_GPH0_TXD0 (0x02 << 0)
#define S3C2410_GPH1_nRTS0 (0x02 << 2)
+#define S3C2416_GPH1_RXD0 (0x02 << 2)
#define S3C2410_GPH2_TXD0 (0x02 << 4)
+#define S3C2416_GPH2_TXD1 (0x02 << 4)
#define S3C2410_GPH3_RXD0 (0x02 << 6)
+#define S3C2416_GPH3_RXD1 (0x02 << 6)
#define S3C2410_GPH4_TXD1 (0x02 << 8)
+#define S3C2416_GPH4_TXD2 (0x02 << 8)
#define S3C2410_GPH5_RXD1 (0x02 << 10)
+#define S3C2416_GPH5_RXD2 (0x02 << 10)
#define S3C2410_GPH6_TXD2 (0x02 << 12)
+#define S3C2416_GPH6_TXD3 (0x02 << 12)
#define S3C2410_GPH6_nRTS1 (0x03 << 12)
+#define S3C2416_GPH6_nRTS2 (0x03 << 12)
#define S3C2410_GPH7_RXD2 (0x02 << 14)
+#define S3C2416_GPH7_RXD3 (0x02 << 14)
#define S3C2410_GPH7_nCTS1 (0x03 << 14)
+#define S3C2416_GPH7_nCTS2 (0x03 << 14)
#define S3C2410_GPH8_UCLK (0x02 << 16)
+#define S3C2416_GPH8_nCTS0 (0x02 << 16)
#define S3C2410_GPH9_CLKOUT0 (0x02 << 18)
#define S3C2442_GPH9_nSPICS0 (0x03 << 18)
+#define S3C2416_GPH9_nRTS0 (0x02 << 18)
#define S3C2410_GPH10_CLKOUT1 (0x02 << 20)
+#define S3C2416_GPH10_nCTS1 (0x02 << 20)
+
+#define S3C2416_GPH11_nRTS1 (0x02 << 22)
+
+#define S3C2416_GPH12_EXTUARTCLK (0x02 << 24)
+
+#define S3C2416_GPH13_CLKOUT0 (0x02 << 26)
+
+#define S3C2416_GPH14_CLKOUT1 (0x02 << 28)
/* The S3C2412 and S3C2413 move the GPJ register set to after
* GPH, which means all registers after 0x80 are now offset by 0x10
* for the 2412/2413 from the 2410/2440/2442
*/
+/* S3C2443 and above */
+#define S3C2440_GPJCON S3C2410_GPIOREG(0xD0)
+#define S3C2440_GPJDAT S3C2410_GPIOREG(0xD4)
+#define S3C2440_GPJUP S3C2410_GPIOREG(0xD8)
+
+#define S3C2443_GPKCON S3C2410_GPIOREG(0xE0)
+#define S3C2443_GPKDAT S3C2410_GPIOREG(0xE4)
+#define S3C2443_GPKUP S3C2410_GPIOREG(0xE8)
+
+#define S3C2443_GPLCON S3C2410_GPIOREG(0xF0)
+#define S3C2443_GPLDAT S3C2410_GPIOREG(0xF4)
+#define S3C2443_GPLUP S3C2410_GPIOREG(0xF8)
+
+#define S3C2443_GPMCON S3C2410_GPIOREG(0x100)
+#define S3C2443_GPMDAT S3C2410_GPIOREG(0x104)
+#define S3C2443_GPMUP S3C2410_GPIOREG(0x108)
+
/* miscellaneous control */
#define S3C2400_MISCCR S3C2410_GPIOREG(0x54)
#define S3C2410_MISCCR S3C2410_GPIOREG(0x80)
@@ -686,6 +724,7 @@
#define S3C2412_MISCCR_CLK1_CLKsrc (0<<8)
#define S3C2410_MISCCR_USBSUSPND0 (1<<12)
+#define S3C2416_MISCCR_SEL_SUSPND (1<<12)
#define S3C2410_MISCCR_USBSUSPND1 (1<<13)
#define S3C2410_MISCCR_nRSTCON (1<<16)
@@ -695,6 +734,9 @@
#define S3C2410_MISCCR_nEN_SCLKE (1<<19) /* not 2412 */
#define S3C2410_MISCCR_SDSLEEP (7<<17)
+#define S3C2416_MISCCR_FLT_I2C (1<<24)
+#define S3C2416_MISCCR_HSSPI_EN2 (1<<31)
+
/* external interrupt control... */
/* S3C2410_EXTINT0 -> irq sense control for EINT0..EINT7
* S3C2410_EXTINT1 -> irq sense control for EINT8..EINT15
@@ -762,8 +804,11 @@
#define S3C2410_GSTATUS1_IDMASK (0xffff0000)
#define S3C2410_GSTATUS1_2410 (0x32410000)
#define S3C2410_GSTATUS1_2412 (0x32412001)
+#define S3C2410_GSTATUS1_2416 (0x32416003)
#define S3C2410_GSTATUS1_2440 (0x32440000)
#define S3C2410_GSTATUS1_2442 (0x32440aaa)
+/* some 2416 CPUs report this value also */
+#define S3C2410_GSTATUS1_2450 (0x32450003)
#define S3C2410_GSTATUS2_WTRESET (1<<2)
#define S3C2410_GSTATUS2_OFFRESET (1<<1)
diff --git a/arch/arm/mach-s3c2410/include/mach/regs-gpioj.h b/arch/arm/mach-s3c2410/include/mach/regs-gpioj.h
index 1202ca5e99f6..19575e061114 100644
--- a/arch/arm/mach-s3c2410/include/mach/regs-gpioj.h
+++ b/arch/arm/mach-s3c2410/include/mach/regs-gpioj.h
@@ -22,85 +22,49 @@
* pull up works like all other ports.
*/
-#define S3C2440_GPIO_BANKJ (416)
-
-#define S3C2440_GPJCON S3C2410_GPIOREG(0xd0)
-#define S3C2440_GPJDAT S3C2410_GPIOREG(0xd4)
-#define S3C2440_GPJUP S3C2410_GPIOREG(0xd8)
-
#define S3C2413_GPJCON S3C2410_GPIOREG(0x80)
#define S3C2413_GPJDAT S3C2410_GPIOREG(0x84)
#define S3C2413_GPJUP S3C2410_GPIOREG(0x88)
#define S3C2413_GPJSLPCON S3C2410_GPIOREG(0x8C)
-#define S3C2440_GPJ0 S3C2410_GPIONO(S3C2440_GPIO_BANKJ, 0)
-#define S3C2440_GPJ0_INP (0x00 << 0)
#define S3C2440_GPJ0_OUTP (0x01 << 0)
#define S3C2440_GPJ0_CAMDATA0 (0x02 << 0)
-#define S3C2440_GPJ1 S3C2410_GPIONO(S3C2440_GPIO_BANKJ, 1)
-#define S3C2440_GPJ1_INP (0x00 << 2)
#define S3C2440_GPJ1_OUTP (0x01 << 2)
#define S3C2440_GPJ1_CAMDATA1 (0x02 << 2)
-#define S3C2440_GPJ2 S3C2410_GPIONO(S3C2440_GPIO_BANKJ, 2)
-#define S3C2440_GPJ2_INP (0x00 << 4)
#define S3C2440_GPJ2_OUTP (0x01 << 4)
#define S3C2440_GPJ2_CAMDATA2 (0x02 << 4)
-#define S3C2440_GPJ3 S3C2410_GPIONO(S3C2440_GPIO_BANKJ, 3)
-#define S3C2440_GPJ3_INP (0x00 << 6)
#define S3C2440_GPJ3_OUTP (0x01 << 6)
#define S3C2440_GPJ3_CAMDATA3 (0x02 << 6)
-#define S3C2440_GPJ4 S3C2410_GPIONO(S3C2440_GPIO_BANKJ, 4)
-#define S3C2440_GPJ4_INP (0x00 << 8)
#define S3C2440_GPJ4_OUTP (0x01 << 8)
#define S3C2440_GPJ4_CAMDATA4 (0x02 << 8)
-#define S3C2440_GPJ5 S3C2410_GPIONO(S3C2440_GPIO_BANKJ, 5)
-#define S3C2440_GPJ5_INP (0x00 << 10)
#define S3C2440_GPJ5_OUTP (0x01 << 10)
#define S3C2440_GPJ5_CAMDATA5 (0x02 << 10)
-#define S3C2440_GPJ6 S3C2410_GPIONO(S3C2440_GPIO_BANKJ, 6)
-#define S3C2440_GPJ6_INP (0x00 << 12)
#define S3C2440_GPJ6_OUTP (0x01 << 12)
#define S3C2440_GPJ6_CAMDATA6 (0x02 << 12)
-#define S3C2440_GPJ7 S3C2410_GPIONO(S3C2440_GPIO_BANKJ, 7)
-#define S3C2440_GPJ7_INP (0x00 << 14)
#define S3C2440_GPJ7_OUTP (0x01 << 14)
#define S3C2440_GPJ7_CAMDATA7 (0x02 << 14)
-#define S3C2440_GPJ8 S3C2410_GPIONO(S3C2440_GPIO_BANKJ, 8)
-#define S3C2440_GPJ8_INP (0x00 << 16)
#define S3C2440_GPJ8_OUTP (0x01 << 16)
#define S3C2440_GPJ8_CAMPCLK (0x02 << 16)
-#define S3C2440_GPJ9 S3C2410_GPIONO(S3C2440_GPIO_BANKJ, 9)
-#define S3C2440_GPJ9_INP (0x00 << 18)
#define S3C2440_GPJ9_OUTP (0x01 << 18)
#define S3C2440_GPJ9_CAMVSYNC (0x02 << 18)
-#define S3C2440_GPJ10 S3C2410_GPIONO(S3C2440_GPIO_BANKJ, 10)
-#define S3C2440_GPJ10_INP (0x00 << 20)
#define S3C2440_GPJ10_OUTP (0x01 << 20)
#define S3C2440_GPJ10_CAMHREF (0x02 << 20)
-#define S3C2440_GPJ11 S3C2410_GPIONO(S3C2440_GPIO_BANKJ, 11)
-#define S3C2440_GPJ11_INP (0x00 << 22)
#define S3C2440_GPJ11_OUTP (0x01 << 22)
#define S3C2440_GPJ11_CAMCLKOUT (0x02 << 22)
-#define S3C2440_GPJ12 S3C2410_GPIONO(S3C2440_GPIO_BANKJ, 12)
-#define S3C2440_GPJ12_INP (0x00 << 24)
#define S3C2440_GPJ12_OUTP (0x01 << 24)
#define S3C2440_GPJ12_CAMRESET (0x02 << 24)
-#define S3C2443_GPJ13 S3C2410_GPIONO(S3C2440_GPIO_BANKJ, 13)
-#define S3C2443_GPJ14 S3C2410_GPIONO(S3C2440_GPIO_BANKJ, 14)
-#define S3C2443_GPJ15 S3C2410_GPIONO(S3C2440_GPIO_BANKJ, 15)
-
#endif /* __ASM_ARCH_REGS_GPIOJ_H */
diff --git a/arch/arm/mach-s3c2410/include/mach/regs-irq.h b/arch/arm/mach-s3c2410/include/mach/regs-irq.h
index de86ee8812bd..0f07ba30b1fb 100644
--- a/arch/arm/mach-s3c2410/include/mach/regs-irq.h
+++ b/arch/arm/mach-s3c2410/include/mach/regs-irq.h
@@ -27,6 +27,16 @@
#define S3C2410_SUBSRCPND S3C2410_IRQREG(0x018)
#define S3C2410_INTSUBMSK S3C2410_IRQREG(0x01C)
+#define S3C2416_PRIORITY_MODE1 S3C2410_IRQREG(0x030)
+#define S3C2416_PRIORITY_UPDATE1 S3C2410_IRQREG(0x034)
+#define S3C2416_SRCPND2 S3C2410_IRQREG(0x040)
+#define S3C2416_INTMOD2 S3C2410_IRQREG(0x044)
+#define S3C2416_INTMSK2 S3C2410_IRQREG(0x048)
+#define S3C2416_INTPND2 S3C2410_IRQREG(0x050)
+#define S3C2416_INTOFFSET2 S3C2410_IRQREG(0x054)
+#define S3C2416_PRIORITY_MODE2 S3C2410_IRQREG(0x070)
+#define S3C2416_PRIORITY_UPDATE2 S3C2410_IRQREG(0x074)
+
/* mask: 0=enable, 1=disable
* 1 bit EINT, 4=EINT4, 23=EINT23
* EINT0,1,2,3 are not handled here.
diff --git a/arch/arm/mach-s3c2410/include/mach/regs-s3c2416-mem.h b/arch/arm/mach-s3c2410/include/mach/regs-s3c2416-mem.h
new file mode 100644
index 000000000000..2f31b74974af
--- /dev/null
+++ b/arch/arm/mach-s3c2410/include/mach/regs-s3c2416-mem.h
@@ -0,0 +1,30 @@
+/* arch/arm/mach-s3c2410/include/mach/regs-s3c2416-mem.h
+ *
+ * Copyright (c) 2009 Yauhen Kharuzhy <jekhor@gmail.com>,
+ * as part of OpenInkpot project
+ * Copyright (c) 2009 Promwad Innovation Company
+ * Yauhen Kharuzhy <yauhen.kharuzhy@promwad.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * S3C2416 memory register definitions
+*/
+
+#ifndef __ASM_ARM_REGS_S3C2416_MEM
+#define __ASM_ARM_REGS_S3C2416_MEM
+
+#ifndef S3C2416_MEMREG
+#define S3C2416_MEMREG(x) (S3C24XX_VA_MEMCTRL + (x))
+#endif
+
+#define S3C2416_BANKCFG S3C2416_MEMREG(0x00)
+#define S3C2416_BANKCON1 S3C2416_MEMREG(0x04)
+#define S3C2416_BANKCON2 S3C2416_MEMREG(0x08)
+#define S3C2416_BANKCON3 S3C2416_MEMREG(0x0C)
+
+#define S3C2416_REFRESH S3C2416_MEMREG(0x10)
+#define S3C2416_TIMEOUT S3C2416_MEMREG(0x14)
+
+#endif /* __ASM_ARM_REGS_S3C2416_MEM */
diff --git a/arch/arm/mach-s3c2410/include/mach/regs-s3c2416.h b/arch/arm/mach-s3c2410/include/mach/regs-s3c2416.h
new file mode 100644
index 000000000000..e443167efb87
--- /dev/null
+++ b/arch/arm/mach-s3c2410/include/mach/regs-s3c2416.h
@@ -0,0 +1,24 @@
+/* arch/arm/mach-s3c2410/include/mach/regs-s3c2416.h
+ *
+ * Copyright (c) 2009 Yauhen Kharuzhy <jekhor@gmail.com>,
+ * as part of OpenInkpot project
+ * Copyright (c) 2009 Promwad Innovation Company
+ * Yauhen Kharuzhy <yauhen.kharuzhy@promwad.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * S3C2416 specific register definitions
+*/
+
+#ifndef __ASM_ARCH_REGS_S3C2416_H
+#define __ASM_ARCH_REGS_S3C2416_H "s3c2416"
+
+#define S3C2416_SWRST (S3C24XX_VA_CLKPWR + 0x44)
+#define S3C2416_SWRST_RESET (0x533C2416)
+
+/* see regs-power.h for the other registers in the power block. */
+
+#endif /* __ASM_ARCH_REGS_S3C2416_H */
+
diff --git a/arch/arm/mach-s3c2410/include/mach/uncompress.h b/arch/arm/mach-s3c2410/include/mach/uncompress.h
index 72f756c5e504..8b283f847daa 100644
--- a/arch/arm/mach-s3c2410/include/mach/uncompress.h
+++ b/arch/arm/mach-s3c2410/include/mach/uncompress.h
@@ -40,7 +40,9 @@ static void arch_detect_cpu(void)
cpuid &= S3C2410_GSTATUS1_IDMASK;
if (is_arm926() || cpuid == S3C2410_GSTATUS1_2440 ||
- cpuid == S3C2410_GSTATUS1_2442) {
+ cpuid == S3C2410_GSTATUS1_2442 ||
+ cpuid == S3C2410_GSTATUS1_2416 ||
+ cpuid == S3C2410_GSTATUS1_2450) {
fifo_mask = S3C2440_UFSTAT_TXMASK;
fifo_max = 63 << S3C2440_UFSTAT_TXSHIFT;
} else {
diff --git a/arch/arm/mach-s3c2410/mach-amlm5900.c b/arch/arm/mach-s3c2410/mach-amlm5900.c
index 7047317ed7f4..34fc05a4244b 100644
--- a/arch/arm/mach-s3c2410/mach-amlm5900.c
+++ b/arch/arm/mach-s3c2410/mach-amlm5900.c
@@ -56,6 +56,7 @@
#include <plat/iic.h>
#include <plat/devs.h>
#include <plat/cpu.h>
+#include <plat/gpio-cfg.h>
#ifdef CONFIG_MTD_PARTITIONS
@@ -225,8 +226,8 @@ static void amlm5900_init_pm(void)
} else {
enable_irq_wake(IRQ_EINT9);
/* configure the suspend/resume status pin */
- s3c2410_gpio_cfgpin(S3C2410_GPF(2), S3C2410_GPIO_OUTPUT);
- s3c2410_gpio_pullup(S3C2410_GPF(2), 0);
+ s3c_gpio_cfgpin(S3C2410_GPF(2), S3C2410_GPIO_OUTPUT);
+ s3c_gpio_setpull(S3C2410_GPF(2), S3C_GPIO_PULL_UP);
}
}
static void __init amlm5900_init(void)
diff --git a/arch/arm/mach-s3c2410/mach-bast.c b/arch/arm/mach-s3c2410/mach-bast.c
index 02b1b6220cba..b061ddcf3067 100644
--- a/arch/arm/mach-s3c2410/mach-bast.c
+++ b/arch/arm/mach-s3c2410/mach-bast.c
@@ -61,6 +61,7 @@
#include <plat/devs.h>
#include <plat/cpu.h>
#include <plat/cpu-freq.h>
+#include <plat/gpio-cfg.h>
#include <plat/audio-simtec.h>
#include "usb-simtec.h"
@@ -216,15 +217,13 @@ static struct s3c2410_uartcfg bast_uartcfgs[] __initdata = {
static int bast_pm_suspend(struct sys_device *sd, pm_message_t state)
{
/* ensure that an nRESET is not generated on resume. */
- s3c2410_gpio_setpin(S3C2410_GPA(21), 1);
- s3c2410_gpio_cfgpin(S3C2410_GPA(21), S3C2410_GPIO_OUTPUT);
-
+ gpio_direction_output(S3C2410_GPA(21), 1);
return 0;
}
static int bast_pm_resume(struct sys_device *sd)
{
- s3c2410_gpio_cfgpin(S3C2410_GPA(21), S3C2410_GPA21_nRSTOUT);
+ s3c_gpio_cfgpin(S3C2410_GPA(21), S3C2410_GPA21_nRSTOUT);
return 0;
}
@@ -658,6 +657,8 @@ static void __init bast_init(void)
nor_simtec_init();
simtec_audio_add(NULL, true, &bast_audio);
+ WARN_ON(gpio_request(S3C2410_GPA(21), "bast nreset"));
+
s3c_cpufreq_setboard(&bast_cpufreq);
}
diff --git a/arch/arm/mach-s3c2410/mach-h1940.c b/arch/arm/mach-s3c2410/mach-h1940.c
index fbedd0760941..7c39d13abd78 100644
--- a/arch/arm/mach-s3c2410/mach-h1940.c
+++ b/arch/arm/mach-s3c2410/mach-h1940.c
@@ -46,16 +46,17 @@
#include <mach/h1940.h>
#include <mach/h1940-latch.h>
#include <mach/fb.h>
-#include <mach/ts.h>
#include <plat/udc.h>
#include <plat/iic.h>
+#include <plat/gpio-cfg.h>
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/cpu.h>
#include <plat/pll.h>
#include <plat/pm.h>
#include <plat/mci.h>
+#include <plat/ts.h>
static struct map_desc h1940_iodesc[] __initdata = {
[0] = {
@@ -207,16 +208,16 @@ static int h1940_backlight_init(struct device *dev)
{
gpio_request(S3C2410_GPB(0), "Backlight");
- s3c2410_gpio_setpin(S3C2410_GPB(0), 0);
- s3c2410_gpio_pullup(S3C2410_GPB(0), 0);
- s3c2410_gpio_cfgpin(S3C2410_GPB(0), S3C2410_GPB0_TOUT0);
+ gpio_direction_output(S3C2410_GPB(0), 0);
+ s3c_gpio_setpull(S3C2410_GPB(0), S3C_GPIO_PULL_NONE);
+ s3c_gpio_cfgpin(S3C2410_GPB(0), S3C2410_GPB0_TOUT0);
return 0;
}
static void h1940_backlight_exit(struct device *dev)
{
- s3c2410_gpio_cfgpin(S3C2410_GPB(0), 1/*S3C2410_GPB0_OUTP*/);
+ gpio_direction_output(S3C2410_GPB(0), 1);
}
static struct platform_pwm_backlight_data backlight_data = {
@@ -245,18 +246,18 @@ static void h1940_lcd_power_set(struct plat_lcd_data *pd,
if (!power) {
/* set to 3ec */
- s3c2410_gpio_setpin(S3C2410_GPC(0), 0);
+ gpio_direction_output(S3C2410_GPC(0), 0);
/* wait for 3ac */
do {
- value = s3c2410_gpio_getpin(S3C2410_GPC(6));
+ value = gpio_get_value(S3C2410_GPC(6));
} while (value);
/* set to 38c */
- s3c2410_gpio_setpin(S3C2410_GPC(5), 0);
+ gpio_direction_output(S3C2410_GPC(5), 0);
} else {
/* Set to 3ac */
- s3c2410_gpio_setpin(S3C2410_GPC(5), 1);
+ gpio_direction_output(S3C2410_GPC(5), 1);
/* Set to 3ad */
- s3c2410_gpio_setpin(S3C2410_GPC(0), 1);
+ gpio_direction_output(S3C2410_GPC(0), 1);
}
}
@@ -271,7 +272,6 @@ static struct platform_device h1940_lcd_powerdev = {
};
static struct platform_device *h1940_devices[] __initdata = {
- &s3c_device_ts,
&s3c_device_ohci,
&s3c_device_lcd,
&s3c_device_wdt,
@@ -285,6 +285,8 @@ static struct platform_device *h1940_devices[] __initdata = {
&s3c_device_timer[0],
&h1940_backlight,
&h1940_lcd_powerdev,
+ &s3c_device_adc,
+ &s3c_device_ts,
};
static void __init h1940_map_io(void)
@@ -332,6 +334,7 @@ static void __init h1940_init(void)
gpio_request(S3C2410_GPC(5), "LCD power");
gpio_request(S3C2410_GPC(6), "LCD power");
+ gpio_direction_input(S3C2410_GPC(6));
platform_add_devices(h1940_devices, ARRAY_SIZE(h1940_devices));
}
diff --git a/arch/arm/mach-s3c2410/mach-n30.c b/arch/arm/mach-s3c2410/mach-n30.c
index 684710f88142..75a9fd37a467 100644
--- a/arch/arm/mach-s3c2410/mach-n30.c
+++ b/arch/arm/mach-s3c2410/mach-n30.c
@@ -86,10 +86,10 @@ static void n30_udc_pullup(enum s3c2410_udc_cmd_e cmd)
{
switch (cmd) {
case S3C2410_UDC_P_ENABLE :
- s3c2410_gpio_setpin(S3C2410_GPB(3), 1);
+ gpio_set_value(S3C2410_GPB(3), 1);
break;
case S3C2410_UDC_P_DISABLE :
- s3c2410_gpio_setpin(S3C2410_GPB(3), 0);
+ gpio_set_value(S3C2410_GPB(3), 0);
break;
case S3C2410_UDC_P_RESET :
break;
@@ -536,6 +536,9 @@ static void __init n30_init(void)
platform_add_devices(n35_devices, ARRAY_SIZE(n35_devices));
}
+
+ WARN_ON(gpio_request(S3C2410_GPB(3), "udc pup"));
+ gpio_direction_output(S3C2410_GPB(3), 0);
}
MACHINE_START(N30, "Acer-N30")
diff --git a/arch/arm/mach-s3c2410/mach-qt2410.c b/arch/arm/mach-s3c2410/mach-qt2410.c
index 92a4ec375d82..d0e87b6e2e0f 100644
--- a/arch/arm/mach-s3c2410/mach-qt2410.c
+++ b/arch/arm/mach-s3c2410/mach-qt2410.c
@@ -58,6 +58,7 @@
#include <plat/iic.h>
#include <plat/common-smdk.h>
+#include <plat/gpio-cfg.h>
#include <plat/devs.h>
#include <plat/cpu.h>
#include <plat/pm.h>
@@ -219,10 +220,10 @@ static void spi_gpio_cs(struct s3c2410_spigpio_info *spi, int cs)
{
switch (cs) {
case BITBANG_CS_ACTIVE:
- s3c2410_gpio_setpin(S3C2410_GPB(5), 0);
+ gpio_set_value(S3C2410_GPB(5), 0);
break;
case BITBANG_CS_INACTIVE:
- s3c2410_gpio_setpin(S3C2410_GPB(5), 1);
+ gpio_set_value(S3C2410_GPB(5), 1);
break;
}
}
@@ -347,13 +348,14 @@ static void __init qt2410_machine_init(void)
}
s3c24xx_fb_set_platdata(&qt2410_fb_info);
- s3c2410_gpio_cfgpin(S3C2410_GPB(0), S3C2410_GPIO_OUTPUT);
+ s3c_gpio_cfgpin(S3C2410_GPB(0), S3C2410_GPIO_OUTPUT);
s3c2410_gpio_setpin(S3C2410_GPB(0), 1);
s3c24xx_udc_set_platdata(&qt2410_udc_cfg);
s3c_i2c0_set_platdata(NULL);
- s3c2410_gpio_cfgpin(S3C2410_GPB(5), S3C2410_GPIO_OUTPUT);
+ WARN_ON(gpio_request(S3C2410_GPB(5), "spi cs"));
+ gpio_direction_output(S3C2410_GPB(5), 1);
platform_add_devices(qt2410_devices, ARRAY_SIZE(qt2410_devices));
s3c_pm_init();
diff --git a/arch/arm/mach-s3c2410/mach-vr1000.c b/arch/arm/mach-s3c2410/mach-vr1000.c
index 9051f0d31123..d540d79dd264 100644
--- a/arch/arm/mach-s3c2410/mach-vr1000.c
+++ b/arch/arm/mach-s3c2410/mach-vr1000.c
@@ -357,8 +357,7 @@ static struct clk *vr1000_clocks[] __initdata = {
static void vr1000_power_off(void)
{
- s3c2410_gpio_cfgpin(S3C2410_GPB(9), S3C2410_GPIO_OUTPUT);
- s3c2410_gpio_setpin(S3C2410_GPB(9), 1);
+ gpio_direction_output(S3C2410_GPB(9), 1);
}
static void __init vr1000_map_io(void)
@@ -395,6 +394,8 @@ static void __init vr1000_init(void)
nor_simtec_init();
simtec_audio_add(NULL, true, NULL);
+
+ WARN_ON(gpio_request(S3C2410_GPB(9), "power off"));
}
MACHINE_START(VR1000, "Thorcom-VR1000")
diff --git a/arch/arm/mach-s3c2410/s3c2410.c b/arch/arm/mach-s3c2410/s3c2410.c
index 91ba42f688ac..adc90a3c5890 100644
--- a/arch/arm/mach-s3c2410/s3c2410.c
+++ b/arch/arm/mach-s3c2410/s3c2410.c
@@ -16,6 +16,7 @@
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/init.h>
+#include <linux/gpio.h>
#include <linux/clk.h>
#include <linux/sysdev.h>
#include <linux/serial_core.h>
@@ -40,6 +41,10 @@
#include <plat/clock.h>
#include <plat/pll.h>
+#include <plat/gpio-core.h>
+#include <plat/gpio-cfg.h>
+#include <plat/gpio-cfg-helpers.h>
+
/* Initial IO mappings */
static struct map_desc s3c2410_iodesc[] __initdata = {
@@ -65,6 +70,9 @@ void __init s3c2410_init_uarts(struct s3c2410_uartcfg *cfg, int no)
void __init s3c2410_map_io(void)
{
+ s3c24xx_gpiocfg_default.set_pull = s3c_gpio_setpull_1up;
+ s3c24xx_gpiocfg_default.get_pull = s3c_gpio_getpull_1up;
+
iotable_init(s3c2410_iodesc, ARRAY_SIZE(s3c2410_iodesc));
}
diff --git a/arch/arm/mach-s3c2412/Kconfig b/arch/arm/mach-s3c2412/Kconfig
index 9a8c0657ae50..cef6a65637bd 100644
--- a/arch/arm/mach-s3c2412/Kconfig
+++ b/arch/arm/mach-s3c2412/Kconfig
@@ -16,7 +16,8 @@ config CPU_S3C2412
config CPU_S3C2412_ONLY
bool
depends on ARCH_S3C2410 && !CPU_S3C2400 && !CPU_S3C2410 && \
- !CPU_S3C2440 && !CPU_S3C2442 && !CPU_S3C2443 && CPU_S3C2412
+ !CPU_2416 && !CPU_S3C2440 && !CPU_S3C2442 && \
+ !CPU_S3C2443 && CPU_S3C2412
default y if CPU_S3C2412
config S3C2412_DMA
diff --git a/arch/arm/mach-s3c2412/dma.c b/arch/arm/mach-s3c2412/dma.c
index e880524904eb..7abecfca0b7e 100644
--- a/arch/arm/mach-s3c2412/dma.c
+++ b/arch/arm/mach-s3c2412/dma.c
@@ -30,7 +30,6 @@
#include <mach/regs-mem.h>
#include <mach/regs-lcd.h>
#include <mach/regs-sdi.h>
-#include <plat/regs-s3c2412-iis.h>
#include <plat/regs-iis.h>
#include <plat/regs-spi.h>
@@ -119,13 +118,11 @@ static struct s3c24xx_dma_map __initdata s3c2412_dma_mappings[] = {
.name = "i2s-sdi",
.channels = MAP(S3C2412_DMAREQSEL_I2SRX),
.channels_rx = MAP(S3C2412_DMAREQSEL_I2SRX),
- .hw_addr.from = S3C2410_PA_IIS + S3C2412_IISRXD,
},
[DMACH_I2S_OUT] = {
.name = "i2s-sdo",
.channels = MAP(S3C2412_DMAREQSEL_I2STX),
.channels_rx = MAP(S3C2412_DMAREQSEL_I2STX),
- .hw_addr.to = S3C2410_PA_IIS + S3C2412_IISTXD,
},
[DMACH_USB_EP1] = {
.name = "usb-ep1",
diff --git a/arch/arm/mach-s3c2412/mach-jive.c b/arch/arm/mach-s3c2412/mach-jive.c
index 14f4798291aa..43160183571a 100644
--- a/arch/arm/mach-s3c2412/mach-jive.c
+++ b/arch/arm/mach-s3c2412/mach-jive.c
@@ -48,6 +48,7 @@
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
+#include <plat/gpio-cfg.h>
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/cpu.h>
@@ -357,8 +358,7 @@ static void jive_lcm_reset(unsigned int set)
{
printk(KERN_DEBUG "%s(%d)\n", __func__, set);
- s3c2410_gpio_setpin(S3C2410_GPG(13), set);
- s3c2410_gpio_cfgpin(S3C2410_GPG(13), S3C2410_GPIO_OUTPUT);
+ gpio_set_value(S3C2410_GPG(13), set);
}
#undef LCD_UPPER_MARGIN
@@ -391,7 +391,7 @@ static struct ili9320_platdata jive_lcm_config = {
static void jive_lcd_spi_chipselect(struct s3c2410_spigpio_info *spi, int cs)
{
- s3c2410_gpio_setpin(S3C2410_GPB(7), cs ? 0 : 1);
+ gpio_set_value(S3C2410_GPB(7), cs ? 0 : 1);
}
static struct s3c2410_spigpio_info jive_lcd_spi = {
@@ -413,7 +413,7 @@ static struct platform_device jive_device_lcdspi = {
static void jive_wm8750_chipselect(struct s3c2410_spigpio_info *spi, int cs)
{
- s3c2410_gpio_setpin(S3C2410_GPH(10), cs ? 0 : 1);
+ gpio_set_value(S3C2410_GPH(10), cs ? 0 : 1);
}
static struct s3c2410_spigpio_info jive_wm8750_spi = {
@@ -531,7 +531,7 @@ static void jive_power_off(void)
printk(KERN_INFO "powering system down...\n");
s3c2410_gpio_setpin(S3C2410_GPC(5), 1);
- s3c2410_gpio_cfgpin(S3C2410_GPC(5), S3C2410_GPIO_OUTPUT);
+ s3c_gpio_cfgpin(S3C2410_GPC(5), S3C2410_GPIO_OUTPUT);
}
static void __init jive_machine_init(void)
@@ -636,22 +636,22 @@ static void __init jive_machine_init(void)
/* initialise the spi */
- s3c2410_gpio_setpin(S3C2410_GPG(13), 0);
- s3c2410_gpio_cfgpin(S3C2410_GPG(13), S3C2410_GPIO_OUTPUT);
+ gpio_request(S3C2410_GPG(13), "lcm reset");
+ gpio_direction_output(S3C2410_GPG(13), 0);
- s3c2410_gpio_setpin(S3C2410_GPB(7), 1);
- s3c2410_gpio_cfgpin(S3C2410_GPB(7), S3C2410_GPIO_OUTPUT);
+ gpio_request(S3C2410_GPB(7), "jive spi");
+ gpio_direction_output(S3C2410_GPB(7), 1);
s3c2410_gpio_setpin(S3C2410_GPB(6), 0);
- s3c2410_gpio_cfgpin(S3C2410_GPB(6), S3C2410_GPIO_OUTPUT);
+ s3c_gpio_cfgpin(S3C2410_GPB(6), S3C2410_GPIO_OUTPUT);
s3c2410_gpio_setpin(S3C2410_GPG(8), 1);
- s3c2410_gpio_cfgpin(S3C2410_GPG(8), S3C2410_GPIO_OUTPUT);
+ s3c_gpio_cfgpin(S3C2410_GPG(8), S3C2410_GPIO_OUTPUT);
/* initialise the WM8750 spi */
- s3c2410_gpio_setpin(S3C2410_GPH(10), 1);
- s3c2410_gpio_cfgpin(S3C2410_GPH(10), S3C2410_GPIO_OUTPUT);
+ gpio_request(S3C2410_GPH(10), "jive wm8750 spi");
+ gpio_direction_output(S3C2410_GPH(10), 1);
/* Turn off suspend on both USB ports, and switch the
* selectable USB port to USB device mode. */
diff --git a/arch/arm/mach-s3c2412/mach-smdk2413.c b/arch/arm/mach-s3c2412/mach-smdk2413.c
index 0392065af1af..faddb36ed23b 100644
--- a/arch/arm/mach-s3c2412/mach-smdk2413.c
+++ b/arch/arm/mach-s3c2412/mach-smdk2413.c
@@ -85,10 +85,10 @@ static void smdk2413_udc_pullup(enum s3c2410_udc_cmd_e cmd)
switch (cmd)
{
case S3C2410_UDC_P_ENABLE :
- s3c2410_gpio_setpin(S3C2410_GPF(2), 1);
+ gpio_set_value(S3C2410_GPF(2), 1);
break;
case S3C2410_UDC_P_DISABLE :
- s3c2410_gpio_setpin(S3C2410_GPF(2), 0);
+ gpio_set_value(S3C2410_GPF(2), 0);
break;
case S3C2410_UDC_P_RESET :
break;
@@ -134,8 +134,8 @@ static void __init smdk2413_machine_init(void)
{ /* Turn off suspend on both USB ports, and switch the
* selectable USB port to USB device mode. */
- s3c2410_gpio_setpin(S3C2410_GPF(2), 0);
- s3c2410_gpio_cfgpin(S3C2410_GPF(2), S3C2410_GPIO_OUTPUT);
+ WARN_ON(gpio_request(S3C2410_GPF(2), "udc pull"));
+ gpio_direction_output(S3C2410_GPF(2), 0);
s3c2410_modify_misccr(S3C2410_MISCCR_USBHOST |
S3C2410_MISCCR_USBSUSPND0 |
diff --git a/arch/arm/mach-s3c2416/Kconfig b/arch/arm/mach-s3c2416/Kconfig
new file mode 100644
index 000000000000..29103a6047de
--- /dev/null
+++ b/arch/arm/mach-s3c2416/Kconfig
@@ -0,0 +1,38 @@
+# arch/arm/mach-s3c2416/Kconfig
+#
+# Copyright 2009 Yauhen Kharuzhy <jekhor@gmail.com>
+#
+# Licensed under GPLv2
+
+# note, this also supports the S3C2450 which is so similar it has the same
+# ID code as the S3C2416.
+
+config CPU_S3C2416
+ bool
+ depends on ARCH_S3C2410
+ select CPU_ARM926T
+ select S3C2416_DMA if S3C2410_DMA
+ select CPU_LLSERIAL_S3C2440
+ select S3C_GPIO_PULL_UPDOWN
+ select SAMSUNG_CLKSRC
+ select S3C2443_CLOCK
+ help
+ Support for the S3C2416 SoC from the S3C24XX line
+
+config S3C2416_DMA
+ bool
+ depends on CPU_S3C2416
+ help
+ Internal config node for S3C2416 DMA support
+
+menu "S3C2416 Machines"
+
+config MACH_SMDK2416
+ bool "SMDK2416"
+ select CPU_S3C2416
+ select S3C_DEV_HSMMC
+ select S3C_DEV_HSMMC1
+ help
+ Say Y here if you are using an SMDK2416
+
+endmenu
diff --git a/arch/arm/mach-s3c2416/Makefile b/arch/arm/mach-s3c2416/Makefile
new file mode 100644
index 000000000000..6c12c7bf40ad
--- /dev/null
+++ b/arch/arm/mach-s3c2416/Makefile
@@ -0,0 +1,19 @@
+# arch/arm/mach-s3c2416/Makefile
+#
+# Copyright 2009 Yauhen Kharuzhy <jekhor@gmail.com>
+#
+# Licensed under GPLv2
+
+obj-y :=
+obj-m :=
+obj-n :=
+obj- :=
+
+obj-$(CONFIG_CPU_S3C2416) += s3c2416.o clock.o
+obj-$(CONFIG_CPU_S3C2416) += irq.o
+
+#obj-$(CONFIG_S3C2416_DMA) += dma.o
+
+# Machine support
+
+obj-$(CONFIG_MACH_SMDK2416) += mach-smdk2416.o
diff --git a/arch/arm/mach-s3c2416/clock.c b/arch/arm/mach-s3c2416/clock.c
new file mode 100644
index 000000000000..7ccf5a2a2bfc
--- /dev/null
+++ b/arch/arm/mach-s3c2416/clock.c
@@ -0,0 +1,135 @@
+/* linux/arch/arm/mach-s3c2416/clock.c
+ *
+ * Copyright (c) 2010 Simtec Electronics
+ * Copyright (c) 2010 Ben Dooks <ben-linux@fluff.org>
+ *
+ * S3C2416 Clock control support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/clk.h>
+
+#include <plat/s3c2416.h>
+#include <plat/s3c2443.h>
+#include <plat/clock.h>
+#include <plat/clock-clksrc.h>
+#include <plat/cpu.h>
+
+#include <plat/cpu-freq.h>
+#include <plat/pll6553x.h>
+#include <plat/pll.h>
+
+#include <asm/mach/map.h>
+
+#include <mach/regs-clock.h>
+#include <mach/regs-s3c2443-clock.h>
+
+static unsigned int armdiv[8] = {
+ [0] = 1,
+ [1] = 2,
+ [2] = 3,
+ [3] = 4,
+ [5] = 6,
+ [7] = 8,
+};
+
+/* ID to hardware numbering, 0 is HSMMC1, 1 is HSMMC0 */
+static struct clksrc_clk hsmmc_div[] = {
+ [0] = {
+ .clk = {
+ .name = "hsmmc-div",
+ .id = 1,
+ .parent = &clk_esysclk.clk,
+ },
+ .reg_div = { .reg = S3C2416_CLKDIV2, .size = 2, .shift = 6 },
+ },
+ [1] = {
+ .clk = {
+ .name = "hsmmc-div",
+ .id = 0,
+ .parent = &clk_esysclk.clk,
+ },
+ .reg_div = { .reg = S3C2443_CLKDIV1, .size = 2, .shift = 6 },
+ },
+};
+
+static struct clksrc_clk hsmmc_mux[] = {
+ [0] = {
+ .clk = {
+ .id = 1,
+ .name = "hsmmc-if",
+ .ctrlbit = (1 << 6),
+ .enable = s3c2443_clkcon_enable_s,
+ },
+ .sources = &(struct clksrc_sources) {
+ .nr_sources = 2,
+ .sources = (struct clk *[]) {
+ [0] = &hsmmc_div[0].clk,
+ [1] = NULL, /* to fix */
+ },
+ },
+ .reg_src = { .reg = S3C2443_CLKSRC, .size = 1, .shift = 16 },
+ },
+ [1] = {
+ .clk = {
+ .id = 0,
+ .name = "hsmmc-if",
+ .ctrlbit = (1 << 12),
+ .enable = s3c2443_clkcon_enable_s,
+ },
+ .sources = &(struct clksrc_sources) {
+ .nr_sources = 2,
+ .sources = (struct clk *[]) {
+ [0] = &hsmmc_div[1].clk,
+ [1] = NULL, /* to fix */
+ },
+ },
+ .reg_src = { .reg = S3C2443_CLKSRC, .size = 1, .shift = 17 },
+ },
+};
+
+
+static inline unsigned int s3c2416_fclk_div(unsigned long clkcon0)
+{
+ clkcon0 &= 7 << S3C2443_CLKDIV0_ARMDIV_SHIFT;
+
+ return armdiv[clkcon0 >> S3C2443_CLKDIV0_ARMDIV_SHIFT];
+}
+
+void __init_or_cpufreq s3c2416_setup_clocks(void)
+{
+ s3c2443_common_setup_clocks(s3c2416_get_pll, s3c2416_fclk_div);
+}
+
+
+static struct clksrc_clk *clksrcs[] __initdata = {
+ &hsmmc_div[0],
+ &hsmmc_div[1],
+ &hsmmc_mux[0],
+ &hsmmc_mux[1],
+};
+
+void __init s3c2416_init_clocks(int xtal)
+{
+ u32 epllcon = __raw_readl(S3C2443_EPLLCON);
+ u32 epllcon1 = __raw_readl(S3C2443_EPLLCON+4);
+ int ptr;
+
+ /* s3c2416 EPLL compatible with s3c64xx */
+ clk_epll.rate = s3c_get_pll6553x(xtal, epllcon, epllcon1);
+
+ clk_epll.parent = &clk_epllref.clk;
+
+ s3c2443_common_init_clocks(xtal, s3c2416_get_pll, s3c2416_fclk_div);
+
+ for (ptr = 0; ptr < ARRAY_SIZE(clksrcs); ptr++)
+ s3c_register_clksrc(clksrcs[ptr], 1);
+
+ s3c_pwmclk_init();
+
+}
diff --git a/arch/arm/mach-s3c2416/irq.c b/arch/arm/mach-s3c2416/irq.c
new file mode 100644
index 000000000000..89f521d59d06
--- /dev/null
+++ b/arch/arm/mach-s3c2416/irq.c
@@ -0,0 +1,254 @@
+/* linux/arch/arm/mach-s3c2416/irq.c
+ *
+ * Copyright (c) 2009 Yauhen Kharuzhy <jekhor@gmail.com>,
+ * as part of OpenInkpot project
+ * Copyright (c) 2009 Promwad Innovation Company
+ * Yauhen Kharuzhy <yauhen.kharuzhy@promwad.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+*/
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/sysdev.h>
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+#include <asm/irq.h>
+
+#include <asm/mach/irq.h>
+
+#include <mach/regs-irq.h>
+#include <mach/regs-gpio.h>
+
+#include <plat/cpu.h>
+#include <plat/pm.h>
+#include <plat/irq.h>
+
+#define INTMSK(start, end) ((1 << ((end) + 1 - (start))) - 1)
+
+static inline void s3c2416_irq_demux(unsigned int irq, unsigned int len)
+{
+ unsigned int subsrc, submsk;
+ unsigned int end;
+
+ /* read the current pending interrupts, and the mask
+ * for what it is available */
+
+ subsrc = __raw_readl(S3C2410_SUBSRCPND);
+ submsk = __raw_readl(S3C2410_INTSUBMSK);
+
+ subsrc &= ~submsk;
+ subsrc >>= (irq - S3C2410_IRQSUB(0));
+ subsrc &= (1 << len)-1;
+
+ end = len + irq;
+
+ for (; irq < end && subsrc; irq++) {
+ if (subsrc & 1)
+ generic_handle_irq(irq);
+
+ subsrc >>= 1;
+ }
+}
+
+/* WDT/AC97 sub interrupts */
+
+static void s3c2416_irq_demux_wdtac97(unsigned int irq, struct irq_desc *desc)
+{
+ s3c2416_irq_demux(IRQ_S3C2443_WDT, 4);
+}
+
+#define INTMSK_WDTAC97 (1UL << (IRQ_WDT - IRQ_EINT0))
+#define SUBMSK_WDTAC97 INTMSK(IRQ_S3C2443_WDT, IRQ_S3C2443_AC97)
+
+static void s3c2416_irq_wdtac97_mask(unsigned int irqno)
+{
+ s3c_irqsub_mask(irqno, INTMSK_WDTAC97, SUBMSK_WDTAC97);
+}
+
+static void s3c2416_irq_wdtac97_unmask(unsigned int irqno)
+{
+ s3c_irqsub_unmask(irqno, INTMSK_WDTAC97);
+}
+
+static void s3c2416_irq_wdtac97_ack(unsigned int irqno)
+{
+ s3c_irqsub_maskack(irqno, INTMSK_WDTAC97, SUBMSK_WDTAC97);
+}
+
+static struct irq_chip s3c2416_irq_wdtac97 = {
+ .mask = s3c2416_irq_wdtac97_mask,
+ .unmask = s3c2416_irq_wdtac97_unmask,
+ .ack = s3c2416_irq_wdtac97_ack,
+};
+
+
+/* LCD sub interrupts */
+
+static void s3c2416_irq_demux_lcd(unsigned int irq, struct irq_desc *desc)
+{
+ s3c2416_irq_demux(IRQ_S3C2443_LCD1, 4);
+}
+
+#define INTMSK_LCD (1UL << (IRQ_LCD - IRQ_EINT0))
+#define SUBMSK_LCD INTMSK(IRQ_S3C2443_LCD1, IRQ_S3C2443_LCD4)
+
+static void s3c2416_irq_lcd_mask(unsigned int irqno)
+{
+ s3c_irqsub_mask(irqno, INTMSK_LCD, SUBMSK_LCD);
+}
+
+static void s3c2416_irq_lcd_unmask(unsigned int irqno)
+{
+ s3c_irqsub_unmask(irqno, INTMSK_LCD);
+}
+
+static void s3c2416_irq_lcd_ack(unsigned int irqno)
+{
+ s3c_irqsub_maskack(irqno, INTMSK_LCD, SUBMSK_LCD);
+}
+
+static struct irq_chip s3c2416_irq_lcd = {
+ .mask = s3c2416_irq_lcd_mask,
+ .unmask = s3c2416_irq_lcd_unmask,
+ .ack = s3c2416_irq_lcd_ack,
+};
+
+
+/* DMA sub interrupts */
+
+static void s3c2416_irq_demux_dma(unsigned int irq, struct irq_desc *desc)
+{
+ s3c2416_irq_demux(IRQ_S3C2443_DMA0, 6);
+}
+
+#define INTMSK_DMA (1UL << (IRQ_S3C2443_DMA - IRQ_EINT0))
+#define SUBMSK_DMA INTMSK(IRQ_S3C2443_DMA0, IRQ_S3C2443_DMA5)
+
+
+static void s3c2416_irq_dma_mask(unsigned int irqno)
+{
+ s3c_irqsub_mask(irqno, INTMSK_DMA, SUBMSK_DMA);
+}
+
+static void s3c2416_irq_dma_unmask(unsigned int irqno)
+{
+ s3c_irqsub_unmask(irqno, INTMSK_DMA);
+}
+
+static void s3c2416_irq_dma_ack(unsigned int irqno)
+{
+ s3c_irqsub_maskack(irqno, INTMSK_DMA, SUBMSK_DMA);
+}
+
+static struct irq_chip s3c2416_irq_dma = {
+ .mask = s3c2416_irq_dma_mask,
+ .unmask = s3c2416_irq_dma_unmask,
+ .ack = s3c2416_irq_dma_ack,
+};
+
+
+/* UART3 sub interrupts */
+
+static void s3c2416_irq_demux_uart3(unsigned int irq, struct irq_desc *desc)
+{
+ s3c2416_irq_demux(IRQ_S3C2443_UART3, 3);
+}
+
+#define INTMSK_UART3 (1UL << (IRQ_S3C2443_UART3 - IRQ_EINT0))
+#define SUBMSK_UART3 (0xf << (IRQ_S3C2443_RX3 - S3C2410_IRQSUB(0)))
+
+
+static void s3c2416_irq_uart3_mask(unsigned int irqno)
+{
+ s3c_irqsub_mask(irqno, INTMSK_UART3, SUBMSK_UART3);
+}
+
+static void s3c2416_irq_uart3_unmask(unsigned int irqno)
+{
+ s3c_irqsub_unmask(irqno, INTMSK_UART3);
+}
+
+static void s3c2416_irq_uart3_ack(unsigned int irqno)
+{
+ s3c_irqsub_maskack(irqno, INTMSK_UART3, SUBMSK_UART3);
+}
+
+static struct irq_chip s3c2416_irq_uart3 = {
+ .mask = s3c2416_irq_uart3_mask,
+ .unmask = s3c2416_irq_uart3_unmask,
+ .ack = s3c2416_irq_uart3_ack,
+};
+
+
+/* IRQ initialisation code */
+
+static int __init s3c2416_add_sub(unsigned int base,
+ void (*demux)(unsigned int,
+ struct irq_desc *),
+ struct irq_chip *chip,
+ unsigned int start, unsigned int end)
+{
+ unsigned int irqno;
+
+ set_irq_chip(base, &s3c_irq_level_chip);
+ set_irq_handler(base, handle_level_irq);
+ set_irq_chained_handler(base, demux);
+
+ for (irqno = start; irqno <= end; irqno++) {
+ set_irq_chip(irqno, chip);
+ set_irq_handler(irqno, handle_level_irq);
+ set_irq_flags(irqno, IRQF_VALID);
+ }
+
+ return 0;
+}
+
+static int __init s3c2416_irq_add(struct sys_device *sysdev)
+{
+ printk(KERN_INFO "S3C2416: IRQ Support\n");
+
+ s3c2416_add_sub(IRQ_LCD, s3c2416_irq_demux_lcd, &s3c2416_irq_lcd,
+ IRQ_S3C2443_LCD2, IRQ_S3C2443_LCD4);
+
+ s3c2416_add_sub(IRQ_S3C2443_DMA, s3c2416_irq_demux_dma,
+ &s3c2416_irq_dma, IRQ_S3C2443_DMA0, IRQ_S3C2443_DMA5);
+
+ s3c2416_add_sub(IRQ_S3C2443_UART3, s3c2416_irq_demux_uart3,
+ &s3c2416_irq_uart3,
+ IRQ_S3C2443_RX3, IRQ_S3C2443_ERR3);
+
+ s3c2416_add_sub(IRQ_WDT, s3c2416_irq_demux_wdtac97,
+ &s3c2416_irq_wdtac97,
+ IRQ_S3C2443_WDT, IRQ_S3C2443_AC97);
+
+ return 0;
+}
+
+static struct sysdev_driver s3c2416_irq_driver = {
+ .add = s3c2416_irq_add,
+};
+
+static int __init s3c2416_irq_init(void)
+{
+ return sysdev_driver_register(&s3c2416_sysclass, &s3c2416_irq_driver);
+}
+
+arch_initcall(s3c2416_irq_init);
+
diff --git a/arch/arm/mach-s3c2416/mach-smdk2416.c b/arch/arm/mach-s3c2416/mach-smdk2416.c
new file mode 100644
index 000000000000..99d24c44f30f
--- /dev/null
+++ b/arch/arm/mach-s3c2416/mach-smdk2416.c
@@ -0,0 +1,150 @@
+/* linux/arch/arm/mach-s3c2416/mach-hanlin_v3c.c
+ *
+ * Copyright (c) 2009 Yauhen Kharuzhy <jekhor@gmail.com>,
+ * as part of OpenInkpot project
+ * Copyright (c) 2009 Promwad Innovation Company
+ * Yauhen Kharuzhy <yauhen.kharuzhy@promwad.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+*/
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/init.h>
+#include <linux/serial_core.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/mtd/partitions.h>
+#include <linux/gpio.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/irq.h>
+
+#include <mach/hardware.h>
+#include <asm/irq.h>
+#include <asm/mach-types.h>
+
+#include <plat/regs-serial.h>
+#include <mach/regs-gpio.h>
+#include <mach/regs-lcd.h>
+
+#include <mach/idle.h>
+#include <mach/fb.h>
+#include <mach/leds-gpio.h>
+#include <plat/iic.h>
+
+#include <plat/s3c2416.h>
+#include <plat/clock.h>
+#include <plat/devs.h>
+#include <plat/cpu.h>
+#include <plat/nand.h>
+
+#include <plat/common-smdk.h>
+
+static struct map_desc smdk2416_iodesc[] __initdata = {
+ /* ISA IO Space map (memory space selected by A24) */
+
+ {
+ .virtual = (u32)S3C24XX_VA_ISA_WORD,
+ .pfn = __phys_to_pfn(S3C2410_CS2),
+ .length = 0x10000,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (u32)S3C24XX_VA_ISA_WORD + 0x10000,
+ .pfn = __phys_to_pfn(S3C2410_CS2 + (1<<24)),
+ .length = SZ_4M,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (u32)S3C24XX_VA_ISA_BYTE,
+ .pfn = __phys_to_pfn(S3C2410_CS2),
+ .length = 0x10000,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (u32)S3C24XX_VA_ISA_BYTE + 0x10000,
+ .pfn = __phys_to_pfn(S3C2410_CS2 + (1<<24)),
+ .length = SZ_4M,
+ .type = MT_DEVICE,
+ }
+};
+
+#define UCON (S3C2410_UCON_DEFAULT | \
+ S3C2440_UCON_PCLK | \
+ S3C2443_UCON_RXERR_IRQEN)
+
+#define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE)
+
+#define UFCON (S3C2410_UFCON_RXTRIG8 | \
+ S3C2410_UFCON_FIFOMODE | \
+ S3C2440_UFCON_TXTRIG16)
+
+static struct s3c2410_uartcfg smdk2416_uartcfgs[] __initdata = {
+ [0] = {
+ .hwport = 0,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
+ },
+ [1] = {
+ .hwport = 1,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
+ },
+ /* IR port */
+ [2] = {
+ .hwport = 2,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON | 0x50,
+ .ufcon = UFCON,
+ }
+};
+
+static struct platform_device *smdk2416_devices[] __initdata = {
+ &s3c_device_wdt,
+ &s3c_device_ohci,
+ &s3c_device_i2c0,
+ &s3c_device_hsmmc0,
+ &s3c_device_hsmmc1,
+};
+
+static void __init smdk2416_map_io(void)
+{
+
+ s3c24xx_init_io(smdk2416_iodesc, ARRAY_SIZE(smdk2416_iodesc));
+ s3c24xx_init_clocks(12000000);
+ s3c24xx_init_uarts(smdk2416_uartcfgs, ARRAY_SIZE(smdk2416_uartcfgs));
+
+}
+
+static void __init smdk2416_machine_init(void)
+{
+ s3c_i2c0_set_platdata(NULL);
+
+ gpio_request(S3C2410_GPB(4), "USBHost Power");
+ gpio_direction_output(S3C2410_GPB(4), 1);
+
+ platform_add_devices(smdk2416_devices, ARRAY_SIZE(smdk2416_devices));
+ smdk_machine_init();
+}
+
+MACHINE_START(SMDK2416, "SMDK2416")
+ /* Maintainer: Yauhen Kharuzhy <jekhor@gmail.com> */
+ .phys_io = S3C2410_PA_UART,
+ .io_pg_offst = (((u32)S3C24XX_VA_UART) >> 18) & 0xfffc,
+ .boot_params = S3C2410_SDRAM_PA + 0x100,
+
+ .init_irq = s3c24xx_init_irq,
+ .map_io = smdk2416_map_io,
+ .init_machine = smdk2416_machine_init,
+ .timer = &s3c24xx_timer,
+MACHINE_END
diff --git a/arch/arm/mach-s3c2416/s3c2416.c b/arch/arm/mach-s3c2416/s3c2416.c
new file mode 100644
index 000000000000..3bff05745d0b
--- /dev/null
+++ b/arch/arm/mach-s3c2416/s3c2416.c
@@ -0,0 +1,128 @@
+/* linux/arch/arm/mach-s3c2416/s3c2416.c
+ *
+ * Copyright (c) 2009 Yauhen Kharuzhy <jekhor@gmail.com>,
+ * as part of OpenInkpot project
+ * Copyright (c) 2009 Promwad Innovation Company
+ * Yauhen Kharuzhy <yauhen.kharuzhy@promwad.com>
+ *
+ * Samsung S3C2416 Mobile CPU support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/serial_core.h>
+#include <linux/sysdev.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/irq.h>
+
+#include <mach/hardware.h>
+#include <asm/proc-fns.h>
+#include <asm/irq.h>
+
+#include <mach/reset.h>
+#include <mach/idle.h>
+#include <mach/regs-s3c2443-clock.h>
+
+#include <plat/gpio-core.h>
+#include <plat/gpio-cfg.h>
+#include <plat/gpio-cfg-helpers.h>
+#include <plat/s3c2416.h>
+#include <plat/devs.h>
+#include <plat/cpu.h>
+
+#include <plat/iic-core.h>
+
+static struct map_desc s3c2416_iodesc[] __initdata = {
+ IODESC_ENT(WATCHDOG),
+ IODESC_ENT(CLKPWR),
+ IODESC_ENT(TIMER),
+};
+
+struct sysdev_class s3c2416_sysclass = {
+ .name = "s3c2416-core",
+};
+
+static struct sys_device s3c2416_sysdev = {
+ .cls = &s3c2416_sysclass,
+};
+
+static void s3c2416_hard_reset(void)
+{
+ __raw_writel(S3C2443_SWRST_RESET, S3C2443_SWRST);
+}
+
+int __init s3c2416_init(void)
+{
+ printk(KERN_INFO "S3C2416: Initializing architecture\n");
+
+ s3c24xx_reset_hook = s3c2416_hard_reset;
+ /* s3c24xx_idle = s3c2416_idle; */
+
+ /* change WDT IRQ number */
+ s3c_device_wdt.resource[1].start = IRQ_S3C2443_WDT;
+ s3c_device_wdt.resource[1].end = IRQ_S3C2443_WDT;
+
+ /* the i2c devices are directly compatible with s3c2440 */
+ s3c_i2c0_setname("s3c2440-i2c");
+ s3c_i2c1_setname("s3c2440-i2c");
+
+ return sysdev_register(&s3c2416_sysdev);
+}
+
+void __init s3c2416_init_uarts(struct s3c2410_uartcfg *cfg, int no)
+{
+ s3c24xx_init_uartdevs("s3c2440-uart", s3c2410_uart_resources, cfg, no);
+
+ s3c_device_nand.name = "s3c2416-nand";
+}
+
+/* s3c2416_map_io
+ *
+ * register the standard cpu IO areas, and any passed in from the
+ * machine specific initialisation.
+ */
+
+void __init s3c2416_map_io(void)
+{
+ s3c24xx_gpiocfg_default.set_pull = s3c_gpio_setpull_updown;
+ s3c24xx_gpiocfg_default.get_pull = s3c_gpio_getpull_updown;
+
+ iotable_init(s3c2416_iodesc, ARRAY_SIZE(s3c2416_iodesc));
+}
+
+/* need to register class before we actually register the device, and
+ * we also need to ensure that it has been initialised before any of the
+ * drivers even try to use it (even if not on an s3c2416 based system)
+ * as a driver which may support both 2443 and 2440 may try and use it.
+*/
+
+static int __init s3c2416_core_init(void)
+{
+ return sysdev_class_register(&s3c2416_sysclass);
+}
+
+core_initcall(s3c2416_core_init);
diff --git a/arch/arm/mach-s3c2440/Kconfig b/arch/arm/mach-s3c2440/Kconfig
index 7f465265cf04..9d102b912091 100644
--- a/arch/arm/mach-s3c2440/Kconfig
+++ b/arch/arm/mach-s3c2440/Kconfig
@@ -6,6 +6,7 @@ config CPU_S3C2440
bool
depends on ARCH_S3C2410
select CPU_ARM920T
+ select S3C_GPIO_PULL_UP
select S3C2410_CLOCK
select S3C2410_PM if PM
select S3C2410_GPIO
diff --git a/arch/arm/mach-s3c2440/mach-mini2440.c b/arch/arm/mach-s3c2440/mach-mini2440.c
index 571b17683d96..a76bcda210ad 100644
--- a/arch/arm/mach-s3c2440/mach-mini2440.c
+++ b/arch/arm/mach-s3c2440/mach-mini2440.c
@@ -53,6 +53,7 @@
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
+#include <plat/gpio-cfg.h>
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/cpu.h>
@@ -102,10 +103,10 @@ static void mini2440_udc_pullup(enum s3c2410_udc_cmd_e cmd)
switch (cmd) {
case S3C2410_UDC_P_ENABLE :
- s3c2410_gpio_setpin(S3C2410_GPC(5), 1);
+ gpio_set_value(S3C2410_GPC(5), 1);
break;
case S3C2410_UDC_P_DISABLE :
- s3c2410_gpio_setpin(S3C2410_GPC(5), 0);
+ gpio_set_value(S3C2410_GPC(5), 0);
break;
case S3C2410_UDC_P_RESET :
break;
@@ -632,25 +633,25 @@ static void __init mini2440_init(void)
mini2440_parse_features(&features, mini2440_features_str);
/* turn LCD on */
- s3c2410_gpio_cfgpin(S3C2410_GPC(0), S3C2410_GPC0_LEND);
+ s3c_gpio_cfgpin(S3C2410_GPC(0), S3C2410_GPC0_LEND);
/* Turn the backlight early on */
- s3c2410_gpio_setpin(S3C2410_GPG(4), 1);
- s3c2410_gpio_cfgpin(S3C2410_GPG(4), S3C2410_GPIO_OUTPUT);
+ WARN_ON(gpio_request(S3C2410_GPG(4), "backlight"));
+ gpio_direction_output(S3C2410_GPG(4), 1);
/* remove pullup on optional PWM backlight -- unused on 3.5 and 7"s */
- s3c2410_gpio_pullup(S3C2410_GPB(1), 0);
+ s3c_gpio_setpull(S3C2410_GPB(1), S3C_GPIO_PULL_UP);
s3c2410_gpio_setpin(S3C2410_GPB(1), 0);
- s3c2410_gpio_cfgpin(S3C2410_GPB(1), S3C2410_GPIO_INPUT);
+ s3c_gpio_cfgpin(S3C2410_GPB(1), S3C2410_GPIO_INPUT);
/* Make sure the D+ pullup pin is output */
- s3c2410_gpio_cfgpin(S3C2410_GPC(5), S3C2410_GPIO_OUTPUT);
+ WARN_ON(gpio_request(S3C2410_GPC(5), "udc pup"));
+ gpio_direction_output(S3C2410_GPC(5), 0);
/* mark the key as input, without pullups (there is one on the board) */
for (i = 0; i < ARRAY_SIZE(mini2440_buttons); i++) {
- s3c2410_gpio_pullup(mini2440_buttons[i].gpio, 0);
- s3c2410_gpio_cfgpin(mini2440_buttons[i].gpio,
- S3C2410_GPIO_INPUT);
+ s3c_gpio_setpull(mini2440_buttons[i].gpio, S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgpin(mini2440_buttons[i].gpio, S3C2410_GPIO_INPUT);
}
if (features.lcd_index != -1) {
int li;
diff --git a/arch/arm/mach-s3c2440/mach-nexcoder.c b/arch/arm/mach-s3c2440/mach-nexcoder.c
index 342041593f22..3ff62de45fde 100644
--- a/arch/arm/mach-s3c2440/mach-nexcoder.c
+++ b/arch/arm/mach-s3c2440/mach-nexcoder.c
@@ -40,6 +40,7 @@
#include <plat/regs-serial.h>
#include <plat/iic.h>
+#include <plat/gpio-cfg.h>
#include <plat/s3c2410.h>
#include <plat/s3c244x.h>
#include <plat/clock.h>
@@ -122,15 +123,15 @@ static void __init nexcoder_sensorboard_init(void)
{
// Initialize SCCB bus
s3c2410_gpio_setpin(S3C2410_GPE(14), 1); // IICSCL
- s3c2410_gpio_cfgpin(S3C2410_GPE(14), S3C2410_GPIO_OUTPUT);
+ s3c_gpio_cfgpin(S3C2410_GPE(14), S3C2410_GPIO_OUTPUT);
s3c2410_gpio_setpin(S3C2410_GPE(15), 1); // IICSDA
- s3c2410_gpio_cfgpin(S3C2410_GPE(15), S3C2410_GPIO_OUTPUT);
+ s3c_gpio_cfgpin(S3C2410_GPE(15), S3C2410_GPIO_OUTPUT);
// Power up the sensor board
s3c2410_gpio_setpin(S3C2410_GPF(1), 1);
- s3c2410_gpio_cfgpin(S3C2410_GPF(1), S3C2410_GPIO_OUTPUT); // CAM_GPIO7 => nLDO_PWRDN
+ s3c_gpio_cfgpin(S3C2410_GPF(1), S3C2410_GPIO_OUTPUT); // CAM_GPIO7 => nLDO_PWRDN
s3c2410_gpio_setpin(S3C2410_GPF(2), 0);
- s3c2410_gpio_cfgpin(S3C2410_GPF(2), S3C2410_GPIO_OUTPUT); // CAM_GPIO6 => CAM_PWRDN
+ s3c_gpio_cfgpin(S3C2410_GPF(2), S3C2410_GPIO_OUTPUT); // CAM_GPIO6 => CAM_PWRDN
}
static void __init nexcoder_map_io(void)
diff --git a/arch/arm/mach-s3c2440/mach-osiris.c b/arch/arm/mach-s3c2440/mach-osiris.c
index f35371db33f5..319458da71a0 100644
--- a/arch/arm/mach-s3c2440/mach-osiris.c
+++ b/arch/arm/mach-s3c2440/mach-osiris.c
@@ -49,6 +49,7 @@
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
+#include <plat/gpio-cfg.h>
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/cpu.h>
@@ -298,7 +299,7 @@ static int osiris_pm_suspend(struct sys_device *sd, pm_message_t state)
/* ensure that an nRESET is not generated on resume. */
s3c2410_gpio_setpin(S3C2410_GPA(21), 1);
- s3c2410_gpio_cfgpin(S3C2410_GPA(21), S3C2410_GPIO_OUTPUT);
+ s3c_gpio_cfgpin(S3C2410_GPA(21), S3C2410_GPIO_OUTPUT);
return 0;
}
@@ -310,7 +311,7 @@ static int osiris_pm_resume(struct sys_device *sd)
__raw_writeb(pm_osiris_ctrl0, OSIRIS_VA_CTRL0);
- s3c2410_gpio_cfgpin(S3C2410_GPA(21), S3C2410_GPA21_nRSTOUT);
+ s3c_gpio_cfgpin(S3C2410_GPA(21), S3C2410_GPA21_nRSTOUT);
return 0;
}
diff --git a/arch/arm/mach-s3c2440/s3c2440.c b/arch/arm/mach-s3c2440/s3c2440.c
index 2b68f7ea45ae..d50f3ae6173d 100644
--- a/arch/arm/mach-s3c2440/s3c2440.c
+++ b/arch/arm/mach-s3c2440/s3c2440.c
@@ -19,6 +19,7 @@
#include <linux/platform_device.h>
#include <linux/serial_core.h>
#include <linux/sysdev.h>
+#include <linux/gpio.h>
#include <linux/clk.h>
#include <linux/io.h>
@@ -33,6 +34,10 @@
#include <plat/cpu.h>
#include <plat/s3c244x.h>
+#include <plat/gpio-core.h>
+#include <plat/gpio-cfg.h>
+#include <plat/gpio-cfg-helpers.h>
+
static struct sys_device s3c2440_sysdev = {
.cls = &s3c2440_sysclass,
};
@@ -41,6 +46,9 @@ int __init s3c2440_init(void)
{
printk("S3C2440: Initialising architecture\n");
+ s3c24xx_gpiocfg_default.set_pull = s3c_gpio_setpull_1up;
+ s3c24xx_gpiocfg_default.get_pull = s3c_gpio_getpull_1up;
+
/* change irq for watchdog */
s3c_device_wdt.resource[1].start = IRQ_S3C2440_WDT;
diff --git a/arch/arm/mach-s3c2443/Kconfig b/arch/arm/mach-s3c2443/Kconfig
index 698140af247c..4fef723126fa 100644
--- a/arch/arm/mach-s3c2443/Kconfig
+++ b/arch/arm/mach-s3c2443/Kconfig
@@ -8,6 +8,7 @@ config CPU_S3C2443
select S3C2443_DMA if S3C2410_DMA
select CPU_LLSERIAL_S3C2440
select SAMSUNG_CLKSRC
+ select S3C2443_CLOCK
help
Support for the S3C2443 SoC from the S3C24XX line
diff --git a/arch/arm/mach-s3c2443/clock.c b/arch/arm/mach-s3c2443/clock.c
index 62cd4eaee01b..0c3c0c884cd3 100644
--- a/arch/arm/mach-s3c2443/clock.c
+++ b/arch/arm/mach-s3c2443/clock.c
@@ -21,6 +21,7 @@
*/
#include <linux/init.h>
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/list.h>
@@ -54,111 +55,13 @@
* set the correct muxing at initialisation
*/
-static int s3c2443_gate(void __iomem *reg, struct clk *clk, int enable)
-{
- u32 ctrlbit = clk->ctrlbit;
- u32 con = __raw_readl(reg);
-
- if (enable)
- con |= ctrlbit;
- else
- con &= ~ctrlbit;
-
- __raw_writel(con, reg);
- return 0;
-}
-
-static int s3c2443_clkcon_enable_h(struct clk *clk, int enable)
-{
- return s3c2443_gate(S3C2443_HCLKCON, clk, enable);
-}
-
-static int s3c2443_clkcon_enable_p(struct clk *clk, int enable)
-{
- return s3c2443_gate(S3C2443_PCLKCON, clk, enable);
-}
-
-static int s3c2443_clkcon_enable_s(struct clk *clk, int enable)
-{
- return s3c2443_gate(S3C2443_SCLKCON, clk, enable);
-}
-
/* clock selections */
-/* mpllref is a direct descendant of clk_xtal by default, but it is not
- * elided as the EPLL can be either sourced by the XTAL or EXTCLK and as
- * such directly equating the two source clocks is impossible.
- */
-static struct clk clk_mpllref = {
- .name = "mpllref",
- .parent = &clk_xtal,
- .id = -1,
-};
-
static struct clk clk_i2s_ext = {
.name = "i2s-ext",
.id = -1,
};
-static struct clk *clk_epllref_sources[] = {
- [0] = &clk_mpllref,
- [1] = &clk_mpllref,
- [2] = &clk_xtal,
- [3] = &clk_ext,
-};
-
-static struct clksrc_clk clk_epllref = {
- .clk = {
- .name = "epllref",
- .id = -1,
- },
- .sources = &(struct clksrc_sources) {
- .sources = clk_epllref_sources,
- .nr_sources = ARRAY_SIZE(clk_epllref_sources),
- },
- .reg_src = { .reg = S3C2443_CLKSRC, .size = 2, .shift = 7 },
-};
-
-static unsigned long s3c2443_getrate_mdivclk(struct clk *clk)
-{
- unsigned long parent_rate = clk_get_rate(clk->parent);
- unsigned long div = __raw_readl(S3C2443_CLKDIV0);
-
- div &= S3C2443_CLKDIV0_EXTDIV_MASK;
- div >>= (S3C2443_CLKDIV0_EXTDIV_SHIFT-1); /* x2 */
-
- return parent_rate / (div + 1);
-}
-
-static struct clk clk_mdivclk = {
- .name = "mdivclk",
- .parent = &clk_mpllref,
- .id = -1,
- .ops = &(struct clk_ops) {
- .get_rate = s3c2443_getrate_mdivclk,
- },
-};
-
-static struct clk *clk_msysclk_sources[] = {
- [0] = &clk_mpllref,
- [1] = &clk_mpll,
- [2] = &clk_mdivclk,
- [3] = &clk_mpllref,
-};
-
-static struct clksrc_clk clk_msysclk = {
- .clk = {
- .name = "msysclk",
- .parent = &clk_xtal,
- .id = -1,
- },
- .sources = &(struct clksrc_sources) {
- .sources = clk_msysclk_sources,
- .nr_sources = ARRAY_SIZE(clk_msysclk_sources),
- },
- .reg_src = { .reg = S3C2443_CLKSRC, .size = 2, .shift = 3 },
-};
-
/* armdiv
*
* this clock is sourced from msysclk and can have a number of
@@ -266,44 +169,6 @@ static struct clksrc_clk clk_arm = {
.reg_src = { .reg = S3C2443_CLKDIV0, .size = 1, .shift = 13 },
};
-/* esysclk
- *
- * this is sourced from either the EPLL or the EPLLref clock
-*/
-
-static struct clk *clk_sysclk_sources[] = {
- [0] = &clk_epllref.clk,
- [1] = &clk_epll,
-};
-
-static struct clksrc_clk clk_esysclk = {
- .clk = {
- .name = "esysclk",
- .parent = &clk_epll,
- .id = -1,
- },
- .sources = &(struct clksrc_sources) {
- .sources = clk_sysclk_sources,
- .nr_sources = ARRAY_SIZE(clk_sysclk_sources),
- },
- .reg_src = { .reg = S3C2443_CLKSRC, .size = 1, .shift = 6 },
-};
-
-/* uartclk
- *
- * UART baud-rate clock sourced from esysclk via a divisor
-*/
-
-static struct clksrc_clk clk_uart = {
- .clk = {
- .name = "uartclk",
- .id = -1,
- .parent = &clk_esysclk.clk,
- },
- .reg_div = { .reg = S3C2443_CLKDIV1, .size = 4, .shift = 8 },
-};
-
-
/* hsspi
*
* high-speed spi clock, sourced from esysclk
@@ -320,21 +185,6 @@ static struct clksrc_clk clk_hsspi = {
.reg_div = { .reg = S3C2443_CLKDIV1, .size = 2, .shift = 4 },
};
-/* usbhost
- *
- * usb host bus-clock, usually 48MHz to provide USB bus clock timing
-*/
-
-static struct clksrc_clk clk_usb_bus_host = {
- .clk = {
- .name = "usb-bus-host-parent",
- .id = -1,
- .parent = &clk_esysclk.clk,
- .ctrlbit = S3C2443_SCLKCON_USBHOST,
- .enable = s3c2443_clkcon_enable_s,
- },
- .reg_div = { .reg = S3C2443_CLKDIV1, .size = 2, .shift = 4 },
-};
/* clk_hsmcc_div
*
@@ -391,7 +241,7 @@ static struct clk clk_hsmmc = {
/* i2s_eplldiv
*
- * This clock is the output from the I2S divisor of ESYSCLK, and is seperate
+ * This clock is the output from the I2S divisor of ESYSCLK, and is separate
* from the mux that comes after it (cannot merge into one single clock)
*/
@@ -433,89 +283,16 @@ static struct clksrc_clk clk_i2s = {
.reg_src = { .reg = S3C2443_CLKSRC, .size = 2, .shift = 14 },
};
-/* cam-if
- *
- * camera interface bus-clock, divided down from esysclk
-*/
-
-static struct clksrc_clk clk_cam = {
- .clk = {
- .name = "camif-upll", /* same as 2440 name */
- .id = -1,
- .parent = &clk_esysclk.clk,
- .ctrlbit = S3C2443_SCLKCON_CAMCLK,
- .enable = s3c2443_clkcon_enable_s,
- },
- .reg_div = { .reg = S3C2443_CLKDIV1, .size = 4, .shift = 26 },
-};
-
-/* display-if
- *
- * display interface clock, divided from esysclk
-*/
-
-static struct clksrc_clk clk_display = {
- .clk = {
- .name = "display-if",
- .id = -1,
- .parent = &clk_esysclk.clk,
- .ctrlbit = S3C2443_SCLKCON_DISPCLK,
- .enable = s3c2443_clkcon_enable_s,
- },
- .reg_div = { .reg = S3C2443_CLKDIV1, .size = 8, .shift = 16 },
-};
-
-/* prediv
- *
- * this divides the msysclk down to pass to h/p/etc.
- */
-
-static unsigned long s3c2443_prediv_getrate(struct clk *clk)
-{
- unsigned long rate = clk_get_rate(clk->parent);
- unsigned long clkdiv0 = __raw_readl(S3C2443_CLKDIV0);
-
- clkdiv0 &= S3C2443_CLKDIV0_PREDIV_MASK;
- clkdiv0 >>= S3C2443_CLKDIV0_PREDIV_SHIFT;
-
- return rate / (clkdiv0 + 1);
-}
-
-static struct clk clk_prediv = {
- .name = "prediv",
- .id = -1,
- .parent = &clk_msysclk.clk,
- .ops = &(struct clk_ops) {
- .get_rate = s3c2443_prediv_getrate,
- },
-};
-
/* standard clock definitions */
-static struct clk init_clocks_disable[] = {
+static struct clk init_clocks_off[] = {
{
- .name = "nand",
- .id = -1,
- .parent = &clk_h,
- }, {
.name = "sdi",
.id = -1,
.parent = &clk_p,
.enable = s3c2443_clkcon_enable_p,
.ctrlbit = S3C2443_PCLKCON_SDI,
}, {
- .name = "adc",
- .id = -1,
- .parent = &clk_p,
- .enable = s3c2443_clkcon_enable_p,
- .ctrlbit = S3C2443_PCLKCON_ADC,
- }, {
- .name = "i2c",
- .id = -1,
- .parent = &clk_p,
- .enable = s3c2443_clkcon_enable_p,
- .ctrlbit = S3C2443_PCLKCON_IIC,
- }, {
.name = "iis",
.id = -1,
.parent = &clk_p,
@@ -537,179 +314,12 @@ static struct clk init_clocks_disable[] = {
};
static struct clk init_clocks[] = {
- {
- .name = "dma",
- .id = 0,
- .parent = &clk_h,
- .enable = s3c2443_clkcon_enable_h,
- .ctrlbit = S3C2443_HCLKCON_DMA0,
- }, {
- .name = "dma",
- .id = 1,
- .parent = &clk_h,
- .enable = s3c2443_clkcon_enable_h,
- .ctrlbit = S3C2443_HCLKCON_DMA1,
- }, {
- .name = "dma",
- .id = 2,
- .parent = &clk_h,
- .enable = s3c2443_clkcon_enable_h,
- .ctrlbit = S3C2443_HCLKCON_DMA2,
- }, {
- .name = "dma",
- .id = 3,
- .parent = &clk_h,
- .enable = s3c2443_clkcon_enable_h,
- .ctrlbit = S3C2443_HCLKCON_DMA3,
- }, {
- .name = "dma",
- .id = 4,
- .parent = &clk_h,
- .enable = s3c2443_clkcon_enable_h,
- .ctrlbit = S3C2443_HCLKCON_DMA4,
- }, {
- .name = "dma",
- .id = 5,
- .parent = &clk_h,
- .enable = s3c2443_clkcon_enable_h,
- .ctrlbit = S3C2443_HCLKCON_DMA5,
- }, {
- .name = "lcd",
- .id = -1,
- .parent = &clk_h,
- .enable = s3c2443_clkcon_enable_h,
- .ctrlbit = S3C2443_HCLKCON_LCDC,
- }, {
- .name = "gpio",
- .id = -1,
- .parent = &clk_p,
- .enable = s3c2443_clkcon_enable_p,
- .ctrlbit = S3C2443_PCLKCON_GPIO,
- }, {
- .name = "usb-host",
- .id = -1,
- .parent = &clk_h,
- .enable = s3c2443_clkcon_enable_h,
- .ctrlbit = S3C2443_HCLKCON_USBH,
- }, {
- .name = "usb-device",
- .id = -1,
- .parent = &clk_h,
- .enable = s3c2443_clkcon_enable_h,
- .ctrlbit = S3C2443_HCLKCON_USBD,
- }, {
- .name = "hsmmc",
- .id = -1,
- .parent = &clk_h,
- .enable = s3c2443_clkcon_enable_h,
- .ctrlbit = S3C2443_HCLKCON_HSMMC,
- }, {
- .name = "cfc",
- .id = -1,
- .parent = &clk_h,
- .enable = s3c2443_clkcon_enable_h,
- .ctrlbit = S3C2443_HCLKCON_CFC,
- }, {
- .name = "ssmc",
- .id = -1,
- .parent = &clk_h,
- .enable = s3c2443_clkcon_enable_h,
- .ctrlbit = S3C2443_HCLKCON_SSMC,
- }, {
- .name = "timers",
- .id = -1,
- .parent = &clk_p,
- .enable = s3c2443_clkcon_enable_p,
- .ctrlbit = S3C2443_PCLKCON_PWMT,
- }, {
- .name = "uart",
- .id = 0,
- .parent = &clk_p,
- .enable = s3c2443_clkcon_enable_p,
- .ctrlbit = S3C2443_PCLKCON_UART0,
- }, {
- .name = "uart",
- .id = 1,
- .parent = &clk_p,
- .enable = s3c2443_clkcon_enable_p,
- .ctrlbit = S3C2443_PCLKCON_UART1,
- }, {
- .name = "uart",
- .id = 2,
- .parent = &clk_p,
- .enable = s3c2443_clkcon_enable_p,
- .ctrlbit = S3C2443_PCLKCON_UART2,
- }, {
- .name = "uart",
- .id = 3,
- .parent = &clk_p,
- .enable = s3c2443_clkcon_enable_p,
- .ctrlbit = S3C2443_PCLKCON_UART3,
- }, {
- .name = "rtc",
- .id = -1,
- .parent = &clk_p,
- .enable = s3c2443_clkcon_enable_p,
- .ctrlbit = S3C2443_PCLKCON_RTC,
- }, {
- .name = "watchdog",
- .id = -1,
- .parent = &clk_p,
- .ctrlbit = S3C2443_PCLKCON_WDT,
- }, {
- .name = "usb-bus-host",
- .id = -1,
- .parent = &clk_usb_bus_host.clk,
- }, {
- .name = "ac97",
- .id = -1,
- .parent = &clk_p,
- .ctrlbit = S3C2443_PCLKCON_AC97,
- }
-};
-
-/* clocks to add where we need to check their parentage */
-
-static struct clksrc_clk __initdata *init_list[] = {
- &clk_epllref, /* should be first */
- &clk_esysclk,
- &clk_msysclk,
- &clk_arm,
- &clk_i2s_eplldiv,
- &clk_i2s,
- &clk_cam,
- &clk_uart,
- &clk_display,
- &clk_hsmmc_div,
- &clk_usb_bus_host,
};
-static void __init s3c2443_clk_initparents(void)
-{
- int ptr;
-
- for (ptr = 0; ptr < ARRAY_SIZE(init_list); ptr++)
- s3c_set_clksrc(init_list[ptr], true);
-}
-
-static inline unsigned long s3c2443_get_hdiv(unsigned long clkcon0)
-{
- clkcon0 &= S3C2443_CLKDIV0_HCLKDIV_MASK;
-
- return clkcon0 + 1;
-}
-
/* clocks to add straight away */
static struct clksrc_clk *clksrcs[] __initdata = {
- &clk_usb_bus_host,
- &clk_epllref,
- &clk_esysclk,
- &clk_msysclk,
&clk_arm,
- &clk_uart,
- &clk_display,
- &clk_cam,
&clk_i2s_eplldiv,
&clk_i2s,
&clk_hsspi,
@@ -717,92 +327,32 @@ static struct clksrc_clk *clksrcs[] __initdata = {
};
static struct clk *clks[] __initdata = {
- &clk_ext,
- &clk_epll,
- &clk_usb_bus,
- &clk_mpllref,
&clk_hsmmc,
&clk_armdiv,
- &clk_prediv,
};
void __init_or_cpufreq s3c2443_setup_clocks(void)
{
- unsigned long mpllcon = __raw_readl(S3C2443_MPLLCON);
- unsigned long clkdiv0 = __raw_readl(S3C2443_CLKDIV0);
- struct clk *xtal_clk;
- unsigned long xtal;
- unsigned long pll;
- unsigned long fclk;
- unsigned long hclk;
- unsigned long pclk;
-
- xtal_clk = clk_get(NULL, "xtal");
- xtal = clk_get_rate(xtal_clk);
- clk_put(xtal_clk);
-
- pll = s3c2443_get_mpll(mpllcon, xtal);
- clk_msysclk.clk.rate = pll;
-
- fclk = pll / s3c2443_fclk_div(clkdiv0);
- hclk = s3c2443_prediv_getrate(&clk_prediv);
- hclk /= s3c2443_get_hdiv(clkdiv0);
- pclk = hclk / ((clkdiv0 & S3C2443_CLKDIV0_HALF_PCLK) ? 2 : 1);
-
- s3c24xx_setup_clocks(fclk, hclk, pclk);
-
- printk("S3C2443: mpll %s %ld.%03ld MHz, cpu %ld.%03ld MHz, mem %ld.%03ld MHz, pclk %ld.%03ld MHz\n",
- (mpllcon & S3C2443_PLLCON_OFF) ? "off":"on",
- print_mhz(pll), print_mhz(fclk),
- print_mhz(hclk), print_mhz(pclk));
-
- s3c24xx_setup_clocks(fclk, hclk, pclk);
+ s3c2443_common_setup_clocks(s3c2443_get_mpll, s3c2443_fclk_div);
}
void __init s3c2443_init_clocks(int xtal)
{
- struct clk *clkp;
unsigned long epllcon = __raw_readl(S3C2443_EPLLCON);
- int ret;
int ptr;
- /* s3c2443 parents h and p clocks from prediv */
- clk_h.parent = &clk_prediv;
- clk_p.parent = &clk_prediv;
+ clk_epll.rate = s3c2443_get_epll(epllcon, xtal);
+ clk_epll.parent = &clk_epllref.clk;
+
+ s3c2443_common_init_clocks(xtal, s3c2443_get_mpll, s3c2443_fclk_div);
- s3c24xx_register_baseclocks(xtal);
s3c2443_setup_clocks();
- s3c2443_clk_initparents();
-
- for (ptr = 0; ptr < ARRAY_SIZE(clks); ptr++) {
- clkp = clks[ptr];
- ret = s3c24xx_register_clock(clkp);
- if (ret < 0) {
- printk(KERN_ERR "Failed to register clock %s (%d)\n",
- clkp->name, ret);
- }
- }
+ s3c24xx_register_clocks(clks, ARRAY_SIZE(clks));
for (ptr = 0; ptr < ARRAY_SIZE(clksrcs); ptr++)
s3c_register_clksrc(clksrcs[ptr], 1);
- clk_epll.rate = s3c2443_get_epll(epllcon, xtal);
- clk_epll.parent = &clk_epllref.clk;
- clk_usb_bus.parent = &clk_usb_bus_host.clk;
-
- /* ensure usb bus clock is within correct rate of 48MHz */
-
- if (clk_get_rate(&clk_usb_bus_host.clk) != (48 * 1000 * 1000)) {
- printk(KERN_INFO "Warning: USB host bus not at 48MHz\n");
- clk_set_rate(&clk_usb_bus_host.clk, 48*1000*1000);
- }
-
- printk("S3C2443: epll %s %ld.%03ld MHz, usb-bus %ld.%03ld MHz\n",
- (epllcon & S3C2443_PLLCON_OFF) ? "off":"on",
- print_mhz(clk_get_rate(&clk_epll)),
- print_mhz(clk_get_rate(&clk_usb_bus)));
-
/* register clocks from clock array */
s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));
@@ -819,17 +369,8 @@ void __init s3c2443_init_clocks(int xtal)
/* install (and disable) the clocks we do not need immediately */
- clkp = init_clocks_disable;
- for (ptr = 0; ptr < ARRAY_SIZE(init_clocks_disable); ptr++, clkp++) {
-
- ret = s3c24xx_register_clock(clkp);
- if (ret < 0) {
- printk(KERN_ERR "Failed to register clock %s (%d)\n",
- clkp->name, ret);
- }
-
- (clkp->enable)(clkp, 0);
- }
+ s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
+ s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
s3c_pwmclk_init();
}
diff --git a/arch/arm/mach-s3c64xx/Makefile b/arch/arm/mach-s3c64xx/Makefile
index 3758e15086be..a690a9dda073 100644
--- a/arch/arm/mach-s3c64xx/Makefile
+++ b/arch/arm/mach-s3c64xx/Makefile
@@ -60,3 +60,4 @@ obj-y += dev-rtc.o
obj-y += dev-audio.o
obj-$(CONFIG_S3C_ADC) += dev-adc.o
obj-$(CONFIG_S3C64XX_DEV_SPI) += dev-spi.o
+obj-$(CONFIG_S3C64XX_DEV_TS) += dev-ts.o
diff --git a/arch/arm/mach-s3c64xx/clock.c b/arch/arm/mach-s3c64xx/clock.c
index 2ac2e7d73e53..28215d52f176 100644
--- a/arch/arm/mach-s3c64xx/clock.c
+++ b/arch/arm/mach-s3c64xx/clock.c
@@ -652,6 +652,16 @@ static struct clksrc_clk clksrcs[] = {
.sources = &clkset_audio1,
}, {
.clk = {
+ .name = "audio-bus",
+ .id = -1, /* There's only one IISv4 port */
+ .ctrlbit = S3C6410_CLKCON_SCLK_AUDIO2,
+ .enable = s3c64xx_sclk_ctrl,
+ },
+ .reg_src = { .reg = S3C6410_CLK_SRC2, .shift = 0, .size = 3 },
+ .reg_div = { .reg = S3C_CLK_DIV2, .shift = 24, .size = 4 },
+ .sources = &clkset_audio2,
+ }, {
+ .clk = {
.name = "irda-bus",
.id = 0,
.ctrlbit = S3C_CLKCON_SCLK_IRDA,
diff --git a/arch/arm/mach-s3c64xx/dev-ts.c b/arch/arm/mach-s3c64xx/dev-ts.c
new file mode 100644
index 000000000000..17cc7934afbe
--- /dev/null
+++ b/arch/arm/mach-s3c64xx/dev-ts.c
@@ -0,0 +1,61 @@
+/* linux/arch/arm/mach-s3c64xx/dev-ts.c
+ *
+ * Copyright (c) 2008 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>, <ben-linux@fluff.org>
+ *
+ * Adapted by Maurus Cuelenaere for s3c64xx
+ *
+ * S3C64XX series device definition for touchscreen device
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/platform_device.h>
+
+#include <mach/irqs.h>
+#include <mach/map.h>
+
+#include <plat/devs.h>
+#include <plat/ts.h>
+
+static struct resource s3c_ts_resource[] = {
+ [0] = {
+ .start = S3C64XX_PA_ADC,
+ .end = S3C64XX_PA_ADC + SZ_256 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_TC,
+ .end = IRQ_TC,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device s3c_device_ts = {
+ .name = "s3c64xx-ts",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(s3c_ts_resource),
+ .resource = s3c_ts_resource,
+};
+
+void __init s3c24xx_ts_set_platdata(struct s3c2410_ts_mach_info *pd)
+{
+ struct s3c2410_ts_mach_info *npd;
+
+ if (!pd) {
+ printk(KERN_ERR "%s: no platform data\n", __func__);
+ return;
+ }
+
+ npd = kmemdup(pd, sizeof(struct s3c2410_ts_mach_info), GFP_KERNEL);
+ if (!npd)
+ printk(KERN_ERR "%s: no memory for platform data\n", __func__);
+
+ s3c_device_ts.dev.platform_data = npd;
+}
+EXPORT_SYMBOL(s3c24xx_ts_set_platdata);
diff --git a/arch/arm/mach-s3c64xx/gpiolib.c b/arch/arm/mach-s3c64xx/gpiolib.c
index 66e6794481d2..60c929a3cab6 100644
--- a/arch/arm/mach-s3c64xx/gpiolib.c
+++ b/arch/arm/mach-s3c64xx/gpiolib.c
@@ -51,6 +51,7 @@
static struct s3c_gpio_cfg gpio_4bit_cfg_noint = {
.set_config = s3c_gpio_setcfg_s3c64xx_4bit,
+ .get_config = s3c_gpio_getcfg_s3c64xx_4bit,
.set_pull = s3c_gpio_setpull_updown,
.get_pull = s3c_gpio_getpull_updown,
};
@@ -58,12 +59,14 @@ static struct s3c_gpio_cfg gpio_4bit_cfg_noint = {
static struct s3c_gpio_cfg gpio_4bit_cfg_eint0111 = {
.cfg_eint = 7,
.set_config = s3c_gpio_setcfg_s3c64xx_4bit,
+ .get_config = s3c_gpio_getcfg_s3c64xx_4bit,
.set_pull = s3c_gpio_setpull_updown,
.get_pull = s3c_gpio_getpull_updown,
};
static struct s3c_gpio_cfg gpio_4bit_cfg_eint0011 = {
.cfg_eint = 3,
+ .get_config = s3c_gpio_getcfg_s3c64xx_4bit,
.set_config = s3c_gpio_setcfg_s3c64xx_4bit,
.set_pull = s3c_gpio_setpull_updown,
.get_pull = s3c_gpio_getpull_updown,
@@ -171,6 +174,7 @@ static struct s3c_gpio_chip gpio_4bit2[] = {
static struct s3c_gpio_cfg gpio_2bit_cfg_noint = {
.set_config = s3c_gpio_setcfg_s3c24xx,
+ .get_config = s3c_gpio_getcfg_s3c24xx,
.set_pull = s3c_gpio_setpull_updown,
.get_pull = s3c_gpio_getpull_updown,
};
@@ -178,6 +182,7 @@ static struct s3c_gpio_cfg gpio_2bit_cfg_noint = {
static struct s3c_gpio_cfg gpio_2bit_cfg_eint10 = {
.cfg_eint = 2,
.set_config = s3c_gpio_setcfg_s3c24xx,
+ .get_config = s3c_gpio_getcfg_s3c24xx,
.set_pull = s3c_gpio_setpull_updown,
.get_pull = s3c_gpio_getpull_updown,
};
@@ -185,6 +190,7 @@ static struct s3c_gpio_cfg gpio_2bit_cfg_eint10 = {
static struct s3c_gpio_cfg gpio_2bit_cfg_eint11 = {
.cfg_eint = 3,
.set_config = s3c_gpio_setcfg_s3c24xx,
+ .get_config = s3c_gpio_getcfg_s3c24xx,
.set_pull = s3c_gpio_setpull_updown,
.get_pull = s3c_gpio_getpull_updown,
};
diff --git a/arch/arm/mach-s3c64xx/include/mach/pll.h b/arch/arm/mach-s3c64xx/include/mach/pll.h
index 90bbd72fdc4e..5ef0bb698ee0 100644
--- a/arch/arm/mach-s3c64xx/include/mach/pll.h
+++ b/arch/arm/mach-s3c64xx/include/mach/pll.h
@@ -20,6 +20,7 @@
#define S3C6400_PLL_SDIV_SHIFT (0)
#include <asm/div64.h>
+#include <plat/pll6553x.h>
static inline unsigned long s3c6400_get_pll(unsigned long baseclk,
u32 pllcon)
@@ -37,38 +38,8 @@ static inline unsigned long s3c6400_get_pll(unsigned long baseclk,
return (unsigned long)fvco;
}
-#define S3C6400_EPLL_MDIV_MASK ((1 << (23-16)) - 1)
-#define S3C6400_EPLL_PDIV_MASK ((1 << (13-8)) - 1)
-#define S3C6400_EPLL_SDIV_MASK ((1 << (2-0)) - 1)
-#define S3C6400_EPLL_MDIV_SHIFT (16)
-#define S3C6400_EPLL_PDIV_SHIFT (8)
-#define S3C6400_EPLL_SDIV_SHIFT (0)
-#define S3C6400_EPLL_KDIV_MASK (0xffff)
-
static inline unsigned long s3c6400_get_epll(unsigned long baseclk)
{
- unsigned long result;
- u32 epll0 = __raw_readl(S3C_EPLL_CON0);
- u32 epll1 = __raw_readl(S3C_EPLL_CON1);
- u32 mdiv, pdiv, sdiv, kdiv;
- u64 tmp;
-
- mdiv = (epll0 >> S3C6400_EPLL_MDIV_SHIFT) & S3C6400_EPLL_MDIV_MASK;
- pdiv = (epll0 >> S3C6400_EPLL_PDIV_SHIFT) & S3C6400_EPLL_PDIV_MASK;
- sdiv = (epll0 >> S3C6400_EPLL_SDIV_SHIFT) & S3C6400_EPLL_SDIV_MASK;
- kdiv = epll1 & S3C6400_EPLL_KDIV_MASK;
-
- /* We need to multiple baseclk by mdiv (the integer part) and kdiv
- * which is in 2^16ths, so shift mdiv up (does not overflow) and
- * add kdiv before multiplying. The use of tmp is to avoid any
- * overflows before shifting bac down into result when multipling
- * by the mdiv and kdiv pair.
- */
-
- tmp = baseclk;
- tmp *= (mdiv << 16) + kdiv;
- do_div(tmp, (pdiv << sdiv));
- result = tmp >> 16;
-
- return result;
+ return s3c_get_pll6553x(baseclk, __raw_readl(S3C_EPLL_CON0),
+ __raw_readl(S3C_EPLL_CON1));
}
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c
index 2d5afd221d77..6878fbd1868c 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6410.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c
@@ -64,6 +64,7 @@
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/cpu.h>
+#include <plat/audio.h>
#define UCON S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK
#define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB
@@ -254,6 +255,7 @@ static struct platform_device *smdk6410_devices[] __initdata = {
&s3c_device_fb,
&s3c_device_ohci,
&s3c_device_usb_hsotg,
+ &s3c64xx_device_ac97,
&s3c64xx_device_iisv4,
#ifdef CONFIG_REGULATOR
@@ -652,6 +654,9 @@ static void __init smdk6410_machine_init(void)
i2c_register_board_info(0, i2c_devs0, ARRAY_SIZE(i2c_devs0));
i2c_register_board_info(1, i2c_devs1, ARRAY_SIZE(i2c_devs1));
+ /* Board default with CFG2.1 off */
+ s3c64xx_ac97_setup_gpio(S3C64XX_AC97_GPD);
+
platform_add_devices(smdk6410_devices, ARRAY_SIZE(smdk6410_devices));
}
diff --git a/arch/arm/mach-s5p6440/gpio.c b/arch/arm/mach-s5p6440/gpio.c
index b0ea741177ad..262dc75d5bea 100644
--- a/arch/arm/mach-s5p6440/gpio.c
+++ b/arch/arm/mach-s5p6440/gpio.c
@@ -161,12 +161,15 @@ static struct s3c_gpio_cfg s5p6440_gpio_cfgs[] = {
}, {
.cfg_eint = 0,
.set_config = s3c_gpio_setcfg_s3c24xx,
+ .get_config = s3c_gpio_getcfg_s3c24xx,
}, {
.cfg_eint = 2,
.set_config = s3c_gpio_setcfg_s3c24xx,
+ .get_config = s3c_gpio_getcfg_s3c24xx,
}, {
.cfg_eint = 3,
.set_config = s3c_gpio_setcfg_s3c24xx,
+ .get_config = s3c_gpio_getcfg_s3c24xx,
},
};
@@ -279,6 +282,8 @@ void __init s5p6440_gpiolib_set_cfg(struct s3c_gpio_cfg *chipcfg, int nr_chips)
for (; nr_chips > 0; nr_chips--, chipcfg++) {
if (!chipcfg->set_config)
chipcfg->set_config = s3c_gpio_setcfg_s3c64xx_4bit;
+ if (!chipcfg->get_config)
+ chipcfg->get_config = s3c_gpio_getcfg_s3c64xx_4bit;
if (!chipcfg->set_pull)
chipcfg->set_pull = s3c_gpio_setpull_updown;
if (!chipcfg->get_pull)
diff --git a/arch/arm/mach-s5p6440/include/mach/pwm-clock.h b/arch/arm/mach-s5p6440/include/mach/pwm-clock.h
index c4bb7c555477..6a2a02fdf12a 100644
--- a/arch/arm/mach-s5p6440/include/mach/pwm-clock.h
+++ b/arch/arm/mach-s5p6440/include/mach/pwm-clock.h
@@ -1,11 +1,14 @@
/* linux/arch/arm/mach-s5p6440/include/mach/pwm-clock.h
*
+ * Copyright (c) 2009 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Copyright 2008 Openmoko, Inc.
* Copyright 2008 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
* http://armlinux.simtec.co.uk/
*
- * Copyright 2009 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
+ * Based on arch/arm/mach-s3c64xx/include/mach/pwm-clock.h
*
* S5P6440 - pwm clock and timer support
*
@@ -14,16 +17,19 @@
* published by the Free Software Foundation.
*/
+#ifndef __ASM_ARCH_PWMCLK_H
+#define __ASM_ARCH_PWMCLK_H __FILE__
+
/**
* pwm_cfg_src_is_tclk() - return whether the given mux config is a tclk
- * @cfg: The timer TCFG1 register bits shifted down to 0.
+ * @tcfg: The timer TCFG1 register bits shifted down to 0.
*
* Return true if the given configuration from TCFG1 is a TCLK instead
* of any of the TDIV clocks.
*/
static inline int pwm_cfg_src_is_tclk(unsigned long tcfg)
{
- return tcfg == S3C2410_TCFG1_MUX_TCLK;
+ return 0;
}
/**
@@ -35,7 +41,7 @@ static inline int pwm_cfg_src_is_tclk(unsigned long tcfg)
*/
static inline unsigned long tcfg_to_divisor(unsigned long tcfg1)
{
- return 1 << (1 + tcfg1);
+ return 1 << tcfg1;
}
/**
@@ -45,7 +51,7 @@ static inline unsigned long tcfg_to_divisor(unsigned long tcfg1)
*/
static inline unsigned int pwm_tdiv_has_div1(void)
{
- return 0;
+ return 1;
}
/**
@@ -56,7 +62,9 @@ static inline unsigned int pwm_tdiv_has_div1(void)
*/
static inline unsigned long pwm_tdiv_div_bits(unsigned int div)
{
- return ilog2(div) - 1;
+ return ilog2(div);
}
-#define S3C_TCFG1_MUX_TCLK S3C2410_TCFG1_MUX_TCLK
+#define S3C_TCFG1_MUX_TCLK 0
+
+#endif /* __ASM_ARCH_PWMCLK_H */
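These pwm-clock header changes (repeated for s5p6442 and s5pv210 below) switch to the s3c64xx-style TCFG1 encoding, where a divider field value of n selects a divide-by-2^n and divide-by-1 is a valid setting. A standalone sketch of the mapping, for illustration only:

#include <stdio.h>

/* TCFG1 divider field value n selects a divide-by-2^n (illustrative) */
static unsigned long tcfg_to_divisor(unsigned long tcfg1)
{
	return 1UL << tcfg1;
}

/* inverse mapping: a power-of-two divisor back to the field value */
static unsigned long tdiv_to_bits(unsigned int div)
{
	unsigned long bits = 0;

	while ((1U << bits) < div)
		bits++;
	return bits;
}

int main(void)
{
	printf("tcfg1=0 -> /%lu, tcfg1=4 -> /%lu\n",
	       tcfg_to_divisor(0), tcfg_to_divisor(4));
	printf("div 16 -> field %lu\n", tdiv_to_bits(16));
	return 0;
}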
diff --git a/arch/arm/mach-s5p6442/include/mach/pwm-clock.h b/arch/arm/mach-s5p6442/include/mach/pwm-clock.h
index 15e8525da0f1..2724b37def31 100644
--- a/arch/arm/mach-s5p6442/include/mach/pwm-clock.h
+++ b/arch/arm/mach-s5p6442/include/mach/pwm-clock.h
@@ -1,13 +1,14 @@
/* linux/arch/arm/mach-s5p6442/include/mach/pwm-clock.h
*
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Copyright 2008 Openmoko, Inc.
* Copyright 2008 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
* http://armlinux.simtec.co.uk/
*
- * Copyright 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * Based on arch/arm/plat-s3c24xx/include/mach/pwm-clock.h
+ * Based on arch/arm/mach-s3c64xx/include/mach/pwm-clock.h
*
* S5P6442 - pwm clock and timer support
*
@@ -21,14 +22,14 @@
/**
* pwm_cfg_src_is_tclk() - return whether the given mux config is a tclk
- * @cfg: The timer TCFG1 register bits shifted down to 0.
+ * @tcfg: The timer TCFG1 register bits shifted down to 0.
*
* Return true if the given configuration from TCFG1 is a TCLK instead
* of any of the TDIV clocks.
*/
static inline int pwm_cfg_src_is_tclk(unsigned long tcfg)
{
- return tcfg == S3C2410_TCFG1_MUX_TCLK;
+ return tcfg == S3C64XX_TCFG1_MUX_TCLK;
}
/**
@@ -40,7 +41,7 @@ static inline int pwm_cfg_src_is_tclk(unsigned long tcfg)
*/
static inline unsigned long tcfg_to_divisor(unsigned long tcfg1)
{
- return 1 << (1 + tcfg1);
+ return 1 << tcfg1;
}
/**
@@ -50,7 +51,7 @@ static inline unsigned long tcfg_to_divisor(unsigned long tcfg1)
*/
static inline unsigned int pwm_tdiv_has_div1(void)
{
- return 0;
+ return 1;
}
/**
@@ -61,9 +62,9 @@ static inline unsigned int pwm_tdiv_has_div1(void)
*/
static inline unsigned long pwm_tdiv_div_bits(unsigned int div)
{
- return ilog2(div) - 1;
+ return ilog2(div);
}
-#define S3C_TCFG1_MUX_TCLK S3C2410_TCFG1_MUX_TCLK
+#define S3C_TCFG1_MUX_TCLK S3C64XX_TCFG1_MUX_TCLK
#endif /* __ASM_ARCH_PWMCLK_H */
diff --git a/arch/arm/mach-s5pv210/include/mach/pwm-clock.h b/arch/arm/mach-s5pv210/include/mach/pwm-clock.h
index 69027fea987a..f8a9f1b330e0 100644
--- a/arch/arm/mach-s5pv210/include/mach/pwm-clock.h
+++ b/arch/arm/mach-s5pv210/include/mach/pwm-clock.h
@@ -1,13 +1,14 @@
/* linux/arch/arm/mach-s5pv210/include/mach/pwm-clock.h
*
+ * Copyright (c) 2009 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Copyright 2008 Openmoko, Inc.
* Copyright 2008 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
* http://armlinux.simtec.co.uk/
*
- * Copyright (c) 2009 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * Based on arch/arm/plat-s3c24xx/include/mach/pwm-clock.h
+ * Based on arch/arm/mach-s3c64xx/include/mach/pwm-clock.h
*
* S5PV210 - pwm clock and timer support
*
@@ -21,14 +22,14 @@
/**
* pwm_cfg_src_is_tclk() - return whether the given mux config is a tclk
- * @cfg: The timer TCFG1 register bits shifted down to 0.
+ * @tcfg: The timer TCFG1 register bits shifted down to 0.
*
* Return true if the given configuration from TCFG1 is a TCLK instead
* of any of the TDIV clocks.
*/
static inline int pwm_cfg_src_is_tclk(unsigned long tcfg)
{
- return tcfg == S3C2410_TCFG1_MUX_TCLK;
+ return tcfg == S3C64XX_TCFG1_MUX_TCLK;
}
/**
@@ -40,7 +41,7 @@ static inline int pwm_cfg_src_is_tclk(unsigned long tcfg)
*/
static inline unsigned long tcfg_to_divisor(unsigned long tcfg1)
{
- return 1 << (1 + tcfg1);
+ return 1 << tcfg1;
}
/**
@@ -50,7 +51,7 @@ static inline unsigned long tcfg_to_divisor(unsigned long tcfg1)
*/
static inline unsigned int pwm_tdiv_has_div1(void)
{
- return 0;
+ return 1;
}
/**
@@ -61,9 +62,9 @@ static inline unsigned int pwm_tdiv_has_div1(void)
*/
static inline unsigned long pwm_tdiv_div_bits(unsigned int div)
{
- return ilog2(div) - 1;
+ return ilog2(div);
}
-#define S3C_TCFG1_MUX_TCLK S3C2410_TCFG1_MUX_TCLK
+#define S3C_TCFG1_MUX_TCLK S3C64XX_TCFG1_MUX_TCLK
#endif /* __ASM_ARCH_PWMCLK_H */
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index 1c2ec96ce261..d3b8ca5d4041 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -26,11 +26,14 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
+#include <linux/i2c.h>
+#include <linux/i2c/tsc2007.h>
#include <linux/io.h>
#include <linux/smsc911x.h>
#include <linux/gpio.h>
#include <linux/input.h>
#include <linux/input/sh_keysc.h>
+#include <linux/usb/r8a66597.h>
#include <mach/common.h>
#include <mach/sh7372.h>
#include <asm/mach-types.h>
@@ -80,12 +83,25 @@
*/
/*
- * KEYSC
+ * LCD / IRQ / KEYSC / IrDA
*
- * SW43 KEYSC
- * -------------------------
- * ON enable
- * OFF disable
+ * IRQ = IRQ26 (TS), IRQ27 (VIO), IRQ28 (TouchScreen)
+ * LCD = 2nd LCDC
+ *
+ * | SW43 |
+ * SW3 | ON | OFF |
+ * -------------+-----------------------+---------------+
+ * ON | KEY / IrDA | LCD |
+ * OFF | KEY / IrDA / IRQ | IRQ |
+ */
+
+/*
+ * USB
+ *
+ * J7 : 1-2 MAX3355E VBUS
+ * 2-3 DC 5.0V
+ *
+ * S39: bit2: off
*/
/* MTD */
@@ -227,11 +243,73 @@ static struct platform_device sdhi0_device = {
.id = 0,
};
+/* USB1 */
+void usb1_host_port_power(int port, int power)
+{
+ if (!power) /* only power-on supported for now */
+ return;
+
+ /* set VBOUT/PWEN and EXTLP1 in DVSTCTR */
+ __raw_writew(__raw_readw(0xE68B0008) | 0x600, 0xE68B0008);
+}
+
+static struct r8a66597_platdata usb1_host_data = {
+ .on_chip = 1,
+ .port_power = usb1_host_port_power,
+};
+
+static struct resource usb1_host_resources[] = {
+ [0] = {
+ .name = "USBHS",
+ .start = 0xE68B0000,
+ .end = 0xE68B00E6 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 215,
+ .end = 215,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device usb1_host_device = {
+ .name = "r8a66597_hcd",
+ .id = 1,
+ .dev = {
+ .dma_mask = NULL, /* DMA not used */
+ .coherent_dma_mask = 0xffffffff,
+ .platform_data = &usb1_host_data,
+ },
+ .num_resources = ARRAY_SIZE(usb1_host_resources),
+ .resource = usb1_host_resources,
+};
+
static struct platform_device *ap4evb_devices[] __initdata = {
&nor_flash_device,
&smc911x_device,
&keysc_device,
&sdhi0_device,
+ &usb1_host_device,
+};
+
+/* TouchScreen (Needs SW3 set to OFF) */
+#define IRQ28 396
+struct tsc2007_platform_data tsc2007_info = {
+ .model = 2007,
+ .x_plate_ohms = 180,
+};
+
+/* I2C */
+static struct i2c_board_info i2c1_devices[] = {
+ {
+ I2C_BOARD_INFO("r2025sd", 0x32),
+ },
+ {
+ I2C_BOARD_INFO("tsc2007", 0x48),
+ .type = "tsc2007",
+ .platform_data = &tsc2007_info,
+ .irq = IRQ28,
+ },
};
static struct map_desc ap4evb_io_desc[] __initdata = {
@@ -318,6 +396,24 @@ static void __init ap4evb_init(void)
gpio_request(GPIO_FN_SDHID0_1, NULL);
gpio_request(GPIO_FN_SDHID0_0, NULL);
+ /* enable TouchScreen */
+ gpio_request(GPIO_FN_IRQ28_123, NULL);
+ set_irq_type(IRQ28, IRQ_TYPE_LEVEL_LOW);
+
+ i2c_register_board_info(1, i2c1_devices,
+ ARRAY_SIZE(i2c1_devices));
+
+ /* USB enable */
+ gpio_request(GPIO_FN_VBUS0_1, NULL);
+ gpio_request(GPIO_FN_IDIN_1_18, NULL);
+ gpio_request(GPIO_FN_PWEN_1_115, NULL);
+ gpio_request(GPIO_FN_OVCN_1_114, NULL);
+ gpio_request(GPIO_FN_EXTLP_1, NULL);
+ gpio_request(GPIO_FN_OVCN2_1, NULL);
+
+ /* setup USB phy */
+ __raw_writew(0x8a0a, 0xE6058130); /* USBCR2 */
+
sh7372_add_standard_devices();
platform_add_devices(ap4evb_devices, ARRAY_SIZE(ap4evb_devices));
diff --git a/arch/arm/mach-shmobile/board-g4evm.c b/arch/arm/mach-shmobile/board-g4evm.c
index 10673a90be52..33441d58cfa6 100644
--- a/arch/arm/mach-shmobile/board-g4evm.c
+++ b/arch/arm/mach-shmobile/board-g4evm.c
@@ -30,6 +30,7 @@
#include <linux/io.h>
#include <linux/input.h>
#include <linux/input/sh_keysc.h>
+#include <linux/mfd/sh_mobile_sdhi.h>
#include <linux/gpio.h>
#include <mach/sh7377.h>
#include <mach/common.h>
@@ -37,6 +38,31 @@
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
+/*
+ * SDHI
+ *
+ * SDHI0 : card detection is possible
+ * SDHI1 : card detection is impossible
+ *
+ * [G4-MAIN-BOARD]
+ * JP74 : short # DBG_2V8A for SDHI0
+ * JP75 : NC # DBG_3V3A for SDHI0
+ * JP76 : NC # DBG_3V3A_SD for SDHI0
+ * JP77 : NC # 3V3A_SDIO for SDHI1
+ * JP78 : short # DBG_2V8A for SDHI1
+ * JP79 : NC # DBG_3V3A for SDHI1
+ * JP80 : NC # DBG_3V3A_SD for SDHI1
+ *
+ * [G4-CORE-BOARD]
+ * S32 : all off # to dissever from G3-CORE_DBG board
+ * S33 : all off # to dissever from G3-CORE_DBG board
+ *
+ * [G3-CORE_DBG-BOARD]
+ * S1 : all off # to dissever from G3-CORE_DBG board
+ * S3 : all off # to dissever from G3-CORE_DBG board
+ * S4 : all off # to dissever from G3-CORE_DBG board
+ */
+
static struct mtd_partition nor_flash_partitions[] = {
{
.name = "loader",
@@ -169,10 +195,53 @@ static struct platform_device keysc_device = {
},
};
+/* SDHI */
+static struct resource sdhi0_resources[] = {
+ [0] = {
+ .name = "SDHI0",
+ .start = 0xe6d50000,
+ .end = 0xe6d501ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 96,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device sdhi0_device = {
+ .name = "sh_mobile_sdhi",
+ .num_resources = ARRAY_SIZE(sdhi0_resources),
+ .resource = sdhi0_resources,
+ .id = 0,
+};
+
+static struct resource sdhi1_resources[] = {
+ [0] = {
+ .name = "SDHI1",
+ .start = 0xe6d60000,
+ .end = 0xe6d601ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 100,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device sdhi1_device = {
+ .name = "sh_mobile_sdhi",
+ .num_resources = ARRAY_SIZE(sdhi1_resources),
+ .resource = sdhi1_resources,
+ .id = 1,
+};
+
static struct platform_device *g4evm_devices[] __initdata = {
&nor_flash_device,
&usb_host_device,
&keysc_device,
+ &sdhi0_device,
+ &sdhi1_device,
};
static struct map_desc g4evm_io_desc[] __initdata = {
@@ -197,6 +266,36 @@ static void __init g4evm_map_io(void)
shmobile_setup_console();
}
+#define GPIO_SDHID0_D0 0xe60520fc
+#define GPIO_SDHID0_D1 0xe60520fd
+#define GPIO_SDHID0_D2 0xe60520fe
+#define GPIO_SDHID0_D3 0xe60520ff
+#define GPIO_SDHICMD0 0xe6052100
+
+#define GPIO_SDHID1_D0 0xe6052103
+#define GPIO_SDHID1_D1 0xe6052104
+#define GPIO_SDHID1_D2 0xe6052105
+#define GPIO_SDHID1_D3 0xe6052106
+#define GPIO_SDHICMD1 0xe6052107
+
+/*
+ * FIXME !!
+ *
+ * gpio_pull_up is a quick hack.
+ *
+ * The current gpio framework doesn't have
+ * a method to control only pull up/down/free.
+ * This function should be replaced by a proper gpio function.
+ */
+static void __init gpio_pull_up(u32 addr)
+{
+ u8 data = __raw_readb(addr);
+
+ data &= 0x0F;
+ data |= 0xC0;
+ __raw_writeb(data, addr);
+}
+
static void __init g4evm_init(void)
{
sh7377_pinmux_init();
@@ -253,6 +352,34 @@ static void __init g4evm_init(void)
gpio_request(GPIO_FN_PORT71_KEYIN5_PU, NULL);
gpio_request(GPIO_FN_PORT72_KEYIN6_PU, NULL);
+ /* SDHI0 */
+ gpio_request(GPIO_FN_SDHICLK0, NULL);
+ gpio_request(GPIO_FN_SDHICD0, NULL);
+ gpio_request(GPIO_FN_SDHID0_0, NULL);
+ gpio_request(GPIO_FN_SDHID0_1, NULL);
+ gpio_request(GPIO_FN_SDHID0_2, NULL);
+ gpio_request(GPIO_FN_SDHID0_3, NULL);
+ gpio_request(GPIO_FN_SDHICMD0, NULL);
+ gpio_request(GPIO_FN_SDHIWP0, NULL);
+ gpio_pull_up(GPIO_SDHID0_D0);
+ gpio_pull_up(GPIO_SDHID0_D1);
+ gpio_pull_up(GPIO_SDHID0_D2);
+ gpio_pull_up(GPIO_SDHID0_D3);
+ gpio_pull_up(GPIO_SDHICMD0);
+
+ /* SDHI1 */
+ gpio_request(GPIO_FN_SDHICLK1, NULL);
+ gpio_request(GPIO_FN_SDHID1_0, NULL);
+ gpio_request(GPIO_FN_SDHID1_1, NULL);
+ gpio_request(GPIO_FN_SDHID1_2, NULL);
+ gpio_request(GPIO_FN_SDHID1_3, NULL);
+ gpio_request(GPIO_FN_SDHICMD1, NULL);
+ gpio_pull_up(GPIO_SDHID1_D0);
+ gpio_pull_up(GPIO_SDHID1_D1);
+ gpio_pull_up(GPIO_SDHID1_D2);
+ gpio_pull_up(GPIO_SDHID1_D3);
+ gpio_pull_up(GPIO_SDHICMD1);
+
sh7377_add_standard_devices();
platform_add_devices(g4evm_devices, ARRAY_SIZE(g4evm_devices));
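The gpio_pull_up() quick hack above amounts to a read-modify-write on each port control byte: keep the low nibble (the function select) and force the upper bits that enable the pull-up. A standalone sketch of that operation, with the masks treated as illustrative board-specific values:

#include <stdint.h>
#include <stdio.h>

static uint8_t pull_up(uint8_t portcr)
{
	portcr &= 0x0F;		/* preserve the function-select nibble */
	portcr |= 0xC0;		/* force the pull-up enable bits */
	return portcr;
}

int main(void)
{
	printf("0x%02x -> 0x%02x\n", 0x2A, pull_up(0x2A));
	return 0;
}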
diff --git a/arch/arm/mach-shmobile/include/mach/irqs.h b/arch/arm/mach-shmobile/include/mach/irqs.h
index 5179b72e1ee3..51b4a6ab6c60 100644
--- a/arch/arm/mach-shmobile/include/mach/irqs.h
+++ b/arch/arm/mach-shmobile/include/mach/irqs.h
@@ -4,7 +4,13 @@
#define NR_IRQS 512
#define NR_IRQS_LEGACY 8
+/* INTCA */
#define evt2irq(evt) (((evt) >> 5) - 16)
#define irq2evt(irq) (((irq) + 16) << 5)
+/* INTCS */
+#define INTCS_VECT_BASE 0x3400
+#define INTCS_VECT(n, vect) INTC_VECT((n), INTCS_VECT_BASE + (vect))
+#define intcs_evt2irq(evt) evt2irq(INTCS_VECT_BASE + (evt))
+
#endif /* __ASM_MACH_IRQS_H */
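The new INTCS helpers reuse the existing event-code arithmetic with a 0x3400 offset. As a quick standalone check of the numbers (illustrative, not kernel code):

#include <stdio.h>

#define INTCS_VECT_BASE	0x3400
#define evt2irq(evt)		(((evt) >> 5) - 16)
#define intcs_evt2irq(evt)	evt2irq(INTCS_VECT_BASE + (evt))

int main(void)
{
	/* INTCA vector 0x0200 (IRQ0A) maps to Linux irq 0 */
	printf("evt2irq(0x0200)       = %d\n", evt2irq(0x0200));
	/* INTCS vector 0x0700 is offset by INTCS_VECT_BASE before conversion */
	printf("intcs_evt2irq(0x0700) = %d\n", intcs_evt2irq(0x0700));
	return 0;
}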
diff --git a/arch/arm/mach-shmobile/include/mach/memory.h b/arch/arm/mach-shmobile/include/mach/memory.h
index e188183f4dce..377584e57e03 100644
--- a/arch/arm/mach-shmobile/include/mach/memory.h
+++ b/arch/arm/mach-shmobile/include/mach/memory.h
@@ -4,4 +4,7 @@
#define PHYS_OFFSET UL(CONFIG_MEMORY_START)
#define MEM_SIZE UL(CONFIG_MEMORY_SIZE)
+/* DMA memory at 0xf6000000 - 0xffdfffff */
+#define CONSISTENT_DMA_SIZE (158 << 20)
+
#endif /* __ASM_MACH_MEMORY_H */
diff --git a/arch/arm/mach-shmobile/include/mach/vmalloc.h b/arch/arm/mach-shmobile/include/mach/vmalloc.h
index fb3c4f1ab252..4aecf6e3a859 100644
--- a/arch/arm/mach-shmobile/include/mach/vmalloc.h
+++ b/arch/arm/mach-shmobile/include/mach/vmalloc.h
@@ -1,6 +1,7 @@
#ifndef __ASM_MACH_VMALLOC_H
#define __ASM_MACH_VMALLOC_H
-#define VMALLOC_END (PAGE_OFFSET + 0x24000000)
+/* Vmalloc at ... - 0xe5ffffff */
+#define VMALLOC_END 0xe6000000
#endif /* __ASM_MACH_VMALLOC_H */
diff --git a/arch/arm/mach-shmobile/intc-sh7367.c b/arch/arm/mach-shmobile/intc-sh7367.c
index 5ff70cadfc32..1a20c489b20d 100644
--- a/arch/arm/mach-shmobile/intc-sh7367.c
+++ b/arch/arm/mach-shmobile/intc-sh7367.c
@@ -75,7 +75,7 @@ enum {
ETM11, ARM11, USBHS, FLCTL, IIC1
};
-static struct intc_vect intca_vectors[] = {
+static struct intc_vect intca_vectors[] __initdata = {
INTC_VECT(IRQ0A, 0x0200), INTC_VECT(IRQ1A, 0x0220),
INTC_VECT(IRQ2A, 0x0240), INTC_VECT(IRQ3A, 0x0260),
INTC_VECT(IRQ4A, 0x0280), INTC_VECT(IRQ5A, 0x02a0),
@@ -162,7 +162,7 @@ static struct intc_group intca_groups[] __initdata = {
INTC_GROUP(IIC1, IIC1_ALI1, IIC1_TACKI1, IIC1_WAITI1, IIC1_DTEI1),
};
-static struct intc_mask_reg intca_mask_registers[] = {
+static struct intc_mask_reg intca_mask_registers[] __initdata = {
{ 0xe6900040, 0xe6900060, 8, /* INTMSK00A / INTMSKCLR00A */
{ IRQ0A, IRQ1A, IRQ2A, IRQ3A, IRQ4A, IRQ5A, IRQ6A, IRQ7A } },
{ 0xe6900044, 0xe6900064, 8, /* INTMSK10A / INTMSKCLR10A */
@@ -211,7 +211,7 @@ static struct intc_mask_reg intca_mask_registers[] = {
MISTY, CMT3, RWDT1, RWDT0 } },
};
-static struct intc_prio_reg intca_prio_registers[] = {
+static struct intc_prio_reg intca_prio_registers[] __initdata = {
{ 0xe6900010, 0, 32, 4, /* INTPRI00A */
{ IRQ0A, IRQ1A, IRQ2A, IRQ3A, IRQ4A, IRQ5A, IRQ6A, IRQ7A } },
{ 0xe6900014, 0, 32, 4, /* INTPRI10A */
@@ -263,8 +263,178 @@ static struct intc_desc intca_desc __initdata = {
intca_sense_registers, intca_ack_registers),
};
+enum {
+ UNUSED_INTCS = 0,
+
+ INTCS,
+
+ /* interrupt sources INTCS */
+ VIO2_VEU0, VIO2_VEU1, VIO2_VEU2, VIO2_VEU3,
+ VIO3_VOU,
+ RTDMAC_1_DEI0, RTDMAC_1_DEI1, RTDMAC_1_DEI2, RTDMAC_1_DEI3,
+ VIO1_CEU, VIO1_BEU0, VIO1_BEU1, VIO1_BEU2,
+ VPU,
+ SGX530,
+ _2DDMAC_2DDM0, _2DDMAC_2DDM1, _2DDMAC_2DDM2, _2DDMAC_2DDM3,
+ IIC2_ALI2, IIC2_TACKI2, IIC2_WAITI2, IIC2_DTEI2,
+ IPMMU_IPMMUB, IPMMU_IPMMUS,
+ RTDMAC_2_DEI4, RTDMAC_2_DEI5, RTDMAC_2_DADERR,
+ MSIOF,
+ IIC0_ALI0, IIC0_TACKI0, IIC0_WAITI0, IIC0_DTEI0,
+ TMU_TUNI0, TMU_TUNI1, TMU_TUNI2,
+ CMT,
+ TSIF,
+ IPMMUI,
+ MVI3,
+ ICB,
+ PEP,
+ ASA,
+ BEM,
+ VE2HO,
+ HQE,
+ JPEG,
+ LCDC,
+
+ /* interrupt groups INTCS */
+ _2DDMAC, RTDMAC_1, RTDMAC_2, VEU, BEU, IIC0, IPMMU, IIC2,
+};
+
+static struct intc_vect intcs_vectors[] = {
+ INTCS_VECT(VIO2_VEU0, 0x700), INTCS_VECT(VIO2_VEU1, 0x720),
+ INTCS_VECT(VIO2_VEU2, 0x740), INTCS_VECT(VIO2_VEU3, 0x760),
+ INTCS_VECT(VIO3_VOU, 0x780),
+ INTCS_VECT(RTDMAC_1_DEI0, 0x800), INTCS_VECT(RTDMAC_1_DEI1, 0x820),
+ INTCS_VECT(RTDMAC_1_DEI2, 0x840), INTCS_VECT(RTDMAC_1_DEI3, 0x860),
+ INTCS_VECT(VIO1_CEU, 0x880), INTCS_VECT(VIO1_BEU0, 0x8a0),
+ INTCS_VECT(VIO1_BEU1, 0x8c0), INTCS_VECT(VIO1_BEU2, 0x8e0),
+ INTCS_VECT(VPU, 0x980),
+ INTCS_VECT(SGX530, 0x9e0),
+ INTCS_VECT(_2DDMAC_2DDM0, 0xa00), INTCS_VECT(_2DDMAC_2DDM1, 0xa20),
+ INTCS_VECT(_2DDMAC_2DDM2, 0xa40), INTCS_VECT(_2DDMAC_2DDM3, 0xa60),
+ INTCS_VECT(IIC2_ALI2, 0xa80), INTCS_VECT(IIC2_TACKI2, 0xaa0),
+ INTCS_VECT(IIC2_WAITI2, 0xac0), INTCS_VECT(IIC2_DTEI2, 0xae0),
+ INTCS_VECT(IPMMU_IPMMUB, 0xb20), INTCS_VECT(IPMMU_IPMMUS, 0xb60),
+ INTCS_VECT(RTDMAC_2_DEI4, 0xb80), INTCS_VECT(RTDMAC_2_DEI5, 0xba0),
+ INTCS_VECT(RTDMAC_2_DADERR, 0xbc0),
+ INTCS_VECT(MSIOF, 0xd20),
+ INTCS_VECT(IIC0_ALI0, 0xe00), INTCS_VECT(IIC0_TACKI0, 0xe20),
+ INTCS_VECT(IIC0_WAITI0, 0xe40), INTCS_VECT(IIC0_DTEI0, 0xe60),
+ INTCS_VECT(TMU_TUNI0, 0xe80), INTCS_VECT(TMU_TUNI1, 0xea0),
+ INTCS_VECT(TMU_TUNI2, 0xec0),
+ INTCS_VECT(CMT, 0xf00),
+ INTCS_VECT(TSIF, 0xf20),
+ INTCS_VECT(IPMMUI, 0xf60),
+ INTCS_VECT(MVI3, 0x420),
+ INTCS_VECT(ICB, 0x480),
+ INTCS_VECT(PEP, 0x4a0),
+ INTCS_VECT(ASA, 0x4c0),
+ INTCS_VECT(BEM, 0x4e0),
+ INTCS_VECT(VE2HO, 0x520),
+ INTCS_VECT(HQE, 0x540),
+ INTCS_VECT(JPEG, 0x560),
+ INTCS_VECT(LCDC, 0x580),
+
+ INTC_VECT(INTCS, 0xf80),
+};
+
+static struct intc_group intcs_groups[] __initdata = {
+ INTC_GROUP(_2DDMAC, _2DDMAC_2DDM0, _2DDMAC_2DDM1,
+ _2DDMAC_2DDM2, _2DDMAC_2DDM3),
+ INTC_GROUP(RTDMAC_1, RTDMAC_1_DEI0, RTDMAC_1_DEI1,
+ RTDMAC_1_DEI2, RTDMAC_1_DEI3),
+ INTC_GROUP(RTDMAC_2, RTDMAC_2_DEI4, RTDMAC_2_DEI5, RTDMAC_2_DADERR),
+ INTC_GROUP(VEU, VIO2_VEU0, VIO2_VEU1, VIO2_VEU2, VIO2_VEU3),
+ INTC_GROUP(BEU, VIO1_BEU0, VIO1_BEU1, VIO1_BEU2),
+ INTC_GROUP(IIC0, IIC0_ALI0, IIC0_TACKI0, IIC0_WAITI0, IIC0_DTEI0),
+ INTC_GROUP(IPMMU, IPMMU_IPMMUS, IPMMU_IPMMUB),
+ INTC_GROUP(IIC2, IIC2_ALI2, IIC2_TACKI2, IIC2_WAITI2, IIC2_DTEI2),
+};
+
+static struct intc_mask_reg intcs_mask_registers[] = {
+ { 0xffd20184, 0xffd201c4, 8, /* IMR1SA / IMCR1SA */
+ { VIO1_BEU2, VIO1_BEU1, VIO1_BEU0, VIO1_CEU,
+ VIO2_VEU3, VIO2_VEU2, VIO2_VEU1, VIO2_VEU0 } },
+ { 0xffd20188, 0xffd201c8, 8, /* IMR2SA / IMCR2SA */
+ { VIO3_VOU, 0, VE2HO, VPU,
+ 0, 0, 0, 0 } },
+ { 0xffd2018c, 0xffd201cc, 8, /* IMR3SA / IMCR3SA */
+ { _2DDMAC_2DDM3, _2DDMAC_2DDM2, _2DDMAC_2DDM1, _2DDMAC_2DDM0,
+ BEM, ASA, PEP, ICB } },
+ { 0xffd20190, 0xffd201d0, 8, /* IMR4SA / IMCR4SA */
+ { 0, 0, MVI3, 0,
+ JPEG, HQE, 0, LCDC } },
+ { 0xffd20194, 0xffd201d4, 8, /* IMR5SA / IMCR5SA */
+ { 0, RTDMAC_2_DADERR, RTDMAC_2_DEI5, RTDMAC_2_DEI4,
+ RTDMAC_1_DEI3, RTDMAC_1_DEI2, RTDMAC_1_DEI1, RTDMAC_1_DEI0 } },
+ { 0xffd20198, 0xffd201d8, 8, /* IMR6SA / IMCR6SA */
+ { 0, 0, MSIOF, 0,
+ SGX530, 0, 0, 0 } },
+ { 0xffd2019c, 0xffd201dc, 8, /* IMR7SA / IMCR7SA */
+ { 0, TMU_TUNI2, TMU_TUNI1, TMU_TUNI0,
+ 0, 0, 0, 0 } },
+ { 0xffd201a4, 0xffd201e4, 8, /* IMR9SA / IMCR9SA */
+ { 0, 0, 0, CMT,
+ IIC2_DTEI2, IIC2_WAITI2, IIC2_TACKI2, IIC2_ALI2 } },
+ { 0xffd201a8, 0xffd201e8, 8, /* IMR10SA / IMCR10SA */
+ { IPMMU_IPMMUS, 0, IPMMU_IPMMUB, 0,
+ 0, 0, 0, 0 } },
+ { 0xffd201ac, 0xffd201ec, 8, /* IMR11SA / IMCR11SA */
+ { IIC0_DTEI0, IIC0_WAITI0, IIC0_TACKI0, IIC0_ALI0,
+ 0, 0, IPMMUI, TSIF } },
+ { 0xffd20104, 0, 16, /* INTAMASK */
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, INTCS } },
+};
+
+/* Priority is needed for INTCA to receive the INTCS interrupt */
+static struct intc_prio_reg intcs_prio_registers[] = {
+ { 0xffd20000, 0, 16, 4, /* IPRAS */ { 0, MVI3, _2DDMAC, ICB } },
+ { 0xffd20004, 0, 16, 4, /* IPRBS */ { JPEG, LCDC, 0, 0 } },
+ { 0xffd20008, 0, 16, 4, /* IPRCS */ { BBIF2, 0, 0, 0 } },
+ { 0xffd20010, 0, 16, 4, /* IPRES */ { RTDMAC_1, VIO1_CEU, 0, VPU } },
+ { 0xffd20014, 0, 16, 4, /* IPRFS */ { 0, RTDMAC_2, 0, CMT } },
+ { 0xffd20018, 0, 16, 4, /* IPRGS */ { TMU_TUNI0, TMU_TUNI1,
+ TMU_TUNI2, 0 } },
+ { 0xffd2001c, 0, 16, 4, /* IPRHS */ { 0, VIO3_VOU, VEU, BEU } },
+ { 0xffd20020, 0, 16, 4, /* IPRIS */ { 0, MSIOF, TSIF, IIC0 } },
+ { 0xffd20024, 0, 16, 4, /* IPRJS */ { 0, SGX530, 0, 0 } },
+ { 0xffd20028, 0, 16, 4, /* IPRKS */ { BEM, ASA, IPMMUI, PEP } },
+ { 0xffd2002c, 0, 16, 4, /* IPRLS */ { IPMMU, 0, VE2HO, HQE } },
+ { 0xffd20030, 0, 16, 4, /* IPRMS */ { IIC2, 0, 0, 0 } },
+};
+
+static struct resource intcs_resources[] __initdata = {
+ [0] = {
+ .start = 0xffd20000,
+ .end = 0xffd2ffff,
+ .flags = IORESOURCE_MEM,
+ }
+};
+
+static struct intc_desc intcs_desc __initdata = {
+ .name = "sh7367-intcs",
+ .resource = intcs_resources,
+ .num_resources = ARRAY_SIZE(intcs_resources),
+ .hw = INTC_HW_DESC(intcs_vectors, intcs_groups, intcs_mask_registers,
+ intcs_prio_registers, NULL, NULL),
+};
+
+static void intcs_demux(unsigned int irq, struct irq_desc *desc)
+{
+ void __iomem *reg = (void *)get_irq_data(irq);
+ unsigned int evtcodeas = ioread32(reg);
+
+ generic_handle_irq(intcs_evt2irq(evtcodeas));
+}
+
void __init sh7367_init_irq(void)
{
- /* INTCA */
+ void __iomem *intevtsa = ioremap_nocache(0xffd20100, PAGE_SIZE);
+
register_intc_controller(&intca_desc);
+ register_intc_controller(&intcs_desc);
+
+ /* demux using INTEVTSA */
+ set_irq_data(evt2irq(0xf80), (void *)intevtsa);
+ set_irq_chained_handler(evt2irq(0xf80), intcs_demux);
}
diff --git a/arch/arm/mach-shmobile/intc-sh7372.c b/arch/arm/mach-shmobile/intc-sh7372.c
index 3ce9d9bd5899..cfba59f057ec 100644
--- a/arch/arm/mach-shmobile/intc-sh7372.c
+++ b/arch/arm/mach-shmobile/intc-sh7372.c
@@ -319,17 +319,17 @@ static struct intc_prio_reg intca_prio_registers[] __initdata = {
{ 0xe6950034, 0, 16, 4, /* IPRNA3 */ { AP_ARM2, 0, 0, 0 } },
{ 0xe6950038, 0, 16, 4, /* IPROA3 */ { MFIS2, CPORTR2S,
CMT14, CMT15 } },
- { 0xe694003c, 0, 16, 4, /* IPRPA3 */ { 0, 0,
+ { 0xe695003c, 0, 16, 4, /* IPRPA3 */ { 0, 0,
MMC_MMC_ERR, MMC_MMC_NOR } },
- { 0xe6940040, 0, 16, 4, /* IPRQA3 */ { IIC4_ALI4, IIC4_TACKI4,
+ { 0xe6950040, 0, 16, 4, /* IPRQA3 */ { IIC4_ALI4, IIC4_TACKI4,
IIC4_WAITI4, IIC4_DTEI4 } },
- { 0xe6940044, 0, 16, 4, /* IPRRA3 */ { IIC3_ALI3, IIC3_TACKI3,
+ { 0xe6950044, 0, 16, 4, /* IPRRA3 */ { IIC3_ALI3, IIC3_TACKI3,
IIC3_WAITI3, IIC3_DTEI3 } },
- { 0xe6940048, 0, 16, 4, /* IPRSA3 */ { 0/*ERI*/, 0/*RXI*/,
+ { 0xe6950048, 0, 16, 4, /* IPRSA3 */ { 0/*ERI*/, 0/*RXI*/,
0/*TXI*/, 0/*TEI*/} },
- { 0xe694004c, 0, 16, 4, /* IPRTA3 */ { USB0_USB0I1, USB0_USB0I0,
+ { 0xe695004c, 0, 16, 4, /* IPRTA3 */ { USB0_USB0I1, USB0_USB0I0,
USB1_USB1I1, USB1_USB1I0 } },
- { 0xe6940050, 0, 16, 4, /* IPRUA3 */ { USBHSDMAC1_USHDMI, 0, 0, 0 } },
+ { 0xe6950050, 0, 16, 4, /* IPRUA3 */ { USBHSDMAC1_USHDMI, 0, 0, 0 } },
};
static struct intc_sense_reg intca_sense_registers[] __initdata = {
diff --git a/arch/arm/mach-shmobile/intc-sh7377.c b/arch/arm/mach-shmobile/intc-sh7377.c
index 5c781e2d1897..2cdeb8ccd821 100644
--- a/arch/arm/mach-shmobile/intc-sh7377.c
+++ b/arch/arm/mach-shmobile/intc-sh7377.c
@@ -90,7 +90,7 @@ enum {
ICUSB, ICUDMC
};
-static struct intc_vect intca_vectors[] = {
+static struct intc_vect intca_vectors[] __initdata = {
INTC_VECT(IRQ0A, 0x0200), INTC_VECT(IRQ1A, 0x0220),
INTC_VECT(IRQ2A, 0x0240), INTC_VECT(IRQ3A, 0x0260),
INTC_VECT(IRQ4A, 0x0280), INTC_VECT(IRQ5A, 0x02a0),
@@ -202,7 +202,7 @@ static struct intc_group intca_groups[] __initdata = {
INTC_GROUP(ICUDMC, ICUDMC_ICUDMC1, ICUDMC_ICUDMC2),
};
-static struct intc_mask_reg intca_mask_registers[] = {
+static struct intc_mask_reg intca_mask_registers[] __initdata = {
{ 0xe6900040, 0xe6900060, 8, /* INTMSK00A / INTMSKCLR00A */
{ IRQ0A, IRQ1A, IRQ2A, IRQ3A, IRQ4A, IRQ5A, IRQ6A, IRQ7A } },
{ 0xe6900044, 0xe6900064, 8, /* INTMSK10A / INTMSKCLR10A */
@@ -272,7 +272,7 @@ static struct intc_mask_reg intca_mask_registers[] = {
SCIFA6, 0, 0, 0 } },
};
-static struct intc_prio_reg intca_prio_registers[] = {
+static struct intc_prio_reg intca_prio_registers[] __initdata = {
{ 0xe6900010, 0, 32, 4, /* INTPRI00A */
{ IRQ0A, IRQ1A, IRQ2A, IRQ3A, IRQ4A, IRQ5A, IRQ6A, IRQ7A } },
{ 0xe6900014, 0, 32, 4, /* INTPRI10A */
@@ -346,7 +346,301 @@ static struct intc_desc intca_desc __initdata = {
intca_sense_registers, intca_ack_registers),
};
+/* these macros ignore entries which are also in INTCA */
+#define __IGNORE(a...)
+#define __IGNORE0(a...) 0
+
+enum {
+ UNUSED_INTCS = 0,
+
+ INTCS,
+
+ /* interrupt sources INTCS */
+ VEU_VEU0, VEU_VEU1, VEU_VEU2, VEU_VEU3,
+ RTDMAC1_1_DEI0, RTDMAC1_1_DEI1, RTDMAC1_1_DEI2, RTDMAC1_1_DEI3,
+ CEU,
+ BEU_BEU0, BEU_BEU1, BEU_BEU2,
+ __IGNORE(MFI)
+ __IGNORE(BBIF2)
+ VPU,
+ TSIF1,
+ __IGNORE(SGX540)
+ _2DDMAC,
+ IIC2_ALI2, IIC2_TACKI2, IIC2_WAITI2, IIC2_DTEI2,
+ IPMMU_IPMMUR, IPMMU_IPMMUR2,
+ RTDMAC1_2_DEI4, RTDMAC1_2_DEI5, RTDMAC1_2_DADERR,
+ __IGNORE(KEYSC)
+ __IGNORE(TTI20)
+ __IGNORE(MSIOF)
+ IIC0_ALI0, IIC0_TACKI0, IIC0_WAITI0, IIC0_DTEI0,
+ TMU_TUNI0, TMU_TUNI1, TMU_TUNI2,
+ CMT0,
+ TSIF0,
+ __IGNORE(CMT2)
+ LMB,
+ __IGNORE(MSUG)
+ __IGNORE(MSU_MSU, MSU_MSU2)
+ __IGNORE(CTI)
+ MVI3,
+ __IGNORE(RWDT0)
+ __IGNORE(RWDT1)
+ ICB,
+ PEP,
+ ASA,
+ __IGNORE(_2DG)
+ HQE,
+ JPU,
+ LCDC0,
+ __IGNORE(LCRC)
+ RTDMAC2_1_DEI0, RTDMAC2_1_DEI1, RTDMAC2_1_DEI2, RTDMAC2_1_DEI3,
+ RTDMAC2_2_DEI4, RTDMAC2_2_DEI5, RTDMAC2_2_DADERR,
+ FRC,
+ LCDC1,
+ CSIRX,
+ DSITX_DSITX0, DSITX_DSITX1,
+ __IGNORE(SPU2_SPU0, SPU2_SPU1)
+ __IGNORE(FSI)
+ __IGNORE(FMSI)
+ __IGNORE(SCUV)
+ TMU1_TUNI10, TMU1_TUNI11, TMU1_TUNI12,
+ TSIF2,
+ CMT4,
+ __IGNORE(MFIS2)
+ CPORTS2R,
+
+ /* interrupt groups INTCS */
+ RTDMAC1_1, RTDMAC1_2, VEU, BEU, IIC0, __IGNORE(MSU) IPMMU,
+ IIC2, RTDMAC2_1, RTDMAC2_2, DSITX, __IGNORE(SPU2) TMU1,
+};
+
+#define INTCS_INTVECT 0x0F80
+static struct intc_vect intcs_vectors[] __initdata = {
+ INTCS_VECT(VEU_VEU0, 0x0700), INTCS_VECT(VEU_VEU1, 0x0720),
+ INTCS_VECT(VEU_VEU2, 0x0740), INTCS_VECT(VEU_VEU3, 0x0760),
+ INTCS_VECT(RTDMAC1_1_DEI0, 0x0800), INTCS_VECT(RTDMAC1_1_DEI1, 0x0820),
+ INTCS_VECT(RTDMAC1_1_DEI2, 0x0840), INTCS_VECT(RTDMAC1_1_DEI3, 0x0860),
+ INTCS_VECT(CEU, 0x0880),
+ INTCS_VECT(BEU_BEU0, 0x08A0),
+ INTCS_VECT(BEU_BEU1, 0x08C0),
+ INTCS_VECT(BEU_BEU2, 0x08E0),
+ __IGNORE(INTCS_VECT(MFI, 0x0900))
+ __IGNORE(INTCS_VECT(BBIF2, 0x0960))
+ INTCS_VECT(VPU, 0x0980),
+ INTCS_VECT(TSIF1, 0x09A0),
+ __IGNORE(INTCS_VECT(SGX540, 0x09E0))
+ INTCS_VECT(_2DDMAC, 0x0A00),
+ INTCS_VECT(IIC2_ALI2, 0x0A80), INTCS_VECT(IIC2_TACKI2, 0x0AA0),
+ INTCS_VECT(IIC2_WAITI2, 0x0AC0), INTCS_VECT(IIC2_DTEI2, 0x0AE0),
+ INTCS_VECT(IPMMU_IPMMUR, 0x0B00), INTCS_VECT(IPMMU_IPMMUR2, 0x0B20),
+ INTCS_VECT(RTDMAC1_2_DEI4, 0x0B80),
+ INTCS_VECT(RTDMAC1_2_DEI5, 0x0BA0),
+ INTCS_VECT(RTDMAC1_2_DADERR, 0x0BC0),
+ __IGNORE(INTCS_VECT(KEYSC, 0x0BE0))
+ __IGNORE(INTCS_VECT(TTI20, 0x0C80))
+ __IGNORE(INTCS_VECT(MSIOF, 0x0D20))
+ INTCS_VECT(IIC0_ALI0, 0x0E00), INTCS_VECT(IIC0_TACKI0, 0x0E20),
+ INTCS_VECT(IIC0_WAITI0, 0x0E40), INTCS_VECT(IIC0_DTEI0, 0x0E60),
+ INTCS_VECT(TMU_TUNI0, 0x0E80),
+ INTCS_VECT(TMU_TUNI1, 0x0EA0),
+ INTCS_VECT(TMU_TUNI2, 0x0EC0),
+ INTCS_VECT(CMT0, 0x0F00),
+ INTCS_VECT(TSIF0, 0x0F20),
+ __IGNORE(INTCS_VECT(CMT2, 0x0F40))
+ INTCS_VECT(LMB, 0x0F60),
+ __IGNORE(INTCS_VECT(MSUG, 0x0F80))
+ __IGNORE(INTCS_VECT(MSU_MSU, 0x0FA0))
+ __IGNORE(INTCS_VECT(MSU_MSU2, 0x0FC0))
+ __IGNORE(INTCS_VECT(CTI, 0x0400))
+ INTCS_VECT(MVI3, 0x0420),
+ __IGNORE(INTCS_VECT(RWDT0, 0x0440))
+ __IGNORE(INTCS_VECT(RWDT1, 0x0460))
+ INTCS_VECT(ICB, 0x0480),
+ INTCS_VECT(PEP, 0x04A0),
+ INTCS_VECT(ASA, 0x04C0),
+ __IGNORE(INTCS_VECT(_2DG, 0x04E0))
+ INTCS_VECT(HQE, 0x0540),
+ INTCS_VECT(JPU, 0x0560),
+ INTCS_VECT(LCDC0, 0x0580),
+ __IGNORE(INTCS_VECT(LCRC, 0x05A0))
+ INTCS_VECT(RTDMAC2_1_DEI0, 0x1300), INTCS_VECT(RTDMAC2_1_DEI1, 0x1320),
+ INTCS_VECT(RTDMAC2_1_DEI2, 0x1340), INTCS_VECT(RTDMAC2_1_DEI3, 0x1360),
+ INTCS_VECT(RTDMAC2_2_DEI4, 0x1380), INTCS_VECT(RTDMAC2_2_DEI5, 0x13A0),
+ INTCS_VECT(RTDMAC2_2_DADERR, 0x13C0),
+ INTCS_VECT(FRC, 0x1700),
+ INTCS_VECT(LCDC1, 0x1780),
+ INTCS_VECT(CSIRX, 0x17A0),
+ INTCS_VECT(DSITX_DSITX0, 0x17C0), INTCS_VECT(DSITX_DSITX1, 0x17E0),
+ __IGNORE(INTCS_VECT(SPU2_SPU0, 0x1800))
+ __IGNORE(INTCS_VECT(SPU2_SPU1, 0x1820))
+ __IGNORE(INTCS_VECT(FSI, 0x1840))
+ __IGNORE(INTCS_VECT(FMSI, 0x1860))
+ __IGNORE(INTCS_VECT(SCUV, 0x1880))
+ INTCS_VECT(TMU1_TUNI10, 0x1900), INTCS_VECT(TMU1_TUNI11, 0x1920),
+ INTCS_VECT(TMU1_TUNI12, 0x1940),
+ INTCS_VECT(TSIF2, 0x1960),
+ INTCS_VECT(CMT4, 0x1980),
+ __IGNORE(INTCS_VECT(MFIS2, 0x1A00))
+ INTCS_VECT(CPORTS2R, 0x1A20),
+
+ INTC_VECT(INTCS, INTCS_INTVECT),
+};
+
+static struct intc_group intcs_groups[] __initdata = {
+ INTC_GROUP(RTDMAC1_1,
+ RTDMAC1_1_DEI0, RTDMAC1_1_DEI1,
+ RTDMAC1_1_DEI2, RTDMAC1_1_DEI3),
+ INTC_GROUP(RTDMAC1_2,
+ RTDMAC1_2_DEI4, RTDMAC1_2_DEI5, RTDMAC1_2_DADERR),
+ INTC_GROUP(VEU, VEU_VEU0, VEU_VEU1, VEU_VEU2, VEU_VEU3),
+ INTC_GROUP(BEU, BEU_BEU0, BEU_BEU1, BEU_BEU2),
+ INTC_GROUP(IIC0, IIC0_ALI0, IIC0_TACKI0, IIC0_WAITI0, IIC0_DTEI0),
+ __IGNORE(INTC_GROUP(MSU, MSU_MSU, MSU_MSU2))
+ INTC_GROUP(IPMMU, IPMMU_IPMMUR, IPMMU_IPMMUR2),
+ INTC_GROUP(IIC2, IIC2_ALI2, IIC2_TACKI2, IIC2_WAITI2, IIC2_DTEI2),
+ INTC_GROUP(RTDMAC2_1,
+ RTDMAC2_1_DEI0, RTDMAC2_1_DEI1,
+ RTDMAC2_1_DEI2, RTDMAC2_1_DEI3),
+ INTC_GROUP(RTDMAC2_2, RTDMAC2_2_DEI4, RTDMAC2_2_DEI5, RTDMAC2_2_DADERR),
+ INTC_GROUP(DSITX, DSITX_DSITX0, DSITX_DSITX1),
+ __IGNORE(INTC_GROUP(SPU2, SPU2_SPU0, SPU2_SPU1))
+ INTC_GROUP(TMU1, TMU1_TUNI10, TMU1_TUNI11, TMU1_TUNI12),
+};
+
+static struct intc_mask_reg intcs_mask_registers[] __initdata = {
+ { 0xE6940184, 0xE69401C4, 8, /* IMR1AS / IMCR1AS */
+ { BEU_BEU2, BEU_BEU1, BEU_BEU0, CEU,
+ VEU_VEU3, VEU_VEU2, VEU_VEU1, VEU_VEU0 } },
+ { 0xE6940188, 0xE69401C8, 8, /* IMR2AS / IMCR2AS */
+ { 0, 0, 0, VPU,
+ __IGNORE0(BBIF2), 0, 0, __IGNORE0(MFI) } },
+ { 0xE694018C, 0xE69401CC, 8, /* IMR3AS / IMCR3AS */
+ { 0, 0, 0, _2DDMAC,
+ __IGNORE0(_2DG), ASA, PEP, ICB } },
+ { 0xE6940190, 0xE69401D0, 8, /* IMR4AS / IMCR4AS */
+ { 0, 0, MVI3, __IGNORE0(CTI),
+ JPU, HQE, __IGNORE0(LCRC), LCDC0 } },
+ { 0xE6940194, 0xE69401D4, 8, /* IMR5AS / IMCR5AS */
+ { __IGNORE0(KEYSC), RTDMAC1_2_DADERR, RTDMAC1_2_DEI5, RTDMAC1_2_DEI4,
+ RTDMAC1_1_DEI3, RTDMAC1_1_DEI2, RTDMAC1_1_DEI1, RTDMAC1_1_DEI0 } },
+ __IGNORE({ 0xE6940198, 0xE69401D8, 8, /* IMR6AS / IMCR6AS */
+ { 0, 0, MSIOF, 0,
+ SGX540, 0, TTI20, 0 } })
+ { 0xE694019C, 0xE69401DC, 8, /* IMR7AS / IMCR7AS */
+ { 0, TMU_TUNI2, TMU_TUNI1, TMU_TUNI0,
+ 0, 0, 0, 0 } },
+ __IGNORE({ 0xE69401A0, 0xE69401E0, 8, /* IMR8AS / IMCR8AS */
+ { 0, 0, 0, 0,
+ 0, MSU_MSU, MSU_MSU2, MSUG } })
+ { 0xE69401A4, 0xE69401E4, 8, /* IMR9AS / IMCR9AS */
+ { __IGNORE0(RWDT1), __IGNORE0(RWDT0), __IGNORE0(CMT2), CMT0,
+ IIC2_DTEI2, IIC2_WAITI2, IIC2_TACKI2, IIC2_ALI2 } },
+ { 0xE69401A8, 0xE69401E8, 8, /* IMR10AS / IMCR10AS */
+ { 0, 0, IPMMU_IPMMUR, IPMMU_IPMMUR2,
+ 0, 0, 0, 0 } },
+ { 0xE69401AC, 0xE69401EC, 8, /* IMR11AS / IMCR11AS */
+ { IIC0_DTEI0, IIC0_WAITI0, IIC0_TACKI0, IIC0_ALI0,
+ 0, TSIF1, LMB, TSIF0 } },
+ { 0xE6950180, 0xE69501C0, 8, /* IMR0AS3 / IMCR0AS3 */
+ { RTDMAC2_1_DEI0, RTDMAC2_1_DEI1, RTDMAC2_1_DEI2, RTDMAC2_1_DEI3,
+ RTDMAC2_2_DEI4, RTDMAC2_2_DEI5, RTDMAC2_2_DADERR, 0 } },
+ { 0xE6950190, 0xE69501D0, 8, /* IMR4AS3 / IMCR4AS3 */
+ { FRC, 0, 0, 0,
+ LCDC1, CSIRX, DSITX_DSITX0, DSITX_DSITX1 } },
+ __IGNORE({ 0xE6950194, 0xE69501D4, 8, /* IMR5AS3 / IMCR5AS3 */
+ {SPU2_SPU0, SPU2_SPU1, FSI, FMSI,
+ SCUV, 0, 0, 0 } })
+ { 0xE6950198, 0xE69501D8, 8, /* IMR6AS3 / IMCR6AS3 */
+ { TMU1_TUNI10, TMU1_TUNI11, TMU1_TUNI12, TSIF2,
+ CMT4, 0, 0, 0 } },
+ { 0xE695019C, 0xE69501DC, 8, /* IMR7AS3 / IMCR7AS3 */
+ { __IGNORE0(MFIS2), CPORTS2R, 0, 0,
+ 0, 0, 0, 0 } },
+ { 0xFFD20104, 0, 16, /* INTAMASK */
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, INTCS } }
+};
+
+static struct intc_prio_reg intcs_prio_registers[] __initdata = {
+ /* IPRAS */
+ { 0xFFD20000, 0, 16, 4, { __IGNORE0(CTI), MVI3, _2DDMAC, ICB } },
+ /* IPRBS */
+ { 0xFFD20004, 0, 16, 4, { JPU, LCDC0, 0, __IGNORE0(LCRC) } },
+ /* IPRCS */
+ __IGNORE({ 0xFFD20008, 0, 16, 4, { BBIF2, 0, 0, 0 } })
+ /* IPRES */
+ { 0xFFD20010, 0, 16, 4, { RTDMAC1_1, CEU, __IGNORE0(MFI), VPU } },
+ /* IPRFS */
+ { 0xFFD20014, 0, 16, 4,
+ { __IGNORE0(KEYSC), RTDMAC1_2, __IGNORE0(CMT2), CMT0 } },
+ /* IPRGS */
+ { 0xFFD20018, 0, 16, 4, { TMU_TUNI0, TMU_TUNI1, TMU_TUNI2, TSIF1 } },
+ /* IPRHS */
+ { 0xFFD2001C, 0, 16, 4, { __IGNORE0(TTI20), 0, VEU, BEU } },
+ /* IPRIS */
+ { 0xFFD20020, 0, 16, 4, { 0, __IGNORE0(MSIOF), TSIF0, IIC0 } },
+ /* IPRJS */
+ __IGNORE({ 0xFFD20024, 0, 16, 4, { 0, SGX540, MSUG, MSU } })
+ /* IPRKS */
+ { 0xFFD20028, 0, 16, 4, { __IGNORE0(_2DG), ASA, LMB, PEP } },
+ /* IPRLS */
+ { 0xFFD2002C, 0, 16, 4, { IPMMU, 0, 0, HQE } },
+ /* IPRMS */
+ { 0xFFD20030, 0, 16, 4,
+ { IIC2, 0, __IGNORE0(RWDT1), __IGNORE0(RWDT0) } },
+ /* IPRAS3 */
+ { 0xFFD50000, 0, 16, 4, { RTDMAC2_1, 0, 0, 0 } },
+ /* IPRBS3 */
+ { 0xFFD50004, 0, 16, 4, { RTDMAC2_2, 0, 0, 0 } },
+ /* IPRIS3 */
+ { 0xFFD50020, 0, 16, 4, { FRC, 0, 0, 0 } },
+ /* IPRJS3 */
+ { 0xFFD50024, 0, 16, 4, { LCDC1, CSIRX, DSITX, 0 } },
+ /* IPRKS3 */
+ __IGNORE({ 0xFFD50028, 0, 16, 4, { SPU2, 0, FSI, FMSI } })
+ /* IPRLS3 */
+ __IGNORE({ 0xFFD5002C, 0, 16, 4, { SCUV, 0, 0, 0 } })
+ /* IPRMS3 */
+ { 0xFFD50030, 0, 16, 4, { TMU1, 0, 0, TSIF2 } },
+ /* IPRNS3 */
+ { 0xFFD50034, 0, 16, 4, { CMT4, 0, 0, 0 } },
+ /* IPROS3 */
+ { 0xFFD50038, 0, 16, 4, { __IGNORE0(MFIS2), CPORTS2R, 0, 0 } },
+};
+
+static struct resource intcs_resources[] __initdata = {
+ [0] = {
+ .start = 0xffd20000,
+ .end = 0xffd500ff,
+ .flags = IORESOURCE_MEM,
+ }
+};
+
+static struct intc_desc intcs_desc __initdata = {
+ .name = "sh7377-intcs",
+ .resource = intcs_resources,
+ .num_resources = ARRAY_SIZE(intcs_resources),
+ .hw = INTC_HW_DESC(intcs_vectors, intcs_groups,
+ intcs_mask_registers, intcs_prio_registers,
+ NULL, NULL),
+};
+
+static void intcs_demux(unsigned int irq, struct irq_desc *desc)
+{
+ void __iomem *reg = (void *)get_irq_data(irq);
+ unsigned int evtcodeas = ioread32(reg);
+
+ generic_handle_irq(intcs_evt2irq(evtcodeas));
+}
+
+#define INTEVTSA 0xFFD20100
void __init sh7377_init_irq(void)
{
+ void __iomem *intevtsa = ioremap_nocache(INTEVTSA, PAGE_SIZE);
+
register_intc_controller(&intca_desc);
+ register_intc_controller(&intcs_desc);
+
+ /* demux using INTEVTSA */
+ set_irq_data(evt2irq(INTCS_INTVECT), (void *)intevtsa);
+ set_irq_chained_handler(evt2irq(INTCS_INTVECT), intcs_demux);
}
diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c
index 1d1153290f59..b7c5d896e01c 100644
--- a/arch/arm/mach-shmobile/setup-sh7372.c
+++ b/arch/arm/mach-shmobile/setup-sh7372.c
@@ -32,6 +32,7 @@
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
+/* SCIF */
static struct plat_sci_port scif0_platform_data = {
.mapbase = 0xe6c40000,
.flags = UPF_BOOT_AUTOCONF,
@@ -137,6 +138,7 @@ static struct platform_device scif6_device = {
},
};
+/* CMT */
static struct sh_timer_config cmt10_platform_data = {
.name = "CMT10",
.channel_offset = 0x10,
@@ -169,6 +171,49 @@ static struct platform_device cmt10_device = {
.num_resources = ARRAY_SIZE(cmt10_resources),
};
+/* I2C */
+static struct resource iic0_resources[] = {
+ [0] = {
+ .name = "IIC0",
+ .start = 0xFFF20000,
+ .end = 0xFFF20425 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = intcs_evt2irq(0xe00),
+ .end = intcs_evt2irq(0xe60),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device iic0_device = {
+ .name = "i2c-sh_mobile",
+ .id = 0, /* "i2c0" clock */
+ .num_resources = ARRAY_SIZE(iic0_resources),
+ .resource = iic0_resources,
+};
+
+static struct resource iic1_resources[] = {
+ [0] = {
+ .name = "IIC1",
+ .start = 0xE6C20000,
+ .end = 0xE6C20425 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 44,
+ .end = 47,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device iic1_device = {
+ .name = "i2c-sh_mobile",
+ .id = 1, /* "i2c1" clock */
+ .num_resources = ARRAY_SIZE(iic1_resources),
+ .resource = iic1_resources,
+};
+
static struct platform_device *sh7372_early_devices[] __initdata = {
&scif0_device,
&scif1_device,
@@ -178,6 +223,8 @@ static struct platform_device *sh7372_early_devices[] __initdata = {
&scif5_device,
&scif6_device,
&cmt10_device,
+ &iic0_device,
+ &iic1_device,
};
void __init sh7372_add_standard_devices(void)
diff --git a/arch/arm/mach-spear3xx/Kconfig b/arch/arm/mach-spear3xx/Kconfig
new file mode 100644
index 000000000000..20d1317cc486
--- /dev/null
+++ b/arch/arm/mach-spear3xx/Kconfig
@@ -0,0 +1,33 @@
+#
+# SPEAr3XX Machine configuration file
+#
+
+if ARCH_SPEAR3XX
+
+choice
+ prompt "SPEAr3XX Family"
+ default MACH_SPEAR300
+
+config MACH_SPEAR300
+ bool "SPEAr300"
+ help
+ Supports ST SPEAr300 Machine
+
+config MACH_SPEAR310
+ bool "SPEAr310"
+ help
+ Supports ST SPEAr310 Machine
+
+config MACH_SPEAR320
+ bool "SPEAr320"
+ help
+ Supports ST SPEAr320 Machine
+
+endchoice
+
+# Adding SPEAr3XX machine specific configuration files
+source "arch/arm/mach-spear3xx/Kconfig300"
+source "arch/arm/mach-spear3xx/Kconfig310"
+source "arch/arm/mach-spear3xx/Kconfig320"
+
+endif #ARCH_SPEAR3XX
diff --git a/arch/arm/mach-spear3xx/Kconfig300 b/arch/arm/mach-spear3xx/Kconfig300
new file mode 100644
index 000000000000..c519a05b4ab4
--- /dev/null
+++ b/arch/arm/mach-spear3xx/Kconfig300
@@ -0,0 +1,17 @@
+#
+# SPEAr300 machine configuration file
+#
+
+if MACH_SPEAR300
+
+choice
+ prompt "SPEAr300 Boards"
+ default BOARD_SPEAR300_EVB
+
+config BOARD_SPEAR300_EVB
+ bool "SPEAr300 Evaluation Board"
+ help
+ Supports ST SPEAr300 Evaluation Board
+endchoice
+
+endif #MACH_SPEAR300
diff --git a/arch/arm/mach-spear3xx/Kconfig310 b/arch/arm/mach-spear3xx/Kconfig310
new file mode 100644
index 000000000000..60e7442d75bd
--- /dev/null
+++ b/arch/arm/mach-spear3xx/Kconfig310
@@ -0,0 +1,17 @@
+#
+# SPEAr310 machine configuration file
+#
+
+if MACH_SPEAR310
+
+choice
+ prompt "SPEAr310 Boards"
+ default BOARD_SPEAR310_EVB
+
+config BOARD_SPEAR310_EVB
+ bool "SPEAr310 Evaluation Board"
+ help
+ Supports ST SPEAr310 Evaluation Board
+endchoice
+
+endif #MACH_SPEAR310
diff --git a/arch/arm/mach-spear3xx/Kconfig320 b/arch/arm/mach-spear3xx/Kconfig320
new file mode 100644
index 000000000000..1c1d438399b8
--- /dev/null
+++ b/arch/arm/mach-spear3xx/Kconfig320
@@ -0,0 +1,17 @@
+#
+# SPEAr320 machine configuration file
+#
+
+if MACH_SPEAR320
+
+choice
+ prompt "SPEAr320 Boards"
+ default BOARD_SPEAR320_EVB
+
+config BOARD_SPEAR320_EVB
+ bool "SPEAr320 Evaluation Board"
+ help
+ Supports ST SPEAr320 Evaluation Board
+endchoice
+
+endif #MACH_SPEAR320
diff --git a/arch/arm/mach-spear3xx/Makefile b/arch/arm/mach-spear3xx/Makefile
new file mode 100644
index 000000000000..b24862489704
--- /dev/null
+++ b/arch/arm/mach-spear3xx/Makefile
@@ -0,0 +1,26 @@
+#
+# Makefile for SPEAr3XX machine series
+#
+
+# common files
+obj-y += spear3xx.o clock.o
+
+# spear300 specific files
+obj-$(CONFIG_MACH_SPEAR300) += spear300.o
+
+# spear300 boards files
+obj-$(CONFIG_BOARD_SPEAR300_EVB) += spear300_evb.o
+
+
+# spear310 specific files
+obj-$(CONFIG_MACH_SPEAR310) += spear310.o
+
+# spear310 boards files
+obj-$(CONFIG_BOARD_SPEAR310_EVB) += spear310_evb.o
+
+
+# spear320 specific files
+obj-$(CONFIG_MACH_SPEAR320) += spear320.o
+
+# spear320 boards files
+obj-$(CONFIG_BOARD_SPEAR320_EVB) += spear320_evb.o
diff --git a/arch/arm/mach-spear3xx/Makefile.boot b/arch/arm/mach-spear3xx/Makefile.boot
new file mode 100644
index 000000000000..7a1f3c0eadb8
--- /dev/null
+++ b/arch/arm/mach-spear3xx/Makefile.boot
@@ -0,0 +1,3 @@
+zreladdr-y := 0x00008000
+params_phys-y := 0x00000100
+initrd_phys-y := 0x00800000
diff --git a/arch/arm/mach-spear3xx/clock.c b/arch/arm/mach-spear3xx/clock.c
new file mode 100644
index 000000000000..39f6ccf22294
--- /dev/null
+++ b/arch/arm/mach-spear3xx/clock.c
@@ -0,0 +1,389 @@
+/*
+ * arch/arm/mach-spear3xx/clock.c
+ *
+ * SPEAr3xx machines clock framework source file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <mach/misc_regs.h>
+#include <plat/clock.h>
+
+/* root clks */
+/* 32 KHz oscillator clock */
+static struct clk osc_32k_clk = {
+ .flags = ALWAYS_ENABLED,
+ .rate = 32000,
+};
+
+/* 24 MHz oscillator clock */
+static struct clk osc_24m_clk = {
+ .flags = ALWAYS_ENABLED,
+ .rate = 24000000,
+};
+
+/* clock derived from 32 KHz osc clk */
+/* rtc clock */
+static struct clk rtc_clk = {
+ .pclk = &osc_32k_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = RTC_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* clock derived from 24 MHz osc clk */
+/* pll1 configuration structure */
+static struct pll_clk_config pll1_config = {
+ .mode_reg = PLL1_CTR,
+ .cfg_reg = PLL1_FRQ,
+};
+
+/* PLL1 clock */
+static struct clk pll1_clk = {
+ .pclk = &osc_24m_clk,
+ .en_reg = PLL1_CTR,
+ .en_reg_bit = PLL_ENABLE,
+ .recalc = &pll1_clk_recalc,
+ .private_data = &pll1_config,
+};
+
+/* PLL3 48 MHz clock */
+static struct clk pll3_48m_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &osc_24m_clk,
+ .rate = 48000000,
+};
+
+/* watch dog timer clock */
+static struct clk wdt_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &osc_24m_clk,
+ .recalc = &follow_parent,
+};
+
+/* clock derived from pll1 clk */
+/* cpu clock */
+static struct clk cpu_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &pll1_clk,
+ .recalc = &follow_parent,
+};
+
+/* ahb configuration structure */
+static struct bus_clk_config ahb_config = {
+ .reg = CORE_CLK_CFG,
+ .mask = PLL_HCLK_RATIO_MASK,
+ .shift = PLL_HCLK_RATIO_SHIFT,
+};
+
+/* ahb clock */
+static struct clk ahb_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &pll1_clk,
+ .recalc = &bus_clk_recalc,
+ .private_data = &ahb_config,
+};
+
+/* uart configurations */
+static struct aux_clk_config uart_config = {
+ .synth_reg = UART_CLK_SYNT,
+};
+
+/* uart parents */
+static struct pclk_info uart_pclk_info[] = {
+ {
+ .pclk = &pll1_clk,
+ .pclk_mask = AUX_CLK_PLL1_MASK,
+ .scalable = 1,
+ }, {
+ .pclk = &pll3_48m_clk,
+ .pclk_mask = AUX_CLK_PLL3_MASK,
+ .scalable = 0,
+ },
+};
+
+/* uart parent select structure */
+static struct pclk_sel uart_pclk_sel = {
+ .pclk_info = uart_pclk_info,
+ .pclk_count = ARRAY_SIZE(uart_pclk_info),
+ .pclk_sel_reg = PERIP_CLK_CFG,
+ .pclk_sel_mask = UART_CLK_MASK,
+};
+
+/* uart clock */
+static struct clk uart_clk = {
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = UART_CLK_ENB,
+ .pclk_sel = &uart_pclk_sel,
+ .pclk_sel_shift = UART_CLK_SHIFT,
+ .recalc = &aux_clk_recalc,
+ .private_data = &uart_config,
+};
+
+/* firda configurations */
+static struct aux_clk_config firda_config = {
+ .synth_reg = FIRDA_CLK_SYNT,
+};
+
+/* firda parents */
+static struct pclk_info firda_pclk_info[] = {
+ {
+ .pclk = &pll1_clk,
+ .pclk_mask = AUX_CLK_PLL1_MASK,
+ .scalable = 1,
+ }, {
+ .pclk = &pll3_48m_clk,
+ .pclk_mask = AUX_CLK_PLL3_MASK,
+ .scalable = 0,
+ },
+};
+
+/* firda parent select structure */
+static struct pclk_sel firda_pclk_sel = {
+ .pclk_info = firda_pclk_info,
+ .pclk_count = ARRAY_SIZE(firda_pclk_info),
+ .pclk_sel_reg = PERIP_CLK_CFG,
+ .pclk_sel_mask = FIRDA_CLK_MASK,
+};
+
+/* firda clock */
+static struct clk firda_clk = {
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = FIRDA_CLK_ENB,
+ .pclk_sel = &firda_pclk_sel,
+ .pclk_sel_shift = FIRDA_CLK_SHIFT,
+ .recalc = &aux_clk_recalc,
+ .private_data = &firda_config,
+};
+
+/* gpt parents */
+static struct pclk_info gpt_pclk_info[] = {
+ {
+ .pclk = &pll1_clk,
+ .pclk_mask = AUX_CLK_PLL1_MASK,
+ .scalable = 1,
+ }, {
+ .pclk = &pll3_48m_clk,
+ .pclk_mask = AUX_CLK_PLL3_MASK,
+ .scalable = 0,
+ },
+};
+
+/* gpt parent select structure */
+static struct pclk_sel gpt_pclk_sel = {
+ .pclk_info = gpt_pclk_info,
+ .pclk_count = ARRAY_SIZE(gpt_pclk_info),
+ .pclk_sel_reg = PERIP_CLK_CFG,
+ .pclk_sel_mask = GPT_CLK_MASK,
+};
+
+/* gpt0 configurations */
+static struct aux_clk_config gpt0_config = {
+ .synth_reg = PRSC1_CLK_CFG,
+};
+
+/* gpt0 timer clock */
+static struct clk gpt0_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk_sel = &gpt_pclk_sel,
+ .pclk_sel_shift = GPT0_CLK_SHIFT,
+ .recalc = &gpt_clk_recalc,
+ .private_data = &gpt0_config,
+};
+
+/* gpt1 configurations */
+static struct aux_clk_config gpt1_config = {
+ .synth_reg = PRSC2_CLK_CFG,
+};
+
+/* gpt1 timer clock */
+static struct clk gpt1_clk = {
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = GPT1_CLK_ENB,
+ .pclk_sel = &gpt_pclk_sel,
+ .pclk_sel_shift = GPT1_CLK_SHIFT,
+ .recalc = &gpt_clk_recalc,
+ .private_data = &gpt1_config,
+};
+
+/* gpt2 configurations */
+static struct aux_clk_config gpt2_config = {
+ .synth_reg = PRSC3_CLK_CFG,
+};
+
+/* gpt2 timer clock */
+static struct clk gpt2_clk = {
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = GPT2_CLK_ENB,
+ .pclk_sel = &gpt_pclk_sel,
+ .pclk_sel_shift = GPT2_CLK_SHIFT,
+ .recalc = &gpt_clk_recalc,
+ .private_data = &gpt2_config,
+};
+
+/* clock derived from pll3 clk */
+/* usbh clock */
+static struct clk usbh_clk = {
+ .pclk = &pll3_48m_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = USBH_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* usbd clock */
+static struct clk usbd_clk = {
+ .pclk = &pll3_48m_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = USBD_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* clcd clock */
+static struct clk clcd_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &pll3_48m_clk,
+ .recalc = &follow_parent,
+};
+
+/* clock derived from ahb clk */
+/* apb configuration structure */
+static struct bus_clk_config apb_config = {
+ .reg = CORE_CLK_CFG,
+ .mask = HCLK_PCLK_RATIO_MASK,
+ .shift = HCLK_PCLK_RATIO_SHIFT,
+};
+
+/* apb clock */
+static struct clk apb_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &ahb_clk,
+ .recalc = &bus_clk_recalc,
+ .private_data = &apb_config,
+};
+
+/* i2c clock */
+static struct clk i2c_clk = {
+ .pclk = &ahb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = I2C_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* dma clock */
+static struct clk dma_clk = {
+ .pclk = &ahb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = DMA_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* jpeg clock */
+static struct clk jpeg_clk = {
+ .pclk = &ahb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = JPEG_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* gmac clock */
+static struct clk gmac_clk = {
+ .pclk = &ahb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = GMAC_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* smi clock */
+static struct clk smi_clk = {
+ .pclk = &ahb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = SMI_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* c3 clock */
+static struct clk c3_clk = {
+ .pclk = &ahb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = C3_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* clock derived from apb clk */
+/* adc clock */
+static struct clk adc_clk = {
+ .pclk = &apb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = ADC_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* ssp clock */
+static struct clk ssp_clk = {
+ .pclk = &apb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = SSP_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* gpio clock */
+static struct clk gpio_clk = {
+ .pclk = &apb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = GPIO_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* array of all spear 3xx clock lookups */
+static struct clk_lookup spear_clk_lookups[] = {
+ /* root clks */
+ { .con_id = "osc_32k_clk", .clk = &osc_32k_clk},
+ { .con_id = "osc_24m_clk", .clk = &osc_24m_clk},
+ /* clock derived from 32 KHz osc clk */
+ { .dev_id = "rtc", .clk = &rtc_clk},
+ /* clock derived from 24 MHz osc clk */
+ { .con_id = "pll1_clk", .clk = &pll1_clk},
+ { .con_id = "pll3_48m_clk", .clk = &pll3_48m_clk},
+ { .dev_id = "wdt", .clk = &wdt_clk},
+ /* clock derived from pll1 clk */
+ { .con_id = "cpu_clk", .clk = &cpu_clk},
+ { .con_id = "ahb_clk", .clk = &ahb_clk},
+ { .dev_id = "uart", .clk = &uart_clk},
+ { .dev_id = "firda", .clk = &firda_clk},
+ { .dev_id = "gpt0", .clk = &gpt0_clk},
+ { .dev_id = "gpt1", .clk = &gpt1_clk},
+ { .dev_id = "gpt2", .clk = &gpt2_clk},
+ /* clock derived from pll3 clk */
+ { .dev_id = "usbh", .clk = &usbh_clk},
+ { .dev_id = "usbd", .clk = &usbd_clk},
+ { .dev_id = "clcd", .clk = &clcd_clk},
+ /* clock derived from ahb clk */
+ { .con_id = "apb_clk", .clk = &apb_clk},
+ { .dev_id = "i2c", .clk = &i2c_clk},
+ { .dev_id = "dma", .clk = &dma_clk},
+ { .dev_id = "jpeg", .clk = &jpeg_clk},
+ { .dev_id = "gmac", .clk = &gmac_clk},
+ { .dev_id = "smi", .clk = &smi_clk},
+ { .dev_id = "c3", .clk = &c3_clk},
+ /* clock derived from apb clk */
+ { .dev_id = "adc", .clk = &adc_clk},
+ { .dev_id = "ssp", .clk = &ssp_clk},
+ { .dev_id = "gpio", .clk = &gpio_clk},
+};
+
+void __init clk_init(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(spear_clk_lookups); i++)
+ clk_register(&spear_clk_lookups[i]);
+
+ recalc_root_clocks();
+}
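The clk_lookup table above is what later clk_get() calls resolve against, matching either the device name (.dev_id) or the connection name (.con_id). A rough driver-side usage fragment, assuming a device that registers under the name "uart" (not a complete driver, for illustration only):

	struct clk *clk;

	clk = clk_get(dev, NULL);	/* matched via .dev_id = "uart" */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	clk_enable(clk);		/* enable bit is handled by the plat clock code */
	pr_info("uart clk: %lu Hz\n", clk_get_rate(clk));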
diff --git a/arch/arm/mach-spear3xx/include/mach/clkdev.h b/arch/arm/mach-spear3xx/include/mach/clkdev.h
new file mode 100644
index 000000000000..a3d07339d9f1
--- /dev/null
+++ b/arch/arm/mach-spear3xx/include/mach/clkdev.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear3xx/include/mach/clkdev.h
+ *
+ * Clock Dev framework definitions for SPEAr3xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_CLKDEV_H
+#define __MACH_CLKDEV_H
+
+#include <plat/clkdev.h>
+
+#endif /* __MACH_CLKDEV_H */
diff --git a/arch/arm/mach-spear3xx/include/mach/debug-macro.S b/arch/arm/mach-spear3xx/include/mach/debug-macro.S
new file mode 100644
index 000000000000..590519f10d6e
--- /dev/null
+++ b/arch/arm/mach-spear3xx/include/mach/debug-macro.S
@@ -0,0 +1,14 @@
+/*
+ * arch/arm/mach-spear3xx/include/mach/debug-macro.S
+ *
+ * Debugging macro include header spear3xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <plat/debug-macro.S>
diff --git a/arch/arm/mach-spear3xx/include/mach/entry-macro.S b/arch/arm/mach-spear3xx/include/mach/entry-macro.S
new file mode 100644
index 000000000000..947625d6b48d
--- /dev/null
+++ b/arch/arm/mach-spear3xx/include/mach/entry-macro.S
@@ -0,0 +1,46 @@
+/*
+ * arch/arm/mach-spear3xx/include/mach/entry-macro.S
+ *
+ * Low-level IRQ helper macros for SPEAr3xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <mach/hardware.h>
+#include <mach/spear.h>
+#include <asm/hardware/vic.h>
+
+ .macro disable_fiq
+ .endm
+
+ .macro get_irqnr_preamble, base, tmp
+ .endm
+
+ .macro arch_ret_to_user, tmp1, tmp2
+ .endm
+
+ .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
+ ldr \base, =VA_SPEAR3XX_ML1_VIC_BASE
+ ldr \irqstat, [\base, #VIC_IRQ_STATUS] @ get status
+ teq \irqstat, #0
+ beq 1001f @ this will set/reset
+ @ zero register
+ /*
+ * The following code finds the bit position of the least significant
+ * bit set in irqstat, using the equation:
+ * least significant bit set in n = (n & ~(n-1))
+ */
+ sub \tmp, \irqstat, #1 @ tmp = irqstat - 1
+ mvn \tmp, \tmp @ tmp = ~tmp
+ and \irqstat, \irqstat, \tmp @ irqstat &= tmp
+ /* Now irqstat = bit number of the first bit set in the VIC irq status */
+ clz \tmp, \irqstat @ tmp = leading zeros
+ rsb \irqnr, \tmp, #0x1F @ irqnr = 32 - tmp - 1
+
+1001: /* EQ will be set if no irqs pending */
+ .endm
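The n & ~(n - 1) step above isolates the least significant set bit of the VIC status word; the clz/rsb pair then converts that single bit into its bit index, which is the irq number. A standalone C illustration of the same arithmetic (the assembly above is the authoritative version):

#include <stdint.h>
#include <stdio.h>

static int first_pending_irq(uint32_t irqstat)
{
	uint32_t lsb;

	if (!irqstat)
		return -1;			/* no irqs pending */
	lsb = irqstat & ~(irqstat - 1);		/* isolate the lowest set bit */
	return 31 - __builtin_clz(lsb);		/* bit position = irq number */
}

int main(void)
{
	printf("%d %d\n", first_pending_irq(0x00000008), first_pending_irq(0x80000100));
	return 0;
}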
diff --git a/arch/arm/mach-spear3xx/include/mach/generic.h b/arch/arm/mach-spear3xx/include/mach/generic.h
new file mode 100644
index 000000000000..af7e02c909a3
--- /dev/null
+++ b/arch/arm/mach-spear3xx/include/mach/generic.h
@@ -0,0 +1,205 @@
+/*
+ * arch/arm/mach-spear3xx/include/mach/generic.h
+ *
+ * SPEAr3XX machine family generic header file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_GENERIC_H
+#define __MACH_GENERIC_H
+
+#include <asm/mach/time.h>
+#include <asm/mach/map.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/amba/bus.h>
+#include <plat/padmux.h>
+
+/* spear3xx declarations */
+/*
+ * Each GPT has 2 timer channels
+ * The following GPT channels will be used as clocksource and clockevent
+ */
+#define SPEAR_GPT0_BASE SPEAR3XX_ML1_TMR_BASE
+#define SPEAR_GPT0_CHAN0_IRQ IRQ_CPU_GPT1_1
+#define SPEAR_GPT0_CHAN1_IRQ IRQ_CPU_GPT1_2
+
+/* Add spear3xx family device structure declarations here */
+extern struct amba_device gpio_device;
+extern struct amba_device uart_device;
+extern struct sys_timer spear_sys_timer;
+
+/* Add spear3xx family function declarations here */
+void __init clk_init(void);
+void __init spear3xx_map_io(void);
+void __init spear3xx_init_irq(void);
+void __init spear3xx_init(void);
+void spear_pmx_init(struct pmx_driver *pmx_driver, uint base, uint size);
+
+/* pad mux declarations */
+#define PMX_FIRDA_MASK (1 << 14)
+#define PMX_I2C_MASK (1 << 13)
+#define PMX_SSP_CS_MASK (1 << 12)
+#define PMX_SSP_MASK (1 << 11)
+#define PMX_MII_MASK (1 << 10)
+#define PMX_GPIO_PIN0_MASK (1 << 9)
+#define PMX_GPIO_PIN1_MASK (1 << 8)
+#define PMX_GPIO_PIN2_MASK (1 << 7)
+#define PMX_GPIO_PIN3_MASK (1 << 6)
+#define PMX_GPIO_PIN4_MASK (1 << 5)
+#define PMX_GPIO_PIN5_MASK (1 << 4)
+#define PMX_UART0_MODEM_MASK (1 << 3)
+#define PMX_UART0_MASK (1 << 2)
+#define PMX_TIMER_3_4_MASK (1 << 1)
+#define PMX_TIMER_1_2_MASK (1 << 0)
+
+/* pad mux devices */
+extern struct pmx_dev pmx_firda;
+extern struct pmx_dev pmx_i2c;
+extern struct pmx_dev pmx_ssp_cs;
+extern struct pmx_dev pmx_ssp;
+extern struct pmx_dev pmx_mii;
+extern struct pmx_dev pmx_gpio_pin0;
+extern struct pmx_dev pmx_gpio_pin1;
+extern struct pmx_dev pmx_gpio_pin2;
+extern struct pmx_dev pmx_gpio_pin3;
+extern struct pmx_dev pmx_gpio_pin4;
+extern struct pmx_dev pmx_gpio_pin5;
+extern struct pmx_dev pmx_uart0_modem;
+extern struct pmx_dev pmx_uart0;
+extern struct pmx_dev pmx_timer_3_4;
+extern struct pmx_dev pmx_timer_1_2;
+
+#if defined(CONFIG_MACH_SPEAR310) || defined(CONFIG_MACH_SPEAR320)
+/* padmux plgpio devices */
+extern struct pmx_dev pmx_plgpio_0_1;
+extern struct pmx_dev pmx_plgpio_2_3;
+extern struct pmx_dev pmx_plgpio_4_5;
+extern struct pmx_dev pmx_plgpio_6_9;
+extern struct pmx_dev pmx_plgpio_10_27;
+extern struct pmx_dev pmx_plgpio_28;
+extern struct pmx_dev pmx_plgpio_29;
+extern struct pmx_dev pmx_plgpio_30;
+extern struct pmx_dev pmx_plgpio_31;
+extern struct pmx_dev pmx_plgpio_32;
+extern struct pmx_dev pmx_plgpio_33;
+extern struct pmx_dev pmx_plgpio_34_36;
+extern struct pmx_dev pmx_plgpio_37_42;
+extern struct pmx_dev pmx_plgpio_43_44_47_48;
+extern struct pmx_dev pmx_plgpio_45_46_49_50;
+#endif
+
+extern struct pmx_driver pmx_driver;
+
+/* spear300 declarations */
+#ifdef CONFIG_MACH_SPEAR300
+/* Add spear300 machine device structure declarations here */
+extern struct amba_device gpio1_device;
+
+/* pad mux modes */
+extern struct pmx_mode nand_mode;
+extern struct pmx_mode nor_mode;
+extern struct pmx_mode photo_frame_mode;
+extern struct pmx_mode lend_ip_phone_mode;
+extern struct pmx_mode hend_ip_phone_mode;
+extern struct pmx_mode lend_wifi_phone_mode;
+extern struct pmx_mode hend_wifi_phone_mode;
+extern struct pmx_mode ata_pabx_wi2s_mode;
+extern struct pmx_mode ata_pabx_i2s_mode;
+extern struct pmx_mode caml_lcdw_mode;
+extern struct pmx_mode camu_lcd_mode;
+extern struct pmx_mode camu_wlcd_mode;
+extern struct pmx_mode caml_lcd_mode;
+
+/* pad mux devices */
+extern struct pmx_dev pmx_fsmc_2_chips;
+extern struct pmx_dev pmx_fsmc_4_chips;
+extern struct pmx_dev pmx_keyboard;
+extern struct pmx_dev pmx_clcd;
+extern struct pmx_dev pmx_telecom_gpio;
+extern struct pmx_dev pmx_telecom_tdm;
+extern struct pmx_dev pmx_telecom_spi_cs_i2c_clk;
+extern struct pmx_dev pmx_telecom_camera;
+extern struct pmx_dev pmx_telecom_dac;
+extern struct pmx_dev pmx_telecom_i2s;
+extern struct pmx_dev pmx_telecom_boot_pins;
+extern struct pmx_dev pmx_telecom_sdio_4bit;
+extern struct pmx_dev pmx_telecom_sdio_8bit;
+extern struct pmx_dev pmx_gpio1;
+
+void spear300_pmx_init(void);
+
+/* Add spear300 machine function declarations here */
+void __init spear300_init(void);
+
+#endif /* CONFIG_MACH_SPEAR300 */
+
+/* spear310 declarations */
+#ifdef CONFIG_MACH_SPEAR310
+/* Add spear310 machine device structure declarations here */
+
+/* pad mux devices */
+extern struct pmx_dev pmx_emi_cs_0_1_4_5;
+extern struct pmx_dev pmx_emi_cs_2_3;
+extern struct pmx_dev pmx_uart1;
+extern struct pmx_dev pmx_uart2;
+extern struct pmx_dev pmx_uart3_4_5;
+extern struct pmx_dev pmx_fsmc;
+extern struct pmx_dev pmx_rs485_0_1;
+extern struct pmx_dev pmx_tdm0;
+
+void spear310_pmx_init(void);
+
+/* Add spear310 machine function declarations here */
+void __init spear310_init(void);
+
+#endif /* CONFIG_MACH_SPEAR310 */
+
+/* spear320 declarations */
+#ifdef CONFIG_MACH_SPEAR320
+/* Add spear320 machine device structure declarations here */
+
+/* pad mux modes */
+extern struct pmx_mode auto_net_smii_mode;
+extern struct pmx_mode auto_net_mii_mode;
+extern struct pmx_mode auto_exp_mode;
+extern struct pmx_mode small_printers_mode;
+
+/* pad mux devices */
+extern struct pmx_dev pmx_clcd;
+extern struct pmx_dev pmx_emi;
+extern struct pmx_dev pmx_fsmc;
+extern struct pmx_dev pmx_spp;
+extern struct pmx_dev pmx_sdio;
+extern struct pmx_dev pmx_i2s;
+extern struct pmx_dev pmx_uart1;
+extern struct pmx_dev pmx_uart1_modem;
+extern struct pmx_dev pmx_uart2;
+extern struct pmx_dev pmx_touchscreen;
+extern struct pmx_dev pmx_can;
+extern struct pmx_dev pmx_sdio_led;
+extern struct pmx_dev pmx_pwm0;
+extern struct pmx_dev pmx_pwm1;
+extern struct pmx_dev pmx_pwm2;
+extern struct pmx_dev pmx_pwm3;
+extern struct pmx_dev pmx_ssp1;
+extern struct pmx_dev pmx_ssp2;
+extern struct pmx_dev pmx_mii1;
+extern struct pmx_dev pmx_smii0;
+extern struct pmx_dev pmx_smii1;
+extern struct pmx_dev pmx_i2c1;
+
+void spear320_pmx_init(void);
+
+/* Add spear320 machine function declarations here */
+void __init spear320_init(void);
+
+#endif /* CONFIG_MACH_SPEAR320 */
+
+#endif /* __MACH_GENERIC_H */
diff --git a/arch/arm/mach-spear3xx/include/mach/gpio.h b/arch/arm/mach-spear3xx/include/mach/gpio.h
new file mode 100644
index 000000000000..451b2081bfc9
--- /dev/null
+++ b/arch/arm/mach-spear3xx/include/mach/gpio.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear3xx/include/mach/gpio.h
+ *
+ * GPIO macros for SPEAr3xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_GPIO_H
+#define __MACH_GPIO_H
+
+#include <plat/gpio.h>
+
+#endif /* __MACH_GPIO_H */
diff --git a/arch/arm/mach-spear3xx/include/mach/hardware.h b/arch/arm/mach-spear3xx/include/mach/hardware.h
new file mode 100644
index 000000000000..4a86e6a3c444
--- /dev/null
+++ b/arch/arm/mach-spear3xx/include/mach/hardware.h
@@ -0,0 +1,20 @@
+/*
+ * arch/arm/mach-spear3xx/include/mach/hardware.h
+ *
+ * Hardware definitions for SPEAr3xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_HARDWARE_H
+#define __MACH_HARDWARE_H
+
+/* Physical to virtual translation of statically mapped space */
+#define IO_ADDRESS(x)			((x) | 0xF0000000)
+
+#endif /* __MACH_HARDWARE_H */
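The IO_ADDRESS() macro above is the fixed translation that the VA_* macros later in this patch rely on: the physical base is OR-ed with 0xF0000000, so every statically mapped peripheral ends up in the top region of the virtual address space. A minimal sketch, assuming spear3xx_map_io() has already installed the static mapping (the helper name is illustrative and not part of this patch; the address comes from mach/spear.h below):

	/* 0xD0000000 | 0xF0000000 == 0xF0000000: once the static mapping is
	 * in place, the UART registers are reachable at this fixed VA. */
	static inline void __iomem *spear_uart_va(void)
	{
		return (void __iomem *)VA_SPEAR3XX_ICM1_UART_BASE;
	}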
diff --git a/arch/arm/mach-spear3xx/include/mach/io.h b/arch/arm/mach-spear3xx/include/mach/io.h
new file mode 100644
index 000000000000..30cff8a1f6b5
--- /dev/null
+++ b/arch/arm/mach-spear3xx/include/mach/io.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear3xx/include/mach/io.h
+ *
+ * IO definitions for SPEAr3xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_IO_H
+#define __MACH_IO_H
+
+#include <plat/io.h>
+
+#endif /* __MACH_IO_H */
diff --git a/arch/arm/mach-spear3xx/include/mach/irqs.h b/arch/arm/mach-spear3xx/include/mach/irqs.h
new file mode 100644
index 000000000000..7f940b818473
--- /dev/null
+++ b/arch/arm/mach-spear3xx/include/mach/irqs.h
@@ -0,0 +1,152 @@
+/*
+ * arch/arm/mach-spear3xx/include/mach/irqs.h
+ *
+ * IRQ helper macros for SPEAr3xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_IRQS_H
+#define __MACH_IRQS_H
+
+/* SPEAr3xx IRQ definitions */
+#define IRQ_HW_ACCEL_MOD_0 0
+#define IRQ_INTRCOMM_RAS_ARM 1
+#define IRQ_CPU_GPT1_1 2
+#define IRQ_CPU_GPT1_2 3
+#define IRQ_BASIC_GPT1_1 4
+#define IRQ_BASIC_GPT1_2 5
+#define IRQ_BASIC_GPT2_1 6
+#define IRQ_BASIC_GPT2_2 7
+#define IRQ_BASIC_DMA 8
+#define IRQ_BASIC_SMI 9
+#define IRQ_BASIC_RTC 10
+#define IRQ_BASIC_GPIO 11
+#define IRQ_BASIC_WDT 12
+#define IRQ_DDR_CONTROLLER 13
+#define IRQ_SYS_ERROR 14
+#define IRQ_WAKEUP_RCV 15
+#define IRQ_JPEG 16
+#define IRQ_IRDA 17
+#define IRQ_ADC 18
+#define IRQ_UART 19
+#define IRQ_SSP 20
+#define IRQ_I2C 21
+#define IRQ_MAC_1 22
+#define IRQ_MAC_2 23
+#define IRQ_USB_DEV 24
+#define IRQ_USB_H_OHCI_0 25
+#define IRQ_USB_H_EHCI_0 26
+#define IRQ_USB_H_EHCI_1 IRQ_USB_H_EHCI_0
+#define IRQ_USB_H_OHCI_1 27
+#define IRQ_GEN_RAS_1 28
+#define IRQ_GEN_RAS_2 29
+#define IRQ_GEN_RAS_3 30
+#define IRQ_HW_ACCEL_MOD_1 31
+#define IRQ_VIC_END 32
+
+#define VIRQ_START IRQ_VIC_END
+
+/* SPEAr300 Virtual irq definitions */
+#ifdef CONFIG_MACH_SPEAR300
+/* IRQs sharing IRQ_GEN_RAS_1 */
+#define VIRQ_IT_PERS_S (VIRQ_START + 0)
+#define VIRQ_IT_CHANGE_S (VIRQ_START + 1)
+#define VIRQ_I2S (VIRQ_START + 2)
+#define VIRQ_TDM (VIRQ_START + 3)
+#define VIRQ_CAMERA_L (VIRQ_START + 4)
+#define VIRQ_CAMERA_F (VIRQ_START + 5)
+#define VIRQ_CAMERA_V (VIRQ_START + 6)
+#define VIRQ_KEYBOARD (VIRQ_START + 7)
+#define VIRQ_GPIO1 (VIRQ_START + 8)
+
+/* IRQs sharing IRQ_GEN_RAS_3 */
+#define IRQ_CLCD IRQ_GEN_RAS_3
+
+/* IRQs sharing IRQ_INTRCOMM_RAS_ARM */
+#define IRQ_SDIO IRQ_INTRCOMM_RAS_ARM
+
+/* GPIO pins virtual irqs */
+#define SPEAR_GPIO_INT_BASE (VIRQ_START + 9)
+#define SPEAR_GPIO1_INT_BASE (SPEAR_GPIO_INT_BASE + 8)
+#define SPEAR_GPIO_INT_END (SPEAR_GPIO1_INT_BASE + 8)
+
+/* SPEAr310 Virtual irq definitions */
+#elif defined(CONFIG_MACH_SPEAR310)
+/* IRQs sharing IRQ_GEN_RAS_1 */
+#define VIRQ_SMII0 (VIRQ_START + 0)
+#define VIRQ_SMII1 (VIRQ_START + 1)
+#define VIRQ_SMII2 (VIRQ_START + 2)
+#define VIRQ_SMII3 (VIRQ_START + 3)
+#define VIRQ_WAKEUP_SMII0 (VIRQ_START + 4)
+#define VIRQ_WAKEUP_SMII1 (VIRQ_START + 5)
+#define VIRQ_WAKEUP_SMII2 (VIRQ_START + 6)
+#define VIRQ_WAKEUP_SMII3 (VIRQ_START + 7)
+
+/* IRQs sharing IRQ_GEN_RAS_2 */
+#define VIRQ_UART1 (VIRQ_START + 8)
+#define VIRQ_UART2 (VIRQ_START + 9)
+#define VIRQ_UART3 (VIRQ_START + 10)
+#define VIRQ_UART4 (VIRQ_START + 11)
+#define VIRQ_UART5 (VIRQ_START + 12)
+
+/* IRQs sharing IRQ_GEN_RAS_3 */
+#define VIRQ_EMI (VIRQ_START + 13)
+#define VIRQ_PLGPIO (VIRQ_START + 14)
+
+/* IRQs sharing IRQ_INTRCOMM_RAS_ARM */
+#define VIRQ_TDM_HDLC (VIRQ_START + 15)
+#define VIRQ_RS485_0 (VIRQ_START + 16)
+#define VIRQ_RS485_1 (VIRQ_START + 17)
+
+/* GPIO pins virtual irqs */
+#define SPEAR_GPIO_INT_BASE (VIRQ_START + 18)
+
+/* SPEAr320 Virtual irq definitions */
+#else
+/* IRQs sharing IRQ_GEN_RAS_1 */
+#define VIRQ_EMI (VIRQ_START + 0)
+#define VIRQ_CLCD (VIRQ_START + 1)
+#define VIRQ_SPP (VIRQ_START + 2)
+
+/* IRQs sharing IRQ_GEN_RAS_2 */
+#define IRQ_SDIO IRQ_GEN_RAS_2
+
+/* IRQs sharing IRQ_GEN_RAS_3 */
+#define VIRQ_PLGPIO (VIRQ_START + 3)
+#define VIRQ_I2S_PLAY (VIRQ_START + 4)
+#define VIRQ_I2S_REC (VIRQ_START + 5)
+
+/* IRQs sharing IRQ_INTRCOMM_RAS_ARM */
+#define VIRQ_CANU (VIRQ_START + 6)
+#define VIRQ_CANL (VIRQ_START + 7)
+#define VIRQ_UART1 (VIRQ_START + 8)
+#define VIRQ_UART2 (VIRQ_START + 9)
+#define VIRQ_SSP1 (VIRQ_START + 10)
+#define VIRQ_SSP2 (VIRQ_START + 11)
+#define VIRQ_SMII0 (VIRQ_START + 12)
+#define VIRQ_MII1_SMII1 (VIRQ_START + 13)
+#define VIRQ_WAKEUP_SMII0 (VIRQ_START + 14)
+#define VIRQ_WAKEUP_MII1_SMII1 (VIRQ_START + 15)
+#define VIRQ_I2C (VIRQ_START + 16)
+
+/* GPIO pins virtual irqs */
+#define SPEAR_GPIO_INT_BASE (VIRQ_START + 17)
+
+#endif
+
+/* PLGPIO Virtual IRQs */
+#if defined(CONFIG_MACH_SPEAR310) || defined(CONFIG_MACH_SPEAR320)
+#define SPEAR_PLGPIO_INT_BASE (SPEAR_GPIO_INT_BASE + 8)
+#define SPEAR_GPIO_INT_END (SPEAR_PLGPIO_INT_BASE + 102)
+#endif
+
+#define VIRQ_END SPEAR_GPIO_INT_END
+#define NR_IRQS VIRQ_END
+
+#endif /* __MACH_IRQS_H */
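Working the SPEAr300 numbering through as an example: the VIC supplies hardware interrupts 0 through 31, so VIRQ_START is 32; VIRQ_GPIO1 therefore becomes 40, SPEAR_GPIO_INT_BASE 41, SPEAR_GPIO1_INT_BASE 49 and SPEAR_GPIO_INT_END 57, which is the NR_IRQS value a SPEAr300 kernel is built with.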
diff --git a/arch/arm/mach-spear3xx/include/mach/memory.h b/arch/arm/mach-spear3xx/include/mach/memory.h
new file mode 100644
index 000000000000..51735221ea19
--- /dev/null
+++ b/arch/arm/mach-spear3xx/include/mach/memory.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear3xx/include/mach/memory.h
+ *
+ * Memory map for SPEAr3xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_MEMORY_H
+#define __MACH_MEMORY_H
+
+#include <plat/memory.h>
+
+#endif /* __MACH_MEMORY_H */
diff --git a/arch/arm/mach-spear3xx/include/mach/misc_regs.h b/arch/arm/mach-spear3xx/include/mach/misc_regs.h
new file mode 100644
index 000000000000..38d767a1aba0
--- /dev/null
+++ b/arch/arm/mach-spear3xx/include/mach/misc_regs.h
@@ -0,0 +1,163 @@
+/*
+ * arch/arm/mach-spear3xx/include/mach/misc_regs.h
+ *
+ * Miscellaneous registers definitions for SPEAr3xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_MISC_REGS_H
+#define __MACH_MISC_REGS_H
+
+#include <mach/spear.h>
+
+#define MISC_BASE VA_SPEAR3XX_ICM3_MISC_REG_BASE
+
+#define SOC_CFG_CTR ((unsigned int *)(MISC_BASE + 0x000))
+#define DIAG_CFG_CTR ((unsigned int *)(MISC_BASE + 0x004))
+#define PLL1_CTR ((unsigned int *)(MISC_BASE + 0x008))
+#define PLL1_FRQ ((unsigned int *)(MISC_BASE + 0x00C))
+#define PLL1_MOD ((unsigned int *)(MISC_BASE + 0x010))
+#define PLL2_CTR ((unsigned int *)(MISC_BASE + 0x014))
+/* PLL_CTR register masks */
+#define PLL_ENABLE 2
+#define PLL_MODE_SHIFT 4
+#define PLL_MODE_MASK 0x3
+#define PLL_MODE_NORMAL 0
+#define PLL_MODE_FRACTION 1
+#define PLL_MODE_DITH_DSB 2
+#define PLL_MODE_DITH_SSB 3
+
+#define PLL2_FRQ ((unsigned int *)(MISC_BASE + 0x018))
+/* PLL FRQ register masks */
+#define PLL_DIV_N_SHIFT 0
+#define PLL_DIV_N_MASK 0xFF
+#define PLL_DIV_P_SHIFT 8
+#define PLL_DIV_P_MASK 0x7
+#define PLL_NORM_FDBK_M_SHIFT 24
+#define PLL_NORM_FDBK_M_MASK 0xFF
+#define PLL_DITH_FDBK_M_SHIFT 16
+#define PLL_DITH_FDBK_M_MASK 0xFFFF
+
+#define PLL2_MOD ((unsigned int *)(MISC_BASE + 0x01C))
+#define PLL_CLK_CFG ((unsigned int *)(MISC_BASE + 0x020))
+#define CORE_CLK_CFG ((unsigned int *)(MISC_BASE + 0x024))
+/* CORE CLK CFG register masks */
+#define PLL_HCLK_RATIO_SHIFT 10
+#define PLL_HCLK_RATIO_MASK 0x3
+#define HCLK_PCLK_RATIO_SHIFT 8
+#define HCLK_PCLK_RATIO_MASK 0x3
+
+#define PERIP_CLK_CFG ((unsigned int *)(MISC_BASE + 0x028))
+/* PERIP_CLK_CFG register masks */
+#define UART_CLK_SHIFT 4
+#define UART_CLK_MASK 0x1
+#define FIRDA_CLK_SHIFT 5
+#define FIRDA_CLK_MASK 0x3
+#define GPT0_CLK_SHIFT 8
+#define GPT1_CLK_SHIFT 11
+#define GPT2_CLK_SHIFT 12
+#define GPT_CLK_MASK 0x1
+#define AUX_CLK_PLL3_MASK 0
+#define AUX_CLK_PLL1_MASK 1
+
+#define PERIP1_CLK_ENB ((unsigned int *)(MISC_BASE + 0x02C))
+/* PERIP1_CLK_ENB register masks */
+#define UART_CLK_ENB 3
+#define SSP_CLK_ENB 5
+#define I2C_CLK_ENB 7
+#define JPEG_CLK_ENB 8
+#define FIRDA_CLK_ENB 10
+#define GPT1_CLK_ENB 11
+#define GPT2_CLK_ENB 12
+#define ADC_CLK_ENB 15
+#define RTC_CLK_ENB 17
+#define GPIO_CLK_ENB 18
+#define DMA_CLK_ENB 19
+#define SMI_CLK_ENB 21
+#define GMAC_CLK_ENB 23
+#define USBD_CLK_ENB 24
+#define USBH_CLK_ENB 25
+#define C3_CLK_ENB 31
+
+#define SOC_CORE_ID ((unsigned int *)(MISC_BASE + 0x030))
+#define RAS_CLK_ENB ((unsigned int *)(MISC_BASE + 0x034))
+#define PERIP1_SOF_RST ((unsigned int *)(MISC_BASE + 0x038))
+/* PERIP1_SOF_RST register masks */
+#define JPEG_SOF_RST 8
+
+#define SOC_USER_ID ((unsigned int *)(MISC_BASE + 0x03C))
+#define RAS_SOF_RST ((unsigned int *)(MISC_BASE + 0x040))
+#define PRSC1_CLK_CFG ((unsigned int *)(MISC_BASE + 0x044))
+#define PRSC2_CLK_CFG ((unsigned int *)(MISC_BASE + 0x048))
+#define PRSC3_CLK_CFG ((unsigned int *)(MISC_BASE + 0x04C))
+/* gpt synthesizer register masks */
+#define GPT_MSCALE_SHIFT 0
+#define GPT_MSCALE_MASK 0xFFF
+#define GPT_NSCALE_SHIFT 12
+#define GPT_NSCALE_MASK 0xF
+
+#define AMEM_CLK_CFG ((unsigned int *)(MISC_BASE + 0x050))
+#define EXPI_CLK_CFG ((unsigned int *)(MISC_BASE + 0x054))
+#define CLCD_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x05C))
+#define FIRDA_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x060))
+#define UART_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x064))
+#define GMAC_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x068))
+#define RAS1_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x06C))
+#define RAS2_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x070))
+#define RAS3_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x074))
+#define RAS4_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x078))
+/* aux clk synthesizer register masks for irda to ras4 */
+#define AUX_EQ_SEL_SHIFT 30
+#define AUX_EQ_SEL_MASK 1
+#define AUX_EQ1_SEL 0
+#define AUX_EQ2_SEL 1
+#define AUX_XSCALE_SHIFT 16
+#define AUX_XSCALE_MASK 0xFFF
+#define AUX_YSCALE_SHIFT 0
+#define AUX_YSCALE_MASK 0xFFF
+
+#define ICM1_ARB_CFG ((unsigned int *)(MISC_BASE + 0x07C))
+#define ICM2_ARB_CFG ((unsigned int *)(MISC_BASE + 0x080))
+#define ICM3_ARB_CFG ((unsigned int *)(MISC_BASE + 0x084))
+#define ICM4_ARB_CFG ((unsigned int *)(MISC_BASE + 0x088))
+#define ICM5_ARB_CFG ((unsigned int *)(MISC_BASE + 0x08C))
+#define ICM6_ARB_CFG ((unsigned int *)(MISC_BASE + 0x090))
+#define ICM7_ARB_CFG ((unsigned int *)(MISC_BASE + 0x094))
+#define ICM8_ARB_CFG ((unsigned int *)(MISC_BASE + 0x098))
+#define ICM9_ARB_CFG ((unsigned int *)(MISC_BASE + 0x09C))
+#define DMA_CHN_CFG ((unsigned int *)(MISC_BASE + 0x0A0))
+#define USB2_PHY_CFG ((unsigned int *)(MISC_BASE + 0x0A4))
+#define GMAC_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0A8))
+#define EXPI_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0AC))
+#define PRC1_LOCK_CTR ((unsigned int *)(MISC_BASE + 0x0C0))
+#define PRC2_LOCK_CTR ((unsigned int *)(MISC_BASE + 0x0C4))
+#define PRC3_LOCK_CTR ((unsigned int *)(MISC_BASE + 0x0C8))
+#define PRC4_LOCK_CTR ((unsigned int *)(MISC_BASE + 0x0CC))
+#define PRC1_IRQ_CTR ((unsigned int *)(MISC_BASE + 0x0D0))
+#define PRC2_IRQ_CTR ((unsigned int *)(MISC_BASE + 0x0D4))
+#define PRC3_IRQ_CTR ((unsigned int *)(MISC_BASE + 0x0D8))
+#define PRC4_IRQ_CTR ((unsigned int *)(MISC_BASE + 0x0DC))
+#define PWRDOWN_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0E0))
+#define COMPSSTL_1V8_CFG ((unsigned int *)(MISC_BASE + 0x0E4))
+#define COMPSSTL_2V5_CFG ((unsigned int *)(MISC_BASE + 0x0E8))
+#define COMPCOR_3V3_CFG ((unsigned int *)(MISC_BASE + 0x0EC))
+#define SSTLPAD_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0F0))
+#define BIST1_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0F4))
+#define BIST2_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0F8))
+#define BIST3_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0FC))
+#define BIST4_CFG_CTR ((unsigned int *)(MISC_BASE + 0x100))
+#define BIST5_CFG_CTR ((unsigned int *)(MISC_BASE + 0x104))
+#define BIST1_STS_RES ((unsigned int *)(MISC_BASE + 0x108))
+#define BIST2_STS_RES ((unsigned int *)(MISC_BASE + 0x10C))
+#define BIST3_STS_RES ((unsigned int *)(MISC_BASE + 0x110))
+#define BIST4_STS_RES ((unsigned int *)(MISC_BASE + 0x114))
+#define BIST5_STS_RES ((unsigned int *)(MISC_BASE + 0x118))
+#define SYSERR_CFG_CTR ((unsigned int *)(MISC_BASE + 0x11C))
+
+#endif /* __MACH_MISC_REGS_H */
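The shift/mask pairs above are intended for read-modify-write sequences on the misc register space. A minimal sketch of programming the UART synthesizer's XSCALE field, assuming the usual readl()/writel() accessors; the helper itself is illustrative and not part of this patch:

	static void uart_synth_set_xscale(unsigned int xscale)
	{
		unsigned int val = readl(UART_CLK_SYNT);

		val &= ~(AUX_XSCALE_MASK << AUX_XSCALE_SHIFT);
		val |= (xscale & AUX_XSCALE_MASK) << AUX_XSCALE_SHIFT;
		writel(val, UART_CLK_SYNT);
	}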
diff --git a/arch/arm/mach-spear3xx/include/mach/spear.h b/arch/arm/mach-spear3xx/include/mach/spear.h
new file mode 100644
index 000000000000..dcca8568a486
--- /dev/null
+++ b/arch/arm/mach-spear3xx/include/mach/spear.h
@@ -0,0 +1,144 @@
+/*
+ * arch/arm/mach-spear3xx/include/mach/spear.h
+ *
+ * SPEAr3xx Machine family specific definition
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_SPEAR3XX_H
+#define __MACH_SPEAR3XX_H
+
+#include <mach/hardware.h>
+#include <mach/spear300.h>
+#include <mach/spear310.h>
+#include <mach/spear320.h>
+
+#define SPEAR3XX_ML_SDRAM_BASE 0x00000000
+#define SPEAR3XX_ML_SDRAM_SIZE 0x40000000
+
+#define SPEAR3XX_ICM9_BASE 0xC0000000
+#define SPEAR3XX_ICM9_SIZE 0x10000000
+
+/* ICM1 - Low speed connection */
+#define SPEAR3XX_ICM1_2_BASE 0xD0000000
+#define SPEAR3XX_ICM1_2_SIZE 0x10000000
+
+#define SPEAR3XX_ICM1_UART_BASE 0xD0000000
+#define VA_SPEAR3XX_ICM1_UART_BASE IO_ADDRESS(SPEAR3XX_ICM1_UART_BASE)
+#define SPEAR3XX_ICM1_UART_SIZE 0x00080000
+
+#define SPEAR3XX_ICM1_ADC_BASE 0xD0080000
+#define SPEAR3XX_ICM1_ADC_SIZE 0x00080000
+
+#define SPEAR3XX_ICM1_SSP_BASE 0xD0100000
+#define SPEAR3XX_ICM1_SSP_SIZE 0x00080000
+
+#define SPEAR3XX_ICM1_I2C_BASE 0xD0180000
+#define SPEAR3XX_ICM1_I2C_SIZE 0x00080000
+
+#define SPEAR3XX_ICM1_JPEG_BASE 0xD0800000
+#define SPEAR3XX_ICM1_JPEG_SIZE 0x00800000
+
+#define SPEAR3XX_ICM1_IRDA_BASE 0xD1000000
+#define SPEAR3XX_ICM1_IRDA_SIZE 0x00080000
+
+#define SPEAR3XX_ICM1_SRAM_BASE 0xD2800000
+#define SPEAR3XX_ICM1_SRAM_SIZE 0x05800000
+
+/* ICM2 - Application Subsystem */
+#define SPEAR3XX_ICM2_HWACCEL0_BASE 0xD8800000
+#define SPEAR3XX_ICM2_HWACCEL0_SIZE 0x00800000
+
+#define SPEAR3XX_ICM2_HWACCEL1_BASE 0xD9000000
+#define SPEAR3XX_ICM2_HWACCEL1_SIZE 0x00800000
+
+/* ICM4 - High Speed Connection */
+#define SPEAR3XX_ICM4_BASE 0xE0000000
+#define SPEAR3XX_ICM4_SIZE 0x08000000
+
+#define SPEAR3XX_ICM4_MII_BASE 0xE0800000
+#define SPEAR3XX_ICM4_MII_SIZE 0x00800000
+
+#define SPEAR3XX_ICM4_USBD_FIFO_BASE 0xE1000000
+#define SPEAR3XX_ICM4_USBD_FIFO_SIZE 0x00100000
+
+#define SPEAR3XX_ICM4_USBD_CSR_BASE 0xE1100000
+#define SPEAR3XX_ICM4_USBD_CSR_SIZE 0x00100000
+
+#define SPEAR3XX_ICM4_USBD_PLDT_BASE 0xE1200000
+#define SPEAR3XX_ICM4_USBD_PLDT_SIZE 0x00100000
+
+#define SPEAR3XX_ICM4_USB_EHCI0_1_BASE 0xE1800000
+#define SPEAR3XX_ICM4_USB_EHCI0_1_SIZE 0x00100000
+
+#define SPEAR3XX_ICM4_USB_OHCI0_BASE 0xE1900000
+#define SPEAR3XX_ICM4_USB_OHCI0_SIZE 0x00100000
+
+#define SPEAR3XX_ICM4_USB_OHCI1_BASE 0xE2100000
+#define SPEAR3XX_ICM4_USB_OHCI1_SIZE 0x00100000
+
+#define SPEAR3XX_ICM4_USB_ARB_BASE 0xE2800000
+#define SPEAR3XX_ICM4_USB_ARB_SIZE 0x00010000
+
+/* ML1 - Multi Layer CPU Subsystem */
+#define SPEAR3XX_ICM3_ML1_2_BASE 0xF0000000
+#define SPEAR3XX_ICM3_ML1_2_SIZE 0x0F000000
+
+#define SPEAR3XX_ML1_TMR_BASE 0xF0000000
+#define SPEAR3XX_ML1_TMR_SIZE 0x00100000
+
+#define SPEAR3XX_ML1_VIC_BASE 0xF1100000
+#define VA_SPEAR3XX_ML1_VIC_BASE IO_ADDRESS(SPEAR3XX_ML1_VIC_BASE)
+#define SPEAR3XX_ML1_VIC_SIZE 0x00100000
+
+/* ICM3 - Basic Subsystem */
+#define SPEAR3XX_ICM3_SMEM_BASE 0xF8000000
+#define SPEAR3XX_ICM3_SMEM_SIZE 0x04000000
+
+#define SPEAR3XX_ICM3_SMI_CTRL_BASE 0xFC000000
+#define SPEAR3XX_ICM3_SMI_CTRL_SIZE 0x00200000
+
+#define SPEAR3XX_ICM3_DMA_BASE 0xFC400000
+#define SPEAR3XX_ICM3_DMA_SIZE 0x00200000
+
+#define SPEAR3XX_ICM3_SDRAM_CTRL_BASE 0xFC600000
+#define SPEAR3XX_ICM3_SDRAM_CTRL_SIZE 0x00200000
+
+#define SPEAR3XX_ICM3_TMR0_BASE 0xFC800000
+#define SPEAR3XX_ICM3_TMR0_SIZE 0x00080000
+
+#define SPEAR3XX_ICM3_WDT_BASE 0xFC880000
+#define SPEAR3XX_ICM3_WDT_SIZE 0x00080000
+
+#define SPEAR3XX_ICM3_RTC_BASE 0xFC900000
+#define SPEAR3XX_ICM3_RTC_SIZE 0x00080000
+
+#define SPEAR3XX_ICM3_GPIO_BASE 0xFC980000
+#define SPEAR3XX_ICM3_GPIO_SIZE 0x00080000
+
+#define SPEAR3XX_ICM3_SYS_CTRL_BASE 0xFCA00000
+#define VA_SPEAR3XX_ICM3_SYS_CTRL_BASE IO_ADDRESS(SPEAR3XX_ICM3_SYS_CTRL_BASE)
+#define SPEAR3XX_ICM3_SYS_CTRL_SIZE 0x00080000
+
+#define SPEAR3XX_ICM3_MISC_REG_BASE 0xFCA80000
+#define VA_SPEAR3XX_ICM3_MISC_REG_BASE IO_ADDRESS(SPEAR3XX_ICM3_MISC_REG_BASE)
+#define SPEAR3XX_ICM3_MISC_REG_SIZE 0x00080000
+
+#define SPEAR3XX_ICM3_TMR1_BASE 0xFCB00000
+#define SPEAR3XX_ICM3_TMR1_SIZE 0x00080000
+
+/* Debug UART for Linux; used for debug and uncompress messages */
+#define SPEAR_DBG_UART_BASE SPEAR3XX_ICM1_UART_BASE
+#define VA_SPEAR_DBG_UART_BASE VA_SPEAR3XX_ICM1_UART_BASE
+
+/* Sysctl base for spear platform */
+#define SPEAR_SYS_CTRL_BASE SPEAR3XX_ICM3_SYS_CTRL_BASE
+#define VA_SPEAR_SYS_CTRL_BASE VA_SPEAR3XX_ICM3_SYS_CTRL_BASE
+
+#endif /* __MACH_SPEAR3XX_H */
diff --git a/arch/arm/mach-spear3xx/include/mach/spear300.h b/arch/arm/mach-spear3xx/include/mach/spear300.h
new file mode 100644
index 000000000000..ccaa76522ee2
--- /dev/null
+++ b/arch/arm/mach-spear3xx/include/mach/spear300.h
@@ -0,0 +1,83 @@
+/*
+ * arch/arm/mach-spear3xx/include/mach/spear300.h
+ *
+ * SPEAr300 Machine specific definition
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifdef CONFIG_MACH_SPEAR300
+
+#ifndef __MACH_SPEAR300_H
+#define __MACH_SPEAR300_H
+
+/* Base address of various IPs */
+#define SPEAR300_TELECOM_BASE 0x50000000
+#define SPEAR300_TELECOM_SIZE 0x10000000
+
+/* Interrupt registers offsets and masks */
+#define SPEAR300_TELECOM_REG_SIZE 0x00010000
+#define INT_ENB_MASK_REG 0x54
+#define INT_STS_MASK_REG 0x58
+#define IT_PERS_S_IRQ_MASK (1 << 0)
+#define IT_CHANGE_S_IRQ_MASK (1 << 1)
+#define I2S_IRQ_MASK (1 << 2)
+#define TDM_IRQ_MASK (1 << 3)
+#define CAMERA_L_IRQ_MASK (1 << 4)
+#define CAMERA_F_IRQ_MASK (1 << 5)
+#define CAMERA_V_IRQ_MASK (1 << 6)
+#define KEYBOARD_IRQ_MASK (1 << 7)
+#define GPIO1_IRQ_MASK (1 << 8)
+
+#define SHIRQ_RAS1_MASK 0x1FF
+
+#define SPEAR300_CLCD_BASE 0x60000000
+#define SPEAR300_CLCD_SIZE 0x10000000
+
+#define SPEAR300_SDIO_BASE 0x70000000
+#define SPEAR300_SDIO_SIZE 0x10000000
+
+#define SPEAR300_NAND_0_BASE 0x80000000
+#define SPEAR300_NAND_0_SIZE 0x04000000
+
+#define SPEAR300_NAND_1_BASE 0x84000000
+#define SPEAR300_NAND_1_SIZE 0x04000000
+
+#define SPEAR300_NAND_2_BASE 0x88000000
+#define SPEAR300_NAND_2_SIZE 0x04000000
+
+#define SPEAR300_NAND_3_BASE 0x8c000000
+#define SPEAR300_NAND_3_SIZE 0x04000000
+
+#define SPEAR300_NOR_0_BASE 0x90000000
+#define SPEAR300_NOR_0_SIZE 0x01000000
+
+#define SPEAR300_NOR_1_BASE 0x91000000
+#define SPEAR300_NOR_1_SIZE 0x01000000
+
+#define SPEAR300_NOR_2_BASE 0x92000000
+#define SPEAR300_NOR_2_SIZE 0x01000000
+
+#define SPEAR300_NOR_3_BASE 0x93000000
+#define SPEAR300_NOR_3_SIZE 0x01000000
+
+#define SPEAR300_FSMC_BASE 0x94000000
+#define SPEAR300_FSMC_SIZE 0x05000000
+
+#define SPEAR300_SOC_CONFIG_BASE 0x99000000
+#define SPEAR300_SOC_CONFIG_SIZE 0x00000008
+
+#define SPEAR300_KEYBOARD_BASE 0xA0000000
+#define SPEAR300_KEYBOARD_SIZE 0x09000000
+
+#define SPEAR300_GPIO_BASE 0xA9000000
+#define SPEAR300_GPIO_SIZE 0x07000000
+
+#endif /* __MACH_SPEAR300_H */
+
+#endif /* CONFIG_MACH_SPEAR300 */
diff --git a/arch/arm/mach-spear3xx/include/mach/spear310.h b/arch/arm/mach-spear3xx/include/mach/spear310.h
new file mode 100644
index 000000000000..b27bb8af3309
--- /dev/null
+++ b/arch/arm/mach-spear3xx/include/mach/spear310.h
@@ -0,0 +1,70 @@
+/*
+ * arch/arm/mach-spear3xx/include/mach/spear310.h
+ *
+ * SPEAr310 Machine specific definition
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifdef CONFIG_MACH_SPEAR310
+
+#ifndef __MACH_SPEAR310_H
+#define __MACH_SPEAR310_H
+
+#define SPEAR310_NAND_BASE 0x40000000
+#define SPEAR310_NAND_SIZE 0x04000000
+
+#define SPEAR310_FSMC_BASE 0x44000000
+#define SPEAR310_FSMC_SIZE 0x01000000
+
+#define SPEAR310_UART1_BASE 0xB2000000
+#define SPEAR310_UART2_BASE 0xB2080000
+#define SPEAR310_UART3_BASE 0xB2100000
+#define SPEAR310_UART4_BASE 0xB2180000
+#define SPEAR310_UART5_BASE 0xB2200000
+#define SPEAR310_UART_SIZE 0x00080000
+
+#define SPEAR310_HDLC_BASE 0xB2800000
+#define SPEAR310_HDLC_SIZE 0x00800000
+
+#define SPEAR310_RS485_0_BASE 0xB3000000
+#define SPEAR310_RS485_0_SIZE 0x00800000
+
+#define SPEAR310_RS485_1_BASE 0xB3800000
+#define SPEAR310_RS485_1_SIZE 0x00800000
+
+#define SPEAR310_SOC_CONFIG_BASE 0xB4000000
+#define SPEAR310_SOC_CONFIG_SIZE 0x00000070
+/* Interrupt registers offsets and masks */
+#define INT_STS_MASK_REG 0x04
+#define SMII0_IRQ_MASK (1 << 0)
+#define SMII1_IRQ_MASK (1 << 1)
+#define SMII2_IRQ_MASK (1 << 2)
+#define SMII3_IRQ_MASK (1 << 3)
+#define WAKEUP_SMII0_IRQ_MASK (1 << 4)
+#define WAKEUP_SMII1_IRQ_MASK (1 << 5)
+#define WAKEUP_SMII2_IRQ_MASK (1 << 6)
+#define WAKEUP_SMII3_IRQ_MASK (1 << 7)
+#define UART1_IRQ_MASK (1 << 8)
+#define UART2_IRQ_MASK (1 << 9)
+#define UART3_IRQ_MASK (1 << 10)
+#define UART4_IRQ_MASK (1 << 11)
+#define UART5_IRQ_MASK (1 << 12)
+#define EMI_IRQ_MASK (1 << 13)
+#define TDM_HDLC_IRQ_MASK (1 << 14)
+#define RS485_0_IRQ_MASK (1 << 15)
+#define RS485_1_IRQ_MASK (1 << 16)
+
+#define SHIRQ_RAS1_MASK 0x000FF
+#define SHIRQ_RAS2_MASK 0x01F00
+#define SHIRQ_RAS3_MASK 0x02000
+#define SHIRQ_INTRCOMM_RAS_MASK 0x1C000
+
+#endif /* __MACH_SPEAR310_H */
+
+#endif /* CONFIG_MACH_SPEAR310 */
diff --git a/arch/arm/mach-spear3xx/include/mach/spear320.h b/arch/arm/mach-spear3xx/include/mach/spear320.h
new file mode 100644
index 000000000000..cacf17a958cd
--- /dev/null
+++ b/arch/arm/mach-spear3xx/include/mach/spear320.h
@@ -0,0 +1,96 @@
+/*
+ * arch/arm/mach-spear3xx/include/mach/spear320.h
+ *
+ * SPEAr320 Machine specific definition
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifdef CONFIG_MACH_SPEAR320
+
+#ifndef __MACH_SPEAR320_H
+#define __MACH_SPEAR320_H
+
+#define SPEAR320_EMI_CTRL_BASE 0x40000000
+#define SPEAR320_EMI_CTRL_SIZE 0x08000000
+
+#define SPEAR320_FSMC_BASE 0x4C000000
+#define SPEAR320_FSMC_SIZE 0x01000000
+
+#define SPEAR320_I2S_BASE 0x60000000
+#define SPEAR320_I2S_SIZE 0x10000000
+
+#define SPEAR320_SDIO_BASE 0x70000000
+#define SPEAR320_SDIO_SIZE 0x10000000
+
+#define SPEAR320_CLCD_BASE 0x90000000
+#define SPEAR320_CLCD_SIZE 0x10000000
+
+#define SPEAR320_PAR_PORT_BASE 0xA0000000
+#define SPEAR320_PAR_PORT_SIZE 0x01000000
+
+#define SPEAR320_CAN0_BASE 0xA1000000
+#define SPEAR320_CAN0_SIZE 0x01000000
+
+#define SPEAR320_CAN1_BASE 0xA2000000
+#define SPEAR320_CAN1_SIZE 0x01000000
+
+#define SPEAR320_UART1_BASE 0xA3000000
+#define SPEAR320_UART2_BASE 0xA4000000
+#define SPEAR320_UART_SIZE 0x01000000
+
+#define SPEAR320_SSP0_BASE 0xA5000000
+#define SPEAR320_SSP0_SIZE 0x01000000
+
+#define SPEAR320_SSP1_BASE 0xA6000000
+#define SPEAR320_SSP1_SIZE 0x01000000
+
+#define SPEAR320_I2C_BASE 0xA7000000
+#define SPEAR320_I2C_SIZE 0x01000000
+
+#define SPEAR320_PWM_BASE 0xA8000000
+#define SPEAR320_PWM_SIZE 0x01000000
+
+#define SPEAR320_SMII0_BASE 0xAA000000
+#define SPEAR320_SMII0_SIZE 0x01000000
+
+#define SPEAR320_SMII1_BASE 0xAB000000
+#define SPEAR320_SMII1_SIZE 0x01000000
+
+#define SPEAR320_SOC_CONFIG_BASE 0xB4000000
+#define SPEAR320_SOC_CONFIG_SIZE 0x00000070
+/* Interrupt registers offsets and masks */
+#define INT_STS_MASK_REG 0x04
+#define INT_CLR_MASK_REG 0x04
+#define INT_ENB_MASK_REG 0x08
+#define GPIO_IRQ_MASK (1 << 0)
+#define I2S_PLAY_IRQ_MASK (1 << 1)
+#define I2S_REC_IRQ_MASK (1 << 2)
+#define EMI_IRQ_MASK (1 << 7)
+#define CLCD_IRQ_MASK (1 << 8)
+#define SPP_IRQ_MASK (1 << 9)
+#define SDIO_IRQ_MASK (1 << 10)
+#define CAN_U_IRQ_MASK (1 << 11)
+#define CAN_L_IRQ_MASK (1 << 12)
+#define UART1_IRQ_MASK (1 << 13)
+#define UART2_IRQ_MASK (1 << 14)
+#define SSP1_IRQ_MASK (1 << 15)
+#define SSP2_IRQ_MASK (1 << 16)
+#define SMII0_IRQ_MASK (1 << 17)
+#define MII1_SMII1_IRQ_MASK (1 << 18)
+#define WAKEUP_SMII0_IRQ_MASK (1 << 19)
+#define WAKEUP_MII1_SMII1_IRQ_MASK (1 << 20)
+#define I2C1_IRQ_MASK (1 << 21)
+
+#define SHIRQ_RAS1_MASK 0x000380
+#define SHIRQ_RAS3_MASK 0x000007
+#define SHIRQ_INTRCOMM_RAS_MASK 0x3FF800
+
+#endif /* __MACH_SPEAR320_H */
+
+#endif /* CONFIG_MACH_SPEAR320 */
diff --git a/arch/arm/mach-spear3xx/include/mach/system.h b/arch/arm/mach-spear3xx/include/mach/system.h
new file mode 100644
index 000000000000..92cee6335c90
--- /dev/null
+++ b/arch/arm/mach-spear3xx/include/mach/system.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear3xx/include/mach/system.h
+ *
+ * SPEAr3xx Machine family specific architecture functions
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_SYSTEM_H
+#define __MACH_SYSTEM_H
+
+#include <plat/system.h>
+
+#endif /* __MACH_SYSTEM_H */
diff --git a/arch/arm/mach-spear3xx/include/mach/timex.h b/arch/arm/mach-spear3xx/include/mach/timex.h
new file mode 100644
index 000000000000..a38cc9de876f
--- /dev/null
+++ b/arch/arm/mach-spear3xx/include/mach/timex.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear3xx/include/mach/timex.h
+ *
+ * SPEAr3xx machine family specific timex definitions
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_TIMEX_H
+#define __MACH_TIMEX_H
+
+#include <plat/timex.h>
+
+#endif /* __MACH_TIMEX_H */
diff --git a/arch/arm/mach-spear3xx/include/mach/uncompress.h b/arch/arm/mach-spear3xx/include/mach/uncompress.h
new file mode 100644
index 000000000000..53ba8bbc0dfa
--- /dev/null
+++ b/arch/arm/mach-spear3xx/include/mach/uncompress.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear3xx/include/mach/uncompress.h
+ *
+ * Serial port stubs for kernel decompress status messages
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_UNCOMPRESS_H
+#define __MACH_UNCOMPRESS_H
+
+#include <plat/uncompress.h>
+
+#endif /* __MACH_UNCOMPRESS_H */
diff --git a/arch/arm/mach-spear3xx/include/mach/vmalloc.h b/arch/arm/mach-spear3xx/include/mach/vmalloc.h
new file mode 100644
index 000000000000..df977b3c9a63
--- /dev/null
+++ b/arch/arm/mach-spear3xx/include/mach/vmalloc.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear3xx/include/mach/vmalloc.h
+ *
+ * Defining Vmalloc area for SPEAr3xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_VMALLOC_H
+#define __MACH_VMALLOC_H
+
+#include <plat/vmalloc.h>
+
+#endif /* __MACH_VMALLOC_H */
diff --git a/arch/arm/mach-spear3xx/spear300.c b/arch/arm/mach-spear3xx/spear300.c
new file mode 100644
index 000000000000..3560f8c1e723
--- /dev/null
+++ b/arch/arm/mach-spear3xx/spear300.c
@@ -0,0 +1,468 @@
+/*
+ * arch/arm/mach-spear3xx/spear300.c
+ *
+ * SPEAr300 machine source file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/types.h>
+#include <linux/amba/pl061.h>
+#include <linux/ptrace.h>
+#include <asm/irq.h>
+#include <mach/generic.h>
+#include <mach/spear.h>
+#include <plat/shirq.h>
+
+/* pad multiplexing support */
+/* muxing registers */
+#define PAD_MUX_CONFIG_REG 0x00
+#define MODE_CONFIG_REG 0x04
+
+/* modes */
+#define NAND_MODE (1 << 0)
+#define NOR_MODE (1 << 1)
+#define PHOTO_FRAME_MODE (1 << 2)
+#define LEND_IP_PHONE_MODE (1 << 3)
+#define HEND_IP_PHONE_MODE (1 << 4)
+#define LEND_WIFI_PHONE_MODE (1 << 5)
+#define HEND_WIFI_PHONE_MODE (1 << 6)
+#define ATA_PABX_WI2S_MODE (1 << 7)
+#define ATA_PABX_I2S_MODE (1 << 8)
+#define CAML_LCDW_MODE (1 << 9)
+#define CAMU_LCD_MODE (1 << 10)
+#define CAMU_WLCD_MODE (1 << 11)
+#define CAML_LCD_MODE (1 << 12)
+#define ALL_MODES 0x1FFF
+
+struct pmx_mode nand_mode = {
+ .id = NAND_MODE,
+ .name = "nand mode",
+ .mask = 0x00,
+};
+
+struct pmx_mode nor_mode = {
+ .id = NOR_MODE,
+ .name = "nor mode",
+ .mask = 0x01,
+};
+
+struct pmx_mode photo_frame_mode = {
+ .id = PHOTO_FRAME_MODE,
+ .name = "photo frame mode",
+ .mask = 0x02,
+};
+
+struct pmx_mode lend_ip_phone_mode = {
+ .id = LEND_IP_PHONE_MODE,
+ .name = "lend ip phone mode",
+ .mask = 0x03,
+};
+
+struct pmx_mode hend_ip_phone_mode = {
+ .id = HEND_IP_PHONE_MODE,
+ .name = "hend ip phone mode",
+ .mask = 0x04,
+};
+
+struct pmx_mode lend_wifi_phone_mode = {
+ .id = LEND_WIFI_PHONE_MODE,
+ .name = "lend wifi phone mode",
+ .mask = 0x05,
+};
+
+struct pmx_mode hend_wifi_phone_mode = {
+ .id = HEND_WIFI_PHONE_MODE,
+ .name = "hend wifi phone mode",
+ .mask = 0x06,
+};
+
+struct pmx_mode ata_pabx_wi2s_mode = {
+ .id = ATA_PABX_WI2S_MODE,
+ .name = "ata pabx wi2s mode",
+ .mask = 0x07,
+};
+
+struct pmx_mode ata_pabx_i2s_mode = {
+ .id = ATA_PABX_I2S_MODE,
+ .name = "ata pabx i2s mode",
+ .mask = 0x08,
+};
+
+struct pmx_mode caml_lcdw_mode = {
+ .id = CAML_LCDW_MODE,
+ .name = "caml lcdw mode",
+ .mask = 0x0C,
+};
+
+struct pmx_mode camu_lcd_mode = {
+ .id = CAMU_LCD_MODE,
+ .name = "camu lcd mode",
+ .mask = 0x0D,
+};
+
+struct pmx_mode camu_wlcd_mode = {
+ .id = CAMU_WLCD_MODE,
+ .name = "camu wlcd mode",
+ .mask = 0x0E,
+};
+
+struct pmx_mode caml_lcd_mode = {
+ .id = CAML_LCD_MODE,
+ .name = "caml lcd mode",
+ .mask = 0x0F,
+};
+
+/* devices */
+struct pmx_dev_mode pmx_fsmc_2_chips_modes[] = {
+ {
+ .ids = NAND_MODE | NOR_MODE | PHOTO_FRAME_MODE |
+ ATA_PABX_WI2S_MODE | ATA_PABX_I2S_MODE,
+ .mask = PMX_FIRDA_MASK,
+ },
+};
+
+struct pmx_dev pmx_fsmc_2_chips = {
+ .name = "fsmc_2_chips",
+ .modes = pmx_fsmc_2_chips_modes,
+ .mode_count = ARRAY_SIZE(pmx_fsmc_2_chips_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_fsmc_4_chips_modes[] = {
+ {
+ .ids = NAND_MODE | NOR_MODE | PHOTO_FRAME_MODE |
+ ATA_PABX_WI2S_MODE | ATA_PABX_I2S_MODE,
+ .mask = PMX_FIRDA_MASK | PMX_UART0_MASK,
+ },
+};
+
+struct pmx_dev pmx_fsmc_4_chips = {
+ .name = "fsmc_4_chips",
+ .modes = pmx_fsmc_4_chips_modes,
+ .mode_count = ARRAY_SIZE(pmx_fsmc_4_chips_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_keyboard_modes[] = {
+ {
+ .ids = LEND_IP_PHONE_MODE | HEND_IP_PHONE_MODE |
+ LEND_WIFI_PHONE_MODE | HEND_WIFI_PHONE_MODE |
+ CAML_LCDW_MODE | CAMU_LCD_MODE | CAMU_WLCD_MODE |
+ CAML_LCD_MODE,
+ .mask = 0x0,
+ },
+};
+
+struct pmx_dev pmx_keyboard = {
+ .name = "keyboard",
+ .modes = pmx_keyboard_modes,
+ .mode_count = ARRAY_SIZE(pmx_keyboard_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_clcd_modes[] = {
+ {
+ .ids = PHOTO_FRAME_MODE,
+		.mask = PMX_TIMER_1_2_MASK | PMX_TIMER_3_4_MASK,
+ }, {
+ .ids = HEND_IP_PHONE_MODE | HEND_WIFI_PHONE_MODE |
+ CAMU_LCD_MODE | CAML_LCD_MODE,
+ .mask = PMX_TIMER_3_4_MASK,
+ },
+};
+
+struct pmx_dev pmx_clcd = {
+ .name = "clcd",
+ .modes = pmx_clcd_modes,
+ .mode_count = ARRAY_SIZE(pmx_clcd_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_telecom_gpio_modes[] = {
+ {
+ .ids = PHOTO_FRAME_MODE | CAMU_LCD_MODE | CAML_LCD_MODE,
+ .mask = PMX_MII_MASK,
+ }, {
+ .ids = LEND_IP_PHONE_MODE | LEND_WIFI_PHONE_MODE,
+ .mask = PMX_MII_MASK | PMX_TIMER_1_2_MASK | PMX_TIMER_3_4_MASK,
+ }, {
+ .ids = ATA_PABX_I2S_MODE | CAML_LCDW_MODE | CAMU_WLCD_MODE,
+ .mask = PMX_MII_MASK | PMX_TIMER_3_4_MASK,
+ }, {
+ .ids = HEND_IP_PHONE_MODE | HEND_WIFI_PHONE_MODE,
+ .mask = PMX_MII_MASK | PMX_TIMER_1_2_MASK,
+ }, {
+ .ids = ATA_PABX_WI2S_MODE,
+ .mask = PMX_MII_MASK | PMX_TIMER_1_2_MASK | PMX_TIMER_3_4_MASK
+ | PMX_UART0_MODEM_MASK,
+ },
+};
+
+struct pmx_dev pmx_telecom_gpio = {
+ .name = "telecom_gpio",
+ .modes = pmx_telecom_gpio_modes,
+ .mode_count = ARRAY_SIZE(pmx_telecom_gpio_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_telecom_tdm_modes[] = {
+ {
+ .ids = PHOTO_FRAME_MODE | LEND_IP_PHONE_MODE |
+ HEND_IP_PHONE_MODE | LEND_WIFI_PHONE_MODE
+ | HEND_WIFI_PHONE_MODE | ATA_PABX_WI2S_MODE
+ | ATA_PABX_I2S_MODE | CAML_LCDW_MODE | CAMU_LCD_MODE
+ | CAMU_WLCD_MODE | CAML_LCD_MODE,
+ .mask = PMX_UART0_MODEM_MASK | PMX_SSP_CS_MASK,
+ },
+};
+
+struct pmx_dev pmx_telecom_tdm = {
+ .name = "telecom_tdm",
+ .modes = pmx_telecom_tdm_modes,
+ .mode_count = ARRAY_SIZE(pmx_telecom_tdm_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_telecom_spi_cs_i2c_clk_modes[] = {
+ {
+ .ids = LEND_IP_PHONE_MODE | HEND_IP_PHONE_MODE |
+ LEND_WIFI_PHONE_MODE | HEND_WIFI_PHONE_MODE
+ | ATA_PABX_WI2S_MODE | ATA_PABX_I2S_MODE |
+ CAML_LCDW_MODE | CAML_LCD_MODE,
+ .mask = PMX_TIMER_1_2_MASK | PMX_TIMER_3_4_MASK,
+ },
+};
+
+struct pmx_dev pmx_telecom_spi_cs_i2c_clk = {
+ .name = "telecom_spi_cs_i2c_clk",
+ .modes = pmx_telecom_spi_cs_i2c_clk_modes,
+ .mode_count = ARRAY_SIZE(pmx_telecom_spi_cs_i2c_clk_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_telecom_camera_modes[] = {
+ {
+ .ids = CAML_LCDW_MODE | CAML_LCD_MODE,
+ .mask = PMX_MII_MASK,
+ }, {
+ .ids = CAMU_LCD_MODE | CAMU_WLCD_MODE,
+ .mask = PMX_TIMER_1_2_MASK | PMX_TIMER_3_4_MASK | PMX_MII_MASK,
+ },
+};
+
+struct pmx_dev pmx_telecom_camera = {
+ .name = "telecom_camera",
+ .modes = pmx_telecom_camera_modes,
+ .mode_count = ARRAY_SIZE(pmx_telecom_camera_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_telecom_dac_modes[] = {
+ {
+ .ids = ATA_PABX_I2S_MODE | CAML_LCDW_MODE | CAMU_LCD_MODE
+ | CAMU_WLCD_MODE | CAML_LCD_MODE,
+ .mask = PMX_TIMER_1_2_MASK,
+ },
+};
+
+struct pmx_dev pmx_telecom_dac = {
+ .name = "telecom_dac",
+ .modes = pmx_telecom_dac_modes,
+ .mode_count = ARRAY_SIZE(pmx_telecom_dac_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_telecom_i2s_modes[] = {
+ {
+ .ids = LEND_IP_PHONE_MODE | HEND_IP_PHONE_MODE
+ | LEND_WIFI_PHONE_MODE | HEND_WIFI_PHONE_MODE |
+ ATA_PABX_I2S_MODE | CAML_LCDW_MODE | CAMU_LCD_MODE
+ | CAMU_WLCD_MODE | CAML_LCD_MODE,
+ .mask = PMX_UART0_MODEM_MASK,
+ },
+};
+
+struct pmx_dev pmx_telecom_i2s = {
+ .name = "telecom_i2s",
+ .modes = pmx_telecom_i2s_modes,
+ .mode_count = ARRAY_SIZE(pmx_telecom_i2s_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_telecom_boot_pins_modes[] = {
+ {
+ .ids = NAND_MODE | NOR_MODE,
+ .mask = PMX_UART0_MODEM_MASK | PMX_TIMER_1_2_MASK |
+ PMX_TIMER_3_4_MASK,
+ },
+};
+
+struct pmx_dev pmx_telecom_boot_pins = {
+ .name = "telecom_boot_pins",
+ .modes = pmx_telecom_boot_pins_modes,
+ .mode_count = ARRAY_SIZE(pmx_telecom_boot_pins_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_telecom_sdio_4bit_modes[] = {
+ {
+ .ids = PHOTO_FRAME_MODE | LEND_IP_PHONE_MODE |
+ HEND_IP_PHONE_MODE | LEND_WIFI_PHONE_MODE |
+ HEND_WIFI_PHONE_MODE | CAML_LCDW_MODE | CAMU_LCD_MODE |
+ CAMU_WLCD_MODE | CAML_LCD_MODE | ATA_PABX_WI2S_MODE |
+ ATA_PABX_I2S_MODE,
+ .mask = PMX_GPIO_PIN0_MASK | PMX_GPIO_PIN1_MASK |
+ PMX_GPIO_PIN2_MASK | PMX_GPIO_PIN3_MASK |
+ PMX_GPIO_PIN4_MASK | PMX_GPIO_PIN5_MASK,
+ },
+};
+
+struct pmx_dev pmx_telecom_sdio_4bit = {
+ .name = "telecom_sdio_4bit",
+ .modes = pmx_telecom_sdio_4bit_modes,
+ .mode_count = ARRAY_SIZE(pmx_telecom_sdio_4bit_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_telecom_sdio_8bit_modes[] = {
+ {
+ .ids = PHOTO_FRAME_MODE | LEND_IP_PHONE_MODE |
+ HEND_IP_PHONE_MODE | LEND_WIFI_PHONE_MODE |
+ HEND_WIFI_PHONE_MODE | CAML_LCDW_MODE | CAMU_LCD_MODE |
+ CAMU_WLCD_MODE | CAML_LCD_MODE,
+ .mask = PMX_GPIO_PIN0_MASK | PMX_GPIO_PIN1_MASK |
+ PMX_GPIO_PIN2_MASK | PMX_GPIO_PIN3_MASK |
+ PMX_GPIO_PIN4_MASK | PMX_GPIO_PIN5_MASK | PMX_MII_MASK,
+ },
+};
+
+struct pmx_dev pmx_telecom_sdio_8bit = {
+ .name = "telecom_sdio_8bit",
+ .modes = pmx_telecom_sdio_8bit_modes,
+ .mode_count = ARRAY_SIZE(pmx_telecom_sdio_8bit_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_gpio1_modes[] = {
+ {
+ .ids = PHOTO_FRAME_MODE,
+ .mask = PMX_UART0_MODEM_MASK | PMX_TIMER_1_2_MASK |
+ PMX_TIMER_3_4_MASK,
+ },
+};
+
+struct pmx_dev pmx_gpio1 = {
+ .name = "arm gpio1",
+ .modes = pmx_gpio1_modes,
+ .mode_count = ARRAY_SIZE(pmx_gpio1_modes),
+ .enb_on_reset = 1,
+};
+
+/* pmx driver structure */
+struct pmx_driver pmx_driver = {
+ .mode_reg = {.offset = MODE_CONFIG_REG, .mask = 0x0000000f},
+ .mux_reg = {.offset = PAD_MUX_CONFIG_REG, .mask = 0x00007fff},
+};
+
+/* Add spear300 specific devices here */
+/* arm gpio1 device registration */
+static struct pl061_platform_data gpio1_plat_data = {
+ .gpio_base = 8,
+ .irq_base = SPEAR_GPIO1_INT_BASE,
+};
+
+struct amba_device gpio1_device = {
+ .dev = {
+ .init_name = "gpio1",
+ .platform_data = &gpio1_plat_data,
+ },
+ .res = {
+ .start = SPEAR300_GPIO_BASE,
+ .end = SPEAR300_GPIO_BASE + SPEAR300_GPIO_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ .irq = {VIRQ_GPIO1, NO_IRQ},
+};
+
+/* spear3xx shared irq */
+struct shirq_dev_config shirq_ras1_config[] = {
+ {
+ .virq = VIRQ_IT_PERS_S,
+ .enb_mask = IT_PERS_S_IRQ_MASK,
+ .status_mask = IT_PERS_S_IRQ_MASK,
+ }, {
+ .virq = VIRQ_IT_CHANGE_S,
+ .enb_mask = IT_CHANGE_S_IRQ_MASK,
+ .status_mask = IT_CHANGE_S_IRQ_MASK,
+ }, {
+ .virq = VIRQ_I2S,
+ .enb_mask = I2S_IRQ_MASK,
+ .status_mask = I2S_IRQ_MASK,
+ }, {
+ .virq = VIRQ_TDM,
+ .enb_mask = TDM_IRQ_MASK,
+ .status_mask = TDM_IRQ_MASK,
+ }, {
+ .virq = VIRQ_CAMERA_L,
+ .enb_mask = CAMERA_L_IRQ_MASK,
+ .status_mask = CAMERA_L_IRQ_MASK,
+ }, {
+ .virq = VIRQ_CAMERA_F,
+ .enb_mask = CAMERA_F_IRQ_MASK,
+ .status_mask = CAMERA_F_IRQ_MASK,
+ }, {
+ .virq = VIRQ_CAMERA_V,
+ .enb_mask = CAMERA_V_IRQ_MASK,
+ .status_mask = CAMERA_V_IRQ_MASK,
+ }, {
+ .virq = VIRQ_KEYBOARD,
+ .enb_mask = KEYBOARD_IRQ_MASK,
+ .status_mask = KEYBOARD_IRQ_MASK,
+ }, {
+ .virq = VIRQ_GPIO1,
+ .enb_mask = GPIO1_IRQ_MASK,
+ .status_mask = GPIO1_IRQ_MASK,
+ },
+};
+
+struct spear_shirq shirq_ras1 = {
+ .irq = IRQ_GEN_RAS_1,
+ .dev_config = shirq_ras1_config,
+ .dev_count = ARRAY_SIZE(shirq_ras1_config),
+ .regs = {
+ .enb_reg = INT_ENB_MASK_REG,
+ .status_reg = INT_STS_MASK_REG,
+ .status_reg_mask = SHIRQ_RAS1_MASK,
+ .clear_reg = -1,
+ },
+};
+
+/* spear300 routines */
+void __init spear300_init(void)
+{
+ int ret = 0;
+
+ /* call spear3xx family common init function */
+ spear3xx_init();
+
+	/* shared irq registration */
+ shirq_ras1.regs.base =
+ ioremap(SPEAR300_TELECOM_BASE, SPEAR300_TELECOM_REG_SIZE);
+ if (shirq_ras1.regs.base) {
+ ret = spear_shirq_register(&shirq_ras1);
+ if (ret)
+ printk(KERN_ERR "Error registering Shared IRQ\n");
+ }
+}
+
+void spear300_pmx_init(void)
+{
+ spear_pmx_init(&pmx_driver, SPEAR300_SOC_CONFIG_BASE,
+ SPEAR300_SOC_CONFIG_SIZE);
+}
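shirq_ras1 above only supplies the demultiplexing data; the handler itself lives in plat/shirq.c and is not part of this file. A rough sketch of what a demux routine built on these fields would do, assuming the status register has been ioremapped into regs.base as in spear300_init() (names and flow are illustrative only):

	static void shirq_demux(struct spear_shirq *shirq)
	{
		u32 status;
		int i;

		status = readl(shirq->regs.base + shirq->regs.status_reg);
		status &= shirq->regs.status_reg_mask;

		for (i = 0; i < shirq->dev_count; i++)
			if (status & shirq->dev_config[i].status_mask)
				generic_handle_irq(shirq->dev_config[i].virq);
	}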
diff --git a/arch/arm/mach-spear3xx/spear300_evb.c b/arch/arm/mach-spear3xx/spear300_evb.c
new file mode 100644
index 000000000000..bb21db152a23
--- /dev/null
+++ b/arch/arm/mach-spear3xx/spear300_evb.c
@@ -0,0 +1,77 @@
+/*
+ * arch/arm/mach-spear3xx/spear300_evb.c
+ *
+ * SPEAr300 evaluation board source file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <asm/mach/arch.h>
+#include <asm/mach-types.h>
+#include <mach/generic.h>
+#include <mach/spear.h>
+
+/* padmux devices to enable */
+static struct pmx_dev *pmx_devs[] = {
+ /* spear3xx specific devices */
+ &pmx_i2c,
+ &pmx_ssp_cs,
+ &pmx_ssp,
+ &pmx_mii,
+ &pmx_uart0,
+
+ /* spear300 specific devices */
+ &pmx_fsmc_2_chips,
+ &pmx_clcd,
+ &pmx_telecom_sdio_4bit,
+ &pmx_gpio1,
+};
+
+static struct amba_device *amba_devs[] __initdata = {
+ /* spear3xx specific devices */
+ &gpio_device,
+ &uart_device,
+
+ /* spear300 specific devices */
+ &gpio1_device,
+};
+
+static struct platform_device *plat_devs[] __initdata = {
+ /* spear3xx specific devices */
+
+ /* spear300 specific devices */
+};
+
+static void __init spear300_evb_init(void)
+{
+ unsigned int i;
+
+ /* call spear300 machine init function */
+ spear300_init();
+
+ /* padmux initialization */
+ pmx_driver.mode = &photo_frame_mode;
+ pmx_driver.devs = pmx_devs;
+ pmx_driver.devs_count = ARRAY_SIZE(pmx_devs);
+ spear300_pmx_init();
+
+ /* Add Platform Devices */
+ platform_add_devices(plat_devs, ARRAY_SIZE(plat_devs));
+
+ /* Add Amba Devices */
+ for (i = 0; i < ARRAY_SIZE(amba_devs); i++)
+ amba_device_register(amba_devs[i], &iomem_resource);
+}
+
+MACHINE_START(SPEAR300, "ST-SPEAR300-EVB")
+ .boot_params = 0x00000100,
+ .map_io = spear3xx_map_io,
+ .init_irq = spear3xx_init_irq,
+ .timer = &spear_sys_timer,
+ .init_machine = spear300_evb_init,
+MACHINE_END
diff --git a/arch/arm/mach-spear3xx/spear310.c b/arch/arm/mach-spear3xx/spear310.c
new file mode 100644
index 000000000000..96a1ab824bac
--- /dev/null
+++ b/arch/arm/mach-spear3xx/spear310.c
@@ -0,0 +1,302 @@
+/*
+ * arch/arm/mach-spear3xx/spear310.c
+ *
+ * SPEAr310 machine source file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/ptrace.h>
+#include <asm/irq.h>
+#include <mach/generic.h>
+#include <mach/spear.h>
+#include <plat/shirq.h>
+
+/* pad multiplexing support */
+/* muxing registers */
+#define PAD_MUX_CONFIG_REG 0x08
+
+/* devices */
+struct pmx_dev_mode pmx_emi_cs_0_1_4_5_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_TIMER_3_4_MASK,
+ },
+};
+
+struct pmx_dev pmx_emi_cs_0_1_4_5 = {
+ .name = "emi_cs_0_1_4_5",
+ .modes = pmx_emi_cs_0_1_4_5_modes,
+ .mode_count = ARRAY_SIZE(pmx_emi_cs_0_1_4_5_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_emi_cs_2_3_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_TIMER_1_2_MASK,
+ },
+};
+
+struct pmx_dev pmx_emi_cs_2_3 = {
+ .name = "emi_cs_2_3",
+ .modes = pmx_emi_cs_2_3_modes,
+ .mode_count = ARRAY_SIZE(pmx_emi_cs_2_3_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_uart1_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_FIRDA_MASK,
+ },
+};
+
+struct pmx_dev pmx_uart1 = {
+ .name = "uart1",
+ .modes = pmx_uart1_modes,
+ .mode_count = ARRAY_SIZE(pmx_uart1_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_uart2_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_TIMER_1_2_MASK,
+ },
+};
+
+struct pmx_dev pmx_uart2 = {
+ .name = "uart2",
+ .modes = pmx_uart2_modes,
+ .mode_count = ARRAY_SIZE(pmx_uart2_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_uart3_4_5_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_UART0_MODEM_MASK,
+ },
+};
+
+struct pmx_dev pmx_uart3_4_5 = {
+ .name = "uart3_4_5",
+ .modes = pmx_uart3_4_5_modes,
+ .mode_count = ARRAY_SIZE(pmx_uart3_4_5_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_fsmc_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_SSP_CS_MASK,
+ },
+};
+
+struct pmx_dev pmx_fsmc = {
+ .name = "fsmc",
+ .modes = pmx_fsmc_modes,
+ .mode_count = ARRAY_SIZE(pmx_fsmc_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_rs485_0_1_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_MII_MASK,
+ },
+};
+
+struct pmx_dev pmx_rs485_0_1 = {
+ .name = "rs485_0_1",
+ .modes = pmx_rs485_0_1_modes,
+ .mode_count = ARRAY_SIZE(pmx_rs485_0_1_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_tdm0_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_MII_MASK,
+ },
+};
+
+struct pmx_dev pmx_tdm0 = {
+ .name = "tdm0",
+ .modes = pmx_tdm0_modes,
+ .mode_count = ARRAY_SIZE(pmx_tdm0_modes),
+ .enb_on_reset = 1,
+};
+
+/* pmx driver structure */
+struct pmx_driver pmx_driver = {
+ .mux_reg = {.offset = PAD_MUX_CONFIG_REG, .mask = 0x00007fff},
+};
+
+/* Add spear310 specific devices here */
+
+/* spear3xx shared irq */
+struct shirq_dev_config shirq_ras1_config[] = {
+ {
+ .virq = VIRQ_SMII0,
+ .status_mask = SMII0_IRQ_MASK,
+ }, {
+ .virq = VIRQ_SMII1,
+ .status_mask = SMII1_IRQ_MASK,
+ }, {
+ .virq = VIRQ_SMII2,
+ .status_mask = SMII2_IRQ_MASK,
+ }, {
+ .virq = VIRQ_SMII3,
+ .status_mask = SMII3_IRQ_MASK,
+ }, {
+ .virq = VIRQ_WAKEUP_SMII0,
+ .status_mask = WAKEUP_SMII0_IRQ_MASK,
+ }, {
+ .virq = VIRQ_WAKEUP_SMII1,
+ .status_mask = WAKEUP_SMII1_IRQ_MASK,
+ }, {
+ .virq = VIRQ_WAKEUP_SMII2,
+ .status_mask = WAKEUP_SMII2_IRQ_MASK,
+ }, {
+ .virq = VIRQ_WAKEUP_SMII3,
+ .status_mask = WAKEUP_SMII3_IRQ_MASK,
+ },
+};
+
+struct spear_shirq shirq_ras1 = {
+ .irq = IRQ_GEN_RAS_1,
+ .dev_config = shirq_ras1_config,
+ .dev_count = ARRAY_SIZE(shirq_ras1_config),
+ .regs = {
+ .enb_reg = -1,
+ .status_reg = INT_STS_MASK_REG,
+ .status_reg_mask = SHIRQ_RAS1_MASK,
+ .clear_reg = -1,
+ },
+};
+
+struct shirq_dev_config shirq_ras2_config[] = {
+ {
+ .virq = VIRQ_UART1,
+ .status_mask = UART1_IRQ_MASK,
+ }, {
+ .virq = VIRQ_UART2,
+ .status_mask = UART2_IRQ_MASK,
+ }, {
+ .virq = VIRQ_UART3,
+ .status_mask = UART3_IRQ_MASK,
+ }, {
+ .virq = VIRQ_UART4,
+ .status_mask = UART4_IRQ_MASK,
+ }, {
+ .virq = VIRQ_UART5,
+ .status_mask = UART5_IRQ_MASK,
+ },
+};
+
+struct spear_shirq shirq_ras2 = {
+ .irq = IRQ_GEN_RAS_2,
+ .dev_config = shirq_ras2_config,
+ .dev_count = ARRAY_SIZE(shirq_ras2_config),
+ .regs = {
+ .enb_reg = -1,
+ .status_reg = INT_STS_MASK_REG,
+ .status_reg_mask = SHIRQ_RAS2_MASK,
+ .clear_reg = -1,
+ },
+};
+
+struct shirq_dev_config shirq_ras3_config[] = {
+ {
+ .virq = VIRQ_EMI,
+ .status_mask = EMI_IRQ_MASK,
+ },
+};
+
+struct spear_shirq shirq_ras3 = {
+ .irq = IRQ_GEN_RAS_3,
+ .dev_config = shirq_ras3_config,
+ .dev_count = ARRAY_SIZE(shirq_ras3_config),
+ .regs = {
+ .enb_reg = -1,
+ .status_reg = INT_STS_MASK_REG,
+ .status_reg_mask = SHIRQ_RAS3_MASK,
+ .clear_reg = -1,
+ },
+};
+
+struct shirq_dev_config shirq_intrcomm_ras_config[] = {
+ {
+ .virq = VIRQ_TDM_HDLC,
+ .status_mask = TDM_HDLC_IRQ_MASK,
+ }, {
+ .virq = VIRQ_RS485_0,
+ .status_mask = RS485_0_IRQ_MASK,
+ }, {
+ .virq = VIRQ_RS485_1,
+ .status_mask = RS485_1_IRQ_MASK,
+ },
+};
+
+struct spear_shirq shirq_intrcomm_ras = {
+ .irq = IRQ_INTRCOMM_RAS_ARM,
+ .dev_config = shirq_intrcomm_ras_config,
+ .dev_count = ARRAY_SIZE(shirq_intrcomm_ras_config),
+ .regs = {
+ .enb_reg = -1,
+ .status_reg = INT_STS_MASK_REG,
+ .status_reg_mask = SHIRQ_INTRCOMM_RAS_MASK,
+ .clear_reg = -1,
+ },
+};
+
+/* spear310 routines */
+void __init spear310_init(void)
+{
+ void __iomem *base;
+ int ret = 0;
+
+ /* call spear3xx family common init function */
+ spear3xx_init();
+
+	/* shared irq registration */
+ base = ioremap(SPEAR310_SOC_CONFIG_BASE, SPEAR310_SOC_CONFIG_SIZE);
+ if (base) {
+ /* shirq 1 */
+ shirq_ras1.regs.base = base;
+ ret = spear_shirq_register(&shirq_ras1);
+ if (ret)
+ printk(KERN_ERR "Error registering Shared IRQ 1\n");
+
+ /* shirq 2 */
+ shirq_ras2.regs.base = base;
+ ret = spear_shirq_register(&shirq_ras2);
+ if (ret)
+ printk(KERN_ERR "Error registering Shared IRQ 2\n");
+
+ /* shirq 3 */
+ shirq_ras3.regs.base = base;
+ ret = spear_shirq_register(&shirq_ras3);
+ if (ret)
+ printk(KERN_ERR "Error registering Shared IRQ 3\n");
+
+ /* shirq 4 */
+ shirq_intrcomm_ras.regs.base = base;
+ ret = spear_shirq_register(&shirq_intrcomm_ras);
+ if (ret)
+ printk(KERN_ERR "Error registering Shared IRQ 4\n");
+ }
+}
+
+void spear310_pmx_init(void)
+{
+ spear_pmx_init(&pmx_driver, SPEAR310_SOC_CONFIG_BASE,
+ SPEAR310_SOC_CONFIG_SIZE);
+}
diff --git a/arch/arm/mach-spear3xx/spear310_evb.c b/arch/arm/mach-spear3xx/spear310_evb.c
new file mode 100644
index 000000000000..7facf6643199
--- /dev/null
+++ b/arch/arm/mach-spear3xx/spear310_evb.c
@@ -0,0 +1,84 @@
+/*
+ * arch/arm/mach-spear3xx/spear310_evb.c
+ *
+ * SPEAr310 evaluation board source file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <asm/mach/arch.h>
+#include <asm/mach-types.h>
+#include <mach/generic.h>
+#include <mach/spear.h>
+
+/* padmux devices to enable */
+static struct pmx_dev *pmx_devs[] = {
+ /* spear3xx specific devices */
+ &pmx_i2c,
+ &pmx_ssp,
+ &pmx_gpio_pin0,
+ &pmx_gpio_pin1,
+ &pmx_gpio_pin2,
+ &pmx_gpio_pin3,
+ &pmx_gpio_pin4,
+ &pmx_gpio_pin5,
+ &pmx_uart0,
+
+ /* spear310 specific devices */
+ &pmx_emi_cs_0_1_4_5,
+ &pmx_emi_cs_2_3,
+ &pmx_uart1,
+ &pmx_uart2,
+ &pmx_uart3_4_5,
+ &pmx_fsmc,
+ &pmx_rs485_0_1,
+ &pmx_tdm0,
+};
+
+static struct amba_device *amba_devs[] __initdata = {
+ /* spear3xx specific devices */
+ &gpio_device,
+ &uart_device,
+
+ /* spear310 specific devices */
+};
+
+static struct platform_device *plat_devs[] __initdata = {
+ /* spear3xx specific devices */
+
+ /* spear310 specific devices */
+};
+
+static void __init spear310_evb_init(void)
+{
+ unsigned int i;
+
+ /* call spear310 machine init function */
+ spear310_init();
+
+ /* padmux initialization */
+ pmx_driver.mode = NULL;
+ pmx_driver.devs = pmx_devs;
+ pmx_driver.devs_count = ARRAY_SIZE(pmx_devs);
+ spear310_pmx_init();
+
+ /* Add Platform Devices */
+ platform_add_devices(plat_devs, ARRAY_SIZE(plat_devs));
+
+ /* Add Amba Devices */
+ for (i = 0; i < ARRAY_SIZE(amba_devs); i++)
+ amba_device_register(amba_devs[i], &iomem_resource);
+}
+
+MACHINE_START(SPEAR310, "ST-SPEAR310-EVB")
+ .boot_params = 0x00000100,
+ .map_io = spear3xx_map_io,
+ .init_irq = spear3xx_init_irq,
+ .timer = &spear_sys_timer,
+ .init_machine = spear310_evb_init,
+MACHINE_END
diff --git a/arch/arm/mach-spear3xx/spear320.c b/arch/arm/mach-spear3xx/spear320.c
new file mode 100644
index 000000000000..6a1219549369
--- /dev/null
+++ b/arch/arm/mach-spear3xx/spear320.c
@@ -0,0 +1,549 @@
+/*
+ * arch/arm/mach-spear3xx/spear320.c
+ *
+ * SPEAr320 machine source file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/ptrace.h>
+#include <asm/irq.h>
+#include <mach/generic.h>
+#include <mach/spear.h>
+#include <plat/shirq.h>
+
+/* pad multiplexing support */
+/* muxing registers */
+#define PAD_MUX_CONFIG_REG 0x0C
+#define MODE_CONFIG_REG 0x10
+
+/* modes */
+#define AUTO_NET_SMII_MODE (1 << 0)
+#define AUTO_NET_MII_MODE (1 << 1)
+#define AUTO_EXP_MODE (1 << 2)
+#define SMALL_PRINTERS_MODE (1 << 3)
+#define ALL_MODES 0xF
+
+struct pmx_mode auto_net_smii_mode = {
+ .id = AUTO_NET_SMII_MODE,
+ .name = "Automation Networking SMII Mode",
+ .mask = 0x00,
+};
+
+struct pmx_mode auto_net_mii_mode = {
+ .id = AUTO_NET_MII_MODE,
+ .name = "Automation Networking MII Mode",
+ .mask = 0x01,
+};
+
+struct pmx_mode auto_exp_mode = {
+ .id = AUTO_EXP_MODE,
+ .name = "Automation Expanded Mode",
+ .mask = 0x02,
+};
+
+struct pmx_mode small_printers_mode = {
+ .id = SMALL_PRINTERS_MODE,
+ .name = "Small Printers Mode",
+ .mask = 0x03,
+};
+
+/* devices */
+struct pmx_dev_mode pmx_clcd_modes[] = {
+ {
+ .ids = AUTO_NET_SMII_MODE,
+ .mask = 0x0,
+ },
+};
+
+struct pmx_dev pmx_clcd = {
+ .name = "clcd",
+ .modes = pmx_clcd_modes,
+ .mode_count = ARRAY_SIZE(pmx_clcd_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_emi_modes[] = {
+ {
+ .ids = AUTO_EXP_MODE,
+ .mask = PMX_TIMER_1_2_MASK | PMX_TIMER_3_4_MASK,
+ },
+};
+
+struct pmx_dev pmx_emi = {
+ .name = "emi",
+ .modes = pmx_emi_modes,
+ .mode_count = ARRAY_SIZE(pmx_emi_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_fsmc_modes[] = {
+ {
+ .ids = ALL_MODES,
+ .mask = 0x0,
+ },
+};
+
+struct pmx_dev pmx_fsmc = {
+ .name = "fsmc",
+ .modes = pmx_fsmc_modes,
+ .mode_count = ARRAY_SIZE(pmx_fsmc_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_spp_modes[] = {
+ {
+ .ids = SMALL_PRINTERS_MODE,
+ .mask = 0x0,
+ },
+};
+
+struct pmx_dev pmx_spp = {
+ .name = "spp",
+ .modes = pmx_spp_modes,
+ .mode_count = ARRAY_SIZE(pmx_spp_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_sdio_modes[] = {
+ {
+ .ids = AUTO_NET_SMII_MODE | AUTO_NET_MII_MODE |
+ SMALL_PRINTERS_MODE,
+ .mask = PMX_TIMER_1_2_MASK | PMX_TIMER_3_4_MASK,
+ },
+};
+
+struct pmx_dev pmx_sdio = {
+ .name = "sdio",
+ .modes = pmx_sdio_modes,
+ .mode_count = ARRAY_SIZE(pmx_sdio_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_i2s_modes[] = {
+ {
+ .ids = AUTO_NET_SMII_MODE | AUTO_NET_MII_MODE,
+ .mask = PMX_UART0_MODEM_MASK,
+ },
+};
+
+struct pmx_dev pmx_i2s = {
+ .name = "i2s",
+ .modes = pmx_i2s_modes,
+ .mode_count = ARRAY_SIZE(pmx_i2s_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_uart1_modes[] = {
+ {
+ .ids = ALL_MODES,
+ .mask = PMX_GPIO_PIN0_MASK | PMX_GPIO_PIN1_MASK,
+ },
+};
+
+struct pmx_dev pmx_uart1 = {
+ .name = "uart1",
+ .modes = pmx_uart1_modes,
+ .mode_count = ARRAY_SIZE(pmx_uart1_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_uart1_modem_modes[] = {
+ {
+ .ids = AUTO_EXP_MODE,
+ .mask = PMX_TIMER_1_2_MASK | PMX_TIMER_3_4_MASK |
+ PMX_SSP_CS_MASK,
+ }, {
+ .ids = SMALL_PRINTERS_MODE,
+ .mask = PMX_GPIO_PIN3_MASK | PMX_GPIO_PIN4_MASK |
+ PMX_GPIO_PIN5_MASK | PMX_SSP_CS_MASK,
+ },
+};
+
+struct pmx_dev pmx_uart1_modem = {
+ .name = "uart1_modem",
+ .modes = pmx_uart1_modem_modes,
+ .mode_count = ARRAY_SIZE(pmx_uart1_modem_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_uart2_modes[] = {
+ {
+ .ids = ALL_MODES,
+ .mask = PMX_FIRDA_MASK,
+ },
+};
+
+struct pmx_dev pmx_uart2 = {
+ .name = "uart2",
+ .modes = pmx_uart2_modes,
+ .mode_count = ARRAY_SIZE(pmx_uart2_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_touchscreen_modes[] = {
+ {
+ .ids = AUTO_NET_SMII_MODE,
+ .mask = PMX_SSP_CS_MASK,
+ },
+};
+
+struct pmx_dev pmx_touchscreen = {
+ .name = "touchscreen",
+ .modes = pmx_touchscreen_modes,
+ .mode_count = ARRAY_SIZE(pmx_touchscreen_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_can_modes[] = {
+ {
+ .ids = AUTO_NET_SMII_MODE | AUTO_NET_MII_MODE | AUTO_EXP_MODE,
+ .mask = PMX_GPIO_PIN2_MASK | PMX_GPIO_PIN3_MASK |
+ PMX_GPIO_PIN4_MASK | PMX_GPIO_PIN5_MASK,
+ },
+};
+
+struct pmx_dev pmx_can = {
+ .name = "can",
+ .modes = pmx_can_modes,
+ .mode_count = ARRAY_SIZE(pmx_can_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_sdio_led_modes[] = {
+ {
+ .ids = AUTO_NET_SMII_MODE | AUTO_NET_MII_MODE,
+ .mask = PMX_SSP_CS_MASK,
+ },
+};
+
+struct pmx_dev pmx_sdio_led = {
+ .name = "sdio_led",
+ .modes = pmx_sdio_led_modes,
+ .mode_count = ARRAY_SIZE(pmx_sdio_led_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_pwm0_modes[] = {
+ {
+ .ids = AUTO_NET_SMII_MODE | AUTO_NET_MII_MODE,
+ .mask = PMX_UART0_MODEM_MASK,
+ }, {
+ .ids = AUTO_EXP_MODE | SMALL_PRINTERS_MODE,
+ .mask = PMX_MII_MASK,
+ },
+};
+
+struct pmx_dev pmx_pwm0 = {
+ .name = "pwm0",
+ .modes = pmx_pwm0_modes,
+ .mode_count = ARRAY_SIZE(pmx_pwm0_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_pwm1_modes[] = {
+ {
+ .ids = AUTO_NET_SMII_MODE | AUTO_NET_MII_MODE,
+ .mask = PMX_UART0_MODEM_MASK,
+ }, {
+ .ids = AUTO_EXP_MODE | SMALL_PRINTERS_MODE,
+ .mask = PMX_MII_MASK,
+ },
+};
+
+struct pmx_dev pmx_pwm1 = {
+ .name = "pwm1",
+ .modes = pmx_pwm1_modes,
+ .mode_count = ARRAY_SIZE(pmx_pwm1_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_pwm2_modes[] = {
+ {
+ .ids = AUTO_NET_SMII_MODE | AUTO_NET_MII_MODE,
+ .mask = PMX_SSP_CS_MASK,
+ }, {
+ .ids = AUTO_EXP_MODE | SMALL_PRINTERS_MODE,
+ .mask = PMX_MII_MASK,
+ },
+};
+
+struct pmx_dev pmx_pwm2 = {
+ .name = "pwm2",
+ .modes = pmx_pwm2_modes,
+ .mode_count = ARRAY_SIZE(pmx_pwm2_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_pwm3_modes[] = {
+ {
+ .ids = AUTO_EXP_MODE | SMALL_PRINTERS_MODE | AUTO_NET_SMII_MODE,
+ .mask = PMX_MII_MASK,
+ },
+};
+
+struct pmx_dev pmx_pwm3 = {
+ .name = "pwm3",
+ .modes = pmx_pwm3_modes,
+ .mode_count = ARRAY_SIZE(pmx_pwm3_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_ssp1_modes[] = {
+ {
+ .ids = SMALL_PRINTERS_MODE | AUTO_NET_SMII_MODE,
+ .mask = PMX_MII_MASK,
+ },
+};
+
+struct pmx_dev pmx_ssp1 = {
+ .name = "ssp1",
+ .modes = pmx_ssp1_modes,
+ .mode_count = ARRAY_SIZE(pmx_ssp1_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_ssp2_modes[] = {
+ {
+ .ids = AUTO_NET_SMII_MODE,
+ .mask = PMX_MII_MASK,
+ },
+};
+
+struct pmx_dev pmx_ssp2 = {
+ .name = "ssp2",
+ .modes = pmx_ssp2_modes,
+ .mode_count = ARRAY_SIZE(pmx_ssp2_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_mii1_modes[] = {
+ {
+ .ids = AUTO_NET_MII_MODE,
+ .mask = 0x0,
+ },
+};
+
+struct pmx_dev pmx_mii1 = {
+ .name = "mii1",
+ .modes = pmx_mii1_modes,
+ .mode_count = ARRAY_SIZE(pmx_mii1_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_smii0_modes[] = {
+ {
+ .ids = AUTO_NET_SMII_MODE | AUTO_EXP_MODE | SMALL_PRINTERS_MODE,
+ .mask = PMX_MII_MASK,
+ },
+};
+
+struct pmx_dev pmx_smii0 = {
+ .name = "smii0",
+ .modes = pmx_smii0_modes,
+ .mode_count = ARRAY_SIZE(pmx_smii0_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_smii1_modes[] = {
+ {
+ .ids = AUTO_NET_SMII_MODE | SMALL_PRINTERS_MODE,
+ .mask = PMX_MII_MASK,
+ },
+};
+
+struct pmx_dev pmx_smii1 = {
+ .name = "smii1",
+ .modes = pmx_smii1_modes,
+ .mode_count = ARRAY_SIZE(pmx_smii1_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_i2c1_modes[] = {
+ {
+ .ids = AUTO_EXP_MODE,
+ .mask = 0x0,
+ },
+};
+
+struct pmx_dev pmx_i2c1 = {
+ .name = "i2c1",
+ .modes = pmx_i2c1_modes,
+ .mode_count = ARRAY_SIZE(pmx_i2c1_modes),
+ .enb_on_reset = 1,
+};
+
+/* pmx driver structure */
+struct pmx_driver pmx_driver = {
+ .mode_reg = {.offset = MODE_CONFIG_REG, .mask = 0x00000007},
+ .mux_reg = {.offset = PAD_MUX_CONFIG_REG, .mask = 0x00007fff},
+};
+
+/* Add spear320 specific devices here */
+
+/* spear3xx shared irq */
+struct shirq_dev_config shirq_ras1_config[] = {
+ {
+ .virq = VIRQ_EMI,
+ .status_mask = EMI_IRQ_MASK,
+ .clear_mask = EMI_IRQ_MASK,
+ }, {
+ .virq = VIRQ_CLCD,
+ .status_mask = CLCD_IRQ_MASK,
+ .clear_mask = CLCD_IRQ_MASK,
+ }, {
+ .virq = VIRQ_SPP,
+ .status_mask = SPP_IRQ_MASK,
+ .clear_mask = SPP_IRQ_MASK,
+ },
+};
+
+struct spear_shirq shirq_ras1 = {
+ .irq = IRQ_GEN_RAS_1,
+ .dev_config = shirq_ras1_config,
+ .dev_count = ARRAY_SIZE(shirq_ras1_config),
+ .regs = {
+ .enb_reg = -1,
+ .status_reg = INT_STS_MASK_REG,
+ .status_reg_mask = SHIRQ_RAS1_MASK,
+ .clear_reg = INT_CLR_MASK_REG,
+ .reset_to_clear = 1,
+ },
+};
+
+struct shirq_dev_config shirq_ras3_config[] = {
+ {
+ .virq = VIRQ_PLGPIO,
+ .enb_mask = GPIO_IRQ_MASK,
+ .status_mask = GPIO_IRQ_MASK,
+ .clear_mask = GPIO_IRQ_MASK,
+ }, {
+ .virq = VIRQ_I2S_PLAY,
+ .enb_mask = I2S_PLAY_IRQ_MASK,
+ .status_mask = I2S_PLAY_IRQ_MASK,
+ .clear_mask = I2S_PLAY_IRQ_MASK,
+ }, {
+ .virq = VIRQ_I2S_REC,
+ .enb_mask = I2S_REC_IRQ_MASK,
+ .status_mask = I2S_REC_IRQ_MASK,
+ .clear_mask = I2S_REC_IRQ_MASK,
+ },
+};
+
+struct spear_shirq shirq_ras3 = {
+ .irq = IRQ_GEN_RAS_3,
+ .dev_config = shirq_ras3_config,
+ .dev_count = ARRAY_SIZE(shirq_ras3_config),
+ .regs = {
+ .enb_reg = INT_ENB_MASK_REG,
+ .reset_to_enb = 1,
+ .status_reg = INT_STS_MASK_REG,
+ .status_reg_mask = SHIRQ_RAS3_MASK,
+ .clear_reg = INT_CLR_MASK_REG,
+ .reset_to_clear = 1,
+ },
+};
+
+struct shirq_dev_config shirq_intrcomm_ras_config[] = {
+ {
+ .virq = VIRQ_CANU,
+ .status_mask = CAN_U_IRQ_MASK,
+ .clear_mask = CAN_U_IRQ_MASK,
+ }, {
+ .virq = VIRQ_CANL,
+ .status_mask = CAN_L_IRQ_MASK,
+ .clear_mask = CAN_L_IRQ_MASK,
+ }, {
+ .virq = VIRQ_UART1,
+ .status_mask = UART1_IRQ_MASK,
+ .clear_mask = UART1_IRQ_MASK,
+ }, {
+ .virq = VIRQ_UART2,
+ .status_mask = UART2_IRQ_MASK,
+ .clear_mask = UART2_IRQ_MASK,
+ }, {
+ .virq = VIRQ_SSP1,
+ .status_mask = SSP1_IRQ_MASK,
+ .clear_mask = SSP1_IRQ_MASK,
+ }, {
+ .virq = VIRQ_SSP2,
+ .status_mask = SSP2_IRQ_MASK,
+ .clear_mask = SSP2_IRQ_MASK,
+ }, {
+ .virq = VIRQ_SMII0,
+ .status_mask = SMII0_IRQ_MASK,
+ .clear_mask = SMII0_IRQ_MASK,
+ }, {
+ .virq = VIRQ_MII1_SMII1,
+ .status_mask = MII1_SMII1_IRQ_MASK,
+ .clear_mask = MII1_SMII1_IRQ_MASK,
+ }, {
+ .virq = VIRQ_WAKEUP_SMII0,
+ .status_mask = WAKEUP_SMII0_IRQ_MASK,
+ .clear_mask = WAKEUP_SMII0_IRQ_MASK,
+ }, {
+ .virq = VIRQ_WAKEUP_MII1_SMII1,
+ .status_mask = WAKEUP_MII1_SMII1_IRQ_MASK,
+ .clear_mask = WAKEUP_MII1_SMII1_IRQ_MASK,
+ }, {
+ .virq = VIRQ_I2C,
+ .status_mask = I2C1_IRQ_MASK,
+ .clear_mask = I2C1_IRQ_MASK,
+ },
+};
+
+struct spear_shirq shirq_intrcomm_ras = {
+ .irq = IRQ_INTRCOMM_RAS_ARM,
+ .dev_config = shirq_intrcomm_ras_config,
+ .dev_count = ARRAY_SIZE(shirq_intrcomm_ras_config),
+ .regs = {
+ .enb_reg = -1,
+ .status_reg = INT_STS_MASK_REG,
+ .status_reg_mask = SHIRQ_INTRCOMM_RAS_MASK,
+ .clear_reg = INT_CLR_MASK_REG,
+ .reset_to_clear = 1,
+ },
+};
+
+/* spear320 routines */
+void __init spear320_init(void)
+{
+ void __iomem *base;
+ int ret = 0;
+
+ /* call spear3xx family common init function */
+ spear3xx_init();
+
+ /* shared irq registration */
+ base = ioremap(SPEAR320_SOC_CONFIG_BASE, SPEAR320_SOC_CONFIG_SIZE);
+ if (base) {
+ /* shirq 1 */
+ shirq_ras1.regs.base = base;
+ ret = spear_shirq_register(&shirq_ras1);
+ if (ret)
+ printk(KERN_ERR "Error registering Shared IRQ 1\n");
+
+ /* shirq 3 */
+ shirq_ras3.regs.base = base;
+ ret = spear_shirq_register(&shirq_ras3);
+ if (ret)
+ printk(KERN_ERR "Error registering Shared IRQ 3\n");
+
+ /* shirq 4 */
+ shirq_intrcomm_ras.regs.base = base;
+ ret = spear_shirq_register(&shirq_intrcomm_ras);
+ if (ret)
+ printk(KERN_ERR "Error registering Shared IRQ 4\n");
+ }
+}
+
+void spear320_pmx_init(void)
+{
+ spear_pmx_init(&pmx_driver, SPEAR320_SOC_CONFIG_BASE,
+ SPEAR320_SOC_CONFIG_SIZE);
+}
diff --git a/arch/arm/mach-spear3xx/spear320_evb.c b/arch/arm/mach-spear3xx/spear320_evb.c
new file mode 100644
index 000000000000..62ac685a4135
--- /dev/null
+++ b/arch/arm/mach-spear3xx/spear320_evb.c
@@ -0,0 +1,81 @@
+/*
+ * arch/arm/mach-spear3xx/spear320_evb.c
+ *
+ * SPEAr320 evaluation board source file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <asm/mach/arch.h>
+#include <asm/mach-types.h>
+#include <mach/generic.h>
+#include <mach/spear.h>
+
+/* padmux devices to enable */
+static struct pmx_dev *pmx_devs[] = {
+ /* spear3xx specific devices */
+ &pmx_i2c,
+ &pmx_ssp,
+ &pmx_mii,
+ &pmx_uart0,
+
+ /* spear320 specific devices */
+ &pmx_fsmc,
+ &pmx_sdio,
+ &pmx_i2s,
+ &pmx_uart1,
+ &pmx_uart2,
+ &pmx_can,
+ &pmx_pwm0,
+ &pmx_pwm1,
+ &pmx_pwm2,
+ &pmx_mii1,
+};
+
+static struct amba_device *amba_devs[] __initdata = {
+ /* spear3xx specific devices */
+ &gpio_device,
+ &uart_device,
+
+ /* spear320 specific devices */
+};
+
+static struct platform_device *plat_devs[] __initdata = {
+ /* spear3xx specific devices */
+
+ /* spear320 specific devices */
+};
+
+static void __init spear320_evb_init(void)
+{
+ unsigned int i;
+
+ /* call spear320 machine init function */
+ spear320_init();
+
+ /* padmux initialization */
+ pmx_driver.mode = &auto_net_mii_mode;
+ pmx_driver.devs = pmx_devs;
+ pmx_driver.devs_count = ARRAY_SIZE(pmx_devs);
+ spear320_pmx_init();
+
+ /* Add Platform Devices */
+ platform_add_devices(plat_devs, ARRAY_SIZE(plat_devs));
+
+ /* Add Amba Devices */
+ for (i = 0; i < ARRAY_SIZE(amba_devs); i++)
+ amba_device_register(amba_devs[i], &iomem_resource);
+}
+
+MACHINE_START(SPEAR320, "ST-SPEAR320-EVB")
+ .boot_params = 0x00000100,
+ .map_io = spear3xx_map_io,
+ .init_irq = spear3xx_init_irq,
+ .timer = &spear_sys_timer,
+ .init_machine = spear320_evb_init,
+MACHINE_END
diff --git a/arch/arm/mach-spear3xx/spear3xx.c b/arch/arm/mach-spear3xx/spear3xx.c
new file mode 100644
index 000000000000..e87313aeae20
--- /dev/null
+++ b/arch/arm/mach-spear3xx/spear3xx.c
@@ -0,0 +1,548 @@
+/*
+ * arch/arm/mach-spear3xx/spear3xx.c
+ *
+ * SPEAr3XX machines common source file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/types.h>
+#include <linux/amba/pl061.h>
+#include <linux/ptrace.h>
+#include <linux/io.h>
+#include <asm/hardware/vic.h>
+#include <asm/irq.h>
+#include <asm/mach/arch.h>
+#include <mach/generic.h>
+#include <mach/spear.h>
+
+/* Add spear3xx machines common devices here */
+/* gpio device registration */
+static struct pl061_platform_data gpio_plat_data = {
+ .gpio_base = 0,
+ .irq_base = SPEAR_GPIO_INT_BASE,
+};
+
+struct amba_device gpio_device = {
+ .dev = {
+ .init_name = "gpio",
+ .platform_data = &gpio_plat_data,
+ },
+ .res = {
+ .start = SPEAR3XX_ICM3_GPIO_BASE,
+ .end = SPEAR3XX_ICM3_GPIO_BASE + SPEAR3XX_ICM3_GPIO_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ .irq = {IRQ_BASIC_GPIO, NO_IRQ},
+};
+
+/* uart device registration */
+struct amba_device uart_device = {
+ .dev = {
+ .init_name = "uart",
+ },
+ .res = {
+ .start = SPEAR3XX_ICM1_UART_BASE,
+ .end = SPEAR3XX_ICM1_UART_BASE + SPEAR3XX_ICM1_UART_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ .irq = {IRQ_UART, NO_IRQ},
+};
+
+/* Do spear3xx family common initialization here */
+void __init spear3xx_init(void)
+{
+ /* nothing to do for now */
+}
+
+/* This will initialize vic */
+void __init spear3xx_init_irq(void)
+{
+ vic_init((void __iomem *)VA_SPEAR3XX_ML1_VIC_BASE, 0, ~0, 0);
+}
+
+/* Following will create static virtual/physical mappings */
+struct map_desc spear3xx_io_desc[] __initdata = {
+ {
+ .virtual = VA_SPEAR3XX_ICM1_UART_BASE,
+ .pfn = __phys_to_pfn(SPEAR3XX_ICM1_UART_BASE),
+ .length = SPEAR3XX_ICM1_UART_SIZE,
+ .type = MT_DEVICE
+ }, {
+ .virtual = VA_SPEAR3XX_ML1_VIC_BASE,
+ .pfn = __phys_to_pfn(SPEAR3XX_ML1_VIC_BASE),
+ .length = SPEAR3XX_ML1_VIC_SIZE,
+ .type = MT_DEVICE
+ }, {
+ .virtual = VA_SPEAR3XX_ICM3_SYS_CTRL_BASE,
+ .pfn = __phys_to_pfn(SPEAR3XX_ICM3_SYS_CTRL_BASE),
+ .length = SPEAR3XX_ICM3_SYS_CTRL_SIZE,
+ .type = MT_DEVICE
+ }, {
+ .virtual = VA_SPEAR3XX_ICM3_MISC_REG_BASE,
+ .pfn = __phys_to_pfn(SPEAR3XX_ICM3_MISC_REG_BASE),
+ .length = SPEAR3XX_ICM3_MISC_REG_SIZE,
+ .type = MT_DEVICE
+ },
+};
+
+/* This will create static memory mapping for selected devices */
+void __init spear3xx_map_io(void)
+{
+ iotable_init(spear3xx_io_desc, ARRAY_SIZE(spear3xx_io_desc));
+
+ /* This will initialize clock framework */
+ clk_init();
+}
+
+/* pad multiplexing support */
+/* devices */
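+/*
+ * Note: ids = 0xffffffff below marks a device as usable regardless of the
+ * selected SoC mode; these spear3xx common devices are mode independent.
+ */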
+struct pmx_dev_mode pmx_firda_modes[] = {
+ {
+ .ids = 0xffffffff,
+ .mask = PMX_FIRDA_MASK,
+ },
+};
+
+struct pmx_dev pmx_firda = {
+ .name = "firda",
+ .modes = pmx_firda_modes,
+ .mode_count = ARRAY_SIZE(pmx_firda_modes),
+ .enb_on_reset = 0,
+};
+
+struct pmx_dev_mode pmx_i2c_modes[] = {
+ {
+ .ids = 0xffffffff,
+ .mask = PMX_I2C_MASK,
+ },
+};
+
+struct pmx_dev pmx_i2c = {
+ .name = "i2c",
+ .modes = pmx_i2c_modes,
+ .mode_count = ARRAY_SIZE(pmx_i2c_modes),
+ .enb_on_reset = 0,
+};
+
+struct pmx_dev_mode pmx_ssp_cs_modes[] = {
+ {
+ .ids = 0xffffffff,
+ .mask = PMX_SSP_CS_MASK,
+ },
+};
+
+struct pmx_dev pmx_ssp_cs = {
+ .name = "ssp_chip_selects",
+ .modes = pmx_ssp_cs_modes,
+ .mode_count = ARRAY_SIZE(pmx_ssp_cs_modes),
+ .enb_on_reset = 0,
+};
+
+struct pmx_dev_mode pmx_ssp_modes[] = {
+ {
+ .ids = 0xffffffff,
+ .mask = PMX_SSP_MASK,
+ },
+};
+
+struct pmx_dev pmx_ssp = {
+ .name = "ssp",
+ .modes = pmx_ssp_modes,
+ .mode_count = ARRAY_SIZE(pmx_ssp_modes),
+ .enb_on_reset = 0,
+};
+
+struct pmx_dev_mode pmx_mii_modes[] = {
+ {
+ .ids = 0xffffffff,
+ .mask = PMX_MII_MASK,
+ },
+};
+
+struct pmx_dev pmx_mii = {
+ .name = "mii",
+ .modes = pmx_mii_modes,
+ .mode_count = ARRAY_SIZE(pmx_mii_modes),
+ .enb_on_reset = 0,
+};
+
+struct pmx_dev_mode pmx_gpio_pin0_modes[] = {
+ {
+ .ids = 0xffffffff,
+ .mask = PMX_GPIO_PIN0_MASK,
+ },
+};
+
+struct pmx_dev pmx_gpio_pin0 = {
+ .name = "gpio_pin0",
+ .modes = pmx_gpio_pin0_modes,
+ .mode_count = ARRAY_SIZE(pmx_gpio_pin0_modes),
+ .enb_on_reset = 0,
+};
+
+struct pmx_dev_mode pmx_gpio_pin1_modes[] = {
+ {
+ .ids = 0xffffffff,
+ .mask = PMX_GPIO_PIN1_MASK,
+ },
+};
+
+struct pmx_dev pmx_gpio_pin1 = {
+ .name = "gpio_pin1",
+ .modes = pmx_gpio_pin1_modes,
+ .mode_count = ARRAY_SIZE(pmx_gpio_pin1_modes),
+ .enb_on_reset = 0,
+};
+
+struct pmx_dev_mode pmx_gpio_pin2_modes[] = {
+ {
+ .ids = 0xffffffff,
+ .mask = PMX_GPIO_PIN2_MASK,
+ },
+};
+
+struct pmx_dev pmx_gpio_pin2 = {
+ .name = "gpio_pin2",
+ .modes = pmx_gpio_pin2_modes,
+ .mode_count = ARRAY_SIZE(pmx_gpio_pin2_modes),
+ .enb_on_reset = 0,
+};
+
+struct pmx_dev_mode pmx_gpio_pin3_modes[] = {
+ {
+ .ids = 0xffffffff,
+ .mask = PMX_GPIO_PIN3_MASK,
+ },
+};
+
+struct pmx_dev pmx_gpio_pin3 = {
+ .name = "gpio_pin3",
+ .modes = pmx_gpio_pin3_modes,
+ .mode_count = ARRAY_SIZE(pmx_gpio_pin3_modes),
+ .enb_on_reset = 0,
+};
+
+struct pmx_dev_mode pmx_gpio_pin4_modes[] = {
+ {
+ .ids = 0xffffffff,
+ .mask = PMX_GPIO_PIN4_MASK,
+ },
+};
+
+struct pmx_dev pmx_gpio_pin4 = {
+ .name = "gpio_pin4",
+ .modes = pmx_gpio_pin4_modes,
+ .mode_count = ARRAY_SIZE(pmx_gpio_pin4_modes),
+ .enb_on_reset = 0,
+};
+
+struct pmx_dev_mode pmx_gpio_pin5_modes[] = {
+ {
+ .ids = 0xffffffff,
+ .mask = PMX_GPIO_PIN5_MASK,
+ },
+};
+
+struct pmx_dev pmx_gpio_pin5 = {
+ .name = "gpio_pin5",
+ .modes = pmx_gpio_pin5_modes,
+ .mode_count = ARRAY_SIZE(pmx_gpio_pin5_modes),
+ .enb_on_reset = 0,
+};
+
+struct pmx_dev_mode pmx_uart0_modem_modes[] = {
+ {
+ .ids = 0xffffffff,
+ .mask = PMX_UART0_MODEM_MASK,
+ },
+};
+
+struct pmx_dev pmx_uart0_modem = {
+ .name = "uart0_modem",
+ .modes = pmx_uart0_modem_modes,
+ .mode_count = ARRAY_SIZE(pmx_uart0_modem_modes),
+ .enb_on_reset = 0,
+};
+
+struct pmx_dev_mode pmx_uart0_modes[] = {
+ {
+ .ids = 0xffffffff,
+ .mask = PMX_UART0_MASK,
+ },
+};
+
+struct pmx_dev pmx_uart0 = {
+ .name = "uart0",
+ .modes = pmx_uart0_modes,
+ .mode_count = ARRAY_SIZE(pmx_uart0_modes),
+ .enb_on_reset = 0,
+};
+
+struct pmx_dev_mode pmx_timer_3_4_modes[] = {
+ {
+ .ids = 0xffffffff,
+ .mask = PMX_TIMER_3_4_MASK,
+ },
+};
+
+struct pmx_dev pmx_timer_3_4 = {
+ .name = "timer_3_4",
+ .modes = pmx_timer_3_4_modes,
+ .mode_count = ARRAY_SIZE(pmx_timer_3_4_modes),
+ .enb_on_reset = 0,
+};
+
+struct pmx_dev_mode pmx_timer_1_2_modes[] = {
+ {
+ .ids = 0xffffffff,
+ .mask = PMX_TIMER_1_2_MASK,
+ },
+};
+
+struct pmx_dev pmx_timer_1_2 = {
+ .name = "timer_1_2",
+ .modes = pmx_timer_1_2_modes,
+ .mode_count = ARRAY_SIZE(pmx_timer_1_2_modes),
+ .enb_on_reset = 0,
+};
+
+#if defined(CONFIG_MACH_SPEAR310) || defined(CONFIG_MACH_SPEAR320)
+/* plgpios devices */
+struct pmx_dev_mode pmx_plgpio_0_1_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_FIRDA_MASK,
+ },
+};
+
+struct pmx_dev pmx_plgpio_0_1 = {
+ .name = "plgpio 0 and 1",
+ .modes = pmx_plgpio_0_1_modes,
+ .mode_count = ARRAY_SIZE(pmx_plgpio_0_1_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_plgpio_2_3_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_UART0_MASK,
+ },
+};
+
+struct pmx_dev pmx_plgpio_2_3 = {
+ .name = "plgpio 2 and 3",
+ .modes = pmx_plgpio_2_3_modes,
+ .mode_count = ARRAY_SIZE(pmx_plgpio_2_3_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_plgpio_4_5_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_I2C_MASK,
+ },
+};
+
+struct pmx_dev pmx_plgpio_4_5 = {
+ .name = "plgpio 4 and 5",
+ .modes = pmx_plgpio_4_5_modes,
+ .mode_count = ARRAY_SIZE(pmx_plgpio_4_5_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_plgpio_6_9_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_SSP_MASK,
+ },
+};
+
+struct pmx_dev pmx_plgpio_6_9 = {
+ .name = "plgpio 6 to 9",
+ .modes = pmx_plgpio_6_9_modes,
+ .mode_count = ARRAY_SIZE(pmx_plgpio_6_9_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_plgpio_10_27_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_MII_MASK,
+ },
+};
+
+struct pmx_dev pmx_plgpio_10_27 = {
+ .name = "plgpio 10 to 27",
+ .modes = pmx_plgpio_10_27_modes,
+ .mode_count = ARRAY_SIZE(pmx_plgpio_10_27_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_plgpio_28_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_GPIO_PIN0_MASK,
+ },
+};
+
+struct pmx_dev pmx_plgpio_28 = {
+ .name = "plgpio 28",
+ .modes = pmx_plgpio_28_modes,
+ .mode_count = ARRAY_SIZE(pmx_plgpio_28_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_plgpio_29_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_GPIO_PIN1_MASK,
+ },
+};
+
+struct pmx_dev pmx_plgpio_29 = {
+ .name = "plgpio 29",
+ .modes = pmx_plgpio_29_modes,
+ .mode_count = ARRAY_SIZE(pmx_plgpio_29_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_plgpio_30_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_GPIO_PIN2_MASK,
+ },
+};
+
+struct pmx_dev pmx_plgpio_30 = {
+ .name = "plgpio 30",
+ .modes = pmx_plgpio_30_modes,
+ .mode_count = ARRAY_SIZE(pmx_plgpio_30_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_plgpio_31_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_GPIO_PIN3_MASK,
+ },
+};
+
+struct pmx_dev pmx_plgpio_31 = {
+ .name = "plgpio 31",
+ .modes = pmx_plgpio_31_modes,
+ .mode_count = ARRAY_SIZE(pmx_plgpio_31_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_plgpio_32_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_GPIO_PIN4_MASK,
+ },
+};
+
+struct pmx_dev pmx_plgpio_32 = {
+ .name = "plgpio 32",
+ .modes = pmx_plgpio_32_modes,
+ .mode_count = ARRAY_SIZE(pmx_plgpio_32_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_plgpio_33_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_GPIO_PIN5_MASK,
+ },
+};
+
+struct pmx_dev pmx_plgpio_33 = {
+ .name = "plgpio 33",
+ .modes = pmx_plgpio_33_modes,
+ .mode_count = ARRAY_SIZE(pmx_plgpio_33_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_plgpio_34_36_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_SSP_CS_MASK,
+ },
+};
+
+struct pmx_dev pmx_plgpio_34_36 = {
+ .name = "plgpio 34 to 36",
+ .modes = pmx_plgpio_34_36_modes,
+ .mode_count = ARRAY_SIZE(pmx_plgpio_34_36_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_plgpio_37_42_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_UART0_MODEM_MASK,
+ },
+};
+
+struct pmx_dev pmx_plgpio_37_42 = {
+ .name = "plgpio 37 to 42",
+ .modes = pmx_plgpio_37_42_modes,
+ .mode_count = ARRAY_SIZE(pmx_plgpio_37_42_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_plgpio_43_44_47_48_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_TIMER_1_2_MASK,
+ },
+};
+
+struct pmx_dev pmx_plgpio_43_44_47_48 = {
+ .name = "plgpio 43, 44, 47 and 48",
+ .modes = pmx_plgpio_43_44_47_48_modes,
+ .mode_count = ARRAY_SIZE(pmx_plgpio_43_44_47_48_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_plgpio_45_46_49_50_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_TIMER_3_4_MASK,
+ },
+};
+
+struct pmx_dev pmx_plgpio_45_46_49_50 = {
+ .name = "plgpio 45, 46, 49 and 50",
+ .modes = pmx_plgpio_45_46_49_50_modes,
+ .mode_count = ARRAY_SIZE(pmx_plgpio_45_46_49_50_modes),
+ .enb_on_reset = 1,
+};
+
+#endif
+
+/* spear padmux initialization function */
+void spear_pmx_init(struct pmx_driver *pmx_driver, uint base, uint size)
+{
+ int ret = 0;
+
+ /* pad mux initialization */
+ pmx_driver->base = ioremap(base, size);
+ if (!pmx_driver->base) {
+ ret = -ENOMEM;
+ goto pmx_fail;
+ }
+
+ ret = pmx_register(pmx_driver);
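+ /* the mapping is only needed while pmx_register() applies the pad configuration */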
+ iounmap(pmx_driver->base);
+
+pmx_fail:
+ if (ret)
+ printk(KERN_ERR "padmux: registration failed. err no: %d\n",
+ ret);
+}
diff --git a/arch/arm/mach-spear6xx/Kconfig b/arch/arm/mach-spear6xx/Kconfig
new file mode 100644
index 000000000000..bddba034f862
--- /dev/null
+++ b/arch/arm/mach-spear6xx/Kconfig
@@ -0,0 +1,20 @@
+#
+# SPEAr6XX Machine configuration file
+#
+
+if ARCH_SPEAR6XX
+
+choice
+ prompt "SPEAr6XX Family"
+ default MACH_SPEAR600
+
+config MACH_SPEAR600
+ bool "SPEAr600"
+ help
+ Supports ST SPEAr600 Machine
+endchoice
+
+# Adding SPEAr6XX machine specific configuration files
+source "arch/arm/mach-spear6xx/Kconfig600"
+
+endif #ARCH_SPEAR6XX
diff --git a/arch/arm/mach-spear6xx/Kconfig600 b/arch/arm/mach-spear6xx/Kconfig600
new file mode 100644
index 000000000000..9e19f65eb78e
--- /dev/null
+++ b/arch/arm/mach-spear6xx/Kconfig600
@@ -0,0 +1,17 @@
+#
+# SPEAr600 machine configuration file
+#
+
+if MACH_SPEAR600
+
+choice
+ prompt "SPEAr600 Boards"
+ default BOARD_SPEAR600_EVB
+
+config BOARD_SPEAR600_EVB
+ bool "SPEAr600 Evaluation Board"
+ help
+ Supports ST SPEAr600 Evaluation Board
+endchoice
+
+endif #MACH_SPEAR600
diff --git a/arch/arm/mach-spear6xx/Makefile b/arch/arm/mach-spear6xx/Makefile
new file mode 100644
index 000000000000..cc1a4d82d459
--- /dev/null
+++ b/arch/arm/mach-spear6xx/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for SPEAr6XX machine series
+#
+
+# common files
+obj-y += clock.o spear6xx.o
+
+# spear600 specific files
+obj-$(CONFIG_MACH_SPEAR600) += spear600.o
+
+# spear600 boards files
+obj-$(CONFIG_BOARD_SPEAR600_EVB) += spear600_evb.o
diff --git a/arch/arm/mach-spear6xx/Makefile.boot b/arch/arm/mach-spear6xx/Makefile.boot
new file mode 100644
index 000000000000..7a1f3c0eadb8
--- /dev/null
+++ b/arch/arm/mach-spear6xx/Makefile.boot
@@ -0,0 +1,3 @@
+zreladdr-y := 0x00008000
+params_phys-y := 0x00000100
+initrd_phys-y := 0x00800000
diff --git a/arch/arm/mach-spear6xx/clock.c b/arch/arm/mach-spear6xx/clock.c
new file mode 100644
index 000000000000..13e27c769685
--- /dev/null
+++ b/arch/arm/mach-spear6xx/clock.c
@@ -0,0 +1,483 @@
+/*
+ * arch/arm/mach-spear6xx/clock.c
+ *
+ * SPEAr6xx machines clock framework source file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <mach/misc_regs.h>
+#include <plat/clock.h>
+
+/* root clks */
+/* 32 KHz oscillator clock */
+static struct clk osc_32k_clk = {
+ .flags = ALWAYS_ENABLED,
+ .rate = 32000,
+};
+
+/* 30 MHz oscillator clock */
+static struct clk osc_30m_clk = {
+ .flags = ALWAYS_ENABLED,
+ .rate = 30000000,
+};
+
+/* clock derived from 32 KHz osc clk */
+/* rtc clock */
+static struct clk rtc_clk = {
+ .pclk = &osc_32k_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = RTC_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* clock derived from 30 MHz osc clk */
+/* pll1 configuration structure */
+static struct pll_clk_config pll1_config = {
+ .mode_reg = PLL1_CTR,
+ .cfg_reg = PLL1_FRQ,
+};
+
+/* PLL1 clock */
+static struct clk pll1_clk = {
+ .pclk = &osc_30m_clk,
+ .en_reg = PLL1_CTR,
+ .en_reg_bit = PLL_ENABLE,
+ .recalc = &pll1_clk_recalc,
+ .private_data = &pll1_config,
+};
+
+/* PLL3 48 MHz clock */
+static struct clk pll3_48m_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &osc_30m_clk,
+ .rate = 48000000,
+};
+
+/* watch dog timer clock */
+static struct clk wdt_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &osc_30m_clk,
+ .recalc = &follow_parent,
+};
+
+/* clock derived from pll1 clk */
+/* cpu clock */
+static struct clk cpu_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &pll1_clk,
+ .recalc = &follow_parent,
+};
+
+/* ahb configuration structure */
+static struct bus_clk_config ahb_config = {
+ .reg = CORE_CLK_CFG,
+ .mask = PLL_HCLK_RATIO_MASK,
+ .shift = PLL_HCLK_RATIO_SHIFT,
+};
+
+/* ahb clock */
+static struct clk ahb_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &pll1_clk,
+ .recalc = &bus_clk_recalc,
+ .private_data = &ahb_config,
+};
+
+/* uart parents */
+static struct pclk_info uart_pclk_info[] = {
+ {
+ .pclk = &pll1_clk,
+ .pclk_mask = AUX_CLK_PLL1_MASK,
+ .scalable = 1,
+ }, {
+ .pclk = &pll3_48m_clk,
+ .pclk_mask = AUX_CLK_PLL3_MASK,
+ .scalable = 0,
+ },
+};
+
+/* uart parent select structure */
+static struct pclk_sel uart_pclk_sel = {
+ .pclk_info = uart_pclk_info,
+ .pclk_count = ARRAY_SIZE(uart_pclk_info),
+ .pclk_sel_reg = PERIP_CLK_CFG,
+ .pclk_sel_mask = UART_CLK_MASK,
+};
+
+/* uart configurations */
+static struct aux_clk_config uart_config = {
+ .synth_reg = UART_CLK_SYNT,
+};
+
+/* uart0 clock */
+static struct clk uart0_clk = {
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = UART0_CLK_ENB,
+ .pclk_sel = &uart_pclk_sel,
+ .pclk_sel_shift = UART_CLK_SHIFT,
+ .recalc = &aux_clk_recalc,
+ .private_data = &uart_config,
+};
+
+/* uart1 clock */
+static struct clk uart1_clk = {
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = UART1_CLK_ENB,
+ .pclk_sel = &uart_pclk_sel,
+ .pclk_sel_shift = UART_CLK_SHIFT,
+ .recalc = &aux_clk_recalc,
+ .private_data = &uart_config,
+};
+
+/* firda configurations */
+static struct aux_clk_config firda_config = {
+ .synth_reg = FIRDA_CLK_SYNT,
+};
+
+/* firda parents */
+static struct pclk_info firda_pclk_info[] = {
+ {
+ .pclk = &pll1_clk,
+ .pclk_mask = AUX_CLK_PLL1_MASK,
+ .scalable = 1,
+ }, {
+ .pclk = &pll3_48m_clk,
+ .pclk_mask = AUX_CLK_PLL3_MASK,
+ .scalable = 0,
+ },
+};
+
+/* firda parent select structure */
+static struct pclk_sel firda_pclk_sel = {
+ .pclk_info = firda_pclk_info,
+ .pclk_count = ARRAY_SIZE(firda_pclk_info),
+ .pclk_sel_reg = PERIP_CLK_CFG,
+ .pclk_sel_mask = FIRDA_CLK_MASK,
+};
+
+/* firda clock */
+static struct clk firda_clk = {
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = FIRDA_CLK_ENB,
+ .pclk_sel = &firda_pclk_sel,
+ .pclk_sel_shift = FIRDA_CLK_SHIFT,
+ .recalc = &aux_clk_recalc,
+ .private_data = &firda_config,
+};
+
+/* clcd configurations */
+static struct aux_clk_config clcd_config = {
+ .synth_reg = CLCD_CLK_SYNT,
+};
+
+/* clcd parents */
+static struct pclk_info clcd_pclk_info[] = {
+ {
+ .pclk = &pll1_clk,
+ .pclk_mask = AUX_CLK_PLL1_MASK,
+ .scalable = 1,
+ }, {
+ .pclk = &pll3_48m_clk,
+ .pclk_mask = AUX_CLK_PLL3_MASK,
+ .scalable = 0,
+ },
+};
+
+/* clcd parent select structure */
+static struct pclk_sel clcd_pclk_sel = {
+ .pclk_info = clcd_pclk_info,
+ .pclk_count = ARRAY_SIZE(clcd_pclk_info),
+ .pclk_sel_reg = PERIP_CLK_CFG,
+ .pclk_sel_mask = CLCD_CLK_MASK,
+};
+
+/* clcd clock */
+static struct clk clcd_clk = {
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = CLCD_CLK_ENB,
+ .pclk_sel = &clcd_pclk_sel,
+ .pclk_sel_shift = CLCD_CLK_SHIFT,
+ .recalc = &aux_clk_recalc,
+ .private_data = &clcd_config,
+};
+
+/* gpt parents */
+static struct pclk_info gpt_pclk_info[] = {
+ {
+ .pclk = &pll1_clk,
+ .pclk_mask = AUX_CLK_PLL1_MASK,
+ .scalable = 1,
+ }, {
+ .pclk = &pll3_48m_clk,
+ .pclk_mask = AUX_CLK_PLL3_MASK,
+ .scalable = 0,
+ },
+};
+
+/* gpt parent select structure */
+static struct pclk_sel gpt_pclk_sel = {
+ .pclk_info = gpt_pclk_info,
+ .pclk_count = ARRAY_SIZE(gpt_pclk_info),
+ .pclk_sel_reg = PERIP_CLK_CFG,
+ .pclk_sel_mask = GPT_CLK_MASK,
+};
+
+/* gpt0_1 configurations */
+static struct aux_clk_config gpt0_1_config = {
+ .synth_reg = PRSC1_CLK_CFG,
+};
+
+/* gpt0 ARM1 subsystem timer clock */
+static struct clk gpt0_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk_sel = &gpt_pclk_sel,
+ .pclk_sel_shift = GPT0_CLK_SHIFT,
+ .recalc = &gpt_clk_recalc,
+ .private_data = &gpt0_1_config,
+};
+
+/* gpt1 timer clock */
+static struct clk gpt1_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk_sel = &gpt_pclk_sel,
+ .pclk_sel_shift = GPT1_CLK_SHIFT,
+ .recalc = &gpt_clk_recalc,
+ .private_data = &gpt0_1_config,
+};
+
+/* gpt2 configurations */
+static struct aux_clk_config gpt2_config = {
+ .synth_reg = PRSC2_CLK_CFG,
+};
+
+/* gpt2 timer clock */
+static struct clk gpt2_clk = {
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = GPT2_CLK_ENB,
+ .pclk_sel = &gpt_pclk_sel,
+ .pclk_sel_shift = GPT2_CLK_SHIFT,
+ .recalc = &gpt_clk_recalc,
+ .private_data = &gpt2_config,
+};
+
+/* gpt3 configurations */
+static struct aux_clk_config gpt3_config = {
+ .synth_reg = PRSC3_CLK_CFG,
+};
+
+/* gpt3 timer clock */
+static struct clk gpt3_clk = {
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = GPT3_CLK_ENB,
+ .pclk_sel = &gpt_pclk_sel,
+ .pclk_sel_shift = GPT3_CLK_SHIFT,
+ .recalc = &gpt_clk_recalc,
+ .private_data = &gpt3_config,
+};
+
+/* clock derived from pll3 clk */
+/* usbh0 clock */
+static struct clk usbh0_clk = {
+ .pclk = &pll3_48m_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = USBH0_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* usbh1 clock */
+static struct clk usbh1_clk = {
+ .pclk = &pll3_48m_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = USBH1_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* usbd clock */
+static struct clk usbd_clk = {
+ .pclk = &pll3_48m_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = USBD_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* clock derived from ahb clk */
+/* apb configuration structure */
+static struct bus_clk_config apb_config = {
+ .reg = CORE_CLK_CFG,
+ .mask = HCLK_PCLK_RATIO_MASK,
+ .shift = HCLK_PCLK_RATIO_SHIFT,
+};
+
+/* apb clock */
+static struct clk apb_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &ahb_clk,
+ .recalc = &bus_clk_recalc,
+ .private_data = &apb_config,
+};
+
+/* i2c clock */
+static struct clk i2c_clk = {
+ .pclk = &ahb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = I2C_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* dma clock */
+static struct clk dma_clk = {
+ .pclk = &ahb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = DMA_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* jpeg clock */
+static struct clk jpeg_clk = {
+ .pclk = &ahb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = JPEG_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* gmac clock */
+static struct clk gmac_clk = {
+ .pclk = &ahb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = GMAC_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* smi clock */
+static struct clk smi_clk = {
+ .pclk = &ahb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = SMI_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* fsmc clock */
+static struct clk fsmc_clk = {
+ .pclk = &ahb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = FSMC_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* clock derived from apb clk */
+/* adc clock */
+static struct clk adc_clk = {
+ .pclk = &apb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = ADC_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* ssp0 clock */
+static struct clk ssp0_clk = {
+ .pclk = &apb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = SSP0_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* ssp1 clock */
+static struct clk ssp1_clk = {
+ .pclk = &apb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = SSP1_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* ssp2 clock */
+static struct clk ssp2_clk = {
+ .pclk = &apb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = SSP2_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* gpio0 ARM subsystem clock */
+static struct clk gpio0_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &apb_clk,
+ .recalc = &follow_parent,
+};
+
+/* gpio1 clock */
+static struct clk gpio1_clk = {
+ .pclk = &apb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = GPIO1_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* gpio2 clock */
+static struct clk gpio2_clk = {
+ .pclk = &apb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = GPIO2_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* array of all spear 6xx clock lookups */
+static struct clk_lookup spear_clk_lookups[] = {
+ /* root clks */
+ { .con_id = "osc_32k_clk", .clk = &osc_32k_clk},
+ { .con_id = "osc_30m_clk", .clk = &osc_30m_clk},
+ /* clock derived from 32 KHz osc clk */
+ { .dev_id = "rtc", .clk = &rtc_clk},
+ /* clock derived from 30 MHz osc clk */
+ { .con_id = "pll1_clk", .clk = &pll1_clk},
+ { .con_id = "pll3_48m_clk", .clk = &pll3_48m_clk},
+ { .dev_id = "wdt", .clk = &wdt_clk},
+ /* clock derived from pll1 clk */
+ { .con_id = "cpu_clk", .clk = &cpu_clk},
+ { .con_id = "ahb_clk", .clk = &ahb_clk},
+ { .dev_id = "uart0", .clk = &uart0_clk},
+ { .dev_id = "uart1", .clk = &uart1_clk},
+ { .dev_id = "firda", .clk = &firda_clk},
+ { .dev_id = "clcd", .clk = &clcd_clk},
+ { .dev_id = "gpt0", .clk = &gpt0_clk},
+ { .dev_id = "gpt1", .clk = &gpt1_clk},
+ { .dev_id = "gpt2", .clk = &gpt2_clk},
+ { .dev_id = "gpt3", .clk = &gpt3_clk},
+ /* clock derived from pll3 clk */
+ { .dev_id = "usbh0", .clk = &usbh0_clk},
+ { .dev_id = "usbh1", .clk = &usbh1_clk},
+ { .dev_id = "usbd", .clk = &usbd_clk},
+ /* clock derived from ahb clk */
+ { .con_id = "apb_clk", .clk = &apb_clk},
+ { .dev_id = "i2c", .clk = &i2c_clk},
+ { .dev_id = "dma", .clk = &dma_clk},
+ { .dev_id = "jpeg", .clk = &jpeg_clk},
+ { .dev_id = "gmac", .clk = &gmac_clk},
+ { .dev_id = "smi", .clk = &smi_clk},
+ { .dev_id = "fsmc", .clk = &fsmc_clk},
+ /* clock derived from apb clk */
+ { .dev_id = "adc", .clk = &adc_clk},
+ { .dev_id = "ssp0", .clk = &ssp0_clk},
+ { .dev_id = "ssp1", .clk = &ssp1_clk},
+ { .dev_id = "ssp2", .clk = &ssp2_clk},
+ { .dev_id = "gpio0", .clk = &gpio0_clk},
+ { .dev_id = "gpio1", .clk = &gpio1_clk},
+ { .dev_id = "gpio2", .clk = &gpio2_clk},
+};
+
+void __init clk_init(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(spear_clk_lookups); i++)
+ clk_register(&spear_clk_lookups[i]);
+
+ recalc_root_clocks();
+}
diff --git a/arch/arm/mach-spear6xx/include/mach/clkdev.h b/arch/arm/mach-spear6xx/include/mach/clkdev.h
new file mode 100644
index 000000000000..05676bf440d3
--- /dev/null
+++ b/arch/arm/mach-spear6xx/include/mach/clkdev.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear6xx/include/mach/clkdev.h
+ *
+ * Clock Dev framework definitions for SPEAr6xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_CLKDEV_H
+#define __MACH_CLKDEV_H
+
+#include <plat/clkdev.h>
+
+#endif /* __MACH_CLKDEV_H */
diff --git a/arch/arm/mach-spear6xx/include/mach/debug-macro.S b/arch/arm/mach-spear6xx/include/mach/debug-macro.S
new file mode 100644
index 000000000000..0f3ea39edd96
--- /dev/null
+++ b/arch/arm/mach-spear6xx/include/mach/debug-macro.S
@@ -0,0 +1,14 @@
+/*
+ * arch/arm/mach-spear6xx/include/mach/debug-macro.S
+ *
+ * Debugging macro include header for SPEAr6xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Rajeev Kumar<rajeev-dlh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <plat/debug-macro.S>
diff --git a/arch/arm/mach-spear6xx/include/mach/entry-macro.S b/arch/arm/mach-spear6xx/include/mach/entry-macro.S
new file mode 100644
index 000000000000..9eaecaeafcf0
--- /dev/null
+++ b/arch/arm/mach-spear6xx/include/mach/entry-macro.S
@@ -0,0 +1,55 @@
+/*
+ * arch/arm/mach-spear6xx/include/mach/entry-macro.S
+ *
+ * Low-level IRQ helper macros for SPEAr6xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Rajeev Kumar<rajeev-dlh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <mach/hardware.h>
+#include <mach/spear.h>
+#include <asm/hardware/vic.h>
+
+ .macro disable_fiq
+ .endm
+
+ .macro get_irqnr_preamble, base, tmp
+ .endm
+
+ .macro arch_ret_to_user, tmp1, tmp2
+ .endm
+
+ .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
+ ldr \base, =VA_SPEAR6XX_CPU_VIC_PRI_BASE
+ ldr \irqstat, [\base, #VIC_IRQ_STATUS] @ get status
+ mov \irqnr, #0
+ teq \irqstat, #0
+ bne 1001f
+ ldr \base, =VA_SPEAR6XX_CPU_VIC_SEC_BASE
+ ldr \irqstat, [\base, #VIC_IRQ_STATUS] @ get status
+ teq \irqstat, #0
+ beq 1002f @ this will set/reset the
+ @ zero (Z) flag
+ mov \irqnr, #32
+1001:
+ /*
+ * The following code finds the bit position of the least significant
+ * bit set in irqstat, using the identity:
+ * least significant bit set in n = n & ~(n - 1)
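+ * e.g. irqstat = 0x0C: 0x0C & ~(0x0C - 1) = 0x04, i.e. bit 2 is the first set bit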
+ */
+ sub \tmp, \irqstat, #1 @ tmp = irqstat - 1
+ mvn \tmp, \tmp @ tmp = ~tmp
+ and \irqstat, \irqstat, \tmp @ irqstat &= tmp
+ /* Now irqstat holds only the least significant bit set in the VIC status */
+ clz \tmp, \irqstat @ tmp = leading zeros
+
+ rsb \tmp, \tmp, #0x1F @ tmp = 32 - tmp - 1
+ add \irqnr, \irqnr, \tmp
+
+1002: /* EQ will be set if no irqs pending */
+ .endm
diff --git a/arch/arm/mach-spear6xx/include/mach/generic.h b/arch/arm/mach-spear6xx/include/mach/generic.h
new file mode 100644
index 000000000000..16205a538756
--- /dev/null
+++ b/arch/arm/mach-spear6xx/include/mach/generic.h
@@ -0,0 +1,45 @@
+/*
+ * arch/arm/mach-spear6xx/include/mach/generic.h
+ *
+ * SPEAr6XX machine family specific generic header file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Rajeev Kumar<rajeev-dlh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_GENERIC_H
+#define __MACH_GENERIC_H
+
+#include <asm/mach/time.h>
+#include <asm/mach/map.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/amba/bus.h>
+
+/*
+ * Each GPT has 2 timer channels
+ * Following GPT channels will be used as clock source and clockevent
+ */
+#define SPEAR_GPT0_BASE SPEAR6XX_CPU_TMR_BASE
+#define SPEAR_GPT0_CHAN0_IRQ IRQ_CPU_GPT1_1
+#define SPEAR_GPT0_CHAN1_IRQ IRQ_CPU_GPT1_2
+
+/* Add spear6xx family device structure declarations here */
+extern struct amba_device gpio_device[];
+extern struct amba_device uart_device[];
+extern struct sys_timer spear_sys_timer;
+
+/* Add spear6xx family function declarations here */
+void __init spear6xx_map_io(void);
+void __init spear6xx_init_irq(void);
+void __init spear6xx_init(void);
+void __init spear600_init(void);
+void __init clk_init(void);
+
+/* Add spear600 machine device structure declarations here */
+
+#endif /* __MACH_GENERIC_H */
diff --git a/arch/arm/mach-spear6xx/include/mach/gpio.h b/arch/arm/mach-spear6xx/include/mach/gpio.h
new file mode 100644
index 000000000000..3a789dbb69f7
--- /dev/null
+++ b/arch/arm/mach-spear6xx/include/mach/gpio.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear6xx/include/mach/gpio.h
+ *
+ * GPIO macros for SPEAr6xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_GPIO_H
+#define __MACH_GPIO_H
+
+#include <plat/gpio.h>
+
+#endif /* __MACH_GPIO_H */
diff --git a/arch/arm/mach-spear6xx/include/mach/hardware.h b/arch/arm/mach-spear6xx/include/mach/hardware.h
new file mode 100644
index 000000000000..7545116deca9
--- /dev/null
+++ b/arch/arm/mach-spear6xx/include/mach/hardware.h
@@ -0,0 +1,21 @@
+/*
+ * arch/arm/mach-spear6xx/include/mach/hardware.h
+ *
+ * Hardware definitions for SPEAr6xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Rajeev Kumar<rajeev-dlh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_HARDWARE_H
+#define __MACH_HARDWARE_H
+
+/* Physical to virtual address translation for the statically mapped space */
+#define IO_ADDRESS(x) (x | 0xF0000000)
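+/* e.g. IO_ADDRESS(0xD0000000) = 0xD0000000 | 0xF0000000 = 0xF0000000 */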
+
+#endif /* __MACH_HARDWARE_H */
+
diff --git a/arch/arm/mach-spear6xx/include/mach/io.h b/arch/arm/mach-spear6xx/include/mach/io.h
new file mode 100644
index 000000000000..fb7c106cea94
--- /dev/null
+++ b/arch/arm/mach-spear6xx/include/mach/io.h
@@ -0,0 +1,20 @@
+/*
+ * arch/arm/mach-spear6xx/include/mach/io.h
+ *
+ * IO definitions for SPEAr6xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Rajeev Kumar<rajeev-dlh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_IO_H
+#define __MACH_IO_H
+
+#include <plat/io.h>
+
+#endif /* __MACH_IO_H */
+
diff --git a/arch/arm/mach-spear6xx/include/mach/irqs.h b/arch/arm/mach-spear6xx/include/mach/irqs.h
new file mode 100644
index 000000000000..8f214b03d75d
--- /dev/null
+++ b/arch/arm/mach-spear6xx/include/mach/irqs.h
@@ -0,0 +1,97 @@
+/*
+ * arch/arm/mach-spear6xx/include/mach/irqs.h
+ *
+ * IRQ helper macros for SPEAr6xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Rajeev Kumar<rajeev-dlh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_IRQS_H
+#define __MACH_IRQS_H
+
+/* IRQ definitions */
+/* VIC 1 */
+#define IRQ_INTRCOMM_SW_IRQ 0
+#define IRQ_INTRCOMM_CPU_1 1
+#define IRQ_INTRCOMM_CPU_2 2
+#define IRQ_INTRCOMM_RAS2A11_1 3
+#define IRQ_INTRCOMM_RAS2A11_2 4
+#define IRQ_INTRCOMM_RAS2A12_1 5
+#define IRQ_INTRCOMM_RAS2A12_2 6
+#define IRQ_GEN_RAS_0 7
+#define IRQ_GEN_RAS_1 8
+#define IRQ_GEN_RAS_2 9
+#define IRQ_GEN_RAS_3 10
+#define IRQ_GEN_RAS_4 11
+#define IRQ_GEN_RAS_5 12
+#define IRQ_GEN_RAS_6 13
+#define IRQ_GEN_RAS_7 14
+#define IRQ_GEN_RAS_8 15
+#define IRQ_CPU_GPT1_1 16
+#define IRQ_CPU_GPT1_2 17
+#define IRQ_LOCAL_GPIO 18
+#define IRQ_PLL_UNLOCK 19
+#define IRQ_JPEG 20
+#define IRQ_FSMC 21
+#define IRQ_IRDA 22
+#define IRQ_RESERVED 23
+#define IRQ_UART_0 24
+#define IRQ_UART_1 25
+#define IRQ_SSP_1 26
+#define IRQ_SSP_2 27
+#define IRQ_I2C 28
+#define IRQ_GEN_RAS_9 29
+#define IRQ_GEN_RAS_10 30
+#define IRQ_GEN_RAS_11 31
+
+/* VIC 2 */
+#define IRQ_APPL_GPT1_1 32
+#define IRQ_APPL_GPT1_2 33
+#define IRQ_APPL_GPT2_1 34
+#define IRQ_APPL_GPT2_2 35
+#define IRQ_APPL_GPIO 36
+#define IRQ_APPL_SSP 37
+#define IRQ_APPL_ADC 38
+#define IRQ_APPL_RESERVED 39
+#define IRQ_AHB_EXP_MASTER 40
+#define IRQ_DDR_CONTROLLER 41
+#define IRQ_BASIC_DMA 42
+#define IRQ_BASIC_RESERVED1 43
+#define IRQ_BASIC_SMI 44
+#define IRQ_BASIC_CLCD 45
+#define IRQ_EXP_AHB_1 46
+#define IRQ_EXP_AHB_2 47
+#define IRQ_BASIC_GPT1_1 48
+#define IRQ_BASIC_GPT1_2 49
+#define IRQ_BASIC_RTC 50
+#define IRQ_BASIC_GPIO 51
+#define IRQ_BASIC_WDT 52
+#define IRQ_BASIC_RESERVED 53
+#define IRQ_AHB_EXP_SLAVE 54
+#define IRQ_GMAC_1 55
+#define IRQ_GMAC_2 56
+#define IRQ_USB_DEV 57
+#define IRQ_USB_H_OHCI_0 58
+#define IRQ_USB_H_EHCI_0 59
+#define IRQ_USB_H_OHCI_1 60
+#define IRQ_USB_H_EHCI_1 61
+#define IRQ_EXP_AHB_3 62
+#define IRQ_EXP_AHB_4 63
+
+#define IRQ_VIC_END 64
+
+/* GPIO pins virtual irqs */
+#define SPEAR_GPIO_INT_BASE IRQ_VIC_END
+#define SPEAR_GPIO0_INT_BASE SPEAR_GPIO_INT_BASE
+#define SPEAR_GPIO1_INT_BASE (SPEAR_GPIO0_INT_BASE + 8)
+#define SPEAR_GPIO2_INT_BASE (SPEAR_GPIO1_INT_BASE + 8)
+#define SPEAR_GPIO_INT_END (SPEAR_GPIO2_INT_BASE + 8)
+#define VIRTUAL_IRQS (SPEAR_GPIO_INT_END - IRQ_VIC_END)
+#define NR_IRQS (IRQ_VIC_END + VIRTUAL_IRQS)
+
+#endif /* __MACH_IRQS_H */
diff --git a/arch/arm/mach-spear6xx/include/mach/memory.h b/arch/arm/mach-spear6xx/include/mach/memory.h
new file mode 100644
index 000000000000..781f088fc228
--- /dev/null
+++ b/arch/arm/mach-spear6xx/include/mach/memory.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear6xx/include/mach/memory.h
+ *
+ * Memory map for SPEAr6xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Rajeev Kumar<rajeev-dlh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_MEMORY_H
+#define __MACH_MEMORY_H
+
+#include <plat/memory.h>
+
+#endif /* __MACH_MEMORY_H */
diff --git a/arch/arm/mach-spear6xx/include/mach/misc_regs.h b/arch/arm/mach-spear6xx/include/mach/misc_regs.h
new file mode 100644
index 000000000000..03908036b0d0
--- /dev/null
+++ b/arch/arm/mach-spear6xx/include/mach/misc_regs.h
@@ -0,0 +1,173 @@
+/*
+ * arch/arm/mach-spear6xx/include/mach/misc_regs.h
+ *
+ * Miscellaneous registers definitions for SPEAr6xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_MISC_REGS_H
+#define __MACH_MISC_REGS_H
+
+#include <mach/spear.h>
+
+#define MISC_BASE VA_SPEAR6XX_ICM3_MISC_REG_BASE
+
+#define SOC_CFG_CTR ((unsigned int *)(MISC_BASE + 0x000))
+#define DIAG_CFG_CTR ((unsigned int *)(MISC_BASE + 0x004))
+#define PLL1_CTR ((unsigned int *)(MISC_BASE + 0x008))
+#define PLL1_FRQ ((unsigned int *)(MISC_BASE + 0x00C))
+#define PLL1_MOD ((unsigned int *)(MISC_BASE + 0x010))
+#define PLL2_CTR ((unsigned int *)(MISC_BASE + 0x014))
+/* PLL_CTR register masks */
+#define PLL_ENABLE 2
+#define PLL_MODE_SHIFT 4
+#define PLL_MODE_MASK 0x3
+#define PLL_MODE_NORMAL 0
+#define PLL_MODE_FRACTION 1
+#define PLL_MODE_DITH_DSB 2
+#define PLL_MODE_DITH_SSB 3
+
+#define PLL2_FRQ ((unsigned int *)(MISC_BASE + 0x018))
+/* PLL FRQ register masks */
+#define PLL_DIV_N_SHIFT 0
+#define PLL_DIV_N_MASK 0xFF
+#define PLL_DIV_P_SHIFT 8
+#define PLL_DIV_P_MASK 0x7
+#define PLL_NORM_FDBK_M_SHIFT 24
+#define PLL_NORM_FDBK_M_MASK 0xFF
+#define PLL_DITH_FDBK_M_SHIFT 16
+#define PLL_DITH_FDBK_M_MASK 0xFFFF
+
+#define PLL2_MOD ((unsigned int *)(MISC_BASE + 0x01C))
+#define PLL_CLK_CFG ((unsigned int *)(MISC_BASE + 0x020))
+#define CORE_CLK_CFG ((unsigned int *)(MISC_BASE + 0x024))
+/* CORE CLK CFG register masks */
+#define PLL_HCLK_RATIO_SHIFT 10
+#define PLL_HCLK_RATIO_MASK 0x3
+#define HCLK_PCLK_RATIO_SHIFT 8
+#define HCLK_PCLK_RATIO_MASK 0x3
+
+#define PERIP_CLK_CFG ((unsigned int *)(MISC_BASE + 0x028))
+/* PERIP_CLK_CFG register masks */
+#define CLCD_CLK_SHIFT 2
+#define CLCD_CLK_MASK 0x3
+#define UART_CLK_SHIFT 4
+#define UART_CLK_MASK 0x1
+#define FIRDA_CLK_SHIFT 5
+#define FIRDA_CLK_MASK 0x3
+#define GPT0_CLK_SHIFT 8
+#define GPT1_CLK_SHIFT 10
+#define GPT2_CLK_SHIFT 11
+#define GPT3_CLK_SHIFT 12
+#define GPT_CLK_MASK 0x1
+#define AUX_CLK_PLL3_MASK 0
+#define AUX_CLK_PLL1_MASK 1
+
+#define PERIP1_CLK_ENB ((unsigned int *)(MISC_BASE + 0x02C))
+/* PERIP1_CLK_ENB register masks */
+#define UART0_CLK_ENB 3
+#define UART1_CLK_ENB 4
+#define SSP0_CLK_ENB 5
+#define SSP1_CLK_ENB 6
+#define I2C_CLK_ENB 7
+#define JPEG_CLK_ENB 8
+#define FSMC_CLK_ENB 9
+#define FIRDA_CLK_ENB 10
+#define GPT2_CLK_ENB 11
+#define GPT3_CLK_ENB 12
+#define GPIO2_CLK_ENB 13
+#define SSP2_CLK_ENB 14
+#define ADC_CLK_ENB 15
+#define GPT1_CLK_ENB 11
+#define RTC_CLK_ENB 17
+#define GPIO1_CLK_ENB 18
+#define DMA_CLK_ENB 19
+#define SMI_CLK_ENB 21
+#define CLCD_CLK_ENB 22
+#define GMAC_CLK_ENB 23
+#define USBD_CLK_ENB 24
+#define USBH0_CLK_ENB 25
+#define USBH1_CLK_ENB 26
+
+#define SOC_CORE_ID ((unsigned int *)(MISC_BASE + 0x030))
+#define RAS_CLK_ENB ((unsigned int *)(MISC_BASE + 0x034))
+#define PERIP1_SOF_RST ((unsigned int *)(MISC_BASE + 0x038))
+/* PERIP1_SOF_RST register masks */
+#define JPEG_SOF_RST 8
+
+#define SOC_USER_ID ((unsigned int *)(MISC_BASE + 0x03C))
+#define RAS_SOF_RST ((unsigned int *)(MISC_BASE + 0x040))
+#define PRSC1_CLK_CFG ((unsigned int *)(MISC_BASE + 0x044))
+#define PRSC2_CLK_CFG ((unsigned int *)(MISC_BASE + 0x048))
+#define PRSC3_CLK_CFG ((unsigned int *)(MISC_BASE + 0x04C))
+/* gpt synthesizer register masks */
+#define GPT_MSCALE_SHIFT 0
+#define GPT_MSCALE_MASK 0xFFF
+#define GPT_NSCALE_SHIFT 12
+#define GPT_NSCALE_MASK 0xF
+
+#define AMEM_CLK_CFG ((unsigned int *)(MISC_BASE + 0x050))
+#define EXPI_CLK_CFG ((unsigned int *)(MISC_BASE + 0x054))
+#define CLCD_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x05C))
+#define FIRDA_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x060))
+#define UART_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x064))
+#define GMAC_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x068))
+#define RAS1_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x06C))
+#define RAS2_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x070))
+#define RAS3_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x074))
+#define RAS4_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x078))
+/* aux clk synthesizer register masks for irda to ras4 */
+#define AUX_EQ_SEL_SHIFT 30
+#define AUX_EQ_SEL_MASK 1
+#define AUX_EQ1_SEL 0
+#define AUX_EQ2_SEL 1
+#define AUX_XSCALE_SHIFT 16
+#define AUX_XSCALE_MASK 0xFFF
+#define AUX_YSCALE_SHIFT 0
+#define AUX_YSCALE_MASK 0xFFF
+
+#define ICM1_ARB_CFG ((unsigned int *)(MISC_BASE + 0x07C))
+#define ICM2_ARB_CFG ((unsigned int *)(MISC_BASE + 0x080))
+#define ICM3_ARB_CFG ((unsigned int *)(MISC_BASE + 0x084))
+#define ICM4_ARB_CFG ((unsigned int *)(MISC_BASE + 0x088))
+#define ICM5_ARB_CFG ((unsigned int *)(MISC_BASE + 0x08C))
+#define ICM6_ARB_CFG ((unsigned int *)(MISC_BASE + 0x090))
+#define ICM7_ARB_CFG ((unsigned int *)(MISC_BASE + 0x094))
+#define ICM8_ARB_CFG ((unsigned int *)(MISC_BASE + 0x098))
+#define ICM9_ARB_CFG ((unsigned int *)(MISC_BASE + 0x09C))
+#define DMA_CHN_CFG ((unsigned int *)(MISC_BASE + 0x0A0))
+#define USB2_PHY_CFG ((unsigned int *)(MISC_BASE + 0x0A4))
+#define GMAC_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0A8))
+#define EXPI_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0AC))
+#define PRC1_LOCK_CTR ((unsigned int *)(MISC_BASE + 0x0C0))
+#define PRC2_LOCK_CTR ((unsigned int *)(MISC_BASE + 0x0C4))
+#define PRC3_LOCK_CTR ((unsigned int *)(MISC_BASE + 0x0C8))
+#define PRC4_LOCK_CTR ((unsigned int *)(MISC_BASE + 0x0CC))
+#define PRC1_IRQ_CTR ((unsigned int *)(MISC_BASE + 0x0D0))
+#define PRC2_IRQ_CTR ((unsigned int *)(MISC_BASE + 0x0D4))
+#define PRC3_IRQ_CTR ((unsigned int *)(MISC_BASE + 0x0D8))
+#define PRC4_IRQ_CTR ((unsigned int *)(MISC_BASE + 0x0DC))
+#define PWRDOWN_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0E0))
+#define COMPSSTL_1V8_CFG ((unsigned int *)(MISC_BASE + 0x0E4))
+#define COMPSSTL_2V5_CFG ((unsigned int *)(MISC_BASE + 0x0E8))
+#define COMPCOR_3V3_CFG ((unsigned int *)(MISC_BASE + 0x0EC))
+#define SSTLPAD_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0F0))
+#define BIST1_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0F4))
+#define BIST2_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0F8))
+#define BIST3_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0FC))
+#define BIST4_CFG_CTR ((unsigned int *)(MISC_BASE + 0x100))
+#define BIST5_CFG_CTR ((unsigned int *)(MISC_BASE + 0x104))
+#define BIST1_STS_RES ((unsigned int *)(MISC_BASE + 0x108))
+#define BIST2_STS_RES ((unsigned int *)(MISC_BASE + 0x10C))
+#define BIST3_STS_RES ((unsigned int *)(MISC_BASE + 0x110))
+#define BIST4_STS_RES ((unsigned int *)(MISC_BASE + 0x114))
+#define BIST5_STS_RES ((unsigned int *)(MISC_BASE + 0x118))
+#define SYSERR_CFG_CTR ((unsigned int *)(MISC_BASE + 0x11C))
+
+#endif /* __MACH_MISC_REGS_H */
diff --git a/arch/arm/mach-spear6xx/include/mach/spear.h b/arch/arm/mach-spear6xx/include/mach/spear.h
new file mode 100644
index 000000000000..a835f5b6b182
--- /dev/null
+++ b/arch/arm/mach-spear6xx/include/mach/spear.h
@@ -0,0 +1,173 @@
+/*
+ * arch/arm/mach-spear6xx/include/mach/spear.h
+ *
+ * SPEAr6xx Machine family specific definition
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Rajeev Kumar<rajeev-dlh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_SPEAR6XX_H
+#define __MACH_SPEAR6XX_H
+
+#include <mach/hardware.h>
+#include <mach/spear600.h>
+
+#define SPEAR6XX_ML_SDRAM_BASE 0x00000000
+#define SPEAR6XX_ML_SDRAM_SIZE 0x40000000
+
+/* ICM1 - Low speed connection */
+#define SPEAR6XX_ICM1_BASE 0xD0000000
+#define SPEAR6XX_ICM1_SIZE 0x08000000
+
+#define SPEAR6XX_ICM1_UART0_BASE 0xD0000000
+#define VA_SPEAR6XX_ICM1_UART0_BASE IO_ADDRESS(SPEAR6XX_ICM1_UART0_BASE)
+#define SPEAR6XX_ICM1_UART0_SIZE 0x00080000
+
+#define SPEAR6XX_ICM1_UART1_BASE 0xD0080000
+#define SPEAR6XX_ICM1_UART1_SIZE 0x00080000
+
+#define SPEAR6XX_ICM1_SSP0_BASE 0xD0100000
+#define SPEAR6XX_ICM1_SSP0_SIZE 0x00080000
+
+#define SPEAR6XX_ICM1_SSP1_BASE 0xD0180000
+#define SPEAR6XX_ICM1_SSP1_SIZE 0x00080000
+
+#define SPEAR6XX_ICM1_I2C_BASE 0xD0200000
+#define SPEAR6XX_ICM1_I2C_SIZE 0x00080000
+
+#define SPEAR6XX_ICM1_JPEG_BASE 0xD0800000
+#define SPEAR6XX_ICM1_JPEG_SIZE 0x00800000
+
+#define SPEAR6XX_ICM1_IRDA_BASE 0xD1000000
+#define SPEAR6XX_ICM1_IRDA_SIZE 0x00800000
+
+#define SPEAR6XX_ICM1_FSMC_BASE 0xD1800000
+#define SPEAR6XX_ICM1_FSMC_SIZE 0x00800000
+
+#define SPEAR6XX_ICM1_NAND_BASE 0xD2000000
+#define SPEAR6XX_ICM1_NAND_SIZE 0x00800000
+
+#define SPEAR6XX_ICM1_SRAM_BASE 0xD2800000
+#define SPEAR6XX_ICM1_SRAM_SIZE 0x00800000
+
+/* ICM2 - Application Subsystem */
+#define SPEAR6XX_ICM2_BASE 0xD8000000
+#define SPEAR6XX_ICM2_SIZE 0x08000000
+
+#define SPEAR6XX_ICM2_TMR0_BASE 0xD8000000
+#define SPEAR6XX_ICM2_TMR0_SIZE 0x00080000
+
+#define SPEAR6XX_ICM2_TMR1_BASE 0xD8080000
+#define SPEAR6XX_ICM2_TMR1_SIZE 0x00080000
+
+#define SPEAR6XX_ICM2_GPIO_BASE 0xD8100000
+#define SPEAR6XX_ICM2_GPIO_SIZE 0x00080000
+
+#define SPEAR6XX_ICM2_SPI2_BASE 0xD8180000
+#define SPEAR6XX_ICM2_SPI2_SIZE 0x00080000
+
+#define SPEAR6XX_ICM2_ADC_BASE 0xD8200000
+#define SPEAR6XX_ICM2_ADC_SIZE 0x00080000
+
+/* ML-1, 2 - Multi Layer CPU Subsystem */
+#define SPEAR6XX_ML_CPU_BASE 0xF0000000
+#define SPEAR6XX_ML_CPU_SIZE 0x08000000
+
+#define SPEAR6XX_CPU_TMR_BASE 0xF0000000
+#define SPEAR6XX_CPU_TMR_SIZE 0x00100000
+
+#define SPEAR6XX_CPU_GPIO_BASE 0xF0100000
+#define SPEAR6XX_CPU_GPIO_SIZE 0x00100000
+
+#define SPEAR6XX_CPU_VIC_SEC_BASE 0xF1000000
+#define VA_SPEAR6XX_CPU_VIC_SEC_BASE IO_ADDRESS(SPEAR6XX_CPU_VIC_SEC_BASE)
+#define SPEAR6XX_CPU_VIC_SEC_SIZE 0x00100000
+
+#define SPEAR6XX_CPU_VIC_PRI_BASE 0xF1100000
+#define VA_SPEAR6XX_CPU_VIC_PRI_BASE IO_ADDRESS(SPEAR6XX_CPU_VIC_PRI_BASE)
+#define SPEAR6XX_CPU_VIC_PRI_SIZE 0x00100000
+
+/* ICM3 - Basic Subsystem */
+#define SPEAR6XX_ICM3_BASE 0xF8000000
+#define SPEAR6XX_ICM3_SIZE 0x08000000
+
+#define SPEAR6XX_ICM3_SMEM_BASE 0xF8000000
+#define SPEAR6XX_ICM3_SMEM_SIZE 0x04000000
+
+#define SPEAR6XX_ICM3_SMI_CTRL_BASE 0xFC000000
+#define SPEAR6XX_ICM3_SMI_CTRL_SIZE 0x00200000
+
+#define SPEAR6XX_ICM3_CLCD_BASE 0xFC200000
+#define SPEAR6XX_ICM3_CLCD_SIZE 0x00200000
+
+#define SPEAR6XX_ICM3_DMA_BASE 0xFC400000
+#define SPEAR6XX_ICM3_DMA_SIZE 0x00200000
+
+#define SPEAR6XX_ICM3_SDRAM_CTRL_BASE 0xFC600000
+#define SPEAR6XX_ICM3_SDRAM_CTRL_SIZE 0x00200000
+
+#define SPEAR6XX_ICM3_TMR_BASE 0xFC800000
+#define SPEAR6XX_ICM3_TMR_SIZE 0x00080000
+
+#define SPEAR6XX_ICM3_WDT_BASE 0xFC880000
+#define SPEAR6XX_ICM3_WDT_SIZE 0x00080000
+
+#define SPEAR6XX_ICM3_RTC_BASE 0xFC900000
+#define SPEAR6XX_ICM3_RTC_SIZE 0x00080000
+
+#define SPEAR6XX_ICM3_GPIO_BASE 0xFC980000
+#define SPEAR6XX_ICM3_GPIO_SIZE 0x00080000
+
+#define SPEAR6XX_ICM3_SYS_CTRL_BASE 0xFCA00000
+#define VA_SPEAR6XX_ICM3_SYS_CTRL_BASE IO_ADDRESS(SPEAR6XX_ICM3_SYS_CTRL_BASE)
+#define SPEAR6XX_ICM3_SYS_CTRL_SIZE 0x00080000
+
+#define SPEAR6XX_ICM3_MISC_REG_BASE 0xFCA80000
+#define VA_SPEAR6XX_ICM3_MISC_REG_BASE IO_ADDRESS(SPEAR6XX_ICM3_MISC_REG_BASE)
+#define SPEAR6XX_ICM3_MISC_REG_SIZE 0x00080000
+
+/* ICM4 - High Speed Connection */
+#define SPEAR6XX_ICM4_BASE 0xE0000000
+#define SPEAR6XX_ICM4_SIZE 0x08000000
+
+#define SPEAR6XX_ICM4_GMAC_BASE 0xE0800000
+#define SPEAR6XX_ICM4_GMAC_SIZE 0x00800000
+
+#define SPEAR6XX_ICM4_USBD_FIFO_BASE 0xE1000000
+#define SPEAR6XX_ICM4_USBD_FIFO_SIZE 0x00100000
+
+#define SPEAR6XX_ICM4_USBD_CSR_BASE 0xE1100000
+#define SPEAR6XX_ICM4_USBD_CSR_SIZE 0x00100000
+
+#define SPEAR6XX_ICM4_USBD_PLDT_BASE 0xE1200000
+#define SPEAR6XX_ICM4_USBD_PLDT_SIZE 0x00100000
+
+#define SPEAR6XX_ICM4_USB_EHCI0_BASE 0xE1800000
+#define SPEAR6XX_ICM4_USB_EHCI0_SIZE 0x00100000
+
+#define SPEAR6XX_ICM4_USB_OHCI0_BASE 0xE1900000
+#define SPEAR6XX_ICM4_USB_OHCI0_SIZE 0x00100000
+
+#define SPEAR6XX_ICM4_USB_EHCI1_BASE 0xE2000000
+#define SPEAR6XX_ICM4_USB_EHCI1_SIZE 0x00100000
+
+#define SPEAR6XX_ICM4_USB_OHCI1_BASE 0xE2100000
+#define SPEAR6XX_ICM4_USB_OHCI1_SIZE 0x00100000
+
+#define SPEAR6XX_ICM4_USB_ARB_BASE 0xE2800000
+#define SPEAR6XX_ICM4_USB_ARB_SIZE 0x00010000
+
+/* Debug UART for Linux, used for debug and uncompress messages */
+#define SPEAR_DBG_UART_BASE SPEAR6XX_ICM1_UART0_BASE
+#define VA_SPEAR_DBG_UART_BASE VA_SPEAR6XX_ICM1_UART0_BASE
+
+/* Sysctl base for spear platform */
+#define SPEAR_SYS_CTRL_BASE SPEAR6XX_ICM3_SYS_CTRL_BASE
+#define VA_SPEAR_SYS_CTRL_BASE VA_SPEAR6XX_ICM3_SYS_CTRL_BASE
+
+#endif /* __MACH_SPEAR6XX_H */
diff --git a/arch/arm/mach-spear6xx/include/mach/spear600.h b/arch/arm/mach-spear6xx/include/mach/spear600.h
new file mode 100644
index 000000000000..c068cc50b0fb
--- /dev/null
+++ b/arch/arm/mach-spear6xx/include/mach/spear600.h
@@ -0,0 +1,21 @@
+/*
+ * arch/arm/mach-spear6xx/include/mach/spear600.h
+ *
+ * SPEAr600 Machine specific definition
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifdef CONFIG_MACH_SPEAR600
+
+#ifndef __MACH_SPEAR600_H
+#define __MACH_SPEAR600_H
+
+#endif /* __MACH_SPEAR600_H */
+
+#endif /* CONFIG_MACH_SPEAR600 */
diff --git a/arch/arm/mach-spear6xx/include/mach/system.h b/arch/arm/mach-spear6xx/include/mach/system.h
new file mode 100644
index 000000000000..0b1d2be81cfb
--- /dev/null
+++ b/arch/arm/mach-spear6xx/include/mach/system.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear6xx/include/mach/system.h
+ *
+ * SPEAr6xx Machine family specific architecture functions
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Rajeev Kumar<rajeev-dlh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_SYSTEM_H
+#define __MACH_SYSTEM_H
+
+#include <plat/system.h>
+
+#endif /* __MACH_SYSTEM_H */
diff --git a/arch/arm/mach-spear6xx/include/mach/timex.h b/arch/arm/mach-spear6xx/include/mach/timex.h
new file mode 100644
index 000000000000..ac1c5b005695
--- /dev/null
+++ b/arch/arm/mach-spear6xx/include/mach/timex.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear6xx/include/mach/timex.h
+ *
+ * SPEAr6XX machine family specific timex definitions
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Rajeev Kumar<rajeev-dlh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_TIMEX_H
+#define __MACH_TIMEX_H
+
+#include <plat/timex.h>
+
+#endif /* __MACH_TIMEX_H */
diff --git a/arch/arm/mach-spear6xx/include/mach/uncompress.h b/arch/arm/mach-spear6xx/include/mach/uncompress.h
new file mode 100644
index 000000000000..77f0765e21e1
--- /dev/null
+++ b/arch/arm/mach-spear6xx/include/mach/uncompress.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear6xx/include/mach/uncompress.h
+ *
+ * Serial port stubs for kernel decompress status messages
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Rajeev Kumar<rajeev-dlh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_UNCOMPRESS_H
+#define __MACH_UNCOMPRESS_H
+
+#include <plat/uncompress.h>
+
+#endif /* __MACH_UNCOMPRESS_H */
diff --git a/arch/arm/mach-spear6xx/include/mach/vmalloc.h b/arch/arm/mach-spear6xx/include/mach/vmalloc.h
new file mode 100644
index 000000000000..4a0b56cb2a91
--- /dev/null
+++ b/arch/arm/mach-spear6xx/include/mach/vmalloc.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear6xx/include/mach/vmalloc.h
+ *
+ * Defining Vmalloc area for SPEAr6xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Rajeev Kumar<rajeev-dlh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_VMALLOC_H
+#define __MACH_VMALLOC_H
+
+#include <plat/vmalloc.h>
+
+#endif /* __MACH_VMALLOC_H */
diff --git a/arch/arm/mach-spear6xx/spear600.c b/arch/arm/mach-spear6xx/spear600.c
new file mode 100644
index 000000000000..5c484c433dc1
--- /dev/null
+++ b/arch/arm/mach-spear6xx/spear600.c
@@ -0,0 +1,25 @@
+/*
+ * arch/arm/mach-spear6xx/spear600.c
+ *
+ * SPEAr600 machine source file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Rajeev Kumar<rajeev-dlh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/ptrace.h>
+#include <asm/irq.h>
+#include <mach/generic.h>
+#include <mach/spear.h>
+
+/* Add spear600 specific devices here */
+
+void __init spear600_init(void)
+{
+ /* call spear6xx family common init function */
+ spear6xx_init();
+}
diff --git a/arch/arm/mach-spear6xx/spear600_evb.c b/arch/arm/mach-spear6xx/spear600_evb.c
new file mode 100644
index 000000000000..daff8d04f7b6
--- /dev/null
+++ b/arch/arm/mach-spear6xx/spear600_evb.c
@@ -0,0 +1,51 @@
+/*
+ * arch/arm/mach-spear6xx/spear600_evb.c
+ *
+ * SPEAr600 evaluation board source file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <asm/mach/arch.h>
+#include <asm/mach-types.h>
+#include <mach/generic.h>
+#include <mach/spear.h>
+
+static struct amba_device *amba_devs[] __initdata = {
+ &gpio_device[0],
+ &gpio_device[1],
+ &gpio_device[2],
+ &uart_device[0],
+ &uart_device[1],
+};
+
+static struct platform_device *plat_devs[] __initdata = {
+};
+
+static void __init spear600_evb_init(void)
+{
+ unsigned int i;
+
+ /* call spear600 machine init function */
+ spear600_init();
+
+ /* Add Platform Devices */
+ platform_add_devices(plat_devs, ARRAY_SIZE(plat_devs));
+
+ /* Add Amba Devices */
+ for (i = 0; i < ARRAY_SIZE(amba_devs); i++)
+ amba_device_register(amba_devs[i], &iomem_resource);
+}
+
+MACHINE_START(SPEAR600, "ST-SPEAR600-EVB")
+ .boot_params = 0x00000100,
+ .map_io = spear6xx_map_io,
+ .init_irq = spear6xx_init_irq,
+ .timer = &spear_sys_timer,
+ .init_machine = spear600_evb_init,
+MACHINE_END
diff --git a/arch/arm/mach-spear6xx/spear6xx.c b/arch/arm/mach-spear6xx/spear6xx.c
new file mode 100644
index 000000000000..b67e571d4bf7
--- /dev/null
+++ b/arch/arm/mach-spear6xx/spear6xx.c
@@ -0,0 +1,158 @@
+/*
+ * arch/arm/mach-spear6xx/spear6xx.c
+ *
+ * SPEAr6XX machines common source file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Rajeev Kumar<rajeev-dlh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/types.h>
+#include <linux/amba/pl061.h>
+#include <linux/ptrace.h>
+#include <linux/io.h>
+#include <asm/hardware/vic.h>
+#include <asm/irq.h>
+#include <asm/mach/arch.h>
+#include <mach/irqs.h>
+#include <mach/generic.h>
+#include <mach/spear.h>
+
+/* Add spear6xx machines common devices here */
+/* uart device registration */
+struct amba_device uart_device[] = {
+ {
+ .dev = {
+ .init_name = "uart0",
+ },
+ .res = {
+ .start = SPEAR6XX_ICM1_UART0_BASE,
+ .end = SPEAR6XX_ICM1_UART0_BASE +
+ SPEAR6XX_ICM1_UART0_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ .irq = {IRQ_UART_0, NO_IRQ},
+ }, {
+ .dev = {
+ .init_name = "uart1",
+ },
+ .res = {
+ .start = SPEAR6XX_ICM1_UART1_BASE,
+ .end = SPEAR6XX_ICM1_UART1_BASE +
+ SPEAR6XX_ICM1_UART1_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ .irq = {IRQ_UART_1, NO_IRQ},
+ }
+};
+
+/* gpio device registration */
+static struct pl061_platform_data gpio_plat_data[] = {
+ {
+ .gpio_base = 0,
+ .irq_base = SPEAR_GPIO0_INT_BASE,
+ }, {
+ .gpio_base = 8,
+ .irq_base = SPEAR_GPIO1_INT_BASE,
+ }, {
+ .gpio_base = 16,
+ .irq_base = SPEAR_GPIO2_INT_BASE,
+ },
+};
+
+struct amba_device gpio_device[] = {
+ {
+ .dev = {
+ .init_name = "gpio0",
+ .platform_data = &gpio_plat_data[0],
+ },
+ .res = {
+ .start = SPEAR6XX_CPU_GPIO_BASE,
+ .end = SPEAR6XX_CPU_GPIO_BASE +
+ SPEAR6XX_CPU_GPIO_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ .irq = {IRQ_LOCAL_GPIO, NO_IRQ},
+ }, {
+ .dev = {
+ .init_name = "gpio1",
+ .platform_data = &gpio_plat_data[1],
+ },
+ .res = {
+ .start = SPEAR6XX_ICM3_GPIO_BASE,
+ .end = SPEAR6XX_ICM3_GPIO_BASE +
+ SPEAR6XX_ICM3_GPIO_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ .irq = {IRQ_BASIC_GPIO, NO_IRQ},
+ }, {
+ .dev = {
+ .init_name = "gpio2",
+ .platform_data = &gpio_plat_data[2],
+ },
+ .res = {
+ .start = SPEAR6XX_ICM2_GPIO_BASE,
+ .end = SPEAR6XX_ICM2_GPIO_BASE +
+ SPEAR6XX_ICM2_GPIO_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ .irq = {IRQ_APPL_GPIO, NO_IRQ},
+ }
+};
+
+/* This will add devices and do machine-specific tasks */
+void __init spear6xx_init(void)
+{
+ /* nothing to do for now */
+}
+
+/* This will initialize vic */
+void __init spear6xx_init_irq(void)
+{
+ vic_init((void __iomem *)VA_SPEAR6XX_CPU_VIC_PRI_BASE, 0, ~0, 0);
+ vic_init((void __iomem *)VA_SPEAR6XX_CPU_VIC_SEC_BASE, 32, ~0, 0);
+}
+
+/* Following will create static virtual/physical mappings */
+static struct map_desc spear6xx_io_desc[] __initdata = {
+ {
+ .virtual = VA_SPEAR6XX_ICM1_UART0_BASE,
+ .pfn = __phys_to_pfn(SPEAR6XX_ICM1_UART0_BASE),
+ .length = SPEAR6XX_ICM1_UART0_SIZE,
+ .type = MT_DEVICE
+ }, {
+ .virtual = VA_SPEAR6XX_CPU_VIC_PRI_BASE,
+ .pfn = __phys_to_pfn(SPEAR6XX_CPU_VIC_PRI_BASE),
+ .length = SPEAR6XX_CPU_VIC_PRI_SIZE,
+ .type = MT_DEVICE
+ }, {
+ .virtual = VA_SPEAR6XX_CPU_VIC_SEC_BASE,
+ .pfn = __phys_to_pfn(SPEAR6XX_CPU_VIC_SEC_BASE),
+ .length = SPEAR6XX_CPU_VIC_SEC_SIZE,
+ .type = MT_DEVICE
+ }, {
+ .virtual = VA_SPEAR6XX_ICM3_SYS_CTRL_BASE,
+ .pfn = __phys_to_pfn(SPEAR6XX_ICM3_SYS_CTRL_BASE),
+ .length = SPEAR6XX_ICM3_SYS_CTRL_SIZE,
+ .type = MT_DEVICE
+ }, {
+ .virtual = VA_SPEAR6XX_ICM3_MISC_REG_BASE,
+ .pfn = __phys_to_pfn(SPEAR6XX_ICM3_MISC_REG_BASE),
+ .length = SPEAR6XX_ICM3_MISC_REG_SIZE,
+ .type = MT_DEVICE
+ },
+};
+
+/* This will create static memory mapping for selected devices */
+void __init spear6xx_map_io(void)
+{
+ iotable_init(spear6xx_io_desc, ARRAY_SIZE(spear6xx_io_desc));
+
+ /* This will initialize clock framework */
+ clk_init();
+}
diff --git a/arch/arm/mach-u300/i2c.c b/arch/arm/mach-u300/i2c.c
index c73ed06b6065..f0394baa11fa 100644
--- a/arch/arm/mach-u300/i2c.c
+++ b/arch/arm/mach-u300/i2c.c
@@ -9,7 +9,7 @@
*/
#include <linux/kernel.h>
#include <linux/i2c.h>
-#include <linux/mfd/ab3100.h>
+#include <linux/mfd/abx500.h>
#include <linux/regulator/machine.h>
#include <linux/amba/bus.h>
#include <mach/irqs.h>
@@ -46,6 +46,7 @@
/* BUCK SLEEP 0xAC: 1.05V, Not used, SLEEP_A and B, Not used */
#define BUCK_SLEEP_SETTING 0xAC
+#ifdef CONFIG_AB3100_CORE
static struct regulator_consumer_supply supply_ldo_c[] = {
{
.dev_name = "ab3100-codec",
@@ -253,14 +254,68 @@ static struct ab3100_platform_data ab3100_plf_data = {
LDO_D_SETTING,
},
};
+#endif
+
+#ifdef CONFIG_AB3550_CORE
+static struct abx500_init_settings ab3550_init_settings[] = {
+ {
+ .bank = 0,
+ .reg = AB3550_IMR1,
+ .setting = 0xff
+ },
+ {
+ .bank = 0,
+ .reg = AB3550_IMR2,
+ .setting = 0xff
+ },
+ {
+ .bank = 0,
+ .reg = AB3550_IMR3,
+ .setting = 0xff
+ },
+ {
+ .bank = 0,
+ .reg = AB3550_IMR4,
+ .setting = 0xff
+ },
+ {
+ .bank = 0,
+ .reg = AB3550_IMR5,
+ /* The two most significant bits are not used */
+ .setting = 0x3f
+ },
+};
+
+static struct ab3550_platform_data ab3550_plf_data = {
+ .irq = {
+ .base = IRQ_AB3550_BASE,
+ .count = (IRQ_AB3550_END - IRQ_AB3550_BASE + 1),
+ },
+ .dev_data = {
+ },
+ .init_settings = ab3550_init_settings,
+ .init_settings_sz = ARRAY_SIZE(ab3550_init_settings),
+};
+#endif
static struct i2c_board_info __initdata bus0_i2c_board_info[] = {
+#if defined(CONFIG_AB3550_CORE)
+ {
+ .type = "ab3550",
+ .addr = 0x4A,
+ .irq = IRQ_U300_IRQ0_EXT,
+ .platform_data = &ab3550_plf_data,
+ },
+#elif defined(CONFIG_AB3100_CORE)
{
.type = "ab3100",
.addr = 0x48,
.irq = IRQ_U300_IRQ0_EXT,
.platform_data = &ab3100_plf_data,
},
+#else
+ { },
+#endif
};
static struct i2c_board_info __initdata bus1_i2c_board_info[] = {
diff --git a/arch/arm/mach-u300/include/mach/coh901318.h b/arch/arm/mach-u300/include/mach/coh901318.h
index b8155b4e5ffa..193da2df732c 100644
--- a/arch/arm/mach-u300/include/mach/coh901318.h
+++ b/arch/arm/mach-u300/include/mach/coh901318.h
@@ -103,27 +103,6 @@ struct coh901318_platform {
};
/**
- * coh901318_get_bytes_left() - Get number of bytes left on a current transfer
- * @chan: dma channel handle
- * return number of bytes left, or negative on error
- */
-u32 coh901318_get_bytes_left(struct dma_chan *chan);
-
-/**
- * coh901318_stop() - Stops dma transfer
- * @chan: dma channel handle
- * return 0 on success otherwise negative value
- */
-void coh901318_stop(struct dma_chan *chan);
-
-/**
- * coh901318_continue() - Resumes a stopped dma transfer
- * @chan: dma channel handle
- * return 0 on success otherwise negative value
- */
-void coh901318_continue(struct dma_chan *chan);
-
-/**
* coh901318_filter_id() - DMA channel filter function
* @chan: dma channel handle
* @chan_id: id of dma channel to be filter out
diff --git a/arch/arm/mach-u300/include/mach/irqs.h b/arch/arm/mach-u300/include/mach/irqs.h
index a6867b12773e..09b1b28fa8fd 100644
--- a/arch/arm/mach-u300/include/mach/irqs.h
+++ b/arch/arm/mach-u300/include/mach/irqs.h
@@ -109,6 +109,13 @@
#define U300_NR_IRQS 48
#endif
+#ifdef CONFIG_AB3550_CORE
+#define IRQ_AB3550_BASE (U300_NR_IRQS)
+#define IRQ_AB3550_END (IRQ_AB3550_BASE + 37)
+
+#define NR_IRQS (IRQ_AB3550_END + 1)
+#else
#define NR_IRQS U300_NR_IRQS
+#endif
#endif
diff --git a/arch/arm/mach-u300/mmc.c b/arch/arm/mach-u300/mmc.c
index 77fbb1e0e528..88506d030596 100644
--- a/arch/arm/mach-u300/mmc.c
+++ b/arch/arm/mach-u300/mmc.c
@@ -102,11 +102,12 @@ int __devinit mmc_init(struct amba_device *adev)
* we have a regulator we can control instead.
*/
/* Nominally 2.85V on our platform */
+ mmci_card->mmc0_plat_data.f_max = 24000000;
mmci_card->mmc0_plat_data.status = mmc_status;
mmci_card->mmc0_plat_data.gpio_wp = -1;
mmci_card->mmc0_plat_data.gpio_cd = -1;
mmci_card->mmc0_plat_data.capabilities = MMC_CAP_MMC_HIGHSPEED |
- MMC_CAP_SD_HIGHSPEED | MMC_CAP_4_BIT_DATA;
+ MMC_CAP_SD_HIGHSPEED | MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
mmcsd_device->platform_data = (void *) &mmci_card->mmc0_plat_data;
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index 03625d744857..6625e5bbf4d6 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -1,15 +1,42 @@
-menu "ST-Ericsson platform type"
- depends on ARCH_U8500
+if ARCH_U8500
-comment "ST-Ericsson Multicore Mobile Platforms"
-
-config MACH_U8500_MOP
- bool "U8500 Early Development platform"
+config UX500_SOC_COMMON
+ bool
default y
select ARM_GIC
select HAS_MTU
+ select NOMADIK_GPIO
+
+config UX500_SOC_DB8500
+ bool
+
+config UX500_SOC_DB5500
+ bool
+
+choice
+ prompt "Ux500 target platform"
+ default MACH_U8500_MOP
+
+config MACH_U8500_MOP
+ bool "U8500 Development platform"
+ select UX500_SOC_DB8500
help
Include support for mop500 development platform
based on U8500 architecture. The platform is based
on early drop silicon version of 8500.
-endmenu
+
+config MACH_U5500
+ bool "U5500 Development platform"
+ select UX500_SOC_DB5500
+ help
+ Include support for the U5500 development platform.
+endchoice
+
+config UX500_DEBUG_UART
+ int "Ux500 UART to use for low-level debug"
+ default 2
+ help
+ Choose the UART on which kernel low-level debug messages should be
+ output.
+
+endif
diff --git a/arch/arm/mach-ux500/Makefile b/arch/arm/mach-ux500/Makefile
index 95e6e24c0042..c7bc4199e3a8 100644
--- a/arch/arm/mach-ux500/Makefile
+++ b/arch/arm/mach-ux500/Makefile
@@ -2,7 +2,9 @@
# Makefile for the linux kernel, U8500 machine.
#
-obj-y := clock.o
-obj-$(CONFIG_ARCH_U8500) += cpu-u8500.o
+obj-y := clock.o cpu.o devices.o
+obj-$(CONFIG_UX500_SOC_DB5500) += cpu-db5500.o devices-db5500.o
+obj-$(CONFIG_UX500_SOC_DB8500) += cpu-db8500.o devices-db8500.o
obj-$(CONFIG_MACH_U8500_MOP) += board-mop500.o
+obj-$(CONFIG_MACH_U5500) += board-u5500.o
obj-$(CONFIG_SMP) += platsmp.o headsmp.o localtimer.o
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index 803aec1d6728..072196c57263 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -17,37 +17,14 @@
#include <linux/amba/pl022.h>
#include <linux/spi/spi.h>
-#include <asm/localtimer.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
-#include <plat/mtu.h>
#include <plat/i2c.h>
#include <mach/hardware.h>
#include <mach/setup.h>
-
-#define __MEM_4K_RESOURCE(x) \
- .res = {.start = (x), .end = (x) + SZ_4K - 1, .flags = IORESOURCE_MEM}
-
-/* These are active devices on this board */
-static struct amba_device uart0_device = {
- .dev = { .init_name = "uart0" },
- __MEM_4K_RESOURCE(U8500_UART0_BASE),
- .irq = {IRQ_UART0, NO_IRQ},
-};
-
-static struct amba_device uart1_device = {
- .dev = { .init_name = "uart1" },
- __MEM_4K_RESOURCE(U8500_UART1_BASE),
- .irq = {IRQ_UART1, NO_IRQ},
-};
-
-static struct amba_device uart2_device = {
- .dev = { .init_name = "uart2" },
- __MEM_4K_RESOURCE(U8500_UART2_BASE),
- .irq = {IRQ_UART2, NO_IRQ},
-};
+#include <mach/devices.h>
static void ab4500_spi_cs_control(u32 command)
{
@@ -93,55 +70,8 @@ static struct pl022_ssp_controller ssp0_platform_data = {
.num_chipselect = 5,
};
-static struct amba_device pl022_device = {
- .dev = {
- .coherent_dma_mask = ~0,
- .init_name = "pl022",
- .platform_data = &ssp0_platform_data,
- },
- .res = {
- .start = U8500_SSP0_BASE,
- .end = U8500_SSP0_BASE + SZ_4K - 1,
- .flags = IORESOURCE_MEM,
- },
- .irq = {IRQ_SSP0, NO_IRQ },
- /* ST-Ericsson modified id */
- .periphid = SSP_PER_ID,
-};
-
-static struct amba_device pl031_device = {
- .dev = {
- .init_name = "pl031",
- },
- .res = {
- .start = U8500_RTC_BASE,
- .end = U8500_RTC_BASE + SZ_4K - 1,
- .flags = IORESOURCE_MEM,
- },
- .irq = {IRQ_RTC_RTT, NO_IRQ},
-};
-
-#define U8500_I2C_RESOURCES(id, size) \
-static struct resource u8500_i2c_resources_##id[] = { \
- [0] = { \
- .start = U8500_I2C##id##_BASE, \
- .end = U8500_I2C##id##_BASE + size - 1, \
- .flags = IORESOURCE_MEM, \
- }, \
- [1] = { \
- .start = IRQ_I2C##id, \
- .end = IRQ_I2C##id, \
- .flags = IORESOURCE_IRQ \
- } \
-}
-
-U8500_I2C_RESOURCES(0, SZ_4K);
-U8500_I2C_RESOURCES(1, SZ_4K);
-U8500_I2C_RESOURCES(2, SZ_4K);
-U8500_I2C_RESOURCES(3, SZ_4K);
-
#define U8500_I2C_CONTROLLER(id, _slsu, _tft, _rft, clk, _sm) \
-static struct nmk_i2c_controller u8500_i2c_##id = { \
+static struct nmk_i2c_controller u8500_i2c##id##_data = { \
/* \
* slave data setup time, which is \
* 250 ns,100ns,10ns which is 14,6,2 \
@@ -169,58 +99,32 @@ U8500_I2C_CONTROLLER(1, 0xe, 1, 1, 100000, I2C_FREQ_MODE_STANDARD);
U8500_I2C_CONTROLLER(2, 0xe, 1, 1, 100000, I2C_FREQ_MODE_STANDARD);
U8500_I2C_CONTROLLER(3, 0xe, 1, 1, 100000, I2C_FREQ_MODE_STANDARD);
-#define U8500_I2C_PDEVICE(cid) \
-static struct platform_device i2c_controller##cid = { \
- .name = "nmk-i2c", \
- .id = cid, \
- .num_resources = 2, \
- .resource = u8500_i2c_resources_##cid, \
- .dev = { \
- .platform_data = &u8500_i2c_##cid \
- } \
-}
-
-U8500_I2C_PDEVICE(0);
-U8500_I2C_PDEVICE(1);
-U8500_I2C_PDEVICE(2);
-U8500_I2C_PDEVICE(3);
-
static struct amba_device *amba_devs[] __initdata = {
- &uart0_device,
- &uart1_device,
- &uart2_device,
- &pl022_device,
- &pl031_device,
+ &ux500_uart0_device,
+ &ux500_uart1_device,
+ &ux500_uart2_device,
+ &u8500_ssp0_device,
};
/* add any platform devices here - TODO */
static struct platform_device *platform_devs[] __initdata = {
- &i2c_controller0,
- &i2c_controller1,
- &i2c_controller2,
- &i2c_controller3,
-};
-
-static void __init u8500_timer_init(void)
-{
-#ifdef CONFIG_LOCAL_TIMERS
- /* Setup the local timer base */
- twd_base = __io_address(U8500_TWD_BASE);
-#endif
- /* Setup the MTU base */
- mtu_base = __io_address(U8500_MTU0_BASE);
-
- nmdk_timer_init();
-}
-
-static struct sys_timer u8500_timer = {
- .init = u8500_timer_init,
+ &u8500_i2c0_device,
+ &ux500_i2c1_device,
+ &ux500_i2c2_device,
+ &ux500_i2c3_device,
};
static void __init u8500_init_machine(void)
{
int i;
+ u8500_i2c0_device.dev.platform_data = &u8500_i2c0_data;
+ ux500_i2c1_device.dev.platform_data = &u8500_i2c1_data;
+ ux500_i2c2_device.dev.platform_data = &u8500_i2c2_data;
+ ux500_i2c3_device.dev.platform_data = &u8500_i2c3_data;
+
+ u8500_ssp0_device.dev.platform_data = &ssp0_platform_data;
+
/* Register the active AMBA devices on this board */
for (i = 0; i < ARRAY_SIZE(amba_devs); i++)
amba_device_register(amba_devs[i], &iomem_resource);
@@ -239,8 +143,8 @@ MACHINE_START(U8500, "ST-Ericsson MOP500 platform")
.io_pg_offst = (IO_ADDRESS(U8500_UART2_BASE) >> 18) & 0xfffc,
.boot_params = 0x100,
.map_io = u8500_map_io,
- .init_irq = u8500_init_irq,
+ .init_irq = ux500_init_irq,
/* we re-use nomadik timer here */
- .timer = &u8500_timer,
+ .timer = &ux500_timer,
.init_machine = u8500_init_machine,
MACHINE_END
diff --git a/arch/arm/mach-ux500/board-u5500.c b/arch/arm/mach-ux500/board-u5500.c
new file mode 100644
index 000000000000..4430e69cf538
--- /dev/null
+++ b/arch/arm/mach-ux500/board-u5500.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/amba/bus.h>
+#include <linux/gpio.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach-types.h>
+
+#include <mach/hardware.h>
+#include <mach/devices.h>
+#include <mach/setup.h>
+
+static struct amba_device *amba_board_devs[] __initdata = {
+ &ux500_uart0_device,
+ &ux500_uart1_device,
+ &ux500_uart2_device,
+};
+
+static void __init u5500_init_machine(void)
+{
+ u5500_init_devices();
+
+ amba_add_devices(amba_board_devs, ARRAY_SIZE(amba_board_devs));
+}
+
+MACHINE_START(U8500, "ST-Ericsson U5500 Platform")
+ .phys_io = UX500_UART0_BASE,
+ .io_pg_offst = (IO_ADDRESS(UX500_UART0_BASE) >> 18) & 0xfffc,
+ .boot_params = 0x00000100,
+ .map_io = u5500_map_io,
+ .init_irq = ux500_init_irq,
+ .timer = &ux500_timer,
+ .init_machine = u5500_init_machine,
+MACHINE_END
diff --git a/arch/arm/mach-ux500/clock.c b/arch/arm/mach-ux500/clock.c
index 8359a73d0041..1b2c9890e8b4 100644
--- a/arch/arm/mach-ux500/clock.c
+++ b/arch/arm/mach-ux500/clock.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2009 ST-Ericsson
- * heavily based on realview platform
+ * Copyright (C) 2009 STMicroelectronics
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -12,33 +12,130 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/clk.h>
-#include <linux/mutex.h>
+#include <linux/io.h>
#include <asm/clkdev.h>
-/* currently the clk structure
- * just supports rate. This would
- * be extended as and when new devices are
- * added - TODO
- */
-struct clk {
- unsigned long rate;
-};
+#include <mach/hardware.h>
+#include "clock.h"
+
+#define PRCC_PCKEN 0x00
+#define PRCC_PCKDIS 0x04
+#define PRCC_KCKEN 0x08
+#define PRCC_KCKDIS 0x0C
+
+#define PRCM_YYCLKEN0_MGT_SET 0x510
+#define PRCM_YYCLKEN1_MGT_SET 0x514
+#define PRCM_YYCLKEN0_MGT_CLR 0x518
+#define PRCM_YYCLKEN1_MGT_CLR 0x51C
+#define PRCM_YYCLKEN0_MGT_VAL 0x520
+#define PRCM_YYCLKEN1_MGT_VAL 0x524
+
+#define PRCM_SVAMMDSPCLK_MGT 0x008
+#define PRCM_SIAMMDSPCLK_MGT 0x00C
+#define PRCM_SGACLK_MGT 0x014
+#define PRCM_UARTCLK_MGT 0x018
+#define PRCM_MSP02CLK_MGT 0x01C
+#define PRCM_MSP1CLK_MGT 0x288
+#define PRCM_I2CCLK_MGT 0x020
+#define PRCM_SDMMCCLK_MGT 0x024
+#define PRCM_SLIMCLK_MGT 0x028
+#define PRCM_PER1CLK_MGT 0x02C
+#define PRCM_PER2CLK_MGT 0x030
+#define PRCM_PER3CLK_MGT 0x034
+#define PRCM_PER5CLK_MGT 0x038
+#define PRCM_PER6CLK_MGT 0x03C
+#define PRCM_PER7CLK_MGT 0x040
+#define PRCM_LCDCLK_MGT 0x044
+#define PRCM_BMLCLK_MGT 0x04C
+#define PRCM_HSITXCLK_MGT 0x050
+#define PRCM_HSIRXCLK_MGT 0x054
+#define PRCM_HDMICLK_MGT 0x058
+#define PRCM_APEATCLK_MGT 0x05C
+#define PRCM_APETRACECLK_MGT 0x060
+#define PRCM_MCDECLK_MGT 0x064
+#define PRCM_IPI2CCLK_MGT 0x068
+#define PRCM_DSIALTCLK_MGT 0x06C
+#define PRCM_DMACLK_MGT 0x074
+#define PRCM_B2R2CLK_MGT 0x078
+#define PRCM_TVCLK_MGT 0x07C
+#define PRCM_UNIPROCLK_MGT 0x278
+#define PRCM_SSPCLK_MGT 0x280
+#define PRCM_RNGCLK_MGT 0x284
+#define PRCM_UICCCLK_MGT 0x27C
+
+#define PRCM_MGT_ENABLE (1 << 8)
+
+static DEFINE_SPINLOCK(clocks_lock);
+
+static void __clk_enable(struct clk *clk)
+{
+ if (clk->enabled++ == 0) {
+ if (clk->parent_cluster)
+ __clk_enable(clk->parent_cluster);
+
+ if (clk->parent_periph)
+ __clk_enable(clk->parent_periph);
+
+ if (clk->ops && clk->ops->enable)
+ clk->ops->enable(clk);
+ }
+}
int clk_enable(struct clk *clk)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&clocks_lock, flags);
+ __clk_enable(clk);
+ spin_unlock_irqrestore(&clocks_lock, flags);
+
return 0;
}
EXPORT_SYMBOL(clk_enable);
+static void __clk_disable(struct clk *clk)
+{
+ if (--clk->enabled == 0) {
+ if (clk->ops && clk->ops->disable)
+ clk->ops->disable(clk);
+
+ if (clk->parent_periph)
+ __clk_disable(clk->parent_periph);
+
+ if (clk->parent_cluster)
+ __clk_disable(clk->parent_cluster);
+ }
+}
+
void clk_disable(struct clk *clk)
{
+ unsigned long flags;
+
+ WARN_ON(!clk->enabled);
+
+ spin_lock_irqsave(&clocks_lock, flags);
+ __clk_disable(clk);
+ spin_unlock_irqrestore(&clocks_lock, flags);
}
EXPORT_SYMBOL(clk_disable);
unsigned long clk_get_rate(struct clk *clk)
{
- return clk->rate;
+ unsigned long rate;
+
+ if (clk->ops && clk->ops->get_rate)
+ return clk->ops->get_rate(clk);
+
+ rate = clk->rate;
+ if (!rate) {
+ if (clk->parent_periph)
+ rate = clk_get_rate(clk->parent_periph);
+ else if (clk->parent_cluster)
+ rate = clk_get_rate(clk->parent_cluster);
+ }
+
+ return rate;
}
EXPORT_SYMBOL(clk_get_rate);
@@ -56,37 +153,373 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
}
EXPORT_SYMBOL(clk_set_rate);
-/* ssp clock */
-static struct clk ssp_clk = {
- .rate = 48000000,
+static void clk_prcmu_enable(struct clk *clk)
+{
+ void __iomem *cg_set_reg = __io_address(U8500_PRCMU_BASE)
+ + PRCM_YYCLKEN0_MGT_SET + clk->prcmu_cg_off;
+
+ writel(1 << clk->prcmu_cg_bit, cg_set_reg);
+}
+
+static void clk_prcmu_disable(struct clk *clk)
+{
+ void __iomem *cg_clr_reg = __io_address(U8500_PRCMU_BASE)
+ + PRCM_YYCLKEN0_MGT_CLR + clk->prcmu_cg_off;
+
+ writel(1 << clk->prcmu_cg_bit, cg_clr_reg);
+}
+
+/* ED doesn't have the combined set/clr registers */
+static void clk_prcmu_ed_enable(struct clk *clk)
+{
+ void __iomem *addr = __io_address(U8500_PRCMU_BASE)
+ + clk->prcmu_cg_mgt;
+
+ writel(readl(addr) | PRCM_MGT_ENABLE, addr);
+}
+
+static void clk_prcmu_ed_disable(struct clk *clk)
+{
+ void __iomem *addr = __io_address(U8500_PRCMU_BASE)
+ + clk->prcmu_cg_mgt;
+
+ writel(readl(addr) & ~PRCM_MGT_ENABLE, addr);
+}
+
+static struct clkops clk_prcmu_ops = {
+ .enable = clk_prcmu_enable,
+ .disable = clk_prcmu_disable,
};
-/* fixed clock */
-static struct clk f38_clk = {
- .rate = 38400000,
+static unsigned int clkrst_base[] = {
+ [1] = U8500_CLKRST1_BASE,
+ [2] = U8500_CLKRST2_BASE,
+ [3] = U8500_CLKRST3_BASE,
+ [5] = U8500_CLKRST5_BASE,
+ [6] = U8500_CLKRST6_BASE,
+ [7] = U8500_CLKRST7_BASE_ED,
};
-static struct clk_lookup lookups[] = {
- {
- /* UART0 */
- .dev_id = "uart0",
- .clk = &f38_clk,
- }, { /* UART1 */
- .dev_id = "uart1",
- .clk = &f38_clk,
- }, { /* UART2 */
- .dev_id = "uart2",
- .clk = &f38_clk,
- }, { /* SSP */
- .dev_id = "pl022",
- .clk = &ssp_clk,
- }
+static void clk_prcc_enable(struct clk *clk)
+{
+ void __iomem *addr = __io_address(clkrst_base[clk->cluster]);
+
+ if (clk->prcc_kernel != -1)
+ writel(1 << clk->prcc_kernel, addr + PRCC_KCKEN);
+
+ if (clk->prcc_bus != -1)
+ writel(1 << clk->prcc_bus, addr + PRCC_PCKEN);
+}
+
+static void clk_prcc_disable(struct clk *clk)
+{
+ void __iomem *addr = __io_address(clkrst_base[clk->cluster]);
+
+ if (clk->prcc_bus != -1)
+ writel(1 << clk->prcc_bus, addr + PRCC_PCKDIS);
+
+ if (clk->prcc_kernel != -1)
+ writel(1 << clk->prcc_kernel, addr + PRCC_KCKDIS);
+}
+
+static struct clkops clk_prcc_ops = {
+ .enable = clk_prcc_enable,
+ .disable = clk_prcc_disable,
+};
+
+static struct clk clk_32khz = {
+ .rate = 32000,
+};
+
+/*
+ * PRCMU level clock gating
+ */
+
+/* Bank 0 */
+static DEFINE_PRCMU_CLK(svaclk, 0x0, 2, SVAMMDSPCLK);
+static DEFINE_PRCMU_CLK(siaclk, 0x0, 3, SIAMMDSPCLK);
+static DEFINE_PRCMU_CLK(sgaclk, 0x0, 4, SGACLK);
+static DEFINE_PRCMU_CLK_RATE(uartclk, 0x0, 5, UARTCLK, 38400000);
+static DEFINE_PRCMU_CLK(msp02clk, 0x0, 6, MSP02CLK);
+static DEFINE_PRCMU_CLK(msp1clk, 0x0, 7, MSP1CLK); /* v1 */
+static DEFINE_PRCMU_CLK_RATE(i2cclk, 0x0, 8, I2CCLK, 48000000);
+static DEFINE_PRCMU_CLK_RATE(sdmmcclk, 0x0, 9, SDMMCCLK, 50000000);
+static DEFINE_PRCMU_CLK(slimclk, 0x0, 10, SLIMCLK);
+static DEFINE_PRCMU_CLK(per1clk, 0x0, 11, PER1CLK);
+static DEFINE_PRCMU_CLK(per2clk, 0x0, 12, PER2CLK);
+static DEFINE_PRCMU_CLK(per3clk, 0x0, 13, PER3CLK);
+static DEFINE_PRCMU_CLK(per5clk, 0x0, 14, PER5CLK);
+static DEFINE_PRCMU_CLK_RATE(per6clk, 0x0, 15, PER6CLK, 133330000);
+static DEFINE_PRCMU_CLK_RATE(per7clk, 0x0, 16, PER7CLK, 100000000);
+static DEFINE_PRCMU_CLK(lcdclk, 0x0, 17, LCDCLK);
+static DEFINE_PRCMU_CLK(bmlclk, 0x0, 18, BMLCLK);
+static DEFINE_PRCMU_CLK(hsitxclk, 0x0, 19, HSITXCLK);
+static DEFINE_PRCMU_CLK(hsirxclk, 0x0, 20, HSIRXCLK);
+static DEFINE_PRCMU_CLK(hdmiclk, 0x0, 21, HDMICLK);
+static DEFINE_PRCMU_CLK(apeatclk, 0x0, 22, APEATCLK);
+static DEFINE_PRCMU_CLK(apetraceclk, 0x0, 23, APETRACECLK);
+static DEFINE_PRCMU_CLK(mcdeclk, 0x0, 24, MCDECLK);
+static DEFINE_PRCMU_CLK(ipi2clk, 0x0, 25, IPI2CCLK);
+static DEFINE_PRCMU_CLK(dsialtclk, 0x0, 26, DSIALTCLK); /* v1 */
+static DEFINE_PRCMU_CLK(dmaclk, 0x0, 27, DMACLK);
+static DEFINE_PRCMU_CLK(b2r2clk, 0x0, 28, B2R2CLK);
+static DEFINE_PRCMU_CLK(tvclk, 0x0, 29, TVCLK);
+static DEFINE_PRCMU_CLK(uniproclk, 0x0, 30, UNIPROCLK); /* v1 */
+static DEFINE_PRCMU_CLK_RATE(sspclk, 0x0, 31, SSPCLK, 48000000); /* v1 */
+
+/* Bank 1 */
+static DEFINE_PRCMU_CLK(rngclk, 0x4, 0, RNGCLK); /* v1 */
+static DEFINE_PRCMU_CLK(uiccclk, 0x4, 1, UICCCLK); /* v1 */
+
+/*
+ * PRCC level clock gating
+ * Format: per#, clk, PCKEN bit, KCKEN bit, parent
+ */
+
+/* Peripheral Cluster #1 */
+static DEFINE_PRCC_CLK(1, i2c4, 10, 9, &clk_i2cclk);
+static DEFINE_PRCC_CLK(1, gpio0, 9, -1, NULL);
+static DEFINE_PRCC_CLK(1, slimbus0, 8, 8, &clk_slimclk);
+static DEFINE_PRCC_CLK(1, spi3_ed, 7, 7, NULL);
+static DEFINE_PRCC_CLK(1, spi3_v1, 7, -1, NULL);
+static DEFINE_PRCC_CLK(1, i2c2, 6, 6, &clk_i2cclk);
+static DEFINE_PRCC_CLK(1, sdi0, 5, 5, &clk_sdmmcclk);
+static DEFINE_PRCC_CLK(1, msp1_ed, 4, 4, &clk_msp02clk);
+static DEFINE_PRCC_CLK(1, msp1_v1, 4, 4, &clk_msp1clk);
+static DEFINE_PRCC_CLK(1, msp0, 3, 3, &clk_msp02clk);
+static DEFINE_PRCC_CLK(1, i2c1, 2, 2, &clk_i2cclk);
+static DEFINE_PRCC_CLK(1, uart1, 1, 1, &clk_uartclk);
+static DEFINE_PRCC_CLK(1, uart0, 0, 0, &clk_uartclk);
+
+/* Peripheral Cluster #2 */
+
+static DEFINE_PRCC_CLK(2, gpio1_ed, 12, -1, NULL);
+static DEFINE_PRCC_CLK(2, ssitx_ed, 11, -1, NULL);
+static DEFINE_PRCC_CLK(2, ssirx_ed, 10, -1, NULL);
+static DEFINE_PRCC_CLK(2, spi0_ed, 9, -1, NULL);
+static DEFINE_PRCC_CLK(2, sdi3_ed, 8, 6, &clk_sdmmcclk);
+static DEFINE_PRCC_CLK(2, sdi1_ed, 7, 5, &clk_sdmmcclk);
+static DEFINE_PRCC_CLK(2, msp2_ed, 6, 4, &clk_msp02clk);
+static DEFINE_PRCC_CLK(2, sdi4_ed, 4, 2, &clk_sdmmcclk);
+static DEFINE_PRCC_CLK(2, pwl_ed, 3, 1, NULL);
+static DEFINE_PRCC_CLK(2, spi1_ed, 2, -1, NULL);
+static DEFINE_PRCC_CLK(2, spi2_ed, 1, -1, NULL);
+static DEFINE_PRCC_CLK(2, i2c3_ed, 0, 0, &clk_i2cclk);
+
+static DEFINE_PRCC_CLK(2, gpio1_v1, 11, -1, NULL);
+static DEFINE_PRCC_CLK(2, ssitx_v1, 10, 7, NULL);
+static DEFINE_PRCC_CLK(2, ssirx_v1, 9, 6, NULL);
+static DEFINE_PRCC_CLK(2, spi0_v1, 8, -1, NULL);
+static DEFINE_PRCC_CLK(2, sdi3_v1, 7, 5, &clk_sdmmcclk);
+static DEFINE_PRCC_CLK(2, sdi1_v1, 6, 4, &clk_sdmmcclk);
+static DEFINE_PRCC_CLK(2, msp2_v1, 5, 3, &clk_msp02clk);
+static DEFINE_PRCC_CLK(2, sdi4_v1, 4, 2, &clk_sdmmcclk);
+static DEFINE_PRCC_CLK(2, pwl_v1, 3, 1, NULL);
+static DEFINE_PRCC_CLK(2, spi1_v1, 2, -1, NULL);
+static DEFINE_PRCC_CLK(2, spi2_v1, 1, -1, NULL);
+static DEFINE_PRCC_CLK(2, i2c3_v1, 0, 0, &clk_i2cclk);
+
+/* Peripheral Cluster #3 */
+static DEFINE_PRCC_CLK(3, gpio2, 8, -1, NULL);
+static DEFINE_PRCC_CLK(3, sdi5, 7, 7, &clk_sdmmcclk);
+static DEFINE_PRCC_CLK(3, uart2, 6, 6, &clk_uartclk);
+static DEFINE_PRCC_CLK(3, ske, 5, 5, &clk_32khz);
+static DEFINE_PRCC_CLK(3, sdi2, 4, 4, &clk_sdmmcclk);
+static DEFINE_PRCC_CLK(3, i2c0, 3, 3, &clk_i2cclk);
+static DEFINE_PRCC_CLK(3, ssp1_ed, 2, 2, &clk_i2cclk);
+static DEFINE_PRCC_CLK(3, ssp0_ed, 1, 1, &clk_i2cclk);
+static DEFINE_PRCC_CLK(3, ssp1_v1, 2, 2, &clk_sspclk);
+static DEFINE_PRCC_CLK(3, ssp0_v1, 1, 1, &clk_sspclk);
+static DEFINE_PRCC_CLK(3, fsmc, 0, -1, NULL);
+
+/* Peripheral Cluster #4 is in the always on domain */
+
+/* Peripheral Cluster #5 */
+static DEFINE_PRCC_CLK(5, gpio3, 1, -1, NULL);
+static DEFINE_PRCC_CLK(5, usb_ed, 0, 0, &clk_i2cclk);
+static DEFINE_PRCC_CLK(5, usb_v1, 0, 0, NULL);
+
+/* Peripheral Cluster #6 */
+
+static DEFINE_PRCC_CLK(6, mtu1_v1, 8, -1, NULL);
+static DEFINE_PRCC_CLK(6, mtu0_v1, 7, -1, NULL);
+static DEFINE_PRCC_CLK(6, cfgreg_v1, 6, 6, NULL);
+static DEFINE_PRCC_CLK(6, dmc_ed, 6, 6, NULL);
+static DEFINE_PRCC_CLK(6, hash1, 5, -1, NULL);
+static DEFINE_PRCC_CLK(6, unipro_v1, 4, 1, &clk_uniproclk);
+static DEFINE_PRCC_CLK(6, cryp1_ed, 4, -1, NULL);
+static DEFINE_PRCC_CLK(6, pka, 3, -1, NULL);
+static DEFINE_PRCC_CLK(6, hash0, 2, -1, NULL);
+static DEFINE_PRCC_CLK(6, cryp0, 1, -1, NULL);
+static DEFINE_PRCC_CLK(6, rng_ed, 0, 0, &clk_i2cclk);
+static DEFINE_PRCC_CLK(6, rng_v1, 0, 0, &clk_rngclk);
+
+/* Peripheral Cluster #7 */
+
+static DEFINE_PRCC_CLK(7, tzpc0_ed, 4, -1, NULL);
+static DEFINE_PRCC_CLK(7, mtu1_ed, 3, -1, NULL);
+static DEFINE_PRCC_CLK(7, mtu0_ed, 2, -1, NULL);
+static DEFINE_PRCC_CLK(7, wdg_ed, 1, -1, NULL);
+static DEFINE_PRCC_CLK(7, cfgreg_ed, 0, -1, NULL);
+
+static struct clk_lookup u8500_common_clks[] = {
+ /* Peripheral Cluster #1 */
+ CLK(gpio0, "gpio.0", NULL),
+ CLK(gpio0, "gpio.1", NULL),
+ CLK(slimbus0, "slimbus0", NULL),
+ CLK(i2c2, "nmk-i2c.2", NULL),
+ CLK(sdi0, "sdi0", NULL),
+ CLK(msp0, "msp0", NULL),
+ CLK(i2c1, "nmk-i2c.1", NULL),
+ CLK(uart1, "uart1", NULL),
+ CLK(uart0, "uart0", NULL),
+
+ /* Peripheral Cluster #3 */
+ CLK(gpio2, "gpio.2", NULL),
+ CLK(gpio2, "gpio.3", NULL),
+ CLK(gpio2, "gpio.4", NULL),
+ CLK(gpio2, "gpio.5", NULL),
+ CLK(sdi5, "sdi5", NULL),
+ CLK(uart2, "uart2", NULL),
+ CLK(ske, "ske", NULL),
+ CLK(sdi2, "sdi2", NULL),
+ CLK(i2c0, "nmk-i2c.0", NULL),
+ CLK(fsmc, "fsmc", NULL),
+
+ /* Peripheral Cluster #5 */
+ CLK(gpio3, "gpio.8", NULL),
+
+ /* Peripheral Cluster #6 */
+ CLK(hash1, "hash1", NULL),
+ CLK(pka, "pka", NULL),
+ CLK(hash0, "hash0", NULL),
+ CLK(cryp0, "cryp0", NULL),
+
+ /* PRCMU level clock gating */
+
+ /* Bank 0 */
+ CLK(svaclk, "sva", NULL),
+ CLK(siaclk, "sia", NULL),
+ CLK(sgaclk, "sga", NULL),
+ CLK(slimclk, "slim", NULL),
+ CLK(lcdclk, "lcd", NULL),
+ CLK(bmlclk, "bml", NULL),
+ CLK(hsitxclk, "stm-hsi.0", NULL),
+ CLK(hsirxclk, "stm-hsi.1", NULL),
+ CLK(hdmiclk, "hdmi", NULL),
+ CLK(apeatclk, "apeat", NULL),
+ CLK(apetraceclk, "apetrace", NULL),
+ CLK(mcdeclk, "mcde", NULL),
+ CLK(ipi2clk, "ipi2", NULL),
+ CLK(dmaclk, "dma40", NULL),
+ CLK(b2r2clk, "b2r2", NULL),
+ CLK(tvclk, "tv", NULL),
+};
+
+static struct clk_lookup u8500_ed_clks[] = {
+ /* Peripheral Cluster #1 */
+ CLK(spi3_ed, "spi3", NULL),
+ CLK(msp1_ed, "msp1", NULL),
+
+ /* Peripheral Cluster #2 */
+ CLK(gpio1_ed, "gpio.6", NULL),
+ CLK(gpio1_ed, "gpio.7", NULL),
+ CLK(ssitx_ed, "ssitx", NULL),
+ CLK(ssirx_ed, "ssirx", NULL),
+ CLK(spi0_ed, "spi0", NULL),
+ CLK(sdi3_ed, "sdi3", NULL),
+ CLK(sdi1_ed, "sdi1", NULL),
+ CLK(msp2_ed, "msp2", NULL),
+ CLK(sdi4_ed, "sdi4", NULL),
+ CLK(pwl_ed, "pwl", NULL),
+ CLK(spi1_ed, "spi1", NULL),
+ CLK(spi2_ed, "spi2", NULL),
+ CLK(i2c3_ed, "nmk-i2c.3", NULL),
+
+ /* Peripheral Cluster #3 */
+ CLK(ssp1_ed, "ssp1", NULL),
+ CLK(ssp0_ed, "ssp0", NULL),
+
+ /* Peripheral Cluster #5 */
+ CLK(usb_ed, "musb_hdrc.0", "usb"),
+
+ /* Peripheral Cluster #6 */
+ CLK(dmc_ed, "dmc", NULL),
+ CLK(cryp1_ed, "cryp1", NULL),
+ CLK(rng_ed, "rng", NULL),
+
+ /* Peripheral Cluster #7 */
+ CLK(tzpc0_ed, "tzpc0", NULL),
+ CLK(mtu1_ed, "mtu1", NULL),
+ CLK(mtu0_ed, "mtu0", NULL),
+ CLK(wdg_ed, "wdg", NULL),
+ CLK(cfgreg_ed, "cfgreg", NULL),
+};
+
+static struct clk_lookup u8500_v1_clks[] = {
+ /* Peripheral Cluster #1 */
+ CLK(i2c4, "nmk-i2c.4", NULL),
+ CLK(spi3_v1, "spi3", NULL),
+ CLK(msp1_v1, "msp1", NULL),
+
+ /* Peripheral Cluster #2 */
+ CLK(gpio1_v1, "gpio.6", NULL),
+ CLK(gpio1_v1, "gpio.7", NULL),
+ CLK(ssitx_v1, "ssitx", NULL),
+ CLK(ssirx_v1, "ssirx", NULL),
+ CLK(spi0_v1, "spi0", NULL),
+ CLK(sdi3_v1, "sdi3", NULL),
+ CLK(sdi1_v1, "sdi1", NULL),
+ CLK(msp2_v1, "msp2", NULL),
+ CLK(sdi4_v1, "sdi4", NULL),
+ CLK(pwl_v1, "pwl", NULL),
+ CLK(spi1_v1, "spi1", NULL),
+ CLK(spi2_v1, "spi2", NULL),
+ CLK(i2c3_v1, "nmk-i2c.3", NULL),
+
+ /* Peripheral Cluster #3 */
+ CLK(ssp1_v1, "ssp1", NULL),
+ CLK(ssp0_v1, "ssp0", NULL),
+
+ /* Peripheral Cluster #5 */
+ CLK(usb_v1, "musb_hdrc.0", "usb"),
+
+ /* Peripheral Cluster #6 */
+ CLK(mtu1_v1, "mtu1", NULL),
+ CLK(mtu0_v1, "mtu0", NULL),
+ CLK(cfgreg_v1, "cfgreg", NULL),
+ CLK(hash1, "hash1", NULL),
+ CLK(unipro_v1, "unipro", NULL),
+ CLK(rng_v1, "rng", NULL),
+
+ /* PRCMU level clock gating */
+
+ /* Bank 0 */
+ CLK(uniproclk, "uniproclk", NULL),
+ CLK(dsialtclk, "dsialt", NULL),
+
+ /* Bank 1 */
+ CLK(rngclk, "rng", NULL),
+ CLK(uiccclk, "uicc", NULL),
};
static int __init clk_init(void)
{
- /* register the clock lookups */
- clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+ if (cpu_is_u8500ed()) {
+ clk_prcmu_ops.enable = clk_prcmu_ed_enable;
+ clk_prcmu_ops.disable = clk_prcmu_ed_disable;
+ } else if (cpu_is_u5500()) {
+ /* Clock tree for U5500 not implemented yet */
+ clk_prcc_ops.enable = clk_prcc_ops.disable = NULL;
+ clk_prcmu_ops.enable = clk_prcmu_ops.disable = NULL;
+ }
+
+ clkdev_add_table(u8500_common_clks, ARRAY_SIZE(u8500_common_clks));
+ if (cpu_is_u8500ed())
+ clkdev_add_table(u8500_ed_clks, ARRAY_SIZE(u8500_ed_clks));
+ else
+ clkdev_add_table(u8500_v1_clks, ARRAY_SIZE(u8500_v1_clks));
+
return 0;
}
arch_initcall(clk_init);
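
A minimal consumer-side sketch, not part of this patch, of how a driver picks up one of the clocks registered above. The lookup CLK(uart1, "uart1", NULL) has a NULL con_id, so a device named "uart1" obtains its clock with clk_get(dev, NULL); enabling it then walks the PRCC bits and PRCMU parent clocks via __clk_enable().

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int uart1_clk_example(struct device *dev)
{
	struct clk *clk;

	clk = clk_get(dev, NULL);	/* matched by dev_id "uart1" */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	clk_enable(clk);		/* PRCC bits + PRCMU parent clocks */
	dev_info(dev, "uart1 clock runs at %lu Hz\n", clk_get_rate(clk));

	return 0;
}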
diff --git a/arch/arm/mach-ux500/clock.h b/arch/arm/mach-ux500/clock.h
new file mode 100644
index 000000000000..e4f99b65026f
--- /dev/null
+++ b/arch/arm/mach-ux500/clock.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2010 ST-Ericsson
+ * Copyright (C) 2009 STMicroelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/**
+ * struct clkops - ux500 clock operations
+ * @enable: function to enable the clock
+ * @disable: function to disable the clock
+ * @get_rate: function to get the current clock rate
+ *
+ * This structure contains function pointers to functions that will be used to
+ * control the clock. All of these functions are optional. If get_rate is
+ * NULL, the rate in the struct clk will be used.
+ */
+struct clkops {
+ void (*enable) (struct clk *);
+ void (*disable) (struct clk *);
+ unsigned long (*get_rate) (struct clk *);
+};
+
+/**
+ * struct clk - ux500 clock structure
+ * @ops: pointer to clkops struct used to control this clock
+ * @name: name, for debugging
+ * @enabled: refcount. positive if enabled, zero if disabled
+ * @rate: fixed rate for clocks which don't implement
+ * ops->get_rate
+ * @prcmu_cg_off: address offset of the combined enable/disable register
+ * (used on u8500v1)
+ * @prcmu_cg_bit: bit in the combined enable/disable register (used on
+ * u8500v1)
+ * @prcmu_cg_mgt: address of the enable/disable register (used on
+ * u8500ed)
+ * @cluster: peripheral cluster number
+ * @prcc_bus: bit for the bus clock in the peripheral's CLKRST
+ * @prcc_kernel: bit for the kernel clock in the peripheral's CLKRST.
+ * -1 if no kernel clock exists.
+ * @parent_cluster: pointer to parent's cluster clk struct
+ * @parent_periph: pointer to parent's peripheral clk struct
+ *
+ * Peripherals are organised into clusters, and each cluster has an associated
+ * bus clock. Some peripherals also have a parent peripheral clock.
+ *
+ * In order to enable a clock for a peripheral, we need to enable:
+ * (1) the parent cluster (bus) clock at the PRCMU level
+ * (2) the parent peripheral clock (if any) at the PRCMU level
+ * (3) the peripheral's bus & kernel clock at the PRCC level
+ *
+ * (1) and (2) are handled by defining clk structs (DEFINE_PRCMU_CLK) for each
+ * of the cluster and peripheral clocks, and hooking these as the parents of
+ * the individual peripheral clocks.
+ *
+ * (3) is handled by specifying the bits in the PRCC control registers required
+ * to enable these clocks and modifying them in the ->enable and
+ * ->disable callbacks of the peripheral clocks (DEFINE_PRCC_CLK).
+ *
+ * This structure describes both the PRCMU-level clocks and PRCC-level clocks.
+ * The prcmu_* fields are only used for the PRCMU clocks, and the cluster,
+ * prcc, and parent pointers are only used for the PRCC-level clocks.
+ */
+struct clk {
+ const struct clkops *ops;
+ const char *name;
+ unsigned int enabled;
+
+ unsigned long rate;
+ struct list_head list;
+
+ /* These three are only for PRCMU clks */
+
+ unsigned int prcmu_cg_off;
+ unsigned int prcmu_cg_bit;
+ unsigned int prcmu_cg_mgt;
+
+ /* The rest are only for PRCC clks */
+
+ int cluster;
+ unsigned int prcc_bus;
+ unsigned int prcc_kernel;
+
+ struct clk *parent_cluster;
+ struct clk *parent_periph;
+};
+
+#define DEFINE_PRCMU_CLK(_name, _cg_off, _cg_bit, _reg) \
+struct clk clk_##_name = { \
+ .name = #_name, \
+ .ops = &clk_prcmu_ops, \
+ .prcmu_cg_off = _cg_off, \
+ .prcmu_cg_bit = _cg_bit, \
+ .prcmu_cg_mgt = PRCM_##_reg##_MGT \
+ }
+
+#define DEFINE_PRCMU_CLK_RATE(_name, _cg_off, _cg_bit, _reg, _rate) \
+struct clk clk_##_name = { \
+ .name = #_name, \
+ .ops = &clk_prcmu_ops, \
+ .prcmu_cg_off = _cg_off, \
+ .prcmu_cg_bit = _cg_bit, \
+ .rate = _rate, \
+ .prcmu_cg_mgt = PRCM_##_reg##_MGT \
+ }
+
+#define DEFINE_PRCC_CLK(_pclust, _name, _bus_en, _kernel_en, _kernclk) \
+struct clk clk_##_name = { \
+ .name = #_name, \
+ .ops = &clk_prcc_ops, \
+ .cluster = _pclust, \
+ .prcc_bus = _bus_en, \
+ .prcc_kernel = _kernel_en, \
+ .parent_cluster = &clk_per##_pclust##clk, \
+ .parent_periph = _kernclk \
+ }
+
+#define CLK(_clk, _devname, _conname) \
+ { \
+ .clk = &clk_##_clk, \
+ .dev_id = _devname, \
+ .con_id = _conname, \
+ }
diff --git a/arch/arm/mach-ux500/cpu-db5500.c b/arch/arm/mach-ux500/cpu-db5500.c
new file mode 100644
index 000000000000..6a3ac4539f16
--- /dev/null
+++ b/arch/arm/mach-ux500/cpu-db5500.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/platform_device.h>
+#include <linux/amba/bus.h>
+#include <linux/io.h>
+
+#include <asm/mach/map.h>
+
+#include <mach/hardware.h>
+#include <mach/devices.h>
+#include <mach/setup.h>
+
+static struct map_desc u5500_io_desc[] __initdata = {
+ __IO_DEV_DESC(U5500_GPIO0_BASE, SZ_4K),
+ __IO_DEV_DESC(U5500_GPIO1_BASE, SZ_4K),
+ __IO_DEV_DESC(U5500_GPIO2_BASE, SZ_4K),
+ __IO_DEV_DESC(U5500_GPIO3_BASE, SZ_4K),
+ __IO_DEV_DESC(U5500_GPIO4_BASE, SZ_4K),
+};
+
+static struct platform_device *u5500_platform_devs[] __initdata = {
+ &u5500_gpio_devs[0],
+ &u5500_gpio_devs[1],
+ &u5500_gpio_devs[2],
+ &u5500_gpio_devs[3],
+ &u5500_gpio_devs[4],
+ &u5500_gpio_devs[5],
+ &u5500_gpio_devs[6],
+ &u5500_gpio_devs[7],
+};
+
+void __init u5500_map_io(void)
+{
+ ux500_map_io();
+
+ iotable_init(u5500_io_desc, ARRAY_SIZE(u5500_io_desc));
+}
+
+void __init u5500_init_devices(void)
+{
+ ux500_init_devices();
+
+ platform_add_devices(u5500_platform_devs,
+ ARRAY_SIZE(u5500_platform_devs));
+}
diff --git a/arch/arm/mach-ux500/cpu-u8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index 397bc1f9ed94..d04299f3b6b5 100644
--- a/arch/arm/mach-ux500/cpu-u8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -13,44 +13,55 @@
#include <linux/device.h>
#include <linux/amba/bus.h>
#include <linux/irq.h>
+#include <linux/gpio.h>
#include <linux/platform_device.h>
+#include <linux/io.h>
-#include <asm/hardware/gic.h>
#include <asm/mach/map.h>
#include <mach/hardware.h>
+#include <mach/setup.h>
+#include <mach/devices.h>
-/* add any platform devices here - TODO */
static struct platform_device *platform_devs[] __initdata = {
- /* yet to be added, add i2c0, gpio.. */
+ &u8500_gpio_devs[0],
+ &u8500_gpio_devs[1],
+ &u8500_gpio_devs[2],
+ &u8500_gpio_devs[3],
+ &u8500_gpio_devs[4],
+ &u8500_gpio_devs[5],
+ &u8500_gpio_devs[6],
+ &u8500_gpio_devs[7],
+ &u8500_gpio_devs[8],
};
-#define __IO_DEV_DESC(x, sz) { \
- .virtual = IO_ADDRESS(x), \
- .pfn = __phys_to_pfn(x), \
- .length = sz, \
- .type = MT_DEVICE, \
-}
-
/* minimum static i/o mapping required to boot U8500 platforms */
static struct map_desc u8500_io_desc[] __initdata = {
- __IO_DEV_DESC(U8500_UART2_BASE, SZ_4K),
- __IO_DEV_DESC(U8500_GIC_CPU_BASE, SZ_4K),
- __IO_DEV_DESC(U8500_GIC_DIST_BASE, SZ_4K),
+ __IO_DEV_DESC(U8500_PRCMU_BASE, SZ_4K),
+ __IO_DEV_DESC(U8500_GPIO0_BASE, SZ_4K),
+ __IO_DEV_DESC(U8500_GPIO1_BASE, SZ_4K),
+ __IO_DEV_DESC(U8500_GPIO2_BASE, SZ_4K),
+ __IO_DEV_DESC(U8500_GPIO3_BASE, SZ_4K),
+};
+
+static struct map_desc u8500ed_io_desc[] __initdata = {
+ __IO_DEV_DESC(U8500_MTU0_BASE_ED, SZ_4K),
+ __IO_DEV_DESC(U8500_CLKRST7_BASE_ED, SZ_8K),
+};
+
+static struct map_desc u8500v1_io_desc[] __initdata = {
__IO_DEV_DESC(U8500_MTU0_BASE, SZ_4K),
- __IO_DEV_DESC(U8500_TWD_BASE, SZ_4K),
- __IO_DEV_DESC(U8500_SCU_BASE, SZ_4K),
- __IO_DEV_DESC(U8500_BACKUPRAM0_BASE, SZ_8K),
};
void __init u8500_map_io(void)
{
+ ux500_map_io();
+
iotable_init(u8500_io_desc, ARRAY_SIZE(u8500_io_desc));
-}
-void __init u8500_init_irq(void)
-{
- gic_dist_init(0, __io_address(U8500_GIC_DIST_BASE), 29);
- gic_cpu_init(0, __io_address(U8500_GIC_CPU_BASE));
+ if (cpu_is_u8500ed())
+ iotable_init(u8500ed_io_desc, ARRAY_SIZE(u8500ed_io_desc));
+ else
+ iotable_init(u8500v1_io_desc, ARRAY_SIZE(u8500v1_io_desc));
}
/*
@@ -58,6 +69,8 @@ void __init u8500_init_irq(void)
*/
void __init u8500_init_devices(void)
{
+ ux500_init_devices();
+
/* Register the platform devices */
platform_add_devices(platform_devs, ARRAY_SIZE(platform_devs));
diff --git a/arch/arm/mach-ux500/cpu.c b/arch/arm/mach-ux500/cpu.c
new file mode 100644
index 000000000000..d81ad023963c
--- /dev/null
+++ b/arch/arm/mach-ux500/cpu.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/platform_device.h>
+#include <linux/amba/bus.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+
+#include <asm/hardware/cache-l2x0.h>
+#include <asm/hardware/gic.h>
+#include <asm/mach/map.h>
+#include <asm/localtimer.h>
+
+#include <plat/mtu.h>
+#include <mach/hardware.h>
+#include <mach/setup.h>
+#include <mach/devices.h>
+
+#include "clock.h"
+
+static struct map_desc ux500_io_desc[] __initdata = {
+ __IO_DEV_DESC(UX500_UART0_BASE, SZ_4K),
+ __IO_DEV_DESC(UX500_UART2_BASE, SZ_4K),
+
+ __IO_DEV_DESC(UX500_GIC_CPU_BASE, SZ_4K),
+ __IO_DEV_DESC(UX500_GIC_DIST_BASE, SZ_4K),
+ __IO_DEV_DESC(UX500_L2CC_BASE, SZ_4K),
+ __IO_DEV_DESC(UX500_TWD_BASE, SZ_4K),
+ __IO_DEV_DESC(UX500_SCU_BASE, SZ_4K),
+
+ __IO_DEV_DESC(UX500_CLKRST1_BASE, SZ_4K),
+ __IO_DEV_DESC(UX500_CLKRST2_BASE, SZ_4K),
+ __IO_DEV_DESC(UX500_CLKRST3_BASE, SZ_4K),
+ __IO_DEV_DESC(UX500_CLKRST5_BASE, SZ_4K),
+ __IO_DEV_DESC(UX500_CLKRST6_BASE, SZ_4K),
+
+ __IO_DEV_DESC(UX500_MTU0_BASE, SZ_4K),
+ __IO_DEV_DESC(UX500_MTU1_BASE, SZ_4K),
+
+ __IO_DEV_DESC(UX500_BACKUPRAM0_BASE, SZ_8K),
+};
+
+static struct amba_device *ux500_amba_devs[] __initdata = {
+ &ux500_pl031_device,
+};
+
+void __init ux500_map_io(void)
+{
+ iotable_init(ux500_io_desc, ARRAY_SIZE(ux500_io_desc));
+}
+
+void __init ux500_init_devices(void)
+{
+ amba_add_devices(ux500_amba_devs, ARRAY_SIZE(ux500_amba_devs));
+}
+
+void __init ux500_init_irq(void)
+{
+ gic_dist_init(0, __io_address(UX500_GIC_DIST_BASE), 29);
+ gic_cpu_init(0, __io_address(UX500_GIC_CPU_BASE));
+}
+
+#ifdef CONFIG_CACHE_L2X0
+static int ux500_l2x0_init(void)
+{
+ void __iomem *l2x0_base;
+
+ l2x0_base = __io_address(UX500_L2CC_BASE);
+
+ /* 64KB way size, 8 way associativity, force WA */
+ l2x0_init(l2x0_base, 0x3e060000, 0xc0000fff);
+
+ return 0;
+}
+early_initcall(ux500_l2x0_init);
+#endif
+
+static void __init ux500_timer_init(void)
+{
+#ifdef CONFIG_LOCAL_TIMERS
+ /* Setup the local timer base */
+ twd_base = __io_address(UX500_TWD_BASE);
+#endif
+ /* Setup the MTU base */
+ if (cpu_is_u8500ed())
+ mtu_base = __io_address(U8500_MTU0_BASE_ED);
+ else
+ mtu_base = __io_address(UX500_MTU0_BASE);
+
+ nmdk_timer_init();
+}
+
+struct sys_timer ux500_timer = {
+ .init = ux500_timer_init,
+};
diff --git a/arch/arm/mach-ux500/devices-db5500.c b/arch/arm/mach-ux500/devices-db5500.c
new file mode 100644
index 000000000000..33e5b56bebb6
--- /dev/null
+++ b/arch/arm/mach-ux500/devices-db5500.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+
+#include <mach/hardware.h>
+#include <mach/devices.h>
+
+static struct nmk_gpio_platform_data u5500_gpio_data[] = {
+ GPIO_DATA("GPIO-0-31", 0),
+ GPIO_DATA("GPIO-32-63", 32), /* 36..63 not routed to pin */
+ GPIO_DATA("GPIO-64-95", 64), /* 83..95 not routed to pin */
+ GPIO_DATA("GPIO-96-127", 96), /* 102..127 not routed to pin */
+ GPIO_DATA("GPIO-128-159", 128), /* 149..159 not routed to pin */
+ GPIO_DATA("GPIO-160-191", 160),
+ GPIO_DATA("GPIO-192-223", 192),
+ GPIO_DATA("GPIO-224-255", 224), /* 228..255 not routed to pin */
+};
+
+static struct resource u5500_gpio_resources[] = {
+ GPIO_RESOURCE(0),
+ GPIO_RESOURCE(1),
+ GPIO_RESOURCE(2),
+ GPIO_RESOURCE(3),
+ GPIO_RESOURCE(4),
+ GPIO_RESOURCE(5),
+ GPIO_RESOURCE(6),
+ GPIO_RESOURCE(7),
+};
+
+struct platform_device u5500_gpio_devs[] = {
+ GPIO_DEVICE(0),
+ GPIO_DEVICE(1),
+ GPIO_DEVICE(2),
+ GPIO_DEVICE(3),
+ GPIO_DEVICE(4),
+ GPIO_DEVICE(5),
+ GPIO_DEVICE(6),
+ GPIO_DEVICE(7),
+};
diff --git a/arch/arm/mach-ux500/devices-db8500.c b/arch/arm/mach-ux500/devices-db8500.c
new file mode 100644
index 000000000000..20334236afce
--- /dev/null
+++ b/arch/arm/mach-ux500/devices-db8500.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/amba/bus.h>
+
+#include <mach/hardware.h>
+#include <mach/setup.h>
+
+static struct nmk_gpio_platform_data u8500_gpio_data[] = {
+ GPIO_DATA("GPIO-0-31", 0),
+ GPIO_DATA("GPIO-32-63", 32), /* 37..63 not routed to pin */
+ GPIO_DATA("GPIO-64-95", 64),
+ GPIO_DATA("GPIO-96-127", 96), /* 98..127 not routed to pin */
+ GPIO_DATA("GPIO-128-159", 128),
+ GPIO_DATA("GPIO-160-191", 160), /* 172..191 not routed to pin */
+ GPIO_DATA("GPIO-192-223", 192),
+ GPIO_DATA("GPIO-224-255", 224), /* 231..255 not routed to pin */
+ GPIO_DATA("GPIO-256-288", 256), /* 268..288 not routed to pin */
+};
+
+static struct resource u8500_gpio_resources[] = {
+ GPIO_RESOURCE(0),
+ GPIO_RESOURCE(1),
+ GPIO_RESOURCE(2),
+ GPIO_RESOURCE(3),
+ GPIO_RESOURCE(4),
+ GPIO_RESOURCE(5),
+ GPIO_RESOURCE(6),
+ GPIO_RESOURCE(7),
+ GPIO_RESOURCE(8),
+};
+
+struct platform_device u8500_gpio_devs[] = {
+ GPIO_DEVICE(0),
+ GPIO_DEVICE(1),
+ GPIO_DEVICE(2),
+ GPIO_DEVICE(3),
+ GPIO_DEVICE(4),
+ GPIO_DEVICE(5),
+ GPIO_DEVICE(6),
+ GPIO_DEVICE(7),
+ GPIO_DEVICE(8),
+};
+
+struct amba_device u8500_ssp0_device = {
+ .dev = {
+ .coherent_dma_mask = ~0,
+ .init_name = "ssp0",
+ },
+ .res = {
+ .start = U8500_SSP0_BASE,
+ .end = U8500_SSP0_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ .irq = {IRQ_SSP0, NO_IRQ },
+ /* ST-Ericsson modified id */
+ .periphid = SSP_PER_ID,
+};
+
+static struct resource u8500_i2c0_resources[] = {
+ [0] = {
+ .start = U8500_I2C0_BASE,
+ .end = U8500_I2C0_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_I2C0,
+ .end = IRQ_I2C0,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+struct platform_device u8500_i2c0_device = {
+ .name = "nmk-i2c",
+ .id = 0,
+ .resource = u8500_i2c0_resources,
+ .num_resources = ARRAY_SIZE(u8500_i2c0_resources),
+};
+
+static struct resource u8500_i2c4_resources[] = {
+ [0] = {
+ .start = U8500_I2C4_BASE,
+ .end = U8500_I2C4_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_I2C4,
+ .end = IRQ_I2C4,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+struct platform_device u8500_i2c4_device = {
+ .name = "nmk-i2c",
+ .id = 4,
+ .resource = u8500_i2c4_resources,
+ .num_resources = ARRAY_SIZE(u8500_i2c4_resources),
+};
diff --git a/arch/arm/mach-ux500/devices.c b/arch/arm/mach-ux500/devices.c
new file mode 100644
index 000000000000..8a268893cb7f
--- /dev/null
+++ b/arch/arm/mach-ux500/devices.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/amba/bus.h>
+
+#include <mach/hardware.h>
+#include <mach/setup.h>
+
+#define __MEM_4K_RESOURCE(x) \
+ .res = {.start = (x), .end = (x) + SZ_4K - 1, .flags = IORESOURCE_MEM}
+
+struct amba_device ux500_pl031_device = {
+ .dev = {
+ .init_name = "pl031",
+ },
+ .res = {
+ .start = UX500_RTC_BASE,
+ .end = UX500_RTC_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ .irq = {IRQ_RTC_RTT, NO_IRQ},
+};
+
+struct amba_device ux500_uart0_device = {
+ .dev = { .init_name = "uart0" },
+ __MEM_4K_RESOURCE(UX500_UART0_BASE),
+ .irq = {IRQ_UART0, NO_IRQ},
+};
+
+struct amba_device ux500_uart1_device = {
+ .dev = { .init_name = "uart1" },
+ __MEM_4K_RESOURCE(UX500_UART1_BASE),
+ .irq = {IRQ_UART1, NO_IRQ},
+};
+
+struct amba_device ux500_uart2_device = {
+ .dev = { .init_name = "uart2" },
+ __MEM_4K_RESOURCE(UX500_UART2_BASE),
+ .irq = {IRQ_UART2, NO_IRQ},
+};
+
+#define UX500_I2C_RESOURCES(id, size) \
+static struct resource ux500_i2c##id##_resources[] = { \
+ [0] = { \
+ .start = UX500_I2C##id##_BASE, \
+ .end = UX500_I2C##id##_BASE + size - 1, \
+ .flags = IORESOURCE_MEM, \
+ }, \
+ [1] = { \
+ .start = IRQ_I2C##id, \
+ .end = IRQ_I2C##id, \
+ .flags = IORESOURCE_IRQ \
+ } \
+}
+
+UX500_I2C_RESOURCES(1, SZ_4K);
+UX500_I2C_RESOURCES(2, SZ_4K);
+UX500_I2C_RESOURCES(3, SZ_4K);
+
+#define UX500_I2C_PDEVICE(cid) \
+struct platform_device ux500_i2c##cid##_device = { \
+ .name = "nmk-i2c", \
+ .id = cid, \
+ .num_resources = 2, \
+ .resource = ux500_i2c##cid##_resources, \
+}
+
+UX500_I2C_PDEVICE(1);
+UX500_I2C_PDEVICE(2);
+UX500_I2C_PDEVICE(3);
+
+void __init amba_add_devices(struct amba_device *devs[], int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++) {
+ struct amba_device *d = devs[i];
+ amba_device_register(d, &iomem_resource);
+ }
+}
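
amba_add_devices() is a thin convenience loop over amba_device_register(), so a board file can hand it an array of just the devices it wants. A hedged usage sketch (the array and function names are illustrative, not part of this patch):

static struct amba_device *board_amba_devs[] __initdata = {
	&ux500_uart0_device,
	&ux500_uart1_device,
	&ux500_uart2_device,
	&ux500_pl031_device,
};

static void __init board_add_amba_devices(void)
{
	amba_add_devices(board_amba_devs, ARRAY_SIZE(board_amba_devs));
}
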
diff --git a/arch/arm/mach-ux500/include/mach/db5500-regs.h b/arch/arm/mach-ux500/include/mach/db5500-regs.h
new file mode 100644
index 000000000000..545c80fc8024
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/db5500-regs.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef __MACH_DB5500_REGS_H
+#define __MACH_DB5500_REGS_H
+
+#define U5500_PER1_BASE 0xA0020000
+#define U5500_PER2_BASE 0xA0010000
+#define U5500_PER3_BASE 0x80140000
+#define U5500_PER4_BASE 0x80150000
+#define U5500_PER5_BASE 0x80100000
+#define U5500_PER6_BASE 0x80120000
+
+#define U5500_GIC_DIST_BASE 0xA0411000
+#define U5500_GIC_CPU_BASE 0xA0410100
+#define U5500_DMA_BASE 0x90030000
+#define U5500_MCDE_BASE 0xA0400000
+#define U5500_MODEM_BASE 0xB0000000
+#define U5500_L2CC_BASE 0xA0412000
+#define U5500_SCU_BASE 0xA0410000
+#define U5500_DSI1_BASE 0xA0401000
+#define U5500_DSI2_BASE 0xA0402000
+#define U5500_SIA_BASE 0xA0100000
+#define U5500_SVA_BASE 0x80200000
+#define U5500_HSEM_BASE 0xA0000000
+#define U5500_NAND0_BASE 0x60000000
+#define U5500_NAND1_BASE 0x70000000
+#define U5500_TWD_BASE 0xa0410600
+#define U5500_B2R2_BASE 0xa0200000
+
+#define U5500_FSMC_BASE (U5500_PER1_BASE + 0x0000)
+#define U5500_SDI0_BASE (U5500_PER1_BASE + 0x1000)
+#define U5500_SDI2_BASE (U5500_PER1_BASE + 0x2000)
+#define U5500_UART0_BASE (U5500_PER1_BASE + 0x3000)
+#define U5500_I2C1_BASE (U5500_PER1_BASE + 0x4000)
+#define U5500_MSP0_BASE (U5500_PER1_BASE + 0x5000)
+#define U5500_GPIO0_BASE (U5500_PER1_BASE + 0xE000)
+#define U5500_CLKRST1_BASE (U5500_PER1_BASE + 0xF000)
+
+#define U5500_USBOTG_BASE (U5500_PER2_BASE + 0x0000)
+#define U5500_GPIO1_BASE (U5500_PER2_BASE + 0xE000)
+#define U5500_CLKRST2_BASE (U5500_PER2_BASE + 0xF000)
+
+#define U5500_KEYPAD_BASE (U5500_PER3_BASE + 0x0000)
+#define U5500_PWM_BASE (U5500_PER3_BASE + 0x1000)
+#define U5500_GPIO3_BASE (U5500_PER3_BASE + 0xE000)
+#define U5500_CLKRST3_BASE (U5500_PER3_BASE + 0xF000)
+
+#define U5500_BACKUPRAM0_BASE (U5500_PER4_BASE + 0x0000)
+#define U5500_BACKUPRAM1_BASE (U5500_PER4_BASE + 0x1000)
+#define U5500_RTT0_BASE (U5500_PER4_BASE + 0x2000)
+#define U5500_RTT1_BASE (U5500_PER4_BASE + 0x3000)
+#define U5500_RTC_BASE (U5500_PER4_BASE + 0x4000)
+#define U5500_SCR_BASE (U5500_PER4_BASE + 0x5000)
+#define U5500_DMC_BASE (U5500_PER4_BASE + 0x6000)
+#define U5500_PRCMU_BASE (U5500_PER4_BASE + 0x7000)
+#define U5500_MSP1_BASE (U5500_PER4_BASE + 0x9000)
+#define U5500_GPIO2_BASE (U5500_PER4_BASE + 0xA000)
+#define U5500_CDETECT_BASE (U5500_PER4_BASE + 0xF000)
+
+#define U5500_SPI0_BASE (U5500_PER5_BASE + 0x0000)
+#define U5500_SPI1_BASE (U5500_PER5_BASE + 0x1000)
+#define U5500_SPI2_BASE (U5500_PER5_BASE + 0x2000)
+#define U5500_SPI3_BASE (U5500_PER5_BASE + 0x3000)
+#define U5500_UART1_BASE (U5500_PER5_BASE + 0x4000)
+#define U5500_UART2_BASE (U5500_PER5_BASE + 0x5000)
+#define U5500_UART3_BASE (U5500_PER5_BASE + 0x6000)
+#define U5500_SDI1_BASE (U5500_PER5_BASE + 0x7000)
+#define U5500_SDI3_BASE (U5500_PER5_BASE + 0x8000)
+#define U5500_SDI4_BASE (U5500_PER5_BASE + 0x9000)
+#define U5500_I2C2_BASE (U5500_PER5_BASE + 0xA000)
+#define U5500_I2C3_BASE (U5500_PER5_BASE + 0xB000)
+#define U5500_MSP2_BASE (U5500_PER5_BASE + 0xC000)
+#define U5500_IRDA_BASE (U5500_PER5_BASE + 0xD000)
+#define U5500_IRRC_BASE (U5500_PER5_BASE + 0x10000)
+#define U5500_GPIO4_BASE (U5500_PER5_BASE + 0x1E000)
+#define U5500_CLKRST5_BASE (U5500_PER5_BASE + 0x1F000)
+
+#define U5500_RNG_BASE (U5500_PER6_BASE + 0x0000)
+#define U5500_HASH0_BASE (U5500_PER6_BASE + 0x1000)
+#define U5500_HASH1_BASE (U5500_PER6_BASE + 0x2000)
+#define U5500_PKA_BASE (U5500_PER6_BASE + 0x4000)
+#define U5500_PKAM_BASE (U5500_PER6_BASE + 0x5000)
+#define U5500_MTU0_BASE (U5500_PER6_BASE + 0x6000)
+#define U5500_MTU1_BASE (U5500_PER6_BASE + 0x7000)
+#define U5500_CR_BASE (U5500_PER6_BASE + 0x8000)
+#define U5500_CRYP0_BASE (U5500_PER6_BASE + 0xA000)
+#define U5500_CRYP1_BASE (U5500_PER6_BASE + 0xB000)
+#define U5500_CLKRST6_BASE (U5500_PER6_BASE + 0xF000)
+
+#define U5500_GPIOBANK0_BASE U5500_GPIO0_BASE
+#define U5500_GPIOBANK1_BASE (U5500_GPIO0_BASE + 0x80)
+#define U5500_GPIOBANK2_BASE U5500_GPIO1_BASE
+#define U5500_GPIOBANK3_BASE U5500_GPIO2_BASE
+#define U5500_GPIOBANK4_BASE U5500_GPIO3_BASE
+#define U5500_GPIOBANK5_BASE U5500_GPIO4_BASE
+#define U5500_GPIOBANK6_BASE (U5500_GPIO4_BASE + 0x80)
+#define U5500_GPIOBANK7_BASE (U5500_GPIO4_BASE + 0x100)
+
+#endif
diff --git a/arch/arm/mach-ux500/include/mach/db8500-regs.h b/arch/arm/mach-ux500/include/mach/db8500-regs.h
new file mode 100644
index 000000000000..9169e1e382a3
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/db8500-regs.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef __MACH_DB8500_REGS_H
+#define __MACH_DB8500_REGS_H
+
+#define U8500_PER3_BASE 0x80000000
+#define U8500_STM_BASE 0x80100000
+#define U8500_STM_REG_BASE (U8500_STM_BASE + 0xF000)
+#define U8500_PER2_BASE 0x80110000
+#define U8500_PER1_BASE 0x80120000
+#define U8500_B2R2_BASE 0x80130000
+#define U8500_HSEM_BASE 0x80140000
+#define U8500_PER4_BASE 0x80150000
+#define U8500_ICN_BASE 0x81000000
+
+#define U8500_BOOT_ROM_BASE 0x90000000
+/* ASIC ID is at 0xff4 offset within this region */
+#define U8500_ASIC_ID_BASE 0x9001F000
+
+#define U8500_PER6_BASE 0xa03c0000
+#define U8500_PER5_BASE 0xa03e0000
+#define U8500_PER7_BASE_ED 0xa03d0000
+
+#define U8500_SVA_BASE 0xa0100000
+#define U8500_SIA_BASE 0xa0200000
+
+#define U8500_SGA_BASE 0xa0300000
+#define U8500_MCDE_BASE 0xa0350000
+#define U8500_DMA_BASE_ED 0xa0362000
+#define U8500_DMA_BASE 0x801C0000 /* v1 */
+
+#define U8500_SBAG_BASE 0xa0390000
+
+#define U8500_SCU_BASE 0xa0410000
+#define U8500_GIC_CPU_BASE 0xa0410100
+#define U8500_TWD_BASE 0xa0410600
+#define U8500_GIC_DIST_BASE 0xa0411000
+#define U8500_L2CC_BASE 0xa0412000
+
+#define U8500_MODEM_I2C 0xb7e02000
+
+#define U8500_GPIO0_BASE (U8500_PER1_BASE + 0xE000)
+#define U8500_GPIO1_BASE (U8500_PER3_BASE + 0xE000)
+#define U8500_GPIO2_BASE (U8500_PER2_BASE + 0xE000)
+#define U8500_GPIO3_BASE (U8500_PER5_BASE + 0x1E000)
+
+/* per7 base addresses */
+#define U8500_CR_BASE_ED (U8500_PER7_BASE_ED + 0x8000)
+#define U8500_MTU0_BASE_ED (U8500_PER7_BASE_ED + 0xa000)
+#define U8500_MTU1_BASE_ED (U8500_PER7_BASE_ED + 0xb000)
+#define U8500_TZPC0_BASE_ED (U8500_PER7_BASE_ED + 0xc000)
+#define U8500_CLKRST7_BASE_ED (U8500_PER7_BASE_ED + 0xf000)
+
+#define U8500_UART0_BASE (U8500_PER1_BASE + 0x0000)
+#define U8500_UART1_BASE (U8500_PER1_BASE + 0x1000)
+
+/* per6 base addresses */
+#define U8500_RNG_BASE (U8500_PER6_BASE + 0x0000)
+#define U8500_PKA_BASE (U8500_PER6_BASE + 0x1000)
+#define U8500_PKAM_BASE (U8500_PER6_BASE + 0x2000)
+#define U8500_MTU0_BASE (U8500_PER6_BASE + 0x6000) /* v1 */
+#define U8500_MTU1_BASE (U8500_PER6_BASE + 0x7000) /* v1 */
+#define U8500_CR_BASE (U8500_PER6_BASE + 0x8000) /* v1 */
+#define U8500_CRYPTO0_BASE (U8500_PER6_BASE + 0xa000)
+#define U8500_CRYPTO1_BASE (U8500_PER6_BASE + 0xb000)
+#define U8500_CLKRST6_BASE (U8500_PER6_BASE + 0xf000)
+
+/* per5 base addresses */
+#define U8500_USBOTG_BASE (U8500_PER5_BASE + 0x00000)
+#define U8500_CLKRST5_BASE (U8500_PER5_BASE + 0x1f000)
+
+/* per4 base addresses */
+#define U8500_BACKUPRAM0_BASE (U8500_PER4_BASE + 0x00000)
+#define U8500_BACKUPRAM1_BASE (U8500_PER4_BASE + 0x01000)
+#define U8500_RTT0_BASE (U8500_PER4_BASE + 0x02000)
+#define U8500_RTT1_BASE (U8500_PER4_BASE + 0x03000)
+#define U8500_RTC_BASE (U8500_PER4_BASE + 0x04000)
+#define U8500_SCR_BASE (U8500_PER4_BASE + 0x05000)
+#define U8500_DMC_BASE (U8500_PER4_BASE + 0x06000)
+#define U8500_PRCMU_BASE (U8500_PER4_BASE + 0x07000)
+#define U8500_PRCMU_TCDM_BASE (U8500_PER4_BASE + 0x0f000)
+
+/* per3 base addresses */
+#define U8500_FSMC_BASE (U8500_PER3_BASE + 0x0000)
+#define U8500_SSP0_BASE (U8500_PER3_BASE + 0x2000)
+#define U8500_SSP1_BASE (U8500_PER3_BASE + 0x3000)
+#define U8500_I2C0_BASE (U8500_PER3_BASE + 0x4000)
+#define U8500_SDI2_BASE (U8500_PER3_BASE + 0x5000)
+#define U8500_SKE_BASE (U8500_PER3_BASE + 0x6000)
+#define U8500_UART2_BASE (U8500_PER3_BASE + 0x7000)
+#define U8500_SDI5_BASE (U8500_PER3_BASE + 0x8000)
+#define U8500_CLKRST3_BASE (U8500_PER3_BASE + 0xf000)
+
+/* per2 base addresses */
+#define U8500_I2C3_BASE (U8500_PER2_BASE + 0x0000)
+#define U8500_SPI2_BASE (U8500_PER2_BASE + 0x1000)
+#define U8500_SPI1_BASE (U8500_PER2_BASE + 0x2000)
+#define U8500_PWL_BASE (U8500_PER2_BASE + 0x3000)
+#define U8500_SDI4_BASE (U8500_PER2_BASE + 0x4000)
+#define U8500_MSP2_BASE (U8500_PER2_BASE + 0x7000)
+#define U8500_SDI1_BASE (U8500_PER2_BASE + 0x8000)
+#define U8500_SDI3_BASE (U8500_PER2_BASE + 0x9000)
+#define U8500_SPI0_BASE (U8500_PER2_BASE + 0xa000)
+#define U8500_HSIR_BASE (U8500_PER2_BASE + 0xb000)
+#define U8500_HSIT_BASE (U8500_PER2_BASE + 0xc000)
+#define U8500_CLKRST2_BASE (U8500_PER2_BASE + 0xf000)
+
+/* per1 base addresses */
+#define U8500_I2C1_BASE (U8500_PER1_BASE + 0x2000)
+#define U8500_MSP0_BASE (U8500_PER1_BASE + 0x3000)
+#define U8500_MSP1_BASE (U8500_PER1_BASE + 0x4000)
+#define U8500_SDI0_BASE (U8500_PER1_BASE + 0x6000)
+#define U8500_I2C2_BASE (U8500_PER1_BASE + 0x8000)
+#define U8500_SPI3_BASE (U8500_PER1_BASE + 0x9000)
+#define U8500_I2C4_BASE (U8500_PER1_BASE + 0xa000)
+#define U8500_SLIM0_BASE (U8500_PER1_BASE + 0xb000)
+#define U8500_CLKRST1_BASE (U8500_PER1_BASE + 0xf000)
+
+#define U8500_SHRM_GOP_INTERRUPT_BASE 0xB7C00040
+
+#define U8500_GPIOBANK0_BASE U8500_GPIO0_BASE
+#define U8500_GPIOBANK1_BASE (U8500_GPIO0_BASE + 0x80)
+#define U8500_GPIOBANK2_BASE U8500_GPIO1_BASE
+#define U8500_GPIOBANK3_BASE (U8500_GPIO1_BASE + 0x80)
+#define U8500_GPIOBANK4_BASE (U8500_GPIO1_BASE + 0x100)
+#define U8500_GPIOBANK5_BASE (U8500_GPIO1_BASE + 0x180)
+#define U8500_GPIOBANK6_BASE U8500_GPIO2_BASE
+#define U8500_GPIOBANK7_BASE (U8500_GPIO2_BASE + 0x80)
+#define U8500_GPIOBANK8_BASE U8500_GPIO3_BASE
+
+#endif
diff --git a/arch/arm/mach-ux500/include/mach/debug-macro.S b/arch/arm/mach-ux500/include/mach/debug-macro.S
index 09cbfda8aee5..c5203b7ea552 100644
--- a/arch/arm/mach-ux500/include/mach/debug-macro.S
+++ b/arch/arm/mach-ux500/include/mach/debug-macro.S
@@ -10,11 +10,19 @@
*/
#include <mach/hardware.h>
+#if CONFIG_UX500_DEBUG_UART > 2
+#error Invalid Ux500 debug UART
+#endif
+
+#define __UX500_UART(n) UX500_UART##n##_BASE
+#define UX500_UART(n) __UX500_UART(n)
+#define UART_BASE UX500_UART(CONFIG_UX500_DEBUG_UART)
+
.macro addruart, rx, tmp
mrc p15, 0, \rx, c1, c0
tst \rx, #1 @ MMU enabled?
- ldreq \rx, =U8500_UART2_BASE @ no, physical address
- ldrne \rx, =IO_ADDRESS(U8500_UART2_BASE) @ yes, virtual address
+ ldreq \rx, =UART_BASE @ no, physical address
+ ldrne \rx, =IO_ADDRESS(UART_BASE) @ yes, virtual address
.endm
#include <asm/hardware/debug-pl01x.S>
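
The two-level __UX500_UART()/UX500_UART() pair exists so that CONFIG_UX500_DEBUG_UART is macro-expanded to its digit before ## pastes the token; with a single level the paste would happen first and yield the nonexistent symbol UX500_UARTCONFIG_UX500_DEBUG_UART_BASE. A minimal stand-alone sketch of the same preprocessor idiom:

/* Sketch of the expand-then-paste idiom used above. */
#define UART_NUM	2			/* stands in for CONFIG_UX500_DEBUG_UART */

#define __PICK_UART(n)	UX500_UART##n##_BASE
#define PICK_UART(n)	__PICK_UART(n)		/* n is expanded here, then pasted */

/* PICK_UART(UART_NUM) -> __PICK_UART(2) -> UX500_UART2_BASE */
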
diff --git a/arch/arm/mach-ux500/include/mach/devices.h b/arch/arm/mach-ux500/include/mach/devices.h
new file mode 100644
index 000000000000..0422af00a56e
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/devices.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef __ASM_ARCH_DEVICES_H__
+#define __ASM_ARCH_DEVICES_H__
+
+struct platform_device;
+struct amba_device;
+
+extern struct platform_device u5500_gpio_devs[];
+extern struct platform_device u8500_gpio_devs[];
+
+extern struct amba_device ux500_pl031_device;
+extern struct amba_device u8500_ssp0_device;
+extern struct amba_device ux500_uart0_device;
+extern struct amba_device ux500_uart1_device;
+extern struct amba_device ux500_uart2_device;
+
+extern struct platform_device ux500_i2c1_device;
+extern struct platform_device ux500_i2c2_device;
+extern struct platform_device ux500_i2c3_device;
+
+extern struct platform_device u8500_i2c0_device;
+extern struct platform_device u8500_i2c4_device;
+
+#endif
diff --git a/arch/arm/mach-ux500/include/mach/entry-macro.S b/arch/arm/mach-ux500/include/mach/entry-macro.S
index eece3301fef7..60ea88db8283 100644
--- a/arch/arm/mach-ux500/include/mach/entry-macro.S
+++ b/arch/arm/mach-ux500/include/mach/entry-macro.S
@@ -17,7 +17,7 @@
.endm
.macro get_irqnr_preamble, base, tmp
- ldr \base, =IO_ADDRESS(U8500_GIC_CPU_BASE)
+ ldr \base, =IO_ADDRESS(UX500_GIC_CPU_BASE)
.endm
.macro arch_ret_to_user, tmp1, tmp2
diff --git a/arch/arm/mach-ux500/include/mach/gpio.h b/arch/arm/mach-ux500/include/mach/gpio.h
new file mode 100644
index 000000000000..d548a622e7d2
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/gpio.h
@@ -0,0 +1,50 @@
+#ifndef __ASM_ARCH_GPIO_H
+#define __ASM_ARCH_GPIO_H
+
+/*
+ * 288 (#267 is the highest one actually hooked up) onchip GPIOs, plus enough
+ * room for a couple of GPIO expanders.
+ */
+#define ARCH_NR_GPIOS 350
+
+#include <plat/gpio.h>
+
+#define __GPIO_RESOURCE(soc, block) \
+ { \
+ .start = soc##_GPIOBANK##block##_BASE, \
+ .end = soc##_GPIOBANK##block##_BASE + 127, \
+ .flags = IORESOURCE_MEM, \
+ }, \
+ { \
+ .start = IRQ_GPIO##block, \
+ .end = IRQ_GPIO##block, \
+ .flags = IORESOURCE_IRQ, \
+ }
+
+#define __GPIO_DEVICE(soc, block) \
+ { \
+ .name = "gpio", \
+ .id = block, \
+ .num_resources = 2, \
+ .resource = &soc##_gpio_resources[block * 2], \
+ .dev = { \
+ .platform_data = &soc##_gpio_data[block], \
+ }, \
+ }
+
+#define GPIO_DATA(_name, first) \
+ { \
+ .name = _name, \
+ .first_gpio = first, \
+ .first_irq = NOMADIK_GPIO_TO_IRQ(first), \
+ }
+
+#ifdef CONFIG_UX500_SOC_DB8500
+#define GPIO_RESOURCE(block) __GPIO_RESOURCE(U8500, block)
+#define GPIO_DEVICE(block) __GPIO_DEVICE(u8500, block)
+#elif defined(CONFIG_UX500_SOC_DB5500)
+#define GPIO_RESOURCE(block) __GPIO_RESOURCE(U5500, block)
+#define GPIO_DEVICE(block) __GPIO_DEVICE(u5500, block)
+#endif
+
+#endif /* __ASM_ARCH_GPIO_H */
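
For reference, with CONFIG_UX500_SOC_DB8500 selected, GPIO_DEVICE(0) expands to roughly the initializer below: bank 0 gets the MEM/IRQ resource pair starting at u8500_gpio_resources[0] and its nmk_gpio_platform_data entry. A sketch of that expansion:

/* Approximate expansion of GPIO_DEVICE(0) on a DB8500 build. */
{
	.name		= "gpio",
	.id		= 0,
	.num_resources	= 2,
	.resource	= &u8500_gpio_resources[0],	/* MEM + IRQ for bank 0 */
	.dev		= {
		.platform_data = &u8500_gpio_data[0],
	},
},
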
diff --git a/arch/arm/mach-ux500/include/mach/hardware.h b/arch/arm/mach-ux500/include/mach/hardware.h
index 04ea836969b3..8656379a8309 100644
--- a/arch/arm/mach-ux500/include/mach/hardware.h
+++ b/arch/arm/mach-ux500/include/mach/hardware.h
@@ -23,109 +23,106 @@
/* typesafe io address */
#define __io_address(n) __io(IO_ADDRESS(n))
+/* used by some plat-nomadik code */
+#define io_p2v(n) __io_address(n)
-/*
- * Base address definitions for U8500 Onchip IPs. All the
- * peripherals are contained in a single 1 Mbyte region, with
- * AHB peripherals at the bottom and APB peripherals at the
- * top of the region. PER stands for PERIPHERAL region which
- * itself divided into sub regions.
- */
-#define U8500_PER3_BASE 0x80000000
-#define U8500_PER2_BASE 0x80110000
-#define U8500_PER1_BASE 0x80120000
-#define U8500_PER4_BASE 0x80150000
-
-#define U8500_PER6_BASE 0xa03c0000
-#define U8500_PER5_BASE 0xa03e0000
-#define U8500_PER7_BASE 0xa03d0000
-
-#define U8500_SVA_BASE 0xa0100000
-#define U8500_SIA_BASE 0xa0200000
-
-#define U8500_SGA_BASE 0xa0300000
-#define U8500_MCDE_BASE 0xa0350000
-#define U8500_DMA_BASE 0xa0362000
-
-#define U8500_SCU_BASE 0xa0410000
-#define U8500_GIC_CPU_BASE 0xa0410100
-#define U8500_TWD_BASE 0xa0410600
-#define U8500_GIC_DIST_BASE 0xa0411000
-#define U8500_L2CC_BASE 0xa0412000
-
-#define U8500_TWD_SIZE 0x100
-
-/* per7 base addressess */
-#define U8500_CR_BASE (U8500_PER7_BASE + 0x8000)
-#define U8500_MTU0_BASE (U8500_PER7_BASE + 0xa000)
-#define U8500_MTU1_BASE (U8500_PER7_BASE + 0xb000)
-#define U8500_TZPC0_BASE (U8500_PER7_BASE + 0xc000)
-#define U8500_CLKRST7_BASE (U8500_PER7_BASE + 0xf000)
-
-/* per6 base addressess */
-#define U8500_RNG_BASE (U8500_PER6_BASE + 0x0000)
-#define U8500_PKA_BASE (U8500_PER6_BASE + 0x1000)
-#define U8500_PKAM_BASE (U8500_PER6_BASE + 0x2000)
-#define U8500_CRYPTO0_BASE (U8500_PER6_BASE + 0xa000)
-#define U8500_CRYPTO1_BASE (U8500_PER6_BASE + 0xb000)
-#define U8500_CLKRST6_BASE (U8500_PER6_BASE + 0xf000)
-
-/* per5 base addressess */
-#define U8500_USBOTG_BASE (U8500_PER5_BASE + 0x00000)
-#define U8500_GPIO5_BASE (U8500_PER5_BASE + 0x1e000)
-#define U8500_CLKRST5_BASE (U8500_PER5_BASE + 0x1f000)
-
-/* per4 base addressess */
-#define U8500_BACKUPRAM0_BASE (U8500_PER4_BASE + 0x0000)
-#define U8500_BACKUPRAM1_BASE (U8500_PER4_BASE + 0x1000)
-#define U8500_RTT0_BASE (U8500_PER4_BASE + 0x2000)
-#define U8500_RTT1_BASE (U8500_PER4_BASE + 0x3000)
-#define U8500_RTC_BASE (U8500_PER4_BASE + 0x4000)
-#define U8500_SCR_BASE (U8500_PER4_BASE + 0x5000)
-#define U8500_DMC_BASE (U8500_PER4_BASE + 0x6000)
-#define U8500_PRCMU_BASE (U8500_PER4_BASE + 0x7000)
-
-/* per3 base addressess */
-#define U8500_FSMC_BASE (U8500_PER3_BASE + 0x0000)
-#define U8500_SSP0_BASE (U8500_PER3_BASE + 0x2000)
-#define U8500_SSP1_BASE (U8500_PER3_BASE + 0x3000)
-#define U8500_I2C0_BASE (U8500_PER3_BASE + 0x4000)
-#define U8500_SDI2_BASE (U8500_PER3_BASE + 0x5000)
-#define U8500_SKE_BASE (U8500_PER3_BASE + 0x6000)
-#define U8500_UART2_BASE (U8500_PER3_BASE + 0x7000)
-#define U8500_SDI5_BASE (U8500_PER3_BASE + 0x8000)
-#define U8500_GPIO3_BASE (U8500_PER3_BASE + 0xe000)
-#define U8500_CLKRST3_BASE (U8500_PER3_BASE + 0xf000)
-
-/* per2 base addressess */
-#define U8500_I2C3_BASE (U8500_PER2_BASE + 0x0000)
-#define U8500_SPI2_BASE (U8500_PER2_BASE + 0x1000)
-#define U8500_SPI1_BASE (U8500_PER2_BASE + 0x2000)
-#define U8500_PWL_BASE (U8500_PER2_BASE + 0x3000)
-#define U8500_SDI4_BASE (U8500_PER2_BASE + 0x4000)
-#define U8500_MSP2_BASE (U8500_PER2_BASE + 0x7000)
-#define U8500_SDI1_BASE (U8500_PER2_BASE + 0x8000)
-#define U8500_SDI3_BASE (U8500_PER2_BASE + 0x9000)
-#define U8500_SPI0_BASE (U8500_PER2_BASE + 0xa000)
-#define U8500_HSIR_BASE (U8500_PER2_BASE + 0xb000)
-#define U8500_HSIT_BASE (U8500_PER2_BASE + 0xc000)
-#define U8500_GPIO2_BASE (U8500_PER2_BASE + 0xe000)
-#define U8500_CLKRST2_BASE (U8500_PER2_BASE + 0xf000)
-
-/* per1 base addresses */
-#define U8500_UART0_BASE (U8500_PER1_BASE + 0x0000)
-#define U8500_UART1_BASE (U8500_PER1_BASE + 0x1000)
-#define U8500_I2C1_BASE (U8500_PER1_BASE + 0x2000)
-#define U8500_MSP0_BASE (U8500_PER1_BASE + 0x3000)
-#define U8500_MSP1_BASE (U8500_PER1_BASE + 0x4000)
-#define U8500_SDI0_BASE (U8500_PER1_BASE + 0x6000)
-#define U8500_I2C2_BASE (U8500_PER1_BASE + 0x8000)
-#define U8500_SPI3_BASE (U8500_PER1_BASE + 0x9000)
-#define U8500_SLIM0_BASE (U8500_PER1_BASE + 0xa000)
-#define U8500_GPIO1_BASE (U8500_PER1_BASE + 0xe000)
-#define U8500_CLKRST1_BASE (U8500_PER1_BASE + 0xf000)
+#include <mach/db8500-regs.h>
+#include <mach/db5500-regs.h>
+
+#ifdef CONFIG_UX500_SOC_DB8500
+#define UX500(periph) U8500_##periph##_BASE
+#elif defined(CONFIG_UX500_SOC_DB5500)
+#define UX500(periph) U5500_##periph##_BASE
+#endif
+
+#define UX500_BACKUPRAM0_BASE UX500(BACKUPRAM0)
+#define UX500_BACKUPRAM1_BASE UX500(BACKUPRAM1)
+#define UX500_B2R2_BASE UX500(B2R2)
+
+#define UX500_CLKRST1_BASE UX500(CLKRST1)
+#define UX500_CLKRST2_BASE UX500(CLKRST2)
+#define UX500_CLKRST3_BASE UX500(CLKRST3)
+#define UX500_CLKRST5_BASE UX500(CLKRST5)
+#define UX500_CLKRST6_BASE UX500(CLKRST6)
+
+#define UX500_DMA_BASE UX500(DMA)
+#define UX500_FSMC_BASE UX500(FSMC)
+
+#define UX500_GIC_CPU_BASE UX500(GIC_CPU)
+#define UX500_GIC_DIST_BASE UX500(GIC_DIST)
+
+#define UX500_I2C1_BASE UX500(I2C1)
+#define UX500_I2C2_BASE UX500(I2C2)
+#define UX500_I2C3_BASE UX500(I2C3)
+
+#define UX500_L2CC_BASE UX500(L2CC)
+#define UX500_MCDE_BASE UX500(MCDE)
+#define UX500_MTU0_BASE UX500(MTU0)
+#define UX500_MTU1_BASE UX500(MTU1)
+#define UX500_PRCMU_BASE UX500(PRCMU)
+
+#define UX500_RNG_BASE UX500(RNG)
+#define UX500_RTC_BASE UX500(RTC)
+
+#define UX500_SCU_BASE UX500(SCU)
+
+#define UX500_SDI0_BASE UX500(SDI0)
+#define UX500_SDI1_BASE UX500(SDI1)
+#define UX500_SDI2_BASE UX500(SDI2)
+#define UX500_SDI3_BASE UX500(SDI3)
+#define UX500_SDI4_BASE UX500(SDI4)
+
+#define UX500_SPI0_BASE UX500(SPI0)
+#define UX500_SPI1_BASE UX500(SPI1)
+#define UX500_SPI2_BASE UX500(SPI2)
+#define UX500_SPI3_BASE UX500(SPI3)
+
+#define UX500_SIA_BASE UX500(SIA)
+#define UX500_SVA_BASE UX500(SVA)
+
+#define UX500_TWD_BASE UX500(TWD)
+
+#define UX500_UART0_BASE UX500(UART0)
+#define UX500_UART1_BASE UX500(UART1)
+#define UX500_UART2_BASE UX500(UART2)
+
+#define UX500_USBOTG_BASE UX500(USBOTG)
/* ST-Ericsson modified pl022 id */
#define SSP_PER_ID 0x01080022
+#ifndef __ASSEMBLY__
+
+#include <asm/cputype.h>
+
+static inline bool cpu_is_u8500(void)
+{
+#ifdef CONFIG_UX500_SOC_DB8500
+ return 1;
+#else
+ return 0;
+#endif
+}
+
+static inline bool cpu_is_u8500ed(void)
+{
+ return cpu_is_u8500() && (read_cpuid_id() & 15) == 0;
+}
+
+static inline bool cpu_is_u8500v1(void)
+{
+ return cpu_is_u8500() && (read_cpuid_id() & 15) == 1;
+}
+
+static inline bool cpu_is_u5500(void)
+{
+#ifdef CONFIG_UX500_SOC_DB5500
+ return 1;
+#else
+ return 0;
+#endif
+}
+
+#endif
+
#endif /* __MACH_HARDWARE_H */
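
The UX500() helper makes each UX500_*_BASE alias resolve to the DB8500 or DB5500 physical address at compile time, while cpu_is_u8500ed()/cpu_is_u8500v1() tell early-drop from v1 silicon by the low revision nibble of the MIDR. A hedged sketch of a caller, mirroring what ux500_timer_init() does with the MTU base:

/* Sketch: choose the timer base the way ux500_timer_init() does. */
static void __iomem *pick_mtu_base(void)
{
	if (cpu_is_u8500ed())			/* MIDR revision nibble == 0 */
		return __io_address(U8500_MTU0_BASE_ED);

	return __io_address(UX500_MTU0_BASE);	/* v1 / DB5500 address map */
}
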
diff --git a/arch/arm/mach-ux500/include/mach/irqs.h b/arch/arm/mach-ux500/include/mach/irqs.h
index 394b5dd2200f..7970684b1d09 100644
--- a/arch/arm/mach-ux500/include/mach/irqs.h
+++ b/arch/arm/mach-ux500/include/mach/irqs.h
@@ -42,6 +42,7 @@
#define IRQ_AB4500 (IRQ_SHPI_START + 40)
#define IRQ_DISP (IRQ_SHPI_START + 48)
#define IRQ_SiPI3 (IRQ_SHPI_START + 49)
+#define IRQ_I2C4 (IRQ_SHPI_START + 51)
#define IRQ_SSP1 (IRQ_SHPI_START + 52)
#define IRQ_I2C2 (IRQ_SHPI_START + 55)
#define IRQ_SDMMC0 (IRQ_SHPI_START + 60)
@@ -66,6 +67,12 @@
/* There are 128 shared peripheral interrupts assigned to
* INTID[160:32]. The first 32 interrupts are reserved.
*/
-#define NR_IRQS 161
+#define U8500_SOC_NR_IRQS 161
+
+/* After chip-specific IRQ numbers we have the GPIO ones */
+#define NOMADIK_NR_GPIO 288
+#define NOMADIK_GPIO_TO_IRQ(gpio) ((gpio) + U8500_SOC_NR_IRQS)
+#define NOMADIK_IRQ_TO_GPIO(irq) ((irq) - U8500_SOC_NR_IRQS)
+#define NR_IRQS NOMADIK_GPIO_TO_IRQ(NOMADIK_NR_GPIO)
#endif /*ASM_ARCH_IRQS_H*/
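
The IRQ space is now the 161 SoC interrupts followed by one virtual IRQ per GPIO, so NOMADIK_GPIO_TO_IRQ(42) is 42 + 161 = 203 and NR_IRQS works out to 288 + 161 = 449. A tiny sketch of the mapping:

/* Sketch of the GPIO <-> IRQ arithmetic defined above. */
int irq  = NOMADIK_GPIO_TO_IRQ(42);	/* 42 + 161 = 203 */
int gpio = NOMADIK_IRQ_TO_GPIO(irq);	/* back to 42 */
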
diff --git a/arch/arm/mach-ux500/include/mach/setup.h b/arch/arm/mach-ux500/include/mach/setup.h
index cf0ce1687f24..e978dbd9e210 100644
--- a/arch/arm/mach-ux500/include/mach/setup.h
+++ b/arch/arm/mach-ux500/include/mach/setup.h
@@ -14,10 +14,28 @@
#include <asm/mach/time.h>
#include <linux/init.h>
-extern void u8500_map_io(void);
-extern void u8500_init_devices(void);
-extern void u8500_init_irq(void);
+extern void __init ux500_map_io(void);
+extern void __init u5500_map_io(void);
+extern void __init u8500_map_io(void);
+
+extern void __init ux500_init_devices(void);
+extern void __init u5500_init_devices(void);
+extern void __init u8500_init_devices(void);
+
+extern void __init ux500_init_irq(void);
/* We re-use nomadik_timer for this platform */
extern void nmdk_timer_init(void);
+extern void __init amba_add_devices(struct amba_device *devs[], int num);
+
+struct sys_timer;
+extern struct sys_timer ux500_timer;
+
+#define __IO_DEV_DESC(x, sz) { \
+ .virtual = IO_ADDRESS(x), \
+ .pfn = __phys_to_pfn(x), \
+ .length = sz, \
+ .type = MT_DEVICE, \
+}
+
#endif /* __ASM_ARCH_SETUP_H */
diff --git a/arch/arm/mach-ux500/platsmp.c b/arch/arm/mach-ux500/platsmp.c
index 8dfe7ca245d8..438ef16aec90 100644
--- a/arch/arm/mach-ux500/platsmp.c
+++ b/arch/arm/mach-ux500/platsmp.c
@@ -30,7 +30,7 @@ volatile int __cpuinitdata pen_release = -1;
static unsigned int __init get_core_count(void)
{
- return scu_get_core_count(__io_address(U8500_SCU_BASE));
+ return scu_get_core_count(__io_address(UX500_SCU_BASE));
}
static DEFINE_SPINLOCK(boot_lock);
@@ -44,7 +44,7 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
* core (e.g. timer irq), then they will not have been enabled
* for us: do so
*/
- gic_cpu_init(0, __io_address(U8500_GIC_CPU_BASE));
+ gic_cpu_init(0, __io_address(UX500_GIC_CPU_BASE));
/*
* let the primary processor know we're out of the
@@ -75,7 +75,8 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
* that it has been released by resetting pen_release.
*/
pen_release = cpu;
- flush_cache_all();
+ __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
+ outer_clean_range(__pa(&pen_release), __pa(&pen_release) + 1);
timeout = jiffies + (1 * HZ);
while (time_before(jiffies, timeout)) {
@@ -105,12 +106,12 @@ static void __init wakeup_secondary(void)
*/
#define U8500_CPU1_JUMPADDR_OFFSET 0x1FF4
__raw_writel(virt_to_phys(u8500_secondary_startup),
- (void __iomem *)IO_ADDRESS(U8500_BACKUPRAM0_BASE) +
+ __io_address(UX500_BACKUPRAM0_BASE) +
U8500_CPU1_JUMPADDR_OFFSET);
#define U8500_CPU1_WAKEMAGIC_OFFSET 0x1FF0
__raw_writel(0xA1FEED01,
- (void __iomem *)IO_ADDRESS(U8500_BACKUPRAM0_BASE) +
+ __io_address(UX500_BACKUPRAM0_BASE) +
U8500_CPU1_WAKEMAGIC_OFFSET);
/* make sure write buffer is drained */
@@ -171,7 +172,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
* boot CPU, but only if we have more than one CPU.
*/
percpu_timer_setup();
- scu_enable(__io_address(U8500_SCU_BASE));
+ scu_enable(__io_address(UX500_SCU_BASE));
wakeup_secondary();
}
}
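
Replacing flush_cache_all() with a D-cache flush of pen_release plus an outer-cache clean is about visibility: the secondary core spins on pen_release from its holding pen before it is cache-coherent, so the new value must actually reach memory, past the L2 as well. A rough C rendering of that holding-pen loop, which is really the assembly in headsmp.S:

/* Sketch only: what the secondary CPU does while held in the pen. */
static void holding_pen_wait(int this_cpu)
{
	while (pen_release != this_cpu)
		;	/* pen_release is volatile, so this re-reads memory */

	/* released by boot_secondary(): continue into secondary_startup() */
}
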
diff --git a/arch/arm/mach-versatile/Makefile b/arch/arm/mach-versatile/Makefile
index ba81e70ed813..97cf4d831b0c 100644
--- a/arch/arm/mach-versatile/Makefile
+++ b/arch/arm/mach-versatile/Makefile
@@ -2,7 +2,7 @@
# Makefile for the linux kernel.
#
-obj-y := core.o clock.o
+obj-y := core.o
obj-$(CONFIG_ARCH_VERSATILE_PB) += versatile_pb.o
obj-$(CONFIG_MACH_VERSATILE_AB) += versatile_ab.o
obj-$(CONFIG_PCI) += pci.o
diff --git a/arch/arm/mach-versatile/clock.c b/arch/arm/mach-versatile/clock.c
deleted file mode 100644
index c50a44ea7ee6..000000000000
--- a/arch/arm/mach-versatile/clock.c
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * linux/arch/arm/mach-versatile/clock.c
- *
- * Copyright (C) 2004 ARM Limited.
- * Written by Deep Blue Solutions Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/list.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/string.h>
-#include <linux/clk.h>
-#include <linux/mutex.h>
-
-#include <asm/clkdev.h>
-#include <asm/hardware/icst307.h>
-
-#include "clock.h"
-
-int clk_enable(struct clk *clk)
-{
- return 0;
-}
-EXPORT_SYMBOL(clk_enable);
-
-void clk_disable(struct clk *clk)
-{
-}
-EXPORT_SYMBOL(clk_disable);
-
-unsigned long clk_get_rate(struct clk *clk)
-{
- return clk->rate;
-}
-EXPORT_SYMBOL(clk_get_rate);
-
-long clk_round_rate(struct clk *clk, unsigned long rate)
-{
- struct icst307_vco vco;
- vco = icst307_khz_to_vco(clk->params, rate / 1000);
- return icst307_khz(clk->params, vco) * 1000;
-}
-EXPORT_SYMBOL(clk_round_rate);
-
-int clk_set_rate(struct clk *clk, unsigned long rate)
-{
- int ret = -EIO;
-
- if (clk->setvco) {
- struct icst307_vco vco;
-
- vco = icst307_khz_to_vco(clk->params, rate / 1000);
- clk->rate = icst307_khz(clk->params, vco) * 1000;
- clk->setvco(clk, vco);
- ret = 0;
- }
- return ret;
-}
-EXPORT_SYMBOL(clk_set_rate);
diff --git a/arch/arm/mach-versatile/clock.h b/arch/arm/mach-versatile/clock.h
deleted file mode 100644
index 03468fdc3e58..000000000000
--- a/arch/arm/mach-versatile/clock.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * linux/arch/arm/mach-versatile/clock.h
- *
- * Copyright (C) 2004 ARM Limited.
- * Written by Deep Blue Solutions Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-struct module;
-struct icst307_params;
-
-struct clk {
- unsigned long rate;
- const struct icst307_params *params;
- u32 oscoff;
- void *data;
- void (*setvco)(struct clk *, struct icst307_vco vco);
-};
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
index 3b1a4ee01815..3dff8641b03f 100644
--- a/arch/arm/mach-versatile/core.c
+++ b/arch/arm/mach-versatile/core.c
@@ -28,19 +28,15 @@
#include <linux/amba/clcd.h>
#include <linux/amba/pl061.h>
#include <linux/amba/mmci.h>
-#include <linux/clocksource.h>
-#include <linux/clockchips.h>
-#include <linux/cnt32_to_63.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <asm/clkdev.h>
#include <asm/system.h>
-#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/leds.h>
#include <asm/hardware/arm_timer.h>
-#include <asm/hardware/icst307.h>
+#include <asm/hardware/icst.h>
#include <asm/hardware/vic.h>
#include <asm/mach-types.h>
@@ -49,9 +45,12 @@
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/mach/map.h>
+#include <mach/clkdev.h>
+#include <mach/hardware.h>
+#include <mach/platform.h>
+#include <plat/timer-sp.h>
#include "core.h"
-#include "clock.h"
/*
* All IO addresses are mapped onto VA 0xFFFx.xxxx, where x.xxxx
@@ -59,7 +58,6 @@
*
* Setup a VA for the Versatile Vectored Interrupt Controller.
*/
-#define __io_address(n) __io(IO_ADDRESS(n))
#define VA_VIC_BASE __io_address(VERSATILE_VIC_BASE)
#define VA_SIC_BASE __io_address(VERSATILE_SIC_BASE)
@@ -229,27 +227,6 @@ void __init versatile_map_io(void)
iotable_init(versatile_io_desc, ARRAY_SIZE(versatile_io_desc));
}
-#define VERSATILE_REFCOUNTER (__io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_24MHz_OFFSET)
-
-/*
- * This is the Versatile sched_clock implementation. This has
- * a resolution of 41.7ns, and a maximum value of about 35583 days.
- *
- * The return value is guaranteed to be monotonic in that range as
- * long as there is always less than 89 seconds between successive
- * calls to this function.
- */
-unsigned long long sched_clock(void)
-{
- unsigned long long v = cnt32_to_63(readl(VERSATILE_REFCOUNTER));
-
- /* the <<1 gets rid of the cnt_32_to_63 top bit saving on a bic insn */
- v *= 125<<1;
- do_div(v, 3<<1);
-
- return v;
-}
-
#define VERSATILE_FLASHCTRL (__io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_FLASH_OFFSET)
@@ -380,33 +357,40 @@ static struct mmci_platform_data mmc0_plat_data = {
/*
* Clock handling
*/
-static const struct icst307_params versatile_oscvco_params = {
- .ref = 24000,
- .vco_max = 200000,
+static const struct icst_params versatile_oscvco_params = {
+ .ref = 24000000,
+ .vco_max = ICST307_VCO_MAX,
+ .vco_min = ICST307_VCO_MIN,
.vd_min = 4 + 8,
.vd_max = 511 + 8,
.rd_min = 1 + 2,
.rd_max = 127 + 2,
+ .s2div = icst307_s2div,
+ .idx2s = icst307_idx2s,
};
-static void versatile_oscvco_set(struct clk *clk, struct icst307_vco vco)
+static void versatile_oscvco_set(struct clk *clk, struct icst_vco vco)
{
- void __iomem *sys = __io_address(VERSATILE_SYS_BASE);
- void __iomem *sys_lock = sys + VERSATILE_SYS_LOCK_OFFSET;
+ void __iomem *sys_lock = __io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_LOCK_OFFSET;
u32 val;
- val = readl(sys + clk->oscoff) & ~0x7ffff;
+ val = readl(clk->vcoreg) & ~0x7ffff;
val |= vco.v | (vco.r << 9) | (vco.s << 16);
writel(0xa05f, sys_lock);
- writel(val, sys + clk->oscoff);
+ writel(val, clk->vcoreg);
writel(0, sys_lock);
}
+static const struct clk_ops osc4_clk_ops = {
+ .round = icst_clk_round,
+ .set = icst_clk_set,
+ .setvco = versatile_oscvco_set,
+};
+
static struct clk osc4_clk = {
+ .ops = &osc4_clk_ops,
.params = &versatile_oscvco_params,
- .oscoff = VERSATILE_SYS_OSCCLCD_OFFSET,
- .setvco = versatile_oscvco_set,
};
/*
@@ -852,6 +836,8 @@ void __init versatile_init(void)
{
int i;
+ osc4_clk.vcoreg = __io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_OSCCLCD_OFFSET;
+
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
platform_device_register(&versatile_flash_device);
@@ -875,120 +861,6 @@ void __init versatile_init(void)
#define TIMER1_VA_BASE (__io_address(VERSATILE_TIMER0_1_BASE) + 0x20)
#define TIMER2_VA_BASE __io_address(VERSATILE_TIMER2_3_BASE)
#define TIMER3_VA_BASE (__io_address(VERSATILE_TIMER2_3_BASE) + 0x20)
-#define VA_IC_BASE __io_address(VERSATILE_VIC_BASE)
-
-/*
- * How long is the timer interval?
- */
-#define TIMER_INTERVAL (TICKS_PER_uSEC * mSEC_10)
-#if TIMER_INTERVAL >= 0x100000
-#define TIMER_RELOAD (TIMER_INTERVAL >> 8)
-#define TIMER_DIVISOR (TIMER_CTRL_DIV256)
-#define TICKS2USECS(x) (256 * (x) / TICKS_PER_uSEC)
-#elif TIMER_INTERVAL >= 0x10000
-#define TIMER_RELOAD (TIMER_INTERVAL >> 4) /* Divide by 16 */
-#define TIMER_DIVISOR (TIMER_CTRL_DIV16)
-#define TICKS2USECS(x) (16 * (x) / TICKS_PER_uSEC)
-#else
-#define TIMER_RELOAD (TIMER_INTERVAL)
-#define TIMER_DIVISOR (TIMER_CTRL_DIV1)
-#define TICKS2USECS(x) ((x) / TICKS_PER_uSEC)
-#endif
-
-static void timer_set_mode(enum clock_event_mode mode,
- struct clock_event_device *clk)
-{
- unsigned long ctrl;
-
- switch(mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- writel(TIMER_RELOAD, TIMER0_VA_BASE + TIMER_LOAD);
-
- ctrl = TIMER_CTRL_PERIODIC;
- ctrl |= TIMER_CTRL_32BIT | TIMER_CTRL_IE | TIMER_CTRL_ENABLE;
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- /* period set, and timer enabled in 'next_event' hook */
- ctrl = TIMER_CTRL_ONESHOT;
- ctrl |= TIMER_CTRL_32BIT | TIMER_CTRL_IE;
- break;
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- default:
- ctrl = 0;
- }
-
- writel(ctrl, TIMER0_VA_BASE + TIMER_CTRL);
-}
-
-static int timer_set_next_event(unsigned long evt,
- struct clock_event_device *unused)
-{
- unsigned long ctrl = readl(TIMER0_VA_BASE + TIMER_CTRL);
-
- writel(evt, TIMER0_VA_BASE + TIMER_LOAD);
- writel(ctrl | TIMER_CTRL_ENABLE, TIMER0_VA_BASE + TIMER_CTRL);
-
- return 0;
-}
-
-static struct clock_event_device timer0_clockevent = {
- .name = "timer0",
- .shift = 32,
- .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .set_mode = timer_set_mode,
- .set_next_event = timer_set_next_event,
-};
-
-/*
- * IRQ handler for the timer
- */
-static irqreturn_t versatile_timer_interrupt(int irq, void *dev_id)
-{
- struct clock_event_device *evt = &timer0_clockevent;
-
- writel(1, TIMER0_VA_BASE + TIMER_INTCLR);
-
- evt->event_handler(evt);
-
- return IRQ_HANDLED;
-}
-
-static struct irqaction versatile_timer_irq = {
- .name = "Versatile Timer Tick",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
- .handler = versatile_timer_interrupt,
-};
-
-static cycle_t versatile_get_cycles(struct clocksource *cs)
-{
- return ~readl(TIMER3_VA_BASE + TIMER_VALUE);
-}
-
-static struct clocksource clocksource_versatile = {
- .name = "timer3",
- .rating = 200,
- .read = versatile_get_cycles,
- .mask = CLOCKSOURCE_MASK(32),
- .shift = 20,
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-static int __init versatile_clocksource_init(void)
-{
- /* setup timer3 as free-running clocksource */
- writel(0, TIMER3_VA_BASE + TIMER_CTRL);
- writel(0xffffffff, TIMER3_VA_BASE + TIMER_LOAD);
- writel(0xffffffff, TIMER3_VA_BASE + TIMER_VALUE);
- writel(TIMER_CTRL_32BIT | TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC,
- TIMER3_VA_BASE + TIMER_CTRL);
-
- clocksource_versatile.mult =
- clocksource_khz2mult(1000, clocksource_versatile.shift);
- clocksource_register(&clocksource_versatile);
-
- return 0;
-}
/*
* Set up timer interrupt, and return the current time in seconds.
@@ -1017,22 +889,8 @@ static void __init versatile_timer_init(void)
writel(0, TIMER2_VA_BASE + TIMER_CTRL);
writel(0, TIMER3_VA_BASE + TIMER_CTRL);
- /*
- * Make irqs happen for the system timer
- */
- setup_irq(IRQ_TIMERINT0_1, &versatile_timer_irq);
-
- versatile_clocksource_init();
-
- timer0_clockevent.mult =
- div_sc(1000000, NSEC_PER_SEC, timer0_clockevent.shift);
- timer0_clockevent.max_delta_ns =
- clockevent_delta2ns(0xffffffff, &timer0_clockevent);
- timer0_clockevent.min_delta_ns =
- clockevent_delta2ns(0xf, &timer0_clockevent);
-
- timer0_clockevent.cpumask = cpumask_of(0);
- clockevents_register_device(&timer0_clockevent);
+ sp804_clocksource_init(TIMER3_VA_BASE);
+ sp804_clockevents_init(TIMER0_VA_BASE, IRQ_TIMERINT0_1);
}
struct sys_timer versatile_timer = {
diff --git a/arch/arm/mach-versatile/include/mach/clkdev.h b/arch/arm/mach-versatile/include/mach/clkdev.h
index 04b37a89801c..e58d0771b64e 100644
--- a/arch/arm/mach-versatile/include/mach/clkdev.h
+++ b/arch/arm/mach-versatile/include/mach/clkdev.h
@@ -1,6 +1,15 @@
#ifndef __ASM_MACH_CLKDEV_H
#define __ASM_MACH_CLKDEV_H
+#include <plat/clock.h>
+
+struct clk {
+ unsigned long rate;
+ const struct clk_ops *ops;
+ const struct icst_params *params;
+ void __iomem *vcoreg;
+};
+
#define __clk_get(clk) ({ 1; })
#define __clk_put(clk) do { } while (0)
diff --git a/arch/arm/mach-versatile/include/mach/entry-macro.S b/arch/arm/mach-versatile/include/mach/entry-macro.S
index 8c8020980585..e6f7c1663160 100644
--- a/arch/arm/mach-versatile/include/mach/entry-macro.S
+++ b/arch/arm/mach-versatile/include/mach/entry-macro.S
@@ -8,6 +8,7 @@
* warranty of any kind, whether express or implied.
*/
#include <mach/hardware.h>
+#include <mach/platform.h>
#include <asm/hardware/vic.h>
.macro disable_fiq
diff --git a/arch/arm/mach-versatile/include/mach/hardware.h b/arch/arm/mach-versatile/include/mach/hardware.h
index 7aa906c93154..4f8f99aac938 100644
--- a/arch/arm/mach-versatile/include/mach/hardware.h
+++ b/arch/arm/mach-versatile/include/mach/hardware.h
@@ -23,7 +23,6 @@
#define __ASM_ARCH_HARDWARE_H
#include <asm/sizes.h>
-#include <mach/platform.h>
/*
* PCI space virtual addresses
@@ -49,4 +48,6 @@
/* macro to get at IO space when running virtually */
#define IO_ADDRESS(x) (((x) & 0x0fffffff) + (((x) >> 4) & 0x0f000000) + 0xf0000000)
+#define __io_address(n) __io(IO_ADDRESS(n))
+
#endif
diff --git a/arch/arm/mach-versatile/include/mach/platform.h b/arch/arm/mach-versatile/include/mach/platform.h
index 83207395191a..ec087407b163 100644
--- a/arch/arm/mach-versatile/include/mach/platform.h
+++ b/arch/arm/mach-versatile/include/mach/platform.h
@@ -205,7 +205,7 @@
#define VERSATILE_CLCD_BASE 0x10120000 /* CLCD */
#define VERSATILE_DMAC_BASE 0x10130000 /* DMA controller */
#define VERSATILE_VIC_BASE 0x10140000 /* Vectored interrupt controller */
-#define VERSATILE_PERIPH_BASE 0x10150000 /* off-chip peripherals alias from */
+#define VERSATILE_PERIPH_BASE 0x10150000 /* off-chip peripherals alias from */
/* 0x10000000 - 0x100FFFFF */
#define VERSATILE_AHBM_BASE 0x101D0000 /* AHB monitor */
#define VERSATILE_SCTL_BASE 0x101E0000 /* System controller */
@@ -213,7 +213,7 @@
#define VERSATILE_TIMER0_1_BASE 0x101E2000 /* Timer 0 and 1 */
#define VERSATILE_TIMER2_3_BASE 0x101E3000 /* Timer 2 and 3 */
#define VERSATILE_GPIO0_BASE 0x101E4000 /* GPIO port 0 */
-#define VERSATILE_GPIO1_BASE 0x101E5000 /* GPIO port 1 */
+#define VERSATILE_GPIO1_BASE 0x101E5000 /* GPIO port 1 */
#define VERSATILE_GPIO2_BASE 0x101E6000 /* GPIO port 2 */
#define VERSATILE_GPIO3_BASE 0x101E7000 /* GPIO port 3 */
#define VERSATILE_RTC_BASE 0x101E8000 /* Real Time Clock */
@@ -379,12 +379,6 @@
#define SIC_INT_PCI3 30
-/*
- * Clean base - dummy
- *
- */
-#define CLEAN_BASE VERSATILE_BOOT_ROM_HI
-
/*
* System controller bit assignment
*/
@@ -397,20 +391,6 @@
#define VERSATILE_TIMER4_EnSel 21
-#define MAX_TIMER 2
-#define MAX_PERIOD 699050
-#define TICKS_PER_uSEC 1
-
-/*
- * These are useconds NOT ticks.
- *
- */
-#define mSEC_1 1000
-#define mSEC_5 (mSEC_1 * 5)
-#define mSEC_10 (mSEC_1 * 10)
-#define mSEC_25 (mSEC_1 * 25)
-#define SEC_1 (mSEC_1 * 1000)
-
#define VERSATILE_CSR_BASE 0x10000000
#define VERSATILE_CSR_SIZE 0x10000000
@@ -432,5 +412,3 @@
#endif
#endif
-
-/* END */
diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig
new file mode 100644
index 000000000000..3f19b660a165
--- /dev/null
+++ b/arch/arm/mach-vexpress/Kconfig
@@ -0,0 +1,9 @@
+menu "Versatile Express platform type"
+ depends on ARCH_VEXPRESS
+
+config ARCH_VEXPRESS_CA9X4
+ bool "Versatile Express Cortex-A9x4 tile"
+ select CPU_V7
+ select ARM_GIC
+
+endmenu
diff --git a/arch/arm/mach-vexpress/Makefile b/arch/arm/mach-vexpress/Makefile
new file mode 100644
index 000000000000..1b71b77ade22
--- /dev/null
+++ b/arch/arm/mach-vexpress/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the linux kernel.
+#
+
+obj-y := v2m.o
+obj-$(CONFIG_ARCH_VEXPRESS_CA9X4) += ct-ca9x4.o
+obj-$(CONFIG_SMP) += platsmp.o headsmp.o
+obj-$(CONFIG_LOCAL_TIMERS) += localtimer.o
diff --git a/arch/arm/mach-vexpress/Makefile.boot b/arch/arm/mach-vexpress/Makefile.boot
new file mode 100644
index 000000000000..07c2d9c457ec
--- /dev/null
+++ b/arch/arm/mach-vexpress/Makefile.boot
@@ -0,0 +1,3 @@
+ zreladdr-y := 0x60008000
+params_phys-y := 0x60000100
+initrd_phys-y := 0x60800000
diff --git a/arch/arm/mach-vexpress/core.h b/arch/arm/mach-vexpress/core.h
new file mode 100644
index 000000000000..57dd95ce41f9
--- /dev/null
+++ b/arch/arm/mach-vexpress/core.h
@@ -0,0 +1,26 @@
+#define __MMIO_P2V(x) (((x) & 0xfffff) | (((x) & 0x0f000000) >> 4) | 0xf8000000)
+#define MMIO_P2V(x) ((void __iomem *)__MMIO_P2V(x))
+
+#define AMBA_DEVICE(name,busid,base,plat) \
+struct amba_device name##_device = { \
+ .dev = { \
+ .coherent_dma_mask = ~0UL, \
+ .init_name = busid, \
+ .platform_data = plat, \
+ }, \
+ .res = { \
+ .start = base, \
+ .end = base + SZ_4K - 1, \
+ .flags = IORESOURCE_MEM, \
+ }, \
+ .dma_mask = ~0UL, \
+ .irq = IRQ_##base, \
+ /* .dma = DMA_##base,*/ \
+}
+
+struct map_desc;
+
+void v2m_map_io(struct map_desc *tile, size_t num);
+extern struct sys_timer v2m_timer;
+
+extern void __iomem *gic_cpu_base_addr;
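
__MMIO_P2V() statically folds tile and motherboard peripherals into a virtual window at 0xf8000000 by keeping the low 1MB offset and compressing the 16MB "slot" bits; CT_CA9X4_MPIC (0x1e000000), for example, lands at 0xf8e00000. A sketch of the same arithmetic written out as a function:

/* Sketch: the fixed physical->virtual translation used by this port. */
static unsigned long mmio_p2v(unsigned long x)
{
	return (x & 0xfffff)			/* low 1MB offset         */
	     | ((x & 0x0f000000) >> 4)		/* 16MB slot, compressed  */
	     | 0xf8000000;			/* fixed VA window        */
}
/* mmio_p2v(0x1e000000) == 0xf8e00000, matching the iotable entries. */
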
diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c
new file mode 100644
index 000000000000..9b11eedba65f
--- /dev/null
+++ b/arch/arm/mach-vexpress/ct-ca9x4.c
@@ -0,0 +1,250 @@
+/*
+ * Versatile Express Core Tile Cortex A9x4 Support
+ */
+#include <linux/init.h>
+#include <linux/gfp.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/amba/bus.h>
+#include <linux/amba/clcd.h>
+
+#include <asm/clkdev.h>
+#include <asm/hardware/arm_timer.h>
+#include <asm/hardware/cache-l2x0.h>
+#include <asm/hardware/gic.h>
+#include <asm/mach-types.h>
+#include <asm/pmu.h>
+
+#include <mach/clkdev.h>
+#include <mach/ct-ca9x4.h>
+
+#include <plat/timer-sp.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/time.h>
+
+#include "core.h"
+
+#include <mach/motherboard.h>
+
+#define V2M_PA_CS7 0x10000000
+
+static struct map_desc ct_ca9x4_io_desc[] __initdata = {
+ {
+ .virtual = __MMIO_P2V(CT_CA9X4_MPIC),
+ .pfn = __phys_to_pfn(CT_CA9X4_MPIC),
+ .length = SZ_16K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = __MMIO_P2V(CT_CA9X4_SP804_TIMER),
+ .pfn = __phys_to_pfn(CT_CA9X4_SP804_TIMER),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = __MMIO_P2V(CT_CA9X4_L2CC),
+ .pfn = __phys_to_pfn(CT_CA9X4_L2CC),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ },
+};
+
+static void __init ct_ca9x4_map_io(void)
+{
+ v2m_map_io(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc));
+}
+
+void __iomem *gic_cpu_base_addr;
+
+static void __init ct_ca9x4_init_irq(void)
+{
+ gic_cpu_base_addr = MMIO_P2V(A9_MPCORE_GIC_CPU);
+ gic_dist_init(0, MMIO_P2V(A9_MPCORE_GIC_DIST), 29);
+ gic_cpu_init(0, gic_cpu_base_addr);
+}
+
+#if 0
+static void ct_ca9x4_timer_init(void)
+{
+ writel(0, MMIO_P2V(CT_CA9X4_TIMER0) + TIMER_CTRL);
+ writel(0, MMIO_P2V(CT_CA9X4_TIMER1) + TIMER_CTRL);
+
+ sp804_clocksource_init(MMIO_P2V(CT_CA9X4_TIMER1));
+ sp804_clockevents_init(MMIO_P2V(CT_CA9X4_TIMER0), IRQ_CT_CA9X4_TIMER0);
+}
+
+static struct sys_timer ct_ca9x4_timer = {
+ .init = ct_ca9x4_timer_init,
+};
+#endif
+
+static struct clcd_panel xvga_panel = {
+ .mode = {
+ .name = "XVGA",
+ .refresh = 60,
+ .xres = 1024,
+ .yres = 768,
+ .pixclock = 15384,
+ .left_margin = 168,
+ .right_margin = 8,
+ .upper_margin = 29,
+ .lower_margin = 3,
+ .hsync_len = 144,
+ .vsync_len = 6,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED,
+ },
+ .width = -1,
+ .height = -1,
+ .tim2 = TIM2_BCD | TIM2_IPC,
+ .cntl = CNTL_LCDTFT | CNTL_BGR | CNTL_LCDVCOMP(1),
+ .bpp = 16,
+};
+
+static void ct_ca9x4_clcd_enable(struct clcd_fb *fb)
+{
+ v2m_cfg_write(SYS_CFG_MUXFPGA | SYS_CFG_SITE_DB1, 0);
+ v2m_cfg_write(SYS_CFG_DVIMODE | SYS_CFG_SITE_DB1, 2);
+}
+
+static int ct_ca9x4_clcd_setup(struct clcd_fb *fb)
+{
+ unsigned long framesize = 1024 * 768 * 2;
+ dma_addr_t dma;
+
+ fb->panel = &xvga_panel;
+
+ fb->fb.screen_base = dma_alloc_writecombine(&fb->dev->dev, framesize,
+ &dma, GFP_KERNEL);
+ if (!fb->fb.screen_base) {
+ printk(KERN_ERR "CLCD: unable to map frame buffer\n");
+ return -ENOMEM;
+ }
+ fb->fb.fix.smem_start = dma;
+ fb->fb.fix.smem_len = framesize;
+
+ return 0;
+}
+
+static int ct_ca9x4_clcd_mmap(struct clcd_fb *fb, struct vm_area_struct *vma)
+{
+ return dma_mmap_writecombine(&fb->dev->dev, vma, fb->fb.screen_base,
+ fb->fb.fix.smem_start, fb->fb.fix.smem_len);
+}
+
+static void ct_ca9x4_clcd_remove(struct clcd_fb *fb)
+{
+ dma_free_writecombine(&fb->dev->dev, fb->fb.fix.smem_len,
+ fb->fb.screen_base, fb->fb.fix.smem_start);
+}
+
+static struct clcd_board ct_ca9x4_clcd_data = {
+ .name = "CT-CA9X4",
+ .check = clcdfb_check,
+ .decode = clcdfb_decode,
+ .enable = ct_ca9x4_clcd_enable,
+ .setup = ct_ca9x4_clcd_setup,
+ .mmap = ct_ca9x4_clcd_mmap,
+ .remove = ct_ca9x4_clcd_remove,
+};
+
+static AMBA_DEVICE(clcd, "ct:clcd", CT_CA9X4_CLCDC, &ct_ca9x4_clcd_data);
+static AMBA_DEVICE(dmc, "ct:dmc", CT_CA9X4_DMC, NULL);
+static AMBA_DEVICE(smc, "ct:smc", CT_CA9X4_SMC, NULL);
+static AMBA_DEVICE(gpio, "ct:gpio", CT_CA9X4_GPIO, NULL);
+
+static struct amba_device *ct_ca9x4_amba_devs[] __initdata = {
+ &clcd_device,
+ &dmc_device,
+ &smc_device,
+ &gpio_device,
+};
+
+
+static long ct_round(struct clk *clk, unsigned long rate)
+{
+ return rate;
+}
+
+static int ct_set(struct clk *clk, unsigned long rate)
+{
+ return v2m_cfg_write(SYS_CFG_OSC | SYS_CFG_SITE_DB1 | 1, rate);
+}
+
+static const struct clk_ops osc1_clk_ops = {
+ .round = ct_round,
+ .set = ct_set,
+};
+
+static struct clk osc1_clk = {
+ .ops = &osc1_clk_ops,
+ .rate = 24000000,
+};
+
+static struct clk_lookup lookups[] = {
+ { /* CLCD */
+ .dev_id = "ct:clcd",
+ .clk = &osc1_clk,
+ },
+};
+
+static struct resource pmu_resources[] = {
+ [0] = {
+ .start = IRQ_CT_CA9X4_PMU_CPU0,
+ .end = IRQ_CT_CA9X4_PMU_CPU0,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = IRQ_CT_CA9X4_PMU_CPU1,
+ .end = IRQ_CT_CA9X4_PMU_CPU1,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ .start = IRQ_CT_CA9X4_PMU_CPU2,
+ .end = IRQ_CT_CA9X4_PMU_CPU2,
+ .flags = IORESOURCE_IRQ,
+ },
+ [3] = {
+ .start = IRQ_CT_CA9X4_PMU_CPU3,
+ .end = IRQ_CT_CA9X4_PMU_CPU3,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device pmu_device = {
+ .name = "arm-pmu",
+ .id = ARM_PMU_DEVICE_CPU,
+ .num_resources = ARRAY_SIZE(pmu_resources),
+ .resource = pmu_resources,
+};
+
+static void ct_ca9x4_init(void)
+{
+ int i;
+
+#ifdef CONFIG_CACHE_L2X0
+ l2x0_init(MMIO_P2V(CT_CA9X4_L2CC), 0x00000000, 0xfe0fffff);
+#endif
+
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
+ for (i = 0; i < ARRAY_SIZE(ct_ca9x4_amba_devs); i++)
+ amba_device_register(ct_ca9x4_amba_devs[i], &iomem_resource);
+
+ platform_device_register(&pmu_device);
+}
+
+MACHINE_START(VEXPRESS, "ARM-Versatile Express CA9x4")
+ .phys_io = V2M_UART0,
+ .io_pg_offst = (__MMIO_P2V(V2M_UART0) >> 18) & 0xfffc,
+ .boot_params = PHYS_OFFSET + 0x00000100,
+ .map_io = ct_ca9x4_map_io,
+ .init_irq = ct_ca9x4_init_irq,
+#if 0
+ .timer = &ct_ca9x4_timer,
+#else
+ .timer = &v2m_timer,
+#endif
+ .init_machine = ct_ca9x4_init,
+MACHINE_END
diff --git a/arch/arm/mach-vexpress/headsmp.S b/arch/arm/mach-vexpress/headsmp.S
new file mode 100644
index 000000000000..8a78ff68e1ee
--- /dev/null
+++ b/arch/arm/mach-vexpress/headsmp.S
@@ -0,0 +1,39 @@
+/*
+ * linux/arch/arm/mach-vexpress/headsmp.S
+ *
+ * Copyright (c) 2003 ARM Limited
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+ __INIT
+
+/*
+ * Versatile Express specific entry point for secondary CPUs. This
+ * provides a "holding pen" into which all secondary cores are held
+ * until we're ready for them to initialise.
+ */
+ENTRY(vexpress_secondary_startup)
+ mrc p15, 0, r0, c0, c0, 5
+ and r0, r0, #15
+ adr r4, 1f
+ ldmia r4, {r5, r6}
+ sub r4, r4, r5
+ add r6, r6, r4
+pen: ldr r7, [r6]
+ cmp r7, r0
+ bne pen
+
+ /*
+ * we've been released from the holding pen: secondary_stack
+ * should now contain the SVC stack for this core
+ */
+ b secondary_startup
+
+1: .long .
+ .long pen_release
diff --git a/arch/arm/mach-vexpress/include/mach/clkdev.h b/arch/arm/mach-vexpress/include/mach/clkdev.h
new file mode 100644
index 000000000000..3f8307d73cad
--- /dev/null
+++ b/arch/arm/mach-vexpress/include/mach/clkdev.h
@@ -0,0 +1,15 @@
+#ifndef __ASM_MACH_CLKDEV_H
+#define __ASM_MACH_CLKDEV_H
+
+#include <plat/clock.h>
+
+struct clk {
+ const struct clk_ops *ops;
+ unsigned long rate;
+ const struct icst_params *params;
+};
+
+#define __clk_get(clk) ({ 1; })
+#define __clk_put(clk) do { } while (0)
+
+#endif
diff --git a/arch/arm/mach-vexpress/include/mach/ct-ca9x4.h b/arch/arm/mach-vexpress/include/mach/ct-ca9x4.h
new file mode 100644
index 000000000000..8650f04136ef
--- /dev/null
+++ b/arch/arm/mach-vexpress/include/mach/ct-ca9x4.h
@@ -0,0 +1,47 @@
+#ifndef __MACH_CT_CA9X4_H
+#define __MACH_CT_CA9X4_H
+
+/*
+ * Physical base addresses
+ */
+#define CT_CA9X4_CLCDC (0x10020000)
+#define CT_CA9X4_AXIRAM (0x10060000)
+#define CT_CA9X4_DMC (0x100e0000)
+#define CT_CA9X4_SMC (0x100e1000)
+#define CT_CA9X4_SCC (0x100e2000)
+#define CT_CA9X4_SP804_TIMER (0x100e4000)
+#define CT_CA9X4_SP805_WDT (0x100e5000)
+#define CT_CA9X4_TZPC (0x100e6000)
+#define CT_CA9X4_GPIO (0x100e8000)
+#define CT_CA9X4_FASTAXI (0x100e9000)
+#define CT_CA9X4_SLOWAXI (0x100ea000)
+#define CT_CA9X4_TZASC (0x100ec000)
+#define CT_CA9X4_CORESIGHT (0x10200000)
+#define CT_CA9X4_MPIC (0x1e000000)
+#define CT_CA9X4_SYSTIMER (0x1e004000)
+#define CT_CA9X4_SYSWDT (0x1e007000)
+#define CT_CA9X4_L2CC (0x1e00a000)
+
+#define CT_CA9X4_TIMER0 (CT_CA9X4_SP804_TIMER + 0x000)
+#define CT_CA9X4_TIMER1 (CT_CA9X4_SP804_TIMER + 0x020)
+
+#define A9_MPCORE_SCU (CT_CA9X4_MPIC + 0x0000)
+#define A9_MPCORE_GIC_CPU (CT_CA9X4_MPIC + 0x0100)
+#define A9_MPCORE_GIT (CT_CA9X4_MPIC + 0x0200)
+#define A9_MPCORE_GIC_DIST (CT_CA9X4_MPIC + 0x1000)
+
+/*
+ * Interrupts. Those in {} are for AMBA devices
+ */
+#define IRQ_CT_CA9X4_CLCDC { 76 }
+#define IRQ_CT_CA9X4_DMC { -1 }
+#define IRQ_CT_CA9X4_SMC { 77, 78 }
+#define IRQ_CT_CA9X4_TIMER0 80
+#define IRQ_CT_CA9X4_TIMER1 81
+#define IRQ_CT_CA9X4_GPIO { 82 }
+#define IRQ_CT_CA9X4_PMU_CPU0 92
+#define IRQ_CT_CA9X4_PMU_CPU1 93
+#define IRQ_CT_CA9X4_PMU_CPU2 94
+#define IRQ_CT_CA9X4_PMU_CPU3 95
+
+#endif
diff --git a/arch/arm/mach-vexpress/include/mach/debug-macro.S b/arch/arm/mach-vexpress/include/mach/debug-macro.S
new file mode 100644
index 000000000000..5167e2aceeba
--- /dev/null
+++ b/arch/arm/mach-vexpress/include/mach/debug-macro.S
@@ -0,0 +1,23 @@
+/* arch/arm/mach-vexpress/include/mach/debug-macro.S
+ *
+ * Debugging macro include header
+ *
+ * Copyright (C) 1994-1999 Russell King
+ * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define DEBUG_LL_UART_OFFSET 0x00009000
+
+ .macro addruart,rx,tmp
+ mrc p15, 0, \rx, c1, c0
+ tst \rx, #1 @ MMU enabled?
+ moveq \rx, #0x10000000
+ movne \rx, #0xf8000000 @ virtual base
+ orr \rx, \rx, #DEBUG_LL_UART_OFFSET
+ .endm
+
+#include <asm/hardware/debug-pl01x.S>
diff --git a/arch/arm/mach-vexpress/include/mach/entry-macro.S b/arch/arm/mach-vexpress/include/mach/entry-macro.S
new file mode 100644
index 000000000000..20e9fb514f0a
--- /dev/null
+++ b/arch/arm/mach-vexpress/include/mach/entry-macro.S
@@ -0,0 +1,67 @@
+#include <asm/hardware/gic.h>
+
+ .macro disable_fiq
+ .endm
+
+ .macro get_irqnr_preamble, base, tmp
+ ldr \base, =gic_cpu_base_addr
+ ldr \base, [\base]
+ .endm
+
+ .macro arch_ret_to_user, tmp1, tmp2
+ .endm
+
+ /*
+ * The interrupt numbering scheme is defined in the
+ * interrupt controller spec. To wit:
+ *
+ * Interrupts 0-15 are IPI
+ * 16-28 are reserved
+ * 29-31 are local. We allow 30 to be used for the watchdog.
+ * 32-1020 are global
+ * 1021-1022 are reserved
+ * 1023 is "spurious" (no interrupt)
+ *
+ * For now, we ignore all local interrupts so only return an interrupt if it's
+ * between 30 and 1020. The test_for_ipi routine below will pick up on IPIs.
+ *
+ * A simple read from the controller will tell us the number of the highest
+ * priority enabled interrupt. We then just need to check whether it is in the
+ * valid range for an IRQ (30-1020 inclusive).
+ */
+
+ .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
+ ldr \irqstat, [\base, #GIC_CPU_INTACK] /* bits 12-10 = src CPU, 9-0 = int # */
+ ldr \tmp, =1021
+ bic \irqnr, \irqstat, #0x1c00
+ cmp \irqnr, #29
+ cmpcc \irqnr, \irqnr
+ cmpne \irqnr, \tmp
+ cmpcs \irqnr, \irqnr
+ .endm
+
+ /* We assume that irqstat (the raw value of the IRQ acknowledge
+ * register) is preserved from the macro above.
+ * If there is an IPI, we immediately signal end of interrupt on the
+ * controller, since this requires the original irqstat value which
+ * we won't easily be able to recreate later.
+ */
+
+ .macro test_for_ipi, irqnr, irqstat, base, tmp
+ bic \irqnr, \irqstat, #0x1c00
+ cmp \irqnr, #16
+ strcc \irqstat, [\base, #GIC_CPU_EOI]
+ cmpcs \irqnr, \irqnr
+ .endm
+
+ /* As above, this assumes that irqstat and base are preserved.. */
+
+ .macro test_for_ltirq, irqnr, irqstat, base, tmp
+ bic \irqnr, \irqstat, #0x1c00
+ mov \tmp, #0
+ cmp \irqnr, #29
+ moveq \tmp, #1
+ streq \irqstat, [\base, #GIC_CPU_EOI]
+ cmp \tmp, #0
+ .endm
+
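
The conditional-compare sequence in get_irqnr_and_base above is compact but opaque. The following is a hypothetical C rendering of the same decode (illustration only, not part of the patch), assuming the raw GIC_CPU_INTACK value has already been read:

/* Hypothetical C equivalent of get_irqnr_and_base: strip the source-CPU
 * field from the acknowledge value and treat only IDs 30..1020 as a
 * normal IRQ; IPIs (0..15) and the local timer (29) are handled by the
 * test_for_ipi/test_for_ltirq macros instead.
 */
static int example_decode_gic_irq(unsigned int irqstat, unsigned int *irqnr)
{
	unsigned int nr = irqstat & ~0x1c00;	/* clear bits 12:10 (src CPU) */

	*irqnr = nr;
	return nr > 29 && nr < 1021;		/* valid range: 30..1020 */
}
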
diff --git a/arch/arm/mach-vexpress/include/mach/hardware.h b/arch/arm/mach-vexpress/include/mach/hardware.h
new file mode 100644
index 000000000000..40a8c178f10d
--- /dev/null
+++ b/arch/arm/mach-vexpress/include/mach/hardware.h
@@ -0,0 +1 @@
+/* empty */
diff --git a/arch/arm/mach-vexpress/include/mach/io.h b/arch/arm/mach-vexpress/include/mach/io.h
new file mode 100644
index 000000000000..748bb524ee71
--- /dev/null
+++ b/arch/arm/mach-vexpress/include/mach/io.h
@@ -0,0 +1,28 @@
+/*
+ * arch/arm/mach-vexpress/include/mach/io.h
+ *
+ * Copyright (C) 2003 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef __ASM_ARM_ARCH_IO_H
+#define __ASM_ARM_ARCH_IO_H
+
+#define IO_SPACE_LIMIT 0xffffffff
+
+#define __io(a) __typesafe_io(a)
+#define __mem_pci(a) (a)
+
+#endif
diff --git a/arch/arm/mach-vexpress/include/mach/irqs.h b/arch/arm/mach-vexpress/include/mach/irqs.h
new file mode 100644
index 000000000000..7054cbfc9de5
--- /dev/null
+++ b/arch/arm/mach-vexpress/include/mach/irqs.h
@@ -0,0 +1,4 @@
+#define IRQ_LOCALTIMER 29
+#define IRQ_LOCALWDOG 30
+
+#define NR_IRQS 128
diff --git a/arch/arm/mach-vexpress/include/mach/memory.h b/arch/arm/mach-vexpress/include/mach/memory.h
new file mode 100644
index 000000000000..be28232ae639
--- /dev/null
+++ b/arch/arm/mach-vexpress/include/mach/memory.h
@@ -0,0 +1,25 @@
+/*
+ * arch/arm/mach-vexpress/include/mach/memory.h
+ *
+ * Copyright (C) 2003 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef __ASM_ARCH_MEMORY_H
+#define __ASM_ARCH_MEMORY_H
+
+#define PHYS_OFFSET UL(0x60000000)
+
+#endif
diff --git a/arch/arm/mach-vexpress/include/mach/motherboard.h b/arch/arm/mach-vexpress/include/mach/motherboard.h
new file mode 100644
index 000000000000..98a8ded055bf
--- /dev/null
+++ b/arch/arm/mach-vexpress/include/mach/motherboard.h
@@ -0,0 +1,121 @@
+#ifndef __MACH_MOTHERBOARD_H
+#define __MACH_MOTHERBOARD_H
+
+/*
+ * Physical addresses, offset from V2M_PA_CS0-3
+ */
+#define V2M_NOR0 (V2M_PA_CS0)
+#define V2M_NOR1 (V2M_PA_CS1)
+#define V2M_SRAM (V2M_PA_CS2)
+#define V2M_VIDEO_SRAM (V2M_PA_CS3 + 0x00000000)
+#define V2M_LAN9118 (V2M_PA_CS3 + 0x02000000)
+#define V2M_ISP1761 (V2M_PA_CS3 + 0x03000000)
+
+/*
+ * Physical addresses, offset from V2M_PA_CS7
+ */
+#define V2M_SYSREGS (V2M_PA_CS7 + 0x00000000)
+#define V2M_SYSCTL (V2M_PA_CS7 + 0x00001000)
+#define V2M_SERIAL_BUS_PCI (V2M_PA_CS7 + 0x00002000)
+
+#define V2M_AACI (V2M_PA_CS7 + 0x00004000)
+#define V2M_MMCI (V2M_PA_CS7 + 0x00005000)
+#define V2M_KMI0 (V2M_PA_CS7 + 0x00006000)
+#define V2M_KMI1 (V2M_PA_CS7 + 0x00007000)
+
+#define V2M_UART0 (V2M_PA_CS7 + 0x00009000)
+#define V2M_UART1 (V2M_PA_CS7 + 0x0000a000)
+#define V2M_UART2 (V2M_PA_CS7 + 0x0000b000)
+#define V2M_UART3 (V2M_PA_CS7 + 0x0000c000)
+
+#define V2M_WDT (V2M_PA_CS7 + 0x0000f000)
+
+#define V2M_TIMER01 (V2M_PA_CS7 + 0x00011000)
+#define V2M_TIMER23 (V2M_PA_CS7 + 0x00012000)
+
+#define V2M_SERIAL_BUS_DVI (V2M_PA_CS7 + 0x00016000)
+#define V2M_RTC (V2M_PA_CS7 + 0x00017000)
+
+#define V2M_CF (V2M_PA_CS7 + 0x0001a000)
+#define V2M_CLCD (V2M_PA_CS7 + 0x0001f000)
+
+#define V2M_SYS_ID (V2M_SYSREGS + 0x000)
+#define V2M_SYS_SW (V2M_SYSREGS + 0x004)
+#define V2M_SYS_LED (V2M_SYSREGS + 0x008)
+#define V2M_SYS_100HZ (V2M_SYSREGS + 0x024)
+#define V2M_SYS_FLAGS (V2M_SYSREGS + 0x030)
+#define V2M_SYS_FLAGSSET (V2M_SYSREGS + 0x030)
+#define V2M_SYS_FLAGSCLR (V2M_SYSREGS + 0x034)
+#define V2M_SYS_NVFLAGS (V2M_SYSREGS + 0x038)
+#define V2M_SYS_NVFLAGSSET (V2M_SYSREGS + 0x038)
+#define V2M_SYS_NVFLAGSCLR (V2M_SYSREGS + 0x03c)
+#define V2M_SYS_MCI (V2M_SYSREGS + 0x048)
+#define V2M_SYS_FLASH (V2M_SYSREGS + 0x03c)
+#define V2M_SYS_CFGSW (V2M_SYSREGS + 0x058)
+#define V2M_SYS_24MHZ (V2M_SYSREGS + 0x05c)
+#define V2M_SYS_MISC (V2M_SYSREGS + 0x060)
+#define V2M_SYS_DMA (V2M_SYSREGS + 0x064)
+#define V2M_SYS_PROCID0 (V2M_SYSREGS + 0x084)
+#define V2M_SYS_PROCID1 (V2M_SYSREGS + 0x088)
+#define V2M_SYS_CFGDATA (V2M_SYSREGS + 0x0a0)
+#define V2M_SYS_CFGCTRL (V2M_SYSREGS + 0x0a4)
+#define V2M_SYS_CFGSTAT (V2M_SYSREGS + 0x0a8)
+
+#define V2M_TIMER0 (V2M_TIMER01 + 0x000)
+#define V2M_TIMER1 (V2M_TIMER01 + 0x020)
+
+#define V2M_TIMER2 (V2M_TIMER23 + 0x000)
+#define V2M_TIMER3 (V2M_TIMER23 + 0x020)
+
+
+/*
+ * Interrupts. Those in {} are for AMBA devices
+ */
+#define IRQ_V2M_WDT { (32 + 0) }
+#define IRQ_V2M_TIMER0 (32 + 2)
+#define IRQ_V2M_TIMER1 (32 + 2)
+#define IRQ_V2M_TIMER2 (32 + 3)
+#define IRQ_V2M_TIMER3 (32 + 3)
+#define IRQ_V2M_RTC { (32 + 4) }
+#define IRQ_V2M_UART0 { (32 + 5) }
+#define IRQ_V2M_UART1 { (32 + 6) }
+#define IRQ_V2M_UART2 { (32 + 7) }
+#define IRQ_V2M_UART3 { (32 + 8) }
+#define IRQ_V2M_MMCI { (32 + 9), (32 + 10) }
+#define IRQ_V2M_AACI { (32 + 11) }
+#define IRQ_V2M_KMI0 { (32 + 12) }
+#define IRQ_V2M_KMI1 { (32 + 13) }
+#define IRQ_V2M_CLCD { (32 + 14) }
+#define IRQ_V2M_LAN9118 (32 + 15)
+#define IRQ_V2M_ISP1761 (32 + 16)
+#define IRQ_V2M_PCIE (32 + 17)
+
+
+/*
+ * Configuration
+ */
+#define SYS_CFG_START (1 << 31)
+#define SYS_CFG_WRITE (1 << 30)
+#define SYS_CFG_OSC (1 << 20)
+#define SYS_CFG_VOLT (2 << 20)
+#define SYS_CFG_AMP (3 << 20)
+#define SYS_CFG_TEMP (4 << 20)
+#define SYS_CFG_RESET (5 << 20)
+#define SYS_CFG_SCC (6 << 20)
+#define SYS_CFG_MUXFPGA (7 << 20)
+#define SYS_CFG_SHUTDOWN (8 << 20)
+#define SYS_CFG_REBOOT (9 << 20)
+#define SYS_CFG_DVIMODE (11 << 20)
+#define SYS_CFG_POWER (12 << 20)
+#define SYS_CFG_SITE_MB (0 << 16)
+#define SYS_CFG_SITE_DB1 (1 << 16)
+#define SYS_CFG_SITE_DB2 (2 << 16)
+#define SYS_CFG_STACK(n) ((n) << 12)
+
+#define SYS_CFG_ERR (1 << 1)
+#define SYS_CFG_COMPLETE (1 << 0)
+
+int v2m_cfg_write(u32 devfn, u32 data);
+int v2m_cfg_read(u32 devfn, u32 *data);
+
+#endif
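
The SYS_CFG_* fields above combine into a single control word passed to v2m_cfg_write()/v2m_cfg_read(): function in bits 20 and up, site in bits 16 and up, stack/device index in bits 12 and up. A minimal hypothetical illustration, mirroring what v2m_osc1_set() does later in this patch:

/* Hypothetical sketch: program motherboard oscillator 1 via the serial
 * configuration interface. The function, site and device index are
 * OR'd into one control word; the requested rate is the data payload.
 */
static int example_set_mb_osc1(unsigned long rate_hz)
{
	return v2m_cfg_write(SYS_CFG_OSC | SYS_CFG_SITE_MB | 1, rate_hz);
}
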
diff --git a/arch/arm/mach-vexpress/include/mach/smp.h b/arch/arm/mach-vexpress/include/mach/smp.h
new file mode 100644
index 000000000000..72a9621ed087
--- /dev/null
+++ b/arch/arm/mach-vexpress/include/mach/smp.h
@@ -0,0 +1,21 @@
+#ifndef __MACH_SMP_H
+#define __MACH_SMP_H
+
+#include <asm/hardware/gic.h>
+
+#define hard_smp_processor_id() \
+ ({ \
+ unsigned int cpunum; \
+ __asm__("mrc p15, 0, %0, c0, c0, 5" \
+ : "=r" (cpunum)); \
+ cpunum &= 0x0F; \
+ })
+
+/*
+ * We use IRQ1 as the IPI
+ */
+static inline void smp_cross_call(const struct cpumask *mask)
+{
+ gic_raise_softirq(mask, 1);
+}
+#endif
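
For reference, a hypothetical C-style equivalent of the hard_smp_processor_id() macro above (illustration only):

/* Read the MPIDR (CP15 c0, c0, 5) and keep the low four bits, which
 * give the CPU number within the Cortex-A9 MPCore cluster.
 */
static inline unsigned int example_hard_cpu_id(void)
{
	unsigned int mpidr;

	__asm__("mrc p15, 0, %0, c0, c0, 5" : "=r" (mpidr));
	return mpidr & 0x0f;
}
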
diff --git a/arch/arm/mach-vexpress/include/mach/system.h b/arch/arm/mach-vexpress/include/mach/system.h
new file mode 100644
index 000000000000..899a4e628a4c
--- /dev/null
+++ b/arch/arm/mach-vexpress/include/mach/system.h
@@ -0,0 +1,37 @@
+/*
+ * arch/arm/mach-vexpress/include/mach/system.h
+ *
+ * Copyright (C) 2003 ARM Limited
+ * Copyright (C) 2000 Deep Blue Solutions Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef __ASM_ARCH_SYSTEM_H
+#define __ASM_ARCH_SYSTEM_H
+
+static inline void arch_idle(void)
+{
+ /*
+ * This should do all the clock switching
+ * and wait for interrupt tricks
+ */
+ cpu_do_idle();
+}
+
+static inline void arch_reset(char mode, const char *cmd)
+{
+}
+
+#endif
diff --git a/arch/arm/mach-vexpress/include/mach/timex.h b/arch/arm/mach-vexpress/include/mach/timex.h
new file mode 100644
index 000000000000..00029bacd43c
--- /dev/null
+++ b/arch/arm/mach-vexpress/include/mach/timex.h
@@ -0,0 +1,23 @@
+/*
+ * arch/arm/mach-vexpress/include/mach/timex.h
+ *
+ * Versatile Express architecture timex specifications
+ *
+ * Copyright (C) 2003 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#define CLOCK_TICK_RATE (50000000 / 16)
diff --git a/arch/arm/mach-vexpress/include/mach/uncompress.h b/arch/arm/mach-vexpress/include/mach/uncompress.h
new file mode 100644
index 000000000000..7972c5748d0e
--- /dev/null
+++ b/arch/arm/mach-vexpress/include/mach/uncompress.h
@@ -0,0 +1,52 @@
+/*
+ * arch/arm/mach-vexpress/include/mach/uncompress.h
+ *
+ * Copyright (C) 2003 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#define AMBA_UART_DR(base) (*(volatile unsigned char *)((base) + 0x00))
+#define AMBA_UART_LCRH(base) (*(volatile unsigned char *)((base) + 0x2c))
+#define AMBA_UART_CR(base) (*(volatile unsigned char *)((base) + 0x30))
+#define AMBA_UART_FR(base) (*(volatile unsigned char *)((base) + 0x18))
+
+#define get_uart_base() (0x10000000 + 0x00009000)
+
+/*
+ * This does not append a newline
+ */
+static inline void putc(int c)
+{
+ unsigned long base = get_uart_base();
+
+ while (AMBA_UART_FR(base) & (1 << 5))
+ barrier();
+
+ AMBA_UART_DR(base) = c;
+}
+
+static inline void flush(void)
+{
+ unsigned long base = get_uart_base();
+
+ while (AMBA_UART_FR(base) & (1 << 3))
+ barrier();
+}
+
+/*
+ * nothing to do
+ */
+#define arch_decomp_setup()
+#define arch_decomp_wdog()
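
The putc()/flush() pair above is all the generic decompressor needs for console output. A hypothetical sketch of the string helper built on top of them (the real caller lives in arch/arm/boot/compressed/misc.c):

/* Hypothetical illustration: emit a string over the PL011 UART using
 * the primitives above, translating "\n" to "\r\n" as serial consoles
 * expect, then drain the transmit FIFO.
 */
static void example_putstr(const char *s)
{
	while (*s) {
		if (*s == '\n')
			putc('\r');
		putc(*s++);
	}
	flush();
}
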
diff --git a/arch/arm/mach-vexpress/include/mach/vmalloc.h b/arch/arm/mach-vexpress/include/mach/vmalloc.h
new file mode 100644
index 000000000000..f43a36ef678b
--- /dev/null
+++ b/arch/arm/mach-vexpress/include/mach/vmalloc.h
@@ -0,0 +1,21 @@
+/*
+ * arch/arm/mach-vexpress/include/mach/vmalloc.h
+ *
+ * Copyright (C) 2003 ARM Limited
+ * Copyright (C) 2000 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#define VMALLOC_END 0xf8000000UL
diff --git a/arch/arm/mach-vexpress/localtimer.c b/arch/arm/mach-vexpress/localtimer.c
new file mode 100644
index 000000000000..c0e3a59a0bfc
--- /dev/null
+++ b/arch/arm/mach-vexpress/localtimer.c
@@ -0,0 +1,26 @@
+/*
+ * linux/arch/arm/mach-vexpress/localtimer.c
+ *
+ * Copyright (C) 2002 ARM Ltd.
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/clockchips.h>
+
+#include <asm/smp_twd.h>
+#include <asm/localtimer.h>
+#include <mach/irqs.h>
+
+/*
+ * Setup the local clock events for a CPU.
+ */
+void __cpuinit local_timer_setup(struct clock_event_device *evt)
+{
+ evt->irq = IRQ_LOCALTIMER;
+ twd_timer_setup(evt);
+}
diff --git a/arch/arm/mach-vexpress/platsmp.c b/arch/arm/mach-vexpress/platsmp.c
new file mode 100644
index 000000000000..670970699ba9
--- /dev/null
+++ b/arch/arm/mach-vexpress/platsmp.c
@@ -0,0 +1,190 @@
+/*
+ * linux/arch/arm/mach-vexpress/platsmp.c
+ *
+ * Copyright (C) 2002 ARM Ltd.
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/jiffies.h>
+#include <linux/smp.h>
+#include <linux/io.h>
+
+#include <asm/cacheflush.h>
+#include <asm/localtimer.h>
+#include <asm/smp_scu.h>
+#include <asm/unified.h>
+
+#include <mach/ct-ca9x4.h>
+#include <mach/motherboard.h>
+#define V2M_PA_CS7 0x10000000
+
+#include "core.h"
+
+extern void vexpress_secondary_startup(void);
+
+/*
+ * control for which core is the next to come out of the secondary
+ * boot "holding pen"
+ */
+volatile int __cpuinitdata pen_release = -1;
+
+static void __iomem *scu_base_addr(void)
+{
+ return MMIO_P2V(A9_MPCORE_SCU);
+}
+
+static DEFINE_SPINLOCK(boot_lock);
+
+void __cpuinit platform_secondary_init(unsigned int cpu)
+{
+ trace_hardirqs_off();
+
+ /*
+ * if any interrupts are already enabled for the primary
+ * core (e.g. timer irq), then they will not have been enabled
+ * for us: do so
+ */
+ gic_cpu_init(0, gic_cpu_base_addr);
+
+ /*
+ * let the primary processor know we're out of the
+ * pen, then head off into the C entry point
+ */
+ pen_release = -1;
+ smp_wmb();
+
+ /*
+ * Synchronise with the boot thread.
+ */
+ spin_lock(&boot_lock);
+ spin_unlock(&boot_lock);
+}
+
+int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+ unsigned long timeout;
+
+ /*
+ * Set synchronisation state between this boot processor
+ * and the secondary one
+ */
+ spin_lock(&boot_lock);
+
+ /*
+ * This is really belt and braces; we hold unintended secondary
+ * CPUs in the holding pen until we're ready for them. However,
+ * since we haven't sent them a soft interrupt, they shouldn't
+ * be there.
+ */
+ pen_release = cpu;
+ __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
+ outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
+
+ /*
+ * Send the secondary CPU a soft interrupt, thereby causing
+ * the boot monitor to read the system wide flags register,
+ * and branch to the address found there.
+ */
+ smp_cross_call(cpumask_of(cpu));
+
+ timeout = jiffies + (1 * HZ);
+ while (time_before(jiffies, timeout)) {
+ smp_rmb();
+ if (pen_release == -1)
+ break;
+
+ udelay(10);
+ }
+
+ /*
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+ spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+}
+
+/*
+ * Initialise the CPU possible map early - this describes the CPUs
+ * which may be present or become present in the system.
+ */
+void __init smp_init_cpus(void)
+{
+ void __iomem *scu_base = scu_base_addr();
+ unsigned int i, ncores;
+
+ ncores = scu_base ? scu_get_core_count(scu_base) : 1;
+
+ /* sanity check */
+ if (ncores == 0) {
+ printk(KERN_ERR
+ "vexpress: strange CM count of 0? Default to 1\n");
+
+ ncores = 1;
+ }
+
+ if (ncores > NR_CPUS) {
+ printk(KERN_WARNING
+ "vexpress: no. of cores (%d) greater than configured "
+ "maximum of %d - clipping\n",
+ ncores, NR_CPUS);
+ ncores = NR_CPUS;
+ }
+
+ for (i = 0; i < ncores; i++)
+ set_cpu_possible(i, true);
+}
+
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+ unsigned int ncores = num_possible_cpus();
+ unsigned int cpu = smp_processor_id();
+ int i;
+
+ smp_store_cpu_info(cpu);
+
+ /*
+ * are we trying to boot more cores than exist?
+ */
+ if (max_cpus > ncores)
+ max_cpus = ncores;
+
+ /*
+ * Initialise the present map, which describes the set of CPUs
+ * actually populated at the present time.
+ */
+ for (i = 0; i < max_cpus; i++)
+ set_cpu_present(i, true);
+
+ /*
+ * Initialise the SCU if there is more than one CPU and let
+ * them know where to start.
+ */
+ if (max_cpus > 1) {
+ /*
+ * Enable the local timer or broadcast device for the
+ * boot CPU, but only if we have more than one CPU.
+ */
+ percpu_timer_setup();
+
+ scu_enable(scu_base_addr());
+
+ /*
+ * Write the address of secondary startup into the
+ * system-wide flags register. The boot monitor waits
+ * until it receives a soft interrupt, and then the
+ * secondary CPU branches to this address.
+ */
+ writel(~0, MMIO_P2V(V2M_SYS_FLAGSCLR));
+ writel(BSYM(virt_to_phys(vexpress_secondary_startup)),
+ MMIO_P2V(V2M_SYS_FLAGSSET));
+ }
+}
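
The "holding pen" handshake in boot_secondary()/platform_secondary_init() has a counterpart on the secondary CPU itself. The real code is ARM assembly in headsmp.S (not shown in this hunk), but a hypothetical C rendering of the idea looks like this:

/* Hypothetical sketch of the secondary side of the pen_release
 * handshake: spin until the boot CPU writes our CPU number into
 * pen_release, then continue into the generic secondary start-up path,
 * where platform_secondary_init() eventually clears pen_release again.
 */
void example_secondary_holding_pen(unsigned int this_cpu)
{
	while (pen_release != this_cpu)
		barrier();	/* wait to be released by the boot CPU */

	/* ...fall through to secondary_startup()/secondary_start_kernel() */
}
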
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
new file mode 100644
index 000000000000..d250711b8c7a
--- /dev/null
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -0,0 +1,361 @@
+/*
+ * Versatile Express V2M Motherboard Support
+ */
+#include <linux/device.h>
+#include <linux/amba/bus.h>
+#include <linux/amba/mmci.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/smsc911x.h>
+#include <linux/spinlock.h>
+#include <linux/sysdev.h>
+#include <linux/usb/isp1760.h>
+
+#include <asm/clkdev.h>
+#include <asm/sizes.h>
+#include <asm/mach/flash.h>
+#include <asm/mach/map.h>
+#include <asm/mach/time.h>
+#include <asm/hardware/arm_timer.h>
+
+#include <mach/clkdev.h>
+#include <mach/motherboard.h>
+
+#include <plat/timer-sp.h>
+
+#include "core.h"
+
+#define V2M_PA_CS0 0x40000000
+#define V2M_PA_CS1 0x44000000
+#define V2M_PA_CS2 0x48000000
+#define V2M_PA_CS3 0x4c000000
+#define V2M_PA_CS7 0x10000000
+
+static struct map_desc v2m_io_desc[] __initdata = {
+ {
+ .virtual = __MMIO_P2V(V2M_PA_CS7),
+ .pfn = __phys_to_pfn(V2M_PA_CS7),
+ .length = SZ_128K,
+ .type = MT_DEVICE,
+ },
+};
+
+void __init v2m_map_io(struct map_desc *tile, size_t num)
+{
+ iotable_init(v2m_io_desc, ARRAY_SIZE(v2m_io_desc));
+ iotable_init(tile, num);
+}
+
+
+static void v2m_timer_init(void)
+{
+ writel(0, MMIO_P2V(V2M_TIMER0) + TIMER_CTRL);
+ writel(0, MMIO_P2V(V2M_TIMER1) + TIMER_CTRL);
+
+ sp804_clocksource_init(MMIO_P2V(V2M_TIMER1));
+ sp804_clockevents_init(MMIO_P2V(V2M_TIMER0), IRQ_V2M_TIMER0);
+}
+
+struct sys_timer v2m_timer = {
+ .init = v2m_timer_init,
+};
+
+
+static DEFINE_SPINLOCK(v2m_cfg_lock);
+
+int v2m_cfg_write(u32 devfn, u32 data)
+{
+ /* Configuration interface broken? */
+ u32 val;
+
+ printk("%s: writing %08x to %08x\n", __func__, data, devfn);
+
+ devfn |= SYS_CFG_START | SYS_CFG_WRITE;
+
+ spin_lock(&v2m_cfg_lock);
+ val = readl(MMIO_P2V(V2M_SYS_CFGSTAT));
+ writel(val & ~SYS_CFG_COMPLETE, MMIO_P2V(V2M_SYS_CFGSTAT));
+
+ writel(data, MMIO_P2V(V2M_SYS_CFGDATA));
+ writel(devfn, MMIO_P2V(V2M_SYS_CFGCTRL));
+
+ do {
+ val = readl(MMIO_P2V(V2M_SYS_CFGSTAT));
+ } while (val == 0);
+ spin_unlock(&v2m_cfg_lock);
+
+ return !!(val & SYS_CFG_ERR);
+}
+
+int v2m_cfg_read(u32 devfn, u32 *data)
+{
+ u32 val;
+
+ devfn |= SYS_CFG_START;
+
+ spin_lock(&v2m_cfg_lock);
+ writel(0, MMIO_P2V(V2M_SYS_CFGSTAT));
+ writel(devfn, MMIO_P2V(V2M_SYS_CFGCTRL));
+
+ mb();
+
+ do {
+ cpu_relax();
+ val = readl(MMIO_P2V(V2M_SYS_CFGSTAT));
+ } while (val == 0);
+
+ *data = readl(MMIO_P2V(V2M_SYS_CFGDATA));
+ spin_unlock(&v2m_cfg_lock);
+
+ return !!(val & SYS_CFG_ERR);
+}
+
+
+static struct resource v2m_pcie_i2c_resource = {
+ .start = V2M_SERIAL_BUS_PCI,
+ .end = V2M_SERIAL_BUS_PCI + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device v2m_pcie_i2c_device = {
+ .name = "versatile-i2c",
+ .id = 0,
+ .num_resources = 1,
+ .resource = &v2m_pcie_i2c_resource,
+};
+
+static struct resource v2m_ddc_i2c_resource = {
+ .start = V2M_SERIAL_BUS_DVI,
+ .end = V2M_SERIAL_BUS_DVI + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device v2m_ddc_i2c_device = {
+ .name = "versatile-i2c",
+ .id = 1,
+ .num_resources = 1,
+ .resource = &v2m_ddc_i2c_resource,
+};
+
+static struct resource v2m_eth_resources[] = {
+ {
+ .start = V2M_LAN9118,
+ .end = V2M_LAN9118 + SZ_64K - 1,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = IRQ_V2M_LAN9118,
+ .end = IRQ_V2M_LAN9118,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct smsc911x_platform_config v2m_eth_config = {
+ .flags = SMSC911X_USE_32BIT,
+ .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_HIGH,
+ .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL,
+ .phy_interface = PHY_INTERFACE_MODE_MII,
+};
+
+static struct platform_device v2m_eth_device = {
+ .name = "smsc911x",
+ .id = -1,
+ .resource = v2m_eth_resources,
+ .num_resources = ARRAY_SIZE(v2m_eth_resources),
+ .dev.platform_data = &v2m_eth_config,
+};
+
+static struct resource v2m_usb_resources[] = {
+ {
+ .start = V2M_ISP1761,
+ .end = V2M_ISP1761 + SZ_128K - 1,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = IRQ_V2M_ISP1761,
+ .end = IRQ_V2M_ISP1761,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct isp1760_platform_data v2m_usb_config = {
+ .is_isp1761 = true,
+ .bus_width_16 = false,
+ .port1_otg = true,
+ .analog_oc = false,
+ .dack_polarity_high = false,
+ .dreq_polarity_high = false,
+};
+
+static struct platform_device v2m_usb_device = {
+ .name = "isp1760",
+ .id = -1,
+ .resource = v2m_usb_resources,
+ .num_resources = ARRAY_SIZE(v2m_usb_resources),
+ .dev.platform_data = &v2m_usb_config,
+};
+
+static int v2m_flash_init(void)
+{
+ writel(0, MMIO_P2V(V2M_SYS_FLASH));
+ return 0;
+}
+
+static void v2m_flash_exit(void)
+{
+ writel(0, MMIO_P2V(V2M_SYS_FLASH));
+}
+
+static void v2m_flash_set_vpp(int on)
+{
+ writel(on != 0, MMIO_P2V(V2M_SYS_FLASH));
+}
+
+static struct flash_platform_data v2m_flash_data = {
+ .map_name = "cfi_probe",
+ .width = 4,
+ .init = v2m_flash_init,
+ .exit = v2m_flash_exit,
+ .set_vpp = v2m_flash_set_vpp,
+};
+
+static struct resource v2m_flash_resources[] = {
+ {
+ .start = V2M_NOR0,
+ .end = V2M_NOR0 + SZ_64M - 1,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = V2M_NOR1,
+ .end = V2M_NOR1 + SZ_64M - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device v2m_flash_device = {
+ .name = "armflash",
+ .id = -1,
+ .resource = v2m_flash_resources,
+ .num_resources = ARRAY_SIZE(v2m_flash_resources),
+ .dev.platform_data = &v2m_flash_data,
+};
+
+
+static unsigned int v2m_mmci_status(struct device *dev)
+{
+ return !(readl(MMIO_P2V(V2M_SYS_MCI)) & (1 << 0));
+}
+
+static struct mmci_platform_data v2m_mmci_data = {
+ .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
+ .status = v2m_mmci_status,
+};
+
+static AMBA_DEVICE(aaci, "mb:aaci", V2M_AACI, NULL);
+static AMBA_DEVICE(mmci, "mb:mmci", V2M_MMCI, &v2m_mmci_data);
+static AMBA_DEVICE(kmi0, "mb:kmi0", V2M_KMI0, NULL);
+static AMBA_DEVICE(kmi1, "mb:kmi1", V2M_KMI1, NULL);
+static AMBA_DEVICE(uart0, "mb:uart0", V2M_UART0, NULL);
+static AMBA_DEVICE(uart1, "mb:uart1", V2M_UART1, NULL);
+static AMBA_DEVICE(uart2, "mb:uart2", V2M_UART2, NULL);
+static AMBA_DEVICE(uart3, "mb:uart3", V2M_UART3, NULL);
+static AMBA_DEVICE(wdt, "mb:wdt", V2M_WDT, NULL);
+static AMBA_DEVICE(rtc, "mb:rtc", V2M_RTC, NULL);
+
+static struct amba_device *v2m_amba_devs[] __initdata = {
+ &aaci_device,
+ &mmci_device,
+ &kmi0_device,
+ &kmi1_device,
+ &uart0_device,
+ &uart1_device,
+ &uart2_device,
+ &uart3_device,
+ &wdt_device,
+ &rtc_device,
+};
+
+
+static long v2m_osc_round(struct clk *clk, unsigned long rate)
+{
+ return rate;
+}
+
+static int v2m_osc1_set(struct clk *clk, unsigned long rate)
+{
+ return v2m_cfg_write(SYS_CFG_OSC | SYS_CFG_SITE_MB | 1, rate);
+}
+
+static const struct clk_ops osc1_clk_ops = {
+ .round = v2m_osc_round,
+ .set = v2m_osc1_set,
+};
+
+static struct clk osc1_clk = {
+ .ops = &osc1_clk_ops,
+ .rate = 24000000,
+};
+
+static struct clk osc2_clk = {
+ .rate = 24000000,
+};
+
+static struct clk_lookup v2m_lookups[] = {
+ { /* UART0 */
+ .dev_id = "mb:uart0",
+ .clk = &osc2_clk,
+ }, { /* UART1 */
+ .dev_id = "mb:uart1",
+ .clk = &osc2_clk,
+ }, { /* UART2 */
+ .dev_id = "mb:uart2",
+ .clk = &osc2_clk,
+ }, { /* UART3 */
+ .dev_id = "mb:uart3",
+ .clk = &osc2_clk,
+ }, { /* KMI0 */
+ .dev_id = "mb:kmi0",
+ .clk = &osc2_clk,
+ }, { /* KMI1 */
+ .dev_id = "mb:kmi1",
+ .clk = &osc2_clk,
+ }, { /* MMC0 */
+ .dev_id = "mb:mmci",
+ .clk = &osc2_clk,
+ }, { /* CLCD */
+ .dev_id = "mb:clcd",
+ .clk = &osc1_clk,
+ },
+};
+
+static void v2m_power_off(void)
+{
+ if (v2m_cfg_write(SYS_CFG_SHUTDOWN | SYS_CFG_SITE_MB, 0))
+ printk(KERN_EMERG "Unable to shutdown\n");
+}
+
+static void v2m_restart(char str, const char *cmd)
+{
+ if (v2m_cfg_write(SYS_CFG_REBOOT | SYS_CFG_SITE_MB, 0))
+ printk(KERN_EMERG "Unable to reboot\n");
+}
+
+static int __init v2m_init(void)
+{
+ int i;
+
+ clkdev_add_table(v2m_lookups, ARRAY_SIZE(v2m_lookups));
+
+ platform_device_register(&v2m_pcie_i2c_device);
+ platform_device_register(&v2m_ddc_i2c_device);
+ platform_device_register(&v2m_flash_device);
+ platform_device_register(&v2m_eth_device);
+ platform_device_register(&v2m_usb_device);
+
+ for (i = 0; i < ARRAY_SIZE(v2m_amba_devs); i++)
+ amba_device_register(v2m_amba_devs[i], &iomem_resource);
+
+ pm_power_off = v2m_power_off;
+ arm_pm_restart = v2m_restart;
+
+ return 0;
+}
+arch_initcall(v2m_init);
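
A tile (daughterboard) file is expected to layer on top of this motherboard code: it passes its own static mappings to v2m_map_io() and reuses v2m_cfg_write() for anything routed over the configuration bus. A hypothetical sketch, with ct_ca9x4_io_desc standing in for the tile's map_desc array:

/* Hypothetical illustration of how a tile hooks into the motherboard
 * support above: map the V2M peripherals plus the tile's own devices
 * in a single call. The array name is illustrative; the actual tile
 * code lives in ct-ca9x4.c.
 */
static void __init example_tile_map_io(void)
{
	v2m_map_io(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc));
}
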
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 5bd7c89a6045..d43aa8e40f07 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -572,6 +572,8 @@ config CPU_TLB_V6
config CPU_TLB_V7
bool
+config VERIFY_PERMISSION_FAULT
+ bool
endif
config CPU_HAS_ASID
@@ -760,7 +762,8 @@ config CACHE_FEROCEON_L2_WRITETHROUGH
config CACHE_L2X0
bool "Enable the L2x0 outer cache controller"
depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \
- REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK || ARCH_OMAP4
+ REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || \
+ ARCH_NOMADIK || ARCH_OMAP4 || ARCH_U8500 || ARCH_VEXPRESS_CA9X4
default y
select OUTER_CACHE
select OUTER_CACHE_SYNC
diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S
index 2e6dc040c654..ec88b157d3bb 100644
--- a/arch/arm/mm/abort-ev7.S
+++ b/arch/arm/mm/abort-ev7.S
@@ -29,5 +29,26 @@ ENTRY(v7_early_abort)
* V6 code adjusts the returned DFSR.
* New designs should not need to patch up faults.
*/
+
+#if defined(CONFIG_VERIFY_PERMISSION_FAULT)
+ /*
+ * Detect erroneous permission failures and fix
+ */
+ ldr r3, =0x40d @ On permission fault
+ and r3, r1, r3
+ cmp r3, #0x0d
+ movne pc, lr
+
+ mcr p15, 0, r0, c7, c8, 0 @ Retranslate FAR
+ isb
+ mrc p15, 0, r2, c7, c4, 0 @ Read the PAR
+ and r3, r2, #0x7b @ On translation fault
+ cmp r3, #0x0b
+ movne pc, lr
+ bic r1, r1, #0xf @ Fix up FSR FS[5:0]
+ and r2, r2, #0x7e
+ orr r1, r1, r2, LSR #1
+#endif
+
mov pc, lr
ENDPROC(v7_early_abort)
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index a2ab51fa73e2..6f98c358989a 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -17,6 +17,7 @@
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
@@ -94,36 +95,29 @@ static const char *usermode_action[] = {
"signal+warn"
};
-static int
-proc_alignment_read(char *page, char **start, off_t off, int count, int *eof,
- void *data)
+static int alignment_proc_show(struct seq_file *m, void *v)
{
- char *p = page;
- int len;
-
- p += sprintf(p, "User:\t\t%lu\n", ai_user);
- p += sprintf(p, "System:\t\t%lu\n", ai_sys);
- p += sprintf(p, "Skipped:\t%lu\n", ai_skipped);
- p += sprintf(p, "Half:\t\t%lu\n", ai_half);
- p += sprintf(p, "Word:\t\t%lu\n", ai_word);
+ seq_printf(m, "User:\t\t%lu\n", ai_user);
+ seq_printf(m, "System:\t\t%lu\n", ai_sys);
+ seq_printf(m, "Skipped:\t%lu\n", ai_skipped);
+ seq_printf(m, "Half:\t\t%lu\n", ai_half);
+ seq_printf(m, "Word:\t\t%lu\n", ai_word);
if (cpu_architecture() >= CPU_ARCH_ARMv5TE)
- p += sprintf(p, "DWord:\t\t%lu\n", ai_dword);
- p += sprintf(p, "Multi:\t\t%lu\n", ai_multi);
- p += sprintf(p, "User faults:\t%i (%s)\n", ai_usermode,
+ seq_printf(m, "DWord:\t\t%lu\n", ai_dword);
+ seq_printf(m, "Multi:\t\t%lu\n", ai_multi);
+ seq_printf(m, "User faults:\t%i (%s)\n", ai_usermode,
usermode_action[ai_usermode]);
- len = (p - page) - off;
- if (len < 0)
- len = 0;
-
- *eof = (len <= count) ? 1 : 0;
- *start = page + off;
+ return 0;
+}
- return len;
+static int alignment_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, alignment_proc_show, NULL);
}
-static int proc_alignment_write(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
+static ssize_t alignment_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
char mode;
@@ -136,6 +130,13 @@ static int proc_alignment_write(struct file *file, const char __user *buffer,
return count;
}
+static const struct file_operations alignment_proc_fops = {
+ .open = alignment_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = alignment_proc_write,
+};
#endif /* CONFIG_PROC_FS */
union offset_union {
@@ -901,12 +902,10 @@ static int __init alignment_init(void)
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *res;
- res = create_proc_entry("cpu/alignment", S_IWUSR | S_IRUGO, NULL);
+ res = proc_create("cpu/alignment", S_IWUSR | S_IRUGO, NULL,
+ &alignment_proc_fops);
if (!res)
return -ENOMEM;
-
- res->read_proc = proc_alignment_read;
- res->write_proc = proc_alignment_write;
#endif
/*
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 9d89c67a1cc3..e46ecd847138 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -211,6 +211,9 @@ v6_dma_inv_range:
mcrne p15, 0, r1, c7, c15, 1 @ clean & invalidate unified line
#endif
1:
+#ifdef CONFIG_SMP
+ str r0, [r0] @ write for ownership
+#endif
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c6, 1 @ invalidate D line
#else
@@ -231,6 +234,9 @@ v6_dma_inv_range:
v6_dma_clean_range:
bic r0, r0, #D_CACHE_LINE_SIZE - 1
1:
+#ifdef CONFIG_SMP
+ ldr r2, [r0] @ read for ownership
+#endif
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c10, 1 @ clean D line
#else
@@ -251,6 +257,10 @@ v6_dma_clean_range:
ENTRY(v6_dma_flush_range)
bic r0, r0, #D_CACHE_LINE_SIZE - 1
1:
+#ifdef CONFIG_SMP
+ ldr r2, [r0] @ read for ownership
+ str r2, [r0] @ write for ownership
+#endif
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line
#else
@@ -273,7 +283,9 @@ ENTRY(v6_dma_map_area)
add r1, r1, r0
teq r2, #DMA_FROM_DEVICE
beq v6_dma_inv_range
- b v6_dma_clean_range
+ teq r2, #DMA_TO_DEVICE
+ beq v6_dma_clean_range
+ b v6_dma_flush_range
ENDPROC(v6_dma_map_area)
/*
@@ -283,9 +295,6 @@ ENDPROC(v6_dma_map_area)
* - dir - DMA direction
*/
ENTRY(v6_dma_unmap_area)
- add r1, r1, r0
- teq r2, #DMA_TO_DEVICE
- bne v6_dma_inv_range
mov pc, lr
ENDPROC(v6_dma_unmap_area)
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index bcd64f265870..06a90dcfc60a 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -167,7 +167,11 @@ ENTRY(v7_coherent_user_range)
cmp r0, r1
blo 1b
mov r0, #0
+#ifdef CONFIG_SMP
+ mcr p15, 0, r0, c7, c1, 6 @ invalidate BTB Inner Shareable
+#else
mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB
+#endif
dsb
isb
mov pc, lr
diff --git a/arch/arm/mm/copypage-fa.c b/arch/arm/mm/copypage-fa.c
index b2a6008b0111..d2852e1635b1 100644
--- a/arch/arm/mm/copypage-fa.c
+++ b/arch/arm/mm/copypage-fa.c
@@ -40,7 +40,7 @@ fa_copy_user_page(void *kto, const void *kfrom)
}
void fa_copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr)
+ unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto, *kfrom;
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 0d414c28eb2c..9b906dec1ca1 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -134,8 +134,6 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
flush_dcache_mmap_unlock(mapping);
if (aliases)
do_adjust_pte(vma, addr, pfn, ptep);
- else
- flush_cache_page(vma, addr, pfn);
}
/*
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 0ed29bfeba1c..1ba6cf5a2c02 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -15,7 +15,6 @@
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
-#include <linux/sort.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
@@ -224,20 +223,6 @@ static int __init check_initrd(struct meminfo *mi)
return initrd_node;
}
-static inline void map_memory_bank(struct membank *bank)
-{
-#ifdef CONFIG_MMU
- struct map_desc map;
-
- map.pfn = bank_pfn_start(bank);
- map.virtual = __phys_to_virt(bank_phys_start(bank));
- map.length = bank_phys_size(bank);
- map.type = MT_MEMORY;
-
- create_mapping(&map);
-#endif
-}
-
static void __init bootmem_init_node(int node, struct meminfo *mi,
unsigned long start_pfn, unsigned long end_pfn)
{
@@ -247,16 +232,6 @@ static void __init bootmem_init_node(int node, struct meminfo *mi,
int i;
/*
- * Map the memory banks for this node.
- */
- for_each_nodebank(i, mi, node) {
- struct membank *bank = &mi->bank[i];
-
- if (!bank->highmem)
- map_memory_bank(bank);
- }
-
- /*
* Allocate the bootmem bitmap page.
*/
boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
@@ -385,21 +360,12 @@ static void arm_memory_present(struct meminfo *mi, int node)
}
#endif
-static int __init meminfo_cmp(const void *_a, const void *_b)
-{
- const struct membank *a = _a, *b = _b;
- long cmp = bank_pfn_start(a) - bank_pfn_start(b);
- return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
-}
-
void __init bootmem_init(void)
{
struct meminfo *mi = &meminfo;
unsigned long min, max_low, max_high;
int node, initrd_node;
- sort(&mi->bank, mi->nr_banks, sizeof(mi->bank[0]), meminfo_cmp, NULL);
-
/*
* Locate which node contains the ramdisk image, if any.
*/
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 28c8b950ef04..03f11935ed08 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -268,6 +268,12 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
return NULL;
+ /*
+ * Don't allow RAM to be mapped - this causes problems with ARMv6+
+ */
+ if (WARN_ON(pfn_valid(pfn)))
+ return NULL;
+
type = get_mem_type(mtype);
if (!type)
return NULL;
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index a888363398f8..815d08eecbb0 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -28,10 +28,7 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
#endif
-struct map_desc;
-struct meminfo;
struct pglist_data;
-void __init create_mapping(struct map_desc *md);
void __init bootmem_init(void);
void reserve_node_zero(struct pglist_data *pgdat);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 241c24a1c18f..e7113d0b8168 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -14,6 +14,7 @@
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
+#include <linux/sort.h>
#include <asm/cputype.h>
#include <asm/mach-types.h>
@@ -603,7 +604,7 @@ static void __init create_36bit_mapping(struct map_desc *md,
* offsets, and we take full advantage of sections and
* supersections.
*/
-void __init create_mapping(struct map_desc *md)
+static void __init create_mapping(struct map_desc *md)
{
unsigned long phys, addr, length, end;
const struct mem_type *type;
@@ -1017,6 +1018,39 @@ static void __init kmap_init(void)
#endif
}
+static inline void map_memory_bank(struct membank *bank)
+{
+ struct map_desc map;
+
+ map.pfn = bank_pfn_start(bank);
+ map.virtual = __phys_to_virt(bank_phys_start(bank));
+ map.length = bank_phys_size(bank);
+ map.type = MT_MEMORY;
+
+ create_mapping(&map);
+}
+
+static void __init map_lowmem(void)
+{
+ struct meminfo *mi = &meminfo;
+ int i;
+
+ /* Map all the lowmem memory banks. */
+ for (i = 0; i < mi->nr_banks; i++) {
+ struct membank *bank = &mi->bank[i];
+
+ if (!bank->highmem)
+ map_memory_bank(bank);
+ }
+}
+
+static int __init meminfo_cmp(const void *_a, const void *_b)
+{
+ const struct membank *a = _a, *b = _b;
+ long cmp = bank_pfn_start(a) - bank_pfn_start(b);
+ return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
+}
+
/*
* paging_init() sets up the page tables, initialises the zone memory
* maps, and sets up the zero page, bad page and bad page tables.
@@ -1025,9 +1059,12 @@ void __init paging_init(struct machine_desc *mdesc)
{
void *zero_page;
+ sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
+
build_mem_type_table();
sanity_check_meminfo();
prepare_page_table();
+ map_lowmem();
bootmem_init();
devicemaps_init(mdesc);
kmap_init();
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 9bfeb6b9509a..33b327379f07 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -65,6 +65,15 @@ void flush_dcache_page(struct page *page)
}
EXPORT_SYMBOL(flush_dcache_page);
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long uaddr, void *dst, const void *src,
+ unsigned long len)
+{
+ memcpy(dst, src, len);
+ if (vma->vm_flags & VM_EXEC)
+ __cpuc_coherent_user_range(uaddr, uaddr + len);
+}
+
void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
size_t size, unsigned int mtype)
{
@@ -87,8 +96,8 @@ void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size,
}
EXPORT_SYMBOL(__arm_ioremap);
-void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size,
- unsigned int mtype, void *caller)
+void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
+ unsigned int mtype, void *caller)
{
return __arm_ioremap(phys_addr, size, mtype);
}
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index 0cb1848bd876..f3f288a9546d 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -50,7 +50,11 @@ ENTRY(v7wbi_flush_user_tlb_range)
cmp r0, r1
blo 1b
mov ip, #0
+#ifdef CONFIG_SMP
+ mcr p15, 0, ip, c7, c1, 6 @ flush BTAC/BTB Inner Shareable
+#else
mcr p15, 0, ip, c7, c5, 6 @ flush BTAC/BTB
+#endif
dsb
mov pc, lr
ENDPROC(v7wbi_flush_user_tlb_range)
@@ -79,7 +83,11 @@ ENTRY(v7wbi_flush_kern_tlb_range)
cmp r0, r1
blo 1b
mov r2, #0
+#ifdef CONFIG_SMP
+ mcr p15, 0, r2, c7, c1, 6 @ flush BTAC/BTB Inner Shareable
+#else
mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
+#endif
dsb
isb
mov pc, lr
diff --git a/arch/arm/oprofile/Makefile b/arch/arm/oprofile/Makefile
index 88e31f549f50..e666eafed152 100644
--- a/arch/arm/oprofile/Makefile
+++ b/arch/arm/oprofile/Makefile
@@ -6,9 +6,4 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
oprofilefs.o oprofile_stats.o \
timer_int.o )
-oprofile-y := $(DRIVER_OBJS) common.o backtrace.o
-oprofile-$(CONFIG_CPU_XSCALE) += op_model_xscale.o
-oprofile-$(CONFIG_OPROFILE_ARM11_CORE) += op_model_arm11_core.o
-oprofile-$(CONFIG_OPROFILE_ARMV6) += op_model_v6.o
-oprofile-$(CONFIG_OPROFILE_MPCORE) += op_model_mpcore.o
-oprofile-$(CONFIG_OPROFILE_ARMV7) += op_model_v7.o
+oprofile-y := $(DRIVER_OBJS) common.o
diff --git a/arch/arm/oprofile/backtrace.c b/arch/arm/oprofile/backtrace.c
deleted file mode 100644
index d805a52b5032..000000000000
--- a/arch/arm/oprofile/backtrace.c
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Arm specific backtracing code for oprofile
- *
- * Copyright 2005 Openedhand Ltd.
- *
- * Author: Richard Purdie <rpurdie@openedhand.com>
- *
- * Based on i386 oprofile backtrace code by John Levon, David Smith
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/oprofile.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/uaccess.h>
-#include <asm/ptrace.h>
-#include <asm/stacktrace.h>
-
-static int report_trace(struct stackframe *frame, void *d)
-{
- unsigned int *depth = d;
-
- if (*depth) {
- oprofile_add_trace(frame->pc);
- (*depth)--;
- }
-
- return *depth == 0;
-}
-
-/*
- * The registers we're interested in are at the end of the variable
- * length saved register structure. The fp points at the end of this
- * structure so the address of this struct is:
- * (struct frame_tail *)(xxx->fp)-1
- */
-struct frame_tail {
- struct frame_tail *fp;
- unsigned long sp;
- unsigned long lr;
-} __attribute__((packed));
-
-static struct frame_tail* user_backtrace(struct frame_tail *tail)
-{
- struct frame_tail buftail[2];
-
- /* Also check accessibility of one struct frame_tail beyond */
- if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
- return NULL;
- if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail)))
- return NULL;
-
- oprofile_add_trace(buftail[0].lr);
-
- /* frame pointers should strictly progress back up the stack
- * (towards higher addresses) */
- if (tail >= buftail[0].fp)
- return NULL;
-
- return buftail[0].fp-1;
-}
-
-void arm_backtrace(struct pt_regs * const regs, unsigned int depth)
-{
- struct frame_tail *tail = ((struct frame_tail *) regs->ARM_fp) - 1;
-
- if (!user_mode(regs)) {
- struct stackframe frame;
- frame.fp = regs->ARM_fp;
- frame.sp = regs->ARM_sp;
- frame.lr = regs->ARM_lr;
- frame.pc = regs->ARM_pc;
- walk_stackframe(&frame, report_trace, &depth);
- return;
- }
-
- while (depth-- && tail && !((unsigned long) tail & 3))
- tail = user_backtrace(tail);
-}
diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c
index 3fcd752d6146..0691176899ff 100644
--- a/arch/arm/oprofile/common.c
+++ b/arch/arm/oprofile/common.c
@@ -2,32 +2,184 @@
* @file common.c
*
* @remark Copyright 2004 Oprofile Authors
+ * @remark Copyright 2010 ARM Ltd.
* @remark Read the file COPYING
*
* @author Zwane Mwaikambo
+ * @author Will Deacon [move to perf]
*/
+#include <linux/cpumask.h>
+#include <linux/err.h>
+#include <linux/errno.h>
#include <linux/init.h>
+#include <linux/mutex.h>
#include <linux/oprofile.h>
-#include <linux/errno.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/sysdev.h>
-#include <linux/mutex.h>
+#include <asm/stacktrace.h>
+#include <linux/uaccess.h>
-#include "op_counter.h"
-#include "op_arm_model.h"
+#include <asm/perf_event.h>
+#include <asm/ptrace.h>
+
+#ifdef CONFIG_HW_PERF_EVENTS
+/*
+ * Per performance monitor configuration as set via oprofilefs.
+ */
+struct op_counter_config {
+ unsigned long count;
+ unsigned long enabled;
+ unsigned long event;
+ unsigned long unit_mask;
+ unsigned long kernel;
+ unsigned long user;
+ struct perf_event_attr attr;
+};
-static struct op_arm_model_spec *op_arm_model;
static int op_arm_enabled;
static DEFINE_MUTEX(op_arm_mutex);
-struct op_counter_config *counter_config;
+static struct op_counter_config *counter_config;
+static struct perf_event **perf_events[nr_cpumask_bits];
+static int perf_num_counters;
+
+/*
+ * Overflow callback for oprofile.
+ */
+static void op_overflow_handler(struct perf_event *event, int unused,
+ struct perf_sample_data *data, struct pt_regs *regs)
+{
+ int id;
+ u32 cpu = smp_processor_id();
+
+ for (id = 0; id < perf_num_counters; ++id)
+ if (perf_events[cpu][id] == event)
+ break;
+
+ if (id != perf_num_counters)
+ oprofile_add_sample(regs, id);
+ else
+ pr_warning("oprofile: ignoring spurious overflow "
+ "on cpu %u\n", cpu);
+}
+
+/*
+ * Called by op_arm_setup to create perf attributes to mirror the oprofile
+ * settings in counter_config. Attributes are created as `pinned' events and
+ * so are permanently scheduled on the PMU.
+ */
+static void op_perf_setup(void)
+{
+ int i;
+ u32 size = sizeof(struct perf_event_attr);
+ struct perf_event_attr *attr;
+
+ for (i = 0; i < perf_num_counters; ++i) {
+ attr = &counter_config[i].attr;
+ memset(attr, 0, size);
+ attr->type = PERF_TYPE_RAW;
+ attr->size = size;
+ attr->config = counter_config[i].event;
+ attr->sample_period = counter_config[i].count;
+ attr->pinned = 1;
+ }
+}
+
+static int op_create_counter(int cpu, int event)
+{
+ int ret = 0;
+ struct perf_event *pevent;
+
+ if (!counter_config[event].enabled || (perf_events[cpu][event] != NULL))
+ return ret;
+
+ pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
+ cpu, -1,
+ op_overflow_handler);
+
+ if (IS_ERR(pevent)) {
+ ret = PTR_ERR(pevent);
+ } else if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
+ pr_warning("oprofile: failed to enable event %d "
+ "on CPU %d\n", event, cpu);
+ ret = -EBUSY;
+ } else {
+ perf_events[cpu][event] = pevent;
+ }
+
+ return ret;
+}
+
+static void op_destroy_counter(int cpu, int event)
+{
+ struct perf_event *pevent = perf_events[cpu][event];
+
+ if (pevent) {
+ perf_event_release_kernel(pevent);
+ perf_events[cpu][event] = NULL;
+ }
+}
+
+/*
+ * Called by op_arm_start to create active perf events based on the
+ * previously configured attributes.
+ */
+static int op_perf_start(void)
+{
+ int cpu, event, ret = 0;
+
+ for_each_online_cpu(cpu) {
+ for (event = 0; event < perf_num_counters; ++event) {
+ ret = op_create_counter(cpu, event);
+ if (ret)
+ goto out;
+ }
+ }
+
+out:
+ return ret;
+}
+
+/*
+ * Called by op_arm_stop at the end of a profiling run.
+ */
+static void op_perf_stop(void)
+{
+ int cpu, event;
+
+ for_each_online_cpu(cpu)
+ for (event = 0; event < perf_num_counters; ++event)
+ op_destroy_counter(cpu, event);
+}
+
+
+static char *op_name_from_perf_id(enum arm_perf_pmu_ids id)
+{
+ switch (id) {
+ case ARM_PERF_PMU_ID_XSCALE1:
+ return "arm/xscale1";
+ case ARM_PERF_PMU_ID_XSCALE2:
+ return "arm/xscale2";
+ case ARM_PERF_PMU_ID_V6:
+ return "arm/armv6";
+ case ARM_PERF_PMU_ID_V6MP:
+ return "arm/mpcore";
+ case ARM_PERF_PMU_ID_CA8:
+ return "arm/armv7";
+ case ARM_PERF_PMU_ID_CA9:
+ return "arm/armv7-ca9";
+ default:
+ return NULL;
+ }
+}
static int op_arm_create_files(struct super_block *sb, struct dentry *root)
{
unsigned int i;
- for (i = 0; i < op_arm_model->num_counters; i++) {
+ for (i = 0; i < perf_num_counters; i++) {
struct dentry *dir;
char buf[4];
@@ -46,12 +198,10 @@ static int op_arm_create_files(struct super_block *sb, struct dentry *root)
static int op_arm_setup(void)
{
- int ret;
-
spin_lock(&oprofilefs_lock);
- ret = op_arm_model->setup_ctrs();
+ op_perf_setup();
spin_unlock(&oprofilefs_lock);
- return ret;
+ return 0;
}
static int op_arm_start(void)
@@ -60,8 +210,9 @@ static int op_arm_start(void)
mutex_lock(&op_arm_mutex);
if (!op_arm_enabled) {
- ret = op_arm_model->start();
- op_arm_enabled = !ret;
+ ret = 0;
+ op_perf_start();
+ op_arm_enabled = 1;
}
mutex_unlock(&op_arm_mutex);
return ret;
@@ -71,113 +222,205 @@ static void op_arm_stop(void)
{
mutex_lock(&op_arm_mutex);
if (op_arm_enabled)
- op_arm_model->stop();
+ op_perf_stop();
op_arm_enabled = 0;
mutex_unlock(&op_arm_mutex);
}
#ifdef CONFIG_PM
-static int op_arm_suspend(struct sys_device *dev, pm_message_t state)
+static int op_arm_suspend(struct platform_device *dev, pm_message_t state)
{
mutex_lock(&op_arm_mutex);
if (op_arm_enabled)
- op_arm_model->stop();
+ op_perf_stop();
mutex_unlock(&op_arm_mutex);
return 0;
}
-static int op_arm_resume(struct sys_device *dev)
+static int op_arm_resume(struct platform_device *dev)
{
mutex_lock(&op_arm_mutex);
- if (op_arm_enabled && op_arm_model->start())
+ if (op_arm_enabled && op_perf_start())
op_arm_enabled = 0;
mutex_unlock(&op_arm_mutex);
return 0;
}
-static struct sysdev_class oprofile_sysclass = {
- .name = "oprofile",
+static struct platform_driver oprofile_driver = {
+ .driver = {
+ .name = "arm-oprofile",
+ },
.resume = op_arm_resume,
.suspend = op_arm_suspend,
};
-static struct sys_device device_oprofile = {
- .id = 0,
- .cls = &oprofile_sysclass,
-};
+static struct platform_device *oprofile_pdev;
static int __init init_driverfs(void)
{
int ret;
- if (!(ret = sysdev_class_register(&oprofile_sysclass)))
- ret = sysdev_register(&device_oprofile);
+ ret = platform_driver_register(&oprofile_driver);
+ if (ret)
+ goto out;
+ oprofile_pdev = platform_device_register_simple(
+ oprofile_driver.driver.name, 0, NULL, 0);
+ if (IS_ERR(oprofile_pdev)) {
+ ret = PTR_ERR(oprofile_pdev);
+ platform_driver_unregister(&oprofile_driver);
+ }
+
+out:
return ret;
}
static void exit_driverfs(void)
{
- sysdev_unregister(&device_oprofile);
- sysdev_class_unregister(&oprofile_sysclass);
+ platform_device_unregister(oprofile_pdev);
+ platform_driver_unregister(&oprofile_driver);
}
#else
-#define init_driverfs() do { } while (0)
+static int __init init_driverfs(void) { return 0; }
#define exit_driverfs() do { } while (0)
#endif /* CONFIG_PM */
-int __init oprofile_arch_init(struct oprofile_operations *ops)
+static int report_trace(struct stackframe *frame, void *d)
{
- struct op_arm_model_spec *spec = NULL;
- int ret = -ENODEV;
+ unsigned int *depth = d;
- ops->backtrace = arm_backtrace;
+ if (*depth) {
+ oprofile_add_trace(frame->pc);
+ (*depth)--;
+ }
-#ifdef CONFIG_CPU_XSCALE
- spec = &op_xscale_spec;
-#endif
+ return *depth == 0;
+}
-#ifdef CONFIG_OPROFILE_ARMV6
- spec = &op_armv6_spec;
-#endif
+/*
+ * The registers we're interested in are at the end of the variable
+ * length saved register structure. The fp points at the end of this
+ * structure so the address of this struct is:
+ * (struct frame_tail *)(xxx->fp)-1
+ */
+struct frame_tail {
+ struct frame_tail *fp;
+ unsigned long sp;
+ unsigned long lr;
+} __attribute__((packed));
-#ifdef CONFIG_OPROFILE_MPCORE
- spec = &op_mpcore_spec;
-#endif
+static struct frame_tail* user_backtrace(struct frame_tail *tail)
+{
+ struct frame_tail buftail[2];
-#ifdef CONFIG_OPROFILE_ARMV7
- spec = &op_armv7_spec;
-#endif
+ /* Also check accessibility of one struct frame_tail beyond */
+ if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
+ return NULL;
+ if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail)))
+ return NULL;
- if (spec) {
- ret = spec->init();
- if (ret < 0)
- return ret;
+ oprofile_add_trace(buftail[0].lr);
- counter_config = kcalloc(spec->num_counters, sizeof(struct op_counter_config),
- GFP_KERNEL);
- if (!counter_config)
- return -ENOMEM;
+ /* frame pointers should strictly progress back up the stack
+ * (towards higher addresses) */
+ if (tail >= buftail[0].fp)
+ return NULL;
- op_arm_model = spec;
- init_driverfs();
- ops->create_files = op_arm_create_files;
- ops->setup = op_arm_setup;
- ops->shutdown = op_arm_stop;
- ops->start = op_arm_start;
- ops->stop = op_arm_stop;
- ops->cpu_type = op_arm_model->name;
- printk(KERN_INFO "oprofile: using %s\n", spec->name);
+ return buftail[0].fp-1;
+}
+
+static void arm_backtrace(struct pt_regs * const regs, unsigned int depth)
+{
+ struct frame_tail *tail = ((struct frame_tail *) regs->ARM_fp) - 1;
+
+ if (!user_mode(regs)) {
+ struct stackframe frame;
+ frame.fp = regs->ARM_fp;
+ frame.sp = regs->ARM_sp;
+ frame.lr = regs->ARM_lr;
+ frame.pc = regs->ARM_pc;
+ walk_stackframe(&frame, report_trace, &depth);
+ return;
}
+ while (depth-- && tail && !((unsigned long) tail & 3))
+ tail = user_backtrace(tail);
+}
+
+int __init oprofile_arch_init(struct oprofile_operations *ops)
+{
+ int cpu, ret = 0;
+
+ perf_num_counters = armpmu_get_max_events();
+
+ counter_config = kcalloc(perf_num_counters,
+ sizeof(struct op_counter_config), GFP_KERNEL);
+
+ if (!counter_config) {
+ pr_info("oprofile: failed to allocate %d "
+ "counters\n", perf_num_counters);
+ return -ENOMEM;
+ }
+
+ ret = init_driverfs();
+ if (ret) {
+ kfree(counter_config);
+ return ret;
+ }
+
+ for_each_possible_cpu(cpu) {
+ perf_events[cpu] = kcalloc(perf_num_counters,
+ sizeof(struct perf_event *), GFP_KERNEL);
+ if (!perf_events[cpu]) {
+ pr_info("oprofile: failed to allocate %d perf events "
+ "for cpu %d\n", perf_num_counters, cpu);
+ while (--cpu >= 0)
+ kfree(perf_events[cpu]);
+ return -ENOMEM;
+ }
+ }
+
+ ops->backtrace = arm_backtrace;
+ ops->create_files = op_arm_create_files;
+ ops->setup = op_arm_setup;
+ ops->start = op_arm_start;
+ ops->stop = op_arm_stop;
+ ops->shutdown = op_arm_stop;
+ ops->cpu_type = op_name_from_perf_id(armpmu_get_pmu_id());
+
+ if (!ops->cpu_type)
+ ret = -ENODEV;
+ else
+ pr_info("oprofile: using %s\n", ops->cpu_type);
+
return ret;
}
void oprofile_arch_exit(void)
{
- if (op_arm_model) {
+ int cpu, id;
+ struct perf_event *event;
+
+ if (*perf_events) {
exit_driverfs();
- op_arm_model = NULL;
+ for_each_possible_cpu(cpu) {
+ for (id = 0; id < perf_num_counters; ++id) {
+ event = perf_events[cpu][id];
+ if (event != NULL)
+ perf_event_release_kernel(event);
+ }
+ kfree(perf_events[cpu]);
+ }
}
- kfree(counter_config);
+
+ if (counter_config)
+ kfree(counter_config);
+}
+#else
+int __init oprofile_arch_init(struct oprofile_operations *ops)
+{
+ pr_info("oprofile: hardware counters not available\n");
+ return -ENODEV;
}
+void oprofile_arch_exit(void) {}
+#endif /* CONFIG_HW_PERF_EVENTS */
diff --git a/arch/arm/oprofile/op_arm_model.h b/arch/arm/oprofile/op_arm_model.h
deleted file mode 100644
index 8c4e4f6a1de3..000000000000
--- a/arch/arm/oprofile/op_arm_model.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * @file op_arm_model.h
- * interface to ARM machine specific operations
- *
- * @remark Copyright 2004 Oprofile Authors
- * @remark Read the file COPYING
- *
- * @author Zwane Mwaikambo
- */
-
-#ifndef OP_ARM_MODEL_H
-#define OP_ARM_MODEL_H
-
-struct op_arm_model_spec {
- int (*init)(void);
- unsigned int num_counters;
- int (*setup_ctrs)(void);
- int (*start)(void);
- void (*stop)(void);
- char *name;
-};
-
-#ifdef CONFIG_CPU_XSCALE
-extern struct op_arm_model_spec op_xscale_spec;
-#endif
-
-extern struct op_arm_model_spec op_armv6_spec;
-extern struct op_arm_model_spec op_mpcore_spec;
-extern struct op_arm_model_spec op_armv7_spec;
-
-extern void arm_backtrace(struct pt_regs * const regs, unsigned int depth);
-
-extern int __init op_arm_init(struct oprofile_operations *ops, struct op_arm_model_spec *spec);
-extern void op_arm_exit(void);
-#endif /* OP_ARM_MODEL_H */
diff --git a/arch/arm/oprofile/op_counter.h b/arch/arm/oprofile/op_counter.h
deleted file mode 100644
index ca942a63b52f..000000000000
--- a/arch/arm/oprofile/op_counter.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * @file op_counter.h
- *
- * @remark Copyright 2004 Oprofile Authors
- * @remark Read the file COPYING
- *
- * @author Zwane Mwaikambo
- */
-
-#ifndef OP_COUNTER_H
-#define OP_COUNTER_H
-
-/* Per performance monitor configuration as set via
- * oprofilefs.
- */
-struct op_counter_config {
- unsigned long count;
- unsigned long enabled;
- unsigned long event;
- unsigned long unit_mask;
- unsigned long kernel;
- unsigned long user;
-};
-
-extern struct op_counter_config *counter_config;
-
-#endif /* OP_COUNTER_H */
diff --git a/arch/arm/oprofile/op_model_arm11_core.c b/arch/arm/oprofile/op_model_arm11_core.c
deleted file mode 100644
index ef3e2653b90c..000000000000
--- a/arch/arm/oprofile/op_model_arm11_core.c
+++ /dev/null
@@ -1,162 +0,0 @@
-/**
- * @file op_model_arm11_core.c
- * ARM11 Event Monitor Driver
- * @remark Copyright 2004 ARM SMP Development Team
- */
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/oprofile.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/smp.h>
-
-#include "op_counter.h"
-#include "op_arm_model.h"
-#include "op_model_arm11_core.h"
-
-/*
- * ARM11 PMU support
- */
-static inline void arm11_write_pmnc(u32 val)
-{
- /* upper 4bits and 7, 11 are write-as-0 */
- val &= 0x0ffff77f;
- asm volatile("mcr p15, 0, %0, c15, c12, 0" : : "r" (val));
-}
-
-static inline u32 arm11_read_pmnc(void)
-{
- u32 val;
- asm volatile("mrc p15, 0, %0, c15, c12, 0" : "=r" (val));
- return val;
-}
-
-static void arm11_reset_counter(unsigned int cnt)
-{
- u32 val = -(u32)counter_config[CPU_COUNTER(smp_processor_id(), cnt)].count;
- switch (cnt) {
- case CCNT:
- asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r" (val));
- break;
-
- case PMN0:
- asm volatile("mcr p15, 0, %0, c15, c12, 2" : : "r" (val));
- break;
-
- case PMN1:
- asm volatile("mcr p15, 0, %0, c15, c12, 3" : : "r" (val));
- break;
- }
-}
-
-int arm11_setup_pmu(void)
-{
- unsigned int cnt;
- u32 pmnc;
-
- if (arm11_read_pmnc() & PMCR_E) {
- printk(KERN_ERR "oprofile: CPU%u PMU still enabled when setting up new event counter.\n", smp_processor_id());
- return -EBUSY;
- }
-
- /* initialize PMNC, reset overflow, D bit, C bit and P bit. */
- arm11_write_pmnc(PMCR_OFL_PMN0 | PMCR_OFL_PMN1 | PMCR_OFL_CCNT |
- PMCR_C | PMCR_P);
-
- for (pmnc = 0, cnt = PMN0; cnt <= CCNT; cnt++) {
- unsigned long event;
-
- if (!counter_config[CPU_COUNTER(smp_processor_id(), cnt)].enabled)
- continue;
-
- event = counter_config[CPU_COUNTER(smp_processor_id(), cnt)].event & 255;
-
- /*
- * Set event (if destined for PMNx counters)
- */
- if (cnt == PMN0) {
- pmnc |= event << 20;
- } else if (cnt == PMN1) {
- pmnc |= event << 12;
- }
-
- /*
- * We don't need to set the event if it's a cycle count
- * Enable interrupt for this counter
- */
- pmnc |= PMCR_IEN_PMN0 << cnt;
- arm11_reset_counter(cnt);
- }
- arm11_write_pmnc(pmnc);
-
- return 0;
-}
-
-int arm11_start_pmu(void)
-{
- arm11_write_pmnc(arm11_read_pmnc() | PMCR_E);
- return 0;
-}
-
-int arm11_stop_pmu(void)
-{
- unsigned int cnt;
-
- arm11_write_pmnc(arm11_read_pmnc() & ~PMCR_E);
-
- for (cnt = PMN0; cnt <= CCNT; cnt++)
- arm11_reset_counter(cnt);
-
- return 0;
-}
-
-/*
- * CPU counters' IRQ handler (one IRQ per CPU)
- */
-static irqreturn_t arm11_pmu_interrupt(int irq, void *arg)
-{
- struct pt_regs *regs = get_irq_regs();
- unsigned int cnt;
- u32 pmnc;
-
- pmnc = arm11_read_pmnc();
-
- for (cnt = PMN0; cnt <= CCNT; cnt++) {
- if ((pmnc & (PMCR_OFL_PMN0 << cnt)) && (pmnc & (PMCR_IEN_PMN0 << cnt))) {
- arm11_reset_counter(cnt);
- oprofile_add_sample(regs, CPU_COUNTER(smp_processor_id(), cnt));
- }
- }
- /* Clear counter flag(s) */
- arm11_write_pmnc(pmnc);
- return IRQ_HANDLED;
-}
-
-int arm11_request_interrupts(const int *irqs, int nr)
-{
- unsigned int i;
- int ret = 0;
-
- for(i = 0; i < nr; i++) {
- ret = request_irq(irqs[i], arm11_pmu_interrupt, IRQF_DISABLED, "CP15 PMU", NULL);
- if (ret != 0) {
- printk(KERN_ERR "oprofile: unable to request IRQ%u for MPCORE-EM\n",
- irqs[i]);
- break;
- }
- }
-
- if (i != nr)
- while (i-- != 0)
- free_irq(irqs[i], NULL);
-
- return ret;
-}
-
-void arm11_release_interrupts(const int *irqs, int nr)
-{
- unsigned int i;
-
- for (i = 0; i < nr; i++)
- free_irq(irqs[i], NULL);
-}
diff --git a/arch/arm/oprofile/op_model_arm11_core.h b/arch/arm/oprofile/op_model_arm11_core.h
deleted file mode 100644
index 1902b99d9dfd..000000000000
--- a/arch/arm/oprofile/op_model_arm11_core.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * @file op_model_arm11_core.h
- * ARM11 Event Monitor Driver
- * @remark Copyright 2004 ARM SMP Development Team
- * @remark Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
- * @remark Copyright 2000-2004 MontaVista Software Inc
- * @remark Copyright 2004 Dave Jiang <dave.jiang@intel.com>
- * @remark Copyright 2004 Intel Corporation
- * @remark Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
- * @remark Copyright 2004 Oprofile Authors
- *
- * @remark Read the file COPYING
- *
- * @author Zwane Mwaikambo
- */
-#ifndef OP_MODEL_ARM11_CORE_H
-#define OP_MODEL_ARM11_CORE_H
-
-/*
- * Per-CPU PMCR
- */
-#define PMCR_E (1 << 0) /* Enable */
-#define PMCR_P (1 << 1) /* Count reset */
-#define PMCR_C (1 << 2) /* Cycle counter reset */
-#define PMCR_D (1 << 3) /* Cycle counter counts every 64th cpu cycle */
-#define PMCR_IEN_PMN0 (1 << 4) /* Interrupt enable count reg 0 */
-#define PMCR_IEN_PMN1 (1 << 5) /* Interrupt enable count reg 1 */
-#define PMCR_IEN_CCNT (1 << 6) /* Interrupt enable cycle counter */
-#define PMCR_OFL_PMN0 (1 << 8) /* Count reg 0 overflow */
-#define PMCR_OFL_PMN1 (1 << 9) /* Count reg 1 overflow */
-#define PMCR_OFL_CCNT (1 << 10) /* Cycle counter overflow */
-
-#define PMN0 0
-#define PMN1 1
-#define CCNT 2
-
-#define CPU_COUNTER(cpu, counter) ((cpu) * 3 + (counter))
-
-int arm11_setup_pmu(void);
-int arm11_start_pmu(void);
-int arm11_stop_pmu(void);
-int arm11_request_interrupts(const int *, int);
-void arm11_release_interrupts(const int *, int);
-
-#endif
diff --git a/arch/arm/oprofile/op_model_mpcore.c b/arch/arm/oprofile/op_model_mpcore.c
deleted file mode 100644
index f73ce875a395..000000000000
--- a/arch/arm/oprofile/op_model_mpcore.c
+++ /dev/null
@@ -1,306 +0,0 @@
-/**
- * @file op_model_mpcore.c
- * MPCORE Event Monitor Driver
- * @remark Copyright 2004 ARM SMP Development Team
- * @remark Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
- * @remark Copyright 2000-2004 MontaVista Software Inc
- * @remark Copyright 2004 Dave Jiang <dave.jiang@intel.com>
- * @remark Copyright 2004 Intel Corporation
- * @remark Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
- * @remark Copyright 2004 Oprofile Authors
- *
- * @remark Read the file COPYING
- *
- * @author Zwane Mwaikambo
- *
- * Counters:
- * 0: PMN0 on CPU0, per-cpu configurable event counter
- * 1: PMN1 on CPU0, per-cpu configurable event counter
- * 2: CCNT on CPU0
- * 3: PMN0 on CPU1
- * 4: PMN1 on CPU1
- * 5: CCNT on CPU1
- * 6: PMN0 on CPU2
- * 7: PMN1 on CPU2
- * 8: CCNT on CPU2
- * 9: PMN0 on CPU3
- * 10: PMN1 on CPU3
- * 11: CCNT on CPU3
- * 12-19: configurable SCU event counters
- */
-
-/* #define DEBUG */
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/sched.h>
-#include <linux/oprofile.h>
-#include <linux/interrupt.h>
-#include <linux/smp.h>
-#include <linux/io.h>
-
-#include <asm/irq.h>
-#include <asm/mach/irq.h>
-#include <mach/hardware.h>
-#include <mach/board-eb.h>
-#include <asm/system.h>
-#include <asm/pmu.h>
-
-#include "op_counter.h"
-#include "op_arm_model.h"
-#include "op_model_arm11_core.h"
-#include "op_model_mpcore.h"
-
-/*
- * MPCore SCU event monitor support
- */
-#define SCU_EVENTMONITORS_VA_BASE __io_address(REALVIEW_EB11MP_SCU_BASE + 0x10)
-
-/*
- * Bitmask of used SCU counters
- */
-static unsigned int scu_em_used;
-static const struct pmu_irqs *pmu_irqs;
-
-/*
- * 2 helper fns take a counter number from 0-7 (not the userspace-visible counter number)
- */
-static inline void scu_reset_counter(struct eventmonitor __iomem *emc, unsigned int n)
-{
- writel(-(u32)counter_config[SCU_COUNTER(n)].count, &emc->MC[n]);
-}
-
-static inline void scu_set_event(struct eventmonitor __iomem *emc, unsigned int n, u32 event)
-{
- event &= 0xff;
- writeb(event, &emc->MCEB[n]);
-}
-
-/*
- * SCU counters' IRQ handler (one IRQ per counter => 2 IRQs per CPU)
- */
-static irqreturn_t scu_em_interrupt(int irq, void *arg)
-{
- struct eventmonitor __iomem *emc = SCU_EVENTMONITORS_VA_BASE;
- unsigned int cnt;
-
- cnt = irq - IRQ_EB11MP_PMU_SCU0;
- oprofile_add_sample(get_irq_regs(), SCU_COUNTER(cnt));
- scu_reset_counter(emc, cnt);
-
- /* Clear overflow flag for this counter */
- writel(1 << (cnt + 16), &emc->PMCR);
-
- return IRQ_HANDLED;
-}
-
-/* Configure just the SCU counters that the user has requested */
-static void scu_setup(void)
-{
- struct eventmonitor __iomem *emc = SCU_EVENTMONITORS_VA_BASE;
- unsigned int i;
-
- scu_em_used = 0;
-
- for (i = 0; i < NUM_SCU_COUNTERS; i++) {
- if (counter_config[SCU_COUNTER(i)].enabled &&
- counter_config[SCU_COUNTER(i)].event) {
- scu_set_event(emc, i, 0); /* disable counter for now */
- scu_em_used |= 1 << i;
- }
- }
-}
-
-static int scu_start(void)
-{
- struct eventmonitor __iomem *emc = SCU_EVENTMONITORS_VA_BASE;
- unsigned int temp, i;
- unsigned long event;
- int ret = 0;
-
- /*
- * request the SCU counter interrupts that we need
- */
- for (i = 0; i < NUM_SCU_COUNTERS; i++) {
- if (scu_em_used & (1 << i)) {
- ret = request_irq(IRQ_EB11MP_PMU_SCU0 + i, scu_em_interrupt, IRQF_DISABLED, "SCU PMU", NULL);
- if (ret) {
- printk(KERN_ERR "oprofile: unable to request IRQ%u for SCU Event Monitor\n",
- IRQ_EB11MP_PMU_SCU0 + i);
- goto err_free_scu;
- }
- }
- }
-
- /*
- * clear overflow and enable interrupt for all used counters
- */
- temp = readl(&emc->PMCR);
- for (i = 0; i < NUM_SCU_COUNTERS; i++) {
- if (scu_em_used & (1 << i)) {
- scu_reset_counter(emc, i);
- event = counter_config[SCU_COUNTER(i)].event;
- scu_set_event(emc, i, event);
-
- /* clear overflow/interrupt */
- temp |= 1 << (i + 16);
- /* enable interrupt*/
- temp |= 1 << (i + 8);
- }
- }
-
- /* Enable all 8 counters */
- temp |= PMCR_E;
- writel(temp, &emc->PMCR);
-
- return 0;
-
- err_free_scu:
- while (i--)
- free_irq(IRQ_EB11MP_PMU_SCU0 + i, NULL);
- return ret;
-}
-
-static void scu_stop(void)
-{
- struct eventmonitor __iomem *emc = SCU_EVENTMONITORS_VA_BASE;
- unsigned int temp, i;
-
- /* Disable counter interrupts */
- /* Don't disable all 8 counters (with the E bit) as they may be in use */
- temp = readl(&emc->PMCR);
- for (i = 0; i < NUM_SCU_COUNTERS; i++) {
- if (scu_em_used & (1 << i))
- temp &= ~(1 << (i + 8));
- }
- writel(temp, &emc->PMCR);
-
- /* Free counter interrupts and reset counters */
- for (i = 0; i < NUM_SCU_COUNTERS; i++) {
- if (scu_em_used & (1 << i)) {
- scu_reset_counter(emc, i);
- free_irq(IRQ_EB11MP_PMU_SCU0 + i, NULL);
- }
- }
-}
-
-struct em_function_data {
- int (*fn)(void);
- int ret;
-};
-
-static void em_func(void *data)
-{
- struct em_function_data *d = data;
- int ret = d->fn();
- if (ret)
- d->ret = ret;
-}
-
-static int em_call_function(int (*fn)(void))
-{
- struct em_function_data data;
-
- data.fn = fn;
- data.ret = 0;
-
- preempt_disable();
- smp_call_function(em_func, &data, 1);
- em_func(&data);
- preempt_enable();
-
- return data.ret;
-}
-
-/*
- * Glue to stick the individual ARM11 PMUs and the SCU
- * into the oprofile framework.
- */
-static int em_setup_ctrs(void)
-{
- int ret;
-
- /* Configure CPU counters by cross-calling to the other CPUs */
- ret = em_call_function(arm11_setup_pmu);
- if (ret == 0)
- scu_setup();
-
- return 0;
-}
-
-static int em_start(void)
-{
- int ret;
-
- pmu_irqs = reserve_pmu();
- if (IS_ERR(pmu_irqs)) {
- ret = PTR_ERR(pmu_irqs);
- goto out;
- }
-
- ret = arm11_request_interrupts(pmu_irqs->irqs, pmu_irqs->num_irqs);
- if (ret == 0) {
- em_call_function(arm11_start_pmu);
-
- ret = scu_start();
- if (ret) {
- arm11_release_interrupts(pmu_irqs->irqs,
- pmu_irqs->num_irqs);
- } else {
- release_pmu(pmu_irqs);
- pmu_irqs = NULL;
- }
- }
-
-out:
- return ret;
-}
-
-static void em_stop(void)
-{
- em_call_function(arm11_stop_pmu);
- arm11_release_interrupts(pmu_irqs->irqs, pmu_irqs->num_irqs);
- scu_stop();
- release_pmu(pmu_irqs);
-}
-
-/*
- * Why isn't there a function to route an IRQ to a specific CPU in
- * genirq?
- */
-static void em_route_irq(int irq, unsigned int cpu)
-{
- struct irq_desc *desc = irq_desc + irq;
- const struct cpumask *mask = cpumask_of(cpu);
-
- spin_lock_irq(&desc->lock);
- cpumask_copy(desc->affinity, mask);
- desc->chip->set_affinity(irq, mask);
- spin_unlock_irq(&desc->lock);
-}
-
-static int em_setup(void)
-{
- /*
- * Send SCU PMU interrupts to the "owner" CPU.
- */
- em_route_irq(IRQ_EB11MP_PMU_SCU0, 0);
- em_route_irq(IRQ_EB11MP_PMU_SCU1, 0);
- em_route_irq(IRQ_EB11MP_PMU_SCU2, 1);
- em_route_irq(IRQ_EB11MP_PMU_SCU3, 1);
- em_route_irq(IRQ_EB11MP_PMU_SCU4, 2);
- em_route_irq(IRQ_EB11MP_PMU_SCU5, 2);
- em_route_irq(IRQ_EB11MP_PMU_SCU6, 3);
- em_route_irq(IRQ_EB11MP_PMU_SCU7, 3);
-
- return init_pmu();
-}
-
-struct op_arm_model_spec op_mpcore_spec = {
- .init = em_setup,
- .num_counters = MPCORE_NUM_COUNTERS,
- .setup_ctrs = em_setup_ctrs,
- .start = em_start,
- .stop = em_stop,
- .name = "arm/mpcore",
-};
diff --git a/arch/arm/oprofile/op_model_mpcore.h b/arch/arm/oprofile/op_model_mpcore.h
deleted file mode 100644
index 73d811023688..000000000000
--- a/arch/arm/oprofile/op_model_mpcore.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/**
- * @file op_model_mpcore.h
- * MPCORE Event Monitor Driver
- * @remark Copyright 2004 ARM SMP Development Team
- * @remark Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
- * @remark Copyright 2000-2004 MontaVista Software Inc
- * @remark Copyright 2004 Dave Jiang <dave.jiang@intel.com>
- * @remark Copyright 2004 Intel Corporation
- * @remark Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
- * @remark Copyright 2004 Oprofile Authors
- *
- * @remark Read the file COPYING
- *
- * @author Zwane Mwaikambo
- */
-#ifndef OP_MODEL_MPCORE_H
-#define OP_MODEL_MPCORE_H
-
-struct eventmonitor {
- unsigned long PMCR;
- unsigned char MCEB[8];
- unsigned long MC[8];
-};
-
-/*
- * List of userspace counter numbers: note that the structure is important.
- * The code relies on CPUn's counters being CPU0's counters + 3n
- * and on CPU0's counters starting at 0
- */
-
-#define COUNTER_CPU0_PMN0 0
-#define COUNTER_CPU0_PMN1 1
-#define COUNTER_CPU0_CCNT 2
-
-#define COUNTER_CPU1_PMN0 3
-#define COUNTER_CPU1_PMN1 4
-#define COUNTER_CPU1_CCNT 5
-
-#define COUNTER_CPU2_PMN0 6
-#define COUNTER_CPU2_PMN1 7
-#define COUNTER_CPU2_CCNT 8
-
-#define COUNTER_CPU3_PMN0 9
-#define COUNTER_CPU3_PMN1 10
-#define COUNTER_CPU3_CCNT 11
-
-#define COUNTER_SCU_MN0 12
-#define COUNTER_SCU_MN1 13
-#define COUNTER_SCU_MN2 14
-#define COUNTER_SCU_MN3 15
-#define COUNTER_SCU_MN4 16
-#define COUNTER_SCU_MN5 17
-#define COUNTER_SCU_MN6 18
-#define COUNTER_SCU_MN7 19
-#define NUM_SCU_COUNTERS 8
-
-#define SCU_COUNTER(number) ((number) + COUNTER_SCU_MN0)
-
-#define MPCORE_NUM_COUNTERS SCU_COUNTER(NUM_SCU_COUNTERS)
-
-#endif
diff --git a/arch/arm/oprofile/op_model_v6.c b/arch/arm/oprofile/op_model_v6.c
deleted file mode 100644
index a22357a2fd08..000000000000
--- a/arch/arm/oprofile/op_model_v6.c
+++ /dev/null
@@ -1,78 +0,0 @@
-/**
- * @file op_model_v6.c
- * ARM11 Performance Monitor Driver
- *
- * Based on op_model_xscale.c
- *
- * @remark Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
- * @remark Copyright 2000-2004 MontaVista Software Inc
- * @remark Copyright 2004 Dave Jiang <dave.jiang@intel.com>
- * @remark Copyright 2004 Intel Corporation
- * @remark Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
- * @remark Copyright 2004 OProfile Authors
- *
- * @remark Read the file COPYING
- *
- * @author Tony Lindgren <tony@atomide.com>
- */
-
-/* #define DEBUG */
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/sched.h>
-#include <linux/oprofile.h>
-#include <linux/interrupt.h>
-#include <asm/irq.h>
-#include <asm/system.h>
-#include <asm/pmu.h>
-
-#include "op_counter.h"
-#include "op_arm_model.h"
-#include "op_model_arm11_core.h"
-
-static const struct pmu_irqs *pmu_irqs;
-
-static void armv6_pmu_stop(void)
-{
- arm11_stop_pmu();
- arm11_release_interrupts(pmu_irqs->irqs, pmu_irqs->num_irqs);
- release_pmu(pmu_irqs);
- pmu_irqs = NULL;
-}
-
-static int armv6_pmu_start(void)
-{
- int ret;
-
- pmu_irqs = reserve_pmu();
- if (IS_ERR(pmu_irqs)) {
- ret = PTR_ERR(pmu_irqs);
- goto out;
- }
-
- ret = arm11_request_interrupts(pmu_irqs->irqs, pmu_irqs->num_irqs);
- if (ret >= 0) {
- ret = arm11_start_pmu();
- } else {
- release_pmu(pmu_irqs);
- pmu_irqs = NULL;
- }
-
-out:
- return ret;
-}
-
-static int armv6_detect_pmu(void)
-{
- return 0;
-}
-
-struct op_arm_model_spec op_armv6_spec = {
- .init = armv6_detect_pmu,
- .num_counters = 3,
- .setup_ctrs = arm11_setup_pmu,
- .start = armv6_pmu_start,
- .stop = armv6_pmu_stop,
- .name = "arm/armv6",
-};
diff --git a/arch/arm/oprofile/op_model_v7.c b/arch/arm/oprofile/op_model_v7.c
deleted file mode 100644
index 8642d0891ae1..000000000000
--- a/arch/arm/oprofile/op_model_v7.c
+++ /dev/null
@@ -1,415 +0,0 @@
-/**
- * op_model_v7.c
- * ARM V7 (Cortex A8) Event Monitor Driver
- *
- * Copyright 2008 Jean Pihet <jpihet@mvista.com>
- * Copyright 2004 ARM SMP Development Team
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/oprofile.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/smp.h>
-
-#include <asm/pmu.h>
-
-#include "op_counter.h"
-#include "op_arm_model.h"
-#include "op_model_v7.h"
-
-/* #define DEBUG */
-
-
-/*
- * ARM V7 PMNC support
- */
-
-static u32 cnt_en[CNTMAX];
-
-static inline void armv7_pmnc_write(u32 val)
-{
- val &= PMNC_MASK;
- asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r" (val));
-}
-
-static inline u32 armv7_pmnc_read(void)
-{
- u32 val;
-
- asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
- return val;
-}
-
-static inline u32 armv7_pmnc_enable_counter(unsigned int cnt)
-{
- u32 val;
-
- if (cnt >= CNTMAX) {
- printk(KERN_ERR "oprofile: CPU%u enabling wrong PMNC counter"
- " %d\n", smp_processor_id(), cnt);
- return -1;
- }
-
- if (cnt == CCNT)
- val = CNTENS_C;
- else
- val = (1 << (cnt - CNT0));
-
- val &= CNTENS_MASK;
- asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
-
- return cnt;
-}
-
-static inline u32 armv7_pmnc_disable_counter(unsigned int cnt)
-{
- u32 val;
-
- if (cnt >= CNTMAX) {
- printk(KERN_ERR "oprofile: CPU%u disabling wrong PMNC counter"
- " %d\n", smp_processor_id(), cnt);
- return -1;
- }
-
- if (cnt == CCNT)
- val = CNTENC_C;
- else
- val = (1 << (cnt - CNT0));
-
- val &= CNTENC_MASK;
- asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
-
- return cnt;
-}
-
-static inline u32 armv7_pmnc_enable_intens(unsigned int cnt)
-{
- u32 val;
-
- if (cnt >= CNTMAX) {
- printk(KERN_ERR "oprofile: CPU%u enabling wrong PMNC counter"
- " interrupt enable %d\n", smp_processor_id(), cnt);
- return -1;
- }
-
- if (cnt == CCNT)
- val = INTENS_C;
- else
- val = (1 << (cnt - CNT0));
-
- val &= INTENS_MASK;
- asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));
-
- return cnt;
-}
-
-static inline u32 armv7_pmnc_getreset_flags(void)
-{
- u32 val;
-
- /* Read */
- asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
-
- /* Write to clear flags */
- val &= FLAG_MASK;
- asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
-
- return val;
-}
-
-static inline int armv7_pmnc_select_counter(unsigned int cnt)
-{
- u32 val;
-
- if ((cnt == CCNT) || (cnt >= CNTMAX)) {
- printk(KERN_ERR "oprofile: CPU%u selecting wrong PMNC counter"
- " %d\n", smp_processor_id(), cnt);
- return -1;
- }
-
- val = (cnt - CNT0) & SELECT_MASK;
- asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
-
- return cnt;
-}
-
-static inline void armv7_pmnc_write_evtsel(unsigned int cnt, u32 val)
-{
- if (armv7_pmnc_select_counter(cnt) == cnt) {
- val &= EVTSEL_MASK;
- asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
- }
-}
-
-static void armv7_pmnc_reset_counter(unsigned int cnt)
-{
- u32 cpu_cnt = CPU_COUNTER(smp_processor_id(), cnt);
- u32 val = -(u32)counter_config[cpu_cnt].count;
-
- switch (cnt) {
- case CCNT:
- armv7_pmnc_disable_counter(cnt);
-
- asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (val));
-
- if (cnt_en[cnt] != 0)
- armv7_pmnc_enable_counter(cnt);
-
- break;
-
- case CNT0:
- case CNT1:
- case CNT2:
- case CNT3:
- armv7_pmnc_disable_counter(cnt);
-
- if (armv7_pmnc_select_counter(cnt) == cnt)
- asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (val));
-
- if (cnt_en[cnt] != 0)
- armv7_pmnc_enable_counter(cnt);
-
- break;
-
- default:
- printk(KERN_ERR "oprofile: CPU%u resetting wrong PMNC counter"
- " %d\n", smp_processor_id(), cnt);
- break;
- }
-}
-
-int armv7_setup_pmnc(void)
-{
- unsigned int cnt;
-
- if (armv7_pmnc_read() & PMNC_E) {
- printk(KERN_ERR "oprofile: CPU%u PMNC still enabled when setting up"
- " new event counter.\n", smp_processor_id());
- return -EBUSY;
- }
-
- /* Initialize & Reset PMNC: C bit and P bit */
- armv7_pmnc_write(PMNC_P | PMNC_C);
-
-
- for (cnt = CCNT; cnt < CNTMAX; cnt++) {
- unsigned long event;
- u32 cpu_cnt = CPU_COUNTER(smp_processor_id(), cnt);
-
- /*
- * Disable counter
- */
- armv7_pmnc_disable_counter(cnt);
- cnt_en[cnt] = 0;
-
- if (!counter_config[cpu_cnt].enabled)
- continue;
-
- event = counter_config[cpu_cnt].event & 255;
-
- /*
- * Set event (if destined for PMNx counters)
- * We don't need to set the event if it's a cycle count
- */
- if (cnt != CCNT)
- armv7_pmnc_write_evtsel(cnt, event);
-
- /*
- * Enable interrupt for this counter
- */
- armv7_pmnc_enable_intens(cnt);
-
- /*
- * Reset counter
- */
- armv7_pmnc_reset_counter(cnt);
-
- /*
- * Enable counter
- */
- armv7_pmnc_enable_counter(cnt);
- cnt_en[cnt] = 1;
- }
-
- return 0;
-}
-
-static inline void armv7_start_pmnc(void)
-{
- armv7_pmnc_write(armv7_pmnc_read() | PMNC_E);
-}
-
-static inline void armv7_stop_pmnc(void)
-{
- armv7_pmnc_write(armv7_pmnc_read() & ~PMNC_E);
-}
-
-/*
- * CPU counters' IRQ handler (one IRQ per CPU)
- */
-static irqreturn_t armv7_pmnc_interrupt(int irq, void *arg)
-{
- struct pt_regs *regs = get_irq_regs();
- unsigned int cnt;
- u32 flags;
-
-
- /*
- * Stop IRQ generation
- */
- armv7_stop_pmnc();
-
- /*
- * Get and reset overflow status flags
- */
- flags = armv7_pmnc_getreset_flags();
-
- /*
- * Cycle counter
- */
- if (flags & FLAG_C) {
- u32 cpu_cnt = CPU_COUNTER(smp_processor_id(), CCNT);
- armv7_pmnc_reset_counter(CCNT);
- oprofile_add_sample(regs, cpu_cnt);
- }
-
- /*
- * PMNC counters 0:3
- */
- for (cnt = CNT0; cnt < CNTMAX; cnt++) {
- if (flags & (1 << (cnt - CNT0))) {
- u32 cpu_cnt = CPU_COUNTER(smp_processor_id(), cnt);
- armv7_pmnc_reset_counter(cnt);
- oprofile_add_sample(regs, cpu_cnt);
- }
- }
-
- /*
- * Allow IRQ generation
- */
- armv7_start_pmnc();
-
- return IRQ_HANDLED;
-}
-
-int armv7_request_interrupts(const int *irqs, int nr)
-{
- unsigned int i;
- int ret = 0;
-
- for (i = 0; i < nr; i++) {
- ret = request_irq(irqs[i], armv7_pmnc_interrupt,
- IRQF_DISABLED, "CP15 PMNC", NULL);
- if (ret != 0) {
- printk(KERN_ERR "oprofile: unable to request IRQ%u"
- " for ARMv7\n",
- irqs[i]);
- break;
- }
- }
-
- if (i != nr)
- while (i-- != 0)
- free_irq(irqs[i], NULL);
-
- return ret;
-}
-
-void armv7_release_interrupts(const int *irqs, int nr)
-{
- unsigned int i;
-
- for (i = 0; i < nr; i++)
- free_irq(irqs[i], NULL);
-}
-
-#ifdef DEBUG
-static void armv7_pmnc_dump_regs(void)
-{
- u32 val;
- unsigned int cnt;
-
- printk(KERN_INFO "PMNC registers dump:\n");
-
- asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
- printk(KERN_INFO "PMNC =0x%08x\n", val);
-
- asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
- printk(KERN_INFO "CNTENS=0x%08x\n", val);
-
- asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
- printk(KERN_INFO "INTENS=0x%08x\n", val);
-
- asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
- printk(KERN_INFO "FLAGS =0x%08x\n", val);
-
- asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
- printk(KERN_INFO "SELECT=0x%08x\n", val);
-
- asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
- printk(KERN_INFO "CCNT =0x%08x\n", val);
-
- for (cnt = CNT0; cnt < CNTMAX; cnt++) {
- armv7_pmnc_select_counter(cnt);
- asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
- printk(KERN_INFO "CNT[%d] count =0x%08x\n", cnt-CNT0, val);
- asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
- printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n", cnt-CNT0, val);
- }
-}
-#endif
-
-static const struct pmu_irqs *pmu_irqs;
-
-static void armv7_pmnc_stop(void)
-{
-#ifdef DEBUG
- armv7_pmnc_dump_regs();
-#endif
- armv7_stop_pmnc();
- armv7_release_interrupts(pmu_irqs->irqs, pmu_irqs->num_irqs);
- release_pmu(pmu_irqs);
- pmu_irqs = NULL;
-}
-
-static int armv7_pmnc_start(void)
-{
- int ret;
-
- pmu_irqs = reserve_pmu();
- if (IS_ERR(pmu_irqs))
- return PTR_ERR(pmu_irqs);
-
-#ifdef DEBUG
- armv7_pmnc_dump_regs();
-#endif
- ret = armv7_request_interrupts(pmu_irqs->irqs, pmu_irqs->num_irqs);
- if (ret >= 0) {
- armv7_start_pmnc();
- } else {
- release_pmu(pmu_irqs);
- pmu_irqs = NULL;
- }
-
- return ret;
-}
-
-static int armv7_detect_pmnc(void)
-{
- return 0;
-}
-
-struct op_arm_model_spec op_armv7_spec = {
- .init = armv7_detect_pmnc,
- .num_counters = 5,
- .setup_ctrs = armv7_setup_pmnc,
- .start = armv7_pmnc_start,
- .stop = armv7_pmnc_stop,
- .name = "arm/armv7",
-};
diff --git a/arch/arm/oprofile/op_model_v7.h b/arch/arm/oprofile/op_model_v7.h
deleted file mode 100644
index 9ca334b39c75..000000000000
--- a/arch/arm/oprofile/op_model_v7.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/**
- * op_model_v7.h
- * ARM v7 (Cortex A8) Event Monitor Driver
- *
- * Copyright 2008 Jean Pihet <jpihet@mvista.com>
- * Copyright 2004 ARM SMP Development Team
- * Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
- * Copyright 2000-2004 MontaVista Software Inc
- * Copyright 2004 Dave Jiang <dave.jiang@intel.com>
- * Copyright 2004 Intel Corporation
- * Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
- * Copyright 2004 Oprofile Authors
- *
- * Read the file COPYING
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef OP_MODEL_V7_H
-#define OP_MODEL_V7_H
-
-/*
- * Per-CPU PMNC: config reg
- */
-#define PMNC_E (1 << 0) /* Enable all counters */
-#define PMNC_P (1 << 1) /* Reset all counters */
-#define PMNC_C (1 << 2) /* Cycle counter reset */
-#define PMNC_D (1 << 3) /* CCNT counts every 64th cpu cycle */
-#define PMNC_X (1 << 4) /* Export to ETM */
-#define PMNC_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
-#define PMNC_MASK 0x3f /* Mask for writable bits */
-
-/*
- * Available counters
- */
-#define CCNT 0
-#define CNT0 1
-#define CNT1 2
-#define CNT2 3
-#define CNT3 4
-#define CNTMAX 5
-
-#define CPU_COUNTER(cpu, counter) ((cpu) * CNTMAX + (counter))
-
-/*
- * CNTENS: counters enable reg
- */
-#define CNTENS_P0 (1 << 0)
-#define CNTENS_P1 (1 << 1)
-#define CNTENS_P2 (1 << 2)
-#define CNTENS_P3 (1 << 3)
-#define CNTENS_C (1 << 31)
-#define CNTENS_MASK 0x8000000f /* Mask for writable bits */
-
-/*
- * CNTENC: counters disable reg
- */
-#define CNTENC_P0 (1 << 0)
-#define CNTENC_P1 (1 << 1)
-#define CNTENC_P2 (1 << 2)
-#define CNTENC_P3 (1 << 3)
-#define CNTENC_C (1 << 31)
-#define CNTENC_MASK 0x8000000f /* Mask for writable bits */
-
-/*
- * INTENS: counters overflow interrupt enable reg
- */
-#define INTENS_P0 (1 << 0)
-#define INTENS_P1 (1 << 1)
-#define INTENS_P2 (1 << 2)
-#define INTENS_P3 (1 << 3)
-#define INTENS_C (1 << 31)
-#define INTENS_MASK 0x8000000f /* Mask for writable bits */
-
-/*
- * EVTSEL: Event selection reg
- */
-#define EVTSEL_MASK 0x7f /* Mask for writable bits */
-
-/*
- * SELECT: Counter selection reg
- */
-#define SELECT_MASK 0x1f /* Mask for writable bits */
-
-/*
- * FLAG: counters overflow flag status reg
- */
-#define FLAG_P0 (1 << 0)
-#define FLAG_P1 (1 << 1)
-#define FLAG_P2 (1 << 2)
-#define FLAG_P3 (1 << 3)
-#define FLAG_C (1 << 31)
-#define FLAG_MASK 0x8000000f /* Mask for writable bits */
-
-
-int armv7_setup_pmu(void);
-int armv7_start_pmu(void);
-int armv7_stop_pmu(void);
-int armv7_request_interrupts(const int *, int);
-void armv7_release_interrupts(const int *, int);
-
-#endif
diff --git a/arch/arm/oprofile/op_model_xscale.c b/arch/arm/oprofile/op_model_xscale.c
deleted file mode 100644
index 1d34a02048bd..000000000000
--- a/arch/arm/oprofile/op_model_xscale.c
+++ /dev/null
@@ -1,444 +0,0 @@
-/**
- * @file op_model_xscale.c
- * XScale Performance Monitor Driver
- *
- * @remark Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
- * @remark Copyright 2000-2004 MontaVista Software Inc
- * @remark Copyright 2004 Dave Jiang <dave.jiang@intel.com>
- * @remark Copyright 2004 Intel Corporation
- * @remark Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
- * @remark Copyright 2004 OProfile Authors
- *
- * @remark Read the file COPYING
- *
- * @author Zwane Mwaikambo
- */
-
-/* #define DEBUG */
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/sched.h>
-#include <linux/oprofile.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-
-#include <asm/cputype.h>
-#include <asm/pmu.h>
-
-#include "op_counter.h"
-#include "op_arm_model.h"
-
-#define PMU_ENABLE 0x001 /* Enable counters */
-#define PMN_RESET 0x002 /* Reset event counters */
-#define CCNT_RESET 0x004 /* Reset clock counter */
-#define PMU_RESET (CCNT_RESET | PMN_RESET)
-#define PMU_CNT64 0x008 /* Make CCNT count every 64th cycle */
-
-/*
- * Different types of events that can be counted by the XScale PMU
- * as used by Oprofile userspace. Here primarily for documentation
- * purposes.
- */
-
-#define EVT_ICACHE_MISS 0x00
-#define EVT_ICACHE_NO_DELIVER 0x01
-#define EVT_DATA_STALL 0x02
-#define EVT_ITLB_MISS 0x03
-#define EVT_DTLB_MISS 0x04
-#define EVT_BRANCH 0x05
-#define EVT_BRANCH_MISS 0x06
-#define EVT_INSTRUCTION 0x07
-#define EVT_DCACHE_FULL_STALL 0x08
-#define EVT_DCACHE_FULL_STALL_CONTIG 0x09
-#define EVT_DCACHE_ACCESS 0x0A
-#define EVT_DCACHE_MISS 0x0B
-#define EVT_DCACE_WRITE_BACK 0x0C
-#define EVT_PC_CHANGED 0x0D
-#define EVT_BCU_REQUEST 0x10
-#define EVT_BCU_FULL 0x11
-#define EVT_BCU_DRAIN 0x12
-#define EVT_BCU_ECC_NO_ELOG 0x14
-#define EVT_BCU_1_BIT_ERR 0x15
-#define EVT_RMW 0x16
-/* EVT_CCNT is not hardware defined */
-#define EVT_CCNT 0xFE
-#define EVT_UNUSED 0xFF
-
-struct pmu_counter {
- volatile unsigned long ovf;
- unsigned long reset_counter;
-};
-
-enum { CCNT, PMN0, PMN1, PMN2, PMN3, MAX_COUNTERS };
-
-static struct pmu_counter results[MAX_COUNTERS];
-
-/*
- * There are two versions of the PMU in current XScale processors
- * with differing register layouts and number of performance counters.
- * e.g. IOP32x is xsc1 whilst IOP33x is xsc2.
- * We detect which register layout to use in xscale_detect_pmu()
- */
-enum { PMU_XSC1, PMU_XSC2 };
-
-struct pmu_type {
- int id;
- char *name;
- int num_counters;
- unsigned int int_enable;
- unsigned int cnt_ovf[MAX_COUNTERS];
- unsigned int int_mask[MAX_COUNTERS];
-};
-
-static struct pmu_type pmu_parms[] = {
- {
- .id = PMU_XSC1,
- .name = "arm/xscale1",
- .num_counters = 3,
- .int_mask = { [PMN0] = 0x10, [PMN1] = 0x20,
- [CCNT] = 0x40 },
- .cnt_ovf = { [CCNT] = 0x400, [PMN0] = 0x100,
- [PMN1] = 0x200},
- },
- {
- .id = PMU_XSC2,
- .name = "arm/xscale2",
- .num_counters = 5,
- .int_mask = { [CCNT] = 0x01, [PMN0] = 0x02,
- [PMN1] = 0x04, [PMN2] = 0x08,
- [PMN3] = 0x10 },
- .cnt_ovf = { [CCNT] = 0x01, [PMN0] = 0x02,
- [PMN1] = 0x04, [PMN2] = 0x08,
- [PMN3] = 0x10 },
- },
-};
-
-static struct pmu_type *pmu;
-
-static void write_pmnc(u32 val)
-{
- if (pmu->id == PMU_XSC1) {
- /* upper 4bits and 7, 11 are write-as-0 */
- val &= 0xffff77f;
- __asm__ __volatile__ ("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
- } else {
- /* bits 4-23 are write-as-0, 24-31 are write ignored */
- val &= 0xf;
- __asm__ __volatile__ ("mcr p14, 0, %0, c0, c1, 0" : : "r" (val));
- }
-}
-
-static u32 read_pmnc(void)
-{
- u32 val;
-
- if (pmu->id == PMU_XSC1)
- __asm__ __volatile__ ("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
- else {
- __asm__ __volatile__ ("mrc p14, 0, %0, c0, c1, 0" : "=r" (val));
- /* bits 1-2 and 4-23 are read-unpredictable */
- val &= 0xff000009;
- }
-
- return val;
-}
-
-static u32 __xsc1_read_counter(int counter)
-{
- u32 val = 0;
-
- switch (counter) {
- case CCNT:
- __asm__ __volatile__ ("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
- break;
- case PMN0:
- __asm__ __volatile__ ("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
- break;
- case PMN1:
- __asm__ __volatile__ ("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
- break;
- }
- return val;
-}
-
-static u32 __xsc2_read_counter(int counter)
-{
- u32 val = 0;
-
- switch (counter) {
- case CCNT:
- __asm__ __volatile__ ("mrc p14, 0, %0, c1, c1, 0" : "=r" (val));
- break;
- case PMN0:
- __asm__ __volatile__ ("mrc p14, 0, %0, c0, c2, 0" : "=r" (val));
- break;
- case PMN1:
- __asm__ __volatile__ ("mrc p14, 0, %0, c1, c2, 0" : "=r" (val));
- break;
- case PMN2:
- __asm__ __volatile__ ("mrc p14, 0, %0, c2, c2, 0" : "=r" (val));
- break;
- case PMN3:
- __asm__ __volatile__ ("mrc p14, 0, %0, c3, c2, 0" : "=r" (val));
- break;
- }
- return val;
-}
-
-static u32 read_counter(int counter)
-{
- u32 val;
-
- if (pmu->id == PMU_XSC1)
- val = __xsc1_read_counter(counter);
- else
- val = __xsc2_read_counter(counter);
-
- return val;
-}
-
-static void __xsc1_write_counter(int counter, u32 val)
-{
- switch (counter) {
- case CCNT:
- __asm__ __volatile__ ("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
- break;
- case PMN0:
- __asm__ __volatile__ ("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
- break;
- case PMN1:
- __asm__ __volatile__ ("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
- break;
- }
-}
-
-static void __xsc2_write_counter(int counter, u32 val)
-{
- switch (counter) {
- case CCNT:
- __asm__ __volatile__ ("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
- break;
- case PMN0:
- __asm__ __volatile__ ("mcr p14, 0, %0, c0, c2, 0" : : "r" (val));
- break;
- case PMN1:
- __asm__ __volatile__ ("mcr p14, 0, %0, c1, c2, 0" : : "r" (val));
- break;
- case PMN2:
- __asm__ __volatile__ ("mcr p14, 0, %0, c2, c2, 0" : : "r" (val));
- break;
- case PMN3:
- __asm__ __volatile__ ("mcr p14, 0, %0, c3, c2, 0" : : "r" (val));
- break;
- }
-}
-
-static void write_counter(int counter, u32 val)
-{
- if (pmu->id == PMU_XSC1)
- __xsc1_write_counter(counter, val);
- else
- __xsc2_write_counter(counter, val);
-}
-
-static int xscale_setup_ctrs(void)
-{
- u32 evtsel, pmnc;
- int i;
-
- for (i = CCNT; i < MAX_COUNTERS; i++) {
- if (counter_config[i].enabled)
- continue;
-
- counter_config[i].event = EVT_UNUSED;
- }
-
- switch (pmu->id) {
- case PMU_XSC1:
- pmnc = (counter_config[PMN1].event << 20) | (counter_config[PMN0].event << 12);
- pr_debug("xscale_setup_ctrs: pmnc: %#08x\n", pmnc);
- write_pmnc(pmnc);
- break;
-
- case PMU_XSC2:
- evtsel = counter_config[PMN0].event | (counter_config[PMN1].event << 8) |
- (counter_config[PMN2].event << 16) | (counter_config[PMN3].event << 24);
-
- pr_debug("xscale_setup_ctrs: evtsel %#08x\n", evtsel);
- __asm__ __volatile__ ("mcr p14, 0, %0, c8, c1, 0" : : "r" (evtsel));
- break;
- }
-
- for (i = CCNT; i < MAX_COUNTERS; i++) {
- if (counter_config[i].event == EVT_UNUSED) {
- counter_config[i].event = 0;
- pmu->int_enable &= ~pmu->int_mask[i];
- continue;
- }
-
- results[i].reset_counter = counter_config[i].count;
- write_counter(i, -(u32)counter_config[i].count);
- pmu->int_enable |= pmu->int_mask[i];
- pr_debug("xscale_setup_ctrs: counter%d %#08x from %#08lx\n", i,
- read_counter(i), counter_config[i].count);
- }
-
- return 0;
-}
-
-static void inline __xsc1_check_ctrs(void)
-{
- int i;
- u32 pmnc = read_pmnc();
-
- /* NOTE: there's an A stepping errata that states if an overflow */
- /* bit already exists and another occurs, the previous */
- /* Overflow bit gets cleared. There's no workaround. */
- /* Fixed in B stepping or later */
-
- /* Write the value back to clear the overflow flags. Overflow */
- /* flags remain in pmnc for use below */
- write_pmnc(pmnc & ~PMU_ENABLE);
-
- for (i = CCNT; i <= PMN1; i++) {
- if (!(pmu->int_mask[i] & pmu->int_enable))
- continue;
-
- if (pmnc & pmu->cnt_ovf[i])
- results[i].ovf++;
- }
-}
-
-static void inline __xsc2_check_ctrs(void)
-{
- int i;
- u32 flag = 0, pmnc = read_pmnc();
-
- pmnc &= ~PMU_ENABLE;
- write_pmnc(pmnc);
-
- /* read overflow flag register */
- __asm__ __volatile__ ("mrc p14, 0, %0, c5, c1, 0" : "=r" (flag));
-
- for (i = CCNT; i <= PMN3; i++) {
- if (!(pmu->int_mask[i] & pmu->int_enable))
- continue;
-
- if (flag & pmu->cnt_ovf[i])
- results[i].ovf++;
- }
-
- /* writeback clears overflow bits */
- __asm__ __volatile__ ("mcr p14, 0, %0, c5, c1, 0" : : "r" (flag));
-}
-
-static irqreturn_t xscale_pmu_interrupt(int irq, void *arg)
-{
- int i;
- u32 pmnc;
-
- if (pmu->id == PMU_XSC1)
- __xsc1_check_ctrs();
- else
- __xsc2_check_ctrs();
-
- for (i = CCNT; i < MAX_COUNTERS; i++) {
- if (!results[i].ovf)
- continue;
-
- write_counter(i, -(u32)results[i].reset_counter);
- oprofile_add_sample(get_irq_regs(), i);
- results[i].ovf--;
- }
-
- pmnc = read_pmnc() | PMU_ENABLE;
- write_pmnc(pmnc);
-
- return IRQ_HANDLED;
-}
-
-static const struct pmu_irqs *pmu_irqs;
-
-static void xscale_pmu_stop(void)
-{
- u32 pmnc = read_pmnc();
-
- pmnc &= ~PMU_ENABLE;
- write_pmnc(pmnc);
-
- free_irq(pmu_irqs->irqs[0], results);
- release_pmu(pmu_irqs);
- pmu_irqs = NULL;
-}
-
-static int xscale_pmu_start(void)
-{
- int ret;
- u32 pmnc;
-
- pmu_irqs = reserve_pmu();
- if (IS_ERR(pmu_irqs))
- return PTR_ERR(pmu_irqs);
-
- pmnc = read_pmnc();
-
- ret = request_irq(pmu_irqs->irqs[0], xscale_pmu_interrupt,
- IRQF_DISABLED, "XScale PMU", (void *)results);
-
- if (ret < 0) {
- printk(KERN_ERR "oprofile: unable to request IRQ%d for XScale PMU\n",
- pmu_irqs->irqs[0]);
- release_pmu(pmu_irqs);
- pmu_irqs = NULL;
- return ret;
- }
-
- if (pmu->id == PMU_XSC1)
- pmnc |= pmu->int_enable;
- else {
- __asm__ __volatile__ ("mcr p14, 0, %0, c4, c1, 0" : : "r" (pmu->int_enable));
- pmnc &= ~PMU_CNT64;
- }
-
- pmnc |= PMU_ENABLE;
- write_pmnc(pmnc);
- pr_debug("xscale_pmu_start: pmnc: %#08x mask: %08x\n", pmnc, pmu->int_enable);
- return 0;
-}
-
-static int xscale_detect_pmu(void)
-{
- int ret = 0;
- u32 id;
-
- id = (read_cpuid(CPUID_ID) >> 13) & 0x7;
-
- switch (id) {
- case 1:
- pmu = &pmu_parms[PMU_XSC1];
- break;
- case 2:
- pmu = &pmu_parms[PMU_XSC2];
- break;
- default:
- ret = -ENODEV;
- break;
- }
-
- if (!ret) {
- op_xscale_spec.name = pmu->name;
- op_xscale_spec.num_counters = pmu->num_counters;
- pr_debug("xscale_detect_pmu: detected %s PMU\n", pmu->name);
- }
-
- return ret;
-}
-
-struct op_arm_model_spec op_xscale_spec = {
- .init = xscale_detect_pmu,
- .setup_ctrs = xscale_setup_ctrs,
- .start = xscale_pmu_start,
- .stop = xscale_pmu_stop,
-};
-
diff --git a/arch/arm/plat-iop/Makefile b/arch/arm/plat-iop/Makefile
index 36bff0325959..69b09c1cec8b 100644
--- a/arch/arm/plat-iop/Makefile
+++ b/arch/arm/plat-iop/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_ARCH_IOP32X) += time.o
obj-$(CONFIG_ARCH_IOP32X) += io.o
obj-$(CONFIG_ARCH_IOP32X) += cp6.o
obj-$(CONFIG_ARCH_IOP32X) += adma.o
+obj-$(CONFIG_ARCH_IOP32X) += pmu.o
# IOP33X
obj-$(CONFIG_ARCH_IOP33X) += gpio.o
@@ -23,6 +24,7 @@ obj-$(CONFIG_ARCH_IOP33X) += time.o
obj-$(CONFIG_ARCH_IOP33X) += io.o
obj-$(CONFIG_ARCH_IOP33X) += cp6.o
obj-$(CONFIG_ARCH_IOP33X) += adma.o
+obj-$(CONFIG_ARCH_IOP33X) += pmu.o
# IOP13XX
obj-$(CONFIG_ARCH_IOP13XX) += cp6.o
diff --git a/arch/arm/plat-iop/pmu.c b/arch/arm/plat-iop/pmu.c
new file mode 100644
index 000000000000..a2024b8685a1
--- /dev/null
+++ b/arch/arm/plat-iop/pmu.c
@@ -0,0 +1,40 @@
+/*
+ * PMU IRQ registration for the iop3xx xscale PMU families.
+ * Copyright (C) 2010 Will Deacon, ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <asm/pmu.h>
+#include <mach/irqs.h>
+
+static struct resource pmu_resource = {
+#ifdef CONFIG_ARCH_IOP32X
+ .start = IRQ_IOP32X_CORE_PMU,
+ .end = IRQ_IOP32X_CORE_PMU,
+#endif
+#ifdef CONFIG_ARCH_IOP33X
+ .start = IRQ_IOP33X_CORE_PMU,
+ .end = IRQ_IOP33X_CORE_PMU,
+#endif
+ .flags = IORESOURCE_IRQ,
+};
+
+static struct platform_device pmu_device = {
+ .name = "arm-pmu",
+ .id = ARM_PMU_DEVICE_CPU,
+ .resource = &pmu_resource,
+ .num_resources = 1,
+};
+
+static int __init iop3xx_pmu_init(void)
+{
+ platform_device_register(&pmu_device);
+ return 0;
+}
+
+arch_initcall(iop3xx_pmu_init);
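The "arm-pmu" platform device above only advertises the core PMU interrupt as an IRQ resource; whichever PMU backend binds to that name is expected to claim it later. A rough, purely illustrative sketch of the consuming side (hypothetical driver and handler names) could look like this:

/* Sketch: picking up the registered PMU IRQ in a hypothetical probe. */
#include <linux/platform_device.h>
#include <linux/interrupt.h>

static irqreturn_t example_pmu_handler(int irq, void *dev_id)
{
	/* Placeholder: a real handler would read and reset the counters. */
	return IRQ_HANDLED;
}

static int example_pmu_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);

	if (!res)
		return -ENODEV;

	return request_irq(res->start, example_pmu_handler,
			   IRQF_DISABLED, "example-pmu", NULL);
}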
diff --git a/arch/arm/plat-mxc/ehci.c b/arch/arm/plat-mxc/ehci.c
index cb0b63874482..2a8646173c2f 100644
--- a/arch/arm/plat-mxc/ehci.c
+++ b/arch/arm/plat-mxc/ehci.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
+ * Copyright (C) 2010 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -50,7 +51,26 @@
#define MX35_H1_TLL_BIT (1 << 5)
#define MX35_H1_USBTE_BIT (1 << 4)
-int mxc_set_usbcontrol(int port, unsigned int flags)
+#define MXC_OTG_OFFSET 0
+#define MXC_H1_OFFSET 0x200
+
+/* USB_CTRL */
+#define MXC_OTG_UCTRL_OWIE_BIT (1 << 27) /* OTG wakeup intr enable */
+#define MXC_OTG_UCTRL_OPM_BIT (1 << 24) /* OTG power mask */
+#define MXC_H1_UCTRL_H1UIE_BIT (1 << 12) /* Host1 ULPI interrupt enable */
+#define MXC_H1_UCTRL_H1WIE_BIT (1 << 11) /* HOST1 wakeup intr enable */
+#define MXC_H1_UCTRL_H1PM_BIT (1 << 8) /* HOST1 power mask */
+
+/* USB_PHY_CTRL_FUNC */
+#define MXC_OTG_PHYCTRL_OC_DIS_BIT (1 << 8) /* OTG Disable Overcurrent Event */
+#define MXC_H1_OC_DIS_BIT (1 << 5) /* UH1 Disable Overcurrent Event */
+
+#define MXC_USBCMD_OFFSET 0x140
+
+/* USBCMD */
+#define MXC_UCMD_ITC_NO_THRESHOLD_MASK (~(0xff << 16)) /* Interrupt Threshold Control */
+
+int mxc_initialize_usb_hw(int port, unsigned int flags)
{
unsigned int v;
#ifdef CONFIG_ARCH_MX3
@@ -186,9 +206,85 @@ int mxc_set_usbcontrol(int port, unsigned int flags)
return 0;
}
#endif /* CONFIG_MACH_MX27 */
+#ifdef CONFIG_ARCH_MX51
+ if (cpu_is_mx51()) {
+ void __iomem *usb_base;
+ void __iomem *usbotg_base;
+ void __iomem *usbother_base;
+ int ret = 0;
+
+ usb_base = ioremap(MX51_OTG_BASE_ADDR, SZ_4K);
+
+ switch (port) {
+ case 0: /* OTG port */
+ usbotg_base = usb_base + MXC_OTG_OFFSET;
+ break;
+ case 1: /* Host 1 port */
+ usbotg_base = usb_base + MXC_H1_OFFSET;
+ break;
+ default:
+ printk(KERN_ERR "%s: no such port %d\n", __func__, port);
+ ret = -ENOENT;
+ goto error;
+ }
+ usbother_base = usb_base + MX5_USBOTHER_REGS_OFFSET;
+
+ switch (port) {
+ case 0: /*OTG port */
+ if (flags & MXC_EHCI_INTERNAL_PHY) {
+ v = __raw_readl(usbother_base + MXC_USB_PHY_CTR_FUNC_OFFSET);
+
+ if (flags & MXC_EHCI_POWER_PINS_ENABLED)
+ v |= (MXC_OTG_PHYCTRL_OC_DIS_BIT | MXC_OTG_UCTRL_OPM_BIT); /* OC/USBPWR is not used */
+ else
+ v &= ~(MXC_OTG_PHYCTRL_OC_DIS_BIT | MXC_OTG_UCTRL_OPM_BIT); /* OC/USBPWR is used */
+ __raw_writel(v, usbother_base + MXC_USB_PHY_CTR_FUNC_OFFSET);
+
+ v = __raw_readl(usbother_base + MXC_USBCTRL_OFFSET);
+ if (flags & MXC_EHCI_WAKEUP_ENABLED)
+ v |= MXC_OTG_UCTRL_OWIE_BIT;/* OTG wakeup enable */
+ else
+ v &= ~MXC_OTG_UCTRL_OWIE_BIT;/* OTG wakeup disable */
+ __raw_writel(v, usbother_base + MXC_USBCTRL_OFFSET);
+ }
+ break;
+ case 1: /* Host 1 */
+ /*Host ULPI */
+ v = __raw_readl(usbother_base + MXC_USBCTRL_OFFSET);
+ if (flags & MXC_EHCI_WAKEUP_ENABLED)
+ v &= ~(MXC_H1_UCTRL_H1WIE_BIT | MXC_H1_UCTRL_H1UIE_BIT);/* HOST1 wakeup/ULPI intr disable */
+ else
+ v &= ~(MXC_H1_UCTRL_H1WIE_BIT | MXC_H1_UCTRL_H1UIE_BIT);/* HOST1 wakeup/ULPI intr disable */
+
+ if (flags & MXC_EHCI_POWER_PINS_ENABLED)
+ v &= ~MXC_H1_UCTRL_H1PM_BIT; /* HOST1 power mask used*/
+ else
+ v |= MXC_H1_UCTRL_H1PM_BIT; /* HOST1 power mask used*/
+ __raw_writel(v, usbother_base + MXC_USBCTRL_OFFSET);
+
+ v = __raw_readl(usbother_base + MXC_USB_PHY_CTR_FUNC_OFFSET);
+ if (flags & MXC_EHCI_POWER_PINS_ENABLED)
+ v &= ~MXC_H1_OC_DIS_BIT; /* OC is used */
+ else
+ v |= MXC_H1_OC_DIS_BIT; /* OC is not used */
+ __raw_writel(v, usbother_base + MXC_USB_PHY_CTR_FUNC_OFFSET);
+
+ v = __raw_readl(usbotg_base + MXC_USBCMD_OFFSET);
+ if (flags & MXC_EHCI_ITC_NO_THRESHOLD)
+ /* Interrupt Threshold Control:Immediate (no threshold) */
+ v &= MXC_UCMD_ITC_NO_THRESHOLD_MASK;
+ __raw_writel(v, usbotg_base + MXC_USBCMD_OFFSET);
+ break;
+ }
+
+error:
+ iounmap(usb_base);
+ return ret;
+ }
+#endif
printk(KERN_WARNING
"%s() unable to setup USBCONTROL for this CPU\n", __func__);
return -EINVAL;
}
-EXPORT_SYMBOL(mxc_set_usbcontrol);
+EXPORT_SYMBOL(mxc_initialize_usb_hw);
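Board code would normally reach mxc_initialize_usb_hw() through the init hook of struct mxc_usbh_platform_data (see the mxc_ehci.h hunk further down). The snippet below is only a sketch for a hypothetical MX51 Host 1 setup; the port number and flag combination are assumptions, not taken from this patch.

/* Sketch: hypothetical board-level init hook for the MX51 Host 1 port. */
#include <linux/platform_device.h>
#include <mach/mxc_ehci.h>

static int example_usbh1_init(struct platform_device *pdev)
{
	/* 1 selects the Host 1 port; the flags here are illustrative only. */
	return mxc_initialize_usb_hw(1, MXC_EHCI_POWER_PINS_ENABLED |
					MXC_EHCI_ITC_NO_THRESHOLD);
}

static struct mxc_usbh_platform_data example_usbh1_pdata = {
	.init	= example_usbh1_init,
};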
diff --git a/arch/arm/plat-mxc/gpio.c b/arch/arm/plat-mxc/gpio.c
index 70b23893f094..71437c61cfd7 100644
--- a/arch/arm/plat-mxc/gpio.c
+++ b/arch/arm/plat-mxc/gpio.c
@@ -3,7 +3,7 @@
* Copyright 2008 Juergen Beisert, kernel@pengutronix.de
*
* Based on code from Freescale,
- * Copyright 2004-2006 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright (C) 2004-2010 Freescale Semiconductor, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -38,7 +38,6 @@ static int gpio_table_size;
#define GPIO_ICR2 (cpu_is_mx1_mx2() ? 0x2C : 0x10)
#define GPIO_IMR (cpu_is_mx1_mx2() ? 0x30 : 0x14)
#define GPIO_ISR (cpu_is_mx1_mx2() ? 0x34 : 0x18)
-#define GPIO_ISR (cpu_is_mx1_mx2() ? 0x34 : 0x18)
#define GPIO_INT_LOW_LEV (cpu_is_mx1_mx2() ? 0x3 : 0x0)
#define GPIO_INT_HIGH_LEV (cpu_is_mx1_mx2() ? 0x2 : 0x1)
@@ -289,7 +288,7 @@ int __init mxc_gpio_init(struct mxc_gpio_port *port, int cnt)
/* its a serious configuration bug when it fails */
BUG_ON( gpiochip_add(&port[i].chip) < 0 );
- if (cpu_is_mx1() || cpu_is_mx3() || cpu_is_mx25()) {
+ if (cpu_is_mx1() || cpu_is_mx3() || cpu_is_mx25() || cpu_is_mx51()) {
/* setup one handler for each entry */
set_irq_chained_handler(port[i].irq, mx3_gpio_irq_handler);
set_irq_data(port[i].irq, &port[i]);
diff --git a/arch/arm/plat-mxc/include/mach/iomux-mx51.h b/arch/arm/plat-mxc/include/mach/iomux-mx51.h
index b4f975e6a665..80528cc3b557 100644
--- a/arch/arm/plat-mxc/include/mach/iomux-mx51.h
+++ b/arch/arm/plat-mxc/include/mach/iomux-mx51.h
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2009-2010 Amit Kucheria <amit.kucheria@canonical.com>
+ * Copyright (C) 2010 Freescale Semiconductor, Inc.
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
@@ -37,6 +38,11 @@ typedef enum iomux_config {
PAD_CTL_SRE_FAST)
#define MX51_UART3_PAD_CTRL (PAD_CTL_PKE | PAD_CTL_DSE_HIGH | \
PAD_CTL_SRE_FAST)
+#define MX51_USBH1_PAD_CTRL (PAD_CTL_SRE_FAST | PAD_CTL_DSE_HIGH | \
+ PAD_CTL_PUS_100K_UP | PAD_CTL_PUE | \
+ PAD_CTL_PKE | PAD_CTL_HYS)
+#define MX51_GPIO_PAD_CTRL (PAD_CTL_DSE_HIGH | PAD_CTL_PKE | \
+ PAD_CTL_SRE_FAST)
/*
* The naming convention for the pad modes is MX51_PAD_<padname>__<padmode>
@@ -208,18 +214,19 @@ typedef enum iomux_config {
#define MX51_PAD_KEY_COL3__KEY_COL3 IOMUX_PAD(0x658, 0x268, 0, 0x0, 0, NO_PAD_CTRL)
#define MX51_PAD_KEY_COL4__KEY_COL4 IOMUX_PAD(0x65C, 0x26C, 0, 0x0, 0, NO_PAD_CTRL)
#define MX51_PAD_KEY_COL5__KEY_COL5 IOMUX_PAD(0x660, 0x270, 0, 0x0, 0, NO_PAD_CTRL)
-#define MX51_PAD_GPIO_1_25__USBH1_CLK IOMUX_PAD(0x678, 0x278, 2, 0x0, 0, NO_PAD_CTRL)
-#define MX51_PAD_GPIO_1_26__USBH1_DIR IOMUX_PAD(0x67C, 0x27C, 2, 0x0, 0, NO_PAD_CTRL)
-#define MX51_PAD_GPIO_1_27__USBH1_STP IOMUX_PAD(0x680, 0x280, 2, 0x0, 0, NO_PAD_CTRL)
-#define MX51_PAD_GPIO_1_28__USBH1_NXT IOMUX_PAD(0x684, 0x284, 2, 0x0, 0, NO_PAD_CTRL)
-#define MX51_PAD_GPIO_1_11__USBH1_DATA0 IOMUX_PAD(0x688, 0x288, 2, 0x0, 0, NO_PAD_CTRL)
-#define MX51_PAD_GPIO_1_12__USBH1_DATA1 IOMUX_PAD(0x68C, 0x28C, 2, 0x0, 0, NO_PAD_CTRL)
-#define MX51_PAD_GPIO_1_13__USBH1_DATA2 IOMUX_PAD(0x690, 0x290, 2, 0x0, 0, NO_PAD_CTRL)
-#define MX51_PAD_GPIO_1_14__USBH1_DATA3 IOMUX_PAD(0x694, 0x294, 2, 0x0, 0, NO_PAD_CTRL)
-#define MX51_PAD_GPIO_1_15__USBH1_DATA4 IOMUX_PAD(0x698, 0x298, 2, 0x0, 0, NO_PAD_CTRL)
-#define MX51_PAD_GPIO_1_16__USBH1_DATA5 IOMUX_PAD(0x69C, 0x29C, 2, 0x0, 0, NO_PAD_CTRL)
-#define MX51_PAD_GPIO_1_17__USBH1_DATA6 IOMUX_PAD(0x6A0, 0x2A0, 2, 0x0, 0, NO_PAD_CTRL)
-#define MX51_PAD_GPIO_1_18__USBH1_DATA7 IOMUX_PAD(0x6A4, 0x2A4, 2, 0x0, 0, NO_PAD_CTRL)
+#define MX51_PAD_USBH1_CLK__USBH1_CLK IOMUX_PAD(0x678, 0x278, IOMUX_CONFIG_ALT0, 0x0, 0, MX51_USBH1_PAD_CTRL)
+#define MX51_PAD_USBH1_DIR__USBH1_DIR IOMUX_PAD(0x67C, 0x27C, IOMUX_CONFIG_ALT0, 0x0, 0, MX51_USBH1_PAD_CTRL)
+#define MX51_PAD_USBH1_STP__USBH1_STP IOMUX_PAD(0x680, 0x280, IOMUX_CONFIG_ALT0, 0x0, 0, MX51_USBH1_PAD_CTRL)
+#define MX51_PAD_USBH1_STP__GPIO_1_27 IOMUX_PAD(0x680, 0x280, IOMUX_CONFIG_GPIO, 0x0, 0, MX51_USBH1_PAD_CTRL)
+#define MX51_PAD_USBH1_NXT__USBH1_NXT IOMUX_PAD(0x684, 0x284, IOMUX_CONFIG_ALT0, 0x0, 0, MX51_USBH1_PAD_CTRL)
+#define MX51_PAD_USBH1_DATA0__USBH1_DATA0 IOMUX_PAD(0x688, 0x288, IOMUX_CONFIG_ALT0, 0x0, 0, MX51_USBH1_PAD_CTRL)
+#define MX51_PAD_USBH1_DATA1__USBH1_DATA1 IOMUX_PAD(0x68C, 0x28C, IOMUX_CONFIG_ALT0, 0x0, 0, MX51_USBH1_PAD_CTRL)
+#define MX51_PAD_USBH1_DATA2__USBH1_DATA2 IOMUX_PAD(0x690, 0x290, IOMUX_CONFIG_ALT0, 0x0, 0, MX51_USBH1_PAD_CTRL)
+#define MX51_PAD_USBH1_DATA3__USBH1_DATA3 IOMUX_PAD(0x694, 0x294, IOMUX_CONFIG_ALT0, 0x0, 0, MX51_USBH1_PAD_CTRL)
+#define MX51_PAD_USBH1_DATA4__USBH1_DATA4 IOMUX_PAD(0x698, 0x298, IOMUX_CONFIG_ALT0, 0x0, 0, MX51_USBH1_PAD_CTRL)
+#define MX51_PAD_USBH1_DATA5__USBH1_DATA5 IOMUX_PAD(0x69C, 0x29C, IOMUX_CONFIG_ALT0, 0x0, 0, MX51_USBH1_PAD_CTRL)
+#define MX51_PAD_USBH1_DATA6__USBH1_DATA6 IOMUX_PAD(0x6A0, 0x2A0, IOMUX_CONFIG_ALT0, 0x0, 0, MX51_USBH1_PAD_CTRL)
+#define MX51_PAD_USBH1_DATA7__USBH1_DATA7 IOMUX_PAD(0x6A4, 0x2A4, IOMUX_CONFIG_ALT0, 0x0, 0, MX51_USBH1_PAD_CTRL)
#define MX51_PAD_GPIO_3_0__DI1_PIN11 IOMUX_PAD(0x6A8, 0x2A8, 4, 0x0, 0, NO_PAD_CTRL)
#define MX51_PAD_GPIO_3_1__DI1_PIN12 IOMUX_PAD(0x6AC, 0x2AC, 4, 0x0, 0, NO_PAD_CTRL)
#define MX51_PAD_GPIO_3_2__DI1_PIN13 IOMUX_PAD(0x6B0, 0x2B0, 4, 0x0, 0, NO_PAD_CTRL)
@@ -299,7 +306,7 @@ typedef enum iomux_config {
#define MX51_PAD_GPIO_1_4__GPIO1_4 IOMUX_PAD(0x804, 0x3D8, 0, 0x0, 0, NO_PAD_CTRL)
#define MX51_PAD_GPIO_1_5__GPIO1_5 IOMUX_PAD(0x808, 0x3DC, 0, 0x0, 0, NO_PAD_CTRL)
#define MX51_PAD_GPIO_1_6__GPIO1_6 IOMUX_PAD(0x80C, 0x3E0, 0, 0x0, 0, NO_PAD_CTRL)
-#define MX51_PAD_GPIO_1_7__GPIO1_7 IOMUX_PAD(0x810, 0x3E4, 0, 0x0, 0, NO_PAD_CTRL)
+#define MX51_PAD_GPIO_1_7__GPIO1_7 IOMUX_PAD(0x810, 0x3E4, 0, 0x0, 0, MX51_GPIO_PAD_CTRL)
#define MX51_PAD_GPIO_1_8__GPIO1_8 IOMUX_PAD(0x814, 0x3E8, 0, 0x0, 1, \
(PAD_CTL_SRE_SLOW | PAD_CTL_DSE_MED | PAD_CTL_PUS_100K_UP | PAD_CTL_HYS))
#define MX51_PAD_GPIO_1_9__GPIO1_9 IOMUX_PAD(0x818, 0x3EC, 0, 0x0, 0, NO_PAD_CTRL)
diff --git a/arch/arm/plat-mxc/include/mach/mxc_ehci.h b/arch/arm/plat-mxc/include/mach/mxc_ehci.h
index 4b9b8368c0c0..7fc5f9946199 100644
--- a/arch/arm/plat-mxc/include/mach/mxc_ehci.h
+++ b/arch/arm/plat-mxc/include/mach/mxc_ehci.h
@@ -25,6 +25,18 @@
#define MXC_EHCI_INTERNAL_PHY (1 << 7)
#define MXC_EHCI_IPPUE_DOWN (1 << 8)
#define MXC_EHCI_IPPUE_UP (1 << 9)
+#define MXC_EHCI_WAKEUP_ENABLED (1 << 10)
+#define MXC_EHCI_ITC_NO_THRESHOLD (1 << 11)
+
+#define MXC_USBCTRL_OFFSET 0
+#define MXC_USB_PHY_CTR_FUNC_OFFSET 0x8
+#define MXC_USB_PHY_CTR_FUNC2_OFFSET 0xc
+
+#define MX5_USBOTHER_REGS_OFFSET 0x800
+
+/* USB_PHY_CTRL_FUNC2*/
+#define MX5_USB_UTMI_PHYCTRL1_PLLDIV_MASK 0x3
+#define MX5_USB_UTMI_PHYCTRL1_PLLDIV_SHIFT 0
struct mxc_usbh_platform_data {
int (*init)(struct platform_device *pdev);
@@ -35,7 +47,7 @@ struct mxc_usbh_platform_data {
struct otg_transceiver *otg;
};
-int mxc_set_usbcontrol(int port, unsigned int flags);
+int mxc_initialize_usb_hw(int port, unsigned int flags);
#endif /* __INCLUDE_ASM_ARCH_MXC_EHCI_H */
diff --git a/arch/arm/plat-mxc/time.c b/arch/arm/plat-mxc/time.c
index c1ce51abdba6..f9a1b059a76c 100644
--- a/arch/arm/plat-mxc/time.c
+++ b/arch/arm/plat-mxc/time.c
@@ -54,14 +54,14 @@
#define MX2_TSTAT_COMP (1 << 0)
/* MX31, MX35, MX25, MXC91231, MX5 */
-#define MX3_TCTL_WAITEN (1 << 3) /* Wait enable mode */
-#define MX3_TCTL_CLK_IPG (1 << 6)
-#define MX3_TCTL_FRR (1 << 9)
-#define MX3_IR 0x0c
-#define MX3_TSTAT 0x08
-#define MX3_TSTAT_OF1 (1 << 0)
-#define MX3_TCN 0x24
-#define MX3_TCMP 0x10
+#define V2_TCTL_WAITEN (1 << 3) /* Wait enable mode */
+#define V2_TCTL_CLK_IPG (1 << 6)
+#define V2_TCTL_FRR (1 << 9)
+#define V2_IR 0x0c
+#define V2_TSTAT 0x08
+#define V2_TSTAT_OF1 (1 << 0)
+#define V2_TCN 0x24
+#define V2_TCMP 0x10
#define timer_is_v1() (cpu_is_mx1() || cpu_is_mx21() || cpu_is_mx27())
#define timer_is_v2() (!timer_is_v1())
@@ -76,7 +76,7 @@ static inline void gpt_irq_disable(void)
unsigned int tmp;
if (timer_is_v2())
- __raw_writel(0, timer_base + MX3_IR);
+ __raw_writel(0, timer_base + V2_IR);
else {
tmp = __raw_readl(timer_base + MXC_TCTL);
__raw_writel(tmp & ~MX1_2_TCTL_IRQEN, timer_base + MXC_TCTL);
@@ -86,7 +86,7 @@ static inline void gpt_irq_disable(void)
static inline void gpt_irq_enable(void)
{
if (timer_is_v2())
- __raw_writel(1<<0, timer_base + MX3_IR);
+ __raw_writel(1<<0, timer_base + V2_IR);
else {
__raw_writel(__raw_readl(timer_base + MXC_TCTL) | MX1_2_TCTL_IRQEN,
timer_base + MXC_TCTL);
@@ -102,7 +102,7 @@ static void gpt_irq_acknowledge(void)
__raw_writel(MX2_TSTAT_CAPT | MX2_TSTAT_COMP,
timer_base + MX1_2_TSTAT);
} else if (timer_is_v2())
- __raw_writel(MX3_TSTAT_OF1, timer_base + MX3_TSTAT);
+ __raw_writel(V2_TSTAT_OF1, timer_base + V2_TSTAT);
}
static cycle_t mx1_2_get_cycles(struct clocksource *cs)
@@ -110,9 +110,9 @@ static cycle_t mx1_2_get_cycles(struct clocksource *cs)
return __raw_readl(timer_base + MX1_2_TCN);
}
-static cycle_t mx3_get_cycles(struct clocksource *cs)
+static cycle_t v2_get_cycles(struct clocksource *cs)
{
- return __raw_readl(timer_base + MX3_TCN);
+ return __raw_readl(timer_base + V2_TCN);
}
static struct clocksource clocksource_mxc = {
@@ -129,7 +129,7 @@ static int __init mxc_clocksource_init(struct clk *timer_clk)
unsigned int c = clk_get_rate(timer_clk);
if (timer_is_v2())
- clocksource_mxc.read = mx3_get_cycles;
+ clocksource_mxc.read = v2_get_cycles;
clocksource_mxc.mult = clocksource_hz2mult(c,
clocksource_mxc.shift);
@@ -153,16 +153,16 @@ static int mx1_2_set_next_event(unsigned long evt,
-ETIME : 0;
}
-static int mx3_set_next_event(unsigned long evt,
+static int v2_set_next_event(unsigned long evt,
struct clock_event_device *unused)
{
unsigned long tcmp;
- tcmp = __raw_readl(timer_base + MX3_TCN) + evt;
+ tcmp = __raw_readl(timer_base + V2_TCN) + evt;
- __raw_writel(tcmp, timer_base + MX3_TCMP);
+ __raw_writel(tcmp, timer_base + V2_TCMP);
- return (int)(tcmp - __raw_readl(timer_base + MX3_TCN)) < 0 ?
+ return (int)(tcmp - __raw_readl(timer_base + V2_TCN)) < 0 ?
-ETIME : 0;
}
@@ -192,8 +192,8 @@ static void mxc_set_mode(enum clock_event_mode mode,
if (mode != clockevent_mode) {
/* Set event time into far-far future */
if (timer_is_v2())
- __raw_writel(__raw_readl(timer_base + MX3_TCN) - 3,
- timer_base + MX3_TCMP);
+ __raw_writel(__raw_readl(timer_base + V2_TCN) - 3,
+ timer_base + V2_TCMP);
else
__raw_writel(__raw_readl(timer_base + MX1_2_TCN) - 3,
timer_base + MX1_2_TCMP);
@@ -245,7 +245,7 @@ static irqreturn_t mxc_timer_interrupt(int irq, void *dev_id)
uint32_t tstat;
if (timer_is_v2())
- tstat = __raw_readl(timer_base + MX3_TSTAT);
+ tstat = __raw_readl(timer_base + V2_TSTAT);
else
tstat = __raw_readl(timer_base + MX1_2_TSTAT);
@@ -276,7 +276,7 @@ static int __init mxc_clockevent_init(struct clk *timer_clk)
unsigned int c = clk_get_rate(timer_clk);
if (timer_is_v2())
- clockevent_mxc.set_next_event = mx3_set_next_event;
+ clockevent_mxc.set_next_event = v2_set_next_event;
clockevent_mxc.mult = div_sc(c, NSEC_PER_SEC,
clockevent_mxc.shift);
@@ -308,7 +308,7 @@ void __init mxc_timer_init(struct clk *timer_clk, void __iomem *base, int irq)
__raw_writel(0, timer_base + MXC_TPRER); /* see datasheet note */
if (timer_is_v2())
- tctl_val = MX3_TCTL_CLK_IPG | MX3_TCTL_FRR | MX3_TCTL_WAITEN | MXC_TCTL_TEN;
+ tctl_val = V2_TCTL_CLK_IPG | V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN;
else
tctl_val = MX1_2_TCTL_FRR | MX1_2_TCTL_CLK_PCLK1 | MXC_TCTL_TEN;
diff --git a/arch/arm/plat-mxc/tzic.c b/arch/arm/plat-mxc/tzic.c
index afa6709db0b3..9b86d2a60d43 100644
--- a/arch/arm/plat-mxc/tzic.c
+++ b/arch/arm/plat-mxc/tzic.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright (C)2004-2010 Freescale Semiconductor, Inc. All Rights Reserved.
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
@@ -19,6 +19,7 @@
#include <asm/mach/irq.h>
#include <mach/hardware.h>
+#include <mach/common.h>
/*
*****************************************
@@ -144,6 +145,7 @@ void __init tzic_init_irq(void __iomem *irqbase)
set_irq_handler(i, handle_level_irq);
set_irq_flags(i, IRQF_VALID);
}
+ mxc_register_gpios();
pr_info("TrustZone Interrupt Controller (TZIC) initialized\n");
}
diff --git a/arch/arm/plat-nomadik/Kconfig b/arch/arm/plat-nomadik/Kconfig
index 159daf583f85..5da3f97c537b 100644
--- a/arch/arm/plat-nomadik/Kconfig
+++ b/arch/arm/plat-nomadik/Kconfig
@@ -19,4 +19,9 @@ config HAS_MTU
to multiple interrupt generating programmable
32-bit free running decrementing counters.
+config NOMADIK_GPIO
+ bool
+ help
+ Support for the Nomadik GPIO controller.
+
endif
diff --git a/arch/arm/plat-nomadik/Makefile b/arch/arm/plat-nomadik/Makefile
index 37c7cdd0f8f0..c33547361bd7 100644
--- a/arch/arm/plat-nomadik/Makefile
+++ b/arch/arm/plat-nomadik/Makefile
@@ -3,3 +3,4 @@
# Licensed under GPLv2
obj-$(CONFIG_HAS_MTU) += timer.o
+obj-$(CONFIG_NOMADIK_GPIO) += gpio.o
diff --git a/arch/arm/mach-nomadik/gpio.c b/arch/arm/plat-nomadik/gpio.c
index 66b1c91ccc74..5a6ef252c38b 100644
--- a/arch/arm/mach-nomadik/gpio.c
+++ b/arch/arm/plat-nomadik/gpio.c
@@ -13,8 +13,10 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
-#include <linux/amba/bus.h>
+#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
@@ -36,8 +38,9 @@
struct nmk_gpio_chip {
struct gpio_chip chip;
void __iomem *addr;
+ struct clk *clk;
unsigned int parent_irq;
- spinlock_t *lock;
+ spinlock_t lock;
/* Keep track of configured edges */
u32 edge_rising;
u32 edge_falling;
@@ -108,40 +111,37 @@ static void nmk_gpio_irq_ack(unsigned int irq)
writel(nmk_gpio_get_bitmask(gpio), nmk_chip->addr + NMK_GPIO_IC);
}
-static void nmk_gpio_irq_mask(unsigned int irq)
+static void __nmk_gpio_irq_modify(struct nmk_gpio_chip *nmk_chip,
+ int gpio, bool enable)
{
- int gpio;
- struct nmk_gpio_chip *nmk_chip;
- unsigned long flags;
- u32 bitmask, reg;
-
- gpio = NOMADIK_IRQ_TO_GPIO(irq);
- nmk_chip = get_irq_chip_data(irq);
- bitmask = nmk_gpio_get_bitmask(gpio);
- if (!nmk_chip)
- return;
+ u32 bitmask = nmk_gpio_get_bitmask(gpio);
+ u32 reg;
- /* we must individually clear the two edges */
- spin_lock_irqsave(&nmk_chip->lock, flags);
+ /* we must individually set/clear the two edges */
if (nmk_chip->edge_rising & bitmask) {
- reg = readl(nmk_chip->addr + NMK_GPIO_RWIMSC);
- reg &= ~bitmask;
- writel(reg, nmk_chip->addr + NMK_GPIO_RWIMSC);
+ reg = readl(nmk_chip->addr + NMK_GPIO_RIMSC);
+ if (enable)
+ reg |= bitmask;
+ else
+ reg &= ~bitmask;
+ writel(reg, nmk_chip->addr + NMK_GPIO_RIMSC);
}
if (nmk_chip->edge_falling & bitmask) {
- reg = readl(nmk_chip->addr + NMK_GPIO_FWIMSC);
- reg &= ~bitmask;
- writel(reg, nmk_chip->addr + NMK_GPIO_FWIMSC);
+ reg = readl(nmk_chip->addr + NMK_GPIO_FIMSC);
+ if (enable)
+ reg |= bitmask;
+ else
+ reg &= ~bitmask;
+ writel(reg, nmk_chip->addr + NMK_GPIO_FIMSC);
}
- spin_unlock_irqrestore(&nmk_chip->lock, flags);
-};
+}
-static void nmk_gpio_irq_unmask(unsigned int irq)
+static void nmk_gpio_irq_modify(unsigned int irq, bool enable)
{
int gpio;
struct nmk_gpio_chip *nmk_chip;
unsigned long flags;
- u32 bitmask, reg;
+ u32 bitmask;
gpio = NOMADIK_IRQ_TO_GPIO(irq);
nmk_chip = get_irq_chip_data(irq);
@@ -149,23 +149,24 @@ static void nmk_gpio_irq_unmask(unsigned int irq)
if (!nmk_chip)
return;
- /* we must individually set the two edges */
spin_lock_irqsave(&nmk_chip->lock, flags);
- if (nmk_chip->edge_rising & bitmask) {
- reg = readl(nmk_chip->addr + NMK_GPIO_RWIMSC);
- reg |= bitmask;
- writel(reg, nmk_chip->addr + NMK_GPIO_RWIMSC);
- }
- if (nmk_chip->edge_falling & bitmask) {
- reg = readl(nmk_chip->addr + NMK_GPIO_FWIMSC);
- reg |= bitmask;
- writel(reg, nmk_chip->addr + NMK_GPIO_FWIMSC);
- }
+ __nmk_gpio_irq_modify(nmk_chip, gpio, enable);
spin_unlock_irqrestore(&nmk_chip->lock, flags);
}
+static void nmk_gpio_irq_mask(unsigned int irq)
+{
+ nmk_gpio_irq_modify(irq, false);
+};
+
+static void nmk_gpio_irq_unmask(unsigned int irq)
+{
+ nmk_gpio_irq_modify(irq, true);
+}
+
static int nmk_gpio_irq_set_type(unsigned int irq, unsigned int type)
{
+ bool enabled = !(irq_to_desc(irq)->status & IRQ_DISABLED);
int gpio;
struct nmk_gpio_chip *nmk_chip;
unsigned long flags;
@@ -184,19 +185,21 @@ static int nmk_gpio_irq_set_type(unsigned int irq, unsigned int type)
spin_lock_irqsave(&nmk_chip->lock, flags);
+ if (enabled)
+ __nmk_gpio_irq_modify(nmk_chip, gpio, false);
+
nmk_chip->edge_rising &= ~bitmask;
if (type & IRQ_TYPE_EDGE_RISING)
nmk_chip->edge_rising |= bitmask;
- writel(nmk_chip->edge_rising, nmk_chip->addr + NMK_GPIO_RIMSC);
nmk_chip->edge_falling &= ~bitmask;
if (type & IRQ_TYPE_EDGE_FALLING)
nmk_chip->edge_falling |= bitmask;
- writel(nmk_chip->edge_falling, nmk_chip->addr + NMK_GPIO_FIMSC);
- spin_unlock_irqrestore(&nmk_chip->lock, flags);
+ if (enabled)
+ __nmk_gpio_irq_modify(nmk_chip, gpio, true);
- nmk_gpio_irq_unmask(irq);
+ spin_unlock_irqrestore(&nmk_chip->lock, flags);
return 0;
}
@@ -212,21 +215,27 @@ static struct irq_chip nmk_gpio_irq_chip = {
static void nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
struct nmk_gpio_chip *nmk_chip;
- struct irq_chip *host_chip;
+ struct irq_chip *host_chip = get_irq_chip(irq);
unsigned int gpio_irq;
u32 pending;
unsigned int first_irq;
+ if (host_chip->mask_ack)
+ host_chip->mask_ack(irq);
+ else {
+ host_chip->mask(irq);
+ if (host_chip->ack)
+ host_chip->ack(irq);
+ }
+
nmk_chip = get_irq_data(irq);
first_irq = NOMADIK_GPIO_TO_IRQ(nmk_chip->chip.base);
while ( (pending = readl(nmk_chip->addr + NMK_GPIO_IS)) ) {
gpio_irq = first_irq + __ffs(pending);
generic_handle_irq(gpio_irq);
}
- if (0) {/* don't ack parent irq, as ack == disable */
- host_chip = get_irq_chip(irq);
- host_chip->ack(irq);
- }
+
+ host_chip->unmask(irq);
}
static int nmk_gpio_init_irq(struct nmk_gpio_chip *nmk_chip)
@@ -240,6 +249,7 @@ static int nmk_gpio_init_irq(struct nmk_gpio_chip *nmk_chip)
set_irq_handler(i, handle_edge_irq);
set_irq_flags(i, IRQF_VALID);
set_irq_chip_data(i, nmk_chip);
+ set_irq_type(i, IRQ_TYPE_EDGE_FALLING);
}
set_irq_chained_handler(nmk_chip->parent_irq, nmk_gpio_irq_handler);
set_irq_data(nmk_chip->parent_irq, nmk_chip);
@@ -298,30 +308,59 @@ static struct gpio_chip nmk_gpio_template = {
.can_sleep = 0,
};
-static int __init nmk_gpio_probe(struct amba_device *dev, struct amba_id *id)
+static int __init nmk_gpio_probe(struct platform_device *dev)
{
- struct nmk_gpio_platform_data *pdata;
+ struct nmk_gpio_platform_data *pdata = dev->dev.platform_data;
struct nmk_gpio_chip *nmk_chip;
struct gpio_chip *chip;
+ struct resource *res;
+ struct clk *clk;
+ int irq;
int ret;
- pdata = dev->dev.platform_data;
- ret = amba_request_regions(dev, pdata->name);
- if (ret)
- return ret;
+ if (!pdata)
+ return -ENODEV;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ irq = platform_get_irq(dev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto out;
+ }
+
+ if (request_mem_region(res->start, resource_size(res),
+ dev_name(&dev->dev)) == NULL) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ clk = clk_get(&dev->dev, NULL);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto out_release;
+ }
+
+ clk_enable(clk);
nmk_chip = kzalloc(sizeof(*nmk_chip), GFP_KERNEL);
if (!nmk_chip) {
ret = -ENOMEM;
- goto out_amba;
+ goto out_clk;
}
/*
* The virt address in nmk_chip->addr is in the nomadik register space,
* so we can simply convert the resource address, without remapping
*/
- nmk_chip->addr = io_p2v(dev->res.start);
+ nmk_chip->clk = clk;
+ nmk_chip->addr = io_p2v(res->start);
nmk_chip->chip = nmk_gpio_template;
- nmk_chip->parent_irq = pdata->parent_irq;
+ nmk_chip->parent_irq = irq;
+ spin_lock_init(&nmk_chip->lock);
chip = &nmk_chip->chip;
chip->base = pdata->first_gpio;
@@ -333,7 +372,7 @@ static int __init nmk_gpio_probe(struct amba_device *dev, struct amba_id *id)
if (ret)
goto out_free;
- amba_set_drvdata(dev, nmk_chip);
+ platform_set_drvdata(dev, nmk_chip);
nmk_gpio_init_irq(nmk_chip);
@@ -341,51 +380,50 @@ static int __init nmk_gpio_probe(struct amba_device *dev, struct amba_id *id)
nmk_chip->chip.base, nmk_chip->chip.base+31, nmk_chip->addr);
return 0;
- out_free:
+out_free:
kfree(nmk_chip);
- out_amba:
- amba_release_regions(dev);
+out_clk:
+ clk_disable(clk);
+ clk_put(clk);
+out_release:
+ release_mem_region(res->start, resource_size(res));
+out:
dev_err(&dev->dev, "Failure %i for GPIO %i-%i\n", ret,
pdata->first_gpio, pdata->first_gpio+31);
return ret;
}
-static int nmk_gpio_remove(struct amba_device *dev)
+static int __exit nmk_gpio_remove(struct platform_device *dev)
{
struct nmk_gpio_chip *nmk_chip;
+ struct resource *res;
- nmk_chip = amba_get_drvdata(dev);
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+
+ nmk_chip = platform_get_drvdata(dev);
gpiochip_remove(&nmk_chip->chip);
+ clk_disable(nmk_chip->clk);
+ clk_put(nmk_chip->clk);
kfree(nmk_chip);
- amba_release_regions(dev);
+ release_mem_region(res->start, resource_size(res));
return 0;
}
-/* We have 0x1f080060 and 0x1f180060, accept both using the mask */
-static struct amba_id nmk_gpio_ids[] = {
- {
- .id = 0x1f080060,
- .mask = 0xffefffff,
- },
- {0, 0},
-};
-
-static struct amba_driver nmk_gpio_driver = {
- .drv = {
+static struct platform_driver nmk_gpio_driver = {
+ .driver = {
.owner = THIS_MODULE,
.name = "gpio",
},
.probe = nmk_gpio_probe,
- .remove = nmk_gpio_remove,
+ .remove = __exit_p(nmk_gpio_remove),
.suspend = NULL, /* to be done */
.resume = NULL,
- .id_table = nmk_gpio_ids,
};
static int __init nmk_gpio_init(void)
{
- return amba_driver_register(&nmk_gpio_driver);
+ return platform_driver_register(&nmk_gpio_driver);
}
arch_initcall(nmk_gpio_init);
diff --git a/arch/arm/plat-nomadik/include/plat/gpio.h b/arch/arm/plat-nomadik/include/plat/gpio.h
new file mode 100644
index 000000000000..4200811249ca
--- /dev/null
+++ b/arch/arm/plat-nomadik/include/plat/gpio.h
@@ -0,0 +1,70 @@
+/*
+ * Structures and registers for GPIO access in the Nomadik SoC
+ *
+ * Copyright (C) 2008 STMicroelectronics
+ * Author: Prafulla WADASKAR <prafulla.wadaskar@st.com>
+ * Copyright (C) 2009 Alessandro Rubini <rubini@unipv.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_PLAT_GPIO_H
+#define __ASM_PLAT_GPIO_H
+
+#include <asm-generic/gpio.h>
+
+/*
+ * These currently cause a function call to happen, they may be optimized
+ * if needed by adding cpu-specific defines to identify blocks
+ * (see mach-pxa/include/mach/gpio.h as an example using GPLR etc)
+ */
+#define gpio_get_value __gpio_get_value
+#define gpio_set_value __gpio_set_value
+#define gpio_cansleep __gpio_cansleep
+#define gpio_to_irq __gpio_to_irq
+
+/*
+ * "nmk_gpio" and "NMK_GPIO" stand for "Nomadik GPIO", leaving
+ * the "gpio" namespace for generic and cross-machine functions
+ */
+
+/* Register in the logic block */
+#define NMK_GPIO_DAT 0x00
+#define NMK_GPIO_DATS 0x04
+#define NMK_GPIO_DATC 0x08
+#define NMK_GPIO_PDIS 0x0c
+#define NMK_GPIO_DIR 0x10
+#define NMK_GPIO_DIRS 0x14
+#define NMK_GPIO_DIRC 0x18
+#define NMK_GPIO_SLPC 0x1c
+#define NMK_GPIO_AFSLA 0x20
+#define NMK_GPIO_AFSLB 0x24
+
+#define NMK_GPIO_RIMSC 0x40
+#define NMK_GPIO_FIMSC 0x44
+#define NMK_GPIO_IS 0x48
+#define NMK_GPIO_IC 0x4c
+#define NMK_GPIO_RWIMSC 0x50
+#define NMK_GPIO_FWIMSC 0x54
+#define NMK_GPIO_WKS 0x58
+
+/* Alternate functions: function C is set in hw by setting both A and B */
+#define NMK_GPIO_ALT_GPIO 0
+#define NMK_GPIO_ALT_A 1
+#define NMK_GPIO_ALT_B 2
+#define NMK_GPIO_ALT_C (NMK_GPIO_ALT_A | NMK_GPIO_ALT_B)
+
+extern int nmk_gpio_set_mode(int gpio, int gpio_mode);
+extern int nmk_gpio_get_mode(int gpio);
+
+/*
+ * Platform data to register a block: only the initial gpio/irq number.
+ */
+struct nmk_gpio_platform_data {
+ char *name;
+ int first_gpio;
+ int first_irq;
+};
+
+#endif /* __ASM_PLAT_GPIO_H */
diff --git a/arch/arm/plat-nomadik/include/plat/ste_dma40.h b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
new file mode 100644
index 000000000000..4d12ea4ca361
--- /dev/null
+++ b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
@@ -0,0 +1,239 @@
+/*
+ * arch/arm/plat-nomadik/include/plat/ste_dma40.h
+ *
+ * Copyright (C) ST-Ericsson 2007-2010
+ * License terms: GNU General Public License (GPL) version 2
+ * Author: Per Friden <per.friden@stericsson.com>
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
+ */
+
+
+#ifndef STE_DMA40_H
+#define STE_DMA40_H
+
+#include <linux/dmaengine.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+
+/* dev types for memcpy */
+#define STEDMA40_DEV_DST_MEMORY (-1)
+#define STEDMA40_DEV_SRC_MEMORY (-1)
+
+/*
+ * Description of bitfields of channel_type variable is available in
+ * the info structure.
+ */
+
+/* Priority */
+#define STEDMA40_INFO_PRIO_TYPE_POS 2
+#define STEDMA40_HIGH_PRIORITY_CHANNEL (0x1 << STEDMA40_INFO_PRIO_TYPE_POS)
+#define STEDMA40_LOW_PRIORITY_CHANNEL (0x2 << STEDMA40_INFO_PRIO_TYPE_POS)
+
+/* Mode */
+#define STEDMA40_INFO_CH_MODE_TYPE_POS 6
+#define STEDMA40_CHANNEL_IN_PHY_MODE (0x1 << STEDMA40_INFO_CH_MODE_TYPE_POS)
+#define STEDMA40_CHANNEL_IN_LOG_MODE (0x2 << STEDMA40_INFO_CH_MODE_TYPE_POS)
+#define STEDMA40_CHANNEL_IN_OPER_MODE (0x3 << STEDMA40_INFO_CH_MODE_TYPE_POS)
+
+/* Mode options */
+#define STEDMA40_INFO_CH_MODE_OPT_POS 8
+#define STEDMA40_PCHAN_BASIC_MODE (0x1 << STEDMA40_INFO_CH_MODE_OPT_POS)
+#define STEDMA40_PCHAN_MODULO_MODE (0x2 << STEDMA40_INFO_CH_MODE_OPT_POS)
+#define STEDMA40_PCHAN_DOUBLE_DST_MODE (0x3 << STEDMA40_INFO_CH_MODE_OPT_POS)
+#define STEDMA40_LCHAN_SRC_PHY_DST_LOG (0x1 << STEDMA40_INFO_CH_MODE_OPT_POS)
+#define STEDMA40_LCHAN_SRC_LOG_DST_PHS (0x2 << STEDMA40_INFO_CH_MODE_OPT_POS)
+#define STEDMA40_LCHAN_SRC_LOG_DST_LOG (0x3 << STEDMA40_INFO_CH_MODE_OPT_POS)
+
+/* Interrupt */
+#define STEDMA40_INFO_TIM_POS 10
+#define STEDMA40_NO_TIM_FOR_LINK (0x0 << STEDMA40_INFO_TIM_POS)
+#define STEDMA40_TIM_FOR_LINK (0x1 << STEDMA40_INFO_TIM_POS)
+
+/* End of channel_type configuration */
+
+#define STEDMA40_ESIZE_8_BIT 0x0
+#define STEDMA40_ESIZE_16_BIT 0x1
+#define STEDMA40_ESIZE_32_BIT 0x2
+#define STEDMA40_ESIZE_64_BIT 0x3
+
+/* The value 4 indicates that PEN-reg shall be set to 0 */
+#define STEDMA40_PSIZE_PHY_1 0x4
+#define STEDMA40_PSIZE_PHY_2 0x0
+#define STEDMA40_PSIZE_PHY_4 0x1
+#define STEDMA40_PSIZE_PHY_8 0x2
+#define STEDMA40_PSIZE_PHY_16 0x3
+
+/*
+ * The number of elements differ in logical and
+ * physical mode
+ */
+#define STEDMA40_PSIZE_LOG_1 STEDMA40_PSIZE_PHY_2
+#define STEDMA40_PSIZE_LOG_4 STEDMA40_PSIZE_PHY_4
+#define STEDMA40_PSIZE_LOG_8 STEDMA40_PSIZE_PHY_8
+#define STEDMA40_PSIZE_LOG_16 STEDMA40_PSIZE_PHY_16
+
+enum stedma40_flow_ctrl {
+ STEDMA40_NO_FLOW_CTRL,
+ STEDMA40_FLOW_CTRL,
+};
+
+enum stedma40_endianess {
+ STEDMA40_LITTLE_ENDIAN,
+ STEDMA40_BIG_ENDIAN
+};
+
+enum stedma40_periph_data_width {
+ STEDMA40_BYTE_WIDTH = STEDMA40_ESIZE_8_BIT,
+ STEDMA40_HALFWORD_WIDTH = STEDMA40_ESIZE_16_BIT,
+ STEDMA40_WORD_WIDTH = STEDMA40_ESIZE_32_BIT,
+ STEDMA40_DOUBLEWORD_WIDTH = STEDMA40_ESIZE_64_BIT
+};
+
+struct stedma40_half_channel_info {
+ enum stedma40_endianess endianess;
+ enum stedma40_periph_data_width data_width;
+ int psize;
+ enum stedma40_flow_ctrl flow_ctrl;
+};
+
+enum stedma40_xfer_dir {
+ STEDMA40_MEM_TO_MEM,
+ STEDMA40_MEM_TO_PERIPH,
+ STEDMA40_PERIPH_TO_MEM,
+ STEDMA40_PERIPH_TO_PERIPH
+};
+
+
+/**
+ * struct stedma40_chan_cfg - Structure to be filled by client drivers.
+ *
+ * @dir: MEM 2 MEM, PERIPH 2 MEM, MEM 2 PERIPH, PERIPH 2 PERIPH
+ * @channel_type: priority, mode, mode options and interrupt configuration.
+ * @src_dev_type: Src device type
+ * @dst_dev_type: Dst device type
+ * @src_info: Parameters for src half channel
+ * @dst_info: Parameters for dst half channel
+ * @pre_transfer_data: Data to be passed on to the pre_transfer() function.
+ * @pre_transfer: Callback used if needed before preparation of transfer.
+ * Only called if a device is set. The size argument is the number of bytes
+ * to transfer (for a multi-element transfer, the size of the first element).
+ *
+ *
+ * This structure has to be filled by the client drivers.
+ * It is recommended to do all dma configuration for clients in the machine (board) code.
+ *
+ */
+struct stedma40_chan_cfg {
+ enum stedma40_xfer_dir dir;
+ unsigned int channel_type;
+ int src_dev_type;
+ int dst_dev_type;
+ struct stedma40_half_channel_info src_info;
+ struct stedma40_half_channel_info dst_info;
+ void *pre_transfer_data;
+ int (*pre_transfer) (struct dma_chan *chan,
+ void *data,
+ int size);
+};
+
+/**
+ * struct stedma40_platform_data - Configuration struct for the dma device.
+ *
+ * @dev_len: length of dev_tx and dev_rx
+ * @dev_tx: mapping between destination event line and io address
+ * @dev_rx: mapping between source event line and io address
+ * @memcpy: list of memcpy event lines
+ * @memcpy_len: length of memcpy
+ * @memcpy_conf_phy: default configuration of physical channel memcpy
+ * @memcpy_conf_log: default configuration of logical channel memcpy
+ * @llis_per_log: number of max linked list items per logical channel
+ *
+ */
+struct stedma40_platform_data {
+ u32 dev_len;
+ const dma_addr_t *dev_tx;
+ const dma_addr_t *dev_rx;
+ int *memcpy;
+ u32 memcpy_len;
+ struct stedma40_chan_cfg *memcpy_conf_phy;
+ struct stedma40_chan_cfg *memcpy_conf_log;
+ unsigned int llis_per_log;
+};
+
+/**
+ * stedma40_set_psize() - Used for changing the package size of an
+ * already configured dma channel.
+ *
+ * @chan: dmaengine handle
+ * @src_psize: new package size for src. (STEDMA40_PSIZE*)
+ * @dst_psize: new package size for dst. (STEDMA40_PSIZE*)
+ *
+ * Returns 0 on success, otherwise a negative error number.
+ */
+int stedma40_set_psize(struct dma_chan *chan,
+ int src_psize,
+ int dst_psize);
+
+/**
+ * stedma40_filter() - Provides a stedma40_chan_cfg to the
+ * ste_dma40 dma driver via the dmaengine framework and
+ * does some checking of what's provided.
+ *
+ * Never called directly by the client; it is used by the dmaengine framework.
+ * @chan: dmaengine handle.
+ * @data: Must be of type struct stedma40_chan_cfg and holds
+ * the requested channel configuration.
+ *
+ *
+ */
+
+bool stedma40_filter(struct dma_chan *chan, void *data);
+
+/**
+ * stedma40_memcpy_sg() - extension of the dma framework, memcpy to/from
+ * scatter-gather lists.
+ *
+ * @chan: dmaengine handle
+ * @sgl_dst: Destination scatter list
+ * @sgl_src: Source scatter list
+ * @sgl_len: The length of each scatterlist. Both lists must be of equal length
+ * and each element must match the corresponding element in the other scatter
+ * list.
+ * @flags: is actually enum dma_ctrl_flags. See dmaengine.h
+ */
+
+struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
+ struct scatterlist *sgl_dst,
+ struct scatterlist *sgl_src,
+ unsigned int sgl_len,
+ unsigned long flags);
+
+/**
+ * stedma40_slave_mem() - Transfers a raw data buffer to or from a slave
+ * (=device)
+ *
+ * @chan: dmaengine handle
+ * @addr: source or destination physical address.
+ * @size: bytes to transfer
+ * @direction: direction of transfer
+ * @flags: is actually enum dma_ctrl_flags. See dmaengine.h
+ */
+
+static inline struct
+dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
+ dma_addr_t addr,
+ unsigned int size,
+ enum dma_data_direction direction,
+ unsigned long flags)
+{
+ struct scatterlist sg;
+ sg_init_table(&sg, 1);
+ sg.dma_address = addr;
+ sg.length = size;
+
+ return chan->device->device_prep_slave_sg(chan, &sg, 1,
+ direction, flags);
+}
+
+#endif
diff --git a/arch/arm/plat-nomadik/timer.c b/arch/arm/plat-nomadik/timer.c
index fa7cb3a57cbf..db67402518a6 100644
--- a/arch/arm/plat-nomadik/timer.c
+++ b/arch/arm/plat-nomadik/timer.c
@@ -2,7 +2,7 @@
* linux/arch/arm/mach-nomadik/timer.c
*
* Copyright (C) 2008 STMicroelectronics
- * Copyright (C) 2009 Alessandro Rubini, somewhat based on at91sam926x
+ * Copyright (C) 2010 Alessandro Rubini
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2, as
@@ -18,123 +18,124 @@
#include <plat/mtu.h>
-static u32 nmdk_count; /* accumulated count */
-static u32 nmdk_cycle; /* write-once */
+void __iomem *mtu_base; /* assigned by machine code */
-/* setup by the platform code */
-void __iomem *mtu_base;
-
-/*
- * clocksource: the MTU device is a decrementing counters, so we negate
- * the value being read.
- */
+/* clocksource: MTU decrements, so we negate the value being read. */
static cycle_t nmdk_read_timer(struct clocksource *cs)
{
- u32 count = readl(mtu_base + MTU_VAL(0));
- return nmdk_count + nmdk_cycle - count;
-
+ return -readl(mtu_base + MTU_VAL(0));
}
static struct clocksource nmdk_clksrc = {
.name = "mtu_0",
- .rating = 120,
+ .rating = 200,
.read = nmdk_read_timer,
+ .mask = CLOCKSOURCE_MASK(32),
.shift = 20,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-/*
- * Clockevent device: currently only periodic mode is supported
- */
+/* Clockevent device: use one-shot mode */
static void nmdk_clkevt_mode(enum clock_event_mode mode,
struct clock_event_device *dev)
{
+ u32 cr;
+
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
- /* count current value? */
- writel(readl(mtu_base + MTU_IMSC) | 1, mtu_base + MTU_IMSC);
+ pr_err("%s: periodic mode not supported\n", __func__);
break;
case CLOCK_EVT_MODE_ONESHOT:
- BUG(); /* Not supported, yet */
- /* FALLTHROUGH */
+ /* Load highest value, enable device, enable interrupts */
+ cr = readl(mtu_base + MTU_CR(1));
+ writel(0, mtu_base + MTU_LR(1));
+ writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(1));
+ writel(0x2, mtu_base + MTU_IMSC);
+ break;
case CLOCK_EVT_MODE_SHUTDOWN:
case CLOCK_EVT_MODE_UNUSED:
- writel(readl(mtu_base + MTU_IMSC) & ~1, mtu_base + MTU_IMSC);
+ /* disable irq */
+ writel(0, mtu_base + MTU_IMSC);
break;
case CLOCK_EVT_MODE_RESUME:
break;
}
}
+static int nmdk_clkevt_next(unsigned long evt, struct clock_event_device *ev)
+{
+ /* writing the value has immediate effect */
+ writel(evt, mtu_base + MTU_LR(1));
+ return 0;
+}
+
static struct clock_event_device nmdk_clkevt = {
- .name = "mtu_0",
- .features = CLOCK_EVT_FEAT_PERIODIC,
+ .name = "mtu_1",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
.shift = 32,
- .rating = 100,
+ .rating = 200,
.set_mode = nmdk_clkevt_mode,
+ .set_next_event = nmdk_clkevt_next,
};
/*
- * IRQ Handler for the timer 0 of the MTU block. The irq is not shared
- * as we are the only users of mtu0 by now.
+ * IRQ Handler for timer 1 of the MTU block.
*/
static irqreturn_t nmdk_timer_interrupt(int irq, void *dev_id)
{
- /* ack: "interrupt clear register" */
- writel(1 << 0, mtu_base + MTU_ICR);
-
- /* we can't count lost ticks, unfortunately */
- nmdk_count += nmdk_cycle;
- nmdk_clkevt.event_handler(&nmdk_clkevt);
+ struct clock_event_device *evdev = dev_id;
+ writel(1 << 1, mtu_base + MTU_ICR); /* Interrupt clear reg */
+ evdev->event_handler(evdev);
return IRQ_HANDLED;
}
-/*
- * Set up timer interrupt, and return the current time in seconds.
- */
static struct irqaction nmdk_timer_irq = {
.name = "Nomadik Timer Tick",
.flags = IRQF_DISABLED | IRQF_TIMER,
.handler = nmdk_timer_interrupt,
+ .dev_id = &nmdk_clkevt,
};
-static void nmdk_timer_reset(void)
-{
- u32 cr;
-
- writel(0, mtu_base + MTU_CR(0)); /* off */
-
- /* configure load and background-load, and fire it up */
- writel(nmdk_cycle, mtu_base + MTU_LR(0));
- writel(nmdk_cycle, mtu_base + MTU_BGLR(0));
- cr = MTU_CRn_PERIODIC | MTU_CRn_PRESCALE_1 | MTU_CRn_32BITS;
- writel(cr, mtu_base + MTU_CR(0));
- writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(0));
-}
-
void __init nmdk_timer_init(void)
{
unsigned long rate;
- int bits;
-
- rate = CLOCK_TICK_RATE; /* 2.4MHz */
- nmdk_cycle = (rate + HZ/2) / HZ;
+ u32 cr = MTU_CRn_32BITS;
+
+ /*
+ * Tick rate is 2.4MHz for Nomadik and 110MHz for ux500:
+ * use a divide-by-16 counter if it's more than 16MHz
+ */
+ rate = CLOCK_TICK_RATE;
+ if (rate > 16 << 20) {
+ rate /= 16;
+ cr |= MTU_CRn_PRESCALE_16;
+ } else {
+ cr |= MTU_CRn_PRESCALE_1;
+ }
- /* Init the timer and register clocksource */
- nmdk_timer_reset();
+ /* Timer 0 is the free running clocksource */
+ writel(cr, mtu_base + MTU_CR(0));
+ writel(0, mtu_base + MTU_LR(0));
+ writel(0, mtu_base + MTU_BGLR(0));
+ writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(0));
nmdk_clksrc.mult = clocksource_hz2mult(rate, nmdk_clksrc.shift);
- bits = 8*sizeof(nmdk_count);
- nmdk_clksrc.mask = CLOCKSOURCE_MASK(bits);
if (clocksource_register(&nmdk_clksrc))
- printk(KERN_ERR "timer: failed to initialize clock "
- "source %s\n", nmdk_clksrc.name);
+ pr_err("timer: failed to initialize clock source %s\n",
+ nmdk_clksrc.name);
+
+ /* Timer 1 is used for events, fix according to rate */
+ writel(cr | MTU_CRn_ONESHOT, mtu_base + MTU_CR(1)); /* off, currently */
+ nmdk_clkevt.mult = div_sc(rate, NSEC_PER_SEC, nmdk_clkevt.shift);
+ nmdk_clkevt.max_delta_ns =
+ clockevent_delta2ns(0xffffffff, &nmdk_clkevt);
+ nmdk_clkevt.min_delta_ns =
+ clockevent_delta2ns(0x00000002, &nmdk_clkevt);
+ nmdk_clkevt.cpumask = cpumask_of(0);
/* Register irq and clockevents */
setup_irq(IRQ_MTU0, &nmdk_timer_irq);
- nmdk_clkevt.mult = div_sc(rate, NSEC_PER_SEC, nmdk_clkevt.shift);
- nmdk_clkevt.cpumask = cpumask_of(0);
clockevents_register_device(&nmdk_clkevt);
}
diff --git a/arch/arm/plat-omap/i2c.c b/arch/arm/plat-omap/i2c.c
index 624e26298faa..f044b5927508 100644
--- a/arch/arm/plat-omap/i2c.c
+++ b/arch/arm/plat-omap/i2c.c
@@ -26,9 +26,12 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
+#include <linux/i2c-omap.h>
+
#include <mach/irqs.h>
#include <plat/mux.h>
#include <plat/i2c.h>
+#include <plat/omap-pm.h>
#define OMAP_I2C_SIZE 0x3f
#define OMAP1_I2C_BASE 0xfffb3800
@@ -70,14 +73,14 @@ static struct resource i2c_resources[][2] = {
}, \
}
-static u32 i2c_rate[ARRAY_SIZE(i2c_resources)];
+static struct omap_i2c_bus_platform_data i2c_pdata[ARRAY_SIZE(i2c_resources)];
static struct platform_device omap_i2c_devices[] = {
- I2C_DEV_BUILDER(1, i2c_resources[0], &i2c_rate[0]),
+ I2C_DEV_BUILDER(1, i2c_resources[0], &i2c_pdata[0]),
#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
- I2C_DEV_BUILDER(2, i2c_resources[1], &i2c_rate[1]),
+ I2C_DEV_BUILDER(2, i2c_resources[1], &i2c_pdata[1]),
#endif
#if defined(CONFIG_ARCH_OMAP3)
- I2C_DEV_BUILDER(3, i2c_resources[2], &i2c_rate[2]),
+ I2C_DEV_BUILDER(3, i2c_resources[2], &i2c_pdata[2]),
#endif
};
@@ -100,10 +103,12 @@ static int __init omap_i2c_nr_ports(void)
static int __init omap_i2c_add_bus(int bus_id)
{
struct platform_device *pdev;
+ struct omap_i2c_bus_platform_data *pd;
struct resource *res;
resource_size_t base, irq;
pdev = &omap_i2c_devices[bus_id - 1];
+ pd = pdev->dev.platform_data;
if (bus_id == 1) {
res = pdev->resource;
if (cpu_class_is_omap1()) {
@@ -123,6 +128,15 @@ static int __init omap_i2c_add_bus(int bus_id)
if (cpu_class_is_omap2())
omap2_i2c_mux_pins(bus_id);
+ /*
+ * When waiting for completion of an i2c transfer, we need to
+ * set a wake up latency constraint for the MPU. This is to
+ * ensure a quick enough wakeup from idle when the transfer
+ * completes.
+ */
+ if (cpu_is_omap34xx())
+ pd->set_mpu_wkup_lat = omap_pm_set_max_mpu_wakeup_lat;
+
return platform_device_register(pdev);
}
@@ -146,8 +160,8 @@ static int __init omap_i2c_bus_setup(char *str)
get_options(str, 3, ints);
if (ints[0] < 2 || ints[1] < 1 || ints[1] > ports)
return 0;
- i2c_rate[ints[1] - 1] = ints[2];
- i2c_rate[ints[1] - 1] |= OMAP_I2C_CMDLINE_SETUP;
+ i2c_pdata[ints[1] - 1].clkrate = ints[2];
+ i2c_pdata[ints[1] - 1].clkrate |= OMAP_I2C_CMDLINE_SETUP;
return 1;
}
@@ -161,9 +175,9 @@ static int __init omap_register_i2c_bus_cmdline(void)
{
int i, err = 0;
- for (i = 0; i < ARRAY_SIZE(i2c_rate); i++)
- if (i2c_rate[i] & OMAP_I2C_CMDLINE_SETUP) {
- i2c_rate[i] &= ~OMAP_I2C_CMDLINE_SETUP;
+ for (i = 0; i < ARRAY_SIZE(i2c_pdata); i++)
+ if (i2c_pdata[i].clkrate & OMAP_I2C_CMDLINE_SETUP) {
+ i2c_pdata[i].clkrate &= ~OMAP_I2C_CMDLINE_SETUP;
err = omap_i2c_add_bus(i + 1);
if (err)
goto out;
@@ -197,9 +211,10 @@ int __init omap_register_i2c_bus(int bus_id, u32 clkrate,
return err;
}
- if (!i2c_rate[bus_id - 1])
- i2c_rate[bus_id - 1] = clkrate;
- i2c_rate[bus_id - 1] &= ~OMAP_I2C_CMDLINE_SETUP;
+ if (!i2c_pdata[bus_id - 1].clkrate)
+ i2c_pdata[bus_id - 1].clkrate = clkrate;
+
+ i2c_pdata[bus_id - 1].clkrate &= ~OMAP_I2C_CMDLINE_SETUP;
return omap_i2c_add_bus(bus_id);
}
diff --git a/arch/arm/plat-omap/include/plat/irqs.h b/arch/arm/plat-omap/include/plat/irqs.h
index 401701977dbb..c01d9f08a198 100644
--- a/arch/arm/plat-omap/include/plat/irqs.h
+++ b/arch/arm/plat-omap/include/plat/irqs.h
@@ -428,4 +428,8 @@ void omap3_intc_resume_idle(void);
#include <mach/hardware.h>
+#ifdef CONFIG_FIQ
+#define FIQ_START 1024
+#endif
+
#endif
diff --git a/arch/arm/plat-omap/include/plat/mcbsp.h b/arch/arm/plat-omap/include/plat/mcbsp.h
index 7de903d7c1ce..975744f10a58 100644
--- a/arch/arm/plat-omap/include/plat/mcbsp.h
+++ b/arch/arm/plat-omap/include/plat/mcbsp.h
@@ -149,6 +149,8 @@
#define OMAP_MCBSP_REG_WAKEUPEN 0xA8
#define OMAP_MCBSP_REG_XCCR 0xAC
#define OMAP_MCBSP_REG_RCCR 0xB0
+#define OMAP_MCBSP_REG_XBUFFSTAT 0xB4
+#define OMAP_MCBSP_REG_RBUFFSTAT 0xB8
#define OMAP_MCBSP_REG_SSELCR 0xBC
#define OMAP_ST_REG_REV 0x00
@@ -471,6 +473,8 @@ void omap_mcbsp_set_tx_threshold(unsigned int id, u16 threshold);
void omap_mcbsp_set_rx_threshold(unsigned int id, u16 threshold);
u16 omap_mcbsp_get_max_tx_threshold(unsigned int id);
u16 omap_mcbsp_get_max_rx_threshold(unsigned int id);
+u16 omap_mcbsp_get_tx_delay(unsigned int id);
+u16 omap_mcbsp_get_rx_delay(unsigned int id);
int omap_mcbsp_get_dma_op_mode(unsigned int id);
#else
static inline void omap_mcbsp_set_tx_threshold(unsigned int id, u16 threshold)
@@ -479,6 +483,8 @@ static inline void omap_mcbsp_set_rx_threshold(unsigned int id, u16 threshold)
{ }
static inline u16 omap_mcbsp_get_max_tx_threshold(unsigned int id) { return 0; }
static inline u16 omap_mcbsp_get_max_rx_threshold(unsigned int id) { return 0; }
+static inline u16 omap_mcbsp_get_tx_delay(unsigned int id) { return 0; }
+static inline u16 omap_mcbsp_get_rx_delay(unsigned int id) { return 0; }
static inline int omap_mcbsp_get_dma_op_mode(unsigned int id) { return 0; }
#endif
int omap_mcbsp_request(unsigned int id);
diff --git a/arch/arm/plat-omap/include/plat/multi.h b/arch/arm/plat-omap/include/plat/multi.h
index f235d32cd942..ffd909fa5287 100644
--- a/arch/arm/plat-omap/include/plat/multi.h
+++ b/arch/arm/plat-omap/include/plat/multi.h
@@ -61,9 +61,9 @@
# define OMAP_NAME omap16xx
# endif
#endif
-#if (defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3))
+#ifdef CONFIG_ARCH_OMAP2PLUS
# if (defined(OMAP_NAME) || defined(MULTI_OMAP1))
-# error "OMAP1 and OMAP2 can't be selected at the same time"
+# error "OMAP1 and OMAP2PLUS can't be selected at the same time"
# endif
#endif
#ifdef CONFIG_ARCH_OMAP2420
@@ -82,12 +82,20 @@
# define OMAP_NAME omap2430
# endif
#endif
-#ifdef CONFIG_ARCH_OMAP3430
+#ifdef CONFIG_ARCH_OMAP3
# ifdef OMAP_NAME
# undef MULTI_OMAP2
# define MULTI_OMAP2
# else
-# define OMAP_NAME omap3430
+# define OMAP_NAME omap3
+# endif
+#endif
+#ifdef CONFIG_ARCH_OMAP4
+# ifdef OMAP_NAME
+# undef MULTI_OMAP2
+# define MULTI_OMAP2
+# else
+# define OMAP_NAME omap4
# endif
#endif
diff --git a/arch/arm/plat-omap/include/plat/omap34xx.h b/arch/arm/plat-omap/include/plat/omap34xx.h
index 2845fdc658b0..98fc8b4a4cc4 100644
--- a/arch/arm/plat-omap/include/plat/omap34xx.h
+++ b/arch/arm/plat-omap/include/plat/omap34xx.h
@@ -82,5 +82,10 @@
#define OMAP34XX_MAILBOX_BASE (L4_34XX_BASE + 0x94000)
+/* Security */
+#define OMAP34XX_SEC_BASE (L4_34XX_BASE + 0xA0000)
+#define OMAP34XX_SEC_SHA1MD5_BASE (OMAP34XX_SEC_BASE + 0x23000)
+#define OMAP34XX_SEC_AES_BASE (OMAP34XX_SEC_BASE + 0x25000)
+
#endif /* __ASM_ARCH_OMAP3_H */
diff --git a/arch/arm/plat-omap/include/plat/serial.h b/arch/arm/plat-omap/include/plat/serial.h
index 83dce4c4f7e6..19145f5c32ba 100644
--- a/arch/arm/plat-omap/include/plat/serial.h
+++ b/arch/arm/plat-omap/include/plat/serial.h
@@ -15,6 +15,20 @@
#include <linux/init.h>
+/*
+ * Memory entry used for the DEBUG_LL UART configuration. See also
+ * uncompress.h and debug-macro.S.
+ *
+ * Note that using a memory location for storing the UART configuration
+ * has at least two limitations:
+ *
+ * 1. Kernel uncompress code cannot overlap OMAP_UART_INFO as the
+ * uncompress code could then partially overwrite itself
+ * 2. We assume printascii is called at least once before paging_init,
+ * and addruart has a chance to read OMAP_UART_INFO
+ */
+#define OMAP_UART_INFO (PHYS_OFFSET + 0x3ffc)
+
/* OMAP1 serial ports */
#define OMAP1_UART1_BASE 0xfffb0000
#define OMAP1_UART2_BASE 0xfffb0800
@@ -39,7 +53,7 @@
/* External port on Zoom2/3 */
#define ZOOM_UART_BASE 0x10000000
-#define ZOOM_UART_VIRT 0xfb000000
+#define ZOOM_UART_VIRT 0xfa400000
#define OMAP_PORT_SHIFT 2
#define OMAP7XX_PORT_SHIFT 0
diff --git a/arch/arm/plat-omap/include/plat/uncompress.h b/arch/arm/plat-omap/include/plat/uncompress.h
index 81d9ec540fcf..bbedd71943f6 100644
--- a/arch/arm/plat-omap/include/plat/uncompress.h
+++ b/arch/arm/plat-omap/include/plat/uncompress.h
@@ -20,27 +20,21 @@
#include <linux/types.h>
#include <linux/serial_reg.h>
+#include <asm/memory.h>
#include <asm/mach-types.h>
#include <plat/serial.h>
-static volatile u8 *uart1_base;
-static int uart1_shift;
-
static volatile u8 *uart_base;
static int uart_shift;
/*
- * Store the DEBUG_LL uart number into UART1 scratchpad register.
+ * Store the DEBUG_LL uart number into memory.
* See also debug-macro.S, and serial.c for related code.
- *
- * Please note that we currently assume that:
- * - UART1 clocks are enabled for register access
- * - UART1 scratchpad register can be used
*/
-static void set_uart1_scratchpad(unsigned char port)
+static void set_omap_uart_info(unsigned char port)
{
- uart1_base[UART_SCR << uart1_shift] = port;
+ *(volatile u32 *)OMAP_UART_INFO = port;
}
static void putc(int c)
@@ -60,42 +54,38 @@ static inline void flush(void)
/*
* Macros to configure UART1 and debug UART
*/
-#define _DEBUG_LL_ENTRY(mach, uart1_phys, uart1_shft, \
- dbg_uart, dbg_shft, dbg_id) \
+#define _DEBUG_LL_ENTRY(mach, dbg_uart, dbg_shft, dbg_id) \
if (machine_is_##mach()) { \
- uart1_base = (volatile u8 *)(uart1_phys); \
- uart1_shift = (uart1_shft); \
uart_base = (volatile u8 *)(dbg_uart); \
uart_shift = (dbg_shft); \
port = (dbg_id); \
- set_uart1_scratchpad(port); \
+ set_omap_uart_info(port); \
break; \
}
#define DEBUG_LL_OMAP7XX(p, mach) \
- _DEBUG_LL_ENTRY(mach, OMAP1_UART1_BASE, OMAP7XX_PORT_SHIFT, \
- OMAP1_UART##p##_BASE, OMAP7XX_PORT_SHIFT, OMAP1UART##p)
+ _DEBUG_LL_ENTRY(mach, OMAP1_UART##p##_BASE, OMAP7XX_PORT_SHIFT, \
+ OMAP1UART##p)
#define DEBUG_LL_OMAP1(p, mach) \
- _DEBUG_LL_ENTRY(mach, OMAP1_UART1_BASE, OMAP_PORT_SHIFT, \
- OMAP1_UART##p##_BASE, OMAP_PORT_SHIFT, OMAP1UART##p)
+ _DEBUG_LL_ENTRY(mach, OMAP1_UART##p##_BASE, OMAP_PORT_SHIFT, \
+ OMAP1UART##p)
#define DEBUG_LL_OMAP2(p, mach) \
- _DEBUG_LL_ENTRY(mach, OMAP2_UART1_BASE, OMAP_PORT_SHIFT, \
- OMAP2_UART##p##_BASE, OMAP_PORT_SHIFT, OMAP2UART##p)
+ _DEBUG_LL_ENTRY(mach, OMAP2_UART##p##_BASE, OMAP_PORT_SHIFT, \
+ OMAP2UART##p)
#define DEBUG_LL_OMAP3(p, mach) \
- _DEBUG_LL_ENTRY(mach, OMAP3_UART1_BASE, OMAP_PORT_SHIFT, \
- OMAP3_UART##p##_BASE, OMAP_PORT_SHIFT, OMAP3UART##p)
+ _DEBUG_LL_ENTRY(mach, OMAP3_UART##p##_BASE, OMAP_PORT_SHIFT, \
+ OMAP3UART##p)
#define DEBUG_LL_OMAP4(p, mach) \
- _DEBUG_LL_ENTRY(mach, OMAP4_UART1_BASE, OMAP_PORT_SHIFT, \
- OMAP4_UART##p##_BASE, OMAP_PORT_SHIFT, OMAP4UART##p)
+ _DEBUG_LL_ENTRY(mach, OMAP4_UART##p##_BASE, OMAP_PORT_SHIFT, \
+ OMAP4UART##p)
/* Zoom2/3 shift is different for UART1 and external port */
#define DEBUG_LL_ZOOM(mach) \
- _DEBUG_LL_ENTRY(mach, OMAP2_UART1_BASE, OMAP_PORT_SHIFT, \
- ZOOM_UART_BASE, ZOOM_PORT_SHIFT, ZOOM_UART)
+ _DEBUG_LL_ENTRY(mach, ZOOM_UART_BASE, ZOOM_PORT_SHIFT, ZOOM_UART)
static inline void __arch_decomp_setup(unsigned long arch_id)
{
diff --git a/arch/arm/plat-omap/mcbsp.c b/arch/arm/plat-omap/mcbsp.c
index e1d0440fd4a8..5aee40e19535 100644
--- a/arch/arm/plat-omap/mcbsp.c
+++ b/arch/arm/plat-omap/mcbsp.c
@@ -560,6 +560,61 @@ u16 omap_mcbsp_get_max_rx_threshold(unsigned int id)
}
EXPORT_SYMBOL(omap_mcbsp_get_max_rx_threshold);
+#define MCBSP2_FIFO_SIZE 0x500 /* 1024 + 256 locations */
+#define MCBSP1345_FIFO_SIZE 0x80 /* 128 locations */
+/*
+ * omap_mcbsp_get_tx_delay returns the number of used slots in the McBSP FIFO
+ */
+u16 omap_mcbsp_get_tx_delay(unsigned int id)
+{
+ struct omap_mcbsp *mcbsp;
+ u16 buffstat;
+
+ if (!omap_mcbsp_check_valid_id(id)) {
+ printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
+ return -ENODEV;
+ }
+ mcbsp = id_to_mcbsp_ptr(id);
+
+ /* Returns the number of free locations in the buffer */
+ buffstat = MCBSP_READ(mcbsp, XBUFFSTAT);
+
+ /* The number of slots differs between McBSP ports */
+ if (mcbsp->id == 2)
+ return MCBSP2_FIFO_SIZE - buffstat;
+ else
+ return MCBSP1345_FIFO_SIZE - buffstat;
+}
+EXPORT_SYMBOL(omap_mcbsp_get_tx_delay);
+
+/*
+ * omap_mcbsp_get_rx_delay returns the number of free slots in the McBSP FIFO
+ * to reach the threshold value (when the DMA will be triggered to read it)
+ */
+u16 omap_mcbsp_get_rx_delay(unsigned int id)
+{
+ struct omap_mcbsp *mcbsp;
+ u16 buffstat, threshold;
+
+ if (!omap_mcbsp_check_valid_id(id)) {
+ printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
+ return -ENODEV;
+ }
+ mcbsp = id_to_mcbsp_ptr(id);
+
+ /* Returns the number of used locations in the buffer */
+ buffstat = MCBSP_READ(mcbsp, RBUFFSTAT);
+ /* RX threshold */
+ threshold = MCBSP_READ(mcbsp, THRSH1);
+
+ /* Return the number of locations until we reach the threshold limit */
+ if (threshold <= buffstat)
+ return 0;
+ else
+ return threshold - buffstat;
+}
+EXPORT_SYMBOL(omap_mcbsp_get_rx_delay);
+
/*
* omap_mcbsp_get_dma_op_mode just return the current configured
* operating mode for the mcbsp channel
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index 51f4dfb82e2b..226b2e858d6c 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -437,6 +437,20 @@ static inline int omap34xx_sram_init(void)
}
#endif
+#ifdef CONFIG_ARCH_OMAP4
+int __init omap44xx_sram_init(void)
+{
+ printk(KERN_ERR "FIXME: %s not implemented\n", __func__);
+
+ return -ENODEV;
+}
+#else
+static inline int omap44xx_sram_init(void)
+{
+ return 0;
+}
+#endif
+
int __init omap_sram_init(void)
{
omap_detect_sram();
@@ -451,7 +465,7 @@ int __init omap_sram_init(void)
else if (cpu_is_omap34xx())
omap34xx_sram_init();
else if (cpu_is_omap44xx())
- omap34xx_sram_init(); /* FIXME: */
+ omap44xx_sram_init();
return 0;
}
diff --git a/arch/arm/plat-pxa/Makefile b/arch/arm/plat-pxa/Makefile
index 0264bfb0ca4f..f68da35f4fb3 100644
--- a/arch/arm/plat-pxa/Makefile
+++ b/arch/arm/plat-pxa/Makefile
@@ -2,7 +2,7 @@
# Makefile for code common across different PXA processor families
#
-obj-y := dma.o
+obj-y := dma.o pmu.o
obj-$(CONFIG_GENERIC_GPIO) += gpio.o
obj-$(CONFIG_PXA3xx) += mfp.o
diff --git a/arch/arm/plat-pxa/pmu.c b/arch/arm/plat-pxa/pmu.c
new file mode 100644
index 000000000000..267ceb6feb2f
--- /dev/null
+++ b/arch/arm/plat-pxa/pmu.c
@@ -0,0 +1,33 @@
+/*
+ * PMU IRQ registration for the PXA xscale PMU families.
+ * Copyright (C) 2010 Will Deacon, ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <asm/pmu.h>
+#include <mach/irqs.h>
+
+static struct resource pmu_resource = {
+ .start = IRQ_PMU,
+ .end = IRQ_PMU,
+ .flags = IORESOURCE_IRQ,
+};
+
+static struct platform_device pmu_device = {
+ .name = "arm-pmu",
+ .id = ARM_PMU_DEVICE_CPU,
+ .resource = &pmu_resource,
+ .num_resources = 1,
+};
+
+static int __init pxa_pmu_init(void)
+{
+ platform_device_register(&pmu_device);
+ return 0;
+}
+arch_initcall(pxa_pmu_init);
diff --git a/arch/arm/plat-s3c24xx/Kconfig b/arch/arm/plat-s3c24xx/Kconfig
index 6e93ef8f3d43..3ce8f010b3c6 100644
--- a/arch/arm/plat-s3c24xx/Kconfig
+++ b/arch/arm/plat-s3c24xx/Kconfig
@@ -9,6 +9,7 @@ config PLAT_S3C24XX
select NO_IOPORT
select ARCH_REQUIRE_GPIOLIB
select S3C_DEVICE_NAND
+ select S3C_GPIO_CFG_S3C24XX
help
Base platform code for any Samsung S3C24XX device
@@ -44,6 +45,12 @@ config S3C2410_CLOCK
Clock code for the S3C2410, and similar processors which
is currently includes the S3C2410, S3C2440, S3C2442.
+config S3C2443_CLOCK
+ bool
+ help
+ Clock code for the S3C2443 and similar processors, which includes
+ the S3C2416 and S3C2450.
+
config S3C24XX_DCLK
bool
help
diff --git a/arch/arm/plat-s3c24xx/Makefile b/arch/arm/plat-s3c24xx/Makefile
index c2237c41141f..44aea8868f89 100644
--- a/arch/arm/plat-s3c24xx/Makefile
+++ b/arch/arm/plat-s3c24xx/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_PM) += pm.o
obj-$(CONFIG_PM) += irq-pm.o
obj-$(CONFIG_PM) += sleep.o
obj-$(CONFIG_S3C2410_CLOCK) += s3c2410-clock.o
+obj-$(CONFIG_S3C2443_CLOCK) += s3c2443-clock.o
obj-$(CONFIG_S3C2410_DMA) += dma.o
obj-$(CONFIG_S3C2410_IOTIMING) += s3c2410-iotiming.o
obj-$(CONFIG_S3C2412_IOTIMING) += s3c2412-iotiming.o
diff --git a/arch/arm/plat-s3c24xx/common-smdk.c b/arch/arm/plat-s3c24xx/common-smdk.c
index 9e0e20ad2e46..7b44d0c592b5 100644
--- a/arch/arm/plat-s3c24xx/common-smdk.c
+++ b/arch/arm/plat-s3c24xx/common-smdk.c
@@ -42,6 +42,7 @@
#include <plat/nand.h>
#include <plat/common-smdk.h>
+#include <plat/gpio-cfg.h>
#include <plat/devs.h>
#include <plat/pm.h>
@@ -185,10 +186,10 @@ void __init smdk_machine_init(void)
{
/* Configure the LEDs (even if we have no LED support)*/
- s3c2410_gpio_cfgpin(S3C2410_GPF(4), S3C2410_GPIO_OUTPUT);
- s3c2410_gpio_cfgpin(S3C2410_GPF(5), S3C2410_GPIO_OUTPUT);
- s3c2410_gpio_cfgpin(S3C2410_GPF(6), S3C2410_GPIO_OUTPUT);
- s3c2410_gpio_cfgpin(S3C2410_GPF(7), S3C2410_GPIO_OUTPUT);
+ s3c_gpio_cfgpin(S3C2410_GPF(4), S3C2410_GPIO_OUTPUT);
+ s3c_gpio_cfgpin(S3C2410_GPF(5), S3C2410_GPIO_OUTPUT);
+ s3c_gpio_cfgpin(S3C2410_GPF(6), S3C2410_GPIO_OUTPUT);
+ s3c_gpio_cfgpin(S3C2410_GPF(7), S3C2410_GPIO_OUTPUT);
s3c2410_gpio_setpin(S3C2410_GPF(4), 1);
s3c2410_gpio_setpin(S3C2410_GPF(5), 1);
diff --git a/arch/arm/plat-s3c24xx/cpu.c b/arch/arm/plat-s3c24xx/cpu.c
index 9ca64df35bf6..76d0858c3cbb 100644
--- a/arch/arm/plat-s3c24xx/cpu.c
+++ b/arch/arm/plat-s3c24xx/cpu.c
@@ -49,6 +49,7 @@
#include <plat/s3c2400.h>
#include <plat/s3c2410.h>
#include <plat/s3c2412.h>
+#include <plat/s3c2416.h>
#include <plat/s3c244x.h>
#include <plat/s3c2443.h>
@@ -57,6 +58,7 @@
static const char name_s3c2400[] = "S3C2400";
static const char name_s3c2410[] = "S3C2410";
static const char name_s3c2412[] = "S3C2412";
+static const char name_s3c2416[] = "S3C2416/S3C2450";
static const char name_s3c2440[] = "S3C2440";
static const char name_s3c2442[] = "S3C2442";
static const char name_s3c2442b[] = "S3C2442B";
@@ -137,6 +139,15 @@ static struct cpu_table cpu_ids[] __initdata = {
.init = s3c2412_init,
.name = name_s3c2412,
},
+ { /* a strange version of the s3c2416 */
+ .idcode = 0x32450003,
+ .idmask = 0xffffffff,
+ .map_io = s3c2416_map_io,
+ .init_clocks = s3c2416_init_clocks,
+ .init_uarts = s3c2416_init_uarts,
+ .init = s3c2416_init,
+ .name = name_s3c2416,
+ },
{
.idcode = 0x32443001,
.idmask = 0xffffffff,
@@ -170,6 +181,16 @@ static struct map_desc s3c_iodesc[] __initdata = {
static unsigned long s3c24xx_read_idcode_v5(void)
{
+#if defined(CONFIG_CPU_S3C2416)
+ /* s3c2416 is v5, with S3C24XX_GSTATUS1 instead of S3C2412_GSTATUS1 */
+
+ u32 gs = __raw_readl(S3C24XX_GSTATUS1);
+
+ /* test for s3c2416 or similar device */
+ if ((gs >> 16) == 0x3245)
+ return gs;
+#endif
+
#if defined(CONFIG_CPU_S3C2412) || defined(CONFIG_CPU_S3C2413)
return __raw_readl(S3C2412_GSTATUS1);
#else
diff --git a/arch/arm/plat-s3c24xx/devs.c b/arch/arm/plat-s3c24xx/devs.c
index 9265f09bfa58..c364a02bb1f7 100644
--- a/arch/arm/plat-s3c24xx/devs.c
+++ b/arch/arm/plat-s3c24xx/devs.c
@@ -21,6 +21,7 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -38,8 +39,7 @@
#include <plat/devs.h>
#include <plat/cpu.h>
#include <plat/regs-spi.h>
-
-#include <mach/ts.h>
+#include <plat/ts.h>
/* Serial port registrations */
@@ -149,10 +149,14 @@ void __init s3c24xx_fb_set_platdata(struct s3c2410fb_mach_info *pd)
{
struct s3c2410fb_mach_info *npd;
- npd = kmalloc(sizeof(*npd), GFP_KERNEL);
+ npd = kmemdup(pd, sizeof(*npd), GFP_KERNEL);
if (npd) {
- memcpy(npd, pd, sizeof(*npd));
s3c_device_lcd.dev.platform_data = npd;
+ npd->displays = kmemdup(pd->displays,
+ sizeof(struct s3c2410fb_display) * npd->num_displays,
+ GFP_KERNEL);
+ if (!npd->displays)
+ printk(KERN_ERR "no memory for LCD display data\n");
} else {
printk(KERN_ERR "no memory for LCD platform data\n");
}
@@ -371,7 +375,7 @@ struct platform_device s3c_device_sdi = {
EXPORT_SYMBOL(s3c_device_sdi);
-void s3c24xx_mci_set_platdata(struct s3c24xx_mci_pdata *pdata)
+void __init s3c24xx_mci_set_platdata(struct s3c24xx_mci_pdata *pdata)
{
struct s3c24xx_mci_pdata *npd;
diff --git a/arch/arm/plat-s3c24xx/gpio.c b/arch/arm/plat-s3c24xx/gpio.c
index 5467470badfd..45126d3aafc6 100644
--- a/arch/arm/plat-s3c24xx/gpio.c
+++ b/arch/arm/plat-s3c24xx/gpio.c
@@ -1,6 +1,6 @@
/* linux/arch/arm/plat-s3c24xx/gpio.c
*
- * Copyright (c) 2004-2005 Simtec Electronics
+ * Copyright (c) 2004-2010 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* S3C24XX GPIO support
@@ -20,12 +20,12 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
+#include <linux/gpio.h>
#include <linux/io.h>
#include <mach/hardware.h>
@@ -34,123 +34,34 @@
#include <mach/regs-gpio.h>
-void s3c2410_gpio_cfgpin(unsigned int pin, unsigned int function)
-{
- void __iomem *base = S3C24XX_GPIO_BASE(pin);
- unsigned long mask;
- unsigned long con;
- unsigned long flags;
-
- if (pin < S3C2410_GPIO_BANKB) {
- mask = 1 << S3C2410_GPIO_OFFSET(pin);
- } else {
- mask = 3 << S3C2410_GPIO_OFFSET(pin)*2;
- }
-
- switch (function) {
- case S3C2410_GPIO_LEAVE:
- mask = 0;
- function = 0;
- break;
-
- case S3C2410_GPIO_INPUT:
- case S3C2410_GPIO_OUTPUT:
- case S3C2410_GPIO_SFN2:
- case S3C2410_GPIO_SFN3:
- if (pin < S3C2410_GPIO_BANKB) {
- function -= 1;
- function &= 1;
- function <<= S3C2410_GPIO_OFFSET(pin);
- } else {
- function &= 3;
- function <<= S3C2410_GPIO_OFFSET(pin)*2;
- }
- }
-
- /* modify the specified register wwith IRQs off */
-
- local_irq_save(flags);
-
- con = __raw_readl(base + 0x00);
- con &= ~mask;
- con |= function;
-
- __raw_writel(con, base + 0x00);
-
- local_irq_restore(flags);
-}
-
-EXPORT_SYMBOL(s3c2410_gpio_cfgpin);
-
-unsigned int s3c2410_gpio_getcfg(unsigned int pin)
-{
- void __iomem *base = S3C24XX_GPIO_BASE(pin);
- unsigned long val = __raw_readl(base);
-
- if (pin < S3C2410_GPIO_BANKB) {
- val >>= S3C2410_GPIO_OFFSET(pin);
- val &= 1;
- val += 1;
- } else {
- val >>= S3C2410_GPIO_OFFSET(pin)*2;
- val &= 3;
- }
-
- return val | S3C2410_GPIO_INPUT;
-}
-
-EXPORT_SYMBOL(s3c2410_gpio_getcfg);
+/* gpiolib wrappers until these are totally eliminated */
void s3c2410_gpio_pullup(unsigned int pin, unsigned int to)
{
- void __iomem *base = S3C24XX_GPIO_BASE(pin);
- unsigned long offs = S3C2410_GPIO_OFFSET(pin);
- unsigned long flags;
- unsigned long up;
+ int ret;
- if (pin < S3C2410_GPIO_BANKB)
- return;
+ WARN_ON(to); /* should be none of these left */
- local_irq_save(flags);
-
- up = __raw_readl(base + 0x08);
- up &= ~(1L << offs);
- up |= to << offs;
- __raw_writel(up, base + 0x08);
+ if (!to) {
+ /* if pull is enabled, try first with up, and if that
+ * fails, try using down */
- local_irq_restore(flags);
+ ret = s3c_gpio_setpull(pin, S3C_GPIO_PULL_UP);
+ if (ret)
+ s3c_gpio_setpull(pin, S3C_GPIO_PULL_DOWN);
+ } else {
+ s3c_gpio_setpull(pin, S3C_GPIO_PULL_NONE);
+ }
}
-
EXPORT_SYMBOL(s3c2410_gpio_pullup);
-int s3c2410_gpio_getpull(unsigned int pin)
-{
- void __iomem *base = S3C24XX_GPIO_BASE(pin);
- unsigned long offs = S3C2410_GPIO_OFFSET(pin);
-
- if (pin < S3C2410_GPIO_BANKB)
- return -EINVAL;
-
- return (__raw_readl(base + 0x08) & (1L << offs)) ? 1 : 0;
-}
-
-EXPORT_SYMBOL(s3c2410_gpio_getpull);
-
void s3c2410_gpio_setpin(unsigned int pin, unsigned int to)
{
- void __iomem *base = S3C24XX_GPIO_BASE(pin);
- unsigned long offs = S3C2410_GPIO_OFFSET(pin);
- unsigned long flags;
- unsigned long dat;
-
- local_irq_save(flags);
+ /* do this via gpiolib until all users removed */
- dat = __raw_readl(base + 0x04);
- dat &= ~(1 << offs);
- dat |= to << offs;
- __raw_writel(dat, base + 0x04);
-
- local_irq_restore(flags);
+ gpio_request(pin, "temporary");
+ gpio_set_value(pin, to);
+ gpio_free(pin);
}
EXPORT_SYMBOL(s3c2410_gpio_setpin);
@@ -181,22 +92,3 @@ unsigned int s3c2410_modify_misccr(unsigned int clear, unsigned int change)
}
EXPORT_SYMBOL(s3c2410_modify_misccr);
-
-int s3c2410_gpio_getirq(unsigned int pin)
-{
- if (pin < S3C2410_GPF(0) || pin > S3C2410_GPG(15))
- return -EINVAL; /* not valid interrupts */
-
- if (pin < S3C2410_GPG(0) && pin > S3C2410_GPF(7))
- return -EINVAL; /* not valid pin */
-
- if (pin < S3C2410_GPF(4))
- return (pin - S3C2410_GPF(0)) + IRQ_EINT0;
-
- if (pin < S3C2410_GPG(0))
- return (pin - S3C2410_GPF(4)) + IRQ_EINT4;
-
- return (pin - S3C2410_GPG(0)) + IRQ_EINT8;
-}
-
-EXPORT_SYMBOL(s3c2410_gpio_getirq);
diff --git a/arch/arm/plat-s3c24xx/gpiolib.c b/arch/arm/plat-s3c24xx/gpiolib.c
index 4f0f11a6a677..4c0896f2572d 100644
--- a/arch/arm/plat-s3c24xx/gpiolib.c
+++ b/arch/arm/plat-s3c24xx/gpiolib.c
@@ -1,6 +1,6 @@
/* linux/arch/arm/plat-s3c24xx/gpiolib.c
*
- * Copyright (c) 2008 Simtec Electronics
+ * Copyright (c) 2008-2010 Simtec Electronics
* http://armlinux.simtec.co.uk/
* Ben Dooks <ben@simtec.co.uk>
*
@@ -21,6 +21,8 @@
#include <linux/gpio.h>
#include <plat/gpio-core.h>
+#include <plat/gpio-cfg.h>
+#include <plat/gpio-cfg-helpers.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <plat/pm.h>
@@ -77,10 +79,21 @@ static int s3c24xx_gpiolib_bankg_toirq(struct gpio_chip *chip, unsigned offset)
return IRQ_EINT8 + offset;
}
+static struct s3c_gpio_cfg s3c24xx_gpiocfg_banka = {
+ .set_config = s3c_gpio_setcfg_s3c24xx_a,
+ .get_config = s3c_gpio_getcfg_s3c24xx_a,
+};
+
+struct s3c_gpio_cfg s3c24xx_gpiocfg_default = {
+ .set_config = s3c_gpio_setcfg_s3c24xx,
+ .get_config = s3c_gpio_getcfg_s3c24xx,
+};
+
struct s3c_gpio_chip s3c24xx_gpios[] = {
[0] = {
.base = S3C2410_GPACON,
.pm = __gpio_pm(&s3c_gpio_pm_1bit),
+ .config = &s3c24xx_gpiocfg_banka,
.chip = {
.base = S3C2410_GPA(0),
.owner = THIS_MODULE,
@@ -161,15 +174,58 @@ struct s3c_gpio_chip s3c24xx_gpios[] = {
.ngpio = 11,
},
},
+ /* GPIOS for the S3C2443 and later devices. */
+ {
+ .base = S3C2440_GPJCON,
+ .pm = __gpio_pm(&s3c_gpio_pm_2bit),
+ .chip = {
+ .base = S3C2410_GPJ(0),
+ .owner = THIS_MODULE,
+ .label = "GPIOJ",
+ .ngpio = 16,
+ },
+ }, {
+ .base = S3C2443_GPKCON,
+ .pm = __gpio_pm(&s3c_gpio_pm_2bit),
+ .chip = {
+ .base = S3C2410_GPK(0),
+ .owner = THIS_MODULE,
+ .label = "GPIOK",
+ .ngpio = 16,
+ },
+ }, {
+ .base = S3C2443_GPLCON,
+ .pm = __gpio_pm(&s3c_gpio_pm_2bit),
+ .chip = {
+ .base = S3C2410_GPL(0),
+ .owner = THIS_MODULE,
+ .label = "GPIOL",
+ .ngpio = 15,
+ },
+ }, {
+ .base = S3C2443_GPMCON,
+ .pm = __gpio_pm(&s3c_gpio_pm_2bit),
+ .chip = {
+ .base = S3C2410_GPM(0),
+ .owner = THIS_MODULE,
+ .label = "GPIOM",
+ .ngpio = 2,
+ },
+ },
};
+
static __init int s3c24xx_gpiolib_init(void)
{
struct s3c_gpio_chip *chip = s3c24xx_gpios;
int gpn;
- for (gpn = 0; gpn < ARRAY_SIZE(s3c24xx_gpios); gpn++, chip++)
+ for (gpn = 0; gpn < ARRAY_SIZE(s3c24xx_gpios); gpn++, chip++) {
+ if (!chip->config)
+ chip->config = &s3c24xx_gpiocfg_default;
+
s3c_gpiolib_add(chip);
+ }
return 0;
}
diff --git a/arch/arm/plat-s3c24xx/include/plat/pll.h b/arch/arm/plat-s3c24xx/include/plat/pll.h
index 7ea8bffa7a9c..005729a1077a 100644
--- a/arch/arm/plat-s3c24xx/include/plat/pll.h
+++ b/arch/arm/plat-s3c24xx/include/plat/pll.h
@@ -35,3 +35,28 @@ s3c24xx_get_pll(unsigned int pllval, unsigned int baseclk)
return (unsigned int)fvco;
}
+
+#define S3C2416_PLL_M_SHIFT (14)
+#define S3C2416_PLL_P_SHIFT (5)
+#define S3C2416_PLL_S_MASK (7)
+#define S3C2416_PLL_M_MASK ((1 << 10) - 1)
+#define S3C2416_PLL_P_MASK (63)
+
+static inline unsigned int
+s3c2416_get_pll(unsigned int pllval, unsigned int baseclk)
+{
+ unsigned int m, p, s;
+ uint64_t fvco;
+
+ m = pllval >> S3C2416_PLL_M_SHIFT;
+ p = pllval >> S3C2416_PLL_P_SHIFT;
+
+ s = pllval & S3C2416_PLL_S_MASK;
+ m &= S3C2416_PLL_M_MASK;
+ p &= S3C2416_PLL_P_MASK;
+
+ fvco = (uint64_t)baseclk * m;
+ do_div(fvco, (p << s));
+
+ return (unsigned int)fvco;
+}
diff --git a/arch/arm/plat-s3c24xx/include/plat/s3c2416.h b/arch/arm/plat-s3c24xx/include/plat/s3c2416.h
new file mode 100644
index 000000000000..dc3c0907d221
--- /dev/null
+++ b/arch/arm/plat-s3c24xx/include/plat/s3c2416.h
@@ -0,0 +1,31 @@
+/* linux/arch/arm/plat-s3c24xx/include/plat/s3c2416.h
+ *
+ * Copyright (c) 2009 Yauhen Kharuzhy <jekhor@gmail.com>
+ *
+ * Header file for s3c2416 cpu support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifdef CONFIG_CPU_S3C2416
+
+struct s3c2410_uartcfg;
+
+extern int s3c2416_init(void);
+
+extern void s3c2416_map_io(void);
+
+extern void s3c2416_init_uarts(struct s3c2410_uartcfg *cfg, int no);
+
+extern void s3c2416_init_clocks(int xtal);
+
+extern int s3c2416_baseclk_add(void);
+
+#else
+#define s3c2416_init_clocks NULL
+#define s3c2416_init_uarts NULL
+#define s3c2416_map_io NULL
+#define s3c2416_init NULL
+#endif
diff --git a/arch/arm/plat-s3c24xx/include/plat/s3c2443.h b/arch/arm/plat-s3c24xx/include/plat/s3c2443.h
index 815b107ed890..a19715feb798 100644
--- a/arch/arm/plat-s3c24xx/include/plat/s3c2443.h
+++ b/arch/arm/plat-s3c24xx/include/plat/s3c2443.h
@@ -30,3 +30,22 @@ extern int s3c2443_baseclk_add(void);
#define s3c2443_map_io NULL
#define s3c2443_init NULL
#endif
+
+/* common code used by s3c2443 and others.
+ * note, not to be used outside of arch/arm/mach-s3c* */
+
+struct clk; /* some files don't need clk.h otherwise */
+
+typedef unsigned int (*pll_fn)(unsigned int reg, unsigned int base);
+typedef unsigned int (*fdiv_fn)(unsigned long clkcon0);
+
+extern void s3c2443_common_setup_clocks(pll_fn get_mpll, fdiv_fn fdiv);
+extern void s3c2443_common_init_clocks(int xtal, pll_fn get_mpll, fdiv_fn fdiv);
+
+extern int s3c2443_clkcon_enable_h(struct clk *clk, int enable);
+extern int s3c2443_clkcon_enable_p(struct clk *clk, int enable);
+extern int s3c2443_clkcon_enable_s(struct clk *clk, int enable);
+
+extern struct clksrc_clk clk_epllref;
+extern struct clksrc_clk clk_esysclk;
+extern struct clksrc_clk clk_msysclk;
diff --git a/arch/arm/plat-s3c24xx/pm.c b/arch/arm/plat-s3c24xx/pm.c
index 3620dd299095..60627e63a254 100644
--- a/arch/arm/plat-s3c24xx/pm.c
+++ b/arch/arm/plat-s3c24xx/pm.c
@@ -43,6 +43,7 @@
#include <asm/mach/time.h>
+#include <plat/gpio-cfg.h>
#include <plat/pm.h>
#define PFX "s3c24xx-pm: "
@@ -90,22 +91,22 @@ static void s3c_pm_check_resume_pin(unsigned int pin, unsigned int irqoffs)
{
unsigned long irqstate;
unsigned long pinstate;
- int irq = s3c2410_gpio_getirq(pin);
+ int irq = gpio_to_irq(pin);
if (irqoffs < 4)
irqstate = s3c_irqwake_intmask & (1L<<irqoffs);
else
irqstate = s3c_irqwake_eintmask & (1L<<irqoffs);
- pinstate = s3c2410_gpio_getcfg(pin);
+ pinstate = s3c_gpio_getcfg(pin);
if (!irqstate) {
if (pinstate == S3C2410_GPIO_IRQ)
- S3C_PMDBG("Leaving IRQ %d (pin %d) enabled\n", irq, pin);
+ S3C_PMDBG("Leaving IRQ %d (pin %d) as is\n", irq, pin);
} else {
if (pinstate == S3C2410_GPIO_IRQ) {
S3C_PMDBG("Disabling IRQ %d (pin %d)\n", irq, pin);
- s3c2410_gpio_cfgpin(pin, S3C2410_GPIO_INPUT);
+ s3c_gpio_cfgpin(pin, S3C2410_GPIO_INPUT);
}
}
}
diff --git a/arch/arm/plat-s3c24xx/s3c2410-clock.c b/arch/arm/plat-s3c24xx/s3c2410-clock.c
index b61bdb793734..9ecc5d913679 100644
--- a/arch/arm/plat-s3c24xx/s3c2410-clock.c
+++ b/arch/arm/plat-s3c24xx/s3c2410-clock.c
@@ -87,7 +87,7 @@ static int s3c2410_upll_enable(struct clk *clk, int enable)
/* standard clock definitions */
-static struct clk init_clocks_disable[] = {
+static struct clk init_clocks_off[] = {
{
.name = "nand",
.id = -1,
@@ -249,17 +249,8 @@ int __init s3c2410_baseclk_add(void)
/* install (and disable) the clocks we do not need immediately */
- clkp = init_clocks_disable;
- for (ptr = 0; ptr < ARRAY_SIZE(init_clocks_disable); ptr++, clkp++) {
-
- ret = s3c24xx_register_clock(clkp);
- if (ret < 0) {
- printk(KERN_ERR "Failed to register clock %s (%d)\n",
- clkp->name, ret);
- }
-
- s3c2410_clkcon_enable(clkp, 0);
- }
+ s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
+ s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
/* show the clock-slow value */
diff --git a/arch/arm/plat-s3c24xx/s3c2443-clock.c b/arch/arm/plat-s3c24xx/s3c2443-clock.c
new file mode 100644
index 000000000000..461f070eb62d
--- /dev/null
+++ b/arch/arm/plat-s3c24xx/s3c2443-clock.c
@@ -0,0 +1,472 @@
+/* linux/arch/arm/plat-s3c24xx/s3c2443-clock.c
+ *
+ * Copyright (c) 2007, 2010 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C2443 Clock control support - common code
+ */
+
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <mach/regs-s3c2443-clock.h>
+
+#include <plat/s3c2443.h>
+#include <plat/clock.h>
+#include <plat/clock-clksrc.h>
+#include <plat/cpu.h>
+
+#include <plat/cpu-freq.h>
+
+
+static int s3c2443_gate(void __iomem *reg, struct clk *clk, int enable)
+{
+ u32 ctrlbit = clk->ctrlbit;
+ u32 con = __raw_readl(reg);
+
+ if (enable)
+ con |= ctrlbit;
+ else
+ con &= ~ctrlbit;
+
+ __raw_writel(con, reg);
+ return 0;
+}
+
+int s3c2443_clkcon_enable_h(struct clk *clk, int enable)
+{
+ return s3c2443_gate(S3C2443_HCLKCON, clk, enable);
+}
+
+int s3c2443_clkcon_enable_p(struct clk *clk, int enable)
+{
+ return s3c2443_gate(S3C2443_PCLKCON, clk, enable);
+}
+
+int s3c2443_clkcon_enable_s(struct clk *clk, int enable)
+{
+ return s3c2443_gate(S3C2443_SCLKCON, clk, enable);
+}
+
+/* mpllref is a direct descendant of clk_xtal by default, but it is not
+ * elided as the EPLL can be sourced by either the XTAL or EXTCLK, and as
+ * such directly equating the two source clocks is impossible.
+ */
+struct clk clk_mpllref = {
+ .name = "mpllref",
+ .parent = &clk_xtal,
+ .id = -1,
+};
+
+static struct clk *clk_epllref_sources[] = {
+ [0] = &clk_mpllref,
+ [1] = &clk_mpllref,
+ [2] = &clk_xtal,
+ [3] = &clk_ext,
+};
+
+struct clksrc_clk clk_epllref = {
+ .clk = {
+ .name = "epllref",
+ .id = -1,
+ },
+ .sources = &(struct clksrc_sources) {
+ .sources = clk_epllref_sources,
+ .nr_sources = ARRAY_SIZE(clk_epllref_sources),
+ },
+ .reg_src = { .reg = S3C2443_CLKSRC, .size = 2, .shift = 7 },
+};
+
+/* esysclk
+ *
+ * this is sourced from either the EPLL or the EPLLref clock
+*/
+
+static struct clk *clk_sysclk_sources[] = {
+ [0] = &clk_epllref.clk,
+ [1] = &clk_epll,
+};
+
+struct clksrc_clk clk_esysclk = {
+ .clk = {
+ .name = "esysclk",
+ .parent = &clk_epll,
+ .id = -1,
+ },
+ .sources = &(struct clksrc_sources) {
+ .sources = clk_sysclk_sources,
+ .nr_sources = ARRAY_SIZE(clk_sysclk_sources),
+ },
+ .reg_src = { .reg = S3C2443_CLKSRC, .size = 1, .shift = 6 },
+};
+
+static unsigned long s3c2443_getrate_mdivclk(struct clk *clk)
+{
+ unsigned long parent_rate = clk_get_rate(clk->parent);
+ unsigned long div = __raw_readl(S3C2443_CLKDIV0);
+
+ div &= S3C2443_CLKDIV0_EXTDIV_MASK;
+ div >>= (S3C2443_CLKDIV0_EXTDIV_SHIFT-1); /* x2 */
+
+ return parent_rate / (div + 1);
+}
+
+static struct clk clk_mdivclk = {
+ .name = "mdivclk",
+ .parent = &clk_mpllref,
+ .id = -1,
+ .ops = &(struct clk_ops) {
+ .get_rate = s3c2443_getrate_mdivclk,
+ },
+};
+
+static struct clk *clk_msysclk_sources[] = {
+ [0] = &clk_mpllref,
+ [1] = &clk_mpll,
+ [2] = &clk_mdivclk,
+ [3] = &clk_mpllref,
+};
+
+struct clksrc_clk clk_msysclk = {
+ .clk = {
+ .name = "msysclk",
+ .parent = &clk_xtal,
+ .id = -1,
+ },
+ .sources = &(struct clksrc_sources) {
+ .sources = clk_msysclk_sources,
+ .nr_sources = ARRAY_SIZE(clk_msysclk_sources),
+ },
+ .reg_src = { .reg = S3C2443_CLKSRC, .size = 2, .shift = 3 },
+};
+
+/* prediv
+ *
+ * this divides the msysclk down to pass to h/p/etc.
+ */
+
+static unsigned long s3c2443_prediv_getrate(struct clk *clk)
+{
+ unsigned long rate = clk_get_rate(clk->parent);
+ unsigned long clkdiv0 = __raw_readl(S3C2443_CLKDIV0);
+
+ clkdiv0 &= S3C2443_CLKDIV0_PREDIV_MASK;
+ clkdiv0 >>= S3C2443_CLKDIV0_PREDIV_SHIFT;
+
+ return rate / (clkdiv0 + 1);
+}
+
+static struct clk clk_prediv = {
+ .name = "prediv",
+ .id = -1,
+ .parent = &clk_msysclk.clk,
+ .ops = &(struct clk_ops) {
+ .get_rate = s3c2443_prediv_getrate,
+ },
+};
+
+/* usbhost
+ *
+ * usb host bus-clock, usually 48MHz to provide USB bus clock timing
+*/
+
+static struct clksrc_clk clk_usb_bus_host = {
+ .clk = {
+ .name = "usb-bus-host-parent",
+ .id = -1,
+ .parent = &clk_esysclk.clk,
+ .ctrlbit = S3C2443_SCLKCON_USBHOST,
+ .enable = s3c2443_clkcon_enable_s,
+ },
+ .reg_div = { .reg = S3C2443_CLKDIV1, .size = 2, .shift = 4 },
+};
+
+/* common clksrc clocks */
+
+static struct clksrc_clk clksrc_clks[] = {
+ {
+ /* UART baud-rate clock sourced from esysclk via a divisor */
+ .clk = {
+ .name = "uartclk",
+ .id = -1,
+ .parent = &clk_esysclk.clk,
+ },
+ .reg_div = { .reg = S3C2443_CLKDIV1, .size = 4, .shift = 8 },
+ }, {
+ /* camera interface bus-clock, divided down from esysclk */
+ .clk = {
+ .name = "camif-upll", /* same as 2440 name */
+ .id = -1,
+ .parent = &clk_esysclk.clk,
+ .ctrlbit = S3C2443_SCLKCON_CAMCLK,
+ .enable = s3c2443_clkcon_enable_s,
+ },
+ .reg_div = { .reg = S3C2443_CLKDIV1, .size = 4, .shift = 26 },
+ }, {
+ .clk = {
+ .name = "display-if",
+ .id = -1,
+ .parent = &clk_esysclk.clk,
+ .ctrlbit = S3C2443_SCLKCON_DISPCLK,
+ .enable = s3c2443_clkcon_enable_s,
+ },
+ .reg_div = { .reg = S3C2443_CLKDIV1, .size = 8, .shift = 16 },
+ },
+};
+
+
+static struct clk init_clocks_off[] = {
+ {
+ .name = "adc",
+ .id = -1,
+ .parent = &clk_p,
+ .enable = s3c2443_clkcon_enable_p,
+ .ctrlbit = S3C2443_PCLKCON_ADC,
+ }, {
+ .name = "i2c",
+ .id = -1,
+ .parent = &clk_p,
+ .enable = s3c2443_clkcon_enable_p,
+ .ctrlbit = S3C2443_PCLKCON_IIC,
+ }
+};
+
+static struct clk init_clocks[] = {
+ {
+ .name = "dma",
+ .id = 0,
+ .parent = &clk_h,
+ .enable = s3c2443_clkcon_enable_h,
+ .ctrlbit = S3C2443_HCLKCON_DMA0,
+ }, {
+ .name = "dma",
+ .id = 1,
+ .parent = &clk_h,
+ .enable = s3c2443_clkcon_enable_h,
+ .ctrlbit = S3C2443_HCLKCON_DMA1,
+ }, {
+ .name = "dma",
+ .id = 2,
+ .parent = &clk_h,
+ .enable = s3c2443_clkcon_enable_h,
+ .ctrlbit = S3C2443_HCLKCON_DMA2,
+ }, {
+ .name = "dma",
+ .id = 3,
+ .parent = &clk_h,
+ .enable = s3c2443_clkcon_enable_h,
+ .ctrlbit = S3C2443_HCLKCON_DMA3,
+ }, {
+ .name = "dma",
+ .id = 4,
+ .parent = &clk_h,
+ .enable = s3c2443_clkcon_enable_h,
+ .ctrlbit = S3C2443_HCLKCON_DMA4,
+ }, {
+ .name = "dma",
+ .id = 5,
+ .parent = &clk_h,
+ .enable = s3c2443_clkcon_enable_h,
+ .ctrlbit = S3C2443_HCLKCON_DMA5,
+ }, {
+ .name = "hsmmc",
+ .id = 0,
+ .parent = &clk_h,
+ .enable = s3c2443_clkcon_enable_h,
+ .ctrlbit = S3C2443_HCLKCON_HSMMC,
+ }, {
+ .name = "gpio",
+ .id = -1,
+ .parent = &clk_p,
+ .enable = s3c2443_clkcon_enable_p,
+ .ctrlbit = S3C2443_PCLKCON_GPIO,
+ }, {
+ .name = "usb-host",
+ .id = -1,
+ .parent = &clk_h,
+ .enable = s3c2443_clkcon_enable_h,
+ .ctrlbit = S3C2443_HCLKCON_USBH,
+ }, {
+ .name = "usb-device",
+ .id = -1,
+ .parent = &clk_h,
+ .enable = s3c2443_clkcon_enable_h,
+ .ctrlbit = S3C2443_HCLKCON_USBD,
+ }, {
+ .name = "lcd",
+ .id = -1,
+ .parent = &clk_h,
+ .enable = s3c2443_clkcon_enable_h,
+ .ctrlbit = S3C2443_HCLKCON_LCDC,
+
+ }, {
+ .name = "timers",
+ .id = -1,
+ .parent = &clk_p,
+ .enable = s3c2443_clkcon_enable_p,
+ .ctrlbit = S3C2443_PCLKCON_PWMT,
+ }, {
+ .name = "cfc",
+ .id = -1,
+ .parent = &clk_h,
+ .enable = s3c2443_clkcon_enable_h,
+ .ctrlbit = S3C2443_HCLKCON_CFC,
+ }, {
+ .name = "ssmc",
+ .id = -1,
+ .parent = &clk_h,
+ .enable = s3c2443_clkcon_enable_h,
+ .ctrlbit = S3C2443_HCLKCON_SSMC,
+ }, {
+ .name = "uart",
+ .id = 0,
+ .parent = &clk_p,
+ .enable = s3c2443_clkcon_enable_p,
+ .ctrlbit = S3C2443_PCLKCON_UART0,
+ }, {
+ .name = "uart",
+ .id = 1,
+ .parent = &clk_p,
+ .enable = s3c2443_clkcon_enable_p,
+ .ctrlbit = S3C2443_PCLKCON_UART1,
+ }, {
+ .name = "uart",
+ .id = 2,
+ .parent = &clk_p,
+ .enable = s3c2443_clkcon_enable_p,
+ .ctrlbit = S3C2443_PCLKCON_UART2,
+ }, {
+ .name = "uart",
+ .id = 3,
+ .parent = &clk_p,
+ .enable = s3c2443_clkcon_enable_p,
+ .ctrlbit = S3C2443_PCLKCON_UART3,
+ }, {
+ .name = "rtc",
+ .id = -1,
+ .parent = &clk_p,
+ .enable = s3c2443_clkcon_enable_p,
+ .ctrlbit = S3C2443_PCLKCON_RTC,
+ }, {
+ .name = "watchdog",
+ .id = -1,
+ .parent = &clk_p,
+ .ctrlbit = S3C2443_PCLKCON_WDT,
+ }, {
+ .name = "ac97",
+ .id = -1,
+ .parent = &clk_p,
+ .ctrlbit = S3C2443_PCLKCON_AC97,
+ }, {
+ .name = "nand",
+ .id = -1,
+ .parent = &clk_h,
+ }, {
+ .name = "usb-bus-host",
+ .id = -1,
+ .parent = &clk_usb_bus_host.clk,
+ }
+};
+
+static inline unsigned long s3c2443_get_hdiv(unsigned long clkcon0)
+{
+ clkcon0 &= S3C2443_CLKDIV0_HCLKDIV_MASK;
+
+ return clkcon0 + 1;
+}
+
+/* the EPLLCON/MPLLCON layouts are compatible enough here to read the on/off bit */
+
+void __init_or_cpufreq s3c2443_common_setup_clocks(pll_fn get_mpll,
+ fdiv_fn get_fdiv)
+{
+ unsigned long epllcon = __raw_readl(S3C2443_EPLLCON);
+ unsigned long mpllcon = __raw_readl(S3C2443_MPLLCON);
+ unsigned long clkdiv0 = __raw_readl(S3C2443_CLKDIV0);
+ struct clk *xtal_clk;
+ unsigned long xtal;
+ unsigned long pll;
+ unsigned long fclk;
+ unsigned long hclk;
+ unsigned long pclk;
+ int ptr;
+
+ xtal_clk = clk_get(NULL, "xtal");
+ xtal = clk_get_rate(xtal_clk);
+ clk_put(xtal_clk);
+
+ pll = get_mpll(mpllcon, xtal);
+ clk_msysclk.clk.rate = pll;
+
+ fclk = pll / get_fdiv(clkdiv0);
+ hclk = s3c2443_prediv_getrate(&clk_prediv);
+ hclk /= s3c2443_get_hdiv(clkdiv0);
+ pclk = hclk / ((clkdiv0 & S3C2443_CLKDIV0_HALF_PCLK) ? 2 : 1);
+
+ s3c24xx_setup_clocks(fclk, hclk, pclk);
+
+ printk("CPU: MPLL %s %ld.%03ld MHz, cpu %ld.%03ld MHz, mem %ld.%03ld MHz, pclk %ld.%03ld MHz\n",
+ (mpllcon & S3C2443_PLLCON_OFF) ? "off":"on",
+ print_mhz(pll), print_mhz(fclk),
+ print_mhz(hclk), print_mhz(pclk));
+
+ for (ptr = 0; ptr < ARRAY_SIZE(clksrc_clks); ptr++)
+ s3c_set_clksrc(&clksrc_clks[ptr], true);
+
+ /* ensure the USB bus clock is at the correct rate of 48MHz */
+
+ if (clk_get_rate(&clk_usb_bus_host.clk) != (48 * 1000 * 1000)) {
+ printk(KERN_INFO "Warning: USB host bus not at 48MHz\n");
+ clk_set_rate(&clk_usb_bus_host.clk, 48*1000*1000);
+ }
+
+ printk("CPU: EPLL %s %ld.%03ld MHz, usb-bus %ld.%03ld MHz\n",
+ (epllcon & S3C2443_PLLCON_OFF) ? "off":"on",
+ print_mhz(clk_get_rate(&clk_epll)),
+ print_mhz(clk_get_rate(&clk_usb_bus)));
+}
+
+static struct clk *clks[] __initdata = {
+ &clk_prediv,
+ &clk_mpllref,
+ &clk_mdivclk,
+ &clk_ext,
+ &clk_epll,
+ &clk_usb_bus,
+};
+
+static struct clksrc_clk *clksrcs[] __initdata = {
+ &clk_usb_bus_host,
+ &clk_epllref,
+ &clk_esysclk,
+ &clk_msysclk,
+};
+
+void __init s3c2443_common_init_clocks(int xtal, pll_fn get_mpll,
+ fdiv_fn get_fdiv)
+{
+ int ptr;
+
+ /* s3c2443 parents h and p clocks from prediv */
+ clk_h.parent = &clk_prediv;
+ clk_p.parent = &clk_prediv;
+
+ clk_usb_bus.parent = &clk_usb_bus_host.clk;
+ clk_epll.parent = &clk_epllref.clk;
+
+ s3c24xx_register_baseclocks(xtal);
+ s3c24xx_register_clocks(clks, ARRAY_SIZE(clks));
+
+ for (ptr = 0; ptr < ARRAY_SIZE(clksrcs); ptr++)
+ s3c_register_clksrc(clksrcs[ptr], 1);
+
+ s3c_register_clksrc(clksrc_clks, ARRAY_SIZE(clksrc_clks));
+ s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));
+
+ /* See s3c2443/etc notes on disabling clocks at init time */
+ s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
+ s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
+
+ s3c2443_common_setup_clocks(get_mpll, get_fdiv);
+}
diff --git a/arch/arm/plat-s3c24xx/setup-i2c.c b/arch/arm/plat-s3c24xx/setup-i2c.c
index 71a6accf114e..9e90a7cbd1d6 100644
--- a/arch/arm/plat-s3c24xx/setup-i2c.c
+++ b/arch/arm/plat-s3c24xx/setup-i2c.c
@@ -15,12 +15,13 @@
struct platform_device;
+#include <plat/gpio-cfg.h>
#include <plat/iic.h>
#include <mach/hardware.h>
#include <mach/regs-gpio.h>
void s3c_i2c0_cfg_gpio(struct platform_device *dev)
{
- s3c2410_gpio_cfgpin(S3C2410_GPE(15), S3C2410_GPE15_IICSDA);
- s3c2410_gpio_cfgpin(S3C2410_GPE(14), S3C2410_GPE14_IICSCL);
+ s3c_gpio_cfgpin(S3C2410_GPE(15), S3C2410_GPE15_IICSDA);
+ s3c_gpio_cfgpin(S3C2410_GPE(14), S3C2410_GPE14_IICSCL);
}
diff --git a/arch/arm/plat-s3c24xx/spi-bus0-gpe11_12_13.c b/arch/arm/plat-s3c24xx/spi-bus0-gpe11_12_13.c
index da7a61728c18..9793544a6ace 100644
--- a/arch/arm/plat-s3c24xx/spi-bus0-gpe11_12_13.c
+++ b/arch/arm/plat-s3c24xx/spi-bus0-gpe11_12_13.c
@@ -21,16 +21,16 @@ void s3c24xx_spi_gpiocfg_bus0_gpe11_12_13(struct s3c2410_spi_info *spi,
int enable)
{
if (enable) {
- s3c2410_gpio_cfgpin(S3C2410_GPE(13), S3C2410_GPE13_SPICLK0);
- s3c2410_gpio_cfgpin(S3C2410_GPE(12), S3C2410_GPE12_SPIMOSI0);
- s3c2410_gpio_cfgpin(S3C2410_GPE(11), S3C2410_GPE11_SPIMISO0);
+ s3c_gpio_cfgpin(S3C2410_GPE(13), S3C2410_GPE13_SPICLK0);
+ s3c_gpio_cfgpin(S3C2410_GPE(12), S3C2410_GPE12_SPIMOSI0);
+ s3c_gpio_cfgpin(S3C2410_GPE(11), S3C2410_GPE11_SPIMISO0);
s3c2410_gpio_pullup(S3C2410_GPE(11), 0);
s3c2410_gpio_pullup(S3C2410_GPE(13), 0);
} else {
- s3c2410_gpio_cfgpin(S3C2410_GPE(13), S3C2410_GPIO_INPUT);
- s3c2410_gpio_cfgpin(S3C2410_GPE(11), S3C2410_GPIO_INPUT);
- s3c2410_gpio_pullup(S3C2410_GPE(11), 1);
- s3c2410_gpio_pullup(S3C2410_GPE(12), 1);
- s3c2410_gpio_pullup(S3C2410_GPE(13), 1);
+ s3c_gpio_cfgpin(S3C2410_GPE(13), S3C2410_GPIO_INPUT);
+ s3c_gpio_cfgpin(S3C2410_GPE(11), S3C2410_GPIO_INPUT);
+ s3c_gpio_cfgpull(S3C2410_GPE(11), S3C_GPIO_PULL_NONE);
+ s3c_gpio_cfgpull(S3C2410_GPE(12), S3C_GPIO_PULL_NONE);
+ s3c_gpio_cfgpull(S3C2410_GPE(13), S3C_GPIO_PULL_NONE);
}
}
diff --git a/arch/arm/plat-s3c24xx/spi-bus1-gpd8_9_10.c b/arch/arm/plat-s3c24xx/spi-bus1-gpd8_9_10.c
index 89fcf5308cf6..db9e9e477ec1 100644
--- a/arch/arm/plat-s3c24xx/spi-bus1-gpd8_9_10.c
+++ b/arch/arm/plat-s3c24xx/spi-bus1-gpd8_9_10.c
@@ -23,16 +23,16 @@ void s3c24xx_spi_gpiocfg_bus1_gpd8_9_10(struct s3c2410_spi_info *spi,
printk(KERN_INFO "%s(%d)\n", __func__, enable);
if (enable) {
- s3c2410_gpio_cfgpin(S3C2410_GPD(10), S3C2440_GPD10_SPICLK1);
- s3c2410_gpio_cfgpin(S3C2410_GPD(9), S3C2440_GPD9_SPIMOSI1);
- s3c2410_gpio_cfgpin(S3C2410_GPD(8), S3C2440_GPD8_SPIMISO1);
+ s3c_gpio_cfgpin(S3C2410_GPD(10), S3C2440_GPD10_SPICLK1);
+ s3c_gpio_cfgpin(S3C2410_GPD(9), S3C2440_GPD9_SPIMOSI1);
+ s3c_gpio_cfgpin(S3C2410_GPD(8), S3C2440_GPD8_SPIMISO1);
s3c2410_gpio_pullup(S3C2410_GPD(10), 0);
s3c2410_gpio_pullup(S3C2410_GPD(9), 0);
} else {
- s3c2410_gpio_cfgpin(S3C2410_GPD(8), S3C2410_GPIO_INPUT);
- s3c2410_gpio_cfgpin(S3C2410_GPD(9), S3C2410_GPIO_INPUT);
- s3c2410_gpio_pullup(S3C2410_GPD(10), 1);
- s3c2410_gpio_pullup(S3C2410_GPD(9), 1);
- s3c2410_gpio_pullup(S3C2410_GPD(8), 1);
+ s3c_gpio_cfgpin(S3C2410_GPD(8), S3C2410_GPIO_INPUT);
+ s3c_gpio_cfgpin(S3C2410_GPD(9), S3C2410_GPIO_INPUT);
+ s3c_gpio_cfgpull(S3C2410_GPD(10), S3C_GPIO_PULL_NONE);
+ s3c_gpio_cfgpull(S3C2410_GPD(9), S3C_GPIO_PULL_NONE);
+ s3c_gpio_cfgpull(S3C2410_GPD(8), S3C_GPIO_PULL_NONE);
}
}
diff --git a/arch/arm/plat-s3c24xx/spi-bus1-gpg5_6_7.c b/arch/arm/plat-s3c24xx/spi-bus1-gpg5_6_7.c
index 86b9edc67413..8ea663a438bb 100644
--- a/arch/arm/plat-s3c24xx/spi-bus1-gpg5_6_7.c
+++ b/arch/arm/plat-s3c24xx/spi-bus1-gpg5_6_7.c
@@ -21,16 +21,16 @@ void s3c24xx_spi_gpiocfg_bus1_gpg5_6_7(struct s3c2410_spi_info *spi,
int enable)
{
if (enable) {
- s3c2410_gpio_cfgpin(S3C2410_GPG(7), S3C2410_GPG7_SPICLK1);
- s3c2410_gpio_cfgpin(S3C2410_GPG(6), S3C2410_GPG6_SPIMOSI1);
- s3c2410_gpio_cfgpin(S3C2410_GPG(5), S3C2410_GPG5_SPIMISO1);
+ s3c_gpio_cfgpin(S3C2410_GPG(7), S3C2410_GPG7_SPICLK1);
+ s3c_gpio_cfgpin(S3C2410_GPG(6), S3C2410_GPG6_SPIMOSI1);
+ s3c_gpio_cfgpin(S3C2410_GPG(5), S3C2410_GPG5_SPIMISO1);
s3c2410_gpio_pullup(S3C2410_GPG(5), 0);
s3c2410_gpio_pullup(S3C2410_GPG(6), 0);
} else {
- s3c2410_gpio_cfgpin(S3C2410_GPG(7), S3C2410_GPIO_INPUT);
- s3c2410_gpio_cfgpin(S3C2410_GPG(5), S3C2410_GPIO_INPUT);
- s3c2410_gpio_pullup(S3C2410_GPG(5), 1);
- s3c2410_gpio_pullup(S3C2410_GPG(6), 1);
- s3c2410_gpio_pullup(S3C2410_GPG(7), 1);
+ s3c_gpio_cfgpin(S3C2410_GPG(7), S3C2410_GPIO_INPUT);
+ s3c_gpio_cfgpin(S3C2410_GPG(5), S3C2410_GPIO_INPUT);
+ s3c_gpio_cfgpull(S3C2410_GPG(5), S3C_GPIO_PULL_NONE);
+ s3c_gpio_cfgpull(S3C2410_GPG(6), S3C_GPIO_PULL_NONE);
+ s3c_gpio_cfgpull(S3C2410_GPG(7), S3C_GPIO_PULL_NONE);
}
}
diff --git a/arch/arm/plat-s5p/clock.c b/arch/arm/plat-s5p/clock.c
index aa96e335073b..3fef951445dc 100644
--- a/arch/arm/plat-s5p/clock.c
+++ b/arch/arm/plat-s5p/clock.c
@@ -33,6 +33,11 @@ struct clk clk_ext_xtal_mux = {
.id = -1,
};
+struct clk clk_xusbxti = {
+ .name = "xusbxti",
+ .id = -1,
+};
+
static struct clk s5p_clk_27m = {
.name = "clk_27m",
.id = -1,
diff --git a/arch/arm/plat-s5p/include/plat/s5p-clock.h b/arch/arm/plat-s5p/include/plat/s5p-clock.h
index 56fb8b414d41..a476a9f14697 100644
--- a/arch/arm/plat-s5p/include/plat/s5p-clock.h
+++ b/arch/arm/plat-s5p/include/plat/s5p-clock.h
@@ -23,6 +23,7 @@
#define clk_fin_vpll clk_ext_xtal_mux
extern struct clk clk_ext_xtal_mux;
+extern struct clk clk_xusbxti;
extern struct clk clk_48m;
extern struct clk clk_fout_apll;
extern struct clk clk_fout_mpll;
diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
index d552c65fa1b0..5d3ef35a5341 100644
--- a/arch/arm/plat-samsung/Kconfig
+++ b/arch/arm/plat-samsung/Kconfig
@@ -191,6 +191,12 @@ config S3C64XX_DEV_SPI
Compile in platform device definitions for S3C64XX's type
SPI controllers.
+config S3C64XX_DEV_TS
+ bool
+ help
+ Compile in platform device definitions for the S3C64XX touchscreen
+ device
+
# DMA
config S3C_DMA
diff --git a/arch/arm/plat-samsung/clock.c b/arch/arm/plat-samsung/clock.c
index 1b25c9d8c403..8bf79f3efdfb 100644
--- a/arch/arm/plat-samsung/clock.c
+++ b/arch/arm/plat-samsung/clock.c
@@ -376,6 +376,21 @@ void __init s3c_register_clocks(struct clk *clkp, int nr_clks)
}
}
+/**
+ * s3c_disable_clocks() - disable an array of clocks
+ * @clkp: Pointer to the first clock in the array.
+ * @nr_clks: Number of clocks to disable.
+ *
+ * For internal use only at initialisation time. Disables the clocks in the
+ * @clkp array.
+ */
+
+void __init s3c_disable_clocks(struct clk *clkp, int nr_clks)
+{
+ for (; nr_clks > 0; nr_clks--, clkp++)
+ (clkp->enable)(clkp, 0);
+}
+
/* initalise all the clocks */
int __init s3c24xx_register_baseclocks(unsigned long xtal)
diff --git a/arch/arm/plat-samsung/gpio-config.c b/arch/arm/plat-samsung/gpio-config.c
index 44a84e896546..3282db360fa8 100644
--- a/arch/arm/plat-samsung/gpio-config.c
+++ b/arch/arm/plat-samsung/gpio-config.c
@@ -1,7 +1,7 @@
/* linux/arch/arm/plat-s3c/gpio-config.c
*
* Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
+ * Copyright 2008-2010 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
* http://armlinux.simtec.co.uk/
*
@@ -41,6 +41,26 @@ int s3c_gpio_cfgpin(unsigned int pin, unsigned int config)
}
EXPORT_SYMBOL(s3c_gpio_cfgpin);
+unsigned s3c_gpio_getcfg(unsigned int pin)
+{
+ struct s3c_gpio_chip *chip = s3c_gpiolib_getchip(pin);
+ unsigned long flags;
+ unsigned ret = 0;
+ int offset;
+
+ if (chip) {
+ offset = pin - chip->chip.base;
+
+ local_irq_save(flags);
+ ret = s3c_gpio_do_getcfg(chip, offset);
+ local_irq_restore(flags);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(s3c_gpio_getcfg);
+
+
int s3c_gpio_setpull(unsigned int pin, s3c_gpio_pull_t pull)
{
struct s3c_gpio_chip *chip = s3c_gpiolib_getchip(pin);
@@ -61,8 +81,8 @@ int s3c_gpio_setpull(unsigned int pin, s3c_gpio_pull_t pull)
EXPORT_SYMBOL(s3c_gpio_setpull);
#ifdef CONFIG_S3C_GPIO_CFG_S3C24XX
-int s3c_gpio_setcfg_s3c24xx_banka(struct s3c_gpio_chip *chip,
- unsigned int off, unsigned int cfg)
+int s3c_gpio_setcfg_s3c24xx_a(struct s3c_gpio_chip *chip,
+ unsigned int off, unsigned int cfg)
{
void __iomem *reg = chip->base;
unsigned int shift = off;
@@ -87,6 +107,19 @@ int s3c_gpio_setcfg_s3c24xx_banka(struct s3c_gpio_chip *chip,
return 0;
}
+unsigned s3c_gpio_getcfg_s3c24xx_a(struct s3c_gpio_chip *chip,
+ unsigned int off)
+{
+ u32 con;
+
+ con = __raw_readl(chip->base);
+ con >>= off;
+ con &= 1;
+ con++;
+
+ return S3C_GPIO_SFN(con);
+}
+
int s3c_gpio_setcfg_s3c24xx(struct s3c_gpio_chip *chip,
unsigned int off, unsigned int cfg)
{
@@ -109,6 +142,19 @@ int s3c_gpio_setcfg_s3c24xx(struct s3c_gpio_chip *chip,
return 0;
}
+
+unsigned int s3c_gpio_getcfg_s3c24xx(struct s3c_gpio_chip *chip,
+ unsigned int off)
+{
+ u32 con;
+
+ con = __raw_readl(chip->base);
+ con >>= off * 2;
+ con &= 3;
+
+ /* this conversion works for IN and OUT as well as special mode */
+ return S3C_GPIO_SPECIAL(con);
+}
#endif
#ifdef CONFIG_S3C_GPIO_CFG_S3C64XX
@@ -134,6 +180,25 @@ int s3c_gpio_setcfg_s3c64xx_4bit(struct s3c_gpio_chip *chip,
return 0;
}
+
+unsigned s3c_gpio_getcfg_s3c64xx_4bit(struct s3c_gpio_chip *chip,
+ unsigned int off)
+{
+ void __iomem *reg = chip->base;
+ unsigned int shift = (off & 7) * 4;
+ u32 con;
+
+ if (off < 8 && chip->chip.ngpio > 8)
+ reg -= 4;
+
+ con = __raw_readl(reg);
+ con >>= shift;
+ con &= 0xf;
+
+ /* this conversion works for IN and OUT as well as special mode */
+ return S3C_GPIO_SPECIAL(con);
+}
+
#endif /* CONFIG_S3C_GPIO_CFG_S3C64XX */
#ifdef CONFIG_S3C_GPIO_PULL_UPDOWN
@@ -164,3 +229,35 @@ s3c_gpio_pull_t s3c_gpio_getpull_updown(struct s3c_gpio_chip *chip,
return (__force s3c_gpio_pull_t)pup;
}
#endif
+
+#ifdef CONFIG_S3C_GPIO_PULL_UP
+int s3c_gpio_setpull_1up(struct s3c_gpio_chip *chip,
+ unsigned int off, s3c_gpio_pull_t pull)
+{
+ void __iomem *reg = chip->base + 0x08;
+ u32 pup = __raw_readl(reg);
+
+ /* test the requested @pull state, not the current register value */
+ if (pull == S3C_GPIO_PULL_UP)
+ pup &= ~(1 << off);
+ else if (pull == S3C_GPIO_PULL_NONE)
+ pup |= (1 << off);
+ else
+ return -EINVAL;
+
+ __raw_writel(pup, reg);
+ return 0;
+}
+
+s3c_gpio_pull_t s3c_gpio_getpull_1up(struct s3c_gpio_chip *chip,
+ unsigned int off)
+{
+ void __iomem *reg = chip->base + 0x08;
+ u32 pup = __raw_readl(reg);
+
+ pup &= (1 << off);
+ return pup ? S3C_GPIO_PULL_NONE : S3C_GPIO_PULL_UP;
+}
+#endif /* CONFIG_S3C_GPIO_PULL_UP */
+
diff --git a/arch/arm/plat-samsung/include/plat/clock.h b/arch/arm/plat-samsung/include/plat/clock.h
index 60b62692ac7a..12caf48a6bdc 100644
--- a/arch/arm/plat-samsung/include/plat/clock.h
+++ b/arch/arm/plat-samsung/include/plat/clock.h
@@ -91,6 +91,7 @@ extern int s3c24xx_register_clock(struct clk *clk);
extern int s3c24xx_register_clocks(struct clk **clk, int nr_clks);
extern void s3c_register_clocks(struct clk *clk, int nr_clks);
+extern void s3c_disable_clocks(struct clk *clkp, int nr_clks);
extern int s3c24xx_register_baseclocks(unsigned long xtal);
diff --git a/arch/arm/plat-samsung/include/plat/cpu.h b/arch/arm/plat-samsung/include/plat/cpu.h
index d316b4a579f4..5dbeb7991e60 100644
--- a/arch/arm/plat-samsung/include/plat/cpu.h
+++ b/arch/arm/plat-samsung/include/plat/cpu.h
@@ -73,6 +73,7 @@ extern struct sys_timer s3c24xx_timer;
extern struct sysdev_class s3c2410_sysclass;
extern struct sysdev_class s3c2410a_sysclass;
extern struct sysdev_class s3c2412_sysclass;
+extern struct sysdev_class s3c2416_sysclass;
extern struct sysdev_class s3c2440_sysclass;
extern struct sysdev_class s3c2442_sysclass;
extern struct sysdev_class s3c2443_sysclass;
diff --git a/arch/arm/plat-samsung/include/plat/gpio-cfg-helpers.h b/arch/arm/plat-samsung/include/plat/gpio-cfg-helpers.h
index dda19da037ad..3e21c75feefa 100644
--- a/arch/arm/plat-samsung/include/plat/gpio-cfg-helpers.h
+++ b/arch/arm/plat-samsung/include/plat/gpio-cfg-helpers.h
@@ -30,6 +30,12 @@ static inline int s3c_gpio_do_setcfg(struct s3c_gpio_chip *chip,
return (chip->config->set_config)(chip, off, config);
}
+static inline unsigned s3c_gpio_do_getcfg(struct s3c_gpio_chip *chip,
+ unsigned int off)
+{
+ return (chip->config->get_config)(chip, off);
+}
+
static inline int s3c_gpio_do_setpull(struct s3c_gpio_chip *chip,
unsigned int off, s3c_gpio_pull_t pull)
{
@@ -53,6 +59,18 @@ extern int s3c_gpio_setcfg_s3c24xx(struct s3c_gpio_chip *chip,
unsigned int off, unsigned int cfg);
/**
+ * s3c_gpio_getcfg_s3c24xx - S3C24XX style GPIO configuration read.
+ * @chip: The gpio chip that is being configured.
+ * @off: The offset for the GPIO being configured.
+ *
+ * The reverse of s3c_gpio_setcfg_s3c24xx(). Will return a value, built with
+ * the S3C_GPIO_SPECIAL() macro, which could be passed directly back to
+ * s3c_gpio_setcfg_s3c24xx().
+ */
+unsigned int s3c_gpio_getcfg_s3c24xx(struct s3c_gpio_chip *chip,
+ unsigned int off);
+
+/**
* s3c_gpio_setcfg_s3c24xx_a - S3C24XX style GPIO configuration (Bank A)
* @chip: The gpio chip that is being configured.
* @off: The offset for the GPIO being configured.
@@ -65,6 +83,21 @@ extern int s3c_gpio_setcfg_s3c24xx(struct s3c_gpio_chip *chip,
extern int s3c_gpio_setcfg_s3c24xx_a(struct s3c_gpio_chip *chip,
unsigned int off, unsigned int cfg);
+
+/**
+ * s3c_gpio_getcfg_s3c24xx_a - S3C24XX style GPIO configuration read (Bank A)
+ * @chip: The gpio chip that is being configured.
+ * @off: The offset for the GPIO being configured.
+ *
+ * The reverse of s3c_gpio_setcfg_s3c24xx_a(), turning a Bank A register
+ * setting into a usable GPIO configuration value.
+ *
+ * @sa s3c_gpio_getcfg_s3c24xx
+ * @sa s3c_gpio_getcfg_s3c64xx_4bit
+ */
+extern unsigned s3c_gpio_getcfg_s3c24xx_a(struct s3c_gpio_chip *chip,
+ unsigned int off);
+
/**
* s3c_gpio_setcfg_s3c64xx_4bit - S3C64XX 4bit single register GPIO config.
* @chip: The gpio chip that is being configured.
@@ -85,6 +118,20 @@ extern int s3c_gpio_setcfg_s3c64xx_4bit(struct s3c_gpio_chip *chip,
unsigned int off, unsigned int cfg);
+/**
+ * s3c_gpio_getcfg_s3c64xx_4bit - S3C64XX 4bit single register GPIO config read.
+ * @chip: The gpio chip that is being configured.
+ * @off: The offset for the GPIO being configured.
+ *
+ * The reverse of s3c_gpio_setcfg_s3c64xx_4bit(), turning a gpio configuration
+ * register setting into a value the software can use, such as could be passed
+ * to s3c_gpio_setcfg_s3c64xx_4bit().
+ *
+ * @sa s3c_gpio_getcfg_s3c24xx
+ */
+extern unsigned s3c_gpio_getcfg_s3c64xx_4bit(struct s3c_gpio_chip *chip,
+ unsigned int off);
+
/* Pull-{up,down} resistor controls.
*
* S3C2410,S3C2440,S3C24A0 = Pull-UP,
@@ -146,6 +193,17 @@ extern s3c_gpio_pull_t s3c_gpio_getpull_updown(struct s3c_gpio_chip *chip,
unsigned int off);
/**
+ * s3c_gpio_getpull_1up() - Get configuration for choice of up or none
+ * @chip: The gpio chip that the GPIO pin belongs to
+ * @off: The offset to the pin to get the configuration of.
+ *
+ * This helper function reads the state of the pull-up resistor for the
+ * given GPIO in the same manner as s3c_gpio_setpull_1up().
+*/
+extern s3c_gpio_pull_t s3c_gpio_getpull_1up(struct s3c_gpio_chip *chip,
+ unsigned int off);
+
+/**
* s3c_gpio_setpull_s3c2443() - Pull configuration for s3c2443.
* @chip: The gpio chip that is being configured.
* @off: The offset for the GPIO being configured.
diff --git a/arch/arm/plat-samsung/include/plat/gpio-cfg.h b/arch/arm/plat-samsung/include/plat/gpio-cfg.h
index 29cd6a86cade..8d01e853df39 100644
--- a/arch/arm/plat-samsung/include/plat/gpio-cfg.h
+++ b/arch/arm/plat-samsung/include/plat/gpio-cfg.h
@@ -77,6 +77,17 @@ struct s3c_gpio_cfg {
*/
extern int s3c_gpio_cfgpin(unsigned int pin, unsigned int to);
+/**
+ * s3c_gpio_getcfg - Read the current function for a GPIO pin
+ * @pin: The pin to read the configuration value for.
+ *
+ * Read the configuration state of the given @pin, returning a value that
+ * could be passed back to s3c_gpio_cfgpin().
+ *
+ * @sa s3c_gpio_cfgpin
+ */
+extern unsigned s3c_gpio_getcfg(unsigned int pin);
+
/* Define values for the pull-{up,down} available for each gpio pin.
*
* These values control the state of the weak pull-{up,down} resistors
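A short usage sketch for the new getter; the pin and function constants appear elsewhere in this series, while the save/restore pattern itself is illustrative:

    unsigned int old_cfg = s3c_gpio_getcfg(S3C2410_GPE(14));

    s3c_gpio_cfgpin(S3C2410_GPE(14), S3C2410_GPE14_IICSCL);
    /* ... use the pin as IICSCL ... */
    s3c_gpio_cfgpin(S3C2410_GPE(14), old_cfg);	/* restore previous function */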
diff --git a/arch/arm/plat-samsung/include/plat/gpio-core.h b/arch/arm/plat-samsung/include/plat/gpio-core.h
index 49ff406a7066..805b6f4dedfb 100644
--- a/arch/arm/plat-samsung/include/plat/gpio-core.h
+++ b/arch/arm/plat-samsung/include/plat/gpio-core.h
@@ -97,7 +97,7 @@ extern void s3c_gpiolib_add(struct s3c_gpio_chip *chip);
* others = Special functions (dependant on bank)
*
* Note, since the code to deal with the case where there are two control
- * registers instead of one, we do not have a seperate set of function
+ * registers instead of one, we do not have a separate set of function
* (samsung_gpiolib_add_4bit2_chips)for each case.
*/
extern void samsung_gpiolib_add_4bit_chips(struct s3c_gpio_chip *chip,
@@ -108,6 +108,9 @@ extern void samsung_gpiolib_add_4bit2_chips(struct s3c_gpio_chip *chip,
extern void samsung_gpiolib_add_4bit(struct s3c_gpio_chip *chip);
extern void samsung_gpiolib_add_4bit2(struct s3c_gpio_chip *chip);
+/* exported for core SoC support to change */
+extern struct s3c_gpio_cfg s3c24xx_gpiocfg_default;
+
#ifdef CONFIG_S3C_GPIO_TRACK
extern struct s3c_gpio_chip *s3c_gpios[S3C_GPIO_END];
diff --git a/arch/arm/plat-samsung/include/plat/pll6553x.h b/arch/arm/plat-samsung/include/plat/pll6553x.h
new file mode 100644
index 000000000000..b8b7e1d884f8
--- /dev/null
+++ b/arch/arm/plat-samsung/include/plat/pll6553x.h
@@ -0,0 +1,51 @@
+/* arch/arm/plat-samsung/include/plat/pll6553x.h
+ * partially from arch/arm/mach-s3c64xx/include/mach/pll.h
+ *
+ * Copyright 2008 Openmoko, Inc.
+ * Copyright 2008 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ * http://armlinux.simtec.co.uk/
+ *
+ * Samsung PLL6553x PLL code
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+/* S3C6400 and compatible (S3C2416, etc.) EPLL code */
+
+#define PLL6553X_MDIV_MASK ((1 << (23-16)) - 1)
+#define PLL6553X_PDIV_MASK ((1 << (13-8)) - 1)
+#define PLL6553X_SDIV_MASK ((1 << (2-0)) - 1)
+#define PLL6553X_MDIV_SHIFT (16)
+#define PLL6553X_PDIV_SHIFT (8)
+#define PLL6553X_SDIV_SHIFT (0)
+#define PLL6553X_KDIV_MASK (0xffff)
+
+static inline unsigned long s3c_get_pll6553x(unsigned long baseclk,
+ u32 pll0, u32 pll1)
+{
+ unsigned long result;
+ u32 mdiv, pdiv, sdiv, kdiv;
+ u64 tmp;
+
+ mdiv = (pll0 >> PLL6553X_MDIV_SHIFT) & PLL6553X_MDIV_MASK;
+ pdiv = (pll0 >> PLL6553X_PDIV_SHIFT) & PLL6553X_PDIV_MASK;
+ sdiv = (pll0 >> PLL6553X_SDIV_SHIFT) & PLL6553X_SDIV_MASK;
+ kdiv = pll1 & PLL6553X_KDIV_MASK;
+
+ /* We need to multiply baseclk by mdiv (the integer part) and kdiv
+ * which is in 2^16ths, so shift mdiv up (does not overflow) and
+ * add kdiv before multiplying. The use of tmp is to avoid any
+ * overflows before shifting back down into result when multiplying
+ * by the mdiv and kdiv pair.
+ */
+
+ tmp = baseclk;
+ tmp *= (mdiv << 16) + kdiv;
+ do_div(tmp, (pdiv << sdiv));
+ result = tmp >> 16;
+
+ return result;
+}
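A worked instance of the calculation above, using hypothetical divider values rather than a real register dump:

    /* baseclk = 12 MHz, mdiv = 32, pdiv = 1, sdiv = 1, kdiv = 0:
     *   tmp    = 12000000 * ((32 << 16) + 0)
     *   result = (tmp / (1 << 1)) >> 16 = 192000000, i.e. 192 MHz
     */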
diff --git a/arch/arm/mach-s3c2410/include/mach/ts.h b/arch/arm/plat-samsung/include/plat/ts.h
index dc361700d695..8a51675f6b03 100644
--- a/arch/arm/mach-s3c2410/include/mach/ts.h
+++ b/arch/arm/plat-samsung/include/plat/ts.h
@@ -1,4 +1,4 @@
-/* linux/include/asm/arch-s3c2410/ts.h
+/* arch/arm/plat-samsung/include/plat/ts.h
*
* Copyright (c) 2005 Arnaud Patard <arnaud.patard@rtp-net.org>
*
diff --git a/arch/arm/plat-spear/Kconfig b/arch/arm/plat-spear/Kconfig
new file mode 100644
index 000000000000..1bb3dbce8810
--- /dev/null
+++ b/arch/arm/plat-spear/Kconfig
@@ -0,0 +1,31 @@
+#
+# SPEAr Platform configuration file
+#
+
+if PLAT_SPEAR
+
+choice
+ prompt "ST SPEAr Family"
+ default ARCH_SPEAR3XX
+
+config ARCH_SPEAR3XX
+ bool "SPEAr3XX"
+ select ARM_VIC
+ select CPU_ARM926T
+ help
+ Support for ST's SPEAr3XX family of SoCs
+
+config ARCH_SPEAR6XX
+ bool "SPEAr6XX"
+ select ARM_VIC
+ select CPU_ARM926T
+ help
+ Support for ST's SPEAr6XX family of SoCs
+
+endchoice
+
+# Adding SPEAr machine specific configuration files
+source "arch/arm/mach-spear3xx/Kconfig"
+source "arch/arm/mach-spear6xx/Kconfig"
+
+endif
diff --git a/arch/arm/plat-spear/Makefile b/arch/arm/plat-spear/Makefile
new file mode 100644
index 000000000000..eb89540aeda9
--- /dev/null
+++ b/arch/arm/plat-spear/Makefile
@@ -0,0 +1,8 @@
+#
+# SPEAr Platform specific Makefile
+#
+
+# Common support
+obj-y := clock.o padmux.o time.o
+
+obj-$(CONFIG_ARCH_SPEAR3XX) += shirq.o
diff --git a/arch/arm/plat-spear/clock.c b/arch/arm/plat-spear/clock.c
new file mode 100644
index 000000000000..ee4f90e534d8
--- /dev/null
+++ b/arch/arm/plat-spear/clock.c
@@ -0,0 +1,435 @@
+/*
+ * arch/arm/plat-spear/clock.c
+ *
+ * Clock framework for SPEAr platform
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/bug.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <mach/misc_regs.h>
+#include <plat/clock.h>
+
+static DEFINE_SPINLOCK(clocks_lock);
+static LIST_HEAD(root_clks);
+
+static void propagate_rate(struct list_head *);
+
+static int generic_clk_enable(struct clk *clk)
+{
+ unsigned int val;
+
+ if (!clk->en_reg)
+ return -EFAULT;
+
+ val = readl(clk->en_reg);
+ if (unlikely(clk->flags & RESET_TO_ENABLE))
+ val &= ~(1 << clk->en_reg_bit);
+ else
+ val |= 1 << clk->en_reg_bit;
+
+ writel(val, clk->en_reg);
+
+ return 0;
+}
+
+static void generic_clk_disable(struct clk *clk)
+{
+ unsigned int val;
+
+ if (!clk->en_reg)
+ return;
+
+ val = readl(clk->en_reg);
+ if (unlikely(clk->flags & RESET_TO_ENABLE))
+ val |= 1 << clk->en_reg_bit;
+ else
+ val &= ~(1 << clk->en_reg_bit);
+
+ writel(val, clk->en_reg);
+}
+
+/* generic clk ops */
+static struct clkops generic_clkops = {
+ .enable = generic_clk_enable,
+ .disable = generic_clk_disable,
+};
+
+/*
+ * clk_enable - inform the system when the clock source should be running.
+ * @clk: clock source
+ *
+ * If the clock can not be enabled/disabled, this should return success.
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_enable(struct clk *clk)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ if (!clk || IS_ERR(clk))
+ return -EFAULT;
+
+ spin_lock_irqsave(&clocks_lock, flags);
+ if (clk->usage_count == 0) {
+ if (clk->ops && clk->ops->enable)
+ ret = clk->ops->enable(clk);
+ }
+ clk->usage_count++;
+ spin_unlock_irqrestore(&clocks_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(clk_enable);
+
+/*
+ * clk_disable - inform the system when the clock source is no longer required.
+ * @clk: clock source
+ *
+ * Inform the system that a clock source is no longer required by
+ * a driver and may be shut down.
+ *
+ * Implementation detail: if the clock source is shared between
+ * multiple drivers, clk_enable() calls must be balanced by the
+ * same number of clk_disable() calls for the clock source to be
+ * disabled.
+ */
+void clk_disable(struct clk *clk)
+{
+ unsigned long flags;
+
+ if (!clk || IS_ERR(clk))
+ return;
+
+ WARN_ON(clk->usage_count == 0);
+
+ spin_lock_irqsave(&clocks_lock, flags);
+ clk->usage_count--;
+ if (clk->usage_count == 0) {
+ if (clk->ops && clk->ops->disable)
+ clk->ops->disable(clk);
+ }
+ spin_unlock_irqrestore(&clocks_lock, flags);
+}
+EXPORT_SYMBOL(clk_disable);
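A minimal sketch of the enable/disable balancing described above; the device and connection id are hypothetical:

    struct clk *clk = clk_get(&pdev->dev, "uart");

    clk_enable(clk);	/* usage_count 0 -> 1, gate opened via ops->enable  */
    clk_enable(clk);	/* usage_count 1 -> 2, hardware untouched           */

    clk_disable(clk);	/* usage_count 2 -> 1, clock keeps running          */
    clk_disable(clk);	/* usage_count 1 -> 0, gate closed via ops->disable */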
+
+/**
+ * clk_get_rate - obtain the current clock rate (in Hz) for a clock source.
+ * This is only valid once the clock source has been enabled.
+ * @clk: clock source
+ */
+unsigned long clk_get_rate(struct clk *clk)
+{
+ unsigned long flags, rate;
+
+ spin_lock_irqsave(&clocks_lock, flags);
+ rate = clk->rate;
+ spin_unlock_irqrestore(&clocks_lock, flags);
+
+ return rate;
+}
+EXPORT_SYMBOL(clk_get_rate);
+
+/**
+ * clk_set_parent - set the parent clock source for this clock
+ * @clk: clock source
+ * @parent: parent clock source
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+ int i, found = 0, val = 0;
+ unsigned long flags;
+
+ if (!clk || IS_ERR(clk) || !parent || IS_ERR(parent))
+ return -EFAULT;
+ if (clk->usage_count)
+ return -EBUSY;
+ if (!clk->pclk_sel)
+ return -EPERM;
+ if (clk->pclk == parent)
+ return 0;
+
+ for (i = 0; i < clk->pclk_sel->pclk_count; i++) {
+ if (clk->pclk_sel->pclk_info[i].pclk == parent) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found)
+ return -EINVAL;
+
+ spin_lock_irqsave(&clocks_lock, flags);
+ /* reflect parent change in hardware */
+ val = readl(clk->pclk_sel->pclk_sel_reg);
+ val &= ~(clk->pclk_sel->pclk_sel_mask << clk->pclk_sel_shift);
+ val |= clk->pclk_sel->pclk_info[i].pclk_mask << clk->pclk_sel_shift;
+ writel(val, clk->pclk_sel->pclk_sel_reg);
+ spin_unlock_irqrestore(&clocks_lock, flags);
+
+ /* reflect parent change in software */
+ clk->recalc(clk);
+ propagate_rate(&clk->children);
+ return 0;
+}
+EXPORT_SYMBOL(clk_set_parent);
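Re-parenting with the routine above might look like this sketch (clock names are hypothetical); it returns -EBUSY while the clock is enabled and -EINVAL if the candidate parent is not listed in pclk_sel:

    struct clk *gpt = clk_get(NULL, "gpt0");
    struct clk *pll3 = clk_get(NULL, "pll3_48m");

    if (!IS_ERR(gpt) && !IS_ERR(pll3))
    	clk_set_parent(gpt, pll3);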
+
+/* registers clock in platform clock framework */
+void clk_register(struct clk_lookup *cl)
+{
+ struct clk *clk = cl->clk;
+ unsigned long flags;
+
+ if (!clk || IS_ERR(clk))
+ return;
+
+ spin_lock_irqsave(&clocks_lock, flags);
+
+ INIT_LIST_HEAD(&clk->children);
+ if (clk->flags & ALWAYS_ENABLED)
+ clk->ops = NULL;
+ else if (!clk->ops)
+ clk->ops = &generic_clkops;
+
+ /* root clocks don't have any parents */
+ if (!clk->pclk && !clk->pclk_sel) {
+ list_add(&clk->sibling, &root_clks);
+ /* add clocks with only one parent to parent's children list */
+ } else if (clk->pclk && !clk->pclk_sel) {
+ list_add(&clk->sibling, &clk->pclk->children);
+ } else {
+ /* add clocks with > 1 parent to 1st parent's children list */
+ list_add(&clk->sibling,
+ &clk->pclk_sel->pclk_info[0].pclk->children);
+ }
+ spin_unlock_irqrestore(&clocks_lock, flags);
+
+ /* add clock to arm clockdev framework */
+ clkdev_add(cl);
+}
+
+/**
+ * propagate_rate - recalculate and propagate all clocks in list head
+ *
+ * Recalculates all clocks in the given list; if each clock's .recalc is
+ * set correctly, their rates are propagated to their children as well.
+ */
+static void propagate_rate(struct list_head *lhead)
+{
+ struct clk *clkp, *_temp;
+
+ list_for_each_entry_safe(clkp, _temp, lhead, sibling) {
+ if (clkp->recalc)
+ clkp->recalc(clkp);
+ propagate_rate(&clkp->children);
+ }
+}
+
+/* returns the pclk_info structure of the currently programmed parent clock */
+static struct pclk_info *pclk_info_get(struct clk *clk)
+{
+ unsigned int mask, i;
+ unsigned long flags;
+ struct pclk_info *info = NULL;
+
+ spin_lock_irqsave(&clocks_lock, flags);
+ mask = (readl(clk->pclk_sel->pclk_sel_reg) >> clk->pclk_sel_shift)
+ & clk->pclk_sel->pclk_sel_mask;
+
+ for (i = 0; i < clk->pclk_sel->pclk_count; i++) {
+ if (clk->pclk_sel->pclk_info[i].pclk_mask == mask)
+ info = &clk->pclk_sel->pclk_info[i];
+ }
+ spin_unlock_irqrestore(&clocks_lock, flags);
+
+ return info;
+}
+
+/*
+ * Set pclk as cclk's parent and move cclk's sibling node onto the new
+ * parent's children list
+ */
+static void change_parent(struct clk *cclk, struct clk *pclk)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&clocks_lock, flags);
+ list_del(&cclk->sibling);
+ list_add(&cclk->sibling, &pclk->children);
+
+ cclk->pclk = pclk;
+ spin_unlock_irqrestore(&clocks_lock, flags);
+}
+
+/*
+ * calculates current programmed rate of pll1
+ *
+ * In normal mode
+ * rate = (2 * M[15:8] * Fin)/(N * 2^P)
+ *
+ * In Dithered mode
+ * rate = (2 * M[15:0] * Fin)/(256 * N * 2^P)
+ */
+void pll1_clk_recalc(struct clk *clk)
+{
+ struct pll_clk_config *config = clk->private_data;
+ unsigned int num = 2, den = 0, val, mode = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&clocks_lock, flags);
+ mode = (readl(config->mode_reg) >> PLL_MODE_SHIFT) &
+ PLL_MODE_MASK;
+
+ val = readl(config->cfg_reg);
+ /* calculate denominator */
+ den = (val >> PLL_DIV_P_SHIFT) & PLL_DIV_P_MASK;
+ den = 1 << den;
+ den *= (val >> PLL_DIV_N_SHIFT) & PLL_DIV_N_MASK;
+
+ /* calculate numerator & denominator */
+ if (!mode) {
+ /* Normal mode */
+ num *= (val >> PLL_NORM_FDBK_M_SHIFT) & PLL_NORM_FDBK_M_MASK;
+ } else {
+ /* Dithered mode */
+ num *= (val >> PLL_DITH_FDBK_M_SHIFT) & PLL_DITH_FDBK_M_MASK;
+ den *= 256;
+ }
+
+ clk->rate = (((clk->pclk->rate/10000) * num) / den) * 10000;
+ spin_unlock_irqrestore(&clocks_lock, flags);
+}
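A worked instance of the normal-mode formula above, with hypothetical register values:

    /* Fin = 30 MHz, M[15:8] = 83, N = 15, P = 0:
     *   rate = (2 * 83 * 30000000) / (15 * (1 << 0)) = 332 MHz
     */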
+
+/* calculates current programmed rate of ahb or apb bus */
+void bus_clk_recalc(struct clk *clk)
+{
+ struct bus_clk_config *config = clk->private_data;
+ unsigned int div;
+ unsigned long flags;
+
+ spin_lock_irqsave(&clocks_lock, flags);
+ div = ((readl(config->reg) >> config->shift) & config->mask) + 1;
+ clk->rate = (unsigned long)clk->pclk->rate / div;
+ spin_unlock_irqrestore(&clocks_lock, flags);
+}
+
+/*
+ * calculates current programmed rate of auxiliary synthesizers
+ * used by: UART, FIRDA
+ *
+ * Fout from synthesizer can be given from two equations:
+ * Fout1 = (Fin * X/Y)/2
+ * Fout2 = Fin * X/Y
+ *
+ * Selection of eqn 1 or 2 is programmed in register
+ */
+void aux_clk_recalc(struct clk *clk)
+{
+ struct aux_clk_config *config = clk->private_data;
+ struct pclk_info *pclk_info = NULL;
+ unsigned int num = 1, den = 1, val, eqn;
+ unsigned long flags;
+
+ /* get current programmed parent */
+ pclk_info = pclk_info_get(clk);
+ if (!pclk_info) {
+ spin_lock_irqsave(&clocks_lock, flags);
+ clk->pclk = NULL;
+ clk->rate = 0;
+ spin_unlock_irqrestore(&clocks_lock, flags);
+ return;
+ }
+
+ change_parent(clk, pclk_info->pclk);
+
+ spin_lock_irqsave(&clocks_lock, flags);
+ if (pclk_info->scalable) {
+ val = readl(config->synth_reg);
+
+ eqn = (val >> AUX_EQ_SEL_SHIFT) & AUX_EQ_SEL_MASK;
+ if (eqn == AUX_EQ1_SEL)
+ den *= 2;
+
+ /* calculate numerator */
+ num = (val >> AUX_XSCALE_SHIFT) & AUX_XSCALE_MASK;
+
+ /* calculate denominator */
+ den *= (val >> AUX_YSCALE_SHIFT) & AUX_YSCALE_MASK;
+ val = (((clk->pclk->rate/10000) * num) / den) * 10000;
+ } else
+ val = clk->pclk->rate;
+
+ clk->rate = val;
+ spin_unlock_irqrestore(&clocks_lock, flags);
+}
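And for the synthesizer equations above, again with hypothetical values:

    /* Fin = 332 MHz, X = 1, Y = 2, equation 1 selected (extra /2):
     *   Fout = ((332000000 * 1) / 2) / 2 = 83 MHz
     */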
+
+/*
+ * calculates current programmed rate of gpt synthesizers
+ * Fout from synthesizer can be given from below equations:
+ * Fout= Fin/((2 ^ (N+1)) * (M+1))
+ */
+void gpt_clk_recalc(struct clk *clk)
+{
+ struct aux_clk_config *config = clk->private_data;
+ struct pclk_info *pclk_info = NULL;
+ unsigned int div = 1, val;
+ unsigned long flags;
+
+ pclk_info = pclk_info_get(clk);
+ if (!pclk_info) {
+ spin_lock_irqsave(&clocks_lock, flags);
+ clk->pclk = NULL;
+ clk->rate = 0;
+ spin_unlock_irqrestore(&clocks_lock, flags);
+ return;
+ }
+
+ change_parent(clk, pclk_info->pclk);
+
+ spin_lock_irqsave(&clocks_lock, flags);
+ if (pclk_info->scalable) {
+ val = readl(config->synth_reg);
+ div += (val >> GPT_MSCALE_SHIFT) & GPT_MSCALE_MASK;
+ div *= 1 << (((val >> GPT_NSCALE_SHIFT) & GPT_NSCALE_MASK) + 1);
+ }
+
+ clk->rate = (unsigned long)clk->pclk->rate / div;
+ spin_unlock_irqrestore(&clocks_lock, flags);
+}
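And a worked instance of the GPT equation above (hypothetical values):

    /* Fin = 83 MHz, M = 0, N = 1:
     *   Fout = 83000000 / ((1 << (1 + 1)) * (0 + 1)) = 20.75 MHz
     */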
+
+/*
+ * Used for clocks that always run at the same rate as their parent clock
+ */
+void follow_parent(struct clk *clk)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&clocks_lock, flags);
+ clk->rate = clk->pclk->rate;
+ spin_unlock_irqrestore(&clocks_lock, flags);
+}
+
+/**
+ * recalc_root_clocks - recalculate and propagate all root clocks
+ *
+ * Recalculates all root clocks (clocks with no parent); if each clock's
+ * .recalc is set correctly, their rates are propagated as well.
+ */
+void recalc_root_clocks(void)
+{
+ propagate_rate(&root_clks);
+}
diff --git a/arch/arm/plat-spear/include/plat/clkdev.h b/arch/arm/plat-spear/include/plat/clkdev.h
new file mode 100644
index 000000000000..a2d0112fcaf7
--- /dev/null
+++ b/arch/arm/plat-spear/include/plat/clkdev.h
@@ -0,0 +1,20 @@
+/*
+ * arch/arm/plat-spear/include/plat/clkdev.h
+ *
+ * Clock Dev framework definitions for SPEAr platform
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __PLAT_CLKDEV_H
+#define __PLAT_CLKDEV_H
+
+#define __clk_get(clk) ({ 1; })
+#define __clk_put(clk) do { } while (0)
+
+#endif /* __PLAT_CLKDEV_H */
diff --git a/arch/arm/plat-spear/include/plat/clock.h b/arch/arm/plat-spear/include/plat/clock.h
new file mode 100644
index 000000000000..298bafc0a52f
--- /dev/null
+++ b/arch/arm/plat-spear/include/plat/clock.h
@@ -0,0 +1,126 @@
+/*
+ * arch/arm/plat-spear/include/plat/clock.h
+ *
+ * Clock framework definitions for SPEAr platform
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __PLAT_CLOCK_H
+#define __PLAT_CLOCK_H
+
+#include <linux/list.h>
+#include <asm/clkdev.h>
+#include <linux/types.h>
+
+/* clk structure flags */
+#define ALWAYS_ENABLED (1 << 0) /* clock always enabled */
+#define RESET_TO_ENABLE (1 << 1) /* reset register bit to enable clk */
+
+/**
+ * struct clkops - clock operations
+ * @enable: pointer to clock enable function
+ * @disable: pointer to clock disable function
+ */
+struct clkops {
+ int (*enable) (struct clk *);
+ void (*disable) (struct clk *);
+};
+
+/**
+ * struct pclk_info - parents info
+ * @pclk: pointer to parent clk
+ * @pclk_mask: value to be written for selecting this parent
+ * @scalable: Is parent scalable (1 - YES, 0 - NO)
+ */
+struct pclk_info {
+ struct clk *pclk;
+ u8 pclk_mask;
+ u8 scalable;
+};
+
+/**
+ * struct pclk_sel - parents selection configuration
+ * @pclk_info: pointer to array of parent clock info
+ * @pclk_count: number of parents
+ * @pclk_sel_reg: register for selecting a parent
+ * @pclk_sel_mask: mask for selecting parent (can be used to clear bits also)
+ */
+struct pclk_sel {
+ struct pclk_info *pclk_info;
+ u8 pclk_count;
+ unsigned int *pclk_sel_reg;
+ unsigned int pclk_sel_mask;
+};
+
+/**
+ * struct clk - clock structure
+ * @usage_count: num of users who enabled this clock
+ * @flags: flags for clock properties
+ * @rate: programmed clock rate in Hz
+ * @en_reg: clk enable/disable reg
+ * @en_reg_bit: clk enable/disable bit
+ * @ops: clk enable/disable ops - generic_clkops selected if NULL
+ * @recalc: pointer to clock rate recalculate function
+ * @pclk: current parent clk
+ * @pclk_sel: pointer to parent selection structure
+ * @pclk_sel_shift: register shift for selecting parent of this clock
+ * @children: list of children of this clock
+ * @sibling: node for list of clocks having same parents
+ * @private_data: clock specific private data
+ */
+struct clk {
+ unsigned int usage_count;
+ unsigned int flags;
+ unsigned long rate;
+ unsigned int *en_reg;
+ u8 en_reg_bit;
+ const struct clkops *ops;
+ void (*recalc) (struct clk *);
+
+ struct clk *pclk;
+ struct pclk_sel *pclk_sel;
+ unsigned int pclk_sel_shift;
+
+ struct list_head children;
+ struct list_head sibling;
+ void *private_data;
+};
+
+/* pll configuration structure */
+struct pll_clk_config {
+ unsigned int *mode_reg;
+ unsigned int *cfg_reg;
+};
+
+/* ahb and apb bus configuration structure */
+struct bus_clk_config {
+ unsigned int *reg;
+ unsigned int mask;
+ unsigned int shift;
+};
+
+/*
+ * Aux clk configuration structure: applicable to GPT, UART and FIRDA
+ */
+struct aux_clk_config {
+ unsigned int *synth_reg;
+};
+
+/* platform specific clock functions */
+void clk_register(struct clk_lookup *cl);
+void recalc_root_clocks(void);
+
+/* clock recalc functions */
+void follow_parent(struct clk *clk);
+void pll1_clk_recalc(struct clk *clk);
+void bus_clk_recalc(struct clk *clk);
+void gpt_clk_recalc(struct clk *clk);
+void aux_clk_recalc(struct clk *clk);
+
+#endif /* __PLAT_CLOCK_H */
diff --git a/arch/arm/plat-spear/include/plat/debug-macro.S b/arch/arm/plat-spear/include/plat/debug-macro.S
new file mode 100644
index 000000000000..1670734b7e51
--- /dev/null
+++ b/arch/arm/plat-spear/include/plat/debug-macro.S
@@ -0,0 +1,38 @@
+/*
+ * arch/arm/plat-spear/include/plat/debug-macro.S
+ *
+ * Debugging macro include header for spear platform
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/amba/serial.h>
+#include <mach/spear.h>
+
+ .macro addruart, rx
+ mrc p15, 0, \rx, c1, c0
+ tst \rx, #1 @ MMU enabled?
+ moveq \rx, =SPEAR_DBG_UART_BASE @ Physical base
+ movne \rx, =VA_SPEAR_DBG_UART_BASE @ Virtual base
+ .endm
+
+ .macro senduart, rd, rx
+ strb \rd, [\rx, #UART01x_DR] @ ASC_TX_BUFFER
+ .endm
+
+ .macro waituart, rd, rx
+1001: ldr \rd, [\rx, #UART01x_FR] @ FLAG REGISTER
+ tst \rd, #UART01x_FR_TXFF @ TX_FULL
+ bne 1001b
+ .endm
+
+ .macro busyuart, rd, rx
+1002: ldr \rd, [\rx, #UART01x_FR] @ FLAG REGISTER
+ tst \rd, #UART011_FR_TXFE @ TX_EMPTY
+ beq 1002b
+ .endm
diff --git a/arch/arm/plat-spear/include/plat/gpio.h b/arch/arm/plat-spear/include/plat/gpio.h
new file mode 100644
index 000000000000..b857c91257dd
--- /dev/null
+++ b/arch/arm/plat-spear/include/plat/gpio.h
@@ -0,0 +1,24 @@
+/*
+ * arch/arm/plat-spear/include/plat/gpio.h
+ *
+ * GPIO macros for SPEAr platform
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __PLAT_GPIO_H
+#define __PLAT_GPIO_H
+
+#include <asm-generic/gpio.h>
+
+#define gpio_get_value __gpio_get_value
+#define gpio_set_value __gpio_set_value
+#define gpio_cansleep __gpio_cansleep
+#define gpio_to_irq __gpio_to_irq
+
+#endif /* __PLAT_GPIO_H */
diff --git a/arch/arm/plat-spear/include/plat/io.h b/arch/arm/plat-spear/include/plat/io.h
new file mode 100644
index 000000000000..4d4ba822b3eb
--- /dev/null
+++ b/arch/arm/plat-spear/include/plat/io.h
@@ -0,0 +1,22 @@
+/*
+ * arch/arm/plat-spear/include/plat/io.h
+ *
+ * IO definitions for SPEAr platform
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __PLAT_IO_H
+#define __PLAT_IO_H
+
+#define IO_SPACE_LIMIT 0xFFFFFFFF
+
+#define __io(a) __typesafe_io(a)
+#define __mem_pci(a) (a)
+
+#endif /* __PLAT_IO_H */
diff --git a/arch/arm/plat-spear/include/plat/memory.h b/arch/arm/plat-spear/include/plat/memory.h
new file mode 100644
index 000000000000..27a4aba77343
--- /dev/null
+++ b/arch/arm/plat-spear/include/plat/memory.h
@@ -0,0 +1,20 @@
+/*
+ * arch/arm/plat-spear/include/plat/memory.h
+ *
+ * Memory map for SPEAr platform
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __PLAT_MEMORY_H
+#define __PLAT_MEMORY_H
+
+/* Physical DRAM offset */
+#define PHYS_OFFSET UL(0x00000000)
+
+#endif /* __PLAT_MEMORY_H */
diff --git a/arch/arm/plat-spear/include/plat/padmux.h b/arch/arm/plat-spear/include/plat/padmux.h
new file mode 100644
index 000000000000..877f3adcf610
--- /dev/null
+++ b/arch/arm/plat-spear/include/plat/padmux.h
@@ -0,0 +1,92 @@
+/*
+ * arch/arm/plat-spear/include/plat/padmux.h
+ *
+ * SPEAr platform specific gpio pads muxing file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __PLAT_PADMUX_H
+#define __PLAT_PADMUX_H
+
+#include <linux/types.h>
+
+/*
+ * struct pmx_reg: configuration structure for mode reg and mux reg
+ *
+ * offset: offset of mode reg
+ * mask: mask of mode reg
+ */
+struct pmx_reg {
+ u32 offset;
+ u32 mask;
+};
+
+/*
+ * struct pmx_dev_mode: configuration structure for every group of modes of a device
+ *
+ * ids: all modes for this configuration
+ * mask: mask for supported mode
+ */
+struct pmx_dev_mode {
+ u32 ids;
+ u32 mask;
+};
+
+/*
+ * struct pmx_mode: mode definition structure
+ *
+ * name: mode name
+ * id: mode id
+ * mask: mode mask
+ */
+struct pmx_mode {
+ char *name;
+ u32 id;
+ u32 mask;
+};
+
+/*
+ * struct pmx_dev: device definition structure
+ *
+ * name: device name
+ * modes: device configuration array for different modes supported
+ * mode_count: size of modes array
+ * is_active: is peripheral active/enabled
+ * enb_on_reset: if 1, mask bits to be cleared in reg otherwise to be set in reg
+ */
+struct pmx_dev {
+ char *name;
+ struct pmx_dev_mode *modes;
+ u8 mode_count;
+ bool is_active;
+ bool enb_on_reset;
+};
+
+/*
+ * struct pmx_driver: driver definition structure
+ *
+ * mode: mode to be set
+ * devs: array of pointer to pmx devices
+ * devs_count: ARRAY_SIZE of devs
+ * base: base address of soc config registers
+ * mode_reg: structure of mode config register
+ * mux_reg: structure of device mux config register
+ */
+struct pmx_driver {
+ struct pmx_mode *mode;
+ struct pmx_dev **devs;
+ u8 devs_count;
+ u32 *base;
+ struct pmx_reg mode_reg;
+ struct pmx_reg mux_reg;
+};
+
+/* pmx functions */
+int pmx_register(struct pmx_driver *driver);
+
+#endif /* __PLAT_PADMUX_H */
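An illustrative device description built from the structures above; all names, bits and modes are hypothetical rather than taken from a real SoC:

    static struct pmx_dev_mode pmx_uart1_modes[] = {
    	{
    		.ids = 0xff,		/* valid in every mode        */
    		.mask = 0x1 << 4,	/* mux-register bit to toggle */
    	},
    };

    struct pmx_dev pmx_uart1 = {
    	.name = "uart1",
    	.modes = pmx_uart1_modes,
    	.mode_count = ARRAY_SIZE(pmx_uart1_modes),
    	.is_active = false,
    	.enb_on_reset = true,
    };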
diff --git a/arch/arm/plat-spear/include/plat/shirq.h b/arch/arm/plat-spear/include/plat/shirq.h
new file mode 100644
index 000000000000..03ed8b585dcf
--- /dev/null
+++ b/arch/arm/plat-spear/include/plat/shirq.h
@@ -0,0 +1,73 @@
+/*
+ * arch/arm/plat-spear/include/plat/shirq.h
+ *
+ * SPEAr platform shared irq layer header file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __PLAT_SHIRQ_H
+#define __PLAT_SHIRQ_H
+
+#include <linux/irq.h>
+#include <linux/types.h>
+
+/*
+ * struct shirq_dev_config: shared irq device configuration
+ *
+ * virq: virtual irq number of device
+ * enb_mask: enable mask of device
+ * status_mask: status mask of device
+ * clear_mask: clear mask of device
+ */
+struct shirq_dev_config {
+ u32 virq;
+ u32 enb_mask;
+ u32 status_mask;
+ u32 clear_mask;
+};
+
+/*
+ * struct shirq_regs: shared irq register configuration
+ *
+ * base: base address of shared irq register
+ * enb_reg: enable register offset
+ * reset_to_enb: when 1, the bit must be cleared to enable the interrupt
+ * status_reg: status register offset
+ * status_reg_mask: status register valid mask
+ * clear_reg: clear register offset
+ * reset_to_clear: when 1, the bit must be cleared to clear the interrupt
+ */
+struct shirq_regs {
+ void __iomem *base;
+ u32 enb_reg;
+ u32 reset_to_enb;
+ u32 status_reg;
+ u32 status_reg_mask;
+ u32 clear_reg;
+ u32 reset_to_clear;
+};
+
+/*
+ * struct spear_shirq: shared irq structure
+ *
+ * irq: hardware irq number
+ * dev_config: array of device config structures which are using "irq" line
+ * dev_count: size of dev_config array
+ * regs: register configuration for shared irq block
+ */
+struct spear_shirq {
+ u32 irq;
+ struct shirq_dev_config *dev_config;
+ u32 dev_count;
+ struct shirq_regs regs;
+};
+
+int spear_shirq_register(struct spear_shirq *shirq);
+
+#endif /* __PLAT_SHIRQ_H */
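A sketch of one shared-irq block described with the structures above; the irq number, masks and register offsets are hypothetical:

    static struct shirq_dev_config shirq_ras1_devs[] = {
    	{
    		.virq = 100,
    		.enb_mask = 1 << 0,
    		.status_mask = 1 << 0,
    	},
    };

    static struct spear_shirq shirq_ras1 = {
    	.irq = 28,
    	.dev_config = shirq_ras1_devs,
    	.dev_count = ARRAY_SIZE(shirq_ras1_devs),
    	.regs = {
    		/* .base is filled with the ioremapped block at init time */
    		.enb_reg = 0x18,
    		.status_reg = 0x04,
    		.clear_reg = 0x14,
    	},
    };

    /* registered once during machine init: spear_shirq_register(&shirq_ras1); */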
diff --git a/arch/arm/plat-spear/include/plat/system.h b/arch/arm/plat-spear/include/plat/system.h
new file mode 100644
index 000000000000..55a4e405d578
--- /dev/null
+++ b/arch/arm/plat-spear/include/plat/system.h
@@ -0,0 +1,41 @@
+/*
+ * arch/arm/plat-spear/include/plat/system.h
+ *
+ * SPEAr platform specific architecture functions
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __PLAT_SYSTEM_H
+#define __PLAT_SYSTEM_H
+
+#include <asm/hardware/sp810.h>
+#include <linux/io.h>
+#include <mach/spear.h>
+
+static inline void arch_idle(void)
+{
+ /*
+ * This should do all the clock switching
+ * and wait for interrupt tricks
+ */
+ cpu_do_idle();
+}
+
+static inline void arch_reset(char mode, const char *cmd)
+{
+ if (mode == 's') {
+ /* software reset, Jump into ROM at address 0 */
+ cpu_reset(0);
+ } else {
+ /* hardware reset, Use on-chip reset capability */
+ sysctl_soft_reset((void __iomem *)VA_SPEAR_SYS_CTRL_BASE);
+ }
+}
+
+#endif /* __PLAT_SYSTEM_H */
diff --git a/arch/arm/plat-spear/include/plat/timex.h b/arch/arm/plat-spear/include/plat/timex.h
new file mode 100644
index 000000000000..914d09dd50fd
--- /dev/null
+++ b/arch/arm/plat-spear/include/plat/timex.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/plat-spear/include/plat/timex.h
+ *
+ * SPEAr platform specific timex definitions
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __PLAT_TIMEX_H
+#define __PLAT_TIMEX_H
+
+#define CLOCK_TICK_RATE 48000000
+
+#endif /* __PLAT_TIMEX_H */
diff --git a/arch/arm/plat-spear/include/plat/uncompress.h b/arch/arm/plat-spear/include/plat/uncompress.h
new file mode 100644
index 000000000000..99ba6789cc97
--- /dev/null
+++ b/arch/arm/plat-spear/include/plat/uncompress.h
@@ -0,0 +1,43 @@
+/*
+ * arch/arm/plat-spear/include/plat/uncompress.h
+ *
+ * Serial port stubs for kernel decompress status messages
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/io.h>
+#include <linux/amba/serial.h>
+#include <mach/spear.h>
+
+#ifndef __PLAT_UNCOMPRESS_H
+#define __PLAT_UNCOMPRESS_H
+/*
+ * This does not append a newline
+ */
+static inline void putc(int c)
+{
+ void __iomem *base = (void __iomem *)SPEAR_DBG_UART_BASE;
+
+ while (readl(base + UART01x_FR) & UART01x_FR_TXFF)
+ barrier();
+
+ writel(c, base + UART01x_DR);
+}
+
+static inline void flush(void)
+{
+}
+
+/*
+ * nothing to do
+ */
+#define arch_decomp_setup()
+#define arch_decomp_wdog()
+
+#endif /* __PLAT_UNCOMPRESS_H */
diff --git a/arch/arm/plat-spear/include/plat/vmalloc.h b/arch/arm/plat-spear/include/plat/vmalloc.h
new file mode 100644
index 000000000000..09e9372aea21
--- /dev/null
+++ b/arch/arm/plat-spear/include/plat/vmalloc.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/plat-spear/include/plat/vmalloc.h
+ *
+ * Defining Vmalloc area for SPEAr platform
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __PLAT_VMALLOC_H
+#define __PLAT_VMALLOC_H
+
+#define VMALLOC_END 0xF0000000
+
+#endif /* __PLAT_VMALLOC_H */
diff --git a/arch/arm/plat-spear/padmux.c b/arch/arm/plat-spear/padmux.c
new file mode 100644
index 000000000000..d2aab3adcdeb
--- /dev/null
+++ b/arch/arm/plat-spear/padmux.c
@@ -0,0 +1,164 @@
+/*
+ * arch/arm/plat-spear/padmux.c
+ *
+ * SPEAr platform specific gpio pads muxing source file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <plat/padmux.h>
+
+/*
+ * struct pmx: pmx definition structure
+ *
+ * base: base address of configuration registers
+ * mode_reg: mode configurations
+ * mux_reg: muxing configurations
+ * active_mode: pointer to current active mode
+ */
+struct pmx {
+ u32 base;
+ struct pmx_reg mode_reg;
+ struct pmx_reg mux_reg;
+ struct pmx_mode *active_mode;
+};
+
+static struct pmx *pmx;
+
+/**
+ * pmx_mode_set - enables a multiplexing mode
+ * @mode - pointer to pmx mode
+ *
+ * Sets the mode of operation in hardware.
+ * Returns a negative error code on failure, 0 otherwise.
+ */
+static int pmx_mode_set(struct pmx_mode *mode)
+{
+ u32 val;
+
+ if (!mode->name)
+ return -EFAULT;
+
+ pmx->active_mode = mode;
+
+ val = readl(pmx->base + pmx->mode_reg.offset);
+ val &= ~pmx->mode_reg.mask;
+ val |= mode->mask & pmx->mode_reg.mask;
+ writel(val, pmx->base + pmx->mode_reg.offset);
+
+ return 0;
+}
+
+/**
+ * pmx_devs_enable - enables a list of devices
+ * @devs - pointer to pmx device array
+ * @count - number of devices to enable
+ *
+ * Enables the pads for all requested peripherals, once and only once.
+ * If a peripheral is not supported by the current mode its request is
+ * rejected. Conflicts between peripherals are not handled; peripherals are
+ * enabled in the order they appear in the pmx_dev array, so in case of a
+ * conflict the last peripheral enabled wins.
+ * Returns a negative error code on failure, 0 otherwise.
+ */
+static int pmx_devs_enable(struct pmx_dev **devs, u8 count)
+{
+ u32 val, i, mask;
+
+ if (!count)
+ return -EINVAL;
+
+ val = readl(pmx->base + pmx->mux_reg.offset);
+ for (i = 0; i < count; i++) {
+ u8 j = 0;
+
+ if (!devs[i]->name || !devs[i]->modes) {
+ printk(KERN_ERR "padmux: dev name or modes is null\n");
+ continue;
+ }
+ /* check if peripheral exists in active mode */
+ if (pmx->active_mode) {
+ bool found = false;
+ for (j = 0; j < devs[i]->mode_count; j++) {
+ if (devs[i]->modes[j].ids &
+ pmx->active_mode->id) {
+ found = true;
+ break;
+ }
+ }
+ if (found == false) {
+ printk(KERN_ERR "%s device not available in %s "
+ "mode\n", devs[i]->name,
+ pmx->active_mode->name);
+ continue;
+ }
+ }
+
+ /* enable peripheral */
+ mask = devs[i]->modes[j].mask & pmx->mux_reg.mask;
+ if (devs[i]->enb_on_reset)
+ val &= ~mask;
+ else
+ val |= mask;
+
+ devs[i]->is_active = true;
+ }
+ writel(val, pmx->base + pmx->mux_reg.offset);
+ kfree(pmx);
+
+ /* this will ensure that multiplexing can't be changed now */
+ pmx = (struct pmx *)-1;
+
+ return 0;
+}
+
+/**
+ * pmx_register - registers a platform requesting pad mux feature
+ * @driver - pointer to driver structure containing driver specific parameters
+ *
+ * This must be called only once. It allocates memory for the pmx
+ * structure, then calls pmx_mode_set and pmx_devs_enable.
+ * Returns a negative error code on failure, 0 otherwise.
+ */
+int pmx_register(struct pmx_driver *driver)
+{
+ int ret = 0;
+
+ if (pmx)
+ return -EPERM;
+ if (!driver->base || !driver->devs)
+ return -EFAULT;
+
+ pmx = kzalloc(sizeof(*pmx), GFP_KERNEL);
+ if (!pmx)
+ return -ENOMEM;
+
+ pmx->base = (u32)driver->base;
+ pmx->mode_reg.offset = driver->mode_reg.offset;
+ pmx->mode_reg.mask = driver->mode_reg.mask;
+ pmx->mux_reg.offset = driver->mux_reg.offset;
+ pmx->mux_reg.mask = driver->mux_reg.mask;
+
+ /* choose mode to enable */
+ if (driver->mode) {
+ ret = pmx_mode_set(driver->mode);
+ if (ret)
+ goto pmx_fail;
+ }
+ ret = pmx_devs_enable(driver->devs, driver->devs_count);
+ if (ret)
+ goto pmx_fail;
+
+ return 0;
+
+pmx_fail:
+ return ret;
+}
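A hedged usage sketch for pmx_register() follows; struct pmx_driver, pmx_dev and pmx_mode live in plat/padmux.h (not shown in this patch), so the field names are inferred from how padmux.c dereferences them, the element type of ->modes is assumed here to be called struct pmx_dev_mode, and all masks, ids and register values are invented:

#include <linux/init.h>
#include <linux/kernel.h>
#include <plat/padmux.h>

static struct pmx_mode demo_mode = {
	.name = "demo mode",
	.id = 1 << 0,
	.mask = 0x01,
};

/* field names follow the devs[i]->modes[j].ids / .mask accesses above */
static struct pmx_dev_mode demo_uart_modes[] = {
	{ .ids = 1 << 0, .mask = 1 << 3 },
};

static struct pmx_dev pmx_demo_uart = {
	.name = "uart",
	.modes = demo_uart_modes,
	.mode_count = ARRAY_SIZE(demo_uart_modes),
	.enb_on_reset = 0,
};

static struct pmx_dev *demo_pmx_devs[] = { &pmx_demo_uart };

static struct pmx_driver demo_pmx_driver = {
	.mode = &demo_mode,
	.devs = demo_pmx_devs,
	.devs_count = ARRAY_SIZE(demo_pmx_devs),
	/* .base, .mode_reg and .mux_reg would be filled in from the SoC's
	 * mode/mux configuration registers before calling pmx_register() */
};

static int __init demo_pmx_init(void)
{
	return pmx_register(&demo_pmx_driver);
}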
diff --git a/arch/arm/plat-spear/shirq.c b/arch/arm/plat-spear/shirq.c
new file mode 100644
index 000000000000..2172d6946aea
--- /dev/null
+++ b/arch/arm/plat-spear/shirq.c
@@ -0,0 +1,118 @@
+/*
+ * arch/arm/plat-spear/shirq.c
+ *
+ * SPEAr platform shared irq layer source file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/spinlock.h>
+#include <plat/shirq.h>
+
+struct spear_shirq *shirq;
+static DEFINE_SPINLOCK(lock);
+
+static void shirq_irq_mask(unsigned irq)
+{
+ struct spear_shirq *shirq = get_irq_chip_data(irq);
+ u32 val, id = irq - shirq->dev_config[0].virq;
+ unsigned long flags;
+
+ if ((shirq->regs.enb_reg == -1) || shirq->dev_config[id].enb_mask == -1)
+ return;
+
+ spin_lock_irqsave(&lock, flags);
+ val = readl(shirq->regs.base + shirq->regs.enb_reg);
+ if (shirq->regs.reset_to_enb)
+ val |= shirq->dev_config[id].enb_mask;
+ else
+ val &= ~(shirq->dev_config[id].enb_mask);
+ writel(val, shirq->regs.base + shirq->regs.enb_reg);
+ spin_unlock_irqrestore(&lock, flags);
+}
+
+static void shirq_irq_unmask(unsigned irq)
+{
+ struct spear_shirq *shirq = get_irq_chip_data(irq);
+ u32 val, id = irq - shirq->dev_config[0].virq;
+ unsigned long flags;
+
+ if ((shirq->regs.enb_reg == -1) || shirq->dev_config[id].enb_mask == -1)
+ return;
+
+ spin_lock_irqsave(&lock, flags);
+ val = readl(shirq->regs.base + shirq->regs.enb_reg);
+ if (shirq->regs.reset_to_enb)
+ val &= ~(shirq->dev_config[id].enb_mask);
+ else
+ val |= shirq->dev_config[id].enb_mask;
+ writel(val, shirq->regs.base + shirq->regs.enb_reg);
+ spin_unlock_irqrestore(&lock, flags);
+}
+
+static struct irq_chip shirq_chip = {
+ .name = "spear_shirq",
+ .ack = shirq_irq_mask,
+ .mask = shirq_irq_mask,
+ .unmask = shirq_irq_unmask,
+};
+
+static void shirq_handler(unsigned irq, struct irq_desc *desc)
+{
+ u32 i, val, mask;
+ struct spear_shirq *shirq = get_irq_data(irq);
+
+ desc->chip->ack(irq);
+ while ((val = readl(shirq->regs.base + shirq->regs.status_reg) &
+ shirq->regs.status_reg_mask)) {
+ for (i = 0; (i < shirq->dev_count) && val; i++) {
+ if (!(shirq->dev_config[i].status_mask & val))
+ continue;
+
+ generic_handle_irq(shirq->dev_config[i].virq);
+
+ /* clear interrupt */
+ val &= ~shirq->dev_config[i].status_mask;
+ if ((shirq->regs.clear_reg == -1) ||
+ shirq->dev_config[i].clear_mask == -1)
+ continue;
+ mask = readl(shirq->regs.base + shirq->regs.clear_reg);
+ if (shirq->regs.reset_to_clear)
+ mask &= ~shirq->dev_config[i].clear_mask;
+ else
+ mask |= shirq->dev_config[i].clear_mask;
+ writel(mask, shirq->regs.base + shirq->regs.clear_reg);
+ }
+ }
+ desc->chip->unmask(irq);
+}
+
+int spear_shirq_register(struct spear_shirq *shirq)
+{
+ int i;
+
+ if (!shirq || !shirq->dev_config || !shirq->regs.base)
+ return -EFAULT;
+
+ if (!shirq->dev_count)
+ return -EINVAL;
+
+ set_irq_chained_handler(shirq->irq, shirq_handler);
+ for (i = 0; i < shirq->dev_count; i++) {
+ set_irq_chip(shirq->dev_config[i].virq, &shirq_chip);
+ set_irq_handler(shirq->dev_config[i].virq, handle_simple_irq);
+ set_irq_flags(shirq->dev_config[i].virq, IRQF_VALID);
+ set_irq_chip_data(shirq->dev_config[i].virq, shirq);
+ }
+
+ set_irq_data(shirq->irq, shirq);
+ return 0;
+}
diff --git a/arch/arm/plat-spear/time.c b/arch/arm/plat-spear/time.c
new file mode 100644
index 000000000000..a1025d38f383
--- /dev/null
+++ b/arch/arm/plat-spear/time.c
@@ -0,0 +1,292 @@
+/*
+ * arch/arm/plat-spear/time.c
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Shiraz Hashim<shiraz.hashim@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/time.h>
+#include <linux/irq.h>
+#include <asm/mach/time.h>
+#include <mach/irqs.h>
+#include <mach/hardware.h>
+#include <mach/spear.h>
+#include <mach/generic.h>
+
+/*
+ * We use TIMER0 and TIMER1 as the clockevent and the clocksource respectively.
+ * Timer0 and Timer1 both belong to the same gpt block in the cpu subsystem and
+ * share the same functional clock, so any change to that clock affects both
+ * timers.
+ */
+
+#define CLKEVT 0 /* gpt0, channel0 as clockevent */
+#define CLKSRC 1 /* gpt0, channel1 as clocksource */
+
+/* Register offsets, x is channel number */
+#define CR(x) ((x) * 0x80 + 0x80)
+#define IR(x) ((x) * 0x80 + 0x84)
+#define LOAD(x) ((x) * 0x80 + 0x88)
+#define COUNT(x) ((x) * 0x80 + 0x8C)
+
+/* Reg bit definitions */
+#define CTRL_INT_ENABLE 0x0100
+#define CTRL_ENABLE 0x0020
+#define CTRL_ONE_SHOT 0x0010
+
+#define CTRL_PRESCALER1 0x0
+#define CTRL_PRESCALER2 0x1
+#define CTRL_PRESCALER4 0x2
+#define CTRL_PRESCALER8 0x3
+#define CTRL_PRESCALER16 0x4
+#define CTRL_PRESCALER32 0x5
+#define CTRL_PRESCALER64 0x6
+#define CTRL_PRESCALER128 0x7
+#define CTRL_PRESCALER256 0x8
+
+#define INT_STATUS 0x1
+
+static __iomem void *gpt_base;
+static struct clk *gpt_clk;
+
+static void clockevent_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *clk_event_dev);
+static int clockevent_next_event(unsigned long evt,
+ struct clock_event_device *clk_event_dev);
+
+/*
+ * Following clocksource_set_clock and clockevent_set_clock picked
+ * from arch/mips/kernel/time.c
+ */
+
+void __init clocksource_set_clock(struct clocksource *cs, unsigned int clock)
+{
+ u64 temp;
+ u32 shift;
+
+ /* Find a shift value */
+ for (shift = 32; shift > 0; shift--) {
+ temp = (u64) NSEC_PER_SEC << shift;
+ do_div(temp, clock);
+ if ((temp >> 32) == 0)
+ break;
+ }
+ cs->shift = shift;
+ cs->mult = (u32) temp;
+}
+
+void __init clockevent_set_clock(struct clock_event_device *cd,
+ unsigned int clock)
+{
+ u64 temp;
+ u32 shift;
+
+ /* Find a shift value */
+ for (shift = 32; shift > 0; shift--) {
+ temp = (u64) clock << shift;
+ do_div(temp, NSEC_PER_SEC);
+ if ((temp >> 32) == 0)
+ break;
+ }
+ cd->shift = shift;
+ cd->mult = (u32) temp;
+}
+
+static cycle_t clocksource_read_cycles(struct clocksource *cs)
+{
+ return (cycle_t) readw(gpt_base + COUNT(CLKSRC));
+}
+
+static struct clocksource clksrc = {
+ .name = "tmr1",
+ .rating = 200, /* it's a pretty decent clock */
+ .read = clocksource_read_cycles,
+ .mask = 0xFFFF, /* 16 bits */
+ .mult = 0, /* to be computed */
+ .shift = 0, /* to be computed */
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static void spear_clocksource_init(void)
+{
+ u32 tick_rate;
+ u16 val;
+
+ /* program the prescaler (/256)*/
+ writew(CTRL_PRESCALER256, gpt_base + CR(CLKSRC));
+
+ /* find out actual clock driving Timer */
+ tick_rate = clk_get_rate(gpt_clk);
+ tick_rate >>= CTRL_PRESCALER256;
+
+ writew(0xFFFF, gpt_base + LOAD(CLKSRC));
+
+ val = readw(gpt_base + CR(CLKSRC));
+ val &= ~CTRL_ONE_SHOT; /* autoreload mode */
+ val |= CTRL_ENABLE;
+ writew(val, gpt_base + CR(CLKSRC));
+
+ clocksource_set_clock(&clksrc, tick_rate);
+
+ /* register the clocksource */
+ clocksource_register(&clksrc);
+}
+
+static struct clock_event_device clkevt = {
+ .name = "tmr0",
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .set_mode = clockevent_set_mode,
+ .set_next_event = clockevent_next_event,
+ .shift = 0, /* to be computed */
+};
+
+static void clockevent_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *clk_event_dev)
+{
+ u32 period;
+ u16 val;
+
+ /* stop the timer */
+ val = readw(gpt_base + CR(CLKEVT));
+ val &= ~CTRL_ENABLE;
+ writew(val, gpt_base + CR(CLKEVT));
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ period = clk_get_rate(gpt_clk) / HZ;
+ period >>= CTRL_PRESCALER16;
+ writew(period, gpt_base + LOAD(CLKEVT));
+
+ val = readw(gpt_base + CR(CLKEVT));
+ val &= ~CTRL_ONE_SHOT;
+ val |= CTRL_ENABLE | CTRL_INT_ENABLE;
+ writew(val, gpt_base + CR(CLKEVT));
+
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ val = readw(gpt_base + CR(CLKEVT));
+ val |= CTRL_ONE_SHOT;
+ writew(val, gpt_base + CR(CLKEVT));
+
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_RESUME:
+
+ break;
+ default:
+ pr_err("Invalid mode requested\n");
+ break;
+ }
+}
+
+static int clockevent_next_event(unsigned long cycles,
+ struct clock_event_device *clk_event_dev)
+{
+ u16 val;
+
+ writew(cycles, gpt_base + LOAD(CLKEVT));
+
+ val = readw(gpt_base + CR(CLKEVT));
+ val |= CTRL_ENABLE | CTRL_INT_ENABLE;
+ writew(val, gpt_base + CR(CLKEVT));
+
+ return 0;
+}
+
+static irqreturn_t spear_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = &clkevt;
+
+ writew(INT_STATUS, gpt_base + IR(CLKEVT));
+
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction spear_timer_irq = {
+ .name = "timer",
+ .flags = IRQF_DISABLED | IRQF_TIMER,
+ .handler = spear_timer_interrupt
+};
+
+static void __init spear_clockevent_init(void)
+{
+ u32 tick_rate;
+
+ /* program the prescaler */
+ writew(CTRL_PRESCALER16, gpt_base + CR(CLKEVT));
+
+ tick_rate = clk_get_rate(gpt_clk);
+ tick_rate >>= CTRL_PRESCALER16;
+
+ clockevent_set_clock(&clkevt, tick_rate);
+
+ clkevt.max_delta_ns = clockevent_delta2ns(0xfff0,
+ &clkevt);
+ clkevt.min_delta_ns = clockevent_delta2ns(3, &clkevt);
+
+ clkevt.cpumask = cpumask_of(0);
+
+ clockevents_register_device(&clkevt);
+
+ setup_irq(SPEAR_GPT0_CHAN0_IRQ, &spear_timer_irq);
+}
+
+void __init spear_setup_timer(void)
+{
+ struct clk *pll3_clk;
+
+ if (!request_mem_region(SPEAR_GPT0_BASE, SZ_1K, "gpt0")) {
+ pr_err("%s:cannot get IO addr\n", __func__);
+ return;
+ }
+
+ gpt_base = (void __iomem *)ioremap(SPEAR_GPT0_BASE, SZ_1K);
+ if (!gpt_base) {
+ pr_err("%s:ioremap failed for gpt\n", __func__);
+ goto err_mem;
+ }
+
+ gpt_clk = clk_get_sys("gpt0", NULL);
+ if (IS_ERR(gpt_clk)) {
+ pr_err("%s:couldn't get clk for gpt\n", __func__);
+ goto err_iomap;
+ }
+
+ pll3_clk = clk_get(NULL, "pll3_48m_clk");
+ if (IS_ERR(pll3_clk)) {
+ pr_err("%s:couldn't get PLL3 as parent for gpt\n", __func__);
+ goto err_iomap;
+ }
+
+ clk_set_parent(gpt_clk, pll3_clk);
+
+ spear_clockevent_init();
+ spear_clocksource_init();
+
+ return;
+
+err_iomap:
+ iounmap(gpt_base);
+
+err_mem:
+ release_mem_region(SPEAR_GPT0_BASE, SZ_1K);
+}
+
+struct sys_timer spear_sys_timer = {
+ .init = spear_setup_timer,
+};
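One detail worth spelling out: the CTRL_PRESCALERn values double as shift counts, so writing CTRL_PRESCALER256 (0x8) and then doing tick_rate >>= CTRL_PRESCALER256 divides the nominally 48 MHz gpt clock by 256, giving 187500 Hz for the clocksource. The small user-space sketch below mirrors clocksource_set_clock() to show the mult/shift pair that falls out of that rate (constants are copied from the code above, not from hardware measurements):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t nsec_per_sec = 1000000000ULL;
	const uint32_t clock = 48000000 >> 8;	/* 48 MHz / 256 = 187500 Hz */
	uint64_t mult = 0;
	uint32_t shift;

	/* same search as clocksource_set_clock(): largest shift whose mult fits in 32 bits */
	for (shift = 32; shift > 0; shift--) {
		mult = (nsec_per_sec << shift) / clock;
		if ((mult >> 32) == 0)
			break;
	}

	/* the clocksource core then converts cycles with: ns = (cycles * mult) >> shift */
	printf("mult=%llu shift=%u -> one tick = %llu ns\n",
	       (unsigned long long)mult, shift,
	       (unsigned long long)(mult >> shift));
	return 0;
}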
diff --git a/arch/arm/plat-versatile/Makefile b/arch/arm/plat-versatile/Makefile
new file mode 100644
index 000000000000..9b1a66816aa6
--- /dev/null
+++ b/arch/arm/plat-versatile/Makefile
@@ -0,0 +1,4 @@
+obj-y := clock.o
+obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp.o
+obj-$(CONFIG_ARCH_REALVIEW) += sched-clock.o
+obj-$(CONFIG_ARCH_VERSATILE) += sched-clock.o
diff --git a/arch/arm/mach-integrator/clock.c b/arch/arm/plat-versatile/clock.c
index 989ecf5f5c46..5c8b6564fdc2 100644
--- a/arch/arm/mach-integrator/clock.c
+++ b/arch/arm/plat-versatile/clock.c
@@ -1,5 +1,5 @@
/*
- * linux/arch/arm/mach-integrator/clock.c
+ * linux/arch/arm/plat-versatile/clock.c
*
* Copyright (C) 2004 ARM Limited.
* Written by Deep Blue Solutions Limited.
@@ -14,7 +14,8 @@
#include <linux/clk.h>
#include <linux/mutex.h>
-#include <asm/clkdev.h>
+#include <asm/hardware/icst.h>
+
#include <mach/clkdev.h>
int clk_enable(struct clk *clk)
@@ -36,24 +37,38 @@ EXPORT_SYMBOL(clk_get_rate);
long clk_round_rate(struct clk *clk, unsigned long rate)
{
- struct icst525_vco vco;
- vco = icst525_khz_to_vco(clk->params, rate / 1000);
- return icst525_khz(clk->params, vco) * 1000;
+ long ret = -EIO;
+ if (clk->ops && clk->ops->round)
+ ret = clk->ops->round(clk, rate);
+ return ret;
}
EXPORT_SYMBOL(clk_round_rate);
int clk_set_rate(struct clk *clk, unsigned long rate)
{
int ret = -EIO;
-
- if (clk->setvco) {
- struct icst525_vco vco;
-
- vco = icst525_khz_to_vco(clk->params, rate / 1000);
- clk->rate = icst525_khz(clk->params, vco) * 1000;
- clk->setvco(clk, vco);
- ret = 0;
- }
+ if (clk->ops && clk->ops->set)
+ ret = clk->ops->set(clk, rate);
return ret;
}
EXPORT_SYMBOL(clk_set_rate);
+
+long icst_clk_round(struct clk *clk, unsigned long rate)
+{
+ struct icst_vco vco;
+ vco = icst_hz_to_vco(clk->params, rate);
+ return icst_hz(clk->params, vco);
+}
+EXPORT_SYMBOL(icst_clk_round);
+
+int icst_clk_set(struct clk *clk, unsigned long rate)
+{
+ struct icst_vco vco;
+
+ vco = icst_hz_to_vco(clk->params, rate);
+ clk->rate = icst_hz(clk->params, vco);
+ clk->ops->setvco(clk, vco);
+
+ return 0;
+}
+EXPORT_SYMBOL(icst_clk_set);
diff --git a/arch/arm/plat-versatile/include/plat/clock.h b/arch/arm/plat-versatile/include/plat/clock.h
new file mode 100644
index 000000000000..3cfb024ccd70
--- /dev/null
+++ b/arch/arm/plat-versatile/include/plat/clock.h
@@ -0,0 +1,15 @@
+#ifndef PLAT_CLOCK_H
+#define PLAT_CLOCK_H
+
+#include <asm/hardware/icst.h>
+
+struct clk_ops {
+ long (*round)(struct clk *, unsigned long);
+ int (*set)(struct clk *, unsigned long);
+ void (*setvco)(struct clk *, struct icst_vco);
+};
+
+int icst_clk_set(struct clk *, unsigned long);
+long icst_clk_round(struct clk *, unsigned long);
+
+#endif
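A hedged sketch of how a Versatile-family machine might wire these ops up; struct clk itself is declared in mach/clkdev.h (outside this patch), so only the clk_ops table and the shape of the setvco callback are shown, and the register poke is left as a placeholder:

#include <plat/clock.h>

/* board-specific hook invoked by icst_clk_set() via clk->ops->setvco */
static void demo_setvco(struct clk *clk, struct icst_vco vco)
{
	/* program the ICST oscillator's VCO control register here */
}

static struct clk_ops demo_icst_ops = {
	.round	= icst_clk_round,
	.set	= icst_clk_set,
	.setvco	= demo_setvco,
};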
diff --git a/arch/arm/plat-versatile/include/plat/timer-sp.h b/arch/arm/plat-versatile/include/plat/timer-sp.h
new file mode 100644
index 000000000000..21e75e30d497
--- /dev/null
+++ b/arch/arm/plat-versatile/include/plat/timer-sp.h
@@ -0,0 +1,2 @@
+void sp804_clocksource_init(void __iomem *);
+void sp804_clockevents_init(void __iomem *, unsigned int);
diff --git a/arch/arm/plat-versatile/sched-clock.c b/arch/arm/plat-versatile/sched-clock.c
new file mode 100644
index 000000000000..9768cf7e83d7
--- /dev/null
+++ b/arch/arm/plat-versatile/sched-clock.c
@@ -0,0 +1,53 @@
+/*
+ * linux/arch/arm/plat-versatile/sched-clock.c
+ *
+ * Copyright (C) 1999 - 2003 ARM Limited
+ * Copyright (C) 2000 Deep Blue Solutions Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/cnt32_to_63.h>
+#include <linux/io.h>
+#include <asm/div64.h>
+
+#include <mach/hardware.h>
+#include <mach/platform.h>
+
+#ifdef VERSATILE_SYS_BASE
+#define REFCOUNTER (__io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_24MHz_OFFSET)
+#endif
+
+#ifdef REALVIEW_SYS_BASE
+#define REFCOUNTER (__io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_24MHz_OFFSET)
+#endif
+
+/*
+ * This is the Realview and Versatile sched_clock implementation. This
+ * has a resolution of 41.7ns, and a maximum value of about 35583 days.
+ *
+ * The return value is guaranteed to be monotonic in that range as
+ * long as there is always less than 89 seconds between successive
+ * calls to this function.
+ */
+unsigned long long sched_clock(void)
+{
+ unsigned long long v = cnt32_to_63(readl(REFCOUNTER));
+
+ /* the <<1 gets rid of the cnt_32_to_63 top bit saving on a bic insn */
+ v *= 125<<1;
+ do_div(v, 3<<1);
+
+ return v;
+}
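The 125/3 scaling above is just 1000/24 reduced: with a 24 MHz reference counter each tick is 1000/24 ≈ 41.67 ns, which is where the 41.7 ns resolution quoted in the comment comes from. A quick standalone check (plain user-space C, factors copied from the code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t ticks = 24000000;			/* one second of 24 MHz ticks */
	uint64_t ns = ticks * (125 << 1) / (3 << 1);	/* same factors as sched_clock() */

	printf("%llu ticks -> %llu ns (%.2f ns per tick)\n",
	       (unsigned long long)ticks, (unsigned long long)ns,
	       (double)(125 << 1) / (3 << 1));
	return 0;
}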
diff --git a/arch/arm/plat-versatile/timer-sp.c b/arch/arm/plat-versatile/timer-sp.c
new file mode 100644
index 000000000000..fb0d1c299718
--- /dev/null
+++ b/arch/arm/plat-versatile/timer-sp.c
@@ -0,0 +1,156 @@
+/*
+ * linux/arch/arm/plat-versatile/timer-sp.c
+ *
+ * Copyright (C) 1999 - 2003 ARM Limited
+ * Copyright (C) 2000 Deep Blue Solutions Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+
+#include <asm/hardware/arm_timer.h>
+
+#include <plat/timer-sp.h>
+
+/*
+ * These timers are currently always setup to be clocked at 1MHz.
+ */
+#define TIMER_FREQ_KHZ (1000)
+#define TIMER_RELOAD (TIMER_FREQ_KHZ * 1000 / HZ)
+
+static void __iomem *clksrc_base;
+
+static cycle_t sp804_read(struct clocksource *cs)
+{
+ return ~readl(clksrc_base + TIMER_VALUE);
+}
+
+static struct clocksource clocksource_sp804 = {
+ .name = "timer3",
+ .rating = 200,
+ .read = sp804_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .shift = 20,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+void __init sp804_clocksource_init(void __iomem *base)
+{
+ struct clocksource *cs = &clocksource_sp804;
+
+ clksrc_base = base;
+
+ /* setup timer 0 as free-running clocksource */
+ writel(0, clksrc_base + TIMER_CTRL);
+ writel(0xffffffff, clksrc_base + TIMER_LOAD);
+ writel(0xffffffff, clksrc_base + TIMER_VALUE);
+ writel(TIMER_CTRL_32BIT | TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC,
+ clksrc_base + TIMER_CTRL);
+
+ cs->mult = clocksource_khz2mult(TIMER_FREQ_KHZ, cs->shift);
+ clocksource_register(cs);
+}
+
+
+static void __iomem *clkevt_base;
+
+/*
+ * IRQ handler for the timer
+ */
+static irqreturn_t sp804_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+
+ /* clear the interrupt */
+ writel(1, clkevt_base + TIMER_INTCLR);
+
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static void sp804_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ unsigned long ctrl = TIMER_CTRL_32BIT | TIMER_CTRL_IE;
+
+ writel(ctrl, clkevt_base + TIMER_CTRL);
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ writel(TIMER_RELOAD, clkevt_base + TIMER_LOAD);
+ ctrl |= TIMER_CTRL_PERIODIC | TIMER_CTRL_ENABLE;
+ break;
+
+ case CLOCK_EVT_MODE_ONESHOT:
+ /* period set, and timer enabled in 'next_event' hook */
+ ctrl |= TIMER_CTRL_ONESHOT;
+ break;
+
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ default:
+ break;
+ }
+
+ writel(ctrl, clkevt_base + TIMER_CTRL);
+}
+
+static int sp804_set_next_event(unsigned long next,
+ struct clock_event_device *evt)
+{
+ unsigned long ctrl = readl(clkevt_base + TIMER_CTRL);
+
+ writel(next, clkevt_base + TIMER_LOAD);
+ writel(ctrl | TIMER_CTRL_ENABLE, clkevt_base + TIMER_CTRL);
+
+ return 0;
+}
+
+static struct clock_event_device sp804_clockevent = {
+ .name = "timer0",
+ .shift = 32,
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .set_mode = sp804_set_mode,
+ .set_next_event = sp804_set_next_event,
+ .rating = 300,
+ .cpumask = cpu_all_mask,
+};
+
+static struct irqaction sp804_timer_irq = {
+ .name = "timer",
+ .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .handler = sp804_timer_interrupt,
+ .dev_id = &sp804_clockevent,
+};
+
+void __init sp804_clockevents_init(void __iomem *base, unsigned int timer_irq)
+{
+ struct clock_event_device *evt = &sp804_clockevent;
+
+ clkevt_base = base;
+
+ evt->irq = timer_irq;
+ evt->mult = div_sc(TIMER_FREQ_KHZ, NSEC_PER_MSEC, evt->shift);
+ evt->max_delta_ns = clockevent_delta2ns(0xffffffff, evt);
+ evt->min_delta_ns = clockevent_delta2ns(0xf, evt);
+
+ setup_irq(timer_irq, &sp804_timer_irq);
+ clockevents_register_device(evt);
+}
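For completeness, a hedged sketch of how a machine port might use the two entry points exported by plat/timer-sp.h; the physical base, irq number and the 0x20 offset to the second timer in the block are illustrative placeholders, not values taken from this patch:

#include <linux/init.h>
#include <linux/io.h>
#include <asm/mach/time.h>
#include <plat/timer-sp.h>

#define DEMO_TIMER01_BASE	0x10011000	/* made-up physical address */
#define DEMO_TIMER1_IRQ		36		/* made-up interrupt number */

static void __init demo_timer_init(void)
{
	void __iomem *base = ioremap(DEMO_TIMER01_BASE, 0x1000);

	if (!base)
		return;

	/* second timer of the pair as clockevent, first one as clocksource */
	sp804_clockevents_init(base + 0x20, DEMO_TIMER1_IRQ);
	sp804_clocksource_init(base);
}

struct sys_timer demo_sys_timer = {
	.init	= demo_timer_init,
};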
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index f27aa3b259fa..668ed2817e51 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -110,17 +110,17 @@ static struct clock_event_device comparator = {
.set_mode = comparator_mode,
};
+void read_persistent_clock(struct timespec *ts)
+{
+ ts->tv_sec = mktime(2007, 1, 1, 0, 0, 0);
+ ts->tv_nsec = 0;
+}
+
void __init time_init(void)
{
unsigned long counter_hz;
int ret;
- xtime.tv_sec = mktime(2007, 1, 1, 0, 0, 0);
- xtime.tv_nsec = 0;
-
- set_normalized_timespec(&wall_to_monotonic,
- -xtime.tv_sec, -xtime.tv_nsec);
-
/* figure rate for counter */
counter_hz = clk_get_rate(boot_cpu_data.clk);
counter.mult = clocksource_hz2mult(counter_hz, counter.shift);
diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
index 2c501ceb1e55..7367aea4ae59 100644
--- a/arch/blackfin/kernel/kgdb.c
+++ b/arch/blackfin/kernel/kgdb.c
@@ -439,6 +439,11 @@ int kgdb_validate_break_address(unsigned long addr)
return -EFAULT;
}
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+ regs->retx = ip;
+}
+
int kgdb_arch_init(void)
{
kgdb_single_step = 0;
diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c
index cb7a01d4f009..8c9a43daf80f 100644
--- a/arch/blackfin/kernel/time-ts.c
+++ b/arch/blackfin/kernel/time-ts.c
@@ -353,9 +353,15 @@ void bfin_coretmr_clockevent_init(void)
#endif /* CONFIG_TICKSOURCE_CORETMR */
-void __init time_init(void)
+void read_persistent_clock(struct timespec *ts)
{
time_t secs_since_1970 = (365 * 37 + 9) * 24 * 60 * 60; /* 1 Jan 2007 */
+ ts->tv_sec = secs_since_1970;
+ ts->tv_nsec = 0;
+}
+
+void __init time_init(void)
+{
#ifdef CONFIG_RTC_DRV_BFIN
/* [#2663] hack to filter junk RTC values that would cause
@@ -368,11 +374,6 @@ void __init time_init(void)
}
#endif
- /* Initialize xtime. From now on, xtime is updated with timer interrupts */
- xtime.tv_sec = secs_since_1970;
- xtime.tv_nsec = 0;
- set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
-
bfin_cs_cycles_init();
bfin_cs_gptimer0_init();
diff --git a/arch/blackfin/kernel/time.c b/arch/blackfin/kernel/time.c
index 13c1ee3e6408..c9113619029f 100644
--- a/arch/blackfin/kernel/time.c
+++ b/arch/blackfin/kernel/time.c
@@ -112,11 +112,6 @@ u32 arch_gettimeoffset(void)
}
#endif
-static inline int set_rtc_mmss(unsigned long nowtime)
-{
- return 0;
-}
-
/*
* timer_interrupt() needs to keep up the real-time clock,
* as well as call the "do_timer()" routine every clocktick
@@ -126,29 +121,8 @@ __attribute__((l1_text))
#endif
irqreturn_t timer_interrupt(int irq, void *dummy)
{
- /* last time the cmos clock got updated */
- static long last_rtc_update;
-
write_seqlock(&xtime_lock);
do_timer(1);
-
- /*
- * If we have an externally synchronized Linux clock, then update
- * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
- * called as close as possible to 500 ms before the new second starts.
- */
- if (ntp_synced() &&
- xtime.tv_sec > last_rtc_update + 660 &&
- (xtime.tv_nsec / NSEC_PER_USEC) >=
- 500000 - ((unsigned)TICK_SIZE) / 2
- && (xtime.tv_nsec / NSEC_PER_USEC) <=
- 500000 + ((unsigned)TICK_SIZE) / 2) {
- if (set_rtc_mmss(xtime.tv_sec) == 0)
- last_rtc_update = xtime.tv_sec;
- else
- /* Do it again in 60s. */
- last_rtc_update = xtime.tv_sec - 600;
- }
write_sequnlock(&xtime_lock);
#ifdef CONFIG_IPIPE
@@ -161,10 +135,15 @@ irqreturn_t timer_interrupt(int irq, void *dummy)
return IRQ_HANDLED;
}
-void __init time_init(void)
+void read_persistent_clock(struct timespec *ts)
{
time_t secs_since_1970 = (365 * 37 + 9) * 24 * 60 * 60; /* 1 Jan 2007 */
+ ts->tv_sec = secs_since_1970;
+ ts->tv_nsec = 0;
+}
+void __init time_init(void)
+{
#ifdef CONFIG_RTC_DRV_BFIN
/* [#2663] hack to filter junk RTC values that would cause
* userspace to have to deal with time values greater than
@@ -176,11 +155,5 @@ void __init time_init(void)
}
#endif
- /* Initialize xtime. From now on, xtime is updated with timer interrupts */
- xtime.tv_sec = secs_since_1970;
- xtime.tv_nsec = 0;
-
- wall_to_monotonic.tv_sec = -xtime.tv_sec;
-
time_sched_init(timer_interrupt);
}
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index 059eac6abda1..e25bf4440b51 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -23,6 +23,9 @@ config RWSEM_XCHGADD_ALGORITHM
config GENERIC_TIME
def_bool y
+config GENERIC_CMOS_UPDATE
+ def_bool y
+
config ARCH_USES_GETTIMEOFFSET
def_bool y
diff --git a/arch/cris/arch-v10/drivers/ds1302.c b/arch/cris/arch-v10/drivers/ds1302.c
index 77630df94343..884275629ef7 100644
--- a/arch/cris/arch-v10/drivers/ds1302.c
+++ b/arch/cris/arch-v10/drivers/ds1302.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/delay.h>
+#include <linux/smp_lock.h>
#include <linux/bcd.h>
#include <linux/capability.h>
@@ -238,9 +239,7 @@ static unsigned char days_in_mo[] =
/* ioctl that supports RTC_RD_TIME and RTC_SET_TIME (read and set time/date). */
-static int
-rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
- unsigned long arg)
+static int rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
unsigned long flags;
@@ -354,6 +353,17 @@ rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
}
}
+static long rtc_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = rtc_ioctl(file, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
static void
print_rtc_status(void)
{
@@ -375,8 +385,8 @@ print_rtc_status(void)
/* The various file operations we support. */
static const struct file_operations rtc_fops = {
- .owner = THIS_MODULE,
- .ioctl = rtc_ioctl,
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = rtc_unlocked_ioctl,
};
/* Probe for the chip by writing something to its RAM and try reading it back. */
diff --git a/arch/cris/arch-v10/drivers/pcf8563.c b/arch/cris/arch-v10/drivers/pcf8563.c
index 1e90c1a9c849..7dcb1f85f42b 100644
--- a/arch/cris/arch-v10/drivers/pcf8563.c
+++ b/arch/cris/arch-v10/drivers/pcf8563.c
@@ -27,6 +27,7 @@
#include <linux/delay.h>
#include <linux/bcd.h>
#include <linux/mutex.h>
+#include <linux/smp_lock.h>
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -53,7 +54,7 @@ static DEFINE_MUTEX(rtc_lock); /* Protect state etc */
static const unsigned char days_in_month[] =
{ 0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 };
-int pcf8563_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
+static long pcf8563_unlocked_ioctl(struct file *, unsigned int, unsigned long);
/* Cache VL bit value read at driver init since writing the RTC_SECOND
* register clears the VL status.
@@ -62,7 +63,7 @@ static int voltage_low;
static const struct file_operations pcf8563_fops = {
.owner = THIS_MODULE,
- .ioctl = pcf8563_ioctl,
+ .unlocked_ioctl = pcf8563_unlocked_ioctl,
};
unsigned char
@@ -212,8 +213,7 @@ pcf8563_exit(void)
* ioctl calls for this driver. Why return -ENOTTY upon error? Because
* POSIX says so!
*/
-int pcf8563_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
- unsigned long arg)
+static int pcf8563_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
/* Some sanity checks. */
if (_IOC_TYPE(cmd) != RTC_MAGIC)
@@ -339,6 +339,17 @@ int pcf8563_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
return 0;
}
+static long pcf8563_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = pcf8563_ioctl(filp, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
static int __init pcf8563_register(void)
{
if (pcf8563_init() < 0) {
diff --git a/arch/cris/arch-v10/kernel/time.c b/arch/cris/arch-v10/kernel/time.c
index 31ca1418d5a7..30adae594aef 100644
--- a/arch/cris/arch-v10/kernel/time.c
+++ b/arch/cris/arch-v10/kernel/time.c
@@ -26,7 +26,6 @@
/* it will make jiffies at 96 hz instead of 100 hz though */
#undef USE_CASCADE_TIMERS
-extern void update_xtime_from_cmos(void);
extern int set_rtc_mmss(unsigned long nowtime);
extern int have_rtc;
@@ -188,8 +187,6 @@ stop_watchdog(void)
#endif
}
-/* last time the cmos clock got updated */
-static long last_rtc_update = 0;
/*
* timer_interrupt() needs to keep up the real-time clock,
@@ -232,24 +229,6 @@ timer_interrupt(int irq, void *dev_id)
do_timer(1);
cris_do_profile(regs); /* Save profiling information */
-
- /*
- * If we have an externally synchronized Linux clock, then update
- * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
- * called as close as possible to 500 ms before the new second starts.
- *
- * The division here is not time critical since it will run once in
- * 11 minutes
- */
- if (ntp_synced() &&
- xtime.tv_sec > last_rtc_update + 660 &&
- (xtime.tv_nsec / 1000) >= 500000 - (tick_nsec / 1000) / 2 &&
- (xtime.tv_nsec / 1000) <= 500000 + (tick_nsec / 1000) / 2) {
- if (set_rtc_mmss(xtime.tv_sec) == 0)
- last_rtc_update = xtime.tv_sec;
- else
- last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
- }
return IRQ_HANDLED;
}
@@ -274,22 +253,10 @@ time_init(void)
*/
loops_per_usec = 50;
- if(RTC_INIT() < 0) {
- /* no RTC, start at 1980 */
- xtime.tv_sec = 0;
- xtime.tv_nsec = 0;
+ if(RTC_INIT() < 0)
have_rtc = 0;
- } else {
- /* get the current time */
+ else
have_rtc = 1;
- update_xtime_from_cmos();
- }
-
- /*
- * Initialize wall_to_monotonic such that adding it to xtime will yield zero, the
- * tv_nsec field must be normalized (i.e., 0 <= nsec < NSEC_PER_SEC).
- */
- set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
/* Setup the etrax timers
* Base frequency is 25000 hz, divider 250 -> 100 HZ
diff --git a/arch/cris/arch-v32/drivers/i2c.c b/arch/cris/arch-v32/drivers/i2c.c
index 506826399ae7..2fd6a740d895 100644
--- a/arch/cris/arch-v32/drivers/i2c.c
+++ b/arch/cris/arch-v32/drivers/i2c.c
@@ -649,10 +649,10 @@ i2c_release(struct inode *inode, struct file *filp)
/* Main device API. ioctl's to write or read to/from i2c registers.
*/
-static int
-i2c_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+static long
+i2c_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
+ int ret;
if(_IOC_TYPE(cmd) != ETRAXI2C_IOCTYPE) {
return -ENOTTY;
}
@@ -665,9 +665,13 @@ i2c_ioctl(struct inode *inode, struct file *file,
I2C_ARGREG(arg),
I2C_ARGVALUE(arg)));
- return i2c_writereg(I2C_ARGSLAVE(arg),
+ lock_kernel();
+ ret = i2c_writereg(I2C_ARGSLAVE(arg),
I2C_ARGREG(arg),
I2C_ARGVALUE(arg));
+ unlock_kernel();
+ return ret;
+
case I2C_READREG:
{
unsigned char val;
@@ -675,7 +679,9 @@ i2c_ioctl(struct inode *inode, struct file *file,
D(printk("i2cr %d %d ",
I2C_ARGSLAVE(arg),
I2C_ARGREG(arg)));
+ lock_kernel();
val = i2c_readreg(I2C_ARGSLAVE(arg), I2C_ARGREG(arg));
+ unlock_kernel();
D(printk("= %d\n", val));
return val;
}
@@ -688,10 +694,10 @@ i2c_ioctl(struct inode *inode, struct file *file,
}
static const struct file_operations i2c_fops = {
- .owner = THIS_MODULE,
- .ioctl = i2c_ioctl,
- .open = i2c_open,
- .release = i2c_release,
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = i2c_ioctl,
+ .open = i2c_open,
+ .release = i2c_release,
};
static int __init i2c_init(void)
diff --git a/arch/cris/arch-v32/drivers/pcf8563.c b/arch/cris/arch-v32/drivers/pcf8563.c
index f4478506e52c..bef6eb53b153 100644
--- a/arch/cris/arch-v32/drivers/pcf8563.c
+++ b/arch/cris/arch-v32/drivers/pcf8563.c
@@ -24,6 +24,7 @@
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
+#include <linux/smp_lock.h>
#include <linux/delay.h>
#include <linux/bcd.h>
#include <linux/mutex.h>
@@ -49,7 +50,7 @@ static DEFINE_MUTEX(rtc_lock); /* Protect state etc */
static const unsigned char days_in_month[] =
{ 0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 };
-int pcf8563_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
+static long pcf8563_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
/* Cache VL bit value read at driver init since writing the RTC_SECOND
* register clears the VL status.
@@ -57,8 +58,8 @@ int pcf8563_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
static int voltage_low;
static const struct file_operations pcf8563_fops = {
- .owner = THIS_MODULE,
- .ioctl = pcf8563_ioctl
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = pcf8563_unlocked_ioctl,
};
unsigned char
@@ -208,8 +209,7 @@ pcf8563_exit(void)
* ioctl calls for this driver. Why return -ENOTTY upon error? Because
* POSIX says so!
*/
-int pcf8563_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
- unsigned long arg)
+static int pcf8563_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
/* Some sanity checks. */
if (_IOC_TYPE(cmd) != RTC_MAGIC)
@@ -335,6 +335,17 @@ int pcf8563_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
return 0;
}
+static long pcf8563_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = pcf8563_ioctl(filp, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
static int __init pcf8563_register(void)
{
if (pcf8563_init() < 0) {
diff --git a/arch/cris/arch-v32/kernel/time.c b/arch/cris/arch-v32/kernel/time.c
index b1920d8de403..1ee0e1010228 100644
--- a/arch/cris/arch-v32/kernel/time.c
+++ b/arch/cris/arch-v32/kernel/time.c
@@ -44,7 +44,6 @@ unsigned long timer_regs[NR_CPUS] =
#endif
};
-extern void update_xtime_from_cmos(void);
extern int set_rtc_mmss(unsigned long nowtime);
extern int have_rtc;
@@ -198,9 +197,6 @@ handle_watchdog_bite(struct pt_regs* regs)
#endif
}
-/* Last time the cmos clock got updated. */
-static long last_rtc_update = 0;
-
/*
* timer_interrupt() needs to keep up the real-time clock,
* as well as call the "do_timer()" routine every clocktick.
@@ -238,25 +234,6 @@ timer_interrupt(int irq, void *dev_id)
/* Call the real timer interrupt handler */
do_timer(1);
-
- /*
- * If we have an externally synchronized Linux clock, then update
- * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
- * called as close as possible to 500 ms before the new second starts.
- *
- * The division here is not time critical since it will run once in
- * 11 minutes
- */
- if ((time_status & STA_UNSYNC) == 0 &&
- xtime.tv_sec > last_rtc_update + 660 &&
- (xtime.tv_nsec / 1000) >= 500000 - (tick_nsec / 1000) / 2 &&
- (xtime.tv_nsec / 1000) <= 500000 + (tick_nsec / 1000) / 2) {
- if (set_rtc_mmss(xtime.tv_sec) == 0)
- last_rtc_update = xtime.tv_sec;
- else
- /* Do it again in 60 s */
- last_rtc_update = xtime.tv_sec - 600;
- }
return IRQ_HANDLED;
}
@@ -309,23 +286,10 @@ time_init(void)
*/
loops_per_usec = 50;
- if(RTC_INIT() < 0) {
- /* No RTC, start at 1980 */
- xtime.tv_sec = 0;
- xtime.tv_nsec = 0;
+ if(RTC_INIT() < 0)
have_rtc = 0;
- } else {
- /* Get the current time */
+ else
have_rtc = 1;
- update_xtime_from_cmos();
- }
-
- /*
- * Initialize wall_to_monotonic such that adding it to
- * xtime will yield zero, the tv_nsec field must be normalized
- * (i.e., 0 <= nsec < NSEC_PER_SEC).
- */
- set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
/* Start CPU local timer. */
cris_timer_init();
diff --git a/arch/cris/kernel/time.c b/arch/cris/kernel/time.c
index a05dd31f3efb..c72730d20ef6 100644
--- a/arch/cris/kernel/time.c
+++ b/arch/cris/kernel/time.c
@@ -98,6 +98,8 @@ unsigned long
get_cmos_time(void)
{
unsigned int year, mon, day, hour, min, sec;
+ if(!have_rtc)
+ return 0;
sec = CMOS_READ(RTC_SECONDS);
min = CMOS_READ(RTC_MINUTES);
@@ -119,19 +121,19 @@ get_cmos_time(void)
return mktime(year, mon, day, hour, min, sec);
}
-/* update xtime from the CMOS settings. used when /dev/rtc gets a SET_TIME.
- * TODO: this doesn't reset the fancy NTP phase stuff as do_settimeofday does.
- */
-void
-update_xtime_from_cmos(void)
+int update_persistent_clock(struct timespec now)
{
- if(have_rtc) {
- xtime.tv_sec = get_cmos_time();
- xtime.tv_nsec = 0;
- }
+ return set_rtc_mmss(now.tv_sec);
}
+void read_persistent_clock(struct timespec *ts)
+{
+ ts->tv_sec = get_cmos_time();
+ ts->tv_nsec = 0;
+}
+
+
extern void cris_profile_sample(struct pt_regs* regs);
void
diff --git a/arch/frv/kernel/break.S b/arch/frv/kernel/break.S
index bd0bdf908d93..cbb6958a3147 100644
--- a/arch/frv/kernel/break.S
+++ b/arch/frv/kernel/break.S
@@ -21,7 +21,7 @@
#
# the break handler has its own stack
#
- .section .bss.stack
+ .section .bss..stack
.globl __break_user_context
.balign THREAD_SIZE
__break_stack:
@@ -63,7 +63,7 @@ __break_trace_through_exceptions:
# entry point for Break Exceptions/Interrupts
#
###############################################################################
- .section .text.break
+ .section .text..break
.balign 4
.globl __entry_break
__entry_break:
diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S
index 189397ec012a..63d579bf1c29 100644
--- a/arch/frv/kernel/entry.S
+++ b/arch/frv/kernel/entry.S
@@ -38,7 +38,7 @@
#define nr_syscalls ((syscall_table_size)/4)
- .section .text.entry
+ .section .text..entry
.balign 4
.macro LEDS val
diff --git a/arch/frv/kernel/head.S b/arch/frv/kernel/head.S
index b825ef3f2d54..e9a8cc63ac94 100644
--- a/arch/frv/kernel/head.S
+++ b/arch/frv/kernel/head.S
@@ -542,7 +542,7 @@ __head_end:
.size _boot, .-_boot
# provide a point for GDB to place a break
- .section .text.start,"ax"
+ .section .text..start,"ax"
.globl _start
.balign 4
_start:
diff --git a/arch/frv/kernel/time.c b/arch/frv/kernel/time.c
index fb0ce7577225..0ddbbae83cb2 100644
--- a/arch/frv/kernel/time.c
+++ b/arch/frv/kernel/time.c
@@ -48,20 +48,12 @@ static struct irqaction timer_irq = {
.name = "timer",
};
-static inline int set_rtc_mmss(unsigned long nowtime)
-{
- return -1;
-}
-
/*
* timer_interrupt() needs to keep up the real-time clock,
* as well as call the "do_timer()" routine every clocktick
*/
static irqreturn_t timer_interrupt(int irq, void *dummy)
{
- /* last time the cmos clock got updated */
- static long last_rtc_update = 0;
-
profile_tick(CPU_PROFILING);
/*
* Here we are in the timer irq handler. We just have irqs locally
@@ -74,22 +66,6 @@ static irqreturn_t timer_interrupt(int irq, void *dummy)
do_timer(1);
- /*
- * If we have an externally synchronized Linux clock, then update
- * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
- * called as close as possible to 500 ms before the new second starts.
- */
- if (ntp_synced() &&
- xtime.tv_sec > last_rtc_update + 660 &&
- (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
- (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2
- ) {
- if (set_rtc_mmss(xtime.tv_sec) == 0)
- last_rtc_update = xtime.tv_sec;
- else
- last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
- }
-
#ifdef CONFIG_HEARTBEAT
static unsigned short n;
n++;
@@ -119,7 +95,8 @@ void time_divisor_init(void)
__set_TCSR_DATA(0, base >> 8);
}
-void time_init(void)
+
+void read_persistent_clock(struct timespec *ts)
{
unsigned int year, mon, day, hour, min, sec;
@@ -135,9 +112,12 @@ void time_init(void)
if ((year += 1900) < 1970)
year += 100;
- xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
- xtime.tv_nsec = 0;
+ ts->tv_sec = mktime(year, mon, day, hour, min, sec);
+ ts->tv_nsec = 0;
+}
+void time_init(void)
+{
/* install scheduling interrupt handler */
setup_irq(IRQ_CPU_TIMER0, &timer_irq);
diff --git a/arch/frv/kernel/vmlinux.lds.S b/arch/frv/kernel/vmlinux.lds.S
index cbe811fccfcc..8b973f3cc90e 100644
--- a/arch/frv/kernel/vmlinux.lds.S
+++ b/arch/frv/kernel/vmlinux.lds.S
@@ -57,10 +57,10 @@ SECTIONS
_text = .;
_stext = .;
.text : {
- *(.text.start)
- *(.text.entry)
- *(.text.break)
- *(.text.tlbmiss)
+ *(.text..start)
+ *(.text..entry)
+ *(.text..break)
+ *(.text..tlbmiss)
TEXT_TEXT
SCHED_TEXT
LOCK_TEXT
@@ -114,7 +114,7 @@ SECTIONS
.sbss : { *(.sbss .sbss.*) }
.bss : { *(.bss .bss.*) }
- .bss.stack : { *(.bss) }
+ .bss..stack : { *(.bss) }
__bss_stop = .;
_end = . ;
diff --git a/arch/frv/mm/tlb-miss.S b/arch/frv/mm/tlb-miss.S
index 7f392bc651a3..f3ac019bb18b 100644
--- a/arch/frv/mm/tlb-miss.S
+++ b/arch/frv/mm/tlb-miss.S
@@ -15,7 +15,7 @@
#include <asm/pgtable.h>
#include <asm/spr-regs.h>
- .section .text.tlbmiss
+ .section .text..tlbmiss
.balign 4
.globl __entry_insn_mmu_miss
diff --git a/arch/h8300/boot/compressed/head.S b/arch/h8300/boot/compressed/head.S
index 985a81a2435a..10e9a2d1cc6c 100644
--- a/arch/h8300/boot/compressed/head.S
+++ b/arch/h8300/boot/compressed/head.S
@@ -9,7 +9,7 @@
#define SRAM_START 0xff4000
- .section .text.startup
+ .section .text..startup
.global startup
startup:
mov.l #SRAM_START+0x8000, sp
diff --git a/arch/h8300/boot/compressed/vmlinux.lds b/arch/h8300/boot/compressed/vmlinux.lds
index 65e2a0d1ae39..a0a3a0ed54ef 100644
--- a/arch/h8300/boot/compressed/vmlinux.lds
+++ b/arch/h8300/boot/compressed/vmlinux.lds
@@ -4,7 +4,7 @@ SECTIONS
{
__stext = . ;
__text = .;
- *(.text.startup)
+ *(.text..startup)
*(.text)
__etext = . ;
}
diff --git a/arch/h8300/kernel/time.c b/arch/h8300/kernel/time.c
index 7f2d6cfbb4b6..165005aff9df 100644
--- a/arch/h8300/kernel/time.c
+++ b/arch/h8300/kernel/time.c
@@ -41,7 +41,7 @@ void h8300_timer_tick(void)
update_process_times(user_mode(get_irq_regs()));
}
-void __init time_init(void)
+void read_persistent_clock(struct timespec *ts)
{
unsigned int year, mon, day, hour, min, sec;
@@ -56,8 +56,12 @@ void __init time_init(void)
#endif
if ((year += 1900) < 1970)
year += 100;
- xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
- xtime.tv_nsec = 0;
+ ts->tv_sec = mktime(year, mon, day, hour, min, sec);
+ ts->tv_nsec = 0;
+}
+
+void __init time_init(void)
+{
h8300_timer_setup();
}
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index e14c492a8a93..4ce8d1358fee 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -2046,13 +2046,13 @@ acpi_sba_ioc_add(struct acpi_device *device)
struct ioc *ioc;
acpi_status status;
u64 hpa, length;
- struct acpi_device_info *dev_info;
+ struct acpi_device_info *adi;
status = hp_acpi_csr_space(device->handle, &hpa, &length);
if (ACPI_FAILURE(status))
return 1;
- status = acpi_get_object_info(device->handle, &dev_info);
+ status = acpi_get_object_info(device->handle, &adi);
if (ACPI_FAILURE(status))
return 1;
@@ -2060,13 +2060,13 @@ acpi_sba_ioc_add(struct acpi_device *device)
* For HWP0001, only SBA appears in ACPI namespace. It encloses the PCI
* root bridges, and its CSR space includes the IOC function.
*/
- if (strncmp("HWP0001", dev_info->hardware_id.string, 7) == 0) {
+ if (strncmp("HWP0001", adi->hardware_id.string, 7) == 0) {
hpa += ZX1_IOC_OFFSET;
/* zx1 based systems default to kernel page size iommu pages */
if (!iovp_shift)
iovp_shift = min(PAGE_SHIFT, 16);
}
- kfree(dev_info);
+ kfree(adi);
/*
* default anything not caught above or specified on cmdline to 4k
diff --git a/arch/ia64/include/asm/asmmacro.h b/arch/ia64/include/asm/asmmacro.h
index c1642fd64029..3ab6d75aa3db 100644
--- a/arch/ia64/include/asm/asmmacro.h
+++ b/arch/ia64/include/asm/asmmacro.h
@@ -70,12 +70,12 @@ name:
* path (ivt.S - TLB miss processing) or in places where it might not be
* safe to use a "tpa" instruction (mca_asm.S - error recovery).
*/
- .section ".data.patch.vtop", "a" // declare section & section attributes
+ .section ".data..patch.vtop", "a" // declare section & section attributes
.previous
#define LOAD_PHYSICAL(pr, reg, obj) \
[1:](pr)movl reg = obj; \
- .xdata4 ".data.patch.vtop", 1b-.
+ .xdata4 ".data..patch.vtop", 1b-.
/*
* For now, we always put in the McKinley E9 workaround. On CPUs that don't need it,
@@ -84,11 +84,11 @@ name:
#define DO_MCKINLEY_E9_WORKAROUND
#ifdef DO_MCKINLEY_E9_WORKAROUND
- .section ".data.patch.mckinley_e9", "a"
+ .section ".data..patch.mckinley_e9", "a"
.previous
/* workaround for Itanium 2 Errata 9: */
# define FSYS_RETURN \
- .xdata4 ".data.patch.mckinley_e9", 1f-.; \
+ .xdata4 ".data..patch.mckinley_e9", 1f-.; \
1:{ .mib; \
nop.m 0; \
mov r16=ar.pfs; \
@@ -107,11 +107,11 @@ name:
* If physical stack register size is different from DEF_NUM_STACK_REG,
* dynamically patch the kernel for correct size.
*/
- .section ".data.patch.phys_stack_reg", "a"
+ .section ".data..patch.phys_stack_reg", "a"
.previous
#define LOAD_PHYS_STACK_REG_SIZE(reg) \
[1:] adds reg=IA64_NUM_PHYS_STACK_REG*8+8,r0; \
- .xdata4 ".data.patch.phys_stack_reg", 1b-.
+ .xdata4 ".data..patch.phys_stack_reg", 1b-.
/*
* Up until early 2004, use of .align within a function caused bad unwind info.
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
index 6ebc229a1c51..9da3df6f1a52 100644
--- a/arch/ia64/include/asm/bitops.h
+++ b/arch/ia64/include/asm/bitops.h
@@ -437,17 +437,18 @@ __fls (unsigned long x)
* hweightN: returns the hamming weight (i.e. the number
* of bits set) of a N-bit word
*/
-static __inline__ unsigned long
-hweight64 (unsigned long x)
+static __inline__ unsigned long __arch_hweight64(unsigned long x)
{
unsigned long result;
result = ia64_popcnt(x);
return result;
}
-#define hweight32(x) (unsigned int) hweight64((x) & 0xfffffffful)
-#define hweight16(x) (unsigned int) hweight64((x) & 0xfffful)
-#define hweight8(x) (unsigned int) hweight64((x) & 0xfful)
+#define __arch_hweight32(x) ((unsigned int) __arch_hweight64((x) & 0xfffffffful))
+#define __arch_hweight16(x) ((unsigned int) __arch_hweight64((x) & 0xfffful))
+#define __arch_hweight8(x) ((unsigned int) __arch_hweight64((x) & 0xfful))
+
+#include <asm-generic/bitops/const_hweight.h>
#endif /* __KERNEL__ */
diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
index e7482bd628ff..988254a7d349 100644
--- a/arch/ia64/include/asm/cache.h
+++ b/arch/ia64/include/asm/cache.h
@@ -24,6 +24,6 @@
# define SMP_CACHE_BYTES (1 << 3)
#endif
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
#endif /* _ASM_IA64_CACHE_H */
diff --git a/arch/ia64/include/asm/mmzone.h b/arch/ia64/include/asm/mmzone.h
index f2ca32069b3f..e0de61709cf1 100644
--- a/arch/ia64/include/asm/mmzone.h
+++ b/arch/ia64/include/asm/mmzone.h
@@ -19,16 +19,12 @@
static inline int pfn_to_nid(unsigned long pfn)
{
-#ifdef CONFIG_NUMA
extern int paddr_to_nid(unsigned long);
int nid = paddr_to_nid(pfn << PAGE_SHIFT);
if (nid < 0)
return 0;
else
return nid;
-#else
- return 0;
-#endif
}
#ifdef CONFIG_IA64_DIG /* DIG systems are small */
diff --git a/arch/ia64/include/asm/percpu.h b/arch/ia64/include/asm/percpu.h
index f7c00a5e0e2b..d7ca2e82302e 100644
--- a/arch/ia64/include/asm/percpu.h
+++ b/arch/ia64/include/asm/percpu.h
@@ -31,7 +31,7 @@ extern void *per_cpu_init(void);
#endif /* SMP */
-#define PER_CPU_BASE_SECTION ".data.percpu"
+#define PER_CPU_BASE_SECTION ".data..percpu"
/*
* Be extremely careful when taking the address of this variable! Due to virtual
diff --git a/arch/ia64/kernel/Makefile.gate b/arch/ia64/kernel/Makefile.gate
index ab9b03a9adcc..ceeffc509764 100644
--- a/arch/ia64/kernel/Makefile.gate
+++ b/arch/ia64/kernel/Makefile.gate
@@ -21,7 +21,7 @@ GATECFLAGS_gate-syms.o = -r
$(obj)/gate-syms.o: $(obj)/gate.lds $(obj)/gate.o FORCE
$(call if_changed,gate)
-# gate-data.o contains the gate DSO image as data in section .data.gate.
+# gate-data.o contains the gate DSO image as data in section .data..gate.
# We must build gate.so before we can assemble it.
# Note: kbuild does not track this dependency due to usage of .incbin
$(obj)/gate-data.o: $(obj)/gate.so
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 4d1a7e9314cf..c6c90f39f4d9 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -785,6 +785,14 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
return 0;
}
+int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi)
+{
+ if (isa_irq >= 16)
+ return -1;
+ *gsi = isa_irq;
+ return 0;
+}
+
/*
* ACPI based hotplug CPU support
*/
diff --git a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
index b0b4e6e710f2..22f61526a8e1 100644
--- a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
+++ b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
@@ -113,7 +113,7 @@ processor_get_freq (
dprintk("processor_get_freq\n");
saved_mask = current->cpus_allowed;
- set_cpus_allowed(current, cpumask_of_cpu(cpu));
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
if (smp_processor_id() != cpu)
goto migrate_end;
@@ -121,7 +121,7 @@ processor_get_freq (
ret = processor_get_pstate(&value);
if (ret) {
- set_cpus_allowed(current, saved_mask);
+ set_cpus_allowed_ptr(current, &saved_mask);
printk(KERN_WARNING "get performance failed with error %d\n",
ret);
ret = 0;
@@ -131,7 +131,7 @@ processor_get_freq (
ret = (clock_freq*1000);
migrate_end:
- set_cpus_allowed(current, saved_mask);
+ set_cpus_allowed_ptr(current, &saved_mask);
return ret;
}
@@ -151,7 +151,7 @@ processor_set_freq (
dprintk("processor_set_freq\n");
saved_mask = current->cpus_allowed;
- set_cpus_allowed(current, cpumask_of_cpu(cpu));
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
if (smp_processor_id() != cpu) {
retval = -EAGAIN;
goto migrate_end;
@@ -208,7 +208,7 @@ processor_set_freq (
retval = 0;
migrate_end:
- set_cpus_allowed(current, saved_mask);
+ set_cpus_allowed_ptr(current, &saved_mask);
return (retval);
}
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 9a260b317d8d..9465dfdb28ff 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -114,15 +114,19 @@ GLOBAL_ENTRY(sys_clone2)
alloc r16=ar.pfs,8,2,6,0
DO_SAVE_SWITCH_STACK
adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp
+ adds r3=PT(AR_BSPSTORE)+IA64_SWITCH_STACK_SIZE+16,sp
mov loc0=rp
mov loc1=r16 // save ar.pfs across do_fork
.body
mov out1=in1
mov out3=in2
tbit.nz p6,p0=in0,CLONE_SETTLS_BIT
+ cmp.eq p7,p0=in1,r0
mov out4=in3 // parent_tidptr: valid only w/CLONE_PARENT_SETTID
;;
(p6) st8 [r2]=in5 // store TLS in r16 for copy_thread()
+(p7) ld8 out1=[r3] // stack_start: kernel expects a valid value
+(p7) mov out3=r0
mov out5=in4 // child_tidptr: valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
adds out2=IA64_SWITCH_STACK_SIZE+16,sp // out2 = &regs
mov out0=in0 // out0 = clone_flags
@@ -146,15 +150,19 @@ GLOBAL_ENTRY(sys_clone)
alloc r16=ar.pfs,8,2,6,0
DO_SAVE_SWITCH_STACK
adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp
+ adds r3=PT(AR_BSPSTORE)+IA64_SWITCH_STACK_SIZE+16,sp
mov loc0=rp
mov loc1=r16 // save ar.pfs across do_fork
.body
mov out1=in1
mov out3=16 // stacksize (compensates for 16-byte scratch area)
tbit.nz p6,p0=in0,CLONE_SETTLS_BIT
+ cmp.eq p7,p0=in1,r0
mov out4=in2 // parent_tidptr: valid only w/CLONE_PARENT_SETTID
;;
(p6) st8 [r2]=in4 // store TLS in r13 (tp)
+(p7) ld8 out1=[r3] // stack_start: kernel expects a valid value
+(p7) mov out3=r0
mov out5=in3 // child_tidptr: valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
adds out2=IA64_SWITCH_STACK_SIZE+16,sp // out2 = &regs
mov out0=in0 // out0 = clone_flags
diff --git a/arch/ia64/kernel/gate-data.S b/arch/ia64/kernel/gate-data.S
index 258c0a3238fb..b3ef1c72e132 100644
--- a/arch/ia64/kernel/gate-data.S
+++ b/arch/ia64/kernel/gate-data.S
@@ -1,3 +1,3 @@
- .section .data.gate, "aw"
+ .section .data..gate, "aw"
.incbin "arch/ia64/kernel/gate.so"
diff --git a/arch/ia64/kernel/gate.S b/arch/ia64/kernel/gate.S
index cf5e0a105e16..245d3e1ec7e1 100644
--- a/arch/ia64/kernel/gate.S
+++ b/arch/ia64/kernel/gate.S
@@ -21,18 +21,18 @@
* to targets outside the shared object) and to avoid multi-phase kernel builds, we
* simply create minimalistic "patch lists" in special ELF sections.
*/
- .section ".data.patch.fsyscall_table", "a"
+ .section ".data..patch.fsyscall_table", "a"
.previous
#define LOAD_FSYSCALL_TABLE(reg) \
[1:] movl reg=0; \
- .xdata4 ".data.patch.fsyscall_table", 1b-.
+ .xdata4 ".data..patch.fsyscall_table", 1b-.
- .section ".data.patch.brl_fsys_bubble_down", "a"
+ .section ".data..patch.brl_fsys_bubble_down", "a"
.previous
#define BRL_COND_FSYS_BUBBLE_DOWN(pr) \
[1:](pr)brl.cond.sptk 0; \
;; \
- .xdata4 ".data.patch.brl_fsys_bubble_down", 1b-.
+ .xdata4 ".data..patch.brl_fsys_bubble_down", 1b-.
GLOBAL_ENTRY(__kernel_syscall_via_break)
.prologue
diff --git a/arch/ia64/kernel/gate.lds.S b/arch/ia64/kernel/gate.lds.S
index 88c64ed47c36..d32b0855110a 100644
--- a/arch/ia64/kernel/gate.lds.S
+++ b/arch/ia64/kernel/gate.lds.S
@@ -33,21 +33,21 @@ SECTIONS
*/
. = GATE_ADDR + 0x600;
- .data.patch : {
+ .data..patch : {
__paravirt_start_gate_mckinley_e9_patchlist = .;
- *(.data.patch.mckinley_e9)
+ *(.data..patch.mckinley_e9)
__paravirt_end_gate_mckinley_e9_patchlist = .;
__paravirt_start_gate_vtop_patchlist = .;
- *(.data.patch.vtop)
+ *(.data..patch.vtop)
__paravirt_end_gate_vtop_patchlist = .;
__paravirt_start_gate_fsyscall_patchlist = .;
- *(.data.patch.fsyscall_table)
+ *(.data..patch.fsyscall_table)
__paravirt_end_gate_fsyscall_patchlist = .;
__paravirt_start_gate_brl_fsys_bubble_down_patchlist = .;
- *(.data.patch.brl_fsys_bubble_down)
+ *(.data..patch.brl_fsys_bubble_down)
__paravirt_end_gate_brl_fsys_bubble_down_patchlist = .;
} :readable
diff --git a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c
index e253ab8fcbc8..f9efe9739d3f 100644
--- a/arch/ia64/kernel/init_task.c
+++ b/arch/ia64/kernel/init_task.c
@@ -23,7 +23,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
* Initial task structure.
*
* We need to make sure that this is properly aligned due to the way process stacks are
- * handled. This is done by having a special ".data.init_task" section...
+ * handled. This is done by having a special ".data..init_task" section...
*/
#define init_thread_info init_task_mem.s.thread_info
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 640479304ac0..f14c35f9b03a 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -29,6 +29,7 @@
#include <linux/threads.h>
#include <linux/bitops.h>
#include <linux/irq.h>
+#include <linux/ratelimit.h>
#include <asm/delay.h>
#include <asm/intrinsics.h>
@@ -467,13 +468,9 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
sp = ia64_getreg(_IA64_REG_SP);
if ((sp - bsp) < 1024) {
- static unsigned char count;
- static long last_time;
+ static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
- if (time_after(jiffies, last_time + 5 * HZ))
- count = 0;
- if (++count < 5) {
- last_time = jiffies;
+ if (__ratelimit(&ratelimit)) {
printk("ia64_handle_irq: DANGER: less than "
"1KB of free stack space!!\n"
"(bsp=0x%lx, sp=%lx)\n", bsp, sp);
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index 179fd122e837..d93e396bf599 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -82,7 +82,7 @@
mov r19=n;; /* prepare to save predicates */ \
br.sptk.many dispatch_to_fault_handler
- .section .text.ivt,"ax"
+ .section .text..ivt,"ax"
.align 32768 // align on 32KB boundary
.global ia64_ivt
diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h
index 292e214a3b84..d56753a11636 100644
--- a/arch/ia64/kernel/minstate.h
+++ b/arch/ia64/kernel/minstate.h
@@ -16,7 +16,7 @@
#define ACCOUNT_SYS_ENTER
#endif
-.section ".data.patch.rse", "a"
+.section ".data..patch.rse", "a"
.previous
/*
@@ -215,7 +215,7 @@
(pUStk) extr.u r17=r18,3,6; \
(pUStk) sub r16=r18,r22; \
[1:](pKStk) br.cond.sptk.many 1f; \
- .xdata4 ".data.patch.rse",1b-. \
+ .xdata4 ".data..patch.rse",1b-. \
;; \
cmp.ge p6,p7 = 33,r17; \
;; \
diff --git a/arch/ia64/kernel/paravirtentry.S b/arch/ia64/kernel/paravirtentry.S
index 6158560d7f17..92d880c4d3d1 100644
--- a/arch/ia64/kernel/paravirtentry.S
+++ b/arch/ia64/kernel/paravirtentry.S
@@ -28,7 +28,7 @@
#include "entry.h"
#define DATA8(sym, init_value) \
- .pushsection .data.read_mostly ; \
+ .pushsection .data..read_mostly ; \
.align 8 ; \
.global sym ; \
sym: ; \
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 53f1648c8b81..01acfe00b7ec 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -452,7 +452,7 @@ copy_thread(unsigned long clone_flags,
if (likely(user_mode(child_ptregs))) {
if (clone_flags & CLONE_SETTLS)
child_ptregs->r13 = regs->r16; /* see sys_clone2() in entry.S */
- if (user_stack_base) {
+ if (user_stack_base && user_stack_size) {
child_ptregs->r12 = user_stack_base + user_stack_size - 16;
child_ptregs->ar_bspstore = user_stack_base;
child_ptregs->ar_rnat = 0;
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index e6676fca4828..aa8b5fa1a8de 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -404,10 +404,9 @@ static void
call_on_cpu(int cpu, void (*fn)(void *), void *arg)
{
cpumask_t save_cpus_allowed = current->cpus_allowed;
- cpumask_t new_cpus_allowed = cpumask_of_cpu(cpu);
- set_cpus_allowed(current, new_cpus_allowed);
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
(*fn)(arg);
- set_cpus_allowed(current, save_cpus_allowed);
+ set_cpus_allowed_ptr(current, &save_cpus_allowed);
}
static void
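The acpi-cpufreq, salinfo, topology and sn_hwperf hunks in this patch all migrate from set_cpus_allowed() taking a cpumask_t by value to set_cpus_allowed_ptr() taking a const struct cpumask pointer. A minimal sketch of the save/pin/restore idiom they share, assuming a hypothetical do_percpu_work() callback (not part of this patch):

#include <linux/cpumask.h>
#include <linux/sched.h>

/* Pin the current task to 'cpu', run the callback there, then restore the mask. */
static int run_on_cpu(int cpu, void (*do_percpu_work)(void *), void *arg)
{
	cpumask_t saved_mask = current->cpus_allowed;	/* remember the original mask */
	int ret;

	ret = set_cpus_allowed_ptr(current, cpumask_of(cpu));	/* pin to one CPU */
	if (ret)
		return ret;
	do_percpu_work(arg);					/* work that must run on 'cpu' */
	return set_cpus_allowed_ptr(current, &saved_mask);	/* undo the pinning */
}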
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 47a192781b0a..653b3c46ea82 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -430,18 +430,16 @@ static int __init rtc_init(void)
}
module_init(rtc_init);
+void read_persistent_clock(struct timespec *ts)
+{
+ efi_gettimeofday(ts);
+}
+
void __init
time_init (void)
{
register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction);
- efi_gettimeofday(&xtime);
ia64_init_itm();
-
- /*
- * Initialize wall_to_monotonic such that adding it to xtime will yield zero, the
- * tv_nsec field must be normalized (i.e., 0 <= nsec < NSEC_PER_SEC).
- */
- set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
}
/*
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 28f299de2903..0baa1bbb65fe 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -361,12 +361,12 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
return 0;
oldmask = current->cpus_allowed;
- retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
+ retval = set_cpus_allowed_ptr(current, cpumask_of(cpu));
if (unlikely(retval))
return retval;
retval = cpu_cache_sysfs_init(cpu);
- set_cpus_allowed(current, oldmask);
+ set_cpus_allowed_ptr(current, &oldmask);
if (unlikely(retval < 0))
return retval;
diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c
index 776dd40397e2..622772b7fb6c 100644
--- a/arch/ia64/kernel/unaligned.c
+++ b/arch/ia64/kernel/unaligned.c
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/tty.h>
+#include <linux/ratelimit.h>
#include <asm/intrinsics.h>
#include <asm/processor.h>
@@ -1283,24 +1284,9 @@ emulate_store_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
/*
* Make sure we log the unaligned access, so that user/sysadmin can notice it and
* eventually fix the program. However, we don't want to do that for every access so we
- * pace it with jiffies. This isn't really MP-safe, but it doesn't really have to be
- * either...
+ * pace it with jiffies.
*/
-static int
-within_logging_rate_limit (void)
-{
- static unsigned long count, last_time;
-
- if (time_after(jiffies, last_time + 5 * HZ))
- count = 0;
- if (count < 5) {
- last_time = jiffies;
- count++;
- return 1;
- }
- return 0;
-
-}
+static DEFINE_RATELIMIT_STATE(logging_rate_limit, 5 * HZ, 5);
void
ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
@@ -1337,7 +1323,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
if (!no_unaligned_warning &&
!(current->thread.flags & IA64_THREAD_UAC_NOPRINT) &&
- within_logging_rate_limit())
+ __ratelimit(&logging_rate_limit))
{
char buf[200]; /* comm[] is at most 16 bytes... */
size_t len;
@@ -1370,7 +1356,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
}
}
} else {
- if (within_logging_rate_limit()) {
+ if (__ratelimit(&logging_rate_limit)) {
printk(KERN_WARNING "kernel unaligned access to 0x%016lx, ip=0x%016lx\n",
ifa, regs->cr_iip + ipsr->ri);
if (unaligned_dump_stack)
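Both ia64 call sites converted above (irq_ia64.c and unaligned.c) replace an open-coded jiffies/counter pair with the generic ratelimit helpers: DEFINE_RATELIMIT_STATE(name, interval, burst) declares the state and __ratelimit() returns non-zero while the current interval's burst budget is not yet used up. A minimal sketch of the same idiom, with a hypothetical warning site:

#include <linux/kernel.h>
#include <linux/ratelimit.h>

/* At most 5 messages per 5*HZ window, matching the sites converted above. */
static DEFINE_RATELIMIT_STATE(example_ratelimit, 5 * HZ, 5);

static void warn_rarely(unsigned long addr)
{
	if (__ratelimit(&example_ratelimit))
		printk(KERN_WARNING "example: suspicious access to 0x%016lx\n", addr);
}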
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 1295ba327f6f..e07218a2577f 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -8,7 +8,7 @@
#define IVT_TEXT \
VMLINUX_SYMBOL(__start_ivt_text) = .; \
- *(.text.ivt) \
+ *(.text..ivt) \
VMLINUX_SYMBOL(__end_ivt_text) = .;
OUTPUT_FORMAT("elf64-ia64-little")
@@ -54,8 +54,8 @@ SECTIONS
.text2 : AT(ADDR(.text2) - LOAD_OFFSET)
{ *(.text2) }
#ifdef CONFIG_SMP
- .text.lock : AT(ADDR(.text.lock) - LOAD_OFFSET)
- { *(.text.lock) }
+ .text..lock : AT(ADDR(.text..lock) - LOAD_OFFSET)
+ { *(.text..lock) }
#endif
_etext = .;
@@ -75,10 +75,10 @@ SECTIONS
__stop___mca_table = .;
}
- .data.patch.phys_stack_reg : AT(ADDR(.data.patch.phys_stack_reg) - LOAD_OFFSET)
+ .data..patch.phys_stack_reg : AT(ADDR(.data..patch.phys_stack_reg) - LOAD_OFFSET)
{
__start___phys_stack_reg_patchlist = .;
- *(.data.patch.phys_stack_reg)
+ *(.data..patch.phys_stack_reg)
__end___phys_stack_reg_patchlist = .;
}
@@ -110,24 +110,24 @@ SECTIONS
INIT_TEXT_SECTION(PAGE_SIZE)
INIT_DATA_SECTION(16)
- .data.patch.vtop : AT(ADDR(.data.patch.vtop) - LOAD_OFFSET)
+ .data..patch.vtop : AT(ADDR(.data..patch.vtop) - LOAD_OFFSET)
{
__start___vtop_patchlist = .;
- *(.data.patch.vtop)
+ *(.data..patch.vtop)
__end___vtop_patchlist = .;
}
- .data.patch.rse : AT(ADDR(.data.patch.rse) - LOAD_OFFSET)
+ .data..patch.rse : AT(ADDR(.data..patch.rse) - LOAD_OFFSET)
{
__start___rse_patchlist = .;
- *(.data.patch.rse)
+ *(.data..patch.rse)
__end___rse_patchlist = .;
}
- .data.patch.mckinley_e9 : AT(ADDR(.data.patch.mckinley_e9) - LOAD_OFFSET)
+ .data..patch.mckinley_e9 : AT(ADDR(.data..patch.mckinley_e9) - LOAD_OFFSET)
{
__start___mckinley_e9_bundles = .;
- *(.data.patch.mckinley_e9)
+ *(.data..patch.mckinley_e9)
__end___mckinley_e9_bundles = .;
}
@@ -175,17 +175,17 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__init_end = .;
- .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET)
+ .data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET)
{
PAGE_ALIGNED_DATA(PAGE_SIZE)
. = ALIGN(PAGE_SIZE);
__start_gate_section = .;
- *(.data.gate)
+ *(.data..gate)
__stop_gate_section = .;
#ifdef CONFIG_XEN
. = ALIGN(PAGE_SIZE);
__xen_start_gate_section = .;
- *(.data.gate.xen)
+ *(.data..gate.xen)
__xen_stop_gate_section = .;
#endif
}
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 7f3c0a2e60cd..d5f4e9161201 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -979,11 +979,13 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = -EFAULT;
if (copy_from_user(&irq_event, argp, sizeof irq_event))
goto out;
+ r = -ENXIO;
if (irqchip_in_kernel(kvm)) {
__s32 status;
status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
irq_event.irq, irq_event.level);
if (ioctl == KVM_IRQ_LINE_STATUS) {
+ r = -EFAULT;
irq_event.status = status;
if (copy_to_user(argp, &irq_event,
sizeof irq_event))
@@ -1379,7 +1381,7 @@ static void kvm_release_vm_pages(struct kvm *kvm)
int i, j;
unsigned long base_gfn;
- slots = rcu_dereference(kvm->memslots);
+ slots = kvm_memslots(kvm);
for (i = 0; i < slots->nmemslots; i++) {
memslot = &slots->memslots[i];
base_gfn = memslot->base_gfn;
@@ -1535,8 +1537,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
goto out;
if (copy_to_user(user_stack, stack,
- sizeof(struct kvm_ia64_vcpu_stack)))
+ sizeof(struct kvm_ia64_vcpu_stack))) {
+ r = -EFAULT;
goto out;
+ }
break;
}
diff --git a/arch/ia64/kvm/vmm.c b/arch/ia64/kvm/vmm.c
index 7a62f75778c5..f0b9cac82414 100644
--- a/arch/ia64/kvm/vmm.c
+++ b/arch/ia64/kvm/vmm.c
@@ -51,7 +51,7 @@ static int __init kvm_vmm_init(void)
vmm_fpswa_interface = fpswa_interface;
/*Register vmm data to kvm side*/
- return kvm_init(&vmm_info, 1024, THIS_MODULE);
+ return kvm_init(&vmm_info, 1024, 0, THIS_MODULE);
}
static void __exit kvm_vmm_exit(void)
diff --git a/arch/ia64/kvm/vmm_ivt.S b/arch/ia64/kvm/vmm_ivt.S
index 40920c630649..24018484c6e9 100644
--- a/arch/ia64/kvm/vmm_ivt.S
+++ b/arch/ia64/kvm/vmm_ivt.S
@@ -104,7 +104,7 @@ GLOBAL_ENTRY(kvm_vmm_panic)
br.call.sptk.many b6=vmm_panic_handler;
END(kvm_vmm_panic)
- .section .text.ivt,"ax"
+ .section .text..ivt,"ax"
.align 32768 // align on 32KB boundary
.global kvm_ia64_ivt
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 19261a99e623..0799fea4c588 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -148,7 +148,6 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
if ((vma->vm_flags & mask) != mask)
goto bad_area;
- survive:
/*
* If for any reason at all we couldn't handle the fault, make
* sure we exit gracefully rather than endlessly redo the
@@ -276,13 +275,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
out_of_memory:
up_read(&mm->mmap_sem);
- if (is_global_init(current)) {
- yield();
- down_read(&mm->mmap_sem);
- goto survive;
- }
- printk(KERN_CRIT "VM: killing process %s\n", current->comm);
- if (user_mode(regs))
- do_group_exit(SIGKILL);
- goto no_context;
+ if (!user_mode(regs))
+ goto no_context;
+ pagefault_out_of_memory();
}
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 64aff520b899..aa2533ae7e9e 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -335,8 +335,11 @@ pcibios_setup_root_windows(struct pci_bus *bus, struct pci_controller *ctrl)
}
struct pci_bus * __devinit
-pci_acpi_scan_root(struct acpi_device *device, int domain, int bus)
+pci_acpi_scan_root(struct acpi_pci_root *root)
{
+ struct acpi_device *device = root->device;
+ int domain = root->segment;
+ int bus = root->secondary.start;
struct pci_controller *controller;
unsigned int windows = 0;
struct pci_bus *pbus;
diff --git a/arch/ia64/scripts/unwcheck.py b/arch/ia64/scripts/unwcheck.py
index c27849889e19..2bfd941ff7c7 100644
--- a/arch/ia64/scripts/unwcheck.py
+++ b/arch/ia64/scripts/unwcheck.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index 55ac3c4e11d2..f6c1c5fd075d 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -629,9 +629,9 @@ static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info)
else {
/* migrate the task before calling SAL */
save_allowed = current->cpus_allowed;
- set_cpus_allowed(current, cpumask_of_cpu(cpu));
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
sn_hwperf_call_sal(op_info);
- set_cpus_allowed(current, save_allowed);
+ set_cpus_allowed_ptr(current, &save_allowed);
}
}
r = op_info->ret;
diff --git a/arch/ia64/xen/gate-data.S b/arch/ia64/xen/gate-data.S
index 7d4830afc91d..6f95b6b32a4e 100644
--- a/arch/ia64/xen/gate-data.S
+++ b/arch/ia64/xen/gate-data.S
@@ -1,3 +1,3 @@
- .section .data.gate.xen, "aw"
+ .section .data..gate.xen, "aw"
.incbin "arch/ia64/xen/gate.so"
diff --git a/arch/ia64/xen/xensetup.S b/arch/ia64/xen/xensetup.S
index aff8346ea193..b820ed02ab9f 100644
--- a/arch/ia64/xen/xensetup.S
+++ b/arch/ia64/xen/xensetup.S
@@ -14,7 +14,7 @@
#include <linux/init.h>
#include <xen/interface/elfnote.h>
- .section .data.read_mostly
+ .section .data..read_mostly
.align 8
.global xen_domain_type
xen_domain_type:
diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c
index 9cedcef11575..bda86820bffd 100644
--- a/arch/m32r/kernel/time.c
+++ b/arch/m32r/kernel/time.c
@@ -106,24 +106,6 @@ u32 arch_gettimeoffset(void)
}
/*
- * In order to set the CMOS clock precisely, set_rtc_mmss has to be
- * called 500 ms after the second nowtime has started, because when
- * nowtime is written into the registers of the CMOS clock, it will
- * jump to the next second precisely 500 ms later. Check the Motorola
- * MC146818A or Dallas DS12887 data sheet for details.
- *
- * BUG: This routine does not handle hour overflow properly; it just
- * sets the minutes. Usually you won't notice until after reboot!
- */
-static inline int set_rtc_mmss(unsigned long nowtime)
-{
- return 0;
-}
-
-/* last time the cmos clock got updated */
-static long last_rtc_update = 0;
-
-/*
* timer_interrupt() needs to keep up the real-time clock,
* as well as call the "do_timer()" routine every clocktick
*/
@@ -138,23 +120,6 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
#ifndef CONFIG_SMP
update_process_times(user_mode(get_irq_regs()));
#endif
- /*
- * If we have an externally synchronized Linux clock, then update
- * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
- * called as close as possible to 500 ms before the new second starts.
- */
- write_seqlock(&xtime_lock);
- if (ntp_synced()
- && xtime.tv_sec > last_rtc_update + 660
- && (xtime.tv_nsec / 1000) >= 500000 - ((unsigned)TICK_SIZE) / 2
- && (xtime.tv_nsec / 1000) <= 500000 + ((unsigned)TICK_SIZE) / 2)
- {
- if (set_rtc_mmss(xtime.tv_sec) == 0)
- last_rtc_update = xtime.tv_sec;
- else /* do it again in 60 s */
- last_rtc_update = xtime.tv_sec - 600;
- }
- write_sequnlock(&xtime_lock);
/* As we return to user mode fire off the other CPU schedulers..
this is basically because we don't yet share IRQ's around.
This message is rigged to be safe on the 386 - basically it's
@@ -174,7 +139,7 @@ static struct irqaction irq0 = {
.name = "MFT2",
};
-void __init time_init(void)
+void read_persistent_clock(struct timespec *ts)
{
unsigned int epoch, year, mon, day, hour, min, sec;
@@ -194,11 +159,13 @@ void __init time_init(void)
epoch = 1952;
year += epoch;
- xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
- xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
- set_normalized_timespec(&wall_to_monotonic,
- -xtime.tv_sec, -xtime.tv_nsec);
+ ts->tv_sec = mktime(year, mon, day, hour, min, sec);
+ ts->tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
+}
+
+void __init time_init(void)
+{
#if defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_XNUX2) \
|| defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_M32700) \
|| defined(CONFIG_CHIP_OPSP) || defined(CONFIG_CHIP_M32104)
diff --git a/arch/m68k/include/asm/m520xsim.h b/arch/m68k/include/asm/m520xsim.h
index ed2b69b96805..e5ee54b0f06f 100644
--- a/arch/m68k/include/asm/m520xsim.h
+++ b/arch/m68k/include/asm/m520xsim.h
@@ -54,9 +54,12 @@
#define MCFSIM_SDCS0 0x000a8110 /* SDRAM Chip Select 0 Configuration */
#define MCFSIM_SDCS1 0x000a8114 /* SDRAM Chip Select 1 Configuration */
+#define MCFEPORT_EPPAR 0xFC088000
#define MCFEPORT_EPDDR 0xFC088002
+#define MCFEPORT_EPIER 0xFC088003
#define MCFEPORT_EPDR 0xFC088004
#define MCFEPORT_EPPDR 0xFC088005
+#define MCFEPORT_EPFR 0xFC088006
#define MCFGPIO_PODR_BUSCTL 0xFC0A4000
#define MCFGPIO_PODR_BE 0xFC0A4001
@@ -113,6 +116,7 @@
#define MCF_GPIO_PAR_UART (0xA4036)
#define MCF_GPIO_PAR_FECI2C (0xA4033)
+#define MCF_GPIO_PAR_QSPI (0xA4034)
#define MCF_GPIO_PAR_FEC (0xA4038)
#define MCF_GPIO_PAR_UART_PAR_URXD0 (0x0001)
diff --git a/arch/m68k/include/asm/m523xsim.h b/arch/m68k/include/asm/m523xsim.h
index a34894cf8e6f..4ab177639da2 100644
--- a/arch/m68k/include/asm/m523xsim.h
+++ b/arch/m68k/include/asm/m523xsim.h
@@ -110,9 +110,12 @@
* EPort
*/
+#define MCFEPORT_EPPAR (MCF_IPSBAR + 0x130000)
#define MCFEPORT_EPDDR (MCF_IPSBAR + 0x130002)
+#define MCFEPORT_EPIER (MCF_IPSBAR + 0x130003)
#define MCFEPORT_EPDR (MCF_IPSBAR + 0x130004)
#define MCFEPORT_EPPDR (MCF_IPSBAR + 0x130005)
+#define MCFEPORT_EPFR (MCF_IPSBAR + 0x130006)
/*
* Generic GPIO support
@@ -127,5 +130,10 @@
#define MCFGPIO_IRQ_MAX 8
#define MCFGPIO_IRQ_VECBASE MCFINT_VECBASE
+/*
+ * Pin Assignment
+*/
+#define MCFGPIO_PAR_QSPI (MCF_IPSBAR + 0x10004A)
+#define MCFGPIO_PAR_TIMER (MCF_IPSBAR + 0x10004C)
/****************************************************************************/
#endif /* m523xsim_h */
diff --git a/arch/m68k/include/asm/m5249sim.h b/arch/m68k/include/asm/m5249sim.h
index 14bce877ed88..201a710356a4 100644
--- a/arch/m68k/include/asm/m5249sim.h
+++ b/arch/m68k/include/asm/m5249sim.h
@@ -69,10 +69,12 @@
#define MCFSIM_DMA1ICR MCFSIM_ICR7 /* DMA 1 ICR */
#define MCFSIM_DMA2ICR MCFSIM_ICR8 /* DMA 2 ICR */
#define MCFSIM_DMA3ICR MCFSIM_ICR9 /* DMA 3 ICR */
+#define MCFSIM_QSPIICR MCFSIM_ICR10 /* QSPI ICR */
/*
* Define system peripheral IRQ usage.
*/
+#define MCF_IRQ_QSPI 28 /* QSPI, Level 4 */
#define MCF_IRQ_TIMER 30 /* Timer0, Level 6 */
#define MCF_IRQ_PROFILER 31 /* Timer1, Level 7 */
@@ -126,8 +128,8 @@
* Generic GPIO support
*/
#define MCFGPIO_PIN_MAX 64
-#define MCFGPIO_IRQ_MAX -1
-#define MCFGPIO_IRQ_VECBASE -1
+#define MCFGPIO_IRQ_MAX MCFINTC2_GPIOIRQ7
+#define MCFGPIO_IRQ_VECBASE MCFINTC2_GPIOIRQ0
/****************************************************************************/
diff --git a/arch/m68k/include/asm/m527xsim.h b/arch/m68k/include/asm/m527xsim.h
index 453356d72d80..26b395e41911 100644
--- a/arch/m68k/include/asm/m527xsim.h
+++ b/arch/m68k/include/asm/m527xsim.h
@@ -31,6 +31,7 @@
#define MCFINT_UART0 13 /* Interrupt number for UART0 */
#define MCFINT_UART1 14 /* Interrupt number for UART1 */
#define MCFINT_UART2 15 /* Interrupt number for UART2 */
+#define MCFINT_QSPI 18 /* Interrupt number for QSPI */
#define MCFINT_PIT1 36 /* Interrupt number for PIT1 */
/*
@@ -120,6 +121,9 @@
#define MCFGPIO_PIN_MAX 100
#define MCFGPIO_IRQ_MAX 8
#define MCFGPIO_IRQ_VECBASE MCFINT_VECBASE
+
+#define MCFGPIO_PAR_QSPI (MCF_IPSBAR + 0x10004A)
+#define MCFGPIO_PAR_TIMER (MCF_IPSBAR + 0x10004C)
#endif
#ifdef CONFIG_M5275
@@ -212,15 +216,21 @@
#define MCFGPIO_PIN_MAX 148
#define MCFGPIO_IRQ_MAX 8
#define MCFGPIO_IRQ_VECBASE MCFINT_VECBASE
+
+#define MCFGPIO_PAR_QSPI (MCF_IPSBAR + 0x10007E)
#endif
/*
* EPort
*/
+#define MCFEPORT_EPPAR (MCF_IPSBAR + 0x130000)
#define MCFEPORT_EPDDR (MCF_IPSBAR + 0x130002)
+#define MCFEPORT_EPIER (MCF_IPSBAR + 0x130003)
#define MCFEPORT_EPDR (MCF_IPSBAR + 0x130004)
#define MCFEPORT_EPPDR (MCF_IPSBAR + 0x130005)
+#define MCFEPORT_EPFR (MCF_IPSBAR + 0x130006)
+
/*
diff --git a/arch/m68k/include/asm/m528xsim.h b/arch/m68k/include/asm/m528xsim.h
index e2ad1f42b657..891cbedad972 100644
--- a/arch/m68k/include/asm/m528xsim.h
+++ b/arch/m68k/include/asm/m528xsim.h
@@ -29,6 +29,7 @@
#define MCFINT_VECBASE 64 /* Vector base number */
#define MCFINT_UART0 13 /* Interrupt number for UART0 */
+#define MCFINT_QSPI 18 /* Interrupt number for QSPI */
#define MCFINT_PIT1 55 /* Interrupt number for PIT1 */
/*
@@ -249,70 +250,4 @@
#define MCF5282_I2C_I2SR_RXAK (0x01) // received acknowledge
-
-/*********************************************************************
-*
-* Queued Serial Peripheral Interface (QSPI) Module
-*
-*********************************************************************/
-/* Derek - 21 Feb 2005 */
-/* change to the format used in I2C */
-/* Read/Write access macros for general use */
-#define MCF5282_QSPI_QMR MCF_IPSBAR + 0x0340
-#define MCF5282_QSPI_QDLYR MCF_IPSBAR + 0x0344
-#define MCF5282_QSPI_QWR MCF_IPSBAR + 0x0348
-#define MCF5282_QSPI_QIR MCF_IPSBAR + 0x034C
-#define MCF5282_QSPI_QAR MCF_IPSBAR + 0x0350
-#define MCF5282_QSPI_QDR MCF_IPSBAR + 0x0354
-#define MCF5282_QSPI_QCR MCF_IPSBAR + 0x0354
-
-/* Bit level definitions and macros */
-#define MCF5282_QSPI_QMR_MSTR (0x8000)
-#define MCF5282_QSPI_QMR_DOHIE (0x4000)
-#define MCF5282_QSPI_QMR_BITS_16 (0x0000)
-#define MCF5282_QSPI_QMR_BITS_8 (0x2000)
-#define MCF5282_QSPI_QMR_BITS_9 (0x2400)
-#define MCF5282_QSPI_QMR_BITS_10 (0x2800)
-#define MCF5282_QSPI_QMR_BITS_11 (0x2C00)
-#define MCF5282_QSPI_QMR_BITS_12 (0x3000)
-#define MCF5282_QSPI_QMR_BITS_13 (0x3400)
-#define MCF5282_QSPI_QMR_BITS_14 (0x3800)
-#define MCF5282_QSPI_QMR_BITS_15 (0x3C00)
-#define MCF5282_QSPI_QMR_CPOL (0x0200)
-#define MCF5282_QSPI_QMR_CPHA (0x0100)
-#define MCF5282_QSPI_QMR_BAUD(x) (((x)&0x00FF))
-
-#define MCF5282_QSPI_QDLYR_SPE (0x80)
-#define MCF5282_QSPI_QDLYR_QCD(x) (((x)&0x007F)<<8)
-#define MCF5282_QSPI_QDLYR_DTL(x) (((x)&0x00FF))
-
-#define MCF5282_QSPI_QWR_HALT (0x8000)
-#define MCF5282_QSPI_QWR_WREN (0x4000)
-#define MCF5282_QSPI_QWR_WRTO (0x2000)
-#define MCF5282_QSPI_QWR_CSIV (0x1000)
-#define MCF5282_QSPI_QWR_ENDQP(x) (((x)&0x000F)<<8)
-#define MCF5282_QSPI_QWR_CPTQP(x) (((x)&0x000F)<<4)
-#define MCF5282_QSPI_QWR_NEWQP(x) (((x)&0x000F))
-
-#define MCF5282_QSPI_QIR_WCEFB (0x8000)
-#define MCF5282_QSPI_QIR_ABRTB (0x4000)
-#define MCF5282_QSPI_QIR_ABRTL (0x1000)
-#define MCF5282_QSPI_QIR_WCEFE (0x0800)
-#define MCF5282_QSPI_QIR_ABRTE (0x0400)
-#define MCF5282_QSPI_QIR_SPIFE (0x0100)
-#define MCF5282_QSPI_QIR_WCEF (0x0008)
-#define MCF5282_QSPI_QIR_ABRT (0x0004)
-#define MCF5282_QSPI_QIR_SPIF (0x0001)
-
-#define MCF5282_QSPI_QAR_ADDR(x) (((x)&0x003F))
-
-#define MCF5282_QSPI_QDR_COMMAND(x) (((x)&0xFF00))
-#define MCF5282_QSPI_QCR_DATA(x) (((x)&0x00FF)<<8)
-#define MCF5282_QSPI_QCR_CONT (0x8000)
-#define MCF5282_QSPI_QCR_BITSE (0x4000)
-#define MCF5282_QSPI_QCR_DT (0x2000)
-#define MCF5282_QSPI_QCR_DSCK (0x1000)
-#define MCF5282_QSPI_QCR_CS (((x)&0x000F)<<8)
-
-/****************************************************************************/
#endif /* m528xsim_h */
diff --git a/arch/m68k/include/asm/m532xsim.h b/arch/m68k/include/asm/m532xsim.h
index 36bf15aec9ae..c4bf1c81e3cf 100644
--- a/arch/m68k/include/asm/m532xsim.h
+++ b/arch/m68k/include/asm/m532xsim.h
@@ -17,6 +17,7 @@
#define MCFINT_UART0 26 /* Interrupt number for UART0 */
#define MCFINT_UART1 27 /* Interrupt number for UART1 */
#define MCFINT_UART2 28 /* Interrupt number for UART2 */
+#define MCFINT_QSPI 31 /* Interrupt number for QSPI */
#define MCF_WTM_WCR MCF_REG16(0xFC098000)
diff --git a/arch/m68k/include/asm/mcfqspi.h b/arch/m68k/include/asm/mcfqspi.h
new file mode 100644
index 000000000000..39d90d51111d
--- /dev/null
+++ b/arch/m68k/include/asm/mcfqspi.h
@@ -0,0 +1,64 @@
+/*
+ * Definitions for Freescale Coldfire QSPI module
+ *
+ * Copyright 2010 Steven King <sfking@fdwdc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+*/
+
+#ifndef mcfqspi_h
+#define mcfqspi_h
+
+#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x)
+#define MCFQSPI_IOBASE (MCF_IPSBAR + 0x340)
+#elif defined(CONFIG_M5249)
+#define MCFQSPI_IOBASE (MCF_MBAR + 0x300)
+#elif defined(CONFIG_M520x) || defined(CONFIG_M532x)
+#define MCFQSPI_IOBASE 0xFC058000
+#endif
+#define MCFQSPI_IOSIZE 0x40
+
+/**
+ * struct mcfqspi_cs_control - chip select control for the coldfire qspi driver
+ * @setup: setup the control; allocate gpio's, etc. May be NULL.
+ * @teardown: finish with the control; free gpio's, etc. May be NULL.
+ * @select: output the signals to select the device. Can not be NULL.
+ * @deselect: output the signals to deselect the device. Can not be NULL.
+ *
+ * The QSPI module has 4 hardware chip selects. We don't use them. Instead
+ * platforms are required to supply a mcfqspi_cs_control as a part of the
+ * platform data for each QSPI master controller. Only the select and
+ * deselect functions are required.
+*/
+struct mcfqspi_cs_control {
+ int (*setup)(struct mcfqspi_cs_control *);
+ void (*teardown)(struct mcfqspi_cs_control *);
+ void (*select)(struct mcfqspi_cs_control *, u8, bool);
+ void (*deselect)(struct mcfqspi_cs_control *, u8, bool);
+};
+
+/**
+ * struct mcfqspi_platform_data - platform data for the coldfire qspi driver
+ * @bus_num: board specific identifier for this qspi driver.
+ * @num_chipselect: number of chip selects supported by this qspi driver.
+ * @cs_control: platform dependent chip select control.
+*/
+struct mcfqspi_platform_data {
+ s16 bus_num;
+ u16 num_chipselect;
+ struct mcfqspi_cs_control *cs_control;
+};
+
+#endif /* mcfqspi_h */
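Board code uses this header by providing a struct mcfqspi_platform_data (whose cs_control supplies at least the select/deselect callbacks) as the platform data of an "mcfqspi" platform device, exactly as the ColdFire config.c changes below do. A stripped-down sketch of that wiring, with hypothetical board_cs_select()/board_cs_deselect() callbacks standing in for the real GPIO handling:

#include <linux/types.h>
#include <linux/platform_device.h>
#include <asm/mcfqspi.h>

/* Hypothetical chip-select callbacks; a real board toggles its CS GPIOs here. */
static void board_cs_select(struct mcfqspi_cs_control *cs, u8 chip_select, bool cs_high) { }
static void board_cs_deselect(struct mcfqspi_cs_control *cs, u8 chip_select, bool cs_high) { }

static struct mcfqspi_cs_control board_cs_control = {
	.select		= board_cs_select,	/* required */
	.deselect	= board_cs_deselect,	/* required */
	/* .setup and .teardown may be left NULL */
};

static struct mcfqspi_platform_data board_qspi_data = {
	.bus_num	= 0,
	.num_chipselect	= 1,
	.cs_control	= &board_cs_control,
};

static struct platform_device board_qspi = {
	.name			= "mcfqspi",
	.id			= 0,
	.dev.platform_data	= &board_qspi_data,
};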
diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c
index 17dc2a31a7ca..4926b3856c15 100644
--- a/arch/m68k/kernel/time.c
+++ b/arch/m68k/kernel/time.c
@@ -73,21 +73,24 @@ static irqreturn_t timer_interrupt(int irq, void *dummy)
return IRQ_HANDLED;
}
-void __init time_init(void)
+void read_persistent_clock(struct timespec *ts)
{
struct rtc_time time;
+ ts->tv_sec = 0;
+ ts->tv_nsec = 0;
if (mach_hwclk) {
mach_hwclk(0, &time);
if ((time.tm_year += 1900) < 1970)
time.tm_year += 100;
- xtime.tv_sec = mktime(time.tm_year, time.tm_mon, time.tm_mday,
+ ts->tv_sec = mktime(time.tm_year, time.tm_mon, time.tm_mday,
time.tm_hour, time.tm_min, time.tm_sec);
- xtime.tv_nsec = 0;
}
- wall_to_monotonic.tv_sec = -xtime.tv_sec;
+}
+void __init time_init(void)
+{
mach_sched_init(timer_interrupt);
}
diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig
index 064f5913db1a..63be09e58d4a 100644
--- a/arch/m68knommu/Kconfig
+++ b/arch/m68knommu/Kconfig
@@ -59,6 +59,10 @@ config GENERIC_HARDIRQS
bool
default y
+config GENERIC_HARDIRQS_NO__DO_IRQ
+ bool
+ default y
+
config GENERIC_CALIBRATE_DELAY
bool
default y
diff --git a/arch/m68knommu/kernel/vmlinux.lds.S b/arch/m68knommu/kernel/vmlinux.lds.S
index 9f1784f586b9..a91b2713451d 100644
--- a/arch/m68knommu/kernel/vmlinux.lds.S
+++ b/arch/m68knommu/kernel/vmlinux.lds.S
@@ -57,7 +57,7 @@ SECTIONS {
.romvec : {
__rom_start = . ;
_romvec = .;
- *(.data.initvect)
+ *(.data..initvect)
} > romvec
#endif
@@ -68,7 +68,7 @@ SECTIONS {
TEXT_TEXT
SCHED_TEXT
LOCK_TEXT
- *(.text.lock)
+ *(.text..lock)
. = ALIGN(16); /* Exception table */
__start___ex_table = .;
diff --git a/arch/m68knommu/platform/520x/config.c b/arch/m68knommu/platform/520x/config.c
index 92614de42cd3..71d2ba474c63 100644
--- a/arch/m68knommu/platform/520x/config.c
+++ b/arch/m68knommu/platform/520x/config.c
@@ -15,10 +15,13 @@
#include <linux/param.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
#include <asm/machdep.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
+#include <asm/mcfqspi.h>
/***************************************************************************/
@@ -74,9 +77,152 @@ static struct platform_device m520x_fec = {
.resource = m520x_fec_resources,
};
+#if defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE)
+static struct resource m520x_qspi_resources[] = {
+ {
+ .start = MCFQSPI_IOBASE,
+ .end = MCFQSPI_IOBASE + MCFQSPI_IOSIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MCFINT_VECBASE + MCFINT_QSPI,
+ .end = MCFINT_VECBASE + MCFINT_QSPI,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+#define MCFQSPI_CS0 62
+#define MCFQSPI_CS1 63
+#define MCFQSPI_CS2 44
+
+static int m520x_cs_setup(struct mcfqspi_cs_control *cs_control)
+{
+ int status;
+
+ status = gpio_request(MCFQSPI_CS0, "MCFQSPI_CS0");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS0 failed\n");
+ goto fail0;
+ }
+ status = gpio_direction_output(MCFQSPI_CS0, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS0 failed\n");
+ goto fail1;
+ }
+
+ status = gpio_request(MCFQSPI_CS1, "MCFQSPI_CS1");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS1 failed\n");
+ goto fail1;
+ }
+ status = gpio_direction_output(MCFQSPI_CS1, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS1 failed\n");
+ goto fail2;
+ }
+
+ status = gpio_request(MCFQSPI_CS2, "MCFQSPI_CS2");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS2 failed\n");
+ goto fail2;
+ }
+ status = gpio_direction_output(MCFQSPI_CS2, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS2 failed\n");
+ goto fail3;
+ }
+
+ return 0;
+
+fail3:
+ gpio_free(MCFQSPI_CS2);
+fail2:
+ gpio_free(MCFQSPI_CS1);
+fail1:
+ gpio_free(MCFQSPI_CS0);
+fail0:
+ return status;
+}
+
+static void m520x_cs_teardown(struct mcfqspi_cs_control *cs_control)
+{
+ gpio_free(MCFQSPI_CS2);
+ gpio_free(MCFQSPI_CS1);
+ gpio_free(MCFQSPI_CS0);
+}
+
+static void m520x_cs_select(struct mcfqspi_cs_control *cs_control,
+ u8 chip_select, bool cs_high)
+{
+ switch (chip_select) {
+ case 0:
+ gpio_set_value(MCFQSPI_CS0, cs_high);
+ break;
+ case 1:
+ gpio_set_value(MCFQSPI_CS1, cs_high);
+ break;
+ case 2:
+ gpio_set_value(MCFQSPI_CS2, cs_high);
+ break;
+ }
+}
+
+static void m520x_cs_deselect(struct mcfqspi_cs_control *cs_control,
+ u8 chip_select, bool cs_high)
+{
+ switch (chip_select) {
+ case 0:
+ gpio_set_value(MCFQSPI_CS0, !cs_high);
+ break;
+ case 1:
+ gpio_set_value(MCFQSPI_CS1, !cs_high);
+ break;
+ case 2:
+ gpio_set_value(MCFQSPI_CS2, !cs_high);
+ break;
+ }
+}
+
+static struct mcfqspi_cs_control m520x_cs_control = {
+ .setup = m520x_cs_setup,
+ .teardown = m520x_cs_teardown,
+ .select = m520x_cs_select,
+ .deselect = m520x_cs_deselect,
+};
+
+static struct mcfqspi_platform_data m520x_qspi_data = {
+ .bus_num = 0,
+ .num_chipselect = 3,
+ .cs_control = &m520x_cs_control,
+};
+
+static struct platform_device m520x_qspi = {
+ .name = "mcfqspi",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(m520x_qspi_resources),
+ .resource = m520x_qspi_resources,
+ .dev.platform_data = &m520x_qspi_data,
+};
+
+static void __init m520x_qspi_init(void)
+{
+ u16 par;
+ /* setup Port QS for QSPI with gpio CS control */
+ writeb(0x3f, MCF_IPSBAR + MCF_GPIO_PAR_QSPI);
+ /* make U1CTS and U2RTS gpio for cs_control */
+ par = readw(MCF_IPSBAR + MCF_GPIO_PAR_UART);
+ par &= 0x00ff;
+ writew(par, MCF_IPSBAR + MCF_GPIO_PAR_UART);
+}
+#endif /* defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE) */
+
+
static struct platform_device *m520x_devices[] __initdata = {
&m520x_uart,
&m520x_fec,
+#if defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE)
+ &m520x_qspi,
+#endif
};
/***************************************************************************/
@@ -147,6 +293,9 @@ void __init config_BSP(char *commandp, int size)
mach_reset = m520x_cpu_reset;
m520x_uarts_init();
m520x_fec_init();
+#if defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE)
+ m520x_qspi_init();
+#endif
}
/***************************************************************************/
diff --git a/arch/m68knommu/platform/523x/config.c b/arch/m68knommu/platform/523x/config.c
index 6ba84f2aa397..8980f6d7715a 100644
--- a/arch/m68knommu/platform/523x/config.c
+++ b/arch/m68knommu/platform/523x/config.c
@@ -16,10 +16,13 @@
#include <linux/param.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
#include <asm/machdep.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
+#include <asm/mcfqspi.h>
/***************************************************************************/
@@ -75,9 +78,173 @@ static struct platform_device m523x_fec = {
.resource = m523x_fec_resources,
};
+#if defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE)
+static struct resource m523x_qspi_resources[] = {
+ {
+ .start = MCFQSPI_IOBASE,
+ .end = MCFQSPI_IOBASE + MCFQSPI_IOSIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MCFINT_VECBASE + MCFINT_QSPI,
+ .end = MCFINT_VECBASE + MCFINT_QSPI,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+#define MCFQSPI_CS0 91
+#define MCFQSPI_CS1 92
+#define MCFQSPI_CS2 103
+#define MCFQSPI_CS3 99
+
+static int m523x_cs_setup(struct mcfqspi_cs_control *cs_control)
+{
+ int status;
+
+ status = gpio_request(MCFQSPI_CS0, "MCFQSPI_CS0");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS0 failed\n");
+ goto fail0;
+ }
+ status = gpio_direction_output(MCFQSPI_CS0, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS0 failed\n");
+ goto fail1;
+ }
+
+ status = gpio_request(MCFQSPI_CS1, "MCFQSPI_CS1");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS1 failed\n");
+ goto fail1;
+ }
+ status = gpio_direction_output(MCFQSPI_CS1, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS1 failed\n");
+ goto fail2;
+ }
+
+ status = gpio_request(MCFQSPI_CS2, "MCFQSPI_CS2");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS2 failed\n");
+ goto fail2;
+ }
+ status = gpio_direction_output(MCFQSPI_CS2, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS2 failed\n");
+ goto fail3;
+ }
+
+ status = gpio_request(MCFQSPI_CS3, "MCFQSPI_CS3");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS3 failed\n");
+ goto fail3;
+ }
+ status = gpio_direction_output(MCFQSPI_CS3, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS3 failed\n");
+ goto fail4;
+ }
+
+ return 0;
+
+fail4:
+ gpio_free(MCFQSPI_CS3);
+fail3:
+ gpio_free(MCFQSPI_CS2);
+fail2:
+ gpio_free(MCFQSPI_CS1);
+fail1:
+ gpio_free(MCFQSPI_CS0);
+fail0:
+ return status;
+}
+
+static void m523x_cs_teardown(struct mcfqspi_cs_control *cs_control)
+{
+ gpio_free(MCFQSPI_CS3);
+ gpio_free(MCFQSPI_CS2);
+ gpio_free(MCFQSPI_CS1);
+ gpio_free(MCFQSPI_CS0);
+}
+
+static void m523x_cs_select(struct mcfqspi_cs_control *cs_control,
+ u8 chip_select, bool cs_high)
+{
+ switch (chip_select) {
+ case 0:
+ gpio_set_value(MCFQSPI_CS0, cs_high);
+ break;
+ case 1:
+ gpio_set_value(MCFQSPI_CS1, cs_high);
+ break;
+ case 2:
+ gpio_set_value(MCFQSPI_CS2, cs_high);
+ break;
+ case 3:
+ gpio_set_value(MCFQSPI_CS3, cs_high);
+ break;
+ }
+}
+
+static void m523x_cs_deselect(struct mcfqspi_cs_control *cs_control,
+ u8 chip_select, bool cs_high)
+{
+ switch (chip_select) {
+ case 0:
+ gpio_set_value(MCFQSPI_CS0, !cs_high);
+ break;
+ case 1:
+ gpio_set_value(MCFQSPI_CS1, !cs_high);
+ break;
+ case 2:
+ gpio_set_value(MCFQSPI_CS2, !cs_high);
+ break;
+ case 3:
+ gpio_set_value(MCFQSPI_CS3, !cs_high);
+ break;
+ }
+}
+
+static struct mcfqspi_cs_control m523x_cs_control = {
+ .setup = m523x_cs_setup,
+ .teardown = m523x_cs_teardown,
+ .select = m523x_cs_select,
+ .deselect = m523x_cs_deselect,
+};
+
+static struct mcfqspi_platform_data m523x_qspi_data = {
+ .bus_num = 0,
+ .num_chipselect = 4,
+ .cs_control = &m523x_cs_control,
+};
+
+static struct platform_device m523x_qspi = {
+ .name = "mcfqspi",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(m523x_qspi_resources),
+ .resource = m523x_qspi_resources,
+ .dev.platform_data = &m523x_qspi_data,
+};
+
+static void __init m523x_qspi_init(void)
+{
+ u16 par;
+
+ /* setup QSPS pins for QSPI with gpio CS control */
+ writeb(0x1f, MCFGPIO_PAR_QSPI);
+ /* and CS2 & CS3 as gpio */
+ par = readw(MCFGPIO_PAR_TIMER);
+ par &= 0x3f3f;
+ writew(par, MCFGPIO_PAR_TIMER);
+}
+#endif /* defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE) */
+
static struct platform_device *m523x_devices[] __initdata = {
&m523x_uart,
&m523x_fec,
+#if defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE)
+ &m523x_qspi,
+#endif
};
/***************************************************************************/
@@ -114,6 +281,9 @@ void __init config_BSP(char *commandp, int size)
static int __init init_BSP(void)
{
m523x_fec_init();
+#if defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE)
+ m523x_qspi_init();
+#endif
platform_add_devices(m523x_devices, ARRAY_SIZE(m523x_devices));
return 0;
}
diff --git a/arch/m68knommu/platform/5249/config.c b/arch/m68knommu/platform/5249/config.c
index 646f5ba462fc..72fc4ae0e663 100644
--- a/arch/m68knommu/platform/5249/config.c
+++ b/arch/m68knommu/platform/5249/config.c
@@ -12,10 +12,13 @@
#include <linux/param.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
#include <asm/machdep.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
+#include <asm/mcfqspi.h>
/***************************************************************************/
@@ -37,8 +40,169 @@ static struct platform_device m5249_uart = {
.dev.platform_data = m5249_uart_platform,
};
+#if defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE)
+static struct resource m5249_qspi_resources[] = {
+ {
+ .start = MCFQSPI_IOBASE,
+ .end = MCFQSPI_IOBASE + MCFQSPI_IOSIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MCF_IRQ_QSPI,
+ .end = MCF_IRQ_QSPI,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+#define MCFQSPI_CS0 29
+#define MCFQSPI_CS1 24
+#define MCFQSPI_CS2 21
+#define MCFQSPI_CS3 22
+
+static int m5249_cs_setup(struct mcfqspi_cs_control *cs_control)
+{
+ int status;
+
+ status = gpio_request(MCFQSPI_CS0, "MCFQSPI_CS0");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS0 failed\n");
+ goto fail0;
+ }
+ status = gpio_direction_output(MCFQSPI_CS0, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS0 failed\n");
+ goto fail1;
+ }
+
+ status = gpio_request(MCFQSPI_CS1, "MCFQSPI_CS1");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS1 failed\n");
+ goto fail1;
+ }
+ status = gpio_direction_output(MCFQSPI_CS1, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS1 failed\n");
+ goto fail2;
+ }
+
+ status = gpio_request(MCFQSPI_CS2, "MCFQSPI_CS2");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS2 failed\n");
+ goto fail2;
+ }
+ status = gpio_direction_output(MCFQSPI_CS2, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS2 failed\n");
+ goto fail3;
+ }
+
+ status = gpio_request(MCFQSPI_CS3, "MCFQSPI_CS3");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS3 failed\n");
+ goto fail3;
+ }
+ status = gpio_direction_output(MCFQSPI_CS3, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS3 failed\n");
+ goto fail4;
+ }
+
+ return 0;
+
+fail4:
+ gpio_free(MCFQSPI_CS3);
+fail3:
+ gpio_free(MCFQSPI_CS2);
+fail2:
+ gpio_free(MCFQSPI_CS1);
+fail1:
+ gpio_free(MCFQSPI_CS0);
+fail0:
+ return status;
+}
+
+static void m5249_cs_teardown(struct mcfqspi_cs_control *cs_control)
+{
+ gpio_free(MCFQSPI_CS3);
+ gpio_free(MCFQSPI_CS2);
+ gpio_free(MCFQSPI_CS1);
+ gpio_free(MCFQSPI_CS0);
+}
+
+static void m5249_cs_select(struct mcfqspi_cs_control *cs_control,
+ u8 chip_select, bool cs_high)
+{
+ switch (chip_select) {
+ case 0:
+ gpio_set_value(MCFQSPI_CS0, cs_high);
+ break;
+ case 1:
+ gpio_set_value(MCFQSPI_CS1, cs_high);
+ break;
+ case 2:
+ gpio_set_value(MCFQSPI_CS2, cs_high);
+ break;
+ case 3:
+ gpio_set_value(MCFQSPI_CS3, cs_high);
+ break;
+ }
+}
+
+static void m5249_cs_deselect(struct mcfqspi_cs_control *cs_control,
+ u8 chip_select, bool cs_high)
+{
+ switch (chip_select) {
+ case 0:
+ gpio_set_value(MCFQSPI_CS0, !cs_high);
+ break;
+ case 1:
+ gpio_set_value(MCFQSPI_CS1, !cs_high);
+ break;
+ case 2:
+ gpio_set_value(MCFQSPI_CS2, !cs_high);
+ break;
+ case 3:
+ gpio_set_value(MCFQSPI_CS3, !cs_high);
+ break;
+ }
+}
+
+static struct mcfqspi_cs_control m5249_cs_control = {
+ .setup = m5249_cs_setup,
+ .teardown = m5249_cs_teardown,
+ .select = m5249_cs_select,
+ .deselect = m5249_cs_deselect,
+};
+
+static struct mcfqspi_platform_data m5249_qspi_data = {
+ .bus_num = 0,
+ .num_chipselect = 4,
+ .cs_control = &m5249_cs_control,
+};
+
+static struct platform_device m5249_qspi = {
+ .name = "mcfqspi",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(m5249_qspi_resources),
+ .resource = m5249_qspi_resources,
+ .dev.platform_data = &m5249_qspi_data,
+};
+
+static void __init m5249_qspi_init(void)
+{
+ /* QSPI irq setup */
+ writeb(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL4 | MCFSIM_ICR_PRI0,
+ MCF_MBAR + MCFSIM_QSPIICR);
+ mcf_mapirq2imr(MCF_IRQ_QSPI, MCFINTC_QSPI);
+}
+#endif /* defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE) */
+
+
static struct platform_device *m5249_devices[] __initdata = {
&m5249_uart,
+#if defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE)
+ &m5249_qspi,
+#endif
};
/***************************************************************************/
@@ -100,6 +264,9 @@ void __init config_BSP(char *commandp, int size)
mach_reset = m5249_cpu_reset;
m5249_timers_init();
m5249_uarts_init();
+#if defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE)
+ m5249_qspi_init();
+#endif
}
/***************************************************************************/
diff --git a/arch/m68knommu/platform/5272/intc.c b/arch/m68knommu/platform/5272/intc.c
index 7081e0a9720e..2889f7d7cf70 100644
--- a/arch/m68knommu/platform/5272/intc.c
+++ b/arch/m68knommu/platform/5272/intc.c
@@ -103,8 +103,26 @@ static void intc_irq_ack(unsigned int irq)
static int intc_irq_set_type(unsigned int irq, unsigned int type)
{
- /* We can set the edge type here for external interrupts */
- return 0;
+ /* set the edge type for external interrupts */
+ u32 pitr;
+
+ if ((type != IRQF_TRIGGER_RISING) && (type != IRQF_TRIGGER_FALLING))
+ return -EINVAL;
+
+ switch (irq) {
+ case MCF_IRQ_EINT1 ... MCF_IRQ_EINT4:
+ case MCF_IRQ_EINT5 ... MCF_IRQ_EINT6:
+ pitr = __raw_readl(MCFSIM_PITR);
+ if (type & IRQF_TRIGGER_RISING)
+ pitr |= 1 << (96 - irq);
+ else
+ pitr &= ~(1 << (96 - irq));
+ __raw_writel(pitr, MCFSIM_PITR);
+
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
static struct irq_chip intc_irq_chip = {
@@ -128,11 +146,16 @@ void __init init_IRQ(void)
writel(0x88888888, MCF_MBAR + MCFSIM_ICR4);
for (irq = 0; (irq < NR_IRQS); irq++) {
- irq_desc[irq].status = IRQ_DISABLED;
- irq_desc[irq].action = NULL;
- irq_desc[irq].depth = 1;
- irq_desc[irq].chip = &intc_irq_chip;
- intc_irq_set_type(irq, 0);
+ switch (irq) {
+ case MCF_IRQ_EINT1 ... MCF_IRQ_EINT4:
+ case MCF_IRQ_EINT5 ... MCF_IRQ_EINT6:
+ set_irq_chip_and_handler(irq, &intc_irq_chip,
+ handle_edge_irq);
+ break;
+ default:
+ set_irq_chip_and_handler(irq, &intc_irq_chip,
+ handle_level_irq);
+ break;
+ }
}
}
-
diff --git a/arch/m68knommu/platform/527x/config.c b/arch/m68knommu/platform/527x/config.c
index fa51be172830..3d9c35c98b98 100644
--- a/arch/m68knommu/platform/527x/config.c
+++ b/arch/m68knommu/platform/527x/config.c
@@ -16,10 +16,13 @@
#include <linux/param.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
#include <asm/machdep.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
+#include <asm/mcfqspi.h>
/***************************************************************************/
@@ -106,12 +109,188 @@ static struct platform_device m527x_fec[] = {
},
};
+#if defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE)
+static struct resource m527x_qspi_resources[] = {
+ {
+ .start = MCFQSPI_IOBASE,
+ .end = MCFQSPI_IOBASE + MCFQSPI_IOSIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MCFINT_VECBASE + MCFINT_QSPI,
+ .end = MCFINT_VECBASE + MCFINT_QSPI,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+#if defined(CONFIG_M5271)
+#define MCFQSPI_CS0 91
+#define MCFQSPI_CS1 92
+#define MCFQSPI_CS2 99
+#define MCFQSPI_CS3 103
+#elif defined(CONFIG_M5275)
+#define MCFQSPI_CS0 59
+#define MCFQSPI_CS1 60
+#define MCFQSPI_CS2 61
+#define MCFQSPI_CS3 62
+#endif
+
+static int m527x_cs_setup(struct mcfqspi_cs_control *cs_control)
+{
+ int status;
+
+ status = gpio_request(MCFQSPI_CS0, "MCFQSPI_CS0");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS0 failed\n");
+ goto fail0;
+ }
+ status = gpio_direction_output(MCFQSPI_CS0, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS0 failed\n");
+ goto fail1;
+ }
+
+ status = gpio_request(MCFQSPI_CS1, "MCFQSPI_CS1");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS1 failed\n");
+ goto fail1;
+ }
+ status = gpio_direction_output(MCFQSPI_CS1, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS1 failed\n");
+ goto fail2;
+ }
+
+ status = gpio_request(MCFQSPI_CS2, "MCFQSPI_CS2");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS2 failed\n");
+ goto fail2;
+ }
+ status = gpio_direction_output(MCFQSPI_CS2, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS2 failed\n");
+ goto fail3;
+ }
+
+ status = gpio_request(MCFQSPI_CS3, "MCFQSPI_CS3");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS3 failed\n");
+ goto fail3;
+ }
+ status = gpio_direction_output(MCFQSPI_CS3, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS3 failed\n");
+ goto fail4;
+ }
+
+ return 0;
+
+fail4:
+ gpio_free(MCFQSPI_CS3);
+fail3:
+ gpio_free(MCFQSPI_CS2);
+fail2:
+ gpio_free(MCFQSPI_CS1);
+fail1:
+ gpio_free(MCFQSPI_CS0);
+fail0:
+ return status;
+}
+
+static void m527x_cs_teardown(struct mcfqspi_cs_control *cs_control)
+{
+ gpio_free(MCFQSPI_CS3);
+ gpio_free(MCFQSPI_CS2);
+ gpio_free(MCFQSPI_CS1);
+ gpio_free(MCFQSPI_CS0);
+}
+
+static void m527x_cs_select(struct mcfqspi_cs_control *cs_control,
+ u8 chip_select, bool cs_high)
+{
+ switch (chip_select) {
+ case 0:
+ gpio_set_value(MCFQSPI_CS0, cs_high);
+ break;
+ case 1:
+ gpio_set_value(MCFQSPI_CS1, cs_high);
+ break;
+ case 2:
+ gpio_set_value(MCFQSPI_CS2, cs_high);
+ break;
+ case 3:
+ gpio_set_value(MCFQSPI_CS3, cs_high);
+ break;
+ }
+}
+
+static void m527x_cs_deselect(struct mcfqspi_cs_control *cs_control,
+ u8 chip_select, bool cs_high)
+{
+ switch (chip_select) {
+ case 0:
+ gpio_set_value(MCFQSPI_CS0, !cs_high);
+ break;
+ case 1:
+ gpio_set_value(MCFQSPI_CS1, !cs_high);
+ break;
+ case 2:
+ gpio_set_value(MCFQSPI_CS2, !cs_high);
+ break;
+ case 3:
+ gpio_set_value(MCFQSPI_CS3, !cs_high);
+ break;
+ }
+}
+
+static struct mcfqspi_cs_control m527x_cs_control = {
+ .setup = m527x_cs_setup,
+ .teardown = m527x_cs_teardown,
+ .select = m527x_cs_select,
+ .deselect = m527x_cs_deselect,
+};
+
+static struct mcfqspi_platform_data m527x_qspi_data = {
+ .bus_num = 0,
+ .num_chipselect = 4,
+ .cs_control = &m527x_cs_control,
+};
+
+static struct platform_device m527x_qspi = {
+ .name = "mcfqspi",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(m527x_qspi_resources),
+ .resource = m527x_qspi_resources,
+ .dev.platform_data = &m527x_qspi_data,
+};
+
+static void __init m527x_qspi_init(void)
+{
+#if defined(CONFIG_M5271)
+ u16 par;
+
+ /* setup QSPS pins for QSPI with gpio CS control */
+ writeb(0x1f, MCFGPIO_PAR_QSPI);
+ /* and CS2 & CS3 as gpio */
+ par = readw(MCFGPIO_PAR_TIMER);
+ par &= 0x3f3f;
+ writew(par, MCFGPIO_PAR_TIMER);
+#elif defined(CONFIG_M5275)
+ /* setup QSPS pins for QSPI with gpio CS control */
+ writew(0x003e, MCFGPIO_PAR_QSPI);
+#endif
+}
+#endif /* defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE) */
+
static struct platform_device *m527x_devices[] __initdata = {
&m527x_uart,
&m527x_fec[0],
#ifdef CONFIG_FEC2
&m527x_fec[1],
#endif
+#if defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE)
+ &m527x_qspi,
+#endif
};
/***************************************************************************/
@@ -187,6 +366,9 @@ void __init config_BSP(char *commandp, int size)
mach_reset = m527x_cpu_reset;
m527x_uarts_init();
m527x_fec_init();
+#if defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE)
+ m527x_qspi_init();
+#endif
}
/***************************************************************************/
diff --git a/arch/m68knommu/platform/528x/config.c b/arch/m68knommu/platform/528x/config.c
index 6e608d1836f1..76b743343bfa 100644
--- a/arch/m68knommu/platform/528x/config.c
+++ b/arch/m68knommu/platform/528x/config.c
@@ -17,10 +17,13 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
#include <asm/machdep.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
+#include <asm/mcfqspi.h>
/***************************************************************************/
@@ -76,10 +79,141 @@ static struct platform_device m528x_fec = {
.resource = m528x_fec_resources,
};
+#if defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE)
+static struct resource m528x_qspi_resources[] = {
+ {
+ .start = MCFQSPI_IOBASE,
+ .end = MCFQSPI_IOBASE + MCFQSPI_IOSIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MCFINT_VECBASE + MCFINT_QSPI,
+ .end = MCFINT_VECBASE + MCFINT_QSPI,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+#define MCFQSPI_CS0 147
+#define MCFQSPI_CS1 148
+#define MCFQSPI_CS2 149
+#define MCFQSPI_CS3 150
+
+static int m528x_cs_setup(struct mcfqspi_cs_control *cs_control)
+{
+ int status;
+
+ status = gpio_request(MCFQSPI_CS0, "MCFQSPI_CS0");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS0 failed\n");
+ goto fail0;
+ }
+ status = gpio_direction_output(MCFQSPI_CS0, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS0 failed\n");
+ goto fail1;
+ }
+
+ status = gpio_request(MCFQSPI_CS1, "MCFQSPI_CS1");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS1 failed\n");
+ goto fail1;
+ }
+ status = gpio_direction_output(MCFQSPI_CS1, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS1 failed\n");
+ goto fail2;
+ }
+
+ status = gpio_request(MCFQSPI_CS2, "MCFQSPI_CS2");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS2 failed\n");
+ goto fail2;
+ }
+ status = gpio_direction_output(MCFQSPI_CS2, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS2 failed\n");
+ goto fail3;
+ }
+
+ status = gpio_request(MCFQSPI_CS3, "MCFQSPI_CS3");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS3 failed\n");
+ goto fail3;
+ }
+ status = gpio_direction_output(MCFQSPI_CS3, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS3 failed\n");
+ goto fail4;
+ }
+
+ return 0;
+
+fail4:
+ gpio_free(MCFQSPI_CS3);
+fail3:
+ gpio_free(MCFQSPI_CS2);
+fail2:
+ gpio_free(MCFQSPI_CS1);
+fail1:
+ gpio_free(MCFQSPI_CS0);
+fail0:
+ return status;
+}
+
+static void m528x_cs_teardown(struct mcfqspi_cs_control *cs_control)
+{
+ gpio_free(MCFQSPI_CS3);
+ gpio_free(MCFQSPI_CS2);
+ gpio_free(MCFQSPI_CS1);
+ gpio_free(MCFQSPI_CS0);
+}
+
+static void m528x_cs_select(struct mcfqspi_cs_control *cs_control,
+ u8 chip_select, bool cs_high)
+{
+ gpio_set_value(MCFQSPI_CS0 + chip_select, cs_high);
+}
+
+static void m528x_cs_deselect(struct mcfqspi_cs_control *cs_control,
+ u8 chip_select, bool cs_high)
+{
+ gpio_set_value(MCFQSPI_CS0 + chip_select, !cs_high);
+}
+
+static struct mcfqspi_cs_control m528x_cs_control = {
+ .setup = m528x_cs_setup,
+ .teardown = m528x_cs_teardown,
+ .select = m528x_cs_select,
+ .deselect = m528x_cs_deselect,
+};
+
+static struct mcfqspi_platform_data m528x_qspi_data = {
+ .bus_num = 0,
+ .num_chipselect = 4,
+ .cs_control = &m528x_cs_control,
+};
+
+static struct platform_device m528x_qspi = {
+ .name = "mcfqspi",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(m528x_qspi_resources),
+ .resource = m528x_qspi_resources,
+ .dev.platform_data = &m528x_qspi_data,
+};
+
+static void __init m528x_qspi_init(void)
+{
+ /* setup Port QS for QSPI with gpio CS control */
+ __raw_writeb(0x07, MCFGPIO_PQSPAR);
+}
+#endif /* defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE) */
static struct platform_device *m528x_devices[] __initdata = {
&m528x_uart,
&m528x_fec,
+#if defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE)
+ &m528x_qspi,
+#endif
};
/***************************************************************************/
@@ -174,6 +308,9 @@ static int __init init_BSP(void)
mach_reset = m528x_cpu_reset;
m528x_uarts_init();
m528x_fec_init();
+#if defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE)
+ m528x_qspi_init();
+#endif
platform_add_devices(m528x_devices, ARRAY_SIZE(m528x_devices));
return 0;
}
diff --git a/arch/m68knommu/platform/532x/config.c b/arch/m68knommu/platform/532x/config.c
index d632948e64e5..ca51323f957b 100644
--- a/arch/m68knommu/platform/532x/config.c
+++ b/arch/m68knommu/platform/532x/config.c
@@ -21,12 +21,15 @@
#include <linux/param.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
#include <asm/machdep.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
#include <asm/mcfdma.h>
#include <asm/mcfwdebug.h>
+#include <asm/mcfqspi.h>
/***************************************************************************/
@@ -82,9 +85,127 @@ static struct platform_device m532x_fec = {
.resource = m532x_fec_resources,
};
+#if defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE)
+static struct resource m532x_qspi_resources[] = {
+ {
+ .start = MCFQSPI_IOBASE,
+ .end = MCFQSPI_IOBASE + MCFQSPI_IOSIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MCFINT_VECBASE + MCFINT_QSPI,
+ .end = MCFINT_VECBASE + MCFINT_QSPI,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+#define MCFQSPI_CS0 84
+#define MCFQSPI_CS1 85
+#define MCFQSPI_CS2 86
+
+static int m532x_cs_setup(struct mcfqspi_cs_control *cs_control)
+{
+ int status;
+
+ status = gpio_request(MCFQSPI_CS0, "MCFQSPI_CS0");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS0 failed\n");
+ goto fail0;
+ }
+ status = gpio_direction_output(MCFQSPI_CS0, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS0 failed\n");
+ goto fail1;
+ }
+
+ status = gpio_request(MCFQSPI_CS1, "MCFQSPI_CS1");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS1 failed\n");
+ goto fail1;
+ }
+ status = gpio_direction_output(MCFQSPI_CS1, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS1 failed\n");
+ goto fail2;
+ }
+
+ status = gpio_request(MCFQSPI_CS2, "MCFQSPI_CS2");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS2 failed\n");
+ goto fail2;
+ }
+ status = gpio_direction_output(MCFQSPI_CS2, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS2 failed\n");
+ goto fail3;
+ }
+
+ return 0;
+
+fail3:
+ gpio_free(MCFQSPI_CS2);
+fail2:
+ gpio_free(MCFQSPI_CS1);
+fail1:
+ gpio_free(MCFQSPI_CS0);
+fail0:
+ return status;
+}
+
+static void m532x_cs_teardown(struct mcfqspi_cs_control *cs_control)
+{
+ gpio_free(MCFQSPI_CS2);
+ gpio_free(MCFQSPI_CS1);
+ gpio_free(MCFQSPI_CS0);
+}
+
+static void m532x_cs_select(struct mcfqspi_cs_control *cs_control,
+ u8 chip_select, bool cs_high)
+{
+ gpio_set_value(MCFQSPI_CS0 + chip_select, cs_high);
+}
+
+static void m532x_cs_deselect(struct mcfqspi_cs_control *cs_control,
+ u8 chip_select, bool cs_high)
+{
+ gpio_set_value(MCFQSPI_CS0 + chip_select, !cs_high);
+}
+
+static struct mcfqspi_cs_control m532x_cs_control = {
+ .setup = m532x_cs_setup,
+ .teardown = m532x_cs_teardown,
+ .select = m532x_cs_select,
+ .deselect = m532x_cs_deselect,
+};
+
+static struct mcfqspi_platform_data m532x_qspi_data = {
+ .bus_num = 0,
+ .num_chipselect = 3,
+ .cs_control = &m532x_cs_control,
+};
+
+static struct platform_device m532x_qspi = {
+ .name = "mcfqspi",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(m532x_qspi_resources),
+ .resource = m532x_qspi_resources,
+ .dev.platform_data = &m532x_qspi_data,
+};
+
+static void __init m532x_qspi_init(void)
+{
+ /* setup QSPS pins for QSPI with gpio CS control */
+ writew(0x01f0, MCF_GPIO_PAR_QSPI);
+}
+#endif /* defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE) */
+
+
static struct platform_device *m532x_devices[] __initdata = {
&m532x_uart,
&m532x_fec,
+#if defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE)
+ &m532x_qspi,
+#endif
};
/***************************************************************************/
@@ -158,6 +279,9 @@ static int __init init_BSP(void)
{
m532x_uarts_init();
m532x_fec_init();
+#if defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE)
+ m532x_qspi_init();
+#endif
platform_add_devices(m532x_devices, ARRAY_SIZE(m532x_devices));
return 0;
}
diff --git a/arch/m68knommu/platform/68328/ints.c b/arch/m68knommu/platform/68328/ints.c
index b91ee85d4b5d..8a6a10212fb5 100644
--- a/arch/m68knommu/platform/68328/ints.c
+++ b/arch/m68knommu/platform/68328/ints.c
@@ -178,11 +178,7 @@ void __init init_IRQ(void)
/* turn off all interrupts */
IMR = ~0;
- for (i = 0; (i < NR_IRQS); i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = NULL;
- irq_desc[i].depth = 1;
- irq_desc[i].chip = &intc_irq_chip;
- }
+ for (i = 0; (i < NR_IRQS); i++)
+ set_irq_chip_and_handler(i, &intc_irq_chip, handle_level_irq);
}
diff --git a/arch/m68knommu/platform/68360/head-ram.S b/arch/m68knommu/platform/68360/head-ram.S
index 2ef06242398b..8eb94fb6b971 100644
--- a/arch/m68knommu/platform/68360/head-ram.S
+++ b/arch/m68knommu/platform/68360/head-ram.S
@@ -280,7 +280,7 @@ _dprbase:
* and then overwritten as needed.
*/
-.section ".data.initvect","awx"
+.section ".data..initvect","awx"
.long RAMEND /* Reset: Initial Stack Pointer - 0. */
.long _start /* Reset: Initial Program Counter - 1. */
.long buserr /* Bus Error - 2. */
diff --git a/arch/m68knommu/platform/68360/head-rom.S b/arch/m68knommu/platform/68360/head-rom.S
index 62ecf4144b3b..97510e55b802 100644
--- a/arch/m68knommu/platform/68360/head-rom.S
+++ b/arch/m68knommu/platform/68360/head-rom.S
@@ -291,7 +291,7 @@ _dprbase:
* and then overwritten as needed.
*/
-.section ".data.initvect","awx"
+.section ".data..initvect","awx"
.long RAMEND /* Reset: Initial Stack Pointer - 0. */
.long _start /* Reset: Initial Program Counter - 1. */
.long buserr /* Bus Error - 2. */
diff --git a/arch/m68knommu/platform/68360/ints.c b/arch/m68knommu/platform/68360/ints.c
index 6f22970d8c20..8fd487e620d0 100644
--- a/arch/m68knommu/platform/68360/ints.c
+++ b/arch/m68knommu/platform/68360/ints.c
@@ -131,11 +131,7 @@ void init_IRQ(void)
/* turn off all CPM interrupts */
pquicc->intr_cimr = 0x00000000;
- for (i = 0; (i < NR_IRQS); i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = NULL;
- irq_desc[i].depth = 1;
- irq_desc[i].chip = &intc_irq_chip;
- }
+ for (i = 0; (i < NR_IRQS); i++)
+ set_irq_chip_and_handler(i, &intc_irq_chip, handle_level_irq);
}
diff --git a/arch/m68knommu/platform/coldfire/intc-2.c b/arch/m68knommu/platform/coldfire/intc-2.c
index 5598c8b8661f..2863285927de 100644
--- a/arch/m68knommu/platform/coldfire/intc-2.c
+++ b/arch/m68knommu/platform/coldfire/intc-2.c
@@ -39,6 +39,15 @@ static void intc_irq_mask(unsigned int irq)
val = __raw_readl(imraddr);
__raw_writel(val | imrbit, imraddr);
+
+ /* only on eport */
+ if (irq >= MCFGPIO_IRQ_VECBASE &&
+ irq < (MCFGPIO_IRQ_VECBASE + MCFGPIO_IRQ_MAX)) {
+
+ u8 epier = __raw_readb(MCFEPORT_EPIER);
+ epier &= ~(1 << (irq - MCFGPIO_IRQ_VECBASE));
+ __raw_writeb(epier, MCFEPORT_EPIER);
+ }
}
}
@@ -64,13 +73,72 @@ static void intc_irq_unmask(unsigned int irq)
val = __raw_readl(imraddr);
__raw_writel(val & ~imrbit, imraddr);
+
+ /* only on eport */
+ if (irq >= MCFGPIO_IRQ_VECBASE &&
+ irq < (MCFGPIO_IRQ_VECBASE + MCFGPIO_IRQ_MAX)) {
+
+ u8 epier = __raw_readb(MCFEPORT_EPIER);
+ epier |= 1 << (irq - MCFGPIO_IRQ_VECBASE);
+ __raw_writeb(epier, MCFEPORT_EPIER);
+ }
+ }
+}
+
+static void intc_irq_ack(unsigned int irq)
+{
+ /* only on eport */
+ if (irq >= MCFGPIO_IRQ_VECBASE &&
+ irq < (MCFGPIO_IRQ_VECBASE + MCFGPIO_IRQ_MAX)) {
+ u8 epfr = __raw_readb(MCFEPORT_EPFR);
+ epfr |= 1 << (irq - MCFGPIO_IRQ_VECBASE);
+ __raw_writeb(epfr, MCFEPORT_EPFR);
}
}
+static int intc_irq_set_type(unsigned int irq, unsigned int flow_type)
+{
+ unsigned shift;
+ u16 eppar;
+
+ /* only on eport */
+ if (irq < MCFGPIO_IRQ_VECBASE ||
+ irq >= (MCFGPIO_IRQ_VECBASE + MCFGPIO_IRQ_MAX))
+ return -EINVAL;
+
+ /* we only support TRIGGER_LOW or either (or both) RISING and FALLING */
+ if ((flow_type & IRQF_TRIGGER_HIGH) ||
+ ((flow_type & IRQF_TRIGGER_LOW) &&
+ (flow_type & (IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING))))
+ return -EINVAL;
+
+ shift = (irq - MCFGPIO_IRQ_VECBASE) * 2;
+
+ /* default to TRIGGER_LOW */
+ eppar = 0;
+ if (flow_type & IRQF_TRIGGER_RISING)
+ eppar |= (0x01 << shift);
+ if (flow_type & IRQF_TRIGGER_FALLING)
+ eppar |= (0x02 << shift);
+
+ if (eppar)
+ set_irq_handler(irq, handle_edge_irq);
+ else
+ set_irq_handler(irq, handle_level_irq);
+
+ eppar |= (__raw_readw(MCFEPORT_EPPAR) & ~(0x3 << shift));
+ __raw_writew(eppar, MCFEPORT_EPPAR);
+
+ return 0;
+}
+
static struct irq_chip intc_irq_chip = {
.name = "CF-INTC",
.mask = intc_irq_mask,
.unmask = intc_irq_unmask,
+ .ack = intc_irq_ack,
+ .set_type = intc_irq_set_type,
};
void __init init_IRQ(void)
@@ -83,11 +151,7 @@ void __init init_IRQ(void)
__raw_writel(0x1, MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRL);
__raw_writel(0x1, MCF_IPSBAR + MCFICM_INTC1 + MCFINTC_IMRL);
- for (irq = 0; (irq < NR_IRQS); irq++) {
- irq_desc[irq].status = IRQ_DISABLED;
- irq_desc[irq].action = NULL;
- irq_desc[irq].depth = 1;
- irq_desc[irq].chip = &intc_irq_chip;
- }
+ for (irq = 0; (irq < NR_IRQS); irq++)
+ set_irq_chip_and_handler(irq, &intc_irq_chip, handle_level_irq);
}
diff --git a/arch/m68knommu/platform/coldfire/intc-simr.c b/arch/m68knommu/platform/coldfire/intc-simr.c
index 1b01e79c2f63..cc5473f6c2c7 100644
--- a/arch/m68knommu/platform/coldfire/intc-simr.c
+++ b/arch/m68knommu/platform/coldfire/intc-simr.c
@@ -26,6 +26,15 @@ static void intc_irq_mask(unsigned int irq)
else if ((irq < MCFINT_VECBASE + 128) && MCFINTC1_SIMR)
__raw_writeb(irq - MCFINT_VECBASE - 64, MCFINTC1_SIMR);
}
+
+ /* only on eport */
+ if (irq >= MCFGPIO_IRQ_VECBASE &&
+ irq < (MCFGPIO_IRQ_VECBASE + MCFGPIO_IRQ_MAX)) {
+
+ u8 epier = __raw_readb(MCFEPORT_EPIER);
+ epier &= ~(1 << (irq - MCFGPIO_IRQ_VECBASE));
+ __raw_writeb(epier, MCFEPORT_EPIER);
+ }
}
static void intc_irq_unmask(unsigned int irq)
@@ -36,10 +45,63 @@ static void intc_irq_unmask(unsigned int irq)
else if ((irq < MCFINT_VECBASE + 128) && MCFINTC1_CIMR)
__raw_writeb(irq - MCFINT_VECBASE - 64, MCFINTC1_CIMR);
}
+
+ /* only on eport */
+ if (irq >= MCFGPIO_IRQ_VECBASE &&
+ irq < (MCFGPIO_IRQ_VECBASE + MCFGPIO_IRQ_MAX)) {
+
+ u8 epier = __raw_readb(MCFEPORT_EPIER);
+ epier |= 1 << (irq - MCFGPIO_IRQ_VECBASE);
+ __raw_writeb(epier, MCFEPORT_EPIER);
+ }
+}
+
+static void intc_irq_ack(unsigned int irq)
+{
+ /* only on eport */
+ if (irq >= MCFGPIO_IRQ_VECBASE &&
+ irq < (MCFGPIO_IRQ_VECBASE + MCFGPIO_IRQ_MAX)) {
+ u8 epfr = __raw_readb(MCFEPORT_EPFR);
+ epfr |= 1 << (irq - MCFGPIO_IRQ_VECBASE);
+ __raw_writeb(epfr, MCFEPORT_EPFR);
+ }
}
static int intc_irq_set_type(unsigned int irq, unsigned int type)
{
+ unsigned shift;
+ u16 eppar;
+
+ /* only on eport */
+ if (irq < MCFGPIO_IRQ_VECBASE ||
+ irq >= (MCFGPIO_IRQ_VECBASE + MCFGPIO_IRQ_MAX))
+ return -EINVAL;
+
+ /* we only support TRIGGER_LOW or either (or both) RISING and FALLING */
+ if ((type & IRQF_TRIGGER_HIGH) ||
+ ((type & IRQF_TRIGGER_LOW) &&
+ (type & (IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING))))
+ return -EINVAL;
+
+ shift = (irq - MCFGPIO_IRQ_VECBASE) * 2;
+
+ /* default to TRIGGER_LOW */
+ eppar = 0;
+ if (type & IRQF_TRIGGER_RISING)
+ eppar |= (0x01 << shift);
+ if (type & IRQF_TRIGGER_FALLING)
+ eppar |= (0x02 << shift);
+
+ if (eppar)
+ set_irq_handler(irq, handle_edge_irq);
+ else
+ set_irq_handler(irq, handle_level_irq);
+
+ eppar |= (__raw_readw(MCFEPORT_EPPAR) & ~(0x3 << shift));
+ __raw_writew(eppar, MCFEPORT_EPPAR);
+
+
if (irq >= MCFINT_VECBASE) {
if (irq < MCFINT_VECBASE + 64)
__raw_writeb(5, MCFINTC0_ICR0 + irq - MCFINT_VECBASE);
@@ -53,6 +115,7 @@ static struct irq_chip intc_irq_chip = {
.name = "CF-INTC",
.mask = intc_irq_mask,
.unmask = intc_irq_unmask,
+ .ack = intc_irq_ack,
.set_type = intc_irq_set_type,
};
@@ -68,10 +131,7 @@ void __init init_IRQ(void)
__raw_writeb(0xff, MCFINTC1_SIMR);
for (irq = 0; (irq < NR_IRQS); irq++) {
- irq_desc[irq].status = IRQ_DISABLED;
- irq_desc[irq].action = NULL;
- irq_desc[irq].depth = 1;
- irq_desc[irq].chip = &intc_irq_chip;
+ set_irq_chip_and_handler(irq, &intc_irq_chip, handle_level_irq);
intc_irq_set_type(irq, 0);
}
}
diff --git a/arch/m68knommu/platform/coldfire/intc.c b/arch/m68knommu/platform/coldfire/intc.c
index a4560c86db71..ad392a515cd2 100644
--- a/arch/m68knommu/platform/coldfire/intc.c
+++ b/arch/m68knommu/platform/coldfire/intc.c
@@ -115,16 +115,69 @@ static void intc_irq_mask(unsigned int irq)
{
if (mcf_irq2imr[irq])
mcf_setimr(mcf_irq2imr[irq]);
+
+#if defined MCFINTC2_GPIOIRQ0
+ if (irq >= MCFINTC2_GPIOIRQ0 && irq <= MCFINTC2_GPIOIRQ7) {
+ u32 gpiointenable = __raw_readl(MCFSIM2_GPIOINTENABLE);
+
+ gpiointenable &= ~(0x101 << (irq - MCFINTC2_GPIOIRQ0));
+ __raw_writel(gpiointenable, MCFSIM2_GPIOINTENABLE);
+ }
+#endif
}
static void intc_irq_unmask(unsigned int irq)
{
if (mcf_irq2imr[irq])
mcf_clrimr(mcf_irq2imr[irq]);
+
+#if defined MCFINTC2_GPIOIRQ0
+ if (irq >= MCFINTC2_GPIOIRQ0 && irq <= MCFINTC2_GPIOIRQ7) {
+ struct irq_desc *desc = irq_to_desc(irq);
+ u32 gpiointenable = __raw_readl(MCFSIM2_GPIOINTENABLE);
+
+ if (desc->status & IRQF_TRIGGER_RISING)
+ gpiointenable |= 0x0001 << (irq - MCFINTC2_GPIOIRQ0);
+ if (desc->status & IRQF_TRIGGER_FALLING)
+ gpiointenable |= 0x0100 << (irq - MCFINTC2_GPIOIRQ0);
+ __raw_writel(gpiointenable, MCFSIM2_GPIOINTENABLE);
+ }
+#endif
+}
+
+static void intc_irq_ack(unsigned int irq)
+{
+#if defined MCFINTC2_GPIOIRQ0
+ if (irq >= MCFINTC2_GPIOIRQ0 && irq <= MCFINTC2_GPIOIRQ7) {
+ u32 gpiointclear = __raw_readl(MCFSIM2_GPIOINTCLEAR);
+
+ gpiointclear |= 0x0101 << (irq - MCFINTC2_GPIOIRQ0);
+ __raw_writel(gpiointclear, MCFSIM2_GPIOINTCLEAR);
+ }
+#endif
}
static int intc_irq_set_type(unsigned int irq, unsigned int type)
{
+#if defined MCFINTC2_GPIOIRQ0
+ u32 gpiointenable;
+
+ if (type & ~(IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
+ return -EINVAL;
+
+ if ((irq < MCFINTC2_GPIOIRQ0) || (irq > MCFINTC2_GPIOIRQ7))
+ return -EINVAL;
+
+ /* enable rising or falling or both */
+ gpiointenable = __raw_readl(MCFSIM2_GPIOINTENABLE);
+ gpiointenable &= ~(0x101 << (irq - MCFINTC2_GPIOIRQ0));
+ if (type & IRQF_TRIGGER_RISING)
+ gpiointenable |= 0x0001 << (irq - MCFINTC2_GPIOIRQ0);
+ if (type & IRQF_TRIGGER_FALLING)
+ gpiointenable |= 0x0100 << (irq - MCFINTC2_GPIOIRQ0);
+ __raw_writel(gpiointenable, MCFSIM2_GPIOINTENABLE);
+#endif
+
return 0;
}
@@ -132,6 +185,7 @@ static struct irq_chip intc_irq_chip = {
.name = "CF-INTC",
.mask = intc_irq_mask,
.unmask = intc_irq_unmask,
+ .ack = intc_irq_ack,
.set_type = intc_irq_set_type,
};
@@ -143,10 +197,7 @@ void __init init_IRQ(void)
mcf_maskimr(0xffffffff);
for (irq = 0; (irq < NR_IRQS); irq++) {
- irq_desc[irq].status = IRQ_DISABLED;
- irq_desc[irq].action = NULL;
- irq_desc[irq].depth = 1;
- irq_desc[irq].chip = &intc_irq_chip;
+ set_irq_chip_and_handler(irq, &intc_irq_chip, handle_level_irq);
intc_irq_set_type(irq, 0);
}
}
diff --git a/arch/microblaze/include/asm/system.h b/arch/microblaze/include/asm/system.h
index 59efb3fef957..48c4f0335e3f 100644
--- a/arch/microblaze/include/asm/system.h
+++ b/arch/microblaze/include/asm/system.h
@@ -12,6 +12,7 @@
#include <asm/registers.h>
#include <asm/setup.h>
#include <asm/irqflags.h>
+#include <asm/cache.h>
#include <asm-generic/cmpxchg.h>
#include <asm-generic/cmpxchg-local.h>
@@ -96,4 +97,14 @@ extern struct dentry *of_debugfs_root;
#define arch_align_stack(x) (x)
+/*
+ * MicroBlaze doesn't handle unaligned accesses in hardware.
+ *
+ * Based on this we force the IP header alignment in network drivers.
+ * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining
+ * cacheline alignment of buffers.
+ */
+#define NET_IP_ALIGN 2
+#define NET_SKB_PAD L1_CACHE_BYTES
+
#endif /* _ASM_MICROBLAZE_SYSTEM_H */
diff --git a/arch/microblaze/kernel/entry-nommu.S b/arch/microblaze/kernel/entry-nommu.S
index 391d6197fc3b..8cc18cd2cce6 100644
--- a/arch/microblaze/kernel/entry-nommu.S
+++ b/arch/microblaze/kernel/entry-nommu.S
@@ -476,6 +476,8 @@ ENTRY(ret_from_fork)
nop
work_pending:
+ enable_irq
+
andi r11, r19, _TIF_NEED_RESCHED
beqi r11, 1f
bralid r15, schedule
diff --git a/arch/microblaze/kernel/microblaze_ksyms.c b/arch/microblaze/kernel/microblaze_ksyms.c
index bc4dcb7d3861..ecfb852cd1c5 100644
--- a/arch/microblaze/kernel/microblaze_ksyms.c
+++ b/arch/microblaze/kernel/microblaze_ksyms.c
@@ -52,3 +52,13 @@ EXPORT_SYMBOL_GPL(_ebss);
extern void _mcount(void);
EXPORT_SYMBOL(_mcount);
#endif
+
+/*
+ * Assembly functions that may be used (directly or indirectly) by modules
+ */
+EXPORT_SYMBOL(__copy_tofrom_user);
+
+#ifdef CONFIG_OPT_LIB_ASM
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memmove);
+#endif
diff --git a/arch/microblaze/kernel/of_device.c b/arch/microblaze/kernel/of_device.c
index 9a0f7632c47c..f6c521898ebf 100644
--- a/arch/microblaze/kernel/of_device.c
+++ b/arch/microblaze/kernel/of_device.c
@@ -54,6 +54,7 @@ struct of_device *of_device_alloc(struct device_node *np,
dev->dev.parent = parent;
dev->dev.release = of_release_dev;
dev->dev.archdata.of_node = np;
+ dev->dev.of_node = np;
if (bus_id)
dev_set_name(&dev->dev, bus_id);
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 7e6fd1cbd3f8..cdaae942623d 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1075,6 +1075,8 @@ config CPU_LOONGSON2F
bool "Loongson 2F"
depends on SYS_HAS_CPU_LOONGSON2F
select CPU_LOONGSON2
+ select GENERIC_GPIO
+ select ARCH_REQUIRE_GPIOLIB
help
The Loongson 2F processor implements the MIPS III instruction set
with many extensions.
diff --git a/arch/mips/alchemy/common/dbdma.c b/arch/mips/alchemy/common/dbdma.c
index 99ae84ce5af3..ca0506a8585a 100644
--- a/arch/mips/alchemy/common/dbdma.c
+++ b/arch/mips/alchemy/common/dbdma.c
@@ -36,6 +36,7 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/sysdev.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>
@@ -174,10 +175,6 @@ static dbdev_tab_t dbdev_tab[] = {
#define DBDEV_TAB_SIZE ARRAY_SIZE(dbdev_tab)
-#ifdef CONFIG_PM
-static u32 au1xxx_dbdma_pm_regs[NUM_DBDMA_CHANS + 1][6];
-#endif
-
static chan_tab_t *chan_tab_ptr[NUM_DBDMA_CHANS];
@@ -960,29 +957,37 @@ u32 au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr)
return nbytes;
}
-#ifdef CONFIG_PM
-void au1xxx_dbdma_suspend(void)
+
+struct alchemy_dbdma_sysdev {
+ struct sys_device sysdev;
+ u32 pm_regs[NUM_DBDMA_CHANS + 1][6];
+};
+
+static int alchemy_dbdma_suspend(struct sys_device *dev,
+ pm_message_t state)
{
+ struct alchemy_dbdma_sysdev *sdev =
+ container_of(dev, struct alchemy_dbdma_sysdev, sysdev);
int i;
u32 addr;
addr = DDMA_GLOBAL_BASE;
- au1xxx_dbdma_pm_regs[0][0] = au_readl(addr + 0x00);
- au1xxx_dbdma_pm_regs[0][1] = au_readl(addr + 0x04);
- au1xxx_dbdma_pm_regs[0][2] = au_readl(addr + 0x08);
- au1xxx_dbdma_pm_regs[0][3] = au_readl(addr + 0x0c);
+ sdev->pm_regs[0][0] = au_readl(addr + 0x00);
+ sdev->pm_regs[0][1] = au_readl(addr + 0x04);
+ sdev->pm_regs[0][2] = au_readl(addr + 0x08);
+ sdev->pm_regs[0][3] = au_readl(addr + 0x0c);
/* save channel configurations */
for (i = 1, addr = DDMA_CHANNEL_BASE; i <= NUM_DBDMA_CHANS; i++) {
- au1xxx_dbdma_pm_regs[i][0] = au_readl(addr + 0x00);
- au1xxx_dbdma_pm_regs[i][1] = au_readl(addr + 0x04);
- au1xxx_dbdma_pm_regs[i][2] = au_readl(addr + 0x08);
- au1xxx_dbdma_pm_regs[i][3] = au_readl(addr + 0x0c);
- au1xxx_dbdma_pm_regs[i][4] = au_readl(addr + 0x10);
- au1xxx_dbdma_pm_regs[i][5] = au_readl(addr + 0x14);
+ sdev->pm_regs[i][0] = au_readl(addr + 0x00);
+ sdev->pm_regs[i][1] = au_readl(addr + 0x04);
+ sdev->pm_regs[i][2] = au_readl(addr + 0x08);
+ sdev->pm_regs[i][3] = au_readl(addr + 0x0c);
+ sdev->pm_regs[i][4] = au_readl(addr + 0x10);
+ sdev->pm_regs[i][5] = au_readl(addr + 0x14);
/* halt channel */
- au_writel(au1xxx_dbdma_pm_regs[i][0] & ~1, addr + 0x00);
+ au_writel(sdev->pm_regs[i][0] & ~1, addr + 0x00);
au_sync();
while (!(au_readl(addr + 0x14) & 1))
au_sync();
@@ -992,32 +997,65 @@ void au1xxx_dbdma_suspend(void)
/* disable channel interrupts */
au_writel(0, DDMA_GLOBAL_BASE + 0x0c);
au_sync();
+
+ return 0;
}
-void au1xxx_dbdma_resume(void)
+static int alchemy_dbdma_resume(struct sys_device *dev)
{
+ struct alchemy_dbdma_sysdev *sdev =
+ container_of(dev, struct alchemy_dbdma_sysdev, sysdev);
int i;
u32 addr;
addr = DDMA_GLOBAL_BASE;
- au_writel(au1xxx_dbdma_pm_regs[0][0], addr + 0x00);
- au_writel(au1xxx_dbdma_pm_regs[0][1], addr + 0x04);
- au_writel(au1xxx_dbdma_pm_regs[0][2], addr + 0x08);
- au_writel(au1xxx_dbdma_pm_regs[0][3], addr + 0x0c);
+ au_writel(sdev->pm_regs[0][0], addr + 0x00);
+ au_writel(sdev->pm_regs[0][1], addr + 0x04);
+ au_writel(sdev->pm_regs[0][2], addr + 0x08);
+ au_writel(sdev->pm_regs[0][3], addr + 0x0c);
/* restore channel configurations */
for (i = 1, addr = DDMA_CHANNEL_BASE; i <= NUM_DBDMA_CHANS; i++) {
- au_writel(au1xxx_dbdma_pm_regs[i][0], addr + 0x00);
- au_writel(au1xxx_dbdma_pm_regs[i][1], addr + 0x04);
- au_writel(au1xxx_dbdma_pm_regs[i][2], addr + 0x08);
- au_writel(au1xxx_dbdma_pm_regs[i][3], addr + 0x0c);
- au_writel(au1xxx_dbdma_pm_regs[i][4], addr + 0x10);
- au_writel(au1xxx_dbdma_pm_regs[i][5], addr + 0x14);
+ au_writel(sdev->pm_regs[i][0], addr + 0x00);
+ au_writel(sdev->pm_regs[i][1], addr + 0x04);
+ au_writel(sdev->pm_regs[i][2], addr + 0x08);
+ au_writel(sdev->pm_regs[i][3], addr + 0x0c);
+ au_writel(sdev->pm_regs[i][4], addr + 0x10);
+ au_writel(sdev->pm_regs[i][5], addr + 0x14);
au_sync();
addr += 0x100; /* next channel base */
}
+
+ return 0;
+}
+
+static struct sysdev_class alchemy_dbdma_sysdev_class = {
+ .name = "dbdma",
+ .suspend = alchemy_dbdma_suspend,
+ .resume = alchemy_dbdma_resume,
+};
+
+static int __init alchemy_dbdma_sysdev_init(void)
+{
+ struct alchemy_dbdma_sysdev *sdev;
+ int ret;
+
+ ret = sysdev_class_register(&alchemy_dbdma_sysdev_class);
+ if (ret)
+ return ret;
+
+ sdev = kzalloc(sizeof(struct alchemy_dbdma_sysdev), GFP_KERNEL);
+ if (!sdev)
+ return -ENOMEM;
+
+ sdev->sysdev.id = -1;
+ sdev->sysdev.cls = &alchemy_dbdma_sysdev_class;
+ ret = sysdev_register(&sdev->sysdev);
+ if (ret)
+ kfree(sdev);
+
+ return ret;
}
-#endif /* CONFIG_PM */
static int __init au1xxx_dbdma_init(void)
{
@@ -1046,6 +1084,11 @@ static int __init au1xxx_dbdma_init(void)
else {
dbdma_initialized = 1;
printk(KERN_INFO "Alchemy DBDMA initialized\n");
+ ret = alchemy_dbdma_sysdev_init();
+ if (ret) {
+ printk(KERN_ERR "DBDMA PM init failed\n");
+ ret = 0;
+ }
}
return ret;
diff --git a/arch/mips/alchemy/common/irq.c b/arch/mips/alchemy/common/irq.c
index b2821ace4d00..9f78ada83b3c 100644
--- a/arch/mips/alchemy/common/irq.c
+++ b/arch/mips/alchemy/common/irq.c
@@ -29,6 +29,8 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/sysdev.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
@@ -216,90 +218,6 @@ struct au1xxx_irqmap au1200_irqmap[] __initdata = {
};
-#ifdef CONFIG_PM
-
-/*
- * Save/restore the interrupt controller state.
- * Called from the save/restore core registers as part of the
- * au_sleep function in power.c.....maybe I should just pm_register()
- * them instead?
- */
-static unsigned int sleep_intctl_config0[2];
-static unsigned int sleep_intctl_config1[2];
-static unsigned int sleep_intctl_config2[2];
-static unsigned int sleep_intctl_src[2];
-static unsigned int sleep_intctl_assign[2];
-static unsigned int sleep_intctl_wake[2];
-static unsigned int sleep_intctl_mask[2];
-
-void save_au1xxx_intctl(void)
-{
- sleep_intctl_config0[0] = au_readl(IC0_CFG0RD);
- sleep_intctl_config1[0] = au_readl(IC0_CFG1RD);
- sleep_intctl_config2[0] = au_readl(IC0_CFG2RD);
- sleep_intctl_src[0] = au_readl(IC0_SRCRD);
- sleep_intctl_assign[0] = au_readl(IC0_ASSIGNRD);
- sleep_intctl_wake[0] = au_readl(IC0_WAKERD);
- sleep_intctl_mask[0] = au_readl(IC0_MASKRD);
-
- sleep_intctl_config0[1] = au_readl(IC1_CFG0RD);
- sleep_intctl_config1[1] = au_readl(IC1_CFG1RD);
- sleep_intctl_config2[1] = au_readl(IC1_CFG2RD);
- sleep_intctl_src[1] = au_readl(IC1_SRCRD);
- sleep_intctl_assign[1] = au_readl(IC1_ASSIGNRD);
- sleep_intctl_wake[1] = au_readl(IC1_WAKERD);
- sleep_intctl_mask[1] = au_readl(IC1_MASKRD);
-}
-
-/*
- * For most restore operations, we clear the entire register and
- * then set the bits we found during the save.
- */
-void restore_au1xxx_intctl(void)
-{
- au_writel(0xffffffff, IC0_MASKCLR); au_sync();
-
- au_writel(0xffffffff, IC0_CFG0CLR); au_sync();
- au_writel(sleep_intctl_config0[0], IC0_CFG0SET); au_sync();
- au_writel(0xffffffff, IC0_CFG1CLR); au_sync();
- au_writel(sleep_intctl_config1[0], IC0_CFG1SET); au_sync();
- au_writel(0xffffffff, IC0_CFG2CLR); au_sync();
- au_writel(sleep_intctl_config2[0], IC0_CFG2SET); au_sync();
- au_writel(0xffffffff, IC0_SRCCLR); au_sync();
- au_writel(sleep_intctl_src[0], IC0_SRCSET); au_sync();
- au_writel(0xffffffff, IC0_ASSIGNCLR); au_sync();
- au_writel(sleep_intctl_assign[0], IC0_ASSIGNSET); au_sync();
- au_writel(0xffffffff, IC0_WAKECLR); au_sync();
- au_writel(sleep_intctl_wake[0], IC0_WAKESET); au_sync();
- au_writel(0xffffffff, IC0_RISINGCLR); au_sync();
- au_writel(0xffffffff, IC0_FALLINGCLR); au_sync();
- au_writel(0x00000000, IC0_TESTBIT); au_sync();
-
- au_writel(0xffffffff, IC1_MASKCLR); au_sync();
-
- au_writel(0xffffffff, IC1_CFG0CLR); au_sync();
- au_writel(sleep_intctl_config0[1], IC1_CFG0SET); au_sync();
- au_writel(0xffffffff, IC1_CFG1CLR); au_sync();
- au_writel(sleep_intctl_config1[1], IC1_CFG1SET); au_sync();
- au_writel(0xffffffff, IC1_CFG2CLR); au_sync();
- au_writel(sleep_intctl_config2[1], IC1_CFG2SET); au_sync();
- au_writel(0xffffffff, IC1_SRCCLR); au_sync();
- au_writel(sleep_intctl_src[1], IC1_SRCSET); au_sync();
- au_writel(0xffffffff, IC1_ASSIGNCLR); au_sync();
- au_writel(sleep_intctl_assign[1], IC1_ASSIGNSET); au_sync();
- au_writel(0xffffffff, IC1_WAKECLR); au_sync();
- au_writel(sleep_intctl_wake[1], IC1_WAKESET); au_sync();
- au_writel(0xffffffff, IC1_RISINGCLR); au_sync();
- au_writel(0xffffffff, IC1_FALLINGCLR); au_sync();
- au_writel(0x00000000, IC1_TESTBIT); au_sync();
-
- au_writel(sleep_intctl_mask[1], IC1_MASKSET); au_sync();
-
- au_writel(sleep_intctl_mask[0], IC0_MASKSET); au_sync();
-}
-#endif /* CONFIG_PM */
-
-
static void au1x_ic0_unmask(unsigned int irq_nr)
{
unsigned int bit = irq_nr - AU1000_INTC0_INT_BASE;
@@ -635,3 +553,91 @@ void __init arch_init_irq(void)
break;
}
}
+
+struct alchemy_ic_sysdev {
+ struct sys_device sysdev;
+ void __iomem *base;
+ unsigned long pmdata[7];
+};
+
+static int alchemy_ic_suspend(struct sys_device *dev, pm_message_t state)
+{
+ struct alchemy_ic_sysdev *icdev =
+ container_of(dev, struct alchemy_ic_sysdev, sysdev);
+
+ icdev->pmdata[0] = __raw_readl(icdev->base + IC_CFG0RD);
+ icdev->pmdata[1] = __raw_readl(icdev->base + IC_CFG1RD);
+ icdev->pmdata[2] = __raw_readl(icdev->base + IC_CFG2RD);
+ icdev->pmdata[3] = __raw_readl(icdev->base + IC_SRCRD);
+ icdev->pmdata[4] = __raw_readl(icdev->base + IC_ASSIGNRD);
+ icdev->pmdata[5] = __raw_readl(icdev->base + IC_WAKERD);
+ icdev->pmdata[6] = __raw_readl(icdev->base + IC_MASKRD);
+
+ return 0;
+}
+
+static int alchemy_ic_resume(struct sys_device *dev)
+{
+ struct alchemy_ic_sysdev *icdev =
+ container_of(dev, struct alchemy_ic_sysdev, sysdev);
+
+ __raw_writel(0xffffffff, icdev->base + IC_MASKCLR);
+ __raw_writel(0xffffffff, icdev->base + IC_CFG0CLR);
+ __raw_writel(0xffffffff, icdev->base + IC_CFG1CLR);
+ __raw_writel(0xffffffff, icdev->base + IC_CFG2CLR);
+ __raw_writel(0xffffffff, icdev->base + IC_SRCCLR);
+ __raw_writel(0xffffffff, icdev->base + IC_ASSIGNCLR);
+ __raw_writel(0xffffffff, icdev->base + IC_WAKECLR);
+ __raw_writel(0xffffffff, icdev->base + IC_RISINGCLR);
+ __raw_writel(0xffffffff, icdev->base + IC_FALLINGCLR);
+ __raw_writel(0x00000000, icdev->base + IC_TESTBIT);
+ wmb();
+ __raw_writel(icdev->pmdata[0], icdev->base + IC_CFG0SET);
+ __raw_writel(icdev->pmdata[1], icdev->base + IC_CFG1SET);
+ __raw_writel(icdev->pmdata[2], icdev->base + IC_CFG2SET);
+ __raw_writel(icdev->pmdata[3], icdev->base + IC_SRCSET);
+ __raw_writel(icdev->pmdata[4], icdev->base + IC_ASSIGNSET);
+ __raw_writel(icdev->pmdata[5], icdev->base + IC_WAKESET);
+ wmb();
+
+ __raw_writel(icdev->pmdata[6], icdev->base + IC_MASKSET);
+ wmb();
+
+ return 0;
+}
+
+static struct sysdev_class alchemy_ic_sysdev_class = {
+ .name = "ic",
+ .suspend = alchemy_ic_suspend,
+ .resume = alchemy_ic_resume,
+};
+
+static int __init alchemy_ic_sysdev_init(void)
+{
+ struct alchemy_ic_sysdev *icdev;
+ unsigned long icbase[2] = { IC0_PHYS_ADDR, IC1_PHYS_ADDR };
+ int err, i;
+
+ err = sysdev_class_register(&alchemy_ic_sysdev_class);
+ if (err)
+ return err;
+
+ for (i = 0; i < 2; i++) {
+ icdev = kzalloc(sizeof(struct alchemy_ic_sysdev), GFP_KERNEL);
+ if (!icdev)
+ return -ENOMEM;
+
+ icdev->base = ioremap(icbase[i], 0x1000);
+
+ icdev->sysdev.id = i;
+ icdev->sysdev.cls = &alchemy_ic_sysdev_class;
+ err = sysdev_register(&icdev->sysdev);
+ if (err) {
+ kfree(icdev);
+ return err;
+ }
+ }
+
+ return 0;
+}
+device_initcall(alchemy_ic_sysdev_init);
diff --git a/arch/mips/alchemy/common/power.c b/arch/mips/alchemy/common/power.c
index 6ab7b42aa1be..14eb8c492da2 100644
--- a/arch/mips/alchemy/common/power.c
+++ b/arch/mips/alchemy/common/power.c
@@ -36,9 +36,6 @@
#include <asm/uaccess.h>
#include <asm/mach-au1x00/au1000.h>
-#if defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200)
-#include <asm/mach-au1x00/au1xxx_dbdma.h>
-#endif
#ifdef CONFIG_PM
@@ -106,9 +103,6 @@ static void save_core_regs(void)
sleep_usb[1] = au_readl(0xb4020024); /* OTG_MUX */
#endif
- /* Save interrupt controller state. */
- save_au1xxx_intctl();
-
/* Clocks and PLLs. */
sleep_sys_clocks[0] = au_readl(SYS_FREQCTRL0);
sleep_sys_clocks[1] = au_readl(SYS_FREQCTRL1);
@@ -132,10 +126,6 @@ static void save_core_regs(void)
sleep_static_memctlr[3][0] = au_readl(MEM_STCFG3);
sleep_static_memctlr[3][1] = au_readl(MEM_STTIME3);
sleep_static_memctlr[3][2] = au_readl(MEM_STADDR3);
-
-#if defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200)
- au1xxx_dbdma_suspend();
-#endif
}
static void restore_core_regs(void)
@@ -199,12 +189,6 @@ static void restore_core_regs(void)
au_writel(sleep_uart0_linectl, UART0_ADDR + UART_LCR); au_sync();
au_writel(sleep_uart0_clkdiv, UART0_ADDR + UART_CLK); au_sync();
}
-
- restore_au1xxx_intctl();
-
-#if defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200)
- au1xxx_dbdma_resume();
-#endif
}
void au_sleep(void)
diff --git a/arch/mips/alchemy/devboards/pb1000/board_setup.c b/arch/mips/alchemy/devboards/pb1000/board_setup.c
index b5311d8a29ab..4ef50d86b181 100644
--- a/arch/mips/alchemy/devboards/pb1000/board_setup.c
+++ b/arch/mips/alchemy/devboards/pb1000/board_setup.c
@@ -27,8 +27,10 @@
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/pm.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-pb1x00/pb1000.h>
+#include <asm/reboot.h>
#include <prom.h>
#include "../platform.h"
@@ -38,8 +40,16 @@ const char *get_system_type(void)
return "Alchemy Pb1000";
}
-void board_reset(void)
+static void board_reset(char *c)
{
+ asm volatile ("jr %0" : : "r" (0xbfc00000));
+}
+
+static void board_power_off(void)
+{
+ printk(KERN_ALERT "It's now safe to remove power\n");
+ while (1)
+ asm volatile (".set mips3 ; wait ; .set mips1");
}
void __init board_setup(void)
@@ -177,6 +187,10 @@ void __init board_setup(void)
au_writel(au_readl(SYS_POWERCTRL) | (0x3 << 5), SYS_POWERCTRL);
break;
}
+
+ pm_power_off = board_power_off;
+ _machine_halt = board_power_off;
+ _machine_restart = board_reset;
}
static int __init pb1000_init_irq(void)
diff --git a/arch/mips/alchemy/devboards/pb1100/board_setup.c b/arch/mips/alchemy/devboards/pb1100/board_setup.c
index c7b4caa81a35..90dda5f3ecc5 100644
--- a/arch/mips/alchemy/devboards/pb1100/board_setup.c
+++ b/arch/mips/alchemy/devboards/pb1100/board_setup.c
@@ -39,11 +39,6 @@ const char *get_system_type(void)
return "Alchemy Pb1100";
}
-void board_reset(void)
-{
- bcsr_write(BCSR_SYSTEM, 0);
-}
-
void __init board_setup(void)
{
volatile void __iomem *base = (volatile void __iomem *)0xac000000UL;
diff --git a/arch/mips/alchemy/devboards/pb1200/board_setup.c b/arch/mips/alchemy/devboards/pb1200/board_setup.c
index 3184063f8042..8b4466f2d44a 100644
--- a/arch/mips/alchemy/devboards/pb1200/board_setup.c
+++ b/arch/mips/alchemy/devboards/pb1200/board_setup.c
@@ -48,12 +48,6 @@ const char *get_system_type(void)
return "Alchemy Pb1200";
}
-void board_reset(void)
-{
- bcsr_write(BCSR_RESETS, 0);
- bcsr_write(BCSR_SYSTEM, 0);
-}
-
void __init board_setup(void)
{
printk(KERN_INFO "AMD Alchemy Pb1200 Board\n");
diff --git a/arch/mips/alchemy/devboards/pb1500/board_setup.c b/arch/mips/alchemy/devboards/pb1500/board_setup.c
index fa9770ac358a..9cd9dfa698e7 100644
--- a/arch/mips/alchemy/devboards/pb1500/board_setup.c
+++ b/arch/mips/alchemy/devboards/pb1500/board_setup.c
@@ -45,11 +45,6 @@ const char *get_system_type(void)
return "Alchemy Pb1500";
}
-void board_reset(void)
-{
- bcsr_write(BCSR_SYSTEM, 0);
-}
-
void __init board_setup(void)
{
u32 pin_func;
diff --git a/arch/mips/alchemy/devboards/pb1550/board_setup.c b/arch/mips/alchemy/devboards/pb1550/board_setup.c
index 1e8fb3ddd726..9d7d6edafa8d 100644
--- a/arch/mips/alchemy/devboards/pb1550/board_setup.c
+++ b/arch/mips/alchemy/devboards/pb1550/board_setup.c
@@ -48,11 +48,6 @@ const char *get_system_type(void)
return "Alchemy Pb1550";
}
-void board_reset(void)
-{
- bcsr_write(BCSR_SYSTEM, 0);
-}
-
void __init board_setup(void)
{
u32 pin_func;
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
index 2fafc78e5ce1..566f2d7f2ea3 100644
--- a/arch/mips/ar7/platform.c
+++ b/arch/mips/ar7/platform.c
@@ -576,7 +576,6 @@ static int __init ar7_register_devices(void)
{
void __iomem *bootcr;
u32 val;
- u16 chip_id;
int res;
res = ar7_register_uarts();
@@ -635,18 +634,10 @@ static int __init ar7_register_devices(void)
val = readl(bootcr);
iounmap(bootcr);
if (val & AR7_WDT_HW_ENA) {
- chip_id = ar7_chip_id();
- switch (chip_id) {
- case AR7_CHIP_7100:
- case AR7_CHIP_7200:
- ar7_wdt_res.start = AR7_REGS_WDT;
- break;
- case AR7_CHIP_7300:
+ if (ar7_has_high_vlynq())
ar7_wdt_res.start = UR8_REGS_WDT;
- break;
- default:
- break;
- }
+ else
+ ar7_wdt_res.start = AR7_REGS_WDT;
ar7_wdt_res.end = ar7_wdt_res.start + 0x20;
res = platform_device_register(&ar7_wdt);
@@ -656,4 +647,4 @@ static int __init ar7_register_devices(void)
return 0;
}
-arch_initcall(ar7_register_devices);
+device_initcall(ar7_register_devices);
diff --git a/arch/mips/bcm63xx/gpio.c b/arch/mips/bcm63xx/gpio.c
index 315bc7f79ce1..f560fe7d38dd 100644
--- a/arch/mips/bcm63xx/gpio.c
+++ b/arch/mips/bcm63xx/gpio.c
@@ -91,7 +91,7 @@ static int bcm63xx_gpio_set_direction(struct gpio_chip *chip,
spin_lock_irqsave(&bcm63xx_gpio_lock, flags);
tmp = bcm_gpio_readl(reg);
- if (dir == GPIO_DIR_IN)
+ if (dir == BCM63XX_GPIO_DIR_IN)
tmp &= ~mask;
else
tmp |= mask;
@@ -103,14 +103,14 @@ static int bcm63xx_gpio_set_direction(struct gpio_chip *chip,
static int bcm63xx_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
{
- return bcm63xx_gpio_set_direction(chip, gpio, GPIO_DIR_IN);
+ return bcm63xx_gpio_set_direction(chip, gpio, BCM63XX_GPIO_DIR_IN);
}
static int bcm63xx_gpio_direction_output(struct gpio_chip *chip,
unsigned gpio, int value)
{
bcm63xx_gpio_set(chip, gpio, value);
- return bcm63xx_gpio_set_direction(chip, gpio, GPIO_DIR_OUT);
+ return bcm63xx_gpio_set_direction(chip, gpio, BCM63XX_GPIO_DIR_OUT);
}
diff --git a/arch/mips/configs/ar7_defconfig b/arch/mips/configs/ar7_defconfig
index 5a5b6ba7514e..e70009584090 100644
--- a/arch/mips/configs/ar7_defconfig
+++ b/arch/mips/configs/ar7_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Wed Jun 24 14:08:59 2009
+# Linux kernel version: 2.6.34-rc6
+# Sat May 1 11:35:01 2010
#
CONFIG_MIPS=y
@@ -11,11 +11,12 @@ CONFIG_MIPS=y
# CONFIG_MACH_ALCHEMY is not set
CONFIG_AR7=y
# CONFIG_BCM47XX is not set
+# CONFIG_BCM63XX is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
# CONFIG_MACH_JAZZ is not set
# CONFIG_LASAT is not set
-# CONFIG_LEMOTE_FULONG is not set
+# CONFIG_MACH_LOONGSON is not set
# CONFIG_MIPS_MALTA is not set
# CONFIG_MIPS_SIM is not set
# CONFIG_NEC_MARKEINS is not set
@@ -26,6 +27,7 @@ CONFIG_AR7=y
# CONFIG_PNX8550_STB810 is not set
# CONFIG_PMC_MSP is not set
# CONFIG_PMC_YOSEMITE is not set
+# CONFIG_POWERTV is not set
# CONFIG_SGI_IP22 is not set
# CONFIG_SGI_IP27 is not set
# CONFIG_SGI_IP28 is not set
@@ -46,6 +48,7 @@ CONFIG_AR7=y
# CONFIG_CAVIUM_OCTEON_SIMULATOR is not set
# CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD is not set
# CONFIG_ALCHEMY_GPIO_INDIRECT is not set
+CONFIG_LOONGSON_UART_BASE=y
CONFIG_RWSEM_GENERIC_SPINLOCK=y
# CONFIG_ARCH_HAS_ILOG2_U32 is not set
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
@@ -63,10 +66,8 @@ CONFIG_CEVT_R4K=y
CONFIG_CSRC_R4K_LIB=y
CONFIG_CSRC_R4K=y
CONFIG_DMA_NONCOHERENT=y
-CONFIG_DMA_NEED_PCI_MAP_STATE=y
-CONFIG_EARLY_PRINTK=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_SYS_HAS_EARLY_PRINTK=y
-# CONFIG_HOTPLUG_CPU is not set
# CONFIG_NO_IOPORT is not set
CONFIG_GENERIC_GPIO=y
# CONFIG_CPU_BIG_ENDIAN is not set
@@ -81,7 +82,8 @@ CONFIG_MIPS_L1_CACHE_SHIFT=5
#
# CPU selection
#
-# CONFIG_CPU_LOONGSON2 is not set
+# CONFIG_CPU_LOONGSON2E is not set
+# CONFIG_CPU_LOONGSON2F is not set
CONFIG_CPU_MIPS32_R1=y
# CONFIG_CPU_MIPS32_R2 is not set
# CONFIG_CPU_MIPS64_R1 is not set
@@ -103,6 +105,8 @@ CONFIG_CPU_MIPS32_R1=y
# CONFIG_CPU_RM9000 is not set
# CONFIG_CPU_SB1 is not set
# CONFIG_CPU_CAVIUM_OCTEON is not set
+CONFIG_SYS_SUPPORTS_ZBOOT=y
+CONFIG_SYS_SUPPORTS_ZBOOT_UART16550=y
CONFIG_SYS_HAS_CPU_MIPS32_R1=y
CONFIG_CPU_MIPS32=y
CONFIG_CPU_MIPSR1=y
@@ -124,6 +128,7 @@ CONFIG_CPU_HAS_PREFETCH=y
CONFIG_MIPS_MT_DISABLED=y
# CONFIG_MIPS_MT_SMP is not set
# CONFIG_MIPS_MT_SMTC is not set
+# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set
CONFIG_CPU_HAS_SYNC=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
@@ -141,8 +146,7 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
CONFIG_VIRT_TO_BUS=y
-CONFIG_HAVE_MLOCK=y
-CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
CONFIG_TICK_ONESHOT=y
# CONFIG_NO_HZ is not set
@@ -165,6 +169,7 @@ CONFIG_KEXEC=y
CONFIG_LOCKDEP_SUPPORT=y
CONFIG_STACKTRACE_SUPPORT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -174,6 +179,14 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
+# CONFIG_KERNEL_GZIP is not set
+# CONFIG_KERNEL_BZIP2 is not set
+CONFIG_KERNEL_LZMA=y
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -186,14 +199,12 @@ CONFIG_BSD_PROCESS_ACCT=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+# CONFIG_TREE_PREEMPT_RCU is not set
+CONFIG_TINY_RCU=y
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -204,6 +215,7 @@ CONFIG_INITRAMFS_SOURCE=""
CONFIG_RD_GZIP=y
# CONFIG_RD_BZIP2 is not set
CONFIG_RD_LZMA=y
+# CONFIG_RD_LZO is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
@@ -225,19 +237,22 @@ CONFIG_SHMEM=y
CONFIG_AIO=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
# CONFIG_VM_EVENT_COUNTERS is not set
-CONFIG_STRIP_ASM_SYMS=y
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
# CONFIG_SLOW_WORK is not set
-# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
CONFIG_BASE_SMALL=0
@@ -248,7 +263,7 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -256,14 +271,41 @@ CONFIG_BLOCK=y
# IO Schedulers
#
CONFIG_IOSCHED_NOOP=y
-# CONFIG_IOSCHED_AS is not set
CONFIG_IOSCHED_DEADLINE=y
# CONFIG_IOSCHED_CFQ is not set
-# CONFIG_DEFAULT_AS is not set
CONFIG_DEFAULT_DEADLINE=y
# CONFIG_DEFAULT_CFQ is not set
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="deadline"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
# CONFIG_FREEZER is not set
#
@@ -293,7 +335,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-CONFIG_PACKET_MMAP=y
CONFIG_UNIX=y
# CONFIG_NET_KEY is not set
CONFIG_INET=y
@@ -377,6 +418,7 @@ CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NETFILTER_XTABLES=m
# CONFIG_NETFILTER_XT_TARGET_CLASSIFY is not set
# CONFIG_NETFILTER_XT_TARGET_CONNMARK is not set
+# CONFIG_NETFILTER_XT_TARGET_CT is not set
# CONFIG_NETFILTER_XT_TARGET_DSCP is not set
# CONFIG_NETFILTER_XT_TARGET_HL is not set
# CONFIG_NETFILTER_XT_TARGET_LED is not set
@@ -458,6 +500,7 @@ CONFIG_IP_NF_RAW=m
# CONFIG_IP_NF_ARPTABLES is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
CONFIG_ATM=m
# CONFIG_ATM_CLIP is not set
@@ -466,6 +509,7 @@ CONFIG_ATM_BR2684=m
CONFIG_ATM_BR2684_IPFILTER=y
CONFIG_STP=y
CONFIG_BRIDGE=y
+CONFIG_BRIDGE_IGMP_SNOOPING=y
# CONFIG_NET_DSA is not set
CONFIG_VLAN_8021Q=y
# CONFIG_VLAN_8021Q_GVRP is not set
@@ -541,20 +585,19 @@ CONFIG_HAMRADIO=y
# CONFIG_AF_RXRPC is not set
CONFIG_FIB_RULES=y
CONFIG_WIRELESS=y
+CONFIG_WEXT_CORE=y
+CONFIG_WEXT_PROC=y
CONFIG_CFG80211=m
+# CONFIG_NL80211_TESTMODE is not set
+# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
# CONFIG_CFG80211_REG_DEBUG is not set
+CONFIG_CFG80211_DEFAULT_PS=y
# CONFIG_CFG80211_DEBUGFS is not set
-# CONFIG_WIRELESS_OLD_REGULATORY is not set
-CONFIG_WIRELESS_EXT=y
+# CONFIG_CFG80211_INTERNAL_REGDB is not set
+CONFIG_CFG80211_WEXT=y
CONFIG_WIRELESS_EXT_SYSFS=y
# CONFIG_LIB80211 is not set
CONFIG_MAC80211=m
-CONFIG_MAC80211_DEFAULT_PS=y
-CONFIG_MAC80211_DEFAULT_PS_VALUE=1
-
-#
-# Rate control algorithm selection
-#
CONFIG_MAC80211_RC_PID=y
CONFIG_MAC80211_RC_MINSTREL=y
CONFIG_MAC80211_RC_DEFAULT_PID=y
@@ -576,6 +619,7 @@ CONFIG_MAC80211_RC_DEFAULT="pid"
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
@@ -585,9 +629,9 @@ CONFIG_EXTRA_FIRMWARE=""
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
CONFIG_MTD_PARTITIONS=y
-# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_REDBOOT_PARTS is not set
# CONFIG_MTD_CMDLINE_PARTS is not set
# CONFIG_MTD_AR7_PARTS is not set
@@ -636,6 +680,7 @@ CONFIG_MTD_CFI_UTIL=y
CONFIG_MTD_COMPLEX_MAPPINGS=y
CONFIG_MTD_PHYSMAP=y
# CONFIG_MTD_PHYSMAP_COMPAT is not set
+# CONFIG_MTD_GPIO_ADDR is not set
# CONFIG_MTD_PLATRAM is not set
#
@@ -668,6 +713,10 @@ CONFIG_MTD_PHYSMAP=y
CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_COW_COMMON is not set
# CONFIG_BLK_DEV_LOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
# CONFIG_BLK_DEV_NBD is not set
# CONFIG_BLK_DEV_RAM is not set
# CONFIG_CDROM_PKTCDVD is not set
@@ -687,6 +736,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
# CONFIG_SCSI is not set
# CONFIG_SCSI_DMA is not set
@@ -727,6 +777,7 @@ CONFIG_MII=y
# CONFIG_SMC91X is not set
# CONFIG_DM9000 is not set
# CONFIG_ETHOC is not set
+# CONFIG_SMSC911X is not set
# CONFIG_DNET is not set
# CONFIG_IBM_NEW_EMAC_ZMII is not set
# CONFIG_IBM_NEW_EMAC_RGMII is not set
@@ -737,23 +788,21 @@ CONFIG_MII=y
# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
# CONFIG_B44 is not set
# CONFIG_KS8842 is not set
+# CONFIG_KS8851_MLL is not set
CONFIG_CPMAC=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
-
-#
-# Wireless LAN
-#
-# CONFIG_WLAN_PRE80211 is not set
-CONFIG_WLAN_80211=y
-# CONFIG_LIBERTAS is not set
+CONFIG_WLAN=y
# CONFIG_LIBERTAS_THINFIRM is not set
# CONFIG_MAC80211_HWSIM is not set
-# CONFIG_P54_COMMON is not set
-# CONFIG_HOSTAP is not set
+# CONFIG_ATH_COMMON is not set
# CONFIG_B43 is not set
# CONFIG_B43LEGACY is not set
+# CONFIG_HOSTAP is not set
+# CONFIG_LIBERTAS is not set
+# CONFIG_P54_COMMON is not set
# CONFIG_RT2X00 is not set
+# CONFIG_WL12XX is not set
#
# Enable WiMAX (Networking options) to see the WiMAX drivers
@@ -813,6 +862,7 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=2
#
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
@@ -824,11 +874,39 @@ CONFIG_HW_RANDOM=y
# CONFIG_TCG_TPM is not set
# CONFIG_I2C is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+
+#
+# Memory mapped GPIO expanders:
+#
+# CONFIG_GPIO_IT8761E is not set
+
+#
+# I2C GPIO expanders:
+#
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+
+#
+# AC97 GPIO expanders:
+#
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
CONFIG_WATCHDOG=y
# CONFIG_WATCHDOG_NOWAYOUT is not set
@@ -842,13 +920,7 @@ CONFIG_SSB_POSSIBLE=y
#
# Sonics Silicon Backplane
#
-CONFIG_SSB=y
-# CONFIG_SSB_SILENT is not set
-# CONFIG_SSB_DEBUG is not set
-CONFIG_SSB_SERIAL=y
-CONFIG_SSB_DRIVER_MIPS=y
-CONFIG_SSB_EMBEDDED=y
-CONFIG_SSB_DRIVER_EXTIF=y
+# CONFIG_SSB is not set
#
# Multifunction device drivers
@@ -882,15 +954,18 @@ CONFIG_LEDS_CLASS=y
#
# LED drivers
#
-# CONFIG_LEDS_GPIO is not set
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_GPIO_PLATFORM=y
+# CONFIG_LEDS_LT3593 is not set
+CONFIG_LEDS_TRIGGERS=y
#
# LED Triggers
#
-CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_GPIO is not set
CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
#
@@ -921,6 +996,7 @@ CONFIG_VLYNQ=y
# CONFIG_XFS_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
# CONFIG_DNOTIFY is not set
@@ -984,6 +1060,7 @@ CONFIG_JFFS2_RTIME=y
CONFIG_JFFS2_CMODE_PRIORITY=y
# CONFIG_JFFS2_CMODE_SIZE is not set
# CONFIG_JFFS2_CMODE_FAVOURLZO is not set
+# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
CONFIG_SQUASHFS=y
# CONFIG_SQUASHFS_EMBEDDED is not set
@@ -996,11 +1073,11 @@ CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
# CONFIG_NFS_FS is not set
# CONFIG_NFSD is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -1039,21 +1116,29 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+CONFIG_STRIP_ASM_SYMS=y
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
# CONFIG_DEBUG_MEMORY_INIT is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_LKDTM is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
+CONFIG_EARLY_PRINTK=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="rootfstype=squashfs,jffs2"
# CONFIG_CMDLINE_OVERRIDE is not set
+# CONFIG_SPINLOCK_TEST is not set
#
# Security options
@@ -1061,13 +1146,16 @@ CONFIG_CMDLINE="rootfstype=squashfs,jffs2"
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
# CONFIG_SECURITYFS is not set
-# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=m
CONFIG_CRYPTO_ALGAPI2=m
CONFIG_CRYPTO_AEAD2=m
@@ -1108,11 +1196,13 @@ CONFIG_CRYPTO_ECB=m
#
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
# CONFIG_CRYPTO_MD5 is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
diff --git a/arch/mips/configs/bcm47xx_defconfig b/arch/mips/configs/bcm47xx_defconfig
index 267bd46120bc..bbd826b8032d 100644
--- a/arch/mips/configs/bcm47xx_defconfig
+++ b/arch/mips/configs/bcm47xx_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.25-rc2
-# Mon Feb 18 11:55:24 2008
+# Linux kernel version: 2.6.34-rc6
+# Sat May 1 12:14:30 2010
#
CONFIG_MIPS=y
@@ -9,20 +9,25 @@ CONFIG_MIPS=y
# Machine selection
#
# CONFIG_MACH_ALCHEMY is not set
+# CONFIG_AR7 is not set
CONFIG_BCM47XX=y
+# CONFIG_BCM63XX is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
# CONFIG_MACH_JAZZ is not set
# CONFIG_LASAT is not set
-# CONFIG_LEMOTE_FULONG is not set
+# CONFIG_MACH_LOONGSON is not set
# CONFIG_MIPS_MALTA is not set
# CONFIG_MIPS_SIM is not set
-# CONFIG_MARKEINS is not set
+# CONFIG_NEC_MARKEINS is not set
# CONFIG_MACH_VR41XX is not set
+# CONFIG_NXP_STB220 is not set
+# CONFIG_NXP_STB225 is not set
# CONFIG_PNX8550_JBS is not set
# CONFIG_PNX8550_STB810 is not set
# CONFIG_PMC_MSP is not set
# CONFIG_PMC_YOSEMITE is not set
+# CONFIG_POWERTV is not set
# CONFIG_SGI_IP22 is not set
# CONFIG_SGI_IP27 is not set
# CONFIG_SGI_IP28 is not set
@@ -36,10 +41,14 @@ CONFIG_BCM47XX=y
# CONFIG_SIBYTE_SENTOSA is not set
# CONFIG_SIBYTE_BIGSUR is not set
# CONFIG_SNI_RM is not set
-# CONFIG_TOSHIBA_JMR3927 is not set
-# CONFIG_TOSHIBA_RBTX4927 is not set
-# CONFIG_TOSHIBA_RBTX4938 is not set
+# CONFIG_MACH_TX39XX is not set
+# CONFIG_MACH_TX49XX is not set
+# CONFIG_MIKROTIK_RB532 is not set
# CONFIG_WR_PPMC is not set
+# CONFIG_CAVIUM_OCTEON_SIMULATOR is not set
+# CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD is not set
+# CONFIG_ALCHEMY_GPIO_INDIRECT is not set
+CONFIG_LOONGSON_UART_BASE=y
CONFIG_RWSEM_GENERIC_SPINLOCK=y
# CONFIG_ARCH_HAS_ILOG2_U32 is not set
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
@@ -50,16 +59,16 @@ CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_GENERIC_CLOCKEVENTS=y
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CMOS_UPDATE=y
-CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
-# CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ is not set
+CONFIG_SCHED_OMIT_FRAME_POINTER=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_CEVT_R4K_LIB=y
CONFIG_CEVT_R4K=y
+CONFIG_CSRC_R4K_LIB=y
CONFIG_CSRC_R4K=y
CONFIG_CFE=y
CONFIG_DMA_NONCOHERENT=y
-CONFIG_DMA_NEED_PCI_MAP_STATE=y
-CONFIG_EARLY_PRINTK=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_SYS_HAS_EARLY_PRINTK=y
-# CONFIG_HOTPLUG_CPU is not set
# CONFIG_NO_IOPORT is not set
CONFIG_GENERIC_GPIO=y
# CONFIG_CPU_BIG_ENDIAN is not set
@@ -71,7 +80,8 @@ CONFIG_MIPS_L1_CACHE_SHIFT=5
#
# CPU selection
#
-# CONFIG_CPU_LOONGSON2 is not set
+# CONFIG_CPU_LOONGSON2E is not set
+# CONFIG_CPU_LOONGSON2F is not set
CONFIG_CPU_MIPS32_R1=y
# CONFIG_CPU_MIPS32_R2 is not set
# CONFIG_CPU_MIPS64_R1 is not set
@@ -84,6 +94,7 @@ CONFIG_CPU_MIPS32_R1=y
# CONFIG_CPU_TX49XX is not set
# CONFIG_CPU_R5000 is not set
# CONFIG_CPU_R5432 is not set
+# CONFIG_CPU_R5500 is not set
# CONFIG_CPU_R6000 is not set
# CONFIG_CPU_NEVADA is not set
# CONFIG_CPU_R8000 is not set
@@ -91,11 +102,13 @@ CONFIG_CPU_MIPS32_R1=y
# CONFIG_CPU_RM7000 is not set
# CONFIG_CPU_RM9000 is not set
# CONFIG_CPU_SB1 is not set
+# CONFIG_CPU_CAVIUM_OCTEON is not set
CONFIG_SYS_HAS_CPU_MIPS32_R1=y
CONFIG_CPU_MIPS32=y
CONFIG_CPU_MIPSR1=y
CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y
CONFIG_CPU_SUPPORTS_32BIT_KERNEL=y
+CONFIG_HARDWARE_WATCHPOINTS=y
#
# Kernel type
@@ -105,11 +118,13 @@ CONFIG_32BIT=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
+# CONFIG_PAGE_SIZE_32KB is not set
# CONFIG_PAGE_SIZE_64KB is not set
CONFIG_CPU_HAS_PREFETCH=y
CONFIG_MIPS_MT_DISABLED=y
# CONFIG_MIPS_MT_SMP is not set
# CONFIG_MIPS_MT_SMTC is not set
+# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set
CONFIG_CPU_HAS_SYNC=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
@@ -122,12 +137,13 @@ CONFIG_FLATMEM_MANUAL=y
# CONFIG_SPARSEMEM_MANUAL is not set
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_RESOURCES_64BIT is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
CONFIG_TICK_ONESHOT=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
@@ -144,12 +160,12 @@ CONFIG_HZ=250
CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
-CONFIG_RCU_TRACE=y
CONFIG_KEXEC=y
# CONFIG_SECCOMP is not set
CONFIG_LOCKDEP_SUPPORT=y
CONFIG_STACKTRACE_SUPPORT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -163,6 +179,7 @@ CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_TASKSTATS=y
@@ -170,25 +187,37 @@ CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_AUDIT=y
+
+#
+# RCU Subsystem
+#
+# CONFIG_TREE_RCU is not set
+# CONFIG_TREE_PREEMPT_RCU is not set
+CONFIG_TINY_RCU=y
+# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=17
CONFIG_CGROUPS=y
# CONFIG_CGROUP_DEBUG is not set
CONFIG_CGROUP_NS=y
-CONFIG_GROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-# CONFIG_RT_GROUP_SCHED is not set
-CONFIG_USER_SCHED=y
-# CONFIG_CGROUP_SCHED is not set
+# CONFIG_CGROUP_FREEZER is not set
+# CONFIG_CGROUP_DEVICE is not set
+# CONFIG_CPUSETS is not set
CONFIG_CGROUP_CPUACCT=y
# CONFIG_RESOURCE_COUNTERS is not set
-CONFIG_SYSFS_DEPRECATED=y
+# CONFIG_CGROUP_SCHED is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
CONFIG_RELAY=y
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+CONFIG_RD_LZMA=y
+# CONFIG_RD_LZO is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
CONFIG_EMBEDDED=y
CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS=y
@@ -197,54 +226,90 @@ CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
CONFIG_ELF_CORE=y
-CONFIG_COMPAT_BRK=y
+CONFIG_PCSPKR_PLATFORM=y
CONFIG_BASE_FULL=y
CONFIG_FUTEX=y
-CONFIG_ANON_INODES=y
CONFIG_EPOLL=y
CONFIG_SIGNALFD=y
CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_PCI_QUIRKS=y
+CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
-# CONFIG_HAVE_KPROBES is not set
-CONFIG_PROC_PAGE_MONITOR=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_SLOW_WORK=y
+# CONFIG_SLOW_WORK_DEBUG is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
-# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=0
CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_MODULE_SRCVERSION_ALL is not set
-CONFIG_KMOD=y
CONFIG_BLOCK=y
-CONFIG_LBD=y
-CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_LSF=y
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
#
# IO Schedulers
#
CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
CONFIG_IOSCHED_CFQ=y
-# CONFIG_DEFAULT_AS is not set
+# CONFIG_CFQ_GROUP_IOSCHED is not set
# CONFIG_DEFAULT_DEADLINE is not set
CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="cfq"
-CONFIG_CLASSIC_RCU=y
-# CONFIG_PREEMPT_RCU is not set
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
+# CONFIG_FREEZER is not set
#
# Bus options (PCI, PCMCIA, EISA, ISA, TC)
@@ -253,7 +318,8 @@ CONFIG_HW_HAS_PCI=y
CONFIG_PCI=y
CONFIG_PCI_DOMAINS=y
# CONFIG_ARCH_SUPPORTS_MSI is not set
-CONFIG_PCI_LEGACY=y
+# CONFIG_PCI_STUB is not set
+# CONFIG_PCI_IOV is not set
CONFIG_MMU=y
# CONFIG_PCCARD is not set
# CONFIG_HOTPLUG_PCI is not set
@@ -262,31 +328,30 @@ CONFIG_MMU=y
# Executable file formats
#
CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_HAVE_AOUT is not set
CONFIG_BINFMT_MISC=m
CONFIG_TRAD_SIGNALS=y
#
# Power management options
#
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
CONFIG_ARCH_SUSPEND_POSSIBLE=y
# CONFIG_PM is not set
-
-#
-# Networking
-#
CONFIG_NET=y
#
# Networking options
#
CONFIG_PACKET=y
-CONFIG_PACKET_MMAP=y
CONFIG_UNIX=y
CONFIG_XFRM=y
CONFIG_XFRM_USER=m
# CONFIG_XFRM_SUB_POLICY is not set
# CONFIG_XFRM_MIGRATE is not set
# CONFIG_XFRM_STATISTICS is not set
+CONFIG_XFRM_IPCOMP=m
CONFIG_NET_KEY=m
# CONFIG_NET_KEY_MIGRATE is not set
CONFIG_INET=y
@@ -315,7 +380,7 @@ CONFIG_INET_TUNNEL=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
-CONFIG_INET_LRO=m
+CONFIG_INET_LRO=y
CONFIG_INET_DIAG=m
CONFIG_INET_TCP_DIAG=m
CONFIG_TCP_CONG_ADVANCED=y
@@ -339,36 +404,6 @@ CONFIG_DEFAULT_BIC=y
# CONFIG_DEFAULT_RENO is not set
CONFIG_DEFAULT_TCP_CONG="bic"
# CONFIG_TCP_MD5SIG is not set
-CONFIG_IP_VS=m
-# CONFIG_IP_VS_DEBUG is not set
-CONFIG_IP_VS_TAB_BITS=12
-
-#
-# IPVS transport protocol load balancing support
-#
-CONFIG_IP_VS_PROTO_TCP=y
-CONFIG_IP_VS_PROTO_UDP=y
-CONFIG_IP_VS_PROTO_ESP=y
-CONFIG_IP_VS_PROTO_AH=y
-
-#
-# IPVS scheduler
-#
-CONFIG_IP_VS_RR=m
-CONFIG_IP_VS_WRR=m
-CONFIG_IP_VS_LC=m
-CONFIG_IP_VS_WLC=m
-CONFIG_IP_VS_LBLC=m
-CONFIG_IP_VS_LBLCR=m
-CONFIG_IP_VS_DH=m
-CONFIG_IP_VS_SH=m
-CONFIG_IP_VS_SED=m
-CONFIG_IP_VS_NQ=m
-
-#
-# IPVS application helper
-#
-CONFIG_IP_VS_FTP=m
CONFIG_IPV6=m
CONFIG_IPV6_PRIVACY=y
# CONFIG_IPV6_ROUTER_PREF is not set
@@ -384,9 +419,12 @@ CONFIG_INET6_XFRM_MODE_TUNNEL=m
CONFIG_INET6_XFRM_MODE_BEET=m
CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
CONFIG_IPV6_SIT=m
+# CONFIG_IPV6_SIT_6RD is not set
+CONFIG_IPV6_NDISC_NODETYPE=y
CONFIG_IPV6_TUNNEL=m
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
+# CONFIG_IPV6_MROUTE is not set
CONFIG_NETWORK_SECMARK=y
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_DEBUG is not set
@@ -404,6 +442,7 @@ CONFIG_NF_CT_ACCT=y
CONFIG_NF_CONNTRACK_MARK=y
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=m
CONFIG_NF_CT_PROTO_GRE=m
CONFIG_NF_CT_PROTO_SCTP=m
CONFIG_NF_CT_PROTO_UDPLITE=m
@@ -417,20 +456,25 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_CT_NETLINK=m
+# CONFIG_NETFILTER_TPROXY is not set
CONFIG_NETFILTER_XTABLES=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
+# CONFIG_NETFILTER_XT_TARGET_CT is not set
CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HL=m
+# CONFIG_NETFILTER_XT_TARGET_LED is not set
CONFIG_NETFILTER_XT_TARGET_MARK=m
-CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set
CONFIG_NETFILTER_XT_TARGET_TRACE=m
CONFIG_NETFILTER_XT_TARGET_SECMARK=m
-CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set
+# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
@@ -439,20 +483,23 @@ CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
CONFIG_NETFILTER_XT_MATCH_DCCP=m
CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_HL=m
# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set
CONFIG_NETFILTER_XT_MATCH_LENGTH=m
CONFIG_NETFILTER_XT_MATCH_LIMIT=m
CONFIG_NETFILTER_XT_MATCH_MAC=m
CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
# CONFIG_NETFILTER_XT_MATCH_OWNER is not set
CONFIG_NETFILTER_XT_MATCH_POLICY=m
-CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_QUOTA=m
# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set
CONFIG_NETFILTER_XT_MATCH_REALM=m
+# CONFIG_NETFILTER_XT_MATCH_RECENT is not set
CONFIG_NETFILTER_XT_MATCH_SCTP=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
@@ -460,20 +507,53 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
CONFIG_NETFILTER_XT_MATCH_TIME=m
CONFIG_NETFILTER_XT_MATCH_U32=m
-CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+# CONFIG_NETFILTER_XT_MATCH_OSF is not set
+CONFIG_IP_VS=m
+# CONFIG_IP_VS_IPV6 is not set
+# CONFIG_IP_VS_DEBUG is not set
+CONFIG_IP_VS_TAB_BITS=12
+
+#
+# IPVS transport protocol load balancing support
+#
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_AH_ESP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+# CONFIG_IP_VS_PROTO_SCTP is not set
+
+#
+# IPVS scheduler
+#
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+
+#
+# IPVS application helper
+#
+CONFIG_IP_VS_FTP=m
#
# IP: Netfilter Configuration
#
+CONFIG_NF_DEFRAG_IPV4=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NF_CONNTRACK_PROC_COMPAT=y
CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_RECENT=m
-CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_TTL=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_TARGET_LOG=m
@@ -481,10 +561,13 @@ CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_NF_NAT=m
CONFIG_NF_NAT_NEEDED=y
CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_NF_NAT_SNMP_BASIC=m
+CONFIG_NF_NAT_PROTO_DCCP=m
CONFIG_NF_NAT_PROTO_GRE=m
+CONFIG_NF_NAT_PROTO_UDPLITE=m
+CONFIG_NF_NAT_PROTO_SCTP=m
CONFIG_NF_NAT_FTP=m
CONFIG_NF_NAT_IRC=m
CONFIG_NF_NAT_TFTP=m
@@ -493,9 +576,9 @@ CONFIG_NF_NAT_PPTP=m
CONFIG_NF_NAT_H323=m
CONFIG_NF_NAT_SIP=m
CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
-CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
@@ -507,24 +590,20 @@ CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_IP6_NF_QUEUE=m
CONFIG_IP6_NF_IPTABLES=m
-CONFIG_IP6_NF_MATCH_RT=m
-CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
CONFIG_IP6_NF_MATCH_HL=m
CONFIG_IP6_NF_MATCH_IPV6HEADER=m
-CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_MH=m
-CONFIG_IP6_NF_MATCH_EUI64=m
-CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
-CONFIG_IP6_NF_TARGET_HL=m
CONFIG_IP6_NF_RAW=m
-
-#
-# Bridge: Netfilter Configuration
-#
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
CONFIG_BRIDGE_EBT_T_FILTER=m
@@ -533,6 +612,7 @@ CONFIG_BRIDGE_EBT_802_3=m
CONFIG_BRIDGE_EBT_AMONG=m
CONFIG_BRIDGE_EBT_ARP=m
CONFIG_BRIDGE_EBT_IP=m
+# CONFIG_BRIDGE_EBT_IP6 is not set
CONFIG_BRIDGE_EBT_LIMIT=m
CONFIG_BRIDGE_EBT_MARK=m
CONFIG_BRIDGE_EBT_PKTTYPE=m
@@ -545,31 +625,30 @@ CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE_EBT_ULOG=m
+# CONFIG_BRIDGE_EBT_NFLOG is not set
CONFIG_IP_DCCP=m
CONFIG_INET_DCCP_DIAG=m
-CONFIG_IP_DCCP_ACKVEC=y
#
# DCCP CCIDs Configuration (EXPERIMENTAL)
#
-CONFIG_IP_DCCP_CCID2=m
# CONFIG_IP_DCCP_CCID2_DEBUG is not set
-CONFIG_IP_DCCP_CCID3=m
+CONFIG_IP_DCCP_CCID3=y
# CONFIG_IP_DCCP_CCID3_DEBUG is not set
CONFIG_IP_DCCP_CCID3_RTO=100
-CONFIG_IP_DCCP_TFRC_LIB=m
+CONFIG_IP_DCCP_TFRC_LIB=y
CONFIG_IP_SCTP=m
# CONFIG_SCTP_DBG_MSG is not set
# CONFIG_SCTP_DBG_OBJCNT is not set
# CONFIG_SCTP_HMAC_NONE is not set
# CONFIG_SCTP_HMAC_SHA1 is not set
CONFIG_SCTP_HMAC_MD5=y
+# CONFIG_RDS is not set
CONFIG_TIPC=m
CONFIG_TIPC_ADVANCED=y
CONFIG_TIPC_ZONES=3
CONFIG_TIPC_CLUSTERS=1
CONFIG_TIPC_NODES=255
-CONFIG_TIPC_SLAVE_NODES=0
CONFIG_TIPC_PORTS=8191
CONFIG_TIPC_LOG=0
# CONFIG_TIPC_DEBUG is not set
@@ -580,8 +659,12 @@ CONFIG_ATM_LANE=m
CONFIG_ATM_MPOA=m
CONFIG_ATM_BR2684=m
# CONFIG_ATM_BR2684_IPFILTER is not set
+CONFIG_STP=m
CONFIG_BRIDGE=m
+CONFIG_BRIDGE_IGMP_SNOOPING=y
+# CONFIG_NET_DSA is not set
CONFIG_VLAN_8021Q=m
+# CONFIG_VLAN_8021Q_GVRP is not set
# CONFIG_DECNET is not set
CONFIG_LLC=m
# CONFIG_LLC2 is not set
@@ -591,6 +674,8 @@ CONFIG_LLC=m
# CONFIG_LAPB is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
CONFIG_NET_SCHED=y
#
@@ -601,7 +686,7 @@ CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_HFSC=m
CONFIG_NET_SCH_ATM=m
CONFIG_NET_SCH_PRIO=m
-CONFIG_NET_SCH_RR=m
+# CONFIG_NET_SCH_MULTIQ is not set
CONFIG_NET_SCH_RED=m
CONFIG_NET_SCH_SFQ=m
CONFIG_NET_SCH_TEQL=m
@@ -609,6 +694,7 @@ CONFIG_NET_SCH_TBF=m
CONFIG_NET_SCH_GRED=m
CONFIG_NET_SCH_DSMARK=m
CONFIG_NET_SCH_NETEM=m
+# CONFIG_NET_SCH_DRR is not set
CONFIG_NET_SCH_INGRESS=m
#
@@ -626,6 +712,7 @@ CONFIG_CLS_U32_MARK=y
CONFIG_NET_CLS_RSVP=m
CONFIG_NET_CLS_RSVP6=m
# CONFIG_NET_CLS_FLOW is not set
+# CONFIG_NET_CLS_CGROUP is not set
CONFIG_NET_EMATCH=y
CONFIG_NET_EMATCH_STACK=32
CONFIG_NET_EMATCH_CMP=m
@@ -642,8 +729,10 @@ CONFIG_NET_ACT_IPT=m
CONFIG_NET_ACT_NAT=m
CONFIG_NET_ACT_PEDIT=m
CONFIG_NET_ACT_SIMP=m
+# CONFIG_NET_ACT_SKBEDIT is not set
CONFIG_NET_CLS_IND=y
CONFIG_NET_SCH_FIFO=y
+# CONFIG_DCB is not set
#
# Network testing
@@ -651,58 +740,7 @@ CONFIG_NET_SCH_FIFO=y
CONFIG_NET_PKTGEN=m
# CONFIG_HAMRADIO is not set
# CONFIG_CAN is not set
-CONFIG_IRDA=m
-
-#
-# IrDA protocols
-#
-CONFIG_IRLAN=m
-CONFIG_IRNET=m
-CONFIG_IRCOMM=m
-# CONFIG_IRDA_ULTRA is not set
-
-#
-# IrDA options
-#
-CONFIG_IRDA_CACHE_LAST_LSAP=y
-CONFIG_IRDA_FAST_RR=y
-# CONFIG_IRDA_DEBUG is not set
-
-#
-# Infrared-port device drivers
-#
-
-#
-# SIR device drivers
-#
-CONFIG_IRTTY_SIR=m
-
-#
-# Dongle support
-#
-CONFIG_DONGLE=y
-CONFIG_ESI_DONGLE=m
-CONFIG_ACTISYS_DONGLE=m
-CONFIG_TEKRAM_DONGLE=m
-CONFIG_TOIM3232_DONGLE=m
-CONFIG_LITELINK_DONGLE=m
-CONFIG_MA600_DONGLE=m
-CONFIG_GIRBIL_DONGLE=m
-CONFIG_MCP2120_DONGLE=m
-CONFIG_OLD_BELKIN_DONGLE=m
-CONFIG_ACT200L_DONGLE=m
-CONFIG_KINGSUN_DONGLE=m
-CONFIG_KSDAZZLE_DONGLE=m
-CONFIG_KS959_DONGLE=m
-
-#
-# FIR device drivers
-#
-CONFIG_USB_IRDA=m
-CONFIG_SIGMATEL_FIR=m
-CONFIG_TOSHIBA_FIR=m
-CONFIG_VLSI_FIR=m
-CONFIG_MCS_FIR=m
+# CONFIG_IRDA is not set
CONFIG_BT=m
# CONFIG_BT_L2CAP is not set
# CONFIG_BT_SCO is not set
@@ -710,8 +748,7 @@ CONFIG_BT=m
#
# Bluetooth device drivers
#
-CONFIG_BT_HCIUSB=m
-CONFIG_BT_HCIUSB_SCO=y
+# CONFIG_BT_HCIBTUSB is not set
CONFIG_BT_HCIUART=m
CONFIG_BT_HCIUART_H4=y
CONFIG_BT_HCIUART_BCSP=y
@@ -720,51 +757,37 @@ CONFIG_BT_HCIBCM203X=m
CONFIG_BT_HCIBPA10X=m
CONFIG_BT_HCIBFUSB=m
CONFIG_BT_HCIVHCI=m
+# CONFIG_BT_MRVL is not set
# CONFIG_AF_RXRPC is not set
CONFIG_FIB_RULES=y
-
-#
-# Wireless
-#
+CONFIG_WIRELESS=y
+CONFIG_WEXT_CORE=y
+CONFIG_WEXT_PROC=y
CONFIG_CFG80211=m
-CONFIG_NL80211=y
-CONFIG_WIRELESS_EXT=y
+# CONFIG_NL80211_TESTMODE is not set
+# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
+# CONFIG_CFG80211_REG_DEBUG is not set
+CONFIG_CFG80211_DEFAULT_PS=y
+# CONFIG_CFG80211_DEBUGFS is not set
+# CONFIG_CFG80211_INTERNAL_REGDB is not set
+CONFIG_CFG80211_WEXT=y
+CONFIG_WIRELESS_EXT_SYSFS=y
+# CONFIG_LIB80211 is not set
CONFIG_MAC80211=m
-
-#
-# Rate control algorithm selection
-#
+CONFIG_MAC80211_RC_PID=y
+CONFIG_MAC80211_RC_MINSTREL=y
CONFIG_MAC80211_RC_DEFAULT_PID=y
-# CONFIG_MAC80211_RC_DEFAULT_SIMPLE is not set
-# CONFIG_MAC80211_RC_DEFAULT_NONE is not set
-
-#
-# Selecting 'y' for an algorithm will
-#
-
-#
-# build the algorithm into mac80211.
-#
+# CONFIG_MAC80211_RC_DEFAULT_MINSTREL is not set
CONFIG_MAC80211_RC_DEFAULT="pid"
-CONFIG_MAC80211_RC_PID=y
-# CONFIG_MAC80211_RC_SIMPLE is not set
+CONFIG_MAC80211_MESH=y
CONFIG_MAC80211_LEDS=y
# CONFIG_MAC80211_DEBUGFS is not set
-# CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT is not set
-# CONFIG_MAC80211_DEBUG is not set
-CONFIG_IEEE80211=m
-# CONFIG_IEEE80211_DEBUG is not set
-CONFIG_IEEE80211_CRYPT_WEP=m
-CONFIG_IEEE80211_CRYPT_CCMP=m
-CONFIG_IEEE80211_CRYPT_TKIP=m
-CONFIG_IEEE80211_SOFTMAC=m
-# CONFIG_IEEE80211_SOFTMAC_DEBUG is not set
+# CONFIG_MAC80211_DEBUG_MENU is not set
+# CONFIG_WIMAX is not set
CONFIG_RFKILL=m
-CONFIG_RFKILL_INPUT=m
CONFIG_RFKILL_LEDS=y
-CONFIG_NET_9P=m
-CONFIG_NET_9P_FD=m
-# CONFIG_NET_9P_DEBUG is not set
+CONFIG_RFKILL_INPUT=y
+# CONFIG_NET_9P is not set
#
# Device Drivers
@@ -774,17 +797,22 @@ CONFIG_NET_9P_FD=m
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=m
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
# CONFIG_SYS_HYPERVISOR is not set
CONFIG_CONNECTOR=m
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
CONFIG_MTD_CONCAT=y
CONFIG_MTD_PARTITIONS=y
# CONFIG_MTD_REDBOOT_PARTS is not set
# CONFIG_MTD_CMDLINE_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
#
# User Modules And Translation Layers
@@ -829,9 +857,7 @@ CONFIG_MTD_ABSENT=y
#
# CONFIG_MTD_COMPLEX_MAPPINGS is not set
CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_PHYSMAP_START=0x8000000
-CONFIG_MTD_PHYSMAP_LEN=0x0
-CONFIG_MTD_PHYSMAP_BANKWIDTH=2
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
# CONFIG_MTD_INTEL_VR_NOR is not set
# CONFIG_MTD_PLATRAM is not set
@@ -854,6 +880,11 @@ CONFIG_MTD_PHYSMAP_BANKWIDTH=2
# CONFIG_MTD_ONENAND is not set
#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
# UBI - Unsorted block images
#
# CONFIG_MTD_UBI is not set
@@ -866,6 +897,7 @@ CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_COW_COMMON is not set
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
+# CONFIG_BLK_DEV_DRBD is not set
CONFIG_BLK_DEV_NBD=m
# CONFIG_BLK_DEV_SX8 is not set
# CONFIG_BLK_DEV_UB is not set
@@ -875,18 +907,27 @@ CONFIG_BLK_DEV_RAM_SIZE=16384
# CONFIG_BLK_DEV_XIP is not set
# CONFIG_CDROM_PKTCDVD is not set
CONFIG_ATA_OVER_ETH=m
+# CONFIG_BLK_DEV_HD is not set
CONFIG_MISC_DEVICES=y
# CONFIG_PHANTOM is not set
-# CONFIG_EEPROM_93CX6 is not set
# CONFIG_SGI_IOC4 is not set
# CONFIG_TIFM_CORE is not set
# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_HP_ILO is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_CB710_CORE is not set
CONFIG_HAVE_IDE=y
# CONFIG_IDE is not set
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -904,10 +945,6 @@ CONFIG_BLK_DEV_SR=m
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_CHR_DEV_SCH=m
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
@@ -924,21 +961,30 @@ CONFIG_SCSI_ISCSI_ATTRS=m
# CONFIG_SCSI_SRP_ATTRS is not set
CONFIG_SCSI_LOWLEVEL=y
CONFIG_ISCSI_TCP=m
+# CONFIG_SCSI_BNX2_ISCSI is not set
+# CONFIG_BE2ISCSI is not set
# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_SCSI_HPSA is not set
# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_3W_SAS is not set
# CONFIG_SCSI_ACARD is not set
# CONFIG_SCSI_AACRAID is not set
# CONFIG_SCSI_AIC7XXX is not set
# CONFIG_SCSI_AIC7XXX_OLD is not set
# CONFIG_SCSI_AIC79XX is not set
# CONFIG_SCSI_AIC94XX is not set
+# CONFIG_SCSI_MVSAS is not set
# CONFIG_SCSI_DPT_I2O is not set
# CONFIG_SCSI_ADVANSYS is not set
# CONFIG_SCSI_ARCMSR is not set
# CONFIG_MEGARAID_NEWGEN is not set
# CONFIG_MEGARAID_LEGACY is not set
# CONFIG_MEGARAID_SAS is not set
+# CONFIG_SCSI_MPT2SAS is not set
# CONFIG_SCSI_HPTIOP is not set
+# CONFIG_LIBFC is not set
+# CONFIG_LIBFCOE is not set
+# CONFIG_FCOE is not set
# CONFIG_SCSI_DMX3191D is not set
# CONFIG_SCSI_FUTURE_DOMAIN is not set
# CONFIG_SCSI_IPS is not set
@@ -954,7 +1000,12 @@ CONFIG_ISCSI_TCP=m
# CONFIG_SCSI_DC390T is not set
# CONFIG_SCSI_NSP32 is not set
# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_PMCRAID is not set
+# CONFIG_SCSI_PM8001 is not set
# CONFIG_SCSI_SRP is not set
+# CONFIG_SCSI_BFA_FC is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
# CONFIG_ATA is not set
# CONFIG_MD is not set
# CONFIG_FUSION is not set
@@ -962,11 +1013,18 @@ CONFIG_ISCSI_TCP=m
#
# IEEE 1394 (FireWire) support
#
+
+#
+# You can enable one or both FireWire driver stacks.
+#
+
+#
+# The newer stack is recommended.
+#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
# CONFIG_I2O is not set
CONFIG_NETDEVICES=y
-# CONFIG_NETDEVICES_MULTIQUEUE is not set
# CONFIG_IFB is not set
CONFIG_DUMMY=m
# CONFIG_BONDING is not set
@@ -990,8 +1048,11 @@ CONFIG_SMSC_PHY=m
CONFIG_BROADCOM_PHY=m
CONFIG_ICPLUS_PHY=m
# CONFIG_REALTEK_PHY is not set
-# CONFIG_FIXED_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
CONFIG_MDIO_BITBANG=m
+# CONFIG_MDIO_GPIO is not set
CONFIG_NET_ETHERNET=y
CONFIG_MII=y
# CONFIG_AX88796 is not set
@@ -999,24 +1060,31 @@ CONFIG_MII=y
# CONFIG_SUNGEM is not set
# CONFIG_CASSINI is not set
# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_SMC91X is not set
# CONFIG_DM9000 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_DNET is not set
# CONFIG_NET_TULIP is not set
# CONFIG_HP100 is not set
# CONFIG_IBM_NEW_EMAC_ZMII is not set
# CONFIG_IBM_NEW_EMAC_RGMII is not set
# CONFIG_IBM_NEW_EMAC_TAH is not set
# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
CONFIG_NET_PCI=y
# CONFIG_PCNET32 is not set
# CONFIG_AMD8111_ETH is not set
# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_KSZ884X_PCI is not set
CONFIG_B44=y
CONFIG_B44_PCI_AUTOSELECT=y
CONFIG_B44_PCICORE_AUTOSELECT=y
CONFIG_B44_PCI=y
# CONFIG_FORCEDETH is not set
# CONFIG_TC35815 is not set
-# CONFIG_EEPRO100 is not set
# CONFIG_E100 is not set
# CONFIG_FEALNX is not set
# CONFIG_NATSEMI is not set
@@ -1026,41 +1094,67 @@ CONFIG_B44_PCI=y
# CONFIG_R6040 is not set
# CONFIG_SIS900 is not set
# CONFIG_EPIC100 is not set
+# CONFIG_SMSC9420 is not set
# CONFIG_SUNDANCE is not set
# CONFIG_TLAN is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851_MLL is not set
# CONFIG_VIA_RHINE is not set
# CONFIG_SC92031 is not set
+# CONFIG_ATL2 is not set
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
# CONFIG_TR is not set
-
-#
-# Wireless LAN
-#
-# CONFIG_WLAN_PRE80211 is not set
-CONFIG_WLAN_80211=y
-# CONFIG_IPW2100 is not set
-# CONFIG_IPW2200 is not set
-# CONFIG_LIBERTAS is not set
-# CONFIG_HERMES is not set
+CONFIG_WLAN=y
+# CONFIG_LIBERTAS_THINFIRM is not set
# CONFIG_ATMEL is not set
+# CONFIG_AT76C50X_USB is not set
# CONFIG_PRISM54 is not set
# CONFIG_USB_ZD1201 is not set
# CONFIG_USB_NET_RNDIS_WLAN is not set
# CONFIG_RTL8180 is not set
# CONFIG_RTL8187 is not set
# CONFIG_ADM8211 is not set
-# CONFIG_P54_COMMON is not set
+# CONFIG_MAC80211_HWSIM is not set
+# CONFIG_MWL8K is not set
+CONFIG_ATH_COMMON=m
+# CONFIG_ATH_DEBUG is not set
CONFIG_ATH5K=m
-# CONFIG_IWL4965 is not set
-# CONFIG_IWL3945 is not set
+# CONFIG_ATH5K_DEBUG is not set
+# CONFIG_ATH9K is not set
+# CONFIG_AR9170_USB is not set
+CONFIG_B43=m
+CONFIG_B43_PCI_AUTOSELECT=y
+CONFIG_B43_PCICORE_AUTOSELECT=y
+CONFIG_B43_PIO=y
+CONFIG_B43_PHY_LP=y
+CONFIG_B43_LEDS=y
+# CONFIG_B43_DEBUG is not set
+CONFIG_B43LEGACY=m
+CONFIG_B43LEGACY_PCI_AUTOSELECT=y
+CONFIG_B43LEGACY_PCICORE_AUTOSELECT=y
+CONFIG_B43LEGACY_LEDS=y
+CONFIG_B43LEGACY_DEBUG=y
+CONFIG_B43LEGACY_DMA=y
+CONFIG_B43LEGACY_PIO=y
+CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y
+# CONFIG_B43LEGACY_DMA_MODE is not set
+# CONFIG_B43LEGACY_PIO_MODE is not set
# CONFIG_HOSTAP is not set
-# CONFIG_BCM43XX is not set
-# CONFIG_B43 is not set
-# CONFIG_B43LEGACY is not set
+# CONFIG_IPW2100 is not set
+# CONFIG_IPW2200 is not set
+# CONFIG_IWLWIFI is not set
+# CONFIG_LIBERTAS is not set
+# CONFIG_HERMES is not set
+# CONFIG_P54_COMMON is not set
+# CONFIG_RT2X00 is not set
+# CONFIG_WL12XX is not set
CONFIG_ZD1211RW=m
# CONFIG_ZD1211RW_DEBUG is not set
-# CONFIG_RT2X00 is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
#
# USB Network Adapters
@@ -1072,7 +1166,10 @@ CONFIG_USB_RTL8150=m
CONFIG_USB_USBNET=m
CONFIG_USB_NET_AX8817X=m
CONFIG_USB_NET_CDCETHER=m
+# CONFIG_USB_NET_CDC_EEM is not set
CONFIG_USB_NET_DM9601=m
+# CONFIG_USB_NET_SMSC75XX is not set
+# CONFIG_USB_NET_SMSC95XX is not set
CONFIG_USB_NET_GL620A=m
CONFIG_USB_NET_NET1080=m
CONFIG_USB_NET_PLUSB=m
@@ -1086,6 +1183,10 @@ CONFIG_USB_ARMLINUX=y
CONFIG_USB_EPSON2888=y
CONFIG_USB_KC2190=y
CONFIG_USB_NET_ZAURUS=m
+# CONFIG_USB_HSO is not set
+# CONFIG_USB_NET_INT51X1 is not set
+# CONFIG_USB_IPHETH is not set
+CONFIG_USB_SIERRA_NET=m
# CONFIG_WAN is not set
CONFIG_ATM_DRIVERS=y
CONFIG_ATM_DUMMY=m
@@ -1099,8 +1200,9 @@ CONFIG_ATM_TCP=m
# CONFIG_ATM_AMBASSADOR is not set
# CONFIG_ATM_HORIZON is not set
# CONFIG_ATM_IA is not set
-# CONFIG_ATM_FORE200E_MAYBE is not set
+# CONFIG_ATM_FORE200E is not set
# CONFIG_ATM_HE is not set
+# CONFIG_ATM_SOLOS is not set
# CONFIG_FDDI is not set
# CONFIG_HIPPI is not set
CONFIG_PPP=m
@@ -1120,11 +1222,10 @@ CONFIG_SLHC=m
# CONFIG_SLIP_SMART is not set
# CONFIG_SLIP_MODE_SLIP6 is not set
# CONFIG_NET_FC is not set
-CONFIG_NETCONSOLE=y
-# CONFIG_NETCONSOLE_DYNAMIC is not set
-CONFIG_NETPOLL=y
-# CONFIG_NETPOLL_TRAP is not set
-CONFIG_NET_POLL_CONTROLLER=y
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_VMXNET3 is not set
# CONFIG_ISDN is not set
# CONFIG_PHONE is not set
@@ -1134,6 +1235,7 @@ CONFIG_NET_POLL_CONTROLLER=y
CONFIG_INPUT=y
# CONFIG_INPUT_FF_MEMLESS is not set
# CONFIG_INPUT_POLLDEV is not set
+# CONFIG_INPUT_SPARSEKMAP is not set
#
# Userland interfaces
@@ -1166,6 +1268,7 @@ CONFIG_INPUT_EVDEV=m
# Character devices
#
# CONFIG_VT is not set
+CONFIG_DEVKMEM=y
# CONFIG_SERIAL_NONSTANDARD is not set
# CONFIG_NOZOMI is not set
@@ -1185,23 +1288,24 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=2
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_IPMI_HANDLER is not set
# CONFIG_HW_RANDOM is not set
-# CONFIG_RTC is not set
# CONFIG_R3964 is not set
# CONFIG_APPLICOM is not set
# CONFIG_RAW_DRIVER is not set
# CONFIG_TCG_TPM is not set
CONFIG_DEVPORT=y
# CONFIG_I2C is not set
+# CONFIG_SPI is not set
#
-# SPI support
+# PPS support
#
-# CONFIG_SPI is not set
-# CONFIG_SPI_MASTER is not set
+# CONFIG_PPS is not set
CONFIG_W1=m
CONFIG_W1_CON=y
@@ -1217,21 +1321,45 @@ CONFIG_W1_MASTER_DS2490=m
#
CONFIG_W1_SLAVE_THERM=m
CONFIG_W1_SLAVE_SMEM=m
+# CONFIG_W1_SLAVE_DS2431 is not set
CONFIG_W1_SLAVE_DS2433=m
# CONFIG_W1_SLAVE_DS2433_CRC is not set
CONFIG_W1_SLAVE_DS2760=m
+# CONFIG_W1_SLAVE_BQ27000 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
CONFIG_THERMAL=y
-# CONFIG_WATCHDOG is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
#
-# Sonics Silicon Backplane
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_ALIM7101_WDT is not set
+CONFIG_BCM47XX_WDT=y
+
+#
+# PCI-based Watchdog Cards
+#
+# CONFIG_PCIPCWATCHDOG is not set
+# CONFIG_WDTPCI is not set
+
#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
CONFIG_SSB=y
+CONFIG_SSB_SPROM=y
+CONFIG_SSB_BLOCKIO=y
CONFIG_SSB_PCIHOST_POSSIBLE=y
CONFIG_SSB_PCIHOST=y
+CONFIG_SSB_B43_PCI_BRIDGE=y
# CONFIG_SSB_SILENT is not set
# CONFIG_SSB_DEBUG is not set
CONFIG_SSB_SERIAL=y
@@ -1239,24 +1367,26 @@ CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y
CONFIG_SSB_DRIVER_PCICORE=y
CONFIG_SSB_PCICORE_HOSTMODE=y
CONFIG_SSB_DRIVER_MIPS=y
+CONFIG_SSB_EMBEDDED=y
CONFIG_SSB_DRIVER_EXTIF=y
+CONFIG_SSB_DRIVER_GIGE=y
#
# Multifunction device drivers
#
+# CONFIG_MFD_CORE is not set
# CONFIG_MFD_SM501 is not set
-
-#
-# Multimedia devices
-#
-# CONFIG_VIDEO_DEV is not set
-# CONFIG_DVB_CORE is not set
-CONFIG_DAB=y
-CONFIG_USB_DABUSB=m
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_LPC_SCH is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
#
# Graphics support
#
+CONFIG_VGA_ARB=y
+CONFIG_VGA_ARB_MAX_GPUS=16
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
@@ -1271,15 +1401,9 @@ CONFIG_DISPLAY_SUPPORT=m
#
# Display hardware drivers
#
-
-#
-# Sound
-#
CONFIG_SOUND=m
-
-#
-# Advanced Linux Sound Architecture
-#
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
CONFIG_SND=m
CONFIG_SND_TIMER=m
CONFIG_SND_PCM=m
@@ -1292,24 +1416,24 @@ CONFIG_SND_MIXER_OSS=m
CONFIG_SND_PCM_OSS=m
CONFIG_SND_PCM_OSS_PLUGINS=y
CONFIG_SND_SEQUENCER_OSS=y
+# CONFIG_SND_HRTIMER is not set
# CONFIG_SND_DYNAMIC_MINORS is not set
CONFIG_SND_SUPPORT_OLD_API=y
CONFIG_SND_VERBOSE_PROCFS=y
# CONFIG_SND_VERBOSE_PRINTK is not set
# CONFIG_SND_DEBUG is not set
-
-#
-# Generic devices
-#
+CONFIG_SND_RAWMIDI_SEQ=m
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
+CONFIG_SND_DRIVERS=y
CONFIG_SND_DUMMY=m
CONFIG_SND_VIRMIDI=m
# CONFIG_SND_MTPAV is not set
# CONFIG_SND_SERIAL_U16550 is not set
# CONFIG_SND_MPU401 is not set
-
-#
-# PCI devices
-#
+CONFIG_SND_PCI=y
# CONFIG_SND_AD1889 is not set
# CONFIG_SND_ALS300 is not set
# CONFIG_SND_ALI5451 is not set
@@ -1318,6 +1442,7 @@ CONFIG_SND_VIRMIDI=m
# CONFIG_SND_AU8810 is not set
# CONFIG_SND_AU8820 is not set
# CONFIG_SND_AU8830 is not set
+# CONFIG_SND_AW2 is not set
# CONFIG_SND_AZT3328 is not set
# CONFIG_SND_BT87X is not set
# CONFIG_SND_CA0106 is not set
@@ -1325,6 +1450,8 @@ CONFIG_SND_VIRMIDI=m
# CONFIG_SND_OXYGEN is not set
# CONFIG_SND_CS4281 is not set
# CONFIG_SND_CS46XX is not set
+# CONFIG_SND_CS5535AUDIO is not set
+# CONFIG_SND_CTXFI is not set
# CONFIG_SND_DARLA20 is not set
# CONFIG_SND_GINA20 is not set
# CONFIG_SND_LAYLA20 is not set
@@ -1337,6 +1464,8 @@ CONFIG_SND_VIRMIDI=m
# CONFIG_SND_INDIGO is not set
# CONFIG_SND_INDIGOIO is not set
# CONFIG_SND_INDIGODJ is not set
+# CONFIG_SND_INDIGOIOX is not set
+# CONFIG_SND_INDIGODJX is not set
# CONFIG_SND_EMU10K1 is not set
# CONFIG_SND_EMU10K1X is not set
# CONFIG_SND_ENS1370 is not set
@@ -1353,6 +1482,7 @@ CONFIG_SND_VIRMIDI=m
# CONFIG_SND_INTEL8X0 is not set
# CONFIG_SND_INTEL8X0M is not set
# CONFIG_SND_KORG1212 is not set
+# CONFIG_SND_LX6464ES is not set
# CONFIG_SND_MAESTRO3 is not set
# CONFIG_SND_MIXART is not set
# CONFIG_SND_NM256 is not set
@@ -1368,45 +1498,22 @@ CONFIG_SND_VIRMIDI=m
# CONFIG_SND_VIRTUOSO is not set
# CONFIG_SND_VX222 is not set
# CONFIG_SND_YMFPCI is not set
-
-#
-# ALSA MIPS devices
-#
-
-#
-# USB devices
-#
+CONFIG_SND_MIPS=y
+CONFIG_SND_USB=y
CONFIG_SND_USB_AUDIO=m
+# CONFIG_SND_USB_UA101 is not set
# CONFIG_SND_USB_CAIAQ is not set
-
-#
-# System on Chip audio support
-#
# CONFIG_SND_SOC is not set
-
-#
-# SoC Audio support for SuperH
-#
-
-#
-# ALSA SoC audio for Freescale SOCs
-#
-
-#
-# Open Sound System
-#
# CONFIG_SOUND_PRIME is not set
CONFIG_HID_SUPPORT=y
CONFIG_HID=m
-# CONFIG_HID_DEBUG is not set
# CONFIG_HIDRAW is not set
#
# USB Input Devices
#
CONFIG_USB_HID=m
-# CONFIG_USB_HIDINPUT_POWERBOOK is not set
-# CONFIG_HID_FF is not set
+# CONFIG_HID_PID is not set
CONFIG_USB_HIDDEV=y
#
@@ -1414,6 +1521,41 @@ CONFIG_USB_HIDDEV=y
#
# CONFIG_USB_KBD is not set
# CONFIG_USB_MOUSE is not set
+
+#
+# Special HID drivers
+#
+# CONFIG_HID_3M_PCT is not set
+# CONFIG_HID_A4TECH is not set
+# CONFIG_HID_APPLE is not set
+# CONFIG_HID_BELKIN is not set
+# CONFIG_HID_CHERRY is not set
+# CONFIG_HID_CHICONY is not set
+# CONFIG_HID_CYPRESS is not set
+# CONFIG_HID_DRAGONRISE is not set
+# CONFIG_HID_EZKEY is not set
+# CONFIG_HID_KYE is not set
+# CONFIG_HID_GYRATION is not set
+# CONFIG_HID_TWINHAN is not set
+# CONFIG_HID_KENSINGTON is not set
+# CONFIG_HID_LOGITECH is not set
+# CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MOSART is not set
+# CONFIG_HID_MONTEREY is not set
+# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_ORTEK is not set
+# CONFIG_HID_PANTHERLORD is not set
+# CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_QUANTA is not set
+# CONFIG_HID_SAMSUNG is not set
+# CONFIG_HID_SONY is not set
+# CONFIG_HID_STANTUM is not set
+# CONFIG_HID_SUNPLUS is not set
+# CONFIG_HID_GREENASIA is not set
+# CONFIG_HID_SMARTJOYPLUS is not set
+# CONFIG_HID_TOPSEED is not set
+# CONFIG_HID_THRUSTMASTER is not set
+# CONFIG_HID_ZEROPLUS is not set
CONFIG_USB_SUPPORT=y
CONFIG_USB_ARCH_HAS_HCD=y
CONFIG_USB_ARCH_HAS_OHCI=y
@@ -1429,14 +1571,24 @@ CONFIG_USB_DEVICEFS=y
# CONFIG_USB_DEVICE_CLASS is not set
# CONFIG_USB_DYNAMIC_MINORS is not set
# CONFIG_USB_OTG is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+# CONFIG_USB_MON is not set
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
#
# USB Host Controller Drivers
#
+# CONFIG_USB_C67X00_HCD is not set
+# CONFIG_USB_XHCI_HCD is not set
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_ROOT_HUB_TT=y
CONFIG_USB_EHCI_TT_NEWSCHED=y
+# CONFIG_USB_OXU210HP_HCD is not set
# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
CONFIG_USB_OHCI_HCD=y
# CONFIG_USB_OHCI_HCD_SSB is not set
# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
@@ -1446,26 +1598,30 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y
CONFIG_USB_U132_HCD=m
# CONFIG_USB_SL811_HCD is not set
CONFIG_USB_R8A66597_HCD=m
+# CONFIG_USB_WHCI_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
#
# USB Device Class drivers
#
CONFIG_USB_ACM=m
CONFIG_USB_PRINTER=m
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
#
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
#
#
-# may also be needed; see USB_STORAGE Help for more information
+# also be needed; see USB_STORAGE Help for more info
#
CONFIG_USB_STORAGE=y
# CONFIG_USB_STORAGE_DEBUG is not set
CONFIG_USB_STORAGE_DATAFAB=y
CONFIG_USB_STORAGE_FREECOM=y
# CONFIG_USB_STORAGE_ISD200 is not set
-CONFIG_USB_STORAGE_DPCM=y
CONFIG_USB_STORAGE_USBAT=y
CONFIG_USB_STORAGE_SDDR09=y
CONFIG_USB_STORAGE_SDDR55=y
@@ -1473,6 +1629,7 @@ CONFIG_USB_STORAGE_JUMPSHOT=y
CONFIG_USB_STORAGE_ALAUDA=y
CONFIG_USB_STORAGE_ONETOUCH=y
CONFIG_USB_STORAGE_KARMA=y
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
# CONFIG_USB_LIBUSUAL is not set
#
@@ -1480,7 +1637,6 @@ CONFIG_USB_STORAGE_KARMA=y
#
CONFIG_USB_MDC800=m
CONFIG_USB_MICROTEK=m
-# CONFIG_USB_MON is not set
#
# USB port drivers
@@ -1489,13 +1645,12 @@ CONFIG_USB_SERIAL=m
CONFIG_USB_EZUSB=y
CONFIG_USB_SERIAL_GENERIC=y
CONFIG_USB_SERIAL_AIRCABLE=m
-CONFIG_USB_SERIAL_AIRPRIME=m
CONFIG_USB_SERIAL_ARK3116=m
CONFIG_USB_SERIAL_BELKIN=m
CONFIG_USB_SERIAL_CH341=m
# CONFIG_USB_SERIAL_WHITEHEAT is not set
CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
-CONFIG_USB_SERIAL_CP2101=m
+# CONFIG_USB_SERIAL_CP210X is not set
CONFIG_USB_SERIAL_CYPRESS_M8=m
CONFIG_USB_SERIAL_EMPEG=m
CONFIG_USB_SERIAL_FTDI_SIO=m
@@ -1515,18 +1670,26 @@ CONFIG_USB_SERIAL_KOBIL_SCT=m
CONFIG_USB_SERIAL_MCT_U232=m
CONFIG_USB_SERIAL_MOS7720=m
CONFIG_USB_SERIAL_MOS7840=m
+# CONFIG_USB_SERIAL_MOTOROLA is not set
CONFIG_USB_SERIAL_NAVMAN=m
CONFIG_USB_SERIAL_PL2303=m
CONFIG_USB_SERIAL_OTI6858=m
+# CONFIG_USB_SERIAL_QCAUX is not set
+# CONFIG_USB_SERIAL_QUALCOMM is not set
+# CONFIG_USB_SERIAL_SPCP8X5 is not set
CONFIG_USB_SERIAL_HP4X=m
CONFIG_USB_SERIAL_SAFE=m
# CONFIG_USB_SERIAL_SAFE_PADDED is not set
+# CONFIG_USB_SERIAL_SIEMENS_MPI is not set
CONFIG_USB_SERIAL_SIERRAWIRELESS=m
+# CONFIG_USB_SERIAL_SYMBOL is not set
# CONFIG_USB_SERIAL_TI is not set
CONFIG_USB_SERIAL_CYBERJACK=m
CONFIG_USB_SERIAL_XIRCOM=m
CONFIG_USB_SERIAL_OPTION=m
CONFIG_USB_SERIAL_OMNINET=m
+# CONFIG_USB_SERIAL_OPTICON is not set
+# CONFIG_USB_SERIAL_VIVOPAY_SERIAL is not set
CONFIG_USB_SERIAL_DEBUG=m
#
@@ -1535,18 +1698,13 @@ CONFIG_USB_SERIAL_DEBUG=m
# CONFIG_USB_EMI62 is not set
# CONFIG_USB_EMI26 is not set
CONFIG_USB_ADUTUX=m
-CONFIG_USB_AUERSWALD=m
+# CONFIG_USB_SEVSEG is not set
CONFIG_USB_RIO500=m
CONFIG_USB_LEGOTOWER=m
CONFIG_USB_LCD=m
-CONFIG_USB_BERRY_CHARGE=m
CONFIG_USB_LED=m
CONFIG_USB_CYPRESS_CY7C63=m
CONFIG_USB_CYTHERM=m
-CONFIG_USB_PHIDGET=m
-CONFIG_USB_PHIDGETKIT=m
-CONFIG_USB_PHIDGETMOTORCONTROL=m
-CONFIG_USB_PHIDGETSERVO=m
CONFIG_USB_IDMOUSE=m
CONFIG_USB_FTDI_ELAN=m
# CONFIG_USB_APPLEDISPLAY is not set
@@ -1555,6 +1713,7 @@ CONFIG_USB_LD=m
CONFIG_USB_TRANCEVIBRATOR=m
CONFIG_USB_IOWARRIOR=m
CONFIG_USB_TEST=m
+# CONFIG_USB_ISIGHTFW is not set
CONFIG_USB_ATM=m
CONFIG_USB_SPEEDTOUCH=m
CONFIG_USB_CXACRU=m
@@ -1563,30 +1722,51 @@ CONFIG_USB_XUSBATM=m
CONFIG_USB_GADGET=m
# CONFIG_USB_GADGET_DEBUG_FILES is not set
# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
CONFIG_USB_GADGET_SELECTED=y
-# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_AT91 is not set
# CONFIG_USB_GADGET_ATMEL_USBA is not set
# CONFIG_USB_GADGET_FSL_USB2 is not set
-CONFIG_USB_GADGET_NET2280=y
-CONFIG_USB_NET2280=m
-# CONFIG_USB_GADGET_PXA2XX is not set
-# CONFIG_USB_GADGET_M66592 is not set
-# CONFIG_USB_GADGET_GOKU is not set
# CONFIG_USB_GADGET_LH7A40X is not set
# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_R8A66597 is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C_HSOTG is not set
+# CONFIG_USB_GADGET_IMX is not set
# CONFIG_USB_GADGET_S3C2410 is not set
-# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+CONFIG_USB_GADGET_NET2280=y
+CONFIG_USB_NET2280=m
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LANGWELL is not set
# CONFIG_USB_GADGET_DUMMY_HCD is not set
CONFIG_USB_GADGET_DUALSPEED=y
CONFIG_USB_ZERO=m
+# CONFIG_USB_AUDIO is not set
CONFIG_USB_ETH=m
CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_ETH_EEM is not set
CONFIG_USB_GADGETFS=m
CONFIG_USB_FILE_STORAGE=m
# CONFIG_USB_FILE_STORAGE_TEST is not set
+# CONFIG_USB_MASS_STORAGE is not set
CONFIG_USB_G_SERIAL=m
CONFIG_USB_MIDI_GADGET=m
# CONFIG_USB_G_PRINTER is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
+# CONFIG_USB_G_NOKIA is not set
+# CONFIG_USB_G_MULTI is not set
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_NOP_USB_XCEIV is not set
+# CONFIG_UWB is not set
# CONFIG_MMC is not set
# CONFIG_MEMSTICK is not set
CONFIG_NEW_LEDS=y
@@ -1596,21 +1776,33 @@ CONFIG_LEDS_CLASS=y
# LED drivers
#
CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_GPIO_PLATFORM=y
+# CONFIG_LEDS_LT3593 is not set
+CONFIG_LEDS_TRIGGERS=y
#
# LED Triggers
#
-CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_ACCESSIBILITY is not set
# CONFIG_INFINIBAND is not set
CONFIG_RTC_LIB=y
# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
#
-# Userspace I/O
+# TI VLYNQ
#
-# CONFIG_UIO is not set
+# CONFIG_STAGING is not set
#
# File systems
@@ -1621,10 +1813,11 @@ CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
# CONFIG_EXT2_FS_XIP is not set
CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT3_FS_XATTR=y
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
-# CONFIG_EXT4DEV_FS is not set
+# CONFIG_EXT4_FS is not set
CONFIG_JBD=y
# CONFIG_JBD_DEBUG is not set
CONFIG_FS_MBCACHE=y
@@ -1642,28 +1835,39 @@ CONFIG_JFS_SECURITY=y
CONFIG_FS_POSIX_ACL=y
CONFIG_XFS_FS=m
CONFIG_XFS_QUOTA=y
-CONFIG_XFS_SECURITY=y
CONFIG_XFS_POSIX_ACL=y
CONFIG_XFS_RT=y
+# CONFIG_XFS_DEBUG is not set
CONFIG_GFS2_FS=m
-CONFIG_GFS2_FS_LOCKING_NOLOCK=m
-CONFIG_GFS2_FS_LOCKING_DLM=m
+# CONFIG_GFS2_FS_LOCKING_DLM is not set
# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_PRINT_QUOTA_WARNING=y
+# CONFIG_QUOTA_DEBUG is not set
+CONFIG_QUOTA_TREE=m
CONFIG_QFMT_V1=m
CONFIG_QFMT_V2=m
CONFIG_QUOTACTL=y
CONFIG_AUTOFS_FS=m
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
+# CONFIG_CUSE is not set
CONFIG_GENERIC_ACL=y
#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
# CD-ROM/DVD Filesystems
#
CONFIG_ISO9660_FS=m
@@ -1690,15 +1894,13 @@ CONFIG_NTFS_RW=y
CONFIG_PROC_FS=y
CONFIG_PROC_KCORE=y
CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
CONFIG_SYSFS=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
# CONFIG_HUGETLB_PAGE is not set
CONFIG_CONFIGFS_FS=m
-
-#
-# Miscellaneous filesystems
-#
+CONFIG_MISC_FILESYSTEMS=y
CONFIG_ADFS_FS=m
# CONFIG_ADFS_FS_RW is not set
CONFIG_AFFS_FS=m
@@ -1721,12 +1923,19 @@ CONFIG_JFFS2_ZLIB=y
# CONFIG_JFFS2_LZO is not set
CONFIG_JFFS2_RTIME=y
# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_LOGFS is not set
CONFIG_CRAMFS=m
+# CONFIG_SQUASHFS is not set
CONFIG_VXFS_FS=m
CONFIG_MINIX_FS=m
+# CONFIG_OMFS_FS is not set
CONFIG_HPFS_FS=m
CONFIG_QNX4FS_FS=m
CONFIG_ROMFS_FS=m
+CONFIG_ROMFS_BACKED_BY_BLOCK=y
+# CONFIG_ROMFS_BACKED_BY_MTD is not set
+# CONFIG_ROMFS_BACKED_BY_BOTH is not set
+CONFIG_ROMFS_ON_BLOCK=y
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
# CONFIG_UFS_FS_WRITE is not set
@@ -1736,13 +1945,12 @@ CONFIG_NFS_FS=m
CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
-# CONFIG_NFS_DIRECTIO is not set
+# CONFIG_NFS_V4_1 is not set
CONFIG_NFSD=m
CONFIG_NFSD_V2_ACL=y
CONFIG_NFSD_V3=y
CONFIG_NFSD_V3_ACL=y
CONFIG_NFSD_V4=y
-CONFIG_NFSD_TCP=y
CONFIG_LOCKD=m
CONFIG_LOCKD_V4=y
CONFIG_EXPORTFS=m
@@ -1750,10 +1958,10 @@ CONFIG_NFS_ACL_SUPPORT=m
CONFIG_NFS_COMMON=y
CONFIG_SUNRPC=m
CONFIG_SUNRPC_GSS=m
-CONFIG_SUNRPC_BIND34=y
CONFIG_RPCSEC_GSS_KRB5=m
CONFIG_RPCSEC_GSS_SPKM3=m
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
CONFIG_CIFS=m
# CONFIG_CIFS_STATS is not set
# CONFIG_CIFS_WEAK_PW_HASH is not set
@@ -1771,9 +1979,7 @@ CONFIG_NCPFS_OS2_NS=y
CONFIG_NCPFS_NLS=y
CONFIG_NCPFS_EXTRAS=y
CONFIG_CODA_FS=m
-# CONFIG_CODA_FS_OLD_API is not set
# CONFIG_AFS_FS is not set
-CONFIG_9P_FS=m
#
# Partition Types
@@ -1846,90 +2052,167 @@ CONFIG_TRACE_IRQFLAGS_SUPPORT=y
# CONFIG_PRINTK_TIME is not set
CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_LKDTM is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_TRACING_SUPPORT=y
+# CONFIG_FTRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+CONFIG_EARLY_PRINTK=y
# CONFIG_CMDLINE_BOOL is not set
+# CONFIG_SPINLOCK_TEST is not set
#
# Security options
#
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
-# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD=m
+CONFIG_CRYPTO_AEAD2=y
CONFIG_CRYPTO_BLKCIPHER=m
-# CONFIG_CRYPTO_SEQIV is not set
+CONFIG_CRYPTO_BLKCIPHER2=y
CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=m
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+CONFIG_CRYPTO_GF128MUL=m
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+CONFIG_CRYPTO_AUTHENC=m
+CONFIG_CRYPTO_TEST=m
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=m
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+
+#
+# Hash modes
+#
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_NULL=m
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=m
+# CONFIG_CRYPTO_GHASH is not set
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
CONFIG_CRYPTO_SHA1=m
CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_TGR192=m
-CONFIG_CRYPTO_GF128MUL=m
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_CBC=m
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_LRW=m
-CONFIG_CRYPTO_XTS=m
-# CONFIG_CRYPTO_CTR is not set
-# CONFIG_CRYPTO_GCM is not set
-# CONFIG_CRYPTO_CCM is not set
-# CONFIG_CRYPTO_CRYPTD is not set
-CONFIG_CRYPTO_DES=m
-CONFIG_CRYPTO_FCRYPT=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_TWOFISH_COMMON=m
-CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_WP512=m
+
+#
+# Ciphers
+#
CONFIG_CRYPTO_AES=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_DES=m
+CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_SEED=m
# CONFIG_CRYPTO_SALSA20 is not set
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_TWOFISH_COMMON=m
+
+#
+# Compression
+#
CONFIG_CRYPTO_DEFLATE=m
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_CRC32C=m
-CONFIG_CRYPTO_CAMELLIA=m
-CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_AUTHENC=m
+# CONFIG_CRYPTO_ZLIB is not set
# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_HW=y
# CONFIG_CRYPTO_DEV_HIFN_795X is not set
+# CONFIG_BINARY_PRINTF is not set
#
# Library routines
#
CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
CONFIG_CRC_CCITT=m
CONFIG_CRC16=m
+# CONFIG_CRC_T10DIF is not set
CONFIG_CRC_ITU_T=m
CONFIG_CRC32=y
CONFIG_CRC7=m
CONFIG_LIBCRC32C=m
CONFIG_AUDIT_GENERIC=y
-CONFIG_ZLIB_INFLATE=m
+CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=m
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_DECOMPRESS_LZMA=y
CONFIG_TEXTSEARCH=y
CONFIG_TEXTSEARCH_KMP=m
CONFIG_TEXTSEARCH_BM=m
CONFIG_TEXTSEARCH_FSM=m
-CONFIG_PLIST=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
index 144b94d9a6ad..cff8f4c0e57c 100644
--- a/arch/mips/configs/mtx1_defconfig
+++ b/arch/mips/configs/mtx1_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.23-rc8
-# Sun Sep 30 12:56:10 2007
+# Linux kernel version: 2.6.34-rc6
+# Sat May 1 13:39:10 2010
#
CONFIG_MIPS=y
@@ -9,20 +9,28 @@ CONFIG_MIPS=y
# Machine selection
#
CONFIG_MACH_ALCHEMY=y
+# CONFIG_AR7 is not set
+# CONFIG_BCM47XX is not set
+# CONFIG_BCM63XX is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
# CONFIG_MACH_JAZZ is not set
-# CONFIG_LEMOTE_FULONG is not set
+# CONFIG_LASAT is not set
+# CONFIG_MACH_LOONGSON is not set
# CONFIG_MIPS_MALTA is not set
# CONFIG_MIPS_SIM is not set
-# CONFIG_MARKEINS is not set
+# CONFIG_NEC_MARKEINS is not set
# CONFIG_MACH_VR41XX is not set
+# CONFIG_NXP_STB220 is not set
+# CONFIG_NXP_STB225 is not set
# CONFIG_PNX8550_JBS is not set
# CONFIG_PNX8550_STB810 is not set
# CONFIG_PMC_MSP is not set
# CONFIG_PMC_YOSEMITE is not set
+# CONFIG_POWERTV is not set
# CONFIG_SGI_IP22 is not set
# CONFIG_SGI_IP27 is not set
+# CONFIG_SGI_IP28 is not set
# CONFIG_SGI_IP32 is not set
# CONFIG_SIBYTE_CRHINE is not set
# CONFIG_SIBYTE_CARMEL is not set
@@ -33,10 +41,14 @@ CONFIG_MACH_ALCHEMY=y
# CONFIG_SIBYTE_SENTOSA is not set
# CONFIG_SIBYTE_BIGSUR is not set
# CONFIG_SNI_RM is not set
-# CONFIG_TOSHIBA_JMR3927 is not set
-# CONFIG_TOSHIBA_RBTX4927 is not set
-# CONFIG_TOSHIBA_RBTX4938 is not set
+# CONFIG_MACH_TX39XX is not set
+# CONFIG_MACH_TX49XX is not set
+# CONFIG_MIKROTIK_RB532 is not set
# CONFIG_WR_PPMC is not set
+# CONFIG_CAVIUM_OCTEON_SIMULATOR is not set
+# CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD is not set
+CONFIG_ALCHEMY_GPIOINT_AU1000=y
+# CONFIG_ALCHEMY_GPIO_INDIRECT is not set
CONFIG_MIPS_MTX1=y
# CONFIG_MIPS_BOSPORUS is not set
# CONFIG_MIPS_DB1000 is not set
@@ -53,29 +65,38 @@ CONFIG_MIPS_MTX1=y
# CONFIG_MIPS_XXS1500 is not set
CONFIG_SOC_AU1500=y
CONFIG_SOC_AU1X00=y
+CONFIG_LOONGSON_UART_BASE=y
CONFIG_RWSEM_GENERIC_SPINLOCK=y
# CONFIG_ARCH_HAS_ILOG2_U32 is not set
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_ARCH_SUPPORTS_OPROFILE=y
CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_CLOCKEVENTS=y
CONFIG_GENERIC_TIME=y
-CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
-# CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ is not set
+CONFIG_GENERIC_CMOS_UPDATE=y
+CONFIG_SCHED_OMIT_FRAME_POINTER=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_CEVT_R4K_LIB=y
+CONFIG_CSRC_R4K_LIB=y
CONFIG_DMA_NONCOHERENT=y
-CONFIG_DMA_NEED_PCI_MAP_STATE=y
-# CONFIG_HOTPLUG_CPU is not set
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_SYS_HAS_EARLY_PRINTK=y
# CONFIG_NO_IOPORT is not set
+CONFIG_GENERIC_GPIO=y
# CONFIG_CPU_BIG_ENDIAN is not set
CONFIG_CPU_LITTLE_ENDIAN=y
CONFIG_SYS_SUPPORTS_APM_EMULATION=y
CONFIG_SYS_SUPPORTS_LITTLE_ENDIAN=y
+CONFIG_IRQ_CPU=y
CONFIG_MIPS_L1_CACHE_SHIFT=5
#
# CPU selection
#
-# CONFIG_CPU_LOONGSON2 is not set
+# CONFIG_CPU_LOONGSON2E is not set
+# CONFIG_CPU_LOONGSON2F is not set
CONFIG_CPU_MIPS32_R1=y
# CONFIG_CPU_MIPS32_R2 is not set
# CONFIG_CPU_MIPS64_R1 is not set
@@ -88,6 +109,7 @@ CONFIG_CPU_MIPS32_R1=y
# CONFIG_CPU_TX49XX is not set
# CONFIG_CPU_R5000 is not set
# CONFIG_CPU_R5432 is not set
+# CONFIG_CPU_R5500 is not set
# CONFIG_CPU_R6000 is not set
# CONFIG_CPU_NEVADA is not set
# CONFIG_CPU_R8000 is not set
@@ -95,11 +117,14 @@ CONFIG_CPU_MIPS32_R1=y
# CONFIG_CPU_RM7000 is not set
# CONFIG_CPU_RM9000 is not set
# CONFIG_CPU_SB1 is not set
+# CONFIG_CPU_CAVIUM_OCTEON is not set
+CONFIG_SYS_SUPPORTS_ZBOOT=y
CONFIG_SYS_HAS_CPU_MIPS32_R1=y
CONFIG_CPU_MIPS32=y
CONFIG_CPU_MIPSR1=y
CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y
CONFIG_CPU_SUPPORTS_32BIT_KERNEL=y
+CONFIG_HARDWARE_WATCHPOINTS=y
#
# Kernel type
@@ -109,28 +134,36 @@ CONFIG_32BIT=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
+# CONFIG_PAGE_SIZE_32KB is not set
# CONFIG_PAGE_SIZE_64KB is not set
CONFIG_CPU_HAS_PREFETCH=y
CONFIG_MIPS_MT_DISABLED=y
# CONFIG_MIPS_MT_SMP is not set
# CONFIG_MIPS_MT_SMTC is not set
CONFIG_64BIT_PHYS_ADDR=y
+CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
CONFIG_CPU_HAS_SYNC=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_CPU_SUPPORTS_HIGHMEM=y
CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_SELECT_MEMORY_MODEL=y
CONFIG_FLATMEM_MANUAL=y
# CONFIG_DISCONTIGMEM_MANUAL is not set
# CONFIG_SPARSEMEM_MANUAL is not set
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
-CONFIG_RESOURCES_64BIT=y
+CONFIG_PHYS_ADDR_T_64BIT=y
CONFIG_ZONE_DMA_FLAG=0
CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
# CONFIG_HZ_48 is not set
# CONFIG_HZ_100 is not set
# CONFIG_HZ_128 is not set
@@ -148,6 +181,7 @@ CONFIG_SECCOMP=y
CONFIG_LOCKDEP_SUPPORT=y
CONFIG_STACKTRACE_SUPPORT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -157,23 +191,49 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
# CONFIG_TASKSTATS is not set
-# CONFIG_USER_NS is not set
CONFIG_AUDIT=y
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TINY_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=17
-CONFIG_SYSFS_DEPRECATED=y
+# CONFIG_CGROUPS is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
CONFIG_RELAY=y
+# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
CONFIG_EMBEDDED=y
CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS=y
@@ -182,61 +242,104 @@ CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
CONFIG_ELF_CORE=y
+CONFIG_PCSPKR_PLATFORM=y
CONFIG_BASE_FULL=y
CONFIG_FUTEX=y
-CONFIG_ANON_INODES=y
CONFIG_EPOLL=y
CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_PCI_QUIRKS=y
+CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+CONFIG_HAVE_OPROFILE=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_SLOW_WORK=y
+# CONFIG_SLOW_WORK_DEBUG is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
-# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=0
CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
CONFIG_MODULE_UNLOAD=y
# CONFIG_MODULE_FORCE_UNLOAD is not set
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
-CONFIG_KMOD=y
CONFIG_BLOCK=y
-CONFIG_LBD=y
-# CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
#
# IO Schedulers
#
CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
CONFIG_IOSCHED_CFQ=y
-# CONFIG_DEFAULT_AS is not set
# CONFIG_DEFAULT_DEADLINE is not set
CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
+CONFIG_FREEZER=y
#
# Bus options (PCI, PCMCIA, EISA, ISA, TC)
#
CONFIG_HW_HAS_PCI=y
CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCI_STUB is not set
+# CONFIG_PCI_IOV is not set
CONFIG_MMU=y
-
-#
-# PCCARD (PCMCIA/CardBus) support
-#
CONFIG_PCCARD=m
-# CONFIG_PCMCIA_DEBUG is not set
CONFIG_PCMCIA=m
CONFIG_PCMCIA_LOAD_CIS=y
-CONFIG_PCMCIA_IOCTL=y
CONFIG_CARDBUS=y
#
@@ -251,6 +354,7 @@ CONFIG_YENTA_TOSHIBA=y
CONFIG_PD6729=m
CONFIG_I82092=m
# CONFIG_PCMCIA_AU1X00 is not set
+# CONFIG_PCMCIA_ALCHEMY_DEVBOARD is not set
CONFIG_PCCARD_NONSTATIC=m
# CONFIG_HOTPLUG_PCI is not set
@@ -258,35 +362,38 @@ CONFIG_PCCARD_NONSTATIC=m
# Executable file formats
#
CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_HAVE_AOUT is not set
CONFIG_BINFMT_MISC=m
CONFIG_TRAD_SIGNALS=y
#
# Power management options
#
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
CONFIG_PM=y
-# CONFIG_PM_LEGACY is not set
# CONFIG_PM_DEBUG is not set
CONFIG_PM_SLEEP=y
-CONFIG_SUSPEND_UP_POSSIBLE=y
CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+# CONFIG_HIBERNATION is not set
# CONFIG_APM_EMULATION is not set
-
-#
-# Networking
-#
+# CONFIG_PM_RUNTIME is not set
+CONFIG_PM_OPS=y
CONFIG_NET=y
#
# Networking options
#
CONFIG_PACKET=m
-CONFIG_PACKET_MMAP=y
CONFIG_UNIX=y
CONFIG_XFRM=y
CONFIG_XFRM_USER=m
# CONFIG_XFRM_SUB_POLICY is not set
# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+CONFIG_XFRM_IPCOMP=m
CONFIG_NET_KEY=m
# CONFIG_NET_KEY_MIGRATE is not set
CONFIG_INET=y
@@ -315,42 +422,13 @@ CONFIG_INET_TUNNEL=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_INET_LRO=y
CONFIG_INET_DIAG=y
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_TCP_MD5SIG is not set
-CONFIG_IP_VS=m
-# CONFIG_IP_VS_DEBUG is not set
-CONFIG_IP_VS_TAB_BITS=12
-
-#
-# IPVS transport protocol load balancing support
-#
-CONFIG_IP_VS_PROTO_TCP=y
-CONFIG_IP_VS_PROTO_UDP=y
-CONFIG_IP_VS_PROTO_ESP=y
-CONFIG_IP_VS_PROTO_AH=y
-
-#
-# IPVS scheduler
-#
-CONFIG_IP_VS_RR=m
-CONFIG_IP_VS_WRR=m
-CONFIG_IP_VS_LC=m
-CONFIG_IP_VS_WLC=m
-CONFIG_IP_VS_LBLC=m
-CONFIG_IP_VS_LBLCR=m
-CONFIG_IP_VS_DH=m
-CONFIG_IP_VS_SH=m
-CONFIG_IP_VS_SED=m
-CONFIG_IP_VS_NQ=m
-
-#
-# IPVS application helper
-#
-CONFIG_IP_VS_FTP=m
CONFIG_IPV6=m
CONFIG_IPV6_PRIVACY=y
# CONFIG_IPV6_ROUTER_PREF is not set
@@ -366,12 +444,15 @@ CONFIG_INET6_XFRM_MODE_TUNNEL=m
CONFIG_INET6_XFRM_MODE_BEET=m
CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
CONFIG_IPV6_SIT=m
+# CONFIG_IPV6_SIT_6RD is not set
+CONFIG_IPV6_NDISC_NODETYPE=y
CONFIG_IPV6_TUNNEL=m
# CONFIG_IPV6_MULTIPLE_TABLES is not set
-# CONFIG_NETLABEL is not set
+# CONFIG_IPV6_MROUTE is not set
CONFIG_NETWORK_SECMARK=y
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_NETFILTER_ADVANCED=y
CONFIG_BRIDGE_NETFILTER=y
#
@@ -380,57 +461,97 @@ CONFIG_BRIDGE_NETFILTER=y
CONFIG_NETFILTER_NETLINK=m
CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NETFILTER_NETLINK_LOG=m
-# CONFIG_NF_CONNTRACK_ENABLED is not set
# CONFIG_NF_CONNTRACK is not set
+# CONFIG_NETFILTER_TPROXY is not set
CONFIG_NETFILTER_XTABLES=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HL=m
+# CONFIG_NETFILTER_XT_TARGET_LED is not set
CONFIG_NETFILTER_XT_TARGET_MARK=m
-CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set
# CONFIG_NETFILTER_XT_TARGET_TRACE is not set
CONFIG_NETFILTER_XT_TARGET_SECMARK=m
# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set
+# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
CONFIG_NETFILTER_XT_MATCH_DCCP=m
CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
+# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set
+CONFIG_NETFILTER_XT_MATCH_HL=m
+# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set
CONFIG_NETFILTER_XT_MATCH_LENGTH=m
CONFIG_NETFILTER_XT_MATCH_LIMIT=m
CONFIG_NETFILTER_XT_MATCH_MAC=m
CONFIG_NETFILTER_XT_MATCH_MARK=m
-CONFIG_NETFILTER_XT_MATCH_POLICY=m
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+# CONFIG_NETFILTER_XT_MATCH_OWNER is not set
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set
CONFIG_NETFILTER_XT_MATCH_REALM=m
+# CONFIG_NETFILTER_XT_MATCH_RECENT is not set
CONFIG_NETFILTER_XT_MATCH_SCTP=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+# CONFIG_NETFILTER_XT_MATCH_TIME is not set
# CONFIG_NETFILTER_XT_MATCH_U32 is not set
-# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set
+# CONFIG_NETFILTER_XT_MATCH_OSF is not set
+CONFIG_IP_VS=m
+# CONFIG_IP_VS_IPV6 is not set
+# CONFIG_IP_VS_DEBUG is not set
+CONFIG_IP_VS_TAB_BITS=12
+
+#
+# IPVS transport protocol load balancing support
+#
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_AH_ESP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+# CONFIG_IP_VS_PROTO_SCTP is not set
+
+#
+# IPVS scheduler
+#
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+
+#
+# IPVS application helper
+#
+CONFIG_IP_VS_FTP=m
#
# IP: Netfilter Configuration
#
+# CONFIG_NF_DEFRAG_IPV4 is not set
CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_IPRANGE=m
-CONFIG_IP_NF_MATCH_TOS=m
-CONFIG_IP_NF_MATCH_RECENT=m
-CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_TTL=m
-CONFIG_IP_NF_MATCH_OWNER=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_TARGET_LOG=m
CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_TOS=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_IP_NF_RAW=m
@@ -439,34 +560,29 @@ CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
#
-# IPv6: Netfilter Configuration (EXPERIMENTAL)
+# IPv6: Netfilter Configuration
#
CONFIG_IP6_NF_QUEUE=m
CONFIG_IP6_NF_IPTABLES=m
-CONFIG_IP6_NF_MATCH_RT=m
-CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
CONFIG_IP6_NF_MATCH_HL=m
-CONFIG_IP6_NF_MATCH_OWNER=m
CONFIG_IP6_NF_MATCH_IPV6HEADER=m
-CONFIG_IP6_NF_MATCH_AH=m
# CONFIG_IP6_NF_MATCH_MH is not set
-CONFIG_IP6_NF_MATCH_EUI64=m
-CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
-CONFIG_IP6_NF_TARGET_HL=m
CONFIG_IP6_NF_RAW=m
#
# DECnet: Netfilter Configuration
#
CONFIG_DECNET_NF_GRABULATOR=m
-
-#
-# Bridge: Netfilter Configuration
-#
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
CONFIG_BRIDGE_EBT_T_FILTER=m
@@ -475,6 +591,7 @@ CONFIG_BRIDGE_EBT_802_3=m
CONFIG_BRIDGE_EBT_AMONG=m
CONFIG_BRIDGE_EBT_ARP=m
CONFIG_BRIDGE_EBT_IP=m
+# CONFIG_BRIDGE_EBT_IP6 is not set
CONFIG_BRIDGE_EBT_LIMIT=m
CONFIG_BRIDGE_EBT_MARK=m
CONFIG_BRIDGE_EBT_PKTTYPE=m
@@ -487,25 +604,25 @@ CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE_EBT_ULOG=m
+# CONFIG_BRIDGE_EBT_NFLOG is not set
CONFIG_IP_DCCP=m
CONFIG_INET_DCCP_DIAG=m
-CONFIG_IP_DCCP_ACKVEC=y
#
# DCCP CCIDs Configuration (EXPERIMENTAL)
#
-CONFIG_IP_DCCP_CCID2=m
# CONFIG_IP_DCCP_CCID2_DEBUG is not set
-CONFIG_IP_DCCP_CCID3=m
-CONFIG_IP_DCCP_TFRC_LIB=m
+CONFIG_IP_DCCP_CCID3=y
# CONFIG_IP_DCCP_CCID3_DEBUG is not set
CONFIG_IP_DCCP_CCID3_RTO=100
+CONFIG_IP_DCCP_TFRC_LIB=y
CONFIG_IP_SCTP=m
# CONFIG_SCTP_DBG_MSG is not set
# CONFIG_SCTP_DBG_OBJCNT is not set
# CONFIG_SCTP_HMAC_NONE is not set
# CONFIG_SCTP_HMAC_SHA1 is not set
CONFIG_SCTP_HMAC_MD5=y
+# CONFIG_RDS is not set
CONFIG_TIPC=m
# CONFIG_TIPC_ADVANCED is not set
# CONFIG_TIPC_DEBUG is not set
@@ -516,8 +633,12 @@ CONFIG_ATM_LANE=m
CONFIG_ATM_MPOA=m
CONFIG_ATM_BR2684=m
# CONFIG_ATM_BR2684_IPFILTER is not set
+CONFIG_STP=m
CONFIG_BRIDGE=m
+CONFIG_BRIDGE_IGMP_SNOOPING=y
+# CONFIG_NET_DSA is not set
CONFIG_VLAN_8021Q=m
+# CONFIG_VLAN_8021Q_GVRP is not set
CONFIG_DECNET=m
# CONFIG_DECNET_ROUTER is not set
CONFIG_LLC=y
@@ -535,12 +656,9 @@ CONFIG_ECONET=m
CONFIG_ECONET_AUNUDP=y
CONFIG_ECONET_NATIVE=y
CONFIG_WAN_ROUTER=m
-
-#
-# QoS and/or fair queueing
-#
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_FIFO=y
#
# Queueing/Scheduling
@@ -550,7 +668,7 @@ CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_HFSC=m
CONFIG_NET_SCH_ATM=m
CONFIG_NET_SCH_PRIO=m
-# CONFIG_NET_SCH_RR is not set
+# CONFIG_NET_SCH_MULTIQ is not set
CONFIG_NET_SCH_RED=m
CONFIG_NET_SCH_SFQ=m
CONFIG_NET_SCH_TEQL=m
@@ -558,6 +676,7 @@ CONFIG_NET_SCH_TBF=m
CONFIG_NET_SCH_GRED=m
CONFIG_NET_SCH_DSMARK=m
CONFIG_NET_SCH_NETEM=m
+# CONFIG_NET_SCH_DRR is not set
CONFIG_NET_SCH_INGRESS=m
#
@@ -574,6 +693,7 @@ CONFIG_NET_CLS_U32=m
CONFIG_CLS_U32_MARK=y
CONFIG_NET_CLS_RSVP=m
CONFIG_NET_CLS_RSVP6=m
+# CONFIG_NET_CLS_FLOW is not set
CONFIG_NET_EMATCH=y
CONFIG_NET_EMATCH_STACK=32
CONFIG_NET_EMATCH_CMP=m
@@ -586,10 +706,13 @@ CONFIG_NET_ACT_POLICE=y
# CONFIG_NET_ACT_GACT is not set
# CONFIG_NET_ACT_MIRRED is not set
# CONFIG_NET_ACT_IPT is not set
+# CONFIG_NET_ACT_NAT is not set
# CONFIG_NET_ACT_PEDIT is not set
# CONFIG_NET_ACT_SIMP is not set
-CONFIG_NET_CLS_POLICE=y
+# CONFIG_NET_ACT_SKBEDIT is not set
# CONFIG_NET_CLS_IND is not set
+CONFIG_NET_SCH_FIFO=y
+# CONFIG_DCB is not set
#
# Network testing
@@ -613,9 +736,8 @@ CONFIG_6PACK=m
CONFIG_BPQETHER=m
CONFIG_BAYCOM_SER_FDX=m
CONFIG_BAYCOM_SER_HDX=m
-CONFIG_BAYCOM_PAR=m
-CONFIG_BAYCOM_EPP=m
CONFIG_YAM=m
+# CONFIG_CAN is not set
CONFIG_IRDA=m
#
@@ -657,15 +779,8 @@ CONFIG_MCP2120_DONGLE=m
CONFIG_OLD_BELKIN_DONGLE=m
CONFIG_ACT200L_DONGLE=m
# CONFIG_KINGSUN_DONGLE is not set
-
-#
-# Old SIR device drivers
-#
-# CONFIG_IRPORT_SIR is not set
-
-#
-# Old Serial dongle support
-#
+# CONFIG_KSDAZZLE_DONGLE is not set
+# CONFIG_KS959_DONGLE is not set
#
# FIR device drivers
@@ -683,17 +798,17 @@ CONFIG_BT_RFCOMM_TTY=y
CONFIG_BT_BNEP=m
CONFIG_BT_BNEP_MC_FILTER=y
CONFIG_BT_BNEP_PROTO_FILTER=y
-CONFIG_BT_CMTP=m
CONFIG_BT_HIDP=m
#
# Bluetooth device drivers
#
-CONFIG_BT_HCIUSB=m
-CONFIG_BT_HCIUSB_SCO=y
+# CONFIG_BT_HCIBTUSB is not set
+# CONFIG_BT_HCIBTSDIO is not set
CONFIG_BT_HCIUART=m
CONFIG_BT_HCIUART_H4=y
CONFIG_BT_HCIUART_BCSP=y
+# CONFIG_BT_HCIUART_LL is not set
CONFIG_BT_HCIBCM203X=m
CONFIG_BT_HCIBPA10X=m
CONFIG_BT_HCIBFUSB=m
@@ -702,24 +817,19 @@ CONFIG_BT_HCIBT3C=m
CONFIG_BT_HCIBLUECARD=m
CONFIG_BT_HCIBTUART=m
CONFIG_BT_HCIVHCI=m
+# CONFIG_BT_MRVL is not set
CONFIG_AF_RXRPC=m
# CONFIG_AF_RXRPC_DEBUG is not set
# CONFIG_RXKAD is not set
CONFIG_FIB_RULES=y
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+# CONFIG_LIB80211 is not set
#
-# Wireless
+# CFG80211 needs to be enabled for MAC80211
#
-# CONFIG_CFG80211 is not set
-CONFIG_WIRELESS_EXT=y
-# CONFIG_MAC80211 is not set
-CONFIG_IEEE80211=m
-# CONFIG_IEEE80211_DEBUG is not set
-CONFIG_IEEE80211_CRYPT_WEP=m
-CONFIG_IEEE80211_CRYPT_CCMP=m
-CONFIG_IEEE80211_CRYPT_TKIP=m
-CONFIG_IEEE80211_SOFTMAC=m
-# CONFIG_IEEE80211_SOFTMAC_DEBUG is not set
+# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -730,40 +840,43 @@ CONFIG_IEEE80211_SOFTMAC=m
#
# Generic Driver Options
#
+CONFIG_UEVENT_HELPER_PATH=""
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
# CONFIG_SYS_HYPERVISOR is not set
CONFIG_CONNECTOR=m
-CONFIG_MTD=m
+CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
-CONFIG_MTD_CONCAT=m
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_CONCAT is not set
CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_REDBOOT_PARTS=m
-CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
-# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
-# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
+# CONFIG_MTD_REDBOOT_PARTS is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
#
# User Modules And Translation Layers
#
-CONFIG_MTD_CHAR=m
-CONFIG_MTD_BLKDEVS=m
-CONFIG_MTD_BLOCK=m
-CONFIG_MTD_BLOCK_RO=m
-CONFIG_FTL=m
-CONFIG_NFTL=m
-CONFIG_NFTL_RW=y
-CONFIG_INFTL=m
-CONFIG_RFD_FTL=m
-CONFIG_SSFDC=m
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
#
# RAM/ROM/Flash chip drivers
#
-CONFIG_MTD_CFI=m
-CONFIG_MTD_JEDECPROBE=m
-CONFIG_MTD_GEN_PROBE=m
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
# CONFIG_MTD_CFI_ADV_OPTIONS is not set
CONFIG_MTD_MAP_BANK_WIDTH_1=y
CONFIG_MTD_MAP_BANK_WIDTH_2=y
@@ -775,206 +888,81 @@ CONFIG_MTD_CFI_I1=y
CONFIG_MTD_CFI_I2=y
# CONFIG_MTD_CFI_I4 is not set
# CONFIG_MTD_CFI_I8 is not set
-CONFIG_MTD_CFI_INTELEXT=m
-CONFIG_MTD_CFI_AMDSTD=m
-CONFIG_MTD_CFI_STAA=m
-CONFIG_MTD_CFI_UTIL=m
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
CONFIG_MTD_RAM=m
-CONFIG_MTD_ROM=m
-CONFIG_MTD_ABSENT=m
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
#
# Mapping drivers for chip access
#
CONFIG_MTD_COMPLEX_MAPPINGS=y
-CONFIG_MTD_PHYSMAP=m
-CONFIG_MTD_PHYSMAP_START=0x8000000
-CONFIG_MTD_PHYSMAP_LEN=0x4000000
-CONFIG_MTD_PHYSMAP_BANKWIDTH=2
-# CONFIG_MTD_ALCHEMY is not set
-# CONFIG_MTD_MTX1 is not set
-CONFIG_MTD_PCI=m
-CONFIG_MTD_PLATRAM=m
+CONFIG_MTD_PHYSMAP=y
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
+# CONFIG_MTD_PCI is not set
+# CONFIG_MTD_GPIO_ADDR is not set
+# CONFIG_MTD_INTEL_VR_NOR is not set
+# CONFIG_MTD_PLATRAM is not set
#
# Self-contained MTD device drivers
#
-CONFIG_MTD_PMC551=m
-# CONFIG_MTD_PMC551_BUGFIX is not set
-# CONFIG_MTD_PMC551_DEBUG is not set
-CONFIG_MTD_DATAFLASH=m
-CONFIG_MTD_M25P80=m
-CONFIG_MTD_SLRAM=m
-CONFIG_MTD_PHRAM=m
-CONFIG_MTD_MTDRAM=m
-CONFIG_MTDRAM_TOTAL_SIZE=4096
-CONFIG_MTDRAM_ERASE_SIZE=128
-CONFIG_MTD_BLOCK2MTD=m
+# CONFIG_MTD_PMC551 is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
#
# Disk-On-Chip Device Drivers
#
-CONFIG_MTD_DOC2000=m
-CONFIG_MTD_DOC2001=m
-CONFIG_MTD_DOC2001PLUS=m
-CONFIG_MTD_DOCPROBE=m
-CONFIG_MTD_DOCECC=m
-# CONFIG_MTD_DOCPROBE_ADVANCED is not set
-CONFIG_MTD_DOCPROBE_ADDRESS=0
-CONFIG_MTD_NAND=m
-# CONFIG_MTD_NAND_VERIFY_WRITE is not set
-# CONFIG_MTD_NAND_ECC_SMC is not set
-# CONFIG_MTD_NAND_MUSEUM_IDS is not set
-CONFIG_MTD_NAND_IDS=m
-CONFIG_MTD_NAND_DISKONCHIP=m
-# CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED is not set
-CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0
-# CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE is not set
-# CONFIG_MTD_NAND_CAFE is not set
-CONFIG_MTD_NAND_NANDSIM=m
-# CONFIG_MTD_NAND_PLATFORM is not set
-CONFIG_MTD_ONENAND=m
-CONFIG_MTD_ONENAND_VERIFY_WRITE=y
-# CONFIG_MTD_ONENAND_OTP is not set
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_NAND is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
#
# UBI - Unsorted block images
#
# CONFIG_MTD_UBI is not set
-CONFIG_PARPORT=m
-CONFIG_PARPORT_PC=m
-CONFIG_PARPORT_SERIAL=m
-CONFIG_PARPORT_PC_FIFO=y
-CONFIG_PARPORT_PC_SUPERIO=y
-CONFIG_PARPORT_PC_PCMCIA=m
-# CONFIG_PARPORT_GSC is not set
-CONFIG_PARPORT_AX88796=m
-CONFIG_PARPORT_1284=y
-CONFIG_PARPORT_NOT_PC=y
+# CONFIG_PARPORT is not set
CONFIG_BLK_DEV=y
-CONFIG_PARIDE=m
-
-#
-# Parallel IDE high-level drivers
-#
-CONFIG_PARIDE_PD=m
-CONFIG_PARIDE_PCD=m
-CONFIG_PARIDE_PF=m
-CONFIG_PARIDE_PT=m
-CONFIG_PARIDE_PG=m
-
-#
-# Parallel IDE protocol modules
-#
-CONFIG_PARIDE_ATEN=m
-CONFIG_PARIDE_BPCK=m
-CONFIG_PARIDE_BPCK6=m
-CONFIG_PARIDE_COMM=m
-CONFIG_PARIDE_DSTR=m
-CONFIG_PARIDE_FIT2=m
-CONFIG_PARIDE_FIT3=m
-CONFIG_PARIDE_EPAT=m
-CONFIG_PARIDE_EPATC8=y
-CONFIG_PARIDE_EPIA=m
-CONFIG_PARIDE_FRIQ=m
-CONFIG_PARIDE_FRPW=m
-CONFIG_PARIDE_KBIC=m
-CONFIG_PARIDE_KTTI=m
-CONFIG_PARIDE_ON20=m
-CONFIG_PARIDE_ON26=m
-CONFIG_BLK_CPQ_DA=m
-CONFIG_BLK_CPQ_CISS_DA=m
-CONFIG_CISS_SCSI_TAPE=y
-CONFIG_BLK_DEV_DAC960=m
-CONFIG_BLK_DEV_UMEM=m
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
# CONFIG_BLK_DEV_COW_COMMON is not set
-CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_CRYPTOLOOP=m
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_DRBD is not set
CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_SX8=m
+# CONFIG_BLK_DEV_SX8 is not set
# CONFIG_BLK_DEV_UB is not set
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=65536
-CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_CDROM_PKTCDVD_BUFFERS=8
-# CONFIG_CDROM_PKTCDVD_WCACHE is not set
-CONFIG_ATA_OVER_ETH=m
-CONFIG_MISC_DEVICES=y
-# CONFIG_PHANTOM is not set
-# CONFIG_EEPROM_93CX6 is not set
-CONFIG_SGI_IOC4=m
-CONFIG_TIFM_CORE=m
-CONFIG_TIFM_7XX1=m
-CONFIG_IDE=y
-CONFIG_IDE_MAX_HWIFS=4
-CONFIG_BLK_DEV_IDE=y
-
-#
-# Please see Documentation/ide.txt for help/info on IDE drives
-#
-# CONFIG_BLK_DEV_IDE_SATA is not set
-CONFIG_BLK_DEV_IDEDISK=m
-# CONFIG_IDEDISK_MULTI_MODE is not set
-CONFIG_BLK_DEV_IDECS=m
-# CONFIG_BLK_DEV_DELKIN is not set
-CONFIG_BLK_DEV_IDECD=m
-CONFIG_BLK_DEV_IDETAPE=m
-CONFIG_BLK_DEV_IDEFLOPPY=m
-CONFIG_BLK_DEV_IDESCSI=m
-# CONFIG_IDE_TASK_IOCTL is not set
-CONFIG_IDE_PROC_FS=y
-
-#
-# IDE chipset support/bugfixes
-#
-CONFIG_IDE_GENERIC=m
-CONFIG_BLK_DEV_IDEPCI=y
-CONFIG_IDEPCI_SHARE_IRQ=y
-CONFIG_IDEPCI_PCIBUS_ORDER=y
-# CONFIG_BLK_DEV_OFFBOARD is not set
-CONFIG_BLK_DEV_GENERIC=m
-CONFIG_BLK_DEV_OPTI621=m
-CONFIG_BLK_DEV_IDEDMA_PCI=y
-# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
-# CONFIG_IDEDMA_ONLYDISK is not set
-CONFIG_BLK_DEV_AEC62XX=m
-CONFIG_BLK_DEV_ALI15X3=m
-# CONFIG_WDC_ALI15X3 is not set
-CONFIG_BLK_DEV_AMD74XX=m
-CONFIG_BLK_DEV_CMD64X=m
-CONFIG_BLK_DEV_TRIFLEX=m
-CONFIG_BLK_DEV_CY82C693=m
-# CONFIG_BLK_DEV_CS5520 is not set
-CONFIG_BLK_DEV_CS5530=m
-CONFIG_BLK_DEV_HPT34X=m
-# CONFIG_HPT34X_AUTODMA is not set
-CONFIG_BLK_DEV_HPT366=m
-# CONFIG_BLK_DEV_JMICRON is not set
-CONFIG_BLK_DEV_SC1200=m
-CONFIG_BLK_DEV_PIIX=m
-# CONFIG_BLK_DEV_IT8213 is not set
-CONFIG_BLK_DEV_IT821X=m
-CONFIG_BLK_DEV_NS87415=m
-CONFIG_BLK_DEV_PDC202XX_OLD=m
-CONFIG_PDC202XX_BURST=y
-CONFIG_BLK_DEV_PDC202XX_NEW=m
-CONFIG_BLK_DEV_SVWKS=m
-CONFIG_BLK_DEV_SIIMAGE=m
-# CONFIG_BLK_DEV_SLC90E66 is not set
-CONFIG_BLK_DEV_TRM290=m
-# CONFIG_BLK_DEV_VIA82CXXX is not set
-# CONFIG_BLK_DEV_TC86C001 is not set
-# CONFIG_IDE_ARM is not set
-CONFIG_BLK_DEV_IDEDMA=y
-# CONFIG_IDEDMA_IVB is not set
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
# CONFIG_BLK_DEV_HD is not set
+# CONFIG_MISC_DEVICES is not set
+CONFIG_TIFM_CORE=m
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
#
# SCSI device support
#
-CONFIG_RAID_ATTRS=m
+CONFIG_SCSI_MOD=m
+# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=m
CONFIG_SCSI_DMA=y
# CONFIG_SCSI_TGT is not set
@@ -985,18 +973,13 @@ CONFIG_SCSI_PROC_FS=y
# SCSI support type (disk, tape, CD-ROM)
#
CONFIG_BLK_DEV_SD=m
-CONFIG_CHR_DEV_ST=m
-CONFIG_CHR_DEV_OSST=m
-CONFIG_BLK_DEV_SR=m
-# CONFIG_BLK_DEV_SR_VENDOR is not set
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
CONFIG_CHR_DEV_SG=m
-CONFIG_CHR_DEV_SCH=m
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
+# CONFIG_CHR_DEV_SCH is not set
CONFIG_SCSI_MULTI_LUN=y
-CONFIG_SCSI_CONSTANTS=y
+# CONFIG_SCSI_CONSTANTS is not set
CONFIG_SCSI_LOGGING=y
# CONFIG_SCSI_SCAN_ASYNC is not set
CONFIG_SCSI_WAIT_SCAN=m
@@ -1009,198 +992,39 @@ CONFIG_SCSI_FC_ATTRS=m
CONFIG_SCSI_ISCSI_ATTRS=m
CONFIG_SCSI_SAS_ATTRS=m
CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_ATA is not set
+CONFIG_SCSI_SAS_HOST_SMP=y
# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_LOWLEVEL=y
-CONFIG_ISCSI_TCP=m
-CONFIG_BLK_DEV_3W_XXXX_RAID=m
-CONFIG_SCSI_3W_9XXX=m
-CONFIG_SCSI_ACARD=m
-CONFIG_SCSI_AACRAID=m
-CONFIG_SCSI_AIC7XXX=m
-CONFIG_AIC7XXX_CMDS_PER_DEVICE=8
-CONFIG_AIC7XXX_RESET_DELAY_MS=15000
-CONFIG_AIC7XXX_DEBUG_ENABLE=y
-CONFIG_AIC7XXX_DEBUG_MASK=0
-CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
-# CONFIG_SCSI_AIC7XXX_OLD is not set
-CONFIG_SCSI_AIC79XX=m
-CONFIG_AIC79XX_CMDS_PER_DEVICE=32
-CONFIG_AIC79XX_RESET_DELAY_MS=15000
-CONFIG_AIC79XX_DEBUG_ENABLE=y
-CONFIG_AIC79XX_DEBUG_MASK=0
-CONFIG_AIC79XX_REG_PRETTY_PRINT=y
-CONFIG_SCSI_AIC94XX=m
-# CONFIG_AIC94XX_DEBUG is not set
-CONFIG_SCSI_DPT_I2O=m
-CONFIG_SCSI_ARCMSR=m
-CONFIG_MEGARAID_NEWGEN=y
-CONFIG_MEGARAID_MM=m
-CONFIG_MEGARAID_MAILBOX=m
-CONFIG_MEGARAID_LEGACY=m
-CONFIG_MEGARAID_SAS=m
-CONFIG_SCSI_HPTIOP=m
-CONFIG_SCSI_DMX3191D=m
-CONFIG_SCSI_FUTURE_DOMAIN=m
-CONFIG_SCSI_IPS=m
-CONFIG_SCSI_INITIO=m
-# CONFIG_SCSI_INIA100 is not set
-CONFIG_SCSI_PPA=m
-CONFIG_SCSI_IMM=m
-# CONFIG_SCSI_IZIP_EPP16 is not set
-# CONFIG_SCSI_IZIP_SLOW_CTR is not set
-CONFIG_SCSI_STEX=m
-CONFIG_SCSI_SYM53C8XX_2=m
-CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
-CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
-CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
-CONFIG_SCSI_SYM53C8XX_MMIO=y
-CONFIG_SCSI_IPR=m
-# CONFIG_SCSI_IPR_TRACE is not set
-# CONFIG_SCSI_IPR_DUMP is not set
-CONFIG_SCSI_QLOGIC_1280=m
-CONFIG_SCSI_QLA_FC=m
-CONFIG_SCSI_QLA_ISCSI=m
-CONFIG_SCSI_LPFC=m
-CONFIG_SCSI_DC395x=m
-CONFIG_SCSI_DC390T=m
-CONFIG_SCSI_NSP32=m
-CONFIG_SCSI_DEBUG=m
-# CONFIG_SCSI_SRP is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+# CONFIG_SCSI_LOWLEVEL is not set
# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set
-CONFIG_ATA=m
-# CONFIG_ATA_NONSTANDARD is not set
-CONFIG_SATA_AHCI=m
-CONFIG_SATA_SVW=m
-CONFIG_ATA_PIIX=m
-CONFIG_SATA_MV=m
-CONFIG_SATA_NV=m
-CONFIG_PDC_ADMA=m
-CONFIG_SATA_QSTOR=m
-CONFIG_SATA_PROMISE=m
-CONFIG_SATA_SX4=m
-CONFIG_SATA_SIL=m
-CONFIG_SATA_SIL24=m
-CONFIG_SATA_SIS=m
-CONFIG_SATA_ULI=m
-CONFIG_SATA_VIA=m
-CONFIG_SATA_VITESSE=m
-# CONFIG_SATA_INIC162X is not set
-# CONFIG_PATA_ALI is not set
-# CONFIG_PATA_AMD is not set
-# CONFIG_PATA_ARTOP is not set
-# CONFIG_PATA_ATIIXP is not set
-# CONFIG_PATA_CMD640_PCI is not set
-# CONFIG_PATA_CMD64X is not set
-CONFIG_PATA_CS5520=m
-# CONFIG_PATA_CS5530 is not set
-# CONFIG_PATA_CYPRESS is not set
-CONFIG_PATA_EFAR=m
-CONFIG_ATA_GENERIC=m
-# CONFIG_PATA_HPT366 is not set
-# CONFIG_PATA_HPT37X is not set
-# CONFIG_PATA_HPT3X2N is not set
-# CONFIG_PATA_HPT3X3 is not set
-# CONFIG_PATA_IT821X is not set
-# CONFIG_PATA_IT8213 is not set
-CONFIG_PATA_JMICRON=m
-CONFIG_PATA_TRIFLEX=m
-# CONFIG_PATA_MARVELL is not set
-CONFIG_PATA_MPIIX=m
-# CONFIG_PATA_OLDPIIX is not set
-CONFIG_PATA_NETCELL=m
-# CONFIG_PATA_NS87410 is not set
-# CONFIG_PATA_OPTI is not set
-# CONFIG_PATA_OPTIDMA is not set
-CONFIG_PATA_PCMCIA=m
-# CONFIG_PATA_PDC_OLD is not set
-# CONFIG_PATA_RADISYS is not set
-CONFIG_PATA_RZ1000=m
-# CONFIG_PATA_SC1200 is not set
-# CONFIG_PATA_SERVERWORKS is not set
-CONFIG_PATA_PDC2027X=m
-CONFIG_PATA_SIL680=m
-CONFIG_PATA_SIS=m
-CONFIG_PATA_VIA=m
-CONFIG_PATA_WINBOND=m
-# CONFIG_PATA_PLATFORM is not set
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
-CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID10=m
-CONFIG_MD_RAID456=m
-# CONFIG_MD_RAID5_RESHAPE is not set
-CONFIG_MD_MULTIPATH=m
-CONFIG_MD_FAULTY=m
-CONFIG_BLK_DEV_DM=m
-# CONFIG_DM_DEBUG is not set
-CONFIG_DM_CRYPT=m
-CONFIG_DM_SNAPSHOT=m
-CONFIG_DM_MIRROR=m
-CONFIG_DM_ZERO=m
-CONFIG_DM_MULTIPATH=m
-CONFIG_DM_MULTIPATH_EMC=m
-# CONFIG_DM_MULTIPATH_RDAC is not set
-# CONFIG_DM_DELAY is not set
-
-#
-# Fusion MPT device support
-#
-CONFIG_FUSION=y
-CONFIG_FUSION_SPI=m
-CONFIG_FUSION_FC=m
-CONFIG_FUSION_SAS=m
-CONFIG_FUSION_MAX_SGE=128
-CONFIG_FUSION_CTL=m
-CONFIG_FUSION_LAN=m
-# CONFIG_FUSION_LOGGING is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+# CONFIG_FUSION is not set
#
# IEEE 1394 (FireWire) support
#
-# CONFIG_FIREWIRE is not set
-CONFIG_IEEE1394=m
-
-#
-# Subsystem Options
-#
-# CONFIG_IEEE1394_VERBOSEDEBUG is not set
#
-# Controllers
+# You can enable one or both FireWire driver stacks.
#
-CONFIG_IEEE1394_PCILYNX=m
-CONFIG_IEEE1394_OHCI1394=m
#
-# Protocols
+# The newer stack is recommended.
#
-CONFIG_IEEE1394_VIDEO1394=m
-CONFIG_IEEE1394_SBP2=m
-# CONFIG_IEEE1394_SBP2_PHYS_DMA is not set
-CONFIG_IEEE1394_ETH1394_ROM_ENTRY=y
-CONFIG_IEEE1394_ETH1394=m
-CONFIG_IEEE1394_DV1394=m
-CONFIG_IEEE1394_RAWIO=m
-CONFIG_I2O=m
-CONFIG_I2O_LCT_NOTIFY_ON_CHANGES=y
-CONFIG_I2O_EXT_ADAPTEC=y
-CONFIG_I2O_CONFIG=m
-CONFIG_I2O_CONFIG_OLD_IOCTL=y
-CONFIG_I2O_BUS=m
-CONFIG_I2O_BLOCK=m
-CONFIG_I2O_SCSI=m
-CONFIG_I2O_PROC=m
+# CONFIG_FIREWIRE is not set
+# CONFIG_IEEE1394 is not set
+# CONFIG_I2O is not set
CONFIG_NETDEVICES=y
-# CONFIG_NETDEVICES_MULTIQUEUE is not set
# CONFIG_IFB is not set
CONFIG_DUMMY=m
CONFIG_BONDING=m
# CONFIG_MACVLAN is not set
CONFIG_EQUALIZER=m
CONFIG_TUN=m
+# CONFIG_VETH is not set
CONFIG_ARCNET=m
CONFIG_ARCNET_1201=m
CONFIG_ARCNET_1051=m
@@ -1225,9 +1049,11 @@ CONFIG_VITESSE_PHY=m
CONFIG_SMSC_PHY=m
# CONFIG_BROADCOM_PHY is not set
# CONFIG_ICPLUS_PHY is not set
-CONFIG_FIXED_PHY=m
-# CONFIG_FIXED_MII_10_FDX is not set
-# CONFIG_FIXED_MII_100_FDX is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
CONFIG_NET_ETHERNET=y
CONFIG_MII=m
# CONFIG_AX88796 is not set
@@ -1240,8 +1066,12 @@ CONFIG_VORTEX=m
CONFIG_TYPHOON=m
# CONFIG_SMC91X is not set
# CONFIG_DM9000 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_DNET is not set
CONFIG_NET_TULIP=y
CONFIG_DE2104X=m
+CONFIG_DE2104X_DSL=0
CONFIG_TULIP=m
# CONFIG_TULIP_MWI is not set
# CONFIG_TULIP_MMIO is not set
@@ -1251,21 +1081,26 @@ CONFIG_WINBOND_840=m
CONFIG_DM9102=m
CONFIG_ULI526X=m
CONFIG_PCMCIA_XIRCOM=m
-# CONFIG_PCMCIA_XIRTULIP is not set
CONFIG_HP100=m
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
CONFIG_NET_PCI=y
CONFIG_PCNET32=m
-# CONFIG_PCNET32_NAPI is not set
CONFIG_AMD8111_ETH=m
-# CONFIG_AMD8111E_NAPI is not set
CONFIG_ADAPTEC_STARFIRE=m
-# CONFIG_ADAPTEC_STARFIRE_NAPI is not set
+# CONFIG_KSZ884X_PCI is not set
CONFIG_B44=m
+CONFIG_B44_PCI_AUTOSELECT=y
+CONFIG_B44_PCICORE_AUTOSELECT=y
+CONFIG_B44_PCI=y
CONFIG_FORCEDETH=m
# CONFIG_FORCEDETH_NAPI is not set
# CONFIG_TC35815 is not set
-CONFIG_DGRS=m
-CONFIG_EEPRO100=m
CONFIG_E100=m
CONFIG_FEALNX=m
CONFIG_NATSEMI=m
@@ -1276,52 +1111,71 @@ CONFIG_8139TOO=m
# CONFIG_8139TOO_TUNE_TWISTER is not set
CONFIG_8139TOO_8129=y
# CONFIG_8139_OLD_RX_RESET is not set
+# CONFIG_R6040 is not set
CONFIG_SIS900=m
CONFIG_EPIC100=m
+# CONFIG_SMSC9420 is not set
CONFIG_SUNDANCE=m
# CONFIG_SUNDANCE_MMIO is not set
CONFIG_TLAN=m
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851_MLL is not set
CONFIG_VIA_RHINE=m
# CONFIG_VIA_RHINE_MMIO is not set
-# CONFIG_VIA_RHINE_NAPI is not set
# CONFIG_SC92031 is not set
-CONFIG_NET_POCKET=y
-CONFIG_DE600=m
-CONFIG_DE620=m
+# CONFIG_ATL2 is not set
CONFIG_NETDEV_1000=y
CONFIG_ACENIC=m
# CONFIG_ACENIC_OMIT_TIGON_I is not set
CONFIG_DL2K=m
CONFIG_E1000=m
-# CONFIG_E1000_NAPI is not set
-# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
+# CONFIG_E1000E is not set
+# CONFIG_IP1000 is not set
+# CONFIG_IGB is not set
+# CONFIG_IGBVF is not set
CONFIG_NS83820=m
CONFIG_HAMACHI=m
CONFIG_YELLOWFIN=m
CONFIG_R8169=m
-# CONFIG_R8169_NAPI is not set
CONFIG_R8169_VLAN=y
CONFIG_SIS190=m
CONFIG_SKGE=m
+# CONFIG_SKGE_DEBUG is not set
CONFIG_SKY2=m
-CONFIG_SK98LIN=m
+# CONFIG_SKY2_DEBUG is not set
CONFIG_VIA_VELOCITY=m
CONFIG_TIGON3=m
CONFIG_BNX2=m
+# CONFIG_CNIC is not set
CONFIG_QLA3XXX=m
# CONFIG_ATL1 is not set
+# CONFIG_ATL1E is not set
+# CONFIG_ATL1C is not set
+# CONFIG_JME is not set
CONFIG_NETDEV_10000=y
+CONFIG_MDIO=m
CONFIG_CHELSIO_T1=m
# CONFIG_CHELSIO_T1_1G is not set
-CONFIG_CHELSIO_T1_NAPI=y
+CONFIG_CHELSIO_T3_DEPENDS=y
# CONFIG_CHELSIO_T3 is not set
+CONFIG_CHELSIO_T4_DEPENDS=y
+# CONFIG_CHELSIO_T4 is not set
+# CONFIG_ENIC is not set
+# CONFIG_IXGBE is not set
CONFIG_IXGB=m
-# CONFIG_IXGB_NAPI is not set
CONFIG_S2IO=m
-# CONFIG_S2IO_NAPI is not set
+# CONFIG_VXGE is not set
CONFIG_MYRI10GE=m
# CONFIG_NETXEN_NIC is not set
+# CONFIG_NIU is not set
+# CONFIG_MLX4_EN is not set
# CONFIG_MLX4_CORE is not set
+# CONFIG_TEHUTI is not set
+# CONFIG_BNX2X is not set
+# CONFIG_QLCNIC is not set
+# CONFIG_QLGE is not set
+# CONFIG_SFC is not set
+# CONFIG_BE2NET is not set
CONFIG_TR=y
CONFIG_IBMOL=m
CONFIG_IBMLS=m
@@ -1329,12 +1183,18 @@ CONFIG_3C359=m
CONFIG_TMS380TR=m
CONFIG_TMSPCI=m
CONFIG_ABYSS=m
+CONFIG_WLAN=y
+# CONFIG_PCMCIA_RAYCS is not set
+# CONFIG_ATMEL is not set
+# CONFIG_AIRO_CS is not set
+# CONFIG_PCMCIA_WL3501 is not set
+# CONFIG_PRISM54 is not set
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_HOSTAP is not set
#
-# Wireless LAN
+# Enable WiMAX (Networking options) to see the WiMAX drivers
#
-# CONFIG_WLAN_PRE80211 is not set
-# CONFIG_WLAN_80211 is not set
#
# USB Network Adapters
@@ -1343,11 +1203,13 @@ CONFIG_USB_CATC=m
CONFIG_USB_KAWETH=m
CONFIG_USB_PEGASUS=m
CONFIG_USB_RTL8150=m
-CONFIG_USB_USBNET_MII=m
CONFIG_USB_USBNET=m
CONFIG_USB_NET_AX8817X=m
CONFIG_USB_NET_CDCETHER=m
+# CONFIG_USB_NET_CDC_EEM is not set
# CONFIG_USB_NET_DM9601 is not set
+# CONFIG_USB_NET_SMSC75XX is not set
+# CONFIG_USB_NET_SMSC95XX is not set
CONFIG_USB_NET_GL620A=m
CONFIG_USB_NET_NET1080=m
CONFIG_USB_NET_PLUSB=m
@@ -1361,6 +1223,9 @@ CONFIG_USB_ARMLINUX=y
CONFIG_USB_EPSON2888=y
# CONFIG_USB_KC2190 is not set
CONFIG_USB_NET_ZAURUS=m
+# CONFIG_USB_NET_INT51X1 is not set
+# CONFIG_USB_IPHETH is not set
+CONFIG_USB_SIERRA_NET=m
CONFIG_NET_PCMCIA=y
CONFIG_PCMCIA_3C589=m
CONFIG_PCMCIA_3C574=m
@@ -1383,16 +1248,6 @@ CONFIG_HDLC_PPP=m
CONFIG_HDLC_X25=m
CONFIG_PCI200SYN=m
CONFIG_WANXL=m
-CONFIG_PC300=m
-CONFIG_PC300_MLPPP=y
-
-#
-# Cyclades-PC300 MLPPP support is disabled.
-#
-
-#
-# Refer to the file README.mlppp, provided by PC300 package.
-#
# CONFIG_PC300TOO is not set
CONFIG_FARSYNC=m
CONFIG_DSCC4=m
@@ -1428,15 +1283,13 @@ CONFIG_ATM_HORIZON=m
# CONFIG_ATM_HORIZON_DEBUG is not set
CONFIG_ATM_IA=m
# CONFIG_ATM_IA_DEBUG is not set
-CONFIG_ATM_FORE200E_MAYBE=m
-CONFIG_ATM_FORE200E_PCA=y
-CONFIG_ATM_FORE200E_PCA_DEFAULT_FW=y
+CONFIG_ATM_FORE200E=m
# CONFIG_ATM_FORE200E_USE_TASKLET is not set
CONFIG_ATM_FORE200E_TX_RETRY=16
CONFIG_ATM_FORE200E_DEBUG=0
-CONFIG_ATM_FORE200E=m
CONFIG_ATM_HE=m
CONFIG_ATM_HE_USE_SUNI=y
+# CONFIG_ATM_SOLOS is not set
CONFIG_FDDI=y
CONFIG_DEFXX=m
# CONFIG_DEFXX_MMIO is not set
@@ -1444,7 +1297,6 @@ CONFIG_SKFP=m
CONFIG_HIPPI=y
CONFIG_ROADRUNNER=m
# CONFIG_ROADRUNNER_LARGE_RINGS is not set
-CONFIG_PLIP=m
CONFIG_PPP=m
CONFIG_PPP_MULTILINK=y
CONFIG_PPP_FILTER=y
@@ -1462,219 +1314,53 @@ CONFIG_SLHC=m
CONFIG_SLIP_SMART=y
CONFIG_SLIP_MODE_SLIP6=y
CONFIG_NET_FC=y
-CONFIG_SHAPER=m
CONFIG_NETCONSOLE=m
+# CONFIG_NETCONSOLE_DYNAMIC is not set
CONFIG_NETPOLL=y
# CONFIG_NETPOLL_TRAP is not set
CONFIG_NET_POLL_CONTROLLER=y
-CONFIG_ISDN=m
-CONFIG_ISDN_I4L=m
-CONFIG_ISDN_PPP=y
-CONFIG_ISDN_PPP_VJ=y
-CONFIG_ISDN_MPP=y
-CONFIG_IPPP_FILTER=y
-CONFIG_ISDN_PPP_BSDCOMP=m
-CONFIG_ISDN_AUDIO=y
-CONFIG_ISDN_TTY_FAX=y
-CONFIG_ISDN_X25=y
-
-#
-# ISDN feature submodules
-#
-# CONFIG_ISDN_DRV_LOOP is not set
-CONFIG_ISDN_DIVERSION=m
-
-#
-# ISDN4Linux hardware drivers
-#
-
-#
-# Passive cards
-#
-CONFIG_ISDN_DRV_HISAX=m
-
-#
-# D-channel protocol features
-#
-CONFIG_HISAX_EURO=y
-CONFIG_DE_AOC=y
-# CONFIG_HISAX_NO_SENDCOMPLETE is not set
-# CONFIG_HISAX_NO_LLC is not set
-# CONFIG_HISAX_NO_KEYPAD is not set
-CONFIG_HISAX_1TR6=y
-CONFIG_HISAX_NI1=y
-CONFIG_HISAX_MAX_CARDS=8
-
-#
-# HiSax supported cards
-#
-CONFIG_HISAX_16_3=y
-CONFIG_HISAX_TELESPCI=y
-CONFIG_HISAX_S0BOX=y
-CONFIG_HISAX_FRITZPCI=y
-CONFIG_HISAX_AVM_A1_PCMCIA=y
-CONFIG_HISAX_ELSA=y
-CONFIG_HISAX_DIEHLDIVA=y
-CONFIG_HISAX_SEDLBAUER=y
-CONFIG_HISAX_NETJET=y
-CONFIG_HISAX_NETJET_U=y
-CONFIG_HISAX_NICCY=y
-CONFIG_HISAX_BKM_A4T=y
-CONFIG_HISAX_SCT_QUADRO=y
-CONFIG_HISAX_GAZEL=y
-CONFIG_HISAX_HFC_PCI=y
-CONFIG_HISAX_W6692=y
-CONFIG_HISAX_HFC_SX=y
-CONFIG_HISAX_ENTERNOW_PCI=y
-# CONFIG_HISAX_DEBUG is not set
-
-#
-# HiSax PCMCIA card service modules
-#
-CONFIG_HISAX_SEDLBAUER_CS=m
-CONFIG_HISAX_ELSA_CS=m
-CONFIG_HISAX_AVM_A1_CS=m
-CONFIG_HISAX_TELES_CS=m
-
-#
-# HiSax sub driver modules
-#
-CONFIG_HISAX_ST5481=m
-CONFIG_HISAX_HFCUSB=m
-CONFIG_HISAX_HFC4S8S=m
-CONFIG_HISAX_FRITZ_PCIPNP=m
-CONFIG_HISAX_HDLC=y
-
-#
-# Active cards
-#
-# CONFIG_HYSDN is not set
-CONFIG_ISDN_DRV_GIGASET=m
-CONFIG_GIGASET_BASE=m
-CONFIG_GIGASET_M105=m
-# CONFIG_GIGASET_M101 is not set
-# CONFIG_GIGASET_DEBUG is not set
-# CONFIG_GIGASET_UNDOCREQ is not set
-CONFIG_ISDN_CAPI=m
-CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON=y
-CONFIG_CAPI_TRACE=y
-CONFIG_ISDN_CAPI_MIDDLEWARE=y
-CONFIG_ISDN_CAPI_CAPI20=m
-CONFIG_ISDN_CAPI_CAPIFS_BOOL=y
-CONFIG_ISDN_CAPI_CAPIFS=m
-CONFIG_ISDN_CAPI_CAPIDRV=m
-
-#
-# CAPI hardware drivers
-#
-CONFIG_CAPI_AVM=y
-CONFIG_ISDN_DRV_AVMB1_B1PCI=m
-CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y
-CONFIG_ISDN_DRV_AVMB1_B1PCMCIA=m
-CONFIG_ISDN_DRV_AVMB1_AVM_CS=m
-CONFIG_ISDN_DRV_AVMB1_T1PCI=m
-CONFIG_ISDN_DRV_AVMB1_C4=m
-CONFIG_CAPI_EICON=y
-CONFIG_ISDN_DIVAS=m
-CONFIG_ISDN_DIVAS_BRIPCI=y
-CONFIG_ISDN_DIVAS_PRIPCI=y
-CONFIG_ISDN_DIVAS_DIVACAPI=m
-CONFIG_ISDN_DIVAS_USERIDI=m
-CONFIG_ISDN_DIVAS_MAINT=m
-CONFIG_PHONE=m
-CONFIG_PHONE_IXJ=m
-CONFIG_PHONE_IXJ_PCMCIA=m
+# CONFIG_VMXNET3 is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
#
# Input device support
#
CONFIG_INPUT=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_INPUT_FF_MEMLESS is not set
# CONFIG_INPUT_POLLDEV is not set
+# CONFIG_INPUT_SPARSEKMAP is not set
#
# Userland interfaces
#
-CONFIG_INPUT_MOUSEDEV=y
-CONFIG_INPUT_MOUSEDEV_PSAUX=y
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
-CONFIG_INPUT_JOYDEV=m
-CONFIG_INPUT_TSDEV=m
-CONFIG_INPUT_TSDEV_SCREEN_X=240
-CONFIG_INPUT_TSDEV_SCREEN_Y=320
-CONFIG_INPUT_EVDEV=m
-CONFIG_INPUT_EVBUG=m
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
#
# Input Device Drivers
#
CONFIG_INPUT_KEYBOARD=y
-CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_QT2160 is not set
+# CONFIG_KEYBOARD_LKKBD is not set
CONFIG_KEYBOARD_GPIO=y
-CONFIG_KEYBOARD_SUNKBD=m
-CONFIG_KEYBOARD_LKKBD=m
-CONFIG_KEYBOARD_XTKBD=m
-CONFIG_KEYBOARD_NEWTON=m
-CONFIG_KEYBOARD_STOWAWAY=m
-CONFIG_INPUT_MOUSE=y
-CONFIG_MOUSE_PS2=m
-CONFIG_MOUSE_PS2_ALPS=y
-CONFIG_MOUSE_PS2_LOGIPS2PP=y
-CONFIG_MOUSE_PS2_SYNAPTICS=y
-CONFIG_MOUSE_PS2_LIFEBOOK=y
-CONFIG_MOUSE_PS2_TRACKPOINT=y
-# CONFIG_MOUSE_PS2_TOUCHKIT is not set
-CONFIG_MOUSE_SERIAL=m
-# CONFIG_MOUSE_APPLETOUCH is not set
-CONFIG_MOUSE_VSXXXAA=m
-CONFIG_INPUT_JOYSTICK=y
-CONFIG_JOYSTICK_ANALOG=m
-CONFIG_JOYSTICK_A3D=m
-CONFIG_JOYSTICK_ADI=m
-CONFIG_JOYSTICK_COBRA=m
-CONFIG_JOYSTICK_GF2K=m
-CONFIG_JOYSTICK_GRIP=m
-CONFIG_JOYSTICK_GRIP_MP=m
-CONFIG_JOYSTICK_GUILLEMOT=m
-CONFIG_JOYSTICK_INTERACT=m
-CONFIG_JOYSTICK_SIDEWINDER=m
-CONFIG_JOYSTICK_TMDC=m
-CONFIG_JOYSTICK_IFORCE=m
-CONFIG_JOYSTICK_IFORCE_USB=y
-CONFIG_JOYSTICK_IFORCE_232=y
-CONFIG_JOYSTICK_WARRIOR=m
-CONFIG_JOYSTICK_MAGELLAN=m
-CONFIG_JOYSTICK_SPACEORB=m
-CONFIG_JOYSTICK_SPACEBALL=m
-CONFIG_JOYSTICK_STINGER=m
-CONFIG_JOYSTICK_TWIDJOY=m
-CONFIG_JOYSTICK_DB9=m
-CONFIG_JOYSTICK_GAMECON=m
-CONFIG_JOYSTICK_TURBOGRAFX=m
-CONFIG_JOYSTICK_JOYDUMP=m
-# CONFIG_JOYSTICK_XPAD is not set
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_LM8323 is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TABLET is not set
-CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_TOUCHSCREEN_ADS7846=m
-# CONFIG_TOUCHSCREEN_FUJITSU is not set
-CONFIG_TOUCHSCREEN_GUNZE=m
-CONFIG_TOUCHSCREEN_ELO=m
-CONFIG_TOUCHSCREEN_MTOUCH=m
-CONFIG_TOUCHSCREEN_MK712=m
-CONFIG_TOUCHSCREEN_PENMOUNT=m
-CONFIG_TOUCHSCREEN_TOUCHRIGHT=m
-CONFIG_TOUCHSCREEN_TOUCHWIN=m
-# CONFIG_TOUCHSCREEN_UCB1400 is not set
-# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
-CONFIG_INPUT_MISC=y
-CONFIG_INPUT_PCSPKR=m
-# CONFIG_INPUT_ATI_REMOTE is not set
-# CONFIG_INPUT_ATI_REMOTE2 is not set
-# CONFIG_INPUT_KEYSPAN_REMOTE is not set
-# CONFIG_INPUT_POWERMATE is not set
-# CONFIG_INPUT_YEALINK is not set
-CONFIG_INPUT_UINPUT=m
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
#
# Hardware I/O ports
@@ -1682,10 +1368,10 @@ CONFIG_INPUT_UINPUT=m
CONFIG_SERIO=y
CONFIG_SERIO_I8042=y
CONFIG_SERIO_SERPORT=m
-CONFIG_SERIO_PARKBD=m
CONFIG_SERIO_PCIPS2=m
CONFIG_SERIO_LIBPS2=y
CONFIG_SERIO_RAW=m
+# CONFIG_SERIO_ALTERA_PS2 is not set
CONFIG_GAMEPORT=m
CONFIG_GAMEPORT_NS558=m
CONFIG_GAMEPORT_L4=m
@@ -1696,30 +1382,13 @@ CONFIG_GAMEPORT_FM801=m
# Character devices
#
CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
CONFIG_VT_CONSOLE=y
CONFIG_HW_CONSOLE=y
CONFIG_VT_HW_CONSOLE_BINDING=y
-CONFIG_SERIAL_NONSTANDARD=y
-# CONFIG_COMPUTONE is not set
-CONFIG_ROCKETPORT=m
-CONFIG_CYCLADES=m
-# CONFIG_CYZ_INTR is not set
-CONFIG_DIGIEPCA=m
-# CONFIG_MOXA_INTELLIO is not set
-CONFIG_MOXA_SMARTIO=m
-# CONFIG_MOXA_SMARTIO_NEW is not set
-# CONFIG_ISI is not set
-CONFIG_SYNCLINKMP=m
-CONFIG_SYNCLINK_GT=m
-CONFIG_N_HDLC=m
-# CONFIG_RISCOM8 is not set
-CONFIG_SPECIALIX=m
-# CONFIG_SPECIALIX_RTSCTS is not set
-CONFIG_SX=m
-# CONFIG_RIO is not set
-CONFIG_STALDRV=y
-# CONFIG_STALLION is not set
-# CONFIG_ISTALLION is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_NOZOMI is not set
#
# Serial drivers
@@ -1741,161 +1410,128 @@ CONFIG_SERIAL_8250_RSA=y
#
CONFIG_SERIAL_CORE=m
CONFIG_SERIAL_JSM=m
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=256
-CONFIG_PRINTER=m
-# CONFIG_LP_CONSOLE is not set
-CONFIG_PPDEV=m
-CONFIG_TIPAR=m
-CONFIG_IPMI_HANDLER=m
-# CONFIG_IPMI_PANIC_EVENT is not set
-CONFIG_IPMI_DEVICE_INTERFACE=m
-CONFIG_IPMI_SI=m
-CONFIG_IPMI_WATCHDOG=m
-CONFIG_IPMI_POWEROFF=m
-CONFIG_WATCHDOG=y
-# CONFIG_WATCHDOG_NOWAYOUT is not set
-
-#
-# Watchdog Device Drivers
-#
-CONFIG_SOFT_WATCHDOG=m
-# CONFIG_WDT_MTX1 is not set
-
-#
-# PCI-based Watchdog Cards
-#
-CONFIG_PCIPCWATCHDOG=m
-CONFIG_WDTPCI=m
-CONFIG_WDT_501_PCI=y
-
-#
-# USB-based Watchdog Cards
-#
-CONFIG_USBPCWATCHDOG=m
+# CONFIG_IPMI_HANDLER is not set
CONFIG_HW_RANDOM=y
-CONFIG_RTC=y
-CONFIG_R3964=m
-CONFIG_APPLICOM=m
-CONFIG_DRM=m
-CONFIG_DRM_TDFX=m
-CONFIG_DRM_R128=m
-CONFIG_DRM_RADEON=m
-CONFIG_DRM_MGA=m
-CONFIG_DRM_VIA=m
-CONFIG_DRM_SAVAGE=m
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
#
# PCMCIA character devices
#
-CONFIG_SYNCLINK_CS=m
-CONFIG_CARDMAN_4000=m
-CONFIG_CARDMAN_4040=m
-CONFIG_RAW_DRIVER=m
-CONFIG_MAX_RAW_DEVS=256
-CONFIG_TCG_TPM=m
-CONFIG_TCG_ATMEL=m
+# CONFIG_SYNCLINK_CS is not set
+# CONFIG_CARDMAN_4000 is not set
+# CONFIG_CARDMAN_4040 is not set
+# CONFIG_IPWIRELESS is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
CONFIG_DEVPORT=y
CONFIG_I2C=m
CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=m
#
-# I2C Algorithms
+# I2C Hardware Bus support
#
-CONFIG_I2C_ALGOBIT=m
-CONFIG_I2C_ALGOPCF=m
-CONFIG_I2C_ALGOPCA=m
#
-# I2C Hardware Bus support
+# PC SMBus host controller drivers
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_ISCH is not set
+# CONFIG_I2C_PIIX4 is not set
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
#
-CONFIG_I2C_ALI1535=m
-CONFIG_I2C_ALI1563=m
-CONFIG_I2C_ALI15X3=m
-CONFIG_I2C_AMD756=m
-CONFIG_I2C_AMD756_S4882=m
-CONFIG_I2C_AMD8111=m
-CONFIG_I2C_I801=m
-CONFIG_I2C_I810=m
-CONFIG_I2C_PIIX4=m
-CONFIG_I2C_NFORCE2=m
-CONFIG_I2C_OCORES=m
-CONFIG_I2C_PARPORT=m
-CONFIG_I2C_PARPORT_LIGHT=m
-CONFIG_I2C_PROSAVAGE=m
-CONFIG_I2C_SAVAGE4=m
+CONFIG_I2C_GPIO=m
+# CONFIG_I2C_OCORES is not set
# CONFIG_I2C_SIMTEC is not set
-CONFIG_I2C_SIS5595=m
-CONFIG_I2C_SIS630=m
-CONFIG_I2C_SIS96X=m
+# CONFIG_I2C_XILINX is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
# CONFIG_I2C_TAOS_EVM is not set
-CONFIG_I2C_STUB=m
# CONFIG_I2C_TINY_USB is not set
-CONFIG_I2C_VIA=m
-CONFIG_I2C_VIAPRO=m
-CONFIG_I2C_VOODOO3=m
#
-# Miscellaneous I2C Chip support
+# Other I2C/SMBus bus drivers
#
-CONFIG_SENSORS_DS1337=m
-CONFIG_SENSORS_DS1374=m
-# CONFIG_DS1682 is not set
-CONFIG_EEPROM_LEGACY=m
-CONFIG_SENSORS_PCF8574=m
-CONFIG_SENSORS_PCA9539=m
-CONFIG_SENSORS_PCF8591=m
-CONFIG_EEPROM_MAX6875=m
-# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
+# CONFIG_SPI is not set
#
-# SPI support
+# PPS support
#
-CONFIG_SPI=y
-CONFIG_SPI_MASTER=y
+# CONFIG_PPS is not set
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
#
-# SPI Master Controller Drivers
+# Memory mapped GPIO expanders:
#
-CONFIG_SPI_BITBANG=m
-CONFIG_SPI_BUTTERFLY=m
-# CONFIG_SPI_LM70_LLP is not set
+# CONFIG_GPIO_IT8761E is not set
+# CONFIG_GPIO_SCH is not set
#
-# SPI Protocol Masters
+# I2C GPIO expanders:
#
-# CONFIG_EEPROM_AT25 is not set
-# CONFIG_SPI_SPIDEV is not set
-# CONFIG_SPI_TLE62X0 is not set
-CONFIG_W1=m
-CONFIG_W1_CON=y
+# CONFIG_GPIO_MAX7300 is not set
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_ADP5588 is not set
#
-# 1-wire Bus Masters
+# PCI GPIO expanders:
#
-CONFIG_W1_MASTER_MATROX=m
-CONFIG_W1_MASTER_DS2490=m
-CONFIG_W1_MASTER_DS2482=m
+# CONFIG_GPIO_CS5535 is not set
+# CONFIG_GPIO_BT8XX is not set
+# CONFIG_GPIO_LANGWELL is not set
#
-# 1-wire Slaves
+# SPI GPIO expanders:
#
-CONFIG_W1_SLAVE_THERM=m
-CONFIG_W1_SLAVE_SMEM=m
-CONFIG_W1_SLAVE_DS2433=m
-# CONFIG_W1_SLAVE_DS2433_CRC is not set
-# CONFIG_W1_SLAVE_DS2760 is not set
+
+#
+# AC97 GPIO expanders:
+#
+# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
CONFIG_HWMON=y
CONFIG_HWMON_VID=m
-CONFIG_SENSORS_ABITUGURU=m
-# CONFIG_SENSORS_ABITUGURU3 is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
+# CONFIG_SENSORS_AD7414 is not set
# CONFIG_SENSORS_AD7418 is not set
CONFIG_SENSORS_ADM1021=m
CONFIG_SENSORS_ADM1025=m
@@ -1903,17 +1539,23 @@ CONFIG_SENSORS_ADM1026=m
# CONFIG_SENSORS_ADM1029 is not set
CONFIG_SENSORS_ADM1031=m
CONFIG_SENSORS_ADM9240=m
-CONFIG_SENSORS_ASB100=m
+# CONFIG_SENSORS_ADT7411 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ASC7621 is not set
CONFIG_SENSORS_ATXP1=m
CONFIG_SENSORS_DS1621=m
+# CONFIG_SENSORS_I5K_AMB is not set
CONFIG_SENSORS_F71805F=m
-CONFIG_SENSORS_FSCHER=m
-CONFIG_SENSORS_FSCPOS=m
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_G760A is not set
CONFIG_SENSORS_GL518SM=m
CONFIG_SENSORS_GL520SM=m
CONFIG_SENSORS_IT87=m
CONFIG_SENSORS_LM63=m
-CONFIG_SENSORS_LM70=m
+# CONFIG_SENSORS_LM73 is not set
CONFIG_SENSORS_LM75=m
CONFIG_SENSORS_LM77=m
CONFIG_SENSORS_LM78=m
@@ -1924,16 +1566,25 @@ CONFIG_SENSORS_LM87=m
CONFIG_SENSORS_LM90=m
CONFIG_SENSORS_LM92=m
# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LTC4215 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_LM95241 is not set
CONFIG_SENSORS_MAX1619=m
# CONFIG_SENSORS_MAX6650 is not set
CONFIG_SENSORS_PC87360=m
# CONFIG_SENSORS_PC87427 is not set
+CONFIG_SENSORS_PCF8591=m
+# CONFIG_SENSORS_SHT15 is not set
CONFIG_SENSORS_SIS5595=m
# CONFIG_SENSORS_DME1737 is not set
CONFIG_SENSORS_SMSC47M1=m
CONFIG_SENSORS_SMSC47M192=m
CONFIG_SENSORS_SMSC47B397=m
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_AMC6821 is not set
# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_TMP401 is not set
+# CONFIG_SENSORS_TMP421 is not set
CONFIG_SENSORS_VIA686A=m
CONFIG_SENSORS_VT1211=m
CONFIG_SENSORS_VT8231=m
@@ -1942,370 +1593,94 @@ CONFIG_SENSORS_W83791D=m
CONFIG_SENSORS_W83792D=m
# CONFIG_SENSORS_W83793 is not set
CONFIG_SENSORS_W83L785TS=m
+# CONFIG_SENSORS_W83L786NG is not set
CONFIG_SENSORS_W83627HF=m
CONFIG_SENSORS_W83627EHF=m
-# CONFIG_HWMON_DEBUG_CHIP is not set
-
-#
-# Multifunction device drivers
-#
-# CONFIG_MFD_SM501 is not set
-
-#
-# Multimedia devices
-#
-CONFIG_VIDEO_DEV=m
-CONFIG_VIDEO_V4L1=y
-CONFIG_VIDEO_V4L1_COMPAT=y
-CONFIG_VIDEO_V4L2=y
-CONFIG_VIDEO_CAPTURE_DRIVERS=y
-# CONFIG_VIDEO_ADV_DEBUG is not set
-CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
-CONFIG_VIDEO_TVAUDIO=m
-CONFIG_VIDEO_TDA7432=m
-CONFIG_VIDEO_TDA9840=m
-CONFIG_VIDEO_TDA9875=m
-CONFIG_VIDEO_TEA6415C=m
-CONFIG_VIDEO_TEA6420=m
-CONFIG_VIDEO_MSP3400=m
-CONFIG_VIDEO_WM8775=m
-CONFIG_VIDEO_BT819=m
-CONFIG_VIDEO_BT856=m
-CONFIG_VIDEO_KS0127=m
-CONFIG_VIDEO_SAA7110=m
-CONFIG_VIDEO_SAA7111=m
-CONFIG_VIDEO_SAA7114=m
-CONFIG_VIDEO_SAA711X=m
-CONFIG_VIDEO_TVP5150=m
-CONFIG_VIDEO_VPX3220=m
-CONFIG_VIDEO_CX25840=m
-CONFIG_VIDEO_CX2341X=m
-CONFIG_VIDEO_SAA7185=m
-CONFIG_VIDEO_ADV7170=m
-CONFIG_VIDEO_ADV7175=m
-CONFIG_VIDEO_VIVI=m
-CONFIG_VIDEO_BT848=m
-CONFIG_VIDEO_BT848_DVB=y
-CONFIG_VIDEO_SAA6588=m
-CONFIG_VIDEO_BWQCAM=m
-CONFIG_VIDEO_CQCAM=m
-CONFIG_VIDEO_W9966=m
-CONFIG_VIDEO_CPIA=m
-CONFIG_VIDEO_CPIA_PP=m
-CONFIG_VIDEO_CPIA_USB=m
-CONFIG_VIDEO_CPIA2=m
-CONFIG_VIDEO_SAA5246A=m
-CONFIG_VIDEO_SAA5249=m
-CONFIG_TUNER_3036=m
-# CONFIG_TUNER_TEA5761 is not set
-CONFIG_VIDEO_STRADIS=m
-CONFIG_VIDEO_ZORAN_ZR36060=m
-CONFIG_VIDEO_ZORAN=m
-CONFIG_VIDEO_ZORAN_BUZ=m
-CONFIG_VIDEO_ZORAN_DC10=m
-CONFIG_VIDEO_ZORAN_DC30=m
-CONFIG_VIDEO_ZORAN_LML33=m
-CONFIG_VIDEO_ZORAN_LML33R10=m
-CONFIG_VIDEO_ZORAN_AVS6EYES=m
-CONFIG_VIDEO_SAA7134=m
-CONFIG_VIDEO_SAA7134_ALSA=m
-CONFIG_VIDEO_SAA7134_OSS=m
-CONFIG_VIDEO_SAA7134_DVB=m
-CONFIG_VIDEO_MXB=m
-CONFIG_VIDEO_DPC=m
-CONFIG_VIDEO_HEXIUM_ORION=m
-CONFIG_VIDEO_HEXIUM_GEMINI=m
-CONFIG_VIDEO_CX88=m
-CONFIG_VIDEO_CX88_ALSA=m
-CONFIG_VIDEO_CX88_BLACKBIRD=m
-CONFIG_VIDEO_CX88_DVB=m
-CONFIG_VIDEO_CX88_VP3054=m
-# CONFIG_VIDEO_IVTV is not set
-# CONFIG_VIDEO_CAFE_CCIC is not set
-CONFIG_V4L_USB_DRIVERS=y
-CONFIG_VIDEO_PVRUSB2=m
-CONFIG_VIDEO_PVRUSB2_29XXX=y
-CONFIG_VIDEO_PVRUSB2_24XXX=y
-CONFIG_VIDEO_PVRUSB2_SYSFS=y
-# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set
-CONFIG_VIDEO_EM28XX=m
-# CONFIG_VIDEO_USBVISION is not set
-CONFIG_VIDEO_USBVIDEO=m
-CONFIG_USB_VICAM=m
-CONFIG_USB_IBMCAM=m
-CONFIG_USB_KONICAWC=m
-CONFIG_USB_QUICKCAM_MESSENGER=m
-CONFIG_USB_ET61X251=m
-CONFIG_VIDEO_OVCAMCHIP=m
-CONFIG_USB_W9968CF=m
-# CONFIG_USB_OV511 is not set
-CONFIG_USB_SE401=m
-CONFIG_USB_SN9C102=m
-CONFIG_USB_STV680=m
-CONFIG_USB_ZC0301=m
-CONFIG_USB_PWC=m
-# CONFIG_USB_PWC_DEBUG is not set
-# CONFIG_USB_ZR364XX is not set
-CONFIG_RADIO_ADAPTERS=y
-CONFIG_RADIO_GEMTEK_PCI=m
-CONFIG_RADIO_MAXIRADIO=m
-CONFIG_RADIO_MAESTRO=m
-CONFIG_USB_DSBR=m
-CONFIG_DVB_CORE=m
-CONFIG_DVB_CORE_ATTACH=y
-CONFIG_DVB_CAPTURE_DRIVERS=y
-
-#
-# Supported SAA7146 based PCI Adapters
-#
-CONFIG_DVB_AV7110=m
-CONFIG_DVB_AV7110_OSD=y
-CONFIG_DVB_BUDGET=m
-CONFIG_DVB_BUDGET_CI=m
-CONFIG_DVB_BUDGET_AV=m
-CONFIG_DVB_BUDGET_PATCH=m
-
-#
-# Supported USB Adapters
-#
-CONFIG_DVB_USB=m
-# CONFIG_DVB_USB_DEBUG is not set
-CONFIG_DVB_USB_A800=m
-CONFIG_DVB_USB_DIBUSB_MB=m
-CONFIG_DVB_USB_DIBUSB_MB_FAULTY=y
-CONFIG_DVB_USB_DIBUSB_MC=m
-CONFIG_DVB_USB_DIB0700=m
-CONFIG_DVB_USB_UMT_010=m
-CONFIG_DVB_USB_CXUSB=m
-# CONFIG_DVB_USB_M920X is not set
-# CONFIG_DVB_USB_GL861 is not set
-# CONFIG_DVB_USB_AU6610 is not set
-CONFIG_DVB_USB_DIGITV=m
-CONFIG_DVB_USB_VP7045=m
-CONFIG_DVB_USB_VP702X=m
-CONFIG_DVB_USB_GP8PSK=m
-CONFIG_DVB_USB_NOVA_T_USB2=m
-# CONFIG_DVB_USB_TTUSB2 is not set
-CONFIG_DVB_USB_DTT200U=m
-# CONFIG_DVB_USB_OPERA1 is not set
-# CONFIG_DVB_USB_AF9005 is not set
-CONFIG_DVB_TTUSB_BUDGET=m
-CONFIG_DVB_TTUSB_DEC=m
-CONFIG_DVB_CINERGYT2=m
-CONFIG_DVB_CINERGYT2_TUNING=y
-CONFIG_DVB_CINERGYT2_STREAM_URB_COUNT=32
-CONFIG_DVB_CINERGYT2_STREAM_BUF_SIZE=512
-CONFIG_DVB_CINERGYT2_QUERY_INTERVAL=250
-CONFIG_DVB_CINERGYT2_ENABLE_RC_INPUT_DEVICE=y
-CONFIG_DVB_CINERGYT2_RC_QUERY_INTERVAL=100
-
-#
-# Supported FlexCopII (B2C2) Adapters
-#
-CONFIG_DVB_B2C2_FLEXCOP=m
-CONFIG_DVB_B2C2_FLEXCOP_PCI=m
-CONFIG_DVB_B2C2_FLEXCOP_USB=m
-# CONFIG_DVB_B2C2_FLEXCOP_DEBUG is not set
-
-#
-# Supported BT878 Adapters
-#
-CONFIG_DVB_BT8XX=m
-
-#
-# Supported Pluto2 Adapters
-#
-CONFIG_DVB_PLUTO2=m
-
-#
-# Supported DVB Frontends
-#
-
-#
-# Customise DVB Frontends
-#
-# CONFIG_DVB_FE_CUSTOMISE is not set
-
-#
-# DVB-S (satellite) frontends
-#
-CONFIG_DVB_STV0299=m
-CONFIG_DVB_CX24110=m
-CONFIG_DVB_CX24123=m
-CONFIG_DVB_TDA8083=m
-CONFIG_DVB_MT312=m
-CONFIG_DVB_VES1X93=m
-CONFIG_DVB_S5H1420=m
-CONFIG_DVB_TDA10086=m
+# CONFIG_SENSORS_LIS3_I2C is not set
+# CONFIG_THERMAL is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
#
-# DVB-T (terrestrial) frontends
+# Watchdog Device Drivers
#
-CONFIG_DVB_SP8870=m
-CONFIG_DVB_SP887X=m
-CONFIG_DVB_CX22700=m
-CONFIG_DVB_CX22702=m
-CONFIG_DVB_L64781=m
-CONFIG_DVB_TDA1004X=m
-CONFIG_DVB_NXT6000=m
-CONFIG_DVB_MT352=m
-CONFIG_DVB_ZL10353=m
-CONFIG_DVB_DIB3000MB=m
-CONFIG_DVB_DIB3000MC=m
-CONFIG_DVB_DIB7000M=m
-CONFIG_DVB_DIB7000P=m
+# CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_ALIM7101_WDT is not set
+CONFIG_WDT_MTX1=y
#
-# DVB-C (cable) frontends
+# PCI-based Watchdog Cards
#
-CONFIG_DVB_VES1820=m
-CONFIG_DVB_TDA10021=m
-CONFIG_DVB_TDA10023=m
-CONFIG_DVB_STV0297=m
+# CONFIG_PCIPCWATCHDOG is not set
+# CONFIG_WDTPCI is not set
#
-# ATSC (North American/Korean Terrestrial/Cable DTV) frontends
+# USB-based Watchdog Cards
#
-CONFIG_DVB_NXT200X=m
-CONFIG_DVB_OR51211=m
-CONFIG_DVB_OR51132=m
-CONFIG_DVB_BCM3510=m
-CONFIG_DVB_LGDT330X=m
+# CONFIG_USBPCWATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
#
-# Tuners/PLL support
+# Sonics Silicon Backplane
#
-CONFIG_DVB_PLL=m
-CONFIG_DVB_TDA826X=m
-CONFIG_DVB_TDA827X=m
-# CONFIG_DVB_TUNER_QT1010 is not set
-CONFIG_DVB_TUNER_MT2060=m
+CONFIG_SSB=m
+CONFIG_SSB_SPROM=y
+CONFIG_SSB_PCIHOST_POSSIBLE=y
+CONFIG_SSB_PCIHOST=y
+# CONFIG_SSB_B43_PCI_BRIDGE is not set
+CONFIG_SSB_PCMCIAHOST_POSSIBLE=y
+# CONFIG_SSB_PCMCIAHOST is not set
+CONFIG_SSB_SDIOHOST_POSSIBLE=y
+# CONFIG_SSB_SDIOHOST is not set
+# CONFIG_SSB_SILENT is not set
+# CONFIG_SSB_DEBUG is not set
+CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y
+CONFIG_SSB_DRIVER_PCICORE=y
+# CONFIG_SSB_DRIVER_MIPS is not set
#
-# Miscellaneous devices
+# Multifunction device drivers
#
-CONFIG_DVB_LNBP21=m
-CONFIG_DVB_ISL6421=m
-CONFIG_DVB_TUA6100=m
-CONFIG_VIDEO_SAA7146=m
-CONFIG_VIDEO_SAA7146_VV=m
-CONFIG_VIDEO_TUNER=m
-CONFIG_VIDEO_BUF=m
-CONFIG_VIDEO_BUF_DVB=m
-CONFIG_VIDEO_BTCX=m
-CONFIG_VIDEO_IR_I2C=m
-CONFIG_VIDEO_IR=m
-CONFIG_VIDEO_TVEEPROM=m
-CONFIG_DAB=y
-CONFIG_USB_DABUSB=m
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_UCB1400_CORE is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM8994 is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_TIMBERDALE is not set
+# CONFIG_LPC_SCH is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
#
# Graphics support
#
+# CONFIG_VGA_ARB is not set
+# CONFIG_DRM is not set
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
CONFIG_BACKLIGHT_LCD_SUPPORT=y
-CONFIG_LCD_CLASS_DEVICE=m
+# CONFIG_LCD_CLASS_DEVICE is not set
CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_GENERIC is not set
#
# Display device support
#
# CONFIG_DISPLAY_SUPPORT is not set
-CONFIG_VGASTATE=m
-CONFIG_VIDEO_OUTPUT_CONTROL=m
-CONFIG_FB=y
-CONFIG_FIRMWARE_EDID=y
-CONFIG_FB_DDC=m
-CONFIG_FB_CFB_FILLRECT=m
-CONFIG_FB_CFB_COPYAREA=m
-CONFIG_FB_CFB_IMAGEBLIT=m
-# CONFIG_FB_SYS_FILLRECT is not set
-# CONFIG_FB_SYS_COPYAREA is not set
-# CONFIG_FB_SYS_IMAGEBLIT is not set
-# CONFIG_FB_SYS_FOPS is not set
-CONFIG_FB_DEFERRED_IO=y
-# CONFIG_FB_SVGALIB is not set
-# CONFIG_FB_MACMODES is not set
-CONFIG_FB_BACKLIGHT=y
-CONFIG_FB_MODE_HELPERS=y
-CONFIG_FB_TILEBLITTING=y
-
-#
-# Frame buffer hardware drivers
-#
-CONFIG_FB_CIRRUS=m
-CONFIG_FB_PM2=m
-CONFIG_FB_PM2_FIFO_DISCONNECT=y
-CONFIG_FB_CYBER2000=m
-# CONFIG_FB_ASILIANT is not set
-# CONFIG_FB_IMSTT is not set
-CONFIG_FB_S1D13XXX=m
-CONFIG_FB_NVIDIA=m
-CONFIG_FB_NVIDIA_I2C=y
-# CONFIG_FB_NVIDIA_DEBUG is not set
-CONFIG_FB_NVIDIA_BACKLIGHT=y
-CONFIG_FB_RIVA=m
-CONFIG_FB_RIVA_I2C=y
-# CONFIG_FB_RIVA_DEBUG is not set
-CONFIG_FB_RIVA_BACKLIGHT=y
-CONFIG_FB_MATROX=m
-CONFIG_FB_MATROX_MILLENIUM=y
-CONFIG_FB_MATROX_MYSTIQUE=y
-CONFIG_FB_MATROX_G=y
-CONFIG_FB_MATROX_I2C=m
-CONFIG_FB_MATROX_MAVEN=m
-CONFIG_FB_MATROX_MULTIHEAD=y
-CONFIG_FB_RADEON=m
-CONFIG_FB_RADEON_I2C=y
-CONFIG_FB_RADEON_BACKLIGHT=y
-# CONFIG_FB_RADEON_DEBUG is not set
-CONFIG_FB_ATY128=m
-CONFIG_FB_ATY128_BACKLIGHT=y
-CONFIG_FB_ATY=m
-CONFIG_FB_ATY_CT=y
-CONFIG_FB_ATY_GENERIC_LCD=y
-CONFIG_FB_ATY_GX=y
-CONFIG_FB_ATY_BACKLIGHT=y
-# CONFIG_FB_S3 is not set
-CONFIG_FB_SAVAGE=m
-CONFIG_FB_SAVAGE_I2C=y
-CONFIG_FB_SAVAGE_ACCEL=y
-CONFIG_FB_SIS=m
-CONFIG_FB_SIS_300=y
-CONFIG_FB_SIS_315=y
-CONFIG_FB_NEOMAGIC=m
-CONFIG_FB_KYRO=m
-CONFIG_FB_3DFX=m
-# CONFIG_FB_3DFX_ACCEL is not set
-CONFIG_FB_VOODOO1=m
-# CONFIG_FB_VT8623 is not set
-CONFIG_FB_TRIDENT=m
-# CONFIG_FB_TRIDENT_ACCEL is not set
-# CONFIG_FB_ARK is not set
-# CONFIG_FB_PM3 is not set
-# CONFIG_FB_VIRTUAL is not set
#
# Console display driver support
#
-CONFIG_VGA_CONSOLE=y
-# CONFIG_VGACON_SOFT_SCROLLBACK is not set
+# CONFIG_VGA_CONSOLE is not set
CONFIG_DUMMY_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE=m
-# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
-# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
-# CONFIG_FONTS is not set
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
-# CONFIG_LOGO is not set
-
-#
-# Sound
-#
CONFIG_SOUND=m
-
-#
-# Advanced Linux Sound Architecture
-#
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
CONFIG_SND=m
CONFIG_SND_TIMER=m
CONFIG_SND_PCM=m
@@ -2318,32 +1693,29 @@ CONFIG_SND_MIXER_OSS=m
CONFIG_SND_PCM_OSS=m
CONFIG_SND_PCM_OSS_PLUGINS=y
CONFIG_SND_SEQUENCER_OSS=y
-CONFIG_SND_RTCTIMER=m
-CONFIG_SND_SEQ_RTCTIMER_DEFAULT=y
CONFIG_SND_DYNAMIC_MINORS=y
CONFIG_SND_SUPPORT_OLD_API=y
CONFIG_SND_VERBOSE_PROCFS=y
# CONFIG_SND_VERBOSE_PRINTK is not set
# CONFIG_SND_DEBUG is not set
-
-#
-# Generic devices
-#
+CONFIG_SND_VMASTER=y
+CONFIG_SND_RAWMIDI_SEQ=m
+CONFIG_SND_OPL3_LIB_SEQ=m
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+CONFIG_SND_EMU10K1_SEQ=m
CONFIG_SND_MPU401_UART=m
CONFIG_SND_OPL3_LIB=m
CONFIG_SND_VX_LIB=m
CONFIG_SND_AC97_CODEC=m
+CONFIG_SND_DRIVERS=y
CONFIG_SND_DUMMY=m
CONFIG_SND_VIRMIDI=m
CONFIG_SND_MTPAV=m
-CONFIG_SND_MTS64=m
CONFIG_SND_SERIAL_U16550=m
CONFIG_SND_MPU401=m
-# CONFIG_SND_PORTMAN2X4 is not set
-
-#
-# PCI devices
-#
+# CONFIG_SND_AC97_POWER_SAVE is not set
+CONFIG_SND_PCI=y
CONFIG_SND_AD1889=m
CONFIG_SND_ALS300=m
CONFIG_SND_ALI5451=m
@@ -2352,14 +1724,18 @@ CONFIG_SND_ATIIXP_MODEM=m
CONFIG_SND_AU8810=m
CONFIG_SND_AU8820=m
CONFIG_SND_AU8830=m
+# CONFIG_SND_AW2 is not set
CONFIG_SND_AZT3328=m
CONFIG_SND_BT87X=m
# CONFIG_SND_BT87X_OVERCLOCK is not set
CONFIG_SND_CA0106=m
CONFIG_SND_CMIPCI=m
+# CONFIG_SND_OXYGEN is not set
CONFIG_SND_CS4281=m
CONFIG_SND_CS46XX=m
CONFIG_SND_CS46XX_NEW_DSP=y
+# CONFIG_SND_CS5535AUDIO is not set
+# CONFIG_SND_CTXFI is not set
CONFIG_SND_DARLA20=m
CONFIG_SND_GINA20=m
CONFIG_SND_LAYLA20=m
@@ -2372,6 +1748,8 @@ CONFIG_SND_ECHO3G=m
CONFIG_SND_INDIGO=m
CONFIG_SND_INDIGOIO=m
CONFIG_SND_INDIGODJ=m
+# CONFIG_SND_INDIGOIOX is not set
+# CONFIG_SND_INDIGODJX is not set
CONFIG_SND_EMU10K1=m
CONFIG_SND_EMU10K1X=m
CONFIG_SND_ENS1370=m
@@ -2379,19 +1757,36 @@ CONFIG_SND_ENS1371=m
CONFIG_SND_ES1938=m
CONFIG_SND_ES1968=m
CONFIG_SND_FM801=m
-CONFIG_SND_FM801_TEA575X_BOOL=y
-CONFIG_SND_FM801_TEA575X=m
CONFIG_SND_HDA_INTEL=m
+# CONFIG_SND_HDA_HWDEP is not set
+# CONFIG_SND_HDA_INPUT_BEEP is not set
+# CONFIG_SND_HDA_INPUT_JACK is not set
+# CONFIG_SND_HDA_PATCH_LOADER is not set
+CONFIG_SND_HDA_CODEC_REALTEK=y
+CONFIG_SND_HDA_CODEC_ANALOG=y
+CONFIG_SND_HDA_CODEC_SIGMATEL=y
+CONFIG_SND_HDA_CODEC_VIA=y
+CONFIG_SND_HDA_CODEC_ATIHDMI=y
+CONFIG_SND_HDA_CODEC_NVHDMI=y
+CONFIG_SND_HDA_CODEC_INTELHDMI=y
+CONFIG_SND_HDA_ELD=y
+CONFIG_SND_HDA_CODEC_CIRRUS=y
+CONFIG_SND_HDA_CODEC_CONEXANT=y
+CONFIG_SND_HDA_CODEC_CA0110=y
+CONFIG_SND_HDA_CODEC_CMEDIA=y
+CONFIG_SND_HDA_CODEC_SI3054=y
+CONFIG_SND_HDA_GENERIC=y
+# CONFIG_SND_HDA_POWER_SAVE is not set
CONFIG_SND_HDSP=m
CONFIG_SND_HDSPM=m
+# CONFIG_SND_HIFIER is not set
CONFIG_SND_ICE1712=m
CONFIG_SND_ICE1724=m
CONFIG_SND_INTEL8X0=m
CONFIG_SND_INTEL8X0M=m
CONFIG_SND_KORG1212=m
-CONFIG_SND_KORG1212_FIRMWARE_IN_KERNEL=y
+# CONFIG_SND_LX6464ES is not set
CONFIG_SND_MAESTRO3=m
-CONFIG_SND_MAESTRO3_FIRMWARE_IN_KERNEL=y
CONFIG_SND_MIXART=m
CONFIG_SND_NM256=m
CONFIG_SND_PCXHR=m
@@ -2403,55 +1798,30 @@ CONFIG_SND_SONICVIBES=m
CONFIG_SND_TRIDENT=m
CONFIG_SND_VIA82XX=m
CONFIG_SND_VIA82XX_MODEM=m
+# CONFIG_SND_VIRTUOSO is not set
CONFIG_SND_VX222=m
CONFIG_SND_YMFPCI=m
-CONFIG_SND_YMFPCI_FIRMWARE_IN_KERNEL=y
-# CONFIG_SND_AC97_POWER_SAVE is not set
-
-#
-# ALSA MIPS devices
-#
+CONFIG_SND_MIPS=y
# CONFIG_SND_AU1X00 is not set
-
-#
-# USB devices
-#
+CONFIG_SND_USB=y
CONFIG_SND_USB_AUDIO=m
+# CONFIG_SND_USB_UA101 is not set
# CONFIG_SND_USB_CAIAQ is not set
-
-#
-# PCMCIA devices
-#
+CONFIG_SND_PCMCIA=y
CONFIG_SND_VXPOCKET=m
CONFIG_SND_PDAUDIOCF=m
-
-#
-# System on Chip audio support
-#
# CONFIG_SND_SOC is not set
-
-#
-# SoC Audio support for SuperH
-#
-
-#
-# Open Sound System
-#
CONFIG_SOUND_PRIME=m
-CONFIG_SOUND_TRIDENT=m
-# CONFIG_SOUND_MSNDCLAS is not set
-# CONFIG_SOUND_MSNDPIN is not set
CONFIG_AC97_BUS=m
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
-# CONFIG_HID_DEBUG is not set
+# CONFIG_HIDRAW is not set
#
# USB Input Devices
#
CONFIG_USB_HID=m
-CONFIG_USB_HIDINPUT_POWERBOOK=y
-# CONFIG_HID_FF is not set
+# CONFIG_HID_PID is not set
CONFIG_USB_HIDDEV=y
#
@@ -2459,12 +1829,50 @@ CONFIG_USB_HIDDEV=y
#
CONFIG_USB_KBD=m
CONFIG_USB_MOUSE=m
+
+#
+# Special HID drivers
+#
+# CONFIG_HID_3M_PCT is not set
+# CONFIG_HID_A4TECH is not set
+# CONFIG_HID_APPLE is not set
+# CONFIG_HID_BELKIN is not set
+# CONFIG_HID_CHERRY is not set
+# CONFIG_HID_CHICONY is not set
+# CONFIG_HID_CYPRESS is not set
+# CONFIG_HID_DRAGONRISE is not set
+# CONFIG_HID_EZKEY is not set
+# CONFIG_HID_KYE is not set
+# CONFIG_HID_GYRATION is not set
+# CONFIG_HID_TWINHAN is not set
+# CONFIG_HID_KENSINGTON is not set
+# CONFIG_HID_LOGITECH is not set
+# CONFIG_HID_MAGICMOUSE is not set
+# CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MOSART is not set
+# CONFIG_HID_MONTEREY is not set
+# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_ORTEK is not set
+# CONFIG_HID_PANTHERLORD is not set
+# CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_QUANTA is not set
+# CONFIG_HID_SAMSUNG is not set
+# CONFIG_HID_SONY is not set
+# CONFIG_HID_STANTUM is not set
+# CONFIG_HID_SUNPLUS is not set
+# CONFIG_HID_GREENASIA is not set
+# CONFIG_HID_SMARTJOYPLUS is not set
+# CONFIG_HID_TOPSEED is not set
+# CONFIG_HID_THRUSTMASTER is not set
+# CONFIG_HID_WACOM is not set
+# CONFIG_HID_ZEROPLUS is not set
CONFIG_USB_SUPPORT=y
CONFIG_USB_ARCH_HAS_HCD=y
CONFIG_USB_ARCH_HAS_OHCI=y
CONFIG_USB_ARCH_HAS_EHCI=y
CONFIG_USB=m
# CONFIG_USB_DEBUG is not set
+# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set
#
# Miscellaneous USB options
@@ -2472,19 +1880,27 @@ CONFIG_USB=m
CONFIG_USB_DEVICEFS=y
CONFIG_USB_DEVICE_CLASS=y
# CONFIG_USB_DYNAMIC_MINORS is not set
-CONFIG_USB_SUSPEND=y
-# CONFIG_USB_PERSIST is not set
# CONFIG_USB_OTG is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+CONFIG_USB_MON=m
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
#
# USB Host Controller Drivers
#
+# CONFIG_USB_C67X00_HCD is not set
+# CONFIG_USB_XHCI_HCD is not set
CONFIG_USB_EHCI_HCD=m
-CONFIG_USB_EHCI_SPLIT_ISO=y
CONFIG_USB_EHCI_ROOT_HUB_TT=y
CONFIG_USB_EHCI_TT_NEWSCHED=y
+# CONFIG_USB_OXU210HP_HCD is not set
# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
CONFIG_USB_OHCI_HCD=m
+# CONFIG_USB_OHCI_HCD_SSB is not set
# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
CONFIG_USB_OHCI_LITTLE_ENDIAN=y
@@ -2493,32 +1909,38 @@ CONFIG_USB_U132_HCD=m
CONFIG_USB_SL811_HCD=m
CONFIG_USB_SL811_CS=m
# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_WHCI_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
#
# USB Device Class drivers
#
CONFIG_USB_ACM=m
CONFIG_USB_PRINTER=m
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
#
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
#
#
-# may also be needed; see USB_STORAGE Help for more information
+# also be needed; see USB_STORAGE Help for more info
#
CONFIG_USB_STORAGE=m
# CONFIG_USB_STORAGE_DEBUG is not set
-CONFIG_USB_STORAGE_DATAFAB=y
-CONFIG_USB_STORAGE_FREECOM=y
-CONFIG_USB_STORAGE_ISD200=y
-CONFIG_USB_STORAGE_DPCM=y
-CONFIG_USB_STORAGE_USBAT=y
-CONFIG_USB_STORAGE_SDDR09=y
-CONFIG_USB_STORAGE_SDDR55=y
-CONFIG_USB_STORAGE_JUMPSHOT=y
-CONFIG_USB_STORAGE_ALAUDA=y
-CONFIG_USB_STORAGE_KARMA=y
+CONFIG_USB_STORAGE_DATAFAB=m
+CONFIG_USB_STORAGE_FREECOM=m
+CONFIG_USB_STORAGE_ISD200=m
+CONFIG_USB_STORAGE_USBAT=m
+CONFIG_USB_STORAGE_SDDR09=m
+CONFIG_USB_STORAGE_SDDR55=m
+CONFIG_USB_STORAGE_JUMPSHOT=m
+CONFIG_USB_STORAGE_ALAUDA=m
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+CONFIG_USB_STORAGE_KARMA=m
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
CONFIG_USB_LIBUSUAL=y
#
@@ -2526,25 +1948,20 @@ CONFIG_USB_LIBUSUAL=y
#
CONFIG_USB_MDC800=m
CONFIG_USB_MICROTEK=m
-CONFIG_USB_MON=y
#
# USB port drivers
#
-CONFIG_USB_USS720=m
-
-#
-# USB Serial Converter support
-#
CONFIG_USB_SERIAL=m
+CONFIG_USB_EZUSB=y
CONFIG_USB_SERIAL_GENERIC=y
CONFIG_USB_SERIAL_AIRCABLE=m
-CONFIG_USB_SERIAL_AIRPRIME=m
CONFIG_USB_SERIAL_ARK3116=m
CONFIG_USB_SERIAL_BELKIN=m
+# CONFIG_USB_SERIAL_CH341 is not set
CONFIG_USB_SERIAL_WHITEHEAT=m
CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
-CONFIG_USB_SERIAL_CP2101=m
+# CONFIG_USB_SERIAL_CP210X is not set
CONFIG_USB_SERIAL_CYPRESS_M8=m
CONFIG_USB_SERIAL_EMPEG=m
CONFIG_USB_SERIAL_FTDI_SIO=m
@@ -2556,6 +1973,7 @@ CONFIG_USB_SERIAL_EDGEPORT=m
CONFIG_USB_SERIAL_EDGEPORT_TI=m
CONFIG_USB_SERIAL_GARMIN=m
CONFIG_USB_SERIAL_IPW=m
+# CONFIG_USB_SERIAL_IUU is not set
CONFIG_USB_SERIAL_KEYSPAN_PDA=m
CONFIG_USB_SERIAL_KEYSPAN=m
# CONFIG_USB_SERIAL_KEYSPAN_MPR is not set
@@ -2575,20 +1993,27 @@ CONFIG_USB_SERIAL_KOBIL_SCT=m
CONFIG_USB_SERIAL_MCT_U232=m
CONFIG_USB_SERIAL_MOS7720=m
CONFIG_USB_SERIAL_MOS7840=m
+# CONFIG_USB_SERIAL_MOTOROLA is not set
CONFIG_USB_SERIAL_NAVMAN=m
CONFIG_USB_SERIAL_PL2303=m
# CONFIG_USB_SERIAL_OTI6858 is not set
+# CONFIG_USB_SERIAL_QCAUX is not set
+# CONFIG_USB_SERIAL_QUALCOMM is not set
+# CONFIG_USB_SERIAL_SPCP8X5 is not set
CONFIG_USB_SERIAL_HP4X=m
CONFIG_USB_SERIAL_SAFE=m
# CONFIG_USB_SERIAL_SAFE_PADDED is not set
+# CONFIG_USB_SERIAL_SIEMENS_MPI is not set
CONFIG_USB_SERIAL_SIERRAWIRELESS=m
+# CONFIG_USB_SERIAL_SYMBOL is not set
CONFIG_USB_SERIAL_TI=m
CONFIG_USB_SERIAL_CYBERJACK=m
CONFIG_USB_SERIAL_XIRCOM=m
CONFIG_USB_SERIAL_OPTION=m
CONFIG_USB_SERIAL_OMNINET=m
+# CONFIG_USB_SERIAL_OPTICON is not set
+# CONFIG_USB_SERIAL_VIVOPAY_SERIAL is not set
# CONFIG_USB_SERIAL_DEBUG is not set
-CONFIG_USB_EZUSB=y
#
# USB Miscellaneous drivers
@@ -2596,18 +2021,13 @@ CONFIG_USB_EZUSB=y
CONFIG_USB_EMI62=m
CONFIG_USB_EMI26=m
CONFIG_USB_ADUTUX=m
-CONFIG_USB_AUERSWALD=m
+# CONFIG_USB_SEVSEG is not set
CONFIG_USB_RIO500=m
CONFIG_USB_LEGOTOWER=m
CONFIG_USB_LCD=m
-# CONFIG_USB_BERRY_CHARGE is not set
CONFIG_USB_LED=m
CONFIG_USB_CYPRESS_CY7C63=m
CONFIG_USB_CYTHERM=m
-CONFIG_USB_PHIDGET=m
-CONFIG_USB_PHIDGETKIT=m
-CONFIG_USB_PHIDGETMOTORCONTROL=m
-CONFIG_USB_PHIDGETSERVO=m
CONFIG_USB_IDMOUSE=m
CONFIG_USB_FTDI_ELAN=m
CONFIG_USB_APPLEDISPLAY=m
@@ -2617,86 +2037,113 @@ CONFIG_USB_LD=m
CONFIG_USB_TRANCEVIBRATOR=m
# CONFIG_USB_IOWARRIOR is not set
CONFIG_USB_TEST=m
-
-#
-# USB DSL modem support
-#
+# CONFIG_USB_ISIGHTFW is not set
CONFIG_USB_ATM=m
CONFIG_USB_SPEEDTOUCH=m
CONFIG_USB_CXACRU=m
CONFIG_USB_UEAGLEATM=m
CONFIG_USB_XUSBATM=m
-
-#
-# USB Gadget Support
-#
CONFIG_USB_GADGET=m
# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
CONFIG_USB_GADGET_SELECTED=y
-# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_ATMEL_USBA is not set
# CONFIG_USB_GADGET_FSL_USB2 is not set
-CONFIG_USB_GADGET_NET2280=y
-CONFIG_USB_NET2280=m
-# CONFIG_USB_GADGET_PXA2XX is not set
-# CONFIG_USB_GADGET_M66592 is not set
-# CONFIG_USB_GADGET_GOKU is not set
# CONFIG_USB_GADGET_LH7A40X is not set
# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_R8A66597 is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C_HSOTG is not set
+# CONFIG_USB_GADGET_IMX is not set
# CONFIG_USB_GADGET_S3C2410 is not set
-# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+CONFIG_USB_GADGET_NET2280=y
+CONFIG_USB_NET2280=m
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LANGWELL is not set
# CONFIG_USB_GADGET_DUMMY_HCD is not set
CONFIG_USB_GADGET_DUALSPEED=y
CONFIG_USB_ZERO=m
+# CONFIG_USB_AUDIO is not set
CONFIG_USB_ETH=m
CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_ETH_EEM is not set
CONFIG_USB_GADGETFS=m
CONFIG_USB_FILE_STORAGE=m
# CONFIG_USB_FILE_STORAGE_TEST is not set
+# CONFIG_USB_MASS_STORAGE is not set
CONFIG_USB_G_SERIAL=m
CONFIG_USB_MIDI_GADGET=m
+# CONFIG_USB_G_PRINTER is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
+# CONFIG_USB_G_NOKIA is not set
+# CONFIG_USB_G_MULTI is not set
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_NOP_USB_XCEIV is not set
+# CONFIG_UWB is not set
CONFIG_MMC=m
# CONFIG_MMC_DEBUG is not set
# CONFIG_MMC_UNSAFE_RESUME is not set
#
-# MMC/SD Card Drivers
+# MMC/SD/SDIO Card Drivers
#
CONFIG_MMC_BLOCK=m
CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
#
-# MMC/SD Host Controller Drivers
+# MMC/SD/SDIO Host Controller Drivers
#
CONFIG_MMC_SDHCI=m
+# CONFIG_MMC_SDHCI_PCI is not set
+# CONFIG_MMC_SDHCI_PLTFM is not set
CONFIG_MMC_TIFM_SD=m
+# CONFIG_MMC_SDRICOH_CS is not set
+# CONFIG_MMC_CB710 is not set
+# CONFIG_MMC_VIA_SDMMC is not set
+# CONFIG_MEMSTICK is not set
CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=m
+CONFIG_LEDS_CLASS=y
#
# LED drivers
#
+# CONFIG_LEDS_PCA9532 is not set
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_GPIO_PLATFORM=y
+# CONFIG_LEDS_LP3944 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_BD2802 is not set
+# CONFIG_LEDS_LT3593 is not set
+CONFIG_LEDS_TRIGGERS=y
#
# LED Triggers
#
-# CONFIG_LEDS_TRIGGERS is not set
-CONFIG_INFINIBAND=m
-CONFIG_INFINIBAND_USER_MAD=m
-CONFIG_INFINIBAND_USER_ACCESS=m
-CONFIG_INFINIBAND_USER_MEM=y
-CONFIG_INFINIBAND_ADDR_TRANS=y
-CONFIG_INFINIBAND_MTHCA=m
-CONFIG_INFINIBAND_MTHCA_DEBUG=y
-CONFIG_INFINIBAND_AMSO1100=m
-CONFIG_INFINIBAND_AMSO1100_DEBUG=y
-# CONFIG_MLX4_INFINIBAND is not set
-CONFIG_INFINIBAND_IPOIB=m
-# CONFIG_INFINIBAND_IPOIB_CM is not set
-CONFIG_INFINIBAND_IPOIB_DEBUG=y
-# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set
-CONFIG_INFINIBAND_SRP=m
-CONFIG_INFINIBAND_ISER=m
-CONFIG_RTC_LIB=m
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_GPIO is not set
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_INFINIBAND is not set
+CONFIG_RTC_LIB=y
CONFIG_RTC_CLASS=m
#
@@ -2712,6 +2159,7 @@ CONFIG_RTC_DRV_TEST=m
# I2C RTC drivers
#
CONFIG_RTC_DRV_DS1307=m
+# CONFIG_RTC_DRV_DS1374 is not set
CONFIG_RTC_DRV_DS1672=m
# CONFIG_RTC_DRV_MAX6900 is not set
CONFIG_RTC_DRV_RS5C372=m
@@ -2720,48 +2168,45 @@ CONFIG_RTC_DRV_X1205=m
CONFIG_RTC_DRV_PCF8563=m
CONFIG_RTC_DRV_PCF8583=m
# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_BQ32K is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
#
# SPI RTC drivers
#
-CONFIG_RTC_DRV_RS5C348=m
-CONFIG_RTC_DRV_MAX6902=m
#
# Platform RTC drivers
#
# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
CONFIG_RTC_DRV_DS1553=m
-# CONFIG_RTC_DRV_STK17TA8 is not set
CONFIG_RTC_DRV_DS1742=m
+# CONFIG_RTC_DRV_STK17TA8 is not set
CONFIG_RTC_DRV_M48T86=m
+# CONFIG_RTC_DRV_M48T35 is not set
# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
CONFIG_RTC_DRV_V3020=m
#
# on-CPU RTC drivers
#
-
-#
-# DMA Engine support
-#
-CONFIG_DMA_ENGINE=y
-
-#
-# DMA Clients
-#
-CONFIG_NET_DMA=y
-
-#
-# DMA Devices
-#
-CONFIG_INTEL_IOATDMA=m
+# CONFIG_RTC_DRV_AU1XXX is not set
+# CONFIG_DMADEVICES is not set
# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
#
-# Userspace I/O
+# TI VLYNQ
#
-# CONFIG_UIO is not set
+# CONFIG_STAGING is not set
#
# File systems
@@ -2772,44 +2217,43 @@ CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
# CONFIG_EXT2_FS_XIP is not set
CONFIG_EXT3_FS=m
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT3_FS_XATTR=y
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
-# CONFIG_EXT4DEV_FS is not set
+# CONFIG_EXT4_FS is not set
CONFIG_JBD=m
# CONFIG_JBD_DEBUG is not set
CONFIG_FS_MBCACHE=m
-CONFIG_REISERFS_FS=m
-# CONFIG_REISERFS_CHECK is not set
-# CONFIG_REISERFS_PROC_INFO is not set
-CONFIG_REISERFS_FS_XATTR=y
-CONFIG_REISERFS_FS_POSIX_ACL=y
-CONFIG_REISERFS_FS_SECURITY=y
-CONFIG_JFS_FS=m
-CONFIG_JFS_POSIX_ACL=y
-CONFIG_JFS_SECURITY=y
-# CONFIG_JFS_DEBUG is not set
-CONFIG_JFS_STATISTICS=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
CONFIG_FS_POSIX_ACL=y
-CONFIG_XFS_FS=m
-CONFIG_XFS_QUOTA=y
-CONFIG_XFS_SECURITY=y
-CONFIG_XFS_POSIX_ACL=y
-CONFIG_XFS_RT=y
+# CONFIG_XFS_FS is not set
# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
-CONFIG_MINIX_FS=m
-CONFIG_ROMFS_FS=m
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
CONFIG_QUOTA=y
-CONFIG_QFMT_V1=m
-CONFIG_QFMT_V2=m
+# CONFIG_QUOTA_NETLINK_INTERFACE is not set
+CONFIG_PRINT_QUOTA_WARNING=y
+# CONFIG_QUOTA_DEBUG is not set
+# CONFIG_QFMT_V1 is not set
+# CONFIG_QFMT_V2 is not set
CONFIG_QUOTACTL=y
-CONFIG_DNOTIFY=y
-CONFIG_AUTOFS_FS=m
-CONFIG_AUTOFS4_FS=m
+# CONFIG_AUTOFS_FS is not set
+CONFIG_AUTOFS4_FS=y
CONFIG_FUSE_FS=m
+# CONFIG_CUSE is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
#
# CD-ROM/DVD Filesystems
@@ -2838,73 +2282,79 @@ CONFIG_NTFS_FS=m
CONFIG_PROC_FS=y
CONFIG_PROC_KCORE=y
CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
CONFIG_SYSFS=y
CONFIG_TMPFS=y
# CONFIG_TMPFS_POSIX_ACL is not set
# CONFIG_HUGETLB_PAGE is not set
-CONFIG_RAMFS=y
CONFIG_CONFIGFS_FS=m
-
-#
-# Miscellaneous filesystems
-#
-CONFIG_ADFS_FS=m
-# CONFIG_ADFS_FS_RW is not set
-CONFIG_AFFS_FS=m
-CONFIG_ECRYPT_FS=m
-CONFIG_HFS_FS=m
-CONFIG_HFSPLUS_FS=m
-CONFIG_BEFS_FS=m
-# CONFIG_BEFS_DEBUG is not set
-CONFIG_BFS_FS=m
-CONFIG_EFS_FS=m
-CONFIG_JFFS2_FS=m
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_ECRYPT_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_JFFS2_FS=y
CONFIG_JFFS2_FS_DEBUG=0
CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
# CONFIG_JFFS2_SUMMARY is not set
-# CONFIG_JFFS2_FS_XATTR is not set
-# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_FS_POSIX_ACL=y
+CONFIG_JFFS2_FS_SECURITY=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
CONFIG_JFFS2_RTIME=y
# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_JFFS2_CMODE_NONE is not set
+CONFIG_JFFS2_CMODE_PRIORITY=y
+# CONFIG_JFFS2_CMODE_SIZE is not set
+# CONFIG_JFFS2_CMODE_FAVOURLZO is not set
+# CONFIG_LOGFS is not set
CONFIG_CRAMFS=y
-CONFIG_VXFS_FS=m
-CONFIG_HPFS_FS=m
-CONFIG_QNX4FS_FS=m
-CONFIG_SYSV_FS=m
-CONFIG_UFS_FS=m
-# CONFIG_UFS_FS_WRITE is not set
-# CONFIG_UFS_DEBUG is not set
-
-#
-# Network File Systems
-#
+CONFIG_SQUASHFS=y
+# CONFIG_SQUASHFS_EMBEDDED is not set
+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=m
CONFIG_NFS_V3=y
# CONFIG_NFS_V3_ACL is not set
CONFIG_NFS_V4=y
-CONFIG_NFS_DIRECTIO=y
+# CONFIG_NFS_V4_1 is not set
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
# CONFIG_NFSD_V3_ACL is not set
CONFIG_NFSD_V4=y
-CONFIG_NFSD_TCP=y
CONFIG_LOCKD=m
CONFIG_LOCKD_V4=y
CONFIG_EXPORTFS=m
CONFIG_NFS_COMMON=y
CONFIG_SUNRPC=m
CONFIG_SUNRPC_GSS=m
-# CONFIG_SUNRPC_BIND34 is not set
CONFIG_RPCSEC_GSS_KRB5=m
CONFIG_RPCSEC_GSS_SPKM3=m
CONFIG_SMB_FS=m
# CONFIG_SMB_NLS_DEFAULT is not set
+# CONFIG_CEPH_FS is not set
CONFIG_CIFS=m
# CONFIG_CIFS_STATS is not set
# CONFIG_CIFS_WEAK_PW_HASH is not set
+# CONFIG_CIFS_UPCALL is not set
# CONFIG_CIFS_XATTR is not set
# CONFIG_CIFS_DEBUG2 is not set
+# CONFIG_CIFS_DFS_UPCALL is not set
# CONFIG_CIFS_EXPERIMENTAL is not set
CONFIG_NCP_FS=m
CONFIG_NCPFS_PACKET_SIGNING=y
@@ -2916,7 +2366,6 @@ CONFIG_NCPFS_OS2_NS=y
CONFIG_NCPFS_NLS=y
CONFIG_NCPFS_EXTRAS=y
CONFIG_CODA_FS=m
-# CONFIG_CODA_FS_OLD_API is not set
CONFIG_AFS_FS=m
# CONFIG_AFS_DEBUG is not set
@@ -2948,10 +2397,6 @@ CONFIG_SUN_PARTITION=y
CONFIG_KARMA_PARTITION=y
CONFIG_EFI_PARTITION=y
# CONFIG_SYSV68_PARTITION is not set
-
-#
-# Native Language Support
-#
CONFIG_NLS=y
CONFIG_NLS_DEFAULT="cp437"
CONFIG_NLS_CODEPAGE_437=m
@@ -2992,118 +2437,179 @@ CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
CONFIG_NLS_UTF8=m
-
-#
-# Distributed Lock Manager
-#
-CONFIG_DLM=m
-# CONFIG_DLM_DEBUG is not set
-
-#
-# Profiling support
-#
-CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
+# CONFIG_DLM is not set
#
# Kernel hacking
#
CONFIG_TRACE_IRQFLAGS_SUPPORT=y
# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
-# CONFIG_DEBUG_FS is not set
+CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
-CONFIG_CROSSCOMPILE=y
+# CONFIG_DEBUG_MEMORY_INIT is not set
+CONFIG_RCU_CPU_STALL_DETECTOR=y
+# CONFIG_LKDTM is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_RING_BUFFER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
+CONFIG_TRACING_SUPPORT=y
+# CONFIG_FTRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+CONFIG_EARLY_PRINTK=y
# CONFIG_CMDLINE_BOOL is not set
+# CONFIG_SPINLOCK_TEST is not set
#
# Security options
#
CONFIG_KEYS=y
# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
-CONFIG_SECURITY=y
-CONFIG_SECURITY_NETWORK=y
-# CONFIG_SECURITY_NETWORK_XFRM is not set
-CONFIG_SECURITY_CAPABILITIES=m
-CONFIG_SECURITY_ROOTPLUG=m
-CONFIG_SECURITY_SELINUX=y
-CONFIG_SECURITY_SELINUX_BOOTPARAM=y
-CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
-CONFIG_SECURITY_SELINUX_DISABLE=y
-CONFIG_SECURITY_SELINUX_DEVELOP=y
-CONFIG_SECURITY_SELINUX_AVC_STATS=y
-CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
-# CONFIG_SECURITY_SELINUX_ENABLE_SECMARK_DEFAULT is not set
-# CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set
-CONFIG_XOR_BLOCKS=m
-CONFIG_ASYNC_CORE=m
-CONFIG_ASYNC_MEMCPY=m
-CONFIG_ASYNC_XOR=m
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=m
+CONFIG_CRYPTO_AEAD2=y
CONFIG_CRYPTO_BLKCIPHER=m
+CONFIG_CRYPTO_BLKCIPHER2=y
CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=m
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_GF128MUL is not set
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+CONFIG_CRYPTO_AUTHENC=m
+CONFIG_CRYPTO_TEST=m
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=m
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=m
+# CONFIG_CRYPTO_LRW is not set
+CONFIG_CRYPTO_PCBC=m
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
CONFIG_CRYPTO_HMAC=y
# CONFIG_CRYPTO_XCBC is not set
-CONFIG_CRYPTO_NULL=m
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=m
+# CONFIG_CRYPTO_GHASH is not set
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
CONFIG_CRYPTO_SHA1=m
CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_TGR192=m
-# CONFIG_CRYPTO_GF128MUL is not set
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_CBC=m
-CONFIG_CRYPTO_PCBC=m
-# CONFIG_CRYPTO_LRW is not set
-# CONFIG_CRYPTO_CRYPTD is not set
-CONFIG_CRYPTO_DES=m
-# CONFIG_CRYPTO_FCRYPT is not set
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_TWOFISH_COMMON=m
-CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_WP512=m
+
+#
+# Ciphers
+#
CONFIG_CRYPTO_AES=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_BLOWFISH=m
+# CONFIG_CRYPTO_CAMELLIA is not set
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_DES=m
+# CONFIG_CRYPTO_FCRYPT is not set
CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_ANUBIS=m
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_TWOFISH_COMMON=m
+
+#
+# Compression
+#
CONFIG_CRYPTO_DEFLATE=m
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_CRC32C=m
-# CONFIG_CRYPTO_CAMELLIA is not set
-CONFIG_CRYPTO_TEST=m
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_HW=y
+# CONFIG_CRYPTO_DEV_HIFN_795X is not set
+# CONFIG_BINARY_PRINTF is not set
#
# Library routines
#
CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
CONFIG_CRC_CCITT=m
CONFIG_CRC16=m
-# CONFIG_CRC_ITU_T is not set
+# CONFIG_CRC_T10DIF is not set
+CONFIG_CRC_ITU_T=m
CONFIG_CRC32=y
# CONFIG_CRC7 is not set
CONFIG_LIBCRC32C=m
CONFIG_AUDIT_GENERIC=y
CONFIG_ZLIB_INFLATE=y
-CONFIG_ZLIB_DEFLATE=m
-CONFIG_REED_SOLOMON=m
-CONFIG_REED_SOLOMON_DEC16=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_DECOMPRESS_GZIP=y
CONFIG_TEXTSEARCH=y
CONFIG_TEXTSEARCH_KMP=m
CONFIG_TEXTSEARCH_BM=m
CONFIG_TEXTSEARCH_FSM=m
-CONFIG_PLIST=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
-CONFIG_CHECK_SIGNATURE=y
+CONFIG_NLATTR=y
diff --git a/arch/mips/configs/rb532_defconfig b/arch/mips/configs/rb532_defconfig
index 57a50483abdf..90a032af95ce 100644
--- a/arch/mips/configs/rb532_defconfig
+++ b/arch/mips/configs/rb532_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.25
-# Mon Apr 28 12:24:17 2008
+# Linux kernel version: 2.6.34-rc6
+# Sat May 1 11:49:51 2010
#
CONFIG_MIPS=y
@@ -9,22 +9,25 @@ CONFIG_MIPS=y
# Machine selection
#
# CONFIG_MACH_ALCHEMY is not set
+# CONFIG_AR7 is not set
# CONFIG_BCM47XX is not set
+# CONFIG_BCM63XX is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
# CONFIG_MACH_JAZZ is not set
# CONFIG_LASAT is not set
-# CONFIG_LEMOTE_FULONG is not set
-# CONFIG_MIPS_ATLAS is not set
+# CONFIG_MACH_LOONGSON is not set
# CONFIG_MIPS_MALTA is not set
-# CONFIG_MIPS_SEAD is not set
# CONFIG_MIPS_SIM is not set
-# CONFIG_MARKEINS is not set
+# CONFIG_NEC_MARKEINS is not set
# CONFIG_MACH_VR41XX is not set
+# CONFIG_NXP_STB220 is not set
+# CONFIG_NXP_STB225 is not set
# CONFIG_PNX8550_JBS is not set
# CONFIG_PNX8550_STB810 is not set
# CONFIG_PMC_MSP is not set
# CONFIG_PMC_YOSEMITE is not set
+# CONFIG_POWERTV is not set
# CONFIG_SGI_IP22 is not set
# CONFIG_SGI_IP27 is not set
# CONFIG_SGI_IP28 is not set
@@ -38,11 +41,14 @@ CONFIG_MIPS=y
# CONFIG_SIBYTE_SENTOSA is not set
# CONFIG_SIBYTE_BIGSUR is not set
# CONFIG_SNI_RM is not set
-# CONFIG_TOSHIBA_JMR3927 is not set
+# CONFIG_MACH_TX39XX is not set
+# CONFIG_MACH_TX49XX is not set
CONFIG_MIKROTIK_RB532=y
-# CONFIG_TOSHIBA_RBTX4927 is not set
-# CONFIG_TOSHIBA_RBTX4938 is not set
# CONFIG_WR_PPMC is not set
+# CONFIG_CAVIUM_OCTEON_SIMULATOR is not set
+# CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD is not set
+# CONFIG_ALCHEMY_GPIO_INDIRECT is not set
+CONFIG_LOONGSON_UART_BASE=y
CONFIG_RWSEM_GENERIC_SPINLOCK=y
# CONFIG_ARCH_HAS_ILOG2_U32 is not set
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
@@ -53,14 +59,15 @@ CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_GENERIC_CLOCKEVENTS=y
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CMOS_UPDATE=y
-CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_SCHED_OMIT_FRAME_POINTER=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_BOOT_RAW=y
+CONFIG_CEVT_R4K_LIB=y
CONFIG_CEVT_R4K=y
+CONFIG_CSRC_R4K_LIB=y
CONFIG_CSRC_R4K=y
CONFIG_DMA_NONCOHERENT=y
-CONFIG_DMA_NEED_PCI_MAP_STATE=y
-# CONFIG_HOTPLUG_CPU is not set
+CONFIG_NEED_DMA_MAP_STATE=y
# CONFIG_NO_IOPORT is not set
CONFIG_GENERIC_GPIO=y
# CONFIG_CPU_BIG_ENDIAN is not set
@@ -73,7 +80,8 @@ CONFIG_MIPS_L1_CACHE_SHIFT=4
#
# CPU selection
#
-# CONFIG_CPU_LOONGSON2 is not set
+# CONFIG_CPU_LOONGSON2E is not set
+# CONFIG_CPU_LOONGSON2F is not set
CONFIG_CPU_MIPS32_R1=y
# CONFIG_CPU_MIPS32_R2 is not set
# CONFIG_CPU_MIPS64_R1 is not set
@@ -86,6 +94,7 @@ CONFIG_CPU_MIPS32_R1=y
# CONFIG_CPU_TX49XX is not set
# CONFIG_CPU_R5000 is not set
# CONFIG_CPU_R5432 is not set
+# CONFIG_CPU_R5500 is not set
# CONFIG_CPU_R6000 is not set
# CONFIG_CPU_NEVADA is not set
# CONFIG_CPU_R8000 is not set
@@ -93,11 +102,13 @@ CONFIG_CPU_MIPS32_R1=y
# CONFIG_CPU_RM7000 is not set
# CONFIG_CPU_RM9000 is not set
# CONFIG_CPU_SB1 is not set
+# CONFIG_CPU_CAVIUM_OCTEON is not set
CONFIG_SYS_HAS_CPU_MIPS32_R1=y
CONFIG_CPU_MIPS32=y
CONFIG_CPU_MIPSR1=y
CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y
CONFIG_CPU_SUPPORTS_32BIT_KERNEL=y
+CONFIG_HARDWARE_WATCHPOINTS=y
#
# Kernel type
@@ -107,11 +118,13 @@ CONFIG_32BIT=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
+# CONFIG_PAGE_SIZE_32KB is not set
# CONFIG_PAGE_SIZE_64KB is not set
CONFIG_CPU_HAS_PREFETCH=y
CONFIG_MIPS_MT_DISABLED=y
# CONFIG_MIPS_MT_SMP is not set
# CONFIG_MIPS_MT_SMTC is not set
+# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set
CONFIG_CPU_HAS_SYNC=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
@@ -124,12 +137,13 @@ CONFIG_FLATMEM_MANUAL=y
# CONFIG_SPARSEMEM_MANUAL is not set
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_RESOURCES_64BIT is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
CONFIG_TICK_ONESHOT=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
@@ -151,6 +165,7 @@ CONFIG_PREEMPT_NONE=y
CONFIG_LOCKDEP_SUPPORT=y
CONFIG_STACKTRACE_SUPPORT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -168,23 +183,31 @@ CONFIG_BSD_PROCESS_ACCT=y
# CONFIG_BSD_PROCESS_ACCT_V3 is not set
# CONFIG_TASKSTATS is not set
# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+# CONFIG_TREE_RCU is not set
+# CONFIG_TREE_PREEMPT_RCU is not set
+CONFIG_TINY_RCU=y
+# CONFIG_TREE_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CGROUPS is not set
-CONFIG_GROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-# CONFIG_RT_GROUP_SCHED is not set
-CONFIG_USER_SCHED=y
-# CONFIG_CGROUP_SCHED is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
CONFIG_EMBEDDED=y
CONFIG_SYSCTL_SYSCALL=y
# CONFIG_KALLSYMS is not set
@@ -192,54 +215,87 @@ CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
# CONFIG_ELF_CORE is not set
-CONFIG_COMPAT_BRK=y
+CONFIG_PCSPKR_PLATFORM=y
CONFIG_BASE_FULL=y
CONFIG_FUTEX=y
-CONFIG_ANON_INODES=y
CONFIG_EPOLL=y
CONFIG_SIGNALFD=y
CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_PCI_QUIRKS is not set
+CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
-# CONFIG_HAVE_KPROBES is not set
-# CONFIG_HAVE_KRETPROBES is not set
-CONFIG_PROC_PAGE_MONITOR=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_SLOW_WORK is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
-# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=0
CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
CONFIG_MODULE_UNLOAD=y
# CONFIG_MODULE_FORCE_UNLOAD is not set
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
-# CONFIG_KMOD is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
-# CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
+# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
#
# IO Schedulers
#
CONFIG_IOSCHED_NOOP=y
-# CONFIG_IOSCHED_AS is not set
CONFIG_IOSCHED_DEADLINE=y
# CONFIG_IOSCHED_CFQ is not set
-# CONFIG_DEFAULT_AS is not set
CONFIG_DEFAULT_DEADLINE=y
# CONFIG_DEFAULT_CFQ is not set
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="deadline"
-CONFIG_CLASSIC_RCU=y
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
+# CONFIG_FREEZER is not set
#
# Bus options (PCI, PCMCIA, EISA, ISA, TC)
@@ -248,7 +304,8 @@ CONFIG_HW_HAS_PCI=y
CONFIG_PCI=y
CONFIG_PCI_DOMAINS=y
# CONFIG_ARCH_SUPPORTS_MSI is not set
-CONFIG_PCI_LEGACY=y
+# CONFIG_PCI_STUB is not set
+# CONFIG_PCI_IOV is not set
CONFIG_MMU=y
# CONFIG_PCCARD is not set
# CONFIG_HOTPLUG_PCI is not set
@@ -257,25 +314,22 @@ CONFIG_MMU=y
# Executable file formats
#
CONFIG_BINFMT_ELF=y
+# CONFIG_HAVE_AOUT is not set
# CONFIG_BINFMT_MISC is not set
CONFIG_TRAD_SIGNALS=y
#
# Power management options
#
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
CONFIG_ARCH_SUSPEND_POSSIBLE=y
# CONFIG_PM is not set
-
-#
-# Networking
-#
CONFIG_NET=y
#
# Networking options
#
CONFIG_PACKET=y
-CONFIG_PACKET_MMAP=y
CONFIG_UNIX=y
# CONFIG_NET_KEY is not set
CONFIG_INET=y
@@ -325,7 +379,6 @@ CONFIG_DEFAULT_VEGAS=y
# CONFIG_DEFAULT_RENO is not set
CONFIG_DEFAULT_TCP_CONG="vegas"
# CONFIG_TCP_MD5SIG is not set
-# CONFIG_IP_VS is not set
# CONFIG_IPV6 is not set
# CONFIG_NETWORK_SECMARK is not set
CONFIG_NETFILTER=y
@@ -336,8 +389,9 @@ CONFIG_NETFILTER_ADVANCED=y
#
# Core Netfilter Configuration
#
+CONFIG_NETFILTER_NETLINK=m
# CONFIG_NETFILTER_NETLINK_QUEUE is not set
-# CONFIG_NETFILTER_NETLINK_LOG is not set
+CONFIG_NETFILTER_NETLINK_LOG=m
CONFIG_NF_CONNTRACK=y
CONFIG_NF_CT_ACCT=y
CONFIG_NF_CONNTRACK_MARK=y
@@ -355,18 +409,23 @@ CONFIG_NF_CONNTRACK_IRC=m
# CONFIG_NF_CONNTRACK_SIP is not set
CONFIG_NF_CONNTRACK_TFTP=m
# CONFIG_NF_CT_NETLINK is not set
+# CONFIG_NETFILTER_TPROXY is not set
CONFIG_NETFILTER_XTABLES=y
# CONFIG_NETFILTER_XT_TARGET_CLASSIFY is not set
# CONFIG_NETFILTER_XT_TARGET_CONNMARK is not set
+# CONFIG_NETFILTER_XT_TARGET_CT is not set
# CONFIG_NETFILTER_XT_TARGET_DSCP is not set
+# CONFIG_NETFILTER_XT_TARGET_HL is not set
+# CONFIG_NETFILTER_XT_TARGET_LED is not set
# CONFIG_NETFILTER_XT_TARGET_MARK is not set
-CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set
# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set
CONFIG_NETFILTER_XT_TARGET_TRACE=m
# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set
# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set
+# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
# CONFIG_NETFILTER_XT_MATCH_CONNBYTES is not set
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
@@ -375,18 +434,21 @@ CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
CONFIG_NETFILTER_XT_MATCH_DCCP=m
# CONFIG_NETFILTER_XT_MATCH_DSCP is not set
# CONFIG_NETFILTER_XT_MATCH_ESP is not set
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
# CONFIG_NETFILTER_XT_MATCH_HELPER is not set
+# CONFIG_NETFILTER_XT_MATCH_HL is not set
# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set
# CONFIG_NETFILTER_XT_MATCH_LENGTH is not set
CONFIG_NETFILTER_XT_MATCH_LIMIT=y
# CONFIG_NETFILTER_XT_MATCH_MAC is not set
# CONFIG_NETFILTER_XT_MATCH_MARK is not set
-# CONFIG_NETFILTER_XT_MATCH_OWNER is not set
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+# CONFIG_NETFILTER_XT_MATCH_OWNER is not set
# CONFIG_NETFILTER_XT_MATCH_PKTTYPE is not set
# CONFIG_NETFILTER_XT_MATCH_QUOTA is not set
# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set
CONFIG_NETFILTER_XT_MATCH_REALM=m
+# CONFIG_NETFILTER_XT_MATCH_RECENT is not set
CONFIG_NETFILTER_XT_MATCH_SCTP=m
CONFIG_NETFILTER_XT_MATCH_STATE=y
# CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set
@@ -394,20 +456,21 @@ CONFIG_NETFILTER_XT_MATCH_STATE=y
# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set
# CONFIG_NETFILTER_XT_MATCH_TIME is not set
CONFIG_NETFILTER_XT_MATCH_U32=m
-CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+# CONFIG_NETFILTER_XT_MATCH_OSF is not set
+# CONFIG_IP_VS is not set
#
# IP: Netfilter Configuration
#
+CONFIG_NF_DEFRAG_IPV4=y
CONFIG_NF_CONNTRACK_IPV4=y
CONFIG_NF_CONNTRACK_PROC_COMPAT=y
# CONFIG_IP_NF_QUEUE is not set
CONFIG_IP_NF_IPTABLES=y
-# CONFIG_IP_NF_MATCH_RECENT is not set
-# CONFIG_IP_NF_MATCH_ECN is not set
+CONFIG_IP_NF_MATCH_ADDRTYPE=m
# CONFIG_IP_NF_MATCH_AH is not set
+# CONFIG_IP_NF_MATCH_ECN is not set
# CONFIG_IP_NF_MATCH_TTL is not set
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_FILTER=y
CONFIG_IP_NF_TARGET_REJECT=y
# CONFIG_IP_NF_TARGET_LOG is not set
@@ -415,8 +478,8 @@ CONFIG_IP_NF_TARGET_REJECT=y
CONFIG_NF_NAT=y
CONFIG_NF_NAT_NEEDED=y
CONFIG_IP_NF_TARGET_MASQUERADE=y
-# CONFIG_IP_NF_TARGET_REDIRECT is not set
# CONFIG_IP_NF_TARGET_NETMAP is not set
+# CONFIG_IP_NF_TARGET_REDIRECT is not set
# CONFIG_NF_NAT_SNMP_BASIC is not set
CONFIG_NF_NAT_FTP=m
CONFIG_NF_NAT_IRC=m
@@ -426,17 +489,22 @@ CONFIG_NF_NAT_TFTP=m
# CONFIG_NF_NAT_H323 is not set
# CONFIG_NF_NAT_SIP is not set
CONFIG_IP_NF_MANGLE=y
+# CONFIG_IP_NF_TARGET_CLUSTERIP is not set
# CONFIG_IP_NF_TARGET_ECN is not set
# CONFIG_IP_NF_TARGET_TTL is not set
-# CONFIG_IP_NF_TARGET_CLUSTERIP is not set
CONFIG_IP_NF_RAW=m
# CONFIG_IP_NF_ARPTABLES is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
+CONFIG_STP=y
CONFIG_BRIDGE=y
+CONFIG_BRIDGE_IGMP_SNOOPING=y
+# CONFIG_NET_DSA is not set
CONFIG_VLAN_8021Q=y
+# CONFIG_VLAN_8021Q_GVRP is not set
# CONFIG_DECNET is not set
CONFIG_LLC=y
CONFIG_LLC2=m
@@ -446,6 +514,8 @@ CONFIG_LLC2=m
# CONFIG_LAPB is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
CONFIG_NET_SCHED=y
#
@@ -455,7 +525,7 @@ CONFIG_NET_SCH_CBQ=m
# CONFIG_NET_SCH_HTB is not set
# CONFIG_NET_SCH_HFSC is not set
CONFIG_NET_SCH_PRIO=m
-CONFIG_NET_SCH_RR=m
+# CONFIG_NET_SCH_MULTIQ is not set
# CONFIG_NET_SCH_RED is not set
# CONFIG_NET_SCH_SFQ is not set
# CONFIG_NET_SCH_TEQL is not set
@@ -463,6 +533,7 @@ CONFIG_NET_SCH_RR=m
# CONFIG_NET_SCH_GRED is not set
# CONFIG_NET_SCH_DSMARK is not set
CONFIG_NET_SCH_NETEM=m
+# CONFIG_NET_SCH_DRR is not set
# CONFIG_NET_SCH_INGRESS is not set
#
@@ -496,8 +567,10 @@ CONFIG_NET_ACT_IPT=m
# CONFIG_NET_ACT_NAT is not set
CONFIG_NET_ACT_PEDIT=m
# CONFIG_NET_ACT_SIMP is not set
+# CONFIG_NET_ACT_SKBEDIT is not set
CONFIG_NET_CLS_IND=y
CONFIG_NET_SCH_FIFO=y
+# CONFIG_DCB is not set
#
# Network testing
@@ -514,14 +587,19 @@ CONFIG_HAMRADIO=y
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
CONFIG_FIB_RULES=y
+CONFIG_WIRELESS=y
+CONFIG_WIRELESS_EXT=y
+CONFIG_WEXT_CORE=y
+CONFIG_WEXT_PROC=y
+CONFIG_WEXT_PRIV=y
+# CONFIG_CFG80211 is not set
+CONFIG_WIRELESS_EXT_SYSFS=y
+# CONFIG_LIB80211 is not set
#
-# Wireless
+# CFG80211 needs to be enabled for MAC80211
#
-# CONFIG_CFG80211 is not set
-CONFIG_WIRELESS_EXT=y
-# CONFIG_MAC80211 is not set
-# CONFIG_IEEE80211 is not set
+# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -533,13 +611,17 @@ CONFIG_WIRELESS_EXT=y
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
# CONFIG_SYS_HYPERVISOR is not set
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
CONFIG_MTD_PARTITIONS=y
# CONFIG_MTD_REDBOOT_PARTS is not set
@@ -612,6 +694,11 @@ CONFIG_MTD_NAND_PLATFORM=y
# CONFIG_MTD_ONENAND is not set
#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
# UBI - Unsorted block images
#
# CONFIG_MTD_UBI is not set
@@ -623,23 +710,36 @@ CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_UMEM is not set
# CONFIG_BLK_DEV_COW_COMMON is not set
# CONFIG_BLK_DEV_LOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
# CONFIG_BLK_DEV_NBD is not set
# CONFIG_BLK_DEV_SX8 is not set
# CONFIG_BLK_DEV_RAM is not set
# CONFIG_CDROM_PKTCDVD is not set
# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_BLK_DEV_HD is not set
CONFIG_MISC_DEVICES=y
# CONFIG_PHANTOM is not set
-# CONFIG_EEPROM_93CX6 is not set
# CONFIG_SGI_IOC4 is not set
# CONFIG_TIFM_CORE is not set
# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_HP_ILO is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_CB710_CORE is not set
CONFIG_HAVE_IDE=y
# CONFIG_IDE is not set
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -656,10 +756,6 @@ CONFIG_SCSI_PROC_FS=y
# CONFIG_BLK_DEV_SR is not set
# CONFIG_CHR_DEV_SG is not set
# CONFIG_CHR_DEV_SCH is not set
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
# CONFIG_SCSI_MULTI_LUN is not set
# CONFIG_SCSI_CONSTANTS is not set
# CONFIG_SCSI_LOGGING is not set
@@ -676,27 +772,35 @@ CONFIG_SCSI_WAIT_SCAN=m
# CONFIG_SCSI_SRP_ATTRS is not set
CONFIG_SCSI_LOWLEVEL=y
# CONFIG_ISCSI_TCP is not set
+# CONFIG_SCSI_BNX2_ISCSI is not set
+# CONFIG_BE2ISCSI is not set
# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_SCSI_HPSA is not set
# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_3W_SAS is not set
# CONFIG_SCSI_ACARD is not set
# CONFIG_SCSI_AACRAID is not set
# CONFIG_SCSI_AIC7XXX is not set
# CONFIG_SCSI_AIC7XXX_OLD is not set
# CONFIG_SCSI_AIC79XX is not set
# CONFIG_SCSI_AIC94XX is not set
+# CONFIG_SCSI_MVSAS is not set
# CONFIG_SCSI_DPT_I2O is not set
# CONFIG_SCSI_ADVANSYS is not set
# CONFIG_SCSI_ARCMSR is not set
# CONFIG_MEGARAID_NEWGEN is not set
# CONFIG_MEGARAID_LEGACY is not set
# CONFIG_MEGARAID_SAS is not set
+# CONFIG_SCSI_MPT2SAS is not set
# CONFIG_SCSI_HPTIOP is not set
+# CONFIG_LIBFC is not set
+# CONFIG_LIBFCOE is not set
+# CONFIG_FCOE is not set
# CONFIG_SCSI_DMX3191D is not set
# CONFIG_SCSI_FUTURE_DOMAIN is not set
# CONFIG_SCSI_IPS is not set
# CONFIG_SCSI_INITIO is not set
# CONFIG_SCSI_INIA100 is not set
-# CONFIG_SCSI_MVSAS is not set
# CONFIG_SCSI_STEX is not set
# CONFIG_SCSI_SYM53C8XX_2 is not set
# CONFIG_SCSI_IPR is not set
@@ -708,9 +812,15 @@ CONFIG_SCSI_LOWLEVEL=y
# CONFIG_SCSI_DC390T is not set
# CONFIG_SCSI_NSP32 is not set
# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_PMCRAID is not set
+# CONFIG_SCSI_PM8001 is not set
# CONFIG_SCSI_SRP is not set
+# CONFIG_SCSI_BFA_FC is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
CONFIG_ATA=y
# CONFIG_ATA_NONSTANDARD is not set
+# CONFIG_ATA_VERBOSE_ERROR is not set
# CONFIG_SATA_PMP is not set
# CONFIG_SATA_AHCI is not set
# CONFIG_SATA_SIL24 is not set
@@ -732,6 +842,7 @@ CONFIG_ATA_SFF=y
# CONFIG_PATA_ALI is not set
# CONFIG_PATA_AMD is not set
# CONFIG_PATA_ARTOP is not set
+# CONFIG_PATA_ATP867X is not set
# CONFIG_PATA_ATIIXP is not set
# CONFIG_PATA_CMD640_PCI is not set
# CONFIG_PATA_CMD64X is not set
@@ -747,6 +858,7 @@ CONFIG_ATA_SFF=y
# CONFIG_PATA_IT821X is not set
# CONFIG_PATA_IT8213 is not set
# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_LEGACY is not set
# CONFIG_PATA_TRIFLEX is not set
# CONFIG_PATA_MARVELL is not set
# CONFIG_PATA_MPIIX is not set
@@ -757,29 +869,39 @@ CONFIG_ATA_SFF=y
# CONFIG_PATA_NS87415 is not set
# CONFIG_PATA_OPTI is not set
# CONFIG_PATA_OPTIDMA is not set
+# CONFIG_PATA_PDC2027X is not set
# CONFIG_PATA_PDC_OLD is not set
# CONFIG_PATA_RADISYS is not set
CONFIG_PATA_RB532=y
+# CONFIG_PATA_RDC is not set
# CONFIG_PATA_RZ1000 is not set
# CONFIG_PATA_SC1200 is not set
# CONFIG_PATA_SERVERWORKS is not set
-# CONFIG_PATA_PDC2027X is not set
# CONFIG_PATA_SIL680 is not set
# CONFIG_PATA_SIS is not set
+# CONFIG_PATA_TOSHIBA is not set
# CONFIG_PATA_VIA is not set
# CONFIG_PATA_WINBOND is not set
# CONFIG_PATA_PLATFORM is not set
+# CONFIG_PATA_SCH is not set
# CONFIG_MD is not set
# CONFIG_FUSION is not set
#
# IEEE 1394 (FireWire) support
#
+
+#
+# You can enable one or both FireWire driver stacks.
+#
+
+#
+# The newer stack is recommended.
+#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
# CONFIG_I2O is not set
CONFIG_NETDEVICES=y
-# CONFIG_NETDEVICES_MULTIQUEUE is not set
CONFIG_IFB=m
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
@@ -797,21 +919,28 @@ CONFIG_KORINA=y
# CONFIG_SUNGEM is not set
# CONFIG_CASSINI is not set
# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_SMC91X is not set
# CONFIG_DM9000 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_DNET is not set
# CONFIG_NET_TULIP is not set
# CONFIG_HP100 is not set
# CONFIG_IBM_NEW_EMAC_ZMII is not set
# CONFIG_IBM_NEW_EMAC_RGMII is not set
# CONFIG_IBM_NEW_EMAC_TAH is not set
# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
CONFIG_NET_PCI=y
# CONFIG_PCNET32 is not set
# CONFIG_AMD8111_ETH is not set
# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_KSZ884X_PCI is not set
# CONFIG_B44 is not set
# CONFIG_FORCEDETH is not set
# CONFIG_TC35815 is not set
-# CONFIG_EEPRO100 is not set
# CONFIG_E100 is not set
# CONFIG_FEALNX is not set
# CONFIG_NATSEMI is not set
@@ -821,30 +950,27 @@ CONFIG_NET_PCI=y
# CONFIG_R6040 is not set
# CONFIG_SIS900 is not set
# CONFIG_EPIC100 is not set
+# CONFIG_SMSC9420 is not set
# CONFIG_SUNDANCE is not set
# CONFIG_TLAN is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851_MLL is not set
CONFIG_VIA_RHINE=y
# CONFIG_VIA_RHINE_MMIO is not set
-CONFIG_VIA_RHINE_NAPI=y
# CONFIG_SC92031 is not set
+# CONFIG_ATL2 is not set
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
# CONFIG_TR is not set
-
-#
-# Wireless LAN
-#
-# CONFIG_WLAN_PRE80211 is not set
-CONFIG_WLAN_80211=y
-# CONFIG_IPW2100 is not set
-# CONFIG_IPW2200 is not set
-# CONFIG_LIBERTAS is not set
-# CONFIG_HERMES is not set
+CONFIG_WLAN=y
CONFIG_ATMEL=m
# CONFIG_PCI_ATMEL is not set
# CONFIG_PRISM54 is not set
-# CONFIG_IWLWIFI_LEDS is not set
# CONFIG_HOSTAP is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
# CONFIG_WAN is not set
# CONFIG_FDDI is not set
# CONFIG_HIPPI is not set
@@ -864,6 +990,7 @@ CONFIG_SLHC=m
# CONFIG_NETCONSOLE is not set
# CONFIG_NETPOLL is not set
# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_VMXNET3 is not set
# CONFIG_ISDN is not set
# CONFIG_PHONE is not set
@@ -872,7 +999,8 @@ CONFIG_SLHC=m
#
CONFIG_INPUT=y
# CONFIG_INPUT_FF_MEMLESS is not set
-# CONFIG_INPUT_POLLDEV is not set
+CONFIG_INPUT_POLLDEV=y
+# CONFIG_INPUT_SPARSEKMAP is not set
#
# Userland interfaces
@@ -887,17 +1015,29 @@ CONFIG_INPUT=y
#
CONFIG_INPUT_KEYBOARD=y
# CONFIG_KEYBOARD_ATKBD is not set
-# CONFIG_KEYBOARD_SUNKBD is not set
# CONFIG_KEYBOARD_LKKBD is not set
-# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_GPIO is not set
+# CONFIG_KEYBOARD_MATRIX is not set
# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
# CONFIG_KEYBOARD_STOWAWAY is not set
-# CONFIG_KEYBOARD_GPIO is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TABLET is not set
# CONFIG_INPUT_TOUCHSCREEN is not set
-# CONFIG_INPUT_MISC is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_PCSPKR is not set
+# CONFIG_INPUT_ATI_REMOTE is not set
+# CONFIG_INPUT_ATI_REMOTE2 is not set
+# CONFIG_INPUT_KEYSPAN_REMOTE is not set
+# CONFIG_INPUT_POWERMATE is not set
+# CONFIG_INPUT_YEALINK is not set
+# CONFIG_INPUT_CM109 is not set
+# CONFIG_INPUT_UINPUT is not set
+# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
+CONFIG_INPUT_RB532_BUTTON=y
#
# Hardware I/O ports
@@ -909,6 +1049,7 @@ CONFIG_INPUT_KEYBOARD=y
# Character devices
#
# CONFIG_VT is not set
+CONFIG_DEVKMEM=y
# CONFIG_SERIAL_NONSTANDARD is not set
# CONFIG_NOZOMI is not set
@@ -928,105 +1069,95 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=2
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_IPMI_HANDLER is not set
CONFIG_HW_RANDOM=y
-# CONFIG_RTC is not set
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
# CONFIG_R3964 is not set
# CONFIG_APPLICOM is not set
# CONFIG_RAW_DRIVER is not set
# CONFIG_TCG_TPM is not set
CONFIG_DEVPORT=y
# CONFIG_I2C is not set
-
-#
-# SPI support
-#
# CONFIG_SPI is not set
-# CONFIG_SPI_MASTER is not set
-# CONFIG_W1 is not set
-# CONFIG_POWER_SUPPLY is not set
-# CONFIG_HWMON is not set
-# CONFIG_THERMAL is not set
-CONFIG_WATCHDOG=y
-# CONFIG_WATCHDOG_NOWAYOUT is not set
#
-# Watchdog Device Drivers
+# PPS support
#
-# CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_PPS is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
#
-# PCI-based Watchdog Cards
+# Memory mapped GPIO expanders:
#
-# CONFIG_PCIPCWATCHDOG is not set
-# CONFIG_WDTPCI is not set
+# CONFIG_GPIO_IT8761E is not set
+# CONFIG_GPIO_SCH is not set
#
-# Sonics Silicon Backplane
+# I2C GPIO expanders:
#
-CONFIG_SSB_POSSIBLE=y
-# CONFIG_SSB is not set
#
-# Multifunction device drivers
-#
-# CONFIG_MFD_SM501 is not set
-# CONFIG_HTC_PASIC3 is not set
-
-#
-# Multimedia devices
-#
-CONFIG_VIDEO_DEV=m
-CONFIG_VIDEO_V4L2_COMMON=m
-CONFIG_VIDEO_ALLOW_V4L1=y
-CONFIG_VIDEO_V4L1_COMPAT=y
-CONFIG_VIDEO_V4L2=m
-CONFIG_VIDEO_V4L1=m
-CONFIG_VIDEO_CAPTURE_DRIVERS=y
-# CONFIG_VIDEO_ADV_DEBUG is not set
-# CONFIG_VIDEO_HELPER_CHIPS_AUTO is not set
-
-#
-# Encoders/decoders and other helper chips
+# PCI GPIO expanders:
#
+# CONFIG_GPIO_CS5535 is not set
+# CONFIG_GPIO_BT8XX is not set
+# CONFIG_GPIO_LANGWELL is not set
#
-# Audio decoders
+# SPI GPIO expanders:
#
#
-# Video decoders
+# AC97 GPIO expanders:
#
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
#
-# Video and audio decoders
+# Watchdog Device Drivers
#
+# CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_ALIM7101_WDT is not set
+CONFIG_RC32434_WDT=y
#
-# MPEG video encoders
+# PCI-based Watchdog Cards
#
-# CONFIG_VIDEO_CX2341X is not set
+# CONFIG_PCIPCWATCHDOG is not set
+# CONFIG_WDTPCI is not set
+CONFIG_SSB_POSSIBLE=y
#
-# Video encoders
+# Sonics Silicon Backplane
#
+# CONFIG_SSB is not set
#
-# Video improvement chips
+# Multifunction device drivers
#
-# CONFIG_VIDEO_VIVI is not set
-# CONFIG_VIDEO_CPIA is not set
-# CONFIG_VIDEO_STRADIS is not set
-# CONFIG_SOC_CAMERA is not set
-# CONFIG_RADIO_ADAPTERS is not set
-# CONFIG_DVB_CORE is not set
-# CONFIG_DAB is not set
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_TIMBERDALE is not set
+# CONFIG_LPC_SCH is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
#
# Graphics support
#
+# CONFIG_VGA_ARB is not set
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
@@ -1037,13 +1168,10 @@ CONFIG_VIDEO_CAPTURE_DRIVERS=y
# Display device support
#
# CONFIG_DISPLAY_SUPPORT is not set
-
-#
-# Sound
-#
# CONFIG_SOUND is not set
CONFIG_HID_SUPPORT=y
# CONFIG_HID is not set
+# CONFIG_HID_PID is not set
CONFIG_USB_SUPPORT=y
CONFIG_USB_ARCH_HAS_HCD=y
CONFIG_USB_ARCH_HAS_OHCI=y
@@ -1053,9 +1181,18 @@ CONFIG_USB_ARCH_HAS_EHCI=y
# CONFIG_USB_OTG_BLACKLIST_HUB is not set
#
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+# Enable Host or Gadget support to see Inventra options
+#
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
#
# CONFIG_USB_GADGET is not set
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_UWB is not set
# CONFIG_MMC is not set
# CONFIG_MEMSTICK is not set
CONFIG_NEW_LEDS=y
@@ -1064,41 +1201,67 @@ CONFIG_LEDS_CLASS=y
#
# LED drivers
#
+CONFIG_LEDS_MIKROTIK_RB532=y
# CONFIG_LEDS_GPIO is not set
+# CONFIG_LEDS_LT3593 is not set
+CONFIG_LEDS_TRIGGERS=y
#
# LED Triggers
#
-CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_GPIO is not set
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_ACCESSIBILITY is not set
# CONFIG_INFINIBAND is not set
CONFIG_RTC_LIB=y
# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
# CONFIG_UIO is not set
#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
# File systems
#
CONFIG_EXT2_FS=y
# CONFIG_EXT2_FS_XATTR is not set
# CONFIG_EXT2_FS_XIP is not set
# CONFIG_EXT3_FS is not set
-# CONFIG_EXT4DEV_FS is not set
+# CONFIG_EXT4_FS is not set
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
# CONFIG_DNOTIFY is not set
# CONFIG_INOTIFY is not set
+CONFIG_INOTIFY_USER=y
# CONFIG_QUOTA is not set
# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set
# CONFIG_FUSE_FS is not set
#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
# CD-ROM/DVD Filesystems
#
# CONFIG_ISO9660_FS is not set
@@ -1117,15 +1280,13 @@ CONFIG_EXT2_FS=y
CONFIG_PROC_FS=y
CONFIG_PROC_KCORE=y
CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
CONFIG_SYSFS=y
CONFIG_TMPFS=y
# CONFIG_TMPFS_POSIX_ACL is not set
# CONFIG_HUGETLB_PAGE is not set
CONFIG_CONFIGFS_FS=y
-
-#
-# Miscellaneous filesystems
-#
+CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
@@ -1148,9 +1309,14 @@ CONFIG_JFFS2_RTIME=y
CONFIG_JFFS2_CMODE_PRIORITY=y
# CONFIG_JFFS2_CMODE_SIZE is not set
# CONFIG_JFFS2_CMODE_FAVOURLZO is not set
+# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
+CONFIG_SQUASHFS=y
+# CONFIG_SQUASHFS_EMBEDDED is not set
+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
# CONFIG_VXFS_FS is not set
# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
# CONFIG_HPFS_FS is not set
# CONFIG_QNX4FS_FS is not set
# CONFIG_ROMFS_FS is not set
@@ -1160,6 +1326,7 @@ CONFIG_NETWORK_FILESYSTEMS=y
# CONFIG_NFS_FS is not set
# CONFIG_NFSD is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -1198,11 +1365,22 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+CONFIG_STRIP_ASM_SYMS=y
# CONFIG_UNUSED_SYMBOLS is not set
# CONFIG_DEBUG_FS is not set
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_TRACING_SUPPORT=y
+# CONFIG_FTRACE is not set
# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_CMDLINE_BOOL is not set
#
@@ -1210,18 +1388,32 @@ CONFIG_FRAME_WARN=1024
#
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
-# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
CONFIG_CRYPTO=y
#
# Crypto core or helper
#
+# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=m
-CONFIG_CRYPTO_AEAD=m
-CONFIG_CRYPTO_BLKCIPHER=m
-# CONFIG_CRYPTO_MANAGER is not set
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD2=m
+CONFIG_CRYPTO_BLKCIPHER2=m
+CONFIG_CRYPTO_HASH=m
+CONFIG_CRYPTO_HASH2=m
+CONFIG_CRYPTO_RNG=m
+CONFIG_CRYPTO_RNG2=m
+CONFIG_CRYPTO_PCOMP=y
+CONFIG_CRYPTO_MANAGER=m
+CONFIG_CRYPTO_MANAGER2=m
# CONFIG_CRYPTO_GF128MUL is not set
# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=m
# CONFIG_CRYPTO_CRYPTD is not set
# CONFIG_CRYPTO_AUTHENC is not set
CONFIG_CRYPTO_TEST=m
@@ -1249,14 +1441,20 @@ CONFIG_CRYPTO_TEST=m
#
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
-# CONFIG_CRYPTO_CRC32C is not set
+CONFIG_CRYPTO_CRC32C=m
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
# CONFIG_CRYPTO_MD5 is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
# CONFIG_CRYPTO_SHA1 is not set
# CONFIG_CRYPTO_SHA256 is not set
# CONFIG_CRYPTO_SHA512 is not set
@@ -1266,7 +1464,7 @@ CONFIG_CRYPTO_TEST=m
#
# Ciphers
#
-# CONFIG_CRYPTO_AES is not set
+CONFIG_CRYPTO_AES=m
# CONFIG_CRYPTO_ANUBIS is not set
# CONFIG_CRYPTO_ARC4 is not set
# CONFIG_CRYPTO_BLOWFISH is not set
@@ -1286,27 +1484,36 @@ CONFIG_CRYPTO_TEST=m
# Compression
#
# CONFIG_CRYPTO_DEFLATE is not set
+CONFIG_CRYPTO_ZLIB=y
# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+CONFIG_CRYPTO_ANSI_CPRNG=m
# CONFIG_CRYPTO_HW is not set
+# CONFIG_BINARY_PRINTF is not set
#
# Library routines
#
CONFIG_BITREVERSE=y
-# CONFIG_GENERIC_FIND_FIRST_BIT is not set
+CONFIG_GENERIC_FIND_LAST_BIT=y
CONFIG_CRC_CCITT=m
CONFIG_CRC16=m
+# CONFIG_CRC_T10DIF is not set
# CONFIG_CRC_ITU_T is not set
CONFIG_CRC32=y
# CONFIG_CRC7 is not set
CONFIG_LIBCRC32C=m
CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=y
+CONFIG_DECOMPRESS_GZIP=y
CONFIG_TEXTSEARCH=y
CONFIG_TEXTSEARCH_KMP=m
CONFIG_TEXTSEARCH_BM=m
CONFIG_TEXTSEARCH_FSM=m
-CONFIG_PLIST=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/mips/include/asm/i8253.h b/arch/mips/include/asm/i8253.h
index 032ca73f181b..48bb82372994 100644
--- a/arch/mips/include/asm/i8253.h
+++ b/arch/mips/include/asm/i8253.h
@@ -12,7 +12,7 @@
#define PIT_CH0 0x40
#define PIT_CH2 0x42
-extern spinlock_t i8253_lock;
+extern raw_spinlock_t i8253_lock;
extern void setup_pit_timer(void);
diff --git a/arch/mips/include/asm/kgdb.h b/arch/mips/include/asm/kgdb.h
index 48223b09396c..19002d605ac4 100644
--- a/arch/mips/include/asm/kgdb.h
+++ b/arch/mips/include/asm/kgdb.h
@@ -38,6 +38,8 @@ extern int kgdb_early_setup;
extern void *saved_vectors[32];
extern void handle_exception(struct pt_regs *regs);
extern void breakinst(void);
+extern int kgdb_ll_trap(int cmd, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig);
#endif /* __KERNEL__ */
diff --git a/arch/mips/include/asm/mach-au1x00/au1000.h b/arch/mips/include/asm/mach-au1x00/au1000.h
index ae07423e6e82..e76941db2312 100644
--- a/arch/mips/include/asm/mach-au1x00/au1000.h
+++ b/arch/mips/include/asm/mach-au1x00/au1000.h
@@ -190,8 +190,6 @@ extern unsigned long au1xxx_calc_clock(void);
/* PM: arch/mips/alchemy/common/sleeper.S, power.c, irq.c */
void au1xxx_save_and_sleep(void);
void au_sleep(void);
-void save_au1xxx_intctl(void);
-void restore_au1xxx_intctl(void);
/* SOC Interrupt numbers */
@@ -835,6 +833,38 @@ enum soc_au1200_ints {
#define MEM_STNAND_DATA 0x20
#endif
+
+/* Interrupt Controller register offsets */
+#define IC_CFG0RD 0x40
+#define IC_CFG0SET 0x40
+#define IC_CFG0CLR 0x44
+#define IC_CFG1RD 0x48
+#define IC_CFG1SET 0x48
+#define IC_CFG1CLR 0x4C
+#define IC_CFG2RD 0x50
+#define IC_CFG2SET 0x50
+#define IC_CFG2CLR 0x54
+#define IC_REQ0INT 0x54
+#define IC_SRCRD 0x58
+#define IC_SRCSET 0x58
+#define IC_SRCCLR 0x5C
+#define IC_REQ1INT 0x5C
+#define IC_ASSIGNRD 0x60
+#define IC_ASSIGNSET 0x60
+#define IC_ASSIGNCLR 0x64
+#define IC_WAKERD 0x68
+#define IC_WAKESET 0x68
+#define IC_WAKECLR 0x6C
+#define IC_MASKRD 0x70
+#define IC_MASKSET 0x70
+#define IC_MASKCLR 0x74
+#define IC_RISINGRD 0x78
+#define IC_RISINGCLR 0x78
+#define IC_FALLINGRD 0x7C
+#define IC_FALLINGCLR 0x7C
+#define IC_TESTBIT 0x80
+
+
/* Interrupt Controller 0 */
#define IC0_CFG0RD 0xB0400040
#define IC0_CFG0SET 0xB0400040
diff --git a/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h b/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
index 8c6b1105ce0b..c8a553a36ba4 100644
--- a/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
+++ b/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
@@ -358,10 +358,6 @@ u32 au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr);
u32 au1xxx_ddma_add_device(dbdev_tab_t *dev);
extern void au1xxx_ddma_del_device(u32 devid);
void *au1xxx_ddma_get_nextptr_virt(au1x_ddma_desc_t *dp);
-#ifdef CONFIG_PM
-void au1xxx_dbdma_suspend(void);
-void au1xxx_dbdma_resume(void);
-#endif
/*
* Flags for the put_source/put_dest functions.
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
index 43d4da0b1e9f..3999ec0aa7f5 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
@@ -20,7 +20,7 @@ static inline unsigned long bcm63xx_gpio_count(void)
}
}
-#define GPIO_DIR_OUT 0x0
-#define GPIO_DIR_IN 0x1
+#define BCM63XX_GPIO_DIR_OUT 0x0
+#define BCM63XX_GPIO_DIR_IN 0x1
#endif /* !BCM63XX_GPIO_H */
diff --git a/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h b/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
index 16210cedd929..675bd8641d5a 100644
--- a/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
@@ -52,6 +52,8 @@
#define cpu_has_tx39_cache 0
#define cpu_has_userlocal 0
#define cpu_has_vce 0
+#define cpu_has_veic 0
+#define cpu_has_vint 0
#define cpu_has_vtag_icache 0
#define cpu_has_watch 1
diff --git a/arch/mips/include/asm/mach-loongson/gpio.h b/arch/mips/include/asm/mach-loongson/gpio.h
new file mode 100644
index 000000000000..e30e73d443df
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson/gpio.h
@@ -0,0 +1,35 @@
+/*
+ * STLS2F GPIO Support
+ *
+ * Copyright (c) 2008 Richard Liu, STMicroelectronics <richard.liu@st.com>
+ * Copyright (c) 2008-2010 Arnaud Patard <apatard@mandriva.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __STLS2F_GPIO_H
+#define __STLS2F_GPIO_H
+
+#include <asm-generic/gpio.h>
+
+extern void gpio_set_value(unsigned gpio, int value);
+extern int gpio_get_value(unsigned gpio);
+extern int gpio_cansleep(unsigned gpio);
+
+/* The chip can generate interrupts, but this has not been
+ * tested and the documentation is not clear.
+ */
+static inline int gpio_to_irq(int gpio)
+{
+ return -EINVAL;
+}
+
+static inline int irq_to_gpio(int gpio)
+{
+ return -EINVAL;
+}
+
+#endif /* __STLS2F_GPIO_H */
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index be5bb16be4e0..3562b854f2cd 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -125,6 +125,30 @@ static int __init wait_disable(char *s)
__setup("nowait", wait_disable);
+static int __cpuinitdata mips_fpu_disabled;
+
+static int __init fpu_disable(char *s)
+{
+ cpu_data[0].options &= ~MIPS_CPU_FPU;
+ mips_fpu_disabled = 1;
+
+ return 1;
+}
+
+__setup("nofpu", fpu_disable);
+
+int __cpuinitdata mips_dsp_disabled;
+
+static int __init dsp_disable(char *s)
+{
+ cpu_data[0].ases &= ~MIPS_ASE_DSP;
+ mips_dsp_disabled = 1;
+
+ return 1;
+}
+
+__setup("nodsp", dsp_disable);
+
void __init check_wait(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
@@ -982,6 +1006,12 @@ __cpuinit void cpu_probe(void)
*/
BUG_ON(current_cpu_type() != c->cputype);
+ if (mips_fpu_disabled)
+ c->options &= ~MIPS_CPU_FPU;
+
+ if (mips_dsp_disabled)
+ c->ases &= ~MIPS_ASE_DSP;
+
if (c->options & MIPS_CPU_FPU) {
c->fpu_id = cpu_get_fpu_id();
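The cpu-probe.c hunk above moves the "nofpu" and "nodsp" command-line handling out of setup.c and latches each request in a flag that cpu_probe() re-applies, so the override is not lost when the CPU options are probed. Purely as an illustration of the __setup() mechanism (the option name and flag below are hypothetical, not part of this patch), a minimal handler looks like this:

#include <linux/init.h>

static int feature_foo_disabled;        /* hypothetical flag, read by later probe code */

/* Parse the hypothetical "nofoo" kernel command-line option. */
static int __init foo_disable(char *str)
{
        feature_foo_disabled = 1;
        return 1;       /* option consumed */
}
__setup("nofoo", foo_disable);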
diff --git a/arch/mips/kernel/cpufreq/loongson2_cpufreq.c b/arch/mips/kernel/cpufreq/loongson2_cpufreq.c
index 2f6a0b147ab8..ae5db206347c 100644
--- a/arch/mips/kernel/cpufreq/loongson2_cpufreq.c
+++ b/arch/mips/kernel/cpufreq/loongson2_cpufreq.c
@@ -65,7 +65,7 @@ static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
return -ENODEV;
cpus_allowed = current->cpus_allowed;
- set_cpus_allowed(current, cpumask_of_cpu(cpu));
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
if (cpufreq_frequency_table_target
(policy, &loongson2_clockmod_table[0], target_freq, relation,
@@ -91,7 +91,7 @@ static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
/* notifiers */
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
- set_cpus_allowed(current, cpus_allowed);
+ set_cpus_allowed_ptr(current, &cpus_allowed);
/* setting the cpu frequency */
clk_set_rate(cpuclk, freq);
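The cpufreq hunks above (and the mips-mt-fpaff.c and traps.c hunks further down) replace the old by-value set_cpus_allowed() calls with set_cpus_allowed_ptr(), which takes a pointer to a const struct cpumask. A minimal sketch of the save/pin/restore pattern used here, with a hypothetical helper name and assuming kernel code of this era where task_struct still exposes cpus_allowed directly, could look like this:

#include <linux/cpumask.h>
#include <linux/sched.h>

/* Hypothetical helper: temporarily pin the current task to 'cpu',
 * run fn() there, then restore the caller's original affinity. */
static int run_pinned_on_cpu(unsigned int cpu, void (*fn)(void))
{
        cpumask_t saved = current->cpus_allowed;        /* save the old mask */
        int ret;

        ret = set_cpus_allowed_ptr(current, cpumask_of(cpu));
        if (ret)
                return ret;

        fn();                           /* now guaranteed to run on 'cpu' */

        set_cpus_allowed_ptr(current, &saved);          /* restore */
        return 0;
}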
diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c
index ed5c441615e4..94794062a177 100644
--- a/arch/mips/kernel/i8253.c
+++ b/arch/mips/kernel/i8253.c
@@ -15,7 +15,7 @@
#include <asm/io.h>
#include <asm/time.h>
-DEFINE_SPINLOCK(i8253_lock);
+DEFINE_RAW_SPINLOCK(i8253_lock);
EXPORT_SYMBOL(i8253_lock);
/*
@@ -26,7 +26,7 @@ EXPORT_SYMBOL(i8253_lock);
static void init_pit_timer(enum clock_event_mode mode,
struct clock_event_device *evt)
{
- spin_lock(&i8253_lock);
+ raw_spin_lock(&i8253_lock);
switch(mode) {
case CLOCK_EVT_MODE_PERIODIC:
@@ -55,7 +55,7 @@ static void init_pit_timer(enum clock_event_mode mode,
/* Nothing to do here */
break;
}
- spin_unlock(&i8253_lock);
+ raw_spin_unlock(&i8253_lock);
}
/*
@@ -65,10 +65,10 @@ static void init_pit_timer(enum clock_event_mode mode,
*/
static int pit_next_event(unsigned long delta, struct clock_event_device *evt)
{
- spin_lock(&i8253_lock);
+ raw_spin_lock(&i8253_lock);
outb_p(delta & 0xff , PIT_CH0); /* LSB */
outb(delta >> 8 , PIT_CH0); /* MSB */
- spin_unlock(&i8253_lock);
+ raw_spin_unlock(&i8253_lock);
return 0;
}
@@ -137,7 +137,7 @@ static cycle_t pit_read(struct clocksource *cs)
static int old_count;
static u32 old_jifs;
- spin_lock_irqsave(&i8253_lock, flags);
+ raw_spin_lock_irqsave(&i8253_lock, flags);
/*
* Although our caller may have the read side of xtime_lock,
* this is now a seqlock, and we are cheating in this routine
@@ -183,7 +183,7 @@ static cycle_t pit_read(struct clocksource *cs)
old_count = count;
old_jifs = jifs;
- spin_unlock_irqrestore(&i8253_lock, flags);
+ raw_spin_unlock_irqrestore(&i8253_lock, flags);
count = (LATCH - 1) - count;
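The i8253 hunks convert the PIT lock from spinlock_t to raw_spinlock_t, keeping it a genuine busy-wait lock that is never turned into a sleeping lock; this matters because it is taken from clock-event and clocksource paths. A minimal sketch of the same locking pattern, with a hypothetical helper that programs a 16-bit PIT count, might look like this:

#include <linux/spinlock.h>
#include <asm/io.h>

static DEFINE_RAW_SPINLOCK(my_pit_lock);        /* stays a spinning lock in all configs */

/* Hypothetical helper: write a 16-bit count to PIT channel 0 with the
 * lock held and local interrupts disabled, mirroring the hunks above. */
static void my_pit_program(unsigned int count)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&my_pit_lock, flags);
        outb_p(count & 0xff, 0x40);     /* LSB, PIT channel 0 data port */
        outb(count >> 8, 0x40);         /* MSB */
        raw_spin_unlock_irqrestore(&my_pit_lock, flags);
}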
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
index 50c9bb880667..9b78ff6e9b84 100644
--- a/arch/mips/kernel/kgdb.c
+++ b/arch/mips/kernel/kgdb.c
@@ -180,6 +180,11 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
*(ptr++) = regs->cp0_epc;
}
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+ regs->cp0_epc = pc;
+}
+
/*
* Calls linux_debug_hook before the kernel dies. If KGDB is enabled,
* then try to fall into the debugger
@@ -198,7 +203,7 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
if (atomic_read(&kgdb_active) != -1)
kgdb_nmicallback(smp_processor_id(), regs);
- if (kgdb_handle_exception(trap, compute_signal(trap), 0, regs))
+ if (kgdb_handle_exception(trap, compute_signal(trap), cmd, regs))
return NOTIFY_DONE;
if (atomic_read(&kgdb_setting_breakpoint))
@@ -212,6 +217,26 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
return NOTIFY_STOP;
}
+#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
+int kgdb_ll_trap(int cmd, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig)
+{
+ struct die_args args = {
+ .regs = regs,
+ .str = str,
+ .err = err,
+ .trapnr = trap,
+ .signr = sig,
+
+ };
+
+ if (!kgdb_io_module_registered)
+ return NOTIFY_DONE;
+
+ return kgdb_mips_notify(NULL, cmd, &args);
+}
+#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
+
static struct notifier_block kgdb_notifier = {
.notifier_call = kgdb_mips_notify,
};
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index cbc6182b0065..f5981c499109 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -100,10 +100,10 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
cpus_intersects(new_mask, mt_fpu_cpumask)) {
cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
- retval = set_cpus_allowed(p, effective_mask);
+ retval = set_cpus_allowed_ptr(p, &effective_mask);
} else {
clear_ti_thread_flag(ti, TIF_FPUBOUND);
- retval = set_cpus_allowed(p, new_mask);
+ retval = set_cpus_allowed_ptr(p, &new_mask);
}
out_unlock:
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index f9513f9e61d3..85aef3fc6716 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -569,27 +569,6 @@ void __init setup_arch(char **cmdline_p)
plat_smp_setup();
}
-static int __init fpu_disable(char *s)
-{
- int i;
-
- for (i = 0; i < NR_CPUS; i++)
- cpu_data[i].options &= ~MIPS_CPU_FPU;
-
- return 1;
-}
-
-__setup("nofpu", fpu_disable);
-
-static int __init dsp_disable(char *s)
-{
- cpu_data[0].ases &= ~MIPS_ASE_DSP;
-
- return 1;
-}
-
-__setup("nodsp", dsp_disable);
-
unsigned long kernelsp[NR_CPUS];
unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index d612c6dcb746..8bdd6a663c7f 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -26,6 +26,7 @@
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/notifier.h>
+#include <linux/kdb.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
@@ -185,6 +186,11 @@ void show_stack(struct task_struct *task, unsigned long *sp)
regs.regs[29] = task->thread.reg29;
regs.regs[31] = 0;
regs.cp0_epc = task->thread.reg31;
+#ifdef CONFIG_KGDB_KDB
+ } else if (atomic_read(&kgdb_active) != -1 &&
+ kdb_current_regs) {
+ memcpy(&regs, kdb_current_regs, sizeof(regs));
+#endif /* CONFIG_KGDB_KDB */
} else {
prepare_frametrace(&regs);
}
@@ -360,6 +366,8 @@ void __noreturn die(const char * str, struct pt_regs * regs)
unsigned long dvpret = dvpe();
#endif /* CONFIG_MIPS_MT_SMTC */
+ notify_die(DIE_OOPS, str, (struct pt_regs *)regs, SIGSEGV, 0, 0);
+
console_verbose();
spin_lock_irq(&die_lock);
bust_spinlocks(1);
@@ -704,6 +712,11 @@ static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
siginfo_t info;
char b[40];
+#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
+ if (kgdb_ll_trap(DIE_TRAP, str, regs, code, 0, 0) == NOTIFY_STOP)
+ return;
+#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
+
if (notify_die(DIE_TRAP, str, regs, code, 0, 0) == NOTIFY_STOP)
return;
@@ -854,7 +867,7 @@ static void mt_ase_fp_affinity(void)
= current->cpus_allowed;
cpus_and(tmask, current->cpus_allowed,
mt_fpu_cpumask);
- set_cpus_allowed(current, tmask);
+ set_cpus_allowed_ptr(current, &tmask);
set_thread_flag(TIF_FPUBOUND);
}
}
diff --git a/arch/mips/lasat/image/head.S b/arch/mips/lasat/image/head.S
index efb95f2609c2..e0ecda92c40a 100644
--- a/arch/mips/lasat/image/head.S
+++ b/arch/mips/lasat/image/head.S
@@ -1,7 +1,7 @@
#include <asm/lasat/head.h>
.text
- .section .text.start, "ax"
+ .section .text..start, "ax"
.set noreorder
.set mips3
diff --git a/arch/mips/lasat/image/romscript.normal b/arch/mips/lasat/image/romscript.normal
index 988f8ad189cb..0864c963e188 100644
--- a/arch/mips/lasat/image/romscript.normal
+++ b/arch/mips/lasat/image/romscript.normal
@@ -4,7 +4,7 @@ SECTIONS
{
.text :
{
- *(.text.start)
+ *(.text..start)
}
/* Data in ROM */
diff --git a/arch/mips/loongson/common/Makefile b/arch/mips/loongson/common/Makefile
index 7668c4de1151..cdd2e812ba1a 100644
--- a/arch/mips/loongson/common/Makefile
+++ b/arch/mips/loongson/common/Makefile
@@ -4,6 +4,7 @@
obj-y += setup.o init.o cmdline.o env.o time.o reset.o irq.o \
pci.o bonito-irq.o mem.o machtype.o platform.o
+obj-$(CONFIG_GENERIC_GPIO) += gpio.o
#
# Serial port support
diff --git a/arch/mips/loongson/common/gpio.c b/arch/mips/loongson/common/gpio.c
new file mode 100644
index 000000000000..e8a0ffa935b4
--- /dev/null
+++ b/arch/mips/loongson/common/gpio.c
@@ -0,0 +1,139 @@
+/*
+ * STLS2F GPIO Support
+ *
+ * Copyright (c) 2008 Richard Liu, STMicroelectronics <richard.liu@st.com>
+ * Copyright (c) 2008-2010 Arnaud Patard <apatard@mandriva.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/err.h>
+#include <asm/types.h>
+#include <loongson.h>
+#include <linux/gpio.h>
+
+#define STLS2F_N_GPIO 4
+#define STLS2F_GPIO_IN_OFFSET 16
+
+static DEFINE_SPINLOCK(gpio_lock);
+
+int gpio_get_value(unsigned gpio)
+{
+ u32 val;
+ u32 mask;
+
+ if (gpio >= STLS2F_N_GPIO)
+ return __gpio_get_value(gpio);
+
+ mask = 1 << (gpio + STLS2F_GPIO_IN_OFFSET);
+ spin_lock(&gpio_lock);
+ val = LOONGSON_GPIODATA;
+ spin_unlock(&gpio_lock);
+
+ return ((val & mask) != 0);
+}
+EXPORT_SYMBOL(gpio_get_value);
+
+void gpio_set_value(unsigned gpio, int state)
+{
+ u32 val;
+ u32 mask;
+
+ if (gpio >= STLS2F_N_GPIO) {
+ __gpio_set_value(gpio, state);
+ return;
+ }
+
+ mask = 1 << gpio;
+
+ spin_lock(&gpio_lock);
+ val = LOONGSON_GPIODATA;
+ if (state)
+ val |= mask;
+ else
+ val &= (~mask);
+ LOONGSON_GPIODATA = val;
+ spin_unlock(&gpio_lock);
+}
+EXPORT_SYMBOL(gpio_set_value);
+
+int gpio_cansleep(unsigned gpio)
+{
+ if (gpio < STLS2F_N_GPIO)
+ return 0;
+ else
+ return __gpio_cansleep(gpio);
+}
+EXPORT_SYMBOL(gpio_cansleep);
+
+static int ls2f_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
+{
+ u32 temp;
+ u32 mask;
+
+ if (gpio >= STLS2F_N_GPIO)
+ return -EINVAL;
+
+ spin_lock(&gpio_lock);
+ mask = 1 << gpio;
+ temp = LOONGSON_GPIOIE;
+ temp |= mask;
+ LOONGSON_GPIOIE = temp;
+ spin_unlock(&gpio_lock);
+
+ return 0;
+}
+
+static int ls2f_gpio_direction_output(struct gpio_chip *chip,
+ unsigned gpio, int level)
+{
+ u32 temp;
+ u32 mask;
+
+ if (gpio >= STLS2F_N_GPIO)
+ return -EINVAL;
+
+ gpio_set_value(gpio, level);
+ spin_lock(&gpio_lock);
+ mask = 1 << gpio;
+ temp = LOONGSON_GPIOIE;
+ temp &= (~mask);
+ LOONGSON_GPIOIE = temp;
+ spin_unlock(&gpio_lock);
+
+ return 0;
+}
+
+static int ls2f_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
+{
+ return gpio_get_value(gpio);
+}
+
+static void ls2f_gpio_set_value(struct gpio_chip *chip,
+ unsigned gpio, int value)
+{
+ gpio_set_value(gpio, value);
+}
+
+static struct gpio_chip ls2f_chip = {
+ .label = "ls2f",
+ .direction_input = ls2f_gpio_direction_input,
+ .get = ls2f_gpio_get_value,
+ .direction_output = ls2f_gpio_direction_output,
+ .set = ls2f_gpio_set_value,
+ .base = 0,
+ .ngpio = STLS2F_N_GPIO,
+};
+
+static int __init ls2f_gpio_setup(void)
+{
+ return gpiochip_add(&ls2f_chip);
+}
+arch_initcall(ls2f_gpio_setup);
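With ls2f_gpio_setup() registering the four on-chip lines at base 0, other kernel code can drive them through the generic gpiolib entry points. A minimal consumer sketch, assuming a hypothetical LED wired to GPIO 1 (the GPIO number and the initcall below are illustrative only, not part of this patch), might be:

#include <linux/gpio.h>
#include <linux/init.h>

#define MY_LED_GPIO     1       /* hypothetical board wiring */

static int __init my_led_init(void)
{
        int err;

        err = gpio_request(MY_LED_GPIO, "my-led");
        if (err)
                return err;

        /* Configure as an output driven high; for the STLS2F lines this
         * ends up in ls2f_gpio_direction_output() and gpio_set_value(). */
        err = gpio_direction_output(MY_LED_GPIO, 1);
        if (err)
                gpio_free(MY_LED_GPIO);

        return err;
}
late_initcall(my_led_init);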
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 8f2f8e9d8b21..8c8ab0bd628d 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -351,7 +351,8 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx)
if (MIPSInst_RD(ir) == FPCREG_CSR) {
value = ctx->fcr31;
- value = (value & ~0x3) | mips_rm[value & 0x3];
+ value = (value & ~FPU_CSR_RM) |
+ mips_rm[modeindex(value)];
#ifdef CSRTRACE
printk("%p gpr[%d]<-csr=%08x\n",
(void *) (xcp->cp0_epc),
@@ -900,7 +901,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
ieee754sp fs;
SPFROMREG(fs, MIPSInst_FS(ir));
- ieee754_csr.rm = ieee_rm[MIPSInst_FUNC(ir) & 0x3];
+ ieee754_csr.rm = ieee_rm[modeindex(MIPSInst_FUNC(ir))];
rv.w = ieee754sp_tint(fs);
ieee754_csr.rm = oldrm;
rfmt = w_fmt;
@@ -926,7 +927,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
ieee754sp fs;
SPFROMREG(fs, MIPSInst_FS(ir));
- ieee754_csr.rm = ieee_rm[MIPSInst_FUNC(ir) & 0x3];
+ ieee754_csr.rm = ieee_rm[modeindex(MIPSInst_FUNC(ir))];
rv.l = ieee754sp_tlong(fs);
ieee754_csr.rm = oldrm;
rfmt = l_fmt;
@@ -1074,7 +1075,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
ieee754dp fs;
DPFROMREG(fs, MIPSInst_FS(ir));
- ieee754_csr.rm = ieee_rm[MIPSInst_FUNC(ir) & 0x3];
+ ieee754_csr.rm = ieee_rm[modeindex(MIPSInst_FUNC(ir))];
rv.w = ieee754dp_tint(fs);
ieee754_csr.rm = oldrm;
rfmt = w_fmt;
@@ -1100,7 +1101,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
ieee754dp fs;
DPFROMREG(fs, MIPSInst_FS(ir));
- ieee754_csr.rm = ieee_rm[MIPSInst_FUNC(ir) & 0x3];
+ ieee754_csr.rm = ieee_rm[modeindex(MIPSInst_FUNC(ir))];
rv.l = ieee754dp_tlong(fs);
ieee754_csr.rm = oldrm;
rfmt = l_fmt;
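The cp1emu.c hunks stop masking the rounding-mode field with a bare 0x3 and go through FPU_CSR_RM and modeindex() instead, so only the FCSR rounding-mode bits select the entry in the mips_rm[]/ieee_rm[] translation tables. A small sketch of the idea, assuming modeindex() simply extracts the two low RM bits (an assumption; the macro itself is defined elsewhere in this patch), could be:

#define MY_FPU_CSR_RM   0x3                     /* FCSR bits 1:0 hold the rounding mode */
#define my_modeindex(v) ((v) & MY_FPU_CSR_RM)   /* assumed behaviour of modeindex() */

/* Remap the rounding-mode field through a 4-entry table while leaving
 * every other FCSR bit untouched, the same shape as the hunk above. */
static unsigned int remap_rounding_mode(unsigned int fcsr,
                                        const unsigned int rm_table[4])
{
        return (fcsr & ~MY_FPU_CSR_RM) | rm_table[my_modeindex(fcsr)];
}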
diff --git a/arch/mips/oprofile/op_model_loongson2.c b/arch/mips/oprofile/op_model_loongson2.c
index 29e2326b6257..d0d24e047676 100644
--- a/arch/mips/oprofile/op_model_loongson2.c
+++ b/arch/mips/oprofile/op_model_loongson2.c
@@ -8,7 +8,6 @@
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
- *
*/
#include <linux/init.h>
#include <linux/oprofile.h>
@@ -17,24 +16,18 @@
#include <loongson.h> /* LOONGSON2_PERFCNT_IRQ */
#include "op_impl.h"
-/*
- * a patch should be sent to oprofile with the loongson-specific support.
- * otherwise, the oprofile tool will not recognize this and complain about
- * "cpu_type 'unset' is not valid".
- */
#define LOONGSON2_CPU_TYPE "mips/loongson2"
-#define LOONGSON2_COUNTER1_EVENT(event) ((event & 0x0f) << 5)
-#define LOONGSON2_COUNTER2_EVENT(event) ((event & 0x0f) << 9)
-
-#define LOONGSON2_PERFCNT_EXL (1UL << 0)
-#define LOONGSON2_PERFCNT_KERNEL (1UL << 1)
-#define LOONGSON2_PERFCNT_SUPERVISOR (1UL << 2)
-#define LOONGSON2_PERFCNT_USER (1UL << 3)
-#define LOONGSON2_PERFCNT_INT_EN (1UL << 4)
#define LOONGSON2_PERFCNT_OVERFLOW (1ULL << 31)
-/* Loongson2 performance counter register */
+#define LOONGSON2_PERFCTRL_EXL (1UL << 0)
+#define LOONGSON2_PERFCTRL_KERNEL (1UL << 1)
+#define LOONGSON2_PERFCTRL_SUPERVISOR (1UL << 2)
+#define LOONGSON2_PERFCTRL_USER (1UL << 3)
+#define LOONGSON2_PERFCTRL_ENABLE (1UL << 4)
+#define LOONGSON2_PERFCTRL_EVENT(idx, event) \
+ (((event) & 0x0f) << ((idx) ? 9 : 5))
+
#define read_c0_perfctrl() __read_64bit_c0_register($24, 0)
#define write_c0_perfctrl(val) __write_64bit_c0_register($24, 0, val)
#define read_c0_perfcnt() __read_64bit_c0_register($25, 0)
@@ -49,7 +42,6 @@ static struct loongson2_register_config {
static char *oprofid = "LoongsonPerf";
static irqreturn_t loongson2_perfcount_handler(int irq, void *dev_id);
-/* Compute all of the registers in preparation for enabling profiling. */
static void loongson2_reg_setup(struct op_counter_config *cfg)
{
@@ -57,41 +49,38 @@ static void loongson2_reg_setup(struct op_counter_config *cfg)
reg.reset_counter1 = 0;
reg.reset_counter2 = 0;
- /* Compute the performance counter ctrl word. */
- /* For now count kernel and user mode */
+
+ /*
+ * Compute the performance counter ctrl word.
+ * For now, count kernel and user mode.
+ */
if (cfg[0].enabled) {
- ctrl |= LOONGSON2_COUNTER1_EVENT(cfg[0].event);
+ ctrl |= LOONGSON2_PERFCTRL_EVENT(0, cfg[0].event);
reg.reset_counter1 = 0x80000000ULL - cfg[0].count;
}
if (cfg[1].enabled) {
- ctrl |= LOONGSON2_COUNTER2_EVENT(cfg[1].event);
- reg.reset_counter2 = (0x80000000ULL - cfg[1].count);
+ ctrl |= LOONGSON2_PERFCTRL_EVENT(1, cfg[1].event);
+ reg.reset_counter2 = 0x80000000ULL - cfg[1].count;
}
if (cfg[0].enabled || cfg[1].enabled) {
- ctrl |= LOONGSON2_PERFCNT_EXL | LOONGSON2_PERFCNT_INT_EN;
+ ctrl |= LOONGSON2_PERFCTRL_EXL | LOONGSON2_PERFCTRL_ENABLE;
if (cfg[0].kernel || cfg[1].kernel)
- ctrl |= LOONGSON2_PERFCNT_KERNEL;
+ ctrl |= LOONGSON2_PERFCTRL_KERNEL;
if (cfg[0].user || cfg[1].user)
- ctrl |= LOONGSON2_PERFCNT_USER;
+ ctrl |= LOONGSON2_PERFCTRL_USER;
}
reg.ctrl = ctrl;
reg.cnt1_enabled = cfg[0].enabled;
reg.cnt2_enabled = cfg[1].enabled;
-
}
-/* Program all of the registers in preparation for enabling profiling. */
-
static void loongson2_cpu_setup(void *args)
{
- uint64_t perfcount;
-
- perfcount = (reg.reset_counter2 << 32) | reg.reset_counter1;
- write_c0_perfcnt(perfcount);
+ write_c0_perfcnt((reg.reset_counter2 << 32) | reg.reset_counter1);
}
static void loongson2_cpu_start(void *args)
@@ -114,15 +103,8 @@ static irqreturn_t loongson2_perfcount_handler(int irq, void *dev_id)
struct pt_regs *regs = get_irq_regs();
int enabled;
- /*
- * LOONGSON2 defines two 32-bit performance counters.
- * To avoid a race updating the registers we need to stop the counters
- * while we're messing with
- * them ...
- */
-
/* Check whether the irq belongs to me */
- enabled = read_c0_perfcnt() & LOONGSON2_PERFCNT_INT_EN;
+ enabled = read_c0_perfctrl() & LOONGSON2_PERFCTRL_ENABLE;
if (!enabled)
return IRQ_NONE;
enabled = reg.cnt1_enabled | reg.cnt2_enabled;
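The op_model_loongson2.c rewrite folds the two per-counter event macros into a single LOONGSON2_PERFCTRL_EVENT(idx, event), shifting the 4-bit event code to bit 5 for counter 0 and bit 9 for counter 1:

#define LOONGSON2_PERFCTRL_EVENT(idx, event)    \
        (((event) & 0x0f) << ((idx) ? 9 : 5))

/* Worked example: event 0x3 on counter 1 gives (0x3 & 0xf) << 9 == 0x600,
 * identical to what the old LOONGSON2_COUNTER2_EVENT(0x3) produced. */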
diff --git a/arch/mips/powertv/asic/prealloc-calliope.c b/arch/mips/powertv/asic/prealloc-calliope.c
index cd5b76a1c951..3fc5d46687a9 100644
--- a/arch/mips/powertv/asic/prealloc-calliope.c
+++ b/arch/mips/powertv/asic/prealloc-calliope.c
@@ -22,7 +22,9 @@
*/
#include <linux/init.h>
+#include <linux/ioport.h>
#include <asm/mach-powertv/asic.h>
+#include "prealloc.h"
/*
* NON_DVR_CAPABLE CALLIOPE RESOURCES
@@ -32,432 +34,234 @@ struct resource non_dvr_calliope_resources[] __initdata =
/*
* VIDEO / LX1
*/
- {
- .name = "ST231aImage", /* Delta-Mu 1 image and ram */
- .start = 0x24000000,
- .end = 0x24200000 - 1, /*2MiB */
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ST231aMonitor", /*8KiB block ST231a monitor */
- .start = 0x24200000,
- .end = 0x24202000 - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "MediaMemory1",
- .start = 0x24202000,
- .end = 0x26700000 - 1, /*~36.9MiB (32MiB - (2MiB + 8KiB)) */
- .flags = IORESOURCE_MEM,
- },
+ /* Delta-Mu 1 image (2MiB) */
+ PREALLOC_NORMAL("ST231aImage", 0x24000000, 0x24200000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 1 monitor (8KiB) */
+ PREALLOC_NORMAL("ST231aMonitor", 0x24200000, 0x24202000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 1 RAM (~36.9MiB (32MiB - (2MiB + 8KiB))) */
+ PREALLOC_NORMAL("MediaMemory1", 0x24202000, 0x26700000-1,
+ IORESOURCE_MEM)
+
/*
* Sysaudio Driver
*/
- {
- .name = "DSP_Image_Buff",
- .start = 0x00000000,
- .end = 0x000FFFFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_CPU_PCM_Buff",
- .start = 0x00000000,
- .end = 0x00009FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_AUX_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_Main_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
+ /* DSP code and data images (1MiB) */
+ PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC CPU PCM buffer (40KiB) */
+ PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC AUX buffer (128KiB) */
+ PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00020000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC Main buffer (128KiB) */
+ PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00020000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* STAVEM driver/STAPI
*/
- {
- .name = "AVMEMPartition0",
- .start = 0x00000000,
- .end = 0x00600000 - 1, /* 6 MB total */
- .flags = IORESOURCE_MEM,
- },
+ /* 6MiB */
+ PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00600000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* DOCSIS Subsystem
*/
- {
- .name = "Docsis",
- .start = 0x22000000,
- .end = 0x22700000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 7MiB */
+ PREALLOC_DOCSIS("Docsis", 0x27500000, 0x27c00000-1, IORESOURCE_MEM)
+
/*
* GHW HAL Driver
*/
- {
- .name = "GraphicsHeap",
- .start = 0x22700000,
- .end = 0x23500000 - 1, /* 14 MB total */
- .flags = IORESOURCE_MEM,
- },
+ /* PowerTV Graphics Heap (14MiB) */
+ PREALLOC_NORMAL("GraphicsHeap", 0x26700000, 0x26700000+(14*1048576)-1,
+ IORESOURCE_MEM)
+
/*
* multi com buffer area
*/
- {
- .name = "MulticomSHM",
- .start = 0x23700000,
- .end = 0x23720000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 128KiB */
+ PREALLOC_NORMAL("MulticomSHM", 0x23700000, 0x23720000-1,
+ IORESOURCE_MEM)
+
/*
* DMA Ring buffer (don't need recording buffers)
*/
- {
- .name = "BMM_Buffer",
- .start = 0x00000000,
- .end = 0x000AA000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 680KiB */
+ PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x000AA000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* Display bins buffer for unit0
*/
- {
- .name = "DisplayBins0",
- .start = 0x00000000,
- .end = 0x00000FFF, /* 4 KB total */
- .flags = IORESOURCE_MEM,
- },
+ /* 4KiB */
+ PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* AVFS: player HAL memory
- *
- *
*/
- {
- .name = "AvfsDmaMem",
- .start = 0x00000000,
- .end = 0x002c4c00 - 1, /* 945K * 3 for playback */
- .flags = IORESOURCE_MEM,
- },
+ /* 945K * 3 for playback */
+ PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x002c4c00-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* PMEM
*/
- {
- .name = "DiagPersistentMemory",
- .start = 0x00000000,
- .end = 0x10000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* Persistent memory for diagnostics (64KiB) */
+ PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* Smartcard
*/
- {
- .name = "SmartCardInfo",
- .start = 0x00000000,
- .end = 0x2800 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* Read and write buffers for Internal/External cards (10KiB) */
+ PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* NAND Flash
*/
- {
- .name = "NandFlash",
- .start = NAND_FLASH_BASE,
- .end = NAND_FLASH_BASE + 0x400 - 1,
- .flags = IORESOURCE_IO,
- },
+ /* 10KiB */
+ PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
+ IORESOURCE_MEM)
+
/*
* Synopsys GMAC Memory Region
*/
- {
- .name = "GMAC",
- .start = 0x00000000,
- .end = 0x00010000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 64KiB */
+ PREALLOC_NORMAL("GMAC", 0x00000000, 0x00010000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- * Add other resources here
+ * TFTPBuffer
*
+ * This buffer is used in some minimal configurations (e.g. two-way
+ * loader) for storing software images
*/
- { },
-};
+ PREALLOC_TFTP("TFTPBuffer", 0x00000000, MEBIBYTE(80)-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-struct resource non_dvr_vz_calliope_resources[] __initdata =
-{
/*
- * VIDEO / LX1
- */
- {
- .name = "ST231aImage", /* Delta-Mu 1 image and ram */
- .start = 0x24000000,
- .end = 0x24200000 - 1, /*2 Meg */
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ST231aMonitor", /* 8k block ST231a monitor */
- .start = 0x24200000,
- .end = 0x24202000 - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "MediaMemory1",
- .start = 0x22202000,
- .end = 0x22C20B85 - 1, /* 10.12 Meg */
- .flags = IORESOURCE_MEM,
- },
- /*
- * Sysaudio Driver
- */
- {
- .name = "DSP_Image_Buff",
- .start = 0x00000000,
- .end = 0x000FFFFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_CPU_PCM_Buff",
- .start = 0x00000000,
- .end = 0x00009FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_AUX_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_Main_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
- /*
- * STAVEM driver/STAPI
- */
- {
- .name = "AVMEMPartition0",
- .start = 0x20300000,
- .end = 0x20620000-1, /*3.125 MB total */
- .flags = IORESOURCE_MEM,
- },
- /*
- * GHW HAL Driver
- */
- {
- .name = "GraphicsHeap",
- .start = 0x20100000,
- .end = 0x20300000 - 1,
- .flags = IORESOURCE_MEM,
- },
- /*
- * multi com buffer area
- */
- {
- .name = "MulticomSHM",
- .start = 0x23900000,
- .end = 0x23920000 - 1,
- .flags = IORESOURCE_MEM,
- },
- /*
- * DMA Ring buffer
- */
- {
- .name = "BMM_Buffer",
- .start = 0x00000000,
- .end = 0x000AA000 - 1,
- .flags = IORESOURCE_MEM,
- },
- /*
- * Display bins buffer for unit0
- */
- {
- .name = "DisplayBins0",
- .start = 0x00000000,
- .end = 0x00000FFF,
- .flags = IORESOURCE_MEM,
- },
- /*
- * PMEM
- */
- {
- .name = "DiagPersistentMemory",
- .start = 0x00000000,
- .end = 0x10000 - 1,
- .flags = IORESOURCE_MEM,
- },
- /*
- * Smartcard
- */
- {
- .name = "SmartCardInfo",
- .start = 0x00000000,
- .end = 0x2800 - 1,
- .flags = IORESOURCE_MEM,
- },
- /*
- * NAND Flash
+ * Add other resources here
*/
- {
- .name = "NandFlash",
- .start = NAND_FLASH_BASE,
- .end = NAND_FLASH_BASE+0x400 - 1,
- .flags = IORESOURCE_IO,
- },
+
/*
- * Synopsys GMAC Memory Region
+ * End of Resource marker
*/
{
- .name = "GMAC",
- .start = 0x00000000,
- .end = 0x00010000 - 1,
- .flags = IORESOURCE_MEM,
+ .flags = 0,
},
- /*
- * Add other resources here
- */
- { },
};
+
struct resource non_dvr_vze_calliope_resources[] __initdata =
{
/*
* VIDEO / LX1
*/
- {
- .name = "ST231aImage", /* Delta-Mu 1 image and ram */
- .start = 0x22000000,
- .end = 0x22200000 - 1, /*2 Meg */
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ST231aMonitor", /* 8k block ST231a monitor */
- .start = 0x22200000,
- .end = 0x22202000 - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "MediaMemory1",
- .start = 0x22202000,
- .end = 0x22C20B85 - 1, /* 10.12 Meg */
- .flags = IORESOURCE_MEM,
- },
+ /* Delta-Mu 1 image (2MiB) */
+ PREALLOC_NORMAL("ST231aImage", 0x22000000, 0x22200000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 1 monitor (8KiB) */
+ PREALLOC_NORMAL("ST231aMonitor", 0x22200000, 0x22202000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 1 RAM (10.12MiB) */
+ PREALLOC_NORMAL("MediaMemory1", 0x22202000, 0x22C20B85-1,
+ IORESOURCE_MEM)
+
/*
* Sysaudio Driver
*/
- {
- .name = "DSP_Image_Buff",
- .start = 0x00000000,
- .end = 0x000FFFFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_CPU_PCM_Buff",
- .start = 0x00000000,
- .end = 0x00009FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_AUX_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_Main_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
+ /* DSP code and data images (1MiB) */
+ PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC CPU PCM buffer (40KiB) */
+ PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC AUX buffer (16KiB) */
+ PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00004000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC Main buffer (16KiB) */
+ PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00004000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* STAVEM driver/STAPI
*/
- {
- .name = "AVMEMPartition0",
- .start = 0x20396000,
- .end = 0x206B6000 - 1, /* 3.125 MB total */
- .flags = IORESOURCE_MEM,
- },
+ /* 3.125MiB */
+ PREALLOC_NORMAL("AVMEMPartition0", 0x20396000, 0x206B6000-1,
+ IORESOURCE_MEM)
+
/*
* GHW HAL Driver
*/
- {
- .name = "GraphicsHeap",
- .start = 0x20100000,
- .end = 0x20396000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* PowerTV Graphics Heap (2.59MiB) */
+ PREALLOC_NORMAL("GraphicsHeap", 0x20100000, 0x20396000-1,
+ IORESOURCE_MEM)
+
/*
* multi com buffer area
*/
- {
- .name = "MulticomSHM",
- .start = 0x206B6000,
- .end = 0x206D6000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 128KiB */
+ PREALLOC_NORMAL("MulticomSHM", 0x206B6000, 0x206D6000-1,
+ IORESOURCE_MEM)
+
/*
- * DMA Ring buffer
+ * DMA Ring buffer (don't need recording buffers)
*/
- {
- .name = "BMM_Buffer",
- .start = 0x00000000,
- .end = 0x000AA000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 680KiB */
+ PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x000AA000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* Display bins buffer for unit0
*/
- {
- .name = "DisplayBins0",
- .start = 0x00000000,
- .end = 0x00000FFF,
- .flags = IORESOURCE_MEM,
- },
+ /* 4KiB */
+ PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* PMEM
*/
- {
- .name = "DiagPersistentMemory",
- .start = 0x00000000,
- .end = 0x10000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* Persistent memory for diagnostics (64KiB) */
+ PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* Smartcard
*/
- {
- .name = "SmartCardInfo",
- .start = 0x00000000,
- .end = 0x2800 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* Read and write buffers for Internal/External cards (10KiB) */
+ PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* NAND Flash
*/
- {
- .name = "NandFlash",
- .start = NAND_FLASH_BASE,
- .end = NAND_FLASH_BASE+0x400 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 10KiB */
+ PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
+ IORESOURCE_MEM)
+
/*
* Synopsys GMAC Memory Region
*/
- {
- .name = "GMAC",
- .start = 0x00000000,
- .end = 0x00010000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 64KiB */
+ PREALLOC_NORMAL("GMAC", 0x00000000, 0x00010000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* Add other resources here
*/
- { },
+
+ /*
+ * End of Resource marker
+ */
+ {
+ .flags = 0,
+ },
};
struct resource non_dvr_vzf_calliope_resources[] __initdata =
@@ -465,156 +269,117 @@ struct resource non_dvr_vzf_calliope_resources[] __initdata =
/*
* VIDEO / LX1
*/
- {
- .name = "ST231aImage", /*Delta-Mu 1 image and ram */
- .start = 0x24000000,
- .end = 0x24200000 - 1, /*2MiB */
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ST231aMonitor", /*8KiB block ST231a monitor */
- .start = 0x24200000,
- .end = 0x24202000 - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "MediaMemory1",
- .start = 0x24202000,
- /* ~19.4 (21.5MiB - (2MiB + 8KiB)) */
- .end = 0x25580000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* Delta-Mu 1 image (2MiB) */
+ PREALLOC_NORMAL("ST231aImage", 0x24000000, 0x24200000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 1 monitor (8KiB) */
+ PREALLOC_NORMAL("ST231aMonitor", 0x24200000, 0x24202000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 1 RAM (~19.4MiB (21.5MiB - (2MiB + 8KiB))) */
+ PREALLOC_NORMAL("MediaMemory1", 0x24202000, 0x25580000-1,
+ IORESOURCE_MEM)
+
/*
* Sysaudio Driver
*/
- {
- .name = "DSP_Image_Buff",
- .start = 0x00000000,
- .end = 0x000FFFFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_CPU_PCM_Buff",
- .start = 0x00000000,
- .end = 0x00009FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_AUX_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_Main_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
+ /* DSP code and data images (1MiB) */
+ PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC CPU PCM buffer (40KiB) */
+ PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC AUX buffer (128KiB) */
+ PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00020000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC Main buffer (128KiB) */
+ PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00020000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* STAVEM driver/STAPI
*/
- {
- .name = "AVMEMPartition0",
- .start = 0x00000000,
- .end = 0x00480000 - 1, /* 4.5 MB total */
- .flags = IORESOURCE_MEM,
- },
+ /* 4.5MiB */
+ PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00480000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* GHW HAL Driver
*/
- {
- .name = "GraphicsHeap",
- .start = 0x22700000,
- .end = 0x23500000 - 1, /* 14 MB total */
- .flags = IORESOURCE_MEM,
- },
+ /* PowerTV Graphics Heap (14MiB) */
+ PREALLOC_NORMAL("GraphicsHeap", 0x25600000, 0x25600000+(14*1048576)-1,
+ IORESOURCE_MEM)
+
/*
* multi com buffer area
*/
- {
- .name = "MulticomSHM",
- .start = 0x23700000,
- .end = 0x23720000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 128KiB */
+ PREALLOC_NORMAL("MulticomSHM", 0x23700000, 0x23720000-1,
+ IORESOURCE_MEM)
+
/*
* DMA Ring buffer (don't need recording buffers)
*/
- {
- .name = "BMM_Buffer",
- .start = 0x00000000,
- .end = 0x000AA000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 680KiB */
+ PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x000AA000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* Display bins buffer for unit0
*/
- {
- .name = "DisplayBins0",
- .start = 0x00000000,
- .end = 0x00000FFF, /* 4 KB total */
- .flags = IORESOURCE_MEM,
- },
+ /* 4KiB */
+ PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* Display bins buffer for unit1
*/
- {
- .name = "DisplayBins1",
- .start = 0x00000000,
- .end = 0x00000FFF, /* 4 KB total */
- .flags = IORESOURCE_MEM,
- },
+ /* 4KiB */
+ PREALLOC_NORMAL("DisplayBins1", 0x00000000, 0x00001000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* AVFS: player HAL memory
- *
- *
*/
- {
- .name = "AvfsDmaMem",
- .start = 0x00000000,
- .end = 0x002c4c00 - 1, /* 945K * 3 for playback */
- .flags = IORESOURCE_MEM,
- },
+ /* 945K * 3 for playback */
+ PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x002c4c00-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* PMEM
*/
- {
- .name = "DiagPersistentMemory",
- .start = 0x00000000,
- .end = 0x10000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* Persistent memory for diagnostics (64KiB) */
+ PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* Smartcard
*/
- {
- .name = "SmartCardInfo",
- .start = 0x00000000,
- .end = 0x2800 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* Read and write buffers for Internal/External cards (10KiB) */
+ PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* NAND Flash
*/
- {
- .name = "NandFlash",
- .start = NAND_FLASH_BASE,
- .end = NAND_FLASH_BASE + 0x400 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 10KiB */
+ PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
+ IORESOURCE_MEM)
+
/*
* Synopsys GMAC Memory Region
*/
- {
- .name = "GMAC",
- .start = 0x00000000,
- .end = 0x00010000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 64KiB */
+ PREALLOC_NORMAL("GMAC", 0x00000000, 0x00010000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* Add other resources here
*/
- { },
+
+ /*
+ * End of Resource marker
+ */
+ {
+ .flags = 0,
+ },
};
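The prealloc-*.c tables above now build their struct resource entries through PREALLOC_NORMAL(), PREALLOC_DOCSIS(), PREALLOC_PMEM() and PREALLOC_TFTP() from the new prealloc.h, which is added elsewhere in this patch and not shown in this excerpt. Purely as an illustration of what one table line stands for (this is an assumption about the macro's shape, not the actual header, and the real header presumably distinguishes the DOCSIS/PMEM/TFTP variants in ways not visible here), an entry such as PREALLOC_NORMAL("GMAC", 0x00000000, 0x00010000-1, IORESOURCE_MEM) corresponds to a resource initializer along these lines:

/* Illustrative only: one plausible expansion for a PREALLOC_NORMAL()
 * table entry; the real macro lives in the (unshown) prealloc.h. */
#define PREALLOC_NORMAL(name_str, start_addr, end_addr, res_flags)     \
        {                                                               \
                .name   = (name_str),                                   \
                .start  = (start_addr),                                 \
                .end    = (end_addr),                                   \
                .flags  = (res_flags),                                  \
        },

/* MEBIBYTE(n), used by the TFTPBuffer entries, presumably matches the
 * 14*1048576 arithmetic used for the graphics heaps above. */
#define MEBIBYTE(n)     ((n) * 1048576)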
diff --git a/arch/mips/powertv/asic/prealloc-cronus.c b/arch/mips/powertv/asic/prealloc-cronus.c
index 45a5c3ea718c..c532b50521ec 100644
--- a/arch/mips/powertv/asic/prealloc-cronus.c
+++ b/arch/mips/powertv/asic/prealloc-cronus.c
@@ -22,7 +22,9 @@
*/
#include <linux/init.h>
+#include <linux/ioport.h>
#include <asm/mach-powertv/asic.h>
+#include "prealloc.h"
/*
* DVR_CAPABLE CRONUS RESOURCES
@@ -30,305 +32,161 @@
struct resource dvr_cronus_resources[] __initdata =
{
/*
- *
* VIDEO1 / LX1
- *
*/
- {
- .name = "ST231aImage", /* Delta-Mu 1 image and ram */
- .start = 0x24000000,
- .end = 0x241FFFFF, /* 2MiB */
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ST231aMonitor", /* 8KiB block ST231a monitor */
- .start = 0x24200000,
- .end = 0x24201FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "MediaMemory1",
- .start = 0x24202000,
- .end = 0x25FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
- .flags = IORESOURCE_MEM,
- },
+ /* Delta-Mu 1 image (2MiB) */
+ PREALLOC_NORMAL("ST231aImage", 0x24000000, 0x24200000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 1 monitor (8KiB) */
+ PREALLOC_NORMAL("ST231aMonitor", 0x24200000, 0x24202000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 1 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
+ PREALLOC_NORMAL("MediaMemory1", 0x24202000, 0x26000000-1,
+ IORESOURCE_MEM)
+
/*
- *
* VIDEO2 / LX2
- *
*/
- {
- .name = "ST231bImage", /* Delta-Mu 2 image and ram */
- .start = 0x60000000,
- .end = 0x601FFFFF, /* 2MiB */
- .flags = IORESOURCE_IO,
- },
- {
- .name = "ST231bMonitor", /* 8KiB block ST231b monitor */
- .start = 0x60200000,
- .end = 0x60201FFF,
- .flags = IORESOURCE_IO,
- },
- {
- .name = "MediaMemory2",
- .start = 0x60202000,
- .end = 0x61FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
- .flags = IORESOURCE_IO,
- },
+ /* Delta-Mu 2 image (2MiB) */
+ PREALLOC_NORMAL("ST231bImage", 0x60000000, 0x60200000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 2 monitor (8KiB) */
+ PREALLOC_NORMAL("ST231bMonitor", 0x60200000, 0x60202000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 2 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
+ PREALLOC_NORMAL("MediaMemory2", 0x60202000, 0x62000000-1,
+ IORESOURCE_MEM)
+
/*
- *
* Sysaudio Driver
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * DSP_Image_Buff - DSP code and data images (1MB)
- * ADSC_CPU_PCM_Buff - ADSC CPU PCM buffer (40KB)
- * ADSC_AUX_Buff - ADSC AUX buffer (16KB)
- * ADSC_Main_Buff - ADSC Main buffer (16KB)
- *
*/
- {
- .name = "DSP_Image_Buff",
- .start = 0x00000000,
- .end = 0x000FFFFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_CPU_PCM_Buff",
- .start = 0x00000000,
- .end = 0x00009FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_AUX_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_Main_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
+ /* DSP code and data images (1MiB) */
+ PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC CPU PCM buffer (40KiB) */
+ PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC AUX buffer (128KiB) */
+ PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00020000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC Main buffer (128KiB) */
+ PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00020000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* STAVEM driver/STAPI
*
- * This driver requires:
- *
- * Arbitrary Based Buffers:
* This memory area is used for allocating buffers for Video decoding
* purposes. Allocation/De-allocation within this buffer is managed
* by the STAVMEM driver of the STAPI. They could be Decimated
* Picture Buffers, Intermediate Buffers, as deemed necessary for
* video decoding purposes, for any video decoders on Zeus.
- *
*/
- {
- .name = "AVMEMPartition0",
- .start = 0x63580000,
- .end = 0x64180000 - 1, /* 12 MB total */
- .flags = IORESOURCE_IO,
- },
+ /* 12MiB */
+ PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00c00000-1,
+ IORESOURCE_MEM)
+
/*
- *
* DOCSIS Subsystem
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Docsis -
- *
*/
- {
- .name = "Docsis",
- .start = 0x62000000,
- .end = 0x62700000 - 1, /* 7 MB total */
- .flags = IORESOURCE_IO,
- },
+ /* 7MiB */
+ PREALLOC_DOCSIS("Docsis", 0x67500000, 0x67c00000-1, IORESOURCE_MEM)
+
/*
- *
* GHW HAL Driver
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * GraphicsHeap - PowerTV Graphics Heap
- *
*/
- {
- .name = "GraphicsHeap",
- .start = 0x62700000,
- .end = 0x63500000 - 1, /* 14 MB total */
- .flags = IORESOURCE_IO,
- },
+ /* PowerTV Graphics Heap (14MiB) */
+ PREALLOC_NORMAL("GraphicsHeap", 0x62700000, 0x63500000-1,
+ IORESOURCE_MEM)
+
/*
- *
* multi com buffer area
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Docsis -
- *
*/
- {
- .name = "MulticomSHM",
- .start = 0x26000000,
- .end = 0x26020000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 128KiB */
+ PREALLOC_NORMAL("MulticomSHM", 0x26000000, 0x26020000-1,
+ IORESOURCE_MEM)
+
/*
- *
* DMA Ring buffer
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Docsis -
- *
*/
- {
- .name = "BMM_Buffer",
- .start = 0x00000000,
- .end = 0x00280000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x002EA000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* Display bins buffer for unit0
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Display Bins for unit0
- *
*/
- {
- .name = "DisplayBins0",
- .start = 0x00000000,
- .end = 0x00000FFF, /* 4 KB total */
- .flags = IORESOURCE_MEM,
- },
+ /* 4KiB */
+ PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
- * Display bins buffer
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Display Bins for unit1
- *
+ * Display bins buffer for unit1
*/
- {
- .name = "DisplayBins1",
- .start = 0x64AD4000,
- .end = 0x64AD5000 - 1, /* 4 KB total */
- .flags = IORESOURCE_IO,
- },
+ /* 4KiB */
+ PREALLOC_NORMAL("DisplayBins1", 0x00000000, 0x00001000-1,
+ IORESOURCE_MEM)
+
/*
- *
* ITFS
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Docsis -
- *
*/
- {
- .name = "ITFS",
- .start = 0x64180000,
- /* 815,104 bytes each for 2 ITFS partitions. */
- .end = 0x6430DFFF,
- .flags = IORESOURCE_IO,
- },
+ /* 815,104 bytes each for 2 ITFS partitions. */
+ PREALLOC_NORMAL("ITFS", 0x00000000, 0x0018E000-1, IORESOURCE_MEM)
+
/*
- *
* AVFS
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Docsis -
- *
*/
- {
- .name = "AvfsDmaMem",
- .start = 0x6430E000,
- /* (945K * 8) = (128K *3) 5 playbacks / 3 server */
- .end = 0x64AD0000 - 1,
- .flags = IORESOURCE_IO,
- },
- {
- .name = "AvfsFileSys",
- .start = 0x64AD0000,
- .end = 0x64AD1000 - 1, /* 4K */
- .flags = IORESOURCE_IO,
- },
+ /* (945K * 8) + (128K * 3); 5 playbacks / 3 servers */
+ PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x007c2000-1,
+ IORESOURCE_MEM)
+
+ /* 4KiB */
+ PREALLOC_NORMAL("AvfsFileSys", 0x00000000, 0x00001000-1,
+ IORESOURCE_MEM)
+
/*
- *
* PMEM
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Persistent memory for diagnostics.
- *
*/
- {
- .name = "DiagPersistentMemory",
- .start = 0x00000000,
- .end = 0x10000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* Persistent memory for diagnostics (64KiB) */
+ PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* Smartcard
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Read and write buffers for Internal/External cards
- *
*/
- {
- .name = "SmartCardInfo",
- .start = 0x64AD1000,
- .end = 0x64AD3800 - 1,
- .flags = IORESOURCE_IO,
- },
+ /* Read and write buffers for Internal/External cards (10KiB) */
+ PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
+ IORESOURCE_MEM)
+
/*
- *
* KAVNET
- * NP Reset Vector - must be of the form xxCxxxxx
- * NP Image - must be video bank 1
- * NP IPC - must be video bank 2
*/
- {
- .name = "NP_Reset_Vector",
- .start = 0x27c00000,
- .end = 0x27c01000 - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "NP_Image",
- .start = 0x27020000,
- .end = 0x27060000 - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "NP_IPC",
- .start = 0x63500000,
- .end = 0x63580000 - 1,
- .flags = IORESOURCE_IO,
- },
+ /* NP Reset Vector - must be of the form xxCxxxxx (4KiB) */
+ PREALLOC_NORMAL("NP_Reset_Vector", 0x27c00000, 0x27c01000-1,
+ IORESOURCE_MEM)
+ /* NP Image - must be video bank 1 (320KiB) */
+ PREALLOC_NORMAL("NP_Image", 0x27020000, 0x27070000-1, IORESOURCE_MEM)
+ /* NP IPC - must be video bank 2 (512KiB) */
+ PREALLOC_NORMAL("NP_IPC", 0x63500000, 0x63580000-1, IORESOURCE_MEM)
+
+ /*
+ * TFTPBuffer
+ *
+ * This buffer is used in some minimal configurations (e.g. two-way
+ * loader) for storing software images
+ */
+ PREALLOC_TFTP("TFTPBuffer", 0x00000000, MEBIBYTE(80)-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* Add other resources here
*/
- { },
+
+ /*
+ * End of Resource marker
+ */
+ {
+ .flags = 0,
+ },
};
/*
@@ -337,272 +195,146 @@ struct resource dvr_cronus_resources[] __initdata =
struct resource non_dvr_cronus_resources[] __initdata =
{
/*
- *
* VIDEO1 / LX1
- *
*/
- {
- .name = "ST231aImage", /* Delta-Mu 1 image and ram */
- .start = 0x24000000,
- .end = 0x241FFFFF, /* 2MiB */
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ST231aMonitor", /* 8KiB block ST231a monitor */
- .start = 0x24200000,
- .end = 0x24201FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "MediaMemory1",
- .start = 0x24202000,
- .end = 0x25FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
- .flags = IORESOURCE_MEM,
- },
+ /* Delta-Mu 1 image (2MiB) */
+ PREALLOC_NORMAL("ST231aImage", 0x24000000, 0x24200000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 1 monitor (8KiB) */
+ PREALLOC_NORMAL("ST231aMonitor", 0x24200000, 0x24202000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 1 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
+ PREALLOC_NORMAL("MediaMemory1", 0x24202000, 0x26000000-1,
+ IORESOURCE_MEM)
+
/*
- *
* VIDEO2 / LX2
- *
*/
- {
- .name = "ST231bImage", /* Delta-Mu 2 image and ram */
- .start = 0x60000000,
- .end = 0x601FFFFF, /* 2MiB */
- .flags = IORESOURCE_IO,
- },
- {
- .name = "ST231bMonitor", /* 8KiB block ST231b monitor */
- .start = 0x60200000,
- .end = 0x60201FFF,
- .flags = IORESOURCE_IO,
- },
- {
- .name = "MediaMemory2",
- .start = 0x60202000,
- .end = 0x61FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
- .flags = IORESOURCE_IO,
- },
+ /* Delta-Mu 2 image (2MiB) */
+ PREALLOC_NORMAL("ST231bImage", 0x60000000, 0x60200000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 2 monitor (8KiB) */
+ PREALLOC_NORMAL("ST231bMonitor", 0x60200000, 0x60202000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 2 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
+ PREALLOC_NORMAL("MediaMemory2", 0x60202000, 0x62000000-1,
+ IORESOURCE_MEM)
+
/*
- *
* Sysaudio Driver
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * DSP_Image_Buff - DSP code and data images (1MB)
- * ADSC_CPU_PCM_Buff - ADSC CPU PCM buffer (40KB)
- * ADSC_AUX_Buff - ADSC AUX buffer (16KB)
- * ADSC_Main_Buff - ADSC Main buffer (16KB)
- *
*/
- {
- .name = "DSP_Image_Buff",
- .start = 0x00000000,
- .end = 0x000FFFFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_CPU_PCM_Buff",
- .start = 0x00000000,
- .end = 0x00009FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_AUX_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_Main_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
+ /* DSP code and data images (1MiB) */
+ PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC CPU PCM buffer (40KiB) */
+ PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC AUX buffer (128KiB) */
+ PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00020000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC Main buffer (128KiB) */
+ PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00020000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* STAVEM driver/STAPI
*
- * This driver requires:
- *
- * Arbitrary Based Buffers:
* This memory area is used for allocating buffers for Video decoding
* purposes. Allocation/De-allocation within this buffer is managed
* by the STAVMEM driver of the STAPI. They could be Decimated
* Picture Buffers, Intermediate Buffers, as deemed necessary for
* video decoding purposes, for any video decoders on Zeus.
- *
*/
- {
- .name = "AVMEMPartition0",
- .start = 0x63580000,
- .end = 0x64180000 - 1, /* 12 MB total */
- .flags = IORESOURCE_IO,
- },
+ /* 12MiB */
+ PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00c00000-1,
+ IORESOURCE_MEM)
+
/*
- *
* DOCSIS Subsystem
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Docsis -
- *
*/
- {
- .name = "Docsis",
- .start = 0x62000000,
- .end = 0x62700000 - 1, /* 7 MB total */
- .flags = IORESOURCE_IO,
- },
+ /* 7MiB */
+ PREALLOC_DOCSIS("Docsis", 0x67500000, 0x67c00000-1, IORESOURCE_MEM)
+
/*
- *
* GHW HAL Driver
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * GraphicsHeap - PowerTV Graphics Heap
- *
*/
- {
- .name = "GraphicsHeap",
- .start = 0x62700000,
- .end = 0x63500000 - 1, /* 14 MB total */
- .flags = IORESOURCE_IO,
- },
+ /* PowerTV Graphics Heap (14MiB) */
+ PREALLOC_NORMAL("GraphicsHeap", 0x62700000, 0x63500000-1,
+ IORESOURCE_MEM)
+
/*
- *
* multi com buffer area
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Docsis -
- *
*/
- {
- .name = "MulticomSHM",
- .start = 0x26000000,
- .end = 0x26020000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 128KiB */
+ PREALLOC_NORMAL("MulticomSHM", 0x26000000, 0x26020000-1,
+ IORESOURCE_MEM)
+
/*
- *
- * DMA Ring buffer
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Docsis -
- *
+ * DMA Ring buffer (don't need recording buffers)
*/
- {
- .name = "BMM_Buffer",
- .start = 0x00000000,
- .end = 0x000AA000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 680KiB */
+ PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x000AA000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* Display bins buffer for unit0
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Display Bins for unit0
- *
*/
- {
- .name = "DisplayBins0",
- .start = 0x00000000,
- .end = 0x00000FFF, /* 4 KB total */
- .flags = IORESOURCE_MEM,
- },
+ /* 4KiB */
+ PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
- * Display bins buffer
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Display Bins for unit1
- *
+ * Display bins buffer for unit1
*/
- {
- .name = "DisplayBins1",
- .start = 0x64AD4000,
- .end = 0x64AD5000 - 1, /* 4 KB total */
- .flags = IORESOURCE_IO,
- },
+ /* 4KiB */
+ PREALLOC_NORMAL("DisplayBins1", 0x00000000, 0x00001000-1,
+ IORESOURCE_MEM)
+
/*
- *
* AVFS: player HAL memory
- *
- *
*/
- {
- .name = "AvfsDmaMem",
- .start = 0x6430E000,
- .end = 0x645D2C00 - 1, /* 945K * 3 for playback */
- .flags = IORESOURCE_IO,
- },
+ /* 945K * 3 for playback */
+ PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x002c4c00-1, IORESOURCE_MEM)
+
/*
- *
* PMEM
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Persistent memory for diagnostics.
- *
*/
- {
- .name = "DiagPersistentMemory",
- .start = 0x00000000,
- .end = 0x10000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* Persistent memory for diagnostics (64KiB) */
+ PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* Smartcard
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Read and write buffers for Internal/External cards
- *
*/
- {
- .name = "SmartCardInfo",
- .start = 0x64AD1000,
- .end = 0x64AD3800 - 1,
- .flags = IORESOURCE_IO,
- },
+ /* Read and write buffers for Internal/External cards (10KiB) */
+ PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1, IORESOURCE_MEM)
+
/*
- *
* KAVNET
- * NP Reset Vector - must be of the form xxCxxxxx
- * NP Image - must be video bank 1
- * NP IPC - must be video bank 2
+ */
+ /* NP Reset Vector - must be of the form xxCxxxxx (4KiB) */
+ PREALLOC_NORMAL("NP_Reset_Vector", 0x27c00000, 0x27c01000-1,
+ IORESOURCE_MEM)
+ /* NP Image - must be video bank 1 (320KiB) */
+ PREALLOC_NORMAL("NP_Image", 0x27020000, 0x27070000-1, IORESOURCE_MEM)
+ /* NP IPC - must be video bank 2 (512KiB) */
+ PREALLOC_NORMAL("NP_IPC", 0x63500000, 0x63580000-1, IORESOURCE_MEM)
+
+ /*
+ * NAND Flash
+ */
+ /* 10KiB */
+ PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
+ IORESOURCE_MEM)
+
+ /*
+ * Add other resources here
+ */
+
+ /*
+ * End of Resource marker
*/
{
- .name = "NP_Reset_Vector",
- .start = 0x27c00000,
- .end = 0x27c01000 - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "NP_Image",
- .start = 0x27020000,
- .end = 0x27060000 - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "NP_IPC",
- .start = 0x63500000,
- .end = 0x63580000 - 1,
- .flags = IORESOURCE_IO,
+ .flags = 0,
},
- { },
};
diff --git a/arch/mips/powertv/asic/prealloc-cronuslite.c b/arch/mips/powertv/asic/prealloc-cronuslite.c
index 23a905613c04..b5537e49e7f5 100644
--- a/arch/mips/powertv/asic/prealloc-cronuslite.c
+++ b/arch/mips/powertv/asic/prealloc-cronuslite.c
@@ -22,7 +22,9 @@
*/
#include <linux/init.h>
+#include <linux/ioport.h>
#include <asm/mach-powertv/asic.h>
+#include "prealloc.h"
/*
* NON_DVR_CAPABLE CRONUSLITE RESOURCES
@@ -30,261 +32,143 @@
struct resource non_dvr_cronuslite_resources[] __initdata =
{
/*
- *
* VIDEO2 / LX2
- *
*/
- {
- .name = "ST231aImage", /* Delta-Mu 2 image and ram */
- .start = 0x60000000,
- .end = 0x601FFFFF, /* 2MiB */
- .flags = IORESOURCE_IO,
- },
- {
- .name = "ST231aMonitor", /* 8KiB block ST231b monitor */
- .start = 0x60200000,
- .end = 0x60201FFF,
- .flags = IORESOURCE_IO,
- },
- {
- .name = "MediaMemory1",
- .start = 0x60202000,
- .end = 0x61FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
- .flags = IORESOURCE_IO,
- },
+ /* Delta-Mu 1 image (2MiB) */
+ PREALLOC_NORMAL("ST231aImage", 0x60000000, 0x60200000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 1 monitor (8KiB) */
+ PREALLOC_NORMAL("ST231aMonitor", 0x60200000, 0x60202000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 1 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
+ PREALLOC_NORMAL("MediaMemory1", 0x60202000, 0x62000000-1,
+ IORESOURCE_MEM)
+
/*
- *
* Sysaudio Driver
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * DSP_Image_Buff - DSP code and data images (1MB)
- * ADSC_CPU_PCM_Buff - ADSC CPU PCM buffer (40KB)
- * ADSC_AUX_Buff - ADSC AUX buffer (16KB)
- * ADSC_Main_Buff - ADSC Main buffer (16KB)
- *
*/
- {
- .name = "DSP_Image_Buff",
- .start = 0x00000000,
- .end = 0x000FFFFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_CPU_PCM_Buff",
- .start = 0x00000000,
- .end = 0x00009FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_AUX_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_Main_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
+ /* DSP code and data images (1MiB) */
+ PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC CPU PCM buffer (40KiB) */
+ PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC AUX buffer (128KiB) */
+ PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00020000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC Main buffer (128KiB) */
+ PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00020000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* STAVEM driver/STAPI
*
- * This driver requires:
- *
- * Arbitrary Based Buffers:
* This memory area is used for allocating buffers for Video decoding
* purposes. Allocation/De-allocation within this buffer is managed
* by the STAVMEM driver of the STAPI. They could be Decimated
* Picture Buffers, Intermediate Buffers, as deemed necessary for
* video decoding purposes, for any video decoders on Zeus.
- *
*/
- {
- .name = "AVMEMPartition0",
- .start = 0x63580000,
- .end = 0x63B80000 - 1, /* 6 MB total */
- .flags = IORESOURCE_IO,
- },
+ /* 6MiB */
+ PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00600000-1,
+ IORESOURCE_MEM)
+
/*
- *
* DOCSIS Subsystem
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Docsis -
- *
*/
- {
- .name = "Docsis",
- .start = 0x62000000,
- .end = 0x62700000 - 1, /* 7 MB total */
- .flags = IORESOURCE_IO,
- },
+ /* 7MiB */
+ PREALLOC_DOCSIS("Docsis", 0x67500000, 0x67c00000-1, IORESOURCE_MEM)
+
/*
- *
* GHW HAL Driver
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * GraphicsHeap - PowerTV Graphics Heap
- *
*/
- {
- .name = "GraphicsHeap",
- .start = 0x62700000,
- .end = 0x63500000 - 1, /* 14 MB total */
- .flags = IORESOURCE_IO,
- },
+ /* PowerTV Graphics Heap (14MiB) */
+ PREALLOC_NORMAL("GraphicsHeap", 0x62700000, 0x63500000-1,
+ IORESOURCE_MEM)
+
/*
- *
* multi com buffer area
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Docsis -
- *
*/
- {
- .name = "MulticomSHM",
- .start = 0x26000000,
- .end = 0x26020000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 128KiB */
+ PREALLOC_NORMAL("MulticomSHM", 0x26000000, 0x26020000-1,
+ IORESOURCE_MEM)
+
/*
- *
- * DMA Ring buffer
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Docsis -
- *
+ * DMA Ring buffer (don't need recording buffers)
*/
- {
- .name = "BMM_Buffer",
- .start = 0x00000000,
- .end = 0x000AA000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 680KiB */
+ PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x000AA000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* Display bins buffer for unit0
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Display Bins for unit0
- *
*/
- {
- .name = "DisplayBins0",
- .start = 0x00000000,
- .end = 0x00000FFF, /* 4 KB total */
- .flags = IORESOURCE_MEM,
- },
+ /* 4KiB */
+ PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
- * Display bins buffer
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Display Bins for unit1
- *
+ * Display bins buffer for unit1
*/
- {
- .name = "DisplayBins1",
- .start = 0x63B83000,
- .end = 0x63B84000 - 1, /* 4 KB total */
- .flags = IORESOURCE_IO,
- },
+ /* 4KiB */
+ PREALLOC_NORMAL("DisplayBins1", 0x00000000, 0x00001000-1,
+ IORESOURCE_MEM)
+
/*
- *
* AVFS: player HAL memory
- *
- *
*/
- {
- .name = "AvfsDmaMem",
- .start = 0x63B84000,
- .end = 0x63E48C00 - 1, /* 945K * 3 for playback */
- .flags = IORESOURCE_IO,
- },
+ /* 945K * 3 for playback */
+ PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x002c4c00-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* PMEM
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Persistent memory for diagnostics.
- *
*/
- {
- .name = "DiagPersistentMemory",
- .start = 0x00000000,
- .end = 0x10000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* Persistent memory for diagnostics (64KiB) */
+ PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* Smartcard
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Read and write buffers for Internal/External cards
- *
*/
- {
- .name = "SmartCardInfo",
- .start = 0x63B80000,
- .end = 0x63B82800 - 1,
- .flags = IORESOURCE_IO,
- },
+ /* Read and write buffers for Internal/External cards (10KiB) */
+ PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1, IORESOURCE_MEM)
+
/*
- *
* KAVNET
- * NP Reset Vector - must be of the form xxCxxxxx
- * NP Image - must be video bank 1
- * NP IPC - must be video bank 2
*/
- {
- .name = "NP_Reset_Vector",
- .start = 0x27c00000,
- .end = 0x27c01000 - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "NP_Image",
- .start = 0x27020000,
- .end = 0x27060000 - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "NP_IPC",
- .start = 0x63500000,
- .end = 0x63580000 - 1,
- .flags = IORESOURCE_IO,
- },
+ /* NP Reset Vector - must be of the form xxCxxxxx (4KiB) */
+ PREALLOC_NORMAL("NP_Reset_Vector", 0x27c00000, 0x27c01000-1,
+ IORESOURCE_MEM)
+ /* NP Image - must be video bank 1 (320KiB) */
+ PREALLOC_NORMAL("NP_Image", 0x27020000, 0x27070000-1, IORESOURCE_MEM)
+ /* NP IPC - must be video bank 2 (512KiB) */
+ PREALLOC_NORMAL("NP_IPC", 0x63500000, 0x63580000-1, IORESOURCE_MEM)
+
/*
* NAND Flash
*/
- {
- .name = "NandFlash",
- .start = NAND_FLASH_BASE,
- .end = NAND_FLASH_BASE + 0x400 - 1,
- .flags = IORESOURCE_IO,
- },
+ /* 10KiB */
+ PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
+ IORESOURCE_MEM)
+
+ /*
+ * TFTPBuffer
+ *
+ * This buffer is used in some minimal configurations (e.g. two-way
+ * loader) for storing software images
+ */
+ PREALLOC_TFTP("TFTPBuffer", 0x00000000, MEBIBYTE(80)-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* Add other resources here
*/
- { },
+
+ /*
+ * End of Resource marker
+ */
+ {
+ .flags = 0,
+ },
};
diff --git a/arch/mips/powertv/asic/prealloc-zeus.c b/arch/mips/powertv/asic/prealloc-zeus.c
index 018d4514dbe3..96480a2395c0 100644
--- a/arch/mips/powertv/asic/prealloc-zeus.c
+++ b/arch/mips/powertv/asic/prealloc-zeus.c
@@ -22,7 +22,9 @@
*/
#include <linux/init.h>
+#include <linux/ioport.h>
#include <asm/mach-powertv/asic.h>
+#include "prealloc.h"
/*
* DVR_CAPABLE RESOURCES
@@ -30,280 +32,151 @@
struct resource dvr_zeus_resources[] __initdata =
{
/*
- *
* VIDEO1 / LX1
- *
*/
- {
- .name = "ST231aImage", /* Delta-Mu 1 image and ram */
- .start = 0x20000000,
- .end = 0x201FFFFF, /* 2MiB */
- .flags = IORESOURCE_IO,
- },
- {
- .name = "ST231aMonitor", /* 8KiB block ST231a monitor */
- .start = 0x20200000,
- .end = 0x20201FFF,
- .flags = IORESOURCE_IO,
- },
- {
- .name = "MediaMemory1",
- .start = 0x20202000,
- .end = 0x21FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
- .flags = IORESOURCE_IO,
- },
+ /* Delta-Mu 1 image (2MiB) */
+ PREALLOC_NORMAL("ST231aImage", 0x20000000, 0x20200000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 1 monitor (8KiB) */
+ PREALLOC_NORMAL("ST231aMonitor", 0x20200000, 0x20202000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 1 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
+ PREALLOC_NORMAL("MediaMemory1", 0x20202000, 0x22000000-1,
+ IORESOURCE_MEM)
+
/*
- *
* VIDEO2 / LX2
- *
*/
- {
- .name = "ST231bImage", /* Delta-Mu 2 image and ram */
- .start = 0x30000000,
- .end = 0x301FFFFF, /* 2MiB */
- .flags = IORESOURCE_IO,
- },
- {
- .name = "ST231bMonitor", /* 8KiB block ST231b monitor */
- .start = 0x30200000,
- .end = 0x30201FFF,
- .flags = IORESOURCE_IO,
- },
- {
- .name = "MediaMemory2",
- .start = 0x30202000,
- .end = 0x31FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
- .flags = IORESOURCE_IO,
- },
+ /* Delta-Mu 2 image (2MiB) */
+ PREALLOC_NORMAL("ST231bImage", 0x30000000, 0x30200000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 2 monitor (8KiB) */
+ PREALLOC_NORMAL("ST231bMonitor", 0x30200000, 0x30202000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 2 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
+ PREALLOC_NORMAL("MediaMemory2", 0x30202000, 0x32000000-1,
+ IORESOURCE_MEM)
+
/*
- *
* Sysaudio Driver
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * DSP_Image_Buff - DSP code and data images (1MB)
- * ADSC_CPU_PCM_Buff - ADSC CPU PCM buffer (40KB)
- * ADSC_AUX_Buff - ADSC AUX buffer (16KB)
- * ADSC_Main_Buff - ADSC Main buffer (16KB)
- *
*/
- {
- .name = "DSP_Image_Buff",
- .start = 0x00000000,
- .end = 0x000FFFFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_CPU_PCM_Buff",
- .start = 0x00000000,
- .end = 0x00009FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_AUX_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_Main_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
+ /* DSP code and data images (1MiB) */
+ PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC CPU PCM buffer (40KiB) */
+ PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC AUX buffer (16KiB) */
+ PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00004000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC Main buffer (16KiB) */
+ PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00004000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* STAVEM driver/STAPI
*
- * This driver requires:
- *
- * Arbitrary Based Buffers:
* This memory area is used for allocating buffers for Video decoding
* purposes. Allocation/De-allocation within this buffer is managed
* by the STAVMEM driver of the STAPI. They could be Decimated
* Picture Buffers, Intermediate Buffers, as deemed necessary for
* video decoding purposes, for any video decoders on Zeus.
- *
*/
- {
- .name = "AVMEMPartition0",
- .start = 0x00000000,
- .end = 0x00c00000 - 1, /* 12 MB total */
- .flags = IORESOURCE_MEM,
- },
+ /* 12MiB */
+ PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00c00000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* DOCSIS Subsystem
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Docsis -
- *
*/
- {
- .name = "Docsis",
- .start = 0x40100000,
- .end = 0x407fffff,
- .flags = IORESOURCE_MEM,
- },
+ /* 7MiB */
+ PREALLOC_DOCSIS("Docsis", 0x40100000, 0x40800000-1, IORESOURCE_MEM)
+
/*
- *
* GHW HAL Driver
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * GraphicsHeap - PowerTV Graphics Heap
- *
*/
- {
- .name = "GraphicsHeap",
- .start = 0x46900000,
- .end = 0x47700000 - 1, /* 14 MB total */
- .flags = IORESOURCE_MEM,
- },
+ /* PowerTV Graphics Heap (14MiB) */
+ PREALLOC_NORMAL("GraphicsHeap", 0x46900000, 0x47700000-1,
+ IORESOURCE_MEM)
+
/*
- *
* multi com buffer area
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Docsis -
- *
*/
- {
- .name = "MulticomSHM",
- .start = 0x47900000,
- .end = 0x47920000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 128KiB */
+ PREALLOC_NORMAL("MulticomSHM", 0x47900000, 0x47920000-1,
+ IORESOURCE_MEM)
+
/*
- *
* DMA Ring buffer
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Docsis -
- *
*/
- {
- .name = "BMM_Buffer",
- .start = 0x00000000,
- .end = 0x00280000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 2.5MiB */
+ PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x00280000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* Display bins buffer for unit0
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Display Bins for unit0
- *
*/
- {
- .name = "DisplayBins0",
- .start = 0x00000000,
- .end = 0x00000FFF, /* 4 KB total */
- .flags = IORESOURCE_MEM,
- },
+ /* 4KiB */
+ PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
- * Display bins buffer
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Display Bins for unit1
- *
+ * Display bins buffer for unit1
*/
- {
- .name = "DisplayBins1",
- .start = 0x00000000,
- .end = 0x00000FFF, /* 4 KB total */
- .flags = IORESOURCE_MEM,
- },
+ /* 4KiB */
+ PREALLOC_NORMAL("DisplayBins1", 0x00000000, 0x00001000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* ITFS
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Docsis -
- *
*/
- {
- .name = "ITFS",
- .start = 0x00000000,
- /* 815,104 bytes each for 2 ITFS partitions. */
- .end = 0x0018DFFF,
- .flags = IORESOURCE_MEM,
- },
+ /* 815,104 bytes each for 2 ITFS partitions. */
+ PREALLOC_NORMAL("ITFS", 0x00000000, 0x0018E000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* AVFS
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Docsis -
- *
*/
- {
- .name = "AvfsDmaMem",
- .start = 0x00000000,
- /* (945K * 8) = (128K * 3) 5 playbacks / 3 server */
- .end = 0x007c2000 - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "AvfsFileSys",
- .start = 0x00000000,
- .end = 0x00001000 - 1, /* 4K */
- .flags = IORESOURCE_MEM,
- },
+ /* (945K * 8) = (128K * 3) 5 playbacks / 3 server */
+ PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x007c2000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* 4KiB */
+ PREALLOC_NORMAL("AvfsFileSys", 0x00000000, 0x00001000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* PMEM
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Persistent memory for diagnostics.
- *
*/
- {
- .name = "DiagPersistentMemory",
- .start = 0x00000000,
- .end = 0x10000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* Persistent memory for diagnostics (64KiB) */
+ PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* Smartcard
+ */
+ /* Read and write buffers for Internal/External cards (10KiB) */
+ PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
+ /*
+ * TFTPBuffer
*
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Read and write buffers for Internal/External cards
- *
+ * This buffer is used in some minimal configurations (e.g. two-way
+ * loader) for storing software images
*/
- {
- .name = "SmartCardInfo",
- .start = 0x00000000,
- .end = 0x2800 - 1,
- .flags = IORESOURCE_MEM,
- },
+ PREALLOC_TFTP("TFTPBuffer", 0x00000000, MEBIBYTE(80)-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* Add other resources here
*/
- { },
+
+ /*
+ * End of Resource marker
+ */
+ {
+ .flags = 0,
+ },
};
/*
@@ -314,146 +187,118 @@ struct resource non_dvr_zeus_resources[] __initdata =
/*
* VIDEO1 / LX1
*/
- {
- .name = "ST231aImage", /* Delta-Mu 1 image and ram */
- .start = 0x20000000,
- .end = 0x201FFFFF, /* 2MiB */
- .flags = IORESOURCE_IO,
- },
- {
- .name = "ST231aMonitor", /* 8KiB block ST231a monitor */
- .start = 0x20200000,
- .end = 0x20201FFF,
- .flags = IORESOURCE_IO,
- },
- {
- .name = "MediaMemory1",
- .start = 0x20202000,
- .end = 0x21FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
- .flags = IORESOURCE_IO,
- },
+ /* Delta-Mu 1 image (2MiB) */
+ PREALLOC_NORMAL("ST231aImage", 0x20000000, 0x20200000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 1 monitor (8KiB) */
+ PREALLOC_NORMAL("ST231aMonitor", 0x20200000, 0x20202000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 1 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
+ PREALLOC_NORMAL("MediaMemory1", 0x20202000, 0x22000000-1,
+ IORESOURCE_MEM)
+
/*
* Sysaudio Driver
*/
- {
- .name = "DSP_Image_Buff",
- .start = 0x00000000,
- .end = 0x000FFFFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_CPU_PCM_Buff",
- .start = 0x00000000,
- .end = 0x00009FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_AUX_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_Main_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
+ /* DSP code and data images (1MiB) */
+ PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC CPU PCM buffer (40KiB) */
+ PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC AUX buffer (16KiB) */
+ PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00004000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC Main buffer (16KiB) */
+ PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00004000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* STAVEM driver/STAPI
*/
- {
- .name = "AVMEMPartition0",
- .start = 0x00000000,
- .end = 0x00600000 - 1, /* 6 MB total */
- .flags = IORESOURCE_MEM,
- },
+ /* 6MiB */
+ PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00600000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* DOCSIS Subsystem
*/
- {
- .name = "Docsis",
- .start = 0x40100000,
- .end = 0x407fffff,
- .flags = IORESOURCE_MEM,
- },
+ /* 7MiB */
+ PREALLOC_DOCSIS("Docsis", 0x40100000, 0x40800000-1, IORESOURCE_MEM)
+
/*
* GHW HAL Driver
*/
- {
- .name = "GraphicsHeap",
- .start = 0x46900000,
- .end = 0x47700000 - 1, /* 14 MB total */
- .flags = IORESOURCE_MEM,
- },
+ /* PowerTV Graphics Heap (14MiB) */
+ PREALLOC_NORMAL("GraphicsHeap", 0x46900000, 0x47700000-1,
+ IORESOURCE_MEM)
+
/*
* multi com buffer area
*/
- {
- .name = "MulticomSHM",
- .start = 0x47900000,
- .end = 0x47920000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 128KiB */
+ PREALLOC_NORMAL("MulticomSHM", 0x47900000, 0x47920000-1,
+ IORESOURCE_MEM)
+
/*
* DMA Ring buffer
*/
- {
- .name = "BMM_Buffer",
- .start = 0x00000000,
- .end = 0x00280000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 2.5MiB */
+ PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x00280000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* Display bins buffer for unit0
*/
- {
- .name = "DisplayBins0",
- .start = 0x00000000,
- .end = 0x00000FFF, /* 4 KB total */
- .flags = IORESOURCE_MEM,
- },
+ /* 4KiB */
+ PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* AVFS: player HAL memory
- *
- *
*/
- {
- .name = "AvfsDmaMem",
- .start = 0x00000000,
- .end = 0x002c4c00 - 1, /* 945K * 3 for playback */
- .flags = IORESOURCE_MEM,
- },
+ /* 945K * 3 for playback */
+ PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x002c4c00-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* PMEM
*/
- {
- .name = "DiagPersistentMemory",
- .start = 0x00000000,
- .end = 0x10000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* Persistent memory for diagnostics (64KiB) */
+ PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* Smartcard
*/
- {
- .name = "SmartCardInfo",
- .start = 0x00000000,
- .end = 0x2800 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* Read and write buffers for Internal/External cards (10KiB) */
+ PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* NAND Flash
*/
- {
- .name = "NandFlash",
- .start = NAND_FLASH_BASE,
- .end = NAND_FLASH_BASE + 0x400 - 1,
- .flags = IORESOURCE_IO,
- },
+ /* 10KiB */
+ PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
+ IORESOURCE_MEM)
+
+ /*
+ * TFTPBuffer
+ *
+ * This buffer is used in some minimal configurations (e.g. two-way
+ * loader) for storing software images
+ */
+ PREALLOC_TFTP("TFTPBuffer", 0x00000000, MEBIBYTE(80)-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* Add other resources here
*/
- { },
+
+ /*
+ * End of Resource marker
+ */
+ {
+ .flags = 0,
+ },
};
diff --git a/arch/mips/powertv/asic/prealloc.h b/arch/mips/powertv/asic/prealloc.h
new file mode 100644
index 000000000000..8e682df17856
--- /dev/null
+++ b/arch/mips/powertv/asic/prealloc.h
@@ -0,0 +1,70 @@
+/*
+ * Definitions for memory preallocations
+ *
+ * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _ARCH_MIPS_POWERTV_ASIC_PREALLOC_H
+#define _ARCH_MIPS_POWERTV_ASIC_PREALLOC_H
+
+#define KIBIBYTE(n) ((n) * 1024) /* Number of kibibytes */
+#define MEBIBYTE(n) ((n) * KIBIBYTE(1024)) /* Number of mebibytes */
+
+/* "struct resource" array element definition */
+#define PREALLOC(NAME, START, END, FLAGS) { \
+ .name = (NAME), \
+ .start = (START), \
+ .end = (END), \
+ .flags = (FLAGS) \
+ },
+
+/* Individual resources in the preallocated resource arrays are defined using
+ * macros. These macros are conditionally defined based on their
+ * corresponding kernel configuration flag:
+ * - CONFIG_PREALLOC_NORMAL: preallocate resources for a normal settop box
+ * - CONFIG_PREALLOC_TFTP: preallocate the TFTP download resource
+ * - CONFIG_PREALLOC_DOCSIS: preallocate the DOCSIS resource
+ * - CONFIG_PREALLOC_PMEM: reserve space for persistent memory
+ */
+#ifdef CONFIG_PREALLOC_NORMAL
+#define PREALLOC_NORMAL(name, start, end, flags) \
+ PREALLOC(name, start, end, flags)
+#else
+#define PREALLOC_NORMAL(name, start, end, flags)
+#endif
+
+#ifdef CONFIG_PREALLOC_TFTP
+#define PREALLOC_TFTP(name, start, end, flags) \
+ PREALLOC(name, start, end, flags)
+#else
+#define PREALLOC_TFTP(name, start, end, flags)
+#endif
+
+#ifdef CONFIG_PREALLOC_DOCSIS
+#define PREALLOC_DOCSIS(name, start, end, flags) \
+ PREALLOC(name, start, end, flags)
+#else
+#define PREALLOC_DOCSIS(name, start, end, flags)
+#endif
+
+#ifdef CONFIG_PREALLOC_PMEM
+#define PREALLOC_PMEM(name, start, end, flags) \
+ PREALLOC(name, start, end, flags)
+#else
+#define PREALLOC_PMEM(name, start, end, flags)
+#endif
+#endif
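
[Editor's note: the following is a small self-contained sketch, not part of the patch. It uses simplified stand-ins for struct resource and the IORESOURCE_MEM value to show how the PREALLOC_* wrappers from prealloc.h behave: when the matching CONFIG_PREALLOC_* option is enabled the wrapper emits a struct resource initializer, and when it is disabled the wrapper expands to nothing, so the entry simply drops out of the __initdata array and only the { .flags = 0, } end marker is guaranteed to remain.]

#include <stdio.h>

#define IORESOURCE_MEM	0x00000200	/* stand-in value, not <linux/ioport.h> */

struct resource {			/* simplified stand-in for the kernel type */
	const char *name;
	unsigned long start, end, flags;
};

#define KIBIBYTE(n) ((n) * 1024)
#define MEBIBYTE(n) ((n) * KIBIBYTE(1024))

#define PREALLOC(NAME, START, END, FLAGS) \
	{ .name = (NAME), .start = (START), .end = (END), .flags = (FLAGS) },

#define CONFIG_PREALLOC_NORMAL		/* pretend this option is enabled */
#ifdef CONFIG_PREALLOC_NORMAL
#define PREALLOC_NORMAL(name, start, end, flags) PREALLOC(name, start, end, flags)
#else
#define PREALLOC_NORMAL(name, start, end, flags)
#endif

/* CONFIG_PREALLOC_TFTP deliberately left undefined: wrapper expands to nothing */
#ifdef CONFIG_PREALLOC_TFTP
#define PREALLOC_TFTP(name, start, end, flags) PREALLOC(name, start, end, flags)
#else
#define PREALLOC_TFTP(name, start, end, flags)
#endif

static struct resource example_resources[] = {
	/* kept: CONFIG_PREALLOC_NORMAL is defined above */
	PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1, IORESOURCE_MEM)
	/* dropped entirely: CONFIG_PREALLOC_TFTP is not defined */
	PREALLOC_TFTP("TFTPBuffer", 0x00000000, MEBIBYTE(80)-1, IORESOURCE_MEM)
	/* End of Resource marker, same convention as the patch */
	{ .flags = 0, },
};

int main(void)
{
	int i;

	for (i = 0; example_resources[i].flags != 0; i++)
		printf("%-14s 0x%08lx-0x%08lx\n", example_resources[i].name,
		       example_resources[i].start, example_resources[i].end);
	return 0;
}

Note that the trailing comma lives inside PREALLOC() itself, which is why the invocations in the resource arrays above appear without separators between them.
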
diff --git a/arch/mips/sibyte/bcm1480/irq.c b/arch/mips/sibyte/bcm1480/irq.c
index 7a8b0a8b643a..044bbe462c2c 100644
--- a/arch/mips/sibyte/bcm1480/irq.c
+++ b/arch/mips/sibyte/bcm1480/irq.c
@@ -253,7 +253,7 @@ void __init init_bcm1480_irqs(void)
* On the second cpu, everything is set to IP5, which is
* ignored, EXCEPT the mailbox interrupt. That one is
* set to IP[2] so it is handled. This is needed so we
- * can do cross-cpu function calls, as requred by SMP
+ * can do cross-cpu function calls, as required by SMP
*/
#define IMR_IP2_VAL K_BCM1480_INT_MAP_I0
diff --git a/arch/mips/sibyte/sb1250/irq.c b/arch/mips/sibyte/sb1250/irq.c
index 62371f772553..12ac04a658ee 100644
--- a/arch/mips/sibyte/sb1250/irq.c
+++ b/arch/mips/sibyte/sb1250/irq.c
@@ -236,7 +236,7 @@ void __init init_sb1250_irqs(void)
* On the second cpu, everything is set to IP5, which is
* ignored, EXCEPT the mailbox interrupt. That one is
* set to IP[2] so it is handled. This is needed so we
- * can do cross-cpu function calls, as requred by SMP
+ * can do cross-cpu function calls, as required by SMP
*/
#define IMR_IP2_VAL K_INT_MAP_I0
diff --git a/arch/mips/sibyte/swarm/platform.c b/arch/mips/sibyte/swarm/platform.c
index 54847fe1e564..097335262fb3 100644
--- a/arch/mips/sibyte/swarm/platform.c
+++ b/arch/mips/sibyte/swarm/platform.c
@@ -83,3 +83,57 @@ static int __init swarm_pata_init(void)
device_initcall(swarm_pata_init);
#endif /* defined(CONFIG_SIBYTE_SWARM) || defined(CONFIG_SIBYTE_LITTLESUR) */
+
+#define sb1250_dev_struct(num) \
+ static struct resource sb1250_res##num = { \
+ .name = "SB1250 MAC " __stringify(num), \
+ .flags = IORESOURCE_MEM, \
+ .start = A_MAC_CHANNEL_BASE(num), \
+ .end = A_MAC_CHANNEL_BASE(num + 1) -1, \
+ };\
+ static struct platform_device sb1250_dev##num = { \
+ .name = "sb1250-mac", \
+ .id = num, \
+ .resource = &sb1250_res##num, \
+ .num_resources = 1, \
+ }
+
+sb1250_dev_struct(0);
+sb1250_dev_struct(1);
+sb1250_dev_struct(2);
+sb1250_dev_struct(3);
+
+static struct platform_device *sb1250_devs[] __initdata = {
+ &sb1250_dev0,
+ &sb1250_dev1,
+ &sb1250_dev2,
+ &sb1250_dev3,
+};
+
+static int __init sb1250_device_init(void)
+{
+ int ret;
+
+ /* Set the number of available units based on the SOC type. */
+ switch (soc_type) {
+ case K_SYS_SOC_TYPE_BCM1250:
+ case K_SYS_SOC_TYPE_BCM1250_ALT:
+ ret = platform_add_devices(sb1250_devs, 3);
+ break;
+ case K_SYS_SOC_TYPE_BCM1120:
+ case K_SYS_SOC_TYPE_BCM1125:
+ case K_SYS_SOC_TYPE_BCM1125H:
+ case K_SYS_SOC_TYPE_BCM1250_ALT2: /* Hybrid */
+ ret = platform_add_devices(sb1250_devs, 2);
+ break;
+ case K_SYS_SOC_TYPE_BCM1x55:
+ case K_SYS_SOC_TYPE_BCM1x80:
+ ret = platform_add_devices(sb1250_devs, 4);
+ break;
+ default:
+ ret = -ENODEV;
+ break;
+ }
+ return ret;
+}
+device_initcall(sb1250_device_init);
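
[Editor's note: a side remark on the sb1250_dev_struct() helper added above. The resource name is built at compile time by pasting the unit number into an identifier and stringifying it. A minimal, self-contained sketch of that mechanism might look as follows; the __stringify helpers here are local stand-ins for the kernel's <linux/stringify.h>, and only the name string is generated rather than the full resource/platform_device pair.]

#include <stdio.h>

/* Local stand-ins for the kernel's <linux/stringify.h> helpers */
#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)

/* Cut-down analogue of sb1250_dev_struct(): one invocation stamps out
 * a per-instance name string ("SB1250 MAC 0", "SB1250 MAC 1", ...). */
#define mac_name_struct(num) \
	static const char mac_name##num[] = "SB1250 MAC " __stringify(num)

mac_name_struct(0);
mac_name_struct(1);

int main(void)
{
	puts(mac_name0);	/* prints "SB1250 MAC 0" */
	puts(mac_name1);	/* prints "SB1250 MAC 1" */
	return 0;
}

The extra __stringify_1() level matters when the argument is itself a macro: the indirection forces the argument to be expanded before it is turned into a string, which is why the kernel helper is written with two levels even though the literal 0-3 used here would also work with a plain # operator.
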
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index 89faacad5d17..1c4565a9102b 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -37,6 +37,9 @@ config GENERIC_HARDIRQS_NO__DO_IRQ
config GENERIC_CALIBRATE_DELAY
def_bool y
+config GENERIC_CMOS_UPDATE
+ def_bool y
+
config GENERIC_FIND_NEXT_BIT
def_bool y
diff --git a/arch/mn10300/kernel/rtc.c b/arch/mn10300/kernel/rtc.c
index 7978470b5749..815a933aafa8 100644
--- a/arch/mn10300/kernel/rtc.c
+++ b/arch/mn10300/kernel/rtc.c
@@ -26,17 +26,15 @@ static long last_rtc_update;
/* time for RTC to update itself in ioclks */
static unsigned long mn10300_rtc_update_period;
-/*
- * read the current RTC time
- */
-unsigned long __init get_initial_rtc_time(void)
+void read_persistent_clock(struct timespec *ts)
{
struct rtc_time tm;
get_rtc_time(&tm);
- return mktime(tm.tm_year, tm.tm_mon, tm.tm_mday,
+ ts->tv_sec = mktime(tm.tm_year, tm.tm_mon, tm.tm_mday,
tm.tm_hour, tm.tm_min, tm.tm_sec);
+ ts->tv_nsec = 0;
}
/*
@@ -110,24 +108,9 @@ static int set_rtc_mmss(unsigned long nowtime)
return retval;
}
-void check_rtc_time(void)
+int update_persistent_clock(struct timespec now)
{
- /* the RTC clock just finished ticking over again this second
- * - if we have an externally synchronized Linux clock, then update
- * RTC clock accordingly every ~11 minutes. set_rtc_mmss() has to be
- * called as close as possible to 500 ms before the new second starts.
- */
- if ((time_status & STA_UNSYNC) == 0 &&
- xtime.tv_sec > last_rtc_update + 660 &&
- xtime.tv_nsec / 1000 >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
- xtime.tv_nsec / 1000 <= 500000 + ((unsigned) TICK_SIZE) / 2
- ) {
- if (set_rtc_mmss(xtime.tv_sec) == 0)
- last_rtc_update = xtime.tv_sec;
- else
- /* do it again in 60s */
- last_rtc_update = xtime.tv_sec - 600;
- }
+	return set_rtc_mmss(now.tv_sec);
}
/*
diff --git a/arch/mn10300/kernel/time.c b/arch/mn10300/kernel/time.c
index 395caf01b909..8f7f6d22783d 100644
--- a/arch/mn10300/kernel/time.c
+++ b/arch/mn10300/kernel/time.c
@@ -111,7 +111,6 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
/* advance the kernel's time tracking system */
profile_tick(CPU_PROFILING);
do_timer(1);
- check_rtc_time();
}
write_sequnlock(&xtime_lock);
@@ -139,9 +138,6 @@ void __init time_init(void)
" (calibrated against RTC)\n",
MN10300_TSCCLK / 1000000, (MN10300_TSCCLK / 10000) % 100);
- xtime.tv_sec = get_initial_rtc_time();
- xtime.tv_nsec = 0;
-
mn10300_last_tsc = TMTSCBC;
/* use timer 0 & 1 cascaded to tick at as close to HZ as possible */
diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
index 32c2cca74345..45effe6978fa 100644
--- a/arch/parisc/include/asm/cache.h
+++ b/arch/parisc/include/asm/cache.h
@@ -28,7 +28,7 @@
#define SMP_CACHE_BYTES L1_CACHE_BYTES
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
void parisc_cache_init(void); /* initializes cache-flushing */
void disable_sr_hashing_asm(int); /* low level support for above */
diff --git a/arch/parisc/include/asm/system.h b/arch/parisc/include/asm/system.h
index 4653c77bf9d1..2ab4af58ecb9 100644
--- a/arch/parisc/include/asm/system.h
+++ b/arch/parisc/include/asm/system.h
@@ -174,7 +174,7 @@ static inline void set_eiem(unsigned long val)
})
#ifdef CONFIG_SMP
-# define __lock_aligned __attribute__((__section__(".data.lock_aligned")))
+# define __lock_aligned __attribute__((__section__(".data..lock_aligned")))
#endif
#define arch_align_stack(x) (x)
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index 0e3d9f9b9e33..4dbdf0ed6fa0 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -345,7 +345,7 @@ smp_slave_stext:
ENDPROC(stext)
#ifndef CONFIG_64BIT
- .section .data.read_mostly
+ .section .data..read_mostly
.align 4
.export $global$,data
diff --git a/arch/parisc/kernel/init_task.c b/arch/parisc/kernel/init_task.c
index d020eae6525c..4a91e433416f 100644
--- a/arch/parisc/kernel/init_task.c
+++ b/arch/parisc/kernel/init_task.c
@@ -53,11 +53,11 @@ union thread_union init_thread_union __init_task_data
* guarantee that global objects will be laid out in memory in the same order
* as the order of declaration, so put these in different sections and use
* the linker script to order them. */
-pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data.vm0.pmd"), aligned(PAGE_SIZE)));
+pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE)));
#endif
-pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data.vm0.pgd"), aligned(PAGE_SIZE)));
-pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data.vm0.pte"), aligned(PAGE_SIZE)));
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE)));
+pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE)));
/*
* Initial task structure.
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 9dab4a4e09f7..d64a6bbec2aa 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -94,8 +94,8 @@ SECTIONS
/* PA-RISC locks requires 16-byte alignment */
. = ALIGN(16);
- .data.lock_aligned : {
- *(.data.lock_aligned)
+ .data..lock_aligned : {
+ *(.data..lock_aligned)
}
/* End of data section */
@@ -105,10 +105,10 @@ SECTIONS
__bss_start = .;
/* page table entries need to be PAGE_SIZE aligned */
. = ALIGN(PAGE_SIZE);
- .data.vmpages : {
- *(.data.vm0.pmd)
- *(.data.vm0.pgd)
- *(.data.vm0.pte)
+ .data..vmpages : {
+ *(.data..vm0.pmd)
+ *(.data..vm0.pgd)
+ *(.data..vm0.pte)
}
.bss : {
*(.bss)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 2e19500921f9..c4c4549c22bb 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -140,6 +140,7 @@ config PPC
select HAVE_SYSCALL_WRAPPERS if PPC64
select GENERIC_ATOMIC64 if PPC32
select HAVE_PERF_EVENTS
+ select HAVE_REGS_AND_STACK_ACCESS_API
config EARLY_PRINTK
bool
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 5cdd7ed9a12e..53696da4518f 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -44,6 +44,18 @@ config DEBUG_STACK_USAGE
This option will slow down process creation somewhat.
+config DEBUG_PER_CPU_MAPS
+ bool "Debug access to per_cpu maps"
+ depends on DEBUG_KERNEL
+ depends on SMP
+ default n
+ ---help---
+ Say Y to verify that the per_cpu map being accessed has
+	  been set up. Adds a fair amount of code to kernel memory
+ and decreases performance.
+
+ Say N if unsure.
+
config HCALL_STATS
bool "Hypervisor call instrumentation"
depends on PPC_PSERIES && DEBUG_FS && TRACEPOINTS
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 1a54a3b3a3fa..42dcd3f4ad7b 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -112,6 +112,11 @@ KBUILD_CFLAGS += $(call cc-option,-mspe=no)
# kernel considerably.
KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time)
+# FIXME: the module load should be taught about the additional relocs
+# generated by this.
+# revert to pre-gcc-4.4 behaviour of .eh_frame
+KBUILD_CFLAGS += $(call cc-option,-fno-dwarf2-cfi-asm)
+
# Never use string load/store instructions as they are
# often slow when they are implemented at all
KBUILD_CFLAGS += -mno-string
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index bb2465bcb327..ad0df7d0a643 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -44,6 +44,7 @@ $(obj)/cuboot-taishan.o: BOOTCFLAGS += -mcpu=405
$(obj)/cuboot-katmai.o: BOOTCFLAGS += -mcpu=405
$(obj)/cuboot-acadia.o: BOOTCFLAGS += -mcpu=405
$(obj)/treeboot-walnut.o: BOOTCFLAGS += -mcpu=405
+$(obj)/treeboot-iss4xx.o: BOOTCFLAGS += -mcpu=405
$(obj)/virtex405-head.o: BOOTAFLAGS += -mcpu=405
@@ -77,7 +78,7 @@ src-plat := of.c cuboot-52xx.c cuboot-824x.c cuboot-83xx.c cuboot-85xx.c holly.c
cuboot-warp.c cuboot-85xx-cpm2.c cuboot-yosemite.c simpleboot.c \
virtex405-head.S virtex.c redboot-83xx.c cuboot-sam440ep.c \
cuboot-acadia.c cuboot-amigaone.c cuboot-kilauea.c \
- gamecube-head.S gamecube.c wii-head.S wii.c
+ gamecube-head.S gamecube.c wii-head.S wii.c treeboot-iss4xx.c
src-boot := $(src-wlib) $(src-plat) empty.c
src-boot := $(addprefix $(obj)/, $(src-boot))
@@ -169,7 +170,7 @@ quiet_cmd_wrap = WRAP $@
$(if $3, -s $3)$(if $4, -d $4)$(if $5, -i $5) vmlinux
image-$(CONFIG_PPC_PSERIES) += zImage.pseries
-image-$(CONFIG_PPC_MAPLE) += zImage.pseries
+image-$(CONFIG_PPC_MAPLE) += zImage.maple
image-$(CONFIG_PPC_IBM_CELL_BLADE) += zImage.pseries
image-$(CONFIG_PPC_PS3) += dtbImage.ps3
image-$(CONFIG_PPC_CELLEB) += zImage.pseries
@@ -206,6 +207,8 @@ image-$(CONFIG_TAISHAN) += cuImage.taishan
image-$(CONFIG_KATMAI) += cuImage.katmai
image-$(CONFIG_WARP) += cuImage.warp
image-$(CONFIG_YOSEMITE) += cuImage.yosemite
+image-$(CONFIG_ISS4xx) += treeImage.iss4xx \
+ treeImage.iss4xx-mpic
# Board ports in arch/powerpc/platform/8xx/Kconfig
image-$(CONFIG_MPC86XADS) += cuImage.mpc866ads
@@ -351,7 +354,7 @@ install: $(CONFIGURE) $(addprefix $(obj)/, $(image-y))
clean-files += $(image-) $(initrd-) cuImage.* dtbImage.* treeImage.* \
zImage zImage.initrd zImage.chrp zImage.coff zImage.holly \
zImage.iseries zImage.miboot zImage.pmac zImage.pseries \
- simpleImage.* otheros.bld *.dtb
+ zImage.maple simpleImage.* otheros.bld *.dtb
# clean up files cached by wrapper
clean-kernel := vmlinux.strip vmlinux.bin
diff --git a/arch/powerpc/boot/dts/iss4xx-mpic.dts b/arch/powerpc/boot/dts/iss4xx-mpic.dts
new file mode 100644
index 000000000000..23e9d9b7e400
--- /dev/null
+++ b/arch/powerpc/boot/dts/iss4xx-mpic.dts
@@ -0,0 +1,155 @@
+/*
+ * Device Tree Source for IBM Embedded PPC 476 Platform
+ *
+ * Copyright 2010 Torez Smith, IBM Corporation.
+ *
+ * Based on earlier code:
+ * Copyright (c) 2006, 2007 IBM Corp.
+ * Josh Boyer <jwboyer@linux.vnet.ibm.com>, David Gibson <dwg@au1.ibm.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without
+ * any warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+
+/memreserve/ 0x01f00000 0x00100000;
+
+/ {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ model = "ibm,iss-4xx";
+ compatible = "ibm,iss-4xx";
+ dcr-parent = <&{/cpus/cpu@0}>;
+
+ aliases {
+ serial0 = &UART0;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ model = "PowerPC,4xx"; // real CPU changed in sim
+ reg = <0>;
+			clock-frequency = <100000000>; // 100MHz :-)
+ timebase-frequency = <100000000>;
+ i-cache-line-size = <32>;
+ d-cache-line-size = <32>;
+ i-cache-size = <32768>;
+ d-cache-size = <32768>;
+ dcr-controller;
+ dcr-access-method = "native";
+ status = "ok";
+ };
+ cpu@1 {
+ device_type = "cpu";
+ model = "PowerPC,4xx"; // real CPU changed in sim
+ reg = <1>;
+			clock-frequency = <100000000>; // 100MHz :-)
+ timebase-frequency = <100000000>;
+ i-cache-line-size = <32>;
+ d-cache-line-size = <32>;
+ i-cache-size = <32768>;
+ d-cache-size = <32768>;
+ dcr-controller;
+ dcr-access-method = "native";
+ status = "disabled";
+ enable-method = "spin-table";
+ cpu-release-addr = <0 0x01f00100>;
+ };
+ cpu@2 {
+ device_type = "cpu";
+ model = "PowerPC,4xx"; // real CPU changed in sim
+ reg = <2>;
+			clock-frequency = <100000000>; // 100MHz :-)
+ timebase-frequency = <100000000>;
+ i-cache-line-size = <32>;
+ d-cache-line-size = <32>;
+ i-cache-size = <32768>;
+ d-cache-size = <32768>;
+ dcr-controller;
+ dcr-access-method = "native";
+ status = "disabled";
+ enable-method = "spin-table";
+ cpu-release-addr = <0 0x01f00200>;
+ };
+ cpu@3 {
+ device_type = "cpu";
+ model = "PowerPC,4xx"; // real CPU changed in sim
+ reg = <3>;
+			clock-frequency = <100000000>; // 100MHz :-)
+ timebase-frequency = <100000000>;
+ i-cache-line-size = <32>;
+ d-cache-line-size = <32>;
+ i-cache-size = <32768>;
+ d-cache-size = <32768>;
+ dcr-controller;
+ dcr-access-method = "native";
+ status = "disabled";
+ enable-method = "spin-table";
+ cpu-release-addr = <0 0x01f00300>;
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x00000000 0x00000000>; // Filled in by zImage
+
+ };
+
+ MPIC: interrupt-controller {
+ compatible = "chrp,open-pic";
+ interrupt-controller;
+ dcr-reg = <0xffc00000 0x00030000>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+
+ };
+
+ plb {
+ compatible = "ibm,plb-4xx", "ibm,plb4"; /* Could be PLB6, doesn't matter */
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges;
+ clock-frequency = <0>; // Filled in by zImage
+
+ POB0: opb {
+ compatible = "ibm,opb-4xx", "ibm,opb";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ /* Wish there was a nicer way of specifying a full 32-bit
+ range */
+ ranges = <0x00000000 0x00000001 0x00000000 0x80000000
+ 0x80000000 0x00000001 0x80000000 0x80000000>;
+ clock-frequency = <0>; // Filled in by zImage
+ UART0: serial@40000200 {
+ device_type = "serial";
+ compatible = "ns16550a";
+ reg = <0x40000200 0x00000008>;
+ virtual-reg = <0xe0000200>;
+ clock-frequency = <11059200>;
+ current-speed = <115200>;
+ interrupt-parent = <&MPIC>;
+ interrupts = <0x0 0x2>;
+ };
+ };
+ };
+
+ nvrtc {
+ compatible = "ds1743-nvram", "ds1743", "rtc-ds1743";
+ reg = <0 0xEF703000 0x2000>;
+ };
+ iss-block {
+ compatible = "ibm,iss-sim-block-device";
+ reg = <0 0xEF701000 0x1000>;
+ };
+
+ chosen {
+ linux,stdout-path = "/plb/opb/serial@40000200";
+ };
+};
diff --git a/arch/powerpc/boot/dts/iss4xx.dts b/arch/powerpc/boot/dts/iss4xx.dts
new file mode 100644
index 000000000000..4ff6555c866d
--- /dev/null
+++ b/arch/powerpc/boot/dts/iss4xx.dts
@@ -0,0 +1,116 @@
+/*
+ * Device Tree Source for IBM Embedded PPC 476 Platform
+ *
+ * Copyright 2010 Torez Smith, IBM Corporation.
+ *
+ * Based on earlier code:
+ * Copyright (c) 2006, 2007 IBM Corp.
+ * Josh Boyer <jwboyer@linux.vnet.ibm.com>, David Gibson <dwg@au1.ibm.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without
+ * any warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+
+/ {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ model = "ibm,iss-4xx";
+ compatible = "ibm,iss-4xx";
+ dcr-parent = <&{/cpus/cpu@0}>;
+
+ aliases {
+ serial0 = &UART0;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ model = "PowerPC,4xx"; // real CPU changed in sim
+ reg = <0x00000000>;
+			clock-frequency = <100000000>; // 100MHz :-)
+ timebase-frequency = <100000000>;
+ i-cache-line-size = <32>; // may need fixup in sim
+ d-cache-line-size = <32>; // may need fixup in sim
+ i-cache-size = <32768>; /* may need fixup in sim */
+ d-cache-size = <32768>; /* may need fixup in sim */
+ dcr-controller;
+ dcr-access-method = "native";
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x00000000 0x00000000>; // Filled in by zImage
+ };
+
+ UIC0: interrupt-controller0 {
+ compatible = "ibm,uic-4xx", "ibm,uic";
+ interrupt-controller;
+ cell-index = <0>;
+ dcr-reg = <0x0c0 0x009>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+
+ };
+
+ UIC1: interrupt-controller1 {
+ compatible = "ibm,uic-4xx", "ibm,uic";
+ interrupt-controller;
+ cell-index = <1>;
+ dcr-reg = <0x0d0 0x009>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+ interrupts = <0x1e 0x4 0x1f 0x4>; /* cascade */
+ interrupt-parent = <&UIC0>;
+ };
+
+ plb {
+ compatible = "ibm,plb-4xx", "ibm,plb4"; /* Could be PLB6, doesn't matter */
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges;
+ clock-frequency = <0>; // Filled in by zImage
+
+ POB0: opb {
+ compatible = "ibm,opb-4xx", "ibm,opb";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ /* Wish there was a nicer way of specifying a full 32-bit
+ range */
+ ranges = <0x00000000 0x00000001 0x00000000 0x80000000
+ 0x80000000 0x00000001 0x80000000 0x80000000>;
+ clock-frequency = <0>; // Filled in by zImage
+ UART0: serial@40000200 {
+ device_type = "serial";
+ compatible = "ns16550a";
+ reg = <0x40000200 0x00000008>;
+ virtual-reg = <0xe0000200>;
+ clock-frequency = <11059200>;
+ current-speed = <115200>;
+ interrupt-parent = <&UIC0>;
+ interrupts = <0x0 0x4>;
+ };
+ };
+ };
+
+ nvrtc {
+ compatible = "ds1743-nvram", "ds1743", "rtc-ds1743";
+ reg = <0 0xEF703000 0x2000>;
+ };
+ iss-block {
+ compatible = "ibm,iss-sim-block-device";
+ reg = <0 0xEF701000 0x1000>;
+ };
+
+ chosen {
+ linux,stdout-path = "/plb/opb/serial@40000200";
+ };
+};
diff --git a/arch/powerpc/boot/dts/katmai.dts b/arch/powerpc/boot/dts/katmai.dts
index 8cf2c0c88c05..7c3be5e45748 100644
--- a/arch/powerpc/boot/dts/katmai.dts
+++ b/arch/powerpc/boot/dts/katmai.dts
@@ -44,6 +44,7 @@
d-cache-size = <32768>;
dcr-controller;
dcr-access-method = "native";
+ reset-type = <2>; /* Use chip-reset */
};
};
diff --git a/arch/powerpc/boot/dts/redwood.dts b/arch/powerpc/boot/dts/redwood.dts
index d2af32e2bf7a..81636c01d906 100644
--- a/arch/powerpc/boot/dts/redwood.dts
+++ b/arch/powerpc/boot/dts/redwood.dts
@@ -234,10 +234,132 @@
has-inverted-stacr-oc;
has-new-stacr-staopc;
};
+ };
+ PCIE0: pciex@d00000000 {
+ device_type = "pci";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ compatible = "ibm,plb-pciex-460sx", "ibm,plb-pciex";
+ primary;
+ port = <0x0>; /* port number */
+ reg = <0x0000000d 0x00000000 0x20000000 /* Config space access */
+ 0x0000000c 0x10000000 0x00001000>; /* Registers */
+ dcr-reg = <0x100 0x020>;
+ sdr-base = <0x300>;
+
+ /* Outbound ranges, one memory and one IO,
+ * later cannot be changed
+ */
+ ranges = <0x02000000 0x00000000 0x80000000 0x0000000e 0x00000000 0x00000000 0x80000000
+ 0x01000000 0x00000000 0x00000000 0x0000000f 0x80000000 0x00000000 0x00010000>;
+
+ /* Inbound 2GB range starting at 0 */
+ dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x0 0x80000000>;
+ /* This drives busses 10 to 0x1f */
+ bus-range = <0x10 0x1f>;
+
+ /* Legacy interrupts (note the weird polarity, the bridge seems
+ * to invert PCIe legacy interrupts).
+ * We are de-swizzling here because the numbers are actually for
+ * port of the root complex virtual P2P bridge. But I want
+ * to avoid putting a node for it in the tree, so the numbers
+ * below are basically de-swizzled numbers.
+ * The real slot is on idsel 0, so the swizzling is 1:1
+ */
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ interrupt-map = <
+ 0x0 0x0 0x0 0x1 &UIC3 0x0 0x4 /* swizzled int A */
+ 0x0 0x0 0x0 0x2 &UIC3 0x1 0x4 /* swizzled int B */
+ 0x0 0x0 0x0 0x3 &UIC3 0x2 0x4 /* swizzled int C */
+ 0x0 0x0 0x0 0x4 &UIC3 0x3 0x4 /* swizzled int D */>;
+ };
+
+ PCIE1: pciex@d20000000 {
+ device_type = "pci";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ compatible = "ibm,plb-pciex-460sx", "ibm,plb-pciex";
+ primary;
+ port = <0x1>; /* port number */
+ reg = <0x0000000d 0x20000000 0x20000000 /* Config space access */
+ 0x0000000c 0x10001000 0x00001000>; /* Registers */
+ dcr-reg = <0x120 0x020>;
+ sdr-base = <0x340>;
+
+ /* Outbound ranges, one memory and one IO,
+ * later cannot be changed
+ */
+ ranges = <0x02000000 0x00000000 0x80000000 0x0000000e 0x80000000 0x00000000 0x80000000
+ 0x01000000 0x00000000 0x00000000 0x0000000f 0x80010000 0x00000000 0x00010000>;
+
+ /* Inbound 2GB range starting at 0 */
+ dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x0 0x80000000>;
+
+		/* This drives busses 0x20 to 0x2f */
+ bus-range = <0x20 0x2f>;
+
+ /* Legacy interrupts (note the weird polarity, the bridge seems
+ * to invert PCIe legacy interrupts).
+ * We are de-swizzling here because the numbers are actually for
+ * port of the root complex virtual P2P bridge. But I want
+ * to avoid putting a node for it in the tree, so the numbers
+ * below are basically de-swizzled numbers.
+ * The real slot is on idsel 0, so the swizzling is 1:1
+ */
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ interrupt-map = <
+ 0x0 0x0 0x0 0x1 &UIC3 0x4 0x4 /* swizzled int A */
+ 0x0 0x0 0x0 0x2 &UIC3 0x5 0x4 /* swizzled int B */
+ 0x0 0x0 0x0 0x3 &UIC3 0x6 0x4 /* swizzled int C */
+ 0x0 0x0 0x0 0x4 &UIC3 0x7 0x4 /* swizzled int D */>;
+ };
+
+ PCIE2: pciex@d40000000 {
+ device_type = "pci";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ compatible = "ibm,plb-pciex-460sx", "ibm,plb-pciex";
+ primary;
+ port = <0x2>; /* port number */
+ reg = <0x0000000d 0x40000000 0x20000000 /* Config space access */
+ 0x0000000c 0x10002000 0x00001000>; /* Registers */
+ dcr-reg = <0x140 0x020>;
+ sdr-base = <0x370>;
+
+ /* Outbound ranges, one memory and one IO,
+ * later cannot be changed
+ */
+ ranges = <0x02000000 0x00000000 0x80000000 0x0000000f 0x00000000 0x00000000 0x80000000
+ 0x01000000 0x00000000 0x00000000 0x0000000f 0x80020000 0x00000000 0x00010000>;
+
+ /* Inbound 2GB range starting at 0 */
+ dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x0 0x80000000>;
+
+		/* This drives busses 0x30 to 0x3f */
+ bus-range = <0x30 0x3f>;
+
+ /* Legacy interrupts (note the weird polarity, the bridge seems
+ * to invert PCIe legacy interrupts).
+ * We are de-swizzling here because the numbers are actually for
+ * port of the root complex virtual P2P bridge. But I want
+ * to avoid putting a node for it in the tree, so the numbers
+ * below are basically de-swizzled numbers.
+ * The real slot is on idsel 0, so the swizzling is 1:1
+ */
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ interrupt-map = <
+ 0x0 0x0 0x0 0x1 &UIC3 0x8 0x4 /* swizzled int A */
+ 0x0 0x0 0x0 0x2 &UIC3 0x9 0x4 /* swizzled int B */
+ 0x0 0x0 0x0 0x3 &UIC3 0xa 0x4 /* swizzled int C */
+ 0x0 0x0 0x0 0x4 &UIC3 0xb 0x4 /* swizzled int D */>;
};
};
+
chosen {
linux,stdout-path = "/plb/opb/serial@ef600200";
};
diff --git a/arch/powerpc/boot/treeboot-iss4xx.c b/arch/powerpc/boot/treeboot-iss4xx.c
new file mode 100644
index 000000000000..fcc44952874e
--- /dev/null
+++ b/arch/powerpc/boot/treeboot-iss4xx.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2010 Ben. Herrenschmidt, IBM Corporation.
+ *
+ * Based on earlier code:
+ * Copyright (C) Paul Mackerras 1997.
+ *
+ * Matt Porter <mporter@kernel.crashing.org>
+ * Copyright 2002-2005 MontaVista Software Inc.
+ *
+ * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
+ * Copyright (c) 2003, 2004 Zultys Technologies
+ *
+ * Copyright 2007 David Gibson, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <stdarg.h>
+#include <stddef.h>
+#include "types.h"
+#include "elf.h"
+#include "string.h"
+#include "stdio.h"
+#include "page.h"
+#include "ops.h"
+#include "reg.h"
+#include "io.h"
+#include "dcr.h"
+#include "4xx.h"
+#include "44x.h"
+#include "libfdt.h"
+
+BSS_STACK(4096);
+
+static void iss_4xx_fixups(void)
+{
+ ibm4xx_sdram_fixup_memsize();
+}
+
+#define SPRN_PIR	0x11E	/* Processor Identification Register */
+void platform_init(void)
+{
+ unsigned long end_of_ram = 0x08000000;
+ unsigned long avail_ram = end_of_ram - (unsigned long)_end;
+ u32 pir_reg;
+
+ simple_alloc_init(_end, avail_ram, 128, 64);
+ platform_ops.fixups = iss_4xx_fixups;
+ platform_ops.exit = ibm44x_dbcr_reset;
+ pir_reg = mfspr(SPRN_PIR);
+ fdt_set_boot_cpuid_phys(_dtb_start, pir_reg);
+ fdt_init(_dtb_start);
+ serial_console_init();
+}
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index f4594ed09a20..cb97e7511d7e 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -149,6 +149,10 @@ pseries)
platformo=$object/of.o
link_address='0x4000000'
;;
+maple)
+ platformo=$object/of.o
+ link_address='0x400000'
+ ;;
pmac|chrp)
platformo=$object/of.o
;;
@@ -237,6 +241,9 @@ gamecube|wii)
link_address='0x600000'
platformo="$object/$platform-head.o $object/$platform.o"
;;
+treeboot-iss4xx-mpic)
+ platformo="$object/treeboot-iss4xx.o"
+ ;;
esac
vmz="$tmpdir/`basename \"$kernel\"`.$ext"
@@ -321,7 +328,7 @@ fi
# post-processing needed for some platforms
case "$platform" in
-pseries|chrp)
+pseries|chrp|maple)
$objbin/addnote "$ofile"
;;
coff)
diff --git a/arch/powerpc/configs/44x/iss476-smp_defconfig b/arch/powerpc/configs/44x/iss476-smp_defconfig
new file mode 100644
index 000000000000..8683cbc6c3e1
--- /dev/null
+++ b/arch/powerpc/configs/44x/iss476-smp_defconfig
@@ -0,0 +1,1026 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.33
+# Thu Mar 4 11:50:12 2010
+#
+# CONFIG_PPC64 is not set
+
+#
+# Processor support
+#
+# CONFIG_PPC_BOOK3S_32 is not set
+# CONFIG_PPC_85xx is not set
+# CONFIG_PPC_8xx is not set
+# CONFIG_40x is not set
+CONFIG_44x=y
+# CONFIG_E200 is not set
+CONFIG_PPC_FPU=y
+CONFIG_4xx=y
+CONFIG_BOOKE=y
+CONFIG_PTE_64BIT=y
+CONFIG_PHYS_64BIT=y
+CONFIG_PPC_MMU_NOHASH=y
+CONFIG_PPC_MMU_NOHASH_32=y
+# CONFIG_PPC_MM_SLICES is not set
+CONFIG_SMP=y
+CONFIG_NR_CPUS=4
+# CONFIG_NOT_COHERENT_CACHE is not set
+CONFIG_PPC32=y
+CONFIG_WORD_SIZE=32
+CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
+CONFIG_MMU=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_TIME_VSYSCALL=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+# CONFIG_HAVE_SETUP_PER_CPU_AREA is not set
+# CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK is not set
+CONFIG_IRQ_PER_CPU=y
+CONFIG_NR_IRQS=512
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_ARCH_HAS_ILOG2_U32=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
+CONFIG_PPC=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_GENERIC_NVRAM=y
+CONFIG_SCHED_OMIT_FRAME_POINTER=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_PPC_OF=y
+CONFIG_OF=y
+CONFIG_PPC_UDBG_16550=y
+CONFIG_GENERIC_TBSYNC=y
+CONFIG_AUDIT_ARCH=y
+CONFIG_GENERIC_BUG=y
+CONFIG_DTC=y
+# CONFIG_DEFAULT_UIMAGE is not set
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
+CONFIG_PPC_DCR_NATIVE=y
+# CONFIG_PPC_DCR_MMIO is not set
+CONFIG_PPC_DCR=y
+CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_LOCK_KERNEL=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TINY_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_GROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_RT_GROUP_SCHED is not set
+CONFIG_USER_SCHED=y
+# CONFIG_CGROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_EMBEDDED=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_KALLSYMS_EXTRA_PASS=y
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_PERF_EVENTS=y
+CONFIG_EVENT_PROFILE=y
+# CONFIG_PERF_COUNTERS is not set
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+CONFIG_COMPAT_BRK=y
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+CONFIG_PROFILING=y
+CONFIG_TRACEPOINTS=y
+CONFIG_OPROFILE=y
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
+CONFIG_HAVE_IOREMAP_PROT=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_ARCH_TRACEHOOK=y
+CONFIG_HAVE_DMA_ATTRS=y
+CONFIG_USE_GENERIC_SMP_HELPERS=y
+CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+# CONFIG_SLOW_WORK is not set
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_STOP_MACHINE=y
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+CONFIG_MUTEX_SPIN_ON_OWNER=y
+# CONFIG_FREEZER is not set
+
+#
+# Platform support
+#
+# CONFIG_PPC_CELL is not set
+# CONFIG_PPC_CELL_NATIVE is not set
+# CONFIG_PQ2ADS is not set
+CONFIG_PPC_47x=y
+# CONFIG_BAMBOO is not set
+# CONFIG_EBONY is not set
+# CONFIG_SAM440EP is not set
+# CONFIG_SEQUOIA is not set
+# CONFIG_TAISHAN is not set
+# CONFIG_KATMAI is not set
+# CONFIG_RAINIER is not set
+# CONFIG_WARP is not set
+# CONFIG_ARCHES is not set
+# CONFIG_CANYONLANDS is not set
+# CONFIG_GLACIER is not set
+# CONFIG_REDWOOD is not set
+# CONFIG_EIGER is not set
+# CONFIG_YOSEMITE is not set
+CONFIG_ISS4xx=y
+# CONFIG_XILINX_VIRTEX440_GENERIC_BOARD is not set
+# CONFIG_PPC44x_SIMPLE is not set
+# CONFIG_PPC4xx_GPIO is not set
+# CONFIG_IPIC is not set
+CONFIG_MPIC=y
+# CONFIG_MPIC_WEIRD is not set
+# CONFIG_PPC_I8259 is not set
+# CONFIG_PPC_RTAS is not set
+# CONFIG_MMIO_NVRAM is not set
+# CONFIG_PPC_MPC106 is not set
+# CONFIG_PPC_970_NAP is not set
+# CONFIG_PPC_INDIRECT_IO is not set
+# CONFIG_GENERIC_IOMAP is not set
+# CONFIG_CPU_FREQ is not set
+# CONFIG_FSL_ULI1575 is not set
+CONFIG_OF_RTC=y
+# CONFIG_SIMPLE_GPIO is not set
+
+#
+# Kernel options
+#
+# CONFIG_HIGHMEM is not set
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_HZ_100=y
+# CONFIG_HZ_250 is not set
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=100
+# CONFIG_SCHED_HRTICK is not set
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_HAVE_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+CONFIG_MATH_EMULATION=y
+# CONFIG_IOMMU_HELPER is not set
+# CONFIG_SWIOTLB is not set
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+CONFIG_ARCH_HAS_WALK_MEMORY=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+CONFIG_IRQ_ALL_CPUS=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_MAX_ACTIVE_REGIONS=32
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_MIGRATION=y
+CONFIG_PHYS_ADDR_T_64BIT=y
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_STDBINUTILS=y
+CONFIG_PPC_4K_PAGES=y
+# CONFIG_PPC_16K_PAGES is not set
+# CONFIG_PPC_64K_PAGES is not set
+# CONFIG_PPC_256K_PAGES is not set
+CONFIG_FORCE_MAX_ZONEORDER=11
+CONFIG_PROC_DEVICETREE=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="root=/dev/issblk0"
+CONFIG_EXTRA_TARGETS=""
+CONFIG_SECCOMP=y
+CONFIG_ISA_DMA_API=y
+
+#
+# Bus options
+#
+CONFIG_ZONE_DMA=y
+CONFIG_4xx_SOC=y
+CONFIG_PPC_PCI_CHOICE=y
+# CONFIG_PCI is not set
+# CONFIG_PCI_DOMAINS is not set
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+# CONFIG_HAS_RAPIDIO is not set
+
+#
+# Advanced setup
+#
+# CONFIG_ADVANCED_OPTIONS is not set
+
+#
+# Default settings for advanced configuration options are used
+#
+CONFIG_LOWMEM_SIZE=0x30000000
+CONFIG_PAGE_OFFSET=0xc0000000
+CONFIG_KERNEL_START=0xc0000000
+CONFIG_PHYSICAL_START=0x00000000
+CONFIG_TASK_SIZE=0xc0000000
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NET_DROP_MONITOR is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+# CONFIG_LIB80211 is not set
+
+#
+# CFG80211 needs to be enabled for MAC80211
+#
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+CONFIG_CONNECTOR=y
+CONFIG_PROC_EVENTS=y
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+CONFIG_MTD_OF_PARTS=y
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+# CONFIG_MTD_CFI is not set
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_AMDSTD=y
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PHYSMAP is not set
+CONFIG_MTD_PHYSMAP_OF=y
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_NAND is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+CONFIG_OF_DEVICE=y
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_DRBD is not set
+# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=35000
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_XILINX_SYSACE is not set
+# CONFIG_BLK_DEV_HD is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_93CX6 is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+# CONFIG_MACINTOSH_DRIVERS is not set
+# CONFIG_NETDEVICES is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+# CONFIG_INPUT is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=4
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+CONFIG_SERIAL_8250_EXTENDED=y
+# CONFIG_SERIAL_8250_MANY_PORTS is not set
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+# CONFIG_SERIAL_8250_DETECT_IRQ is not set
+# CONFIG_SERIAL_8250_RSA is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_UARTLITE is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL is not set
+# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+# CONFIG_HVC_UDBG is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_NVRAM is not set
+# CONFIG_GEN_RTC is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_I2C is not set
+# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+# CONFIG_GPIOLIB is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+CONFIG_THERMAL=y
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+# CONFIG_SOUND is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_EDAC is not set
+# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_XATTR=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+CONFIG_CRAMFS=y
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+# CONFIG_NETWORK_FILESYSTEMS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_NLS is not set
+# CONFIG_DLM is not set
+CONFIG_BINARY_PRINTF=y
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
+CONFIG_NLATTR=y
+CONFIG_GENERIC_ATOMIC64=y
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+# CONFIG_DEBUG_PAGEALLOC is not set
+CONFIG_NOP_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
+CONFIG_TRACING=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_DMA_API_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_PPC_DISABLE_WERROR is not set
+CONFIG_PPC_WERROR=y
+CONFIG_PRINT_STACK_DEPTH=64
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_PPC_EMULATED_STATS is not set
+# CONFIG_CODE_PATCHING_SELFTEST is not set
+# CONFIG_FTR_FIXUP_SELFTEST is not set
+# CONFIG_MSI_BITMAP_SELFTEST is not set
+# CONFIG_XMON is not set
+# CONFIG_IRQSTACKS is not set
+# CONFIG_VIRQ_DEBUG is not set
+# CONFIG_BDI_SWITCH is not set
+CONFIG_PPC_EARLY_DEBUG=y
+# CONFIG_PPC_EARLY_DEBUG_LPAR is not set
+# CONFIG_PPC_EARLY_DEBUG_G5 is not set
+# CONFIG_PPC_EARLY_DEBUG_RTAS_PANEL is not set
+# CONFIG_PPC_EARLY_DEBUG_RTAS_CONSOLE is not set
+# CONFIG_PPC_EARLY_DEBUG_MAPLE is not set
+# CONFIG_PPC_EARLY_DEBUG_ISERIES is not set
+# CONFIG_PPC_EARLY_DEBUG_PAS_REALMODE is not set
+# CONFIG_PPC_EARLY_DEBUG_BEAT is not set
+CONFIG_PPC_EARLY_DEBUG_44x=y
+# CONFIG_PPC_EARLY_DEBUG_40x is not set
+# CONFIG_PPC_EARLY_DEBUG_CPM is not set
+# CONFIG_PPC_EARLY_DEBUG_USBGECKO is not set
+CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW=0x40000200
+CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH=0x1
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=y
+# CONFIG_CRYPTO_LRW is not set
+CONFIG_CRYPTO_PCBC=y
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
+# CONFIG_PPC_CLOCK is not set
+# CONFIG_VIRTUALIZATION is not set
diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h
index a9b91ed3d4b9..2048a6aeea91 100644
--- a/arch/powerpc/include/asm/asm-compat.h
+++ b/arch/powerpc/include/asm/asm-compat.h
@@ -21,6 +21,7 @@
/* operations for longs and pointers */
#define PPC_LL stringify_in_c(ld)
#define PPC_STL stringify_in_c(std)
+#define PPC_STLU stringify_in_c(stdu)
#define PPC_LCMPI stringify_in_c(cmpdi)
#define PPC_LONG stringify_in_c(.llong)
#define PPC_LONG_ALIGN stringify_in_c(.balign 8)
@@ -44,6 +45,7 @@
/* operations for longs and pointers */
#define PPC_LL stringify_in_c(lwz)
#define PPC_STL stringify_in_c(stw)
+#define PPC_STLU stringify_in_c(stwu)
#define PPC_LCMPI stringify_in_c(cmpwi)
#define PPC_LONG stringify_in_c(.long)
#define PPC_LONG_ALIGN stringify_in_c(.balign 4)
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index 81de6eb3455d..4b509411ad8a 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -12,8 +12,12 @@
#define L1_CACHE_SHIFT 6
#define MAX_COPY_PREFETCH 4
#elif defined(CONFIG_PPC32)
-#define L1_CACHE_SHIFT 5
#define MAX_COPY_PREFETCH 4
+#if defined(CONFIG_PPC_47x)
+#define L1_CACHE_SHIFT 7
+#else
+#define L1_CACHE_SHIFT 5
+#endif
#else /* CONFIG_PPC64 */
#define L1_CACHE_SHIFT 7
#endif
@@ -38,7 +42,7 @@ extern struct ppc64_caches ppc64_caches;
#endif /* __powerpc64__ && ! __ASSEMBLY__ */
#if !defined(__ASSEMBLY__)
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
#endif
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index abb833b0e58f..e3cba4e1eb34 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -72,6 +72,7 @@ extern int machine_check_4xx(struct pt_regs *regs);
extern int machine_check_440A(struct pt_regs *regs);
extern int machine_check_e500(struct pt_regs *regs);
extern int machine_check_e200(struct pt_regs *regs);
+extern int machine_check_47x(struct pt_regs *regs);
/* NOTE WELL: Update identify_cpu() if fields are added or removed! */
struct cpu_spec {
@@ -365,6 +366,7 @@ extern const char *powerpc_base_platform;
#define CPU_FTRS_44X (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
#define CPU_FTRS_440x6 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE | \
CPU_FTR_INDEXED_DCR)
+#define CPU_FTRS_47X (CPU_FTRS_440x6)
#define CPU_FTRS_E200 (CPU_FTR_USE_TB | CPU_FTR_SPE_COMP | \
CPU_FTR_NODSISRALIGN | CPU_FTR_COHERENT_ICACHE | \
CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_NOEXECUTE)
@@ -453,6 +455,9 @@ enum {
#ifdef CONFIG_44x
CPU_FTRS_44X | CPU_FTRS_440x6 |
#endif
+#ifdef CONFIG_PPC_47x
+ CPU_FTRS_47X |
+#endif
#ifdef CONFIG_E200
CPU_FTRS_E200 |
#endif
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index f0275818b95c..ebe7493e93e3 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -228,6 +228,7 @@
#define H_JOIN 0x298
#define H_VASI_STATE 0x2A4
#define H_ENABLE_CRQ 0x2B0
+#define H_GET_EM_PARMS 0x2B8
#define H_SET_MPP 0x2D0
#define H_GET_MPP 0x2D4
#define MAX_HCALL_OPCODE H_GET_MPP
diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
index 916369575c97..bca8fdcd2542 100644
--- a/arch/powerpc/include/asm/kmap_types.h
+++ b/arch/powerpc/include/asm/kmap_types.h
@@ -26,6 +26,7 @@ enum km_type {
KM_SOFTIRQ1,
KM_PPC_SYNC_PAGE,
KM_PPC_SYNC_ICACHE,
+ KM_KDB,
KM_TYPE_NR
};
diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h
index 81f3b0b5601e..6c5547d82bbe 100644
--- a/arch/powerpc/include/asm/kvm.h
+++ b/arch/powerpc/include/asm/kvm.h
@@ -77,4 +77,14 @@ struct kvm_debug_exit_arch {
struct kvm_guest_debug_arch {
};
+#define KVM_REG_MASK 0x001f
+#define KVM_REG_EXT_MASK 0xffe0
+#define KVM_REG_GPR 0x0000
+#define KVM_REG_FPR 0x0020
+#define KVM_REG_QPR 0x0040
+#define KVM_REG_FQPR 0x0060
+
+#define KVM_INTERRUPT_SET -1U
+#define KVM_INTERRUPT_UNSET -2U
+
#endif /* __LINUX_KVM_POWERPC_H */
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index aadf2dd6f84e..c5ea4cda34b3 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -88,6 +88,8 @@
#define BOOK3S_HFLAG_DCBZ32 0x1
#define BOOK3S_HFLAG_SLB 0x2
+#define BOOK3S_HFLAG_PAIRED_SINGLE 0x4
+#define BOOK3S_HFLAG_NATIVE_PS 0x8
#define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */
#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index db7db0a96967..6f74d93725a0 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -22,46 +22,47 @@
#include <linux/types.h>
#include <linux/kvm_host.h>
-#include <asm/kvm_book3s_64_asm.h>
+#include <asm/kvm_book3s_asm.h>
struct kvmppc_slb {
u64 esid;
u64 vsid;
u64 orige;
u64 origv;
- bool valid;
- bool Ks;
- bool Kp;
- bool nx;
- bool large; /* PTEs are 16MB */
- bool tb; /* 1TB segment */
- bool class;
+ bool valid : 1;
+ bool Ks : 1;
+ bool Kp : 1;
+ bool nx : 1;
+ bool large : 1; /* PTEs are 16MB */
+ bool tb : 1; /* 1TB segment */
+ bool class : 1;
};
struct kvmppc_sr {
u32 raw;
u32 vsid;
- bool Ks;
- bool Kp;
- bool nx;
+ bool Ks : 1;
+ bool Kp : 1;
+ bool nx : 1;
+ bool valid : 1;
};
struct kvmppc_bat {
u64 raw;
u32 bepi;
u32 bepi_mask;
- bool vs;
- bool vp;
u32 brpn;
u8 wimg;
u8 pp;
+ bool vs : 1;
+ bool vp : 1;
};
struct kvmppc_sid_map {
u64 guest_vsid;
u64 guest_esid;
u64 host_vsid;
- bool valid;
+ bool valid : 1;
};
#define SID_MAP_BITS 9
@@ -70,7 +71,7 @@ struct kvmppc_sid_map {
struct kvmppc_vcpu_book3s {
struct kvm_vcpu vcpu;
- struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
+ struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
struct kvmppc_sid_map sid_map[SID_MAP_NUM];
struct kvmppc_slb slb[64];
struct {
@@ -82,9 +83,10 @@ struct kvmppc_vcpu_book3s {
struct kvmppc_bat ibat[8];
struct kvmppc_bat dbat[8];
u64 hid[6];
+ u64 gqr[8];
int slb_nr;
+ u32 dsisr;
u64 sdr1;
- u64 dsisr;
u64 hior;
u64 msr_mask;
u64 vsid_first;
@@ -98,15 +100,15 @@ struct kvmppc_vcpu_book3s {
#define CONTEXT_GUEST 1
#define CONTEXT_GUEST_END 2
-#define VSID_REAL 0xfffffffffff00000
-#define VSID_REAL_DR 0xffffffffffe00000
-#define VSID_REAL_IR 0xffffffffffd00000
-#define VSID_BAT 0xffffffffffc00000
-#define VSID_PR 0x8000000000000000
+#define VSID_REAL 0x1fffffffffc00000ULL
+#define VSID_BAT 0x1fffffffffb00000ULL
+#define VSID_REAL_DR 0x2000000000000000ULL
+#define VSID_REAL_IR 0x4000000000000000ULL
+#define VSID_PR 0x8000000000000000ULL
-extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, u64 ea, u64 ea_mask);
+extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
-extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, u64 pa_start, u64 pa_end);
+extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
@@ -114,11 +116,13 @@ extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
extern struct kvmppc_pte *kvmppc_mmu_find_pte(struct kvm_vcpu *vcpu, u64 ea, bool data);
-extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr, bool data);
-extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr);
+extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
+extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
bool upper, u32 val);
+extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
+extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern u32 kvmppc_trampoline_lowmem;
extern u32 kvmppc_trampoline_enter;
@@ -126,6 +130,8 @@ extern void kvmppc_rmcall(ulong srr0, ulong srr1);
extern void kvmppc_load_up_fpu(void);
extern void kvmppc_load_up_altivec(void);
extern void kvmppc_load_up_vsx(void);
+extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
+extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
{
@@ -140,7 +146,108 @@ static inline ulong dsisr(void)
}
extern void kvm_return_point(void);
+static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu);
+
+static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
+{
+ if ( num < 14 ) {
+ to_svcpu(vcpu)->gpr[num] = val;
+ to_book3s(vcpu)->shadow_vcpu->gpr[num] = val;
+ } else
+ vcpu->arch.gpr[num] = val;
+}
+
+static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
+{
+ if ( num < 14 )
+ return to_svcpu(vcpu)->gpr[num];
+ else
+ return vcpu->arch.gpr[num];
+}
+
+static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
+{
+ to_svcpu(vcpu)->cr = val;
+ to_book3s(vcpu)->shadow_vcpu->cr = val;
+}
+
+static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
+{
+ return to_svcpu(vcpu)->cr;
+}
+
+static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
+{
+ to_svcpu(vcpu)->xer = val;
+ to_book3s(vcpu)->shadow_vcpu->xer = val;
+}
+
+static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
+{
+ return to_svcpu(vcpu)->xer;
+}
+
+static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
+{
+ to_svcpu(vcpu)->ctr = val;
+}
+
+static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
+{
+ return to_svcpu(vcpu)->ctr;
+}
+
+static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
+{
+ to_svcpu(vcpu)->lr = val;
+}
+
+static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
+{
+ return to_svcpu(vcpu)->lr;
+}
+
+static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
+{
+ to_svcpu(vcpu)->pc = val;
+}
+
+static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
+{
+ return to_svcpu(vcpu)->pc;
+}
+
+static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
+{
+ ulong pc = kvmppc_get_pc(vcpu);
+ struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);
+
+ /* Load the instruction manually if it failed to do so in the
+ * exit path */
+ if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
+ kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
+
+ return svcpu->last_inst;
+}
+
+static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
+{
+ return to_svcpu(vcpu)->fault_dar;
+}
+
+/* Magic register values loaded into r3 and r4 before the 'sc' assembly
+ * instruction for the OSI hypercalls */
+#define OSI_SC_MAGIC_R3 0x113724FA
+#define OSI_SC_MAGIC_R4 0x77810F9B
#define INS_DCBZ 0x7c0007ec
+/* Also add subarch specific defines */
+
+#ifdef CONFIG_PPC_BOOK3S_32
+#include <asm/kvm_book3s_32.h>
+#else
+#include <asm/kvm_book3s_64.h>
+#endif
+
#endif /* __ASM_KVM_BOOK3S_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_32.h b/arch/powerpc/include/asm/kvm_book3s_32.h
new file mode 100644
index 000000000000..de604db135f5
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_book3s_32.h
@@ -0,0 +1,42 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright SUSE Linux Products GmbH 2010
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#ifndef __ASM_KVM_BOOK3S_32_H__
+#define __ASM_KVM_BOOK3S_32_H__
+
+static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu)
+{
+ return to_book3s(vcpu)->shadow_vcpu;
+}
+
+#define PTE_SIZE 12
+#define VSID_ALL 0
+#define SR_INVALID 0x00000001 /* VSID 1 should always be unused */
+#define SR_KP 0x20000000
+#define PTE_V 0x80000000
+#define PTE_SEC 0x00000040
+#define PTE_M 0x00000010
+#define PTE_R 0x00000100
+#define PTE_C 0x00000080
+
+#define SID_SHIFT 28
+#define ESID_MASK 0xf0000000
+#define VSID_MASK 0x00fffffff0000000ULL
+
+#endif /* __ASM_KVM_BOOK3S_32_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
new file mode 100644
index 000000000000..4cadd612d575
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -0,0 +1,28 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright SUSE Linux Products GmbH 2010
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#ifndef __ASM_KVM_BOOK3S_64_H__
+#define __ASM_KVM_BOOK3S_64_H__
+
+static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu)
+{
+ return &get_paca()->shadow_vcpu;
+}
+
+#endif /* __ASM_KVM_BOOK3S_64_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_64_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 183461b48407..36fdb3aff30b 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -22,7 +22,7 @@
#ifdef __ASSEMBLY__
-#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#ifdef CONFIG_KVM_BOOK3S_HANDLER
#include <asm/kvm_asm.h>
@@ -55,7 +55,7 @@ kvmppc_resume_\intno:
.macro DO_KVM intno
.endm
-#endif /* CONFIG_KVM_BOOK3S_64_HANDLER */
+#endif /* CONFIG_KVM_BOOK3S_HANDLER */
#else /*__ASSEMBLY__ */
@@ -63,12 +63,33 @@ struct kvmppc_book3s_shadow_vcpu {
ulong gpr[14];
u32 cr;
u32 xer;
+
+ u32 fault_dsisr;
+ u32 last_inst;
+ ulong ctr;
+ ulong lr;
+ ulong pc;
+ ulong shadow_srr1;
+ ulong fault_dar;
+
ulong host_r1;
ulong host_r2;
ulong handler;
ulong scratch0;
ulong scratch1;
ulong vmhandler;
+ u8 in_guest;
+
+#ifdef CONFIG_PPC_BOOK3S_32
+ u32 sr[16]; /* Guest SRs */
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ u8 slb_max; /* highest used guest slb entry */
+ struct {
+ u64 esid;
+ u64 vsid;
+ } slb[64]; /* guest SLB */
+#endif
};
#endif /*__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
new file mode 100644
index 000000000000..9c9ba3d59b1b
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -0,0 +1,96 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright SUSE Linux Products GmbH 2010
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#ifndef __ASM_KVM_BOOKE_H__
+#define __ASM_KVM_BOOKE_H__
+
+#include <linux/types.h>
+#include <linux/kvm_host.h>
+
+static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
+{
+ vcpu->arch.gpr[num] = val;
+}
+
+static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
+{
+ return vcpu->arch.gpr[num];
+}
+
+static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
+{
+ vcpu->arch.cr = val;
+}
+
+static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.cr;
+}
+
+static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
+{
+ vcpu->arch.xer = val;
+}
+
+static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.xer;
+}
+
+static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.last_inst;
+}
+
+static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
+{
+ vcpu->arch.ctr = val;
+}
+
+static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.ctr;
+}
+
+static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
+{
+ vcpu->arch.lr = val;
+}
+
+static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.lr;
+}
+
+static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
+{
+ vcpu->arch.pc = val;
+}
+
+static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.pc;
+}
+
+static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.fault_dear;
+}
+
+#endif /* __ASM_KVM_BOOKE_H__ */
diff --git a/arch/powerpc/include/asm/kvm_fpu.h b/arch/powerpc/include/asm/kvm_fpu.h
new file mode 100644
index 000000000000..94f05de9ad04
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_fpu.h
@@ -0,0 +1,85 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright Novell Inc. 2010
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#ifndef __ASM_KVM_FPU_H__
+#define __ASM_KVM_FPU_H__
+
+#include <linux/types.h>
+
+extern void fps_fres(struct thread_struct *t, u32 *dst, u32 *src1);
+extern void fps_frsqrte(struct thread_struct *t, u32 *dst, u32 *src1);
+extern void fps_fsqrts(struct thread_struct *t, u32 *dst, u32 *src1);
+
+extern void fps_fadds(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2);
+extern void fps_fdivs(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2);
+extern void fps_fmuls(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2);
+extern void fps_fsubs(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2);
+
+extern void fps_fmadds(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2,
+ u32 *src3);
+extern void fps_fmsubs(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2,
+ u32 *src3);
+extern void fps_fnmadds(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2,
+ u32 *src3);
+extern void fps_fnmsubs(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2,
+ u32 *src3);
+extern void fps_fsel(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2,
+ u32 *src3);
+
+#define FPD_ONE_IN(name) extern void fpd_ ## name(u64 *fpscr, u32 *cr, \
+ u64 *dst, u64 *src1);
+#define FPD_TWO_IN(name) extern void fpd_ ## name(u64 *fpscr, u32 *cr, \
+ u64 *dst, u64 *src1, u64 *src2);
+#define FPD_THREE_IN(name) extern void fpd_ ## name(u64 *fpscr, u32 *cr, \
+ u64 *dst, u64 *src1, u64 *src2, u64 *src3);
+
+extern void fpd_fcmpu(u64 *fpscr, u32 *cr, u64 *src1, u64 *src2);
+extern void fpd_fcmpo(u64 *fpscr, u32 *cr, u64 *src1, u64 *src2);
+
+FPD_ONE_IN(fsqrts)
+FPD_ONE_IN(frsqrtes)
+FPD_ONE_IN(fres)
+FPD_ONE_IN(frsp)
+FPD_ONE_IN(fctiw)
+FPD_ONE_IN(fctiwz)
+FPD_ONE_IN(fsqrt)
+FPD_ONE_IN(fre)
+FPD_ONE_IN(frsqrte)
+FPD_ONE_IN(fneg)
+FPD_ONE_IN(fabs)
+FPD_TWO_IN(fadds)
+FPD_TWO_IN(fsubs)
+FPD_TWO_IN(fdivs)
+FPD_TWO_IN(fmuls)
+FPD_TWO_IN(fcpsgn)
+FPD_TWO_IN(fdiv)
+FPD_TWO_IN(fadd)
+FPD_TWO_IN(fmul)
+FPD_TWO_IN(fsub)
+FPD_THREE_IN(fmsubs)
+FPD_THREE_IN(fmadds)
+FPD_THREE_IN(fnmsubs)
+FPD_THREE_IN(fnmadds)
+FPD_THREE_IN(fsel)
+FPD_THREE_IN(fmsub)
+FPD_THREE_IN(fmadd)
+FPD_THREE_IN(fnmsub)
+FPD_THREE_IN(fnmadd)
+
+#endif
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 5e5bae7e152f..0c9ad869decd 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -66,7 +66,7 @@ struct kvm_vcpu_stat {
u32 dec_exits;
u32 ext_intr_exits;
u32 halt_wakeup;
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S
u32 pf_storage;
u32 pf_instruc;
u32 sp_storage;
@@ -124,12 +124,12 @@ struct kvm_arch {
};
struct kvmppc_pte {
- u64 eaddr;
+ ulong eaddr;
u64 vpage;
- u64 raddr;
- bool may_read;
- bool may_write;
- bool may_execute;
+ ulong raddr;
+ bool may_read : 1;
+ bool may_write : 1;
+ bool may_execute : 1;
};
struct kvmppc_mmu {
@@ -145,7 +145,7 @@ struct kvmppc_mmu {
int (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data);
void (*reset_msr)(struct kvm_vcpu *vcpu);
void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large);
- int (*esid_to_vsid)(struct kvm_vcpu *vcpu, u64 esid, u64 *vsid);
+ int (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid);
u64 (*ea_to_vp)(struct kvm_vcpu *vcpu, gva_t eaddr, bool data);
bool (*is_dcbz32)(struct kvm_vcpu *vcpu);
};
@@ -160,7 +160,7 @@ struct hpte_cache {
struct kvm_vcpu_arch {
ulong host_stack;
u32 host_pid;
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S
ulong host_msr;
ulong host_r2;
void *host_retip;
@@ -175,7 +175,7 @@ struct kvm_vcpu_arch {
ulong gpr[32];
u64 fpr[32];
- u32 fpscr;
+ u64 fpscr;
#ifdef CONFIG_ALTIVEC
vector128 vr[32];
@@ -186,19 +186,23 @@ struct kvm_vcpu_arch {
u64 vsr[32];
#endif
+#ifdef CONFIG_PPC_BOOK3S
+ /* For Gekko paired singles */
+ u32 qpr[32];
+#endif
+
+#ifdef CONFIG_BOOKE
ulong pc;
ulong ctr;
ulong lr;
-#ifdef CONFIG_BOOKE
ulong xer;
u32 cr;
#endif
ulong msr;
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S
ulong shadow_msr;
- ulong shadow_srr1;
ulong hflags;
ulong guest_owned_ext;
#endif
@@ -253,20 +257,22 @@ struct kvm_vcpu_arch {
struct dentry *debugfs_exit_timing;
#endif
+#ifdef CONFIG_BOOKE
u32 last_inst;
-#ifdef CONFIG_PPC64
- ulong fault_dsisr;
-#endif
ulong fault_dear;
ulong fault_esr;
ulong queued_dear;
ulong queued_esr;
+#endif
gpa_t paddr_accessed;
u8 io_gpr; /* GPR used as IO source/target */
u8 mmio_is_bigendian;
+ u8 mmio_sign_extend;
u8 dcr_needed;
u8 dcr_is_write;
+ u8 osi_needed;
+ u8 osi_enabled;
u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */
@@ -275,7 +281,7 @@ struct kvm_vcpu_arch {
u64 dec_jiffies;
unsigned long pending_exceptions;
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S
struct hpte_cache hpte_cache[HPTEG_CACHE_NUM];
int hpte_cache_offset;
#endif
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index e2642829e435..18d139ec2d22 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -30,6 +30,8 @@
#include <linux/kvm_host.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
+#else
+#include <asm/kvm_booke.h>
#endif
enum emulation_result {
@@ -37,6 +39,7 @@ enum emulation_result {
EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */
EMULATE_DO_DCR, /* kvm_run filled with DCR request */
EMULATE_FAIL, /* can't emulate this instruction */
+ EMULATE_AGAIN, /* something went wrong. go again */
};
extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
@@ -48,8 +51,11 @@ extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
int is_bigendian);
+extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int rt, unsigned int bytes,
+ int is_bigendian);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
- u32 val, unsigned int bytes, int is_bigendian);
+ u64 val, unsigned int bytes, int is_bigendian);
extern int kvmppc_emulate_instruction(struct kvm_run *run,
struct kvm_vcpu *vcpu);
@@ -63,6 +69,7 @@ extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
+extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
@@ -88,6 +95,8 @@ extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
struct kvm_interrupt *irq);
+extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
+ struct kvm_interrupt *irq);
extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int op, int *advance);
@@ -99,81 +108,37 @@ extern void kvmppc_booke_exit(void);
extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
-#ifdef CONFIG_PPC_BOOK3S
-
-/* We assume we're always acting on the current vcpu */
-
-static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
-{
- if ( num < 14 ) {
- get_paca()->shadow_vcpu.gpr[num] = val;
- to_book3s(vcpu)->shadow_vcpu.gpr[num] = val;
- } else
- vcpu->arch.gpr[num] = val;
-}
-
-static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
-{
- if ( num < 14 )
- return get_paca()->shadow_vcpu.gpr[num];
- else
- return vcpu->arch.gpr[num];
-}
-
-static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
-{
- get_paca()->shadow_vcpu.cr = val;
- to_book3s(vcpu)->shadow_vcpu.cr = val;
-}
-
-static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
-{
- return get_paca()->shadow_vcpu.cr;
-}
-
-static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
-{
- get_paca()->shadow_vcpu.xer = val;
- to_book3s(vcpu)->shadow_vcpu.xer = val;
-}
-
-static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
+/*
+ * Cuts out inst bits with ordering according to spec.
+ * That means the leftmost bit is zero. All given bits are included.
+ */
+static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
- return get_paca()->shadow_vcpu.xer;
-}
+ u32 r;
+ u32 mask;
-#else
+ BUG_ON(msb > lsb);
-static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
-{
- vcpu->arch.gpr[num] = val;
-}
+ mask = (1 << (lsb - msb + 1)) - 1;
+ r = (inst >> (63 - lsb)) & mask;
-static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
-{
- return vcpu->arch.gpr[num];
+ return r;
}
-static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
+/*
+ * Replaces inst bits with ordering according to spec.
+ */
+static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
- vcpu->arch.cr = val;
-}
+ u32 r;
+ u32 mask;
-static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.cr;
-}
+ BUG_ON(msb > lsb);
-static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
-{
- vcpu->arch.xer = val;
-}
+ mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
+ r = (inst & ~mask) | ((value << (63 - lsb)) & mask);
-static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.xer;
+ return r;
}
-#endif
-
#endif /* __POWERPC_KVM_PPC_H__ */
diff --git a/arch/powerpc/include/asm/mmu-44x.h b/arch/powerpc/include/asm/mmu-44x.h
index 0372669383a8..bf52d704fc47 100644
--- a/arch/powerpc/include/asm/mmu-44x.h
+++ b/arch/powerpc/include/asm/mmu-44x.h
@@ -40,7 +40,7 @@
#define PPC44x_TLB_I 0x00000400 /* Caching is inhibited */
#define PPC44x_TLB_M 0x00000200 /* Memory is coherent */
#define PPC44x_TLB_G 0x00000100 /* Memory is guarded */
-#define PPC44x_TLB_E 0x00000080 /* Memory is guarded */
+#define PPC44x_TLB_E 0x00000080 /* Memory is little endian */
#define PPC44x_TLB_PERM_MASK 0x0000003f
#define PPC44x_TLB_UX 0x00000020 /* User execution */
@@ -53,6 +53,52 @@
/* Number of TLB entries */
#define PPC44x_TLB_SIZE 64
+/* 47x bits */
+#define PPC47x_MMUCR_TID 0x0000ffff
+#define PPC47x_MMUCR_STS 0x00010000
+
+/* Page identification fields */
+#define PPC47x_TLB0_EPN_MASK 0xfffff000 /* Effective Page Number */
+#define PPC47x_TLB0_VALID 0x00000800 /* Valid flag */
+#define PPC47x_TLB0_TS 0x00000400 /* Translation address space */
+#define PPC47x_TLB0_4K 0x00000000
+#define PPC47x_TLB0_16K 0x00000010
+#define PPC47x_TLB0_64K 0x00000030
+#define PPC47x_TLB0_1M 0x00000070
+#define PPC47x_TLB0_16M 0x000000f0
+#define PPC47x_TLB0_256M 0x000001f0
+#define PPC47x_TLB0_1G 0x000003f0
+#define PPC47x_TLB0_BOLTED_R 0x00000008 /* tlbre only */
+
+/* Translation fields */
+#define PPC47x_TLB1_RPN_MASK 0xfffff000 /* Real Page Number */
+#define PPC47x_TLB1_ERPN_MASK 0x000003ff
+
+/* Storage attribute and access control fields */
+#define PPC47x_TLB2_ATTR_MASK 0x0003ff80
+#define PPC47x_TLB2_IL1I 0x00020000 /* Inhibit L1 instruction cache */
+#define PPC47x_TLB2_IL1D 0x00010000 /* Inhibit L1 data cache */
+#define PPC47x_TLB2_U0 0x00008000 /* User 0 */
+#define PPC47x_TLB2_U1 0x00004000 /* User 1 */
+#define PPC47x_TLB2_U2 0x00002000 /* User 2 */
+#define PPC47x_TLB2_U3 0x00001000 /* User 3 */
+#define PPC47x_TLB2_W 0x00000800 /* Caching is write-through */
+#define PPC47x_TLB2_I 0x00000400 /* Caching is inhibited */
+#define PPC47x_TLB2_M 0x00000200 /* Memory is coherent */
+#define PPC47x_TLB2_G 0x00000100 /* Memory is guarded */
+#define PPC47x_TLB2_E 0x00000080 /* Memory is little endian */
+#define PPC47x_TLB2_PERM_MASK 0x0000003f
+#define PPC47x_TLB2_UX 0x00000020 /* User execution */
+#define PPC47x_TLB2_UW 0x00000010 /* User write */
+#define PPC47x_TLB2_UR 0x00000008 /* User read */
+#define PPC47x_TLB2_SX 0x00000004 /* Super execution */
+#define PPC47x_TLB2_SW 0x00000002 /* Super write */
+#define PPC47x_TLB2_SR 0x00000001 /* Super read */
+#define PPC47x_TLB2_U_RWX (PPC47x_TLB2_UX|PPC47x_TLB2_UW|PPC47x_TLB2_UR)
+#define PPC47x_TLB2_S_RWX (PPC47x_TLB2_SX|PPC47x_TLB2_SW|PPC47x_TLB2_SR)
+#define PPC47x_TLB2_S_RW (PPC47x_TLB2_SW | PPC47x_TLB2_SR)
+#define PPC47x_TLB2_IMG (PPC47x_TLB2_I | PPC47x_TLB2_M | PPC47x_TLB2_G)
+
#ifndef __ASSEMBLY__
extern unsigned int tlb_44x_hwater;
@@ -79,12 +125,15 @@ typedef struct {
#if (PAGE_SHIFT == 12)
#define PPC44x_TLBE_SIZE PPC44x_TLB_4K
+#define PPC47x_TLBE_SIZE PPC47x_TLB0_4K
#define mmu_virtual_psize MMU_PAGE_4K
#elif (PAGE_SHIFT == 14)
#define PPC44x_TLBE_SIZE PPC44x_TLB_16K
+#define PPC47x_TLBE_SIZE PPC47x_TLB0_16K
#define mmu_virtual_psize MMU_PAGE_16K
#elif (PAGE_SHIFT == 16)
#define PPC44x_TLBE_SIZE PPC44x_TLB_64K
+#define PPC47x_TLBE_SIZE PPC47x_TLB0_64K
#define mmu_virtual_psize MMU_PAGE_64K
#elif (PAGE_SHIFT == 18)
#define PPC44x_TLBE_SIZE PPC44x_TLB_256K
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 7ffbb65ff7a9..7ebf42ed84a2 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -18,6 +18,7 @@
#define MMU_FTR_TYPE_44x ASM_CONST(0x00000008)
#define MMU_FTR_TYPE_FSL_E ASM_CONST(0x00000010)
#define MMU_FTR_TYPE_3E ASM_CONST(0x00000020)
+#define MMU_FTR_TYPE_47x ASM_CONST(0x00000040)
/*
* This is individual features
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 26383e0778aa..81fb41289d6c 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -27,6 +27,8 @@ extern int __init_new_context(void);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }
#else
+extern unsigned long __init_new_context(void);
+extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);
#endif
diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h
index 35acac90c8ca..aac87cbceb57 100644
--- a/arch/powerpc/include/asm/mmzone.h
+++ b/arch/powerpc/include/asm/mmzone.h
@@ -30,7 +30,7 @@ extern struct pglist_data *node_data[];
*/
extern int numa_cpu_lookup_table[];
-extern cpumask_t numa_cpumask_lookup_table[];
+extern cpumask_var_t node_to_cpumask_map[];
#ifdef CONFIG_MEMORY_HOTPLUG
extern unsigned long max_pfn;
#endif
diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h
index 61913d9a21a0..e000cce8f6dd 100644
--- a/arch/powerpc/include/asm/mpic.h
+++ b/arch/powerpc/include/asm/mpic.h
@@ -463,9 +463,6 @@ extern void mpic_cpu_set_priority(int prio);
/* Request IPIs on primary mpic */
extern void mpic_request_ipis(void);
-/* Send an IPI (non offseted number 0..3) */
-extern void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask);
-
/* Send a message (IPI) to a given target (cpu number or MSG_*) */
void smp_mpic_message_pass(int target, int msg);
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index a011603d4079..224eb371ca1d 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -23,7 +23,7 @@
#include <asm/page.h>
#include <asm/exception-64e.h>
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-#include <asm/kvm_book3s_64_asm.h>
+#include <asm/kvm_book3s_asm.h>
#endif
register struct paca_struct *local_paca asm("r13");
@@ -136,15 +136,9 @@ struct paca_struct {
u64 startpurr; /* PURR/TB value snapshot */
u64 startspurr; /* SPURR value snapshot */
-#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
- struct {
- u64 esid;
- u64 vsid;
- } kvm_slb[64]; /* guest SLB */
+#ifdef CONFIG_KVM_BOOK3S_HANDLER
/* We use this to store guest state in */
struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
- u8 kvm_slb_max; /* highest used guest slb entry */
- u8 kvm_in_guest; /* are we inside the guest? */
#endif
};
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index bfc4e027e2ad..358ff14ea25e 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -162,14 +162,6 @@ do { \
#endif /* !CONFIG_HUGETLB_PAGE */
-#ifdef MODULE
-#define __page_aligned __attribute__((__aligned__(PAGE_SIZE)))
-#else
-#define __page_aligned \
- __attribute__((__aligned__(PAGE_SIZE), \
- __section__(".data.page_aligned")))
-#endif
-
#define VM_DATA_DEFAULT_FLAGS \
(test_thread_flag(TIF_32BIT) ? \
VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)
diff --git a/arch/powerpc/include/asm/parport.h b/arch/powerpc/include/asm/parport.h
index 94942d60ddfd..1ca1102b4a2f 100644
--- a/arch/powerpc/include/asm/parport.h
+++ b/arch/powerpc/include/asm/parport.h
@@ -19,6 +19,8 @@ static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
u32 io1, io2;
int propsize;
int count = 0;
+ int virq;
+
for (np = NULL; (np = of_find_compatible_node(np,
"parallel",
"pnpPNP,400")) != NULL;) {
@@ -26,10 +28,13 @@ static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
if (!prop || propsize > 6*sizeof(u32))
continue;
io1 = prop[1]; io2 = prop[2];
- prop = of_get_property(np, "interrupts", NULL);
- if (!prop)
+
+ virq = irq_of_parse_and_map(np, 0);
+ if (virq == NO_IRQ)
continue;
- if (parport_pc_probe_port(io1, io2, prop[0], autodma, NULL, 0) != NULL)
+
+ if (parport_pc_probe_port(io1, io2, virq, autodma, NULL, 0)
+ != NULL)
count++;
}
return count;
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index 605f5c5398d1..292725cec2e3 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -11,6 +11,12 @@
#include <linux/cpumask.h>
#include <linux/percpu.h>
+struct vmemmap_backing {
+ struct vmemmap_backing *list;
+ unsigned long phys;
+ unsigned long virt_addr;
+};
+
/*
* Functions that deal with pagetables that could be at any level of
* the table need to be passed an "index_size" so they know how to
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index 55646adfa843..a7db96f2b5c3 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -287,7 +287,7 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
#define pmd_page_vaddr(pmd) \
((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd) \
- (mem_map + (pmd_val(pmd) >> PAGE_SHIFT))
+ pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd) \
((unsigned long) (pmd_val(pmd) & PAGE_MASK))
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 221ba6240464..7492fe8ad6e4 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -229,6 +229,9 @@ struct thread_struct {
unsigned long spefscr; /* SPE & eFP status */
int used_spe; /* set if process has used spe */
#endif /* CONFIG_SPE */
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
+ void* kvm_shadow_vcpu; /* KVM internal data */
+#endif /* CONFIG_KVM_BOOK3S_32_HANDLER */
};
#define ARCH_MIN_TASKALIGN 16
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index 9e2d84c06b74..5d8be0416227 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -89,6 +89,7 @@ struct pt_regs {
#define instruction_pointer(regs) ((regs)->nip)
#define user_stack_pointer(regs) ((regs)->gpr[1])
+#define kernel_stack_pointer(regs) ((regs)->gpr[1])
#define regs_return_value(regs) ((regs)->gpr[3])
#ifdef CONFIG_SMP
@@ -141,6 +142,69 @@ do { \
#define arch_has_block_step() (!cpu_has_feature(CPU_FTR_601))
#define ARCH_HAS_USER_SINGLE_STEP_INFO
+/*
+ * kprobe-based event tracer support
+ */
+
+#include <linux/stddef.h>
+#include <linux/thread_info.h>
+extern int regs_query_register_offset(const char *name);
+extern const char *regs_query_register_name(unsigned int offset);
+#define MAX_REG_OFFSET (offsetof(struct pt_regs, dsisr))
+
+/**
+ * regs_get_register() - get register value from its offset
+ * @regs: pt_regs from which register value is gotten
+ * @offset: offset number of the register.
+ *
+ * regs_get_register() returns the value of the register whose byte offset
+ * from the start of @regs is @offset, i.e. its offset within struct pt_regs.
+ * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
+ */
+static inline unsigned long regs_get_register(struct pt_regs *regs,
+ unsigned int offset)
+{
+ if (unlikely(offset > MAX_REG_OFFSET))
+ return 0;
+ return *(unsigned long *)((unsigned long)regs + offset);
+}
+
+/**
+ * regs_within_kernel_stack() - check the address in the stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @addr: address which is checked.
+ *
+ * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
+ * If @addr is within the kernel stack, it returns true. If not, returns false.
+ */
+
+static inline bool regs_within_kernel_stack(struct pt_regs *regs,
+ unsigned long addr)
+{
+ return ((addr & ~(THREAD_SIZE - 1)) ==
+ (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
+}
+
+/**
+ * regs_get_kernel_stack_nth() - get Nth entry of the stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @n: stack entry number.
+ *
+ * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
+ * is specified by @regs. If the @n th entry is NOT in the kernel stack,
+ * this returns 0.
+ */
+static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
+ unsigned int n)
+{
+ unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
+ addr += n;
+ if (regs_within_kernel_stack(regs, (unsigned long)addr))
+ return *addr;
+ else
+ return 0;
+}
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
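
A short sketch of how a kprobe-based tracer might consume the accessors introduced
above; the handler and the register name "gpr3" are illustrative assumptions, not
part of the patch.

#include <linux/kernel.h>
#include <asm/ptrace.h>

static void demo_dump_args(struct pt_regs *regs)
{
	int off = regs_query_register_offset("gpr3");	/* negative if unknown */
	unsigned long arg0 = 0, stack1;

	if (off >= 0)
		arg0 = regs_get_register(regs, off);	/* r3: first argument */

	/* Second word on the kernel stack, or 0 if out of range */
	stack1 = regs_get_kernel_stack_nth(regs, 1);

	pr_info("arg0=%lx stack[1]=%lx\n", arg0, stack1);
}
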
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 5572e86223f4..d62fdf4e504b 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -293,10 +293,12 @@
#define HID1_ABE (1<<10) /* 7450 Address Broadcast Enable */
#define HID1_PS (1<<16) /* 750FX PLL selection */
#define SPRN_HID2 0x3F8 /* Hardware Implementation Register 2 */
+#define SPRN_HID2_GEKKO 0x398 /* Gekko HID2 Register */
#define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */
#define SPRN_IABR2 0x3FA /* 83xx */
#define SPRN_IBCR 0x135 /* 83xx Insn Breakpoint Control Reg */
#define SPRN_HID4 0x3F4 /* 970 HID4 */
+#define SPRN_HID4_GEKKO 0x3F3 /* Gekko HID4 */
#define SPRN_HID5 0x3F6 /* 970 HID5 */
#define SPRN_HID6 0x3F9 /* BE HID 6 */
#define HID6_LB (0x0F<<12) /* Concurrent Large Page Modes */
@@ -465,6 +467,14 @@
#define SPRN_VRSAVE 0x100 /* Vector Register Save Register */
#define SPRN_XER 0x001 /* Fixed Point Exception Register */
+#define SPRN_MMCR0_GEKKO 0x3B8 /* Gekko Monitor Mode Control Register 0 */
+#define SPRN_MMCR1_GEKKO 0x3BC /* Gekko Monitor Mode Control Register 1 */
+#define SPRN_PMC1_GEKKO 0x3B9 /* Gekko Performance Monitor Control 1 */
+#define SPRN_PMC2_GEKKO 0x3BA /* Gekko Performance Monitor Control 2 */
+#define SPRN_PMC3_GEKKO 0x3BD /* Gekko Performance Monitor Control 3 */
+#define SPRN_PMC4_GEKKO 0x3BE /* Gekko Performance Monitor Control 4 */
+#define SPRN_WPAR_GEKKO 0x399 /* Gekko Write Pipe Address Register */
+
#define SPRN_SCOMC 0x114 /* SCOM Access Control */
#define SPRN_SCOMD 0x115 /* SCOM Access DATA */
@@ -817,6 +827,7 @@
#define PVR_403GC 0x00200200
#define PVR_403GCX 0x00201400
#define PVR_405GP 0x40110000
+#define PVR_476 0x11a52000
#define PVR_STB03XXX 0x40310000
#define PVR_NP405H 0x41410000
#define PVR_NP405L 0x41610000
@@ -853,6 +864,9 @@
#define PVR_8245 0x80811014
#define PVR_8260 PVR_8240
+/* 476 Simulator seems to currently have the PVR of the 602... */
+#define PVR_476_ISS 0x00052000
+
/* 64-bit processors */
/* XXX the prefix should be PVR_, we'll do a global sweep to fix it one day */
#define PV_NORTHSTAR 0x0033
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 414d434a66d0..5304a37ba425 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -191,6 +191,10 @@
#define MCSR_DCFP 0x01000000 /* D-Cache Flush Parity Error */
#define MCSR_IMPE 0x00800000 /* Imprecise Machine Check Exception */
+#define PPC47x_MCSR_GPR 0x01000000 /* GPR parity error */
+#define PPC47x_MCSR_FPR 0x00800000 /* FPR parity error */
+#define PPC47x_MCSR_IPR 0x00400000 /* Imprecise Machine Check Exception */
+
#ifdef CONFIG_E500
#define MCSR_MCP 0x80000000UL /* Machine Check Input Pin */
#define MCSR_ICPERR 0x40000000UL /* I-Cache Parity Error */
@@ -604,5 +608,25 @@
#define DBCR_JOI 0x00000002 /* JTAG Serial Outbound Int. Enable */
#define DBCR_JII 0x00000001 /* JTAG Serial Inbound Int. Enable */
#endif /* 403GCX */
+
+/* Some 476 specific registers */
+#define SPRN_SSPCR 830
+#define SPRN_USPCR 831
+#define SPRN_ISPCR 829
+#define SPRN_MMUBE0 820
+#define MMUBE0_IBE0_SHIFT 24
+#define MMUBE0_IBE1_SHIFT 16
+#define MMUBE0_IBE2_SHIFT 8
+#define MMUBE0_VBE0 0x00000004
+#define MMUBE0_VBE1 0x00000002
+#define MMUBE0_VBE2 0x00000001
+#define SPRN_MMUBE1 821
+#define MMUBE1_IBE3_SHIFT 24
+#define MMUBE1_IBE4_SHIFT 16
+#define MMUBE1_IBE5_SHIFT 8
+#define MMUBE1_VBE3 0x00000004
+#define MMUBE1_VBE4 0x00000002
+#define MMUBE1_VBE5 0x00000001
+
#endif /* __ASM_POWERPC_REG_BOOKE_H__ */
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 1d3b270d3083..66e237bbe15f 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -40,7 +40,7 @@ extern void smp_message_recv(int);
DECLARE_PER_CPU(unsigned int, cpu_pvr);
#ifdef CONFIG_HOTPLUG_CPU
-extern void fixup_irqs(cpumask_t map);
+extern void fixup_irqs(const struct cpumask *map);
int generic_cpu_disable(void);
int generic_cpu_enable(unsigned int cpu);
void generic_cpu_die(unsigned int cpu);
@@ -68,8 +68,19 @@ static inline void set_hard_smp_processor_id(int cpu, int phys)
}
#endif
-DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
-DECLARE_PER_CPU(cpumask_t, cpu_core_map);
+DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
+DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
+
+static inline struct cpumask *cpu_sibling_mask(int cpu)
+{
+ return per_cpu(cpu_sibling_map, cpu);
+}
+
+static inline struct cpumask *cpu_core_mask(int cpu)
+{
+ return per_cpu(cpu_core_map, cpu);
+}
+
extern int cpu_to_core_id(int cpu);
/* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
@@ -93,7 +104,6 @@ void smp_init_pSeries(void);
void smp_init_cell(void);
void smp_init_celleb(void);
void smp_setup_cpu_maps(void);
-void smp_setup_cpu_sibling_map(void);
extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);
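
A sketch of walking the per-cpu sibling mask through the new accessor; purely
illustrative.

#include <linux/cpumask.h>

static int count_siblings(int cpu)
{
	int sibling, n = 0;

	for_each_cpu(sibling, cpu_sibling_mask(cpu))
		n++;				/* includes @cpu itself */

	return n;
}
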
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 8eaec310a25b..84ad11f65dc2 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -8,6 +8,16 @@ struct device_node;
#ifdef CONFIG_NUMA
+/*
+ * Before going off node we want the VM to try and reclaim from the local
+ * node. It does this if the remote distance is larger than RECLAIM_DISTANCE.
+ * With the default REMOTE_DISTANCE of 20 and the default RECLAIM_DISTANCE of
+ * 20, we never reclaim and go off node straight away.
+ *
+ * To fix this we choose a smaller value of RECLAIM_DISTANCE.
+ */
+#define RECLAIM_DISTANCE 10
+
#include <asm/mmzone.h>
static inline int cpu_to_node(int cpu)
@@ -19,7 +29,7 @@ static inline int cpu_to_node(int cpu)
#define cpumask_of_node(node) ((node) == -1 ? \
cpu_all_mask : \
- &numa_cpumask_lookup_table[node])
+ node_to_cpumask_map[node])
int of_node_to_nid(struct device_node *device);
@@ -102,8 +112,8 @@ static inline void sysfs_remove_device_from_node(struct sys_device *dev,
#ifdef CONFIG_PPC64
#include <asm/smp.h>
-#define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
-#define topology_core_cpumask(cpu) (&per_cpu(cpu_core_map, cpu))
+#define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
+#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_core_id(cpu) (cpu_to_core_id(cpu))
#endif
#endif
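
Roughly the behaviour the RECLAIM_DISTANCE comment above describes, as a simplified
sketch (the real check lives in the generic mm code): local reclaim is only worth
trying when the remote node is far enough away.

#include <linux/topology.h>

static int worth_reclaiming_locally(int local_node, int remote_node)
{
	/* With REMOTE_DISTANCE == RECLAIM_DISTANCE == 20 this is never true,
	 * hence the smaller RECLAIM_DISTANCE of 10 chosen above. */
	return node_distance(local_node, remote_node) > RECLAIM_DISTANCE;
}
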
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 957ceb7059c5..2716c51906d6 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -50,6 +50,9 @@
#endif
#ifdef CONFIG_KVM
#include <linux/kvm_host.h>
+#ifndef CONFIG_BOOKE
+#include <asm/kvm_book3s.h>
+#endif
#endif
#ifdef CONFIG_PPC32
@@ -105,6 +108,9 @@ int main(void)
DEFINE(THREAD_USED_SPE, offsetof(struct thread_struct, used_spe));
#endif /* CONFIG_SPE */
#endif /* CONFIG_PPC64 */
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
+ DEFINE(THREAD_KVM_SVCPU, offsetof(struct thread_struct, kvm_shadow_vcpu));
+#endif
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
@@ -191,33 +197,9 @@ int main(void)
DEFINE(PACA_DATA_OFFSET, offsetof(struct paca_struct, data_offset));
DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
- DEFINE(PACA_KVM_IN_GUEST, offsetof(struct paca_struct, kvm_in_guest));
- DEFINE(PACA_KVM_SLB, offsetof(struct paca_struct, kvm_slb));
- DEFINE(PACA_KVM_SLB_MAX, offsetof(struct paca_struct, kvm_slb_max));
- DEFINE(PACA_KVM_CR, offsetof(struct paca_struct, shadow_vcpu.cr));
- DEFINE(PACA_KVM_XER, offsetof(struct paca_struct, shadow_vcpu.xer));
- DEFINE(PACA_KVM_R0, offsetof(struct paca_struct, shadow_vcpu.gpr[0]));
- DEFINE(PACA_KVM_R1, offsetof(struct paca_struct, shadow_vcpu.gpr[1]));
- DEFINE(PACA_KVM_R2, offsetof(struct paca_struct, shadow_vcpu.gpr[2]));
- DEFINE(PACA_KVM_R3, offsetof(struct paca_struct, shadow_vcpu.gpr[3]));
- DEFINE(PACA_KVM_R4, offsetof(struct paca_struct, shadow_vcpu.gpr[4]));
- DEFINE(PACA_KVM_R5, offsetof(struct paca_struct, shadow_vcpu.gpr[5]));
- DEFINE(PACA_KVM_R6, offsetof(struct paca_struct, shadow_vcpu.gpr[6]));
- DEFINE(PACA_KVM_R7, offsetof(struct paca_struct, shadow_vcpu.gpr[7]));
- DEFINE(PACA_KVM_R8, offsetof(struct paca_struct, shadow_vcpu.gpr[8]));
- DEFINE(PACA_KVM_R9, offsetof(struct paca_struct, shadow_vcpu.gpr[9]));
- DEFINE(PACA_KVM_R10, offsetof(struct paca_struct, shadow_vcpu.gpr[10]));
- DEFINE(PACA_KVM_R11, offsetof(struct paca_struct, shadow_vcpu.gpr[11]));
- DEFINE(PACA_KVM_R12, offsetof(struct paca_struct, shadow_vcpu.gpr[12]));
- DEFINE(PACA_KVM_R13, offsetof(struct paca_struct, shadow_vcpu.gpr[13]));
- DEFINE(PACA_KVM_HOST_R1, offsetof(struct paca_struct, shadow_vcpu.host_r1));
- DEFINE(PACA_KVM_HOST_R2, offsetof(struct paca_struct, shadow_vcpu.host_r2));
- DEFINE(PACA_KVM_VMHANDLER, offsetof(struct paca_struct,
- shadow_vcpu.vmhandler));
- DEFINE(PACA_KVM_SCRATCH0, offsetof(struct paca_struct,
- shadow_vcpu.scratch0));
- DEFINE(PACA_KVM_SCRATCH1, offsetof(struct paca_struct,
- shadow_vcpu.scratch1));
+ DEFINE(PACA_KVM_SVCPU, offsetof(struct paca_struct, shadow_vcpu));
+ DEFINE(SVCPU_SLB, offsetof(struct kvmppc_book3s_shadow_vcpu, slb));
+ DEFINE(SVCPU_SLB_MAX, offsetof(struct kvmppc_book3s_shadow_vcpu, slb_max));
#endif
#endif /* CONFIG_PPC64 */
@@ -228,8 +210,8 @@ int main(void)
/* Interrupt register frame */
DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE);
-#ifdef CONFIG_PPC64
DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
+#ifdef CONFIG_PPC64
/* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */
DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
@@ -412,9 +394,6 @@ int main(void)
DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
- DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
- DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
- DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.msr));
DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4));
DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
@@ -422,27 +401,68 @@ int main(void)
DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
- DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
- DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
- DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
-
- /* book3s_64 */
-#ifdef CONFIG_PPC64
- DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr));
+ /* book3s */
+#ifdef CONFIG_PPC_BOOK3S
DEFINE(VCPU_HOST_RETIP, offsetof(struct kvm_vcpu, arch.host_retip));
- DEFINE(VCPU_HOST_R2, offsetof(struct kvm_vcpu, arch.host_r2));
DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr));
DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
- DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1));
DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem));
DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter));
DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall));
DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
+ DEFINE(VCPU_SVCPU, offsetof(struct kvmppc_vcpu_book3s, shadow_vcpu) -
+ offsetof(struct kvmppc_vcpu_book3s, vcpu));
+ DEFINE(SVCPU_CR, offsetof(struct kvmppc_book3s_shadow_vcpu, cr));
+ DEFINE(SVCPU_XER, offsetof(struct kvmppc_book3s_shadow_vcpu, xer));
+ DEFINE(SVCPU_CTR, offsetof(struct kvmppc_book3s_shadow_vcpu, ctr));
+ DEFINE(SVCPU_LR, offsetof(struct kvmppc_book3s_shadow_vcpu, lr));
+ DEFINE(SVCPU_PC, offsetof(struct kvmppc_book3s_shadow_vcpu, pc));
+ DEFINE(SVCPU_R0, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[0]));
+ DEFINE(SVCPU_R1, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[1]));
+ DEFINE(SVCPU_R2, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[2]));
+ DEFINE(SVCPU_R3, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[3]));
+ DEFINE(SVCPU_R4, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[4]));
+ DEFINE(SVCPU_R5, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[5]));
+ DEFINE(SVCPU_R6, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[6]));
+ DEFINE(SVCPU_R7, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[7]));
+ DEFINE(SVCPU_R8, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[8]));
+ DEFINE(SVCPU_R9, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[9]));
+ DEFINE(SVCPU_R10, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[10]));
+ DEFINE(SVCPU_R11, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[11]));
+ DEFINE(SVCPU_R12, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[12]));
+ DEFINE(SVCPU_R13, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[13]));
+ DEFINE(SVCPU_HOST_R1, offsetof(struct kvmppc_book3s_shadow_vcpu, host_r1));
+ DEFINE(SVCPU_HOST_R2, offsetof(struct kvmppc_book3s_shadow_vcpu, host_r2));
+ DEFINE(SVCPU_VMHANDLER, offsetof(struct kvmppc_book3s_shadow_vcpu,
+ vmhandler));
+ DEFINE(SVCPU_SCRATCH0, offsetof(struct kvmppc_book3s_shadow_vcpu,
+ scratch0));
+ DEFINE(SVCPU_SCRATCH1, offsetof(struct kvmppc_book3s_shadow_vcpu,
+ scratch1));
+ DEFINE(SVCPU_IN_GUEST, offsetof(struct kvmppc_book3s_shadow_vcpu,
+ in_guest));
+ DEFINE(SVCPU_FAULT_DSISR, offsetof(struct kvmppc_book3s_shadow_vcpu,
+ fault_dsisr));
+ DEFINE(SVCPU_FAULT_DAR, offsetof(struct kvmppc_book3s_shadow_vcpu,
+ fault_dar));
+ DEFINE(SVCPU_LAST_INST, offsetof(struct kvmppc_book3s_shadow_vcpu,
+ last_inst));
+ DEFINE(SVCPU_SHADOW_SRR1, offsetof(struct kvmppc_book3s_shadow_vcpu,
+ shadow_srr1));
+#ifdef CONFIG_PPC_BOOK3S_32
+ DEFINE(SVCPU_SR, offsetof(struct kvmppc_book3s_shadow_vcpu, sr));
+#endif
#else
DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
-#endif /* CONFIG_PPC64 */
+ DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
+ DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
+ DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
+ DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
+ DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
+ DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
+#endif /* CONFIG_PPC_BOOK3S */
#endif
#ifdef CONFIG_44x
DEFINE(PGD_T_LOG2, PGD_T_LOG2);
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 8af4949434b2..9556be903e96 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1701,6 +1701,35 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_440A,
.platform = "ppc440",
},
+ { /* 476 core */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x11a50000,
+ .cpu_name = "476",
+ .cpu_features = CPU_FTRS_47X,
+ .cpu_user_features = COMMON_USER_BOOKE |
+ PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_47x |
+ MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL,
+ .icache_bsize = 32,
+ .dcache_bsize = 128,
+ .machine_check = machine_check_47x,
+ .platform = "ppc470",
+ },
+ { /* 476 iss */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x00050000,
+ .cpu_name = "476",
+ .cpu_features = CPU_FTRS_47X,
+ .cpu_user_features = COMMON_USER_BOOKE |
+ PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_47x |
+ MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL,
+ .icache_bsize = 32,
+ .dcache_bsize = 128,
+ .machine_check = machine_check_47x,
+ .platform = "ppc470",
+ },
{ /* default match */
.pvr_mask = 0x00000000,
.pvr_value = 0x00000000,
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 1175a8539e6c..ed4aeb96398b 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -373,11 +373,13 @@ syscall_exit_cont:
bnel- load_dbcr0
#endif
#ifdef CONFIG_44x
+BEGIN_MMU_FTR_SECTION
lis r4,icache_44x_need_flush@ha
lwz r5,icache_44x_need_flush@l(r4)
cmplwi cr0,r5,0
bne- 2f
1:
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
#endif /* CONFIG_44x */
BEGIN_FTR_SECTION
lwarx r7,0,r1
@@ -848,6 +850,9 @@ resume_kernel:
/* interrupts are hard-disabled at this point */
restore:
#ifdef CONFIG_44x
+BEGIN_MMU_FTR_SECTION
+ b 1f
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
lis r4,icache_44x_need_flush@ha
lwz r5,icache_44x_need_flush@l(r4)
cmplwi cr0,r5,0
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index e3be98ffe2a7..3e423fbad6bc 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -735,8 +735,11 @@ _STATIC(do_hash_page)
std r3,_DAR(r1)
std r4,_DSISR(r1)
- andis. r0,r4,0xa450 /* weird error? */
+ andis. r0,r4,0xa410 /* weird error? */
bne- handle_page_fault /* if not, try to insert a HPTE */
+ andis. r0,r4,DSISR_DABRMATCH@h
+ bne- handle_dabr_fault
+
BEGIN_FTR_SECTION
andis. r0,r4,0x0020 /* Is it a segment table fault? */
bne- do_ste_alloc /* If so handle it */
@@ -823,6 +826,14 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
bl .raw_local_irq_restore
b 11f
+/* We have a data breakpoint exception - handle it */
+handle_dabr_fault:
+ ld r4,_DAR(r1)
+ ld r5,_DSISR(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .do_dabr
+ b .ret_from_except_lite
+
/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
ENABLE_INTS
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index e025e89fe93e..98c4b29a56f4 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -33,6 +33,7 @@
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/bug.h>
+#include <asm/kvm_book3s_asm.h>
/* 601 only have IBAT; cr0.eq is set on 601 when using this macro */
#define LOAD_BAT(n, reg, RA, RB) \
@@ -303,6 +304,7 @@ __secondary_hold_acknowledge:
*/
#define EXCEPTION(n, label, hdlr, xfer) \
. = n; \
+ DO_KVM n; \
label: \
EXCEPTION_PROLOG; \
addi r3,r1,STACK_FRAME_OVERHEAD; \
@@ -358,6 +360,7 @@ i##n: \
* -- paulus.
*/
. = 0x200
+ DO_KVM 0x200
mtspr SPRN_SPRG_SCRATCH0,r10
mtspr SPRN_SPRG_SCRATCH1,r11
mfcr r10
@@ -381,6 +384,7 @@ i##n: \
/* Data access exception. */
. = 0x300
+ DO_KVM 0x300
DataAccess:
EXCEPTION_PROLOG
mfspr r10,SPRN_DSISR
@@ -397,6 +401,7 @@ DataAccess:
/* Instruction access exception. */
. = 0x400
+ DO_KVM 0x400
InstructionAccess:
EXCEPTION_PROLOG
andis. r0,r9,0x4000 /* no pte found? */
@@ -413,6 +418,7 @@ InstructionAccess:
/* Alignment exception */
. = 0x600
+ DO_KVM 0x600
Alignment:
EXCEPTION_PROLOG
mfspr r4,SPRN_DAR
@@ -427,6 +433,7 @@ Alignment:
/* Floating-point unavailable */
. = 0x800
+ DO_KVM 0x800
FPUnavailable:
BEGIN_FTR_SECTION
/*
@@ -450,6 +457,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
/* System call */
. = 0xc00
+ DO_KVM 0xc00
SystemCall:
EXCEPTION_PROLOG
EXC_XFER_EE_LITE(0xc00, DoSyscall)
@@ -467,9 +475,11 @@ SystemCall:
* by executing an altivec instruction.
*/
. = 0xf00
+ DO_KVM 0xf00
b PerformanceMonitor
. = 0xf20
+ DO_KVM 0xf20
b AltiVecUnavailable
/*
@@ -882,6 +892,10 @@ __secondary_start:
RFI
#endif /* CONFIG_SMP */
+#ifdef CONFIG_KVM_BOOK3S_HANDLER
+#include "../kvm/book3s_rmhandlers.S"
+#endif
+
/*
* Those generic dummy functions are kept for CPUs not
* included in CONFIG_6xx
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 711368b993f2..5ab484ef06a7 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -37,6 +37,7 @@
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
+#include <asm/synch.h>
#include "head_booke.h"
@@ -69,165 +70,7 @@ _ENTRY(_start);
mr r27,r7
li r24,0 /* CPU number */
-/*
- * In case the firmware didn't do it, we apply some workarounds
- * that are good for all 440 core variants here
- */
- mfspr r3,SPRN_CCR0
- rlwinm r3,r3,0,0,27 /* disable icache prefetch */
- isync
- mtspr SPRN_CCR0,r3
- isync
- sync
-
-/*
- * Set up the initial MMU state
- *
- * We are still executing code at the virtual address
- * mappings set by the firmware for the base of RAM.
- *
- * We first invalidate all TLB entries but the one
- * we are running from. We then load the KERNELBASE
- * mappings so we can begin to use kernel addresses
- * natively and so the interrupt vector locations are
- * permanently pinned (necessary since Book E
- * implementations always have translation enabled).
- *
- * TODO: Use the known TLB entry we are running from to
- * determine which physical region we are located
- * in. This can be used to determine where in RAM
- * (on a shared CPU system) or PCI memory space
- * (on a DRAMless system) we are located.
- * For now, we assume a perfect world which means
- * we are located at the base of DRAM (physical 0).
- */
-
-/*
- * Search TLB for entry that we are currently using.
- * Invalidate all entries but the one we are using.
- */
- /* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
- mfspr r3,SPRN_PID /* Get PID */
- mfmsr r4 /* Get MSR */
- andi. r4,r4,MSR_IS@l /* TS=1? */
- beq wmmucr /* If not, leave STS=0 */
- oris r3,r3,PPC44x_MMUCR_STS@h /* Set STS=1 */
-wmmucr: mtspr SPRN_MMUCR,r3 /* Put MMUCR */
- sync
-
- bl invstr /* Find our address */
-invstr: mflr r5 /* Make it accessible */
- tlbsx r23,0,r5 /* Find entry we are in */
- li r4,0 /* Start at TLB entry 0 */
- li r3,0 /* Set PAGEID inval value */
-1: cmpw r23,r4 /* Is this our entry? */
- beq skpinv /* If so, skip the inval */
- tlbwe r3,r4,PPC44x_TLB_PAGEID /* If not, inval the entry */
-skpinv: addi r4,r4,1 /* Increment */
- cmpwi r4,64 /* Are we done? */
- bne 1b /* If not, repeat */
- isync /* If so, context change */
-
-/*
- * Configure and load pinned entry into TLB slot 63.
- */
-
- lis r3,PAGE_OFFSET@h
- ori r3,r3,PAGE_OFFSET@l
-
- /* Kernel is at the base of RAM */
- li r4, 0 /* Load the kernel physical address */
-
- /* Load the kernel PID = 0 */
- li r0,0
- mtspr SPRN_PID,r0
- sync
-
- /* Initialize MMUCR */
- li r5,0
- mtspr SPRN_MMUCR,r5
- sync
-
- /* pageid fields */
- clrrwi r3,r3,10 /* Mask off the effective page number */
- ori r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M
-
- /* xlat fields */
- clrrwi r4,r4,10 /* Mask off the real page number */
- /* ERPN is 0 for first 4GB page */
-
- /* attrib fields */
- /* Added guarded bit to protect against speculative loads/stores */
- li r5,0
- ori r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)
-
- li r0,63 /* TLB slot 63 */
-
- tlbwe r3,r0,PPC44x_TLB_PAGEID /* Load the pageid fields */
- tlbwe r4,r0,PPC44x_TLB_XLAT /* Load the translation fields */
- tlbwe r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */
-
- /* Force context change */
- mfmsr r0
- mtspr SPRN_SRR1, r0
- lis r0,3f@h
- ori r0,r0,3f@l
- mtspr SPRN_SRR0,r0
- sync
- rfi
-
- /* If necessary, invalidate original entry we used */
-3: cmpwi r23,63
- beq 4f
- li r6,0
- tlbwe r6,r23,PPC44x_TLB_PAGEID
- isync
-
-4:
-#ifdef CONFIG_PPC_EARLY_DEBUG_44x
- /* Add UART mapping for early debug. */
-
- /* pageid fields */
- lis r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
- ori r3,r3,PPC44x_TLB_VALID|PPC44x_TLB_TS|PPC44x_TLB_64K
-
- /* xlat fields */
- lis r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
- ori r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH
-
- /* attrib fields */
- li r5,(PPC44x_TLB_SW|PPC44x_TLB_SR|PPC44x_TLB_I|PPC44x_TLB_G)
- li r0,62 /* TLB slot 0 */
-
- tlbwe r3,r0,PPC44x_TLB_PAGEID
- tlbwe r4,r0,PPC44x_TLB_XLAT
- tlbwe r5,r0,PPC44x_TLB_ATTRIB
-
- /* Force context change */
- isync
-#endif /* CONFIG_PPC_EARLY_DEBUG_44x */
-
- /* Establish the interrupt vector offsets */
- SET_IVOR(0, CriticalInput);
- SET_IVOR(1, MachineCheck);
- SET_IVOR(2, DataStorage);
- SET_IVOR(3, InstructionStorage);
- SET_IVOR(4, ExternalInput);
- SET_IVOR(5, Alignment);
- SET_IVOR(6, Program);
- SET_IVOR(7, FloatingPointUnavailable);
- SET_IVOR(8, SystemCall);
- SET_IVOR(9, AuxillaryProcessorUnavailable);
- SET_IVOR(10, Decrementer);
- SET_IVOR(11, FixedIntervalTimer);
- SET_IVOR(12, WatchdogTimer);
- SET_IVOR(13, DataTLBError);
- SET_IVOR(14, InstructionTLBError);
- SET_IVOR(15, DebugCrit);
-
- /* Establish the interrupt vector base */
- lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */
- mtspr SPRN_IVPR,r4
+ bl init_cpu_state
/*
* This is where the main kernel code starts.
@@ -349,7 +192,7 @@ interrupt_base:
#endif
/* Data TLB Error Interrupt */
- START_EXCEPTION(DataTLBError)
+ START_EXCEPTION(DataTLBError44x)
mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
mtspr SPRN_SPRG_WSCRATCH1, r11
mtspr SPRN_SPRG_WSCRATCH2, r12
@@ -440,7 +283,7 @@ tlb_44x_patch_hwater_D:
mfspr r10,SPRN_DEAR
/* Jump to common tlb load */
- b finish_tlb_load
+ b finish_tlb_load_44x
2:
/* The bailout. Restore registers to pre-exception conditions
@@ -460,7 +303,7 @@ tlb_44x_patch_hwater_D:
* information from different registers and bailout
* to a different point.
*/
- START_EXCEPTION(InstructionTLBError)
+ START_EXCEPTION(InstructionTLBError44x)
mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
mtspr SPRN_SPRG_WSCRATCH1, r11
mtspr SPRN_SPRG_WSCRATCH2, r12
@@ -536,7 +379,7 @@ tlb_44x_patch_hwater_I:
mfspr r10,SPRN_SRR0
/* Jump to common TLB load point */
- b finish_tlb_load
+ b finish_tlb_load_44x
2:
/* The bailout. Restore registers to pre-exception conditions
@@ -550,15 +393,7 @@ tlb_44x_patch_hwater_I:
mfspr r10, SPRN_SPRG_RSCRATCH0
b InstructionStorage
- /* Debug Interrupt */
- DEBUG_CRIT_EXCEPTION
-
-/*
- * Local functions
- */
-
/*
-
* Both the instruction and data TLB miss get to this
* point to load the TLB.
* r10 - EA of fault
@@ -568,7 +403,7 @@ tlb_44x_patch_hwater_I:
* MMUCR - loaded with proper value when we get here
* Upon exit, we reload everything and RFI.
*/
-finish_tlb_load:
+finish_tlb_load_44x:
 /* Combine RPN & ERPN and write WS 0 */
rlwimi r11,r12,0,0,31-PAGE_SHIFT
tlbwe r11,r13,PPC44x_TLB_XLAT
@@ -601,6 +436,227 @@ finish_tlb_load:
mfspr r10, SPRN_SPRG_RSCRATCH0
rfi /* Force context change */
+/* TLB error interrupts for 476
+ */
+#ifdef CONFIG_PPC_47x
+ START_EXCEPTION(DataTLBError47x)
+ mtspr SPRN_SPRG_WSCRATCH0,r10 /* Save some working registers */
+ mtspr SPRN_SPRG_WSCRATCH1,r11
+ mtspr SPRN_SPRG_WSCRATCH2,r12
+ mtspr SPRN_SPRG_WSCRATCH3,r13
+ mfcr r11
+ mtspr SPRN_SPRG_WSCRATCH4,r11
+ mfspr r10,SPRN_DEAR /* Get faulting address */
+
+ /* If we are faulting a kernel address, we have to use the
+ * kernel page tables.
+ */
+ lis r11,PAGE_OFFSET@h
+ cmplw cr0,r10,r11
+ blt+ 3f
+ lis r11,swapper_pg_dir@h
+ ori r11,r11, swapper_pg_dir@l
+ li r12,0 /* MMUCR = 0 */
+ b 4f
+
+ /* Get the PGD for the current thread and setup MMUCR */
+3: mfspr r11,SPRN_SPRG3
+ lwz r11,PGDIR(r11)
+ mfspr r12,SPRN_PID /* Get PID */
+4: mtspr SPRN_MMUCR,r12 /* Set MMUCR */
+
+ /* Mask of required permission bits. Note that while we
+ * do copy ESR:ST to _PAGE_RW position as trying to write
+ * to an RO page is pretty common, we don't do it with
+ * _PAGE_DIRTY. We could do it, but it's a fairly rare
+ * event so I'd rather take the overhead when it happens
+ * rather than adding an instruction here. We should measure
+ * whether the whole thing is worth it in the first place
+ * as we could avoid loading SPRN_ESR completely in the first
+ * place...
+ *
+ * TODO: Is it worth doing that mfspr & rlwimi in the first
+ * place or can we save a couple of instructions here ?
+ */
+ mfspr r12,SPRN_ESR
+ li r13,_PAGE_PRESENT|_PAGE_ACCESSED
+ rlwimi r13,r12,10,30,30
+
+ /* Load the PTE */
+ /* Compute pgdir/pmd offset */
+ rlwinm r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
+ lwzx r11,r12,r11 /* Get pgd/pmd entry */
+
+ /* Word 0 is EPN,V,TS,DSIZ */
+ li r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
+ rlwimi r10,r12,0,32-PAGE_SHIFT,31 /* Insert valid and page size*/
+ li r12,0
+ tlbwe r10,r12,0
+
+ /* XXX can we do better ? Need to make sure tlbwe has established
+ * latch V bit in MMUCR0 before the PTE is loaded further down */
+#ifdef CONFIG_SMP
+ isync
+#endif
+
+ rlwinm. r12,r11,0,0,20 /* Extract pt base address */
+ /* Compute pte address */
+ rlwimi r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
+ beq 2f /* Bail if no table */
+ lwz r11,0(r12) /* Get high word of pte entry */
+
+ /* XXX can we do better ? maybe insert a known 0 bit from r11 into the
+ * bottom of r12 to create a data dependency... We can also use r10
+ * as destination nowadays
+ */
+#ifdef CONFIG_SMP
+ lwsync
+#endif
+ lwz r12,4(r12) /* Get low word of pte entry */
+
+ andc. r13,r13,r12 /* Check permission */
+
+ /* Jump to common tlb load */
+ beq finish_tlb_load_47x
+
+2: /* The bailout. Restore registers to pre-exception conditions
+ * and call the heavyweights to help us out.
+ */
+ mfspr r11,SPRN_SPRG_RSCRATCH4
+ mtcr r11
+ mfspr r13,SPRN_SPRG_RSCRATCH3
+ mfspr r12,SPRN_SPRG_RSCRATCH2
+ mfspr r11,SPRN_SPRG_RSCRATCH1
+ mfspr r10,SPRN_SPRG_RSCRATCH0
+ b DataStorage
+
+ /* Instruction TLB Error Interrupt */
+ /*
+ * Nearly the same as above, except we get our
+ * information from different registers and bailout
+ * to a different point.
+ */
+ START_EXCEPTION(InstructionTLBError47x)
+ mtspr SPRN_SPRG_WSCRATCH0,r10 /* Save some working registers */
+ mtspr SPRN_SPRG_WSCRATCH1,r11
+ mtspr SPRN_SPRG_WSCRATCH2,r12
+ mtspr SPRN_SPRG_WSCRATCH3,r13
+ mfcr r11
+ mtspr SPRN_SPRG_WSCRATCH4,r11
+ mfspr r10,SPRN_SRR0 /* Get faulting address */
+
+ /* If we are faulting a kernel address, we have to use the
+ * kernel page tables.
+ */
+ lis r11,PAGE_OFFSET@h
+ cmplw cr0,r10,r11
+ blt+ 3f
+ lis r11,swapper_pg_dir@h
+ ori r11,r11, swapper_pg_dir@l
+ li r12,0 /* MMUCR = 0 */
+ b 4f
+
+ /* Get the PGD for the current thread and setup MMUCR */
+3: mfspr r11,SPRN_SPRG_THREAD
+ lwz r11,PGDIR(r11)
+ mfspr r12,SPRN_PID /* Get PID */
+4: mtspr SPRN_MMUCR,r12 /* Set MMUCR */
+
+ /* Make up the required permissions */
+ li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
+
+ /* Load PTE */
+ /* Compute pgdir/pmd offset */
+ rlwinm r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
+ lwzx r11,r12,r11 /* Get pgd/pmd entry */
+
+ /* Word 0 is EPN,V,TS,DSIZ */
+ li r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
+ rlwimi r10,r12,0,32-PAGE_SHIFT,31 /* Insert valid and page size*/
+ li r12,0
+ tlbwe r10,r12,0
+
+ /* XXX can we do better ? Need to make sure tlbwe has established
+ * latch V bit in MMUCR0 before the PTE is loaded further down */
+#ifdef CONFIG_SMP
+ isync
+#endif
+
+ rlwinm. r12,r11,0,0,20 /* Extract pt base address */
+ /* Compute pte address */
+ rlwimi r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
+ beq 2f /* Bail if no table */
+
+ lwz r11,0(r12) /* Get high word of pte entry */
+ /* XXX can we do better ? maybe insert a known 0 bit from r11 into the
+ * bottom of r12 to create a data dependency... We can also use r10
+ * as destination nowadays
+ */
+#ifdef CONFIG_SMP
+ lwsync
+#endif
+ lwz r12,4(r12) /* Get low word of pte entry */
+
+ andc. r13,r13,r12 /* Check permission */
+
+ /* Jump to common TLB load point */
+ beq finish_tlb_load_47x
+
+2: /* The bailout. Restore registers to pre-exception conditions
+ * and call the heavyweights to help us out.
+ */
+ mfspr r11, SPRN_SPRG_RSCRATCH4
+ mtcr r11
+ mfspr r13, SPRN_SPRG_RSCRATCH3
+ mfspr r12, SPRN_SPRG_RSCRATCH2
+ mfspr r11, SPRN_SPRG_RSCRATCH1
+ mfspr r10, SPRN_SPRG_RSCRATCH0
+ b InstructionStorage
+
+/*
+ * Both the instruction and data TLB miss get to this
+ * point to load the TLB.
+ * r10 - free to use
+ * r11 - PTE high word value
+ * r12 - PTE low word value
+ * r13 - free to use
+ * MMUCR - loaded with proper value when we get here
+ * Upon exit, we reload everything and RFI.
+ */
+finish_tlb_load_47x:
+ /* Combine RPN & ERPN and write WS 1 */
+ rlwimi r11,r12,0,0,31-PAGE_SHIFT
+ tlbwe r11,r13,1
+
+ /* And make up word 2 */
+ li r10,0xf85 /* Mask to apply from PTE */
+ rlwimi r10,r12,29,30,30 /* DIRTY -> SW position */
+ and r11,r12,r10 /* Mask PTE bits to keep */
+ andi. r10,r12,_PAGE_USER /* User page ? */
+ beq 1f /* nope, leave U bits empty */
+ rlwimi r11,r11,3,26,28 /* yes, copy S bits to U */
+1: tlbwe r11,r13,2
+
+ /* Done...restore registers and get out of here.
+ */
+ mfspr r11, SPRN_SPRG_RSCRATCH4
+ mtcr r11
+ mfspr r13, SPRN_SPRG_RSCRATCH3
+ mfspr r12, SPRN_SPRG_RSCRATCH2
+ mfspr r11, SPRN_SPRG_RSCRATCH1
+ mfspr r10, SPRN_SPRG_RSCRATCH0
+ rfi
+
+#endif /* CONFIG_PPC_47x */
+
+ /* Debug Interrupt */
+ /*
+ * This statement needs to exist at the end of the IVPR
+ * definition just in case you end up taking a debug
+ * exception within another exception.
+ */
+ DEBUG_CRIT_EXCEPTION
+
/*
* Global functions
*/
@@ -647,6 +703,428 @@ _GLOBAL(set_context)
blr
/*
+ * Init CPU state. This is called at boot time or for secondary CPUs
+ * to setup initial TLB entries, setup IVORs, etc...
+ *
+ */
+_GLOBAL(init_cpu_state)
+ mflr r22
+#ifdef CONFIG_PPC_47x
+ /* We use the PVR to differentiate 44x cores from 476 */
+ mfspr r3,SPRN_PVR
+ srwi r3,r3,16
+ cmplwi cr0,r3,PVR_476@h
+ beq head_start_47x
+ cmplwi cr0,r3,PVR_476_ISS@h
+ beq head_start_47x
+#endif /* CONFIG_PPC_47x */
+
+/*
+ * In case the firmware didn't do it, we apply some workarounds
+ * that are good for all 440 core variants here
+ */
+ mfspr r3,SPRN_CCR0
+ rlwinm r3,r3,0,0,27 /* disable icache prefetch */
+ isync
+ mtspr SPRN_CCR0,r3
+ isync
+ sync
+
+/*
+ * Set up the initial MMU state for 44x
+ *
+ * We are still executing code at the virtual address
+ * mappings set by the firmware for the base of RAM.
+ *
+ * We first invalidate all TLB entries but the one
+ * we are running from. We then load the KERNELBASE
+ * mappings so we can begin to use kernel addresses
+ * natively and so the interrupt vector locations are
+ * permanently pinned (necessary since Book E
+ * implementations always have translation enabled).
+ *
+ * TODO: Use the known TLB entry we are running from to
+ * determine which physical region we are located
+ * in. This can be used to determine where in RAM
+ * (on a shared CPU system) or PCI memory space
+ * (on a DRAMless system) we are located.
+ * For now, we assume a perfect world which means
+ * we are located at the base of DRAM (physical 0).
+ */
+
+/*
+ * Search TLB for entry that we are currently using.
+ * Invalidate all entries but the one we are using.
+ */
+ /* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
+ mfspr r3,SPRN_PID /* Get PID */
+ mfmsr r4 /* Get MSR */
+ andi. r4,r4,MSR_IS@l /* TS=1? */
+ beq wmmucr /* If not, leave STS=0 */
+ oris r3,r3,PPC44x_MMUCR_STS@h /* Set STS=1 */
+wmmucr: mtspr SPRN_MMUCR,r3 /* Put MMUCR */
+ sync
+
+ bl invstr /* Find our address */
+invstr: mflr r5 /* Make it accessible */
+ tlbsx r23,0,r5 /* Find entry we are in */
+ li r4,0 /* Start at TLB entry 0 */
+ li r3,0 /* Set PAGEID inval value */
+1: cmpw r23,r4 /* Is this our entry? */
+ beq skpinv /* If so, skip the inval */
+ tlbwe r3,r4,PPC44x_TLB_PAGEID /* If not, inval the entry */
+skpinv: addi r4,r4,1 /* Increment */
+ cmpwi r4,64 /* Are we done? */
+ bne 1b /* If not, repeat */
+ isync /* If so, context change */
+
+/*
+ * Configure and load pinned entry into TLB slot 63.
+ */
+
+ lis r3,PAGE_OFFSET@h
+ ori r3,r3,PAGE_OFFSET@l
+
+ /* Kernel is at the base of RAM */
+ li r4, 0 /* Load the kernel physical address */
+
+ /* Load the kernel PID = 0 */
+ li r0,0
+ mtspr SPRN_PID,r0
+ sync
+
+ /* Initialize MMUCR */
+ li r5,0
+ mtspr SPRN_MMUCR,r5
+ sync
+
+ /* pageid fields */
+ clrrwi r3,r3,10 /* Mask off the effective page number */
+ ori r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M
+
+ /* xlat fields */
+ clrrwi r4,r4,10 /* Mask off the real page number */
+ /* ERPN is 0 for first 4GB page */
+
+ /* attrib fields */
+ /* Added guarded bit to protect against speculative loads/stores */
+ li r5,0
+ ori r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)
+
+ li r0,63 /* TLB slot 63 */
+
+ tlbwe r3,r0,PPC44x_TLB_PAGEID /* Load the pageid fields */
+ tlbwe r4,r0,PPC44x_TLB_XLAT /* Load the translation fields */
+ tlbwe r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */
+
+ /* Force context change */
+ mfmsr r0
+ mtspr SPRN_SRR1, r0
+ lis r0,3f@h
+ ori r0,r0,3f@l
+ mtspr SPRN_SRR0,r0
+ sync
+ rfi
+
+ /* If necessary, invalidate original entry we used */
+3: cmpwi r23,63
+ beq 4f
+ li r6,0
+ tlbwe r6,r23,PPC44x_TLB_PAGEID
+ isync
+
+4:
+#ifdef CONFIG_PPC_EARLY_DEBUG_44x
+ /* Add UART mapping for early debug. */
+
+ /* pageid fields */
+ lis r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
+ ori r3,r3,PPC44x_TLB_VALID|PPC44x_TLB_TS|PPC44x_TLB_64K
+
+ /* xlat fields */
+ lis r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
+ ori r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH
+
+ /* attrib fields */
+ li r5,(PPC44x_TLB_SW|PPC44x_TLB_SR|PPC44x_TLB_I|PPC44x_TLB_G)
+ li r0,62 /* TLB slot 0 */
+
+ tlbwe r3,r0,PPC44x_TLB_PAGEID
+ tlbwe r4,r0,PPC44x_TLB_XLAT
+ tlbwe r5,r0,PPC44x_TLB_ATTRIB
+
+ /* Force context change */
+ isync
+#endif /* CONFIG_PPC_EARLY_DEBUG_44x */
+
+ /* Establish the interrupt vector offsets */
+ SET_IVOR(0, CriticalInput);
+ SET_IVOR(1, MachineCheck);
+ SET_IVOR(2, DataStorage);
+ SET_IVOR(3, InstructionStorage);
+ SET_IVOR(4, ExternalInput);
+ SET_IVOR(5, Alignment);
+ SET_IVOR(6, Program);
+ SET_IVOR(7, FloatingPointUnavailable);
+ SET_IVOR(8, SystemCall);
+ SET_IVOR(9, AuxillaryProcessorUnavailable);
+ SET_IVOR(10, Decrementer);
+ SET_IVOR(11, FixedIntervalTimer);
+ SET_IVOR(12, WatchdogTimer);
+ SET_IVOR(13, DataTLBError44x);
+ SET_IVOR(14, InstructionTLBError44x);
+ SET_IVOR(15, DebugCrit);
+
+ b head_start_common
+
+
+#ifdef CONFIG_PPC_47x
+
+#ifdef CONFIG_SMP
+
+/* Entry point for secondary 47x processors */
+_GLOBAL(start_secondary_47x)
+ mr r24,r3 /* CPU number */
+
+ bl init_cpu_state
+
+ /* Now we need to bolt the rest of kernel memory which
+ * is done in C code. We must be careful because our task
+ * struct or our stack can (and will probably) be out
+ * of reach of the initial 256M TLB entry, so we use a
+ * small temporary stack in .bss for that. This works
+ * because only one CPU at a time can be in this code
+ */
+ lis r1,temp_boot_stack@h
+ ori r1,r1,temp_boot_stack@l
+ addi r1,r1,1024-STACK_FRAME_OVERHEAD
+ li r0,0
+ stw r0,0(r1)
+ bl mmu_init_secondary
+
+ /* Now we can get our task struct and real stack pointer */
+
+ /* Get current_thread_info and current */
+ lis r1,secondary_ti@ha
+ lwz r1,secondary_ti@l(r1)
+ lwz r2,TI_TASK(r1)
+
+ /* Current stack pointer */
+ addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
+ li r0,0
+ stw r0,0(r1)
+
+ /* Kernel stack for exception entry in SPRG3 */
+ addi r4,r2,THREAD /* init task's THREAD */
+ mtspr SPRN_SPRG3,r4
+
+ b start_secondary
+
+#endif /* CONFIG_SMP */
+
+/*
+ * Set up the initial MMU state for 44x
+ *
+ * We are still executing code at the virtual address
+ * mappings set by the firmware for the base of RAM.
+ */
+
+head_start_47x:
+ /* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
+ mfspr r3,SPRN_PID /* Get PID */
+ mfmsr r4 /* Get MSR */
+ andi. r4,r4,MSR_IS@l /* TS=1? */
+ beq 1f /* If not, leave STS=0 */
+ oris r3,r3,PPC47x_MMUCR_STS@h /* Set STS=1 */
+1: mtspr SPRN_MMUCR,r3 /* Put MMUCR */
+ sync
+
+ /* Find the entry we are running from */
+ bl 1f
+1: mflr r23
+ tlbsx r23,0,r23
+ tlbre r24,r23,0
+ tlbre r25,r23,1
+ tlbre r26,r23,2
+
+/*
+ * Cleanup time
+ */
+
+ /* Initialize MMUCR */
+ li r5,0
+ mtspr SPRN_MMUCR,r5
+ sync
+
+clear_all_utlb_entries:
+
+ #; Set initial values.
+
+ addis r3,0,0x8000
+ addi r4,0,0
+ addi r5,0,0
+ b clear_utlb_entry
+
+ #; Align the loop to speed things up.
+
+ .align 6
+
+clear_utlb_entry:
+
+ tlbwe r4,r3,0
+ tlbwe r5,r3,1
+ tlbwe r5,r3,2
+ addis r3,r3,0x2000
+ cmpwi r3,0
+ bne clear_utlb_entry
+ addis r3,0,0x8000
+ addis r4,r4,0x100
+ cmpwi r4,0
+ bne clear_utlb_entry
+
+ #; Restore original entry.
+
+ oris r23,r23,0x8000 /* specify the way */
+ tlbwe r24,r23,0
+ tlbwe r25,r23,1
+ tlbwe r26,r23,2
+
+/*
+ * Configure and load pinned entry into TLB for the kernel core
+ */
+
+ lis r3,PAGE_OFFSET@h
+ ori r3,r3,PAGE_OFFSET@l
+
+ /* Kernel is at the base of RAM */
+ li r4, 0 /* Load the kernel physical address */
+
+ /* Load the kernel PID = 0 */
+ li r0,0
+ mtspr SPRN_PID,r0
+ sync
+
+ /* Word 0 */
+ clrrwi r3,r3,12 /* Mask off the effective page number */
+ ori r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_256M
+
+ /* Word 1 */
+ clrrwi r4,r4,12 /* Mask off the real page number */
+ /* ERPN is 0 for first 4GB page */
+ /* Word 2 */
+ li r5,0
+ ori r5,r5,PPC47x_TLB2_S_RWX
+#ifdef CONFIG_SMP
+ ori r5,r5,PPC47x_TLB2_M
+#endif
+
+ /* We write to way 0 and bolted 0 */
+ lis r0,0x8800
+ tlbwe r3,r0,0
+ tlbwe r4,r0,1
+ tlbwe r5,r0,2
+
+/*
+ * Configure SSPCR, ISPCR and USPCR for now to search everything, we can fix
+ * them up later
+ */
+ LOAD_REG_IMMEDIATE(r3, 0x9abcdef0)
+ mtspr SPRN_SSPCR,r3
+ mtspr SPRN_USPCR,r3
+ LOAD_REG_IMMEDIATE(r3, 0x12345670)
+ mtspr SPRN_ISPCR,r3
+
+ /* Force context change */
+ mfmsr r0
+ mtspr SPRN_SRR1, r0
+ lis r0,3f@h
+ ori r0,r0,3f@l
+ mtspr SPRN_SRR0,r0
+ sync
+ rfi
+
+ /* Invalidate original entry we used */
+3:
+ rlwinm r24,r24,0,21,19 /* clear the "valid" bit */
+ tlbwe r24,r23,0
+ addi r24,0,0
+ tlbwe r24,r23,1
+ tlbwe r24,r23,2
+ isync /* Clear out the shadow TLB entries */
+
+#ifdef CONFIG_PPC_EARLY_DEBUG_44x
+ /* Add UART mapping for early debug. */
+
+ /* Word 0 */
+ lis r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
+ ori r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_TS | PPC47x_TLB0_1M
+
+ /* Word 1 */
+ lis r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
+ ori r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH
+
+ /* Word 2 */
+ li r5,(PPC47x_TLB2_S_RW | PPC47x_TLB2_IMG)
+
+ /* Bolted in way 0, bolt slot 5, we -hope- we don't hit the same
+ * congruence class as the kernel, we need to make sure of it at
+ * some point
+ */
+ lis r0,0x8d00
+ tlbwe r3,r0,0
+ tlbwe r4,r0,1
+ tlbwe r5,r0,2
+
+ /* Force context change */
+ isync
+#endif /* CONFIG_PPC_EARLY_DEBUG_44x */
+
+ /* Establish the interrupt vector offsets */
+ SET_IVOR(0, CriticalInput);
+ SET_IVOR(1, MachineCheckA);
+ SET_IVOR(2, DataStorage);
+ SET_IVOR(3, InstructionStorage);
+ SET_IVOR(4, ExternalInput);
+ SET_IVOR(5, Alignment);
+ SET_IVOR(6, Program);
+ SET_IVOR(7, FloatingPointUnavailable);
+ SET_IVOR(8, SystemCall);
+ SET_IVOR(9, AuxillaryProcessorUnavailable);
+ SET_IVOR(10, Decrementer);
+ SET_IVOR(11, FixedIntervalTimer);
+ SET_IVOR(12, WatchdogTimer);
+ SET_IVOR(13, DataTLBError47x);
+ SET_IVOR(14, InstructionTLBError47x);
+ SET_IVOR(15, DebugCrit);
+
+ /* We configure icbi to invalidate 128 bytes at a time since the
+ * current 32-bit kernel code isn't too happy with icache != dcache
+ * block size
+ */
+ mfspr r3,SPRN_CCR0
+ oris r3,r3,0x0020
+ mtspr SPRN_CCR0,r3
+ isync
+
+#endif /* CONFIG_PPC_47x */
+
+/*
+ * Here we are back to code that is common between 44x and 47x
+ *
+ * We proceed to further kernel initialization and return to the
+ * main kernel entry
+ */
+head_start_common:
+ /* Establish the interrupt vector base */
+ lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */
+ mtspr SPRN_IVPR,r4
+
+ addis r22,r22,KERNELBASE@h
+ mtlr r22
+ isync
+ blr
+
+/*
* We put a few things here that have to be page-aligned. This stuff
* goes at the beginning of the data segment, which is page-aligned.
*/
@@ -671,3 +1149,9 @@ swapper_pg_dir:
*/
abatron_pteptrs:
.space 8
+
+#ifdef CONFIG_SMP
+ .align 12
+temp_boot_stack:
+ .space 1024
+#endif /* CONFIG_SMP */
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index bed9a29ee383..844a44b64472 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -37,7 +37,7 @@
#include <asm/firmware.h>
#include <asm/page_64.h>
#include <asm/irqflags.h>
-#include <asm/kvm_book3s_64_asm.h>
+#include <asm/kvm_book3s_asm.h>
 /* The physical memory is laid out such that the secondary processor
* spin code sits at 0x0000...0x00ff. On server, the vectors follow
@@ -169,7 +169,7 @@ exception_marker:
/* KVM trampoline code needs to be close to the interrupt handlers */
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-#include "../kvm/book3s_64_rmhandlers.S"
+#include "../kvm/book3s_rmhandlers.S"
#endif
_GLOBAL(generic_secondary_thread_init)
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 3ef743fa5d7c..1f1a04b5c2a4 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -71,9 +71,6 @@ _ENTRY(_start);
* in the first level table, but that would require many changes to the
* Linux page directory/table functions that I don't want to do right now.
*
- * I used to use SPRG2 for a temporary register in the TLB handler, but it
- * has since been put to other uses. I now use a hack to save a register
- * and the CCR at memory location 0.....Someday I'll fix this.....
* -- Dan
*/
.globl __start
@@ -302,8 +299,13 @@ InstructionTLBMiss:
DO_8xx_CPU6(0x3f80, r3)
mtspr SPRN_M_TW, r10 /* Save a couple of working registers */
mfcr r10
+#ifdef CONFIG_8xx_CPU6
stw r10, 0(r0)
stw r11, 4(r0)
+#else
+ mtspr SPRN_DAR, r10
+ mtspr SPRN_SPRG2, r11
+#endif
mfspr r10, SPRN_SRR0 /* Get effective address of fault */
#ifdef CONFIG_8xx_CPU15
addi r11, r10, 0x1000
@@ -318,12 +320,16 @@ InstructionTLBMiss:
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
*/
+#ifdef CONFIG_MODULES
+ /* Only modules will cause ITLB Misses as we always
+ * pin the first 8MB of kernel memory */
andi. r11, r10, 0x0800 /* Address >= 0x80000000 */
beq 3f
lis r11, swapper_pg_dir@h
ori r11, r11, swapper_pg_dir@l
rlwimi r10, r11, 0, 2, 19
3:
+#endif
lwz r11, 0(r10) /* Get the level 1 entry */
rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */
beq 2f /* If zero, don't try to find a pte */
@@ -339,31 +345,35 @@ InstructionTLBMiss:
mfspr r11, SPRN_MD_TWC /* ....and get the pte address */
lwz r10, 0(r11) /* Get the pte */
+#ifdef CONFIG_SWAP
andi. r11, r10, _PAGE_ACCESSED | _PAGE_PRESENT
cmpwi cr0, r11, _PAGE_ACCESSED | _PAGE_PRESENT
bne- cr0, 2f
-
- /* Clear PP lsb, 0x400 */
- rlwinm r10, r10, 0, 22, 20
-
+#endif
/* The Linux PTE won't go exactly into the MMU TLB.
- * Software indicator bits 22 and 28 must be clear.
+ * Software indicator bits 21 and 28 must be clear.
* Software indicator bits 24, 25, 26, and 27 must be
* set. All other Linux PTE bits control the behavior
* of the MMU.
*/
li r11, 0x00f0
- rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
+ rlwimi r10, r11, 0, 0x07f8 /* Set 24-27, clear 21-23,28 */
DO_8xx_CPU6(0x2d80, r3)
mtspr SPRN_MI_RPN, r10 /* Update TLB entry */
- mfspr r10, SPRN_M_TW /* Restore registers */
+ /* Restore registers */
+#ifndef CONFIG_8xx_CPU6
+ mfspr r10, SPRN_DAR
+ mtcr r10
+ mtspr SPRN_DAR, r11 /* Tag DAR */
+ mfspr r11, SPRN_SPRG2
+#else
lwz r11, 0(r0)
mtcr r11
lwz r11, 4(r0)
-#ifdef CONFIG_8xx_CPU6
lwz r3, 8(r0)
#endif
+ mfspr r10, SPRN_M_TW
rfi
2:
mfspr r11, SPRN_SRR1
@@ -373,13 +383,20 @@ InstructionTLBMiss:
rlwinm r11, r11, 0, 0xffff
mtspr SPRN_SRR1, r11
- mfspr r10, SPRN_M_TW /* Restore registers */
+ /* Restore registers */
+#ifndef CONFIG_8xx_CPU6
+ mfspr r10, SPRN_DAR
+ mtcr r10
+ li r11, 0x00f0
+ mtspr SPRN_DAR, r11 /* Tag DAR */
+ mfspr r11, SPRN_SPRG2
+#else
lwz r11, 0(r0)
mtcr r11
lwz r11, 4(r0)
-#ifdef CONFIG_8xx_CPU6
lwz r3, 8(r0)
#endif
+ mfspr r10, SPRN_M_TW
b InstructionAccess
. = 0x1200
@@ -390,8 +407,13 @@ DataStoreTLBMiss:
DO_8xx_CPU6(0x3f80, r3)
mtspr SPRN_M_TW, r10 /* Save a couple of working registers */
mfcr r10
+#ifdef CONFIG_8xx_CPU6
stw r10, 0(r0)
stw r11, 4(r0)
+#else
+ mtspr SPRN_DAR, r10
+ mtspr SPRN_SPRG2, r11
+#endif
mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */
/* If we are faulting a kernel address, we have to use the
@@ -438,15 +460,14 @@ DataStoreTLBMiss:
* r11 = ((r10 & PRESENT) & ((r10 & ACCESSED) >> 5));
* r10 = (r10 & ~PRESENT) | r11;
*/
+#ifdef CONFIG_SWAP
rlwinm r11, r10, 32-5, _PAGE_PRESENT
and r11, r11, r10
rlwimi r10, r11, 0, _PAGE_PRESENT
-
+#endif
/* Honour kernel RO, User NA */
/* 0x200 == Extended encoding, bit 22 */
- /* r11 = (r10 & _PAGE_USER) >> 2 */
- rlwinm r11, r10, 32-2, 0x200
- or r10, r11, r10
+ rlwimi r10, r10, 32-2, 0x200 /* Copy USER to bit 22, 0x200 */
/* r11 = (r10 & _PAGE_RW) >> 1 */
rlwinm r11, r10, 32-1, 0x200
or r10, r11, r10
@@ -460,18 +481,24 @@ DataStoreTLBMiss:
* of the MMU.
*/
2: li r11, 0x00f0
- mtspr SPRN_DAR,r11 /* Tag DAR */
rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
DO_8xx_CPU6(0x3d80, r3)
mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
- mfspr r10, SPRN_M_TW /* Restore registers */
+ /* Restore registers */
+#ifndef CONFIG_8xx_CPU6
+ mfspr r10, SPRN_DAR
+ mtcr r10
+ mtspr SPRN_DAR, r11 /* Tag DAR */
+ mfspr r11, SPRN_SPRG2
+#else
+ mtspr SPRN_DAR, r11 /* Tag DAR */
lwz r11, 0(r0)
mtcr r11
lwz r11, 4(r0)
-#ifdef CONFIG_8xx_CPU6
lwz r3, 8(r0)
#endif
+ mfspr r10, SPRN_M_TW
rfi
/* This is an instruction TLB error on the MPC8xx. This could be due
@@ -683,9 +710,6 @@ start_here:
tophys(r4,r2)
addi r4,r4,THREAD /* init task's THREAD */
mtspr SPRN_SPRG_THREAD,r4
- li r3,0
- /* XXX What is that for ? SPRG2 appears otherwise unused on 8xx */
- mtspr SPRN_SPRG2,r3 /* 0 => r1 has kernel sp */
/* stack */
lis r1,init_thread_union@ha
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index 50504ae39cb7..a0bf158c8b47 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -1,6 +1,7 @@
#ifndef __HEAD_BOOKE_H__
#define __HEAD_BOOKE_H__
+#include <asm/ptrace.h> /* for STACK_FRAME_REGS_MARKER */
/*
* Macros used for common Book-e exception handling
*/
@@ -48,6 +49,9 @@
stw r10,0(r11); \
rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
stw r0,GPR0(r11); \
+ lis r10, STACK_FRAME_REGS_MARKER@ha;/* exception frame marker */ \
+ addi r10, r10, STACK_FRAME_REGS_MARKER@l; \
+ stw r10, 8(r11); \
SAVE_4GPRS(3, r11); \
SAVE_2GPRS(7, r11)
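
Roughly what the stored marker enables, paraphrased from the 32-bit powerpc backtrace
code (a hedged sketch, not part of this patch): frames tagged with the magic word are
reported as exception frames.

#include <asm/ptrace.h>

static int demo_is_exception_frame(unsigned long *frame)
{
	/* Word 2 of a 32-bit frame (byte offset 8, as stored above) holds
	 * STACK_FRAME_REGS_MARKER when the frame carries a pt_regs. */
	return frame[2] == STACK_FRAME_REGS_MARKER;
}
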
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index 71cf280da184..693b0e6f57a8 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -201,8 +201,11 @@ static int ibmebus_create_devices(const struct of_device_id *matches)
int ibmebus_register_driver(struct of_platform_driver *drv)
{
+ if (!drv->driver.of_match_table)
+ drv->driver.of_match_table = drv->match_table;
+
/* If the driver uses devices that ibmebus doesn't know, add them */
- ibmebus_create_devices(drv->match_table);
+ ibmebus_create_devices(drv->driver.of_match_table);
return of_register_driver(drv, &ibmebus_bus_type);
}
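
A sketch of the transition the hunk above supports: a driver that still fills only
the legacy match_table has it copied into driver.of_match_table at registration.
The device IDs and names below are made up for illustration.

#include <linux/of_platform.h>

static const struct of_device_id demo_ebus_ids[] = {
	{ .compatible = "ibm,demo-ebus-dev" },	/* hypothetical compatible */
	{ }
};

static struct of_platform_driver demo_ebus_driver = {
	.match_table = demo_ebus_ids,		/* legacy field */
	.driver = {
		.name = "demo-ebus",
		/* .of_match_table deliberately left unset; filled in by
		 * ibmebus_register_driver() above */
	},
};
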
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 64f6f2031c22..250ee2ebf288 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -290,30 +290,33 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
}
#ifdef CONFIG_HOTPLUG_CPU
-void fixup_irqs(cpumask_t map)
+void fixup_irqs(const struct cpumask *map)
{
struct irq_desc *desc;
unsigned int irq;
static int warned;
+ cpumask_var_t mask;
- for_each_irq(irq) {
- cpumask_t mask;
+ alloc_cpumask_var(&mask, GFP_KERNEL);
+ for_each_irq(irq) {
desc = irq_to_desc(irq);
if (desc && desc->status & IRQ_PER_CPU)
continue;
- cpumask_and(&mask, desc->affinity, &map);
- if (any_online_cpu(mask) == NR_CPUS) {
+ cpumask_and(mask, desc->affinity, map);
+ if (cpumask_any(mask) >= nr_cpu_ids) {
printk("Breaking affinity for irq %i\n", irq);
- mask = map;
+ cpumask_copy(mask, map);
}
if (desc->chip->set_affinity)
- desc->chip->set_affinity(irq, &mask);
+ desc->chip->set_affinity(irq, mask);
else if (desc->action && !(warned++))
printk("Cannot set affinity for irq %i\n", irq);
}
+ free_cpumask_var(mask);
+
local_irq_enable();
mdelay(1);
local_irq_disable();
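
The cpumask_var_t lifecycle the hunk above adopts, shown in isolation; the function
is illustrative only.

#include <linux/cpumask.h>
#include <linux/slab.h>

static void demo_cpumask_pattern(const struct cpumask *allowed)
{
	cpumask_var_t mask;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return;				/* off-stack allocation can fail */

	cpumask_and(mask, cpu_online_mask, allowed);
	if (cpumask_any(mask) >= nr_cpu_ids)	/* empty intersection */
		cpumask_copy(mask, cpu_online_mask);

	/* ... use mask ... */

	free_cpumask_var(mask);
}
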
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 41bada0298c8..82a7b228c81a 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -20,6 +20,7 @@
#include <linux/smp.h>
#include <linux/signal.h>
#include <linux/ptrace.h>
+#include <linux/kdebug.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/machdep.h>
@@ -115,7 +116,8 @@ void kgdb_roundup_cpus(unsigned long flags)
/* KGDB functions to use existing PowerPC64 hooks. */
static int kgdb_debugger(struct pt_regs *regs)
{
- return kgdb_handle_exception(0, computeSignal(TRAP(regs)), 0, regs);
+ return !kgdb_handle_exception(1, computeSignal(TRAP(regs)),
+ DIE_OOPS, regs);
}
static int kgdb_handle_breakpoint(struct pt_regs *regs)
@@ -123,7 +125,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
if (user_mode(regs))
return 0;
- if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
+ if (kgdb_handle_exception(1, SIGTRAP, 0, regs) != 0)
return 0;
if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
@@ -309,6 +311,11 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
(unsigned long)(((void *)gdb_regs) + NUMREGBYTES));
}
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+ regs->nip = pc;
+}
+
/*
 * This function does PowerPC-specific processing for interfacing to gdb.
*/
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index b36f074524ad..c533525ca56a 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -114,6 +114,9 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
regs->msr &= ~MSR_CE;
mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
+#ifdef CONFIG_PPC_47x
+ isync();
+#endif
#endif
/*
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index c2c70e1b32cd..50362b6ef6e9 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -38,7 +38,7 @@
#include <asm/vio.h>
#include <asm/mmu.h>
-#define MODULE_VERS "1.8"
+#define MODULE_VERS "1.9"
#define MODULE_NAME "lparcfg"
/* #define LPARCFG_DEBUG */
@@ -487,6 +487,14 @@ static void splpar_dispatch_data(struct seq_file *m)
seq_printf(m, "dispatch_dispersions=%lu\n", dispatch_dispersions);
}
+static void parse_em_data(struct seq_file *m)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ if (plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS)
+ seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]);
+}
+
static int pseries_lparcfg_data(struct seq_file *m, void *v)
{
int partition_potential_processors;
@@ -541,6 +549,8 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
seq_printf(m, "slb_size=%d\n", mmu_slb_size);
+ parse_em_data(m);
+
return 0;
}
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 8649f536f8df..8043d1b73cf0 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -441,7 +441,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
addi r3,r3,L1_CACHE_BYTES
bdnz 0b
sync
-#ifndef CONFIG_44x
+#ifdef CONFIG_44x
/* We don't flush the icache on 44x. Those have a virtual icache
* and we don't have access to the virtual address here (it's
* not the page vaddr but where it's mapped in user space). The
@@ -449,15 +449,19 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
* a change in the address space occurs, before returning to
* user space
*/
+BEGIN_MMU_FTR_SECTION
+ blr
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_44x)
+#endif /* CONFIG_44x */
mtctr r4
1: icbi 0,r6
addi r6,r6,L1_CACHE_BYTES
bdnz 1b
sync
isync
-#endif /* CONFIG_44x */
blr
+#ifndef CONFIG_BOOKE
/*
* Flush a particular page from the data cache to RAM, identified
* by its physical address. We turn off the MMU so we can just use
@@ -490,6 +494,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
mtmsr r10 /* restore DR */
isync
blr
+#endif /* CONFIG_BOOKE */
/*
* Clear pages using the dcbz instruction, which doesn't cause any
diff --git a/arch/powerpc/kernel/of_device.c b/arch/powerpc/kernel/of_device.c
index a359cb08e900..9577e6f4e3bf 100644
--- a/arch/powerpc/kernel/of_device.c
+++ b/arch/powerpc/kernel/of_device.c
@@ -74,6 +74,7 @@ struct of_device *of_device_alloc(struct device_node *np,
dev->dev.parent = parent;
dev->dev.release = of_release_dev;
dev->dev.archdata.of_node = np;
+ dev->dev.of_node = np;
if (bus_id)
dev_set_name(&dev->dev, "%s", bus_id);
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 0c0567e58409..88da282047c3 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -1097,8 +1097,9 @@ void __devinit pcibios_setup_bus_devices(struct pci_bus *bus)
if (dev->is_added)
continue;
- /* Setup OF node pointer in archdata */
+ /* Setup OF node pointer in the device */
sd->of_node = pci_device_to_OF_node(dev);
+ dev->dev.of_node = pci_device_to_OF_node(dev);
/* Fixup NUMA node as it may not be setup yet by the generic
* code and is needed by the DMA init
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index ab3e392ac63c..bc9f39d2598b 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -101,6 +101,10 @@ EXPORT_SYMBOL(pci_dram_offset);
EXPORT_SYMBOL(start_thread);
EXPORT_SYMBOL(kernel_thread);
+#ifndef CONFIG_BOOKE
+EXPORT_SYMBOL_GPL(cvt_df);
+EXPORT_SYMBOL_GPL(cvt_fd);
+#endif
EXPORT_SYMBOL(giveup_fpu);
#ifdef CONFIG_ALTIVEC
EXPORT_SYMBOL(giveup_altivec);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index e4d71ced97ef..9d255b4f0a0e 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -371,6 +371,9 @@ int set_dabr(unsigned long dabr)
/* XXX should we have a CPU_FTR_HAS_DABR ? */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
mtspr(SPRN_DAC1, dabr);
+#ifdef CONFIG_PPC_47x
+ isync();
+#endif
#elif defined(CONFIG_PPC_BOOK3S)
mtspr(SPRN_DABR, dabr);
#endif
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index ed2cfe17d25e..7a0c0199ea28 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -39,6 +39,109 @@
#include <asm/system.h>
/*
+ * The parameter save area on the stack is used to store arguments being passed
+ * to callee function and is located at fixed offset from stack pointer.
+ */
+#ifdef CONFIG_PPC32
+#define PARAMETER_SAVE_AREA_OFFSET 24 /* bytes */
+#else /* CONFIG_PPC32 */
+#define PARAMETER_SAVE_AREA_OFFSET 48 /* bytes */
+#endif
+
+struct pt_regs_offset {
+ const char *name;
+ int offset;
+};
+
+#define STR(s) #s /* convert to string */
+#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
+#define GPR_OFFSET_NAME(num) \
+ {.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
+#define REG_OFFSET_END {.name = NULL, .offset = 0}
+
+static const struct pt_regs_offset regoffset_table[] = {
+ GPR_OFFSET_NAME(0),
+ GPR_OFFSET_NAME(1),
+ GPR_OFFSET_NAME(2),
+ GPR_OFFSET_NAME(3),
+ GPR_OFFSET_NAME(4),
+ GPR_OFFSET_NAME(5),
+ GPR_OFFSET_NAME(6),
+ GPR_OFFSET_NAME(7),
+ GPR_OFFSET_NAME(8),
+ GPR_OFFSET_NAME(9),
+ GPR_OFFSET_NAME(10),
+ GPR_OFFSET_NAME(11),
+ GPR_OFFSET_NAME(12),
+ GPR_OFFSET_NAME(13),
+ GPR_OFFSET_NAME(14),
+ GPR_OFFSET_NAME(15),
+ GPR_OFFSET_NAME(16),
+ GPR_OFFSET_NAME(17),
+ GPR_OFFSET_NAME(18),
+ GPR_OFFSET_NAME(19),
+ GPR_OFFSET_NAME(20),
+ GPR_OFFSET_NAME(21),
+ GPR_OFFSET_NAME(22),
+ GPR_OFFSET_NAME(23),
+ GPR_OFFSET_NAME(24),
+ GPR_OFFSET_NAME(25),
+ GPR_OFFSET_NAME(26),
+ GPR_OFFSET_NAME(27),
+ GPR_OFFSET_NAME(28),
+ GPR_OFFSET_NAME(29),
+ GPR_OFFSET_NAME(30),
+ GPR_OFFSET_NAME(31),
+ REG_OFFSET_NAME(nip),
+ REG_OFFSET_NAME(msr),
+ REG_OFFSET_NAME(ctr),
+ REG_OFFSET_NAME(link),
+ REG_OFFSET_NAME(xer),
+ REG_OFFSET_NAME(ccr),
+#ifdef CONFIG_PPC64
+ REG_OFFSET_NAME(softe),
+#else
+ REG_OFFSET_NAME(mq),
+#endif
+ REG_OFFSET_NAME(trap),
+ REG_OFFSET_NAME(dar),
+ REG_OFFSET_NAME(dsisr),
+ REG_OFFSET_END,
+};
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name: the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL;
+ */
+int regs_query_register_offset(const char *name)
+{
+ const struct pt_regs_offset *roff;
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (!strcmp(roff->name, name))
+ return roff->offset;
+ return -EINVAL;
+}
+
+/**
+ * regs_query_register_name() - query register name from its offset
+ * @offset: the offset of a register in struct pt_regs.
+ *
+ * regs_query_register_name() returns the name of a register from its
+ * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
+ */
+const char *regs_query_register_name(unsigned int offset)
+{
+ const struct pt_regs_offset *roff;
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (roff->offset == offset)
+ return roff->name;
+ return NULL;
+}
+
+/*
* does not yet catch signals sent when the child dies.
* in exit.c or in signal.c.
*/
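
The ptrace.c hunk above adds a name/offset table for struct pt_regs plus two lookup helpers. A short sketch of how a kprobes-style consumer might use them; the wrapper function and the register name "gpr3" are illustrative assumptions, not part of the patch:

	/* Illustrative sketch, not part of this patch. */
	#include <linux/ptrace.h>

	static unsigned long read_named_reg(struct pt_regs *regs, const char *name)
	{
		int off = regs_query_register_offset(name);	/* e.g. "gpr3", "nip" */

		if (off < 0)			/* -EINVAL: unknown register name */
			return 0;
		return *(unsigned long *)((char *)regs + off);
	}
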
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 74367841615a..0e1ec6f746f6 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -691,10 +691,14 @@ void rtas_os_term(char *str)
{
int status;
- if (panic_timeout)
- return;
-
- if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term"))
+ /*
+ * Firmware with the ibm,extended-os-term property is guaranteed
+ * to always return from an ibm,os-term call. Earlier versions without
+ * this property may terminate the partition which we want to avoid
+ * since it interferes with panic_timeout.
+ */
+ if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term") ||
+ RTAS_UNKNOWN_SERVICE == rtas_token("ibm,extended-os-term"))
return;
snprintf(rtas_os_term_buf, 2048, "OS panic: %s", str);
@@ -705,8 +709,7 @@ void rtas_os_term(char *str)
} while (rtas_busy_delay(status));
if (status != 0)
- printk(KERN_EMERG "ibm,os-term call failed %d\n",
- status);
+ printk(KERN_EMERG "ibm,os-term call failed %d\n", status);
}
static int ibm_suspend_me_token = RTAS_UNKNOWN_SERVICE;
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index 4190eae7850a..e907ca66f75a 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -411,9 +411,9 @@ static void rtas_event_scan(struct work_struct *w)
get_online_cpus();
- cpu = next_cpu(smp_processor_id(), cpu_online_map);
- if (cpu == NR_CPUS) {
- cpu = first_cpu(cpu_online_map);
+ cpu = cpumask_next(smp_processor_id(), cpu_online_mask);
+ if (cpu >= nr_cpu_ids) {
+ cpu = cpumask_first(cpu_online_mask);
if (first_pass) {
first_pass = 0;
@@ -466,8 +466,8 @@ static void start_event_scan(void)
/* Retreive errors from nvram if any */
retreive_nvram_error_log();
- schedule_delayed_work_on(first_cpu(cpu_online_map), &event_scan_work,
- event_scan_delay);
+ schedule_delayed_work_on(cpumask_first(cpu_online_mask),
+ &event_scan_work, event_scan_delay);
}
static int __init rtas_init(void)
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 48f0a008b20b..5e4d852f640c 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -161,45 +161,44 @@ extern u32 cpu_temp_both(unsigned long cpu);
DEFINE_PER_CPU(unsigned int, cpu_pvr);
#endif
-static int show_cpuinfo(struct seq_file *m, void *v)
+static void show_cpuinfo_summary(struct seq_file *m)
{
- unsigned long cpu_id = (unsigned long)v - 1;
- unsigned int pvr;
- unsigned short maj;
- unsigned short min;
-
- if (cpu_id == NR_CPUS) {
- struct device_node *root;
- const char *model = NULL;
+ struct device_node *root;
+ const char *model = NULL;
#if defined(CONFIG_SMP) && defined(CONFIG_PPC32)
- unsigned long bogosum = 0;
- int i;
- for_each_online_cpu(i)
- bogosum += loops_per_jiffy;
- seq_printf(m, "total bogomips\t: %lu.%02lu\n",
- bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
+ unsigned long bogosum = 0;
+ int i;
+ for_each_online_cpu(i)
+ bogosum += loops_per_jiffy;
+ seq_printf(m, "total bogomips\t: %lu.%02lu\n",
+ bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
#endif /* CONFIG_SMP && CONFIG_PPC32 */
- seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq);
- if (ppc_md.name)
- seq_printf(m, "platform\t: %s\n", ppc_md.name);
- root = of_find_node_by_path("/");
- if (root)
- model = of_get_property(root, "model", NULL);
- if (model)
- seq_printf(m, "model\t\t: %s\n", model);
- of_node_put(root);
-
- if (ppc_md.show_cpuinfo != NULL)
- ppc_md.show_cpuinfo(m);
+ seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq);
+ if (ppc_md.name)
+ seq_printf(m, "platform\t: %s\n", ppc_md.name);
+ root = of_find_node_by_path("/");
+ if (root)
+ model = of_get_property(root, "model", NULL);
+ if (model)
+ seq_printf(m, "model\t\t: %s\n", model);
+ of_node_put(root);
+
+ if (ppc_md.show_cpuinfo != NULL)
+ ppc_md.show_cpuinfo(m);
#ifdef CONFIG_PPC32
- /* Display the amount of memory */
- seq_printf(m, "Memory\t\t: %d MB\n",
- (unsigned int)(total_memory / (1024 * 1024)));
+ /* Display the amount of memory */
+ seq_printf(m, "Memory\t\t: %d MB\n",
+ (unsigned int)(total_memory / (1024 * 1024)));
#endif
+}
- return 0;
- }
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+ unsigned long cpu_id = (unsigned long)v - 1;
+ unsigned int pvr;
+ unsigned short maj;
+ unsigned short min;
/* We only show online cpus: disable preempt (overzealous, I
* knew) to prevent cpu going down. */
@@ -308,19 +307,28 @@ static int show_cpuinfo(struct seq_file *m, void *v)
#endif
preempt_enable();
+
+ /* If this is the last cpu, print the summary */
+ if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids)
+ show_cpuinfo_summary(m);
+
return 0;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
- unsigned long i = *pos;
-
- return i <= NR_CPUS ? (void *)(i + 1) : NULL;
+ if (*pos == 0) /* just in case, cpu 0 is not the first */
+ *pos = cpumask_first(cpu_online_mask);
+ else
+ *pos = cpumask_next(*pos - 1, cpu_online_mask);
+ if ((*pos) < nr_cpu_ids)
+ return (void *)(unsigned long)(*pos + 1);
+ return NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
- ++*pos;
+ (*pos)++;
return c_start(m, pos);
}
@@ -386,14 +394,14 @@ static void __init cpu_init_thread_core_maps(int tpc)
/**
* setup_cpu_maps - initialize the following cpu maps:
- * cpu_possible_map
- * cpu_present_map
+ * cpu_possible_mask
+ * cpu_present_mask
*
* Having the possible map set up early allows us to restrict allocations
* of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
*
* We do not initialize the online map here; cpus set their own bits in
- * cpu_online_map as they come up.
+ * cpu_online_mask as they come up.
*
* This function is valid only for Open Firmware systems. finish_device_tree
* must be called before using this.
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index c2ee14498077..bf366167d369 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -59,8 +59,8 @@
struct thread_info *secondary_ti;
-DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
-DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE;
+DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
+DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
@@ -271,6 +271,16 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
smp_store_cpu_info(boot_cpuid);
cpu_callin_map[boot_cpuid] = 1;
+ for_each_possible_cpu(cpu) {
+ zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
+ GFP_KERNEL, cpu_to_node(cpu));
+ zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
+ GFP_KERNEL, cpu_to_node(cpu));
+ }
+
+ cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
+ cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
+
if (smp_ops)
if (smp_ops->probe)
max_cpus = smp_ops->probe();
@@ -289,10 +299,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
void __devinit smp_prepare_boot_cpu(void)
{
BUG_ON(smp_processor_id() != boot_cpuid);
-
- set_cpu_online(boot_cpuid, true);
- cpu_set(boot_cpuid, per_cpu(cpu_sibling_map, boot_cpuid));
- cpu_set(boot_cpuid, per_cpu(cpu_core_map, boot_cpuid));
#ifdef CONFIG_PPC64
paca[boot_cpuid].__current = current;
#endif
@@ -313,7 +319,7 @@ int generic_cpu_disable(void)
set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
vdso_data->processorCount--;
- fixup_irqs(cpu_online_map);
+ fixup_irqs(cpu_online_mask);
#endif
return 0;
}
@@ -333,7 +339,7 @@ int generic_cpu_enable(unsigned int cpu)
cpu_relax();
#ifdef CONFIG_PPC64
- fixup_irqs(cpu_online_map);
+ fixup_irqs(cpu_online_mask);
/* counter the irq disable in fixup_irqs */
local_irq_enable();
#endif
@@ -462,7 +468,7 @@ out:
return id;
}
-/* Must be called when no change can occur to cpu_present_map,
+/* Must be called when no change can occur to cpu_present_mask,
* i.e. during cpu online or offline.
*/
static struct device_node *cpu_to_l2cache(int cpu)
@@ -495,6 +501,14 @@ int __devinit start_secondary(void *unused)
current->active_mm = &init_mm;
smp_store_cpu_info(cpu);
+
+#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+ /* Clear any pending timer interrupts */
+ mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);
+
+ /* Enable decrementer interrupt */
+ mtspr(SPRN_TCR, TCR_DIE);
+#endif
set_dec(tb_ticks_per_jiffy);
preempt_disable();
cpu_callin_map[cpu] = 1;
@@ -517,15 +531,15 @@ int __devinit start_secondary(void *unused)
for (i = 0; i < threads_per_core; i++) {
if (cpu_is_offline(base + i))
continue;
- cpu_set(cpu, per_cpu(cpu_sibling_map, base + i));
- cpu_set(base + i, per_cpu(cpu_sibling_map, cpu));
+ cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
+ cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));
/* cpu_core_map should be a superset of
* cpu_sibling_map even if we don't have cache
* information, so update the former here, too.
*/
- cpu_set(cpu, per_cpu(cpu_core_map, base +i));
- cpu_set(base + i, per_cpu(cpu_core_map, cpu));
+ cpumask_set_cpu(cpu, cpu_core_mask(base + i));
+ cpumask_set_cpu(base + i, cpu_core_mask(cpu));
}
l2_cache = cpu_to_l2cache(cpu);
for_each_online_cpu(i) {
@@ -533,8 +547,8 @@ int __devinit start_secondary(void *unused)
if (!np)
continue;
if (np == l2_cache) {
- cpu_set(cpu, per_cpu(cpu_core_map, i));
- cpu_set(i, per_cpu(cpu_core_map, cpu));
+ cpumask_set_cpu(cpu, cpu_core_mask(i));
+ cpumask_set_cpu(i, cpu_core_mask(cpu));
}
of_node_put(np);
}
@@ -554,19 +568,22 @@ int setup_profiling_timer(unsigned int multiplier)
void __init smp_cpus_done(unsigned int max_cpus)
{
- cpumask_t old_mask;
+ cpumask_var_t old_mask;
/* We want the setup_cpu() here to be called from CPU 0, but our
* init thread may have been "borrowed" by another CPU in the meantime
 * so we pin us down to CPU 0 for a short while
*/
- old_mask = current->cpus_allowed;
- set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid));
+ alloc_cpumask_var(&old_mask, GFP_NOWAIT);
+ cpumask_copy(old_mask, &current->cpus_allowed);
+ set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));
if (smp_ops && smp_ops->setup_cpu)
smp_ops->setup_cpu(boot_cpuid);
- set_cpus_allowed(current, old_mask);
+ set_cpus_allowed_ptr(current, old_mask);
+
+ free_cpumask_var(old_mask);
snapshot_timebases();
@@ -591,10 +608,10 @@ int __cpu_disable(void)
/* Update sibling maps */
base = cpu_first_thread_in_core(cpu);
for (i = 0; i < threads_per_core; i++) {
- cpu_clear(cpu, per_cpu(cpu_sibling_map, base + i));
- cpu_clear(base + i, per_cpu(cpu_sibling_map, cpu));
- cpu_clear(cpu, per_cpu(cpu_core_map, base +i));
- cpu_clear(base + i, per_cpu(cpu_core_map, cpu));
+ cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
+ cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
+ cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
+ cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
}
l2_cache = cpu_to_l2cache(cpu);
@@ -603,8 +620,8 @@ int __cpu_disable(void)
if (!np)
continue;
if (np == l2_cache) {
- cpu_clear(cpu, per_cpu(cpu_core_map, i));
- cpu_clear(i, per_cpu(cpu_core_map, cpu));
+ cpumask_clear_cpu(cpu, cpu_core_mask(i));
+ cpumask_clear_cpu(i, cpu_core_mask(cpu));
}
of_node_put(np);
}
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 29d128eb6c43..3031fc712ad0 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -380,6 +380,46 @@ int machine_check_440A(struct pt_regs *regs)
}
return 0;
}
+
+int machine_check_47x(struct pt_regs *regs)
+{
+ unsigned long reason = get_mc_reason(regs);
+ u32 mcsr;
+
+ printk(KERN_ERR "Machine check in kernel mode.\n");
+ if (reason & ESR_IMCP) {
+ printk(KERN_ERR
+ "Instruction Synchronous Machine Check exception\n");
+ mtspr(SPRN_ESR, reason & ~ESR_IMCP);
+ return 0;
+ }
+ mcsr = mfspr(SPRN_MCSR);
+ if (mcsr & MCSR_IB)
+ printk(KERN_ERR "Instruction Read PLB Error\n");
+ if (mcsr & MCSR_DRB)
+ printk(KERN_ERR "Data Read PLB Error\n");
+ if (mcsr & MCSR_DWB)
+ printk(KERN_ERR "Data Write PLB Error\n");
+ if (mcsr & MCSR_TLBP)
+ printk(KERN_ERR "TLB Parity Error\n");
+ if (mcsr & MCSR_ICP) {
+ flush_instruction_cache();
+ printk(KERN_ERR "I-Cache Parity Error\n");
+ }
+ if (mcsr & MCSR_DCSP)
+ printk(KERN_ERR "D-Cache Search Parity Error\n");
+ if (mcsr & PPC47x_MCSR_GPR)
+ printk(KERN_ERR "GPR Parity Error\n");
+ if (mcsr & PPC47x_MCSR_FPR)
+ printk(KERN_ERR "FPR Parity Error\n");
+ if (mcsr & PPC47x_MCSR_IPR)
+ printk(KERN_ERR "Machine Check exception is imprecise\n");
+
+ /* Clear MCSR */
+ mtspr(SPRN_MCSR, mcsr);
+
+ return 0;
+}
#elif defined(CONFIG_E500)
int machine_check_e500(struct pt_regs *regs)
{
@@ -815,12 +855,15 @@ void __kprobes program_check_exception(struct pt_regs *regs)
return;
}
if (reason & REASON_TRAP) {
+ /* Debugger is first in line to stop recursive faults in
+ * rcu_lock, notify_die, or atomic_notifier_call_chain */
+ if (debugger_bpt(regs))
+ return;
+
/* trap exception */
if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
== NOTIFY_STOP)
return;
- if (debugger_bpt(regs))
- return;
if (!(regs->msr & MSR_PR) && /* not user-mode */
report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 82237176a2a3..737c260f76c4 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -645,8 +645,10 @@ void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired)
found = 1;
break;
}
- if (!found)
+ if (!found) {
+ spin_unlock_irqrestore(&vio_cmo.lock, flags);
return;
+ }
/* Increase/decrease in desired device entitlement */
if (desired >= viodev->cmo.desired) {
@@ -958,9 +960,12 @@ viodev_cmo_rd_attr(allocated);
static ssize_t name_show(struct device *, struct device_attribute *, char *);
static ssize_t devspec_show(struct device *, struct device_attribute *, char *);
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ char *buf);
static struct device_attribute vio_cmo_dev_attrs[] = {
__ATTR_RO(name),
__ATTR_RO(devspec),
+ __ATTR_RO(modalias),
__ATTR(cmo_desired, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
viodev_cmo_desired_show, viodev_cmo_desired_set),
__ATTR(cmo_entitled, S_IRUGO, viodev_cmo_entitled_show, NULL),
@@ -1230,7 +1235,8 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
if (unit_address != NULL)
viodev->unit_address = *unit_address;
}
- viodev->dev.archdata.of_node = of_node_get(of_node);
+ viodev->dev.of_node = of_node_get(of_node);
+ viodev->dev.archdata.of_node = viodev->dev.of_node;
if (firmware_has_feature(FW_FEATURE_CMO))
vio_cmo_set_dma_ops(viodev);
@@ -1320,9 +1326,27 @@ static ssize_t devspec_show(struct device *dev,
return sprintf(buf, "%s\n", of_node ? of_node->full_name : "none");
}
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ const struct vio_dev *vio_dev = to_vio_dev(dev);
+ struct device_node *dn;
+ const char *cp;
+
+ dn = dev->archdata.of_node;
+ if (!dn)
+ return -ENODEV;
+ cp = of_get_property(dn, "compatible", NULL);
+ if (!cp)
+ return -ENODEV;
+
+ return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
+}
+
static struct device_attribute vio_dev_attrs[] = {
__ATTR_RO(name),
__ATTR_RO(devspec),
+ __ATTR_RO(modalias),
__ATTR_NULL
};
@@ -1358,6 +1382,29 @@ static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
+static int vio_pm_suspend(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ if (pm && pm->suspend)
+ return pm->suspend(dev);
+ return 0;
+}
+
+static int vio_pm_resume(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ if (pm && pm->resume)
+ return pm->resume(dev);
+ return 0;
+}
+
+const struct dev_pm_ops vio_dev_pm_ops = {
+ .suspend = vio_pm_suspend,
+ .resume = vio_pm_resume,
+};
+
static struct bus_type vio_bus_type = {
.name = "vio",
.dev_attrs = vio_dev_attrs,
@@ -1365,6 +1412,7 @@ static struct bus_type vio_bus_type = {
.match = vio_bus_match,
.probe = vio_bus_probe,
.remove = vio_bus_remove,
+ .pm = &vio_dev_pm_ops,
};
/**
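
The new modalias attribute above reports "vio:T<type>S<compatible>", so userspace (for instance udev) can autoload the matching driver module. For reference, a hedged sketch of the driver-side id table such a string is matched against; the id entries here are example values, not taken from this patch:

	/* Illustrative sketch, not part of this patch. */
	#include <linux/module.h>
	#include <asm/vio.h>

	static const struct vio_device_id example_vio_ids[] = {
		{ "network", "IBM,l-lan" },	/* { type, compat } -- example values */
		{ "", "" }
	};
	MODULE_DEVICE_TABLE(vio, example_vio_ids);
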
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index dcd01c82e701..8a0deefac08d 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -223,19 +223,17 @@ SECTIONS
#endif
/* The initial task and kernel stack */
- .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
- INIT_TASK_DATA(THREAD_SIZE)
- }
+ INIT_TASK_DATA_SECTION(THREAD_SIZE)
- .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
+ .data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
PAGE_ALIGNED_DATA(PAGE_SIZE)
}
- .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
+ .data..cacheline_aligned : AT(ADDR(.data..cacheline_aligned) - LOAD_OFFSET) {
CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
}
- .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
+ .data..read_mostly : AT(ADDR(.data..read_mostly) - LOAD_OFFSET) {
READ_MOSTLY_DATA(L1_CACHE_BYTES)
}
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
index 689a57c2ac80..73c0a3f64ed1 100644
--- a/arch/powerpc/kvm/44x.c
+++ b/arch/powerpc/kvm/44x.c
@@ -147,7 +147,7 @@ static int __init kvmppc_44x_init(void)
if (r)
return r;
- return kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), THIS_MODULE);
+ return kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), 0, THIS_MODULE);
}
static void __exit kvmppc_44x_exit(void)
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 60624cc9f4d4..b7baff78f90c 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -22,12 +22,34 @@ config KVM
select ANON_INODES
select KVM_MMIO
+config KVM_BOOK3S_HANDLER
+ bool
+
+config KVM_BOOK3S_32_HANDLER
+ bool
+ select KVM_BOOK3S_HANDLER
+
config KVM_BOOK3S_64_HANDLER
bool
+ select KVM_BOOK3S_HANDLER
+
+config KVM_BOOK3S_32
+ tristate "KVM support for PowerPC book3s_32 processors"
+ depends on EXPERIMENTAL && PPC_BOOK3S_32 && !SMP && !PTE_64BIT
+ select KVM
+ select KVM_BOOK3S_32_HANDLER
+ ---help---
+ Support running unmodified book3s_32 guest kernels
+ in virtual machines on book3s_32 host processors.
+
+ This module provides access to the hardware capabilities through
+ a character device node named /dev/kvm.
+
+ If unsure, say N.
config KVM_BOOK3S_64
tristate "KVM support for PowerPC book3s_64 processors"
- depends on EXPERIMENTAL && PPC64
+ depends on EXPERIMENTAL && PPC_BOOK3S_64
select KVM
select KVM_BOOK3S_64_HANDLER
---help---
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 56484d652377..ff436066bf77 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -14,7 +14,7 @@ CFLAGS_emulate.o := -I.
common-objs-y += powerpc.o emulate.o
obj-$(CONFIG_KVM_EXIT_TIMING) += timing.o
-obj-$(CONFIG_KVM_BOOK3S_64_HANDLER) += book3s_64_exports.o
+obj-$(CONFIG_KVM_BOOK3S_HANDLER) += book3s_exports.o
AFLAGS_booke_interrupts.o := -I$(obj)
@@ -40,17 +40,31 @@ kvm-objs-$(CONFIG_KVM_E500) := $(kvm-e500-objs)
kvm-book3s_64-objs := \
$(common-objs-y) \
+ fpu.o \
+ book3s_paired_singles.o \
book3s.o \
- book3s_64_emulate.o \
- book3s_64_interrupts.o \
+ book3s_emulate.o \
+ book3s_interrupts.o \
book3s_64_mmu_host.o \
book3s_64_mmu.o \
book3s_32_mmu.o
kvm-objs-$(CONFIG_KVM_BOOK3S_64) := $(kvm-book3s_64-objs)
+kvm-book3s_32-objs := \
+ $(common-objs-y) \
+ fpu.o \
+ book3s_paired_singles.o \
+ book3s.o \
+ book3s_emulate.o \
+ book3s_interrupts.o \
+ book3s_32_mmu_host.o \
+ book3s_32_mmu.o
+kvm-objs-$(CONFIG_KVM_BOOK3S_32) := $(kvm-book3s_32-objs)
+
kvm-objs := $(kvm-objs-m) $(kvm-objs-y)
obj-$(CONFIG_KVM_440) += kvm.o
obj-$(CONFIG_KVM_E500) += kvm.o
obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o
+obj-$(CONFIG_KVM_BOOK3S_32) += kvm.o
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 604af29b71ed..11f226ff4468 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -16,6 +16,7 @@
#include <linux/kvm_host.h>
#include <linux/err.h>
+#include <linux/slab.h>
#include <asm/reg.h>
#include <asm/cputable.h>
@@ -29,6 +30,7 @@
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
+#include <linux/highmem.h>
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
@@ -36,7 +38,15 @@
/* #define EXIT_DEBUG_SIMPLE */
/* #define DEBUG_EXT */
-static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
+static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
+ ulong msr);
+
+/* Some compatibility defines */
+#ifdef CONFIG_PPC_BOOK3S_32
+#define MSR_USER32 MSR_USER
+#define MSR_USER64 MSR_USER
+#define HW_PAGE_SIZE PAGE_SIZE
+#endif
struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "exits", VCPU_STAT(sum_exits) },
@@ -69,18 +79,26 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
- memcpy(get_paca()->kvm_slb, to_book3s(vcpu)->slb_shadow, sizeof(get_paca()->kvm_slb));
- memcpy(&get_paca()->shadow_vcpu, &to_book3s(vcpu)->shadow_vcpu,
+#ifdef CONFIG_PPC_BOOK3S_64
+ memcpy(to_svcpu(vcpu)->slb, to_book3s(vcpu)->slb_shadow, sizeof(to_svcpu(vcpu)->slb));
+ memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
sizeof(get_paca()->shadow_vcpu));
- get_paca()->kvm_slb_max = to_book3s(vcpu)->slb_shadow_max;
+ to_svcpu(vcpu)->slb_max = to_book3s(vcpu)->slb_shadow_max;
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S_32
+ current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
+#endif
}
void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
- memcpy(to_book3s(vcpu)->slb_shadow, get_paca()->kvm_slb, sizeof(get_paca()->kvm_slb));
- memcpy(&to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
+#ifdef CONFIG_PPC_BOOK3S_64
+ memcpy(to_book3s(vcpu)->slb_shadow, to_svcpu(vcpu)->slb, sizeof(to_svcpu(vcpu)->slb));
+ memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
sizeof(get_paca()->shadow_vcpu));
- to_book3s(vcpu)->slb_shadow_max = get_paca()->kvm_slb_max;
+ to_book3s(vcpu)->slb_shadow_max = to_svcpu(vcpu)->slb_max;
+#endif
kvmppc_giveup_ext(vcpu, MSR_FP);
kvmppc_giveup_ext(vcpu, MSR_VEC);
@@ -131,18 +149,22 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
}
}
- if (((vcpu->arch.msr & (MSR_IR|MSR_DR)) != (old_msr & (MSR_IR|MSR_DR))) ||
- (vcpu->arch.msr & MSR_PR) != (old_msr & MSR_PR)) {
+ if ((vcpu->arch.msr & (MSR_PR|MSR_IR|MSR_DR)) !=
+ (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
kvmppc_mmu_flush_segments(vcpu);
- kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc);
+ kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
}
+
+ /* Preload FPU if it's enabled */
+ if (vcpu->arch.msr & MSR_FP)
+ kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
- vcpu->arch.srr0 = vcpu->arch.pc;
+ vcpu->arch.srr0 = kvmppc_get_pc(vcpu);
vcpu->arch.srr1 = vcpu->arch.msr | flags;
- vcpu->arch.pc = to_book3s(vcpu)->hior + vec;
+ kvmppc_set_pc(vcpu, to_book3s(vcpu)->hior + vec);
vcpu->arch.mmu.reset_msr(vcpu);
}
@@ -218,6 +240,12 @@ void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
}
+void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
+ struct kvm_interrupt *irq)
+{
+ kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
+}
+
int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
{
int deliver = 1;
@@ -302,7 +330,7 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
priority = __ffs(*pending);
- while (priority <= (sizeof(unsigned int) * 8)) {
+ while (priority < BOOK3S_IRQPRIO_MAX) {
if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
(priority != BOOK3S_IRQPRIO_DECREMENTER)) {
/* DEC interrupts get cleared by mtdec */
@@ -318,13 +346,18 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
{
+ u32 host_pvr;
+
vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
vcpu->arch.pvr = pvr;
+#ifdef CONFIG_PPC_BOOK3S_64
if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
kvmppc_mmu_book3s_64_init(vcpu);
to_book3s(vcpu)->hior = 0xfff00000;
to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
- } else {
+ } else
+#endif
+ {
kvmppc_mmu_book3s_32_init(vcpu);
to_book3s(vcpu)->hior = 0;
to_book3s(vcpu)->msr_mask = 0xffffffffULL;
@@ -337,6 +370,32 @@ void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
!strcmp(cur_cpu_spec->platform, "ppc970"))
vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
+ /* Cell performs badly if MSR_FEx are set. So let's hope nobody
+ really needs them in a VM on Cell and force disable them. */
+ if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
+ to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);
+
+#ifdef CONFIG_PPC_BOOK3S_32
+ /* 32 bit Book3S always has 32 byte dcbz */
+ vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
+#endif
+
+ /* On some CPUs we can execute paired single operations natively */
+ asm ( "mfpvr %0" : "=r"(host_pvr));
+ switch (host_pvr) {
+ case 0x00080200: /* lonestar 2.0 */
+ case 0x00088202: /* lonestar 2.2 */
+ case 0x70000100: /* gekko 1.0 */
+ case 0x00080100: /* gekko 2.0 */
+ case 0x00083203: /* gekko 2.3a */
+ case 0x00083213: /* gekko 2.3b */
+ case 0x00083204: /* gekko 2.4 */
+ case 0x00083214: /* gekko 2.4e (8SE) - retail HW2 */
+ case 0x00087200: /* broadway */
+ vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
+ /* Enable HID2.PSE - in case we need it later */
+ mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
+ }
}
/* Book3s_32 CPUs always have 32 bytes cache line size, which Linux assumes. To
@@ -350,34 +409,29 @@ void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
*/
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
- bool touched = false;
- hva_t hpage;
+ struct page *hpage;
+ u64 hpage_offset;
u32 *page;
int i;
- hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
- if (kvm_is_error_hva(hpage))
+ hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
+ if (is_error_page(hpage))
return;
- hpage |= pte->raddr & ~PAGE_MASK;
- hpage &= ~0xFFFULL;
-
- page = vmalloc(HW_PAGE_SIZE);
-
- if (copy_from_user(page, (void __user *)hpage, HW_PAGE_SIZE))
- goto out;
+ hpage_offset = pte->raddr & ~PAGE_MASK;
+ hpage_offset &= ~0xFFFULL;
+ hpage_offset /= 4;
- for (i=0; i < HW_PAGE_SIZE / 4; i++)
- if ((page[i] & 0xff0007ff) == INS_DCBZ) {
- page[i] &= 0xfffffff7; // reserved instruction, so we trap
- touched = true;
- }
+ get_page(hpage);
+ page = kmap_atomic(hpage, KM_USER0);
- if (touched)
- copy_to_user((void __user *)hpage, page, HW_PAGE_SIZE);
+ /* patch dcbz into reserved instruction, so we trap */
+ for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
+ if ((page[i] & 0xff0007ff) == INS_DCBZ)
+ page[i] &= 0xfffffff7;
-out:
- vfree(page);
+ kunmap_atomic(page, KM_USER0);
+ put_page(hpage);
}
static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
@@ -391,15 +445,7 @@ static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
} else {
pte->eaddr = eaddr;
pte->raddr = eaddr & 0xffffffff;
- pte->vpage = eaddr >> 12;
- switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
- case 0:
- pte->vpage |= VSID_REAL;
- case MSR_DR:
- pte->vpage |= VSID_REAL_DR;
- case MSR_IR:
- pte->vpage |= VSID_REAL_IR;
- }
+ pte->vpage = VSID_REAL | eaddr >> 12;
pte->may_read = true;
pte->may_write = true;
pte->may_execute = true;
@@ -434,55 +480,55 @@ err:
return kvmppc_bad_hva();
}
-int kvmppc_st(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr)
+int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
+ bool data)
{
struct kvmppc_pte pte;
- hva_t hva = eaddr;
vcpu->stat.st++;
- if (kvmppc_xlate(vcpu, eaddr, false, &pte))
- goto err;
+ if (kvmppc_xlate(vcpu, *eaddr, data, &pte))
+ return -ENOENT;
- hva = kvmppc_pte_to_hva(vcpu, &pte, false);
- if (kvm_is_error_hva(hva))
- goto err;
+ *eaddr = pte.raddr;
- if (copy_to_user((void __user *)hva, ptr, size)) {
- printk(KERN_INFO "kvmppc_st at 0x%lx failed\n", hva);
- goto err;
- }
+ if (!pte.may_write)
+ return -EPERM;
- return 0;
+ if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
+ return EMULATE_DO_MMIO;
-err:
- return -ENOENT;
+ return EMULATE_DONE;
}
-int kvmppc_ld(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr,
+int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
bool data)
{
struct kvmppc_pte pte;
- hva_t hva = eaddr;
+ hva_t hva = *eaddr;
vcpu->stat.ld++;
- if (kvmppc_xlate(vcpu, eaddr, data, &pte))
- goto err;
+ if (kvmppc_xlate(vcpu, *eaddr, data, &pte))
+ goto nopte;
+
+ *eaddr = pte.raddr;
hva = kvmppc_pte_to_hva(vcpu, &pte, true);
if (kvm_is_error_hva(hva))
- goto err;
+ goto mmio;
if (copy_from_user(ptr, (void __user *)hva, size)) {
printk(KERN_INFO "kvmppc_ld at 0x%lx failed\n", hva);
- goto err;
+ goto mmio;
}
- return 0;
+ return EMULATE_DONE;
-err:
+nopte:
return -ENOENT;
+mmio:
+ return EMULATE_DO_MMIO;
}
static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
@@ -499,12 +545,11 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
int page_found = 0;
struct kvmppc_pte pte;
bool is_mmio = false;
+ bool dr = (vcpu->arch.msr & MSR_DR) ? true : false;
+ bool ir = (vcpu->arch.msr & MSR_IR) ? true : false;
+ u64 vsid;
- if ( vec == BOOK3S_INTERRUPT_DATA_STORAGE ) {
- relocated = (vcpu->arch.msr & MSR_DR);
- } else {
- relocated = (vcpu->arch.msr & MSR_IR);
- }
+ relocated = data ? dr : ir;
/* Resolve real address if translation turned on */
if (relocated) {
@@ -516,14 +561,25 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
pte.raddr = eaddr & 0xffffffff;
pte.eaddr = eaddr;
pte.vpage = eaddr >> 12;
- switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
- case 0:
- pte.vpage |= VSID_REAL;
- case MSR_DR:
- pte.vpage |= VSID_REAL_DR;
- case MSR_IR:
- pte.vpage |= VSID_REAL_IR;
- }
+ }
+
+ switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+ case 0:
+ pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
+ break;
+ case MSR_DR:
+ case MSR_IR:
+ vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
+
+ if ((vcpu->arch.msr & (MSR_DR|MSR_IR)) == MSR_DR)
+ pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
+ else
+ pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
+ pte.vpage |= vsid;
+
+ if (vsid == -1)
+ page_found = -EINVAL;
+ break;
}
if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
@@ -538,20 +594,20 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (page_found == -ENOENT) {
/* Page not found in guest PTE entries */
- vcpu->arch.dear = vcpu->arch.fault_dear;
- to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr;
- vcpu->arch.msr |= (vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL);
+ vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
+ to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr;
+ vcpu->arch.msr |= (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
kvmppc_book3s_queue_irqprio(vcpu, vec);
} else if (page_found == -EPERM) {
/* Storage protection */
- vcpu->arch.dear = vcpu->arch.fault_dear;
- to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
+ vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
+ to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE;
to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT;
- vcpu->arch.msr |= (vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL);
+ vcpu->arch.msr |= (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
kvmppc_book3s_queue_irqprio(vcpu, vec);
} else if (page_found == -EINVAL) {
/* Page not found in guest SLB */
- vcpu->arch.dear = vcpu->arch.fault_dear;
+ vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
} else if (!is_mmio &&
kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
@@ -583,11 +639,13 @@ static inline int get_fpr_index(int i)
}
/* Give up external provider (FPU, Altivec, VSX) */
-static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
+void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
struct thread_struct *t = &current->thread;
u64 *vcpu_fpr = vcpu->arch.fpr;
+#ifdef CONFIG_VSX
u64 *vcpu_vsx = vcpu->arch.vsr;
+#endif
u64 *thread_fpr = (u64*)t->fpr;
int i;
@@ -629,21 +687,65 @@ static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
kvmppc_recalc_shadow_msr(vcpu);
}
+static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
+{
+ ulong srr0 = kvmppc_get_pc(vcpu);
+ u32 last_inst = kvmppc_get_last_inst(vcpu);
+ int ret;
+
+ ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
+ if (ret == -ENOENT) {
+ vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 33, 33, 1);
+ vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 34, 36, 0);
+ vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 42, 47, 0);
+ kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
+ return EMULATE_AGAIN;
+ }
+
+ return EMULATE_DONE;
+}
+
+static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
+{
+
+ /* Need to do paired single emulation? */
+ if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
+ return EMULATE_DONE;
+
+ /* Read out the instruction */
+ if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
+ /* Need to emulate */
+ return EMULATE_FAIL;
+
+ return EMULATE_AGAIN;
+}
+
/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
ulong msr)
{
struct thread_struct *t = &current->thread;
u64 *vcpu_fpr = vcpu->arch.fpr;
+#ifdef CONFIG_VSX
u64 *vcpu_vsx = vcpu->arch.vsr;
+#endif
u64 *thread_fpr = (u64*)t->fpr;
int i;
+ /* When we have paired singles, we emulate in software */
+ if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
+ return RESUME_GUEST;
+
if (!(vcpu->arch.msr & msr)) {
kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
return RESUME_GUEST;
}
+ /* We already own the ext */
+ if (vcpu->arch.guest_owned_ext & msr) {
+ return RESUME_GUEST;
+ }
+
#ifdef DEBUG_EXT
printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif
@@ -696,21 +798,33 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
run->ready_for_interrupt_injection = 1;
#ifdef EXIT_DEBUG
printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | dec=0x%x | msr=0x%lx\n",
- exit_nr, vcpu->arch.pc, vcpu->arch.fault_dear,
- kvmppc_get_dec(vcpu), vcpu->arch.msr);
+ exit_nr, kvmppc_get_pc(vcpu), kvmppc_get_fault_dar(vcpu),
+ kvmppc_get_dec(vcpu), to_svcpu(vcpu)->shadow_srr1);
#elif defined (EXIT_DEBUG_SIMPLE)
if ((exit_nr != 0x900) && (exit_nr != 0x500))
printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | msr=0x%lx\n",
- exit_nr, vcpu->arch.pc, vcpu->arch.fault_dear,
+ exit_nr, kvmppc_get_pc(vcpu), kvmppc_get_fault_dar(vcpu),
vcpu->arch.msr);
#endif
kvm_resched(vcpu);
switch (exit_nr) {
case BOOK3S_INTERRUPT_INST_STORAGE:
vcpu->stat.pf_instruc++;
+
+#ifdef CONFIG_PPC_BOOK3S_32
+ /* We set segments as unused segments when invalidating them. So
+ * treat the respective fault as segment fault. */
+ if (to_svcpu(vcpu)->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT]
+ == SR_INVALID) {
+ kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
+ r = RESUME_GUEST;
+ break;
+ }
+#endif
+
/* only care about PTEG not found errors, but leave NX alone */
- if (vcpu->arch.shadow_srr1 & 0x40000000) {
- r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.pc, exit_nr);
+ if (to_svcpu(vcpu)->shadow_srr1 & 0x40000000) {
+ r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
vcpu->stat.sp_instruc++;
} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
(!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
@@ -719,37 +833,52 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
* so we can't use the NX bit inside the guest. Let's cross our fingers,
* that no guest that needs the dcbz hack does NX.
*/
- kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
+ kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
+ r = RESUME_GUEST;
} else {
- vcpu->arch.msr |= vcpu->arch.shadow_srr1 & 0x58000000;
+ vcpu->arch.msr |= to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
- kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
+ kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
r = RESUME_GUEST;
}
break;
case BOOK3S_INTERRUPT_DATA_STORAGE:
+ {
+ ulong dar = kvmppc_get_fault_dar(vcpu);
vcpu->stat.pf_storage++;
+
+#ifdef CONFIG_PPC_BOOK3S_32
+ /* We set segments as unused segments when invalidating them. So
+ * treat the respective fault as segment fault. */
+ if ((to_svcpu(vcpu)->sr[dar >> SID_SHIFT]) == SR_INVALID) {
+ kvmppc_mmu_map_segment(vcpu, dar);
+ r = RESUME_GUEST;
+ break;
+ }
+#endif
+
/* The only case we need to handle is missing shadow PTEs */
- if (vcpu->arch.fault_dsisr & DSISR_NOHPTE) {
- r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.fault_dear, exit_nr);
+ if (to_svcpu(vcpu)->fault_dsisr & DSISR_NOHPTE) {
+ r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
} else {
- vcpu->arch.dear = vcpu->arch.fault_dear;
- to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr;
+ vcpu->arch.dear = dar;
+ to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr;
kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
- kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFULL);
+ kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFUL);
r = RESUME_GUEST;
}
break;
+ }
case BOOK3S_INTERRUPT_DATA_SEGMENT:
- if (kvmppc_mmu_map_segment(vcpu, vcpu->arch.fault_dear) < 0) {
- vcpu->arch.dear = vcpu->arch.fault_dear;
+ if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
+ vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
kvmppc_book3s_queue_irqprio(vcpu,
BOOK3S_INTERRUPT_DATA_SEGMENT);
}
r = RESUME_GUEST;
break;
case BOOK3S_INTERRUPT_INST_SEGMENT:
- if (kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc) < 0) {
+ if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
kvmppc_book3s_queue_irqprio(vcpu,
BOOK3S_INTERRUPT_INST_SEGMENT);
}
@@ -764,18 +893,22 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu->stat.ext_intr_exits++;
r = RESUME_GUEST;
break;
+ case BOOK3S_INTERRUPT_PERFMON:
+ r = RESUME_GUEST;
+ break;
case BOOK3S_INTERRUPT_PROGRAM:
{
enum emulation_result er;
ulong flags;
- flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
+program_interrupt:
+ flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull;
if (vcpu->arch.msr & MSR_PR) {
#ifdef EXIT_DEBUG
- printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", vcpu->arch.pc, vcpu->arch.last_inst);
+ printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
#endif
- if ((vcpu->arch.last_inst & 0xff0007ff) !=
+ if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
(INS_DCBZ & 0xfffffff7)) {
kvmppc_core_queue_program(vcpu, flags);
r = RESUME_GUEST;
@@ -789,33 +922,80 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
case EMULATE_DONE:
r = RESUME_GUEST_NV;
break;
+ case EMULATE_AGAIN:
+ r = RESUME_GUEST;
+ break;
case EMULATE_FAIL:
printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
- __func__, vcpu->arch.pc, vcpu->arch.last_inst);
+ __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
kvmppc_core_queue_program(vcpu, flags);
r = RESUME_GUEST;
break;
+ case EMULATE_DO_MMIO:
+ run->exit_reason = KVM_EXIT_MMIO;
+ r = RESUME_HOST_NV;
+ break;
default:
BUG();
}
break;
}
case BOOK3S_INTERRUPT_SYSCALL:
-#ifdef EXIT_DEBUG
- printk(KERN_INFO "Syscall Nr %d\n", (int)kvmppc_get_gpr(vcpu, 0));
-#endif
- vcpu->stat.syscall_exits++;
- kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
- r = RESUME_GUEST;
+ // XXX make user settable
+ if (vcpu->arch.osi_enabled &&
+ (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
+ (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
+ u64 *gprs = run->osi.gprs;
+ int i;
+
+ run->exit_reason = KVM_EXIT_OSI;
+ for (i = 0; i < 32; i++)
+ gprs[i] = kvmppc_get_gpr(vcpu, i);
+ vcpu->arch.osi_needed = 1;
+ r = RESUME_HOST_NV;
+
+ } else {
+ vcpu->stat.syscall_exits++;
+ kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+ r = RESUME_GUEST;
+ }
break;
case BOOK3S_INTERRUPT_FP_UNAVAIL:
- r = kvmppc_handle_ext(vcpu, exit_nr, MSR_FP);
- break;
case BOOK3S_INTERRUPT_ALTIVEC:
- r = kvmppc_handle_ext(vcpu, exit_nr, MSR_VEC);
- break;
case BOOK3S_INTERRUPT_VSX:
- r = kvmppc_handle_ext(vcpu, exit_nr, MSR_VSX);
+ {
+ int ext_msr = 0;
+
+ switch (exit_nr) {
+ case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP; break;
+ case BOOK3S_INTERRUPT_ALTIVEC: ext_msr = MSR_VEC; break;
+ case BOOK3S_INTERRUPT_VSX: ext_msr = MSR_VSX; break;
+ }
+
+ switch (kvmppc_check_ext(vcpu, exit_nr)) {
+ case EMULATE_DONE:
+ /* everything ok - let's enable the ext */
+ r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
+ break;
+ case EMULATE_FAIL:
+ /* we need to emulate this instruction */
+ goto program_interrupt;
+ break;
+ default:
+ /* nothing to worry about - go again */
+ break;
+ }
+ break;
+ }
+ case BOOK3S_INTERRUPT_ALIGNMENT:
+ if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
+ to_book3s(vcpu)->dsisr = kvmppc_alignment_dsisr(vcpu,
+ kvmppc_get_last_inst(vcpu));
+ vcpu->arch.dear = kvmppc_alignment_dar(vcpu,
+ kvmppc_get_last_inst(vcpu));
+ kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+ }
+ r = RESUME_GUEST;
break;
case BOOK3S_INTERRUPT_MACHINE_CHECK:
case BOOK3S_INTERRUPT_TRACE:
@@ -825,7 +1005,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
default:
/* Ugh - bork here! What did we get? */
printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
- exit_nr, vcpu->arch.pc, vcpu->arch.shadow_srr1);
+ exit_nr, kvmppc_get_pc(vcpu), to_svcpu(vcpu)->shadow_srr1);
r = RESUME_HOST;
BUG();
break;
@@ -852,7 +1032,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
#ifdef EXIT_DEBUG
- printk(KERN_EMERG "KVM exit: vcpu=0x%p pc=0x%lx r=0x%x\n", vcpu, vcpu->arch.pc, r);
+ printk(KERN_EMERG "KVM exit: vcpu=0x%p pc=0x%lx r=0x%x\n", vcpu, kvmppc_get_pc(vcpu), r);
#endif
return r;
@@ -867,10 +1047,12 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
int i;
- regs->pc = vcpu->arch.pc;
+ vcpu_load(vcpu);
+
+ regs->pc = kvmppc_get_pc(vcpu);
regs->cr = kvmppc_get_cr(vcpu);
- regs->ctr = vcpu->arch.ctr;
- regs->lr = vcpu->arch.lr;
+ regs->ctr = kvmppc_get_ctr(vcpu);
+ regs->lr = kvmppc_get_lr(vcpu);
regs->xer = kvmppc_get_xer(vcpu);
regs->msr = vcpu->arch.msr;
regs->srr0 = vcpu->arch.srr0;
@@ -887,6 +1069,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
+ vcpu_put(vcpu);
+
return 0;
}
@@ -894,10 +1078,12 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
int i;
- vcpu->arch.pc = regs->pc;
+ vcpu_load(vcpu);
+
+ kvmppc_set_pc(vcpu, regs->pc);
kvmppc_set_cr(vcpu, regs->cr);
- vcpu->arch.ctr = regs->ctr;
- vcpu->arch.lr = regs->lr;
+ kvmppc_set_ctr(vcpu, regs->ctr);
+ kvmppc_set_lr(vcpu, regs->lr);
kvmppc_set_xer(vcpu, regs->xer);
kvmppc_set_msr(vcpu, regs->msr);
vcpu->arch.srr0 = regs->srr0;
@@ -913,6 +1099,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
+ vcpu_put(vcpu);
+
return 0;
}
@@ -1042,24 +1230,33 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
struct kvmppc_vcpu_book3s *vcpu_book3s;
struct kvm_vcpu *vcpu;
- int err;
+ int err = -ENOMEM;
- vcpu_book3s = (struct kvmppc_vcpu_book3s *)__get_free_pages( GFP_KERNEL | __GFP_ZERO,
- get_order(sizeof(struct kvmppc_vcpu_book3s)));
- if (!vcpu_book3s) {
- err = -ENOMEM;
+ vcpu_book3s = vmalloc(sizeof(struct kvmppc_vcpu_book3s));
+ if (!vcpu_book3s)
goto out;
- }
+
+ memset(vcpu_book3s, 0, sizeof(struct kvmppc_vcpu_book3s));
+
+ vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *)
+ kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
+ if (!vcpu_book3s->shadow_vcpu)
+ goto free_vcpu;
vcpu = &vcpu_book3s->vcpu;
err = kvm_vcpu_init(vcpu, kvm, id);
if (err)
- goto free_vcpu;
+ goto free_shadow_vcpu;
vcpu->arch.host_retip = kvm_return_point;
vcpu->arch.host_msr = mfmsr();
+#ifdef CONFIG_PPC_BOOK3S_64
/* default to book3s_64 (970fx) */
vcpu->arch.pvr = 0x3C0301;
+#else
+ /* default to book3s_32 (750) */
+ vcpu->arch.pvr = 0x84202;
+#endif
kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
vcpu_book3s->slb_nr = 64;
@@ -1067,23 +1264,24 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
vcpu->arch.trampoline_lowmem = kvmppc_trampoline_lowmem;
vcpu->arch.trampoline_enter = kvmppc_trampoline_enter;
vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;
+#ifdef CONFIG_PPC_BOOK3S_64
vcpu->arch.rmcall = *(ulong*)kvmppc_rmcall;
+#else
+ vcpu->arch.rmcall = (ulong)kvmppc_rmcall;
+#endif
vcpu->arch.shadow_msr = MSR_USER64;
- err = __init_new_context();
+ err = kvmppc_mmu_init(vcpu);
if (err < 0)
- goto free_vcpu;
- vcpu_book3s->context_id = err;
-
- vcpu_book3s->vsid_max = ((vcpu_book3s->context_id + 1) << USER_ESID_BITS) - 1;
- vcpu_book3s->vsid_first = vcpu_book3s->context_id << USER_ESID_BITS;
- vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;
+ goto free_shadow_vcpu;
return vcpu;
+free_shadow_vcpu:
+ kfree(vcpu_book3s->shadow_vcpu);
free_vcpu:
- free_pages((long)vcpu_book3s, get_order(sizeof(struct kvmppc_vcpu_book3s)));
+ vfree(vcpu_book3s);
out:
return ERR_PTR(err);
}
@@ -1092,9 +1290,9 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
- __destroy_context(vcpu_book3s->context_id);
kvm_vcpu_uninit(vcpu);
- free_pages((long)vcpu_book3s, get_order(sizeof(struct kvmppc_vcpu_book3s)));
+ kfree(vcpu_book3s->shadow_vcpu);
+ vfree(vcpu_book3s);
}
extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
@@ -1102,8 +1300,12 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
int ret;
struct thread_struct ext_bkp;
+#ifdef CONFIG_ALTIVEC
bool save_vec = current->thread.used_vr;
+#endif
+#ifdef CONFIG_VSX
bool save_vsx = current->thread.used_vsr;
+#endif
ulong ext_msr;
/* No need to go into the guest when all we do is going out */
@@ -1144,6 +1346,10 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
/* XXX we get called with irq disabled - change that! */
local_irq_enable();
+ /* Preload FPU if it's enabled */
+ if (vcpu->arch.msr & MSR_FP)
+ kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
+
ret = __kvmppc_vcpu_entry(kvm_run, vcpu);
local_irq_disable();
@@ -1179,7 +1385,8 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
static int kvmppc_book3s_init(void)
{
- return kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), THIS_MODULE);
+ return kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
+ THIS_MODULE);
}
static void kvmppc_book3s_exit(void)
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index faf99f20d993..0b10503c8a4a 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -37,7 +37,7 @@
#define dprintk(X...) do { } while(0)
#endif
-#ifdef DEBUG_PTE
+#ifdef DEBUG_MMU_PTE
#define dprintk_pte(X...) printk(KERN_INFO X)
#else
#define dprintk_pte(X...) do { } while(0)
@@ -45,6 +45,9 @@
#define PTEG_FLAG_ACCESSED 0x00000100
#define PTEG_FLAG_DIRTY 0x00000080
+#ifndef SID_SHIFT
+#define SID_SHIFT 28
+#endif
static inline bool check_debug_ip(struct kvm_vcpu *vcpu)
{
@@ -57,6 +60,8 @@ static inline bool check_debug_ip(struct kvm_vcpu *vcpu)
static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
struct kvmppc_pte *pte, bool data);
+static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
+ u64 *vsid);
static struct kvmppc_sr *find_sr(struct kvmppc_vcpu_book3s *vcpu_book3s, gva_t eaddr)
{
@@ -66,13 +71,14 @@ static struct kvmppc_sr *find_sr(struct kvmppc_vcpu_book3s *vcpu_book3s, gva_t e
static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
bool data)
{
- struct kvmppc_sr *sre = find_sr(to_book3s(vcpu), eaddr);
+ u64 vsid;
struct kvmppc_pte pte;
if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data))
return pte.vpage;
- return (((u64)eaddr >> 12) & 0xffff) | (((u64)sre->vsid) << 16);
+ kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
+ return (((u64)eaddr >> 12) & 0xffff) | (vsid << 16);
}
static void kvmppc_mmu_book3s_32_reset_msr(struct kvm_vcpu *vcpu)
@@ -142,8 +148,13 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
bat->bepi_mask);
}
if ((eaddr & bat->bepi_mask) == bat->bepi) {
+ u64 vsid;
+ kvmppc_mmu_book3s_32_esid_to_vsid(vcpu,
+ eaddr >> SID_SHIFT, &vsid);
+ vsid <<= 16;
+ pte->vpage = (((u64)eaddr >> 12) & 0xffff) | vsid;
+
pte->raddr = bat->brpn | (eaddr & ~bat->bepi_mask);
- pte->vpage = (eaddr >> 12) | VSID_BAT;
pte->may_read = bat->pp;
pte->may_write = bat->pp > 1;
pte->may_execute = true;
@@ -172,7 +183,7 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
struct kvmppc_sr *sre;
hva_t ptegp;
u32 pteg[16];
- u64 ptem = 0;
+ u32 ptem = 0;
int i;
int found = 0;
@@ -302,6 +313,7 @@ static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
/* And then put in the new SR */
sre->raw = value;
sre->vsid = (value & 0x0fffffff);
+ sre->valid = (value & 0x80000000) ? false : true;
sre->Ks = (value & 0x40000000) ? true : false;
sre->Kp = (value & 0x20000000) ? true : false;
sre->nx = (value & 0x10000000) ? true : false;
@@ -312,36 +324,48 @@ static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool large)
{
- kvmppc_mmu_pte_flush(vcpu, ea, ~0xFFFULL);
+ kvmppc_mmu_pte_flush(vcpu, ea, 0x0FFFF000);
}
-static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid,
+static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
u64 *vsid)
{
+ ulong ea = esid << SID_SHIFT;
+ struct kvmppc_sr *sr;
+ u64 gvsid = esid;
+
+ if (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+ sr = find_sr(to_book3s(vcpu), ea);
+ if (sr->valid)
+ gvsid = sr->vsid;
+ }
+
/* In case we only have one of MSR_IR or MSR_DR set, let's put
that in the real-mode context (and hope RM doesn't access
high memory) */
switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
case 0:
- *vsid = (VSID_REAL >> 16) | esid;
+ *vsid = VSID_REAL | esid;
break;
case MSR_IR:
- *vsid = (VSID_REAL_IR >> 16) | esid;
+ *vsid = VSID_REAL_IR | gvsid;
break;
case MSR_DR:
- *vsid = (VSID_REAL_DR >> 16) | esid;
+ *vsid = VSID_REAL_DR | gvsid;
break;
case MSR_DR|MSR_IR:
- {
- ulong ea;
- ea = esid << SID_SHIFT;
- *vsid = find_sr(to_book3s(vcpu), ea)->vsid;
+ if (!sr->valid)
+ return -1;
+
+ *vsid = sr->vsid;
break;
- }
default:
BUG();
}
+ if (vcpu->arch.msr & MSR_PR)
+ *vsid |= VSID_PR;
+
return 0;
}
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
new file mode 100644
index 000000000000..0bb66005338f
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -0,0 +1,483 @@
+/*
+ * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
+ *
+ * Authors:
+ * Alexander Graf <agraf@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kvm_host.h>
+
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+#include <asm/mmu-hash32.h>
+#include <asm/machdep.h>
+#include <asm/mmu_context.h>
+#include <asm/hw_irq.h>
+
+/* #define DEBUG_MMU */
+/* #define DEBUG_SR */
+
+#ifdef DEBUG_MMU
+#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
+#else
+#define dprintk_mmu(a, ...) do { } while(0)
+#endif
+
+#ifdef DEBUG_SR
+#define dprintk_sr(a, ...) printk(KERN_INFO a, __VA_ARGS__)
+#else
+#define dprintk_sr(a, ...) do { } while(0)
+#endif
+
+#if PAGE_SHIFT != 12
+#error Unknown page size
+#endif
+
+#ifdef CONFIG_SMP
+#error XXX need to grab mmu_hash_lock
+#endif
+
+#ifdef CONFIG_PTE_64BIT
+#error Only 32 bit pages are supported for now
+#endif
+
+static ulong htab;
+static u32 htabmask;
+
+static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
+{
+ volatile u32 *pteg;
+
+ dprintk_mmu("KVM: Flushing SPTE: 0x%llx (0x%llx) -> 0x%llx\n",
+ pte->pte.eaddr, pte->pte.vpage, pte->host_va);
+
+ pteg = (u32*)pte->slot;
+
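+ /* Clearing word 0 drops the valid bit, so the HPTE is dead before the
+  * stale translation is flushed: sync orders the store, tlbie invalidates
+  * the TLB entry for this EA, and tlbsync/sync wait for completion. */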
+ pteg[0] = 0;
+ asm volatile ("sync");
+ asm volatile ("tlbie %0" : : "r" (pte->pte.eaddr) : "memory");
+ asm volatile ("sync");
+ asm volatile ("tlbsync");
+
+ pte->host_va = 0;
+
+ if (pte->pte.may_write)
+ kvm_release_pfn_dirty(pte->pfn);
+ else
+ kvm_release_pfn_clean(pte->pfn);
+}
+
+void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
+{
+ int i;
+
+ dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%x & 0x%x\n",
+ vcpu->arch.hpte_cache_offset, guest_ea, ea_mask);
+ BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
+
+ guest_ea &= ea_mask;
+ for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
+ struct hpte_cache *pte;
+
+ pte = &vcpu->arch.hpte_cache[i];
+ if (!pte->host_va)
+ continue;
+
+ if ((pte->pte.eaddr & ea_mask) == guest_ea) {
+ invalidate_pte(vcpu, pte);
+ }
+ }
+
+ /* Doing a complete flush -> start from scratch */
+ if (!ea_mask)
+ vcpu->arch.hpte_cache_offset = 0;
+}
+
+void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
+{
+ int i;
+
+ dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n",
+ vcpu->arch.hpte_cache_offset, guest_vp, vp_mask);
+ BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
+
+ guest_vp &= vp_mask;
+ for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
+ struct hpte_cache *pte;
+
+ pte = &vcpu->arch.hpte_cache[i];
+ if (!pte->host_va)
+ continue;
+
+ if ((pte->pte.vpage & vp_mask) == guest_vp) {
+ invalidate_pte(vcpu, pte);
+ }
+ }
+}
+
+void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
+{
+ int i;
+
+ dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%llx & 0x%llx\n",
+ vcpu->arch.hpte_cache_offset, pa_start, pa_end);
+ BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
+
+ for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
+ struct hpte_cache *pte;
+
+ pte = &vcpu->arch.hpte_cache[i];
+ if (!pte->host_va)
+ continue;
+
+ if ((pte->pte.raddr >= pa_start) &&
+ (pte->pte.raddr < pa_end)) {
+ invalidate_pte(vcpu, pte);
+ }
+ }
+}
+
+struct kvmppc_pte *kvmppc_mmu_find_pte(struct kvm_vcpu *vcpu, u64 ea, bool data)
+{
+ int i;
+ u64 guest_vp;
+
+ guest_vp = vcpu->arch.mmu.ea_to_vp(vcpu, ea, false);
+ for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
+ struct hpte_cache *pte;
+
+ pte = &vcpu->arch.hpte_cache[i];
+ if (!pte->host_va)
+ continue;
+
+ if (pte->pte.vpage == guest_vp)
+ return &pte->pte;
+ }
+
+ return NULL;
+}
+
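+/* The shadow PTE cache is a simple bump allocator: once it fills up we
+ * flush every shadow mapping and start again from slot 0. */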
+static int kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->arch.hpte_cache_offset == HPTEG_CACHE_NUM)
+ kvmppc_mmu_pte_flush(vcpu, 0, 0);
+
+ return vcpu->arch.hpte_cache_offset++;
+}
+
+/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
+ * a hash, so we don't waste cycles on looping */
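+/* With 512 entries the map index is SID_MAP_BITS == 9 bits wide; the hash
+ * below simply XOR-folds successive 9-bit slices of the 64-bit guest VSID
+ * into one such index. */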
+static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
+{
+ return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
+ ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
+ ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
+ ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
+ ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
+ ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
+ ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
+ ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
+}
+
+
+static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
+{
+ struct kvmppc_sid_map *map;
+ u16 sid_map_mask;
+
+ if (vcpu->arch.msr & MSR_PR)
+ gvsid |= VSID_PR;
+
+ sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
+ map = &to_book3s(vcpu)->sid_map[sid_map_mask];
+ if (map->guest_vsid == gvsid) {
+ dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
+ gvsid, map->host_vsid);
+ return map;
+ }
+
+ map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
+ if (map->guest_vsid == gvsid) {
+ dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
+ gvsid, map->host_vsid);
+ return map;
+ }
+
+ dprintk_sr("SR: Searching 0x%llx -> not found\n", gvsid);
+ return NULL;
+}
+
+static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
+ bool primary)
+{
+ u32 page, hash;
+ ulong pteg = htab;
+
+ page = (eaddr & ~ESID_MASK) >> 12;
+
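+ /* Classic 32-bit hashed page table: the primary hash is (vsid XOR page
+  * index), the secondary hash is its one's complement; the << 6 accounts
+  * for each PTEG being 64 bytes (8 PTEs of 8 bytes each). */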
+ hash = ((vsid ^ page) << 6);
+ if (!primary)
+ hash = ~hash;
+
+ hash &= htabmask;
+
+ pteg |= hash;
+
+ dprintk_mmu("htab: %lx | hash: %x | htabmask: %x | pteg: %lx\n",
+ htab, hash, htabmask, pteg);
+
+ return (u32*)pteg;
+}
+
+extern char etext[];
+
+int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
+{
+ pfn_t hpaddr;
+ u64 va;
+ u64 vsid;
+ struct kvmppc_sid_map *map;
+ volatile u32 *pteg;
+ u32 eaddr = orig_pte->eaddr;
+ u32 pteg0, pteg1;
+ register int rr = 0;
+ bool primary = false;
+ bool evict = false;
+ int hpte_id;
+ struct hpte_cache *pte;
+
+ /* Get host physical address for gpa */
+ hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
+ if (kvm_is_error_hva(hpaddr)) {
+ printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
+ orig_pte->eaddr);
+ return -EINVAL;
+ }
+ hpaddr <<= PAGE_SHIFT;
+
+ /* and write the mapping ea -> hpa into the pt */
+ vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
+ map = find_sid_vsid(vcpu, vsid);
+ if (!map) {
+ kvmppc_mmu_map_segment(vcpu, eaddr);
+ map = find_sid_vsid(vcpu, vsid);
+ }
+ BUG_ON(!map);
+
+ vsid = map->host_vsid;
+ va = (vsid << SID_SHIFT) | (eaddr & ~ESID_MASK);
+
+next_pteg:
+ if (rr == 16) {
+ primary = !primary;
+ evict = true;
+ rr = 0;
+ }
+
+ pteg = kvmppc_mmu_get_pteg(vcpu, vsid, eaddr, primary);
+
+ /* not evicting yet */
+ if (!evict && (pteg[rr] & PTE_V)) {
+ rr += 2;
+ goto next_pteg;
+ }
+
+ dprintk_mmu("KVM: old PTEG: %p (%d)\n", pteg, rr);
+ dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
+ dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
+ dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
+ dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
+ dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
+ dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
+ dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
+ dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);
+
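+ /* Word 0 of a 32-bit HPTE carries V, the VSID and the abbreviated page
+  * index (EA bits 4-9); word 1 carries the physical page number plus the
+  * R/C, coherence and page-protection bits. */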
+ pteg0 = ((eaddr & 0x0fffffff) >> 22) | (vsid << 7) | PTE_V |
+ (primary ? 0 : PTE_SEC);
+ pteg1 = hpaddr | PTE_M | PTE_R | PTE_C;
+
+ if (orig_pte->may_write) {
+ pteg1 |= PP_RWRW;
+ mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
+ } else {
+ pteg1 |= PP_RWRX;
+ }
+
+ local_irq_disable();
+
+ if (pteg[rr]) {
+ pteg[rr] = 0;
+ asm volatile ("sync");
+ }
+ pteg[rr + 1] = pteg1;
+ pteg[rr] = pteg0;
+ asm volatile ("sync");
+
+ local_irq_enable();
+
+ dprintk_mmu("KVM: new PTEG: %p\n", pteg);
+ dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
+ dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
+ dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
+ dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
+ dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
+ dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
+ dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
+ dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);
+
+
+ /* Now tell our Shadow PTE code about the new page */
+
+ hpte_id = kvmppc_mmu_hpte_cache_next(vcpu);
+ pte = &vcpu->arch.hpte_cache[hpte_id];
+
+ dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
+ orig_pte->may_write ? 'w' : '-',
+ orig_pte->may_execute ? 'x' : '-',
+ orig_pte->eaddr, (ulong)pteg, va,
+ orig_pte->vpage, hpaddr);
+
+ pte->slot = (ulong)&pteg[rr];
+ pte->host_va = va;
+ pte->pte = *orig_pte;
+ pte->pfn = hpaddr >> PAGE_SHIFT;
+
+ return 0;
+}
+
+static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
+{
+ struct kvmppc_sid_map *map;
+ struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
+ u16 sid_map_mask;
+ static int backwards_map = 0;
+
+ if (vcpu->arch.msr & MSR_PR)
+ gvsid |= VSID_PR;
+
+ /* We might get collisions that trap in preceding order, so let's
+ map them differently */
+
+ sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
+ if (backwards_map)
+ sid_map_mask = SID_MAP_MASK - sid_map_mask;
+
+ map = &to_book3s(vcpu)->sid_map[sid_map_mask];
+
+ /* Make sure we're taking the other map next time */
+ backwards_map = !backwards_map;
+
+ /* Uh-oh ... out of mappings. Let's flush! */
+ if (vcpu_book3s->vsid_next >= vcpu_book3s->vsid_max) {
+ vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;
+ memset(vcpu_book3s->sid_map, 0,
+ sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
+ kvmppc_mmu_pte_flush(vcpu, 0, 0);
+ kvmppc_mmu_flush_segments(vcpu);
+ }
+ map->host_vsid = vcpu_book3s->vsid_next;
+
+ /* Would have to be 0x111 to be completely aligned with the rest of
+ Linux, but that is just way too little space! */
+ vcpu_book3s->vsid_next += 1;
+
+ map->guest_vsid = gvsid;
+ map->valid = true;
+
+ return map;
+}
+
+int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
+{
+ u32 esid = eaddr >> SID_SHIFT;
+ u64 gvsid;
+ u32 sr;
+ struct kvmppc_sid_map *map;
+ struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);
+
+ if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
+ /* Invalidate an entry */
+ svcpu->sr[esid] = SR_INVALID;
+ return -ENOENT;
+ }
+
+ map = find_sid_vsid(vcpu, gvsid);
+ if (!map)
+ map = create_sid_map(vcpu, gvsid);
+
+ map->guest_esid = esid;
+ sr = map->host_vsid | SR_KP;
+ svcpu->sr[esid] = sr;
+
+ dprintk_sr("MMU: mtsr %d, 0x%x\n", esid, sr);
+
+ return 0;
+}
+
+void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
+{
+ int i;
+ struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);
+
+ dprintk_sr("MMU: flushing all segments (%d)\n", ARRAY_SIZE(svcpu->sr));
+ for (i = 0; i < ARRAY_SIZE(svcpu->sr); i++)
+ svcpu->sr[i] = SR_INVALID;
+}
+
+void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
+{
+ kvmppc_mmu_pte_flush(vcpu, 0, 0);
+ preempt_disable();
+ __destroy_context(to_book3s(vcpu)->context_id);
+ preempt_enable();
+}
+
+/* From mm/mmu_context_hash32.c */
+#define CTX_TO_VSID(ctx) (((ctx) * (897 * 16)) & 0xffffff)
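+/* For illustration: context_id 1 yields vsid_first = CTX_TO_VSID(1) = 14352
+ * and vsid_max = CTX_TO_VSID(2) - 1 = 28703, i.e. every context owns a block
+ * of 897 * 16 = 14352 consecutive VSIDs. */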
+
+int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
+ int err;
+ ulong sdr1;
+
+ err = __init_new_context();
+ if (err < 0)
+ return -1;
+ vcpu3s->context_id = err;
+
+ vcpu3s->vsid_max = CTX_TO_VSID(vcpu3s->context_id + 1) - 1;
+ vcpu3s->vsid_first = CTX_TO_VSID(vcpu3s->context_id);
+
+#if 0 /* XXX still doesn't guarantee uniqueness */
+ /* We could collide with the Linux vsid space because the vsid
+ * wraps around at 24 bits. We're safe if we do our own space
+ * though, so let's always set the highest bit. */
+
+ vcpu3s->vsid_max |= 0x00800000;
+ vcpu3s->vsid_first |= 0x00800000;
+#endif
+ BUG_ON(vcpu3s->vsid_max < vcpu3s->vsid_first);
+
+ vcpu3s->vsid_next = vcpu3s->vsid_first;
+
+ /* Remember where the HTAB is */
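+ /* On 32-bit hash MMUs SDR1 holds HTABORG (the physical hash table base)
+  * in its upper 16 bits and HTABMASK in its low 9 bits; the mask computed
+  * below selects the PTEG offset bits that are valid within that table. */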
+ asm ( "mfsdr1 %0" : "=r"(sdr1) );
+ htabmask = ((sdr1 & 0x1FF) << 16) | 0xFFC0;
+ htab = (ulong)__va(sdr1 & 0xffff0000);
+
+ return 0;
+}
diff --git a/arch/powerpc/kvm/book3s_32_sr.S b/arch/powerpc/kvm/book3s_32_sr.S
new file mode 100644
index 000000000000..3608471ad2d8
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_32_sr.S
@@ -0,0 +1,143 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright SUSE Linux Products GmbH 2009
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+/******************************************************************************
+ * *
+ * Entry code *
+ * *
+ *****************************************************************************/
+
+.macro LOAD_GUEST_SEGMENTS
+
+ /* Required state:
+ *
+ * MSR = ~IR|DR
+ * R1 = host R1
+ * R2 = host R2
+ * R3 = shadow vcpu
+ * all other volatile GPRS = free
+ * SVCPU[CR] = guest CR
+ * SVCPU[XER] = guest XER
+ * SVCPU[CTR] = guest CTR
+ * SVCPU[LR] = guest LR
+ */
+
+#define XCHG_SR(n) lwz r9, (SVCPU_SR+(n*4))(r3); \
+ mtsr n, r9
+
+ XCHG_SR(0)
+ XCHG_SR(1)
+ XCHG_SR(2)
+ XCHG_SR(3)
+ XCHG_SR(4)
+ XCHG_SR(5)
+ XCHG_SR(6)
+ XCHG_SR(7)
+ XCHG_SR(8)
+ XCHG_SR(9)
+ XCHG_SR(10)
+ XCHG_SR(11)
+ XCHG_SR(12)
+ XCHG_SR(13)
+ XCHG_SR(14)
+ XCHG_SR(15)
+
+ /* Clear BATs. */
+
+#define KVM_KILL_BAT(n, reg) \
+ mtspr SPRN_IBAT##n##U,reg; \
+ mtspr SPRN_IBAT##n##L,reg; \
+ mtspr SPRN_DBAT##n##U,reg; \
+ mtspr SPRN_DBAT##n##L,reg; \
+
+ li r9, 0
+ KVM_KILL_BAT(0, r9)
+ KVM_KILL_BAT(1, r9)
+ KVM_KILL_BAT(2, r9)
+ KVM_KILL_BAT(3, r9)
+
+.endm
+
+/******************************************************************************
+ * *
+ * Exit code *
+ * *
+ *****************************************************************************/
+
+.macro LOAD_HOST_SEGMENTS
+
+ /* Register usage at this point:
+ *
+ * R1 = host R1
+ * R2 = host R2
+ * R12 = exit handler id
+ * R13 = shadow vcpu - SHADOW_VCPU_OFF
+ * SVCPU.* = guest *
+ * SVCPU[CR] = guest CR
+ * SVCPU[XER] = guest XER
+ * SVCPU[CTR] = guest CTR
+ * SVCPU[LR] = guest LR
+ *
+ */
+
+ /* Restore BATs */
+
+ /* We only overwrite the upper part, so we only restore
+ the upper part. */
+#define KVM_LOAD_BAT(n, reg, RA, RB) \
+ lwz RA,(n*16)+0(reg); \
+ lwz RB,(n*16)+4(reg); \
+ mtspr SPRN_IBAT##n##U,RA; \
+ mtspr SPRN_IBAT##n##L,RB; \
+ lwz RA,(n*16)+8(reg); \
+ lwz RB,(n*16)+12(reg); \
+ mtspr SPRN_DBAT##n##U,RA; \
+ mtspr SPRN_DBAT##n##L,RB; \
+
+ lis r9, BATS@ha
+ addi r9, r9, BATS@l
+ tophys(r9, r9)
+ KVM_LOAD_BAT(0, r9, r10, r11)
+ KVM_LOAD_BAT(1, r9, r10, r11)
+ KVM_LOAD_BAT(2, r9, r10, r11)
+ KVM_LOAD_BAT(3, r9, r10, r11)
+
+ /* Restore Segment Registers */
+
+ /* 0xc - 0xf */
+
+ li r0, 4
+ mtctr r0
+ LOAD_REG_IMMEDIATE(r3, 0x20000000 | (0x111 * 0xc))
+ lis r4, 0xc000
+3: mtsrin r3, r4
+ addi r3, r3, 0x111 /* increment VSID */
+ addis r4, r4, 0x1000 /* address of next segment */
+ bdnz 3b
+
+ /* 0x0 - 0xb */
+
+ /* 'current->mm' needs to be in r4 */
+ tophys(r4, r2)
+ lwz r4, MM(r4)
+ tophys(r4, r4)
+ /* This only clobbers r0, r3, r4 and r5 */
+ bl switch_mmu_context
+
+.endm
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 512dcff77554..4025ea26b3c1 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -232,7 +232,7 @@ do_second:
}
dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx "
- "-> 0x%llx\n",
+ "-> 0x%lx\n",
eaddr, avpn, gpte->vpage, gpte->raddr);
found = true;
break;
@@ -383,7 +383,7 @@ static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
if (vcpu->arch.msr & MSR_IR) {
kvmppc_mmu_flush_segments(vcpu);
- kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc);
+ kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
}
}
@@ -439,37 +439,43 @@ static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
kvmppc_mmu_pte_vflush(vcpu, va >> 12, mask);
}
-static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid,
+static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
u64 *vsid)
{
+ ulong ea = esid << SID_SHIFT;
+ struct kvmppc_slb *slb;
+ u64 gvsid = esid;
+
+ if (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+ slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
+ if (slb)
+ gvsid = slb->vsid;
+ }
+
switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
case 0:
- *vsid = (VSID_REAL >> 16) | esid;
+ *vsid = VSID_REAL | esid;
break;
case MSR_IR:
- *vsid = (VSID_REAL_IR >> 16) | esid;
+ *vsid = VSID_REAL_IR | gvsid;
break;
case MSR_DR:
- *vsid = (VSID_REAL_DR >> 16) | esid;
+ *vsid = VSID_REAL_DR | gvsid;
break;
case MSR_DR|MSR_IR:
- {
- ulong ea;
- struct kvmppc_slb *slb;
- ea = esid << SID_SHIFT;
- slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
- if (slb)
- *vsid = slb->vsid;
- else
+ if (!slb)
return -ENOENT;
+ *vsid = gvsid;
break;
- }
default:
BUG();
break;
}
+ if (vcpu->arch.msr & MSR_PR)
+ *vsid |= VSID_PR;
+
return 0;
}
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index f2899b297ffd..e4b5744977f6 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -48,21 +48,25 @@
static void invalidate_pte(struct hpte_cache *pte)
{
- dprintk_mmu("KVM: Flushing SPT %d: 0x%llx (0x%llx) -> 0x%llx\n",
- i, pte->pte.eaddr, pte->pte.vpage, pte->host_va);
+ dprintk_mmu("KVM: Flushing SPT: 0x%lx (0x%llx) -> 0x%llx\n",
+ pte->pte.eaddr, pte->pte.vpage, pte->host_va);
ppc_md.hpte_invalidate(pte->slot, pte->host_va,
MMU_PAGE_4K, MMU_SEGSIZE_256M,
false);
pte->host_va = 0;
- kvm_release_pfn_dirty(pte->pfn);
+
+ if (pte->pte.may_write)
+ kvm_release_pfn_dirty(pte->pfn);
+ else
+ kvm_release_pfn_clean(pte->pfn);
}
-void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, u64 guest_ea, u64 ea_mask)
+void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
{
int i;
- dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%llx & 0x%llx\n",
+ dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%lx & 0x%lx\n",
vcpu->arch.hpte_cache_offset, guest_ea, ea_mask);
BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
@@ -106,12 +110,12 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
}
}
-void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, u64 pa_start, u64 pa_end)
+void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
int i;
- dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%llx & 0x%llx\n",
- vcpu->arch.hpte_cache_offset, guest_pa, pa_mask);
+ dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%lx & 0x%lx\n",
+ vcpu->arch.hpte_cache_offset, pa_start, pa_end);
BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
@@ -182,7 +186,7 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
map = &to_book3s(vcpu)->sid_map[sid_map_mask];
if (map->guest_vsid == gvsid) {
- dprintk_slb("SLB: Searching 0x%llx -> 0x%llx\n",
+ dprintk_slb("SLB: Searching: 0x%llx -> 0x%llx\n",
gvsid, map->host_vsid);
return map;
}
@@ -194,7 +198,8 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
return map;
}
- dprintk_slb("SLB: Searching 0x%llx -> not found\n", gvsid);
+ dprintk_slb("SLB: Searching %d/%d: 0x%llx -> not found\n",
+ sid_map_mask, SID_MAP_MASK - sid_map_mask, gvsid);
return NULL;
}
@@ -212,7 +217,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
/* Get host physical address for gpa */
hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
if (kvm_is_error_hva(hpaddr)) {
- printk(KERN_INFO "Couldn't get guest page for gfn %llx!\n", orig_pte->eaddr);
+ printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr);
return -EINVAL;
}
hpaddr <<= PAGE_SHIFT;
@@ -227,10 +232,16 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
map = find_sid_vsid(vcpu, vsid);
if (!map) {
- kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);
+ ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);
+ WARN_ON(ret < 0);
map = find_sid_vsid(vcpu, vsid);
}
- BUG_ON(!map);
+ if (!map) {
+ printk(KERN_ERR "KVM: Segment map for 0x%llx (0x%lx) failed\n",
+ vsid, orig_pte->eaddr);
+ WARN_ON(true);
+ return -EINVAL;
+ }
vsid = map->host_vsid;
va = hpt_va(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M);
@@ -257,26 +268,26 @@ map_again:
if (ret < 0) {
/* If we couldn't map a primary PTE, try a secondary */
-#ifdef USE_SECONDARY
hash = ~hash;
+ vflags ^= HPTE_V_SECONDARY;
attempt++;
- if (attempt % 2)
- vflags = HPTE_V_SECONDARY;
- else
- vflags = 0;
-#else
- attempt = 2;
-#endif
goto map_again;
} else {
int hpte_id = kvmppc_mmu_hpte_cache_next(vcpu);
struct hpte_cache *pte = &vcpu->arch.hpte_cache[hpte_id];
- dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%lx (0x%llx) -> %lx\n",
+ dprintk_mmu("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx\n",
((rflags & HPTE_R_PP) == 3) ? '-' : 'w',
(rflags & HPTE_R_N) ? '-' : 'x',
orig_pte->eaddr, hpteg, va, orig_pte->vpage, hpaddr);
+ /* The ppc_md code may give us a secondary entry even though we
+ asked for a primary. Fix up. */
+ if ((ret & _PTEIDX_SECONDARY) && !(vflags & HPTE_V_SECONDARY)) {
+ hash = ~hash;
+ hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
+ }
+
pte->slot = hpteg + (ret & 7);
pte->host_va = va;
pte->pte = *orig_pte;
@@ -321,6 +332,9 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
map->guest_vsid = gvsid;
map->valid = true;
+ dprintk_slb("SLB: New mapping at %d: 0x%llx -> 0x%llx\n",
+ sid_map_mask, gvsid, map->host_vsid);
+
return map;
}
@@ -331,14 +345,14 @@ static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
int found_inval = -1;
int r;
- if (!get_paca()->kvm_slb_max)
- get_paca()->kvm_slb_max = 1;
+ if (!to_svcpu(vcpu)->slb_max)
+ to_svcpu(vcpu)->slb_max = 1;
/* Are we overwriting? */
- for (i = 1; i < get_paca()->kvm_slb_max; i++) {
- if (!(get_paca()->kvm_slb[i].esid & SLB_ESID_V))
+ for (i = 1; i < to_svcpu(vcpu)->slb_max; i++) {
+ if (!(to_svcpu(vcpu)->slb[i].esid & SLB_ESID_V))
found_inval = i;
- else if ((get_paca()->kvm_slb[i].esid & ESID_MASK) == esid)
+ else if ((to_svcpu(vcpu)->slb[i].esid & ESID_MASK) == esid)
return i;
}
@@ -352,11 +366,11 @@ static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
max_slb_size = mmu_slb_size;
/* Overflowing -> purge */
- if ((get_paca()->kvm_slb_max) == max_slb_size)
+ if ((to_svcpu(vcpu)->slb_max) == max_slb_size)
kvmppc_mmu_flush_segments(vcpu);
- r = get_paca()->kvm_slb_max;
- get_paca()->kvm_slb_max++;
+ r = to_svcpu(vcpu)->slb_max;
+ to_svcpu(vcpu)->slb_max++;
return r;
}
@@ -374,7 +388,7 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
/* Invalidate an entry */
- get_paca()->kvm_slb[slb_index].esid = 0;
+ to_svcpu(vcpu)->slb[slb_index].esid = 0;
return -ENOENT;
}
@@ -388,8 +402,8 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
slb_vsid &= ~SLB_VSID_KP;
slb_esid |= slb_index;
- get_paca()->kvm_slb[slb_index].esid = slb_esid;
- get_paca()->kvm_slb[slb_index].vsid = slb_vsid;
+ to_svcpu(vcpu)->slb[slb_index].esid = slb_esid;
+ to_svcpu(vcpu)->slb[slb_index].vsid = slb_vsid;
dprintk_slb("slbmte %#llx, %#llx\n", slb_vsid, slb_esid);
@@ -398,11 +412,29 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
- get_paca()->kvm_slb_max = 1;
- get_paca()->kvm_slb[0].esid = 0;
+ to_svcpu(vcpu)->slb_max = 1;
+ to_svcpu(vcpu)->slb[0].esid = 0;
}
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
kvmppc_mmu_pte_flush(vcpu, 0, 0);
+ __destroy_context(to_book3s(vcpu)->context_id);
+}
+
+int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
+ int err;
+
+ err = __init_new_context();
+ if (err < 0)
+ return -1;
+ vcpu3s->context_id = err;
+
+ vcpu3s->vsid_max = ((vcpu3s->context_id + 1) << USER_ESID_BITS) - 1;
+ vcpu3s->vsid_first = vcpu3s->context_id << USER_ESID_BITS;
+ vcpu3s->vsid_next = vcpu3s->vsid_first;
+
+ return 0;
}
diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S
index 35b762722187..04e7d3bbfe8b 100644
--- a/arch/powerpc/kvm/book3s_64_slb.S
+++ b/arch/powerpc/kvm/book3s_64_slb.S
@@ -44,8 +44,7 @@ slb_exit_skip_ ## num:
* *
*****************************************************************************/
-.global kvmppc_handler_trampoline_enter
-kvmppc_handler_trampoline_enter:
+.macro LOAD_GUEST_SEGMENTS
/* Required state:
*
@@ -53,20 +52,14 @@ kvmppc_handler_trampoline_enter:
* R13 = PACA
* R1 = host R1
* R2 = host R2
- * R9 = guest IP
- * R10 = guest MSR
- * all other GPRS = free
- * PACA[KVM_CR] = guest CR
- * PACA[KVM_XER] = guest XER
+ * R3 = shadow vcpu
+ * all other volatile GPRS = free
+ * SVCPU[CR] = guest CR
+ * SVCPU[XER] = guest XER
+ * SVCPU[CTR] = guest CTR
+ * SVCPU[LR] = guest LR
*/
- mtsrr0 r9
- mtsrr1 r10
-
- /* Activate guest mode, so faults get handled by KVM */
- li r11, KVM_GUEST_MODE_GUEST
- stb r11, PACA_KVM_IN_GUEST(r13)
-
/* Remove LPAR shadow entries */
#if SLB_NUM_BOLTED == 3
@@ -101,14 +94,14 @@ kvmppc_handler_trampoline_enter:
/* Fill SLB with our shadow */
- lbz r12, PACA_KVM_SLB_MAX(r13)
+ lbz r12, SVCPU_SLB_MAX(r3)
mulli r12, r12, 16
- addi r12, r12, PACA_KVM_SLB
- add r12, r12, r13
+ addi r12, r12, SVCPU_SLB
+ add r12, r12, r3
/* for (r11 = kvm_slb; r11 < kvm_slb + kvm_slb_size; r11+=slb_entry) */
- li r11, PACA_KVM_SLB
- add r11, r11, r13
+ li r11, SVCPU_SLB
+ add r11, r11, r3
slb_loop_enter:
@@ -127,34 +120,7 @@ slb_loop_enter_skip:
slb_do_enter:
- /* Enter guest */
-
- ld r0, (PACA_KVM_R0)(r13)
- ld r1, (PACA_KVM_R1)(r13)
- ld r2, (PACA_KVM_R2)(r13)
- ld r3, (PACA_KVM_R3)(r13)
- ld r4, (PACA_KVM_R4)(r13)
- ld r5, (PACA_KVM_R5)(r13)
- ld r6, (PACA_KVM_R6)(r13)
- ld r7, (PACA_KVM_R7)(r13)
- ld r8, (PACA_KVM_R8)(r13)
- ld r9, (PACA_KVM_R9)(r13)
- ld r10, (PACA_KVM_R10)(r13)
- ld r12, (PACA_KVM_R12)(r13)
-
- lwz r11, (PACA_KVM_CR)(r13)
- mtcr r11
-
- ld r11, (PACA_KVM_XER)(r13)
- mtxer r11
-
- ld r11, (PACA_KVM_R11)(r13)
- ld r13, (PACA_KVM_R13)(r13)
-
- RFI
-kvmppc_handler_trampoline_enter_end:
-
-
+.endm
/******************************************************************************
* *
@@ -162,99 +128,22 @@ kvmppc_handler_trampoline_enter_end:
* *
*****************************************************************************/
-.global kvmppc_handler_trampoline_exit
-kvmppc_handler_trampoline_exit:
+.macro LOAD_HOST_SEGMENTS
/* Register usage at this point:
*
- * SPRG_SCRATCH0 = guest R13
- * R12 = exit handler id
- * R13 = PACA
- * PACA.KVM.SCRATCH0 = guest R12
- * PACA.KVM.SCRATCH1 = guest CR
+ * R1 = host R1
+ * R2 = host R2
+ * R12 = exit handler id
+ * R13 = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
+ * SVCPU.* = guest *
+ * SVCPU[CR] = guest CR
+ * SVCPU[XER] = guest XER
+ * SVCPU[CTR] = guest CTR
+ * SVCPU[LR] = guest LR
*
*/
- /* Save registers */
-
- std r0, PACA_KVM_R0(r13)
- std r1, PACA_KVM_R1(r13)
- std r2, PACA_KVM_R2(r13)
- std r3, PACA_KVM_R3(r13)
- std r4, PACA_KVM_R4(r13)
- std r5, PACA_KVM_R5(r13)
- std r6, PACA_KVM_R6(r13)
- std r7, PACA_KVM_R7(r13)
- std r8, PACA_KVM_R8(r13)
- std r9, PACA_KVM_R9(r13)
- std r10, PACA_KVM_R10(r13)
- std r11, PACA_KVM_R11(r13)
-
- /* Restore R1/R2 so we can handle faults */
- ld r1, PACA_KVM_HOST_R1(r13)
- ld r2, PACA_KVM_HOST_R2(r13)
-
- /* Save guest PC and MSR in GPRs */
- mfsrr0 r3
- mfsrr1 r4
-
- /* Get scratch'ed off registers */
- mfspr r9, SPRN_SPRG_SCRATCH0
- std r9, PACA_KVM_R13(r13)
-
- ld r8, PACA_KVM_SCRATCH0(r13)
- std r8, PACA_KVM_R12(r13)
-
- lwz r7, PACA_KVM_SCRATCH1(r13)
- stw r7, PACA_KVM_CR(r13)
-
- /* Save more register state */
-
- mfxer r6
- stw r6, PACA_KVM_XER(r13)
-
- mfdar r5
- mfdsisr r6
-
- /*
- * In order for us to easily get the last instruction,
- * we got the #vmexit at, we exploit the fact that the
- * virtual layout is still the same here, so we can just
- * ld from the guest's PC address
- */
-
- /* We only load the last instruction when it's safe */
- cmpwi r12, BOOK3S_INTERRUPT_DATA_STORAGE
- beq ld_last_inst
- cmpwi r12, BOOK3S_INTERRUPT_PROGRAM
- beq ld_last_inst
-
- b no_ld_last_inst
-
-ld_last_inst:
- /* Save off the guest instruction we're at */
-
- /* Set guest mode to 'jump over instruction' so if lwz faults
- * we'll just continue at the next IP. */
- li r9, KVM_GUEST_MODE_SKIP
- stb r9, PACA_KVM_IN_GUEST(r13)
-
- /* 1) enable paging for data */
- mfmsr r9
- ori r11, r9, MSR_DR /* Enable paging for data */
- mtmsr r11
- /* 2) fetch the instruction */
- li r0, KVM_INST_FETCH_FAILED /* In case lwz faults */
- lwz r0, 0(r3)
- /* 3) disable paging again */
- mtmsr r9
-
-no_ld_last_inst:
-
- /* Unset guest mode */
- li r9, KVM_GUEST_MODE_NONE
- stb r9, PACA_KVM_IN_GUEST(r13)
-
/* Restore bolted entries from the shadow and fix it along the way */
/* We don't store anything in entry 0, so we don't need to take care of it */
@@ -275,28 +164,4 @@ no_ld_last_inst:
slb_do_exit:
- /* Register usage at this point:
- *
- * R0 = guest last inst
- * R1 = host R1
- * R2 = host R2
- * R3 = guest PC
- * R4 = guest MSR
- * R5 = guest DAR
- * R6 = guest DSISR
- * R12 = exit handler id
- * R13 = PACA
- * PACA.KVM.* = guest *
- *
- */
-
- /* RFI into the highmem handler */
- mfmsr r7
- ori r7, r7, MSR_IR|MSR_DR|MSR_RI /* Enable paging */
- mtsrr1 r7
- ld r8, PACA_KVM_VMHANDLER(r13) /* Highmem handler address */
- mtsrr0 r8
-
- RFI
-kvmppc_handler_trampoline_exit_end:
-
+.endm
diff --git a/arch/powerpc/kvm/book3s_64_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 2b0ee7e040c9..c85f906038ce 100644
--- a/arch/powerpc/kvm/book3s_64_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -28,13 +28,16 @@
#define OP_31_XOP_MFMSR 83
#define OP_31_XOP_MTMSR 146
#define OP_31_XOP_MTMSRD 178
+#define OP_31_XOP_MTSR 210
#define OP_31_XOP_MTSRIN 242
#define OP_31_XOP_TLBIEL 274
#define OP_31_XOP_TLBIE 306
#define OP_31_XOP_SLBMTE 402
#define OP_31_XOP_SLBIE 434
#define OP_31_XOP_SLBIA 498
+#define OP_31_XOP_MFSR 595
#define OP_31_XOP_MFSRIN 659
+#define OP_31_XOP_DCBA 758
#define OP_31_XOP_SLBMFEV 851
#define OP_31_XOP_EIOIO 854
#define OP_31_XOP_SLBMFEE 915
@@ -42,6 +45,24 @@
/* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */
#define OP_31_XOP_DCBZ 1010
+#define OP_LFS 48
+#define OP_LFD 50
+#define OP_STFS 52
+#define OP_STFD 54
+
+#define SPRN_GQR0 912
+#define SPRN_GQR1 913
+#define SPRN_GQR2 914
+#define SPRN_GQR3 915
+#define SPRN_GQR4 916
+#define SPRN_GQR5 917
+#define SPRN_GQR6 918
+#define SPRN_GQR7 919
+
+/* Book3S_32 defines mfsrin(v) - but that messes up our abstract
+ * function pointers, so let's just disable the define. */
+#undef mfsrin
+
int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
@@ -52,7 +73,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
switch (get_xop(inst)) {
case OP_19_XOP_RFID:
case OP_19_XOP_RFI:
- vcpu->arch.pc = vcpu->arch.srr0;
+ kvmppc_set_pc(vcpu, vcpu->arch.srr0);
kvmppc_set_msr(vcpu, vcpu->arch.srr1);
*advance = 0;
break;
@@ -80,6 +101,18 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
case OP_31_XOP_MTMSR:
kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, get_rs(inst)));
break;
+ case OP_31_XOP_MFSR:
+ {
+ int srnum;
+
+ srnum = kvmppc_get_field(inst, 12 + 32, 15 + 32);
+ if (vcpu->arch.mmu.mfsrin) {
+ u32 sr;
+ sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
+ kvmppc_set_gpr(vcpu, get_rt(inst), sr);
+ }
+ break;
+ }
case OP_31_XOP_MFSRIN:
{
int srnum;
@@ -92,6 +125,11 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
break;
}
+ case OP_31_XOP_MTSR:
+ vcpu->arch.mmu.mtsrin(vcpu,
+ (inst >> 16) & 0xf,
+ kvmppc_get_gpr(vcpu, get_rs(inst)));
+ break;
case OP_31_XOP_MTSRIN:
vcpu->arch.mmu.mtsrin(vcpu,
(kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf,
@@ -150,12 +188,17 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_set_gpr(vcpu, get_rt(inst), t);
}
break;
+ case OP_31_XOP_DCBA:
+ /* Gets treated as NOP */
+ break;
case OP_31_XOP_DCBZ:
{
ulong rb = kvmppc_get_gpr(vcpu, get_rb(inst));
ulong ra = 0;
- ulong addr;
+ ulong addr, vaddr;
u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+ u32 dsisr;
+ int r;
if (get_ra(inst))
ra = kvmppc_get_gpr(vcpu, get_ra(inst));
@@ -163,15 +206,25 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
addr = (ra + rb) & ~31ULL;
if (!(vcpu->arch.msr & MSR_SF))
addr &= 0xffffffff;
+ vaddr = addr;
+
+ r = kvmppc_st(vcpu, &addr, 32, zeros, true);
+ if ((r == -ENOENT) || (r == -EPERM)) {
+ *advance = 0;
+ vcpu->arch.dear = vaddr;
+ to_svcpu(vcpu)->fault_dar = vaddr;
+
+ dsisr = DSISR_ISSTORE;
+ if (r == -ENOENT)
+ dsisr |= DSISR_NOHPTE;
+ else if (r == -EPERM)
+ dsisr |= DSISR_PROTFAULT;
+
+ to_book3s(vcpu)->dsisr = dsisr;
+ to_svcpu(vcpu)->fault_dsisr = dsisr;
- if (kvmppc_st(vcpu, addr, 32, zeros)) {
- vcpu->arch.dear = addr;
- vcpu->arch.fault_dear = addr;
- to_book3s(vcpu)->dsisr = DSISR_PROTFAULT |
- DSISR_ISSTORE;
kvmppc_book3s_queue_irqprio(vcpu,
BOOK3S_INTERRUPT_DATA_STORAGE);
- kvmppc_mmu_pte_flush(vcpu, addr, ~0xFFFULL);
}
break;
@@ -184,6 +237,9 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
emulated = EMULATE_FAIL;
}
+ if (emulated == EMULATE_FAIL)
+ emulated = kvmppc_emulate_paired_single(run, vcpu);
+
return emulated;
}
@@ -207,6 +263,34 @@ void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper,
}
}
+static u32 kvmppc_read_bat(struct kvm_vcpu *vcpu, int sprn)
+{
+ struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
+ struct kvmppc_bat *bat;
+
+ switch (sprn) {
+ case SPRN_IBAT0U ... SPRN_IBAT3L:
+ bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
+ break;
+ case SPRN_IBAT4U ... SPRN_IBAT7L:
+ bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
+ break;
+ case SPRN_DBAT0U ... SPRN_DBAT3L:
+ bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
+ break;
+ case SPRN_DBAT4U ... SPRN_DBAT7L:
+ bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
+ break;
+ default:
+ BUG();
+ }
+
+ if (sprn % 2)
+ return bat->raw >> 32;
+ else
+ return bat->raw;
+}
+
static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u32 val)
{
struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
@@ -217,13 +301,13 @@ static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u32 val)
bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
break;
case SPRN_IBAT4U ... SPRN_IBAT7L:
- bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT4U) / 2];
+ bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
break;
case SPRN_DBAT0U ... SPRN_DBAT3L:
bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
break;
case SPRN_DBAT4U ... SPRN_DBAT7L:
- bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT4U) / 2];
+ bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
break;
default:
BUG();
@@ -258,6 +342,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
/* BAT writes happen so rarely that we're ok to flush
* everything here */
kvmppc_mmu_pte_flush(vcpu, 0, 0);
+ kvmppc_mmu_flush_segments(vcpu);
break;
case SPRN_HID0:
to_book3s(vcpu)->hid[0] = spr_val;
@@ -268,7 +353,32 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
case SPRN_HID2:
to_book3s(vcpu)->hid[2] = spr_val;
break;
+ case SPRN_HID2_GEKKO:
+ to_book3s(vcpu)->hid[2] = spr_val;
+ /* HID2.PSE controls paired single on gekko */
+ switch (vcpu->arch.pvr) {
+ case 0x00080200: /* lonestar 2.0 */
+ case 0x00088202: /* lonestar 2.2 */
+ case 0x70000100: /* gekko 1.0 */
+ case 0x00080100: /* gekko 2.0 */
+ case 0x00083203: /* gekko 2.3a */
+ case 0x00083213: /* gekko 2.3b */
+ case 0x00083204: /* gekko 2.4 */
+ case 0x00083214: /* gekko 2.4e (8SE) - retail HW2 */
+ case 0x00087200: /* broadway */
+ if (vcpu->arch.hflags & BOOK3S_HFLAG_NATIVE_PS) {
+ /* Native paired singles */
+ } else if (spr_val & (1 << 29)) { /* HID2.PSE */
+ vcpu->arch.hflags |= BOOK3S_HFLAG_PAIRED_SINGLE;
+ kvmppc_giveup_ext(vcpu, MSR_FP);
+ } else {
+ vcpu->arch.hflags &= ~BOOK3S_HFLAG_PAIRED_SINGLE;
+ }
+ break;
+ }
+ break;
case SPRN_HID4:
+ case SPRN_HID4_GEKKO:
to_book3s(vcpu)->hid[4] = spr_val;
break;
case SPRN_HID5:
@@ -278,12 +388,30 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
(mfmsr() & MSR_HV))
vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
break;
+ case SPRN_GQR0:
+ case SPRN_GQR1:
+ case SPRN_GQR2:
+ case SPRN_GQR3:
+ case SPRN_GQR4:
+ case SPRN_GQR5:
+ case SPRN_GQR6:
+ case SPRN_GQR7:
+ to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val;
+ break;
case SPRN_ICTC:
case SPRN_THRM1:
case SPRN_THRM2:
case SPRN_THRM3:
case SPRN_CTRLF:
case SPRN_CTRLT:
+ case SPRN_L2CR:
+ case SPRN_MMCR0_GEKKO:
+ case SPRN_MMCR1_GEKKO:
+ case SPRN_PMC1_GEKKO:
+ case SPRN_PMC2_GEKKO:
+ case SPRN_PMC3_GEKKO:
+ case SPRN_PMC4_GEKKO:
+ case SPRN_WPAR_GEKKO:
break;
default:
printk(KERN_INFO "KVM: invalid SPR write: %d\n", sprn);
@@ -301,6 +429,12 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
int emulated = EMULATE_DONE;
switch (sprn) {
+ case SPRN_IBAT0U ... SPRN_IBAT3L:
+ case SPRN_IBAT4U ... SPRN_IBAT7L:
+ case SPRN_DBAT0U ... SPRN_DBAT3L:
+ case SPRN_DBAT4U ... SPRN_DBAT7L:
+ kvmppc_set_gpr(vcpu, rt, kvmppc_read_bat(vcpu, sprn));
+ break;
case SPRN_SDR1:
kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1);
break;
@@ -320,19 +454,40 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[1]);
break;
case SPRN_HID2:
+ case SPRN_HID2_GEKKO:
kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[2]);
break;
case SPRN_HID4:
+ case SPRN_HID4_GEKKO:
kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[4]);
break;
case SPRN_HID5:
kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[5]);
break;
+ case SPRN_GQR0:
+ case SPRN_GQR1:
+ case SPRN_GQR2:
+ case SPRN_GQR3:
+ case SPRN_GQR4:
+ case SPRN_GQR5:
+ case SPRN_GQR6:
+ case SPRN_GQR7:
+ kvmppc_set_gpr(vcpu, rt,
+ to_book3s(vcpu)->gqr[sprn - SPRN_GQR0]);
+ break;
case SPRN_THRM1:
case SPRN_THRM2:
case SPRN_THRM3:
case SPRN_CTRLF:
case SPRN_CTRLT:
+ case SPRN_L2CR:
+ case SPRN_MMCR0_GEKKO:
+ case SPRN_MMCR1_GEKKO:
+ case SPRN_PMC1_GEKKO:
+ case SPRN_PMC2_GEKKO:
+ case SPRN_PMC3_GEKKO:
+ case SPRN_PMC4_GEKKO:
+ case SPRN_WPAR_GEKKO:
kvmppc_set_gpr(vcpu, rt, 0);
break;
default:
@@ -346,3 +501,73 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
return emulated;
}
+u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
+{
+ u32 dsisr = 0;
+
+ /*
+ * This is what the spec says about DSISR bits (not mentioned = 0):
+ *
+ * 12:13 [DS] Set to bits 30:31
+ * 15:16 [X] Set to bits 29:30
+ * 17 [X] Set to bit 25
+ * [D/DS] Set to bit 5
+ * 18:21 [X] Set to bits 21:24
+ * [D/DS] Set to bits 1:4
+ * 22:26 Set to bits 6:10 (RT/RS/FRT/FRS)
+ * 27:31 Set to bits 11:15 (RA)
+ */
+
+ switch (get_op(inst)) {
+ /* D-form */
+ case OP_LFS:
+ case OP_LFD:
+ case OP_STFD:
+ case OP_STFS:
+ dsisr |= (inst >> 12) & 0x4000; /* bit 17 */
+ dsisr |= (inst >> 17) & 0x3c00; /* bits 18:21 */
+ break;
+ /* X-form */
+ case 31:
+ dsisr |= (inst << 14) & 0x18000; /* bits 15:16 */
+ dsisr |= (inst << 8) & 0x04000; /* bit 17 */
+ dsisr |= (inst << 3) & 0x03c00; /* bits 18:21 */
+ break;
+ default:
+ printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
+ break;
+ }
+
+ dsisr |= (inst >> 16) & 0x03ff; /* bits 22:31 */
+
+ return dsisr;
+}
+
+ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
+{
+ ulong dar = 0;
+ ulong ra;
+
+ switch (get_op(inst)) {
+ case OP_LFS:
+ case OP_LFD:
+ case OP_STFD:
+ case OP_STFS:
+ ra = get_ra(inst);
+ if (ra)
+ dar = kvmppc_get_gpr(vcpu, ra);
+ dar += (s32)((s16)inst);
+ break;
+ case 31:
+ ra = get_ra(inst);
+ if (ra)
+ dar = kvmppc_get_gpr(vcpu, ra);
+ dar += kvmppc_get_gpr(vcpu, get_rb(inst));
+ break;
+ default:
+ printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
+ break;
+ }
+
+ return dar;
+}
diff --git a/arch/powerpc/kvm/book3s_64_exports.c b/arch/powerpc/kvm/book3s_exports.c
index 1dd5a1ddfd0d..1dd5a1ddfd0d 100644
--- a/arch/powerpc/kvm/book3s_64_exports.c
+++ b/arch/powerpc/kvm/book3s_exports.c
diff --git a/arch/powerpc/kvm/book3s_64_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index c1584d0cbce8..2f0bc928b08a 100644
--- a/arch/powerpc/kvm/book3s_64_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -24,36 +24,56 @@
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
-#define KVMPPC_HANDLE_EXIT .kvmppc_handle_exit
-#define ULONG_SIZE 8
-#define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE))
+#if defined(CONFIG_PPC_BOOK3S_64)
-.macro DISABLE_INTERRUPTS
- mfmsr r0
- rldicl r0,r0,48,1
- rotldi r0,r0,16
- mtmsrd r0,1
-.endm
+#define ULONG_SIZE 8
+#define FUNC(name) GLUE(.,name)
+#define GET_SHADOW_VCPU(reg) \
+ addi reg, r13, PACA_KVM_SVCPU
+
+#define DISABLE_INTERRUPTS \
+ mfmsr r0; \
+ rldicl r0,r0,48,1; \
+ rotldi r0,r0,16; \
+ mtmsrd r0,1; \
+
+#elif defined(CONFIG_PPC_BOOK3S_32)
+
+#define ULONG_SIZE 4
+#define FUNC(name) name
+
+#define GET_SHADOW_VCPU(reg) \
+ lwz reg, (THREAD + THREAD_KVM_SVCPU)(r2)
+
+#define DISABLE_INTERRUPTS \
+ mfmsr r0; \
+ rlwinm r0,r0,0,17,15; \
+ mtmsr r0; \
+
+#endif /* CONFIG_PPC_BOOK3S_XX */
+
+
+#define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE))
#define VCPU_LOAD_NVGPRS(vcpu) \
- ld r14, VCPU_GPR(r14)(vcpu); \
- ld r15, VCPU_GPR(r15)(vcpu); \
- ld r16, VCPU_GPR(r16)(vcpu); \
- ld r17, VCPU_GPR(r17)(vcpu); \
- ld r18, VCPU_GPR(r18)(vcpu); \
- ld r19, VCPU_GPR(r19)(vcpu); \
- ld r20, VCPU_GPR(r20)(vcpu); \
- ld r21, VCPU_GPR(r21)(vcpu); \
- ld r22, VCPU_GPR(r22)(vcpu); \
- ld r23, VCPU_GPR(r23)(vcpu); \
- ld r24, VCPU_GPR(r24)(vcpu); \
- ld r25, VCPU_GPR(r25)(vcpu); \
- ld r26, VCPU_GPR(r26)(vcpu); \
- ld r27, VCPU_GPR(r27)(vcpu); \
- ld r28, VCPU_GPR(r28)(vcpu); \
- ld r29, VCPU_GPR(r29)(vcpu); \
- ld r30, VCPU_GPR(r30)(vcpu); \
- ld r31, VCPU_GPR(r31)(vcpu); \
+ PPC_LL r14, VCPU_GPR(r14)(vcpu); \
+ PPC_LL r15, VCPU_GPR(r15)(vcpu); \
+ PPC_LL r16, VCPU_GPR(r16)(vcpu); \
+ PPC_LL r17, VCPU_GPR(r17)(vcpu); \
+ PPC_LL r18, VCPU_GPR(r18)(vcpu); \
+ PPC_LL r19, VCPU_GPR(r19)(vcpu); \
+ PPC_LL r20, VCPU_GPR(r20)(vcpu); \
+ PPC_LL r21, VCPU_GPR(r21)(vcpu); \
+ PPC_LL r22, VCPU_GPR(r22)(vcpu); \
+ PPC_LL r23, VCPU_GPR(r23)(vcpu); \
+ PPC_LL r24, VCPU_GPR(r24)(vcpu); \
+ PPC_LL r25, VCPU_GPR(r25)(vcpu); \
+ PPC_LL r26, VCPU_GPR(r26)(vcpu); \
+ PPC_LL r27, VCPU_GPR(r27)(vcpu); \
+ PPC_LL r28, VCPU_GPR(r28)(vcpu); \
+ PPC_LL r29, VCPU_GPR(r29)(vcpu); \
+ PPC_LL r30, VCPU_GPR(r30)(vcpu); \
+ PPC_LL r31, VCPU_GPR(r31)(vcpu); \
/*****************************************************************************
* *
@@ -69,11 +89,11 @@ _GLOBAL(__kvmppc_vcpu_entry)
kvm_start_entry:
/* Write correct stack frame */
- mflr r0
- std r0,16(r1)
+ mflr r0
+ PPC_STL r0,PPC_LR_STKOFF(r1)
/* Save host state to the stack */
- stdu r1, -SWITCH_FRAME_SIZE(r1)
+ PPC_STLU r1, -SWITCH_FRAME_SIZE(r1)
/* Save r3 (kvm_run) and r4 (vcpu) */
SAVE_2GPRS(3, r1)
@@ -82,33 +102,28 @@ kvm_start_entry:
SAVE_NVGPRS(r1)
/* Save LR */
- std r0, _LINK(r1)
+ PPC_STL r0, _LINK(r1)
/* Load non-volatile guest state from the vcpu */
VCPU_LOAD_NVGPRS(r4)
+ GET_SHADOW_VCPU(r5)
+
/* Save R1/R2 in the PACA */
- std r1, PACA_KVM_HOST_R1(r13)
- std r2, PACA_KVM_HOST_R2(r13)
+ PPC_STL r1, SVCPU_HOST_R1(r5)
+ PPC_STL r2, SVCPU_HOST_R2(r5)
/* XXX swap in/out on load? */
- ld r3, VCPU_HIGHMEM_HANDLER(r4)
- std r3, PACA_KVM_VMHANDLER(r13)
+ PPC_LL r3, VCPU_HIGHMEM_HANDLER(r4)
+ PPC_STL r3, SVCPU_VMHANDLER(r5)
kvm_start_lightweight:
- ld r9, VCPU_PC(r4) /* r9 = vcpu->arch.pc */
- ld r10, VCPU_SHADOW_MSR(r4) /* r10 = vcpu->arch.shadow_msr */
-
- /* Load some guest state in the respective registers */
- ld r5, VCPU_CTR(r4) /* r5 = vcpu->arch.ctr */
- /* will be swapped in by rmcall */
-
- ld r3, VCPU_LR(r4) /* r3 = vcpu->arch.lr */
- mtlr r3 /* LR = r3 */
+ PPC_LL r10, VCPU_SHADOW_MSR(r4) /* r10 = vcpu->arch.shadow_msr */
DISABLE_INTERRUPTS
+#ifdef CONFIG_PPC_BOOK3S_64
/* Some guests may need to have dcbz set to 32 byte length.
*
* Usually we ensure that by patching the guest's instructions
@@ -118,7 +133,7 @@ kvm_start_lightweight:
* because that's a lot faster.
*/
- ld r3, VCPU_HFLAGS(r4)
+ PPC_LL r3, VCPU_HFLAGS(r4)
rldicl. r3, r3, 0, 63 /* CR = ((r3 & 1) == 0) */
beq no_dcbz32_on
@@ -128,13 +143,15 @@ kvm_start_lightweight:
no_dcbz32_on:
- ld r6, VCPU_RMCALL(r4)
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+ PPC_LL r6, VCPU_RMCALL(r4)
mtctr r6
- ld r3, VCPU_TRAMPOLINE_ENTER(r4)
+ PPC_LL r3, VCPU_TRAMPOLINE_ENTER(r4)
LOAD_REG_IMMEDIATE(r4, MSR_KERNEL & ~(MSR_IR | MSR_DR))
- /* Jump to SLB patching handlder and into our guest */
+ /* Jump to segment patching handler and into our guest */
bctr
/*
@@ -149,31 +166,20 @@ kvmppc_handler_highmem:
/*
* Register usage at this point:
*
- * R0 = guest last inst
- * R1 = host R1
- * R2 = host R2
- * R3 = guest PC
- * R4 = guest MSR
- * R5 = guest DAR
- * R6 = guest DSISR
- * R13 = PACA
- * PACA.KVM.* = guest *
+ * R1 = host R1
+ * R2 = host R2
+ * R12 = exit handler id
+ * R13 = PACA
+ * SVCPU.* = guest *
*
*/
/* R7 = vcpu */
- ld r7, GPR4(r1)
+ PPC_LL r7, GPR4(r1)
- /* Now save the guest state */
+#ifdef CONFIG_PPC_BOOK3S_64
- stw r0, VCPU_LAST_INST(r7)
-
- std r3, VCPU_PC(r7)
- std r4, VCPU_SHADOW_SRR1(r7)
- std r5, VCPU_FAULT_DEAR(r7)
- std r6, VCPU_FAULT_DSISR(r7)
-
- ld r5, VCPU_HFLAGS(r7)
+ PPC_LL r5, VCPU_HFLAGS(r7)
rldicl. r5, r5, 0, 63 /* CR = ((r5 & 1) == 0) */
beq no_dcbz32_off
@@ -184,35 +190,29 @@ kvmppc_handler_highmem:
no_dcbz32_off:
- std r14, VCPU_GPR(r14)(r7)
- std r15, VCPU_GPR(r15)(r7)
- std r16, VCPU_GPR(r16)(r7)
- std r17, VCPU_GPR(r17)(r7)
- std r18, VCPU_GPR(r18)(r7)
- std r19, VCPU_GPR(r19)(r7)
- std r20, VCPU_GPR(r20)(r7)
- std r21, VCPU_GPR(r21)(r7)
- std r22, VCPU_GPR(r22)(r7)
- std r23, VCPU_GPR(r23)(r7)
- std r24, VCPU_GPR(r24)(r7)
- std r25, VCPU_GPR(r25)(r7)
- std r26, VCPU_GPR(r26)(r7)
- std r27, VCPU_GPR(r27)(r7)
- std r28, VCPU_GPR(r28)(r7)
- std r29, VCPU_GPR(r29)(r7)
- std r30, VCPU_GPR(r30)(r7)
- std r31, VCPU_GPR(r31)(r7)
-
- /* Save guest CTR */
- mfctr r5
- std r5, VCPU_CTR(r7)
-
- /* Save guest LR */
- mflr r5
- std r5, VCPU_LR(r7)
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+ PPC_STL r14, VCPU_GPR(r14)(r7)
+ PPC_STL r15, VCPU_GPR(r15)(r7)
+ PPC_STL r16, VCPU_GPR(r16)(r7)
+ PPC_STL r17, VCPU_GPR(r17)(r7)
+ PPC_STL r18, VCPU_GPR(r18)(r7)
+ PPC_STL r19, VCPU_GPR(r19)(r7)
+ PPC_STL r20, VCPU_GPR(r20)(r7)
+ PPC_STL r21, VCPU_GPR(r21)(r7)
+ PPC_STL r22, VCPU_GPR(r22)(r7)
+ PPC_STL r23, VCPU_GPR(r23)(r7)
+ PPC_STL r24, VCPU_GPR(r24)(r7)
+ PPC_STL r25, VCPU_GPR(r25)(r7)
+ PPC_STL r26, VCPU_GPR(r26)(r7)
+ PPC_STL r27, VCPU_GPR(r27)(r7)
+ PPC_STL r28, VCPU_GPR(r28)(r7)
+ PPC_STL r29, VCPU_GPR(r29)(r7)
+ PPC_STL r30, VCPU_GPR(r30)(r7)
+ PPC_STL r31, VCPU_GPR(r31)(r7)
/* Restore host msr -> SRR1 */
- ld r6, VCPU_HOST_MSR(r7)
+ PPC_LL r6, VCPU_HOST_MSR(r7)
/*
* For some interrupts, we need to call the real Linux
@@ -228,9 +228,12 @@ no_dcbz32_off:
beq call_linux_handler
cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER
beq call_linux_handler
+ cmpwi r12, BOOK3S_INTERRUPT_PERFMON
+ beq call_linux_handler
/* Back to EE=1 */
mtmsr r6
+ sync
b kvm_return_point
call_linux_handler:
@@ -249,14 +252,14 @@ call_linux_handler:
*/
/* Restore host IP -> SRR0 */
- ld r5, VCPU_HOST_RETIP(r7)
+ PPC_LL r5, VCPU_HOST_RETIP(r7)
/* XXX Better move to a safe function?
* What if we get an HTAB flush in between mtsrr0 and mtsrr1? */
mtlr r12
- ld r4, VCPU_TRAMPOLINE_LOWMEM(r7)
+ PPC_LL r4, VCPU_TRAMPOLINE_LOWMEM(r7)
mtsrr0 r4
LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
mtsrr1 r3
@@ -274,7 +277,7 @@ kvm_return_point:
/* Restore r3 (kvm_run) and r4 (vcpu) */
REST_2GPRS(3, r1)
- bl KVMPPC_HANDLE_EXIT
+ bl FUNC(kvmppc_handle_exit)
/* If RESUME_GUEST, get back in the loop */
cmpwi r3, RESUME_GUEST
@@ -285,7 +288,7 @@ kvm_return_point:
kvm_exit_loop:
- ld r4, _LINK(r1)
+ PPC_LL r4, _LINK(r1)
mtlr r4
/* Restore non-volatile host registers (r14 - r31) */
@@ -296,8 +299,8 @@ kvm_exit_loop:
kvm_loop_heavyweight:
- ld r4, _LINK(r1)
- std r4, (16 + SWITCH_FRAME_SIZE)(r1)
+ PPC_LL r4, _LINK(r1)
+ PPC_STL r4, (PPC_LR_STKOFF + SWITCH_FRAME_SIZE)(r1)
/* Load vcpu and cpu_run */
REST_2GPRS(3, r1)
@@ -315,4 +318,3 @@ kvm_loop_lightweight:
/* Jump back into the beginning of this function */
b kvm_start_lightweight
-
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c
new file mode 100644
index 000000000000..a9f66abafcb3
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_paired_singles.c
@@ -0,0 +1,1289 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright Novell Inc 2010
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#include <asm/kvm.h>
+#include <asm/kvm_ppc.h>
+#include <asm/disassemble.h>
+#include <asm/kvm_book3s.h>
+#include <asm/kvm_fpu.h>
+#include <asm/reg.h>
+#include <asm/cacheflush.h>
+#include <linux/vmalloc.h>
+
+/* #define DEBUG */
+
+#ifdef DEBUG
+#define dprintk printk
+#else
+#define dprintk(...) do { } while(0)
+#endif
+
+#define OP_LFS 48
+#define OP_LFSU 49
+#define OP_LFD 50
+#define OP_LFDU 51
+#define OP_STFS 52
+#define OP_STFSU 53
+#define OP_STFD 54
+#define OP_STFDU 55
+#define OP_PSQ_L 56
+#define OP_PSQ_LU 57
+#define OP_PSQ_ST 60
+#define OP_PSQ_STU 61
+
+#define OP_31_LFSX 535
+#define OP_31_LFSUX 567
+#define OP_31_LFDX 599
+#define OP_31_LFDUX 631
+#define OP_31_STFSX 663
+#define OP_31_STFSUX 695
+#define OP_31_STFX 727
+#define OP_31_STFUX 759
+#define OP_31_LWIZX 887
+#define OP_31_STFIWX 983
+
+#define OP_59_FADDS 21
+#define OP_59_FSUBS 20
+#define OP_59_FSQRTS 22
+#define OP_59_FDIVS 18
+#define OP_59_FRES 24
+#define OP_59_FMULS 25
+#define OP_59_FRSQRTES 26
+#define OP_59_FMSUBS 28
+#define OP_59_FMADDS 29
+#define OP_59_FNMSUBS 30
+#define OP_59_FNMADDS 31
+
+#define OP_63_FCMPU 0
+#define OP_63_FCPSGN 8
+#define OP_63_FRSP 12
+#define OP_63_FCTIW 14
+#define OP_63_FCTIWZ 15
+#define OP_63_FDIV 18
+#define OP_63_FADD 21
+#define OP_63_FSQRT 22
+#define OP_63_FSEL 23
+#define OP_63_FRE 24
+#define OP_63_FMUL 25
+#define OP_63_FRSQRTE 26
+#define OP_63_FMSUB 28
+#define OP_63_FMADD 29
+#define OP_63_FNMSUB 30
+#define OP_63_FNMADD 31
+#define OP_63_FCMPO 32
+#define OP_63_MTFSB1 38 // XXX
+#define OP_63_FSUB 20
+#define OP_63_FNEG 40
+#define OP_63_MCRFS 64
+#define OP_63_MTFSB0 70
+#define OP_63_FMR 72
+#define OP_63_MTFSFI 134
+#define OP_63_FABS 264
+#define OP_63_MFFS 583
+#define OP_63_MTFSF 711
+
+#define OP_4X_PS_CMPU0 0
+#define OP_4X_PSQ_LX 6
+#define OP_4XW_PSQ_STX 7
+#define OP_4A_PS_SUM0 10
+#define OP_4A_PS_SUM1 11
+#define OP_4A_PS_MULS0 12
+#define OP_4A_PS_MULS1 13
+#define OP_4A_PS_MADDS0 14
+#define OP_4A_PS_MADDS1 15
+#define OP_4A_PS_DIV 18
+#define OP_4A_PS_SUB 20
+#define OP_4A_PS_ADD 21
+#define OP_4A_PS_SEL 23
+#define OP_4A_PS_RES 24
+#define OP_4A_PS_MUL 25
+#define OP_4A_PS_RSQRTE 26
+#define OP_4A_PS_MSUB 28
+#define OP_4A_PS_MADD 29
+#define OP_4A_PS_NMSUB 30
+#define OP_4A_PS_NMADD 31
+#define OP_4X_PS_CMPO0 32
+#define OP_4X_PSQ_LUX 38
+#define OP_4XW_PSQ_STUX 39
+#define OP_4X_PS_NEG 40
+#define OP_4X_PS_CMPU1 64
+#define OP_4X_PS_MR 72
+#define OP_4X_PS_CMPO1 96
+#define OP_4X_PS_NABS 136
+#define OP_4X_PS_ABS 264
+#define OP_4X_PS_MERGE00 528
+#define OP_4X_PS_MERGE01 560
+#define OP_4X_PS_MERGE10 592
+#define OP_4X_PS_MERGE11 624
+
+#define SCALAR_NONE 0
+#define SCALAR_HIGH (1 << 0)
+#define SCALAR_LOW (1 << 1)
+#define SCALAR_NO_PS0 (1 << 2)
+#define SCALAR_NO_PS1 (1 << 3)
+
+#define GQR_ST_TYPE_MASK 0x00000007
+#define GQR_ST_TYPE_SHIFT 0
+#define GQR_ST_SCALE_MASK 0x00003f00
+#define GQR_ST_SCALE_SHIFT 8
+#define GQR_LD_TYPE_MASK 0x00070000
+#define GQR_LD_TYPE_SHIFT 16
+#define GQR_LD_SCALE_MASK 0x3f000000
+#define GQR_LD_SCALE_SHIFT 24
+
+#define GQR_QUANTIZE_FLOAT 0
+#define GQR_QUANTIZE_U8 4
+#define GQR_QUANTIZE_U16 5
+#define GQR_QUANTIZE_S8 6
+#define GQR_QUANTIZE_S16 7
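+/* The GQRs (graphics quantization registers) control Gekko's psq_l/psq_st
+ * quantized load/store instructions: the LD_TYPE/LD_SCALE fields select the
+ * de-quantization format and scale for loads, ST_TYPE/ST_SCALE likewise for
+ * stores. */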
+
+#define FPU_LS_SINGLE 0
+#define FPU_LS_DOUBLE 1
+#define FPU_LS_SINGLE_LOW 2
+
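+/* qpr[] holds the second (ps1) half of each paired-single register; this
+ * helper mirrors the single-precision view of fpr[rt] into it after a
+ * scalar update. */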
+static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt)
+{
+ struct thread_struct t;
+
+ t.fpscr.val = vcpu->arch.fpscr;
+ cvt_df((double*)&vcpu->arch.fpr[rt], (float*)&vcpu->arch.qpr[rt], &t);
+}
+
+static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)
+{
+ u64 dsisr;
+
+ vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 33, 36, 0);
+ vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 42, 47, 0);
+ vcpu->arch.dear = eaddr;
+ /* Page Fault */
+ dsisr = kvmppc_set_field(0, 33, 33, 1);
+ if (is_store)
+ to_book3s(vcpu)->dsisr = kvmppc_set_field(dsisr, 38, 38, 1);
+ kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
+}
+
+static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ int rs, ulong addr, int ls_type)
+{
+ int emulated = EMULATE_FAIL;
+ struct thread_struct t;
+ int r;
+ char tmp[8];
+ int len = sizeof(u32);
+
+ if (ls_type == FPU_LS_DOUBLE)
+ len = sizeof(u64);
+
+ t.fpscr.val = vcpu->arch.fpscr;
+
+ /* read from memory */
+ r = kvmppc_ld(vcpu, &addr, len, tmp, true);
+ vcpu->arch.paddr_accessed = addr;
+
+ if (r < 0) {
+ kvmppc_inject_pf(vcpu, addr, false);
+ goto done_load;
+ } else if (r == EMULATE_DO_MMIO) {
+ emulated = kvmppc_handle_load(run, vcpu, KVM_REG_FPR | rs, len, 1);
+ goto done_load;
+ }
+
+ emulated = EMULATE_DONE;
+
+ /* put in registers */
+ switch (ls_type) {
+ case FPU_LS_SINGLE:
+ cvt_fd((float*)tmp, (double*)&vcpu->arch.fpr[rs], &t);
+ vcpu->arch.qpr[rs] = *((u32*)tmp);
+ break;
+ case FPU_LS_DOUBLE:
+ vcpu->arch.fpr[rs] = *((u64*)tmp);
+ break;
+ }
+
+ dprintk(KERN_INFO "KVM: FPR_LD [0x%llx] at 0x%lx (%d)\n", *(u64*)tmp,
+ addr, len);
+
+done_load:
+ return emulated;
+}
+
+static int kvmppc_emulate_fpr_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ int rs, ulong addr, int ls_type)
+{
+ int emulated = EMULATE_FAIL;
+ struct thread_struct t;
+ int r;
+ char tmp[8];
+ u64 val;
+ int len;
+
+ t.fpscr.val = vcpu->arch.fpscr;
+
+ switch (ls_type) {
+ case FPU_LS_SINGLE:
+ cvt_df((double*)&vcpu->arch.fpr[rs], (float*)tmp, &t);
+ val = *((u32*)tmp);
+ len = sizeof(u32);
+ break;
+ case FPU_LS_SINGLE_LOW:
+ *((u32*)tmp) = vcpu->arch.fpr[rs];
+ val = vcpu->arch.fpr[rs] & 0xffffffff;
+ len = sizeof(u32);
+ break;
+ case FPU_LS_DOUBLE:
+ *((u64*)tmp) = vcpu->arch.fpr[rs];
+ val = vcpu->arch.fpr[rs];
+ len = sizeof(u64);
+ break;
+ default:
+ val = 0;
+ len = 0;
+ }
+
+ r = kvmppc_st(vcpu, &addr, len, tmp, true);
+ vcpu->arch.paddr_accessed = addr;
+ if (r < 0) {
+ kvmppc_inject_pf(vcpu, addr, true);
+ } else if (r == EMULATE_DO_MMIO) {
+ emulated = kvmppc_handle_store(run, vcpu, val, len, 1);
+ } else {
+ emulated = EMULATE_DONE;
+ }
+
+ dprintk(KERN_INFO "KVM: FPR_ST [0x%llx] at 0x%lx (%d)\n",
+ val, addr, len);
+
+ return emulated;
+}
+
+static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ int rs, ulong addr, bool w, int i)
+{
+ int emulated = EMULATE_FAIL;
+ struct thread_struct t;
+ int r;
+ float one = 1.0;
+ u32 tmp[2];
+
+ t.fpscr.val = vcpu->arch.fpscr;
+
+ /* read from memory */
+ if (w) {
+ r = kvmppc_ld(vcpu, &addr, sizeof(u32), tmp, true);
+ memcpy(&tmp[1], &one, sizeof(u32));
+ } else {
+ r = kvmppc_ld(vcpu, &addr, sizeof(u32) * 2, tmp, true);
+ }
+ vcpu->arch.paddr_accessed = addr;
+ if (r < 0) {
+ kvmppc_inject_pf(vcpu, addr, false);
+ goto done_load;
+ } else if ((r == EMULATE_DO_MMIO) && w) {
+ emulated = kvmppc_handle_load(run, vcpu, KVM_REG_FPR | rs, 4, 1);
+ vcpu->arch.qpr[rs] = tmp[1];
+ goto done_load;
+ } else if (r == EMULATE_DO_MMIO) {
+ emulated = kvmppc_handle_load(run, vcpu, KVM_REG_FQPR | rs, 8, 1);
+ goto done_load;
+ }
+
+ emulated = EMULATE_DONE;
+
+ /* put in registers */
+ cvt_fd((float*)&tmp[0], (double*)&vcpu->arch.fpr[rs], &t);
+ vcpu->arch.qpr[rs] = tmp[1];
+
+ dprintk(KERN_INFO "KVM: PSQ_LD [0x%x, 0x%x] at 0x%lx (%d)\n", tmp[0],
+ tmp[1], addr, w ? 4 : 8);
+
+done_load:
+ return emulated;
+}
+
+static int kvmppc_emulate_psq_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ int rs, ulong addr, bool w, int i)
+{
+ int emulated = EMULATE_FAIL;
+ struct thread_struct t;
+ int r;
+ u32 tmp[2];
+ int len = w ? sizeof(u32) : sizeof(u64);
+
+ t.fpscr.val = vcpu->arch.fpscr;
+
+ cvt_df((double*)&vcpu->arch.fpr[rs], (float*)&tmp[0], &t);
+ tmp[1] = vcpu->arch.qpr[rs];
+
+ r = kvmppc_st(vcpu, &addr, len, tmp, true);
+ vcpu->arch.paddr_accessed = addr;
+ if (r < 0) {
+ kvmppc_inject_pf(vcpu, addr, true);
+ } else if ((r == EMULATE_DO_MMIO) && w) {
+ emulated = kvmppc_handle_store(run, vcpu, tmp[0], 4, 1);
+ } else if (r == EMULATE_DO_MMIO) {
+ u64 val = ((u64)tmp[0] << 32) | tmp[1];
+ emulated = kvmppc_handle_store(run, vcpu, val, 8, 1);
+ } else {
+ emulated = EMULATE_DONE;
+ }
+
+ dprintk(KERN_INFO "KVM: PSQ_ST [0x%x, 0x%x] at 0x%lx (%d)\n",
+ tmp[0], tmp[1], addr, len);
+
+ return emulated;
+}
+
+/*
+ * Extracts instruction bits, numbered as in the spec (MSB-0 ordering,
+ * so the leftmost bit is bit 0). Both the msb and lsb bits are included.
+ */
+static inline u32 inst_get_field(u32 inst, int msb, int lsb)
+{
+ return kvmppc_get_field(inst, msb + 32, lsb + 32);
+}
+
+/*
+ * Replaces instruction bits, using the same MSB-0 ordering as above.
+ */
+static inline u32 inst_set_field(u32 inst, int msb, int lsb, int value)
+{
+ return kvmppc_set_field(inst, msb + 32, lsb + 32, value);
+}
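+
+/*
+ * For a 32-bit instruction this is just a shift-and-mask; a sketch,
+ * assuming kvmppc_get_field() uses the 64-bit MSB-0 numbering noted
+ * above:
+ *
+ *	inst_get_field(inst, msb, lsb)
+ *		== (inst >> (31 - lsb)) & ((1 << (lsb - msb + 1)) - 1)
+ *
+ * e.g. inst_get_field(inst, 21, 30) == (inst >> 1) & 0x3ff, the
+ * extended-opcode field of the X-form instructions decoded below.
+ */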
+
+bool kvmppc_inst_is_paired_single(struct kvm_vcpu *vcpu, u32 inst)
+{
+ if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
+ return false;
+
+ switch (get_op(inst)) {
+ case OP_PSQ_L:
+ case OP_PSQ_LU:
+ case OP_PSQ_ST:
+ case OP_PSQ_STU:
+ case OP_LFS:
+ case OP_LFSU:
+ case OP_LFD:
+ case OP_LFDU:
+ case OP_STFS:
+ case OP_STFSU:
+ case OP_STFD:
+ case OP_STFDU:
+ return true;
+ case 4:
+ /* X form */
+ switch (inst_get_field(inst, 21, 30)) {
+ case OP_4X_PS_CMPU0:
+ case OP_4X_PSQ_LX:
+ case OP_4X_PS_CMPO0:
+ case OP_4X_PSQ_LUX:
+ case OP_4X_PS_NEG:
+ case OP_4X_PS_CMPU1:
+ case OP_4X_PS_MR:
+ case OP_4X_PS_CMPO1:
+ case OP_4X_PS_NABS:
+ case OP_4X_PS_ABS:
+ case OP_4X_PS_MERGE00:
+ case OP_4X_PS_MERGE01:
+ case OP_4X_PS_MERGE10:
+ case OP_4X_PS_MERGE11:
+ return true;
+ }
+ /* XW form */
+ switch (inst_get_field(inst, 25, 30)) {
+ case OP_4XW_PSQ_STX:
+ case OP_4XW_PSQ_STUX:
+ return true;
+ }
+ /* A form */
+ switch (inst_get_field(inst, 26, 30)) {
+ case OP_4A_PS_SUM1:
+ case OP_4A_PS_SUM0:
+ case OP_4A_PS_MULS0:
+ case OP_4A_PS_MULS1:
+ case OP_4A_PS_MADDS0:
+ case OP_4A_PS_MADDS1:
+ case OP_4A_PS_DIV:
+ case OP_4A_PS_SUB:
+ case OP_4A_PS_ADD:
+ case OP_4A_PS_SEL:
+ case OP_4A_PS_RES:
+ case OP_4A_PS_MUL:
+ case OP_4A_PS_RSQRTE:
+ case OP_4A_PS_MSUB:
+ case OP_4A_PS_MADD:
+ case OP_4A_PS_NMSUB:
+ case OP_4A_PS_NMADD:
+ return true;
+ }
+ break;
+ case 59:
+ switch (inst_get_field(inst, 21, 30)) {
+ case OP_59_FADDS:
+ case OP_59_FSUBS:
+ case OP_59_FDIVS:
+ case OP_59_FRES:
+ case OP_59_FRSQRTES:
+ return true;
+ }
+ switch (inst_get_field(inst, 26, 30)) {
+ case OP_59_FMULS:
+ case OP_59_FMSUBS:
+ case OP_59_FMADDS:
+ case OP_59_FNMSUBS:
+ case OP_59_FNMADDS:
+ return true;
+ }
+ break;
+ case 63:
+ switch (inst_get_field(inst, 21, 30)) {
+ case OP_63_MTFSB0:
+ case OP_63_MTFSB1:
+ case OP_63_MTFSF:
+ case OP_63_MTFSFI:
+ case OP_63_MCRFS:
+ case OP_63_MFFS:
+ case OP_63_FCMPU:
+ case OP_63_FCMPO:
+ case OP_63_FNEG:
+ case OP_63_FMR:
+ case OP_63_FABS:
+ case OP_63_FRSP:
+ case OP_63_FDIV:
+ case OP_63_FADD:
+ case OP_63_FSUB:
+ case OP_63_FCTIW:
+ case OP_63_FCTIWZ:
+ case OP_63_FRSQRTE:
+ case OP_63_FCPSGN:
+ return true;
+ }
+ switch (inst_get_field(inst, 26, 30)) {
+ case OP_63_FMUL:
+ case OP_63_FSEL:
+ case OP_63_FMSUB:
+ case OP_63_FMADD:
+ case OP_63_FNMSUB:
+ case OP_63_FNMADD:
+ return true;
+ }
+ break;
+ case 31:
+ switch (inst_get_field(inst, 21, 30)) {
+ case OP_31_LFSX:
+ case OP_31_LFSUX:
+ case OP_31_LFDX:
+ case OP_31_LFDUX:
+ case OP_31_STFSX:
+ case OP_31_STFSUX:
+ case OP_31_STFX:
+ case OP_31_STFUX:
+ case OP_31_STFIWX:
+ return true;
+ }
+ break;
+ }
+
+ return false;
+}
+
+static int get_d_signext(u32 inst)
+{
+ int d = inst & 0xfff;
+
+ if (d & 0x800)
+ return (d & 0x7ff) - 0x800;
+
+ return (d & 0x7ff);
+}
+
+static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc,
+ int reg_out, int reg_in1, int reg_in2,
+ int reg_in3, int scalar,
+ void (*func)(struct thread_struct *t,
+ u32 *dst, u32 *src1,
+ u32 *src2, u32 *src3))
+{
+ u32 *qpr = vcpu->arch.qpr;
+ u64 *fpr = vcpu->arch.fpr;
+ u32 ps0_out;
+ u32 ps0_in1, ps0_in2, ps0_in3;
+ u32 ps1_in1, ps1_in2, ps1_in3;
+ struct thread_struct t;
+ t.fpscr.val = vcpu->arch.fpscr;
+
+ /* RC */
+ WARN_ON(rc);
+
+ /* PS0 */
+ cvt_df((double*)&fpr[reg_in1], (float*)&ps0_in1, &t);
+ cvt_df((double*)&fpr[reg_in2], (float*)&ps0_in2, &t);
+ cvt_df((double*)&fpr[reg_in3], (float*)&ps0_in3, &t);
+
+ if (scalar & SCALAR_LOW)
+ ps0_in2 = qpr[reg_in2];
+
+ func(&t, &ps0_out, &ps0_in1, &ps0_in2, &ps0_in3);
+
+ dprintk(KERN_INFO "PS3 ps0 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
+ ps0_in1, ps0_in2, ps0_in3, ps0_out);
+
+ if (!(scalar & SCALAR_NO_PS0))
+ cvt_fd((float*)&ps0_out, (double*)&fpr[reg_out], &t);
+
+ /* PS1 */
+ ps1_in1 = qpr[reg_in1];
+ ps1_in2 = qpr[reg_in2];
+ ps1_in3 = qpr[reg_in3];
+
+ if (scalar & SCALAR_HIGH)
+ ps1_in2 = ps0_in2;
+
+ if (!(scalar & SCALAR_NO_PS1))
+ func(&t, &qpr[reg_out], &ps1_in1, &ps1_in2, &ps1_in3);
+
+ dprintk(KERN_INFO "PS3 ps1 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
+ ps1_in1, ps1_in2, ps1_in3, qpr[reg_out]);
+
+ return EMULATE_DONE;
+}
+
+static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc,
+ int reg_out, int reg_in1, int reg_in2,
+ int scalar,
+ void (*func)(struct thread_struct *t,
+ u32 *dst, u32 *src1,
+ u32 *src2))
+{
+ u32 *qpr = vcpu->arch.qpr;
+ u64 *fpr = vcpu->arch.fpr;
+ u32 ps0_out;
+ u32 ps0_in1, ps0_in2;
+ u32 ps1_out;
+ u32 ps1_in1, ps1_in2;
+ struct thread_struct t;
+ t.fpscr.val = vcpu->arch.fpscr;
+
+ /* RC */
+ WARN_ON(rc);
+
+ /* PS0 */
+ cvt_df((double*)&fpr[reg_in1], (float*)&ps0_in1, &t);
+
+ if (scalar & SCALAR_LOW)
+ ps0_in2 = qpr[reg_in2];
+ else
+ cvt_df((double*)&fpr[reg_in2], (float*)&ps0_in2, &t);
+
+ func(&t, &ps0_out, &ps0_in1, &ps0_in2);
+
+ if (!(scalar & SCALAR_NO_PS0)) {
+ dprintk(KERN_INFO "PS2 ps0 -> f(0x%x, 0x%x) = 0x%x\n",
+ ps0_in1, ps0_in2, ps0_out);
+
+ cvt_fd((float*)&ps0_out, (double*)&fpr[reg_out], &t);
+ }
+
+ /* PS1 */
+ ps1_in1 = qpr[reg_in1];
+ ps1_in2 = qpr[reg_in2];
+
+ if (scalar & SCALAR_HIGH)
+ ps1_in2 = ps0_in2;
+
+ func(&t, &ps1_out, &ps1_in1, &ps1_in2);
+
+ if (!(scalar & SCALAR_NO_PS1)) {
+ qpr[reg_out] = ps1_out;
+
+ dprintk(KERN_INFO "PS2 ps1 -> f(0x%x, 0x%x) = 0x%x\n",
+ ps1_in1, ps1_in2, qpr[reg_out]);
+ }
+
+ return EMULATE_DONE;
+}
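+
+/*
+ * How the SCALAR_* flags (defined elsewhere) are interpreted by the
+ * helpers above: SCALAR_LOW feeds operand 2's PS1 half into the PS0
+ * computation, SCALAR_HIGH feeds operand 2's PS0 half into the PS1
+ * computation, and SCALAR_NO_PS0/SCALAR_NO_PS1 suppress the write-back
+ * of that half.  For example, the ps_sum0 case below passes
+ * SCALAR_NO_PS1 | SCALAR_LOW with fps_fadds, which works out to:
+ *
+ *	frD.ps0 = frA.ps0 + frB.ps1;	computed here
+ *	frD.ps1 = frC.ps1;		copied afterwards by the caller
+ */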
+
+static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc,
+ int reg_out, int reg_in,
+ void (*func)(struct thread_struct *t,
+ u32 *dst, u32 *src1))
+{
+ u32 *qpr = vcpu->arch.qpr;
+ u64 *fpr = vcpu->arch.fpr;
+ u32 ps0_out, ps0_in;
+ u32 ps1_in;
+ struct thread_struct t;
+ t.fpscr.val = vcpu->arch.fpscr;
+
+ /* RC */
+ WARN_ON(rc);
+
+ /* PS0 */
+ cvt_df((double*)&fpr[reg_in], (float*)&ps0_in, &t);
+ func(&t, &ps0_out, &ps0_in);
+
+ dprintk(KERN_INFO "PS1 ps0 -> f(0x%x) = 0x%x\n",
+ ps0_in, ps0_out);
+
+ cvt_fd((float*)&ps0_out, (double*)&fpr[reg_out], &t);
+
+ /* PS1 */
+ ps1_in = qpr[reg_in];
+ func(&t, &qpr[reg_out], &ps1_in);
+
+ dprintk(KERN_INFO "PS1 ps1 -> f(0x%x) = 0x%x\n",
+ ps1_in, qpr[reg_out]);
+
+ return EMULATE_DONE;
+}
+
+int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ u32 inst = kvmppc_get_last_inst(vcpu);
+ enum emulation_result emulated = EMULATE_DONE;
+
+ int ax_rd = inst_get_field(inst, 6, 10);
+ int ax_ra = inst_get_field(inst, 11, 15);
+ int ax_rb = inst_get_field(inst, 16, 20);
+ int ax_rc = inst_get_field(inst, 21, 25);
+ short full_d = inst_get_field(inst, 16, 31);
+
+ u64 *fpr_d = &vcpu->arch.fpr[ax_rd];
+ u64 *fpr_a = &vcpu->arch.fpr[ax_ra];
+ u64 *fpr_b = &vcpu->arch.fpr[ax_rb];
+ u64 *fpr_c = &vcpu->arch.fpr[ax_rc];
+
+ bool rcomp = (inst & 1) ? true : false;
+ u32 cr = kvmppc_get_cr(vcpu);
+ struct thread_struct t;
+#ifdef DEBUG
+ int i;
+#endif
+
+ t.fpscr.val = vcpu->arch.fpscr;
+
+ if (!kvmppc_inst_is_paired_single(vcpu, inst))
+ return EMULATE_FAIL;
+
+ if (!(vcpu->arch.msr & MSR_FP)) {
+ kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL);
+ return EMULATE_AGAIN;
+ }
+
+ kvmppc_giveup_ext(vcpu, MSR_FP);
+ preempt_disable();
+ enable_kernel_fp();
+ /* Do we need to clear FE0 / FE1 here? Don't think so. */
+
+#ifdef DEBUG
+ for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) {
+ u32 f;
+ cvt_df((double*)&vcpu->arch.fpr[i], (float*)&f, &t);
+ dprintk(KERN_INFO "FPR[%d] = 0x%x / 0x%llx QPR[%d] = 0x%x\n",
+ i, f, vcpu->arch.fpr[i], i, vcpu->arch.qpr[i]);
+ }
+#endif
+
+ switch (get_op(inst)) {
+ case OP_PSQ_L:
+ {
+ ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
+ bool w = inst_get_field(inst, 16, 16) ? true : false;
+ int i = inst_get_field(inst, 17, 19);
+
+ addr += get_d_signext(inst);
+ emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
+ break;
+ }
+ case OP_PSQ_LU:
+ {
+ ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
+ bool w = inst_get_field(inst, 16, 16) ? true : false;
+ int i = inst_get_field(inst, 17, 19);
+
+ addr += get_d_signext(inst);
+ emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
+
+ if (emulated == EMULATE_DONE)
+ kvmppc_set_gpr(vcpu, ax_ra, addr);
+ break;
+ }
+ case OP_PSQ_ST:
+ {
+ ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
+ bool w = inst_get_field(inst, 16, 16) ? true : false;
+ int i = inst_get_field(inst, 17, 19);
+
+ addr += get_d_signext(inst);
+ emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
+ break;
+ }
+ case OP_PSQ_STU:
+ {
+ ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
+ bool w = inst_get_field(inst, 16, 16) ? true : false;
+ int i = inst_get_field(inst, 17, 19);
+
+ addr += get_d_signext(inst);
+ emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
+
+ if (emulated == EMULATE_DONE)
+ kvmppc_set_gpr(vcpu, ax_ra, addr);
+ break;
+ }
+ case 4:
+ /* X form */
+ switch (inst_get_field(inst, 21, 30)) {
+ case OP_4X_PS_CMPU0:
+ /* XXX */
+ emulated = EMULATE_FAIL;
+ break;
+ case OP_4X_PSQ_LX:
+ {
+ ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
+ bool w = inst_get_field(inst, 21, 21) ? true : false;
+ int i = inst_get_field(inst, 22, 24);
+
+ addr += kvmppc_get_gpr(vcpu, ax_rb);
+ emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
+ break;
+ }
+ case OP_4X_PS_CMPO0:
+ /* XXX */
+ emulated = EMULATE_FAIL;
+ break;
+ case OP_4X_PSQ_LUX:
+ {
+ ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
+ bool w = inst_get_field(inst, 21, 21) ? true : false;
+ int i = inst_get_field(inst, 22, 24);
+
+ addr += kvmppc_get_gpr(vcpu, ax_rb);
+ emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
+
+ if (emulated == EMULATE_DONE)
+ kvmppc_set_gpr(vcpu, ax_ra, addr);
+ break;
+ }
+ case OP_4X_PS_NEG:
+ vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
+ vcpu->arch.fpr[ax_rd] ^= 0x8000000000000000ULL;
+ vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
+ vcpu->arch.qpr[ax_rd] ^= 0x80000000;
+ break;
+ case OP_4X_PS_CMPU1:
+ /* XXX */
+ emulated = EMULATE_FAIL;
+ break;
+ case OP_4X_PS_MR:
+ WARN_ON(rcomp);
+ vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
+ vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
+ break;
+ case OP_4X_PS_CMPO1:
+ /* XXX */
+ emulated = EMULATE_FAIL;
+ break;
+ case OP_4X_PS_NABS:
+ WARN_ON(rcomp);
+ vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
+ vcpu->arch.fpr[ax_rd] |= 0x8000000000000000ULL;
+ vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
+ vcpu->arch.qpr[ax_rd] |= 0x80000000;
+ break;
+ case OP_4X_PS_ABS:
+ WARN_ON(rcomp);
+ vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
+ vcpu->arch.fpr[ax_rd] &= ~0x8000000000000000ULL;
+ vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
+ vcpu->arch.qpr[ax_rd] &= ~0x80000000;
+ break;
+ case OP_4X_PS_MERGE00:
+ WARN_ON(rcomp);
+ vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra];
+ /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
+ cvt_df((double*)&vcpu->arch.fpr[ax_rb],
+ (float*)&vcpu->arch.qpr[ax_rd], &t);
+ break;
+ case OP_4X_PS_MERGE01:
+ WARN_ON(rcomp);
+ vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra];
+ vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
+ break;
+ case OP_4X_PS_MERGE10:
+ WARN_ON(rcomp);
+ /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
+ cvt_fd((float*)&vcpu->arch.qpr[ax_ra],
+ (double*)&vcpu->arch.fpr[ax_rd], &t);
+ /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
+ cvt_df((double*)&vcpu->arch.fpr[ax_rb],
+ (float*)&vcpu->arch.qpr[ax_rd], &t);
+ break;
+ case OP_4X_PS_MERGE11:
+ WARN_ON(rcomp);
+ /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
+ cvt_fd((float*)&vcpu->arch.qpr[ax_ra],
+ (double*)&vcpu->arch.fpr[ax_rd], &t);
+ vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
+ break;
+ }
+ /* XW form */
+ switch (inst_get_field(inst, 25, 30)) {
+ case OP_4XW_PSQ_STX:
+ {
+ ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
+ bool w = inst_get_field(inst, 21, 21) ? true : false;
+ int i = inst_get_field(inst, 22, 24);
+
+ addr += kvmppc_get_gpr(vcpu, ax_rb);
+ emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
+ break;
+ }
+ case OP_4XW_PSQ_STUX:
+ {
+ ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
+ bool w = inst_get_field(inst, 21, 21) ? true : false;
+ int i = inst_get_field(inst, 22, 24);
+
+ addr += kvmppc_get_gpr(vcpu, ax_rb);
+ emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
+
+ if (emulated == EMULATE_DONE)
+ kvmppc_set_gpr(vcpu, ax_ra, addr);
+ break;
+ }
+ }
+ /* A form */
+ switch (inst_get_field(inst, 26, 30)) {
+ case OP_4A_PS_SUM1:
+ emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
+ ax_rb, ax_ra, SCALAR_NO_PS0 | SCALAR_HIGH, fps_fadds);
+ vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rc];
+ break;
+ case OP_4A_PS_SUM0:
+ emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
+ ax_ra, ax_rb, SCALAR_NO_PS1 | SCALAR_LOW, fps_fadds);
+ vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rc];
+ break;
+ case OP_4A_PS_MULS0:
+ emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
+ ax_ra, ax_rc, SCALAR_HIGH, fps_fmuls);
+ break;
+ case OP_4A_PS_MULS1:
+ emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
+ ax_ra, ax_rc, SCALAR_LOW, fps_fmuls);
+ break;
+ case OP_4A_PS_MADDS0:
+ emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
+ ax_ra, ax_rc, ax_rb, SCALAR_HIGH, fps_fmadds);
+ break;
+ case OP_4A_PS_MADDS1:
+ emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
+ ax_ra, ax_rc, ax_rb, SCALAR_LOW, fps_fmadds);
+ break;
+ case OP_4A_PS_DIV:
+ emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
+ ax_ra, ax_rb, SCALAR_NONE, fps_fdivs);
+ break;
+ case OP_4A_PS_SUB:
+ emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
+ ax_ra, ax_rb, SCALAR_NONE, fps_fsubs);
+ break;
+ case OP_4A_PS_ADD:
+ emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
+ ax_ra, ax_rb, SCALAR_NONE, fps_fadds);
+ break;
+ case OP_4A_PS_SEL:
+ emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
+ ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fsel);
+ break;
+ case OP_4A_PS_RES:
+ emulated = kvmppc_ps_one_in(vcpu, rcomp, ax_rd,
+ ax_rb, fps_fres);
+ break;
+ case OP_4A_PS_MUL:
+ emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
+ ax_ra, ax_rc, SCALAR_NONE, fps_fmuls);
+ break;
+ case OP_4A_PS_RSQRTE:
+ emulated = kvmppc_ps_one_in(vcpu, rcomp, ax_rd,
+ ax_rb, fps_frsqrte);
+ break;
+ case OP_4A_PS_MSUB:
+ emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
+ ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fmsubs);
+ break;
+ case OP_4A_PS_MADD:
+ emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
+ ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fmadds);
+ break;
+ case OP_4A_PS_NMSUB:
+ emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
+ ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fnmsubs);
+ break;
+ case OP_4A_PS_NMADD:
+ emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
+ ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fnmadds);
+ break;
+ }
+ break;
+
+ /* Real FPU operations */
+
+ case OP_LFS:
+ {
+ ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;
+
+ emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
+ FPU_LS_SINGLE);
+ break;
+ }
+ case OP_LFSU:
+ {
+ ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;
+
+ emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
+ FPU_LS_SINGLE);
+
+ if (emulated == EMULATE_DONE)
+ kvmppc_set_gpr(vcpu, ax_ra, addr);
+ break;
+ }
+ case OP_LFD:
+ {
+ ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;
+
+ emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
+ FPU_LS_DOUBLE);
+ break;
+ }
+ case OP_LFDU:
+ {
+ ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;
+
+ emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
+ FPU_LS_DOUBLE);
+
+ if (emulated == EMULATE_DONE)
+ kvmppc_set_gpr(vcpu, ax_ra, addr);
+ break;
+ }
+ case OP_STFS:
+ {
+ ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;
+
+ emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
+ FPU_LS_SINGLE);
+ break;
+ }
+ case OP_STFSU:
+ {
+ ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;
+
+ emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
+ FPU_LS_SINGLE);
+
+ if (emulated == EMULATE_DONE)
+ kvmppc_set_gpr(vcpu, ax_ra, addr);
+ break;
+ }
+ case OP_STFD:
+ {
+ ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;
+
+ emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
+ FPU_LS_DOUBLE);
+ break;
+ }
+ case OP_STFDU:
+ {
+ ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;
+
+ emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
+ FPU_LS_DOUBLE);
+
+ if (emulated == EMULATE_DONE)
+ kvmppc_set_gpr(vcpu, ax_ra, addr);
+ break;
+ }
+ case 31:
+ switch (inst_get_field(inst, 21, 30)) {
+ case OP_31_LFSX:
+ {
+ ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
+
+ addr += kvmppc_get_gpr(vcpu, ax_rb);
+ emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
+ addr, FPU_LS_SINGLE);
+ break;
+ }
+ case OP_31_LFSUX:
+ {
+ ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
+ kvmppc_get_gpr(vcpu, ax_rb);
+
+ emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
+ addr, FPU_LS_SINGLE);
+
+ if (emulated == EMULATE_DONE)
+ kvmppc_set_gpr(vcpu, ax_ra, addr);
+ break;
+ }
+ case OP_31_LFDX:
+ {
+ ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
+ kvmppc_get_gpr(vcpu, ax_rb);
+
+ emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
+ addr, FPU_LS_DOUBLE);
+ break;
+ }
+ case OP_31_LFDUX:
+ {
+ ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
+ kvmppc_get_gpr(vcpu, ax_rb);
+
+ emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
+ addr, FPU_LS_DOUBLE);
+
+ if (emulated == EMULATE_DONE)
+ kvmppc_set_gpr(vcpu, ax_ra, addr);
+ break;
+ }
+ case OP_31_STFSX:
+ {
+ ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
+ kvmppc_get_gpr(vcpu, ax_rb);
+
+ emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
+ addr, FPU_LS_SINGLE);
+ break;
+ }
+ case OP_31_STFSUX:
+ {
+ ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
+ kvmppc_get_gpr(vcpu, ax_rb);
+
+ emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
+ addr, FPU_LS_SINGLE);
+
+ if (emulated == EMULATE_DONE)
+ kvmppc_set_gpr(vcpu, ax_ra, addr);
+ break;
+ }
+ case OP_31_STFX:
+ {
+ ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
+ kvmppc_get_gpr(vcpu, ax_rb);
+
+ emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
+ addr, FPU_LS_DOUBLE);
+ break;
+ }
+ case OP_31_STFUX:
+ {
+ ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
+ kvmppc_get_gpr(vcpu, ax_rb);
+
+ emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
+ addr, FPU_LS_DOUBLE);
+
+ if (emulated == EMULATE_DONE)
+ kvmppc_set_gpr(vcpu, ax_ra, addr);
+ break;
+ }
+ case OP_31_STFIWX:
+ {
+ ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
+ kvmppc_get_gpr(vcpu, ax_rb);
+
+ emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
+ addr,
+ FPU_LS_SINGLE_LOW);
+ break;
+ }
+ }
+ break;
+ case 59:
+ switch (inst_get_field(inst, 21, 30)) {
+ case OP_59_FADDS:
+ fpd_fadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
+ kvmppc_sync_qpr(vcpu, ax_rd);
+ break;
+ case OP_59_FSUBS:
+ fpd_fsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
+ kvmppc_sync_qpr(vcpu, ax_rd);
+ break;
+ case OP_59_FDIVS:
+ fpd_fdivs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
+ kvmppc_sync_qpr(vcpu, ax_rd);
+ break;
+ case OP_59_FRES:
+ fpd_fres(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+ kvmppc_sync_qpr(vcpu, ax_rd);
+ break;
+ case OP_59_FRSQRTES:
+ fpd_frsqrtes(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+ kvmppc_sync_qpr(vcpu, ax_rd);
+ break;
+ }
+ switch (inst_get_field(inst, 26, 30)) {
+ case OP_59_FMULS:
+ fpd_fmuls(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c);
+ kvmppc_sync_qpr(vcpu, ax_rd);
+ break;
+ case OP_59_FMSUBS:
+ fpd_fmsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+ kvmppc_sync_qpr(vcpu, ax_rd);
+ break;
+ case OP_59_FMADDS:
+ fpd_fmadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+ kvmppc_sync_qpr(vcpu, ax_rd);
+ break;
+ case OP_59_FNMSUBS:
+ fpd_fnmsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+ kvmppc_sync_qpr(vcpu, ax_rd);
+ break;
+ case OP_59_FNMADDS:
+ fpd_fnmadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+ kvmppc_sync_qpr(vcpu, ax_rd);
+ break;
+ }
+ break;
+ case 63:
+ switch (inst_get_field(inst, 21, 30)) {
+ case OP_63_MTFSB0:
+ case OP_63_MTFSB1:
+ case OP_63_MCRFS:
+ case OP_63_MTFSFI:
+ /* XXX need to implement */
+ break;
+ case OP_63_MFFS:
+ /* XXX missing CR */
+ *fpr_d = vcpu->arch.fpscr;
+ break;
+ case OP_63_MTFSF:
+ /* XXX missing fm bits */
+ /* XXX missing CR */
+ vcpu->arch.fpscr = *fpr_b;
+ break;
+ case OP_63_FCMPU:
+ {
+ u32 tmp_cr;
+ u32 cr0_mask = 0xf0000000;
+ u32 cr_shift = inst_get_field(inst, 6, 8) * 4;
+
+ fpd_fcmpu(&vcpu->arch.fpscr, &tmp_cr, fpr_a, fpr_b);
+ cr &= ~(cr0_mask >> cr_shift);
+ cr |= (tmp_cr & cr0_mask) >> cr_shift;
+ break;
+ }
+ case OP_63_FCMPO:
+ {
+ u32 tmp_cr;
+ u32 cr0_mask = 0xf0000000;
+ u32 cr_shift = inst_get_field(inst, 6, 8) * 4;
+
+ fpd_fcmpo(&vcpu->arch.fpscr, &tmp_cr, fpr_a, fpr_b);
+ cr &= ~(cr0_mask >> cr_shift);
+ cr |= (tmp_cr & cr0_mask) >> cr_shift;
+ break;
+ }
+ case OP_63_FNEG:
+ fpd_fneg(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+ break;
+ case OP_63_FMR:
+ *fpr_d = *fpr_b;
+ break;
+ case OP_63_FABS:
+ fpd_fabs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+ break;
+ case OP_63_FCPSGN:
+ fpd_fcpsgn(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
+ break;
+ case OP_63_FDIV:
+ fpd_fdiv(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
+ break;
+ case OP_63_FADD:
+ fpd_fadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
+ break;
+ case OP_63_FSUB:
+ fpd_fsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
+ break;
+ case OP_63_FCTIW:
+ fpd_fctiw(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+ break;
+ case OP_63_FCTIWZ:
+ fpd_fctiwz(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+ break;
+ case OP_63_FRSP:
+ fpd_frsp(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+ kvmppc_sync_qpr(vcpu, ax_rd);
+ break;
+ case OP_63_FRSQRTE:
+ {
+ double one = 1.0f;
+
+ /* fD = sqrt(fB) */
+ fpd_fsqrt(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+ /* fD = 1.0f / fD */
+ fpd_fdiv(&vcpu->arch.fpscr, &cr, fpr_d, (u64*)&one, fpr_d);
+ break;
+ }
+ }
+ switch (inst_get_field(inst, 26, 30)) {
+ case OP_63_FMUL:
+ fpd_fmul(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c);
+ break;
+ case OP_63_FSEL:
+ fpd_fsel(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+ break;
+ case OP_63_FMSUB:
+ fpd_fmsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+ break;
+ case OP_63_FMADD:
+ fpd_fmadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+ break;
+ case OP_63_FNMSUB:
+ fpd_fnmsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+ break;
+ case OP_63_FNMADD:
+ fpd_fnmadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+ break;
+ }
+ break;
+ }
+
+#ifdef DEBUG
+ for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) {
+ u32 f;
+ cvt_df((double*)&vcpu->arch.fpr[i], (float*)&f, &t);
+ dprintk(KERN_INFO "FPR[%d] = 0x%x\n", i, f);
+ }
+#endif
+
+ if (rcomp)
+ kvmppc_set_cr(vcpu, cr);
+
+ preempt_enable();
+
+ return emulated;
+}
diff --git a/arch/powerpc/kvm/book3s_64_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index c83c60ad96c5..506d5c316c96 100644
--- a/arch/powerpc/kvm/book3s_64_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -22,7 +22,10 @@
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
+
+#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/exception-64s.h>
+#endif
/*****************************************************************************
* *
@@ -30,6 +33,39 @@
* *
****************************************************************************/
+#if defined(CONFIG_PPC_BOOK3S_64)
+
+#define LOAD_SHADOW_VCPU(reg) \
+ mfspr reg, SPRN_SPRG_PACA
+
+#define SHADOW_VCPU_OFF PACA_KVM_SVCPU
+#define MSR_NOIRQ MSR_KERNEL & ~(MSR_IR | MSR_DR)
+#define FUNC(name) GLUE(.,name)
+
+#elif defined(CONFIG_PPC_BOOK3S_32)
+
+#define LOAD_SHADOW_VCPU(reg) \
+ mfspr reg, SPRN_SPRG_THREAD; \
+ lwz reg, THREAD_KVM_SVCPU(reg); \
+ /* PPC32 can have a NULL pointer - let's check for that */ \
+ mtspr SPRN_SPRG_SCRATCH1, r12; /* Save r12 */ \
+ mfcr r12; \
+ cmpwi reg, 0; \
+ bne 1f; \
+ mfspr reg, SPRN_SPRG_SCRATCH0; \
+ mtcr r12; \
+ mfspr r12, SPRN_SPRG_SCRATCH1; \
+ b kvmppc_resume_\intno; \
+1:; \
+ mtcr r12; \
+ mfspr r12, SPRN_SPRG_SCRATCH1; \
+ tophys(reg, reg)
+
+#define SHADOW_VCPU_OFF 0
+#define MSR_NOIRQ MSR_KERNEL
+#define FUNC(name) name
+
+#endif
.macro INTERRUPT_TRAMPOLINE intno
@@ -42,19 +78,19 @@ kvmppc_trampoline_\intno:
* First thing to do is to find out if we're coming
* from a KVM guest or a Linux process.
*
- * To distinguish, we check a magic byte in the PACA
+ * To distinguish, we check a magic byte in the PACA/current
*/
- mfspr r13, SPRN_SPRG_PACA /* r13 = PACA */
- std r12, PACA_KVM_SCRATCH0(r13)
+ LOAD_SHADOW_VCPU(r13)
+ PPC_STL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
mfcr r12
- stw r12, PACA_KVM_SCRATCH1(r13)
- lbz r12, PACA_KVM_IN_GUEST(r13)
+ stw r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
+ lbz r12, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)
cmpwi r12, KVM_GUEST_MODE_NONE
bne ..kvmppc_handler_hasmagic_\intno
/* No KVM guest? Then jump back to the Linux handler! */
- lwz r12, PACA_KVM_SCRATCH1(r13)
+ lwz r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
mtcr r12
- ld r12, PACA_KVM_SCRATCH0(r13)
+ PPC_LL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
mfspr r13, SPRN_SPRG_SCRATCH0 /* r13 = original r13 */
b kvmppc_resume_\intno /* Get back original handler */
@@ -76,9 +112,7 @@ kvmppc_trampoline_\intno:
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_SYSTEM_RESET
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_MACHINE_CHECK
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_STORAGE
-INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_SEGMENT
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_STORAGE
-INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_SEGMENT
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_EXTERNAL
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALIGNMENT
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PROGRAM
@@ -88,7 +122,14 @@ INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_SYSCALL
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_TRACE
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PERFMON
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALTIVEC
+
+/* These are only available on 64-bit machines */
+
+#ifdef CONFIG_PPC_BOOK3S_64
+INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_SEGMENT
+INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_SEGMENT
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_VSX
+#endif
/*
* Bring us back to the faulting code, but skip the
@@ -99,11 +140,11 @@ INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_VSX
*
* Input Registers:
*
- * R12 = free
- * R13 = PACA
- * PACA.KVM.SCRATCH0 = guest R12
- * PACA.KVM.SCRATCH1 = guest CR
- * SPRG_SCRATCH0 = guest R13
+ * R12 = free
+ * R13 = Shadow VCPU (PACA)
+ * SVCPU.SCRATCH0 = guest R12
+ * SVCPU.SCRATCH1 = guest CR
+ * SPRG_SCRATCH0 = guest R13
*
*/
kvmppc_handler_skip_ins:
@@ -114,9 +155,9 @@ kvmppc_handler_skip_ins:
mtsrr0 r12
/* Clean up all state */
- lwz r12, PACA_KVM_SCRATCH1(r13)
+ lwz r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
mtcr r12
- ld r12, PACA_KVM_SCRATCH0(r13)
+ PPC_LL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
mfspr r13, SPRN_SPRG_SCRATCH0
/* And get back into the code */
@@ -147,41 +188,48 @@ kvmppc_handler_lowmem_trampoline_end:
*
* R3 = function
* R4 = MSR
- * R5 = CTR
+ * R5 = scratch register
*
*/
_GLOBAL(kvmppc_rmcall)
- mtmsr r4 /* Disable relocation, so mtsrr
+ LOAD_REG_IMMEDIATE(r5, MSR_NOIRQ)
+ mtmsr r5 /* Disable relocation and interrupts, so mtsrr
doesn't get interrupted */
- mtctr r5
+ sync
mtsrr0 r3
mtsrr1 r4
RFI
+#if defined(CONFIG_PPC_BOOK3S_32)
+#define STACK_LR INT_FRAME_SIZE+4
+#elif defined(CONFIG_PPC_BOOK3S_64)
+#define STACK_LR _LINK
+#endif
+
/*
* Activate current's external feature (FPU/Altivec/VSX)
*/
-#define define_load_up(what) \
- \
-_GLOBAL(kvmppc_load_up_ ## what); \
- subi r1, r1, INT_FRAME_SIZE; \
- mflr r3; \
- std r3, _LINK(r1); \
- mfmsr r4; \
- std r31, GPR3(r1); \
- mr r31, r4; \
- li r5, MSR_DR; \
- oris r5, r5, MSR_EE@h; \
- andc r4, r4, r5; \
- mtmsr r4; \
- \
- bl .load_up_ ## what; \
- \
- mtmsr r31; \
- ld r3, _LINK(r1); \
- ld r31, GPR3(r1); \
- addi r1, r1, INT_FRAME_SIZE; \
- mtlr r3; \
+#define define_load_up(what) \
+ \
+_GLOBAL(kvmppc_load_up_ ## what); \
+ PPC_STLU r1, -INT_FRAME_SIZE(r1); \
+ mflr r3; \
+ PPC_STL r3, STACK_LR(r1); \
+ PPC_STL r20, _NIP(r1); \
+ mfmsr r20; \
+ LOAD_REG_IMMEDIATE(r3, MSR_DR|MSR_EE); \
+ andc r3,r20,r3; /* Disable DR,EE */ \
+ mtmsr r3; \
+ sync; \
+ \
+ bl FUNC(load_up_ ## what); \
+ \
+ mtmsr r20; /* Enable DR,EE */ \
+ sync; \
+ PPC_LL r3, STACK_LR(r1); \
+ PPC_LL r20, _NIP(r1); \
+ mtlr r3; \
+ addi r1, r1, INT_FRAME_SIZE; \
blr
define_load_up(fpu)
@@ -194,11 +242,10 @@ define_load_up(vsx)
.global kvmppc_trampoline_lowmem
kvmppc_trampoline_lowmem:
- .long kvmppc_handler_lowmem_trampoline - _stext
+ .long kvmppc_handler_lowmem_trampoline - CONFIG_KERNEL_START
.global kvmppc_trampoline_enter
kvmppc_trampoline_enter:
- .long kvmppc_handler_trampoline_enter - _stext
-
-#include "book3s_64_slb.S"
+ .long kvmppc_handler_trampoline_enter - CONFIG_KERNEL_START
+#include "book3s_segment.S"
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
new file mode 100644
index 000000000000..7c52ed0b7051
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -0,0 +1,259 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright SUSE Linux Products GmbH 2010
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+/* Real mode helpers */
+
+#if defined(CONFIG_PPC_BOOK3S_64)
+
+#define GET_SHADOW_VCPU(reg) \
+ addi reg, r13, PACA_KVM_SVCPU
+
+#elif defined(CONFIG_PPC_BOOK3S_32)
+
+#define GET_SHADOW_VCPU(reg) \
+ tophys(reg, r2); \
+ lwz reg, (THREAD + THREAD_KVM_SVCPU)(reg); \
+ tophys(reg, reg)
+
+#endif
+
+/* Disable for nested KVM */
+#define USE_QUICK_LAST_INST
+
+
+/* Get helper functions for subarch specific functionality */
+
+#if defined(CONFIG_PPC_BOOK3S_64)
+#include "book3s_64_slb.S"
+#elif defined(CONFIG_PPC_BOOK3S_32)
+#include "book3s_32_sr.S"
+#endif
+
+/******************************************************************************
+ * *
+ * Entry code *
+ * *
+ *****************************************************************************/
+
+.global kvmppc_handler_trampoline_enter
+kvmppc_handler_trampoline_enter:
+
+ /* Required state:
+ *
+ * MSR = ~IR|DR
+ * R13 = PACA
+ * R1 = host R1
+ * R2 = host R2
+ * R10 = guest MSR
+ * all other volatile GPRS = free
+ * SVCPU[CR] = guest CR
+ * SVCPU[XER] = guest XER
+ * SVCPU[CTR] = guest CTR
+ * SVCPU[LR] = guest LR
+ */
+
+ /* r3 = shadow vcpu */
+ GET_SHADOW_VCPU(r3)
+
+ /* Move SRR0 and SRR1 into the respective regs */
+ PPC_LL r9, SVCPU_PC(r3)
+ mtsrr0 r9
+ mtsrr1 r10
+
+ /* Activate guest mode, so faults get handled by KVM */
+ li r11, KVM_GUEST_MODE_GUEST
+ stb r11, SVCPU_IN_GUEST(r3)
+
+ /* Switch to guest segment. This is subarch specific. */
+ LOAD_GUEST_SEGMENTS
+
+ /* Enter guest */
+
+ PPC_LL r4, (SVCPU_CTR)(r3)
+ PPC_LL r5, (SVCPU_LR)(r3)
+ lwz r6, (SVCPU_CR)(r3)
+ lwz r7, (SVCPU_XER)(r3)
+
+ mtctr r4
+ mtlr r5
+ mtcr r6
+ mtxer r7
+
+ PPC_LL r0, (SVCPU_R0)(r3)
+ PPC_LL r1, (SVCPU_R1)(r3)
+ PPC_LL r2, (SVCPU_R2)(r3)
+ PPC_LL r4, (SVCPU_R4)(r3)
+ PPC_LL r5, (SVCPU_R5)(r3)
+ PPC_LL r6, (SVCPU_R6)(r3)
+ PPC_LL r7, (SVCPU_R7)(r3)
+ PPC_LL r8, (SVCPU_R8)(r3)
+ PPC_LL r9, (SVCPU_R9)(r3)
+ PPC_LL r10, (SVCPU_R10)(r3)
+ PPC_LL r11, (SVCPU_R11)(r3)
+ PPC_LL r12, (SVCPU_R12)(r3)
+ PPC_LL r13, (SVCPU_R13)(r3)
+
+ PPC_LL r3, (SVCPU_R3)(r3)
+
+ RFI
+kvmppc_handler_trampoline_enter_end:
+
+
+
+/******************************************************************************
+ * *
+ * Exit code *
+ * *
+ *****************************************************************************/
+
+.global kvmppc_handler_trampoline_exit
+kvmppc_handler_trampoline_exit:
+
+ /* Register usage at this point:
+ *
+ * SPRG_SCRATCH0 = guest R13
+ * R12 = exit handler id
+ * R13 = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
+ * SVCPU.SCRATCH0 = guest R12
+ * SVCPU.SCRATCH1 = guest CR
+ *
+ */
+
+ /* Save registers */
+
+ PPC_STL r0, (SHADOW_VCPU_OFF + SVCPU_R0)(r13)
+ PPC_STL r1, (SHADOW_VCPU_OFF + SVCPU_R1)(r13)
+ PPC_STL r2, (SHADOW_VCPU_OFF + SVCPU_R2)(r13)
+ PPC_STL r3, (SHADOW_VCPU_OFF + SVCPU_R3)(r13)
+ PPC_STL r4, (SHADOW_VCPU_OFF + SVCPU_R4)(r13)
+ PPC_STL r5, (SHADOW_VCPU_OFF + SVCPU_R5)(r13)
+ PPC_STL r6, (SHADOW_VCPU_OFF + SVCPU_R6)(r13)
+ PPC_STL r7, (SHADOW_VCPU_OFF + SVCPU_R7)(r13)
+ PPC_STL r8, (SHADOW_VCPU_OFF + SVCPU_R8)(r13)
+ PPC_STL r9, (SHADOW_VCPU_OFF + SVCPU_R9)(r13)
+ PPC_STL r10, (SHADOW_VCPU_OFF + SVCPU_R10)(r13)
+ PPC_STL r11, (SHADOW_VCPU_OFF + SVCPU_R11)(r13)
+
+ /* Restore R1/R2 so we can handle faults */
+ PPC_LL r1, (SHADOW_VCPU_OFF + SVCPU_HOST_R1)(r13)
+ PPC_LL r2, (SHADOW_VCPU_OFF + SVCPU_HOST_R2)(r13)
+
+ /* Save guest PC and MSR */
+ mfsrr0 r3
+ mfsrr1 r4
+
+ PPC_STL r3, (SHADOW_VCPU_OFF + SVCPU_PC)(r13)
+ PPC_STL r4, (SHADOW_VCPU_OFF + SVCPU_SHADOW_SRR1)(r13)
+
+ /* Get scratch'ed off registers */
+ mfspr r9, SPRN_SPRG_SCRATCH0
+ PPC_LL r8, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
+ lwz r7, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
+
+ PPC_STL r9, (SHADOW_VCPU_OFF + SVCPU_R13)(r13)
+ PPC_STL r8, (SHADOW_VCPU_OFF + SVCPU_R12)(r13)
+ stw r7, (SHADOW_VCPU_OFF + SVCPU_CR)(r13)
+
+ /* Save more register state */
+
+ mfxer r5
+ mfdar r6
+ mfdsisr r7
+ mfctr r8
+ mflr r9
+
+ stw r5, (SHADOW_VCPU_OFF + SVCPU_XER)(r13)
+ PPC_STL r6, (SHADOW_VCPU_OFF + SVCPU_FAULT_DAR)(r13)
+ stw r7, (SHADOW_VCPU_OFF + SVCPU_FAULT_DSISR)(r13)
+ PPC_STL r8, (SHADOW_VCPU_OFF + SVCPU_CTR)(r13)
+ PPC_STL r9, (SHADOW_VCPU_OFF + SVCPU_LR)(r13)
+
+ /*
+ * In order for us to easily get the last instruction we got
+ * the #vmexit at, we exploit the fact that the virtual layout
+ * is still the same here, so we can just load from the
+ * guest's PC address.
+ */
+
+ /* We only load the last instruction when it's safe */
+ cmpwi r12, BOOK3S_INTERRUPT_DATA_STORAGE
+ beq ld_last_inst
+ cmpwi r12, BOOK3S_INTERRUPT_PROGRAM
+ beq ld_last_inst
+ cmpwi r12, BOOK3S_INTERRUPT_ALIGNMENT
+ beq- ld_last_inst
+
+ b no_ld_last_inst
+
+ld_last_inst:
+ /* Save off the guest instruction we're at */
+
+ /* In case lwz faults */
+ li r0, KVM_INST_FETCH_FAILED
+
+#ifdef USE_QUICK_LAST_INST
+
+ /* Set guest mode to 'jump over instruction' so if lwz faults
+ * we'll just continue at the next IP. */
+ li r9, KVM_GUEST_MODE_SKIP
+ stb r9, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)
+
+ /* 1) enable paging for data */
+ mfmsr r9
+ ori r11, r9, MSR_DR /* Enable paging for data */
+ mtmsr r11
+ sync
+ /* 2) fetch the instruction */
+ lwz r0, 0(r3)
+ /* 3) disable paging again */
+ mtmsr r9
+ sync
+
+#endif
+ stw r0, (SHADOW_VCPU_OFF + SVCPU_LAST_INST)(r13)
+
+no_ld_last_inst:
+
+ /* Unset guest mode */
+ li r9, KVM_GUEST_MODE_NONE
+ stb r9, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)
+
+ /* Switch back to host MMU */
+ LOAD_HOST_SEGMENTS
+
+ /* Register usage at this point:
+ *
+ * R1 = host R1
+ * R2 = host R2
+ * R12 = exit handler id
+ * R13 = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
+ * SVCPU.* = guest *
+ *
+ */
+
+ /* RFI into the highmem handler */
+ mfmsr r7
+ ori r7, r7, MSR_IR|MSR_DR|MSR_RI|MSR_ME /* Enable paging */
+ mtsrr1 r7
+ /* Load highmem handler address */
+ PPC_LL r8, (SHADOW_VCPU_OFF + SVCPU_VMHANDLER)(r13)
+ mtsrr0 r8
+
+ RFI
+kvmppc_handler_trampoline_exit_end:
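+
+/*
+ * The in_guest byte stored above is the same flag the trampolines in
+ * book3s_rmhandlers.S test on entry.  Descriptive sketch of its three
+ * states (the real constants live in the KVM headers):
+ *
+ *	KVM_GUEST_MODE_NONE	host context: bounce to the Linux handler
+ *	KVM_GUEST_MODE_GUEST	guest context: the fault belongs to KVM
+ *	KVM_GUEST_MODE_SKIP	a faulting access (the last-inst fetch
+ *				above) is skipped instead of reported
+ */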
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 2a3a1953d4bd..c92224071021 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -133,6 +133,12 @@ void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_EXTERNAL);
}
+void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
+ struct kvm_interrupt *irq)
+{
+ clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
+}
+
/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
unsigned int priority)
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 669a5c5fc7d7..bc2b4004eb26 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -161,7 +161,7 @@ static int __init kvmppc_e500_init(void)
flush_icache_range(kvmppc_booke_handlers,
kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);
- return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), THIS_MODULE);
+ return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
}
static void __init kvmppc_e500_exit(void)
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index cb72a65f4ecc..4568ec386c2a 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -38,10 +38,12 @@
#define OP_31_XOP_LBZX 87
#define OP_31_XOP_STWX 151
#define OP_31_XOP_STBX 215
+#define OP_31_XOP_LBZUX 119
#define OP_31_XOP_STBUX 247
#define OP_31_XOP_LHZX 279
#define OP_31_XOP_LHZUX 311
#define OP_31_XOP_MFSPR 339
+#define OP_31_XOP_LHAX 343
#define OP_31_XOP_STHX 407
#define OP_31_XOP_STHUX 439
#define OP_31_XOP_MTSPR 467
@@ -62,10 +64,12 @@
#define OP_STBU 39
#define OP_LHZ 40
#define OP_LHZU 41
+#define OP_LHA 42
+#define OP_LHAU 43
#define OP_STH 44
#define OP_STHU 45
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S
static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
{
return 1;
@@ -82,7 +86,7 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
unsigned long dec_nsec;
pr_debug("mtDEC: %x\n", vcpu->arch.dec);
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S
/* mtdec lowers the interrupt line when positive. */
kvmppc_core_dequeue_dec(vcpu);
@@ -128,7 +132,7 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
* from opcode tables in the future. */
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
- u32 inst = vcpu->arch.last_inst;
+ u32 inst = kvmppc_get_last_inst(vcpu);
u32 ea;
int ra;
int rb;
@@ -143,13 +147,9 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
pr_debug(KERN_INFO "Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));
- /* Try again next time */
- if (inst == KVM_INST_FETCH_FAILED)
- return EMULATE_DONE;
-
switch (get_op(inst)) {
case OP_TRAP:
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S
case OP_TRAP_64:
kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
@@ -171,6 +171,19 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
break;
+ case OP_31_XOP_LBZUX:
+ rt = get_rt(inst);
+ ra = get_ra(inst);
+ rb = get_rb(inst);
+
+ ea = kvmppc_get_gpr(vcpu, rb);
+ if (ra)
+ ea += kvmppc_get_gpr(vcpu, ra);
+
+ emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
+ kvmppc_set_gpr(vcpu, ra, ea);
+ break;
+
case OP_31_XOP_STWX:
rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu,
@@ -200,6 +213,11 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
kvmppc_set_gpr(vcpu, rs, ea);
break;
+ case OP_31_XOP_LHAX:
+ rt = get_rt(inst);
+ emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
+ break;
+
case OP_31_XOP_LHZX:
rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
@@ -450,6 +468,18 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
break;
+ case OP_LHA:
+ rt = get_rt(inst);
+ emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
+ break;
+
+ case OP_LHAU:
+ ra = get_ra(inst);
+ rt = get_rt(inst);
+ emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
+ break;
+
case OP_STH:
rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu,
@@ -472,7 +502,9 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
if (emulated == EMULATE_FAIL) {
emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
- if (emulated == EMULATE_FAIL) {
+ if (emulated == EMULATE_AGAIN) {
+ advance = 0;
+ } else if (emulated == EMULATE_FAIL) {
advance = 0;
printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
"(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
@@ -480,10 +512,11 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
}
}
- trace_kvm_ppc_instr(inst, vcpu->arch.pc, emulated);
+ trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);
+ /* Advance past emulated instruction. */
if (advance)
- vcpu->arch.pc += 4; /* Advance past emulated instruction. */
+ kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
return emulated;
}
diff --git a/arch/powerpc/kvm/fpu.S b/arch/powerpc/kvm/fpu.S
new file mode 100644
index 000000000000..2b340a3eee90
--- /dev/null
+++ b/arch/powerpc/kvm/fpu.S
@@ -0,0 +1,273 @@
+/*
+ * FPU helper code to use FPU operations from inside the kernel
+ *
+ * Copyright (C) 2010 Alexander Graf (agraf@suse.de)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <asm/reg.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
+#include <asm/cputable.h>
+#include <asm/cache.h>
+#include <asm/thread_info.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+
+/* Instructions operating on single parameters */
+
+/*
+ * Single operation with one input operand
+ *
+ * R3 = (double*)&fpscr
+ * R4 = (u32*)&result
+ * R5 = (u32*)&param1
+ */
+#define FPS_ONE_IN(name) \
+_GLOBAL(fps_ ## name); \
+ lfd 0,0(r3); /* load up fpscr value */ \
+ MTFSF_L(0); \
+ lfs 0,0(r5); \
+ \
+ name 0,0; \
+ \
+ stfs 0,0(r4); \
+ mffs 0; \
+ stfd 0,0(r3); /* save new fpscr value */ \
+ blr
+
+/*
+ * Single operation with two input operands
+ *
+ * R3 = (double*)&fpscr
+ * R4 = (u32*)&result
+ * R5 = (u32*)&param1
+ * R6 = (u32*)&param2
+ */
+#define FPS_TWO_IN(name) \
+_GLOBAL(fps_ ## name); \
+ lfd 0,0(r3); /* load up fpscr value */ \
+ MTFSF_L(0); \
+ lfs 0,0(r5); \
+ lfs 1,0(r6); \
+ \
+ name 0,0,1; \
+ \
+ stfs 0,0(r4); \
+ mffs 0; \
+ stfd 0,0(r3); /* save new fpscr value */ \
+ blr
+
+/*
+ * Single operation with three input operands
+ *
+ * R3 = (double*)&fpscr
+ * R4 = (u32*)&result
+ * R5 = (u32*)&param1
+ * R6 = (u32*)&param2
+ * R7 = (u32*)&param3
+ */
+#define FPS_THREE_IN(name) \
+_GLOBAL(fps_ ## name); \
+ lfd 0,0(r3); /* load up fpscr value */ \
+ MTFSF_L(0); \
+ lfs 0,0(r5); \
+ lfs 1,0(r6); \
+ lfs 2,0(r7); \
+ \
+ name 0,0,1,2; \
+ \
+ stfs 0,0(r4); \
+ mffs 0; \
+ stfd 0,0(r3); /* save new fpscr value */ \
+ blr
+
+FPS_ONE_IN(fres)
+FPS_ONE_IN(frsqrte)
+FPS_ONE_IN(fsqrts)
+FPS_TWO_IN(fadds)
+FPS_TWO_IN(fdivs)
+FPS_TWO_IN(fmuls)
+FPS_TWO_IN(fsubs)
+FPS_THREE_IN(fmadds)
+FPS_THREE_IN(fmsubs)
+FPS_THREE_IN(fnmadds)
+FPS_THREE_IN(fnmsubs)
+FPS_THREE_IN(fsel)
+
+
+/* Instructions operating on double parameters */
+
+/*
+ * Beginning of double instruction processing
+ *
+ * R3 = (double*)&fpscr
+ * R4 = (u32*)&cr
+ * R5 = (double*)&result
+ * R6 = (double*)&param1
+ * R7 = (double*)&param2 [load_two]
+ * R8 = (double*)&param3 [load_three]
+ * LR = instruction call function
+ */
+fpd_load_three:
+ lfd 2,0(r8) /* load param3 */
+fpd_load_two:
+ lfd 1,0(r7) /* load param2 */
+fpd_load_one:
+ lfd 0,0(r6) /* load param1 */
+fpd_load_none:
+ lfd 3,0(r3) /* load up fpscr value */
+ MTFSF_L(3)
+ lwz r6, 0(r4) /* load cr */
+ mtcr r6
+ blr
+
+/*
+ * End of double instruction processing
+ *
+ * R3 = (double*)&fpscr
+ * R4 = (u32*)&cr
+ * R5 = (double*)&result
+ * LR = caller of instruction call function
+ */
+fpd_return:
+ mfcr r6
+ stfd 0,0(r5) /* save result */
+ mffs 0
+ stfd 0,0(r3) /* save new fpscr value */
+ stw r6,0(r4) /* save new cr value */
+ blr
+
+/*
+ * Double operation with no input operand
+ *
+ * R3 = (double*)&fpscr
+ * R4 = (u32*)&cr
+ * R5 = (double*)&result
+ */
+#define FPD_NONE_IN(name) \
+_GLOBAL(fpd_ ## name); \
+ mflr r12; \
+ bl fpd_load_none; \
+ mtlr r12; \
+ \
+ name. 0; /* call instruction */ \
+ b fpd_return
+
+/*
+ * Double operation with one input operand
+ *
+ * R3 = (double*)&fpscr
+ * R4 = (u32*)&cr
+ * R5 = (double*)&result
+ * R6 = (double*)&param1
+ */
+#define FPD_ONE_IN(name) \
+_GLOBAL(fpd_ ## name); \
+ mflr r12; \
+ bl fpd_load_one; \
+ mtlr r12; \
+ \
+ name. 0,0; /* call instruction */ \
+ b fpd_return
+
+/*
+ * Double operation with two input operands
+ *
+ * R3 = (double*)&fpscr
+ * R4 = (u32*)&cr
+ * R5 = (double*)&result
+ * R6 = (double*)&param1
+ * R7 = (double*)&param2
+ */
+#define FPD_TWO_IN(name) \
+_GLOBAL(fpd_ ## name); \
+ mflr r12; \
+ bl fpd_load_two; \
+ mtlr r12; \
+ \
+ name. 0,0,1; /* call instruction */ \
+ b fpd_return
+
+/*
+ * CR Double operation with two input operands
+ *
+ * R3 = (double*)&fpscr
+ * R4 = (u32*)&cr
+ * R5 = (double*)&param1
+ * R6 = (double*)&param2
+ */
+#define FPD_TWO_IN_CR(name) \
+_GLOBAL(fpd_ ## name); \
+ lfd 1,0(r6); /* load param2 */ \
+ lfd 0,0(r5); /* load param1 */ \
+ lfd 3,0(r3); /* load up fpscr value */ \
+ MTFSF_L(3); \
+ lwz r6, 0(r4); /* load cr */ \
+ mtcr r6; \
+ \
+ name 0,0,1; /* call instruction */ \
+ mfcr r6; \
+ mffs 0; \
+ stfd 0,0(r3); /* save new fpscr value */ \
+ stw r6,0(r4); /* save new cr value */ \
+ blr
+
+/*
+ * Double operation with three input operands
+ *
+ * R3 = (double*)&fpscr
+ * R4 = (u32*)&cr
+ * R5 = (double*)&result
+ * R6 = (double*)&param1
+ * R7 = (double*)&param2
+ * R8 = (double*)&param3
+ */
+#define FPD_THREE_IN(name) \
+_GLOBAL(fpd_ ## name); \
+ mflr r12; \
+ bl fpd_load_three; \
+ mtlr r12; \
+ \
+ name. 0,0,1,2; /* call instruction */ \
+ b fpd_return
+
+FPD_ONE_IN(fsqrts)
+FPD_ONE_IN(frsqrtes)
+FPD_ONE_IN(fres)
+FPD_ONE_IN(frsp)
+FPD_ONE_IN(fctiw)
+FPD_ONE_IN(fctiwz)
+FPD_ONE_IN(fsqrt)
+FPD_ONE_IN(fre)
+FPD_ONE_IN(frsqrte)
+FPD_ONE_IN(fneg)
+FPD_ONE_IN(fabs)
+FPD_TWO_IN(fadds)
+FPD_TWO_IN(fsubs)
+FPD_TWO_IN(fdivs)
+FPD_TWO_IN(fmuls)
+FPD_TWO_IN_CR(fcmpu)
+FPD_TWO_IN(fcpsgn)
+FPD_TWO_IN(fdiv)
+FPD_TWO_IN(fadd)
+FPD_TWO_IN(fmul)
+FPD_TWO_IN_CR(fcmpo)
+FPD_TWO_IN(fsub)
+FPD_THREE_IN(fmsubs)
+FPD_THREE_IN(fmadds)
+FPD_THREE_IN(fnmsubs)
+FPD_THREE_IN(fnmadds)
+FPD_THREE_IN(fsel)
+FPD_THREE_IN(fmsub)
+FPD_THREE_IN(fmadd)
+FPD_THREE_IN(fnmsub)
+FPD_THREE_IN(fnmadd)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 297fcd2ff7d0..9b8683f39e05 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -70,7 +70,7 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
case EMULATE_FAIL:
/* XXX Deliver Program interrupt to guest. */
printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
- vcpu->arch.last_inst);
+ kvmppc_get_last_inst(vcpu));
r = RESUME_HOST;
break;
default:
@@ -148,6 +148,10 @@ int kvm_dev_ioctl_check_extension(long ext)
switch (ext) {
case KVM_CAP_PPC_SEGSTATE:
+ case KVM_CAP_PPC_PAIRED_SINGLES:
+ case KVM_CAP_PPC_UNSET_IRQ:
+ case KVM_CAP_ENABLE_CAP:
+ case KVM_CAP_PPC_OSI:
r = 1;
break;
case KVM_CAP_COALESCED_MMIO:
@@ -193,12 +197,17 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
struct kvm_vcpu *vcpu;
vcpu = kvmppc_core_vcpu_create(kvm, id);
- kvmppc_create_vcpu_debugfs(vcpu, id);
+ if (!IS_ERR(vcpu))
+ kvmppc_create_vcpu_debugfs(vcpu, id);
return vcpu;
}
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
+ /* Make sure we're not using the vcpu anymore */
+ hrtimer_cancel(&vcpu->arch.dec_timer);
+ tasklet_kill(&vcpu->arch.tasklet);
+
kvmppc_remove_vcpu_debugfs(vcpu);
kvmppc_core_vcpu_free(vcpu);
}
@@ -278,7 +287,7 @@ static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
struct kvm_run *run)
{
- ulong gpr;
+ u64 gpr;
if (run->mmio.len > sizeof(gpr)) {
printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
@@ -287,6 +296,7 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
if (vcpu->arch.mmio_is_bigendian) {
switch (run->mmio.len) {
+ case 8: gpr = *(u64 *)run->mmio.data; break;
case 4: gpr = *(u32 *)run->mmio.data; break;
case 2: gpr = *(u16 *)run->mmio.data; break;
case 1: gpr = *(u8 *)run->mmio.data; break;
@@ -300,7 +310,43 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
}
}
+ if (vcpu->arch.mmio_sign_extend) {
+ switch (run->mmio.len) {
+#ifdef CONFIG_PPC64
+ case 4:
+ gpr = (s64)(s32)gpr;
+ break;
+#endif
+ case 2:
+ gpr = (s64)(s16)gpr;
+ break;
+ case 1:
+ gpr = (s64)(s8)gpr;
+ break;
+ }
+ }
+
kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
+
+ switch (vcpu->arch.io_gpr & KVM_REG_EXT_MASK) {
+ case KVM_REG_GPR:
+ kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
+ break;
+ case KVM_REG_FPR:
+ vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
+ break;
+#ifdef CONFIG_PPC_BOOK3S
+ case KVM_REG_QPR:
+ vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
+ break;
+ case KVM_REG_FQPR:
+ vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
+ vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
+ break;
+#endif
+ default:
+ BUG();
+ }
}
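+
+/*
+ * io_gpr doubles as a routing tag for MMIO completion: the low bits
+ * (KVM_REG_MASK) select the register number and the high bits
+ * (KVM_REG_EXT_MASK) select the register file, so a caller can ask for
+ * e.g. KVM_REG_FPR | rs and have the loaded value land in fpr[rs].
+ * Sketch of the decode (constants defined in the headers):
+ *
+ *	int reg  = vcpu->arch.io_gpr & KVM_REG_MASK;
+ *	int file = vcpu->arch.io_gpr & KVM_REG_EXT_MASK;
+ *
+ * A sign-extending load (mmio_sign_extend) widens first, so a 2-byte
+ * big-endian read of 0x8000 is delivered as 0xffffffffffff8000.
+ */
+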
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
@@ -319,12 +365,25 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu->arch.mmio_is_bigendian = is_bigendian;
vcpu->mmio_needed = 1;
vcpu->mmio_is_write = 0;
+ vcpu->arch.mmio_sign_extend = 0;
return EMULATE_DO_MMIO;
}
+/* Same as above, but sign extends */
+int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int rt, unsigned int bytes, int is_bigendian)
+{
+ int r;
+
+ r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
+ vcpu->arch.mmio_sign_extend = 1;
+
+ return r;
+}
+
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
- u32 val, unsigned int bytes, int is_bigendian)
+ u64 val, unsigned int bytes, int is_bigendian)
{
void *data = run->mmio.data;
@@ -342,6 +401,7 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* Store the value at the lowest bytes in 'data'. */
if (is_bigendian) {
switch (bytes) {
+ case 8: *(u64 *)data = val; break;
case 4: *(u32 *)data = val; break;
case 2: *(u16 *)data = val; break;
case 1: *(u8 *)data = val; break;
@@ -376,6 +436,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
if (!vcpu->arch.dcr_is_write)
kvmppc_complete_dcr_load(vcpu, run);
vcpu->arch.dcr_needed = 0;
+ } else if (vcpu->arch.osi_needed) {
+ u64 *gprs = run->osi.gprs;
+ int i;
+
+ for (i = 0; i < 32; i++)
+ kvmppc_set_gpr(vcpu, i, gprs[i]);
+ vcpu->arch.osi_needed = 0;
}
kvmppc_core_deliver_interrupts(vcpu);
@@ -396,7 +463,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
- kvmppc_core_queue_external(vcpu, irq);
+ if (irq->irq == KVM_INTERRUPT_UNSET)
+ kvmppc_core_dequeue_external(vcpu, irq);
+ else
+ kvmppc_core_queue_external(vcpu, irq);
if (waitqueue_active(&vcpu->wq)) {
wake_up_interruptible(&vcpu->wq);
@@ -406,6 +476,27 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
return 0;
}
+static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
+ struct kvm_enable_cap *cap)
+{
+ int r;
+
+ if (cap->flags)
+ return -EINVAL;
+
+ switch (cap->cap) {
+ case KVM_CAP_PPC_OSI:
+ r = 0;
+ vcpu->arch.osi_enabled = true;
+ break;
+ default:
+ r = -EINVAL;
+ break;
+ }
+
+ return r;
+}
+
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
@@ -434,6 +525,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
break;
}
+ case KVM_ENABLE_CAP:
+ {
+ struct kvm_enable_cap cap;
+ r = -EFAULT;
+ if (copy_from_user(&cap, argp, sizeof(cap)))
+ goto out;
+ r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
+ break;
+ }
default:
r = -EINVAL;
}
diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S
index 64e2e499e32a..3ac0cd3a5373 100644
--- a/arch/powerpc/lib/string.S
+++ b/arch/powerpc/lib/string.S
@@ -71,7 +71,7 @@ _GLOBAL(strcmp)
_GLOBAL(strncmp)
PPC_LCMPI r5,0
- beqlr
+ ble- 2f
mtctr r5
addi r5,r3,-1
addi r4,r4,-1
@@ -82,6 +82,8 @@ _GLOBAL(strncmp)
beqlr 1
bdnzt eq,1b
blr
+2: li r3,0
+ blr
_GLOBAL(strlen)
addi r4,r3,-1
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c
index 3986264b0993..d8c6efb32bc6 100644
--- a/arch/powerpc/mm/44x_mmu.c
+++ b/arch/powerpc/mm/44x_mmu.c
@@ -38,7 +38,9 @@ unsigned int tlb_44x_index; /* = 0 */
unsigned int tlb_44x_hwater = PPC44x_TLB_SIZE - 1 - PPC44x_EARLY_TLBS;
int icache_44x_need_flush;
-static void __init ppc44x_update_tlb_hwater(void)
+unsigned long tlb_47x_boltmap[1024/8];
+
+static void __cpuinit ppc44x_update_tlb_hwater(void)
{
extern unsigned int tlb_44x_patch_hwater_D[];
extern unsigned int tlb_44x_patch_hwater_I[];
@@ -59,7 +61,7 @@ static void __init ppc44x_update_tlb_hwater(void)
}
/*
- * "Pins" a 256MB TLB entry in AS0 for kernel lowmem
+ * "Pins" a 256MB TLB entry in AS0 for kernel lowmem for 44x type MMU
*/
static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys)
{
@@ -67,12 +69,18 @@ static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys)
ppc44x_update_tlb_hwater();
+ mtspr(SPRN_MMUCR, 0);
+
__asm__ __volatile__(
"tlbwe %2,%3,%4\n"
"tlbwe %1,%3,%5\n"
"tlbwe %0,%3,%6\n"
:
+#ifdef CONFIG_PPC47x
+ : "r" (PPC47x_TLB2_S_RWX),
+#else
: "r" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G),
+#endif
"r" (phys),
"r" (virt | PPC44x_TLB_VALID | PPC44x_TLB_256M),
"r" (entry),
@@ -81,8 +89,93 @@ static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys)
"i" (PPC44x_TLB_ATTRIB));
}
+static int __init ppc47x_find_free_bolted(void)
+{
+ unsigned int mmube0 = mfspr(SPRN_MMUBE0);
+ unsigned int mmube1 = mfspr(SPRN_MMUBE1);
+
+ if (!(mmube0 & MMUBE0_VBE0))
+ return 0;
+ if (!(mmube0 & MMUBE0_VBE1))
+ return 1;
+ if (!(mmube0 & MMUBE0_VBE2))
+ return 2;
+ if (!(mmube1 & MMUBE1_VBE3))
+ return 3;
+ if (!(mmube1 & MMUBE1_VBE4))
+ return 4;
+ if (!(mmube1 & MMUBE1_VBE5))
+ return 5;
+ return -1;
+}
+
+static void __init ppc47x_update_boltmap(void)
+{
+ unsigned int mmube0 = mfspr(SPRN_MMUBE0);
+ unsigned int mmube1 = mfspr(SPRN_MMUBE1);
+
+ if (mmube0 & MMUBE0_VBE0)
+ __set_bit((mmube0 >> MMUBE0_IBE0_SHIFT) & 0xff,
+ tlb_47x_boltmap);
+ if (mmube0 & MMUBE0_VBE1)
+ __set_bit((mmube0 >> MMUBE0_IBE1_SHIFT) & 0xff,
+ tlb_47x_boltmap);
+ if (mmube0 & MMUBE0_VBE2)
+ __set_bit((mmube0 >> MMUBE0_IBE2_SHIFT) & 0xff,
+ tlb_47x_boltmap);
+ if (mmube1 & MMUBE1_VBE3)
+ __set_bit((mmube1 >> MMUBE1_IBE3_SHIFT) & 0xff,
+ tlb_47x_boltmap);
+ if (mmube1 & MMUBE1_VBE4)
+ __set_bit((mmube1 >> MMUBE1_IBE4_SHIFT) & 0xff,
+ tlb_47x_boltmap);
+ if (mmube1 & MMUBE1_VBE5)
+ __set_bit((mmube1 >> MMUBE1_IBE5_SHIFT) & 0xff,
+ tlb_47x_boltmap);
+}
+
+/*
+ * "Pins" a 256MB TLB entry in AS0 for kernel lowmem for 47x type MMU
+ */
+static void __cpuinit ppc47x_pin_tlb(unsigned int virt, unsigned int phys)
+{
+ unsigned int rA;
+ int bolted;
+
+ /* Base rA is HW way select, way 0, bolted bit set */
+ rA = 0x88000000;
+
+ /* Look for a bolted entry slot */
+ bolted = ppc47x_find_free_bolted();
+ BUG_ON(bolted < 0);
+
+ /* Insert bolted slot number */
+ rA |= bolted << 24;
+
+ pr_debug("256M TLB entry for 0x%08x->0x%08x in bolt slot %d\n",
+ virt, phys, bolted);
+
+ mtspr(SPRN_MMUCR, 0);
+
+ __asm__ __volatile__(
+ "tlbwe %2,%3,0\n"
+ "tlbwe %1,%3,1\n"
+ "tlbwe %0,%3,2\n"
+ :
+ : "r" (PPC47x_TLB2_SW | PPC47x_TLB2_SR |
+ PPC47x_TLB2_SX
+#ifdef CONFIG_SMP
+ | PPC47x_TLB2_M
+#endif
+ ),
+ "r" (phys),
+ "r" (virt | PPC47x_TLB0_VALID | PPC47x_TLB0_256M),
+ "r" (rA));
+}
+
void __init MMU_init_hw(void)
{
+ /* This is not useful on 47x but won't hurt either */
ppc44x_update_tlb_hwater();
flush_instruction_cache();
@@ -95,8 +188,51 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
/* Pin in enough TLBs to cover any lowmem not covered by the
* initial 256M mapping established in head_44x.S */
for (addr = PPC_PIN_SIZE; addr < lowmem_end_addr;
- addr += PPC_PIN_SIZE)
- ppc44x_pin_tlb(addr + PAGE_OFFSET, addr);
+ addr += PPC_PIN_SIZE) {
+ if (mmu_has_feature(MMU_FTR_TYPE_47x))
+ ppc47x_pin_tlb(addr + PAGE_OFFSET, addr);
+ else
+ ppc44x_pin_tlb(addr + PAGE_OFFSET, addr);
+ }
+ if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
+ ppc47x_update_boltmap();
+#ifdef DEBUG
+ {
+ int i;
+
+ printk(KERN_DEBUG "bolted entries: ");
+ for (i = 0; i < 256; i++) {
+ if (test_bit(i, tlb_47x_boltmap))
+ printk("%d ", i);
+ }
+ printk("\n");
+ }
+#endif /* DEBUG */
+ }
return total_lowmem;
}
+
+#ifdef CONFIG_SMP
+void __cpuinit mmu_init_secondary(int cpu)
+{
+ unsigned long addr;
+
+ /* Pin in enough TLBs to cover any lowmem not covered by the
+ * initial 256M mapping established in head_44x.S
+ *
+ * WARNING: This is called with only the first 256M of the
+ * linear mapping in the TLB and we can't take faults yet
+ * so beware of what this code uses. It runs off a temporary
+ * stack. current (r2) isn't initialized, smp_processor_id()
+ * will not work, current thread info isn't accessible, ...
+ */
+ for (addr = PPC_PIN_SIZE; addr < lowmem_end_addr;
+ addr += PPC_PIN_SIZE) {
+ if (mmu_has_feature(MMU_FTR_TYPE_47x))
+ ppc47x_pin_tlb(addr + PAGE_OFFSET, addr);
+ else
+ ppc44x_pin_tlb(addr + PAGE_OFFSET, addr);
+ }
+}
+#endif /* CONFIG_SMP */
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 26fb6b990b0a..1bd712c33ce2 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -151,13 +151,14 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
if (!user_mode(regs) && (address >= TASK_SIZE))
return SIGSEGV;
-#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
+#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \
+ defined(CONFIG_PPC_BOOK3S_64))
if (error_code & DSISR_DABRMATCH) {
/* DABR match */
do_dabr(regs, address, error_code);
return 0;
}
-#endif /* !(CONFIG_4xx || CONFIG_BOOKE)*/
+#endif
if (in_atomic() || mm == NULL) {
if (!user_mode(regs))
@@ -307,7 +308,6 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- survive:
ret = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
if (unlikely(ret & VM_FAULT_ERROR)) {
if (ret & VM_FAULT_OOM)
@@ -359,15 +359,10 @@ bad_area_nosemaphore:
*/
out_of_memory:
up_read(&mm->mmap_sem);
- if (is_global_init(current)) {
- yield();
- down_read(&mm->mmap_sem);
- goto survive;
- }
- printk("VM: killing process %s\n", current->comm);
- if (user_mode(regs))
- do_group_exit(SIGKILL);
- return SIGKILL;
+ if (!user_mode(regs))
+ return SIGKILL;
+ pagefault_out_of_memory();
+ return 0;
do_sigbus:
up_read(&mm->mmap_sem);
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index d7fa50b09b4a..e267f223fdff 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -252,6 +252,47 @@ static void __meminit vmemmap_create_mapping(unsigned long start,
}
#endif /* CONFIG_PPC_BOOK3E */
+struct vmemmap_backing *vmemmap_list;
+
+static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node)
+{
+ static struct vmemmap_backing *next;
+ static int num_left;
+
+ /* allocate a page when required and hand out chunks */
+ if (!next || !num_left) {
+ next = vmemmap_alloc_block(PAGE_SIZE, node);
+ if (unlikely(!next)) {
+ WARN_ON(1);
+ return NULL;
+ }
+ num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
+ }
+
+ num_left--;
+
+ return next++;
+}
+
+static __meminit void vmemmap_list_populate(unsigned long phys,
+ unsigned long start,
+ int node)
+{
+ struct vmemmap_backing *vmem_back;
+
+ vmem_back = vmemmap_list_alloc(node);
+ if (unlikely(!vmem_back)) {
+ WARN_ON(1);
+ return;
+ }
+
+ vmem_back->phys = phys;
+ vmem_back->virt_addr = start;
+ vmem_back->list = vmemmap_list;
+
+ vmemmap_list = vmem_back;
+}
+
int __meminit vmemmap_populate(struct page *start_page,
unsigned long nr_pages, int node)
{
@@ -276,6 +317,8 @@ int __meminit vmemmap_populate(struct page *start_page,
if (!p)
return -ENOMEM;
+ vmemmap_list_populate(__pa(p), start, node);
+
pr_debug(" * %016lx..%016lx allocated at %p\n",
start, start + page_size, p);
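
The init_64.c change above records, for every vmemmap block it maps, the physical address backing it in the new vmemmap_list. A hedged sketch of how a later consumer could walk that list to translate a vmemmap address back to its physical backing; the block-size parameter is a stand-in for whatever size vmemmap_populate() actually used:

/*
 * Illustrative only (not part of the patch). Uses the phys, virt_addr
 * and list fields introduced in struct vmemmap_backing above.
 */
static unsigned long vmemmap_list_lookup(unsigned long vmemmap_addr,
					 unsigned long block_size)
{
	struct vmemmap_backing *vmem_back;

	for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list)
		if (vmemmap_addr >= vmem_back->virt_addr &&
		    vmemmap_addr < vmem_back->virt_addr + block_size)
			return vmem_back->phys +
			       (vmemmap_addr - vmem_back->virt_addr);

	return 0;	/* no backing found */
}
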
diff --git a/arch/powerpc/mm/mmu_context_hash32.c b/arch/powerpc/mm/mmu_context_hash32.c
index 0dfba2bf7f31..d0ee554e86e4 100644
--- a/arch/powerpc/mm/mmu_context_hash32.c
+++ b/arch/powerpc/mm/mmu_context_hash32.c
@@ -60,11 +60,7 @@
static unsigned long next_mmu_context;
static unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
-
-/*
- * Set up the context for a new address space.
- */
-int init_new_context(struct task_struct *t, struct mm_struct *mm)
+unsigned long __init_new_context(void)
{
unsigned long ctx = next_mmu_context;
@@ -74,19 +70,38 @@ int init_new_context(struct task_struct *t, struct mm_struct *mm)
ctx = 0;
}
next_mmu_context = (ctx + 1) & LAST_CONTEXT;
- mm->context.id = ctx;
+
+ return ctx;
+}
+EXPORT_SYMBOL_GPL(__init_new_context);
+
+/*
+ * Set up the context for a new address space.
+ */
+int init_new_context(struct task_struct *t, struct mm_struct *mm)
+{
+ mm->context.id = __init_new_context();
return 0;
}
/*
+ * Free a context ID. Make sure to call this with preempt disabled!
+ */
+void __destroy_context(unsigned long ctx)
+{
+ clear_bit(ctx, context_map);
+}
+EXPORT_SYMBOL_GPL(__destroy_context);
+
+/*
* We're finished using the context for an address space.
*/
void destroy_context(struct mm_struct *mm)
{
preempt_disable();
if (mm->context.id != NO_CONTEXT) {
- clear_bit(mm->context.id, context_map);
+ __destroy_context(mm->context.id);
mm->context.id = NO_CONTEXT;
}
preempt_enable();
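
Exporting __init_new_context() and __destroy_context() lets other in-kernel code borrow hash-MMU context IDs without going through a full mm_struct. A small sketch of how such a user might pair the two calls, respecting the preempt-disabled requirement noted above (the caller is assumed):

/* Illustrative only (not part of the patch). */
static unsigned long borrow_context(void)
{
	return __init_new_context();
}

static void return_context(unsigned long ctx)
{
	preempt_disable();		/* required by __destroy_context() */
	__destroy_context(ctx);
	preempt_enable();
}
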
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 1f2d9ff09895..ddfd7ad4e1d6 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -395,10 +395,18 @@ void __init mmu_context_init(void)
* the PID/TID comparison is disabled, so we can use a TID of zero
* to represent all kernel pages as shared among all contexts.
* -- Dan
+ *
+ * The IBM 47x core supports 16-bit PIDs, thus 65535 contexts. We
+ * should normally never have to steal, though the facility is
+ * present if needed.
+ * -- BenH
*/
if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
first_context = 0;
last_context = 15;
+ } else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
+ first_context = 1;
+ last_context = 65535;
} else {
first_context = 1;
last_context = 255;
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index d49a77503e19..eb11d5d2aa94 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -69,12 +69,7 @@ static inline void _tlbil_va(unsigned long address, unsigned int pid,
}
#endif /* CONIFG_8xx */
-/*
- * As of today, we don't support tlbivax broadcast on any
- * implementation. When that becomes the case, this will be
- * an extern.
- */
-#ifdef CONFIG_PPC_BOOK3E
+#if defined(CONFIG_PPC_BOOK3E) || defined(CONFIG_PPC_47x)
extern void _tlbivax_bcast(unsigned long address, unsigned int pid,
unsigned int tsize, unsigned int ind);
#else
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index eaa7633515b7..aace5e5dfa0a 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -33,16 +33,41 @@ static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }
int numa_cpu_lookup_table[NR_CPUS];
-cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
+cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];
EXPORT_SYMBOL(numa_cpu_lookup_table);
-EXPORT_SYMBOL(numa_cpumask_lookup_table);
+EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);
static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
+/*
+ * Allocate node_to_cpumask_map based on number of available nodes
+ * Requires node_possible_map to be valid.
+ *
+ * Note: node_to_cpumask() is not valid until after this is done.
+ */
+static void __init setup_node_to_cpumask_map(void)
+{
+ unsigned int node, num = 0;
+
+ /* setup nr_node_ids if not done yet */
+ if (nr_node_ids == MAX_NUMNODES) {
+ for_each_node_mask(node, node_possible_map)
+ num = node;
+ nr_node_ids = num + 1;
+ }
+
+ /* allocate the map */
+ for (node = 0; node < nr_node_ids; node++)
+ alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
+
+ /* cpumask_of_node() will now work */
+ dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
+}
+
static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn,
unsigned int *nid)
{
@@ -138,8 +163,8 @@ static void __cpuinit map_cpu_to_node(int cpu, int node)
dbg("adding cpu %d to node %d\n", cpu, node);
- if (!(cpu_isset(cpu, numa_cpumask_lookup_table[node])))
- cpu_set(cpu, numa_cpumask_lookup_table[node]);
+ if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
+ cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}
#ifdef CONFIG_HOTPLUG_CPU
@@ -149,8 +174,8 @@ static void unmap_cpu_from_node(unsigned long cpu)
dbg("removing cpu %lu from node %d\n", cpu, node);
- if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
- cpu_clear(cpu, numa_cpumask_lookup_table[node]);
+ if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
+ cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
} else {
printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
cpu, node);
@@ -750,8 +775,9 @@ void __init dump_numa_cpu_topology(void)
* If we used a CPU iterator here we would miss printing
* the holes in the cpumap.
*/
- for (cpu = 0; cpu < NR_CPUS; cpu++) {
- if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
+ if (cpumask_test_cpu(cpu,
+ node_to_cpumask_map[node])) {
if (count == 0)
printk(" %u", cpu);
++count;
@@ -763,7 +789,7 @@ void __init dump_numa_cpu_topology(void)
}
if (count > 1)
- printk("-%u", NR_CPUS - 1);
+ printk("-%u", nr_cpu_ids - 1);
printk("\n");
}
}
@@ -939,10 +965,6 @@ void __init do_init_bootmem(void)
else
dump_numa_memory_topology();
- register_cpu_notifier(&ppc64_numa_nb);
- cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
- (void *)(unsigned long)boot_cpuid);
-
for_each_online_node(nid) {
unsigned long start_pfn, end_pfn;
void *bootmem_vaddr;
@@ -996,6 +1018,16 @@ void __init do_init_bootmem(void)
}
init_bootmem_done = 1;
+
+ /*
+ * Now bootmem is initialised we can create the node to cpumask
+ * lookup tables and setup the cpu callback to populate them.
+ */
+ setup_node_to_cpumask_map();
+
+ register_cpu_notifier(&ppc64_numa_nb);
+ cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
+ (void *)(unsigned long)boot_cpuid);
}
void __init paging_init(void)
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index b9243e7557ae..9fc02dc72ce9 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -146,6 +146,14 @@ ioremap_flags(phys_addr_t addr, unsigned long size, unsigned long flags)
/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
flags &= ~(_PAGE_USER | _PAGE_EXEC);
+#ifdef _PAGE_BAP_SR
+ /* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
+ * which means that we just cleared supervisor access... oops ;-) This
+ * restores it
+ */
+ flags |= _PAGE_BAP_SR;
+#endif
+
return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_flags);
@@ -385,11 +393,7 @@ static int __change_page_attr(struct page *page, pgprot_t prot)
return -EINVAL;
__set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
wmb();
-#ifdef CONFIG_PPC_STD_MMU
- flush_hash_pages(0, address, pmd_val(*kpmd), 1);
-#else
flush_tlb_page(NULL, address);
-#endif
pte_unmap(kpte);
return 0;
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index d95679a5fb29..d050fc8d9714 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -265,6 +265,14 @@ void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size,
/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
flags &= ~(_PAGE_USER | _PAGE_EXEC);
+#ifdef _PAGE_BAP_SR
+ /* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
+ * which means that we just cleared supervisor access... oops ;-) This
+ * restores it
+ */
+ flags |= _PAGE_BAP_SR;
+#endif
+
if (ppc_md.ioremap)
return ppc_md.ioremap(addr, size, flags, caller);
return __ioremap_caller(addr, size, flags, caller);
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
index bbdc5b577b85..e925cb58afd9 100644
--- a/arch/powerpc/mm/tlb_nohash_low.S
+++ b/arch/powerpc/mm/tlb_nohash_low.S
@@ -10,7 +10,7 @@
* - tlbil_va
* - tlbil_pid
* - tlbil_all
- * - tlbivax_bcast (not yet)
+ * - tlbivax_bcast
*
* Code mostly moved over from misc_32.S
*
@@ -33,6 +33,7 @@
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
+#include <asm/bug.h>
#if defined(CONFIG_40x)
@@ -65,7 +66,7 @@ _GLOBAL(__tlbil_va)
* Nothing to do for 8xx, everything is inline
*/
-#elif defined(CONFIG_44x)
+#elif defined(CONFIG_44x) /* Includes 47x */
/*
* 440 implementation uses tlbsx/we for tlbil_va and a full sweep
@@ -73,7 +74,13 @@ _GLOBAL(__tlbil_va)
*/
_GLOBAL(__tlbil_va)
mfspr r5,SPRN_MMUCR
- rlwimi r5,r4,0,24,31 /* Set TID */
+ mfmsr r10
+
+ /*
+ * We write 16 bits of STID since 47x supports that much; we
+ * will never be passed out-of-bounds values on 440 (hopefully)
+ */
+ rlwimi r5,r4,0,16,31
/* We have to run the search with interrupts disabled, otherwise
* an interrupt which causes a TLB miss can clobber the MMUCR
@@ -83,24 +90,41 @@ _GLOBAL(__tlbil_va)
* and restoring MMUCR, so only normal interrupts have to be
* taken care of.
*/
- mfmsr r4
wrteei 0
mtspr SPRN_MMUCR,r5
- tlbsx. r3, 0, r3
- wrtee r4
- bne 1f
+ tlbsx. r6,0,r3
+ bne 10f
sync
- /* There are only 64 TLB entries, so r3 < 64,
- * which means bit 22, is clear. Since 22 is
- * the V bit in the TLB_PAGEID, loading this
+BEGIN_MMU_FTR_SECTION
+ b 2f
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
+ /* On 440 There are only 64 TLB entries, so r3 < 64, which means bit
+ * 22, is clear. Since 22 is the V bit in the TLB_PAGEID, loading this
* value will invalidate the TLB entry.
*/
- tlbwe r3, r3, PPC44x_TLB_PAGEID
+ tlbwe r6,r6,PPC44x_TLB_PAGEID
isync
-1: blr
+10: wrtee r10
+ blr
+2:
+#ifdef CONFIG_PPC_47x
+ oris r7,r6,0x8000 /* specify way explicitly */
+ clrrwi r4,r3,12 /* get an EPN for the hashing with V = 0 */
+ ori r4,r4,PPC47x_TLBE_SIZE
+ tlbwe r4,r7,0 /* write it */
+ isync
+ wrtee r10
+ blr
+#else /* CONFIG_PPC_47x */
+1: trap
+ EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0;
+#endif /* !CONFIG_PPC_47x */
_GLOBAL(_tlbil_all)
_GLOBAL(_tlbil_pid)
+BEGIN_MMU_FTR_SECTION
+ b 2f
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
li r3,0
sync
@@ -115,6 +139,76 @@ _GLOBAL(_tlbil_pid)
isync
blr
+2:
+#ifdef CONFIG_PPC_47x
+ /* 476 variant. There's no simple way to do this, hopefully we'll
+ * try to limit the number of such full invalidates
+ */
+ mfmsr r11 /* Interrupts off */
+ wrteei 0
+ li r3,-1 /* Current set */
+ lis r10,tlb_47x_boltmap@h
+ ori r10,r10,tlb_47x_boltmap@l
+ lis r7,0x8000 /* Specify way explicitly */
+
+ b 9f /* For each set */
+
+1: li r9,4 /* Number of ways */
+ li r4,0 /* Current way */
+ li r6,0 /* Default entry value 0 */
+ andi. r0,r8,1 /* Check if way 0 is bolted */
+ mtctr r9 /* Load way counter */
+ bne- 3f /* Bolted, skip loading it */
+
+2: /* For each way */
+ or r5,r3,r4 /* Make way|index for tlbre */
+ rlwimi r5,r5,16,8,15 /* Copy index into position */
+ tlbre r6,r5,0 /* Read entry */
+3: addis r4,r4,0x2000 /* Next way */
+ andi. r0,r6,PPC47x_TLB0_VALID /* Valid entry ? */
+ beq 4f /* Nope, skip it */
+ rlwimi r7,r5,0,1,2 /* Insert way number */
+ rlwinm r6,r6,0,21,19 /* Clear V */
+ tlbwe r6,r7,0 /* Write it */
+4: bdnz 2b /* Loop for each way */
+ srwi r8,r8,1 /* Next boltmap bit */
+9: cmpwi cr1,r3,255 /* Last set done ? */
+ addi r3,r3,1 /* Next set */
+ beq cr1,1f /* End of loop */
+ andi. r0,r3,0x1f /* Need to load a new boltmap word ? */
+ bne 1b /* No, loop */
+ lwz r8,0(r10) /* Load boltmap entry */
+ addi r10,r10,4 /* Next word */
+ b 1b /* Then loop */
+1: isync /* Sync shadows */
+ wrtee r11
+#else /* CONFIG_PPC_47x */
+1: trap
+ EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0;
+#endif /* !CONFIG_PPC_47x */
+ blr
+
+#ifdef CONFIG_PPC_47x
+/*
+ * _tlbivax_bcast is only on 47x. We don't bother doing a runtime
+ * check though, it will blow up soon enough if we mistakenly try
+ * to use it on a 440.
+ */
+_GLOBAL(_tlbivax_bcast)
+ mfspr r5,SPRN_MMUCR
+ mfmsr r10
+ rlwimi r5,r4,0,16,31
+ wrteei 0
+ mtspr SPRN_MMUCR,r5
+/* tlbivax 0,r3 - use .long to avoid binutils deps */
+ .long 0x7c000624 | (r3 << 11)
+ isync
+ eieio
+ tlbsync
+ sync
+ wrtee r10
+ blr
+#endif /* CONFIG_PPC_47x */
#elif defined(CONFIG_FSL_BOOKE)
/*
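
The hand-assembled word in _tlbivax_bcast above exists only to avoid requiring a binutils new enough to know the tlbivax mnemonic. Assuming the standard Book-E X-form layout, the constant decodes as follows (r3 expands to 3 in this file, so the RB field ends up holding register 3):

/*
 * Illustrative only: 0x7c000624 == primary opcode 31, extended opcode
 * 786 (tlbivax), RA = 0; OR-ing (rb << 11) fills the RB field.
 */
#define TLBIVAX_BASE	((31u << 26) | (786u << 1))	/* 0x7c000624 */
#define TLBIVAX(rb)	(TLBIVAX_BASE | ((rb) << 11))	/* RB in bits 16-20 */
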
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index 7486bffd3ebb..eeba0a70e466 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -1,3 +1,12 @@
+config PPC_47x
+ bool "Support for 47x variant"
+ depends on 44x
+ default n
+ select MPIC
+ help
+ This option enables support for the 47x family of processors and is
+ not currently compatible with other 44x or 46x variants.
+
config BAMBOO
bool "Bamboo"
depends on 44x
@@ -151,6 +160,17 @@ config YOSEMITE
help
This option enables support for the AMCC PPC440EP evaluation board.
+config ISS4xx
+ bool "ISS 4xx Simulator"
+ depends on (44x || 40x)
+ default n
+ select 405GP if 40x
+ select 440GP if 44x && !PPC_47x
+ select PPC_FPU
+ select OF_RTC
+ help
+ This option enables support for the IBM ISS simulation environment
+
#config LUAN
# bool "Luan"
# depends on 44x
diff --git a/arch/powerpc/platforms/44x/Makefile b/arch/powerpc/platforms/44x/Makefile
index ee6185aeaa3b..82ff326e0795 100644
--- a/arch/powerpc/platforms/44x/Makefile
+++ b/arch/powerpc/platforms/44x/Makefile
@@ -5,3 +5,4 @@ obj-$(CONFIG_SAM440EP) += sam440ep.o
obj-$(CONFIG_WARP) += warp.o
obj-$(CONFIG_XILINX_VIRTEX_5_FXT) += virtex.o
obj-$(CONFIG_XILINX_ML510) += virtex_ml510.o
+obj-$(CONFIG_ISS4xx) += iss4xx.o
diff --git a/arch/powerpc/platforms/44x/iss4xx.c b/arch/powerpc/platforms/44x/iss4xx.c
new file mode 100644
index 000000000000..aa46e9d1e771
--- /dev/null
+++ b/arch/powerpc/platforms/44x/iss4xx.c
@@ -0,0 +1,167 @@
+/*
+ * PPC476 board specific routines
+ *
+ * Copyright 2010 Torez Smith, IBM Corporation.
+ *
+ * Based on earlier code:
+ * Matt Porter <mporter@kernel.crashing.org>
+ * Copyright 2002-2005 MontaVista Software Inc.
+ *
+ * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
+ * Copyright (c) 2003-2005 Zultys Technologies
+ *
+ * Rewritten and ported to the merged powerpc tree:
+ * Copyright 2007 David Gibson <dwg@au1.ibm.com>, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/of_platform.h>
+#include <linux/rtc.h>
+
+#include <asm/machdep.h>
+#include <asm/prom.h>
+#include <asm/udbg.h>
+#include <asm/time.h>
+#include <asm/uic.h>
+#include <asm/ppc4xx.h>
+#include <asm/mpic.h>
+#include <asm/mmu.h>
+
+static __initdata struct of_device_id iss4xx_of_bus[] = {
+ { .compatible = "ibm,plb4", },
+ { .compatible = "ibm,plb6", },
+ { .compatible = "ibm,opb", },
+ { .compatible = "ibm,ebc", },
+ {},
+};
+
+static int __init iss4xx_device_probe(void)
+{
+ of_platform_bus_probe(NULL, iss4xx_of_bus, NULL);
+ of_instantiate_rtc();
+
+ return 0;
+}
+machine_device_initcall(iss4xx, iss4xx_device_probe);
+
+/* We can have either UICs or MPICs */
+static void __init iss4xx_init_irq(void)
+{
+ struct device_node *np;
+
+ /* Find top level interrupt controller */
+ for_each_node_with_property(np, "interrupt-controller") {
+ if (of_get_property(np, "interrupts", NULL) == NULL)
+ break;
+ }
+ if (np == NULL)
+ panic("Can't find top level interrupt controller");
+
+ /* Check type and do appropriate initialization */
+ if (of_device_is_compatible(np, "ibm,uic")) {
+ uic_init_tree();
+ ppc_md.get_irq = uic_get_irq;
+#ifdef CONFIG_MPIC
+ } else if (of_device_is_compatible(np, "chrp,open-pic")) {
+ /* The MPIC driver will get everything it needs from the
+ * device-tree, just pass 0 to all arguments
+ */
+ struct mpic *mpic = mpic_alloc(np, 0, MPIC_PRIMARY, 0, 0,
+ " MPIC ");
+ BUG_ON(mpic == NULL);
+ mpic_init(mpic);
+ ppc_md.get_irq = mpic_get_irq;
+#endif
+ } else
+ panic("Unrecognized top level interrupt controller");
+}
+
+#ifdef CONFIG_SMP
+static void __cpuinit smp_iss4xx_setup_cpu(int cpu)
+{
+ mpic_setup_this_cpu();
+}
+
+static void __cpuinit smp_iss4xx_kick_cpu(int cpu)
+{
+ struct device_node *cpunode = of_get_cpu_node(cpu, NULL);
+ const u64 *spin_table_addr_prop;
+ u32 *spin_table;
+ extern void start_secondary_47x(void);
+
+ BUG_ON(cpunode == NULL);
+
+ /* Assume spin table. We could test for the enable-method in
+ * the device-tree but currently there's little point as it's
+ * our only supported method
+ */
+ spin_table_addr_prop = of_get_property(cpunode, "cpu-release-addr",
+ NULL);
+ if (spin_table_addr_prop == NULL) {
+ pr_err("CPU%d: Can't start, missing cpu-release-addr !\n", cpu);
+ return;
+ }
+
+ /* Assume it's mapped as part of the linear mapping. This is a bit
+ * fishy but will work fine for now
+ */
+ spin_table = (u32 *)__va(*spin_table_addr_prop);
+ pr_debug("CPU%d: Spin table mapped at %p\n", cpu, spin_table);
+
+ spin_table[3] = cpu;
+ smp_wmb();
+ spin_table[1] = __pa(start_secondary_47x);
+ mb();
+}
+
+static struct smp_ops_t iss_smp_ops = {
+ .probe = smp_mpic_probe,
+ .message_pass = smp_mpic_message_pass,
+ .setup_cpu = smp_iss4xx_setup_cpu,
+ .kick_cpu = smp_iss4xx_kick_cpu,
+ .give_timebase = smp_generic_give_timebase,
+ .take_timebase = smp_generic_take_timebase,
+};
+
+static void __init iss4xx_smp_init(void)
+{
+ if (mmu_has_feature(MMU_FTR_TYPE_47x))
+ smp_ops = &iss_smp_ops;
+}
+
+#else /* CONFIG_SMP */
+static void __init iss4xx_smp_init(void) { }
+#endif /* CONFIG_SMP */
+
+static void __init iss4xx_setup_arch(void)
+{
+ iss4xx_smp_init();
+}
+
+/*
+ * Called very early, MMU is off, device-tree isn't unflattened
+ */
+static int __init iss4xx_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+
+ if (!of_flat_dt_is_compatible(root, "ibm,iss-4xx"))
+ return 0;
+
+ return 1;
+}
+
+define_machine(iss4xx) {
+ .name = "ISS-4xx",
+ .probe = iss4xx_probe,
+ .progress = udbg_progress,
+ .init_IRQ = iss4xx_init_irq,
+ .setup_arch = iss4xx_setup_arch,
+ .restart = ppc4xx_reset_system,
+ .calibrate_decr = generic_calibrate_decr,
+};
diff --git a/arch/powerpc/platforms/512x/Kconfig b/arch/powerpc/platforms/512x/Kconfig
index 4dac9b0525a4..e9dca280a4d2 100644
--- a/arch/powerpc/platforms/512x/Kconfig
+++ b/arch/powerpc/platforms/512x/Kconfig
@@ -1,29 +1,24 @@
config PPC_MPC512x
- bool
+ bool "512x-based boards"
+ depends on 6xx
select FSL_SOC
select IPIC
select PPC_CLOCK
select PPC_PCI_CHOICE
select FSL_PCI if PCI
-config PPC_MPC5121
- bool
- select PPC_MPC512x
-
config MPC5121_ADS
bool "Freescale MPC5121E ADS"
- depends on 6xx
+ depends on PPC_MPC512x
select DEFAULT_UIMAGE
- select PPC_MPC5121
select MPC5121_ADS_CPLD
help
This option enables support for the MPC5121E ADS board.
config MPC5121_GENERIC
bool "Generic support for simple MPC5121 based boards"
- depends on 6xx
+ depends on PPC_MPC512x
select DEFAULT_UIMAGE
- select PPC_MPC5121
help
This option enables support for simple MPC5121 based boards
which do not need custom platform specific setup.
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index a8aae0b54579..d361f8119b1e 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -43,7 +43,7 @@ config 40x
select PPC_PCI_CHOICE
config 44x
- bool "AMCC 44x"
+ bool "AMCC 44x, 46x or 47x"
select PPC_DCR_NATIVE
select PPC_UDBG_16550
select 4xx_SOC
@@ -294,7 +294,7 @@ config PPC_PERF_CTRS
This enables the powerpc-specific perf_event back-end.
config SMP
- depends on PPC_BOOK3S || PPC_BOOK3E || FSL_BOOKE
+ depends on PPC_BOOK3S || PPC_BOOK3E || FSL_BOOKE || PPC_47x
bool "Symmetric multi-processing support"
---help---
This enables support for systems with more than one CPU. If you have
@@ -322,6 +322,7 @@ config NR_CPUS
config NOT_COHERENT_CACHE
bool
depends on 4xx || 8xx || E200 || PPC_MPC512x || GAMECUBE_COMMON
+ default n if PPC_47x
default y
config CHECK_CACHE_COHERENCY
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.c b/arch/powerpc/platforms/cell/cbe_cpufreq.c
index e6506cd0ff94..bfa2c0cb3d1e 100644
--- a/arch/powerpc/platforms/cell/cbe_cpufreq.c
+++ b/arch/powerpc/platforms/cell/cbe_cpufreq.c
@@ -118,7 +118,7 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
policy->cur = cbe_freqs[cur_pmode].frequency;
#ifdef CONFIG_SMP
- cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu));
+ cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
#endif
cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);
diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c
index 57e5b608fa1a..174a04ac4806 100644
--- a/arch/powerpc/platforms/embedded6xx/wii.c
+++ b/arch/powerpc/platforms/embedded6xx/wii.c
@@ -69,10 +69,10 @@ void __init wii_memory_fixups(void)
/*
* This is part of a workaround to allow the use of two
- * discontiguous RAM ranges on the Wii, even if this is
+ * discontinuous RAM ranges on the Wii, even if this is
* currently unsupported on 32-bit PowerPC Linux.
*
- * We coealesce the two memory ranges of the Wii into a
+ * We coalesce the two memory ranges of the Wii into a
* single range, then create a reservation for the "hole"
* between both ranges.
*/
diff --git a/arch/powerpc/platforms/iseries/pci.c b/arch/powerpc/platforms/iseries/pci.c
index b841c9a9db87..3fc2e6494b8b 100644
--- a/arch/powerpc/platforms/iseries/pci.c
+++ b/arch/powerpc/platforms/iseries/pci.c
@@ -32,6 +32,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/of.h>
+#include <linux/ratelimit.h>
#include <asm/types.h>
#include <asm/io.h>
@@ -584,14 +585,9 @@ static inline struct device_node *xlate_iomm_address(
orig_addr = (unsigned long __force)addr;
if ((orig_addr < BASE_IO_MEMORY) || (orig_addr >= max_io_memory)) {
- static unsigned long last_jiffies;
- static int num_printed;
+ static DEFINE_RATELIMIT_STATE(ratelimit, 60 * HZ, 10);
- if (time_after(jiffies, last_jiffies + 60 * HZ)) {
- last_jiffies = jiffies;
- num_printed = 0;
- }
- if (num_printed++ < 10)
+ if (__ratelimit(&ratelimit))
printk(KERN_ERR
"iSeries_%s: invalid access at IO address %p\n",
func, addr);
diff --git a/arch/powerpc/platforms/iseries/smp.c b/arch/powerpc/platforms/iseries/smp.c
index 722335e32fd4..6590850045af 100644
--- a/arch/powerpc/platforms/iseries/smp.c
+++ b/arch/powerpc/platforms/iseries/smp.c
@@ -83,7 +83,7 @@ static void smp_iSeries_message_pass(int target, int msg)
static int smp_iSeries_probe(void)
{
- return cpus_weight(cpu_possible_map);
+ return cpumask_weight(cpu_possible_mask);
}
static void smp_iSeries_kick_cpu(int nr)
diff --git a/arch/powerpc/platforms/pasemi/cpufreq.c b/arch/powerpc/platforms/pasemi/cpufreq.c
index d35e0520abf0..c16537bc0c6e 100644
--- a/arch/powerpc/platforms/pasemi/cpufreq.c
+++ b/arch/powerpc/platforms/pasemi/cpufreq.c
@@ -213,7 +213,7 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
pr_debug("current astate is at %d\n",cur_astate);
policy->cur = pas_freqs[cur_astate].frequency;
- cpumask_copy(policy->cpus, &cpu_online_map);
+ cpumask_copy(policy->cpus, cpu_online_mask);
ppc_proc_freq = policy->cur * 1000ul;
diff --git a/arch/powerpc/platforms/powermac/cpufreq_64.c b/arch/powerpc/platforms/powermac/cpufreq_64.c
index 3ca09d3ccce3..9650c6029c82 100644
--- a/arch/powerpc/platforms/powermac/cpufreq_64.c
+++ b/arch/powerpc/platforms/powermac/cpufreq_64.c
@@ -362,7 +362,7 @@ static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy)
/* secondary CPUs are tied to the primary one by the
* cpufreq core if in the secondary policy we tell it that
* it actually must be one policy together with all others. */
- cpumask_copy(policy->cpus, &cpu_online_map);
+ cpumask_copy(policy->cpus, cpu_online_mask);
cpufreq_frequency_table_get_attr(g5_cpu_freqs, policy->cpu);
return cpufreq_frequency_table_cpuinfo(policy,
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c
index f45331ab97cb..06a137c5b8bb 100644
--- a/arch/powerpc/platforms/powermac/low_i2c.c
+++ b/arch/powerpc/platforms/powermac/low_i2c.c
@@ -592,7 +592,7 @@ static void __init kw_i2c_probe(void)
/* Probe keywest-i2c busses */
for_each_compatible_node(np, "i2c","keywest-i2c") {
struct pmac_i2c_host_kw *host;
- int multibus, chans, i;
+ int multibus;
/* Found one, init a host structure */
host = kw_i2c_host_init(np);
@@ -614,6 +614,8 @@ static void __init kw_i2c_probe(void)
* parent type
*/
if (multibus) {
+ int chans, i;
+
parent = of_get_parent(np);
if (parent == NULL)
continue;
@@ -1258,8 +1260,7 @@ static void pmac_i2c_do_end(struct pmf_function *func, void *instdata)
if (inst == NULL)
return;
pmac_i2c_close(inst->bus);
- if (inst)
- kfree(inst);
+ kfree(inst);
}
static int pmac_i2c_do_read(PMF_STD_ARGS, u32 len)
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 15c2241f9c72..47a2b4488557 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -480,7 +480,7 @@ static void __init pmac_init_early(void)
#endif
/* SMP Init has to be done early as we need to patch up
- * cpu_possible_map before interrupt stacks are allocated
+ * cpu_possible_mask before interrupt stacks are allocated
* or kaboom...
*/
#ifdef CONFIG_SMP
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 6898e8241cd0..02d0b8e5b13c 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -315,7 +315,7 @@ static int __init smp_psurge_probe(void)
/* This is necessary because OF doesn't know about the
* secondary cpu(s), and thus there aren't nodes in the
* device tree for them, and smp_setup_cpu_maps hasn't
- * set their bits in cpu_present_map.
+ * set their bits in cpu_present_mask.
*/
if (ncpus > NR_CPUS)
ncpus = NR_CPUS;
@@ -944,7 +944,7 @@ void __init pmac_setup_smp(void)
}
#ifdef CONFIG_PPC32
else {
- /* We have to set bits in cpu_possible_map here since the
+ /* We have to set bits in cpu_possible_mask here since the
* secondary CPU(s) aren't in the device tree. Various
* things won't be initialized for CPUs not in the possible
* map, so we really need to fix it up here.
diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
index 6d09f5e3e7e4..e546c86a539b 100644
--- a/arch/powerpc/platforms/ps3/system-bus.c
+++ b/arch/powerpc/platforms/ps3/system-bus.c
@@ -766,6 +766,7 @@ int ps3_system_bus_device_register(struct ps3_system_bus_device *dev)
BUG();
};
+ dev->core.of_node = NULL;
dev->core.archdata.of_node = NULL;
set_dev_node(&dev->core, 0);
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index e1682bc168a3..d71e58584086 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -79,13 +79,12 @@ static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa)
* prepend this to the full_name.
*/
name = (char *)ccwa + ccwa->name_offset;
- dn->full_name = kmalloc(strlen(name) + 2, GFP_KERNEL);
+ dn->full_name = kasprintf(GFP_KERNEL, "/%s", name);
if (!dn->full_name) {
kfree(dn);
return NULL;
}
- sprintf(dn->full_name, "/%s", name);
return dn;
}
@@ -410,15 +409,13 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
* directory of the device tree. CPUs actually live in the
* cpus directory so we need to fixup the full_name.
*/
- cpu_name = kzalloc(strlen(dn->full_name) + strlen("/cpus") + 1,
- GFP_KERNEL);
+ cpu_name = kasprintf(GFP_KERNEL, "/cpus%s", dn->full_name);
if (!cpu_name) {
dlpar_free_cc_nodes(dn);
rc = -ENOMEM;
goto out;
}
- sprintf(cpu_name, "/cpus%s", dn->full_name);
kfree(dn->full_name);
dn->full_name = cpu_name;
@@ -433,6 +430,7 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
if (rc) {
dlpar_release_drc(drc_index);
dlpar_free_cc_nodes(dn);
+ goto out;
}
rc = dlpar_online_cpu(dn);
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index a8e1d5d17a28..8f85f399ab9f 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -154,30 +154,6 @@ static void pseries_mach_cpu_die(void)
for(;;);
}
-static int qcss_tok; /* query-cpu-stopped-state token */
-
-/* Get state of physical CPU.
- * Return codes:
- * 0 - The processor is in the RTAS stopped state
- * 1 - stop-self is in progress
- * 2 - The processor is not in the RTAS stopped state
- * -1 - Hardware Error
- * -2 - Hardware Busy, Try again later.
- */
-static int query_cpu_stopped(unsigned int pcpu)
-{
- int cpu_status, status;
-
- status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
- if (status != 0) {
- printk(KERN_ERR
- "RTAS query-cpu-stopped-state failed: %i\n", status);
- return status;
- }
-
- return cpu_status;
-}
-
static int pseries_cpu_disable(void)
{
int cpu = smp_processor_id();
@@ -187,7 +163,7 @@ static int pseries_cpu_disable(void)
/*fix boot_cpuid here*/
if (cpu == boot_cpuid)
- boot_cpuid = any_online_cpu(cpu_online_map);
+ boot_cpuid = cpumask_any(cpu_online_mask);
/* FIXME: abstract this to not be platform specific later on */
xics_migrate_irqs_away();
@@ -224,8 +200,9 @@ static void pseries_cpu_die(unsigned int cpu)
} else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) {
for (tries = 0; tries < 25; tries++) {
- cpu_status = query_cpu_stopped(pcpu);
- if (cpu_status == 0 || cpu_status == -1)
+ cpu_status = smp_query_cpu_stopped(pcpu);
+ if (cpu_status == QCSS_STOPPED ||
+ cpu_status == QCSS_HARDWARE_ERROR)
break;
cpu_relax();
}
@@ -245,7 +222,7 @@ static void pseries_cpu_die(unsigned int cpu)
}
/*
- * Update cpu_present_map and paca(s) for a new cpu node. The wrinkle
+ * Update cpu_present_mask and paca(s) for a new cpu node. The wrinkle
* here is that a cpu device node may represent up to two logical cpus
* in the SMT case. We must honor the assumption in other code that
* the logical ids for sibling SMT threads x and y are adjacent, such
@@ -254,7 +231,7 @@ static void pseries_cpu_die(unsigned int cpu)
static int pseries_add_processor(struct device_node *np)
{
unsigned int cpu;
- cpumask_t candidate_map, tmp = CPU_MASK_NONE;
+ cpumask_var_t candidate_mask, tmp;
int err = -ENOSPC, len, nthreads, i;
const u32 *intserv;
@@ -262,48 +239,53 @@ static int pseries_add_processor(struct device_node *np)
if (!intserv)
return 0;
+ zalloc_cpumask_var(&candidate_mask, GFP_KERNEL);
+ zalloc_cpumask_var(&tmp, GFP_KERNEL);
+
nthreads = len / sizeof(u32);
for (i = 0; i < nthreads; i++)
- cpu_set(i, tmp);
+ cpumask_set_cpu(i, tmp);
cpu_maps_update_begin();
- BUG_ON(!cpus_subset(cpu_present_map, cpu_possible_map));
+ BUG_ON(!cpumask_subset(cpu_present_mask, cpu_possible_mask));
/* Get a bitmap of unoccupied slots. */
- cpus_xor(candidate_map, cpu_possible_map, cpu_present_map);
- if (cpus_empty(candidate_map)) {
+ cpumask_xor(candidate_mask, cpu_possible_mask, cpu_present_mask);
+ if (cpumask_empty(candidate_mask)) {
/* If we get here, it most likely means that NR_CPUS is
* less than the partition's max processors setting.
*/
printk(KERN_ERR "Cannot add cpu %s; this system configuration"
" supports %d logical cpus.\n", np->full_name,
- cpus_weight(cpu_possible_map));
+ cpumask_weight(cpu_possible_mask));
goto out_unlock;
}
- while (!cpus_empty(tmp))
- if (cpus_subset(tmp, candidate_map))
+ while (!cpumask_empty(tmp))
+ if (cpumask_subset(tmp, candidate_mask))
/* Found a range where we can insert the new cpu(s) */
break;
else
- cpus_shift_left(tmp, tmp, nthreads);
+ cpumask_shift_left(tmp, tmp, nthreads);
- if (cpus_empty(tmp)) {
- printk(KERN_ERR "Unable to find space in cpu_present_map for"
+ if (cpumask_empty(tmp)) {
+ printk(KERN_ERR "Unable to find space in cpu_present_mask for"
" processor %s with %d thread(s)\n", np->name,
nthreads);
goto out_unlock;
}
- for_each_cpu_mask(cpu, tmp) {
- BUG_ON(cpu_isset(cpu, cpu_present_map));
+ for_each_cpu(cpu, tmp) {
+ BUG_ON(cpumask_test_cpu(cpu, cpu_present_mask));
set_cpu_present(cpu, true);
set_hard_smp_processor_id(cpu, *intserv++);
}
err = 0;
out_unlock:
cpu_maps_update_done();
+ free_cpumask_var(candidate_mask);
+ free_cpumask_var(tmp);
return err;
}
@@ -334,7 +316,7 @@ static void pseries_remove_processor(struct device_node *np)
set_hard_smp_processor_id(cpu, -1);
break;
}
- if (cpu == NR_CPUS)
+ if (cpu >= nr_cpu_ids)
printk(KERN_WARNING "Could not find cpu to remove "
"with physical id 0x%x\n", intserv[i]);
}
@@ -388,6 +370,7 @@ static int __init pseries_cpu_hotplug_init(void)
struct device_node *np;
const char *typep;
int cpu;
+ int qcss_tok;
for_each_node_by_name(np, "interrupt-controller") {
typep = of_get_property(np, "compatible", NULL);
diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h
index a05f8d427856..6c4fd2c3f385 100644
--- a/arch/powerpc/platforms/pseries/plpar_wrappers.h
+++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h
@@ -4,6 +4,14 @@
#include <asm/hvcall.h>
#include <asm/page.h>
+/* Get state of physical CPU from query_cpu_stopped */
+int smp_query_cpu_stopped(unsigned int pcpu);
+#define QCSS_STOPPED 0
+#define QCSS_STOPPING 1
+#define QCSS_NOT_STOPPED 2
+#define QCSS_HARDWARE_ERROR -1
+#define QCSS_HARDWARE_BUSY -2
+
static inline long poll_pending(void)
{
return plpar_hcall_norets(H_POLL_PENDING);
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 4e7f89a84561..3b1bf61c45be 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -55,7 +55,29 @@
* The Primary thread of each non-boot processor was started from the OF client
* interface by prom_hold_cpus and is spinning on secondary_hold_spinloop.
*/
-static cpumask_t of_spin_map;
+static cpumask_var_t of_spin_mask;
+
+/* Query where a cpu is now. Return codes #defined in plpar_wrappers.h */
+int smp_query_cpu_stopped(unsigned int pcpu)
+{
+ int cpu_status, status;
+ int qcss_tok = rtas_token("query-cpu-stopped-state");
+
+ if (qcss_tok == RTAS_UNKNOWN_SERVICE) {
+ printk(KERN_INFO "Firmware doesn't support "
+ "query-cpu-stopped-state\n");
+ return QCSS_HARDWARE_ERROR;
+ }
+
+ status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
+ if (status != 0) {
+ printk(KERN_ERR
+ "RTAS query-cpu-stopped-state failed: %i\n", status);
+ return status;
+ }
+
+ return cpu_status;
+}
/**
* smp_startup_cpu() - start the given cpu
@@ -76,12 +98,18 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
unsigned int pcpu;
int start_cpu;
- if (cpu_isset(lcpu, of_spin_map))
+ if (cpumask_test_cpu(lcpu, of_spin_mask))
/* Already started by OF and sitting in spin loop */
return 1;
pcpu = get_hard_smp_processor_id(lcpu);
+ /* Check whether the CPU is already out of firmware (kexec case) */
+ if (smp_query_cpu_stopped(pcpu) == QCSS_NOT_STOPPED) {
+ cpumask_set_cpu(lcpu, of_spin_mask);
+ return 1;
+ }
+
/* Fixup atomic count: it exited inside IRQ handler. */
task_thread_info(paca[lcpu].__current)->preempt_count = 0;
@@ -115,7 +143,7 @@ static void __devinit smp_xics_setup_cpu(int cpu)
if (firmware_has_feature(FW_FEATURE_SPLPAR))
vpa_init(cpu);
- cpu_clear(cpu, of_spin_map);
+ cpumask_clear_cpu(cpu, of_spin_mask);
set_cpu_current_state(cpu, CPU_STATE_ONLINE);
set_default_offline_state(cpu);
@@ -186,17 +214,19 @@ static void __init smp_init_pseries(void)
pr_debug(" -> smp_init_pSeries()\n");
+ alloc_bootmem_cpumask_var(&of_spin_mask);
+
/* Mark threads which are still spinning in hold loops. */
if (cpu_has_feature(CPU_FTR_SMT)) {
for_each_present_cpu(i) {
if (cpu_thread_in_core(i) == 0)
- cpu_set(i, of_spin_map);
+ cpumask_set_cpu(i, of_spin_mask);
}
} else {
- of_spin_map = cpu_present_map;
+ cpumask_copy(of_spin_mask, cpu_present_mask);
}
- cpu_clear(boot_cpuid, of_spin_map);
+ cpumask_clear_cpu(boot_cpuid, of_spin_mask);
/* Non-lpar has additional take/give timebase */
if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 1bcedd8b4616..f19d19468393 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -163,29 +163,37 @@ static inline void lpar_qirr_info(int n_cpu , u8 value)
/* Interface to generic irq subsystem */
#ifdef CONFIG_SMP
-static int get_irq_server(unsigned int virq, cpumask_t cpumask,
+/*
+ * For the moment we only implement delivery to all cpus or one cpu.
+ *
+ * If the requested affinity is cpu_all_mask, we set global affinity.
+ * If not we set it to the first cpu in the mask, even if multiple cpus
+ * are set. This is so things like irqbalance (which set core and package
+ * wide affinities) do the right thing.
+ */
+static int get_irq_server(unsigned int virq, const struct cpumask *cpumask,
unsigned int strict_check)
{
- int server;
- /* For the moment only implement delivery to all cpus or one cpu */
- cpumask_t tmp = CPU_MASK_NONE;
if (!distribute_irqs)
return default_server;
- if (!cpus_equal(cpumask, CPU_MASK_ALL)) {
- cpus_and(tmp, cpu_online_map, cpumask);
-
- server = first_cpu(tmp);
+ if (!cpumask_equal(cpumask, cpu_all_mask)) {
+ int server = cpumask_first_and(cpu_online_mask, cpumask);
- if (server < NR_CPUS)
+ if (server < nr_cpu_ids)
return get_hard_smp_processor_id(server);
if (strict_check)
return -1;
}
- if (cpus_equal(cpu_online_map, cpu_present_map))
+ /*
+ * Workaround issue with some versions of JS20 firmware that
+ * deliver interrupts to cpus which haven't been started. This
+ * happens when using the maxcpus= boot option.
+ */
+ if (cpumask_equal(cpu_online_mask, cpu_present_mask))
return default_distrib_server;
return default_server;
@@ -207,7 +215,7 @@ static void xics_unmask_irq(unsigned int virq)
if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
return;
- server = get_irq_server(virq, *(irq_to_desc(virq)->affinity), 0);
+ server = get_irq_server(virq, irq_to_desc(virq)->affinity, 0);
call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
DEFAULT_PRIORITY);
@@ -398,11 +406,7 @@ static int xics_set_affinity(unsigned int virq, const struct cpumask *cpumask)
return -1;
}
- /*
- * For the moment only implement delivery to all cpus or one cpu.
- * Get current irq_server for the given irq
- */
- irq_server = get_irq_server(virq, *cpumask, 1);
+ irq_server = get_irq_server(virq, cpumask, 1);
if (irq_server == -1) {
char cpulist[128];
cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
@@ -611,7 +615,7 @@ int __init smp_xics_probe(void)
{
xics_request_ipi();
- return cpus_weight(cpu_possible_map);
+ return cpumask_weight(cpu_possible_mask);
}
#endif /* CONFIG_SMP */
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 260295b10557..2102487612a4 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -568,12 +568,12 @@ static void __init mpic_scan_ht_pics(struct mpic *mpic)
#endif /* CONFIG_MPIC_U3_HT_IRQS */
#ifdef CONFIG_SMP
-static int irq_choose_cpu(const cpumask_t *mask)
+static int irq_choose_cpu(const struct cpumask *mask)
{
int cpuid;
if (cpumask_equal(mask, cpu_all_mask)) {
- static int irq_rover;
+ static int irq_rover = 0;
static DEFINE_RAW_SPINLOCK(irq_rover_lock);
unsigned long flags;
@@ -581,15 +581,11 @@ static int irq_choose_cpu(const cpumask_t *mask)
do_round_robin:
raw_spin_lock_irqsave(&irq_rover_lock, flags);
- while (!cpu_online(irq_rover)) {
- if (++irq_rover >= NR_CPUS)
- irq_rover = 0;
- }
+ irq_rover = cpumask_next(irq_rover, cpu_online_mask);
+ if (irq_rover >= nr_cpu_ids)
+ irq_rover = cpumask_first(cpu_online_mask);
+
cpuid = irq_rover;
- do {
- if (++irq_rover >= NR_CPUS)
- irq_rover = 0;
- } while (!cpu_online(irq_rover));
raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
} else {
@@ -601,7 +597,7 @@ static int irq_choose_cpu(const cpumask_t *mask)
return get_hard_smp_processor_id(cpuid);
}
#else
-static int irq_choose_cpu(const cpumask_t *mask)
+static int irq_choose_cpu(const struct cpumask *mask)
{
return hard_smp_processor_id();
}
@@ -814,12 +810,16 @@ int mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid);
} else {
- cpumask_t tmp;
+ cpumask_var_t tmp;
- cpumask_and(&tmp, cpumask, cpu_online_mask);
+ alloc_cpumask_var(&tmp, GFP_KERNEL);
+
+ cpumask_and(tmp, cpumask, cpu_online_mask);
mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION),
- mpic_physmask(cpus_addr(tmp)[0]));
+ mpic_physmask(cpumask_bits(tmp)[0]));
+
+ free_cpumask_var(tmp);
}
return 0;
@@ -1479,21 +1479,6 @@ void mpic_teardown_this_cpu(int secondary)
}
-void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask)
-{
- struct mpic *mpic = mpic_primary;
-
- BUG_ON(mpic == NULL);
-
-#ifdef DEBUG_IPI
- DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no);
-#endif
-
- mpic_cpu_write(MPIC_INFO(CPU_IPI_DISPATCH_0) +
- ipi_no * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE),
- mpic_physmask(cpu_mask & cpus_addr(cpu_online_map)[0]));
-}
-
static unsigned int _mpic_get_one_irq(struct mpic *mpic, int reg)
{
u32 src;
@@ -1589,8 +1574,25 @@ void mpic_request_ipis(void)
}
}
+static void mpic_send_ipi(unsigned int ipi_no, const struct cpumask *cpu_mask)
+{
+ struct mpic *mpic = mpic_primary;
+
+ BUG_ON(mpic == NULL);
+
+#ifdef DEBUG_IPI
+ DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no);
+#endif
+
+ mpic_cpu_write(MPIC_INFO(CPU_IPI_DISPATCH_0) +
+ ipi_no * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE),
+ mpic_physmask(cpumask_bits(cpu_mask)[0]));
+}
+
void smp_mpic_message_pass(int target, int msg)
{
+ cpumask_var_t tmp;
+
/* make sure we're sending something that translates to an IPI */
if ((unsigned int)msg > 3) {
printk("SMP %d: smp_message_pass: unknown msg %d\n",
@@ -1599,13 +1601,17 @@ void smp_mpic_message_pass(int target, int msg)
}
switch (target) {
case MSG_ALL:
- mpic_send_ipi(msg, 0xffffffff);
+ mpic_send_ipi(msg, cpu_online_mask);
break;
case MSG_ALL_BUT_SELF:
- mpic_send_ipi(msg, 0xffffffff & ~(1 << smp_processor_id()));
+ alloc_cpumask_var(&tmp, GFP_NOWAIT);
+ cpumask_andnot(tmp, cpu_online_mask,
+ cpumask_of(smp_processor_id()));
+ mpic_send_ipi(msg, tmp);
+ free_cpumask_var(tmp);
break;
default:
- mpic_send_ipi(msg, 1 << target);
+ mpic_send_ipi(msg, cpumask_of(target));
break;
}
}
@@ -1616,7 +1622,7 @@ int __init smp_mpic_probe(void)
DBG("smp_mpic_probe()...\n");
- nr_cpus = cpus_weight(cpu_possible_map);
+ nr_cpus = cpumask_weight(cpu_possible_mask);
DBG("nr_cpus: %d\n", nr_cpus);
diff --git a/arch/powerpc/sysdev/ppc4xx_pci.c b/arch/powerpc/sysdev/ppc4xx_pci.c
index 106d767bf65b..156aa7d36258 100644
--- a/arch/powerpc/sysdev/ppc4xx_pci.c
+++ b/arch/powerpc/sysdev/ppc4xx_pci.c
@@ -974,6 +974,123 @@ static struct ppc4xx_pciex_hwops ppc460ex_pcie_hwops __initdata =
.setup_utl = ppc460ex_pciex_init_utl,
};
+static int __init ppc460sx_pciex_core_init(struct device_node *np)
+{
+ /* HSS drive amplitude */
+ mtdcri(SDR0, PESDR0_460SX_HSSL0DAMP, 0xB9843211);
+ mtdcri(SDR0, PESDR0_460SX_HSSL1DAMP, 0xB9843211);
+ mtdcri(SDR0, PESDR0_460SX_HSSL2DAMP, 0xB9843211);
+ mtdcri(SDR0, PESDR0_460SX_HSSL3DAMP, 0xB9843211);
+ mtdcri(SDR0, PESDR0_460SX_HSSL4DAMP, 0xB9843211);
+ mtdcri(SDR0, PESDR0_460SX_HSSL5DAMP, 0xB9843211);
+ mtdcri(SDR0, PESDR0_460SX_HSSL6DAMP, 0xB9843211);
+ mtdcri(SDR0, PESDR0_460SX_HSSL7DAMP, 0xB9843211);
+
+ mtdcri(SDR0, PESDR1_460SX_HSSL0DAMP, 0xB9843211);
+ mtdcri(SDR0, PESDR1_460SX_HSSL1DAMP, 0xB9843211);
+ mtdcri(SDR0, PESDR1_460SX_HSSL2DAMP, 0xB9843211);
+ mtdcri(SDR0, PESDR1_460SX_HSSL3DAMP, 0xB9843211);
+
+ mtdcri(SDR0, PESDR2_460SX_HSSL0DAMP, 0xB9843211);
+ mtdcri(SDR0, PESDR2_460SX_HSSL1DAMP, 0xB9843211);
+ mtdcri(SDR0, PESDR2_460SX_HSSL2DAMP, 0xB9843211);
+ mtdcri(SDR0, PESDR2_460SX_HSSL3DAMP, 0xB9843211);
+
+ /* HSS TX pre-emphasis */
+ mtdcri(SDR0, PESDR0_460SX_HSSL0COEFA, 0xDCB98987);
+ mtdcri(SDR0, PESDR0_460SX_HSSL1COEFA, 0xDCB98987);
+ mtdcri(SDR0, PESDR0_460SX_HSSL2COEFA, 0xDCB98987);
+ mtdcri(SDR0, PESDR0_460SX_HSSL3COEFA, 0xDCB98987);
+ mtdcri(SDR0, PESDR0_460SX_HSSL4COEFA, 0xDCB98987);
+ mtdcri(SDR0, PESDR0_460SX_HSSL5COEFA, 0xDCB98987);
+ mtdcri(SDR0, PESDR0_460SX_HSSL6COEFA, 0xDCB98987);
+ mtdcri(SDR0, PESDR0_460SX_HSSL7COEFA, 0xDCB98987);
+
+ mtdcri(SDR0, PESDR1_460SX_HSSL0COEFA, 0xDCB98987);
+ mtdcri(SDR0, PESDR1_460SX_HSSL1COEFA, 0xDCB98987);
+ mtdcri(SDR0, PESDR1_460SX_HSSL2COEFA, 0xDCB98987);
+ mtdcri(SDR0, PESDR1_460SX_HSSL3COEFA, 0xDCB98987);
+
+ mtdcri(SDR0, PESDR2_460SX_HSSL0COEFA, 0xDCB98987);
+ mtdcri(SDR0, PESDR2_460SX_HSSL1COEFA, 0xDCB98987);
+ mtdcri(SDR0, PESDR2_460SX_HSSL2COEFA, 0xDCB98987);
+ mtdcri(SDR0, PESDR2_460SX_HSSL3COEFA, 0xDCB98987);
+
+ /* HSS TX calibration control */
+ mtdcri(SDR0, PESDR0_460SX_HSSL1CALDRV, 0x22222222);
+ mtdcri(SDR0, PESDR1_460SX_HSSL1CALDRV, 0x22220000);
+ mtdcri(SDR0, PESDR2_460SX_HSSL1CALDRV, 0x22220000);
+
+ /* HSS TX slew control */
+ mtdcri(SDR0, PESDR0_460SX_HSSSLEW, 0xFFFFFFFF);
+ mtdcri(SDR0, PESDR1_460SX_HSSSLEW, 0xFFFF0000);
+ mtdcri(SDR0, PESDR2_460SX_HSSSLEW, 0xFFFF0000);
+
+ udelay(100);
+
+ /* De-assert PLLRESET */
+ dcri_clrset(SDR0, PESDR0_PLLLCT2, 0x00000100, 0);
+
+ /* Reset DL, UTL, GPL before configuration */
+ mtdcri(SDR0, PESDR0_460SX_RCSSET,
+ PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU);
+ mtdcri(SDR0, PESDR1_460SX_RCSSET,
+ PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU);
+ mtdcri(SDR0, PESDR2_460SX_RCSSET,
+ PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU);
+
+ udelay(100);
+
+ /*
+ * If bifurcation is not enabled, u-boot would have disabled the
+ * third PCIe port
+ */
+ if (((mfdcri(SDR0, PESDR1_460SX_HSSCTLSET) & 0x00000001) ==
+ 0x00000001)) {
+ printk(KERN_INFO "PCI: PCIE bifurcation set up successfully.\n");
+ printk(KERN_INFO "PCI: Total 3 PCIE ports are present\n");
+ return 3;
+ }
+
+ printk(KERN_INFO "PCI: Total 2 PCIE ports are present\n");
+ return 2;
+}
+
+static int ppc460sx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
+{
+
+ if (port->endpoint)
+ dcri_clrset(SDR0, port->sdr_base + PESDRn_UTLSET2,
+ 0x01000000, 0);
+ else
+ dcri_clrset(SDR0, port->sdr_base + PESDRn_UTLSET2,
+ 0, 0x01000000);
+
+ /* Gen-1 */
+ mtdcri(SDR0, port->sdr_base + PESDRn_460SX_RCEI, 0x08000000);
+
+ dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET,
+ (PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL),
+ PESDRx_RCSSET_RSTPYN);
+
+ port->has_ibpre = 1;
+
+ return 0;
+}
+
+static int ppc460sx_pciex_init_utl(struct ppc4xx_pciex_port *port)
+{
+ /* Max 128 Bytes */
+ out_be32 (port->utl_base + PEUTL_PBBSZ, 0x00000000);
+ return 0;
+}
+
+static struct ppc4xx_pciex_hwops ppc460sx_pcie_hwops __initdata = {
+ .core_init = ppc460sx_pciex_core_init,
+ .port_init_hw = ppc460sx_pciex_init_port_hw,
+ .setup_utl = ppc460sx_pciex_init_utl,
+};
+
#endif /* CONFIG_44x */
#ifdef CONFIG_40x
@@ -1089,6 +1206,8 @@ static int __init ppc4xx_pciex_check_core_init(struct device_node *np)
}
if (of_device_is_compatible(np, "ibm,plb-pciex-460ex"))
ppc4xx_pciex_hwops = &ppc460ex_pcie_hwops;
+ if (of_device_is_compatible(np, "ibm,plb-pciex-460sx"))
+ ppc4xx_pciex_hwops = &ppc460sx_pcie_hwops;
#endif /* CONFIG_44x */
#ifdef CONFIG_40x
if (of_device_is_compatible(np, "ibm,plb-pciex-405ex"))
diff --git a/arch/powerpc/sysdev/ppc4xx_pci.h b/arch/powerpc/sysdev/ppc4xx_pci.h
index d04e40b306fb..56d9e5deccbf 100644
--- a/arch/powerpc/sysdev/ppc4xx_pci.h
+++ b/arch/powerpc/sysdev/ppc4xx_pci.h
@@ -324,6 +324,64 @@
#define PESDR0_460EX_IHS2 0x036D
/*
+ * 460SX additional DCRs
+ */
+#define PESDRn_460SX_RCEI 0x02
+
+#define PESDR0_460SX_HSSL0DAMP 0x320
+#define PESDR0_460SX_HSSL1DAMP 0x321
+#define PESDR0_460SX_HSSL2DAMP 0x322
+#define PESDR0_460SX_HSSL3DAMP 0x323
+#define PESDR0_460SX_HSSL4DAMP 0x324
+#define PESDR0_460SX_HSSL5DAMP 0x325
+#define PESDR0_460SX_HSSL6DAMP 0x326
+#define PESDR0_460SX_HSSL7DAMP 0x327
+
+#define PESDR1_460SX_HSSL0DAMP 0x354
+#define PESDR1_460SX_HSSL1DAMP 0x355
+#define PESDR1_460SX_HSSL2DAMP 0x356
+#define PESDR1_460SX_HSSL3DAMP 0x357
+
+#define PESDR2_460SX_HSSL0DAMP 0x384
+#define PESDR2_460SX_HSSL1DAMP 0x385
+#define PESDR2_460SX_HSSL2DAMP 0x386
+#define PESDR2_460SX_HSSL3DAMP 0x387
+
+#define PESDR0_460SX_HSSL0COEFA 0x328
+#define PESDR0_460SX_HSSL1COEFA 0x329
+#define PESDR0_460SX_HSSL2COEFA 0x32A
+#define PESDR0_460SX_HSSL3COEFA 0x32B
+#define PESDR0_460SX_HSSL4COEFA 0x32C
+#define PESDR0_460SX_HSSL5COEFA 0x32D
+#define PESDR0_460SX_HSSL6COEFA 0x32E
+#define PESDR0_460SX_HSSL7COEFA 0x32F
+
+#define PESDR1_460SX_HSSL0COEFA 0x358
+#define PESDR1_460SX_HSSL1COEFA 0x359
+#define PESDR1_460SX_HSSL2COEFA 0x35A
+#define PESDR1_460SX_HSSL3COEFA 0x35B
+
+#define PESDR2_460SX_HSSL0COEFA 0x388
+#define PESDR2_460SX_HSSL1COEFA 0x389
+#define PESDR2_460SX_HSSL2COEFA 0x38A
+#define PESDR2_460SX_HSSL3COEFA 0x38B
+
+#define PESDR0_460SX_HSSL1CALDRV 0x339
+#define PESDR1_460SX_HSSL1CALDRV 0x361
+#define PESDR2_460SX_HSSL1CALDRV 0x391
+
+#define PESDR0_460SX_HSSSLEW 0x338
+#define PESDR1_460SX_HSSSLEW 0x360
+#define PESDR2_460SX_HSSSLEW 0x390
+
+#define PESDR0_460SX_HSSCTLSET 0x31E
+#define PESDR1_460SX_HSSCTLSET 0x352
+#define PESDR2_460SX_HSSCTLSET 0x382
+
+#define PESDR0_460SX_RCSSET 0x304
+#define PESDR1_460SX_RCSSET 0x344
+#define PESDR2_460SX_RCSSET 0x374
+/*
* Of the above, some are common offsets from the base
*/
#define PESDRn_UTLSET1 0x00
diff --git a/arch/powerpc/sysdev/ppc4xx_soc.c b/arch/powerpc/sysdev/ppc4xx_soc.c
index 5c014350bf16..d3d6ce3c33b4 100644
--- a/arch/powerpc/sysdev/ppc4xx_soc.c
+++ b/arch/powerpc/sysdev/ppc4xx_soc.c
@@ -191,11 +191,31 @@ static int __init ppc4xx_l2c_probe(void)
arch_initcall(ppc4xx_l2c_probe);
/*
- * At present, this routine just applies a system reset.
+ * Apply a system reset. Alternatively a board specific value may be
+ * provided via the "reset-type" property in the cpu node.
*/
void ppc4xx_reset_system(char *cmd)
{
- mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_RST_SYSTEM);
+ struct device_node *np;
+ u32 reset_type = DBCR0_RST_SYSTEM;
+ const u32 *prop;
+
+ np = of_find_node_by_type(NULL, "cpu");
+ if (np) {
+ prop = of_get_property(np, "reset-type", NULL);
+
+ /*
+ * Check if property exists and if it is in range:
+ * 1 - PPC4xx core reset
+ * 2 - PPC4xx chip reset
+ * 3 - PPC4xx system reset (default)
+ */
+ if ((prop) && ((prop[0] >= 1) && (prop[0] <= 3)))
+ reset_type = prop[0] << 28;
+ }
+
+ mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | reset_type);
+
while (1)
; /* Just in case the reset doesn't work */
}
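
With the ppc4xx_soc.c change above, a board can steer ppc4xx_reset_system() towards a core, chip or system reset through a "reset-type" property on its cpu node (for example reset-type = <2>; for a chip reset). The value-to-DBCR0 mapping implied by "prop[0] << 28", assuming the usual 4xx DBCR0 layout:

/* Illustrative only (not part of the patch). */
#define RST_CORE	(1u << 28)	/* 0x10000000 */
#define RST_CHIP	(2u << 28)	/* 0x20000000 */
#define RST_SYSTEM	(3u << 28)	/* 0x30000000 == DBCR0_RST_SYSTEM */
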
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 0da10746e0e5..30c5f01f93b0 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -116,6 +116,12 @@ image bzImage: vmlinux
zfcpdump:
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+vdso_install:
+ifeq ($(CONFIG_64BIT),y)
+ $(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso64 $@
+endif
+ $(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso32 $@
+
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index c53f8ac825ca..95c1aaac06cd 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -145,7 +145,7 @@ static int hypfs_open(struct inode *inode, struct file *filp)
}
mutex_unlock(&fs_info->lock);
}
- return 0;
+ return nonseekable_open(inode, filp);
}
static ssize_t hypfs_aio_read(struct kiocb *iocb, const struct iovec *iov,
diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
index 9b866816863c..24aafa68b643 100644
--- a/arch/s390/include/asm/cache.h
+++ b/arch/s390/include/asm/cache.h
@@ -14,6 +14,6 @@
#define L1_CACHE_BYTES 256
#define L1_CACHE_SHIFT 8
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
#endif
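
The only change here is the section name (".data.read_mostly" becomes ".data..read_mostly"). As a self-contained illustration of what the annotation buys — the variable name below is hypothetical, not taken from the kernel — data that is read constantly but written rarely gets grouped into its own section so it does not share cache lines with write-hot data:

/* Hypothetical example: 'max_widgets' is read on every fast path but
 * written only at init time, so keep it in .data..read_mostly.
 */
static int max_widgets __attribute__((__section__(".data..read_mostly"))) = 16;

int widgets_ok(int n)
{
	return n <= max_widgets;
}

int main(void) { return !widgets_ok(1); }
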
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 05527c040b7a..f7e78c79b8b1 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -126,16 +126,15 @@ struct _lowcore {
__u32 user_exec_asce; /* 0x02ac */
/* SMP info area */
- struct cpuid cpu_id; /* 0x02b0 */
- __u32 cpu_nr; /* 0x02b8 */
- __u32 softirq_pending; /* 0x02bc */
- __u32 percpu_offset; /* 0x02c0 */
- __u32 ext_call_fast; /* 0x02c4 */
- __u64 int_clock; /* 0x02c8 */
- __u64 clock_comparator; /* 0x02d0 */
- __u32 machine_flags; /* 0x02d8 */
- __u32 ftrace_func; /* 0x02dc */
- __u8 pad_0x02e0[0x0300-0x02e0]; /* 0x02e0 */
+ __u32 cpu_nr; /* 0x02b0 */
+ __u32 softirq_pending; /* 0x02b4 */
+ __u32 percpu_offset; /* 0x02b8 */
+ __u32 ext_call_fast; /* 0x02bc */
+ __u64 int_clock; /* 0x02c0 */
+ __u64 clock_comparator; /* 0x02c8 */
+ __u32 machine_flags; /* 0x02d0 */
+ __u32 ftrace_func; /* 0x02d4 */
+ __u8 pad_0x02d8[0x0300-0x02d8]; /* 0x02d8 */
/* Interrupt response block */
__u8 irb[64]; /* 0x0300 */
@@ -189,14 +188,14 @@ struct _lowcore {
__u32 data_exc_code; /* 0x0090 */
__u16 mon_class_num; /* 0x0094 */
__u16 per_perc_atmid; /* 0x0096 */
- addr_t per_address; /* 0x0098 */
+ __u64 per_address; /* 0x0098 */
__u8 exc_access_id; /* 0x00a0 */
__u8 per_access_id; /* 0x00a1 */
__u8 op_access_id; /* 0x00a2 */
__u8 ar_access_id; /* 0x00a3 */
__u8 pad_0x00a4[0x00a8-0x00a4]; /* 0x00a4 */
- addr_t trans_exc_code; /* 0x00a8 */
- addr_t monitor_code; /* 0x00b0 */
+ __u64 trans_exc_code; /* 0x00a8 */
+ __u64 monitor_code; /* 0x00b0 */
__u16 subchannel_id; /* 0x00b8 */
__u16 subchannel_nr; /* 0x00ba */
__u32 io_int_parm; /* 0x00bc */
@@ -207,7 +206,7 @@ struct _lowcore {
__u32 mcck_interruption_code[2]; /* 0x00e8 */
__u8 pad_0x00f0[0x00f4-0x00f0]; /* 0x00f0 */
__u32 external_damage_code; /* 0x00f4 */
- addr_t failing_storage_address; /* 0x00f8 */
+ __u64 failing_storage_address; /* 0x00f8 */
__u8 pad_0x0100[0x0110-0x0100]; /* 0x0100 */
__u64 breaking_event_addr; /* 0x0110 */
__u8 pad_0x0118[0x0120-0x0118]; /* 0x0118 */
@@ -255,17 +254,16 @@ struct _lowcore {
__u64 user_exec_asce; /* 0x0318 */
/* SMP info area */
- struct cpuid cpu_id; /* 0x0320 */
- __u32 cpu_nr; /* 0x0328 */
- __u32 softirq_pending; /* 0x032c */
- __u64 percpu_offset; /* 0x0330 */
- __u64 ext_call_fast; /* 0x0338 */
- __u64 int_clock; /* 0x0340 */
- __u64 clock_comparator; /* 0x0348 */
- __u64 vdso_per_cpu_data; /* 0x0350 */
- __u64 machine_flags; /* 0x0358 */
- __u64 ftrace_func; /* 0x0360 */
- __u8 pad_0x0368[0x0380-0x0368]; /* 0x0368 */
+ __u32 cpu_nr; /* 0x0320 */
+ __u32 softirq_pending; /* 0x0324 */
+ __u64 percpu_offset; /* 0x0328 */
+ __u64 ext_call_fast; /* 0x0330 */
+ __u64 int_clock; /* 0x0338 */
+ __u64 clock_comparator; /* 0x0340 */
+ __u64 vdso_per_cpu_data; /* 0x0348 */
+ __u64 machine_flags; /* 0x0350 */
+ __u64 ftrace_func; /* 0x0358 */
+ __u8 pad_0x0368[0x0380-0x0360]; /* 0x0360 */
/* Interrupt response block. */
__u8 irb[64]; /* 0x0380 */
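
The layout above relies on every field landing exactly at the offset given in its comment; removing cpuid shifts the whole SMP info area up by 8 bytes. A generic, self-contained sketch — not from the patch, using a made-up struct — of how such fixed-offset layouts can be verified at compile time:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Toy stand-in for a fixed-offset layout like the 64-bit lowcore. */
struct fixed_layout {
	uint8_t  pad_0x0000[0x0320];
	uint32_t cpu_nr;		/* 0x0320 */
	uint32_t softirq_pending;	/* 0x0324 */
	uint64_t percpu_offset;		/* 0x0328 */
};

static_assert(offsetof(struct fixed_layout, cpu_nr) == 0x0320,
	      "cpu_nr is misplaced");
static_assert(offsetof(struct fixed_layout, percpu_offset) == 0x0328,
	      "percpu_offset is misplaced");

int main(void) { return 0; }
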
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index 1741c1556a4e..cef66210c846 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -459,11 +459,6 @@ extern void (*_machine_power_off)(void);
#define arch_align_stack(x) (x)
-#ifdef CONFIG_TRACE_IRQFLAGS
-extern psw_t sysc_restore_trace_psw;
-extern psw_t io_restore_trace_psw;
-#endif
-
static inline int tprot(unsigned long addr)
{
int rc = -EFAULT;
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 6e7211abd950..dc8a67297d0f 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -7,8 +7,10 @@
const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
+extern unsigned char cpu_core_id[NR_CPUS];
extern cpumask_t cpu_core_map[NR_CPUS];
+#define topology_core_id(cpu) (cpu_core_id[cpu])
#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
int topology_set_cpu_management(int fc);
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index a09408952ed0..32b1ede69858 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -126,7 +126,6 @@ int main(void)
DEFINE(__LC_KERNEL_ASCE, offsetof(struct _lowcore, kernel_asce));
DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce));
DEFINE(__LC_USER_EXEC_ASCE, offsetof(struct _lowcore, user_exec_asce));
- DEFINE(__LC_CPUID, offsetof(struct _lowcore, cpu_id));
DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));
DEFINE(__LC_FTRACE_FUNC, offsetof(struct _lowcore, ftrace_func));
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 0168472b2fdf..98192261491d 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -655,6 +655,7 @@ found:
p_info->act_entry_offset = 0;
file->private_data = p_info;
debug_info_get(debug_info);
+ nonseekable_open(inode, file);
out:
mutex_unlock(&debug_mutex);
return rc;
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 6af7045280a8..b4570c978bd3 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -73,21 +73,24 @@ STACK_SIZE = 1 << STACK_SHIFT
basr %r14,%r1
.endm
- .macro TRACE_IRQS_CHECK
- basr %r2,%r0
+ .macro TRACE_IRQS_CHECK_ON
tm SP_PSW(%r15),0x03 # irqs enabled?
- jz 0f
- l %r1,BASED(.Ltrace_irq_on_caller)
- basr %r14,%r1
- j 1f
-0: l %r1,BASED(.Ltrace_irq_off_caller)
- basr %r14,%r1
-1:
+ bz BASED(0f)
+ TRACE_IRQS_ON
+0:
+ .endm
+
+ .macro TRACE_IRQS_CHECK_OFF
+ tm SP_PSW(%r15),0x03 # irqs enabled?
+ bz BASED(0f)
+ TRACE_IRQS_OFF
+0:
.endm
#else
#define TRACE_IRQS_ON
#define TRACE_IRQS_OFF
-#define TRACE_IRQS_CHECK
+#define TRACE_IRQS_CHECK_ON
+#define TRACE_IRQS_CHECK_OFF
#endif
#ifdef CONFIG_LOCKDEP
@@ -273,53 +276,33 @@ sysc_do_restart:
st %r2,SP_R2(%r15) # store return value (change R2 on stack)
sysc_return:
+ LOCKDEP_SYS_EXIT
+sysc_tif:
tm __TI_flags+3(%r9),_TIF_WORK_SVC
bnz BASED(sysc_work) # there is work to do (signals etc.)
sysc_restore:
-#ifdef CONFIG_TRACE_IRQFLAGS
- la %r1,BASED(sysc_restore_trace_psw_addr)
- l %r1,0(%r1)
- lpsw 0(%r1)
-sysc_restore_trace:
- TRACE_IRQS_CHECK
- LOCKDEP_SYS_EXIT
-#endif
-sysc_leave:
RESTORE_ALL __LC_RETURN_PSW,1
sysc_done:
-#ifdef CONFIG_TRACE_IRQFLAGS
-sysc_restore_trace_psw_addr:
- .long sysc_restore_trace_psw
-
- .section .data,"aw",@progbits
- .align 8
- .globl sysc_restore_trace_psw
-sysc_restore_trace_psw:
- .long 0, sysc_restore_trace + 0x80000000
- .previous
-#endif
-
-#
-# recheck if there is more work to do
-#
-sysc_work_loop:
- tm __TI_flags+3(%r9),_TIF_WORK_SVC
- bz BASED(sysc_restore) # there is no work to do
#
-# One of the work bits is on. Find out which one.
+# There is work to do, but first we need to check if we return to userspace.
#
sysc_work:
tm SP_PSW+1(%r15),0x01 # returning to user ?
bno BASED(sysc_restore)
+
+#
+# One of the work bits is on. Find out which one.
+#
+sysc_work_tif:
tm __TI_flags+3(%r9),_TIF_MCCK_PENDING
bo BASED(sysc_mcck_pending)
tm __TI_flags+3(%r9),_TIF_NEED_RESCHED
bo BASED(sysc_reschedule)
tm __TI_flags+3(%r9),_TIF_SIGPENDING
- bnz BASED(sysc_sigpending)
+ bo BASED(sysc_sigpending)
tm __TI_flags+3(%r9),_TIF_NOTIFY_RESUME
- bnz BASED(sysc_notify_resume)
+ bo BASED(sysc_notify_resume)
tm __TI_flags+3(%r9),_TIF_RESTART_SVC
bo BASED(sysc_restart)
tm __TI_flags+3(%r9),_TIF_SINGLE_STEP
@@ -332,7 +315,7 @@ sysc_work_done:
#
sysc_reschedule:
l %r1,BASED(.Lschedule)
- la %r14,BASED(sysc_work_loop)
+ la %r14,BASED(sysc_return)
br %r1 # call scheduler
#
@@ -340,7 +323,7 @@ sysc_reschedule:
#
sysc_mcck_pending:
l %r1,BASED(.Ls390_handle_mcck)
- la %r14,BASED(sysc_work_loop)
+ la %r14,BASED(sysc_return)
br %r1 # TIF bit will be cleared by handler
#
@@ -355,7 +338,7 @@ sysc_sigpending:
bo BASED(sysc_restart)
tm __TI_flags+3(%r9),_TIF_SINGLE_STEP
bo BASED(sysc_singlestep)
- b BASED(sysc_work_loop)
+ b BASED(sysc_return)
#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
@@ -363,7 +346,7 @@ sysc_sigpending:
sysc_notify_resume:
la %r2,SP_PTREGS(%r15) # load pt_regs
l %r1,BASED(.Ldo_notify_resume)
- la %r14,BASED(sysc_work_loop)
+ la %r14,BASED(sysc_return)
br %r1 # call do_notify_resume
@@ -458,11 +441,13 @@ kernel_execve:
br %r14
# execve succeeded.
0: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts
+ TRACE_IRQS_OFF
l %r15,__LC_KERNEL_STACK # load ksp
s %r15,BASED(.Lc_spsize) # make room for registers & psw
l %r9,__LC_THREAD_INFO
mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
+ TRACE_IRQS_ON
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
l %r1,BASED(.Lexecve_tail)
basr %r14,%r1
@@ -499,8 +484,8 @@ pgm_check_handler:
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime:
+ TRACE_IRQS_CHECK_OFF
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
- TRACE_IRQS_OFF
l %r3,__LC_PGM_ILC # load program interruption code
la %r8,0x7f
nr %r8,%r3
@@ -509,8 +494,10 @@ pgm_do_call:
sll %r8,2
l %r7,0(%r8,%r7) # load address of handler routine
la %r2,SP_PTREGS(%r15) # address of register-save area
- la %r14,BASED(sysc_return)
- br %r7 # branch to interrupt-handler
+ basr %r14,%r7 # branch to interrupt-handler
+pgm_exit:
+ TRACE_IRQS_CHECK_ON
+ b BASED(sysc_return)
#
# handle per exception
@@ -537,19 +524,19 @@ pgm_per_std:
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime2:
+ TRACE_IRQS_CHECK_OFF
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
- TRACE_IRQS_OFF
l %r1,__TI_task(%r9)
+ tm SP_PSW+1(%r15),0x01 # kernel per event ?
+ bz BASED(kernel_per)
mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
mvc __THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
- tm SP_PSW+1(%r15),0x01 # kernel per event ?
- bz BASED(kernel_per)
l %r3,__LC_PGM_ILC # load program interruption code
la %r8,0x7f
nr %r8,%r3 # clear per-event-bit and ilc
- be BASED(sysc_return) # only per or per+check ?
+ be BASED(pgm_exit) # only per or per+check ?
b BASED(pgm_do_call)
#
@@ -570,8 +557,8 @@ pgm_svcper:
mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
TRACE_IRQS_ON
- lm %r2,%r6,SP_R2(%r15) # load svc arguments
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
+ lm %r2,%r6,SP_R2(%r15) # load svc arguments
b BASED(sysc_do_svc)
#
@@ -582,8 +569,8 @@ kernel_per:
mvi SP_SVCNR+1(%r15),0xff
la %r2,SP_PTREGS(%r15) # address of register-save area
l %r1,BASED(.Lhandle_per) # load adr. of per handler
- la %r14,BASED(sysc_restore)# load adr. of system return
- br %r1 # branch to do_single_step
+ basr %r14,%r1 # branch to do_single_step
+ b BASED(pgm_exit)
/*
* IO interrupt handler routine
@@ -602,134 +589,127 @@ io_int_handler:
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
io_no_vtime:
- l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
TRACE_IRQS_OFF
+ l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ
la %r2,SP_PTREGS(%r15) # address of register-save area
basr %r14,%r1 # branch to standard irq handler
io_return:
+ LOCKDEP_SYS_EXIT
+ TRACE_IRQS_ON
+io_tif:
tm __TI_flags+3(%r9),_TIF_WORK_INT
bnz BASED(io_work) # there is work to do (signals etc.)
io_restore:
-#ifdef CONFIG_TRACE_IRQFLAGS
- la %r1,BASED(io_restore_trace_psw_addr)
- l %r1,0(%r1)
- lpsw 0(%r1)
-io_restore_trace:
- TRACE_IRQS_CHECK
- LOCKDEP_SYS_EXIT
-#endif
-io_leave:
RESTORE_ALL __LC_RETURN_PSW,0
io_done:
-#ifdef CONFIG_TRACE_IRQFLAGS
-io_restore_trace_psw_addr:
- .long io_restore_trace_psw
-
- .section .data,"aw",@progbits
- .align 8
- .globl io_restore_trace_psw
-io_restore_trace_psw:
- .long 0, io_restore_trace + 0x80000000
- .previous
-#endif
-
#
-# switch to kernel stack, then check the TIF bits
+# There is work to do, find out in which context we have been interrupted:
+# 1) if we return to user space we can do all _TIF_WORK_INT work
+# 2) if we return to kernel code and preemptive scheduling is enabled check
+# the preemption counter and if it is zero call preempt_schedule_irq
+# Before any work can be done, a switch to the kernel stack is required.
#
io_work:
tm SP_PSW+1(%r15),0x01 # returning to user ?
-#ifndef CONFIG_PREEMPT
- bno BASED(io_restore) # no-> skip resched & signal
-#else
- bnz BASED(io_work_user) # no -> check for preemptive scheduling
+ bo BASED(io_work_user) # yes -> do resched & signal
+#ifdef CONFIG_PREEMPT
# check for preemptive scheduling
icm %r0,15,__TI_precount(%r9)
bnz BASED(io_restore) # preemption disabled
+ tm __TI_flags+3(%r9),_TIF_NEED_RESCHED
+ bno BASED(io_restore)
+ # switch to kernel stack
l %r1,SP_R15(%r15)
s %r1,BASED(.Lc_spsize)
mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
lr %r15,%r1
-io_resume_loop:
- tm __TI_flags+3(%r9),_TIF_NEED_RESCHED
- bno BASED(io_restore)
+ # TRACE_IRQS_ON already done at io_return, call
+ # TRACE_IRQS_OFF to keep things symmetrical
+ TRACE_IRQS_OFF
l %r1,BASED(.Lpreempt_schedule_irq)
- la %r14,BASED(io_resume_loop)
- br %r1 # call schedule
+ basr %r14,%r1 # call preempt_schedule_irq
+ b BASED(io_return)
+#else
+ b BASED(io_restore)
#endif
+#
+# Need to do work before returning to userspace, switch to kernel stack
+#
io_work_user:
l %r1,__LC_KERNEL_STACK
s %r1,BASED(.Lc_spsize)
mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
lr %r15,%r1
+
#
# One of the work bits is on. Find out which one.
-# Checked are: _TIF_SIGPENDING, _TIF_NEED_RESCHED
+# Checked are: _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_NEED_RESCHED
# and _TIF_MCCK_PENDING
#
-io_work_loop:
+io_work_tif:
tm __TI_flags+3(%r9),_TIF_MCCK_PENDING
bo BASED(io_mcck_pending)
tm __TI_flags+3(%r9),_TIF_NEED_RESCHED
bo BASED(io_reschedule)
tm __TI_flags+3(%r9),_TIF_SIGPENDING
- bnz BASED(io_sigpending)
+ bo BASED(io_sigpending)
tm __TI_flags+3(%r9),_TIF_NOTIFY_RESUME
- bnz BASED(io_notify_resume)
- b BASED(io_restore)
+ bo BASED(io_notify_resume)
+ b BASED(io_return)
io_work_done:
#
# _TIF_MCCK_PENDING is set, call handler
#
io_mcck_pending:
+ # TRACE_IRQS_ON already done at io_return
l %r1,BASED(.Ls390_handle_mcck)
basr %r14,%r1 # TIF bit will be cleared by handler
- b BASED(io_work_loop)
+ TRACE_IRQS_OFF
+ b BASED(io_return)
#
# _TIF_NEED_RESCHED is set, call schedule
#
io_reschedule:
- TRACE_IRQS_ON
+ # TRACE_IRQS_ON already done at io_return
l %r1,BASED(.Lschedule)
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
basr %r14,%r1 # call scheduler
stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
TRACE_IRQS_OFF
- tm __TI_flags+3(%r9),_TIF_WORK_INT
- bz BASED(io_restore) # there is no work to do
- b BASED(io_work_loop)
+ b BASED(io_return)
#
# _TIF_SIGPENDING is set, call do_signal
#
io_sigpending:
- TRACE_IRQS_ON
+ # TRACE_IRQS_ON already done at io_return
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
la %r2,SP_PTREGS(%r15) # load pt_regs
l %r1,BASED(.Ldo_signal)
basr %r14,%r1 # call do_signal
stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
TRACE_IRQS_OFF
- b BASED(io_work_loop)
+ b BASED(io_return)
#
# _TIF_SIGPENDING is set, call do_signal
#
io_notify_resume:
- TRACE_IRQS_ON
+ # TRACE_IRQS_ON already done at io_return
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
la %r2,SP_PTREGS(%r15) # load pt_regs
l %r1,BASED(.Ldo_notify_resume)
basr %r14,%r1 # call do_signal
stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
TRACE_IRQS_OFF
- b BASED(io_work_loop)
+ b BASED(io_return)
/*
* External interrupt handler routine
@@ -917,18 +897,18 @@ stack_overflow:
cleanup_table_system_call:
.long system_call + 0x80000000, sysc_do_svc + 0x80000000
-cleanup_table_sysc_return:
- .long sysc_return + 0x80000000, sysc_leave + 0x80000000
-cleanup_table_sysc_leave:
- .long sysc_leave + 0x80000000, sysc_done + 0x80000000
-cleanup_table_sysc_work_loop:
- .long sysc_work_loop + 0x80000000, sysc_work_done + 0x80000000
-cleanup_table_io_return:
- .long io_return + 0x80000000, io_leave + 0x80000000
-cleanup_table_io_leave:
- .long io_leave + 0x80000000, io_done + 0x80000000
-cleanup_table_io_work_loop:
- .long io_work_loop + 0x80000000, io_work_done + 0x80000000
+cleanup_table_sysc_tif:
+ .long sysc_tif + 0x80000000, sysc_restore + 0x80000000
+cleanup_table_sysc_restore:
+ .long sysc_restore + 0x80000000, sysc_done + 0x80000000
+cleanup_table_sysc_work_tif:
+ .long sysc_work_tif + 0x80000000, sysc_work_done + 0x80000000
+cleanup_table_io_tif:
+ .long io_tif + 0x80000000, io_restore + 0x80000000
+cleanup_table_io_restore:
+ .long io_restore + 0x80000000, io_done + 0x80000000
+cleanup_table_io_work_tif:
+ .long io_work_tif + 0x80000000, io_work_done + 0x80000000
cleanup_critical:
clc 4(4,%r12),BASED(cleanup_table_system_call)
@@ -936,35 +916,35 @@ cleanup_critical:
clc 4(4,%r12),BASED(cleanup_table_system_call+4)
bl BASED(cleanup_system_call)
0:
- clc 4(4,%r12),BASED(cleanup_table_sysc_return)
+ clc 4(4,%r12),BASED(cleanup_table_sysc_tif)
bl BASED(0f)
- clc 4(4,%r12),BASED(cleanup_table_sysc_return+4)
- bl BASED(cleanup_sysc_return)
+ clc 4(4,%r12),BASED(cleanup_table_sysc_tif+4)
+ bl BASED(cleanup_sysc_tif)
0:
- clc 4(4,%r12),BASED(cleanup_table_sysc_leave)
+ clc 4(4,%r12),BASED(cleanup_table_sysc_restore)
bl BASED(0f)
- clc 4(4,%r12),BASED(cleanup_table_sysc_leave+4)
- bl BASED(cleanup_sysc_leave)
+ clc 4(4,%r12),BASED(cleanup_table_sysc_restore+4)
+ bl BASED(cleanup_sysc_restore)
0:
- clc 4(4,%r12),BASED(cleanup_table_sysc_work_loop)
+ clc 4(4,%r12),BASED(cleanup_table_sysc_work_tif)
bl BASED(0f)
- clc 4(4,%r12),BASED(cleanup_table_sysc_work_loop+4)
- bl BASED(cleanup_sysc_return)
+ clc 4(4,%r12),BASED(cleanup_table_sysc_work_tif+4)
+ bl BASED(cleanup_sysc_tif)
0:
- clc 4(4,%r12),BASED(cleanup_table_io_return)
+ clc 4(4,%r12),BASED(cleanup_table_io_tif)
bl BASED(0f)
- clc 4(4,%r12),BASED(cleanup_table_io_return+4)
- bl BASED(cleanup_io_return)
+ clc 4(4,%r12),BASED(cleanup_table_io_tif+4)
+ bl BASED(cleanup_io_tif)
0:
- clc 4(4,%r12),BASED(cleanup_table_io_leave)
+ clc 4(4,%r12),BASED(cleanup_table_io_restore)
bl BASED(0f)
- clc 4(4,%r12),BASED(cleanup_table_io_leave+4)
- bl BASED(cleanup_io_leave)
+ clc 4(4,%r12),BASED(cleanup_table_io_restore+4)
+ bl BASED(cleanup_io_restore)
0:
- clc 4(4,%r12),BASED(cleanup_table_io_work_loop)
+ clc 4(4,%r12),BASED(cleanup_table_io_work_tif)
bl BASED(0f)
- clc 4(4,%r12),BASED(cleanup_table_io_work_loop+4)
- bl BASED(cleanup_io_work_loop)
+ clc 4(4,%r12),BASED(cleanup_table_io_work_tif+4)
+ bl BASED(cleanup_io_work_tif)
0:
br %r14
@@ -1011,17 +991,17 @@ cleanup_system_call_insn:
.long sysc_stime + 0x80000000
.long sysc_update + 0x80000000
-cleanup_sysc_return:
+cleanup_sysc_tif:
mvc __LC_RETURN_PSW(4),0(%r12)
- mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_sysc_return)
+ mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_sysc_tif)
la %r12,__LC_RETURN_PSW
br %r14
-cleanup_sysc_leave:
- clc 4(4,%r12),BASED(cleanup_sysc_leave_insn)
+cleanup_sysc_restore:
+ clc 4(4,%r12),BASED(cleanup_sysc_restore_insn)
be BASED(2f)
mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
- clc 4(4,%r12),BASED(cleanup_sysc_leave_insn+4)
+ clc 4(4,%r12),BASED(cleanup_sysc_restore_insn+4)
be BASED(2f)
mvc __LC_RETURN_PSW(8),SP_PSW(%r15)
c %r12,BASED(.Lmck_old_psw)
@@ -1033,27 +1013,27 @@ cleanup_sysc_leave:
l %r15,SP_R15(%r15)
2: la %r12,__LC_RETURN_PSW
br %r14
-cleanup_sysc_leave_insn:
+cleanup_sysc_restore_insn:
.long sysc_done - 4 + 0x80000000
.long sysc_done - 8 + 0x80000000
-cleanup_io_return:
+cleanup_io_tif:
mvc __LC_RETURN_PSW(4),0(%r12)
- mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_return)
+ mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_tif)
la %r12,__LC_RETURN_PSW
br %r14
-cleanup_io_work_loop:
+cleanup_io_work_tif:
mvc __LC_RETURN_PSW(4),0(%r12)
- mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_work_loop)
+ mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_work_tif)
la %r12,__LC_RETURN_PSW
br %r14
-cleanup_io_leave:
- clc 4(4,%r12),BASED(cleanup_io_leave_insn)
+cleanup_io_restore:
+ clc 4(4,%r12),BASED(cleanup_io_restore_insn)
be BASED(2f)
mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
- clc 4(4,%r12),BASED(cleanup_io_leave_insn+4)
+ clc 4(4,%r12),BASED(cleanup_io_restore_insn+4)
be BASED(2f)
mvc __LC_RETURN_PSW(8),SP_PSW(%r15)
c %r12,BASED(.Lmck_old_psw)
@@ -1065,7 +1045,7 @@ cleanup_io_leave:
l %r15,SP_R15(%r15)
2: la %r12,__LC_RETURN_PSW
br %r14
-cleanup_io_leave_insn:
+cleanup_io_restore_insn:
.long io_done - 4 + 0x80000000
.long io_done - 8 + 0x80000000
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 52106d53271c..3723045138e3 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -61,28 +61,33 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
#ifdef CONFIG_TRACE_IRQFLAGS
.macro TRACE_IRQS_ON
- basr %r2,%r0
- brasl %r14,trace_hardirqs_on_caller
+ basr %r2,%r0
+ brasl %r14,trace_hardirqs_on_caller
.endm
.macro TRACE_IRQS_OFF
- basr %r2,%r0
- brasl %r14,trace_hardirqs_off_caller
+ basr %r2,%r0
+ brasl %r14,trace_hardirqs_off_caller
.endm
- .macro TRACE_IRQS_CHECK
- basr %r2,%r0
+ .macro TRACE_IRQS_CHECK_ON
tm SP_PSW(%r15),0x03 # irqs enabled?
jz 0f
- brasl %r14,trace_hardirqs_on_caller
- j 1f
-0: brasl %r14,trace_hardirqs_off_caller
-1:
+ TRACE_IRQS_ON
+0:
+ .endm
+
+ .macro TRACE_IRQS_CHECK_OFF
+ tm SP_PSW(%r15),0x03 # irqs enabled?
+ jz 0f
+ TRACE_IRQS_OFF
+0:
.endm
#else
#define TRACE_IRQS_ON
#define TRACE_IRQS_OFF
-#define TRACE_IRQS_CHECK
+#define TRACE_IRQS_CHECK_ON
+#define TRACE_IRQS_CHECK_OFF
#endif
#ifdef CONFIG_LOCKDEP
@@ -267,49 +272,33 @@ sysc_noemu:
stg %r2,SP_R2(%r15) # store return value (change R2 on stack)
sysc_return:
+ LOCKDEP_SYS_EXIT
+sysc_tif:
tm __TI_flags+7(%r9),_TIF_WORK_SVC
jnz sysc_work # there is work to do (signals etc.)
sysc_restore:
-#ifdef CONFIG_TRACE_IRQFLAGS
- larl %r1,sysc_restore_trace_psw
- lpswe 0(%r1)
-sysc_restore_trace:
- TRACE_IRQS_CHECK
- LOCKDEP_SYS_EXIT
-#endif
-sysc_leave:
RESTORE_ALL __LC_RETURN_PSW,1
sysc_done:
-#ifdef CONFIG_TRACE_IRQFLAGS
- .section .data,"aw",@progbits
- .align 8
- .globl sysc_restore_trace_psw
-sysc_restore_trace_psw:
- .quad 0, sysc_restore_trace
- .previous
-#endif
-
-#
-# recheck if there is more work to do
-#
-sysc_work_loop:
- tm __TI_flags+7(%r9),_TIF_WORK_SVC
- jz sysc_restore # there is no work to do
#
-# One of the work bits is on. Find out which one.
+# There is work to do, but first we need to check if we return to userspace.
#
sysc_work:
tm SP_PSW+1(%r15),0x01 # returning to user ?
jno sysc_restore
+
+#
+# One of the work bits is on. Find out which one.
+#
+sysc_work_tif:
tm __TI_flags+7(%r9),_TIF_MCCK_PENDING
jo sysc_mcck_pending
tm __TI_flags+7(%r9),_TIF_NEED_RESCHED
jo sysc_reschedule
tm __TI_flags+7(%r9),_TIF_SIGPENDING
- jnz sysc_sigpending
+ jo sysc_sigpending
tm __TI_flags+7(%r9),_TIF_NOTIFY_RESUME
- jnz sysc_notify_resume
+ jo sysc_notify_resume
tm __TI_flags+7(%r9),_TIF_RESTART_SVC
jo sysc_restart
tm __TI_flags+7(%r9),_TIF_SINGLE_STEP
@@ -321,14 +310,14 @@ sysc_work_done:
# _TIF_NEED_RESCHED is set, call schedule
#
sysc_reschedule:
- larl %r14,sysc_work_loop
- jg schedule # return point is sysc_return
+ larl %r14,sysc_return
+ jg schedule # return point is sysc_return
#
# _TIF_MCCK_PENDING is set, call handler
#
sysc_mcck_pending:
- larl %r14,sysc_work_loop
+ larl %r14,sysc_return
jg s390_handle_mcck # TIF bit will be cleared by handler
#
@@ -342,14 +331,14 @@ sysc_sigpending:
jo sysc_restart
tm __TI_flags+7(%r9),_TIF_SINGLE_STEP
jo sysc_singlestep
- j sysc_work_loop
+ j sysc_return
#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
sysc_notify_resume:
la %r2,SP_PTREGS(%r15) # load pt_regs
- larl %r14,sysc_work_loop
+ larl %r14,sysc_return
jg do_notify_resume # call do_notify_resume
#
@@ -435,12 +424,14 @@ kernel_execve:
br %r14
# execve succeeded.
0: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts
+# TRACE_IRQS_OFF
lg %r15,__LC_KERNEL_STACK # load ksp
aghi %r15,-SP_SIZE # make room for registers & psw
lg %r13,__LC_SVC_NEW_PSW+8
lg %r9,__LC_THREAD_INFO
mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+# TRACE_IRQS_ON
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
brasl %r14,execve_tail
j sysc_return
@@ -476,9 +467,9 @@ pgm_check_handler:
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime:
+ TRACE_IRQS_CHECK_OFF
lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
mvc SP_ARGS(8,%r15),__LC_LAST_BREAK
- TRACE_IRQS_OFF
lgf %r3,__LC_PGM_ILC # load program interruption code
lghi %r8,0x7f
ngr %r8,%r3
@@ -487,8 +478,10 @@ pgm_do_call:
larl %r1,pgm_check_table
lg %r1,0(%r8,%r1) # load address of handler routine
la %r2,SP_PTREGS(%r15) # address of register-save area
- larl %r14,sysc_return
- br %r1 # branch to interrupt-handler
+ basr %r14,%r1 # branch to interrupt-handler
+pgm_exit:
+ TRACE_IRQS_CHECK_ON
+ j sysc_return
#
# handle per exception
@@ -515,8 +508,8 @@ pgm_per_std:
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime2:
+ TRACE_IRQS_CHECK_OFF
lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
- TRACE_IRQS_OFF
lg %r1,__TI_task(%r9)
tm SP_PSW+1(%r15),0x01 # kernel per event ?
jz kernel_per
@@ -527,7 +520,7 @@ pgm_no_vtime2:
lgf %r3,__LC_PGM_ILC # load program interruption code
lghi %r8,0x7f
ngr %r8,%r3 # clear per-event-bit and ilc
- je sysc_return
+ je pgm_exit
j pgm_do_call
#
@@ -541,14 +534,15 @@ pgm_svcper:
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore
lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
+ TRACE_IRQS_OFF
lg %r8,__TI_task(%r9)
mvc __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID
mvc __THREAD_per+__PER_address(8,%r8),__LC_PER_ADDRESS
mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
oi __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
TRACE_IRQS_ON
- lmg %r2,%r6,SP_R2(%r15) # load svc arguments
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
+ lmg %r2,%r6,SP_R2(%r15) # load svc arguments
j sysc_do_svc
#
@@ -557,8 +551,8 @@ pgm_svcper:
kernel_per:
xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
la %r2,SP_PTREGS(%r15) # address of register-save area
- larl %r14,sysc_restore # load adr. of system ret, no work
- jg do_single_step # branch to do_single_step
+ brasl %r14,do_single_step
+ j pgm_exit
/*
* IO interrupt handler routine
@@ -581,148 +575,127 @@ io_no_vtime:
la %r2,SP_PTREGS(%r15) # address of register-save area
brasl %r14,do_IRQ # call standard irq handler
io_return:
+ LOCKDEP_SYS_EXIT
+ TRACE_IRQS_ON
+io_tif:
tm __TI_flags+7(%r9),_TIF_WORK_INT
jnz io_work # there is work to do (signals etc.)
io_restore:
-#ifdef CONFIG_TRACE_IRQFLAGS
- larl %r1,io_restore_trace_psw
- lpswe 0(%r1)
-io_restore_trace:
- TRACE_IRQS_CHECK
- LOCKDEP_SYS_EXIT
-#endif
-io_leave:
RESTORE_ALL __LC_RETURN_PSW,0
io_done:
-#ifdef CONFIG_TRACE_IRQFLAGS
- .section .data,"aw",@progbits
- .align 8
- .globl io_restore_trace_psw
-io_restore_trace_psw:
- .quad 0, io_restore_trace
- .previous
-#endif
-
#
-# There is work todo, we need to check if we return to userspace, then
-# check, if we are in SIE, if yes leave it
+# There is work to do, find out in which context we have been interrupted:
+# 1) if we return to user space we can do all _TIF_WORK_INT work
+# 2) if we return to kernel code and kvm is enabled check if we need to
+# modify the psw to leave SIE
+# 3) if we return to kernel code and preemptive scheduling is enabled check
+# the preemption counter and if it is zero call preempt_schedule_irq
+# Before any work can be done, a switch to the kernel stack is required.
#
io_work:
tm SP_PSW+1(%r15),0x01 # returning to user ?
-#ifndef CONFIG_PREEMPT
-#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
- jnz io_work_user # yes -> no need to check for SIE
- la %r1, BASED(sie_opcode) # we return to kernel here
- lg %r2, SP_PSW+8(%r15)
- clc 0(2,%r1), 0(%r2) # is current instruction = SIE?
- jne io_restore # no-> return to kernel
- lg %r1, SP_PSW+8(%r15) # yes-> add 4 bytes to leave SIE
- aghi %r1, 4
- stg %r1, SP_PSW+8(%r15)
- j io_restore # return to kernel
-#else
- jno io_restore # no-> skip resched & signal
-#endif
-#else
- jnz io_work_user # yes -> do resched & signal
+ jo io_work_user # yes -> do resched & signal
#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
- la %r1, BASED(sie_opcode)
- lg %r2, SP_PSW+8(%r15)
- clc 0(2,%r1), 0(%r2) # is current instruction = SIE?
+ lg %r2,SP_PSW+8(%r15) # check if current instruction is SIE
+ lh %r1,0(%r2)
+ chi %r1,-19948 # signed 16 bit compare with 0xb214
jne 0f # no -> leave PSW alone
- lg %r1, SP_PSW+8(%r15) # yes-> add 4 bytes to leave SIE
- aghi %r1, 4
- stg %r1, SP_PSW+8(%r15)
+ aghi %r2,4 # yes-> add 4 bytes to leave SIE
+ stg %r2,SP_PSW+8(%r15)
0:
#endif
+#ifdef CONFIG_PREEMPT
# check for preemptive scheduling
icm %r0,15,__TI_precount(%r9)
jnz io_restore # preemption is disabled
+ tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
+ jno io_restore
# switch to kernel stack
lg %r1,SP_R15(%r15)
aghi %r1,-SP_SIZE
mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
lgr %r15,%r1
-io_resume_loop:
- tm __TI_flags+7(%r9),_TIF_NEED_RESCHED
- jno io_restore
- larl %r14,io_resume_loop
- jg preempt_schedule_irq
+ # TRACE_IRQS_ON already done at io_return, call
+ # TRACE_IRQS_OFF to keep things symmetrical
+ TRACE_IRQS_OFF
+ brasl %r14,preempt_schedule_irq
+ j io_return
+#else
+ j io_restore
#endif
+#
+# Need to do work before returning to userspace, switch to kernel stack
+#
io_work_user:
lg %r1,__LC_KERNEL_STACK
aghi %r1,-SP_SIZE
mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
lgr %r15,%r1
+
#
# One of the work bits is on. Find out which one.
-# Checked are: _TIF_SIGPENDING, _TIF_RESTORE_SIGPENDING, _TIF_NEED_RESCHED
+# Checked are: _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_NEED_RESCHED
# and _TIF_MCCK_PENDING
#
-io_work_loop:
+io_work_tif:
tm __TI_flags+7(%r9),_TIF_MCCK_PENDING
jo io_mcck_pending
tm __TI_flags+7(%r9),_TIF_NEED_RESCHED
jo io_reschedule
tm __TI_flags+7(%r9),_TIF_SIGPENDING
- jnz io_sigpending
+ jo io_sigpending
tm __TI_flags+7(%r9),_TIF_NOTIFY_RESUME
- jnz io_notify_resume
- j io_restore
+ jo io_notify_resume
+ j io_return
io_work_done:
-#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
-sie_opcode:
- .long 0xb2140000
-#endif
-
#
# _TIF_MCCK_PENDING is set, call handler
#
io_mcck_pending:
+ # TRACE_IRQS_ON already done at io_return
brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler
- j io_work_loop
+ TRACE_IRQS_OFF
+ j io_return
#
# _TIF_NEED_RESCHED is set, call schedule
#
io_reschedule:
- TRACE_IRQS_ON
+ # TRACE_IRQS_ON already done at io_return
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
brasl %r14,schedule # call scheduler
stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
TRACE_IRQS_OFF
- tm __TI_flags+7(%r9),_TIF_WORK_INT
- jz io_restore # there is no work to do
- j io_work_loop
+ j io_return
#
# _TIF_SIGPENDING or is set, call do_signal
#
io_sigpending:
- TRACE_IRQS_ON
+ # TRACE_IRQS_ON already done at io_return
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
la %r2,SP_PTREGS(%r15) # load pt_regs
brasl %r14,do_signal # call do_signal
stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
TRACE_IRQS_OFF
- j io_work_loop
+ j io_return
#
# _TIF_NOTIFY_RESUME or is set, call do_notify_resume
#
io_notify_resume:
- TRACE_IRQS_ON
+ # TRACE_IRQS_ON already done at io_return
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
la %r2,SP_PTREGS(%r15) # load pt_regs
brasl %r14,do_notify_resume # call do_notify_resume
stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
TRACE_IRQS_OFF
- j io_work_loop
+ j io_return
/*
* External interrupt handler routine
@@ -899,18 +872,18 @@ stack_overflow:
cleanup_table_system_call:
.quad system_call, sysc_do_svc
-cleanup_table_sysc_return:
- .quad sysc_return, sysc_leave
-cleanup_table_sysc_leave:
- .quad sysc_leave, sysc_done
-cleanup_table_sysc_work_loop:
- .quad sysc_work_loop, sysc_work_done
-cleanup_table_io_return:
- .quad io_return, io_leave
-cleanup_table_io_leave:
- .quad io_leave, io_done
-cleanup_table_io_work_loop:
- .quad io_work_loop, io_work_done
+cleanup_table_sysc_tif:
+ .quad sysc_tif, sysc_restore
+cleanup_table_sysc_restore:
+ .quad sysc_restore, sysc_done
+cleanup_table_sysc_work_tif:
+ .quad sysc_work_tif, sysc_work_done
+cleanup_table_io_tif:
+ .quad io_tif, io_restore
+cleanup_table_io_restore:
+ .quad io_restore, io_done
+cleanup_table_io_work_tif:
+ .quad io_work_tif, io_work_done
cleanup_critical:
clc 8(8,%r12),BASED(cleanup_table_system_call)
@@ -918,35 +891,35 @@ cleanup_critical:
clc 8(8,%r12),BASED(cleanup_table_system_call+8)
jl cleanup_system_call
0:
- clc 8(8,%r12),BASED(cleanup_table_sysc_return)
+ clc 8(8,%r12),BASED(cleanup_table_sysc_tif)
jl 0f
- clc 8(8,%r12),BASED(cleanup_table_sysc_return+8)
- jl cleanup_sysc_return
+ clc 8(8,%r12),BASED(cleanup_table_sysc_tif+8)
+ jl cleanup_sysc_tif
0:
- clc 8(8,%r12),BASED(cleanup_table_sysc_leave)
+ clc 8(8,%r12),BASED(cleanup_table_sysc_restore)
jl 0f
- clc 8(8,%r12),BASED(cleanup_table_sysc_leave+8)
- jl cleanup_sysc_leave
+ clc 8(8,%r12),BASED(cleanup_table_sysc_restore+8)
+ jl cleanup_sysc_restore
0:
- clc 8(8,%r12),BASED(cleanup_table_sysc_work_loop)
+ clc 8(8,%r12),BASED(cleanup_table_sysc_work_tif)
jl 0f
- clc 8(8,%r12),BASED(cleanup_table_sysc_work_loop+8)
- jl cleanup_sysc_return
+ clc 8(8,%r12),BASED(cleanup_table_sysc_work_tif+8)
+ jl cleanup_sysc_tif
0:
- clc 8(8,%r12),BASED(cleanup_table_io_return)
+ clc 8(8,%r12),BASED(cleanup_table_io_tif)
jl 0f
- clc 8(8,%r12),BASED(cleanup_table_io_return+8)
- jl cleanup_io_return
+ clc 8(8,%r12),BASED(cleanup_table_io_tif+8)
+ jl cleanup_io_tif
0:
- clc 8(8,%r12),BASED(cleanup_table_io_leave)
+ clc 8(8,%r12),BASED(cleanup_table_io_restore)
jl 0f
- clc 8(8,%r12),BASED(cleanup_table_io_leave+8)
- jl cleanup_io_leave
+ clc 8(8,%r12),BASED(cleanup_table_io_restore+8)
+ jl cleanup_io_restore
0:
- clc 8(8,%r12),BASED(cleanup_table_io_work_loop)
+ clc 8(8,%r12),BASED(cleanup_table_io_work_tif)
jl 0f
- clc 8(8,%r12),BASED(cleanup_table_io_work_loop+8)
- jl cleanup_io_work_loop
+ clc 8(8,%r12),BASED(cleanup_table_io_work_tif+8)
+ jl cleanup_io_work_tif
0:
br %r14
@@ -993,16 +966,16 @@ cleanup_system_call_insn:
.quad sysc_stime
.quad sysc_update
-cleanup_sysc_return:
+cleanup_sysc_tif:
mvc __LC_RETURN_PSW(8),0(%r12)
- mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_sysc_return)
+ mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_sysc_tif)
la %r12,__LC_RETURN_PSW
br %r14
-cleanup_sysc_leave:
- clc 8(8,%r12),BASED(cleanup_sysc_leave_insn)
+cleanup_sysc_restore:
+ clc 8(8,%r12),BASED(cleanup_sysc_restore_insn)
je 3f
- clc 8(8,%r12),BASED(cleanup_sysc_leave_insn+8)
+ clc 8(8,%r12),BASED(cleanup_sysc_restore_insn+8)
jhe 0f
mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
0: mvc __LC_RETURN_PSW(16),SP_PSW(%r15)
@@ -1015,26 +988,26 @@ cleanup_sysc_leave:
lg %r15,SP_R15(%r15)
3: la %r12,__LC_RETURN_PSW
br %r14
-cleanup_sysc_leave_insn:
+cleanup_sysc_restore_insn:
.quad sysc_done - 4
.quad sysc_done - 16
-cleanup_io_return:
+cleanup_io_tif:
mvc __LC_RETURN_PSW(8),0(%r12)
- mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_return)
+ mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_tif)
la %r12,__LC_RETURN_PSW
br %r14
-cleanup_io_work_loop:
+cleanup_io_work_tif:
mvc __LC_RETURN_PSW(8),0(%r12)
- mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_work_loop)
+ mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_work_tif)
la %r12,__LC_RETURN_PSW
br %r14
-cleanup_io_leave:
- clc 8(8,%r12),BASED(cleanup_io_leave_insn)
+cleanup_io_restore:
+ clc 8(8,%r12),BASED(cleanup_io_restore_insn)
je 3f
- clc 8(8,%r12),BASED(cleanup_io_leave_insn+8)
+ clc 8(8,%r12),BASED(cleanup_io_restore_insn+8)
jhe 0f
mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
0: mvc __LC_RETURN_PSW(16),SP_PSW(%r15)
@@ -1047,7 +1020,7 @@ cleanup_io_leave:
lg %r15,SP_R15(%r15)
3: la %r12,__LC_RETURN_PSW
br %r14
-cleanup_io_leave_insn:
+cleanup_io_restore_insn:
.quad io_done - 4
.quad io_done - 16
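
The SIE-detection change in the entry64.S hunk above replaces a clc against a stored constant with a halfword load and a chi against -19948. The in-line comment's arithmetic checks out: the SIE opcode 0xb214 is 45588 unsigned, and reinterpreted as a signed 16-bit value that is 45588 - 65536 = -19948. A tiny standalone check (illustration only):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* 0xb214 reinterpreted as signed 16-bit: 45588 - 65536 = -19948 */
	short sie_opcode = (short)0xb214;

	assert(sie_opcode == -19948);
	printf("0xb214 as signed 16-bit: %d\n", sie_opcode);
	return 0;
}
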
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 9d1f76702d47..51838ad42d56 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -328,8 +328,8 @@ iplstart:
#
# reset files in VM reader
#
- stidp __LC_CPUID # store cpuid
- tm __LC_CPUID,0xff # running VM ?
+ stidp __LC_SAVE_AREA # store cpuid
+ tm __LC_SAVE_AREA,0xff # running VM ?
bno .Lnoreset
la %r2,.Lreset
lhi %r3,26
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 0729f36c2fe3..ecb2d02b02e4 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -18,24 +18,42 @@
#include <asm/lowcore.h>
#include <asm/param.h>
+static DEFINE_PER_CPU(struct cpuid, cpu_id);
+
+/*
+ * cpu_init - initializes state that is per-CPU.
+ */
+void __cpuinit cpu_init(void)
+{
+ struct cpuid *id = &per_cpu(cpu_id, smp_processor_id());
+
+ get_cpu_id(id);
+ atomic_inc(&init_mm.mm_count);
+ current->active_mm = &init_mm;
+ BUG_ON(current->mm);
+ enter_lazy_tlb(&init_mm, current);
+}
+
+/*
+ * print_cpu_info - print basic information about a cpu
+ */
void __cpuinit print_cpu_info(void)
{
+ struct cpuid *id = &per_cpu(cpu_id, smp_processor_id());
+
pr_info("Processor %d started, address %d, identification %06X\n",
- S390_lowcore.cpu_nr, S390_lowcore.cpu_addr,
- S390_lowcore.cpu_id.ident);
+ S390_lowcore.cpu_nr, S390_lowcore.cpu_addr, id->ident);
}
/*
* show_cpuinfo - Get information on one CPU for use by procfs.
*/
-
static int show_cpuinfo(struct seq_file *m, void *v)
{
static const char *hwcap_str[10] = {
"esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
"edat", "etf3eh", "highgprs"
};
- struct _lowcore *lc;
unsigned long n = (unsigned long) v - 1;
int i;
@@ -55,19 +73,12 @@ static int show_cpuinfo(struct seq_file *m, void *v)
}
if (cpu_online(n)) {
-#ifdef CONFIG_SMP
- lc = (smp_processor_id() == n) ?
- &S390_lowcore : lowcore_ptr[n];
-#else
- lc = &S390_lowcore;
-#endif
+ struct cpuid *id = &per_cpu(cpu_id, n);
seq_printf(m, "processor %li: "
"version = %02X, "
"identification = %06X, "
"machine = %04X\n",
- n, lc->cpu_id.version,
- lc->cpu_id.ident,
- lc->cpu_id.machine);
+ n, id->version, id->ident, id->machine);
}
preempt_enable();
return 0;
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 91625f759ccd..598752499c3e 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -113,22 +113,6 @@ static struct resource data_resource = {
};
/*
- * cpu_init() initializes state that is per-CPU.
- */
-void __cpuinit cpu_init(void)
-{
- /*
- * Store processor id in lowcore (used e.g. in timer_interrupt)
- */
- get_cpu_id(&S390_lowcore.cpu_id);
-
- atomic_inc(&init_mm.mm_count);
- current->active_mm = &init_mm;
- BUG_ON(current->mm);
- enter_lazy_tlb(&init_mm, current);
-}
-
-/*
* condev= and conmode= setup parameter.
*/
@@ -385,10 +369,6 @@ static void setup_addressing_mode(void)
pr_info("Address spaces switched, "
"mvcos not available\n");
}
-#ifdef CONFIG_TRACE_IRQFLAGS
- sysc_restore_trace_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
- io_restore_trace_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
-#endif
}
static void __init
@@ -695,6 +675,7 @@ static void __init setup_hwcaps(void)
static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
unsigned long long facility_list_extended;
unsigned int facility_list;
+ struct cpuid cpu_id;
int i;
facility_list = stfl();
@@ -756,7 +737,8 @@ static void __init setup_hwcaps(void)
*/
elf_hwcap |= HWCAP_S390_HIGH_GPRS;
- switch (S390_lowcore.cpu_id.machine) {
+ get_cpu_id(&cpu_id);
+ switch (cpu_id.machine) {
case 0x9672:
#if !defined(CONFIG_64BIT)
default: /* Use "g5" as default for 31 bit kernels. */
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index c56d3f56d020..1f066e46e83e 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -264,7 +264,7 @@ restore_registers:
lghi %r2,0
br %r14
- .section .data.nosave,"aw",@progbits
+ .section .data..nosave,"aw",@progbits
.align 8
.Ldisabled_wait_31:
.long 0x000a0000,0x00000000
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index d906bf19c14a..a2163c95eb98 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -391,7 +391,6 @@ static void __init time_init_wq(void)
if (time_sync_wq)
return;
time_sync_wq = create_singlethread_workqueue("timesync");
- stop_machine_create();
}
/*
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 247b4c2d1e51..bcef00766a64 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -37,7 +37,8 @@ struct tl_cpu {
};
struct tl_container {
- unsigned char reserved[8];
+ unsigned char reserved[7];
+ unsigned char id;
};
union tl_entry {
@@ -58,6 +59,7 @@ struct tl_info {
struct core_info {
struct core_info *next;
+ unsigned char id;
cpumask_t mask;
};
@@ -73,6 +75,7 @@ static DECLARE_WORK(topology_work, topology_work_fn);
static DEFINE_SPINLOCK(topology_lock);
cpumask_t cpu_core_map[NR_CPUS];
+unsigned char cpu_core_id[NR_CPUS];
static cpumask_t cpu_coregroup_map(unsigned int cpu)
{
@@ -116,6 +119,7 @@ static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
for_each_present_cpu(lcpu) {
if (cpu_logical_map(lcpu) == rcpu) {
cpu_set(lcpu, core->mask);
+ cpu_core_id[lcpu] = core->id;
smp_cpu_polarization[lcpu] = tl_cpu->pp;
}
}
@@ -158,6 +162,7 @@ static void tl_to_cores(struct tl_info *info)
break;
case 1:
core = core->next;
+ core->id = tle->container.id;
break;
case 0:
add_cpus_to_core(&tle->cpu, core);
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 6bc9c197aa91..6b83870507d5 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -102,11 +102,7 @@ static void vdso_init_per_cpu_data(int cpu, struct vdso_per_cpu_data *vpcd)
/*
* Allocate/free per cpu vdso data.
*/
-#ifdef CONFIG_64BIT
#define SEGMENT_ORDER 2
-#else
-#define SEGMENT_ORDER 1
-#endif
int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore)
{
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 49292869a5cd..8093e6f47f49 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -341,11 +341,13 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
rc = kvm_vcpu_init(vcpu, kvm, id);
if (rc)
- goto out_free_cpu;
+ goto out_free_sie_block;
VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
vcpu->arch.sie_block);
return vcpu;
+out_free_sie_block:
+ free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
kfree(vcpu);
out_nomem:
@@ -750,7 +752,7 @@ gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
static int __init kvm_s390_init(void)
{
int ret;
- ret = kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
+ ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
if (ret)
return ret;
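
The first hunk above plugs a leak in kvm_arch_vcpu_create(): if kvm_vcpu_init() fails, the already-allocated sie_block page must be freed before the vcpu itself. A generic, self-contained sketch of that reverse-order goto unwinding pattern follows; the names are made up for illustration and are not the KVM API.

#include <stdlib.h>

struct vcpu {
	void *sie_block;
};

static int vcpu_init(struct vcpu *vcpu)
{
	(void)vcpu;
	return -1;	/* pretend the init step fails */
}

static struct vcpu *vcpu_create(void)
{
	struct vcpu *vcpu = calloc(1, sizeof(*vcpu));
	if (!vcpu)
		goto out_nomem;

	vcpu->sie_block = calloc(1, 4096);
	if (!vcpu->sie_block)
		goto out_free_cpu;

	if (vcpu_init(vcpu))
		goto out_free_sie_block;	/* unwind in reverse allocation order */

	return vcpu;

out_free_sie_block:
	free(vcpu->sie_block);
out_free_cpu:
	free(vcpu);
out_nomem:
	return NULL;
}

int main(void)
{
	return vcpu_create() != NULL;	/* expect NULL, since init "fails" above */
}
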
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 60f09ab3672c..cfa9d1777457 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -72,7 +72,7 @@ static inline void kvm_s390_vcpu_set_mem(struct kvm_vcpu *vcpu)
struct kvm_memslots *memslots;
idx = srcu_read_lock(&vcpu->kvm->srcu);
- memslots = rcu_dereference(vcpu->kvm->memslots);
+ memslots = kvm_memslots(vcpu->kvm);
mem = &memslots->memslots[0];
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 8d90564c2bcf..df59351d5781 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -44,6 +44,7 @@ config SUPERH32
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_ARCH_KGDB
select HAVE_HW_BREAKPOINT
+ select HAVE_MIXED_BREAKPOINTS_REGS
select PERF_EVENTS if HAVE_HW_BREAKPOINT
select ARCH_HIBERNATION_POSSIBLE if MMU
@@ -157,7 +158,6 @@ config LOCKDEP_SUPPORT
config HAVE_LATENCYTOP_SUPPORT
def_bool y
- depends on !SMP
config ARCH_HAS_ILOG2_U32
def_bool n
@@ -634,7 +634,7 @@ config KEXEC
config CRASH_DUMP
bool "kernel crash dumps (EXPERIMENTAL)"
- depends on SUPERH32 && EXPERIMENTAL
+ depends on SUPERH32 && EXPERIMENTAL && BROKEN_ON_SMP
help
Generate crash dump after being started by kexec.
This should be normally only set in special crash dump kernels
@@ -706,6 +706,13 @@ config NR_CPUS
This is purely to save memory - each supported CPU adds
approximately eight kilobytes to the kernel image.
+config HOTPLUG_CPU
+ bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
+ depends on SMP && HOTPLUG && EXPERIMENTAL
+ help
+ Say Y here to experiment with turning CPUs off and on. CPUs
+ can be controlled through /sys/devices/system/cpu.
+
source "kernel/Kconfig.preempt"
config GUSA
@@ -732,6 +739,8 @@ config GUSA_RB
LLSC, this should be more efficient than the other alternative of
disabling interrupts around the atomic sequence.
+source "drivers/sh/Kconfig"
+
endmenu
menu "Boot options"
@@ -863,4 +872,20 @@ source "security/Kconfig"
source "crypto/Kconfig"
+menuconfig VIRTUALIZATION
+ bool "Virtualization"
+ default n
+ ---help---
+ Say Y here to get to see options for using your Linux host to run other
+ operating systems inside virtual machines (guests).
+ This option alone does not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and disabled.
+
+if VIRTUALIZATION
+
+source drivers/virtio/Kconfig
+
+endif # VIRTUALIZATION
+
source "lib/Kconfig"
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index 588579ac2e35..307b3a4a790b 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -83,7 +83,6 @@ defaultimage-$(CONFIG_SH_AP325RXA) := uImage
defaultimage-$(CONFIG_SH_7724_SOLUTION_ENGINE) := uImage
defaultimage-$(CONFIG_SH_7206_SOLUTION_ENGINE) := vmlinux
defaultimage-$(CONFIG_SH_7619_SOLUTION_ENGINE) := vmlinux
-defaultimage-$(CONFIG_SH_SDK7786) := vmlinux.bin
# Set some sensible Kbuild defaults
KBUILD_IMAGE := $(defaultimage-y)
diff --git a/arch/sh/boards/board-urquell.c b/arch/sh/boards/board-urquell.c
index a9bd6e3ee10b..d81c609decc7 100644
--- a/arch/sh/boards/board-urquell.c
+++ b/arch/sh/boards/board-urquell.c
@@ -24,6 +24,7 @@
#include <cpu/sh7786.h>
#include <asm/heartbeat.h>
#include <asm/sizes.h>
+#include <asm/smp-ops.h>
/*
* bit 1234 5678
@@ -203,6 +204,8 @@ static void __init urquell_setup(char **cmdline_p)
printk(KERN_INFO "Renesas Technology Corp. Urquell support.\n");
pm_power_off = urquell_power_off;
+
+ register_smp_ops(&shx3_smp_ops);
}
/*
diff --git a/arch/sh/boards/mach-sdk7786/setup.c b/arch/sh/boards/mach-sdk7786/setup.c
index f094ea2ee783..2ec1ea5cf8ef 100644
--- a/arch/sh/boards/mach-sdk7786/setup.c
+++ b/arch/sh/boards/mach-sdk7786/setup.c
@@ -21,6 +21,7 @@
#include <asm/heartbeat.h>
#include <asm/sizes.h>
#include <asm/reboot.h>
+#include <asm/smp-ops.h>
static struct resource heartbeat_resource = {
.start = 0x07fff8b0,
@@ -165,6 +166,19 @@ static void sdk7786_restart(char *cmd)
fpga_write_reg(0xa5a5, SRSTR);
}
+static void sdk7786_power_off(void)
+{
+ fpga_write_reg(fpga_read_reg(PWRCR) | PWRCR_PDWNREQ, PWRCR);
+
+ /*
+ * It can take up to 20us for the R8C to do its job, back off and
+ * wait a bit until we've been shut off. Even though newer FPGA
+ * versions don't set the ACK bit, the latency issue remains.
+ */
+ while ((fpga_read_reg(PWRCR) & PWRCR_PDWNACK) == 0)
+ cpu_sleep();
+}
+
/* Initialize the board */
static void __init sdk7786_setup(char **cmdline_p)
{
@@ -175,6 +189,9 @@ static void __init sdk7786_setup(char **cmdline_p)
pr_info("\tPCB revision:\t%d\n", fpga_read_reg(PCBRR) & 0xf);
machine_ops.restart = sdk7786_restart;
+ pm_power_off = sdk7786_power_off;
+
+ register_smp_ops(&shx3_smp_ops);
}
/*
diff --git a/arch/sh/boards/mach-x3proto/setup.c b/arch/sh/boards/mach-x3proto/setup.c
index e284592fd42a..102bf56befb4 100644
--- a/arch/sh/boards/mach-x3proto/setup.c
+++ b/arch/sh/boards/mach-x3proto/setup.c
@@ -19,6 +19,7 @@
#include <linux/usb/r8a66597.h>
#include <linux/usb/m66592.h>
#include <asm/ilsel.h>
+#include <asm/smp-ops.h>
static struct resource heartbeat_resources[] = {
[0] = {
@@ -152,7 +153,13 @@ static void __init x3proto_init_irq(void)
__raw_writel(__raw_readl(0xfe410000) | (1 << 21), 0xfe410000);
}
+static void __init x3proto_setup(char **cmdline_p)
+{
+ register_smp_ops(&shx3_smp_ops);
+}
+
static struct sh_machine_vector mv_x3proto __initmv = {
.mv_name = "x3proto",
+ .mv_setup = x3proto_setup,
.mv_init_irq = x3proto_init_irq,
};
diff --git a/arch/sh/boot/compressed/Makefile b/arch/sh/boot/compressed/Makefile
index 5d660b90943b..cfa5a087a886 100644
--- a/arch/sh/boot/compressed/Makefile
+++ b/arch/sh/boot/compressed/Makefile
@@ -14,10 +14,16 @@ OBJECTS = $(obj)/head_$(BITS).o $(obj)/misc.o $(obj)/cache.o
#
# IMAGE_OFFSET is the load offset of the compression loader
#
+ifeq ($(CONFIG_32BIT),y)
+IMAGE_OFFSET := $(shell /bin/bash -c 'printf "0x%08x" \
+ $$[$(CONFIG_MEMORY_START) + \
+ $(CONFIG_BOOT_LINK_OFFSET)]')
+else
IMAGE_OFFSET := $(shell /bin/bash -c 'printf "0x%08x" \
$$[$(CONFIG_PAGE_OFFSET) + \
$(KERNEL_MEMORY) + \
$(CONFIG_BOOT_LINK_OFFSET)]')
+endif
LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
diff --git a/arch/sh/boot/compressed/head_32.S b/arch/sh/boot/compressed/head_32.S
index 02a30935f0b9..200c1d4f1efe 100644
--- a/arch/sh/boot/compressed/head_32.S
+++ b/arch/sh/boot/compressed/head_32.S
@@ -97,7 +97,11 @@ init_stack_addr:
decompress_kernel_addr:
.long decompress_kernel
kernel_start_addr:
+#ifdef CONFIG_32BIT
+ .long ___pa(_text+PAGE_SIZE)
+#else
.long _text+PAGE_SIZE
+#endif
.align 9
fake_headers_as_bzImage:
diff --git a/arch/sh/boot/compressed/vmlinux.scr b/arch/sh/boot/compressed/vmlinux.scr
index f02382ae5c48..862d74808236 100644
--- a/arch/sh/boot/compressed/vmlinux.scr
+++ b/arch/sh/boot/compressed/vmlinux.scr
@@ -1,6 +1,6 @@
SECTIONS
{
- .rodata.compressed : {
+ .rodata..compressed : {
input_len = .;
LONG(input_data_end - input_data) input_data = .;
*(.data)
diff --git a/arch/sh/configs/sdk7786_defconfig b/arch/sh/configs/sdk7786_defconfig
index 9b331eab968e..740ada659441 100644
--- a/arch/sh/configs/sdk7786_defconfig
+++ b/arch/sh/configs/sdk7786_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc7
-# Tue Feb 9 15:27:06 2010
+# Linux kernel version: 2.6.34-rc5
+# Mon Apr 26 16:52:58 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -18,6 +18,8 @@ CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
+CONFIG_GENERIC_LOCKBREAK=y
# CONFIG_ARCH_SUSPEND_POSSIBLE is not set
CONFIG_ARCH_HIBERNATION_POSSIBLE=y
CONFIG_SYS_SUPPORTS_HUGETLBFS=y
@@ -27,7 +29,6 @@ CONFIG_SYS_SUPPORTS_PCI=y
CONFIG_SYS_SUPPORTS_TMU=y
CONFIG_STACKTRACE_SUPPORT=y
CONFIG_LOCKDEP_SUPPORT=y
-CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U32 is not set
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
@@ -35,6 +36,7 @@ CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_COHERENT=y
# CONFIG_DMA_NONCOHERENT is not set
+# CONFIG_NEED_DMA_MAP_STATE is not set
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -42,7 +44,6 @@ CONFIG_CONSTRUCTORS=y
# General setup
#
CONFIG_EXPERIMENTAL=y
-CONFIG_BROKEN_ON_SMP=y
CONFIG_LOCK_KERNEL=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
@@ -51,25 +52,27 @@ CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
CONFIG_HAVE_KERNEL_LZO=y
-CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_GZIP is not set
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
-# CONFIG_KERNEL_LZO is not set
+CONFIG_KERNEL_LZO=y
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
CONFIG_POSIX_MQUEUE=y
CONFIG_POSIX_MQUEUE_SYSCTL=y
CONFIG_BSD_PROCESS_ACCT=y
-# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+CONFIG_BSD_PROCESS_ACCT_V3=y
# CONFIG_TASKSTATS is not set
-# CONFIG_AUDIT is not set
+CONFIG_AUDIT=y
+CONFIG_AUDITSYSCALL=y
+CONFIG_AUDIT_TREE=y
#
# RCU Subsystem
#
-CONFIG_TREE_RCU=y
-# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TREE_RCU is not set
+CONFIG_TREE_PREEMPT_RCU=y
# CONFIG_TINY_RCU is not set
CONFIG_RCU_TRACE=y
CONFIG_RCU_FANOUT=32
@@ -77,32 +80,36 @@ CONFIG_RCU_FANOUT=32
CONFIG_TREE_RCU_TRACE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_GROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-CONFIG_RT_GROUP_SCHED=y
-CONFIG_USER_SCHED=y
-# CONFIG_CGROUP_SCHED is not set
+CONFIG_LOG_BUF_SHIFT=17
CONFIG_CGROUPS=y
-# CONFIG_CGROUP_DEBUG is not set
+CONFIG_CGROUP_DEBUG=y
CONFIG_CGROUP_NS=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_DEVICE=y
-# CONFIG_CPUSETS is not set
+CONFIG_CPUSETS=y
+# CONFIG_PROC_PID_CPUSET is not set
CONFIG_CGROUP_CPUACCT=y
CONFIG_RESOURCE_COUNTERS=y
CONFIG_CGROUP_MEM_RES_CTLR=y
-# CONFIG_CGROUP_MEM_RES_CTLR_SWAP is not set
+CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
CONFIG_MM_OWNER=y
# CONFIG_SYSFS_DEPRECATED_V2 is not set
-# CONFIG_RELAY is not set
+CONFIG_RELAY=y
CONFIG_NAMESPACES=y
CONFIG_UTS_NS=y
CONFIG_IPC_NS=y
CONFIG_USER_NS=y
CONFIG_PID_NS=y
CONFIG_NET_NS=y
-# CONFIG_BLK_DEV_INITRD is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_RD_LZO=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
@@ -131,7 +138,6 @@ CONFIG_PERF_USE_VMALLOC=y
# Kernel Performance Events And Counters
#
CONFIG_PERF_EVENTS=y
-CONFIG_EVENT_PROFILE=y
# CONFIG_PERF_COUNTERS is not set
# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
CONFIG_VM_EVENT_COUNTERS=y
@@ -142,13 +148,15 @@ CONFIG_SLAB=y
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
CONFIG_TRACEPOINTS=y
-# CONFIG_OPROFILE is not set
+CONFIG_OPROFILE=m
CONFIG_HAVE_OPROFILE=y
-# CONFIG_KPROBES is not set
+CONFIG_KPROBES=y
+CONFIG_KRETPROBES=y
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
+CONFIG_USE_GENERIC_SMP_HELPERS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
CONFIG_HAVE_HW_BREAKPOINT=y
@@ -157,7 +165,8 @@ CONFIG_HAVE_HW_BREAKPOINT=y
# GCOV-based kernel profiling
#
# CONFIG_GCOV_KERNEL is not set
-# CONFIG_SLOW_WORK is not set
+CONFIG_SLOW_WORK=y
+CONFIG_SLOW_WORK_DEBUG=y
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
@@ -168,9 +177,10 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_MODULE_FORCE_UNLOAD is not set
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_STOP_MACHINE=y
CONFIG_BLOCK=y
-# CONFIG_LBDAF is not set
-# CONFIG_BLK_DEV_BSG is not set
+CONFIG_LBDAF=y
+CONFIG_BLK_DEV_BSG=y
# CONFIG_BLK_DEV_INTEGRITY is not set
CONFIG_BLK_CGROUP=y
# CONFIG_DEBUG_BLK_CGROUP is not set
@@ -215,7 +225,7 @@ CONFIG_DEFAULT_IOSCHED="cfq"
# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set
# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
-# CONFIG_MUTEX_SPIN_ON_OWNER is not set
+CONFIG_MUTEX_SPIN_ON_OWNER=y
CONFIG_FREEZER=y
#
@@ -272,11 +282,10 @@ CONFIG_MEMORY_SIZE=0x20000000
# CONFIG_29BIT is not set
CONFIG_32BIT=y
CONFIG_PMB=y
-# CONFIG_PMB_LEGACY is not set
CONFIG_X2TLB=y
CONFIG_VSYSCALL=y
-# CONFIG_NUMA is not set
-CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_NUMA=y
+CONFIG_NODES_SHIFT=1
CONFIG_ARCH_SPARSEMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_MAX_ACTIVE_REGIONS=1
@@ -286,21 +295,23 @@ CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
CONFIG_ARCH_MEMORY_PROBE=y
CONFIG_IOREMAP_FIXED=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
# CONFIG_PAGE_SIZE_64KB is not set
# CONFIG_HUGETLB_PAGE_SIZE_64K is not set
# CONFIG_HUGETLB_PAGE_SIZE_256K is not set
-CONFIG_HUGETLB_PAGE_SIZE_1MB=y
+# CONFIG_HUGETLB_PAGE_SIZE_1MB is not set
# CONFIG_HUGETLB_PAGE_SIZE_4MB is not set
-# CONFIG_HUGETLB_PAGE_SIZE_64MB is not set
+CONFIG_HUGETLB_PAGE_SIZE_64MB=y
# CONFIG_HUGETLB_PAGE_SIZE_512MB is not set
CONFIG_SELECT_MEMORY_MODEL=y
# CONFIG_FLATMEM_MANUAL is not set
# CONFIG_DISCONTIGMEM_MANUAL is not set
CONFIG_SPARSEMEM_MANUAL=y
CONFIG_SPARSEMEM=y
+CONFIG_NEED_MULTIPLE_NODES=y
CONFIG_HAVE_MEMORY_PRESENT=y
CONFIG_SPARSEMEM_STATIC=y
CONFIG_MEMORY_HOTPLUG=y
@@ -313,6 +324,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=1
CONFIG_KSM=y
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_SCHED_MC=y
#
# Cache configuration
@@ -328,6 +340,7 @@ CONFIG_CPU_LITTLE_ENDIAN=y
# CONFIG_CPU_BIG_ENDIAN is not set
CONFIG_SH_FPU=y
CONFIG_SH_STORE_QUEUES=y
+CONFIG_SPECULATIVE_EXECUTION=y
CONFIG_CPU_HAS_INTEVT=y
CONFIG_CPU_HAS_SR_RB=y
CONFIG_CPU_HAS_PTEAEX=y
@@ -372,7 +385,10 @@ CONFIG_SH_CPU_FREQ=y
#
# DMA support
#
-# CONFIG_SH_DMA is not set
+CONFIG_SH_DMA=y
+# CONFIG_SH_DMA_API is not set
+CONFIG_NR_ONCHIP_DMA_CHANNELS=6
+# CONFIG_NR_DMA_CHANNELS_BOOL is not set
#
# Companion Chips
@@ -388,19 +404,21 @@ CONFIG_HEARTBEAT=y
# Kernel features
#
# CONFIG_HZ_100 is not set
-CONFIG_HZ_250=y
+# CONFIG_HZ_250 is not set
# CONFIG_HZ_300 is not set
-# CONFIG_HZ_1000 is not set
-CONFIG_HZ=250
+CONFIG_HZ_1000=y
+CONFIG_HZ=1000
CONFIG_SCHED_HRTICK=y
CONFIG_KEXEC=y
# CONFIG_CRASH_DUMP is not set
CONFIG_SECCOMP=y
-# CONFIG_SMP is not set
+CONFIG_SMP=y
+CONFIG_NR_CPUS=2
# CONFIG_PREEMPT_NONE is not set
# CONFIG_PREEMPT_VOLUNTARY is not set
CONFIG_PREEMPT=y
-CONFIG_GUSA=y
+CONFIG_INTC_USERIMASK=y
+CONFIG_INTC_BALANCING=y
#
# Boot options
@@ -410,7 +428,7 @@ CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
CONFIG_CMDLINE_OVERWRITE=y
# CONFIG_CMDLINE_EXTEND is not set
-CONFIG_CMDLINE="console=ttySC1,115200 earlyprintk=sh-sci.1,115200 root=/dev/sda1 nmi_debug=state,debounce rootdelay=10"
+CONFIG_CMDLINE="console=ttySC1,115200 earlyprintk=sh-sci.1,115200 root=/dev/sda1 nmi_debug=state,debounce rootdelay=5 pmb=iomap ignore_loglevel"
#
# Bus options
@@ -424,8 +442,7 @@ CONFIG_PCIEAER_INJECT=y
CONFIG_PCIEASPM=y
CONFIG_PCIEASPM_DEBUG=y
# CONFIG_ARCH_SUPPORTS_MSI is not set
-# CONFIG_PCI_LEGACY is not set
-CONFIG_PCI_DEBUG=y
+# CONFIG_PCI_DEBUG is not set
# CONFIG_PCI_STUB is not set
# CONFIG_PCI_IOV is not set
# CONFIG_PCCARD is not set
@@ -435,7 +452,7 @@ CONFIG_PCI_DEBUG=y
# Executable file formats
#
CONFIG_BINFMT_ELF=y
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
# CONFIG_HAVE_AOUT is not set
CONFIG_BINFMT_MISC=y
@@ -444,9 +461,11 @@ CONFIG_BINFMT_MISC=y
#
CONFIG_PM=y
CONFIG_PM_DEBUG=y
+# CONFIG_PM_ADVANCED_DEBUG is not set
CONFIG_PM_VERBOSE=y
# CONFIG_HIBERNATION is not set
CONFIG_PM_RUNTIME=y
+CONFIG_PM_OPS=y
CONFIG_CPU_IDLE=y
CONFIG_CPU_IDLE_GOV_LADDER=y
CONFIG_CPU_IDLE_GOV_MENU=y
@@ -456,7 +475,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-CONFIG_PACKET_MMAP=y
CONFIG_UNIX=y
CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
@@ -520,19 +538,14 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# Network testing
#
# CONFIG_NET_PKTGEN is not set
+# CONFIG_NET_TCPPROBE is not set
# CONFIG_NET_DROP_MONITOR is not set
# CONFIG_HAMRADIO is not set
# CONFIG_CAN is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
-CONFIG_WIRELESS=y
-# CONFIG_CFG80211 is not set
-# CONFIG_LIB80211 is not set
-
-#
-# CFG80211 needs to be enabled for MAC80211
-#
+# CONFIG_WIRELESS is not set
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -553,14 +566,118 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_DEBUG_DEVRES is not set
# CONFIG_SYS_HYPERVISOR is not set
# CONFIG_CONNECTOR is not set
-# CONFIG_MTD is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+# CONFIG_MTD_CHAR is not set
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_FTL=y
+CONFIG_NFTL=y
+# CONFIG_NFTL_RW is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+CONFIG_MTD_OOPS=m
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+CONFIG_MTD_CFI_INTELEXT=m
+CONFIG_MTD_CFI_AMDSTD=m
+CONFIG_MTD_CFI_STAA=m
+CONFIG_MTD_CFI_UTIL=y
+CONFIG_MTD_RAM=y
+CONFIG_MTD_ROM=m
+CONFIG_MTD_ABSENT=m
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PHYSMAP is not set
+# CONFIG_MTD_INTEL_VR_NOR is not set
+CONFIG_MTD_PLATRAM=y
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_PMC551 is not set
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SST25L is not set
+# CONFIG_MTD_SLRAM is not set
+CONFIG_MTD_PHRAM=y
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+CONFIG_MTD_NAND=y
+# CONFIG_MTD_NAND_VERIFY_WRITE is not set
+# CONFIG_MTD_NAND_ECC_SMC is not set
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
+CONFIG_MTD_NAND_IDS=y
+# CONFIG_MTD_NAND_DISKONCHIP is not set
+# CONFIG_MTD_NAND_CAFE is not set
+# CONFIG_MTD_NAND_NANDSIM is not set
+CONFIG_MTD_NAND_PLATFORM=y
+# CONFIG_MTD_ALAUDA is not set
+CONFIG_MTD_NAND_SH_FLCTL=m
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
+# UBI - Unsorted block images
+#
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MTD_UBI_BEB_RESERVE=1
+CONFIG_MTD_UBI_GLUEBI=m
+
+#
+# UBI debugging options
+#
+# CONFIG_MTD_UBI_DEBUG is not set
# CONFIG_PARPORT is not set
CONFIG_BLK_DEV=y
# CONFIG_BLK_CPQ_CISS_DA is not set
# CONFIG_BLK_DEV_DAC960 is not set
# CONFIG_BLK_DEV_UMEM is not set
# CONFIG_BLK_DEV_COW_COMMON is not set
-# CONFIG_BLK_DEV_LOOP is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=y
#
# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
@@ -575,34 +692,63 @@ CONFIG_BLK_DEV_RAM_SIZE=4096
# CONFIG_CDROM_PKTCDVD is not set
# CONFIG_ATA_OVER_ETH is not set
# CONFIG_BLK_DEV_HD is not set
-CONFIG_MISC_DEVICES=y
-# CONFIG_AD525X_DPOT is not set
-# CONFIG_PHANTOM is not set
-# CONFIG_SGI_IOC4 is not set
-# CONFIG_TIFM_CORE is not set
-# CONFIG_ICS932S401 is not set
-# CONFIG_ENCLOSURE_SERVICES is not set
-# CONFIG_HP_ILO is not set
-# CONFIG_ISL29003 is not set
-# CONFIG_DS1682 is not set
-# CONFIG_TI_DAC7512 is not set
-# CONFIG_C2PORT is not set
-
-#
-# EEPROM support
-#
-# CONFIG_EEPROM_AT24 is not set
-# CONFIG_EEPROM_AT25 is not set
-# CONFIG_EEPROM_LEGACY is not set
-# CONFIG_EEPROM_MAX6875 is not set
-# CONFIG_EEPROM_93CX6 is not set
-# CONFIG_CB710_CORE is not set
+# CONFIG_MISC_DEVICES is not set
CONFIG_HAVE_IDE=y
-# CONFIG_IDE is not set
+CONFIG_IDE=y
+
+#
+# Please see Documentation/ide/ide.txt for help/info on IDE drives
+#
+CONFIG_IDE_ATAPI=y
+# CONFIG_BLK_DEV_IDE_SATA is not set
+CONFIG_IDE_GD=y
+CONFIG_IDE_GD_ATA=y
+# CONFIG_IDE_GD_ATAPI is not set
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS=y
+# CONFIG_BLK_DEV_IDETAPE is not set
+# CONFIG_IDE_TASK_IOCTL is not set
+CONFIG_IDE_PROC_FS=y
+
+#
+# IDE chipset support/bugfixes
+#
+CONFIG_BLK_DEV_PLATFORM=y
+
+#
+# PCI IDE chipsets support
+#
+# CONFIG_BLK_DEV_GENERIC is not set
+# CONFIG_BLK_DEV_OPTI621 is not set
+# CONFIG_BLK_DEV_AEC62XX is not set
+# CONFIG_BLK_DEV_ALI15X3 is not set
+# CONFIG_BLK_DEV_AMD74XX is not set
+# CONFIG_BLK_DEV_CMD64X is not set
+# CONFIG_BLK_DEV_TRIFLEX is not set
+# CONFIG_BLK_DEV_CS5520 is not set
+# CONFIG_BLK_DEV_CS5530 is not set
+# CONFIG_BLK_DEV_HPT366 is not set
+# CONFIG_BLK_DEV_JMICRON is not set
+# CONFIG_BLK_DEV_SC1200 is not set
+# CONFIG_BLK_DEV_PIIX is not set
+# CONFIG_BLK_DEV_IT8172 is not set
+# CONFIG_BLK_DEV_IT8213 is not set
+# CONFIG_BLK_DEV_IT821X is not set
+# CONFIG_BLK_DEV_NS87415 is not set
+# CONFIG_BLK_DEV_PDC202XX_OLD is not set
+# CONFIG_BLK_DEV_PDC202XX_NEW is not set
+# CONFIG_BLK_DEV_SVWKS is not set
+# CONFIG_BLK_DEV_SIIMAGE is not set
+# CONFIG_BLK_DEV_SLC90E66 is not set
+# CONFIG_BLK_DEV_TRM290 is not set
+# CONFIG_BLK_DEV_VIA82CXXX is not set
+# CONFIG_BLK_DEV_TC86C001 is not set
+# CONFIG_BLK_DEV_IDEDMA is not set
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -616,11 +762,12 @@ CONFIG_SCSI_PROC_FS=y
CONFIG_BLK_DEV_SD=y
# CONFIG_CHR_DEV_ST is not set
# CONFIG_CHR_DEV_OSST is not set
-# CONFIG_BLK_DEV_SR is not set
+CONFIG_BLK_DEV_SR=y
+# CONFIG_BLK_DEV_SR_VENDOR is not set
# CONFIG_CHR_DEV_SG is not set
# CONFIG_CHR_DEV_SCH is not set
-# CONFIG_SCSI_MULTI_LUN is not set
-# CONFIG_SCSI_CONSTANTS is not set
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
# CONFIG_SCSI_LOGGING is not set
# CONFIG_SCSI_SCAN_ASYNC is not set
CONFIG_SCSI_WAIT_SCAN=m
@@ -631,52 +778,10 @@ CONFIG_SCSI_WAIT_SCAN=m
# CONFIG_SCSI_SPI_ATTRS is not set
# CONFIG_SCSI_FC_ATTRS is not set
# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
# CONFIG_SCSI_SAS_LIBSAS is not set
# CONFIG_SCSI_SRP_ATTRS is not set
-CONFIG_SCSI_LOWLEVEL=y
-# CONFIG_ISCSI_TCP is not set
-# CONFIG_SCSI_BNX2_ISCSI is not set
-# CONFIG_BE2ISCSI is not set
-# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
-# CONFIG_SCSI_HPSA is not set
-# CONFIG_SCSI_3W_9XXX is not set
-# CONFIG_SCSI_3W_SAS is not set
-# CONFIG_SCSI_ACARD is not set
-# CONFIG_SCSI_AACRAID is not set
-# CONFIG_SCSI_AIC7XXX is not set
-# CONFIG_SCSI_AIC7XXX_OLD is not set
-# CONFIG_SCSI_AIC79XX is not set
-# CONFIG_SCSI_AIC94XX is not set
-# CONFIG_SCSI_MVSAS is not set
-# CONFIG_SCSI_ARCMSR is not set
-# CONFIG_MEGARAID_NEWGEN is not set
-# CONFIG_MEGARAID_LEGACY is not set
-# CONFIG_MEGARAID_SAS is not set
-# CONFIG_SCSI_MPT2SAS is not set
-# CONFIG_SCSI_HPTIOP is not set
-# CONFIG_LIBFC is not set
-# CONFIG_LIBFCOE is not set
-# CONFIG_FCOE is not set
-# CONFIG_SCSI_DMX3191D is not set
-# CONFIG_SCSI_FUTURE_DOMAIN is not set
-# CONFIG_SCSI_IPS is not set
-# CONFIG_SCSI_INITIO is not set
-# CONFIG_SCSI_INIA100 is not set
-# CONFIG_SCSI_STEX is not set
-# CONFIG_SCSI_SYM53C8XX_2 is not set
-# CONFIG_SCSI_IPR is not set
-# CONFIG_SCSI_QLOGIC_1280 is not set
-# CONFIG_SCSI_QLA_FC is not set
-# CONFIG_SCSI_QLA_ISCSI is not set
-# CONFIG_SCSI_LPFC is not set
-# CONFIG_SCSI_DC395x is not set
-# CONFIG_SCSI_DC390T is not set
-# CONFIG_SCSI_NSP32 is not set
-# CONFIG_SCSI_DEBUG is not set
-# CONFIG_SCSI_PMCRAID is not set
-# CONFIG_SCSI_PM8001 is not set
-# CONFIG_SCSI_SRP is not set
-# CONFIG_SCSI_BFA_FC is not set
+# CONFIG_SCSI_LOWLEVEL is not set
# CONFIG_SCSI_DH is not set
# CONFIG_SCSI_OSD_INITIATOR is not set
CONFIG_ATA=y
@@ -719,6 +824,7 @@ CONFIG_ATA_SFF=y
# CONFIG_PATA_IT821X is not set
# CONFIG_PATA_IT8213 is not set
# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_LEGACY is not set
# CONFIG_PATA_TRIFLEX is not set
# CONFIG_PATA_MARVELL is not set
# CONFIG_PATA_MPIIX is not set
@@ -743,7 +849,17 @@ CONFIG_ATA_SFF=y
# CONFIG_PATA_WINBOND is not set
CONFIG_PATA_PLATFORM=y
# CONFIG_PATA_SCH is not set
-# CONFIG_MD is not set
+CONFIG_MD=y
+# CONFIG_BLK_DEV_MD is not set
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_DEBUG=y
+# CONFIG_DM_CRYPT is not set
+# CONFIG_DM_SNAPSHOT is not set
+# CONFIG_DM_MIRROR is not set
+# CONFIG_DM_ZERO is not set
+# CONFIG_DM_MULTIPATH is not set
+# CONFIG_DM_DELAY is not set
+CONFIG_DM_UEVENT=y
# CONFIG_FUSION is not set
#
@@ -820,11 +936,7 @@ CONFIG_SMSC911X=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
# CONFIG_TR is not set
-CONFIG_WLAN=y
-# CONFIG_ATMEL is not set
-# CONFIG_PRISM54 is not set
-# CONFIG_USB_ZD1201 is not set
-# CONFIG_HOSTAP is not set
+# CONFIG_WLAN is not set
#
# Enable WiMAX (Networking options) to see the WiMAX drivers
@@ -838,6 +950,7 @@ CONFIG_WLAN=y
# CONFIG_USB_PEGASUS is not set
# CONFIG_USB_RTL8150 is not set
# CONFIG_USB_USBNET is not set
+# CONFIG_USB_IPHETH is not set
# CONFIG_WAN is not set
# CONFIG_FDDI is not set
# CONFIG_HIPPI is not set
@@ -923,7 +1036,7 @@ CONFIG_VT=y
CONFIG_CONSOLE_TRANSLATIONS=y
CONFIG_VT_CONSOLE=y
CONFIG_HW_CONSOLE=y
-# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
CONFIG_DEVKMEM=y
# CONFIG_SERIAL_NONSTANDARD is not set
# CONFIG_NOZOMI is not set
@@ -940,9 +1053,11 @@ CONFIG_DEVKMEM=y
CONFIG_SERIAL_SH_SCI=y
CONFIG_SERIAL_SH_SCI_NR_UARTS=6
CONFIG_SERIAL_SH_SCI_CONSOLE=y
+CONFIG_SERIAL_SH_SCI_DMA=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
@@ -986,8 +1101,10 @@ CONFIG_I2C_HELPER_AUTO=y
#
# CONFIG_I2C_DESIGNWARE is not set
# CONFIG_I2C_OCORES is not set
+CONFIG_I2C_SDK7786=y
# CONFIG_I2C_SH_MOBILE is not set
# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
#
# External I2C/SMBus adapter drivers
@@ -1001,15 +1118,9 @@ CONFIG_I2C_HELPER_AUTO=y
#
# CONFIG_I2C_PCA_PLATFORM is not set
# CONFIG_I2C_STUB is not set
-
-#
-# Miscellaneous I2C Chip support
-#
-# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
CONFIG_SPI=y
# CONFIG_SPI_DEBUG is not set
CONFIG_SPI_MASTER=y
@@ -1045,7 +1156,8 @@ CONFIG_WATCHDOG=y
#
# CONFIG_SOFT_WATCHDOG is not set
# CONFIG_ALIM7101_WDT is not set
-# CONFIG_SH_WDT is not set
+CONFIG_SH_WDT=y
+# CONFIG_SH_WDT_MMAP is not set
#
# PCI-based Watchdog Cards
@@ -1068,6 +1180,7 @@ CONFIG_SSB_POSSIBLE=y
# Multifunction device drivers
#
# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_88PM860X is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_MFD_SH_MOBILE_SDHI is not set
# CONFIG_HTC_PASIC3 is not set
@@ -1075,15 +1188,17 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
# CONFIG_MFD_PCF50633 is not set
# CONFIG_MFD_MC13783 is not set
# CONFIG_AB3100_CORE is not set
# CONFIG_EZX_PCAP is not set
-# CONFIG_MFD_88PM8607 is not set
# CONFIG_AB4500_CORE is not set
+# CONFIG_LPC_SCH is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@@ -1091,6 +1206,7 @@ CONFIG_SSB_POSSIBLE=y
# Graphics support
#
CONFIG_VGA_ARB=y
+CONFIG_VGA_ARB_MAX_GPUS=16
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
CONFIG_VIDEO_OUTPUT_CONTROL=m
@@ -1121,6 +1237,7 @@ CONFIG_USB_HID=y
#
# Special HID drivers
#
+# CONFIG_HID_3M_PCT is not set
# CONFIG_HID_A4TECH is not set
# CONFIG_HID_APPLE is not set
# CONFIG_HID_BELKIN is not set
@@ -1135,12 +1252,16 @@ CONFIG_USB_HID=y
# CONFIG_HID_KENSINGTON is not set
# CONFIG_HID_LOGITECH is not set
# CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MOSART is not set
# CONFIG_HID_MONTEREY is not set
# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_ORTEK is not set
# CONFIG_HID_PANTHERLORD is not set
# CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_QUANTA is not set
# CONFIG_HID_SAMSUNG is not set
# CONFIG_HID_SONY is not set
+# CONFIG_HID_STANTUM is not set
# CONFIG_HID_SUNPLUS is not set
# CONFIG_HID_GREENASIA is not set
# CONFIG_HID_SMARTJOYPLUS is not set
@@ -1241,7 +1362,6 @@ CONFIG_USB_STORAGE=y
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
-# CONFIG_USB_BERRY_CHARGE is not set
# CONFIG_USB_LED is not set
# CONFIG_USB_CYPRESS_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set
@@ -1253,7 +1373,6 @@ CONFIG_USB_STORAGE=y
# CONFIG_USB_IOWARRIOR is not set
# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
-# CONFIG_USB_VST is not set
CONFIG_USB_GADGET=y
# CONFIG_USB_GADGET_DEBUG is not set
# CONFIG_USB_GADGET_DEBUG_FILES is not set
@@ -1291,6 +1410,7 @@ CONFIG_USB_GADGET_DUALSPEED=y
# CONFIG_USB_MIDI_GADGET is not set
# CONFIG_USB_G_PRINTER is not set
# CONFIG_USB_CDC_COMPOSITE is not set
+# CONFIG_USB_G_NOKIA is not set
# CONFIG_USB_G_MULTI is not set
#
@@ -1370,43 +1490,102 @@ CONFIG_RTC_DRV_MAX6900=y
#
CONFIG_RTC_DRV_SH=y
# CONFIG_RTC_DRV_GENERIC is not set
-# CONFIG_DMADEVICES is not set
+CONFIG_DMADEVICES=y
+# CONFIG_DMADEVICES_DEBUG is not set
+
+#
+# DMA Devices
+#
+CONFIG_SH_DMAE=y
+CONFIG_DMA_ENGINE=y
+
+#
+# DMA Clients
+#
+# CONFIG_NET_DMA is not set
+# CONFIG_ASYNC_TX_DMA is not set
+# CONFIG_DMATEST is not set
# CONFIG_AUXDISPLAY is not set
CONFIG_UIO=m
# CONFIG_UIO_CIF is not set
-# CONFIG_UIO_PDRV is not set
-# CONFIG_UIO_PDRV_GENIRQ is not set
-# CONFIG_UIO_SMX is not set
+CONFIG_UIO_PDRV=m
+CONFIG_UIO_PDRV_GENIRQ=m
# CONFIG_UIO_AEC is not set
# CONFIG_UIO_SERCOS3 is not set
-# CONFIG_UIO_PCI_GENERIC is not set
+CONFIG_UIO_PCI_GENERIC=m
+# CONFIG_UIO_NETX is not set
#
# TI VLYNQ
#
-# CONFIG_STAGING is not set
+CONFIG_STAGING=y
+# CONFIG_STAGING_EXCLUDE_BUILD is not set
+# CONFIG_USB_IP_COMMON is not set
+# CONFIG_ECHO is not set
+# CONFIG_POCH is not set
+# CONFIG_COMEDI is not set
+# CONFIG_ASUS_OLED is not set
+# CONFIG_TRANZPORT is not set
+
+#
+# Qualcomm MSM Camera And Video
+#
+
+#
+# Camera Sensor Selection
+#
+# CONFIG_INPUT_GPIO is not set
+# CONFIG_POHMELFS is not set
+# CONFIG_IDE_PHISON is not set
+# CONFIG_VME_BUS is not set
+
+#
+# RAR Register Driver
+#
+# CONFIG_RAR_REGISTER is not set
+# CONFIG_IIO is not set
+CONFIG_RAMZSWAP=m
+CONFIG_RAMZSWAP_STATS=y
+# CONFIG_BATMAN_ADV is not set
+# CONFIG_STRIP is not set
+# CONFIG_DT3155 is not set
+# CONFIG_CRYSTALHD is not set
#
# File systems
#
CONFIG_EXT2_FS=y
-# CONFIG_EXT2_FS_XATTR is not set
+CONFIG_EXT2_FS_XATTR=y
+# CONFIG_EXT2_FS_POSIX_ACL is not set
+# CONFIG_EXT2_FS_SECURITY is not set
# CONFIG_EXT2_FS_XIP is not set
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT3_FS_XATTR=y
# CONFIG_EXT3_FS_POSIX_ACL is not set
# CONFIG_EXT3_FS_SECURITY is not set
-# CONFIG_EXT4_FS is not set
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_XATTR=y
+# CONFIG_EXT4_FS_POSIX_ACL is not set
+# CONFIG_EXT4_FS_SECURITY is not set
+# CONFIG_EXT4_DEBUG is not set
CONFIG_JBD=y
# CONFIG_JBD_DEBUG is not set
+CONFIG_JBD2=y
+# CONFIG_JBD2_DEBUG is not set
CONFIG_FS_MBCACHE=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
-# CONFIG_FS_POSIX_ACL is not set
-# CONFIG_XFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_XFS_FS=y
+# CONFIG_XFS_QUOTA is not set
+# CONFIG_XFS_POSIX_ACL is not set
+# CONFIG_XFS_RT is not set
+# CONFIG_XFS_DEBUG is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
-# CONFIG_BTRFS_FS is not set
+CONFIG_BTRFS_FS=y
+# CONFIG_BTRFS_FS_POSIX_ACL is not set
# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
@@ -1415,19 +1594,30 @@ CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
# CONFIG_QUOTA is not set
# CONFIG_AUTOFS_FS is not set
-# CONFIG_AUTOFS4_FS is not set
-# CONFIG_FUSE_FS is not set
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=y
+CONFIG_CUSE=m
#
# Caches
#
-# CONFIG_FSCACHE is not set
+CONFIG_FSCACHE=m
+# CONFIG_FSCACHE_STATS is not set
+# CONFIG_FSCACHE_HISTOGRAM is not set
+# CONFIG_FSCACHE_DEBUG is not set
+# CONFIG_FSCACHE_OBJECT_LIST is not set
+CONFIG_CACHEFILES=m
+# CONFIG_CACHEFILES_DEBUG is not set
+# CONFIG_CACHEFILES_HISTOGRAM is not set
#
# CD-ROM/DVD Filesystems
#
-# CONFIG_ISO9660_FS is not set
-# CONFIG_UDF_FS is not set
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_UDF_NLS=y
#
# DOS/FAT/NT Filesystems
@@ -1448,7 +1638,7 @@ CONFIG_TMPFS=y
# CONFIG_TMPFS_POSIX_ACL is not set
CONFIG_HUGETLBFS=y
CONFIG_HUGETLB_PAGE=y
-# CONFIG_CONFIGFS_FS is not set
+CONFIG_CONFIGFS_FS=y
CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
@@ -1457,30 +1647,60 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
-# CONFIG_CRAMFS is not set
-# CONFIG_SQUASHFS is not set
+CONFIG_JFFS2_FS=m
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+# CONFIG_JFFS2_SUMMARY is not set
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_FS_POSIX_ACL=y
+CONFIG_JFFS2_FS_SECURITY=y
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+CONFIG_UBIFS_FS=m
+# CONFIG_UBIFS_FS_XATTR is not set
+# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set
+CONFIG_UBIFS_FS_LZO=y
+CONFIG_UBIFS_FS_ZLIB=y
+# CONFIG_UBIFS_FS_DEBUG is not set
+CONFIG_LOGFS=m
+CONFIG_CRAMFS=m
+CONFIG_SQUASHFS=m
+# CONFIG_SQUASHFS_EMBEDDED is not set
+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
# CONFIG_VXFS_FS is not set
# CONFIG_MINIX_FS is not set
# CONFIG_OMFS_FS is not set
# CONFIG_HPFS_FS is not set
# CONFIG_QNX4FS_FS is not set
-# CONFIG_ROMFS_FS is not set
+CONFIG_ROMFS_FS=m
+CONFIG_ROMFS_BACKED_BY_BLOCK=y
+# CONFIG_ROMFS_BACKED_BY_MTD is not set
+# CONFIG_ROMFS_BACKED_BY_BOTH is not set
+CONFIG_ROMFS_ON_BLOCK=y
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
# CONFIG_NFS_V3_ACL is not set
-# CONFIG_NFS_V4 is not set
+CONFIG_NFS_V4=y
+# CONFIG_NFS_V4_1 is not set
CONFIG_ROOT_NFS=y
# CONFIG_NFSD is not set
CONFIG_LOCKD=y
CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=y
CONFIG_NFS_COMMON=y
CONFIG_SUNRPC=y
-# CONFIG_RPCSEC_GSS_KRB5 is not set
+CONFIG_SUNRPC_GSS=y
+CONFIG_RPCSEC_GSS_KRB5=y
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -1493,7 +1713,7 @@ CONFIG_SUNRPC=y
CONFIG_MSDOS_PARTITION=y
CONFIG_NLS=y
CONFIG_NLS_DEFAULT="iso8859-1"
-# CONFIG_NLS_CODEPAGE_437 is not set
+CONFIG_NLS_CODEPAGE_437=y
# CONFIG_NLS_CODEPAGE_737 is not set
# CONFIG_NLS_CODEPAGE_775 is not set
# CONFIG_NLS_CODEPAGE_850 is not set
@@ -1508,16 +1728,16 @@ CONFIG_NLS_DEFAULT="iso8859-1"
# CONFIG_NLS_CODEPAGE_865 is not set
# CONFIG_NLS_CODEPAGE_866 is not set
# CONFIG_NLS_CODEPAGE_869 is not set
-# CONFIG_NLS_CODEPAGE_936 is not set
-# CONFIG_NLS_CODEPAGE_950 is not set
-# CONFIG_NLS_CODEPAGE_932 is not set
-# CONFIG_NLS_CODEPAGE_949 is not set
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=y
+CONFIG_NLS_CODEPAGE_949=m
# CONFIG_NLS_CODEPAGE_874 is not set
# CONFIG_NLS_ISO8859_8 is not set
# CONFIG_NLS_CODEPAGE_1250 is not set
# CONFIG_NLS_CODEPAGE_1251 is not set
-# CONFIG_NLS_ASCII is not set
-# CONFIG_NLS_ISO8859_1 is not set
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=y
# CONFIG_NLS_ISO8859_2 is not set
# CONFIG_NLS_ISO8859_3 is not set
# CONFIG_NLS_ISO8859_4 is not set
@@ -1527,10 +1747,10 @@ CONFIG_NLS_DEFAULT="iso8859-1"
# CONFIG_NLS_ISO8859_9 is not set
# CONFIG_NLS_ISO8859_13 is not set
# CONFIG_NLS_ISO8859_14 is not set
-# CONFIG_NLS_ISO8859_15 is not set
+CONFIG_NLS_ISO8859_15=m
# CONFIG_NLS_KOI8_R is not set
# CONFIG_NLS_KOI8_U is not set
-# CONFIG_NLS_UTF8 is not set
+CONFIG_NLS_UTF8=m
# CONFIG_DLM is not set
#
@@ -1538,7 +1758,7 @@ CONFIG_NLS_DEFAULT="iso8859-1"
#
CONFIG_TRACE_IRQFLAGS_SUPPORT=y
CONFIG_PRINTK_TIME=y
-CONFIG_ENABLE_WARN_DEPRECATED=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
CONFIG_MAGIC_SYSRQ=y
@@ -1547,7 +1767,7 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_SHIRQ=y
+# CONFIG_DEBUG_SHIRQ is not set
CONFIG_DETECT_SOFTLOCKUP=y
# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
@@ -1573,24 +1793,27 @@ CONFIG_STACKTRACE=y
# CONFIG_DEBUG_KOBJECT is not set
CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_INFO is not set
-CONFIG_DEBUG_VM=y
+# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_WRITECOUNT is not set
-# CONFIG_DEBUG_MEMORY_INIT is not set
+CONFIG_DEBUG_MEMORY_INIT=y
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
# CONFIG_DEBUG_NOTIFIERS is not set
# CONFIG_DEBUG_CREDENTIALS is not set
CONFIG_FRAME_POINTER=y
# CONFIG_RCU_TORTURE_TEST is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_RCU_CPU_STALL_DETECTOR=y
+# CONFIG_RCU_CPU_STALL_VERBOSE is not set
+# CONFIG_KPROBES_SANITY_TEST is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_LKDTM is not set
# CONFIG_FAULT_INJECTION is not set
-# CONFIG_LATENCYTOP is not set
-# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_PAGE_POISONING is not set
CONFIG_NOP_TRACER=y
+CONFIG_HAVE_FTRACE_NMI_ENTER=y
CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
@@ -1598,16 +1821,19 @@ CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_RING_BUFFER=y
+CONFIG_FTRACE_NMI_ENTER=y
CONFIG_EVENT_TRACING=y
CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
CONFIG_TRACING=y
+CONFIG_GENERIC_TRACER=y
CONFIG_TRACING_SUPPORT=y
CONFIG_FTRACE=y
-# CONFIG_FUNCTION_TRACER is not set
+CONFIG_FUNCTION_TRACER=y
+# CONFIG_FUNCTION_GRAPH_TRACER is not set
# CONFIG_IRQSOFF_TRACER is not set
# CONFIG_PREEMPT_TRACER is not set
# CONFIG_SCHED_TRACER is not set
-# CONFIG_ENABLE_DEFAULT_TRACERS is not set
# CONFIG_FTRACE_SYSCALLS is not set
# CONFIG_BOOT_TRACER is not set
CONFIG_BRANCH_PROFILE_NONE=y
@@ -1619,9 +1845,13 @@ CONFIG_KSYM_TRACER=y
# CONFIG_KMEMTRACE is not set
# CONFIG_WORKQUEUE_TRACER is not set
# CONFIG_BLK_DEV_IO_TRACE is not set
+CONFIG_DYNAMIC_FTRACE=y
+# CONFIG_FUNCTION_PROFILER is not set
+CONFIG_FTRACE_MCOUNT_RECORD=y
+# CONFIG_FTRACE_STARTUP_TEST is not set
# CONFIG_RING_BUFFER_BENCHMARK is not set
# CONFIG_DYNAMIC_DEBUG is not set
-# CONFIG_DMA_API_DEBUG is not set
+CONFIG_DMA_API_DEBUG=y
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KGDB is not set
@@ -1632,6 +1862,7 @@ CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DUMP_CODE=y
CONFIG_DWARF_UNWINDER=y
# CONFIG_SH_NO_BSS_INIT is not set
+CONFIG_MCOUNT=y
#
# Security options
@@ -1649,10 +1880,21 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_MANAGER is not set
-# CONFIG_CRYPTO_MANAGER2 is not set
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
# CONFIG_CRYPTO_GF128MUL is not set
# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_PCRYPT is not set
+CONFIG_CRYPTO_WORKQUEUE=y
# CONFIG_CRYPTO_CRYPTD is not set
# CONFIG_CRYPTO_AUTHENC is not set
# CONFIG_CRYPTO_TEST is not set
@@ -1667,7 +1909,7 @@ CONFIG_CRYPTO=y
#
# Block modes
#
-# CONFIG_CRYPTO_CBC is not set
+CONFIG_CRYPTO_CBC=y
# CONFIG_CRYPTO_CTR is not set
# CONFIG_CRYPTO_CTS is not set
# CONFIG_CRYPTO_ECB is not set
@@ -1685,10 +1927,10 @@ CONFIG_CRYPTO=y
#
# Digest
#
-# CONFIG_CRYPTO_CRC32C is not set
+CONFIG_CRYPTO_CRC32C=y
# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
-# CONFIG_CRYPTO_MD5 is not set
+CONFIG_CRYPTO_MD5=y
# CONFIG_CRYPTO_MICHAEL_MIC is not set
# CONFIG_CRYPTO_RMD128 is not set
# CONFIG_CRYPTO_RMD160 is not set
@@ -1710,7 +1952,7 @@ CONFIG_CRYPTO=y
# CONFIG_CRYPTO_CAMELLIA is not set
# CONFIG_CRYPTO_CAST5 is not set
# CONFIG_CRYPTO_CAST6 is not set
-# CONFIG_CRYPTO_DES is not set
+CONFIG_CRYPTO_DES=y
# CONFIG_CRYPTO_FCRYPT is not set
# CONFIG_CRYPTO_KHAZAD is not set
# CONFIG_CRYPTO_SALSA20 is not set
@@ -1722,9 +1964,9 @@ CONFIG_CRYPTO=y
#
# Compression
#
-# CONFIG_CRYPTO_DEFLATE is not set
+CONFIG_CRYPTO_DEFLATE=m
# CONFIG_CRYPTO_ZLIB is not set
-# CONFIG_CRYPTO_LZO is not set
+CONFIG_CRYPTO_LZO=m
#
# Random Number Generation
@@ -1740,12 +1982,22 @@ CONFIG_BINARY_PRINTF=y
CONFIG_BITREVERSE=y
CONFIG_GENERIC_FIND_LAST_BIT=y
# CONFIG_CRC_CCITT is not set
-# CONFIG_CRC16 is not set
+CONFIG_CRC16=y
# CONFIG_CRC_T10DIF is not set
-# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC_ITU_T=m
CONFIG_CRC32=y
# CONFIG_CRC7 is not set
-# CONFIG_LIBCRC32C is not set
+CONFIG_LIBCRC32C=y
+CONFIG_AUDIT_GENERIC=y
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_COMPRESS=m
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_DECOMPRESS_BZIP2=y
+CONFIG_DECOMPRESS_LZMA=y
+CONFIG_DECOMPRESS_LZO=y
+CONFIG_BTREE=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
index 02df18ea9608..ef9e555aafba 100644
--- a/arch/sh/include/asm/cache.h
+++ b/arch/sh/include/asm/cache.h
@@ -14,7 +14,7 @@
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
#ifndef __ASSEMBLY__
struct cache_info {
@@ -38,14 +38,10 @@ struct cache_info {
* 2. those in the physical page number.
*/
unsigned int alias_mask;
-
unsigned int n_aliases; /* Number of aliases */
unsigned long flags;
};
-
-int __init detect_cpu_and_cache_system(void);
-
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* __ASM_SH_CACHE_H */
diff --git a/arch/sh/include/asm/clkdev.h b/arch/sh/include/asm/clkdev.h
new file mode 100644
index 000000000000..5645f358128b
--- /dev/null
+++ b/arch/sh/include/asm/clkdev.h
@@ -0,0 +1,35 @@
+/*
+ * arch/sh/include/asm/clkdev.h
+ *
+ * Cloned from arch/arm/include/asm/clkdev.h:
+ *
+ * Copyright (C) 2008 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Helper for the clk API to assist looking up a struct clk.
+ */
+#ifndef __ASM_CLKDEV_H
+#define __ASM_CLKDEV_H
+
+struct clk;
+
+struct clk_lookup {
+ struct list_head node;
+ const char *dev_id;
+ const char *con_id;
+ struct clk *clk;
+};
+
+struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
+ const char *dev_fmt, ...);
+
+void clkdev_add(struct clk_lookup *cl);
+void clkdev_drop(struct clk_lookup *cl);
+
+void clkdev_add_table(struct clk_lookup *, size_t);
+int clk_add_alias(const char *, const char *, char *, struct device *);
+
+#endif
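
The header above only declares the lookup API; as a usage note, a minimal sketch of board code registering a lookup table with these helpers might look like the following (the clock and device names are assumptions for illustration, not taken from this patch):

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/clkdev.h>

extern struct clk board_peripheral_clk;	/* assumed: provided by platform clock code */

static struct clk_lookup board_clk_lookups[] = {
	/* device-specific match */
	{ .dev_id = "sh-sci.0", .con_id = NULL,             .clk = &board_peripheral_clk },
	/* wildcard match by connection ID only */
	{ .dev_id = NULL,       .con_id = "peripheral_clk", .clk = &board_peripheral_clk },
};

static void __init board_clk_setup(void)
{
	clkdev_add_table(board_clk_lookups, ARRAY_SIZE(board_clk_lookups));
}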
diff --git a/arch/sh/include/asm/clock.h b/arch/sh/include/asm/clock.h
index 11da4c5beb68..405a12c09960 100644
--- a/arch/sh/include/asm/clock.h
+++ b/arch/sh/include/asm/clock.h
@@ -45,13 +45,6 @@ struct clk {
struct cpufreq_frequency_table *freq_table;
};
-struct clk_lookup {
- struct list_head node;
- const char *dev_id;
- const char *con_id;
- struct clk *clk;
-};
-
#define CLK_ENABLE_ON_INIT (1 << 0)
/* Should be defined by processor-specific code */
@@ -158,12 +151,11 @@ int sh_clk_div4_enable_register(struct clk *clks, int nr,
int sh_clk_div4_reparent_register(struct clk *clks, int nr,
struct clk_div4_table *table);
-#define SH_CLK_DIV6(_name, _parent, _reg, _flags) \
-{ \
- .name = _name, \
- .parent = _parent, \
- .enable_reg = (void __iomem *)_reg, \
- .flags = _flags, \
+#define SH_CLK_DIV6(_parent, _reg, _flags) \
+{ \
+ .parent = _parent, \
+ .enable_reg = (void __iomem *)_reg, \
+ .flags = _flags, \
}
int sh_clk_div6_register(struct clk *clks, int nr);
diff --git a/arch/sh/include/asm/dmaengine.h b/arch/sh/include/asm/dmaengine.h
index bf2f30cf0a27..2a02b611a9ad 100644
--- a/arch/sh/include/asm/dmaengine.h
+++ b/arch/sh/include/asm/dmaengine.h
@@ -10,14 +10,9 @@
#ifndef ASM_DMAENGINE_H
#define ASM_DMAENGINE_H
-#include <linux/dmaengine.h>
-#include <linux/list.h>
+#include <linux/sh_dma.h>
-#include <asm/dma-register.h>
-
-#define SH_DMAC_MAX_CHANNELS 6
-
-enum sh_dmae_slave_chan_id {
+enum {
SHDMA_SLAVE_SCIF0_TX,
SHDMA_SLAVE_SCIF0_RX,
SHDMA_SLAVE_SCIF1_TX,
@@ -34,60 +29,6 @@ enum sh_dmae_slave_chan_id {
SHDMA_SLAVE_SIUA_RX,
SHDMA_SLAVE_SIUB_TX,
SHDMA_SLAVE_SIUB_RX,
- SHDMA_SLAVE_NUMBER, /* Must stay last */
-};
-
-struct sh_dmae_slave_config {
- enum sh_dmae_slave_chan_id slave_id;
- dma_addr_t addr;
- u32 chcr;
- char mid_rid;
-};
-
-struct sh_dmae_channel {
- unsigned int offset;
- unsigned int dmars;
- unsigned int dmars_bit;
-};
-
-struct sh_dmae_pdata {
- struct sh_dmae_slave_config *slave;
- int slave_num;
- struct sh_dmae_channel *channel;
- int channel_num;
- unsigned int ts_low_shift;
- unsigned int ts_low_mask;
- unsigned int ts_high_shift;
- unsigned int ts_high_mask;
- unsigned int *ts_shift;
- int ts_shift_num;
- u16 dmaor_init;
-};
-
-struct device;
-
-/* Used by slave DMA clients to request DMA to/from a specific peripheral */
-struct sh_dmae_slave {
- enum sh_dmae_slave_chan_id slave_id; /* Set by the platform */
- struct device *dma_dev; /* Set by the platform */
- struct sh_dmae_slave_config *config; /* Set by the driver */
-};
-
-struct sh_dmae_regs {
- u32 sar; /* SAR / source address */
- u32 dar; /* DAR / destination address */
- u32 tcr; /* TCR / transfer count */
-};
-
-struct sh_desc {
- struct sh_dmae_regs hw;
- struct list_head node;
- struct dma_async_tx_descriptor async_tx;
- enum dma_data_direction direction;
- dma_cookie_t cookie;
- size_t partial;
- int chunks;
- int mark;
};
#endif
diff --git a/arch/sh/include/asm/hw_breakpoint.h b/arch/sh/include/asm/hw_breakpoint.h
index 965dd780d51b..89890f61a7b9 100644
--- a/arch/sh/include/asm/hw_breakpoint.h
+++ b/arch/sh/include/asm/hw_breakpoint.h
@@ -46,17 +46,20 @@ struct pmu;
/* Maximum number of UBC channels */
#define HBP_NUM 2
+static inline int hw_breakpoint_slots(int type)
+{
+ return HBP_NUM;
+}
+
/* arch/sh/kernel/hw_breakpoint.c */
-extern int arch_check_va_in_userspace(unsigned long va, u16 hbp_len);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp,
- struct task_struct *tsk);
+extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
+extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
unsigned long val, void *data);
int arch_install_hw_breakpoint(struct perf_event *bp);
void arch_uninstall_hw_breakpoint(struct perf_event *bp);
void hw_breakpoint_pmu_read(struct perf_event *bp);
-void hw_breakpoint_pmu_unthrottle(struct perf_event *bp);
extern void arch_fill_perf_breakpoint(struct perf_event *bp);
extern int register_sh_ubc(struct sh_ubc *);
diff --git a/arch/sh/include/asm/hwblk.h b/arch/sh/include/asm/hwblk.h
index 5d3ccae4202b..855e945c6199 100644
--- a/arch/sh/include/asm/hwblk.h
+++ b/arch/sh/include/asm/hwblk.h
@@ -58,13 +58,11 @@ void hwblk_cnt_inc(struct hwblk_info *info, int hwblk, int cnt);
void hwblk_cnt_dec(struct hwblk_info *info, int hwblk, int cnt);
/* allow clocks to enable and disable hardware blocks */
-#define SH_HWBLK_CLK(_name, _id, _parent, _hwblk, _flags) \
-{ \
- .name = _name, \
- .id = _id, \
- .parent = _parent, \
- .arch_flags = _hwblk, \
- .flags = _flags, \
+#define SH_HWBLK_CLK(_hwblk, _parent, _flags) \
+[_hwblk] = { \
+ .parent = _parent, \
+ .arch_flags = _hwblk, \
+ .flags = _flags, \
}
int sh_hwblk_clk_register(struct clk *clks, int nr);
diff --git a/arch/sh/include/asm/irq.h b/arch/sh/include/asm/irq.h
index df8e1500527c..02c2f0102cfa 100644
--- a/arch/sh/include/asm/irq.h
+++ b/arch/sh/include/asm/irq.h
@@ -1,6 +1,7 @@
#ifndef __ASM_SH_IRQ_H
#define __ASM_SH_IRQ_H
+#include <linux/cpumask.h>
#include <asm/machvec.h>
/*
@@ -12,6 +13,14 @@
#define NR_IRQS_LEGACY 8 /* Legacy external IRQ0-7 */
/*
+ * This is a special IRQ number used to indicate that no IRQ has been
+ * triggered and that the IRQ dispatch should simply be ignored. This
+ * case can occur with IRQ auto-distribution when multiple CPUs are
+ * woken up and signalled in parallel.
+ */
+#define NO_IRQ_IGNORE ((unsigned int)-1)
+
+/*
* Convert back and forth between INTEVT and IRQ values.
*/
#ifdef CONFIG_CPU_HAS_INTEVT
@@ -42,6 +51,8 @@ static inline int generic_irq_demux(int irq)
#define irq_demux(irq) sh_mv.mv_irq_demux(irq)
void init_IRQ(void);
+void migrate_irqs(void);
+
asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs);
#ifdef CONFIG_IRQSTACKS
@@ -53,6 +64,14 @@ extern void irq_ctx_exit(int cpu);
# define irq_ctx_exit(cpu) do { } while (0)
#endif
+#ifdef CONFIG_INTC_BALANCING
+extern unsigned int irq_lookup(unsigned int irq);
+extern void irq_finish(unsigned int irq);
+#else
+#define irq_lookup(irq) (irq)
+#define irq_finish(irq) do { } while (0)
+#endif
+
#include <asm-generic/irq.h>
#ifdef CONFIG_CPU_SH5
#include <cpu/irq.h>
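
For reference, a minimal sketch of how a dispatcher could use the irq_lookup()/irq_finish() pair and NO_IRQ_IGNORE introduced above; this is illustrative only and not part of the patch:

#include <linux/irq.h>
#include <asm/irq.h>

/* Hypothetical dispatch path: translate the hardware IRQ, handle it,
 * then acknowledge it back to the controller. */
static void example_dispatch(unsigned int hw_irq)
{
	unsigned int irq = irq_lookup(hw_irq);

	/* With INTC auto-distribution another CPU may already have
	 * claimed this interrupt, in which case we simply bail out. */
	if (irq == NO_IRQ_IGNORE)
		return;

	generic_handle_irq(irq);
	irq_finish(irq);
}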
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index d71feb359304..0152c040f6c3 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -128,13 +128,18 @@ typedef struct page *pgtable_t;
* added or subtracted as required.
*/
#ifdef CONFIG_PMB
-#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET+__MEMORY_START)
-#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET-__MEMORY_START))
+#define ___pa(x) ((x)-PAGE_OFFSET+__MEMORY_START)
+#define ___va(x) ((x)+PAGE_OFFSET-__MEMORY_START)
#else
-#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
-#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
+#define ___pa(x) ((x)-PAGE_OFFSET)
+#define ___va(x) ((x)+PAGE_OFFSET)
#endif
+#ifndef __ASSEMBLY__
+#define __pa(x) ___pa((unsigned long)x)
+#define __va(x) (void *)___va((unsigned long)x)
+#endif /* !__ASSEMBLY__ */
+
#ifdef CONFIG_UNCACHED_MAPPING
#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + uncached_start)
#define CAC_ADDR(addr) ((addr) - uncached_start + PAGE_OFFSET)
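
A quick standalone check of the ___pa()/___va() arithmetic above; PAGE_OFFSET and __MEMORY_START are example values chosen for illustration, not the values any particular board uses:

#include <stdio.h>

#define PAGE_OFFSET     0x80000000UL	/* assumed example value */
#define __MEMORY_START  0x08000000UL	/* assumed example value */

/* Same identities as the CONFIG_PMB variants in page.h above. */
#define ___pa(x) ((x) - PAGE_OFFSET + __MEMORY_START)
#define ___va(x) ((x) + PAGE_OFFSET - __MEMORY_START)

int main(void)
{
	unsigned long va = 0x80400000UL;	/* kernel virtual address */
	unsigned long pa = ___pa(va);		/* -> 0x08400000 */

	printf("va=%#lx pa=%#lx back=%#lx\n", va, pa, ___va(pa));
	return 0;
}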
diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h
index 9605e062840f..0a58cb25a658 100644
--- a/arch/sh/include/asm/processor.h
+++ b/arch/sh/include/asm/processor.h
@@ -85,6 +85,10 @@ struct sh_cpuinfo {
struct tlb_info itlb;
struct tlb_info dtlb;
+#ifdef CONFIG_SMP
+ struct task_struct *idle;
+#endif
+
unsigned long flags;
} __attribute__ ((aligned(L1_CACHE_BYTES)));
@@ -102,6 +106,9 @@ struct task_struct;
extern struct pt_regs fake_swapper_regs;
+extern void cpu_init(void);
+extern void cpu_probe(void);
+
/* arch/sh/kernel/process.c */
extern unsigned int xstate_size;
extern void free_thread_xstate(struct task_struct *);
diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h
index 572b4eb09493..61a445d2d02a 100644
--- a/arch/sh/include/asm/processor_32.h
+++ b/arch/sh/include/asm/processor_32.h
@@ -27,8 +27,6 @@
#define CCN_CVR 0xff000040
#define CCN_PRR 0xff000044
-asmlinkage void __init sh_cpu_init(void);
-
/*
* User space process size: 2GB.
*
diff --git a/arch/sh/include/asm/siu.h b/arch/sh/include/asm/siu.h
index f1b1e6944a5f..e8d4142baf59 100644
--- a/arch/sh/include/asm/siu.h
+++ b/arch/sh/include/asm/siu.h
@@ -17,10 +17,10 @@ struct device;
struct siu_platform {
struct device *dma_dev;
- enum sh_dmae_slave_chan_id dma_slave_tx_a;
- enum sh_dmae_slave_chan_id dma_slave_rx_a;
- enum sh_dmae_slave_chan_id dma_slave_tx_b;
- enum sh_dmae_slave_chan_id dma_slave_rx_b;
+ unsigned int dma_slave_tx_a;
+ unsigned int dma_slave_rx_a;
+ unsigned int dma_slave_tx_b;
+ unsigned int dma_slave_rx_b;
};
#endif /* ASM_SIU_H */
diff --git a/arch/sh/include/asm/smp-ops.h b/arch/sh/include/asm/smp-ops.h
new file mode 100644
index 000000000000..c590f76856f1
--- /dev/null
+++ b/arch/sh/include/asm/smp-ops.h
@@ -0,0 +1,51 @@
+#ifndef __ASM_SH_SMP_OPS_H
+#define __ASM_SH_SMP_OPS_H
+
+struct plat_smp_ops {
+ void (*smp_setup)(void);
+ unsigned int (*smp_processor_id)(void);
+ void (*prepare_cpus)(unsigned int max_cpus);
+ void (*start_cpu)(unsigned int cpu, unsigned long entry_point);
+ void (*send_ipi)(unsigned int cpu, unsigned int message);
+ int (*cpu_disable)(unsigned int cpu);
+ void (*cpu_die)(unsigned int cpu);
+ void (*play_dead)(void);
+};
+
+extern struct plat_smp_ops *mp_ops;
+extern struct plat_smp_ops shx3_smp_ops;
+
+#ifdef CONFIG_SMP
+
+static inline void plat_smp_setup(void)
+{
+ BUG_ON(!mp_ops);
+ mp_ops->smp_setup();
+}
+
+static inline void play_dead(void)
+{
+ mp_ops->play_dead();
+}
+
+extern void register_smp_ops(struct plat_smp_ops *ops);
+
+#else
+
+static inline void plat_smp_setup(void)
+{
+ /* UP, nothing to do ... */
+}
+
+static inline void register_smp_ops(struct plat_smp_ops *ops)
+{
+}
+
+static inline void play_dead(void)
+{
+ BUG();
+}
+
+#endif /* CONFIG_SMP */
+
+#endif /* __ASM_SH_SMP_OPS_H */
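
A minimal sketch of a platform filling in plat_smp_ops and registering it via register_smp_ops(); the "myboard" names and empty hook bodies are placeholders, and the optional hotplug hooks (cpu_disable, cpu_die, play_dead) are left unset:

#include <linux/init.h>
#include <asm/smp-ops.h>

static void myboard_smp_setup(void)
{
	/* probe and mark secondary CPUs present */
}

static unsigned int myboard_smp_processor_id(void)
{
	return 0;	/* placeholder: read the hardware CPU ID here */
}

static void myboard_prepare_cpus(unsigned int max_cpus) { }
static void myboard_start_cpu(unsigned int cpu, unsigned long entry_point) { }
static void myboard_send_ipi(unsigned int cpu, unsigned int message) { }

static struct plat_smp_ops myboard_smp_ops = {
	.smp_setup		= myboard_smp_setup,
	.smp_processor_id	= myboard_smp_processor_id,
	.prepare_cpus		= myboard_prepare_cpus,
	.start_cpu		= myboard_start_cpu,
	.send_ipi		= myboard_send_ipi,
};

void __init myboard_register_smp_ops(void)
{
	register_smp_ops(&myboard_smp_ops);
}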
diff --git a/arch/sh/include/asm/smp.h b/arch/sh/include/asm/smp.h
index 53ef26ced75f..9070d943ddde 100644
--- a/arch/sh/include/asm/smp.h
+++ b/arch/sh/include/asm/smp.h
@@ -3,15 +3,16 @@
#include <linux/bitops.h>
#include <linux/cpumask.h>
+#include <asm/smp-ops.h>
#ifdef CONFIG_SMP
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/current.h>
+#include <asm/percpu.h>
#define raw_smp_processor_id() (current_thread_info()->cpu)
-#define hard_smp_processor_id() plat_smp_processor_id()
/* Map from cpu id to sequential logical cpu number. */
extern int __cpu_number_map[NR_CPUS];
@@ -30,20 +31,43 @@ enum {
SMP_MSG_NR, /* must be last */
};
+DECLARE_PER_CPU(int, cpu_state);
+
void smp_message_recv(unsigned int msg);
void smp_timer_broadcast(const struct cpumask *mask);
void local_timer_interrupt(void);
void local_timer_setup(unsigned int cpu);
-
-void plat_smp_setup(void);
-void plat_prepare_cpus(unsigned int max_cpus);
-int plat_smp_processor_id(void);
-void plat_start_cpu(unsigned int cpu, unsigned long entry_point);
-void plat_send_ipi(unsigned int cpu, unsigned int message);
+void local_timer_stop(unsigned int cpu);
void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+
+void native_play_dead(void);
+void native_cpu_die(unsigned int cpu);
+int native_cpu_disable(unsigned int cpu);
+
+#ifdef CONFIG_HOTPLUG_CPU
+void play_dead_common(void);
+extern int __cpu_disable(void);
+
+static inline void __cpu_die(unsigned int cpu)
+{
+ extern struct plat_smp_ops *mp_ops; /* private */
+
+ mp_ops->cpu_die(cpu);
+}
+#endif
+
+static inline int hard_smp_processor_id(void)
+{
+ extern struct plat_smp_ops *mp_ops; /* private */
+
+ if (!mp_ops)
+ return 0; /* boot CPU */
+
+ return mp_ops->smp_processor_id();
+}
#else
diff --git a/arch/sh/include/cpu-sh4/cpu/dma-register.h b/arch/sh/include/cpu-sh4/cpu/dma-register.h
index de2359533994..9a6125eb0079 100644
--- a/arch/sh/include/cpu-sh4/cpu/dma-register.h
+++ b/arch/sh/include/cpu-sh4/cpu/dma-register.h
@@ -23,7 +23,8 @@
#define CHCR_TS_HIGH_MASK 0
#define CHCR_TS_HIGH_SHIFT 0
#elif defined(CONFIG_CPU_SUBTYPE_SH7722) || \
- defined(CONFIG_CPU_SUBTYPE_SH7724)
+ defined(CONFIG_CPU_SUBTYPE_SH7724) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7786)
#define CHCR_TS_LOW_MASK 0x00000018
#define CHCR_TS_LOW_SHIFT 3
#define CHCR_TS_HIGH_MASK 0x00300000
diff --git a/arch/sh/include/cpu-sh4/cpu/mmu_context.h b/arch/sh/include/cpu-sh4/cpu/mmu_context.h
index 5963124c1d4a..e46ec708105a 100644
--- a/arch/sh/include/cpu-sh4/cpu/mmu_context.h
+++ b/arch/sh/include/cpu-sh4/cpu/mmu_context.h
@@ -19,13 +19,26 @@
#define MMUCR 0xFF000010 /* MMU Control Register */
+#define MMU_TLB_ENTRY_SHIFT 8
+
#define MMU_ITLB_ADDRESS_ARRAY 0xF2000000
#define MMU_ITLB_ADDRESS_ARRAY2 0xF2800000
+#define MMU_ITLB_DATA_ARRAY 0xF3000000
+#define MMU_ITLB_DATA_ARRAY2 0xF3800000
+
#define MMU_UTLB_ADDRESS_ARRAY 0xF6000000
#define MMU_UTLB_ADDRESS_ARRAY2 0xF6800000
+#define MMU_UTLB_DATA_ARRAY 0xF7000000
+#define MMU_UTLB_DATA_ARRAY2 0xF7800000
#define MMU_PAGE_ASSOC_BIT 0x80
-#define MMUCR_TI (1<<2)
+#ifdef CONFIG_MMU
+#define MMUCR_AT (1 << 0)
+#else
+#define MMUCR_AT (0)
+#endif
+
+#define MMUCR_TI (1 << 2)
#define MMUCR_URB 0x00FC0000
#define MMUCR_URB_SHIFT 18
@@ -58,7 +71,8 @@
#endif
#define MMU_NTLB_ENTRIES 64
-#define MMU_CONTROL_INIT (0x05|MMUCR_SQMD|MMUCR_ME|MMUCR_SE|MMUCR_AEX)
+#define MMU_CONTROL_INIT (MMUCR_AT | MMUCR_TI | MMUCR_SQMD | \
+ MMUCR_ME | MMUCR_SE | MMUCR_AEX)
#define TRA 0xff000020
#define EXPEVT 0xff000024
diff --git a/arch/sh/include/mach-sdk7786/mach/fpga.h b/arch/sh/include/mach-sdk7786/mach/fpga.h
index 2120d67dec70..416b621d94d1 100644
--- a/arch/sh/include/mach-sdk7786/mach/fpga.h
+++ b/arch/sh/include/mach-sdk7786/mach/fpga.h
@@ -42,6 +42,15 @@
#define SCBR_I2CCEN BIT(1) /* CPU I2C master enable */
#define PWRCR 0x1a0
+#define PWRCR_SCISEL0 BIT(0)
+#define PWRCR_SCISEL1 BIT(1)
+#define PWRCR_SCIEN BIT(2) /* Serial port enable */
+#define PWRCR_PDWNACK BIT(5) /* Power down acknowledge */
+#define PWRCR_PDWNREQ BIT(7) /* Power down request */
+#define PWRCR_INT2 BIT(11) /* INT2 connection to power manager */
+#define PWRCR_BUPINIT BIT(13) /* DDR backup initialize */
+#define PWRCR_BKPRST BIT(15) /* Backup power reset */
+
#define SPCBR 0x1b0
#define SPICR 0x1c0
#define SPIDR 0x1d0
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
index 02fd3ae8b0ee..650b92f00ee5 100644
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -11,7 +11,7 @@ endif
CFLAGS_REMOVE_return_address.o = -pg
-obj-y := debugtraps.o dma-nommu.o dumpstack.o \
+obj-y := clkdev.o debugtraps.o dma-nommu.o dumpstack.o \
idle.o io.o io_generic.o irq.o \
irq_$(BITS).o machvec.o nmi_debug.o process.o \
process_$(BITS).o ptrace_$(BITS).o \
diff --git a/arch/sh/kernel/clkdev.c b/arch/sh/kernel/clkdev.c
new file mode 100644
index 000000000000..defdd6e30908
--- /dev/null
+++ b/arch/sh/kernel/clkdev.c
@@ -0,0 +1,169 @@
+/*
+ * arch/sh/kernel/clkdev.c
+ *
+ * Cloned from arch/arm/common/clkdev.c:
+ *
+ * Copyright (C) 2008 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Helper for the clk API to assist looking up a struct clk.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/bootmem.h>
+#include <linux/mm.h>
+#include <asm/clock.h>
+#include <asm/clkdev.h>
+
+static LIST_HEAD(clocks);
+static DEFINE_MUTEX(clocks_mutex);
+
+/*
+ * Find the correct struct clk for the device and connection ID.
+ * We do slightly fuzzy matching here:
+ * An entry with a NULL ID is assumed to be a wildcard.
+ * If an entry has a device ID, it must match
+ * If an entry has a connection ID, it must match
+ * Then we take the most specific entry - with the following
+ * order of precedence: dev+con > dev only > con only.
+ */
+static struct clk *clk_find(const char *dev_id, const char *con_id)
+{
+ struct clk_lookup *p;
+ struct clk *clk = NULL;
+ int match, best = 0;
+
+ list_for_each_entry(p, &clocks, node) {
+ match = 0;
+ if (p->dev_id) {
+ if (!dev_id || strcmp(p->dev_id, dev_id))
+ continue;
+ match += 2;
+ }
+ if (p->con_id) {
+ if (!con_id || strcmp(p->con_id, con_id))
+ continue;
+ match += 1;
+ }
+ if (match == 0)
+ continue;
+
+ if (match > best) {
+ clk = p->clk;
+ best = match;
+ }
+ }
+ return clk;
+}
+
+struct clk *clk_get_sys(const char *dev_id, const char *con_id)
+{
+ struct clk *clk;
+
+ mutex_lock(&clocks_mutex);
+ clk = clk_find(dev_id, con_id);
+ mutex_unlock(&clocks_mutex);
+
+ return clk ? clk : ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL(clk_get_sys);
+
+void clkdev_add(struct clk_lookup *cl)
+{
+ mutex_lock(&clocks_mutex);
+ list_add_tail(&cl->node, &clocks);
+ mutex_unlock(&clocks_mutex);
+}
+EXPORT_SYMBOL(clkdev_add);
+
+void __init clkdev_add_table(struct clk_lookup *cl, size_t num)
+{
+ mutex_lock(&clocks_mutex);
+ while (num--) {
+ list_add_tail(&cl->node, &clocks);
+ cl++;
+ }
+ mutex_unlock(&clocks_mutex);
+}
+
+#define MAX_DEV_ID 20
+#define MAX_CON_ID 16
+
+struct clk_lookup_alloc {
+ struct clk_lookup cl;
+ char dev_id[MAX_DEV_ID];
+ char con_id[MAX_CON_ID];
+};
+
+struct clk_lookup * __init_refok
+clkdev_alloc(struct clk *clk, const char *con_id, const char *dev_fmt, ...)
+{
+ struct clk_lookup_alloc *cla;
+
+ if (!slab_is_available())
+ cla = alloc_bootmem_low_pages(sizeof(*cla));
+ else
+ cla = kzalloc(sizeof(*cla), GFP_KERNEL);
+
+ if (!cla)
+ return NULL;
+
+ cla->cl.clk = clk;
+ if (con_id) {
+ strlcpy(cla->con_id, con_id, sizeof(cla->con_id));
+ cla->cl.con_id = cla->con_id;
+ }
+
+ if (dev_fmt) {
+ va_list ap;
+
+ va_start(ap, dev_fmt);
+ vscnprintf(cla->dev_id, sizeof(cla->dev_id), dev_fmt, ap);
+ cla->cl.dev_id = cla->dev_id;
+ va_end(ap);
+ }
+
+ return &cla->cl;
+}
+EXPORT_SYMBOL(clkdev_alloc);
+
+int clk_add_alias(const char *alias, const char *alias_dev_name, char *id,
+ struct device *dev)
+{
+ struct clk *r = clk_get(dev, id);
+ struct clk_lookup *l;
+
+ if (IS_ERR(r))
+ return PTR_ERR(r);
+
+ l = clkdev_alloc(r, alias, alias_dev_name);
+ clk_put(r);
+ if (!l)
+ return -ENODEV;
+ clkdev_add(l);
+ return 0;
+}
+EXPORT_SYMBOL(clk_add_alias);
+
+/*
+ * clkdev_drop - remove a dynamically allocated clock lookup
+ */
+void clkdev_drop(struct clk_lookup *cl)
+{
+ mutex_lock(&clocks_mutex);
+ list_del(&cl->node);
+ mutex_unlock(&clocks_mutex);
+ kfree(cl);
+}
+EXPORT_SYMBOL(clkdev_drop);
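
The dynamic path implemented above can be exercised roughly as follows; the clock pointer and the "sh-sci.1"/"sci_ick" names are hypothetical and only serve to show the call sequence:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <asm/clkdev.h>

extern struct clk board_peripheral_clk;	/* assumed platform clock */

static int example_register_and_lookup(void)
{
	struct clk_lookup *cl;
	struct clk *clk;

	/* Allocate and register a lookup for an existing clock. */
	cl = clkdev_alloc(&board_peripheral_clk, "sci_ick", "sh-sci.%d", 1);
	if (!cl)
		return -ENOMEM;
	clkdev_add(cl);

	/* A consumer can now resolve it by device/connection ID. */
	clk = clk_get_sys("sh-sci.1", "sci_ick");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return 0;
}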
diff --git a/arch/sh/kernel/cpu/clock-cpg.c b/arch/sh/kernel/cpu/clock-cpg.c
index eed5eaff96ba..17a73ad7a20d 100644
--- a/arch/sh/kernel/cpu/clock-cpg.c
+++ b/arch/sh/kernel/cpu/clock-cpg.c
@@ -338,6 +338,11 @@ int __init __deprecated cpg_clk_init(void)
ret |= clk_register(clk);
}
+ clk_add_alias("tmu_fck", NULL, "peripheral_clk", NULL);
+ clk_add_alias("mtu2_fck", NULL, "peripheral_clk", NULL);
+ clk_add_alias("cmt_fck", NULL, "peripheral_clk", NULL);
+ clk_add_alias("sci_ick", NULL, "peripheral_clk", NULL);
+
return ret;
}
diff --git a/arch/sh/kernel/cpu/clock.c b/arch/sh/kernel/cpu/clock.c
index e9fa1bfed53e..a725c7feb747 100644
--- a/arch/sh/kernel/cpu/clock.c
+++ b/arch/sh/kernel/cpu/clock.c
@@ -10,10 +10,6 @@
*
* Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
*
- * With clkdev bits:
- *
- * Copyright (C) 2008 Russell King.
- *
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
@@ -30,6 +26,7 @@
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#include <linux/cpufreq.h>
+#include <linux/clk.h>
#include <asm/clock.h>
#include <asm/machvec.h>
@@ -398,56 +395,6 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
EXPORT_SYMBOL_GPL(clk_round_rate);
/*
- * Find the correct struct clk for the device and connection ID.
- * We do slightly fuzzy matching here:
- * An entry with a NULL ID is assumed to be a wildcard.
- * If an entry has a device ID, it must match
- * If an entry has a connection ID, it must match
- * Then we take the most specific entry - with the following
- * order of precedence: dev+con > dev only > con only.
- */
-static struct clk *clk_find(const char *dev_id, const char *con_id)
-{
- struct clk_lookup *p;
- struct clk *clk = NULL;
- int match, best = 0;
-
- list_for_each_entry(p, &clock_list, node) {
- match = 0;
- if (p->dev_id) {
- if (!dev_id || strcmp(p->dev_id, dev_id))
- continue;
- match += 2;
- }
- if (p->con_id) {
- if (!con_id || strcmp(p->con_id, con_id))
- continue;
- match += 1;
- }
- if (match == 0)
- continue;
-
- if (match > best) {
- clk = p->clk;
- best = match;
- }
- }
- return clk;
-}
-
-struct clk *clk_get_sys(const char *dev_id, const char *con_id)
-{
- struct clk *clk;
-
- mutex_lock(&clock_list_sem);
- clk = clk_find(dev_id, con_id);
- mutex_unlock(&clock_list_sem);
-
- return clk ? clk : ERR_PTR(-ENOENT);
-}
-EXPORT_SYMBOL_GPL(clk_get_sys);
-
-/*
* Returns a clock. Note that we first try to use device id on the bus
* and clock name. If this fails, we try to use clock name only.
*/
@@ -468,7 +415,7 @@ struct clk *clk_get(struct device *dev, const char *id)
mutex_lock(&clock_list_sem);
list_for_each_entry(p, &clock_list, node) {
- if (p->id == idno &&
+ if (p->name && p->id == idno &&
strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
clk = p;
goto found;
@@ -476,7 +423,8 @@ struct clk *clk_get(struct device *dev, const char *id)
}
list_for_each_entry(p, &clock_list, node) {
- if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
+ if (p->name &&
+ strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
clk = p;
break;
}
@@ -647,7 +595,7 @@ static int clk_debugfs_register(struct clk *c)
return err;
}
- if (!c->dentry) {
+ if (!c->dentry && c->name) {
err = clk_debugfs_register_one(c);
if (err)
return err;
@@ -673,7 +621,7 @@ static int __init clk_debugfs_init(void)
}
return 0;
err_out:
- debugfs_remove(clk_debugfs_root); /* REVISIT: Cleanup correctly */
+ debugfs_remove_recursive(clk_debugfs_root);
return err;
}
late_initcall(clk_debugfs_init);
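
A sketch of the two-step lookup described in the "Returns a clock" comment above, as seen from a driver: clk_get() first tries the bus ID plus clock name, then falls back to the bare clock name. The device and clock names here are hypothetical:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk = clk_get(&pdev->dev, "peripheral_clk");

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	clk_enable(clk);
	/* ... use the device ... */
	clk_disable(clk);
	clk_put(clk);
	return 0;
}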
diff --git a/arch/sh/kernel/cpu/hwblk.c b/arch/sh/kernel/cpu/hwblk.c
index 67a1e811cfe8..3e985aae5d91 100644
--- a/arch/sh/kernel/cpu/hwblk.c
+++ b/arch/sh/kernel/cpu/hwblk.c
@@ -146,6 +146,11 @@ int __init sh_hwblk_clk_register(struct clk *clks, int nr)
for (k = 0; !ret && (k < nr); k++) {
clkp = clks + k;
+
+ /* skip over clocks using hwblk 0 (HWBLK_UNKNOWN) */
+ if (!clkp->arch_flags)
+ continue;
+
clkp->ops = &sh_hwblk_clk_ops;
ret |= clk_register(clkp);
}
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index c736422344eb..97661061ff20 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -43,9 +43,9 @@
* peripherals (nofpu, nodsp, and so forth).
*/
#define onchip_setup(x) \
-static int x##_disabled __initdata = !cpu_has_##x; \
+static int x##_disabled __cpuinitdata = !cpu_has_##x; \
\
-static int __init x##_setup(char *opts) \
+static int __cpuinit x##_setup(char *opts) \
{ \
x##_disabled = 1; \
return 1; \
@@ -59,7 +59,7 @@ onchip_setup(dsp);
#define CPUOPM 0xff2f0000
#define CPUOPM_RABD (1 << 5)
-static void __init speculative_execution_init(void)
+static void __cpuinit speculative_execution_init(void)
{
/* Clear RABD */
__raw_writel(__raw_readl(CPUOPM) & ~CPUOPM_RABD, CPUOPM);
@@ -78,7 +78,7 @@ static void __init speculative_execution_init(void)
#define EXPMASK_BRDSSLP (1 << 1)
#define EXPMASK_MMCAW (1 << 4)
-static void __init expmask_init(void)
+static void __cpuinit expmask_init(void)
{
unsigned long expmask = __raw_readl(EXPMASK);
@@ -217,7 +217,7 @@ static void detect_cache_shape(void)
l2_cache_shape = -1; /* No S-cache */
}
-static void __init fpu_init(void)
+static void __cpuinit fpu_init(void)
{
/* Disable the FPU */
if (fpu_disabled && (current_cpu_data.flags & CPU_HAS_FPU)) {
@@ -230,7 +230,7 @@ static void __init fpu_init(void)
}
#ifdef CONFIG_SH_DSP
-static void __init release_dsp(void)
+static void __cpuinit release_dsp(void)
{
unsigned long sr;
@@ -244,7 +244,7 @@ static void __init release_dsp(void)
);
}
-static void __init dsp_init(void)
+static void __cpuinit dsp_init(void)
{
unsigned long sr;
@@ -276,11 +276,11 @@ static void __init dsp_init(void)
release_dsp();
}
#else
-static inline void __init dsp_init(void) { }
+static inline void __cpuinit dsp_init(void) { }
#endif /* CONFIG_SH_DSP */
/**
- * sh_cpu_init
+ * cpu_init
*
* This is our initial entry point for each CPU, and is invoked on the
* boot CPU prior to calling start_kernel(). For SMP, a combination of
@@ -293,14 +293,14 @@ static inline void __init dsp_init(void) { }
* subtype and initial configuration will all be done.
*
* Each processor family is still responsible for doing its own probing
- * and cache configuration in detect_cpu_and_cache_system().
+ * and cache configuration in cpu_probe().
*/
-asmlinkage void __init sh_cpu_init(void)
+asmlinkage void __cpuinit cpu_init(void)
{
current_thread_info()->cpu = hard_smp_processor_id();
/* First, probe the CPU */
- detect_cpu_and_cache_system();
+ cpu_probe();
if (current_cpu_data.type == CPU_SH_NONE)
panic("Unknown CPU");
diff --git a/arch/sh/kernel/cpu/sh2/probe.c b/arch/sh/kernel/cpu/sh2/probe.c
index 1db6d8883888..bab8e75958ae 100644
--- a/arch/sh/kernel/cpu/sh2/probe.c
+++ b/arch/sh/kernel/cpu/sh2/probe.c
@@ -13,7 +13,7 @@
#include <asm/processor.h>
#include <asm/cache.h>
-int __init detect_cpu_and_cache_system(void)
+void __cpuinit cpu_probe(void)
{
#if defined(CONFIG_CPU_SUBTYPE_SH7619)
boot_cpu_data.type = CPU_SH7619;
@@ -30,7 +30,4 @@ int __init detect_cpu_and_cache_system(void)
boot_cpu_data.dcache.flags |= SH_CACHE_COMBINED;
boot_cpu_data.icache = boot_cpu_data.dcache;
boot_cpu_data.family = CPU_FAMILY_SH2;
-
- return 0;
}
-
diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
index 114c7cee7184..c3638516bffc 100644
--- a/arch/sh/kernel/cpu/sh2/setup-sh7619.c
+++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
@@ -128,17 +128,14 @@ static struct platform_device eth_device = {
};
static struct sh_timer_config cmt0_platform_data = {
- .name = "CMT0",
.channel_offset = 0x02,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 125,
.clocksource_rating = 0, /* disabled due to code generation issues */
};
static struct resource cmt0_resources[] = {
[0] = {
- .name = "CMT0",
.start = 0xf84a0072,
.end = 0xf84a0077,
.flags = IORESOURCE_MEM,
@@ -160,17 +157,14 @@ static struct platform_device cmt0_device = {
};
static struct sh_timer_config cmt1_platform_data = {
- .name = "CMT1",
.channel_offset = 0x08,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clockevent_rating = 125,
.clocksource_rating = 0, /* disabled due to code generation issues */
};
static struct resource cmt1_resources[] = {
[0] = {
- .name = "CMT1",
.start = 0xf84a0078,
.end = 0xf84a007d,
.flags = IORESOURCE_MEM,
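
Throughout the setup-sh* files in this series, the per-channel .name strings and the hard-coded .clk = "peripheral_clk" references are dropped from the timer platform data and MEM resources; the sh_cmt/sh_tmu/sh_mtu2 drivers now derive both from the platform device itself and from clkdev lookups. A sketch of a trimmed channel definition after this cleanup, with placeholder register addresses rather than values from a real SoC:

#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/sh_timer.h>

static struct sh_timer_config cmtx_platform_data = {
        .channel_offset         = 0x02,
        .timer_bit              = 0,
        .clockevent_rating      = 125,
};

static struct resource cmtx_resources[] = {
        [0] = {
                .start  = 0xf0000002,   /* placeholder address */
                .end    = 0xf0000007,
                .flags  = IORESOURCE_MEM,
        },
};

static struct platform_device cmtx_device = {
        .name           = "sh_cmt",
        .id             = 0,    /* yields dev name "sh_cmt.0" for clkdev matching */
        .dev            = {
                .platform_data  = &cmtx_platform_data,
        },
        .resource       = cmtx_resources,
        .num_resources  = ARRAY_SIZE(cmtx_resources),
};
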
diff --git a/arch/sh/kernel/cpu/sh2a/probe.c b/arch/sh/kernel/cpu/sh2a/probe.c
index 6825d6507164..48e97a2a0c8d 100644
--- a/arch/sh/kernel/cpu/sh2a/probe.c
+++ b/arch/sh/kernel/cpu/sh2a/probe.c
@@ -13,7 +13,7 @@
#include <asm/processor.h>
#include <asm/cache.h>
-int __init detect_cpu_and_cache_system(void)
+void __cpuinit cpu_probe(void)
{
boot_cpu_data.family = CPU_FAMILY_SH2A;
@@ -51,6 +51,4 @@ int __init detect_cpu_and_cache_system(void)
* on the cache info.
*/
boot_cpu_data.icache = boot_cpu_data.dcache;
-
- return 0;
}
diff --git a/arch/sh/kernel/cpu/sh2a/setup-mxg.c b/arch/sh/kernel/cpu/sh2a/setup-mxg.c
index 8f669dc9b0da..6c96ea02bf8d 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-mxg.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-mxg.c
@@ -115,16 +115,13 @@ static DECLARE_INTC_DESC(intc_desc, "mxg", vectors, groups,
mask_registers, prio_registers, NULL);
static struct sh_timer_config mtu2_0_platform_data = {
- .name = "MTU2_0",
.channel_offset = -0x80,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource mtu2_0_resources[] = {
[0] = {
- .name = "MTU2_0",
.start = 0xff801300,
.end = 0xff801326,
.flags = IORESOURCE_MEM,
@@ -146,16 +143,13 @@ static struct platform_device mtu2_0_device = {
};
static struct sh_timer_config mtu2_1_platform_data = {
- .name = "MTU2_1",
.channel_offset = -0x100,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource mtu2_1_resources[] = {
[0] = {
- .name = "MTU2_1",
.start = 0xff801380,
.end = 0xff801390,
.flags = IORESOURCE_MEM,
@@ -177,16 +171,13 @@ static struct platform_device mtu2_1_device = {
};
static struct sh_timer_config mtu2_2_platform_data = {
- .name = "MTU2_2",
.channel_offset = 0x80,
.timer_bit = 2,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource mtu2_2_resources[] = {
[0] = {
- .name = "MTU2_2",
.start = 0xff801000,
.end = 0xff80100a,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7201.c b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c
index 4ccfeb59eb1a..d08bf4c07d60 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7201.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c
@@ -318,16 +318,13 @@ static struct platform_device rtc_device = {
};
static struct sh_timer_config mtu2_0_platform_data = {
- .name = "MTU2_0",
.channel_offset = -0x80,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource mtu2_0_resources[] = {
[0] = {
- .name = "MTU2_0",
.start = 0xfffe4300,
.end = 0xfffe4326,
.flags = IORESOURCE_MEM,
@@ -349,16 +346,13 @@ static struct platform_device mtu2_0_device = {
};
static struct sh_timer_config mtu2_1_platform_data = {
- .name = "MTU2_1",
.channel_offset = -0x100,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource mtu2_1_resources[] = {
[0] = {
- .name = "MTU2_1",
.start = 0xfffe4380,
.end = 0xfffe4390,
.flags = IORESOURCE_MEM,
@@ -380,16 +374,13 @@ static struct platform_device mtu2_1_device = {
};
static struct sh_timer_config mtu2_2_platform_data = {
- .name = "MTU2_2",
.channel_offset = 0x80,
.timer_bit = 2,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource mtu2_2_resources[] = {
[0] = {
- .name = "MTU2_2",
.start = 0xfffe4000,
.end = 0xfffe400a,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
index 3136966cc9b3..832f401b5860 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
@@ -234,17 +234,14 @@ static struct platform_device scif3_device = {
};
static struct sh_timer_config cmt0_platform_data = {
- .name = "CMT0",
.channel_offset = 0x02,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 125,
.clocksource_rating = 0, /* disabled due to code generation issues */
};
static struct resource cmt0_resources[] = {
[0] = {
- .name = "CMT0",
.start = 0xfffec002,
.end = 0xfffec007,
.flags = IORESOURCE_MEM,
@@ -266,17 +263,14 @@ static struct platform_device cmt0_device = {
};
static struct sh_timer_config cmt1_platform_data = {
- .name = "CMT1",
.channel_offset = 0x08,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clockevent_rating = 125,
.clocksource_rating = 0, /* disabled due to code generation issues */
};
static struct resource cmt1_resources[] = {
[0] = {
- .name = "CMT1",
.start = 0xfffec008,
.end = 0xfffec00d,
.flags = IORESOURCE_MEM,
@@ -298,16 +292,13 @@ static struct platform_device cmt1_device = {
};
static struct sh_timer_config mtu2_0_platform_data = {
- .name = "MTU2_0",
.channel_offset = -0x80,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource mtu2_0_resources[] = {
[0] = {
- .name = "MTU2_0",
.start = 0xfffe4300,
.end = 0xfffe4326,
.flags = IORESOURCE_MEM,
@@ -329,16 +320,13 @@ static struct platform_device mtu2_0_device = {
};
static struct sh_timer_config mtu2_1_platform_data = {
- .name = "MTU2_1",
.channel_offset = -0x100,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource mtu2_1_resources[] = {
[0] = {
- .name = "MTU2_1",
.start = 0xfffe4380,
.end = 0xfffe4390,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
index 064873585a8b..dc47b04e1049 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
@@ -194,17 +194,14 @@ static struct platform_device scif3_device = {
};
static struct sh_timer_config cmt0_platform_data = {
- .name = "CMT0",
.channel_offset = 0x02,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 125,
.clocksource_rating = 0, /* disabled due to code generation issues */
};
static struct resource cmt0_resources[] = {
[0] = {
- .name = "CMT0",
.start = 0xfffec002,
.end = 0xfffec007,
.flags = IORESOURCE_MEM,
@@ -226,17 +223,14 @@ static struct platform_device cmt0_device = {
};
static struct sh_timer_config cmt1_platform_data = {
- .name = "CMT1",
.channel_offset = 0x08,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clockevent_rating = 125,
.clocksource_rating = 0, /* disabled due to code generation issues */
};
static struct resource cmt1_resources[] = {
[0] = {
- .name = "CMT1",
.start = 0xfffec008,
.end = 0xfffec00d,
.flags = IORESOURCE_MEM,
@@ -258,16 +252,13 @@ static struct platform_device cmt1_device = {
};
static struct sh_timer_config mtu2_0_platform_data = {
- .name = "MTU2_0",
.channel_offset = -0x80,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource mtu2_0_resources[] = {
[0] = {
- .name = "MTU2_0",
.start = 0xfffe4300,
.end = 0xfffe4326,
.flags = IORESOURCE_MEM,
@@ -289,16 +280,13 @@ static struct platform_device mtu2_0_device = {
};
static struct sh_timer_config mtu2_1_platform_data = {
- .name = "MTU2_1",
.channel_offset = -0x100,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource mtu2_1_resources[] = {
[0] = {
- .name = "MTU2_1",
.start = 0xfffe4380,
.end = 0xfffe4390,
.flags = IORESOURCE_MEM,
@@ -320,16 +308,13 @@ static struct platform_device mtu2_1_device = {
};
static struct sh_timer_config mtu2_2_platform_data = {
- .name = "MTU2_2",
.channel_offset = 0x80,
.timer_bit = 2,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource mtu2_2_resources[] = {
[0] = {
- .name = "MTU2_2",
.start = 0xfffe4000,
.end = 0xfffe400a,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh3/probe.c b/arch/sh/kernel/cpu/sh3/probe.c
index 295ec4c99e98..bf23c322e164 100644
--- a/arch/sh/kernel/cpu/sh3/probe.c
+++ b/arch/sh/kernel/cpu/sh3/probe.c
@@ -16,7 +16,7 @@
#include <asm/cache.h>
#include <asm/io.h>
-int detect_cpu_and_cache_system(void)
+void __cpuinit cpu_probe(void)
{
unsigned long addr0, addr1, data0, data1, data2, data3;
@@ -108,6 +108,4 @@ int detect_cpu_and_cache_system(void)
boot_cpu_data.icache = boot_cpu_data.dcache;
boot_cpu_data.family = CPU_FAMILY_SH3;
-
- return 0;
}
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7705.c b/arch/sh/kernel/cpu/sh3/setup-sh7705.c
index 7b892d60e3a0..baadd7f54d94 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7705.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7705.c
@@ -124,16 +124,13 @@ static struct platform_device rtc_device = {
};
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x02,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = 0xfffffe94,
.end = 0xfffffe9f,
.flags = IORESOURCE_MEM,
@@ -155,16 +152,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0xe,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = 0xfffffea0,
.end = 0xfffffeab,
.flags = IORESOURCE_MEM,
@@ -186,15 +180,12 @@ static struct platform_device tmu1_device = {
};
static struct sh_timer_config tmu2_platform_data = {
- .name = "TMU2",
.channel_offset = 0x1a,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu2_resources[] = {
[0] = {
- .name = "TMU2",
.start = 0xfffffeac,
.end = 0xfffffebb,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
index bc0c4f68c7c7..3cf8c8ef7b32 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
@@ -157,16 +157,13 @@ static struct platform_device scif2_device = {
#endif
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x02,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = 0xfffffe94,
.end = 0xfffffe9f,
.flags = IORESOURCE_MEM,
@@ -188,16 +185,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0xe,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = 0xfffffea0,
.end = 0xfffffeab,
.flags = IORESOURCE_MEM,
@@ -219,15 +213,12 @@ static struct platform_device tmu1_device = {
};
static struct sh_timer_config tmu2_platform_data = {
- .name = "TMU2",
.channel_offset = 0x1a,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu2_resources[] = {
[0] = {
- .name = "TMU2",
.start = 0xfffffeac,
.end = 0xfffffebb,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7710.c b/arch/sh/kernel/cpu/sh3/setup-sh7710.c
index 0845a3ad006d..b0c2fb4ab479 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7710.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7710.c
@@ -127,16 +127,13 @@ static struct platform_device scif1_device = {
};
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x02,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = 0xa412fe94,
.end = 0xa412fe9f,
.flags = IORESOURCE_MEM,
@@ -158,16 +155,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0xe,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = 0xa412fea0,
.end = 0xa412feab,
.flags = IORESOURCE_MEM,
@@ -189,15 +183,12 @@ static struct platform_device tmu1_device = {
};
static struct sh_timer_config tmu2_platform_data = {
- .name = "TMU2",
.channel_offset = 0x1a,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu2_resources[] = {
[0] = {
- .name = "TMU2",
.start = 0xa412feac,
.end = 0xa412feb5,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7720.c b/arch/sh/kernel/cpu/sh3/setup-sh7720.c
index a718a6231091..24b17135d5d2 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7720.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7720.c
@@ -130,17 +130,14 @@ static struct platform_device usbf_device = {
};
static struct sh_timer_config cmt0_platform_data = {
- .name = "CMT0",
.channel_offset = 0x10,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 125,
.clocksource_rating = 125,
};
static struct resource cmt0_resources[] = {
[0] = {
- .name = "CMT0",
.start = 0x044a0010,
.end = 0x044a001b,
.flags = IORESOURCE_MEM,
@@ -162,15 +159,12 @@ static struct platform_device cmt0_device = {
};
static struct sh_timer_config cmt1_platform_data = {
- .name = "CMT1",
.channel_offset = 0x20,
.timer_bit = 1,
- .clk = "peripheral_clk",
};
static struct resource cmt1_resources[] = {
[0] = {
- .name = "CMT1",
.start = 0x044a0020,
.end = 0x044a002b,
.flags = IORESOURCE_MEM,
@@ -192,15 +186,12 @@ static struct platform_device cmt1_device = {
};
static struct sh_timer_config cmt2_platform_data = {
- .name = "CMT2",
.channel_offset = 0x30,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource cmt2_resources[] = {
[0] = {
- .name = "CMT2",
.start = 0x044a0030,
.end = 0x044a003b,
.flags = IORESOURCE_MEM,
@@ -222,15 +213,12 @@ static struct platform_device cmt2_device = {
};
static struct sh_timer_config cmt3_platform_data = {
- .name = "CMT3",
.channel_offset = 0x40,
.timer_bit = 3,
- .clk = "peripheral_clk",
};
static struct resource cmt3_resources[] = {
[0] = {
- .name = "CMT3",
.start = 0x044a0040,
.end = 0x044a004b,
.flags = IORESOURCE_MEM,
@@ -252,15 +240,12 @@ static struct platform_device cmt3_device = {
};
static struct sh_timer_config cmt4_platform_data = {
- .name = "CMT4",
.channel_offset = 0x50,
.timer_bit = 4,
- .clk = "peripheral_clk",
};
static struct resource cmt4_resources[] = {
[0] = {
- .name = "CMT4",
.start = 0x044a0050,
.end = 0x044a005b,
.flags = IORESOURCE_MEM,
@@ -282,16 +267,13 @@ static struct platform_device cmt4_device = {
};
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x02,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = 0xa412fe94,
.end = 0xa412fe9f,
.flags = IORESOURCE_MEM,
@@ -313,16 +295,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0xe,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = 0xa412fea0,
.end = 0xa412feab,
.flags = IORESOURCE_MEM,
@@ -344,15 +323,12 @@ static struct platform_device tmu1_device = {
};
static struct sh_timer_config tmu2_platform_data = {
- .name = "TMU2",
.channel_offset = 0x1a,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu2_resources[] = {
[0] = {
- .name = "TMU2",
.start = 0xa412feac,
.end = 0xa412feb5,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c
index 822977a06d84..d180f16281ed 100644
--- a/arch/sh/kernel/cpu/sh4/probe.c
+++ b/arch/sh/kernel/cpu/sh4/probe.c
@@ -15,7 +15,7 @@
#include <asm/processor.h>
#include <asm/cache.h>
-int __init detect_cpu_and_cache_system(void)
+void __cpuinit cpu_probe(void)
{
unsigned long pvr, prr, cvr;
unsigned long size;
@@ -251,6 +251,4 @@ int __init detect_cpu_and_cache_system(void)
boot_cpu_data.scache.linesz);
}
}
-
- return 0;
}
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
index b9b7e10ad68f..e916b18e1f7c 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
@@ -31,16 +31,13 @@ static struct platform_device scif0_device = {
};
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = 0xffd80008,
.end = 0xffd80013,
.flags = IORESOURCE_MEM,
@@ -62,16 +59,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = 0xffd80014,
.end = 0xffd8001f,
.flags = IORESOURCE_MEM,
@@ -93,15 +87,12 @@ static struct platform_device tmu1_device = {
};
static struct sh_timer_config tmu2_platform_data = {
- .name = "TMU2",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu2_resources[] = {
[0] = {
- .name = "TMU2",
.start = 0xffd80020,
.end = 0xffd8002f,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
index ffd79e57254f..911d196e86b5 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
@@ -66,16 +66,13 @@ static struct platform_device scif1_device = {
};
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = 0xffd80008,
.end = 0xffd80013,
.flags = IORESOURCE_MEM,
@@ -97,16 +94,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = 0xffd80014,
.end = 0xffd8001f,
.flags = IORESOURCE_MEM,
@@ -128,15 +122,12 @@ static struct platform_device tmu1_device = {
};
static struct sh_timer_config tmu2_platform_data = {
- .name = "TMU2",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu2_resources[] = {
[0] = {
- .name = "TMU2",
.start = 0xffd80020,
.end = 0xffd8002f,
.flags = IORESOURCE_MEM,
@@ -163,15 +154,12 @@ static struct platform_device tmu2_device = {
defined(CONFIG_CPU_SUBTYPE_SH7751R)
static struct sh_timer_config tmu3_platform_data = {
- .name = "TMU3",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "peripheral_clk",
};
static struct resource tmu3_resources[] = {
[0] = {
- .name = "TMU3",
.start = 0xfe100008,
.end = 0xfe100013,
.flags = IORESOURCE_MEM,
@@ -193,15 +181,12 @@ static struct platform_device tmu3_device = {
};
static struct sh_timer_config tmu4_platform_data = {
- .name = "TMU4",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "peripheral_clk",
};
static struct resource tmu4_resources[] = {
[0] = {
- .name = "TMU4",
.start = 0xfe100014,
.end = 0xfe10001f,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7760.c b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
index a16eb3656f4b..48ea8fe85dc5 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7760.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
@@ -187,16 +187,13 @@ static struct platform_device scif3_device = {
};
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = 0xffd80008,
.end = 0xffd80013,
.flags = IORESOURCE_MEM,
@@ -218,16 +215,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = 0xffd80014,
.end = 0xffd8001f,
.flags = IORESOURCE_MEM,
@@ -249,15 +243,12 @@ static struct platform_device tmu1_device = {
};
static struct sh_timer_config tmu2_platform_data = {
- .name = "TMU2",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu2_resources[] = {
[0] = {
- .name = "TMU2",
.start = 0xffd80020,
.end = 0xffd8002f,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7343.c b/arch/sh/kernel/cpu/sh4a/clock-sh7343.c
index 2c16df37eda6..a066c438b404 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7343.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7343.c
@@ -21,6 +21,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
+#include <asm/clkdev.h>
#include <asm/clock.h>
/* SH7343 registers */
@@ -135,8 +136,10 @@ struct clk div4_clks[DIV4_NR] = {
[DIV4_SIUB] = DIV4("siub_clk", SCLKBCR, 0, 0x1fff, 0),
};
-struct clk div6_clks[] = {
- SH_CLK_DIV6("video_clk", &pll_clk, VCLKCR, 0),
+enum { DIV6_V, DIV6_NR };
+
+struct clk div6_clks[DIV6_NR] = {
+ [DIV6_V] = SH_CLK_DIV6(&pll_clk, VCLKCR, 0),
};
#define MSTP(_str, _parent, _reg, _bit, _flags) \
@@ -154,15 +157,15 @@ static struct clk mstp_clks[] = {
MSTP("sh0", &div4_clks[DIV4_P], MSTPCR0, 20, 0),
MSTP("hudi0", &div4_clks[DIV4_P], MSTPCR0, 19, 0),
MSTP("ubc0", &div4_clks[DIV4_P], MSTPCR0, 17, 0),
- MSTP("tmu0", &div4_clks[DIV4_P], MSTPCR0, 15, 0),
- MSTP("cmt0", &r_clk, MSTPCR0, 14, 0),
+ MSTP("tmu_fck", &div4_clks[DIV4_P], MSTPCR0, 15, 0),
+ MSTP("cmt_fck", &r_clk, MSTPCR0, 14, 0),
MSTP("rwdt0", &r_clk, MSTPCR0, 13, 0),
MSTP("mfi0", &div4_clks[DIV4_P], MSTPCR0, 11, 0),
MSTP("flctl0", &div4_clks[DIV4_P], MSTPCR0, 10, 0),
- MSTP("scif0", &div4_clks[DIV4_P], MSTPCR0, 7, 0),
- MSTP("scif1", &div4_clks[DIV4_P], MSTPCR0, 6, 0),
- MSTP("scif2", &div4_clks[DIV4_P], MSTPCR0, 5, 0),
- MSTP("scif3", &div4_clks[DIV4_P], MSTPCR0, 4, 0),
+ SH_CLK_MSTP32("sci_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 7, 0),
+ SH_CLK_MSTP32("sci_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 6, 0),
+ SH_CLK_MSTP32("sci_fck", 2, &div4_clks[DIV4_P], MSTPCR0, 5, 0),
+ SH_CLK_MSTP32("sci_fck", 3, &div4_clks[DIV4_P], MSTPCR0, 4, 0),
MSTP("sio0", &div4_clks[DIV4_P], MSTPCR0, 3, 0),
MSTP("siof0", &div4_clks[DIV4_P], MSTPCR0, 2, 0),
MSTP("siof1", &div4_clks[DIV4_P], MSTPCR0, 1, 0),
@@ -189,6 +192,13 @@ static struct clk mstp_clks[] = {
MSTP("lcdc0", &div4_clks[DIV4_B], MSTPCR2, 0, 0),
};
+#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
+
+static struct clk_lookup lookups[] = {
+ /* DIV6 clocks */
+ CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]),
+};
+
int __init arch_clk_init(void)
{
int k, ret = 0;
@@ -202,11 +212,13 @@ int __init arch_clk_init(void)
for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
ret = clk_register(main_clks[k]);
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
if (!ret)
ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
if (!ret)
- ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks));
+ ret = sh_clk_div6_register(div6_clks, DIV6_NR);
if (!ret)
ret = sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks));
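
Here the SH7343 clock code stops naming its DIV6 clock "video_clk" directly and instead registers that name as a clk_lookup entry via clkdev_add_table(); consumers keep resolving the clock through the normal clk API. A minimal sketch of the consumer side under that assumption (the function and device are hypothetical, not part of this patch):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_enable_video_clk(struct device *dev)
{
        struct clk *clk;
        int ret;

        /* clkdev matches the "video_clk" con_id registered in lookups[] */
        clk = clk_get(dev, "video_clk");
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        ret = clk_enable(clk);
        if (ret) {
                clk_put(clk);
                return ret;
        }

        dev_info(dev, "video_clk at %lu Hz\n", clk_get_rate(clk));
        return 0;
}
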
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7366.c b/arch/sh/kernel/cpu/sh4a/clock-sh7366.c
index 91588d280cd8..44cc5a0965d9 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7366.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7366.c
@@ -21,6 +21,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
+#include <asm/clkdev.h>
#include <asm/clock.h>
/* SH7366 registers */
@@ -138,8 +139,10 @@ struct clk div4_clks[DIV4_NR] = {
[DIV4_SIUB] = DIV4("siub_clk", SCLKBCR, 0, 0x1fff, 0),
};
-struct clk div6_clks[] = {
- SH_CLK_DIV6("video_clk", &pll_clk, VCLKCR, 0),
+enum { DIV6_V, DIV6_NR };
+
+struct clk div6_clks[DIV6_NR] = {
+ [DIV6_V] = SH_CLK_DIV6(&pll_clk, VCLKCR, 0),
};
#define MSTP(_str, _parent, _reg, _bit, _flags) \
@@ -158,14 +161,14 @@ static struct clk mstp_clks[] = {
MSTP("sh0", &div4_clks[DIV4_P], MSTPCR0, 20, 0),
MSTP("hudi0", &div4_clks[DIV4_P], MSTPCR0, 19, 0),
MSTP("ubc0", &div4_clks[DIV4_P], MSTPCR0, 17, 0),
- MSTP("tmu0", &div4_clks[DIV4_P], MSTPCR0, 15, 0),
- MSTP("cmt0", &r_clk, MSTPCR0, 14, 0),
+ MSTP("tmu_fck", &div4_clks[DIV4_P], MSTPCR0, 15, 0),
+ MSTP("cmt_fck", &r_clk, MSTPCR0, 14, 0),
MSTP("rwdt0", &r_clk, MSTPCR0, 13, 0),
MSTP("mfi0", &div4_clks[DIV4_P], MSTPCR0, 11, 0),
MSTP("flctl0", &div4_clks[DIV4_P], MSTPCR0, 10, 0),
- MSTP("scif0", &div4_clks[DIV4_P], MSTPCR0, 7, 0),
- MSTP("scif1", &div4_clks[DIV4_P], MSTPCR0, 6, 0),
- MSTP("scif2", &div4_clks[DIV4_P], MSTPCR0, 5, 0),
+ SH_CLK_MSTP32("sci_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 7, 0),
+ SH_CLK_MSTP32("sci_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 6, 0),
+ SH_CLK_MSTP32("sci_fck", 2, &div4_clks[DIV4_P], MSTPCR0, 5, 0),
MSTP("msiof0", &div4_clks[DIV4_P], MSTPCR0, 2, 0),
MSTP("sbr0", &div4_clks[DIV4_P], MSTPCR0, 1, 0),
@@ -189,6 +192,13 @@ static struct clk mstp_clks[] = {
MSTP("lcdc0", &div4_clks[DIV4_B], MSTPCR2, 0, 0),
};
+#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
+
+static struct clk_lookup lookups[] = {
+ /* DIV6 clocks */
+ CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]),
+};
+
int __init arch_clk_init(void)
{
int k, ret = 0;
@@ -202,11 +212,13 @@ int __init arch_clk_init(void)
for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
ret = clk_register(main_clks[k]);
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
if (!ret)
ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
if (!ret)
- ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks));
+ ret = sh_clk_div6_register(div6_clks, DIV6_NR);
if (!ret)
ret = sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks));
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
index 15db6d521c5c..2798ceaa648f 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
@@ -21,6 +21,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
+#include <asm/clkdev.h>
#include <asm/clock.h>
#include <asm/hwblk.h>
#include <cpu/sh7722.h>
@@ -148,41 +149,98 @@ struct clk div4_reparent_clks[DIV4_REPARENT_NR] = {
[DIV4_SIUB] = DIV4("siub_clk", SCLKBCR, 0, 0x1fff, 0),
};
-struct clk div6_clks[] = {
- SH_CLK_DIV6("video_clk", &pll_clk, VCLKCR, 0),
+enum { DIV6_V, DIV6_NR };
+
+struct clk div6_clks[DIV6_NR] = {
+ [DIV6_V] = SH_CLK_DIV6(&pll_clk, VCLKCR, 0),
+};
+
+static struct clk mstp_clks[HWBLK_NR] = {
+ SH_HWBLK_CLK(HWBLK_URAM, &div4_clks[DIV4_U], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_XYMEM, &div4_clks[DIV4_B], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_TMU, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_CMT, &r_clk, 0),
+ SH_HWBLK_CLK(HWBLK_RWDT, &r_clk, 0),
+ SH_HWBLK_CLK(HWBLK_FLCTL, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_SCIF0, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_SCIF1, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_SCIF2, &div4_clks[DIV4_P], 0),
+
+ SH_HWBLK_CLK(HWBLK_IIC, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_RTC, &r_clk, 0),
+
+ SH_HWBLK_CLK(HWBLK_SDHI, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_KEYSC, &r_clk, 0),
+ SH_HWBLK_CLK(HWBLK_USBF, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_2DG, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_SIU, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_VOU, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_JPU, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_BEU, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_CEU, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_VEU, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_VPU, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_LCDC, &div4_clks[DIV4_P], 0),
};
-#define R_CLK &r_clk
-#define P_CLK &div4_clks[DIV4_P]
-#define B_CLK &div4_clks[DIV4_B]
-#define U_CLK &div4_clks[DIV4_U]
-
-static struct clk mstp_clks[] = {
- SH_HWBLK_CLK("uram0", -1, U_CLK, HWBLK_URAM, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("xymem0", -1, B_CLK, HWBLK_XYMEM, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("tmu0", -1, P_CLK, HWBLK_TMU, 0),
- SH_HWBLK_CLK("cmt0", -1, R_CLK, HWBLK_CMT, 0),
- SH_HWBLK_CLK("rwdt0", -1, R_CLK, HWBLK_RWDT, 0),
- SH_HWBLK_CLK("flctl0", -1, P_CLK, HWBLK_FLCTL, 0),
- SH_HWBLK_CLK("scif0", -1, P_CLK, HWBLK_SCIF0, 0),
- SH_HWBLK_CLK("scif1", -1, P_CLK, HWBLK_SCIF1, 0),
- SH_HWBLK_CLK("scif2", -1, P_CLK, HWBLK_SCIF2, 0),
-
- SH_HWBLK_CLK("i2c0", -1, P_CLK, HWBLK_IIC, 0),
- SH_HWBLK_CLK("rtc0", -1, R_CLK, HWBLK_RTC, 0),
-
- SH_HWBLK_CLK("sdhi0", -1, P_CLK, HWBLK_SDHI, 0),
- SH_HWBLK_CLK("keysc0", -1, R_CLK, HWBLK_KEYSC, 0),
- SH_HWBLK_CLK("usbf0", -1, P_CLK, HWBLK_USBF, 0),
- SH_HWBLK_CLK("2dg0", -1, B_CLK, HWBLK_2DG, 0),
- SH_HWBLK_CLK("siu0", -1, B_CLK, HWBLK_SIU, 0),
- SH_HWBLK_CLK("vou0", -1, B_CLK, HWBLK_VOU, 0),
- SH_HWBLK_CLK("jpu0", -1, B_CLK, HWBLK_JPU, 0),
- SH_HWBLK_CLK("beu0", -1, B_CLK, HWBLK_BEU, 0),
- SH_HWBLK_CLK("ceu0", -1, B_CLK, HWBLK_CEU, 0),
- SH_HWBLK_CLK("veu0", -1, B_CLK, HWBLK_VEU, 0),
- SH_HWBLK_CLK("vpu0", -1, B_CLK, HWBLK_VPU, 0),
- SH_HWBLK_CLK("lcdc0", -1, P_CLK, HWBLK_LCDC, 0),
+#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
+
+static struct clk_lookup lookups[] = {
+ /* DIV6 clocks */
+ CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]),
+
+ /* MSTP clocks */
+ CLKDEV_CON_ID("uram0", &mstp_clks[HWBLK_URAM]),
+ CLKDEV_CON_ID("xymem0", &mstp_clks[HWBLK_XYMEM]),
+ {
+ /* TMU0 */
+ .dev_id = "sh_tmu.0",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[HWBLK_TMU],
+ }, {
+ /* TMU1 */
+ .dev_id = "sh_tmu.1",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[HWBLK_TMU],
+ }, {
+ /* TMU2 */
+ .dev_id = "sh_tmu.2",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[HWBLK_TMU],
+ },
+ CLKDEV_CON_ID("cmt_fck", &mstp_clks[HWBLK_CMT]),
+ CLKDEV_CON_ID("rwdt0", &mstp_clks[HWBLK_RWDT]),
+ CLKDEV_CON_ID("flctl0", &mstp_clks[HWBLK_FLCTL]),
+ {
+ /* SCIF0 */
+ .dev_id = "sh-sci.0",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[HWBLK_SCIF0],
+ }, {
+ /* SCIF1 */
+ .dev_id = "sh-sci.1",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[HWBLK_SCIF1],
+ }, {
+ /* SCIF2 */
+ .dev_id = "sh-sci.2",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[HWBLK_SCIF2],
+ },
+ CLKDEV_CON_ID("i2c0", &mstp_clks[HWBLK_IIC]),
+ CLKDEV_CON_ID("rtc0", &mstp_clks[HWBLK_RTC]),
+ CLKDEV_CON_ID("sdhi0", &mstp_clks[HWBLK_SDHI]),
+ CLKDEV_CON_ID("keysc0", &mstp_clks[HWBLK_KEYSC]),
+ CLKDEV_CON_ID("usbf0", &mstp_clks[HWBLK_USBF]),
+ CLKDEV_CON_ID("2dg0", &mstp_clks[HWBLK_2DG]),
+ CLKDEV_CON_ID("siu0", &mstp_clks[HWBLK_SIU]),
+ CLKDEV_CON_ID("vou0", &mstp_clks[HWBLK_VOU]),
+ CLKDEV_CON_ID("jpu0", &mstp_clks[HWBLK_JPU]),
+ CLKDEV_CON_ID("beu0", &mstp_clks[HWBLK_BEU]),
+ CLKDEV_CON_ID("ceu0", &mstp_clks[HWBLK_CEU]),
+ CLKDEV_CON_ID("veu0", &mstp_clks[HWBLK_VEU]),
+ CLKDEV_CON_ID("vpu0", &mstp_clks[HWBLK_VPU]),
+ CLKDEV_CON_ID("lcdc0", &mstp_clks[HWBLK_LCDC]),
};
int __init arch_clk_init(void)
@@ -198,6 +256,8 @@ int __init arch_clk_init(void)
for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
ret = clk_register(main_clks[k]);
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
if (!ret)
ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
@@ -210,10 +270,10 @@ int __init arch_clk_init(void)
DIV4_REPARENT_NR, &div4_table);
if (!ret)
- ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks));
+ ret = sh_clk_div6_register(div6_clks, DIV6_NR);
if (!ret)
- ret = sh_hwblk_clk_register(mstp_clks, ARRAY_SIZE(mstp_clks));
+ ret = sh_hwblk_clk_register(mstp_clks, HWBLK_NR);
return ret;
}
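
For SH7722 the MSTP gate clocks become an array indexed by the HWBLK_* identifiers, and several logical devices share one hardware-block clock through dev_id/con_id pairs: all three TMU channels point at mstp_clks[HWBLK_TMU], and each SCIF at its own block. The repeated per-device entries could be expressed with small helper macros of the shape sketched below; these helpers are not part of this patch (equivalents only appeared in the generic clkdev header later), so treat the names as illustrative:

/* Hypothetical helpers mirroring CLKDEV_CON_ID() above. */
#define CLKDEV_DEV_ID(_id, _clk)        { .dev_id = _id, .clk = _clk }
#define CLKDEV_ICK_ID(_cid, _did, _clk) \
        { .con_id = _cid, .dev_id = _did, .clk = _clk }

static struct clk_lookup tmu_lookups[] = {
        CLKDEV_ICK_ID("tmu_fck", "sh_tmu.0", &mstp_clks[HWBLK_TMU]),
        CLKDEV_ICK_ID("tmu_fck", "sh_tmu.1", &mstp_clks[HWBLK_TMU]),
        CLKDEV_ICK_ID("tmu_fck", "sh_tmu.2", &mstp_clks[HWBLK_TMU]),
};
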
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
index 50babe01fe44..500715f78142 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
@@ -21,6 +21,8 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
+#include <linux/clk.h>
+#include <asm/clkdev.h>
#include <asm/clock.h>
#include <asm/hwblk.h>
#include <cpu/sh7723.h>
@@ -147,68 +149,174 @@ struct clk div4_reparent_clks[DIV4_REPARENT_NR] = {
[DIV4_SIUA] = DIV4("siua_clk", SCLKACR, 0, 0x0dbf, 0),
[DIV4_SIUB] = DIV4("siub_clk", SCLKBCR, 0, 0x0dbf, 0),
};
-struct clk div6_clks[] = {
- SH_CLK_DIV6("video_clk", &pll_clk, VCLKCR, 0),
-};
+enum { DIV6_V, DIV6_NR };
-#define R_CLK (&r_clk)
-#define P_CLK (&div4_clks[DIV4_P])
-#define B_CLK (&div4_clks[DIV4_B])
-#define U_CLK (&div4_clks[DIV4_U])
-#define I_CLK (&div4_clks[DIV4_I])
-#define SH_CLK (&div4_clks[DIV4_SH])
+struct clk div6_clks[DIV6_NR] = {
+ [DIV6_V] = SH_CLK_DIV6(&pll_clk, VCLKCR, 0),
+};
static struct clk mstp_clks[] = {
/* See page 60 of Datasheet V1.0: Overview -> Block Diagram */
- SH_HWBLK_CLK("tlb0", -1, I_CLK, HWBLK_TLB, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("ic0", -1, I_CLK, HWBLK_IC, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("oc0", -1, I_CLK, HWBLK_OC, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("l2c0", -1, SH_CLK, HWBLK_L2C, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("ilmem0", -1, I_CLK, HWBLK_ILMEM, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("fpu0", -1, I_CLK, HWBLK_FPU, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("intc0", -1, I_CLK, HWBLK_INTC, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("dmac0", -1, B_CLK, HWBLK_DMAC0, 0),
- SH_HWBLK_CLK("sh0", -1, SH_CLK, HWBLK_SHYWAY, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("hudi0", -1, P_CLK, HWBLK_HUDI, 0),
- SH_HWBLK_CLK("ubc0", -1, I_CLK, HWBLK_UBC, 0),
- SH_HWBLK_CLK("tmu0", -1, P_CLK, HWBLK_TMU0, 0),
- SH_HWBLK_CLK("cmt0", -1, R_CLK, HWBLK_CMT, 0),
- SH_HWBLK_CLK("rwdt0", -1, R_CLK, HWBLK_RWDT, 0),
- SH_HWBLK_CLK("dmac1", -1, B_CLK, HWBLK_DMAC1, 0),
- SH_HWBLK_CLK("tmu1", -1, P_CLK, HWBLK_TMU1, 0),
- SH_HWBLK_CLK("flctl0", -1, P_CLK, HWBLK_FLCTL, 0),
- SH_HWBLK_CLK("scif0", -1, P_CLK, HWBLK_SCIF0, 0),
- SH_HWBLK_CLK("scif1", -1, P_CLK, HWBLK_SCIF1, 0),
- SH_HWBLK_CLK("scif2", -1, P_CLK, HWBLK_SCIF2, 0),
- SH_HWBLK_CLK("scif3", -1, B_CLK, HWBLK_SCIF3, 0),
- SH_HWBLK_CLK("scif4", -1, B_CLK, HWBLK_SCIF4, 0),
- SH_HWBLK_CLK("scif5", -1, B_CLK, HWBLK_SCIF5, 0),
- SH_HWBLK_CLK("msiof0", -1, B_CLK, HWBLK_MSIOF0, 0),
- SH_HWBLK_CLK("msiof1", -1, B_CLK, HWBLK_MSIOF1, 0),
- SH_HWBLK_CLK("meram0", -1, SH_CLK, HWBLK_MERAM, 0),
-
- SH_HWBLK_CLK("i2c0", -1, P_CLK, HWBLK_IIC, 0),
- SH_HWBLK_CLK("rtc0", -1, R_CLK, HWBLK_RTC, 0),
-
- SH_HWBLK_CLK("atapi0", -1, SH_CLK, HWBLK_ATAPI, 0),
- SH_HWBLK_CLK("adc0", -1, P_CLK, HWBLK_ADC, 0),
- SH_HWBLK_CLK("tpu0", -1, B_CLK, HWBLK_TPU, 0),
- SH_HWBLK_CLK("irda0", -1, P_CLK, HWBLK_IRDA, 0),
- SH_HWBLK_CLK("tsif0", -1, B_CLK, HWBLK_TSIF, 0),
- SH_HWBLK_CLK("icb0", -1, B_CLK, HWBLK_ICB, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("sdhi0", -1, B_CLK, HWBLK_SDHI0, 0),
- SH_HWBLK_CLK("sdhi1", -1, B_CLK, HWBLK_SDHI1, 0),
- SH_HWBLK_CLK("keysc0", -1, R_CLK, HWBLK_KEYSC, 0),
- SH_HWBLK_CLK("usb0", -1, B_CLK, HWBLK_USB, 0),
- SH_HWBLK_CLK("2dg0", -1, B_CLK, HWBLK_2DG, 0),
- SH_HWBLK_CLK("siu0", -1, B_CLK, HWBLK_SIU, 0),
- SH_HWBLK_CLK("veu1", -1, B_CLK, HWBLK_VEU2H1, 0),
- SH_HWBLK_CLK("vou0", -1, B_CLK, HWBLK_VOU, 0),
- SH_HWBLK_CLK("beu0", -1, B_CLK, HWBLK_BEU, 0),
- SH_HWBLK_CLK("ceu0", -1, B_CLK, HWBLK_CEU, 0),
- SH_HWBLK_CLK("veu0", -1, B_CLK, HWBLK_VEU2H0, 0),
- SH_HWBLK_CLK("vpu0", -1, B_CLK, HWBLK_VPU, 0),
- SH_HWBLK_CLK("lcdc0", -1, B_CLK, HWBLK_LCDC, 0),
+ SH_HWBLK_CLK(HWBLK_TLB, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_IC, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_OC, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_L2C, &div4_clks[DIV4_SH], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_ILMEM, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_FPU, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_INTC, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_DMAC0, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_SHYWAY, &div4_clks[DIV4_SH], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_HUDI, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_UBC, &div4_clks[DIV4_I], 0),
+ SH_HWBLK_CLK(HWBLK_TMU0, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_CMT, &r_clk, 0),
+ SH_HWBLK_CLK(HWBLK_RWDT, &r_clk, 0),
+ SH_HWBLK_CLK(HWBLK_DMAC1, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_TMU1, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_FLCTL, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_SCIF0, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_SCIF1, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_SCIF2, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_SCIF3, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_SCIF4, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_SCIF5, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_MSIOF0, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_MSIOF1, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_MERAM, &div4_clks[DIV4_SH], 0),
+
+ SH_HWBLK_CLK(HWBLK_IIC, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_RTC, &r_clk, 0),
+
+ SH_HWBLK_CLK(HWBLK_ATAPI, &div4_clks[DIV4_SH], 0),
+ SH_HWBLK_CLK(HWBLK_ADC, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_TPU, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_IRDA, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_TSIF, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_ICB, &div4_clks[DIV4_B], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_SDHI0, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_SDHI1, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_KEYSC, &r_clk, 0),
+ SH_HWBLK_CLK(HWBLK_USB, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_2DG, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_SIU, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_VEU2H1, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_VOU, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_BEU, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_CEU, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_VEU2H0, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_VPU, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_LCDC, &div4_clks[DIV4_B], 0),
+};
+
+#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
+
+static struct clk_lookup lookups[] = {
+ /* DIV6 clocks */
+ CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]),
+
+ /* MSTP clocks */
+ CLKDEV_CON_ID("tlb0", &mstp_clks[HWBLK_TLB]),
+ CLKDEV_CON_ID("ic0", &mstp_clks[HWBLK_IC]),
+ CLKDEV_CON_ID("oc0", &mstp_clks[HWBLK_OC]),
+ CLKDEV_CON_ID("l2c0", &mstp_clks[HWBLK_L2C]),
+ CLKDEV_CON_ID("ilmem0", &mstp_clks[HWBLK_ILMEM]),
+ CLKDEV_CON_ID("fpu0", &mstp_clks[HWBLK_FPU]),
+ CLKDEV_CON_ID("intc0", &mstp_clks[HWBLK_INTC]),
+ CLKDEV_CON_ID("dmac0", &mstp_clks[HWBLK_DMAC0]),
+ CLKDEV_CON_ID("sh0", &mstp_clks[HWBLK_SHYWAY]),
+ CLKDEV_CON_ID("hudi0", &mstp_clks[HWBLK_HUDI]),
+ CLKDEV_CON_ID("ubc0", &mstp_clks[HWBLK_UBC]),
+ {
+ /* TMU0 */
+ .dev_id = "sh_tmu.0",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[HWBLK_TMU0],
+ }, {
+ /* TMU1 */
+ .dev_id = "sh_tmu.1",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[HWBLK_TMU0],
+ }, {
+ /* TMU2 */
+ .dev_id = "sh_tmu.2",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[HWBLK_TMU0],
+ },
+ CLKDEV_CON_ID("cmt_fck", &mstp_clks[HWBLK_CMT]),
+ CLKDEV_CON_ID("rwdt0", &mstp_clks[HWBLK_RWDT]),
+ CLKDEV_CON_ID("dmac1", &mstp_clks[HWBLK_DMAC1]),
+ {
+ /* TMU3 */
+ .dev_id = "sh_tmu.3",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[HWBLK_TMU1],
+ }, {
+ /* TMU4 */
+ .dev_id = "sh_tmu.4",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[HWBLK_TMU1],
+ }, {
+ /* TMU5 */
+ .dev_id = "sh_tmu.5",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[HWBLK_TMU1],
+ },
+ CLKDEV_CON_ID("flctl0", &mstp_clks[HWBLK_FLCTL]),
+ {
+ /* SCIF0 */
+ .dev_id = "sh-sci.0",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[HWBLK_SCIF0],
+ }, {
+ /* SCIF1 */
+ .dev_id = "sh-sci.1",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[HWBLK_SCIF1],
+ }, {
+ /* SCIF2 */
+ .dev_id = "sh-sci.2",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[HWBLK_SCIF2],
+ }, {
+ /* SCIF3 */
+ .dev_id = "sh-sci.3",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[HWBLK_SCIF3],
+ }, {
+ /* SCIF4 */
+ .dev_id = "sh-sci.4",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[HWBLK_SCIF4],
+ }, {
+ /* SCIF5 */
+ .dev_id = "sh-sci.5",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[HWBLK_SCIF5],
+ },
+ CLKDEV_CON_ID("msiof0", &mstp_clks[HWBLK_MSIOF0]),
+ CLKDEV_CON_ID("msiof1", &mstp_clks[HWBLK_MSIOF1]),
+ CLKDEV_CON_ID("meram0", &mstp_clks[HWBLK_MERAM]),
+ CLKDEV_CON_ID("i2c0", &mstp_clks[HWBLK_IIC]),
+ CLKDEV_CON_ID("rtc0", &mstp_clks[HWBLK_RTC]),
+ CLKDEV_CON_ID("atapi0", &mstp_clks[HWBLK_ATAPI]),
+ CLKDEV_CON_ID("adc0", &mstp_clks[HWBLK_ADC]),
+ CLKDEV_CON_ID("tpu0", &mstp_clks[HWBLK_TPU]),
+ CLKDEV_CON_ID("irda0", &mstp_clks[HWBLK_IRDA]),
+ CLKDEV_CON_ID("tsif0", &mstp_clks[HWBLK_TSIF]),
+ CLKDEV_CON_ID("icb0", &mstp_clks[HWBLK_ICB]),
+ CLKDEV_CON_ID("sdhi0", &mstp_clks[HWBLK_SDHI0]),
+ CLKDEV_CON_ID("sdhi1", &mstp_clks[HWBLK_SDHI1]),
+ CLKDEV_CON_ID("keysc0", &mstp_clks[HWBLK_KEYSC]),
+ CLKDEV_CON_ID("usb0", &mstp_clks[HWBLK_USB]),
+ CLKDEV_CON_ID("2dg0", &mstp_clks[HWBLK_2DG]),
+ CLKDEV_CON_ID("siu0", &mstp_clks[HWBLK_SIU]),
+ CLKDEV_CON_ID("veu1", &mstp_clks[HWBLK_VEU2H1]),
+ CLKDEV_CON_ID("vou0", &mstp_clks[HWBLK_VOU]),
+ CLKDEV_CON_ID("beu0", &mstp_clks[HWBLK_BEU]),
+ CLKDEV_CON_ID("ceu0", &mstp_clks[HWBLK_CEU]),
+ CLKDEV_CON_ID("veu0", &mstp_clks[HWBLK_VEU2H0]),
+ CLKDEV_CON_ID("vpu0", &mstp_clks[HWBLK_VPU]),
+ CLKDEV_CON_ID("lcdc0", &mstp_clks[HWBLK_LCDC]),
};
int __init arch_clk_init(void)
@@ -222,7 +330,9 @@ int __init arch_clk_init(void)
pll_clk.parent = &extal_clk;
for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
- ret = clk_register(main_clks[k]);
+ ret |= clk_register(main_clks[k]);
+
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
if (!ret)
ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
@@ -236,10 +346,10 @@ int __init arch_clk_init(void)
DIV4_REPARENT_NR, &div4_table);
if (!ret)
- ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks));
+ ret = sh_clk_div6_register(div6_clks, DIV6_NR);
if (!ret)
- ret = sh_hwblk_clk_register(mstp_clks, ARRAY_SIZE(mstp_clks));
+ ret = sh_hwblk_clk_register(mstp_clks, HWBLK_NR);
return ret;
}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
index 6707061fbf54..2bbff53fcd87 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
@@ -21,6 +21,8 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
+#include <linux/clk.h>
+#include <asm/clkdev.h>
#include <asm/clock.h>
#include <asm/hwblk.h>
#include <cpu/sh7724.h>
@@ -162,75 +164,190 @@ struct clk div4_clks[DIV4_NR] = {
[DIV4_M1] = DIV4("vpu_clk", FRQCRB, 4, 0x2f7c, CLK_ENABLE_ON_INIT),
};
-struct clk div6_clks[] = {
- SH_CLK_DIV6("video_clk", &div3_clk, VCLKCR, 0),
- SH_CLK_DIV6("fsia_clk", &div3_clk, FCLKACR, 0),
- SH_CLK_DIV6("fsib_clk", &div3_clk, FCLKBCR, 0),
- SH_CLK_DIV6("irda_clk", &div3_clk, IRDACLKCR, 0),
- SH_CLK_DIV6("spu_clk", &div3_clk, SPUCLKCR, CLK_ENABLE_ON_INIT),
+enum { DIV6_V, DIV6_FA, DIV6_FB, DIV6_I, DIV6_S, DIV6_NR };
+
+struct clk div6_clks[DIV6_NR] = {
+ [DIV6_V] = SH_CLK_DIV6(&div3_clk, VCLKCR, 0),
+ [DIV6_FA] = SH_CLK_DIV6(&div3_clk, FCLKACR, 0),
+ [DIV6_FB] = SH_CLK_DIV6(&div3_clk, FCLKBCR, 0),
+ [DIV6_I] = SH_CLK_DIV6(&div3_clk, IRDACLKCR, 0),
+ [DIV6_S] = SH_CLK_DIV6(&div3_clk, SPUCLKCR, CLK_ENABLE_ON_INIT),
+};
+
+static struct clk mstp_clks[HWBLK_NR] = {
+ SH_HWBLK_CLK(HWBLK_TLB, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_IC, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_OC, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_RSMEM, &div4_clks[DIV4_B], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_ILMEM, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_L2C, &div4_clks[DIV4_SH], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_FPU, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_INTC, &div4_clks[DIV4_P], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_DMAC0, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_SHYWAY, &div4_clks[DIV4_SH], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_HUDI, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_UBC, &div4_clks[DIV4_I], 0),
+ SH_HWBLK_CLK(HWBLK_TMU0, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_CMT, &r_clk, 0),
+ SH_HWBLK_CLK(HWBLK_RWDT, &r_clk, 0),
+ SH_HWBLK_CLK(HWBLK_DMAC1, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_TMU1, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_SCIF0, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_SCIF1, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_SCIF2, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_SCIF3, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_SCIF4, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_SCIF5, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_MSIOF0, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_MSIOF1, &div4_clks[DIV4_B], 0),
+
+ SH_HWBLK_CLK(HWBLK_KEYSC, &r_clk, 0),
+ SH_HWBLK_CLK(HWBLK_RTC, &r_clk, 0),
+ SH_HWBLK_CLK(HWBLK_IIC0, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_IIC1, &div4_clks[DIV4_P], 0),
+
+ SH_HWBLK_CLK(HWBLK_MMC, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_ETHER, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_ATAPI, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_TPU, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_IRDA, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_TSIF, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_USB1, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_USB0, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_2DG, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_SDHI0, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_SDHI1, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_VEU1, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_CEU1, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_BEU1, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_2DDMAC, &div4_clks[DIV4_SH], 0),
+ SH_HWBLK_CLK(HWBLK_SPU, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_JPU, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_VOU, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_BEU0, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_CEU0, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_VEU0, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_VPU, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_LCDC, &div4_clks[DIV4_B], 0),
};
-#define R_CLK (&r_clk)
-#define P_CLK (&div4_clks[DIV4_P])
-#define B_CLK (&div4_clks[DIV4_B])
-#define I_CLK (&div4_clks[DIV4_I])
-#define SH_CLK (&div4_clks[DIV4_SH])
-
-static struct clk mstp_clks[] = {
- SH_HWBLK_CLK("tlb0", -1, I_CLK, HWBLK_TLB, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("ic0", -1, I_CLK, HWBLK_IC, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("oc0", -1, I_CLK, HWBLK_OC, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("rs0", -1, B_CLK, HWBLK_RSMEM, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("ilmem0", -1, I_CLK, HWBLK_ILMEM, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("l2c0", -1, SH_CLK, HWBLK_L2C, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("fpu0", -1, I_CLK, HWBLK_FPU, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("intc0", -1, P_CLK, HWBLK_INTC, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("dmac0", -1, B_CLK, HWBLK_DMAC0, 0),
- SH_HWBLK_CLK("sh0", -1, SH_CLK, HWBLK_SHYWAY, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("hudi0", -1, P_CLK, HWBLK_HUDI, 0),
- SH_HWBLK_CLK("ubc0", -1, I_CLK, HWBLK_UBC, 0),
- SH_HWBLK_CLK("tmu0", -1, P_CLK, HWBLK_TMU0, 0),
- SH_HWBLK_CLK("cmt0", -1, R_CLK, HWBLK_CMT, 0),
- SH_HWBLK_CLK("rwdt0", -1, R_CLK, HWBLK_RWDT, 0),
- SH_HWBLK_CLK("dmac1", -1, B_CLK, HWBLK_DMAC1, 0),
- SH_HWBLK_CLK("tmu1", -1, P_CLK, HWBLK_TMU1, 0),
- SH_HWBLK_CLK("scif0", -1, P_CLK, HWBLK_SCIF0, 0),
- SH_HWBLK_CLK("scif1", -1, P_CLK, HWBLK_SCIF1, 0),
- SH_HWBLK_CLK("scif2", -1, P_CLK, HWBLK_SCIF2, 0),
- SH_HWBLK_CLK("scif3", -1, B_CLK, HWBLK_SCIF3, 0),
- SH_HWBLK_CLK("scif4", -1, B_CLK, HWBLK_SCIF4, 0),
- SH_HWBLK_CLK("scif5", -1, B_CLK, HWBLK_SCIF5, 0),
- SH_HWBLK_CLK("msiof0", -1, B_CLK, HWBLK_MSIOF0, 0),
- SH_HWBLK_CLK("msiof1", -1, B_CLK, HWBLK_MSIOF1, 0),
-
- SH_HWBLK_CLK("keysc0", -1, R_CLK, HWBLK_KEYSC, 0),
- SH_HWBLK_CLK("rtc0", -1, R_CLK, HWBLK_RTC, 0),
- SH_HWBLK_CLK("i2c0", -1, P_CLK, HWBLK_IIC0, 0),
- SH_HWBLK_CLK("i2c1", -1, P_CLK, HWBLK_IIC1, 0),
-
- SH_HWBLK_CLK("mmc0", -1, B_CLK, HWBLK_MMC, 0),
- SH_HWBLK_CLK("eth0", -1, B_CLK, HWBLK_ETHER, 0),
- SH_HWBLK_CLK("atapi0", -1, B_CLK, HWBLK_ATAPI, 0),
- SH_HWBLK_CLK("tpu0", -1, B_CLK, HWBLK_TPU, 0),
- SH_HWBLK_CLK("irda0", -1, P_CLK, HWBLK_IRDA, 0),
- SH_HWBLK_CLK("tsif0", -1, B_CLK, HWBLK_TSIF, 0),
- SH_HWBLK_CLK("usb1", -1, B_CLK, HWBLK_USB1, 0),
- SH_HWBLK_CLK("usb0", -1, B_CLK, HWBLK_USB0, 0),
- SH_HWBLK_CLK("2dg0", -1, B_CLK, HWBLK_2DG, 0),
- SH_HWBLK_CLK("sdhi0", -1, B_CLK, HWBLK_SDHI0, 0),
- SH_HWBLK_CLK("sdhi1", -1, B_CLK, HWBLK_SDHI1, 0),
- SH_HWBLK_CLK("veu1", -1, B_CLK, HWBLK_VEU1, 0),
- SH_HWBLK_CLK("ceu1", -1, B_CLK, HWBLK_CEU1, 0),
- SH_HWBLK_CLK("beu1", -1, B_CLK, HWBLK_BEU1, 0),
- SH_HWBLK_CLK("2ddmac0", -1, SH_CLK, HWBLK_2DDMAC, 0),
- SH_HWBLK_CLK("spu0", -1, B_CLK, HWBLK_SPU, 0),
- SH_HWBLK_CLK("jpu0", -1, B_CLK, HWBLK_JPU, 0),
- SH_HWBLK_CLK("vou0", -1, B_CLK, HWBLK_VOU, 0),
- SH_HWBLK_CLK("beu0", -1, B_CLK, HWBLK_BEU0, 0),
- SH_HWBLK_CLK("ceu0", -1, B_CLK, HWBLK_CEU0, 0),
- SH_HWBLK_CLK("veu0", -1, B_CLK, HWBLK_VEU0, 0),
- SH_HWBLK_CLK("vpu0", -1, B_CLK, HWBLK_VPU, 0),
- SH_HWBLK_CLK("lcdc0", -1, B_CLK, HWBLK_LCDC, 0),
+#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
+
+static struct clk_lookup lookups[] = {
+ /* DIV6 clocks */
+ CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]),
+ CLKDEV_CON_ID("fsia_clk", &div6_clks[DIV6_FA]),
+ CLKDEV_CON_ID("fsib_clk", &div6_clks[DIV6_FB]),
+ CLKDEV_CON_ID("irda_clk", &div6_clks[DIV6_I]),
+ CLKDEV_CON_ID("spu_clk", &div6_clks[DIV6_S]),
+
+ /* MSTP clocks */
+ CLKDEV_CON_ID("tlb0", &mstp_clks[HWBLK_TLB]),
+ CLKDEV_CON_ID("ic0", &mstp_clks[HWBLK_IC]),
+ CLKDEV_CON_ID("oc0", &mstp_clks[HWBLK_OC]),
+ CLKDEV_CON_ID("rs0", &mstp_clks[HWBLK_RSMEM]),
+ CLKDEV_CON_ID("ilmem0", &mstp_clks[HWBLK_ILMEM]),
+ CLKDEV_CON_ID("l2c0", &mstp_clks[HWBLK_L2C]),
+ CLKDEV_CON_ID("fpu0", &mstp_clks[HWBLK_FPU]),
+ CLKDEV_CON_ID("intc0", &mstp_clks[HWBLK_INTC]),
+ CLKDEV_CON_ID("dmac0", &mstp_clks[HWBLK_DMAC0]),
+ CLKDEV_CON_ID("sh0", &mstp_clks[HWBLK_SHYWAY]),
+ CLKDEV_CON_ID("hudi0", &mstp_clks[HWBLK_HUDI]),
+ CLKDEV_CON_ID("ubc0", &mstp_clks[HWBLK_UBC]),
+ {
+ /* TMU0 */
+ .dev_id = "sh_tmu.0",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[HWBLK_TMU0],
+ }, {
+ /* TMU1 */
+ .dev_id = "sh_tmu.1",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[HWBLK_TMU0],
+ }, {
+ /* TMU2 */
+ .dev_id = "sh_tmu.2",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[HWBLK_TMU0],
+ }, {
+ /* TMU3 */
+ .dev_id = "sh_tmu.3",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[HWBLK_TMU1],
+ },
+ CLKDEV_CON_ID("cmt_fck", &mstp_clks[HWBLK_CMT]),
+ CLKDEV_CON_ID("rwdt0", &mstp_clks[HWBLK_RWDT]),
+ CLKDEV_CON_ID("dmac1", &mstp_clks[HWBLK_DMAC1]),
+ {
+ /* TMU4 */
+ .dev_id = "sh_tmu.4",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[HWBLK_TMU1],
+ }, {
+ /* TMU5 */
+ .dev_id = "sh_tmu.5",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[HWBLK_TMU1],
+ }, {
+ /* SCIF0 */
+ .dev_id = "sh-sci.0",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[HWBLK_SCIF0],
+ }, {
+ /* SCIF1 */
+ .dev_id = "sh-sci.1",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[HWBLK_SCIF1],
+ }, {
+ /* SCIF2 */
+ .dev_id = "sh-sci.2",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[HWBLK_SCIF2],
+ }, {
+ /* SCIF3 */
+ .dev_id = "sh-sci.3",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[HWBLK_SCIF3],
+ }, {
+ /* SCIF4 */
+ .dev_id = "sh-sci.4",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[HWBLK_SCIF4],
+ }, {
+ /* SCIF5 */
+ .dev_id = "sh-sci.5",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[HWBLK_SCIF5],
+ },
+ CLKDEV_CON_ID("msiof0", &mstp_clks[HWBLK_MSIOF0]),
+ CLKDEV_CON_ID("msiof1", &mstp_clks[HWBLK_MSIOF1]),
+ CLKDEV_CON_ID("keysc0", &mstp_clks[HWBLK_KEYSC]),
+ CLKDEV_CON_ID("rtc0", &mstp_clks[HWBLK_RTC]),
+ CLKDEV_CON_ID("i2c0", &mstp_clks[HWBLK_IIC0]),
+ CLKDEV_CON_ID("i2c1", &mstp_clks[HWBLK_IIC1]),
+ CLKDEV_CON_ID("mmc0", &mstp_clks[HWBLK_MMC]),
+ CLKDEV_CON_ID("eth0", &mstp_clks[HWBLK_ETHER]),
+ CLKDEV_CON_ID("atapi0", &mstp_clks[HWBLK_ATAPI]),
+ CLKDEV_CON_ID("tpu0", &mstp_clks[HWBLK_TPU]),
+ CLKDEV_CON_ID("irda0", &mstp_clks[HWBLK_IRDA]),
+ CLKDEV_CON_ID("tsif0", &mstp_clks[HWBLK_TSIF]),
+ CLKDEV_CON_ID("usb1", &mstp_clks[HWBLK_USB1]),
+ CLKDEV_CON_ID("usb0", &mstp_clks[HWBLK_USB0]),
+ CLKDEV_CON_ID("2dg0", &mstp_clks[HWBLK_2DG]),
+ CLKDEV_CON_ID("sdhi0", &mstp_clks[HWBLK_SDHI0]),
+ CLKDEV_CON_ID("sdhi1", &mstp_clks[HWBLK_SDHI1]),
+ CLKDEV_CON_ID("veu1", &mstp_clks[HWBLK_VEU1]),
+ CLKDEV_CON_ID("ceu1", &mstp_clks[HWBLK_CEU1]),
+ CLKDEV_CON_ID("beu1", &mstp_clks[HWBLK_BEU1]),
+ CLKDEV_CON_ID("2ddmac0", &mstp_clks[HWBLK_2DDMAC]),
+ CLKDEV_CON_ID("spu0", &mstp_clks[HWBLK_SPU]),
+ CLKDEV_CON_ID("jpu0", &mstp_clks[HWBLK_JPU]),
+ CLKDEV_CON_ID("vou0", &mstp_clks[HWBLK_VOU]),
+ CLKDEV_CON_ID("beu0", &mstp_clks[HWBLK_BEU0]),
+ CLKDEV_CON_ID("ceu0", &mstp_clks[HWBLK_CEU0]),
+ CLKDEV_CON_ID("veu0", &mstp_clks[HWBLK_VEU0]),
+ CLKDEV_CON_ID("vpu0", &mstp_clks[HWBLK_VPU]),
+ CLKDEV_CON_ID("lcdc0", &mstp_clks[HWBLK_LCDC]),
};
int __init arch_clk_init(void)
@@ -246,14 +363,16 @@ int __init arch_clk_init(void)
for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
ret = clk_register(main_clks[k]);
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
if (!ret)
ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
if (!ret)
- ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks));
+ ret = sh_clk_div6_register(div6_clks, DIV6_NR);
if (!ret)
- ret = sh_hwblk_clk_register(mstp_clks, ARRAY_SIZE(mstp_clks));
+ ret = sh_hwblk_clk_register(mstp_clks, HWBLK_NR);
return ret;
}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7785.c b/arch/sh/kernel/cpu/sh4a/clock-sh7785.c
index d997f0a25b10..28de049a59b1 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7785.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7785.c
@@ -3,7 +3,7 @@
*
* SH7785 support for the clock framework
*
- * Copyright (C) 2007 - 2009 Paul Mundt
+ * Copyright (C) 2007 - 2010 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -14,6 +14,7 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/cpufreq.h>
+#include <asm/clkdev.h>
#include <asm/clock.h>
#include <asm/freq.h>
#include <cpu/sh7785.h>
@@ -88,12 +89,12 @@ struct clk div4_clks[DIV4_NR] = {
static struct clk mstp_clks[] = {
/* MSTPCR0 */
- SH_CLK_MSTP32("scif_fck", 5, &div4_clks[DIV4_P], MSTPCR0, 29, 0),
- SH_CLK_MSTP32("scif_fck", 4, &div4_clks[DIV4_P], MSTPCR0, 28, 0),
- SH_CLK_MSTP32("scif_fck", 3, &div4_clks[DIV4_P], MSTPCR0, 27, 0),
- SH_CLK_MSTP32("scif_fck", 2, &div4_clks[DIV4_P], MSTPCR0, 26, 0),
- SH_CLK_MSTP32("scif_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 25, 0),
- SH_CLK_MSTP32("scif_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 24, 0),
+ SH_CLK_MSTP32("sci_fck", 5, &div4_clks[DIV4_P], MSTPCR0, 29, 0),
+ SH_CLK_MSTP32("sci_fck", 4, &div4_clks[DIV4_P], MSTPCR0, 28, 0),
+ SH_CLK_MSTP32("sci_fck", 3, &div4_clks[DIV4_P], MSTPCR0, 27, 0),
+ SH_CLK_MSTP32("sci_fck", 2, &div4_clks[DIV4_P], MSTPCR0, 26, 0),
+ SH_CLK_MSTP32("sci_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 25, 0),
+ SH_CLK_MSTP32("sci_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 24, 0),
SH_CLK_MSTP32("ssi_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 21, 0),
SH_CLK_MSTP32("ssi_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 20, 0),
SH_CLK_MSTP32("hac_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 17, 0),
@@ -113,12 +114,48 @@ static struct clk mstp_clks[] = {
SH_CLK_MSTP32("gdta_fck", -1, NULL, MSTPCR1, 0, 0),
};
+static struct clk_lookup lookups[] = {
+ {
+ /* TMU0 */
+ .dev_id = "sh_tmu.0",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[13], /* tmu012_fck */
+ }, {
+ /* TMU1 */
+ .dev_id = "sh_tmu.1",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[13],
+ }, {
+ /* TMU2 */
+ .dev_id = "sh_tmu.2",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[13],
+ }, {
+ /* TMU3 */
+ .dev_id = "sh_tmu.3",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[12], /* tmu345_fck */
+ }, {
+ /* TMU4 */
+ .dev_id = "sh_tmu.4",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[12],
+ }, {
+ /* TMU5 */
+ .dev_id = "sh_tmu.5",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[12],
+ },
+};
+
int __init arch_clk_init(void)
{
int i, ret = 0;
for (i = 0; i < ARRAY_SIZE(clks); i++)
ret |= clk_register(clks[i]);
+ for (i = 0; i < ARRAY_SIZE(lookups); i++)
+ clkdev_add(&lookups[i]);
if (!ret)
ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
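
SH7785 (and SH7786 below) register their lookups with clkdev_add() in a loop rather than the clkdev_add_table() call used by the SH-Mobile parts; for a static table the two forms are interchangeable. A sketch of the equivalence, assuming the same lookups[] array:

#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/clkdev.h>

static void __init example_register_lookups(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(lookups); i++)
                clkdev_add(&lookups[i]);

        /* equivalently, in a single call:
         *      clkdev_add_table(lookups, ARRAY_SIZE(lookups));
         */
}
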
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7786.c b/arch/sh/kernel/cpu/sh4a/clock-sh7786.c
index af69fd468703..c4a84bb2f3d9 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7786.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7786.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <asm/clkdev.h>
#include <asm/clock.h>
#include <asm/freq.h>
@@ -87,12 +89,12 @@ struct clk div4_clks[DIV4_NR] = {
static struct clk mstp_clks[] = {
/* MSTPCR0 */
- SH_CLK_MSTP32("scif_fck", 5, &div4_clks[DIV4_P], MSTPCR0, 29, 0),
- SH_CLK_MSTP32("scif_fck", 4, &div4_clks[DIV4_P], MSTPCR0, 28, 0),
- SH_CLK_MSTP32("scif_fck", 3, &div4_clks[DIV4_P], MSTPCR0, 27, 0),
- SH_CLK_MSTP32("scif_fck", 2, &div4_clks[DIV4_P], MSTPCR0, 26, 0),
- SH_CLK_MSTP32("scif_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 25, 0),
- SH_CLK_MSTP32("scif_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 24, 0),
+ SH_CLK_MSTP32("sci_fck", 5, &div4_clks[DIV4_P], MSTPCR0, 29, 0),
+ SH_CLK_MSTP32("sci_fck", 4, &div4_clks[DIV4_P], MSTPCR0, 28, 0),
+ SH_CLK_MSTP32("sci_fck", 3, &div4_clks[DIV4_P], MSTPCR0, 27, 0),
+ SH_CLK_MSTP32("sci_fck", 2, &div4_clks[DIV4_P], MSTPCR0, 26, 0),
+ SH_CLK_MSTP32("sci_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 25, 0),
+ SH_CLK_MSTP32("sci_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 24, 0),
SH_CLK_MSTP32("ssi_fck", 3, &div4_clks[DIV4_P], MSTPCR0, 23, 0),
SH_CLK_MSTP32("ssi_fck", 2, &div4_clks[DIV4_P], MSTPCR0, 22, 0),
SH_CLK_MSTP32("ssi_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 21, 0),
@@ -120,12 +122,78 @@ static struct clk mstp_clks[] = {
SH_CLK_MSTP32("ether_fck", -1, NULL, MSTPCR1, 2, 0),
};
+static struct clk_lookup lookups[] = {
+ {
+ /* TMU0 */
+ .dev_id = "sh_tmu.0",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[17], /* tmu012_fck */
+ }, {
+ /* TMU1 */
+ .dev_id = "sh_tmu.1",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[17],
+ }, {
+ /* TMU2 */
+ .dev_id = "sh_tmu.2",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[17],
+ }, {
+ /* TMU3 */
+ .dev_id = "sh_tmu.3",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[16], /* tmu345_fck */
+ }, {
+ /* TMU4 */
+ .dev_id = "sh_tmu.4",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[16],
+ }, {
+ /* TMU5 */
+ .dev_id = "sh_tmu.5",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[16],
+ }, {
+ /* TMU6 */
+ .dev_id = "sh_tmu.6",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[15], /* tmu678_fck */
+ }, {
+ /* TMU7 */
+ .dev_id = "sh_tmu.7",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[15],
+ }, {
+ /* TMU8 */
+ .dev_id = "sh_tmu.8",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[15],
+ }, {
+ /* TMU9 */
+ .dev_id = "sh_tmu.9",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[14], /* tmu9_11_fck */
+ }, {
+ /* TMU10 */
+ .dev_id = "sh_tmu.10",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[14],
+ }, {
+ /* TMU11 */
+ .dev_id = "sh_tmu.11",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[14],
+ }
+};
+
int __init arch_clk_init(void)
{
int i, ret = 0;
for (i = 0; i < ARRAY_SIZE(clks); i++)
ret |= clk_register(clks[i]);
+ for (i = 0; i < ARRAY_SIZE(lookups); i++)
+ clkdev_add(&lookups[i]);
if (!ret)
ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
index 45eb1bfd42c9..3681cafdb4af 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
@@ -21,7 +21,6 @@ static struct plat_sci_port scif0_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 80, 80, 80, 80 },
- .clk = "scif0",
};
static struct platform_device scif0_device = {
@@ -37,7 +36,6 @@ static struct plat_sci_port scif1_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 81, 81, 81, 81 },
- .clk = "scif1",
};
static struct platform_device scif1_device = {
@@ -53,7 +51,6 @@ static struct plat_sci_port scif2_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 82, 82, 82, 82 },
- .clk = "scif2",
};
static struct platform_device scif2_device = {
@@ -69,7 +66,6 @@ static struct plat_sci_port scif3_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 83, 83, 83, 83 },
- .clk = "scif3",
};
static struct platform_device scif3_device = {
@@ -207,17 +203,14 @@ static struct platform_device jpu_device = {
};
static struct sh_timer_config cmt_platform_data = {
- .name = "CMT",
.channel_offset = 0x60,
.timer_bit = 5,
- .clk = "cmt0",
.clockevent_rating = 125,
.clocksource_rating = 200,
};
static struct resource cmt_resources[] = {
[0] = {
- .name = "CMT",
.start = 0x044a0060,
.end = 0x044a006b,
.flags = IORESOURCE_MEM,
@@ -239,16 +232,13 @@ static struct platform_device cmt_device = {
};
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "tmu0",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = 0xffd80008,
.end = 0xffd80013,
.flags = IORESOURCE_MEM,
@@ -270,16 +260,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "tmu0",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = 0xffd80014,
.end = 0xffd8001f,
.flags = IORESOURCE_MEM,
@@ -301,15 +288,12 @@ static struct platform_device tmu1_device = {
};
static struct sh_timer_config tmu2_platform_data = {
- .name = "TMU2",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "tmu0",
};
static struct resource tmu2_resources[] = {
[0] = {
- .name = "TMU2",
.start = 0xffd80020,
.end = 0xffd8002b,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
index c494c193e3b6..8dab9e1bbd89 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
@@ -23,7 +23,6 @@ static struct plat_sci_port scif0_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 80, 80, 80, 80 },
- .clk = "scif0",
};
static struct platform_device scif0_device = {
@@ -169,17 +168,14 @@ static struct platform_device veu1_device = {
};
static struct sh_timer_config cmt_platform_data = {
- .name = "CMT",
.channel_offset = 0x60,
.timer_bit = 5,
- .clk = "cmt0",
.clockevent_rating = 125,
.clocksource_rating = 200,
};
static struct resource cmt_resources[] = {
[0] = {
- .name = "CMT",
.start = 0x044a0060,
.end = 0x044a006b,
.flags = IORESOURCE_MEM,
@@ -201,16 +197,13 @@ static struct platform_device cmt_device = {
};
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "tmu0",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = 0xffd80008,
.end = 0xffd80013,
.flags = IORESOURCE_MEM,
@@ -232,16 +225,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "tmu0",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = 0xffd80014,
.end = 0xffd8001f,
.flags = IORESOURCE_MEM,
@@ -263,15 +253,12 @@ static struct platform_device tmu1_device = {
};
static struct sh_timer_config tmu2_platform_data = {
- .name = "TMU2",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "tmu0",
};
static struct resource tmu2_resources[] = {
[0] = {
- .name = "TMU2",
.start = 0xffd80020,
.end = 0xffd8002b,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
index fd7e3639e845..24c6167a7181 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
@@ -24,7 +24,7 @@
#include <cpu/dma-register.h>
#include <cpu/sh7722.h>
-static struct sh_dmae_slave_config sh7722_dmae_slaves[] = {
+static const struct sh_dmae_slave_config sh7722_dmae_slaves[] = {
{
.slave_id = SHDMA_SLAVE_SCIF0_TX,
.addr = 0xffe0000c,
@@ -78,7 +78,7 @@ static struct sh_dmae_slave_config sh7722_dmae_slaves[] = {
},
};
-static struct sh_dmae_channel sh7722_dmae_channels[] = {
+static const struct sh_dmae_channel sh7722_dmae_channels[] = {
{
.offset = 0,
.dmars = 0,
@@ -106,7 +106,7 @@ static struct sh_dmae_channel sh7722_dmae_channels[] = {
}
};
-static unsigned int ts_shift[] = TS_SHIFT;
+static const unsigned int ts_shift[] = TS_SHIFT;
static struct sh_dmae_pdata dma_platform_data = {
.slave = sh7722_dmae_slaves,
@@ -174,7 +174,6 @@ static struct plat_sci_port scif0_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 80, 80, 80, 80 },
- .clk = "scif0",
};
static struct platform_device scif0_device = {
@@ -190,7 +189,6 @@ static struct plat_sci_port scif1_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 81, 81, 81, 81 },
- .clk = "scif1",
};
static struct platform_device scif1_device = {
@@ -206,7 +204,6 @@ static struct plat_sci_port scif2_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 82, 82, 82, 82 },
- .clk = "scif2",
};
static struct platform_device scif2_device = {
@@ -401,17 +398,14 @@ static struct platform_device jpu_device = {
};
static struct sh_timer_config cmt_platform_data = {
- .name = "CMT",
.channel_offset = 0x60,
.timer_bit = 5,
- .clk = "cmt0",
.clockevent_rating = 125,
.clocksource_rating = 125,
};
static struct resource cmt_resources[] = {
[0] = {
- .name = "CMT",
.start = 0x044a0060,
.end = 0x044a006b,
.flags = IORESOURCE_MEM,
@@ -436,16 +430,13 @@ static struct platform_device cmt_device = {
};
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "tmu0",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = 0xffd80008,
.end = 0xffd80013,
.flags = IORESOURCE_MEM,
@@ -470,16 +461,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "tmu0",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = 0xffd80014,
.end = 0xffd8001f,
.flags = IORESOURCE_MEM,
@@ -504,15 +492,12 @@ static struct platform_device tmu1_device = {
};
static struct sh_timer_config tmu2_platform_data = {
- .name = "TMU2",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "tmu0",
};
static struct resource tmu2_resources[] = {
[0] = {
- .name = "TMU2",
.start = 0xffd80020,
.end = 0xffd8002b,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
index 85c61f624702..0eadefdbbba1 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
@@ -26,7 +26,6 @@ static struct plat_sci_port scif0_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 80, 80, 80, 80 },
- .clk = "scif0",
};
static struct platform_device scif0_device = {
@@ -42,7 +41,6 @@ static struct plat_sci_port scif1_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 81, 81, 81, 81 },
- .clk = "scif1",
};
static struct platform_device scif1_device = {
@@ -58,7 +56,6 @@ static struct plat_sci_port scif2_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 82, 82, 82, 82 },
- .clk = "scif2",
};
static struct platform_device scif2_device = {
@@ -74,7 +71,6 @@ static struct plat_sci_port scif3_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIFA,
.irqs = { 56, 56, 56, 56 },
- .clk = "scif3",
};
static struct platform_device scif3_device = {
@@ -90,7 +86,6 @@ static struct plat_sci_port scif4_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIFA,
.irqs = { 88, 88, 88, 88 },
- .clk = "scif4",
};
static struct platform_device scif4_device = {
@@ -106,7 +101,6 @@ static struct plat_sci_port scif5_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIFA,
.irqs = { 109, 109, 109, 109 },
- .clk = "scif5",
};
static struct platform_device scif5_device = {
@@ -211,17 +205,14 @@ static struct platform_device veu1_device = {
};
static struct sh_timer_config cmt_platform_data = {
- .name = "CMT",
.channel_offset = 0x60,
.timer_bit = 5,
- .clk = "cmt0",
.clockevent_rating = 125,
.clocksource_rating = 125,
};
static struct resource cmt_resources[] = {
[0] = {
- .name = "CMT",
.start = 0x044a0060,
.end = 0x044a006b,
.flags = IORESOURCE_MEM,
@@ -246,16 +237,13 @@ static struct platform_device cmt_device = {
};
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "tmu0",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = 0xffd80008,
.end = 0xffd80013,
.flags = IORESOURCE_MEM,
@@ -280,16 +268,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "tmu0",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = 0xffd80014,
.end = 0xffd8001f,
.flags = IORESOURCE_MEM,
@@ -314,15 +299,12 @@ static struct platform_device tmu1_device = {
};
static struct sh_timer_config tmu2_platform_data = {
- .name = "TMU2",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "tmu0",
};
static struct resource tmu2_resources[] = {
[0] = {
- .name = "TMU2",
.start = 0xffd80020,
.end = 0xffd8002b,
.flags = IORESOURCE_MEM,
@@ -347,15 +329,12 @@ static struct platform_device tmu2_device = {
};
static struct sh_timer_config tmu3_platform_data = {
- .name = "TMU3",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "tmu1",
};
static struct resource tmu3_resources[] = {
[0] = {
- .name = "TMU3",
.start = 0xffd90008,
.end = 0xffd90013,
.flags = IORESOURCE_MEM,
@@ -380,15 +359,12 @@ static struct platform_device tmu3_device = {
};
static struct sh_timer_config tmu4_platform_data = {
- .name = "TMU4",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "tmu1",
};
static struct resource tmu4_resources[] = {
[0] = {
- .name = "TMU4",
.start = 0xffd90014,
.end = 0xffd9001f,
.flags = IORESOURCE_MEM,
@@ -413,15 +389,12 @@ static struct platform_device tmu4_device = {
};
static struct sh_timer_config tmu5_platform_data = {
- .name = "TMU5",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "tmu1",
};
static struct resource tmu5_resources[] = {
[0] = {
- .name = "TMU5",
.start = 0xffd90020,
.end = 0xffd9002b,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
index e7fa2a92fc1f..89fe16d20fdb 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
@@ -31,7 +31,7 @@
#include <cpu/sh7724.h>
/* DMA */
-static struct sh_dmae_channel sh7724_dmae0_channels[] = {
+static const struct sh_dmae_channel sh7724_dmae_channels[] = {
{
.offset = 0,
.dmars = 0,
@@ -59,51 +59,11 @@ static struct sh_dmae_channel sh7724_dmae0_channels[] = {
}
};
-static struct sh_dmae_channel sh7724_dmae1_channels[] = {
- {
- .offset = 0,
- .dmars = 0,
- .dmars_bit = 0,
- }, {
- .offset = 0x10,
- .dmars = 0,
- .dmars_bit = 8,
- }, {
- .offset = 0x20,
- .dmars = 4,
- .dmars_bit = 0,
- }, {
- .offset = 0x30,
- .dmars = 4,
- .dmars_bit = 8,
- }, {
- .offset = 0x50,
- .dmars = 8,
- .dmars_bit = 0,
- }, {
- .offset = 0x60,
- .dmars = 8,
- .dmars_bit = 8,
- }
-};
-
-static unsigned int ts_shift[] = TS_SHIFT;
-
-static struct sh_dmae_pdata dma0_platform_data = {
- .channel = sh7724_dmae0_channels,
- .channel_num = ARRAY_SIZE(sh7724_dmae0_channels),
- .ts_low_shift = CHCR_TS_LOW_SHIFT,
- .ts_low_mask = CHCR_TS_LOW_MASK,
- .ts_high_shift = CHCR_TS_HIGH_SHIFT,
- .ts_high_mask = CHCR_TS_HIGH_MASK,
- .ts_shift = ts_shift,
- .ts_shift_num = ARRAY_SIZE(ts_shift),
- .dmaor_init = DMAOR_INIT,
-};
+static const unsigned int ts_shift[] = TS_SHIFT;
-static struct sh_dmae_pdata dma1_platform_data = {
- .channel = sh7724_dmae1_channels,
- .channel_num = ARRAY_SIZE(sh7724_dmae1_channels),
+static struct sh_dmae_pdata dma_platform_data = {
+ .channel = sh7724_dmae_channels,
+ .channel_num = ARRAY_SIZE(sh7724_dmae_channels),
.ts_low_shift = CHCR_TS_LOW_SHIFT,
.ts_low_mask = CHCR_TS_LOW_MASK,
.ts_high_shift = CHCR_TS_HIGH_SHIFT,
@@ -187,7 +147,7 @@ static struct platform_device dma0_device = {
.resource = sh7724_dmae0_resources,
.num_resources = ARRAY_SIZE(sh7724_dmae0_resources),
.dev = {
- .platform_data = &dma0_platform_data,
+ .platform_data = &dma_platform_data,
},
.archdata = {
.hwblk_id = HWBLK_DMAC0,
@@ -200,7 +160,7 @@ static struct platform_device dma1_device = {
.resource = sh7724_dmae1_resources,
.num_resources = ARRAY_SIZE(sh7724_dmae1_resources),
.dev = {
- .platform_data = &dma1_platform_data,
+ .platform_data = &dma_platform_data,
},
.archdata = {
.hwblk_id = HWBLK_DMAC1,
@@ -213,7 +173,6 @@ static struct plat_sci_port scif0_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 80, 80, 80, 80 },
- .clk = "scif0",
};
static struct platform_device scif0_device = {
@@ -229,7 +188,6 @@ static struct plat_sci_port scif1_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 81, 81, 81, 81 },
- .clk = "scif1",
};
static struct platform_device scif1_device = {
@@ -245,7 +203,6 @@ static struct plat_sci_port scif2_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 82, 82, 82, 82 },
- .clk = "scif2",
};
static struct platform_device scif2_device = {
@@ -261,7 +218,6 @@ static struct plat_sci_port scif3_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIFA,
.irqs = { 56, 56, 56, 56 },
- .clk = "scif3",
};
static struct platform_device scif3_device = {
@@ -277,7 +233,6 @@ static struct plat_sci_port scif4_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIFA,
.irqs = { 88, 88, 88, 88 },
- .clk = "scif4",
};
static struct platform_device scif4_device = {
@@ -293,7 +248,6 @@ static struct plat_sci_port scif5_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIFA,
.irqs = { 109, 109, 109, 109 },
- .clk = "scif5",
};
static struct platform_device scif5_device = {
@@ -485,17 +439,14 @@ static struct platform_device veu1_device = {
};
static struct sh_timer_config cmt_platform_data = {
- .name = "CMT",
.channel_offset = 0x60,
.timer_bit = 5,
- .clk = "cmt0",
.clockevent_rating = 125,
.clocksource_rating = 200,
};
static struct resource cmt_resources[] = {
[0] = {
- .name = "CMT",
.start = 0x044a0060,
.end = 0x044a006b,
.flags = IORESOURCE_MEM,
@@ -520,16 +471,13 @@ static struct platform_device cmt_device = {
};
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "tmu0",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = 0xffd80008,
.end = 0xffd80013,
.flags = IORESOURCE_MEM,
@@ -554,16 +502,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "tmu0",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = 0xffd80014,
.end = 0xffd8001f,
.flags = IORESOURCE_MEM,
@@ -588,15 +533,12 @@ static struct platform_device tmu1_device = {
};
static struct sh_timer_config tmu2_platform_data = {
- .name = "TMU2",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "tmu0",
};
static struct resource tmu2_resources[] = {
[0] = {
- .name = "TMU2",
.start = 0xffd80020,
.end = 0xffd8002b,
.flags = IORESOURCE_MEM,
@@ -622,15 +564,12 @@ static struct platform_device tmu2_device = {
static struct sh_timer_config tmu3_platform_data = {
- .name = "TMU3",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "tmu1",
};
static struct resource tmu3_resources[] = {
[0] = {
- .name = "TMU3",
.start = 0xffd90008,
.end = 0xffd90013,
.flags = IORESOURCE_MEM,
@@ -655,15 +594,12 @@ static struct platform_device tmu3_device = {
};
static struct sh_timer_config tmu4_platform_data = {
- .name = "TMU4",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "tmu1",
};
static struct resource tmu4_resources[] = {
[0] = {
- .name = "TMU4",
.start = 0xffd90014,
.end = 0xffd9001f,
.flags = IORESOURCE_MEM,
@@ -688,15 +624,12 @@ static struct platform_device tmu4_device = {
};
static struct sh_timer_config tmu5_platform_data = {
- .name = "TMU5",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "tmu1",
};
static struct resource tmu5_resources[] = {
[0] = {
- .name = "TMU5",
.start = 0xffd90020,
.end = 0xffd9002b,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
index e75edf58796a..444aca95b20d 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
@@ -63,16 +63,13 @@ static struct platform_device scif4_device = {
};
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = 0xfe430008,
.end = 0xfe430013,
.flags = IORESOURCE_MEM,
@@ -94,16 +91,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = 0xfe430014,
.end = 0xfe43001f,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
index 7f6b0a5f7f82..5b5f6b005fc5 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
@@ -131,16 +131,13 @@ static struct platform_device usbf_device = {
};
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = 0xffd80008,
.end = 0xffd80013,
.flags = IORESOURCE_MEM,
@@ -162,16 +159,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = 0xffd80014,
.end = 0xffd8001f,
.flags = IORESOURCE_MEM,
@@ -193,15 +187,12 @@ static struct platform_device tmu1_device = {
};
static struct sh_timer_config tmu2_platform_data = {
- .name = "TMU2",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu2_resources[] = {
[0] = {
- .name = "TMU2",
.start = 0xffd80020,
.end = 0xffd8002f,
.flags = IORESOURCE_MEM,
@@ -223,15 +214,12 @@ static struct platform_device tmu2_device = {
};
static struct sh_timer_config tmu3_platform_data = {
- .name = "TMU3",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "peripheral_clk",
};
static struct resource tmu3_resources[] = {
[0] = {
- .name = "TMU3",
.start = 0xffd88008,
.end = 0xffd88013,
.flags = IORESOURCE_MEM,
@@ -253,15 +241,12 @@ static struct platform_device tmu3_device = {
};
static struct sh_timer_config tmu4_platform_data = {
- .name = "TMU4",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "peripheral_clk",
};
static struct resource tmu4_resources[] = {
[0] = {
- .name = "TMU4",
.start = 0xffd88014,
.end = 0xffd8801f,
.flags = IORESOURCE_MEM,
@@ -283,15 +268,12 @@ static struct platform_device tmu4_device = {
};
static struct sh_timer_config tmu5_platform_data = {
- .name = "TMU5",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu5_resources[] = {
[0] = {
- .name = "TMU5",
.start = 0xffd88020,
.end = 0xffd8802b,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
index 86d681ecf90e..7270d7fd6761 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
@@ -165,16 +165,13 @@ static struct platform_device scif9_device = {
};
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = 0xffd80008,
.end = 0xffd80013,
.flags = IORESOURCE_MEM,
@@ -196,16 +193,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = 0xffd80014,
.end = 0xffd8001f,
.flags = IORESOURCE_MEM,
@@ -227,15 +221,12 @@ static struct platform_device tmu1_device = {
};
static struct sh_timer_config tmu2_platform_data = {
- .name = "TMU2",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu2_resources[] = {
[0] = {
- .name = "TMU2",
.start = 0xffd80020,
.end = 0xffd8002f,
.flags = IORESOURCE_MEM,
@@ -257,15 +248,12 @@ static struct platform_device tmu2_device = {
};
static struct sh_timer_config tmu3_platform_data = {
- .name = "TMU3",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "peripheral_clk",
};
static struct resource tmu3_resources[] = {
[0] = {
- .name = "TMU3",
.start = 0xffd81008,
.end = 0xffd81013,
.flags = IORESOURCE_MEM,
@@ -287,15 +275,12 @@ static struct platform_device tmu3_device = {
};
static struct sh_timer_config tmu4_platform_data = {
- .name = "TMU4",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "peripheral_clk",
};
static struct resource tmu4_resources[] = {
[0] = {
- .name = "TMU4",
.start = 0xffd81014,
.end = 0xffd8101f,
.flags = IORESOURCE_MEM,
@@ -317,15 +302,12 @@ static struct platform_device tmu4_device = {
};
static struct sh_timer_config tmu5_platform_data = {
- .name = "TMU5",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu5_resources[] = {
[0] = {
- .name = "TMU5",
.start = 0xffd81020,
.end = 0xffd8102f,
.flags = IORESOURCE_MEM,
@@ -347,15 +329,12 @@ static struct platform_device tmu5_device = {
};
static struct sh_timer_config tmu6_platform_data = {
- .name = "TMU6",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "peripheral_clk",
};
static struct resource tmu6_resources[] = {
[0] = {
- .name = "TMU6",
.start = 0xffd82008,
.end = 0xffd82013,
.flags = IORESOURCE_MEM,
@@ -377,15 +356,12 @@ static struct platform_device tmu6_device = {
};
static struct sh_timer_config tmu7_platform_data = {
- .name = "TMU7",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "peripheral_clk",
};
static struct resource tmu7_resources[] = {
[0] = {
- .name = "TMU7",
.start = 0xffd82014,
.end = 0xffd8201f,
.flags = IORESOURCE_MEM,
@@ -407,15 +383,12 @@ static struct platform_device tmu7_device = {
};
static struct sh_timer_config tmu8_platform_data = {
- .name = "TMU8",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu8_resources[] = {
[0] = {
- .name = "TMU8",
.start = 0xffd82020,
.end = 0xffd8202b,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
index 02e792c90de6..b12f537e4dde 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
@@ -49,16 +49,13 @@ static struct platform_device scif1_device = {
};
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = 0xffd80008,
.end = 0xffd80013,
.flags = IORESOURCE_MEM,
@@ -80,16 +77,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = 0xffd80014,
.end = 0xffd8001f,
.flags = IORESOURCE_MEM,
@@ -111,15 +105,12 @@ static struct platform_device tmu1_device = {
};
static struct sh_timer_config tmu2_platform_data = {
- .name = "TMU2",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu2_resources[] = {
[0] = {
- .name = "TMU2",
.start = 0xffd80020,
.end = 0xffd8002f,
.flags = IORESOURCE_MEM,
@@ -141,15 +132,12 @@ static struct platform_device tmu2_device = {
};
static struct sh_timer_config tmu3_platform_data = {
- .name = "TMU3",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "peripheral_clk",
};
static struct resource tmu3_resources[] = {
[0] = {
- .name = "TMU3",
.start = 0xffdc0008,
.end = 0xffdc0013,
.flags = IORESOURCE_MEM,
@@ -171,15 +159,12 @@ static struct platform_device tmu3_device = {
};
static struct sh_timer_config tmu4_platform_data = {
- .name = "TMU4",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "peripheral_clk",
};
static struct resource tmu4_resources[] = {
[0] = {
- .name = "TMU4",
.start = 0xffdc0014,
.end = 0xffdc001f,
.flags = IORESOURCE_MEM,
@@ -201,15 +186,12 @@ static struct platform_device tmu4_device = {
};
static struct sh_timer_config tmu5_platform_data = {
- .name = "TMU5",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu5_resources[] = {
[0] = {
- .name = "TMU5",
.start = 0xffdc0020,
.end = 0xffdc002b,
.flags = IORESOURCE_MEM,
@@ -251,7 +233,7 @@ static struct platform_device rtc_device = {
};
/* DMA */
-static struct sh_dmae_channel sh7780_dmae0_channels[] = {
+static const struct sh_dmae_channel sh7780_dmae0_channels[] = {
{
.offset = 0,
.dmars = 0,
@@ -279,7 +261,7 @@ static struct sh_dmae_channel sh7780_dmae0_channels[] = {
}
};
-static struct sh_dmae_channel sh7780_dmae1_channels[] = {
+static const struct sh_dmae_channel sh7780_dmae1_channels[] = {
{
.offset = 0,
}, {
@@ -295,7 +277,7 @@ static struct sh_dmae_channel sh7780_dmae1_channels[] = {
}
};
-static unsigned int ts_shift[] = TS_SHIFT;
+static const unsigned int ts_shift[] = TS_SHIFT;
static struct sh_dmae_pdata dma0_platform_data = {
.channel = sh7780_dmae0_channels,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
index 1fcd88b1671e..f3e3ea0ce050 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
@@ -25,7 +25,6 @@ static struct plat_sci_port scif0_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 40, 40, 40, 40 },
- .clk = "scif_fck",
};
static struct platform_device scif0_device = {
@@ -41,7 +40,6 @@ static struct plat_sci_port scif1_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 44, 44, 44, 44 },
- .clk = "scif_fck",
};
static struct platform_device scif1_device = {
@@ -57,7 +55,6 @@ static struct plat_sci_port scif2_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 60, 60, 60, 60 },
- .clk = "scif_fck",
};
static struct platform_device scif2_device = {
@@ -73,7 +70,6 @@ static struct plat_sci_port scif3_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 61, 61, 61, 61 },
- .clk = "scif_fck",
};
static struct platform_device scif3_device = {
@@ -89,7 +85,6 @@ static struct plat_sci_port scif4_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 62, 62, 62, 62 },
- .clk = "scif_fck",
};
static struct platform_device scif4_device = {
@@ -105,7 +100,6 @@ static struct plat_sci_port scif5_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 63, 63, 63, 63 },
- .clk = "scif_fck",
};
static struct platform_device scif5_device = {
@@ -117,16 +111,13 @@ static struct platform_device scif5_device = {
};
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "tmu012_fck",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = 0xffd80008,
.end = 0xffd80013,
.flags = IORESOURCE_MEM,
@@ -148,16 +139,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "tmu012_fck",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = 0xffd80014,
.end = 0xffd8001f,
.flags = IORESOURCE_MEM,
@@ -179,15 +167,12 @@ static struct platform_device tmu1_device = {
};
static struct sh_timer_config tmu2_platform_data = {
- .name = "TMU2",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "tmu012_fck",
};
static struct resource tmu2_resources[] = {
[0] = {
- .name = "TMU2",
.start = 0xffd80020,
.end = 0xffd8002f,
.flags = IORESOURCE_MEM,
@@ -209,15 +194,12 @@ static struct platform_device tmu2_device = {
};
static struct sh_timer_config tmu3_platform_data = {
- .name = "TMU3",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "tmu345_fck",
};
static struct resource tmu3_resources[] = {
[0] = {
- .name = "TMU3",
.start = 0xffdc0008,
.end = 0xffdc0013,
.flags = IORESOURCE_MEM,
@@ -239,15 +221,12 @@ static struct platform_device tmu3_device = {
};
static struct sh_timer_config tmu4_platform_data = {
- .name = "TMU4",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "tmu345_fck",
};
static struct resource tmu4_resources[] = {
[0] = {
- .name = "TMU4",
.start = 0xffdc0014,
.end = 0xffdc001f,
.flags = IORESOURCE_MEM,
@@ -269,15 +248,12 @@ static struct platform_device tmu4_device = {
};
static struct sh_timer_config tmu5_platform_data = {
- .name = "TMU5",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "tmu345_fck",
};
static struct resource tmu5_resources[] = {
[0] = {
- .name = "TMU5",
.start = 0xffdc0020,
.end = 0xffdc002b,
.flags = IORESOURCE_MEM,
@@ -299,7 +275,7 @@ static struct platform_device tmu5_device = {
};
/* DMA */
-static struct sh_dmae_channel sh7785_dmae0_channels[] = {
+static const struct sh_dmae_channel sh7785_dmae0_channels[] = {
{
.offset = 0,
.dmars = 0,
@@ -327,7 +303,7 @@ static struct sh_dmae_channel sh7785_dmae0_channels[] = {
}
};
-static struct sh_dmae_channel sh7785_dmae1_channels[] = {
+static const struct sh_dmae_channel sh7785_dmae1_channels[] = {
{
.offset = 0,
}, {
@@ -343,7 +319,7 @@ static struct sh_dmae_channel sh7785_dmae1_channels[] = {
}
};
-static unsigned int ts_shift[] = TS_SHIFT;
+static const unsigned int ts_shift[] = TS_SHIFT;
static struct sh_dmae_pdata dma0_platform_data = {
.channel = sh7785_dmae0_channels,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
index 7e585320710a..81657091da46 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
@@ -1,7 +1,7 @@
/*
* SH7786 Setup
*
- * Copyright (C) 2009 Renesas Solutions Corp.
+ * Copyright (C) 2009 - 2010 Renesas Solutions Corp.
* Kuninori Morimoto <morimoto.kuninori@renesas.com>
* Paul Mundt <paul.mundt@renesas.com>
*
@@ -21,7 +21,10 @@
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sh_timer.h>
+#include <linux/sh_intc.h>
+#include <cpu/dma-register.h>
#include <asm/mmzone.h>
+#include <asm/dmaengine.h>
static struct plat_sci_port scif0_platform_data = {
.mapbase = 0xffea0000,
@@ -117,16 +120,13 @@ static struct platform_device scif5_device = {
};
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = 0xffd80008,
.end = 0xffd80013,
.flags = IORESOURCE_MEM,
@@ -148,16 +148,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = 0xffd80014,
.end = 0xffd8001f,
.flags = IORESOURCE_MEM,
@@ -179,15 +176,12 @@ static struct platform_device tmu1_device = {
};
static struct sh_timer_config tmu2_platform_data = {
- .name = "TMU2",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu2_resources[] = {
[0] = {
- .name = "TMU2",
.start = 0xffd80020,
.end = 0xffd8002f,
.flags = IORESOURCE_MEM,
@@ -209,15 +203,12 @@ static struct platform_device tmu2_device = {
};
static struct sh_timer_config tmu3_platform_data = {
- .name = "TMU3",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "peripheral_clk",
};
static struct resource tmu3_resources[] = {
[0] = {
- .name = "TMU3",
.start = 0xffda0008,
.end = 0xffda0013,
.flags = IORESOURCE_MEM,
@@ -239,15 +230,12 @@ static struct platform_device tmu3_device = {
};
static struct sh_timer_config tmu4_platform_data = {
- .name = "TMU4",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "peripheral_clk",
};
static struct resource tmu4_resources[] = {
[0] = {
- .name = "TMU4",
.start = 0xffda0014,
.end = 0xffda001f,
.flags = IORESOURCE_MEM,
@@ -269,15 +257,12 @@ static struct platform_device tmu4_device = {
};
static struct sh_timer_config tmu5_platform_data = {
- .name = "TMU5",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu5_resources[] = {
[0] = {
- .name = "TMU5",
.start = 0xffda0020,
.end = 0xffda002b,
.flags = IORESOURCE_MEM,
@@ -299,15 +284,12 @@ static struct platform_device tmu5_device = {
};
static struct sh_timer_config tmu6_platform_data = {
- .name = "TMU6",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "peripheral_clk",
};
static struct resource tmu6_resources[] = {
[0] = {
- .name = "TMU6",
.start = 0xffdc0008,
.end = 0xffdc0013,
.flags = IORESOURCE_MEM,
@@ -329,15 +311,12 @@ static struct platform_device tmu6_device = {
};
static struct sh_timer_config tmu7_platform_data = {
- .name = "TMU7",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "peripheral_clk",
};
static struct resource tmu7_resources[] = {
[0] = {
- .name = "TMU7",
.start = 0xffdc0014,
.end = 0xffdc001f,
.flags = IORESOURCE_MEM,
@@ -359,15 +338,12 @@ static struct platform_device tmu7_device = {
};
static struct sh_timer_config tmu8_platform_data = {
- .name = "TMU8",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu8_resources[] = {
[0] = {
- .name = "TMU8",
.start = 0xffdc0020,
.end = 0xffdc002b,
.flags = IORESOURCE_MEM,
@@ -389,15 +365,12 @@ static struct platform_device tmu8_device = {
};
static struct sh_timer_config tmu9_platform_data = {
- .name = "TMU9",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "peripheral_clk",
};
static struct resource tmu9_resources[] = {
[0] = {
- .name = "TMU9",
.start = 0xffde0008,
.end = 0xffde0013,
.flags = IORESOURCE_MEM,
@@ -419,15 +392,12 @@ static struct platform_device tmu9_device = {
};
static struct sh_timer_config tmu10_platform_data = {
- .name = "TMU10",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "peripheral_clk",
};
static struct resource tmu10_resources[] = {
[0] = {
- .name = "TMU10",
.start = 0xffde0014,
.end = 0xffde001f,
.flags = IORESOURCE_MEM,
@@ -449,15 +419,12 @@ static struct platform_device tmu10_device = {
};
static struct sh_timer_config tmu11_platform_data = {
- .name = "TMU11",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu11_resources[] = {
[0] = {
- .name = "TMU11",
.start = 0xffde0020,
.end = 0xffde002b,
.flags = IORESOURCE_MEM,
@@ -478,6 +445,83 @@ static struct platform_device tmu11_device = {
.num_resources = ARRAY_SIZE(tmu11_resources),
};
+static const struct sh_dmae_channel dmac0_channels[] = {
+ {
+ .offset = 0,
+ .dmars = 0,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x10,
+ .dmars = 0,
+ .dmars_bit = 8,
+ }, {
+ .offset = 0x20,
+ .dmars = 4,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x30,
+ .dmars = 4,
+ .dmars_bit = 8,
+ }, {
+ .offset = 0x50,
+ .dmars = 8,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x60,
+ .dmars = 8,
+ .dmars_bit = 8,
+ }
+};
+
+static const unsigned int ts_shift[] = TS_SHIFT;
+
+static struct sh_dmae_pdata dma0_platform_data = {
+ .channel = dmac0_channels,
+ .channel_num = ARRAY_SIZE(dmac0_channels),
+ .ts_low_shift = CHCR_TS_LOW_SHIFT,
+ .ts_low_mask = CHCR_TS_LOW_MASK,
+ .ts_high_shift = CHCR_TS_HIGH_SHIFT,
+ .ts_high_mask = CHCR_TS_HIGH_MASK,
+ .ts_shift = ts_shift,
+ .ts_shift_num = ARRAY_SIZE(ts_shift),
+ .dmaor_init = DMAOR_INIT,
+};
+
+/* Resource order important! */
+static struct resource dmac0_resources[] = {
+ {
+ /* Channel registers and DMAOR */
+ .start = 0xfe008020,
+ .end = 0xfe00808f,
+ .flags = IORESOURCE_MEM,
+ }, {
+ /* DMARSx */
+ .start = 0xfe009000,
+ .end = 0xfe00900b,
+ .flags = IORESOURCE_MEM,
+ }, {
+ /* DMA error IRQ */
+ .start = evt2irq(0x5c0),
+ .end = evt2irq(0x5c0),
+ .flags = IORESOURCE_IRQ,
+ }, {
+ /* IRQ for channels 0-5 */
+ .start = evt2irq(0x500),
+ .end = evt2irq(0x5a0),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device dma0_device = {
+ .name = "sh-dma-engine",
+ .id = 0,
+ .resource = dmac0_resources,
+ .num_resources = ARRAY_SIZE(dmac0_resources),
+ .dev = {
+ .platform_data = &dma0_platform_data,
+ },
+};
+
static struct resource usb_ohci_resources[] = {
[0] = {
.start = 0xffe70400,
@@ -525,10 +569,10 @@ static struct platform_device *sh7786_early_devices[] __initdata = {
};
static struct platform_device *sh7786_devices[] __initdata = {
+ &dma0_device,
&usb_ohci_device,
};
-
/*
* Please call this function if your platform board
* use external clock for USB
@@ -536,6 +580,7 @@ static struct platform_device *sh7786_devices[] __initdata = {
#define USBCTL0 0xffe70858
#define CLOCK_MODE_MASK 0xffffff7f
#define EXT_CLOCK_MODE 0x00000080
+
void __init sh7786_usb_use_exclock(void)
{
u32 val = __raw_readl(USBCTL0) & CLOCK_MODE_MASK;
@@ -553,6 +598,7 @@ void __init sh7786_usb_use_exclock(void)
#define PLL_ENB 0x00000002
#define PHY_RST 0x00000004
#define ACT_PLL_STATUS 0xc0000000
+
static void __init sh7786_usb_setup(void)
{
int i = 1000000;
@@ -708,9 +754,19 @@ static struct intc_vect vectors[] __initdata = {
#define INTMSK2 0xfe410068
#define INTMSKCLR2 0xfe41006c
+#define INTDISTCR0 0xfe4100b0
+#define INTDISTCR1 0xfe4100b4
+#define INTACK 0xfe4100b8
+#define INTACKCLR 0xfe4100bc
+#define INT2DISTCR0 0xfe410900
+#define INT2DISTCR1 0xfe410904
+#define INT2DISTCR2 0xfe410908
+#define INT2DISTCR3 0xfe41090c
+
static struct intc_mask_reg mask_registers[] __initdata = {
{ CnINTMSK0, CnINTMSKCLR0, 32,
- { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 },
+ INTC_SMP_BALANCING(INTDISTCR0) },
{ INTMSK2, INTMSKCLR2, 32,
{ IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH,
IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH,
@@ -722,7 +778,8 @@ static struct intc_mask_reg mask_registers[] __initdata = {
IRL4_HHLL, IRL4_HHLH, IRL4_HHHL, 0, } },
{ CnINT2MSKR0, CnINT2MSKCR0 , 32,
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, WDT } },
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, WDT },
+ INTC_SMP_BALANCING(INT2DISTCR0) },
{ CnINT2MSKR1, CnINT2MSKCR1, 32,
{ TMU0_0, TMU0_1, TMU0_2, TMU0_3, TMU1_0, TMU1_1, TMU1_2, 0,
DMAC0_0, DMAC0_1, DMAC0_2, DMAC0_3, DMAC0_4, DMAC0_5, DMAC0_6,
@@ -731,14 +788,14 @@ static struct intc_mask_reg mask_registers[] __initdata = {
HPB_0, HPB_1, HPB_2,
SCIF0_0, SCIF0_1, SCIF0_2, SCIF0_3,
SCIF1,
- TMU2, TMU3, 0, } },
+ TMU2, TMU3, 0, }, INTC_SMP_BALANCING(INT2DISTCR1) },
{ CnINT2MSKR2, CnINT2MSKCR2, 32,
{ 0, 0, SCIF2, SCIF3, SCIF4, SCIF5,
Eth_0, Eth_1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
PCIeC0_0, PCIeC0_1, PCIeC0_2,
PCIeC1_0, PCIeC1_1, PCIeC1_2,
- USB, 0, 0 } },
+ USB, 0, 0 }, INTC_SMP_BALANCING(INT2DISTCR2) },
{ CnINT2MSKR3, CnINT2MSKCR3, 32,
{ 0, 0, 0, 0, 0, 0,
I2C0, I2C1,
@@ -747,7 +804,7 @@ static struct intc_mask_reg mask_registers[] __initdata = {
HAC0, HAC1,
FLCTL, 0,
HSPI, GPIO0, GPIO1, Thermal,
- 0, 0, 0, 0, 0, 0, 0, 0 } },
+ 0, 0, 0, 0, 0, 0, 0, 0 }, INTC_SMP_BALANCING(INT2DISTCR3) },
};
static struct intc_prio_reg prio_registers[] __initdata = {
@@ -863,6 +920,19 @@ static DECLARE_INTC_DESC(intc_desc_irl4567, "sh7786-irl4567", vectors_irl4567,
#define INTC_INTMSK2 INTMSK2
#define INTC_INTMSKCLR1 CnINTMSKCLR1
#define INTC_INTMSKCLR2 INTMSKCLR2
+#define INTC_USERIMASK 0xfe411000
+
+#ifdef CONFIG_INTC_BALANCING
+unsigned int irq_lookup(unsigned int irq)
+{
+ return __raw_readl(INTACK) & 1 ? irq : NO_IRQ_IGNORE;
+}
+
+void irq_finish(unsigned int irq)
+{
+ __raw_writel(irq2evt(irq), INTACKCLR);
+}
+#endif
void __init plat_irq_setup(void)
{
@@ -877,6 +947,7 @@ void __init plat_irq_setup(void)
__raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
register_intc_controller(&intc_desc);
+ register_intc_userimask(INTC_USERIMASK);
}
void __init plat_irq_setup_pins(int mode)
diff --git a/arch/sh/kernel/cpu/sh4a/setup-shx3.c b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
index 780ba17a5599..9158bc5ea38b 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
@@ -70,16 +70,13 @@ static struct platform_device scif2_device = {
};
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = 0xffc10008,
.end = 0xffc10013,
.flags = IORESOURCE_MEM,
@@ -101,16 +98,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = 0xffc10014,
.end = 0xffc1001f,
.flags = IORESOURCE_MEM,
@@ -132,15 +126,12 @@ static struct platform_device tmu1_device = {
};
static struct sh_timer_config tmu2_platform_data = {
- .name = "TMU2",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu2_resources[] = {
[0] = {
- .name = "TMU2",
.start = 0xffc10020,
.end = 0xffc1002f,
.flags = IORESOURCE_MEM,
@@ -162,15 +153,12 @@ static struct platform_device tmu2_device = {
};
static struct sh_timer_config tmu3_platform_data = {
- .name = "TMU3",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "peripheral_clk",
};
static struct resource tmu3_resources[] = {
[0] = {
- .name = "TMU3",
.start = 0xffc20008,
.end = 0xffc20013,
.flags = IORESOURCE_MEM,
@@ -192,15 +180,12 @@ static struct platform_device tmu3_device = {
};
static struct sh_timer_config tmu4_platform_data = {
- .name = "TMU4",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "peripheral_clk",
};
static struct resource tmu4_resources[] = {
[0] = {
- .name = "TMU4",
.start = 0xffc20014,
.end = 0xffc2001f,
.flags = IORESOURCE_MEM,
@@ -222,15 +207,12 @@ static struct platform_device tmu4_device = {
};
static struct sh_timer_config tmu5_platform_data = {
- .name = "TMU5",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu5_resources[] = {
[0] = {
- .name = "TMU5",
.start = 0xffc20020,
.end = 0xffc2002b,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
index 11bf4c1e25c0..de865cac02ee 100644
--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
@@ -1,7 +1,7 @@
/*
* SH-X3 SMP
*
- * Copyright (C) 2007 - 2008 Paul Mundt
+ * Copyright (C) 2007 - 2010 Paul Mundt
* Copyright (C) 2007 Magnus Damm
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -9,16 +9,22 @@
* for more details.
*/
#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/cpu.h>
+#include <asm/sections.h>
#define STBCR_REG(phys_id) (0xfe400004 | (phys_id << 12))
#define RESET_REG(phys_id) (0xfe400008 | (phys_id << 12))
#define STBCR_MSTP 0x00000001
#define STBCR_RESET 0x00000002
+#define STBCR_SLEEP 0x00000004
#define STBCR_LTSLP 0x80000000
static irqreturn_t ipi_interrupt_handler(int irq, void *arg)
@@ -37,7 +43,7 @@ static irqreturn_t ipi_interrupt_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-void __init plat_smp_setup(void)
+static void shx3_smp_setup(void)
{
unsigned int cpu = 0;
int i, num;
@@ -63,7 +69,7 @@ void __init plat_smp_setup(void)
printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
}
-void __init plat_prepare_cpus(unsigned int max_cpus)
+static void shx3_prepare_cpus(unsigned int max_cpus)
{
int i;
@@ -72,11 +78,14 @@ void __init plat_prepare_cpus(unsigned int max_cpus)
BUILD_BUG_ON(SMP_MSG_NR >= 8);
for (i = 0; i < SMP_MSG_NR; i++)
- request_irq(104 + i, ipi_interrupt_handler, IRQF_DISABLED,
- "IPI", (void *)(long)i);
+ request_irq(104 + i, ipi_interrupt_handler,
+ IRQF_DISABLED | IRQF_PERCPU, "IPI", (void *)(long)i);
+
+ for (i = 0; i < max_cpus; i++)
+ set_cpu_present(i, true);
}
-void plat_start_cpu(unsigned int cpu, unsigned long entry_point)
+static void shx3_start_cpu(unsigned int cpu, unsigned long entry_point)
{
if (__in_29bit_mode())
__raw_writel(entry_point, RESET_REG(cpu));
@@ -93,12 +102,12 @@ void plat_start_cpu(unsigned int cpu, unsigned long entry_point)
__raw_writel(STBCR_RESET | STBCR_LTSLP, STBCR_REG(cpu));
}
-int plat_smp_processor_id(void)
+static unsigned int shx3_smp_processor_id(void)
{
return __raw_readl(0xff000048); /* CPIDR */
}
-void plat_send_ipi(unsigned int cpu, unsigned int message)
+static void shx3_send_ipi(unsigned int cpu, unsigned int message)
{
unsigned long addr = 0xfe410070 + (cpu * 4);
@@ -106,3 +115,52 @@ void plat_send_ipi(unsigned int cpu, unsigned int message)
__raw_writel(1 << (message << 2), addr); /* C0INTICI..CnINTICI */
}
+
+static void shx3_update_boot_vector(unsigned int cpu)
+{
+ __raw_writel(STBCR_MSTP, STBCR_REG(cpu));
+ while (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP))
+ cpu_relax();
+ __raw_writel(STBCR_RESET, STBCR_REG(cpu));
+}
+
+static int __cpuinit
+shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned int)hcpu;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ shx3_update_boot_vector(cpu);
+ break;
+ case CPU_ONLINE:
+ pr_info("CPU %u is now online\n", cpu);
+ break;
+ case CPU_DEAD:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata shx3_cpu_notifier = {
+ .notifier_call = shx3_cpu_callback,
+};
+
+static int __cpuinit register_shx3_cpu_notifier(void)
+{
+ register_hotcpu_notifier(&shx3_cpu_notifier);
+ return 0;
+}
+late_initcall(register_shx3_cpu_notifier);
+
+struct plat_smp_ops shx3_smp_ops = {
+ .smp_setup = shx3_smp_setup,
+ .prepare_cpus = shx3_prepare_cpus,
+ .start_cpu = shx3_start_cpu,
+ .smp_processor_id = shx3_smp_processor_id,
+ .send_ipi = shx3_send_ipi,
+ .cpu_die = native_cpu_die,
+ .cpu_disable = native_cpu_disable,
+ .play_dead = native_play_dead,
+};
diff --git a/arch/sh/kernel/cpu/sh5/probe.c b/arch/sh/kernel/cpu/sh5/probe.c
index 521d05b3f7ba..9e882409e4e9 100644
--- a/arch/sh/kernel/cpu/sh5/probe.c
+++ b/arch/sh/kernel/cpu/sh5/probe.c
@@ -17,7 +17,7 @@
#include <asm/cache.h>
#include <asm/tlb.h>
-int __init detect_cpu_and_cache_system(void)
+void __cpuinit cpu_probe(void)
{
unsigned long long cir;
@@ -72,6 +72,4 @@ int __init detect_cpu_and_cache_system(void)
/* Setup some I/D TLB defaults */
sh64_tlb_init();
-
- return 0;
}
diff --git a/arch/sh/kernel/cpu/sh5/setup-sh5.c b/arch/sh/kernel/cpu/sh5/setup-sh5.c
index e7a3c1e4b604..d910666142b1 100644
--- a/arch/sh/kernel/cpu/sh5/setup-sh5.c
+++ b/arch/sh/kernel/cpu/sh5/setup-sh5.c
@@ -68,16 +68,13 @@ static struct platform_device rtc_device = {
#define TMU2_BASE (TMU_BASE + 0x8 + (0xc * 0x2))
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = TMU0_BASE,
.end = TMU0_BASE + 0xc - 1,
.flags = IORESOURCE_MEM,
@@ -99,16 +96,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = TMU1_BASE,
.end = TMU1_BASE + 0xc - 1,
.flags = IORESOURCE_MEM,
@@ -130,15 +124,12 @@ static struct platform_device tmu1_device = {
};
static struct sh_timer_config tmu2_platform_data = {
- .name = "TMU2",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu2_resources[] = {
[0] = {
- .name = "TMU2",
.start = TMU2_BASE,
.end = TMU2_BASE + 0xc - 1,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/crash_dump.c b/arch/sh/kernel/crash_dump.c
index 95d216255565..37c97d444576 100644
--- a/arch/sh/kernel/crash_dump.c
+++ b/arch/sh/kernel/crash_dump.c
@@ -4,7 +4,6 @@
* Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
* Copyright (C) IBM Corporation, 2004. All rights reserved
*/
-
#include <linux/errno.h>
#include <linux/crash_dump.h>
#include <linux/io.h>
@@ -13,6 +12,25 @@
/* Stores the physical address of elf header of crash image. */
unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
+/*
+ * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
+ * is_kdump_kernel() to determine if we are booting after a panic. Hence
+ * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
+ *
+ * elfcorehdr= specifies the location of elf core header
+ * stored by the crashed kernel.
+ */
+static int __init parse_elfcorehdr(char *arg)
+{
+ if (!arg)
+ return -EINVAL;
+
+ elfcorehdr_addr = memparse(arg, &arg);
+
+ return 0;
+}
+early_param("elfcorehdr", parse_elfcorehdr);
+
/**
* copy_oldmem_page - copy one page from "oldmem"
* @pfn: page frame number to be copied
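The comment added above relies on elfcorehdr_addr doubling as the kdump indicator: once the elfcorehdr= early param has been parsed, the generic helper treats the kernel as a capture kernel. The snippet below paraphrases that helper from include/linux/crash_dump.h of this era, for context only.

/* Approximation of the generic helper the comment refers to: a capture
 * kernel is detected solely by whether elfcorehdr= was parsed. */
static inline int is_kdump_kernel(void)
{
	return (elfcorehdr_addr != ELFCORE_ADDR_MAX) ? 1 : 0;
}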
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
index a8234b2010d1..5ec1d1818691 100644
--- a/arch/sh/kernel/dwarf.c
+++ b/arch/sh/kernel/dwarf.c
@@ -22,6 +22,7 @@
#include <linux/mm.h>
#include <linux/elf.h>
#include <linux/ftrace.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <asm/dwarf.h>
#include <asm/unwinder.h>
diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S
index fe0b743881b0..6e35f012cc03 100644
--- a/arch/sh/kernel/head_32.S
+++ b/arch/sh/kernel/head_32.S
@@ -131,6 +131,7 @@ ENTRY(_stext)
* r8 = scratch register
* r9 = scratch register
* r10 = number of PMB entries we've setup
+ * r11 = scratch register
*/
mov.l .LMMUCR, r1 /* Flush the TLB */
@@ -167,8 +168,9 @@ ENTRY(_stext)
.Lvalidate_existing_mappings:
+ mov.l .LPMB_DATA_MASK, r11
mov.l @r7, r8
- and r0, r8
+ and r11, r8
cmp/eq r0, r8 /* Check for valid __MEMORY_START mappings */
bt .Lpmb_done
@@ -335,12 +337,13 @@ ENTRY(stack_start)
3: .long __bss_start
4: .long _end
5: .long start_kernel
-6: .long sh_cpu_init
+6: .long cpu_init
7: .long init_thread_union
#ifdef CONFIG_PMB
.LPMB_ADDR: .long PMB_ADDR
.LPMB_DATA: .long PMB_DATA
+.LPMB_DATA_MASK: .long PMB_PFN_MASK | PMB_V
.LFIRST_ADDR_ENTRY: .long PAGE_OFFSET | PMB_V
.LFIRST_DATA_ENTRY: .long __MEMORY_START | PMB_V
.LMMUCR: .long MMUCR
diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c
index 675eea7785d9..efae6ab3d54c 100644
--- a/arch/sh/kernel/hw_breakpoint.c
+++ b/arch/sh/kernel/hw_breakpoint.c
@@ -120,25 +120,16 @@ static int get_hbp_len(u16 hbp_len)
}
/*
- * Check for virtual address in user space.
- */
-int arch_check_va_in_userspace(unsigned long va, u16 hbp_len)
-{
- unsigned int len;
-
- len = get_hbp_len(hbp_len);
-
- return (va <= TASK_SIZE - len);
-}
-
-/*
* Check for virtual address in kernel space.
*/
-static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len)
+int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
unsigned int len;
+ unsigned long va;
+ struct arch_hw_breakpoint *info = counter_arch_bp(bp);
- len = get_hbp_len(hbp_len);
+ va = info->address;
+ len = get_hbp_len(info->len);
return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
@@ -226,8 +217,7 @@ static int arch_build_bp_info(struct perf_event *bp)
/*
* Validate the arch-specific HW Breakpoint register settings
*/
-int arch_validate_hwbkpt_settings(struct perf_event *bp,
- struct task_struct *tsk)
+int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
unsigned int align;
@@ -270,15 +260,6 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp,
if (info->address & align)
return -EINVAL;
- /* Check that the virtual address is in the proper range */
- if (tsk) {
- if (!arch_check_va_in_userspace(info->address, info->len))
- return -EFAULT;
- } else {
- if (!arch_check_va_in_kernelspace(info->address, info->len))
- return -EFAULT;
- }
-
return 0;
}
@@ -363,8 +344,7 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args)
perf_bp_event(bp, args->regs);
/* Deliver the signal to userspace */
- if (arch_check_va_in_userspace(bp->attr.bp_addr,
- bp->attr.bp_len)) {
+ if (!arch_check_bp_in_kernelspace(bp)) {
siginfo_t info;
info.si_signo = args->signr;
@@ -425,11 +405,6 @@ void hw_breakpoint_pmu_read(struct perf_event *bp)
/* TODO */
}
-void hw_breakpoint_pmu_unthrottle(struct perf_event *bp)
-{
- /* TODO */
-}
-
int register_sh_ubc(struct sh_ubc *ubc)
{
/* Bail if it's already assigned */
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 273f890b17ae..425d604e3a28 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -19,6 +19,7 @@
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <asm/atomic.h>
+#include <asm/smp.h>
void (*pm_idle)(void) = NULL;
@@ -89,10 +90,13 @@ void cpu_idle(void)
while (1) {
tick_nohz_stop_sched_tick(1);
- while (!need_resched() && cpu_online(cpu)) {
+ while (!need_resched()) {
check_pgt_cache();
rmb();
+ if (cpu_is_offline(cpu))
+ play_dead();
+
local_irq_disable();
/* Don't trace irqs off for idle */
stop_critical_timings();
@@ -133,7 +137,7 @@ static void do_nothing(void *unused)
void stop_this_cpu(void *unused)
{
local_irq_disable();
- cpu_clear(smp_processor_id(), cpu_online_map);
+ set_cpu_online(smp_processor_id(), false);
for (;;)
cpu_sleep();
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index d2d41d046657..257de1f0692b 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -12,6 +12,7 @@
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/ftrace.h>
+#include <linux/delay.h>
#include <asm/processor.h>
#include <asm/machvec.h>
#include <asm/uaccess.h>
@@ -113,19 +114,14 @@ union irq_ctx {
static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
-#endif
-asmlinkage __irq_entry int do_IRQ(unsigned int irq, struct pt_regs *regs)
+static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
+static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
+
+static inline void handle_one_irq(unsigned int irq)
{
- struct pt_regs *old_regs = set_irq_regs(regs);
-#ifdef CONFIG_IRQSTACKS
union irq_ctx *curctx, *irqctx;
-#endif
-
- irq_enter();
- irq = irq_demux(irq);
-#ifdef CONFIG_IRQSTACKS
curctx = (union irq_ctx *)current_thread_info();
irqctx = hardirq_ctx[smp_processor_id()];
@@ -164,20 +160,9 @@ asmlinkage __irq_entry int do_IRQ(unsigned int irq, struct pt_regs *regs)
"r5", "r6", "r7", "r8", "t", "pr"
);
} else
-#endif
generic_handle_irq(irq);
-
- irq_exit();
-
- set_irq_regs(old_regs);
- return 1;
}
-#ifdef CONFIG_IRQSTACKS
-static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
-
-static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
-
/*
* allocate per-cpu stacks for hardirq and for softirq processing
*/
@@ -257,8 +242,33 @@ asmlinkage void do_softirq(void)
local_irq_restore(flags);
}
+#else
+static inline void handle_one_irq(unsigned int irq)
+{
+ generic_handle_irq(irq);
+}
#endif
+asmlinkage __irq_entry int do_IRQ(unsigned int irq, struct pt_regs *regs)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+
+ irq_enter();
+
+ irq = irq_demux(irq_lookup(irq));
+
+ if (irq != NO_IRQ_IGNORE) {
+ handle_one_irq(irq);
+ irq_finish(irq);
+ }
+
+ irq_exit();
+
+ set_irq_regs(old_regs);
+
+ return IRQ_HANDLED;
+}
+
void __init init_IRQ(void)
{
plat_irq_setup();
@@ -283,3 +293,44 @@ int __init arch_probe_nr_irqs(void)
return 0;
}
#endif
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
+{
+ printk(KERN_INFO "IRQ%u: moving from cpu%u to cpu%u\n",
+ irq, desc->node, cpu);
+
+ raw_spin_lock_irq(&desc->lock);
+ desc->chip->set_affinity(irq, cpumask_of(cpu));
+ raw_spin_unlock_irq(&desc->lock);
+}
+
+/*
+ * The CPU has been marked offline. Migrate IRQs off this CPU. If
+ * the affinity settings do not allow other CPUs, force them onto any
+ * available CPU.
+ */
+void migrate_irqs(void)
+{
+ struct irq_desc *desc;
+ unsigned int irq, cpu = smp_processor_id();
+
+ for_each_irq_desc(irq, desc) {
+ if (desc->node == cpu) {
+ unsigned int newcpu = cpumask_any_and(desc->affinity,
+ cpu_online_mask);
+ if (newcpu >= nr_cpu_ids) {
+ if (printk_ratelimit())
+ printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
+ irq, cpu);
+
+ cpumask_setall(desc->affinity);
+ newcpu = cpumask_any_and(desc->affinity,
+ cpu_online_mask);
+ }
+
+ route_irq(desc, irq, newcpu);
+ }
+ }
+}
+#endif
diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
index 70c69659b846..efb6d398dec3 100644
--- a/arch/sh/kernel/kgdb.c
+++ b/arch/sh/kernel/kgdb.c
@@ -237,6 +237,18 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
return -1;
}
+unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
+{
+ if (exception == 60)
+ return instruction_pointer(regs) - 2;
+ return instruction_pointer(regs);
+}
+
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+ regs->pc = ip;
+}
+
/*
* The primary entry points for the kgdb debug trap table entries.
*/
@@ -247,7 +259,7 @@ BUILD_TRAP_HANDLER(singlestep)
local_irq_save(flags);
regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
- kgdb_handle_exception(vec >> 2, SIGTRAP, 0, regs);
+ kgdb_handle_exception(0, SIGTRAP, 0, regs);
local_irq_restore(flags);
}
diff --git a/arch/sh/kernel/localtimer.c b/arch/sh/kernel/localtimer.c
index 0b04e7d4a9b9..8bfc6dfa8b94 100644
--- a/arch/sh/kernel/localtimer.c
+++ b/arch/sh/kernel/localtimer.c
@@ -44,7 +44,7 @@ static void dummy_timer_set_mode(enum clock_event_mode mode,
{
}
-void __cpuinit local_timer_setup(unsigned int cpu)
+void local_timer_setup(unsigned int cpu)
{
struct clock_event_device *clk = &per_cpu(local_clockevent, cpu);
@@ -60,3 +60,7 @@ void __cpuinit local_timer_setup(unsigned int cpu)
clockevents_register_device(clk);
}
+
+void local_timer_stop(unsigned int cpu)
+{
+}
diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c
index 7672141c841b..0e90c7f9564f 100644
--- a/arch/sh/kernel/machine_kexec.c
+++ b/arch/sh/kernel/machine_kexec.c
@@ -147,4 +147,7 @@ void arch_crash_save_vmcoreinfo(void)
VMCOREINFO_SYMBOL(node_data);
VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
+#ifdef CONFIG_X2TLB
+ VMCOREINFO_CONFIG(X2TLB);
+#endif
}
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index 17f89aa4e1b3..dcb126dc76fd 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -90,7 +90,7 @@ void arch_task_cache_init(void)
# define HAVE_SOFTFP 0
#endif
-void init_thread_xstate(void)
+void __cpuinit init_thread_xstate(void)
{
if (boot_cpu_data.flags & CPU_HAS_FPU)
xstate_size = sizeof(struct sh_fpu_hard_struct);
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
index 7759a9a93211..d4104ce9fe53 100644
--- a/arch/sh/kernel/ptrace_32.c
+++ b/arch/sh/kernel/ptrace_32.c
@@ -85,7 +85,7 @@ static int set_single_step(struct task_struct *tsk, unsigned long addr)
bp = thread->ptrace_bps[0];
if (!bp) {
- hw_breakpoint_init(&attr);
+ ptrace_breakpoint_init(&attr);
attr.bp_addr = addr;
attr.bp_len = HW_BREAKPOINT_LEN_2;
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 8870d6ba64bf..4f1585f41f2b 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -39,6 +39,7 @@
#include <asm/irq.h>
#include <asm/setup.h>
#include <asm/clock.h>
+#include <asm/smp.h>
#include <asm/mmu_context.h>
/*
@@ -187,6 +188,63 @@ static inline void __init reserve_crashkernel(void)
{}
#endif
+static void __init check_for_initrd(void)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+ unsigned long start, end;
+
+ /*
+ * Check for the rare cases where boot loaders adhere to the boot
+ * ABI.
+ */
+ if (!LOADER_TYPE || !INITRD_START || !INITRD_SIZE)
+ goto disable;
+
+ start = INITRD_START + __MEMORY_START;
+ end = start + INITRD_SIZE;
+
+ if (unlikely(end <= start))
+ goto disable;
+ if (unlikely(start & ~PAGE_MASK)) {
+ pr_err("initrd must be page aligned\n");
+ goto disable;
+ }
+
+ if (unlikely(start < PAGE_OFFSET)) {
+ pr_err("initrd start < PAGE_OFFSET\n");
+ goto disable;
+ }
+
+ if (unlikely(end > lmb_end_of_DRAM())) {
+ pr_err("initrd extends beyond end of memory "
+ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
+ end, (unsigned long)lmb_end_of_DRAM());
+ goto disable;
+ }
+
+ /*
+	 * If we got this far in spite of the boot loader's best efforts
+ * to the contrary, assume we actually have a valid initrd and
+ * fix up the root dev.
+ */
+ ROOT_DEV = Root_RAM0;
+
+ /*
+ * Address sanitization
+ */
+ initrd_start = (unsigned long)__va(__pa(start));
+ initrd_end = initrd_start + INITRD_SIZE;
+
+ reserve_bootmem(__pa(initrd_start), INITRD_SIZE, BOOTMEM_DEFAULT);
+
+ return;
+
+disable:
+ pr_info("initrd disabled\n");
+ initrd_start = initrd_end = 0;
+#endif
+}
+
void __cpuinit calibrate_delay(void)
{
struct clk *clk = clk_get(NULL, "cpu_clk");
@@ -276,26 +334,7 @@ void __init setup_bootmem_allocator(unsigned long free_pfn)
sparse_memory_present_with_active_regions(0);
-#ifdef CONFIG_BLK_DEV_INITRD
- ROOT_DEV = Root_RAM0;
-
- if (LOADER_TYPE && INITRD_START) {
- unsigned long initrd_start_phys = INITRD_START + __MEMORY_START;
-
- if (initrd_start_phys + INITRD_SIZE <= PFN_PHYS(max_low_pfn)) {
- reserve_bootmem(initrd_start_phys, INITRD_SIZE,
- BOOTMEM_DEFAULT);
- initrd_start = (unsigned long)__va(initrd_start_phys);
- initrd_end = initrd_start + INITRD_SIZE;
- } else {
- printk("initrd extends beyond end of memory "
- "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
- initrd_start_phys + INITRD_SIZE,
- (unsigned long)PFN_PHYS(max_low_pfn));
- initrd_start = 0;
- }
- }
-#endif
+ check_for_initrd();
reserve_crashkernel();
}
@@ -341,25 +380,6 @@ static void __init setup_memory(void)
extern void __init setup_memory(void);
#endif
-/*
- * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
- * is_kdump_kernel() to determine if we are booting after a panic. Hence
- * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
- */
-#ifdef CONFIG_CRASH_DUMP
-/* elfcorehdr= specifies the location of elf core header
- * stored by the crashed kernel.
- */
-static int __init parse_elfcorehdr(char *arg)
-{
- if (!arg)
- return -EINVAL;
- elfcorehdr_addr = memparse(arg, &arg);
- return 0;
-}
-early_param("elfcorehdr", parse_elfcorehdr);
-#endif
-
void __init __attribute__ ((weak)) plat_early_device_setup(void)
{
}
@@ -459,9 +479,7 @@ void __init setup_arch(char **cmdline_p)
if (likely(sh_mv.mv_setup))
sh_mv.mv_setup(cmdline_p);
-#ifdef CONFIG_SMP
plat_smp_setup();
-#endif
}
/* processor boot mode configuration */
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 002cc612deef..509b36b45115 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -3,7 +3,7 @@
*
* SMP support for the SuperH processors.
*
- * Copyright (C) 2002 - 2008 Paul Mundt
+ * Copyright (C) 2002 - 2010 Paul Mundt
* Copyright (C) 2006 - 2007 Akio Idehara
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -31,7 +31,20 @@
int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */
-static inline void __init smp_store_cpu_info(unsigned int cpu)
+struct plat_smp_ops *mp_ops = NULL;
+
+/* State of each CPU */
+DEFINE_PER_CPU(int, cpu_state) = { 0 };
+
+void __cpuinit register_smp_ops(struct plat_smp_ops *ops)
+{
+ if (mp_ops)
+ printk(KERN_WARNING "Overriding previously set SMP ops\n");
+
+ mp_ops = ops;
+}
+
+static inline void __cpuinit smp_store_cpu_info(unsigned int cpu)
{
struct sh_cpuinfo *c = cpu_data + cpu;
@@ -46,14 +59,14 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
init_new_context(current, &init_mm);
current_thread_info()->cpu = cpu;
- plat_prepare_cpus(max_cpus);
+ mp_ops->prepare_cpus(max_cpus);
#ifndef CONFIG_HOTPLUG_CPU
init_cpu_present(&cpu_possible_map);
#endif
}
-void __devinit smp_prepare_boot_cpu(void)
+void __init smp_prepare_boot_cpu(void)
{
unsigned int cpu = smp_processor_id();
@@ -62,37 +75,137 @@ void __devinit smp_prepare_boot_cpu(void)
set_cpu_online(cpu, true);
set_cpu_possible(cpu, true);
+
+ per_cpu(cpu_state, cpu) = CPU_ONLINE;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+void native_cpu_die(unsigned int cpu)
+{
+ unsigned int i;
+
+ for (i = 0; i < 10; i++) {
+ smp_rmb();
+ if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
+ if (system_state == SYSTEM_RUNNING)
+ pr_info("CPU %u is now offline\n", cpu);
+
+ return;
+ }
+
+ msleep(100);
+ }
+
+ pr_err("CPU %u didn't die...\n", cpu);
+}
+
+int native_cpu_disable(unsigned int cpu)
+{
+ return cpu == 0 ? -EPERM : 0;
+}
+
+void play_dead_common(void)
+{
+ idle_task_exit();
+ irq_ctx_exit(raw_smp_processor_id());
+ mb();
+
+ __get_cpu_var(cpu_state) = CPU_DEAD;
+ local_irq_disable();
+}
+
+void native_play_dead(void)
+{
+ play_dead_common();
}
+int __cpu_disable(void)
+{
+ unsigned int cpu = smp_processor_id();
+ struct task_struct *p;
+ int ret;
+
+ ret = mp_ops->cpu_disable(cpu);
+ if (ret)
+ return ret;
+
+ /*
+ * Take this CPU offline. Once we clear this, we can't return,
+ * and we must not schedule until we're ready to give up the cpu.
+ */
+ set_cpu_online(cpu, false);
+
+ /*
+ * OK - migrate IRQs away from this CPU
+ */
+ migrate_irqs();
+
+ /*
+ * Stop the local timer for this CPU.
+ */
+ local_timer_stop(cpu);
+
+ /*
+ * Flush user cache and TLB mappings, and then remove this CPU
+ * from the vm mask set of all processes.
+ */
+ flush_cache_all();
+ local_flush_tlb_all();
+
+ read_lock(&tasklist_lock);
+ for_each_process(p)
+ if (p->mm)
+ cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
+ read_unlock(&tasklist_lock);
+
+ return 0;
+}
+#else /* ... !CONFIG_HOTPLUG_CPU */
+int native_cpu_disable(unsigned int cpu)
+{
+ return -ENOSYS;
+}
+
+void native_cpu_die(unsigned int cpu)
+{
+ /* We said "no" in __cpu_disable */
+ BUG();
+}
+
+void native_play_dead(void)
+{
+ BUG();
+}
+#endif
+
asmlinkage void __cpuinit start_secondary(void)
{
- unsigned int cpu;
+ unsigned int cpu = smp_processor_id();
struct mm_struct *mm = &init_mm;
enable_mmu();
atomic_inc(&mm->mm_count);
atomic_inc(&mm->mm_users);
current->active_mm = mm;
- BUG_ON(current->mm);
enter_lazy_tlb(mm, current);
+ local_flush_tlb_all();
per_cpu_trap_init();
preempt_disable();
- notify_cpu_starting(smp_processor_id());
+ notify_cpu_starting(cpu);
local_irq_enable();
- cpu = smp_processor_id();
-
/* Enable local timers */
local_timer_setup(cpu);
calibrate_delay();
smp_store_cpu_info(cpu);
- cpu_set(cpu, cpu_online_map);
+ set_cpu_online(cpu, true);
+ per_cpu(cpu_state, cpu) = CPU_ONLINE;
cpu_idle();
}
@@ -111,12 +224,19 @@ int __cpuinit __cpu_up(unsigned int cpu)
struct task_struct *tsk;
unsigned long timeout;
- tsk = fork_idle(cpu);
- if (IS_ERR(tsk)) {
- printk(KERN_ERR "Failed forking idle task for cpu %d\n", cpu);
- return PTR_ERR(tsk);
+ tsk = cpu_data[cpu].idle;
+ if (!tsk) {
+ tsk = fork_idle(cpu);
+ if (IS_ERR(tsk)) {
+ pr_err("Failed forking idle task for cpu %d\n", cpu);
+ return PTR_ERR(tsk);
+ }
+
+ cpu_data[cpu].idle = tsk;
}
+ per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
+
/* Fill in data in head.S for secondary cpus */
stack_start.sp = tsk->thread.sp;
stack_start.thread_info = tsk->stack;
@@ -127,7 +247,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
(unsigned long)&stack_start + sizeof(stack_start));
wmb();
- plat_start_cpu(cpu, (unsigned long)_stext);
+ mp_ops->start_cpu(cpu, (unsigned long)_stext);
timeout = jiffies + HZ;
while (time_before(jiffies, timeout)) {
@@ -135,6 +255,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
break;
udelay(10);
+ barrier();
}
if (cpu_online(cpu))
@@ -159,7 +280,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
void smp_send_reschedule(int cpu)
{
- plat_send_ipi(cpu, SMP_MSG_RESCHEDULE);
+ mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
}
void smp_send_stop(void)
@@ -172,12 +293,12 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
int cpu;
for_each_cpu(cpu, mask)
- plat_send_ipi(cpu, SMP_MSG_FUNCTION);
+ mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
}
void arch_send_call_function_single_ipi(int cpu)
{
- plat_send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
+ mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
}
void smp_timer_broadcast(const struct cpumask *mask)
@@ -185,7 +306,7 @@ void smp_timer_broadcast(const struct cpumask *mask)
int cpu;
for_each_cpu(cpu, mask)
- plat_send_ipi(cpu, SMP_MSG_TIMER);
+ mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
}
static void ipi_timer(void)
@@ -249,7 +370,6 @@ static void flush_tlb_mm_ipi(void *mm)
* behalf of debugees, kswapd stealing pages from another process etc).
* Kanoj 07/00.
*/
-
void flush_tlb_mm(struct mm_struct *mm)
{
preempt_disable();
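
For reference, the plat_* -> mp_ops conversion above turns the platform SMP hooks into a registered ops table. A rough sketch of what a platform-side provider looks like follows; the struct layout is inferred from the ->prepare_cpus/->start_cpu/->send_ipi/->cpu_disable call sites in this hunk (the real definition lives in a header that is not part of this diff), so treat the field list as an assumption rather than the actual asm/smp-ops.h contents.

/* Sketch only: field names inferred from the callers above. */
struct plat_smp_ops {
	void (*prepare_cpus)(unsigned int max_cpus);
	void (*start_cpu)(unsigned int cpu, unsigned long entry_point);
	void (*send_ipi)(unsigned int cpu, unsigned int message);
	int (*cpu_disable)(unsigned int cpu);
	void (*cpu_die)(unsigned int cpu);
};

static void example_prepare_cpus(unsigned int max_cpus)
{
	/* wake-up mailbox setup, secondary clock enables, etc. */
}

static void example_start_cpu(unsigned int cpu, unsigned long entry_point)
{
	/* point the secondary CPU at entry_point and release it from reset */
}

static void example_send_ipi(unsigned int cpu, unsigned int message)
{
	/* poke the interrupt controller's IPI register for 'cpu' */
}

static struct plat_smp_ops example_smp_ops = {
	.prepare_cpus	= example_prepare_cpus,
	.start_cpu	= example_start_cpu,
	.send_ipi	= example_send_ipi,
	.cpu_disable	= native_cpu_disable,	/* defined in the hunk above */
	.cpu_die	= native_cpu_die,
};

void example_smp_setup(void)
{
	/* must run before smp_prepare_cpus(), which dereferences mp_ops */
	register_smp_ops(&example_smp_ops);
}
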
diff --git a/arch/sh/kernel/topology.c b/arch/sh/kernel/topology.c
index 9b0b633b6c92..948fdb656933 100644
--- a/arch/sh/kernel/topology.c
+++ b/arch/sh/kernel/topology.c
@@ -52,7 +52,11 @@ static int __init topology_init(void)
#endif
for_each_present_cpu(i) {
- ret = register_cpu(&per_cpu(cpu_devices, i), i);
+ struct cpu *c = &per_cpu(cpu_devices, i);
+
+ c->hotpluggable = 1;
+
+ ret = register_cpu(c, i);
if (unlikely(ret))
printk(KERN_WARNING "%s: register_cpu %d failed (%d)\n",
__func__, i, ret);
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index 3dc8a8a63822..53f7c684afb2 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -10,6 +10,7 @@ cacheops-$(CONFIG_CPU_SH3) := cache-sh3.o
cacheops-$(CONFIG_CPU_SH4) := cache-sh4.o flush-sh4.o
cacheops-$(CONFIG_CPU_SH5) := cache-sh5.o flush-sh4.o
cacheops-$(CONFIG_SH7705_CACHE_32KB) += cache-sh7705.o
+cacheops-$(CONFIG_CPU_SHX3) += cache-shx3.o
obj-y += $(cacheops-y)
@@ -18,13 +19,14 @@ mmu-$(CONFIG_MMU) := extable_$(BITS).o fault_$(BITS).o \
ioremap.o kmap.o pgtable.o tlbflush_$(BITS).o
obj-y += $(mmu-y)
-obj-$(CONFIG_DEBUG_FS) += asids-debugfs.o
-ifdef CONFIG_DEBUG_FS
-obj-$(CONFIG_CPU_SH4) += cache-debugfs.o
+debugfs-y := asids-debugfs.o
+ifndef CONFIG_CACHE_OFF
+debugfs-$(CONFIG_CPU_SH4) += cache-debugfs.o
endif
ifdef CONFIG_MMU
+debugfs-$(CONFIG_CPU_SH4) += tlb-debugfs.o
tlb-$(CONFIG_CPU_SH3) := tlb-sh3.o
tlb-$(CONFIG_CPU_SH4) := tlb-sh4.o tlb-urb.o
tlb-$(CONFIG_CPU_SH5) := tlb-sh5.o
@@ -32,6 +34,7 @@ tlb-$(CONFIG_CPU_HAS_PTEAEX) := tlb-pteaex.o tlb-urb.o
obj-y += $(tlb-y)
endif
+obj-$(CONFIG_DEBUG_FS) += $(debugfs-y)
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_PMB) += pmb.o
obj-$(CONFIG_NUMA) += numa.o
diff --git a/arch/sh/mm/cache-shx3.c b/arch/sh/mm/cache-shx3.c
new file mode 100644
index 000000000000..c0adbee97b5f
--- /dev/null
+++ b/arch/sh/mm/cache-shx3.c
@@ -0,0 +1,44 @@
+/*
+ * arch/sh/mm/cache-shx3.c - SH-X3 optimized cache ops
+ *
+ * Copyright (C) 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <asm/cache.h>
+
+#define CCR_CACHE_SNM 0x40000 /* Hardware-assisted synonym avoidance */
+#define CCR_CACHE_IBE 0x1000000 /* ICBI broadcast */
+
+void __init shx3_cache_init(void)
+{
+ unsigned int ccr;
+
+ ccr = __raw_readl(CCR);
+
+ /*
+ * If we've got cache aliases, resolve them in hardware.
+ */
+ if (boot_cpu_data.dcache.n_aliases || boot_cpu_data.icache.n_aliases) {
+ ccr |= CCR_CACHE_SNM;
+
+ boot_cpu_data.icache.n_aliases = 0;
+ boot_cpu_data.dcache.n_aliases = 0;
+
+ pr_info("Enabling hardware synonym avoidance\n");
+ }
+
+#ifdef CONFIG_SMP
+ /*
+ * Broadcast I-cache block invalidations by default.
+ */
+ ccr |= CCR_CACHE_IBE;
+#endif
+
+ writel_uncached(ccr, CCR);
+}
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 0f4095d7ac8b..ba401d137bb9 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -334,6 +334,13 @@ void __init cpu_cache_init(void)
extern void __weak sh4_cache_init(void);
sh4_cache_init();
+
+ if ((boot_cpu_data.type == CPU_SH7786) ||
+ (boot_cpu_data.type == CPU_SHX3)) {
+ extern void __weak shx3_cache_init(void);
+
+ shx3_cache_init();
+ }
}
if (boot_cpu_data.family == CPU_FAMILY_SH5) {
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index 8bf79e3b7bdd..d4c34d757f0d 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -200,7 +200,6 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
-survive:
fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
@@ -290,15 +289,10 @@ no_context:
*/
out_of_memory:
up_read(&mm->mmap_sem);
- if (is_global_init(current)) {
- yield();
- down_read(&mm->mmap_sem);
- goto survive;
- }
- printk("VM: killing process %s\n", tsk->comm);
- if (user_mode(regs))
- do_group_exit(SIGKILL);
- goto no_context;
+ if (!user_mode(regs))
+ goto no_context;
+ pagefault_out_of_memory();
+ return;
do_sigbus:
up_read(&mm->mmap_sem);
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index e43ec600afcf..e9f5384f3f1c 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -680,7 +680,7 @@ static void __init pmb_merge(struct pmb_entry *head)
/*
* The merged page size must be valid.
*/
- if (!pmb_size_valid(newsize))
+ if (!depth || !pmb_size_valid(newsize))
return;
head->flags &= ~PMB_SZ_MASK;
diff --git a/arch/sh/mm/tlb-debugfs.c b/arch/sh/mm/tlb-debugfs.c
new file mode 100644
index 000000000000..229bf75f28df
--- /dev/null
+++ b/arch/sh/mm/tlb-debugfs.c
@@ -0,0 +1,179 @@
+/*
+ * arch/sh/mm/tlb-debugfs.c
+ *
+ * debugfs ops for SH-4 ITLB/UTLBs.
+ *
+ * Copyright (C) 2010 Matt Fleming
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <asm/processor.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+
+enum tlb_type {
+ TLB_TYPE_ITLB,
+ TLB_TYPE_UTLB,
+};
+
+static struct {
+ int bits;
+ const char *size;
+} tlb_sizes[] = {
+ { 0x0, " 1KB" },
+ { 0x1, " 4KB" },
+ { 0x2, " 8KB" },
+ { 0x4, " 64KB" },
+ { 0x5, "256KB" },
+ { 0x7, " 1MB" },
+ { 0x8, " 4MB" },
+ { 0xc, " 64MB" },
+};
+
+static int tlb_seq_show(struct seq_file *file, void *iter)
+{
+ unsigned int tlb_type = (unsigned int)file->private;
+ unsigned long addr1, addr2, data1, data2;
+ unsigned long flags;
+ unsigned long mmucr;
+ unsigned int nentries, entry;
+ unsigned int urb;
+
+ mmucr = __raw_readl(MMUCR);
+ if ((mmucr & 0x1) == 0) {
+ seq_printf(file, "address translation disabled\n");
+ return 0;
+ }
+
+ if (tlb_type == TLB_TYPE_ITLB) {
+ addr1 = MMU_ITLB_ADDRESS_ARRAY;
+ addr2 = MMU_ITLB_ADDRESS_ARRAY2;
+ data1 = MMU_ITLB_DATA_ARRAY;
+ data2 = MMU_ITLB_DATA_ARRAY2;
+ nentries = 4;
+ } else {
+ addr1 = MMU_UTLB_ADDRESS_ARRAY;
+ addr2 = MMU_UTLB_ADDRESS_ARRAY2;
+ data1 = MMU_UTLB_DATA_ARRAY;
+ data2 = MMU_UTLB_DATA_ARRAY2;
+ nentries = 64;
+ }
+
+ local_irq_save(flags);
+ jump_to_uncached();
+
+ urb = (mmucr & MMUCR_URB) >> MMUCR_URB_SHIFT;
+
+ /* Make the "entry >= urb" test fail. */
+ if (urb == 0)
+ urb = MMUCR_URB_NENTRIES + 1;
+
+ if (tlb_type == TLB_TYPE_ITLB) {
+ addr1 = MMU_ITLB_ADDRESS_ARRAY;
+ addr2 = MMU_ITLB_ADDRESS_ARRAY2;
+ data1 = MMU_ITLB_DATA_ARRAY;
+ data2 = MMU_ITLB_DATA_ARRAY2;
+ nentries = 4;
+ } else {
+ addr1 = MMU_UTLB_ADDRESS_ARRAY;
+ addr2 = MMU_UTLB_ADDRESS_ARRAY2;
+ data1 = MMU_UTLB_DATA_ARRAY;
+ data2 = MMU_UTLB_DATA_ARRAY2;
+ nentries = 64;
+ }
+
+ seq_printf(file, "entry: vpn ppn asid size valid wired\n");
+
+ for (entry = 0; entry < nentries; entry++) {
+ unsigned long vpn, ppn, asid, size;
+ unsigned long valid;
+ unsigned long val;
+ const char *sz = " ?";
+ int i;
+
+ val = __raw_readl(addr1 | (entry << MMU_TLB_ENTRY_SHIFT));
+ ctrl_barrier();
+ vpn = val & 0xfffffc00;
+ valid = val & 0x100;
+
+ val = __raw_readl(addr2 | (entry << MMU_TLB_ENTRY_SHIFT));
+ ctrl_barrier();
+ asid = val & MMU_CONTEXT_ASID_MASK;
+
+ val = __raw_readl(data1 | (entry << MMU_TLB_ENTRY_SHIFT));
+ ctrl_barrier();
+ ppn = (val & 0x0ffffc00) << 4;
+
+ val = __raw_readl(data2 | (entry << MMU_TLB_ENTRY_SHIFT));
+ ctrl_barrier();
+ size = (val & 0xf0) >> 4;
+
+ for (i = 0; i < ARRAY_SIZE(tlb_sizes); i++) {
+ if (tlb_sizes[i].bits == size)
+ break;
+ }
+
+ if (i != ARRAY_SIZE(tlb_sizes))
+ sz = tlb_sizes[i].size;
+
+ seq_printf(file, "%2d: 0x%08lx 0x%08lx %5lu %s %s %s\n",
+ entry, vpn, ppn, asid,
+ sz, valid ? "V" : "-",
+ (urb <= entry) ? "W" : "-");
+ }
+
+ back_to_cached();
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+static int tlb_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, tlb_seq_show, inode->i_private);
+}
+
+static const struct file_operations tlb_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = tlb_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init tlb_debugfs_init(void)
+{
+ struct dentry *itlb, *utlb;
+
+ itlb = debugfs_create_file("itlb", S_IRUSR, sh_debugfs_root,
+ (unsigned int *)TLB_TYPE_ITLB,
+ &tlb_debugfs_fops);
+ if (unlikely(!itlb))
+ return -ENOMEM;
+ if (IS_ERR(itlb))
+ return PTR_ERR(itlb);
+
+ utlb = debugfs_create_file("utlb", S_IRUSR, sh_debugfs_root,
+ (unsigned int *)TLB_TYPE_UTLB,
+ &tlb_debugfs_fops);
+ if (unlikely(!utlb)) {
+ debugfs_remove(itlb);
+ return -ENOMEM;
+ }
+
+ if (IS_ERR(utlb)) {
+ debugfs_remove(itlb);
+ return PTR_ERR(utlb);
+ }
+
+ return 0;
+}
+module_init(tlb_debugfs_init);
+
+MODULE_LICENSE("GPL v2");
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index 706da1d3a67a..03db41cc1268 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -189,7 +189,6 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
-survive:
fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
@@ -294,22 +293,11 @@ no_context:
* us unable to handle the page fault gracefully.
*/
out_of_memory:
- if (is_global_init(current)) {
- panic("INIT out of memory\n");
- yield();
- goto survive;
- }
- printk("fault:Out of memory\n");
up_read(&mm->mmap_sem);
- if (is_global_init(current)) {
- yield();
- down_read(&mm->mmap_sem);
- goto survive;
- }
- printk("VM: killing process %s\n", tsk->comm);
- if (user_mode(regs))
- do_group_exit(SIGKILL);
- goto no_context;
+ if (!user_mode(regs))
+ goto no_context;
+ pagefault_out_of_memory();
+ return;
do_sigbus:
printk("fault:Do sigbus\n");
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 9908d477ccd9..d6781ce687e2 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -75,7 +75,7 @@ config ARCH_USES_GETTIMEOFFSET
config GENERIC_CMOS_UPDATE
bool
- default y if SPARC64
+ default y
config GENERIC_CLOCKEVENTS
bool
diff --git a/arch/sparc/boot/btfixupprep.c b/arch/sparc/boot/btfixupprep.c
index bbf91b9c3d39..e7f2940bd270 100644
--- a/arch/sparc/boot/btfixupprep.c
+++ b/arch/sparc/boot/btfixupprep.c
@@ -325,7 +325,7 @@ main1:
(*rr)->next = NULL;
}
printf("! Generated by btfixupprep. Do not edit.\n\n");
- printf("\t.section\t\".data.init\",#alloc,#write\n\t.align\t4\n\n");
+ printf("\t.section\t\".data..init\",#alloc,#write\n\t.align\t4\n\n");
printf("\t.global\t___btfixup_start\n___btfixup_start:\n\n");
for (i = 0; i < last; i++) {
f = array + i;
diff --git a/arch/sparc/include/asm/bitops_64.h b/arch/sparc/include/asm/bitops_64.h
index e72ac9cdfb98..766121a67a24 100644
--- a/arch/sparc/include/asm/bitops_64.h
+++ b/arch/sparc/include/asm/bitops_64.h
@@ -44,7 +44,7 @@ extern void change_bit(unsigned long nr, volatile unsigned long *addr);
#ifdef ULTRA_HAS_POPULATION_COUNT
-static inline unsigned int hweight64(unsigned long w)
+static inline unsigned int __arch_hweight64(unsigned long w)
{
unsigned int res;
@@ -52,7 +52,7 @@ static inline unsigned int hweight64(unsigned long w)
return res;
}
-static inline unsigned int hweight32(unsigned int w)
+static inline unsigned int __arch_hweight32(unsigned int w)
{
unsigned int res;
@@ -60,7 +60,7 @@ static inline unsigned int hweight32(unsigned int w)
return res;
}
-static inline unsigned int hweight16(unsigned int w)
+static inline unsigned int __arch_hweight16(unsigned int w)
{
unsigned int res;
@@ -68,7 +68,7 @@ static inline unsigned int hweight16(unsigned int w)
return res;
}
-static inline unsigned int hweight8(unsigned int w)
+static inline unsigned int __arch_hweight8(unsigned int w)
{
unsigned int res;
@@ -78,9 +78,10 @@ static inline unsigned int hweight8(unsigned int w)
#else
-#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/arch_hweight.h>
#endif
+#include <asm-generic/bitops/const_hweight.h>
#include <asm-generic/bitops/lock.h>
#endif /* __KERNEL__ */
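
The rename above only exposes the UltraSPARC popc-based routines under the __arch_hweight*() names; the user-visible hweightN() helpers are then supplied by the generic const_hweight wrapper that is now included. Conceptually the wrapper dispatches as in the sketch below; the macro bodies here are written out by hand for illustration and are not a verbatim copy of asm-generic/bitops/const_hweight.h.

/* Fold constant arguments at compile time, call the arch routine otherwise. */
#define __const_hweight8(w)					\
	((unsigned int)((!!((w) & 0x01)) + (!!((w) & 0x02)) +	\
			(!!((w) & 0x04)) + (!!((w) & 0x08)) +	\
			(!!((w) & 0x10)) + (!!((w) & 0x20)) +	\
			(!!((w) & 0x40)) + (!!((w) & 0x80))))
#define __const_hweight16(w) (__const_hweight8(w)  + __const_hweight8((w) >> 8))
#define __const_hweight32(w) (__const_hweight16(w) + __const_hweight16((w) >> 16))

#define hweight32(w)						\
	(__builtin_constant_p(w) ? __const_hweight32(w)		\
				 : __arch_hweight32(w))

/* hweight32(0xf0f0f0f0) folds to 16 at compile time; a runtime value
 * goes through __arch_hweight32(), i.e. the popc path on UltraSPARC. */
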
diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
index 41f85ae4bd4a..2909f0ab6f9e 100644
--- a/arch/sparc/include/asm/cache.h
+++ b/arch/sparc/include/asm/cache.h
@@ -19,7 +19,7 @@
#define SMP_CACHE_BYTES (1 << SMP_CACHE_BYTES_SHIFT)
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
#ifdef CONFIG_SPARC32
#include <asm/asi.h>
diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c
index 04df4edc0073..539243b236fa 100644
--- a/arch/sparc/kernel/kgdb_32.c
+++ b/arch/sparc/kernel/kgdb_32.c
@@ -158,6 +158,12 @@ void kgdb_arch_exit(void)
{
}
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+ regs->pc = ip;
+ regs->npc = regs->pc + 4;
+}
+
struct kgdb_arch arch_kgdb_ops = {
/* Breakpoint instruction: ta 0x7d */
.gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
index 0a2bd0f99fc1..768290a6c028 100644
--- a/arch/sparc/kernel/kgdb_64.c
+++ b/arch/sparc/kernel/kgdb_64.c
@@ -181,6 +181,12 @@ void kgdb_arch_exit(void)
{
}
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+ regs->tpc = ip;
+ regs->tnpc = regs->tpc + 4;
+}
+
struct kgdb_arch arch_kgdb_ops = {
/* Breakpoint instruction: ta 0x72 */
.gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c
index da527b33ebc7..4926c1babd84 100644
--- a/arch/sparc/kernel/of_device_32.c
+++ b/arch/sparc/kernel/of_device_32.c
@@ -348,6 +348,7 @@ static struct of_device * __init scan_one_device(struct device_node *dp,
sd->prom_node = dp;
sd->op = op;
+ op->dev.of_node = dp;
op->node = dp;
op->clock_freq = of_getintprop_default(dp, "clock-frequency",
diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c
index b3d4cb5d21b3..5bc74161667c 100644
--- a/arch/sparc/kernel/of_device_64.c
+++ b/arch/sparc/kernel/of_device_64.c
@@ -643,6 +643,7 @@ static struct of_device * __init scan_one_device(struct device_node *dp,
sd->prom_node = dp;
sd->op = op;
+ op->dev.of_node = dp;
op->node = dp;
op->clock_freq = of_getintprop_default(dp, "clock-frequency",
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 5ac539a5930f..0c920147b4ef 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -262,6 +262,7 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
sd->stc = &pbm->stc;
sd->host_controller = pbm;
sd->prom_node = node;
+ dev->dev.of_node = node;
sd->op = op = of_find_device_by_node(node);
sd->numa_node = pbm->numa_node;
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c
index 0d4c09b15efc..4453003032b5 100644
--- a/arch/sparc/kernel/time_32.c
+++ b/arch/sparc/kernel/time_32.c
@@ -78,6 +78,11 @@ __volatile__ unsigned int *master_l10_counter;
u32 (*do_arch_gettimeoffset)(void);
+int update_persistent_clock(struct timespec now)
+{
+ return set_rtc_mmss(now.tv_sec);
+}
+
/*
* timer_interrupt() needs to keep up the real-time clock,
* as well as call the "do_timer()" routine every clocktick
@@ -87,9 +92,6 @@ u32 (*do_arch_gettimeoffset)(void);
static irqreturn_t timer_interrupt(int dummy, void *dev_id)
{
- /* last time the cmos clock got updated */
- static long last_rtc_update;
-
#ifndef CONFIG_SMP
profile_tick(CPU_PROFILING);
#endif
@@ -101,16 +103,6 @@ static irqreturn_t timer_interrupt(int dummy, void *dev_id)
do_timer(1);
- /* Determine when to update the Mostek clock. */
- if (ntp_synced() &&
- xtime.tv_sec > last_rtc_update + 660 &&
- (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
- (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
- if (set_rtc_mmss(xtime.tv_sec) == 0)
- last_rtc_update = xtime.tv_sec;
- else
- last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
- }
write_sequnlock(&xtime_lock);
#ifndef CONFIG_SMP
diff --git a/arch/um/drivers/hostaudio_kern.c b/arch/um/drivers/hostaudio_kern.c
index 368219cc2366..3ceede02f426 100644
--- a/arch/um/drivers/hostaudio_kern.c
+++ b/arch/um/drivers/hostaudio_kern.c
@@ -186,7 +186,9 @@ static int hostaudio_open(struct inode *inode, struct file *file)
int ret;
#ifdef DEBUG
+ kparam_block_sysfs_write(dsp);
printk(KERN_DEBUG "hostaudio: open called (host: %s)\n", dsp);
+ kparam_unblock_sysfs_write(dsp);
#endif
state = kmalloc(sizeof(struct hostaudio_state), GFP_KERNEL);
@@ -198,7 +200,9 @@ static int hostaudio_open(struct inode *inode, struct file *file)
if (file->f_mode & FMODE_WRITE)
w = 1;
+ kparam_block_sysfs_write(dsp);
ret = os_open_file(dsp, of_set_rw(OPENFLAGS(), r, w), 0);
+ kparam_unblock_sysfs_write(dsp);
if (ret < 0) {
kfree(state);
return ret;
@@ -254,11 +258,15 @@ static int hostmixer_open_mixdev(struct inode *inode, struct file *file)
if (file->f_mode & FMODE_WRITE)
w = 1;
+ kparam_block_sysfs_write(mixer);
ret = os_open_file(mixer, of_set_rw(OPENFLAGS(), r, w), 0);
+ kparam_unblock_sysfs_write(mixer);
if (ret < 0) {
+ kparam_block_sysfs_write(dsp);
printk(KERN_ERR "hostaudio_open_mixdev failed to open '%s', "
"err = %d\n", dsp, -ret);
+ kparam_unblock_sysfs_write(dsp);
kfree(state);
return ret;
}
@@ -314,8 +322,10 @@ MODULE_LICENSE("GPL");
static int __init hostaudio_init_module(void)
{
+ __kernel_param_lock();
printk(KERN_INFO "UML Audio Relay (host dsp = %s, host mixer = %s)\n",
dsp, mixer);
+ __kernel_param_unlock();
module_data.dev_audio = register_sound_dsp(&hostaudio_fops, -1);
if (module_data.dev_audio < 0) {
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index 7a656bd8bd3c..7f7338c90784 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -19,7 +19,6 @@ static irqreturn_t line_interrupt(int irq, void *data)
{
struct chan *chan = data;
struct line *line = chan->line;
- struct tty_struct *tty;
if (line)
chan_interrupt(&line->chan_list, &line->task, line->tty, irq);
diff --git a/arch/um/include/asm/system.h b/arch/um/include/asm/system.h
index 753346e2cdfd..93af1cf0907d 100644
--- a/arch/um/include/asm/system.h
+++ b/arch/um/include/asm/system.h
@@ -3,11 +3,8 @@
#include "sysdep/system.h"
-extern void *switch_to(void *prev, void *next, void *last);
-
extern int get_signals(void);
extern int set_signals(int enable);
-extern int get_signals(void);
extern void block_signals(void);
extern void unblock_signals(void);
diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S
index 7fcad58e216d..69268014dd8e 100644
--- a/arch/um/kernel/dyn.lds.S
+++ b/arch/um/kernel/dyn.lds.S
@@ -94,7 +94,7 @@ SECTIONS
.data : {
INIT_TASK_DATA(KERNEL_STACK_SIZE)
. = ALIGN(KERNEL_STACK_SIZE);
- *(.data.init_irqstack)
+ *(.data..init_irqstack)
DATA_DATA
*(.data.* .gnu.linkonce.d.*)
SORT(CONSTRUCTORS)
diff --git a/arch/um/kernel/init_task.c b/arch/um/kernel/init_task.c
index 8aa77b61a5ff..ddc9698b66ed 100644
--- a/arch/um/kernel/init_task.c
+++ b/arch/um/kernel/init_task.c
@@ -34,5 +34,5 @@ union thread_union init_thread_union __init_task_data =
{ INIT_THREAD_INFO(init_task) };
union thread_union cpu0_irqstack
- __attribute__((__section__(".data.init_irqstack"))) =
+ __attribute__((__section__(".data..init_irqstack"))) =
{ INIT_THREAD_INFO(init_task) };
diff --git a/arch/um/kernel/skas/syscall.c b/arch/um/kernel/skas/syscall.c
index 4e3b820bd2be..f5173e1ec3ac 100644
--- a/arch/um/kernel/skas/syscall.c
+++ b/arch/um/kernel/skas/syscall.c
@@ -10,7 +10,7 @@
#include "sysdep/syscalls.h"
extern int syscall_table_size;
-#define NR_syscalls (syscall_table_size / sizeof(void *))
+#define NR_SYSCALLS (syscall_table_size / sizeof(void *))
void handle_syscall(struct uml_pt_regs *r)
{
@@ -30,7 +30,7 @@ void handle_syscall(struct uml_pt_regs *r)
* in case it's a compiler bug.
*/
syscall = UPT_SYSCALL_NR(r);
- if ((syscall >= NR_syscalls) || (syscall < 0))
+ if ((syscall >= NR_SYSCALLS) || (syscall < 0))
result = -ENOSYS;
else result = EXECUTE_SYSCALL(syscall, regs);
diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S
index e7a6cca667aa..ec6378550671 100644
--- a/arch/um/kernel/uml.lds.S
+++ b/arch/um/kernel/uml.lds.S
@@ -50,7 +50,7 @@ SECTIONS
{
INIT_TASK_DATA(KERNEL_STACK_SIZE)
. = ALIGN(KERNEL_STACK_SIZE);
- *(.data.init_irqstack)
+ *(.data..init_irqstack)
DATA_DATA
*(.gnu.linkonce.d*)
CONSTRUCTORS
diff --git a/arch/um/sys-i386/asm/elf.h b/arch/um/sys-i386/asm/elf.h
index e64cd41d7bab..a979a22a8d9f 100644
--- a/arch/um/sys-i386/asm/elf.h
+++ b/arch/um/sys-i386/asm/elf.h
@@ -75,6 +75,8 @@ typedef struct user_i387_struct elf_fpregset_t;
pr_reg[16] = PT_REGS_SS(regs); \
} while (0);
+struct task_struct;
+
extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
#define ELF_CORE_COPY_FPREGS(t, fpu) elf_core_copy_fpregs(t, fpu)
diff --git a/arch/um/sys-x86_64/asm/elf.h b/arch/um/sys-x86_64/asm/elf.h
index 49655c83efd2..d760967f33a7 100644
--- a/arch/um/sys-x86_64/asm/elf.h
+++ b/arch/um/sys-x86_64/asm/elf.h
@@ -95,6 +95,8 @@ typedef struct user_i387_struct elf_fpregset_t;
(pr_reg)[25] = 0; \
(pr_reg)[26] = 0;
+struct task_struct;
+
extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
#define ELF_CORE_COPY_FPREGS(t, fpu) elf_core_copy_fpregs(t, fpu)
diff --git a/arch/um/sys-x86_64/signal.c b/arch/um/sys-x86_64/signal.c
index 1a899a7ed7a6..b6b65c7c7a7d 100644
--- a/arch/um/sys-x86_64/signal.c
+++ b/arch/um/sys-x86_64/signal.c
@@ -6,6 +6,7 @@
#include <linux/personality.h>
#include <linux/ptrace.h>
+#include <linux/kernel.h>
#include <asm/unistd.h>
#include <asm/uaccess.h>
#include <asm/ucontext.h>
@@ -165,8 +166,6 @@ struct rt_sigframe
struct _fpstate fpstate;
};
-#define round_down(m, n) (((m) / (n)) * (n))
-
int setup_signal_stack_si(unsigned long stack_top, int sig,
struct k_sigaction *ka, struct pt_regs * regs,
siginfo_t *info, sigset_t *set)
diff --git a/arch/x86/.gitignore b/arch/x86/.gitignore
new file mode 100644
index 000000000000..028079065af6
--- /dev/null
+++ b/arch/x86/.gitignore
@@ -0,0 +1,3 @@
+boot/compressed/vmlinux
+tools/test_get_len
+
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 9458685902bd..983843d4522b 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -53,11 +53,16 @@ config X86
select HAVE_KERNEL_LZMA
select HAVE_KERNEL_LZO
select HAVE_HW_BREAKPOINT
+ select HAVE_MIXED_BREAKPOINTS_REGS
select PERF_EVENTS
+ select PERF_EVENTS_NMI
select ANON_INODES
select HAVE_ARCH_KMEMCHECK
select HAVE_USER_RETURN_NOTIFIER
+config INSTRUCTION_DECODER
+ def_bool (KPROBES || PERF_EVENTS)
+
config OUTPUT_FORMAT
string
default "elf32-i386" if X86_32
@@ -197,20 +202,17 @@ config HAVE_INTEL_TXT
# Use the generic interrupt handling code in kernel/irq/:
config GENERIC_HARDIRQS
- bool
- default y
+ def_bool y
config GENERIC_HARDIRQS_NO__DO_IRQ
def_bool y
config GENERIC_IRQ_PROBE
- bool
- default y
+ def_bool y
config GENERIC_PENDING_IRQ
- bool
+ def_bool y
depends on GENERIC_HARDIRQS && SMP
- default y
config USE_GENERIC_SMP_HELPERS
def_bool y
@@ -225,19 +227,22 @@ config X86_64_SMP
depends on X86_64 && SMP
config X86_HT
- bool
+ def_bool y
depends on SMP
- default y
config X86_TRAMPOLINE
- bool
+ def_bool y
depends on SMP || (64BIT && ACPI_SLEEP)
- default y
config X86_32_LAZY_GS
def_bool y
depends on X86_32 && !CC_STACKPROTECTOR
+config ARCH_HWEIGHT_CFLAGS
+ string
+ default "-fcall-saved-ecx -fcall-saved-edx" if X86_32
+ default "-fcall-saved-rdi -fcall-saved-rsi -fcall-saved-rdx -fcall-saved-rcx -fcall-saved-r8 -fcall-saved-r9 -fcall-saved-r10 -fcall-saved-r11" if X86_64
+
config KTIME_SCALAR
def_bool X86_32
source "init/Kconfig"
@@ -447,7 +452,7 @@ config X86_NUMAQ
firmware with - send email to <Martin.Bligh@us.ibm.com>.
config X86_SUPPORTS_MEMORY_FAILURE
- bool
+ def_bool y
# MCE code calls memory_failure():
depends on X86_MCE
# On 32-bit this adds too big of NODES_SHIFT and we run out of page flags:
@@ -455,7 +460,6 @@ config X86_SUPPORTS_MEMORY_FAILURE
# On 32-bit SPARSEMEM adds too big of SECTIONS_WIDTH:
depends on X86_64 || !SPARSEMEM
select ARCH_SUPPORTS_MEMORY_FAILURE
- default y
config X86_VISWS
bool "SGI 320/540 (Visual Workstation)"
@@ -570,7 +574,6 @@ config PARAVIRT_SPINLOCKS
config PARAVIRT_CLOCK
bool
- default n
endif
@@ -749,7 +752,6 @@ config MAXSMP
bool "Configure Maximum number of SMP Processors and NUMA Nodes"
depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL
select CPUMASK_OFFSTACK
- default n
---help---
Configure maximum number of CPUS and NUMA Nodes for this architecture.
If unsure, say N.
@@ -829,7 +831,6 @@ config X86_VISWS_APIC
config X86_REROUTE_FOR_BROKEN_BOOT_IRQS
bool "Reroute for broken boot IRQs"
- default n
depends on X86_IO_APIC
---help---
This option enables a workaround that fixes a source of
@@ -876,9 +877,8 @@ config X86_MCE_AMD
the DRAM Error Threshold.
config X86_ANCIENT_MCE
- def_bool n
+ bool "Support for old Pentium 5 / WinChip machine checks"
depends on X86_32 && X86_MCE
- prompt "Support for old Pentium 5 / WinChip machine checks"
---help---
Include support for machine check handling on old Pentium 5 or WinChip
systems. These typically need to be enabled explicitely on the command
@@ -886,8 +886,7 @@ config X86_ANCIENT_MCE
config X86_MCE_THRESHOLD
depends on X86_MCE_AMD || X86_MCE_INTEL
- bool
- default y
+ def_bool y
config X86_MCE_INJECT
depends on X86_MCE
@@ -1026,8 +1025,8 @@ config X86_CPUID
choice
prompt "High Memory Support"
- default HIGHMEM4G if !X86_NUMAQ
default HIGHMEM64G if X86_NUMAQ
+ default HIGHMEM4G
depends on X86_32
config NOHIGHMEM
@@ -1285,7 +1284,7 @@ source "mm/Kconfig"
config HIGHPTE
bool "Allocate 3rd-level pagetables from highmem"
- depends on X86_32 && (HIGHMEM4G || HIGHMEM64G)
+ depends on HIGHMEM
---help---
The VM uses one page table entry for each page of physical memory.
For systems with a lot of RAM, this can be wasteful of precious
@@ -1369,8 +1368,7 @@ config MATH_EMULATION
kernel, it won't hurt.
config MTRR
- bool
- default y
+ def_bool y
prompt "MTRR (Memory Type Range Register) support" if EMBEDDED
---help---
On Intel P6 family processors (Pentium Pro, Pentium II and later)
@@ -1436,8 +1434,7 @@ config MTRR_SANITIZER_SPARE_REG_NR_DEFAULT
mtrr_spare_reg_nr=N on the kernel command line.
config X86_PAT
- bool
- default y
+ def_bool y
prompt "x86 PAT support" if EMBEDDED
depends on MTRR
---help---
@@ -1605,8 +1602,7 @@ config X86_NEED_RELOCS
depends on X86_32 && RELOCATABLE
config PHYSICAL_ALIGN
- hex
- prompt "Alignment value to which kernel should be aligned" if X86_32
+ hex "Alignment value to which kernel should be aligned" if X86_32
default "0x1000000"
range 0x2000 0x1000000
---help---
@@ -1653,7 +1649,6 @@ config COMPAT_VDSO
config CMDLINE_BOOL
bool "Built-in kernel command line"
- default n
---help---
Allow for specifying boot arguments to the kernel at
build time. On some systems (e.g. embedded ones), it is
@@ -1687,7 +1682,6 @@ config CMDLINE
config CMDLINE_OVERRIDE
bool "Built-in command line overrides boot loader arguments"
- default n
depends on CMDLINE_BOOL
---help---
Set this option to 'Y' to have the kernel ignore the boot loader
@@ -1723,8 +1717,7 @@ source "drivers/acpi/Kconfig"
source "drivers/sfi/Kconfig"
config X86_APM_BOOT
- bool
- default y
+ def_bool y
depends on APM || APM_MODULE
menuconfig APM
@@ -1953,8 +1946,7 @@ config DMAR_DEFAULT_ON
experimental.
config DMAR_BROKEN_GFX_WA
- def_bool n
- prompt "Workaround broken graphics drivers (going away soon)"
+ bool "Workaround broken graphics drivers (going away soon)"
depends on DMAR && BROKEN
---help---
Current Graphics drivers tend to use physical address
@@ -2052,7 +2044,6 @@ config SCx200HR_TIMER
config OLPC
bool "One Laptop Per Child support"
select GPIOLIB
- default n
---help---
Add support for detecting the unique features of the OLPC
XO hardware.
@@ -2067,6 +2058,8 @@ source "drivers/pcmcia/Kconfig"
source "drivers/pci/hotplug/Kconfig"
+source "drivers/vbus/Kconfig"
+
endmenu
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index a19829374e6a..2ac9069890cd 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -338,6 +338,10 @@ config X86_F00F_BUG
def_bool y
depends on M586MMX || M586TSC || M586 || M486 || M386
+config X86_INVD_BUG
+ def_bool y
+ depends on M486 || M386
+
config X86_WP_WORKS_OK
def_bool y
depends on !M386
@@ -502,23 +506,3 @@ config CPU_SUP_UMC_32
CPU might render the kernel unbootable.
If unsure, say N.
-
-config X86_DS
- def_bool X86_PTRACE_BTS
- depends on X86_DEBUGCTLMSR
- select HAVE_HW_BRANCH_TRACER
-
-config X86_PTRACE_BTS
- bool "Branch Trace Store"
- default y
- depends on X86_DEBUGCTLMSR
- depends on BROKEN
- ---help---
- This adds a ptrace interface to the hardware's branch trace store.
-
- Debuggers may use it to collect an execution trace of the debugged
- application in order to answer the question 'how did I get here?'.
- Debuggers may trace user mode as well as kernel mode.
-
- Say Y unless there is no application development on this machine
- and you want to save a small amount of code size.
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index bc01e3ebfeb2..75085080b63e 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -45,7 +45,6 @@ config EARLY_PRINTK
config EARLY_PRINTK_DBGP
bool "Early printk via EHCI debug port"
- default n
depends on EARLY_PRINTK && PCI
---help---
Write kernel log output directly into the EHCI debug port.
@@ -76,7 +75,6 @@ config DEBUG_PER_CPU_MAPS
bool "Debug access to per_cpu maps"
depends on DEBUG_KERNEL
depends on SMP
- default n
---help---
Say Y to verify that the per_cpu map being accessed has
been setup. Adds a fair amount of code to kernel memory
@@ -174,15 +172,6 @@ config IOMMU_LEAK
Add a simple leak tracer to the IOMMU code. This is useful when you
are debugging a buggy device driver that leaks IOMMU mappings.
-config X86_DS_SELFTEST
- bool "DS selftest"
- default y
- depends on DEBUG_KERNEL
- depends on X86_DS
- ---help---
- Perform Debug Store selftests at boot time.
- If in doubt, say "N".
-
config HAVE_MMIOTRACE_SUPPORT
def_bool y
diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
index bcbd36c41432..5c228129d175 100644
--- a/arch/x86/boot/compressed/mkpiggy.c
+++ b/arch/x86/boot/compressed/mkpiggy.c
@@ -77,7 +77,7 @@ int main(int argc, char *argv[])
offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
- printf(".section \".rodata.compressed\",\"a\",@progbits\n");
+ printf(".section \".rodata..compressed\",\"a\",@progbits\n");
printf(".globl z_input_len\n");
printf("z_input_len = %lu\n", ilen);
printf(".globl z_output_len\n");
diff --git a/arch/x86/boot/compressed/vmlinux.lds.S b/arch/x86/boot/compressed/vmlinux.lds.S
index a6f1a59a5b0c..5ddabceee124 100644
--- a/arch/x86/boot/compressed/vmlinux.lds.S
+++ b/arch/x86/boot/compressed/vmlinux.lds.S
@@ -26,8 +26,8 @@ SECTIONS
HEAD_TEXT
_ehead = . ;
}
- .rodata.compressed : {
- *(.rodata.compressed)
+ .rodata..compressed : {
+ *(.rodata..compressed)
}
.text : {
_text = .; /* Text */
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 20bb0e1ac681..ff16756a51c1 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -32,6 +32,9 @@
#define IN IN1
#define KEY %xmm2
#define IV %xmm3
+#define BSWAP_MASK %xmm10
+#define CTR %xmm11
+#define INC %xmm12
#define KEYP %rdi
#define OUTP %rsi
@@ -42,6 +45,7 @@
#define T1 %r10
#define TKEYP T1
#define T2 %r11
+#define TCTR_LOW T2
_key_expansion_128:
_key_expansion_256a:
@@ -724,3 +728,114 @@ ENTRY(aesni_cbc_dec)
movups IV, (IVP)
.Lcbc_dec_just_ret:
ret
+
+.align 16
+.Lbswap_mask:
+ .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
+
+/*
+ * _aesni_inc_init: internal ABI
+ * setup registers used by _aesni_inc
+ * input:
+ * IV
+ * output:
+ * CTR: == IV, in little endian
+ * TCTR_LOW: == lower qword of CTR
+ * INC: == 1, in little endian
+ * BSWAP_MASK == endian swapping mask
+ */
+_aesni_inc_init:
+ movaps .Lbswap_mask, BSWAP_MASK
+ movaps IV, CTR
+ PSHUFB_XMM BSWAP_MASK CTR
+ mov $1, TCTR_LOW
+ MOVQ_R64_XMM TCTR_LOW INC
+ MOVQ_R64_XMM CTR TCTR_LOW
+ ret
+
+/*
+ * _aesni_inc: internal ABI
+ * Increase IV by 1, IV is in big endian
+ * input:
+ * IV
+ * CTR: == IV, in little endian
+ * TCTR_LOW: == lower qword of CTR
+ * INC: == 1, in little endian
+ * BSWAP_MASK == endian swapping mask
+ * output:
+ * IV: Increase by 1
+ * changed:
+ * CTR: == output IV, in little endian
+ * TCTR_LOW: == lower qword of CTR
+ */
+_aesni_inc:
+ paddq INC, CTR
+ add $1, TCTR_LOW
+ jnc .Linc_low
+ pslldq $8, INC
+ paddq INC, CTR
+ psrldq $8, INC
+.Linc_low:
+ movaps CTR, IV
+ PSHUFB_XMM BSWAP_MASK IV
+ ret
+
+/*
+ * void aesni_ctr_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
+ * size_t len, u8 *iv)
+ */
+ENTRY(aesni_ctr_enc)
+ cmp $16, LEN
+ jb .Lctr_enc_just_ret
+ mov 480(KEYP), KLEN
+ movups (IVP), IV
+ call _aesni_inc_init
+ cmp $64, LEN
+ jb .Lctr_enc_loop1
+.align 4
+.Lctr_enc_loop4:
+ movaps IV, STATE1
+ call _aesni_inc
+ movups (INP), IN1
+ movaps IV, STATE2
+ call _aesni_inc
+ movups 0x10(INP), IN2
+ movaps IV, STATE3
+ call _aesni_inc
+ movups 0x20(INP), IN3
+ movaps IV, STATE4
+ call _aesni_inc
+ movups 0x30(INP), IN4
+ call _aesni_enc4
+ pxor IN1, STATE1
+ movups STATE1, (OUTP)
+ pxor IN2, STATE2
+ movups STATE2, 0x10(OUTP)
+ pxor IN3, STATE3
+ movups STATE3, 0x20(OUTP)
+ pxor IN4, STATE4
+ movups STATE4, 0x30(OUTP)
+ sub $64, LEN
+ add $64, INP
+ add $64, OUTP
+ cmp $64, LEN
+ jge .Lctr_enc_loop4
+ cmp $16, LEN
+ jb .Lctr_enc_ret
+.align 4
+.Lctr_enc_loop1:
+ movaps IV, STATE
+ call _aesni_inc
+ movups (INP), IN
+ call _aesni_enc1
+ pxor IN, STATE
+ movups STATE, (OUTP)
+ sub $16, LEN
+ add $16, INP
+ add $16, OUTP
+ cmp $16, LEN
+ jge .Lctr_enc_loop1
+.Lctr_enc_ret:
+ movups IV, (IVP)
+.Lctr_enc_just_ret:
+ ret
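
A note on _aesni_inc above: the counter is kept byte-swapped (little endian) in CTR so that a plain paddq increments the low quadword, and the jnc/pslldq path only runs when that quadword wraps and the carry must propagate into the high half. The net effect on the big-endian IV block is a plain 128-bit increment; in C it would be roughly the following (illustrative only, not kernel code).

#include <stdint.h>

/* Increment a 16-byte big-endian CTR block by one. */
static void ctr_block_inc(uint8_t iv[16])
{
	int i;

	for (i = 15; i >= 0; i--) {
		if (++iv[i] != 0)	/* stop once a byte does not wrap */
			break;
	}
}
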
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 49c552c060e9..2cb3dcc4490a 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -18,6 +18,7 @@
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/cryptd.h>
+#include <crypto/ctr.h>
#include <asm/i387.h>
#include <asm/aes.h>
@@ -58,6 +59,8 @@ asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len, u8 *iv);
+asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
+ const u8 *in, unsigned int len, u8 *iv);
static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
@@ -321,6 +324,72 @@ static struct crypto_alg blk_cbc_alg = {
},
};
+static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
+ struct blkcipher_walk *walk)
+{
+ u8 *ctrblk = walk->iv;
+ u8 keystream[AES_BLOCK_SIZE];
+ u8 *src = walk->src.virt.addr;
+ u8 *dst = walk->dst.virt.addr;
+ unsigned int nbytes = walk->nbytes;
+
+ aesni_enc(ctx, keystream, ctrblk);
+ crypto_xor(keystream, src, nbytes);
+ memcpy(dst, keystream, nbytes);
+ crypto_inc(ctrblk, AES_BLOCK_SIZE);
+}
+
+static int ctr_crypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
+ struct blkcipher_walk walk;
+ int err;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ kernel_fpu_begin();
+ while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
+ aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes & AES_BLOCK_MASK, walk.iv);
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+ if (walk.nbytes) {
+ ctr_crypt_final(ctx, &walk);
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
+ kernel_fpu_end();
+
+ return err;
+}
+
+static struct crypto_alg blk_ctr_alg = {
+ .cra_name = "__ctr-aes-aesni",
+ .cra_driver_name = "__driver-ctr-aes-aesni",
+ .cra_priority = 0,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
+ .cra_alignmask = 0,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(blk_ctr_alg.cra_list),
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = aes_set_key,
+ .encrypt = ctr_crypt,
+ .decrypt = ctr_crypt,
+ },
+ },
+};
+
static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int key_len)
{
@@ -467,13 +536,11 @@ static struct crypto_alg ablk_cbc_alg = {
},
};
-#ifdef HAS_CTR
static int ablk_ctr_init(struct crypto_tfm *tfm)
{
struct cryptd_ablkcipher *cryptd_tfm;
- cryptd_tfm = cryptd_alloc_ablkcipher("fpu(ctr(__driver-aes-aesni))",
- 0, 0);
+ cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ctr-aes-aesni", 0, 0);
if (IS_ERR(cryptd_tfm))
return PTR_ERR(cryptd_tfm);
ablk_init_common(tfm, cryptd_tfm);
@@ -500,11 +567,50 @@ static struct crypto_alg ablk_ctr_alg = {
.ivsize = AES_BLOCK_SIZE,
.setkey = ablk_set_key,
.encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
+ .decrypt = ablk_encrypt,
.geniv = "chainiv",
},
},
};
+
+#ifdef HAS_CTR
+static int ablk_rfc3686_ctr_init(struct crypto_tfm *tfm)
+{
+ struct cryptd_ablkcipher *cryptd_tfm;
+
+ cryptd_tfm = cryptd_alloc_ablkcipher(
+ "rfc3686(__driver-ctr-aes-aesni)", 0, 0);
+ if (IS_ERR(cryptd_tfm))
+ return PTR_ERR(cryptd_tfm);
+ ablk_init_common(tfm, cryptd_tfm);
+ return 0;
+}
+
+static struct crypto_alg ablk_rfc3686_ctr_alg = {
+ .cra_name = "rfc3686(ctr(aes))",
+ .cra_driver_name = "rfc3686-ctr-aes-aesni",
+ .cra_priority = 400,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct async_aes_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(ablk_rfc3686_ctr_alg.cra_list),
+ .cra_init = ablk_rfc3686_ctr_init,
+ .cra_exit = ablk_exit,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .setkey = ablk_set_key,
+ .encrypt = ablk_encrypt,
+ .decrypt = ablk_decrypt,
+ .geniv = "seqiv",
+ },
+ },
+};
#endif
#ifdef HAS_LRW
@@ -640,13 +746,17 @@ static int __init aesni_init(void)
goto blk_ecb_err;
if ((err = crypto_register_alg(&blk_cbc_alg)))
goto blk_cbc_err;
+ if ((err = crypto_register_alg(&blk_ctr_alg)))
+ goto blk_ctr_err;
if ((err = crypto_register_alg(&ablk_ecb_alg)))
goto ablk_ecb_err;
if ((err = crypto_register_alg(&ablk_cbc_alg)))
goto ablk_cbc_err;
-#ifdef HAS_CTR
if ((err = crypto_register_alg(&ablk_ctr_alg)))
goto ablk_ctr_err;
+#ifdef HAS_CTR
+ if ((err = crypto_register_alg(&ablk_rfc3686_ctr_alg)))
+ goto ablk_rfc3686_ctr_err;
#endif
#ifdef HAS_LRW
if ((err = crypto_register_alg(&ablk_lrw_alg)))
@@ -675,13 +785,17 @@ ablk_pcbc_err:
ablk_lrw_err:
#endif
#ifdef HAS_CTR
+ crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
+ablk_rfc3686_ctr_err:
+#endif
crypto_unregister_alg(&ablk_ctr_alg);
ablk_ctr_err:
-#endif
crypto_unregister_alg(&ablk_cbc_alg);
ablk_cbc_err:
crypto_unregister_alg(&ablk_ecb_alg);
ablk_ecb_err:
+ crypto_unregister_alg(&blk_ctr_alg);
+blk_ctr_err:
crypto_unregister_alg(&blk_cbc_alg);
blk_cbc_err:
crypto_unregister_alg(&blk_ecb_alg);
@@ -705,10 +819,12 @@ static void __exit aesni_exit(void)
crypto_unregister_alg(&ablk_lrw_alg);
#endif
#ifdef HAS_CTR
- crypto_unregister_alg(&ablk_ctr_alg);
+ crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
#endif
+ crypto_unregister_alg(&ablk_ctr_alg);
crypto_unregister_alg(&ablk_cbc_alg);
crypto_unregister_alg(&ablk_ecb_alg);
+ crypto_unregister_alg(&blk_ctr_alg);
crypto_unregister_alg(&blk_cbc_alg);
crypto_unregister_alg(&blk_ecb_alg);
crypto_unregister_alg(&__aesni_alg);
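
ctr_crypt_final() in the glue code above handles the trailing partial block exactly the way textbook CTR mode does: encrypt the counter block once, XOR only as many keystream bytes as there is data left, then bump the counter. Stripped of the kernel crypto API, the step looks roughly like this (plain-C sketch with a stubbed block cipher standing in for aesni_enc(); illustrative only).

#include <stdint.h>
#include <stddef.h>

#define BLOCK_SIZE 16

/* Stand-in for aesni_enc(ctx, out, in); any 16-byte block cipher will do. */
void block_encrypt(const void *key, uint8_t out[BLOCK_SIZE],
		   const uint8_t in[BLOCK_SIZE]);

/* Final CTR step, nbytes < BLOCK_SIZE remaining. */
static void ctr_final(const void *key, uint8_t ctrblk[BLOCK_SIZE],
		      uint8_t *dst, const uint8_t *src, size_t nbytes)
{
	uint8_t keystream[BLOCK_SIZE];
	size_t i;

	block_encrypt(key, keystream, ctrblk);	/* E_k(counter) */

	for (i = 0; i < nbytes; i++)		/* XOR only the tail bytes */
		dst[i] = src[i] ^ keystream[i];

	for (i = BLOCK_SIZE; i-- > 0; )		/* big-endian counter + 1 */
		if (++ctrblk[i] != 0)
			break;
}

CTR decryption is the same keystream-XOR operation as encryption, which is why blk_ctr_alg above registers ctr_crypt for both .encrypt and .decrypt.
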
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index e790bc1fbfa3..17cf65c94804 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -842,4 +842,6 @@ ia32_sys_call_table:
.quad compat_sys_rt_tgsigqueueinfo /* 335 */
.quad sys_perf_event_open
.quad compat_sys_recvmmsg
+ .quad sys_fanotify_init
+ .quad sys32_fanotify_mark
ia32_syscall_end:
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index 626be156d88d..3d093311d5e2 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -546,3 +546,12 @@ asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_lo,
return sys_fallocate(fd, mode, ((u64)offset_hi << 32) | offset_lo,
((u64)len_hi << 32) | len_lo);
}
+
+asmlinkage long sys32_fanotify_mark(int fanotify_fd, unsigned int flags,
+ u32 mask_lo, u32 mask_hi,
+ int fd, const char __user *pathname)
+{
+ return sys_fanotify_mark(fanotify_fd, flags,
+ ((u64)mask_hi << 32) | mask_lo,
+ fd, pathname);
+}
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
index b97f786a48d5..a63a68be1cce 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -6,8 +6,8 @@
.macro LOCK_PREFIX
1: lock
.section .smp_locks,"a"
- _ASM_ALIGN
- _ASM_PTR 1b
+ .balign 4
+ .long 1b - .
.previous
.endm
#else
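
The .smp_locks section now records each lock prefix as a 32-bit offset relative to the entry itself (".long 1b - .") rather than a full pointer, which halves the table on 64-bit and keeps it position-independent. A hedged userspace sketch of recovering the absolute address back from such a self-relative offset:

	#include <stdint.h>
	#include <stdio.h>

	/* One "entry" plus a nearby "instruction", kept in one object so the
	 * pointer arithmetic below is well defined. */
	static struct {
		int32_t slot;		/* stands in for a .smp_locks entry */
		char	insn;		/* stands in for the lock-prefixed instruction */
	} demo;

	/* Recover the absolute address: target = &slot + stored_offset. */
	static void *rel32_to_ptr(const int32_t *slot)
	{
		return (char *)slot + *slot;
	}

	int main(void)
	{
		/* Emulate ".long 1b - .": store target minus the slot's own address. */
		demo.slot = (int32_t)(&demo.insn - (char *)&demo.slot);

		printf("round-trip ok: %d\n",
		       rel32_to_ptr(&demo.slot) == (void *)&demo.insn);
		return 0;
	}
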
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index b09ec55650b3..03b6bb5394a0 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -28,20 +28,20 @@
*/
#ifdef CONFIG_SMP
-#define LOCK_PREFIX \
+#define LOCK_PREFIX_HERE \
".section .smp_locks,\"a\"\n" \
- _ASM_ALIGN "\n" \
- _ASM_PTR "661f\n" /* address */ \
+ ".balign 4\n" \
+ ".long 671f - .\n" /* offset */ \
".previous\n" \
- "661:\n\tlock; "
+ "671:"
+
+#define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; "
#else /* ! CONFIG_SMP */
+#define LOCK_PREFIX_HERE ""
#define LOCK_PREFIX ""
#endif
-/* This must be included *after* the definition of LOCK_PREFIX */
-#include <asm/cpufeature.h>
-
struct alt_instr {
u8 *instr; /* original instruction */
u8 *replacement;
@@ -96,6 +96,12 @@ static inline int alternatives_text_reserved(void *start, void *end)
".previous"
/*
+ * This must be included *after* the definition of ALTERNATIVE due to
+ * <asm/arch_hweight.h>
+ */
+#include <asm/cpufeature.h>
+
+/*
* Alternative instructions for different CPU types or capabilities.
*
* This allows to use optimized instructions even on generic binary
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index b4ac2cdcb64f..1fa03e04ae44 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -373,6 +373,7 @@ extern atomic_t init_deasserted;
extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip);
#endif
+#ifdef CONFIG_X86_LOCAL_APIC
static inline u32 apic_read(u32 reg)
{
return apic->read(reg);
@@ -403,10 +404,19 @@ static inline u32 safe_apic_wait_icr_idle(void)
return apic->safe_wait_icr_idle();
}
+#else /* CONFIG_X86_LOCAL_APIC */
+
+static inline u32 apic_read(u32 reg) { return 0; }
+static inline void apic_write(u32 reg, u32 val) { }
+static inline u64 apic_icr_read(void) { return 0; }
+static inline void apic_icr_write(u32 low, u32 high) { }
+static inline void apic_wait_icr_idle(void) { }
+static inline u32 safe_apic_wait_icr_idle(void) { return 0; }
+
+#endif /* CONFIG_X86_LOCAL_APIC */
static inline void ack_APIC_irq(void)
{
-#ifdef CONFIG_X86_LOCAL_APIC
/*
* ack_APIC_irq() actually gets compiled as a single instruction
* ... yummie.
@@ -414,7 +424,6 @@ static inline void ack_APIC_irq(void)
/* Docs say use 0 for future compatibility */
apic_write(APIC_EOI, 0);
-#endif
}
static inline unsigned default_get_apic_id(unsigned long x)
diff --git a/arch/x86/include/asm/arch_hweight.h b/arch/x86/include/asm/arch_hweight.h
new file mode 100644
index 000000000000..d1fc3c219ae6
--- /dev/null
+++ b/arch/x86/include/asm/arch_hweight.h
@@ -0,0 +1,59 @@
+#ifndef _ASM_X86_HWEIGHT_H
+#define _ASM_X86_HWEIGHT_H
+
+#ifdef CONFIG_64BIT
+/* popcnt %rdi, %rax */
+#define POPCNT ".byte 0xf3,0x48,0x0f,0xb8,0xc7"
+#define REG_IN "D"
+#define REG_OUT "a"
+#else
+/* popcnt %eax, %eax */
+#define POPCNT ".byte 0xf3,0x0f,0xb8,0xc0"
+#define REG_IN "a"
+#define REG_OUT "a"
+#endif
+
+/*
+ * __sw_hweightXX are called from within the alternatives below
+ * and callee-clobbered registers need to be taken care of. See
+ * ARCH_HWEIGHT_CFLAGS in <arch/x86/Kconfig> for the respective
+ * compiler switches.
+ */
+static inline unsigned int __arch_hweight32(unsigned int w)
+{
+ unsigned int res = 0;
+
+ asm (ALTERNATIVE("call __sw_hweight32", POPCNT, X86_FEATURE_POPCNT)
+ : "="REG_OUT (res)
+ : REG_IN (w));
+
+ return res;
+}
+
+static inline unsigned int __arch_hweight16(unsigned int w)
+{
+ return __arch_hweight32(w & 0xffff);
+}
+
+static inline unsigned int __arch_hweight8(unsigned int w)
+{
+ return __arch_hweight32(w & 0xff);
+}
+
+static inline unsigned long __arch_hweight64(__u64 w)
+{
+ unsigned long res = 0;
+
+#ifdef CONFIG_X86_32
+ return __arch_hweight32((u32)w) +
+ __arch_hweight32((u32)(w >> 32));
+#else
+ asm (ALTERNATIVE("call __sw_hweight64", POPCNT, X86_FEATURE_POPCNT)
+ : "="REG_OUT (res)
+ : REG_IN (w));
+#endif /* CONFIG_X86_32 */
+
+ return res;
+}
+
+#endif
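
__arch_hweight32() above patches in a POPCNT instruction where the CPU supports it and otherwise calls a software __sw_hweight32(). A portable sketch of such a software fallback, using the classic parallel bit count (illustrative, not the kernel's exact implementation):

	#include <stdint.h>
	#include <stdio.h>

	/* Parallel bit count: 2-bit sums, 4-bit sums, 8-bit sums, then add the
	 * four byte counts with one multiply and take the top byte. */
	static unsigned int sw_hweight32(uint32_t w)
	{
		w -= (w >> 1) & 0x55555555;
		w  = (w & 0x33333333) + ((w >> 2) & 0x33333333);
		w  = (w + (w >> 4)) & 0x0f0f0f0f;
		return (w * 0x01010101) >> 24;
	}

	int main(void)
	{
		printf("%u %u %u\n",
		       sw_hweight32(0), sw_hweight32(0xff), sw_hweight32(0xffffffff));
		return 0;
	}
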
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 8f8217b9bdac..706c69492c14 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -246,6 +246,29 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+/*
+ * atomic_dec_if_positive - decrement by 1 if old value positive
+ * @v: pointer of type atomic_t
+ *
+ * The function returns the old value of *v minus 1, even if
+ * the atomic variable, v, was not decremented.
+ */
+static inline int atomic_dec_if_positive(atomic_t *v)
+{
+ int c, old, dec;
+ c = atomic_read(v);
+ for (;;) {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ old = atomic_cmpxchg((v), c, dec);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return dec;
+}
+
/**
* atomic_inc_short - increment of a short integer
* @v: pointer to type int
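
atomic_dec_if_positive() above is a standard compare-and-swap retry loop: snapshot the value, compute the decrement, and only commit if nobody changed the variable in between. A minimal C11 sketch of the same pattern (standalone, not the kernel helper):

	#include <stdatomic.h>
	#include <stdio.h>

	/* Decrement *v only if the old value is positive; return old value - 1
	 * either way, mirroring the semantics documented in the patch. */
	static int dec_if_positive(atomic_int *v)
	{
		int c = atomic_load(v);

		for (;;) {
			int dec = c - 1;

			if (dec < 0)
				return dec;	/* would go negative: give up */
			/* On failure, c is refreshed with the current value. */
			if (atomic_compare_exchange_weak(v, &c, dec))
				return dec;
		}
	}

	int main(void)
	{
		atomic_int v = ATOMIC_VAR_INIT(1);

		printf("%d %d\n", dec_if_positive(&v), dec_if_positive(&v));
		return 0;
	}
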
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index 03027bf28de5..2a934aa19a43 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -14,109 +14,193 @@ typedef struct {
#define ATOMIC64_INIT(val) { (val) }
-extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
+#ifdef CONFIG_X86_CMPXCHG64
+#define ATOMIC64_ALTERNATIVE_(f, g) "call atomic64_" #g "_cx8"
+#else
+#define ATOMIC64_ALTERNATIVE_(f, g) ALTERNATIVE("call atomic64_" #f "_386", "call atomic64_" #g "_cx8", X86_FEATURE_CX8)
+#endif
+
+#define ATOMIC64_ALTERNATIVE(f) ATOMIC64_ALTERNATIVE_(f, f)
+
+/**
+ * atomic64_cmpxchg - cmpxchg atomic64 variable
+ * @p: pointer to type atomic64_t
+ * @o: expected value
+ * @n: new value
+ *
+ * Atomically sets @v to @n if it was equal to @o and returns
+ * the old value.
+ */
+
+static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
+{
+ return cmpxchg64(&v->counter, o, n);
+}
/**
* atomic64_xchg - xchg atomic64 variable
- * @ptr: pointer to type atomic64_t
- * @new_val: value to assign
+ * @v: pointer to type atomic64_t
+ * @n: value to assign
*
- * Atomically xchgs the value of @ptr to @new_val and returns
+ * Atomically xchgs the value of @v to @n and returns
* the old value.
*/
-extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
+static inline long long atomic64_xchg(atomic64_t *v, long long n)
+{
+ long long o;
+ unsigned high = (unsigned)(n >> 32);
+ unsigned low = (unsigned)n;
+ asm volatile(ATOMIC64_ALTERNATIVE(xchg)
+ : "=A" (o), "+b" (low), "+c" (high)
+ : "S" (v)
+ : "memory"
+ );
+ return o;
+}
/**
* atomic64_set - set atomic64 variable
- * @ptr: pointer to type atomic64_t
- * @new_val: value to assign
+ * @v: pointer to type atomic64_t
+ * @n: value to assign
*
- * Atomically sets the value of @ptr to @new_val.
+ * Atomically sets the value of @v to @n.
*/
-extern void atomic64_set(atomic64_t *ptr, u64 new_val);
+static inline void atomic64_set(atomic64_t *v, long long i)
+{
+ unsigned high = (unsigned)(i >> 32);
+ unsigned low = (unsigned)i;
+ asm volatile(ATOMIC64_ALTERNATIVE(set)
+ : "+b" (low), "+c" (high)
+ : "S" (v)
+ : "eax", "edx", "memory"
+ );
+}
/**
* atomic64_read - read atomic64 variable
- * @ptr: pointer to type atomic64_t
+ * @v: pointer to type atomic64_t
*
- * Atomically reads the value of @ptr and returns it.
+ * Atomically reads the value of @v and returns it.
*/
-static inline u64 atomic64_read(atomic64_t *ptr)
+static inline long long atomic64_read(atomic64_t *v)
{
- u64 res;
-
- /*
- * Note, we inline this atomic64_t primitive because
- * it only clobbers EAX/EDX and leaves the others
- * untouched. We also (somewhat subtly) rely on the
- * fact that cmpxchg8b returns the current 64-bit value
- * of the memory location we are touching:
- */
- asm volatile(
- "mov %%ebx, %%eax\n\t"
- "mov %%ecx, %%edx\n\t"
- LOCK_PREFIX "cmpxchg8b %1\n"
- : "=&A" (res)
- : "m" (*ptr)
- );
-
- return res;
-}
-
-extern u64 atomic64_read(atomic64_t *ptr);
+ long long r;
+ asm volatile(ATOMIC64_ALTERNATIVE(read)
+ : "=A" (r), "+c" (v)
+ : : "memory"
+ );
+ return r;
+ }
/**
* atomic64_add_return - add and return
- * @delta: integer value to add
- * @ptr: pointer to type atomic64_t
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
*
- * Atomically adds @delta to @ptr and returns @delta + *@ptr
+ * Atomically adds @i to @v and returns @i + *@v
*/
-extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr);
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
+{
+ asm volatile(ATOMIC64_ALTERNATIVE(add_return)
+ : "+A" (i), "+c" (v)
+ : : "memory"
+ );
+ return i;
+}
/*
* Other variants with different arithmetic operators:
*/
-extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
-extern u64 atomic64_inc_return(atomic64_t *ptr);
-extern u64 atomic64_dec_return(atomic64_t *ptr);
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
+{
+ asm volatile(ATOMIC64_ALTERNATIVE(sub_return)
+ : "+A" (i), "+c" (v)
+ : : "memory"
+ );
+ return i;
+}
+
+static inline long long atomic64_inc_return(atomic64_t *v)
+{
+ long long a;
+ asm volatile(ATOMIC64_ALTERNATIVE(inc_return)
+ : "=A" (a)
+ : "S" (v)
+ : "memory", "ecx"
+ );
+ return a;
+}
+
+static inline long long atomic64_dec_return(atomic64_t *v)
+{
+ long long a;
+ asm volatile(ATOMIC64_ALTERNATIVE(dec_return)
+ : "=A" (a)
+ : "S" (v)
+ : "memory", "ecx"
+ );
+ return a;
+}
/**
* atomic64_add - add integer to atomic64 variable
- * @delta: integer value to add
- * @ptr: pointer to type atomic64_t
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
*
- * Atomically adds @delta to @ptr.
+ * Atomically adds @i to @v.
*/
-extern void atomic64_add(u64 delta, atomic64_t *ptr);
+static inline long long atomic64_add(long long i, atomic64_t *v)
+{
+ asm volatile(ATOMIC64_ALTERNATIVE_(add, add_return)
+ : "+A" (i), "+c" (v)
+ : : "memory"
+ );
+ return i;
+}
/**
* atomic64_sub - subtract the atomic64 variable
- * @delta: integer value to subtract
- * @ptr: pointer to type atomic64_t
+ * @i: integer value to subtract
+ * @v: pointer to type atomic64_t
*
- * Atomically subtracts @delta from @ptr.
+ * Atomically subtracts @i from @v.
*/
-extern void atomic64_sub(u64 delta, atomic64_t *ptr);
+static inline long long atomic64_sub(long long i, atomic64_t *v)
+{
+ asm volatile(ATOMIC64_ALTERNATIVE_(sub, sub_return)
+ : "+A" (i), "+c" (v)
+ : : "memory"
+ );
+ return i;
+}
/**
* atomic64_sub_and_test - subtract value from variable and test result
- * @delta: integer value to subtract
- * @ptr: pointer to type atomic64_t
- *
- * Atomically subtracts @delta from @ptr and returns
+ * @i: integer value to subtract
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically subtracts @i from @v and returns
* true if the result is zero, or false for all
* other cases.
*/
-extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr);
+static inline int atomic64_sub_and_test(long long i, atomic64_t *v)
+{
+ return atomic64_sub_return(i, v) == 0;
+}
/**
* atomic64_inc - increment atomic64 variable
- * @ptr: pointer to type atomic64_t
+ * @v: pointer to type atomic64_t
*
- * Atomically increments @ptr by 1.
+ * Atomically increments @v by 1.
*/
-extern void atomic64_inc(atomic64_t *ptr);
+static inline void atomic64_inc(atomic64_t *v)
+{
+ asm volatile(ATOMIC64_ALTERNATIVE_(inc, inc_return)
+ : : "S" (v)
+ : "memory", "eax", "ecx", "edx"
+ );
+}
/**
* atomic64_dec - decrement atomic64 variable
@@ -124,37 +208,97 @@ extern void atomic64_inc(atomic64_t *ptr);
*
* Atomically decrements @ptr by 1.
*/
-extern void atomic64_dec(atomic64_t *ptr);
+static inline void atomic64_dec(atomic64_t *v)
+{
+ asm volatile(ATOMIC64_ALTERNATIVE_(dec, dec_return)
+ : : "S" (v)
+ : "memory", "eax", "ecx", "edx"
+ );
+}
/**
* atomic64_dec_and_test - decrement and test
- * @ptr: pointer to type atomic64_t
+ * @v: pointer to type atomic64_t
*
- * Atomically decrements @ptr by 1 and
+ * Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
* cases.
*/
-extern int atomic64_dec_and_test(atomic64_t *ptr);
+static inline int atomic64_dec_and_test(atomic64_t *v)
+{
+ return atomic64_dec_return(v) == 0;
+}
/**
* atomic64_inc_and_test - increment and test
- * @ptr: pointer to type atomic64_t
+ * @v: pointer to type atomic64_t
*
- * Atomically increments @ptr by 1
+ * Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
-extern int atomic64_inc_and_test(atomic64_t *ptr);
+static inline int atomic64_inc_and_test(atomic64_t *v)
+{
+ return atomic64_inc_return(v) == 0;
+}
/**
* atomic64_add_negative - add and test if negative
- * @delta: integer value to add
- * @ptr: pointer to type atomic64_t
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
*
- * Atomically adds @delta to @ptr and returns true
+ * Atomically adds @i to @v and returns true
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
-extern int atomic64_add_negative(u64 delta, atomic64_t *ptr);
+static inline int atomic64_add_negative(long long i, atomic64_t *v)
+{
+ return atomic64_add_return(i, v) < 0;
+}
+
+/**
+ * atomic64_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+{
+ unsigned low = (unsigned)u;
+ unsigned high = (unsigned)(u >> 32);
+ asm volatile(ATOMIC64_ALTERNATIVE(add_unless) "\n\t"
+ : "+A" (a), "+c" (v), "+S" (low), "+D" (high)
+ : : "memory");
+ return (int)a;
+}
+
+
+static inline int atomic64_inc_not_zero(atomic64_t *v)
+{
+ int r;
+ asm volatile(ATOMIC64_ALTERNATIVE(inc_not_zero)
+ : "=a" (r)
+ : "S" (v)
+ : "ecx", "edx", "memory"
+ );
+ return r;
+}
+
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
+{
+ long long r;
+ asm volatile(ATOMIC64_ALTERNATIVE(dec_if_positive)
+ : "=A" (r)
+ : "S" (v)
+ : "ecx", "memory"
+ );
+ return r;
+}
+
+#undef ATOMIC64_ALTERNATIVE
+#undef ATOMIC64_ALTERNATIVE_
#endif /* _ASM_X86_ATOMIC64_32_H */
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 51c5b4056929..4d6e2cd6c88c 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -221,4 +221,27 @@ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+/*
+ * atomic64_dec_if_positive - decrement by 1 if old value positive
+ * @v: pointer of type atomic_t
+ *
+ * The function returns the old value of *v minus 1, even if
+ * the atomic variable, v, was not decremented.
+ */
+static inline long atomic64_dec_if_positive(atomic64_t *v)
+{
+ long c, old, dec;
+ c = atomic64_read(v);
+ for (;;) {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ old = atomic64_cmpxchg((v), c, dec);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return dec;
+}
+
#endif /* _ASM_X86_ATOMIC64_64_H */
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 02b47a603fc8..545776efeb16 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -444,7 +444,9 @@ static inline int fls(int x)
#define ARCH_HAS_FAST_MULTIPLIER 1
-#include <asm-generic/bitops/hweight.h>
+#include <asm/arch_hweight.h>
+
+#include <asm-generic/bitops/const_hweight.h>
#endif /* __KERNEL__ */
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
index 7a1065958ba9..3b62ab56c7a0 100644
--- a/arch/x86/include/asm/boot.h
+++ b/arch/x86/include/asm/boot.h
@@ -24,7 +24,7 @@
#define MIN_KERNEL_ALIGN (_AC(1, UL) << MIN_KERNEL_ALIGN_LG2)
#if (CONFIG_PHYSICAL_ALIGN & (CONFIG_PHYSICAL_ALIGN-1)) || \
- (CONFIG_PHYSICAL_ALIGN < (_AC(1, UL) << MIN_KERNEL_ALIGN_LG2))
+ (CONFIG_PHYSICAL_ALIGN < MIN_KERNEL_ALIGN)
#error "Invalid value for CONFIG_PHYSICAL_ALIGN"
#endif
diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
index 2f9047cfaaca..48f99f15452e 100644
--- a/arch/x86/include/asm/cache.h
+++ b/arch/x86/include/asm/cache.h
@@ -7,7 +7,7 @@
#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
#define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 634c40a739a6..63e35ec9075c 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -44,9 +44,6 @@ static inline void copy_from_user_page(struct vm_area_struct *vma,
memcpy(dst, src, len);
}
-#define PG_WC PG_arch_1
-PAGEFLAG(WC, WC)
-
#ifdef CONFIG_X86_PAT
/*
* X86 PAT uses page flags WC and Uncached together to keep track of
@@ -55,16 +52,24 @@ PAGEFLAG(WC, WC)
* _PAGE_CACHE_UC_MINUS and fourth state where page's memory type has not
* been changed from its default (value of -1 used to denote this).
* Note we do not support _PAGE_CACHE_UC here.
- *
- * Caller must hold memtype_lock for atomicity.
*/
+
+#define _PGMT_DEFAULT 0
+#define _PGMT_WC (1UL << PG_arch_1)
+#define _PGMT_UC_MINUS (1UL << PG_uncached)
+#define _PGMT_WB (1UL << PG_uncached | 1UL << PG_arch_1)
+#define _PGMT_MASK (1UL << PG_uncached | 1UL << PG_arch_1)
+#define _PGMT_CLEAR_MASK (~_PGMT_MASK)
+
static inline unsigned long get_page_memtype(struct page *pg)
{
- if (!PageUncached(pg) && !PageWC(pg))
+ unsigned long pg_flags = pg->flags & _PGMT_MASK;
+
+ if (pg_flags == _PGMT_DEFAULT)
return -1;
- else if (!PageUncached(pg) && PageWC(pg))
+ else if (pg_flags == _PGMT_WC)
return _PAGE_CACHE_WC;
- else if (PageUncached(pg) && !PageWC(pg))
+ else if (pg_flags == _PGMT_UC_MINUS)
return _PAGE_CACHE_UC_MINUS;
else
return _PAGE_CACHE_WB;
@@ -72,25 +77,26 @@ static inline unsigned long get_page_memtype(struct page *pg)
static inline void set_page_memtype(struct page *pg, unsigned long memtype)
{
+ unsigned long memtype_flags = _PGMT_DEFAULT;
+ unsigned long old_flags;
+ unsigned long new_flags;
+
switch (memtype) {
case _PAGE_CACHE_WC:
- ClearPageUncached(pg);
- SetPageWC(pg);
+ memtype_flags = _PGMT_WC;
break;
case _PAGE_CACHE_UC_MINUS:
- SetPageUncached(pg);
- ClearPageWC(pg);
+ memtype_flags = _PGMT_UC_MINUS;
break;
case _PAGE_CACHE_WB:
- SetPageUncached(pg);
- SetPageWC(pg);
- break;
- default:
- case -1:
- ClearPageUncached(pg);
- ClearPageWC(pg);
+ memtype_flags = _PGMT_WB;
break;
}
+
+ do {
+ old_flags = pg->flags;
+ new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
+ } while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
}
#else
static inline unsigned long get_page_memtype(struct page *pg) { return -1; }
@@ -139,9 +145,11 @@ int set_memory_np(unsigned long addr, int numpages);
int set_memory_4k(unsigned long addr, int numpages);
int set_memory_array_uc(unsigned long *addr, int addrinarray);
+int set_memory_array_wc(unsigned long *addr, int addrinarray);
int set_memory_array_wb(unsigned long *addr, int addrinarray);
int set_pages_array_uc(struct page **pages, int addrinarray);
+int set_pages_array_wc(struct page **pages, int addrinarray);
int set_pages_array_wb(struct page **pages, int addrinarray);
/*
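
set_page_memtype() above replaces the lock-protected page-flag updates with a lock-free read-modify-write: build the new flags word from a snapshot and retry the cmpxchg until no other updater raced in. A small C11 sketch of that two-bit field update (the field position is illustrative):

	#include <stdatomic.h>
	#include <stdio.h>

	#define MT_SHIFT	3			/* illustrative bit position */
	#define MT_MASK		(3UL << MT_SHIFT)

	/* Atomically replace the 2-bit memtype field inside a larger flags word,
	 * leaving every other bit untouched. */
	static void set_memtype(atomic_ulong *flags, unsigned long memtype)
	{
		unsigned long old = atomic_load(flags);
		unsigned long new;

		do {
			new = (old & ~MT_MASK) | ((memtype << MT_SHIFT) & MT_MASK);
			/* On failure, old is refreshed with the current value. */
		} while (!atomic_compare_exchange_weak(flags, &old, new));
	}

	int main(void)
	{
		atomic_ulong flags = 0x81;	/* unrelated bits already set */

		set_memtype(&flags, 2);
		printf("flags=%#lx\n", atomic_load(&flags));
		return 0;
	}
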
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index ffb9bb6b6c37..8859e12dd3cf 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -271,7 +271,8 @@ extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) __old = (o); \
__typeof__(*(ptr)) __new = (n); \
- alternative_io("call cmpxchg8b_emu", \
+ alternative_io(LOCK_PREFIX_HERE \
+ "call cmpxchg8b_emu", \
"lock; cmpxchg8b (%%esi)" , \
X86_FEATURE_CX8, \
"=A" (__ret), \
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 0cd82d068613..630e623f61e0 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -161,6 +161,7 @@
*/
#define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */
#define X86_FEATURE_ARAT (7*32+ 1) /* Always Running APIC Timer */
+#define X86_FEATURE_CPB (7*32+ 2) /* AMD Core Performance Boost */
/* Virtualization flags: Linux defined */
#define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */
diff --git a/arch/x86/include/asm/ds.h b/arch/x86/include/asm/ds.h
deleted file mode 100644
index 70dac199b093..000000000000
--- a/arch/x86/include/asm/ds.h
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- * Debug Store (DS) support
- *
- * This provides a low-level interface to the hardware's Debug Store
- * feature that is used for branch trace store (BTS) and
- * precise-event based sampling (PEBS).
- *
- * It manages:
- * - DS and BTS hardware configuration
- * - buffer overflow handling (to be done)
- * - buffer access
- *
- * It does not do:
- * - security checking (is the caller allowed to trace the task)
- * - buffer allocation (memory accounting)
- *
- *
- * Copyright (C) 2007-2009 Intel Corporation.
- * Markus Metzger <markus.t.metzger@intel.com>, 2007-2009
- */
-
-#ifndef _ASM_X86_DS_H
-#define _ASM_X86_DS_H
-
-
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/err.h>
-
-
-#ifdef CONFIG_X86_DS
-
-struct task_struct;
-struct ds_context;
-struct ds_tracer;
-struct bts_tracer;
-struct pebs_tracer;
-
-typedef void (*bts_ovfl_callback_t)(struct bts_tracer *);
-typedef void (*pebs_ovfl_callback_t)(struct pebs_tracer *);
-
-
-/*
- * A list of features plus corresponding macros to talk about them in
- * the ds_request function's flags parameter.
- *
- * We use the enum to index an array of corresponding control bits;
- * we use the macro to index a flags bit-vector.
- */
-enum ds_feature {
- dsf_bts = 0,
- dsf_bts_kernel,
-#define BTS_KERNEL (1 << dsf_bts_kernel)
- /* trace kernel-mode branches */
-
- dsf_bts_user,
-#define BTS_USER (1 << dsf_bts_user)
- /* trace user-mode branches */
-
- dsf_bts_overflow,
- dsf_bts_max,
- dsf_pebs = dsf_bts_max,
-
- dsf_pebs_max,
- dsf_ctl_max = dsf_pebs_max,
- dsf_bts_timestamps = dsf_ctl_max,
-#define BTS_TIMESTAMPS (1 << dsf_bts_timestamps)
- /* add timestamps into BTS trace */
-
-#define BTS_USER_FLAGS (BTS_KERNEL | BTS_USER | BTS_TIMESTAMPS)
-};
-
-
-/*
- * Request BTS or PEBS
- *
- * Due to alignement constraints, the actual buffer may be slightly
- * smaller than the requested or provided buffer.
- *
- * Returns a pointer to a tracer structure on success, or
- * ERR_PTR(errcode) on failure.
- *
- * The interrupt threshold is independent from the overflow callback
- * to allow users to use their own overflow interrupt handling mechanism.
- *
- * The function might sleep.
- *
- * task: the task to request recording for
- * cpu: the cpu to request recording for
- * base: the base pointer for the (non-pageable) buffer;
- * size: the size of the provided buffer in bytes
- * ovfl: pointer to a function to be called on buffer overflow;
- * NULL if cyclic buffer requested
- * th: the interrupt threshold in records from the end of the buffer;
- * -1 if no interrupt threshold is requested.
- * flags: a bit-mask of the above flags
- */
-extern struct bts_tracer *ds_request_bts_task(struct task_struct *task,
- void *base, size_t size,
- bts_ovfl_callback_t ovfl,
- size_t th, unsigned int flags);
-extern struct bts_tracer *ds_request_bts_cpu(int cpu, void *base, size_t size,
- bts_ovfl_callback_t ovfl,
- size_t th, unsigned int flags);
-extern struct pebs_tracer *ds_request_pebs_task(struct task_struct *task,
- void *base, size_t size,
- pebs_ovfl_callback_t ovfl,
- size_t th, unsigned int flags);
-extern struct pebs_tracer *ds_request_pebs_cpu(int cpu,
- void *base, size_t size,
- pebs_ovfl_callback_t ovfl,
- size_t th, unsigned int flags);
-
-/*
- * Release BTS or PEBS resources
- * Suspend and resume BTS or PEBS tracing
- *
- * Must be called with irq's enabled.
- *
- * tracer: the tracer handle returned from ds_request_~()
- */
-extern void ds_release_bts(struct bts_tracer *tracer);
-extern void ds_suspend_bts(struct bts_tracer *tracer);
-extern void ds_resume_bts(struct bts_tracer *tracer);
-extern void ds_release_pebs(struct pebs_tracer *tracer);
-extern void ds_suspend_pebs(struct pebs_tracer *tracer);
-extern void ds_resume_pebs(struct pebs_tracer *tracer);
-
-/*
- * Release BTS or PEBS resources
- * Suspend and resume BTS or PEBS tracing
- *
- * Cpu tracers must call this on the traced cpu.
- * Task tracers must call ds_release_~_noirq() for themselves.
- *
- * May be called with irq's disabled.
- *
- * Returns 0 if successful;
- * -EPERM if the cpu tracer does not trace the current cpu.
- * -EPERM if the task tracer does not trace itself.
- *
- * tracer: the tracer handle returned from ds_request_~()
- */
-extern int ds_release_bts_noirq(struct bts_tracer *tracer);
-extern int ds_suspend_bts_noirq(struct bts_tracer *tracer);
-extern int ds_resume_bts_noirq(struct bts_tracer *tracer);
-extern int ds_release_pebs_noirq(struct pebs_tracer *tracer);
-extern int ds_suspend_pebs_noirq(struct pebs_tracer *tracer);
-extern int ds_resume_pebs_noirq(struct pebs_tracer *tracer);
-
-
-/*
- * The raw DS buffer state as it is used for BTS and PEBS recording.
- *
- * This is the low-level, arch-dependent interface for working
- * directly on the raw trace data.
- */
-struct ds_trace {
- /* the number of bts/pebs records */
- size_t n;
- /* the size of a bts/pebs record in bytes */
- size_t size;
- /* pointers into the raw buffer:
- - to the first entry */
- void *begin;
- /* - one beyond the last entry */
- void *end;
- /* - one beyond the newest entry */
- void *top;
- /* - the interrupt threshold */
- void *ith;
- /* flags given on ds_request() */
- unsigned int flags;
-};
-
-/*
- * An arch-independent view on branch trace data.
- */
-enum bts_qualifier {
- bts_invalid,
-#define BTS_INVALID bts_invalid
-
- bts_branch,
-#define BTS_BRANCH bts_branch
-
- bts_task_arrives,
-#define BTS_TASK_ARRIVES bts_task_arrives
-
- bts_task_departs,
-#define BTS_TASK_DEPARTS bts_task_departs
-
- bts_qual_bit_size = 4,
- bts_qual_max = (1 << bts_qual_bit_size),
-};
-
-struct bts_struct {
- __u64 qualifier;
- union {
- /* BTS_BRANCH */
- struct {
- __u64 from;
- __u64 to;
- } lbr;
- /* BTS_TASK_ARRIVES or BTS_TASK_DEPARTS */
- struct {
- __u64 clock;
- pid_t pid;
- } event;
- } variant;
-};
-
-
-/*
- * The BTS state.
- *
- * This gives access to the raw DS state and adds functions to provide
- * an arch-independent view of the BTS data.
- */
-struct bts_trace {
- struct ds_trace ds;
-
- int (*read)(struct bts_tracer *tracer, const void *at,
- struct bts_struct *out);
- int (*write)(struct bts_tracer *tracer, const struct bts_struct *in);
-};
-
-
-/*
- * The PEBS state.
- *
- * This gives access to the raw DS state and the PEBS-specific counter
- * reset value.
- */
-struct pebs_trace {
- struct ds_trace ds;
-
- /* the number of valid counters in the below array */
- unsigned int counters;
-
-#define MAX_PEBS_COUNTERS 4
- /* the counter reset value */
- unsigned long long counter_reset[MAX_PEBS_COUNTERS];
-};
-
-
-/*
- * Read the BTS or PEBS trace.
- *
- * Returns a view on the trace collected for the parameter tracer.
- *
- * The view remains valid as long as the traced task is not running or
- * the tracer is suspended.
- * Writes into the trace buffer are not reflected.
- *
- * tracer: the tracer handle returned from ds_request_~()
- */
-extern const struct bts_trace *ds_read_bts(struct bts_tracer *tracer);
-extern const struct pebs_trace *ds_read_pebs(struct pebs_tracer *tracer);
-
-
-/*
- * Reset the write pointer of the BTS/PEBS buffer.
- *
- * Returns 0 on success; -Eerrno on error
- *
- * tracer: the tracer handle returned from ds_request_~()
- */
-extern int ds_reset_bts(struct bts_tracer *tracer);
-extern int ds_reset_pebs(struct pebs_tracer *tracer);
-
-/*
- * Set the PEBS counter reset value.
- *
- * Returns 0 on success; -Eerrno on error
- *
- * tracer: the tracer handle returned from ds_request_pebs()
- * counter: the index of the counter
- * value: the new counter reset value
- */
-extern int ds_set_pebs_reset(struct pebs_tracer *tracer,
- unsigned int counter, u64 value);
-
-/*
- * Initialization
- */
-struct cpuinfo_x86;
-extern void __cpuinit ds_init_intel(struct cpuinfo_x86 *);
-
-/*
- * Context switch work
- */
-extern void ds_switch_to(struct task_struct *prev, struct task_struct *next);
-
-#else /* CONFIG_X86_DS */
-
-struct cpuinfo_x86;
-static inline void __cpuinit ds_init_intel(struct cpuinfo_x86 *ignored) {}
-static inline void ds_switch_to(struct task_struct *prev,
- struct task_struct *next) {}
-
-#endif /* CONFIG_X86_DS */
-#endif /* _ASM_X86_DS_H */
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index 0e22296790d3..ec8a52d14ab1 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -45,7 +45,12 @@
#define E820_NVS 4
#define E820_UNUSABLE 5
-/* reserved RAM used by kernel itself */
+/*
+ * reserved RAM used by kernel itself
+ * if CONFIG_INTEL_TXT is enabled, memory of this type will be
+ * included in the S3 integrity calculation and so should not include
+ * any memory that BIOS might alter over the S3 transition
+ */
#define E820_RESERVED_KERN 128
#ifndef __ASSEMBLY__
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 0f8576427cfe..aeab29aee617 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -35,7 +35,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
#define __ARCH_IRQ_STAT
-#define inc_irq_stat(member) percpu_add(irq_stat.member, 1)
+#define inc_irq_stat(member) percpu_inc(irq_stat.member)
#define local_softirq_pending() percpu_read(irq_stat.__softirq_pending)
diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h
index 1d5c08a1bdfd..004e6e25e913 100644
--- a/arch/x86/include/asm/hpet.h
+++ b/arch/x86/include/asm/hpet.h
@@ -68,6 +68,7 @@ extern unsigned long force_hpet_address;
extern u8 hpet_blockid;
extern int hpet_force_user;
extern u8 hpet_msi_disable;
+extern u8 hpet_readback_cmp;
extern int is_hpet_enabled(void);
extern int hpet_enable(void);
extern void hpet_disable(void);
diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h
index 2a1bd8f4f23a..942255310e6a 100644
--- a/arch/x86/include/asm/hw_breakpoint.h
+++ b/arch/x86/include/asm/hw_breakpoint.h
@@ -41,12 +41,16 @@ struct arch_hw_breakpoint {
/* Total number of available HW breakpoint registers */
#define HBP_NUM 4
+static inline int hw_breakpoint_slots(int type)
+{
+ return HBP_NUM;
+}
+
struct perf_event;
struct pmu;
-extern int arch_check_va_in_userspace(unsigned long va, u8 hbp_len);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp,
- struct task_struct *tsk);
+extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
+extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
unsigned long val, void *data);
diff --git a/arch/x86/include/asm/hyperv.h b/arch/x86/include/asm/hyperv.h
index e153a2b3889a..5df477ac3af7 100644
--- a/arch/x86/include/asm/hyperv.h
+++ b/arch/x86/include/asm/hyperv.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_KVM_HYPERV_H
-#define _ASM_X86_KVM_HYPERV_H
+#ifndef _ASM_X86_HYPERV_H
+#define _ASM_X86_HYPERV_H
#include <linux/types.h>
@@ -14,6 +14,10 @@
#define HYPERV_CPUID_ENLIGHTMENT_INFO 0x40000004
#define HYPERV_CPUID_IMPLEMENT_LIMITS 0x40000005
+#define HYPERV_HYPERVISOR_PRESENT_BIT 0x80000000
+#define HYPERV_CPUID_MIN 0x40000005
+#define HYPERV_CPUID_MAX 0x4000ffff
+
/*
* Feature identification. EAX indicates which features are available
* to the partition based upon the current partition privileges.
@@ -129,6 +133,9 @@
/* MSR used to provide vcpu index */
#define HV_X64_MSR_VP_INDEX 0x40000002
+/* MSR used to read the per-partition time reference counter */
+#define HV_X64_MSR_TIME_REF_COUNT 0x40000020
+
/* Define the virtual APIC registers */
#define HV_X64_MSR_EOI 0x40000070
#define HV_X64_MSR_ICR 0x40000071
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index b78c0941e422..70abda7058c8 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -17,10 +17,33 @@
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
-#ifndef ASM_X86__HYPERVISOR_H
-#define ASM_X86__HYPERVISOR_H
+#ifndef _ASM_X86_HYPERVISOR_H
+#define _ASM_X86_HYPERVISOR_H
extern void init_hypervisor(struct cpuinfo_x86 *c);
extern void init_hypervisor_platform(void);
+/*
+ * x86 hypervisor information
+ */
+struct hypervisor_x86 {
+ /* Hypervisor name */
+ const char *name;
+
+ /* Detection routine */
+ bool (*detect)(void);
+
+ /* Adjust CPU feature bits (run once per CPU) */
+ void (*set_cpu_features)(struct cpuinfo_x86 *);
+
+ /* Platform setup (run once per boot) */
+ void (*init_platform)(void);
+};
+
+extern const struct hypervisor_x86 *x86_hyper;
+
+/* Recognized hypervisors */
+extern const struct hypervisor_x86 x86_hyper_vmware;
+extern const struct hypervisor_x86 x86_hyper_ms_hyperv;
+
#endif
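
The new struct hypervisor_x86 turns hypervisor detection into a table of named handlers with a detect() callback; platform code can walk the table and keep the first match. A hedged sketch of that detection-table pattern with made-up entries:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	struct hyper_sketch {
		const char *name;
		bool (*detect)(void);		/* true if running on this hypervisor */
		void (*init_platform)(void);	/* optional per-boot setup */
	};

	static bool detect_foo(void) { return false; }
	static bool detect_bar(void) { return true; }
	static void init_bar(void)   { puts("bar platform setup"); }

	static const struct hyper_sketch hyper_foo = { "foo", detect_foo, NULL };
	static const struct hyper_sketch hyper_bar = { "bar", detect_bar, init_bar };

	static const struct hyper_sketch *hypers[] = { &hyper_foo, &hyper_bar };

	int main(void)
	{
		for (size_t i = 0; i < sizeof(hypers) / sizeof(hypers[0]); i++) {
			if (!hypers[i]->detect())
				continue;
			printf("detected: %s\n", hypers[i]->name);
			if (hypers[i]->init_platform)
				hypers[i]->init_platform();
			break;
		}
		return 0;
	}
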
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index da2930924501..1a8cca33b736 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -16,6 +16,7 @@
#include <linux/kernel_stat.h>
#include <linux/regset.h>
#include <linux/hardirq.h>
+#include <linux/slab.h>
#include <asm/asm.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
@@ -56,6 +57,18 @@ extern int restore_i387_xstate_ia32(void __user *buf);
#define X87_FSW_ES (1 << 7) /* Exception Summary */
+static inline bool use_xsave(void)
+{
+ u8 has_xsave;
+
+ alternative_io("mov $0, %0",
+ "mov $1, %0",
+ X86_FEATURE_XSAVE,
+ "=g"(has_xsave));
+
+ return has_xsave;
+}
+
#ifdef CONFIG_X86_64
/* Ignore delayed exceptions from user space */
@@ -91,15 +104,15 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
values. The kernel data segment can be sometimes 0 and sometimes
new user value. Both should be ok.
Use the PDA as safe address because it should be already in L1. */
-static inline void clear_fpu_state(struct task_struct *tsk)
+static inline void fpu_clear(struct fpu *fpu)
{
- struct xsave_struct *xstate = &tsk->thread.xstate->xsave;
- struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
+ struct xsave_struct *xstate = &fpu->state->xsave;
+ struct i387_fxsave_struct *fx = &fpu->state->fxsave;
/*
* xsave header may indicate the init state of the FP.
*/
- if ((task_thread_info(tsk)->status & TS_XSAVE) &&
+ if (use_xsave() &&
!(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
return;
@@ -111,6 +124,11 @@ static inline void clear_fpu_state(struct task_struct *tsk)
X86_FEATURE_FXSAVE_LEAK);
}
+static inline void clear_fpu_state(struct task_struct *tsk)
+{
+ fpu_clear(&tsk->thread.fpu);
+}
+
static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
{
int err;
@@ -135,7 +153,7 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
return err;
}
-static inline void fxsave(struct task_struct *tsk)
+static inline void fpu_fxsave(struct fpu *fpu)
{
/* Using "rex64; fxsave %0" is broken because, if the memory operand
uses any extended registers for addressing, a second REX prefix
@@ -145,42 +163,45 @@ static inline void fxsave(struct task_struct *tsk)
/* Using "fxsaveq %0" would be the ideal choice, but is only supported
starting with gas 2.16. */
__asm__ __volatile__("fxsaveq %0"
- : "=m" (tsk->thread.xstate->fxsave));
+ : "=m" (fpu->state->fxsave));
#elif 0
/* Using, as a workaround, the properly prefixed form below isn't
accepted by any binutils version so far released, complaining that
the same type of prefix is used twice if an extended register is
needed for addressing (fix submitted to mainline 2005-11-21). */
__asm__ __volatile__("rex64/fxsave %0"
- : "=m" (tsk->thread.xstate->fxsave));
+ : "=m" (fpu->state->fxsave));
#else
/* This, however, we can work around by forcing the compiler to select
an addressing mode that doesn't require extended registers. */
__asm__ __volatile__("rex64/fxsave (%1)"
- : "=m" (tsk->thread.xstate->fxsave)
- : "cdaSDb" (&tsk->thread.xstate->fxsave));
+ : "=m" (fpu->state->fxsave)
+ : "cdaSDb" (&fpu->state->fxsave));
#endif
}
-static inline void __save_init_fpu(struct task_struct *tsk)
+static inline void fpu_save_init(struct fpu *fpu)
{
- if (task_thread_info(tsk)->status & TS_XSAVE)
- xsave(tsk);
+ if (use_xsave())
+ fpu_xsave(fpu);
else
- fxsave(tsk);
+ fpu_fxsave(fpu);
- clear_fpu_state(tsk);
+ fpu_clear(fpu);
+}
+
+static inline void __save_init_fpu(struct task_struct *tsk)
+{
+ fpu_save_init(&tsk->thread.fpu);
task_thread_info(tsk)->status &= ~TS_USEDFPU;
}
#else /* CONFIG_X86_32 */
#ifdef CONFIG_MATH_EMULATION
-extern void finit_task(struct task_struct *tsk);
+extern void finit_soft_fpu(struct i387_soft_struct *soft);
#else
-static inline void finit_task(struct task_struct *tsk)
-{
-}
+static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
#endif
static inline void tolerant_fwait(void)
@@ -216,13 +237,13 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
/*
* These must be called with preempt disabled
*/
-static inline void __save_init_fpu(struct task_struct *tsk)
+static inline void fpu_save_init(struct fpu *fpu)
{
- if (task_thread_info(tsk)->status & TS_XSAVE) {
- struct xsave_struct *xstate = &tsk->thread.xstate->xsave;
- struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
+ if (use_xsave()) {
+ struct xsave_struct *xstate = &fpu->state->xsave;
+ struct i387_fxsave_struct *fx = &fpu->state->fxsave;
- xsave(tsk);
+ fpu_xsave(fpu);
/*
* xsave header may indicate the init state of the FP.
@@ -246,8 +267,8 @@ static inline void __save_init_fpu(struct task_struct *tsk)
"fxsave %[fx]\n"
"bt $7,%[fsw] ; jnc 1f ; fnclex\n1:",
X86_FEATURE_FXSR,
- [fx] "m" (tsk->thread.xstate->fxsave),
- [fsw] "m" (tsk->thread.xstate->fxsave.swd) : "memory");
+ [fx] "m" (fpu->state->fxsave),
+ [fsw] "m" (fpu->state->fxsave.swd) : "memory");
clear_state:
/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
is pending. Clear the x87 state here by setting it to fixed
@@ -259,17 +280,34 @@ clear_state:
X86_FEATURE_FXSAVE_LEAK,
[addr] "m" (safe_address));
end:
+ ;
+}
+
+static inline void __save_init_fpu(struct task_struct *tsk)
+{
+ fpu_save_init(&tsk->thread.fpu);
task_thread_info(tsk)->status &= ~TS_USEDFPU;
}
+
#endif /* CONFIG_X86_64 */
-static inline int restore_fpu_checking(struct task_struct *tsk)
+static inline int fpu_fxrstor_checking(struct fpu *fpu)
+{
+ return fxrstor_checking(&fpu->state->fxsave);
+}
+
+static inline int fpu_restore_checking(struct fpu *fpu)
{
- if (task_thread_info(tsk)->status & TS_XSAVE)
- return xrstor_checking(&tsk->thread.xstate->xsave);
+ if (use_xsave())
+ return fpu_xrstor_checking(fpu);
else
- return fxrstor_checking(&tsk->thread.xstate->fxsave);
+ return fpu_fxrstor_checking(fpu);
+}
+
+static inline int restore_fpu_checking(struct task_struct *tsk)
+{
+ return fpu_restore_checking(&tsk->thread.fpu);
}
/*
@@ -397,30 +435,59 @@ static inline void clear_fpu(struct task_struct *tsk)
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
{
if (cpu_has_fxsr) {
- return tsk->thread.xstate->fxsave.cwd;
+ return tsk->thread.fpu.state->fxsave.cwd;
} else {
- return (unsigned short)tsk->thread.xstate->fsave.cwd;
+ return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
}
}
static inline unsigned short get_fpu_swd(struct task_struct *tsk)
{
if (cpu_has_fxsr) {
- return tsk->thread.xstate->fxsave.swd;
+ return tsk->thread.fpu.state->fxsave.swd;
} else {
- return (unsigned short)tsk->thread.xstate->fsave.swd;
+ return (unsigned short)tsk->thread.fpu.state->fsave.swd;
}
}
static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
if (cpu_has_xmm) {
- return tsk->thread.xstate->fxsave.mxcsr;
+ return tsk->thread.fpu.state->fxsave.mxcsr;
} else {
return MXCSR_DEFAULT;
}
}
+static bool fpu_allocated(struct fpu *fpu)
+{
+ return fpu->state != NULL;
+}
+
+static inline int fpu_alloc(struct fpu *fpu)
+{
+ if (fpu_allocated(fpu))
+ return 0;
+ fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
+ if (!fpu->state)
+ return -ENOMEM;
+ WARN_ON((unsigned long)fpu->state & 15);
+ return 0;
+}
+
+static inline void fpu_free(struct fpu *fpu)
+{
+ if (fpu->state) {
+ kmem_cache_free(task_xstate_cachep, fpu->state);
+ fpu->state = NULL;
+ }
+}
+
+static inline void fpu_copy(struct fpu *dst, struct fpu *src)
+{
+ memcpy(dst->state, src->state, xstate_size);
+}
+
#endif /* __ASSEMBLY__ */
#define PSHUFB_XMM5_XMM0 .byte 0x66, 0x0f, 0x38, 0x00, 0xc5
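
fpu_alloc()/fpu_free() above put the FPU state behind a small lazily allocated, 16-byte-aligned object. A userspace sketch of that allocate-once-and-check-alignment idea (hypothetical names, plain malloc-family calls rather than kmem_cache):

	#include <assert.h>
	#include <stdlib.h>
	#include <string.h>
	#include <stdio.h>

	struct xstate_sketch { unsigned char bytes[512]; };	/* stand-in state  */
	struct fpu_sketch    { struct xstate_sketch *state; };	/* per-task handle */

	static int fpu_alloc_sketch(struct fpu_sketch *fpu)
	{
		if (fpu->state)					/* already allocated */
			return 0;
		if (posix_memalign((void **)&fpu->state, 16, sizeof(*fpu->state)))
			return -1;
		memset(fpu->state, 0, sizeof(*fpu->state));
		assert(((unsigned long)fpu->state & 15) == 0);	/* like the WARN_ON */
		return 0;
	}

	static void fpu_free_sketch(struct fpu_sketch *fpu)
	{
		free(fpu->state);
		fpu->state = NULL;
	}

	int main(void)
	{
		struct fpu_sketch f = { NULL };

		if (fpu_alloc_sketch(&f) == 0)
			puts("allocated and aligned");
		fpu_free_sketch(&f);
		return 0;
	}
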
diff --git a/arch/x86/include/asm/i8253.h b/arch/x86/include/asm/i8253.h
index 1edbf89680fd..fc1f579fb965 100644
--- a/arch/x86/include/asm/i8253.h
+++ b/arch/x86/include/asm/i8253.h
@@ -6,7 +6,7 @@
#define PIT_CH0 0x40
#define PIT_CH2 0x42
-extern spinlock_t i8253_lock;
+extern raw_spinlock_t i8253_lock;
extern struct clock_event_device *global_clock_event;
diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h
index 96c2e0ad04ca..88c765e16410 100644
--- a/arch/x86/include/asm/insn.h
+++ b/arch/x86/include/asm/insn.h
@@ -68,6 +68,8 @@ struct insn {
const insn_byte_t *next_byte;
};
+#define MAX_INSN_SIZE 16
+
#define X86_MODRM_MOD(modrm) (((modrm) & 0xc0) >> 6)
#define X86_MODRM_REG(modrm) (((modrm) & 0x38) >> 3)
#define X86_MODRM_RM(modrm) ((modrm) & 0x07)
diff --git a/arch/x86/include/asm/inst.h b/arch/x86/include/asm/inst.h
index 14cf526091f9..280bf7fb6aba 100644
--- a/arch/x86/include/asm/inst.h
+++ b/arch/x86/include/asm/inst.h
@@ -7,7 +7,66 @@
#ifdef __ASSEMBLY__
+#define REG_NUM_INVALID 100
+
+#define REG_TYPE_R64 0
+#define REG_TYPE_XMM 1
+#define REG_TYPE_INVALID 100
+
+ .macro R64_NUM opd r64
+ \opd = REG_NUM_INVALID
+ .ifc \r64,%rax
+ \opd = 0
+ .endif
+ .ifc \r64,%rcx
+ \opd = 1
+ .endif
+ .ifc \r64,%rdx
+ \opd = 2
+ .endif
+ .ifc \r64,%rbx
+ \opd = 3
+ .endif
+ .ifc \r64,%rsp
+ \opd = 4
+ .endif
+ .ifc \r64,%rbp
+ \opd = 5
+ .endif
+ .ifc \r64,%rsi
+ \opd = 6
+ .endif
+ .ifc \r64,%rdi
+ \opd = 7
+ .endif
+ .ifc \r64,%r8
+ \opd = 8
+ .endif
+ .ifc \r64,%r9
+ \opd = 9
+ .endif
+ .ifc \r64,%r10
+ \opd = 10
+ .endif
+ .ifc \r64,%r11
+ \opd = 11
+ .endif
+ .ifc \r64,%r12
+ \opd = 12
+ .endif
+ .ifc \r64,%r13
+ \opd = 13
+ .endif
+ .ifc \r64,%r14
+ \opd = 14
+ .endif
+ .ifc \r64,%r15
+ \opd = 15
+ .endif
+ .endm
+
.macro XMM_NUM opd xmm
+ \opd = REG_NUM_INVALID
.ifc \xmm,%xmm0
\opd = 0
.endif
@@ -58,13 +117,25 @@
.endif
.endm
+ .macro REG_TYPE type reg
+ R64_NUM reg_type_r64 \reg
+ XMM_NUM reg_type_xmm \reg
+ .if reg_type_r64 <> REG_NUM_INVALID
+ \type = REG_TYPE_R64
+ .elseif reg_type_xmm <> REG_NUM_INVALID
+ \type = REG_TYPE_XMM
+ .else
+ \type = REG_TYPE_INVALID
+ .endif
+ .endm
+
.macro PFX_OPD_SIZE
.byte 0x66
.endm
- .macro PFX_REX opd1 opd2
- .if (\opd1 | \opd2) & 8
- .byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1)
+ .macro PFX_REX opd1 opd2 W=0
+ .if ((\opd1 | \opd2) & 8) || \W
+ .byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1) | (\W << 3)
.endif
.endm
@@ -145,6 +216,25 @@
.byte 0x0f, 0x38, 0xdf
MODRM 0xc0 aesdeclast_opd1 aesdeclast_opd2
.endm
+
+ .macro MOVQ_R64_XMM opd1 opd2
+ REG_TYPE movq_r64_xmm_opd1_type \opd1
+ .if movq_r64_xmm_opd1_type == REG_TYPE_XMM
+ XMM_NUM movq_r64_xmm_opd1 \opd1
+ R64_NUM movq_r64_xmm_opd2 \opd2
+ .else
+ R64_NUM movq_r64_xmm_opd1 \opd1
+ XMM_NUM movq_r64_xmm_opd2 \opd2
+ .endif
+ PFX_OPD_SIZE
+ PFX_REX movq_r64_xmm_opd1 movq_r64_xmm_opd2 1
+ .if movq_r64_xmm_opd1_type == REG_TYPE_XMM
+ .byte 0x0f, 0x7e
+ .else
+ .byte 0x0f, 0x6e
+ .endif
+ MODRM 0xc0 movq_r64_xmm_opd1 movq_r64_xmm_opd2
+ .endm
#endif
#endif
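
The PFX_REX change above builds the REX prefix byte as 0x40 plus one extension bit taken from each operand's high register bit plus W << 3, so MOVQ between a general-purpose and an XMM register can carry REX.W. A tiny C sketch of that byte computation (operand numbers chosen for illustration):

	#include <stdio.h>

	/* Compute a REX prefix byte from two 4-bit register numbers and the W
	 * flag, mirroring the arithmetic in the PFX_REX macro. */
	static unsigned int rex_byte(unsigned int opd1, unsigned int opd2, unsigned int w)
	{
		return 0x40 | ((opd1 & 8) >> 3) | ((opd2 & 8) >> 1) | (w << 3);
	}

	int main(void)
	{
		/* e.g. operands 9 (an r8..r15 register) and 2 (xmm2), with W = 1 */
		printf("rex = %#x\n", rex_byte(9, 2, 1));
		return 0;
	}
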
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 35832a03a515..63cb4096c3dc 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -159,7 +159,6 @@ struct io_apic_irq_attr;
extern int io_apic_set_pci_routing(struct device *dev, int irq,
struct io_apic_irq_attr *irq_attr);
void setup_IO_APIC_irq_extra(u32 gsi);
-extern int (*ioapic_renumber_irq)(int ioapic, int irq);
extern void ioapic_init_mappings(void);
extern void ioapic_insert_resources(void);
@@ -180,12 +179,13 @@ extern void ioapic_write_entry(int apic, int pin,
extern void setup_ioapic_ids_from_mpc(void);
struct mp_ioapic_gsi{
- int gsi_base;
- int gsi_end;
+ u32 gsi_base;
+ u32 gsi_end;
};
extern struct mp_ioapic_gsi mp_gsi_routing[];
-int mp_find_ioapic(int gsi);
-int mp_find_ioapic_pin(int ioapic, int gsi);
+extern u32 gsi_end;
+int mp_find_ioapic(u32 gsi);
+int mp_find_ioapic_pin(int ioapic, u32 gsi);
void __init mp_register_ioapic(int id, u32 address, u32 gsi_base);
extern void __init pre_init_apic_IRQ0(void);
@@ -197,7 +197,8 @@ static const int timer_through_8259 = 0;
static inline void ioapic_init_mappings(void) { }
static inline void ioapic_insert_resources(void) { }
static inline void probe_nr_irqs_gsi(void) { }
-static inline int mp_find_ioapic(int gsi) { return 0; }
+#define gsi_end (NR_IRQS_LEGACY - 1)
+static inline int mp_find_ioapic(u32 gsi) { return 0; }
struct io_apic_irq_attr;
static inline int io_apic_set_pci_routing(struct device *dev, int irq,
diff --git a/arch/x86/include/asm/k8.h b/arch/x86/include/asm/k8.h
index f70e60071fe8..af00bd1d2089 100644
--- a/arch/x86/include/asm/k8.h
+++ b/arch/x86/include/asm/k8.h
@@ -16,11 +16,16 @@ extern int k8_numa_init(unsigned long start_pfn, unsigned long end_pfn);
extern int k8_scan_nodes(void);
#ifdef CONFIG_K8_NB
+extern int num_k8_northbridges;
+
static inline struct pci_dev *node_to_k8_nb_misc(int node)
{
return (node < num_k8_northbridges) ? k8_northbridges[node] : NULL;
}
+
#else
+#define num_k8_northbridges 0
+
static inline struct pci_dev *node_to_k8_nb_misc(int node)
{
return NULL;
diff --git a/arch/x86/include/asm/kgdb.h b/arch/x86/include/asm/kgdb.h
index e6c6c808489f..006da3687cdc 100644
--- a/arch/x86/include/asm/kgdb.h
+++ b/arch/x86/include/asm/kgdb.h
@@ -76,4 +76,7 @@ static inline void arch_kgdb_breakpoint(void)
#define BREAK_INSTR_SIZE 1
#define CACHE_FLUSH_IS_SAFE 1
+extern int kgdb_ll_trap(int cmd, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig);
+
#endif /* _ASM_X86_KGDB_H */
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 4ffa345a8ccb..547882539157 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -24,6 +24,7 @@
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
+#include <asm/insn.h>
#define __ARCH_WANT_KPROBES_INSN_SLOT
@@ -36,7 +37,6 @@ typedef u8 kprobe_opcode_t;
#define RELATIVEJUMP_SIZE 5
#define RELATIVECALL_OPCODE 0xe8
#define RELATIVE_ADDR_SIZE 4
-#define MAX_INSN_SIZE 16
#define MAX_STACK_SIZE 64
#define MIN_STACK_SIZE(ADDR) \
(((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h
index f46b79f6c16c..ff90055c7f0b 100644
--- a/arch/x86/include/asm/kvm.h
+++ b/arch/x86/include/asm/kvm.h
@@ -21,6 +21,7 @@
#define __KVM_HAVE_PIT_STATE2
#define __KVM_HAVE_XEN_HVM
#define __KVM_HAVE_VCPU_EVENTS
+#define __KVM_HAVE_DEBUGREGS
/* Architectural interrupt line count. */
#define KVM_NR_INTERRUPTS 256
@@ -257,6 +258,11 @@ struct kvm_reinject_control {
/* When set in flags, include corresponding fields on KVM_SET_VCPU_EVENTS */
#define KVM_VCPUEVENT_VALID_NMI_PENDING 0x00000001
#define KVM_VCPUEVENT_VALID_SIPI_VECTOR 0x00000002
+#define KVM_VCPUEVENT_VALID_SHADOW 0x00000004
+
+/* Interrupt shadow states */
+#define KVM_X86_SHADOW_INT_MOV_SS 0x01
+#define KVM_X86_SHADOW_INT_STI 0x02
/* for KVM_GET/SET_VCPU_EVENTS */
struct kvm_vcpu_events {
@@ -271,7 +277,7 @@ struct kvm_vcpu_events {
__u8 injected;
__u8 nr;
__u8 soft;
- __u8 pad;
+ __u8 shadow;
} interrupt;
struct {
__u8 injected;
@@ -284,4 +290,13 @@ struct kvm_vcpu_events {
__u32 reserved[10];
};
+/* for KVM_GET/SET_DEBUGREGS */
+struct kvm_debugregs {
+ __u64 db[4];
+ __u64 dr6;
+ __u64 dr7;
+ __u64 flags;
+ __u64 reserved[9];
+};
+
#endif /* _ASM_X86_KVM_H */
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 7a6f54fa13ba..0b2729bf2070 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -11,6 +11,8 @@
#ifndef _ASM_X86_KVM_X86_EMULATE_H
#define _ASM_X86_KVM_X86_EMULATE_H
+#include <asm/desc_defs.h>
+
struct x86_emulate_ctxt;
/*
@@ -63,6 +65,15 @@ struct x86_emulate_ops {
unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
/*
+ * write_std: Write bytes of standard (non-emulated/special) memory.
+ * Used for descriptor writing.
+ * @addr: [IN ] Linear address to which to write.
+ * @val: [OUT] Value write to memory, zero-extended to 'u_long'.
+ * @bytes: [IN ] Number of bytes to write to memory.
+ */
+ int (*write_std)(unsigned long addr, void *val,
+ unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
+ /*
* fetch: Read bytes of standard (non-emulated/special) memory.
* Used for instruction fetch.
* @addr: [IN ] Linear address from which to read.
@@ -109,6 +120,23 @@ struct x86_emulate_ops {
unsigned int bytes,
struct kvm_vcpu *vcpu);
+ int (*pio_in_emulated)(int size, unsigned short port, void *val,
+ unsigned int count, struct kvm_vcpu *vcpu);
+
+ int (*pio_out_emulated)(int size, unsigned short port, const void *val,
+ unsigned int count, struct kvm_vcpu *vcpu);
+
+ bool (*get_cached_descriptor)(struct desc_struct *desc,
+ int seg, struct kvm_vcpu *vcpu);
+ void (*set_cached_descriptor)(struct desc_struct *desc,
+ int seg, struct kvm_vcpu *vcpu);
+ u16 (*get_segment_selector)(int seg, struct kvm_vcpu *vcpu);
+ void (*set_segment_selector)(u16 sel, int seg, struct kvm_vcpu *vcpu);
+ void (*get_gdt)(struct desc_ptr *dt, struct kvm_vcpu *vcpu);
+ ulong (*get_cr)(int cr, struct kvm_vcpu *vcpu);
+ void (*set_cr)(int cr, ulong val, struct kvm_vcpu *vcpu);
+ int (*cpl)(struct kvm_vcpu *vcpu);
+ void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
};
/* Type, address-of, and value of an instruction's operand. */
@@ -124,6 +152,12 @@ struct fetch_cache {
unsigned long end;
};
+struct read_cache {
+ u8 data[1024];
+ unsigned long pos;
+ unsigned long end;
+};
+
struct decode_cache {
u8 twobyte;
u8 b;
@@ -139,7 +173,7 @@ struct decode_cache {
u8 seg_override;
unsigned int d;
unsigned long regs[NR_VCPU_REGS];
- unsigned long eip, eip_orig;
+ unsigned long eip;
/* modrm */
u8 modrm;
u8 modrm_mod;
@@ -151,16 +185,15 @@ struct decode_cache {
void *modrm_ptr;
unsigned long modrm_val;
struct fetch_cache fetch;
+ struct read_cache io_read;
};
-#define X86_SHADOW_INT_MOV_SS 1
-#define X86_SHADOW_INT_STI 2
-
struct x86_emulate_ctxt {
/* Register state before/after emulation. */
struct kvm_vcpu *vcpu;
unsigned long eflags;
+ unsigned long eip; /* eip before instruction emulation */
/* Emulated execution mode, represented by an X86EMUL_MODE value. */
int mode;
u32 cs_base;
@@ -168,6 +201,7 @@ struct x86_emulate_ctxt {
/* interruptibility state, as a result of execution of STI or MOV SS */
int interruptibility;
+ bool restart; /* restart string instruction after writeback */
/* decode cache */
struct decode_cache decode;
};
@@ -194,5 +228,9 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops);
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops);
+int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ u16 tss_selector, int reason,
+ bool has_error_code, u32 error_code);
#endif /* _ASM_X86_KVM_X86_EMULATE_H */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 06d9e79ca37d..3f0007b076da 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -171,14 +171,13 @@ struct kvm_pte_chain {
union kvm_mmu_page_role {
unsigned word;
struct {
- unsigned glevels:4;
unsigned level:4;
+ unsigned cr4_pae:1;
unsigned quadrant:2;
unsigned pad_for_nice_hex_output:6;
unsigned direct:1;
unsigned access:3;
unsigned invalid:1;
- unsigned cr4_pge:1;
unsigned nxe:1;
};
};
@@ -187,8 +186,6 @@ struct kvm_mmu_page {
struct list_head link;
struct hlist_node hash_link;
- struct list_head oos_link;
-
/*
* The following two entries are used to key the shadow page in the
* hash table.
@@ -204,9 +201,9 @@ struct kvm_mmu_page {
* in this shadow page.
*/
DECLARE_BITMAP(slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
- int multimapped; /* More than one parent_pte? */
- int root_count; /* Currently serving as active root */
+ bool multimapped; /* More than one parent_pte? */
bool unsync;
+ int root_count; /* Currently serving as active root */
unsigned int unsync_children;
union {
u64 *parent_pte; /* !multimapped */
@@ -224,14 +221,9 @@ struct kvm_pv_mmu_op_buffer {
struct kvm_pio_request {
unsigned long count;
- int cur_count;
- gva_t guest_gva;
int in;
int port;
int size;
- int string;
- int down;
- int rep;
};
/*
@@ -320,6 +312,7 @@ struct kvm_vcpu_arch {
struct kvm_queued_exception {
bool pending;
bool has_error_code;
+ bool reinject;
u8 nr;
u32 error_code;
} exception;
@@ -362,8 +355,8 @@ struct kvm_vcpu_arch {
u64 *mce_banks;
/* used for guest single stepping over the given code position */
- u16 singlestep_cs;
unsigned long singlestep_rip;
+
/* fields used by HYPER-V emulation */
u64 hv_vapic;
};
@@ -389,6 +382,7 @@ struct kvm_arch {
unsigned int n_free_mmu_pages;
unsigned int n_requested_mmu_pages;
unsigned int n_alloc_mmu_pages;
+ atomic_t invlpg_counter;
struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
/*
* Hash table of struct kvm_mmu_page.
@@ -461,11 +455,6 @@ struct kvm_vcpu_stat {
u32 nmi_injections;
};
-struct descriptor_table {
- u16 limit;
- unsigned long base;
-} __attribute__((packed));
-
struct kvm_x86_ops {
int (*cpu_has_kvm_support)(void); /* __init */
int (*disabled_by_bios)(void); /* __init */
@@ -503,12 +492,11 @@ struct kvm_x86_ops {
void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
- void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
- void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
- void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
- void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
- int (*get_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long *dest);
- int (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value);
+ void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+ void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+ void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+ void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+ void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
@@ -527,7 +515,8 @@ struct kvm_x86_ops {
void (*set_irq)(struct kvm_vcpu *vcpu);
void (*set_nmi)(struct kvm_vcpu *vcpu);
void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
- bool has_error_code, u32 error_code);
+ bool has_error_code, u32 error_code,
+ bool reinject);
int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
int (*nmi_allowed)(struct kvm_vcpu *vcpu);
bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
@@ -541,6 +530,8 @@ struct kvm_x86_ops {
int (*get_lpage_level)(void);
bool (*rdtscp_supported)(void);
+ void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);
+
const struct trace_print_flags *exit_reasons_str;
};
@@ -587,23 +578,14 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
void kvm_report_emulation_failure(struct kvm_vcpu *cvpu, const char *context);
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
-void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
- unsigned long *rflags);
-unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
-void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
- unsigned long *rflags);
void kvm_enable_efer_bits(u64);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
struct x86_emulate_ctxt;
-int kvm_emulate_pio(struct kvm_vcpu *vcpu, int in,
- int size, unsigned port);
-int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, int in,
- int size, unsigned long count, int down,
- gva_t address, int rep, unsigned port);
+int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
@@ -616,12 +598,15 @@ int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
-int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason);
+int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
+ bool has_error_code, u32 error_code);
void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
+int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
+int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
@@ -634,6 +619,8 @@ void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
+void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
+void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
u32 error_code);
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
@@ -649,8 +636,6 @@ int emulator_write_emulated(unsigned long addr,
unsigned int bytes,
struct kvm_vcpu *vcpu);
-unsigned long segment_base(u16 selector);
-
void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
const u8 *new, int bytes,
@@ -675,7 +660,6 @@ void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_enable_tdp(void);
void kvm_disable_tdp(void);
-int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
int complete_pio(struct kvm_vcpu *vcpu);
bool kvm_check_iopl(struct kvm_vcpu *vcpu);
@@ -724,23 +708,6 @@ static inline void kvm_load_ldt(u16 sel)
asm("lldt %0" : : "rm"(sel));
}
-static inline void kvm_get_idt(struct descriptor_table *table)
-{
- asm("sidt %0" : "=m"(*table));
-}
-
-static inline void kvm_get_gdt(struct descriptor_table *table)
-{
- asm("sgdt %0" : "=m"(*table));
-}
-
-static inline unsigned long kvm_read_tr_base(void)
-{
- u16 tr;
- asm("str %0" : "=g"(tr));
- return segment_base(tr);
-}
-
#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
@@ -826,4 +793,6 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_define_shared_msr(unsigned index, u32 msr);
void kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
+bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
+
#endif /* _ASM_X86_KVM_HOST_H */
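For context on the descriptor_table removal above: struct desc_ptr from <asm/desc_defs.h> carries the same packed 16-bit-limit-plus-base layout, which is why the private kvm_get_idt()/kvm_get_gdt() wrappers can go away. Below is a minimal sketch of the equivalent operation; the struct and helper names here are illustrative (in-tree code would simply use struct desc_ptr and store_idt() from <asm/desc.h>), but the field layout matches asm/desc_defs.h.

    /* Sketch only: read the IDT register into a generic descriptor pointer. */
    struct sketch_desc_ptr {
            unsigned short size;            /* limit (last valid byte offset) */
            unsigned long  address;         /* linear base address            */
    } __attribute__((packed));

    static inline void sketch_store_idt(struct sketch_desc_ptr *dtr)
    {
            asm volatile("sidt %0" : "=m" (*dtr));
    }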
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 6c3fdd631ed3..f32a4301c4d4 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -225,5 +225,13 @@ extern void mcheck_intel_therm_init(void);
static inline void mcheck_intel_therm_init(void) { }
#endif
+/*
+ * Used by APEI to report memory errors via /dev/mcelog
+ */
+
+struct cper_sec_mem_err;
+extern void apei_mce_report_mem_error(int corrected,
+ struct cper_sec_mem_err *mem_err);
+
#endif /* __KERNEL__ */
#endif /* _ASM_X86_MCE_H */
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h
index d8bf23a88d05..c82868e9f905 100644
--- a/arch/x86/include/asm/mpspec.h
+++ b/arch/x86/include/asm/mpspec.h
@@ -105,16 +105,6 @@ extern void mp_config_acpi_legacy_irqs(void);
struct device;
extern int mp_register_gsi(struct device *dev, u32 gsi, int edge_level,
int active_high_low);
-extern int acpi_probe_gsi(void);
-#ifdef CONFIG_X86_IO_APIC
-extern int mp_find_ioapic(int gsi);
-extern int mp_find_ioapic_pin(int ioapic, int gsi);
-#endif
-#else /* !CONFIG_ACPI: */
-static inline int acpi_probe_gsi(void)
-{
- return 0;
-}
#endif /* CONFIG_ACPI */
#define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS)
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
new file mode 100644
index 000000000000..79ce5685ab64
--- /dev/null
+++ b/arch/x86/include/asm/mshyperv.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_X86_MSHYPER_H
+#define _ASM_X86_MSHYPER_H
+
+#include <linux/types.h>
+#include <asm/hyperv.h>
+
+struct ms_hyperv_info {
+ u32 features;
+ u32 hints;
+};
+
+extern struct ms_hyperv_info ms_hyperv;
+
+#endif
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 4604e6a54d36..bc473acfa7f9 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -71,11 +71,14 @@
#define MSR_IA32_LASTINTTOIP 0x000001de
/* DEBUGCTLMSR bits (others vary by model): */
-#define _DEBUGCTLMSR_LBR 0 /* last branch recording */
-#define _DEBUGCTLMSR_BTF 1 /* single-step on branches */
-
-#define DEBUGCTLMSR_LBR (1UL << _DEBUGCTLMSR_LBR)
-#define DEBUGCTLMSR_BTF (1UL << _DEBUGCTLMSR_BTF)
+#define DEBUGCTLMSR_LBR (1UL << 0) /* last branch recording */
+#define DEBUGCTLMSR_BTF (1UL << 1) /* single-step on branches */
+#define DEBUGCTLMSR_TR (1UL << 6)
+#define DEBUGCTLMSR_BTS (1UL << 7)
+#define DEBUGCTLMSR_BTINT (1UL << 8)
+#define DEBUGCTLMSR_BTS_OFF_OS (1UL << 9)
+#define DEBUGCTLMSR_BTS_OFF_USR (1UL << 10)
+#define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11)
#define MSR_IA32_MC0_CTL 0x00000400
#define MSR_IA32_MC0_STATUS 0x00000401
@@ -359,6 +362,8 @@
#define MSR_P4_U2L_ESCR0 0x000003b0
#define MSR_P4_U2L_ESCR1 0x000003b1
+#define MSR_P4_PEBS_MATRIX_VERT 0x000003f2
+
/* Intel Core-based CPU performance counters */
#define MSR_CORE_PERF_FIXED_CTR0 0x00000309
#define MSR_CORE_PERF_FIXED_CTR1 0x0000030a
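The new DEBUGCTLMSR_* masks above name individual bits of IA32_DEBUGCTL. As a hedged sketch, setting the BTF bit (trap on branches rather than on every instruction) on the current CPU could look like the snippet below; the function name is invented, and in-tree code would normally go through get_debugctlmsr()/update_debugctlmsr().

    static void sketch_enable_branch_single_step(void)
    {
            unsigned long debugctl;

            rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);   /* read IA32_DEBUGCTL     */
            debugctl |= DEBUGCTLMSR_BTF;              /* trap only on branches  */
            wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);   /* write it back          */
    }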
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index 93da9c3f3341..5b41b0feb6db 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -17,7 +17,9 @@ int do_nmi_callback(struct pt_regs *regs, int cpu);
extern void die_nmi(char *str, struct pt_regs *regs, int do_panic);
extern int check_nmi_watchdog(void);
+#if !defined(CONFIG_NMI_WATCHDOG)
extern int nmi_watchdog_enabled;
+#endif
extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
extern int reserve_perfctr_nmi(unsigned int);
extern void release_perfctr_nmi(unsigned int);
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index 1a0422348d6d..cd2a31dc5fb8 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -53,6 +53,8 @@ extern int pcibios_last_bus;
extern struct pci_bus *pci_root_bus;
extern struct pci_ops pci_root_ops;
+void pcibios_scan_specific_bus(int busn);
+
/* pci-irq.c */
struct irq_info {
@@ -83,7 +85,7 @@ struct irq_routing_table {
extern unsigned int pcibios_irq_mask;
-extern spinlock_t pci_config_lock;
+extern raw_spinlock_t pci_config_lock;
extern int (*pcibios_enable_irq)(struct pci_dev *dev);
extern void (*pcibios_disable_irq)(struct pci_dev *dev);
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 66a272dfd8b8..0797e748d280 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -105,7 +105,7 @@ do { \
/*
* Generate a percpu add to memory instruction and optimize code
- * if a one is added or subtracted.
+ * if one is added or subtracted.
*/
#define percpu_add_op(var, val) \
do { \
@@ -190,6 +190,29 @@ do { \
pfo_ret__; \
})
+#define percpu_unary_op(op, var) \
+({ \
+ switch (sizeof(var)) { \
+ case 1: \
+ asm(op "b "__percpu_arg(0) \
+ : "+m" (var)); \
+ break; \
+ case 2: \
+ asm(op "w "__percpu_arg(0) \
+ : "+m" (var)); \
+ break; \
+ case 4: \
+ asm(op "l "__percpu_arg(0) \
+ : "+m" (var)); \
+ break; \
+ case 8: \
+ asm(op "q "__percpu_arg(0) \
+ : "+m" (var)); \
+ break; \
+ default: __bad_percpu_size(); \
+ } \
+})
+
/*
* percpu_read() makes gcc load the percpu variable every time it is
* accessed while percpu_read_stable() allows the value to be cached.
@@ -207,6 +230,7 @@ do { \
#define percpu_and(var, val) percpu_to_op("and", var, val)
#define percpu_or(var, val) percpu_to_op("or", var, val)
#define percpu_xor(var, val) percpu_to_op("xor", var, val)
+#define percpu_inc(var) percpu_unary_op("inc", var)
#define __this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
#define __this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
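percpu_unary_op() above generates a single size-matched incb/incw/incl/incq directly on the per-cpu address, and percpu_inc() is its first user. A small usage sketch, assuming a made-up per-cpu counter:

    DEFINE_PER_CPU(unsigned int, sketch_event_count);      /* hypothetical counter */

    static void sketch_count_event(void)
    {
            /* one inc instruction on this CPU's copy; no load/modify/store pair */
            percpu_inc(sketch_event_count);
    }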
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index db6109a885a7..254883d0c7e0 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -5,7 +5,7 @@
* Performance event hw details:
*/
-#define X86_PMC_MAX_GENERIC 8
+#define X86_PMC_MAX_GENERIC 32
#define X86_PMC_MAX_FIXED 3
#define X86_PMC_IDX_GENERIC 0
@@ -18,39 +18,31 @@
#define MSR_ARCH_PERFMON_EVENTSEL0 0x186
#define MSR_ARCH_PERFMON_EVENTSEL1 0x187
-#define ARCH_PERFMON_EVENTSEL_ENABLE (1 << 22)
-#define ARCH_PERFMON_EVENTSEL_ANY (1 << 21)
-#define ARCH_PERFMON_EVENTSEL_INT (1 << 20)
-#define ARCH_PERFMON_EVENTSEL_OS (1 << 17)
-#define ARCH_PERFMON_EVENTSEL_USR (1 << 16)
-
-/*
- * Includes eventsel and unit mask as well:
- */
-
-
-#define INTEL_ARCH_EVTSEL_MASK 0x000000FFULL
-#define INTEL_ARCH_UNIT_MASK 0x0000FF00ULL
-#define INTEL_ARCH_EDGE_MASK 0x00040000ULL
-#define INTEL_ARCH_INV_MASK 0x00800000ULL
-#define INTEL_ARCH_CNT_MASK 0xFF000000ULL
-#define INTEL_ARCH_EVENT_MASK (INTEL_ARCH_UNIT_MASK|INTEL_ARCH_EVTSEL_MASK)
-
-/*
- * filter mask to validate fixed counter events.
- * the following filters disqualify for fixed counters:
- * - inv
- * - edge
- * - cnt-mask
- * The other filters are supported by fixed counters.
- * The any-thread option is supported starting with v3.
- */
-#define INTEL_ARCH_FIXED_MASK \
- (INTEL_ARCH_CNT_MASK| \
- INTEL_ARCH_INV_MASK| \
- INTEL_ARCH_EDGE_MASK|\
- INTEL_ARCH_UNIT_MASK|\
- INTEL_ARCH_EVENT_MASK)
+#define ARCH_PERFMON_EVENTSEL_EVENT 0x000000FFULL
+#define ARCH_PERFMON_EVENTSEL_UMASK 0x0000FF00ULL
+#define ARCH_PERFMON_EVENTSEL_USR (1ULL << 16)
+#define ARCH_PERFMON_EVENTSEL_OS (1ULL << 17)
+#define ARCH_PERFMON_EVENTSEL_EDGE (1ULL << 18)
+#define ARCH_PERFMON_EVENTSEL_INT (1ULL << 20)
+#define ARCH_PERFMON_EVENTSEL_ANY (1ULL << 21)
+#define ARCH_PERFMON_EVENTSEL_ENABLE (1ULL << 22)
+#define ARCH_PERFMON_EVENTSEL_INV (1ULL << 23)
+#define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL
+
+#define AMD64_EVENTSEL_EVENT \
+ (ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
+#define INTEL_ARCH_EVENT_MASK \
+ (ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)
+
+#define X86_RAW_EVENT_MASK \
+ (ARCH_PERFMON_EVENTSEL_EVENT | \
+ ARCH_PERFMON_EVENTSEL_UMASK | \
+ ARCH_PERFMON_EVENTSEL_EDGE | \
+ ARCH_PERFMON_EVENTSEL_INV | \
+ ARCH_PERFMON_EVENTSEL_CMASK)
+#define AMD64_RAW_EVENT_MASK \
+ (X86_RAW_EVENT_MASK | \
+ AMD64_EVENTSEL_EVENT)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
@@ -67,7 +59,7 @@
union cpuid10_eax {
struct {
unsigned int version_id:8;
- unsigned int num_events:8;
+ unsigned int num_counters:8;
unsigned int bit_width:8;
unsigned int mask_length:8;
} split;
@@ -76,7 +68,7 @@ union cpuid10_eax {
union cpuid10_edx {
struct {
- unsigned int num_events_fixed:4;
+ unsigned int num_counters_fixed:4;
unsigned int reserved:28;
} split;
unsigned int full;
@@ -136,6 +128,18 @@ extern void perf_events_lapic_init(void);
#define PERF_EVENT_INDEX_OFFSET 0
+/*
+ * Abuse bit 3 of the cpu eflags register to indicate proper PEBS IP fixups.
+ * This flag is otherwise unused and ABI specified to be 0, so nobody should
+ * care what we do with it.
+ */
+#define PERF_EFLAGS_EXACT (1UL << 3)
+
+struct pt_regs;
+extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
+extern unsigned long perf_misc_flags(struct pt_regs *regs);
+#define perf_misc_flags(regs) perf_misc_flags(regs)
+
#else
static inline void init_hw_perf_events(void) { }
static inline void perf_events_lapic_init(void) { }
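The regrouped ARCH_PERFMON_EVENTSEL_* definitions above are the raw bit fields of an IA32_PERFEVTSELx register, and X86_RAW_EVENT_MASK is the subset that user-supplied raw configs are filtered against. As a sketch, a raw event-select value for the architectural unhalted-core-cycles event, counting both user and kernel, can be assembled from the constants defined above (the helper name is illustrative):

    static inline u64 sketch_cycles_evtsel(void)
    {
            return ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL   |  /* event code 0x3c */
                   ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK |  /* umask 0x00      */
                   ARCH_PERFMON_EVENTSEL_USR               |  /* count in ring 3 */
                   ARCH_PERFMON_EVENTSEL_OS                |  /* count in ring 0 */
                   ARCH_PERFMON_EVENTSEL_ENABLE;              /* enable counter  */
    }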
diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h
new file mode 100644
index 000000000000..b05400a542ff
--- /dev/null
+++ b/arch/x86/include/asm/perf_event_p4.h
@@ -0,0 +1,794 @@
+/*
+ * NetBurst Performance Events (P4, old Xeon)
+ */
+
+#ifndef PERF_EVENT_P4_H
+#define PERF_EVENT_P4_H
+
+#include <linux/cpu.h>
+#include <linux/bitops.h>
+
+/*
+ * NetBurst has performance MSRs shared between
+ * threads when HT is turned on, i.e. for both logical
+ * processors (note: by contrast, on Atom with HT support
+ * the perf-MSRs are not shared and every thread has its
+ * own set of perf-MSRs)
+ */
+#define ARCH_P4_TOTAL_ESCR (46)
+#define ARCH_P4_RESERVED_ESCR (2) /* IQ_ESCR(0,1) not always present */
+#define ARCH_P4_MAX_ESCR (ARCH_P4_TOTAL_ESCR - ARCH_P4_RESERVED_ESCR)
+#define ARCH_P4_MAX_CCCR (18)
+#define ARCH_P4_MAX_COUNTER (ARCH_P4_MAX_CCCR / 2)
+
+#define P4_ESCR_EVENT_MASK 0x7e000000U
+#define P4_ESCR_EVENT_SHIFT 25
+#define P4_ESCR_EVENTMASK_MASK 0x01fffe00U
+#define P4_ESCR_EVENTMASK_SHIFT 9
+#define P4_ESCR_TAG_MASK 0x000001e0U
+#define P4_ESCR_TAG_SHIFT 5
+#define P4_ESCR_TAG_ENABLE 0x00000010U
+#define P4_ESCR_T0_OS 0x00000008U
+#define P4_ESCR_T0_USR 0x00000004U
+#define P4_ESCR_T1_OS 0x00000002U
+#define P4_ESCR_T1_USR 0x00000001U
+
+#define P4_ESCR_EVENT(v) ((v) << P4_ESCR_EVENT_SHIFT)
+#define P4_ESCR_EMASK(v) ((v) << P4_ESCR_EVENTMASK_SHIFT)
+#define P4_ESCR_TAG(v) ((v) << P4_ESCR_TAG_SHIFT)
+
+/* Non HT mask */
+#define P4_ESCR_MASK \
+ (P4_ESCR_EVENT_MASK | \
+ P4_ESCR_EVENTMASK_MASK | \
+ P4_ESCR_TAG_MASK | \
+ P4_ESCR_TAG_ENABLE | \
+ P4_ESCR_T0_OS | \
+ P4_ESCR_T0_USR)
+
+/* HT mask */
+#define P4_ESCR_MASK_HT \
+ (P4_ESCR_MASK | P4_ESCR_T1_OS | P4_ESCR_T1_USR)
+
+#define P4_CCCR_OVF 0x80000000U
+#define P4_CCCR_CASCADE 0x40000000U
+#define P4_CCCR_OVF_PMI_T0 0x04000000U
+#define P4_CCCR_OVF_PMI_T1 0x08000000U
+#define P4_CCCR_FORCE_OVF 0x02000000U
+#define P4_CCCR_EDGE 0x01000000U
+#define P4_CCCR_THRESHOLD_MASK 0x00f00000U
+#define P4_CCCR_THRESHOLD_SHIFT 20
+#define P4_CCCR_COMPLEMENT 0x00080000U
+#define P4_CCCR_COMPARE 0x00040000U
+#define P4_CCCR_ESCR_SELECT_MASK 0x0000e000U
+#define P4_CCCR_ESCR_SELECT_SHIFT 13
+#define P4_CCCR_ENABLE 0x00001000U
+#define P4_CCCR_THREAD_SINGLE 0x00010000U
+#define P4_CCCR_THREAD_BOTH 0x00020000U
+#define P4_CCCR_THREAD_ANY 0x00030000U
+#define P4_CCCR_RESERVED 0x00000fffU
+
+#define P4_CCCR_THRESHOLD(v) ((v) << P4_CCCR_THRESHOLD_SHIFT)
+#define P4_CCCR_ESEL(v) ((v) << P4_CCCR_ESCR_SELECT_SHIFT)
+
+/* Custom bits in reserved CCCR area */
+#define P4_CCCR_CACHE_OPS_MASK 0x0000003fU
+
+
+/* Non HT mask */
+#define P4_CCCR_MASK \
+ (P4_CCCR_OVF | \
+ P4_CCCR_CASCADE | \
+ P4_CCCR_OVF_PMI_T0 | \
+ P4_CCCR_FORCE_OVF | \
+ P4_CCCR_EDGE | \
+ P4_CCCR_THRESHOLD_MASK | \
+ P4_CCCR_COMPLEMENT | \
+ P4_CCCR_COMPARE | \
+ P4_CCCR_ESCR_SELECT_MASK | \
+ P4_CCCR_ENABLE)
+
+/* HT mask */
+#define P4_CCCR_MASK_HT (P4_CCCR_MASK | P4_CCCR_THREAD_ANY)
+
+#define P4_GEN_ESCR_EMASK(class, name, bit) \
+ class##__##name = ((1 << bit) << P4_ESCR_EVENTMASK_SHIFT)
+#define P4_ESCR_EMASK_BIT(class, name) class##__##name
+
+/*
+ * The config field is 64 bits wide and consists of
+ * HT << 63 | ESCR << 32 | CCCR
+ * where HT is the HyperThreading bit (ESCR has it
+ * reserved, so we may use it for our own purpose)
+ *
+ * note that this is NOT the addresses of the respective
+ * ESCR and CCCR registers, but only a packed value which
+ * must be unpacked and written to the proper addresses
+ *
+ * the basic idea is to pack as much info as
+ * possible
+ */
+#define p4_config_pack_escr(v) (((u64)(v)) << 32)
+#define p4_config_pack_cccr(v) (((u64)(v)) & 0xffffffffULL)
+#define p4_config_unpack_escr(v) (((u64)(v)) >> 32)
+#define p4_config_unpack_cccr(v) (((u64)(v)) & 0xffffffffULL)
+
+#define p4_config_unpack_emask(v) \
+ ({ \
+ u32 t = p4_config_unpack_escr((v)); \
+ t = t & P4_ESCR_EVENTMASK_MASK; \
+ t = t >> P4_ESCR_EVENTMASK_SHIFT; \
+ t; \
+ })
+
+#define p4_config_unpack_event(v) \
+ ({ \
+ u32 t = p4_config_unpack_escr((v)); \
+ t = t & P4_ESCR_EVENT_MASK; \
+ t = t >> P4_ESCR_EVENT_SHIFT; \
+ t; \
+ })
+
+#define p4_config_unpack_cache_event(v) (((u64)(v)) & P4_CCCR_CACHE_OPS_MASK)
+
+#define P4_CONFIG_HT_SHIFT 63
+#define P4_CONFIG_HT (1ULL << P4_CONFIG_HT_SHIFT)
+
+static inline bool p4_is_event_cascaded(u64 config)
+{
+ u32 cccr = p4_config_unpack_cccr(config);
+ return !!(cccr & P4_CCCR_CASCADE);
+}
+
+static inline int p4_ht_config_thread(u64 config)
+{
+ return !!(config & P4_CONFIG_HT);
+}
+
+static inline u64 p4_set_ht_bit(u64 config)
+{
+ return config | P4_CONFIG_HT;
+}
+
+static inline u64 p4_clear_ht_bit(u64 config)
+{
+ return config & ~P4_CONFIG_HT;
+}
+
+static inline int p4_ht_active(void)
+{
+#ifdef CONFIG_SMP
+ return smp_num_siblings > 1;
+#endif
+ return 0;
+}
+
+static inline int p4_ht_thread(int cpu)
+{
+#ifdef CONFIG_SMP
+ if (smp_num_siblings == 2)
+ return cpu != cpumask_first(__get_cpu_var(cpu_sibling_map));
+#endif
+ return 0;
+}
+
+static inline int p4_should_swap_ts(u64 config, int cpu)
+{
+ return p4_ht_config_thread(config) ^ p4_ht_thread(cpu);
+}
+
+static inline u32 p4_default_cccr_conf(int cpu)
+{
+ /*
+ * Note that P4_CCCR_THREAD_ANY is "required" on
+ * non-HT machines (on HT machines we count TS events
+ * regardless of the state of the second logical processor)
+ */
+ u32 cccr = P4_CCCR_THREAD_ANY;
+
+ if (!p4_ht_thread(cpu))
+ cccr |= P4_CCCR_OVF_PMI_T0;
+ else
+ cccr |= P4_CCCR_OVF_PMI_T1;
+
+ return cccr;
+}
+
+static inline u32 p4_default_escr_conf(int cpu, int exclude_os, int exclude_usr)
+{
+ u32 escr = 0;
+
+ if (!p4_ht_thread(cpu)) {
+ if (!exclude_os)
+ escr |= P4_ESCR_T0_OS;
+ if (!exclude_usr)
+ escr |= P4_ESCR_T0_USR;
+ } else {
+ if (!exclude_os)
+ escr |= P4_ESCR_T1_OS;
+ if (!exclude_usr)
+ escr |= P4_ESCR_T1_USR;
+ }
+
+ return escr;
+}
+
+enum P4_EVENTS {
+ P4_EVENT_TC_DELIVER_MODE,
+ P4_EVENT_BPU_FETCH_REQUEST,
+ P4_EVENT_ITLB_REFERENCE,
+ P4_EVENT_MEMORY_CANCEL,
+ P4_EVENT_MEMORY_COMPLETE,
+ P4_EVENT_LOAD_PORT_REPLAY,
+ P4_EVENT_STORE_PORT_REPLAY,
+ P4_EVENT_MOB_LOAD_REPLAY,
+ P4_EVENT_PAGE_WALK_TYPE,
+ P4_EVENT_BSQ_CACHE_REFERENCE,
+ P4_EVENT_IOQ_ALLOCATION,
+ P4_EVENT_IOQ_ACTIVE_ENTRIES,
+ P4_EVENT_FSB_DATA_ACTIVITY,
+ P4_EVENT_BSQ_ALLOCATION,
+ P4_EVENT_BSQ_ACTIVE_ENTRIES,
+ P4_EVENT_SSE_INPUT_ASSIST,
+ P4_EVENT_PACKED_SP_UOP,
+ P4_EVENT_PACKED_DP_UOP,
+ P4_EVENT_SCALAR_SP_UOP,
+ P4_EVENT_SCALAR_DP_UOP,
+ P4_EVENT_64BIT_MMX_UOP,
+ P4_EVENT_128BIT_MMX_UOP,
+ P4_EVENT_X87_FP_UOP,
+ P4_EVENT_TC_MISC,
+ P4_EVENT_GLOBAL_POWER_EVENTS,
+ P4_EVENT_TC_MS_XFER,
+ P4_EVENT_UOP_QUEUE_WRITES,
+ P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE,
+ P4_EVENT_RETIRED_BRANCH_TYPE,
+ P4_EVENT_RESOURCE_STALL,
+ P4_EVENT_WC_BUFFER,
+ P4_EVENT_B2B_CYCLES,
+ P4_EVENT_BNR,
+ P4_EVENT_SNOOP,
+ P4_EVENT_RESPONSE,
+ P4_EVENT_FRONT_END_EVENT,
+ P4_EVENT_EXECUTION_EVENT,
+ P4_EVENT_REPLAY_EVENT,
+ P4_EVENT_INSTR_RETIRED,
+ P4_EVENT_UOPS_RETIRED,
+ P4_EVENT_UOP_TYPE,
+ P4_EVENT_BRANCH_RETIRED,
+ P4_EVENT_MISPRED_BRANCH_RETIRED,
+ P4_EVENT_X87_ASSIST,
+ P4_EVENT_MACHINE_CLEAR,
+ P4_EVENT_INSTR_COMPLETED,
+};
+
+#define P4_OPCODE(event) event##_OPCODE
+#define P4_OPCODE_ESEL(opcode) ((opcode & 0x00ff) >> 0)
+#define P4_OPCODE_EVNT(opcode) ((opcode & 0xff00) >> 8)
+#define P4_OPCODE_PACK(event, sel) (((event) << 8) | sel)
+
+/*
+ * The comments below each event list the ESCR restriction
+ * for that event and the counter indices usable per ESCR
+ *
+ * MSR_P4_IQ_ESCR0 and MSR_P4_IQ_ESCR1 are available only on early
+ * processor builds (family 0FH, models 01H-02H). These MSRs
+ * are not available on later versions, so we don't use
+ * them at all
+ *
+ * Also note that CCCR1 does not have the P4_CCCR_ENABLE bit
+ * working properly, so as a result we should not use that CCCR
+ * and its respective counter
+ */
+enum P4_EVENT_OPCODES {
+ P4_OPCODE(P4_EVENT_TC_DELIVER_MODE) = P4_OPCODE_PACK(0x01, 0x01),
+ /*
+ * MSR_P4_TC_ESCR0: 4, 5
+ * MSR_P4_TC_ESCR1: 6, 7
+ */
+
+ P4_OPCODE(P4_EVENT_BPU_FETCH_REQUEST) = P4_OPCODE_PACK(0x03, 0x00),
+ /*
+ * MSR_P4_BPU_ESCR0: 0, 1
+ * MSR_P4_BPU_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_ITLB_REFERENCE) = P4_OPCODE_PACK(0x18, 0x03),
+ /*
+ * MSR_P4_ITLB_ESCR0: 0, 1
+ * MSR_P4_ITLB_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_MEMORY_CANCEL) = P4_OPCODE_PACK(0x02, 0x05),
+ /*
+ * MSR_P4_DAC_ESCR0: 8, 9
+ * MSR_P4_DAC_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_MEMORY_COMPLETE) = P4_OPCODE_PACK(0x08, 0x02),
+ /*
+ * MSR_P4_SAAT_ESCR0: 8, 9
+ * MSR_P4_SAAT_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_LOAD_PORT_REPLAY) = P4_OPCODE_PACK(0x04, 0x02),
+ /*
+ * MSR_P4_SAAT_ESCR0: 8, 9
+ * MSR_P4_SAAT_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_STORE_PORT_REPLAY) = P4_OPCODE_PACK(0x05, 0x02),
+ /*
+ * MSR_P4_SAAT_ESCR0: 8, 9
+ * MSR_P4_SAAT_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_MOB_LOAD_REPLAY) = P4_OPCODE_PACK(0x03, 0x02),
+ /*
+ * MSR_P4_MOB_ESCR0: 0, 1
+ * MSR_P4_MOB_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_PAGE_WALK_TYPE) = P4_OPCODE_PACK(0x01, 0x04),
+ /*
+ * MSR_P4_PMH_ESCR0: 0, 1
+ * MSR_P4_PMH_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_BSQ_CACHE_REFERENCE) = P4_OPCODE_PACK(0x0c, 0x07),
+ /*
+ * MSR_P4_BSU_ESCR0: 0, 1
+ * MSR_P4_BSU_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_IOQ_ALLOCATION) = P4_OPCODE_PACK(0x03, 0x06),
+ /*
+ * MSR_P4_FSB_ESCR0: 0, 1
+ * MSR_P4_FSB_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_IOQ_ACTIVE_ENTRIES) = P4_OPCODE_PACK(0x1a, 0x06),
+ /*
+ * MSR_P4_FSB_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_FSB_DATA_ACTIVITY) = P4_OPCODE_PACK(0x17, 0x06),
+ /*
+ * MSR_P4_FSB_ESCR0: 0, 1
+ * MSR_P4_FSB_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_BSQ_ALLOCATION) = P4_OPCODE_PACK(0x05, 0x07),
+ /*
+ * MSR_P4_BSU_ESCR0: 0, 1
+ */
+
+ P4_OPCODE(P4_EVENT_BSQ_ACTIVE_ENTRIES) = P4_OPCODE_PACK(0x06, 0x07),
+ /*
+ * NOTE: no ESCR name in docs, it's guessed
+ * MSR_P4_BSU_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_SSE_INPUT_ASSIST) = P4_OPCODE_PACK(0x34, 0x01),
+ /*
+ * MSR_P4_FIRM_ESCR0: 8, 9
+ * MSR_P4_FIRM_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_PACKED_SP_UOP) = P4_OPCODE_PACK(0x08, 0x01),
+ /*
+ * MSR_P4_FIRM_ESCR0: 8, 9
+ * MSR_P4_FIRM_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_PACKED_DP_UOP) = P4_OPCODE_PACK(0x0c, 0x01),
+ /*
+ * MSR_P4_FIRM_ESCR0: 8, 9
+ * MSR_P4_FIRM_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_SCALAR_SP_UOP) = P4_OPCODE_PACK(0x0a, 0x01),
+ /*
+ * MSR_P4_FIRM_ESCR0: 8, 9
+ * MSR_P4_FIRM_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_SCALAR_DP_UOP) = P4_OPCODE_PACK(0x0e, 0x01),
+ /*
+ * MSR_P4_FIRM_ESCR0: 8, 9
+ * MSR_P4_FIRM_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_64BIT_MMX_UOP) = P4_OPCODE_PACK(0x02, 0x01),
+ /*
+ * MSR_P4_FIRM_ESCR0: 8, 9
+ * MSR_P4_FIRM_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_128BIT_MMX_UOP) = P4_OPCODE_PACK(0x1a, 0x01),
+ /*
+ * MSR_P4_FIRM_ESCR0: 8, 9
+ * MSR_P4_FIRM_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_X87_FP_UOP) = P4_OPCODE_PACK(0x04, 0x01),
+ /*
+ * MSR_P4_FIRM_ESCR0: 8, 9
+ * MSR_P4_FIRM_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_TC_MISC) = P4_OPCODE_PACK(0x06, 0x01),
+ /*
+ * MSR_P4_TC_ESCR0: 4, 5
+ * MSR_P4_TC_ESCR1: 6, 7
+ */
+
+ P4_OPCODE(P4_EVENT_GLOBAL_POWER_EVENTS) = P4_OPCODE_PACK(0x13, 0x06),
+ /*
+ * MSR_P4_FSB_ESCR0: 0, 1
+ * MSR_P4_FSB_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_TC_MS_XFER) = P4_OPCODE_PACK(0x05, 0x00),
+ /*
+ * MSR_P4_MS_ESCR0: 4, 5
+ * MSR_P4_MS_ESCR1: 6, 7
+ */
+
+ P4_OPCODE(P4_EVENT_UOP_QUEUE_WRITES) = P4_OPCODE_PACK(0x09, 0x00),
+ /*
+ * MSR_P4_MS_ESCR0: 4, 5
+ * MSR_P4_MS_ESCR1: 6, 7
+ */
+
+ P4_OPCODE(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE) = P4_OPCODE_PACK(0x05, 0x02),
+ /*
+ * MSR_P4_TBPU_ESCR0: 4, 5
+ * MSR_P4_TBPU_ESCR1: 6, 7
+ */
+
+ P4_OPCODE(P4_EVENT_RETIRED_BRANCH_TYPE) = P4_OPCODE_PACK(0x04, 0x02),
+ /*
+ * MSR_P4_TBPU_ESCR0: 4, 5
+ * MSR_P4_TBPU_ESCR1: 6, 7
+ */
+
+ P4_OPCODE(P4_EVENT_RESOURCE_STALL) = P4_OPCODE_PACK(0x01, 0x01),
+ /*
+ * MSR_P4_ALF_ESCR0: 12, 13, 16
+ * MSR_P4_ALF_ESCR1: 14, 15, 17
+ */
+
+ P4_OPCODE(P4_EVENT_WC_BUFFER) = P4_OPCODE_PACK(0x05, 0x05),
+ /*
+ * MSR_P4_DAC_ESCR0: 8, 9
+ * MSR_P4_DAC_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_B2B_CYCLES) = P4_OPCODE_PACK(0x16, 0x03),
+ /*
+ * MSR_P4_FSB_ESCR0: 0, 1
+ * MSR_P4_FSB_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_BNR) = P4_OPCODE_PACK(0x08, 0x03),
+ /*
+ * MSR_P4_FSB_ESCR0: 0, 1
+ * MSR_P4_FSB_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_SNOOP) = P4_OPCODE_PACK(0x06, 0x03),
+ /*
+ * MSR_P4_FSB_ESCR0: 0, 1
+ * MSR_P4_FSB_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_RESPONSE) = P4_OPCODE_PACK(0x04, 0x03),
+ /*
+ * MSR_P4_FSB_ESCR0: 0, 1
+ * MSR_P4_FSB_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_FRONT_END_EVENT) = P4_OPCODE_PACK(0x08, 0x05),
+ /*
+ * MSR_P4_CRU_ESCR2: 12, 13, 16
+ * MSR_P4_CRU_ESCR3: 14, 15, 17
+ */
+
+ P4_OPCODE(P4_EVENT_EXECUTION_EVENT) = P4_OPCODE_PACK(0x0c, 0x05),
+ /*
+ * MSR_P4_CRU_ESCR2: 12, 13, 16
+ * MSR_P4_CRU_ESCR3: 14, 15, 17
+ */
+
+ P4_OPCODE(P4_EVENT_REPLAY_EVENT) = P4_OPCODE_PACK(0x09, 0x05),
+ /*
+ * MSR_P4_CRU_ESCR2: 12, 13, 16
+ * MSR_P4_CRU_ESCR3: 14, 15, 17
+ */
+
+ P4_OPCODE(P4_EVENT_INSTR_RETIRED) = P4_OPCODE_PACK(0x02, 0x04),
+ /*
+ * MSR_P4_CRU_ESCR0: 12, 13, 16
+ * MSR_P4_CRU_ESCR1: 14, 15, 17
+ */
+
+ P4_OPCODE(P4_EVENT_UOPS_RETIRED) = P4_OPCODE_PACK(0x01, 0x04),
+ /*
+ * MSR_P4_CRU_ESCR0: 12, 13, 16
+ * MSR_P4_CRU_ESCR1: 14, 15, 17
+ */
+
+ P4_OPCODE(P4_EVENT_UOP_TYPE) = P4_OPCODE_PACK(0x02, 0x02),
+ /*
+ * MSR_P4_RAT_ESCR0: 12, 13, 16
+ * MSR_P4_RAT_ESCR1: 14, 15, 17
+ */
+
+ P4_OPCODE(P4_EVENT_BRANCH_RETIRED) = P4_OPCODE_PACK(0x06, 0x05),
+ /*
+ * MSR_P4_CRU_ESCR2: 12, 13, 16
+ * MSR_P4_CRU_ESCR3: 14, 15, 17
+ */
+
+ P4_OPCODE(P4_EVENT_MISPRED_BRANCH_RETIRED) = P4_OPCODE_PACK(0x03, 0x04),
+ /*
+ * MSR_P4_CRU_ESCR0: 12, 13, 16
+ * MSR_P4_CRU_ESCR1: 14, 15, 17
+ */
+
+ P4_OPCODE(P4_EVENT_X87_ASSIST) = P4_OPCODE_PACK(0x03, 0x05),
+ /*
+ * MSR_P4_CRU_ESCR2: 12, 13, 16
+ * MSR_P4_CRU_ESCR3: 14, 15, 17
+ */
+
+ P4_OPCODE(P4_EVENT_MACHINE_CLEAR) = P4_OPCODE_PACK(0x02, 0x05),
+ /*
+ * MSR_P4_CRU_ESCR2: 12, 13, 16
+ * MSR_P4_CRU_ESCR3: 14, 15, 17
+ */
+
+ P4_OPCODE(P4_EVENT_INSTR_COMPLETED) = P4_OPCODE_PACK(0x07, 0x04),
+ /*
+ * MSR_P4_CRU_ESCR0: 12, 13, 16
+ * MSR_P4_CRU_ESCR1: 14, 15, 17
+ */
+};
+
+/*
+ * a caller should use the P4_ESCR_EMASK_BIT helper to
+ * pick the EventMask needed, for example
+ *
+ * P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DD)
+ */
+enum P4_ESCR_EMASKS {
+ P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, DD, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, DB, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, DI, 2),
+ P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, BD, 3),
+ P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, BB, 4),
+ P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, BI, 5),
+ P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, ID, 6),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_BPU_FETCH_REQUEST, TCMISS, 0),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_ITLB_REFERENCE, HIT, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_ITLB_REFERENCE, MISS, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_ITLB_REFERENCE, HIT_UK, 2),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_MEMORY_CANCEL, ST_RB_FULL, 2),
+ P4_GEN_ESCR_EMASK(P4_EVENT_MEMORY_CANCEL, 64K_CONF, 3),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_MEMORY_COMPLETE, LSC, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_MEMORY_COMPLETE, SSC, 1),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_LOAD_PORT_REPLAY, SPLIT_LD, 1),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_STORE_PORT_REPLAY, SPLIT_ST, 1),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_MOB_LOAD_REPLAY, NO_STA, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_MOB_LOAD_REPLAY, NO_STD, 3),
+ P4_GEN_ESCR_EMASK(P4_EVENT_MOB_LOAD_REPLAY, PARTIAL_DATA, 4),
+ P4_GEN_ESCR_EMASK(P4_EVENT_MOB_LOAD_REPLAY, UNALGN_ADDR, 5),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_PAGE_WALK_TYPE, DTMISS, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_PAGE_WALK_TYPE, ITMISS, 1),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITS, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITE, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITM, 2),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITS, 3),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITE, 4),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITM, 5),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_MISS, 8),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_MISS, 9),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, WR_2ndL_MISS, 10),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, DEFAULT, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, ALL_READ, 5),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, ALL_WRITE, 6),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, MEM_UC, 7),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, MEM_WC, 8),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, MEM_WT, 9),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, MEM_WP, 10),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, MEM_WB, 11),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, OWN, 13),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, OTHER, 14),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, PREFETCH, 15),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, DEFAULT, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, ALL_READ, 5),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, ALL_WRITE, 6),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_UC, 7),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WC, 8),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WT, 9),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WP, 10),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WB, 11),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, OWN, 13),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, OTHER, 14),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, PREFETCH, 15),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_DRV, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OWN, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OTHER, 2),
+ P4_GEN_ESCR_EMASK(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_DRV, 3),
+ P4_GEN_ESCR_EMASK(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_OWN, 4),
+ P4_GEN_ESCR_EMASK(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_OTHER, 5),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_TYPE0, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_TYPE1, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_LEN0, 2),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_LEN1, 3),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_IO_TYPE, 5),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_LOCK_TYPE, 6),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_CACHE_TYPE, 7),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_SPLIT_TYPE, 8),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_DEM_TYPE, 9),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_ORD_TYPE, 10),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE0, 11),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE1, 12),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE2, 13),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_TYPE0, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_TYPE1, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LEN0, 2),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LEN1, 3),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_IO_TYPE, 5),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LOCK_TYPE, 6),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_CACHE_TYPE, 7),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_SPLIT_TYPE, 8),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_DEM_TYPE, 9),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_ORD_TYPE, 10),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE0, 11),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE1, 12),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE2, 13),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_SSE_INPUT_ASSIST, ALL, 15),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_PACKED_SP_UOP, ALL, 15),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_PACKED_DP_UOP, ALL, 15),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_SCALAR_SP_UOP, ALL, 15),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_SCALAR_DP_UOP, ALL, 15),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_64BIT_MMX_UOP, ALL, 15),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_128BIT_MMX_UOP, ALL, 15),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_X87_FP_UOP, ALL, 15),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_TC_MISC, FLUSH, 4),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING, 0),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_TC_MS_XFER, CISC, 0),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_UOP_QUEUE_WRITES, FROM_TC_BUILD, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_UOP_QUEUE_WRITES, FROM_TC_DELIVER, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_UOP_QUEUE_WRITES, FROM_ROM, 2),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, CONDITIONAL, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, CALL, 2),
+ P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, RETURN, 3),
+ P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, INDIRECT, 4),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_BRANCH_TYPE, CONDITIONAL, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_BRANCH_TYPE, CALL, 2),
+ P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_BRANCH_TYPE, RETURN, 3),
+ P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_BRANCH_TYPE, INDIRECT, 4),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_RESOURCE_STALL, SBFULL, 5),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_WC_BUFFER, WCB_EVICTS, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_WC_BUFFER, WCB_FULL_EVICTS, 1),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_FRONT_END_EVENT, NBOGUS, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_FRONT_END_EVENT, BOGUS, 1),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, NBOGUS0, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, NBOGUS1, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, NBOGUS2, 2),
+ P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, NBOGUS3, 3),
+ P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, BOGUS0, 4),
+ P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, BOGUS1, 5),
+ P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, BOGUS2, 6),
+ P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, BOGUS3, 7),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_REPLAY_EVENT, NBOGUS, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_REPLAY_EVENT, BOGUS, 1),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_INSTR_RETIRED, NBOGUSNTAG, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_INSTR_RETIRED, NBOGUSTAG, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_INSTR_RETIRED, BOGUSNTAG, 2),
+ P4_GEN_ESCR_EMASK(P4_EVENT_INSTR_RETIRED, BOGUSTAG, 3),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_UOPS_RETIRED, NBOGUS, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_UOPS_RETIRED, BOGUS, 1),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_UOP_TYPE, TAGLOADS, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_UOP_TYPE, TAGSTORES, 2),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_BRANCH_RETIRED, MMNP, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BRANCH_RETIRED, MMNM, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BRANCH_RETIRED, MMTP, 2),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BRANCH_RETIRED, MMTM, 3),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS, 0),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_X87_ASSIST, FPSU, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_X87_ASSIST, FPSO, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_X87_ASSIST, POAO, 2),
+ P4_GEN_ESCR_EMASK(P4_EVENT_X87_ASSIST, POAU, 3),
+ P4_GEN_ESCR_EMASK(P4_EVENT_X87_ASSIST, PREA, 4),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_MACHINE_CLEAR, CLEAR, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_MACHINE_CLEAR, MOCLEAR, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_MACHINE_CLEAR, SMCLEAR, 2),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_INSTR_COMPLETED, NBOGUS, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_INSTR_COMPLETED, BOGUS, 1),
+};
+
+/* P4 PEBS: stale for a while */
+#define P4_PEBS_METRIC_MASK 0x00001fffU
+#define P4_PEBS_UOB_TAG 0x01000000U
+#define P4_PEBS_ENABLE 0x02000000U
+
+/* Replay metrics for MSR_IA32_PEBS_ENABLE and MSR_P4_PEBS_MATRIX_VERT */
+#define P4_PEBS__1stl_cache_load_miss_retired 0x3000001
+#define P4_PEBS__2ndl_cache_load_miss_retired 0x3000002
+#define P4_PEBS__dtlb_load_miss_retired 0x3000004
+#define P4_PEBS__dtlb_store_miss_retired 0x3000004
+#define P4_PEBS__dtlb_all_miss_retired 0x3000004
+#define P4_PEBS__tagged_mispred_branch 0x3018000
+#define P4_PEBS__mob_load_replay_retired 0x3000200
+#define P4_PEBS__split_load_retired 0x3000400
+#define P4_PEBS__split_store_retired 0x3000400
+
+#define P4_VERT__1stl_cache_load_miss_retired 0x0000001
+#define P4_VERT__2ndl_cache_load_miss_retired 0x0000001
+#define P4_VERT__dtlb_load_miss_retired 0x0000001
+#define P4_VERT__dtlb_store_miss_retired 0x0000002
+#define P4_VERT__dtlb_all_miss_retired 0x0000003
+#define P4_VERT__tagged_mispred_branch 0x0000010
+#define P4_VERT__mob_load_replay_retired 0x0000001
+#define P4_VERT__split_load_retired 0x0000001
+#define P4_VERT__split_store_retired 0x0000002
+
+enum P4_CACHE_EVENTS {
+ P4_CACHE__NONE,
+
+ P4_CACHE__1stl_cache_load_miss_retired,
+ P4_CACHE__2ndl_cache_load_miss_retired,
+ P4_CACHE__dtlb_load_miss_retired,
+ P4_CACHE__dtlb_store_miss_retired,
+ P4_CACHE__itlb_reference_hit,
+ P4_CACHE__itlb_reference_miss,
+
+ P4_CACHE__MAX
+};
+
+#endif /* PERF_EVENT_P4_H */
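To make the packing scheme in the new header concrete: the 64-bit event config keeps the ESCR image in the upper half and the CCCR image in the lower half, and both the event code and the ESCR-select value can be recovered from the packed opcode. Below is a hedged sketch, using only macros defined above, of building such a config for GLOBAL_POWER_EVENTS/RUNNING (roughly a cycles event); the function name is invented, and a real user would also respect the counter/ESCR placement constraints noted in the opcode comments.

    static inline u64 sketch_p4_cycles_config(void)
    {
            unsigned int opcode = P4_OPCODE(P4_EVENT_GLOBAL_POWER_EVENTS);
            u32 escr, cccr;

            escr = P4_ESCR_EVENT(P4_OPCODE_EVNT(opcode)) |           /* event code       */
                   P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING) |
                   P4_ESCR_T0_OS | P4_ESCR_T0_USR;                   /* count ring 0 + 3 */

            cccr = P4_CCCR_ESEL(P4_OPCODE_ESEL(opcode)) |            /* which ESCR       */
                   P4_CCCR_ENABLE;

            return p4_config_pack_escr(escr) | p4_config_pack_cccr(cccr);
    }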
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index b753ea59703a..7e5c6a60b8ee 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -21,7 +21,6 @@ struct mm_struct;
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
-#include <asm/ds.h>
#include <linux/personality.h>
#include <linux/cpumask.h>
@@ -29,6 +28,7 @@ struct mm_struct;
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/init.h>
+#include <linux/err.h>
#define HBP_NUM 4
/*
@@ -113,7 +113,6 @@ struct cpuinfo_x86 {
/* Index into per_cpu list: */
u16 cpu_index;
#endif
- unsigned int x86_hyper_vendor;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
#define X86_VENDOR_INTEL 0
@@ -127,9 +126,6 @@ struct cpuinfo_x86 {
#define X86_VENDOR_UNKNOWN 0xff
-#define X86_HYPER_VENDOR_NONE 0
-#define X86_HYPER_VENDOR_VMWARE 1
-
/*
* capabilities of CPUs
*/
@@ -380,6 +376,10 @@ union thread_xstate {
struct xsave_struct xsave;
};
+struct fpu {
+ union thread_xstate *state;
+};
+
#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);
@@ -457,7 +457,7 @@ struct thread_struct {
unsigned long trap_no;
unsigned long error_code;
/* floating point and extended processor state */
- union thread_xstate *xstate;
+ struct fpu fpu;
#ifdef CONFIG_X86_32
/* Virtual 86 mode info */
struct vm86_struct __user *vm86_info;
@@ -473,10 +473,6 @@ struct thread_struct {
unsigned long iopl;
/* Max allowed port in the bitmap, in bytes: */
unsigned io_bitmap_max;
-/* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */
- unsigned long debugctlmsr;
- /* Debug Store context; see asm/ds.h */
- struct ds_context *ds_ctx;
};
static inline unsigned long native_get_debugreg(int regno)
@@ -793,6 +789,8 @@ static inline void wbinvd_halt(void)
extern void enable_sep_cpu(void);
extern int sysenter_setup(void);
+extern void early_trap_init(void);
+
/* Defined in head.S */
extern struct desc_ptr early_gdt_descr;
@@ -803,7 +801,7 @@ extern void cpu_init(void);
static inline unsigned long get_debugctlmsr(void)
{
- unsigned long debugctlmsr = 0;
+ unsigned long debugctlmsr = 0;
#ifndef CONFIG_X86_DEBUGCTLMSR
if (boot_cpu_data.x86 < 6)
@@ -811,21 +809,6 @@ static inline unsigned long get_debugctlmsr(void)
#endif
rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
- return debugctlmsr;
-}
-
-static inline unsigned long get_debugctlmsr_on_cpu(int cpu)
-{
- u64 debugctlmsr = 0;
- u32 val1, val2;
-
-#ifndef CONFIG_X86_DEBUGCTLMSR
- if (boot_cpu_data.x86 < 6)
- return 0;
-#endif
- rdmsr_on_cpu(cpu, MSR_IA32_DEBUGCTLMSR, &val1, &val2);
- debugctlmsr = val1 | ((u64)val2 << 32);
-
return debugctlmsr;
}
@@ -838,18 +821,6 @@ static inline void update_debugctlmsr(unsigned long debugctlmsr)
wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}
-static inline void update_debugctlmsr_on_cpu(int cpu,
- unsigned long debugctlmsr)
-{
-#ifndef CONFIG_X86_DEBUGCTLMSR
- if (boot_cpu_data.x86 < 6)
- return;
-#endif
- wrmsr_on_cpu(cpu, MSR_IA32_DEBUGCTLMSR,
- (u32)((u64)debugctlmsr),
- (u32)((u64)debugctlmsr >> 32));
-}
-
/*
* from system description table in BIOS. Mostly for MCA use, but
* others may find it useful:
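With the processor.h change above, the per-task extended state pointer moves from thread.xstate to the new struct fpu wrapper, i.e. thread.fpu.state. A hedged sketch of an accessor after the rename (the helper is illustrative; the real FPU accessors live elsewhere, e.g. in asm/i387.h):

    /* sketch: reach the xsave image through the new struct fpu wrapper */
    static inline struct xsave_struct *sketch_task_xsave(struct task_struct *tsk)
    {
            return &tsk->thread.fpu.state->xsave;
    }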
diff --git a/arch/x86/include/asm/ptrace-abi.h b/arch/x86/include/asm/ptrace-abi.h
index 86723035a515..52b098a6eebb 100644
--- a/arch/x86/include/asm/ptrace-abi.h
+++ b/arch/x86/include/asm/ptrace-abi.h
@@ -82,61 +82,6 @@
#ifndef __ASSEMBLY__
#include <linux/types.h>
-
-/* configuration/status structure used in PTRACE_BTS_CONFIG and
- PTRACE_BTS_STATUS commands.
-*/
-struct ptrace_bts_config {
- /* requested or actual size of BTS buffer in bytes */
- __u32 size;
- /* bitmask of below flags */
- __u32 flags;
- /* buffer overflow signal */
- __u32 signal;
- /* actual size of bts_struct in bytes */
- __u32 bts_size;
-};
-#endif /* __ASSEMBLY__ */
-
-#define PTRACE_BTS_O_TRACE 0x1 /* branch trace */
-#define PTRACE_BTS_O_SCHED 0x2 /* scheduling events w/ jiffies */
-#define PTRACE_BTS_O_SIGNAL 0x4 /* send SIG<signal> on buffer overflow
- instead of wrapping around */
-#define PTRACE_BTS_O_ALLOC 0x8 /* (re)allocate buffer */
-
-#define PTRACE_BTS_CONFIG 40
-/* Configure branch trace recording.
- ADDR points to a struct ptrace_bts_config.
- DATA gives the size of that buffer.
- A new buffer is allocated, if requested in the flags.
- An overflow signal may only be requested for new buffers.
- Returns the number of bytes read.
-*/
-#define PTRACE_BTS_STATUS 41
-/* Return the current configuration in a struct ptrace_bts_config
- pointed to by ADDR; DATA gives the size of that buffer.
- Returns the number of bytes written.
-*/
-#define PTRACE_BTS_SIZE 42
-/* Return the number of available BTS records for draining.
- DATA and ADDR are ignored.
-*/
-#define PTRACE_BTS_GET 43
-/* Get a single BTS record.
- DATA defines the index into the BTS array, where 0 is the newest
- entry, and higher indices refer to older entries.
- ADDR is pointing to struct bts_struct (see asm/ds.h).
-*/
-#define PTRACE_BTS_CLEAR 44
-/* Clear the BTS buffer.
- DATA and ADDR are ignored.
-*/
-#define PTRACE_BTS_DRAIN 45
-/* Read all available BTS records and clear the buffer.
- ADDR points to an array of struct bts_struct.
- DATA gives the size of that buffer.
- BTS records are read from oldest to newest.
- Returns number of BTS records drained.
-*/
+#endif
#endif /* _ASM_X86_PTRACE_ABI_H */
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 69a686a7dff0..78cd1ea94500 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -289,12 +289,6 @@ extern int do_get_thread_area(struct task_struct *p, int idx,
extern int do_set_thread_area(struct task_struct *p, int idx,
struct user_desc __user *info, int can_allocate);
-#ifdef CONFIG_X86_PTRACE_BTS
-extern void ptrace_bts_untrace(struct task_struct *tsk);
-
-#define arch_ptrace_untrace(tsk) ptrace_bts_untrace(tsk)
-#endif /* CONFIG_X86_PTRACE_BTS */
-
#endif /* __KERNEL__ */
#endif /* !__ASSEMBLY__ */
diff --git a/arch/x86/include/asm/rdc321x_defs.h b/arch/x86/include/asm/rdc321x_defs.h
deleted file mode 100644
index c8e9c8bed3d0..000000000000
--- a/arch/x86/include/asm/rdc321x_defs.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#define PFX "rdc321x: "
-
-/* General purpose configuration and data registers */
-#define RDC3210_CFGREG_ADDR 0x0CF8
-#define RDC3210_CFGREG_DATA 0x0CFC
-
-#define RDC321X_GPIO_CTRL_REG1 0x48
-#define RDC321X_GPIO_CTRL_REG2 0x84
-#define RDC321X_GPIO_DATA_REG1 0x4c
-#define RDC321X_GPIO_DATA_REG2 0x88
-
-#define RDC321X_MAX_GPIO 58
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 38638cd2fa4c..0e831059ac5a 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -81,7 +81,9 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
u32 event_inj_err;
u64 nested_cr3;
u64 lbr_ctl;
- u8 reserved_5[832];
+ u64 reserved_5;
+ u64 next_rip;
+ u8 reserved_6[816];
};
@@ -115,6 +117,10 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
#define SVM_IOIO_SIZE_MASK (7 << SVM_IOIO_SIZE_SHIFT)
#define SVM_IOIO_ASIZE_MASK (7 << SVM_IOIO_ASIZE_SHIFT)
+#define SVM_VM_CR_VALID_MASK 0x001fULL
+#define SVM_VM_CR_SVM_LOCK_MASK 0x0008ULL
+#define SVM_VM_CR_SVM_DIS_MASK 0x0010ULL
+
struct __attribute__ ((__packed__)) vmcb_seg {
u16 selector;
u16 attrib;
@@ -238,6 +244,7 @@ struct __attribute__ ((__packed__)) vmcb {
#define SVM_EXITINFOSHIFT_TS_REASON_IRET 36
#define SVM_EXITINFOSHIFT_TS_REASON_JMP 38
+#define SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE 44
#define SVM_EXIT_READ_CR0 0x000
#define SVM_EXIT_READ_CR3 0x003
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
index 3ad421784ae7..cf4e2e381cba 100644
--- a/arch/x86/include/asm/sys_ia32.h
+++ b/arch/x86/include/asm/sys_ia32.h
@@ -80,4 +80,7 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs *);
/* ia32/ipc32.c */
asmlinkage long sys32_ipc(u32, int, int, int, compat_uptr_t, u32);
+
+asmlinkage long sys32_fanotify_mark(int, unsigned int, u32, u32, int,
+ const char __user *);
#endif /* _ASM_X86_SYS_IA32_H */
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index e0d28901e969..d4092fac226b 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -92,8 +92,7 @@ struct thread_info {
#define TIF_IO_BITMAP 22 /* uses I/O bitmap */
#define TIF_FREEZE 23 /* is freezing for suspend */
#define TIF_FORCED_TF 24 /* true if TF in eflags artificially */
-#define TIF_DEBUGCTLMSR 25 /* uses thread_struct.debugctlmsr */
-#define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */
+#define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */
#define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */
#define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
@@ -115,8 +114,7 @@ struct thread_info {
#define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
#define _TIF_FREEZE (1 << TIF_FREEZE)
#define _TIF_FORCED_TF (1 << TIF_FORCED_TF)
-#define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR)
-#define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR)
+#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
#define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
@@ -147,7 +145,7 @@ struct thread_info {
/* flags to check in __switch_to() */
#define _TIF_WORK_CTXSW \
- (_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_NOTSC)
+ (_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP)
#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG)
@@ -244,7 +242,6 @@ static inline struct thread_info *current_thread_info(void)
#define TS_POLLING 0x0004 /* true if in idle loop
and not sleeping */
#define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */
-#define TS_XSAVE 0x0010 /* Use xsave/xrstor */
#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
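TIF_BLOCKSTEP collapses the old TIF_DEBUGCTLMSR/TIF_DS_AREA_MSR pair into a single flag meaning "this task wants DEBUGCTLMSR_BTF". On a context switch the flag only matters when it differs between the outgoing and incoming task, at which point the MSR bit is flipped accordingly. A rough sketch of that toggle, with an invented helper name (the real logic sits in the __switch_to_xtra() path):

    static void sketch_switch_blockstep(struct task_struct *prev,
                                        struct task_struct *next)
    {
            if (test_tsk_thread_flag(prev, TIF_BLOCKSTEP) ^
                test_tsk_thread_flag(next, TIF_BLOCKSTEP)) {
                    unsigned long debugctl = get_debugctlmsr();

                    debugctl &= ~DEBUGCTLMSR_BTF;
                    if (test_tsk_thread_flag(next, TIF_BLOCKSTEP))
                            debugctl |= DEBUGCTLMSR_BTF;   /* branch-step the new task */
                    update_debugctlmsr(debugctl);
            }
    }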
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 4da91ad69e0d..f66cda56781d 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -79,7 +79,7 @@ static inline int get_si_code(unsigned long condition)
extern int panic_on_unrecovered_nmi;
-void math_error(void __user *);
+void math_error(struct pt_regs *, int, int);
void math_emulate(struct math_emu_info *);
#ifndef CONFIG_X86_32
asmlinkage void smp_thermal_interrupt(void);
diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h
index beb9b5f8f8a4..80b799cd74f7 100644
--- a/arch/x86/include/asm/unistd_32.h
+++ b/arch/x86/include/asm/unistd_32.h
@@ -343,10 +343,12 @@
#define __NR_rt_tgsigqueueinfo 335
#define __NR_perf_event_open 336
#define __NR_recvmmsg 337
+#define __NR_fanotify_init 338
+#define __NR_fanotify_mark 339
#ifdef __KERNEL__
-#define NR_syscalls 338
+#define NR_syscalls 340
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h
index ff4307b0e81e..5b7b1d585616 100644
--- a/arch/x86/include/asm/unistd_64.h
+++ b/arch/x86/include/asm/unistd_64.h
@@ -663,6 +663,10 @@ __SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo)
__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
#define __NR_recvmmsg 299
__SYSCALL(__NR_recvmmsg, sys_recvmmsg)
+#define __NR_fanotify_init 300
+__SYSCALL(__NR_fanotify_init, sys_fanotify_init)
+#define __NR_fanotify_mark 301
+__SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
#ifndef __NO_STUBS
#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index b414d2b401f6..aa558ac0306e 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -27,13 +27,14 @@
* set 2 is at BASE + 2*512, set 3 at BASE + 3*512, and so on.
*
* We will use 31 sets, one for sending BAU messages from each of the 32
- * cpu's on the node.
+ * cpu's on the uvhub.
*
* TLB shootdown will use the first of the 8 descriptors of each set.
* Each of the descriptors is 64 bytes in size (8*64 = 512 bytes in a set).
*/
#define UV_ITEMS_PER_DESCRIPTOR 8
+#define MAX_BAU_CONCURRENT 3
#define UV_CPUS_PER_ACT_STATUS 32
#define UV_ACT_STATUS_MASK 0x3
#define UV_ACT_STATUS_SIZE 2
@@ -45,6 +46,9 @@
#define UV_PAYLOADQ_PNODE_SHIFT 49
#define UV_PTC_BASENAME "sgi_uv/ptc_statistics"
#define uv_physnodeaddr(x) ((__pa((unsigned long)(x)) & uv_mmask))
+#define UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT 15
+#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT 16
+#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x000000000bUL
/*
* bits in UVH_LB_BAU_SB_ACTIVATION_STATUS_0/1
@@ -55,15 +59,29 @@
#define DESC_STATUS_SOURCE_TIMEOUT 3
/*
- * source side thresholds at which message retries print a warning
+ * source side thresholds at which message retries print a warning
*/
#define SOURCE_TIMEOUT_LIMIT 20
#define DESTINATION_TIMEOUT_LIMIT 20
/*
+ * misc. delays, in microseconds
+ */
+#define THROTTLE_DELAY 10
+#define TIMEOUT_DELAY 10
+#define BIOS_TO 1000
+/* BIOS is assumed to set the destination timeout to 1003520 nanoseconds */
+
+/*
+ * thresholds at which to use IPI to free resources
+ */
+#define PLUGSB4RESET 100
+#define TIMEOUTSB4RESET 100
+
+/*
* number of entries in the destination side payload queue
*/
-#define DEST_Q_SIZE 17
+#define DEST_Q_SIZE 20
/*
* number of destination side software ack resources
*/
@@ -72,9 +90,10 @@
/*
* completion statuses for sending a TLB flush message
*/
-#define FLUSH_RETRY 1
-#define FLUSH_GIVEUP 2
-#define FLUSH_COMPLETE 3
+#define FLUSH_RETRY_PLUGGED 1
+#define FLUSH_RETRY_TIMEOUT 2
+#define FLUSH_GIVEUP 3
+#define FLUSH_COMPLETE 4
/*
* Distribution: 32 bytes (256 bits) (bytes 0-0x1f of descriptor)
@@ -86,14 +105,14 @@
* 'base_dest_nodeid' field of the header corresponds to the
* destination nodeID associated with that specified bit.
*/
-struct bau_target_nodemask {
- unsigned long bits[BITS_TO_LONGS(256)];
+struct bau_target_uvhubmask {
+ unsigned long bits[BITS_TO_LONGS(UV_DISTRIBUTION_SIZE)];
};
/*
- * mask of cpu's on a node
+ * mask of cpu's on a uvhub
* (during initialization we need to check that unsigned long has
- * enough bits for max. cpu's per node)
+ * enough bits for max. cpu's per uvhub)
*/
struct bau_local_cpumask {
unsigned long bits;
@@ -135,8 +154,8 @@ struct bau_msg_payload {
struct bau_msg_header {
unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */
/* bits 5:0 */
- unsigned int base_dest_nodeid:15; /* nasid>>1 (pnode) of */
- /* bits 20:6 */ /* first bit in node_map */
+ unsigned int base_dest_nodeid:15; /* nasid (pnode<<1) of */
+ /* bits 20:6 */ /* first bit in uvhub map */
unsigned int command:8; /* message type */
/* bits 28:21 */
/* 0x38: SN3net EndPoint Message */
@@ -146,26 +165,38 @@ struct bau_msg_header {
unsigned int rsvd_2:9; /* must be zero */
/* bits 40:32 */
/* Suppl_A is 56-41 */
- unsigned int payload_2a:8;/* becomes byte 16 of msg */
- /* bits 48:41 */ /* not currently using */
- unsigned int payload_2b:8;/* becomes byte 17 of msg */
- /* bits 56:49 */ /* not currently using */
+ unsigned int sequence:16;/* message sequence number */
+ /* bits 56:41 */ /* becomes bytes 16-17 of msg */
/* Address field (96:57) is never used as an
address (these are address bits 42:3) */
+
unsigned int rsvd_3:1; /* must be zero */
/* bit 57 */
/* address bits 27:4 are payload */
- /* these 24 bits become bytes 12-14 of msg */
+ /* these next 24 (58-81) bits become bytes 12-14 of msg */
+
+ /* bits 65:58 land in byte 12 */
unsigned int replied_to:1;/* sent as 0 by the source to byte 12 */
/* bit 58 */
-
- unsigned int payload_1a:5;/* not currently used */
- /* bits 63:59 */
- unsigned int payload_1b:8;/* not currently used */
- /* bits 71:64 */
- unsigned int payload_1c:8;/* not currently used */
- /* bits 79:72 */
- unsigned int payload_1d:2;/* not currently used */
+ unsigned int msg_type:3; /* software type of the message*/
+ /* bits 61:59 */
+ unsigned int canceled:1; /* message canceled, resource to be freed*/
+ /* bit 62 */
+ unsigned int payload_1a:1;/* not currently used */
+ /* bit 63 */
+ unsigned int payload_1b:2;/* not currently used */
+ /* bits 65:64 */
+
+ /* bits 73:66 land in byte 13 */
+ unsigned int payload_1ca:6;/* not currently used */
+ /* bits 71:66 */
+ unsigned int payload_1c:2;/* not currently used */
+ /* bits 73:72 */
+
+ /* bits 81:74 land in byte 14 */
+ unsigned int payload_1d:6;/* not currently used */
+ /* bits 79:74 */
+ unsigned int payload_1e:2;/* not currently used */
/* bits 81:80 */
unsigned int rsvd_4:7; /* must be zero */
@@ -178,7 +209,7 @@ struct bau_msg_header {
/* bits 95:90 */
unsigned int rsvd_6:5; /* must be zero */
/* bits 100:96 */
- unsigned int int_both:1;/* if 1, interrupt both sockets on the blade */
+ unsigned int int_both:1;/* if 1, interrupt both sockets on the uvhub */
/* bit 101*/
unsigned int fairness:3;/* usually zero */
/* bits 104:102 */
@@ -191,13 +222,18 @@ struct bau_msg_header {
/* bits 127:107 */
};
+/* see msg_type: */
+#define MSG_NOOP 0
+#define MSG_REGULAR 1
+#define MSG_RETRY 2
+
/*
* The activation descriptor:
* The format of the message to send, plus all accompanying control
* Should be 64 bytes
*/
struct bau_desc {
- struct bau_target_nodemask distribution;
+ struct bau_target_uvhubmask distribution;
/*
* message template, consisting of header and payload:
*/
@@ -237,19 +273,25 @@ struct bau_payload_queue_entry {
unsigned short acknowledge_count; /* filled in by destination */
/* 16 bits, bytes 10-11 */
- unsigned short replied_to:1; /* sent as 0 by the source */
- /* 1 bit */
- unsigned short unused1:7; /* not currently using */
- /* 7 bits: byte 12) */
+ /* these next 3 bytes come from bits 58-81 of the message header */
+ unsigned short replied_to:1; /* sent as 0 by the source */
+ unsigned short msg_type:3; /* software message type */
+ unsigned short canceled:1; /* sent as 0 by the source */
+ unsigned short unused1:3; /* not currently using */
+ /* byte 12 */
- unsigned char unused2[2]; /* not currently using */
- /* bytes 13-14 */
+ unsigned char unused2a; /* not currently using */
+ /* byte 13 */
+ unsigned char unused2; /* not currently using */
+ /* byte 14 */
unsigned char sw_ack_vector; /* filled in by the hardware */
/* byte 15 (bits 127:120) */
- unsigned char unused4[3]; /* not currently using bytes 17-19 */
- /* bytes 17-19 */
+ unsigned short sequence; /* message sequence number */
+ /* bytes 16-17 */
+ unsigned char unused4[2]; /* not currently using bytes 18-19 */
+ /* bytes 18-19 */
int number_of_cpus; /* filled in at destination */
/* 32 bits, bytes 20-23 (aligned) */
@@ -259,63 +301,93 @@ struct bau_payload_queue_entry {
};
/*
- * one for every slot in the destination payload queue
- */
-struct bau_msg_status {
- struct bau_local_cpumask seen_by; /* map of cpu's */
-};
-
-/*
- * one for every slot in the destination software ack resources
- */
-struct bau_sw_ack_status {
- struct bau_payload_queue_entry *msg; /* associated message */
- int watcher; /* cpu monitoring, or -1 */
-};
-
-/*
- * one on every node and per-cpu; to locate the software tables
+ * one per-cpu; to locate the software tables
*/
struct bau_control {
struct bau_desc *descriptor_base;
- struct bau_payload_queue_entry *bau_msg_head;
struct bau_payload_queue_entry *va_queue_first;
struct bau_payload_queue_entry *va_queue_last;
- struct bau_msg_status *msg_statuses;
- int *watching; /* pointer to array */
+ struct bau_payload_queue_entry *bau_msg_head;
+ struct bau_control *uvhub_master;
+ struct bau_control *socket_master;
+ unsigned long timeout_interval;
+ atomic_t active_descriptor_count;
+ int max_concurrent;
+ int max_concurrent_constant;
+ int retry_message_scans;
+ int plugged_tries;
+ int timeout_tries;
+ int ipi_attempts;
+ int conseccompletes;
+ short cpu;
+ short uvhub_cpu;
+ short uvhub;
+ short cpus_in_socket;
+ short cpus_in_uvhub;
+ unsigned short message_number;
+ unsigned short uvhub_quiesce;
+ short socket_acknowledge_count[DEST_Q_SIZE];
+ cycles_t send_message;
+ spinlock_t masks_lock;
+ spinlock_t uvhub_lock;
+ spinlock_t queue_lock;
};
/*
* This structure is allocated per_cpu for UV TLB shootdown statistics.
*/
struct ptc_stats {
- unsigned long ptc_i; /* number of IPI-style flushes */
- unsigned long requestor; /* number of nodes this cpu sent to */
- unsigned long requestee; /* times cpu was remotely requested */
- unsigned long alltlb; /* times all tlb's on this cpu were flushed */
- unsigned long onetlb; /* times just one tlb on this cpu was flushed */
- unsigned long s_retry; /* retries on source side timeouts */
- unsigned long d_retry; /* retries on destination side timeouts */
- unsigned long sflush; /* cycles spent in uv_flush_tlb_others */
- unsigned long dflush; /* cycles spent on destination side */
- unsigned long retriesok; /* successes on retries */
- unsigned long nomsg; /* interrupts with no message */
- unsigned long multmsg; /* interrupts with multiple messages */
- unsigned long ntargeted;/* nodes targeted */
+ /* sender statistics */
+ unsigned long s_giveup; /* number of fall backs to IPI-style flushes */
+ unsigned long s_requestor; /* number of shootdown requests */
+ unsigned long s_stimeout; /* source side timeouts */
+ unsigned long s_dtimeout; /* destination side timeouts */
+ unsigned long s_time; /* time spent in sending side */
+ unsigned long s_retriesok; /* successful retries */
+ unsigned long s_ntargcpu; /* number of cpus targeted */
+ unsigned long s_ntarguvhub; /* number of uvhubs targeted */
+ unsigned long s_ntarguvhub16; /* number of times >= 16 target hubs */
+ unsigned long s_ntarguvhub8; /* number of times >= 8 target hubs */
+ unsigned long s_ntarguvhub4; /* number of times >= 4 target hubs */
+ unsigned long s_ntarguvhub2; /* number of times >= 2 target hubs */
+ unsigned long s_ntarguvhub1; /* number of times == 1 target hub */
+ unsigned long s_resets_plug; /* ipi-style resets from plug state */
+ unsigned long s_resets_timeout; /* ipi-style resets from timeouts */
+ unsigned long s_busy; /* status stayed busy past s/w timer */
+ unsigned long s_throttles; /* waits in throttle */
+ unsigned long s_retry_messages; /* retry broadcasts */
+ /* destination statistics */
+ unsigned long d_alltlb; /* times all tlb's on this cpu were flushed */
+ unsigned long d_onetlb; /* times just one tlb on this cpu was flushed */
+ unsigned long d_multmsg; /* interrupts with multiple messages */
+ unsigned long d_nomsg; /* interrupts with no message */
+ unsigned long d_time; /* time spent on destination side */
+ unsigned long d_requestee; /* number of messages processed */
+ unsigned long d_retries; /* number of retry messages processed */
+ unsigned long d_canceled; /* number of messages canceled by retries */
+ unsigned long d_nocanceled; /* retries that found nothing to cancel */
+ unsigned long d_resets; /* number of ipi-style requests processed */
+ unsigned long d_rcanceled; /* number of messages canceled by resets */
};
-static inline int bau_node_isset(int node, struct bau_target_nodemask *dstp)
+static inline int bau_uvhub_isset(int uvhub, struct bau_target_uvhubmask *dstp)
{
- return constant_test_bit(node, &dstp->bits[0]);
+ return constant_test_bit(uvhub, &dstp->bits[0]);
}
-static inline void bau_node_set(int node, struct bau_target_nodemask *dstp)
+static inline void bau_uvhub_set(int uvhub, struct bau_target_uvhubmask *dstp)
{
- __set_bit(node, &dstp->bits[0]);
+ __set_bit(uvhub, &dstp->bits[0]);
}
-static inline void bau_nodes_clear(struct bau_target_nodemask *dstp, int nbits)
+static inline void bau_uvhubs_clear(struct bau_target_uvhubmask *dstp,
+ int nbits)
{
bitmap_zero(&dstp->bits[0], nbits);
}
+static inline int bau_uvhub_weight(struct bau_target_uvhubmask *dstp)
+{
+ return bitmap_weight((unsigned long *)&dstp->bits[0],
+ UV_DISTRIBUTION_SIZE);
+}
static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
{
@@ -328,4 +400,35 @@ static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
extern void uv_bau_message_intr1(void);
extern void uv_bau_timeout_intr1(void);
+struct atomic_short {
+ short counter;
+};
+
+/**
+ * atomic_read_short - read a short atomic variable
+ * @v: pointer of type atomic_short
+ *
+ * Atomically reads the value of @v.
+ */
+static inline int atomic_read_short(const struct atomic_short *v)
+{
+ return v->counter;
+}
+
+/**
+ * atomic_add_short_return - add and return a short int
+ * @i: short value to add
+ * @v: pointer of type atomic_short
+ *
+ * Atomically adds @i to @v and returns @i + @v
+ */
+static inline int atomic_add_short_return(short i, struct atomic_short *v)
+{
+ short __i = i;
+ asm volatile(LOCK_PREFIX "xaddw %0, %1"
+ : "+r" (i), "+m" (v->counter)
+ : : "memory");
+ return i + __i;
+}
+
#endif /* _ASM_X86_UV_UV_BAU_H */
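For illustration, a minimal stand-alone sketch of the xaddw semantics behind atomic_add_short_return(): the locked xadd leaves the old counter value in the source register, so i + __i yields the updated value. This is a user-space sketch with a literal lock prefix in place of LOCK_PREFIX; it is not kernel code.

#include <assert.h>

struct atomic_short { short counter; };

static inline int atomic_add_short_return(short i, struct atomic_short *v)
{
	short __i = i;
	asm volatile("lock; xaddw %0, %1"
			: "+r" (i), "+m" (v->counter)
			: : "memory");
	return i + __i;			/* i now holds the old value */
}

int main(void)
{
	struct atomic_short v = { .counter = 5 };

	assert(atomic_add_short_return(3, &v) == 8);	/* returns the new value */
	assert(v.counter == 8);
	return 0;
}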
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 14cc74ba5d23..bf6b88ef8eeb 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -307,7 +307,7 @@ static inline unsigned long uv_read_global_mmr32(int pnode, unsigned long offset
* Access Global MMR space using the MMR space located at the top of physical
* memory.
*/
-static inline unsigned long *uv_global_mmr64_address(int pnode, unsigned long offset)
+static inline volatile void __iomem *uv_global_mmr64_address(int pnode, unsigned long offset)
{
return __va(UV_GLOBAL_MMR64_BASE |
UV_GLOBAL_MMR64_PNODE_BITS(pnode) | offset);
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index 2cae46c7c8a2..b2f2d2e05cec 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -1,4 +1,3 @@
-
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -15,13 +14,25 @@
#define UV_MMR_ENABLE (1UL << 63)
/* ========================================================================= */
+/* UVH_BAU_DATA_BROADCAST */
+/* ========================================================================= */
+#define UVH_BAU_DATA_BROADCAST 0x61688UL
+#define UVH_BAU_DATA_BROADCAST_32 0x0440
+
+#define UVH_BAU_DATA_BROADCAST_ENABLE_SHFT 0
+#define UVH_BAU_DATA_BROADCAST_ENABLE_MASK 0x0000000000000001UL
+
+union uvh_bau_data_broadcast_u {
+ unsigned long v;
+ struct uvh_bau_data_broadcast_s {
+ unsigned long enable : 1; /* RW */
+ unsigned long rsvd_1_63: 63; /* */
+ } s;
+};
+
+/* ========================================================================= */
/* UVH_BAU_DATA_CONFIG */
/* ========================================================================= */
-#define UVH_LB_BAU_MISC_CONTROL 0x320170UL
-#define UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT 15
-#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT 16
-#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x000000000bUL
-/* 1011 timebase 7 (168millisec) * 3 ticks -> 500ms */
#define UVH_BAU_DATA_CONFIG 0x61680UL
#define UVH_BAU_DATA_CONFIG_32 0x0438
@@ -604,6 +615,68 @@ union uvh_lb_bau_intd_software_acknowledge_u {
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0x0a70
/* ========================================================================= */
+/* UVH_LB_BAU_MISC_CONTROL */
+/* ========================================================================= */
+#define UVH_LB_BAU_MISC_CONTROL 0x320170UL
+#define UVH_LB_BAU_MISC_CONTROL_32 0x00a10
+
+#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
+#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
+#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
+#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
+#define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
+#define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
+#define UVH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
+#define UVH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
+#define UVH_LB_BAU_MISC_CONTROL_CSI_AGENT_PRESENCE_VECTOR_SHFT 11
+#define UVH_LB_BAU_MISC_CONTROL_CSI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
+#define UVH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
+#define UVH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
+#define UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
+#define UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
+#define UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
+#define UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
+#define UVH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
+#define UVH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
+#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
+#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
+#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
+#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
+#define UVH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
+#define UVH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
+#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
+#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
+#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
+#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
+#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
+#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
+#define UVH_LB_BAU_MISC_CONTROL_FUN_SHFT 48
+#define UVH_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
+
+union uvh_lb_bau_misc_control_u {
+ unsigned long v;
+ struct uvh_lb_bau_misc_control_s {
+ unsigned long rejection_delay : 8; /* RW */
+ unsigned long apic_mode : 1; /* RW */
+ unsigned long force_broadcast : 1; /* RW */
+ unsigned long force_lock_nop : 1; /* RW */
+ unsigned long csi_agent_presence_vector : 3; /* RW */
+ unsigned long descriptor_fetch_mode : 1; /* RW */
+ unsigned long enable_intd_soft_ack_mode : 1; /* RW */
+ unsigned long intd_soft_ack_timeout_period : 4; /* RW */
+ unsigned long enable_dual_mapping_mode : 1; /* RW */
+ unsigned long vga_io_port_decode_enable : 1; /* RW */
+ unsigned long vga_io_port_16_bit_decode : 1; /* RW */
+ unsigned long suppress_dest_registration : 1; /* RW */
+ unsigned long programmed_initial_priority : 3; /* RW */
+ unsigned long use_incoming_priority : 1; /* RW */
+ unsigned long enable_programmed_initial_priority : 1; /* RW */
+ unsigned long rsvd_29_47 : 19; /* */
+ unsigned long fun : 16; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
/* UVH_LB_BAU_SB_ACTIVATION_CONTROL */
/* ========================================================================= */
#define UVH_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL
@@ -681,334 +754,6 @@ union uvh_lb_bau_sb_descriptor_base_u {
};
/* ========================================================================= */
-/* UVH_LB_MCAST_AOERR0_RPT_ENABLE */
-/* ========================================================================= */
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE 0x50b20UL
-
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_OBESE_MSG_SHFT 0
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_OBESE_MSG_MASK 0x0000000000000001UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_DATA_SB_ERR_SHFT 1
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_DATA_SB_ERR_MASK 0x0000000000000002UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_NACK_BUFF_PARITY_SHFT 2
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_NACK_BUFF_PARITY_MASK 0x0000000000000004UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_TIMEOUT_SHFT 3
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_TIMEOUT_MASK 0x0000000000000008UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_INACTIVE_REPLY_SHFT 4
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_INACTIVE_REPLY_MASK 0x0000000000000010UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_UPGRADE_ERROR_SHFT 5
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_UPGRADE_ERROR_MASK 0x0000000000000020UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REG_COUNT_UNDERFLOW_SHFT 6
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REG_COUNT_UNDERFLOW_MASK 0x0000000000000040UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REP_OBESE_MSG_SHFT 7
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REP_OBESE_MSG_MASK 0x0000000000000080UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_RUNT_MSG_SHFT 8
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_RUNT_MSG_MASK 0x0000000000000100UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_OBESE_MSG_SHFT 9
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_OBESE_MSG_MASK 0x0000000000000200UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_DATA_SB_ERR_SHFT 10
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_DATA_SB_ERR_MASK 0x0000000000000400UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_RUNT_MSG_SHFT 11
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_RUNT_MSG_MASK 0x0000000000000800UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_OBESE_MSG_SHFT 12
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_OBESE_MSG_MASK 0x0000000000001000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_DATA_SB_ERR_SHFT 13
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_DATA_SB_ERR_MASK 0x0000000000002000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_COMMAND_ERR_SHFT 14
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_COMMAND_ERR_MASK 0x0000000000004000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_PEND_TIMEOUT_SHFT 15
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_PEND_TIMEOUT_MASK 0x0000000000008000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_RUNT_MSG_SHFT 16
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_RUNT_MSG_MASK 0x0000000000010000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_OBESE_MSG_SHFT 17
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_OBESE_MSG_MASK 0x0000000000020000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_DATA_SB_ERR_SHFT 18
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_DATA_SB_ERR_MASK 0x0000000000040000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_RUNT_MSG_SHFT 19
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_RUNT_MSG_MASK 0x0000000000080000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_OBESE_MSG_SHFT 20
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_OBESE_MSG_MASK 0x0000000000100000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_DATA_SB_ERR_SHFT 21
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_DATA_SB_ERR_MASK 0x0000000000200000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_AMO_TIMEOUT_SHFT 22
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_AMO_TIMEOUT_MASK 0x0000000000400000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_PUT_TIMEOUT_SHFT 23
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_PUT_TIMEOUT_MASK 0x0000000000800000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_SPURIOUS_EVENT_SHFT 24
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_SPURIOUS_EVENT_MASK 0x0000000001000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IOH_DESTINATION_TABLE_PARITY_SHFT 25
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IOH_DESTINATION_TABLE_PARITY_MASK 0x0000000002000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_HAD_ERROR_REPLY_SHFT 26
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_HAD_ERROR_REPLY_MASK 0x0000000004000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_TIMEOUT_SHFT 27
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_TIMEOUT_MASK 0x0000000008000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_LOCK_MANAGER_HAD_ERROR_REPLY_SHFT 28
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_LOCK_MANAGER_HAD_ERROR_REPLY_MASK 0x0000000010000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_HAD_ERROR_REPLY_SHFT 29
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_HAD_ERROR_REPLY_MASK 0x0000000020000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_TIMEOUT_SHFT 30
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_TIMEOUT_MASK 0x0000000040000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SB_ACTIVATION_OVERRUN_SHFT 31
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SB_ACTIVATION_OVERRUN_MASK 0x0000000080000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_HAD_ERROR_REPLY_SHFT 32
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_HAD_ERROR_REPLY_MASK 0x0000000100000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_TIMEOUT_SHFT 33
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_TIMEOUT_MASK 0x0000000200000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_0_PARITY_SHFT 34
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_0_PARITY_MASK 0x0000000400000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_1_PARITY_SHFT 35
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_1_PARITY_MASK 0x0000000800000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SOCKET_DESTINATION_TABLE_PARITY_SHFT 36
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SOCKET_DESTINATION_TABLE_PARITY_MASK 0x0000001000000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_BAU_REPLY_PAYLOAD_CORRUPTION_SHFT 37
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_BAU_REPLY_PAYLOAD_CORRUPTION_MASK 0x0000002000000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IO_PORT_DESTINATION_TABLE_PARITY_SHFT 38
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IO_PORT_DESTINATION_TABLE_PARITY_MASK 0x0000004000000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INTD_SOFT_ACK_TIMEOUT_SHFT 39
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INTD_SOFT_ACK_TIMEOUT_MASK 0x0000008000000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_OBESE_MSG_SHFT 40
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_OBESE_MSG_MASK 0x0000010000000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_COMMAND_ERR_SHFT 41
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_COMMAND_ERR_MASK 0x0000020000000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_TIMEOUT_SHFT 42
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_TIMEOUT_MASK 0x0000040000000000UL
-
-union uvh_lb_mcast_aoerr0_rpt_enable_u {
- unsigned long v;
- struct uvh_lb_mcast_aoerr0_rpt_enable_s {
- unsigned long mcast_obese_msg : 1; /* RW */
- unsigned long mcast_data_sb_err : 1; /* RW */
- unsigned long mcast_nack_buff_parity : 1; /* RW */
- unsigned long mcast_timeout : 1; /* RW */
- unsigned long mcast_inactive_reply : 1; /* RW */
- unsigned long mcast_upgrade_error : 1; /* RW */
- unsigned long mcast_reg_count_underflow : 1; /* RW */
- unsigned long mcast_rep_obese_msg : 1; /* RW */
- unsigned long ucache_req_runt_msg : 1; /* RW */
- unsigned long ucache_req_obese_msg : 1; /* RW */
- unsigned long ucache_req_data_sb_err : 1; /* RW */
- unsigned long ucache_rep_runt_msg : 1; /* RW */
- unsigned long ucache_rep_obese_msg : 1; /* RW */
- unsigned long ucache_rep_data_sb_err : 1; /* RW */
- unsigned long ucache_rep_command_err : 1; /* RW */
- unsigned long ucache_pend_timeout : 1; /* RW */
- unsigned long macc_req_runt_msg : 1; /* RW */
- unsigned long macc_req_obese_msg : 1; /* RW */
- unsigned long macc_req_data_sb_err : 1; /* RW */
- unsigned long macc_rep_runt_msg : 1; /* RW */
- unsigned long macc_rep_obese_msg : 1; /* RW */
- unsigned long macc_rep_data_sb_err : 1; /* RW */
- unsigned long macc_amo_timeout : 1; /* RW */
- unsigned long macc_put_timeout : 1; /* RW */
- unsigned long macc_spurious_event : 1; /* RW */
- unsigned long ioh_destination_table_parity : 1; /* RW */
- unsigned long get_had_error_reply : 1; /* RW */
- unsigned long get_timeout : 1; /* RW */
- unsigned long lock_manager_had_error_reply : 1; /* RW */
- unsigned long put_had_error_reply : 1; /* RW */
- unsigned long put_timeout : 1; /* RW */
- unsigned long sb_activation_overrun : 1; /* RW */
- unsigned long completed_gb_activation_had_error_reply : 1; /* RW */
- unsigned long completed_gb_activation_timeout : 1; /* RW */
- unsigned long descriptor_buffer_0_parity : 1; /* RW */
- unsigned long descriptor_buffer_1_parity : 1; /* RW */
- unsigned long socket_destination_table_parity : 1; /* RW */
- unsigned long bau_reply_payload_corruption : 1; /* RW */
- unsigned long io_port_destination_table_parity : 1; /* RW */
- unsigned long intd_soft_ack_timeout : 1; /* RW */
- unsigned long int_rep_obese_msg : 1; /* RW */
- unsigned long int_rep_command_err : 1; /* RW */
- unsigned long int_timeout : 1; /* RW */
- unsigned long rsvd_43_63 : 21; /* */
- } s;
-};
-
-/* ========================================================================= */
-/* UVH_LOCAL_INT0_CONFIG */
-/* ========================================================================= */
-#define UVH_LOCAL_INT0_CONFIG 0x61000UL
-
-#define UVH_LOCAL_INT0_CONFIG_VECTOR_SHFT 0
-#define UVH_LOCAL_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL
-#define UVH_LOCAL_INT0_CONFIG_DM_SHFT 8
-#define UVH_LOCAL_INT0_CONFIG_DM_MASK 0x0000000000000700UL
-#define UVH_LOCAL_INT0_CONFIG_DESTMODE_SHFT 11
-#define UVH_LOCAL_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL
-#define UVH_LOCAL_INT0_CONFIG_STATUS_SHFT 12
-#define UVH_LOCAL_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL
-#define UVH_LOCAL_INT0_CONFIG_P_SHFT 13
-#define UVH_LOCAL_INT0_CONFIG_P_MASK 0x0000000000002000UL
-#define UVH_LOCAL_INT0_CONFIG_T_SHFT 15
-#define UVH_LOCAL_INT0_CONFIG_T_MASK 0x0000000000008000UL
-#define UVH_LOCAL_INT0_CONFIG_M_SHFT 16
-#define UVH_LOCAL_INT0_CONFIG_M_MASK 0x0000000000010000UL
-#define UVH_LOCAL_INT0_CONFIG_APIC_ID_SHFT 32
-#define UVH_LOCAL_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
-
-union uvh_local_int0_config_u {
- unsigned long v;
- struct uvh_local_int0_config_s {
- unsigned long vector_ : 8; /* RW */
- unsigned long dm : 3; /* RW */
- unsigned long destmode : 1; /* RW */
- unsigned long status : 1; /* RO */
- unsigned long p : 1; /* RO */
- unsigned long rsvd_14 : 1; /* */
- unsigned long t : 1; /* RO */
- unsigned long m : 1; /* RW */
- unsigned long rsvd_17_31: 15; /* */
- unsigned long apic_id : 32; /* RW */
- } s;
-};
-
-/* ========================================================================= */
-/* UVH_LOCAL_INT0_ENABLE */
-/* ========================================================================= */
-#define UVH_LOCAL_INT0_ENABLE 0x65000UL
-
-#define UVH_LOCAL_INT0_ENABLE_LB_HCERR_SHFT 0
-#define UVH_LOCAL_INT0_ENABLE_LB_HCERR_MASK 0x0000000000000001UL
-#define UVH_LOCAL_INT0_ENABLE_GR0_HCERR_SHFT 1
-#define UVH_LOCAL_INT0_ENABLE_GR0_HCERR_MASK 0x0000000000000002UL
-#define UVH_LOCAL_INT0_ENABLE_GR1_HCERR_SHFT 2
-#define UVH_LOCAL_INT0_ENABLE_GR1_HCERR_MASK 0x0000000000000004UL
-#define UVH_LOCAL_INT0_ENABLE_LH_HCERR_SHFT 3
-#define UVH_LOCAL_INT0_ENABLE_LH_HCERR_MASK 0x0000000000000008UL
-#define UVH_LOCAL_INT0_ENABLE_RH_HCERR_SHFT 4
-#define UVH_LOCAL_INT0_ENABLE_RH_HCERR_MASK 0x0000000000000010UL
-#define UVH_LOCAL_INT0_ENABLE_XN_HCERR_SHFT 5
-#define UVH_LOCAL_INT0_ENABLE_XN_HCERR_MASK 0x0000000000000020UL
-#define UVH_LOCAL_INT0_ENABLE_SI_HCERR_SHFT 6
-#define UVH_LOCAL_INT0_ENABLE_SI_HCERR_MASK 0x0000000000000040UL
-#define UVH_LOCAL_INT0_ENABLE_LB_AOERR0_SHFT 7
-#define UVH_LOCAL_INT0_ENABLE_LB_AOERR0_MASK 0x0000000000000080UL
-#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR0_SHFT 8
-#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR0_MASK 0x0000000000000100UL
-#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR0_SHFT 9
-#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR0_MASK 0x0000000000000200UL
-#define UVH_LOCAL_INT0_ENABLE_LH_AOERR0_SHFT 10
-#define UVH_LOCAL_INT0_ENABLE_LH_AOERR0_MASK 0x0000000000000400UL
-#define UVH_LOCAL_INT0_ENABLE_RH_AOERR0_SHFT 11
-#define UVH_LOCAL_INT0_ENABLE_RH_AOERR0_MASK 0x0000000000000800UL
-#define UVH_LOCAL_INT0_ENABLE_XN_AOERR0_SHFT 12
-#define UVH_LOCAL_INT0_ENABLE_XN_AOERR0_MASK 0x0000000000001000UL
-#define UVH_LOCAL_INT0_ENABLE_SI_AOERR0_SHFT 13
-#define UVH_LOCAL_INT0_ENABLE_SI_AOERR0_MASK 0x0000000000002000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_AOERR1_SHFT 14
-#define UVH_LOCAL_INT0_ENABLE_LB_AOERR1_MASK 0x0000000000004000UL
-#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR1_SHFT 15
-#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR1_MASK 0x0000000000008000UL
-#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR1_SHFT 16
-#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR1_MASK 0x0000000000010000UL
-#define UVH_LOCAL_INT0_ENABLE_LH_AOERR1_SHFT 17
-#define UVH_LOCAL_INT0_ENABLE_LH_AOERR1_MASK 0x0000000000020000UL
-#define UVH_LOCAL_INT0_ENABLE_RH_AOERR1_SHFT 18
-#define UVH_LOCAL_INT0_ENABLE_RH_AOERR1_MASK 0x0000000000040000UL
-#define UVH_LOCAL_INT0_ENABLE_XN_AOERR1_SHFT 19
-#define UVH_LOCAL_INT0_ENABLE_XN_AOERR1_MASK 0x0000000000080000UL
-#define UVH_LOCAL_INT0_ENABLE_SI_AOERR1_SHFT 20
-#define UVH_LOCAL_INT0_ENABLE_SI_AOERR1_MASK 0x0000000000100000UL
-#define UVH_LOCAL_INT0_ENABLE_RH_VPI_INT_SHFT 21
-#define UVH_LOCAL_INT0_ENABLE_RH_VPI_INT_MASK 0x0000000000200000UL
-#define UVH_LOCAL_INT0_ENABLE_SYSTEM_SHUTDOWN_INT_SHFT 22
-#define UVH_LOCAL_INT0_ENABLE_SYSTEM_SHUTDOWN_INT_MASK 0x0000000000400000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_0_SHFT 23
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_0_MASK 0x0000000000800000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_1_SHFT 24
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_1_MASK 0x0000000001000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_2_SHFT 25
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_2_MASK 0x0000000002000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_3_SHFT 26
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_3_MASK 0x0000000004000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_4_SHFT 27
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_4_MASK 0x0000000008000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_5_SHFT 28
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_5_MASK 0x0000000010000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_6_SHFT 29
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_6_MASK 0x0000000020000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_7_SHFT 30
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_7_MASK 0x0000000040000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_8_SHFT 31
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_8_MASK 0x0000000080000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_9_SHFT 32
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_9_MASK 0x0000000100000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_10_SHFT 33
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_10_MASK 0x0000000200000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_11_SHFT 34
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_11_MASK 0x0000000400000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_12_SHFT 35
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_12_MASK 0x0000000800000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_13_SHFT 36
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_13_MASK 0x0000001000000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_14_SHFT 37
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_14_MASK 0x0000002000000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_15_SHFT 38
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_15_MASK 0x0000004000000000UL
-#define UVH_LOCAL_INT0_ENABLE_L1_NMI_INT_SHFT 39
-#define UVH_LOCAL_INT0_ENABLE_L1_NMI_INT_MASK 0x0000008000000000UL
-#define UVH_LOCAL_INT0_ENABLE_STOP_CLOCK_SHFT 40
-#define UVH_LOCAL_INT0_ENABLE_STOP_CLOCK_MASK 0x0000010000000000UL
-#define UVH_LOCAL_INT0_ENABLE_ASIC_TO_L1_SHFT 41
-#define UVH_LOCAL_INT0_ENABLE_ASIC_TO_L1_MASK 0x0000020000000000UL
-#define UVH_LOCAL_INT0_ENABLE_L1_TO_ASIC_SHFT 42
-#define UVH_LOCAL_INT0_ENABLE_L1_TO_ASIC_MASK 0x0000040000000000UL
-#define UVH_LOCAL_INT0_ENABLE_LTC_INT_SHFT 43
-#define UVH_LOCAL_INT0_ENABLE_LTC_INT_MASK 0x0000080000000000UL
-#define UVH_LOCAL_INT0_ENABLE_LA_SEQ_TRIGGER_SHFT 44
-#define UVH_LOCAL_INT0_ENABLE_LA_SEQ_TRIGGER_MASK 0x0000100000000000UL
-
-union uvh_local_int0_enable_u {
- unsigned long v;
- struct uvh_local_int0_enable_s {
- unsigned long lb_hcerr : 1; /* RW */
- unsigned long gr0_hcerr : 1; /* RW */
- unsigned long gr1_hcerr : 1; /* RW */
- unsigned long lh_hcerr : 1; /* RW */
- unsigned long rh_hcerr : 1; /* RW */
- unsigned long xn_hcerr : 1; /* RW */
- unsigned long si_hcerr : 1; /* RW */
- unsigned long lb_aoerr0 : 1; /* RW */
- unsigned long gr0_aoerr0 : 1; /* RW */
- unsigned long gr1_aoerr0 : 1; /* RW */
- unsigned long lh_aoerr0 : 1; /* RW */
- unsigned long rh_aoerr0 : 1; /* RW */
- unsigned long xn_aoerr0 : 1; /* RW */
- unsigned long si_aoerr0 : 1; /* RW */
- unsigned long lb_aoerr1 : 1; /* RW */
- unsigned long gr0_aoerr1 : 1; /* RW */
- unsigned long gr1_aoerr1 : 1; /* RW */
- unsigned long lh_aoerr1 : 1; /* RW */
- unsigned long rh_aoerr1 : 1; /* RW */
- unsigned long xn_aoerr1 : 1; /* RW */
- unsigned long si_aoerr1 : 1; /* RW */
- unsigned long rh_vpi_int : 1; /* RW */
- unsigned long system_shutdown_int : 1; /* RW */
- unsigned long lb_irq_int_0 : 1; /* RW */
- unsigned long lb_irq_int_1 : 1; /* RW */
- unsigned long lb_irq_int_2 : 1; /* RW */
- unsigned long lb_irq_int_3 : 1; /* RW */
- unsigned long lb_irq_int_4 : 1; /* RW */
- unsigned long lb_irq_int_5 : 1; /* RW */
- unsigned long lb_irq_int_6 : 1; /* RW */
- unsigned long lb_irq_int_7 : 1; /* RW */
- unsigned long lb_irq_int_8 : 1; /* RW */
- unsigned long lb_irq_int_9 : 1; /* RW */
- unsigned long lb_irq_int_10 : 1; /* RW */
- unsigned long lb_irq_int_11 : 1; /* RW */
- unsigned long lb_irq_int_12 : 1; /* RW */
- unsigned long lb_irq_int_13 : 1; /* RW */
- unsigned long lb_irq_int_14 : 1; /* RW */
- unsigned long lb_irq_int_15 : 1; /* RW */
- unsigned long l1_nmi_int : 1; /* RW */
- unsigned long stop_clock : 1; /* RW */
- unsigned long asic_to_l1 : 1; /* RW */
- unsigned long l1_to_asic : 1; /* RW */
- unsigned long ltc_int : 1; /* RW */
- unsigned long la_seq_trigger : 1; /* RW */
- unsigned long rsvd_45_63 : 19; /* */
- } s;
-};
-
-/* ========================================================================= */
/* UVH_NODE_ID */
/* ========================================================================= */
#define UVH_NODE_ID 0x0UL
@@ -1112,26 +857,6 @@ union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
};
/* ========================================================================= */
-/* UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR */
-/* ========================================================================= */
-#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR 0x1600020UL
-
-#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_BASE_SHFT 26
-#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
-#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
-
-union uvh_rh_gam_cfg_overlay_config_mmr_u {
- unsigned long v;
- struct uvh_rh_gam_cfg_overlay_config_mmr_s {
- unsigned long rsvd_0_25: 26; /* */
- unsigned long base : 20; /* RW */
- unsigned long rsvd_46_62: 17; /* */
- unsigned long enable : 1; /* RW */
- } s;
-};
-
-/* ========================================================================= */
/* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */
/* ========================================================================= */
#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
@@ -1263,101 +988,6 @@ union uvh_rtc1_int_config_u {
};
/* ========================================================================= */
-/* UVH_RTC2_INT_CONFIG */
-/* ========================================================================= */
-#define UVH_RTC2_INT_CONFIG 0x61600UL
-
-#define UVH_RTC2_INT_CONFIG_VECTOR_SHFT 0
-#define UVH_RTC2_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
-#define UVH_RTC2_INT_CONFIG_DM_SHFT 8
-#define UVH_RTC2_INT_CONFIG_DM_MASK 0x0000000000000700UL
-#define UVH_RTC2_INT_CONFIG_DESTMODE_SHFT 11
-#define UVH_RTC2_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
-#define UVH_RTC2_INT_CONFIG_STATUS_SHFT 12
-#define UVH_RTC2_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
-#define UVH_RTC2_INT_CONFIG_P_SHFT 13
-#define UVH_RTC2_INT_CONFIG_P_MASK 0x0000000000002000UL
-#define UVH_RTC2_INT_CONFIG_T_SHFT 15
-#define UVH_RTC2_INT_CONFIG_T_MASK 0x0000000000008000UL
-#define UVH_RTC2_INT_CONFIG_M_SHFT 16
-#define UVH_RTC2_INT_CONFIG_M_MASK 0x0000000000010000UL
-#define UVH_RTC2_INT_CONFIG_APIC_ID_SHFT 32
-#define UVH_RTC2_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
-
-union uvh_rtc2_int_config_u {
- unsigned long v;
- struct uvh_rtc2_int_config_s {
- unsigned long vector_ : 8; /* RW */
- unsigned long dm : 3; /* RW */
- unsigned long destmode : 1; /* RW */
- unsigned long status : 1; /* RO */
- unsigned long p : 1; /* RO */
- unsigned long rsvd_14 : 1; /* */
- unsigned long t : 1; /* RO */
- unsigned long m : 1; /* RW */
- unsigned long rsvd_17_31: 15; /* */
- unsigned long apic_id : 32; /* RW */
- } s;
-};
-
-/* ========================================================================= */
-/* UVH_RTC3_INT_CONFIG */
-/* ========================================================================= */
-#define UVH_RTC3_INT_CONFIG 0x61640UL
-
-#define UVH_RTC3_INT_CONFIG_VECTOR_SHFT 0
-#define UVH_RTC3_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
-#define UVH_RTC3_INT_CONFIG_DM_SHFT 8
-#define UVH_RTC3_INT_CONFIG_DM_MASK 0x0000000000000700UL
-#define UVH_RTC3_INT_CONFIG_DESTMODE_SHFT 11
-#define UVH_RTC3_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
-#define UVH_RTC3_INT_CONFIG_STATUS_SHFT 12
-#define UVH_RTC3_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
-#define UVH_RTC3_INT_CONFIG_P_SHFT 13
-#define UVH_RTC3_INT_CONFIG_P_MASK 0x0000000000002000UL
-#define UVH_RTC3_INT_CONFIG_T_SHFT 15
-#define UVH_RTC3_INT_CONFIG_T_MASK 0x0000000000008000UL
-#define UVH_RTC3_INT_CONFIG_M_SHFT 16
-#define UVH_RTC3_INT_CONFIG_M_MASK 0x0000000000010000UL
-#define UVH_RTC3_INT_CONFIG_APIC_ID_SHFT 32
-#define UVH_RTC3_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
-
-union uvh_rtc3_int_config_u {
- unsigned long v;
- struct uvh_rtc3_int_config_s {
- unsigned long vector_ : 8; /* RW */
- unsigned long dm : 3; /* RW */
- unsigned long destmode : 1; /* RW */
- unsigned long status : 1; /* RO */
- unsigned long p : 1; /* RO */
- unsigned long rsvd_14 : 1; /* */
- unsigned long t : 1; /* RO */
- unsigned long m : 1; /* RW */
- unsigned long rsvd_17_31: 15; /* */
- unsigned long apic_id : 32; /* RW */
- } s;
-};
-
-/* ========================================================================= */
-/* UVH_RTC_INC_RATIO */
-/* ========================================================================= */
-#define UVH_RTC_INC_RATIO 0x350000UL
-
-#define UVH_RTC_INC_RATIO_FRACTION_SHFT 0
-#define UVH_RTC_INC_RATIO_FRACTION_MASK 0x00000000000fffffUL
-#define UVH_RTC_INC_RATIO_RATIO_SHFT 20
-#define UVH_RTC_INC_RATIO_RATIO_MASK 0x0000000000700000UL
-
-union uvh_rtc_inc_ratio_u {
- unsigned long v;
- struct uvh_rtc_inc_ratio_s {
- unsigned long fraction : 20; /* RW */
- unsigned long ratio : 3; /* RW */
- unsigned long rsvd_23_63: 41; /* */
- } s;
-};
-
-/* ========================================================================= */
/* UVH_SI_ADDR_MAP_CONFIG */
/* ========================================================================= */
#define UVH_SI_ADDR_MAP_CONFIG 0xc80000UL
diff --git a/arch/x86/include/asm/vmware.h b/arch/x86/include/asm/vmware.h
deleted file mode 100644
index e49ed6d2fd4e..000000000000
--- a/arch/x86/include/asm/vmware.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (C) 2008, VMware, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-#ifndef ASM_X86__VMWARE_H
-#define ASM_X86__VMWARE_H
-
-extern void vmware_platform_setup(void);
-extern int vmware_platform(void);
-extern void vmware_set_feature_bits(struct cpuinfo_x86 *c);
-
-#endif
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index fb9a080740ec..9e6779f7cf2d 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -25,6 +25,8 @@
*
*/
+#include <linux/types.h>
+
/*
* Definitions of Primary Processor-Based VM-Execution Controls.
*/
@@ -120,6 +122,8 @@ enum vmcs_field {
GUEST_IA32_DEBUGCTL_HIGH = 0x00002803,
GUEST_IA32_PAT = 0x00002804,
GUEST_IA32_PAT_HIGH = 0x00002805,
+ GUEST_IA32_EFER = 0x00002806,
+ GUEST_IA32_EFER_HIGH = 0x00002807,
GUEST_PDPTR0 = 0x0000280a,
GUEST_PDPTR0_HIGH = 0x0000280b,
GUEST_PDPTR1 = 0x0000280c,
@@ -130,6 +134,8 @@ enum vmcs_field {
GUEST_PDPTR3_HIGH = 0x00002811,
HOST_IA32_PAT = 0x00002c00,
HOST_IA32_PAT_HIGH = 0x00002c01,
+ HOST_IA32_EFER = 0x00002c02,
+ HOST_IA32_EFER_HIGH = 0x00002c03,
PIN_BASED_VM_EXEC_CONTROL = 0x00004000,
CPU_BASED_VM_EXEC_CONTROL = 0x00004002,
EXCEPTION_BITMAP = 0x00004004,
@@ -394,6 +400,10 @@ enum vmcs_field {
#define ASM_VMX_INVEPT ".byte 0x66, 0x0f, 0x38, 0x80, 0x08"
#define ASM_VMX_INVVPID ".byte 0x66, 0x0f, 0x38, 0x81, 0x08"
-
+struct vmx_msr_entry {
+ u32 index;
+ u32 reserved;
+ u64 value;
+} __aligned(16);
#endif
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index ddc04ccad03b..2c4390cae228 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -37,8 +37,9 @@ extern int check_for_xstate(struct i387_fxsave_struct __user *buf,
void __user *fpstate,
struct _fpx_sw_bytes *sw);
-static inline int xrstor_checking(struct xsave_struct *fx)
+static inline int fpu_xrstor_checking(struct fpu *fpu)
{
+ struct xsave_struct *fx = &fpu->state->xsave;
int err;
asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
@@ -110,12 +111,12 @@ static inline void xrstor_state(struct xsave_struct *fx, u64 mask)
: "memory");
}
-static inline void xsave(struct task_struct *tsk)
+static inline void fpu_xsave(struct fpu *fpu)
{
/* This, however, we can work around by forcing the compiler to select
an addressing mode that doesn't require extended registers. */
__asm__ __volatile__(".byte " REX_PREFIX "0x0f,0xae,0x27"
- : : "D" (&(tsk->thread.xstate->xsave)),
+ : : "D" (&(fpu->state->xsave)),
"a" (-1), "d"(-1) : "memory");
}
#endif
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 4c58352209e0..e77b22083721 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -47,8 +47,6 @@ obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o
obj-y += process.o
obj-y += i387.o xsave.o
obj-y += ptrace.o
-obj-$(CONFIG_X86_DS) += ds.o
-obj-$(CONFIG_X86_DS_SELFTEST) += ds_selftest.o
obj-$(CONFIG_X86_32) += tls.o
obj-$(CONFIG_IA32_EMULATION) += tls.o
obj-y += step.o
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index cd40aba6aa95..488be461a380 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -94,6 +94,53 @@ enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC;
/*
+ * ISA irqs by default are the first 16 gsis but can be
+ * any gsi as specified by an interrupt source override.
+ */
+static u32 isa_irq_to_gsi[NR_IRQS_LEGACY] __read_mostly = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
+};
+
+static unsigned int gsi_to_irq(unsigned int gsi)
+{
+ unsigned int irq = gsi + NR_IRQS_LEGACY;
+ unsigned int i;
+
+ for (i = 0; i < NR_IRQS_LEGACY; i++) {
+ if (isa_irq_to_gsi[i] == gsi) {
+ return i;
+ }
+ }
+
+ /* Provide an identity mapping of gsi == irq
+ * except on truly weird platforms that have
+ * non-ISA irqs in the first 16 gsis.
+ */
+ if (gsi >= NR_IRQS_LEGACY)
+ irq = gsi;
+ else
+ irq = gsi_end + 1 + gsi;
+
+ return irq;
+}
+
+static u32 irq_to_gsi(int irq)
+{
+ unsigned int gsi;
+
+ if (irq < NR_IRQS_LEGACY)
+ gsi = isa_irq_to_gsi[irq];
+ else if (irq <= gsi_end)
+ gsi = irq;
+ else if (irq <= (gsi_end + NR_IRQS_LEGACY))
+ gsi = irq - gsi_end;
+ else
+ gsi = 0xffffffff;
+
+ return gsi;
+}
+
+/*
* Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END,
* to map the target physical address. The problem is that set_fixmap()
* provides a single page, and it is possible that the page is not
@@ -313,7 +360,7 @@ acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end)
/*
* Parse Interrupt Source Override for the ACPI SCI
*/
-static void __init acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
+static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger, u32 gsi)
{
if (trigger == 0) /* compatible SCI trigger is level */
trigger = 3;
@@ -333,7 +380,7 @@ static void __init acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
* If GSI is < 16, this will update its flags,
* else it will create a new mp_irqs[] entry.
*/
- mp_override_legacy_irq(gsi, polarity, trigger, gsi);
+ mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
/*
* stash over-ride to indicate we've been here
@@ -357,9 +404,10 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
acpi_table_print_madt_entry(header);
if (intsrc->source_irq == acpi_gbl_FADT.sci_interrupt) {
- acpi_sci_ioapic_setup(intsrc->global_irq,
+ acpi_sci_ioapic_setup(intsrc->source_irq,
intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
- (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2);
+ (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2,
+ intsrc->global_irq);
return 0;
}
@@ -448,7 +496,7 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
{
- *irq = gsi;
+ *irq = gsi_to_irq(gsi);
#ifdef CONFIG_X86_IO_APIC
if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC)
@@ -458,6 +506,14 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
return 0;
}
+int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi)
+{
+ if (isa_irq >= 16)
+ return -1;
+ *gsi = irq_to_gsi(isa_irq);
+ return 0;
+}
+
/*
* success: return IRQ number (>=0)
* failure: return < 0
@@ -482,7 +538,7 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
plat_gsi = mp_register_gsi(dev, gsi, trigger, polarity);
}
#endif
- irq = plat_gsi;
+ irq = gsi_to_irq(plat_gsi);
return irq;
}
@@ -867,29 +923,6 @@ static int __init acpi_parse_madt_lapic_entries(void)
extern int es7000_plat;
#endif
-int __init acpi_probe_gsi(void)
-{
- int idx;
- int gsi;
- int max_gsi = 0;
-
- if (acpi_disabled)
- return 0;
-
- if (!acpi_ioapic)
- return 0;
-
- max_gsi = 0;
- for (idx = 0; idx < nr_ioapics; idx++) {
- gsi = mp_gsi_routing[idx].gsi_end;
-
- if (gsi > max_gsi)
- max_gsi = gsi;
- }
-
- return max_gsi + 1;
-}
-
static void assign_to_mp_irq(struct mpc_intsrc *m,
struct mpc_intsrc *mp_irq)
{
@@ -947,13 +980,13 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
mp_irq.dstirq = pin; /* INTIN# */
save_mp_irq(&mp_irq);
+
+ isa_irq_to_gsi[bus_irq] = gsi;
}
void __init mp_config_acpi_legacy_irqs(void)
{
int i;
- int ioapic;
- unsigned int dstapic;
struct mpc_intsrc mp_irq;
#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
@@ -974,19 +1007,27 @@ void __init mp_config_acpi_legacy_irqs(void)
#endif
/*
- * Locate the IOAPIC that manages the ISA IRQs (0-15).
- */
- ioapic = mp_find_ioapic(0);
- if (ioapic < 0)
- return;
- dstapic = mp_ioapics[ioapic].apicid;
-
- /*
* Use the default configuration for the IRQs 0-15. Unless
* overridden by (MADT) interrupt source override entries.
*/
for (i = 0; i < 16; i++) {
+ int ioapic, pin;
+ unsigned int dstapic;
int idx;
+ u32 gsi;
+
+ /* Locate the gsi that irq i maps to. */
+ if (acpi_isa_irq_to_gsi(i, &gsi))
+ continue;
+
+ /*
+ * Locate the IOAPIC that manages the ISA IRQ.
+ */
+ ioapic = mp_find_ioapic(gsi);
+ if (ioapic < 0)
+ continue;
+ pin = mp_find_ioapic_pin(ioapic, gsi);
+ dstapic = mp_ioapics[ioapic].apicid;
for (idx = 0; idx < mp_irq_entries; idx++) {
struct mpc_intsrc *irq = mp_irqs + idx;
@@ -996,7 +1037,7 @@ void __init mp_config_acpi_legacy_irqs(void)
break;
/* Do we already have a mapping for this IOAPIC pin */
- if (irq->dstapic == dstapic && irq->dstirq == i)
+ if (irq->dstapic == dstapic && irq->dstirq == pin)
break;
}
@@ -1011,7 +1052,7 @@ void __init mp_config_acpi_legacy_irqs(void)
mp_irq.dstapic = dstapic;
mp_irq.irqtype = mp_INT;
mp_irq.srcbusirq = i; /* Identity mapped */
- mp_irq.dstirq = i;
+ mp_irq.dstirq = pin;
save_mp_irq(&mp_irq);
}
@@ -1076,11 +1117,6 @@ int mp_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
ioapic_pin = mp_find_ioapic_pin(ioapic, gsi);
-#ifdef CONFIG_X86_32
- if (ioapic_renumber_irq)
- gsi = ioapic_renumber_irq(ioapic, gsi);
-#endif
-
if (ioapic_pin > MP_MAX_IOAPIC_PIN) {
printk(KERN_ERR "Invalid reference to IOAPIC pin "
"%d-%d\n", mp_ioapics[ioapic].apicid,
@@ -1094,7 +1130,7 @@ int mp_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
set_io_apic_irq_attr(&irq_attr, ioapic, ioapic_pin,
trigger == ACPI_EDGE_SENSITIVE ? 0 : 1,
polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
- io_apic_set_pci_routing(dev, gsi, &irq_attr);
+ io_apic_set_pci_routing(dev, gsi_to_irq(gsi), &irq_attr);
return gsi;
}
@@ -1154,7 +1190,8 @@ static int __init acpi_parse_madt_ioapic_entries(void)
* pretend we got one so we can set the SCI flags.
*/
if (!acpi_sci_override_gsi)
- acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0);
+ acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0,
+ acpi_gbl_FADT.sci_interrupt);
/* Fill in identity legacy mappings where no override */
mp_config_acpi_legacy_irqs();
@@ -1576,6 +1613,10 @@ static int __init parse_acpi(char *arg)
/* "acpi=noirq" disables ACPI interrupt routing */
else if (strcmp(arg, "noirq") == 0) {
acpi_noirq_set();
+ }
+ /* "acpi=copy_dsdt" copys DSDT */
+ else if (strcmp(arg, "copy_dsdt") == 0) {
+ acpi_gbl_copy_dsdt_locally = 1;
} else {
/* Core will printk when we return error. */
return -EINVAL;
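As an aside, a stand-alone sketch of the new GSI-to-IRQ lookup behaviour (the gsi_end value and the timer override below are assumed example values, not taken from the patch): an ISA interrupt source override wins, identity mapping applies above the legacy range, and a low GSI not claimed by an ISA IRQ is pushed above the highest known GSI.

#include <stdio.h>

#define NR_IRQS_LEGACY 16

static unsigned int gsi_end = 47;	/* e.g. two 24-pin IO-APICs: GSIs 0-47 */
static unsigned int isa_irq_to_gsi[NR_IRQS_LEGACY] = {
	2, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15	/* IRQ0 overridden to GSI2 */
};

static unsigned int gsi_to_irq(unsigned int gsi)
{
	/* an interrupt source override wins */
	for (unsigned int i = 0; i < NR_IRQS_LEGACY; i++)
		if (isa_irq_to_gsi[i] == gsi)
			return i;
	/* otherwise identity, except low GSIs not claimed by ISA,
	 * which land above the highest known GSI */
	return gsi >= NR_IRQS_LEGACY ? gsi : gsi_end + 1 + gsi;
}

int main(void)
{
	printf("GSI  2 -> IRQ %u\n", gsi_to_irq(2));	/* 0: the timer override */
	printf("GSI 19 -> IRQ %u\n", gsi_to_irq(19));	/* 19: identity mapping */
	printf("GSI  0 -> IRQ %u\n", gsi_to_irq(0));	/* 48: pushed past gsi_end */
	return 0;
}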
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
index 8ded418b0593..13ab720573e3 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -1,4 +1,4 @@
- .section .text.page_aligned
+ .section .text..page_aligned
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/page_types.h>
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 1a160d5d44d0..70237732a6c7 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -194,7 +194,7 @@ static void __init_or_module add_nops(void *insns, unsigned int len)
}
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
-extern u8 *__smp_locks[], *__smp_locks_end[];
+extern s32 __smp_locks[], __smp_locks_end[];
static void *text_poke_early(void *addr, const void *opcode, size_t len);
/* Replace instructions with better alternatives for this CPU type.
@@ -235,37 +235,41 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
#ifdef CONFIG_SMP
-static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
+static void alternatives_smp_lock(const s32 *start, const s32 *end,
+ u8 *text, u8 *text_end)
{
- u8 **ptr;
+ const s32 *poff;
mutex_lock(&text_mutex);
- for (ptr = start; ptr < end; ptr++) {
- if (*ptr < text)
- continue;
- if (*ptr > text_end)
+ for (poff = start; poff < end; poff++) {
+ u8 *ptr = (u8 *)poff + *poff;
+
+ if (!*poff || ptr < text || ptr >= text_end)
continue;
/* turn DS segment override prefix into lock prefix */
- text_poke(*ptr, ((unsigned char []){0xf0}), 1);
+ if (*ptr == 0x3e)
+ text_poke(ptr, ((unsigned char []){0xf0}), 1);
};
mutex_unlock(&text_mutex);
}
-static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
+static void alternatives_smp_unlock(const s32 *start, const s32 *end,
+ u8 *text, u8 *text_end)
{
- u8 **ptr;
+ const s32 *poff;
if (noreplace_smp)
return;
mutex_lock(&text_mutex);
- for (ptr = start; ptr < end; ptr++) {
- if (*ptr < text)
- continue;
- if (*ptr > text_end)
+ for (poff = start; poff < end; poff++) {
+ u8 *ptr = (u8 *)poff + *poff;
+
+ if (!*poff || ptr < text || ptr >= text_end)
continue;
/* turn lock prefix into DS segment override prefix */
- text_poke(*ptr, ((unsigned char []){0x3E}), 1);
+ if (*ptr == 0xf0)
+ text_poke(ptr, ((unsigned char []){0x3E}), 1);
};
mutex_unlock(&text_mutex);
}
@@ -276,8 +280,8 @@ struct smp_alt_module {
char *name;
/* ptrs to lock prefixes */
- u8 **locks;
- u8 **locks_end;
+ const s32 *locks;
+ const s32 *locks_end;
/* .text segment, needed to avoid patching init code ;) */
u8 *text;
@@ -398,16 +402,19 @@ void alternatives_smp_switch(int smp)
int alternatives_text_reserved(void *start, void *end)
{
struct smp_alt_module *mod;
- u8 **ptr;
+ const s32 *poff;
u8 *text_start = start;
u8 *text_end = end;
list_for_each_entry(mod, &smp_alt_modules, next) {
if (mod->text > text_end || mod->text_end < text_start)
continue;
- for (ptr = mod->locks; ptr < mod->locks_end; ptr++)
- if (text_start <= *ptr && text_end >= *ptr)
+ for (poff = mod->locks; poff < mod->locks_end; poff++) {
+ const u8 *ptr = (const u8 *)poff + *poff;
+
+ if (text_start <= ptr && text_end > ptr)
return 1;
+ }
}
return 0;
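The .smp_locks change above swaps an array of absolute pointers for 32-bit self-relative offsets, which halves the table on 64-bit and keeps it valid without relocation. A stand-alone sketch of that encoding follows (the array and target here are illustrative; in the kernel the linker emits the entries, and the 32-bit offset is enough because .text and .smp_locks sit within 2 GiB of each other):

#include <stdint.h>
#include <stdio.h>

static uint8_t text[64];		/* stand-in for the .text section */
static int32_t smp_locks[1];		/* stand-in for the .smp_locks table */

int main(void)
{
	uint8_t *lock_insn = &text[17];	/* hypothetical lock-prefixed instruction */

	/* recording a site: store the target relative to the table entry itself */
	smp_locks[0] = (int32_t)((uintptr_t)lock_insn - (uintptr_t)&smp_locks[0]);

	/* what alternatives_smp_lock()/unlock() do to recover the pointer */
	uint8_t *ptr = (uint8_t *)&smp_locks[0] + smp_locks[0];

	printf("recovered %p, expected %p\n", (void *)ptr, (void *)lock_insn);
	return 0;
}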
diff --git a/arch/x86/kernel/apic/Makefile b/arch/x86/kernel/apic/Makefile
index 565c1bfc507d..1a4512e48d24 100644
--- a/arch/x86/kernel/apic/Makefile
+++ b/arch/x86/kernel/apic/Makefile
@@ -2,7 +2,12 @@
# Makefile for local APIC drivers and for the IO-APIC code
#
-obj-$(CONFIG_X86_LOCAL_APIC) += apic.o apic_noop.o probe_$(BITS).o ipi.o nmi.o
+obj-$(CONFIG_X86_LOCAL_APIC) += apic.o apic_noop.o probe_$(BITS).o ipi.o
+ifneq ($(CONFIG_NMI_WATCHDOG),y)
+obj-$(CONFIG_X86_LOCAL_APIC) += nmi.o
+endif
+obj-$(CONFIG_NMI_WATCHDOG) += hw_nmi.o
+
obj-$(CONFIG_X86_IO_APIC) += io_apic.o
obj-$(CONFIG_SMP) += ipi.o
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index 03ba1b895f5e..425e53a87feb 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -131,24 +131,6 @@ int es7000_plat;
static unsigned int base;
-static int
-es7000_rename_gsi(int ioapic, int gsi)
-{
- if (es7000_plat == ES7000_ZORRO)
- return gsi;
-
- if (!base) {
- int i;
- for (i = 0; i < nr_ioapics; i++)
- base += nr_ioapic_registers[i];
- }
-
- if (!ioapic && (gsi < 16))
- gsi += base;
-
- return gsi;
-}
-
static int __cpuinit wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
{
unsigned long vect = 0, psaival = 0;
@@ -190,7 +172,6 @@ static void setup_unisys(void)
es7000_plat = ES7000_ZORRO;
else
es7000_plat = ES7000_CLASSIC;
- ioapic_renumber_irq = es7000_rename_gsi;
}
/*
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
new file mode 100644
index 000000000000..e8b78a0be5de
--- /dev/null
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -0,0 +1,127 @@
+/*
+ * HW NMI watchdog support
+ *
+ * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
+ *
+ * Arch specific calls to support NMI watchdog
+ *
+ * Bits copied from original nmi.c file
+ *
+ */
+
+#include <asm/apic.h>
+#include <linux/smp.h>
+#include <linux/cpumask.h>
+#include <linux/sched.h>
+#include <linux/percpu.h>
+#include <linux/cpumask.h>
+#include <linux/kernel_stat.h>
+#include <asm/mce.h>
+
+#include <linux/nmi.h>
+#include <linux/module.h>
+
+/* For reliability, we're prepared to waste bits here. */
+static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
+
+static DEFINE_PER_CPU(unsigned, last_irq_sum);
+
+/*
+ * Take the local apic timer and PIT/HPET into account. We don't
+ * know which one is active, when we have highres/dyntick on
+ */
+static inline unsigned int get_timer_irqs(int cpu)
+{
+ unsigned int irqs = per_cpu(irq_stat, cpu).irq0_irqs;
+
+#if defined(CONFIG_X86_LOCAL_APIC)
+ irqs += per_cpu(irq_stat, cpu).apic_timer_irqs;
+#endif
+
+ return irqs;
+}
+
+static inline int mce_in_progress(void)
+{
+#if defined(CONFIG_X86_MCE)
+ return atomic_read(&mce_entry) > 0;
+#endif
+ return 0;
+}
+
+int hw_nmi_is_cpu_stuck(struct pt_regs *regs)
+{
+ unsigned int sum;
+ int cpu = smp_processor_id();
+
+ /* FIXME: cheap hack for this check, probably should get its own
+ * die_notifier handler
+ */
+ if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
+ static DEFINE_SPINLOCK(lock); /* Serialise the printks */
+
+ spin_lock(&lock);
+ printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
+ show_regs(regs);
+ dump_stack();
+ spin_unlock(&lock);
+ cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
+ }
+
+ /* if we are doing an mce, just assume the cpu is not stuck */
+ /* Could check oops_in_progress here too, but it's safer not to */
+ if (mce_in_progress())
+ return 0;
+
+ /* We determine if the cpu is stuck by checking whether any
+ * interrupts have happened since we last checked. Of course
+ * an nmi storm could create false positives, but the higher
+ * level logic should account for that
+ */
+ sum = get_timer_irqs(cpu);
+ if (__get_cpu_var(last_irq_sum) == sum) {
+ return 1;
+ } else {
+ __get_cpu_var(last_irq_sum) = sum;
+ return 0;
+ }
+}
+
+u64 hw_nmi_get_sample_period(void)
+{
+ return cpu_khz * 1000;
+}
+
+#ifdef ARCH_HAS_NMI_WATCHDOG
+void arch_trigger_all_cpu_backtrace(void)
+{
+ int i;
+
+ cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);
+
+ printk(KERN_INFO "sending NMI to all CPUs:\n");
+ apic->send_IPI_all(NMI_VECTOR);
+
+ /* Wait for up to 10 seconds for all CPUs to do the backtrace */
+ for (i = 0; i < 10 * 1000; i++) {
+ if (cpumask_empty(to_cpumask(backtrace_mask)))
+ break;
+ mdelay(1);
+ }
+}
+#endif
+
+/* STUB calls to mimic old nmi_watchdog behaviour */
+#if defined(CONFIG_X86_LOCAL_APIC)
+unsigned int nmi_watchdog = NMI_NONE;
+EXPORT_SYMBOL(nmi_watchdog);
+void acpi_nmi_enable(void) { return; }
+void acpi_nmi_disable(void) { return; }
+#endif
+atomic_t nmi_active = ATOMIC_INIT(0); /* oprofile uses this */
+EXPORT_SYMBOL(nmi_active);
+int unknown_nmi_panic;
+void cpu_nmi_set_wd_enabled(void) { return; }
+void stop_apic_nmi_watchdog(void *unused) { return; }
+void setup_apic_nmi_watchdog(void *unused) { return; }
+int __init check_nmi_watchdog(void) { return 0; }
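For reference, the stuck-CPU test in hw_nmi_is_cpu_stuck() boils down to comparing the per-cpu timer interrupt count against the previous NMI sample. A single-CPU sketch of that heuristic, with a plain counter standing in for get_timer_irqs(), is below; it is an illustration of the idea only.

#include <stdbool.h>
#include <stdio.h>

static unsigned int timer_irq_count;	/* stand-in for irq0 + apic timer counts */
static unsigned int last_irq_sum;	/* per-cpu last_irq_sum in the real code */

static bool cpu_looks_stuck(void)
{
	unsigned int sum = timer_irq_count;

	if (sum == last_irq_sum)
		return true;		/* no timer interrupts since the last NMI sample */
	last_irq_sum = sum;
	return false;
}

int main(void)
{
	timer_irq_count = 100;
	printf("sample 1: %s\n", cpu_looks_stuck() ? "stuck" : "ok");	/* ok: count moved */
	printf("sample 2: %s\n", cpu_looks_stuck() ? "stuck" : "ok");	/* stuck: unchanged */
	timer_irq_count = 101;
	printf("sample 3: %s\n", cpu_looks_stuck() ? "stuck" : "ok");	/* ok again */
	return 0;
}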
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index eb2789c3f721..33f3563a2a52 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -89,6 +89,9 @@ int nr_ioapics;
/* IO APIC gsi routing info */
struct mp_ioapic_gsi mp_gsi_routing[MAX_IO_APICS];
+/* The last gsi number used */
+u32 gsi_end;
+
/* MP IRQ source entries */
struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];
@@ -1013,10 +1016,9 @@ static inline int irq_trigger(int idx)
return MPBIOS_trigger(idx);
}
-int (*ioapic_renumber_irq)(int ioapic, int irq);
static int pin_2_irq(int idx, int apic, int pin)
{
- int irq, i;
+ int irq;
int bus = mp_irqs[idx].srcbus;
/*
@@ -1028,18 +1030,12 @@ static int pin_2_irq(int idx, int apic, int pin)
if (test_bit(bus, mp_bus_not_pci)) {
irq = mp_irqs[idx].srcbusirq;
} else {
- /*
- * PCI IRQs are mapped in order
- */
- i = irq = 0;
- while (i < apic)
- irq += nr_ioapic_registers[i++];
- irq += pin;
- /*
- * For MPS mode, so far only needed by ES7000 platform
- */
- if (ioapic_renumber_irq)
- irq = ioapic_renumber_irq(apic, irq);
+ u32 gsi = mp_gsi_routing[apic].gsi_base + pin;
+
+ if (gsi >= NR_IRQS_LEGACY)
+ irq = gsi;
+ else
+ irq = gsi_end + 1 + gsi;
}
#ifdef CONFIG_X86_32
@@ -1950,20 +1946,8 @@ static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
void __init enable_IO_APIC(void)
{
- union IO_APIC_reg_01 reg_01;
int i8259_apic, i8259_pin;
int apic;
- unsigned long flags;
-
- /*
- * The number of IO-APIC IRQ registers (== #pins):
- */
- for (apic = 0; apic < nr_ioapics; apic++) {
- raw_spin_lock_irqsave(&ioapic_lock, flags);
- reg_01.raw = io_apic_read(apic, 1);
- raw_spin_unlock_irqrestore(&ioapic_lock, flags);
- nr_ioapic_registers[apic] = reg_01.bits.entries+1;
- }
if (!legacy_pic->nr_legacy_irqs)
return;
@@ -3858,27 +3842,20 @@ int __init io_apic_get_redir_entries (int ioapic)
reg_01.raw = io_apic_read(ioapic, 1);
raw_spin_unlock_irqrestore(&ioapic_lock, flags);
- return reg_01.bits.entries;
+ /* The register returns the maximum redirection index supported,
+ * which is one less than the total number of redirection
+ * entries.
+ */
+ return reg_01.bits.entries + 1;
}
void __init probe_nr_irqs_gsi(void)
{
- int nr = 0;
+ int nr;
- nr = acpi_probe_gsi();
- if (nr > nr_irqs_gsi) {
+ nr = gsi_end + 1 + NR_IRQS_LEGACY;
+ if (nr > nr_irqs_gsi)
nr_irqs_gsi = nr;
- } else {
- /* for acpi=off or acpi is not compiled in */
- int idx;
-
- nr = 0;
- for (idx = 0; idx < nr_ioapics; idx++)
- nr += io_apic_get_redir_entries(idx) + 1;
-
- if (nr > nr_irqs_gsi)
- nr_irqs_gsi = nr;
- }
printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi);
}
@@ -4085,22 +4062,27 @@ int __init io_apic_get_version(int ioapic)
return reg_01.bits.version;
}
-int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
+int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity)
{
- int i;
+ int ioapic, pin, idx;
if (skip_ioapic_setup)
return -1;
- for (i = 0; i < mp_irq_entries; i++)
- if (mp_irqs[i].irqtype == mp_INT &&
- mp_irqs[i].srcbusirq == bus_irq)
- break;
- if (i >= mp_irq_entries)
+ ioapic = mp_find_ioapic(gsi);
+ if (ioapic < 0)
return -1;
- *trigger = irq_trigger(i);
- *polarity = irq_polarity(i);
+ pin = mp_find_ioapic_pin(ioapic, gsi);
+ if (pin < 0)
+ return -1;
+
+ idx = find_irq_entry(ioapic, pin, mp_INT);
+ if (idx < 0)
+ return -1;
+
+ *trigger = irq_trigger(idx);
+ *polarity = irq_polarity(idx);
return 0;
}
@@ -4241,7 +4223,7 @@ void __init ioapic_insert_resources(void)
}
}
-int mp_find_ioapic(int gsi)
+int mp_find_ioapic(u32 gsi)
{
int i = 0;
@@ -4256,7 +4238,7 @@ int mp_find_ioapic(int gsi)
return -1;
}
-int mp_find_ioapic_pin(int ioapic, int gsi)
+int mp_find_ioapic_pin(int ioapic, u32 gsi)
{
if (WARN_ON(ioapic == -1))
return -1;
@@ -4284,6 +4266,7 @@ static int bad_ioapic(unsigned long address)
void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
{
int idx = 0;
+ int entries;
if (bad_ioapic(address))
return;
@@ -4302,9 +4285,17 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
* Build basic GSI lookup table to facilitate gsi->io_apic lookups
* and to prevent reprogramming of IOAPIC pins (PCI GSIs).
*/
+ entries = io_apic_get_redir_entries(idx);
mp_gsi_routing[idx].gsi_base = gsi_base;
- mp_gsi_routing[idx].gsi_end = gsi_base +
- io_apic_get_redir_entries(idx);
+ mp_gsi_routing[idx].gsi_end = gsi_base + entries - 1;
+
+ /*
+ * The number of IO-APIC IRQ registers (== #pins):
+ */
+ nr_ioapic_registers[idx] = entries;
+
+ if (mp_gsi_routing[idx].gsi_end > gsi_end)
+ gsi_end = mp_gsi_routing[idx].gsi_end;
printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
"GSI %d-%d\n", idx, mp_ioapics[idx].apicid,
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index 1edaf15c0b8e..a43f71cb30f8 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -401,13 +401,6 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
int cpu = smp_processor_id();
int rc = 0;
- /* check for other users first */
- if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
- == NOTIFY_STOP) {
- rc = 1;
- touched = 1;
- }
-
sum = get_timer_irqs(cpu);
if (__get_cpu_var(nmi_touch)) {
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index c085d52dbaf2..e46f98f36e31 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -735,9 +735,6 @@ void __init uv_system_init(void)
uv_node_to_blade[nid] = blade;
uv_cpu_to_blade[cpu] = blade;
max_pnode = max(pnode, max_pnode);
-
- printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, lcpu %d, blade %d\n",
- cpu, apicid, pnode, nid, lcpu, blade);
}
/* Add blade/pnode info for nodes without cpus */
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 031aa887b0eb..c4f9182ca3ac 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -1224,7 +1224,7 @@ static void reinit_timer(void)
#ifdef INIT_TIMER_AFTER_SUSPEND
unsigned long flags;
- spin_lock_irqsave(&i8253_lock, flags);
+ raw_spin_lock_irqsave(&i8253_lock, flags);
/* set the clock to HZ */
outb_pit(0x34, PIT_MODE); /* binary, mode 2, LSB/MSB, ch 0 */
udelay(10);
@@ -1232,7 +1232,7 @@ static void reinit_timer(void)
udelay(10);
outb_pit(LATCH >> 8, PIT_CH0); /* MSB */
udelay(10);
- spin_unlock_irqrestore(&i8253_lock, flags);
+ raw_spin_unlock_irqrestore(&i8253_lock, flags);
#endif
}
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index c202b62f3671..3a785da34b6f 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -14,7 +14,7 @@ CFLAGS_common.o := $(nostackp)
obj-y := intel_cacheinfo.o addon_cpuid_features.o
obj-y += proc.o capflags.o powerflags.o common.o
-obj-y += vmware.o hypervisor.o sched.o
+obj-y += vmware.o hypervisor.o sched.o mshyperv.o
obj-$(CONFIG_X86_32) += bugs.o cmpxchg.o
obj-$(CONFIG_X86_64) += bugs_64.o
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index 97ad79cdf688..10fa5684a662 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -30,12 +30,14 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
const struct cpuid_bit *cb;
static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
- { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006 },
- { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006 },
- { X86_FEATURE_NPT, CR_EDX, 0, 0x8000000a },
- { X86_FEATURE_LBRV, CR_EDX, 1, 0x8000000a },
- { X86_FEATURE_SVML, CR_EDX, 2, 0x8000000a },
- { X86_FEATURE_NRIPS, CR_EDX, 3, 0x8000000a },
+ { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006 },
+ { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006 },
+ { X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006 },
+ { X86_FEATURE_CPB, CR_EDX, 9, 0x80000007 },
+ { X86_FEATURE_NPT, CR_EDX, 0, 0x8000000a },
+ { X86_FEATURE_LBRV, CR_EDX, 1, 0x8000000a },
+ { X86_FEATURE_SVML, CR_EDX, 2, 0x8000000a },
+ { X86_FEATURE_NRIPS, CR_EDX, 3, 0x8000000a },
{ 0, 0, 0, 0 }
};
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 01a265212395..c39576cb3018 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -86,7 +86,7 @@ static void __init check_fpu(void)
static void __init check_hlt(void)
{
- if (paravirt_enabled())
+ if (boot_cpu_data.x86 >= 5 || paravirt_enabled())
return;
printk(KERN_INFO "Checking 'hlt' instruction... ");
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 4868e4a951ee..cc83a002786e 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1084,6 +1084,20 @@ static void clear_all_debug_regs(void)
}
}
+#ifdef CONFIG_KGDB
+/*
+ * Restore debug regs if using kgdbwait and you have a kernel debugger
+ * connection established.
+ */
+static void dbg_restore_debug_regs(void)
+{
+ if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
+ arch_kgdb_ops.correct_hw_break();
+}
+#else /* ! CONFIG_KGDB */
+#define dbg_restore_debug_regs()
+#endif /* ! CONFIG_KGDB */
+
/*
* cpu_init() initializes state that is per-CPU. Some data is already
* initialized (naturally) in the bootstrap process, such as the GDT
@@ -1174,18 +1188,8 @@ void __cpuinit cpu_init(void)
load_TR_desc();
load_LDT(&init_mm.context);
-#ifdef CONFIG_KGDB
- /*
- * If the kgdb is connected no debug regs should be altered. This
- * is only applicable when KGDB and a KGDB I/O module are built
- * into the kernel and you are using early debugging with
- * kgdbwait. KGDB will control the kernel HW breakpoint registers.
- */
- if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
- arch_kgdb_ops.correct_hw_break();
- else
-#endif
- clear_all_debug_regs();
+ clear_all_debug_regs();
+ dbg_restore_debug_regs();
fpu_init();
@@ -1239,14 +1243,12 @@ void __cpuinit cpu_init(void)
#endif
clear_all_debug_regs();
+ dbg_restore_debug_regs();
/*
* Force FPU initialization:
*/
- if (cpu_has_xsave)
- current_thread_info()->status = TS_XSAVE;
- else
- current_thread_info()->status = 0;
+ current_thread_info()->status = 0;
clear_used_math();
mxcsr_feature_mask_init();
diff --git a/arch/x86/kernel/cpu/cpufreq/Makefile b/arch/x86/kernel/cpu/cpufreq/Makefile
index 1840c0a5170b..bd54bf67e6fb 100644
--- a/arch/x86/kernel/cpu/cpufreq/Makefile
+++ b/arch/x86/kernel/cpu/cpufreq/Makefile
@@ -2,8 +2,8 @@
# K8 systems. ACPI is preferred to all other hardware-specific drivers.
# speedstep-* is preferred over p4-clockmod.
-obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o
-obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o
+obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o mperf.o
+obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o mperf.o
obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o
obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o
obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 459168083b77..1d3cddaa40ee 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -46,6 +46,7 @@
#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
+#include "mperf.h"
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \
"acpi-cpufreq", msg)
@@ -71,8 +72,6 @@ struct acpi_cpufreq_data {
static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);
-static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf);
-
/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance *acpi_perf_data;
@@ -240,45 +239,6 @@ static u32 get_cur_val(const struct cpumask *mask)
return cmd.val;
}
-/* Called via smp_call_function_single(), on the target CPU */
-static void read_measured_perf_ctrs(void *_cur)
-{
- struct aperfmperf *am = _cur;
-
- get_aperfmperf(am);
-}
-
-/*
- * Return the measured active (C0) frequency on this CPU since last call
- * to this function.
- * Input: cpu number
- * Return: Average CPU frequency in terms of max frequency (zero on error)
- *
- * We use IA32_MPERF and IA32_APERF MSRs to get the measured performance
- * over a period of time, while CPU is in C0 state.
- * IA32_MPERF counts at the rate of max advertised frequency
- * IA32_APERF counts at the rate of actual CPU frequency
- * Only IA32_APERF/IA32_MPERF ratio is architecturally defined and
- * no meaning should be associated with absolute values of these MSRs.
- */
-static unsigned int get_measured_perf(struct cpufreq_policy *policy,
- unsigned int cpu)
-{
- struct aperfmperf perf;
- unsigned long ratio;
- unsigned int retval;
-
- if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1))
- return 0;
-
- ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf);
- per_cpu(acfreq_old_perf, cpu) = perf;
-
- retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT;
-
- return retval;
-}
-
static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
@@ -702,7 +662,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
/* Check for APERF/MPERF support in hardware */
if (cpu_has(c, X86_FEATURE_APERFMPERF))
- acpi_cpufreq_driver.getavg = get_measured_perf;
+ acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
dprintk("CPU%u - ACPI performance management activated.\n", cpu);
for (i = 0; i < perf->state_count; i++)
diff --git a/arch/x86/kernel/cpu/cpufreq/mperf.c b/arch/x86/kernel/cpu/cpufreq/mperf.c
new file mode 100644
index 000000000000..911e193018ae
--- /dev/null
+++ b/arch/x86/kernel/cpu/cpufreq/mperf.c
@@ -0,0 +1,51 @@
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/slab.h>
+
+#include "mperf.h"
+
+static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf);
+
+/* Called via smp_call_function_single(), on the target CPU */
+static void read_measured_perf_ctrs(void *_cur)
+{
+ struct aperfmperf *am = _cur;
+
+ get_aperfmperf(am);
+}
+
+/*
+ * Return the measured active (C0) frequency on this CPU since last call
+ * to this function.
+ * Input: cpu number
+ * Return: Average CPU frequency in terms of max frequency (zero on error)
+ *
+ * We use IA32_MPERF and IA32_APERF MSRs to get the measured performance
+ * over a period of time, while CPU is in C0 state.
+ * IA32_MPERF counts at the rate of max advertised frequency
+ * IA32_APERF counts at the rate of actual CPU frequency
+ * Only IA32_APERF/IA32_MPERF ratio is architecturally defined and
+ * no meaning should be associated with absolute values of these MSRs.
+ */
+unsigned int cpufreq_get_measured_perf(struct cpufreq_policy *policy,
+ unsigned int cpu)
+{
+ struct aperfmperf perf;
+ unsigned long ratio;
+ unsigned int retval;
+
+ if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1))
+ return 0;
+
+ ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf);
+ per_cpu(acfreq_old_perf, cpu) = perf;
+
+ retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT;
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(cpufreq_get_measured_perf);
+MODULE_LICENSE("GPL");
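For readers unfamiliar with the counters, here is a hedged user-space sketch of the delta-ratio arithmetic cpufreq_get_measured_perf() is built on, not part of the patch (the kernel path uses calc_aperfmperf_ratio() with a fixed-point APERFMPERF_SHIFT rather than the plain division below; the sample values are invented):

/* Hedged sketch, not kernel code: average C0 frequency since the last
 * sample, derived from the APERF/MPERF deltas. Only the ratio of the
 * deltas is meaningful, never the absolute MSR values. */
#include <stdio.h>
#include <stdint.h>

struct aperfmperf { uint64_t aperf, mperf; };

static unsigned int measured_khz(struct aperfmperf *old,
				 const struct aperfmperf *cur,
				 unsigned int max_freq_khz)
{
	uint64_t da = cur->aperf - old->aperf;
	uint64_t dm = cur->mperf - old->mperf;

	*old = *cur;				/* remember this sample */
	if (!dm)
		return 0;
	/* freq = max_freq * (delta APERF / delta MPERF) */
	return (unsigned int)(max_freq_khz * da / dm);
}

int main(void)
{
	struct aperfmperf old = { 0, 0 };
	struct aperfmperf cur = { 800000, 1000000 };	/* invented sample */

	printf("%u kHz\n", measured_khz(&old, &cur, 3000000));
	return 0;
}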
diff --git a/arch/x86/kernel/cpu/cpufreq/mperf.h b/arch/x86/kernel/cpu/cpufreq/mperf.h
new file mode 100644
index 000000000000..5dbf2950dc22
--- /dev/null
+++ b/arch/x86/kernel/cpu/cpufreq/mperf.h
@@ -0,0 +1,9 @@
+/*
+ * (c) 2010 Advanced Micro Devices, Inc.
+ * Your use of this code is subject to the terms and conditions of the
+ * GNU general public license version 2. See "COPYING" or
+ * http://www.gnu.org/licenses/gpl.html
+ */
+
+unsigned int cpufreq_get_measured_perf(struct cpufreq_policy *policy,
+ unsigned int cpu);
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index b6215b9798e2..a267dbd64b8e 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1,6 +1,5 @@
-
/*
- * (c) 2003-2006 Advanced Micro Devices, Inc.
+ * (c) 2003-2010 Advanced Micro Devices, Inc.
* Your use of this code is subject to the terms and conditions of the
* GNU general public license version 2. See "COPYING" or
* http://www.gnu.org/licenses/gpl.html
@@ -46,6 +45,7 @@
#define PFX "powernow-k8: "
#define VERSION "version 2.20.00"
#include "powernow-k8.h"
+#include "mperf.h"
/* serialize freq changes */
static DEFINE_MUTEX(fidvid_mutex);
@@ -54,6 +54,12 @@ static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);
static int cpu_family = CPU_OPTERON;
+/* core performance boost */
+static bool cpb_capable, cpb_enabled;
+static struct msr __percpu *msrs;
+
+static struct cpufreq_driver cpufreq_amd64_driver;
+
#ifndef CONFIG_SMP
static inline const struct cpumask *cpu_core_mask(int cpu)
{
@@ -800,6 +806,8 @@ static int find_psb_table(struct powernow_k8_data *data)
* www.amd.com
*/
printk(KERN_ERR FW_BUG PFX "No PSB or ACPI _PSS objects\n");
+ printk(KERN_ERR PFX "Make sure that your BIOS is up to date"
+ " and Cool'N'Quiet support is enabled in BIOS setup\n");
return -ENODEV;
}
@@ -1249,6 +1257,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
struct powernow_k8_data *data;
struct init_on_cpu init_on_cpu;
int rc;
+ struct cpuinfo_x86 *c = &cpu_data(pol->cpu);
if (!cpu_online(pol->cpu))
return -ENODEV;
@@ -1323,6 +1332,10 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
return -EINVAL;
}
+ /* Check for APERF/MPERF support in hardware */
+ if (cpu_has(c, X86_FEATURE_APERFMPERF))
+ cpufreq_amd64_driver.getavg = cpufreq_get_measured_perf;
+
cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);
if (cpu_family == CPU_HW_PSTATE)
@@ -1394,8 +1407,77 @@ out:
return khz;
}
+static void _cpb_toggle_msrs(bool t)
+{
+ int cpu;
+
+ get_online_cpus();
+
+ rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
+
+ for_each_cpu(cpu, cpu_online_mask) {
+ struct msr *reg = per_cpu_ptr(msrs, cpu);
+ if (t)
+ reg->l &= ~BIT(25);
+ else
+ reg->l |= BIT(25);
+ }
+ wrmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
+
+ put_online_cpus();
+}
+
+/*
+ * Switch on/off core performance boosting.
+ *
+ * 0=disable
+ * 1=enable.
+ */
+static void cpb_toggle(bool t)
+{
+ if (!cpb_capable)
+ return;
+
+ if (t && !cpb_enabled) {
+ cpb_enabled = true;
+ _cpb_toggle_msrs(t);
+ printk(KERN_INFO PFX "Core Boosting enabled.\n");
+ } else if (!t && cpb_enabled) {
+ cpb_enabled = false;
+ _cpb_toggle_msrs(t);
+ printk(KERN_INFO PFX "Core Boosting disabled.\n");
+ }
+}
+
+static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
+ size_t count)
+{
+ int ret = -EINVAL;
+ unsigned long val = 0;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (!ret && (val == 0 || val == 1) && cpb_capable)
+ cpb_toggle(val);
+ else
+ return -EINVAL;
+
+ return count;
+}
+
+static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
+{
+ return sprintf(buf, "%u\n", cpb_enabled);
+}
+
+#define define_one_rw(_name) \
+static struct freq_attr _name = \
+__ATTR(_name, 0644, show_##_name, store_##_name)
+
+define_one_rw(cpb);
+
static struct freq_attr *powernow_k8_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
+ &cpb,
NULL,
};
@@ -1411,10 +1493,51 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
.attr = powernow_k8_attr,
};
+/*
+ * Clear the boost-disable flag on the CPU_DOWN path so that this cpu
+ * cannot block the remaining ones from boosting. On the CPU_UP path we
+ * simply keep the boost-disable flag in sync with the current global
+ * state.
+ */
+static int __cpuinit cpb_notify(struct notifier_block *nb, unsigned long action,
+ void *hcpu)
+{
+ unsigned cpu = (long)hcpu;
+ u32 lo, hi;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+
+ if (!cpb_enabled) {
+ rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
+ lo |= BIT(25);
+ wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi);
+ }
+ break;
+
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
+ lo &= ~BIT(25);
+ wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi);
+ break;
+
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata cpb_nb = {
+ .notifier_call = cpb_notify,
+};
+
/* driver entry point for init */
static int __cpuinit powernowk8_init(void)
{
- unsigned int i, supported_cpus = 0;
+ unsigned int i, supported_cpus = 0, cpu;
for_each_online_cpu(i) {
int rc;
@@ -1423,15 +1546,36 @@ static int __cpuinit powernowk8_init(void)
supported_cpus++;
}
- if (supported_cpus == num_online_cpus()) {
- printk(KERN_INFO PFX "Found %d %s "
- "processors (%d cpu cores) (" VERSION ")\n",
- num_online_nodes(),
- boot_cpu_data.x86_model_id, supported_cpus);
- return cpufreq_register_driver(&cpufreq_amd64_driver);
+ if (supported_cpus != num_online_cpus())
+ return -ENODEV;
+
+ printk(KERN_INFO PFX "Found %d %s (%d cpu cores) (" VERSION ")\n",
+ num_online_nodes(), boot_cpu_data.x86_model_id, supported_cpus);
+
+ if (boot_cpu_has(X86_FEATURE_CPB)) {
+
+ cpb_capable = true;
+
+ register_cpu_notifier(&cpb_nb);
+
+ msrs = msrs_alloc();
+ if (!msrs) {
+ printk(KERN_ERR "%s: Error allocating msrs!\n", __func__);
+ return -ENOMEM;
+ }
+
+ rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
+
+ for_each_cpu(cpu, cpu_online_mask) {
+ struct msr *reg = per_cpu_ptr(msrs, cpu);
+ cpb_enabled |= !(!!(reg->l & BIT(25)));
+ }
+
+ printk(KERN_INFO PFX "Core Performance Boosting: %s.\n",
+ (cpb_enabled ? "on" : "off"));
}
- return -ENODEV;
+ return cpufreq_register_driver(&cpufreq_amd64_driver);
}
/* driver entry point for term */
@@ -1439,6 +1583,13 @@ static void __exit powernowk8_exit(void)
{
dprintk("exit\n");
+ if (boot_cpu_has(X86_FEATURE_CPB)) {
+ msrs_free(msrs);
+ msrs = NULL;
+
+ unregister_cpu_notifier(&cpb_nb);
+ }
+
cpufreq_unregister_driver(&cpufreq_amd64_driver);
}
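The core of the boost plumbing above is a single MSR bit; a hedged sketch of just that transformation, not part of the patch (HWCR_CPB_DIS is an invented name for BIT(25) of MSR_K7_HWCR; the real code applies it via rdmsr_on_cpus/wrmsr_on_cpus across all online CPUs):

/* Hedged sketch, not kernel code: the per-CPU HWCR update performed by
 * _cpb_toggle_msrs(). A set bit 25 means boosting is disabled, so
 * enabling boost clears it and disabling boost sets it. */
#include <stdio.h>
#include <stdint.h>

#define HWCR_CPB_DIS (1ULL << 25)	/* invented name for BIT(25) */

static uint64_t hwcr_set_boost(uint64_t hwcr, int enable)
{
	return enable ? (hwcr & ~HWCR_CPB_DIS) : (hwcr | HWCR_CPB_DIS);
}

int main(void)
{
	uint64_t hwcr = HWCR_CPB_DIS;	/* boost currently off */

	printf("enable  -> %#llx\n", (unsigned long long)hwcr_set_boost(hwcr, 1));
	printf("disable -> %#llx\n", (unsigned long long)hwcr_set_boost(hwcr, 0));
	return 0;
}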
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
index 02ce824073cb..df3529b1c02d 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
@@ -5,7 +5,6 @@
* http://www.gnu.org/licenses/gpl.html
*/
-
enum pstate {
HW_PSTATE_INVALID = 0xff,
HW_PSTATE_0 = 0,
@@ -55,7 +54,6 @@ struct powernow_k8_data {
struct cpumask *available_cores;
};
-
/* processor's cpuid instruction support */
#define CPUID_PROCESSOR_SIGNATURE 1 /* function 1 */
#define CPUID_XFAM 0x0ff00000 /* extended family */
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index 08be922de33a..dd531cc56a8f 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -21,37 +21,55 @@
*
*/
+#include <linux/module.h>
#include <asm/processor.h>
-#include <asm/vmware.h>
#include <asm/hypervisor.h>
-static inline void __cpuinit
-detect_hypervisor_vendor(struct cpuinfo_x86 *c)
+/*
+ * Hypervisor detection order. This is specified explicitly here because
+ * some hypervisors might implement compatibility modes for other
+ * hypervisors and therefore need to be detected in a specific sequence.
+ */
+static const __initconst struct hypervisor_x86 * const hypervisors[] =
{
- if (vmware_platform())
- c->x86_hyper_vendor = X86_HYPER_VENDOR_VMWARE;
- else
- c->x86_hyper_vendor = X86_HYPER_VENDOR_NONE;
-}
+ &x86_hyper_vmware,
+ &x86_hyper_ms_hyperv,
+};
-static inline void __cpuinit
-hypervisor_set_feature_bits(struct cpuinfo_x86 *c)
+const struct hypervisor_x86 *x86_hyper;
+EXPORT_SYMBOL(x86_hyper);
+
+static inline void __init
+detect_hypervisor_vendor(void)
{
- if (boot_cpu_data.x86_hyper_vendor == X86_HYPER_VENDOR_VMWARE) {
- vmware_set_feature_bits(c);
- return;
+ const struct hypervisor_x86 *h, * const *p;
+
+ for (p = hypervisors; p < hypervisors + ARRAY_SIZE(hypervisors); p++) {
+ h = *p;
+ if (h->detect()) {
+ x86_hyper = h;
+ printk(KERN_INFO "Hypervisor detected: %s\n", h->name);
+ break;
+ }
}
}
void __cpuinit init_hypervisor(struct cpuinfo_x86 *c)
{
- detect_hypervisor_vendor(c);
- hypervisor_set_feature_bits(c);
+ if (x86_hyper && x86_hyper->set_cpu_features)
+ x86_hyper->set_cpu_features(c);
}
void __init init_hypervisor_platform(void)
{
+
+ detect_hypervisor_vendor();
+
+ if (!x86_hyper)
+ return;
+
init_hypervisor(&boot_cpu_data);
- if (boot_cpu_data.x86_hyper_vendor == X86_HYPER_VENDOR_VMWARE)
- vmware_platform_setup();
+
+ if (x86_hyper->init_platform)
+ x86_hyper->init_platform();
}
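A hedged user-space model of the first-match walk detect_hypervisor_vendor() now performs, not part of the patch (struct and probe names are invented stand-ins for struct hypervisor_x86 and its ->detect() callbacks):

/* Hedged sketch, not kernel code: walk an explicitly ordered table and
 * keep the first entry whose detect() callback reports a hit. */
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct hyper {
	const char *name;
	bool (*detect)(void);
};

static bool detect_vmware(void) { return false; }	/* stand-in probes */
static bool detect_hyperv(void) { return true; }

static const struct hyper hypers[] = {
	{ "VMware",           detect_vmware },
	{ "Microsoft HyperV", detect_hyperv },
};

int main(void)
{
	const struct hyper *found = NULL;
	size_t i;

	for (i = 0; i < sizeof(hypers) / sizeof(hypers[0]); i++) {
		if (hypers[i].detect()) {
			found = &hypers[i];
			break;
		}
	}
	printf("Hypervisor detected: %s\n", found ? found->name : "none");
	return 0;
}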
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 1366c7cfd483..85f69cdeae10 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -12,7 +12,6 @@
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/msr.h>
-#include <asm/ds.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
@@ -373,12 +372,6 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
}
- if (c->cpuid_level > 6) {
- unsigned ecx = cpuid_ecx(6);
- if (ecx & 0x01)
- set_cpu_cap(c, X86_FEATURE_APERFMPERF);
- }
-
if (cpu_has_xmm2)
set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
if (cpu_has_ds) {
@@ -388,7 +381,6 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_BTS);
if (!(l1 & (1<<12)))
set_cpu_cap(c, X86_FEATURE_PEBS);
- ds_init_intel(c);
}
if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush)
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index b3eeb66c0a51..33eae2062cf5 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -148,13 +148,19 @@ union _cpuid4_leaf_ecx {
u32 full;
};
+struct amd_l3_cache {
+ struct pci_dev *dev;
+ bool can_disable;
+ unsigned indices;
+ u8 subcaches[4];
+};
+
struct _cpuid4_info {
union _cpuid4_leaf_eax eax;
union _cpuid4_leaf_ebx ebx;
union _cpuid4_leaf_ecx ecx;
unsigned long size;
- bool can_disable;
- unsigned int l3_indices;
+ struct amd_l3_cache *l3;
DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
};
@@ -164,8 +170,7 @@ struct _cpuid4_info_regs {
union _cpuid4_leaf_ebx ebx;
union _cpuid4_leaf_ecx ecx;
unsigned long size;
- bool can_disable;
- unsigned int l3_indices;
+ struct amd_l3_cache *l3;
};
unsigned short num_cache_leaves;
@@ -302,87 +307,163 @@ struct _cache_attr {
};
#ifdef CONFIG_CPU_SUP_AMD
-static unsigned int __cpuinit amd_calc_l3_indices(void)
+
+/*
+ * L3 cache descriptors
+ */
+static struct amd_l3_cache **__cpuinitdata l3_caches;
+
+static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
{
- /*
- * We're called over smp_call_function_single() and therefore
- * are on the correct cpu.
- */
- int cpu = smp_processor_id();
- int node = cpu_to_node(cpu);
- struct pci_dev *dev = node_to_k8_nb_misc(node);
unsigned int sc0, sc1, sc2, sc3;
u32 val = 0;
- pci_read_config_dword(dev, 0x1C4, &val);
+ pci_read_config_dword(l3->dev, 0x1C4, &val);
/* calculate subcache sizes */
- sc0 = !(val & BIT(0));
- sc1 = !(val & BIT(4));
- sc2 = !(val & BIT(8)) + !(val & BIT(9));
- sc3 = !(val & BIT(12)) + !(val & BIT(13));
+ l3->subcaches[0] = sc0 = !(val & BIT(0));
+ l3->subcaches[1] = sc1 = !(val & BIT(4));
+ l3->subcaches[2] = sc2 = !(val & BIT(8)) + !(val & BIT(9));
+ l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));
- return (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
+ l3->indices = (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
+}
+
+static struct amd_l3_cache * __cpuinit amd_init_l3_cache(int node)
+{
+ struct amd_l3_cache *l3;
+ struct pci_dev *dev = node_to_k8_nb_misc(node);
+
+ l3 = kzalloc(sizeof(struct amd_l3_cache), GFP_ATOMIC);
+ if (!l3) {
+ printk(KERN_WARNING "Error allocating L3 struct\n");
+ return NULL;
+ }
+
+ l3->dev = dev;
+
+ amd_calc_l3_indices(l3);
+
+ return l3;
}
static void __cpuinit
amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
{
- if (index < 3)
+ int node;
+
+ if (boot_cpu_data.x86 != 0x10)
return;
- if (boot_cpu_data.x86 == 0x11)
+ if (index < 3)
return;
/* see errata #382 and #388 */
- if ((boot_cpu_data.x86 == 0x10) &&
- ((boot_cpu_data.x86_model < 0x8) ||
- (boot_cpu_data.x86_mask < 0x1)))
+ if (boot_cpu_data.x86_model < 0x8)
+ return;
+
+ if ((boot_cpu_data.x86_model == 0x8 ||
+ boot_cpu_data.x86_model == 0x9)
+ &&
+ boot_cpu_data.x86_mask < 0x1)
+ return;
+
+ /* not in virtualized environments */
+ if (num_k8_northbridges == 0)
return;
- this_leaf->can_disable = true;
- this_leaf->l3_indices = amd_calc_l3_indices();
+ /*
+	 * Strictly speaking, the @size bytes allocated below are leaked since
+	 * they are never freed, but freeing would only be needed on shutdown,
+	 * so it doesn't matter.
+ */
+ if (!l3_caches) {
+ int size = num_k8_northbridges * sizeof(struct amd_l3_cache *);
+
+ l3_caches = kzalloc(size, GFP_ATOMIC);
+ if (!l3_caches)
+ return;
+ }
+
+ node = amd_get_nb_id(smp_processor_id());
+
+ if (!l3_caches[node]) {
+ l3_caches[node] = amd_init_l3_cache(node);
+ l3_caches[node]->can_disable = true;
+ }
+
+ WARN_ON(!l3_caches[node]);
+
+ this_leaf->l3 = l3_caches[node];
}
static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
- unsigned int index)
+ unsigned int slot)
{
- int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
- int node = amd_get_nb_id(cpu);
- struct pci_dev *dev = node_to_k8_nb_misc(node);
+ struct pci_dev *dev = this_leaf->l3->dev;
unsigned int reg = 0;
- if (!this_leaf->can_disable)
+ if (!this_leaf->l3 || !this_leaf->l3->can_disable)
return -EINVAL;
if (!dev)
return -EINVAL;
- pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
+ pci_read_config_dword(dev, 0x1BC + slot * 4, &reg);
return sprintf(buf, "0x%08x\n", reg);
}
-#define SHOW_CACHE_DISABLE(index) \
+#define SHOW_CACHE_DISABLE(slot) \
static ssize_t \
-show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf) \
+show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf) \
{ \
- return show_cache_disable(this_leaf, buf, index); \
+ return show_cache_disable(this_leaf, buf, slot); \
}
SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)
+static void amd_l3_disable_index(struct amd_l3_cache *l3, int cpu,
+ unsigned slot, unsigned long idx)
+{
+ int i;
+
+ idx |= BIT(30);
+
+ /*
+ * disable index in all 4 subcaches
+ */
+ for (i = 0; i < 4; i++) {
+ u32 reg = idx | (i << 20);
+
+ if (!l3->subcaches[i])
+ continue;
+
+ pci_write_config_dword(l3->dev, 0x1BC + slot * 4, reg);
+
+ /*
+	 * We need to WBINVD on a core on the node containing the L3
+	 * cache whose indices we are disabling; a simple wbinvd()
+	 * is therefore not sufficient.
+ */
+ wbinvd_on_cpu(cpu);
+
+ reg |= BIT(31);
+ pci_write_config_dword(l3->dev, 0x1BC + slot * 4, reg);
+ }
+}
+
+
static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
- const char *buf, size_t count, unsigned int index)
+ const char *buf, size_t count,
+ unsigned int slot)
{
+ struct pci_dev *dev = this_leaf->l3->dev;
int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
- int node = amd_get_nb_id(cpu);
- struct pci_dev *dev = node_to_k8_nb_misc(node);
unsigned long val = 0;
#define SUBCACHE_MASK (3UL << 20)
#define SUBCACHE_INDEX 0xfff
- if (!this_leaf->can_disable)
+ if (!this_leaf->l3 || !this_leaf->l3->can_disable)
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
@@ -396,26 +477,20 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
/* do not allow writes outside of allowed bits */
if ((val & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
- ((val & SUBCACHE_INDEX) > this_leaf->l3_indices))
+ ((val & SUBCACHE_INDEX) > this_leaf->l3->indices))
return -EINVAL;
- val |= BIT(30);
- pci_write_config_dword(dev, 0x1BC + index * 4, val);
- /*
- * We need to WBINVD on a core on the node containing the L3 cache which
- * indices we disable therefore a simple wbinvd() is not sufficient.
- */
- wbinvd_on_cpu(cpu);
- pci_write_config_dword(dev, 0x1BC + index * 4, val | BIT(31));
+ amd_l3_disable_index(this_leaf->l3, cpu, slot, val);
+
return count;
}
-#define STORE_CACHE_DISABLE(index) \
+#define STORE_CACHE_DISABLE(slot) \
static ssize_t \
-store_cache_disable_##index(struct _cpuid4_info *this_leaf, \
+store_cache_disable_##slot(struct _cpuid4_info *this_leaf, \
const char *buf, size_t count) \
{ \
- return store_cache_disable(this_leaf, buf, count, index); \
+ return store_cache_disable(this_leaf, buf, count, slot); \
}
STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)
@@ -443,8 +518,7 @@ __cpuinit cpuid4_cache_lookup_regs(int index,
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
amd_cpuid4(index, &eax, &ebx, &ecx);
- if (boot_cpu_data.x86 >= 0x10)
- amd_check_l3_disable(index, this_leaf);
+ amd_check_l3_disable(index, this_leaf);
} else {
cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
}
@@ -701,6 +775,7 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
for (i = 0; i < num_cache_leaves; i++)
cache_remove_shared_cpu_map(cpu, i);
+ kfree(per_cpu(ici_cpuid4_info, cpu)->l3);
kfree(per_cpu(ici_cpuid4_info, cpu));
per_cpu(ici_cpuid4_info, cpu) = NULL;
}
@@ -985,7 +1060,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
this_leaf = CPUID4_INFO_IDX(cpu, i);
- if (this_leaf->can_disable)
+ if (this_leaf->l3 && this_leaf->l3->can_disable)
ktype_cache.default_attrs = default_l3_attrs;
else
ktype_cache.default_attrs = default_attrs;
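The register value amd_l3_disable_index() writes packs several fields; a hedged sketch of only that encoding, not part of the patch (field positions read off the code above: index bits at the bottom, subcache selector shifted into bits 21:20, bit 30 to arm the entry, bit 31 to commit it after the WBINVD):

/* Hedged sketch, not kernel code: the two values written per subcache
 * to the 0x1BC + slot*4 config word when disabling one L3 index. */
#include <stdio.h>
#include <stdint.h>

static uint32_t l3_disable_reg(uint32_t index, int subcache, int commit)
{
	uint32_t reg = index | (1u << 30);	/* arm the disable entry */

	reg |= (uint32_t)subcache << 20;	/* select subcache 0..3 */
	if (commit)
		reg |= 1u << 31;		/* second write commits it */
	return reg;
}

int main(void)
{
	int i;

	for (i = 0; i < 4; i++)
		printf("subcache %d: arm %#010x, commit %#010x\n", i,
		       l3_disable_reg(0x7ff, i, 0), l3_disable_reg(0x7ff, i, 1));
	return 0;
}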
diff --git a/arch/x86/kernel/cpu/mcheck/Makefile b/arch/x86/kernel/cpu/mcheck/Makefile
index 4ac6d48fe11b..bb34b03af252 100644
--- a/arch/x86/kernel/cpu/mcheck/Makefile
+++ b/arch/x86/kernel/cpu/mcheck/Makefile
@@ -7,3 +7,5 @@ obj-$(CONFIG_X86_MCE_THRESHOLD) += threshold.o
obj-$(CONFIG_X86_MCE_INJECT) += mce-inject.o
obj-$(CONFIG_X86_THERMAL_VECTOR) += therm_throt.o
+
+obj-$(CONFIG_ACPI_APEI) += mce-apei.o
diff --git a/arch/x86/kernel/cpu/mcheck/mce-apei.c b/arch/x86/kernel/cpu/mcheck/mce-apei.c
new file mode 100644
index 000000000000..745b54f9be89
--- /dev/null
+++ b/arch/x86/kernel/cpu/mcheck/mce-apei.c
@@ -0,0 +1,138 @@
+/*
+ * Bridge between MCE and APEI
+ *
+ * On some machines, corrected memory errors are reported via the APEI
+ * generic hardware error source (GHES) instead of as corrected Machine
+ * Checks. These corrected memory errors can be reported to user space
+ * through /dev/mcelog by faking a corrected Machine Check, so that
+ * the erroneous memory page can be offlined by /sbin/mcelog if the error
+ * count for that page exceeds the threshold.
+ *
+ * For fatal MCE, save MCE record into persistent storage via ERST, so
+ * that the MCE record can be logged after reboot via ERST.
+ *
+ * Copyright 2010 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/acpi.h>
+#include <linux/cper.h>
+#include <acpi/apei.h>
+#include <asm/mce.h>
+
+#include "mce-internal.h"
+
+void apei_mce_report_mem_error(int corrected, struct cper_sec_mem_err *mem_err)
+{
+ struct mce m;
+
+ /* Only corrected MC is reported */
+ if (!corrected)
+ return;
+
+ mce_setup(&m);
+ m.bank = 1;
+ /* Fake a memory read corrected error with unknown channel */
+ m.status = MCI_STATUS_VAL | MCI_STATUS_EN | MCI_STATUS_ADDRV | 0x9f;
+ m.addr = mem_err->physical_addr;
+ mce_log(&m);
+ mce_notify_irq();
+}
+EXPORT_SYMBOL_GPL(apei_mce_report_mem_error);
+
+#define CPER_CREATOR_MCE \
+ UUID_LE(0x75a574e3, 0x5052, 0x4b29, 0x8a, 0x8e, 0xbe, 0x2c, \
+ 0x64, 0x90, 0xb8, 0x9d)
+#define CPER_SECTION_TYPE_MCE \
+ UUID_LE(0xfe08ffbe, 0x95e4, 0x4be7, 0xbc, 0x73, 0x40, 0x96, \
+ 0x04, 0x4a, 0x38, 0xfc)
+
+/*
+ * The CPER specification (UEFI specification 2.3, appendix N) requires
+ * the record to be byte-packed.
+ */
+struct cper_mce_record {
+ struct cper_record_header hdr;
+ struct cper_section_descriptor sec_hdr;
+ struct mce mce;
+} __packed;
+
+int apei_write_mce(struct mce *m)
+{
+ struct cper_mce_record rcd;
+
+ memset(&rcd, 0, sizeof(rcd));
+ memcpy(rcd.hdr.signature, CPER_SIG_RECORD, CPER_SIG_SIZE);
+ rcd.hdr.revision = CPER_RECORD_REV;
+ rcd.hdr.signature_end = CPER_SIG_END;
+ rcd.hdr.section_count = 1;
+ rcd.hdr.error_severity = CPER_SER_FATAL;
+ /* timestamp, platform_id, partition_id are all invalid */
+ rcd.hdr.validation_bits = 0;
+ rcd.hdr.record_length = sizeof(rcd);
+ rcd.hdr.creator_id = CPER_CREATOR_MCE;
+ rcd.hdr.notification_type = CPER_NOTIFY_MCE;
+ rcd.hdr.record_id = cper_next_record_id();
+ rcd.hdr.flags = CPER_HW_ERROR_FLAGS_PREVERR;
+
+ rcd.sec_hdr.section_offset = (void *)&rcd.mce - (void *)&rcd;
+ rcd.sec_hdr.section_length = sizeof(rcd.mce);
+ rcd.sec_hdr.revision = CPER_SEC_REV;
+	/* fru_id and fru_text are invalid */
+ rcd.sec_hdr.validation_bits = 0;
+ rcd.sec_hdr.flags = CPER_SEC_PRIMARY;
+ rcd.sec_hdr.section_type = CPER_SECTION_TYPE_MCE;
+ rcd.sec_hdr.section_severity = CPER_SER_FATAL;
+
+ memcpy(&rcd.mce, m, sizeof(*m));
+
+ return erst_write(&rcd.hdr);
+}
+
+ssize_t apei_read_mce(struct mce *m, u64 *record_id)
+{
+ struct cper_mce_record rcd;
+ ssize_t len;
+
+ len = erst_read_next(&rcd.hdr, sizeof(rcd));
+ if (len <= 0)
+ return len;
+	/* Cannot skip other records in storage via ERST without clearing them */
+ else if (len != sizeof(rcd) ||
+ uuid_le_cmp(rcd.hdr.creator_id, CPER_CREATOR_MCE)) {
+ if (printk_ratelimit())
+ pr_warning(
+ "MCE-APEI: Can not skip the unknown record in ERST");
+ return -EIO;
+ }
+
+ memcpy(m, &rcd.mce, sizeof(*m));
+ *record_id = rcd.hdr.record_id;
+
+ return sizeof(*m);
+}
+
+/* Check whether there is record in ERST */
+int apei_check_mce(void)
+{
+ return erst_get_record_count();
+}
+
+int apei_clear_mce(u64 record_id)
+{
+ return erst_clear(record_id);
+}
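The __packed attribute on cper_mce_record matters because apei_write_mce() computes the section offset with pointer arithmetic over the record; a hedged user-space sketch of that idiom, not part of the patch, with deliberately simplified stand-in structures:

/* Hedged sketch, not kernel code: section_offset must point at the
 * payload inside the packed record, mirroring
 * (void *)&rcd.mce - (void *)&rcd in apei_write_mce(). */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct hdr { uint8_t sig[4]; uint16_t rev; uint32_t len; };
struct sec { uint32_t section_offset; uint32_t section_length; };
struct mce_payload { uint64_t status; uint64_t addr; };

struct record {
	struct hdr hdr;
	struct sec sec_hdr;
	struct mce_payload mce;
} __attribute__((packed));

int main(void)
{
	struct record rcd;

	memset(&rcd, 0, sizeof(rcd));
	rcd.sec_hdr.section_offset = (uint32_t)((char *)&rcd.mce - (char *)&rcd);
	rcd.sec_hdr.section_length = sizeof(rcd.mce);
	printf("payload at offset %u, record is %zu bytes\n",
	       rcd.sec_hdr.section_offset, sizeof(rcd));
	return 0;
}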
diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h
index 32996f9fab67..fefcc69ee8b5 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-internal.h
+++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h
@@ -28,3 +28,26 @@ extern int mce_ser;
extern struct mce_bank *mce_banks;
+#ifdef CONFIG_ACPI_APEI
+int apei_write_mce(struct mce *m);
+ssize_t apei_read_mce(struct mce *m, u64 *record_id);
+int apei_check_mce(void);
+int apei_clear_mce(u64 record_id);
+#else
+static inline int apei_write_mce(struct mce *m)
+{
+ return -EINVAL;
+}
+static inline ssize_t apei_read_mce(struct mce *m, u64 *record_id)
+{
+ return 0;
+}
+static inline int apei_check_mce(void)
+{
+ return 0;
+}
+static inline int apei_clear_mce(u64 record_id)
+{
+ return -EINVAL;
+}
+#endif
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 8a6f0afa767e..18cc42562250 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -36,6 +36,7 @@
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
+#include <linux/edac_mce.h>
#include <asm/processor.h>
#include <asm/hw_irq.h>
@@ -169,6 +170,15 @@ void mce_log(struct mce *mce)
entry = rcu_dereference_check_mce(mcelog.next);
for (;;) {
/*
+	 * If edac_mce is enabled, it will check the error type
+	 * and process it if it is a known error.
+	 * Otherwise, the error will be sent through the mcelog
+	 * interface.
+ */
+ if (edac_mce_parse(mce))
+ return;
+
+ /*
* When the buffer fills up discard new entries.
* Assume that the earlier errors are the more
* interesting ones:
@@ -264,7 +274,7 @@ static void wait_for_panic(void)
static void mce_panic(char *msg, struct mce *final, char *exp)
{
- int i;
+ int i, apei_err = 0;
if (!fake_panic) {
/*
@@ -287,8 +297,11 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
struct mce *m = &mcelog.entry[i];
if (!(m->status & MCI_STATUS_VAL))
continue;
- if (!(m->status & MCI_STATUS_UC))
+ if (!(m->status & MCI_STATUS_UC)) {
print_mce(m);
+ if (!apei_err)
+ apei_err = apei_write_mce(m);
+ }
}
/* Now print uncorrected but with the final one last */
for (i = 0; i < MCE_LOG_LEN; i++) {
@@ -297,11 +310,17 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
continue;
if (!(m->status & MCI_STATUS_UC))
continue;
- if (!final || memcmp(m, final, sizeof(struct mce)))
+ if (!final || memcmp(m, final, sizeof(struct mce))) {
print_mce(m);
+ if (!apei_err)
+ apei_err = apei_write_mce(m);
+ }
}
- if (final)
+ if (final) {
print_mce(final);
+ if (!apei_err)
+ apei_err = apei_write_mce(final);
+ }
if (cpu_missing)
printk(KERN_EMERG "Some CPUs didn't answer in synchronization\n");
print_mce_tail();
@@ -539,7 +558,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
struct mce m;
int i;
- __get_cpu_var(mce_poll_count)++;
+ percpu_inc(mce_poll_count);
mce_setup(&m);
@@ -934,7 +953,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
atomic_inc(&mce_entry);
- __get_cpu_var(mce_exception_count)++;
+ percpu_inc(mce_exception_count);
if (notify_die(DIE_NMI, "machine check", regs, error_code,
18, SIGKILL) == NOTIFY_STOP)
@@ -1493,6 +1512,43 @@ static void collect_tscs(void *data)
rdtscll(cpu_tsc[smp_processor_id()]);
}
+static int mce_apei_read_done;
+
+/* Collect MCE record of previous boot in persistent storage via APEI ERST. */
+static int __mce_read_apei(char __user **ubuf, size_t usize)
+{
+ int rc;
+ u64 record_id;
+ struct mce m;
+
+ if (usize < sizeof(struct mce))
+ return -EINVAL;
+
+ rc = apei_read_mce(&m, &record_id);
+ /* Error or no more MCE record */
+ if (rc <= 0) {
+ mce_apei_read_done = 1;
+ return rc;
+ }
+ rc = -EFAULT;
+ if (copy_to_user(*ubuf, &m, sizeof(struct mce)))
+ return rc;
+ /*
+	 * In fact, we should clear the record only after it has been
+	 * flushed to disk or sent over the network by /sbin/mcelog,
+	 * but we have no interface to support that now, so just clear
+	 * it here to avoid duplication.
+ */
+ rc = apei_clear_mce(record_id);
+ if (rc) {
+ mce_apei_read_done = 1;
+ return rc;
+ }
+ *ubuf += sizeof(struct mce);
+
+ return 0;
+}
+
static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
loff_t *off)
{
@@ -1506,15 +1562,19 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
return -ENOMEM;
mutex_lock(&mce_read_mutex);
+
+ if (!mce_apei_read_done) {
+ err = __mce_read_apei(&buf, usize);
+ if (err || buf != ubuf)
+ goto out;
+ }
+
next = rcu_dereference_check_mce(mcelog.next);
/* Only supports full reads right now */
- if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) {
- mutex_unlock(&mce_read_mutex);
- kfree(cpu_tsc);
-
- return -EINVAL;
- }
+ err = -EINVAL;
+ if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce))
+ goto out;
err = 0;
prev = 0;
@@ -1562,10 +1622,15 @@ timeout:
memset(&mcelog.entry[i], 0, sizeof(struct mce));
}
}
+
+ if (err)
+ err = -EFAULT;
+
+out:
mutex_unlock(&mce_read_mutex);
kfree(cpu_tsc);
- return err ? -EFAULT : buf - ubuf;
+ return err ? err : buf - ubuf;
}
static unsigned int mce_poll(struct file *file, poll_table *wait)
@@ -1573,6 +1638,8 @@ static unsigned int mce_poll(struct file *file, poll_table *wait)
poll_wait(file, &mce_wait, wait);
if (rcu_dereference_check_mce(mcelog.next))
return POLLIN | POLLRDNORM;
+ if (!mce_apei_read_done && apei_check_mce())
+ return POLLIN | POLLRDNORM;
return 0;
}
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
new file mode 100644
index 000000000000..16f41bbe46b6
--- /dev/null
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -0,0 +1,55 @@
+/*
+ * HyperV Detection code.
+ *
+ * Copyright (C) 2010, Novell, Inc.
+ * Author : K. Y. Srinivasan <ksrinivasan@novell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <asm/processor.h>
+#include <asm/hypervisor.h>
+#include <asm/hyperv.h>
+#include <asm/mshyperv.h>
+
+struct ms_hyperv_info ms_hyperv;
+
+static bool __init ms_hyperv_platform(void)
+{
+ u32 eax;
+ u32 hyp_signature[3];
+
+ if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
+ return false;
+
+ cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS,
+ &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]);
+
+ return eax >= HYPERV_CPUID_MIN &&
+ eax <= HYPERV_CPUID_MAX &&
+ !memcmp("Microsoft Hv", hyp_signature, 12);
+}
+
+static void __init ms_hyperv_init_platform(void)
+{
+ /*
+ * Extract the features and hints
+ */
+ ms_hyperv.features = cpuid_eax(HYPERV_CPUID_FEATURES);
+ ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);
+
+ printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n",
+ ms_hyperv.features, ms_hyperv.hints);
+}
+
+const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
+ .name = "Microsoft HyperV",
+ .detect = ms_hyperv_platform,
+ .init_platform = ms_hyperv_init_platform,
+};
+EXPORT_SYMBOL(x86_hyper_ms_hyperv);
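A hedged user-space analogue of the signature check ms_hyperv_platform() performs, not part of the patch (it uses gcc's <cpuid.h>; the hypervisor feature-bit test and the HYPERV_CPUID_MIN/MAX range check from the kernel code are omitted for brevity):

/* Hedged sketch, not kernel code: query the hypervisor vendor leaf
 * 0x40000000 and compare the 12-byte "Microsoft Hv" signature held in
 * EBX/ECX/EDX. */
#include <stdio.h>
#include <string.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax = 0, sig[3] = { 0, 0, 0 };

	__cpuid(0x40000000, eax, sig[0], sig[1], sig[2]);

	printf("running on Hyper-V: %s\n",
	       memcmp("Microsoft Hv", sig, 12) ? "no" : "yes");
	return 0;
}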
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index db5bdc8addf8..fd4db0db3708 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -31,46 +31,51 @@
#include <asm/nmi.h>
#include <asm/compat.h>
-static u64 perf_event_mask __read_mostly;
+#if 0
+#undef wrmsrl
+#define wrmsrl(msr, val) \
+do { \
+ trace_printk("wrmsrl(%lx, %lx)\n", (unsigned long)(msr),\
+ (unsigned long)(val)); \
+ native_write_msr((msr), (u32)((u64)(val)), \
+ (u32)((u64)(val) >> 32)); \
+} while (0)
+#endif
-/* The maximal number of PEBS events: */
-#define MAX_PEBS_EVENTS 4
+/*
+ * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
+ */
+static unsigned long
+copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
+{
+ unsigned long offset, addr = (unsigned long)from;
+ int type = in_nmi() ? KM_NMI : KM_IRQ0;
+ unsigned long size, len = 0;
+ struct page *page;
+ void *map;
+ int ret;
-/* The size of a BTS record in bytes: */
-#define BTS_RECORD_SIZE 24
+ do {
+ ret = __get_user_pages_fast(addr, 1, 0, &page);
+ if (!ret)
+ break;
-/* The size of a per-cpu BTS buffer in bytes: */
-#define BTS_BUFFER_SIZE (BTS_RECORD_SIZE * 2048)
+ offset = addr & (PAGE_SIZE - 1);
+ size = min(PAGE_SIZE - offset, n - len);
-/* The BTS overflow threshold in bytes from the end of the buffer: */
-#define BTS_OVFL_TH (BTS_RECORD_SIZE * 128)
+ map = kmap_atomic(page, type);
+ memcpy(to, map+offset, size);
+ kunmap_atomic(map, type);
+ put_page(page);
+ len += size;
+ to += size;
+ addr += size;
-/*
- * Bits in the debugctlmsr controlling branch tracing.
- */
-#define X86_DEBUGCTL_TR (1 << 6)
-#define X86_DEBUGCTL_BTS (1 << 7)
-#define X86_DEBUGCTL_BTINT (1 << 8)
-#define X86_DEBUGCTL_BTS_OFF_OS (1 << 9)
-#define X86_DEBUGCTL_BTS_OFF_USR (1 << 10)
+ } while (len < n);
-/*
- * A debug store configuration.
- *
- * We only support architectures that use 64bit fields.
- */
-struct debug_store {
- u64 bts_buffer_base;
- u64 bts_index;
- u64 bts_absolute_maximum;
- u64 bts_interrupt_threshold;
- u64 pebs_buffer_base;
- u64 pebs_index;
- u64 pebs_absolute_maximum;
- u64 pebs_interrupt_threshold;
- u64 pebs_event_reset[MAX_PEBS_EVENTS];
-};
+ return len;
+}
struct event_constraint {
union {
@@ -89,18 +94,41 @@ struct amd_nb {
struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};
+#define MAX_LBR_ENTRIES 16
+
struct cpu_hw_events {
+ /*
+ * Generic x86 PMC bits
+ */
struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
- unsigned long interrupts;
int enabled;
- struct debug_store *ds;
int n_events;
int n_added;
int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
u64 tags[X86_PMC_IDX_MAX];
struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
+
+ unsigned int group_flag;
+
+ /*
+ * Intel DebugStore bits
+ */
+ struct debug_store *ds;
+ u64 pebs_enabled;
+
+ /*
+ * Intel LBR bits
+ */
+ int lbr_users;
+ void *lbr_context;
+ struct perf_branch_stack lbr_stack;
+ struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES];
+
+ /*
+ * AMD specific bits
+ */
struct amd_nb *amd_nb;
};
@@ -114,44 +142,75 @@ struct cpu_hw_events {
#define EVENT_CONSTRAINT(c, n, m) \
__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))
+/*
+ * Constraint on the Event code.
+ */
#define INTEL_EVENT_CONSTRAINT(c, n) \
- EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK)
+ EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
+/*
+ * Constraint on the Event code + UMask + fixed-mask
+ *
+ * Filter mask to validate fixed counter events.
+ * The following filters disqualify an event from fixed counters:
+ * - inv
+ * - edge
+ * - cnt-mask
+ * The other filters are supported by fixed counters.
+ * The any-thread option is supported starting with v3.
+ */
#define FIXED_EVENT_CONSTRAINT(c, n) \
- EVENT_CONSTRAINT(c, (1ULL << (32+n)), INTEL_ARCH_FIXED_MASK)
+ EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)
+
+/*
+ * Constraint on the Event code + UMask
+ */
+#define PEBS_EVENT_CONSTRAINT(c, n) \
+ EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
#define EVENT_CONSTRAINT_END \
EVENT_CONSTRAINT(0, 0, 0)
#define for_each_event_constraint(e, c) \
- for ((e) = (c); (e)->cmask; (e)++)
+ for ((e) = (c); (e)->weight; (e)++)
+
+union perf_capabilities {
+ struct {
+ u64 lbr_format : 6;
+ u64 pebs_trap : 1;
+ u64 pebs_arch_reg : 1;
+ u64 pebs_format : 4;
+ u64 smm_freeze : 1;
+ };
+ u64 capabilities;
+};
/*
* struct x86_pmu - generic x86 pmu
*/
struct x86_pmu {
+ /*
+ * Generic x86 PMC bits
+ */
const char *name;
int version;
int (*handle_irq)(struct pt_regs *);
void (*disable_all)(void);
- void (*enable_all)(void);
+ void (*enable_all)(int added);
void (*enable)(struct perf_event *);
void (*disable)(struct perf_event *);
+ int (*hw_config)(struct perf_event *event);
+ int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
unsigned eventsel;
unsigned perfctr;
u64 (*event_map)(int);
- u64 (*raw_event)(u64);
int max_events;
- int num_events;
- int num_events_fixed;
- int event_bits;
- u64 event_mask;
+ int num_counters;
+ int num_counters_fixed;
+ int cntval_bits;
+ u64 cntval_mask;
int apic;
u64 max_period;
- u64 intel_ctrl;
- void (*enable_bts)(u64 config);
- void (*disable_bts)(void);
-
struct event_constraint *
(*get_event_constraints)(struct cpu_hw_events *cpuc,
struct perf_event *event);
@@ -159,11 +218,32 @@ struct x86_pmu {
void (*put_event_constraints)(struct cpu_hw_events *cpuc,
struct perf_event *event);
struct event_constraint *event_constraints;
+ void (*quirks)(void);
int (*cpu_prepare)(int cpu);
void (*cpu_starting)(int cpu);
void (*cpu_dying)(int cpu);
void (*cpu_dead)(int cpu);
+
+ /*
+ * Intel Arch Perfmon v2+
+ */
+ u64 intel_ctrl;
+ union perf_capabilities intel_cap;
+
+ /*
+ * Intel DebugStore bits
+ */
+ int bts, pebs;
+ int pebs_record_size;
+ void (*drain_pebs)(struct pt_regs *regs);
+ struct event_constraint *pebs_constraints;
+
+ /*
+ * Intel LBR
+ */
+ unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */
+ int lbr_nr; /* hardware stack size */
};
static struct x86_pmu x86_pmu __read_mostly;
@@ -198,7 +278,7 @@ static u64
x86_perf_event_update(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
- int shift = 64 - x86_pmu.event_bits;
+ int shift = 64 - x86_pmu.cntval_bits;
u64 prev_raw_count, new_raw_count;
int idx = hwc->idx;
s64 delta;
@@ -241,33 +321,32 @@ again:
static atomic_t active_events;
static DEFINE_MUTEX(pmc_reserve_mutex);
+#ifdef CONFIG_X86_LOCAL_APIC
+
static bool reserve_pmc_hardware(void)
{
-#ifdef CONFIG_X86_LOCAL_APIC
int i;
if (nmi_watchdog == NMI_LOCAL_APIC)
disable_lapic_nmi_watchdog();
- for (i = 0; i < x86_pmu.num_events; i++) {
+ for (i = 0; i < x86_pmu.num_counters; i++) {
if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
goto perfctr_fail;
}
- for (i = 0; i < x86_pmu.num_events; i++) {
+ for (i = 0; i < x86_pmu.num_counters; i++) {
if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
goto eventsel_fail;
}
-#endif
return true;
-#ifdef CONFIG_X86_LOCAL_APIC
eventsel_fail:
for (i--; i >= 0; i--)
release_evntsel_nmi(x86_pmu.eventsel + i);
- i = x86_pmu.num_events;
+ i = x86_pmu.num_counters;
perfctr_fail:
for (i--; i >= 0; i--)
@@ -277,128 +356,36 @@ perfctr_fail:
enable_lapic_nmi_watchdog();
return false;
-#endif
}
static void release_pmc_hardware(void)
{
-#ifdef CONFIG_X86_LOCAL_APIC
int i;
- for (i = 0; i < x86_pmu.num_events; i++) {
+ for (i = 0; i < x86_pmu.num_counters; i++) {
release_perfctr_nmi(x86_pmu.perfctr + i);
release_evntsel_nmi(x86_pmu.eventsel + i);
}
if (nmi_watchdog == NMI_LOCAL_APIC)
enable_lapic_nmi_watchdog();
-#endif
-}
-
-static inline bool bts_available(void)
-{
- return x86_pmu.enable_bts != NULL;
}
-static void init_debug_store_on_cpu(int cpu)
-{
- struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
-
- if (!ds)
- return;
-
- wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
- (u32)((u64)(unsigned long)ds),
- (u32)((u64)(unsigned long)ds >> 32));
-}
-
-static void fini_debug_store_on_cpu(int cpu)
-{
- if (!per_cpu(cpu_hw_events, cpu).ds)
- return;
-
- wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
-}
-
-static void release_bts_hardware(void)
-{
- int cpu;
-
- if (!bts_available())
- return;
-
- get_online_cpus();
-
- for_each_online_cpu(cpu)
- fini_debug_store_on_cpu(cpu);
-
- for_each_possible_cpu(cpu) {
- struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
-
- if (!ds)
- continue;
-
- per_cpu(cpu_hw_events, cpu).ds = NULL;
-
- kfree((void *)(unsigned long)ds->bts_buffer_base);
- kfree(ds);
- }
-
- put_online_cpus();
-}
-
-static int reserve_bts_hardware(void)
-{
- int cpu, err = 0;
-
- if (!bts_available())
- return 0;
-
- get_online_cpus();
-
- for_each_possible_cpu(cpu) {
- struct debug_store *ds;
- void *buffer;
-
- err = -ENOMEM;
- buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
- if (unlikely(!buffer))
- break;
-
- ds = kzalloc(sizeof(*ds), GFP_KERNEL);
- if (unlikely(!ds)) {
- kfree(buffer);
- break;
- }
-
- ds->bts_buffer_base = (u64)(unsigned long)buffer;
- ds->bts_index = ds->bts_buffer_base;
- ds->bts_absolute_maximum =
- ds->bts_buffer_base + BTS_BUFFER_SIZE;
- ds->bts_interrupt_threshold =
- ds->bts_absolute_maximum - BTS_OVFL_TH;
-
- per_cpu(cpu_hw_events, cpu).ds = ds;
- err = 0;
- }
+#else
- if (err)
- release_bts_hardware();
- else {
- for_each_online_cpu(cpu)
- init_debug_store_on_cpu(cpu);
- }
+static bool reserve_pmc_hardware(void) { return true; }
+static void release_pmc_hardware(void) {}
- put_online_cpus();
+#endif
- return err;
-}
+static int reserve_ds_buffers(void);
+static void release_ds_buffers(void);
static void hw_perf_event_destroy(struct perf_event *event)
{
if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
release_pmc_hardware();
- release_bts_hardware();
+ release_ds_buffers();
mutex_unlock(&pmc_reserve_mutex);
}
}
@@ -441,54 +428,11 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
return 0;
}
-/*
- * Setup the hardware configuration for a given attr_type
- */
-static int __hw_perf_event_init(struct perf_event *event)
+static int x86_setup_perfctr(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
struct hw_perf_event *hwc = &event->hw;
u64 config;
- int err;
-
- if (!x86_pmu_initialized())
- return -ENODEV;
-
- err = 0;
- if (!atomic_inc_not_zero(&active_events)) {
- mutex_lock(&pmc_reserve_mutex);
- if (atomic_read(&active_events) == 0) {
- if (!reserve_pmc_hardware())
- err = -EBUSY;
- else
- err = reserve_bts_hardware();
- }
- if (!err)
- atomic_inc(&active_events);
- mutex_unlock(&pmc_reserve_mutex);
- }
- if (err)
- return err;
-
- event->destroy = hw_perf_event_destroy;
-
- /*
- * Generate PMC IRQs:
- * (keep 'enabled' bit clear for now)
- */
- hwc->config = ARCH_PERFMON_EVENTSEL_INT;
-
- hwc->idx = -1;
- hwc->last_cpu = -1;
- hwc->last_tag = ~0ULL;
-
- /*
- * Count user and OS events unless requested not to.
- */
- if (!attr->exclude_user)
- hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
- if (!attr->exclude_kernel)
- hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
if (!hwc->sample_period) {
hwc->sample_period = x86_pmu.max_period;
@@ -505,16 +449,8 @@ static int __hw_perf_event_init(struct perf_event *event)
return -EOPNOTSUPP;
}
- /*
- * Raw hw_event type provide the config in the hw_event structure
- */
- if (attr->type == PERF_TYPE_RAW) {
- hwc->config |= x86_pmu.raw_event(attr->config);
- if ((hwc->config & ARCH_PERFMON_EVENTSEL_ANY) &&
- perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
- return -EACCES;
+ if (attr->type == PERF_TYPE_RAW)
return 0;
- }
if (attr->type == PERF_TYPE_HW_CACHE)
return set_ext_hw_attr(hwc, attr);
@@ -539,11 +475,11 @@ static int __hw_perf_event_init(struct perf_event *event)
if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
(hwc->sample_period == 1)) {
/* BTS is not supported by this architecture. */
- if (!bts_available())
+ if (!x86_pmu.bts)
return -EOPNOTSUPP;
/* BTS is currently only allowed for user-mode. */
- if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
+ if (!attr->exclude_kernel)
return -EOPNOTSUPP;
}
@@ -552,12 +488,87 @@ static int __hw_perf_event_init(struct perf_event *event)
return 0;
}
+static int x86_pmu_hw_config(struct perf_event *event)
+{
+ if (event->attr.precise_ip) {
+ int precise = 0;
+
+ /* Support for constant skid */
+ if (x86_pmu.pebs)
+ precise++;
+
+ /* Support for IP fixup */
+ if (x86_pmu.lbr_nr)
+ precise++;
+
+ if (event->attr.precise_ip > precise)
+ return -EOPNOTSUPP;
+ }
+
+ /*
+ * Generate PMC IRQs:
+ * (keep 'enabled' bit clear for now)
+ */
+ event->hw.config = ARCH_PERFMON_EVENTSEL_INT;
+
+ /*
+ * Count user and OS events unless requested not to
+ */
+ if (!event->attr.exclude_user)
+ event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
+ if (!event->attr.exclude_kernel)
+ event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;
+
+ if (event->attr.type == PERF_TYPE_RAW)
+ event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
+
+ return x86_setup_perfctr(event);
+}
+
+/*
+ * Setup the hardware configuration for a given attr_type
+ */
+static int __hw_perf_event_init(struct perf_event *event)
+{
+ int err;
+
+ if (!x86_pmu_initialized())
+ return -ENODEV;
+
+ err = 0;
+ if (!atomic_inc_not_zero(&active_events)) {
+ mutex_lock(&pmc_reserve_mutex);
+ if (atomic_read(&active_events) == 0) {
+ if (!reserve_pmc_hardware())
+ err = -EBUSY;
+ else {
+ err = reserve_ds_buffers();
+ if (err)
+ release_pmc_hardware();
+ }
+ }
+ if (!err)
+ atomic_inc(&active_events);
+ mutex_unlock(&pmc_reserve_mutex);
+ }
+ if (err)
+ return err;
+
+ event->destroy = hw_perf_event_destroy;
+
+ event->hw.idx = -1;
+ event->hw.last_cpu = -1;
+ event->hw.last_tag = ~0ULL;
+
+ return x86_pmu.hw_config(event);
+}
+
static void x86_pmu_disable_all(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx;
- for (idx = 0; idx < x86_pmu.num_events; idx++) {
+ for (idx = 0; idx < x86_pmu.num_counters; idx++) {
u64 val;
if (!test_bit(idx, cpuc->active_mask))
@@ -587,12 +598,12 @@ void hw_perf_disable(void)
x86_pmu.disable_all();
}
-static void x86_pmu_enable_all(void)
+static void x86_pmu_enable_all(int added)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx;
- for (idx = 0; idx < x86_pmu.num_events; idx++) {
+ for (idx = 0; idx < x86_pmu.num_counters; idx++) {
struct perf_event *event = cpuc->events[idx];
u64 val;
@@ -667,14 +678,14 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
* assign events to counters starting with most
* constrained events.
*/
- wmax = x86_pmu.num_events;
+ wmax = x86_pmu.num_counters;
/*
* when fixed event counters are present,
* wmax is incremented by 1 to account
* for one more choice
*/
- if (x86_pmu.num_events_fixed)
+ if (x86_pmu.num_counters_fixed)
wmax++;
for (w = 1, num = n; num && w <= wmax; w++) {
@@ -724,7 +735,7 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
struct perf_event *event;
int n, max_count;
- max_count = x86_pmu.num_events + x86_pmu.num_events_fixed;
+ max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;
/* current number of events already accepted */
n = cpuc->n_events;
@@ -795,7 +806,7 @@ void hw_perf_enable(void)
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct perf_event *event;
struct hw_perf_event *hwc;
- int i;
+ int i, added = cpuc->n_added;
if (!x86_pmu_initialized())
return;
@@ -847,19 +858,20 @@ void hw_perf_enable(void)
cpuc->enabled = 1;
barrier();
- x86_pmu.enable_all();
+ x86_pmu.enable_all(added);
}
-static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc)
+static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
+ u64 enable_mask)
{
- (void)checking_wrmsrl(hwc->config_base + hwc->idx,
- hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
+ wrmsrl(hwc->config_base + hwc->idx, hwc->config | enable_mask);
}
static inline void x86_pmu_disable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
- (void)checking_wrmsrl(hwc->config_base + hwc->idx, hwc->config);
+
+ wrmsrl(hwc->config_base + hwc->idx, hwc->config);
}
static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
@@ -874,7 +886,7 @@ x86_perf_event_set_period(struct perf_event *event)
struct hw_perf_event *hwc = &event->hw;
s64 left = atomic64_read(&hwc->period_left);
s64 period = hwc->sample_period;
- int err, ret = 0, idx = hwc->idx;
+ int ret = 0, idx = hwc->idx;
if (idx == X86_PMC_IDX_FIXED_BTS)
return 0;
@@ -912,8 +924,8 @@ x86_perf_event_set_period(struct perf_event *event)
*/
atomic64_set(&hwc->prev_count, (u64)-left);
- err = checking_wrmsrl(hwc->event_base + idx,
- (u64)(-left) & x86_pmu.event_mask);
+ wrmsrl(hwc->event_base + idx,
+ (u64)(-left) & x86_pmu.cntval_mask);
perf_event_update_userpage(event);
@@ -924,7 +936,8 @@ static void x86_pmu_enable_event(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (cpuc->enabled)
- __x86_pmu_enable_event(&event->hw);
+ __x86_pmu_enable_event(&event->hw,
+ ARCH_PERFMON_EVENTSEL_ENABLE);
}
/*
@@ -950,7 +963,15 @@ static int x86_pmu_enable(struct perf_event *event)
if (n < 0)
return n;
- ret = x86_schedule_events(cpuc, n, assign);
+ /*
+ * If a group event scheduling transaction was started,
+ * skip the schedulability test here; it will be performed
+ * at commit time (->commit_txn) as a whole.
+ */
+ if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
+ goto out;
+
+ ret = x86_pmu.schedule_events(cpuc, n, assign);
if (ret)
return ret;
/*
@@ -959,6 +980,7 @@ static int x86_pmu_enable(struct perf_event *event)
*/
memcpy(cpuc->assign, assign, n*sizeof(int));
+out:
cpuc->n_events = n;
cpuc->n_added += n - n0;
@@ -991,11 +1013,12 @@ static void x86_pmu_unthrottle(struct perf_event *event)
void perf_event_print_debug(void)
{
u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
+ u64 pebs;
struct cpu_hw_events *cpuc;
unsigned long flags;
int cpu, idx;
- if (!x86_pmu.num_events)
+ if (!x86_pmu.num_counters)
return;
local_irq_save(flags);
@@ -1008,16 +1031,18 @@ void perf_event_print_debug(void)
rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
+ rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
pr_info("\n");
pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
pr_info("CPU#%d: status: %016llx\n", cpu, status);
pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
+ pr_info("CPU#%d: pebs: %016llx\n", cpu, pebs);
}
- pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
+ pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
- for (idx = 0; idx < x86_pmu.num_events; idx++) {
+ for (idx = 0; idx < x86_pmu.num_counters; idx++) {
rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
rdmsrl(x86_pmu.perfctr + idx, pmc_count);
@@ -1030,7 +1055,7 @@ void perf_event_print_debug(void)
pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
cpu, idx, prev_left);
}
- for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
+ for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
@@ -1095,7 +1120,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
cpuc = &__get_cpu_var(cpu_hw_events);
- for (idx = 0; idx < x86_pmu.num_events; idx++) {
+ for (idx = 0; idx < x86_pmu.num_counters; idx++) {
if (!test_bit(idx, cpuc->active_mask))
continue;
@@ -1103,7 +1128,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
hwc = &event->hw;
val = x86_perf_event_update(event);
- if (val & (1ULL << (x86_pmu.event_bits - 1)))
+ if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
continue;
/*
@@ -1146,7 +1171,6 @@ void set_perf_event_pending(void)
void perf_events_lapic_init(void)
{
-#ifdef CONFIG_X86_LOCAL_APIC
if (!x86_pmu.apic || !x86_pmu_initialized())
return;
@@ -1154,7 +1178,6 @@ void perf_events_lapic_init(void)
* Always use NMI for PMU
*/
apic_write(APIC_LVTPC, APIC_DM_NMI);
-#endif
}
static int __kprobes
@@ -1178,9 +1201,7 @@ perf_event_nmi_handler(struct notifier_block *self,
regs = args->regs;
-#ifdef CONFIG_X86_LOCAL_APIC
apic_write(APIC_LVTPC, APIC_DM_NMI);
-#endif
/*
* Can't rely on the handled return value to say it was our NMI, two
* events could trigger 'simultaneously' raising two back-to-back NMIs.
@@ -1217,118 +1238,11 @@ x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
return &unconstrained;
}
-static int x86_event_sched_in(struct perf_event *event,
- struct perf_cpu_context *cpuctx)
-{
- int ret = 0;
-
- event->state = PERF_EVENT_STATE_ACTIVE;
- event->oncpu = smp_processor_id();
- event->tstamp_running += event->ctx->time - event->tstamp_stopped;
-
- if (!is_x86_event(event))
- ret = event->pmu->enable(event);
-
- if (!ret && !is_software_event(event))
- cpuctx->active_oncpu++;
-
- if (!ret && event->attr.exclusive)
- cpuctx->exclusive = 1;
-
- return ret;
-}
-
-static void x86_event_sched_out(struct perf_event *event,
- struct perf_cpu_context *cpuctx)
-{
- event->state = PERF_EVENT_STATE_INACTIVE;
- event->oncpu = -1;
-
- if (!is_x86_event(event))
- event->pmu->disable(event);
-
- event->tstamp_running -= event->ctx->time - event->tstamp_stopped;
-
- if (!is_software_event(event))
- cpuctx->active_oncpu--;
-
- if (event->attr.exclusive || !cpuctx->active_oncpu)
- cpuctx->exclusive = 0;
-}
-
-/*
- * Called to enable a whole group of events.
- * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
- * Assumes the caller has disabled interrupts and has
- * frozen the PMU with hw_perf_save_disable.
- *
- * called with PMU disabled. If successful and return value 1,
- * then guaranteed to call perf_enable() and hw_perf_enable()
- */
-int hw_perf_group_sched_in(struct perf_event *leader,
- struct perf_cpu_context *cpuctx,
- struct perf_event_context *ctx)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- struct perf_event *sub;
- int assign[X86_PMC_IDX_MAX];
- int n0, n1, ret;
-
- /* n0 = total number of events */
- n0 = collect_events(cpuc, leader, true);
- if (n0 < 0)
- return n0;
-
- ret = x86_schedule_events(cpuc, n0, assign);
- if (ret)
- return ret;
-
- ret = x86_event_sched_in(leader, cpuctx);
- if (ret)
- return ret;
-
- n1 = 1;
- list_for_each_entry(sub, &leader->sibling_list, group_entry) {
- if (sub->state > PERF_EVENT_STATE_OFF) {
- ret = x86_event_sched_in(sub, cpuctx);
- if (ret)
- goto undo;
- ++n1;
- }
- }
- /*
- * copy new assignment, now we know it is possible
- * will be used by hw_perf_enable()
- */
- memcpy(cpuc->assign, assign, n0*sizeof(int));
-
- cpuc->n_events = n0;
- cpuc->n_added += n1;
- ctx->nr_active += n1;
-
- /*
- * 1 means successful and events are active
- * This is not quite true because we defer
- * actual activation until hw_perf_enable() but
- * this way we* ensure caller won't try to enable
- * individual events
- */
- return 1;
-undo:
- x86_event_sched_out(leader, cpuctx);
- n0 = 1;
- list_for_each_entry(sub, &leader->sibling_list, group_entry) {
- if (sub->state == PERF_EVENT_STATE_ACTIVE) {
- x86_event_sched_out(sub, cpuctx);
- if (++n0 == n1)
- break;
- }
- }
- return ret;
-}
-
#include "perf_event_amd.c"
#include "perf_event_p6.c"
+#include "perf_event_p4.c"
+#include "perf_event_intel_lbr.c"
+#include "perf_event_intel_ds.c"
#include "perf_event_intel.c"
static int __cpuinit
@@ -1402,48 +1316,50 @@ void __init init_hw_perf_events(void)
pr_cont("%s PMU driver.\n", x86_pmu.name);
- if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
+ if (x86_pmu.quirks)
+ x86_pmu.quirks();
+
+ if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
- x86_pmu.num_events, X86_PMC_MAX_GENERIC);
- x86_pmu.num_events = X86_PMC_MAX_GENERIC;
+ x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
+ x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
}
- perf_event_mask = (1 << x86_pmu.num_events) - 1;
- perf_max_events = x86_pmu.num_events;
+ x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
+ perf_max_events = x86_pmu.num_counters;
- if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
+ if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
- x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
- x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
+ x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
+ x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
}
- perf_event_mask |=
- ((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
- x86_pmu.intel_ctrl = perf_event_mask;
+ x86_pmu.intel_ctrl |=
+ ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
perf_events_lapic_init();
register_die_notifier(&perf_event_nmi_notifier);
unconstrained = (struct event_constraint)
- __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1,
- 0, x86_pmu.num_events);
+ __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
+ 0, x86_pmu.num_counters);
if (x86_pmu.event_constraints) {
for_each_event_constraint(c, x86_pmu.event_constraints) {
- if (c->cmask != INTEL_ARCH_FIXED_MASK)
+ if (c->cmask != X86_RAW_EVENT_MASK)
continue;
- c->idxmsk64 |= (1ULL << x86_pmu.num_events) - 1;
- c->weight += x86_pmu.num_events;
+ c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
+ c->weight += x86_pmu.num_counters;
}
}
pr_info("... version: %d\n", x86_pmu.version);
- pr_info("... bit width: %d\n", x86_pmu.event_bits);
- pr_info("... generic registers: %d\n", x86_pmu.num_events);
- pr_info("... value mask: %016Lx\n", x86_pmu.event_mask);
+ pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
+ pr_info("... generic registers: %d\n", x86_pmu.num_counters);
+ pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask);
pr_info("... max period: %016Lx\n", x86_pmu.max_period);
- pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed);
- pr_info("... event mask: %016Lx\n", perf_event_mask);
+ pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
+ pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);
perf_cpu_notifier(x86_pmu_notifier);
}
@@ -1453,6 +1369,59 @@ static inline void x86_pmu_read(struct perf_event *event)
x86_perf_event_update(event);
}
+/*
+ * Start group events scheduling transaction
+ * Set the flag to make pmu::enable() not perform the
+ * schedulability test, it will be performed at commit time
+ */
+static void x86_pmu_start_txn(const struct pmu *pmu)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+ cpuc->group_flag |= PERF_EVENT_TXN_STARTED;
+}
+
+/*
+ * Stop group events scheduling transaction
+ * Clear the flag and pmu::enable() will perform the
+ * schedulability test.
+ */
+static void x86_pmu_cancel_txn(const struct pmu *pmu)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+ cpuc->group_flag &= ~PERF_EVENT_TXN_STARTED;
+}
+
+/*
+ * Commit group events scheduling transaction
+ * Perform the group schedulability test as a whole
+ * Return 0 if success
+ */
+static int x86_pmu_commit_txn(const struct pmu *pmu)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ int assign[X86_PMC_IDX_MAX];
+ int n, ret;
+
+ n = cpuc->n_events;
+
+ if (!x86_pmu_initialized())
+ return -EAGAIN;
+
+ ret = x86_pmu.schedule_events(cpuc, n, assign);
+ if (ret)
+ return ret;
+
+ /*
+ * copy new assignment, now we know it is possible
+ * will be used by hw_perf_enable()
+ */
+ memcpy(cpuc->assign, assign, n*sizeof(int));
+
+ return 0;
+}
+
static const struct pmu pmu = {
.enable = x86_pmu_enable,
.disable = x86_pmu_disable,
@@ -1460,9 +1429,38 @@ static const struct pmu pmu = {
.stop = x86_pmu_stop,
.read = x86_pmu_read,
.unthrottle = x86_pmu_unthrottle,
+ .start_txn = x86_pmu_start_txn,
+ .cancel_txn = x86_pmu_cancel_txn,
+ .commit_txn = x86_pmu_commit_txn,
};
/*
+ * validate that we can schedule this event
+ */
+static int validate_event(struct perf_event *event)
+{
+ struct cpu_hw_events *fake_cpuc;
+ struct event_constraint *c;
+ int ret = 0;
+
+ fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
+ if (!fake_cpuc)
+ return -ENOMEM;
+
+ c = x86_pmu.get_event_constraints(fake_cpuc, event);
+
+ if (!c || !c->weight)
+ ret = -ENOSPC;
+
+ if (x86_pmu.put_event_constraints)
+ x86_pmu.put_event_constraints(fake_cpuc, event);
+
+ kfree(fake_cpuc);
+
+ return ret;
+}
+
+/*
* validate a single event group
*
* validation include:
@@ -1502,7 +1500,7 @@ static int validate_group(struct perf_event *event)
fake_cpuc->n_events = n;
- ret = x86_schedule_events(fake_cpuc, n, NULL);
+ ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
out_free:
kfree(fake_cpuc);
@@ -1527,6 +1525,8 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
if (event->group_leader != event)
err = validate_group(event);
+ else
+ err = validate_event(event);
event->pmu = tmp;
}
@@ -1574,8 +1574,7 @@ static void backtrace_address(void *data, unsigned long addr, int reliable)
{
struct perf_callchain_entry *entry = data;
- if (reliable)
- callchain_store(entry, addr);
+ callchain_store(entry, addr);
}
static const struct stacktrace_ops backtrace_ops = {
@@ -1597,41 +1596,6 @@ perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
}
-/*
- * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
- */
-static unsigned long
-copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
-{
- unsigned long offset, addr = (unsigned long)from;
- int type = in_nmi() ? KM_NMI : KM_IRQ0;
- unsigned long size, len = 0;
- struct page *page;
- void *map;
- int ret;
-
- do {
- ret = __get_user_pages_fast(addr, 1, 0, &page);
- if (!ret)
- break;
-
- offset = addr & (PAGE_SIZE - 1);
- size = min(PAGE_SIZE - offset, n - len);
-
- map = kmap_atomic(page, type);
- memcpy(to, map+offset, size);
- kunmap_atomic(map, type);
- put_page(page);
-
- len += size;
- to += size;
- addr += size;
-
- } while (len < n);
-
- return len;
-}
-
#ifdef CONFIG_COMPAT
static inline int
perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
@@ -1727,6 +1691,11 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
struct perf_callchain_entry *entry;
+ if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+ /* TODO: We don't support guest os callchain now */
+ return NULL;
+ }
+
if (in_nmi())
entry = &__get_cpu_var(pmc_nmi_entry);
else
@@ -1750,3 +1719,37 @@ void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int ski
regs->cs = __KERNEL_CS;
local_save_flags(regs->flags);
}
+
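+/*
+ * Report the instruction pointer for a sample: when the PMI arrived while a
+ * guest was running, the registered guest callbacks supply the guest RIP
+ * instead of the host pt_regs.
+ */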
+unsigned long perf_instruction_pointer(struct pt_regs *regs)
+{
+ unsigned long ip;
+
+ if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
+ ip = perf_guest_cbs->get_guest_ip();
+ else
+ ip = instruction_pointer(regs);
+
+ return ip;
+}
+
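+/*
+ * Classify the sample as guest/host and user/kernel mode, and mark samples
+ * whose regs carry PERF_EFLAGS_EXACT (set by the PEBS code when the reported
+ * IP is exact) as PERF_RECORD_MISC_EXACT_IP.
+ */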
+unsigned long perf_misc_flags(struct pt_regs *regs)
+{
+ int misc = 0;
+
+ if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+ if (perf_guest_cbs->is_user_mode())
+ misc |= PERF_RECORD_MISC_GUEST_USER;
+ else
+ misc |= PERF_RECORD_MISC_GUEST_KERNEL;
+ } else {
+ if (user_mode(regs))
+ misc |= PERF_RECORD_MISC_USER;
+ else
+ misc |= PERF_RECORD_MISC_KERNEL;
+ }
+
+ if (regs->flags & PERF_EFLAGS_EXACT)
+ misc |= PERF_RECORD_MISC_EXACT_IP;
+
+ return misc;
+}
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index db6f7d4056e1..611df11ba15e 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -2,7 +2,7 @@
static DEFINE_RAW_SPINLOCK(amd_nb_lock);
-static __initconst u64 amd_hw_cache_event_ids
+static __initconst const u64 amd_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =
@@ -111,22 +111,19 @@ static u64 amd_pmu_event_map(int hw_event)
return amd_perfmon_event_map[hw_event];
}
-static u64 amd_pmu_raw_event(u64 hw_event)
+static int amd_pmu_hw_config(struct perf_event *event)
{
-#define K7_EVNTSEL_EVENT_MASK 0xF000000FFULL
-#define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL
-#define K7_EVNTSEL_EDGE_MASK 0x000040000ULL
-#define K7_EVNTSEL_INV_MASK 0x000800000ULL
-#define K7_EVNTSEL_REG_MASK 0x0FF000000ULL
-
-#define K7_EVNTSEL_MASK \
- (K7_EVNTSEL_EVENT_MASK | \
- K7_EVNTSEL_UNIT_MASK | \
- K7_EVNTSEL_EDGE_MASK | \
- K7_EVNTSEL_INV_MASK | \
- K7_EVNTSEL_REG_MASK)
-
- return hw_event & K7_EVNTSEL_MASK;
+ int ret = x86_pmu_hw_config(event);
+
+ if (ret)
+ return ret;
+
+ if (event->attr.type != PERF_TYPE_RAW)
+ return 0;
+
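+ /*
+ * For raw events, merge in the user-supplied config bits, filtered
+ * through the AMD64 raw event mask.
+ */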
+ event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;
+
+ return 0;
}
/*
@@ -165,7 +162,7 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
* be removed on one CPU at a time AND PMU is disabled
* when we come here
*/
- for (i = 0; i < x86_pmu.num_events; i++) {
+ for (i = 0; i < x86_pmu.num_counters; i++) {
if (nb->owners[i] == event) {
cmpxchg(nb->owners+i, event, NULL);
break;
@@ -215,7 +212,7 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
struct hw_perf_event *hwc = &event->hw;
struct amd_nb *nb = cpuc->amd_nb;
struct perf_event *old = NULL;
- int max = x86_pmu.num_events;
+ int max = x86_pmu.num_counters;
int i, j, k = -1;
/*
@@ -293,7 +290,7 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
/*
* initialize all possible NB constraints
*/
- for (i = 0; i < x86_pmu.num_events; i++) {
+ for (i = 0; i < x86_pmu.num_counters; i++) {
__set_bit(i, nb->event_constraints[i].idxmsk);
nb->event_constraints[i].weight = 1;
}
@@ -371,21 +368,22 @@ static void amd_pmu_cpu_dead(int cpu)
raw_spin_unlock(&amd_nb_lock);
}
-static __initconst struct x86_pmu amd_pmu = {
+static __initconst const struct x86_pmu amd_pmu = {
.name = "AMD",
.handle_irq = x86_pmu_handle_irq,
.disable_all = x86_pmu_disable_all,
.enable_all = x86_pmu_enable_all,
.enable = x86_pmu_enable_event,
.disable = x86_pmu_disable_event,
+ .hw_config = amd_pmu_hw_config,
+ .schedule_events = x86_schedule_events,
.eventsel = MSR_K7_EVNTSEL0,
.perfctr = MSR_K7_PERFCTR0,
.event_map = amd_pmu_event_map,
- .raw_event = amd_pmu_raw_event,
.max_events = ARRAY_SIZE(amd_perfmon_event_map),
- .num_events = 4,
- .event_bits = 48,
- .event_mask = (1ULL << 48) - 1,
+ .num_counters = 4,
+ .cntval_bits = 48,
+ .cntval_mask = (1ULL << 48) - 1,
.apic = 1,
/* use highest bit to detect overflow */
.max_period = (1ULL << 47) - 1,
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 9c794ac87837..fdbc652d3feb 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -88,7 +88,7 @@ static u64 intel_pmu_event_map(int hw_event)
return intel_perfmon_event_map[hw_event];
}
-static __initconst u64 westmere_hw_cache_event_ids
+static __initconst const u64 westmere_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =
@@ -179,7 +179,7 @@ static __initconst u64 westmere_hw_cache_event_ids
},
};
-static __initconst u64 nehalem_hw_cache_event_ids
+static __initconst const u64 nehalem_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =
@@ -270,7 +270,7 @@ static __initconst u64 nehalem_hw_cache_event_ids
},
};
-static __initconst u64 core2_hw_cache_event_ids
+static __initconst const u64 core2_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =
@@ -361,7 +361,7 @@ static __initconst u64 core2_hw_cache_event_ids
},
};
-static __initconst u64 atom_hw_cache_event_ids
+static __initconst const u64 atom_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =
@@ -452,60 +452,6 @@ static __initconst u64 atom_hw_cache_event_ids
},
};
-static u64 intel_pmu_raw_event(u64 hw_event)
-{
-#define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL
-#define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL
-#define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL
-#define CORE_EVNTSEL_INV_MASK 0x00800000ULL
-#define CORE_EVNTSEL_REG_MASK 0xFF000000ULL
-
-#define CORE_EVNTSEL_MASK \
- (INTEL_ARCH_EVTSEL_MASK | \
- INTEL_ARCH_UNIT_MASK | \
- INTEL_ARCH_EDGE_MASK | \
- INTEL_ARCH_INV_MASK | \
- INTEL_ARCH_CNT_MASK)
-
- return hw_event & CORE_EVNTSEL_MASK;
-}
-
-static void intel_pmu_enable_bts(u64 config)
-{
- unsigned long debugctlmsr;
-
- debugctlmsr = get_debugctlmsr();
-
- debugctlmsr |= X86_DEBUGCTL_TR;
- debugctlmsr |= X86_DEBUGCTL_BTS;
- debugctlmsr |= X86_DEBUGCTL_BTINT;
-
- if (!(config & ARCH_PERFMON_EVENTSEL_OS))
- debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;
-
- if (!(config & ARCH_PERFMON_EVENTSEL_USR))
- debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;
-
- update_debugctlmsr(debugctlmsr);
-}
-
-static void intel_pmu_disable_bts(void)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- unsigned long debugctlmsr;
-
- if (!cpuc->ds)
- return;
-
- debugctlmsr = get_debugctlmsr();
-
- debugctlmsr &=
- ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
- X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);
-
- update_debugctlmsr(debugctlmsr);
-}
-
static void intel_pmu_disable_all(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -514,12 +460,17 @@ static void intel_pmu_disable_all(void)
if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
intel_pmu_disable_bts();
+
+ intel_pmu_pebs_disable_all();
+ intel_pmu_lbr_disable_all();
}
-static void intel_pmu_enable_all(void)
+static void intel_pmu_enable_all(int added)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ intel_pmu_pebs_enable_all();
+ intel_pmu_lbr_enable_all();
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
@@ -533,6 +484,42 @@ static void intel_pmu_enable_all(void)
}
}
+/*
+ * Workaround for:
+ * Intel Errata AAK100 (model 26)
+ * Intel Errata AAP53 (model 30)
+ * Intel Errata BD53 (model 44)
+ *
+ * These chips need to be 'reset' when adding counters by programming
+ * the magic three (non-counting) events 0x4300D2, 0x4300B1 and 0x4300B5
+ * either in sequence on the same PMC or on different PMCs.
+ */
+static void intel_pmu_nhm_enable_all(int added)
+{
+ if (added) {
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ int i;
+
+ wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 0, 0x4300D2);
+ wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 1, 0x4300B1);
+ wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 2, 0x4300B5);
+
+ wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x3);
+ wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
+
+ for (i = 0; i < 3; i++) {
+ struct perf_event *event = cpuc->events[i];
+
+ if (!event)
+ continue;
+
+ __x86_pmu_enable_event(&event->hw,
+ ARCH_PERFMON_EVENTSEL_ENABLE);
+ }
+ }
+ intel_pmu_enable_all(added);
+}
+
static inline u64 intel_pmu_get_status(void)
{
u64 status;
@@ -547,8 +534,7 @@ static inline void intel_pmu_ack_status(u64 ack)
wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}
-static inline void
-intel_pmu_disable_fixed(struct hw_perf_event *hwc)
+static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
int idx = hwc->idx - X86_PMC_IDX_FIXED;
u64 ctrl_val, mask;
@@ -557,71 +543,10 @@ intel_pmu_disable_fixed(struct hw_perf_event *hwc)
rdmsrl(hwc->config_base, ctrl_val);
ctrl_val &= ~mask;
- (void)checking_wrmsrl(hwc->config_base, ctrl_val);
-}
-
-static void intel_pmu_drain_bts_buffer(void)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- struct debug_store *ds = cpuc->ds;
- struct bts_record {
- u64 from;
- u64 to;
- u64 flags;
- };
- struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
- struct bts_record *at, *top;
- struct perf_output_handle handle;
- struct perf_event_header header;
- struct perf_sample_data data;
- struct pt_regs regs;
-
- if (!event)
- return;
-
- if (!ds)
- return;
-
- at = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
- top = (struct bts_record *)(unsigned long)ds->bts_index;
-
- if (top <= at)
- return;
-
- ds->bts_index = ds->bts_buffer_base;
-
- perf_sample_data_init(&data, 0);
-
- data.period = event->hw.last_period;
- regs.ip = 0;
-
- /*
- * Prepare a generic sample, i.e. fill in the invariant fields.
- * We will overwrite the from and to address before we output
- * the sample.
- */
- perf_prepare_sample(&header, &data, event, &regs);
-
- if (perf_output_begin(&handle, event,
- header.size * (top - at), 1, 1))
- return;
-
- for (; at < top; at++) {
- data.ip = at->from;
- data.addr = at->to;
-
- perf_output_sample(&handle, &header, &data, event);
- }
-
- perf_output_end(&handle);
-
- /* There's new data available. */
- event->hw.interrupts++;
- event->pending_kill = POLL_IN;
+ wrmsrl(hwc->config_base, ctrl_val);
}
-static inline void
-intel_pmu_disable_event(struct perf_event *event)
+static void intel_pmu_disable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
@@ -637,14 +562,15 @@ intel_pmu_disable_event(struct perf_event *event)
}
x86_pmu_disable_event(event);
+
+ if (unlikely(event->attr.precise_ip))
+ intel_pmu_pebs_disable(event);
}
-static inline void
-intel_pmu_enable_fixed(struct hw_perf_event *hwc)
+static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
int idx = hwc->idx - X86_PMC_IDX_FIXED;
u64 ctrl_val, bits, mask;
- int err;
/*
* Enable IRQ generation (0x8),
@@ -669,7 +595,7 @@ intel_pmu_enable_fixed(struct hw_perf_event *hwc)
rdmsrl(hwc->config_base, ctrl_val);
ctrl_val &= ~mask;
ctrl_val |= bits;
- err = checking_wrmsrl(hwc->config_base, ctrl_val);
+ wrmsrl(hwc->config_base, ctrl_val);
}
static void intel_pmu_enable_event(struct perf_event *event)
@@ -689,7 +615,10 @@ static void intel_pmu_enable_event(struct perf_event *event)
return;
}
- __x86_pmu_enable_event(hwc);
+ if (unlikely(event->attr.precise_ip))
+ intel_pmu_pebs_enable(event);
+
+ __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}
/*
@@ -708,20 +637,20 @@ static void intel_pmu_reset(void)
unsigned long flags;
int idx;
- if (!x86_pmu.num_events)
+ if (!x86_pmu.num_counters)
return;
local_irq_save(flags);
printk("clearing PMU state on CPU#%d\n", smp_processor_id());
- for (idx = 0; idx < x86_pmu.num_events; idx++) {
+ for (idx = 0; idx < x86_pmu.num_counters; idx++) {
checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
}
- for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
+ for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
- }
+
if (ds)
ds->bts_index = ds->bts_buffer_base;
@@ -747,7 +676,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
intel_pmu_drain_bts_buffer();
status = intel_pmu_get_status();
if (!status) {
- intel_pmu_enable_all();
+ intel_pmu_enable_all(0);
return 0;
}
@@ -762,6 +691,15 @@ again:
inc_irq_stat(apic_perf_irqs);
ack = status;
+
+ intel_pmu_lbr_read();
+
+ /*
+ * PEBS overflow sets bit 62 in the global status register
+ */
+ if (__test_and_clear_bit(62, (unsigned long *)&status))
+ x86_pmu.drain_pebs(regs);
+
for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
struct perf_event *event = cpuc->events[bit];
@@ -787,26 +725,22 @@ again:
goto again;
done:
- intel_pmu_enable_all();
+ intel_pmu_enable_all(0);
return 1;
}
-static struct event_constraint bts_constraint =
- EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
-
static struct event_constraint *
-intel_special_constraints(struct perf_event *event)
+intel_bts_constraints(struct perf_event *event)
{
- unsigned int hw_event;
-
- hw_event = event->hw.config & INTEL_ARCH_EVENT_MASK;
+ struct hw_perf_event *hwc = &event->hw;
+ unsigned int hw_event, bts_event;
- if (unlikely((hw_event ==
- x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
- (event->hw.sample_period == 1))) {
+ hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
+ bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
+ if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
return &bts_constraint;
- }
+
return NULL;
}
@@ -815,24 +749,53 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event
{
struct event_constraint *c;
- c = intel_special_constraints(event);
+ c = intel_bts_constraints(event);
+ if (c)
+ return c;
+
+ c = intel_pebs_constraints(event);
if (c)
return c;
return x86_get_event_constraints(cpuc, event);
}
-static __initconst struct x86_pmu core_pmu = {
+static int intel_pmu_hw_config(struct perf_event *event)
+{
+ int ret = x86_pmu_hw_config(event);
+
+ if (ret)
+ return ret;
+
+ if (event->attr.type != PERF_TYPE_RAW)
+ return 0;
+
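+ /*
+ * The remaining checks gate the AnyThread bit for raw events: it
+ * only exists on arch perfmon v3 and, since it observes the sibling
+ * thread, requires CAP_SYS_ADMIN under paranoid settings.
+ */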
+ if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
+ return 0;
+
+ if (x86_pmu.version < 3)
+ return -EINVAL;
+
+ if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
+
+ return 0;
+}
+
+static __initconst const struct x86_pmu core_pmu = {
.name = "core",
.handle_irq = x86_pmu_handle_irq,
.disable_all = x86_pmu_disable_all,
.enable_all = x86_pmu_enable_all,
.enable = x86_pmu_enable_event,
.disable = x86_pmu_disable_event,
+ .hw_config = x86_pmu_hw_config,
+ .schedule_events = x86_schedule_events,
.eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
.perfctr = MSR_ARCH_PERFMON_PERFCTR0,
.event_map = intel_pmu_event_map,
- .raw_event = intel_pmu_raw_event,
.max_events = ARRAY_SIZE(intel_perfmon_event_map),
.apic = 1,
/*
@@ -845,17 +808,32 @@ static __initconst struct x86_pmu core_pmu = {
.event_constraints = intel_core_event_constraints,
};
-static __initconst struct x86_pmu intel_pmu = {
+static void intel_pmu_cpu_starting(int cpu)
+{
+ init_debug_store_on_cpu(cpu);
+ /*
+ * Deal with CPUs that don't clear their LBRs on power-up.
+ */
+ intel_pmu_lbr_reset();
+}
+
+static void intel_pmu_cpu_dying(int cpu)
+{
+ fini_debug_store_on_cpu(cpu);
+}
+
+static __initconst const struct x86_pmu intel_pmu = {
.name = "Intel",
.handle_irq = intel_pmu_handle_irq,
.disable_all = intel_pmu_disable_all,
.enable_all = intel_pmu_enable_all,
.enable = intel_pmu_enable_event,
.disable = intel_pmu_disable_event,
+ .hw_config = intel_pmu_hw_config,
+ .schedule_events = x86_schedule_events,
.eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
.perfctr = MSR_ARCH_PERFMON_PERFCTR0,
.event_map = intel_pmu_event_map,
- .raw_event = intel_pmu_raw_event,
.max_events = ARRAY_SIZE(intel_perfmon_event_map),
.apic = 1,
/*
@@ -864,14 +842,38 @@ static __initconst struct x86_pmu intel_pmu = {
* the generic event period:
*/
.max_period = (1ULL << 31) - 1,
- .enable_bts = intel_pmu_enable_bts,
- .disable_bts = intel_pmu_disable_bts,
.get_event_constraints = intel_get_event_constraints,
- .cpu_starting = init_debug_store_on_cpu,
- .cpu_dying = fini_debug_store_on_cpu,
+ .cpu_starting = intel_pmu_cpu_starting,
+ .cpu_dying = intel_pmu_cpu_dying,
};
+static void intel_clovertown_quirks(void)
+{
+ /*
+ * PEBS is unreliable due to:
+ *
+ * AJ67 - PEBS may experience CPL leaks
+ * AJ68 - PEBS PMI may be delayed by one event
+ * AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
+ * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
+ *
+ * AJ67 could be worked around by restricting the OS/USR flags.
+ * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
+ *
+ * AJ106 could possibly be worked around by not allowing LBR
+ * usage from PEBS, including the fixup.
+ * AJ68 could possibly be worked around by always programming
+ * a pebs_event_reset[0] value and coping with the lost events.
+ *
+ * But taken together it might just make sense to not enable PEBS on
+ * these chips.
+ */
+ printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
+ x86_pmu.pebs = 0;
+ x86_pmu.pebs_constraints = NULL;
+}
+
static __init int intel_pmu_init(void)
{
union cpuid10_edx edx;
@@ -881,12 +883,13 @@ static __init int intel_pmu_init(void)
int version;
if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
- /* check for P6 processor family */
- if (boot_cpu_data.x86 == 6) {
- return p6_pmu_init();
- } else {
+ switch (boot_cpu_data.x86) {
+ case 0x6:
+ return p6_pmu_init();
+ case 0xf:
+ return p4_pmu_init();
+ }
return -ENODEV;
- }
}
/*
@@ -904,16 +907,28 @@ static __init int intel_pmu_init(void)
x86_pmu = intel_pmu;
x86_pmu.version = version;
- x86_pmu.num_events = eax.split.num_events;
- x86_pmu.event_bits = eax.split.bit_width;
- x86_pmu.event_mask = (1ULL << eax.split.bit_width) - 1;
+ x86_pmu.num_counters = eax.split.num_counters;
+ x86_pmu.cntval_bits = eax.split.bit_width;
+ x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
/*
* Quirk: v2 perfmon does not report fixed-purpose events, so
* assume at least 3 events:
*/
if (version > 1)
- x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);
+ x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
+
+ /*
+ * v2 and above have a perf capabilities MSR
+ */
+ if (version > 1) {
+ u64 capabilities;
+
+ rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
+ x86_pmu.intel_cap.capabilities = capabilities;
+ }
+
+ intel_ds_init();
/*
* Install the hw-cache-events table:
@@ -924,12 +939,15 @@ static __init int intel_pmu_init(void)
break;
case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
+ x86_pmu.quirks = intel_clovertown_quirks;
case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
case 29: /* six-core 45 nm xeon "Dunnington" */
memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
+ intel_pmu_lbr_init_core();
+
x86_pmu.event_constraints = intel_core2_event_constraints;
pr_cont("Core2 events, ");
break;
@@ -940,13 +958,19 @@ static __init int intel_pmu_init(void)
memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
+ intel_pmu_lbr_init_nhm();
+
x86_pmu.event_constraints = intel_nehalem_event_constraints;
- pr_cont("Nehalem/Corei7 events, ");
+ x86_pmu.enable_all = intel_pmu_nhm_enable_all;
+ pr_cont("Nehalem events, ");
break;
+
case 28: /* Atom */
memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
+ intel_pmu_lbr_init_atom();
+
x86_pmu.event_constraints = intel_gen_event_constraints;
pr_cont("Atom events, ");
break;
@@ -956,7 +980,10 @@ static __init int intel_pmu_init(void)
memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
+ intel_pmu_lbr_init_nhm();
+
x86_pmu.event_constraints = intel_westmere_event_constraints;
+ x86_pmu.enable_all = intel_pmu_nhm_enable_all;
pr_cont("Westmere events, ");
break;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
new file mode 100644
index 000000000000..18018d1311cd
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -0,0 +1,641 @@
+#ifdef CONFIG_CPU_SUP_INTEL
+
+/* The maximal number of PEBS events: */
+#define MAX_PEBS_EVENTS 4
+
+/* The size of a BTS record in bytes: */
+#define BTS_RECORD_SIZE 24
+
+#define BTS_BUFFER_SIZE (PAGE_SIZE << 4)
+#define PEBS_BUFFER_SIZE PAGE_SIZE
+
+/*
+ * pebs_record_32 for p4 and core not supported
+
+struct pebs_record_32 {
+ u32 flags, ip;
+ u32 ax, bc, cx, dx;
+ u32 si, di, bp, sp;
+};
+
+ */
+
+struct pebs_record_core {
+ u64 flags, ip;
+ u64 ax, bx, cx, dx;
+ u64 si, di, bp, sp;
+ u64 r8, r9, r10, r11;
+ u64 r12, r13, r14, r15;
+};
+
+struct pebs_record_nhm {
+ u64 flags, ip;
+ u64 ax, bx, cx, dx;
+ u64 si, di, bp, sp;
+ u64 r8, r9, r10, r11;
+ u64 r12, r13, r14, r15;
+ u64 status, dla, dse, lat;
+};
+
+/*
+ * A debug store configuration.
+ *
+ * We only support architectures that use 64bit fields.
+ */
+struct debug_store {
+ u64 bts_buffer_base;
+ u64 bts_index;
+ u64 bts_absolute_maximum;
+ u64 bts_interrupt_threshold;
+ u64 pebs_buffer_base;
+ u64 pebs_index;
+ u64 pebs_absolute_maximum;
+ u64 pebs_interrupt_threshold;
+ u64 pebs_event_reset[MAX_PEBS_EVENTS];
+};
+
+static void init_debug_store_on_cpu(int cpu)
+{
+ struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+
+ if (!ds)
+ return;
+
+ wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
+ (u32)((u64)(unsigned long)ds),
+ (u32)((u64)(unsigned long)ds >> 32));
+}
+
+static void fini_debug_store_on_cpu(int cpu)
+{
+ if (!per_cpu(cpu_hw_events, cpu).ds)
+ return;
+
+ wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
+}
+
+static void release_ds_buffers(void)
+{
+ int cpu;
+
+ if (!x86_pmu.bts && !x86_pmu.pebs)
+ return;
+
+ get_online_cpus();
+
+ for_each_online_cpu(cpu)
+ fini_debug_store_on_cpu(cpu);
+
+ for_each_possible_cpu(cpu) {
+ struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+
+ if (!ds)
+ continue;
+
+ per_cpu(cpu_hw_events, cpu).ds = NULL;
+
+ kfree((void *)(unsigned long)ds->pebs_buffer_base);
+ kfree((void *)(unsigned long)ds->bts_buffer_base);
+ kfree(ds);
+ }
+
+ put_online_cpus();
+}
+
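+/*
+ * Allocate a debug store plus the BTS/PEBS buffers for every possible CPU
+ * and point MSR_IA32_DS_AREA at it on the CPUs that are currently online.
+ */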
+static int reserve_ds_buffers(void)
+{
+ int cpu, err = 0;
+
+ if (!x86_pmu.bts && !x86_pmu.pebs)
+ return 0;
+
+ get_online_cpus();
+
+ for_each_possible_cpu(cpu) {
+ struct debug_store *ds;
+ void *buffer;
+ int max, thresh;
+
+ err = -ENOMEM;
+ ds = kzalloc(sizeof(*ds), GFP_KERNEL);
+ if (unlikely(!ds))
+ break;
+ per_cpu(cpu_hw_events, cpu).ds = ds;
+
+ if (x86_pmu.bts) {
+ buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
+ if (unlikely(!buffer))
+ break;
+
+ max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
+ thresh = max / 16;
+
+ ds->bts_buffer_base = (u64)(unsigned long)buffer;
+ ds->bts_index = ds->bts_buffer_base;
+ ds->bts_absolute_maximum = ds->bts_buffer_base +
+ max * BTS_RECORD_SIZE;
+ ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
+ thresh * BTS_RECORD_SIZE;
+ }
+
+ if (x86_pmu.pebs) {
+ buffer = kzalloc(PEBS_BUFFER_SIZE, GFP_KERNEL);
+ if (unlikely(!buffer))
+ break;
+
+ max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;
+
+ ds->pebs_buffer_base = (u64)(unsigned long)buffer;
+ ds->pebs_index = ds->pebs_buffer_base;
+ ds->pebs_absolute_maximum = ds->pebs_buffer_base +
+ max * x86_pmu.pebs_record_size;
+ /*
+ * Always use single record PEBS
+ */
+ ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
+ x86_pmu.pebs_record_size;
+ }
+
+ err = 0;
+ }
+
+ if (err)
+ release_ds_buffers();
+ else {
+ for_each_online_cpu(cpu)
+ init_debug_store_on_cpu(cpu);
+ }
+
+ put_online_cpus();
+
+ return err;
+}
+
+/*
+ * BTS
+ */
+
+static struct event_constraint bts_constraint =
+ EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
+
+static void intel_pmu_enable_bts(u64 config)
+{
+ unsigned long debugctlmsr;
+
+ debugctlmsr = get_debugctlmsr();
+
+ debugctlmsr |= DEBUGCTLMSR_TR;
+ debugctlmsr |= DEBUGCTLMSR_BTS;
+ debugctlmsr |= DEBUGCTLMSR_BTINT;
+
+ if (!(config & ARCH_PERFMON_EVENTSEL_OS))
+ debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS;
+
+ if (!(config & ARCH_PERFMON_EVENTSEL_USR))
+ debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR;
+
+ update_debugctlmsr(debugctlmsr);
+}
+
+static void intel_pmu_disable_bts(void)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ unsigned long debugctlmsr;
+
+ if (!cpuc->ds)
+ return;
+
+ debugctlmsr = get_debugctlmsr();
+
+ debugctlmsr &=
+ ~(DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT |
+ DEBUGCTLMSR_BTS_OFF_OS | DEBUGCTLMSR_BTS_OFF_USR);
+
+ update_debugctlmsr(debugctlmsr);
+}
+
+static void intel_pmu_drain_bts_buffer(void)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct debug_store *ds = cpuc->ds;
+ struct bts_record {
+ u64 from;
+ u64 to;
+ u64 flags;
+ };
+ struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
+ struct bts_record *at, *top;
+ struct perf_output_handle handle;
+ struct perf_event_header header;
+ struct perf_sample_data data;
+ struct pt_regs regs;
+
+ if (!event)
+ return;
+
+ if (!ds)
+ return;
+
+ at = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
+ top = (struct bts_record *)(unsigned long)ds->bts_index;
+
+ if (top <= at)
+ return;
+
+ ds->bts_index = ds->bts_buffer_base;
+
+ perf_sample_data_init(&data, 0);
+ data.period = event->hw.last_period;
+ regs.ip = 0;
+
+ /*
+ * Prepare a generic sample, i.e. fill in the invariant fields.
+ * We will overwrite the from and to address before we output
+ * the sample.
+ */
+ perf_prepare_sample(&header, &data, event, &regs);
+
+ if (perf_output_begin(&handle, event, header.size * (top - at), 1, 1))
+ return;
+
+ for (; at < top; at++) {
+ data.ip = at->from;
+ data.addr = at->to;
+
+ perf_output_sample(&handle, &header, &data, event);
+ }
+
+ perf_output_end(&handle);
+
+ /* There's new data available. */
+ event->hw.interrupts++;
+ event->pending_kill = POLL_IN;
+}
+
+/*
+ * PEBS
+ */
+
+static struct event_constraint intel_core_pebs_events[] = {
+ PEBS_EVENT_CONSTRAINT(0x00c0, 0x1), /* INSTR_RETIRED.ANY */
+ PEBS_EVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
+ PEBS_EVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
+ PEBS_EVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETIRED.ANY */
+ PEBS_EVENT_CONSTRAINT(0x01cb, 0x1), /* MEM_LOAD_RETIRED.L1D_MISS */
+ PEBS_EVENT_CONSTRAINT(0x02cb, 0x1), /* MEM_LOAD_RETIRED.L1D_LINE_MISS */
+ PEBS_EVENT_CONSTRAINT(0x04cb, 0x1), /* MEM_LOAD_RETIRED.L2_MISS */
+ PEBS_EVENT_CONSTRAINT(0x08cb, 0x1), /* MEM_LOAD_RETIRED.L2_LINE_MISS */
+ PEBS_EVENT_CONSTRAINT(0x10cb, 0x1), /* MEM_LOAD_RETIRED.DTLB_MISS */
+ EVENT_CONSTRAINT_END
+};
+
+static struct event_constraint intel_nehalem_pebs_events[] = {
+ PEBS_EVENT_CONSTRAINT(0x00c0, 0xf), /* INSTR_RETIRED.ANY */
+ PEBS_EVENT_CONSTRAINT(0xfec1, 0xf), /* X87_OPS_RETIRED.ANY */
+ PEBS_EVENT_CONSTRAINT(0x00c5, 0xf), /* BR_INST_RETIRED.MISPRED */
+ PEBS_EVENT_CONSTRAINT(0x1fc7, 0xf), /* SIMD_INST_RETIRED.ANY */
+ PEBS_EVENT_CONSTRAINT(0x01cb, 0xf), /* MEM_LOAD_RETIRED.L1D_MISS */
+ PEBS_EVENT_CONSTRAINT(0x02cb, 0xf), /* MEM_LOAD_RETIRED.L1D_LINE_MISS */
+ PEBS_EVENT_CONSTRAINT(0x04cb, 0xf), /* MEM_LOAD_RETIRED.L2_MISS */
+ PEBS_EVENT_CONSTRAINT(0x08cb, 0xf), /* MEM_LOAD_RETIRED.L2_LINE_MISS */
+ PEBS_EVENT_CONSTRAINT(0x10cb, 0xf), /* MEM_LOAD_RETIRED.DTLB_MISS */
+ EVENT_CONSTRAINT_END
+};
+
+static struct event_constraint *
+intel_pebs_constraints(struct perf_event *event)
+{
+ struct event_constraint *c;
+
+ if (!event->attr.precise_ip)
+ return NULL;
+
+ if (x86_pmu.pebs_constraints) {
+ for_each_event_constraint(c, x86_pmu.pebs_constraints) {
+ if ((event->hw.config & c->cmask) == c->code)
+ return c;
+ }
+ }
+
+ return &emptyconstraint;
+}
+
+static void intel_pmu_pebs_enable(struct perf_event *event)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+
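+ /*
+ * A PEBS event signals overflow through the DS area (global status
+ * bit 62) instead of the regular counter interrupt, so mask the
+ * interrupt enable bit while PEBS is active.
+ */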
+ hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
+
+ cpuc->pebs_enabled |= 1ULL << hwc->idx;
+ WARN_ON_ONCE(cpuc->enabled);
+
+ if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
+ intel_pmu_lbr_enable(event);
+}
+
+static void intel_pmu_pebs_disable(struct perf_event *event)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+
+ cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
+ if (cpuc->enabled)
+ wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
+
+ hwc->config |= ARCH_PERFMON_EVENTSEL_INT;
+
+ if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
+ intel_pmu_lbr_disable(event);
+}
+
+static void intel_pmu_pebs_enable_all(void)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+ if (cpuc->pebs_enabled)
+ wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
+}
+
+static void intel_pmu_pebs_disable_all(void)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+ if (cpuc->pebs_enabled)
+ wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
+}
+
+#include <asm/insn.h>
+
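+/*
+ * Kernel addresses live above PAGE_OFFSET on 32-bit and in the negative
+ * (top) half of the address space on 64-bit.
+ */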
+static inline bool kernel_ip(unsigned long ip)
+{
+#ifdef CONFIG_X86_32
+ return ip > PAGE_OFFSET;
+#else
+ return (long)ip < 0;
+#endif
+}
+
+static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ unsigned long from = cpuc->lbr_entries[0].from;
+ unsigned long old_to, to = cpuc->lbr_entries[0].to;
+ unsigned long ip = regs->ip;
+
+ /*
+ * We don't need to fix up if the PEBS assist is fault-like
+ */
+ if (!x86_pmu.intel_cap.pebs_trap)
+ return 1;
+
+ /*
+ * No LBR entry, no basic block, no rewinding
+ */
+ if (!cpuc->lbr_stack.nr || !from || !to)
+ return 0;
+
+ /*
+ * Basic blocks should never cross user/kernel boundaries
+ */
+ if (kernel_ip(ip) != kernel_ip(to))
+ return 0;
+
+ /*
+ * unsigned math, either ip is before the start (impossible) or
+ * the basic block is larger than 1 page (sanity)
+ */
+ if ((ip - to) > PAGE_SIZE)
+ return 0;
+
+ /*
+ * We sampled a branch insn, rewind using the LBR stack
+ */
+ if (ip == to) {
+ regs->ip = from;
+ return 1;
+ }
+
+ do {
+ struct insn insn;
+ u8 buf[MAX_INSN_SIZE];
+ void *kaddr;
+
+ old_to = to;
+ if (!kernel_ip(ip)) {
+ int bytes, size = MAX_INSN_SIZE;
+
+ bytes = copy_from_user_nmi(buf, (void __user *)to, size);
+ if (bytes != size)
+ return 0;
+
+ kaddr = buf;
+ } else
+ kaddr = (void *)to;
+
+ kernel_insn_init(&insn, kaddr);
+ insn_get_length(&insn);
+ to += insn.length;
+ } while (to < ip);
+
+ if (to == ip) {
+ regs->ip = old_to;
+ return 1;
+ }
+
+ /*
+ * Even though we decoded the basic block, the instruction stream
+ * never matched the given IP, either the TO or the IP got corrupted.
+ */
+ return 0;
+}
+
+static int intel_pmu_save_and_restart(struct perf_event *event);
+
+static void __intel_pmu_pebs_event(struct perf_event *event,
+ struct pt_regs *iregs, void *__pebs)
+{
+ /*
+ * We cast to pebs_record_core since that is a subset of
+ * both formats and we don't use the other fields in this
+ * routine.
+ */
+ struct pebs_record_core *pebs = __pebs;
+ struct perf_sample_data data;
+ struct pt_regs regs;
+
+ if (!intel_pmu_save_and_restart(event))
+ return;
+
+ perf_sample_data_init(&data, 0);
+ data.period = event->hw.last_period;
+
+ /*
+ * We use the interrupt regs as a base because the PEBS record
+ * does not contain a full regs set, specifically it seems to
+ * lack segment descriptors, which get used by things like
+ * user_mode().
+ *
+ * In the simple case fix up only the IP and BP,SP regs, for
+ * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
+ * A possible PERF_SAMPLE_REGS will have to transfer all regs.
+ */
+ regs = *iregs;
+ regs.ip = pebs->ip;
+ regs.bp = pebs->bp;
+ regs.sp = pebs->sp;
+
+ if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(&regs))
+ regs.flags |= PERF_EFLAGS_EXACT;
+ else
+ regs.flags &= ~PERF_EFLAGS_EXACT;
+
+ if (perf_event_overflow(event, 1, &data, &regs))
+ x86_pmu_stop(event);
+}
+
+static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct debug_store *ds = cpuc->ds;
+ struct perf_event *event = cpuc->events[0]; /* PMC0 only */
+ struct pebs_record_core *at, *top;
+ int n;
+
+ if (!ds || !x86_pmu.pebs)
+ return;
+
+ at = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
+ top = (struct pebs_record_core *)(unsigned long)ds->pebs_index;
+
+ /*
+ * Whatever else happens, drain the thing
+ */
+ ds->pebs_index = ds->pebs_buffer_base;
+
+ if (!test_bit(0, cpuc->active_mask))
+ return;
+
+ WARN_ON_ONCE(!event);
+
+ if (!event->attr.precise_ip)
+ return;
+
+ n = top - at;
+ if (n <= 0)
+ return;
+
+ /*
+ * Should not happen, we program the threshold at 1 and do not
+ * set a reset value.
+ */
+ WARN_ON_ONCE(n > 1);
+ at += n - 1;
+
+ __intel_pmu_pebs_event(event, iregs, at);
+}
+
+static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct debug_store *ds = cpuc->ds;
+ struct pebs_record_nhm *at, *top;
+ struct perf_event *event = NULL;
+ u64 status = 0;
+ int bit, n;
+
+ if (!ds || !x86_pmu.pebs)
+ return;
+
+ at = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
+ top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;
+
+ ds->pebs_index = ds->pebs_buffer_base;
+
+ n = top - at;
+ if (n <= 0)
+ return;
+
+ /*
+ * Should not happen, we program the threshold at 1 and do not
+ * set a reset value.
+ */
+ WARN_ON_ONCE(n > MAX_PEBS_EVENTS);
+
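+ /*
+ * Each record carries a status bitmask of the counters that caused
+ * it; attribute the record to the first active precise event whose
+ * bit has not been claimed yet.
+ */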
+ for ( ; at < top; at++) {
+ for_each_set_bit(bit, (unsigned long *)&at->status, MAX_PEBS_EVENTS) {
+ event = cpuc->events[bit];
+ if (!test_bit(bit, cpuc->active_mask))
+ continue;
+
+ WARN_ON_ONCE(!event);
+
+ if (!event->attr.precise_ip)
+ continue;
+
+ if (__test_and_set_bit(bit, (unsigned long *)&status))
+ continue;
+
+ break;
+ }
+
+ if (!event || bit >= MAX_PEBS_EVENTS)
+ continue;
+
+ __intel_pmu_pebs_event(event, iregs, at);
+ }
+}
+
+/*
+ * BTS, PEBS probe and setup
+ */
+
+static void intel_ds_init(void)
+{
+ /*
+ * No support for 32bit formats
+ */
+ if (!boot_cpu_has(X86_FEATURE_DTES64))
+ return;
+
+ x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS);
+ x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
+ if (x86_pmu.pebs) {
+ char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-';
+ int format = x86_pmu.intel_cap.pebs_format;
+
+ switch (format) {
+ case 0:
+ printk(KERN_CONT "PEBS fmt0%c, ", pebs_type);
+ x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
+ x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
+ x86_pmu.pebs_constraints = intel_core_pebs_events;
+ break;
+
+ case 1:
+ printk(KERN_CONT "PEBS fmt1%c, ", pebs_type);
+ x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
+ x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
+ x86_pmu.pebs_constraints = intel_nehalem_pebs_events;
+ break;
+
+ default:
+ printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type);
+ x86_pmu.pebs = 0;
+ break;
+ }
+ }
+}
+
+#else /* CONFIG_CPU_SUP_INTEL */
+
+static int reserve_ds_buffers(void)
+{
+ return 0;
+}
+
+static void release_ds_buffers(void)
+{
+}
+
+#endif /* CONFIG_CPU_SUP_INTEL */
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
new file mode 100644
index 000000000000..d202c1bece1a
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -0,0 +1,218 @@
+#ifdef CONFIG_CPU_SUP_INTEL
+
+enum {
+ LBR_FORMAT_32 = 0x00,
+ LBR_FORMAT_LIP = 0x01,
+ LBR_FORMAT_EIP = 0x02,
+ LBR_FORMAT_EIP_FLAGS = 0x03,
+};
+
+/*
+ * We only support LBR implementations that have FREEZE_LBRS_ON_PMI;
+ * otherwise it becomes nearly impossible to get a reliable stack.
+ */
+
+static void __intel_pmu_lbr_enable(void)
+{
+ u64 debugctl;
+
+ rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+ debugctl |= (DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
+ wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+}
+
+static void __intel_pmu_lbr_disable(void)
+{
+ u64 debugctl;
+
+ rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+ debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
+ wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+}
+
+static void intel_pmu_lbr_reset_32(void)
+{
+ int i;
+
+ for (i = 0; i < x86_pmu.lbr_nr; i++)
+ wrmsrl(x86_pmu.lbr_from + i, 0);
+}
+
+static void intel_pmu_lbr_reset_64(void)
+{
+ int i;
+
+ for (i = 0; i < x86_pmu.lbr_nr; i++) {
+ wrmsrl(x86_pmu.lbr_from + i, 0);
+ wrmsrl(x86_pmu.lbr_to + i, 0);
+ }
+}
+
+static void intel_pmu_lbr_reset(void)
+{
+ if (!x86_pmu.lbr_nr)
+ return;
+
+ if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
+ intel_pmu_lbr_reset_32();
+ else
+ intel_pmu_lbr_reset_64();
+}
+
+static void intel_pmu_lbr_enable(struct perf_event *event)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+ if (!x86_pmu.lbr_nr)
+ return;
+
+ WARN_ON_ONCE(cpuc->enabled);
+
+ /*
+ * Reset the LBR stack if we changed task context to
+ * avoid data leaks.
+ */
+
+ if (event->ctx->task && cpuc->lbr_context != event->ctx) {
+ intel_pmu_lbr_reset();
+ cpuc->lbr_context = event->ctx;
+ }
+
+ cpuc->lbr_users++;
+}
+
+static void intel_pmu_lbr_disable(struct perf_event *event)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+ if (!x86_pmu.lbr_nr)
+ return;
+
+ cpuc->lbr_users--;
+ WARN_ON_ONCE(cpuc->lbr_users < 0);
+
+ if (cpuc->enabled && !cpuc->lbr_users)
+ __intel_pmu_lbr_disable();
+}
+
+static void intel_pmu_lbr_enable_all(void)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+ if (cpuc->lbr_users)
+ __intel_pmu_lbr_enable();
+}
+
+static void intel_pmu_lbr_disable_all(void)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+ if (cpuc->lbr_users)
+ __intel_pmu_lbr_disable();
+}
+
+static inline u64 intel_pmu_lbr_tos(void)
+{
+ u64 tos;
+
+ rdmsrl(x86_pmu.lbr_tos, tos);
+
+ return tos;
+}
+
+static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
+{
+ unsigned long mask = x86_pmu.lbr_nr - 1;
+ u64 tos = intel_pmu_lbr_tos();
+ int i;
+
+ for (i = 0; i < x86_pmu.lbr_nr; i++) {
+ unsigned long lbr_idx = (tos - i) & mask;
+ union {
+ struct {
+ u32 from;
+ u32 to;
+ };
+ u64 lbr;
+ } msr_lastbranch;
+
+ rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);
+
+ cpuc->lbr_entries[i].from = msr_lastbranch.from;
+ cpuc->lbr_entries[i].to = msr_lastbranch.to;
+ cpuc->lbr_entries[i].flags = 0;
+ }
+ cpuc->lbr_stack.nr = i;
+}
+
+#define LBR_FROM_FLAG_MISPRED (1ULL << 63)
+
+/*
+ * Due to lack of segmentation in Linux the effective address (offset)
+ * is the same as the linear address, allowing us to merge the LIP and EIP
+ * LBR formats.
+ */
+static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
+{
+ unsigned long mask = x86_pmu.lbr_nr - 1;
+ int lbr_format = x86_pmu.intel_cap.lbr_format;
+ u64 tos = intel_pmu_lbr_tos();
+ int i;
+
+ for (i = 0; i < x86_pmu.lbr_nr; i++) {
+ unsigned long lbr_idx = (tos - i) & mask;
+ u64 from, to, flags = 0;
+
+ rdmsrl(x86_pmu.lbr_from + lbr_idx, from);
+ rdmsrl(x86_pmu.lbr_to + lbr_idx, to);
+
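+ /*
+ * In the EIP_FLAGS format, bit 63 of the FROM address carries the
+ * mispredict flag; extract it and sign-extend the address back to
+ * its canonical form.
+ */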
+ if (lbr_format == LBR_FORMAT_EIP_FLAGS) {
+ flags = !!(from & LBR_FROM_FLAG_MISPRED);
+ from = (u64)((((s64)from) << 1) >> 1);
+ }
+
+ cpuc->lbr_entries[i].from = from;
+ cpuc->lbr_entries[i].to = to;
+ cpuc->lbr_entries[i].flags = flags;
+ }
+ cpuc->lbr_stack.nr = i;
+}
+
+static void intel_pmu_lbr_read(void)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+ if (!cpuc->lbr_users)
+ return;
+
+ if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
+ intel_pmu_lbr_read_32(cpuc);
+ else
+ intel_pmu_lbr_read_64(cpuc);
+}
+
+static void intel_pmu_lbr_init_core(void)
+{
+ x86_pmu.lbr_nr = 4;
+ x86_pmu.lbr_tos = 0x01c9;
+ x86_pmu.lbr_from = 0x40;
+ x86_pmu.lbr_to = 0x60;
+}
+
+static void intel_pmu_lbr_init_nhm(void)
+{
+ x86_pmu.lbr_nr = 16;
+ x86_pmu.lbr_tos = 0x01c9;
+ x86_pmu.lbr_from = 0x680;
+ x86_pmu.lbr_to = 0x6c0;
+}
+
+static void intel_pmu_lbr_init_atom(void)
+{
+ x86_pmu.lbr_nr = 8;
+ x86_pmu.lbr_tos = 0x01c9;
+ x86_pmu.lbr_from = 0x40;
+ x86_pmu.lbr_to = 0x60;
+}
+
+#endif /* CONFIG_CPU_SUP_INTEL */
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
new file mode 100644
index 000000000000..a603930271f3
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -0,0 +1,841 @@
+/*
+ * Netburst Performance Events (P4, old Xeon)
+ *
+ * Copyright (C) 2010 Parallels, Inc., Cyrill Gorcunov <gorcunov@openvz.org>
+ * Copyright (C) 2010 Intel Corporation, Lin Ming <ming.m.lin@intel.com>
+ *
+ * For licencing details see kernel-base/COPYING
+ */
+
+#ifdef CONFIG_CPU_SUP_INTEL
+
+#include <asm/perf_event_p4.h>
+
+#define P4_CNTR_LIMIT 3
+/*
+ * array indices: 0,1 - HT threads, used with HT enabled cpu
+ */
+struct p4_event_bind {
+ unsigned int opcode; /* Event code and ESCR selector */
+ unsigned int escr_msr[2]; /* ESCR MSR for this event */
+ unsigned char cntr[2][P4_CNTR_LIMIT]; /* counter index (offset), -1 on absence */
+};
+
+struct p4_cache_event_bind {
+ unsigned int metric_pebs;
+ unsigned int metric_vert;
+};
+
+#define P4_GEN_CACHE_EVENT_BIND(name) \
+ [P4_CACHE__##name] = { \
+ .metric_pebs = P4_PEBS__##name, \
+ .metric_vert = P4_VERT__##name, \
+ }
+
+static struct p4_cache_event_bind p4_cache_event_bind_map[] = {
+ P4_GEN_CACHE_EVENT_BIND(1stl_cache_load_miss_retired),
+ P4_GEN_CACHE_EVENT_BIND(2ndl_cache_load_miss_retired),
+ P4_GEN_CACHE_EVENT_BIND(dtlb_load_miss_retired),
+ P4_GEN_CACHE_EVENT_BIND(dtlb_store_miss_retired),
+};
+
+/*
+ * Note that we don't use CCCR1 here; there is an
+ * exception for P4_BSQ_ALLOCATION but we just have
+ * no workaround.
+ *
+ * Consider this binding as the resources which a particular
+ * event may borrow; it doesn't contain the EventMask,
+ * Tags and friends -- they are left to the caller.
+ */
+static struct p4_event_bind p4_event_bind_map[] = {
+ [P4_EVENT_TC_DELIVER_MODE] = {
+ .opcode = P4_OPCODE(P4_EVENT_TC_DELIVER_MODE),
+ .escr_msr = { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 },
+ .cntr = { {4, 5, -1}, {6, 7, -1} },
+ },
+ [P4_EVENT_BPU_FETCH_REQUEST] = {
+ .opcode = P4_OPCODE(P4_EVENT_BPU_FETCH_REQUEST),
+ .escr_msr = { MSR_P4_BPU_ESCR0, MSR_P4_BPU_ESCR1 },
+ .cntr = { {0, -1, -1}, {2, -1, -1} },
+ },
+ [P4_EVENT_ITLB_REFERENCE] = {
+ .opcode = P4_OPCODE(P4_EVENT_ITLB_REFERENCE),
+ .escr_msr = { MSR_P4_ITLB_ESCR0, MSR_P4_ITLB_ESCR1 },
+ .cntr = { {0, -1, -1}, {2, -1, -1} },
+ },
+ [P4_EVENT_MEMORY_CANCEL] = {
+ .opcode = P4_OPCODE(P4_EVENT_MEMORY_CANCEL),
+ .escr_msr = { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 },
+ .cntr = { {8, 9, -1}, {10, 11, -1} },
+ },
+ [P4_EVENT_MEMORY_COMPLETE] = {
+ .opcode = P4_OPCODE(P4_EVENT_MEMORY_COMPLETE),
+ .escr_msr = { MSR_P4_SAAT_ESCR0 , MSR_P4_SAAT_ESCR1 },
+ .cntr = { {8, 9, -1}, {10, 11, -1} },
+ },
+ [P4_EVENT_LOAD_PORT_REPLAY] = {
+ .opcode = P4_OPCODE(P4_EVENT_LOAD_PORT_REPLAY),
+ .escr_msr = { MSR_P4_SAAT_ESCR0, MSR_P4_SAAT_ESCR1 },
+ .cntr = { {8, 9, -1}, {10, 11, -1} },
+ },
+ [P4_EVENT_STORE_PORT_REPLAY] = {
+ .opcode = P4_OPCODE(P4_EVENT_STORE_PORT_REPLAY),
+ .escr_msr = { MSR_P4_SAAT_ESCR0 , MSR_P4_SAAT_ESCR1 },
+ .cntr = { {8, 9, -1}, {10, 11, -1} },
+ },
+ [P4_EVENT_MOB_LOAD_REPLAY] = {
+ .opcode = P4_OPCODE(P4_EVENT_MOB_LOAD_REPLAY),
+ .escr_msr = { MSR_P4_MOB_ESCR0, MSR_P4_MOB_ESCR1 },
+ .cntr = { {0, -1, -1}, {2, -1, -1} },
+ },
+ [P4_EVENT_PAGE_WALK_TYPE] = {
+ .opcode = P4_OPCODE(P4_EVENT_PAGE_WALK_TYPE),
+ .escr_msr = { MSR_P4_PMH_ESCR0, MSR_P4_PMH_ESCR1 },
+ .cntr = { {0, -1, -1}, {2, -1, -1} },
+ },
+ [P4_EVENT_BSQ_CACHE_REFERENCE] = {
+ .opcode = P4_OPCODE(P4_EVENT_BSQ_CACHE_REFERENCE),
+ .escr_msr = { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR1 },
+ .cntr = { {0, -1, -1}, {2, -1, -1} },
+ },
+ [P4_EVENT_IOQ_ALLOCATION] = {
+ .opcode = P4_OPCODE(P4_EVENT_IOQ_ALLOCATION),
+ .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
+ .cntr = { {0, -1, -1}, {2, -1, -1} },
+ },
+ [P4_EVENT_IOQ_ACTIVE_ENTRIES] = { /* shared ESCR */
+ .opcode = P4_OPCODE(P4_EVENT_IOQ_ACTIVE_ENTRIES),
+ .escr_msr = { MSR_P4_FSB_ESCR1, MSR_P4_FSB_ESCR1 },
+ .cntr = { {2, -1, -1}, {3, -1, -1} },
+ },
+ [P4_EVENT_FSB_DATA_ACTIVITY] = {
+ .opcode = P4_OPCODE(P4_EVENT_FSB_DATA_ACTIVITY),
+ .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
+ .cntr = { {0, -1, -1}, {2, -1, -1} },
+ },
+ [P4_EVENT_BSQ_ALLOCATION] = { /* shared ESCR, broken CCCR1 */
+ .opcode = P4_OPCODE(P4_EVENT_BSQ_ALLOCATION),
+ .escr_msr = { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR0 },
+ .cntr = { {0, -1, -1}, {1, -1, -1} },
+ },
+ [P4_EVENT_BSQ_ACTIVE_ENTRIES] = { /* shared ESCR */
+ .opcode = P4_OPCODE(P4_EVENT_BSQ_ACTIVE_ENTRIES),
+ .escr_msr = { MSR_P4_BSU_ESCR1 , MSR_P4_BSU_ESCR1 },
+ .cntr = { {2, -1, -1}, {3, -1, -1} },
+ },
+ [P4_EVENT_SSE_INPUT_ASSIST] = {
+ .opcode = P4_OPCODE(P4_EVENT_SSE_INPUT_ASSIST),
+ .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+ .cntr = { {8, 9, -1}, {10, 11, -1} },
+ },
+ [P4_EVENT_PACKED_SP_UOP] = {
+ .opcode = P4_OPCODE(P4_EVENT_PACKED_SP_UOP),
+ .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+ .cntr = { {8, 9, -1}, {10, 11, -1} },
+ },
+ [P4_EVENT_PACKED_DP_UOP] = {
+ .opcode = P4_OPCODE(P4_EVENT_PACKED_DP_UOP),
+ .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+ .cntr = { {8, 9, -1}, {10, 11, -1} },
+ },
+ [P4_EVENT_SCALAR_SP_UOP] = {
+ .opcode = P4_OPCODE(P4_EVENT_SCALAR_SP_UOP),
+ .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+ .cntr = { {8, 9, -1}, {10, 11, -1} },
+ },
+ [P4_EVENT_SCALAR_DP_UOP] = {
+ .opcode = P4_OPCODE(P4_EVENT_SCALAR_DP_UOP),
+ .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+ .cntr = { {8, 9, -1}, {10, 11, -1} },
+ },
+ [P4_EVENT_64BIT_MMX_UOP] = {
+ .opcode = P4_OPCODE(P4_EVENT_64BIT_MMX_UOP),
+ .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+ .cntr = { {8, 9, -1}, {10, 11, -1} },
+ },
+ [P4_EVENT_128BIT_MMX_UOP] = {
+ .opcode = P4_OPCODE(P4_EVENT_128BIT_MMX_UOP),
+ .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+ .cntr = { {8, 9, -1}, {10, 11, -1} },
+ },
+ [P4_EVENT_X87_FP_UOP] = {
+ .opcode = P4_OPCODE(P4_EVENT_X87_FP_UOP),
+ .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+ .cntr = { {8, 9, -1}, {10, 11, -1} },
+ },
+ [P4_EVENT_TC_MISC] = {
+ .opcode = P4_OPCODE(P4_EVENT_TC_MISC),
+ .escr_msr = { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 },
+ .cntr = { {4, 5, -1}, {6, 7, -1} },
+ },
+ [P4_EVENT_GLOBAL_POWER_EVENTS] = {
+ .opcode = P4_OPCODE(P4_EVENT_GLOBAL_POWER_EVENTS),
+ .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
+ .cntr = { {0, -1, -1}, {2, -1, -1} },
+ },
+ [P4_EVENT_TC_MS_XFER] = {
+ .opcode = P4_OPCODE(P4_EVENT_TC_MS_XFER),
+ .escr_msr = { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 },
+ .cntr = { {4, 5, -1}, {6, 7, -1} },
+ },
+ [P4_EVENT_UOP_QUEUE_WRITES] = {
+ .opcode = P4_OPCODE(P4_EVENT_UOP_QUEUE_WRITES),
+ .escr_msr = { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 },
+ .cntr = { {4, 5, -1}, {6, 7, -1} },
+ },
+ [P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE] = {
+ .opcode = P4_OPCODE(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE),
+ .escr_msr = { MSR_P4_TBPU_ESCR0 , MSR_P4_TBPU_ESCR0 },
+ .cntr = { {4, 5, -1}, {6, 7, -1} },
+ },
+ [P4_EVENT_RETIRED_BRANCH_TYPE] = {
+ .opcode = P4_OPCODE(P4_EVENT_RETIRED_BRANCH_TYPE),
+ .escr_msr = { MSR_P4_TBPU_ESCR0 , MSR_P4_TBPU_ESCR1 },
+ .cntr = { {4, 5, -1}, {6, 7, -1} },
+ },
+ [P4_EVENT_RESOURCE_STALL] = {
+ .opcode = P4_OPCODE(P4_EVENT_RESOURCE_STALL),
+ .escr_msr = { MSR_P4_ALF_ESCR0, MSR_P4_ALF_ESCR1 },
+ .cntr = { {12, 13, 16}, {14, 15, 17} },
+ },
+ [P4_EVENT_WC_BUFFER] = {
+ .opcode = P4_OPCODE(P4_EVENT_WC_BUFFER),
+ .escr_msr = { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 },
+ .cntr = { {8, 9, -1}, {10, 11, -1} },
+ },
+ [P4_EVENT_B2B_CYCLES] = {
+ .opcode = P4_OPCODE(P4_EVENT_B2B_CYCLES),
+ .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
+ .cntr = { {0, -1, -1}, {2, -1, -1} },
+ },
+ [P4_EVENT_BNR] = {
+ .opcode = P4_OPCODE(P4_EVENT_BNR),
+ .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
+ .cntr = { {0, -1, -1}, {2, -1, -1} },
+ },
+ [P4_EVENT_SNOOP] = {
+ .opcode = P4_OPCODE(P4_EVENT_SNOOP),
+ .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
+ .cntr = { {0, -1, -1}, {2, -1, -1} },
+ },
+ [P4_EVENT_RESPONSE] = {
+ .opcode = P4_OPCODE(P4_EVENT_RESPONSE),
+ .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
+ .cntr = { {0, -1, -1}, {2, -1, -1} },
+ },
+ [P4_EVENT_FRONT_END_EVENT] = {
+ .opcode = P4_OPCODE(P4_EVENT_FRONT_END_EVENT),
+ .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
+ .cntr = { {12, 13, 16}, {14, 15, 17} },
+ },
+ [P4_EVENT_EXECUTION_EVENT] = {
+ .opcode = P4_OPCODE(P4_EVENT_EXECUTION_EVENT),
+ .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
+ .cntr = { {12, 13, 16}, {14, 15, 17} },
+ },
+ [P4_EVENT_REPLAY_EVENT] = {
+ .opcode = P4_OPCODE(P4_EVENT_REPLAY_EVENT),
+ .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
+ .cntr = { {12, 13, 16}, {14, 15, 17} },
+ },
+ [P4_EVENT_INSTR_RETIRED] = {
+ .opcode = P4_OPCODE(P4_EVENT_INSTR_RETIRED),
+ .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
+ .cntr = { {12, 13, 16}, {14, 15, 17} },
+ },
+ [P4_EVENT_UOPS_RETIRED] = {
+ .opcode = P4_OPCODE(P4_EVENT_UOPS_RETIRED),
+ .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
+ .cntr = { {12, 13, 16}, {14, 15, 17} },
+ },
+ [P4_EVENT_UOP_TYPE] = {
+ .opcode = P4_OPCODE(P4_EVENT_UOP_TYPE),
+ .escr_msr = { MSR_P4_RAT_ESCR0, MSR_P4_RAT_ESCR1 },
+ .cntr = { {12, 13, 16}, {14, 15, 17} },
+ },
+ [P4_EVENT_BRANCH_RETIRED] = {
+ .opcode = P4_OPCODE(P4_EVENT_BRANCH_RETIRED),
+ .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
+ .cntr = { {12, 13, 16}, {14, 15, 17} },
+ },
+ [P4_EVENT_MISPRED_BRANCH_RETIRED] = {
+ .opcode = P4_OPCODE(P4_EVENT_MISPRED_BRANCH_RETIRED),
+ .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
+ .cntr = { {12, 13, 16}, {14, 15, 17} },
+ },
+ [P4_EVENT_X87_ASSIST] = {
+ .opcode = P4_OPCODE(P4_EVENT_X87_ASSIST),
+ .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
+ .cntr = { {12, 13, 16}, {14, 15, 17} },
+ },
+ [P4_EVENT_MACHINE_CLEAR] = {
+ .opcode = P4_OPCODE(P4_EVENT_MACHINE_CLEAR),
+ .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
+ .cntr = { {12, 13, 16}, {14, 15, 17} },
+ },
+ [P4_EVENT_INSTR_COMPLETED] = {
+ .opcode = P4_OPCODE(P4_EVENT_INSTR_COMPLETED),
+ .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
+ .cntr = { {12, 13, 16}, {14, 15, 17} },
+ },
+};
+
+#define P4_GEN_CACHE_EVENT(event, bit, cache_event) \
+ p4_config_pack_escr(P4_ESCR_EVENT(event) | \
+ P4_ESCR_EMASK_BIT(event, bit)) | \
+ p4_config_pack_cccr(cache_event | \
+ P4_CCCR_ESEL(P4_OPCODE_ESEL(P4_OPCODE(event))))
+
+static __initconst const u64 p4_hw_cache_event_ids
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(L1D ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
+ P4_CACHE__1stl_cache_load_miss_retired),
+ },
+ },
+ [ C(LL ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
+ P4_CACHE__2ndl_cache_load_miss_retired),
+ },
+},
+ [ C(DTLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
+ P4_CACHE__dtlb_load_miss_retired),
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
+ P4_CACHE__dtlb_store_miss_retired),
+ },
+ },
+ [ C(ITLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_ITLB_REFERENCE, HIT,
+ P4_CACHE__itlb_reference_hit),
+ [ C(RESULT_MISS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_ITLB_REFERENCE, MISS,
+ P4_CACHE__itlb_reference_miss),
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+};
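
The table above is indexed with the generic perf cache-event encoding, where the cache id, the operation and the result each occupy one byte of attr.config (id | op << 8 | result << 16). A short sketch of how a caller would request the L1D read-miss entry, as one illustration of that encoding (user-space view, not part of the patch):

	#include <linux/perf_event.h>
	#include <string.h>

	static void setup_l1d_read_miss(struct perf_event_attr *attr)
	{
		memset(attr, 0, sizeof(*attr));
		attr->size = sizeof(*attr);
		attr->type = PERF_TYPE_HW_CACHE;
		/* L1D | read op | miss result -> p4_hw_cache_event_ids[L1D][READ][MISS] */
		attr->config = PERF_COUNT_HW_CACHE_L1D |
			       (PERF_COUNT_HW_CACHE_OP_READ << 8) |
			       (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
	}
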
+
+static u64 p4_general_events[PERF_COUNT_HW_MAX] = {
+ /* non-halted CPU clocks */
+ [PERF_COUNT_HW_CPU_CYCLES] =
+ p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_GLOBAL_POWER_EVENTS) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING)),
+
+ /*
+ * retired instructions
+ * for the sake of simplicity we don't use FSB tagging
+ */
+ [PERF_COUNT_HW_INSTRUCTIONS] =
+ p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_INSTR_RETIRED) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, NBOGUSNTAG) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, BOGUSNTAG)),
+
+ /* cache hits */
+ [PERF_COUNT_HW_CACHE_REFERENCES] =
+ p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_BSQ_CACHE_REFERENCE) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITS) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITE) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITM) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITS) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITE) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITM)),
+
+ /* cache misses */
+ [PERF_COUNT_HW_CACHE_MISSES] =
+ p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_BSQ_CACHE_REFERENCE) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_MISS) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_MISS) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, WR_2ndL_MISS)),
+
+ /* branch instructions retired */
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =
+ p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_RETIRED_BRANCH_TYPE) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CONDITIONAL) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CALL) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, RETURN) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, INDIRECT)),
+
+ /* mispredicted branches retired */
+ [PERF_COUNT_HW_BRANCH_MISSES] =
+ p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_MISPRED_BRANCH_RETIRED) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS)),
+
+ /* bus ready clocks (cpu is driving #DRDY_DRV/#DRDY_OWN): */
+ [PERF_COUNT_HW_BUS_CYCLES] =
+ p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_FSB_DATA_ACTIVITY) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_DRV) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OWN)) |
+ p4_config_pack_cccr(P4_CCCR_EDGE | P4_CCCR_COMPARE),
+};
+
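
These generic events are what a plain PERF_TYPE_HARDWARE request resolves to on this PMU (via p4_pmu_event_map() below). A self-contained user-space sketch that counts cycles over a small busy loop -- an assumed typical usage, not part of the patch:

	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <string.h>
	#include <stdint.h>
	#include <stdio.h>

	static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
				    int cpu, int group_fd, unsigned long flags)
	{
		return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
	}

	int main(void)
	{
		struct perf_event_attr attr;
		uint64_t count = 0;
		volatile unsigned long spin;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_CPU_CYCLES;	/* GLOBAL_POWER_EVENTS on P4 */
		attr.disabled = 1;
		attr.exclude_kernel = 1;

		fd = perf_event_open(&attr, 0, -1, -1, 0);	/* this task, any cpu */
		if (fd < 0)
			return 1;

		ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
		for (spin = 0; spin < 1000000; spin++)
			;	/* workload */
		ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

		if (read(fd, &count, sizeof(count)) == sizeof(count))
			printf("cycles: %llu\n", (unsigned long long)count);
		close(fd);
		return 0;
	}
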
+static struct p4_event_bind *p4_config_get_bind(u64 config)
+{
+ unsigned int evnt = p4_config_unpack_event(config);
+ struct p4_event_bind *bind = NULL;
+
+ if (evnt < ARRAY_SIZE(p4_event_bind_map))
+ bind = &p4_event_bind_map[evnt];
+
+ return bind;
+}
+
+static u64 p4_pmu_event_map(int hw_event)
+{
+ struct p4_event_bind *bind;
+ unsigned int esel;
+ u64 config;
+
+ config = p4_general_events[hw_event];
+ bind = p4_config_get_bind(config);
+ esel = P4_OPCODE_ESEL(bind->opcode);
+ config |= p4_config_pack_cccr(P4_CCCR_ESEL(esel));
+
+ return config;
+}
+
+static int p4_hw_config(struct perf_event *event)
+{
+ int cpu = get_cpu();
+ int rc = 0;
+ unsigned int evnt;
+ u32 escr, cccr;
+
+ /*
+ * The reason we read the cpu this early is that if the event gets
+ * scheduled for the first time on the same cpu, we will not need to
+ * swap the thread-specific flags in the config (and will save some
+ * cpu cycles)
+ */
+
+ cccr = p4_default_cccr_conf(cpu);
+ escr = p4_default_escr_conf(cpu, event->attr.exclude_kernel,
+ event->attr.exclude_user);
+ event->hw.config = p4_config_pack_escr(escr) |
+ p4_config_pack_cccr(cccr);
+
+ if (p4_ht_active() && p4_ht_thread(cpu))
+ event->hw.config = p4_set_ht_bit(event->hw.config);
+
+ if (event->attr.type == PERF_TYPE_RAW) {
+
+ /* user data may carry an out-of-bounds event index */
+ evnt = p4_config_unpack_event(event->attr.config);
+ if (evnt >= ARRAY_SIZE(p4_event_bind_map)) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * We don't control raw events so it's up to the caller
+ * to pass sane values (and we don't count the thread number
+ * on an HT machine but allow HT-compatible specifics to be
+ * passed on)
+ *
+ * XXX: HT wide things should check perf_paranoid_cpu() &&
+ * CAP_SYS_ADMIN
+ */
+ event->hw.config |= event->attr.config &
+ (p4_config_pack_escr(P4_ESCR_MASK_HT) |
+ p4_config_pack_cccr(P4_CCCR_MASK_HT));
+ }
+
+ rc = x86_setup_perfctr(event);
+out:
+ put_cpu();
+ return rc;
+}
+
+static inline void p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
+{
+ unsigned long dummy;
+
+ rdmsrl(hwc->config_base + hwc->idx, dummy);
+ if (dummy & P4_CCCR_OVF) {
+ (void)checking_wrmsrl(hwc->config_base + hwc->idx,
+ ((u64)dummy) & ~P4_CCCR_OVF);
+ }
+}
+
+static inline void p4_pmu_disable_event(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ /*
+ * If the event gets disabled while the counter is in an overflowed
+ * state, we need to clear P4_CCCR_OVF, otherwise the interrupt gets
+ * asserted again and again
+ */
+ (void)checking_wrmsrl(hwc->config_base + hwc->idx,
+ (u64)(p4_config_unpack_cccr(hwc->config)) &
+ ~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
+}
+
+static void p4_pmu_disable_all(void)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ int idx;
+
+ for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ struct perf_event *event = cpuc->events[idx];
+ if (!test_bit(idx, cpuc->active_mask))
+ continue;
+ p4_pmu_disable_event(event);
+ }
+}
+
+static void p4_pmu_enable_event(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int thread = p4_ht_config_thread(hwc->config);
+ u64 escr_conf = p4_config_unpack_escr(p4_clear_ht_bit(hwc->config));
+ unsigned int idx = p4_config_unpack_event(hwc->config);
+ unsigned int idx_cache = p4_config_unpack_cache_event(hwc->config);
+ struct p4_event_bind *bind;
+ struct p4_cache_event_bind *bind_cache;
+ u64 escr_addr, cccr;
+
+ bind = &p4_event_bind_map[idx];
+ escr_addr = (u64)bind->escr_msr[thread];
+
+ /*
+ * - we don't support cascaded counters yet
+ * - and counter 1 is broken (erratum)
+ */
+ WARN_ON_ONCE(p4_is_event_cascaded(hwc->config));
+ WARN_ON_ONCE(hwc->idx == 1);
+
+ /* we need a real Event value */
+ escr_conf &= ~P4_ESCR_EVENT_MASK;
+ escr_conf |= P4_ESCR_EVENT(P4_OPCODE_EVNT(bind->opcode));
+
+ cccr = p4_config_unpack_cccr(hwc->config);
+
+ /*
+ * it could be a cache event, in which case we need to
+ * set the metrics in additional MSRs
+ */
+ BUILD_BUG_ON(P4_CACHE__MAX > P4_CCCR_CACHE_OPS_MASK);
+ if (idx_cache > P4_CACHE__NONE &&
+ idx_cache < ARRAY_SIZE(p4_cache_event_bind_map)) {
+ bind_cache = &p4_cache_event_bind_map[idx_cache];
+ (void)checking_wrmsrl(MSR_IA32_PEBS_ENABLE, (u64)bind_cache->metric_pebs);
+ (void)checking_wrmsrl(MSR_P4_PEBS_MATRIX_VERT, (u64)bind_cache->metric_vert);
+ }
+
+ (void)checking_wrmsrl(escr_addr, escr_conf);
+ (void)checking_wrmsrl(hwc->config_base + hwc->idx,
+ (cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE);
+}
+
+static void p4_pmu_enable_all(int added)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ int idx;
+
+ for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ struct perf_event *event = cpuc->events[idx];
+ if (!test_bit(idx, cpuc->active_mask))
+ continue;
+ p4_pmu_enable_event(event);
+ }
+}
+
+static int p4_pmu_handle_irq(struct pt_regs *regs)
+{
+ struct perf_sample_data data;
+ struct cpu_hw_events *cpuc;
+ struct perf_event *event;
+ struct hw_perf_event *hwc;
+ int idx, handled = 0;
+ u64 val;
+
+ data.addr = 0;
+ data.raw = NULL;
+
+ cpuc = &__get_cpu_var(cpu_hw_events);
+
+ for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+
+ if (!test_bit(idx, cpuc->active_mask))
+ continue;
+
+ event = cpuc->events[idx];
+ hwc = &event->hw;
+
+ WARN_ON_ONCE(hwc->idx != idx);
+
+ /*
+ * FIXME: Redundant call, actually not needed
+ * but just to check if we're screwed
+ */
+ p4_pmu_clear_cccr_ovf(hwc);
+
+ val = x86_perf_event_update(event);
+ if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
+ continue;
+
+ /*
+ * event overflow
+ */
+ handled = 1;
+ data.period = event->hw.last_period;
+
+ if (!x86_perf_event_set_period(event))
+ continue;
+ if (perf_event_overflow(event, 1, &data, regs))
+ p4_pmu_disable_event(event);
+ }
+
+ if (handled) {
+ /* p4 quirk: unmask it again */
+ apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);
+ inc_irq_stat(apic_perf_irqs);
+ }
+
+ return handled;
+}
+
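
The overflow test in p4_pmu_handle_irq() relies on the counters being programmed with a negative start value: while the 40-bit counter is still counting towards zero its top implemented bit stays set, so a set bit 39 means "no overflow, skip". A tiny stand-alone sketch of that predicate (illustrative only; CNTVAL_BITS mirrors x86_pmu.cntval_bits):

	#include <stdint.h>
	#include <assert.h>

	#define CNTVAL_BITS	40

	static int p4_counter_overflowed(uint64_t val)
	{
		/* bit 39 cleared => the counter crossed zero */
		return !(val & (1ULL << (CNTVAL_BITS - 1)));
	}

	int main(void)
	{
		assert(!p4_counter_overflowed(0xffffff0000ULL));	/* still counting */
		assert(p4_counter_overflowed(0x0000000123ULL));		/* wrapped */
		return 0;
	}
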
+/*
+ * swap thread-specific fields according to the thread
+ * we are going to run on
+ */
+static void p4_pmu_swap_config_ts(struct hw_perf_event *hwc, int cpu)
+{
+ u32 escr, cccr;
+
+ /*
+ * either we're lucky and continue on the same cpu, or there is no HT support
+ */
+ if (!p4_should_swap_ts(hwc->config, cpu))
+ return;
+
+ /*
+ * the event has migrated from another logical
+ * cpu, so we need to swap the thread-specific flags
+ */
+
+ escr = p4_config_unpack_escr(hwc->config);
+ cccr = p4_config_unpack_cccr(hwc->config);
+
+ if (p4_ht_thread(cpu)) {
+ cccr &= ~P4_CCCR_OVF_PMI_T0;
+ cccr |= P4_CCCR_OVF_PMI_T1;
+ if (escr & P4_ESCR_T0_OS) {
+ escr &= ~P4_ESCR_T0_OS;
+ escr |= P4_ESCR_T1_OS;
+ }
+ if (escr & P4_ESCR_T0_USR) {
+ escr &= ~P4_ESCR_T0_USR;
+ escr |= P4_ESCR_T1_USR;
+ }
+ hwc->config = p4_config_pack_escr(escr);
+ hwc->config |= p4_config_pack_cccr(cccr);
+ hwc->config |= P4_CONFIG_HT;
+ } else {
+ cccr &= ~P4_CCCR_OVF_PMI_T1;
+ cccr |= P4_CCCR_OVF_PMI_T0;
+ if (escr & P4_ESCR_T1_OS) {
+ escr &= ~P4_ESCR_T1_OS;
+ escr |= P4_ESCR_T0_OS;
+ }
+ if (escr & P4_ESCR_T1_USR) {
+ escr &= ~P4_ESCR_T1_USR;
+ escr |= P4_ESCR_T0_USR;
+ }
+ hwc->config = p4_config_pack_escr(escr);
+ hwc->config |= p4_config_pack_cccr(cccr);
+ hwc->config &= ~P4_CONFIG_HT;
+ }
+}
+
+/* ESCRs are not sequential in memory so we need a map */
+static const unsigned int p4_escr_map[ARCH_P4_TOTAL_ESCR] = {
+ MSR_P4_ALF_ESCR0, /* 0 */
+ MSR_P4_ALF_ESCR1, /* 1 */
+ MSR_P4_BPU_ESCR0, /* 2 */
+ MSR_P4_BPU_ESCR1, /* 3 */
+ MSR_P4_BSU_ESCR0, /* 4 */
+ MSR_P4_BSU_ESCR1, /* 5 */
+ MSR_P4_CRU_ESCR0, /* 6 */
+ MSR_P4_CRU_ESCR1, /* 7 */
+ MSR_P4_CRU_ESCR2, /* 8 */
+ MSR_P4_CRU_ESCR3, /* 9 */
+ MSR_P4_CRU_ESCR4, /* 10 */
+ MSR_P4_CRU_ESCR5, /* 11 */
+ MSR_P4_DAC_ESCR0, /* 12 */
+ MSR_P4_DAC_ESCR1, /* 13 */
+ MSR_P4_FIRM_ESCR0, /* 14 */
+ MSR_P4_FIRM_ESCR1, /* 15 */
+ MSR_P4_FLAME_ESCR0, /* 16 */
+ MSR_P4_FLAME_ESCR1, /* 17 */
+ MSR_P4_FSB_ESCR0, /* 18 */
+ MSR_P4_FSB_ESCR1, /* 19 */
+ MSR_P4_IQ_ESCR0, /* 20 */
+ MSR_P4_IQ_ESCR1, /* 21 */
+ MSR_P4_IS_ESCR0, /* 22 */
+ MSR_P4_IS_ESCR1, /* 23 */
+ MSR_P4_ITLB_ESCR0, /* 24 */
+ MSR_P4_ITLB_ESCR1, /* 25 */
+ MSR_P4_IX_ESCR0, /* 26 */
+ MSR_P4_IX_ESCR1, /* 27 */
+ MSR_P4_MOB_ESCR0, /* 28 */
+ MSR_P4_MOB_ESCR1, /* 29 */
+ MSR_P4_MS_ESCR0, /* 30 */
+ MSR_P4_MS_ESCR1, /* 31 */
+ MSR_P4_PMH_ESCR0, /* 32 */
+ MSR_P4_PMH_ESCR1, /* 33 */
+ MSR_P4_RAT_ESCR0, /* 34 */
+ MSR_P4_RAT_ESCR1, /* 35 */
+ MSR_P4_SAAT_ESCR0, /* 36 */
+ MSR_P4_SAAT_ESCR1, /* 37 */
+ MSR_P4_SSU_ESCR0, /* 38 */
+ MSR_P4_SSU_ESCR1, /* 39 */
+ MSR_P4_TBPU_ESCR0, /* 40 */
+ MSR_P4_TBPU_ESCR1, /* 41 */
+ MSR_P4_TC_ESCR0, /* 42 */
+ MSR_P4_TC_ESCR1, /* 43 */
+ MSR_P4_U2L_ESCR0, /* 44 */
+ MSR_P4_U2L_ESCR1, /* 45 */
+};
+
+static int p4_get_escr_idx(unsigned int addr)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(p4_escr_map); i++) {
+ if (addr == p4_escr_map[i])
+ return i;
+ }
+
+ return -1;
+}
+
+static int p4_next_cntr(int thread, unsigned long *used_mask,
+ struct p4_event_bind *bind)
+{
+ int i = 0, j;
+
+ for (i = 0; i < P4_CNTR_LIMIT; i++) {
+ j = bind->cntr[thread][i];
+ if (j == -1 || !test_bit(j, used_mask))
+ return j;
+ }
+
+ return -1;
+}
+
+static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
+{
+ unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+ unsigned long escr_mask[BITS_TO_LONGS(ARCH_P4_TOTAL_ESCR)];
+ int cpu = raw_smp_processor_id();
+ struct hw_perf_event *hwc;
+ struct p4_event_bind *bind;
+ unsigned int i, thread, num;
+ int cntr_idx, escr_idx;
+
+ bitmap_zero(used_mask, X86_PMC_IDX_MAX);
+ bitmap_zero(escr_mask, ARCH_P4_TOTAL_ESCR);
+
+ for (i = 0, num = n; i < n; i++, num--) {
+
+ hwc = &cpuc->event_list[i]->hw;
+ thread = p4_ht_thread(cpu);
+ bind = p4_config_get_bind(hwc->config);
+ escr_idx = p4_get_escr_idx(bind->escr_msr[thread]);
+
+ if (hwc->idx != -1 && !p4_should_swap_ts(hwc->config, cpu)) {
+ cntr_idx = hwc->idx;
+ if (assign)
+ assign[i] = hwc->idx;
+ goto reserve;
+ }
+
+ cntr_idx = p4_next_cntr(thread, used_mask, bind);
+ if (cntr_idx == -1 || test_bit(escr_idx, escr_mask))
+ goto done;
+
+ p4_pmu_swap_config_ts(hwc, cpu);
+ if (assign)
+ assign[i] = cntr_idx;
+reserve:
+ set_bit(cntr_idx, used_mask);
+ set_bit(escr_idx, escr_mask);
+ }
+
+done:
+ return num ? -ENOSPC : 0;
+}
+
+static __initconst const struct x86_pmu p4_pmu = {
+ .name = "Netburst P4/Xeon",
+ .handle_irq = p4_pmu_handle_irq,
+ .disable_all = p4_pmu_disable_all,
+ .enable_all = p4_pmu_enable_all,
+ .enable = p4_pmu_enable_event,
+ .disable = p4_pmu_disable_event,
+ .eventsel = MSR_P4_BPU_CCCR0,
+ .perfctr = MSR_P4_BPU_PERFCTR0,
+ .event_map = p4_pmu_event_map,
+ .max_events = ARRAY_SIZE(p4_general_events),
+ .get_event_constraints = x86_get_event_constraints,
+ /*
+ * If HT is disabled we may need to use all
+ * ARCH_P4_MAX_CCCR counters simultaneously,
+ * though leave it restricted for the moment
+ * assuming HT is on
+ */
+ .num_counters = ARCH_P4_MAX_CCCR,
+ .apic = 1,
+ .cntval_bits = 40,
+ .cntval_mask = (1ULL << 40) - 1,
+ .max_period = (1ULL << 39) - 1,
+ .hw_config = p4_hw_config,
+ .schedule_events = p4_pmu_schedule_events,
+};
+
+static __init int p4_pmu_init(void)
+{
+ unsigned int low, high;
+
+ /* If we get stripped -- indexing fails */
+ BUILD_BUG_ON(ARCH_P4_MAX_CCCR > X86_PMC_MAX_GENERIC);
+
+ rdmsr(MSR_IA32_MISC_ENABLE, low, high);
+ if (!(low & (1 << 7))) {
+ pr_cont("unsupported Netburst CPU model %d ",
+ boot_cpu_data.x86_model);
+ return -ENODEV;
+ }
+
+ memcpy(hw_cache_event_ids, p4_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
+
+ pr_cont("Netburst events, ");
+
+ x86_pmu = p4_pmu;
+
+ return 0;
+}
+
+#endif /* CONFIG_CPU_SUP_INTEL */
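
The p4_pmu_init() probe above keys off bit 7 of MSR_IA32_MISC_ENABLE, which the SDM documents as "Performance Monitoring Available". The same check can be reproduced from user space through the msr driver; a rough sketch, assuming the msr module is loaded and /dev/cpu/0/msr is readable:

	#include <stdint.h>
	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>

	#define MSR_IA32_MISC_ENABLE	0x000001a0

	int main(void)
	{
		uint64_t val;
		int fd = open("/dev/cpu/0/msr", O_RDONLY);

		if (fd < 0)
			return 1;
		if (pread(fd, &val, sizeof(val), MSR_IA32_MISC_ENABLE) != sizeof(val)) {
			close(fd);
			return 1;
		}
		printf("performance monitoring %savailable\n",
		       (val & (1 << 7)) ? "" : "not ");
		close(fd);
		return 0;
	}
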
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index a330485d14da..34ba07be2cda 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -27,24 +27,6 @@ static u64 p6_pmu_event_map(int hw_event)
*/
#define P6_NOP_EVENT 0x0000002EULL
-static u64 p6_pmu_raw_event(u64 hw_event)
-{
-#define P6_EVNTSEL_EVENT_MASK 0x000000FFULL
-#define P6_EVNTSEL_UNIT_MASK 0x0000FF00ULL
-#define P6_EVNTSEL_EDGE_MASK 0x00040000ULL
-#define P6_EVNTSEL_INV_MASK 0x00800000ULL
-#define P6_EVNTSEL_REG_MASK 0xFF000000ULL
-
-#define P6_EVNTSEL_MASK \
- (P6_EVNTSEL_EVENT_MASK | \
- P6_EVNTSEL_UNIT_MASK | \
- P6_EVNTSEL_EDGE_MASK | \
- P6_EVNTSEL_INV_MASK | \
- P6_EVNTSEL_REG_MASK)
-
- return hw_event & P6_EVNTSEL_MASK;
-}
-
static struct event_constraint p6_event_constraints[] =
{
INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FLOPS */
@@ -66,7 +48,7 @@ static void p6_pmu_disable_all(void)
wrmsrl(MSR_P6_EVNTSEL0, val);
}
-static void p6_pmu_enable_all(void)
+static void p6_pmu_enable_all(int added)
{
unsigned long val;
@@ -102,22 +84,23 @@ static void p6_pmu_enable_event(struct perf_event *event)
(void)checking_wrmsrl(hwc->config_base + hwc->idx, val);
}
-static __initconst struct x86_pmu p6_pmu = {
+static __initconst const struct x86_pmu p6_pmu = {
.name = "p6",
.handle_irq = x86_pmu_handle_irq,
.disable_all = p6_pmu_disable_all,
.enable_all = p6_pmu_enable_all,
.enable = p6_pmu_enable_event,
.disable = p6_pmu_disable_event,
+ .hw_config = x86_pmu_hw_config,
+ .schedule_events = x86_schedule_events,
.eventsel = MSR_P6_EVNTSEL0,
.perfctr = MSR_P6_PERFCTR0,
.event_map = p6_pmu_event_map,
- .raw_event = p6_pmu_raw_event,
.max_events = ARRAY_SIZE(p6_perfmon_event_map),
.apic = 1,
.max_period = (1ULL << 31) - 1,
.version = 0,
- .num_events = 2,
+ .num_counters = 2,
/*
* Events have 40 bits implemented. However they are designed such
* that bits [32-39] are sign extensions of bit 31. As such the
@@ -125,8 +108,8 @@ static __initconst struct x86_pmu p6_pmu = {
*
* See IA-32 Intel Architecture Software developer manual Vol 3B
*/
- .event_bits = 32,
- .event_mask = (1ULL << 32) - 1,
+ .cntval_bits = 32,
+ .cntval_mask = (1ULL << 32) - 1,
.get_event_constraints = x86_get_event_constraints,
.event_constraints = p6_event_constraints,
};
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index dfdb4dba2320..b9d1ff588445 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -24,8 +24,8 @@
#include <linux/dmi.h>
#include <linux/module.h>
#include <asm/div64.h>
-#include <asm/vmware.h>
#include <asm/x86_init.h>
+#include <asm/hypervisor.h>
#define CPUID_VMWARE_INFO_LEAF 0x40000000
#define VMWARE_HYPERVISOR_MAGIC 0x564D5868
@@ -65,7 +65,7 @@ static unsigned long vmware_get_tsc_khz(void)
return tsc_hz;
}
-void __init vmware_platform_setup(void)
+static void __init vmware_platform_setup(void)
{
uint32_t eax, ebx, ecx, edx;
@@ -83,26 +83,22 @@ void __init vmware_platform_setup(void)
* serial key should be enough, as this will always have a VMware
* specific string when running under VMware hypervisor.
*/
-int vmware_platform(void)
+static bool __init vmware_platform(void)
{
if (cpu_has_hypervisor) {
- unsigned int eax, ebx, ecx, edx;
- char hyper_vendor_id[13];
-
- cpuid(CPUID_VMWARE_INFO_LEAF, &eax, &ebx, &ecx, &edx);
- memcpy(hyper_vendor_id + 0, &ebx, 4);
- memcpy(hyper_vendor_id + 4, &ecx, 4);
- memcpy(hyper_vendor_id + 8, &edx, 4);
- hyper_vendor_id[12] = '\0';
- if (!strcmp(hyper_vendor_id, "VMwareVMware"))
- return 1;
+ unsigned int eax;
+ unsigned int hyper_vendor_id[3];
+
+ cpuid(CPUID_VMWARE_INFO_LEAF, &eax, &hyper_vendor_id[0],
+ &hyper_vendor_id[1], &hyper_vendor_id[2]);
+ if (!memcmp(hyper_vendor_id, "VMwareVMware", 12))
+ return true;
} else if (dmi_available && dmi_name_in_serial("VMware") &&
__vmware_platform())
- return 1;
+ return true;
- return 0;
+ return false;
}
-EXPORT_SYMBOL(vmware_platform);
/*
* VMware hypervisor takes care of exporting a reliable TSC to the guest.
@@ -116,8 +112,16 @@ EXPORT_SYMBOL(vmware_platform);
* so that the kernel could just trust the hypervisor with providing a
* reliable virtual TSC that is suitable for timekeeping.
*/
-void __cpuinit vmware_set_feature_bits(struct cpuinfo_x86 *c)
+static void __cpuinit vmware_set_cpu_features(struct cpuinfo_x86 *c)
{
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE);
}
+
+const __refconst struct hypervisor_x86 x86_hyper_vmware = {
+ .name = "VMware",
+ .detect = vmware_platform,
+ .set_cpu_features = vmware_set_cpu_features,
+ .init_platform = vmware_platform_setup,
+};
+EXPORT_SYMBOL(x86_hyper_vmware);
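
The rewritten vmware_platform() above compares the CPUID 0x40000000 vendor leaf against the "VMwareVMware" signature. A user-space sketch of the same check, assuming gcc's <cpuid.h> helpers (illustrative, not part of the patch):

	#include <cpuid.h>
	#include <string.h>
	#include <stdio.h>

	#define CPUID_VMWARE_INFO_LEAF	0x40000000

	int main(void)
	{
		unsigned int eax, sig[3];

		/* CPUID.1:ECX bit 31 -- running under a hypervisor at all? */
		if (!__get_cpuid(1, &eax, &sig[0], &sig[1], &sig[2]) ||
		    !(sig[1] & (1u << 31)))
			return 1;

		/* vendor signature lives in EBX, ECX, EDX of the 0x40000000 leaf */
		__cpuid(CPUID_VMWARE_INFO_LEAF, eax, sig[0], sig[1], sig[2]);
		printf("hypervisor signature: %.12s\n", (char *)sig);
		return !!memcmp(sig, "VMwareVMware", 12);
	}
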
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
deleted file mode 100644
index 1c47390dd0e5..000000000000
--- a/arch/x86/kernel/ds.c
+++ /dev/null
@@ -1,1437 +0,0 @@
-/*
- * Debug Store support
- *
- * This provides a low-level interface to the hardware's Debug Store
- * feature that is used for branch trace store (BTS) and
- * precise-event based sampling (PEBS).
- *
- * It manages:
- * - DS and BTS hardware configuration
- * - buffer overflow handling (to be done)
- * - buffer access
- *
- * It does not do:
- * - security checking (is the caller allowed to trace the task)
- * - buffer allocation (memory accounting)
- *
- *
- * Copyright (C) 2007-2009 Intel Corporation.
- * Markus Metzger <markus.t.metzger@intel.com>, 2007-2009
- */
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/trace_clock.h>
-
-#include <asm/ds.h>
-
-#include "ds_selftest.h"
-
-/*
- * The configuration for a particular DS hardware implementation:
- */
-struct ds_configuration {
- /* The name of the configuration: */
- const char *name;
-
- /* The size of pointer-typed fields in DS, BTS, and PEBS: */
- unsigned char sizeof_ptr_field;
-
- /* The size of a BTS/PEBS record in bytes: */
- unsigned char sizeof_rec[2];
-
- /* The number of pebs counter reset values in the DS structure. */
- unsigned char nr_counter_reset;
-
- /* Control bit-masks indexed by enum ds_feature: */
- unsigned long ctl[dsf_ctl_max];
-};
-static struct ds_configuration ds_cfg __read_mostly;
-
-
-/* Maximal size of a DS configuration: */
-#define MAX_SIZEOF_DS 0x80
-
-/* Maximal size of a BTS record: */
-#define MAX_SIZEOF_BTS (3 * 8)
-
-/* BTS and PEBS buffer alignment: */
-#define DS_ALIGNMENT (1 << 3)
-
-/* Number of buffer pointers in DS: */
-#define NUM_DS_PTR_FIELDS 8
-
-/* Size of a pebs reset value in DS: */
-#define PEBS_RESET_FIELD_SIZE 8
-
-/* Mask of control bits in the DS MSR register: */
-#define BTS_CONTROL \
- ( ds_cfg.ctl[dsf_bts] | \
- ds_cfg.ctl[dsf_bts_kernel] | \
- ds_cfg.ctl[dsf_bts_user] | \
- ds_cfg.ctl[dsf_bts_overflow] )
-
-/*
- * A BTS or PEBS tracer.
- *
- * This holds the configuration of the tracer and serves as a handle
- * to identify tracers.
- */
-struct ds_tracer {
- /* The DS context (partially) owned by this tracer. */
- struct ds_context *context;
- /* The buffer provided on ds_request() and its size in bytes. */
- void *buffer;
- size_t size;
-};
-
-struct bts_tracer {
- /* The common DS part: */
- struct ds_tracer ds;
-
- /* The trace including the DS configuration: */
- struct bts_trace trace;
-
- /* Buffer overflow notification function: */
- bts_ovfl_callback_t ovfl;
-
- /* Active flags affecting trace collection. */
- unsigned int flags;
-};
-
-struct pebs_tracer {
- /* The common DS part: */
- struct ds_tracer ds;
-
- /* The trace including the DS configuration: */
- struct pebs_trace trace;
-
- /* Buffer overflow notification function: */
- pebs_ovfl_callback_t ovfl;
-};
-
-/*
- * Debug Store (DS) save area configuration (see Intel64 and IA32
- * Architectures Software Developer's Manual, section 18.5)
- *
- * The DS configuration consists of the following fields; different
- * architetures vary in the size of those fields.
- *
- * - double-word aligned base linear address of the BTS buffer
- * - write pointer into the BTS buffer
- * - end linear address of the BTS buffer (one byte beyond the end of
- * the buffer)
- * - interrupt pointer into BTS buffer
- * (interrupt occurs when write pointer passes interrupt pointer)
- * - double-word aligned base linear address of the PEBS buffer
- * - write pointer into the PEBS buffer
- * - end linear address of the PEBS buffer (one byte beyond the end of
- * the buffer)
- * - interrupt pointer into PEBS buffer
- * (interrupt occurs when write pointer passes interrupt pointer)
- * - value to which counter is reset following counter overflow
- *
- * Later architectures use 64bit pointers throughout, whereas earlier
- * architectures use 32bit pointers in 32bit mode.
- *
- *
- * We compute the base address for the first 8 fields based on:
- * - the field size stored in the DS configuration
- * - the relative field position
- * - an offset giving the start of the respective region
- *
- * This offset is further used to index various arrays holding
- * information for BTS and PEBS at the respective index.
- *
- * On later 32bit processors, we only access the lower 32bit of the
- * 64bit pointer fields. The upper halves will be zeroed out.
- */
-
-enum ds_field {
- ds_buffer_base = 0,
- ds_index,
- ds_absolute_maximum,
- ds_interrupt_threshold,
-};
-
-enum ds_qualifier {
- ds_bts = 0,
- ds_pebs
-};
-
-static inline unsigned long
-ds_get(const unsigned char *base, enum ds_qualifier qual, enum ds_field field)
-{
- base += (ds_cfg.sizeof_ptr_field * (field + (4 * qual)));
- return *(unsigned long *)base;
-}
-
-static inline void
-ds_set(unsigned char *base, enum ds_qualifier qual, enum ds_field field,
- unsigned long value)
-{
- base += (ds_cfg.sizeof_ptr_field * (field + (4 * qual)));
- (*(unsigned long *)base) = value;
-}
-
-
-/*
- * Locking is done only for allocating BTS or PEBS resources.
- */
-static DEFINE_SPINLOCK(ds_lock);
-
-/*
- * We either support (system-wide) per-cpu or per-thread allocation.
- * We distinguish the two based on the task_struct pointer, where a
- * NULL pointer indicates per-cpu allocation for the current cpu.
- *
- * Allocations are use-counted. As soon as resources are allocated,
- * further allocations must be of the same type (per-cpu or
- * per-thread). We model this by counting allocations (i.e. the number
- * of tracers of a certain type) for one type negatively:
- * =0 no tracers
- * >0 number of per-thread tracers
- * <0 number of per-cpu tracers
- *
- * Tracers essentially gives the number of ds contexts for a certain
- * type of allocation.
- */
-static atomic_t tracers = ATOMIC_INIT(0);
-
-static inline int get_tracer(struct task_struct *task)
-{
- int error;
-
- spin_lock_irq(&ds_lock);
-
- if (task) {
- error = -EPERM;
- if (atomic_read(&tracers) < 0)
- goto out;
- atomic_inc(&tracers);
- } else {
- error = -EPERM;
- if (atomic_read(&tracers) > 0)
- goto out;
- atomic_dec(&tracers);
- }
-
- error = 0;
-out:
- spin_unlock_irq(&ds_lock);
- return error;
-}
-
-static inline void put_tracer(struct task_struct *task)
-{
- if (task)
- atomic_dec(&tracers);
- else
- atomic_inc(&tracers);
-}
-
-/*
- * The DS context is either attached to a thread or to a cpu:
- * - in the former case, the thread_struct contains a pointer to the
- * attached context.
- * - in the latter case, we use a static array of per-cpu context
- * pointers.
- *
- * Contexts are use-counted. They are allocated on first access and
- * deallocated when the last user puts the context.
- */
-struct ds_context {
- /* The DS configuration; goes into MSR_IA32_DS_AREA: */
- unsigned char ds[MAX_SIZEOF_DS];
-
- /* The owner of the BTS and PEBS configuration, respectively: */
- struct bts_tracer *bts_master;
- struct pebs_tracer *pebs_master;
-
- /* Use count: */
- unsigned long count;
-
- /* Pointer to the context pointer field: */
- struct ds_context **this;
-
- /* The traced task; NULL for cpu tracing: */
- struct task_struct *task;
-
- /* The traced cpu; only valid if task is NULL: */
- int cpu;
-};
-
-static DEFINE_PER_CPU(struct ds_context *, cpu_ds_context);
-
-
-static struct ds_context *ds_get_context(struct task_struct *task, int cpu)
-{
- struct ds_context **p_context =
- (task ? &task->thread.ds_ctx : &per_cpu(cpu_ds_context, cpu));
- struct ds_context *context = NULL;
- struct ds_context *new_context = NULL;
-
- /* Chances are small that we already have a context. */
- new_context = kzalloc(sizeof(*new_context), GFP_KERNEL);
- if (!new_context)
- return NULL;
-
- spin_lock_irq(&ds_lock);
-
- context = *p_context;
- if (likely(!context)) {
- context = new_context;
-
- context->this = p_context;
- context->task = task;
- context->cpu = cpu;
- context->count = 0;
-
- *p_context = context;
- }
-
- context->count++;
-
- spin_unlock_irq(&ds_lock);
-
- if (context != new_context)
- kfree(new_context);
-
- return context;
-}
-
-static void ds_put_context(struct ds_context *context)
-{
- struct task_struct *task;
- unsigned long irq;
-
- if (!context)
- return;
-
- spin_lock_irqsave(&ds_lock, irq);
-
- if (--context->count) {
- spin_unlock_irqrestore(&ds_lock, irq);
- return;
- }
-
- *(context->this) = NULL;
-
- task = context->task;
-
- if (task)
- clear_tsk_thread_flag(task, TIF_DS_AREA_MSR);
-
- /*
- * We leave the (now dangling) pointer to the DS configuration in
- * the DS_AREA msr. This is as good or as bad as replacing it with
- * NULL - the hardware would crash if we enabled tracing.
- *
- * This saves us some problems with having to write an msr on a
- * different cpu while preventing others from doing the same for the
- * next context for that same cpu.
- */
-
- spin_unlock_irqrestore(&ds_lock, irq);
-
- /* The context might still be in use for context switching. */
- if (task && (task != current))
- wait_task_context_switch(task);
-
- kfree(context);
-}
-
-static void ds_install_ds_area(struct ds_context *context)
-{
- unsigned long ds;
-
- ds = (unsigned long)context->ds;
-
- /*
- * There is a race between the bts master and the pebs master.
- *
- * The thread/cpu access is synchronized via get/put_cpu() for
- * task tracing and via wrmsr_on_cpu for cpu tracing.
- *
- * If bts and pebs are collected for the same task or same cpu,
- * the same confiuration is written twice.
- */
- if (context->task) {
- get_cpu();
- if (context->task == current)
- wrmsrl(MSR_IA32_DS_AREA, ds);
- set_tsk_thread_flag(context->task, TIF_DS_AREA_MSR);
- put_cpu();
- } else
- wrmsr_on_cpu(context->cpu, MSR_IA32_DS_AREA,
- (u32)((u64)ds), (u32)((u64)ds >> 32));
-}
-
-/*
- * Call the tracer's callback on a buffer overflow.
- *
- * context: the ds context
- * qual: the buffer type
- */
-static void ds_overflow(struct ds_context *context, enum ds_qualifier qual)
-{
- switch (qual) {
- case ds_bts:
- if (context->bts_master &&
- context->bts_master->ovfl)
- context->bts_master->ovfl(context->bts_master);
- break;
- case ds_pebs:
- if (context->pebs_master &&
- context->pebs_master->ovfl)
- context->pebs_master->ovfl(context->pebs_master);
- break;
- }
-}
-
-
-/*
- * Write raw data into the BTS or PEBS buffer.
- *
- * The remainder of any partially written record is zeroed out.
- *
- * context: the DS context
- * qual: the buffer type
- * record: the data to write
- * size: the size of the data
- */
-static int ds_write(struct ds_context *context, enum ds_qualifier qual,
- const void *record, size_t size)
-{
- int bytes_written = 0;
-
- if (!record)
- return -EINVAL;
-
- while (size) {
- unsigned long base, index, end, write_end, int_th;
- unsigned long write_size, adj_write_size;
-
- /*
- * Write as much as possible without producing an
- * overflow interrupt.
- *
- * Interrupt_threshold must either be
- * - bigger than absolute_maximum or
- * - point to a record between buffer_base and absolute_maximum
- *
- * Index points to a valid record.
- */
- base = ds_get(context->ds, qual, ds_buffer_base);
- index = ds_get(context->ds, qual, ds_index);
- end = ds_get(context->ds, qual, ds_absolute_maximum);
- int_th = ds_get(context->ds, qual, ds_interrupt_threshold);
-
- write_end = min(end, int_th);
-
- /*
- * If we are already beyond the interrupt threshold,
- * we fill the entire buffer.
- */
- if (write_end <= index)
- write_end = end;
-
- if (write_end <= index)
- break;
-
- write_size = min((unsigned long) size, write_end - index);
- memcpy((void *)index, record, write_size);
-
- record = (const char *)record + write_size;
- size -= write_size;
- bytes_written += write_size;
-
- adj_write_size = write_size / ds_cfg.sizeof_rec[qual];
- adj_write_size *= ds_cfg.sizeof_rec[qual];
-
- /* Zero out trailing bytes. */
- memset((char *)index + write_size, 0,
- adj_write_size - write_size);
- index += adj_write_size;
-
- if (index >= end)
- index = base;
- ds_set(context->ds, qual, ds_index, index);
-
- if (index >= int_th)
- ds_overflow(context, qual);
- }
-
- return bytes_written;
-}
-
-
-/*
- * Branch Trace Store (BTS) uses the following format. Different
- * architectures vary in the size of those fields.
- * - source linear address
- * - destination linear address
- * - flags
- *
- * Later architectures use 64bit pointers throughout, whereas earlier
- * architectures use 32bit pointers in 32bit mode.
- *
- * We compute the base address for the fields based on:
- * - the field size stored in the DS configuration
- * - the relative field position
- *
- * In order to store additional information in the BTS buffer, we use
- * a special source address to indicate that the record requires
- * special interpretation.
- *
- * Netburst indicated via a bit in the flags field whether the branch
- * was predicted; this is ignored.
- *
- * We use two levels of abstraction:
- * - the raw data level defined here
- * - an arch-independent level defined in ds.h
- */
-
-enum bts_field {
- bts_from,
- bts_to,
- bts_flags,
-
- bts_qual = bts_from,
- bts_clock = bts_to,
- bts_pid = bts_flags,
-
- bts_qual_mask = (bts_qual_max - 1),
- bts_escape = ((unsigned long)-1 & ~bts_qual_mask)
-};
-
-static inline unsigned long bts_get(const char *base, unsigned long field)
-{
- base += (ds_cfg.sizeof_ptr_field * field);
- return *(unsigned long *)base;
-}
-
-static inline void bts_set(char *base, unsigned long field, unsigned long val)
-{
- base += (ds_cfg.sizeof_ptr_field * field);
- (*(unsigned long *)base) = val;
-}
-
-
-/*
- * The raw BTS data is architecture dependent.
- *
- * For higher-level users, we give an arch-independent view.
- * - ds.h defines struct bts_struct
- * - bts_read translates one raw bts record into a bts_struct
- * - bts_write translates one bts_struct into the raw format and
- * writes it into the top of the parameter tracer's buffer.
- *
- * return: bytes read/written on success; -Eerrno, otherwise
- */
-static int
-bts_read(struct bts_tracer *tracer, const void *at, struct bts_struct *out)
-{
- if (!tracer)
- return -EINVAL;
-
- if (at < tracer->trace.ds.begin)
- return -EINVAL;
-
- if (tracer->trace.ds.end < (at + tracer->trace.ds.size))
- return -EINVAL;
-
- memset(out, 0, sizeof(*out));
- if ((bts_get(at, bts_qual) & ~bts_qual_mask) == bts_escape) {
- out->qualifier = (bts_get(at, bts_qual) & bts_qual_mask);
- out->variant.event.clock = bts_get(at, bts_clock);
- out->variant.event.pid = bts_get(at, bts_pid);
- } else {
- out->qualifier = bts_branch;
- out->variant.lbr.from = bts_get(at, bts_from);
- out->variant.lbr.to = bts_get(at, bts_to);
-
- if (!out->variant.lbr.from && !out->variant.lbr.to)
- out->qualifier = bts_invalid;
- }
-
- return ds_cfg.sizeof_rec[ds_bts];
-}
-
-static int bts_write(struct bts_tracer *tracer, const struct bts_struct *in)
-{
- unsigned char raw[MAX_SIZEOF_BTS];
-
- if (!tracer)
- return -EINVAL;
-
- if (MAX_SIZEOF_BTS < ds_cfg.sizeof_rec[ds_bts])
- return -EOVERFLOW;
-
- switch (in->qualifier) {
- case bts_invalid:
- bts_set(raw, bts_from, 0);
- bts_set(raw, bts_to, 0);
- bts_set(raw, bts_flags, 0);
- break;
- case bts_branch:
- bts_set(raw, bts_from, in->variant.lbr.from);
- bts_set(raw, bts_to, in->variant.lbr.to);
- bts_set(raw, bts_flags, 0);
- break;
- case bts_task_arrives:
- case bts_task_departs:
- bts_set(raw, bts_qual, (bts_escape | in->qualifier));
- bts_set(raw, bts_clock, in->variant.event.clock);
- bts_set(raw, bts_pid, in->variant.event.pid);
- break;
- default:
- return -EINVAL;
- }
-
- return ds_write(tracer->ds.context, ds_bts, raw,
- ds_cfg.sizeof_rec[ds_bts]);
-}
-
-
-static void ds_write_config(struct ds_context *context,
- struct ds_trace *cfg, enum ds_qualifier qual)
-{
- unsigned char *ds = context->ds;
-
- ds_set(ds, qual, ds_buffer_base, (unsigned long)cfg->begin);
- ds_set(ds, qual, ds_index, (unsigned long)cfg->top);
- ds_set(ds, qual, ds_absolute_maximum, (unsigned long)cfg->end);
- ds_set(ds, qual, ds_interrupt_threshold, (unsigned long)cfg->ith);
-}
-
-static void ds_read_config(struct ds_context *context,
- struct ds_trace *cfg, enum ds_qualifier qual)
-{
- unsigned char *ds = context->ds;
-
- cfg->begin = (void *)ds_get(ds, qual, ds_buffer_base);
- cfg->top = (void *)ds_get(ds, qual, ds_index);
- cfg->end = (void *)ds_get(ds, qual, ds_absolute_maximum);
- cfg->ith = (void *)ds_get(ds, qual, ds_interrupt_threshold);
-}
-
-static void ds_init_ds_trace(struct ds_trace *trace, enum ds_qualifier qual,
- void *base, size_t size, size_t ith,
- unsigned int flags) {
- unsigned long buffer, adj;
-
- /*
- * Adjust the buffer address and size to meet alignment
- * constraints:
- * - buffer is double-word aligned
- * - size is multiple of record size
- *
- * We checked the size at the very beginning; we have enough
- * space to do the adjustment.
- */
- buffer = (unsigned long)base;
-
- adj = ALIGN(buffer, DS_ALIGNMENT) - buffer;
- buffer += adj;
- size -= adj;
-
- trace->n = size / ds_cfg.sizeof_rec[qual];
- trace->size = ds_cfg.sizeof_rec[qual];
-
- size = (trace->n * trace->size);
-
- trace->begin = (void *)buffer;
- trace->top = trace->begin;
- trace->end = (void *)(buffer + size);
- /*
- * The value for 'no threshold' is -1, which will set the
- * threshold outside of the buffer, just like we want it.
- */
- ith *= ds_cfg.sizeof_rec[qual];
- trace->ith = (void *)(buffer + size - ith);
-
- trace->flags = flags;
-}
-
-
-static int ds_request(struct ds_tracer *tracer, struct ds_trace *trace,
- enum ds_qualifier qual, struct task_struct *task,
- int cpu, void *base, size_t size, size_t th)
-{
- struct ds_context *context;
- int error;
- size_t req_size;
-
- error = -EOPNOTSUPP;
- if (!ds_cfg.sizeof_rec[qual])
- goto out;
-
- error = -EINVAL;
- if (!base)
- goto out;
-
- req_size = ds_cfg.sizeof_rec[qual];
- /* We might need space for alignment adjustments. */
- if (!IS_ALIGNED((unsigned long)base, DS_ALIGNMENT))
- req_size += DS_ALIGNMENT;
-
- error = -EINVAL;
- if (size < req_size)
- goto out;
-
- if (th != (size_t)-1) {
- th *= ds_cfg.sizeof_rec[qual];
-
- error = -EINVAL;
- if (size <= th)
- goto out;
- }
-
- tracer->buffer = base;
- tracer->size = size;
-
- error = -ENOMEM;
- context = ds_get_context(task, cpu);
- if (!context)
- goto out;
- tracer->context = context;
-
- /*
- * Defer any tracer-specific initialization work for the context until
- * context ownership has been clarified.
- */
-
- error = 0;
- out:
- return error;
-}
-
-static struct bts_tracer *ds_request_bts(struct task_struct *task, int cpu,
- void *base, size_t size,
- bts_ovfl_callback_t ovfl, size_t th,
- unsigned int flags)
-{
- struct bts_tracer *tracer;
- int error;
-
- /* Buffer overflow notification is not yet implemented. */
- error = -EOPNOTSUPP;
- if (ovfl)
- goto out;
-
- error = get_tracer(task);
- if (error < 0)
- goto out;
-
- error = -ENOMEM;
- tracer = kzalloc(sizeof(*tracer), GFP_KERNEL);
- if (!tracer)
- goto out_put_tracer;
- tracer->ovfl = ovfl;
-
- /* Do some more error checking and acquire a tracing context. */
- error = ds_request(&tracer->ds, &tracer->trace.ds,
- ds_bts, task, cpu, base, size, th);
- if (error < 0)
- goto out_tracer;
-
- /* Claim the bts part of the tracing context we acquired above. */
- spin_lock_irq(&ds_lock);
-
- error = -EPERM;
- if (tracer->ds.context->bts_master)
- goto out_unlock;
- tracer->ds.context->bts_master = tracer;
-
- spin_unlock_irq(&ds_lock);
-
- /*
- * Now that we own the bts part of the context, let's complete the
- * initialization for that part.
- */
- ds_init_ds_trace(&tracer->trace.ds, ds_bts, base, size, th, flags);
- ds_write_config(tracer->ds.context, &tracer->trace.ds, ds_bts);
- ds_install_ds_area(tracer->ds.context);
-
- tracer->trace.read = bts_read;
- tracer->trace.write = bts_write;
-
- /* Start tracing. */
- ds_resume_bts(tracer);
-
- return tracer;
-
- out_unlock:
- spin_unlock_irq(&ds_lock);
- ds_put_context(tracer->ds.context);
- out_tracer:
- kfree(tracer);
- out_put_tracer:
- put_tracer(task);
- out:
- return ERR_PTR(error);
-}
-
-struct bts_tracer *ds_request_bts_task(struct task_struct *task,
- void *base, size_t size,
- bts_ovfl_callback_t ovfl,
- size_t th, unsigned int flags)
-{
- return ds_request_bts(task, 0, base, size, ovfl, th, flags);
-}
-
-struct bts_tracer *ds_request_bts_cpu(int cpu, void *base, size_t size,
- bts_ovfl_callback_t ovfl,
- size_t th, unsigned int flags)
-{
- return ds_request_bts(NULL, cpu, base, size, ovfl, th, flags);
-}
-
-static struct pebs_tracer *ds_request_pebs(struct task_struct *task, int cpu,
- void *base, size_t size,
- pebs_ovfl_callback_t ovfl, size_t th,
- unsigned int flags)
-{
- struct pebs_tracer *tracer;
- int error;
-
- /* Buffer overflow notification is not yet implemented. */
- error = -EOPNOTSUPP;
- if (ovfl)
- goto out;
-
- error = get_tracer(task);
- if (error < 0)
- goto out;
-
- error = -ENOMEM;
- tracer = kzalloc(sizeof(*tracer), GFP_KERNEL);
- if (!tracer)
- goto out_put_tracer;
- tracer->ovfl = ovfl;
-
- /* Do some more error checking and acquire a tracing context. */
- error = ds_request(&tracer->ds, &tracer->trace.ds,
- ds_pebs, task, cpu, base, size, th);
- if (error < 0)
- goto out_tracer;
-
- /* Claim the pebs part of the tracing context we acquired above. */
- spin_lock_irq(&ds_lock);
-
- error = -EPERM;
- if (tracer->ds.context->pebs_master)
- goto out_unlock;
- tracer->ds.context->pebs_master = tracer;
-
- spin_unlock_irq(&ds_lock);
-
- /*
- * Now that we own the pebs part of the context, let's complete the
- * initialization for that part.
- */
- ds_init_ds_trace(&tracer->trace.ds, ds_pebs, base, size, th, flags);
- ds_write_config(tracer->ds.context, &tracer->trace.ds, ds_pebs);
- ds_install_ds_area(tracer->ds.context);
-
- /* Start tracing. */
- ds_resume_pebs(tracer);
-
- return tracer;
-
- out_unlock:
- spin_unlock_irq(&ds_lock);
- ds_put_context(tracer->ds.context);
- out_tracer:
- kfree(tracer);
- out_put_tracer:
- put_tracer(task);
- out:
- return ERR_PTR(error);
-}
-
-struct pebs_tracer *ds_request_pebs_task(struct task_struct *task,
- void *base, size_t size,
- pebs_ovfl_callback_t ovfl,
- size_t th, unsigned int flags)
-{
- return ds_request_pebs(task, 0, base, size, ovfl, th, flags);
-}
-
-struct pebs_tracer *ds_request_pebs_cpu(int cpu, void *base, size_t size,
- pebs_ovfl_callback_t ovfl,
- size_t th, unsigned int flags)
-{
- return ds_request_pebs(NULL, cpu, base, size, ovfl, th, flags);
-}
-
-static void ds_free_bts(struct bts_tracer *tracer)
-{
- struct task_struct *task;
-
- task = tracer->ds.context->task;
-
- WARN_ON_ONCE(tracer->ds.context->bts_master != tracer);
- tracer->ds.context->bts_master = NULL;
-
- /* Make sure tracing stopped and the tracer is not in use. */
- if (task && (task != current))
- wait_task_context_switch(task);
-
- ds_put_context(tracer->ds.context);
- put_tracer(task);
-
- kfree(tracer);
-}
-
-void ds_release_bts(struct bts_tracer *tracer)
-{
- might_sleep();
-
- if (!tracer)
- return;
-
- ds_suspend_bts(tracer);
- ds_free_bts(tracer);
-}
-
-int ds_release_bts_noirq(struct bts_tracer *tracer)
-{
- struct task_struct *task;
- unsigned long irq;
- int error;
-
- if (!tracer)
- return 0;
-
- task = tracer->ds.context->task;
-
- local_irq_save(irq);
-
- error = -EPERM;
- if (!task &&
- (tracer->ds.context->cpu != smp_processor_id()))
- goto out;
-
- error = -EPERM;
- if (task && (task != current))
- goto out;
-
- ds_suspend_bts_noirq(tracer);
- ds_free_bts(tracer);
-
- error = 0;
- out:
- local_irq_restore(irq);
- return error;
-}
-
-static void update_task_debugctlmsr(struct task_struct *task,
- unsigned long debugctlmsr)
-{
- task->thread.debugctlmsr = debugctlmsr;
-
- get_cpu();
- if (task == current)
- update_debugctlmsr(debugctlmsr);
- put_cpu();
-}
-
-void ds_suspend_bts(struct bts_tracer *tracer)
-{
- struct task_struct *task;
- unsigned long debugctlmsr;
- int cpu;
-
- if (!tracer)
- return;
-
- tracer->flags = 0;
-
- task = tracer->ds.context->task;
- cpu = tracer->ds.context->cpu;
-
- WARN_ON(!task && irqs_disabled());
-
- debugctlmsr = (task ?
- task->thread.debugctlmsr :
- get_debugctlmsr_on_cpu(cpu));
- debugctlmsr &= ~BTS_CONTROL;
-
- if (task)
- update_task_debugctlmsr(task, debugctlmsr);
- else
- update_debugctlmsr_on_cpu(cpu, debugctlmsr);
-}
-
-int ds_suspend_bts_noirq(struct bts_tracer *tracer)
-{
- struct task_struct *task;
- unsigned long debugctlmsr, irq;
- int cpu, error = 0;
-
- if (!tracer)
- return 0;
-
- tracer->flags = 0;
-
- task = tracer->ds.context->task;
- cpu = tracer->ds.context->cpu;
-
- local_irq_save(irq);
-
- error = -EPERM;
- if (!task && (cpu != smp_processor_id()))
- goto out;
-
- debugctlmsr = (task ?
- task->thread.debugctlmsr :
- get_debugctlmsr());
- debugctlmsr &= ~BTS_CONTROL;
-
- if (task)
- update_task_debugctlmsr(task, debugctlmsr);
- else
- update_debugctlmsr(debugctlmsr);
-
- error = 0;
- out:
- local_irq_restore(irq);
- return error;
-}
-
-static unsigned long ds_bts_control(struct bts_tracer *tracer)
-{
- unsigned long control;
-
- control = ds_cfg.ctl[dsf_bts];
- if (!(tracer->trace.ds.flags & BTS_KERNEL))
- control |= ds_cfg.ctl[dsf_bts_kernel];
- if (!(tracer->trace.ds.flags & BTS_USER))
- control |= ds_cfg.ctl[dsf_bts_user];
-
- return control;
-}
-
-void ds_resume_bts(struct bts_tracer *tracer)
-{
- struct task_struct *task;
- unsigned long debugctlmsr;
- int cpu;
-
- if (!tracer)
- return;
-
- tracer->flags = tracer->trace.ds.flags;
-
- task = tracer->ds.context->task;
- cpu = tracer->ds.context->cpu;
-
- WARN_ON(!task && irqs_disabled());
-
- debugctlmsr = (task ?
- task->thread.debugctlmsr :
- get_debugctlmsr_on_cpu(cpu));
- debugctlmsr |= ds_bts_control(tracer);
-
- if (task)
- update_task_debugctlmsr(task, debugctlmsr);
- else
- update_debugctlmsr_on_cpu(cpu, debugctlmsr);
-}
-
-int ds_resume_bts_noirq(struct bts_tracer *tracer)
-{
- struct task_struct *task;
- unsigned long debugctlmsr, irq;
- int cpu, error = 0;
-
- if (!tracer)
- return 0;
-
- tracer->flags = tracer->trace.ds.flags;
-
- task = tracer->ds.context->task;
- cpu = tracer->ds.context->cpu;
-
- local_irq_save(irq);
-
- error = -EPERM;
- if (!task && (cpu != smp_processor_id()))
- goto out;
-
- debugctlmsr = (task ?
- task->thread.debugctlmsr :
- get_debugctlmsr());
- debugctlmsr |= ds_bts_control(tracer);
-
- if (task)
- update_task_debugctlmsr(task, debugctlmsr);
- else
- update_debugctlmsr(debugctlmsr);
-
- error = 0;
- out:
- local_irq_restore(irq);
- return error;
-}
-
-static void ds_free_pebs(struct pebs_tracer *tracer)
-{
- struct task_struct *task;
-
- task = tracer->ds.context->task;
-
- WARN_ON_ONCE(tracer->ds.context->pebs_master != tracer);
- tracer->ds.context->pebs_master = NULL;
-
- ds_put_context(tracer->ds.context);
- put_tracer(task);
-
- kfree(tracer);
-}
-
-void ds_release_pebs(struct pebs_tracer *tracer)
-{
- might_sleep();
-
- if (!tracer)
- return;
-
- ds_suspend_pebs(tracer);
- ds_free_pebs(tracer);
-}
-
-int ds_release_pebs_noirq(struct pebs_tracer *tracer)
-{
- struct task_struct *task;
- unsigned long irq;
- int error;
-
- if (!tracer)
- return 0;
-
- task = tracer->ds.context->task;
-
- local_irq_save(irq);
-
- error = -EPERM;
- if (!task &&
- (tracer->ds.context->cpu != smp_processor_id()))
- goto out;
-
- error = -EPERM;
- if (task && (task != current))
- goto out;
-
- ds_suspend_pebs_noirq(tracer);
- ds_free_pebs(tracer);
-
- error = 0;
- out:
- local_irq_restore(irq);
- return error;
-}
-
-void ds_suspend_pebs(struct pebs_tracer *tracer)
-{
-
-}
-
-int ds_suspend_pebs_noirq(struct pebs_tracer *tracer)
-{
- return 0;
-}
-
-void ds_resume_pebs(struct pebs_tracer *tracer)
-{
-
-}
-
-int ds_resume_pebs_noirq(struct pebs_tracer *tracer)
-{
- return 0;
-}
-
-const struct bts_trace *ds_read_bts(struct bts_tracer *tracer)
-{
- if (!tracer)
- return NULL;
-
- ds_read_config(tracer->ds.context, &tracer->trace.ds, ds_bts);
- return &tracer->trace;
-}
-
-const struct pebs_trace *ds_read_pebs(struct pebs_tracer *tracer)
-{
- if (!tracer)
- return NULL;
-
- ds_read_config(tracer->ds.context, &tracer->trace.ds, ds_pebs);
-
- tracer->trace.counters = ds_cfg.nr_counter_reset;
- memcpy(tracer->trace.counter_reset,
- tracer->ds.context->ds +
- (NUM_DS_PTR_FIELDS * ds_cfg.sizeof_ptr_field),
- ds_cfg.nr_counter_reset * PEBS_RESET_FIELD_SIZE);
-
- return &tracer->trace;
-}
-
-int ds_reset_bts(struct bts_tracer *tracer)
-{
- if (!tracer)
- return -EINVAL;
-
- tracer->trace.ds.top = tracer->trace.ds.begin;
-
- ds_set(tracer->ds.context->ds, ds_bts, ds_index,
- (unsigned long)tracer->trace.ds.top);
-
- return 0;
-}
-
-int ds_reset_pebs(struct pebs_tracer *tracer)
-{
- if (!tracer)
- return -EINVAL;
-
- tracer->trace.ds.top = tracer->trace.ds.begin;
-
- ds_set(tracer->ds.context->ds, ds_pebs, ds_index,
- (unsigned long)tracer->trace.ds.top);
-
- return 0;
-}
-
-int ds_set_pebs_reset(struct pebs_tracer *tracer,
- unsigned int counter, u64 value)
-{
- if (!tracer)
- return -EINVAL;
-
- if (ds_cfg.nr_counter_reset < counter)
- return -EINVAL;
-
- *(u64 *)(tracer->ds.context->ds +
- (NUM_DS_PTR_FIELDS * ds_cfg.sizeof_ptr_field) +
- (counter * PEBS_RESET_FIELD_SIZE)) = value;
-
- return 0;
-}
-
-static const struct ds_configuration ds_cfg_netburst = {
- .name = "Netburst",
- .ctl[dsf_bts] = (1 << 2) | (1 << 3),
- .ctl[dsf_bts_kernel] = (1 << 5),
- .ctl[dsf_bts_user] = (1 << 6),
- .nr_counter_reset = 1,
-};
-static const struct ds_configuration ds_cfg_pentium_m = {
- .name = "Pentium M",
- .ctl[dsf_bts] = (1 << 6) | (1 << 7),
- .nr_counter_reset = 1,
-};
-static const struct ds_configuration ds_cfg_core2_atom = {
- .name = "Core 2/Atom",
- .ctl[dsf_bts] = (1 << 6) | (1 << 7),
- .ctl[dsf_bts_kernel] = (1 << 9),
- .ctl[dsf_bts_user] = (1 << 10),
- .nr_counter_reset = 1,
-};
-static const struct ds_configuration ds_cfg_core_i7 = {
- .name = "Core i7",
- .ctl[dsf_bts] = (1 << 6) | (1 << 7),
- .ctl[dsf_bts_kernel] = (1 << 9),
- .ctl[dsf_bts_user] = (1 << 10),
- .nr_counter_reset = 4,
-};
-
-static void
-ds_configure(const struct ds_configuration *cfg,
- struct cpuinfo_x86 *cpu)
-{
- unsigned long nr_pebs_fields = 0;
-
- printk(KERN_INFO "[ds] using %s configuration\n", cfg->name);
-
-#ifdef __i386__
- nr_pebs_fields = 10;
-#else
- nr_pebs_fields = 18;
-#endif
-
- /*
- * Starting with version 2, architectural performance
- * monitoring supports a format specifier.
- */
- if ((cpuid_eax(0xa) & 0xff) > 1) {
- unsigned long perf_capabilities, format;
-
- rdmsrl(MSR_IA32_PERF_CAPABILITIES, perf_capabilities);
-
- format = (perf_capabilities >> 8) & 0xf;
-
- switch (format) {
- case 0:
- nr_pebs_fields = 18;
- break;
- case 1:
- nr_pebs_fields = 22;
- break;
- default:
- printk(KERN_INFO
- "[ds] unknown PEBS format: %lu\n", format);
- nr_pebs_fields = 0;
- break;
- }
- }
-
- memset(&ds_cfg, 0, sizeof(ds_cfg));
- ds_cfg = *cfg;
-
- ds_cfg.sizeof_ptr_field =
- (cpu_has(cpu, X86_FEATURE_DTES64) ? 8 : 4);
-
- ds_cfg.sizeof_rec[ds_bts] = ds_cfg.sizeof_ptr_field * 3;
- ds_cfg.sizeof_rec[ds_pebs] = ds_cfg.sizeof_ptr_field * nr_pebs_fields;
-
- if (!cpu_has(cpu, X86_FEATURE_BTS)) {
- ds_cfg.sizeof_rec[ds_bts] = 0;
- printk(KERN_INFO "[ds] bts not available\n");
- }
- if (!cpu_has(cpu, X86_FEATURE_PEBS)) {
- ds_cfg.sizeof_rec[ds_pebs] = 0;
- printk(KERN_INFO "[ds] pebs not available\n");
- }
-
- printk(KERN_INFO "[ds] sizes: address: %u bit, ",
- 8 * ds_cfg.sizeof_ptr_field);
- printk("bts/pebs record: %u/%u bytes\n",
- ds_cfg.sizeof_rec[ds_bts], ds_cfg.sizeof_rec[ds_pebs]);
-
- WARN_ON_ONCE(MAX_PEBS_COUNTERS < ds_cfg.nr_counter_reset);
-}
-
-void __cpuinit ds_init_intel(struct cpuinfo_x86 *c)
-{
- /* Only configure the first cpu. Others are identical. */
- if (ds_cfg.name)
- return;
-
- switch (c->x86) {
- case 0x6:
- switch (c->x86_model) {
- case 0x9:
- case 0xd: /* Pentium M */
- ds_configure(&ds_cfg_pentium_m, c);
- break;
- case 0xf:
- case 0x17: /* Core2 */
- case 0x1c: /* Atom */
- ds_configure(&ds_cfg_core2_atom, c);
- break;
- case 0x1a: /* Core i7 */
- ds_configure(&ds_cfg_core_i7, c);
- break;
- default:
- /* Sorry, don't know about them. */
- break;
- }
- break;
- case 0xf:
- switch (c->x86_model) {
- case 0x0:
- case 0x1:
- case 0x2: /* Netburst */
- ds_configure(&ds_cfg_netburst, c);
- break;
- default:
- /* Sorry, don't know about them. */
- break;
- }
- break;
- default:
- /* Sorry, don't know about them. */
- break;
- }
-}
-
-static inline void ds_take_timestamp(struct ds_context *context,
- enum bts_qualifier qualifier,
- struct task_struct *task)
-{
- struct bts_tracer *tracer = context->bts_master;
- struct bts_struct ts;
-
- /* Prevent compilers from reading the tracer pointer twice. */
- barrier();
-
- if (!tracer || !(tracer->flags & BTS_TIMESTAMPS))
- return;
-
- memset(&ts, 0, sizeof(ts));
- ts.qualifier = qualifier;
- ts.variant.event.clock = trace_clock_global();
- ts.variant.event.pid = task->pid;
-
- bts_write(tracer, &ts);
-}
-
-/*
- * Change the DS configuration from tracing prev to tracing next.
- */
-void ds_switch_to(struct task_struct *prev, struct task_struct *next)
-{
- struct ds_context *prev_ctx = prev->thread.ds_ctx;
- struct ds_context *next_ctx = next->thread.ds_ctx;
- unsigned long debugctlmsr = next->thread.debugctlmsr;
-
- /* Make sure all data is read before we start. */
- barrier();
-
- if (prev_ctx) {
- update_debugctlmsr(0);
-
- ds_take_timestamp(prev_ctx, bts_task_departs, prev);
- }
-
- if (next_ctx) {
- ds_take_timestamp(next_ctx, bts_task_arrives, next);
-
- wrmsrl(MSR_IA32_DS_AREA, (unsigned long)next_ctx->ds);
- }
-
- update_debugctlmsr(debugctlmsr);
-}
-
-static __init int ds_selftest(void)
-{
- if (ds_cfg.sizeof_rec[ds_bts]) {
- int error;
-
- error = ds_selftest_bts();
- if (error) {
- WARN(1, "[ds] selftest failed. disabling bts.\n");
- ds_cfg.sizeof_rec[ds_bts] = 0;
- }
- }
-
- if (ds_cfg.sizeof_rec[ds_pebs]) {
- int error;
-
- error = ds_selftest_pebs();
- if (error) {
- WARN(1, "[ds] selftest failed. disabling pebs.\n");
- ds_cfg.sizeof_rec[ds_pebs] = 0;
- }
- }
-
- return 0;
-}
-device_initcall(ds_selftest);
diff --git a/arch/x86/kernel/ds_selftest.c b/arch/x86/kernel/ds_selftest.c
deleted file mode 100644
index 6bc7c199ab99..000000000000
--- a/arch/x86/kernel/ds_selftest.c
+++ /dev/null
@@ -1,408 +0,0 @@
-/*
- * Debug Store support - selftest
- *
- *
- * Copyright (C) 2009 Intel Corporation.
- * Markus Metzger <markus.t.metzger@intel.com>, 2009
- */
-
-#include "ds_selftest.h"
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/smp.h>
-#include <linux/cpu.h>
-
-#include <asm/ds.h>
-
-
-#define BUFFER_SIZE 521 /* Intentionally chose an odd size. */
-#define SMALL_BUFFER_SIZE 24 /* A single bts entry. */
-
-struct ds_selftest_bts_conf {
- struct bts_tracer *tracer;
- int error;
- int (*suspend)(struct bts_tracer *);
- int (*resume)(struct bts_tracer *);
-};
-
-static int ds_selftest_bts_consistency(const struct bts_trace *trace)
-{
- int error = 0;
-
- if (!trace) {
- printk(KERN_CONT "failed to access trace...");
- /* Bail out. Other tests are pointless. */
- return -1;
- }
-
- if (!trace->read) {
- printk(KERN_CONT "bts read not available...");
- error = -1;
- }
-
- /* Do some sanity checks on the trace configuration. */
- if (!trace->ds.n) {
- printk(KERN_CONT "empty bts buffer...");
- error = -1;
- }
- if (!trace->ds.size) {
- printk(KERN_CONT "bad bts trace setup...");
- error = -1;
- }
- if (trace->ds.end !=
- (char *)trace->ds.begin + (trace->ds.n * trace->ds.size)) {
- printk(KERN_CONT "bad bts buffer setup...");
- error = -1;
- }
- /*
- * We allow top in [begin; end], since it's not clear when the
- * overflow adjustment happens: after the increment or before the
- * write.
- */
- if ((trace->ds.top < trace->ds.begin) ||
- (trace->ds.end < trace->ds.top)) {
- printk(KERN_CONT "bts top out of bounds...");
- error = -1;
- }
-
- return error;
-}
-
-static int ds_selftest_bts_read(struct bts_tracer *tracer,
- const struct bts_trace *trace,
- const void *from, const void *to)
-{
- const unsigned char *at;
-
- /*
- * Check a few things which do not belong to this test.
- * They should be covered by other tests.
- */
- if (!trace)
- return -1;
-
- if (!trace->read)
- return -1;
-
- if (to < from)
- return -1;
-
- if (from < trace->ds.begin)
- return -1;
-
- if (trace->ds.end < to)
- return -1;
-
- if (!trace->ds.size)
- return -1;
-
- /* Now to the test itself. */
- for (at = from; (void *)at < to; at += trace->ds.size) {
- struct bts_struct bts;
- unsigned long index;
- int error;
-
- if (((void *)at - trace->ds.begin) % trace->ds.size) {
- printk(KERN_CONT
- "read from non-integer index...");
- return -1;
- }
- index = ((void *)at - trace->ds.begin) / trace->ds.size;
-
- memset(&bts, 0, sizeof(bts));
- error = trace->read(tracer, at, &bts);
- if (error < 0) {
- printk(KERN_CONT
- "error reading bts trace at [%lu] (0x%p)...",
- index, at);
- return error;
- }
-
- switch (bts.qualifier) {
- case BTS_BRANCH:
- break;
- default:
- printk(KERN_CONT
- "unexpected bts entry %llu at [%lu] (0x%p)...",
- bts.qualifier, index, at);
- return -1;
- }
- }
-
- return 0;
-}
-
-static void ds_selftest_bts_cpu(void *arg)
-{
- struct ds_selftest_bts_conf *conf = arg;
- const struct bts_trace *trace;
- void *top;
-
- if (IS_ERR(conf->tracer)) {
- conf->error = PTR_ERR(conf->tracer);
- conf->tracer = NULL;
-
- printk(KERN_CONT
- "initialization failed (err: %d)...", conf->error);
- return;
- }
-
- /* We should meanwhile have enough trace. */
- conf->error = conf->suspend(conf->tracer);
- if (conf->error < 0)
- return;
-
- /* Let's see if we can access the trace. */
- trace = ds_read_bts(conf->tracer);
-
- conf->error = ds_selftest_bts_consistency(trace);
- if (conf->error < 0)
- return;
-
- /* If everything went well, we should have a few trace entries. */
- if (trace->ds.top == trace->ds.begin) {
- /*
- * It is possible but highly unlikely that we got a
- * buffer overflow and end up at exactly the same
- * position we started from.
- * Let's issue a warning, but continue.
- */
- printk(KERN_CONT "no trace/overflow...");
- }
-
- /* Let's try to read the trace we collected. */
- conf->error =
- ds_selftest_bts_read(conf->tracer, trace,
- trace->ds.begin, trace->ds.top);
- if (conf->error < 0)
- return;
-
- /*
- * Let's read the trace again.
- * Since we suspended tracing, we should get the same result.
- */
- top = trace->ds.top;
-
- trace = ds_read_bts(conf->tracer);
- conf->error = ds_selftest_bts_consistency(trace);
- if (conf->error < 0)
- return;
-
- if (top != trace->ds.top) {
- printk(KERN_CONT "suspend not working...");
- conf->error = -1;
- return;
- }
-
- /* Let's collect some more trace - see if resume is working. */
- conf->error = conf->resume(conf->tracer);
- if (conf->error < 0)
- return;
-
- conf->error = conf->suspend(conf->tracer);
- if (conf->error < 0)
- return;
-
- trace = ds_read_bts(conf->tracer);
-
- conf->error = ds_selftest_bts_consistency(trace);
- if (conf->error < 0)
- return;
-
- if (trace->ds.top == top) {
- /*
- * It is possible but highly unlikely that we got a
- * buffer overflow and end up at exactly the same
- * position we started from.
- * Let's issue a warning and check the full trace.
- */
- printk(KERN_CONT
- "no resume progress/overflow...");
-
- conf->error =
- ds_selftest_bts_read(conf->tracer, trace,
- trace->ds.begin, trace->ds.end);
- } else if (trace->ds.top < top) {
- /*
- * We had a buffer overflow - the entire buffer should
- * contain trace records.
- */
- conf->error =
- ds_selftest_bts_read(conf->tracer, trace,
- trace->ds.begin, trace->ds.end);
- } else {
- /*
- * It is quite likely that the buffer did not overflow.
- * Let's just check the delta trace.
- */
- conf->error =
- ds_selftest_bts_read(conf->tracer, trace, top,
- trace->ds.top);
- }
- if (conf->error < 0)
- return;
-
- conf->error = 0;
-}
-
-static int ds_suspend_bts_wrap(struct bts_tracer *tracer)
-{
- ds_suspend_bts(tracer);
- return 0;
-}
-
-static int ds_resume_bts_wrap(struct bts_tracer *tracer)
-{
- ds_resume_bts(tracer);
- return 0;
-}
-
-static void ds_release_bts_noirq_wrap(void *tracer)
-{
- (void)ds_release_bts_noirq(tracer);
-}
-
-static int ds_selftest_bts_bad_release_noirq(int cpu,
- struct bts_tracer *tracer)
-{
- int error = -EPERM;
-
- /* Try to release the tracer on the wrong cpu. */
- get_cpu();
- if (cpu != smp_processor_id()) {
- error = ds_release_bts_noirq(tracer);
- if (error != -EPERM)
- printk(KERN_CONT "release on wrong cpu...");
- }
- put_cpu();
-
- return error ? 0 : -1;
-}
-
-static int ds_selftest_bts_bad_request_cpu(int cpu, void *buffer)
-{
- struct bts_tracer *tracer;
- int error;
-
- /* Try to request cpu tracing while task tracing is active. */
- tracer = ds_request_bts_cpu(cpu, buffer, BUFFER_SIZE, NULL,
- (size_t)-1, BTS_KERNEL);
- error = PTR_ERR(tracer);
- if (!IS_ERR(tracer)) {
- ds_release_bts(tracer);
- error = 0;
- }
-
- if (error != -EPERM)
- printk(KERN_CONT "cpu/task tracing overlap...");
-
- return error ? 0 : -1;
-}
-
-static int ds_selftest_bts_bad_request_task(void *buffer)
-{
- struct bts_tracer *tracer;
- int error;
-
- /* Try to request cpu tracing while task tracing is active. */
- tracer = ds_request_bts_task(current, buffer, BUFFER_SIZE, NULL,
- (size_t)-1, BTS_KERNEL);
- error = PTR_ERR(tracer);
- if (!IS_ERR(tracer)) {
- error = 0;
- ds_release_bts(tracer);
- }
-
- if (error != -EPERM)
- printk(KERN_CONT "task/cpu tracing overlap...");
-
- return error ? 0 : -1;
-}
-
-int ds_selftest_bts(void)
-{
- struct ds_selftest_bts_conf conf;
- unsigned char buffer[BUFFER_SIZE], *small_buffer;
- unsigned long irq;
- int cpu;
-
- printk(KERN_INFO "[ds] bts selftest...");
- conf.error = 0;
-
- small_buffer = (unsigned char *)ALIGN((unsigned long)buffer, 8) + 8;
-
- get_online_cpus();
- for_each_online_cpu(cpu) {
- conf.suspend = ds_suspend_bts_wrap;
- conf.resume = ds_resume_bts_wrap;
- conf.tracer =
- ds_request_bts_cpu(cpu, buffer, BUFFER_SIZE,
- NULL, (size_t)-1, BTS_KERNEL);
- ds_selftest_bts_cpu(&conf);
- if (conf.error >= 0)
- conf.error = ds_selftest_bts_bad_request_task(buffer);
- ds_release_bts(conf.tracer);
- if (conf.error < 0)
- goto out;
-
- conf.suspend = ds_suspend_bts_noirq;
- conf.resume = ds_resume_bts_noirq;
- conf.tracer =
- ds_request_bts_cpu(cpu, buffer, BUFFER_SIZE,
- NULL, (size_t)-1, BTS_KERNEL);
- smp_call_function_single(cpu, ds_selftest_bts_cpu, &conf, 1);
- if (conf.error >= 0) {
- conf.error =
- ds_selftest_bts_bad_release_noirq(cpu,
- conf.tracer);
- /* We must not release the tracer twice. */
- if (conf.error < 0)
- conf.tracer = NULL;
- }
- if (conf.error >= 0)
- conf.error = ds_selftest_bts_bad_request_task(buffer);
- smp_call_function_single(cpu, ds_release_bts_noirq_wrap,
- conf.tracer, 1);
- if (conf.error < 0)
- goto out;
- }
-
- conf.suspend = ds_suspend_bts_wrap;
- conf.resume = ds_resume_bts_wrap;
- conf.tracer =
- ds_request_bts_task(current, buffer, BUFFER_SIZE,
- NULL, (size_t)-1, BTS_KERNEL);
- ds_selftest_bts_cpu(&conf);
- if (conf.error >= 0)
- conf.error = ds_selftest_bts_bad_request_cpu(0, buffer);
- ds_release_bts(conf.tracer);
- if (conf.error < 0)
- goto out;
-
- conf.suspend = ds_suspend_bts_noirq;
- conf.resume = ds_resume_bts_noirq;
- conf.tracer =
- ds_request_bts_task(current, small_buffer, SMALL_BUFFER_SIZE,
- NULL, (size_t)-1, BTS_KERNEL);
- local_irq_save(irq);
- ds_selftest_bts_cpu(&conf);
- if (conf.error >= 0)
- conf.error = ds_selftest_bts_bad_request_cpu(0, buffer);
- ds_release_bts_noirq(conf.tracer);
- local_irq_restore(irq);
- if (conf.error < 0)
- goto out;
-
- conf.error = 0;
- out:
- put_online_cpus();
- printk(KERN_CONT "%s.\n", (conf.error ? "failed" : "passed"));
-
- return conf.error;
-}
-
-int ds_selftest_pebs(void)
-{
- return 0;
-}
diff --git a/arch/x86/kernel/ds_selftest.h b/arch/x86/kernel/ds_selftest.h
deleted file mode 100644
index 2ba8745c6663..000000000000
--- a/arch/x86/kernel/ds_selftest.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Debug Store support - selftest
- *
- *
- * Copyright (C) 2009 Intel Corporation.
- * Markus Metzger <markus.t.metzger@intel.com>, 2009
- */
-
-#ifdef CONFIG_X86_DS_SELFTEST
-extern int ds_selftest_bts(void);
-extern int ds_selftest_pebs(void);
-#else
-static inline int ds_selftest_bts(void) { return 0; }
-static inline int ds_selftest_pebs(void) { return 0; }
-#endif
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 6d817554780a..c89a386930b7 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -224,11 +224,6 @@ unsigned __kprobes long oops_begin(void)
int cpu;
unsigned long flags;
- /* notify the hw-branch tracer so it may disable tracing and
- add the last trace to the trace buffer -
- the earlier this happens, the more useful the trace. */
- trace_hw_branch_oops();
-
oops_enter();
/* racy, but better than risking deadlock. */
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index b9c830c12b4a..fa99bae75ace 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -41,6 +41,14 @@ static void early_vga_write(struct console *con, const char *str, unsigned n)
writew(0x720, VGABASE + 2*(max_xpos*j + i));
current_ypos = max_ypos-1;
}
+#ifdef CONFIG_KGDB_KDB
+ if (c == '\b') {
+ if (current_xpos > 0)
+ current_xpos--;
+ } else if (c == '\r') {
+ current_xpos = 0;
+ } else
+#endif
if (c == '\n') {
current_xpos = 0;
current_ypos++;
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 44a8e0dc6737..cd49141cf153 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -53,6 +53,7 @@
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
+#include <asm/cpufeature.h>
/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
@@ -905,7 +906,25 @@ ENTRY(simd_coprocessor_error)
RING0_INT_FRAME
pushl $0
CFI_ADJUST_CFA_OFFSET 4
+#ifdef CONFIG_X86_INVD_BUG
+ /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
+661: pushl $do_general_protection
+662:
+.section .altinstructions,"a"
+ .balign 4
+ .long 661b
+ .long 663f
+ .byte X86_FEATURE_XMM
+ .byte 662b-661b
+ .byte 664f-663f
+.previous
+.section .altinstr_replacement,"ax"
+663: pushl $do_simd_coprocessor_error
+664:
+.previous
+#else
pushl $do_simd_coprocessor_error
+#endif
CFI_ADJUST_CFA_OFFSET 4
jmp error_code
CFI_ENDPROC
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 23b4ecdffa9b..a198b7c87a12 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -36,6 +36,7 @@
unsigned long hpet_address;
u8 hpet_blockid; /* OS timer block num */
u8 hpet_msi_disable;
+u8 hpet_readback_cmp;
#ifdef CONFIG_PCI_MSI
static unsigned long hpet_num_timers;
@@ -395,19 +396,23 @@ static int hpet_next_event(unsigned long delta,
* at that point and we would wait for the next hpet interrupt
* forever. We found out that reading the CMP register back
* forces the transfer so we can rely on the comparison with
- * the counter register below. If the read back from the
- * compare register does not match the value we programmed
- * then we might have a real hardware problem. We can not do
- * much about it here, but at least alert the user/admin with
- * a prominent warning.
- * An erratum on some chipsets (ICH9,..), results in comparator read
- * immediately following a write returning old value. Workaround
- * for this is to read this value second time, when first
- * read returns old value.
+ * the counter register below.
+ *
+ * That works fine on those ATI chipsets, but on newer Intel
+ * chipsets (ICH9...) this triggers due to an erratum: Reading
+ * the comparator immediately following a write is returning
+ * the old value.
+ *
+ * We restrict the read back to the affected ATI chipsets (set
+ * by quirks) and also run it with hpet=verbose for debugging
+ * purposes.
*/
- if (unlikely((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt)) {
- WARN_ONCE(hpet_readl(HPET_Tn_CMP(timer)) != cnt,
- KERN_WARNING "hpet: compare register read back failed.\n");
+ if (hpet_readback_cmp || hpet_verbose) {
+ u32 cmp = hpet_readl(HPET_Tn_CMP(timer));
+
+ if (cmp != cnt)
+ printk_once(KERN_WARNING
+ "hpet: compare register read back failed.\n");
}
return (s32)(hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index d6cc065f519f..a8f1b803d2fd 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -189,25 +189,16 @@ static int get_hbp_len(u8 hbp_len)
}
/*
- * Check for virtual address in user space.
- */
-int arch_check_va_in_userspace(unsigned long va, u8 hbp_len)
-{
- unsigned int len;
-
- len = get_hbp_len(hbp_len);
-
- return (va <= TASK_SIZE - len);
-}
-
-/*
* Check for virtual address in kernel space.
*/
-static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len)
+int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
unsigned int len;
+ unsigned long va;
+ struct arch_hw_breakpoint *info = counter_arch_bp(bp);
- len = get_hbp_len(hbp_len);
+ va = info->address;
+ len = get_hbp_len(info->len);
return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
@@ -300,8 +291,7 @@ static int arch_build_bp_info(struct perf_event *bp)
/*
* Validate the arch-specific HW Breakpoint register settings
*/
-int arch_validate_hwbkpt_settings(struct perf_event *bp,
- struct task_struct *tsk)
+int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
unsigned int align;
@@ -314,16 +304,6 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp,
ret = -EINVAL;
- if (info->type == X86_BREAKPOINT_EXECUTE)
- /*
- * Ptrace-refactoring code
- * For now, we'll allow instruction breakpoint only for user-space
- * addresses
- */
- if ((!arch_check_va_in_userspace(info->address, info->len)) &&
- info->len != X86_BREAKPOINT_EXECUTE)
- return ret;
-
switch (info->len) {
case X86_BREAKPOINT_LEN_1:
align = 0;
@@ -350,15 +330,6 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp,
if (info->address & align)
return -EINVAL;
- /* Check that the virtual address is in the proper range */
- if (tsk) {
- if (!arch_check_va_in_userspace(info->address, info->len))
- return -EFAULT;
- } else {
- if (!arch_check_va_in_kernelspace(info->address, info->len))
- return -EFAULT;
- }
-
return 0;
}
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 54c31c285488..86cef6b32253 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -102,65 +102,62 @@ void __cpuinit fpu_init(void)
mxcsr_feature_mask_init();
/* clean state in init */
- if (cpu_has_xsave)
- current_thread_info()->status = TS_XSAVE;
- else
- current_thread_info()->status = 0;
+ current_thread_info()->status = 0;
clear_used_math();
}
#endif /* CONFIG_X86_64 */
-/*
- * The _current_ task is using the FPU for the first time
- * so initialize it and set the mxcsr to its default
- * value at reset if we support XMM instructions and then
- * remember the current task has used the FPU.
- */
-int init_fpu(struct task_struct *tsk)
+static void fpu_finit(struct fpu *fpu)
{
- if (tsk_used_math(tsk)) {
- if (HAVE_HWFP && tsk == current)
- unlazy_fpu(tsk);
- return 0;
- }
-
- /*
- * Memory allocation at the first usage of the FPU and other state.
- */
- if (!tsk->thread.xstate) {
- tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
- GFP_KERNEL);
- if (!tsk->thread.xstate)
- return -ENOMEM;
- }
-
#ifdef CONFIG_X86_32
if (!HAVE_HWFP) {
- memset(tsk->thread.xstate, 0, xstate_size);
- finit_task(tsk);
- set_stopped_child_used_math(tsk);
- return 0;
+ finit_soft_fpu(&fpu->state->soft);
+ return;
}
#endif
if (cpu_has_fxsr) {
- struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
+ struct i387_fxsave_struct *fx = &fpu->state->fxsave;
memset(fx, 0, xstate_size);
fx->cwd = 0x37f;
if (cpu_has_xmm)
fx->mxcsr = MXCSR_DEFAULT;
} else {
- struct i387_fsave_struct *fp = &tsk->thread.xstate->fsave;
+ struct i387_fsave_struct *fp = &fpu->state->fsave;
memset(fp, 0, xstate_size);
fp->cwd = 0xffff037fu;
fp->swd = 0xffff0000u;
fp->twd = 0xffffffffu;
fp->fos = 0xffff0000u;
}
+}
+
+/*
+ * The _current_ task is using the FPU for the first time
+ * so initialize it and set the mxcsr to its default
+ * value at reset if we support XMM instructions and then
+ * remember the current task has used the FPU.
+ */
+int init_fpu(struct task_struct *tsk)
+{
+ int ret;
+
+ if (tsk_used_math(tsk)) {
+ if (HAVE_HWFP && tsk == current)
+ unlazy_fpu(tsk);
+ return 0;
+ }
+
/*
- * Only the device not available exception or ptrace can call init_fpu.
+ * Memory allocation at the first usage of the FPU and other state.
*/
+ ret = fpu_alloc(&tsk->thread.fpu);
+ if (ret)
+ return ret;
+
+ fpu_finit(&tsk->thread.fpu);
+
set_stopped_child_used_math(tsk);
return 0;
}
@@ -194,7 +191,7 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
return ret;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.xstate->fxsave, 0, -1);
+ &target->thread.fpu.state->fxsave, 0, -1);
}
int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
@@ -211,19 +208,19 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
return ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.xstate->fxsave, 0, -1);
+ &target->thread.fpu.state->fxsave, 0, -1);
/*
* mxcsr reserved bits must be masked to zero for security reasons.
*/
- target->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask;
+ target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;
/*
* update the header bits in the xsave header, indicating the
* presence of FP and SSE state.
*/
if (cpu_has_xsave)
- target->thread.xstate->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;
+ target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;
return ret;
}
@@ -246,14 +243,14 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
* memory layout in the thread struct, so that we can copy the entire
* xstateregs to the user using one user_regset_copyout().
*/
- memcpy(&target->thread.xstate->fxsave.sw_reserved,
+ memcpy(&target->thread.fpu.state->fxsave.sw_reserved,
xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));
/*
* Copy the xstate memory layout.
*/
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.xstate->xsave, 0, -1);
+ &target->thread.fpu.state->xsave, 0, -1);
return ret;
}
@@ -272,14 +269,14 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
return ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.xstate->xsave, 0, -1);
+ &target->thread.fpu.state->xsave, 0, -1);
/*
* mxcsr reserved bits must be masked to zero for security reasons.
*/
- target->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask;
+ target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;
- xsave_hdr = &target->thread.xstate->xsave.xsave_hdr;
+ xsave_hdr = &target->thread.fpu.state->xsave.xsave_hdr;
xsave_hdr->xstate_bv &= pcntxt_mask;
/*
@@ -365,7 +362,7 @@ static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
static void
convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
{
- struct i387_fxsave_struct *fxsave = &tsk->thread.xstate->fxsave;
+ struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
int i;
@@ -405,7 +402,7 @@ static void convert_to_fxsr(struct task_struct *tsk,
const struct user_i387_ia32_struct *env)
{
- struct i387_fxsave_struct *fxsave = &tsk->thread.xstate->fxsave;
+ struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
int i;
@@ -445,7 +442,7 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
if (!cpu_has_fxsr) {
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.xstate->fsave, 0,
+ &target->thread.fpu.state->fsave, 0,
-1);
}
@@ -475,7 +472,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
if (!cpu_has_fxsr) {
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.xstate->fsave, 0, -1);
+ &target->thread.fpu.state->fsave, 0, -1);
}
if (pos > 0 || count < sizeof(env))
@@ -490,7 +487,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
* presence of FP.
*/
if (cpu_has_xsave)
- target->thread.xstate->xsave.xsave_hdr.xstate_bv |= XSTATE_FP;
+ target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FP;
return ret;
}
@@ -501,7 +498,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
static inline int save_i387_fsave(struct _fpstate_ia32 __user *buf)
{
struct task_struct *tsk = current;
- struct i387_fsave_struct *fp = &tsk->thread.xstate->fsave;
+ struct i387_fsave_struct *fp = &tsk->thread.fpu.state->fsave;
fp->status = fp->swd;
if (__copy_to_user(buf, fp, sizeof(struct i387_fsave_struct)))
@@ -512,7 +509,7 @@ static inline int save_i387_fsave(struct _fpstate_ia32 __user *buf)
static int save_i387_fxsave(struct _fpstate_ia32 __user *buf)
{
struct task_struct *tsk = current;
- struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
+ struct i387_fxsave_struct *fx = &tsk->thread.fpu.state->fxsave;
struct user_i387_ia32_struct env;
int err = 0;
@@ -547,7 +544,7 @@ static int save_i387_xsave(void __user *buf)
* header as well as change any contents in the memory layout.
* xrestore as part of sigreturn will capture all the changes.
*/
- tsk->thread.xstate->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;
+ tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;
if (save_i387_fxsave(fx) < 0)
return -1;
@@ -599,7 +596,7 @@ static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf)
{
struct task_struct *tsk = current;
- return __copy_from_user(&tsk->thread.xstate->fsave, buf,
+ return __copy_from_user(&tsk->thread.fpu.state->fsave, buf,
sizeof(struct i387_fsave_struct));
}
@@ -610,10 +607,10 @@ static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf,
struct user_i387_ia32_struct env;
int err;
- err = __copy_from_user(&tsk->thread.xstate->fxsave, &buf->_fxsr_env[0],
+ err = __copy_from_user(&tsk->thread.fpu.state->fxsave, &buf->_fxsr_env[0],
size);
/* mxcsr reserved bits must be masked to zero for security reasons */
- tsk->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask;
+ tsk->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;
if (err || __copy_from_user(&env, buf, sizeof(env)))
return 1;
convert_to_fxsr(tsk, &env);
@@ -629,7 +626,7 @@ static int restore_i387_xsave(void __user *buf)
struct i387_fxsave_struct __user *fx =
(struct i387_fxsave_struct __user *) &fx_user->_fxsr_env[0];
struct xsave_hdr_struct *xsave_hdr =
- &current->thread.xstate->xsave.xsave_hdr;
+ &current->thread.fpu.state->xsave.xsave_hdr;
u64 mask;
int err;
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index 23c167925a5c..2dfd31597443 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -16,7 +16,7 @@
#include <asm/hpet.h>
#include <asm/smp.h>
-DEFINE_SPINLOCK(i8253_lock);
+DEFINE_RAW_SPINLOCK(i8253_lock);
EXPORT_SYMBOL(i8253_lock);
/*
@@ -33,7 +33,7 @@ struct clock_event_device *global_clock_event;
static void init_pit_timer(enum clock_event_mode mode,
struct clock_event_device *evt)
{
- spin_lock(&i8253_lock);
+ raw_spin_lock(&i8253_lock);
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
@@ -62,7 +62,7 @@ static void init_pit_timer(enum clock_event_mode mode,
/* Nothing to do here */
break;
}
- spin_unlock(&i8253_lock);
+ raw_spin_unlock(&i8253_lock);
}
/*
@@ -72,10 +72,10 @@ static void init_pit_timer(enum clock_event_mode mode,
*/
static int pit_next_event(unsigned long delta, struct clock_event_device *evt)
{
- spin_lock(&i8253_lock);
+ raw_spin_lock(&i8253_lock);
outb_pit(delta & 0xff , PIT_CH0); /* LSB */
outb_pit(delta >> 8 , PIT_CH0); /* MSB */
- spin_unlock(&i8253_lock);
+ raw_spin_unlock(&i8253_lock);
return 0;
}
@@ -130,7 +130,7 @@ static cycle_t pit_read(struct clocksource *cs)
int count;
u32 jifs;
- spin_lock_irqsave(&i8253_lock, flags);
+ raw_spin_lock_irqsave(&i8253_lock, flags);
/*
* Although our caller may have the read side of xtime_lock,
* this is now a seqlock, and we are cheating in this routine
@@ -176,7 +176,7 @@ static cycle_t pit_read(struct clocksource *cs)
old_count = count;
old_jifs = jifs;
- spin_unlock_irqrestore(&i8253_lock, flags);
+ raw_spin_unlock_irqrestore(&i8253_lock, flags);
count = (LATCH - 1) - count;
diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
index 3a54dcb9cd0e..43e9ccf44947 100644
--- a/arch/x86/kernel/init_task.c
+++ b/arch/x86/kernel/init_task.c
@@ -34,7 +34,7 @@ EXPORT_SYMBOL(init_task);
/*
* per-CPU TSS segments. Threads are completely 'soft' on Linux,
* no more per-task TSS's. The TSS size is kept cacheline-aligned
- * so they are allowed to end up in the .data.cacheline_aligned
+ * so they are allowed to end up in the .data..cacheline_aligned
* section. Since TSS's are completely CPU-local, we want them
* on exact cacheline boundaries, to eliminate cacheline ping-pong.
*/
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 0ed2d300cd46..990ae7cfc578 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -60,7 +60,7 @@ static irqreturn_t math_error_irq(int cpl, void *dev_id)
outb(0, 0xF0);
if (ignore_fpu_irq || !boot_cpu_data.hard_math)
return IRQ_NONE;
- math_error((void __user *)get_irq_regs()->ip);
+ math_error(get_irq_regs(), 0, 16);
return IRQ_HANDLED;
}
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index b2258ca91003..4f4af75b9482 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -47,20 +47,8 @@
#include <asm/debugreg.h>
#include <asm/apicdef.h>
#include <asm/system.h>
-
#include <asm/apic.h>
-/*
- * Put the error code here just in case the user cares:
- */
-static int gdb_x86errcode;
-
-/*
- * Likewise, the vector number here (since GDB only gets the signal
- * number through the usual means, and that's not very specific):
- */
-static int gdb_x86vector = -1;
-
/**
* pt_regs_to_gdb_regs - Convert ptrace regs to GDB regs
* @gdb_regs: A pointer to hold the registers in the order GDB wants.
@@ -211,6 +199,8 @@ static struct hw_breakpoint {
struct perf_event **pev;
} breakinfo[4];
+static unsigned long early_dr7;
+
static void kgdb_correct_hw_break(void)
{
int breakno;
@@ -222,6 +212,14 @@ static void kgdb_correct_hw_break(void)
int cpu = raw_smp_processor_id();
if (!breakinfo[breakno].enabled)
continue;
+ if (dbg_is_early) {
+ set_debugreg(breakinfo[breakno].addr, breakno);
+ early_dr7 |= encode_dr7(breakno,
+ breakinfo[breakno].len,
+ breakinfo[breakno].type);
+ set_debugreg(early_dr7, 7);
+ continue;
+ }
bp = *per_cpu_ptr(breakinfo[breakno].pev, cpu);
info = counter_arch_bp(bp);
if (bp->attr.disabled != 1)
@@ -236,7 +234,8 @@ static void kgdb_correct_hw_break(void)
if (!val)
bp->attr.disabled = 0;
}
- hw_breakpoint_restore();
+ if (!dbg_is_early)
+ hw_breakpoint_restore();
}
static int hw_break_reserve_slot(int breakno)
@@ -245,6 +244,9 @@ static int hw_break_reserve_slot(int breakno)
int cnt = 0;
struct perf_event **pevent;
+ if (dbg_is_early)
+ return 0;
+
for_each_online_cpu(cpu) {
cnt++;
pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
@@ -270,6 +272,9 @@ static int hw_break_release_slot(int breakno)
struct perf_event **pevent;
int cpu;
+ if (dbg_is_early)
+ return 0;
+
for_each_online_cpu(cpu) {
pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
if (dbg_release_bp_slot(*pevent))
@@ -314,7 +319,11 @@ static void kgdb_remove_all_hw_break(void)
bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
if (bp->attr.disabled == 1)
continue;
- arch_uninstall_hw_breakpoint(bp);
+ if (dbg_is_early)
+ early_dr7 &= ~encode_dr7(i, breakinfo[i].len,
+ breakinfo[i].type);
+ else
+ arch_uninstall_hw_breakpoint(bp);
bp->attr.disabled = 1;
}
}
@@ -391,6 +400,11 @@ void kgdb_disable_hw_debug(struct pt_regs *regs)
for (i = 0; i < 4; i++) {
if (!breakinfo[i].enabled)
continue;
+ if (dbg_is_early) {
+ early_dr7 &= ~encode_dr7(i, breakinfo[i].len,
+ breakinfo[i].type);
+ continue;
+ }
bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
if (bp->attr.disabled == 1)
continue;
@@ -399,23 +413,6 @@ void kgdb_disable_hw_debug(struct pt_regs *regs)
}
}
-/**
- * kgdb_post_primary_code - Save error vector/code numbers.
- * @regs: Original pt_regs.
- * @e_vector: Original error vector.
- * @err_code: Original error code.
- *
- * This is needed on architectures which support SMP and KGDB.
- * This function is called after all the slave cpus have been put
- * to a known spin state and the primary CPU has control over KGDB.
- */
-void kgdb_post_primary_code(struct pt_regs *regs, int e_vector, int err_code)
-{
- /* primary processor is completely in the debugger */
- gdb_x86vector = e_vector;
- gdb_x86errcode = err_code;
-}
-
#ifdef CONFIG_SMP
/**
* kgdb_roundup_cpus - Get other CPUs into a holding pattern
@@ -567,7 +564,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
return NOTIFY_DONE;
}
- if (kgdb_handle_exception(args->trapnr, args->signr, args->err, regs))
+ if (kgdb_handle_exception(args->trapnr, args->signr, cmd, regs))
return NOTIFY_DONE;
/* Must touch watchdog before return to normal operation */
@@ -575,6 +572,26 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
return NOTIFY_STOP;
}
+#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
+int kgdb_ll_trap(int cmd, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig)
+{
+ struct die_args args = {
+ .regs = regs,
+ .str = str,
+ .err = err,
+ .trapnr = trap,
+ .signr = sig,
+
+ };
+
+ if (!kgdb_io_module_registered)
+ return NOTIFY_DONE;
+
+ return __kgdb_notify(&args, cmd);
+}
+#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
+
static int
kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
{
@@ -605,14 +622,15 @@ static struct notifier_block kgdb_notifier = {
*/
int kgdb_arch_init(void)
{
+ return register_die_notifier(&kgdb_notifier);
+}
+
+void kgdb_arch_late(void)
+{
int i, cpu;
- int ret;
struct perf_event_attr attr;
struct perf_event **pevent;
- ret = register_die_notifier(&kgdb_notifier);
- if (ret != 0)
- return ret;
/*
 * Pre-allocate the hw breakpoint structures in the non-atomic
 * portion of kgdb because this operation requires mutexes to
@@ -624,12 +642,15 @@ int kgdb_arch_init(void)
attr.bp_type = HW_BREAKPOINT_W;
attr.disabled = 1;
for (i = 0; i < 4; i++) {
+ if (breakinfo[i].pev)
+ continue;
breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL);
if (IS_ERR(breakinfo[i].pev)) {
- printk(KERN_ERR "kgdb: Could not allocate hw breakpoints\n");
+ printk(KERN_ERR "kgdb: Could not allocate hw"
+ "breakpoints\nDisabling the kernel debugger\n");
breakinfo[i].pev = NULL;
kgdb_arch_exit();
- return -1;
+ return;
}
for_each_online_cpu(cpu) {
pevent = per_cpu_ptr(breakinfo[i].pev, cpu);
@@ -640,7 +661,6 @@ int kgdb_arch_init(void)
}
}
}
- return ret;
}
/**
@@ -690,6 +710,11 @@ unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
return instruction_pointer(regs);
}
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+ regs->ip = ip;
+}
+
struct kgdb_arch arch_kgdb_ops = {
/* Breakpoint instruction: */
.gdb_bpt_instr = { 0xcc },
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index b43bbaebe2c0..345a4b1fe144 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -422,14 +422,22 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
static void __kprobes clear_btf(void)
{
- if (test_thread_flag(TIF_DEBUGCTLMSR))
- update_debugctlmsr(0);
+ if (test_thread_flag(TIF_BLOCKSTEP)) {
+ unsigned long debugctl = get_debugctlmsr();
+
+ debugctl &= ~DEBUGCTLMSR_BTF;
+ update_debugctlmsr(debugctl);
+ }
}
static void __kprobes restore_btf(void)
{
- if (test_thread_flag(TIF_DEBUGCTLMSR))
- update_debugctlmsr(current->thread.debugctlmsr);
+ if (test_thread_flag(TIF_BLOCKSTEP)) {
+ unsigned long debugctl = get_debugctlmsr();
+
+ debugctl |= DEBUGCTLMSR_BTF;
+ update_debugctlmsr(debugctl);
+ }
}
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
@@ -534,20 +542,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
struct kprobe_ctlblk *kcb;
addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
- if (*addr != BREAKPOINT_INSTRUCTION) {
- /*
- * The breakpoint instruction was removed right
- * after we hit it. Another cpu has removed
- * either a probepoint or a debugger breakpoint
- * at this address. In either case, no further
- * handling of this interrupt is appropriate.
- * Back up over the (now missing) int3 and run
- * the original instruction.
- */
- regs->ip = (unsigned long)addr;
- return 1;
- }
-
/*
* We don't want to be preempted for the entire
* duration of kprobe processing. We conditionally
@@ -579,6 +573,19 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
setup_singlestep(p, regs, kcb, 0);
return 1;
}
+ } else if (*addr != BREAKPOINT_INSTRUCTION) {
+ /*
+ * The breakpoint instruction was removed right
+ * after we hit it. Another cpu has removed
+ * either a probepoint or a debugger breakpoint
+ * at this address. In either case, no further
+ * handling of this interrupt is appropriate.
+ * Back up over the (now missing) int3 and run
+ * the original instruction.
+ */
+ regs->ip = (unsigned long)addr;
+ preempt_enable_no_resched();
+ return 1;
} else if (kprobe_running()) {
p = __get_cpu_var(current_kprobe);
if (p->break_handler && p->break_handler(p, regs)) {
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index cceb5bc3c3c2..2cd8c544e41a 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -201,9 +201,9 @@ static int do_microcode_update(const void __user *buf, size_t size)
return error;
}
-static int microcode_open(struct inode *unused1, struct file *unused2)
+static int microcode_open(struct inode *inode, struct file *file)
{
- return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
+ return capable(CAP_SYS_RAWIO) ? nonseekable_open(inode, file) : -EPERM;
}
static ssize_t microcode_write(struct file *file, const char __user *buf,
diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
index 85a343e28937..356170262a93 100644
--- a/arch/x86/kernel/microcode_intel.c
+++ b/arch/x86/kernel/microcode_intel.c
@@ -343,10 +343,11 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
int (*get_ucode_data)(void *, const void *, size_t))
{
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
- u8 *ucode_ptr = data, *new_mc = NULL, *mc;
+ u8 *ucode_ptr = data, *new_mc = NULL, *mc = NULL;
int new_rev = uci->cpu_sig.rev;
unsigned int leftover = size;
enum ucode_state state = UCODE_OK;
+ unsigned int curr_mc_size = 0;
while (leftover) {
struct microcode_header_intel mc_header;
@@ -361,9 +362,15 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
break;
}
- mc = vmalloc(mc_size);
- if (!mc)
- break;
+ /* For performance reasons, reuse mc area when possible */
+ if (!mc || mc_size > curr_mc_size) {
+ if (mc)
+ vfree(mc);
+ mc = vmalloc(mc_size);
+ if (!mc)
+ break;
+ curr_mc_size = mc_size;
+ }
if (get_ucode_data(mc, ucode_ptr, mc_size) ||
microcode_sanity_check(mc) < 0) {
@@ -376,13 +383,16 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
vfree(new_mc);
new_rev = mc_header.rev;
new_mc = mc;
- } else
- vfree(mc);
+ mc = NULL; /* trigger new vmalloc */
+ }
ucode_ptr += mc_size;
leftover -= mc_size;
}
+ if (mc)
+ vfree(mc);
+
if (leftover) {
if (new_mc)
vfree(new_mc);
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index e81030f71a8f..5ae5d2426edf 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -115,21 +115,6 @@ static void __init MP_bus_info(struct mpc_bus *m)
printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
}
-static int bad_ioapic(unsigned long address)
-{
- if (nr_ioapics >= MAX_IO_APICS) {
- printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
- "(found %d)\n", MAX_IO_APICS, nr_ioapics);
- panic("Recompile kernel with bigger MAX_IO_APICS!\n");
- }
- if (!address) {
- printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
- " found in table, skipping!\n");
- return 1;
- }
- return 0;
-}
-
static void __init MP_ioapic_info(struct mpc_ioapic *m)
{
if (!(m->flags & MPC_APIC_USABLE))
@@ -138,15 +123,7 @@ static void __init MP_ioapic_info(struct mpc_ioapic *m)
printk(KERN_INFO "I/O APIC #%d Version %d at 0x%X.\n",
m->apicid, m->apicver, m->apicaddr);
- if (bad_ioapic(m->apicaddr))
- return;
-
- mp_ioapics[nr_ioapics].apicaddr = m->apicaddr;
- mp_ioapics[nr_ioapics].apicid = m->apicid;
- mp_ioapics[nr_ioapics].type = m->type;
- mp_ioapics[nr_ioapics].apicver = m->apicver;
- mp_ioapics[nr_ioapics].flags = m->flags;
- nr_ioapics++;
+ mp_register_ioapic(m->apicid, m->apicaddr, gsi_end + 1);
}
static void print_MP_intsrc_info(struct mpc_intsrc *m)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 28ad9f4d8b94..b2cfcf937463 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -20,7 +20,6 @@
#include <asm/idle.h>
#include <asm/uaccess.h>
#include <asm/i387.h>
-#include <asm/ds.h>
#include <asm/debugreg.h>
unsigned long idle_halt;
@@ -32,26 +31,21 @@ struct kmem_cache *task_xstate_cachep;
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
+ int ret;
+
*dst = *src;
- if (src->thread.xstate) {
- dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
- GFP_KERNEL);
- if (!dst->thread.xstate)
- return -ENOMEM;
- WARN_ON((unsigned long)dst->thread.xstate & 15);
- memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
+ if (fpu_allocated(&src->thread.fpu)) {
+ memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu));
+ ret = fpu_alloc(&dst->thread.fpu);
+ if (ret)
+ return ret;
+ fpu_copy(&dst->thread.fpu, &src->thread.fpu);
}
return 0;
}
-
void free_thread_xstate(struct task_struct *tsk)
{
- if (tsk->thread.xstate) {
- kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
- tsk->thread.xstate = NULL;
- }
-
- WARN(tsk->thread.ds_ctx, "leaking DS context\n");
+ fpu_free(&tsk->thread.fpu);
}
void free_thread_info(struct thread_info *ti)
@@ -198,11 +192,16 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
prev = &prev_p->thread;
next = &next_p->thread;
- if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) ||
- test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR))
- ds_switch_to(prev_p, next_p);
- else if (next->debugctlmsr != prev->debugctlmsr)
- update_debugctlmsr(next->debugctlmsr);
+ if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
+ test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
+ unsigned long debugctl = get_debugctlmsr();
+
+ debugctl &= ~DEBUGCTLMSR_BTF;
+ if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
+ debugctl |= DEBUGCTLMSR_BTF;
+
+ update_debugctlmsr(debugctl);
+ }
if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
test_tsk_thread_flag(next_p, TIF_NOTSC)) {
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index f6c62667e30c..8d128783af47 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -55,7 +55,6 @@
#include <asm/cpu.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
-#include <asm/ds.h>
#include <asm/debugreg.h>
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
@@ -238,13 +237,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
kfree(p->thread.io_bitmap_ptr);
p->thread.io_bitmap_max = 0;
}
-
- clear_tsk_thread_flag(p, TIF_DS_AREA_MSR);
- p->thread.ds_ctx = NULL;
-
- clear_tsk_thread_flag(p, TIF_DEBUGCTLMSR);
- p->thread.debugctlmsr = 0;
-
return err;
}
@@ -317,7 +309,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
/* we're going to use this soon, after a few expensive things */
if (preload_fpu)
- prefetch(next->xstate);
+ prefetch(next->fpu.state);
/*
* Reload esp0.
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 17cb3295cbf7..3c2422a99f1f 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -49,7 +49,6 @@
#include <asm/ia32.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
-#include <asm/ds.h>
#include <asm/debugreg.h>
asmlinkage extern void ret_from_fork(void);
@@ -313,13 +312,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
if (err)
goto out;
}
-
- clear_tsk_thread_flag(p, TIF_DS_AREA_MSR);
- p->thread.ds_ctx = NULL;
-
- clear_tsk_thread_flag(p, TIF_DEBUGCTLMSR);
- p->thread.debugctlmsr = 0;
-
err = 0;
out:
if (err && p->thread.io_bitmap_ptr) {
@@ -396,7 +388,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
/* we're going to use this soon, after a few expensive things */
if (preload_fpu)
- prefetch(next->xstate);
+ prefetch(next->fpu.state);
/*
* Reload esp0, LDT and the page table pointer:
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 2e9b55027b7e..70c4872cd8aa 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -2,9 +2,6 @@
/*
* Pentium III FXSR, SSE support
* Gareth Hughes <gareth@valinux.com>, May 2000
- *
- * BTS tracing
- * Markus Metzger <markus.t.metzger@intel.com>, Dec 2007
*/
#include <linux/kernel.h>
@@ -22,7 +19,6 @@
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
-#include <linux/workqueue.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
@@ -36,7 +32,6 @@
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/proto.h>
-#include <asm/ds.h>
#include <asm/hw_breakpoint.h>
#include "tls.h"
@@ -693,7 +688,7 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
struct perf_event_attr attr;
if (!t->ptrace_bps[nr]) {
- hw_breakpoint_init(&attr);
+ ptrace_breakpoint_init(&attr);
/*
* Put stub len and type to register (reserve) an inactive but
* correct bp
@@ -789,342 +784,6 @@ static int ioperm_get(struct task_struct *target,
0, IO_BITMAP_BYTES);
}
-#ifdef CONFIG_X86_PTRACE_BTS
-/*
- * A branch trace store context.
- *
- * Contexts may only be installed by ptrace_bts_config() and only for
- * ptraced tasks.
- *
- * Contexts are destroyed when the tracee is detached from the tracer.
- * The actual destruction work requires interrupts enabled, so the
- * work is deferred and will be scheduled during __ptrace_unlink().
- *
- * Contexts hold an additional task_struct reference on the traced
- * task, as well as a reference on the tracer's mm.
- *
- * Ptrace already holds a task_struct for the duration of ptrace operations,
- * but since destruction is deferred, it may be executed after both
- * tracer and tracee exited.
- */
-struct bts_context {
- /* The branch trace handle. */
- struct bts_tracer *tracer;
-
- /* The buffer used to store the branch trace and its size. */
- void *buffer;
- unsigned int size;
-
- /* The mm that paid for the above buffer. */
- struct mm_struct *mm;
-
- /* The task this context belongs to. */
- struct task_struct *task;
-
- /* The signal to send on a bts buffer overflow. */
- unsigned int bts_ovfl_signal;
-
- /* The work struct to destroy a context. */
- struct work_struct work;
-};
-
-static int alloc_bts_buffer(struct bts_context *context, unsigned int size)
-{
- void *buffer = NULL;
- int err = -ENOMEM;
-
- err = account_locked_memory(current->mm, current->signal->rlim, size);
- if (err < 0)
- return err;
-
- buffer = kzalloc(size, GFP_KERNEL);
- if (!buffer)
- goto out_refund;
-
- context->buffer = buffer;
- context->size = size;
- context->mm = get_task_mm(current);
-
- return 0;
-
- out_refund:
- refund_locked_memory(current->mm, size);
- return err;
-}
-
-static inline void free_bts_buffer(struct bts_context *context)
-{
- if (!context->buffer)
- return;
-
- kfree(context->buffer);
- context->buffer = NULL;
-
- refund_locked_memory(context->mm, context->size);
- context->size = 0;
-
- mmput(context->mm);
- context->mm = NULL;
-}
-
-static void free_bts_context_work(struct work_struct *w)
-{
- struct bts_context *context;
-
- context = container_of(w, struct bts_context, work);
-
- ds_release_bts(context->tracer);
- put_task_struct(context->task);
- free_bts_buffer(context);
- kfree(context);
-}
-
-static inline void free_bts_context(struct bts_context *context)
-{
- INIT_WORK(&context->work, free_bts_context_work);
- schedule_work(&context->work);
-}
-
-static inline struct bts_context *alloc_bts_context(struct task_struct *task)
-{
- struct bts_context *context = kzalloc(sizeof(*context), GFP_KERNEL);
- if (context) {
- context->task = task;
- task->bts = context;
-
- get_task_struct(task);
- }
-
- return context;
-}
-
-static int ptrace_bts_read_record(struct task_struct *child, size_t index,
- struct bts_struct __user *out)
-{
- struct bts_context *context;
- const struct bts_trace *trace;
- struct bts_struct bts;
- const unsigned char *at;
- int error;
-
- context = child->bts;
- if (!context)
- return -ESRCH;
-
- trace = ds_read_bts(context->tracer);
- if (!trace)
- return -ESRCH;
-
- at = trace->ds.top - ((index + 1) * trace->ds.size);
- if ((void *)at < trace->ds.begin)
- at += (trace->ds.n * trace->ds.size);
-
- if (!trace->read)
- return -EOPNOTSUPP;
-
- error = trace->read(context->tracer, at, &bts);
- if (error < 0)
- return error;
-
- if (copy_to_user(out, &bts, sizeof(bts)))
- return -EFAULT;
-
- return sizeof(bts);
-}
-
-static int ptrace_bts_drain(struct task_struct *child,
- long size,
- struct bts_struct __user *out)
-{
- struct bts_context *context;
- const struct bts_trace *trace;
- const unsigned char *at;
- int error, drained = 0;
-
- context = child->bts;
- if (!context)
- return -ESRCH;
-
- trace = ds_read_bts(context->tracer);
- if (!trace)
- return -ESRCH;
-
- if (!trace->read)
- return -EOPNOTSUPP;
-
- if (size < (trace->ds.top - trace->ds.begin))
- return -EIO;
-
- for (at = trace->ds.begin; (void *)at < trace->ds.top;
- out++, drained++, at += trace->ds.size) {
- struct bts_struct bts;
-
- error = trace->read(context->tracer, at, &bts);
- if (error < 0)
- return error;
-
- if (copy_to_user(out, &bts, sizeof(bts)))
- return -EFAULT;
- }
-
- memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size);
-
- error = ds_reset_bts(context->tracer);
- if (error < 0)
- return error;
-
- return drained;
-}
-
-static int ptrace_bts_config(struct task_struct *child,
- long cfg_size,
- const struct ptrace_bts_config __user *ucfg)
-{
- struct bts_context *context;
- struct ptrace_bts_config cfg;
- unsigned int flags = 0;
-
- if (cfg_size < sizeof(cfg))
- return -EIO;
-
- if (copy_from_user(&cfg, ucfg, sizeof(cfg)))
- return -EFAULT;
-
- context = child->bts;
- if (!context)
- context = alloc_bts_context(child);
- if (!context)
- return -ENOMEM;
-
- if (cfg.flags & PTRACE_BTS_O_SIGNAL) {
- if (!cfg.signal)
- return -EINVAL;
-
- return -EOPNOTSUPP;
- context->bts_ovfl_signal = cfg.signal;
- }
-
- ds_release_bts(context->tracer);
- context->tracer = NULL;
-
- if ((cfg.flags & PTRACE_BTS_O_ALLOC) && (cfg.size != context->size)) {
- int err;
-
- free_bts_buffer(context);
- if (!cfg.size)
- return 0;
-
- err = alloc_bts_buffer(context, cfg.size);
- if (err < 0)
- return err;
- }
-
- if (cfg.flags & PTRACE_BTS_O_TRACE)
- flags |= BTS_USER;
-
- if (cfg.flags & PTRACE_BTS_O_SCHED)
- flags |= BTS_TIMESTAMPS;
-
- context->tracer =
- ds_request_bts_task(child, context->buffer, context->size,
- NULL, (size_t)-1, flags);
- if (unlikely(IS_ERR(context->tracer))) {
- int error = PTR_ERR(context->tracer);
-
- free_bts_buffer(context);
- context->tracer = NULL;
- return error;
- }
-
- return sizeof(cfg);
-}
-
-static int ptrace_bts_status(struct task_struct *child,
- long cfg_size,
- struct ptrace_bts_config __user *ucfg)
-{
- struct bts_context *context;
- const struct bts_trace *trace;
- struct ptrace_bts_config cfg;
-
- context = child->bts;
- if (!context)
- return -ESRCH;
-
- if (cfg_size < sizeof(cfg))
- return -EIO;
-
- trace = ds_read_bts(context->tracer);
- if (!trace)
- return -ESRCH;
-
- memset(&cfg, 0, sizeof(cfg));
- cfg.size = trace->ds.end - trace->ds.begin;
- cfg.signal = context->bts_ovfl_signal;
- cfg.bts_size = sizeof(struct bts_struct);
-
- if (cfg.signal)
- cfg.flags |= PTRACE_BTS_O_SIGNAL;
-
- if (trace->ds.flags & BTS_USER)
- cfg.flags |= PTRACE_BTS_O_TRACE;
-
- if (trace->ds.flags & BTS_TIMESTAMPS)
- cfg.flags |= PTRACE_BTS_O_SCHED;
-
- if (copy_to_user(ucfg, &cfg, sizeof(cfg)))
- return -EFAULT;
-
- return sizeof(cfg);
-}
-
-static int ptrace_bts_clear(struct task_struct *child)
-{
- struct bts_context *context;
- const struct bts_trace *trace;
-
- context = child->bts;
- if (!context)
- return -ESRCH;
-
- trace = ds_read_bts(context->tracer);
- if (!trace)
- return -ESRCH;
-
- memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size);
-
- return ds_reset_bts(context->tracer);
-}
-
-static int ptrace_bts_size(struct task_struct *child)
-{
- struct bts_context *context;
- const struct bts_trace *trace;
-
- context = child->bts;
- if (!context)
- return -ESRCH;
-
- trace = ds_read_bts(context->tracer);
- if (!trace)
- return -ESRCH;
-
- return (trace->ds.top - trace->ds.begin) / trace->ds.size;
-}
-
-/*
- * Called from __ptrace_unlink() after the child has been moved back
- * to its original parent.
- */
-void ptrace_bts_untrace(struct task_struct *child)
-{
- if (unlikely(child->bts)) {
- free_bts_context(child->bts);
- child->bts = NULL;
- }
-}
-#endif /* CONFIG_X86_PTRACE_BTS */
-
/*
* Called by kernel/ptrace.c when detaching..
*
@@ -1252,39 +911,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
#endif
- /*
- * These bits need more cooking - not enabled yet:
- */
-#ifdef CONFIG_X86_PTRACE_BTS
- case PTRACE_BTS_CONFIG:
- ret = ptrace_bts_config
- (child, data, (struct ptrace_bts_config __user *)addr);
- break;
-
- case PTRACE_BTS_STATUS:
- ret = ptrace_bts_status
- (child, data, (struct ptrace_bts_config __user *)addr);
- break;
-
- case PTRACE_BTS_SIZE:
- ret = ptrace_bts_size(child);
- break;
-
- case PTRACE_BTS_GET:
- ret = ptrace_bts_read_record
- (child, data, (struct bts_struct __user *) addr);
- break;
-
- case PTRACE_BTS_CLEAR:
- ret = ptrace_bts_clear(child);
- break;
-
- case PTRACE_BTS_DRAIN:
- ret = ptrace_bts_drain
- (child, data, (struct bts_struct __user *) addr);
- break;
-#endif /* CONFIG_X86_PTRACE_BTS */
-
default:
ret = ptrace_request(child, request, addr, data);
break;
@@ -1544,14 +1170,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
case PTRACE_GET_THREAD_AREA:
case PTRACE_SET_THREAD_AREA:
-#ifdef CONFIG_X86_PTRACE_BTS
- case PTRACE_BTS_CONFIG:
- case PTRACE_BTS_STATUS:
- case PTRACE_BTS_SIZE:
- case PTRACE_BTS_GET:
- case PTRACE_BTS_CLEAR:
- case PTRACE_BTS_DRAIN:
-#endif /* CONFIG_X86_PTRACE_BTS */
return arch_ptrace(child, request, addr, data);
default:
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 12e9feaa2f7a..cd2c336c8d09 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -495,10 +495,15 @@ void force_hpet_resume(void)
/*
* HPET MSI on some boards (ATI SB700/SB800) has side effect on
* floppy DMA. Disable HPET MSI on such platforms.
+ *
+ * Also force the read back of the CMP register in hpet_next_event()
+ * to work around the problem that the CMP register write seems to be
+ * delayed. See hpet_next_event() for details.
*/
static void force_disable_hpet_msi(struct pci_dev *unused)
{
hpet_msi_disable = 1;
+ hpet_readback_cmp = 1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
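The hpet_readback_cmp flag set here is consumed in arch/x86/kernel/hpet.c, which is not part of this hunk. A minimal sketch of what that consumer plausibly looks like, reusing the existing hpet_readl()/hpet_writel()/HPET_Tn_CMP() helpers; illustrative only, not the actual hpet.c change:

/* Sketch only (assumed, not from this diff): force a read back of the
 * comparator so a delayed CMP write on SB700/SB800 is not lost. */
static int hpet_next_event(unsigned long delta,
                           struct clock_event_device *evt, int timer)
{
        u32 cnt;

        cnt = hpet_readl(HPET_COUNTER);
        cnt += (u32) delta;
        hpet_writel(cnt, HPET_Tn_CMP(timer));

        if (hpet_readback_cmp)
                hpet_readl(HPET_Tn_CMP(timer)); /* flush the posted write */

        return (s32)(hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
}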
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index c4851eff57b3..e8029896309a 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -725,6 +725,7 @@ void __init setup_arch(char **cmdline_p)
/* VMI may relocate the fixmap; do this before touching ioremap area */
vmi_init();
+ early_trap_init();
early_cpu_init();
early_ioremap_init();
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index ef6370b00e70..4e95d38ac1ab 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -247,7 +247,7 @@ void __init setup_per_cpu_areas(void)
#endif
#endif
/*
- * Up to this point, the boot CPU has been using .data.init
+ * Up to this point, the boot CPU has been using .init.data
* area. Reload any changed state for the boot CPU.
*/
if (cpu == boot_cpu_id)
diff --git a/arch/x86/kernel/sfi.c b/arch/x86/kernel/sfi.c
index 34e099382651..7ded57896c0a 100644
--- a/arch/x86/kernel/sfi.c
+++ b/arch/x86/kernel/sfi.c
@@ -81,7 +81,6 @@ static int __init sfi_parse_cpus(struct sfi_table_header *table)
#endif /* CONFIG_X86_LOCAL_APIC */
#ifdef CONFIG_X86_IO_APIC
-static u32 gsi_base;
static int __init sfi_parse_ioapic(struct sfi_table_header *table)
{
@@ -94,8 +93,7 @@ static int __init sfi_parse_ioapic(struct sfi_table_header *table)
pentry = (struct sfi_apic_table_entry *)sb->pentry;
for (i = 0; i < num; i++) {
- mp_register_ioapic(i, pentry->phys_addr, gsi_base);
- gsi_base += io_apic_get_redir_entries(i);
+ mp_register_ioapic(i, pentry->phys_addr, gsi_end + 1);
pentry++;
}
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
index 3149032ff107..58de45ee08b6 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -158,22 +158,6 @@ static int enable_single_step(struct task_struct *child)
}
/*
- * Install this value in MSR_IA32_DEBUGCTLMSR whenever child is running.
- */
-static void write_debugctlmsr(struct task_struct *child, unsigned long val)
-{
- if (child->thread.debugctlmsr == val)
- return;
-
- child->thread.debugctlmsr = val;
-
- if (child != current)
- return;
-
- update_debugctlmsr(val);
-}
-
-/*
* Enable single or block step.
*/
static void enable_step(struct task_struct *child, bool block)
@@ -186,15 +170,17 @@ static void enable_step(struct task_struct *child, bool block)
* that uses user-mode single stepping itself.
*/
if (enable_single_step(child) && block) {
- set_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
- write_debugctlmsr(child,
- child->thread.debugctlmsr | DEBUGCTLMSR_BTF);
- } else {
- write_debugctlmsr(child,
- child->thread.debugctlmsr & ~DEBUGCTLMSR_BTF);
-
- if (!child->thread.debugctlmsr)
- clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
+ unsigned long debugctl = get_debugctlmsr();
+
+ debugctl |= DEBUGCTLMSR_BTF;
+ update_debugctlmsr(debugctl);
+ set_tsk_thread_flag(child, TIF_BLOCKSTEP);
+ } else if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) {
+ unsigned long debugctl = get_debugctlmsr();
+
+ debugctl &= ~DEBUGCTLMSR_BTF;
+ update_debugctlmsr(debugctl);
+ clear_tsk_thread_flag(child, TIF_BLOCKSTEP);
}
}
@@ -213,11 +199,13 @@ void user_disable_single_step(struct task_struct *child)
/*
* Make sure block stepping (BTF) is disabled.
*/
- write_debugctlmsr(child,
- child->thread.debugctlmsr & ~DEBUGCTLMSR_BTF);
+ if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) {
+ unsigned long debugctl = get_debugctlmsr();
- if (!child->thread.debugctlmsr)
- clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
+ debugctl &= ~DEBUGCTLMSR_BTF;
+ update_debugctlmsr(debugctl);
+ clear_tsk_thread_flag(child, TIF_BLOCKSTEP);
+ }
/* Always clear TIF_SINGLESTEP... */
clear_tsk_thread_flag(child, TIF_SINGLESTEP);
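The BTF/TIF_BLOCKSTEP path above is driven from userspace by PTRACE_SINGLEBLOCK, which resumes the tracee until the next branch rather than the next instruction. A self-contained tracer sketch, assuming the x86 request value 33 from asm/ptrace-abi.h in case the libc header does not define it:

#include <unistd.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

#ifndef PTRACE_SINGLEBLOCK
#define PTRACE_SINGLEBLOCK 33          /* x86 value from asm/ptrace-abi.h */
#endif

int main(void)
{
        pid_t pid = fork();

        if (pid == 0) {
                ptrace(PTRACE_TRACEME, 0, NULL, NULL);
                execlp("/bin/true", "true", (char *)NULL);
                _exit(1);
        }
        waitpid(pid, NULL, 0);          /* child stops at exec */
        /* resume until the next branch; the kernel sets DEBUGCTLMSR_BTF and
         * TIF_BLOCKSTEP for the tracee, as in enable_step() above */
        ptrace(PTRACE_SINGLEBLOCK, pid, NULL, NULL);
        waitpid(pid, NULL, 0);
        ptrace(PTRACE_CONT, pid, NULL, NULL);
        return 0;
}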
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
index 8b3729341216..07ad5eb7cc5c 100644
--- a/arch/x86/kernel/syscall_table_32.S
+++ b/arch/x86/kernel/syscall_table_32.S
@@ -337,3 +337,5 @@ ENTRY(sys_call_table)
.long sys_rt_tgsigqueueinfo /* 335 */
.long sys_perf_event_open
.long sys_recvmmsg
+ .long sys_fanotify_init
+ .long sys_fanotify_mark
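Slots 338 and 339 hook up the new fanotify system calls on 32-bit x86. A hedged userspace sketch of the interface they expose, assuming the fanotify_init()/fanotify_mark() wrappers and FAN_* constants from <sys/fanotify.h>, none of which are part of this hunk:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/fanotify.h>

int main(void)
{
        int fd = fanotify_init(FAN_CLASS_NOTIF, O_RDONLY);

        if (fd < 0) {
                perror("fanotify_init");
                return 1;
        }
        /* watch open events on the mount containing "/" */
        if (fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
                          FAN_OPEN, AT_FDCWD, "/") < 0)
                perror("fanotify_mark");
        close(fd);
        return 0;
}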
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index 86c9f91b48ae..cc2c60474fd0 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -175,6 +175,9 @@ static void add_mac_region(phys_addr_t start, unsigned long size)
struct tboot_mac_region *mr;
phys_addr_t end = start + size;
+ if (tboot->num_mac_regions >= MAX_TB_MAC_REGIONS)
+ panic("tboot: Too many MAC regions\n");
+
if (start && size) {
mr = &tboot->mac_regions[tboot->num_mac_regions++];
mr->start = round_down(start, PAGE_SIZE);
@@ -184,18 +187,17 @@ static void add_mac_region(phys_addr_t start, unsigned long size)
static int tboot_setup_sleep(void)
{
+ int i;
+
tboot->num_mac_regions = 0;
- /* S3 resume code */
- add_mac_region(acpi_wakeup_address, WAKEUP_SIZE);
+ for (i = 0; i < e820.nr_map; i++) {
+ if ((e820.map[i].type != E820_RAM)
+ && (e820.map[i].type != E820_RESERVED_KERN))
+ continue;
-#ifdef CONFIG_X86_TRAMPOLINE
- /* AP trampoline code */
- add_mac_region(virt_to_phys(trampoline_base), TRAMPOLINE_SIZE);
-#endif
-
- /* kernel code + data + bss */
- add_mac_region(virt_to_phys(_text), _end - _text);
+ add_mac_region(e820.map[i].addr, e820.map[i].size);
+ }
tboot->acpi_sinfo.kernel_s3_resume_vector = acpi_wakeup_address;
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index 17b03dd3a6b5..7fea555929e2 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -1,7 +1,7 @@
/*
* SGI UltraViolet TLB flush routines.
*
- * (c) 2008 Cliff Wickman <cpw@sgi.com>, SGI.
+ * (c) 2008-2010 Cliff Wickman <cpw@sgi.com>, SGI.
*
* This code is released under the GNU General Public License version 2 or
* later.
@@ -20,42 +20,67 @@
#include <asm/idle.h>
#include <asm/tsc.h>
#include <asm/irq_vectors.h>
+#include <asm/timer.h>
-static struct bau_control **uv_bau_table_bases __read_mostly;
-static int uv_bau_retry_limit __read_mostly;
+struct msg_desc {
+ struct bau_payload_queue_entry *msg;
+ int msg_slot;
+ int sw_ack_slot;
+ struct bau_payload_queue_entry *va_queue_first;
+ struct bau_payload_queue_entry *va_queue_last;
+};
-/* base pnode in this partition */
-static int uv_partition_base_pnode __read_mostly;
+#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x000000000bUL
+
+static int uv_bau_max_concurrent __read_mostly;
+
+static int nobau;
+static int __init setup_nobau(char *arg)
+{
+ nobau = 1;
+ return 0;
+}
+early_param("nobau", setup_nobau);
-static unsigned long uv_mmask __read_mostly;
+/* base pnode in this partition */
+static int uv_partition_base_pnode __read_mostly;
+/* position of pnode (which is nasid>>1): */
+static int uv_nshift __read_mostly;
+static unsigned long uv_mmask __read_mostly;
static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
static DEFINE_PER_CPU(struct bau_control, bau_control);
+static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);
+
+struct reset_args {
+ int sender;
+};
/*
- * Determine the first node on a blade.
+ * Determine the first node on a uvhub. 'Nodes' are used for kernel
+ * memory allocation.
*/
-static int __init blade_to_first_node(int blade)
+static int __init uvhub_to_first_node(int uvhub)
{
int node, b;
for_each_online_node(node) {
b = uv_node_to_blade_id(node);
- if (blade == b)
+ if (uvhub == b)
return node;
}
- return -1; /* shouldn't happen */
+ return -1;
}
/*
- * Determine the apicid of the first cpu on a blade.
+ * Determine the apicid of the first cpu on a uvhub.
*/
-static int __init blade_to_first_apicid(int blade)
+static int __init uvhub_to_first_apicid(int uvhub)
{
int cpu;
for_each_present_cpu(cpu)
- if (blade == uv_cpu_to_blade_id(cpu))
+ if (uvhub == uv_cpu_to_blade_id(cpu))
return per_cpu(x86_cpu_to_apicid, cpu);
return -1;
}
@@ -68,195 +93,459 @@ static int __init blade_to_first_apicid(int blade)
* clear of the Timeout bit (as well) will free the resource. No reply will
* be sent (the hardware will only do one reply per message).
*/
-static void uv_reply_to_message(int resource,
- struct bau_payload_queue_entry *msg,
- struct bau_msg_status *msp)
+static inline void uv_reply_to_message(struct msg_desc *mdp,
+ struct bau_control *bcp)
{
unsigned long dw;
+ struct bau_payload_queue_entry *msg;
- dw = (1 << (resource + UV_SW_ACK_NPENDING)) | (1 << resource);
+ msg = mdp->msg;
+ if (!msg->canceled) {
+ dw = (msg->sw_ack_vector << UV_SW_ACK_NPENDING) |
+ msg->sw_ack_vector;
+ uv_write_local_mmr(
+ UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, dw);
+ }
msg->replied_to = 1;
msg->sw_ack_vector = 0;
- if (msp)
- msp->seen_by.bits = 0;
- uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, dw);
}
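The single MMR write above frees a software-ack resource by setting both the resource bit and its pending-timeout shadow. A standalone worked example of how that word is composed, assuming UV_SW_ACK_NPENDING is 8 as in asm/uv/uv_bau.h:

#include <stdio.h>

#define UV_SW_ACK_NPENDING 8    /* assumed, matches asm/uv/uv_bau.h */

int main(void)
{
        unsigned long sw_ack_vector = 0x5;      /* resources 0 and 2 held */
        unsigned long dw = (sw_ack_vector << UV_SW_ACK_NPENDING) | sw_ack_vector;

        /* bits 0-7 clear the ack resources, bits 8-15 their timeout shadows */
        printf("alias write: 0x%lx\n", dw);     /* prints 0x505 */
        return 0;
}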
/*
- * Do all the things a cpu should do for a TLB shootdown message.
- * Other cpu's may come here at the same time for this message.
+ * Process the receipt of a RETRY message
*/
-static void uv_bau_process_message(struct bau_payload_queue_entry *msg,
- int msg_slot, int sw_ack_slot)
+static inline void uv_bau_process_retry_msg(struct msg_desc *mdp,
+ struct bau_control *bcp)
{
- unsigned long this_cpu_mask;
- struct bau_msg_status *msp;
- int cpu;
+ int i;
+ int cancel_count = 0;
+ int slot2;
+ unsigned long msg_res;
+ unsigned long mmr = 0;
+ struct bau_payload_queue_entry *msg;
+ struct bau_payload_queue_entry *msg2;
+ struct ptc_stats *stat;
- msp = __get_cpu_var(bau_control).msg_statuses + msg_slot;
- cpu = uv_blade_processor_id();
- msg->number_of_cpus =
- uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id()));
- this_cpu_mask = 1UL << cpu;
- if (msp->seen_by.bits & this_cpu_mask)
- return;
- atomic_or_long(&msp->seen_by.bits, this_cpu_mask);
+ msg = mdp->msg;
+ stat = &per_cpu(ptcstats, bcp->cpu);
+ stat->d_retries++;
+ /*
+ * cancel any message from msg+1 to the retry itself
+ */
+ for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
+ if (msg2 > mdp->va_queue_last)
+ msg2 = mdp->va_queue_first;
+ if (msg2 == msg)
+ break;
+
+ /* same conditions for cancellation as uv_do_reset */
+ if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
+ (msg2->sw_ack_vector) && ((msg2->sw_ack_vector &
+ msg->sw_ack_vector) == 0) &&
+ (msg2->sending_cpu == msg->sending_cpu) &&
+ (msg2->msg_type != MSG_NOOP)) {
+ slot2 = msg2 - mdp->va_queue_first;
+ mmr = uv_read_local_mmr
+ (UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
+ msg_res = ((msg2->sw_ack_vector << 8) |
+ msg2->sw_ack_vector);
+ /*
+ * This is a message retry; clear the resources held
+ * by the previous message only if they timed out.
+ * If it has not timed out we have an unexpected
+ * situation to report.
+ */
+ if (mmr & (msg_res << 8)) {
+ /*
+ * is the resource timed out?
+ * make everyone ignore the cancelled message.
+ */
+ msg2->canceled = 1;
+ stat->d_canceled++;
+ cancel_count++;
+ uv_write_local_mmr(
+ UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
+ (msg_res << 8) | msg_res);
+ } else
+ printk(KERN_INFO "note bau retry: no effect\n");
+ }
+ }
+ if (!cancel_count)
+ stat->d_nocanceled++;
+}
- if (msg->replied_to == 1)
- return;
+/*
+ * Do all the things a cpu should do for a TLB shootdown message.
+ * Other cpu's may come here at the same time for this message.
+ */
+static void uv_bau_process_message(struct msg_desc *mdp,
+ struct bau_control *bcp)
+{
+ int msg_ack_count;
+ short socket_ack_count = 0;
+ struct ptc_stats *stat;
+ struct bau_payload_queue_entry *msg;
+ struct bau_control *smaster = bcp->socket_master;
+ /*
+ * This must be a normal message, or retry of a normal message
+ */
+ msg = mdp->msg;
+ stat = &per_cpu(ptcstats, bcp->cpu);
if (msg->address == TLB_FLUSH_ALL) {
local_flush_tlb();
- __get_cpu_var(ptcstats).alltlb++;
+ stat->d_alltlb++;
} else {
__flush_tlb_one(msg->address);
- __get_cpu_var(ptcstats).onetlb++;
+ stat->d_onetlb++;
}
+ stat->d_requestee++;
+
+ /*
+ * One cpu on each uvhub has the additional job on a RETRY
+ * of releasing the resource held by the message that is
+ * being retried. That message is identified by sending
+ * cpu number.
+ */
+ if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
+ uv_bau_process_retry_msg(mdp, bcp);
- __get_cpu_var(ptcstats).requestee++;
+ /*
+ * This is a sw_ack message, so we have to reply to it.
+ * Count each responding cpu on the socket. This avoids
+ * pinging the count's cache line back and forth between
+ * the sockets.
+ */
+ socket_ack_count = atomic_add_short_return(1, (struct atomic_short *)
+ &smaster->socket_acknowledge_count[mdp->msg_slot]);
+ if (socket_ack_count == bcp->cpus_in_socket) {
+ /*
+ * Both sockets dump their completed count total into
+ * the message's count.
+ */
+ smaster->socket_acknowledge_count[mdp->msg_slot] = 0;
+ msg_ack_count = atomic_add_short_return(socket_ack_count,
+ (struct atomic_short *)&msg->acknowledge_count);
+
+ if (msg_ack_count == bcp->cpus_in_uvhub) {
+ /*
+ * All cpus in uvhub saw it; reply
+ */
+ uv_reply_to_message(mdp, bcp);
+ }
+ }
- atomic_inc_short(&msg->acknowledge_count);
- if (msg->number_of_cpus == msg->acknowledge_count)
- uv_reply_to_message(sw_ack_slot, msg, msp);
+ return;
}
/*
- * Examine the payload queue on one distribution node to see
- * which messages have not been seen, and which cpu(s) have not seen them.
+ * Determine the first cpu on a uvhub.
+ */
+static int uvhub_to_first_cpu(int uvhub)
+{
+ int cpu;
+ for_each_present_cpu(cpu)
+ if (uvhub == uv_cpu_to_blade_id(cpu))
+ return cpu;
+ return -1;
+}
+
+/*
+ * Last resort when we get a large number of destination timeouts is
+ * to clear resources held by a given cpu.
+ * Do this with IPI so that all messages in the BAU message queue
+ * can be identified by their nonzero sw_ack_vector field.
*
- * Returns the number of cpu's that have not responded.
+ * This is entered for a single cpu on the uvhub.
+ * The sender wants this uvhub to free a specific message's
+ * sw_ack resources.
*/
-static int uv_examine_destination(struct bau_control *bau_tablesp, int sender)
+static void
+uv_do_reset(void *ptr)
{
- struct bau_payload_queue_entry *msg;
- struct bau_msg_status *msp;
- int count = 0;
int i;
- int j;
+ int slot;
+ int count = 0;
+ unsigned long mmr;
+ unsigned long msg_res;
+ struct bau_control *bcp;
+ struct reset_args *rap;
+ struct bau_payload_queue_entry *msg;
+ struct ptc_stats *stat;
- for (msg = bau_tablesp->va_queue_first, i = 0; i < DEST_Q_SIZE;
- msg++, i++) {
- if ((msg->sending_cpu == sender) && (!msg->replied_to)) {
- msp = bau_tablesp->msg_statuses + i;
- printk(KERN_DEBUG
- "blade %d: address:%#lx %d of %d, not cpu(s): ",
- i, msg->address, msg->acknowledge_count,
- msg->number_of_cpus);
- for (j = 0; j < msg->number_of_cpus; j++) {
- if (!((1L << j) & msp->seen_by.bits)) {
- count++;
- printk("%d ", j);
- }
+ bcp = &per_cpu(bau_control, smp_processor_id());
+ rap = (struct reset_args *)ptr;
+ stat = &per_cpu(ptcstats, bcp->cpu);
+ stat->d_resets++;
+
+ /*
+ * We're looking for the given sender, and
+ * will free its sw_ack resource.
+ * If all cpu's finally responded after the timeout, its
+ * message 'replied_to' was set.
+ */
+ for (msg = bcp->va_queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
+ /* uv_do_reset: same conditions for cancellation as
+ uv_bau_process_retry_msg() */
+ if ((msg->replied_to == 0) &&
+ (msg->canceled == 0) &&
+ (msg->sending_cpu == rap->sender) &&
+ (msg->sw_ack_vector) &&
+ (msg->msg_type != MSG_NOOP)) {
+ /*
+ * make everyone else ignore this message
+ */
+ msg->canceled = 1;
+ slot = msg - bcp->va_queue_first;
+ count++;
+ /*
+ * only reset the resource if it is still pending
+ */
+ mmr = uv_read_local_mmr
+ (UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
+ msg_res = ((msg->sw_ack_vector << 8) |
+ msg->sw_ack_vector);
+ if (mmr & msg_res) {
+ stat->d_rcanceled++;
+ uv_write_local_mmr(
+ UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
+ msg_res);
}
- printk("\n");
}
}
- return count;
+ return;
}
/*
- * Examine the payload queue on all the distribution nodes to see
- * which messages have not been seen, and which cpu(s) have not seen them.
- *
- * Returns the number of cpu's that have not responded.
+ * Use IPI to get all target uvhubs to release resources held by
+ * a given sending cpu number.
*/
-static int uv_examine_destinations(struct bau_target_nodemask *distribution)
+static void uv_reset_with_ipi(struct bau_target_uvhubmask *distribution,
+ int sender)
{
- int sender;
- int i;
- int count = 0;
+ int uvhub;
+ int cpu;
+ cpumask_t mask;
+ struct reset_args reset_args;
+
+ reset_args.sender = sender;
- sender = smp_processor_id();
- for (i = 0; i < sizeof(struct bau_target_nodemask) * BITSPERBYTE; i++) {
- if (!bau_node_isset(i, distribution))
+ cpus_clear(mask);
+ /* find a single cpu for each uvhub in this distribution mask */
+ for (uvhub = 0;
+ uvhub < sizeof(struct bau_target_uvhubmask) * BITSPERBYTE;
+ uvhub++) {
+ if (!bau_uvhub_isset(uvhub, distribution))
continue;
- count += uv_examine_destination(uv_bau_table_bases[i], sender);
+ /* find a cpu for this uvhub */
+ cpu = uvhub_to_first_cpu(uvhub);
+ cpu_set(cpu, mask);
}
- return count;
+ /* IPI all cpus; Preemption is already disabled */
+ smp_call_function_many(&mask, uv_do_reset, (void *)&reset_args, 1);
+ return;
+}
+
+static inline unsigned long
+cycles_2_us(unsigned long long cyc)
+{
+ unsigned long long ns;
+ unsigned long us;
+ ns = (cyc * per_cpu(cyc2ns, smp_processor_id()))
+ >> CYC2NS_SCALE_FACTOR;
+ us = ns / 1000;
+ return us;
}
/*
- * wait for completion of a broadcast message
- *
- * return COMPLETE, RETRY or GIVEUP
+ * wait for all cpus on this hub to finish their sends and go quiet
+ * leaves uvhub_quiesce set so that no new broadcasts are started by
+ * bau_flush_send_and_wait()
+ */
+static inline void
+quiesce_local_uvhub(struct bau_control *hmaster)
+{
+ atomic_add_short_return(1, (struct atomic_short *)
+ &hmaster->uvhub_quiesce);
+}
+
+/*
+ * mark this quiet-requestor as done
+ */
+static inline void
+end_uvhub_quiesce(struct bau_control *hmaster)
+{
+ atomic_add_short_return(-1, (struct atomic_short *)
+ &hmaster->uvhub_quiesce);
+}
+
+/*
+ * Wait for completion of a broadcast software ack message
+ * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
*/
static int uv_wait_completion(struct bau_desc *bau_desc,
- unsigned long mmr_offset, int right_shift)
+ unsigned long mmr_offset, int right_shift, int this_cpu,
+ struct bau_control *bcp, struct bau_control *smaster, long try)
{
- int exams = 0;
- long destination_timeouts = 0;
- long source_timeouts = 0;
+ int relaxes = 0;
unsigned long descriptor_status;
+ unsigned long mmr;
+ unsigned long mask;
+ cycles_t ttime;
+ cycles_t timeout_time;
+ struct ptc_stats *stat = &per_cpu(ptcstats, this_cpu);
+ struct bau_control *hmaster;
+
+ hmaster = bcp->uvhub_master;
+ timeout_time = get_cycles() + bcp->timeout_interval;
+ /* spin on the status MMR, waiting for it to go idle */
while ((descriptor_status = (((unsigned long)
uv_read_local_mmr(mmr_offset) >>
right_shift) & UV_ACT_STATUS_MASK)) !=
DESC_STATUS_IDLE) {
- if (descriptor_status == DESC_STATUS_SOURCE_TIMEOUT) {
- source_timeouts++;
- if (source_timeouts > SOURCE_TIMEOUT_LIMIT)
- source_timeouts = 0;
- __get_cpu_var(ptcstats).s_retry++;
- return FLUSH_RETRY;
- }
/*
- * spin here looking for progress at the destinations
+ * Our software ack messages may be blocked because there are
+ * no swack resources available. As long as none of them
+ * has timed out, hardware will NACK our message and its
+ * state will stay IDLE.
*/
- if (descriptor_status == DESC_STATUS_DESTINATION_TIMEOUT) {
- destination_timeouts++;
- if (destination_timeouts > DESTINATION_TIMEOUT_LIMIT) {
- /*
- * returns number of cpus not responding
- */
- if (uv_examine_destinations
- (&bau_desc->distribution) == 0) {
- __get_cpu_var(ptcstats).d_retry++;
- return FLUSH_RETRY;
- }
- exams++;
- if (exams >= uv_bau_retry_limit) {
- printk(KERN_DEBUG
- "uv_flush_tlb_others");
- printk("giving up on cpu %d\n",
- smp_processor_id());
+ if (descriptor_status == DESC_STATUS_SOURCE_TIMEOUT) {
+ stat->s_stimeout++;
+ return FLUSH_GIVEUP;
+ } else if (descriptor_status ==
+ DESC_STATUS_DESTINATION_TIMEOUT) {
+ stat->s_dtimeout++;
+ ttime = get_cycles();
+
+ /*
+ * Our retries may be blocked by all destination
+ * swack resources being consumed, and a timeout
+ * pending. In that case hardware returns the
+ * ERROR that looks like a destination timeout.
+ */
+ if (cycles_2_us(ttime - bcp->send_message) < BIOS_TO) {
+ bcp->conseccompletes = 0;
+ return FLUSH_RETRY_PLUGGED;
+ }
+
+ bcp->conseccompletes = 0;
+ return FLUSH_RETRY_TIMEOUT;
+ } else {
+ /*
+ * descriptor_status is still BUSY
+ */
+ cpu_relax();
+ relaxes++;
+ if (relaxes >= 10000) {
+ relaxes = 0;
+ if (get_cycles() > timeout_time) {
+ quiesce_local_uvhub(hmaster);
+
+ /* single-thread the register change */
+ spin_lock(&hmaster->masks_lock);
+ mmr = uv_read_local_mmr(mmr_offset);
+ mask = 0UL;
+ mask |= (3UL << right_shift);
+ mask = ~mask;
+ mmr &= mask;
+ uv_write_local_mmr(mmr_offset, mmr);
+ spin_unlock(&hmaster->masks_lock);
+ end_uvhub_quiesce(hmaster);
+ stat->s_busy++;
return FLUSH_GIVEUP;
}
- /*
- * delays can hang the simulator
- udelay(1000);
- */
- destination_timeouts = 0;
}
}
- cpu_relax();
}
+ bcp->conseccompletes++;
return FLUSH_COMPLETE;
}
+static inline cycles_t
+sec_2_cycles(unsigned long sec)
+{
+ unsigned long ns;
+ cycles_t cyc;
+
+ ns = sec * 1000000000;
+ cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
+ return cyc;
+}
+
+/*
+ * conditionally add 1 to *v, unless *v is >= u
+ * return 0 if we cannot add 1 to *v because it is >= u
+ * return 1 if we can add 1 to *v because it is < u
+ * the add is atomic
+ *
+ * This is close to atomic_add_unless(), but this allows the 'u' value
+ * to be lowered below the current 'v'. atomic_add_unless can only stop
+ * on equal.
+ */
+static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
+{
+ spin_lock(lock);
+ if (atomic_read(v) >= u) {
+ spin_unlock(lock);
+ return 0;
+ }
+ atomic_inc(v);
+ spin_unlock(lock);
+ return 1;
+}
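atomic_inc_unless_ge() is the primitive behind the per-uvhub 'throttle' used below in uv_flush_send_and_wait(). A minimal userspace sketch of the same lock-plus-counter pattern, purely to illustrate the semantics described in the comment (the kernel code above is the real implementation):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int active;              /* like active_descriptor_count */

/* add 1 to 'active' unless it is already >= limit; 1 on success, 0 otherwise */
static int inc_unless_ge(int limit)
{
        int ok = 0;

        pthread_mutex_lock(&lock);
        if (active < limit) {
                active++;
                ok = 1;
        }
        pthread_mutex_unlock(&lock);
        return ok;
}

int main(void)
{
        int limit = 2;

        /* a sender spins until it gets a slot, exactly like the throttle loop */
        while (!inc_unless_ge(limit))
                ;       /* cpu_relax() in the kernel version */
        printf("got a descriptor slot, active=%d\n", active);
        return 0;
}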
+
/**
* uv_flush_send_and_wait
*
- * Send a broadcast and wait for a broadcast message to complete.
+ * Send a broadcast and wait for it to complete.
*
- * The flush_mask contains the cpus the broadcast was sent to.
+ * The flush_mask contains the cpus the broadcast is to be sent to, plus
+ * cpus that are on the local uvhub.
*
- * Returns NULL if all remote flushing was done. The mask is zeroed.
+ * Returns NULL if all flushing represented in the mask was done. The mask
+ * is zeroed.
* Returns @flush_mask if some remote flushing remains to be done. The
- * mask will have some bits still set.
+ * mask will have some bits still set, representing any cpus on the local
+ * uvhub (not current cpu) and any on remote uvhubs if the broadcast failed.
*/
-const struct cpumask *uv_flush_send_and_wait(int cpu, int this_pnode,
- struct bau_desc *bau_desc,
- struct cpumask *flush_mask)
+const struct cpumask *uv_flush_send_and_wait(struct bau_desc *bau_desc,
+ struct cpumask *flush_mask,
+ struct bau_control *bcp)
{
- int completion_status = 0;
int right_shift;
- int tries = 0;
- int pnode;
+ int uvhub;
int bit;
+ int completion_status = 0;
+ int seq_number = 0;
+ long try = 0;
+ int cpu = bcp->uvhub_cpu;
+ int this_cpu = bcp->cpu;
+ int this_uvhub = bcp->uvhub;
unsigned long mmr_offset;
unsigned long index;
cycles_t time1;
cycles_t time2;
+ struct ptc_stats *stat = &per_cpu(ptcstats, bcp->cpu);
+ struct bau_control *smaster = bcp->socket_master;
+ struct bau_control *hmaster = bcp->uvhub_master;
+
+ /*
+ * Spin here while there are hmaster->max_concurrent or more active
+ * descriptors. This is the per-uvhub 'throttle'.
+ */
+ if (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
+ &hmaster->active_descriptor_count,
+ hmaster->max_concurrent)) {
+ stat->s_throttles++;
+ do {
+ cpu_relax();
+ } while (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
+ &hmaster->active_descriptor_count,
+ hmaster->max_concurrent));
+ }
+
+ while (hmaster->uvhub_quiesce)
+ cpu_relax();
if (cpu < UV_CPUS_PER_ACT_STATUS) {
mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
@@ -268,24 +557,108 @@ const struct cpumask *uv_flush_send_and_wait(int cpu, int this_pnode,
}
time1 = get_cycles();
do {
- tries++;
+ /*
+ * Every message from any given cpu gets a unique message
+ * sequence number. But retries use that same number.
+ * Our message may have timed out at the destination because
+ * all sw-ack resources are in use and there is a timeout
+ * pending there. In that case, our last send never got
+ * placed into the queue and we need to persist until it
+ * does.
+ *
+ * Make any retry a type MSG_RETRY so that the destination will
+ * free any resource held by a previous message from this cpu.
+ */
+ if (try == 0) {
+ /* use message type set by the caller the first time */
+ seq_number = bcp->message_number++;
+ } else {
+ /* use RETRY type on all the rest; same sequence */
+ bau_desc->header.msg_type = MSG_RETRY;
+ stat->s_retry_messages++;
+ }
+ bau_desc->header.sequence = seq_number;
index = (1UL << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) |
- cpu;
+ bcp->uvhub_cpu;
+ bcp->send_message = get_cycles();
+
uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
+
+ try++;
completion_status = uv_wait_completion(bau_desc, mmr_offset,
- right_shift);
- } while (completion_status == FLUSH_RETRY);
+ right_shift, this_cpu, bcp, smaster, try);
+
+ if (completion_status == FLUSH_RETRY_PLUGGED) {
+ /*
+ * Our retries may be blocked by all destination swack
+ * resources being consumed, and a timeout pending. In
+ * that case hardware immediately returns the ERROR
+ * that looks like a destination timeout.
+ */
+ udelay(TIMEOUT_DELAY);
+ bcp->plugged_tries++;
+ if (bcp->plugged_tries >= PLUGSB4RESET) {
+ bcp->plugged_tries = 0;
+ quiesce_local_uvhub(hmaster);
+ spin_lock(&hmaster->queue_lock);
+ uv_reset_with_ipi(&bau_desc->distribution,
+ this_cpu);
+ spin_unlock(&hmaster->queue_lock);
+ end_uvhub_quiesce(hmaster);
+ bcp->ipi_attempts++;
+ stat->s_resets_plug++;
+ }
+ } else if (completion_status == FLUSH_RETRY_TIMEOUT) {
+ hmaster->max_concurrent = 1;
+ bcp->timeout_tries++;
+ udelay(TIMEOUT_DELAY);
+ if (bcp->timeout_tries >= TIMEOUTSB4RESET) {
+ bcp->timeout_tries = 0;
+ quiesce_local_uvhub(hmaster);
+ spin_lock(&hmaster->queue_lock);
+ uv_reset_with_ipi(&bau_desc->distribution,
+ this_cpu);
+ spin_unlock(&hmaster->queue_lock);
+ end_uvhub_quiesce(hmaster);
+ bcp->ipi_attempts++;
+ stat->s_resets_timeout++;
+ }
+ }
+ if (bcp->ipi_attempts >= 3) {
+ bcp->ipi_attempts = 0;
+ completion_status = FLUSH_GIVEUP;
+ break;
+ }
+ cpu_relax();
+ } while ((completion_status == FLUSH_RETRY_PLUGGED) ||
+ (completion_status == FLUSH_RETRY_TIMEOUT));
time2 = get_cycles();
- __get_cpu_var(ptcstats).sflush += (time2 - time1);
- if (tries > 1)
- __get_cpu_var(ptcstats).retriesok++;
- if (completion_status == FLUSH_GIVEUP) {
+ if ((completion_status == FLUSH_COMPLETE) && (bcp->conseccompletes > 5)
+ && (hmaster->max_concurrent < hmaster->max_concurrent_constant))
+ hmaster->max_concurrent++;
+
+ /*
+ * hold any cpu not timing out here; no other cpu currently held by
+ * the 'throttle' should enter the activation code
+ */
+ while (hmaster->uvhub_quiesce)
+ cpu_relax();
+ atomic_dec(&hmaster->active_descriptor_count);
+
+ /* guard against cycles wrap */
+ if (time2 > time1)
+ stat->s_time += (time2 - time1);
+ else
+ stat->s_requestor--; /* don't count this one */
+ if (completion_status == FLUSH_COMPLETE && try > 1)
+ stat->s_retriesok++;
+ else if (completion_status == FLUSH_GIVEUP) {
/*
* Cause the caller to do an IPI-style TLB shootdown on
- * the cpu's, all of which are still in the mask.
+ * the target cpu's, all of which are still in the mask.
*/
- __get_cpu_var(ptcstats).ptc_i++;
+ stat->s_giveup++;
return flush_mask;
}
@@ -294,18 +667,17 @@ const struct cpumask *uv_flush_send_and_wait(int cpu, int this_pnode,
* use the IPI method of shootdown on them.
*/
for_each_cpu(bit, flush_mask) {
- pnode = uv_cpu_to_pnode(bit);
- if (pnode == this_pnode)
+ uvhub = uv_cpu_to_blade_id(bit);
+ if (uvhub == this_uvhub)
continue;
cpumask_clear_cpu(bit, flush_mask);
}
if (!cpumask_empty(flush_mask))
return flush_mask;
+
return NULL;
}
-static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);
-
/**
* uv_flush_tlb_others - globally purge translation cache of a virtual
* address or all TLB's
@@ -322,8 +694,8 @@ static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);
* The caller has derived the cpumask from the mm_struct. This function
* is called only if there are bits set in the mask. (e.g. flush_tlb_page())
*
- * The cpumask is converted into a nodemask of the nodes containing
- * the cpus.
+ * The cpumask is converted into a uvhubmask of the uvhubs containing
+ * those cpus.
*
* Note that this function should be called with preemption disabled.
*
@@ -335,52 +707,82 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
struct mm_struct *mm,
unsigned long va, unsigned int cpu)
{
- struct cpumask *flush_mask = __get_cpu_var(uv_flush_tlb_mask);
- int i;
- int bit;
- int pnode;
- int uv_cpu;
- int this_pnode;
+ int remotes;
+ int tcpu;
+ int uvhub;
int locals = 0;
struct bau_desc *bau_desc;
+ struct cpumask *flush_mask;
+ struct ptc_stats *stat;
+ struct bau_control *bcp;
- cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
+ if (nobau)
+ return cpumask;
- uv_cpu = uv_blade_processor_id();
- this_pnode = uv_hub_info->pnode;
- bau_desc = __get_cpu_var(bau_control).descriptor_base;
- bau_desc += UV_ITEMS_PER_DESCRIPTOR * uv_cpu;
+ bcp = &per_cpu(bau_control, cpu);
+ /*
+ * Each sending cpu has a per-cpu mask which it fills from the caller's
+ * cpu mask. Only remote cpus are converted to uvhubs and copied.
+ */
+ flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
+ /*
+ * copy cpumask to flush_mask, removing current cpu
+ * (current cpu should already have been flushed by the caller and
+ * should never be returned if we return flush_mask)
+ */
+ cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
+ if (cpu_isset(cpu, *cpumask))
+ locals++; /* current cpu was targeted */
- bau_nodes_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
+ bau_desc = bcp->descriptor_base;
+ bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu;
- i = 0;
- for_each_cpu(bit, flush_mask) {
- pnode = uv_cpu_to_pnode(bit);
- BUG_ON(pnode > (UV_DISTRIBUTION_SIZE - 1));
- if (pnode == this_pnode) {
+ bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
+ remotes = 0;
+ for_each_cpu(tcpu, flush_mask) {
+ uvhub = uv_cpu_to_blade_id(tcpu);
+ if (uvhub == bcp->uvhub) {
locals++;
continue;
}
- bau_node_set(pnode - uv_partition_base_pnode,
- &bau_desc->distribution);
- i++;
+ bau_uvhub_set(uvhub, &bau_desc->distribution);
+ remotes++;
}
- if (i == 0) {
+ if (remotes == 0) {
/*
- * no off_node flushing; return status for local node
+ * No off_hub flushing; return status for local hub.
+ * Return the caller's mask if all were local (the current
+ * cpu may be in that mask).
*/
if (locals)
- return flush_mask;
+ return cpumask;
else
return NULL;
}
- __get_cpu_var(ptcstats).requestor++;
- __get_cpu_var(ptcstats).ntargeted += i;
+ stat = &per_cpu(ptcstats, cpu);
+ stat->s_requestor++;
+ stat->s_ntargcpu += remotes;
+ remotes = bau_uvhub_weight(&bau_desc->distribution);
+ stat->s_ntarguvhub += remotes;
+ if (remotes >= 16)
+ stat->s_ntarguvhub16++;
+ else if (remotes >= 8)
+ stat->s_ntarguvhub8++;
+ else if (remotes >= 4)
+ stat->s_ntarguvhub4++;
+ else if (remotes >= 2)
+ stat->s_ntarguvhub2++;
+ else
+ stat->s_ntarguvhub1++;
bau_desc->payload.address = va;
bau_desc->payload.sending_cpu = cpu;
- return uv_flush_send_and_wait(uv_cpu, this_pnode, bau_desc, flush_mask);
+ /*
+ * uv_flush_send_and_wait returns null if all cpu's were messaged, or
+ * the adjusted flush_mask if any cpu's were not messaged.
+ */
+ return uv_flush_send_and_wait(bau_desc, flush_mask, bcp);
}
/*
@@ -389,87 +791,70 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
*
* We received a broadcast assist message.
*
- * Interrupts may have been disabled; this interrupt could represent
+ * Interrupts are disabled; this interrupt could represent
* the receipt of several messages.
*
- * All cores/threads on this node get this interrupt.
- * The last one to see it does the s/w ack.
+ * All cores/threads on this hub get this interrupt.
+ * The last one to see it does the software ack.
* (the resource will not be freed until noninterruptable cpus see this
- * interrupt; hardware will timeout the s/w ack and reply ERROR)
+ * interrupt; hardware may timeout the s/w ack and reply ERROR)
*/
void uv_bau_message_interrupt(struct pt_regs *regs)
{
- struct bau_payload_queue_entry *va_queue_first;
- struct bau_payload_queue_entry *va_queue_last;
- struct bau_payload_queue_entry *msg;
- struct pt_regs *old_regs = set_irq_regs(regs);
- cycles_t time1;
- cycles_t time2;
- int msg_slot;
- int sw_ack_slot;
- int fw;
int count = 0;
- unsigned long local_pnode;
-
- ack_APIC_irq();
- exit_idle();
- irq_enter();
-
- time1 = get_cycles();
-
- local_pnode = uv_blade_to_pnode(uv_numa_blade_id());
-
- va_queue_first = __get_cpu_var(bau_control).va_queue_first;
- va_queue_last = __get_cpu_var(bau_control).va_queue_last;
-
- msg = __get_cpu_var(bau_control).bau_msg_head;
+ cycles_t time_start;
+ struct bau_payload_queue_entry *msg;
+ struct bau_control *bcp;
+ struct ptc_stats *stat;
+ struct msg_desc msgdesc;
+
+ time_start = get_cycles();
+ bcp = &per_cpu(bau_control, smp_processor_id());
+ stat = &per_cpu(ptcstats, smp_processor_id());
+ msgdesc.va_queue_first = bcp->va_queue_first;
+ msgdesc.va_queue_last = bcp->va_queue_last;
+ msg = bcp->bau_msg_head;
while (msg->sw_ack_vector) {
count++;
- fw = msg->sw_ack_vector;
- msg_slot = msg - va_queue_first;
- sw_ack_slot = ffs(fw) - 1;
-
- uv_bau_process_message(msg, msg_slot, sw_ack_slot);
-
+ msgdesc.msg_slot = msg - msgdesc.va_queue_first;
+ msgdesc.sw_ack_slot = ffs(msg->sw_ack_vector) - 1;
+ msgdesc.msg = msg;
+ uv_bau_process_message(&msgdesc, bcp);
msg++;
- if (msg > va_queue_last)
- msg = va_queue_first;
- __get_cpu_var(bau_control).bau_msg_head = msg;
+ if (msg > msgdesc.va_queue_last)
+ msg = msgdesc.va_queue_first;
+ bcp->bau_msg_head = msg;
}
+ stat->d_time += (get_cycles() - time_start);
if (!count)
- __get_cpu_var(ptcstats).nomsg++;
+ stat->d_nomsg++;
else if (count > 1)
- __get_cpu_var(ptcstats).multmsg++;
-
- time2 = get_cycles();
- __get_cpu_var(ptcstats).dflush += (time2 - time1);
-
- irq_exit();
- set_irq_regs(old_regs);
+ stat->d_multmsg++;
+ ack_APIC_irq();
}
/*
* uv_enable_timeouts
*
- * Each target blade (i.e. blades that have cpu's) needs to have
+ * Each target uvhub (i.e. a uvhub that has cpu's) needs to have
* shootdown message timeouts enabled. The timeout does not cause
* an interrupt, but causes an error message to be returned to
* the sender.
*/
static void uv_enable_timeouts(void)
{
- int blade;
- int nblades;
+ int uvhub;
+ int nuvhubs;
int pnode;
unsigned long mmr_image;
- nblades = uv_num_possible_blades();
+ nuvhubs = uv_num_possible_blades();
- for (blade = 0; blade < nblades; blade++) {
- if (!uv_blade_nr_possible_cpus(blade))
+ for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
+ if (!uv_blade_nr_possible_cpus(uvhub))
continue;
- pnode = uv_blade_to_pnode(blade);
+ pnode = uv_blade_to_pnode(uvhub);
mmr_image =
uv_read_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL);
/*
@@ -479,16 +864,16 @@ static void uv_enable_timeouts(void)
* To program the period, the SOFT_ACK_MODE must be off.
*/
mmr_image &= ~((unsigned long)1 <<
- UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT);
+ UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT);
uv_write_global_mmr64
(pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
/*
* Set the 4-bit period.
*/
mmr_image &= ~((unsigned long)0xf <<
- UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT);
+ UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT);
mmr_image |= (UV_INTD_SOFT_ACK_TIMEOUT_PERIOD <<
- UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT);
+ UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT);
uv_write_global_mmr64
(pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
/*
@@ -497,7 +882,7 @@ static void uv_enable_timeouts(void)
* indicated in bits 2:0 (7 causes all of them to timeout).
*/
mmr_image |= ((unsigned long)1 <<
- UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT);
+ UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT);
uv_write_global_mmr64
(pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
}
@@ -522,9 +907,20 @@ static void uv_ptc_seq_stop(struct seq_file *file, void *data)
{
}
+static inline unsigned long long
+millisec_2_cycles(unsigned long millisec)
+{
+ unsigned long ns;
+ unsigned long long cyc;
+
+ ns = millisec * 1000;
+ cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
+ return cyc;
+}
+
/*
- * Display the statistics thru /proc
- * data points to the cpu number
+ * Display the statistics thru /proc.
+ * 'data' points to the cpu number
*/
static int uv_ptc_seq_show(struct seq_file *file, void *data)
{
@@ -535,78 +931,155 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data)
if (!cpu) {
seq_printf(file,
- "# cpu requestor requestee one all sretry dretry ptc_i ");
+ "# cpu sent stime numuvhubs numuvhubs16 numuvhubs8 ");
seq_printf(file,
- "sw_ack sflush dflush sok dnomsg dmult starget\n");
+ "numuvhubs4 numuvhubs2 numuvhubs1 numcpus dto ");
+ seq_printf(file,
+ "retries rok resetp resett giveup sto bz throt ");
+ seq_printf(file,
+ "sw_ack recv rtime all ");
+ seq_printf(file,
+ "one mult none retry canc nocan reset rcan\n");
}
if (cpu < num_possible_cpus() && cpu_online(cpu)) {
stat = &per_cpu(ptcstats, cpu);
- seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld ",
- cpu, stat->requestor,
- stat->requestee, stat->onetlb, stat->alltlb,
- stat->s_retry, stat->d_retry, stat->ptc_i);
- seq_printf(file, "%lx %ld %ld %ld %ld %ld %ld\n",
+ /* source side statistics */
+ seq_printf(file,
+ "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
+ cpu, stat->s_requestor, cycles_2_us(stat->s_time),
+ stat->s_ntarguvhub, stat->s_ntarguvhub16,
+ stat->s_ntarguvhub8, stat->s_ntarguvhub4,
+ stat->s_ntarguvhub2, stat->s_ntarguvhub1,
+ stat->s_ntargcpu, stat->s_dtimeout);
+ seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
+ stat->s_retry_messages, stat->s_retriesok,
+ stat->s_resets_plug, stat->s_resets_timeout,
+ stat->s_giveup, stat->s_stimeout,
+ stat->s_busy, stat->s_throttles);
+ /* destination side statistics */
+ seq_printf(file,
+ "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n",
uv_read_global_mmr64(uv_cpu_to_pnode(cpu),
UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
- stat->sflush, stat->dflush,
- stat->retriesok, stat->nomsg,
- stat->multmsg, stat->ntargeted);
+ stat->d_requestee, cycles_2_us(stat->d_time),
+ stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
+ stat->d_nomsg, stat->d_retries, stat->d_canceled,
+ stat->d_nocanceled, stat->d_resets,
+ stat->d_rcanceled);
}
return 0;
}
/*
+ * -1: reset the statistics
* 0: display meaning of the statistics
- * >0: retry limit
+ * >0: maximum concurrent active descriptors per uvhub (throttle)
*/
static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
size_t count, loff_t *data)
{
- long newmode;
+ int cpu;
+ long input_arg;
char optstr[64];
+ struct ptc_stats *stat;
+ struct bau_control *bcp;
if (count == 0 || count > sizeof(optstr))
return -EINVAL;
if (copy_from_user(optstr, user, count))
return -EFAULT;
optstr[count - 1] = '\0';
- if (strict_strtoul(optstr, 10, &newmode) < 0) {
+ if (strict_strtol(optstr, 10, &input_arg) < 0) {
printk(KERN_DEBUG "%s is invalid\n", optstr);
return -EINVAL;
}
- if (newmode == 0) {
+ if (input_arg == 0) {
printk(KERN_DEBUG "# cpu: cpu number\n");
+ printk(KERN_DEBUG "Sender statistics:\n");
+ printk(KERN_DEBUG
+ "sent: number of shootdown messages sent\n");
+ printk(KERN_DEBUG
+ "stime: time spent sending messages\n");
+ printk(KERN_DEBUG
+ "numuvhubs: number of hubs targeted with shootdown\n");
+ printk(KERN_DEBUG
+ "numuvhubs16: number times 16 or more hubs targeted\n");
+ printk(KERN_DEBUG
+ "numuvhubs8: number times 8 or more hubs targeted\n");
+ printk(KERN_DEBUG
+ "numuvhubs4: number times 4 or more hubs targeted\n");
+ printk(KERN_DEBUG
+ "numuvhubs2: number times 2 or more hubs targeted\n");
+ printk(KERN_DEBUG
+ "numuvhubs1: number times 1 hub targeted\n");
+ printk(KERN_DEBUG
+ "numcpus: number of cpus targeted with shootdown\n");
+ printk(KERN_DEBUG
+ "dto: number of destination timeouts\n");
+ printk(KERN_DEBUG
+ "retries: destination timeout retries sent\n");
+ printk(KERN_DEBUG
+ "rok: : destination timeouts successfully retried\n");
+ printk(KERN_DEBUG
+ "resetp: ipi-style resource resets for plugs\n");
+ printk(KERN_DEBUG
+ "resett: ipi-style resource resets for timeouts\n");
+ printk(KERN_DEBUG
+ "giveup: fall-backs to ipi-style shootdowns\n");
+ printk(KERN_DEBUG
+ "sto: number of source timeouts\n");
+ printk(KERN_DEBUG
+ "bz: number of stay-busy's\n");
+ printk(KERN_DEBUG
+ "throt: number times spun in throttle\n");
+ printk(KERN_DEBUG "Destination side statistics:\n");
printk(KERN_DEBUG
- "requestor: times this cpu was the flush requestor\n");
+ "sw_ack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE\n");
printk(KERN_DEBUG
- "requestee: times this cpu was requested to flush its TLBs\n");
+ "recv: shootdown messages received\n");
printk(KERN_DEBUG
- "one: times requested to flush a single address\n");
+ "rtime: time spent processing messages\n");
printk(KERN_DEBUG
- "all: times requested to flush all TLB's\n");
+ "all: shootdown all-tlb messages\n");
printk(KERN_DEBUG
- "sretry: number of retries of source-side timeouts\n");
+ "one: shootdown one-tlb messages\n");
printk(KERN_DEBUG
- "dretry: number of retries of destination-side timeouts\n");
+ "mult: interrupts that found multiple messages\n");
printk(KERN_DEBUG
- "ptc_i: times UV fell through to IPI-style flushes\n");
+ "none: interrupts that found no messages\n");
printk(KERN_DEBUG
- "sw_ack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE\n");
+ "retry: number of retry messages processed\n");
printk(KERN_DEBUG
- "sflush_us: cycles spent in uv_flush_tlb_others()\n");
+ "canc: number messages canceled by retries\n");
printk(KERN_DEBUG
- "dflush_us: cycles spent in handling flush requests\n");
- printk(KERN_DEBUG "sok: successes on retry\n");
- printk(KERN_DEBUG "dnomsg: interrupts with no message\n");
+ "nocan: number retries that found nothing to cancel\n");
printk(KERN_DEBUG
- "dmult: interrupts with multiple messages\n");
- printk(KERN_DEBUG "starget: nodes targeted\n");
+ "reset: number of ipi-style reset requests processed\n");
+ printk(KERN_DEBUG
+ "rcan: number messages canceled by reset requests\n");
+ } else if (input_arg == -1) {
+ for_each_present_cpu(cpu) {
+ stat = &per_cpu(ptcstats, cpu);
+ memset(stat, 0, sizeof(struct ptc_stats));
+ }
} else {
- uv_bau_retry_limit = newmode;
- printk(KERN_DEBUG "timeout retry limit:%d\n",
- uv_bau_retry_limit);
+ uv_bau_max_concurrent = input_arg;
+ bcp = &per_cpu(bau_control, smp_processor_id());
+ if (uv_bau_max_concurrent < 1 ||
+ uv_bau_max_concurrent > bcp->cpus_in_uvhub) {
+ printk(KERN_DEBUG
+ "Error: BAU max concurrent %d; %d is invalid\n",
+ bcp->max_concurrent, uv_bau_max_concurrent);
+ return -EINVAL;
+ }
+ printk(KERN_DEBUG "Set BAU max concurrent:%d\n",
+ uv_bau_max_concurrent);
+ for_each_present_cpu(cpu) {
+ bcp = &per_cpu(bau_control, cpu);
+ bcp->max_concurrent = uv_bau_max_concurrent;
+ }
}
return count;
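A hedged usage sketch for the control file handled by this write routine; the /proc path is assumed to be the one registered by uv_ptc_init() ("sgi_uv/ptc_statistics"), which is outside this hunk:

#include <stdio.h>

int main(void)
{
        /* path assumed from uv_ptc_init(); not shown in this diff */
        FILE *f = fopen("/proc/sgi_uv/ptc_statistics", "w");

        if (!f) {
                perror("ptc_statistics");
                return 1;
        }
        fputs("-1\n", f);       /* reset every cpu's ptc_stats */
        /* "0" prints the field legend via printk;
         * a value > 0 becomes the per-uvhub max_concurrent throttle */
        fclose(f);
        return 0;
}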
@@ -650,79 +1123,30 @@ static int __init uv_ptc_init(void)
}
/*
- * begin the initialization of the per-blade control structures
- */
-static struct bau_control * __init uv_table_bases_init(int blade, int node)
-{
- int i;
- struct bau_msg_status *msp;
- struct bau_control *bau_tabp;
-
- bau_tabp =
- kmalloc_node(sizeof(struct bau_control), GFP_KERNEL, node);
- BUG_ON(!bau_tabp);
-
- bau_tabp->msg_statuses =
- kmalloc_node(sizeof(struct bau_msg_status) *
- DEST_Q_SIZE, GFP_KERNEL, node);
- BUG_ON(!bau_tabp->msg_statuses);
-
- for (i = 0, msp = bau_tabp->msg_statuses; i < DEST_Q_SIZE; i++, msp++)
- bau_cpubits_clear(&msp->seen_by, (int)
- uv_blade_nr_possible_cpus(blade));
-
- uv_bau_table_bases[blade] = bau_tabp;
-
- return bau_tabp;
-}
-
-/*
- * finish the initialization of the per-blade control structures
- */
-static void __init
-uv_table_bases_finish(int blade,
- struct bau_control *bau_tablesp,
- struct bau_desc *adp)
-{
- struct bau_control *bcp;
- int cpu;
-
- for_each_present_cpu(cpu) {
- if (blade != uv_cpu_to_blade_id(cpu))
- continue;
-
- bcp = (struct bau_control *)&per_cpu(bau_control, cpu);
- bcp->bau_msg_head = bau_tablesp->va_queue_first;
- bcp->va_queue_first = bau_tablesp->va_queue_first;
- bcp->va_queue_last = bau_tablesp->va_queue_last;
- bcp->msg_statuses = bau_tablesp->msg_statuses;
- bcp->descriptor_base = adp;
- }
-}
-
-/*
* initialize the sending side's sending buffers
*/
-static struct bau_desc * __init
+static void
uv_activation_descriptor_init(int node, int pnode)
{
int i;
+ int cpu;
unsigned long pa;
unsigned long m;
unsigned long n;
- struct bau_desc *adp;
- struct bau_desc *ad2;
+ struct bau_desc *bau_desc;
+ struct bau_desc *bd2;
+ struct bau_control *bcp;
/*
* each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR)
- * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per blade
+ * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per uvhub
*/
- adp = (struct bau_desc *)kmalloc_node(sizeof(struct bau_desc)*
+ bau_desc = (struct bau_desc *)kmalloc_node(sizeof(struct bau_desc)*
UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node);
- BUG_ON(!adp);
+ BUG_ON(!bau_desc);
- pa = uv_gpa(adp); /* need the real nasid*/
- n = uv_gpa_to_pnode(pa);
+ pa = uv_gpa(bau_desc); /* need the real nasid*/
+ n = pa >> uv_nshift;
m = pa & uv_mmask;
uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE,
@@ -731,96 +1155,188 @@ uv_activation_descriptor_init(int node, int pnode)
/*
* initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each
* cpu even though we only use the first one; one descriptor can
- * describe a broadcast to 256 nodes.
+ * describe a broadcast to 256 uv hubs.
*/
- for (i = 0, ad2 = adp; i < (UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR);
- i++, ad2++) {
- memset(ad2, 0, sizeof(struct bau_desc));
- ad2->header.sw_ack_flag = 1;
+ for (i = 0, bd2 = bau_desc; i < (UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR);
+ i++, bd2++) {
+ memset(bd2, 0, sizeof(struct bau_desc));
+ bd2->header.sw_ack_flag = 1;
/*
- * base_dest_nodeid is the first node in the partition, so
- * the bit map will indicate partition-relative node numbers.
- * note that base_dest_nodeid is actually a nasid.
+ * base_dest_nodeid is the nasid (pnode<<1) of the first uvhub
+ * in the partition. The bit map will indicate uvhub numbers,
+ * which are 0-N in a partition. Pnodes are unique system-wide.
*/
- ad2->header.base_dest_nodeid = uv_partition_base_pnode << 1;
- ad2->header.dest_subnodeid = 0x10; /* the LB */
- ad2->header.command = UV_NET_ENDPOINT_INTD;
- ad2->header.int_both = 1;
+ bd2->header.base_dest_nodeid = uv_partition_base_pnode << 1;
+ bd2->header.dest_subnodeid = 0x10; /* the LB */
+ bd2->header.command = UV_NET_ENDPOINT_INTD;
+ bd2->header.int_both = 1;
/*
* all others need to be set to zero:
* fairness chaining multilevel count replied_to
*/
}
- return adp;
+ for_each_present_cpu(cpu) {
+ if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
+ continue;
+ bcp = &per_cpu(bau_control, cpu);
+ bcp->descriptor_base = bau_desc;
+ }
}
/*
* initialize the destination side's receiving buffers
+ * entered for each uvhub in the partition
+ * - node is first node (kernel memory notion) on the uvhub
+ * - pnode is the uvhub's physical identifier
*/
-static struct bau_payload_queue_entry * __init
-uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
+static void
+uv_payload_queue_init(int node, int pnode)
{
- struct bau_payload_queue_entry *pqp;
- unsigned long pa;
int pn;
+ int cpu;
char *cp;
+ unsigned long pa;
+ struct bau_payload_queue_entry *pqp;
+ struct bau_payload_queue_entry *pqp_malloc;
+ struct bau_control *bcp;
pqp = (struct bau_payload_queue_entry *) kmalloc_node(
(DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry),
GFP_KERNEL, node);
BUG_ON(!pqp);
+ pqp_malloc = pqp;
cp = (char *)pqp + 31;
pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5);
- bau_tablesp->va_queue_first = pqp;
+
+ for_each_present_cpu(cpu) {
+ if (pnode != uv_cpu_to_pnode(cpu))
+ continue;
+ /* for every cpu on this pnode: */
+ bcp = &per_cpu(bau_control, cpu);
+ bcp->va_queue_first = pqp;
+ bcp->bau_msg_head = pqp;
+ bcp->va_queue_last = pqp + (DEST_Q_SIZE - 1);
+ }
/*
* need the pnode of where the memory was really allocated
*/
pa = uv_gpa(pqp);
- pn = uv_gpa_to_pnode(pa);
+ pn = pa >> uv_nshift;
uv_write_global_mmr64(pnode,
UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST,
((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) |
uv_physnodeaddr(pqp));
uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL,
uv_physnodeaddr(pqp));
- bau_tablesp->va_queue_last = pqp + (DEST_Q_SIZE - 1);
uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST,
(unsigned long)
- uv_physnodeaddr(bau_tablesp->va_queue_last));
+ uv_physnodeaddr(pqp + (DEST_Q_SIZE - 1)));
+ /* in effect, all msg_type's are set to MSG_NOOP */
memset(pqp, 0, sizeof(struct bau_payload_queue_entry) * DEST_Q_SIZE);
-
- return pqp;
}
/*
- * Initialization of each UV blade's structures
+ * Initialization of each UV hub's structures
*/
-static int __init uv_init_blade(int blade)
+static void __init uv_init_uvhub(int uvhub, int vector)
{
int node;
int pnode;
- unsigned long pa;
unsigned long apicid;
- struct bau_desc *adp;
- struct bau_payload_queue_entry *pqp;
- struct bau_control *bau_tablesp;
-
- node = blade_to_first_node(blade);
- bau_tablesp = uv_table_bases_init(blade, node);
- pnode = uv_blade_to_pnode(blade);
- adp = uv_activation_descriptor_init(node, pnode);
- pqp = uv_payload_queue_init(node, pnode, bau_tablesp);
- uv_table_bases_finish(blade, bau_tablesp, adp);
+
+ node = uvhub_to_first_node(uvhub);
+ pnode = uv_blade_to_pnode(uvhub);
+ uv_activation_descriptor_init(node, pnode);
+ uv_payload_queue_init(node, pnode);
/*
* the below initialization can't be in firmware because the
* messaging IRQ will be determined by the OS
*/
- apicid = blade_to_first_apicid(blade);
- pa = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG);
+ apicid = uvhub_to_first_apicid(uvhub);
uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
- ((apicid << 32) | UV_BAU_MESSAGE));
- return 0;
+ ((apicid << 32) | vector));
+}
+
+/*
+ * initialize the bau_control structure for each cpu
+ */
+static void uv_init_per_cpu(int nuvhubs)
+{
+ int i, j, k;
+ int cpu;
+ int pnode;
+ int uvhub;
+ short socket = 0;
+ struct bau_control *bcp;
+ struct uvhub_desc *bdp;
+ struct socket_desc *sdp;
+ struct bau_control *hmaster = NULL;
+ struct bau_control *smaster = NULL;
+ struct socket_desc {
+ short num_cpus;
+ short cpu_number[16];
+ };
+ struct uvhub_desc {
+ short num_sockets;
+ short num_cpus;
+ short uvhub;
+ short pnode;
+ struct socket_desc socket[2];
+ };
+ struct uvhub_desc *uvhub_descs;
+
+ uvhub_descs = (struct uvhub_desc *)
+ kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
+ memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
+ for_each_present_cpu(cpu) {
+ bcp = &per_cpu(bau_control, cpu);
+ memset(bcp, 0, sizeof(struct bau_control));
+ spin_lock_init(&bcp->masks_lock);
+ bcp->max_concurrent = uv_bau_max_concurrent;
+ pnode = uv_cpu_hub_info(cpu)->pnode;
+ uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
+ bdp = &uvhub_descs[uvhub];
+ bdp->num_cpus++;
+ bdp->uvhub = uvhub;
+ bdp->pnode = pnode;
+ /* time interval to catch a hardware stay-busy bug */
+ bcp->timeout_interval = millisec_2_cycles(3);
+ /* kludge: assume uv_hub.h is constant */
+ socket = (cpu_physical_id(cpu)>>5)&1;
+ if (socket >= bdp->num_sockets)
+ bdp->num_sockets = socket+1;
+ sdp = &bdp->socket[socket];
+ sdp->cpu_number[sdp->num_cpus] = cpu;
+ sdp->num_cpus++;
+ }
+ socket = 0;
+ for_each_possible_blade(uvhub) {
+ bdp = &uvhub_descs[uvhub];
+ for (i = 0; i < bdp->num_sockets; i++) {
+ sdp = &bdp->socket[i];
+ for (j = 0; j < sdp->num_cpus; j++) {
+ cpu = sdp->cpu_number[j];
+ bcp = &per_cpu(bau_control, cpu);
+ bcp->cpu = cpu;
+ if (j == 0) {
+ smaster = bcp;
+ if (i == 0)
+ hmaster = bcp;
+ }
+ bcp->cpus_in_uvhub = bdp->num_cpus;
+ bcp->cpus_in_socket = sdp->num_cpus;
+ bcp->socket_master = smaster;
+ bcp->uvhub_master = hmaster;
+ for (k = 0; k < DEST_Q_SIZE; k++)
+ bcp->socket_acknowledge_count[k] = 0;
+ bcp->uvhub_cpu =
+ uv_cpu_hub_info(cpu)->blade_processor_id;
+ }
+ socket++;
+ }
+ }
+ kfree(uvhub_descs);
}
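The socket 'kludge' above keys off bit 5 of the physical APIC id to pick one of two sockets per blade. A tiny illustration of that mapping, using only the expression from the patch:

#include <stdio.h>

int main(void)
{
        unsigned int apicid;

        /* same expression as in uv_init_per_cpu(): (cpu_physical_id(cpu) >> 5) & 1 */
        for (apicid = 0; apicid < 64; apicid += 16)
                printf("apicid 0x%02x -> socket %u\n", apicid, (apicid >> 5) & 1);
        return 0;
}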
/*
@@ -828,38 +1344,54 @@ static int __init uv_init_blade(int blade)
*/
static int __init uv_bau_init(void)
{
- int blade;
- int nblades;
+ int uvhub;
+ int pnode;
+ int nuvhubs;
int cur_cpu;
+ int vector;
+ unsigned long mmr;
if (!is_uv_system())
return 0;
+ if (nobau)
+ return 0;
+
for_each_possible_cpu(cur_cpu)
zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu),
GFP_KERNEL, cpu_to_node(cur_cpu));
- uv_bau_retry_limit = 1;
+ uv_bau_max_concurrent = MAX_BAU_CONCURRENT;
+ uv_nshift = uv_hub_info->m_val;
uv_mmask = (1UL << uv_hub_info->m_val) - 1;
- nblades = uv_num_possible_blades();
+ nuvhubs = uv_num_possible_blades();
- uv_bau_table_bases = (struct bau_control **)
- kmalloc(nblades * sizeof(struct bau_control *), GFP_KERNEL);
- BUG_ON(!uv_bau_table_bases);
+ uv_init_per_cpu(nuvhubs);
uv_partition_base_pnode = 0x7fffffff;
- for (blade = 0; blade < nblades; blade++)
- if (uv_blade_nr_possible_cpus(blade) &&
- (uv_blade_to_pnode(blade) < uv_partition_base_pnode))
- uv_partition_base_pnode = uv_blade_to_pnode(blade);
- for (blade = 0; blade < nblades; blade++)
- if (uv_blade_nr_possible_cpus(blade))
- uv_init_blade(blade);
-
- alloc_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1);
+ for (uvhub = 0; uvhub < nuvhubs; uvhub++)
+ if (uv_blade_nr_possible_cpus(uvhub) &&
+ (uv_blade_to_pnode(uvhub) < uv_partition_base_pnode))
+ uv_partition_base_pnode = uv_blade_to_pnode(uvhub);
+
+ vector = UV_BAU_MESSAGE;
+ for_each_possible_blade(uvhub)
+ if (uv_blade_nr_possible_cpus(uvhub))
+ uv_init_uvhub(uvhub, vector);
+
uv_enable_timeouts();
+ alloc_intr_gate(vector, uv_bau_message_intr1);
+
+ for_each_possible_blade(uvhub) {
+ pnode = uv_blade_to_pnode(uvhub);
+ /* INIT the bau */
+ uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_ACTIVATION_CONTROL,
+ ((unsigned long)1 << 63));
+ mmr = 1; /* should be 1 to broadcast to both sockets */
+ uv_write_global_mmr64(pnode, UVH_BAU_DATA_BROADCAST, mmr);
+ }
return 0;
}
-__initcall(uv_bau_init);
-__initcall(uv_ptc_init);
+core_initcall(uv_bau_init);
+core_initcall(uv_ptc_init);
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 1168e4454188..ec3b8ad0251a 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -15,6 +15,7 @@
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
+#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
@@ -108,15 +109,6 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
dec_preempt_count();
}
-#ifdef CONFIG_X86_32
-static inline void
-die_if_kernel(const char *str, struct pt_regs *regs, long err)
-{
- if (!user_mode_vm(regs))
- die(str, regs, err);
-}
-#endif
-
static void __kprobes
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
long error_code, siginfo_t *info)
@@ -400,7 +392,13 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
== NOTIFY_STOP)
return;
+
#ifdef CONFIG_X86_LOCAL_APIC
+ if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
+ == NOTIFY_STOP)
+ return;
+
+#ifndef CONFIG_NMI_WATCHDOG
/*
* Ok, so this is none of the documented NMI sources,
* so it must be the NMI watchdog.
@@ -408,6 +406,7 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
if (nmi_watchdog_tick(regs, reason))
return;
if (!do_nmi_callback(regs, cpu))
+#endif /* !CONFIG_NMI_WATCHDOG */
unknown_nmi_error(reason, regs);
#else
unknown_nmi_error(reason, regs);
@@ -460,6 +459,11 @@ void restart_nmi(void)
/* May run on IST stack. */
dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
+#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
+ if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
+ == NOTIFY_STOP)
+ return;
+#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
#ifdef CONFIG_KPROBES
if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
== NOTIFY_STOP)
@@ -543,11 +547,11 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
/* DR6 may or may not be cleared by the CPU */
set_debugreg(0, 6);
+
/*
* The processor cleared BTF, so don't mark that we need it set.
*/
- clear_tsk_thread_flag(tsk, TIF_DEBUGCTLMSR);
- tsk->thread.debugctlmsr = 0;
+ clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);
/* Store the virtualized DR6 value */
tsk->thread.debugreg6 = dr6;
@@ -585,55 +589,67 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
return;
}
-#ifdef CONFIG_X86_64
-static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
-{
- if (fixup_exception(regs))
- return 1;
-
- notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
- /* Illegal floating point operation in the kernel */
- current->thread.trap_no = trapnr;
- die(str, regs, 0);
- return 0;
-}
-#endif
-
/*
* Note that we play around with the 'TS' bit in an attempt to get
* the correct behaviour even in the presence of the asynchronous
* IRQ13 behaviour
*/
-void math_error(void __user *ip)
+void math_error(struct pt_regs *regs, int error_code, int trapnr)
{
- struct task_struct *task;
+ struct task_struct *task = current;
siginfo_t info;
- unsigned short cwd, swd, err;
+ unsigned short err;
+ char *str = (trapnr == 16) ? "fpu exception" : "simd exception";
+
+ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
+ return;
+ conditional_sti(regs);
+
+ if (!user_mode_vm(regs))
+ {
+ if (!fixup_exception(regs)) {
+ task->thread.error_code = error_code;
+ task->thread.trap_no = trapnr;
+ die(str, regs, error_code);
+ }
+ return;
+ }
/*
* Save the info for the exception handler and clear the error.
*/
- task = current;
save_init_fpu(task);
- task->thread.trap_no = 16;
- task->thread.error_code = 0;
+ task->thread.trap_no = trapnr;
+ task->thread.error_code = error_code;
info.si_signo = SIGFPE;
info.si_errno = 0;
- info.si_addr = ip;
- /*
- * (~cwd & swd) will mask out exceptions that are not set to unmasked
- * status. 0x3f is the exception bits in these regs, 0x200 is the
- * C1 reg you need in case of a stack fault, 0x040 is the stack
- * fault bit. We should only be taking one exception at a time,
- * so if this combination doesn't produce any single exception,
- * then we have a bad program that isn't synchronizing its FPU usage
- * and it will suffer the consequences since we won't be able to
- * fully reproduce the context of the exception
- */
- cwd = get_fpu_cwd(task);
- swd = get_fpu_swd(task);
+ info.si_addr = (void __user *)regs->ip;
+ if (trapnr == 16) {
+ unsigned short cwd, swd;
+ /*
+ * (~cwd & swd) will mask out exceptions that are not set to unmasked
+ * status. 0x3f is the exception bits in these regs, 0x200 is the
+ * C1 reg you need in case of a stack fault, 0x040 is the stack
+ * fault bit. We should only be taking one exception at a time,
+ * so if this combination doesn't produce any single exception,
+ * then we have a bad program that isn't synchronizing its FPU usage
+ * and it will suffer the consequences since we won't be able to
+ * fully reproduce the context of the exception
+ */
+ cwd = get_fpu_cwd(task);
+ swd = get_fpu_swd(task);
- err = swd & ~cwd;
+ err = swd & ~cwd;
+ } else {
+ /*
+ * The SIMD FPU exceptions are handled a little differently, as there
+ * is only a single status/control register. Thus, to determine which
+ * unmasked exception was caught we must mask the exception mask bits
+ * at 0x1f80, and then use these to mask the exception bits at 0x3f.
+ */
+ unsigned short mxcsr = get_fpu_mxcsr(task);
+ err = ~(mxcsr >> 7) & mxcsr;
+ }
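+ /*
+ * Worked example (illustrative): mxcsr = 0x1f01 leaves only the
+ * invalid-op exception unmasked and its flag bit set, so the
+ * expression above yields err with bit 0 set and FPE_FLTINV is
+ * reported below.
+ */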
if (err & 0x001) { /* Invalid op */
/*
@@ -662,97 +678,17 @@ void math_error(void __user *ip)
dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
- conditional_sti(regs);
-
#ifdef CONFIG_X86_32
ignore_fpu_irq = 1;
-#else
- if (!user_mode(regs) &&
- kernel_math_error(regs, "kernel x87 math error", 16))
- return;
#endif
- math_error((void __user *)regs->ip);
-}
-
-static void simd_math_error(void __user *ip)
-{
- struct task_struct *task;
- siginfo_t info;
- unsigned short mxcsr;
-
- /*
- * Save the info for the exception handler and clear the error.
- */
- task = current;
- save_init_fpu(task);
- task->thread.trap_no = 19;
- task->thread.error_code = 0;
- info.si_signo = SIGFPE;
- info.si_errno = 0;
- info.si_code = __SI_FAULT;
- info.si_addr = ip;
- /*
- * The SIMD FPU exceptions are handled a little differently, as there
- * is only a single status/control register. Thus, to determine which
- * unmasked exception was caught we must mask the exception mask bits
- * at 0x1f80, and then use these to mask the exception bits at 0x3f.
- */
- mxcsr = get_fpu_mxcsr(task);
- switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
- case 0x000:
- default:
- break;
- case 0x001: /* Invalid Op */
- info.si_code = FPE_FLTINV;
- break;
- case 0x002: /* Denormalize */
- case 0x010: /* Underflow */
- info.si_code = FPE_FLTUND;
- break;
- case 0x004: /* Zero Divide */
- info.si_code = FPE_FLTDIV;
- break;
- case 0x008: /* Overflow */
- info.si_code = FPE_FLTOVF;
- break;
- case 0x020: /* Precision */
- info.si_code = FPE_FLTRES;
- break;
- }
- force_sig_info(SIGFPE, &info, task);
+ math_error(regs, error_code, 16);
}
dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
- conditional_sti(regs);
-
-#ifdef CONFIG_X86_32
- if (cpu_has_xmm) {
- /* Handle SIMD FPU exceptions on PIII+ processors. */
- ignore_fpu_irq = 1;
- simd_math_error((void __user *)regs->ip);
- return;
- }
- /*
- * Handle strange cache flush from user space exception
- * in all other cases. This is undocumented behaviour.
- */
- if (regs->flags & X86_VM_MASK) {
- handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
- return;
- }
- current->thread.trap_no = 19;
- current->thread.error_code = error_code;
- die_if_kernel("cache flush denied", regs, error_code);
- force_sig(SIGSEGV, current);
-#else
- if (!user_mode(regs) &&
- kernel_math_error(regs, "kernel simd math error", 19))
- return;
- simd_math_error((void __user *)regs->ip);
-#endif
+ math_error(regs, error_code, 19);
}
dotraplinkage void
@@ -879,6 +815,16 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
}
#endif
+/* Set of traps needed for early debugging. */
+void __init early_trap_init(void)
+{
+ set_intr_gate_ist(1, &debug, DEBUG_STACK);
+ /* int3 can be called from all */
+ set_system_intr_gate_ist(3, &int3, DEBUG_STACK);
+ set_intr_gate(14, &page_fault);
+ load_idt(&idt_descr);
+}
+
void __init trap_init(void)
{
int i;
@@ -892,10 +838,7 @@ void __init trap_init(void)
#endif
set_intr_gate(0, &divide_error);
- set_intr_gate_ist(1, &debug, DEBUG_STACK);
set_intr_gate_ist(2, &nmi, NMI_STACK);
- /* int3 can be called from all */
- set_system_intr_gate_ist(3, &int3, DEBUG_STACK);
/* int4 can be called from all */
set_system_intr_gate(4, &overflow);
set_intr_gate(5, &bounds);
@@ -911,7 +854,6 @@ void __init trap_init(void)
set_intr_gate(11, &segment_not_present);
set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
set_intr_gate(13, &general_protection);
- set_intr_gate(14, &page_fault);
set_intr_gate(15, &spurious_interrupt_bug);
set_intr_gate(16, &coprocessor_error);
set_intr_gate(17, &alignment_check);
diff --git a/arch/x86/kernel/uv_irq.c b/arch/x86/kernel/uv_irq.c
index 1d40336b030a..1132129db792 100644
--- a/arch/x86/kernel/uv_irq.c
+++ b/arch/x86/kernel/uv_irq.c
@@ -44,7 +44,7 @@ static void uv_ack_apic(unsigned int irq)
ack_APIC_irq();
}
-struct irq_chip uv_irq_chip = {
+static struct irq_chip uv_irq_chip = {
.name = "UV-CORE",
.startup = uv_noop_ret,
.shutdown = uv_noop,
@@ -141,7 +141,7 @@ int uv_irq_2_mmr_info(int irq, unsigned long *offset, int *pnode)
*/
static int
arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
- unsigned long mmr_offset, int restrict)
+ unsigned long mmr_offset, int limit)
{
const struct cpumask *eligible_cpu = cpumask_of(cpu);
struct irq_desc *desc = irq_to_desc(irq);
@@ -160,7 +160,7 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
if (err != 0)
return err;
- if (restrict == UV_AFFINITY_CPU)
+ if (limit == UV_AFFINITY_CPU)
desc->status |= IRQ_NO_BALANCING;
else
desc->status |= IRQ_MOVE_PCNTXT;
@@ -214,7 +214,7 @@ static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask)
unsigned long mmr_value;
struct uv_IO_APIC_route_entry *entry;
unsigned long mmr_offset;
- unsigned mmr_pnode;
+ int mmr_pnode;
if (set_desc_affinity(desc, mask, &dest))
return -1;
@@ -248,7 +248,7 @@ static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask)
* interrupt is raised.
*/
int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
- unsigned long mmr_offset, int restrict)
+ unsigned long mmr_offset, int limit)
{
int irq, ret;
@@ -258,7 +258,7 @@ int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
return -EBUSY;
ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset,
- restrict);
+ limit);
if (ret == irq)
uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade);
else
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 2cc249718c46..d0bb52296fa3 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -97,7 +97,7 @@ SECTIONS
HEAD_TEXT
#ifdef CONFIG_X86_32
. = ALIGN(PAGE_SIZE);
- *(.text.page_aligned)
+ *(.text..page_aligned)
#endif
. = ALIGN(8);
_stext = .;
@@ -305,7 +305,7 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
__bss_start = .;
- *(.bss.page_aligned)
+ *(.bss..page_aligned)
*(.bss)
. = ALIGN(4);
__bss_stop = .;
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index 693920b22496..1b950d151e58 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -54,7 +54,6 @@ EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(__memcpy);
EXPORT_SYMBOL(empty_zero_page);
-EXPORT_SYMBOL(init_level4_pgt);
#ifndef CONFIG_PARAVIRT
EXPORT_SYMBOL(native_load_gs_index);
#endif
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 782c3a362ec6..37e68fc5e24a 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -99,7 +99,7 @@ int save_i387_xstate(void __user *buf)
if (err)
return err;
- if (task_thread_info(tsk)->status & TS_XSAVE)
+ if (use_xsave())
err = xsave_user(buf);
else
err = fxsave_user(buf);
@@ -109,14 +109,14 @@ int save_i387_xstate(void __user *buf)
task_thread_info(tsk)->status &= ~TS_USEDFPU;
stts();
} else {
- if (__copy_to_user(buf, &tsk->thread.xstate->fxsave,
+ if (__copy_to_user(buf, &tsk->thread.fpu.state->fxsave,
xstate_size))
return -1;
}
clear_used_math(); /* trigger finit */
- if (task_thread_info(tsk)->status & TS_XSAVE) {
+ if (use_xsave()) {
struct _fpstate __user *fx = buf;
struct _xstate __user *x = buf;
u64 xstate_bv;
@@ -225,7 +225,7 @@ int restore_i387_xstate(void __user *buf)
clts();
task_thread_info(current)->status |= TS_USEDFPU;
}
- if (task_thread_info(tsk)->status & TS_XSAVE)
+ if (use_xsave())
err = restore_user_xstate(buf);
else
err = fxrstor_checking((__force struct i387_fxsave_struct *)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 4dade6ac0827..5ac0bb465ed6 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -33,6 +33,7 @@
#include <asm/kvm_emulate.h>
#include "x86.h"
+#include "tss.h"
/*
* Opcode effective-address decode tables.
@@ -50,6 +51,8 @@
#define DstReg (2<<1) /* Register operand. */
#define DstMem (3<<1) /* Memory operand. */
#define DstAcc (4<<1) /* Destination Accumulator */
+#define DstDI (5<<1) /* Destination is in ES:(E)DI */
+#define DstMem64 (6<<1) /* 64bit memory operand */
#define DstMask (7<<1)
/* Source operand type. */
#define SrcNone (0<<4) /* No source operand. */
@@ -63,6 +66,7 @@
#define SrcOne (7<<4) /* Implied '1' */
#define SrcImmUByte (8<<4) /* 8-bit unsigned immediate operand. */
#define SrcImmU (9<<4) /* Immediate operand, unsigned */
+#define SrcSI (0xa<<4) /* Source is in the DS:RSI */
#define SrcMask (0xf<<4)
/* Generic ModRM decode. */
#define ModRM (1<<8)
@@ -85,6 +89,9 @@
#define Src2ImmByte (2<<29)
#define Src2One (3<<29)
#define Src2Imm16 (4<<29)
+#define Src2Mem16 (5<<29) /* Used for Ep encoding. First argument has to be
+ in memory and second argument is located
+ immediately after the first one in memory. */
#define Src2Mask (7<<29)
enum {
@@ -147,8 +154,8 @@ static u32 opcode_table[256] = {
0, 0, 0, 0,
/* 0x68 - 0x6F */
SrcImm | Mov | Stack, 0, SrcImmByte | Mov | Stack, 0,
- SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps, /* insb, insw/insd */
- SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps, /* outsb, outsw/outsd */
+ DstDI | ByteOp | Mov | String, DstDI | Mov | String, /* insb, insw/insd */
+ SrcSI | ByteOp | ImplicitOps | String, SrcSI | ImplicitOps | String, /* outsb, outsw/outsd */
/* 0x70 - 0x77 */
SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
@@ -173,12 +180,12 @@ static u32 opcode_table[256] = {
/* 0xA0 - 0xA7 */
ByteOp | DstReg | SrcMem | Mov | MemAbs, DstReg | SrcMem | Mov | MemAbs,
ByteOp | DstMem | SrcReg | Mov | MemAbs, DstMem | SrcReg | Mov | MemAbs,
- ByteOp | ImplicitOps | Mov | String, ImplicitOps | Mov | String,
- ByteOp | ImplicitOps | String, ImplicitOps | String,
+ ByteOp | SrcSI | DstDI | Mov | String, SrcSI | DstDI | Mov | String,
+ ByteOp | SrcSI | DstDI | String, SrcSI | DstDI | String,
/* 0xA8 - 0xAF */
- 0, 0, ByteOp | ImplicitOps | Mov | String, ImplicitOps | Mov | String,
- ByteOp | ImplicitOps | Mov | String, ImplicitOps | Mov | String,
- ByteOp | ImplicitOps | String, ImplicitOps | String,
+ 0, 0, ByteOp | DstDI | Mov | String, DstDI | Mov | String,
+ ByteOp | SrcSI | DstAcc | Mov | String, SrcSI | DstAcc | Mov | String,
+ ByteOp | DstDI | String, DstDI | String,
/* 0xB0 - 0xB7 */
ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
@@ -204,13 +211,13 @@ static u32 opcode_table[256] = {
0, 0, 0, 0, 0, 0, 0, 0,
/* 0xE0 - 0xE7 */
0, 0, 0, 0,
- ByteOp | SrcImmUByte, SrcImmUByte,
- ByteOp | SrcImmUByte, SrcImmUByte,
+ ByteOp | SrcImmUByte | DstAcc, SrcImmUByte | DstAcc,
+ ByteOp | SrcImmUByte | DstAcc, SrcImmUByte | DstAcc,
/* 0xE8 - 0xEF */
SrcImm | Stack, SrcImm | ImplicitOps,
SrcImmU | Src2Imm16 | No64, SrcImmByte | ImplicitOps,
- SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps,
- SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps,
+ SrcNone | ByteOp | DstAcc, SrcNone | DstAcc,
+ SrcNone | ByteOp | DstAcc, SrcNone | DstAcc,
/* 0xF0 - 0xF7 */
0, 0, 0, 0,
ImplicitOps | Priv, ImplicitOps, Group | Group3_Byte, Group | Group3,
@@ -343,7 +350,8 @@ static u32 group_table[] = {
[Group5*8] =
DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
SrcMem | ModRM | Stack, 0,
- SrcMem | ModRM | Stack, 0, SrcMem | ModRM | Stack, 0,
+ SrcMem | ModRM | Stack, SrcMem | ModRM | Src2Mem16 | ImplicitOps,
+ SrcMem | ModRM | Stack, 0,
[Group7*8] =
0, 0, ModRM | SrcMem | Priv, ModRM | SrcMem | Priv,
SrcNone | ModRM | DstMem | Mov, 0,
@@ -353,14 +361,14 @@ static u32 group_table[] = {
DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM | Lock,
DstMem | SrcImmByte | ModRM | Lock, DstMem | SrcImmByte | ModRM | Lock,
[Group9*8] =
- 0, ImplicitOps | ModRM | Lock, 0, 0, 0, 0, 0, 0,
+ 0, DstMem64 | ModRM | Lock, 0, 0, 0, 0, 0, 0,
};
static u32 group2_table[] = {
[Group7*8] =
- SrcNone | ModRM | Priv, 0, 0, SrcNone | ModRM,
+ SrcNone | ModRM | Priv, 0, 0, SrcNone | ModRM | Priv,
SrcNone | ModRM | DstMem | Mov, 0,
- SrcMem16 | ModRM | Mov, 0,
+ SrcMem16 | ModRM | Mov | Priv, 0,
[Group9*8] =
0, 0, 0, 0, 0, 0, 0, 0,
};
@@ -562,7 +570,7 @@ static u32 group2_table[] = {
#define insn_fetch(_type, _size, _eip) \
({ unsigned long _x; \
rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size)); \
- if (rc != 0) \
+ if (rc != X86EMUL_CONTINUE) \
goto done; \
(_eip) += (_size); \
(_type)_x; \
@@ -638,40 +646,40 @@ static unsigned long ss_base(struct x86_emulate_ctxt *ctxt)
static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
- unsigned long linear, u8 *dest)
+ unsigned long eip, u8 *dest)
{
struct fetch_cache *fc = &ctxt->decode.fetch;
int rc;
- int size;
+ int size, cur_size;
- if (linear < fc->start || linear >= fc->end) {
- size = min(15UL, PAGE_SIZE - offset_in_page(linear));
- rc = ops->fetch(linear, fc->data, size, ctxt->vcpu, NULL);
- if (rc)
+ if (eip == fc->end) {
+ cur_size = fc->end - fc->start;
+ size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
+ rc = ops->fetch(ctxt->cs_base + eip, fc->data + cur_size,
+ size, ctxt->vcpu, NULL);
+ if (rc != X86EMUL_CONTINUE)
return rc;
- fc->start = linear;
- fc->end = linear + size;
+ fc->end += size;
}
- *dest = fc->data[linear - fc->start];
- return 0;
+ *dest = fc->data[eip - fc->start];
+ return X86EMUL_CONTINUE;
}
static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
unsigned long eip, void *dest, unsigned size)
{
- int rc = 0;
+ int rc;
/* x86 instructions are limited to 15 bytes. */
- if (eip + size - ctxt->decode.eip_orig > 15)
+ if (eip + size - ctxt->eip > 15)
return X86EMUL_UNHANDLEABLE;
- eip += ctxt->cs_base;
while (size--) {
rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
- if (rc)
+ if (rc != X86EMUL_CONTINUE)
return rc;
}
- return 0;
+ return X86EMUL_CONTINUE;
}
/*
@@ -702,7 +710,7 @@ static int read_descriptor(struct x86_emulate_ctxt *ctxt,
*address = 0;
rc = ops->read_std((unsigned long)ptr, (unsigned long *)size, 2,
ctxt->vcpu, NULL);
- if (rc)
+ if (rc != X86EMUL_CONTINUE)
return rc;
rc = ops->read_std((unsigned long)ptr + 2, address, op_bytes,
ctxt->vcpu, NULL);
@@ -782,7 +790,7 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
struct decode_cache *c = &ctxt->decode;
u8 sib;
int index_reg = 0, base_reg = 0, scale;
- int rc = 0;
+ int rc = X86EMUL_CONTINUE;
if (c->rex_prefix) {
c->modrm_reg = (c->rex_prefix & 4) << 1; /* REX.R */
@@ -895,7 +903,7 @@ static int decode_abs(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops)
{
struct decode_cache *c = &ctxt->decode;
- int rc = 0;
+ int rc = X86EMUL_CONTINUE;
switch (c->ad_bytes) {
case 2:
@@ -916,14 +924,18 @@ int
x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
struct decode_cache *c = &ctxt->decode;
- int rc = 0;
+ int rc = X86EMUL_CONTINUE;
int mode = ctxt->mode;
int def_op_bytes, def_ad_bytes, group;
- /* Shadow copy of register state. Committed on successful emulation. */
+ /* we cannot decode an insn before we complete the previous rep insn */
+ WARN_ON(ctxt->restart);
+
+ /* Shadow copy of register state. Committed on successful emulation. */
memset(c, 0, sizeof(struct decode_cache));
- c->eip = c->eip_orig = kvm_rip_read(ctxt->vcpu);
+ c->eip = ctxt->eip;
+ c->fetch.start = c->fetch.end = c->eip;
ctxt->cs_base = seg_base(ctxt, VCPU_SREG_CS);
memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
@@ -1015,11 +1027,6 @@ done_prefixes:
}
}
- if (mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
- kvm_report_emulation_failure(ctxt->vcpu, "invalid x86/64 instruction");
- return -1;
- }
-
if (c->d & Group) {
group = c->d & GroupMask;
c->modrm = insn_fetch(u8, 1, c->eip);
@@ -1046,7 +1053,7 @@ done_prefixes:
rc = decode_modrm(ctxt, ops);
else if (c->d & MemAbs)
rc = decode_abs(ctxt, ops);
- if (rc)
+ if (rc != X86EMUL_CONTINUE)
goto done;
if (!c->has_seg_override)
@@ -1057,6 +1064,10 @@ done_prefixes:
if (c->ad_bytes != 8)
c->modrm_ea = (u32)c->modrm_ea;
+
+ if (c->rip_relative)
+ c->modrm_ea += c->eip;
+
/*
* Decode and fetch the source operand: register, memory
* or immediate.
@@ -1091,6 +1102,8 @@ done_prefixes:
break;
}
c->src.type = OP_MEM;
+ c->src.ptr = (unsigned long *)c->modrm_ea;
+ c->src.val = 0;
break;
case SrcImm:
case SrcImmU:
@@ -1139,6 +1152,14 @@ done_prefixes:
c->src.bytes = 1;
c->src.val = 1;
break;
+ case SrcSI:
+ c->src.type = OP_MEM;
+ c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
+ c->src.ptr = (unsigned long *)
+ register_address(c, seg_override_base(ctxt, c),
+ c->regs[VCPU_REGS_RSI]);
+ c->src.val = 0;
+ break;
}
/*
@@ -1168,6 +1189,12 @@ done_prefixes:
c->src2.bytes = 1;
c->src2.val = 1;
break;
+ case Src2Mem16:
+ c->src2.type = OP_MEM;
+ c->src2.bytes = 2;
+ c->src2.ptr = (unsigned long *)(c->modrm_ea + c->src.bytes);
+ c->src2.val = 0;
+ break;
}
/* Decode and fetch the destination operand: register or memory. */
@@ -1180,6 +1207,7 @@ done_prefixes:
c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
break;
case DstMem:
+ case DstMem64:
if ((c->d & ModRM) && c->modrm_mod == 3) {
c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
c->dst.type = OP_REG;
@@ -1188,12 +1216,24 @@ done_prefixes:
break;
}
c->dst.type = OP_MEM;
+ c->dst.ptr = (unsigned long *)c->modrm_ea;
+ if ((c->d & DstMask) == DstMem64)
+ c->dst.bytes = 8;
+ else
+ c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
+ c->dst.val = 0;
+ if (c->d & BitOp) {
+ unsigned long mask = ~(c->dst.bytes * 8 - 1);
+
+ c->dst.ptr = (void *)c->dst.ptr +
+ (c->src.val & mask) / 8;
+ }
break;
case DstAcc:
c->dst.type = OP_REG;
- c->dst.bytes = c->op_bytes;
+ c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
c->dst.ptr = &c->regs[VCPU_REGS_RAX];
- switch (c->op_bytes) {
+ switch (c->dst.bytes) {
case 1:
c->dst.val = *(u8 *)c->dst.ptr;
break;
@@ -1203,18 +1243,248 @@ done_prefixes:
case 4:
c->dst.val = *(u32 *)c->dst.ptr;
break;
+ case 8:
+ c->dst.val = *(u64 *)c->dst.ptr;
+ break;
}
c->dst.orig_val = c->dst.val;
break;
+ case DstDI:
+ c->dst.type = OP_MEM;
+ c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
+ c->dst.ptr = (unsigned long *)
+ register_address(c, es_base(ctxt),
+ c->regs[VCPU_REGS_RDI]);
+ c->dst.val = 0;
+ break;
}
- if (c->rip_relative)
- c->modrm_ea += c->eip;
-
done:
return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
}
+static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ unsigned int size, unsigned short port,
+ void *dest)
+{
+ struct read_cache *rc = &ctxt->decode.io_read;
+
+ if (rc->pos == rc->end) { /* refill pio read ahead */
+ struct decode_cache *c = &ctxt->decode;
+ unsigned int in_page, n;
+ unsigned int count = c->rep_prefix ?
+ address_mask(c, c->regs[VCPU_REGS_RCX]) : 1;
+ in_page = (ctxt->eflags & EFLG_DF) ?
+ offset_in_page(c->regs[VCPU_REGS_RDI]) :
+ PAGE_SIZE - offset_in_page(c->regs[VCPU_REGS_RDI]);
+ n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
+ count);
+ if (n == 0)
+ n = 1;
+ rc->pos = rc->end = 0;
+ if (!ops->pio_in_emulated(size, port, rc->data, n, ctxt->vcpu))
+ return 0;
+ rc->end = n * size;
+ }
+
+ memcpy(dest, rc->data + rc->pos, size);
+ rc->pos += size;
+ return 1;
+}
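+/*
+ * Illustrative example for the read-ahead above: for a "rep insb" with
+ * ECX = 100 and EDI well inside a page, the first call fills rc->data
+ * with up to 100 bytes through a single pio_in_emulated() callback;
+ * later iterations are then satisfied from the cached data without
+ * issuing further port reads.
+ */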
+
+static u32 desc_limit_scaled(struct desc_struct *desc)
+{
+ u32 limit = get_desc_limit(desc);
+
+ return desc->g ? (limit << 12) | 0xfff : limit;
+}
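+/*
+ * Illustrative: with the granularity bit set, a raw limit of 0xffff
+ * scales to 0x0fffffff above, i.e. a 256 MiB segment.
+ */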
+
+static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ u16 selector, struct desc_ptr *dt)
+{
+ if (selector & 1 << 2) {
+ struct desc_struct desc;
+ memset(dt, 0, sizeof *dt);
+ if (!ops->get_cached_descriptor(&desc, VCPU_SREG_LDTR, ctxt->vcpu))
+ return;
+
+ dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
+ dt->address = get_desc_base(&desc);
+ } else
+ ops->get_gdt(dt, ctxt->vcpu);
+}
+
+/* allowed just for 8 bytes segments */
+static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ u16 selector, struct desc_struct *desc)
+{
+ struct desc_ptr dt;
+ u16 index = selector >> 3;
+ int ret;
+ u32 err;
+ ulong addr;
+
+ get_descriptor_table_ptr(ctxt, ops, selector, &dt);
+
+ if (dt.size < index * 8 + 7) {
+ kvm_inject_gp(ctxt->vcpu, selector & 0xfffc);
+ return X86EMUL_PROPAGATE_FAULT;
+ }
+ addr = dt.address + index * 8;
+ ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
+ if (ret == X86EMUL_PROPAGATE_FAULT)
+ kvm_inject_page_fault(ctxt->vcpu, addr, err);
+
+ return ret;
+}
+
+/* allowed just for 8 bytes segments */
+static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ u16 selector, struct desc_struct *desc)
+{
+ struct desc_ptr dt;
+ u16 index = selector >> 3;
+ u32 err;
+ ulong addr;
+ int ret;
+
+ get_descriptor_table_ptr(ctxt, ops, selector, &dt);
+
+ if (dt.size < index * 8 + 7) {
+ kvm_inject_gp(ctxt->vcpu, selector & 0xfffc);
+ return X86EMUL_PROPAGATE_FAULT;
+ }
+
+ addr = dt.address + index * 8;
+ ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
+ if (ret == X86EMUL_PROPAGATE_FAULT)
+ kvm_inject_page_fault(ctxt->vcpu, addr, err);
+
+ return ret;
+}
+
+static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ u16 selector, int seg)
+{
+ struct desc_struct seg_desc;
+ u8 dpl, rpl, cpl;
+ unsigned err_vec = GP_VECTOR;
+ u32 err_code = 0;
+ bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
+ int ret;
+
+ memset(&seg_desc, 0, sizeof seg_desc);
+
+ if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
+ || ctxt->mode == X86EMUL_MODE_REAL) {
+ /* set real mode segment descriptor */
+ set_desc_base(&seg_desc, selector << 4);
+ set_desc_limit(&seg_desc, 0xffff);
+ seg_desc.type = 3;
+ seg_desc.p = 1;
+ seg_desc.s = 1;
+ goto load;
+ }
+
+ /* NULL selector is not valid for TR, CS and SS */
+ if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
+ && null_selector)
+ goto exception;
+
+ /* TR should be in GDT only */
+ if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
+ goto exception;
+
+ if (null_selector) /* for NULL selector skip all following checks */
+ goto load;
+
+ ret = read_segment_descriptor(ctxt, ops, selector, &seg_desc);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+
+ err_code = selector & 0xfffc;
+ err_vec = GP_VECTOR;
+
+ /* can't load system descriptor into segment selector */
+ if (seg <= VCPU_SREG_GS && !seg_desc.s)
+ goto exception;
+
+ if (!seg_desc.p) {
+ err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
+ goto exception;
+ }
+
+ rpl = selector & 3;
+ dpl = seg_desc.dpl;
+ cpl = ops->cpl(ctxt->vcpu);
+
+ switch (seg) {
+ case VCPU_SREG_SS:
+ /*
+ * segment is not a writable data segment, or segment
+ * selector's RPL != CPL, or descriptor's DPL != CPL
+ */
+ if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
+ goto exception;
+ break;
+ case VCPU_SREG_CS:
+ if (!(seg_desc.type & 8))
+ goto exception;
+
+ if (seg_desc.type & 4) {
+ /* conforming */
+ if (dpl > cpl)
+ goto exception;
+ } else {
+ /* nonconforming */
+ if (rpl > cpl || dpl != cpl)
+ goto exception;
+ }
+ /* CS(RPL) <- CPL */
+ selector = (selector & 0xfffc) | cpl;
+ break;
+ case VCPU_SREG_TR:
+ if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
+ goto exception;
+ break;
+ case VCPU_SREG_LDTR:
+ if (seg_desc.s || seg_desc.type != 2)
+ goto exception;
+ break;
+ default: /* DS, ES, FS, or GS */
+ /*
+ * segment is not a data or readable code segment or
+ * ((segment is a data or nonconforming code segment)
+ * and (both RPL and CPL > DPL))
+ */
+ if ((seg_desc.type & 0xa) == 0x8 ||
+ (((seg_desc.type & 0xc) != 0xc) &&
+ (rpl > dpl && cpl > dpl)))
+ goto exception;
+ break;
+ }
+
+ if (seg_desc.s) {
+ /* mark segment as accessed */
+ seg_desc.type |= 1;
+ ret = write_segment_descriptor(ctxt, ops, selector, &seg_desc);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ }
+load:
+ ops->set_segment_selector(selector, seg, ctxt->vcpu);
+ ops->set_cached_descriptor(&seg_desc, seg, ctxt->vcpu);
+ return X86EMUL_CONTINUE;
+exception:
+ kvm_queue_exception_e(ctxt->vcpu, err_vec, err_code);
+ return X86EMUL_PROPAGATE_FAULT;
+}
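+/*
+ * Illustrative: loading a selector with RPL 3 into SS while CPL is 0
+ * fails the rpl != cpl check above, so the load raises #GP with the
+ * selector (sans RPL bits) as the error code.
+ */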
+
static inline void emulate_push(struct x86_emulate_ctxt *ctxt)
{
struct decode_cache *c = &ctxt->decode;
@@ -1251,7 +1521,7 @@ static int emulate_popf(struct x86_emulate_ctxt *ctxt,
int rc;
unsigned long val, change_mask;
int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
- int cpl = kvm_x86_ops->get_cpl(ctxt->vcpu);
+ int cpl = ops->cpl(ctxt->vcpu);
rc = emulate_pop(ctxt, ops, &val, len);
if (rc != X86EMUL_CONTINUE)
@@ -1306,10 +1576,10 @@ static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
int rc;
rc = emulate_pop(ctxt, ops, &selector, c->op_bytes);
- if (rc != 0)
+ if (rc != X86EMUL_CONTINUE)
return rc;
- rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)selector, seg);
+ rc = load_segment_descriptor(ctxt, ops, (u16)selector, seg);
return rc;
}
@@ -1332,7 +1602,7 @@ static int emulate_popa(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops)
{
struct decode_cache *c = &ctxt->decode;
- int rc = 0;
+ int rc = X86EMUL_CONTINUE;
int reg = VCPU_REGS_RDI;
while (reg >= VCPU_REGS_RAX) {
@@ -1343,7 +1613,7 @@ static int emulate_popa(struct x86_emulate_ctxt *ctxt,
}
rc = emulate_pop(ctxt, ops, &c->regs[reg], c->op_bytes);
- if (rc != 0)
+ if (rc != X86EMUL_CONTINUE)
break;
--reg;
}
@@ -1354,12 +1624,8 @@ static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops)
{
struct decode_cache *c = &ctxt->decode;
- int rc;
- rc = emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes);
- if (rc != 0)
- return rc;
- return 0;
+ return emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes);
}
static inline void emulate_grp2(struct x86_emulate_ctxt *ctxt)
@@ -1395,7 +1661,6 @@ static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops)
{
struct decode_cache *c = &ctxt->decode;
- int rc = 0;
switch (c->modrm_reg) {
case 0 ... 1: /* test */
@@ -1408,11 +1673,9 @@ static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
emulate_1op("neg", c->dst, ctxt->eflags);
break;
default:
- DPRINTF("Cannot emulate %02x\n", c->b);
- rc = X86EMUL_UNHANDLEABLE;
- break;
+ return 0;
}
- return rc;
+ return 1;
}
static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
@@ -1442,20 +1705,14 @@ static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
emulate_push(ctxt);
break;
}
- return 0;
+ return X86EMUL_CONTINUE;
}
static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
- unsigned long memop)
+ struct x86_emulate_ops *ops)
{
struct decode_cache *c = &ctxt->decode;
- u64 old, new;
- int rc;
-
- rc = ops->read_emulated(memop, &old, 8, ctxt->vcpu);
- if (rc != X86EMUL_CONTINUE)
- return rc;
+ u64 old = c->dst.orig_val;
if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {
@@ -1463,17 +1720,13 @@ static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
ctxt->eflags &= ~EFLG_ZF;
-
} else {
- new = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
+ c->dst.val = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
(u32) c->regs[VCPU_REGS_RBX];
- rc = ops->cmpxchg_emulated(memop, &old, &new, 8, ctxt->vcpu);
- if (rc != X86EMUL_CONTINUE)
- return rc;
ctxt->eflags |= EFLG_ZF;
}
- return 0;
+ return X86EMUL_CONTINUE;
}
static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
@@ -1484,14 +1737,14 @@ static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
unsigned long cs;
rc = emulate_pop(ctxt, ops, &c->eip, c->op_bytes);
- if (rc)
+ if (rc != X86EMUL_CONTINUE)
return rc;
if (c->op_bytes == 4)
c->eip = (u32)c->eip;
rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
- if (rc)
+ if (rc != X86EMUL_CONTINUE)
return rc;
- rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)cs, VCPU_SREG_CS);
+ rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
return rc;
}
@@ -1544,7 +1797,7 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt,
default:
break;
}
- return 0;
+ return X86EMUL_CONTINUE;
}
static void toggle_interruptibility(struct x86_emulate_ctxt *ctxt, u32 mask)
@@ -1598,8 +1851,11 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt)
u64 msr_data;
/* syscall is not available in real mode */
- if (ctxt->mode == X86EMUL_MODE_REAL || ctxt->mode == X86EMUL_MODE_VM86)
- return X86EMUL_UNHANDLEABLE;
+ if (ctxt->mode == X86EMUL_MODE_REAL ||
+ ctxt->mode == X86EMUL_MODE_VM86) {
+ kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+ return X86EMUL_PROPAGATE_FAULT;
+ }
setup_syscalls_segments(ctxt, &cs, &ss);
@@ -1649,14 +1905,16 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt)
/* inject #GP if in real mode */
if (ctxt->mode == X86EMUL_MODE_REAL) {
kvm_inject_gp(ctxt->vcpu, 0);
- return X86EMUL_UNHANDLEABLE;
+ return X86EMUL_PROPAGATE_FAULT;
}
/* XXX sysenter/sysexit have not been tested in 64bit mode.
* Therefore, we inject an #UD.
*/
- if (ctxt->mode == X86EMUL_MODE_PROT64)
- return X86EMUL_UNHANDLEABLE;
+ if (ctxt->mode == X86EMUL_MODE_PROT64) {
+ kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+ return X86EMUL_PROPAGATE_FAULT;
+ }
setup_syscalls_segments(ctxt, &cs, &ss);
@@ -1711,7 +1969,7 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt)
if (ctxt->mode == X86EMUL_MODE_REAL ||
ctxt->mode == X86EMUL_MODE_VM86) {
kvm_inject_gp(ctxt->vcpu, 0);
- return X86EMUL_UNHANDLEABLE;
+ return X86EMUL_PROPAGATE_FAULT;
}
setup_syscalls_segments(ctxt, &cs, &ss);
@@ -1756,7 +2014,8 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}
-static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
+static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops)
{
int iopl;
if (ctxt->mode == X86EMUL_MODE_REAL)
@@ -1764,7 +2023,7 @@ static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
if (ctxt->mode == X86EMUL_MODE_VM86)
return true;
iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
- return kvm_x86_ops->get_cpl(ctxt->vcpu) > iopl;
+ return ops->cpl(ctxt->vcpu) > iopl;
}
static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
@@ -1801,22 +2060,419 @@ static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
u16 port, u16 len)
{
- if (emulator_bad_iopl(ctxt))
+ if (emulator_bad_iopl(ctxt, ops))
if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
return false;
return true;
}
+static u32 get_cached_descriptor_base(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ int seg)
+{
+ struct desc_struct desc;
+ if (ops->get_cached_descriptor(&desc, seg, ctxt->vcpu))
+ return get_desc_base(&desc);
+ else
+ return ~0;
+}
+
+static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ struct tss_segment_16 *tss)
+{
+ struct decode_cache *c = &ctxt->decode;
+
+ tss->ip = c->eip;
+ tss->flag = ctxt->eflags;
+ tss->ax = c->regs[VCPU_REGS_RAX];
+ tss->cx = c->regs[VCPU_REGS_RCX];
+ tss->dx = c->regs[VCPU_REGS_RDX];
+ tss->bx = c->regs[VCPU_REGS_RBX];
+ tss->sp = c->regs[VCPU_REGS_RSP];
+ tss->bp = c->regs[VCPU_REGS_RBP];
+ tss->si = c->regs[VCPU_REGS_RSI];
+ tss->di = c->regs[VCPU_REGS_RDI];
+
+ tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
+ tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
+ tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
+ tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
+ tss->ldt = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
+}
+
+static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ struct tss_segment_16 *tss)
+{
+ struct decode_cache *c = &ctxt->decode;
+ int ret;
+
+ c->eip = tss->ip;
+ ctxt->eflags = tss->flag | 2;
+ c->regs[VCPU_REGS_RAX] = tss->ax;
+ c->regs[VCPU_REGS_RCX] = tss->cx;
+ c->regs[VCPU_REGS_RDX] = tss->dx;
+ c->regs[VCPU_REGS_RBX] = tss->bx;
+ c->regs[VCPU_REGS_RSP] = tss->sp;
+ c->regs[VCPU_REGS_RBP] = tss->bp;
+ c->regs[VCPU_REGS_RSI] = tss->si;
+ c->regs[VCPU_REGS_RDI] = tss->di;
+
+ /*
+ * SDM says that segment selectors are loaded before segment
+ * descriptors
+ */
+ ops->set_segment_selector(tss->ldt, VCPU_SREG_LDTR, ctxt->vcpu);
+ ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
+ ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
+ ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
+ ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
+
+ /*
+ * Now load segment descriptors. If a fault happens at this stage
+ * it is handled in the context of the new task
+ */
+ ret = load_segment_descriptor(ctxt, ops, tss->ldt, VCPU_SREG_LDTR);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+
+ return X86EMUL_CONTINUE;
+}
+
+static int task_switch_16(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ u16 tss_selector, u16 old_tss_sel,
+ ulong old_tss_base, struct desc_struct *new_desc)
+{
+ struct tss_segment_16 tss_seg;
+ int ret;
+ u32 err, new_tss_base = get_desc_base(new_desc);
+
+ ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
+ &err);
+ if (ret == X86EMUL_PROPAGATE_FAULT) {
+ /* FIXME: need to provide precise fault address */
+ kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
+ return ret;
+ }
+
+ save_state_to_tss16(ctxt, ops, &tss_seg);
+
+ ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
+ &err);
+ if (ret == X86EMUL_PROPAGATE_FAULT) {
+ /* FIXME: need to provide precise fault address */
+ kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
+ return ret;
+ }
+
+ ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
+ &err);
+ if (ret == X86EMUL_PROPAGATE_FAULT) {
+ /* FIXME: need to provide precise fault address */
+ kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
+ return ret;
+ }
+
+ if (old_tss_sel != 0xffff) {
+ tss_seg.prev_task_link = old_tss_sel;
+
+ ret = ops->write_std(new_tss_base,
+ &tss_seg.prev_task_link,
+ sizeof tss_seg.prev_task_link,
+ ctxt->vcpu, &err);
+ if (ret == X86EMUL_PROPAGATE_FAULT) {
+ /* FIXME: need to provide precise fault address */
+ kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
+ return ret;
+ }
+ }
+
+ return load_state_from_tss16(ctxt, ops, &tss_seg);
+}
+
+static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ struct tss_segment_32 *tss)
+{
+ struct decode_cache *c = &ctxt->decode;
+
+ tss->cr3 = ops->get_cr(3, ctxt->vcpu);
+ tss->eip = c->eip;
+ tss->eflags = ctxt->eflags;
+ tss->eax = c->regs[VCPU_REGS_RAX];
+ tss->ecx = c->regs[VCPU_REGS_RCX];
+ tss->edx = c->regs[VCPU_REGS_RDX];
+ tss->ebx = c->regs[VCPU_REGS_RBX];
+ tss->esp = c->regs[VCPU_REGS_RSP];
+ tss->ebp = c->regs[VCPU_REGS_RBP];
+ tss->esi = c->regs[VCPU_REGS_RSI];
+ tss->edi = c->regs[VCPU_REGS_RDI];
+
+ tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
+ tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
+ tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
+ tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
+ tss->fs = ops->get_segment_selector(VCPU_SREG_FS, ctxt->vcpu);
+ tss->gs = ops->get_segment_selector(VCPU_SREG_GS, ctxt->vcpu);
+ tss->ldt_selector = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
+}
+
+static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ struct tss_segment_32 *tss)
+{
+ struct decode_cache *c = &ctxt->decode;
+ int ret;
+
+ ops->set_cr(3, tss->cr3, ctxt->vcpu);
+ c->eip = tss->eip;
+ ctxt->eflags = tss->eflags | 2;
+ c->regs[VCPU_REGS_RAX] = tss->eax;
+ c->regs[VCPU_REGS_RCX] = tss->ecx;
+ c->regs[VCPU_REGS_RDX] = tss->edx;
+ c->regs[VCPU_REGS_RBX] = tss->ebx;
+ c->regs[VCPU_REGS_RSP] = tss->esp;
+ c->regs[VCPU_REGS_RBP] = tss->ebp;
+ c->regs[VCPU_REGS_RSI] = tss->esi;
+ c->regs[VCPU_REGS_RDI] = tss->edi;
+
+ /*
+ * SDM says that segment selectors are loaded before segment
+ * descriptors
+ */
+ ops->set_segment_selector(tss->ldt_selector, VCPU_SREG_LDTR, ctxt->vcpu);
+ ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
+ ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
+ ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
+ ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
+ ops->set_segment_selector(tss->fs, VCPU_SREG_FS, ctxt->vcpu);
+ ops->set_segment_selector(tss->gs, VCPU_SREG_GS, ctxt->vcpu);
+
+ /*
+ * Now load segment descriptors. If a fault happens at this stage
+ * it is handled in the context of the new task
+ */
+ ret = load_segment_descriptor(ctxt, ops, tss->ldt_selector, VCPU_SREG_LDTR);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ ret = load_segment_descriptor(ctxt, ops, tss->fs, VCPU_SREG_FS);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ ret = load_segment_descriptor(ctxt, ops, tss->gs, VCPU_SREG_GS);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+
+ return X86EMUL_CONTINUE;
+}
+
+static int task_switch_32(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ u16 tss_selector, u16 old_tss_sel,
+ ulong old_tss_base, struct desc_struct *new_desc)
+{
+ struct tss_segment_32 tss_seg;
+ int ret;
+ u32 err, new_tss_base = get_desc_base(new_desc);
+
+ ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
+ &err);
+ if (ret == X86EMUL_PROPAGATE_FAULT) {
+ /* FIXME: need to provide precise fault address */
+ kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
+ return ret;
+ }
+
+ save_state_to_tss32(ctxt, ops, &tss_seg);
+
+ ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
+ &err);
+ if (ret == X86EMUL_PROPAGATE_FAULT) {
+ /* FIXME: need to provide precise fault address */
+ kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
+ return ret;
+ }
+
+ ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
+ &err);
+ if (ret == X86EMUL_PROPAGATE_FAULT) {
+ /* FIXME: need to provide precise fault address */
+ kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
+ return ret;
+ }
+
+ if (old_tss_sel != 0xffff) {
+ tss_seg.prev_task_link = old_tss_sel;
+
+ ret = ops->write_std(new_tss_base,
+ &tss_seg.prev_task_link,
+ sizeof tss_seg.prev_task_link,
+ ctxt->vcpu, &err);
+ if (ret == X86EMUL_PROPAGATE_FAULT) {
+ /* FIXME: need to provide precise fault address */
+ kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
+ return ret;
+ }
+ }
+
+ return load_state_from_tss32(ctxt, ops, &tss_seg);
+}
+
+static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ u16 tss_selector, int reason,
+ bool has_error_code, u32 error_code)
+{
+ struct desc_struct curr_tss_desc, next_tss_desc;
+ int ret;
+ u16 old_tss_sel = ops->get_segment_selector(VCPU_SREG_TR, ctxt->vcpu);
+ ulong old_tss_base =
+ get_cached_descriptor_base(ctxt, ops, VCPU_SREG_TR);
+ u32 desc_limit;
+
+ /* FIXME: old_tss_base == ~0 ? */
+
+ ret = read_segment_descriptor(ctxt, ops, tss_selector, &next_tss_desc);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ ret = read_segment_descriptor(ctxt, ops, old_tss_sel, &curr_tss_desc);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+
+ /* FIXME: check that next_tss_desc is tss */
+
+ if (reason != TASK_SWITCH_IRET) {
+ if ((tss_selector & 3) > next_tss_desc.dpl ||
+ ops->cpl(ctxt->vcpu) > next_tss_desc.dpl) {
+ kvm_inject_gp(ctxt->vcpu, 0);
+ return X86EMUL_PROPAGATE_FAULT;
+ }
+ }
+
+ desc_limit = desc_limit_scaled(&next_tss_desc);
+ if (!next_tss_desc.p ||
+ ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
+ desc_limit < 0x2b)) {
+ kvm_queue_exception_e(ctxt->vcpu, TS_VECTOR,
+ tss_selector & 0xfffc);
+ return X86EMUL_PROPAGATE_FAULT;
+ }
+
+ if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
+ curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
+ write_segment_descriptor(ctxt, ops, old_tss_sel,
+ &curr_tss_desc);
+ }
+
+ if (reason == TASK_SWITCH_IRET)
+ ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
+
+ /* set back link to prev task only if NT bit is set in eflags;
+ note that old_tss_sel is not used after this point */
+ if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
+ old_tss_sel = 0xffff;
+
+ if (next_tss_desc.type & 8)
+ ret = task_switch_32(ctxt, ops, tss_selector, old_tss_sel,
+ old_tss_base, &next_tss_desc);
+ else
+ ret = task_switch_16(ctxt, ops, tss_selector, old_tss_sel,
+ old_tss_base, &next_tss_desc);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+
+ if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
+ ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
+
+ if (reason != TASK_SWITCH_IRET) {
+ next_tss_desc.type |= (1 << 1); /* set busy flag */
+ write_segment_descriptor(ctxt, ops, tss_selector,
+ &next_tss_desc);
+ }
+
+ ops->set_cr(0, ops->get_cr(0, ctxt->vcpu) | X86_CR0_TS, ctxt->vcpu);
+ ops->set_cached_descriptor(&next_tss_desc, VCPU_SREG_TR, ctxt->vcpu);
+ ops->set_segment_selector(tss_selector, VCPU_SREG_TR, ctxt->vcpu);
+
+ if (has_error_code) {
+ struct decode_cache *c = &ctxt->decode;
+
+ c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
+ c->lock_prefix = 0;
+ c->src.val = (unsigned long) error_code;
+ emulate_push(ctxt);
+ }
+
+ return ret;
+}
+
+int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ u16 tss_selector, int reason,
+ bool has_error_code, u32 error_code)
+{
+ struct decode_cache *c = &ctxt->decode;
+ int rc;
+
+ memset(c, 0, sizeof(struct decode_cache));
+ c->eip = ctxt->eip;
+ memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
+ c->dst.type = OP_NONE;
+
+ rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason,
+ has_error_code, error_code);
+
+ if (rc == X86EMUL_CONTINUE) {
+ memcpy(ctxt->vcpu->arch.regs, c->regs, sizeof c->regs);
+ kvm_rip_write(ctxt->vcpu, c->eip);
+ rc = writeback(ctxt, ops);
+ }
+
+ return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
+}
+
+static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned long base,
+ int reg, struct operand *op)
+{
+ struct decode_cache *c = &ctxt->decode;
+ int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;
+
+ register_address_increment(c, &c->regs[reg], df * op->bytes);
+ op->ptr = (unsigned long *)register_address(c, base, c->regs[reg]);
+}
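+/*
+ * Illustrative: for a byte-sized string op with DF clear, each call
+ * advances the index register by 1 and recomputes op->ptr; with DF
+ * set the register is decremented instead, covering both directions.
+ */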
+
int
x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
- unsigned long memop = 0;
u64 msr_data;
- unsigned long saved_eip = 0;
struct decode_cache *c = &ctxt->decode;
- unsigned int port;
- int io_dir_in;
- int rc = 0;
+ int rc = X86EMUL_CONTINUE;
+ int saved_dst_type = c->dst.type;
ctxt->interruptibility = 0;
@@ -1826,26 +2482,30 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
*/
memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
- saved_eip = c->eip;
+
+ if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
+ kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+ goto done;
+ }
/* LOCK prefix is allowed only with some instructions */
- if (c->lock_prefix && !(c->d & Lock)) {
+ if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
goto done;
}
/* Privileged instruction can be executed only in CPL=0 */
- if ((c->d & Priv) && kvm_x86_ops->get_cpl(ctxt->vcpu)) {
+ if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) {
kvm_inject_gp(ctxt->vcpu, 0);
goto done;
}
- if (((c->d & ModRM) && (c->modrm_mod != 3)) || (c->d & MemAbs))
- memop = c->modrm_ea;
-
if (c->rep_prefix && (c->d & String)) {
+ ctxt->restart = true;
/* All REP prefixes have the same first termination condition */
- if (c->regs[VCPU_REGS_RCX] == 0) {
+ if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) {
+ string_done:
+ ctxt->restart = false;
kvm_rip_write(ctxt->vcpu, c->eip);
goto done;
}
@@ -1857,25 +2517,18 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
* - if REPNE/REPNZ and ZF = 1 then done
*/
if ((c->b == 0xa6) || (c->b == 0xa7) ||
- (c->b == 0xae) || (c->b == 0xaf)) {
+ (c->b == 0xae) || (c->b == 0xaf)) {
if ((c->rep_prefix == REPE_PREFIX) &&
- ((ctxt->eflags & EFLG_ZF) == 0)) {
- kvm_rip_write(ctxt->vcpu, c->eip);
- goto done;
- }
+ ((ctxt->eflags & EFLG_ZF) == 0))
+ goto string_done;
if ((c->rep_prefix == REPNE_PREFIX) &&
- ((ctxt->eflags & EFLG_ZF) == EFLG_ZF)) {
- kvm_rip_write(ctxt->vcpu, c->eip);
- goto done;
- }
+ ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))
+ goto string_done;
}
- c->regs[VCPU_REGS_RCX]--;
- c->eip = kvm_rip_read(ctxt->vcpu);
+ c->eip = ctxt->eip;
}
if (c->src.type == OP_MEM) {
- c->src.ptr = (unsigned long *)memop;
- c->src.val = 0;
rc = ops->read_emulated((unsigned long)c->src.ptr,
&c->src.val,
c->src.bytes,
@@ -1885,29 +2538,25 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
c->src.orig_val = c->src.val;
}
+ if (c->src2.type == OP_MEM) {
+ rc = ops->read_emulated((unsigned long)c->src2.ptr,
+ &c->src2.val,
+ c->src2.bytes,
+ ctxt->vcpu);
+ if (rc != X86EMUL_CONTINUE)
+ goto done;
+ }
+
if ((c->d & DstMask) == ImplicitOps)
goto special_insn;
- if (c->dst.type == OP_MEM) {
- c->dst.ptr = (unsigned long *)memop;
- c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
- c->dst.val = 0;
- if (c->d & BitOp) {
- unsigned long mask = ~(c->dst.bytes * 8 - 1);
-
- c->dst.ptr = (void *)c->dst.ptr +
- (c->src.val & mask) / 8;
- }
- if (!(c->d & Mov)) {
- /* optimisation - avoid slow emulated read */
- rc = ops->read_emulated((unsigned long)c->dst.ptr,
- &c->dst.val,
- c->dst.bytes,
- ctxt->vcpu);
- if (rc != X86EMUL_CONTINUE)
- goto done;
- }
+ if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
+ /* optimisation - avoid slow emulated read if Mov */
+ rc = ops->read_emulated((unsigned long)c->dst.ptr, &c->dst.val,
+ c->dst.bytes, ctxt->vcpu);
+ if (rc != X86EMUL_CONTINUE)
+ goto done;
}
c->dst.orig_val = c->dst.val;
@@ -1926,7 +2575,7 @@ special_insn:
break;
case 0x07: /* pop es */
rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES);
- if (rc != 0)
+ if (rc != X86EMUL_CONTINUE)
goto done;
break;
case 0x08 ... 0x0d:
@@ -1945,7 +2594,7 @@ special_insn:
break;
case 0x17: /* pop ss */
rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS);
- if (rc != 0)
+ if (rc != X86EMUL_CONTINUE)
goto done;
break;
case 0x18 ... 0x1d:
@@ -1957,7 +2606,7 @@ special_insn:
break;
case 0x1f: /* pop ds */
rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS);
- if (rc != 0)
+ if (rc != X86EMUL_CONTINUE)
goto done;
break;
case 0x20 ... 0x25:
@@ -1988,7 +2637,7 @@ special_insn:
case 0x58 ... 0x5f: /* pop reg */
pop_instruction:
rc = emulate_pop(ctxt, ops, &c->dst.val, c->op_bytes);
- if (rc != 0)
+ if (rc != X86EMUL_CONTINUE)
goto done;
break;
case 0x60: /* pusha */
@@ -1996,7 +2645,7 @@ special_insn:
break;
case 0x61: /* popa */
rc = emulate_popa(ctxt, ops);
- if (rc != 0)
+ if (rc != X86EMUL_CONTINUE)
goto done;
break;
case 0x63: /* movsxd */
@@ -2010,47 +2659,29 @@ special_insn:
break;
case 0x6c: /* insb */
case 0x6d: /* insw/insd */
+ c->dst.bytes = min(c->dst.bytes, 4u);
if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX],
- (c->d & ByteOp) ? 1 : c->op_bytes)) {
+ c->dst.bytes)) {
kvm_inject_gp(ctxt->vcpu, 0);
goto done;
}
- if (kvm_emulate_pio_string(ctxt->vcpu,
- 1,
- (c->d & ByteOp) ? 1 : c->op_bytes,
- c->rep_prefix ?
- address_mask(c, c->regs[VCPU_REGS_RCX]) : 1,
- (ctxt->eflags & EFLG_DF),
- register_address(c, es_base(ctxt),
- c->regs[VCPU_REGS_RDI]),
- c->rep_prefix,
- c->regs[VCPU_REGS_RDX]) == 0) {
- c->eip = saved_eip;
- return -1;
- }
- return 0;
+ if (!pio_in_emulated(ctxt, ops, c->dst.bytes,
+ c->regs[VCPU_REGS_RDX], &c->dst.val))
+ goto done; /* IO is needed, skip writeback */
+ break;
case 0x6e: /* outsb */
case 0x6f: /* outsw/outsd */
+ c->src.bytes = min(c->src.bytes, 4u);
if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX],
- (c->d & ByteOp) ? 1 : c->op_bytes)) {
+ c->src.bytes)) {
kvm_inject_gp(ctxt->vcpu, 0);
goto done;
}
- if (kvm_emulate_pio_string(ctxt->vcpu,
- 0,
- (c->d & ByteOp) ? 1 : c->op_bytes,
- c->rep_prefix ?
- address_mask(c, c->regs[VCPU_REGS_RCX]) : 1,
- (ctxt->eflags & EFLG_DF),
- register_address(c,
- seg_override_base(ctxt, c),
- c->regs[VCPU_REGS_RSI]),
- c->rep_prefix,
- c->regs[VCPU_REGS_RDX]) == 0) {
- c->eip = saved_eip;
- return -1;
- }
- return 0;
+ ops->pio_out_emulated(c->src.bytes, c->regs[VCPU_REGS_RDX],
+ &c->src.val, 1, ctxt->vcpu);
+
+ c->dst.type = OP_NONE; /* nothing to writeback */
+ break;
case 0x70 ... 0x7f: /* jcc (short) */
if (test_cc(c->b, ctxt->eflags))
jmp_rel(c, c->src.val);
@@ -2107,12 +2738,11 @@ special_insn:
case 0x8c: { /* mov r/m, sreg */
struct kvm_segment segreg;
- if (c->modrm_reg <= 5)
+ if (c->modrm_reg <= VCPU_SREG_GS)
kvm_get_segment(ctxt->vcpu, &segreg, c->modrm_reg);
else {
- printk(KERN_INFO "0x8c: Invalid segreg in modrm byte 0x%02x\n",
- c->modrm);
- goto cannot_emulate;
+ kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+ goto done;
}
c->dst.val = segreg.selector;
break;
@@ -2132,16 +2762,16 @@ special_insn:
}
if (c->modrm_reg == VCPU_SREG_SS)
- toggle_interruptibility(ctxt, X86_SHADOW_INT_MOV_SS);
+ toggle_interruptibility(ctxt, KVM_X86_SHADOW_INT_MOV_SS);
- rc = kvm_load_segment_descriptor(ctxt->vcpu, sel, c->modrm_reg);
+ rc = load_segment_descriptor(ctxt, ops, sel, c->modrm_reg);
c->dst.type = OP_NONE; /* Disable writeback. */
break;
}
case 0x8f: /* pop (sole member of Grp1a) */
rc = emulate_grp1a(ctxt, ops);
- if (rc != 0)
+ if (rc != X86EMUL_CONTINUE)
goto done;
break;
case 0x90: /* nop / xchg r8,rax */
@@ -2175,89 +2805,16 @@ special_insn:
c->dst.val = (unsigned long)c->regs[VCPU_REGS_RAX];
break;
case 0xa4 ... 0xa5: /* movs */
- c->dst.type = OP_MEM;
- c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
- c->dst.ptr = (unsigned long *)register_address(c,
- es_base(ctxt),
- c->regs[VCPU_REGS_RDI]);
- rc = ops->read_emulated(register_address(c,
- seg_override_base(ctxt, c),
- c->regs[VCPU_REGS_RSI]),
- &c->dst.val,
- c->dst.bytes, ctxt->vcpu);
- if (rc != X86EMUL_CONTINUE)
- goto done;
- register_address_increment(c, &c->regs[VCPU_REGS_RSI],
- (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
- : c->dst.bytes);
- register_address_increment(c, &c->regs[VCPU_REGS_RDI],
- (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
- : c->dst.bytes);
- break;
+ goto mov;
case 0xa6 ... 0xa7: /* cmps */
- c->src.type = OP_NONE; /* Disable writeback. */
- c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
- c->src.ptr = (unsigned long *)register_address(c,
- seg_override_base(ctxt, c),
- c->regs[VCPU_REGS_RSI]);
- rc = ops->read_emulated((unsigned long)c->src.ptr,
- &c->src.val,
- c->src.bytes,
- ctxt->vcpu);
- if (rc != X86EMUL_CONTINUE)
- goto done;
-
c->dst.type = OP_NONE; /* Disable writeback. */
- c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
- c->dst.ptr = (unsigned long *)register_address(c,
- es_base(ctxt),
- c->regs[VCPU_REGS_RDI]);
- rc = ops->read_emulated((unsigned long)c->dst.ptr,
- &c->dst.val,
- c->dst.bytes,
- ctxt->vcpu);
- if (rc != X86EMUL_CONTINUE)
- goto done;
-
DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.ptr, c->dst.ptr);
-
- emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
-
- register_address_increment(c, &c->regs[VCPU_REGS_RSI],
- (ctxt->eflags & EFLG_DF) ? -c->src.bytes
- : c->src.bytes);
- register_address_increment(c, &c->regs[VCPU_REGS_RDI],
- (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
- : c->dst.bytes);
-
- break;
+ goto cmp;
case 0xaa ... 0xab: /* stos */
- c->dst.type = OP_MEM;
- c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
- c->dst.ptr = (unsigned long *)register_address(c,
- es_base(ctxt),
- c->regs[VCPU_REGS_RDI]);
c->dst.val = c->regs[VCPU_REGS_RAX];
- register_address_increment(c, &c->regs[VCPU_REGS_RDI],
- (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
- : c->dst.bytes);
break;
case 0xac ... 0xad: /* lods */
- c->dst.type = OP_REG;
- c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
- c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
- rc = ops->read_emulated(register_address(c,
- seg_override_base(ctxt, c),
- c->regs[VCPU_REGS_RSI]),
- &c->dst.val,
- c->dst.bytes,
- ctxt->vcpu);
- if (rc != X86EMUL_CONTINUE)
- goto done;
- register_address_increment(c, &c->regs[VCPU_REGS_RSI],
- (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
- : c->dst.bytes);
- break;
+ goto mov;
case 0xae ... 0xaf: /* scas */
DPRINTF("Urk! I don't handle SCAS.\n");
goto cannot_emulate;
@@ -2277,7 +2834,7 @@ special_insn:
break;
case 0xcb: /* ret far */
rc = emulate_ret_far(ctxt, ops);
- if (rc)
+ if (rc != X86EMUL_CONTINUE)
goto done;
break;
case 0xd0 ... 0xd1: /* Grp2 */
@@ -2290,14 +2847,10 @@ special_insn:
break;
case 0xe4: /* inb */
case 0xe5: /* in */
- port = c->src.val;
- io_dir_in = 1;
- goto do_io;
+ goto do_io_in;
case 0xe6: /* outb */
case 0xe7: /* out */
- port = c->src.val;
- io_dir_in = 0;
- goto do_io;
+ goto do_io_out;
case 0xe8: /* call (near) */ {
long int rel = c->src.val;
c->src.val = (unsigned long) c->eip;
@@ -2308,8 +2861,9 @@ special_insn:
case 0xe9: /* jmp rel */
goto jmp;
case 0xea: /* jmp far */
- if (kvm_load_segment_descriptor(ctxt->vcpu, c->src2.val,
- VCPU_SREG_CS))
+ jump_far:
+ if (load_segment_descriptor(ctxt, ops, c->src2.val,
+ VCPU_SREG_CS))
goto done;
c->eip = c->src.val;
@@ -2321,25 +2875,29 @@ special_insn:
break;
case 0xec: /* in al,dx */
case 0xed: /* in (e/r)ax,dx */
- port = c->regs[VCPU_REGS_RDX];
- io_dir_in = 1;
- goto do_io;
+ c->src.val = c->regs[VCPU_REGS_RDX];
+ do_io_in:
+ c->dst.bytes = min(c->dst.bytes, 4u);
+ if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
+ kvm_inject_gp(ctxt->vcpu, 0);
+ goto done;
+ }
+ if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
+ &c->dst.val))
+ goto done; /* IO is needed */
+ break;
case 0xee: /* out al,dx */
case 0xef: /* out (e/r)ax,dx */
- port = c->regs[VCPU_REGS_RDX];
- io_dir_in = 0;
- do_io:
- if (!emulator_io_permited(ctxt, ops, port,
- (c->d & ByteOp) ? 1 : c->op_bytes)) {
+ c->src.val = c->regs[VCPU_REGS_RDX];
+ do_io_out:
+ c->dst.bytes = min(c->dst.bytes, 4u);
+ if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
kvm_inject_gp(ctxt->vcpu, 0);
goto done;
}
- if (kvm_emulate_pio(ctxt->vcpu, io_dir_in,
- (c->d & ByteOp) ? 1 : c->op_bytes,
- port) != 0) {
- c->eip = saved_eip;
- goto cannot_emulate;
- }
+ ops->pio_out_emulated(c->dst.bytes, c->src.val, &c->dst.val, 1,
+ ctxt->vcpu);
+ c->dst.type = OP_NONE; /* Disable writeback. */
break;
case 0xf4: /* hlt */
ctxt->vcpu->arch.halt_request = 1;
@@ -2350,16 +2908,15 @@ special_insn:
c->dst.type = OP_NONE; /* Disable writeback. */
break;
case 0xf6 ... 0xf7: /* Grp3 */
- rc = emulate_grp3(ctxt, ops);
- if (rc != 0)
- goto done;
+ if (!emulate_grp3(ctxt, ops))
+ goto cannot_emulate;
break;
case 0xf8: /* clc */
ctxt->eflags &= ~EFLG_CF;
c->dst.type = OP_NONE; /* Disable writeback. */
break;
case 0xfa: /* cli */
- if (emulator_bad_iopl(ctxt))
+ if (emulator_bad_iopl(ctxt, ops))
kvm_inject_gp(ctxt->vcpu, 0);
else {
ctxt->eflags &= ~X86_EFLAGS_IF;
@@ -2367,10 +2924,10 @@ special_insn:
}
break;
case 0xfb: /* sti */
- if (emulator_bad_iopl(ctxt))
+ if (emulator_bad_iopl(ctxt, ops))
kvm_inject_gp(ctxt->vcpu, 0);
else {
- toggle_interruptibility(ctxt, X86_SHADOW_INT_STI);
+ toggle_interruptibility(ctxt, KVM_X86_SHADOW_INT_STI);
ctxt->eflags |= X86_EFLAGS_IF;
c->dst.type = OP_NONE; /* Disable writeback. */
}
@@ -2383,28 +2940,55 @@ special_insn:
ctxt->eflags |= EFLG_DF;
c->dst.type = OP_NONE; /* Disable writeback. */
break;
- case 0xfe ... 0xff: /* Grp4/Grp5 */
+ case 0xfe: /* Grp4 */
+ grp45:
rc = emulate_grp45(ctxt, ops);
- if (rc != 0)
+ if (rc != X86EMUL_CONTINUE)
goto done;
break;
+ case 0xff: /* Grp5 */
+ if (c->modrm_reg == 5)
+ goto jump_far;
+ goto grp45;
}
writeback:
rc = writeback(ctxt, ops);
- if (rc != 0)
+ if (rc != X86EMUL_CONTINUE)
goto done;
+ /*
+ * restore dst type in case the decoding will be reused
+	 * (happens for string instructions)
+ */
+ c->dst.type = saved_dst_type;
+
+ if ((c->d & SrcMask) == SrcSI)
+ string_addr_inc(ctxt, seg_override_base(ctxt, c), VCPU_REGS_RSI,
+ &c->src);
+
+ if ((c->d & DstMask) == DstDI)
+ string_addr_inc(ctxt, es_base(ctxt), VCPU_REGS_RDI, &c->dst);
+
+ if (c->rep_prefix && (c->d & String)) {
+ struct read_cache *rc = &ctxt->decode.io_read;
+ register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
+ /*
+ * Re-enter guest when pio read ahead buffer is empty or,
+	 * if it is not used, after every 1024 iterations.
+ */
+ if ((rc->end == 0 && !(c->regs[VCPU_REGS_RCX] & 0x3ff)) ||
+ (rc->end != 0 && rc->end == rc->pos))
+ ctxt->restart = false;
+ }
+
/* Commit shadow register state. */
memcpy(ctxt->vcpu->arch.regs, c->regs, sizeof c->regs);
kvm_rip_write(ctxt->vcpu, c->eip);
+ ops->set_rflags(ctxt->vcpu, ctxt->eflags);
done:
- if (rc == X86EMUL_UNHANDLEABLE) {
- c->eip = saved_eip;
- return -1;
- }
- return 0;
+ return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
twobyte_insn:
switch (c->b) {
@@ -2418,18 +3002,18 @@ twobyte_insn:
goto cannot_emulate;
rc = kvm_fix_hypercall(ctxt->vcpu);
- if (rc)
+ if (rc != X86EMUL_CONTINUE)
goto done;
/* Let the processor re-execute the fixed hypercall */
- c->eip = kvm_rip_read(ctxt->vcpu);
+ c->eip = ctxt->eip;
/* Disable writeback. */
c->dst.type = OP_NONE;
break;
case 2: /* lgdt */
rc = read_descriptor(ctxt, ops, c->src.ptr,
&size, &address, c->op_bytes);
- if (rc)
+ if (rc != X86EMUL_CONTINUE)
goto done;
realmode_lgdt(ctxt->vcpu, size, address);
/* Disable writeback. */
@@ -2440,7 +3024,7 @@ twobyte_insn:
switch (c->modrm_rm) {
case 1:
rc = kvm_fix_hypercall(ctxt->vcpu);
- if (rc)
+ if (rc != X86EMUL_CONTINUE)
goto done;
break;
default:
@@ -2450,7 +3034,7 @@ twobyte_insn:
rc = read_descriptor(ctxt, ops, c->src.ptr,
&size, &address,
c->op_bytes);
- if (rc)
+ if (rc != X86EMUL_CONTINUE)
goto done;
realmode_lidt(ctxt->vcpu, size, address);
}
@@ -2459,15 +3043,18 @@ twobyte_insn:
break;
case 4: /* smsw */
c->dst.bytes = 2;
- c->dst.val = realmode_get_cr(ctxt->vcpu, 0);
+ c->dst.val = ops->get_cr(0, ctxt->vcpu);
break;
case 6: /* lmsw */
- realmode_lmsw(ctxt->vcpu, (u16)c->src.val,
- &ctxt->eflags);
+ ops->set_cr(0, (ops->get_cr(0, ctxt->vcpu) & ~0x0ful) |
+ (c->src.val & 0x0f), ctxt->vcpu);
c->dst.type = OP_NONE;
break;
+ case 5: /* not defined */
+ kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+ goto done;
case 7: /* invlpg*/
- emulate_invlpg(ctxt->vcpu, memop);
+ emulate_invlpg(ctxt->vcpu, c->modrm_ea);
/* Disable writeback. */
c->dst.type = OP_NONE;
break;
@@ -2493,54 +3080,54 @@ twobyte_insn:
c->dst.type = OP_NONE;
break;
case 0x20: /* mov cr, reg */
- if (c->modrm_mod != 3)
- goto cannot_emulate;
- c->regs[c->modrm_rm] =
- realmode_get_cr(ctxt->vcpu, c->modrm_reg);
+ switch (c->modrm_reg) {
+ case 1:
+ case 5 ... 7:
+ case 9 ... 15:
+ kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+ goto done;
+ }
+ c->regs[c->modrm_rm] = ops->get_cr(c->modrm_reg, ctxt->vcpu);
c->dst.type = OP_NONE; /* no writeback */
break;
case 0x21: /* mov from dr to reg */
- if (c->modrm_mod != 3)
- goto cannot_emulate;
- rc = emulator_get_dr(ctxt, c->modrm_reg, &c->regs[c->modrm_rm]);
- if (rc)
- goto cannot_emulate;
+ if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
+ (c->modrm_reg == 4 || c->modrm_reg == 5)) {
+ kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+ goto done;
+ }
+ emulator_get_dr(ctxt, c->modrm_reg, &c->regs[c->modrm_rm]);
c->dst.type = OP_NONE; /* no writeback */
break;
case 0x22: /* mov reg, cr */
- if (c->modrm_mod != 3)
- goto cannot_emulate;
- realmode_set_cr(ctxt->vcpu,
- c->modrm_reg, c->modrm_val, &ctxt->eflags);
+ ops->set_cr(c->modrm_reg, c->modrm_val, ctxt->vcpu);
c->dst.type = OP_NONE;
break;
case 0x23: /* mov from reg to dr */
- if (c->modrm_mod != 3)
- goto cannot_emulate;
- rc = emulator_set_dr(ctxt, c->modrm_reg,
- c->regs[c->modrm_rm]);
- if (rc)
- goto cannot_emulate;
+ if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
+ (c->modrm_reg == 4 || c->modrm_reg == 5)) {
+ kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+ goto done;
+ }
+ emulator_set_dr(ctxt, c->modrm_reg, c->regs[c->modrm_rm]);
c->dst.type = OP_NONE; /* no writeback */
break;
case 0x30:
/* wrmsr */
msr_data = (u32)c->regs[VCPU_REGS_RAX]
| ((u64)c->regs[VCPU_REGS_RDX] << 32);
- rc = kvm_set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data);
- if (rc) {
+ if (kvm_set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) {
kvm_inject_gp(ctxt->vcpu, 0);
- c->eip = kvm_rip_read(ctxt->vcpu);
+ goto done;
}
rc = X86EMUL_CONTINUE;
c->dst.type = OP_NONE;
break;
case 0x32:
/* rdmsr */
- rc = kvm_get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data);
- if (rc) {
+ if (kvm_get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) {
kvm_inject_gp(ctxt->vcpu, 0);
- c->eip = kvm_rip_read(ctxt->vcpu);
+ goto done;
} else {
c->regs[VCPU_REGS_RAX] = (u32)msr_data;
c->regs[VCPU_REGS_RDX] = msr_data >> 32;
@@ -2577,7 +3164,7 @@ twobyte_insn:
break;
case 0xa1: /* pop fs */
rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
- if (rc != 0)
+ if (rc != X86EMUL_CONTINUE)
goto done;
break;
case 0xa3:
@@ -2596,7 +3183,7 @@ twobyte_insn:
break;
case 0xa9: /* pop gs */
rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
- if (rc != 0)
+ if (rc != X86EMUL_CONTINUE)
goto done;
break;
case 0xab:
@@ -2668,16 +3255,14 @@ twobyte_insn:
(u64) c->src.val;
break;
case 0xc7: /* Grp9 (cmpxchg8b) */
- rc = emulate_grp9(ctxt, ops, memop);
- if (rc != 0)
+ rc = emulate_grp9(ctxt, ops);
+ if (rc != X86EMUL_CONTINUE)
goto done;
- c->dst.type = OP_NONE;
break;
}
goto writeback;
cannot_emulate:
DPRINTF("Cannot emulate %02x\n", c->b);
- c->eip = saved_eip;
return -1;
}
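The writeback path added above decrements RCX once per REP iteration and clears ctxt->restart only when emulation should drop back into the guest: either the pio read-ahead cache has been drained, or no cache is in use and another 1024 iterations have elapsed. A minimal standalone sketch of that re-entry test follows; the struct and function names are illustrative, not the emulator's, and RCX is assumed to have already been decremented, as in the patch.

#include <stdbool.h>
#include <stdint.h>

struct read_cache { unsigned int pos, end; };	/* end == 0: cache unused */

/*
 * True when a REP string instruction should stop restarting and re-enter
 * the guest: the read-ahead buffer is drained, or (with no buffering)
 * 1024 iterations have passed so one long REP cannot hog the emulator.
 */
static bool should_reenter_guest(const struct read_cache *rc, uint64_t rcx)
{
	if (rc->end == 0)
		return (rcx & 0x3ff) == 0;
	return rc->pos == rc->end;
}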
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index a790fa128a9f..93825ff3338f 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -33,6 +33,29 @@
#include <linux/kvm_host.h>
#include "trace.h"
+static void pic_lock(struct kvm_pic *s)
+ __acquires(&s->lock)
+{
+ raw_spin_lock(&s->lock);
+}
+
+static void pic_unlock(struct kvm_pic *s)
+ __releases(&s->lock)
+{
+ bool wakeup = s->wakeup_needed;
+ struct kvm_vcpu *vcpu;
+
+ s->wakeup_needed = false;
+
+ raw_spin_unlock(&s->lock);
+
+ if (wakeup) {
+ vcpu = s->kvm->bsp_vcpu;
+ if (vcpu)
+ kvm_vcpu_kick(vcpu);
+ }
+}
+
static void pic_clear_isr(struct kvm_kpic_state *s, int irq)
{
s->isr &= ~(1 << irq);
@@ -45,19 +68,19 @@ static void pic_clear_isr(struct kvm_kpic_state *s, int irq)
* Other interrupts may be delivered to the PIC while the lock is dropped, but
* it should be safe since PIC state is already updated at this stage.
*/
- raw_spin_unlock(&s->pics_state->lock);
+ pic_unlock(s->pics_state);
kvm_notify_acked_irq(s->pics_state->kvm, SELECT_PIC(irq), irq);
- raw_spin_lock(&s->pics_state->lock);
+ pic_lock(s->pics_state);
}
void kvm_pic_clear_isr_ack(struct kvm *kvm)
{
struct kvm_pic *s = pic_irqchip(kvm);
- raw_spin_lock(&s->lock);
+ pic_lock(s);
s->pics[0].isr_ack = 0xff;
s->pics[1].isr_ack = 0xff;
- raw_spin_unlock(&s->lock);
+ pic_unlock(s);
}
/*
@@ -158,9 +181,9 @@ static void pic_update_irq(struct kvm_pic *s)
void kvm_pic_update_irq(struct kvm_pic *s)
{
- raw_spin_lock(&s->lock);
+ pic_lock(s);
pic_update_irq(s);
- raw_spin_unlock(&s->lock);
+ pic_unlock(s);
}
int kvm_pic_set_irq(void *opaque, int irq, int level)
@@ -168,14 +191,14 @@ int kvm_pic_set_irq(void *opaque, int irq, int level)
struct kvm_pic *s = opaque;
int ret = -1;
- raw_spin_lock(&s->lock);
+ pic_lock(s);
if (irq >= 0 && irq < PIC_NUM_PINS) {
ret = pic_set_irq1(&s->pics[irq >> 3], irq & 7, level);
pic_update_irq(s);
trace_kvm_pic_set_irq(irq >> 3, irq & 7, s->pics[irq >> 3].elcr,
s->pics[irq >> 3].imr, ret == 0);
}
- raw_spin_unlock(&s->lock);
+ pic_unlock(s);
return ret;
}
@@ -205,7 +228,7 @@ int kvm_pic_read_irq(struct kvm *kvm)
int irq, irq2, intno;
struct kvm_pic *s = pic_irqchip(kvm);
- raw_spin_lock(&s->lock);
+ pic_lock(s);
irq = pic_get_irq(&s->pics[0]);
if (irq >= 0) {
pic_intack(&s->pics[0], irq);
@@ -230,7 +253,7 @@ int kvm_pic_read_irq(struct kvm *kvm)
intno = s->pics[0].irq_base + irq;
}
pic_update_irq(s);
- raw_spin_unlock(&s->lock);
+ pic_unlock(s);
return intno;
}
@@ -444,7 +467,7 @@ static int picdev_write(struct kvm_io_device *this,
printk(KERN_ERR "PIC: non byte write\n");
return 0;
}
- raw_spin_lock(&s->lock);
+ pic_lock(s);
switch (addr) {
case 0x20:
case 0x21:
@@ -457,7 +480,7 @@ static int picdev_write(struct kvm_io_device *this,
elcr_ioport_write(&s->pics[addr & 1], addr, data);
break;
}
- raw_spin_unlock(&s->lock);
+ pic_unlock(s);
return 0;
}
@@ -474,7 +497,7 @@ static int picdev_read(struct kvm_io_device *this,
printk(KERN_ERR "PIC: non byte read\n");
return 0;
}
- raw_spin_lock(&s->lock);
+ pic_lock(s);
switch (addr) {
case 0x20:
case 0x21:
@@ -488,7 +511,7 @@ static int picdev_read(struct kvm_io_device *this,
break;
}
*(unsigned char *)val = data;
- raw_spin_unlock(&s->lock);
+ pic_unlock(s);
return 0;
}
@@ -505,7 +528,7 @@ static void pic_irq_request(void *opaque, int level)
s->output = level;
if (vcpu && level && (s->pics[0].isr_ack & (1 << irq))) {
s->pics[0].isr_ack &= ~(1 << irq);
- kvm_vcpu_kick(vcpu);
+ s->wakeup_needed = true;
}
}
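pic_unlock() above records, while the raw spinlock is still held, whether a vcpu kick is pending and only issues the kick after the lock has been dropped, so kvm_vcpu_kick() never runs inside the critical section. A simplified userspace analogue of that deferred-wakeup pattern, with a pthread mutex standing in for the raw spinlock (all names here are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct pic_state {
	pthread_mutex_t lock;
	bool wakeup_needed;		/* set by the irq path under the lock */
};

static void kick_vcpu(void)		/* stand-in for kvm_vcpu_kick() */
{
	puts("vcpu kicked");
}

static void pic_unlock(struct pic_state *s)
{
	bool wakeup = s->wakeup_needed;	/* snapshot while still locked */

	s->wakeup_needed = false;
	pthread_mutex_unlock(&s->lock);

	if (wakeup)			/* act outside the critical section */
		kick_vcpu();
}

The same shape applies whenever the wakeup primitive might sleep or re-acquire the lock: decide under the lock, act after releasing it.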
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index 34b15915754d..cd1f362f413d 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -63,6 +63,7 @@ struct kvm_kpic_state {
struct kvm_pic {
raw_spinlock_t lock;
+ bool wakeup_needed;
unsigned pending_acks;
struct kvm *kvm;
struct kvm_kpic_state pics[2]; /* 0 is master pic, 1 is slave pic */
diff --git a/arch/x86/kvm/kvm_timer.h b/arch/x86/kvm/kvm_timer.h
index 55c7524dda54..64bc6ea78d90 100644
--- a/arch/x86/kvm/kvm_timer.h
+++ b/arch/x86/kvm/kvm_timer.h
@@ -10,9 +10,7 @@ struct kvm_timer {
};
struct kvm_timer_ops {
- bool (*is_periodic)(struct kvm_timer *);
+ bool (*is_periodic)(struct kvm_timer *);
};
-
enum hrtimer_restart kvm_timer_fn(struct hrtimer *data);
-
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 19a8906bcaa2..51eb6d6abd86 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -148,7 +148,6 @@ module_param(oos_shadow, bool, 0644);
#include <trace/events/kvm.h>
-#undef TRACE_INCLUDE_FILE
#define CREATE_TRACE_POINTS
#include "mmutrace.h"
@@ -174,12 +173,7 @@ struct kvm_shadow_walk_iterator {
shadow_walk_okay(&(_walker)); \
shadow_walk_next(&(_walker)))
-
-struct kvm_unsync_walk {
- int (*entry) (struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk);
-};
-
-typedef int (*mmu_parent_walk_fn) (struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp);
+typedef int (*mmu_parent_walk_fn) (struct kvm_mmu_page *sp);
static struct kmem_cache *pte_chain_cache;
static struct kmem_cache *rmap_desc_cache;
@@ -327,7 +321,6 @@ static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
page = alloc_page(GFP_KERNEL);
if (!page)
return -ENOMEM;
- set_page_private(page, 0);
cache->objects[cache->nobjs++] = page_address(page);
}
return 0;
@@ -438,9 +431,9 @@ static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
int i;
gfn = unalias_gfn(kvm, gfn);
+ slot = gfn_to_memslot_unaliased(kvm, gfn);
for (i = PT_DIRECTORY_LEVEL;
i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
- slot = gfn_to_memslot_unaliased(kvm, gfn);
write_count = slot_largepage_idx(gfn, slot, i);
*write_count -= 1;
WARN_ON(*write_count < 0);
@@ -654,7 +647,6 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
{
struct kvm_rmap_desc *desc;
- struct kvm_rmap_desc *prev_desc;
u64 *prev_spte;
int i;
@@ -666,7 +658,6 @@ static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
return NULL;
}
desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
- prev_desc = NULL;
prev_spte = NULL;
while (desc) {
for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i) {
@@ -794,7 +785,7 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
int retval = 0;
struct kvm_memslots *slots;
- slots = rcu_dereference(kvm->memslots);
+ slots = kvm_memslots(kvm);
for (i = 0; i < slots->nmemslots; i++) {
struct kvm_memory_slot *memslot = &slots->memslots[i];
@@ -925,7 +916,6 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
- INIT_LIST_HEAD(&sp->oos_link);
bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
sp->multimapped = 0;
sp->parent_pte = parent_pte;
@@ -1009,8 +999,7 @@ static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
}
-static void mmu_parent_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
- mmu_parent_walk_fn fn)
+static void mmu_parent_walk(struct kvm_mmu_page *sp, mmu_parent_walk_fn fn)
{
struct kvm_pte_chain *pte_chain;
struct hlist_node *node;
@@ -1019,8 +1008,8 @@ static void mmu_parent_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
if (!sp->multimapped && sp->parent_pte) {
parent_sp = page_header(__pa(sp->parent_pte));
- fn(vcpu, parent_sp);
- mmu_parent_walk(vcpu, parent_sp, fn);
+ fn(parent_sp);
+ mmu_parent_walk(parent_sp, fn);
return;
}
hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
@@ -1028,8 +1017,8 @@ static void mmu_parent_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
if (!pte_chain->parent_ptes[i])
break;
parent_sp = page_header(__pa(pte_chain->parent_ptes[i]));
- fn(vcpu, parent_sp);
- mmu_parent_walk(vcpu, parent_sp, fn);
+ fn(parent_sp);
+ mmu_parent_walk(parent_sp, fn);
}
}
@@ -1066,16 +1055,15 @@ static void kvm_mmu_update_parents_unsync(struct kvm_mmu_page *sp)
}
}
-static int unsync_walk_fn(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+static int unsync_walk_fn(struct kvm_mmu_page *sp)
{
kvm_mmu_update_parents_unsync(sp);
return 1;
}
-static void kvm_mmu_mark_parents_unsync(struct kvm_vcpu *vcpu,
- struct kvm_mmu_page *sp)
+static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
{
- mmu_parent_walk(vcpu, sp, unsync_walk_fn);
+ mmu_parent_walk(sp, unsync_walk_fn);
kvm_mmu_update_parents_unsync(sp);
}
@@ -1201,6 +1189,7 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
WARN_ON(!sp->unsync);
+ trace_kvm_mmu_sync_page(sp);
sp->unsync = 0;
--kvm->stat.mmu_unsync;
}
@@ -1209,12 +1198,11 @@ static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp);
static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
- if (sp->role.glevels != vcpu->arch.mmu.root_level) {
+ if (sp->role.cr4_pae != !!is_pae(vcpu)) {
kvm_mmu_zap_page(vcpu->kvm, sp);
return 1;
}
- trace_kvm_mmu_sync_page(sp);
if (rmap_write_protect(vcpu->kvm, sp->gfn))
kvm_flush_remote_tlbs(vcpu->kvm);
kvm_unlink_unsync_page(vcpu->kvm, sp);
@@ -1331,6 +1319,8 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
role = vcpu->arch.mmu.base_role;
role.level = level;
role.direct = direct;
+ if (role.direct)
+ role.cr4_pae = 0;
role.access = access;
if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
@@ -1351,7 +1341,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
mmu_page_add_parent_pte(vcpu, sp, parent_pte);
if (sp->unsync_children) {
set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
- kvm_mmu_mark_parents_unsync(vcpu, sp);
+ kvm_mmu_mark_parents_unsync(sp);
}
trace_kvm_mmu_get_page(sp, false);
return sp;
@@ -1573,13 +1563,14 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
r = 0;
index = kvm_page_table_hashfn(gfn);
bucket = &kvm->arch.mmu_page_hash[index];
+restart:
hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
if (sp->gfn == gfn && !sp->role.direct) {
pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
sp->role.word);
r = 1;
if (kvm_mmu_zap_page(kvm, sp))
- n = bucket->first;
+ goto restart;
}
return r;
}
@@ -1593,13 +1584,14 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
index = kvm_page_table_hashfn(gfn);
bucket = &kvm->arch.mmu_page_hash[index];
+restart:
hlist_for_each_entry_safe(sp, node, nn, bucket, hash_link) {
if (sp->gfn == gfn && !sp->role.direct
&& !sp->role.invalid) {
pgprintk("%s: zap %lx %x\n",
__func__, gfn, sp->role.word);
if (kvm_mmu_zap_page(kvm, sp))
- nn = bucket->first;
+ goto restart;
}
}
}
@@ -1626,20 +1618,6 @@ static void mmu_convert_notrap(struct kvm_mmu_page *sp)
}
}
-struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
-{
- struct page *page;
-
- gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
-
- if (gpa == UNMAPPED_GVA)
- return NULL;
-
- page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
-
- return page;
-}
-
/*
* The function is based on mtrr_type_lookup() in
* arch/x86/kernel/cpu/mtrr/generic.c
@@ -1752,7 +1730,6 @@ static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
struct kvm_mmu_page *s;
struct hlist_node *node, *n;
- trace_kvm_mmu_unsync_page(sp);
index = kvm_page_table_hashfn(sp->gfn);
bucket = &vcpu->kvm->arch.mmu_page_hash[index];
/* don't unsync if pagetable is shadowed with multiple roles */
@@ -1762,10 +1739,11 @@ static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
if (s->role.word != sp->role.word)
return 1;
}
+ trace_kvm_mmu_unsync_page(sp);
++vcpu->kvm->stat.mmu_unsync;
sp->unsync = 1;
- kvm_mmu_mark_parents_unsync(vcpu, sp);
+ kvm_mmu_mark_parents_unsync(sp);
mmu_convert_notrap(sp);
return 0;
@@ -2081,10 +2059,12 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
hpa_t root = vcpu->arch.mmu.root_hpa;
ASSERT(!VALID_PAGE(root));
- if (tdp_enabled)
- direct = 1;
if (mmu_check_root(vcpu, root_gfn))
return 1;
+ if (tdp_enabled) {
+ direct = 1;
+ root_gfn = 0;
+ }
sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
PT64_ROOT_LEVEL, direct,
ACC_ALL, NULL);
@@ -2094,8 +2074,6 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
return 0;
}
direct = !is_paging(vcpu);
- if (tdp_enabled)
- direct = 1;
for (i = 0; i < 4; ++i) {
hpa_t root = vcpu->arch.mmu.pae_root[i];
@@ -2111,6 +2089,10 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
root_gfn = 0;
if (mmu_check_root(vcpu, root_gfn))
return 1;
+ if (tdp_enabled) {
+ direct = 1;
+ root_gfn = i << 30;
+ }
sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
PT32_ROOT_LEVEL, direct,
ACC_ALL, NULL);
@@ -2299,13 +2281,19 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
/* no rsvd bits for 2 level 4K page table entries */
context->rsvd_bits_mask[0][1] = 0;
context->rsvd_bits_mask[0][0] = 0;
+ context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
+
+ if (!is_pse(vcpu)) {
+ context->rsvd_bits_mask[1][1] = 0;
+ break;
+ }
+
if (is_cpuid_PSE36())
/* 36bits PSE 4MB page */
context->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
else
/* 32 bits PSE 4MB page */
context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
- context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[1][0];
break;
case PT32E_ROOT_LEVEL:
context->rsvd_bits_mask[0][2] =
@@ -2318,7 +2306,7 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
rsvd_bits(maxphyaddr, 62) |
rsvd_bits(13, 20); /* large page */
- context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[1][0];
+ context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
break;
case PT64_ROOT_LEVEL:
context->rsvd_bits_mask[0][3] = exb_bit_rsvd |
@@ -2336,7 +2324,7 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
rsvd_bits(maxphyaddr, 51) |
rsvd_bits(13, 20); /* large page */
- context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[1][0];
+ context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
break;
}
}
@@ -2438,7 +2426,7 @@ static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
else
r = paging32_init_context(vcpu);
- vcpu->arch.mmu.base_role.glevels = vcpu->arch.mmu.root_level;
+ vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
return r;
}
@@ -2527,7 +2515,7 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
}
++vcpu->kvm->stat.mmu_pte_updated;
- if (sp->role.glevels == PT32_ROOT_LEVEL)
+ if (!sp->role.cr4_pae)
paging32_update_pte(vcpu, sp, spte, new);
else
paging64_update_pte(vcpu, sp, spte, new);
@@ -2562,36 +2550,11 @@ static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
}
static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
- const u8 *new, int bytes)
+ u64 gpte)
{
gfn_t gfn;
- int r;
- u64 gpte = 0;
pfn_t pfn;
- if (bytes != 4 && bytes != 8)
- return;
-
- /*
- * Assume that the pte write on a page table of the same type
- * as the current vcpu paging mode. This is nearly always true
- * (might be false while changing modes). Note it is verified later
- * by update_pte().
- */
- if (is_pae(vcpu)) {
- /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
- if ((bytes == 4) && (gpa % 4 == 0)) {
- r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
- if (r)
- return;
- memcpy((void *)&gpte + (gpa % 8), new, 4);
- } else if ((bytes == 8) && (gpa % 8 == 0)) {
- memcpy((void *)&gpte, new, 8);
- }
- } else {
- if ((bytes == 4) && (gpa % 4 == 0))
- memcpy((void *)&gpte, new, 4);
- }
if (!is_present_gpte(gpte))
return;
gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
@@ -2640,10 +2603,46 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
int flooded = 0;
int npte;
int r;
+ int invlpg_counter;
pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
- mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
+
+ invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
+
+ /*
+	 * Assume that the pte write is on a page table of the same type
+ * as the current vcpu paging mode. This is nearly always true
+ * (might be false while changing modes). Note it is verified later
+ * by update_pte().
+ */
+ if ((is_pae(vcpu) && bytes == 4) || !new) {
+ /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
+ if (is_pae(vcpu)) {
+ gpa &= ~(gpa_t)7;
+ bytes = 8;
+ }
+ r = kvm_read_guest(vcpu->kvm, gpa, &gentry, min(bytes, 8));
+ if (r)
+ gentry = 0;
+ new = (const u8 *)&gentry;
+ }
+
+ switch (bytes) {
+ case 4:
+ gentry = *(const u32 *)new;
+ break;
+ case 8:
+ gentry = *(const u64 *)new;
+ break;
+ default:
+ gentry = 0;
+ break;
+ }
+
+ mmu_guess_page_from_pte_write(vcpu, gpa, gentry);
spin_lock(&vcpu->kvm->mmu_lock);
+ if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
+ gentry = 0;
kvm_mmu_access_page(vcpu, gfn);
kvm_mmu_free_some_pages(vcpu);
++vcpu->kvm->stat.mmu_pte_write;
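The hunk above samples arch.invlpg_counter before mmu_lock is taken and, once the lock is held, discards the prefetched guest pte if the counter has moved, i.e. if an emulated INVLPG (which increments the counter in FNAME(invlpg) later in this patch) raced with the pte write. A miniature of that optimistic-read-then-revalidate shape, using a pthread mutex and C11 atomics purely for illustration:

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>

static pthread_mutex_t mmu_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int invlpg_counter;

/* Do the slow guest-memory read outside the lock, then revalidate. */
static uint64_t guarded_pte_update(uint64_t (*read_gpte)(void))
{
	int seen = atomic_load(&invlpg_counter);
	uint64_t gentry = read_gpte();		/* may sleep, so unlocked */

	pthread_mutex_lock(&mmu_lock);
	if (atomic_load(&invlpg_counter) != seen)
		gentry = 0;			/* raced with invlpg: drop it */
	/* ... update shadow ptes from gentry here ... */
	pthread_mutex_unlock(&mmu_lock);

	return gentry;
}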
@@ -2662,10 +2661,12 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
}
index = kvm_page_table_hashfn(gfn);
bucket = &vcpu->kvm->arch.mmu_page_hash[index];
+
+restart:
hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
if (sp->gfn != gfn || sp->role.direct || sp->role.invalid)
continue;
- pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
+ pte_size = sp->role.cr4_pae ? 8 : 4;
misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
misaligned |= bytes < 4;
if (misaligned || flooded) {
@@ -2682,14 +2683,14 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
pgprintk("misaligned: gpa %llx bytes %d role %x\n",
gpa, bytes, sp->role.word);
if (kvm_mmu_zap_page(vcpu->kvm, sp))
- n = bucket->first;
+ goto restart;
++vcpu->kvm->stat.mmu_flooded;
continue;
}
page_offset = offset;
level = sp->role.level;
npte = 1;
- if (sp->role.glevels == PT32_ROOT_LEVEL) {
+ if (!sp->role.cr4_pae) {
page_offset <<= 1; /* 32->64 */
/*
* A 32-bit pde maps 4MB while the shadow pdes map
@@ -2707,20 +2708,11 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
continue;
}
spte = &sp->spt[page_offset / sizeof(*spte)];
- if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
- gentry = 0;
- r = kvm_read_guest_atomic(vcpu->kvm,
- gpa & ~(u64)(pte_size - 1),
- &gentry, pte_size);
- new = (const void *)&gentry;
- if (r < 0)
- new = NULL;
- }
while (npte--) {
entry = *spte;
mmu_pte_write_zap_pte(vcpu, sp, spte);
- if (new)
- mmu_pte_write_new_pte(vcpu, sp, spte, new);
+ if (gentry)
+ mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
mmu_pte_write_flush_tlb(vcpu, entry, *spte);
++spte;
}
@@ -2900,22 +2892,23 @@ void kvm_mmu_zap_all(struct kvm *kvm)
struct kvm_mmu_page *sp, *node;
spin_lock(&kvm->mmu_lock);
+restart:
list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
if (kvm_mmu_zap_page(kvm, sp))
- node = container_of(kvm->arch.active_mmu_pages.next,
- struct kvm_mmu_page, link);
+ goto restart;
+
spin_unlock(&kvm->mmu_lock);
kvm_flush_remote_tlbs(kvm);
}
-static void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
+static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm)
{
struct kvm_mmu_page *page;
page = container_of(kvm->arch.active_mmu_pages.prev,
struct kvm_mmu_page, link);
- kvm_mmu_zap_page(kvm, page);
+ return kvm_mmu_zap_page(kvm, page) + 1;
}
static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
@@ -2927,7 +2920,7 @@ static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
spin_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list) {
- int npages, idx;
+ int npages, idx, freed_pages;
idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
@@ -2935,8 +2928,8 @@ static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
kvm->arch.n_free_mmu_pages;
cache_count += npages;
if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
- kvm_mmu_remove_one_alloc_mmu_page(kvm);
- cache_count--;
+ freed_pages = kvm_mmu_remove_some_alloc_mmu_pages(kvm);
+ cache_count -= freed_pages;
kvm_freed = kvm;
}
nr_to_scan--;
@@ -3011,7 +3004,8 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
unsigned int nr_pages = 0;
struct kvm_memslots *slots;
- slots = rcu_dereference(kvm->memslots);
+ slots = kvm_memslots(kvm);
+
for (i = 0; i < slots->nmemslots; i++)
nr_pages += slots->memslots[i].npages;
@@ -3174,8 +3168,7 @@ static gva_t canonicalize(gva_t gva)
}
-typedef void (*inspect_spte_fn) (struct kvm *kvm, struct kvm_mmu_page *sp,
- u64 *sptep);
+typedef void (*inspect_spte_fn) (struct kvm *kvm, u64 *sptep);
static void __mmu_spte_walk(struct kvm *kvm, struct kvm_mmu_page *sp,
inspect_spte_fn fn)
@@ -3191,7 +3184,7 @@ static void __mmu_spte_walk(struct kvm *kvm, struct kvm_mmu_page *sp,
child = page_header(ent & PT64_BASE_ADDR_MASK);
__mmu_spte_walk(kvm, child, fn);
} else
- fn(kvm, sp, &sp->spt[i]);
+ fn(kvm, &sp->spt[i]);
}
}
}
@@ -3282,11 +3275,13 @@ static void audit_mappings(struct kvm_vcpu *vcpu)
static int count_rmaps(struct kvm_vcpu *vcpu)
{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_memslots *slots;
int nmaps = 0;
int i, j, k, idx;
idx = srcu_read_lock(&kvm->srcu);
- slots = rcu_dereference(kvm->memslots);
+ slots = kvm_memslots(kvm);
for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
struct kvm_memory_slot *m = &slots->memslots[i];
struct kvm_rmap_desc *d;
@@ -3315,7 +3310,7 @@ static int count_rmaps(struct kvm_vcpu *vcpu)
return nmaps;
}
-void inspect_spte_has_rmap(struct kvm *kvm, struct kvm_mmu_page *sp, u64 *sptep)
+void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
{
unsigned long *rmapp;
struct kvm_mmu_page *rev_sp;
@@ -3331,14 +3326,14 @@ void inspect_spte_has_rmap(struct kvm *kvm, struct kvm_mmu_page *sp, u64 *sptep)
printk(KERN_ERR "%s: no memslot for gfn %ld\n",
audit_msg, gfn);
printk(KERN_ERR "%s: index %ld of sp (gfn=%lx)\n",
- audit_msg, sptep - rev_sp->spt,
+ audit_msg, (long int)(sptep - rev_sp->spt),
rev_sp->gfn);
dump_stack();
return;
}
rmapp = gfn_to_rmap(kvm, rev_sp->gfns[sptep - rev_sp->spt],
- is_large_pte(*sptep));
+ rev_sp->role.level);
if (!*rmapp) {
if (!printk_ratelimit())
return;
@@ -3373,7 +3368,7 @@ static void check_writable_mappings_rmap(struct kvm_vcpu *vcpu)
continue;
if (!(ent & PT_WRITABLE_MASK))
continue;
- inspect_spte_has_rmap(vcpu->kvm, sp, &pt[i]);
+ inspect_spte_has_rmap(vcpu->kvm, &pt[i]);
}
}
return;
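Several hunks above replace the old "n = bucket->first" and "node = container_of(...)" fix-ups with a goto restart after kvm_mmu_zap_page(): zapping one shadow page can recursively zap others, so any saved iteration cursor may point at freed memory. Reduced to a generic list walk, the pattern looks like the sketch below; remove_entry() is only a placeholder for kvm_mmu_zap_page() and, like it, reports whether anything beyond the requested entry may have gone away.

#include <stdbool.h>
#include <stdlib.h>

struct entry { struct entry *next; unsigned long gfn; };

/*
 * Stand-in for kvm_mmu_zap_page(): the real function can recursively zap
 * further pages, which is exactly why callers must not trust a saved
 * "next" pointer once it says so.  This toy version never cascades.
 */
static bool remove_entry(struct entry **head, struct entry *e)
{
	for (struct entry **pp = head; *pp; pp = &(*pp)->next) {
		if (*pp == e) {
			*pp = e->next;
			free(e);
			break;
		}
	}
	return false;
}

static void zap_matching(struct entry **head, unsigned long gfn)
{
	struct entry *e, *next;

restart:
	for (e = *head; e; e = next) {
		next = e->next;
		if (e->gfn != gfn)
			continue;
		if (remove_entry(head, e))
			goto restart;	/* cursor may be stale: rescan */
	}
}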
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
index 3e4a5c6ca2a9..42f07b1bfbc9 100644
--- a/arch/x86/kvm/mmutrace.h
+++ b/arch/x86/kvm/mmutrace.h
@@ -6,14 +6,12 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvmmmu
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE mmutrace
#define KVM_MMU_PAGE_FIELDS \
__field(__u64, gfn) \
__field(__u32, role) \
__field(__u32, root_count) \
- __field(__u32, unsync)
+ __field(bool, unsync)
#define KVM_MMU_PAGE_ASSIGN(sp) \
__entry->gfn = sp->gfn; \
@@ -30,14 +28,14 @@
\
role.word = __entry->role; \
\
- trace_seq_printf(p, "sp gfn %llx %u/%u q%u%s %s%s %spge" \
+ trace_seq_printf(p, "sp gfn %llx %u%s q%u%s %s%s" \
" %snxe root %u %s%c", \
- __entry->gfn, role.level, role.glevels, \
+ __entry->gfn, role.level, \
+ role.cr4_pae ? " pae" : "", \
role.quadrant, \
role.direct ? " direct" : "", \
access_str[role.access], \
role.invalid ? " invalid" : "", \
- role.cr4_pge ? "" : "!", \
role.nxe ? "" : "!", \
__entry->root_count, \
__entry->unsync ? "unsync" : "sync", 0); \
@@ -94,15 +92,15 @@ TRACE_EVENT(
TP_printk("pte %llx level %u", __entry->pte, __entry->level)
);
-/* We set a pte accessed bit */
-TRACE_EVENT(
- kvm_mmu_set_accessed_bit,
+DECLARE_EVENT_CLASS(kvm_mmu_set_bit_class,
+
TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),
+
TP_ARGS(table_gfn, index, size),
TP_STRUCT__entry(
__field(__u64, gpa)
- ),
+ ),
TP_fast_assign(
__entry->gpa = ((u64)table_gfn << PAGE_SHIFT)
@@ -112,22 +110,20 @@ TRACE_EVENT(
TP_printk("gpa %llx", __entry->gpa)
);
-/* We set a pte dirty bit */
-TRACE_EVENT(
- kvm_mmu_set_dirty_bit,
+/* We set a pte accessed bit */
+DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_accessed_bit,
+
TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),
- TP_ARGS(table_gfn, index, size),
- TP_STRUCT__entry(
- __field(__u64, gpa)
- ),
+ TP_ARGS(table_gfn, index, size)
+);
- TP_fast_assign(
- __entry->gpa = ((u64)table_gfn << PAGE_SHIFT)
- + index * size;
- ),
+/* We set a pte dirty bit */
+DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_dirty_bit,
- TP_printk("gpa %llx", __entry->gpa)
+ TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),
+
+ TP_ARGS(table_gfn, index, size)
);
TRACE_EVENT(
@@ -166,55 +162,45 @@ TRACE_EVENT(
__entry->created ? "new" : "existing")
);
-TRACE_EVENT(
- kvm_mmu_sync_page,
+DECLARE_EVENT_CLASS(kvm_mmu_page_class,
+
TP_PROTO(struct kvm_mmu_page *sp),
TP_ARGS(sp),
TP_STRUCT__entry(
KVM_MMU_PAGE_FIELDS
- ),
+ ),
TP_fast_assign(
KVM_MMU_PAGE_ASSIGN(sp)
- ),
+ ),
TP_printk("%s", KVM_MMU_PAGE_PRINTK())
);
-TRACE_EVENT(
- kvm_mmu_unsync_page,
+DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_sync_page,
TP_PROTO(struct kvm_mmu_page *sp),
- TP_ARGS(sp),
-
- TP_STRUCT__entry(
- KVM_MMU_PAGE_FIELDS
- ),
- TP_fast_assign(
- KVM_MMU_PAGE_ASSIGN(sp)
- ),
-
- TP_printk("%s", KVM_MMU_PAGE_PRINTK())
+ TP_ARGS(sp)
);
-TRACE_EVENT(
- kvm_mmu_zap_page,
+DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_unsync_page,
TP_PROTO(struct kvm_mmu_page *sp),
- TP_ARGS(sp),
- TP_STRUCT__entry(
- KVM_MMU_PAGE_FIELDS
- ),
+ TP_ARGS(sp)
+);
- TP_fast_assign(
- KVM_MMU_PAGE_ASSIGN(sp)
- ),
+DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_zap_page,
+ TP_PROTO(struct kvm_mmu_page *sp),
- TP_printk("%s", KVM_MMU_PAGE_PRINTK())
+ TP_ARGS(sp)
);
-
#endif /* _TRACE_KVMMMU_H */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE mmutrace
+
/* This part must be outside protection */
#include <trace/define_trace.h>
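The mmutrace.h hunks above collapse three near-identical TRACE_EVENT() definitions into one DECLARE_EVENT_CLASS() plus short DEFINE_EVENT() stanzas. Stripped down to a single gfn field (the real class also records role, root_count and unsync), the resulting pattern is the following fragment, which assumes the usual trace-header boilerplate around it:

DECLARE_EVENT_CLASS(kvm_mmu_page_class,
	TP_PROTO(struct kvm_mmu_page *sp),
	TP_ARGS(sp),

	TP_STRUCT__entry(
		__field(__u64, gfn)
	),

	TP_fast_assign(
		__entry->gfn = sp->gfn;
	),

	TP_printk("gfn %llx", __entry->gfn)
);

/* Each concrete tracepoint now only restates its prototype and args. */
DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_sync_page,
	TP_PROTO(struct kvm_mmu_page *sp),
	TP_ARGS(sp)
);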
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 81eab9a50e6a..89d66ca4d87c 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -170,7 +170,7 @@ walk:
goto access_error;
#if PTTYPE == 64
- if (fetch_fault && is_nx(vcpu) && (pte & PT64_NX_MASK))
+ if (fetch_fault && (pte & PT64_NX_MASK))
goto access_error;
#endif
@@ -190,10 +190,10 @@ walk:
if ((walker->level == PT_PAGE_TABLE_LEVEL) ||
((walker->level == PT_DIRECTORY_LEVEL) &&
- (pte & PT_PAGE_SIZE_MASK) &&
+ is_large_pte(pte) &&
(PTTYPE == 64 || is_pse(vcpu))) ||
((walker->level == PT_PDPE_LEVEL) &&
- (pte & PT_PAGE_SIZE_MASK) &&
+ is_large_pte(pte) &&
is_long_mode(vcpu))) {
int lvl = walker->level;
@@ -258,11 +258,17 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
pt_element_t gpte;
unsigned pte_access;
pfn_t pfn;
+ u64 new_spte;
gpte = *(const pt_element_t *)pte;
if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
- if (!is_present_gpte(gpte))
- __set_spte(spte, shadow_notrap_nonpresent_pte);
+ if (!is_present_gpte(gpte)) {
+ if (page->unsync)
+ new_spte = shadow_trap_nonpresent_pte;
+ else
+ new_spte = shadow_notrap_nonpresent_pte;
+ __set_spte(spte, new_spte);
+ }
return;
}
pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
@@ -457,6 +463,7 @@ out_unlock:
static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
struct kvm_shadow_walk_iterator iterator;
+ gpa_t pte_gpa = -1;
int level;
u64 *sptep;
int need_flush = 0;
@@ -467,9 +474,16 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
level = iterator.level;
sptep = iterator.sptep;
- if (level == PT_PAGE_TABLE_LEVEL ||
- ((level == PT_DIRECTORY_LEVEL && is_large_pte(*sptep))) ||
- ((level == PT_PDPE_LEVEL && is_large_pte(*sptep)))) {
+ if (is_last_spte(*sptep, level)) {
+ struct kvm_mmu_page *sp = page_header(__pa(sptep));
+ int offset, shift;
+
+ shift = PAGE_SHIFT -
+ (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
+ offset = sp->role.quadrant << shift;
+
+ pte_gpa = (sp->gfn << PAGE_SHIFT) + offset;
+ pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
if (is_shadow_present_pte(*sptep)) {
rmap_remove(vcpu->kvm, sptep);
@@ -487,7 +501,17 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
if (need_flush)
kvm_flush_remote_tlbs(vcpu->kvm);
+
+ atomic_inc(&vcpu->kvm->arch.invlpg_counter);
+
spin_unlock(&vcpu->kvm->mmu_lock);
+
+ if (pte_gpa == -1)
+ return;
+
+ if (mmu_topup_memory_caches(vcpu))
+ return;
+ kvm_mmu_pte_write(vcpu, pte_gpa, NULL, sizeof(pt_element_t), 0);
}
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
@@ -551,12 +575,15 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
int i, offset, nr_present;
bool reset_host_protection;
+ gpa_t first_pte_gpa;
offset = nr_present = 0;
if (PTTYPE == 32)
offset = sp->role.quadrant << PT64_LEVEL_BITS;
+ first_pte_gpa = gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
+
for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
unsigned pte_access;
pt_element_t gpte;
@@ -566,8 +593,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
if (!is_shadow_present_pte(sp->spt[i]))
continue;
- pte_gpa = gfn_to_gpa(sp->gfn);
- pte_gpa += (i+offset) * sizeof(pt_element_t);
+ pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);
if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
sizeof(pt_element_t)))
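FNAME(invlpg) above derives the guest-physical address of the guest pte backing a shadow pte from the shadow page's gfn, its role.quadrant and the spte index, and FNAME(sync_page) computes the equivalent first_pte_gpa for a whole page, so entries can be re-read after mmu_lock is dropped. A standalone helper with the same arithmetic, parameterised by the guest pte width (names and constants here are illustrative; shadow ptes are always 64-bit, 512 per page):

#include <stdint.h>

#define PAGE_SHIFT		12
#define SHADOW_LEVEL_BITS	9	/* 512 sptes per shadow page */

/*
 * Guest-physical address of the guest pte mirrored by a shadow pte.
 * With 32-bit guest ptes one guest page holds more entries than a shadow
 * page can, and role.quadrant selects which slice this shadow page covers.
 */
static uint64_t guest_pte_gpa(uint64_t gfn, unsigned int quadrant,
			      unsigned int guest_level_bits,	/* 9 or 10 */
			      unsigned int level, unsigned int spte_index,
			      unsigned int guest_pte_size)	/* 8 or 4 */
{
	unsigned int shift = PAGE_SHIFT -
			     (guest_level_bits - SHADOW_LEVEL_BITS) * level;
	uint64_t gpa = (gfn << PAGE_SHIFT) + ((uint64_t)quadrant << shift);

	return gpa + (uint64_t)spte_index * guest_pte_size;
}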
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 2ba58206812a..2511664ff671 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -44,10 +44,11 @@ MODULE_LICENSE("GPL");
#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3
-#define SVM_FEATURE_NPT (1 << 0)
-#define SVM_FEATURE_LBRV (1 << 1)
-#define SVM_FEATURE_SVML (1 << 2)
-#define SVM_FEATURE_PAUSE_FILTER (1 << 10)
+#define SVM_FEATURE_NPT (1 << 0)
+#define SVM_FEATURE_LBRV (1 << 1)
+#define SVM_FEATURE_SVML (1 << 2)
+#define SVM_FEATURE_NRIP (1 << 3)
+#define SVM_FEATURE_PAUSE_FILTER (1 << 10)
#define NESTED_EXIT_HOST 0 /* Exit handled on host level */
#define NESTED_EXIT_DONE 1 /* Exit caused nested vmexit */
@@ -70,6 +71,7 @@ struct kvm_vcpu;
struct nested_state {
struct vmcb *hsave;
u64 hsave_msr;
+ u64 vm_cr_msr;
u64 vmcb;
/* These are the merged vectors */
@@ -77,6 +79,7 @@ struct nested_state {
/* gpa pointers to the real vectors */
u64 vmcb_msrpm;
+ u64 vmcb_iopm;
/* A VMEXIT is required but not yet emulated */
bool exit_required;
@@ -91,6 +94,9 @@ struct nested_state {
};
+#define MSRPM_OFFSETS 16
+static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
+
struct vcpu_svm {
struct kvm_vcpu vcpu;
struct vmcb *vmcb;
@@ -110,13 +116,39 @@ struct vcpu_svm {
struct nested_state nested;
bool nmi_singlestep;
+
+ unsigned int3_injected;
+ unsigned long int3_rip;
+};
+
+#define MSR_INVALID 0xffffffffU
+
+static struct svm_direct_access_msrs {
+ u32 index; /* Index of the MSR */
+ bool always; /* True if intercept is always on */
+} direct_access_msrs[] = {
+ { .index = MSR_K6_STAR, .always = true },
+ { .index = MSR_IA32_SYSENTER_CS, .always = true },
+#ifdef CONFIG_X86_64
+ { .index = MSR_GS_BASE, .always = true },
+ { .index = MSR_FS_BASE, .always = true },
+ { .index = MSR_KERNEL_GS_BASE, .always = true },
+ { .index = MSR_LSTAR, .always = true },
+ { .index = MSR_CSTAR, .always = true },
+ { .index = MSR_SYSCALL_MASK, .always = true },
+#endif
+ { .index = MSR_IA32_LASTBRANCHFROMIP, .always = false },
+ { .index = MSR_IA32_LASTBRANCHTOIP, .always = false },
+ { .index = MSR_IA32_LASTINTFROMIP, .always = false },
+ { .index = MSR_IA32_LASTINTTOIP, .always = false },
+ { .index = MSR_INVALID, .always = false },
};
/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static bool npt_enabled = true;
#else
-static bool npt_enabled = false;
+static bool npt_enabled;
#endif
static int npt = 1;
@@ -129,6 +161,7 @@ static void svm_flush_tlb(struct kvm_vcpu *vcpu);
static void svm_complete_interrupts(struct vcpu_svm *svm);
static int nested_svm_exit_handled(struct vcpu_svm *svm);
+static int nested_svm_intercept(struct vcpu_svm *svm);
static int nested_svm_vmexit(struct vcpu_svm *svm);
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
bool has_error_code, u32 error_code);
@@ -163,8 +196,8 @@ static unsigned long iopm_base;
struct kvm_ldttss_desc {
u16 limit0;
u16 base0;
- unsigned base1 : 8, type : 5, dpl : 2, p : 1;
- unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
+ unsigned base1:8, type:5, dpl:2, p:1;
+ unsigned limit1:4, zero0:3, g:1, base2:8;
u32 base3;
u32 zero1;
} __attribute__((packed));
@@ -194,6 +227,27 @@ static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)
+static u32 svm_msrpm_offset(u32 msr)
+{
+ u32 offset;
+ int i;
+
+ for (i = 0; i < NUM_MSR_MAPS; i++) {
+ if (msr < msrpm_ranges[i] ||
+ msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
+ continue;
+
+ offset = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8 */
+ offset += (i * MSRS_RANGE_SIZE); /* add range offset */
+
+ /* Now we have the u8 offset - but need the u32 offset */
+ return offset / 4;
+ }
+
+ /* MSR not in any range */
+ return MSR_INVALID;
+}
+
#define MAX_INST_SIZE 15
static inline u32 svm_has(u32 feat)
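svm_msrpm_offset() above maps an MSR number to a word in the MSR permission map: each of the three ranges covers 0x2000 MSRs with two intercept bits (read and write) per MSR, so four MSRs per byte and 2048 bytes per range. A standalone sketch of the same arithmetic (it mirrors the function above; MSR_INVALID marks MSRs outside every range):

#include <stdint.h>

#define MSR_INVALID	0xffffffffU
#define NUM_MSR_MAPS	3
#define MSRS_RANGE_SIZE	2048u				/* bytes per range */
#define MSRS_IN_RANGE	(MSRS_RANGE_SIZE * 8 / 2)	/* 2 bits per MSR */

static const uint32_t msrpm_ranges[NUM_MSR_MAPS] = { 0, 0xc0000000, 0xc0010000 };

/* Word index into the permission map for a given MSR, or MSR_INVALID. */
static uint32_t msrpm_word_offset(uint32_t msr)
{
	for (int i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr < msrpm_ranges[i] || msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
			continue;
		uint32_t byte = (msr - msrpm_ranges[i]) / 4 + i * MSRS_RANGE_SIZE;
		return byte / 4;		/* u8 offset -> u32 offset */
	}
	return MSR_INVALID;			/* MSR not covered by any range */
}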
@@ -213,7 +267,7 @@ static inline void stgi(void)
static inline void invlpga(unsigned long addr, u32 asid)
{
- asm volatile (__ex(SVM_INVLPGA) :: "a"(addr), "c"(asid));
+ asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
}
static inline void force_new_asid(struct kvm_vcpu *vcpu)
@@ -235,23 +289,6 @@ static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
vcpu->arch.efer = efer;
}
-static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
- bool has_error_code, u32 error_code)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
-
- /* If we are within a nested VM we'd better #VMEXIT and let the
- guest handle the exception */
- if (nested_svm_check_exception(svm, nr, has_error_code, error_code))
- return;
-
- svm->vmcb->control.event_inj = nr
- | SVM_EVTINJ_VALID
- | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
- | SVM_EVTINJ_TYPE_EXEPT;
- svm->vmcb->control.event_inj_err = error_code;
-}
-
static int is_external_interrupt(u32 info)
{
info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
@@ -264,7 +301,7 @@ static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
u32 ret = 0;
if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
- ret |= X86_SHADOW_INT_STI | X86_SHADOW_INT_MOV_SS;
+ ret |= KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
return ret & mask;
}
@@ -283,6 +320,9 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ if (svm->vmcb->control.next_rip != 0)
+ svm->next_rip = svm->vmcb->control.next_rip;
+
if (!svm->next_rip) {
if (emulate_instruction(vcpu, 0, 0, EMULTYPE_SKIP) !=
EMULATE_DONE)
@@ -297,6 +337,43 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
svm_set_interrupt_shadow(vcpu, 0);
}
+static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
+ bool has_error_code, u32 error_code,
+ bool reinject)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+ /*
+ * If we are within a nested VM we'd better #VMEXIT and let the guest
+ * handle the exception
+ */
+ if (!reinject &&
+ nested_svm_check_exception(svm, nr, has_error_code, error_code))
+ return;
+
+ if (nr == BP_VECTOR && !svm_has(SVM_FEATURE_NRIP)) {
+ unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);
+
+ /*
+ * For guest debugging where we have to reinject #BP if some
+ * INT3 is guest-owned:
+ * Emulate nRIP by moving RIP forward. Will fail if injection
+ * raises a fault that is not intercepted. Still better than
+ * failing in all cases.
+ */
+ skip_emulated_instruction(&svm->vcpu);
+ rip = kvm_rip_read(&svm->vcpu);
+ svm->int3_rip = rip + svm->vmcb->save.cs.base;
+ svm->int3_injected = rip - old_rip;
+ }
+
+ svm->vmcb->control.event_inj = nr
+ | SVM_EVTINJ_VALID
+ | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
+ | SVM_EVTINJ_TYPE_EXEPT;
+ svm->vmcb->control.event_inj_err = error_code;
+}
+
static int has_svm(void)
{
const char *msg;
@@ -319,7 +396,7 @@ static int svm_hardware_enable(void *garbage)
struct svm_cpu_data *sd;
uint64_t efer;
- struct descriptor_table gdt_descr;
+ struct desc_ptr gdt_descr;
struct desc_struct *gdt;
int me = raw_smp_processor_id();
@@ -344,8 +421,8 @@ static int svm_hardware_enable(void *garbage)
sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
sd->next_asid = sd->max_asid + 1;
- kvm_get_gdt(&gdt_descr);
- gdt = (struct desc_struct *)gdt_descr.base;
+ native_store_gdt(&gdt_descr);
+ gdt = (struct desc_struct *)gdt_descr.address;
sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
wrmsrl(MSR_EFER, efer | EFER_SVME);
@@ -391,42 +468,98 @@ err_1:
}
+static bool valid_msr_intercept(u32 index)
+{
+ int i;
+
+ for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
+ if (direct_access_msrs[i].index == index)
+ return true;
+
+ return false;
+}
+
static void set_msr_interception(u32 *msrpm, unsigned msr,
int read, int write)
{
+ u8 bit_read, bit_write;
+ unsigned long tmp;
+ u32 offset;
+
+ /*
+	 * If this warning triggers, extend the direct_access_msrs list at the
+ * beginning of the file
+ */
+ WARN_ON(!valid_msr_intercept(msr));
+
+ offset = svm_msrpm_offset(msr);
+ bit_read = 2 * (msr & 0x0f);
+ bit_write = 2 * (msr & 0x0f) + 1;
+ tmp = msrpm[offset];
+
+ BUG_ON(offset == MSR_INVALID);
+
+ read ? clear_bit(bit_read, &tmp) : set_bit(bit_read, &tmp);
+ write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);
+
+ msrpm[offset] = tmp;
+}
+
+static void svm_vcpu_init_msrpm(u32 *msrpm)
+{
int i;
- for (i = 0; i < NUM_MSR_MAPS; i++) {
- if (msr >= msrpm_ranges[i] &&
- msr < msrpm_ranges[i] + MSRS_IN_RANGE) {
- u32 msr_offset = (i * MSRS_IN_RANGE + msr -
- msrpm_ranges[i]) * 2;
-
- u32 *base = msrpm + (msr_offset / 32);
- u32 msr_shift = msr_offset % 32;
- u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1);
- *base = (*base & ~(0x3 << msr_shift)) |
- (mask << msr_shift);
+ memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
+
+ for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
+ if (!direct_access_msrs[i].always)
+ continue;
+
+ set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
+ }
+}
+
+static void add_msr_offset(u32 offset)
+{
+ int i;
+
+ for (i = 0; i < MSRPM_OFFSETS; ++i) {
+
+ /* Offset already in list? */
+ if (msrpm_offsets[i] == offset)
return;
- }
+
+ /* Slot used by another offset? */
+ if (msrpm_offsets[i] != MSR_INVALID)
+ continue;
+
+ /* Add offset to list */
+ msrpm_offsets[i] = offset;
+
+ return;
}
+
+ /*
+	 * If this BUG triggers, the msrpm_offsets table has overflowed. Just
+ * increase MSRPM_OFFSETS in this case.
+ */
BUG();
}
-static void svm_vcpu_init_msrpm(u32 *msrpm)
+static void init_msrpm_offsets(void)
{
- memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
+ int i;
-#ifdef CONFIG_X86_64
- set_msr_interception(msrpm, MSR_GS_BASE, 1, 1);
- set_msr_interception(msrpm, MSR_FS_BASE, 1, 1);
- set_msr_interception(msrpm, MSR_KERNEL_GS_BASE, 1, 1);
- set_msr_interception(msrpm, MSR_LSTAR, 1, 1);
- set_msr_interception(msrpm, MSR_CSTAR, 1, 1);
- set_msr_interception(msrpm, MSR_SYSCALL_MASK, 1, 1);
-#endif
- set_msr_interception(msrpm, MSR_K6_STAR, 1, 1);
- set_msr_interception(msrpm, MSR_IA32_SYSENTER_CS, 1, 1);
+ memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));
+
+ for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
+ u32 offset;
+
+ offset = svm_msrpm_offset(direct_access_msrs[i].index);
+ BUG_ON(offset == MSR_INVALID);
+
+ add_msr_offset(offset);
+ }
}
static void svm_enable_lbrv(struct vcpu_svm *svm)
@@ -467,6 +600,8 @@ static __init int svm_hardware_setup(void)
memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
+ init_msrpm_offsets();
+
if (boot_cpu_has(X86_FEATURE_NX))
kvm_enable_efer_bits(EFER_NX);
@@ -523,7 +658,7 @@ static void init_seg(struct vmcb_seg *seg)
{
seg->selector = 0;
seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
- SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
+ SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
seg->limit = 0xffff;
seg->base = 0;
}
@@ -543,16 +678,16 @@ static void init_vmcb(struct vcpu_svm *svm)
svm->vcpu.fpu_active = 1;
- control->intercept_cr_read = INTERCEPT_CR0_MASK |
+ control->intercept_cr_read = INTERCEPT_CR0_MASK |
INTERCEPT_CR3_MASK |
INTERCEPT_CR4_MASK;
- control->intercept_cr_write = INTERCEPT_CR0_MASK |
+ control->intercept_cr_write = INTERCEPT_CR0_MASK |
INTERCEPT_CR3_MASK |
INTERCEPT_CR4_MASK |
INTERCEPT_CR8_MASK;
- control->intercept_dr_read = INTERCEPT_DR0_MASK |
+ control->intercept_dr_read = INTERCEPT_DR0_MASK |
INTERCEPT_DR1_MASK |
INTERCEPT_DR2_MASK |
INTERCEPT_DR3_MASK |
@@ -561,7 +696,7 @@ static void init_vmcb(struct vcpu_svm *svm)
INTERCEPT_DR6_MASK |
INTERCEPT_DR7_MASK;
- control->intercept_dr_write = INTERCEPT_DR0_MASK |
+ control->intercept_dr_write = INTERCEPT_DR0_MASK |
INTERCEPT_DR1_MASK |
INTERCEPT_DR2_MASK |
INTERCEPT_DR3_MASK |
@@ -575,7 +710,7 @@ static void init_vmcb(struct vcpu_svm *svm)
(1 << MC_VECTOR);
- control->intercept = (1ULL << INTERCEPT_INTR) |
+ control->intercept = (1ULL << INTERCEPT_INTR) |
(1ULL << INTERCEPT_NMI) |
(1ULL << INTERCEPT_SMI) |
(1ULL << INTERCEPT_SELECTIVE_CR0) |
@@ -636,7 +771,8 @@ static void init_vmcb(struct vcpu_svm *svm)
save->rip = 0x0000fff0;
svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;
- /* This is the guest-visible cr0 value.
+ /*
+ * This is the guest-visible cr0 value.
* svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
*/
svm->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
@@ -729,6 +865,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
svm_vcpu_init_msrpm(svm->msrpm);
svm->nested.msrpm = page_address(nested_msrpm_pages);
+ svm_vcpu_init_msrpm(svm->nested.msrpm);
svm->vmcb = page_address(page);
clear_page(svm->vmcb);
@@ -882,7 +1019,8 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
- /* AMD's VMCB does not have an explicit unusable field, so emulate it
+ /*
+ * AMD's VMCB does not have an explicit unusable field, so emulate it
* for cross vendor migration purposes by "not present"
*/
var->unusable = !var->present || (var->type == 0);
@@ -918,7 +1056,8 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
var->type |= 0x1;
break;
case VCPU_SREG_SS:
- /* On AMD CPUs sometimes the DB bit in the segment
+ /*
+ * On AMD CPUs sometimes the DB bit in the segment
* descriptor is left as 1, although the whole segment has
* been made unusable. Clear it here to pass an Intel VMX
* entry check when cross vendor migrating.
@@ -936,36 +1075,36 @@ static int svm_get_cpl(struct kvm_vcpu *vcpu)
return save->cpl;
}
-static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
struct vcpu_svm *svm = to_svm(vcpu);
- dt->limit = svm->vmcb->save.idtr.limit;
- dt->base = svm->vmcb->save.idtr.base;
+ dt->size = svm->vmcb->save.idtr.limit;
+ dt->address = svm->vmcb->save.idtr.base;
}
-static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
struct vcpu_svm *svm = to_svm(vcpu);
- svm->vmcb->save.idtr.limit = dt->limit;
- svm->vmcb->save.idtr.base = dt->base ;
+ svm->vmcb->save.idtr.limit = dt->size;
+ svm->vmcb->save.idtr.base = dt->address ;
}
-static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
struct vcpu_svm *svm = to_svm(vcpu);
- dt->limit = svm->vmcb->save.gdtr.limit;
- dt->base = svm->vmcb->save.gdtr.base;
+ dt->size = svm->vmcb->save.gdtr.limit;
+ dt->address = svm->vmcb->save.gdtr.base;
}
-static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
struct vcpu_svm *svm = to_svm(vcpu);
- svm->vmcb->save.gdtr.limit = dt->limit;
- svm->vmcb->save.gdtr.base = dt->base ;
+ svm->vmcb->save.gdtr.limit = dt->size;
+ svm->vmcb->save.gdtr.base = dt->address ;
}
static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
@@ -978,6 +1117,7 @@ static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
static void update_cr0_intercept(struct vcpu_svm *svm)
{
+ struct vmcb *vmcb = svm->vmcb;
ulong gcr0 = svm->vcpu.arch.cr0;
u64 *hcr0 = &svm->vmcb->save.cr0;
@@ -989,11 +1129,25 @@ static void update_cr0_intercept(struct vcpu_svm *svm)
if (gcr0 == *hcr0 && svm->vcpu.fpu_active) {
- svm->vmcb->control.intercept_cr_read &= ~INTERCEPT_CR0_MASK;
- svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
+ vmcb->control.intercept_cr_read &= ~INTERCEPT_CR0_MASK;
+ vmcb->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
+ if (is_nested(svm)) {
+ struct vmcb *hsave = svm->nested.hsave;
+
+ hsave->control.intercept_cr_read &= ~INTERCEPT_CR0_MASK;
+ hsave->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
+ vmcb->control.intercept_cr_read |= svm->nested.intercept_cr_read;
+ vmcb->control.intercept_cr_write |= svm->nested.intercept_cr_write;
+ }
} else {
svm->vmcb->control.intercept_cr_read |= INTERCEPT_CR0_MASK;
svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR0_MASK;
+ if (is_nested(svm)) {
+ struct vmcb *hsave = svm->nested.hsave;
+
+ hsave->control.intercept_cr_read |= INTERCEPT_CR0_MASK;
+ hsave->control.intercept_cr_write |= INTERCEPT_CR0_MASK;
+ }
}
}
@@ -1001,6 +1155,27 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ if (is_nested(svm)) {
+ /*
+	 * We are here because we run in nested mode: the host kvm
+	 * intercepts cr0 writes but the L1 hypervisor does not.
+ * But the L1 hypervisor may intercept selective cr0 writes.
+ * This needs to be checked here.
+ */
+ unsigned long old, new;
+
+ /* Remove bits that would trigger a real cr0 write intercept */
+ old = vcpu->arch.cr0 & SVM_CR0_SELECTIVE_MASK;
+ new = cr0 & SVM_CR0_SELECTIVE_MASK;
+
+ if (old == new) {
+ /* cr0 write with ts and mp unchanged */
+ svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
+ if (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE)
+ return;
+ }
+ }
+
#ifdef CONFIG_X86_64
if (vcpu->arch.efer & EFER_LME) {
if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
@@ -1134,70 +1309,11 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
svm->vmcb->control.asid = sd->next_asid++;
}
-static int svm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *dest)
+static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
{
struct vcpu_svm *svm = to_svm(vcpu);
- switch (dr) {
- case 0 ... 3:
- *dest = vcpu->arch.db[dr];
- break;
- case 4:
- if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
- return EMULATE_FAIL; /* will re-inject UD */
- /* fall through */
- case 6:
- if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
- *dest = vcpu->arch.dr6;
- else
- *dest = svm->vmcb->save.dr6;
- break;
- case 5:
- if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
- return EMULATE_FAIL; /* will re-inject UD */
- /* fall through */
- case 7:
- if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
- *dest = vcpu->arch.dr7;
- else
- *dest = svm->vmcb->save.dr7;
- break;
- }
-
- return EMULATE_DONE;
-}
-
-static int svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
-
- switch (dr) {
- case 0 ... 3:
- vcpu->arch.db[dr] = value;
- if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
- vcpu->arch.eff_db[dr] = value;
- break;
- case 4:
- if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
- return EMULATE_FAIL; /* will re-inject UD */
- /* fall through */
- case 6:
- vcpu->arch.dr6 = (value & DR6_VOLATILE) | DR6_FIXED_1;
- break;
- case 5:
- if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
- return EMULATE_FAIL; /* will re-inject UD */
- /* fall through */
- case 7:
- vcpu->arch.dr7 = (value & DR7_VOLATILE) | DR7_FIXED_1;
- if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
- svm->vmcb->save.dr7 = vcpu->arch.dr7;
- vcpu->arch.switch_db_regs = (value & DR7_BP_EN_MASK);
- }
- break;
- }
-
- return EMULATE_DONE;
+ svm->vmcb->save.dr7 = value;
}
static int pf_interception(struct vcpu_svm *svm)
@@ -1234,7 +1350,7 @@ static int db_interception(struct vcpu_svm *svm)
}
if (svm->vcpu.guest_debug &
- (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)){
+ (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
kvm_run->exit_reason = KVM_EXIT_DEBUG;
kvm_run->debug.arch.pc =
svm->vmcb->save.cs.base + svm->vmcb->save.rip;
@@ -1268,7 +1384,22 @@ static int ud_interception(struct vcpu_svm *svm)
static void svm_fpu_activate(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
- svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
+ u32 excp;
+
+ if (is_nested(svm)) {
+ u32 h_excp, n_excp;
+
+ h_excp = svm->nested.hsave->control.intercept_exceptions;
+ n_excp = svm->nested.intercept_exceptions;
+ h_excp &= ~(1 << NM_VECTOR);
+ excp = h_excp | n_excp;
+ } else {
+ excp = svm->vmcb->control.intercept_exceptions;
+ excp &= ~(1 << NM_VECTOR);
+ }
+
+ svm->vmcb->control.intercept_exceptions = excp;
+
svm->vcpu.fpu_active = 1;
update_cr0_intercept(svm);
}
@@ -1309,29 +1440,23 @@ static int shutdown_interception(struct vcpu_svm *svm)
static int io_interception(struct vcpu_svm *svm)
{
+ struct kvm_vcpu *vcpu = &svm->vcpu;
u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
int size, in, string;
unsigned port;
++svm->vcpu.stat.io_exits;
-
- svm->next_rip = svm->vmcb->control.exit_info_2;
-
string = (io_info & SVM_IOIO_STR_MASK) != 0;
-
- if (string) {
- if (emulate_instruction(&svm->vcpu,
- 0, 0, 0) == EMULATE_DO_MMIO)
- return 0;
- return 1;
- }
-
in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
+ if (string || in)
+ return !(emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DO_MMIO);
+
port = io_info >> 16;
size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
-
+ svm->next_rip = svm->vmcb->control.exit_info_2;
skip_emulated_instruction(&svm->vcpu);
- return kvm_emulate_pio(&svm->vcpu, in, size, port);
+
+ return kvm_fast_pio_out(vcpu, size, port);
}
static int nmi_interception(struct vcpu_svm *svm)
@@ -1384,6 +1509,8 @@ static int nested_svm_check_permissions(struct vcpu_svm *svm)
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
bool has_error_code, u32 error_code)
{
+ int vmexit;
+
if (!is_nested(svm))
return 0;
@@ -1392,21 +1519,28 @@ static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
svm->vmcb->control.exit_info_1 = error_code;
svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
- return nested_svm_exit_handled(svm);
+ vmexit = nested_svm_intercept(svm);
+ if (vmexit == NESTED_EXIT_DONE)
+ svm->nested.exit_required = true;
+
+ return vmexit;
}
-static inline int nested_svm_intr(struct vcpu_svm *svm)
+/* This function returns true if it is safe to enable the irq window */
+static inline bool nested_svm_intr(struct vcpu_svm *svm)
{
if (!is_nested(svm))
- return 0;
+ return true;
if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
- return 0;
+ return true;
if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
- return 0;
+ return false;
- svm->vmcb->control.exit_code = SVM_EXIT_INTR;
+ svm->vmcb->control.exit_code = SVM_EXIT_INTR;
+ svm->vmcb->control.exit_info_1 = 0;
+ svm->vmcb->control.exit_info_2 = 0;
if (svm->nested.intercept & 1ULL) {
/*
@@ -1417,21 +1551,40 @@ static inline int nested_svm_intr(struct vcpu_svm *svm)
*/
svm->nested.exit_required = true;
trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
- return 1;
+ return false;
}
- return 0;
+ return true;
+}
+
+/* This function returns true if it is safe to enable the nmi window */
+static inline bool nested_svm_nmi(struct vcpu_svm *svm)
+{
+ if (!is_nested(svm))
+ return true;
+
+ if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
+ return true;
+
+ svm->vmcb->control.exit_code = SVM_EXIT_NMI;
+ svm->nested.exit_required = true;
+
+ return false;
}
-static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, enum km_type idx)
+static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
{
struct page *page;
+ might_sleep();
+
page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT);
if (is_error_page(page))
goto error;
- return kmap_atomic(page, idx);
+ *_page = page;
+
+ return kmap(page);
error:
kvm_release_page_clean(page);
@@ -1440,61 +1593,55 @@ error:
return NULL;
}
-static void nested_svm_unmap(void *addr, enum km_type idx)
+static void nested_svm_unmap(struct page *page)
{
- struct page *page;
+ kunmap(page);
+ kvm_release_page_dirty(page);
+}
- if (!addr)
- return;
+static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
+{
+ unsigned port;
+ u8 val, bit;
+ u64 gpa;
- page = kmap_atomic_to_page(addr);
+ if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
+ return NESTED_EXIT_HOST;
- kunmap_atomic(addr, idx);
- kvm_release_page_dirty(page);
+ port = svm->vmcb->control.exit_info_1 >> 16;
+ gpa = svm->nested.vmcb_iopm + (port / 8);
+ bit = port % 8;
+ val = 0;
+
+ if (kvm_read_guest(svm->vcpu.kvm, gpa, &val, 1))
+ val &= (1 << bit);
+
+ return val ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}
-static bool nested_svm_exit_handled_msr(struct vcpu_svm *svm)
+static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
- u32 param = svm->vmcb->control.exit_info_1 & 1;
- u32 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
- bool ret = false;
- u32 t0, t1;
- u8 *msrpm;
+ u32 offset, msr, value;
+ int write, mask;
if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
- return false;
-
- msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, KM_USER0);
+ return NESTED_EXIT_HOST;
- if (!msrpm)
- goto out;
+ msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
+ offset = svm_msrpm_offset(msr);
+ write = svm->vmcb->control.exit_info_1 & 1;
+ mask = 1 << ((2 * (msr & 0xf)) + write);
- switch (msr) {
- case 0 ... 0x1fff:
- t0 = (msr * 2) % 8;
- t1 = msr / 8;
- break;
- case 0xc0000000 ... 0xc0001fff:
- t0 = (8192 + msr - 0xc0000000) * 2;
- t1 = (t0 / 8);
- t0 %= 8;
- break;
- case 0xc0010000 ... 0xc0011fff:
- t0 = (16384 + msr - 0xc0010000) * 2;
- t1 = (t0 / 8);
- t0 %= 8;
- break;
- default:
- ret = true;
- goto out;
- }
+ if (offset == MSR_INVALID)
+ return NESTED_EXIT_DONE;
- ret = msrpm[t1] & ((1 << param) << t0);
+ /* Offset is in 32 bit units but we need it in 8 bit units */
+ offset *= 4;
-out:
- nested_svm_unmap(msrpm, KM_USER0);
+ if (kvm_read_guest(svm->vcpu.kvm, svm->nested.vmcb_msrpm + offset, &value, 4))
+ return NESTED_EXIT_DONE;
- return ret;
+ return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}
static int nested_svm_exit_special(struct vcpu_svm *svm)
@@ -1504,17 +1651,21 @@ static int nested_svm_exit_special(struct vcpu_svm *svm)
switch (exit_code) {
case SVM_EXIT_INTR:
case SVM_EXIT_NMI:
+ case SVM_EXIT_EXCP_BASE + MC_VECTOR:
return NESTED_EXIT_HOST;
- /* For now we are always handling NPFs when using them */
case SVM_EXIT_NPF:
+ /* For now we are always handling NPFs when using them */
if (npt_enabled)
return NESTED_EXIT_HOST;
break;
- /* When we're shadowing, trap PFs */
case SVM_EXIT_EXCP_BASE + PF_VECTOR:
+ /* When we're shadowing, trap PFs */
if (!npt_enabled)
return NESTED_EXIT_HOST;
break;
+ case SVM_EXIT_EXCP_BASE + NM_VECTOR:
+ nm_interception(svm);
+ break;
default:
break;
}
@@ -1525,7 +1676,7 @@ static int nested_svm_exit_special(struct vcpu_svm *svm)
/*
* If this function returns true, this #vmexit was already handled
*/
-static int nested_svm_exit_handled(struct vcpu_svm *svm)
+static int nested_svm_intercept(struct vcpu_svm *svm)
{
u32 exit_code = svm->vmcb->control.exit_code;
int vmexit = NESTED_EXIT_HOST;
@@ -1534,6 +1685,9 @@ static int nested_svm_exit_handled(struct vcpu_svm *svm)
case SVM_EXIT_MSR:
vmexit = nested_svm_exit_handled_msr(svm);
break;
+ case SVM_EXIT_IOIO:
+ vmexit = nested_svm_intercept_ioio(svm);
+ break;
case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR8: {
u32 cr_bits = 1 << (exit_code - SVM_EXIT_READ_CR0);
if (svm->nested.intercept_cr_read & cr_bits)
@@ -1564,6 +1718,10 @@ static int nested_svm_exit_handled(struct vcpu_svm *svm)
vmexit = NESTED_EXIT_DONE;
break;
}
+ case SVM_EXIT_ERR: {
+ vmexit = NESTED_EXIT_DONE;
+ break;
+ }
default: {
u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
if (svm->nested.intercept & exit_bits)
@@ -1571,9 +1729,17 @@ static int nested_svm_exit_handled(struct vcpu_svm *svm)
}
}
- if (vmexit == NESTED_EXIT_DONE) {
+ return vmexit;
+}
+
+static int nested_svm_exit_handled(struct vcpu_svm *svm)
+{
+ int vmexit;
+
+ vmexit = nested_svm_intercept(svm);
+
+ if (vmexit == NESTED_EXIT_DONE)
nested_svm_vmexit(svm);
- }
return vmexit;
}
@@ -1615,6 +1781,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
struct vmcb *nested_vmcb;
struct vmcb *hsave = svm->nested.hsave;
struct vmcb *vmcb = svm->vmcb;
+ struct page *page;
trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
vmcb->control.exit_info_1,
@@ -1622,10 +1789,13 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
vmcb->control.exit_int_info,
vmcb->control.exit_int_info_err);
- nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, KM_USER0);
+ nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
if (!nested_vmcb)
return 1;
+ /* Exit nested SVM mode */
+ svm->nested.vmcb = 0;
+
/* Give the current vmcb to the guest */
disable_gif(svm);
@@ -1635,9 +1805,10 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
nested_vmcb->save.ds = vmcb->save.ds;
nested_vmcb->save.gdtr = vmcb->save.gdtr;
nested_vmcb->save.idtr = vmcb->save.idtr;
- if (npt_enabled)
- nested_vmcb->save.cr3 = vmcb->save.cr3;
+ nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu);
+ nested_vmcb->save.cr3 = svm->vcpu.arch.cr3;
nested_vmcb->save.cr2 = vmcb->save.cr2;
+ nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
nested_vmcb->save.rflags = vmcb->save.rflags;
nested_vmcb->save.rip = vmcb->save.rip;
nested_vmcb->save.rsp = vmcb->save.rsp;
@@ -1709,10 +1880,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
svm->vmcb->save.cpl = 0;
svm->vmcb->control.exit_int_info = 0;
- /* Exit nested SVM mode */
- svm->nested.vmcb = 0;
-
- nested_svm_unmap(nested_vmcb, KM_USER0);
+ nested_svm_unmap(page);
kvm_mmu_reset_context(&svm->vcpu);
kvm_mmu_load(&svm->vcpu);
@@ -1722,19 +1890,33 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
- u32 *nested_msrpm;
+ /*
+ * This function merges the msr permission bitmaps of kvm and the
+ * nested vmcb. It is optimized in that it only merges the parts where
+ * the kvm msr permission bitmap may contain zero bits
+ */
int i;
- nested_msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, KM_USER0);
- if (!nested_msrpm)
- return false;
+ if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
+ return true;
- for (i=0; i< PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER) / 4; i++)
- svm->nested.msrpm[i] = svm->msrpm[i] | nested_msrpm[i];
+ for (i = 0; i < MSRPM_OFFSETS; i++) {
+ u32 value, p;
+ u64 offset;
- svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
+ if (msrpm_offsets[i] == 0xffffffff)
+ break;
+
+ p = msrpm_offsets[i];
+ offset = svm->nested.vmcb_msrpm + (p * 4);
+
+ if (kvm_read_guest(svm->vcpu.kvm, offset, &value, 4))
+ return false;
- nested_svm_unmap(nested_msrpm, KM_USER0);
+ svm->nested.msrpm[p] = svm->msrpm[p] | value;
+ }
+
+ svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
return true;
}
@@ -1744,26 +1926,34 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
struct vmcb *nested_vmcb;
struct vmcb *hsave = svm->nested.hsave;
struct vmcb *vmcb = svm->vmcb;
+ struct page *page;
+ u64 vmcb_gpa;
+
+ vmcb_gpa = svm->vmcb->save.rax;
- nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
+ nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
if (!nested_vmcb)
return false;
- /* nested_vmcb is our indicator if nested SVM is activated */
- svm->nested.vmcb = svm->vmcb->save.rax;
-
- trace_kvm_nested_vmrun(svm->vmcb->save.rip - 3, svm->nested.vmcb,
+ trace_kvm_nested_vmrun(svm->vmcb->save.rip - 3, vmcb_gpa,
nested_vmcb->save.rip,
nested_vmcb->control.int_ctl,
nested_vmcb->control.event_inj,
nested_vmcb->control.nested_ctl);
+ trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr_read,
+ nested_vmcb->control.intercept_cr_write,
+ nested_vmcb->control.intercept_exceptions,
+ nested_vmcb->control.intercept);
+
/* Clear internal status */
kvm_clear_exception_queue(&svm->vcpu);
kvm_clear_interrupt_queue(&svm->vcpu);
- /* Save the old vmcb, so we don't need to pick what we save, but
- can restore everything when a VMEXIT occurs */
+ /*
+ * Save the old vmcb, so we don't need to pick what we save, but can
+ * restore everything when a VMEXIT occurs
+ */
hsave->save.es = vmcb->save.es;
hsave->save.cs = vmcb->save.cs;
hsave->save.ss = vmcb->save.ss;
@@ -1803,14 +1993,17 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
if (npt_enabled) {
svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
- } else {
+ } else
kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
- kvm_mmu_reset_context(&svm->vcpu);
- }
+
+ /* Guest paging mode is active - reset mmu */
+ kvm_mmu_reset_context(&svm->vcpu);
+
svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
+
/* In case we don't even reach vcpu_run, the fields are not updated */
svm->vmcb->save.rax = nested_vmcb->save.rax;
svm->vmcb->save.rsp = nested_vmcb->save.rsp;
@@ -1819,22 +2012,8 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
svm->vmcb->save.cpl = nested_vmcb->save.cpl;
- /* We don't want a nested guest to be more powerful than the guest,
- so all intercepts are ORed */
- svm->vmcb->control.intercept_cr_read |=
- nested_vmcb->control.intercept_cr_read;
- svm->vmcb->control.intercept_cr_write |=
- nested_vmcb->control.intercept_cr_write;
- svm->vmcb->control.intercept_dr_read |=
- nested_vmcb->control.intercept_dr_read;
- svm->vmcb->control.intercept_dr_write |=
- nested_vmcb->control.intercept_dr_write;
- svm->vmcb->control.intercept_exceptions |=
- nested_vmcb->control.intercept_exceptions;
-
- svm->vmcb->control.intercept |= nested_vmcb->control.intercept;
-
- svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa;
+ svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
+ svm->nested.vmcb_iopm = nested_vmcb->control.iopm_base_pa & ~0x0fffULL;
/* cache intercepts */
svm->nested.intercept_cr_read = nested_vmcb->control.intercept_cr_read;
@@ -1851,13 +2030,40 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
else
svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
+ if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
+ /* We only want the cr8 intercept bits of the guest */
+ svm->vmcb->control.intercept_cr_read &= ~INTERCEPT_CR8_MASK;
+ svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
+ }
+
+ /*
+ * We don't want a nested guest to be more powerful than the guest, so
+ * all intercepts are ORed
+ */
+ svm->vmcb->control.intercept_cr_read |=
+ nested_vmcb->control.intercept_cr_read;
+ svm->vmcb->control.intercept_cr_write |=
+ nested_vmcb->control.intercept_cr_write;
+ svm->vmcb->control.intercept_dr_read |=
+ nested_vmcb->control.intercept_dr_read;
+ svm->vmcb->control.intercept_dr_write |=
+ nested_vmcb->control.intercept_dr_write;
+ svm->vmcb->control.intercept_exceptions |=
+ nested_vmcb->control.intercept_exceptions;
+
+ svm->vmcb->control.intercept |= nested_vmcb->control.intercept;
+
+ svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
svm->vmcb->control.int_state = nested_vmcb->control.int_state;
svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
- nested_svm_unmap(nested_vmcb, KM_USER0);
+ nested_svm_unmap(page);
+
+ /* nested_vmcb is our indicator if nested SVM is activated */
+ svm->nested.vmcb = vmcb_gpa;
enable_gif(svm);
@@ -1883,6 +2089,7 @@ static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
static int vmload_interception(struct vcpu_svm *svm)
{
struct vmcb *nested_vmcb;
+ struct page *page;
if (nested_svm_check_permissions(svm))
return 1;
@@ -1890,12 +2097,12 @@ static int vmload_interception(struct vcpu_svm *svm)
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
skip_emulated_instruction(&svm->vcpu);
- nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
+ nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
if (!nested_vmcb)
return 1;
nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
- nested_svm_unmap(nested_vmcb, KM_USER0);
+ nested_svm_unmap(page);
return 1;
}
@@ -1903,6 +2110,7 @@ static int vmload_interception(struct vcpu_svm *svm)
static int vmsave_interception(struct vcpu_svm *svm)
{
struct vmcb *nested_vmcb;
+ struct page *page;
if (nested_svm_check_permissions(svm))
return 1;
@@ -1910,12 +2118,12 @@ static int vmsave_interception(struct vcpu_svm *svm)
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
skip_emulated_instruction(&svm->vcpu);
- nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
+ nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
if (!nested_vmcb)
return 1;
nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
- nested_svm_unmap(nested_vmcb, KM_USER0);
+ nested_svm_unmap(page);
return 1;
}
@@ -2018,6 +2226,8 @@ static int task_switch_interception(struct vcpu_svm *svm)
svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
uint32_t idt_v =
svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
+ bool has_error_code = false;
+ u32 error_code = 0;
tss_selector = (u16)svm->vmcb->control.exit_info_1;
@@ -2038,6 +2248,12 @@ static int task_switch_interception(struct vcpu_svm *svm)
svm->vcpu.arch.nmi_injected = false;
break;
case SVM_EXITINTINFO_TYPE_EXEPT:
+ if (svm->vmcb->control.exit_info_2 &
+ (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
+ has_error_code = true;
+ error_code =
+ (u32)svm->vmcb->control.exit_info_2;
+ }
kvm_clear_exception_queue(&svm->vcpu);
break;
case SVM_EXITINTINFO_TYPE_INTR:
@@ -2054,7 +2270,14 @@ static int task_switch_interception(struct vcpu_svm *svm)
(int_vec == OF_VECTOR || int_vec == BP_VECTOR)))
skip_emulated_instruction(&svm->vcpu);
- return kvm_task_switch(&svm->vcpu, tss_selector, reason);
+ if (kvm_task_switch(&svm->vcpu, tss_selector, reason,
+ has_error_code, error_code) == EMULATE_FAIL) {
+ svm->vcpu.run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ svm->vcpu.run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+ svm->vcpu.run->internal.ndata = 0;
+ return 0;
+ }
+ return 1;
}
static int cpuid_interception(struct vcpu_svm *svm)
@@ -2145,9 +2368,11 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
case MSR_IA32_SYSENTER_ESP:
*data = svm->sysenter_esp;
break;
- /* Nobody will change the following 5 values in the VMCB so
- we can safely return them on rdmsr. They will always be 0
- until LBRV is implemented. */
+ /*
+ * Nobody will change the following 5 values in the VMCB so we can
+ * safely return them on rdmsr. They will always be 0 until LBRV is
+ * implemented.
+ */
case MSR_IA32_DEBUGCTLMSR:
*data = svm->vmcb->save.dbgctl;
break;
@@ -2167,7 +2392,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
*data = svm->nested.hsave_msr;
break;
case MSR_VM_CR:
- *data = 0;
+ *data = svm->nested.vm_cr_msr;
break;
case MSR_IA32_UCODE_REV:
*data = 0x01000065;
@@ -2197,6 +2422,31 @@ static int rdmsr_interception(struct vcpu_svm *svm)
return 1;
}
+static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+ int svm_dis, chg_mask;
+
+ if (data & ~SVM_VM_CR_VALID_MASK)
+ return 1;
+
+ chg_mask = SVM_VM_CR_VALID_MASK;
+
+ if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
+ chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
+
+ svm->nested.vm_cr_msr &= ~chg_mask;
+ svm->nested.vm_cr_msr |= (data & chg_mask);
+
+ svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
+
+ /* check for svm_disable while efer.svme is set */
+ if (svm_dis && (vcpu->arch.efer & EFER_SVME))
+ return 1;
+
+ return 0;
+}
+
static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -2263,6 +2513,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
svm->nested.hsave_msr = data;
break;
case MSR_VM_CR:
+ return svm_set_vm_cr(vcpu, data);
case MSR_VM_IGNNE:
pr_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
break;
@@ -2326,16 +2577,16 @@ static int pause_interception(struct vcpu_svm *svm)
}
static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
- [SVM_EXIT_READ_CR0] = emulate_on_interception,
- [SVM_EXIT_READ_CR3] = emulate_on_interception,
- [SVM_EXIT_READ_CR4] = emulate_on_interception,
- [SVM_EXIT_READ_CR8] = emulate_on_interception,
+ [SVM_EXIT_READ_CR0] = emulate_on_interception,
+ [SVM_EXIT_READ_CR3] = emulate_on_interception,
+ [SVM_EXIT_READ_CR4] = emulate_on_interception,
+ [SVM_EXIT_READ_CR8] = emulate_on_interception,
[SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception,
- [SVM_EXIT_WRITE_CR0] = emulate_on_interception,
- [SVM_EXIT_WRITE_CR3] = emulate_on_interception,
- [SVM_EXIT_WRITE_CR4] = emulate_on_interception,
- [SVM_EXIT_WRITE_CR8] = cr8_write_interception,
- [SVM_EXIT_READ_DR0] = emulate_on_interception,
+ [SVM_EXIT_WRITE_CR0] = emulate_on_interception,
+ [SVM_EXIT_WRITE_CR3] = emulate_on_interception,
+ [SVM_EXIT_WRITE_CR4] = emulate_on_interception,
+ [SVM_EXIT_WRITE_CR8] = cr8_write_interception,
+ [SVM_EXIT_READ_DR0] = emulate_on_interception,
[SVM_EXIT_READ_DR1] = emulate_on_interception,
[SVM_EXIT_READ_DR2] = emulate_on_interception,
[SVM_EXIT_READ_DR3] = emulate_on_interception,
@@ -2354,15 +2605,14 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
[SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception,
[SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception,
[SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
- [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
- [SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception,
- [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
- [SVM_EXIT_INTR] = intr_interception,
+ [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
+ [SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception,
+ [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
+ [SVM_EXIT_INTR] = intr_interception,
[SVM_EXIT_NMI] = nmi_interception,
[SVM_EXIT_SMI] = nop_on_interception,
[SVM_EXIT_INIT] = nop_on_interception,
[SVM_EXIT_VINTR] = interrupt_window_interception,
- /* [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception, */
[SVM_EXIT_CPUID] = cpuid_interception,
[SVM_EXIT_IRET] = iret_interception,
[SVM_EXIT_INVD] = emulate_on_interception,
@@ -2370,7 +2620,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
[SVM_EXIT_HLT] = halt_interception,
[SVM_EXIT_INVLPG] = invlpg_interception,
[SVM_EXIT_INVLPGA] = invlpga_interception,
- [SVM_EXIT_IOIO] = io_interception,
+ [SVM_EXIT_IOIO] = io_interception,
[SVM_EXIT_MSR] = msr_interception,
[SVM_EXIT_TASK_SWITCH] = task_switch_interception,
[SVM_EXIT_SHUTDOWN] = shutdown_interception,
@@ -2393,7 +2643,12 @@ static int handle_exit(struct kvm_vcpu *vcpu)
struct kvm_run *kvm_run = vcpu->run;
u32 exit_code = svm->vmcb->control.exit_code;
- trace_kvm_exit(exit_code, svm->vmcb->save.rip);
+ trace_kvm_exit(exit_code, vcpu);
+
+ if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR0_MASK))
+ vcpu->arch.cr0 = svm->vmcb->save.cr0;
+ if (npt_enabled)
+ vcpu->arch.cr3 = svm->vmcb->save.cr3;
if (unlikely(svm->nested.exit_required)) {
nested_svm_vmexit(svm);
@@ -2422,11 +2677,6 @@ static int handle_exit(struct kvm_vcpu *vcpu)
svm_complete_interrupts(svm);
- if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR0_MASK))
- vcpu->arch.cr0 = svm->vmcb->save.cr0;
- if (npt_enabled)
- vcpu->arch.cr3 = svm->vmcb->save.cr3;
-
if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
kvm_run->fail_entry.hardware_entry_failure_reason
@@ -2511,6 +2761,9 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
+ return;
+
if (irr == -1)
return;
@@ -2522,8 +2775,12 @@ static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
struct vmcb *vmcb = svm->vmcb;
- return !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
- !(svm->vcpu.arch.hflags & HF_NMI_MASK);
+ int ret;
+ ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
+ !(svm->vcpu.arch.hflags & HF_NMI_MASK);
+ ret = ret && gif_set(svm) && nested_svm_nmi(svm);
+
+ return ret;
}
static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
@@ -2568,13 +2825,13 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
- nested_svm_intr(svm);
-
- /* In case GIF=0 we can't rely on the CPU to tell us when
- * GIF becomes 1, because that's a separate STGI/VMRUN intercept.
- * The next time we get that intercept, this function will be
- * called again though and we'll get the vintr intercept. */
- if (gif_set(svm)) {
+ /*
+ * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
+ * 1, because that's a separate STGI/VMRUN intercept. The next time we
+ * get that intercept, this function will be called again though and
+ * we'll get the vintr intercept.
+ */
+ if (gif_set(svm) && nested_svm_intr(svm)) {
svm_set_vintr(svm);
svm_inject_irq(svm, 0x0);
}
@@ -2588,9 +2845,10 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu)
== HF_NMI_MASK)
return; /* IRET will cause a vm exit */
- /* Something prevents NMI from been injected. Single step over
- possible problem (IRET or exception injection or interrupt
- shadow) */
+ /*
+ * Something prevents NMI from being injected. Single step over the possible
+ * problem (IRET or exception injection or interrupt shadow)
+ */
svm->nmi_singlestep = true;
svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
update_db_intercept(vcpu);
@@ -2614,6 +2872,9 @@ static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
+ return;
+
if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) {
int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
kvm_set_cr8(vcpu, cr8);
@@ -2625,6 +2886,9 @@ static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
struct vcpu_svm *svm = to_svm(vcpu);
u64 cr8;
+ if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
+ return;
+
cr8 = kvm_get_cr8(vcpu);
svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
@@ -2635,6 +2899,9 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
u8 vector;
int type;
u32 exitintinfo = svm->vmcb->control.exit_int_info;
+ unsigned int3_injected = svm->int3_injected;
+
+ svm->int3_injected = 0;
if (svm->vcpu.arch.hflags & HF_IRET_MASK)
svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
@@ -2654,18 +2921,25 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
svm->vcpu.arch.nmi_injected = true;
break;
case SVM_EXITINTINFO_TYPE_EXEPT:
- /* In case of software exception do not reinject an exception
- vector, but re-execute and instruction instead */
- if (is_nested(svm))
- break;
- if (kvm_exception_is_soft(vector))
+ /*
+ * In case of software exceptions, do not reinject the vector,
+ * but re-execute the instruction instead. Rewind RIP first
+ * if we emulated INT3 before.
+ */
+ if (kvm_exception_is_soft(vector)) {
+ if (vector == BP_VECTOR && int3_injected &&
+ kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
+ kvm_rip_write(&svm->vcpu,
+ kvm_rip_read(&svm->vcpu) -
+ int3_injected);
break;
+ }
if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
u32 err = svm->vmcb->control.exit_int_info_err;
- kvm_queue_exception_e(&svm->vcpu, vector, err);
+ kvm_requeue_exception_e(&svm->vcpu, vector, err);
} else
- kvm_queue_exception(&svm->vcpu, vector);
+ kvm_requeue_exception(&svm->vcpu, vector);
break;
case SVM_EXITINTINFO_TYPE_INTR:
kvm_queue_interrupt(&svm->vcpu, vector, false);
@@ -2688,6 +2962,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
u16 gs_selector;
u16 ldt_selector;
+ svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
+ svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
+ svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
+
/*
* A vmexit emulation is required before the vcpu can be executed
* again.
@@ -2695,10 +2973,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
if (unlikely(svm->nested.exit_required))
return;
- svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
- svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
- svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
-
pre_svm_run(svm);
sync_lapic_to_cr8(vcpu);
@@ -2879,25 +3153,39 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
{
}
+static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
+{
+ switch (func) {
+ case 0x8000000A:
+ entry->eax = 1; /* SVM revision 1 */
+ entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
+ ASID emulation to nested SVM */
+ entry->ecx = 0; /* Reserved */
+ entry->edx = 0; /* Do not support any additional features */
+
+ break;
+ }
+}
+
static const struct trace_print_flags svm_exit_reasons_str[] = {
- { SVM_EXIT_READ_CR0, "read_cr0" },
- { SVM_EXIT_READ_CR3, "read_cr3" },
- { SVM_EXIT_READ_CR4, "read_cr4" },
- { SVM_EXIT_READ_CR8, "read_cr8" },
- { SVM_EXIT_WRITE_CR0, "write_cr0" },
- { SVM_EXIT_WRITE_CR3, "write_cr3" },
- { SVM_EXIT_WRITE_CR4, "write_cr4" },
- { SVM_EXIT_WRITE_CR8, "write_cr8" },
- { SVM_EXIT_READ_DR0, "read_dr0" },
- { SVM_EXIT_READ_DR1, "read_dr1" },
- { SVM_EXIT_READ_DR2, "read_dr2" },
- { SVM_EXIT_READ_DR3, "read_dr3" },
- { SVM_EXIT_WRITE_DR0, "write_dr0" },
- { SVM_EXIT_WRITE_DR1, "write_dr1" },
- { SVM_EXIT_WRITE_DR2, "write_dr2" },
- { SVM_EXIT_WRITE_DR3, "write_dr3" },
- { SVM_EXIT_WRITE_DR5, "write_dr5" },
- { SVM_EXIT_WRITE_DR7, "write_dr7" },
+ { SVM_EXIT_READ_CR0, "read_cr0" },
+ { SVM_EXIT_READ_CR3, "read_cr3" },
+ { SVM_EXIT_READ_CR4, "read_cr4" },
+ { SVM_EXIT_READ_CR8, "read_cr8" },
+ { SVM_EXIT_WRITE_CR0, "write_cr0" },
+ { SVM_EXIT_WRITE_CR3, "write_cr3" },
+ { SVM_EXIT_WRITE_CR4, "write_cr4" },
+ { SVM_EXIT_WRITE_CR8, "write_cr8" },
+ { SVM_EXIT_READ_DR0, "read_dr0" },
+ { SVM_EXIT_READ_DR1, "read_dr1" },
+ { SVM_EXIT_READ_DR2, "read_dr2" },
+ { SVM_EXIT_READ_DR3, "read_dr3" },
+ { SVM_EXIT_WRITE_DR0, "write_dr0" },
+ { SVM_EXIT_WRITE_DR1, "write_dr1" },
+ { SVM_EXIT_WRITE_DR2, "write_dr2" },
+ { SVM_EXIT_WRITE_DR3, "write_dr3" },
+ { SVM_EXIT_WRITE_DR5, "write_dr5" },
+ { SVM_EXIT_WRITE_DR7, "write_dr7" },
{ SVM_EXIT_EXCP_BASE + DB_VECTOR, "DB excp" },
{ SVM_EXIT_EXCP_BASE + BP_VECTOR, "BP excp" },
{ SVM_EXIT_EXCP_BASE + UD_VECTOR, "UD excp" },
@@ -2946,8 +3234,10 @@ static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
- update_cr0_intercept(svm);
svm->vmcb->control.intercept_exceptions |= 1 << NM_VECTOR;
+ if (is_nested(svm))
+ svm->nested.hsave->control.intercept_exceptions |= 1 << NM_VECTOR;
+ update_cr0_intercept(svm);
}
static struct kvm_x86_ops svm_x86_ops = {
@@ -2986,8 +3276,7 @@ static struct kvm_x86_ops svm_x86_ops = {
.set_idt = svm_set_idt,
.get_gdt = svm_get_gdt,
.set_gdt = svm_set_gdt,
- .get_dr = svm_get_dr,
- .set_dr = svm_set_dr,
+ .set_dr7 = svm_set_dr7,
.cache_reg = svm_cache_reg,
.get_rflags = svm_get_rflags,
.set_rflags = svm_set_rflags,
@@ -3023,12 +3312,14 @@ static struct kvm_x86_ops svm_x86_ops = {
.cpuid_update = svm_cpuid_update,
.rdtscp_supported = svm_rdtscp_supported,
+
+ .set_supported_cpuid = svm_set_supported_cpuid,
};
static int __init svm_init(void)
{
return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
- THIS_MODULE);
+ __alignof__(struct vcpu_svm), THIS_MODULE);
}
static void __exit svm_exit(void)
diff --git a/arch/x86/kvm/timer.c b/arch/x86/kvm/timer.c
index eea40439066c..4ddadb1a5ffe 100644
--- a/arch/x86/kvm/timer.c
+++ b/arch/x86/kvm/timer.c
@@ -12,7 +12,8 @@ static int __kvm_timer_fn(struct kvm_vcpu *vcpu, struct kvm_timer *ktimer)
/*
* There is a race window between reading and incrementing, but we do
* not care about potentially loosing timer events in the !reinject
- * case anyway.
+ * case anyway. Note: KVM_REQ_PENDING_TIMER is implicitly checked
+ * in vcpu_enter_guest.
*/
if (ktimer->reinject || !atomic_read(&ktimer->pending)) {
atomic_inc(&ktimer->pending);
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 6ad30a29f044..a6544b8e7c0f 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -5,8 +5,6 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm
-#define TRACE_INCLUDE_PATH arch/x86/kvm
-#define TRACE_INCLUDE_FILE trace
/*
* Tracepoint for guest mode entry.
@@ -184,8 +182,8 @@ TRACE_EVENT(kvm_apic,
* Tracepoint for kvm guest exit:
*/
TRACE_EVENT(kvm_exit,
- TP_PROTO(unsigned int exit_reason, unsigned long guest_rip),
- TP_ARGS(exit_reason, guest_rip),
+ TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu),
+ TP_ARGS(exit_reason, vcpu),
TP_STRUCT__entry(
__field( unsigned int, exit_reason )
@@ -194,7 +192,7 @@ TRACE_EVENT(kvm_exit,
TP_fast_assign(
__entry->exit_reason = exit_reason;
- __entry->guest_rip = guest_rip;
+ __entry->guest_rip = kvm_rip_read(vcpu);
),
TP_printk("reason %s rip 0x%lx",
@@ -221,6 +219,38 @@ TRACE_EVENT(kvm_inj_virq,
TP_printk("irq %u", __entry->irq)
);
+#define EXS(x) { x##_VECTOR, "#" #x }
+
+#define kvm_trace_sym_exc \
+ EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM), \
+ EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF), \
+ EXS(MF), EXS(MC)
+
+/*
+ * Tracepoint for kvm interrupt injection:
+ */
+TRACE_EVENT(kvm_inj_exception,
+ TP_PROTO(unsigned exception, bool has_error, unsigned error_code),
+ TP_ARGS(exception, has_error, error_code),
+
+ TP_STRUCT__entry(
+ __field( u8, exception )
+ __field( u8, has_error )
+ __field( u32, error_code )
+ ),
+
+ TP_fast_assign(
+ __entry->exception = exception;
+ __entry->has_error = has_error;
+ __entry->error_code = error_code;
+ ),
+
+ TP_printk("%s (0x%x)",
+ __print_symbolic(__entry->exception, kvm_trace_sym_exc),
+ /* FIXME: don't print error_code if not present */
+ __entry->has_error ? __entry->error_code : 0)
+);
+
/*
* Tracepoint for page fault.
*/
@@ -413,12 +443,34 @@ TRACE_EVENT(kvm_nested_vmrun,
),
TP_printk("rip: 0x%016llx vmcb: 0x%016llx nrip: 0x%016llx int_ctl: 0x%08x "
- "event_inj: 0x%08x npt: %s\n",
+ "event_inj: 0x%08x npt: %s",
__entry->rip, __entry->vmcb, __entry->nested_rip,
__entry->int_ctl, __entry->event_inj,
__entry->npt ? "on" : "off")
);
+TRACE_EVENT(kvm_nested_intercepts,
+ TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions, __u64 intercept),
+ TP_ARGS(cr_read, cr_write, exceptions, intercept),
+
+ TP_STRUCT__entry(
+ __field( __u16, cr_read )
+ __field( __u16, cr_write )
+ __field( __u32, exceptions )
+ __field( __u64, intercept )
+ ),
+
+ TP_fast_assign(
+ __entry->cr_read = cr_read;
+ __entry->cr_write = cr_write;
+ __entry->exceptions = exceptions;
+ __entry->intercept = intercept;
+ ),
+
+ TP_printk("cr_read: %04x cr_write: %04x excp: %08x intercept: %016llx",
+ __entry->cr_read, __entry->cr_write, __entry->exceptions,
+ __entry->intercept)
+);
/*
* Tracepoint for #VMEXIT while nested
*/
@@ -447,7 +499,7 @@ TRACE_EVENT(kvm_nested_vmexit,
__entry->exit_int_info_err = exit_int_info_err;
),
TP_printk("rip: 0x%016llx reason: %s ext_inf1: 0x%016llx "
- "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x\n",
+ "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
__entry->rip,
ftrace_print_symbols_seq(p, __entry->exit_code,
kvm_x86_ops->exit_reasons_str),
@@ -482,7 +534,7 @@ TRACE_EVENT(kvm_nested_vmexit_inject,
),
TP_printk("reason: %s ext_inf1: 0x%016llx "
- "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x\n",
+ "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
ftrace_print_symbols_seq(p, __entry->exit_code,
kvm_x86_ops->exit_reasons_str),
__entry->exit_info1, __entry->exit_info2,
@@ -504,7 +556,7 @@ TRACE_EVENT(kvm_nested_intr_vmexit,
__entry->rip = rip
),
- TP_printk("rip: 0x%016llx\n", __entry->rip)
+ TP_printk("rip: 0x%016llx", __entry->rip)
);
/*
@@ -526,7 +578,7 @@ TRACE_EVENT(kvm_invlpga,
__entry->address = address;
),
- TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx\n",
+ TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx",
__entry->rip, __entry->asid, __entry->address)
);
@@ -547,11 +599,102 @@ TRACE_EVENT(kvm_skinit,
__entry->slb = slb;
),
- TP_printk("rip: 0x%016llx slb: 0x%08x\n",
+ TP_printk("rip: 0x%016llx slb: 0x%08x",
__entry->rip, __entry->slb)
);
+#define __print_insn(insn, ilen) ({ \
+ int i; \
+ const char *ret = p->buffer + p->len; \
+ \
+ for (i = 0; i < ilen; ++i) \
+ trace_seq_printf(p, " %02x", insn[i]); \
+ trace_seq_printf(p, "%c", 0); \
+ ret; \
+ })
+
+#define KVM_EMUL_INSN_F_CR0_PE (1 << 0)
+#define KVM_EMUL_INSN_F_EFL_VM (1 << 1)
+#define KVM_EMUL_INSN_F_CS_D (1 << 2)
+#define KVM_EMUL_INSN_F_CS_L (1 << 3)
+
+#define kvm_trace_symbol_emul_flags \
+ { 0, "real" }, \
+ { KVM_EMUL_INSN_F_CR0_PE \
+ | KVM_EMUL_INSN_F_EFL_VM, "vm16" }, \
+ { KVM_EMUL_INSN_F_CR0_PE, "prot16" }, \
+ { KVM_EMUL_INSN_F_CR0_PE \
+ | KVM_EMUL_INSN_F_CS_D, "prot32" }, \
+ { KVM_EMUL_INSN_F_CR0_PE \
+ | KVM_EMUL_INSN_F_CS_L, "prot64" }
+
+#define kei_decode_mode(mode) ({ \
+ u8 flags = 0xff; \
+ switch (mode) { \
+ case X86EMUL_MODE_REAL: \
+ flags = 0; \
+ break; \
+ case X86EMUL_MODE_VM86: \
+ flags = KVM_EMUL_INSN_F_EFL_VM; \
+ break; \
+ case X86EMUL_MODE_PROT16: \
+ flags = KVM_EMUL_INSN_F_CR0_PE; \
+ break; \
+ case X86EMUL_MODE_PROT32: \
+ flags = KVM_EMUL_INSN_F_CR0_PE \
+ | KVM_EMUL_INSN_F_CS_D; \
+ break; \
+ case X86EMUL_MODE_PROT64: \
+ flags = KVM_EMUL_INSN_F_CR0_PE \
+ | KVM_EMUL_INSN_F_CS_L; \
+ break; \
+ } \
+ flags; \
+ })
+
+TRACE_EVENT(kvm_emulate_insn,
+ TP_PROTO(struct kvm_vcpu *vcpu, __u8 failed),
+ TP_ARGS(vcpu, failed),
+
+ TP_STRUCT__entry(
+ __field( __u64, rip )
+ __field( __u32, csbase )
+ __field( __u8, len )
+ __array( __u8, insn, 15 )
+ __field( __u8, flags )
+ __field( __u8, failed )
+ ),
+
+ TP_fast_assign(
+ __entry->rip = vcpu->arch.emulate_ctxt.decode.fetch.start;
+ __entry->csbase = kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS);
+ __entry->len = vcpu->arch.emulate_ctxt.decode.eip
+ - vcpu->arch.emulate_ctxt.decode.fetch.start;
+ memcpy(__entry->insn,
+ vcpu->arch.emulate_ctxt.decode.fetch.data,
+ 15);
+ __entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt.mode);
+ __entry->failed = failed;
+ ),
+
+ TP_printk("%x:%llx:%s (%s)%s",
+ __entry->csbase, __entry->rip,
+ __print_insn(__entry->insn, __entry->len),
+ __print_symbolic(__entry->flags,
+ kvm_trace_symbol_emul_flags),
+ __entry->failed ? " failed" : ""
+ )
+ );
+
+#define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0)
+#define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1)
+
#endif /* _TRACE_KVM_H */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH arch/x86/kvm
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
/* This part must be outside protection */
#include <trace/define_trace.h>
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index bc933cfb4e66..c4f3955c64e0 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -98,6 +98,8 @@ module_param(ple_gap, int, S_IRUGO);
static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
module_param(ple_window, int, S_IRUGO);
+#define NR_AUTOLOAD_MSRS 1
+
struct vmcs {
u32 revision_id;
u32 abort;
@@ -125,6 +127,11 @@ struct vcpu_vmx {
u64 msr_guest_kernel_gs_base;
#endif
struct vmcs *vmcs;
+ struct msr_autoload {
+ unsigned nr;
+ struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
+ struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
+ } msr_autoload;
struct {
int loaded;
u16 fs_sel, gs_sel, ldt_sel;
@@ -234,56 +241,56 @@ static const u32 vmx_msr_index[] = {
};
#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
-static inline int is_page_fault(u32 intr_info)
+static inline bool is_page_fault(u32 intr_info)
{
return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
INTR_INFO_VALID_MASK)) ==
(INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
}
-static inline int is_no_device(u32 intr_info)
+static inline bool is_no_device(u32 intr_info)
{
return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
INTR_INFO_VALID_MASK)) ==
(INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
}
-static inline int is_invalid_opcode(u32 intr_info)
+static inline bool is_invalid_opcode(u32 intr_info)
{
return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
INTR_INFO_VALID_MASK)) ==
(INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
}
-static inline int is_external_interrupt(u32 intr_info)
+static inline bool is_external_interrupt(u32 intr_info)
{
return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}
-static inline int is_machine_check(u32 intr_info)
+static inline bool is_machine_check(u32 intr_info)
{
return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
INTR_INFO_VALID_MASK)) ==
(INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
}
-static inline int cpu_has_vmx_msr_bitmap(void)
+static inline bool cpu_has_vmx_msr_bitmap(void)
{
return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
}
-static inline int cpu_has_vmx_tpr_shadow(void)
+static inline bool cpu_has_vmx_tpr_shadow(void)
{
return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
}
-static inline int vm_need_tpr_shadow(struct kvm *kvm)
+static inline bool vm_need_tpr_shadow(struct kvm *kvm)
{
return (cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm));
}
-static inline int cpu_has_secondary_exec_ctrls(void)
+static inline bool cpu_has_secondary_exec_ctrls(void)
{
return vmcs_config.cpu_based_exec_ctrl &
CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
@@ -303,80 +310,80 @@ static inline bool cpu_has_vmx_flexpriority(void)
static inline bool cpu_has_vmx_ept_execute_only(void)
{
- return !!(vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT);
+ return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
}
static inline bool cpu_has_vmx_eptp_uncacheable(void)
{
- return !!(vmx_capability.ept & VMX_EPTP_UC_BIT);
+ return vmx_capability.ept & VMX_EPTP_UC_BIT;
}
static inline bool cpu_has_vmx_eptp_writeback(void)
{
- return !!(vmx_capability.ept & VMX_EPTP_WB_BIT);
+ return vmx_capability.ept & VMX_EPTP_WB_BIT;
}
static inline bool cpu_has_vmx_ept_2m_page(void)
{
- return !!(vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT);
+ return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
}
static inline bool cpu_has_vmx_ept_1g_page(void)
{
- return !!(vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT);
+ return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
}
-static inline int cpu_has_vmx_invept_individual_addr(void)
+static inline bool cpu_has_vmx_invept_individual_addr(void)
{
- return !!(vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT);
+ return vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT;
}
-static inline int cpu_has_vmx_invept_context(void)
+static inline bool cpu_has_vmx_invept_context(void)
{
- return !!(vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT);
+ return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
}
-static inline int cpu_has_vmx_invept_global(void)
+static inline bool cpu_has_vmx_invept_global(void)
{
- return !!(vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT);
+ return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
}
-static inline int cpu_has_vmx_ept(void)
+static inline bool cpu_has_vmx_ept(void)
{
return vmcs_config.cpu_based_2nd_exec_ctrl &
SECONDARY_EXEC_ENABLE_EPT;
}
-static inline int cpu_has_vmx_unrestricted_guest(void)
+static inline bool cpu_has_vmx_unrestricted_guest(void)
{
return vmcs_config.cpu_based_2nd_exec_ctrl &
SECONDARY_EXEC_UNRESTRICTED_GUEST;
}
-static inline int cpu_has_vmx_ple(void)
+static inline bool cpu_has_vmx_ple(void)
{
return vmcs_config.cpu_based_2nd_exec_ctrl &
SECONDARY_EXEC_PAUSE_LOOP_EXITING;
}
-static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm)
+static inline bool vm_need_virtualize_apic_accesses(struct kvm *kvm)
{
return flexpriority_enabled && irqchip_in_kernel(kvm);
}
-static inline int cpu_has_vmx_vpid(void)
+static inline bool cpu_has_vmx_vpid(void)
{
return vmcs_config.cpu_based_2nd_exec_ctrl &
SECONDARY_EXEC_ENABLE_VPID;
}
-static inline int cpu_has_vmx_rdtscp(void)
+static inline bool cpu_has_vmx_rdtscp(void)
{
return vmcs_config.cpu_based_2nd_exec_ctrl &
SECONDARY_EXEC_RDTSCP;
}
-static inline int cpu_has_virtual_nmis(void)
+static inline bool cpu_has_virtual_nmis(void)
{
return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
}
@@ -595,16 +602,56 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
vmcs_write32(EXCEPTION_BITMAP, eb);
}
+static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
+{
+ unsigned i;
+ struct msr_autoload *m = &vmx->msr_autoload;
+
+ for (i = 0; i < m->nr; ++i)
+ if (m->guest[i].index == msr)
+ break;
+
+ if (i == m->nr)
+ return;
+ --m->nr;
+ m->guest[i] = m->guest[m->nr];
+ m->host[i] = m->host[m->nr];
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
+}
+
+static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
+ u64 guest_val, u64 host_val)
+{
+ unsigned i;
+ struct msr_autoload *m = &vmx->msr_autoload;
+
+ for (i = 0; i < m->nr; ++i)
+ if (m->guest[i].index == msr)
+ break;
+
+ if (i == m->nr) {
+ ++m->nr;
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
+ }
+
+ m->guest[i].index = msr;
+ m->guest[i].value = guest_val;
+ m->host[i].index = msr;
+ m->host[i].value = host_val;
+}
+
static void reload_tss(void)
{
/*
* VT restores TR but not its size. Useless.
*/
- struct descriptor_table gdt;
+ struct desc_ptr gdt;
struct desc_struct *descs;
- kvm_get_gdt(&gdt);
- descs = (void *)gdt.base;
+ native_store_gdt(&gdt);
+ descs = (void *)gdt.address;
descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
load_TR_desc();
}
@@ -631,9 +678,57 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
guest_efer |= host_efer & ignore_bits;
vmx->guest_msrs[efer_offset].data = guest_efer;
vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
+
+ clear_atomic_switch_msr(vmx, MSR_EFER);
+ /* On ept, can't emulate nx, and must switch nx atomically */
+ if (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX)) {
+ guest_efer = vmx->vcpu.arch.efer;
+ if (!(guest_efer & EFER_LMA))
+ guest_efer &= ~EFER_LME;
+ add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, host_efer);
+ return false;
+ }
+
return true;
}
+static unsigned long segment_base(u16 selector)
+{
+ struct desc_ptr gdt;
+ struct desc_struct *d;
+ unsigned long table_base;
+ unsigned long v;
+
+ if (!(selector & ~3))
+ return 0;
+
+ native_store_gdt(&gdt);
+ table_base = gdt.address;
+
+ if (selector & 4) { /* from ldt */
+ u16 ldt_selector = kvm_read_ldt();
+
+ if (!(ldt_selector & ~3))
+ return 0;
+
+ table_base = segment_base(ldt_selector);
+ }
+ d = (struct desc_struct *)(table_base + (selector & ~7));
+ v = get_desc_base(d);
+#ifdef CONFIG_X86_64
+ if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
+ v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
+#endif
+ return v;
+}
+
+static inline unsigned long kvm_read_tr_base(void)
+{
+ u16 tr;
+ asm("str %0" : "=g"(tr));
+ return segment_base(tr);
+}
+
static void vmx_save_host_state(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -758,7 +853,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
}
if (vcpu->cpu != cpu) {
- struct descriptor_table dt;
+ struct desc_ptr dt;
unsigned long sysenter_esp;
vcpu->cpu = cpu;
@@ -767,8 +862,8 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
* processors.
*/
vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
- kvm_get_gdt(&dt);
- vmcs_writel(HOST_GDTR_BASE, dt.base); /* 22.2.4 */
+ native_store_gdt(&dt);
+ vmcs_writel(HOST_GDTR_BASE, dt.address); /* 22.2.4 */
rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
@@ -846,9 +941,9 @@ static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
int ret = 0;
if (interruptibility & GUEST_INTR_STATE_STI)
- ret |= X86_SHADOW_INT_STI;
+ ret |= KVM_X86_SHADOW_INT_STI;
if (interruptibility & GUEST_INTR_STATE_MOV_SS)
- ret |= X86_SHADOW_INT_MOV_SS;
+ ret |= KVM_X86_SHADOW_INT_MOV_SS;
return ret & mask;
}
@@ -860,9 +955,9 @@ static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
- if (mask & X86_SHADOW_INT_MOV_SS)
+ if (mask & KVM_X86_SHADOW_INT_MOV_SS)
interruptibility |= GUEST_INTR_STATE_MOV_SS;
- if (mask & X86_SHADOW_INT_STI)
+ else if (mask & KVM_X86_SHADOW_INT_STI)
interruptibility |= GUEST_INTR_STATE_STI;
if ((interruptibility != interruptibility_old))
@@ -882,7 +977,8 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
}
static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
- bool has_error_code, u32 error_code)
+ bool has_error_code, u32 error_code,
+ bool reinject)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 intr_info = nr | INTR_INFO_VALID_MASK;
@@ -1521,7 +1617,7 @@ static gva_t rmode_tss_base(struct kvm *kvm)
struct kvm_memslots *slots;
gfn_t base_gfn;
- slots = rcu_dereference(kvm->memslots);
+ slots = kvm_memslots(kvm);
base_gfn = kvm->memslots->memslots[0].base_gfn +
kvm->memslots->memslots[0].npages - 3;
return base_gfn << PAGE_SHIFT;
@@ -1649,6 +1745,7 @@ static void exit_lmode(struct kvm_vcpu *vcpu)
vmcs_write32(VM_ENTRY_CONTROLS,
vmcs_read32(VM_ENTRY_CONTROLS)
& ~VM_ENTRY_IA32E_MODE);
+ vmx_set_efer(vcpu, vcpu->arch.efer);
}
#endif
@@ -1934,28 +2031,28 @@ static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
*l = (ar >> 13) & 1;
}
-static void vmx_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
- dt->limit = vmcs_read32(GUEST_IDTR_LIMIT);
- dt->base = vmcs_readl(GUEST_IDTR_BASE);
+ dt->size = vmcs_read32(GUEST_IDTR_LIMIT);
+ dt->address = vmcs_readl(GUEST_IDTR_BASE);
}
-static void vmx_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
- vmcs_write32(GUEST_IDTR_LIMIT, dt->limit);
- vmcs_writel(GUEST_IDTR_BASE, dt->base);
+ vmcs_write32(GUEST_IDTR_LIMIT, dt->size);
+ vmcs_writel(GUEST_IDTR_BASE, dt->address);
}
-static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
- dt->limit = vmcs_read32(GUEST_GDTR_LIMIT);
- dt->base = vmcs_readl(GUEST_GDTR_BASE);
+ dt->size = vmcs_read32(GUEST_GDTR_LIMIT);
+ dt->address = vmcs_readl(GUEST_GDTR_BASE);
}
-static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
- vmcs_write32(GUEST_GDTR_LIMIT, dt->limit);
- vmcs_writel(GUEST_GDTR_BASE, dt->base);
+ vmcs_write32(GUEST_GDTR_LIMIT, dt->size);
+ vmcs_writel(GUEST_GDTR_BASE, dt->address);
}
static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
@@ -2296,6 +2393,16 @@ static void allocate_vpid(struct vcpu_vmx *vmx)
spin_unlock(&vmx_vpid_lock);
}
+static void free_vpid(struct vcpu_vmx *vmx)
+{
+ if (!enable_vpid)
+ return;
+ spin_lock(&vmx_vpid_lock);
+ if (vmx->vpid != 0)
+ __clear_bit(vmx->vpid, vmx_vpid_bitmap);
+ spin_unlock(&vmx_vpid_lock);
+}
+
static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr)
{
int f = sizeof(unsigned long);
@@ -2334,7 +2441,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
u32 junk;
u64 host_pat, tsc_this, tsc_base;
unsigned long a;
- struct descriptor_table dt;
+ struct desc_ptr dt;
int i;
unsigned long kvm_vmx_return;
u32 exec_control;
@@ -2415,14 +2522,16 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */
- kvm_get_idt(&dt);
- vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
+ native_store_idt(&dt);
+ vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
+ vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
+ vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
@@ -2948,22 +3057,20 @@ static int handle_io(struct kvm_vcpu *vcpu)
int size, in, string;
unsigned port;
- ++vcpu->stat.io_exits;
exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
string = (exit_qualification & 16) != 0;
+ in = (exit_qualification & 8) != 0;
- if (string) {
- if (emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DO_MMIO)
- return 0;
- return 1;
- }
+ ++vcpu->stat.io_exits;
- size = (exit_qualification & 7) + 1;
- in = (exit_qualification & 8) != 0;
- port = exit_qualification >> 16;
+ if (string || in)
+ return !(emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DO_MMIO);
+ port = exit_qualification >> 16;
+ size = (exit_qualification & 7) + 1;
skip_emulated_instruction(vcpu);
- return kvm_emulate_pio(vcpu, in, size, port);
+
+ return kvm_fast_pio_out(vcpu, size, port);
}
static void
@@ -3054,19 +3161,9 @@ static int handle_cr(struct kvm_vcpu *vcpu)
return 0;
}
-static int check_dr_alias(struct kvm_vcpu *vcpu)
-{
- if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
- kvm_queue_exception(vcpu, UD_VECTOR);
- return -1;
- }
- return 0;
-}
-
static int handle_dr(struct kvm_vcpu *vcpu)
{
unsigned long exit_qualification;
- unsigned long val;
int dr, reg;
/* Do not handle if the CPL > 0, will trigger GP on re-entry */
@@ -3101,67 +3198,20 @@ static int handle_dr(struct kvm_vcpu *vcpu)
dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
reg = DEBUG_REG_ACCESS_REG(exit_qualification);
if (exit_qualification & TYPE_MOV_FROM_DR) {
- switch (dr) {
- case 0 ... 3:
- val = vcpu->arch.db[dr];
- break;
- case 4:
- if (check_dr_alias(vcpu) < 0)
- return 1;
- /* fall through */
- case 6:
- val = vcpu->arch.dr6;
- break;
- case 5:
- if (check_dr_alias(vcpu) < 0)
- return 1;
- /* fall through */
- default: /* 7 */
- val = vcpu->arch.dr7;
- break;
- }
- kvm_register_write(vcpu, reg, val);
- } else {
- val = vcpu->arch.regs[reg];
- switch (dr) {
- case 0 ... 3:
- vcpu->arch.db[dr] = val;
- if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
- vcpu->arch.eff_db[dr] = val;
- break;
- case 4:
- if (check_dr_alias(vcpu) < 0)
- return 1;
- /* fall through */
- case 6:
- if (val & 0xffffffff00000000ULL) {
- kvm_inject_gp(vcpu, 0);
- return 1;
- }
- vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
- break;
- case 5:
- if (check_dr_alias(vcpu) < 0)
- return 1;
- /* fall through */
- default: /* 7 */
- if (val & 0xffffffff00000000ULL) {
- kvm_inject_gp(vcpu, 0);
- return 1;
- }
- vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
- if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
- vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
- vcpu->arch.switch_db_regs =
- (val & DR7_BP_EN_MASK);
- }
- break;
- }
- }
+ unsigned long val;
+ if (!kvm_get_dr(vcpu, dr, &val))
+ kvm_register_write(vcpu, reg, val);
+ } else
+ kvm_set_dr(vcpu, dr, vcpu->arch.regs[reg]);
skip_emulated_instruction(vcpu);
return 1;
}
+static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
+{
+ vmcs_writel(GUEST_DR7, val);
+}
+
static int handle_cpuid(struct kvm_vcpu *vcpu)
{
kvm_emulate_cpuid(vcpu);
@@ -3293,6 +3343,8 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long exit_qualification;
+ bool has_error_code = false;
+ u32 error_code = 0;
u16 tss_selector;
int reason, type, idt_v;
@@ -3315,6 +3367,13 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
kvm_clear_interrupt_queue(vcpu);
break;
case INTR_TYPE_HARD_EXCEPTION:
+ if (vmx->idt_vectoring_info &
+ VECTORING_INFO_DELIVER_CODE_MASK) {
+ has_error_code = true;
+ error_code =
+ vmcs_read32(IDT_VECTORING_ERROR_CODE);
+ }
+ /* fall through */
case INTR_TYPE_SOFT_EXCEPTION:
kvm_clear_exception_queue(vcpu);
break;
@@ -3329,8 +3388,13 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
type != INTR_TYPE_NMI_INTR))
skip_emulated_instruction(vcpu);
- if (!kvm_task_switch(vcpu, tss_selector, reason))
+ if (kvm_task_switch(vcpu, tss_selector, reason,
+ has_error_code, error_code) == EMULATE_FAIL) {
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+ vcpu->run->internal.ndata = 0;
return 0;
+ }
/* clear all local breakpoint enable flags */
vmcs_writel(GUEST_DR7, vmcs_readl(GUEST_DR7) & ~55);
@@ -3575,7 +3639,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
u32 exit_reason = vmx->exit_reason;
u32 vectoring_info = vmx->idt_vectoring_info;
- trace_kvm_exit(exit_reason, kvm_rip_read(vcpu));
+ trace_kvm_exit(exit_reason, vcpu);
/* If guest state is invalid, start emulating */
if (vmx->emulation_required && emulate_invalid_guest_state)
@@ -3660,8 +3724,11 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
/* We need to handle NMIs before interrupts are enabled */
if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
- (exit_intr_info & INTR_INFO_VALID_MASK))
+ (exit_intr_info & INTR_INFO_VALID_MASK)) {
+ kvm_before_handle_nmi(&vmx->vcpu);
asm("int $2");
+ kvm_after_handle_nmi(&vmx->vcpu);
+ }
idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
@@ -3921,10 +3988,7 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- spin_lock(&vmx_vpid_lock);
- if (vmx->vpid != 0)
- __clear_bit(vmx->vpid, vmx_vpid_bitmap);
- spin_unlock(&vmx_vpid_lock);
+ free_vpid(vmx);
vmx_free_vmcs(vcpu);
kfree(vmx->guest_msrs);
kvm_vcpu_uninit(vcpu);
@@ -3986,6 +4050,7 @@ free_msrs:
uninit_vcpu:
kvm_vcpu_uninit(&vmx->vcpu);
free_vcpu:
+ free_vpid(vmx);
kmem_cache_free(kvm_vcpu_cache, vmx);
return ERR_PTR(err);
}
@@ -4116,6 +4181,10 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
}
}
+static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
+{
+}
+
static struct kvm_x86_ops vmx_x86_ops = {
.cpu_has_kvm_support = cpu_has_kvm_support,
.disabled_by_bios = vmx_disabled_by_bios,
@@ -4152,6 +4221,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
.set_idt = vmx_set_idt,
.get_gdt = vmx_get_gdt,
.set_gdt = vmx_set_gdt,
+ .set_dr7 = vmx_set_dr7,
.cache_reg = vmx_cache_reg,
.get_rflags = vmx_get_rflags,
.set_rflags = vmx_set_rflags,
@@ -4187,6 +4257,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
.cpuid_update = vmx_cpuid_update,
.rdtscp_supported = vmx_rdtscp_supported,
+
+ .set_supported_cpuid = vmx_set_supported_cpuid,
};
static int __init vmx_init(void)
@@ -4234,7 +4306,8 @@ static int __init vmx_init(void)
set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
- r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
+ r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
+ __alignof__(struct vcpu_vmx), THIS_MODULE);
if (r)
goto out3;
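
A standalone illustration (not part of the patch): the rewritten handle_io() above decides between the instruction emulator and the new kvm_fast_pio_out() fast path by decoding the VMX I/O exit qualification. The field layout below is inferred from the bit tests in that hunk (bit 4 = string operation, bit 3 = IN, bits 2:0 = access size minus one, bits 31:16 = port number); the example value is made up.

    #include <stdio.h>

    /* Decode an I/O-instruction VM-exit qualification the way handle_io() does. */
    static void decode_io_exit(unsigned long exit_qualification)
    {
            int string = (exit_qualification & 16) != 0;    /* INS/OUTS */
            int in     = (exit_qualification & 8) != 0;     /* 1 = IN, 0 = OUT */
            int size   = (exit_qualification & 7) + 1;      /* bytes per access */
            unsigned port = exit_qualification >> 16;       /* I/O port number */

            printf("port 0x%x, %d byte(s), %s, %s access\n", port, size,
                   in ? "IN" : "OUT", string ? "string" : "single");
    }

    int main(void)
    {
            decode_io_exit(0x00710001UL);   /* a 2-byte OUT to port 0x71 */
            return 0;
    }

In the patch itself, string and IN accesses are routed through emulate_instruction(), while a plain OUT skips the emulator and takes kvm_fast_pio_out().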
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3c4ca98ad27f..6b2ce1d2d748 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -40,8 +40,9 @@
#include <linux/user-return-notifier.h>
#include <linux/srcu.h>
#include <linux/slab.h>
+#include <linux/perf_event.h>
#include <trace/events/kvm.h>
-#undef TRACE_INCLUDE_FILE
+
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -223,34 +224,6 @@ static void drop_user_return_notifiers(void *ignore)
kvm_on_user_return(&smsr->urn);
}
-unsigned long segment_base(u16 selector)
-{
- struct descriptor_table gdt;
- struct desc_struct *d;
- unsigned long table_base;
- unsigned long v;
-
- if (selector == 0)
- return 0;
-
- kvm_get_gdt(&gdt);
- table_base = gdt.base;
-
- if (selector & 4) { /* from ldt */
- u16 ldt_selector = kvm_read_ldt();
-
- table_base = segment_base(ldt_selector);
- }
- d = (struct desc_struct *)(table_base + (selector & ~7));
- v = get_desc_base(d);
-#ifdef CONFIG_X86_64
- if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
- v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
-#endif
- return v;
-}
-EXPORT_SYMBOL_GPL(segment_base);
-
u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
if (irqchip_in_kernel(vcpu->kvm))
@@ -292,7 +265,8 @@ static int exception_class(int vector)
}
static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
- unsigned nr, bool has_error, u32 error_code)
+ unsigned nr, bool has_error, u32 error_code,
+ bool reinject)
{
u32 prev_nr;
int class1, class2;
@@ -303,6 +277,7 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
vcpu->arch.exception.has_error_code = has_error;
vcpu->arch.exception.nr = nr;
vcpu->arch.exception.error_code = error_code;
+ vcpu->arch.exception.reinject = reinject;
return;
}
@@ -331,10 +306,16 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
- kvm_multiple_exception(vcpu, nr, false, 0);
+ kvm_multiple_exception(vcpu, nr, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);
+void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
+{
+ kvm_multiple_exception(vcpu, nr, false, 0, true);
+}
+EXPORT_SYMBOL_GPL(kvm_requeue_exception);
+
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
u32 error_code)
{
@@ -351,10 +332,16 @@ EXPORT_SYMBOL_GPL(kvm_inject_nmi);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
- kvm_multiple_exception(vcpu, nr, true, error_code);
+ kvm_multiple_exception(vcpu, nr, true, error_code, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
+void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
+{
+ kvm_multiple_exception(vcpu, nr, true, error_code, true);
+}
+EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);
+
/*
* Checks if cpl <= required_cpl; if true, return true. Otherwise queue
* a #GP and return false.
@@ -475,7 +462,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
}
kvm_x86_ops->set_cr0(vcpu, cr0);
- vcpu->arch.cr0 = cr0;
kvm_mmu_reset_context(vcpu);
return;
@@ -516,7 +502,6 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
}
kvm_x86_ops->set_cr4(vcpu, cr4);
vcpu->arch.cr4 = cr4;
- vcpu->arch.mmu.base_role.cr4_pge = (cr4 & X86_CR4_PGE) && !tdp_enabled;
kvm_mmu_reset_context(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);
@@ -591,6 +576,80 @@ unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);
+int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
+{
+ switch (dr) {
+ case 0 ... 3:
+ vcpu->arch.db[dr] = val;
+ if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
+ vcpu->arch.eff_db[dr] = val;
+ break;
+ case 4:
+ if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
+ /* fall through */
+ case 6:
+ if (val & 0xffffffff00000000ULL) {
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+ vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
+ break;
+ case 5:
+ if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
+ /* fall through */
+ default: /* 7 */
+ if (val & 0xffffffff00000000ULL) {
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+ vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
+ if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
+ kvm_x86_ops->set_dr7(vcpu, vcpu->arch.dr7);
+ vcpu->arch.switch_db_regs = (val & DR7_BP_EN_MASK);
+ }
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_set_dr);
+
+int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
+{
+ switch (dr) {
+ case 0 ... 3:
+ *val = vcpu->arch.db[dr];
+ break;
+ case 4:
+ if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
+ /* fall through */
+ case 6:
+ *val = vcpu->arch.dr6;
+ break;
+ case 5:
+ if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
+ /* fall through */
+ default: /* 7 */
+ *val = vcpu->arch.dr7;
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_get_dr);
+
static inline u32 bit(int bitno)
{
return 1 << (bitno & 31);
@@ -1090,6 +1149,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
break;
case MSR_K7_HWCR:
data &= ~(u64)0x40; /* ignore flush filter disable */
+ data &= ~(u64)0x100; /* ignore ignne emulation enable */
if (data != 0) {
pr_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
data);
@@ -1548,6 +1608,7 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_HYPERV_VAPIC:
case KVM_CAP_HYPERV_SPIN:
case KVM_CAP_PCI_SEGMENT:
+ case KVM_CAP_DEBUGREGS:
case KVM_CAP_X86_ROBUST_SINGLESTEP:
r = 1;
break;
@@ -1913,6 +1974,9 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
entry->ecx &= kvm_supported_word6_x86_features;
break;
}
+
+ kvm_x86_ops->set_supported_cpuid(function, entry);
+
put_cpu();
}
@@ -2100,14 +2164,20 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
{
vcpu_load(vcpu);
- events->exception.injected = vcpu->arch.exception.pending;
+ events->exception.injected =
+ vcpu->arch.exception.pending &&
+ !kvm_exception_is_soft(vcpu->arch.exception.nr);
events->exception.nr = vcpu->arch.exception.nr;
events->exception.has_error_code = vcpu->arch.exception.has_error_code;
events->exception.error_code = vcpu->arch.exception.error_code;
- events->interrupt.injected = vcpu->arch.interrupt.pending;
+ events->interrupt.injected =
+ vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
events->interrupt.nr = vcpu->arch.interrupt.nr;
- events->interrupt.soft = vcpu->arch.interrupt.soft;
+ events->interrupt.soft = 0;
+ events->interrupt.shadow =
+ kvm_x86_ops->get_interrupt_shadow(vcpu,
+ KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI);
events->nmi.injected = vcpu->arch.nmi_injected;
events->nmi.pending = vcpu->arch.nmi_pending;
@@ -2116,7 +2186,8 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
events->sipi_vector = vcpu->arch.sipi_vector;
events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
- | KVM_VCPUEVENT_VALID_SIPI_VECTOR);
+ | KVM_VCPUEVENT_VALID_SIPI_VECTOR
+ | KVM_VCPUEVENT_VALID_SHADOW);
vcpu_put(vcpu);
}
@@ -2125,7 +2196,8 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events)
{
if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
- | KVM_VCPUEVENT_VALID_SIPI_VECTOR))
+ | KVM_VCPUEVENT_VALID_SIPI_VECTOR
+ | KVM_VCPUEVENT_VALID_SHADOW))
return -EINVAL;
vcpu_load(vcpu);
@@ -2140,6 +2212,9 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
vcpu->arch.interrupt.soft = events->interrupt.soft;
if (vcpu->arch.interrupt.pending && irqchip_in_kernel(vcpu->kvm))
kvm_pic_clear_isr_ack(vcpu->kvm);
+ if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
+ kvm_x86_ops->set_interrupt_shadow(vcpu,
+ events->interrupt.shadow);
vcpu->arch.nmi_injected = events->nmi.injected;
if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
@@ -2154,6 +2229,36 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
return 0;
}
+static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
+ struct kvm_debugregs *dbgregs)
+{
+ vcpu_load(vcpu);
+
+ memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
+ dbgregs->dr6 = vcpu->arch.dr6;
+ dbgregs->dr7 = vcpu->arch.dr7;
+ dbgregs->flags = 0;
+
+ vcpu_put(vcpu);
+}
+
+static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
+ struct kvm_debugregs *dbgregs)
+{
+ if (dbgregs->flags)
+ return -EINVAL;
+
+ vcpu_load(vcpu);
+
+ memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
+ vcpu->arch.dr6 = dbgregs->dr6;
+ vcpu->arch.dr7 = dbgregs->dr7;
+
+ vcpu_put(vcpu);
+
+ return 0;
+}
+
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -2332,6 +2437,29 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
break;
}
+ case KVM_GET_DEBUGREGS: {
+ struct kvm_debugregs dbgregs;
+
+ kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
+
+ r = -EFAULT;
+ if (copy_to_user(argp, &dbgregs,
+ sizeof(struct kvm_debugregs)))
+ break;
+ r = 0;
+ break;
+ }
+ case KVM_SET_DEBUGREGS: {
+ struct kvm_debugregs dbgregs;
+
+ r = -EFAULT;
+ if (copy_from_user(&dbgregs, argp,
+ sizeof(struct kvm_debugregs)))
+ break;
+
+ r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
+ break;
+ }
default:
r = -EINVAL;
}
@@ -2385,7 +2513,7 @@ gfn_t unalias_gfn_instantiation(struct kvm *kvm, gfn_t gfn)
struct kvm_mem_alias *alias;
struct kvm_mem_aliases *aliases;
- aliases = rcu_dereference(kvm->arch.aliases);
+ aliases = kvm_aliases(kvm);
for (i = 0; i < aliases->naliases; ++i) {
alias = &aliases->aliases[i];
@@ -2404,7 +2532,7 @@ gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
struct kvm_mem_alias *alias;
struct kvm_mem_aliases *aliases;
- aliases = rcu_dereference(kvm->arch.aliases);
+ aliases = kvm_aliases(kvm);
for (i = 0; i < aliases->naliases; ++i) {
alias = &aliases->aliases[i];
@@ -2799,11 +2927,13 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = -EFAULT;
if (copy_from_user(&irq_event, argp, sizeof irq_event))
goto out;
+ r = -ENXIO;
if (irqchip_in_kernel(kvm)) {
__s32 status;
status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
irq_event.irq, irq_event.level);
if (ioctl == KVM_IRQ_LINE_STATUS) {
+ r = -EFAULT;
irq_event.status = status;
if (copy_to_user(argp, &irq_event,
sizeof irq_event))
@@ -3019,6 +3149,18 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
return kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
}
+static void kvm_set_segment(struct kvm_vcpu *vcpu,
+ struct kvm_segment *var, int seg)
+{
+ kvm_x86_ops->set_segment(vcpu, var, seg);
+}
+
+void kvm_get_segment(struct kvm_vcpu *vcpu,
+ struct kvm_segment *var, int seg)
+{
+ kvm_x86_ops->get_segment(vcpu, var, seg);
+}
+
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
{
u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
@@ -3099,14 +3241,17 @@ static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes,
return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, error);
}
-static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
- struct kvm_vcpu *vcpu, u32 *error)
+static int kvm_write_guest_virt_system(gva_t addr, void *val,
+ unsigned int bytes,
+ struct kvm_vcpu *vcpu,
+ u32 *error)
{
void *data = val;
int r = X86EMUL_CONTINUE;
while (bytes) {
- gpa_t gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, error);
+ gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr,
+ PFERR_WRITE_MASK, error);
unsigned offset = addr & (PAGE_SIZE-1);
unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
int ret;
@@ -3129,7 +3274,6 @@ out:
return r;
}
-
static int emulator_read_emulated(unsigned long addr,
void *val,
unsigned int bytes,
@@ -3232,9 +3376,9 @@ mmio:
}
int emulator_write_emulated(unsigned long addr,
- const void *val,
- unsigned int bytes,
- struct kvm_vcpu *vcpu)
+ const void *val,
+ unsigned int bytes,
+ struct kvm_vcpu *vcpu)
{
/* Crossing a page boundary? */
if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
@@ -3252,45 +3396,150 @@ int emulator_write_emulated(unsigned long addr,
}
EXPORT_SYMBOL_GPL(emulator_write_emulated);
+#define CMPXCHG_TYPE(t, ptr, old, new) \
+ (cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))
+
+#ifdef CONFIG_X86_64
+# define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new)
+#else
+# define CMPXCHG64(ptr, old, new) \
+ (cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
+#endif
+
static int emulator_cmpxchg_emulated(unsigned long addr,
const void *old,
const void *new,
unsigned int bytes,
struct kvm_vcpu *vcpu)
{
- printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
-#ifndef CONFIG_X86_64
- /* guests cmpxchg8b have to be emulated atomically */
- if (bytes == 8) {
- gpa_t gpa;
- struct page *page;
- char *kaddr;
- u64 val;
+ gpa_t gpa;
+ struct page *page;
+ char *kaddr;
+ bool exchanged;
- gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
+ /* a guest's cmpxchg8b has to be emulated atomically */
+ if (bytes > 8 || (bytes & (bytes - 1)))
+ goto emul_write;
- if (gpa == UNMAPPED_GVA ||
- (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
- goto emul_write;
+ gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
- if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
- goto emul_write;
+ if (gpa == UNMAPPED_GVA ||
+ (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
+ goto emul_write;
- val = *(u64 *)new;
+ if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
+ goto emul_write;
- page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+ page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
- kaddr = kmap_atomic(page, KM_USER0);
- set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
- kunmap_atomic(kaddr, KM_USER0);
- kvm_release_page_dirty(page);
+ kaddr = kmap_atomic(page, KM_USER0);
+ kaddr += offset_in_page(gpa);
+ switch (bytes) {
+ case 1:
+ exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
+ break;
+ case 2:
+ exchanged = CMPXCHG_TYPE(u16, kaddr, old, new);
+ break;
+ case 4:
+ exchanged = CMPXCHG_TYPE(u32, kaddr, old, new);
+ break;
+ case 8:
+ exchanged = CMPXCHG64(kaddr, old, new);
+ break;
+ default:
+ BUG();
}
+ kunmap_atomic(kaddr, KM_USER0);
+ kvm_release_page_dirty(page);
+
+ if (!exchanged)
+ return X86EMUL_CMPXCHG_FAILED;
+
+ kvm_mmu_pte_write(vcpu, gpa, new, bytes, 1);
+
+ return X86EMUL_CONTINUE;
+
emul_write:
-#endif
+ printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
return emulator_write_emulated(addr, new, bytes, vcpu);
}
+static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
+{
+ /* TODO: String I/O for in kernel device */
+ int r;
+
+ if (vcpu->arch.pio.in)
+ r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
+ vcpu->arch.pio.size, pd);
+ else
+ r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
+ vcpu->arch.pio.port, vcpu->arch.pio.size,
+ pd);
+ return r;
+}
+
+
+static int emulator_pio_in_emulated(int size, unsigned short port, void *val,
+ unsigned int count, struct kvm_vcpu *vcpu)
+{
+ if (vcpu->arch.pio.count)
+ goto data_avail;
+
+ trace_kvm_pio(1, port, size, 1);
+
+ vcpu->arch.pio.port = port;
+ vcpu->arch.pio.in = 1;
+ vcpu->arch.pio.count = count;
+ vcpu->arch.pio.size = size;
+
+ if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
+ data_avail:
+ memcpy(val, vcpu->arch.pio_data, size * count);
+ vcpu->arch.pio.count = 0;
+ return 1;
+ }
+
+ vcpu->run->exit_reason = KVM_EXIT_IO;
+ vcpu->run->io.direction = KVM_EXIT_IO_IN;
+ vcpu->run->io.size = size;
+ vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
+ vcpu->run->io.count = count;
+ vcpu->run->io.port = port;
+
+ return 0;
+}
+
+static int emulator_pio_out_emulated(int size, unsigned short port,
+ const void *val, unsigned int count,
+ struct kvm_vcpu *vcpu)
+{
+ trace_kvm_pio(0, port, size, 1);
+
+ vcpu->arch.pio.port = port;
+ vcpu->arch.pio.in = 0;
+ vcpu->arch.pio.count = count;
+ vcpu->arch.pio.size = size;
+
+ memcpy(vcpu->arch.pio_data, val, size * count);
+
+ if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
+ vcpu->arch.pio.count = 0;
+ return 1;
+ }
+
+ vcpu->run->exit_reason = KVM_EXIT_IO;
+ vcpu->run->io.direction = KVM_EXIT_IO_OUT;
+ vcpu->run->io.size = size;
+ vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
+ vcpu->run->io.count = count;
+ vcpu->run->io.port = port;
+
+ return 0;
+}
+
static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
return kvm_x86_ops->get_segment_base(vcpu, seg);
@@ -3311,14 +3560,14 @@ int emulate_clts(struct kvm_vcpu *vcpu)
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
{
- return kvm_x86_ops->get_dr(ctxt->vcpu, dr, dest);
+ return kvm_get_dr(ctxt->vcpu, dr, dest);
}
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
{
unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
- return kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask);
+ return kvm_set_dr(ctxt->vcpu, dr, value & mask);
}
void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
@@ -3339,12 +3588,167 @@ void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
}
EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
+static u64 mk_cr_64(u64 curr_cr, u32 new_val)
+{
+ return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
+}
+
+static unsigned long emulator_get_cr(int cr, struct kvm_vcpu *vcpu)
+{
+ unsigned long value;
+
+ switch (cr) {
+ case 0:
+ value = kvm_read_cr0(vcpu);
+ break;
+ case 2:
+ value = vcpu->arch.cr2;
+ break;
+ case 3:
+ value = vcpu->arch.cr3;
+ break;
+ case 4:
+ value = kvm_read_cr4(vcpu);
+ break;
+ case 8:
+ value = kvm_get_cr8(vcpu);
+ break;
+ default:
+ vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
+ return 0;
+ }
+
+ return value;
+}
+
+static void emulator_set_cr(int cr, unsigned long val, struct kvm_vcpu *vcpu)
+{
+ switch (cr) {
+ case 0:
+ kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
+ break;
+ case 2:
+ vcpu->arch.cr2 = val;
+ break;
+ case 3:
+ kvm_set_cr3(vcpu, val);
+ break;
+ case 4:
+ kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
+ break;
+ case 8:
+ kvm_set_cr8(vcpu, val & 0xfUL);
+ break;
+ default:
+ vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
+ }
+}
+
+static int emulator_get_cpl(struct kvm_vcpu *vcpu)
+{
+ return kvm_x86_ops->get_cpl(vcpu);
+}
+
+static void emulator_get_gdt(struct desc_ptr *dt, struct kvm_vcpu *vcpu)
+{
+ kvm_x86_ops->get_gdt(vcpu, dt);
+}
+
+static bool emulator_get_cached_descriptor(struct desc_struct *desc, int seg,
+ struct kvm_vcpu *vcpu)
+{
+ struct kvm_segment var;
+
+ kvm_get_segment(vcpu, &var, seg);
+
+ if (var.unusable)
+ return false;
+
+ if (var.g)
+ var.limit >>= 12;
+ set_desc_limit(desc, var.limit);
+ set_desc_base(desc, (unsigned long)var.base);
+ desc->type = var.type;
+ desc->s = var.s;
+ desc->dpl = var.dpl;
+ desc->p = var.present;
+ desc->avl = var.avl;
+ desc->l = var.l;
+ desc->d = var.db;
+ desc->g = var.g;
+
+ return true;
+}
+
+static void emulator_set_cached_descriptor(struct desc_struct *desc, int seg,
+ struct kvm_vcpu *vcpu)
+{
+ struct kvm_segment var;
+
+ /* needed to preserve selector */
+ kvm_get_segment(vcpu, &var, seg);
+
+ var.base = get_desc_base(desc);
+ var.limit = get_desc_limit(desc);
+ if (desc->g)
+ var.limit = (var.limit << 12) | 0xfff;
+ var.type = desc->type;
+ var.present = desc->p;
+ var.dpl = desc->dpl;
+ var.db = desc->d;
+ var.s = desc->s;
+ var.l = desc->l;
+ var.g = desc->g;
+ var.avl = desc->avl;
+ var.present = desc->p;
+ var.unusable = !var.present;
+ var.padding = 0;
+
+ kvm_set_segment(vcpu, &var, seg);
+ return;
+}
+
+static u16 emulator_get_segment_selector(int seg, struct kvm_vcpu *vcpu)
+{
+ struct kvm_segment kvm_seg;
+
+ kvm_get_segment(vcpu, &kvm_seg, seg);
+ return kvm_seg.selector;
+}
+
+static void emulator_set_segment_selector(u16 sel, int seg,
+ struct kvm_vcpu *vcpu)
+{
+ struct kvm_segment kvm_seg;
+
+ kvm_get_segment(vcpu, &kvm_seg, seg);
+ kvm_seg.selector = sel;
+ kvm_set_segment(vcpu, &kvm_seg, seg);
+}
+
+static void emulator_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
+{
+ kvm_x86_ops->set_rflags(vcpu, rflags);
+}
+
static struct x86_emulate_ops emulate_ops = {
.read_std = kvm_read_guest_virt_system,
+ .write_std = kvm_write_guest_virt_system,
.fetch = kvm_fetch_guest_virt,
.read_emulated = emulator_read_emulated,
.write_emulated = emulator_write_emulated,
.cmpxchg_emulated = emulator_cmpxchg_emulated,
+ .pio_in_emulated = emulator_pio_in_emulated,
+ .pio_out_emulated = emulator_pio_out_emulated,
+ .get_cached_descriptor = emulator_get_cached_descriptor,
+ .set_cached_descriptor = emulator_set_cached_descriptor,
+ .get_segment_selector = emulator_get_segment_selector,
+ .set_segment_selector = emulator_set_segment_selector,
+ .get_gdt = emulator_get_gdt,
+ .get_cr = emulator_get_cr,
+ .set_cr = emulator_set_cr,
+ .cpl = emulator_get_cpl,
+ .set_rflags = emulator_set_rflags,
};
static void cache_all_regs(struct kvm_vcpu *vcpu)
@@ -3375,14 +3779,14 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
cache_all_regs(vcpu);
vcpu->mmio_is_write = 0;
- vcpu->arch.pio.string = 0;
if (!(emulation_type & EMULTYPE_NO_DECODE)) {
int cs_db, cs_l;
kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
vcpu->arch.emulate_ctxt.vcpu = vcpu;
- vcpu->arch.emulate_ctxt.eflags = kvm_get_rflags(vcpu);
+ vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
+ vcpu->arch.emulate_ctxt.eip = kvm_rip_read(vcpu);
vcpu->arch.emulate_ctxt.mode =
(!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
(vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
@@ -3391,6 +3795,7 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
+ trace_kvm_emulate_insn_start(vcpu);
/* Only allow emulation of specific instructions on #UD
* (namely VMMCALL, sysenter, sysexit, syscall)*/
@@ -3423,6 +3828,7 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
++vcpu->stat.insn_emulation;
if (r) {
++vcpu->stat.insn_emulation_fail;
+ trace_kvm_emulate_insn_failed(vcpu);
if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
return EMULATE_DONE;
return EMULATE_FAIL;
@@ -3434,16 +3840,20 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
return EMULATE_DONE;
}
+restart:
r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
shadow_mask = vcpu->arch.emulate_ctxt.interruptibility;
if (r == 0)
kvm_x86_ops->set_interrupt_shadow(vcpu, shadow_mask);
- if (vcpu->arch.pio.string)
+ if (vcpu->arch.pio.count) {
+ if (!vcpu->arch.pio.in)
+ vcpu->arch.pio.count = 0;
return EMULATE_DO_MMIO;
+ }
- if ((r || vcpu->mmio_is_write) && run) {
+ if (r || vcpu->mmio_is_write) {
run->exit_reason = KVM_EXIT_MMIO;
run->mmio.phys_addr = vcpu->mmio_phys_addr;
memcpy(run->mmio.data, vcpu->mmio_data, 8);
@@ -3453,222 +3863,41 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
if (r) {
if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
- return EMULATE_DONE;
+ goto done;
if (!vcpu->mmio_needed) {
+ ++vcpu->stat.insn_emulation_fail;
+ trace_kvm_emulate_insn_failed(vcpu);
kvm_report_emulation_failure(vcpu, "mmio");
return EMULATE_FAIL;
}
return EMULATE_DO_MMIO;
}
- kvm_set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
-
if (vcpu->mmio_is_write) {
vcpu->mmio_needed = 0;
return EMULATE_DO_MMIO;
}
- return EMULATE_DONE;
-}
-EXPORT_SYMBOL_GPL(emulate_instruction);
-
-static int pio_copy_data(struct kvm_vcpu *vcpu)
-{
- void *p = vcpu->arch.pio_data;
- gva_t q = vcpu->arch.pio.guest_gva;
- unsigned bytes;
- int ret;
- u32 error_code;
-
- bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
- if (vcpu->arch.pio.in)
- ret = kvm_write_guest_virt(q, p, bytes, vcpu, &error_code);
- else
- ret = kvm_read_guest_virt(q, p, bytes, vcpu, &error_code);
-
- if (ret == X86EMUL_PROPAGATE_FAULT)
- kvm_inject_page_fault(vcpu, q, error_code);
-
- return ret;
-}
-
-int complete_pio(struct kvm_vcpu *vcpu)
-{
- struct kvm_pio_request *io = &vcpu->arch.pio;
- long delta;
- int r;
- unsigned long val;
-
- if (!io->string) {
- if (io->in) {
- val = kvm_register_read(vcpu, VCPU_REGS_RAX);
- memcpy(&val, vcpu->arch.pio_data, io->size);
- kvm_register_write(vcpu, VCPU_REGS_RAX, val);
- }
- } else {
- if (io->in) {
- r = pio_copy_data(vcpu);
- if (r)
- goto out;
- }
-
- delta = 1;
- if (io->rep) {
- delta *= io->cur_count;
- /*
- * The size of the register should really depend on
- * current address size.
- */
- val = kvm_register_read(vcpu, VCPU_REGS_RCX);
- val -= delta;
- kvm_register_write(vcpu, VCPU_REGS_RCX, val);
- }
- if (io->down)
- delta = -delta;
- delta *= io->size;
- if (io->in) {
- val = kvm_register_read(vcpu, VCPU_REGS_RDI);
- val += delta;
- kvm_register_write(vcpu, VCPU_REGS_RDI, val);
- } else {
- val = kvm_register_read(vcpu, VCPU_REGS_RSI);
- val += delta;
- kvm_register_write(vcpu, VCPU_REGS_RSI, val);
- }
- }
-out:
- io->count -= io->cur_count;
- io->cur_count = 0;
-
- return 0;
-}
+done:
+ if (vcpu->arch.exception.pending)
+ vcpu->arch.emulate_ctxt.restart = false;
-static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
-{
- /* TODO: String I/O for in kernel device */
- int r;
+ if (vcpu->arch.emulate_ctxt.restart)
+ goto restart;
- if (vcpu->arch.pio.in)
- r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
- vcpu->arch.pio.size, pd);
- else
- r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
- vcpu->arch.pio.port, vcpu->arch.pio.size,
- pd);
- return r;
-}
-
-static int pio_string_write(struct kvm_vcpu *vcpu)
-{
- struct kvm_pio_request *io = &vcpu->arch.pio;
- void *pd = vcpu->arch.pio_data;
- int i, r = 0;
-
- for (i = 0; i < io->cur_count; i++) {
- if (kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
- io->port, io->size, pd)) {
- r = -EOPNOTSUPP;
- break;
- }
- pd += io->size;
- }
- return r;
-}
-
-int kvm_emulate_pio(struct kvm_vcpu *vcpu, int in, int size, unsigned port)
-{
- unsigned long val;
-
- trace_kvm_pio(!in, port, size, 1);
-
- vcpu->run->exit_reason = KVM_EXIT_IO;
- vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
- vcpu->run->io.size = vcpu->arch.pio.size = size;
- vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
- vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = 1;
- vcpu->run->io.port = vcpu->arch.pio.port = port;
- vcpu->arch.pio.in = in;
- vcpu->arch.pio.string = 0;
- vcpu->arch.pio.down = 0;
- vcpu->arch.pio.rep = 0;
-
- if (!vcpu->arch.pio.in) {
- val = kvm_register_read(vcpu, VCPU_REGS_RAX);
- memcpy(vcpu->arch.pio_data, &val, 4);
- }
-
- if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
- complete_pio(vcpu);
- return 1;
- }
- return 0;
+ return EMULATE_DONE;
}
-EXPORT_SYMBOL_GPL(kvm_emulate_pio);
+EXPORT_SYMBOL_GPL(emulate_instruction);
-int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, int in,
- int size, unsigned long count, int down,
- gva_t address, int rep, unsigned port)
+int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
{
- unsigned now, in_page;
- int ret = 0;
-
- trace_kvm_pio(!in, port, size, count);
-
- vcpu->run->exit_reason = KVM_EXIT_IO;
- vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
- vcpu->run->io.size = vcpu->arch.pio.size = size;
- vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
- vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = count;
- vcpu->run->io.port = vcpu->arch.pio.port = port;
- vcpu->arch.pio.in = in;
- vcpu->arch.pio.string = 1;
- vcpu->arch.pio.down = down;
- vcpu->arch.pio.rep = rep;
-
- if (!count) {
- kvm_x86_ops->skip_emulated_instruction(vcpu);
- return 1;
- }
-
- if (!down)
- in_page = PAGE_SIZE - offset_in_page(address);
- else
- in_page = offset_in_page(address) + size;
- now = min(count, (unsigned long)in_page / size);
- if (!now)
- now = 1;
- if (down) {
- /*
- * String I/O in reverse. Yuck. Kill the guest, fix later.
- */
- pr_unimpl(vcpu, "guest string pio down\n");
- kvm_inject_gp(vcpu, 0);
- return 1;
- }
- vcpu->run->io.count = now;
- vcpu->arch.pio.cur_count = now;
-
- if (vcpu->arch.pio.cur_count == vcpu->arch.pio.count)
- kvm_x86_ops->skip_emulated_instruction(vcpu);
-
- vcpu->arch.pio.guest_gva = address;
-
- if (!vcpu->arch.pio.in) {
- /* string PIO write */
- ret = pio_copy_data(vcpu);
- if (ret == X86EMUL_PROPAGATE_FAULT)
- return 1;
- if (ret == 0 && !pio_string_write(vcpu)) {
- complete_pio(vcpu);
- if (vcpu->arch.pio.count == 0)
- ret = 1;
- }
- }
- /* no string PIO read support yet */
-
+ unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
+ int ret = emulator_pio_out_emulated(size, port, &val, 1, vcpu);
+ /* do not return to emulator after return from userspace */
+ vcpu->arch.pio.count = 0;
return ret;
}
-EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
+EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
static void bounce_off(void *info)
{
@@ -3743,6 +3972,51 @@ static void kvm_timer_init(void)
}
}
+static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
+
+static int kvm_is_in_guest(void)
+{
+ return percpu_read(current_vcpu) != NULL;
+}
+
+static int kvm_is_user_mode(void)
+{
+ int user_mode = 3;
+
+ if (percpu_read(current_vcpu))
+ user_mode = kvm_x86_ops->get_cpl(percpu_read(current_vcpu));
+
+ return user_mode != 0;
+}
+
+static unsigned long kvm_get_guest_ip(void)
+{
+ unsigned long ip = 0;
+
+ if (percpu_read(current_vcpu))
+ ip = kvm_rip_read(percpu_read(current_vcpu));
+
+ return ip;
+}
+
+static struct perf_guest_info_callbacks kvm_guest_cbs = {
+ .is_in_guest = kvm_is_in_guest,
+ .is_user_mode = kvm_is_user_mode,
+ .get_guest_ip = kvm_get_guest_ip,
+};
+
+void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
+{
+ percpu_write(current_vcpu, vcpu);
+}
+EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);
+
+void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
+{
+ percpu_write(current_vcpu, NULL);
+}
+EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
+
int kvm_arch_init(void *opaque)
{
int r;
@@ -3779,6 +4053,8 @@ int kvm_arch_init(void *opaque)
kvm_timer_init();
+ perf_register_guest_info_callbacks(&kvm_guest_cbs);
+
return 0;
out:
@@ -3787,6 +4063,8 @@ out:
void kvm_arch_exit(void)
{
+ perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
+
if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
@@ -3942,85 +4220,20 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
return emulator_write_emulated(rip, instruction, 3, vcpu);
}
-static u64 mk_cr_64(u64 curr_cr, u32 new_val)
-{
- return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
-}
-
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
- struct descriptor_table dt = { limit, base };
+ struct desc_ptr dt = { limit, base };
kvm_x86_ops->set_gdt(vcpu, &dt);
}
void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
- struct descriptor_table dt = { limit, base };
+ struct desc_ptr dt = { limit, base };
kvm_x86_ops->set_idt(vcpu, &dt);
}
-void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
- unsigned long *rflags)
-{
- kvm_lmsw(vcpu, msw);
- *rflags = kvm_get_rflags(vcpu);
-}
-
-unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
-{
- unsigned long value;
-
- switch (cr) {
- case 0:
- value = kvm_read_cr0(vcpu);
- break;
- case 2:
- value = vcpu->arch.cr2;
- break;
- case 3:
- value = vcpu->arch.cr3;
- break;
- case 4:
- value = kvm_read_cr4(vcpu);
- break;
- case 8:
- value = kvm_get_cr8(vcpu);
- break;
- default:
- vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
- return 0;
- }
-
- return value;
-}
-
-void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
- unsigned long *rflags)
-{
- switch (cr) {
- case 0:
- kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
- *rflags = kvm_get_rflags(vcpu);
- break;
- case 2:
- vcpu->arch.cr2 = val;
- break;
- case 3:
- kvm_set_cr3(vcpu, val);
- break;
- case 4:
- kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
- break;
- case 8:
- kvm_set_cr8(vcpu, val & 0xfUL);
- break;
- default:
- vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
- }
-}
-
static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
{
struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
@@ -4084,9 +4297,13 @@ int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
+ best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
+ if (!best || best->eax < 0x80000008)
+ goto not_found;
best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
if (best)
return best->eax & 0xff;
+not_found:
return 36;
}
@@ -4200,9 +4417,13 @@ static void inject_pending_event(struct kvm_vcpu *vcpu)
{
/* try to reinject previous events if any */
if (vcpu->arch.exception.pending) {
+ trace_kvm_inj_exception(vcpu->arch.exception.nr,
+ vcpu->arch.exception.has_error_code,
+ vcpu->arch.exception.error_code);
kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
vcpu->arch.exception.has_error_code,
- vcpu->arch.exception.error_code);
+ vcpu->arch.exception.error_code,
+ vcpu->arch.exception.reinject);
return;
}
@@ -4460,26 +4681,17 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
if (!irqchip_in_kernel(vcpu->kvm))
kvm_set_cr8(vcpu, kvm_run->cr8);
- if (vcpu->arch.pio.cur_count) {
- vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
- r = complete_pio(vcpu);
- srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
- if (r)
- goto out;
- }
- if (vcpu->mmio_needed) {
- memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
- vcpu->mmio_read_completed = 1;
- vcpu->mmio_needed = 0;
-
+ if (vcpu->arch.pio.count || vcpu->mmio_needed ||
+ vcpu->arch.emulate_ctxt.restart) {
+ if (vcpu->mmio_needed) {
+ memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
+ vcpu->mmio_read_completed = 1;
+ vcpu->mmio_needed = 0;
+ }
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
- r = emulate_instruction(vcpu, vcpu->arch.mmio_fault_cr2, 0,
- EMULTYPE_NO_DECODE);
+ r = emulate_instruction(vcpu, 0, 0, EMULTYPE_NO_DECODE);
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
if (r == EMULATE_DO_MMIO) {
- /*
- * Read-modify-write. Back to userspace.
- */
r = 0;
goto out;
}
@@ -4562,12 +4774,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
return 0;
}
-void kvm_get_segment(struct kvm_vcpu *vcpu,
- struct kvm_segment *var, int seg)
-{
- kvm_x86_ops->get_segment(vcpu, var, seg);
-}
-
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
struct kvm_segment cs;
@@ -4581,7 +4787,7 @@ EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
- struct descriptor_table dt;
+ struct desc_ptr dt;
vcpu_load(vcpu);
@@ -4596,11 +4802,11 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
kvm_x86_ops->get_idt(vcpu, &dt);
- sregs->idt.limit = dt.limit;
- sregs->idt.base = dt.base;
+ sregs->idt.limit = dt.size;
+ sregs->idt.base = dt.address;
kvm_x86_ops->get_gdt(vcpu, &dt);
- sregs->gdt.limit = dt.limit;
- sregs->gdt.base = dt.base;
+ sregs->gdt.limit = dt.size;
+ sregs->gdt.base = dt.address;
sregs->cr0 = kvm_read_cr0(vcpu);
sregs->cr2 = vcpu->arch.cr2;
@@ -4639,563 +4845,33 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
return 0;
}
-static void kvm_set_segment(struct kvm_vcpu *vcpu,
- struct kvm_segment *var, int seg)
-{
- kvm_x86_ops->set_segment(vcpu, var, seg);
-}
-
-static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
- struct kvm_segment *kvm_desct)
-{
- kvm_desct->base = get_desc_base(seg_desc);
- kvm_desct->limit = get_desc_limit(seg_desc);
- if (seg_desc->g) {
- kvm_desct->limit <<= 12;
- kvm_desct->limit |= 0xfff;
- }
- kvm_desct->selector = selector;
- kvm_desct->type = seg_desc->type;
- kvm_desct->present = seg_desc->p;
- kvm_desct->dpl = seg_desc->dpl;
- kvm_desct->db = seg_desc->d;
- kvm_desct->s = seg_desc->s;
- kvm_desct->l = seg_desc->l;
- kvm_desct->g = seg_desc->g;
- kvm_desct->avl = seg_desc->avl;
- if (!selector)
- kvm_desct->unusable = 1;
- else
- kvm_desct->unusable = 0;
- kvm_desct->padding = 0;
-}
-
-static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
- u16 selector,
- struct descriptor_table *dtable)
-{
- if (selector & 1 << 2) {
- struct kvm_segment kvm_seg;
-
- kvm_get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);
-
- if (kvm_seg.unusable)
- dtable->limit = 0;
- else
- dtable->limit = kvm_seg.limit;
- dtable->base = kvm_seg.base;
- }
- else
- kvm_x86_ops->get_gdt(vcpu, dtable);
-}
-
-/* allowed just for 8 bytes segments */
-static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
- struct desc_struct *seg_desc)
-{
- struct descriptor_table dtable;
- u16 index = selector >> 3;
- int ret;
- u32 err;
- gva_t addr;
-
- get_segment_descriptor_dtable(vcpu, selector, &dtable);
-
- if (dtable.limit < index * 8 + 7) {
- kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
- return X86EMUL_PROPAGATE_FAULT;
- }
- addr = dtable.base + index * 8;
- ret = kvm_read_guest_virt_system(addr, seg_desc, sizeof(*seg_desc),
- vcpu, &err);
- if (ret == X86EMUL_PROPAGATE_FAULT)
- kvm_inject_page_fault(vcpu, addr, err);
-
- return ret;
-}
-
-/* allowed just for 8 bytes segments */
-static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
- struct desc_struct *seg_desc)
-{
- struct descriptor_table dtable;
- u16 index = selector >> 3;
-
- get_segment_descriptor_dtable(vcpu, selector, &dtable);
-
- if (dtable.limit < index * 8 + 7)
- return 1;
- return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu, NULL);
-}
-
-static gpa_t get_tss_base_addr_write(struct kvm_vcpu *vcpu,
- struct desc_struct *seg_desc)
-{
- u32 base_addr = get_desc_base(seg_desc);
-
- return kvm_mmu_gva_to_gpa_write(vcpu, base_addr, NULL);
-}
-
-static gpa_t get_tss_base_addr_read(struct kvm_vcpu *vcpu,
- struct desc_struct *seg_desc)
-{
- u32 base_addr = get_desc_base(seg_desc);
-
- return kvm_mmu_gva_to_gpa_read(vcpu, base_addr, NULL);
-}
-
-static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
-{
- struct kvm_segment kvm_seg;
-
- kvm_get_segment(vcpu, &kvm_seg, seg);
- return kvm_seg.selector;
-}
-
-static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
-{
- struct kvm_segment segvar = {
- .base = selector << 4,
- .limit = 0xffff,
- .selector = selector,
- .type = 3,
- .present = 1,
- .dpl = 3,
- .db = 0,
- .s = 1,
- .l = 0,
- .g = 0,
- .avl = 0,
- .unusable = 0,
- };
- kvm_x86_ops->set_segment(vcpu, &segvar, seg);
- return X86EMUL_CONTINUE;
-}
-
-static int is_vm86_segment(struct kvm_vcpu *vcpu, int seg)
-{
- return (seg != VCPU_SREG_LDTR) &&
- (seg != VCPU_SREG_TR) &&
- (kvm_get_rflags(vcpu) & X86_EFLAGS_VM);
-}
-
-int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg)
+int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
+ bool has_error_code, u32 error_code)
{
- struct kvm_segment kvm_seg;
- struct desc_struct seg_desc;
- u8 dpl, rpl, cpl;
- unsigned err_vec = GP_VECTOR;
- u32 err_code = 0;
- bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
- int ret;
+ int cs_db, cs_l, ret;
+ cache_all_regs(vcpu);
- if (is_vm86_segment(vcpu, seg) || !is_protmode(vcpu))
- return kvm_load_realmode_segment(vcpu, selector, seg);
+ kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
- /* NULL selector is not valid for TR, CS and SS */
- if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
- && null_selector)
- goto exception;
+ vcpu->arch.emulate_ctxt.vcpu = vcpu;
+ vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
+ vcpu->arch.emulate_ctxt.eip = kvm_rip_read(vcpu);
+ vcpu->arch.emulate_ctxt.mode =
+ (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
+ (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
+ ? X86EMUL_MODE_VM86 : cs_l
+ ? X86EMUL_MODE_PROT64 : cs_db
+ ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
- /* TR should be in GDT only */
- if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
- goto exception;
+ ret = emulator_task_switch(&vcpu->arch.emulate_ctxt, &emulate_ops,
+ tss_selector, reason, has_error_code,
+ error_code);
- ret = load_guest_segment_descriptor(vcpu, selector, &seg_desc);
if (ret)
- return ret;
-
- seg_desct_to_kvm_desct(&seg_desc, selector, &kvm_seg);
-
- if (null_selector) { /* for NULL selector skip all following checks */
- kvm_seg.unusable = 1;
- goto load;
- }
-
- err_code = selector & 0xfffc;
- err_vec = GP_VECTOR;
-
- /* can't load system descriptor into segment selecor */
- if (seg <= VCPU_SREG_GS && !kvm_seg.s)
- goto exception;
-
- if (!kvm_seg.present) {
- err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
- goto exception;
- }
-
- rpl = selector & 3;
- dpl = kvm_seg.dpl;
- cpl = kvm_x86_ops->get_cpl(vcpu);
-
- switch (seg) {
- case VCPU_SREG_SS:
- /*
- * segment is not a writable data segment or segment
- * selector's RPL != CPL or segment selector's RPL != CPL
- */
- if (rpl != cpl || (kvm_seg.type & 0xa) != 0x2 || dpl != cpl)
- goto exception;
- break;
- case VCPU_SREG_CS:
- if (!(kvm_seg.type & 8))
- goto exception;
-
- if (kvm_seg.type & 4) {
- /* conforming */
- if (dpl > cpl)
- goto exception;
- } else {
- /* nonconforming */
- if (rpl > cpl || dpl != cpl)
- goto exception;
- }
- /* CS(RPL) <- CPL */
- selector = (selector & 0xfffc) | cpl;
- break;
- case VCPU_SREG_TR:
- if (kvm_seg.s || (kvm_seg.type != 1 && kvm_seg.type != 9))
- goto exception;
- break;
- case VCPU_SREG_LDTR:
- if (kvm_seg.s || kvm_seg.type != 2)
- goto exception;
- break;
- default: /* DS, ES, FS, or GS */
- /*
- * segment is not a data or readable code segment or
- * ((segment is a data or nonconforming code segment)
- * and (both RPL and CPL > DPL))
- */
- if ((kvm_seg.type & 0xa) == 0x8 ||
- (((kvm_seg.type & 0xc) != 0xc) && (rpl > dpl && cpl > dpl)))
- goto exception;
- break;
- }
-
- if (!kvm_seg.unusable && kvm_seg.s) {
- /* mark segment as accessed */
- kvm_seg.type |= 1;
- seg_desc.type |= 1;
- save_guest_segment_descriptor(vcpu, selector, &seg_desc);
- }
-load:
- kvm_set_segment(vcpu, &kvm_seg, seg);
- return X86EMUL_CONTINUE;
-exception:
- kvm_queue_exception_e(vcpu, err_vec, err_code);
- return X86EMUL_PROPAGATE_FAULT;
-}
-
-static void save_state_to_tss32(struct kvm_vcpu *vcpu,
- struct tss_segment_32 *tss)
-{
- tss->cr3 = vcpu->arch.cr3;
- tss->eip = kvm_rip_read(vcpu);
- tss->eflags = kvm_get_rflags(vcpu);
- tss->eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
- tss->ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
- tss->edx = kvm_register_read(vcpu, VCPU_REGS_RDX);
- tss->ebx = kvm_register_read(vcpu, VCPU_REGS_RBX);
- tss->esp = kvm_register_read(vcpu, VCPU_REGS_RSP);
- tss->ebp = kvm_register_read(vcpu, VCPU_REGS_RBP);
- tss->esi = kvm_register_read(vcpu, VCPU_REGS_RSI);
- tss->edi = kvm_register_read(vcpu, VCPU_REGS_RDI);
- tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
- tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
- tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
- tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
- tss->fs = get_segment_selector(vcpu, VCPU_SREG_FS);
- tss->gs = get_segment_selector(vcpu, VCPU_SREG_GS);
- tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
-}
-
-static void kvm_load_segment_selector(struct kvm_vcpu *vcpu, u16 sel, int seg)
-{
- struct kvm_segment kvm_seg;
- kvm_get_segment(vcpu, &kvm_seg, seg);
- kvm_seg.selector = sel;
- kvm_set_segment(vcpu, &kvm_seg, seg);
-}
-
-static int load_state_from_tss32(struct kvm_vcpu *vcpu,
- struct tss_segment_32 *tss)
-{
- kvm_set_cr3(vcpu, tss->cr3);
-
- kvm_rip_write(vcpu, tss->eip);
- kvm_set_rflags(vcpu, tss->eflags | 2);
-
- kvm_register_write(vcpu, VCPU_REGS_RAX, tss->eax);
- kvm_register_write(vcpu, VCPU_REGS_RCX, tss->ecx);
- kvm_register_write(vcpu, VCPU_REGS_RDX, tss->edx);
- kvm_register_write(vcpu, VCPU_REGS_RBX, tss->ebx);
- kvm_register_write(vcpu, VCPU_REGS_RSP, tss->esp);
- kvm_register_write(vcpu, VCPU_REGS_RBP, tss->ebp);
- kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi);
- kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi);
-
- /*
- * SDM says that segment selectors are loaded before segment
- * descriptors
- */
- kvm_load_segment_selector(vcpu, tss->ldt_selector, VCPU_SREG_LDTR);
- kvm_load_segment_selector(vcpu, tss->es, VCPU_SREG_ES);
- kvm_load_segment_selector(vcpu, tss->cs, VCPU_SREG_CS);
- kvm_load_segment_selector(vcpu, tss->ss, VCPU_SREG_SS);
- kvm_load_segment_selector(vcpu, tss->ds, VCPU_SREG_DS);
- kvm_load_segment_selector(vcpu, tss->fs, VCPU_SREG_FS);
- kvm_load_segment_selector(vcpu, tss->gs, VCPU_SREG_GS);
-
- /*
- * Now load segment descriptors. If fault happenes at this stage
- * it is handled in a context of new task
- */
- if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, VCPU_SREG_LDTR))
- return 1;
-
- if (kvm_load_segment_descriptor(vcpu, tss->es, VCPU_SREG_ES))
- return 1;
-
- if (kvm_load_segment_descriptor(vcpu, tss->cs, VCPU_SREG_CS))
- return 1;
-
- if (kvm_load_segment_descriptor(vcpu, tss->ss, VCPU_SREG_SS))
- return 1;
-
- if (kvm_load_segment_descriptor(vcpu, tss->ds, VCPU_SREG_DS))
- return 1;
-
- if (kvm_load_segment_descriptor(vcpu, tss->fs, VCPU_SREG_FS))
- return 1;
-
- if (kvm_load_segment_descriptor(vcpu, tss->gs, VCPU_SREG_GS))
- return 1;
- return 0;
-}
-
-static void save_state_to_tss16(struct kvm_vcpu *vcpu,
- struct tss_segment_16 *tss)
-{
- tss->ip = kvm_rip_read(vcpu);
- tss->flag = kvm_get_rflags(vcpu);
- tss->ax = kvm_register_read(vcpu, VCPU_REGS_RAX);
- tss->cx = kvm_register_read(vcpu, VCPU_REGS_RCX);
- tss->dx = kvm_register_read(vcpu, VCPU_REGS_RDX);
- tss->bx = kvm_register_read(vcpu, VCPU_REGS_RBX);
- tss->sp = kvm_register_read(vcpu, VCPU_REGS_RSP);
- tss->bp = kvm_register_read(vcpu, VCPU_REGS_RBP);
- tss->si = kvm_register_read(vcpu, VCPU_REGS_RSI);
- tss->di = kvm_register_read(vcpu, VCPU_REGS_RDI);
-
- tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
- tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
- tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
- tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
- tss->ldt = get_segment_selector(vcpu, VCPU_SREG_LDTR);
-}
-
-static int load_state_from_tss16(struct kvm_vcpu *vcpu,
- struct tss_segment_16 *tss)
-{
- kvm_rip_write(vcpu, tss->ip);
- kvm_set_rflags(vcpu, tss->flag | 2);
- kvm_register_write(vcpu, VCPU_REGS_RAX, tss->ax);
- kvm_register_write(vcpu, VCPU_REGS_RCX, tss->cx);
- kvm_register_write(vcpu, VCPU_REGS_RDX, tss->dx);
- kvm_register_write(vcpu, VCPU_REGS_RBX, tss->bx);
- kvm_register_write(vcpu, VCPU_REGS_RSP, tss->sp);
- kvm_register_write(vcpu, VCPU_REGS_RBP, tss->bp);
- kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si);
- kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di);
-
- /*
- * SDM says that segment selectors are loaded before segment
- * descriptors
- */
- kvm_load_segment_selector(vcpu, tss->ldt, VCPU_SREG_LDTR);
- kvm_load_segment_selector(vcpu, tss->es, VCPU_SREG_ES);
- kvm_load_segment_selector(vcpu, tss->cs, VCPU_SREG_CS);
- kvm_load_segment_selector(vcpu, tss->ss, VCPU_SREG_SS);
- kvm_load_segment_selector(vcpu, tss->ds, VCPU_SREG_DS);
-
- /*
- * Now load segment descriptors. If fault happenes at this stage
- * it is handled in a context of new task
- */
- if (kvm_load_segment_descriptor(vcpu, tss->ldt, VCPU_SREG_LDTR))
- return 1;
-
- if (kvm_load_segment_descriptor(vcpu, tss->es, VCPU_SREG_ES))
- return 1;
-
- if (kvm_load_segment_descriptor(vcpu, tss->cs, VCPU_SREG_CS))
- return 1;
+ return EMULATE_FAIL;
- if (kvm_load_segment_descriptor(vcpu, tss->ss, VCPU_SREG_SS))
- return 1;
-
- if (kvm_load_segment_descriptor(vcpu, tss->ds, VCPU_SREG_DS))
- return 1;
- return 0;
-}
-
-static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
- u16 old_tss_sel, u32 old_tss_base,
- struct desc_struct *nseg_desc)
-{
- struct tss_segment_16 tss_segment_16;
- int ret = 0;
-
- if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
- sizeof tss_segment_16))
- goto out;
-
- save_state_to_tss16(vcpu, &tss_segment_16);
-
- if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
- sizeof tss_segment_16))
- goto out;
-
- if (kvm_read_guest(vcpu->kvm, get_tss_base_addr_read(vcpu, nseg_desc),
- &tss_segment_16, sizeof tss_segment_16))
- goto out;
-
- if (old_tss_sel != 0xffff) {
- tss_segment_16.prev_task_link = old_tss_sel;
-
- if (kvm_write_guest(vcpu->kvm,
- get_tss_base_addr_write(vcpu, nseg_desc),
- &tss_segment_16.prev_task_link,
- sizeof tss_segment_16.prev_task_link))
- goto out;
- }
-
- if (load_state_from_tss16(vcpu, &tss_segment_16))
- goto out;
-
- ret = 1;
-out:
- return ret;
-}
-
-static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
- u16 old_tss_sel, u32 old_tss_base,
- struct desc_struct *nseg_desc)
-{
- struct tss_segment_32 tss_segment_32;
- int ret = 0;
-
- if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
- sizeof tss_segment_32))
- goto out;
-
- save_state_to_tss32(vcpu, &tss_segment_32);
-
- if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
- sizeof tss_segment_32))
- goto out;
-
- if (kvm_read_guest(vcpu->kvm, get_tss_base_addr_read(vcpu, nseg_desc),
- &tss_segment_32, sizeof tss_segment_32))
- goto out;
-
- if (old_tss_sel != 0xffff) {
- tss_segment_32.prev_task_link = old_tss_sel;
-
- if (kvm_write_guest(vcpu->kvm,
- get_tss_base_addr_write(vcpu, nseg_desc),
- &tss_segment_32.prev_task_link,
- sizeof tss_segment_32.prev_task_link))
- goto out;
- }
-
- if (load_state_from_tss32(vcpu, &tss_segment_32))
- goto out;
-
- ret = 1;
-out:
- return ret;
-}
-
-int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
-{
- struct kvm_segment tr_seg;
- struct desc_struct cseg_desc;
- struct desc_struct nseg_desc;
- int ret = 0;
- u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
- u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
- u32 desc_limit;
-
- old_tss_base = kvm_mmu_gva_to_gpa_write(vcpu, old_tss_base, NULL);
-
- /* FIXME: Handle errors. Failure to read either TSS or their
- * descriptors should generate a pagefault.
- */
- if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
- goto out;
-
- if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc))
- goto out;
-
- if (reason != TASK_SWITCH_IRET) {
- int cpl;
-
- cpl = kvm_x86_ops->get_cpl(vcpu);
- if ((tss_selector & 3) > nseg_desc.dpl || cpl > nseg_desc.dpl) {
- kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
- return 1;
- }
- }
-
- desc_limit = get_desc_limit(&nseg_desc);
- if (!nseg_desc.p ||
- ((desc_limit < 0x67 && (nseg_desc.type & 8)) ||
- desc_limit < 0x2b)) {
- kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
- return 1;
- }
-
- if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
- cseg_desc.type &= ~(1 << 1); //clear the B flag
- save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc);
- }
-
- if (reason == TASK_SWITCH_IRET) {
- u32 eflags = kvm_get_rflags(vcpu);
- kvm_set_rflags(vcpu, eflags & ~X86_EFLAGS_NT);
- }
-
- /* set back link to prev task only if NT bit is set in eflags
- note that old_tss_sel is not used afetr this point */
- if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
- old_tss_sel = 0xffff;
-
- if (nseg_desc.type & 8)
- ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_sel,
- old_tss_base, &nseg_desc);
- else
- ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_sel,
- old_tss_base, &nseg_desc);
-
- if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
- u32 eflags = kvm_get_rflags(vcpu);
- kvm_set_rflags(vcpu, eflags | X86_EFLAGS_NT);
- }
-
- if (reason != TASK_SWITCH_IRET) {
- nseg_desc.type |= (1 << 1);
- save_guest_segment_descriptor(vcpu, tss_selector,
- &nseg_desc);
- }
-
- kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0(vcpu) | X86_CR0_TS);
- seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
- tr_seg.type = 11;
- kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
-out:
- return ret;
+ kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
+ return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvm_task_switch);
@@ -5204,15 +4880,15 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
{
int mmu_reset_needed = 0;
int pending_vec, max_bits;
- struct descriptor_table dt;
+ struct desc_ptr dt;
vcpu_load(vcpu);
- dt.limit = sregs->idt.limit;
- dt.base = sregs->idt.base;
+ dt.size = sregs->idt.limit;
+ dt.address = sregs->idt.base;
kvm_x86_ops->set_idt(vcpu, &dt);
- dt.limit = sregs->gdt.limit;
- dt.base = sregs->gdt.base;
+ dt.size = sregs->gdt.limit;
+ dt.address = sregs->gdt.base;
kvm_x86_ops->set_gdt(vcpu, &dt);
vcpu->arch.cr2 = sregs->cr2;
@@ -5311,11 +4987,9 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
}
- if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
- vcpu->arch.singlestep_cs =
- get_segment_selector(vcpu, VCPU_SREG_CS);
- vcpu->arch.singlestep_rip = kvm_rip_read(vcpu);
- }
+ if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+ vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
+ get_segment_base(vcpu, VCPU_SREG_CS);
/*
* Trigger an rflags update that will inject or remove the trace
@@ -5806,13 +5480,22 @@ int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
return kvm_x86_ops->interrupt_allowed(vcpu);
}
+bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
+{
+ unsigned long current_rip = kvm_rip_read(vcpu) +
+ get_segment_base(vcpu, VCPU_SREG_CS);
+
+ return current_rip == linear_rip;
+}
+EXPORT_SYMBOL_GPL(kvm_is_linear_rip);
+
unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
{
unsigned long rflags;
rflags = kvm_x86_ops->get_rflags(vcpu);
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
- rflags &= ~(unsigned long)(X86_EFLAGS_TF | X86_EFLAGS_RF);
+ rflags &= ~X86_EFLAGS_TF;
return rflags;
}
EXPORT_SYMBOL_GPL(kvm_get_rflags);
@@ -5820,10 +5503,8 @@ EXPORT_SYMBOL_GPL(kvm_get_rflags);
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
- vcpu->arch.singlestep_cs ==
- get_segment_selector(vcpu, VCPU_SREG_CS) &&
- vcpu->arch.singlestep_rip == kvm_rip_read(vcpu))
- rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
+ kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
+ rflags |= X86_EFLAGS_TF;
kvm_x86_ops->set_rflags(vcpu, rflags);
}
EXPORT_SYMBOL_GPL(kvm_set_rflags);
@@ -5839,3 +5520,4 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 2d101639bd8d..f4b54458285b 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -65,4 +65,14 @@ static inline int is_paging(struct kvm_vcpu *vcpu)
return kvm_read_cr0_bits(vcpu, X86_CR0_PG);
}
+static inline struct kvm_mem_aliases *kvm_aliases(struct kvm *kvm)
+{
+ return rcu_dereference_check(kvm->arch.aliases,
+ srcu_read_lock_held(&kvm->srcu)
+ || lockdep_is_held(&kvm->slots_lock));
+}
+
+void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
+void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
+
#endif
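
The kvm_aliases() helper added above documents its locking contract through rcu_dereference_check(): the caller must hold either the kvm->srcu read lock or kvm->slots_lock, otherwise lockdep will complain. As an illustrative sketch only (not part of the patch; the 'naliases' field is assumed from the era's struct kvm_mem_aliases), the intended SRCU-side calling pattern looks roughly like this:

	/* Sketch: dereference the alias table under the SRCU read lock. */
	static int count_aliases(struct kvm *kvm)
	{
		struct kvm_mem_aliases *aliases;
		int idx, n;

		idx = srcu_read_lock(&kvm->srcu);  /* satisfies srcu_read_lock_held() */
		aliases = kvm_aliases(kvm);
		n = aliases ? aliases->naliases : 0;  /* 'naliases' assumed */
		srcu_read_unlock(&kvm->srcu, idx);

		return n;
	}
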
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 419386c24b82..f871e04b6965 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -20,17 +20,18 @@ lib-y := delay.o
lib-y += thunk_$(BITS).o
lib-y += usercopy_$(BITS).o getuser.o putuser.o
lib-y += memcpy_$(BITS).o
-lib-$(CONFIG_KPROBES) += insn.o inat.o
+lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o
obj-y += msr.o msr-reg.o msr-reg-export.o
ifeq ($(CONFIG_X86_32),y)
obj-y += atomic64_32.o
+ lib-y += atomic64_cx8_32.o
lib-y += checksum_32.o
lib-y += strstr_32.o
lib-y += semaphore_32.o string_32.o
ifneq ($(CONFIG_X86_CMPXCHG64),y)
- lib-y += cmpxchg8b_emu.o
+ lib-y += cmpxchg8b_emu.o atomic64_386_32.o
endif
lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o
else
diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
index 824fa0be55a3..540179e8e9fa 100644
--- a/arch/x86/lib/atomic64_32.c
+++ b/arch/x86/lib/atomic64_32.c
@@ -6,225 +6,54 @@
#include <asm/cmpxchg.h>
#include <asm/atomic.h>
-static noinline u64 cmpxchg8b(u64 *ptr, u64 old, u64 new)
-{
- u32 low = new;
- u32 high = new >> 32;
-
- asm volatile(
- LOCK_PREFIX "cmpxchg8b %1\n"
- : "+A" (old), "+m" (*ptr)
- : "b" (low), "c" (high)
- );
- return old;
-}
-
-u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val)
-{
- return cmpxchg8b(&ptr->counter, old_val, new_val);
-}
-EXPORT_SYMBOL(atomic64_cmpxchg);
-
-/**
- * atomic64_xchg - xchg atomic64 variable
- * @ptr: pointer to type atomic64_t
- * @new_val: value to assign
- *
- * Atomically xchgs the value of @ptr to @new_val and returns
- * the old value.
- */
-u64 atomic64_xchg(atomic64_t *ptr, u64 new_val)
-{
- /*
- * Try first with a (possibly incorrect) assumption about
- * what we have there. We'll do two loops most likely,
- * but we'll get an ownership MESI transaction straight away
- * instead of a read transaction followed by a
- * flush-for-ownership transaction:
- */
- u64 old_val, real_val = 0;
-
- do {
- old_val = real_val;
-
- real_val = atomic64_cmpxchg(ptr, old_val, new_val);
-
- } while (real_val != old_val);
-
- return old_val;
-}
-EXPORT_SYMBOL(atomic64_xchg);
-
-/**
- * atomic64_set - set atomic64 variable
- * @ptr: pointer to type atomic64_t
- * @new_val: value to assign
- *
- * Atomically sets the value of @ptr to @new_val.
- */
-void atomic64_set(atomic64_t *ptr, u64 new_val)
-{
- atomic64_xchg(ptr, new_val);
-}
-EXPORT_SYMBOL(atomic64_set);
-
-EXPORT_SYMBOL(atomic64_read);
-
-/**
- * atomic64_add_return - add and return
- * @delta: integer value to add
- * @ptr: pointer to type atomic64_t
- *
- * Atomically adds @delta to @ptr and returns @delta + *@ptr
- */
-noinline u64 atomic64_add_return(u64 delta, atomic64_t *ptr)
-{
- /*
- * Try first with a (possibly incorrect) assumption about
- * what we have there. We'll do two loops most likely,
- * but we'll get an ownership MESI transaction straight away
- * instead of a read transaction followed by a
- * flush-for-ownership transaction:
- */
- u64 old_val, new_val, real_val = 0;
-
- do {
- old_val = real_val;
- new_val = old_val + delta;
-
- real_val = atomic64_cmpxchg(ptr, old_val, new_val);
-
- } while (real_val != old_val);
-
- return new_val;
-}
-EXPORT_SYMBOL(atomic64_add_return);
-
-u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
-{
- return atomic64_add_return(-delta, ptr);
-}
-EXPORT_SYMBOL(atomic64_sub_return);
-
-u64 atomic64_inc_return(atomic64_t *ptr)
-{
- return atomic64_add_return(1, ptr);
-}
-EXPORT_SYMBOL(atomic64_inc_return);
-
-u64 atomic64_dec_return(atomic64_t *ptr)
-{
- return atomic64_sub_return(1, ptr);
-}
-EXPORT_SYMBOL(atomic64_dec_return);
-
-/**
- * atomic64_add - add integer to atomic64 variable
- * @delta: integer value to add
- * @ptr: pointer to type atomic64_t
- *
- * Atomically adds @delta to @ptr.
- */
-void atomic64_add(u64 delta, atomic64_t *ptr)
-{
- atomic64_add_return(delta, ptr);
-}
-EXPORT_SYMBOL(atomic64_add);
-
-/**
- * atomic64_sub - subtract the atomic64 variable
- * @delta: integer value to subtract
- * @ptr: pointer to type atomic64_t
- *
- * Atomically subtracts @delta from @ptr.
- */
-void atomic64_sub(u64 delta, atomic64_t *ptr)
-{
- atomic64_add(-delta, ptr);
-}
-EXPORT_SYMBOL(atomic64_sub);
-
-/**
- * atomic64_sub_and_test - subtract value from variable and test result
- * @delta: integer value to subtract
- * @ptr: pointer to type atomic64_t
- *
- * Atomically subtracts @delta from @ptr and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-int atomic64_sub_and_test(u64 delta, atomic64_t *ptr)
-{
- u64 new_val = atomic64_sub_return(delta, ptr);
-
- return new_val == 0;
-}
-EXPORT_SYMBOL(atomic64_sub_and_test);
-
-/**
- * atomic64_inc - increment atomic64 variable
- * @ptr: pointer to type atomic64_t
- *
- * Atomically increments @ptr by 1.
- */
-void atomic64_inc(atomic64_t *ptr)
-{
- atomic64_add(1, ptr);
-}
-EXPORT_SYMBOL(atomic64_inc);
-
-/**
- * atomic64_dec - decrement atomic64 variable
- * @ptr: pointer to type atomic64_t
- *
- * Atomically decrements @ptr by 1.
- */
-void atomic64_dec(atomic64_t *ptr)
-{
- atomic64_sub(1, ptr);
-}
-EXPORT_SYMBOL(atomic64_dec);
-
-/**
- * atomic64_dec_and_test - decrement and test
- * @ptr: pointer to type atomic64_t
- *
- * Atomically decrements @ptr by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-int atomic64_dec_and_test(atomic64_t *ptr)
-{
- return atomic64_sub_and_test(1, ptr);
-}
-EXPORT_SYMBOL(atomic64_dec_and_test);
-
-/**
- * atomic64_inc_and_test - increment and test
- * @ptr: pointer to type atomic64_t
- *
- * Atomically increments @ptr by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-int atomic64_inc_and_test(atomic64_t *ptr)
-{
- return atomic64_sub_and_test(-1, ptr);
-}
-EXPORT_SYMBOL(atomic64_inc_and_test);
-
-/**
- * atomic64_add_negative - add and test if negative
- * @delta: integer value to add
- * @ptr: pointer to type atomic64_t
- *
- * Atomically adds @delta to @ptr and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-int atomic64_add_negative(u64 delta, atomic64_t *ptr)
-{
- s64 new_val = atomic64_add_return(delta, ptr);
-
- return new_val < 0;
-}
-EXPORT_SYMBOL(atomic64_add_negative);
+long long atomic64_read_cx8(long long, const atomic64_t *v);
+EXPORT_SYMBOL(atomic64_read_cx8);
+long long atomic64_set_cx8(long long, const atomic64_t *v);
+EXPORT_SYMBOL(atomic64_set_cx8);
+long long atomic64_xchg_cx8(long long, unsigned high);
+EXPORT_SYMBOL(atomic64_xchg_cx8);
+long long atomic64_add_return_cx8(long long a, atomic64_t *v);
+EXPORT_SYMBOL(atomic64_add_return_cx8);
+long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
+EXPORT_SYMBOL(atomic64_sub_return_cx8);
+long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
+EXPORT_SYMBOL(atomic64_inc_return_cx8);
+long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
+EXPORT_SYMBOL(atomic64_dec_return_cx8);
+long long atomic64_dec_if_positive_cx8(atomic64_t *v);
+EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
+int atomic64_inc_not_zero_cx8(atomic64_t *v);
+EXPORT_SYMBOL(atomic64_inc_not_zero_cx8);
+int atomic64_add_unless_cx8(atomic64_t *v, long long a, long long u);
+EXPORT_SYMBOL(atomic64_add_unless_cx8);
+
+#ifndef CONFIG_X86_CMPXCHG64
+long long atomic64_read_386(long long, const atomic64_t *v);
+EXPORT_SYMBOL(atomic64_read_386);
+long long atomic64_set_386(long long, const atomic64_t *v);
+EXPORT_SYMBOL(atomic64_set_386);
+long long atomic64_xchg_386(long long, unsigned high);
+EXPORT_SYMBOL(atomic64_xchg_386);
+long long atomic64_add_return_386(long long a, atomic64_t *v);
+EXPORT_SYMBOL(atomic64_add_return_386);
+long long atomic64_sub_return_386(long long a, atomic64_t *v);
+EXPORT_SYMBOL(atomic64_sub_return_386);
+long long atomic64_inc_return_386(long long a, atomic64_t *v);
+EXPORT_SYMBOL(atomic64_inc_return_386);
+long long atomic64_dec_return_386(long long a, atomic64_t *v);
+EXPORT_SYMBOL(atomic64_dec_return_386);
+long long atomic64_add_386(long long a, atomic64_t *v);
+EXPORT_SYMBOL(atomic64_add_386);
+long long atomic64_sub_386(long long a, atomic64_t *v);
+EXPORT_SYMBOL(atomic64_sub_386);
+long long atomic64_inc_386(long long a, atomic64_t *v);
+EXPORT_SYMBOL(atomic64_inc_386);
+long long atomic64_dec_386(long long a, atomic64_t *v);
+EXPORT_SYMBOL(atomic64_dec_386);
+long long atomic64_dec_if_positive_386(atomic64_t *v);
+EXPORT_SYMBOL(atomic64_dec_if_positive_386);
+int atomic64_inc_not_zero_386(atomic64_t *v);
+EXPORT_SYMBOL(atomic64_inc_not_zero_386);
+int atomic64_add_unless_386(atomic64_t *v, long long a, long long u);
+EXPORT_SYMBOL(atomic64_add_unless_386);
+#endif
diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
new file mode 100644
index 000000000000..4a5979aa6883
--- /dev/null
+++ b/arch/x86/lib/atomic64_386_32.S
@@ -0,0 +1,174 @@
+/*
+ * atomic64_t for 386/486
+ *
+ * Copyright © 2010 Luca Barbieri
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/linkage.h>
+#include <asm/alternative-asm.h>
+#include <asm/dwarf2.h>
+
+/* if you want SMP support, implement these with real spinlocks */
+.macro LOCK reg
+ pushfl
+ CFI_ADJUST_CFA_OFFSET 4
+ cli
+.endm
+
+.macro UNLOCK reg
+ popfl
+ CFI_ADJUST_CFA_OFFSET -4
+.endm
+
+.macro BEGIN func reg
+$v = \reg
+
+ENTRY(atomic64_\func\()_386)
+ CFI_STARTPROC
+ LOCK $v
+
+.macro RETURN
+ UNLOCK $v
+ ret
+.endm
+
+.macro END_
+ CFI_ENDPROC
+ENDPROC(atomic64_\func\()_386)
+.purgem RETURN
+.purgem END_
+.purgem END
+.endm
+
+.macro END
+RETURN
+END_
+.endm
+.endm
+
+BEGIN read %ecx
+ movl ($v), %eax
+ movl 4($v), %edx
+END
+
+BEGIN set %esi
+ movl %ebx, ($v)
+ movl %ecx, 4($v)
+END
+
+BEGIN xchg %esi
+ movl ($v), %eax
+ movl 4($v), %edx
+ movl %ebx, ($v)
+ movl %ecx, 4($v)
+END
+
+BEGIN add %ecx
+ addl %eax, ($v)
+ adcl %edx, 4($v)
+END
+
+BEGIN add_return %ecx
+ addl ($v), %eax
+ adcl 4($v), %edx
+ movl %eax, ($v)
+ movl %edx, 4($v)
+END
+
+BEGIN sub %ecx
+ subl %eax, ($v)
+ sbbl %edx, 4($v)
+END
+
+BEGIN sub_return %ecx
+ negl %edx
+ negl %eax
+ sbbl $0, %edx
+ addl ($v), %eax
+ adcl 4($v), %edx
+ movl %eax, ($v)
+ movl %edx, 4($v)
+END
+
+BEGIN inc %esi
+ addl $1, ($v)
+ adcl $0, 4($v)
+END
+
+BEGIN inc_return %esi
+ movl ($v), %eax
+ movl 4($v), %edx
+ addl $1, %eax
+ adcl $0, %edx
+ movl %eax, ($v)
+ movl %edx, 4($v)
+END
+
+BEGIN dec %esi
+ subl $1, ($v)
+ sbbl $0, 4($v)
+END
+
+BEGIN dec_return %esi
+ movl ($v), %eax
+ movl 4($v), %edx
+ subl $1, %eax
+ sbbl $0, %edx
+ movl %eax, ($v)
+ movl %edx, 4($v)
+END
+
+BEGIN add_unless %ecx
+ addl %eax, %esi
+ adcl %edx, %edi
+ addl ($v), %eax
+ adcl 4($v), %edx
+ cmpl %eax, %esi
+ je 3f
+1:
+ movl %eax, ($v)
+ movl %edx, 4($v)
+ movl $1, %eax
+2:
+RETURN
+3:
+ cmpl %edx, %edi
+ jne 1b
+ xorl %eax, %eax
+ jmp 2b
+END_
+
+BEGIN inc_not_zero %esi
+ movl ($v), %eax
+ movl 4($v), %edx
+ testl %eax, %eax
+ je 3f
+1:
+ addl $1, %eax
+ adcl $0, %edx
+ movl %eax, ($v)
+ movl %edx, 4($v)
+ movl $1, %eax
+2:
+RETURN
+3:
+ testl %edx, %edx
+ jne 1b
+ jmp 2b
+END_
+
+BEGIN dec_if_positive %esi
+ movl ($v), %eax
+ movl 4($v), %edx
+ subl $1, %eax
+ sbbl $0, %edx
+ js 1f
+ movl %eax, ($v)
+ movl %edx, 4($v)
+1:
+END
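
For readers following the macro-heavy assembly above, a rough C equivalent of the add_unless operation may help (a sketch only, not part of the patch). On these 386/486 configurations the fallback is UP-only, so the LOCK/UNLOCK macros simply disable and restore interrupts around the read-modify-write, modelled here with local_irq_save()/local_irq_restore():

	/* Sketch of atomic64_add_unless semantics: add 'a' to *v unless *v == u;
	 * returns non-zero if the addition was performed. */
	static int atomic64_add_unless_sketch(long long *v, long long a, long long u)
	{
		unsigned long flags;
		int added = 0;

		local_irq_save(flags);		/* LOCK:   pushfl + cli */
		if (*v != u) {
			*v += a;
			added = 1;
		}
		local_irq_restore(flags);	/* UNLOCK: popfl */

		return added;
	}
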
diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
new file mode 100644
index 000000000000..71e080de3352
--- /dev/null
+++ b/arch/x86/lib/atomic64_cx8_32.S
@@ -0,0 +1,224 @@
+/*
+ * atomic64_t for 586+
+ *
+ * Copyright © 2010 Luca Barbieri
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/linkage.h>
+#include <asm/alternative-asm.h>
+#include <asm/dwarf2.h>
+
+.macro SAVE reg
+ pushl %\reg
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET \reg, 0
+.endm
+
+.macro RESTORE reg
+ popl %\reg
+ CFI_ADJUST_CFA_OFFSET -4
+ CFI_RESTORE \reg
+.endm
+
+.macro read64 reg
+ movl %ebx, %eax
+ movl %ecx, %edx
+/* we need LOCK_PREFIX since otherwise cmpxchg8b always does the write */
+ LOCK_PREFIX
+ cmpxchg8b (\reg)
+.endm
+
+ENTRY(atomic64_read_cx8)
+ CFI_STARTPROC
+
+ read64 %ecx
+ ret
+ CFI_ENDPROC
+ENDPROC(atomic64_read_cx8)
+
+ENTRY(atomic64_set_cx8)
+ CFI_STARTPROC
+
+1:
+/* we don't need LOCK_PREFIX since aligned 64-bit writes
+ * are atomic on 586 and newer */
+ cmpxchg8b (%esi)
+ jne 1b
+
+ ret
+ CFI_ENDPROC
+ENDPROC(atomic64_set_cx8)
+
+ENTRY(atomic64_xchg_cx8)
+ CFI_STARTPROC
+
+ movl %ebx, %eax
+ movl %ecx, %edx
+1:
+ LOCK_PREFIX
+ cmpxchg8b (%esi)
+ jne 1b
+
+ ret
+ CFI_ENDPROC
+ENDPROC(atomic64_xchg_cx8)
+
+.macro addsub_return func ins insc
+ENTRY(atomic64_\func\()_return_cx8)
+ CFI_STARTPROC
+ SAVE ebp
+ SAVE ebx
+ SAVE esi
+ SAVE edi
+
+ movl %eax, %esi
+ movl %edx, %edi
+ movl %ecx, %ebp
+
+ read64 %ebp
+1:
+ movl %eax, %ebx
+ movl %edx, %ecx
+ \ins\()l %esi, %ebx
+ \insc\()l %edi, %ecx
+ LOCK_PREFIX
+ cmpxchg8b (%ebp)
+ jne 1b
+
+10:
+ movl %ebx, %eax
+ movl %ecx, %edx
+ RESTORE edi
+ RESTORE esi
+ RESTORE ebx
+ RESTORE ebp
+ ret
+ CFI_ENDPROC
+ENDPROC(atomic64_\func\()_return_cx8)
+.endm
+
+addsub_return add add adc
+addsub_return sub sub sbb
+
+.macro incdec_return func ins insc
+ENTRY(atomic64_\func\()_return_cx8)
+ CFI_STARTPROC
+ SAVE ebx
+
+ read64 %esi
+1:
+ movl %eax, %ebx
+ movl %edx, %ecx
+ \ins\()l $1, %ebx
+ \insc\()l $0, %ecx
+ LOCK_PREFIX
+ cmpxchg8b (%esi)
+ jne 1b
+
+10:
+ movl %ebx, %eax
+ movl %ecx, %edx
+ RESTORE ebx
+ ret
+ CFI_ENDPROC
+ENDPROC(atomic64_\func\()_return_cx8)
+.endm
+
+incdec_return inc add adc
+incdec_return dec sub sbb
+
+ENTRY(atomic64_dec_if_positive_cx8)
+ CFI_STARTPROC
+ SAVE ebx
+
+ read64 %esi
+1:
+ movl %eax, %ebx
+ movl %edx, %ecx
+ subl $1, %ebx
+ sbb $0, %ecx
+ js 2f
+ LOCK_PREFIX
+ cmpxchg8b (%esi)
+ jne 1b
+
+2:
+ movl %ebx, %eax
+ movl %ecx, %edx
+ RESTORE ebx
+ ret
+ CFI_ENDPROC
+ENDPROC(atomic64_dec_if_positive_cx8)
+
+ENTRY(atomic64_add_unless_cx8)
+ CFI_STARTPROC
+ SAVE ebp
+ SAVE ebx
+/* these just push these two parameters on the stack */
+ SAVE edi
+ SAVE esi
+
+ movl %ecx, %ebp
+ movl %eax, %esi
+ movl %edx, %edi
+
+ read64 %ebp
+1:
+ cmpl %eax, 0(%esp)
+ je 4f
+2:
+ movl %eax, %ebx
+ movl %edx, %ecx
+ addl %esi, %ebx
+ adcl %edi, %ecx
+ LOCK_PREFIX
+ cmpxchg8b (%ebp)
+ jne 1b
+
+ movl $1, %eax
+3:
+ addl $8, %esp
+ CFI_ADJUST_CFA_OFFSET -8
+ RESTORE ebx
+ RESTORE ebp
+ ret
+4:
+ cmpl %edx, 4(%esp)
+ jne 2b
+ xorl %eax, %eax
+ jmp 3b
+ CFI_ENDPROC
+ENDPROC(atomic64_add_unless_cx8)
+
+ENTRY(atomic64_inc_not_zero_cx8)
+ CFI_STARTPROC
+ SAVE ebx
+
+ read64 %esi
+1:
+ testl %eax, %eax
+ je 4f
+2:
+ movl %eax, %ebx
+ movl %edx, %ecx
+ addl $1, %ebx
+ adcl $0, %ecx
+ LOCK_PREFIX
+ cmpxchg8b (%esi)
+ jne 1b
+
+ movl $1, %eax
+3:
+ RESTORE ebx
+ ret
+4:
+ testl %edx, %edx
+ jne 2b
+ jmp 3b
+ CFI_ENDPROC
+ENDPROC(atomic64_inc_not_zero_cx8)
diff --git a/arch/x86/math-emu/fpu_aux.c b/arch/x86/math-emu/fpu_aux.c
index aa0987088774..dc8adad10a2f 100644
--- a/arch/x86/math-emu/fpu_aux.c
+++ b/arch/x86/math-emu/fpu_aux.c
@@ -30,10 +30,10 @@ static void fclex(void)
}
/* Needs to be externally visible */
-void finit_task(struct task_struct *tsk)
+void finit_soft_fpu(struct i387_soft_struct *soft)
{
- struct i387_soft_struct *soft = &tsk->thread.xstate->soft;
struct address *oaddr, *iaddr;
+ memset(soft, 0, sizeof(*soft));
soft->cwd = 0x037f;
soft->swd = 0;
soft->ftop = 0; /* We don't keep top in the status word internally. */
@@ -52,7 +52,7 @@ void finit_task(struct task_struct *tsk)
void finit(void)
{
- finit_task(current);
+ finit_soft_fpu(&current->thread.fpu.state->soft);
}
/*
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
index 5d87f586f8d7..7718541541d4 100644
--- a/arch/x86/math-emu/fpu_entry.c
+++ b/arch/x86/math-emu/fpu_entry.c
@@ -681,7 +681,7 @@ int fpregs_soft_set(struct task_struct *target,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
- struct i387_soft_struct *s387 = &target->thread.xstate->soft;
+ struct i387_soft_struct *s387 = &target->thread.fpu.state->soft;
void *space = s387->st_space;
int ret;
int offset, other, i, tags, regnr, tag, newtop;
@@ -733,7 +733,7 @@ int fpregs_soft_get(struct task_struct *target,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
- struct i387_soft_struct *s387 = &target->thread.xstate->soft;
+ struct i387_soft_struct *s387 = &target->thread.fpu.state->soft;
const void *space = s387->st_space;
int ret;
int offset = (S387->ftop & 7) * 10, other = 80 - offset;
diff --git a/arch/x86/math-emu/fpu_system.h b/arch/x86/math-emu/fpu_system.h
index 50fa0ec2c8a5..2c614410a5f3 100644
--- a/arch/x86/math-emu/fpu_system.h
+++ b/arch/x86/math-emu/fpu_system.h
@@ -31,7 +31,7 @@
#define SEG_EXPAND_DOWN(s) (((s).b & ((1 << 11) | (1 << 10))) \
== (1 << 10))
-#define I387 (current->thread.xstate)
+#define I387 (current->thread.fpu.state)
#define FPU_info (I387->soft.info)
#define FPU_CS (*(unsigned short *) &(FPU_info->regs->cs))
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 06630d26e56d..a4c768397baa 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -6,6 +6,7 @@ nostackp := $(call cc-option, -fno-stack-protector)
CFLAGS_physaddr.o := $(nostackp)
CFLAGS_setup_nx.o := $(nostackp)
+obj-$(CONFIG_X86_PAT) += pat_rbtree.o
obj-$(CONFIG_SMP) += tlb.o
obj-$(CONFIG_X86_32) += pgtable_32.o iomap_32.o
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 28195c350b97..532e7933d606 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -997,7 +997,8 @@ out_err:
}
EXPORT_SYMBOL(set_memory_uc);
-int set_memory_array_uc(unsigned long *addr, int addrinarray)
+int _set_memory_array(unsigned long *addr, int addrinarray,
+ unsigned long new_type)
{
int i, j;
int ret;
@@ -1007,13 +1008,19 @@ int set_memory_array_uc(unsigned long *addr, int addrinarray)
*/
for (i = 0; i < addrinarray; i++) {
ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE,
- _PAGE_CACHE_UC_MINUS, NULL);
+ new_type, NULL);
if (ret)
goto out_free;
}
ret = change_page_attr_set(addr, addrinarray,
__pgprot(_PAGE_CACHE_UC_MINUS), 1);
+
+ if (!ret && new_type == _PAGE_CACHE_WC)
+ ret = change_page_attr_set_clr(addr, addrinarray,
+ __pgprot(_PAGE_CACHE_WC),
+ __pgprot(_PAGE_CACHE_MASK),
+ 0, CPA_ARRAY, NULL);
if (ret)
goto out_free;
@@ -1025,8 +1032,19 @@ out_free:
return ret;
}
+
+int set_memory_array_uc(unsigned long *addr, int addrinarray)
+{
+ return _set_memory_array(addr, addrinarray, _PAGE_CACHE_UC_MINUS);
+}
EXPORT_SYMBOL(set_memory_array_uc);
+int set_memory_array_wc(unsigned long *addr, int addrinarray)
+{
+ return _set_memory_array(addr, addrinarray, _PAGE_CACHE_WC);
+}
+EXPORT_SYMBOL(set_memory_array_wc);
+
int _set_memory_wc(unsigned long addr, int numpages)
{
int ret;
@@ -1153,26 +1171,34 @@ int set_pages_uc(struct page *page, int numpages)
}
EXPORT_SYMBOL(set_pages_uc);
-int set_pages_array_uc(struct page **pages, int addrinarray)
+static int _set_pages_array(struct page **pages, int addrinarray,
+ unsigned long new_type)
{
unsigned long start;
unsigned long end;
int i;
int free_idx;
+ int ret;
for (i = 0; i < addrinarray; i++) {
if (PageHighMem(pages[i]))
continue;
start = page_to_pfn(pages[i]) << PAGE_SHIFT;
end = start + PAGE_SIZE;
- if (reserve_memtype(start, end, _PAGE_CACHE_UC_MINUS, NULL))
+ if (reserve_memtype(start, end, new_type, NULL))
goto err_out;
}
- if (cpa_set_pages_array(pages, addrinarray,
- __pgprot(_PAGE_CACHE_UC_MINUS)) == 0) {
- return 0; /* Success */
- }
+ ret = cpa_set_pages_array(pages, addrinarray,
+ __pgprot(_PAGE_CACHE_UC_MINUS));
+ if (!ret && new_type == _PAGE_CACHE_WC)
+ ret = change_page_attr_set_clr(NULL, addrinarray,
+ __pgprot(_PAGE_CACHE_WC),
+ __pgprot(_PAGE_CACHE_MASK),
+ 0, CPA_PAGES_ARRAY, pages);
+ if (ret)
+ goto err_out;
+ return 0; /* Success */
err_out:
free_idx = i;
for (i = 0; i < free_idx; i++) {
@@ -1184,8 +1210,19 @@ err_out:
}
return -EINVAL;
}
+
+int set_pages_array_uc(struct page **pages, int addrinarray)
+{
+ return _set_pages_array(pages, addrinarray, _PAGE_CACHE_UC_MINUS);
+}
EXPORT_SYMBOL(set_pages_array_uc);
+int set_pages_array_wc(struct page **pages, int addrinarray)
+{
+ return _set_pages_array(pages, addrinarray, _PAGE_CACHE_WC);
+}
+EXPORT_SYMBOL(set_pages_array_wc);
+
int set_pages_wb(struct page *page, int numpages)
{
unsigned long addr = (unsigned long)page_address(page);
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index edc8b95afc1a..bbe5502ee1cb 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -30,6 +30,8 @@
#include <asm/pat.h>
#include <asm/io.h>
+#include "pat_internal.h"
+
#ifdef CONFIG_X86_PAT
int __read_mostly pat_enabled = 1;
@@ -53,19 +55,15 @@ static inline void pat_disable(const char *reason)
#endif
-static int debug_enable;
+int pat_debug_enable;
static int __init pat_debug_setup(char *str)
{
- debug_enable = 1;
+ pat_debug_enable = 1;
return 0;
}
__setup("debugpat", pat_debug_setup);
-#define dprintk(fmt, arg...) \
- do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)
-
-
static u64 __read_mostly boot_pat_state;
enum {
@@ -132,84 +130,7 @@ void pat_init(void)
#undef PAT
-static char *cattr_name(unsigned long flags)
-{
- switch (flags & _PAGE_CACHE_MASK) {
- case _PAGE_CACHE_UC: return "uncached";
- case _PAGE_CACHE_UC_MINUS: return "uncached-minus";
- case _PAGE_CACHE_WB: return "write-back";
- case _PAGE_CACHE_WC: return "write-combining";
- default: return "broken";
- }
-}
-
-/*
- * The global memtype list keeps track of memory type for specific
- * physical memory areas. Conflicting memory types in different
- * mappings can cause CPU cache corruption. To avoid this we keep track.
- *
- * The list is sorted based on starting address and can contain multiple
- * entries for each address (this allows reference counting for overlapping
- * areas). All the aliases have the same cache attributes of course.
- * Zero attributes are represented as holes.
- *
- * The data structure is a list that is also organized as an rbtree
- * sorted on the start address of memtype range.
- *
- * memtype_lock protects both the linear list and rbtree.
- */
-
-struct memtype {
- u64 start;
- u64 end;
- unsigned long type;
- struct list_head nd;
- struct rb_node rb;
-};
-
-static struct rb_root memtype_rbroot = RB_ROOT;
-static LIST_HEAD(memtype_list);
-static DEFINE_SPINLOCK(memtype_lock); /* protects memtype list */
-
-static struct memtype *memtype_rb_search(struct rb_root *root, u64 start)
-{
- struct rb_node *node = root->rb_node;
- struct memtype *last_lower = NULL;
-
- while (node) {
- struct memtype *data = container_of(node, struct memtype, rb);
-
- if (data->start < start) {
- last_lower = data;
- node = node->rb_right;
- } else if (data->start > start) {
- node = node->rb_left;
- } else
- return data;
- }
-
- /* Will return NULL if there is no entry with its start <= start */
- return last_lower;
-}
-
-static void memtype_rb_insert(struct rb_root *root, struct memtype *data)
-{
- struct rb_node **new = &(root->rb_node);
- struct rb_node *parent = NULL;
-
- while (*new) {
- struct memtype *this = container_of(*new, struct memtype, rb);
-
- parent = *new;
- if (data->start <= this->start)
- new = &((*new)->rb_left);
- else if (data->start > this->start)
- new = &((*new)->rb_right);
- }
-
- rb_link_node(&data->rb, parent, new);
- rb_insert_color(&data->rb, root);
-}
+static DEFINE_SPINLOCK(memtype_lock); /* protects memtype accesses */
/*
* Does intersection of PAT memory type and MTRR memory type and returns
@@ -237,33 +158,6 @@ static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
return req_type;
}
-static int
-chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
-{
- if (new->type != entry->type) {
- if (type) {
- new->type = entry->type;
- *type = entry->type;
- } else
- goto conflict;
- }
-
- /* check overlaps with more than one entry in the list */
- list_for_each_entry_continue(entry, &memtype_list, nd) {
- if (new->end <= entry->start)
- break;
- else if (new->type != entry->type)
- goto conflict;
- }
- return 0;
-
- conflict:
- printk(KERN_INFO "%s:%d conflicting memory types "
- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
- new->end, cattr_name(new->type), cattr_name(entry->type));
- return -EBUSY;
-}
-
static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
{
int ram_page = 0, not_rampage = 0;
@@ -296,8 +190,6 @@ static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
 * Here we do two passes:
* - Find the memtype of all the pages in the range, look for any conflicts
* - In case of no conflicts, set the new memtype for pages in the range
- *
- * Caller must hold memtype_lock for atomicity.
*/
static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
unsigned long *new_type)
@@ -364,9 +256,8 @@ static int free_ram_pages_type(u64 start, u64 end)
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
unsigned long *new_type)
{
- struct memtype *new, *entry;
+ struct memtype *new;
unsigned long actual_type;
- struct list_head *where;
int is_range_ram;
int err = 0;
@@ -404,9 +295,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
is_range_ram = pat_pagerange_is_ram(start, end);
if (is_range_ram == 1) {
- spin_lock(&memtype_lock);
err = reserve_ram_pages_type(start, end, req_type, new_type);
- spin_unlock(&memtype_lock);
return err;
} else if (is_range_ram < 0) {
@@ -423,42 +312,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
spin_lock(&memtype_lock);
- /* Search for existing mapping that overlaps the current range */
- where = NULL;
- list_for_each_entry(entry, &memtype_list, nd) {
- if (end <= entry->start) {
- where = entry->nd.prev;
- break;
- } else if (start <= entry->start) { /* end > entry->start */
- err = chk_conflict(new, entry, new_type);
- if (!err) {
- dprintk("Overlap at 0x%Lx-0x%Lx\n",
- entry->start, entry->end);
- where = entry->nd.prev;
- }
- break;
- } else if (start < entry->end) { /* start > entry->start */
- err = chk_conflict(new, entry, new_type);
- if (!err) {
- dprintk("Overlap at 0x%Lx-0x%Lx\n",
- entry->start, entry->end);
-
- /*
- * Move to right position in the linked
- * list to add this new entry
- */
- list_for_each_entry_continue(entry,
- &memtype_list, nd) {
- if (start <= entry->start) {
- where = entry->nd.prev;
- break;
- }
- }
- }
- break;
- }
- }
-
+ err = rbt_memtype_check_insert(new, new_type);
if (err) {
printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
"track %s, req %s\n",
@@ -469,13 +323,6 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
return err;
}
- if (where)
- list_add(&new->nd, where);
- else
- list_add_tail(&new->nd, &memtype_list);
-
- memtype_rb_insert(&memtype_rbroot, new);
-
spin_unlock(&memtype_lock);
dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
@@ -487,7 +334,6 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
int free_memtype(u64 start, u64 end)
{
- struct memtype *entry, *saved_entry;
int err = -EINVAL;
int is_range_ram;
@@ -501,9 +347,7 @@ int free_memtype(u64 start, u64 end)
is_range_ram = pat_pagerange_is_ram(start, end);
if (is_range_ram == 1) {
- spin_lock(&memtype_lock);
err = free_ram_pages_type(start, end);
- spin_unlock(&memtype_lock);
return err;
} else if (is_range_ram < 0) {
@@ -511,46 +355,7 @@ int free_memtype(u64 start, u64 end)
}
spin_lock(&memtype_lock);
-
- entry = memtype_rb_search(&memtype_rbroot, start);
- if (unlikely(entry == NULL))
- goto unlock_ret;
-
- /*
- * Saved entry points to an entry with start same or less than what
- * we searched for. Now go through the list in both directions to look
- * for the entry that matches with both start and end, with list stored
- * in sorted start address
- */
- saved_entry = entry;
- list_for_each_entry_from(entry, &memtype_list, nd) {
- if (entry->start == start && entry->end == end) {
- rb_erase(&entry->rb, &memtype_rbroot);
- list_del(&entry->nd);
- kfree(entry);
- err = 0;
- break;
- } else if (entry->start > start) {
- break;
- }
- }
-
- if (!err)
- goto unlock_ret;
-
- entry = saved_entry;
- list_for_each_entry_reverse(entry, &memtype_list, nd) {
- if (entry->start == start && entry->end == end) {
- rb_erase(&entry->rb, &memtype_rbroot);
- list_del(&entry->nd);
- kfree(entry);
- err = 0;
- break;
- } else if (entry->start < start) {
- break;
- }
- }
-unlock_ret:
+ err = rbt_memtype_erase(start, end);
spin_unlock(&memtype_lock);
if (err) {
@@ -583,10 +388,8 @@ static unsigned long lookup_memtype(u64 paddr)
if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
struct page *page;
- spin_lock(&memtype_lock);
page = pfn_to_page(paddr >> PAGE_SHIFT);
rettype = get_page_memtype(page);
- spin_unlock(&memtype_lock);
/*
* -1 from get_page_memtype() implies RAM page is in its
* default state and not reserved, and hence of type WB
@@ -599,7 +402,7 @@ static unsigned long lookup_memtype(u64 paddr)
spin_lock(&memtype_lock);
- entry = memtype_rb_search(&memtype_rbroot, paddr);
+ entry = rbt_memtype_lookup(paddr);
if (entry != NULL)
rettype = entry->type;
else
@@ -936,29 +739,25 @@ EXPORT_SYMBOL_GPL(pgprot_writecombine);
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)
-/* get Nth element of the linked list */
static struct memtype *memtype_get_idx(loff_t pos)
{
- struct memtype *list_node, *print_entry;
- int i = 1;
+ struct memtype *print_entry;
+ int ret;
- print_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
+ print_entry = kzalloc(sizeof(struct memtype), GFP_KERNEL);
if (!print_entry)
return NULL;
spin_lock(&memtype_lock);
- list_for_each_entry(list_node, &memtype_list, nd) {
- if (pos == i) {
- *print_entry = *list_node;
- spin_unlock(&memtype_lock);
- return print_entry;
- }
- ++i;
- }
+ ret = rbt_memtype_copy_nth_element(print_entry, pos);
spin_unlock(&memtype_lock);
- kfree(print_entry);
- return NULL;
+ if (!ret) {
+ return print_entry;
+ } else {
+ kfree(print_entry);
+ return NULL;
+ }
}
static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
diff --git a/arch/x86/mm/pat_internal.h b/arch/x86/mm/pat_internal.h
new file mode 100644
index 000000000000..4f39eefa3e61
--- /dev/null
+++ b/arch/x86/mm/pat_internal.h
@@ -0,0 +1,46 @@
+#ifndef __PAT_INTERNAL_H_
+#define __PAT_INTERNAL_H_
+
+extern int pat_debug_enable;
+
+#define dprintk(fmt, arg...) \
+ do { if (pat_debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)
+
+struct memtype {
+ u64 start;
+ u64 end;
+ u64 subtree_max_end;
+ unsigned long type;
+ struct rb_node rb;
+};
+
+static inline char *cattr_name(unsigned long flags)
+{
+ switch (flags & _PAGE_CACHE_MASK) {
+ case _PAGE_CACHE_UC: return "uncached";
+ case _PAGE_CACHE_UC_MINUS: return "uncached-minus";
+ case _PAGE_CACHE_WB: return "write-back";
+ case _PAGE_CACHE_WC: return "write-combining";
+ default: return "broken";
+ }
+}
+
+#ifdef CONFIG_X86_PAT
+extern int rbt_memtype_check_insert(struct memtype *new,
+ unsigned long *new_type);
+extern int rbt_memtype_erase(u64 start, u64 end);
+extern struct memtype *rbt_memtype_lookup(u64 addr);
+extern int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos);
+#else
+static inline int rbt_memtype_check_insert(struct memtype *new,
+ unsigned long *new_type)
+{ return 0; }
+static inline int rbt_memtype_erase(u64 start, u64 end)
+{ return 0; }
+static inline struct memtype *rbt_memtype_lookup(u64 addr)
+{ return NULL; }
+static inline int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos)
+{ return 0; }
+#endif
+
+#endif /* __PAT_INTERNAL_H_ */
diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
new file mode 100644
index 000000000000..07de4cb8cc30
--- /dev/null
+++ b/arch/x86/mm/pat_rbtree.c
@@ -0,0 +1,273 @@
+/*
+ * Handle caching attributes in page tables (PAT)
+ *
+ * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ * Suresh B Siddha <suresh.b.siddha@intel.com>
+ *
+ * Interval tree (augmented rbtree) used to store the PAT memory type
+ * reservations.
+ */
+
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/gfp.h>
+
+#include <asm/pgtable.h>
+#include <asm/pat.h>
+
+#include "pat_internal.h"
+
+/*
+ * The memtype tree keeps track of memory type for specific
+ * physical memory areas. Without proper tracking, conflicting memory
+ * types in different mappings can cause CPU cache corruption.
+ *
+ * The tree is an interval tree (augmented rbtree) with tree ordered
+ * on starting address. Tree can contain multiple entries for
+ * different regions which overlap. All the aliases have the same
+ * cache attributes of course.
+ *
+ * memtype_lock protects the rbtree.
+ */
+
+static void memtype_rb_augment_cb(struct rb_node *node);
+static struct rb_root memtype_rbroot = RB_AUGMENT_ROOT(&memtype_rb_augment_cb);
+
+static int is_node_overlap(struct memtype *node, u64 start, u64 end)
+{
+ if (node->start >= end || node->end <= start)
+ return 0;
+
+ return 1;
+}
+
+static u64 get_subtree_max_end(struct rb_node *node)
+{
+ u64 ret = 0;
+ if (node) {
+ struct memtype *data = container_of(node, struct memtype, rb);
+ ret = data->subtree_max_end;
+ }
+ return ret;
+}
+
+/* Update 'subtree_max_end' for a node, based on node and its children */
+static void update_node_max_end(struct rb_node *node)
+{
+ struct memtype *data;
+ u64 max_end, child_max_end;
+
+ if (!node)
+ return;
+
+ data = container_of(node, struct memtype, rb);
+ max_end = data->end;
+
+ child_max_end = get_subtree_max_end(node->rb_right);
+ if (child_max_end > max_end)
+ max_end = child_max_end;
+
+ child_max_end = get_subtree_max_end(node->rb_left);
+ if (child_max_end > max_end)
+ max_end = child_max_end;
+
+ data->subtree_max_end = max_end;
+}
+
+/* Update 'subtree_max_end' for a node and all its ancestors */
+static void update_path_max_end(struct rb_node *node)
+{
+ u64 old_max_end, new_max_end;
+
+ while (node) {
+ struct memtype *data = container_of(node, struct memtype, rb);
+
+ old_max_end = data->subtree_max_end;
+ update_node_max_end(node);
+ new_max_end = data->subtree_max_end;
+
+ if (new_max_end == old_max_end)
+ break;
+
+ node = rb_parent(node);
+ }
+}
+
+/* Find the first (lowest start addr) overlapping range from rb tree */
+static struct memtype *memtype_rb_lowest_match(struct rb_root *root,
+ u64 start, u64 end)
+{
+ struct rb_node *node = root->rb_node;
+ struct memtype *last_lower = NULL;
+
+ while (node) {
+ struct memtype *data = container_of(node, struct memtype, rb);
+
+ if (get_subtree_max_end(node->rb_left) > start) {
+ /* Lowest overlap if any must be on left side */
+ node = node->rb_left;
+ } else if (is_node_overlap(data, start, end)) {
+ last_lower = data;
+ break;
+ } else if (start >= data->start) {
+ /* Lowest overlap if any must be on right side */
+ node = node->rb_right;
+ } else {
+ break;
+ }
+ }
+ return last_lower; /* Returns NULL if there is no overlap */
+}
+
+static struct memtype *memtype_rb_exact_match(struct rb_root *root,
+ u64 start, u64 end)
+{
+ struct memtype *match;
+
+ match = memtype_rb_lowest_match(root, start, end);
+ while (match != NULL && match->start < end) {
+ struct rb_node *node;
+
+ if (match->start == start && match->end == end)
+ return match;
+
+ node = rb_next(&match->rb);
+ if (node)
+ match = container_of(node, struct memtype, rb);
+ else
+ match = NULL;
+ }
+
+ return NULL; /* Returns NULL if there is no exact match */
+}
+
+static int memtype_rb_check_conflict(struct rb_root *root,
+ u64 start, u64 end,
+ unsigned long reqtype, unsigned long *newtype)
+{
+ struct rb_node *node;
+ struct memtype *match;
+ int found_type = reqtype;
+
+ match = memtype_rb_lowest_match(&memtype_rbroot, start, end);
+ if (match == NULL)
+ goto success;
+
+ if (match->type != found_type && newtype == NULL)
+ goto failure;
+
+ dprintk("Overlap at 0x%Lx-0x%Lx\n", match->start, match->end);
+ found_type = match->type;
+
+ node = rb_next(&match->rb);
+ while (node) {
+ match = container_of(node, struct memtype, rb);
+
+ if (match->start >= end) /* Checked all possible matches */
+ goto success;
+
+ if (is_node_overlap(match, start, end) &&
+ match->type != found_type) {
+ goto failure;
+ }
+
+ node = rb_next(&match->rb);
+ }
+success:
+ if (newtype)
+ *newtype = found_type;
+
+ return 0;
+
+failure:
+ printk(KERN_INFO "%s:%d conflicting memory types "
+ "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
+ end, cattr_name(found_type), cattr_name(match->type));
+ return -EBUSY;
+}
+
+static void memtype_rb_augment_cb(struct rb_node *node)
+{
+ if (node)
+ update_path_max_end(node);
+}
+
+static void memtype_rb_insert(struct rb_root *root, struct memtype *newdata)
+{
+ struct rb_node **node = &(root->rb_node);
+ struct rb_node *parent = NULL;
+
+ while (*node) {
+ struct memtype *data = container_of(*node, struct memtype, rb);
+
+ parent = *node;
+ if (newdata->start <= data->start)
+ node = &((*node)->rb_left);
+ else if (newdata->start > data->start)
+ node = &((*node)->rb_right);
+ }
+
+ rb_link_node(&newdata->rb, parent, node);
+ rb_insert_color(&newdata->rb, root);
+}
+
+int rbt_memtype_check_insert(struct memtype *new, unsigned long *ret_type)
+{
+ int err = 0;
+
+ err = memtype_rb_check_conflict(&memtype_rbroot, new->start, new->end,
+ new->type, ret_type);
+
+ if (!err) {
+ if (ret_type)
+ new->type = *ret_type;
+
+ memtype_rb_insert(&memtype_rbroot, new);
+ }
+ return err;
+}
+
+int rbt_memtype_erase(u64 start, u64 end)
+{
+ struct memtype *data;
+
+ data = memtype_rb_exact_match(&memtype_rbroot, start, end);
+ if (!data)
+ return -EINVAL;
+
+ rb_erase(&data->rb, &memtype_rbroot);
+ return 0;
+}
+
+struct memtype *rbt_memtype_lookup(u64 addr)
+{
+ struct memtype *data;
+ data = memtype_rb_lowest_match(&memtype_rbroot, addr, addr + PAGE_SIZE);
+ return data;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos)
+{
+ struct rb_node *node;
+ int i = 1;
+
+ node = rb_first(&memtype_rbroot);
+ while (node && pos != i) {
+ node = rb_next(node);
+ i++;
+ }
+
+ if (node) { /* pos == i */
+ struct memtype *this = container_of(node, struct memtype, rb);
+ *out = *this;
+ return 0;
+ } else {
+ return 1;
+ }
+}
+#endif
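
The lookup above prunes whole subtrees using the cached subtree_max_end. As a tiny standalone illustration (plain user-space C, not kernel code, with names invented for the example), the invariant each node maintains is just the maximum of its own end and its children's cached maxima, exactly what update_node_max_end() computes:

	#include <stdio.h>

	struct node {
		unsigned long long start, end;
		unsigned long long subtree_max_end;
		struct node *left, *right;
	};

	static unsigned long long subtree_max(struct node *n)
	{
		return n ? n->subtree_max_end : 0;
	}

	/* Same rule as update_node_max_end() above. */
	static void update_max_end(struct node *n)
	{
		unsigned long long m = n->end;

		if (subtree_max(n->left) > m)
			m = subtree_max(n->left);
		if (subtree_max(n->right) > m)
			m = subtree_max(n->right);
		n->subtree_max_end = m;
	}

	int main(void)
	{
		struct node l    = { 0x1000, 0x2000, 0x2000, NULL, NULL };
		struct node r    = { 0x8000, 0x9000, 0x9000, NULL, NULL };
		struct node root = { 0x4000, 0x5000, 0, &l, &r };

		update_max_end(&root);
		printf("root subtree_max_end = %#llx\n", root.subtree_max_end);
		return 0;
	}
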
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 28c68762648f..f9897f7a9ef1 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -363,6 +363,54 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
for (i = 0; i < MAX_NUMNODES; i++)
cutoff_node(i, start, end);
+ /*
+ * Join together blocks on the same node, holes between
+ * which don't overlap with memory on other nodes.
+ */
+ for (i = 0; i < num_node_memblks; ++i) {
+ int j, k;
+
+ for (j = i + 1; j < num_node_memblks; ++j) {
+ unsigned long start, end;
+
+ if (memblk_nodeid[i] != memblk_nodeid[j])
+ continue;
+ start = min(node_memblk_range[i].end,
+ node_memblk_range[j].end);
+ end = max(node_memblk_range[i].start,
+ node_memblk_range[j].start);
+ for (k = 0; k < num_node_memblks; ++k) {
+ if (memblk_nodeid[i] == memblk_nodeid[k])
+ continue;
+ if (start < node_memblk_range[k].end &&
+ end > node_memblk_range[k].start)
+ break;
+ }
+ if (k < num_node_memblks)
+ continue;
+ start = min(node_memblk_range[i].start,
+ node_memblk_range[j].start);
+ end = max(node_memblk_range[i].end,
+ node_memblk_range[j].end);
+ printk(KERN_INFO "SRAT: Node %d "
+ "[%Lx,%Lx) + [%Lx,%Lx) -> [%lx,%lx)\n",
+ memblk_nodeid[i],
+ node_memblk_range[i].start,
+ node_memblk_range[i].end,
+ node_memblk_range[j].start,
+ node_memblk_range[j].end,
+ start, end);
+ node_memblk_range[i].start = start;
+ node_memblk_range[i].end = end;
+ k = --num_node_memblks - j;
+ memmove(memblk_nodeid + j, memblk_nodeid + j+1,
+ k * sizeof(*memblk_nodeid));
+ memmove(node_memblk_range + j, node_memblk_range + j+1,
+ k * sizeof(*node_memblk_range));
+ --j;
+ }
+ }
+
memnode_shift = compute_hash_shift(node_memblk_range, num_node_memblks,
memblk_nodeid);
if (memnode_shift < 0) {
@@ -461,7 +509,8 @@ void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes)
* node, it must now point to the fake node ID.
*/
for (j = 0; j < MAX_LOCAL_APIC; j++)
- if (apicid_to_node[j] == nid)
+ if (apicid_to_node[j] == nid &&
+ fake_apicid_to_node[j] == NUMA_NO_NODE)
fake_apicid_to_node[j] = i;
}
for (i = 0; i < num_nodes; i++)
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 1fd17cfb956b..d769cda54082 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -238,11 +238,11 @@ static void arch_perfmon_setup_counters(void)
if (eax.split.version_id == 0 && current_cpu_data.x86 == 6 &&
current_cpu_data.x86_model == 15) {
eax.split.version_id = 2;
- eax.split.num_events = 2;
+ eax.split.num_counters = 2;
eax.split.bit_width = 40;
}
- num_counters = eax.split.num_events;
+ num_counters = eax.split.num_counters;
op_arch_perfmon_spec.num_counters = num_counters;
op_arch_perfmon_spec.num_controls = num_counters;
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 31930fd30ea9..9dcf43d7d0c0 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -224,8 +224,11 @@ res_alloc_fail:
return;
}
-struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int domain, int busnum)
+struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_pci_root *root)
{
+ struct acpi_device *device = root->device;
+ int domain = root->segment;
+ int busnum = root->secondary.start;
struct pci_bus *bus;
struct pci_sysdata *sd;
int node;
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index cf2e93869c48..215a27ae050d 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -76,7 +76,7 @@ struct pci_ops pci_root_ops = {
* This interrupt-safe spinlock protects all accesses to PCI
* configuration space.
*/
-DEFINE_SPINLOCK(pci_config_lock);
+DEFINE_RAW_SPINLOCK(pci_config_lock);
static int __devinit can_skip_ioresource_align(const struct dmi_system_id *d)
{
diff --git a/arch/x86/pci/direct.c b/arch/x86/pci/direct.c
index 347d882b3bb3..bd33620b0071 100644
--- a/arch/x86/pci/direct.c
+++ b/arch/x86/pci/direct.c
@@ -27,7 +27,7 @@ static int pci_conf1_read(unsigned int seg, unsigned int bus,
return -EINVAL;
}
- spin_lock_irqsave(&pci_config_lock, flags);
+ raw_spin_lock_irqsave(&pci_config_lock, flags);
outl(PCI_CONF1_ADDRESS(bus, devfn, reg), 0xCF8);
@@ -43,7 +43,7 @@ static int pci_conf1_read(unsigned int seg, unsigned int bus,
break;
}
- spin_unlock_irqrestore(&pci_config_lock, flags);
+ raw_spin_unlock_irqrestore(&pci_config_lock, flags);
return 0;
}
@@ -56,7 +56,7 @@ static int pci_conf1_write(unsigned int seg, unsigned int bus,
if ((bus > 255) || (devfn > 255) || (reg > 4095))
return -EINVAL;
- spin_lock_irqsave(&pci_config_lock, flags);
+ raw_spin_lock_irqsave(&pci_config_lock, flags);
outl(PCI_CONF1_ADDRESS(bus, devfn, reg), 0xCF8);
@@ -72,7 +72,7 @@ static int pci_conf1_write(unsigned int seg, unsigned int bus,
break;
}
- spin_unlock_irqrestore(&pci_config_lock, flags);
+ raw_spin_unlock_irqrestore(&pci_config_lock, flags);
return 0;
}
@@ -108,7 +108,7 @@ static int pci_conf2_read(unsigned int seg, unsigned int bus,
if (dev & 0x10)
return PCIBIOS_DEVICE_NOT_FOUND;
- spin_lock_irqsave(&pci_config_lock, flags);
+ raw_spin_lock_irqsave(&pci_config_lock, flags);
outb((u8)(0xF0 | (fn << 1)), 0xCF8);
outb((u8)bus, 0xCFA);
@@ -127,7 +127,7 @@ static int pci_conf2_read(unsigned int seg, unsigned int bus,
outb(0, 0xCF8);
- spin_unlock_irqrestore(&pci_config_lock, flags);
+ raw_spin_unlock_irqrestore(&pci_config_lock, flags);
return 0;
}
@@ -147,7 +147,7 @@ static int pci_conf2_write(unsigned int seg, unsigned int bus,
if (dev & 0x10)
return PCIBIOS_DEVICE_NOT_FOUND;
- spin_lock_irqsave(&pci_config_lock, flags);
+ raw_spin_lock_irqsave(&pci_config_lock, flags);
outb((u8)(0xF0 | (fn << 1)), 0xCF8);
outb((u8)bus, 0xCFA);
@@ -166,7 +166,7 @@ static int pci_conf2_write(unsigned int seg, unsigned int bus,
outb(0, 0xCF8);
- spin_unlock_irqrestore(&pci_config_lock, flags);
+ raw_spin_unlock_irqrestore(&pci_config_lock, flags);
return 0;
}
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index 5d362b5ba06f..9810a0f76c91 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -589,8 +589,6 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route
case PCI_DEVICE_ID_INTEL_ICH10_1:
case PCI_DEVICE_ID_INTEL_ICH10_2:
case PCI_DEVICE_ID_INTEL_ICH10_3:
- case PCI_DEVICE_ID_INTEL_CPT_LPC1:
- case PCI_DEVICE_ID_INTEL_CPT_LPC2:
r->name = "PIIX/ICH";
r->get = pirq_piix_get;
r->set = pirq_piix_set;
@@ -605,6 +603,13 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route
return 1;
}
+ if ((device >= PCI_DEVICE_ID_INTEL_CPT_LPC_MIN) &&
+ (device <= PCI_DEVICE_ID_INTEL_CPT_LPC_MAX)) {
+ r->name = "PIIX/ICH";
+ r->get = pirq_piix_get;
+ r->set = pirq_piix_set;
+ return 1;
+ }
return 0;
}
diff --git a/arch/x86/pci/legacy.c b/arch/x86/pci/legacy.c
index 0db5eaf54560..8d460eaf524f 100644
--- a/arch/x86/pci/legacy.c
+++ b/arch/x86/pci/legacy.c
@@ -11,28 +11,14 @@
*/
static void __devinit pcibios_fixup_peer_bridges(void)
{
- int n, devfn;
- long node;
+ int n;
if (pcibios_last_bus <= 0 || pcibios_last_bus > 0xff)
return;
DBG("PCI: Peer bridge fixup\n");
- for (n=0; n <= pcibios_last_bus; n++) {
- u32 l;
- if (pci_find_bus(0, n))
- continue;
- node = get_mp_bus_to_node(n);
- for (devfn = 0; devfn < 256; devfn += 8) {
- if (!raw_pci_read(0, n, devfn, PCI_VENDOR_ID, 2, &l) &&
- l != 0x0000 && l != 0xffff) {
- DBG("Found device at %02x:%02x [%04x]\n", n, devfn, l);
- printk(KERN_INFO "PCI: Discovered peer bus %02x\n", n);
- pci_scan_bus_on_node(n, &pci_root_ops, node);
- break;
- }
- }
- }
+ for (n=0; n <= pcibios_last_bus; n++)
+ pcibios_scan_specific_bus(n);
}
int __init pci_legacy_init(void)
@@ -50,6 +36,28 @@ int __init pci_legacy_init(void)
return 0;
}
+void pcibios_scan_specific_bus(int busn)
+{
+ int devfn;
+ long node;
+ u32 l;
+
+ if (pci_find_bus(0, busn))
+ return;
+
+ node = get_mp_bus_to_node(busn);
+ for (devfn = 0; devfn < 256; devfn += 8) {
+ if (!raw_pci_read(0, busn, devfn, PCI_VENDOR_ID, 2, &l) &&
+ l != 0x0000 && l != 0xffff) {
+ DBG("Found device at %02x:%02x [%04x]\n", busn, devfn, l);
+ printk(KERN_INFO "PCI: Discovered peer bus %02x\n", busn);
+ pci_scan_bus_on_node(busn, &pci_root_ops, node);
+ return;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(pcibios_scan_specific_bus);
+
int __init pci_subsys_init(void)
{
/*
diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c
index 90d5fd476ed4..a3d9c54792ae 100644
--- a/arch/x86/pci/mmconfig_32.c
+++ b/arch/x86/pci/mmconfig_32.c
@@ -64,7 +64,7 @@ err: *value = -1;
if (!base)
goto err;
- spin_lock_irqsave(&pci_config_lock, flags);
+ raw_spin_lock_irqsave(&pci_config_lock, flags);
pci_exp_set_dev_base(base, bus, devfn);
@@ -79,7 +79,7 @@ err: *value = -1;
*value = mmio_config_readl(mmcfg_virt_addr + reg);
break;
}
- spin_unlock_irqrestore(&pci_config_lock, flags);
+ raw_spin_unlock_irqrestore(&pci_config_lock, flags);
return 0;
}
@@ -97,7 +97,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
if (!base)
return -EINVAL;
- spin_lock_irqsave(&pci_config_lock, flags);
+ raw_spin_lock_irqsave(&pci_config_lock, flags);
pci_exp_set_dev_base(base, bus, devfn);
@@ -112,7 +112,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
mmio_config_writel(mmcfg_virt_addr + reg, value);
break;
}
- spin_unlock_irqrestore(&pci_config_lock, flags);
+ raw_spin_unlock_irqrestore(&pci_config_lock, flags);
return 0;
}
diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c
index 8223738ad806..5c9e2458df4e 100644
--- a/arch/x86/pci/numaq_32.c
+++ b/arch/x86/pci/numaq_32.c
@@ -37,7 +37,7 @@ static int pci_conf1_mq_read(unsigned int seg, unsigned int bus,
if (!value || (bus >= MAX_MP_BUSSES) || (devfn > 255) || (reg > 255))
return -EINVAL;
- spin_lock_irqsave(&pci_config_lock, flags);
+ raw_spin_lock_irqsave(&pci_config_lock, flags);
write_cf8(bus, devfn, reg);
@@ -62,7 +62,7 @@ static int pci_conf1_mq_read(unsigned int seg, unsigned int bus,
break;
}
- spin_unlock_irqrestore(&pci_config_lock, flags);
+ raw_spin_unlock_irqrestore(&pci_config_lock, flags);
return 0;
}
@@ -76,7 +76,7 @@ static int pci_conf1_mq_write(unsigned int seg, unsigned int bus,
if ((bus >= MAX_MP_BUSSES) || (devfn > 255) || (reg > 255))
return -EINVAL;
- spin_lock_irqsave(&pci_config_lock, flags);
+ raw_spin_lock_irqsave(&pci_config_lock, flags);
write_cf8(bus, devfn, reg);
@@ -101,7 +101,7 @@ static int pci_conf1_mq_write(unsigned int seg, unsigned int bus,
break;
}
- spin_unlock_irqrestore(&pci_config_lock, flags);
+ raw_spin_unlock_irqrestore(&pci_config_lock, flags);
return 0;
}
diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
index 59a225c17b84..2492d165096a 100644
--- a/arch/x86/pci/pcbios.c
+++ b/arch/x86/pci/pcbios.c
@@ -162,7 +162,7 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
if (!value || (bus > 255) || (devfn > 255) || (reg > 255))
return -EINVAL;
- spin_lock_irqsave(&pci_config_lock, flags);
+ raw_spin_lock_irqsave(&pci_config_lock, flags);
switch (len) {
case 1:
@@ -213,7 +213,7 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
break;
}
- spin_unlock_irqrestore(&pci_config_lock, flags);
+ raw_spin_unlock_irqrestore(&pci_config_lock, flags);
return (int)((result & 0xff00) >> 8);
}
@@ -228,7 +228,7 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
if ((bus > 255) || (devfn > 255) || (reg > 255))
return -EINVAL;
- spin_lock_irqsave(&pci_config_lock, flags);
+ raw_spin_lock_irqsave(&pci_config_lock, flags);
switch (len) {
case 1:
@@ -269,7 +269,7 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
break;
}
- spin_unlock_irqrestore(&pci_config_lock, flags);
+ raw_spin_unlock_irqrestore(&pci_config_lock, flags);
return (int)((result & 0xff00) >> 8);
}
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 32764b8880b5..b3c6c59ed302 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -476,6 +476,7 @@ void xen_timer_resume(void)
__init void xen_time_init(void)
{
int cpu = smp_processor_id();
+ struct timespec tp;
clocksource_register(&xen_clocksource);
@@ -487,9 +488,8 @@ __init void xen_time_init(void)
}
/* Set initial system time with full resolution */
- xen_read_wallclock(&xtime);
- set_normalized_timespec(&wall_to_monotonic,
- -xtime.tv_sec, -xtime.tv_nsec);
+ xen_read_wallclock(&tp);
+ do_settimeofday(&tp);
setup_force_cpu_cap(X86_FEATURE_TSC);
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index 19f7df30937f..19df764f6399 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -60,11 +60,6 @@ static struct irqaction timer_irqaction = {
void __init time_init(void)
{
- /* FIXME: xtime&wall_to_monotonic are set in timekeeping_init. */
- read_persistent_clock(&xtime);
- set_normalized_timespec(&wall_to_monotonic,
- -xtime.tv_sec, -xtime.tv_nsec);
-
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
printk("Calibrating CPU frequency ");
platform_calibrate_ccount();
diff --git a/block/Kconfig b/block/Kconfig
index f9e89f4d94bb..9be0b56eaee1 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -77,29 +77,6 @@ config BLK_DEV_INTEGRITY
T10/SCSI Data Integrity Field or the T13/ATA External Path
Protection. If in doubt, say N.
-config BLK_CGROUP
- tristate "Block cgroup support"
- depends on CGROUPS
- depends on CFQ_GROUP_IOSCHED
- default n
- ---help---
- Generic block IO controller cgroup interface. This is the common
- cgroup interface which should be used by various IO controlling
- policies.
-
- Currently, CFQ IO scheduler uses it to recognize task groups and
- control disk bandwidth allocation (proportional time slice allocation)
- to such task groups.
-
-config DEBUG_BLK_CGROUP
- bool
- depends on BLK_CGROUP
- default n
- ---help---
- Enable some debugging help. Currently it stores the cgroup path
- in the blk group which can be used by cfq for tracing various
- group related activity.
-
endif # BLOCK
config BLOCK_COMPAT
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index fc71cf071fb2..3199b76f795d 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -23,7 +23,8 @@ config IOSCHED_DEADLINE
config IOSCHED_CFQ
tristate "CFQ I/O scheduler"
- select BLK_CGROUP if CFQ_GROUP_IOSCHED
+ # If BLK_CGROUP is a module, CFQ has to be built as a module.
+ depends on (BLK_CGROUP=m && m) || !BLK_CGROUP || BLK_CGROUP=y
default y
---help---
The CFQ I/O scheduler tries to distribute bandwidth equally
@@ -33,22 +34,15 @@ config IOSCHED_CFQ
This is the default I/O scheduler.
+ Note: If BLK_CGROUP=m, then CFQ can only be built as a module.
+
config CFQ_GROUP_IOSCHED
bool "CFQ Group Scheduling support"
- depends on IOSCHED_CFQ && CGROUPS
+ depends on IOSCHED_CFQ && BLK_CGROUP
default n
---help---
Enable group IO scheduling in CFQ.
-config DEBUG_CFQ_IOSCHED
- bool "Debug CFQ Scheduling"
- depends on CFQ_GROUP_IOSCHED
- select DEBUG_BLK_CGROUP
- default n
- ---help---
- Enable CFQ IO scheduling debugging in CFQ. Currently it makes
- blktrace output more verbose.
-
choice
prompt "Default I/O scheduler"
default DEFAULT_CFQ
diff --git a/block/Makefile b/block/Makefile
index cb2d515ebd6e..0bb499a739cd 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -5,7 +5,7 @@
obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \
blk-barrier.o blk-settings.o blk-ioc.o blk-map.o \
blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
- blk-iopoll.o ioctl.o genhd.o scsi_ioctl.o
+ blk-iopoll.o blk-lib.o ioctl.o genhd.o scsi_ioctl.o
obj-$(CONFIG_BLK_DEV_BSG) += bsg.o
obj-$(CONFIG_BLK_CGROUP) += blk-cgroup.o
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 6d88544b677f..0d710c9d403b 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -286,26 +286,31 @@ static void bio_end_empty_barrier(struct bio *bio, int err)
set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
clear_bit(BIO_UPTODATE, &bio->bi_flags);
}
-
- complete(bio->bi_private);
+ if (bio->bi_private)
+ complete(bio->bi_private);
+ bio_put(bio);
}
/**
* blkdev_issue_flush - queue a flush
* @bdev: blockdev to issue flush for
+ * @gfp_mask: memory allocation flags (for bio_alloc)
* @error_sector: error sector
+ * @flags: BLKDEV_IFL_* flags to control behaviour
*
* Description:
* Issue a flush for the block device in question. Caller can supply
* room for storing the error offset in case of a flush error, if they
- * wish to.
+ * wish to. If the WAIT flag is not passed, the caller can only check
+ * whether the request was queued internally for later handling.
*/
-int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
+int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
+ sector_t *error_sector, unsigned long flags)
{
DECLARE_COMPLETION_ONSTACK(wait);
struct request_queue *q;
struct bio *bio;
- int ret;
+ int ret = 0;
if (bdev->bd_disk == NULL)
return -ENXIO;
@@ -314,23 +319,25 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
if (!q)
return -ENXIO;
- bio = bio_alloc(GFP_KERNEL, 0);
+ bio = bio_alloc(gfp_mask, 0);
bio->bi_end_io = bio_end_empty_barrier;
- bio->bi_private = &wait;
bio->bi_bdev = bdev;
- submit_bio(WRITE_BARRIER, bio);
-
- wait_for_completion(&wait);
+ if (test_bit(BLKDEV_WAIT, &flags))
+ bio->bi_private = &wait;
- /*
- * The driver must store the error location in ->bi_sector, if
- * it supports it. For non-stacked drivers, this should be copied
- * from blk_rq_pos(rq).
- */
- if (error_sector)
- *error_sector = bio->bi_sector;
+ bio_get(bio);
+ submit_bio(WRITE_BARRIER, bio);
+ if (test_bit(BLKDEV_WAIT, &flags)) {
+ wait_for_completion(&wait);
+ /*
+ * The driver must store the error location in ->bi_sector, if
+ * it supports it. For non-stacked drivers, this should be
+ * copied from blk_rq_pos(rq).
+ */
+ if (error_sector)
+ *error_sector = bio->bi_sector;
+ }
- ret = 0;
if (bio_flagged(bio, BIO_EOPNOTSUPP))
ret = -EOPNOTSUPP;
else if (!bio_flagged(bio, BIO_UPTODATE))
@@ -340,107 +347,3 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
-
-static void blkdev_discard_end_io(struct bio *bio, int err)
-{
- if (err) {
- if (err == -EOPNOTSUPP)
- set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
- clear_bit(BIO_UPTODATE, &bio->bi_flags);
- }
-
- if (bio->bi_private)
- complete(bio->bi_private);
- __free_page(bio_page(bio));
-
- bio_put(bio);
-}
-
-/**
- * blkdev_issue_discard - queue a discard
- * @bdev: blockdev to issue discard for
- * @sector: start sector
- * @nr_sects: number of sectors to discard
- * @gfp_mask: memory allocation flags (for bio_alloc)
- * @flags: DISCARD_FL_* flags to control behaviour
- *
- * Description:
- * Issue a discard request for the sectors in question.
- */
-int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, int flags)
-{
- DECLARE_COMPLETION_ONSTACK(wait);
- struct request_queue *q = bdev_get_queue(bdev);
- int type = flags & DISCARD_FL_BARRIER ?
- DISCARD_BARRIER : DISCARD_NOBARRIER;
- struct bio *bio;
- struct page *page;
- int ret = 0;
-
- if (!q)
- return -ENXIO;
-
- if (!blk_queue_discard(q))
- return -EOPNOTSUPP;
-
- while (nr_sects && !ret) {
- unsigned int sector_size = q->limits.logical_block_size;
- unsigned int max_discard_sectors =
- min(q->limits.max_discard_sectors, UINT_MAX >> 9);
-
- bio = bio_alloc(gfp_mask, 1);
- if (!bio)
- goto out;
- bio->bi_sector = sector;
- bio->bi_end_io = blkdev_discard_end_io;
- bio->bi_bdev = bdev;
- if (flags & DISCARD_FL_WAIT)
- bio->bi_private = &wait;
-
- /*
- * Add a zeroed one-sector payload as that's what
- * our current implementations need. If we'll ever need
- * more the interface will need revisiting.
- */
- page = alloc_page(gfp_mask | __GFP_ZERO);
- if (!page)
- goto out_free_bio;
- if (bio_add_pc_page(q, bio, page, sector_size, 0) < sector_size)
- goto out_free_page;
-
- /*
- * And override the bio size - the way discard works we
- * touch many more blocks on disk than the actual payload
- * length.
- */
- if (nr_sects > max_discard_sectors) {
- bio->bi_size = max_discard_sectors << 9;
- nr_sects -= max_discard_sectors;
- sector += max_discard_sectors;
- } else {
- bio->bi_size = nr_sects << 9;
- nr_sects = 0;
- }
-
- bio_get(bio);
- submit_bio(type, bio);
-
- if (flags & DISCARD_FL_WAIT)
- wait_for_completion(&wait);
-
- if (bio_flagged(bio, BIO_EOPNOTSUPP))
- ret = -EOPNOTSUPP;
- else if (!bio_flagged(bio, BIO_UPTODATE))
- ret = -EIO;
- bio_put(bio);
- }
- return ret;
-out_free_page:
- __free_page(page);
-out_free_bio:
- bio_put(bio);
-out:
- return -ENOMEM;
-}
-EXPORT_SYMBOL(blkdev_issue_discard);
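A minimal caller sketch for the reworked flush interface, assuming the BLKDEV_IFL_WAIT flag introduced by this series for synchronous behaviour; example_sync_flush is a made-up name, not part of the patch:

static int example_sync_flush(struct block_device *bdev)
{
	sector_t error_sector;

	/* GFP_KERNEL: allocation may sleep; BLKDEV_IFL_WAIT: wait for the empty barrier to complete */
	return blkdev_issue_flush(bdev, GFP_KERNEL, &error_sector,
				  BLKDEV_IFL_WAIT);
}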
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 2cc682b860ea..a6809645d212 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -15,8 +15,12 @@
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
+#include <linux/blkdev.h>
#include <linux/slab.h>
#include "blk-cgroup.h"
+#include <linux/genhd.h>
+
+#define MAX_KEY_LEN 100
static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);
@@ -49,6 +53,32 @@ struct cgroup_subsys blkio_subsys = {
};
EXPORT_SYMBOL_GPL(blkio_subsys);
+static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
+ struct blkio_policy_node *pn)
+{
+ list_add(&pn->node, &blkcg->policy_list);
+}
+
+/* Must be called with blkcg->lock held */
+static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
+{
+ list_del(&pn->node);
+}
+
+/* Must be called with blkcg->lock held */
+static struct blkio_policy_node *
+blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev)
+{
+ struct blkio_policy_node *pn;
+
+ list_for_each_entry(pn, &blkcg->policy_list, node) {
+ if (pn->dev == dev)
+ return pn;
+ }
+
+ return NULL;
+}
+
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
@@ -56,13 +86,259 @@ struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
-void blkiocg_update_blkio_group_stats(struct blkio_group *blkg,
- unsigned long time, unsigned long sectors)
+/*
+ * Add to the appropriate stat variable depending on the request type.
+ * This should be called with the blkg->stats_lock held.
+ */
+static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
+ bool sync)
+{
+ if (direction)
+ stat[BLKIO_STAT_WRITE] += add;
+ else
+ stat[BLKIO_STAT_READ] += add;
+ if (sync)
+ stat[BLKIO_STAT_SYNC] += add;
+ else
+ stat[BLKIO_STAT_ASYNC] += add;
+}
+
+/*
+ * Decrements the appropriate stat variable if non-zero depending on the
+ * request type. Panics on value being zero.
+ * This should be called with the blkg->stats_lock held.
+ */
+static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
+{
+ if (direction) {
+ BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
+ stat[BLKIO_STAT_WRITE]--;
+ } else {
+ BUG_ON(stat[BLKIO_STAT_READ] == 0);
+ stat[BLKIO_STAT_READ]--;
+ }
+ if (sync) {
+ BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
+ stat[BLKIO_STAT_SYNC]--;
+ } else {
+ BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
+ stat[BLKIO_STAT_ASYNC]--;
+ }
+}
+
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+/* This should be called with the blkg->stats_lock held. */
+static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
+ struct blkio_group *curr_blkg)
+{
+ if (blkio_blkg_waiting(&blkg->stats))
+ return;
+ if (blkg == curr_blkg)
+ return;
+ blkg->stats.start_group_wait_time = sched_clock();
+ blkio_mark_blkg_waiting(&blkg->stats);
+}
+
+/* This should be called with the blkg->stats_lock held. */
+static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
+{
+ unsigned long long now;
+
+ if (!blkio_blkg_waiting(stats))
+ return;
+
+ now = sched_clock();
+ if (time_after64(now, stats->start_group_wait_time))
+ stats->group_wait_time += now - stats->start_group_wait_time;
+ blkio_clear_blkg_waiting(stats);
+}
+
+/* This should be called with the blkg->stats_lock held. */
+static void blkio_end_empty_time(struct blkio_group_stats *stats)
+{
+ unsigned long long now;
+
+ if (!blkio_blkg_empty(stats))
+ return;
+
+ now = sched_clock();
+ if (time_after64(now, stats->start_empty_time))
+ stats->empty_time += now - stats->start_empty_time;
+ blkio_clear_blkg_empty(stats);
+}
+
+void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&blkg->stats_lock, flags);
+ BUG_ON(blkio_blkg_idling(&blkg->stats));
+ blkg->stats.start_idle_time = sched_clock();
+ blkio_mark_blkg_idling(&blkg->stats);
+ spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);
+
+void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
+{
+ unsigned long flags;
+ unsigned long long now;
+ struct blkio_group_stats *stats;
+
+ spin_lock_irqsave(&blkg->stats_lock, flags);
+ stats = &blkg->stats;
+ if (blkio_blkg_idling(stats)) {
+ now = sched_clock();
+ if (time_after64(now, stats->start_idle_time))
+ stats->idle_time += now - stats->start_idle_time;
+ blkio_clear_blkg_idling(stats);
+ }
+ spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
+
+void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
+{
+ unsigned long flags;
+ struct blkio_group_stats *stats;
+
+ spin_lock_irqsave(&blkg->stats_lock, flags);
+ stats = &blkg->stats;
+ stats->avg_queue_size_sum +=
+ stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
+ stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
+ stats->avg_queue_size_samples++;
+ blkio_update_group_wait_time(stats);
+ spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
+
+void blkiocg_set_start_empty_time(struct blkio_group *blkg)
+{
+ unsigned long flags;
+ struct blkio_group_stats *stats;
+
+ spin_lock_irqsave(&blkg->stats_lock, flags);
+ stats = &blkg->stats;
+
+ if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
+ stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
+ spin_unlock_irqrestore(&blkg->stats_lock, flags);
+ return;
+ }
+
+ /*
+ * group is already marked empty. This can happen if cfqq got new
+ * request in parent group and moved to this group while being added
+ * to service tree. Just ignore the event and move on.
+ */
+ if (blkio_blkg_empty(stats)) {
+ spin_unlock_irqrestore(&blkg->stats_lock, flags);
+ return;
+ }
+
+ stats->start_empty_time = sched_clock();
+ blkio_mark_blkg_empty(stats);
+ spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
+
+void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
+ unsigned long dequeue)
+{
+ blkg->stats.dequeue += dequeue;
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
+#else
+static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
+ struct blkio_group *curr_blkg) {}
+static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
+#endif
+
+void blkiocg_update_io_add_stats(struct blkio_group *blkg,
+ struct blkio_group *curr_blkg, bool direction,
+ bool sync)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&blkg->stats_lock, flags);
+ blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
+ sync);
+ blkio_end_empty_time(&blkg->stats);
+ blkio_set_start_group_wait_time(blkg, curr_blkg);
+ spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
+
+void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
+ bool direction, bool sync)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&blkg->stats_lock, flags);
+ blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
+ direction, sync);
+ spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
+
+void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&blkg->stats_lock, flags);
+ blkg->stats.time += time;
+ spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
+
+void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
+ uint64_t bytes, bool direction, bool sync)
{
- blkg->time += time;
- blkg->sectors += sectors;
+ struct blkio_group_stats *stats;
+ unsigned long flags;
+
+ spin_lock_irqsave(&blkg->stats_lock, flags);
+ stats = &blkg->stats;
+ stats->sectors += bytes >> 9;
+ blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICED], 1, direction,
+ sync);
+ blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_BYTES], bytes,
+ direction, sync);
+ spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
-EXPORT_SYMBOL_GPL(blkiocg_update_blkio_group_stats);
+EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
+
+void blkiocg_update_completion_stats(struct blkio_group *blkg,
+ uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
+{
+ struct blkio_group_stats *stats;
+ unsigned long flags;
+ unsigned long long now = sched_clock();
+
+ spin_lock_irqsave(&blkg->stats_lock, flags);
+ stats = &blkg->stats;
+ if (time_after64(now, io_start_time))
+ blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
+ now - io_start_time, direction, sync);
+ if (time_after64(io_start_time, start_time))
+ blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
+ io_start_time - start_time, direction, sync);
+ spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
+
+void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
+ bool sync)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&blkg->stats_lock, flags);
+ blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_MERGED], 1, direction,
+ sync);
+ spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
struct blkio_group *blkg, void *key, dev_t dev)
@@ -70,14 +346,13 @@ void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
unsigned long flags;
spin_lock_irqsave(&blkcg->lock, flags);
+ spin_lock_init(&blkg->stats_lock);
rcu_assign_pointer(blkg->key, key);
blkg->blkcg_id = css_id(&blkcg->css);
hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
spin_unlock_irqrestore(&blkcg->lock, flags);
-#ifdef CONFIG_DEBUG_BLK_CGROUP
/* Need to take css reference ? */
cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
-#endif
blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);
@@ -101,17 +376,16 @@ int blkiocg_del_blkio_group(struct blkio_group *blkg)
rcu_read_lock();
css = css_lookup(&blkio_subsys, blkg->blkcg_id);
- if (!css)
- goto out;
-
- blkcg = container_of(css, struct blkio_cgroup, css);
- spin_lock_irqsave(&blkcg->lock, flags);
- if (!hlist_unhashed(&blkg->blkcg_node)) {
- __blkiocg_del_blkio_group(blkg);
- ret = 0;
+ if (css) {
+ blkcg = container_of(css, struct blkio_cgroup, css);
+ spin_lock_irqsave(&blkcg->lock, flags);
+ if (!hlist_unhashed(&blkg->blkcg_node)) {
+ __blkiocg_del_blkio_group(blkg);
+ ret = 0;
+ }
+ spin_unlock_irqrestore(&blkcg->lock, flags);
}
- spin_unlock_irqrestore(&blkcg->lock, flags);
-out:
+
rcu_read_unlock();
return ret;
}
@@ -154,6 +428,7 @@ blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
struct blkio_group *blkg;
struct hlist_node *n;
struct blkio_policy_type *blkiop;
+ struct blkio_policy_node *pn;
if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
return -EINVAL;
@@ -162,7 +437,13 @@ blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
spin_lock(&blkio_list_lock);
spin_lock_irq(&blkcg->lock);
blkcg->weight = (unsigned int)val;
+
hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+ pn = blkio_policy_search_node(blkcg, blkg->dev);
+
+ if (pn)
+ continue;
+
list_for_each_entry(blkiop, &blkio_list, list)
blkiop->ops.blkio_update_group_weight_fn(blkg,
blkcg->weight);
@@ -172,13 +453,154 @@ blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
return 0;
}
-#define SHOW_FUNCTION_PER_GROUP(__VAR) \
+static int
+blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
+{
+ struct blkio_cgroup *blkcg;
+ struct blkio_group *blkg;
+ struct blkio_group_stats *stats;
+ struct hlist_node *n;
+ uint64_t queued[BLKIO_STAT_TOTAL];
+ int i;
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ bool idling, waiting, empty;
+ unsigned long long now = sched_clock();
+#endif
+
+ blkcg = cgroup_to_blkio_cgroup(cgroup);
+ spin_lock_irq(&blkcg->lock);
+ hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+ spin_lock(&blkg->stats_lock);
+ stats = &blkg->stats;
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ idling = blkio_blkg_idling(stats);
+ waiting = blkio_blkg_waiting(stats);
+ empty = blkio_blkg_empty(stats);
+#endif
+ for (i = 0; i < BLKIO_STAT_TOTAL; i++)
+ queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
+ memset(stats, 0, sizeof(struct blkio_group_stats));
+ for (i = 0; i < BLKIO_STAT_TOTAL; i++)
+ stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ if (idling) {
+ blkio_mark_blkg_idling(stats);
+ stats->start_idle_time = now;
+ }
+ if (waiting) {
+ blkio_mark_blkg_waiting(stats);
+ stats->start_group_wait_time = now;
+ }
+ if (empty) {
+ blkio_mark_blkg_empty(stats);
+ stats->start_empty_time = now;
+ }
+#endif
+ spin_unlock(&blkg->stats_lock);
+ }
+ spin_unlock_irq(&blkcg->lock);
+ return 0;
+}
+
+static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
+ int chars_left, bool diskname_only)
+{
+ snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
+ chars_left -= strlen(str);
+ if (chars_left <= 0) {
+ printk(KERN_WARNING
+ "Possibly incorrect cgroup stat display format");
+ return;
+ }
+ if (diskname_only)
+ return;
+ switch (type) {
+ case BLKIO_STAT_READ:
+ strlcat(str, " Read", chars_left);
+ break;
+ case BLKIO_STAT_WRITE:
+ strlcat(str, " Write", chars_left);
+ break;
+ case BLKIO_STAT_SYNC:
+ strlcat(str, " Sync", chars_left);
+ break;
+ case BLKIO_STAT_ASYNC:
+ strlcat(str, " Async", chars_left);
+ break;
+ case BLKIO_STAT_TOTAL:
+ strlcat(str, " Total", chars_left);
+ break;
+ default:
+ strlcat(str, " Invalid", chars_left);
+ }
+}
+
+static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
+ struct cgroup_map_cb *cb, dev_t dev)
+{
+ blkio_get_key_name(0, dev, str, chars_left, true);
+ cb->fill(cb, str, val);
+ return val;
+}
+
+/* This should be called with blkg->stats_lock held */
+static uint64_t blkio_get_stat(struct blkio_group *blkg,
+ struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
+{
+ uint64_t disk_total;
+ char key_str[MAX_KEY_LEN];
+ enum stat_sub_type sub_type;
+
+ if (type == BLKIO_STAT_TIME)
+ return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
+ blkg->stats.time, cb, dev);
+ if (type == BLKIO_STAT_SECTORS)
+ return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
+ blkg->stats.sectors, cb, dev);
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
+ uint64_t sum = blkg->stats.avg_queue_size_sum;
+ uint64_t samples = blkg->stats.avg_queue_size_samples;
+ if (samples)
+ do_div(sum, samples);
+ else
+ sum = 0;
+ return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
+ }
+ if (type == BLKIO_STAT_GROUP_WAIT_TIME)
+ return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
+ blkg->stats.group_wait_time, cb, dev);
+ if (type == BLKIO_STAT_IDLE_TIME)
+ return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
+ blkg->stats.idle_time, cb, dev);
+ if (type == BLKIO_STAT_EMPTY_TIME)
+ return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
+ blkg->stats.empty_time, cb, dev);
+ if (type == BLKIO_STAT_DEQUEUE)
+ return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
+ blkg->stats.dequeue, cb, dev);
+#endif
+
+ for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
+ sub_type++) {
+ blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
+ cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
+ }
+ disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
+ blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
+ blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
+ cb->fill(cb, key_str, disk_total);
+ return disk_total;
+}
+
+#define SHOW_FUNCTION_PER_GROUP(__VAR, type, show_total) \
static int blkiocg_##__VAR##_read(struct cgroup *cgroup, \
- struct cftype *cftype, struct seq_file *m) \
+ struct cftype *cftype, struct cgroup_map_cb *cb) \
{ \
struct blkio_cgroup *blkcg; \
struct blkio_group *blkg; \
struct hlist_node *n; \
+ uint64_t cgroup_total = 0; \
\
if (!cgroup_lock_live_group(cgroup)) \
return -ENODEV; \
@@ -186,50 +608,293 @@ static int blkiocg_##__VAR##_read(struct cgroup *cgroup, \
blkcg = cgroup_to_blkio_cgroup(cgroup); \
rcu_read_lock(); \
hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\
- if (blkg->dev) \
- seq_printf(m, "%u:%u %lu\n", MAJOR(blkg->dev), \
- MINOR(blkg->dev), blkg->__VAR); \
+ if (blkg->dev) { \
+ spin_lock_irq(&blkg->stats_lock); \
+ cgroup_total += blkio_get_stat(blkg, cb, \
+ blkg->dev, type); \
+ spin_unlock_irq(&blkg->stats_lock); \
+ } \
} \
+ if (show_total) \
+ cb->fill(cb, "Total", cgroup_total); \
rcu_read_unlock(); \
cgroup_unlock(); \
return 0; \
}
-SHOW_FUNCTION_PER_GROUP(time);
-SHOW_FUNCTION_PER_GROUP(sectors);
+SHOW_FUNCTION_PER_GROUP(time, BLKIO_STAT_TIME, 0);
+SHOW_FUNCTION_PER_GROUP(sectors, BLKIO_STAT_SECTORS, 0);
+SHOW_FUNCTION_PER_GROUP(io_service_bytes, BLKIO_STAT_SERVICE_BYTES, 1);
+SHOW_FUNCTION_PER_GROUP(io_serviced, BLKIO_STAT_SERVICED, 1);
+SHOW_FUNCTION_PER_GROUP(io_service_time, BLKIO_STAT_SERVICE_TIME, 1);
+SHOW_FUNCTION_PER_GROUP(io_wait_time, BLKIO_STAT_WAIT_TIME, 1);
+SHOW_FUNCTION_PER_GROUP(io_merged, BLKIO_STAT_MERGED, 1);
+SHOW_FUNCTION_PER_GROUP(io_queued, BLKIO_STAT_QUEUED, 1);
#ifdef CONFIG_DEBUG_BLK_CGROUP
-SHOW_FUNCTION_PER_GROUP(dequeue);
+SHOW_FUNCTION_PER_GROUP(dequeue, BLKIO_STAT_DEQUEUE, 0);
+SHOW_FUNCTION_PER_GROUP(avg_queue_size, BLKIO_STAT_AVG_QUEUE_SIZE, 0);
+SHOW_FUNCTION_PER_GROUP(group_wait_time, BLKIO_STAT_GROUP_WAIT_TIME, 0);
+SHOW_FUNCTION_PER_GROUP(idle_time, BLKIO_STAT_IDLE_TIME, 0);
+SHOW_FUNCTION_PER_GROUP(empty_time, BLKIO_STAT_EMPTY_TIME, 0);
#endif
#undef SHOW_FUNCTION_PER_GROUP
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-void blkiocg_update_blkio_group_dequeue_stats(struct blkio_group *blkg,
- unsigned long dequeue)
+static int blkio_check_dev_num(dev_t dev)
{
- blkg->dequeue += dequeue;
+ int part = 0;
+ struct gendisk *disk;
+
+ disk = get_gendisk(dev, &part);
+ if (!disk || part)
+ return -ENODEV;
+
+ return 0;
+}
+
+static int blkio_policy_parse_and_set(char *buf,
+ struct blkio_policy_node *newpn)
+{
+ char *s[4], *p, *major_s = NULL, *minor_s = NULL;
+ int ret;
+ unsigned long major, minor, temp;
+ int i = 0;
+ dev_t dev;
+
+ memset(s, 0, sizeof(s));
+
+ while ((p = strsep(&buf, " ")) != NULL) {
+ if (!*p)
+ continue;
+
+ s[i++] = p;
+
+ /* Prevent inputting too many things */
+ if (i == 3)
+ break;
+ }
+
+ if (i != 2)
+ return -EINVAL;
+
+ p = strsep(&s[0], ":");
+ if (p != NULL)
+ major_s = p;
+ else
+ return -EINVAL;
+
+ minor_s = s[0];
+ if (!minor_s)
+ return -EINVAL;
+
+ ret = strict_strtoul(major_s, 10, &major);
+ if (ret)
+ return -EINVAL;
+
+ ret = strict_strtoul(minor_s, 10, &minor);
+ if (ret)
+ return -EINVAL;
+
+ dev = MKDEV(major, minor);
+
+ ret = blkio_check_dev_num(dev);
+ if (ret)
+ return ret;
+
+ newpn->dev = dev;
+
+ if (s[1] == NULL)
+ return -EINVAL;
+
+ ret = strict_strtoul(s[1], 10, &temp);
+ if (ret || (temp < BLKIO_WEIGHT_MIN && temp > 0) ||
+ temp > BLKIO_WEIGHT_MAX)
+ return -EINVAL;
+
+ newpn->weight = temp;
+
+ return 0;
+}
+
+unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
+ dev_t dev)
+{
+ struct blkio_policy_node *pn;
+
+ pn = blkio_policy_search_node(blkcg, dev);
+ if (pn)
+ return pn->weight;
+ else
+ return blkcg->weight;
+}
+EXPORT_SYMBOL_GPL(blkcg_get_weight);
+
+
+static int blkiocg_weight_device_write(struct cgroup *cgrp, struct cftype *cft,
+ const char *buffer)
+{
+ int ret = 0;
+ char *buf;
+ struct blkio_policy_node *newpn, *pn;
+ struct blkio_cgroup *blkcg;
+ struct blkio_group *blkg;
+ int keep_newpn = 0;
+ struct hlist_node *n;
+ struct blkio_policy_type *blkiop;
+
+ buf = kstrdup(buffer, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
+ if (!newpn) {
+ ret = -ENOMEM;
+ goto free_buf;
+ }
+
+ ret = blkio_policy_parse_and_set(buf, newpn);
+ if (ret)
+ goto free_newpn;
+
+ blkcg = cgroup_to_blkio_cgroup(cgrp);
+
+ spin_lock_irq(&blkcg->lock);
+
+ pn = blkio_policy_search_node(blkcg, newpn->dev);
+ if (!pn) {
+ if (newpn->weight != 0) {
+ blkio_policy_insert_node(blkcg, newpn);
+ keep_newpn = 1;
+ }
+ spin_unlock_irq(&blkcg->lock);
+ goto update_io_group;
+ }
+
+ if (newpn->weight == 0) {
+ /* weight == 0 means deleting a specific weight */
+ blkio_policy_delete_node(pn);
+ spin_unlock_irq(&blkcg->lock);
+ goto update_io_group;
+ }
+ spin_unlock_irq(&blkcg->lock);
+
+ pn->weight = newpn->weight;
+
+update_io_group:
+ /* update weight for each cfqg */
+ spin_lock(&blkio_list_lock);
+ spin_lock_irq(&blkcg->lock);
+
+ hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+ if (newpn->dev == blkg->dev) {
+ list_for_each_entry(blkiop, &blkio_list, list)
+ blkiop->ops.blkio_update_group_weight_fn(blkg,
+ newpn->weight ?
+ newpn->weight :
+ blkcg->weight);
+ }
+ }
+
+ spin_unlock_irq(&blkcg->lock);
+ spin_unlock(&blkio_list_lock);
+
+free_newpn:
+ if (!keep_newpn)
+ kfree(newpn);
+free_buf:
+ kfree(buf);
+ return ret;
+}
+
+static int blkiocg_weight_device_read(struct cgroup *cgrp, struct cftype *cft,
+ struct seq_file *m)
+{
+ struct blkio_cgroup *blkcg;
+ struct blkio_policy_node *pn;
+
+ seq_printf(m, "dev\tweight\n");
+
+ blkcg = cgroup_to_blkio_cgroup(cgrp);
+ if (!list_empty(&blkcg->policy_list)) {
+ spin_lock_irq(&blkcg->lock);
+ list_for_each_entry(pn, &blkcg->policy_list, node) {
+ seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
+ MINOR(pn->dev), pn->weight);
+ }
+ spin_unlock_irq(&blkcg->lock);
+ }
+
+ return 0;
}
-EXPORT_SYMBOL_GPL(blkiocg_update_blkio_group_dequeue_stats);
-#endif
struct cftype blkio_files[] = {
{
+ .name = "weight_device",
+ .read_seq_string = blkiocg_weight_device_read,
+ .write_string = blkiocg_weight_device_write,
+ .max_write_len = 256,
+ },
+ {
.name = "weight",
.read_u64 = blkiocg_weight_read,
.write_u64 = blkiocg_weight_write,
},
{
.name = "time",
- .read_seq_string = blkiocg_time_read,
+ .read_map = blkiocg_time_read,
},
{
.name = "sectors",
- .read_seq_string = blkiocg_sectors_read,
+ .read_map = blkiocg_sectors_read,
+ },
+ {
+ .name = "io_service_bytes",
+ .read_map = blkiocg_io_service_bytes_read,
+ },
+ {
+ .name = "io_serviced",
+ .read_map = blkiocg_io_serviced_read,
+ },
+ {
+ .name = "io_service_time",
+ .read_map = blkiocg_io_service_time_read,
+ },
+ {
+ .name = "io_wait_time",
+ .read_map = blkiocg_io_wait_time_read,
+ },
+ {
+ .name = "io_merged",
+ .read_map = blkiocg_io_merged_read,
+ },
+ {
+ .name = "io_queued",
+ .read_map = blkiocg_io_queued_read,
+ },
+ {
+ .name = "reset_stats",
+ .write_u64 = blkiocg_reset_stats,
},
#ifdef CONFIG_DEBUG_BLK_CGROUP
- {
+ {
+ .name = "avg_queue_size",
+ .read_map = blkiocg_avg_queue_size_read,
+ },
+ {
+ .name = "group_wait_time",
+ .read_map = blkiocg_group_wait_time_read,
+ },
+ {
+ .name = "idle_time",
+ .read_map = blkiocg_idle_time_read,
+ },
+ {
+ .name = "empty_time",
+ .read_map = blkiocg_empty_time_read,
+ },
+ {
.name = "dequeue",
- .read_seq_string = blkiocg_dequeue_read,
- },
+ .read_map = blkiocg_dequeue_read,
+ },
#endif
};
@@ -246,37 +911,42 @@ static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
struct blkio_group *blkg;
void *key;
struct blkio_policy_type *blkiop;
+ struct blkio_policy_node *pn, *pntmp;
rcu_read_lock();
-remove_entry:
- spin_lock_irqsave(&blkcg->lock, flags);
+ do {
+ spin_lock_irqsave(&blkcg->lock, flags);
+
+ if (hlist_empty(&blkcg->blkg_list)) {
+ spin_unlock_irqrestore(&blkcg->lock, flags);
+ break;
+ }
+
+ blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
+ blkcg_node);
+ key = rcu_dereference(blkg->key);
+ __blkiocg_del_blkio_group(blkg);
- if (hlist_empty(&blkcg->blkg_list)) {
spin_unlock_irqrestore(&blkcg->lock, flags);
- goto done;
- }
- blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
- blkcg_node);
- key = rcu_dereference(blkg->key);
- __blkiocg_del_blkio_group(blkg);
+ /*
+ * This blkio_group is being unlinked as the associated cgroup is
+ * going away. Let all the IO controlling policies know about
+ * this event. Currently this is a static call to one IO
+ * controlling policy. Once we have more policies in place, we
+ * will need some dynamic registration of callback functions.
+ */
+ spin_lock(&blkio_list_lock);
+ list_for_each_entry(blkiop, &blkio_list, list)
+ blkiop->ops.blkio_unlink_group_fn(key, blkg);
+ spin_unlock(&blkio_list_lock);
+ } while (1);
- spin_unlock_irqrestore(&blkcg->lock, flags);
+ list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
+ blkio_policy_delete_node(pn);
+ kfree(pn);
+ }
- /*
- * This blkio_group is being unlinked as associated cgroup is going
- * away. Let all the IO controlling policies know about this event.
- *
- * Currently this is static call to one io controlling policy. Once
- * we have more policies in place, we need some dynamic registration
- * of callback function.
- */
- spin_lock(&blkio_list_lock);
- list_for_each_entry(blkiop, &blkio_list, list)
- blkiop->ops.blkio_unlink_group_fn(key, blkg);
- spin_unlock(&blkio_list_lock);
- goto remove_entry;
-done:
free_css_id(&blkio_subsys, &blkcg->css);
rcu_read_unlock();
if (blkcg != &blkio_root_cgroup)
@@ -307,6 +977,7 @@ done:
spin_lock_init(&blkcg->lock);
INIT_HLIST_HEAD(&blkcg->blkg_list);
+ INIT_LIST_HEAD(&blkcg->policy_list);
return &blkcg->css;
}
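A sketch of how an IO scheduling policy is expected to feed the new accounting hooks, under the assumption of hypothetical my_dispatch()/my_complete() call sites (the real caller in this series is CFQ, further below):

static void my_dispatch(struct blkio_group *blkg, struct request *rq)
{
	/* bytes are converted to sectors internally (bytes >> 9) and split per direction/sync */
	blkiocg_update_dispatch_stats(blkg, blk_rq_bytes(rq),
				      rq_data_dir(rq), rq_is_sync(rq));
}

static void my_complete(struct blkio_group *blkg, struct request *rq)
{
	/* wait time = io_start_time - start_time, service time = now - io_start_time */
	blkiocg_update_completion_stats(blkg, rq_start_time_ns(rq),
					rq_io_start_time_ns(rq),
					rq_data_dir(rq), rq_is_sync(rq));
}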
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 8ccc20464dae..2b866ec1dcea 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -23,11 +23,84 @@ extern struct cgroup_subsys blkio_subsys;
#define blkio_subsys_id blkio_subsys.subsys_id
#endif
+enum stat_type {
+ /* Total time spent (in ns) between request dispatch to the driver and
+ * request completion for IOs doen by this cgroup. This may not be
+ * accurate when NCQ is turned on. */
+ BLKIO_STAT_SERVICE_TIME = 0,
+ /* Total bytes transferred */
+ BLKIO_STAT_SERVICE_BYTES,
+ /* Total IOs serviced, post merge */
+ BLKIO_STAT_SERVICED,
+ /* Total time spent waiting in scheduler queue in ns */
+ BLKIO_STAT_WAIT_TIME,
+ /* Number of IOs merged */
+ BLKIO_STAT_MERGED,
+ /* Number of IOs queued up */
+ BLKIO_STAT_QUEUED,
+ /* All the single valued stats go below this */
+ BLKIO_STAT_TIME,
+ BLKIO_STAT_SECTORS,
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ BLKIO_STAT_AVG_QUEUE_SIZE,
+ BLKIO_STAT_IDLE_TIME,
+ BLKIO_STAT_EMPTY_TIME,
+ BLKIO_STAT_GROUP_WAIT_TIME,
+ BLKIO_STAT_DEQUEUE
+#endif
+};
+
+enum stat_sub_type {
+ BLKIO_STAT_READ = 0,
+ BLKIO_STAT_WRITE,
+ BLKIO_STAT_SYNC,
+ BLKIO_STAT_ASYNC,
+ BLKIO_STAT_TOTAL
+};
+
+/* blkg state flags */
+enum blkg_state_flags {
+ BLKG_waiting = 0,
+ BLKG_idling,
+ BLKG_empty,
+};
+
struct blkio_cgroup {
struct cgroup_subsys_state css;
unsigned int weight;
spinlock_t lock;
struct hlist_head blkg_list;
+ struct list_head policy_list; /* list of blkio_policy_node */
+};
+
+struct blkio_group_stats {
+ /* total disk time and nr sectors dispatched by this group */
+ uint64_t time;
+ uint64_t sectors;
+ uint64_t stat_arr[BLKIO_STAT_QUEUED + 1][BLKIO_STAT_TOTAL];
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ /* Sum of number of IOs queued across all samples */
+ uint64_t avg_queue_size_sum;
+ /* Count of samples taken for average */
+ uint64_t avg_queue_size_samples;
+ /* How many times this group has been removed from service tree */
+ unsigned long dequeue;
+
+ /* Total time spent waiting for it to be assigned a timeslice. */
+ uint64_t group_wait_time;
+ uint64_t start_group_wait_time;
+
+ /* Time spent idling for this blkio_group */
+ uint64_t idle_time;
+ uint64_t start_idle_time;
+ /*
+ * Total time when we have requests queued and do not contain the
+ * current active queue.
+ */
+ uint64_t empty_time;
+ uint64_t start_empty_time;
+ uint16_t flags;
+#endif
};
struct blkio_group {
@@ -35,20 +108,25 @@ struct blkio_group {
void *key;
struct hlist_node blkcg_node;
unsigned short blkcg_id;
-#ifdef CONFIG_DEBUG_BLK_CGROUP
/* Store cgroup path */
char path[128];
- /* How many times this group has been removed from service tree */
- unsigned long dequeue;
-#endif
/* The device MKDEV(major, minor), this group has been created for */
- dev_t dev;
+ dev_t dev;
- /* total disk time and nr sectors dispatched by this group */
- unsigned long time;
- unsigned long sectors;
+ /* Need to serialize the stats in the case of reset/update */
+ spinlock_t stats_lock;
+ struct blkio_group_stats stats;
};
+struct blkio_policy_node {
+ struct list_head node;
+ dev_t dev;
+ unsigned int weight;
+};
+
+extern unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
+ dev_t dev);
+
typedef void (blkio_unlink_group_fn) (void *key, struct blkio_group *blkg);
typedef void (blkio_update_group_weight_fn) (struct blkio_group *blkg,
unsigned int weight);
@@ -67,6 +145,11 @@ struct blkio_policy_type {
extern void blkio_policy_register(struct blkio_policy_type *);
extern void blkio_policy_unregister(struct blkio_policy_type *);
+static inline char *blkg_path(struct blkio_group *blkg)
+{
+ return blkg->path;
+}
+
#else
struct blkio_group {
@@ -78,6 +161,8 @@ struct blkio_policy_type {
static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { }
static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }
+static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }
+
#endif
#define BLKIO_WEIGHT_MIN 100
@@ -85,16 +170,42 @@ static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }
#define BLKIO_WEIGHT_DEFAULT 500
#ifdef CONFIG_DEBUG_BLK_CGROUP
-static inline char *blkg_path(struct blkio_group *blkg)
-{
- return blkg->path;
-}
-void blkiocg_update_blkio_group_dequeue_stats(struct blkio_group *blkg,
+void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg);
+void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
unsigned long dequeue);
+void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg);
+void blkiocg_update_idle_time_stats(struct blkio_group *blkg);
+void blkiocg_set_start_empty_time(struct blkio_group *blkg);
+
+#define BLKG_FLAG_FNS(name) \
+static inline void blkio_mark_blkg_##name( \
+ struct blkio_group_stats *stats) \
+{ \
+ stats->flags |= (1 << BLKG_##name); \
+} \
+static inline void blkio_clear_blkg_##name( \
+ struct blkio_group_stats *stats) \
+{ \
+ stats->flags &= ~(1 << BLKG_##name); \
+} \
+static inline int blkio_blkg_##name(struct blkio_group_stats *stats) \
+{ \
+ return (stats->flags & (1 << BLKG_##name)) != 0; \
+} \
+
+BLKG_FLAG_FNS(waiting)
+BLKG_FLAG_FNS(idling)
+BLKG_FLAG_FNS(empty)
+#undef BLKG_FLAG_FNS
#else
-static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }
-static inline void blkiocg_update_blkio_group_dequeue_stats(
- struct blkio_group *blkg, unsigned long dequeue) {}
+static inline void blkiocg_update_avg_queue_size_stats(
+ struct blkio_group *blkg) {}
+static inline void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
+ unsigned long dequeue) {}
+static inline void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
+{}
+static inline void blkiocg_update_idle_time_stats(struct blkio_group *blkg) {}
+static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
#endif
#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
@@ -105,26 +216,43 @@ extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
extern int blkiocg_del_blkio_group(struct blkio_group *blkg);
extern struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg,
void *key);
-void blkiocg_update_blkio_group_stats(struct blkio_group *blkg,
- unsigned long time, unsigned long sectors);
+void blkiocg_update_timeslice_used(struct blkio_group *blkg,
+ unsigned long time);
+void blkiocg_update_dispatch_stats(struct blkio_group *blkg, uint64_t bytes,
+ bool direction, bool sync);
+void blkiocg_update_completion_stats(struct blkio_group *blkg,
+ uint64_t start_time, uint64_t io_start_time, bool direction, bool sync);
+void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
+ bool sync);
+void blkiocg_update_io_add_stats(struct blkio_group *blkg,
+ struct blkio_group *curr_blkg, bool direction, bool sync);
+void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
+ bool direction, bool sync);
#else
struct cgroup;
static inline struct blkio_cgroup *
cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }
static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
- struct blkio_group *blkg, void *key, dev_t dev)
-{
-}
+ struct blkio_group *blkg, void *key, dev_t dev) {}
static inline int
blkiocg_del_blkio_group(struct blkio_group *blkg) { return 0; }
static inline struct blkio_group *
blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key) { return NULL; }
-static inline void blkiocg_update_blkio_group_stats(struct blkio_group *blkg,
- unsigned long time, unsigned long sectors)
-{
-}
+static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg,
+ unsigned long time) {}
+static inline void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
+ uint64_t bytes, bool direction, bool sync) {}
+static inline void blkiocg_update_completion_stats(struct blkio_group *blkg,
+ uint64_t start_time, uint64_t io_start_time, bool direction,
+ bool sync) {}
+static inline void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
+ bool direction, bool sync) {}
+static inline void blkiocg_update_io_add_stats(struct blkio_group *blkg,
+ struct blkio_group *curr_blkg, bool direction, bool sync) {}
+static inline void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
+ bool direction, bool sync) {}
#endif
#endif /* _BLK_CGROUP_H */
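For reference, BLKG_FLAG_FNS(waiting) expands to the following helpers; this is just the mechanical expansion of the macro above, not additional patch text:

static inline void blkio_mark_blkg_waiting(struct blkio_group_stats *stats)
{
	stats->flags |= (1 << BLKG_waiting);
}
static inline void blkio_clear_blkg_waiting(struct blkio_group_stats *stats)
{
	stats->flags &= ~(1 << BLKG_waiting);
}
static inline int blkio_blkg_waiting(struct blkio_group_stats *stats)
{
	return (stats->flags & (1 << BLKG_waiting)) != 0;
}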
diff --git a/block/blk-core.c b/block/blk-core.c
index 9fe174dc74d1..fdfbc5a1213f 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -127,6 +127,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
rq->tag = -1;
rq->ref_count = 1;
rq->start_time = jiffies;
+ set_start_time_ns(rq);
}
EXPORT_SYMBOL(blk_rq_init);
@@ -450,6 +451,7 @@ void blk_cleanup_queue(struct request_queue *q)
*/
blk_sync_queue(q);
+ del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
mutex_lock(&q->sysfs_lock);
queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
mutex_unlock(&q->sysfs_lock);
@@ -486,6 +488,21 @@ struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
}
EXPORT_SYMBOL(blk_alloc_queue);
+static void laptop_mode_timer_fn(unsigned long data)
+{
+ struct request_queue *q = (struct request_queue *)data;
+ int nr_pages = global_page_state(NR_FILE_DIRTY) +
+ global_page_state(NR_UNSTABLE_NFS);
+
+ /*
+ * We want to write everything out, not just down to the dirty
+ * threshold
+ */
+
+ if (bdi_has_dirty_io(&q->backing_dev_info))
+ bdi_start_writeback(&q->backing_dev_info, NULL, nr_pages);
+}
+
struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
struct request_queue *q;
@@ -510,6 +527,8 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
return NULL;
}
+ setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
+ laptop_mode_timer_fn, (unsigned long) q);
init_timer(&q->unplug_timer);
setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
INIT_LIST_HEAD(&q->timeout_list);
@@ -568,6 +587,22 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
+ return blk_init_allocated_queue_node(q, rfn, lock, node_id);
+}
+EXPORT_SYMBOL(blk_init_queue_node);
+
+struct request_queue *
+blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
+ spinlock_t *lock)
+{
+ return blk_init_allocated_queue_node(q, rfn, lock, -1);
+}
+EXPORT_SYMBOL(blk_init_allocated_queue);
+
+struct request_queue *
+blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
+ spinlock_t *lock, int node_id)
+{
if (!q)
return NULL;
@@ -601,7 +636,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
blk_put_queue(q);
return NULL;
}
-EXPORT_SYMBOL(blk_init_queue_node);
+EXPORT_SYMBOL(blk_init_allocated_queue_node);
int blk_get_queue(struct request_queue *q)
{
@@ -1198,6 +1233,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
if (!blk_rq_cpu_valid(req))
req->cpu = bio->bi_comp_cpu;
drive_stat_acct(req, 0);
+ elv_bio_merged(q, req, bio);
if (!attempt_back_merge(q, req))
elv_merged_request(q, req, el_ret);
goto out;
@@ -1231,6 +1267,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
if (!blk_rq_cpu_valid(req))
req->cpu = bio->bi_comp_cpu;
drive_stat_acct(req, 0);
+ elv_bio_merged(q, req, bio);
if (!attempt_front_merge(q, req))
elv_merged_request(q, req, el_ret);
goto out;
@@ -1855,8 +1892,10 @@ void blk_dequeue_request(struct request *rq)
* and to it is freed is accounted as io that is in progress at
* the driver side.
*/
- if (blk_account_rq(rq))
+ if (blk_account_rq(rq)) {
q->in_flight[rq_is_sync(rq)]++;
+ set_io_start_time_ns(rq);
+ }
}
/**
@@ -2098,7 +2137,7 @@ static void blk_finish_request(struct request *req, int error)
BUG_ON(blk_queued_rq(req));
if (unlikely(laptop_mode) && blk_fs_request(req))
- laptop_io_completion();
+ laptop_io_completion(&req->q->backing_dev_info);
blk_delete_timer(req);
@@ -2517,4 +2556,3 @@ int __init blk_dev_init(void)
return 0;
}
-
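Sketch of how a driver that allocates its queue early (device-mapper is the intended user of the new split) would attach its request handler later; my_request_fn and my_lock are placeholders, and on failure the helper drops its own queue reference and returns NULL, as in the hunk above:

static struct request_queue *example_setup_queue(request_fn_proc *my_request_fn,
						 spinlock_t *my_lock)
{
	struct request_queue *q;

	q = blk_alloc_queue(GFP_KERNEL);
	if (!q)
		return NULL;

	/* attach the request handling function once it is known */
	return blk_init_allocated_queue(q, my_request_fn, my_lock);
}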
diff --git a/block/blk-lib.c b/block/blk-lib.c
new file mode 100644
index 000000000000..d0216b9f22d4
--- /dev/null
+++ b/block/blk-lib.c
@@ -0,0 +1,233 @@
+/*
+ * Generic block layer helper functions
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/scatterlist.h>
+
+#include "blk.h"
+
+static void blkdev_discard_end_io(struct bio *bio, int err)
+{
+ if (err) {
+ if (err == -EOPNOTSUPP)
+ set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
+ clear_bit(BIO_UPTODATE, &bio->bi_flags);
+ }
+
+ if (bio->bi_private)
+ complete(bio->bi_private);
+ __free_page(bio_page(bio));
+
+ bio_put(bio);
+}
+
+/**
+ * blkdev_issue_discard - queue a discard
+ * @bdev: blockdev to issue discard for
+ * @sector: start sector
+ * @nr_sects: number of sectors to discard
+ * @gfp_mask: memory allocation flags (for bio_alloc)
+ * @flags: BLKDEV_IFL_* flags to control behaviour
+ *
+ * Description:
+ * Issue a discard request for the sectors in question.
+ */
+int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
+{
+ DECLARE_COMPLETION_ONSTACK(wait);
+ struct request_queue *q = bdev_get_queue(bdev);
+ int type = flags & BLKDEV_IFL_BARRIER ?
+ DISCARD_BARRIER : DISCARD_NOBARRIER;
+ struct bio *bio;
+ struct page *page;
+ int ret = 0;
+
+ if (!q)
+ return -ENXIO;
+
+ if (!blk_queue_discard(q))
+ return -EOPNOTSUPP;
+
+ while (nr_sects && !ret) {
+ unsigned int sector_size = q->limits.logical_block_size;
+ unsigned int max_discard_sectors =
+ min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+
+ bio = bio_alloc(gfp_mask, 1);
+ if (!bio)
+ goto out;
+ bio->bi_sector = sector;
+ bio->bi_end_io = blkdev_discard_end_io;
+ bio->bi_bdev = bdev;
+ if (flags & BLKDEV_IFL_WAIT)
+ bio->bi_private = &wait;
+
+ /*
+ * Add a zeroed one-sector payload as that's what
+ * our current implementations need. If we'll ever need
+ * more the interface will need revisiting.
+ */
+ page = alloc_page(gfp_mask | __GFP_ZERO);
+ if (!page)
+ goto out_free_bio;
+ if (bio_add_pc_page(q, bio, page, sector_size, 0) < sector_size)
+ goto out_free_page;
+
+ /*
+ * And override the bio size - the way discard works we
+ * touch many more blocks on disk than the actual payload
+ * length.
+ */
+ if (nr_sects > max_discard_sectors) {
+ bio->bi_size = max_discard_sectors << 9;
+ nr_sects -= max_discard_sectors;
+ sector += max_discard_sectors;
+ } else {
+ bio->bi_size = nr_sects << 9;
+ nr_sects = 0;
+ }
+
+ bio_get(bio);
+ submit_bio(type, bio);
+
+ if (flags & BLKDEV_IFL_WAIT)
+ wait_for_completion(&wait);
+
+ if (bio_flagged(bio, BIO_EOPNOTSUPP))
+ ret = -EOPNOTSUPP;
+ else if (!bio_flagged(bio, BIO_UPTODATE))
+ ret = -EIO;
+ bio_put(bio);
+ }
+ return ret;
+out_free_page:
+ __free_page(page);
+out_free_bio:
+ bio_put(bio);
+out:
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(blkdev_issue_discard);
+
+struct bio_batch
+{
+ atomic_t done;
+ unsigned long flags;
+ struct completion *wait;
+ bio_end_io_t *end_io;
+};
+
+static void bio_batch_end_io(struct bio *bio, int err)
+{
+ struct bio_batch *bb = bio->bi_private;
+
+ if (err) {
+ if (err == -EOPNOTSUPP)
+ set_bit(BIO_EOPNOTSUPP, &bb->flags);
+ else
+ clear_bit(BIO_UPTODATE, &bb->flags);
+ }
+ if (bb) {
+ if (bb->end_io)
+ bb->end_io(bio, err);
+ atomic_inc(&bb->done);
+ complete(bb->wait);
+ }
+ bio_put(bio);
+}
+
+/**
+ * blkdev_issue_zeroout - generate a number of zero-filled write bios
+ * @bdev: blockdev to issue the writes against
+ * @sector: start sector
+ * @nr_sects: number of sectors to write
+ * @gfp_mask: memory allocation flags (for bio_alloc)
+ * @flags: BLKDEV_IFL_* flags to control behaviour
+ *
+ * Description:
+ * Generate and issue a number of bios with zero-filled pages.
+ * Send a barrier at the beginning and at the end if requested; this guarantees
+ * correct request ordering. An empty barrier allows us to avoid a post-queue flush.
+ */
+
+int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
+{
+ int ret = 0;
+ struct bio *bio;
+ struct bio_batch bb;
+ unsigned int sz, issued = 0;
+ DECLARE_COMPLETION_ONSTACK(wait);
+
+ atomic_set(&bb.done, 0);
+ bb.flags = 1 << BIO_UPTODATE;
+ bb.wait = &wait;
+ bb.end_io = NULL;
+
+ if (flags & BLKDEV_IFL_BARRIER) {
+ /* issue async barrier before the data */
+ ret = blkdev_issue_flush(bdev, gfp_mask, NULL, 0);
+ if (ret)
+ return ret;
+ }
+submit:
+ while (nr_sects != 0) {
+ bio = bio_alloc(gfp_mask,
+ min(nr_sects, (sector_t)BIO_MAX_PAGES));
+ if (!bio)
+ break;
+
+ bio->bi_sector = sector;
+ bio->bi_bdev = bdev;
+ bio->bi_end_io = bio_batch_end_io;
+ if (flags & BLKDEV_IFL_WAIT)
+ bio->bi_private = &bb;
+
+ while (nr_sects != 0) {
+ sz = min((sector_t) PAGE_SIZE >> 9, nr_sects);
+ if (sz == 0)
+ /* bio has maximum size possible */
+ break;
+ ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
+ nr_sects -= ret >> 9;
+ sector += ret >> 9;
+ if (ret < (sz << 9))
+ break;
+ }
+ issued++;
+ submit_bio(WRITE, bio);
+ }
+ /*
+ * When all data bios are in flight, send the final barrier if requested.
+ */
+ if (nr_sects == 0 && flags & BLKDEV_IFL_BARRIER)
+ ret = blkdev_issue_flush(bdev, gfp_mask, NULL,
+ flags & BLKDEV_IFL_WAIT);
+
+
+ if (flags & BLKDEV_IFL_WAIT)
+ /* Wait for bios in-flight */
+ while (issued != atomic_read(&bb.done))
+ wait_for_completion(&wait);
+
+ if (!test_bit(BIO_UPTODATE, &bb.flags))
+ /* One of bios in the batch was completed with error.*/
+ ret = -EIO;
+
+ if (ret)
+ goto out;
+
+ if (test_bit(BIO_EOPNOTSUPP, &bb.flags)) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+ if (nr_sects != 0)
+ goto submit;
+out:
+ return ret;
+}
+EXPORT_SYMBOL(blkdev_issue_zeroout);
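Illustrative synchronous callers of the two helpers now living in blk-lib.c; example_trim and example_zero are made-up names, the signatures and BLKDEV_IFL_WAIT usage match the code above:

static int example_trim(struct block_device *bdev, sector_t start, sector_t nr)
{
	/* discard the range and wait for completion, no barrier */
	return blkdev_issue_discard(bdev, start, nr, GFP_KERNEL,
				    BLKDEV_IFL_WAIT);
}

static int example_zero(struct block_device *bdev, sector_t start, sector_t nr)
{
	/* write zero-filled pages over the range and wait for completion */
	return blkdev_issue_zeroout(bdev, start, nr, GFP_KERNEL,
				    BLKDEV_IFL_WAIT);
}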
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 5f127cfb2e92..54c1e59e5c43 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -55,6 +55,7 @@ static const int cfq_hist_divisor = 4;
#define RQ_CIC(rq) \
((struct cfq_io_context *) (rq)->elevator_private)
#define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elevator_private2)
+#define RQ_CFQG(rq) (struct cfq_group *) ((rq)->elevator_private3)
static struct kmem_cache *cfq_pool;
static struct kmem_cache *cfq_ioc_pool;
@@ -143,8 +144,6 @@ struct cfq_queue {
struct cfq_queue *new_cfqq;
struct cfq_group *cfqg;
struct cfq_group *orig_cfqg;
- /* Sectors dispatched in current dispatch round */
- unsigned long nr_sectors;
};
/*
@@ -346,7 +345,7 @@ CFQ_CFQQ_FNS(deep);
CFQ_CFQQ_FNS(wait_busy);
#undef CFQ_CFQQ_FNS
-#ifdef CONFIG_DEBUG_CFQ_IOSCHED
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
@@ -858,7 +857,7 @@ cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
if (!RB_EMPTY_NODE(&cfqg->rb_node))
cfq_rb_erase(&cfqg->rb_node, st);
cfqg->saved_workload_slice = 0;
- blkiocg_update_blkio_group_dequeue_stats(&cfqg->blkg, 1);
+ blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
}
static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
@@ -884,8 +883,7 @@ static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
slice_used = cfqq->allocated_slice;
}
- cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u sect=%lu", slice_used,
- cfqq->nr_sectors);
+ cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u", slice_used);
return slice_used;
}
@@ -919,8 +917,8 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
st->min_vdisktime);
- blkiocg_update_blkio_group_stats(&cfqg->blkg, used_sl,
- cfqq->nr_sectors);
+ blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
+ blkiocg_set_start_empty_time(&cfqg->blkg);
}
#ifdef CONFIG_CFQ_GROUP_IOSCHED
@@ -961,7 +959,6 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
if (!cfqg)
goto done;
- cfqg->weight = blkcg->weight;
for_each_cfqg_st(cfqg, i, j, st)
*st = CFQ_RB_ROOT;
RB_CLEAR_NODE(&cfqg->rb_node);
@@ -978,6 +975,7 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
MKDEV(major, minor));
+ cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
/* Add group on cfqd list */
hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
@@ -1004,6 +1002,12 @@ static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
return cfqg;
}
+static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
+{
+ atomic_inc(&cfqg->ref);
+ return cfqg;
+}
+
static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
{
/* Currently, all async queues are mapped to root group */
@@ -1087,6 +1091,12 @@ static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
{
return &cfqd->root_group;
}
+
+static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
+{
+ return cfqg;
+}
+
static inline void
cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
cfqq->cfqg = cfqg;
@@ -1389,7 +1399,12 @@ static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
elv_rb_del(&cfqq->sort_list, rq);
cfqq->queued[rq_is_sync(rq)]--;
+ blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(rq),
+ rq_is_sync(rq));
cfq_add_rq_rb(rq);
+ blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
+ &cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
+ rq_is_sync(rq));
}
static struct request *
@@ -1445,6 +1460,8 @@ static void cfq_remove_request(struct request *rq)
cfq_del_rq_rb(rq);
cfqq->cfqd->rq_queued--;
+ blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(rq),
+ rq_is_sync(rq));
if (rq_is_meta(rq)) {
WARN_ON(!cfqq->meta_pending);
cfqq->meta_pending--;
@@ -1476,6 +1493,13 @@ static void cfq_merged_request(struct request_queue *q, struct request *req,
}
}
+static void cfq_bio_merged(struct request_queue *q, struct request *req,
+ struct bio *bio)
+{
+ blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg, bio_data_dir(bio),
+ cfq_bio_sync(bio));
+}
+
static void
cfq_merged_requests(struct request_queue *q, struct request *rq,
struct request *next)
@@ -1493,6 +1517,8 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
if (cfqq->next_rq == next)
cfqq->next_rq = rq;
cfq_remove_request(next);
+ blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(next),
+ rq_is_sync(next));
}
static int cfq_allow_merge(struct request_queue *q, struct request *rq,
@@ -1520,18 +1546,24 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
return cfqq == RQ_CFQQ(rq);
}
+static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ del_timer(&cfqd->idle_slice_timer);
+ blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
+}
+
static void __cfq_set_active_queue(struct cfq_data *cfqd,
struct cfq_queue *cfqq)
{
if (cfqq) {
cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
cfqd->serving_prio, cfqd->serving_type);
+ blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
cfqq->slice_start = 0;
cfqq->dispatch_start = jiffies;
cfqq->allocated_slice = 0;
cfqq->slice_end = 0;
cfqq->slice_dispatch = 0;
- cfqq->nr_sectors = 0;
cfq_clear_cfqq_wait_request(cfqq);
cfq_clear_cfqq_must_dispatch(cfqq);
@@ -1539,7 +1571,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
cfq_clear_cfqq_fifo_expire(cfqq);
cfq_mark_cfqq_slice_new(cfqq);
- del_timer(&cfqd->idle_slice_timer);
+ cfq_del_timer(cfqd, cfqq);
}
cfqd->active_queue = cfqq;
@@ -1555,7 +1587,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
if (cfq_cfqq_wait_request(cfqq))
- del_timer(&cfqd->idle_slice_timer);
+ cfq_del_timer(cfqd, cfqq);
cfq_clear_cfqq_wait_request(cfqq);
cfq_clear_cfqq_wait_busy(cfqq);
@@ -1857,6 +1889,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
sl = cfqd->cfq_slice_idle;
mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
+ blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
}
@@ -1876,7 +1909,8 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
elv_dispatch_sort(q, rq);
cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
- cfqq->nr_sectors += blk_rq_sectors(rq);
+ blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
+ rq_data_dir(rq), rq_is_sync(rq));
}
/*
@@ -3185,11 +3219,14 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
if (cfq_cfqq_wait_request(cfqq)) {
if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
cfqd->busy_queues > 1) {
- del_timer(&cfqd->idle_slice_timer);
+ cfq_del_timer(cfqd, cfqq);
cfq_clear_cfqq_wait_request(cfqq);
__blk_run_queue(cfqd->queue);
- } else
+ } else {
+ blkiocg_update_idle_time_stats(
+ &cfqq->cfqg->blkg);
cfq_mark_cfqq_must_dispatch(cfqq);
+ }
}
} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
/*
@@ -3214,7 +3251,9 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
list_add_tail(&rq->queuelist, &cfqq->fifo);
cfq_add_rq_rb(rq);
-
+ blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
+ &cfqd->serving_group->blkg, rq_data_dir(rq),
+ rq_is_sync(rq));
cfq_rq_enqueued(cfqd, cfqq, rq);
}
@@ -3300,6 +3339,9 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
WARN_ON(!cfqq->dispatched);
cfqd->rq_in_driver--;
cfqq->dispatched--;
+ blkiocg_update_completion_stats(&cfqq->cfqg->blkg, rq_start_time_ns(rq),
+ rq_io_start_time_ns(rq), rq_data_dir(rq),
+ rq_is_sync(rq));
cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
@@ -3440,6 +3482,10 @@ static void cfq_put_request(struct request *rq)
rq->elevator_private = NULL;
rq->elevator_private2 = NULL;
+ /* Put down rq reference on cfqg */
+ cfq_put_cfqg(RQ_CFQG(rq));
+ rq->elevator_private3 = NULL;
+
cfq_put_queue(cfqq);
}
}
@@ -3528,6 +3574,7 @@ new_queue:
rq->elevator_private = cic;
rq->elevator_private2 = cfqq;
+ rq->elevator_private3 = cfq_ref_get_cfqg(cfqq->cfqg);
return 0;
queue_fail:
@@ -3872,6 +3919,7 @@ static struct elevator_type iosched_cfq = {
.elevator_merged_fn = cfq_merged_request,
.elevator_merge_req_fn = cfq_merged_requests,
.elevator_allow_merge_fn = cfq_allow_merge,
+ .elevator_bio_merged_fn = cfq_bio_merged,
.elevator_dispatch_fn = cfq_dispatch_requests,
.elevator_add_req_fn = cfq_insert_request,
.elevator_activate_req_fn = cfq_activate_request,
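The per-request group reference introduced above pairs a get at request setup with a put at teardown; a condensed lifetime sketch using the names from the patch:

	/* cfq_set_request(): the request now pins its cfq_group */
	rq->elevator_private3 = cfq_ref_get_cfqg(cfqq->cfqg);	/* atomic_inc(&cfqg->ref) */

	/* cfq_put_request(): drop the pin once the request is freed */
	cfq_put_cfqg(RQ_CFQG(rq));
	rq->elevator_private3 = NULL;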
diff --git a/block/elevator.c b/block/elevator.c
index 76e3702d5381..6df2b5056b51 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -539,6 +539,15 @@ void elv_merge_requests(struct request_queue *q, struct request *rq,
q->last_merge = rq;
}
+void elv_bio_merged(struct request_queue *q, struct request *rq,
+ struct bio *bio)
+{
+ struct elevator_queue *e = q->elevator;
+
+ if (e->ops->elevator_bio_merged_fn)
+ e->ops->elevator_bio_merged_fn(q, rq, bio);
+}
+
void elv_requeue_request(struct request_queue *q, struct request *rq)
{
/*
@@ -921,6 +930,7 @@ int elv_register_queue(struct request_queue *q)
}
return error;
}
+EXPORT_SYMBOL(elv_register_queue);
static void __elv_unregister_queue(struct elevator_queue *e)
{
@@ -933,6 +943,7 @@ void elv_unregister_queue(struct request_queue *q)
if (q)
__elv_unregister_queue(q->elevator);
}
+EXPORT_SYMBOL(elv_unregister_queue);
void elv_register(struct elevator_type *e)
{
diff --git a/block/genhd.c b/block/genhd.c
index d13ba76a169c..154b5f80b3ab 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -596,6 +596,7 @@ struct gendisk *get_gendisk(dev_t devt, int *partno)
return disk;
}
+EXPORT_SYMBOL(get_gendisk);
/**
* bdget_disk - do bdget() by gendisk and partition number
diff --git a/block/ioctl.c b/block/ioctl.c
index 8905d2a2a717..e8eb679f2f9b 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -126,7 +126,7 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
if (start + len > (bdev->bd_inode->i_size >> 9))
return -EINVAL;
return blkdev_issue_discard(bdev, start, len, GFP_KERNEL,
- DISCARD_FL_WAIT);
+ BLKDEV_IFL_WAIT);
}
static int put_ushort(unsigned long arg, unsigned short val)
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 403857ad06d4..9d9434f08c92 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -28,7 +28,7 @@ config CRYPTO_FIPS
This options enables the fips boot option which is
required if you want to system to operate in a FIPS 200
certification. You should say no unless you know what
- this is. Note that CRYPTO_ANSI_CPRNG is requred if this
+ this is. Note that CRYPTO_ANSI_CPRNG is required if this
option is selected
config CRYPTO_ALGAPI
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 76fae27ed01c..c3cf1a69a47a 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -544,7 +544,7 @@ int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
{
int err = -EINVAL;
- if (frontend && (alg->cra_flags ^ frontend->type) & frontend->maskset)
+ if ((alg->cra_flags ^ frontend->type) & frontend->maskset)
goto out;
spawn->frontend = frontend;
diff --git a/crypto/internal.h b/crypto/internal.h
index 2d226362e594..d4384b08ab29 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -6,7 +6,7 @@
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
+ * Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 80201241b698..247178cb98ec 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -315,16 +315,13 @@ out_free_inst:
goto out;
}
-static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb)
+static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb,
+ u32 type, u32 mask)
{
struct crypto_instance *inst;
struct crypto_alg *alg;
- struct crypto_attr_type *algt;
-
- algt = crypto_get_attr_type(tb);
- alg = crypto_get_attr_alg(tb, algt->type,
- (algt->mask & CRYPTO_ALG_TYPE_MASK));
+ alg = crypto_get_attr_alg(tb, type, (mask & CRYPTO_ALG_TYPE_MASK));
if (IS_ERR(alg))
return ERR_CAST(alg);
@@ -365,7 +362,7 @@ static struct crypto_instance *pcrypt_alloc(struct rtattr **tb)
switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_AEAD:
- return pcrypt_alloc_aead(tb);
+ return pcrypt_alloc_aead(tb, algt->type, algt->mask);
}
return ERR_PTR(-EINVAL);
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index a35159947a26..ea610ad45aa1 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -437,6 +437,9 @@ static void test_hash_speed(const char *algo, unsigned int sec,
goto out;
}
+ if (speed[i].klen)
+ crypto_hash_setkey(tfm, tvmem[0], speed[i].klen);
+
printk(KERN_INFO "test%3u "
"(%5u byte blocks,%5u bytes per update,%4u updates): ",
i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
@@ -881,6 +884,10 @@ static int do_test(int m)
test_hash_speed("rmd320", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
+ case 318:
+ test_hash_speed("ghash-generic", sec, hash_speed_template_16);
+ if (mode > 300 && mode < 400) break;
+
case 399:
break;
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
index 966bbfaf95b1..10cb925132c9 100644
--- a/crypto/tcrypt.h
+++ b/crypto/tcrypt.h
@@ -25,6 +25,7 @@ struct cipher_speed_template {
struct hash_speed {
unsigned int blen; /* buffer length */
unsigned int plen; /* per-update length */
+ unsigned int klen; /* key length */
};
/*
@@ -83,4 +84,32 @@ static struct hash_speed generic_hash_speed_template[] = {
{ .blen = 0, .plen = 0, }
};
+static struct hash_speed hash_speed_template_16[] = {
+ { .blen = 16, .plen = 16, .klen = 16, },
+ { .blen = 64, .plen = 16, .klen = 16, },
+ { .blen = 64, .plen = 64, .klen = 16, },
+ { .blen = 256, .plen = 16, .klen = 16, },
+ { .blen = 256, .plen = 64, .klen = 16, },
+ { .blen = 256, .plen = 256, .klen = 16, },
+ { .blen = 1024, .plen = 16, .klen = 16, },
+ { .blen = 1024, .plen = 256, .klen = 16, },
+ { .blen = 1024, .plen = 1024, .klen = 16, },
+ { .blen = 2048, .plen = 16, .klen = 16, },
+ { .blen = 2048, .plen = 256, .klen = 16, },
+ { .blen = 2048, .plen = 1024, .klen = 16, },
+ { .blen = 2048, .plen = 2048, .klen = 16, },
+ { .blen = 4096, .plen = 16, .klen = 16, },
+ { .blen = 4096, .plen = 256, .klen = 16, },
+ { .blen = 4096, .plen = 1024, .klen = 16, },
+ { .blen = 4096, .plen = 4096, .klen = 16, },
+ { .blen = 8192, .plen = 16, .klen = 16, },
+ { .blen = 8192, .plen = 256, .klen = 16, },
+ { .blen = 8192, .plen = 1024, .klen = 16, },
+ { .blen = 8192, .plen = 4096, .klen = 16, },
+ { .blen = 8192, .plen = 8192, .klen = 16, },
+
+ /* End marker */
+ { .blen = 0, .plen = 0, .klen = 0, }
+};
+
#endif /* _CRYPTO_TCRYPT_H */
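The new klen field and hash_speed_template_16 above let the speed test key the transform before hashing (as the tcrypt.c hunk does via crypto_hash_setkey() when speed[i].klen is non-zero), and the all-zero entry still terminates the array. A small stand-alone sketch of how such a sentinel-terminated, optionally keyed template might be consumed; the loop body is illustrative, not the actual tcrypt code:

#include <stdio.h>

/* Local copy of the template layout from tcrypt.h above; a zeroed entry
 * terminates the array, and a non-zero klen means the hash wants a key
 * (as the ghash speed test added here does). */
struct hash_speed {
	unsigned int blen;   /* buffer length */
	unsigned int plen;   /* per-update length */
	unsigned int klen;   /* key length, 0 = unkeyed hash */
};

static const struct hash_speed demo[] = {
	{ .blen = 16, .plen = 16, .klen = 16 },
	{ .blen = 64, .plen = 16, .klen = 16 },
	{ .blen = 0,  .plen = 0,  .klen = 0  },   /* end marker */
};

int main(void)
{
	for (unsigned int i = 0; demo[i].blen != 0; i++) {
		if (demo[i].klen)
			printf("set a %u-byte key first\n", demo[i].klen);
		printf("test %u: %u byte blocks, %u bytes per update\n",
		       i, demo[i].blen, demo[i].plen);
	}
	return 0;
}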
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index fb765173d41c..74e35377fd30 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -1669,17 +1669,73 @@ static struct hash_testvec aes_xcbc128_tv_template[] = {
}
};
-#define VMAC_AES_TEST_VECTORS 1
-static char vmac_string[128] = {'\x01', '\x01', '\x01', '\x01',
+#define VMAC_AES_TEST_VECTORS 8
+static char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01',
'\x02', '\x03', '\x02', '\x02',
'\x02', '\x04', '\x01', '\x07',
'\x04', '\x01', '\x04', '\x03',};
+static char vmac_string2[128] = {'a', 'b', 'c',};
+static char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c',
+ 'a', 'b', 'c', 'a', 'b', 'c',
+ 'a', 'b', 'c', 'a', 'b', 'c',
+ 'a', 'b', 'c', 'a', 'b', 'c',
+ 'a', 'b', 'c', 'a', 'b', 'c',
+ 'a', 'b', 'c', 'a', 'b', 'c',
+ 'a', 'b', 'c', 'a', 'b', 'c',
+ 'a', 'b', 'c', 'a', 'b', 'c',
+ };
+
static struct hash_testvec aes_vmac128_tv_template[] = {
{
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .plaintext = NULL,
+ .digest = "\x07\x58\x80\x35\x77\xa4\x7b\x54",
+ .psize = 0,
+ .ksize = 16,
+ }, {
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .plaintext = vmac_string1,
+ .digest = "\xce\xf5\x3c\xd3\xae\x68\x8c\xa1",
+ .psize = 128,
+ .ksize = 16,
+ }, {
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .plaintext = vmac_string2,
+ .digest = "\xc9\x27\xb0\x73\x81\xbd\x14\x2d",
+ .psize = 128,
+ .ksize = 16,
+ }, {
.key = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .plaintext = vmac_string,
- .digest = "\xcb\xd7\x8a\xfd\xb7\x33\x79\xe7",
+ .plaintext = vmac_string3,
+ .digest = "\x8d\x1a\x95\x8c\x98\x47\x0b\x19",
+ .psize = 128,
+ .ksize = 16,
+ }, {
+ .key = "abcdefghijklmnop",
+ .plaintext = NULL,
+ .digest = "\x3b\x89\xa1\x26\x9e\x55\x8f\x84",
+ .psize = 0,
+ .ksize = 16,
+ }, {
+ .key = "abcdefghijklmnop",
+ .plaintext = vmac_string1,
+ .digest = "\xab\x5e\xab\xb0\xf6\x8d\x74\xc2",
+ .psize = 128,
+ .ksize = 16,
+ }, {
+ .key = "abcdefghijklmnop",
+ .plaintext = vmac_string2,
+ .digest = "\x11\x15\x68\x42\x3d\x7b\x09\xdf",
+ .psize = 128,
+ .ksize = 16,
+ }, {
+ .key = "abcdefghijklmnop",
+ .plaintext = vmac_string3,
+ .digest = "\x8b\x32\x8f\xe1\xed\x8f\xfa\xd4",
.psize = 128,
.ksize = 16,
},
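Two of the new vectors above use a NULL plaintext with psize 0, i.e. the MAC of the empty message. A stand-alone sketch of how a test loop can represent and report that case; the struct is a simplified stand-in for hash_testvec and the data is placeholder, not real VMAC output:

#include <stdio.h>
#include <stddef.h>

struct testvec {
	const char *key;
	const char *plaintext;   /* NULL => empty message */
	unsigned int psize;
};

int main(void)
{
	struct testvec tv[] = {
		{ "0123456789abcdef", NULL,  0 },
		{ "0123456789abcdef", "abc", 3 },
	};

	for (size_t i = 0; i < sizeof(tv) / sizeof(tv[0]); i++)
		printf("vector %zu: %u byte message%s\n", i, tv[i].psize,
		       tv[i].plaintext ? "" : " (empty)");
	return 0;
}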
diff --git a/crypto/vmac.c b/crypto/vmac.c
index 0a9468e575de..0999274a27ac 100644
--- a/crypto/vmac.c
+++ b/crypto/vmac.c
@@ -43,6 +43,8 @@ const u64 m63 = UINT64_C(0x7fffffffffffffff); /* 63-bit mask */
const u64 m64 = UINT64_C(0xffffffffffffffff); /* 64-bit mask */
const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
+#define pe64_to_cpup le64_to_cpup /* Prefer little endian */
+
#ifdef __LITTLE_ENDIAN
#define INDEX_HIGH 1
#define INDEX_LOW 0
@@ -110,8 +112,8 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
int i; u64 th, tl; \
rh = rl = 0; \
for (i = 0; i < nw; i += 2) { \
- MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
- le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
+ pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
ADD128(rh, rl, th, tl); \
} \
} while (0)
@@ -121,11 +123,11 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
int i; u64 th, tl; \
rh1 = rl1 = rh = rl = 0; \
for (i = 0; i < nw; i += 2) { \
- MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
- le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
+ pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
ADD128(rh, rl, th, tl); \
- MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i+2], \
- le64_to_cpup((mp)+i+1)+(kp)[i+3]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \
+ pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \
ADD128(rh1, rl1, th, tl); \
} \
} while (0)
@@ -136,17 +138,17 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
int i; u64 th, tl; \
rh = rl = 0; \
for (i = 0; i < nw; i += 8) { \
- MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
- le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
+ pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
ADD128(rh, rl, th, tl); \
- MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+2], \
- le64_to_cpup((mp)+i+3)+(kp)[i+3]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \
+ pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \
ADD128(rh, rl, th, tl); \
- MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+4], \
- le64_to_cpup((mp)+i+5)+(kp)[i+5]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \
+ pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \
ADD128(rh, rl, th, tl); \
- MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+6], \
- le64_to_cpup((mp)+i+7)+(kp)[i+7]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \
+ pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \
ADD128(rh, rl, th, tl); \
} \
} while (0)
@@ -156,29 +158,29 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
int i; u64 th, tl; \
rh1 = rl1 = rh = rl = 0; \
for (i = 0; i < nw; i += 8) { \
- MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
- le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
+ pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
ADD128(rh, rl, th, tl); \
- MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i+2], \
- le64_to_cpup((mp)+i+1)+(kp)[i+3]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \
+ pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \
ADD128(rh1, rl1, th, tl); \
- MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+2], \
- le64_to_cpup((mp)+i+3)+(kp)[i+3]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \
+ pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \
ADD128(rh, rl, th, tl); \
- MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+4], \
- le64_to_cpup((mp)+i+3)+(kp)[i+5]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+4], \
+ pe64_to_cpup((mp)+i+3)+(kp)[i+5]); \
ADD128(rh1, rl1, th, tl); \
- MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+4], \
- le64_to_cpup((mp)+i+5)+(kp)[i+5]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \
+ pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \
ADD128(rh, rl, th, tl); \
- MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+6], \
- le64_to_cpup((mp)+i+5)+(kp)[i+7]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+6], \
+ pe64_to_cpup((mp)+i+5)+(kp)[i+7]); \
ADD128(rh1, rl1, th, tl); \
- MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+6], \
- le64_to_cpup((mp)+i+7)+(kp)[i+7]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \
+ pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \
ADD128(rh, rl, th, tl); \
- MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+8], \
- le64_to_cpup((mp)+i+7)+(kp)[i+9]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+8], \
+ pe64_to_cpup((mp)+i+7)+(kp)[i+9]); \
ADD128(rh1, rl1, th, tl); \
} \
} while (0)
@@ -216,8 +218,8 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
int i; \
rh = rl = t = 0; \
for (i = 0; i < nw; i += 2) { \
- t1 = le64_to_cpup(mp+i) + kp[i]; \
- t2 = le64_to_cpup(mp+i+1) + kp[i+1]; \
+ t1 = pe64_to_cpup(mp+i) + kp[i]; \
+ t2 = pe64_to_cpup(mp+i+1) + kp[i+1]; \
m2 = MUL32(t1 >> 32, t2); \
m1 = MUL32(t1, t2 >> 32); \
ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32), \
@@ -322,8 +324,7 @@ static void vhash_abort(struct vmac_ctx *ctx)
ctx->first_block_processed = 0;
}
-static u64 l3hash(u64 p1, u64 p2,
- u64 k1, u64 k2, u64 len)
+static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
{
u64 rh, rl, t, z = 0;
@@ -474,7 +475,7 @@ static u64 vmac(unsigned char m[], unsigned int mbytes,
}
p = be64_to_cpup(out_p + i);
h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx);
- return p + h;
+ return le64_to_cpu(p + h);
}
static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx)
@@ -549,10 +550,6 @@ static int vmac_setkey(struct crypto_shash *parent,
static int vmac_init(struct shash_desc *pdesc)
{
- struct crypto_shash *parent = pdesc->tfm;
- struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
-
- memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
return 0;
}
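The pe64_to_cpup alias introduced above simply maps message words through le64_to_cpup, i.e. a little-endian 64-bit load irrespective of host byte order. A stand-alone sketch of the equivalent portable load (the helper name is made up for the example):

#include <stdint.h>
#include <stdio.h>

/* Assemble the value byte by byte so the result is the same on big- and
 * little-endian hosts; this is what a little-endian 64-bit load amounts to. */
static uint64_t load_le64(const unsigned char *p)
{
	uint64_t v = 0;

	for (int i = 7; i >= 0; i--)
		v = (v << 8) | p[i];
	return v;
}

int main(void)
{
	unsigned char buf[8] = { 0xef, 0xcd, 0xab, 0x89, 0x67, 0x45, 0x23, 0x01 };

	printf("0x%016llx\n", (unsigned long long)load_le64(buf));
	/* prints 0x0123456789abcdef on any host */
	return 0;
}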
diff --git a/drivers/Makefile b/drivers/Makefile
index f42a03029b7c..08e542fb2daf 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -113,3 +113,4 @@ obj-$(CONFIG_VLYNQ) += vlynq/
obj-$(CONFIG_STAGING) += staging/
obj-y += platform/
obj-y += ieee802154/
+obj-y += vbus/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 93d2c7971df6..746411518802 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -360,4 +360,13 @@ config ACPI_SBS
To compile this driver as a module, choose M here:
the modules will be called sbs and sbshc.
+config ACPI_HED
+ tristate "Hardware Error Device"
+ help
+ This driver supports the Hardware Error Device (PNP0C33),
+ which is used to report some hardware errors notified via
+ SCI, mainly the corrected errors.
+
+source "drivers/acpi/apei/Kconfig"
+
endif # ACPI
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index a8d8998dd5c5..6ee33169e1dc 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -19,7 +19,7 @@ obj-y += acpi.o \
# All the builtin files are in the "acpi." module_param namespace.
acpi-y += osl.o utils.o reboot.o
-acpi-y += hest.o
+acpi-y += atomicio.o
# sleep related files
acpi-y += wakeup.o
@@ -59,6 +59,7 @@ obj-$(CONFIG_ACPI_BATTERY) += battery.o
obj-$(CONFIG_ACPI_SBS) += sbshc.o
obj-$(CONFIG_ACPI_SBS) += sbs.o
obj-$(CONFIG_ACPI_POWER_METER) += power_meter.o
+obj-$(CONFIG_ACPI_HED) += hed.o
# processor has its own "processor." module_param namespace
processor-y := processor_driver.o processor_throttling.o
@@ -66,3 +67,5 @@ processor-y += processor_idle.o processor_thermal.o
processor-$(CONFIG_CPU_FREQ) += processor_perflib.o
obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o
+
+obj-$(CONFIG_ACPI_APEI) += apei/
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index 7423052ece5a..d93cc06f4bf8 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -14,12 +14,12 @@ acpi-y := dsfield.o dsmthdat.o dsopcode.o dswexec.o dswscope.o \
acpi-y += evevent.o evregion.o evsci.o evxfevnt.o \
evmisc.o evrgnini.o evxface.o evxfregn.o \
- evgpe.o evgpeblk.o
+ evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o
acpi-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\
exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\
excreate.o exmisc.o exoparg2.o exregion.o exstore.o exutils.o \
- exdump.o exmutex.o exoparg3.o exresnte.o exstoren.o
+ exdump.o exmutex.o exoparg3.o exresnte.o exstoren.o exdebug.o
acpi-y += hwacpi.o hwgpe.o hwregs.o hwsleep.o hwxface.o hwvalid.o
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 3e6ba99e4053..64d1e5c2d4ae 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -73,8 +73,10 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node *node,
u32 notify_value);
/*
- * evgpe - GPE handling and dispatch
+ * evgpe - Low-level GPE support
*/
+u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list);
+
acpi_status
acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info);
@@ -85,19 +87,13 @@ acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info);
struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
u32 gpe_number);
+struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number,
+ struct acpi_gpe_block_info
+ *gpe_block);
+
/*
- * evgpeblk
+ * evgpeblk - Upper-level GPE block support
*/
-u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info);
-
-acpi_status
-acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context);
-
-acpi_status
-acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
- struct acpi_gpe_block_info *gpe_block,
- void *context);
-
acpi_status
acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
struct acpi_generic_address *gpe_block_address,
@@ -116,12 +112,37 @@ u32
acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info,
u32 gpe_number);
-u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list);
+/*
+ * evgpeinit - GPE initialization and update
+ */
+acpi_status acpi_ev_gpe_initialize(void);
+
+void acpi_ev_update_gpes(acpi_owner_id table_owner_id);
acpi_status
-acpi_ev_check_for_wake_only_gpe(struct acpi_gpe_event_info *gpe_event_info);
+acpi_ev_match_gpe_method(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value);
-acpi_status acpi_ev_gpe_initialize(void);
+acpi_status
+acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value);
+
+/*
+ * evgpeutil - GPE utilities
+ */
+acpi_status
+acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context);
+
+u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info);
+
+struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number);
+
+acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt);
+
+acpi_status
+acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+ struct acpi_gpe_block_info *gpe_block,
+ void *context);
/*
* evregion - Address Space handling
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index f8dd8f250ac4..9070f1fe8f17 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -112,6 +112,19 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_leave_wake_gpes_disabled, TRUE);
*/
u8 ACPI_INIT_GLOBAL(acpi_gbl_use_default_register_widths, TRUE);
+/*
+ * Optionally enable output from the AML Debug Object.
+ */
+u8 ACPI_INIT_GLOBAL(acpi_gbl_enable_aml_debug_object, FALSE);
+
+/*
+ * Optionally copy the entire DSDT to local memory (instead of simply
+ * mapping it.) There are some BIOSs that corrupt or replace the original
+ * DSDT, creating the need for this option. Default is FALSE, do not copy
+ * the DSDT.
+ */
+u8 ACPI_INIT_GLOBAL(acpi_gbl_copy_dsdt_locally, FALSE);
+
/* acpi_gbl_FADT is a local copy of the FADT, converted to a common format. */
struct acpi_table_fadt acpi_gbl_FADT;
@@ -145,11 +158,10 @@ ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer;
****************************************************************************/
/*
- * acpi_gbl_root_table_list is the master list of ACPI tables found in the
- * RSDT/XSDT.
- *
+ * acpi_gbl_root_table_list is the master list of ACPI tables that were
+ * found in the RSDT/XSDT.
*/
-ACPI_EXTERN struct acpi_internal_rsdt acpi_gbl_root_table_list;
+ACPI_EXTERN struct acpi_table_list acpi_gbl_root_table_list;
ACPI_EXTERN struct acpi_table_facs *acpi_gbl_FACS;
/* These addresses are calculated from the FADT Event Block addresses */
@@ -160,6 +172,11 @@ ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1a_enable;
ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1b_status;
ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1b_enable;
+/* DSDT information. Used to check for DSDT corruption */
+
+ACPI_EXTERN struct acpi_table_header *acpi_gbl_DSDT;
+ACPI_EXTERN struct acpi_table_header acpi_gbl_original_dsdt_header;
+
/*
* Handle both ACPI 1.0 and ACPI 2.0 Integer widths. The integer width is
* determined by the revision of the DSDT: If the DSDT revision is less than
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index 6df3f8428168..049e203bd621 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -121,6 +121,13 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,
struct acpi_walk_state *walk_state);
/*
+ * exdebug - AML debug object
+ */
+void
+acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
+ u32 level, u32 index);
+
+/*
* exfield - ACPI AML (p-code) execution - field manipulation
*/
acpi_status
@@ -274,7 +281,7 @@ acpi_status
acpi_ex_system_do_notify_op(union acpi_operand_object *value,
union acpi_operand_object *obj_desc);
-acpi_status acpi_ex_system_do_suspend(u64 time);
+acpi_status acpi_ex_system_do_sleep(u64 time);
acpi_status acpi_ex_system_do_stall(u32 time);
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 24b8faa5c395..147a7e6bd38f 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -213,12 +213,12 @@ struct acpi_namespace_node {
#define ANOBJ_IS_BIT_OFFSET 0x40 /* i_aSL only: Reference is a bit offset */
#define ANOBJ_IS_REFERENCED 0x80 /* i_aSL only: Object was referenced */
-/* One internal RSDT for table management */
+/* Internal ACPI table management - master table list */
-struct acpi_internal_rsdt {
- struct acpi_table_desc *tables;
- u32 count;
- u32 size;
+struct acpi_table_list {
+ struct acpi_table_desc *tables; /* Table descriptor array */
+ u32 current_table_count; /* Tables currently in the array */
+ u32 max_table_count; /* Max tables array will hold */
u8 flags;
};
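The renamed struct acpi_table_list above splits the old count/size pair into current_table_count (slots in use) and max_table_count (array capacity). A stand-alone sketch of an append that respects that distinction; the descriptor type and function are illustrative, not the real ACPICA table code:

#include <stdio.h>

struct table_desc { unsigned long address; };

struct table_list {
	struct table_desc *tables;
	unsigned int current_table_count;   /* descriptors in use */
	unsigned int max_table_count;       /* array capacity     */
};

static int table_list_append(struct table_list *list, unsigned long address)
{
	if (list->current_table_count >= list->max_table_count)
		return -1;   /* caller would grow the array first */
	list->tables[list->current_table_count++].address = address;
	return 0;
}

int main(void)
{
	struct table_desc storage[2];
	struct table_list list = { storage, 0, 2 };

	table_list_append(&list, 0x1000);
	table_list_append(&list, 0x2000);
	printf("%u of %u slots used, append again -> %d\n",
	       list.current_table_count, list.max_table_count,
	       table_list_append(&list, 0x3000));
	return 0;
}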
@@ -427,8 +427,8 @@ struct acpi_gpe_event_info {
struct acpi_gpe_register_info *register_info; /* Backpointer to register info */
u8 flags; /* Misc info about this GPE */
u8 gpe_number; /* This GPE */
- u8 runtime_count;
- u8 wakeup_count;
+ u8 runtime_count; /* References to a run GPE */
+ u8 wakeup_count; /* References to a wake GPE */
};
/* Information about a GPE register pair, one per each status/enable pair in an array */
@@ -454,6 +454,7 @@ struct acpi_gpe_block_info {
struct acpi_gpe_event_info *event_info; /* One for each GPE */
struct acpi_generic_address block_address; /* Base address of the block */
u32 register_count; /* Number of register pairs in block */
+ u16 gpe_count; /* Number of individual GPEs in block */
u8 block_base_number; /* Base GPE number for this block */
};
@@ -469,6 +470,10 @@ struct acpi_gpe_xrupt_info {
struct acpi_gpe_walk_info {
struct acpi_namespace_node *gpe_device;
struct acpi_gpe_block_info *gpe_block;
+ u16 count;
+ acpi_owner_id owner_id;
+ u8 enable_this_gpe;
+ u8 execute_by_owner_id;
};
struct acpi_gpe_device_info {
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 8ff3b741df28..62a576e34361 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -107,6 +107,10 @@ u8 acpi_tb_checksum(u8 *buffer, u32 length);
acpi_status
acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length);
+void acpi_tb_check_dsdt_header(void);
+
+struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index);
+
void
acpi_tb_install_table(acpi_physical_address address,
char *signature, u32 table_index);
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index bb13817e0c31..347bee1726f1 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -323,7 +323,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
default:
ACPI_ERROR((AE_INFO,
- "Invalid opcode in field list: %X",
+ "Invalid opcode in field list: 0x%X",
arg->common.aml_opcode));
return_ACPI_STATUS(AE_AML_BAD_OPCODE);
}
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 721039233aa7..2a9a561c2f01 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -225,7 +225,7 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
(walk_state->thread->current_sync_level >
obj_desc->method.mutex->mutex.sync_level)) {
ACPI_ERROR((AE_INFO,
- "Cannot acquire Mutex for method [%4.4s], current SyncLevel is too large (%d)",
+ "Cannot acquire Mutex for method [%4.4s], current SyncLevel is too large (%u)",
acpi_ut_get_node_name(method_node),
walk_state->thread->current_sync_level));
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
index cc343b959540..f3d52f59250b 100644
--- a/drivers/acpi/acpica/dsmthdat.c
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -262,7 +262,7 @@ acpi_ds_method_data_get_node(u8 type,
if (index > ACPI_METHOD_MAX_LOCAL) {
ACPI_ERROR((AE_INFO,
- "Local index %d is invalid (max %d)",
+ "Local index %u is invalid (max %u)",
index, ACPI_METHOD_MAX_LOCAL));
return_ACPI_STATUS(AE_AML_INVALID_INDEX);
}
@@ -276,7 +276,7 @@ acpi_ds_method_data_get_node(u8 type,
if (index > ACPI_METHOD_MAX_ARG) {
ACPI_ERROR((AE_INFO,
- "Arg index %d is invalid (max %d)",
+ "Arg index %u is invalid (max %u)",
index, ACPI_METHOD_MAX_ARG));
return_ACPI_STATUS(AE_AML_INVALID_INDEX);
}
@@ -287,7 +287,7 @@ acpi_ds_method_data_get_node(u8 type,
break;
default:
- ACPI_ERROR((AE_INFO, "Type %d is invalid", type));
+ ACPI_ERROR((AE_INFO, "Type %u is invalid", type));
return_ACPI_STATUS(AE_TYPE);
}
@@ -424,7 +424,7 @@ acpi_ds_method_data_get_value(u8 type,
case ACPI_REFCLASS_ARG:
ACPI_ERROR((AE_INFO,
- "Uninitialized Arg[%d] at node %p",
+ "Uninitialized Arg[%u] at node %p",
index, node));
return_ACPI_STATUS(AE_AML_UNINITIALIZED_ARG);
@@ -440,7 +440,7 @@ acpi_ds_method_data_get_value(u8 type,
default:
ACPI_ERROR((AE_INFO,
- "Not a Arg/Local opcode: %X",
+ "Not a Arg/Local opcode: 0x%X",
type));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 891e08bf560b..3607adcaf085 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -288,7 +288,7 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
if (byte_list) {
if (byte_list->common.aml_opcode != AML_INT_BYTELIST_OP) {
ACPI_ERROR((AE_INFO,
- "Expecting bytelist, got AML opcode %X in op %p",
+ "Expecting bytelist, found AML opcode 0x%X in op %p",
byte_list->common.aml_opcode, byte_list));
acpi_ut_remove_reference(obj_desc);
@@ -511,7 +511,7 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
}
ACPI_INFO((AE_INFO,
- "Actual Package length (0x%X) is larger than NumElements field (0x%X), truncated\n",
+ "Actual Package length (%u) is larger than NumElements field (%u), truncated\n",
i, element_count));
} else if (i < element_count) {
/*
@@ -519,7 +519,7 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
* Note: this is not an error, the package is padded out with NULLs.
*/
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Package List length (0x%X) smaller than NumElements count (0x%X), padded with null elements\n",
+ "Package List length (%u) smaller than NumElements count (%u), padded with null elements\n",
i, element_count));
}
@@ -701,7 +701,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
default:
ACPI_ERROR((AE_INFO,
- "Unknown constant opcode %X",
+ "Unknown constant opcode 0x%X",
opcode));
status = AE_AML_OPERAND_TYPE;
break;
@@ -717,7 +717,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
break;
default:
- ACPI_ERROR((AE_INFO, "Unknown Integer type %X",
+ ACPI_ERROR((AE_INFO, "Unknown Integer type 0x%X",
op_info->type));
status = AE_AML_OPERAND_TYPE;
break;
@@ -806,7 +806,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
default:
ACPI_ERROR((AE_INFO,
- "Unimplemented reference type for AML opcode: %4.4X",
+ "Unimplemented reference type for AML opcode: 0x%4.4X",
opcode));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}
@@ -816,7 +816,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
default:
- ACPI_ERROR((AE_INFO, "Unimplemented data type: %X",
+ ACPI_ERROR((AE_INFO, "Unimplemented data type: 0x%X",
obj_desc->common.type));
status = AE_AML_OPERAND_TYPE;
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index bf980cadb1e8..53a7e416f33e 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -292,7 +292,7 @@ acpi_status acpi_ds_get_buffer_arguments(union acpi_operand_object *obj_desc)
node = obj_desc->buffer.node;
if (!node) {
ACPI_ERROR((AE_INFO,
- "No pointer back to NS node in buffer obj %p",
+ "No pointer back to namespace node in buffer object %p",
obj_desc));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
@@ -336,7 +336,7 @@ acpi_status acpi_ds_get_package_arguments(union acpi_operand_object *obj_desc)
node = obj_desc->package.node;
if (!node) {
ACPI_ERROR((AE_INFO,
- "No pointer back to NS node in package %p",
+ "No pointer back to namespace node in package %p",
obj_desc));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
@@ -580,7 +580,8 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
default:
ACPI_ERROR((AE_INFO,
- "Unknown field creation opcode %02x", aml_opcode));
+ "Unknown field creation opcode 0x%02X",
+ aml_opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
}
@@ -589,7 +590,7 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
if ((bit_offset + bit_count) > (8 * (u32) buffer_desc->buffer.length)) {
ACPI_ERROR((AE_INFO,
- "Field [%4.4s] at %d exceeds Buffer [%4.4s] size %d (bits)",
+ "Field [%4.4s] at %u exceeds Buffer [%4.4s] size %u (bits)",
acpi_ut_get_node_name(result_desc),
bit_offset + bit_count,
acpi_ut_get_node_name(buffer_desc->buffer.node),
@@ -693,7 +694,7 @@ acpi_ds_eval_buffer_field_operands(struct acpi_walk_state *walk_state,
status = acpi_ex_resolve_operands(op->common.aml_opcode,
ACPI_WALK_OPERANDS, walk_state);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO, "(%s) bad operand(s) (%X)",
+ ACPI_ERROR((AE_INFO, "(%s) bad operand(s), status 0x%X",
acpi_ps_get_opcode_name(op->common.aml_opcode),
status));
@@ -1461,7 +1462,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
default:
- ACPI_ERROR((AE_INFO, "Unknown control opcode=%X Op=%p",
+ ACPI_ERROR((AE_INFO, "Unknown control opcode=0x%X Op=%p",
op->common.aml_opcode, op));
status = AE_AML_BAD_OPCODE;
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index 6b76c486d784..d555b374e314 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -140,7 +140,7 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
if (local_obj_desc->common.type != ACPI_TYPE_INTEGER) {
ACPI_ERROR((AE_INFO,
- "Bad predicate (not an integer) ObjDesc=%p State=%p Type=%X",
+ "Bad predicate (not an integer) ObjDesc=%p State=%p Type=0x%X",
obj_desc, walk_state, obj_desc->common.type));
status = AE_AML_OPERAND_TYPE;
@@ -354,7 +354,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
op_class = walk_state->op_info->class;
if (op_class == AML_CLASS_UNKNOWN) {
- ACPI_ERROR((AE_INFO, "Unknown opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown opcode 0x%X",
op->common.aml_opcode));
return_ACPI_STATUS(AE_NOT_IMPLEMENTED);
}
@@ -678,7 +678,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
default:
ACPI_ERROR((AE_INFO,
- "Unimplemented opcode, class=%X type=%X Opcode=%X Op=%p",
+ "Unimplemented opcode, class=0x%X type=0x%X Opcode=-0x%X Op=%p",
op_class, op_type, op->common.aml_opcode,
op));
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index 050df8164165..83155dd8671e 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -179,7 +179,7 @@ acpi_ds_result_push(union acpi_operand_object * object,
if (!object) {
ACPI_ERROR((AE_INFO,
- "Null Object! Obj=%p State=%p Num=%X",
+ "Null Object! Obj=%p State=%p Num=%u",
object, walk_state, walk_state->result_count));
return (AE_BAD_PARAMETER);
}
@@ -223,7 +223,7 @@ static acpi_status acpi_ds_result_stack_push(struct acpi_walk_state *walk_state)
if (((u32) walk_state->result_size + ACPI_RESULTS_FRAME_OBJ_NUM) >
ACPI_RESULTS_OBJ_NUM_MAX) {
- ACPI_ERROR((AE_INFO, "Result stack overflow: State=%p Num=%X",
+ ACPI_ERROR((AE_INFO, "Result stack overflow: State=%p Num=%u",
walk_state, walk_state->result_size));
return (AE_STACK_OVERFLOW);
}
@@ -314,7 +314,7 @@ acpi_ds_obj_stack_push(void *object, struct acpi_walk_state * walk_state)
if (walk_state->num_operands >= ACPI_OBJ_NUM_OPERANDS) {
ACPI_ERROR((AE_INFO,
- "Object stack overflow! Obj=%p State=%p #Ops=%X",
+ "Object stack overflow! Obj=%p State=%p #Ops=%u",
object, walk_state, walk_state->num_operands));
return (AE_STACK_OVERFLOW);
}
@@ -365,7 +365,7 @@ acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state * walk_state)
if (walk_state->num_operands == 0) {
ACPI_ERROR((AE_INFO,
- "Object stack underflow! Count=%X State=%p #Ops=%X",
+ "Object stack underflow! Count=%X State=%p #Ops=%u",
pop_count, walk_state,
walk_state->num_operands));
return (AE_STACK_UNDERFLOW);
@@ -377,7 +377,7 @@ acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state * walk_state)
walk_state->operands[walk_state->num_operands] = NULL;
}
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Count=%X State=%p #Ops=%X\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Count=%X State=%p #Ops=%u\n",
pop_count, walk_state, walk_state->num_operands));
return (AE_OK);
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index c1e6f472d435..f5795915a2e9 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -302,7 +302,7 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
ACPI_DISABLE_EVENT);
ACPI_ERROR((AE_INFO,
- "No installed handler for fixed event [%08X]",
+ "No installed handler for fixed event [0x%08X]",
event));
return (ACPI_INTERRUPT_NOT_HANDLED);
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 78c55508aff5..a221ad404167 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -60,7 +60,8 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);
*
* RETURN: Status
*
- * DESCRIPTION: Updates GPE register enable masks based on the GPE type
+ * DESCRIPTION: Updates GPE register enable masks based upon whether there are
+ * references (either wake or run) to this GPE
*
******************************************************************************/
@@ -81,14 +82,20 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info)
(1 <<
(gpe_event_info->gpe_number - gpe_register_info->base_gpe_number));
+ /* Clear the wake/run bits up front */
+
ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, register_bit);
ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);
- if (gpe_event_info->runtime_count)
+ /* Set the mask bits only if there are references to this GPE */
+
+ if (gpe_event_info->runtime_count) {
ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit);
+ }
- if (gpe_event_info->wakeup_count)
+ if (gpe_event_info->wakeup_count) {
ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit);
+ }
return_ACPI_STATUS(AE_OK);
}
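The rewritten acpi_ev_update_gpe_enable_masks() above first clears this GPE's bit in both masks and then sets it only where reference counts are outstanding, so a GPE with no run or wake references ends up disabled in both. A stand-alone sketch of the same logic with simplified types (only the field names follow the diff):

#include <stdio.h>

#define SET_BIT(mask, bit)   ((mask) |= (bit))    /* cf. ACPI_SET_BIT   */
#define CLEAR_BIT(mask, bit) ((mask) &= ~(bit))   /* cf. ACPI_CLEAR_BIT */

struct gpe_register { unsigned char enable_for_run, enable_for_wake; };
struct gpe_event    { unsigned char runtime_count, wakeup_count; };

/* Clear the wake/run bits up front, then set them only if referenced. */
static void update_enable_masks(struct gpe_register *reg,
				const struct gpe_event *gpe,
				unsigned char register_bit)
{
	CLEAR_BIT(reg->enable_for_wake, register_bit);
	CLEAR_BIT(reg->enable_for_run, register_bit);

	if (gpe->runtime_count)
		SET_BIT(reg->enable_for_run, register_bit);
	if (gpe->wakeup_count)
		SET_BIT(reg->enable_for_wake, register_bit);
}

int main(void)
{
	struct gpe_register reg = { 0, 0 };
	struct gpe_event gpe = { .runtime_count = 1, .wakeup_count = 0 };

	update_enable_masks(&reg, &gpe, 1 << 3);
	printf("run=0x%02x wake=0x%02x\n",
	       reg.enable_for_run, reg.enable_for_wake);   /* run=0x08 wake=0x00 */
	return 0;
}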
@@ -101,7 +108,10 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info)
*
* RETURN: Status
*
- * DESCRIPTION: Enable a GPE based on the GPE type
+ * DESCRIPTION: Hardware-enable a GPE. Always enables the GPE, regardless
+ * of type or number of references.
+ *
+ * Note: The GPE lock should be already acquired when this function is called.
*
******************************************************************************/
@@ -109,20 +119,36 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
acpi_status status;
+
ACPI_FUNCTION_TRACE(ev_enable_gpe);
- /* Make sure HW enable masks are updated */
+
+ /*
+ * We will only allow a GPE to be enabled if it has either an
+ * associated method (_Lxx/_Exx) or a handler. Otherwise, the
+ * GPE will be immediately disabled by acpi_ev_gpe_dispatch the
+ * first time it fires.
+ */
+ if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)) {
+ return_ACPI_STATUS(AE_NO_HANDLER);
+ }
+
+ /* Ensure the HW enable masks are current */
status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
+ }
+
+ /* Clear the GPE (of stale events) */
- /* Clear the GPE (of stale events), then enable it */
status = acpi_hw_clear_gpe(gpe_event_info);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
+ }
/* Enable the requested GPE */
+
status = acpi_hw_write_gpe_enable_reg(gpe_event_info);
return_ACPI_STATUS(status);
}
@@ -135,7 +161,10 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
*
* RETURN: Status
*
- * DESCRIPTION: Disable a GPE based on the GPE type
+ * DESCRIPTION: Hardware-disable a GPE. Always disables the requested GPE,
+ * regardless of the type or number of references.
+ *
+ * Note: The GPE lock should be already acquired when this function is called.
*
******************************************************************************/
@@ -145,24 +174,71 @@ acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
ACPI_FUNCTION_TRACE(ev_disable_gpe);
- /* Make sure HW enable masks are updated */
+
+ /*
+ * Note: Always disable the GPE, even if we think that it is already
+ * disabled. It is possible that the AML or some other code has enabled
+ * the GPE behind our back.
+ */
+
+ /* Ensure the HW enable masks are current */
status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
+ }
/*
- * Even if we don't know the GPE type, make sure that we always
- * disable it. low_disable_gpe will just clear the enable bit for this
- * GPE and write it. It will not write out the current GPE enable mask,
- * since this may inadvertently enable GPEs too early, if a rogue GPE has
- * come in during ACPICA initialization - possibly as a result of AML or
- * other code that has enabled the GPE.
+ * Always H/W disable this GPE, even if we don't know the GPE type.
+ * Simply clear the enable bit for this particular GPE, but do not
+ * write out the current GPE enable mask since this may inadvertently
+ * enable GPEs too early. An example is a rogue GPE that has arrived
+ * during ACPICA initialization - possibly because AML or other code
+ * has enabled the GPE.
*/
status = acpi_hw_low_disable_gpe(gpe_event_info);
return_ACPI_STATUS(status);
}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_low_get_gpe_info
+ *
+ * PARAMETERS: gpe_number - Raw GPE number
+ * gpe_block - A GPE info block
+ *
+ * RETURN: A GPE event_info struct. NULL if not a valid GPE (The gpe_number
+ * is not within the specified GPE block)
+ *
+ * DESCRIPTION: Returns the event_info struct associated with this GPE. This is
+ * the low-level implementation of ev_get_gpe_event_info.
+ *
+ ******************************************************************************/
+
+struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number,
+ struct acpi_gpe_block_info
+ *gpe_block)
+{
+ u32 gpe_index;
+
+ /*
+ * Validate that the gpe_number is within the specified gpe_block.
+ * (Two steps)
+ */
+ if (!gpe_block || (gpe_number < gpe_block->block_base_number)) {
+ return (NULL);
+ }
+
+ gpe_index = gpe_number - gpe_block->block_base_number;
+ if (gpe_index >= gpe_block->gpe_count) {
+ return (NULL);
+ }
+
+ return (&gpe_block->event_info[gpe_index]);
+}
+
+
/*******************************************************************************
*
* FUNCTION: acpi_ev_get_gpe_event_info
@@ -184,29 +260,23 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
u32 gpe_number)
{
union acpi_operand_object *obj_desc;
- struct acpi_gpe_block_info *gpe_block;
+ struct acpi_gpe_event_info *gpe_info;
u32 i;
ACPI_FUNCTION_ENTRY();
- /* A NULL gpe_block means use the FADT-defined GPE block(s) */
+ /* A NULL gpe_device means use the FADT-defined GPE block(s) */
if (!gpe_device) {
/* Examine GPE Block 0 and 1 (These blocks are permanent) */
for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) {
- gpe_block = acpi_gbl_gpe_fadt_blocks[i];
- if (gpe_block) {
- if ((gpe_number >= gpe_block->block_base_number)
- && (gpe_number <
- gpe_block->block_base_number +
- (gpe_block->register_count * 8))) {
- return (&gpe_block->
- event_info[gpe_number -
- gpe_block->
- block_base_number]);
- }
+ gpe_info = acpi_ev_low_get_gpe_info(gpe_number,
+ acpi_gbl_gpe_fadt_blocks
+ [i]);
+ if (gpe_info) {
+ return (gpe_info);
}
}
@@ -223,16 +293,8 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
return (NULL);
}
- gpe_block = obj_desc->device.gpe_block;
-
- if ((gpe_number >= gpe_block->block_base_number) &&
- (gpe_number <
- gpe_block->block_base_number + (gpe_block->register_count * 8))) {
- return (&gpe_block->
- event_info[gpe_number - gpe_block->block_base_number]);
- }
-
- return (NULL);
+ return (acpi_ev_low_get_gpe_info
+ (gpe_number, obj_desc->device.gpe_block));
}
/*******************************************************************************
@@ -389,7 +451,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
return_VOID;
}
- /* Set the GPE flags for return to enabled state */
+ /* Update the GPE register masks for return to enabled state */
(void)acpi_ev_update_gpe_enable_masks(gpe_event_info);
@@ -499,7 +561,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
status = acpi_hw_clear_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Unable to clear GPE[%2X]",
+ "Unable to clear GPE[0x%2X]",
gpe_number));
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
}
@@ -532,7 +594,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
status = acpi_hw_clear_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Unable to clear GPE[%2X]",
+ "Unable to clear GPE[0x%2X]",
gpe_number));
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
}
@@ -548,7 +610,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
status = acpi_ev_disable_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Unable to disable GPE[%2X]",
+ "Unable to disable GPE[0x%2X]",
gpe_number));
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
}
@@ -562,27 +624,30 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Unable to queue handler for GPE[%2X] - event disabled",
+ "Unable to queue handler for GPE[0x%2X] - event disabled",
gpe_number));
}
break;
default:
- /* No handler or method to run! */
-
+ /*
+ * No handler or method to run!
+ * 03/2010: This case should no longer be possible. We will not allow
+ * a GPE to be enabled if it has no handler or method.
+ */
ACPI_ERROR((AE_INFO,
- "No handler or method for GPE[%2X], disabling event",
+ "No handler or method for GPE[0x%2X], disabling event",
gpe_number));
/*
- * Disable the GPE. The GPE will remain disabled until the ACPICA
- * Core Subsystem is restarted, or a handler is installed.
+ * Disable the GPE. The GPE will remain disabled until a handler
+ * is installed or ACPICA is restarted.
*/
status = acpi_ev_disable_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Unable to disable GPE[%2X]",
+ "Unable to disable GPE[0x%2X]",
gpe_number));
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
}
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index fef721917eaf..7c28f2d9fd35 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -51,20 +51,6 @@ ACPI_MODULE_NAME("evgpeblk")
/* Local prototypes */
static acpi_status
-acpi_ev_save_method_info(acpi_handle obj_handle,
- u32 level, void *obj_desc, void **return_value);
-
-static acpi_status
-acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
- u32 level, void *info, void **return_value);
-
-static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
- interrupt_number);
-
-static acpi_status
-acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt);
-
-static acpi_status
acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
u32 interrupt_number);
@@ -73,527 +59,6 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block);
/*******************************************************************************
*
- * FUNCTION: acpi_ev_valid_gpe_event
- *
- * PARAMETERS: gpe_event_info - Info for this GPE
- *
- * RETURN: TRUE if the gpe_event is valid
- *
- * DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL.
- * Should be called only when the GPE lists are semaphore locked
- * and not subject to change.
- *
- ******************************************************************************/
-
-u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
-{
- struct acpi_gpe_xrupt_info *gpe_xrupt_block;
- struct acpi_gpe_block_info *gpe_block;
-
- ACPI_FUNCTION_ENTRY();
-
- /* No need for spin lock since we are not changing any list elements */
-
- /* Walk the GPE interrupt levels */
-
- gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head;
- while (gpe_xrupt_block) {
- gpe_block = gpe_xrupt_block->gpe_block_list_head;
-
- /* Walk the GPE blocks on this interrupt level */
-
- while (gpe_block) {
- if ((&gpe_block->event_info[0] <= gpe_event_info) &&
- (&gpe_block->event_info[((acpi_size)
- gpe_block->
- register_count) * 8] >
- gpe_event_info)) {
- return (TRUE);
- }
-
- gpe_block = gpe_block->next;
- }
-
- gpe_xrupt_block = gpe_xrupt_block->next;
- }
-
- return (FALSE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_walk_gpe_list
- *
- * PARAMETERS: gpe_walk_callback - Routine called for each GPE block
- * Context - Value passed to callback
- *
- * RETURN: Status
- *
- * DESCRIPTION: Walk the GPE lists.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context)
-{
- struct acpi_gpe_block_info *gpe_block;
- struct acpi_gpe_xrupt_info *gpe_xrupt_info;
- acpi_status status = AE_OK;
- acpi_cpu_flags flags;
-
- ACPI_FUNCTION_TRACE(ev_walk_gpe_list);
-
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-
- /* Walk the interrupt level descriptor list */
-
- gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
- while (gpe_xrupt_info) {
-
- /* Walk all Gpe Blocks attached to this interrupt level */
-
- gpe_block = gpe_xrupt_info->gpe_block_list_head;
- while (gpe_block) {
-
- /* One callback per GPE block */
-
- status =
- gpe_walk_callback(gpe_xrupt_info, gpe_block,
- context);
- if (ACPI_FAILURE(status)) {
- if (status == AE_CTRL_END) { /* Callback abort */
- status = AE_OK;
- }
- goto unlock_and_exit;
- }
-
- gpe_block = gpe_block->next;
- }
-
- gpe_xrupt_info = gpe_xrupt_info->next;
- }
-
- unlock_and_exit:
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_delete_gpe_handlers
- *
- * PARAMETERS: gpe_xrupt_info - GPE Interrupt info
- * gpe_block - Gpe Block info
- *
- * RETURN: Status
- *
- * DESCRIPTION: Delete all Handler objects found in the GPE data structs.
- * Used only prior to termination.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
- struct acpi_gpe_block_info *gpe_block,
- void *context)
-{
- struct acpi_gpe_event_info *gpe_event_info;
- u32 i;
- u32 j;
-
- ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);
-
- /* Examine each GPE Register within the block */
-
- for (i = 0; i < gpe_block->register_count; i++) {
-
- /* Now look at the individual GPEs in this byte register */
-
- for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
- gpe_event_info = &gpe_block->event_info[((acpi_size) i *
- ACPI_GPE_REGISTER_WIDTH)
- + j];
-
- if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
- ACPI_GPE_DISPATCH_HANDLER) {
- ACPI_FREE(gpe_event_info->dispatch.handler);
- gpe_event_info->dispatch.handler = NULL;
- gpe_event_info->flags &=
- ~ACPI_GPE_DISPATCH_MASK;
- }
- }
- }
-
- return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_save_method_info
- *
- * PARAMETERS: Callback from walk_namespace
- *
- * RETURN: Status
- *
- * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
- * control method under the _GPE portion of the namespace.
- * Extract the name and GPE type from the object, saving this
- * information for quick lookup during GPE dispatch
- *
- * The name of each GPE control method is of the form:
- * "_Lxx" or "_Exx"
- * Where:
- * L - means that the GPE is level triggered
- * E - means that the GPE is edge triggered
- * xx - is the GPE number [in HEX]
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ev_save_method_info(acpi_handle obj_handle,
- u32 level, void *obj_desc, void **return_value)
-{
- struct acpi_gpe_block_info *gpe_block = (void *)obj_desc;
- struct acpi_gpe_event_info *gpe_event_info;
- u32 gpe_number;
- char name[ACPI_NAME_SIZE + 1];
- u8 type;
-
- ACPI_FUNCTION_TRACE(ev_save_method_info);
-
- /*
- * _Lxx and _Exx GPE method support
- *
- * 1) Extract the name from the object and convert to a string
- */
- ACPI_MOVE_32_TO_32(name,
- &((struct acpi_namespace_node *)obj_handle)->name.
- integer);
- name[ACPI_NAME_SIZE] = 0;
-
- /*
- * 2) Edge/Level determination is based on the 2nd character
- * of the method name
- *
- * NOTE: Default GPE type is RUNTIME. May be changed later to WAKE
- * if a _PRW object is found that points to this GPE.
- */
- switch (name[1]) {
- case 'L':
- type = ACPI_GPE_LEVEL_TRIGGERED;
- break;
-
- case 'E':
- type = ACPI_GPE_EDGE_TRIGGERED;
- break;
-
- default:
- /* Unknown method type, just ignore it! */
-
- ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
- "Ignoring unknown GPE method type: %s "
- "(name not of form _Lxx or _Exx)", name));
- return_ACPI_STATUS(AE_OK);
- }
-
- /* Convert the last two characters of the name to the GPE Number */
-
- gpe_number = ACPI_STRTOUL(&name[2], NULL, 16);
- if (gpe_number == ACPI_UINT32_MAX) {
-
- /* Conversion failed; invalid method, just ignore it */
-
- ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
- "Could not extract GPE number from name: %s "
- "(name is not of form _Lxx or _Exx)", name));
- return_ACPI_STATUS(AE_OK);
- }
-
- /* Ensure that we have a valid GPE number for this GPE block */
-
- if ((gpe_number < gpe_block->block_base_number) ||
- (gpe_number >= (gpe_block->block_base_number +
- (gpe_block->register_count * 8)))) {
- /*
- * Not valid for this GPE block, just ignore it. However, it may be
- * valid for a different GPE block, since GPE0 and GPE1 methods both
- * appear under \_GPE.
- */
- return_ACPI_STATUS(AE_OK);
- }
-
- /*
- * Now we can add this information to the gpe_event_info block for use
- * during dispatch of this GPE.
- */
- gpe_event_info =
- &gpe_block->event_info[gpe_number - gpe_block->block_base_number];
-
- gpe_event_info->flags = (u8) (type | ACPI_GPE_DISPATCH_METHOD);
-
- gpe_event_info->dispatch.method_node =
- (struct acpi_namespace_node *)obj_handle;
-
- ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
- "Registered GPE method %s as GPE number 0x%.2X\n",
- name, gpe_number));
- return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_match_prw_and_gpe
- *
- * PARAMETERS: Callback from walk_namespace
- *
- * RETURN: Status. NOTE: We ignore errors so that the _PRW walk is
- * not aborted on a single _PRW failure.
- *
- * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
- * Device. Run the _PRW method. If present, extract the GPE
- * number and mark the GPE as a WAKE GPE.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
- u32 level, void *info, void **return_value)
-{
- struct acpi_gpe_walk_info *gpe_info = (void *)info;
- struct acpi_namespace_node *gpe_device;
- struct acpi_gpe_block_info *gpe_block;
- struct acpi_namespace_node *target_gpe_device;
- struct acpi_gpe_event_info *gpe_event_info;
- union acpi_operand_object *pkg_desc;
- union acpi_operand_object *obj_desc;
- u32 gpe_number;
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(ev_match_prw_and_gpe);
-
- /* Check for a _PRW method under this device */
-
- status = acpi_ut_evaluate_object(obj_handle, METHOD_NAME__PRW,
- ACPI_BTYPE_PACKAGE, &pkg_desc);
- if (ACPI_FAILURE(status)) {
-
- /* Ignore all errors from _PRW, we don't want to abort the subsystem */
-
- return_ACPI_STATUS(AE_OK);
- }
-
- /* The returned _PRW package must have at least two elements */
-
- if (pkg_desc->package.count < 2) {
- goto cleanup;
- }
-
- /* Extract pointers from the input context */
-
- gpe_device = gpe_info->gpe_device;
- gpe_block = gpe_info->gpe_block;
-
- /*
- * The _PRW object must return a package, we are only interested in the
- * first element
- */
- obj_desc = pkg_desc->package.elements[0];
-
- if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
-
- /* Use FADT-defined GPE device (from definition of _PRW) */
-
- target_gpe_device = acpi_gbl_fadt_gpe_device;
-
- /* Integer is the GPE number in the FADT described GPE blocks */
-
- gpe_number = (u32) obj_desc->integer.value;
- } else if (obj_desc->common.type == ACPI_TYPE_PACKAGE) {
-
- /* Package contains a GPE reference and GPE number within a GPE block */
-
- if ((obj_desc->package.count < 2) ||
- ((obj_desc->package.elements[0])->common.type !=
- ACPI_TYPE_LOCAL_REFERENCE) ||
- ((obj_desc->package.elements[1])->common.type !=
- ACPI_TYPE_INTEGER)) {
- goto cleanup;
- }
-
- /* Get GPE block reference and decode */
-
- target_gpe_device =
- obj_desc->package.elements[0]->reference.node;
- gpe_number = (u32) obj_desc->package.elements[1]->integer.value;
- } else {
- /* Unknown type, just ignore it */
-
- goto cleanup;
- }
-
- /*
- * Is this GPE within this block?
- *
- * TRUE if and only if these conditions are true:
- * 1) The GPE devices match.
- * 2) The GPE index(number) is within the range of the Gpe Block
- * associated with the GPE device.
- */
- if ((gpe_device == target_gpe_device) &&
- (gpe_number >= gpe_block->block_base_number) &&
- (gpe_number < gpe_block->block_base_number +
- (gpe_block->register_count * 8))) {
- gpe_event_info = &gpe_block->event_info[gpe_number -
- gpe_block->
- block_base_number];
-
- gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
- }
-
- cleanup:
- acpi_ut_remove_reference(pkg_desc);
- return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_get_gpe_xrupt_block
- *
- * PARAMETERS: interrupt_number - Interrupt for a GPE block
- *
- * RETURN: A GPE interrupt block
- *
- * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
- * block per unique interrupt level used for GPEs. Should be
- * called only when the GPE lists are semaphore locked and not
- * subject to change.
- *
- ******************************************************************************/
-
-static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
- interrupt_number)
-{
- struct acpi_gpe_xrupt_info *next_gpe_xrupt;
- struct acpi_gpe_xrupt_info *gpe_xrupt;
- acpi_status status;
- acpi_cpu_flags flags;
-
- ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);
-
- /* No need for lock since we are not changing any list elements here */
-
- next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
- while (next_gpe_xrupt) {
- if (next_gpe_xrupt->interrupt_number == interrupt_number) {
- return_PTR(next_gpe_xrupt);
- }
-
- next_gpe_xrupt = next_gpe_xrupt->next;
- }
-
- /* Not found, must allocate a new xrupt descriptor */
-
- gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
- if (!gpe_xrupt) {
- return_PTR(NULL);
- }
-
- gpe_xrupt->interrupt_number = interrupt_number;
-
- /* Install new interrupt descriptor with spin lock */
-
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
- if (acpi_gbl_gpe_xrupt_list_head) {
- next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
- while (next_gpe_xrupt->next) {
- next_gpe_xrupt = next_gpe_xrupt->next;
- }
-
- next_gpe_xrupt->next = gpe_xrupt;
- gpe_xrupt->previous = next_gpe_xrupt;
- } else {
- acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
- }
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-
- /* Install new interrupt handler if not SCI_INT */
-
- if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
- status = acpi_os_install_interrupt_handler(interrupt_number,
- acpi_ev_gpe_xrupt_handler,
- gpe_xrupt);
- if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO,
- "Could not install GPE interrupt handler at level 0x%X",
- interrupt_number));
- return_PTR(NULL);
- }
- }
-
- return_PTR(gpe_xrupt);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_delete_gpe_xrupt
- *
- * PARAMETERS: gpe_xrupt - A GPE interrupt info block
- *
- * RETURN: Status
- *
- * DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated
- * interrupt handler if not the SCI interrupt.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
-{
- acpi_status status;
- acpi_cpu_flags flags;
-
- ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);
-
- /* We never want to remove the SCI interrupt handler */
-
- if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
- gpe_xrupt->gpe_block_list_head = NULL;
- return_ACPI_STATUS(AE_OK);
- }
-
- /* Disable this interrupt */
-
- status =
- acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
- acpi_ev_gpe_xrupt_handler);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /* Unlink the interrupt block with lock */
-
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
- if (gpe_xrupt->previous) {
- gpe_xrupt->previous->next = gpe_xrupt->next;
- } else {
- /* No previous, update list head */
-
- acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next;
- }
-
- if (gpe_xrupt->next) {
- gpe_xrupt->next->previous = gpe_xrupt->previous;
- }
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-
- /* Free the block */
-
- ACPI_FREE(gpe_xrupt);
- return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ev_install_gpe_block
*
* PARAMETERS: gpe_block - New GPE block
@@ -705,8 +170,7 @@ acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
}
- acpi_current_gpe_count -=
- gpe_block->register_count * ACPI_GPE_REGISTER_WIDTH;
+ acpi_current_gpe_count -= gpe_block->gpe_count;
/* Free the gpe_block */
@@ -760,9 +224,7 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
* Allocate the GPE event_info block. There are eight distinct GPEs
* per register. Initialization to zeros is sufficient.
*/
- gpe_event_info = ACPI_ALLOCATE_ZEROED(((acpi_size) gpe_block->
- register_count *
- ACPI_GPE_REGISTER_WIDTH) *
+ gpe_event_info = ACPI_ALLOCATE_ZEROED((acpi_size) gpe_block->gpe_count *
sizeof(struct
acpi_gpe_event_info));
if (!gpe_event_info) {
@@ -880,6 +342,7 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
{
acpi_status status;
struct acpi_gpe_block_info *gpe_block;
+ struct acpi_gpe_walk_info walk_info;
ACPI_FUNCTION_TRACE(ev_create_gpe_block);
@@ -897,6 +360,7 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
/* Initialize the new GPE block */
gpe_block->node = gpe_device;
+ gpe_block->gpe_count = (u16)(register_count * ACPI_GPE_REGISTER_WIDTH);
gpe_block->register_count = register_count;
gpe_block->block_base_number = gpe_block_base_number;
@@ -921,12 +385,17 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
return_ACPI_STATUS(status);
}
- /* Find all GPE methods (_Lxx, _Exx) for this block */
+ /* Find all GPE methods (_Lxx or _Exx) for this block */
+
+ walk_info.gpe_block = gpe_block;
+ walk_info.gpe_device = gpe_device;
+ walk_info.enable_this_gpe = FALSE;
+ walk_info.execute_by_owner_id = FALSE;
status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device,
ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK,
- acpi_ev_save_method_info, NULL,
- gpe_block, NULL);
+ acpi_ev_match_gpe_method, NULL,
+ &walk_info, NULL);
/* Return the new block */
@@ -938,14 +407,13 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
"GPE %02X to %02X [%4.4s] %u regs on int 0x%X\n",
(u32) gpe_block->block_base_number,
(u32) (gpe_block->block_base_number +
- ((gpe_block->register_count *
- ACPI_GPE_REGISTER_WIDTH) - 1)),
+ (gpe_block->gpe_count - 1)),
gpe_device->name.ascii, gpe_block->register_count,
interrupt_number));
/* Update global count of currently available GPEs */
- acpi_current_gpe_count += register_count * ACPI_GPE_REGISTER_WIDTH;
+ acpi_current_gpe_count += gpe_block->gpe_count;
return_ACPI_STATUS(AE_OK);
}
@@ -969,10 +437,13 @@ acpi_status
acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
struct acpi_gpe_block_info *gpe_block)
{
+ acpi_status status;
struct acpi_gpe_event_info *gpe_event_info;
- struct acpi_gpe_walk_info gpe_info;
+ struct acpi_gpe_walk_info walk_info;
u32 wake_gpe_count;
u32 gpe_enabled_count;
+ u32 gpe_index;
+ u32 gpe_number;
u32 i;
u32 j;
@@ -995,210 +466,75 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
* definition a wake GPE and will not be enabled while the machine
* is running.
*/
- gpe_info.gpe_block = gpe_block;
- gpe_info.gpe_device = gpe_device;
+ walk_info.gpe_block = gpe_block;
+ walk_info.gpe_device = gpe_device;
+ walk_info.execute_by_owner_id = FALSE;
- acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ status =
+ acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK,
acpi_ev_match_prw_and_gpe, NULL,
- &gpe_info, NULL);
+ &walk_info, NULL);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "While executing _PRW methods"));
+ }
}
/*
- * Enable all GPEs that have a corresponding method and aren't
+ * Enable all GPEs that have a corresponding method and are not
* capable of generating wakeups. Any other GPEs within this block
- * must be enabled via the acpi_enable_gpe() interface.
+ * must be enabled via the acpi_enable_gpe interface.
*/
wake_gpe_count = 0;
gpe_enabled_count = 0;
- if (gpe_device == acpi_gbl_fadt_gpe_device)
+
+ if (gpe_device == acpi_gbl_fadt_gpe_device) {
gpe_device = NULL;
+ }
for (i = 0; i < gpe_block->register_count; i++) {
for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
- acpi_status status;
- acpi_size gpe_index;
- int gpe_number;
/* Get the info block for this particular GPE */
- gpe_index = (acpi_size)i * ACPI_GPE_REGISTER_WIDTH + j;
+
+ gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j;
gpe_event_info = &gpe_block->event_info[gpe_index];
if (gpe_event_info->flags & ACPI_GPE_CAN_WAKE) {
wake_gpe_count++;
- if (acpi_gbl_leave_wake_gpes_disabled)
+ if (acpi_gbl_leave_wake_gpes_disabled) {
continue;
+ }
}
- if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD))
+ /* Ignore GPEs that have no corresponding _Lxx/_Exx method */
+
+ if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD)) {
continue;
+ }
+
+ /* Enable this GPE */
gpe_number = gpe_index + gpe_block->block_base_number;
status = acpi_enable_gpe(gpe_device, gpe_number,
- ACPI_GPE_TYPE_RUNTIME);
- if (ACPI_FAILURE(status))
- ACPI_ERROR((AE_INFO,
- "Failed to enable GPE %02X\n",
- gpe_number));
- else
- gpe_enabled_count++;
- }
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_INIT,
- "Found %u Wake, Enabled %u Runtime GPEs in this block\n",
- wake_gpe_count, gpe_enabled_count));
-
- return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_gpe_initialize
- *
- * PARAMETERS: None
- *
- * RETURN: Status
- *
- * DESCRIPTION: Initialize the GPE data structures
- *
- ******************************************************************************/
-
-acpi_status acpi_ev_gpe_initialize(void)
-{
- u32 register_count0 = 0;
- u32 register_count1 = 0;
- u32 gpe_number_max = 0;
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(ev_gpe_initialize);
-
- status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /*
- * Initialize the GPE Block(s) defined in the FADT
- *
- * Why the GPE register block lengths are divided by 2: From the ACPI
- * Spec, section "General-Purpose Event Registers", we have:
- *
- * "Each register block contains two registers of equal length
- * GPEx_STS and GPEx_EN (where x is 0 or 1). The length of the
- * GPE0_STS and GPE0_EN registers is equal to half the GPE0_LEN
- * The length of the GPE1_STS and GPE1_EN registers is equal to
- * half the GPE1_LEN. If a generic register block is not supported
- * then its respective block pointer and block length values in the
- * FADT table contain zeros. The GPE0_LEN and GPE1_LEN do not need
- * to be the same size."
- */
-
- /*
- * Determine the maximum GPE number for this machine.
- *
- * Note: both GPE0 and GPE1 are optional, and either can exist without
- * the other.
- *
- * If EITHER the register length OR the block address are zero, then that
- * particular block is not supported.
- */
- if (acpi_gbl_FADT.gpe0_block_length &&
- acpi_gbl_FADT.xgpe0_block.address) {
-
- /* GPE block 0 exists (has both length and address > 0) */
-
- register_count0 = (u16) (acpi_gbl_FADT.gpe0_block_length / 2);
-
- gpe_number_max =
- (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;
-
- /* Install GPE Block 0 */
-
- status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
- &acpi_gbl_FADT.xgpe0_block,
- register_count0, 0,
- acpi_gbl_FADT.sci_interrupt,
- &acpi_gbl_gpe_fadt_blocks[0]);
-
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "Could not create GPE Block 0"));
- }
- }
-
- if (acpi_gbl_FADT.gpe1_block_length &&
- acpi_gbl_FADT.xgpe1_block.address) {
-
- /* GPE block 1 exists (has both length and address > 0) */
-
- register_count1 = (u16) (acpi_gbl_FADT.gpe1_block_length / 2);
-
- /* Check for GPE0/GPE1 overlap (if both banks exist) */
-
- if ((register_count0) &&
- (gpe_number_max >= acpi_gbl_FADT.gpe1_base)) {
- ACPI_ERROR((AE_INFO,
- "GPE0 block (GPE 0 to %d) overlaps the GPE1 block "
- "(GPE %d to %d) - Ignoring GPE1",
- gpe_number_max, acpi_gbl_FADT.gpe1_base,
- acpi_gbl_FADT.gpe1_base +
- ((register_count1 *
- ACPI_GPE_REGISTER_WIDTH) - 1)));
-
- /* Ignore GPE1 block by setting the register count to zero */
-
- register_count1 = 0;
- } else {
- /* Install GPE Block 1 */
-
- status =
- acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
- &acpi_gbl_FADT.xgpe1_block,
- register_count1,
- acpi_gbl_FADT.gpe1_base,
- acpi_gbl_FADT.
- sci_interrupt,
- &acpi_gbl_gpe_fadt_blocks
- [1]);
-
+ ACPI_GPE_TYPE_RUNTIME);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Could not create GPE Block 1"));
+ "Could not enable GPE 0x%02X",
+ gpe_number));
+ continue;
}
- /*
- * GPE0 and GPE1 do not have to be contiguous in the GPE number
- * space. However, GPE0 always starts at GPE number zero.
- */
- gpe_number_max = acpi_gbl_FADT.gpe1_base +
- ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1);
+ gpe_enabled_count++;
}
}
- /* Exit if there are no GPE registers */
-
- if ((register_count0 + register_count1) == 0) {
-
- /* GPEs are not required by ACPI, this is OK */
-
+ if (gpe_enabled_count || wake_gpe_count) {
ACPI_DEBUG_PRINT((ACPI_DB_INIT,
- "There are no GPE blocks defined in the FADT\n"));
- status = AE_OK;
- goto cleanup;
- }
-
- /* Check for Max GPE number out-of-range */
-
- if (gpe_number_max > ACPI_GPE_MAX) {
- ACPI_ERROR((AE_INFO,
- "Maximum GPE number from FADT is too large: 0x%X",
- gpe_number_max));
- status = AE_BAD_VALUE;
- goto cleanup;
+ "Enabled %u Runtime GPEs, added %u Wake GPEs in this block\n",
+ gpe_enabled_count, wake_gpe_count));
}
- cleanup:
- (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(AE_OK);
}
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
new file mode 100644
index 000000000000..3f6c2d26410d
--- /dev/null
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -0,0 +1,653 @@
+/******************************************************************************
+ *
+ * Module Name: evgpeinit - System GPE initialization and update
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2010, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acevents.h"
+#include "acnamesp.h"
+#include "acinterp.h"
+
+#define _COMPONENT ACPI_EVENTS
+ACPI_MODULE_NAME("evgpeinit")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_gpe_initialize
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Initialize the GPE data structures and the FADT GPE 0/1 blocks
+ *
+ ******************************************************************************/
+acpi_status acpi_ev_gpe_initialize(void)
+{
+ u32 register_count0 = 0;
+ u32 register_count1 = 0;
+ u32 gpe_number_max = 0;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_gpe_initialize);
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Initialize the GPE Block(s) defined in the FADT
+ *
+ * Why the GPE register block lengths are divided by 2: From the ACPI
+ * Spec, section "General-Purpose Event Registers", we have:
+ *
+ * "Each register block contains two registers of equal length
+ * GPEx_STS and GPEx_EN (where x is 0 or 1). The length of the
+ * GPE0_STS and GPE0_EN registers is equal to half the GPE0_LEN
+ * The length of the GPE1_STS and GPE1_EN registers is equal to
+ * half the GPE1_LEN. If a generic register block is not supported
+ * then its respective block pointer and block length values in the
+ * FADT table contain zeros. The GPE0_LEN and GPE1_LEN do not need
+ * to be the same size."
+ */
+
+ /*
+ * Determine the maximum GPE number for this machine.
+ *
+ * Note: both GPE0 and GPE1 are optional, and either can exist without
+ * the other.
+ *
+ * If EITHER the register length OR the block address are zero, then that
+ * particular block is not supported.
+ */
+ if (acpi_gbl_FADT.gpe0_block_length &&
+ acpi_gbl_FADT.xgpe0_block.address) {
+
+ /* GPE block 0 exists (has both length and address > 0) */
+
+ register_count0 = (u16)(acpi_gbl_FADT.gpe0_block_length / 2);
+
+ gpe_number_max =
+ (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;
+
+ /* Install GPE Block 0 */
+
+ status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
+ &acpi_gbl_FADT.xgpe0_block,
+ register_count0, 0,
+ acpi_gbl_FADT.sci_interrupt,
+ &acpi_gbl_gpe_fadt_blocks[0]);
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not create GPE Block 0"));
+ }
+ }
+
+ if (acpi_gbl_FADT.gpe1_block_length &&
+ acpi_gbl_FADT.xgpe1_block.address) {
+
+ /* GPE block 1 exists (has both length and address > 0) */
+
+ register_count1 = (u16)(acpi_gbl_FADT.gpe1_block_length / 2);
+
+ /* Check for GPE0/GPE1 overlap (if both banks exist) */
+
+ if ((register_count0) &&
+ (gpe_number_max >= acpi_gbl_FADT.gpe1_base)) {
+ ACPI_ERROR((AE_INFO,
+ "GPE0 block (GPE 0 to %u) overlaps the GPE1 block "
+ "(GPE %u to %u) - Ignoring GPE1",
+ gpe_number_max, acpi_gbl_FADT.gpe1_base,
+ acpi_gbl_FADT.gpe1_base +
+ ((register_count1 *
+ ACPI_GPE_REGISTER_WIDTH) - 1)));
+
+ /* Ignore GPE1 block by setting the register count to zero */
+
+ register_count1 = 0;
+ } else {
+ /* Install GPE Block 1 */
+
+ status =
+ acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
+ &acpi_gbl_FADT.xgpe1_block,
+ register_count1,
+ acpi_gbl_FADT.gpe1_base,
+ acpi_gbl_FADT.
+ sci_interrupt,
+ &acpi_gbl_gpe_fadt_blocks
+ [1]);
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not create GPE Block 1"));
+ }
+
+ /*
+ * GPE0 and GPE1 do not have to be contiguous in the GPE number
+ * space. However, GPE0 always starts at GPE number zero.
+ */
+ gpe_number_max = acpi_gbl_FADT.gpe1_base +
+ ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1);
+ }
+ }
+
+ /* Exit if there are no GPE registers */
+
+ if ((register_count0 + register_count1) == 0) {
+
+ /* GPEs are not required by ACPI, this is OK */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INIT,
+ "There are no GPE blocks defined in the FADT\n"));
+ status = AE_OK;
+ goto cleanup;
+ }
+
+ /* Check for Max GPE number out-of-range */
+
+ if (gpe_number_max > ACPI_GPE_MAX) {
+ ACPI_ERROR((AE_INFO,
+ "Maximum GPE number from FADT is too large: 0x%X",
+ gpe_number_max));
+ status = AE_BAD_VALUE;
+ goto cleanup;
+ }
+
+ cleanup:
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_update_gpes
+ *
+ * PARAMETERS: table_owner_id - ID of the newly-loaded ACPI table
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Check for new GPE methods (_Lxx/_Exx) made available as a
+ * result of a Load() or load_table() operation. If new GPE
+ * methods have been installed, register the new methods and
+ * enable any runtime GPEs that are associated with them. Also,
+ * run any newly loaded _PRW methods in order to discover any
+ * new CAN_WAKE GPEs.
+ *
+ ******************************************************************************/
+
+void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
+{
+ struct acpi_gpe_xrupt_info *gpe_xrupt_info;
+ struct acpi_gpe_block_info *gpe_block;
+ struct acpi_gpe_walk_info walk_info;
+ acpi_status status = AE_OK;
+ u32 new_wake_gpe_count = 0;
+
+ /* We will examine only _PRW/_Lxx/_Exx methods owned by this table */
+
+ walk_info.owner_id = table_owner_id;
+ walk_info.execute_by_owner_id = TRUE;
+ walk_info.count = 0;
+
+ if (acpi_gbl_leave_wake_gpes_disabled) {
+ /*
+ * 1) Run any newly-loaded _PRW methods to find any GPEs that
+ * can now be marked as CAN_WAKE GPEs. Note: We must run the
+ * _PRW methods before we process the _Lxx/_Exx methods because
+ * we will enable all runtime GPEs associated with the new
+ * _Lxx/_Exx methods at the time we process those methods.
+ *
+ * Unlock interpreter so that we can run the _PRW methods.
+ */
+ walk_info.gpe_block = NULL;
+ walk_info.gpe_device = NULL;
+
+ acpi_ex_exit_interpreter();
+
+ status =
+ acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX,
+ ACPI_NS_WALK_NO_UNLOCK,
+ acpi_ev_match_prw_and_gpe, NULL,
+ &walk_info, NULL);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "While executing _PRW methods"));
+ }
+
+ acpi_ex_enter_interpreter();
+ new_wake_gpe_count = walk_info.count;
+ }
+
+ /*
+ * 2) Find any _Lxx/_Exx GPE methods that have just been loaded.
+ *
+ * Any GPEs that correspond to new _Lxx/_Exx methods and are not
+ * marked as CAN_WAKE are immediately enabled.
+ *
+ * Examine the namespace underneath each gpe_device within the
+ * gpe_block lists.
+ */
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return;
+ }
+
+ walk_info.count = 0;
+ walk_info.enable_this_gpe = TRUE;
+
+ /* Walk the interrupt level descriptor list */
+
+ gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
+ while (gpe_xrupt_info) {
+
+ /* Walk all Gpe Blocks attached to this interrupt level */
+
+ gpe_block = gpe_xrupt_info->gpe_block_list_head;
+ while (gpe_block) {
+ walk_info.gpe_block = gpe_block;
+ walk_info.gpe_device = gpe_block->node;
+
+ status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD,
+ walk_info.gpe_device,
+ ACPI_UINT32_MAX,
+ ACPI_NS_WALK_NO_UNLOCK,
+ acpi_ev_match_gpe_method,
+ NULL, &walk_info, NULL);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "While decoding _Lxx/_Exx methods"));
+ }
+
+ gpe_block = gpe_block->next;
+ }
+
+ gpe_xrupt_info = gpe_xrupt_info->next;
+ }
+
+ if (walk_info.count || new_wake_gpe_count) {
+ ACPI_INFO((AE_INFO,
+ "Enabled %u new runtime GPEs, added %u new wakeup GPEs",
+ walk_info.count, new_wake_gpe_count));
+ }
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ return;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_match_gpe_method
+ *
+ * PARAMETERS: Callback from walk_namespace
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
+ * control method under the _GPE portion of the namespace.
+ * Extract the name and GPE type from the object, saving this
+ * information for quick lookup during GPE dispatch. Allows a
+ * per-owner_id evaluation if execute_by_owner_id is TRUE in the
+ * walk_info parameter block.
+ *
+ * The name of each GPE control method is of the form:
+ * "_Lxx" or "_Exx", where:
+ * L - means that the GPE is level triggered
+ * E - means that the GPE is edge triggered
+ * xx - is the GPE number [in HEX]
+ *
+ * If walk_info->execute_by_owner_id is TRUE, we only examine GPE methods
+ * with that owner.
+ * If walk_info->enable_this_gpe is TRUE, the GPE that is referred to by a GPE
+ * method is immediately enabled (Used for Load/load_table operators)
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_match_gpe_method(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value)
+{
+ struct acpi_namespace_node *method_node =
+ ACPI_CAST_PTR(struct acpi_namespace_node, obj_handle);
+ struct acpi_gpe_walk_info *walk_info =
+ ACPI_CAST_PTR(struct acpi_gpe_walk_info, context);
+ struct acpi_gpe_event_info *gpe_event_info;
+ struct acpi_namespace_node *gpe_device;
+ acpi_status status;
+ u32 gpe_number;
+ char name[ACPI_NAME_SIZE + 1];
+ u8 type;
+
+ ACPI_FUNCTION_TRACE(ev_match_gpe_method);
+
+ /* Check if requested owner_id matches this owner_id */
+
+ if ((walk_info->execute_by_owner_id) &&
+ (method_node->owner_id != walk_info->owner_id)) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /*
+ * Match and decode the _Lxx and _Exx GPE method names
+ *
+ * 1) Extract the method name and null terminate it
+ */
+ ACPI_MOVE_32_TO_32(name, &method_node->name.integer);
+ name[ACPI_NAME_SIZE] = 0;
+
+ /* 2) Name must begin with an underscore */
+
+ if (name[0] != '_') {
+ return_ACPI_STATUS(AE_OK); /* Ignore this method */
+ }
+
+ /*
+ * 3) Edge/Level determination is based on the 2nd character
+ * of the method name
+ *
+ * NOTE: Default GPE type is RUNTIME only. Later, if a _PRW object is
+ * found that points to this GPE, the ACPI_GPE_CAN_WAKE flag is set.
+ */
+ switch (name[1]) {
+ case 'L':
+ type = ACPI_GPE_LEVEL_TRIGGERED;
+ break;
+
+ case 'E':
+ type = ACPI_GPE_EDGE_TRIGGERED;
+ break;
+
+ default:
+ /* Unknown method type, just ignore it */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
+ "Ignoring unknown GPE method type: %s "
+ "(name not of form _Lxx or _Exx)", name));
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* 4) The last two characters of the name are the hex GPE Number */
+
+ gpe_number = ACPI_STRTOUL(&name[2], NULL, 16);
+ if (gpe_number == ACPI_UINT32_MAX) {
+
+ /* Conversion failed; invalid method, just ignore it */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
+ "Could not extract GPE number from name: %s "
+ "(name is not of form _Lxx or _Exx)", name));
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Ensure that we have a valid GPE number for this GPE block */
+
+ gpe_event_info =
+ acpi_ev_low_get_gpe_info(gpe_number, walk_info->gpe_block);
+ if (!gpe_event_info) {
+ /*
+ * This gpe_number is not valid for this GPE block, just ignore it.
+ * However, it may be valid for a different GPE block, since GPE0
+ * and GPE1 methods both appear under \_GPE.
+ */
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+ ACPI_GPE_DISPATCH_HANDLER) {
+
+ /* If there is already a handler, ignore this GPE method */
+
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+ ACPI_GPE_DISPATCH_METHOD) {
+ /*
+ * If there is already a method, ignore this method. But check
+ * for a type mismatch (if both the _Lxx AND _Exx exist)
+ */
+ if (type != (gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK)) {
+ ACPI_ERROR((AE_INFO,
+ "For GPE 0x%.2X, found both _L%2.2X and _E%2.2X methods",
+ gpe_number, gpe_number, gpe_number));
+ }
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /*
+ * Add the GPE information from above to the gpe_event_info block for
+ * use during dispatch of this GPE.
+ */
+ gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_METHOD);
+ gpe_event_info->dispatch.method_node = method_node;
+
+ /*
+ * Enable this GPE if requested. This only happens during the
+ * execution of a Load or load_table operator. We have found a new
+ * GPE method and want to immediately enable the GPE if it is a
+ * runtime GPE.
+ */
+ if (walk_info->enable_this_gpe) {
+
+ /* Ignore GPEs that can wake the system */
+
+ if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE) ||
+ !acpi_gbl_leave_wake_gpes_disabled) {
+ walk_info->count++;
+ gpe_device = walk_info->gpe_device;
+
+ if (gpe_device == acpi_gbl_fadt_gpe_device) {
+ gpe_device = NULL;
+ }
+
+ status = acpi_enable_gpe(gpe_device, gpe_number,
+ ACPI_GPE_TYPE_RUNTIME);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not enable GPE 0x%02X",
+ gpe_number));
+ }
+ }
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
+ "Registered GPE method %s as GPE number 0x%.2X\n",
+ name, gpe_number));
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_match_prw_and_gpe
+ *
+ * PARAMETERS: Callback from walk_namespace
+ *
+ * RETURN: Status. NOTE: We ignore errors so that the _PRW walk is
+ * not aborted on a single _PRW failure.
+ *
+ * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
+ * Device. Run the _PRW method. If present, extract the GPE
+ * number and mark the GPE as a CAN_WAKE GPE. Allows a
+ * per-owner_id execution if execute_by_owner_id is TRUE in the
+ * walk_info parameter block.
+ *
+ * If walk_info->execute_by_owner_id is TRUE, we only execute _PRWs with that
+ * owner.
+ * If walk_info->gpe_device is NULL, we execute every _PRW found. Otherwise,
+ * we only execute _PRWs that refer to the input gpe_device.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value)
+{
+ struct acpi_gpe_walk_info *walk_info =
+ ACPI_CAST_PTR(struct acpi_gpe_walk_info, context);
+ struct acpi_namespace_node *gpe_device;
+ struct acpi_gpe_block_info *gpe_block;
+ struct acpi_namespace_node *target_gpe_device;
+ struct acpi_namespace_node *prw_node;
+ struct acpi_gpe_event_info *gpe_event_info;
+ union acpi_operand_object *pkg_desc;
+ union acpi_operand_object *obj_desc;
+ u32 gpe_number;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_match_prw_and_gpe);
+
+ /* Check for a _PRW method under this device */
+
+ status = acpi_ns_get_node(obj_handle, METHOD_NAME__PRW,
+ ACPI_NS_NO_UPSEARCH, &prw_node);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Check if requested owner_id matches this owner_id */
+
+ if ((walk_info->execute_by_owner_id) &&
+ (prw_node->owner_id != walk_info->owner_id)) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Execute the _PRW */
+
+ status = acpi_ut_evaluate_object(prw_node, NULL,
+ ACPI_BTYPE_PACKAGE, &pkg_desc);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* The returned _PRW package must have at least two elements */
+
+ if (pkg_desc->package.count < 2) {
+ goto cleanup;
+ }
+
+ /* Extract pointers from the input context */
+
+ gpe_device = walk_info->gpe_device;
+ gpe_block = walk_info->gpe_block;
+
+ /*
+ * The _PRW object must return a package, we are only interested
+ * in the first element
+ */
+ obj_desc = pkg_desc->package.elements[0];
+
+ if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
+
+ /* Use FADT-defined GPE device (from definition of _PRW) */
+
+ target_gpe_device = NULL;
+ if (gpe_device) {
+ target_gpe_device = acpi_gbl_fadt_gpe_device;
+ }
+
+ /* Integer is the GPE number in the FADT described GPE blocks */
+
+ gpe_number = (u32)obj_desc->integer.value;
+ } else if (obj_desc->common.type == ACPI_TYPE_PACKAGE) {
+
+ /* Package contains a GPE reference and GPE number within a GPE block */
+
+ if ((obj_desc->package.count < 2) ||
+ ((obj_desc->package.elements[0])->common.type !=
+ ACPI_TYPE_LOCAL_REFERENCE) ||
+ ((obj_desc->package.elements[1])->common.type !=
+ ACPI_TYPE_INTEGER)) {
+ goto cleanup;
+ }
+
+ /* Get GPE block reference and decode */
+
+ target_gpe_device =
+ obj_desc->package.elements[0]->reference.node;
+ gpe_number = (u32)obj_desc->package.elements[1]->integer.value;
+ } else {
+ /* Unknown type, just ignore it */
+
+ goto cleanup;
+ }
+
+ /* Get the gpe_event_info for this GPE */
+
+ if (gpe_device) {
+ /*
+ * Is this GPE within this block?
+ *
+ * TRUE if and only if these conditions are true:
+ * 1) The GPE devices match.
+ * 2) The GPE index (number) is within the range of the Gpe Block
+ * associated with the GPE device.
+ */
+ if (gpe_device != target_gpe_device) {
+ goto cleanup;
+ }
+
+ gpe_event_info =
+ acpi_ev_low_get_gpe_info(gpe_number, gpe_block);
+ } else {
+ /* gpe_device is NULL, just match the target_device and gpe_number */
+
+ gpe_event_info =
+ acpi_ev_get_gpe_event_info(target_gpe_device, gpe_number);
+ }
+
+ if (gpe_event_info) {
+ if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
+
+ /* This GPE can wake the system */
+
+ gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
+ walk_info->count++;
+ }
+ }
+
+ cleanup:
+ acpi_ut_remove_reference(pkg_desc);
+ return_ACPI_STATUS(AE_OK);
+}
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
new file mode 100644
index 000000000000..19a0e513ea48
--- /dev/null
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -0,0 +1,337 @@
+/******************************************************************************
+ *
+ * Module Name: evgpeutil - GPE utilities
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2010, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acevents.h"
+
+#define _COMPONENT ACPI_EVENTS
+ACPI_MODULE_NAME("evgpeutil")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_walk_gpe_list
+ *
+ * PARAMETERS: gpe_walk_callback - Routine called for each GPE block
+ * Context - Value passed to callback
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Walk the GPE lists.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context)
+{
+ struct acpi_gpe_block_info *gpe_block;
+ struct acpi_gpe_xrupt_info *gpe_xrupt_info;
+ acpi_status status = AE_OK;
+ acpi_cpu_flags flags;
+
+ ACPI_FUNCTION_TRACE(ev_walk_gpe_list);
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ /* Walk the interrupt level descriptor list */
+
+ gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
+ while (gpe_xrupt_info) {
+
+ /* Walk all Gpe Blocks attached to this interrupt level */
+
+ gpe_block = gpe_xrupt_info->gpe_block_list_head;
+ while (gpe_block) {
+
+ /* One callback per GPE block */
+
+ status =
+ gpe_walk_callback(gpe_xrupt_info, gpe_block,
+ context);
+ if (ACPI_FAILURE(status)) {
+ if (status == AE_CTRL_END) { /* Callback abort */
+ status = AE_OK;
+ }
+ goto unlock_and_exit;
+ }
+
+ gpe_block = gpe_block->next;
+ }
+
+ gpe_xrupt_info = gpe_xrupt_info->next;
+ }
+
+ unlock_and_exit:
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_valid_gpe_event
+ *
+ * PARAMETERS: gpe_event_info - Info for this GPE
+ *
+ * RETURN: TRUE if the gpe_event is valid
+ *
+ * DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL.
+ * Should be called only when the GPE lists are semaphore locked
+ * and not subject to change.
+ *
+ ******************************************************************************/
+
+u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
+{
+ struct acpi_gpe_xrupt_info *gpe_xrupt_block;
+ struct acpi_gpe_block_info *gpe_block;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /* No need for spin lock since we are not changing any list elements */
+
+ /* Walk the GPE interrupt levels */
+
+ gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head;
+ while (gpe_xrupt_block) {
+ gpe_block = gpe_xrupt_block->gpe_block_list_head;
+
+ /* Walk the GPE blocks on this interrupt level */
+
+ while (gpe_block) {
+ if ((&gpe_block->event_info[0] <= gpe_event_info) &&
+ (&gpe_block->event_info[gpe_block->gpe_count] >
+ gpe_event_info)) {
+ return (TRUE);
+ }
+
+ gpe_block = gpe_block->next;
+ }
+
+ gpe_xrupt_block = gpe_xrupt_block->next;
+ }
+
+ return (FALSE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_get_gpe_xrupt_block
+ *
+ * PARAMETERS: interrupt_number - Interrupt for a GPE block
+ *
+ * RETURN: A GPE interrupt block
+ *
+ * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
+ * block per unique interrupt level used for GPEs. Should be
+ * called only when the GPE lists are semaphore locked and not
+ * subject to change.
+ *
+ ******************************************************************************/
+
+struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number)
+{
+ struct acpi_gpe_xrupt_info *next_gpe_xrupt;
+ struct acpi_gpe_xrupt_info *gpe_xrupt;
+ acpi_status status;
+ acpi_cpu_flags flags;
+
+ ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);
+
+ /* No need for lock since we are not changing any list elements here */
+
+ next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
+ while (next_gpe_xrupt) {
+ if (next_gpe_xrupt->interrupt_number == interrupt_number) {
+ return_PTR(next_gpe_xrupt);
+ }
+
+ next_gpe_xrupt = next_gpe_xrupt->next;
+ }
+
+ /* Not found, must allocate a new xrupt descriptor */
+
+ gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
+ if (!gpe_xrupt) {
+ return_PTR(NULL);
+ }
+
+ gpe_xrupt->interrupt_number = interrupt_number;
+
+ /* Install new interrupt descriptor with spin lock */
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ if (acpi_gbl_gpe_xrupt_list_head) {
+ next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
+ while (next_gpe_xrupt->next) {
+ next_gpe_xrupt = next_gpe_xrupt->next;
+ }
+
+ next_gpe_xrupt->next = gpe_xrupt;
+ gpe_xrupt->previous = next_gpe_xrupt;
+ } else {
+ acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
+ }
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+
+ /* Install new interrupt handler if not SCI_INT */
+
+ if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
+ status = acpi_os_install_interrupt_handler(interrupt_number,
+ acpi_ev_gpe_xrupt_handler,
+ gpe_xrupt);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO,
+ "Could not install GPE interrupt handler at level 0x%X",
+ interrupt_number));
+ return_PTR(NULL);
+ }
+ }
+
+ return_PTR(gpe_xrupt);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_delete_gpe_xrupt
+ *
+ * PARAMETERS: gpe_xrupt - A GPE interrupt info block
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated
+ * interrupt handler if not the SCI interrupt.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
+{
+ acpi_status status;
+ acpi_cpu_flags flags;
+
+ ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);
+
+ /* We never want to remove the SCI interrupt handler */
+
+ if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
+ gpe_xrupt->gpe_block_list_head = NULL;
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Disable this interrupt */
+
+ status =
+ acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
+ acpi_ev_gpe_xrupt_handler);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Unlink the interrupt block with lock */
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ if (gpe_xrupt->previous) {
+ gpe_xrupt->previous->next = gpe_xrupt->next;
+ } else {
+ /* No previous, update list head */
+
+ acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next;
+ }
+
+ if (gpe_xrupt->next) {
+ gpe_xrupt->next->previous = gpe_xrupt->previous;
+ }
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+
+ /* Free the block */
+
+ ACPI_FREE(gpe_xrupt);
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_delete_gpe_handlers
+ *
+ * PARAMETERS: gpe_xrupt_info - GPE Interrupt info
+ * gpe_block - Gpe Block info
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Delete all Handler objects found in the GPE data structs.
+ * Used only prior to termination.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+ struct acpi_gpe_block_info *gpe_block,
+ void *context)
+{
+ struct acpi_gpe_event_info *gpe_event_info;
+ u32 i;
+ u32 j;
+
+ ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);
+
+ /* Examine each GPE Register within the block */
+
+ for (i = 0; i < gpe_block->register_count; i++) {
+
+ /* Now look at the individual GPEs in this byte register */
+
+ for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
+ gpe_event_info = &gpe_block->event_info[((acpi_size) i *
+ ACPI_GPE_REGISTER_WIDTH)
+ + j];
+
+ if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+ ACPI_GPE_DISPATCH_HANDLER) {
+ ACPI_FREE(gpe_event_info->dispatch.handler);
+ gpe_event_info->dispatch.handler = NULL;
+ gpe_event_info->flags &=
+ ~ACPI_GPE_DISPATCH_MASK;
+ }
+ }
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index 9a3cb7045a32..df0aea9a8cfd 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -590,7 +590,7 @@ void acpi_ev_terminate(void)
status = acpi_disable_event(i, 0);
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO,
- "Could not disable fixed event %d",
+ "Could not disable fixed event %u",
(u32) i));
}
}
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index b40757955f9b..cc825023012a 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -142,7 +142,7 @@ acpi_install_fixed_event_handler(u32 event,
if (ACPI_SUCCESS(status))
status = acpi_enable_event(event, 0);
if (ACPI_FAILURE(status)) {
- ACPI_WARNING((AE_INFO, "Could not enable fixed event %X",
+ ACPI_WARNING((AE_INFO, "Could not enable fixed event 0x%X",
event));
/* Remove the handler */
@@ -203,7 +203,7 @@ acpi_remove_fixed_event_handler(u32 event, acpi_event_handler handler)
if (ACPI_FAILURE(status)) {
ACPI_WARNING((AE_INFO,
- "Could not write to fixed event enable register %X",
+ "Could not write to fixed event enable register 0x%X",
event));
} else {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabled fixed event %X\n",
@@ -682,14 +682,13 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
/* Parameter validation */
- if ((!address) || (type > ACPI_GPE_XRUPT_TYPE_MASK)) {
- status = AE_BAD_PARAMETER;
- goto exit;
+ if ((!address) || (type & ~ACPI_GPE_XRUPT_TYPE_MASK)) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
}
status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
if (ACPI_FAILURE(status)) {
- goto exit;
+ return_ACPI_STATUS(status);
}
/* Ensure that we have a valid GPE number */
@@ -720,6 +719,13 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
handler->context = context;
handler->method_node = gpe_event_info->dispatch.method_node;
+ /* Disable the GPE before installing the handler */
+
+ status = acpi_ev_disable_gpe(gpe_event_info);
+ if (ACPI_FAILURE (status)) {
+ goto unlock_and_exit;
+ }
+
/* Install the handler */
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
@@ -733,12 +739,8 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
- exit:
- if (ACPI_FAILURE(status))
- ACPI_EXCEPTION((AE_INFO, status,
- "Installing notify handler failed"));
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 5ff32c78ea2d..d5a5efc043bf 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -69,7 +69,7 @@ acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
acpi_status acpi_enable(void)
{
- acpi_status status = AE_OK;
+ acpi_status status;
ACPI_FUNCTION_TRACE(acpi_enable);
@@ -84,21 +84,30 @@ acpi_status acpi_enable(void)
if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) {
ACPI_DEBUG_PRINT((ACPI_DB_INIT,
"System is already in ACPI mode\n"));
- } else {
- /* Transition to ACPI mode */
+ return_ACPI_STATUS(AE_OK);
+ }
- status = acpi_hw_set_mode(ACPI_SYS_MODE_ACPI);
- if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO,
- "Could not transition to ACPI mode"));
- return_ACPI_STATUS(status);
- }
+ /* Transition to ACPI mode */
- ACPI_DEBUG_PRINT((ACPI_DB_INIT,
- "Transition to ACPI mode successful\n"));
+ status = acpi_hw_set_mode(ACPI_SYS_MODE_ACPI);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO,
+ "Could not transition to ACPI mode"));
+ return_ACPI_STATUS(status);
}
- return_ACPI_STATUS(status);
+ /* Sanity check that transition succeeded */
+
+ if (acpi_hw_get_mode() != ACPI_SYS_MODE_ACPI) {
+ ACPI_ERROR((AE_INFO,
+ "Hardware did not enter ACPI mode"));
+ return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INIT,
+ "Transition to ACPI mode successful\n"));
+
+ return_ACPI_STATUS(AE_OK);
}
ACPI_EXPORT_SYMBOL(acpi_enable)
@@ -203,21 +212,26 @@ ACPI_EXPORT_SYMBOL(acpi_enable_event)
*
* FUNCTION: acpi_set_gpe
*
- * PARAMETERS: gpe_device - Parent GPE Device
+ * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
* gpe_number - GPE level within the GPE block
- * action - Enable or disable
- * Called from ISR or not
+ * action - ACPI_GPE_ENABLE or ACPI_GPE_DISABLE
*
* RETURN: Status
*
- * DESCRIPTION: Enable or disable an ACPI event (general purpose)
+ * DESCRIPTION: Enable or disable an individual GPE. This function bypasses
+ * the reference count mechanism used in the acpi_enable_gpe and
+ * acpi_disable_gpe interfaces -- and should be used with care.
+ *
+ * Note: Typically used to disable a runtime GPE for a short period of time,
+ * then re-enable it, without disturbing the existing reference counts. This
+ * is useful, for example, in the Embedded Controller (EC) driver.
*
******************************************************************************/
acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action)
{
- acpi_status status = AE_OK;
- acpi_cpu_flags flags;
struct acpi_gpe_event_info *gpe_event_info;
+ acpi_status status;
+ acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE(acpi_set_gpe);
@@ -243,7 +257,6 @@ acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action)
break;
default:
- ACPI_ERROR((AE_INFO, "Invalid action\n"));
status = AE_BAD_PARAMETER;
break;
}
@@ -259,25 +272,31 @@ ACPI_EXPORT_SYMBOL(acpi_set_gpe)
*
* FUNCTION: acpi_enable_gpe
*
- * PARAMETERS: gpe_device - Parent GPE Device
+ * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
* gpe_number - GPE level within the GPE block
- * type - Purpose the GPE will be used for
+ * gpe_type - ACPI_GPE_TYPE_RUNTIME or ACPI_GPE_TYPE_WAKE
+ * or both
*
* RETURN: Status
*
- * DESCRIPTION: Take a reference to a GPE and enable it if necessary
+ * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is
+ * hardware-enabled (for runtime GPEs), or the GPE register mask
+ * is updated (for wake GPEs).
*
******************************************************************************/
-acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type)
+acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type)
{
acpi_status status = AE_OK;
- acpi_cpu_flags flags;
struct acpi_gpe_event_info *gpe_event_info;
+ acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE(acpi_enable_gpe);
- if (type & ~ACPI_GPE_TYPE_WAKE_RUN)
+ /* Parameter validation */
+
+ if (!gpe_type || (gpe_type & ~ACPI_GPE_TYPE_WAKE_RUN)) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
@@ -289,26 +308,43 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type)
goto unlock_and_exit;
}
- if (type & ACPI_GPE_TYPE_RUNTIME) {
- if (++gpe_event_info->runtime_count == 1) {
+ if (gpe_type & ACPI_GPE_TYPE_RUNTIME) {
+ if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) {
+ status = AE_LIMIT; /* Too many references */
+ goto unlock_and_exit;
+ }
+
+ gpe_event_info->runtime_count++;
+ if (gpe_event_info->runtime_count == 1) {
status = acpi_ev_enable_gpe(gpe_event_info);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
gpe_event_info->runtime_count--;
+ goto unlock_and_exit;
+ }
}
}
- if (type & ACPI_GPE_TYPE_WAKE) {
+ if (gpe_type & ACPI_GPE_TYPE_WAKE) {
+ /* The GPE must have the ability to wake the system */
+
if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
- status = AE_BAD_PARAMETER;
+ status = AE_TYPE;
+ goto unlock_and_exit;
+ }
+
+ if (gpe_event_info->wakeup_count == ACPI_UINT8_MAX) {
+ status = AE_LIMIT; /* Too many references */
goto unlock_and_exit;
}
/*
- * Wake-up GPEs are only enabled right prior to putting the
- * system into a sleep state.
+ * Update the enable mask on the first wakeup reference. Wake GPEs
+ * are only hardware-enabled just before sleeping.
*/
- if (++gpe_event_info->wakeup_count == 1)
- acpi_ev_update_gpe_enable_masks(gpe_event_info);
+ gpe_event_info->wakeup_count++;
+ if (gpe_event_info->wakeup_count == 1) {
+ (void)acpi_ev_update_gpe_enable_masks(gpe_event_info);
+ }
}
unlock_and_exit:
@@ -321,27 +357,34 @@ ACPI_EXPORT_SYMBOL(acpi_enable_gpe)
*
* FUNCTION: acpi_disable_gpe
*
- * PARAMETERS: gpe_device - Parent GPE Device
+ * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
* gpe_number - GPE level within the GPE block
- * type - Purpose the GPE won't be used for any more
+ * gpe_type - ACPI_GPE_TYPE_RUNTIME or ACPI_GPE_TYPE_WAKE
+ * or both
*
* RETURN: Status
*
- * DESCRIPTION: Release a reference to a GPE and disable it if necessary
+ * DESCRIPTION: Remove a reference to a GPE. When the last reference is
+ * removed, only then is the GPE disabled (for runtime GPEs), or
+ * the GPE mask bit disabled (for wake GPEs)
*
******************************************************************************/
-acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type)
+acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type)
{
acpi_status status = AE_OK;
- acpi_cpu_flags flags;
struct acpi_gpe_event_info *gpe_event_info;
+ acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE(acpi_disable_gpe);
- if (type & ~ACPI_GPE_TYPE_WAKE_RUN)
+ /* Parameter validation */
+
+ if (!gpe_type || (gpe_type & ~ACPI_GPE_TYPE_WAKE_RUN)) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
/* Ensure that we have a valid GPE number */
gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
@@ -350,18 +393,39 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type)
goto unlock_and_exit;
}
- if ((type & ACPI_GPE_TYPE_RUNTIME) && gpe_event_info->runtime_count) {
- if (--gpe_event_info->runtime_count == 0)
+ /* Hardware-disable a runtime GPE on removal of the last reference */
+
+ if (gpe_type & ACPI_GPE_TYPE_RUNTIME) {
+ if (!gpe_event_info->runtime_count) {
+ status = AE_LIMIT; /* There are no references to remove */
+ goto unlock_and_exit;
+ }
+
+ gpe_event_info->runtime_count--;
+ if (!gpe_event_info->runtime_count) {
status = acpi_ev_disable_gpe(gpe_event_info);
+ if (ACPI_FAILURE(status)) {
+ gpe_event_info->runtime_count++;
+ goto unlock_and_exit;
+ }
+ }
}
- if ((type & ACPI_GPE_TYPE_WAKE) && gpe_event_info->wakeup_count) {
- /*
- * Wake-up GPEs are not enabled after leaving system sleep
- * states, so we don't need to disable them here.
- */
- if (--gpe_event_info->wakeup_count == 0)
- acpi_ev_update_gpe_enable_masks(gpe_event_info);
+ /*
+ * Update masks for wake GPE on removal of the last reference.
+ * No need to hardware-disable wake GPEs here, they are not currently
+ * enabled.
+ */
+ if (gpe_type & ACPI_GPE_TYPE_WAKE) {
+ if (!gpe_event_info->wakeup_count) {
+ status = AE_LIMIT; /* There are no references to remove */
+ goto unlock_and_exit;
+ }
+
+ gpe_event_info->wakeup_count--;
+ if (!gpe_event_info->wakeup_count) {
+ (void)acpi_ev_update_gpe_enable_masks(gpe_event_info);
+ }
}
unlock_and_exit:
@@ -465,30 +529,23 @@ ACPI_EXPORT_SYMBOL(acpi_clear_event)
*
* FUNCTION: acpi_clear_gpe
*
- * PARAMETERS: gpe_device - Parent GPE Device
+ * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
* gpe_number - GPE level within the GPE block
- * Flags - Called from an ISR or not
*
* RETURN: Status
*
* DESCRIPTION: Clear an ACPI event (general purpose)
*
******************************************************************************/
-acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
+acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number)
{
acpi_status status = AE_OK;
struct acpi_gpe_event_info *gpe_event_info;
+ acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE(acpi_clear_gpe);
- /* Use semaphore lock if not executing at interrupt level */
-
- if (flags & ACPI_NOT_ISR) {
- status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
/* Ensure that we have a valid GPE number */
@@ -501,9 +558,7 @@ acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
status = acpi_hw_clear_gpe(gpe_event_info);
unlock_and_exit:
- if (flags & ACPI_NOT_ISR) {
- (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
- }
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
@@ -569,9 +624,8 @@ ACPI_EXPORT_SYMBOL(acpi_get_event_status)
*
* FUNCTION: acpi_get_gpe_status
*
- * PARAMETERS: gpe_device - Parent GPE Device
+ * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
* gpe_number - GPE level within the GPE block
- * Flags - Called from an ISR or not
* event_status - Where the current status of the event will
* be returned
*
@@ -582,21 +636,15 @@ ACPI_EXPORT_SYMBOL(acpi_get_event_status)
******************************************************************************/
acpi_status
acpi_get_gpe_status(acpi_handle gpe_device,
- u32 gpe_number, u32 flags, acpi_event_status * event_status)
+ u32 gpe_number, acpi_event_status *event_status)
{
acpi_status status = AE_OK;
struct acpi_gpe_event_info *gpe_event_info;
+ acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE(acpi_get_gpe_status);
- /* Use semaphore lock if not executing at interrupt level */
-
- if (flags & ACPI_NOT_ISR) {
- status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
/* Ensure that we have a valid GPE number */
@@ -614,9 +662,7 @@ acpi_get_gpe_status(acpi_handle gpe_device,
*event_status |= ACPI_EVENT_FLAG_HANDLE;
unlock_and_exit:
- if (flags & ACPI_NOT_ISR) {
- (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
- }
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
@@ -673,20 +719,15 @@ acpi_install_gpe_block(acpi_handle gpe_device,
goto unlock_and_exit;
}
- /* Run the _PRW methods and enable the GPEs */
-
- status = acpi_ev_initialize_gpe_block(node, gpe_block);
- if (ACPI_FAILURE(status)) {
- goto unlock_and_exit;
- }
-
- /* Get the device_object attached to the node */
+ /* Install block in the device_object attached to the node */
obj_desc = acpi_ns_get_attached_object(node);
if (!obj_desc) {
- /* No object, create a new one */
-
+ /*
+ * No object, create a new one (Device nodes do not always have
+ * an attached object)
+ */
obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_DEVICE);
if (!obj_desc) {
status = AE_NO_MEMORY;
@@ -705,10 +746,14 @@ acpi_install_gpe_block(acpi_handle gpe_device,
}
}
- /* Install the GPE block in the device_object */
+ /* Now install the GPE block in the device_object */
obj_desc->device.gpe_block = gpe_block;
+ /* Run the _PRW methods and enable the runtime GPEs in the new block */
+
+ status = acpi_ev_initialize_gpe_block(node, gpe_block);
+
unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(status);
@@ -839,8 +884,7 @@ acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
/* Increment Index by the number of GPEs in this block */
- info->next_block_base_index +=
- (gpe_block->register_count * ACPI_GPE_REGISTER_WIDTH);
+ info->next_block_base_index += gpe_block->gpe_count;
if (info->index < info->next_block_base_index) {
/*
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 7e8b3bedc376..008621c5ad85 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -82,8 +82,9 @@ acpi_ex_add_table(u32 table_index,
struct acpi_namespace_node *parent_node,
union acpi_operand_object **ddb_handle)
{
- acpi_status status;
union acpi_operand_object *obj_desc;
+ acpi_status status;
+ acpi_owner_id owner_id;
ACPI_FUNCTION_TRACE(ex_add_table);
@@ -119,7 +120,14 @@ acpi_ex_add_table(u32 table_index,
acpi_ns_exec_module_code_list();
acpi_ex_enter_interpreter();
- return_ACPI_STATUS(status);
+ /* Update GPEs for any new _PRW or _Lxx/_Exx methods. Ignore errors */
+
+ status = acpi_tb_get_owner_id(table_index, &owner_id);
+ if (ACPI_SUCCESS(status)) {
+ acpi_ev_update_gpes(owner_id);
+ }
+
+ return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
@@ -248,10 +256,8 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
status = acpi_get_table_by_index(table_index, &table);
if (ACPI_SUCCESS(status)) {
- ACPI_INFO((AE_INFO,
- "Dynamic OEM Table Load - [%.4s] OemId [%.6s] OemTableId [%.8s]",
- table->signature, table->oem_id,
- table->oem_table_id));
+ ACPI_INFO((AE_INFO, "Dynamic OEM Table Load:"));
+ acpi_tb_print_table_header(0, table);
}
/* Invoke table handler if present */
@@ -525,6 +531,9 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
return_ACPI_STATUS(status);
}
+ ACPI_INFO((AE_INFO, "Dynamic OEM Table Load:"));
+ acpi_tb_print_table_header(0, table_desc.pointer);
+
/* Remove the reference by added by acpi_ex_store above */
acpi_ut_remove_reference(ddb_handle);
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index bda7aed0404b..b73bc50c5b76 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -650,7 +650,7 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,
default:
ACPI_ERROR((AE_INFO,
- "Bad destination type during conversion: %X",
+ "Bad destination type during conversion: 0x%X",
destination_type));
status = AE_AML_INTERNAL;
break;
@@ -665,7 +665,7 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,
default:
ACPI_ERROR((AE_INFO,
- "Unknown Target type ID 0x%X AmlOpcode %X DestType %s",
+ "Unknown Target type ID 0x%X AmlOpcode 0x%X DestType %s",
GET_CURRENT_ARG_TYPE(walk_state->op_info->
runtime_args),
walk_state->opcode,
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 0aa57d938698..3c61b48c73f5 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -306,12 +306,12 @@ acpi_ex_create_region(u8 * aml_start,
*/
if ((region_space >= ACPI_NUM_PREDEFINED_REGIONS) &&
(region_space < ACPI_USER_REGION_BEGIN)) {
- ACPI_ERROR((AE_INFO, "Invalid AddressSpace type %X",
+ ACPI_ERROR((AE_INFO, "Invalid AddressSpace type 0x%X",
region_space));
return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
}
- ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Region Type - %s (%X)\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Region Type - %s (0x%X)\n",
acpi_ut_get_region_name(region_space), region_space));
/* Create the region descriptor */
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
new file mode 100644
index 000000000000..be8c98b480d7
--- /dev/null
+++ b/drivers/acpi/acpica/exdebug.c
@@ -0,0 +1,261 @@
+/******************************************************************************
+ *
+ * Module Name: exdebug - Support for stores to the AML Debug Object
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2010, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acinterp.h"
+
+#define _COMPONENT ACPI_EXECUTER
+ACPI_MODULE_NAME("exdebug")
+
+#ifndef ACPI_NO_ERROR_MESSAGES
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_do_debug_object
+ *
+ * PARAMETERS: source_desc - Object to be output to "Debug Object"
+ * Level - Indentation level (used for packages)
+ * Index - Current package element, zero if not pkg
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Handles stores to the AML Debug Object. For example:
+ * Store(INT1, Debug)
+ *
+ * This function is not compiled if ACPI_NO_ERROR_MESSAGES is set.
+ *
+ * This function is only enabled if acpi_gbl_enable_aml_debug_object is set, or
+ * if ACPI_LV_DEBUG_OBJECT is set in the acpi_dbg_level. Thus, in the normal
+ * operational case, stores to the debug object are ignored but can be easily
+ * enabled if necessary.
+ *
+ ******************************************************************************/
+void
+acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
+ u32 level, u32 index)
+{
+ u32 i;
+
+ ACPI_FUNCTION_TRACE_PTR(ex_do_debug_object, source_desc);
+
+ /* Output must be enabled via the debug_object global or the dbg_level */
+
+ if (!acpi_gbl_enable_aml_debug_object &&
+ !(acpi_dbg_level & ACPI_LV_DEBUG_OBJECT)) {
+ return_VOID;
+ }
+
+ /*
+ * Print line header as long as we are not in the middle of an
+ * object display
+ */
+ if (!((level > 0) && index == 0)) {
+ acpi_os_printf("[ACPI Debug] %*s", level, " ");
+ }
+
+ /* Display the index for package output only */
+
+ if (index > 0) {
+ acpi_os_printf("(%.2u) ", index - 1);
+ }
+
+ if (!source_desc) {
+ acpi_os_printf("[Null Object]\n");
+ return_VOID;
+ }
+
+ if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_OPERAND) {
+ acpi_os_printf("%s ",
+ acpi_ut_get_object_type_name(source_desc));
+
+ if (!acpi_ut_valid_internal_object(source_desc)) {
+ acpi_os_printf("%p, Invalid Internal Object!\n",
+ source_desc);
+ return_VOID;
+ }
+ } else if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) ==
+ ACPI_DESC_TYPE_NAMED) {
+ acpi_os_printf("%s: %p\n",
+ acpi_ut_get_type_name(((struct
+ acpi_namespace_node *)
+ source_desc)->type),
+ source_desc);
+ return_VOID;
+ } else {
+ return_VOID;
+ }
+
+ /* source_desc is of type ACPI_DESC_TYPE_OPERAND */
+
+ switch (source_desc->common.type) {
+ case ACPI_TYPE_INTEGER:
+
+ /* Output correct integer width */
+
+ if (acpi_gbl_integer_byte_width == 4) {
+ acpi_os_printf("0x%8.8X\n",
+ (u32)source_desc->integer.value);
+ } else {
+ acpi_os_printf("0x%8.8X%8.8X\n",
+ ACPI_FORMAT_UINT64(source_desc->integer.
+ value));
+ }
+ break;
+
+ case ACPI_TYPE_BUFFER:
+
+ acpi_os_printf("[0x%.2X]\n", (u32)source_desc->buffer.length);
+ acpi_ut_dump_buffer2(source_desc->buffer.pointer,
+ (source_desc->buffer.length < 256) ?
+ source_desc->buffer.length : 256,
+ DB_BYTE_DISPLAY);
+ break;
+
+ case ACPI_TYPE_STRING:
+
+ acpi_os_printf("[0x%.2X] \"%s\"\n",
+ source_desc->string.length,
+ source_desc->string.pointer);
+ break;
+
+ case ACPI_TYPE_PACKAGE:
+
+ acpi_os_printf("[Contains 0x%.2X Elements]\n",
+ source_desc->package.count);
+
+ /* Output the entire contents of the package */
+
+ for (i = 0; i < source_desc->package.count; i++) {
+ acpi_ex_do_debug_object(source_desc->package.
+ elements[i], level + 4, i + 1);
+ }
+ break;
+
+ case ACPI_TYPE_LOCAL_REFERENCE:
+
+ acpi_os_printf("[%s] ",
+ acpi_ut_get_reference_name(source_desc));
+
+ /* Decode the reference */
+
+ switch (source_desc->reference.class) {
+ case ACPI_REFCLASS_INDEX:
+
+ acpi_os_printf("0x%X\n", source_desc->reference.value);
+ break;
+
+ case ACPI_REFCLASS_TABLE:
+
+ /* Case for ddb_handle */
+
+ acpi_os_printf("Table Index 0x%X\n",
+ source_desc->reference.value);
+ return;
+
+ default:
+ break;
+ }
+
+ acpi_os_printf(" ");
+
+ /* Check for valid node first, then valid object */
+
+ if (source_desc->reference.node) {
+ if (ACPI_GET_DESCRIPTOR_TYPE
+ (source_desc->reference.node) !=
+ ACPI_DESC_TYPE_NAMED) {
+ acpi_os_printf
+ (" %p - Not a valid namespace node\n",
+ source_desc->reference.node);
+ } else {
+ acpi_os_printf("Node %p [%4.4s] ",
+ source_desc->reference.node,
+ (source_desc->reference.node)->
+ name.ascii);
+
+ switch ((source_desc->reference.node)->type) {
+
+ /* These types have no attached object */
+
+ case ACPI_TYPE_DEVICE:
+ acpi_os_printf("Device\n");
+ break;
+
+ case ACPI_TYPE_THERMAL:
+ acpi_os_printf("Thermal Zone\n");
+ break;
+
+ default:
+ acpi_ex_do_debug_object((source_desc->
+ reference.
+ node)->object,
+ level + 4, 0);
+ break;
+ }
+ }
+ } else if (source_desc->reference.object) {
+ if (ACPI_GET_DESCRIPTOR_TYPE
+ (source_desc->reference.object) ==
+ ACPI_DESC_TYPE_NAMED) {
+ acpi_ex_do_debug_object(((struct
+ acpi_namespace_node *)
+ source_desc->reference.
+ object)->object,
+ level + 4, 0);
+ } else {
+ acpi_ex_do_debug_object(source_desc->reference.
+ object, level + 4, 0);
+ }
+ }
+ break;
+
+ default:
+
+ acpi_os_printf("%p\n", source_desc);
+ break;
+ }
+
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC, "\n"));
+ return_VOID;
+}
+#endif
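[Editorial sketch] The new exdebug.c above centralizes Debug-object stores and only prints when one of two knobs is set. A short sketch of how a host build could enable that output; the wrapper function is hypothetical, while the two globals and the flag are the ones tested in acpi_ex_do_debug_object above:

	#include <acpi/acpi.h>

	static void example_enable_aml_debug_output(void)
	{
		/* Either setting is sufficient for Debug-object stores to print */
		acpi_gbl_enable_aml_debug_object = TRUE;

		/* ...or include ACPI_LV_DEBUG_OBJECT in the debug level */
		acpi_dbg_level |= ACPI_LV_DEBUG_OBJECT;
	}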
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index 6c79fecbee42..f17d2ff0031b 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -281,7 +281,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
if (source_desc->buffer.length < length) {
ACPI_ERROR((AE_INFO,
- "SMBus or IPMI write requires Buffer of length %X, found length %X",
+ "SMBus or IPMI write requires Buffer of length %u, found length %u",
length, source_desc->buffer.length));
return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index f68a216168be..a6dc26f0b3be 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -94,7 +94,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
/* We must have a valid region */
if (rgn_desc->common.type != ACPI_TYPE_REGION) {
- ACPI_ERROR((AE_INFO, "Needed Region, found type %X (%s)",
+ ACPI_ERROR((AE_INFO, "Needed Region, found type 0x%X (%s)",
rgn_desc->common.type,
acpi_ut_get_object_type_name(rgn_desc)));
@@ -175,7 +175,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
* byte, and a field with Dword access specified.
*/
ACPI_ERROR((AE_INFO,
- "Field [%4.4s] access width (%d bytes) too large for region [%4.4s] (length %X)",
+ "Field [%4.4s] access width (%u bytes) too large for region [%4.4s] (length %u)",
acpi_ut_get_node_name(obj_desc->
common_field.node),
obj_desc->common_field.access_byte_width,
@@ -189,7 +189,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
* exceeds region length, indicate an error
*/
ACPI_ERROR((AE_INFO,
- "Field [%4.4s] Base+Offset+Width %X+%X+%X is beyond end of region [%4.4s] (length %X)",
+ "Field [%4.4s] Base+Offset+Width %u+%u+%u is beyond end of region [%4.4s] (length %u)",
acpi_ut_get_node_name(obj_desc->common_field.node),
obj_desc->common_field.base_byte_offset,
field_datum_byte_offset,
@@ -281,13 +281,13 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
if (ACPI_FAILURE(status)) {
if (status == AE_NOT_IMPLEMENTED) {
ACPI_ERROR((AE_INFO,
- "Region %s(%X) not implemented",
+ "Region %s(0x%X) not implemented",
acpi_ut_get_region_name(rgn_desc->region.
space_id),
rgn_desc->region.space_id));
} else if (status == AE_NOT_EXIST) {
ACPI_ERROR((AE_INFO,
- "Region %s(%X) has no handler",
+ "Region %s(0x%X) has no handler",
acpi_ut_get_region_name(rgn_desc->region.
space_id),
rgn_desc->region.space_id));
@@ -525,7 +525,7 @@ acpi_ex_field_datum_io(union acpi_operand_object *obj_desc,
default:
- ACPI_ERROR((AE_INFO, "Wrong object type in field I/O %X",
+ ACPI_ERROR((AE_INFO, "Wrong object type in field I/O %u",
obj_desc->common.type));
status = AE_AML_INTERNAL;
break;
@@ -630,7 +630,7 @@ acpi_ex_write_with_update_rule(union acpi_operand_object *obj_desc,
default:
ACPI_ERROR((AE_INFO,
- "Unknown UpdateRule value: %X",
+ "Unknown UpdateRule value: 0x%X",
(obj_desc->common_field.
field_flags &
AML_FIELD_UPDATE_RULE_MASK)));
@@ -689,7 +689,7 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
if (buffer_length <
ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length)) {
ACPI_ERROR((AE_INFO,
- "Field size %X (bits) is too large for buffer (%X)",
+ "Field size %u (bits) is too large for buffer (%u)",
obj_desc->common_field.bit_length, buffer_length));
return_ACPI_STATUS(AE_BUFFER_OVERFLOW);
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index c5bb1eeed2df..95db4be0877b 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -99,7 +99,7 @@ acpi_ex_get_object_reference(union acpi_operand_object *obj_desc,
default:
- ACPI_ERROR((AE_INFO, "Unknown Reference Class %2.2X",
+ ACPI_ERROR((AE_INFO, "Unknown Reference Class 0x%2.2X",
obj_desc->reference.class));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
@@ -115,7 +115,7 @@ acpi_ex_get_object_reference(union acpi_operand_object *obj_desc,
default:
- ACPI_ERROR((AE_INFO, "Invalid descriptor type %X",
+ ACPI_ERROR((AE_INFO, "Invalid descriptor type 0x%X",
ACPI_GET_DESCRIPTOR_TYPE(obj_desc)));
return_ACPI_STATUS(AE_TYPE);
}
@@ -276,7 +276,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
break;
default:
- ACPI_ERROR((AE_INFO, "Invalid object type: %X",
+ ACPI_ERROR((AE_INFO, "Invalid object type: 0x%X",
operand0->common.type));
status = AE_AML_INTERNAL;
}
@@ -378,7 +378,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
/* Invalid object type, should not happen here */
- ACPI_ERROR((AE_INFO, "Invalid object type: %X",
+ ACPI_ERROR((AE_INFO, "Invalid object type: 0x%X",
operand0->common.type));
status = AE_AML_INTERNAL;
goto cleanup;
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index 7116bc86494d..f73be97043c0 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -85,10 +85,10 @@ void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc)
(obj_desc->mutex.prev)->mutex.next = obj_desc->mutex.next;
/*
- * Migrate the previous sync level associated with this mutex to the
- * previous mutex on the list so that it may be preserved. This handles
- * the case where several mutexes have been acquired at the same level,
- * but are not released in opposite order.
+ * Migrate the previous sync level associated with this mutex to
+ * the previous mutex on the list so that it may be preserved.
+ * This handles the case where several mutexes have been acquired
+ * at the same level, but are not released in opposite order.
*/
(obj_desc->mutex.prev)->mutex.original_sync_level =
obj_desc->mutex.original_sync_level;
@@ -101,8 +101,8 @@ void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc)
*
* FUNCTION: acpi_ex_link_mutex
*
- * PARAMETERS: obj_desc - The mutex to be linked
- * Thread - Current executing thread object
+ * PARAMETERS: obj_desc - The mutex to be linked
+ * Thread - Current executing thread object
*
* RETURN: None
*
@@ -138,9 +138,9 @@ acpi_ex_link_mutex(union acpi_operand_object *obj_desc,
*
* FUNCTION: acpi_ex_acquire_mutex_object
*
- * PARAMETERS: time_desc - Timeout in milliseconds
+ * PARAMETERS: Timeout - Timeout in milliseconds
* obj_desc - Mutex object
- * Thread - Current thread state
+ * thread_id - Current thread state
*
* RETURN: Status
*
@@ -234,7 +234,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- /* Must have a valid thread ID */
+ /* Must have a valid thread state struct */
if (!walk_state->thread) {
ACPI_ERROR((AE_INFO,
@@ -249,7 +249,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
*/
if (walk_state->thread->current_sync_level > obj_desc->mutex.sync_level) {
ACPI_ERROR((AE_INFO,
- "Cannot acquire Mutex [%4.4s], current SyncLevel is too large (%d)",
+ "Cannot acquire Mutex [%4.4s], current SyncLevel is too large (%u)",
acpi_ut_get_node_name(obj_desc->mutex.node),
walk_state->thread->current_sync_level));
return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
@@ -359,6 +359,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
{
acpi_status status = AE_OK;
u8 previous_sync_level;
+ struct acpi_thread_state *owner_thread;
ACPI_FUNCTION_TRACE(ex_release_mutex);
@@ -366,9 +367,11 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
+ owner_thread = obj_desc->mutex.owner_thread;
+
/* The mutex must have been previously acquired in order to release it */
- if (!obj_desc->mutex.owner_thread) {
+ if (!owner_thread) {
ACPI_ERROR((AE_INFO,
"Cannot release Mutex [%4.4s], not acquired",
acpi_ut_get_node_name(obj_desc->mutex.node)));
@@ -387,16 +390,13 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
* The Mutex is owned, but this thread must be the owner.
* Special case for Global Lock, any thread can release
*/
- if ((obj_desc->mutex.owner_thread->thread_id !=
- walk_state->thread->thread_id)
- && (obj_desc != acpi_gbl_global_lock_mutex)) {
+ if ((owner_thread->thread_id != walk_state->thread->thread_id) &&
+ (obj_desc != acpi_gbl_global_lock_mutex)) {
ACPI_ERROR((AE_INFO,
"Thread %p cannot release Mutex [%4.4s] acquired by thread %p",
ACPI_CAST_PTR(void, walk_state->thread->thread_id),
acpi_ut_get_node_name(obj_desc->mutex.node),
- ACPI_CAST_PTR(void,
- obj_desc->mutex.owner_thread->
- thread_id)));
+ ACPI_CAST_PTR(void, owner_thread->thread_id)));
return_ACPI_STATUS(AE_AML_NOT_OWNER);
}
@@ -407,10 +407,9 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
* different level can only mean that the mutex ordering rule is being
* violated. This behavior is clarified in ACPI 4.0 specification.
*/
- if (obj_desc->mutex.sync_level !=
- walk_state->thread->current_sync_level) {
+ if (obj_desc->mutex.sync_level != owner_thread->current_sync_level) {
ACPI_ERROR((AE_INFO,
- "Cannot release Mutex [%4.4s], SyncLevel mismatch: mutex %d current %d",
+ "Cannot release Mutex [%4.4s], SyncLevel mismatch: mutex %u current %u",
acpi_ut_get_node_name(obj_desc->mutex.node),
obj_desc->mutex.sync_level,
walk_state->thread->current_sync_level));
@@ -423,7 +422,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
* acquired, but are not released in reverse order.
*/
previous_sync_level =
- walk_state->thread->acquired_mutex_list->mutex.original_sync_level;
+ owner_thread->acquired_mutex_list->mutex.original_sync_level;
status = acpi_ex_release_mutex_object(obj_desc);
if (ACPI_FAILURE(status)) {
@@ -434,8 +433,9 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
/* Restore the previous sync_level */
- walk_state->thread->current_sync_level = previous_sync_level;
+ owner_thread->current_sync_level = previous_sync_level;
}
+
return_ACPI_STATUS(status);
}
@@ -443,7 +443,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
*
* FUNCTION: acpi_ex_release_all_mutexes
*
- * PARAMETERS: Thread - Current executing thread object
+ * PARAMETERS: Thread - Current executing thread object
*
* RETURN: Status
*
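[Editorial sketch] The exmutex.c hunks above cache obj_desc->mutex.owner_thread and restore the SyncLevel of the owning thread rather than the walking thread. A rough, simplified sketch of the ACPI 4.0 ordering rule being enforced, using only field names visible in the diff; the helper itself is hypothetical (the real release path also handles the Global Lock and owner checks):

	static acpi_status example_check_and_restore_sync_level(
		union acpi_operand_object *obj_desc,
		struct acpi_thread_state *owner_thread)
	{
		/* Release must happen at the level the mutex was acquired at */
		if (obj_desc->mutex.sync_level != owner_thread->current_sync_level) {
			return (AE_AML_MUTEX_ORDER);
		}

		/* Roll back to the level recorded by the most recent Acquire */
		owner_thread->current_sync_level =
		    owner_thread->acquired_mutex_list->mutex.original_sync_level;
		return (AE_OK);
	}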
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index 679f308c5a89..d11e539ef763 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -102,7 +102,7 @@ static char *acpi_ex_allocate_name_string(u32 prefix_count, u32 num_name_segs)
name_string = ACPI_ALLOCATE(size_needed);
if (!name_string) {
ACPI_ERROR((AE_INFO,
- "Could not allocate size %d", size_needed));
+ "Could not allocate size %u", size_needed));
return_PTR(NULL);
}
@@ -216,7 +216,7 @@ static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string)
*/
status = AE_AML_BAD_NAME;
ACPI_ERROR((AE_INFO,
- "Bad character %02x in name, at %p",
+ "Bad character 0x%02x in name, at %p",
*aml_address, aml_address));
}
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index 99adbab5acbf..84e4d185aa25 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -110,7 +110,7 @@ acpi_status acpi_ex_opcode_0A_0T_1R(struct acpi_walk_state *walk_state)
default: /* Unknown opcode */
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
break;
@@ -173,7 +173,7 @@ acpi_status acpi_ex_opcode_1A_0T_0R(struct acpi_walk_state *walk_state)
case AML_SLEEP_OP: /* Sleep (msec_time) */
- status = acpi_ex_system_do_suspend(operand[0]->integer.value);
+ status = acpi_ex_system_do_sleep(operand[0]->integer.value);
break;
case AML_STALL_OP: /* Stall (usec_time) */
@@ -189,7 +189,7 @@ acpi_status acpi_ex_opcode_1A_0T_0R(struct acpi_walk_state *walk_state)
default: /* Unknown opcode */
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
break;
@@ -229,7 +229,7 @@ acpi_status acpi_ex_opcode_1A_1T_0R(struct acpi_walk_state *walk_state)
default: /* Unknown opcode */
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
@@ -399,7 +399,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
if (digit > 0) {
ACPI_ERROR((AE_INFO,
- "Integer too large to convert to BCD: %8.8X%8.8X",
+ "Integer too large to convert to BCD: 0x%8.8X%8.8X",
ACPI_FORMAT_UINT64(operand[0]->
integer.value)));
status = AE_AML_NUMERIC_OVERFLOW;
@@ -540,7 +540,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
default: /* Unknown opcode */
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
@@ -979,7 +979,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
default:
ACPI_ERROR((AE_INFO,
- "Unknown Index TargetType %X in reference object %p",
+ "Unknown Index TargetType 0x%X in reference object %p",
operand[0]->reference.
target_type, operand[0]));
status = AE_AML_OPERAND_TYPE;
@@ -1007,7 +1007,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
default:
ACPI_ERROR((AE_INFO,
- "Unknown class in reference(%p) - %2.2X",
+ "Unknown class in reference(%p) - 0x%2.2X",
operand[0],
operand[0]->reference.class));
@@ -1019,7 +1019,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
default:
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index 22841bbbe63c..10e104cf0fb9 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -119,33 +119,6 @@ acpi_status acpi_ex_opcode_2A_0T_0R(struct acpi_walk_state *walk_state)
status = AE_AML_OPERAND_TYPE;
break;
}
-#ifdef ACPI_GPE_NOTIFY_CHECK
- /*
- * GPE method wake/notify check. Here, we want to ensure that we
- * don't receive any "DeviceWake" Notifies from a GPE _Lxx or _Exx
- * GPE method during system runtime. If we do, the GPE is marked
- * as "wake-only" and disabled.
- *
- * 1) Is the Notify() value == device_wake?
- * 2) Is this a GPE deferred method? (An _Lxx or _Exx method)
- * 3) Did the original GPE happen at system runtime?
- * (versus during wake)
- *
- * If all three cases are true, this is a wake-only GPE that should
- * be disabled at runtime.
- */
- if (value == 2) { /* device_wake */
- status =
- acpi_ev_check_for_wake_only_gpe(walk_state->
- gpe_event_info);
- if (ACPI_FAILURE(status)) {
-
- /* AE_WAKE_ONLY_GPE only error, means ignore this notify */
-
- return_ACPI_STATUS(AE_OK)
- }
- }
-#endif
/*
* Dispatch the notify to the appropriate handler
@@ -159,7 +132,7 @@ acpi_status acpi_ex_opcode_2A_0T_0R(struct acpi_walk_state *walk_state)
default:
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
}
@@ -224,7 +197,7 @@ acpi_status acpi_ex_opcode_2A_2T_1R(struct acpi_walk_state *walk_state)
default:
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
@@ -441,7 +414,7 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Index (%X%8.8X) is beyond end of object",
+ "Index (0x%8.8X%8.8X) is beyond end of object",
ACPI_FORMAT_UINT64(index)));
goto cleanup;
}
@@ -464,7 +437,7 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
default:
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
break;
@@ -572,7 +545,7 @@ acpi_status acpi_ex_opcode_2A_0T_1R(struct acpi_walk_state *walk_state)
default:
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index 8bb1012ef44e..7a08d23befcd 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -119,7 +119,7 @@ acpi_status acpi_ex_opcode_3A_0T_0R(struct acpi_walk_state *walk_state)
default:
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
@@ -244,7 +244,7 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
default:
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index f256b6a25f2e..4b50730cf9a0 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -245,7 +245,7 @@ acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state * walk_state)
index = operand[5]->integer.value;
if (index >= operand[0]->package.count) {
ACPI_ERROR((AE_INFO,
- "Index (%X%8.8X) beyond package end (%X)",
+ "Index (0x%8.8X%8.8X) beyond package end (0x%X)",
ACPI_FORMAT_UINT64(index),
operand[0]->package.count));
status = AE_AML_PACKAGE_LIMIT;
@@ -314,7 +314,7 @@ acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state * walk_state)
default:
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 2fbfe51fb141..25059dace0ad 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -275,7 +275,7 @@ acpi_ex_decode_field_access(union acpi_operand_object *obj_desc,
default:
/* Invalid field access type */
- ACPI_ERROR((AE_INFO, "Unknown field access type %X", access));
+ ACPI_ERROR((AE_INFO, "Unknown field access type 0x%X", access));
return_UINT32(0);
}
@@ -430,7 +430,7 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
type = acpi_ns_get_type(info->region_node);
if (type != ACPI_TYPE_REGION) {
ACPI_ERROR((AE_INFO,
- "Needed Region, found type %X (%s)",
+ "Needed Region, found type 0x%X (%s)",
type, acpi_ut_get_type_name(type)));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 486b2e5661b6..531000fc77d2 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -105,7 +105,7 @@ acpi_ex_system_memory_space_handler(u32 function,
break;
default:
- ACPI_ERROR((AE_INFO, "Invalid SystemMemory width %d",
+ ACPI_ERROR((AE_INFO, "Invalid SystemMemory width %u",
bit_width));
return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
}
@@ -173,7 +173,7 @@ acpi_ex_system_memory_space_handler(u32 function,
mem_info->mapped_logical_address = acpi_os_map_memory((acpi_physical_address) address, map_length);
if (!mem_info->mapped_logical_address) {
ACPI_ERROR((AE_INFO,
- "Could not map memory at %8.8X%8.8X, size %X",
+ "Could not map memory at 0x%8.8X%8.8X, size %u",
ACPI_FORMAT_NATIVE_UINT(address),
(u32) map_length));
mem_info->mapped_length = 0;
@@ -491,8 +491,10 @@ acpi_ex_data_table_space_handler(u32 function,
{
ACPI_FUNCTION_TRACE(ex_data_table_space_handler);
- /* Perform the memory read or write */
-
+ /*
+ * Perform the memory read or write. The bit_width was already
+ * validated.
+ */
switch (function) {
case ACPI_READ:
@@ -502,9 +504,14 @@ acpi_ex_data_table_space_handler(u32 function,
break;
case ACPI_WRITE:
+
+ ACPI_MEMCPY(ACPI_PHYSADDR_TO_PTR(address),
+ ACPI_CAST_PTR(char, value), ACPI_DIV_8(bit_width));
+ break;
+
default:
- return_ACPI_STATUS(AE_SUPPORT);
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
}
return_ACPI_STATUS(AE_OK);
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index fdc1b27999ef..1fa4289a687e 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -252,7 +252,7 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
/* No named references are allowed here */
ACPI_ERROR((AE_INFO,
- "Unsupported Reference type %X",
+ "Unsupported Reference type 0x%X",
source_desc->reference.class));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
@@ -264,7 +264,7 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
/* Default case is for unknown types */
ACPI_ERROR((AE_INFO,
- "Node %p - Unknown object type %X",
+ "Node %p - Unknown object type 0x%X",
node, entry_type));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index fdd6a7079b97..7ca35ea8acea 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -231,7 +231,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
/* Invalid reference object */
ACPI_ERROR((AE_INFO,
- "Unknown TargetType %X in Index/Reference object %p",
+ "Unknown TargetType 0x%X in Index/Reference object %p",
stack_desc->reference.target_type,
stack_desc));
status = AE_AML_INTERNAL;
@@ -273,8 +273,8 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
default:
ACPI_ERROR((AE_INFO,
- "Unknown Reference type %X in %p", ref_type,
- stack_desc));
+ "Unknown Reference type 0x%X in %p",
+ ref_type, stack_desc));
status = AE_AML_INTERNAL;
break;
}
@@ -403,7 +403,8 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
if (ACPI_GET_DESCRIPTOR_TYPE(node) !=
ACPI_DESC_TYPE_NAMED) {
- ACPI_ERROR((AE_INFO, "Not a NS node %p [%s]",
+ ACPI_ERROR((AE_INFO,
+ "Not a namespace node %p [%s]",
node,
acpi_ut_get_descriptor_name(node)));
return_ACPI_STATUS(AE_AML_INTERNAL);
@@ -507,7 +508,7 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
default:
ACPI_ERROR((AE_INFO,
- "Unknown Reference Class %2.2X",
+ "Unknown Reference Class 0x%2.2X",
obj_desc->reference.class));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index c5ecd615f145..8c97cfd6a0fd 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -153,7 +153,7 @@ acpi_ex_resolve_operands(u16 opcode,
arg_types = op_info->runtime_args;
if (arg_types == ARGI_INVALID_OPCODE) {
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X", opcode));
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X", opcode));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
@@ -218,7 +218,7 @@ acpi_ex_resolve_operands(u16 opcode,
if (!acpi_ut_valid_object_type(object_type)) {
ACPI_ERROR((AE_INFO,
- "Bad operand object type [%X]",
+ "Bad operand object type [0x%X]",
object_type));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
@@ -253,7 +253,7 @@ acpi_ex_resolve_operands(u16 opcode,
default:
ACPI_ERROR((AE_INFO,
- "Unknown Reference Class %2.2X in %p",
+ "Unknown Reference Class 0x%2.2X in %p",
obj_desc->reference.class,
obj_desc));
@@ -665,7 +665,7 @@ acpi_ex_resolve_operands(u16 opcode,
/* Unknown type */
ACPI_ERROR((AE_INFO,
- "Internal - Unknown ARGI (required operand) type %X",
+ "Internal - Unknown ARGI (required operand) type 0x%X",
this_arg_type));
return_ACPI_STATUS(AE_BAD_PARAMETER);
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index 702b9ecfd44b..1624436ba4c5 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: exstore - AML Interpreter object store support
@@ -53,10 +52,6 @@
ACPI_MODULE_NAME("exstore")
/* Local prototypes */
-static void
-acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
- u32 level, u32 index);
-
static acpi_status
acpi_ex_store_object_to_index(union acpi_operand_object *val_desc,
union acpi_operand_object *dest_desc,
@@ -64,215 +59,6 @@ acpi_ex_store_object_to_index(union acpi_operand_object *val_desc,
/*******************************************************************************
*
- * FUNCTION: acpi_ex_do_debug_object
- *
- * PARAMETERS: source_desc - Value to be stored
- * Level - Indentation level (used for packages)
- * Index - Current package element, zero if not pkg
- *
- * RETURN: None
- *
- * DESCRIPTION: Handles stores to the Debug Object.
- *
- ******************************************************************************/
-
-static void
-acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
- u32 level, u32 index)
-{
- u32 i;
-
- ACPI_FUNCTION_TRACE_PTR(ex_do_debug_object, source_desc);
-
- /* Print line header as long as we are not in the middle of an object display */
-
- if (!((level > 0) && index == 0)) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[ACPI Debug] %*s",
- level, " "));
- }
-
- /* Display index for package output only */
-
- if (index > 0) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
- "(%.2u) ", index - 1));
- }
-
- if (!source_desc) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[Null Object]\n"));
- return_VOID;
- }
-
- if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_OPERAND) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%s ",
- acpi_ut_get_object_type_name
- (source_desc)));
-
- if (!acpi_ut_valid_internal_object(source_desc)) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
- "%p, Invalid Internal Object!\n",
- source_desc));
- return_VOID;
- }
- } else if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) ==
- ACPI_DESC_TYPE_NAMED) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%s: %p\n",
- acpi_ut_get_type_name(((struct
- acpi_namespace_node
- *)source_desc)->
- type),
- source_desc));
- return_VOID;
- } else {
- return_VOID;
- }
-
- /* source_desc is of type ACPI_DESC_TYPE_OPERAND */
-
- switch (source_desc->common.type) {
- case ACPI_TYPE_INTEGER:
-
- /* Output correct integer width */
-
- if (acpi_gbl_integer_byte_width == 4) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "0x%8.8X\n",
- (u32) source_desc->integer.
- value));
- } else {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
- "0x%8.8X%8.8X\n",
- ACPI_FORMAT_UINT64(source_desc->
- integer.
- value)));
- }
- break;
-
- case ACPI_TYPE_BUFFER:
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[0x%.2X]\n",
- (u32) source_desc->buffer.length));
- ACPI_DUMP_BUFFER(source_desc->buffer.pointer,
- (source_desc->buffer.length <
- 256) ? source_desc->buffer.length : 256);
- break;
-
- case ACPI_TYPE_STRING:
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[0x%.2X] \"%s\"\n",
- source_desc->string.length,
- source_desc->string.pointer));
- break;
-
- case ACPI_TYPE_PACKAGE:
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
- "[Contains 0x%.2X Elements]\n",
- source_desc->package.count));
-
- /* Output the entire contents of the package */
-
- for (i = 0; i < source_desc->package.count; i++) {
- acpi_ex_do_debug_object(source_desc->package.
- elements[i], level + 4, i + 1);
- }
- break;
-
- case ACPI_TYPE_LOCAL_REFERENCE:
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[%s] ",
- acpi_ut_get_reference_name(source_desc)));
-
- /* Decode the reference */
-
- switch (source_desc->reference.class) {
- case ACPI_REFCLASS_INDEX:
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "0x%X\n",
- source_desc->reference.value));
- break;
-
- case ACPI_REFCLASS_TABLE:
-
- /* Case for ddb_handle */
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
- "Table Index 0x%X\n",
- source_desc->reference.value));
- return;
-
- default:
- break;
- }
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, " "));
-
- /* Check for valid node first, then valid object */
-
- if (source_desc->reference.node) {
- if (ACPI_GET_DESCRIPTOR_TYPE
- (source_desc->reference.node) !=
- ACPI_DESC_TYPE_NAMED) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
- " %p - Not a valid namespace node\n",
- source_desc->reference.
- node));
- } else {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
- "Node %p [%4.4s] ",
- source_desc->reference.
- node,
- (source_desc->reference.
- node)->name.ascii));
-
- switch ((source_desc->reference.node)->type) {
-
- /* These types have no attached object */
-
- case ACPI_TYPE_DEVICE:
- acpi_os_printf("Device\n");
- break;
-
- case ACPI_TYPE_THERMAL:
- acpi_os_printf("Thermal Zone\n");
- break;
-
- default:
- acpi_ex_do_debug_object((source_desc->
- reference.
- node)->object,
- level + 4, 0);
- break;
- }
- }
- } else if (source_desc->reference.object) {
- if (ACPI_GET_DESCRIPTOR_TYPE
- (source_desc->reference.object) ==
- ACPI_DESC_TYPE_NAMED) {
- acpi_ex_do_debug_object(((struct
- acpi_namespace_node *)
- source_desc->reference.
- object)->object,
- level + 4, 0);
- } else {
- acpi_ex_do_debug_object(source_desc->reference.
- object, level + 4, 0);
- }
- }
- break;
-
- default:
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%p\n",
- source_desc));
- break;
- }
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC, "\n"));
- return_VOID;
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ex_store
*
* PARAMETERS: *source_desc - Value to be stored
@@ -402,12 +188,12 @@ acpi_ex_store(union acpi_operand_object *source_desc,
source_desc,
acpi_ut_get_object_type_name(source_desc)));
- acpi_ex_do_debug_object(source_desc, 0, 0);
+ ACPI_DEBUG_OBJECT(source_desc, 0, 0);
break;
default:
- ACPI_ERROR((AE_INFO, "Unknown Reference Class %2.2X",
+ ACPI_ERROR((AE_INFO, "Unknown Reference Class 0x%2.2X",
ref_desc->reference.class));
ACPI_DUMP_ENTRY(ref_desc, ACPI_LV_INFO);
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index e11b6cb42a57..6d32e09327f1 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -170,7 +170,7 @@ acpi_status acpi_ex_system_do_stall(u32 how_long)
* (ACPI specifies 100 usec as max, but this gives some slack in
* order to support existing BIOSs)
*/
- ACPI_ERROR((AE_INFO, "Time parameter is too large (%d)",
+ ACPI_ERROR((AE_INFO, "Time parameter is too large (%u)",
how_long));
status = AE_AML_OPERAND_VALUE;
} else {
@@ -182,18 +182,18 @@ acpi_status acpi_ex_system_do_stall(u32 how_long)
/*******************************************************************************
*
- * FUNCTION: acpi_ex_system_do_suspend
+ * FUNCTION: acpi_ex_system_do_sleep
*
- * PARAMETERS: how_long - The amount of time to suspend,
+ * PARAMETERS: how_long - The amount of time to sleep,
* in milliseconds
*
* RETURN: None
*
- * DESCRIPTION: Suspend running thread for specified amount of time.
+ * DESCRIPTION: Sleep the running thread for specified amount of time.
*
******************************************************************************/
-acpi_status acpi_ex_system_do_suspend(u64 how_long)
+acpi_status acpi_ex_system_do_sleep(u64 how_long)
{
ACPI_FUNCTION_ENTRY();
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index 679a112a7d26..b44274a0b62c 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -63,7 +63,6 @@ acpi_status acpi_hw_set_mode(u32 mode)
{
acpi_status status;
- u32 retry;
ACPI_FUNCTION_TRACE(hw_set_mode);
@@ -125,24 +124,7 @@ acpi_status acpi_hw_set_mode(u32 mode)
return_ACPI_STATUS(status);
}
- /*
- * Some hardware takes a LONG time to switch modes. Give them 3 sec to
- * do so, but allow faster systems to proceed more quickly.
- */
- retry = 3000;
- while (retry) {
- if (acpi_hw_get_mode() == mode) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Mode %X successfully enabled\n",
- mode));
- return_ACPI_STATUS(AE_OK);
- }
- acpi_os_stall(1000);
- retry--;
- }
-
- ACPI_ERROR((AE_INFO, "Hardware did not change modes"));
- return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
+ return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index ec7fc227b33f..5d1273b660ae 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -299,7 +299,7 @@ struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id)
ACPI_FUNCTION_ENTRY();
if (register_id > ACPI_BITREG_MAX) {
- ACPI_ERROR((AE_INFO, "Invalid BitRegister ID: %X",
+ ACPI_ERROR((AE_INFO, "Invalid BitRegister ID: 0x%X",
register_id));
return (NULL);
}
@@ -413,7 +413,7 @@ acpi_hw_register_read(u32 register_id, u32 * return_value)
break;
default:
- ACPI_ERROR((AE_INFO, "Unknown Register ID: %X", register_id));
+ ACPI_ERROR((AE_INFO, "Unknown Register ID: 0x%X", register_id));
status = AE_BAD_PARAMETER;
break;
}
@@ -549,7 +549,7 @@ acpi_status acpi_hw_register_write(u32 register_id, u32 value)
break;
default:
- ACPI_ERROR((AE_INFO, "Unknown Register ID: %X", register_id));
+ ACPI_ERROR((AE_INFO, "Unknown Register ID: 0x%X", register_id));
status = AE_BAD_PARAMETER;
break;
}
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index 5e6d4dbb8024..36eb803dd9d0 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -245,7 +245,7 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
if ((acpi_gbl_sleep_type_a > ACPI_SLEEP_TYPE_MAX) ||
(acpi_gbl_sleep_type_b > ACPI_SLEEP_TYPE_MAX)) {
- ACPI_ERROR((AE_INFO, "Sleep values out of range: A=%X B=%X",
+ ACPI_ERROR((AE_INFO, "Sleep values out of range: A=0x%X B=0x%X",
acpi_gbl_sleep_type_a, acpi_gbl_sleep_type_b));
return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
}
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index e26c17d4b716..c10d587c1641 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -150,7 +150,7 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
if (last_address > ACPI_UINT16_MAX) {
ACPI_ERROR((AE_INFO,
- "Illegal I/O port address/length above 64K: 0x%p/%X",
+ "Illegal I/O port address/length above 64K: %p/0x%X",
ACPI_CAST_PTR(void, address), byte_width));
return_ACPI_STATUS(AE_LIMIT);
}
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index aa2b80132d0a..3a2814676ac3 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -222,7 +222,7 @@ acpi_status acpi_ns_root_initialize(void)
default:
ACPI_ERROR((AE_INFO,
- "Unsupported initial type value %X",
+ "Unsupported initial type value 0x%X",
init_val->type));
acpi_ut_remove_reference(obj_desc);
obj_desc = NULL;
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 0689d36638d9..2110cc2360f0 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -205,8 +205,8 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
/* Check the node type and name */
if (type > ACPI_TYPE_LOCAL_MAX) {
- ACPI_WARNING((AE_INFO, "Invalid ACPI Object Type %08X",
- type));
+ ACPI_WARNING((AE_INFO,
+ "Invalid ACPI Object Type 0x%08X", type));
}
if (!acpi_ut_valid_acpi_name(this_node->name.integer)) {
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index 959372451635..7dea0031605c 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -107,7 +107,7 @@ acpi_ns_build_external_path(struct acpi_namespace_node *node,
if (index != 0) {
ACPI_ERROR((AE_INFO,
- "Could not construct external pathname; index=%X, size=%X, Path=%s",
+ "Could not construct external pathname; index=%u, size=%u, Path=%s",
(u32) index, (u32) size, &name_buffer[size]));
return (AE_BAD_PARAMETER);
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index 08f8b3f5ccaa..a8e42b5e9463 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -311,7 +311,7 @@ acpi_ns_search_and_enter(u32 target_name,
if (!node || !target_name || !return_node) {
ACPI_ERROR((AE_INFO,
- "Null parameter: Node %p Name %X ReturnNode %p",
+ "Null parameter: Node %p Name 0x%X ReturnNode %p",
node, target_name, return_node));
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index 24d05a87a2a3..bab559712da1 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -276,7 +276,7 @@ u32 acpi_ns_local(acpi_object_type type)
/* Type code out of range */
- ACPI_WARNING((AE_INFO, "Invalid Object Type %X", type));
+ ACPI_WARNING((AE_INFO, "Invalid Object Type 0x%X", type));
return_UINT32(ACPI_NS_NORMAL);
}
@@ -764,7 +764,7 @@ u32 acpi_ns_opens_scope(acpi_object_type type)
/* type code out of range */
- ACPI_WARNING((AE_INFO, "Invalid Object Type %X", type));
+ ACPI_WARNING((AE_INFO, "Invalid Object Type 0x%X", type));
return_UINT32(ACPI_NS_NORMAL);
}
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 00493e108a01..7df1a4c95274 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -460,7 +460,7 @@ acpi_ps_get_next_simple_arg(struct acpi_parse_state *parser_state,
default:
- ACPI_ERROR((AE_INFO, "Invalid ArgType %X", arg_type));
+ ACPI_ERROR((AE_INFO, "Invalid ArgType 0x%X", arg_type));
return_VOID;
}
@@ -742,7 +742,7 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
default:
- ACPI_ERROR((AE_INFO, "Invalid ArgType: %X", arg_type));
+ ACPI_ERROR((AE_INFO, "Invalid ArgType: 0x%X", arg_type));
status = AE_AML_OPERAND_TYPE;
break;
}
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index 59aabaeab1d3..2f2e7760938c 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -136,7 +136,7 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
/* The opcode is unrecognized. Just skip unknown opcodes */
ACPI_ERROR((AE_INFO,
- "Found unknown opcode %X at AML address %p offset %X, ignoring",
+ "Found unknown opcode 0x%X at AML address %p offset 0x%X, ignoring",
walk_state->opcode, walk_state->parser_state.aml,
walk_state->aml_offset));
@@ -1021,7 +1021,6 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
if (status == AE_AML_NO_RETURN_VALUE) {
ACPI_EXCEPTION((AE_INFO, status,
"Invoked method did not return a value"));
-
}
ACPI_EXCEPTION((AE_INFO, status,
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index 6064dd4e94c2..c42f067cff9d 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -46,6 +46,7 @@
#include "acparser.h"
#include "acdispat.h"
#include "acinterp.h"
+#include "actables.h"
#include "amlcode.h"
#define _COMPONENT ACPI_PARSER
@@ -220,6 +221,10 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
ACPI_FUNCTION_TRACE(ps_execute_method);
+ /* Quick validation of DSDT header */
+
+ acpi_tb_check_dsdt_header();
+
/* Validate the Info and method Node */
if (!info || !info->resolved_node) {
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index f2ee3b548609..c80a2eea3a01 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -212,7 +212,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
if ((*top_object_list)->common.type != ACPI_TYPE_PACKAGE) {
ACPI_ERROR((AE_INFO,
- "(PRT[%X]) Need sub-package, found %s",
+ "(PRT[%u]) Need sub-package, found %s",
index,
acpi_ut_get_object_type_name
(*top_object_list)));
@@ -223,7 +223,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
if ((*top_object_list)->package.count != 4) {
ACPI_ERROR((AE_INFO,
- "(PRT[%X]) Need package of length 4, found length %d",
+ "(PRT[%u]) Need package of length 4, found length %u",
index, (*top_object_list)->package.count));
return_ACPI_STATUS(AE_AML_PACKAGE_LIMIT);
}
@@ -240,7 +240,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
obj_desc = sub_object_list[0];
if (obj_desc->common.type != ACPI_TYPE_INTEGER) {
ACPI_ERROR((AE_INFO,
- "(PRT[%X].Address) Need Integer, found %s",
+ "(PRT[%u].Address) Need Integer, found %s",
index,
acpi_ut_get_object_type_name(obj_desc)));
return_ACPI_STATUS(AE_BAD_DATA);
@@ -253,7 +253,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
obj_desc = sub_object_list[1];
if (obj_desc->common.type != ACPI_TYPE_INTEGER) {
ACPI_ERROR((AE_INFO,
- "(PRT[%X].Pin) Need Integer, found %s",
+ "(PRT[%u].Pin) Need Integer, found %s",
index,
acpi_ut_get_object_type_name(obj_desc)));
return_ACPI_STATUS(AE_BAD_DATA);
@@ -289,7 +289,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
if (obj_desc->reference.class !=
ACPI_REFCLASS_NAME) {
ACPI_ERROR((AE_INFO,
- "(PRT[%X].Source) Need name, found Reference Class %X",
+ "(PRT[%u].Source) Need name, found Reference Class 0x%X",
index,
obj_desc->reference.class));
return_ACPI_STATUS(AE_BAD_DATA);
@@ -340,7 +340,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
default:
ACPI_ERROR((AE_INFO,
- "(PRT[%X].Source) Need Ref/String/Integer, found %s",
+ "(PRT[%u].Source) Need Ref/String/Integer, found %s",
index,
acpi_ut_get_object_type_name
(obj_desc)));
@@ -358,7 +358,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
obj_desc = sub_object_list[3];
if (obj_desc->common.type != ACPI_TYPE_INTEGER) {
ACPI_ERROR((AE_INFO,
- "(PRT[%X].SourceIndex) Need Integer, found %s",
+ "(PRT[%u].SourceIndex) Need Integer, found %s",
index,
acpi_ut_get_object_type_name(obj_desc)));
return_ACPI_STATUS(AE_BAD_DATA);
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
index fd057c72d252..7335f22aac20 100644
--- a/drivers/acpi/acpica/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -94,7 +94,7 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
[resource_index]);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Could not convert AML resource (Type %X)",
+ "Could not convert AML resource (Type 0x%X)",
*aml));
return_ACPI_STATUS(status);
}
@@ -147,7 +147,7 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
if (resource->type > ACPI_RESOURCE_TYPE_MAX) {
ACPI_ERROR((AE_INFO,
- "Invalid descriptor type (%X) in resource list",
+ "Invalid descriptor type (0x%X) in resource list",
resource->type));
return_ACPI_STATUS(AE_BAD_DATA);
}
@@ -161,7 +161,7 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
[resource->type]);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Could not convert resource (type %X) to AML",
+ "Could not convert resource (type 0x%X) to AML",
resource->type));
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
index 07de352fa443..f8cd9e87d987 100644
--- a/drivers/acpi/acpica/rsmisc.c
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -88,7 +88,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
/* Each internal resource struct is expected to be 32-bit aligned */
ACPI_WARNING((AE_INFO,
- "Misaligned resource pointer (get): %p Type %2.2X Len %X",
+ "Misaligned resource pointer (get): %p Type 0x%2.2X Length %u",
resource, resource->type, resource->length));
}
@@ -541,7 +541,7 @@ if (((aml->irq.flags & 0x09) == 0x00) || ((aml->irq.flags & 0x09) == 0x09)) {
* "IRQ Format"), so 0x00 and 0x09 are illegal.
*/
ACPI_ERROR((AE_INFO,
- "Invalid interrupt polarity/trigger in resource list, %X",
+ "Invalid interrupt polarity/trigger in resource list, 0x%X",
aml->irq.flags));
return_ACPI_STATUS(AE_BAD_DATA);
}
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index f43fbe0fc3fc..1728cb9bf600 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -283,7 +283,7 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
if (length > sizeof(struct acpi_table_fadt)) {
ACPI_WARNING((AE_INFO,
"FADT (revision %u) is longer than ACPI 2.0 version, "
- "truncating length 0x%X to 0x%X",
+ "truncating length %u to %u",
table->revision, length,
(u32)sizeof(struct acpi_table_fadt)));
}
@@ -422,7 +422,7 @@ static void acpi_tb_convert_fadt(void)
if (address64->address && address32 &&
(address64->address != (u64) address32)) {
ACPI_ERROR((AE_INFO,
- "32/64X address mismatch in %s: %8.8X/%8.8X%8.8X, using 32",
+ "32/64X address mismatch in %s: 0x%8.8X/0x%8.8X%8.8X, using 32",
fadt_info_table[i].name, address32,
ACPI_FORMAT_UINT64(address64->address)));
}
@@ -481,7 +481,7 @@ static void acpi_tb_validate_fadt(void)
(acpi_gbl_FADT.Xfacs != (u64) acpi_gbl_FADT.facs)) {
ACPI_WARNING((AE_INFO,
"32/64X FACS address mismatch in FADT - "
- "%8.8X/%8.8X%8.8X, using 32",
+ "0x%8.8X/0x%8.8X%8.8X, using 32",
acpi_gbl_FADT.facs,
ACPI_FORMAT_UINT64(acpi_gbl_FADT.Xfacs)));
@@ -492,7 +492,7 @@ static void acpi_tb_validate_fadt(void)
(acpi_gbl_FADT.Xdsdt != (u64) acpi_gbl_FADT.dsdt)) {
ACPI_WARNING((AE_INFO,
"32/64X DSDT address mismatch in FADT - "
- "%8.8X/%8.8X%8.8X, using 32",
+ "0x%8.8X/0x%8.8X%8.8X, using 32",
acpi_gbl_FADT.dsdt,
ACPI_FORMAT_UINT64(acpi_gbl_FADT.Xdsdt)));
@@ -521,7 +521,7 @@ static void acpi_tb_validate_fadt(void)
if (address64->address &&
(address64->bit_width != ACPI_MUL_8(length))) {
ACPI_WARNING((AE_INFO,
- "32/64X length mismatch in %s: %d/%d",
+ "32/64X length mismatch in %s: %u/%u",
name, ACPI_MUL_8(length),
address64->bit_width));
}
@@ -534,7 +534,7 @@ static void acpi_tb_validate_fadt(void)
if (!address64->address || !length) {
ACPI_ERROR((AE_INFO,
"Required field %s has zero address and/or length:"
- " %8.8X%8.8X/%X",
+ " 0x%8.8X%8.8X/0x%X",
name,
ACPI_FORMAT_UINT64(address64->
address),
@@ -550,7 +550,7 @@ static void acpi_tb_validate_fadt(void)
(!address64->address && length)) {
ACPI_WARNING((AE_INFO,
"Optional field %s has zero address or length: "
- "%8.8X%8.8X/%X",
+ "0x%8.8X%8.8X/0x%X",
name,
ACPI_FORMAT_UINT64(address64->
address),
@@ -600,7 +600,7 @@ static void acpi_tb_setup_fadt_registers(void)
(fadt_info_table[i].default_length !=
target64->bit_width)) {
ACPI_WARNING((AE_INFO,
- "Invalid length for %s: %d, using default %d",
+ "Invalid length for %s: %u, using default %u",
fadt_info_table[i].name,
target64->bit_width,
fadt_info_table[i].
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index e252180ce61c..989d5c867864 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -83,7 +83,7 @@ acpi_tb_find_table(char *signature,
/* Search for the table */
- for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
+ for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
if (ACPI_MEMCMP(&(acpi_gbl_root_table_list.tables[i].signature),
header.signature, ACPI_NAME_SIZE)) {
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 7ec02b0f69e0..83d7af8d0905 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -137,7 +137,7 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index)
/* Check if table is already registered */
- for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
+ for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
if (!acpi_gbl_root_table_list.tables[i].pointer) {
status =
acpi_tb_verify_table(&acpi_gbl_root_table_list.
@@ -273,7 +273,7 @@ acpi_status acpi_tb_resize_root_table_list(void)
/* Increase the Table Array size */
tables = ACPI_ALLOCATE_ZEROED(((acpi_size) acpi_gbl_root_table_list.
- size +
+ max_table_count +
ACPI_ROOT_TABLE_SIZE_INCREMENT) *
sizeof(struct acpi_table_desc));
if (!tables) {
@@ -286,8 +286,8 @@ acpi_status acpi_tb_resize_root_table_list(void)
if (acpi_gbl_root_table_list.tables) {
ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables,
- (acpi_size) acpi_gbl_root_table_list.size *
- sizeof(struct acpi_table_desc));
+ (acpi_size) acpi_gbl_root_table_list.
+ max_table_count * sizeof(struct acpi_table_desc));
if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
ACPI_FREE(acpi_gbl_root_table_list.tables);
@@ -295,8 +295,9 @@ acpi_status acpi_tb_resize_root_table_list(void)
}
acpi_gbl_root_table_list.tables = tables;
- acpi_gbl_root_table_list.size += ACPI_ROOT_TABLE_SIZE_INCREMENT;
- acpi_gbl_root_table_list.flags |= (u8) ACPI_ROOT_ORIGIN_ALLOCATED;
+ acpi_gbl_root_table_list.max_table_count +=
+ ACPI_ROOT_TABLE_SIZE_INCREMENT;
+ acpi_gbl_root_table_list.flags |= (u8)ACPI_ROOT_ORIGIN_ALLOCATED;
return_ACPI_STATUS(AE_OK);
}
@@ -321,38 +322,36 @@ acpi_tb_store_table(acpi_physical_address address,
struct acpi_table_header *table,
u32 length, u8 flags, u32 *table_index)
{
- acpi_status status = AE_OK;
+ acpi_status status;
+ struct acpi_table_desc *new_table;
/* Ensure that there is room for the table in the Root Table List */
- if (acpi_gbl_root_table_list.count >= acpi_gbl_root_table_list.size) {
+ if (acpi_gbl_root_table_list.current_table_count >=
+ acpi_gbl_root_table_list.max_table_count) {
status = acpi_tb_resize_root_table_list();
if (ACPI_FAILURE(status)) {
return (status);
}
}
+ new_table =
+ &acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.
+ current_table_count];
+
/* Initialize added table */
- acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
- address = address;
- acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
- pointer = table;
- acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].length =
- length;
- acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
- owner_id = 0;
- acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].flags =
- flags;
-
- ACPI_MOVE_32_TO_32(&
- (acpi_gbl_root_table_list.
- tables[acpi_gbl_root_table_list.count].signature),
- table->signature);
-
- *table_index = acpi_gbl_root_table_list.count;
- acpi_gbl_root_table_list.count++;
- return (status);
+ new_table->address = address;
+ new_table->pointer = table;
+ new_table->length = length;
+ new_table->owner_id = 0;
+ new_table->flags = flags;
+
+ ACPI_MOVE_32_TO_32(&new_table->signature, table->signature);
+
+ *table_index = acpi_gbl_root_table_list.current_table_count;
+ acpi_gbl_root_table_list.current_table_count++;
+ return (AE_OK);
}
/*******************************************************************************
@@ -408,7 +407,7 @@ void acpi_tb_terminate(void)
/* Delete the individual tables */
- for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
+ for (i = 0; i < acpi_gbl_root_table_list.current_table_count; i++) {
acpi_tb_delete_table(&acpi_gbl_root_table_list.tables[i]);
}
@@ -422,7 +421,7 @@ void acpi_tb_terminate(void)
acpi_gbl_root_table_list.tables = NULL;
acpi_gbl_root_table_list.flags = 0;
- acpi_gbl_root_table_list.count = 0;
+ acpi_gbl_root_table_list.current_table_count = 0;
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "ACPI Tables freed\n"));
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
@@ -452,7 +451,7 @@ acpi_status acpi_tb_delete_namespace_by_owner(u32 table_index)
return_ACPI_STATUS(status);
}
- if (table_index >= acpi_gbl_root_table_list.count) {
+ if (table_index >= acpi_gbl_root_table_list.current_table_count) {
/* The table index does not exist */
@@ -505,7 +504,7 @@ acpi_status acpi_tb_allocate_owner_id(u32 table_index)
ACPI_FUNCTION_TRACE(tb_allocate_owner_id);
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
- if (table_index < acpi_gbl_root_table_list.count) {
+ if (table_index < acpi_gbl_root_table_list.current_table_count) {
status = acpi_ut_allocate_owner_id
(&(acpi_gbl_root_table_list.tables[table_index].owner_id));
}
@@ -533,7 +532,7 @@ acpi_status acpi_tb_release_owner_id(u32 table_index)
ACPI_FUNCTION_TRACE(tb_release_owner_id);
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
- if (table_index < acpi_gbl_root_table_list.count) {
+ if (table_index < acpi_gbl_root_table_list.current_table_count) {
acpi_ut_release_owner_id(&
(acpi_gbl_root_table_list.
tables[table_index].owner_id));
@@ -564,7 +563,7 @@ acpi_status acpi_tb_get_owner_id(u32 table_index, acpi_owner_id *owner_id)
ACPI_FUNCTION_TRACE(tb_get_owner_id);
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
- if (table_index < acpi_gbl_root_table_list.count) {
+ if (table_index < acpi_gbl_root_table_list.current_table_count) {
*owner_id =
acpi_gbl_root_table_list.tables[table_index].owner_id;
status = AE_OK;
@@ -589,7 +588,7 @@ u8 acpi_tb_is_table_loaded(u32 table_index)
u8 is_loaded = FALSE;
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
- if (table_index < acpi_gbl_root_table_list.count) {
+ if (table_index < acpi_gbl_root_table_list.current_table_count) {
is_loaded = (u8)
(acpi_gbl_root_table_list.tables[table_index].flags &
ACPI_TABLE_IS_LOADED);
@@ -616,7 +615,7 @@ void acpi_tb_set_table_loaded_flag(u32 table_index, u8 is_loaded)
{
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
- if (table_index < acpi_gbl_root_table_list.count) {
+ if (table_index < acpi_gbl_root_table_list.current_table_count) {
if (is_loaded) {
acpi_gbl_root_table_list.tables[table_index].flags |=
ACPI_TABLE_IS_LOADED;
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 02723a9fb10c..34f9c2bc5e1f 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -158,7 +158,7 @@ acpi_status acpi_tb_initialize_facs(void)
u8 acpi_tb_tables_loaded(void)
{
- if (acpi_gbl_root_table_list.count >= 3) {
+ if (acpi_gbl_root_table_list.current_table_count >= 3) {
return (TRUE);
}
@@ -309,7 +309,7 @@ acpi_status acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length)
if (checksum) {
ACPI_WARNING((AE_INFO,
- "Incorrect checksum in table [%4.4s] - %2.2X, should be %2.2X",
+ "Incorrect checksum in table [%4.4s] - 0x%2.2X, should be 0x%2.2X",
table->signature, table->checksum,
(u8) (table->checksum - checksum)));
@@ -349,6 +349,84 @@ u8 acpi_tb_checksum(u8 *buffer, u32 length)
/*******************************************************************************
*
+ * FUNCTION: acpi_tb_check_dsdt_header
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Quick compare to check validity of the DSDT. This will detect
+ * if the DSDT has been replaced from outside the OS and/or if
+ * the DSDT header has been corrupted.
+ *
+ ******************************************************************************/
+
+void acpi_tb_check_dsdt_header(void)
+{
+
+ /* Compare original length and checksum to current values */
+
+ if (acpi_gbl_original_dsdt_header.length != acpi_gbl_DSDT->length ||
+ acpi_gbl_original_dsdt_header.checksum != acpi_gbl_DSDT->checksum) {
+ ACPI_ERROR((AE_INFO,
+ "The DSDT has been corrupted or replaced - old, new headers below"));
+ acpi_tb_print_table_header(0, &acpi_gbl_original_dsdt_header);
+ acpi_tb_print_table_header(0, acpi_gbl_DSDT);
+
+ ACPI_ERROR((AE_INFO,
+ "Please send DMI info to linux-acpi@vger.kernel.org\n"
+ "If system does not work as expected, please boot with acpi=copy_dsdt"));
+
+ /* Disable further error messages */
+
+ acpi_gbl_original_dsdt_header.length = acpi_gbl_DSDT->length;
+ acpi_gbl_original_dsdt_header.checksum =
+ acpi_gbl_DSDT->checksum;
+ }
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_copy_dsdt
+ *
+ * PARAMETERS: table_desc - Installed table to copy
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Implements a subsystem option to copy the DSDT to local memory.
+ * Some very bad BIOSs are known to either corrupt the DSDT or
+ * install a new, bad DSDT. This copy works around the problem.
+ *
+ ******************************************************************************/
+
+struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index)
+{
+ struct acpi_table_header *new_table;
+ struct acpi_table_desc *table_desc;
+
+ table_desc = &acpi_gbl_root_table_list.tables[table_index];
+
+ new_table = ACPI_ALLOCATE(table_desc->length);
+ if (!new_table) {
+ ACPI_ERROR((AE_INFO, "Could not copy DSDT of length 0x%X",
+ table_desc->length));
+ return (NULL);
+ }
+
+ ACPI_MEMCPY(new_table, table_desc->pointer, table_desc->length);
+ acpi_tb_delete_table(table_desc);
+ table_desc->pointer = new_table;
+ table_desc->flags = ACPI_TABLE_ORIGIN_ALLOCATED;
+
+ ACPI_INFO((AE_INFO,
+ "Forced DSDT copy: length 0x%05X copied locally, original unmapped",
+ new_table->length));
+
+ return (new_table);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_tb_install_table
*
* PARAMETERS: Address - Physical address of DSDT or FACS
@@ -496,7 +574,7 @@ acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size)
/* Will truncate 64-bit address to 32 bits, issue warning */
ACPI_WARNING((AE_INFO,
- "64-bit Physical Address in XSDT is too large (%8.8X%8.8X),"
+ "64-bit Physical Address in XSDT is too large (0x%8.8X%8.8X),"
" truncating",
ACPI_FORMAT_UINT64(address64)));
}
@@ -629,14 +707,14 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
*/
table_entry =
ACPI_CAST_PTR(u8, table) + sizeof(struct acpi_table_header);
- acpi_gbl_root_table_list.count = 2;
+ acpi_gbl_root_table_list.current_table_count = 2;
/*
* Initialize the root table array from the RSDT/XSDT
*/
for (i = 0; i < table_count; i++) {
- if (acpi_gbl_root_table_list.count >=
- acpi_gbl_root_table_list.size) {
+ if (acpi_gbl_root_table_list.current_table_count >=
+ acpi_gbl_root_table_list.max_table_count) {
/* There is no more room in the root table array, attempt resize */
@@ -646,19 +724,20 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
"Truncating %u table entries!",
(unsigned) (table_count -
(acpi_gbl_root_table_list.
- count - 2))));
+ current_table_count -
+ 2))));
break;
}
}
/* Get the table physical address (32-bit for RSDT, 64-bit for XSDT) */
- acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
- address =
+ acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.
+ current_table_count].address =
acpi_tb_get_root_table_entry(table_entry, table_entry_size);
table_entry += table_entry_size;
- acpi_gbl_root_table_list.count++;
+ acpi_gbl_root_table_list.current_table_count++;
}
/*
@@ -671,7 +750,7 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
* Complete the initialization of the root table array by examining
* the header of each table
*/
- for (i = 2; i < acpi_gbl_root_table_list.count; i++) {
+ for (i = 2; i < acpi_gbl_root_table_list.current_table_count; i++) {
acpi_tb_install_table(acpi_gbl_root_table_list.tables[i].
address, NULL, i);
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 5217a6159a31..4a8b9e6ea57a 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -72,7 +72,7 @@ static int no_auto_ssdt;
acpi_status acpi_allocate_root_table(u32 initial_table_count)
{
- acpi_gbl_root_table_list.size = initial_table_count;
+ acpi_gbl_root_table_list.max_table_count = initial_table_count;
acpi_gbl_root_table_list.flags = ACPI_ROOT_ALLOW_RESIZE;
return (acpi_tb_resize_root_table_list());
@@ -130,7 +130,7 @@ acpi_initialize_tables(struct acpi_table_desc * initial_table_array,
sizeof(struct acpi_table_desc));
acpi_gbl_root_table_list.tables = initial_table_array;
- acpi_gbl_root_table_list.size = initial_table_count;
+ acpi_gbl_root_table_list.max_table_count = initial_table_count;
acpi_gbl_root_table_list.flags = ACPI_ROOT_ORIGIN_UNKNOWN;
if (allow_resize) {
acpi_gbl_root_table_list.flags |=
@@ -172,6 +172,7 @@ acpi_status acpi_reallocate_root_table(void)
{
struct acpi_table_desc *tables;
acpi_size new_size;
+ acpi_size current_size;
ACPI_FUNCTION_TRACE(acpi_reallocate_root_table);
@@ -183,10 +184,17 @@ acpi_status acpi_reallocate_root_table(void)
return_ACPI_STATUS(AE_SUPPORT);
}
- new_size = ((acpi_size) acpi_gbl_root_table_list.count +
- ACPI_ROOT_TABLE_SIZE_INCREMENT) *
+ /*
+ * Get the current size of the root table and add the default
+ * increment to create the new table size.
+ */
+ current_size = (acpi_size)
+ acpi_gbl_root_table_list.current_table_count *
sizeof(struct acpi_table_desc);
+ new_size = current_size +
+ (ACPI_ROOT_TABLE_SIZE_INCREMENT * sizeof(struct acpi_table_desc));
+
/* Create new array and copy the old array */
tables = ACPI_ALLOCATE_ZEROED(new_size);
@@ -194,10 +202,17 @@ acpi_status acpi_reallocate_root_table(void)
return_ACPI_STATUS(AE_NO_MEMORY);
}
- ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables, new_size);
+ ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables, current_size);
- acpi_gbl_root_table_list.size = acpi_gbl_root_table_list.count;
+ /*
+ * Update the root table descriptor. The new size will be the current
+ * number of tables plus the increment, independent of the reserved
+ * size of the original table list.
+ */
acpi_gbl_root_table_list.tables = tables;
+ acpi_gbl_root_table_list.max_table_count =
+ acpi_gbl_root_table_list.current_table_count +
+ ACPI_ROOT_TABLE_SIZE_INCREMENT;
acpi_gbl_root_table_list.flags =
ACPI_ROOT_ORIGIN_ALLOCATED | ACPI_ROOT_ALLOW_RESIZE;
@@ -278,7 +293,8 @@ acpi_get_table_header(char *signature,
/* Walk the root table list */
- for (i = 0, j = 0; i < acpi_gbl_root_table_list.count; i++) {
+ for (i = 0, j = 0; i < acpi_gbl_root_table_list.current_table_count;
+ i++) {
if (!ACPI_COMPARE_NAME
(&(acpi_gbl_root_table_list.tables[i].signature),
signature)) {
@@ -341,7 +357,7 @@ acpi_status acpi_unload_table_id(acpi_owner_id id)
ACPI_FUNCTION_TRACE(acpi_unload_table_id);
/* Find table in the global table list */
- for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
+ for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
if (id != acpi_gbl_root_table_list.tables[i].owner_id) {
continue;
}
@@ -391,7 +407,8 @@ acpi_get_table_with_size(char *signature,
/* Walk the root table list */
- for (i = 0, j = 0; i < acpi_gbl_root_table_list.count; i++) {
+ for (i = 0, j = 0; i < acpi_gbl_root_table_list.current_table_count;
+ i++) {
if (!ACPI_COMPARE_NAME
(&(acpi_gbl_root_table_list.tables[i].signature),
signature)) {
@@ -459,7 +476,7 @@ acpi_get_table_by_index(u32 table_index, struct acpi_table_header **table)
/* Validate index */
- if (table_index >= acpi_gbl_root_table_list.count) {
+ if (table_index >= acpi_gbl_root_table_list.current_table_count) {
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
@@ -500,16 +517,17 @@ static acpi_status acpi_tb_load_namespace(void)
{
acpi_status status;
u32 i;
+ struct acpi_table_header *new_dsdt;
ACPI_FUNCTION_TRACE(tb_load_namespace);
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
/*
- * Load the namespace. The DSDT is required, but any SSDT and PSDT tables
- * are optional.
+ * Load the namespace. The DSDT is required, but any SSDT and
+ * PSDT tables are optional. Verify the DSDT.
*/
- if (!acpi_gbl_root_table_list.count ||
+ if (!acpi_gbl_root_table_list.current_table_count ||
!ACPI_COMPARE_NAME(&
(acpi_gbl_root_table_list.
tables[ACPI_TABLE_INDEX_DSDT].signature),
@@ -522,17 +540,35 @@ static acpi_status acpi_tb_load_namespace(void)
goto unlock_and_exit;
}
- /* A valid DSDT is required */
-
- status =
- acpi_tb_verify_table(&acpi_gbl_root_table_list.
- tables[ACPI_TABLE_INDEX_DSDT]);
- if (ACPI_FAILURE(status)) {
+ /*
+ * Save the DSDT pointer for simple access. This is the mapped memory
+ * address. We must take care here because the address of the .Tables
+ * array can change dynamically as tables are loaded at run-time. Note:
+ * .Pointer field is not validated until after call to acpi_tb_verify_table.
+ */
+ acpi_gbl_DSDT =
+ acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].pointer;
- status = AE_NO_ACPI_TABLES;
- goto unlock_and_exit;
+ /*
+ * Optionally copy the entire DSDT to local memory (instead of simply
+ * mapping it). Some BIOSes corrupt or replace the original DSDT,
+ * creating the need for this option. The default is FALSE (do not
+ * copy the DSDT).
+ */
+ if (acpi_gbl_copy_dsdt_locally) {
+ new_dsdt = acpi_tb_copy_dsdt(ACPI_TABLE_INDEX_DSDT);
+ if (new_dsdt) {
+ acpi_gbl_DSDT = new_dsdt;
+ }
}
+ /*
+ * Save the original DSDT header for detection of table corruption
+ * and/or replacement of the DSDT from outside the OS.
+ */
+ ACPI_MEMCPY(&acpi_gbl_original_dsdt_header, acpi_gbl_DSDT,
+ sizeof(struct acpi_table_header));
+
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
/* Load and parse tables */
@@ -545,7 +581,7 @@ static acpi_status acpi_tb_load_namespace(void)
/* Load any SSDT or PSDT tables. Note: Loop leaves tables locked */
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
- for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
+ for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
if ((!ACPI_COMPARE_NAME
(&(acpi_gbl_root_table_list.tables[i].signature),
ACPI_SIG_SSDT)
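
Taken together, the tbutils.c hunks above (acpi_tb_check_dsdt_header, acpi_tb_copy_dsdt) and the acpi_tb_load_namespace changes implement a DSDT-protection scheme. A condensed sketch of that flow, using only symbols introduced or touched in this patch; the wrapper name is illustrative, the ACPICA-internal headers are assumed, and error handling is elided:

static void example_dsdt_protection(void)	/* hypothetical wrapper */
{
	struct acpi_table_header *new_dsdt;

	/* Remember where the mapped DSDT lives */
	acpi_gbl_DSDT =
	    acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].pointer;

	/* Workaround for BIOSes that corrupt or replace the DSDT: copy it
	 * to local memory when booted with acpi=copy_dsdt */
	if (acpi_gbl_copy_dsdt_locally) {
		new_dsdt = acpi_tb_copy_dsdt(ACPI_TABLE_INDEX_DSDT);
		if (new_dsdt)
			acpi_gbl_DSDT = new_dsdt;
	}

	/* Save the original header so later corruption or replacement
	 * can be detected */
	ACPI_MEMCPY(&acpi_gbl_original_dsdt_header, acpi_gbl_DSDT,
		    sizeof(struct acpi_table_header));

	/* Any later caller can cheaply re-validate the DSDT */
	acpi_tb_check_dsdt_header();
}
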
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index dda6e8c497d3..fd2c07d1d3ac 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -134,7 +134,7 @@ acpi_status acpi_find_root_pointer(acpi_size *table_address)
ACPI_EBDA_PTR_LENGTH);
if (!table_ptr) {
ACPI_ERROR((AE_INFO,
- "Could not map memory at %8.8X for length %X",
+ "Could not map memory at 0x%8.8X for length %u",
ACPI_EBDA_PTR_LOCATION, ACPI_EBDA_PTR_LENGTH));
return_ACPI_STATUS(AE_NO_MEMORY);
@@ -159,7 +159,7 @@ acpi_status acpi_find_root_pointer(acpi_size *table_address)
ACPI_EBDA_WINDOW_SIZE);
if (!table_ptr) {
ACPI_ERROR((AE_INFO,
- "Could not map memory at %8.8X for length %X",
+ "Could not map memory at 0x%8.8X for length %u",
physical_address, ACPI_EBDA_WINDOW_SIZE));
return_ACPI_STATUS(AE_NO_MEMORY);
@@ -191,7 +191,7 @@ acpi_status acpi_find_root_pointer(acpi_size *table_address)
if (!table_ptr) {
ACPI_ERROR((AE_INFO,
- "Could not map memory at %8.8X for length %X",
+ "Could not map memory at 0x%8.8X for length %u",
ACPI_HI_RSDP_WINDOW_BASE,
ACPI_HI_RSDP_WINDOW_SIZE));
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index 3d706b8fd449..8f0896281567 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -340,7 +340,7 @@ void *acpi_ut_allocate(acpi_size size,
/* Report allocation error */
ACPI_WARNING((module, line,
- "Could not allocate size %X", (u32) size));
+ "Could not allocate size %u", (u32) size));
return_PTR(NULL);
}
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 97ec3621e71d..6fef83f04bcd 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -677,16 +677,24 @@ acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
u16 reference_count;
union acpi_operand_object *next_object;
acpi_status status;
+ acpi_size copy_size;
/* Save fields from destination that we don't want to overwrite */
reference_count = dest_desc->common.reference_count;
next_object = dest_desc->common.next_object;
- /* Copy the entire source object over the destination object */
+ /*
+ * Copy the entire source object over the destination object.
+ * Note: Source can be either an operand object or namespace node.
+ */
+ copy_size = sizeof(union acpi_operand_object);
+ if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_NAMED) {
+ copy_size = sizeof(struct acpi_namespace_node);
+ }
- ACPI_MEMCPY((char *)dest_desc, (char *)source_desc,
- sizeof(union acpi_operand_object));
+ ACPI_MEMCPY(ACPI_CAST_PTR(char, dest_desc),
+ ACPI_CAST_PTR(char, source_desc), copy_size);
/* Restore the saved fields */
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index 16b51c69606a..ed794cd033ea 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -434,7 +434,7 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
default:
- ACPI_ERROR((AE_INFO, "Unknown action (%X)", action));
+ ACPI_ERROR((AE_INFO, "Unknown action (0x%X)", action));
break;
}
@@ -444,8 +444,8 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
*/
if (count > ACPI_MAX_REFERENCE_COUNT) {
ACPI_WARNING((AE_INFO,
- "Large Reference Count (%X) in object %p", count,
- object));
+ "Large Reference Count (0x%X) in object %p",
+ count, object));
}
}
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 7f5e734ce7f7..6dfdeb653490 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -307,7 +307,7 @@ acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
prefix_node, path, AE_TYPE);
ACPI_ERROR((AE_INFO,
- "Type returned from %s was incorrect: %s, expected Btypes: %X",
+ "Type returned from %s was incorrect: %s, expected Btypes: 0x%X",
path,
acpi_ut_get_object_type_name(info->return_object),
expected_return_btypes));
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index eda3e656c4af..66116750a0f9 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -785,6 +785,7 @@ acpi_status acpi_ut_init_globals(void)
/* Miscellaneous variables */
+ acpi_gbl_DSDT = NULL;
acpi_gbl_cm_single_step = FALSE;
acpi_gbl_db_terminate_threads = FALSE;
acpi_gbl_shutdown = FALSE;
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index 32982e2ac384..e8d0724ee403 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -205,7 +205,7 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
/* Guard against multiple allocations of ID to the same location */
if (*owner_id) {
- ACPI_ERROR((AE_INFO, "Owner ID [%2.2X] already exists",
+ ACPI_ERROR((AE_INFO, "Owner ID [0x%2.2X] already exists",
*owner_id));
return_ACPI_STATUS(AE_ALREADY_EXISTS);
}
@@ -315,7 +315,7 @@ void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr)
/* Zero is not a valid owner_iD */
if (owner_id == 0) {
- ACPI_ERROR((AE_INFO, "Invalid OwnerId: %2.2X", owner_id));
+ ACPI_ERROR((AE_INFO, "Invalid OwnerId: 0x%2.2X", owner_id));
return_VOID;
}
@@ -341,7 +341,7 @@ void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr)
acpi_gbl_owner_id_mask[index] ^= bit;
} else {
ACPI_ERROR((AE_INFO,
- "Release of non-allocated OwnerId: %2.2X",
+ "Release of non-allocated OwnerId: 0x%2.2X",
owner_id + 1));
}
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 55d014ed6d55..058b3df48271 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -258,7 +258,7 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
acpi_gbl_mutex_info[mutex_id].thread_id = this_thread_id;
} else {
ACPI_EXCEPTION((AE_INFO, status,
- "Thread %p could not acquire Mutex [%X]",
+ "Thread %p could not acquire Mutex [0x%X]",
ACPI_CAST_PTR(void, this_thread_id), mutex_id));
}
@@ -297,7 +297,7 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
*/
if (acpi_gbl_mutex_info[mutex_id].thread_id == ACPI_MUTEX_NOT_ACQUIRED) {
ACPI_ERROR((AE_INFO,
- "Mutex [%X] is not acquired, cannot release",
+ "Mutex [0x%X] is not acquired, cannot release",
mutex_id));
return (AE_NOT_ACQUIRED);
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index 3356f0cb0745..fd1fa2749ea5 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -251,7 +251,7 @@ union acpi_operand_object *acpi_ut_create_buffer_object(acpi_size buffer_size)
buffer = ACPI_ALLOCATE_ZEROED(buffer_size);
if (!buffer) {
- ACPI_ERROR((AE_INFO, "Could not allocate size %X",
+ ACPI_ERROR((AE_INFO, "Could not allocate size %u",
(u32) buffer_size));
acpi_ut_remove_reference(buffer_desc);
return_PTR(NULL);
@@ -303,7 +303,7 @@ union acpi_operand_object *acpi_ut_create_string_object(acpi_size string_size)
*/
string = ACPI_ALLOCATE_ZEROED(string_size + 1);
if (!string) {
- ACPI_ERROR((AE_INFO, "Could not allocate size %X",
+ ACPI_ERROR((AE_INFO, "Could not allocate size %u",
(u32) string_size));
acpi_ut_remove_reference(string_desc);
return_PTR(NULL);
@@ -533,7 +533,7 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
*/
ACPI_ERROR((AE_INFO,
"Cannot convert to external object - "
- "unsupported Reference Class [%s] %X in object %p",
+ "unsupported Reference Class [%s] 0x%X in object %p",
acpi_ut_get_reference_name(internal_object),
internal_object->reference.class,
internal_object));
@@ -545,7 +545,7 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
default:
ACPI_ERROR((AE_INFO, "Cannot convert to external object - "
- "unsupported type [%s] %X in object %p",
+ "unsupported type [%s] 0x%X in object %p",
acpi_ut_get_object_type_name(internal_object),
internal_object->common.type, internal_object));
status = AE_TYPE;
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
new file mode 100644
index 000000000000..f8c668f27b5a
--- /dev/null
+++ b/drivers/acpi/apei/Kconfig
@@ -0,0 +1,30 @@
+config ACPI_APEI
+ bool "ACPI Platform Error Interface (APEI)"
+ depends on X86
+ help
+ APEI allows errors (for example from the chipset) to be
+ reported to the operating system. This especially improves
+ NMI handling. In addition it supports error serialization
+ and error injection.
+
+config ACPI_APEI_GHES
+ tristate "APEI Generic Hardware Error Source"
+ depends on ACPI_APEI && X86
+ select ACPI_HED
+ help
+ Generic Hardware Error Source provides a way to report
+ platform hardware errors (such as those from the chipset). It
+ works in so-called "Firmware First" mode, that is, hardware
+ errors are reported to the firmware first and then reported to
+ Linux by the firmware. This way, some non-standard hardware
+ error registers or non-standard hardware links can be checked
+ by the firmware to produce more valuable hardware error
+ information for Linux.
+
+config ACPI_APEI_EINJ
+ tristate "APEI Error INJection (EINJ)"
+ depends on ACPI_APEI && DEBUG_FS
+ help
+ EINJ provides a hardware error injection mechanism; it is
+ mainly used for debugging and testing the other parts of
+ APEI and some other RAS features.
diff --git a/drivers/acpi/apei/Makefile b/drivers/acpi/apei/Makefile
new file mode 100644
index 000000000000..b13b03a17789
--- /dev/null
+++ b/drivers/acpi/apei/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_ACPI_APEI) += apei.o
+obj-$(CONFIG_ACPI_APEI_GHES) += ghes.o
+obj-$(CONFIG_ACPI_APEI_EINJ) += einj.o
+
+apei-y := apei-base.o hest.o cper.o erst.o
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
new file mode 100644
index 000000000000..216e1e948ff6
--- /dev/null
+++ b/drivers/acpi/apei/apei-base.c
@@ -0,0 +1,594 @@
+/*
+ * apei-base.c - ACPI Platform Error Interface (APEI) supporting
+ * infrastructure
+ *
+ * APEI allows errors (for example from the chipset) to be reported to
+ * the operating system. This especially improves NMI handling. In
+ * addition it supports error serialization and error injection.
+ *
+ * For more information about APEI, please refer to ACPI Specification
+ * version 4.0, chapter 17.
+ *
+ * This file has common functions used by more than one APEI table,
+ * including the framework of the interpreter for ERST and EINJ, and
+ * resource management for APEI registers.
+ *
+ * Copyright (C) 2009, Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/kref.h>
+#include <linux/rculist.h>
+#include <linux/interrupt.h>
+#include <linux/debugfs.h>
+#include <acpi/atomicio.h>
+
+#include "apei-internal.h"
+
+#define APEI_PFX "APEI: "
+
+/*
+ * APEI ERST (Error Record Serialization Table) and EINJ (Error
+ * INJection) interpreter framework.
+ */
+
+#define APEI_EXEC_PRESERVE_REGISTER 0x1
+
+void apei_exec_ctx_init(struct apei_exec_context *ctx,
+ struct apei_exec_ins_type *ins_table,
+ u32 instructions,
+ struct acpi_whea_header *action_table,
+ u32 entries)
+{
+ ctx->ins_table = ins_table;
+ ctx->instructions = instructions;
+ ctx->action_table = action_table;
+ ctx->entries = entries;
+}
+EXPORT_SYMBOL_GPL(apei_exec_ctx_init);
+
+int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val)
+{
+ int rc;
+
+ rc = acpi_atomic_read(val, &entry->register_region);
+ if (rc)
+ return rc;
+ *val >>= entry->register_region.bit_offset;
+ *val &= entry->mask;
+
+ return 0;
+}
+
+int apei_exec_read_register(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+ u64 val = 0;
+
+ rc = __apei_exec_read_register(entry, &val);
+ if (rc)
+ return rc;
+ ctx->value = val;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_exec_read_register);
+
+int apei_exec_read_register_value(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+
+ rc = apei_exec_read_register(ctx, entry);
+ if (rc)
+ return rc;
+ ctx->value = (ctx->value == entry->value);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_exec_read_register_value);
+
+int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val)
+{
+ int rc;
+
+ val &= entry->mask;
+ val <<= entry->register_region.bit_offset;
+ if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) {
+ u64 valr = 0;
+ rc = acpi_atomic_read(&valr, &entry->register_region);
+ if (rc)
+ return rc;
+ valr &= ~(entry->mask << entry->register_region.bit_offset);
+ val |= valr;
+ }
+ rc = acpi_atomic_write(val, &entry->register_region);
+
+ return rc;
+}
+
+int apei_exec_write_register(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ return __apei_exec_write_register(entry, ctx->value);
+}
+EXPORT_SYMBOL_GPL(apei_exec_write_register);
+
+int apei_exec_write_register_value(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+
+ ctx->value = entry->value;
+ rc = apei_exec_write_register(ctx, entry);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(apei_exec_write_register_value);
+
+int apei_exec_noop(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_exec_noop);
+
+/*
+ * Interpret the specified action. Go through the whole action table and
+ * execute all instructions belonging to the action.
+ */
+int apei_exec_run(struct apei_exec_context *ctx, u8 action)
+{
+ int rc;
+ u32 i, ip;
+ struct acpi_whea_header *entry;
+ apei_exec_ins_func_t run;
+
+ ctx->ip = 0;
+
+ /*
+ * "ip" is the instruction pointer of current instruction,
+ * "ctx->ip" specifies the next instruction to executed,
+ * instruction "run" function may change the "ctx->ip" to
+ * implement "goto" semantics.
+ */
+rewind:
+ ip = 0;
+ for (i = 0; i < ctx->entries; i++) {
+ entry = &ctx->action_table[i];
+ if (entry->action != action)
+ continue;
+ if (ip == ctx->ip) {
+ if (entry->instruction >= ctx->instructions ||
+ !ctx->ins_table[entry->instruction].run) {
+ pr_warning(FW_WARN APEI_PFX
+ "Invalid action table, unknown instruction type: %d\n",
+ entry->instruction);
+ return -EINVAL;
+ }
+ run = ctx->ins_table[entry->instruction].run;
+ rc = run(ctx, entry);
+ if (rc < 0)
+ return rc;
+ else if (rc != APEI_EXEC_SET_IP)
+ ctx->ip++;
+ }
+ ip++;
+ if (ctx->ip < ip)
+ goto rewind;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_exec_run);
+
+typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry,
+ void *data);
+
+static int apei_exec_for_each_entry(struct apei_exec_context *ctx,
+ apei_exec_entry_func_t func,
+ void *data,
+ int *end)
+{
+ u8 ins;
+ int i, rc;
+ struct acpi_whea_header *entry;
+ struct apei_exec_ins_type *ins_table = ctx->ins_table;
+
+ for (i = 0; i < ctx->entries; i++) {
+ entry = ctx->action_table + i;
+ ins = entry->instruction;
+ if (end)
+ *end = i;
+ if (ins >= ctx->instructions || !ins_table[ins].run) {
+ pr_warning(FW_WARN APEI_PFX
+ "Invalid action table, unknown instruction type: %d\n",
+ ins);
+ return -EINVAL;
+ }
+ rc = func(ctx, entry, data);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int pre_map_gar_callback(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry,
+ void *data)
+{
+ u8 ins = entry->instruction;
+
+ if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
+ return acpi_pre_map_gar(&entry->register_region);
+
+ return 0;
+}
+
+/*
+ * Pre-map all GARs in the action table to make it possible to access
+ * them in the NMI handler.
+ */
+int apei_exec_pre_map_gars(struct apei_exec_context *ctx)
+{
+ int rc, end;
+
+ rc = apei_exec_for_each_entry(ctx, pre_map_gar_callback,
+ NULL, &end);
+ if (rc) {
+ struct apei_exec_context ctx_unmap;
+ memcpy(&ctx_unmap, ctx, sizeof(*ctx));
+ ctx_unmap.entries = end;
+ apei_exec_post_unmap_gars(&ctx_unmap);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(apei_exec_pre_map_gars);
+
+static int post_unmap_gar_callback(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry,
+ void *data)
+{
+ u8 ins = entry->instruction;
+
+ if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
+ acpi_post_unmap_gar(&entry->register_region);
+
+ return 0;
+}
+
+/* Post-unmap all GARs in the action table. */
+int apei_exec_post_unmap_gars(struct apei_exec_context *ctx)
+{
+ return apei_exec_for_each_entry(ctx, post_unmap_gar_callback,
+ NULL, NULL);
+}
+EXPORT_SYMBOL_GPL(apei_exec_post_unmap_gars);
+
+/*
+ * Resource management for GARs in APEI
+ */
+struct apei_res {
+ struct list_head list;
+ unsigned long start;
+ unsigned long end;
+};
+
+/* Collect all resources requested, to avoid conflicts */
+struct apei_resources apei_resources_all = {
+ .iomem = LIST_HEAD_INIT(apei_resources_all.iomem),
+ .ioport = LIST_HEAD_INIT(apei_resources_all.ioport),
+};
+
+static int apei_res_add(struct list_head *res_list,
+ unsigned long start, unsigned long size)
+{
+ struct apei_res *res, *resn, *res_ins = NULL;
+ unsigned long end = start + size;
+
+ if (end <= start)
+ return 0;
+repeat:
+ list_for_each_entry_safe(res, resn, res_list, list) {
+ if (res->start > end || res->end < start)
+ continue;
+ else if (end <= res->end && start >= res->start) {
+ kfree(res_ins);
+ return 0;
+ }
+ list_del(&res->list);
+ res->start = start = min(res->start, start);
+ res->end = end = max(res->end, end);
+ kfree(res_ins);
+ res_ins = res;
+ goto repeat;
+ }
+
+ if (res_ins)
+ list_add(&res_ins->list, res_list);
+ else {
+ res_ins = kmalloc(sizeof(*res), GFP_KERNEL);
+ if (!res_ins)
+ return -ENOMEM;
+ res_ins->start = start;
+ res_ins->end = end;
+ list_add(&res_ins->list, res_list);
+ }
+
+ return 0;
+}
+
+static int apei_res_sub(struct list_head *res_list1,
+ struct list_head *res_list2)
+{
+ struct apei_res *res1, *resn1, *res2, *res;
+ res1 = list_entry(res_list1->next, struct apei_res, list);
+ resn1 = list_entry(res1->list.next, struct apei_res, list);
+ while (&res1->list != res_list1) {
+ list_for_each_entry(res2, res_list2, list) {
+ if (res1->start >= res2->end ||
+ res1->end <= res2->start)
+ continue;
+ else if (res1->end <= res2->end &&
+ res1->start >= res2->start) {
+ list_del(&res1->list);
+ kfree(res1);
+ break;
+ } else if (res1->end > res2->end &&
+ res1->start < res2->start) {
+ res = kmalloc(sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+ res->start = res2->end;
+ res->end = res1->end;
+ res1->end = res2->start;
+ list_add(&res->list, &res1->list);
+ resn1 = res;
+ } else {
+ if (res1->start < res2->start)
+ res1->end = res2->start;
+ else
+ res1->start = res2->end;
+ }
+ }
+ res1 = resn1;
+ resn1 = list_entry(resn1->list.next, struct apei_res, list);
+ }
+
+ return 0;
+}
+
+static void apei_res_clean(struct list_head *res_list)
+{
+ struct apei_res *res, *resn;
+
+ list_for_each_entry_safe(res, resn, res_list, list) {
+ list_del(&res->list);
+ kfree(res);
+ }
+}
+
+void apei_resources_fini(struct apei_resources *resources)
+{
+ apei_res_clean(&resources->iomem);
+ apei_res_clean(&resources->ioport);
+}
+EXPORT_SYMBOL_GPL(apei_resources_fini);
+
+static int apei_resources_merge(struct apei_resources *resources1,
+ struct apei_resources *resources2)
+{
+ int rc;
+ struct apei_res *res;
+
+ list_for_each_entry(res, &resources2->iomem, list) {
+ rc = apei_res_add(&resources1->iomem, res->start,
+ res->end - res->start);
+ if (rc)
+ return rc;
+ }
+ list_for_each_entry(res, &resources2->ioport, list) {
+ rc = apei_res_add(&resources1->ioport, res->start,
+ res->end - res->start);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+/*
+ * EINJ has two groups of GARs (EINJ table entries and trigger table
+ * entries), so common resources are subtracted from the trigger table
+ * resources before they are requested a second time.
+ */
+int apei_resources_sub(struct apei_resources *resources1,
+ struct apei_resources *resources2)
+{
+ int rc;
+
+ rc = apei_res_sub(&resources1->iomem, &resources2->iomem);
+ if (rc)
+ return rc;
+ return apei_res_sub(&resources1->ioport, &resources2->ioport);
+}
+EXPORT_SYMBOL_GPL(apei_resources_sub);
+
+/*
+ * The IO memory/port resource management mechanism is used to check
+ * whether the memory/port areas used by GARs conflict with normal
+ * memory or the IO memory/ports of devices.
+ */
+int apei_resources_request(struct apei_resources *resources,
+ const char *desc)
+{
+ struct apei_res *res, *res_bak;
+ struct resource *r;
+
+ apei_resources_sub(resources, &apei_resources_all);
+
+ list_for_each_entry(res, &resources->iomem, list) {
+ r = request_mem_region(res->start, res->end - res->start,
+ desc);
+ if (!r) {
+ pr_err(APEI_PFX
+ "Can not request iomem region <%016llx-%016llx> for GARs.\n",
+ (unsigned long long)res->start,
+ (unsigned long long)res->end);
+ res_bak = res;
+ goto err_unmap_iomem;
+ }
+ }
+
+ list_for_each_entry(res, &resources->ioport, list) {
+ r = request_region(res->start, res->end - res->start, desc);
+ if (!r) {
+ pr_err(APEI_PFX
+ "Can not request ioport region <%016llx-%016llx> for GARs.\n",
+ (unsigned long long)res->start,
+ (unsigned long long)res->end);
+ res_bak = res;
+ goto err_unmap_ioport;
+ }
+ }
+
+ apei_resources_merge(&apei_resources_all, resources);
+
+ return 0;
+err_unmap_ioport:
+ list_for_each_entry(res, &resources->ioport, list) {
+ if (res == res_bak)
+ break;
+ release_mem_region(res->start, res->end - res->start);
+ }
+ res_bak = NULL;
+err_unmap_iomem:
+ list_for_each_entry(res, &resources->iomem, list) {
+ if (res == res_bak)
+ break;
+ release_region(res->start, res->end - res->start);
+ }
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(apei_resources_request);
+
+void apei_resources_release(struct apei_resources *resources)
+{
+ struct apei_res *res;
+
+ list_for_each_entry(res, &resources->iomem, list)
+ release_mem_region(res->start, res->end - res->start);
+ list_for_each_entry(res, &resources->ioport, list)
+ release_region(res->start, res->end - res->start);
+
+ apei_resources_sub(&apei_resources_all, resources);
+}
+EXPORT_SYMBOL_GPL(apei_resources_release);
+
+static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr)
+{
+ u32 width, space_id;
+
+ width = reg->bit_width;
+ space_id = reg->space_id;
+ /* Handle possible alignment issues */
+ memcpy(paddr, &reg->address, sizeof(*paddr));
+ if (!*paddr) {
+ pr_warning(FW_BUG APEI_PFX
+ "Invalid physical address in GAR [0x%llx/%u/%u]\n",
+ *paddr, width, space_id);
+ return -EINVAL;
+ }
+
+ if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) {
+ pr_warning(FW_BUG APEI_PFX
+ "Invalid bit width in GAR [0x%llx/%u/%u]\n",
+ *paddr, width, space_id);
+ return -EINVAL;
+ }
+
+ if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
+ space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
+ pr_warning(FW_BUG APEI_PFX
+ "Invalid address space type in GAR [0x%llx/%u/%u]\n",
+ *paddr, width, space_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int collect_res_callback(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry,
+ void *data)
+{
+ struct apei_resources *resources = data;
+ struct acpi_generic_address *reg = &entry->register_region;
+ u8 ins = entry->instruction;
+ u64 paddr;
+ int rc;
+
+ if (!(ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER))
+ return 0;
+
+ rc = apei_check_gar(reg, &paddr);
+ if (rc)
+ return rc;
+
+ switch (reg->space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ return apei_res_add(&resources->iomem, paddr,
+ reg->bit_width / 8);
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ return apei_res_add(&resources->ioport, paddr,
+ reg->bit_width / 8);
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * The same register may be used by the GARs of multiple instructions,
+ * so resources are collected before they are requested.
+ */
+int apei_exec_collect_resources(struct apei_exec_context *ctx,
+ struct apei_resources *resources)
+{
+ return apei_exec_for_each_entry(ctx, collect_res_callback,
+ resources, NULL);
+}
+EXPORT_SYMBOL_GPL(apei_exec_collect_resources);
+
+struct dentry *apei_get_debugfs_dir(void)
+{
+ static struct dentry *dapei;
+
+ if (!dapei)
+ dapei = debugfs_create_dir("apei", NULL);
+
+ return dapei;
+}
+EXPORT_SYMBOL_GPL(apei_get_debugfs_dir);
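
The exported helpers above form a small interpreter API around struct apei_exec_context. A minimal sketch of how a client table driver might drive it, modeled on the EINJ code added later in this patch; the opcode indices, instruction table layout and function name are illustrative assumptions, and the usual #include "apei-internal.h" is assumed:

static int example_run_action(struct acpi_whea_header *action_table,
			      u32 entries, u8 action, u64 *output)
{
	/* Illustrative opcode->handler table; a real driver indexes this
	 * by its own table's instruction opcodes (e.g. ACPI_EINJ_*) */
	static struct apei_exec_ins_type ins_table[] = {
		[0] = { .flags = APEI_EXEC_INS_ACCESS_REGISTER,
			.run = apei_exec_read_register },
		[1] = { .flags = APEI_EXEC_INS_ACCESS_REGISTER,
			.run = apei_exec_write_register },
		[2] = { .flags = 0, .run = apei_exec_noop },
	};
	struct apei_exec_context ctx;
	int rc;

	apei_exec_ctx_init(&ctx, ins_table, ARRAY_SIZE(ins_table),
			   action_table, entries);
	rc = apei_exec_run(&ctx, action);
	if (rc)
		return rc;
	*output = apei_exec_ctx_get_output(&ctx);
	return 0;
}
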
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
new file mode 100644
index 000000000000..18df1e940276
--- /dev/null
+++ b/drivers/acpi/apei/apei-internal.h
@@ -0,0 +1,114 @@
+/*
+ * apei-internal.h - ACPI Platform Error Interface internal
+ * definitions.
+ */
+
+#ifndef APEI_INTERNAL_H
+#define APEI_INTERNAL_H
+
+#include <linux/cper.h>
+
+struct apei_exec_context;
+
+typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry);
+
+#define APEI_EXEC_INS_ACCESS_REGISTER 0x0001
+
+struct apei_exec_ins_type {
+ u32 flags;
+ apei_exec_ins_func_t run;
+};
+
+struct apei_exec_context {
+ u32 ip;
+ u64 value;
+ u64 var1;
+ u64 var2;
+ u64 src_base;
+ u64 dst_base;
+ struct apei_exec_ins_type *ins_table;
+ u32 instructions;
+ struct acpi_whea_header *action_table;
+ u32 entries;
+};
+
+void apei_exec_ctx_init(struct apei_exec_context *ctx,
+ struct apei_exec_ins_type *ins_table,
+ u32 instructions,
+ struct acpi_whea_header *action_table,
+ u32 entries);
+
+static inline void apei_exec_ctx_set_input(struct apei_exec_context *ctx,
+ u64 input)
+{
+ ctx->value = input;
+}
+
+static inline u64 apei_exec_ctx_get_output(struct apei_exec_context *ctx)
+{
+ return ctx->value;
+}
+
+int apei_exec_run(struct apei_exec_context *ctx, u8 action);
+
+/* Common instruction implementation */
+
+/* IP has been set in instruction function */
+#define APEI_EXEC_SET_IP 1
+
+int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val);
+int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val);
+int apei_exec_read_register(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry);
+int apei_exec_read_register_value(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry);
+int apei_exec_write_register(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry);
+int apei_exec_write_register_value(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry);
+int apei_exec_noop(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry);
+int apei_exec_pre_map_gars(struct apei_exec_context *ctx);
+int apei_exec_post_unmap_gars(struct apei_exec_context *ctx);
+
+struct apei_resources {
+ struct list_head iomem;
+ struct list_head ioport;
+};
+
+static inline void apei_resources_init(struct apei_resources *resources)
+{
+ INIT_LIST_HEAD(&resources->iomem);
+ INIT_LIST_HEAD(&resources->ioport);
+}
+
+void apei_resources_fini(struct apei_resources *resources);
+int apei_resources_sub(struct apei_resources *resources1,
+ struct apei_resources *resources2);
+int apei_resources_request(struct apei_resources *resources,
+ const char *desc);
+void apei_resources_release(struct apei_resources *resources);
+int apei_exec_collect_resources(struct apei_exec_context *ctx,
+ struct apei_resources *resources);
+
+struct dentry;
+struct dentry *apei_get_debugfs_dir(void);
+
+#define apei_estatus_for_each_section(estatus, section) \
+ for (section = (struct acpi_hest_generic_data *)(estatus + 1); \
+ (void *)section - (void *)estatus < estatus->data_length; \
+ section = (void *)(section+1) + section->error_data_length)
+
+static inline u32 apei_estatus_len(struct acpi_hest_generic_status *estatus)
+{
+ if (estatus->raw_data_length)
+ return estatus->raw_data_offset + \
+ estatus->raw_data_length;
+ else
+ return sizeof(*estatus) + estatus->data_length;
+}
+
+int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus);
+int apei_estatus_check(const struct acpi_hest_generic_status *estatus);
+#endif
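
The resource helpers declared above are meant to be used together before a driver touches any GAR. A minimal sketch of that sequence, mirroring the EINJ driver below; the function name and the region description string are illustrative:

static int example_claim_gars(struct apei_exec_context *ctx,
			      struct apei_resources *res)
{
	int rc;

	/* Gather every iomem/ioport range referenced by the action table */
	apei_resources_init(res);
	rc = apei_exec_collect_resources(ctx, res);
	if (rc)
		goto err_fini;

	/* Reserve the ranges so they cannot conflict with other users */
	rc = apei_resources_request(res, "APEI example");
	if (rc)
		goto err_fini;

	/* Pre-map the registers so they can be accessed from NMI context */
	rc = apei_exec_pre_map_gars(ctx);
	if (rc)
		goto err_release;
	return 0;

err_release:
	apei_resources_release(res);
err_fini:
	apei_resources_fini(res);
	return rc;
}
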
diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
new file mode 100644
index 000000000000..f4cf2fc4c8c1
--- /dev/null
+++ b/drivers/acpi/apei/cper.c
@@ -0,0 +1,84 @@
+/*
+ * UEFI Common Platform Error Record (CPER) support
+ *
+ * Copyright (C) 2010, Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * CPER is the format used to describe platform hardware errors by
+ * various APEI tables, such as ERST, BERT and HEST.
+ *
+ * For more information about CPER, please refer to Appendix N of UEFI
+ * Specification version 2.3.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/cper.h>
+#include <linux/acpi.h>
+
+/*
+ * CPER record IDs need to be unique even after reboot, because the
+ * record ID is used as the index for ERST storage, while CPER records
+ * from multiple boots may co-exist in ERST.
+ */
+u64 cper_next_record_id(void)
+{
+ static atomic64_t seq;
+
+ if (!atomic64_read(&seq))
+ atomic64_set(&seq, ((u64)get_seconds()) << 32);
+
+ return atomic64_inc_return(&seq);
+}
+EXPORT_SYMBOL_GPL(cper_next_record_id);
+
+int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus)
+{
+ if (estatus->data_length &&
+ estatus->data_length < sizeof(struct acpi_hest_generic_data))
+ return -EINVAL;
+ if (estatus->raw_data_length &&
+ estatus->raw_data_offset < sizeof(*estatus) + estatus->data_length)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_estatus_check_header);
+
+int apei_estatus_check(const struct acpi_hest_generic_status *estatus)
+{
+ struct acpi_hest_generic_data *gdata;
+ unsigned int data_len, gedata_len;
+ int rc;
+
+ rc = apei_estatus_check_header(estatus);
+ if (rc)
+ return rc;
+ data_len = estatus->data_length;
+ gdata = (struct acpi_hest_generic_data *)(estatus + 1);
+ while (data_len > sizeof(*gdata)) {
+ gedata_len = gdata->error_data_length;
+ if (gedata_len > data_len - sizeof(*gdata))
+ return -EINVAL;
+ data_len -= gedata_len + sizeof(*gdata);
+ }
+ if (data_len)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_estatus_check);
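
A sketch of how a consumer of these helpers might validate and then walk a generic error status block; the function name and printed fields are illustrative, and <linux/acpi.h> plus "apei-internal.h" are assumed to be included:

static void example_walk_estatus(struct acpi_hest_generic_status *estatus)
{
	struct acpi_hest_generic_data *gdata;

	/* Reject malformed blocks before trusting any length fields */
	if (apei_estatus_check(estatus))
		return;

	pr_info("error status block: %u bytes, severity %u\n",
		apei_estatus_len(estatus), estatus->error_severity);

	/* Visit each generic error data entry in the block */
	apei_estatus_for_each_section(estatus, gdata)
		pr_info("  section with %u bytes of error data\n",
			gdata->error_data_length);
}
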
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
new file mode 100644
index 000000000000..e308975e620b
--- /dev/null
+++ b/drivers/acpi/apei/einj.c
@@ -0,0 +1,485 @@
+/*
+ * APEI Error INJection support
+ *
+ * EINJ provides a hardware error injection mechanism; this is useful
+ * for debugging and testing other APEI and RAS features.
+ *
+ * For more information about EINJ, please refer to ACPI Specification
+ * version 4.0, section 17.5.
+ *
+ * Copyright 2009 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/nmi.h>
+#include <acpi/acpi.h>
+
+#include "apei-internal.h"
+
+#define EINJ_PFX "EINJ: "
+
+#define SPIN_UNIT 100 /* 100ns */
+/* Firmware should respond within 1 millisecond */
+#define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC)
+
+#define EINJ_OP_BUSY 0x1
+#define EINJ_STATUS_SUCCESS 0x0
+#define EINJ_STATUS_FAIL 0x1
+#define EINJ_STATUS_INVAL 0x2
+
+#define EINJ_TAB_ENTRY(tab) \
+ ((struct acpi_whea_header *)((char *)(tab) + \
+ sizeof(struct acpi_table_einj)))
+
+static struct acpi_table_einj *einj_tab;
+
+static struct apei_resources einj_resources;
+
+static struct apei_exec_ins_type einj_ins_type[] = {
+ [ACPI_EINJ_READ_REGISTER] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_read_register,
+ },
+ [ACPI_EINJ_READ_REGISTER_VALUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_read_register_value,
+ },
+ [ACPI_EINJ_WRITE_REGISTER] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_write_register,
+ },
+ [ACPI_EINJ_WRITE_REGISTER_VALUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_write_register_value,
+ },
+ [ACPI_EINJ_NOOP] = {
+ .flags = 0,
+ .run = apei_exec_noop,
+ },
+};
+
+/*
+ * Prevent the EINJ interpreter from running concurrently, because the
+ * corresponding firmware implementation may not work properly when
+ * invoked simultaneously.
+ */
+static DEFINE_MUTEX(einj_mutex);
+
+static void einj_exec_ctx_init(struct apei_exec_context *ctx)
+{
+ apei_exec_ctx_init(ctx, einj_ins_type, ARRAY_SIZE(einj_ins_type),
+ EINJ_TAB_ENTRY(einj_tab), einj_tab->entries);
+}
+
+static int __einj_get_available_error_type(u32 *type)
+{
+ struct apei_exec_context ctx;
+ int rc;
+
+ einj_exec_ctx_init(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_EINJ_GET_ERROR_TYPE);
+ if (rc)
+ return rc;
+ *type = apei_exec_ctx_get_output(&ctx);
+
+ return 0;
+}
+
+/* Get error injection capabilities of the platform */
+static int einj_get_available_error_type(u32 *type)
+{
+ int rc;
+
+ mutex_lock(&einj_mutex);
+ rc = __einj_get_available_error_type(type);
+ mutex_unlock(&einj_mutex);
+
+ return rc;
+}
+
+static int einj_timedout(u64 *t)
+{
+ if ((s64)*t < SPIN_UNIT) {
+ pr_warning(FW_WARN EINJ_PFX
+ "Firmware does not respond in time\n");
+ return 1;
+ }
+ *t -= SPIN_UNIT;
+ ndelay(SPIN_UNIT);
+ touch_nmi_watchdog();
+ return 0;
+}
+
+/* Do a sanity check on the trigger table */
+static int einj_check_trigger_header(struct acpi_einj_trigger *trigger_tab)
+{
+ if (trigger_tab->header_size != sizeof(struct acpi_einj_trigger))
+ return -EINVAL;
+ if (trigger_tab->table_size > PAGE_SIZE ||
+ trigger_tab->table_size <= trigger_tab->header_size)
+ return -EINVAL;
+ if (trigger_tab->entry_count !=
+ (trigger_tab->table_size - trigger_tab->header_size) /
+ sizeof(struct acpi_einj_entry))
+ return -EINVAL;
+
+ return 0;
+}
+
+/* Execute instructions in trigger error action table */
+static int __einj_error_trigger(u64 trigger_paddr)
+{
+ struct acpi_einj_trigger *trigger_tab = NULL;
+ struct apei_exec_context trigger_ctx;
+ struct apei_resources trigger_resources;
+ struct acpi_whea_header *trigger_entry;
+ struct resource *r;
+ u32 table_size;
+ int rc = -EIO;
+
+ r = request_mem_region(trigger_paddr, sizeof(*trigger_tab),
+ "APEI EINJ Trigger Table");
+ if (!r) {
+ pr_err(EINJ_PFX
+ "Can not request iomem region <%016llx-%016llx> for Trigger table.\n",
+ (unsigned long long)trigger_paddr,
+ (unsigned long long)trigger_paddr+sizeof(*trigger_tab));
+ goto out;
+ }
+ trigger_tab = ioremap_cache(trigger_paddr, sizeof(*trigger_tab));
+ if (!trigger_tab) {
+ pr_err(EINJ_PFX "Failed to map trigger table!\n");
+ goto out_rel_header;
+ }
+ rc = einj_check_trigger_header(trigger_tab);
+ if (rc) {
+ pr_warning(FW_BUG EINJ_PFX
+ "The trigger error action table is invalid\n");
+ goto out_rel_header;
+ }
+ rc = -EIO;
+ table_size = trigger_tab->table_size;
+ r = request_mem_region(trigger_paddr + sizeof(*trigger_tab),
+ table_size - sizeof(*trigger_tab),
+ "APEI EINJ Trigger Table");
+ if (!r) {
+ pr_err(EINJ_PFX
+"Can not request iomem region <%016llx-%016llx> for Trigger Table Entry.\n",
+ (unsigned long long)trigger_paddr+sizeof(*trigger_tab),
+ (unsigned long long)trigger_paddr + table_size);
+ goto out_rel_header;
+ }
+ iounmap(trigger_tab);
+ trigger_tab = ioremap_cache(trigger_paddr, table_size);
+ if (!trigger_tab) {
+ pr_err(EINJ_PFX "Failed to map trigger table!\n");
+ goto out_rel_entry;
+ }
+ trigger_entry = (struct acpi_whea_header *)
+ ((char *)trigger_tab + sizeof(struct acpi_einj_trigger));
+ apei_resources_init(&trigger_resources);
+ apei_exec_ctx_init(&trigger_ctx, einj_ins_type,
+ ARRAY_SIZE(einj_ins_type),
+ trigger_entry, trigger_tab->entry_count);
+ rc = apei_exec_collect_resources(&trigger_ctx, &trigger_resources);
+ if (rc)
+ goto out_fini;
+ rc = apei_resources_sub(&trigger_resources, &einj_resources);
+ if (rc)
+ goto out_fini;
+ rc = apei_resources_request(&trigger_resources, "APEI EINJ Trigger");
+ if (rc)
+ goto out_fini;
+ rc = apei_exec_pre_map_gars(&trigger_ctx);
+ if (rc)
+ goto out_release;
+
+ rc = apei_exec_run(&trigger_ctx, ACPI_EINJ_TRIGGER_ERROR);
+
+ apei_exec_post_unmap_gars(&trigger_ctx);
+out_release:
+ apei_resources_release(&trigger_resources);
+out_fini:
+ apei_resources_fini(&trigger_resources);
+out_rel_entry:
+ release_mem_region(trigger_paddr + sizeof(*trigger_tab),
+ table_size - sizeof(*trigger_tab));
+out_rel_header:
+ release_mem_region(trigger_paddr, sizeof(*trigger_tab));
+out:
+ if (trigger_tab)
+ iounmap(trigger_tab);
+
+ return rc;
+}
+
+static int __einj_error_inject(u32 type)
+{
+ struct apei_exec_context ctx;
+ u64 val, trigger_paddr, timeout = FIRMWARE_TIMEOUT;
+ int rc;
+
+ einj_exec_ctx_init(&ctx);
+
+ rc = apei_exec_run(&ctx, ACPI_EINJ_BEGIN_OPERATION);
+ if (rc)
+ return rc;
+ apei_exec_ctx_set_input(&ctx, type);
+ rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE);
+ if (rc)
+ return rc;
+ rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION);
+ if (rc)
+ return rc;
+ for (;;) {
+ rc = apei_exec_run(&ctx, ACPI_EINJ_CHECK_BUSY_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ if (!(val & EINJ_OP_BUSY))
+ break;
+ if (einj_timedout(&timeout))
+ return -EIO;
+ }
+ rc = apei_exec_run(&ctx, ACPI_EINJ_GET_COMMAND_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ if (val != EINJ_STATUS_SUCCESS)
+ return -EBUSY;
+
+ rc = apei_exec_run(&ctx, ACPI_EINJ_GET_TRIGGER_TABLE);
+ if (rc)
+ return rc;
+ trigger_paddr = apei_exec_ctx_get_output(&ctx);
+ rc = __einj_error_trigger(trigger_paddr);
+ if (rc)
+ return rc;
+ rc = apei_exec_run(&ctx, ACPI_EINJ_END_OPERATION);
+
+ return rc;
+}
+
+/* Inject the specified hardware error */
+static int einj_error_inject(u32 type)
+{
+ int rc;
+
+ mutex_lock(&einj_mutex);
+ rc = __einj_error_inject(type);
+ mutex_unlock(&einj_mutex);
+
+ return rc;
+}
+
+static u32 error_type;
+static struct dentry *einj_debug_dir;
+
+static int available_error_type_show(struct seq_file *m, void *v)
+{
+ int rc;
+ u32 available_error_type = 0;
+
+ rc = einj_get_available_error_type(&available_error_type);
+ if (rc)
+ return rc;
+ if (available_error_type & 0x0001)
+ seq_printf(m, "0x00000001\tProcessor Correctable\n");
+ if (available_error_type & 0x0002)
+ seq_printf(m, "0x00000002\tProcessor Uncorrectable non-fatal\n");
+ if (available_error_type & 0x0004)
+ seq_printf(m, "0x00000004\tProcessor Uncorrectable fatal\n");
+ if (available_error_type & 0x0008)
+ seq_printf(m, "0x00000008\tMemory Correctable\n");
+ if (available_error_type & 0x0010)
+ seq_printf(m, "0x00000010\tMemory Uncorrectable non-fatal\n");
+ if (available_error_type & 0x0020)
+ seq_printf(m, "0x00000020\tMemory Uncorrectable fatal\n");
+ if (available_error_type & 0x0040)
+ seq_printf(m, "0x00000040\tPCI Express Correctable\n");
+ if (available_error_type & 0x0080)
+ seq_printf(m, "0x00000080\tPCI Express Uncorrectable non-fatal\n");
+ if (available_error_type & 0x0100)
+ seq_printf(m, "0x00000100\tPCI Express Uncorrectable fatal\n");
+ if (available_error_type & 0x0200)
+ seq_printf(m, "0x00000200\tPlatform Correctable\n");
+ if (available_error_type & 0x0400)
+ seq_printf(m, "0x00000400\tPlatform Uncorrectable non-fatal\n");
+ if (available_error_type & 0x0800)
+ seq_printf(m, "0x00000800\tPlatform Uncorrectable fatal\n");
+
+ return 0;
+}
+
+static int available_error_type_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, available_error_type_show, NULL);
+}
+
+static const struct file_operations available_error_type_fops = {
+ .open = available_error_type_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int error_type_get(void *data, u64 *val)
+{
+ *val = error_type;
+
+ return 0;
+}
+
+static int error_type_set(void *data, u64 val)
+{
+ int rc;
+ u32 available_error_type = 0;
+
+ /* Only one error type (exactly one bit set) can be specified */
+ if (val & (val - 1))
+ return -EINVAL;
+ rc = einj_get_available_error_type(&available_error_type);
+ if (rc)
+ return rc;
+ if (!(val & available_error_type))
+ return -EINVAL;
+ error_type = val;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(error_type_fops, error_type_get,
+ error_type_set, "0x%llx\n");
+
+static int error_inject_set(void *data, u64 val)
+{
+ if (!error_type)
+ return -EINVAL;
+
+ return einj_error_inject(error_type);
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(error_inject_fops, NULL,
+ error_inject_set, "%llu\n");
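+
+/*
+ * Illustrative debugfs usage sketch for the files created in
+ * einj_init() below (the /sys/kernel/debug mount point and the "apei"
+ * parent directory returned by apei_get_debugfs_dir() are assumptions,
+ * not defined in this file):
+ *
+ *   # cat /sys/kernel/debug/apei/einj/available_error_type
+ *   # echo 0x8 > /sys/kernel/debug/apei/einj/error_type
+ *   # echo 1 > /sys/kernel/debug/apei/einj/error_inject
+ *
+ * error_type must name exactly one of the available error types;
+ * writing to error_inject then calls einj_error_inject() with it.
+ */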
+
+static int einj_check_table(struct acpi_table_einj *einj_tab)
+{
+ if (einj_tab->header_length != sizeof(struct acpi_table_einj))
+ return -EINVAL;
+ if (einj_tab->header.length < sizeof(struct acpi_table_einj))
+ return -EINVAL;
+ if (einj_tab->entries !=
+ (einj_tab->header.length - sizeof(struct acpi_table_einj)) /
+ sizeof(struct acpi_einj_entry))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __init einj_init(void)
+{
+ int rc;
+ acpi_status status;
+ struct dentry *fentry;
+ struct apei_exec_context ctx;
+
+ if (acpi_disabled)
+ return -ENODEV;
+
+ status = acpi_get_table(ACPI_SIG_EINJ, 0,
+ (struct acpi_table_header **)&einj_tab);
+ if (status == AE_NOT_FOUND) {
+ pr_info(EINJ_PFX "Table is not found!\n");
+ return -ENODEV;
+ } else if (ACPI_FAILURE(status)) {
+ const char *msg = acpi_format_exception(status);
+ pr_err(EINJ_PFX "Failed to get table, %s\n", msg);
+ return -EINVAL;
+ }
+
+ rc = einj_check_table(einj_tab);
+ if (rc) {
+ pr_warning(FW_BUG EINJ_PFX "EINJ table is invalid\n");
+ return -EINVAL;
+ }
+
+ rc = -ENOMEM;
+ einj_debug_dir = debugfs_create_dir("einj", apei_get_debugfs_dir());
+ if (!einj_debug_dir)
+ goto err_cleanup;
+ fentry = debugfs_create_file("available_error_type", S_IRUSR,
+ einj_debug_dir, NULL,
+ &available_error_type_fops);
+ if (!fentry)
+ goto err_cleanup;
+ fentry = debugfs_create_file("error_type", S_IRUSR | S_IWUSR,
+ einj_debug_dir, NULL, &error_type_fops);
+ if (!fentry)
+ goto err_cleanup;
+ fentry = debugfs_create_file("error_inject", S_IWUSR,
+ einj_debug_dir, NULL, &error_inject_fops);
+ if (!fentry)
+ goto err_cleanup;
+
+ apei_resources_init(&einj_resources);
+ einj_exec_ctx_init(&ctx);
+ rc = apei_exec_collect_resources(&ctx, &einj_resources);
+ if (rc)
+ goto err_fini;
+ rc = apei_resources_request(&einj_resources, "APEI EINJ");
+ if (rc)
+ goto err_fini;
+ rc = apei_exec_pre_map_gars(&ctx);
+ if (rc)
+ goto err_release;
+
+ pr_info(EINJ_PFX "Error INJection is initialized.\n");
+
+ return 0;
+
+err_release:
+ apei_resources_release(&einj_resources);
+err_fini:
+ apei_resources_fini(&einj_resources);
+err_cleanup:
+ debugfs_remove_recursive(einj_debug_dir);
+
+ return rc;
+}
+
+static void __exit einj_exit(void)
+{
+ struct apei_exec_context ctx;
+
+ einj_exec_ctx_init(&ctx);
+ apei_exec_post_unmap_gars(&ctx);
+ apei_resources_release(&einj_resources);
+ apei_resources_fini(&einj_resources);
+ debugfs_remove_recursive(einj_debug_dir);
+}
+
+module_init(einj_init);
+module_exit(einj_exit);
+
+MODULE_AUTHOR("Huang Ying");
+MODULE_DESCRIPTION("APEI Error INJection support");
+MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
new file mode 100644
index 000000000000..5fe5c0f10238
--- /dev/null
+++ b/drivers/acpi/apei/erst.c
@@ -0,0 +1,852 @@
+/*
+ * APEI Error Record Serialization Table support
+ *
+ * ERST is a mechanism provided by APEI to save and retrieve hardware
+ * error information to and from a persistent store.
+ *
+ * For more information about ERST, please refer to ACPI Specification
+ * version 4.0, section 17.4.
+ *
+ * Copyright 2010 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/acpi.h>
+#include <linux/uaccess.h>
+#include <linux/cper.h>
+#include <linux/nmi.h>
+#include <acpi/apei.h>
+
+#include "apei-internal.h"
+
+#define ERST_PFX "ERST: "
+
+/* ERST command status */
+#define ERST_STATUS_SUCCESS 0x0
+#define ERST_STATUS_NOT_ENOUGH_SPACE 0x1
+#define ERST_STATUS_HARDWARE_NOT_AVAILABLE 0x2
+#define ERST_STATUS_FAILED 0x3
+#define ERST_STATUS_RECORD_STORE_EMPTY 0x4
+#define ERST_STATUS_RECORD_NOT_FOUND 0x5
+
+#define ERST_TAB_ENTRY(tab) \
+ ((struct acpi_whea_header *)((char *)(tab) + \
+ sizeof(struct acpi_table_erst)))
+
+#define SPIN_UNIT 100 /* 100ns */
+/* Firmware should respond within 1 millisecond */
+#define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC)
+#define FIRMWARE_MAX_STALL 50 /* 50us */
+
+int erst_disable;
+EXPORT_SYMBOL_GPL(erst_disable);
+
+static struct acpi_table_erst *erst_tab;
+
+/* ERST Error Log Address Range attributes */
+#define ERST_RANGE_RESERVED 0x0001
+#define ERST_RANGE_NVRAM 0x0002
+#define ERST_RANGE_SLOW 0x0004
+
+/*
+ * ERST Error Log Address Range, used as buffer for reading/writing
+ * error records.
+ */
+static struct erst_erange {
+ u64 base;
+ u64 size;
+ void __iomem *vaddr;
+ u32 attr;
+} erst_erange;
+
+/*
+ * Prevent the ERST interpreter from running simultaneously, because
+ * the corresponding firmware implementation may not work properly
+ * when invoked concurrently.
+ *
+ * It is also used to provide exclusive access to the ERST Error Log
+ * Address Range.
+ */
+static DEFINE_SPINLOCK(erst_lock);
+
+static inline int erst_errno(int command_status)
+{
+ switch (command_status) {
+ case ERST_STATUS_SUCCESS:
+ return 0;
+ case ERST_STATUS_HARDWARE_NOT_AVAILABLE:
+ return -ENODEV;
+ case ERST_STATUS_NOT_ENOUGH_SPACE:
+ return -ENOSPC;
+ case ERST_STATUS_RECORD_STORE_EMPTY:
+ case ERST_STATUS_RECORD_NOT_FOUND:
+ return -ENOENT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int erst_timedout(u64 *t, u64 spin_unit)
+{
+ if ((s64)*t < spin_unit) {
+ pr_warning(FW_WARN ERST_PFX
+ "Firmware does not respond in time\n");
+ return 1;
+ }
+ *t -= spin_unit;
+ ndelay(spin_unit);
+ touch_nmi_watchdog();
+ return 0;
+}
+
+static int erst_exec_load_var1(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ return __apei_exec_read_register(entry, &ctx->var1);
+}
+
+static int erst_exec_load_var2(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ return __apei_exec_read_register(entry, &ctx->var2);
+}
+
+static int erst_exec_store_var1(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ return __apei_exec_write_register(entry, ctx->var1);
+}
+
+static int erst_exec_add(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ ctx->var1 += ctx->var2;
+ return 0;
+}
+
+static int erst_exec_subtract(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ ctx->var1 -= ctx->var2;
+ return 0;
+}
+
+static int erst_exec_add_value(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+ u64 val;
+
+ rc = __apei_exec_read_register(entry, &val);
+ if (rc)
+ return rc;
+ val += ctx->value;
+ rc = __apei_exec_write_register(entry, val);
+ return rc;
+}
+
+static int erst_exec_subtract_value(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+ u64 val;
+
+ rc = __apei_exec_read_register(entry, &val);
+ if (rc)
+ return rc;
+ val -= ctx->value;
+ rc = __apei_exec_write_register(entry, val);
+ return rc;
+}
+
+static int erst_exec_stall(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ u64 stall_time;
+
+ if (ctx->value > FIRMWARE_MAX_STALL) {
+ if (!in_nmi())
+ pr_warning(FW_WARN ERST_PFX
+ "Too long stall time for stall instruction: %llx.\n",
+ ctx->value);
+ stall_time = FIRMWARE_MAX_STALL;
+ } else
+ stall_time = ctx->value;
+ udelay(stall_time);
+ return 0;
+}
+
+static int erst_exec_stall_while_true(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+ u64 val;
+ u64 timeout = FIRMWARE_TIMEOUT;
+ u64 stall_time;
+
+ if (ctx->var1 > FIRMWARE_MAX_STALL) {
+ if (!in_nmi())
+ pr_warning(FW_WARN ERST_PFX
+ "Too long stall time for stall while true instruction: %llx.\n",
+ ctx->var1);
+ stall_time = FIRMWARE_MAX_STALL;
+ } else
+ stall_time = ctx->var1;
+
+ for (;;) {
+ rc = __apei_exec_read_register(entry, &val);
+ if (rc)
+ return rc;
+ if (val != ctx->value)
+ break;
+ if (erst_timedout(&timeout, stall_time * NSEC_PER_USEC))
+ return -EIO;
+ }
+ return 0;
+}
+
+static int erst_exec_skip_next_instruction_if_true(
+ struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+ u64 val;
+
+ rc = __apei_exec_read_register(entry, &val);
+ if (rc)
+ return rc;
+ if (val == ctx->value) {
+ ctx->ip += 2;
+ return APEI_EXEC_SET_IP;
+ }
+
+ return 0;
+}
+
+static int erst_exec_goto(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ ctx->ip = ctx->value;
+ return APEI_EXEC_SET_IP;
+}
+
+static int erst_exec_set_src_address_base(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ return __apei_exec_read_register(entry, &ctx->src_base);
+}
+
+static int erst_exec_set_dst_address_base(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ return __apei_exec_read_register(entry, &ctx->dst_base);
+}
+
+static int erst_exec_move_data(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+ u64 offset;
+
+ rc = __apei_exec_read_register(entry, &offset);
+ if (rc)
+ return rc;
+ memmove((void *)ctx->dst_base + offset,
+ (void *)ctx->src_base + offset,
+ ctx->var2);
+
+ return 0;
+}
+
+static struct apei_exec_ins_type erst_ins_type[] = {
+ [ACPI_ERST_READ_REGISTER] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_read_register,
+ },
+ [ACPI_ERST_READ_REGISTER_VALUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_read_register_value,
+ },
+ [ACPI_ERST_WRITE_REGISTER] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_write_register,
+ },
+ [ACPI_ERST_WRITE_REGISTER_VALUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_write_register_value,
+ },
+ [ACPI_ERST_NOOP] = {
+ .flags = 0,
+ .run = apei_exec_noop,
+ },
+ [ACPI_ERST_LOAD_VAR1] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_load_var1,
+ },
+ [ACPI_ERST_LOAD_VAR2] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_load_var2,
+ },
+ [ACPI_ERST_STORE_VAR1] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_store_var1,
+ },
+ [ACPI_ERST_ADD] = {
+ .flags = 0,
+ .run = erst_exec_add,
+ },
+ [ACPI_ERST_SUBTRACT] = {
+ .flags = 0,
+ .run = erst_exec_subtract,
+ },
+ [ACPI_ERST_ADD_VALUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_add_value,
+ },
+ [ACPI_ERST_SUBTRACT_VALUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_subtract_value,
+ },
+ [ACPI_ERST_STALL] = {
+ .flags = 0,
+ .run = erst_exec_stall,
+ },
+ [ACPI_ERST_STALL_WHILE_TRUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_stall_while_true,
+ },
+ [ACPI_ERST_SKIP_NEXT_IF_TRUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_skip_next_instruction_if_true,
+ },
+ [ACPI_ERST_GOTO] = {
+ .flags = 0,
+ .run = erst_exec_goto,
+ },
+ [ACPI_ERST_SET_SRC_ADDRESS_BASE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_set_src_address_base,
+ },
+ [ACPI_ERST_SET_DST_ADDRESS_BASE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_set_dst_address_base,
+ },
+ [ACPI_ERST_MOVE_DATA] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_move_data,
+ },
+};
+
+static inline void erst_exec_ctx_init(struct apei_exec_context *ctx)
+{
+ apei_exec_ctx_init(ctx, erst_ins_type, ARRAY_SIZE(erst_ins_type),
+ ERST_TAB_ENTRY(erst_tab), erst_tab->entries);
+}
+
+static int erst_get_erange(struct erst_erange *range)
+{
+ struct apei_exec_context ctx;
+ int rc;
+
+ erst_exec_ctx_init(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_RANGE);
+ if (rc)
+ return rc;
+ range->base = apei_exec_ctx_get_output(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_LENGTH);
+ if (rc)
+ return rc;
+ range->size = apei_exec_ctx_get_output(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_ATTRIBUTES);
+ if (rc)
+ return rc;
+ range->attr = apei_exec_ctx_get_output(&ctx);
+
+ return 0;
+}
+
+static ssize_t __erst_get_record_count(void)
+{
+ struct apei_exec_context ctx;
+ int rc;
+
+ erst_exec_ctx_init(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_RECORD_COUNT);
+ if (rc)
+ return rc;
+ return apei_exec_ctx_get_output(&ctx);
+}
+
+ssize_t erst_get_record_count(void)
+{
+ ssize_t count;
+ unsigned long flags;
+
+ if (erst_disable)
+ return -ENODEV;
+
+ spin_lock_irqsave(&erst_lock, flags);
+ count = __erst_get_record_count();
+ spin_unlock_irqrestore(&erst_lock, flags);
+
+ return count;
+}
+EXPORT_SYMBOL_GPL(erst_get_record_count);
+
+static int __erst_get_next_record_id(u64 *record_id)
+{
+ struct apei_exec_context ctx;
+ int rc;
+
+ erst_exec_ctx_init(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_RECORD_ID);
+ if (rc)
+ return rc;
+ *record_id = apei_exec_ctx_get_output(&ctx);
+
+ return 0;
+}
+
+/*
+ * Get the record ID of an existing error record on the persistent
+ * storage. If there is no error record on the persistent storage, the
+ * returned record_id is APEI_ERST_INVALID_RECORD_ID.
+ */
+int erst_get_next_record_id(u64 *record_id)
+{
+ int rc;
+ unsigned long flags;
+
+ if (erst_disable)
+ return -ENODEV;
+
+ spin_lock_irqsave(&erst_lock, flags);
+ rc = __erst_get_next_record_id(record_id);
+ spin_unlock_irqrestore(&erst_lock, flags);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(erst_get_next_record_id);
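+
+/*
+ * Usage sketch (illustrative; the caller's error handling is an
+ * assumption, not part of this file):
+ *
+ *   u64 record_id;
+ *
+ *   rc = erst_get_next_record_id(&record_id);
+ *   if (!rc && record_id != APEI_ERST_INVALID_RECORD_ID)
+ *           ... read or clear the record with this id ...
+ */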
+
+static int __erst_write_to_storage(u64 offset)
+{
+ struct apei_exec_context ctx;
+ u64 timeout = FIRMWARE_TIMEOUT;
+ u64 val;
+ int rc;
+
+ erst_exec_ctx_init(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_WRITE);
+ if (rc)
+ return rc;
+ apei_exec_ctx_set_input(&ctx, offset);
+ rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_OFFSET);
+ if (rc)
+ return rc;
+ rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION);
+ if (rc)
+ return rc;
+ for (;;) {
+ rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ if (!val)
+ break;
+ if (erst_timedout(&timeout, SPIN_UNIT))
+ return -EIO;
+ }
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_END);
+ if (rc)
+ return rc;
+
+ return erst_errno(val);
+}
+
+static int __erst_read_from_storage(u64 record_id, u64 offset)
+{
+ struct apei_exec_context ctx;
+ u64 timeout = FIRMWARE_TIMEOUT;
+ u64 val;
+ int rc;
+
+ erst_exec_ctx_init(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_READ);
+ if (rc)
+ return rc;
+ apei_exec_ctx_set_input(&ctx, offset);
+ rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_OFFSET);
+ if (rc)
+ return rc;
+ apei_exec_ctx_set_input(&ctx, record_id);
+ rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_ID);
+ if (rc)
+ return rc;
+ rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION);
+ if (rc)
+ return rc;
+ for (;;) {
+ rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ if (!val)
+ break;
+ if (erst_timedout(&timeout, SPIN_UNIT))
+ return -EIO;
+ }
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_END);
+ if (rc)
+ return rc;
+
+ return erst_errno(val);
+}
+
+static int __erst_clear_from_storage(u64 record_id)
+{
+ struct apei_exec_context ctx;
+ u64 timeout = FIRMWARE_TIMEOUT;
+ u64 val;
+ int rc;
+
+ erst_exec_ctx_init(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_CLEAR);
+ if (rc)
+ return rc;
+ apei_exec_ctx_set_input(&ctx, record_id);
+ rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_ID);
+ if (rc)
+ return rc;
+ rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION);
+ if (rc)
+ return rc;
+ for (;;) {
+ rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ if (!val)
+ break;
+ if (erst_timedout(&timeout, SPIN_UNIT))
+ return -EIO;
+ }
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_END);
+ if (rc)
+ return rc;
+
+ return erst_errno(val);
+}
+
+/* NVRAM ERST Error Log Address Range is not supported yet */
+static void pr_unimpl_nvram(void)
+{
+ if (printk_ratelimit())
+ pr_warning(ERST_PFX
+ "NVRAM ERST Log Address Range is not implemented yet\n");
+}
+
+static int __erst_write_to_nvram(const struct cper_record_header *record)
+{
+ /* do not print a message, because printk is not safe in NMI context */
+ return -ENOSYS;
+}
+
+static int __erst_read_to_erange_from_nvram(u64 record_id, u64 *offset)
+{
+ pr_unimpl_nvram();
+ return -ENOSYS;
+}
+
+static int __erst_clear_from_nvram(u64 record_id)
+{
+ pr_unimpl_nvram();
+ return -ENOSYS;
+}
+
+int erst_write(const struct cper_record_header *record)
+{
+ int rc;
+ unsigned long flags;
+ struct cper_record_header *rcd_erange;
+
+ if (erst_disable)
+ return -ENODEV;
+
+ if (memcmp(record->signature, CPER_SIG_RECORD, CPER_SIG_SIZE))
+ return -EINVAL;
+
+ if (erst_erange.attr & ERST_RANGE_NVRAM) {
+ if (!spin_trylock_irqsave(&erst_lock, flags))
+ return -EBUSY;
+ rc = __erst_write_to_nvram(record);
+ spin_unlock_irqrestore(&erst_lock, flags);
+ return rc;
+ }
+
+ if (record->record_length > erst_erange.size)
+ return -EINVAL;
+
+ if (!spin_trylock_irqsave(&erst_lock, flags))
+ return -EBUSY;
+ memcpy(erst_erange.vaddr, record, record->record_length);
+ rcd_erange = erst_erange.vaddr;
+ /* signature for serialization system */
+ memcpy(&rcd_erange->persistence_information, "ER", 2);
+
+ rc = __erst_write_to_storage(0);
+ spin_unlock_irqrestore(&erst_lock, flags);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(erst_write);
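+
+/*
+ * Usage sketch for erst_write() (illustrative; rcd_len and the record
+ * contents are assumptions of the caller, not defined here):
+ *
+ *   struct cper_record_header *rcd = kzalloc(rcd_len, GFP_KERNEL);
+ *
+ *   memcpy(rcd->signature, CPER_SIG_RECORD, CPER_SIG_SIZE);
+ *   rcd->record_length = rcd_len;
+ *   ... fill in the section descriptor(s) and section data ...
+ *   rc = erst_write(rcd);   (-EINVAL on a bad signature or oversized record)
+ */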
+
+static int __erst_read_to_erange(u64 record_id, u64 *offset)
+{
+ int rc;
+
+ if (erst_erange.attr & ERST_RANGE_NVRAM)
+ return __erst_read_to_erange_from_nvram(
+ record_id, offset);
+
+ rc = __erst_read_from_storage(record_id, 0);
+ if (rc)
+ return rc;
+ *offset = 0;
+
+ return 0;
+}
+
+static ssize_t __erst_read(u64 record_id, struct cper_record_header *record,
+ size_t buflen)
+{
+ int rc;
+ u64 offset, len = 0;
+ struct cper_record_header *rcd_tmp;
+
+ rc = __erst_read_to_erange(record_id, &offset);
+ if (rc)
+ return rc;
+ rcd_tmp = erst_erange.vaddr + offset;
+ len = rcd_tmp->record_length;
+ if (len <= buflen)
+ memcpy(record, rcd_tmp, len);
+
+ return len;
+}
+
+/*
+ * If the return value > buflen, the buffer size is not big enough,
+ * else if the return value < 0, something went wrong,
+ * else everything is OK, and the return value is the record length.
+ */
+ssize_t erst_read(u64 record_id, struct cper_record_header *record,
+ size_t buflen)
+{
+ ssize_t len;
+ unsigned long flags;
+
+ if (erst_disable)
+ return -ENODEV;
+
+ spin_lock_irqsave(&erst_lock, flags);
+ len = __erst_read(record_id, record, buflen);
+ spin_unlock_irqrestore(&erst_lock, flags);
+ return len;
+}
+EXPORT_SYMBOL_GPL(erst_read);
+
+/*
+ * If the return value > buflen, the buffer size is not big enough,
+ * else if the return value = 0, there are no more records to read,
+ * else if the return value < 0, something went wrong,
+ * else everything is OK, and the return value is the record length.
+ */
+ssize_t erst_read_next(struct cper_record_header *record, size_t buflen)
+{
+ int rc;
+ ssize_t len;
+ unsigned long flags;
+ u64 record_id;
+
+ spin_lock_irqsave(&erst_lock, flags);
+ rc = __erst_get_next_record_id(&record_id);
+ if (rc) {
+ spin_unlock_irqrestore(&erst_lock, flags);
+ return rc;
+ }
+ /* no more record */
+ if (record_id == APEI_ERST_INVALID_RECORD_ID) {
+ spin_unlock_irqrestore(&erst_lock, flags);
+ return 0;
+ }
+
+ len = __erst_read(record_id, record, buflen);
+ spin_unlock_irqrestore(&erst_lock, flags);
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(erst_read_next);
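+
+/*
+ * Usage sketch for erst_read_next() (illustrative; buflen and the
+ * error handling policy are assumptions of the caller):
+ *
+ *   struct cper_record_header *rcd = kmalloc(buflen, GFP_KERNEL);
+ *   ssize_t len;
+ *
+ *   for (;;) {
+ *           len = erst_read_next(rcd, buflen);
+ *           if (len == 0)
+ *                   break;          (no more records)
+ *           if (len < 0 || len > buflen)
+ *                   goto err;       (failure, or buffer too small)
+ *           ... process the record in rcd ...
+ *   }
+ */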
+
+int erst_clear(u64 record_id)
+{
+ int rc;
+ unsigned long flags;
+
+ if (erst_disable)
+ return -ENODEV;
+
+ spin_lock_irqsave(&erst_lock, flags);
+ if (erst_erange.attr & ERST_RANGE_NVRAM)
+ rc = __erst_clear_from_nvram(record_id);
+ else
+ rc = __erst_clear_from_storage(record_id);
+ spin_unlock_irqrestore(&erst_lock, flags);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(erst_clear);
+
+static int __init setup_erst_disable(char *str)
+{
+ erst_disable = 1;
+ return 0;
+}
+
+__setup("erst_disable", setup_erst_disable);
+
+static int erst_check_table(struct acpi_table_erst *erst_tab)
+{
+ if (erst_tab->header_length != sizeof(struct acpi_table_erst))
+ return -EINVAL;
+ if (erst_tab->header.length < sizeof(struct acpi_table_erst))
+ return -EINVAL;
+ if (erst_tab->entries !=
+ (erst_tab->header.length - sizeof(struct acpi_table_erst)) /
+ sizeof(struct acpi_erst_entry))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __init erst_init(void)
+{
+ int rc = 0;
+ acpi_status status;
+ struct apei_exec_context ctx;
+ struct apei_resources erst_resources;
+ struct resource *r;
+
+ if (acpi_disabled)
+ goto err;
+
+ if (erst_disable) {
+ pr_info(ERST_PFX
+ "Error Record Serialization Table (ERST) support is disabled.\n");
+ goto err;
+ }
+
+ status = acpi_get_table(ACPI_SIG_ERST, 0,
+ (struct acpi_table_header **)&erst_tab);
+ if (status == AE_NOT_FOUND) {
+ pr_err(ERST_PFX "Table is not found!\n");
+ goto err;
+ } else if (ACPI_FAILURE(status)) {
+ const char *msg = acpi_format_exception(status);
+ pr_err(ERST_PFX "Failed to get table, %s\n", msg);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ rc = erst_check_table(erst_tab);
+ if (rc) {
+ pr_err(FW_BUG ERST_PFX "ERST table is invalid\n");
+ goto err;
+ }
+
+ apei_resources_init(&erst_resources);
+ erst_exec_ctx_init(&ctx);
+ rc = apei_exec_collect_resources(&ctx, &erst_resources);
+ if (rc)
+ goto err_fini;
+ rc = apei_resources_request(&erst_resources, "APEI ERST");
+ if (rc)
+ goto err_fini;
+ rc = apei_exec_pre_map_gars(&ctx);
+ if (rc)
+ goto err_release;
+ rc = erst_get_erange(&erst_erange);
+ if (rc) {
+ if (rc == -ENODEV)
+ pr_info(ERST_PFX
+ "The corresponding hardware device or firmware implementation "
+ "is not available.\n");
+ else
+ pr_err(ERST_PFX
+ "Failed to get Error Log Address Range.\n");
+ goto err_unmap_reg;
+ }
+
+ r = request_mem_region(erst_erange.base, erst_erange.size, "APEI ERST");
+ if (!r) {
+ pr_err(ERST_PFX
+ "Can not request iomem region <0x%16llx-0x%16llx> for ERST.\n",
+ (unsigned long long)erst_erange.base,
+ (unsigned long long)erst_erange.base + erst_erange.size);
+ rc = -EIO;
+ goto err_unmap_reg;
+ }
+ rc = -ENOMEM;
+ erst_erange.vaddr = ioremap_cache(erst_erange.base,
+ erst_erange.size);
+ if (!erst_erange.vaddr)
+ goto err_release_erange;
+
+ pr_info(ERST_PFX
+ "Error Record Serialization Table (ERST) support is initialized.\n");
+
+ return 0;
+
+err_release_erange:
+ release_mem_region(erst_erange.base, erst_erange.size);
+err_unmap_reg:
+ apei_exec_post_unmap_gars(&ctx);
+err_release:
+ apei_resources_release(&erst_resources);
+err_fini:
+ apei_resources_fini(&erst_resources);
+err:
+ erst_disable = 1;
+ return rc;
+}
+
+device_initcall(erst_init);
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
new file mode 100644
index 000000000000..92a702923607
--- /dev/null
+++ b/drivers/acpi/apei/ghes.c
@@ -0,0 +1,425 @@
+/*
+ * APEI Generic Hardware Error Source support
+ *
+ * Generic Hardware Error Source provides a way to report platform
+ * hardware errors (such as those from the chipset). It works in the
+ * so-called "Firmware First" mode: hardware errors are reported to
+ * the firmware first, and the firmware then reports them to Linux.
+ * This way, the firmware can check some non-standard hardware error
+ * registers or non-standard hardware links to produce richer hardware
+ * error information for Linux.
+ *
+ * For more information about Generic Hardware Error Source, please
+ * refer to ACPI Specification version 4.0, section 17.3.2.6
+ *
+ * Currently, only the SCI notification type and memory errors are
+ * supported. More notification types and hardware error types will
+ * be added later.
+ *
+ * Copyright 2010 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation;
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/cper.h>
+#include <linux/kdebug.h>
+#include <acpi/apei.h>
+#include <acpi/atomicio.h>
+#include <acpi/hed.h>
+#include <asm/mce.h>
+
+#include "apei-internal.h"
+
+#define GHES_PFX "GHES: "
+
+#define GHES_ESTATUS_MAX_SIZE 65536
+
+/*
+ * One struct ghes is created for each generic hardware error
+ * source.
+ *
+ * It provides the context for the APEI hardware error timer/IRQ/SCI/NMI
+ * handlers. The handler for a given generic hardware error source is
+ * only triggered after the previous invocation has completed, so the
+ * handler can use struct ghes without locking.
+ *
+ * estatus: memory buffer for error status block, allocated during
+ * HEST parsing.
+ */
+#define GHES_TO_CLEAR 0x0001
+
+struct ghes {
+ struct acpi_hest_generic *generic;
+ struct acpi_hest_generic_status *estatus;
+ struct list_head list;
+ u64 buffer_paddr;
+ unsigned long flags;
+};
+
+/*
+ * Error source lists, one list for each notification method. The
+ * members of the lists are struct ghes.
+ *
+ * The list members are only added during HEST parsing and deleted
+ * during module_exit, that is, single-threaded, so no lock is needed
+ * for that.
+ *
+ * But mutual exclusion is needed between adding/deleting members and
+ * the timer/IRQ/SCI/NMI handlers, which may traverse the lists; RCU
+ * is used for that.
+ */
+static LIST_HEAD(ghes_sci);
+
+static struct ghes *ghes_new(struct acpi_hest_generic *generic)
+{
+ struct ghes *ghes;
+ unsigned int error_block_length;
+ int rc;
+
+ ghes = kzalloc(sizeof(*ghes), GFP_KERNEL);
+ if (!ghes)
+ return ERR_PTR(-ENOMEM);
+ ghes->generic = generic;
+ INIT_LIST_HEAD(&ghes->list);
+ rc = acpi_pre_map_gar(&generic->error_status_address);
+ if (rc)
+ goto err_free;
+ error_block_length = generic->error_block_length;
+ if (error_block_length > GHES_ESTATUS_MAX_SIZE) {
+ pr_warning(FW_WARN GHES_PFX
+ "Error status block length is too long: %u for "
+ "generic hardware error source: %d.\n",
+ error_block_length, generic->header.source_id);
+ error_block_length = GHES_ESTATUS_MAX_SIZE;
+ }
+ ghes->estatus = kmalloc(error_block_length, GFP_KERNEL);
+ if (!ghes->estatus) {
+ rc = -ENOMEM;
+ goto err_unmap;
+ }
+
+ return ghes;
+
+err_unmap:
+ acpi_post_unmap_gar(&generic->error_status_address);
+err_free:
+ kfree(ghes);
+ return ERR_PTR(rc);
+}
+
+static void ghes_fini(struct ghes *ghes)
+{
+ kfree(ghes->estatus);
+ acpi_post_unmap_gar(&ghes->generic->error_status_address);
+}
+
+enum {
+ GHES_SER_NO = 0x0,
+ GHES_SER_CORRECTED = 0x1,
+ GHES_SER_RECOVERABLE = 0x2,
+ GHES_SER_PANIC = 0x3,
+};
+
+static inline int ghes_severity(int severity)
+{
+ switch (severity) {
+ case CPER_SER_INFORMATIONAL:
+ return GHES_SER_NO;
+ case CPER_SER_CORRECTED:
+ return GHES_SER_CORRECTED;
+ case CPER_SER_RECOVERABLE:
+ return GHES_SER_RECOVERABLE;
+ case CPER_SER_FATAL:
+ return GHES_SER_PANIC;
+ default:
+ /* Unknown, go panic */
+ return GHES_SER_PANIC;
+ }
+}
+
+/* The SCI handler runs in a work queue, so ioremap can be used here */
+static int ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
+ int from_phys)
+{
+ void *vaddr;
+
+ vaddr = ioremap_cache(paddr, len);
+ if (!vaddr)
+ return -ENOMEM;
+ if (from_phys)
+ memcpy(buffer, vaddr, len);
+ else
+ memcpy(vaddr, buffer, len);
+ iounmap(vaddr);
+
+ return 0;
+}
+
+static int ghes_read_estatus(struct ghes *ghes, int silent)
+{
+ struct acpi_hest_generic *g = ghes->generic;
+ u64 buf_paddr;
+ u32 len;
+ int rc;
+
+ rc = acpi_atomic_read(&buf_paddr, &g->error_status_address);
+ if (rc) {
+ if (!silent && printk_ratelimit())
+ pr_warning(FW_WARN GHES_PFX
+"Failed to read error status block address for hardware error source: %d.\n",
+ g->header.source_id);
+ return -EIO;
+ }
+ if (!buf_paddr)
+ return -ENOENT;
+
+ rc = ghes_copy_tofrom_phys(ghes->estatus, buf_paddr,
+ sizeof(*ghes->estatus), 1);
+ if (rc)
+ return rc;
+ if (!ghes->estatus->block_status)
+ return -ENOENT;
+
+ ghes->buffer_paddr = buf_paddr;
+ ghes->flags |= GHES_TO_CLEAR;
+
+ rc = -EIO;
+ len = apei_estatus_len(ghes->estatus);
+ if (len < sizeof(*ghes->estatus))
+ goto err_read_block;
+ if (len > ghes->generic->error_block_length)
+ goto err_read_block;
+ if (apei_estatus_check_header(ghes->estatus))
+ goto err_read_block;
+ rc = ghes_copy_tofrom_phys(ghes->estatus + 1,
+ buf_paddr + sizeof(*ghes->estatus),
+ len - sizeof(*ghes->estatus), 1);
+ if (rc)
+ return rc;
+ if (apei_estatus_check(ghes->estatus))
+ goto err_read_block;
+ rc = 0;
+
+err_read_block:
+ if (rc && !silent)
+ pr_warning(FW_WARN GHES_PFX
+ "Failed to read error status block!\n");
+ return rc;
+}
+
+static void ghes_clear_estatus(struct ghes *ghes)
+{
+ ghes->estatus->block_status = 0;
+ if (!(ghes->flags & GHES_TO_CLEAR))
+ return;
+ ghes_copy_tofrom_phys(ghes->estatus, ghes->buffer_paddr,
+ sizeof(ghes->estatus->block_status), 0);
+ ghes->flags &= ~GHES_TO_CLEAR;
+}
+
+static void ghes_do_proc(struct ghes *ghes)
+{
+ int ser, processed = 0;
+ struct acpi_hest_generic_data *gdata;
+
+ ser = ghes_severity(ghes->estatus->error_severity);
+ apei_estatus_for_each_section(ghes->estatus, gdata) {
+ if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
+ CPER_SEC_PLATFORM_MEM)) {
+ apei_mce_report_mem_error(
+ ser == GHES_SER_CORRECTED,
+ (struct cper_sec_mem_err *)(gdata+1));
+ processed = 1;
+ }
+ }
+
+ if (!processed && printk_ratelimit())
+ pr_warning(GHES_PFX
+ "Unknown error record from generic hardware error source: %d\n",
+ ghes->generic->header.source_id);
+}
+
+static int ghes_proc(struct ghes *ghes)
+{
+ int rc;
+
+ rc = ghes_read_estatus(ghes, 0);
+ if (rc)
+ goto out;
+ ghes_do_proc(ghes);
+
+out:
+ ghes_clear_estatus(ghes);
+ return 0;
+}
+
+static int ghes_notify_sci(struct notifier_block *this,
+ unsigned long event, void *data)
+{
+ struct ghes *ghes;
+ int ret = NOTIFY_DONE;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ghes, &ghes_sci, list) {
+ if (!ghes_proc(ghes))
+ ret = NOTIFY_OK;
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static struct notifier_block ghes_notifier_sci = {
+ .notifier_call = ghes_notify_sci,
+};
+
+static int hest_ghes_parse(struct acpi_hest_header *hest_hdr, void *data)
+{
+ struct acpi_hest_generic *generic;
+ struct ghes *ghes = NULL;
+ int rc = 0;
+
+ if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR)
+ return 0;
+
+ generic = (struct acpi_hest_generic *)hest_hdr;
+ if (!generic->enabled)
+ return 0;
+
+ if (generic->error_block_length <
+ sizeof(struct acpi_hest_generic_status)) {
+ pr_warning(FW_BUG GHES_PFX
+"Invalid error block length: %u for generic hardware error source: %d\n",
+ generic->error_block_length,
+ generic->header.source_id);
+ goto err;
+ }
+ if (generic->records_to_preallocate == 0) {
+ pr_warning(FW_BUG GHES_PFX
+"Invalid records to preallocate: %u for generic hardware error source: %d\n",
+ generic->records_to_preallocate,
+ generic->header.source_id);
+ goto err;
+ }
+ ghes = ghes_new(generic);
+ if (IS_ERR(ghes)) {
+ rc = PTR_ERR(ghes);
+ ghes = NULL;
+ goto err;
+ }
+ switch (generic->notify.type) {
+ case ACPI_HEST_NOTIFY_POLLED:
+ pr_warning(GHES_PFX
+"Generic hardware error source: %d notified via POLL is not supported!\n",
+ generic->header.source_id);
+ break;
+ case ACPI_HEST_NOTIFY_EXTERNAL:
+ case ACPI_HEST_NOTIFY_LOCAL:
+ pr_warning(GHES_PFX
+"Generic hardware error source: %d notified via IRQ is not supported!\n",
+ generic->header.source_id);
+ break;
+ case ACPI_HEST_NOTIFY_SCI:
+ if (list_empty(&ghes_sci))
+ register_acpi_hed_notifier(&ghes_notifier_sci);
+ list_add_rcu(&ghes->list, &ghes_sci);
+ break;
+ case ACPI_HEST_NOTIFY_NMI:
+ pr_warning(GHES_PFX
+"Generic hardware error source: %d notified via NMI is not supported!\n",
+ generic->header.source_id);
+ break;
+ default:
+ pr_warning(FW_WARN GHES_PFX
+ "Unknown notification type: %u for generic hardware error source: %d\n",
+ generic->notify.type, generic->header.source_id);
+ break;
+ }
+
+ return 0;
+err:
+ if (ghes)
+ ghes_fini(ghes);
+ return rc;
+}
+
+static void ghes_cleanup(void)
+{
+ struct ghes *ghes, *nghes;
+
+ if (!list_empty(&ghes_sci))
+ unregister_acpi_hed_notifier(&ghes_notifier_sci);
+
+ synchronize_rcu();
+
+ list_for_each_entry_safe(ghes, nghes, &ghes_sci, list) {
+ list_del(&ghes->list);
+ ghes_fini(ghes);
+ kfree(ghes);
+ }
+}
+
+static int __init ghes_init(void)
+{
+ int rc;
+
+ if (acpi_disabled)
+ return -ENODEV;
+
+ if (hest_disable) {
+ pr_info(GHES_PFX "HEST is not enabled!\n");
+ return -EINVAL;
+ }
+
+ rc = apei_hest_parse(hest_ghes_parse, NULL);
+ if (rc) {
+ pr_err(GHES_PFX
+ "Error during parsing HEST generic hardware error sources.\n");
+ goto err_cleanup;
+ }
+
+ if (list_empty(&ghes_sci)) {
+ pr_info(GHES_PFX
+ "No functional generic hardware error sources.\n");
+ rc = -ENODEV;
+ goto err_cleanup;
+ }
+
+ pr_info(GHES_PFX
+ "Generic Hardware Error Source support is initialized.\n");
+
+ return 0;
+err_cleanup:
+ ghes_cleanup();
+ return rc;
+}
+
+static void __exit ghes_exit(void)
+{
+ ghes_cleanup();
+}
+
+module_init(ghes_init);
+module_exit(ghes_exit);
+
+MODULE_AUTHOR("Huang Ying");
+MODULE_DESCRIPTION("APEI Generic Hardware Error Source support");
+MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
new file mode 100644
index 000000000000..e7f40d362cb3
--- /dev/null
+++ b/drivers/acpi/apei/hest.c
@@ -0,0 +1,173 @@
+/*
+ * APEI Hardware Error Source Table support
+ *
+ * HEST describes error sources in detail and communicates operational
+ * parameters (e.g. severity levels, masking bits, and threshold
+ * values) to Linux as necessary. It also allows the BIOS to report
+ * non-standard error sources to Linux (for example, chipset-specific
+ * error registers).
+ *
+ * For more information about HEST, please refer to ACPI Specification
+ * version 4.0, section 17.3.2.
+ *
+ * Copyright 2009 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation;
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/kdebug.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <acpi/apei.h>
+
+#include "apei-internal.h"
+
+#define HEST_PFX "HEST: "
+
+int hest_disable;
+EXPORT_SYMBOL_GPL(hest_disable);
+
+/* HEST table parsing */
+
+static struct acpi_table_hest *hest_tab;
+
+static int hest_void_parse(struct acpi_hest_header *hest_hdr, void *data)
+{
+ return 0;
+}
+
+static int hest_esrc_len_tab[ACPI_HEST_TYPE_RESERVED] = {
+ [ACPI_HEST_TYPE_IA32_CHECK] = -1, /* need further calculation */
+ [ACPI_HEST_TYPE_IA32_CORRECTED_CHECK] = -1,
+ [ACPI_HEST_TYPE_IA32_NMI] = sizeof(struct acpi_hest_ia_nmi),
+ [ACPI_HEST_TYPE_AER_ROOT_PORT] = sizeof(struct acpi_hest_aer_root),
+ [ACPI_HEST_TYPE_AER_ENDPOINT] = sizeof(struct acpi_hest_aer),
+ [ACPI_HEST_TYPE_AER_BRIDGE] = sizeof(struct acpi_hest_aer_bridge),
+ [ACPI_HEST_TYPE_GENERIC_ERROR] = sizeof(struct acpi_hest_generic),
+};
+
+static int hest_esrc_len(struct acpi_hest_header *hest_hdr)
+{
+ u16 hest_type = hest_hdr->type;
+ int len;
+
+ if (hest_type >= ACPI_HEST_TYPE_RESERVED)
+ return 0;
+
+ len = hest_esrc_len_tab[hest_type];
+
+ if (hest_type == ACPI_HEST_TYPE_IA32_CORRECTED_CHECK) {
+ struct acpi_hest_ia_corrected *cmc;
+ cmc = (struct acpi_hest_ia_corrected *)hest_hdr;
+ len = sizeof(*cmc) + cmc->num_hardware_banks *
+ sizeof(struct acpi_hest_ia_error_bank);
+ } else if (hest_type == ACPI_HEST_TYPE_IA32_CHECK) {
+ struct acpi_hest_ia_machine_check *mc;
+ mc = (struct acpi_hest_ia_machine_check *)hest_hdr;
+ len = sizeof(*mc) + mc->num_hardware_banks *
+ sizeof(struct acpi_hest_ia_error_bank);
+ }
+ BUG_ON(len == -1);
+
+ return len;
+}
+
+int apei_hest_parse(apei_hest_func_t func, void *data)
+{
+ struct acpi_hest_header *hest_hdr;
+ int i, rc, len;
+
+ if (hest_disable)
+ return -EINVAL;
+
+ hest_hdr = (struct acpi_hest_header *)(hest_tab + 1);
+ for (i = 0; i < hest_tab->error_source_count; i++) {
+ len = hest_esrc_len(hest_hdr);
+ if (!len) {
+ pr_warning(FW_WARN HEST_PFX
+ "Unknown or unused hardware error source "
+ "type: %d for hardware error source: %d.\n",
+ hest_hdr->type, hest_hdr->source_id);
+ return -EINVAL;
+ }
+ if ((void *)hest_hdr + len >
+ (void *)hest_tab + hest_tab->header.length) {
+ pr_warning(FW_BUG HEST_PFX
+ "Table contents overflow for hardware error source: %d.\n",
+ hest_hdr->source_id);
+ return -EINVAL;
+ }
+
+ rc = func(hest_hdr, data);
+ if (rc)
+ return rc;
+
+ hest_hdr = (void *)hest_hdr + len;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_hest_parse);
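+
+/*
+ * Callback sketch for apei_hest_parse() (illustrative; the counting
+ * callback below is an assumption, not part of this file):
+ *
+ *   static int hest_count_generic(struct acpi_hest_header *hest_hdr,
+ *                                 void *data)
+ *   {
+ *           if (hest_hdr->type == ACPI_HEST_TYPE_GENERIC_ERROR)
+ *                   (*(int *)data)++;
+ *           return 0;       (a non-zero return aborts the walk)
+ *   }
+ *
+ *   int count = 0;
+ *   rc = apei_hest_parse(hest_count_generic, &count);
+ */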
+
+static int __init setup_hest_disable(char *str)
+{
+ hest_disable = 1;
+ return 0;
+}
+
+__setup("hest_disable", setup_hest_disable);
+
+static int __init hest_init(void)
+{
+ acpi_status status;
+ int rc = -ENODEV;
+
+ if (acpi_disabled)
+ goto err;
+
+ if (hest_disable) {
+ pr_info(HEST_PFX "HEST table parsing is disabled.\n");
+ goto err;
+ }
+
+ status = acpi_get_table(ACPI_SIG_HEST, 0,
+ (struct acpi_table_header **)&hest_tab);
+ if (status == AE_NOT_FOUND) {
+ pr_info(HEST_PFX "Table is not found!\n");
+ goto err;
+ } else if (ACPI_FAILURE(status)) {
+ const char *msg = acpi_format_exception(status);
+ pr_err(HEST_PFX "Failed to get table, %s\n", msg);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ rc = apei_hest_parse(hest_void_parse, NULL);
+ if (rc)
+ goto err;
+
+ pr_info(HEST_PFX "HEST table parsing is initialized.\n");
+
+ return 0;
+err:
+ hest_disable = 1;
+ return rc;
+}
+
+subsys_initcall(hest_init);
diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c
new file mode 100644
index 000000000000..8f8bd736d4ff
--- /dev/null
+++ b/drivers/acpi/atomicio.c
@@ -0,0 +1,361 @@
+/*
+ * atomicio.c - ACPI IO memory pre-mapping/post-unmapping, then
+ * accessing in atomic context.
+ *
+ * This is used by the NMI handler to access IO memory areas, because
+ * ioremap/iounmap cannot be used in an NMI handler. The IO memory
+ * area is pre-mapped in process context and accessed in the NMI
+ * handler.
+ *
+ * Copyright (C) 2009-2010, Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/kref.h>
+#include <linux/rculist.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <acpi/atomicio.h>
+
+#define ACPI_PFX "ACPI: "
+
+static LIST_HEAD(acpi_iomaps);
+/*
+ * Used for mutual exclusion between writers of the acpi_iomaps list;
+ * for synchronization between readers and writers, RCU is used.
+ */
+static DEFINE_SPINLOCK(acpi_iomaps_lock);
+
+struct acpi_iomap {
+ struct list_head list;
+ void __iomem *vaddr;
+ unsigned long size;
+ phys_addr_t paddr;
+ struct kref ref;
+};
+
+/* acpi_iomaps_lock or RCU read lock must be held before calling */
+static struct acpi_iomap *__acpi_find_iomap(phys_addr_t paddr,
+ unsigned long size)
+{
+ struct acpi_iomap *map;
+
+ list_for_each_entry_rcu(map, &acpi_iomaps, list) {
+ if (map->paddr + map->size >= paddr + size &&
+ map->paddr <= paddr)
+ return map;
+ }
+ return NULL;
+}
+
+/*
+ * Atomic "ioremap" used by NMI handler, if the specified IO memory
+ * area is not pre-mapped, NULL will be returned.
+ *
+ * acpi_iomaps_lock or RCU read lock must be held before calling
+ */
+static void __iomem *__acpi_ioremap_fast(phys_addr_t paddr,
+ unsigned long size)
+{
+ struct acpi_iomap *map;
+
+ map = __acpi_find_iomap(paddr, size);
+ if (map)
+ return map->vaddr + (paddr - map->paddr);
+ else
+ return NULL;
+}
+
+/* acpi_iomaps_lock must be held before calling */
+static void __iomem *__acpi_try_ioremap(phys_addr_t paddr,
+ unsigned long size)
+{
+ struct acpi_iomap *map;
+
+ map = __acpi_find_iomap(paddr, size);
+ if (map) {
+ kref_get(&map->ref);
+ return map->vaddr + (paddr - map->paddr);
+ } else
+ return NULL;
+}
+
+/*
+ * Used to pre-map the specified IO memory area. First check whether
+ * the area is already pre-mapped; if it is, increase the reference
+ * count (in __acpi_try_ioremap) and return. Otherwise, do the real
+ * ioremap and add the mapping to the acpi_iomaps list.
+ */
+static void __iomem *acpi_pre_map(phys_addr_t paddr,
+ unsigned long size)
+{
+ void __iomem *vaddr;
+ struct acpi_iomap *map;
+ unsigned long pg_sz, flags;
+ phys_addr_t pg_off;
+
+ spin_lock_irqsave(&acpi_iomaps_lock, flags);
+ vaddr = __acpi_try_ioremap(paddr, size);
+ spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
+ if (vaddr)
+ return vaddr;
+
+ pg_off = paddr & PAGE_MASK;
+ pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off;
+ vaddr = ioremap(pg_off, pg_sz);
+ if (!vaddr)
+ return NULL;
+ map = kmalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ goto err_unmap;
+ INIT_LIST_HEAD(&map->list);
+ map->paddr = pg_off;
+ map->size = pg_sz;
+ map->vaddr = vaddr;
+ kref_init(&map->ref);
+
+ spin_lock_irqsave(&acpi_iomaps_lock, flags);
+ vaddr = __acpi_try_ioremap(paddr, size);
+ if (vaddr) {
+ spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
+ iounmap(map->vaddr);
+ kfree(map);
+ return vaddr;
+ }
+ list_add_tail_rcu(&map->list, &acpi_iomaps);
+ spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
+
+ return vaddr + (paddr - pg_off);
+err_unmap:
+ iounmap(vaddr);
+ return NULL;
+}
+
+/* acpi_iomaps_lock must be held before calling */
+static void __acpi_kref_del_iomap(struct kref *ref)
+{
+ struct acpi_iomap *map;
+
+ map = container_of(ref, struct acpi_iomap, ref);
+ list_del_rcu(&map->list);
+}
+
+/*
+ * Used to post-unmap the specified IO memory area. The iounmap is
+ * done only when the reference count drops to zero.
+ */
+static void acpi_post_unmap(phys_addr_t paddr, unsigned long size)
+{
+ struct acpi_iomap *map;
+ unsigned long flags;
+ int del;
+
+ spin_lock_irqsave(&acpi_iomaps_lock, flags);
+ map = __acpi_find_iomap(paddr, size);
+ BUG_ON(!map);
+ del = kref_put(&map->ref, __acpi_kref_del_iomap);
+ spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
+
+ if (!del)
+ return;
+
+ synchronize_rcu();
+ iounmap(map->vaddr);
+ kfree(map);
+}
+
+/* In NMI handler, should set silent = 1 */
+static int acpi_check_gar(struct acpi_generic_address *reg,
+ u64 *paddr, int silent)
+{
+ u32 width, space_id;
+
+ width = reg->bit_width;
+ space_id = reg->space_id;
+ /* Handle possible alignment issues */
+ memcpy(paddr, &reg->address, sizeof(*paddr));
+ if (!*paddr) {
+ if (!silent)
+ pr_warning(FW_BUG ACPI_PFX
+ "Invalid physical address in GAR [0x%llx/%u/%u]\n",
+ *paddr, width, space_id);
+ return -EINVAL;
+ }
+
+ if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) {
+ if (!silent)
+ pr_warning(FW_BUG ACPI_PFX
+ "Invalid bit width in GAR [0x%llx/%u/%u]\n",
+ *paddr, width, space_id);
+ return -EINVAL;
+ }
+
+ if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
+ space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
+ if (!silent)
+ pr_warning(FW_BUG ACPI_PFX
+ "Invalid address space type in GAR [0x%llx/%u/%u]\n",
+ *paddr, width, space_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Pre-map, working on GAR */
+int acpi_pre_map_gar(struct acpi_generic_address *reg)
+{
+ u64 paddr;
+ void __iomem *vaddr;
+ int rc;
+
+ if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ return 0;
+
+ rc = acpi_check_gar(reg, &paddr, 0);
+ if (rc)
+ return rc;
+
+ vaddr = acpi_pre_map(paddr, reg->bit_width / 8);
+ if (!vaddr)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_pre_map_gar);
+
+/* Post-unmap, working on GAR */
+int acpi_post_unmap_gar(struct acpi_generic_address *reg)
+{
+ u64 paddr;
+ int rc;
+
+ if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ return 0;
+
+ rc = acpi_check_gar(reg, &paddr, 0);
+ if (rc)
+ return rc;
+
+ acpi_post_unmap(paddr, reg->bit_width / 8);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_post_unmap_gar);
+
+/*
+ * Can be used in atomic (including NMI) or process context. The RCU
+ * read lock can only be released after the IO memory area access has
+ * completed.
+ */
+static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
+{
+ void __iomem *addr;
+
+ rcu_read_lock();
+ addr = __acpi_ioremap_fast(paddr, width);
+ switch (width) {
+ case 8:
+ *val = readb(addr);
+ break;
+ case 16:
+ *val = readw(addr);
+ break;
+ case 32:
+ *val = readl(addr);
+ break;
+ case 64:
+ *val = readq(addr);
+ break;
+ default:
+ return -EINVAL;
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
+{
+ void __iomem *addr;
+
+ rcu_read_lock();
+ addr = __acpi_ioremap_fast(paddr, width);
+ switch (width) {
+ case 8:
+ writeb(val, addr);
+ break;
+ case 16:
+ writew(val, addr);
+ break;
+ case 32:
+ writel(val, addr);
+ break;
+ case 64:
+ writeq(val, addr);
+ break;
+ default:
+ return -EINVAL;
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+
+/* GAR accessing in atomic (including NMI) or process context */
+int acpi_atomic_read(u64 *val, struct acpi_generic_address *reg)
+{
+ u64 paddr;
+ int rc;
+
+ rc = acpi_check_gar(reg, &paddr, 1);
+ if (rc)
+ return rc;
+
+ *val = 0;
+ switch (reg->space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ return acpi_atomic_read_mem(paddr, val, reg->bit_width);
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ return acpi_os_read_port(paddr, (u32 *)val, reg->bit_width);
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(acpi_atomic_read);
+
+int acpi_atomic_write(u64 val, struct acpi_generic_address *reg)
+{
+ u64 paddr;
+ int rc;
+
+ rc = acpi_check_gar(reg, &paddr, 1);
+ if (rc)
+ return rc;
+
+ switch (reg->space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ return acpi_atomic_write_mem(paddr, val, reg->bit_width);
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ return acpi_os_write_port(paddr, val, reg->bit_width);
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(acpi_atomic_write);
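+
+/*
+ * Typical usage sketch (illustrative; "reg" is assumed to point to a
+ * GAR taken from a firmware table such as a HEST generic error source):
+ *
+ *   rc = acpi_pre_map_gar(reg);         (process context, e.g. init)
+ *   ...
+ *   rc = acpi_atomic_read(&val, reg);   (atomic/NMI or process context)
+ *   ...
+ *   rc = acpi_post_unmap_gar(reg);      (process context, e.g. exit)
+ */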
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 743576bf1bd7..c0113e619f5f 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -69,6 +69,37 @@ static struct dmi_system_id __cpuinitdata power_nocheck_dmi_table[] = {
};
+static int set_copy_dsdt(const struct dmi_system_id *id)
+{
+ printk(KERN_NOTICE "%s detected - "
+ "force copy of DSDT to local memory\n", id->ident);
+ acpi_gbl_copy_dsdt_locally = 1;
+ return 0;
+}
+
+static struct dmi_system_id dsdt_dmi_table[] __initdata = {
+ /*
+ * Insyde BIOS on some TOSHIBA machines corrupt the DSDT.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=14679
+ */
+ {
+ .callback = set_copy_dsdt,
+ .ident = "TOSHIBA Satellite A505",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite A505"),
+ },
+ },
+ {
+ .callback = set_copy_dsdt,
+ .ident = "TOSHIBA Satellite L505D",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L505D"),
+ },
+ }
+};
+
/* --------------------------------------------------------------------------
Device Management
-------------------------------------------------------------------------- */
@@ -813,6 +844,12 @@ void __init acpi_early_init(void)
acpi_gbl_permanent_mmap = 1;
+ /*
+ * If the machine falls into the DMI check table,
+ * DSDT will be copied to memory
+ */
+ dmi_check_system(dsdt_dmi_table);
+
status = acpi_reallocate_root_table();
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PREFIX
diff --git a/drivers/acpi/debug.c b/drivers/acpi/debug.c
index 146135e7a6a1..295dbfa2db9c 100644
--- a/drivers/acpi/debug.c
+++ b/drivers/acpi/debug.c
@@ -96,7 +96,8 @@ static const struct acpi_dlevel acpi_debug_levels[] = {
/* --------------------------------------------------------------------------
FS Interface (/sys)
-------------------------------------------------------------------------- */
-static int param_get_debug_layer(char *buffer, struct kernel_param *kp) {
+static int param_get_debug_layer(char *buffer, const struct kernel_param *kp)
+{
int result = 0;
int i;
@@ -118,7 +119,8 @@ static int param_get_debug_layer(char *buffer, struct kernel_param *kp) {
return result;
}
-static int param_get_debug_level(char *buffer, struct kernel_param *kp) {
+static int param_get_debug_level(char *buffer, const struct kernel_param *kp)
+{
int result = 0;
int i;
@@ -137,8 +139,18 @@ static int param_get_debug_level(char *buffer, struct kernel_param *kp) {
return result;
}
-module_param_call(debug_layer, param_set_uint, param_get_debug_layer, &acpi_dbg_layer, 0644);
-module_param_call(debug_level, param_set_uint, param_get_debug_level, &acpi_dbg_level, 0644);
+static struct kernel_param_ops acpi_debug_layer_ops = {
+ .set = param_set_uint,
+ .get = param_get_debug_layer,
+};
+
+static struct kernel_param_ops acpi_debug_level_ops = {
+ .set = param_set_uint,
+ .get = param_get_debug_level,
+};
+
+module_param_cb(debug_layer, &acpi_debug_layer_ops, &acpi_dbg_layer, 0644);
+module_param_cb(debug_level, &acpi_debug_level_ops, &acpi_dbg_level, 0644);
static char trace_method_name[6];
module_param_string(trace_method_name, trace_method_name, 6, 0644);
@@ -147,7 +159,7 @@ module_param(trace_debug_layer, uint, 0644);
static unsigned int trace_debug_level;
module_param(trace_debug_level, uint, 0644);
-static int param_set_trace_state(const char *val, struct kernel_param *kp)
+static int param_set_trace_state(const char *val, const struct kernel_param *kp)
{
int result = 0;
@@ -181,7 +193,7 @@ exit:
return result;
}
-static int param_get_trace_state(char *buffer, struct kernel_param *kp)
+static int param_get_trace_state(char *buffer, const struct kernel_param *kp)
{
if (!acpi_gbl_trace_method_name)
return sprintf(buffer, "disable");
@@ -194,8 +206,12 @@ static int param_get_trace_state(char *buffer, struct kernel_param *kp)
return 0;
}
-module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
- NULL, 0644);
+static struct kernel_param_ops param_ops_trace_state = {
+ .set = param_set_trace_state,
+ .get = param_get_trace_state,
+};
+
+module_param_cb(trace_state, &param_ops_trace_state, NULL, 0644);
/* --------------------------------------------------------------------------
DebugFS Interface
diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c
new file mode 100644
index 000000000000..7c9fbb1f8541
--- /dev/null
+++ b/drivers/acpi/hed.c
@@ -0,0 +1,113 @@
+/*
+ * ACPI Hardware Error Device (PNP0C33) Driver
+ *
+ * Copyright (C) 2010, Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * The ACPI Hardware Error Device is used to report some hardware
+ * errors notified via SCI, mainly corrected errors.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation;
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+#include <acpi/hed.h>
+
+static struct acpi_device_id acpi_hed_ids[] = {
+ {"PNP0C33", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, acpi_hed_ids);
+
+static acpi_handle hed_handle;
+
+static BLOCKING_NOTIFIER_HEAD(acpi_hed_notify_list);
+
+int register_acpi_hed_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&acpi_hed_notify_list, nb);
+}
+EXPORT_SYMBOL_GPL(register_acpi_hed_notifier);
+
+void unregister_acpi_hed_notifier(struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&acpi_hed_notify_list, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_acpi_hed_notifier);
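+
+/*
+ * Usage sketch (illustrative; the listener below is an assumption, not
+ * part of this driver):
+ *
+ *   static int my_hed_notify(struct notifier_block *nb,
+ *                            unsigned long event, void *data)
+ *   {
+ *           ... check the associated hardware error sources ...
+ *           return NOTIFY_OK;
+ *   }
+ *
+ *   static struct notifier_block my_hed_nb = {
+ *           .notifier_call = my_hed_notify,
+ *   };
+ *
+ *   register_acpi_hed_notifier(&my_hed_nb);
+ *   ...
+ *   unregister_acpi_hed_notifier(&my_hed_nb);
+ */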
+
+/*
+ * An SCI that reports a hardware error is forwarded to the listeners
+ * of HED; this is used by HEST Generic Hardware Error Sources with
+ * notification type SCI.
+ */
+static void acpi_hed_notify(struct acpi_device *device, u32 event)
+{
+ blocking_notifier_call_chain(&acpi_hed_notify_list, 0, NULL);
+ return;
+}
+
+static int __devinit acpi_hed_add(struct acpi_device *device)
+{
+ /* Only one hardware error device */
+ if (hed_handle)
+ return -EINVAL;
+ hed_handle = device->handle;
+ return 0;
+}
+
+static int __devexit acpi_hed_remove(struct acpi_device *device, int type)
+{
+ hed_handle = NULL;
+ return 0;
+}
+
+static struct acpi_driver acpi_hed_driver = {
+ .name = "hardware_error_device",
+ .class = "hardware_error",
+ .ids = acpi_hed_ids,
+ .ops = {
+ .add = acpi_hed_add,
+ .remove = acpi_hed_remove,
+ .notify = acpi_hed_notify,
+ },
+};
+
+static int __init acpi_hed_init(void)
+{
+ if (acpi_disabled)
+ return -ENODEV;
+
+ if (acpi_bus_register_driver(&acpi_hed_driver) < 0)
+ return -ENODEV;
+
+ return 0;
+}
+
+static void __exit acpi_hed_exit(void)
+{
+ acpi_bus_unregister_driver(&acpi_hed_driver);
+}
+
+module_init(acpi_hed_init);
+module_exit(acpi_hed_exit);
+
+ACPI_MODULE_NAME("hed");
+MODULE_AUTHOR("Huang Ying");
+MODULE_DESCRIPTION("ACPI Hardware Error Device Driver");
+MODULE_LICENSE("GPL");
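
The new driver's only job is to re-broadcast the PNP0C33 SCI to whoever registered on the chain above. A minimal sketch of a consumer, with hypothetical names, shows how an APEI/GHES-style client could hook in.

#include <linux/module.h>
#include <linux/notifier.h>
#include <acpi/hed.h>

/* hypothetical consumer callback, run for every HED notification */
static int example_hed_notify(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	/* poll the hardware error source registers here */
	return NOTIFY_OK;
}

static struct notifier_block example_hed_nb = {
	.notifier_call = example_hed_notify,
};

static int __init example_init(void)
{
	return register_acpi_hed_notifier(&example_hed_nb);
}

static void __exit example_exit(void)
{
	unregister_acpi_hed_notifier(&example_hed_nb);
}
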
diff --git a/drivers/acpi/hest.c b/drivers/acpi/hest.c
deleted file mode 100644
index 1c527a192872..000000000000
--- a/drivers/acpi/hest.c
+++ /dev/null
@@ -1,139 +0,0 @@
-#include <linux/acpi.h>
-#include <linux/pci.h>
-
-#define PREFIX "ACPI: "
-
-static inline unsigned long parse_acpi_hest_ia_machine_check(struct acpi_hest_ia_machine_check *p)
-{
- return sizeof(*p) +
- (sizeof(struct acpi_hest_ia_error_bank) * p->num_hardware_banks);
-}
-
-static inline unsigned long parse_acpi_hest_ia_corrected(struct acpi_hest_ia_corrected *p)
-{
- return sizeof(*p) +
- (sizeof(struct acpi_hest_ia_error_bank) * p->num_hardware_banks);
-}
-
-static inline unsigned long parse_acpi_hest_ia_nmi(struct acpi_hest_ia_nmi *p)
-{
- return sizeof(*p);
-}
-
-static inline unsigned long parse_acpi_hest_generic(struct acpi_hest_generic *p)
-{
- return sizeof(*p);
-}
-
-static inline unsigned int hest_match_pci(struct acpi_hest_aer_common *p, struct pci_dev *pci)
-{
- return (0 == pci_domain_nr(pci->bus) &&
- p->bus == pci->bus->number &&
- p->device == PCI_SLOT(pci->devfn) &&
- p->function == PCI_FUNC(pci->devfn));
-}
-
-static unsigned long parse_acpi_hest_aer(void *hdr, int type, struct pci_dev *pci, int *firmware_first)
-{
- struct acpi_hest_aer_common *p = hdr + sizeof(struct acpi_hest_header);
- unsigned long rc=0;
- u8 pcie_type = 0;
- u8 bridge = 0;
- switch (type) {
- case ACPI_HEST_TYPE_AER_ROOT_PORT:
- rc = sizeof(struct acpi_hest_aer_root);
- pcie_type = PCI_EXP_TYPE_ROOT_PORT;
- break;
- case ACPI_HEST_TYPE_AER_ENDPOINT:
- rc = sizeof(struct acpi_hest_aer);
- pcie_type = PCI_EXP_TYPE_ENDPOINT;
- break;
- case ACPI_HEST_TYPE_AER_BRIDGE:
- rc = sizeof(struct acpi_hest_aer_bridge);
- if ((pci->class >> 16) == PCI_BASE_CLASS_BRIDGE)
- bridge = 1;
- break;
- }
-
- if (p->flags & ACPI_HEST_GLOBAL) {
- if ((pci->is_pcie && (pci->pcie_type == pcie_type)) || bridge)
- *firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
- }
- else
- if (hest_match_pci(p, pci))
- *firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
- return rc;
-}
-
-static int acpi_hest_firmware_first(struct acpi_table_header *stdheader, struct pci_dev *pci)
-{
- struct acpi_table_hest *hest = (struct acpi_table_hest *)stdheader;
- void *p = (void *)hest + sizeof(*hest); /* defined by the ACPI 4.0 spec */
- struct acpi_hest_header *hdr = p;
-
- int i;
- int firmware_first = 0;
- static unsigned char printed_unused = 0;
- static unsigned char printed_reserved = 0;
-
- for (i=0, hdr=p; p < (((void *)hest) + hest->header.length) && i < hest->error_source_count; i++) {
- switch (hdr->type) {
- case ACPI_HEST_TYPE_IA32_CHECK:
- p += parse_acpi_hest_ia_machine_check(p);
- break;
- case ACPI_HEST_TYPE_IA32_CORRECTED_CHECK:
- p += parse_acpi_hest_ia_corrected(p);
- break;
- case ACPI_HEST_TYPE_IA32_NMI:
- p += parse_acpi_hest_ia_nmi(p);
- break;
- /* These three should never appear */
- case ACPI_HEST_TYPE_NOT_USED3:
- case ACPI_HEST_TYPE_NOT_USED4:
- case ACPI_HEST_TYPE_NOT_USED5:
- if (!printed_unused) {
- printk(KERN_DEBUG PREFIX
- "HEST Error Source list contains an obsolete type (%d).\n", hdr->type);
- printed_unused = 1;
- }
- break;
- case ACPI_HEST_TYPE_AER_ROOT_PORT:
- case ACPI_HEST_TYPE_AER_ENDPOINT:
- case ACPI_HEST_TYPE_AER_BRIDGE:
- p += parse_acpi_hest_aer(p, hdr->type, pci, &firmware_first);
- break;
- case ACPI_HEST_TYPE_GENERIC_ERROR:
- p += parse_acpi_hest_generic(p);
- break;
- /* These should never appear either */
- case ACPI_HEST_TYPE_RESERVED:
- default:
- if (!printed_reserved) {
- printk(KERN_DEBUG PREFIX
- "HEST Error Source list contains a reserved type (%d).\n", hdr->type);
- printed_reserved = 1;
- }
- break;
- }
- }
- return firmware_first;
-}
-
-int acpi_hest_firmware_first_pci(struct pci_dev *pci)
-{
- acpi_status status = AE_NOT_FOUND;
- struct acpi_table_header *hest = NULL;
-
- if (acpi_disabled)
- return 0;
-
- status = acpi_get_table(ACPI_SIG_HEST, 1, &hest);
-
- if (ACPI_SUCCESS(status)) {
- if (acpi_hest_firmware_first(hest, pci)) {
- return 1;
- }
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(acpi_hest_firmware_first_pci);
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 7594f65800cf..78418ce4fc78 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -1207,6 +1207,15 @@ int acpi_check_mem_region(resource_size_t start, resource_size_t n,
EXPORT_SYMBOL(acpi_check_mem_region);
/*
+ * Let drivers know whether the resource checks are effective
+ */
+int acpi_resources_are_enforced(void)
+{
+ return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
+}
+EXPORT_SYMBOL(acpi_resources_are_enforced);
+
+/*
* Acquire a spinlock.
*
* handle is a pointer to the spinlock_t.
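
acpi_resources_are_enforced() simply reports whether acpi_enforce_resources is in strict mode, so a driver can decide whether it is safe to touch a region that ACPI also declares. A hedged sketch of a caller is below; the function name, base, len and the region label are placeholders, not part of the patch.

static int example_claim_io(resource_size_t base, resource_size_t len)
{
	/* only claim the range when ACPI resource conflict checking is strict */
	if (acpi_resources_are_enforced() &&
	    !request_region(base, len, "example-driver"))
		return -EBUSY;
	return 0;
}
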
@@ -1406,7 +1415,7 @@ acpi_os_invalidate_address(
switch (space_id) {
case ACPI_ADR_SPACE_SYSTEM_IO:
case ACPI_ADR_SPACE_SYSTEM_MEMORY:
- /* Only interference checks against SystemIO and SytemMemory
+ /* Only interference checks against SystemIO and SystemMemory
are needed */
res.start = address;
res.end = address + length - 1;
@@ -1458,7 +1467,7 @@ acpi_os_validate_address (
switch (space_id) {
case ACPI_ADR_SPACE_SYSTEM_IO:
case ACPI_ADR_SPACE_SYSTEM_MEMORY:
- /* Only interference checks against SystemIO and SytemMemory
+ /* Only interference checks against SystemIO and SystemMemory
are needed */
res = kzalloc(sizeof(struct acpi_res_list), GFP_KERNEL);
if (!res)
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index b0a71ecee682..e4804fb05e23 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -401,11 +401,13 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
* driver reported one, then use it. Exit in any case.
*/
if (gsi < 0) {
+ u32 dev_gsi;
dev_warn(&dev->dev, "PCI INT %c: no GSI", pin_name(pin));
/* Interrupt Line values above 0xF are forbidden */
- if (dev->irq > 0 && (dev->irq <= 0xF)) {
- printk(" - using IRQ %d\n", dev->irq);
- acpi_register_gsi(&dev->dev, dev->irq,
+ if (dev->irq > 0 && (dev->irq <= 0xF) &&
+ (acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) {
+ printk(" - using ISA IRQ %d\n", dev->irq);
+ acpi_register_gsi(&dev->dev, dev_gsi,
ACPI_LEVEL_SENSITIVE,
ACPI_ACTIVE_LOW);
return 0;
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index aefce33f2a09..4eac59393edc 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -120,7 +120,8 @@ acpi_handle acpi_get_pci_rootbridge_handle(unsigned int seg, unsigned int bus)
struct acpi_pci_root *root;
list_for_each_entry(root, &acpi_pci_roots, node)
- if ((root->segment == (u16) seg) && (root->bus_nr == (u16) bus))
+ if ((root->segment == (u16) seg) &&
+ (root->secondary.start == (u16) bus))
return root->device->handle;
return NULL;
}
@@ -154,7 +155,7 @@ EXPORT_SYMBOL_GPL(acpi_is_root_bridge);
static acpi_status
get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
{
- int *busnr = data;
+ struct resource *res = data;
struct acpi_resource_address64 address;
if (resource->type != ACPI_RESOURCE_TYPE_ADDRESS16 &&
@@ -164,28 +165,27 @@ get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
acpi_resource_to_address64(resource, &address);
if ((address.address_length > 0) &&
- (address.resource_type == ACPI_BUS_NUMBER_RANGE))
- *busnr = address.minimum;
+ (address.resource_type == ACPI_BUS_NUMBER_RANGE)) {
+ res->start = address.minimum;
+ res->end = address.minimum + address.address_length - 1;
+ }
return AE_OK;
}
static acpi_status try_get_root_bridge_busnr(acpi_handle handle,
- unsigned long long *bus)
+ struct resource *res)
{
acpi_status status;
- int busnum;
- busnum = -1;
+ res->start = -1;
status =
acpi_walk_resources(handle, METHOD_NAME__CRS,
- get_root_bridge_busnr_callback, &busnum);
+ get_root_bridge_busnr_callback, res);
if (ACPI_FAILURE(status))
return status;
- /* Check if we really get a bus number from _CRS */
- if (busnum == -1)
+ if (res->start == -1)
return AE_ERROR;
- *bus = busnum;
return AE_OK;
}
@@ -429,34 +429,47 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
struct acpi_device *child;
u32 flags, base_flags;
+ root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
+ if (!root)
+ return -ENOMEM;
+
segment = 0;
status = acpi_evaluate_integer(device->handle, METHOD_NAME__SEG, NULL,
&segment);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
printk(KERN_ERR PREFIX "can't evaluate _SEG\n");
- return -ENODEV;
+ result = -ENODEV;
+ goto end;
}
/* Check _CRS first, then _BBN. If no _BBN, default to zero. */
- bus = 0;
- status = try_get_root_bridge_busnr(device->handle, &bus);
+ root->secondary.flags = IORESOURCE_BUS;
+ status = try_get_root_bridge_busnr(device->handle, &root->secondary);
if (ACPI_FAILURE(status)) {
+ /*
+ * We need both the start and end of the downstream bus range
+ * to interpret _CBA (MMCONFIG base address), so it really is
+ * supposed to be in _CRS. If we don't find it there, all we
+ * can do is assume [_BBN-0xFF] or [0-0xFF].
+ */
+ root->secondary.end = 0xFF;
+ printk(KERN_WARNING FW_BUG PREFIX
+ "no secondary bus range in _CRS\n");
status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN, NULL, &bus);
- if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
- printk(KERN_ERR PREFIX
- "no bus number in _CRS and can't evaluate _BBN\n");
- return -ENODEV;
+ if (ACPI_SUCCESS(status))
+ root->secondary.start = bus;
+ else if (status == AE_NOT_FOUND)
+ root->secondary.start = 0;
+ else {
+ printk(KERN_ERR PREFIX "can't evaluate _BBN\n");
+ result = -ENODEV;
+ goto end;
}
}
- root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
- if (!root)
- return -ENOMEM;
-
INIT_LIST_HEAD(&root->node);
root->device = device;
root->segment = segment & 0xFFFF;
- root->bus_nr = bus & 0xFF;
strcpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
device->driver_data = root;
@@ -475,9 +488,9 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
/* TBD: Locking */
list_add_tail(&root->node, &acpi_pci_roots);
- printk(KERN_INFO PREFIX "%s [%s] (%04x:%02x)\n",
+ printk(KERN_INFO PREFIX "%s [%s] (domain %04x %pR)\n",
acpi_device_name(device), acpi_device_bid(device),
- root->segment, root->bus_nr);
+ root->segment, &root->secondary);
/*
* Scan the Root Bridge
@@ -486,11 +499,11 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
* PCI namespace does not get created until this call is made (and
* thus the root bridge's pci_dev does not exist).
*/
- root->bus = pci_acpi_scan_root(device, segment, bus);
+ root->bus = pci_acpi_scan_root(root);
if (!root->bus) {
printk(KERN_ERR PREFIX
"Bus %04x:%02x not present in PCI namespace\n",
- root->segment, root->bus_nr);
+ root->segment, (unsigned int)root->secondary.start);
result = -ENODEV;
goto end;
}
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index ddc76787b842..f74d3b31e5c9 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -172,7 +172,6 @@ static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state)
return -EINVAL;
/* The state of the list is 'on' IFF all resources are 'on'. */
- /* */
for (i = 0; i < list->count; i++) {
/*
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 5939e7f7d8e9..c3817e1f32c7 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -698,7 +698,7 @@ static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
"max_cstate: C%d\n"
"maximum allowed latency: %d usec\n",
pr->power.state ? pr->power.state - pr->power.states : 0,
- max_cstate, pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY));
+ max_cstate, pm_qos_request(PM_QOS_CPU_DMA_LATENCY));
seq_puts(seq, "states:\n");
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 0338f513a010..7f2e051ed4f1 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -765,7 +765,7 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
}
status = acpi_get_gpe_status(NULL, device->wakeup.gpe_number,
- ACPI_NOT_ISR, &event_status);
+ &event_status);
if (status == AE_OK)
device->wakeup.flags.run_wake =
!!(event_status & ACPI_EVENT_FLAG_HANDLE);
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index e2e992599e68..da9da4c51d91 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -227,6 +227,7 @@ static int acpi_suspend_begin(suspend_state_t pm_state)
static int acpi_suspend_enter(suspend_state_t pm_state)
{
acpi_status status = AE_OK;
+ acpi_status enable_status = AE_OK;
unsigned long flags = 0;
u32 acpi_state = acpi_target_sleep_state;
@@ -254,10 +255,19 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
}
/* If ACPI is not enabled by the BIOS, we need to enable it here. */
- if (set_sci_en_on_resume)
+ if (!set_sci_en_on_resume)
+ enable_status = acpi_enable();
+
+ if (set_sci_en_on_resume || enable_status == AE_NO_HARDWARE_RESPONSE)
+ /* If we're still in legacy mode then we have a problem. The
+ * spec tells us that this bit is under hardware control, but
+ * there's no plausible way that the OS can transition back to
+ * legacy mode so our choices here are to either ignore the
+ * spec or crash and burn horribly. The latter doesn't seem
+ * like it's ever going to be the preferable choice, so let's
+ * live dangerously.
+ */
acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
- else
- acpi_enable();
/* Reprogram control registers and execute _BFS */
acpi_leave_sleep_state_prep(acpi_state);
@@ -346,12 +356,6 @@ static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
return 0;
}
-static int __init init_set_sci_en_on_resume(const struct dmi_system_id *d)
-{
- set_sci_en_on_resume = true;
- return 0;
-}
-
static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
{
.callback = init_old_suspend_ordering,
@@ -370,22 +374,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
},
},
{
- .callback = init_set_sci_en_on_resume,
- .ident = "Apple MacBook 1,1",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Apple MacMini 1,1",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"),
- },
- },
- {
.callback = init_old_suspend_ordering,
.ident = "Asus Pundit P1-AH2 (M2N8L motherboard)",
.matches = {
@@ -394,182 +382,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
},
},
{
- .callback = init_set_sci_en_on_resume,
- .ident = "Toshiba Satellite L300",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L300"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Hewlett-Packard HP G7000 Notebook PC",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP G7000 Notebook PC"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Hewlett-Packard HP Pavilion dv3 Notebook PC",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv3 Notebook PC"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Hewlett-Packard Pavilion dv4",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Hewlett-Packard Pavilion dv7",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv7"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Hewlett-Packard Compaq Presario C700 Notebook PC",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Compaq Presario C700 Notebook PC"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Hewlett-Packard Compaq Presario CQ40 Notebook PC",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Compaq Presario CQ40 Notebook PC"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Lenovo ThinkPad T410",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Lenovo ThinkPad T510",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Lenovo ThinkPad W510",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Lenovo ThinkPad X201",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Lenovo ThinkPad X201",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Lenovo ThinkPad T410",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Lenovo ThinkPad T510",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Lenovo ThinkPad W510",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Lenovo ThinkPad X201",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Lenovo ThinkPad X201",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Lenovo ThinkPad T410",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Lenovo ThinkPad T510",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Lenovo ThinkPad W510",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Lenovo ThinkPad X201",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Lenovo ThinkPad X201",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"),
- },
- },
- {
.callback = init_old_suspend_ordering,
.ident = "Panasonic CF51-2L",
.matches = {
@@ -578,30 +390,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
},
},
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Dell Studio 1558",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1558"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Dell Studio 1557",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1557"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Dell Studio 1555",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1555"),
- },
- },
{},
};
#endif /* CONFIG_SUSPEND */
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
index 4aaf24976138..e35525b39f6b 100644
--- a/drivers/acpi/system.c
+++ b/drivers/acpi/system.c
@@ -303,8 +303,7 @@ static int get_status(u32 index, acpi_event_status *status, acpi_handle *handle)
"Invalid GPE 0x%x\n", index));
goto end;
}
- result = acpi_get_gpe_status(*handle, index,
- ACPI_NOT_ISR, status);
+ result = acpi_get_gpe_status(*handle, index, status);
} else if (index < (num_gpes + ACPI_NUM_FIXED_EVENTS))
result = acpi_get_event_status(index - num_gpes, status);
@@ -395,7 +394,7 @@ static ssize_t counter_set(struct kobject *kobj,
result = acpi_set_gpe(handle, index, ACPI_GPE_ENABLE);
else if (!strcmp(buf, "clear\n") &&
(status & ACPI_EVENT_FLAG_SET))
- result = acpi_clear_gpe(handle, index, ACPI_NOT_ISR);
+ result = acpi_clear_gpe(handle, index);
else
all_counters[index].count = strtoul(buf, NULL, 0);
} else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) {
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index a0c93b321482..9865d46f49a8 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -45,6 +45,7 @@
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <linux/suspend.h>
+#include <acpi/video.h>
#define PREFIX "ACPI: "
@@ -65,11 +66,6 @@
#define MAX_NAME_LEN 20
-#define ACPI_VIDEO_DISPLAY_CRT 1
-#define ACPI_VIDEO_DISPLAY_TV 2
-#define ACPI_VIDEO_DISPLAY_DVI 3
-#define ACPI_VIDEO_DISPLAY_LCD 4
-
#define _COMPONENT ACPI_VIDEO_COMPONENT
ACPI_MODULE_NAME("video");
@@ -1007,11 +1003,11 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
result = acpi_video_init_brightness(device);
if (result)
return;
- name = kzalloc(MAX_NAME_LEN, GFP_KERNEL);
+ name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
if (!name)
return;
+ count++;
- sprintf(name, "acpi_video%d", count++);
memset(&props, 0, sizeof(struct backlight_properties));
props.max_brightness = device->brightness->count - 3;
device->backlight = backlight_device_register(name, NULL, device,
@@ -1067,10 +1063,10 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
if (device->cap._DCS && device->cap._DSS) {
static int count;
char *name;
- name = kzalloc(MAX_NAME_LEN, GFP_KERNEL);
+ name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
if (!name)
return;
- sprintf(name, "acpi_video%d", count++);
+ count++;
device->output_dev = video_output_register(name,
NULL, device, &acpi_output_properties);
kfree(name);
@@ -1748,11 +1744,27 @@ acpi_video_get_device_attr(struct acpi_video_bus *video, unsigned long device_id
}
static int
+acpi_video_get_device_type(struct acpi_video_bus *video,
+ unsigned long device_id)
+{
+ struct acpi_video_enumerated_device *ids;
+ int i;
+
+ for (i = 0; i < video->attached_count; i++) {
+ ids = &video->attached_array[i];
+ if ((ids->value.int_val & 0xffff) == device_id)
+ return ids->value.int_val;
+ }
+
+ return 0;
+}
+
+static int
acpi_video_bus_get_one_device(struct acpi_device *device,
struct acpi_video_bus *video)
{
unsigned long long device_id;
- int status;
+ int status, device_type;
struct acpi_video_device *data;
struct acpi_video_device_attrib* attribute;
@@ -1797,8 +1809,25 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
}
if(attribute->bios_can_detect)
data->flags.bios = 1;
- } else
- data->flags.unknown = 1;
+ } else {
+ /* Check for legacy IDs */
+ device_type = acpi_video_get_device_type(video,
+ device_id);
+ /* Ignore bits 16 and 18-20 */
+ switch (device_type & 0xffe2ffff) {
+ case ACPI_VIDEO_DISPLAY_LEGACY_MONITOR:
+ data->flags.crt = 1;
+ break;
+ case ACPI_VIDEO_DISPLAY_LEGACY_PANEL:
+ data->flags.lcd = 1;
+ break;
+ case ACPI_VIDEO_DISPLAY_LEGACY_TV:
+ data->flags.tvout = 1;
+ break;
+ default:
+ data->flags.unknown = 1;
+ }
+ }
acpi_video_device_bind(video, data);
acpi_video_device_find_cap(data);
@@ -2032,6 +2061,71 @@ out:
return result;
}
+int acpi_video_get_edid(struct acpi_device *device, int type, int device_id,
+ void **edid)
+{
+ struct acpi_video_bus *video;
+ struct acpi_video_device *video_device;
+ union acpi_object *buffer = NULL;
+ acpi_status status;
+ int i, length;
+
+ if (!device || !acpi_driver_data(device))
+ return -EINVAL;
+
+ video = acpi_driver_data(device);
+
+ for (i = 0; i < video->attached_count; i++) {
+ video_device = video->attached_array[i].bind_info;
+ length = 256;
+
+ if (!video_device)
+ continue;
+
+ if (type) {
+ switch (type) {
+ case ACPI_VIDEO_DISPLAY_CRT:
+ if (!video_device->flags.crt)
+ continue;
+ break;
+ case ACPI_VIDEO_DISPLAY_TV:
+ if (!video_device->flags.tvout)
+ continue;
+ break;
+ case ACPI_VIDEO_DISPLAY_DVI:
+ if (!video_device->flags.dvi)
+ continue;
+ break;
+ case ACPI_VIDEO_DISPLAY_LCD:
+ if (!video_device->flags.lcd)
+ continue;
+ break;
+ }
+ } else if (video_device->device_id != device_id) {
+ continue;
+ }
+
+ status = acpi_video_device_EDID(video_device, &buffer, length);
+
+ if (ACPI_FAILURE(status) || !buffer ||
+ buffer->type != ACPI_TYPE_BUFFER) {
+ length = 128;
+ status = acpi_video_device_EDID(video_device, &buffer,
+ length);
+ if (ACPI_FAILURE(status) || !buffer ||
+ buffer->type != ACPI_TYPE_BUFFER) {
+ continue;
+ }
+ }
+
+ *edid = buffer->buffer.pointer;
+ return length;
+ }
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL(acpi_video_get_edid);
+
static int
acpi_video_bus_get_devices(struct acpi_video_bus *video,
struct acpi_device *device)
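
acpi_video_get_edid() walks the attached display devices, matches either a display type or a specific device id, and returns the length of the EDID blob on success. A hedged sketch of a caller follows; acpi_dev and the consumer of the blob are placeholders a graphics driver would supply.

	void *edid = NULL;
	int len;

	/* ask the ACPI video bus for the LCD panel's EDID */
	len = acpi_video_get_edid(acpi_dev, ACPI_VIDEO_DISPLAY_LCD, 0, &edid);
	if (len > 0) {
		/* 'edid' points to 'len' bytes of EDID data */
		example_consume_edid(edid, len);	/* hypothetical consumer */
	}
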
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 01c52c415bdc..cbadb9fa1277 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -65,6 +65,14 @@ config SATA_AHCI
If unsure, say N.
+config SATA_AHCI_PLATFORM
+ tristate "Platform AHCI SATA support"
+ help
+ This option enables support for Platform AHCI Serial ATA
+ controllers.
+
+ If unsure, say N.
+
config SATA_SIL24
tristate "Silicon Image 3124/3132 SATA support"
depends on PCI
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index fc936d4471d6..d0a93c4ad3ec 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -1,7 +1,8 @@
obj-$(CONFIG_ATA) += libata.o
-obj-$(CONFIG_SATA_AHCI) += ahci.o
+obj-$(CONFIG_SATA_AHCI) += ahci.o libahci.o
+obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o
obj-$(CONFIG_SATA_SVW) += sata_svw.o
obj-$(CONFIG_ATA_PIIX) += ata_piix.o
obj-$(CONFIG_SATA_PROMISE) += sata_promise.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 5326af28a410..c44d11237db4 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -46,403 +46,48 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
+#include "ahci.h"
#define DRV_NAME "ahci"
#define DRV_VERSION "3.0"
-/* Enclosure Management Control */
-#define EM_CTRL_MSG_TYPE 0x000f0000
-
-/* Enclosure Management LED Message Type */
-#define EM_MSG_LED_HBA_PORT 0x0000000f
-#define EM_MSG_LED_PMP_SLOT 0x0000ff00
-#define EM_MSG_LED_VALUE 0xffff0000
-#define EM_MSG_LED_VALUE_ACTIVITY 0x00070000
-#define EM_MSG_LED_VALUE_OFF 0xfff80000
-#define EM_MSG_LED_VALUE_ON 0x00010000
-
-static int ahci_skip_host_reset;
-static int ahci_ignore_sss;
-
-module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
-MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
-
-module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
-MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
-
-static int ahci_enable_alpm(struct ata_port *ap,
- enum link_pm policy);
-static void ahci_disable_alpm(struct ata_port *ap);
-static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
-static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
- size_t size);
-static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
- ssize_t size);
-
enum {
AHCI_PCI_BAR = 5,
- AHCI_MAX_PORTS = 32,
- AHCI_MAX_SG = 168, /* hardware max is 64K */
- AHCI_DMA_BOUNDARY = 0xffffffff,
- AHCI_MAX_CMDS = 32,
- AHCI_CMD_SZ = 32,
- AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
- AHCI_RX_FIS_SZ = 256,
- AHCI_CMD_TBL_CDB = 0x40,
- AHCI_CMD_TBL_HDR_SZ = 0x80,
- AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
- AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
- AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
- AHCI_RX_FIS_SZ,
- AHCI_PORT_PRIV_FBS_DMA_SZ = AHCI_CMD_SLOT_SZ +
- AHCI_CMD_TBL_AR_SZ +
- (AHCI_RX_FIS_SZ * 16),
- AHCI_IRQ_ON_SG = (1 << 31),
- AHCI_CMD_ATAPI = (1 << 5),
- AHCI_CMD_WRITE = (1 << 6),
- AHCI_CMD_PREFETCH = (1 << 7),
- AHCI_CMD_RESET = (1 << 8),
- AHCI_CMD_CLR_BUSY = (1 << 10),
-
- RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
- RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
- RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
-
- board_ahci = 0,
- board_ahci_vt8251 = 1,
- board_ahci_ign_iferr = 2,
- board_ahci_sb600 = 3,
- board_ahci_mv = 4,
- board_ahci_sb700 = 5, /* for SB700 and SB800 */
- board_ahci_mcp65 = 6,
- board_ahci_nopmp = 7,
- board_ahci_yesncq = 8,
- board_ahci_nosntf = 9,
-
- /* global controller registers */
- HOST_CAP = 0x00, /* host capabilities */
- HOST_CTL = 0x04, /* global host control */
- HOST_IRQ_STAT = 0x08, /* interrupt status */
- HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
- HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
- HOST_EM_LOC = 0x1c, /* Enclosure Management location */
- HOST_EM_CTL = 0x20, /* Enclosure Management Control */
- HOST_CAP2 = 0x24, /* host capabilities, extended */
-
- /* HOST_CTL bits */
- HOST_RESET = (1 << 0), /* reset controller; self-clear */
- HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
- HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
-
- /* HOST_CAP bits */
- HOST_CAP_SXS = (1 << 5), /* Supports External SATA */
- HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */
- HOST_CAP_CCC = (1 << 7), /* Command Completion Coalescing */
- HOST_CAP_PART = (1 << 13), /* Partial state capable */
- HOST_CAP_SSC = (1 << 14), /* Slumber state capable */
- HOST_CAP_PIO_MULTI = (1 << 15), /* PIO multiple DRQ support */
- HOST_CAP_FBS = (1 << 16), /* FIS-based switching support */
- HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
- HOST_CAP_ONLY = (1 << 18), /* Supports AHCI mode only */
- HOST_CAP_CLO = (1 << 24), /* Command List Override support */
- HOST_CAP_LED = (1 << 25), /* Supports activity LED */
- HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
- HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
- HOST_CAP_MPS = (1 << 28), /* Mechanical presence switch */
- HOST_CAP_SNTF = (1 << 29), /* SNotification register */
- HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
- HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
-
- /* HOST_CAP2 bits */
- HOST_CAP2_BOH = (1 << 0), /* BIOS/OS handoff supported */
- HOST_CAP2_NVMHCI = (1 << 1), /* NVMHCI supported */
- HOST_CAP2_APST = (1 << 2), /* Automatic partial to slumber */
-
- /* registers for each SATA port */
- PORT_LST_ADDR = 0x00, /* command list DMA addr */
- PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
- PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
- PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
- PORT_IRQ_STAT = 0x10, /* interrupt status */
- PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
- PORT_CMD = 0x18, /* port command */
- PORT_TFDATA = 0x20, /* taskfile data */
- PORT_SIG = 0x24, /* device TF signature */
- PORT_CMD_ISSUE = 0x38, /* command issue */
- PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
- PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
- PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
- PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
- PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */
- PORT_FBS = 0x40, /* FIS-based Switching */
-
- /* PORT_IRQ_{STAT,MASK} bits */
- PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
- PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
- PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
- PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
- PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
- PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
- PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
- PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
-
- PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
- PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
- PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
- PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
- PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
- PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
- PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
- PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
- PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
-
- PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
- PORT_IRQ_IF_ERR |
- PORT_IRQ_CONNECT |
- PORT_IRQ_PHYRDY |
- PORT_IRQ_UNK_FIS |
- PORT_IRQ_BAD_PMP,
- PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
- PORT_IRQ_TF_ERR |
- PORT_IRQ_HBUS_DATA_ERR,
- DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
- PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
- PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
-
- /* PORT_CMD bits */
- PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
- PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
- PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
- PORT_CMD_FBSCP = (1 << 22), /* FBS Capable Port */
- PORT_CMD_PMP = (1 << 17), /* PMP attached */
- PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
- PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
- PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
- PORT_CMD_CLO = (1 << 3), /* Command list override */
- PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
- PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
- PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
-
- PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
- PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
- PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
- PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
-
- PORT_FBS_DWE_OFFSET = 16, /* FBS device with error offset */
- PORT_FBS_ADO_OFFSET = 12, /* FBS active dev optimization offset */
- PORT_FBS_DEV_OFFSET = 8, /* FBS device to issue offset */
- PORT_FBS_DEV_MASK = (0xf << PORT_FBS_DEV_OFFSET), /* FBS.DEV */
- PORT_FBS_SDE = (1 << 2), /* FBS single device error */
- PORT_FBS_DEC = (1 << 1), /* FBS device error clear */
- PORT_FBS_EN = (1 << 0), /* Enable FBS */
-
- /* hpriv->flags bits */
- AHCI_HFLAG_NO_NCQ = (1 << 0),
- AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
- AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
- AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
- AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
- AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
- AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
- AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
- AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
- AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
- AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */
- AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as
- link offline */
- AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */
-
- /* ap->flags bits */
-
- AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
- ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
- ATA_FLAG_IPM,
-
- ICH_MAP = 0x90, /* ICH MAP register */
-
- /* em constants */
- EM_MAX_SLOTS = 8,
- EM_MAX_RETRY = 5,
-
- /* em_ctl bits */
- EM_CTL_RST = (1 << 9), /* Reset */
- EM_CTL_TM = (1 << 8), /* Transmit Message */
- EM_CTL_ALHD = (1 << 26), /* Activity LED */
-};
-
-struct ahci_cmd_hdr {
- __le32 opts;
- __le32 status;
- __le32 tbl_addr;
- __le32 tbl_addr_hi;
- __le32 reserved[4];
-};
-
-struct ahci_sg {
- __le32 addr;
- __le32 addr_hi;
- __le32 reserved;
- __le32 flags_size;
-};
-
-struct ahci_em_priv {
- enum sw_activity blink_policy;
- struct timer_list timer;
- unsigned long saved_activity;
- unsigned long activity;
- unsigned long led_state;
-};
-
-struct ahci_host_priv {
- unsigned int flags; /* AHCI_HFLAG_* */
- u32 cap; /* cap to use */
- u32 cap2; /* cap2 to use */
- u32 port_map; /* port map to use */
- u32 saved_cap; /* saved initial cap */
- u32 saved_cap2; /* saved initial cap2 */
- u32 saved_port_map; /* saved initial port_map */
- u32 em_loc; /* enclosure management location */
};
-struct ahci_port_priv {
- struct ata_link *active_link;
- struct ahci_cmd_hdr *cmd_slot;
- dma_addr_t cmd_slot_dma;
- void *cmd_tbl;
- dma_addr_t cmd_tbl_dma;
- void *rx_fis;
- dma_addr_t rx_fis_dma;
- /* for NCQ spurious interrupt analysis */
- unsigned int ncq_saw_d2h:1;
- unsigned int ncq_saw_dmas:1;
- unsigned int ncq_saw_sdb:1;
- u32 intr_mask; /* interrupts to enable */
- bool fbs_supported; /* set iff FBS is supported */
- bool fbs_enabled; /* set iff FBS is enabled */
- int fbs_last_dev; /* save FBS.DEV of last FIS */
- /* enclosure management info per PM slot */
- struct ahci_em_priv em_priv[EM_MAX_SLOTS];
+enum board_ids {
+ /* board IDs by feature in alphabetical order */
+ board_ahci,
+ board_ahci_ign_iferr,
+ board_ahci_nosntf,
+
+ /* board IDs for specific chipsets in alphabetical order */
+ board_ahci_mcp65,
+ board_ahci_mcp77,
+ board_ahci_mcp89,
+ board_ahci_mv,
+ board_ahci_sb600,
+ board_ahci_sb700, /* for SB700 and SB800 */
+ board_ahci_vt8251,
+
+ /* aliases */
+ board_ahci_mcp_linux = board_ahci_mcp65,
+ board_ahci_mcp67 = board_ahci_mcp65,
+ board_ahci_mcp73 = board_ahci_mcp65,
+ board_ahci_mcp79 = board_ahci_mcp77,
};
-static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
-static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
-static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
-static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
-static int ahci_port_start(struct ata_port *ap);
-static void ahci_port_stop(struct ata_port *ap);
-static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc);
-static void ahci_qc_prep(struct ata_queued_cmd *qc);
-static void ahci_freeze(struct ata_port *ap);
-static void ahci_thaw(struct ata_port *ap);
-static void ahci_enable_fbs(struct ata_port *ap);
-static void ahci_disable_fbs(struct ata_port *ap);
-static void ahci_pmp_attach(struct ata_port *ap);
-static void ahci_pmp_detach(struct ata_port *ap);
-static int ahci_softreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline);
static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
-static int ahci_hardreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline);
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
-static void ahci_postreset(struct ata_link *link, unsigned int *class);
-static void ahci_error_handler(struct ata_port *ap);
-static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
-static int ahci_port_resume(struct ata_port *ap);
-static void ahci_dev_config(struct ata_device *dev);
-static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
- u32 opts);
#ifdef CONFIG_PM
-static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
static int ahci_pci_device_resume(struct pci_dev *pdev);
#endif
-static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
-static ssize_t ahci_activity_store(struct ata_device *dev,
- enum sw_activity val);
-static void ahci_init_sw_activity(struct ata_link *link);
-
-static ssize_t ahci_show_host_caps(struct device *dev,
- struct device_attribute *attr, char *buf);
-static ssize_t ahci_show_host_cap2(struct device *dev,
- struct device_attribute *attr, char *buf);
-static ssize_t ahci_show_host_version(struct device *dev,
- struct device_attribute *attr, char *buf);
-static ssize_t ahci_show_port_cmd(struct device *dev,
- struct device_attribute *attr, char *buf);
-
-static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
-static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
-static DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
-static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
-
-static struct device_attribute *ahci_shost_attrs[] = {
- &dev_attr_link_power_management_policy,
- &dev_attr_em_message_type,
- &dev_attr_em_message,
- &dev_attr_ahci_host_caps,
- &dev_attr_ahci_host_cap2,
- &dev_attr_ahci_host_version,
- &dev_attr_ahci_port_cmd,
- NULL
-};
-
-static struct device_attribute *ahci_sdev_attrs[] = {
- &dev_attr_sw_activity,
- &dev_attr_unload_heads,
- NULL
-};
-
-static struct scsi_host_template ahci_sht = {
- ATA_NCQ_SHT(DRV_NAME),
- .can_queue = AHCI_MAX_CMDS - 1,
- .sg_tablesize = AHCI_MAX_SG,
- .dma_boundary = AHCI_DMA_BOUNDARY,
- .shost_attrs = ahci_shost_attrs,
- .sdev_attrs = ahci_sdev_attrs,
-};
-
-static struct ata_port_operations ahci_ops = {
- .inherits = &sata_pmp_port_ops,
-
- .qc_defer = ahci_pmp_qc_defer,
- .qc_prep = ahci_qc_prep,
- .qc_issue = ahci_qc_issue,
- .qc_fill_rtf = ahci_qc_fill_rtf,
-
- .freeze = ahci_freeze,
- .thaw = ahci_thaw,
- .softreset = ahci_softreset,
- .hardreset = ahci_hardreset,
- .postreset = ahci_postreset,
- .pmp_softreset = ahci_softreset,
- .error_handler = ahci_error_handler,
- .post_internal_cmd = ahci_post_internal_cmd,
- .dev_config = ahci_dev_config,
-
- .scr_read = ahci_scr_read,
- .scr_write = ahci_scr_write,
- .pmp_attach = ahci_pmp_attach,
- .pmp_detach = ahci_pmp_detach,
-
- .enable_pm = ahci_enable_alpm,
- .disable_pm = ahci_disable_alpm,
- .em_show = ahci_led_show,
- .em_store = ahci_led_store,
- .sw_activity_show = ahci_activity_show,
- .sw_activity_store = ahci_activity_store,
-#ifdef CONFIG_PM
- .port_suspend = ahci_port_suspend,
- .port_resume = ahci_port_resume,
-#endif
- .port_start = ahci_port_start,
- .port_stop = ahci_port_stop,
-};
static struct ata_port_operations ahci_vt8251_ops = {
.inherits = &ahci_ops,
@@ -463,6 +108,7 @@ static struct ata_port_operations ahci_sb600_ops = {
#define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
static const struct ata_port_info ahci_port_info[] = {
+ /* by features */
[board_ahci] =
{
.flags = AHCI_FLAG_COMMON,
@@ -470,81 +116,83 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- [board_ahci_vt8251] =
+ [board_ahci_ign_iferr] =
{
- AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
+ AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
- .port_ops = &ahci_vt8251_ops,
+ .port_ops = &ahci_ops,
},
- [board_ahci_ign_iferr] =
+ [board_ahci_nosntf] =
{
- AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
+ AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- [board_ahci_sb600] =
+ /* by chipsets */
+ [board_ahci_mcp65] =
{
- AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
- AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255 |
- AHCI_HFLAG_32BIT_ONLY),
+ AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP |
+ AHCI_HFLAG_YES_NCQ),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
- .port_ops = &ahci_sb600_ops,
+ .port_ops = &ahci_ops,
},
- [board_ahci_mv] =
+ [board_ahci_mcp77] =
{
- AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
- AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
+ AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP),
+ .flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- [board_ahci_sb700] = /* for SB700 and SB800 */
+ [board_ahci_mcp89] =
{
- AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL),
+ AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
- .port_ops = &ahci_sb600_ops,
+ .port_ops = &ahci_ops,
},
- [board_ahci_mcp65] =
+ [board_ahci_mv] =
{
- AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
- .flags = AHCI_FLAG_COMMON,
+ AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
+ AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
+ .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+ ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- [board_ahci_nopmp] =
+ [board_ahci_sb600] =
{
- AHCI_HFLAGS (AHCI_HFLAG_NO_PMP),
+ AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
+ AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255 |
+ AHCI_HFLAG_32BIT_ONLY),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
- .port_ops = &ahci_ops,
+ .port_ops = &ahci_sb600_ops,
},
- [board_ahci_yesncq] =
+ [board_ahci_sb700] = /* for SB700 and SB800 */
{
- AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
+ AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
- .port_ops = &ahci_ops,
+ .port_ops = &ahci_sb600_ops,
},
- [board_ahci_nosntf] =
+ [board_ahci_vt8251] =
{
- AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF),
+ AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
- .port_ops = &ahci_ops,
+ .port_ops = &ahci_vt8251_ops,
},
};
@@ -629,82 +277,82 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */
- { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0580), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x0581), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x0582), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x0583), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x0584), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x0585), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x0586), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x0587), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x0588), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x0589), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x058a), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x058b), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x058c), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x058d), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x058e), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x058f), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0580), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x0581), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x0582), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x0583), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x0584), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x0585), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x0586), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x0587), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x0588), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x0589), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x058a), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x058b), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x058c), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x058d), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x058e), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x058f), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci_mcp89 }, /* MCP89 */
/* SiS */
{ PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
@@ -737,12 +385,6 @@ static struct pci_driver ahci_pci_driver = {
#endif
};
-static int ahci_em_messages = 1;
-module_param(ahci_em_messages, int, 0444);
-/* add other LED protocol types when they become supported */
-MODULE_PARM_DESC(ahci_em_messages,
- "Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED");
-
#if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE)
static int marvell_enable;
#else
@@ -752,166 +394,15 @@ module_param(marvell_enable, int, 0644);
MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
-static inline int ahci_nr_ports(u32 cap)
-{
- return (cap & 0x1f) + 1;
-}
-
-static inline void __iomem *__ahci_port_base(struct ata_host *host,
- unsigned int port_no)
-{
- void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
-
- return mmio + 0x100 + (port_no * 0x80);
-}
-
-static inline void __iomem *ahci_port_base(struct ata_port *ap)
-{
- return __ahci_port_base(ap->host, ap->port_no);
-}
-
-static void ahci_enable_ahci(void __iomem *mmio)
-{
- int i;
- u32 tmp;
-
- /* turn on AHCI_EN */
- tmp = readl(mmio + HOST_CTL);
- if (tmp & HOST_AHCI_EN)
- return;
-
- /* Some controllers need AHCI_EN to be written multiple times.
- * Try a few times before giving up.
- */
- for (i = 0; i < 5; i++) {
- tmp |= HOST_AHCI_EN;
- writel(tmp, mmio + HOST_CTL);
- tmp = readl(mmio + HOST_CTL); /* flush && sanity check */
- if (tmp & HOST_AHCI_EN)
- return;
- msleep(10);
- }
-
- WARN_ON(1);
-}
-
-static ssize_t ahci_show_host_caps(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct ata_port *ap = ata_shost_to_port(shost);
- struct ahci_host_priv *hpriv = ap->host->private_data;
-
- return sprintf(buf, "%x\n", hpriv->cap);
-}
-
-static ssize_t ahci_show_host_cap2(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct ata_port *ap = ata_shost_to_port(shost);
- struct ahci_host_priv *hpriv = ap->host->private_data;
-
- return sprintf(buf, "%x\n", hpriv->cap2);
-}
-
-static ssize_t ahci_show_host_version(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct ata_port *ap = ata_shost_to_port(shost);
- void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
-
- return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION));
-}
-
-static ssize_t ahci_show_port_cmd(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct ata_port *ap = ata_shost_to_port(shost);
- void __iomem *port_mmio = ahci_port_base(ap);
-
- return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
-}
-
-/**
- * ahci_save_initial_config - Save and fixup initial config values
- * @pdev: target PCI device
- * @hpriv: host private area to store config values
- *
- * Some registers containing configuration info might be set up by
- * BIOS and might be cleared on reset. This function saves the
- * initial values of those registers into @hpriv such that they
- * can be restored after controller reset.
- *
- * If inconsistent, config values are fixed up by this function.
- *
- * LOCKING:
- * None.
- */
-static void ahci_save_initial_config(struct pci_dev *pdev,
- struct ahci_host_priv *hpriv)
+static void ahci_pci_save_initial_config(struct pci_dev *pdev,
+ struct ahci_host_priv *hpriv)
{
- void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
- u32 cap, cap2, vers, port_map;
- int i;
- int mv;
-
- /* make sure AHCI mode is enabled before accessing CAP */
- ahci_enable_ahci(mmio);
+ unsigned int force_port_map = 0;
+ unsigned int mask_port_map = 0;
- /* Values prefixed with saved_ are written back to host after
- * reset. Values without are used for driver operation.
- */
- hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
- hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
-
- /* CAP2 register is only defined for AHCI 1.2 and later */
- vers = readl(mmio + HOST_VERSION);
- if ((vers >> 16) > 1 ||
- ((vers >> 16) == 1 && (vers & 0xFFFF) >= 0x200))
- hpriv->saved_cap2 = cap2 = readl(mmio + HOST_CAP2);
- else
- hpriv->saved_cap2 = cap2 = 0;
-
- /* some chips have errata preventing 64bit use */
- if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
- dev_printk(KERN_INFO, &pdev->dev,
- "controller can't do 64bit DMA, forcing 32bit\n");
- cap &= ~HOST_CAP_64;
- }
-
- if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
- dev_printk(KERN_INFO, &pdev->dev,
- "controller can't do NCQ, turning off CAP_NCQ\n");
- cap &= ~HOST_CAP_NCQ;
- }
-
- if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
- dev_printk(KERN_INFO, &pdev->dev,
- "controller can do NCQ, turning on CAP_NCQ\n");
- cap |= HOST_CAP_NCQ;
- }
-
- if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
- dev_printk(KERN_INFO, &pdev->dev,
- "controller can't do PMP, turning off CAP_PMP\n");
- cap &= ~HOST_CAP_PMP;
- }
-
- if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) {
- dev_printk(KERN_INFO, &pdev->dev,
- "controller can't do SNTF, turning off CAP_SNTF\n");
- cap &= ~HOST_CAP_SNTF;
- }
-
- if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 &&
- port_map != 1) {
- dev_printk(KERN_INFO, &pdev->dev,
- "JMB361 has only one port, port_map 0x%x -> 0x%x\n",
- port_map, 1);
- port_map = 1;
+ if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
+ dev_info(&pdev->dev, "JMB361 has only one port\n");
+ force_port_map = 1;
}
/*
@@ -921,469 +412,25 @@ static void ahci_save_initial_config(struct pci_dev *pdev,
*/
if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
if (pdev->device == 0x6121)
- mv = 0x3;
+ mask_port_map = 0x3;
else
- mv = 0xf;
- dev_printk(KERN_ERR, &pdev->dev,
- "MV_AHCI HACK: port_map %x -> %x\n",
- port_map,
- port_map & mv);
- dev_printk(KERN_ERR, &pdev->dev,
+ mask_port_map = 0xf;
+ dev_info(&pdev->dev,
"Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
-
- port_map &= mv;
}
- /* cross check port_map and cap.n_ports */
- if (port_map) {
- int map_ports = 0;
-
- for (i = 0; i < AHCI_MAX_PORTS; i++)
- if (port_map & (1 << i))
- map_ports++;
-
- /* If PI has more ports than n_ports, whine, clear
- * port_map and let it be generated from n_ports.
- */
- if (map_ports > ahci_nr_ports(cap)) {
- dev_printk(KERN_WARNING, &pdev->dev,
- "implemented port map (0x%x) contains more "
- "ports than nr_ports (%u), using nr_ports\n",
- port_map, ahci_nr_ports(cap));
- port_map = 0;
- }
- }
-
- /* fabricate port_map from cap.nr_ports */
- if (!port_map) {
- port_map = (1 << ahci_nr_ports(cap)) - 1;
- dev_printk(KERN_WARNING, &pdev->dev,
- "forcing PORTS_IMPL to 0x%x\n", port_map);
-
- /* write the fixed up value to the PI register */
- hpriv->saved_port_map = port_map;
- }
-
- /* record values to use during operation */
- hpriv->cap = cap;
- hpriv->cap2 = cap2;
- hpriv->port_map = port_map;
-}
-
-/**
- * ahci_restore_initial_config - Restore initial config
- * @host: target ATA host
- *
- * Restore initial config stored by ahci_save_initial_config().
- *
- * LOCKING:
- * None.
- */
-static void ahci_restore_initial_config(struct ata_host *host)
-{
- struct ahci_host_priv *hpriv = host->private_data;
- void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
-
- writel(hpriv->saved_cap, mmio + HOST_CAP);
- if (hpriv->saved_cap2)
- writel(hpriv->saved_cap2, mmio + HOST_CAP2);
- writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
- (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
-}
-
-static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
-{
- static const int offset[] = {
- [SCR_STATUS] = PORT_SCR_STAT,
- [SCR_CONTROL] = PORT_SCR_CTL,
- [SCR_ERROR] = PORT_SCR_ERR,
- [SCR_ACTIVE] = PORT_SCR_ACT,
- [SCR_NOTIFICATION] = PORT_SCR_NTF,
- };
- struct ahci_host_priv *hpriv = ap->host->private_data;
-
- if (sc_reg < ARRAY_SIZE(offset) &&
- (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
- return offset[sc_reg];
- return 0;
-}
-
-static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
-{
- void __iomem *port_mmio = ahci_port_base(link->ap);
- int offset = ahci_scr_offset(link->ap, sc_reg);
-
- if (offset) {
- *val = readl(port_mmio + offset);
- return 0;
- }
- return -EINVAL;
-}
-
-static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
-{
- void __iomem *port_mmio = ahci_port_base(link->ap);
- int offset = ahci_scr_offset(link->ap, sc_reg);
-
- if (offset) {
- writel(val, port_mmio + offset);
- return 0;
- }
- return -EINVAL;
-}
-
-static void ahci_start_engine(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 tmp;
-
- /* start DMA */
- tmp = readl(port_mmio + PORT_CMD);
- tmp |= PORT_CMD_START;
- writel(tmp, port_mmio + PORT_CMD);
- readl(port_mmio + PORT_CMD); /* flush */
-}
-
-static int ahci_stop_engine(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 tmp;
-
- tmp = readl(port_mmio + PORT_CMD);
-
- /* check if the HBA is idle */
- if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
- return 0;
-
- /* setting HBA to idle */
- tmp &= ~PORT_CMD_START;
- writel(tmp, port_mmio + PORT_CMD);
-
- /* wait for engine to stop. This could be as long as 500 msec */
- tmp = ata_wait_register(port_mmio + PORT_CMD,
- PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
- if (tmp & PORT_CMD_LIST_ON)
- return -EIO;
-
- return 0;
-}
-
-static void ahci_start_fis_rx(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
- struct ahci_host_priv *hpriv = ap->host->private_data;
- struct ahci_port_priv *pp = ap->private_data;
- u32 tmp;
-
- /* set FIS registers */
- if (hpriv->cap & HOST_CAP_64)
- writel((pp->cmd_slot_dma >> 16) >> 16,
- port_mmio + PORT_LST_ADDR_HI);
- writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
-
- if (hpriv->cap & HOST_CAP_64)
- writel((pp->rx_fis_dma >> 16) >> 16,
- port_mmio + PORT_FIS_ADDR_HI);
- writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
-
- /* enable FIS reception */
- tmp = readl(port_mmio + PORT_CMD);
- tmp |= PORT_CMD_FIS_RX;
- writel(tmp, port_mmio + PORT_CMD);
-
- /* flush */
- readl(port_mmio + PORT_CMD);
+ ahci_save_initial_config(&pdev->dev, hpriv, force_port_map,
+ mask_port_map);
}
-static int ahci_stop_fis_rx(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 tmp;
-
- /* disable FIS reception */
- tmp = readl(port_mmio + PORT_CMD);
- tmp &= ~PORT_CMD_FIS_RX;
- writel(tmp, port_mmio + PORT_CMD);
-
- /* wait for completion, spec says 500ms, give it 1000 */
- tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
- PORT_CMD_FIS_ON, 10, 1000);
- if (tmp & PORT_CMD_FIS_ON)
- return -EBUSY;
-
- return 0;
-}
-
-static void ahci_power_up(struct ata_port *ap)
-{
- struct ahci_host_priv *hpriv = ap->host->private_data;
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 cmd;
-
- cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
-
- /* spin up device */
- if (hpriv->cap & HOST_CAP_SSS) {
- cmd |= PORT_CMD_SPIN_UP;
- writel(cmd, port_mmio + PORT_CMD);
- }
-
- /* wake up link */
- writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
-}
-
-static void ahci_disable_alpm(struct ata_port *ap)
-{
- struct ahci_host_priv *hpriv = ap->host->private_data;
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 cmd;
- struct ahci_port_priv *pp = ap->private_data;
-
- /* IPM bits should be disabled by libata-core */
- /* get the existing command bits */
- cmd = readl(port_mmio + PORT_CMD);
-
- /* disable ALPM and ASP */
- cmd &= ~PORT_CMD_ASP;
- cmd &= ~PORT_CMD_ALPE;
-
- /* force the interface back to active */
- cmd |= PORT_CMD_ICC_ACTIVE;
-
- /* write out new cmd value */
- writel(cmd, port_mmio + PORT_CMD);
- cmd = readl(port_mmio + PORT_CMD);
-
- /* wait 10ms to be sure we've come out of any low power state */
- msleep(10);
-
- /* clear out any PhyRdy stuff from interrupt status */
- writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
-
- /* go ahead and clean out PhyRdy Change from Serror too */
- ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
-
- /*
- * Clear flag to indicate that we should ignore all PhyRdy
- * state changes
- */
- hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
-
- /*
- * Enable interrupts on Phy Ready.
- */
- pp->intr_mask |= PORT_IRQ_PHYRDY;
- writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
-
- /*
- * don't change the link pm policy - we can be called
- * just to turn off link pm temporarily
- */
-}
-
-static int ahci_enable_alpm(struct ata_port *ap,
- enum link_pm policy)
-{
- struct ahci_host_priv *hpriv = ap->host->private_data;
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 cmd;
- struct ahci_port_priv *pp = ap->private_data;
- u32 asp;
-
- /* Make sure the host is capable of link power management */
- if (!(hpriv->cap & HOST_CAP_ALPM))
- return -EINVAL;
-
- switch (policy) {
- case MAX_PERFORMANCE:
- case NOT_AVAILABLE:
- /*
- * if we came here with NOT_AVAILABLE,
- * it just means this is the first time we
- * have tried to enable - default to max performance,
- * and let the user go to lower power modes on request.
- */
- ahci_disable_alpm(ap);
- return 0;
- case MIN_POWER:
- /* configure HBA to enter SLUMBER */
- asp = PORT_CMD_ASP;
- break;
- case MEDIUM_POWER:
- /* configure HBA to enter PARTIAL */
- asp = 0;
- break;
- default:
- return -EINVAL;
- }
-
- /*
- * Disable interrupts on Phy Ready. This keeps us from
- * getting woken up due to spurious phy ready interrupts
- * TBD - Hot plug should be done via polling now, is
- * that even supported?
- */
- pp->intr_mask &= ~PORT_IRQ_PHYRDY;
- writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
-
- /*
- * Set a flag to indicate that we should ignore all PhyRdy
- * state changes since these can happen now whenever we
- * change link state
- */
- hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
-
- /* get the existing command bits */
- cmd = readl(port_mmio + PORT_CMD);
-
- /*
- * Set ASP based on Policy
- */
- cmd |= asp;
-
- /*
- * Setting this bit will instruct the HBA to aggressively
- * enter a lower power link state when it's appropriate and
- * based on the value set above for ASP
- */
- cmd |= PORT_CMD_ALPE;
-
- /* write out new cmd value */
- writel(cmd, port_mmio + PORT_CMD);
- cmd = readl(port_mmio + PORT_CMD);
-
- /* IPM bits should be set by libata-core */
- return 0;
-}
-
-#ifdef CONFIG_PM
-static void ahci_power_down(struct ata_port *ap)
-{
- struct ahci_host_priv *hpriv = ap->host->private_data;
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 cmd, scontrol;
-
- if (!(hpriv->cap & HOST_CAP_SSS))
- return;
-
- /* put device into listen mode, first set PxSCTL.DET to 0 */
- scontrol = readl(port_mmio + PORT_SCR_CTL);
- scontrol &= ~0xf;
- writel(scontrol, port_mmio + PORT_SCR_CTL);
-
- /* then set PxCMD.SUD to 0 */
- cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
- cmd &= ~PORT_CMD_SPIN_UP;
- writel(cmd, port_mmio + PORT_CMD);
-}
-#endif
-
-static void ahci_start_port(struct ata_port *ap)
-{
- struct ahci_port_priv *pp = ap->private_data;
- struct ata_link *link;
- struct ahci_em_priv *emp;
- ssize_t rc;
- int i;
-
- /* enable FIS reception */
- ahci_start_fis_rx(ap);
-
- /* enable DMA */
- ahci_start_engine(ap);
-
- /* turn on LEDs */
- if (ap->flags & ATA_FLAG_EM) {
- ata_for_each_link(link, ap, EDGE) {
- emp = &pp->em_priv[link->pmp];
-
- /* EM Transmit bit may be busy during init */
- for (i = 0; i < EM_MAX_RETRY; i++) {
- rc = ahci_transmit_led_message(ap,
- emp->led_state,
- 4);
- if (rc == -EBUSY)
- msleep(1);
- else
- break;
- }
- }
- }
-
- if (ap->flags & ATA_FLAG_SW_ACTIVITY)
- ata_for_each_link(link, ap, EDGE)
- ahci_init_sw_activity(link);
-
-}
-
-static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
-{
- int rc;
-
- /* disable DMA */
- rc = ahci_stop_engine(ap);
- if (rc) {
- *emsg = "failed to stop engine";
- return rc;
- }
-
- /* disable FIS reception */
- rc = ahci_stop_fis_rx(ap);
- if (rc) {
- *emsg = "failed stop FIS RX";
- return rc;
- }
-
- return 0;
-}
-
-static int ahci_reset_controller(struct ata_host *host)
+static int ahci_pci_reset_controller(struct ata_host *host)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
- struct ahci_host_priv *hpriv = host->private_data;
- void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
- u32 tmp;
- /* we must be in AHCI mode, before using anything
- * AHCI-specific, such as HOST_RESET.
- */
- ahci_enable_ahci(mmio);
-
- /* global controller reset */
- if (!ahci_skip_host_reset) {
- tmp = readl(mmio + HOST_CTL);
- if ((tmp & HOST_RESET) == 0) {
- writel(tmp | HOST_RESET, mmio + HOST_CTL);
- readl(mmio + HOST_CTL); /* flush */
- }
-
- /*
- * to perform host reset, OS should set HOST_RESET
- * and poll until this bit is read to be "0".
- * reset must complete within 1 second, or
- * the hardware should be considered fried.
- */
- tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
- HOST_RESET, 10, 1000);
-
- if (tmp & HOST_RESET) {
- dev_printk(KERN_ERR, host->dev,
- "controller reset failed (0x%x)\n", tmp);
- return -EIO;
- }
-
- /* turn on AHCI mode */
- ahci_enable_ahci(mmio);
-
- /* Some registers might be cleared on reset. Restore
- * initial values.
- */
- ahci_restore_initial_config(host);
- } else
- dev_printk(KERN_INFO, host->dev,
- "skipping global host reset\n");
+ ahci_reset_controller(host);
if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
+ struct ahci_host_priv *hpriv = host->private_data;
u16 tmp16;
/* configure PCS */
@@ -1397,267 +444,10 @@ static int ahci_reset_controller(struct ata_host *host)
return 0;
}
-static void ahci_sw_activity(struct ata_link *link)
-{
- struct ata_port *ap = link->ap;
- struct ahci_port_priv *pp = ap->private_data;
- struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
-
- if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
- return;
-
- emp->activity++;
- if (!timer_pending(&emp->timer))
- mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
-}
-
-static void ahci_sw_activity_blink(unsigned long arg)
-{
- struct ata_link *link = (struct ata_link *)arg;
- struct ata_port *ap = link->ap;
- struct ahci_port_priv *pp = ap->private_data;
- struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
- unsigned long led_message = emp->led_state;
- u32 activity_led_state;
- unsigned long flags;
-
- led_message &= EM_MSG_LED_VALUE;
- led_message |= ap->port_no | (link->pmp << 8);
-
- /* check to see if we've had activity. If so,
- * toggle state of LED and reset timer. If not,
- * turn LED to desired idle state.
- */
- spin_lock_irqsave(ap->lock, flags);
- if (emp->saved_activity != emp->activity) {
- emp->saved_activity = emp->activity;
- /* get the current LED state */
- activity_led_state = led_message & EM_MSG_LED_VALUE_ON;
-
- if (activity_led_state)
- activity_led_state = 0;
- else
- activity_led_state = 1;
-
- /* clear old state */
- led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
-
- /* toggle state */
- led_message |= (activity_led_state << 16);
- mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
- } else {
- /* switch to idle */
- led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
- if (emp->blink_policy == BLINK_OFF)
- led_message |= (1 << 16);
- }
- spin_unlock_irqrestore(ap->lock, flags);
- ahci_transmit_led_message(ap, led_message, 4);
-}
-
-static void ahci_init_sw_activity(struct ata_link *link)
-{
- struct ata_port *ap = link->ap;
- struct ahci_port_priv *pp = ap->private_data;
- struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
-
- /* init activity stats, setup timer */
- emp->saved_activity = emp->activity = 0;
- setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);
-
- /* check our blink policy and set flag for link if it's enabled */
- if (emp->blink_policy)
- link->flags |= ATA_LFLAG_SW_ACTIVITY;
-}
-
-static int ahci_reset_em(struct ata_host *host)
-{
- void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
- u32 em_ctl;
-
- em_ctl = readl(mmio + HOST_EM_CTL);
- if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
- return -EINVAL;
-
- writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
- return 0;
-}
-
-static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
- ssize_t size)
-{
- struct ahci_host_priv *hpriv = ap->host->private_data;
- struct ahci_port_priv *pp = ap->private_data;
- void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
- u32 em_ctl;
- u32 message[] = {0, 0};
- unsigned long flags;
- int pmp;
- struct ahci_em_priv *emp;
-
- /* get the slot number from the message */
- pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
- if (pmp < EM_MAX_SLOTS)
- emp = &pp->em_priv[pmp];
- else
- return -EINVAL;
-
- spin_lock_irqsave(ap->lock, flags);
-
- /*
- * if we are still busy transmitting a previous message,
- * do not allow
- */
- em_ctl = readl(mmio + HOST_EM_CTL);
- if (em_ctl & EM_CTL_TM) {
- spin_unlock_irqrestore(ap->lock, flags);
- return -EBUSY;
- }
-
- /*
- * create message header - this is all zero except for
- * the message size, which is 4 bytes.
- */
- message[0] |= (4 << 8);
-
- /* ignore 0:4 of byte zero, fill in port info yourself */
- message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);
-
- /* write message to EM_LOC */
- writel(message[0], mmio + hpriv->em_loc);
- writel(message[1], mmio + hpriv->em_loc+4);
-
- /* save off new led state for port/slot */
- emp->led_state = state;
-
- /*
- * tell hardware to transmit the message
- */
- writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
-
- spin_unlock_irqrestore(ap->lock, flags);
- return size;
-}
-
-static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
-{
- struct ahci_port_priv *pp = ap->private_data;
- struct ata_link *link;
- struct ahci_em_priv *emp;
- int rc = 0;
-
- ata_for_each_link(link, ap, EDGE) {
- emp = &pp->em_priv[link->pmp];
- rc += sprintf(buf, "%lx\n", emp->led_state);
- }
- return rc;
-}
-
-static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
- size_t size)
-{
- int state;
- int pmp;
- struct ahci_port_priv *pp = ap->private_data;
- struct ahci_em_priv *emp;
-
- state = simple_strtoul(buf, NULL, 0);
-
- /* get the slot number from the message */
- pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
- if (pmp < EM_MAX_SLOTS)
- emp = &pp->em_priv[pmp];
- else
- return -EINVAL;
-
- /* mask off the activity bits if we are in sw_activity
- * mode, user should turn off sw_activity before setting
- * activity led through em_message
- */
- if (emp->blink_policy)
- state &= ~EM_MSG_LED_VALUE_ACTIVITY;
-
- return ahci_transmit_led_message(ap, state, size);
-}
-
-static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
-{
- struct ata_link *link = dev->link;
- struct ata_port *ap = link->ap;
- struct ahci_port_priv *pp = ap->private_data;
- struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
- u32 port_led_state = emp->led_state;
-
- /* save the desired Activity LED behavior */
- if (val == OFF) {
- /* clear LFLAG */
- link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
-
- /* set the LED to OFF */
- port_led_state &= EM_MSG_LED_VALUE_OFF;
- port_led_state |= (ap->port_no | (link->pmp << 8));
- ahci_transmit_led_message(ap, port_led_state, 4);
- } else {
- link->flags |= ATA_LFLAG_SW_ACTIVITY;
- if (val == BLINK_OFF) {
- /* set LED to ON for idle */
- port_led_state &= EM_MSG_LED_VALUE_OFF;
- port_led_state |= (ap->port_no | (link->pmp << 8));
- port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
- ahci_transmit_led_message(ap, port_led_state, 4);
- }
- }
- emp->blink_policy = val;
- return 0;
-}
-
-static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
-{
- struct ata_link *link = dev->link;
- struct ata_port *ap = link->ap;
- struct ahci_port_priv *pp = ap->private_data;
- struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
-
- /* display the saved value of activity behavior for this
- * disk.
- */
- return sprintf(buf, "%d\n", emp->blink_policy);
-}
-
-static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
- int port_no, void __iomem *mmio,
- void __iomem *port_mmio)
-{
- const char *emsg = NULL;
- int rc;
- u32 tmp;
-
- /* make sure port is not active */
- rc = ahci_deinit_port(ap, &emsg);
- if (rc)
- dev_printk(KERN_WARNING, &pdev->dev,
- "%s (%d)\n", emsg, rc);
-
- /* clear SError */
- tmp = readl(port_mmio + PORT_SCR_ERR);
- VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
- writel(tmp, port_mmio + PORT_SCR_ERR);
-
- /* clear port IRQ */
- tmp = readl(port_mmio + PORT_IRQ_STAT);
- VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
- if (tmp)
- writel(tmp, port_mmio + PORT_IRQ_STAT);
-
- writel(1 << port_no, mmio + HOST_IRQ_STAT);
-}
-
-static void ahci_init_controller(struct ata_host *host)
+static void ahci_pci_init_controller(struct ata_host *host)
{
struct ahci_host_priv *hpriv = host->private_data;
struct pci_dev *pdev = to_pci_dev(host->dev);
- void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
- int i;
void __iomem *port_mmio;
u32 tmp;
int mv;
@@ -1678,220 +468,7 @@ static void ahci_init_controller(struct ata_host *host)
writel(tmp, port_mmio + PORT_IRQ_STAT);
}
- for (i = 0; i < host->n_ports; i++) {
- struct ata_port *ap = host->ports[i];
-
- port_mmio = ahci_port_base(ap);
- if (ata_port_is_dummy(ap))
- continue;
-
- ahci_port_init(pdev, ap, i, mmio, port_mmio);
- }
-
- tmp = readl(mmio + HOST_CTL);
- VPRINTK("HOST_CTL 0x%x\n", tmp);
- writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
- tmp = readl(mmio + HOST_CTL);
- VPRINTK("HOST_CTL 0x%x\n", tmp);
-}
-
-static void ahci_dev_config(struct ata_device *dev)
-{
- struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
-
- if (hpriv->flags & AHCI_HFLAG_SECT255) {
- dev->max_sectors = 255;
- ata_dev_printk(dev, KERN_INFO,
- "SB600 AHCI: limiting to 255 sectors per cmd\n");
- }
-}
-
-static unsigned int ahci_dev_classify(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
- struct ata_taskfile tf;
- u32 tmp;
-
- tmp = readl(port_mmio + PORT_SIG);
- tf.lbah = (tmp >> 24) & 0xff;
- tf.lbam = (tmp >> 16) & 0xff;
- tf.lbal = (tmp >> 8) & 0xff;
- tf.nsect = (tmp) & 0xff;
-
- return ata_dev_classify(&tf);
-}
-
-static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
- u32 opts)
-{
- dma_addr_t cmd_tbl_dma;
-
- cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
-
- pp->cmd_slot[tag].opts = cpu_to_le32(opts);
- pp->cmd_slot[tag].status = 0;
- pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
- pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
-}
-
-static int ahci_kick_engine(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
- struct ahci_host_priv *hpriv = ap->host->private_data;
- u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
- u32 tmp;
- int busy, rc;
-
- /* stop engine */
- rc = ahci_stop_engine(ap);
- if (rc)
- goto out_restart;
-
- /* need to do CLO?
- * always do CLO if PMP is attached (AHCI-1.3 9.2)
- */
- busy = status & (ATA_BUSY | ATA_DRQ);
- if (!busy && !sata_pmp_attached(ap)) {
- rc = 0;
- goto out_restart;
- }
-
- if (!(hpriv->cap & HOST_CAP_CLO)) {
- rc = -EOPNOTSUPP;
- goto out_restart;
- }
-
- /* perform CLO */
- tmp = readl(port_mmio + PORT_CMD);
- tmp |= PORT_CMD_CLO;
- writel(tmp, port_mmio + PORT_CMD);
-
- rc = 0;
- tmp = ata_wait_register(port_mmio + PORT_CMD,
- PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
- if (tmp & PORT_CMD_CLO)
- rc = -EIO;
-
- /* restart engine */
- out_restart:
- ahci_start_engine(ap);
- return rc;
-}
-
-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
- struct ata_taskfile *tf, int is_cmd, u16 flags,
- unsigned long timeout_msec)
-{
- const u32 cmd_fis_len = 5; /* five dwords */
- struct ahci_port_priv *pp = ap->private_data;
- void __iomem *port_mmio = ahci_port_base(ap);
- u8 *fis = pp->cmd_tbl;
- u32 tmp;
-
- /* prep the command */
- ata_tf_to_fis(tf, pmp, is_cmd, fis);
- ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
-
- /* issue & wait */
- writel(1, port_mmio + PORT_CMD_ISSUE);
-
- if (timeout_msec) {
- tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
- 1, timeout_msec);
- if (tmp & 0x1) {
- ahci_kick_engine(ap);
- return -EBUSY;
- }
- } else
- readl(port_mmio + PORT_CMD_ISSUE); /* flush */
-
- return 0;
-}
-
-static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
- int pmp, unsigned long deadline,
- int (*check_ready)(struct ata_link *link))
-{
- struct ata_port *ap = link->ap;
- struct ahci_host_priv *hpriv = ap->host->private_data;
- const char *reason = NULL;
- unsigned long now, msecs;
- struct ata_taskfile tf;
- int rc;
-
- DPRINTK("ENTER\n");
-
- /* prepare for SRST (AHCI-1.1 10.4.1) */
- rc = ahci_kick_engine(ap);
- if (rc && rc != -EOPNOTSUPP)
- ata_link_printk(link, KERN_WARNING,
- "failed to reset engine (errno=%d)\n", rc);
-
- ata_tf_init(link->device, &tf);
-
- /* issue the first D2H Register FIS */
- msecs = 0;
- now = jiffies;
- if (time_after(now, deadline))
- msecs = jiffies_to_msecs(deadline - now);
-
- tf.ctl |= ATA_SRST;
- if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
- AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
- rc = -EIO;
- reason = "1st FIS failed";
- goto fail;
- }
-
- /* spec says at least 5us, but be generous and sleep for 1ms */
- msleep(1);
-
- /* issue the second D2H Register FIS */
- tf.ctl &= ~ATA_SRST;
- ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
-
- /* wait for link to become ready */
- rc = ata_wait_after_reset(link, deadline, check_ready);
- if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) {
- /*
- * Workaround for cases where link online status can't
- * be trusted. Treat device readiness timeout as link
- * offline.
- */
- ata_link_printk(link, KERN_INFO,
- "device not ready, treating as offline\n");
- *class = ATA_DEV_NONE;
- } else if (rc) {
- /* link occupied, -ENODEV too is an error */
- reason = "device not ready";
- goto fail;
- } else
- *class = ahci_dev_classify(ap);
-
- DPRINTK("EXIT, class=%u\n", *class);
- return 0;
-
- fail:
- ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
- return rc;
-}
-
-static int ahci_check_ready(struct ata_link *link)
-{
- void __iomem *port_mmio = ahci_port_base(link->ap);
- u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
-
- return ata_check_ready(status);
-}
-
-static int ahci_softreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline)
-{
- int pmp = sata_srst_pmp(link);
-
- DPRINTK("ENTER\n");
-
- return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
+ ahci_init_controller(host);
}
static int ahci_sb600_check_ready(struct ata_link *link)
@@ -1943,38 +520,6 @@ static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
return rc;
}
-static int ahci_hardreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline)
-{
- const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
- struct ata_port *ap = link->ap;
- struct ahci_port_priv *pp = ap->private_data;
- u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
- struct ata_taskfile tf;
- bool online;
- int rc;
-
- DPRINTK("ENTER\n");
-
- ahci_stop_engine(ap);
-
- /* clear D2H reception area to properly wait for D2H FIS */
- ata_tf_init(link->device, &tf);
- tf.command = 0x80;
- ata_tf_to_fis(&tf, 0, 0, d2h_fis);
-
- rc = sata_link_hardreset(link, timing, deadline, &online,
- ahci_check_ready);
-
- ahci_start_engine(ap);
-
- if (online)
- *class = ahci_dev_classify(ap);
-
- DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
- return rc;
-}
-
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
@@ -2043,605 +588,12 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
return rc;
}
-static void ahci_postreset(struct ata_link *link, unsigned int *class)
-{
- struct ata_port *ap = link->ap;
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 new_tmp, tmp;
-
- ata_std_postreset(link, class);
-
- /* Make sure port's ATAPI bit is set appropriately */
- new_tmp = tmp = readl(port_mmio + PORT_CMD);
- if (*class == ATA_DEV_ATAPI)
- new_tmp |= PORT_CMD_ATAPI;
- else
- new_tmp &= ~PORT_CMD_ATAPI;
- if (new_tmp != tmp) {
- writel(new_tmp, port_mmio + PORT_CMD);
- readl(port_mmio + PORT_CMD); /* flush */
- }
-}
-
-static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
-{
- struct scatterlist *sg;
- struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
- unsigned int si;
-
- VPRINTK("ENTER\n");
-
- /*
- * Next, the S/G list.
- */
- for_each_sg(qc->sg, sg, qc->n_elem, si) {
- dma_addr_t addr = sg_dma_address(sg);
- u32 sg_len = sg_dma_len(sg);
-
- ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
- ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
- ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
- }
-
- return si;
-}
-
-static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- struct ahci_port_priv *pp = ap->private_data;
-
- if (!sata_pmp_attached(ap) || pp->fbs_enabled)
- return ata_std_qc_defer(qc);
- else
- return sata_pmp_qc_defer_cmd_switch(qc);
-}
-
-static void ahci_qc_prep(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- struct ahci_port_priv *pp = ap->private_data;
- int is_atapi = ata_is_atapi(qc->tf.protocol);
- void *cmd_tbl;
- u32 opts;
- const u32 cmd_fis_len = 5; /* five dwords */
- unsigned int n_elem;
-
- /*
- * Fill in command table information. First, the header,
- * a SATA Register - Host to Device command FIS.
- */
- cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
-
- ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
- if (is_atapi) {
- memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
- memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
- }
-
- n_elem = 0;
- if (qc->flags & ATA_QCFLAG_DMAMAP)
- n_elem = ahci_fill_sg(qc, cmd_tbl);
-
- /*
- * Fill in command slot information.
- */
- opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
- if (qc->tf.flags & ATA_TFLAG_WRITE)
- opts |= AHCI_CMD_WRITE;
- if (is_atapi)
- opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
-
- ahci_fill_cmd_slot(pp, qc->tag, opts);
-}
-
-static void ahci_fbs_dec_intr(struct ata_port *ap)
-{
- struct ahci_port_priv *pp = ap->private_data;
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 fbs = readl(port_mmio + PORT_FBS);
- int retries = 3;
-
- DPRINTK("ENTER\n");
- BUG_ON(!pp->fbs_enabled);
-
- /* time to wait for DEC is not specified by AHCI spec,
- * add a retry loop for safety.
- */
- writel(fbs | PORT_FBS_DEC, port_mmio + PORT_FBS);
- fbs = readl(port_mmio + PORT_FBS);
- while ((fbs & PORT_FBS_DEC) && retries--) {
- udelay(1);
- fbs = readl(port_mmio + PORT_FBS);
- }
-
- if (fbs & PORT_FBS_DEC)
- dev_printk(KERN_ERR, ap->host->dev,
- "failed to clear device error\n");
-}
-
-static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
-{
- struct ahci_host_priv *hpriv = ap->host->private_data;
- struct ahci_port_priv *pp = ap->private_data;
- struct ata_eh_info *host_ehi = &ap->link.eh_info;
- struct ata_link *link = NULL;
- struct ata_queued_cmd *active_qc;
- struct ata_eh_info *active_ehi;
- bool fbs_need_dec = false;
- u32 serror;
-
- /* determine active link with error */
- if (pp->fbs_enabled) {
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 fbs = readl(port_mmio + PORT_FBS);
- int pmp = fbs >> PORT_FBS_DWE_OFFSET;
-
- if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links) &&
- ata_link_online(&ap->pmp_link[pmp])) {
- link = &ap->pmp_link[pmp];
- fbs_need_dec = true;
- }
-
- } else
- ata_for_each_link(link, ap, EDGE)
- if (ata_link_active(link))
- break;
-
- if (!link)
- link = &ap->link;
-
- active_qc = ata_qc_from_tag(ap, link->active_tag);
- active_ehi = &link->eh_info;
-
- /* record irq stat */
- ata_ehi_clear_desc(host_ehi);
- ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
-
- /* AHCI needs SError cleared; otherwise, it might lock up */
- ahci_scr_read(&ap->link, SCR_ERROR, &serror);
- ahci_scr_write(&ap->link, SCR_ERROR, serror);
- host_ehi->serror |= serror;
-
- /* some controllers set IRQ_IF_ERR on device errors, ignore it */
- if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
- irq_stat &= ~PORT_IRQ_IF_ERR;
-
- if (irq_stat & PORT_IRQ_TF_ERR) {
- /* If qc is active, charge it; otherwise, the active
- * link. There's no active qc on NCQ errors. It will
- * be determined by EH by reading log page 10h.
- */
- if (active_qc)
- active_qc->err_mask |= AC_ERR_DEV;
- else
- active_ehi->err_mask |= AC_ERR_DEV;
-
- if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
- host_ehi->serror &= ~SERR_INTERNAL;
- }
-
- if (irq_stat & PORT_IRQ_UNK_FIS) {
- u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
-
- active_ehi->err_mask |= AC_ERR_HSM;
- active_ehi->action |= ATA_EH_RESET;
- ata_ehi_push_desc(active_ehi,
- "unknown FIS %08x %08x %08x %08x" ,
- unk[0], unk[1], unk[2], unk[3]);
- }
-
- if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
- active_ehi->err_mask |= AC_ERR_HSM;
- active_ehi->action |= ATA_EH_RESET;
- ata_ehi_push_desc(active_ehi, "incorrect PMP");
- }
-
- if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
- host_ehi->err_mask |= AC_ERR_HOST_BUS;
- host_ehi->action |= ATA_EH_RESET;
- ata_ehi_push_desc(host_ehi, "host bus error");
- }
-
- if (irq_stat & PORT_IRQ_IF_ERR) {
- if (fbs_need_dec)
- active_ehi->err_mask |= AC_ERR_DEV;
- else {
- host_ehi->err_mask |= AC_ERR_ATA_BUS;
- host_ehi->action |= ATA_EH_RESET;
- }
-
- ata_ehi_push_desc(host_ehi, "interface fatal error");
- }
-
- if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
- ata_ehi_hotplugged(host_ehi);
- ata_ehi_push_desc(host_ehi, "%s",
- irq_stat & PORT_IRQ_CONNECT ?
- "connection status changed" : "PHY RDY changed");
- }
-
- /* okay, let's hand over to EH */
-
- if (irq_stat & PORT_IRQ_FREEZE)
- ata_port_freeze(ap);
- else if (fbs_need_dec) {
- ata_link_abort(link);
- ahci_fbs_dec_intr(ap);
- } else
- ata_port_abort(ap);
-}
-
-static void ahci_port_intr(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
- struct ata_eh_info *ehi = &ap->link.eh_info;
- struct ahci_port_priv *pp = ap->private_data;
- struct ahci_host_priv *hpriv = ap->host->private_data;
- int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
- u32 status, qc_active = 0;
- int rc;
-
- status = readl(port_mmio + PORT_IRQ_STAT);
- writel(status, port_mmio + PORT_IRQ_STAT);
-
- /* ignore BAD_PMP while resetting */
- if (unlikely(resetting))
- status &= ~PORT_IRQ_BAD_PMP;
-
- /* If we are getting PhyRdy, this is
- * just a power state change, we should
- * clear out this, plus the PhyRdy/Comm
- * Wake bits from Serror
- */
- if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
- (status & PORT_IRQ_PHYRDY)) {
- status &= ~PORT_IRQ_PHYRDY;
- ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
- }
-
- if (unlikely(status & PORT_IRQ_ERROR)) {
- ahci_error_intr(ap, status);
- return;
- }
-
- if (status & PORT_IRQ_SDB_FIS) {
- /* If SNotification is available, leave notification
- * handling to sata_async_notification(). If not,
- * emulate it by snooping SDB FIS RX area.
- *
- * Snooping FIS RX area is probably cheaper than
- * poking SNotification but some controllers which
- * implement SNotification, ICH9 for example, don't
- * store AN SDB FIS into receive area.
- */
- if (hpriv->cap & HOST_CAP_SNTF)
- sata_async_notification(ap);
- else {
- /* If the 'N' bit in word 0 of the FIS is set,
- * we just received asynchronous notification.
- * Tell libata about it.
- *
- * Lack of SNotification should not appear in
- * ahci 1.2, so the workaround is unnecessary
- * when FBS is enabled.
- */
- if (pp->fbs_enabled)
- WARN_ON_ONCE(1);
- else {
- const __le32 *f = pp->rx_fis + RX_FIS_SDB;
- u32 f0 = le32_to_cpu(f[0]);
- if (f0 & (1 << 15))
- sata_async_notification(ap);
- }
- }
- }
-
- /* pp->active_link is not reliable once FBS is enabled, both
- * PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because
- * NCQ and non-NCQ commands may be in flight at the same time.
- */
- if (pp->fbs_enabled) {
- if (ap->qc_active) {
- qc_active = readl(port_mmio + PORT_SCR_ACT);
- qc_active |= readl(port_mmio + PORT_CMD_ISSUE);
- }
- } else {
- /* pp->active_link is valid iff any command is in flight */
- if (ap->qc_active && pp->active_link->sactive)
- qc_active = readl(port_mmio + PORT_SCR_ACT);
- else
- qc_active = readl(port_mmio + PORT_CMD_ISSUE);
- }
-
- rc = ata_qc_complete_multiple(ap, qc_active);
-
- /* while resetting, invalid completions are expected */
- if (unlikely(rc < 0 && !resetting)) {
- ehi->err_mask |= AC_ERR_HSM;
- ehi->action |= ATA_EH_RESET;
- ata_port_freeze(ap);
- }
-}
-
-static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
-{
- struct ata_host *host = dev_instance;
- struct ahci_host_priv *hpriv;
- unsigned int i, handled = 0;
- void __iomem *mmio;
- u32 irq_stat, irq_masked;
-
- VPRINTK("ENTER\n");
-
- hpriv = host->private_data;
- mmio = host->iomap[AHCI_PCI_BAR];
-
- /* sigh. 0xffffffff is a valid return from h/w */
- irq_stat = readl(mmio + HOST_IRQ_STAT);
- if (!irq_stat)
- return IRQ_NONE;
-
- irq_masked = irq_stat & hpriv->port_map;
-
- spin_lock(&host->lock);
-
- for (i = 0; i < host->n_ports; i++) {
- struct ata_port *ap;
-
- if (!(irq_masked & (1 << i)))
- continue;
-
- ap = host->ports[i];
- if (ap) {
- ahci_port_intr(ap);
- VPRINTK("port %u\n", i);
- } else {
- VPRINTK("port %u (no irq)\n", i);
- if (ata_ratelimit())
- dev_printk(KERN_WARNING, host->dev,
- "interrupt on disabled port %u\n", i);
- }
-
- handled = 1;
- }
-
- /* HOST_IRQ_STAT behaves as level triggered latch meaning that
- * it should be cleared after all the port events are cleared;
- * otherwise, it will raise a spurious interrupt after each
- * valid one. Please read section 10.6.2 of ahci 1.1 for more
- * information.
- *
- * Also, use the unmasked value to clear interrupt as spurious
- * pending event on a dummy port might cause screaming IRQ.
- */
- writel(irq_stat, mmio + HOST_IRQ_STAT);
-
- spin_unlock(&host->lock);
-
- VPRINTK("EXIT\n");
-
- return IRQ_RETVAL(handled);
-}
-
-static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- void __iomem *port_mmio = ahci_port_base(ap);
- struct ahci_port_priv *pp = ap->private_data;
-
- /* Keep track of the currently active link. It will be used
- * in completion path to determine whether NCQ phase is in
- * progress.
- */
- pp->active_link = qc->dev->link;
-
- if (qc->tf.protocol == ATA_PROT_NCQ)
- writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
-
- if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) {
- u32 fbs = readl(port_mmio + PORT_FBS);
- fbs &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
- fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
- writel(fbs, port_mmio + PORT_FBS);
- pp->fbs_last_dev = qc->dev->link->pmp;
- }
-
- writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
-
- ahci_sw_activity(qc->dev->link);
-
- return 0;
-}
-
-static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
-{
- struct ahci_port_priv *pp = qc->ap->private_data;
- u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
-
- if (pp->fbs_enabled)
- d2h_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;
-
- ata_tf_from_fis(d2h_fis, &qc->result_tf);
- return true;
-}
-
-static void ahci_freeze(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
-
- /* turn IRQ off */
- writel(0, port_mmio + PORT_IRQ_MASK);
-}
-
-static void ahci_thaw(struct ata_port *ap)
-{
- void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 tmp;
- struct ahci_port_priv *pp = ap->private_data;
-
- /* clear IRQ */
- tmp = readl(port_mmio + PORT_IRQ_STAT);
- writel(tmp, port_mmio + PORT_IRQ_STAT);
- writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
-
- /* turn IRQ back on */
- writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
-}
-
-static void ahci_error_handler(struct ata_port *ap)
-{
- if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
- /* restart engine */
- ahci_stop_engine(ap);
- ahci_start_engine(ap);
- }
-
- sata_pmp_error_handler(ap);
-}
-
-static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
-
- /* make DMA engine forget about the failed command */
- if (qc->flags & ATA_QCFLAG_FAILED)
- ahci_kick_engine(ap);
-}
-
-static void ahci_enable_fbs(struct ata_port *ap)
-{
- struct ahci_port_priv *pp = ap->private_data;
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 fbs;
- int rc;
-
- if (!pp->fbs_supported)
- return;
-
- fbs = readl(port_mmio + PORT_FBS);
- if (fbs & PORT_FBS_EN) {
- pp->fbs_enabled = true;
- pp->fbs_last_dev = -1; /* initialization */
- return;
- }
-
- rc = ahci_stop_engine(ap);
- if (rc)
- return;
-
- writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
- fbs = readl(port_mmio + PORT_FBS);
- if (fbs & PORT_FBS_EN) {
- dev_printk(KERN_INFO, ap->host->dev, "FBS is enabled.\n");
- pp->fbs_enabled = true;
- pp->fbs_last_dev = -1; /* initialization */
- } else
- dev_printk(KERN_ERR, ap->host->dev, "Failed to enable FBS\n");
-
- ahci_start_engine(ap);
-}
-
-static void ahci_disable_fbs(struct ata_port *ap)
-{
- struct ahci_port_priv *pp = ap->private_data;
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 fbs;
- int rc;
-
- if (!pp->fbs_supported)
- return;
-
- fbs = readl(port_mmio + PORT_FBS);
- if ((fbs & PORT_FBS_EN) == 0) {
- pp->fbs_enabled = false;
- return;
- }
-
- rc = ahci_stop_engine(ap);
- if (rc)
- return;
-
- writel(fbs & ~PORT_FBS_EN, port_mmio + PORT_FBS);
- fbs = readl(port_mmio + PORT_FBS);
- if (fbs & PORT_FBS_EN)
- dev_printk(KERN_ERR, ap->host->dev, "Failed to disable FBS\n");
- else {
- dev_printk(KERN_INFO, ap->host->dev, "FBS is disabled.\n");
- pp->fbs_enabled = false;
- }
-
- ahci_start_engine(ap);
-}
-
-static void ahci_pmp_attach(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
- struct ahci_port_priv *pp = ap->private_data;
- u32 cmd;
-
- cmd = readl(port_mmio + PORT_CMD);
- cmd |= PORT_CMD_PMP;
- writel(cmd, port_mmio + PORT_CMD);
-
- ahci_enable_fbs(ap);
-
- pp->intr_mask |= PORT_IRQ_BAD_PMP;
- writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
-}
-
-static void ahci_pmp_detach(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
- struct ahci_port_priv *pp = ap->private_data;
- u32 cmd;
-
- ahci_disable_fbs(ap);
-
- cmd = readl(port_mmio + PORT_CMD);
- cmd &= ~PORT_CMD_PMP;
- writel(cmd, port_mmio + PORT_CMD);
-
- pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
- writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
-}
-
-static int ahci_port_resume(struct ata_port *ap)
-{
- ahci_power_up(ap);
- ahci_start_port(ap);
-
- if (sata_pmp_attached(ap))
- ahci_pmp_attach(ap);
- else
- ahci_pmp_detach(ap);
-
- return 0;
-}
-
#ifdef CONFIG_PM
-static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
-{
- const char *emsg = NULL;
- int rc;
-
- rc = ahci_deinit_port(ap, &emsg);
- if (rc == 0)
- ahci_power_down(ap);
- else {
- ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
- ahci_start_port(ap);
- }
-
- return rc;
-}
-
static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
struct ahci_host_priv *hpriv = host->private_data;
- void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
+ void __iomem *mmio = hpriv->mmio;
u32 ctl;
if (mesg.event & PM_EVENT_SUSPEND &&
@@ -2675,11 +627,11 @@ static int ahci_pci_device_resume(struct pci_dev *pdev)
return rc;
if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
- rc = ahci_reset_controller(host);
+ rc = ahci_pci_reset_controller(host);
if (rc)
return rc;
- ahci_init_controller(host);
+ ahci_pci_init_controller(host);
}
ata_host_resume(host);
@@ -2688,92 +640,6 @@ static int ahci_pci_device_resume(struct pci_dev *pdev)
}
#endif
-static int ahci_port_start(struct ata_port *ap)
-{
- struct ahci_host_priv *hpriv = ap->host->private_data;
- struct device *dev = ap->host->dev;
- struct ahci_port_priv *pp;
- void *mem;
- dma_addr_t mem_dma;
- size_t dma_sz, rx_fis_sz;
-
- pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
- if (!pp)
- return -ENOMEM;
-
- /* check FBS capability */
- if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) {
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 cmd = readl(port_mmio + PORT_CMD);
- if (cmd & PORT_CMD_FBSCP)
- pp->fbs_supported = true;
- else
- dev_printk(KERN_WARNING, dev,
- "The port is not capable of FBS\n");
- }
-
- if (pp->fbs_supported) {
- dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ;
- rx_fis_sz = AHCI_RX_FIS_SZ * 16;
- } else {
- dma_sz = AHCI_PORT_PRIV_DMA_SZ;
- rx_fis_sz = AHCI_RX_FIS_SZ;
- }
-
- mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL);
- if (!mem)
- return -ENOMEM;
- memset(mem, 0, dma_sz);
-
- /*
- * First item in chunk of DMA memory: 32-slot command table,
- * 32 bytes each in size
- */
- pp->cmd_slot = mem;
- pp->cmd_slot_dma = mem_dma;
-
- mem += AHCI_CMD_SLOT_SZ;
- mem_dma += AHCI_CMD_SLOT_SZ;
-
- /*
- * Second item: Received-FIS area
- */
- pp->rx_fis = mem;
- pp->rx_fis_dma = mem_dma;
-
- mem += rx_fis_sz;
- mem_dma += rx_fis_sz;
-
- /*
- * Third item: data area for storing a single command
- * and its scatter-gather table
- */
- pp->cmd_tbl = mem;
- pp->cmd_tbl_dma = mem_dma;
-
- /*
- * Save off initial list of interrupts to be enabled.
- * This could be changed later
- */
- pp->intr_mask = DEF_PORT_IRQ;
-
- ap->private_data = pp;
-
- /* engage engines, captain */
- return ahci_port_resume(ap);
-}
-
-static void ahci_port_stop(struct ata_port *ap)
-{
- const char *emsg = NULL;
- int rc;
-
- /* de-initialize port */
- rc = ahci_deinit_port(ap, &emsg);
- if (rc)
- ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
-}
-
static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
{
int rc;
@@ -2806,31 +672,12 @@ static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
return 0;
}
-static void ahci_print_info(struct ata_host *host)
+static void ahci_pci_print_info(struct ata_host *host)
{
- struct ahci_host_priv *hpriv = host->private_data;
struct pci_dev *pdev = to_pci_dev(host->dev);
- void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
- u32 vers, cap, cap2, impl, speed;
- const char *speed_s;
u16 cc;
const char *scc_s;
- vers = readl(mmio + HOST_VERSION);
- cap = hpriv->cap;
- cap2 = hpriv->cap2;
- impl = hpriv->port_map;
-
- speed = (cap >> 20) & 0xf;
- if (speed == 1)
- speed_s = "1.5";
- else if (speed == 2)
- speed_s = "3";
- else if (speed == 3)
- speed_s = "6";
- else
- speed_s = "?";
-
pci_read_config_word(pdev, 0x0a, &cc);
if (cc == PCI_CLASS_STORAGE_IDE)
scc_s = "IDE";
@@ -2841,50 +688,7 @@ static void ahci_print_info(struct ata_host *host)
else
scc_s = "unknown";
- dev_printk(KERN_INFO, &pdev->dev,
- "AHCI %02x%02x.%02x%02x "
- "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
- ,
-
- (vers >> 24) & 0xff,
- (vers >> 16) & 0xff,
- (vers >> 8) & 0xff,
- vers & 0xff,
-
- ((cap >> 8) & 0x1f) + 1,
- (cap & 0x1f) + 1,
- speed_s,
- impl,
- scc_s);
-
- dev_printk(KERN_INFO, &pdev->dev,
- "flags: "
- "%s%s%s%s%s%s%s"
- "%s%s%s%s%s%s%s"
- "%s%s%s%s%s%s\n"
- ,
-
- cap & HOST_CAP_64 ? "64bit " : "",
- cap & HOST_CAP_NCQ ? "ncq " : "",
- cap & HOST_CAP_SNTF ? "sntf " : "",
- cap & HOST_CAP_MPS ? "ilck " : "",
- cap & HOST_CAP_SSS ? "stag " : "",
- cap & HOST_CAP_ALPM ? "pm " : "",
- cap & HOST_CAP_LED ? "led " : "",
- cap & HOST_CAP_CLO ? "clo " : "",
- cap & HOST_CAP_ONLY ? "only " : "",
- cap & HOST_CAP_PMP ? "pmp " : "",
- cap & HOST_CAP_FBS ? "fbs " : "",
- cap & HOST_CAP_PIO_MULTI ? "pio " : "",
- cap & HOST_CAP_SSC ? "slum " : "",
- cap & HOST_CAP_PART ? "part " : "",
- cap & HOST_CAP_CCC ? "ccc " : "",
- cap & HOST_CAP_EMS ? "ems " : "",
- cap & HOST_CAP_SXS ? "sxs " : "",
- cap2 & HOST_CAP2_APST ? "apst " : "",
- cap2 & HOST_CAP2_NVMHCI ? "nvmp " : "",
- cap2 & HOST_CAP2_BOH ? "boh " : ""
- );
+ ahci_print_info(host, scc_s);
}
/* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
@@ -3308,41 +1112,28 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
pci_intx(pdev, 1);
+ hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
+
/* save initial config */
- ahci_save_initial_config(pdev, hpriv);
+ ahci_pci_save_initial_config(pdev, hpriv);
/* prepare host */
if (hpriv->cap & HOST_CAP_NCQ) {
pi.flags |= ATA_FLAG_NCQ;
- /* Auto-activate optimization is supposed to be supported on
- all AHCI controllers indicating NCQ support, but it seems
- to be broken at least on some NVIDIA MCP79 chipsets.
- Until we get info on which NVIDIA chipsets don't have this
- issue, if any, disable AA on all NVIDIA AHCIs. */
- if (pdev->vendor != PCI_VENDOR_ID_NVIDIA)
+ /*
+ * Auto-activate optimization is supposed to be
+ * supported on all AHCI controllers indicating NCQ
+ * capability, but it seems to be broken on some
+ * chipsets including NVIDIAs.
+ */
+ if (!(hpriv->flags & AHCI_HFLAG_NO_FPDMA_AA))
pi.flags |= ATA_FLAG_FPDMA_AA;
}
if (hpriv->cap & HOST_CAP_PMP)
pi.flags |= ATA_FLAG_PMP;
- if (ahci_em_messages && (hpriv->cap & HOST_CAP_EMS)) {
- u8 messages;
- void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
- u32 em_loc = readl(mmio + HOST_EM_LOC);
- u32 em_ctl = readl(mmio + HOST_EM_CTL);
-
- messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
-
- /* we only support LED message type right now */
- if ((messages & 0x01) && (ahci_em_messages == 1)) {
- /* store em_loc */
- hpriv->em_loc = ((em_loc >> 16) * 4);
- pi.flags |= ATA_FLAG_EM;
- if (!(em_ctl & EM_CTL_ALHD))
- pi.flags |= ATA_FLAG_SW_ACTIVITY;
- }
- }
+ ahci_set_em_messages(hpriv, &pi);
if (ahci_broken_system_poweroff(pdev)) {
pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN;
@@ -3372,7 +1163,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
if (!host)
return -ENOMEM;
- host->iomap = pcim_iomap_table(pdev);
host->private_data = hpriv;
if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
@@ -3414,12 +1204,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- rc = ahci_reset_controller(host);
+ rc = ahci_pci_reset_controller(host);
if (rc)
return rc;
- ahci_init_controller(host);
- ahci_print_info(host);
+ ahci_pci_init_controller(host);
+ ahci_pci_print_info(host);
pci_set_master(pdev);
return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
new file mode 100644
index 000000000000..733def27df07
--- /dev/null
+++ b/drivers/ata/ahci.h
@@ -0,0 +1,333 @@
+/*
+ * ahci.h - Common AHCI SATA definitions and declarations
+ *
+ * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Please ALWAYS copy linux-ide@vger.kernel.org
+ * on emails.
+ *
+ * Copyright 2004-2005 Red Hat, Inc.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * AHCI hardware documentation:
+ * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
+ * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
+ *
+ */
+
+#ifndef _AHCI_H
+#define _AHCI_H
+
+#include <linux/libata.h>
+
+/* Enclosure Management Control */
+#define EM_CTRL_MSG_TYPE 0x000f0000
+
+/* Enclosure Management LED Message Type */
+#define EM_MSG_LED_HBA_PORT 0x0000000f
+#define EM_MSG_LED_PMP_SLOT 0x0000ff00
+#define EM_MSG_LED_VALUE 0xffff0000
+#define EM_MSG_LED_VALUE_ACTIVITY 0x00070000
+#define EM_MSG_LED_VALUE_OFF 0xfff80000
+#define EM_MSG_LED_VALUE_ON 0x00010000
+
+enum {
+ AHCI_MAX_PORTS = 32,
+ AHCI_MAX_SG = 168, /* hardware max is 64K */
+ AHCI_DMA_BOUNDARY = 0xffffffff,
+ AHCI_MAX_CMDS = 32,
+ AHCI_CMD_SZ = 32,
+ AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
+ AHCI_RX_FIS_SZ = 256,
+ AHCI_CMD_TBL_CDB = 0x40,
+ AHCI_CMD_TBL_HDR_SZ = 0x80,
+ AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
+ AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
+ AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
+ AHCI_RX_FIS_SZ,
+ AHCI_PORT_PRIV_FBS_DMA_SZ = AHCI_CMD_SLOT_SZ +
+ AHCI_CMD_TBL_AR_SZ +
+ (AHCI_RX_FIS_SZ * 16),
+ AHCI_IRQ_ON_SG = (1 << 31),
+ AHCI_CMD_ATAPI = (1 << 5),
+ AHCI_CMD_WRITE = (1 << 6),
+ AHCI_CMD_PREFETCH = (1 << 7),
+ AHCI_CMD_RESET = (1 << 8),
+ AHCI_CMD_CLR_BUSY = (1 << 10),
+
+ RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
+ RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
+ RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
+
+ /* global controller registers */
+ HOST_CAP = 0x00, /* host capabilities */
+ HOST_CTL = 0x04, /* global host control */
+ HOST_IRQ_STAT = 0x08, /* interrupt status */
+ HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
+ HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
+ HOST_EM_LOC = 0x1c, /* Enclosure Management location */
+ HOST_EM_CTL = 0x20, /* Enclosure Management Control */
+ HOST_CAP2 = 0x24, /* host capabilities, extended */
+
+ /* HOST_CTL bits */
+ HOST_RESET = (1 << 0), /* reset controller; self-clear */
+ HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
+ HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
+
+ /* HOST_CAP bits */
+ HOST_CAP_SXS = (1 << 5), /* Supports External SATA */
+ HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */
+ HOST_CAP_CCC = (1 << 7), /* Command Completion Coalescing */
+ HOST_CAP_PART = (1 << 13), /* Partial state capable */
+ HOST_CAP_SSC = (1 << 14), /* Slumber state capable */
+ HOST_CAP_PIO_MULTI = (1 << 15), /* PIO multiple DRQ support */
+ HOST_CAP_FBS = (1 << 16), /* FIS-based switching support */
+ HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
+ HOST_CAP_ONLY = (1 << 18), /* Supports AHCI mode only */
+ HOST_CAP_CLO = (1 << 24), /* Command List Override support */
+ HOST_CAP_LED = (1 << 25), /* Supports activity LED */
+ HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
+ HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
+ HOST_CAP_MPS = (1 << 28), /* Mechanical presence switch */
+ HOST_CAP_SNTF = (1 << 29), /* SNotification register */
+ HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
+ HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
+
+ /* HOST_CAP2 bits */
+ HOST_CAP2_BOH = (1 << 0), /* BIOS/OS handoff supported */
+ HOST_CAP2_NVMHCI = (1 << 1), /* NVMHCI supported */
+ HOST_CAP2_APST = (1 << 2), /* Automatic partial to slumber */
+
+ /* registers for each SATA port */
+ PORT_LST_ADDR = 0x00, /* command list DMA addr */
+ PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
+ PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
+ PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
+ PORT_IRQ_STAT = 0x10, /* interrupt status */
+ PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
+ PORT_CMD = 0x18, /* port command */
+ PORT_TFDATA = 0x20, /* taskfile data */
+ PORT_SIG = 0x24, /* device TF signature */
+ PORT_CMD_ISSUE = 0x38, /* command issue */
+ PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
+ PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
+ PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
+ PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
+ PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */
+ PORT_FBS = 0x40, /* FIS-based Switching */
+
+ /* PORT_IRQ_{STAT,MASK} bits */
+ PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
+ PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
+ PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
+ PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
+ PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
+ PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
+ PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
+ PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
+
+ PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
+ PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
+ PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
+ PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
+ PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
+ PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
+ PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
+ PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
+ PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
+
+ PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
+ PORT_IRQ_IF_ERR |
+ PORT_IRQ_CONNECT |
+ PORT_IRQ_PHYRDY |
+ PORT_IRQ_UNK_FIS |
+ PORT_IRQ_BAD_PMP,
+ PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
+ PORT_IRQ_TF_ERR |
+ PORT_IRQ_HBUS_DATA_ERR,
+ DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
+ PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
+ PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
+
+ /* PORT_CMD bits */
+ PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
+ PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
+ PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
+ PORT_CMD_FBSCP = (1 << 22), /* FBS Capable Port */
+ PORT_CMD_PMP = (1 << 17), /* PMP attached */
+ PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
+ PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
+ PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
+ PORT_CMD_CLO = (1 << 3), /* Command list override */
+ PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
+ PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
+ PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
+
+ PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
+ PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
+ PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
+ PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
+
+ PORT_FBS_DWE_OFFSET = 16, /* FBS device with error offset */
+ PORT_FBS_ADO_OFFSET = 12, /* FBS active dev optimization offset */
+ PORT_FBS_DEV_OFFSET = 8, /* FBS device to issue offset */
+ PORT_FBS_DEV_MASK = (0xf << PORT_FBS_DEV_OFFSET), /* FBS.DEV */
+ PORT_FBS_SDE = (1 << 2), /* FBS single device error */
+ PORT_FBS_DEC = (1 << 1), /* FBS device error clear */
+ PORT_FBS_EN = (1 << 0), /* Enable FBS */
+
+ /* hpriv->flags bits */
+ AHCI_HFLAG_NO_NCQ = (1 << 0),
+ AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
+ AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
+ AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
+ AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
+ AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
+ AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
+ AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
+ AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
+ AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
+ AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */
+ AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as
+ link offline */
+ AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */
+ AHCI_HFLAG_NO_FPDMA_AA = (1 << 13), /* no FPDMA AA */
+
+ /* ap->flags bits */
+
+ AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+ ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
+ ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
+ ATA_FLAG_IPM,
+
+ ICH_MAP = 0x90, /* ICH MAP register */
+
+ /* em constants */
+ EM_MAX_SLOTS = 8,
+ EM_MAX_RETRY = 5,
+
+ /* em_ctl bits */
+ EM_CTL_RST = (1 << 9), /* Reset */
+ EM_CTL_TM = (1 << 8), /* Transmit Message */
+ EM_CTL_ALHD = (1 << 26), /* Activity LED */
+};
+
+struct ahci_cmd_hdr {
+ __le32 opts;
+ __le32 status;
+ __le32 tbl_addr;
+ __le32 tbl_addr_hi;
+ __le32 reserved[4];
+};
+
+struct ahci_sg {
+ __le32 addr;
+ __le32 addr_hi;
+ __le32 reserved;
+ __le32 flags_size;
+};
+
+struct ahci_em_priv {
+ enum sw_activity blink_policy;
+ struct timer_list timer;
+ unsigned long saved_activity;
+ unsigned long activity;
+ unsigned long led_state;
+};
+
+struct ahci_port_priv {
+ struct ata_link *active_link;
+ struct ahci_cmd_hdr *cmd_slot;
+ dma_addr_t cmd_slot_dma;
+ void *cmd_tbl;
+ dma_addr_t cmd_tbl_dma;
+ void *rx_fis;
+ dma_addr_t rx_fis_dma;
+ /* for NCQ spurious interrupt analysis */
+ unsigned int ncq_saw_d2h:1;
+ unsigned int ncq_saw_dmas:1;
+ unsigned int ncq_saw_sdb:1;
+ u32 intr_mask; /* interrupts to enable */
+ bool fbs_supported; /* set iff FBS is supported */
+ bool fbs_enabled; /* set iff FBS is enabled */
+ int fbs_last_dev; /* save FBS.DEV of last FIS */
+ /* enclosure management info per PM slot */
+ struct ahci_em_priv em_priv[EM_MAX_SLOTS];
+};
+
+struct ahci_host_priv {
+ void __iomem *mmio; /* bus-independent mem map */
+ unsigned int flags; /* AHCI_HFLAG_* */
+ u32 cap; /* cap to use */
+ u32 cap2; /* cap2 to use */
+ u32 port_map; /* port map to use */
+ u32 saved_cap; /* saved initial cap */
+ u32 saved_cap2; /* saved initial cap2 */
+ u32 saved_port_map; /* saved initial port_map */
+ u32 em_loc; /* enclosure management location */
+};
+
+extern int ahci_em_messages;
+extern int ahci_ignore_sss;
+
+extern struct scsi_host_template ahci_sht;
+extern struct ata_port_operations ahci_ops;
+
+void ahci_save_initial_config(struct device *dev,
+ struct ahci_host_priv *hpriv,
+ unsigned int force_port_map,
+ unsigned int mask_port_map);
+void ahci_init_controller(struct ata_host *host);
+int ahci_reset_controller(struct ata_host *host);
+
+int ahci_do_softreset(struct ata_link *link, unsigned int *class,
+ int pmp, unsigned long deadline,
+ int (*check_ready)(struct ata_link *link));
+
+int ahci_stop_engine(struct ata_port *ap);
+void ahci_start_engine(struct ata_port *ap);
+int ahci_check_ready(struct ata_link *link);
+int ahci_kick_engine(struct ata_port *ap);
+void ahci_set_em_messages(struct ahci_host_priv *hpriv,
+ struct ata_port_info *pi);
+int ahci_reset_em(struct ata_host *host);
+irqreturn_t ahci_interrupt(int irq, void *dev_instance);
+void ahci_print_info(struct ata_host *host, const char *scc_s);
+
+static inline void __iomem *__ahci_port_base(struct ata_host *host,
+ unsigned int port_no)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+
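+ /* per the AHCI spec, port register blocks start at offset 0x100 and are 0x80 bytes apart */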
+ return mmio + 0x100 + (port_no * 0x80);
+}
+
+static inline void __iomem *ahci_port_base(struct ata_port *ap)
+{
+ return __ahci_port_base(ap->host, ap->port_no);
+}
+
+static inline int ahci_nr_ports(u32 cap)
+{
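+ /* CAP.NP (bits 4:0) holds the number of ports minus one */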
+ return (cap & 0x1f) + 1;
+}
+
+#endif /* _AHCI_H */
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
new file mode 100644
index 000000000000..19bfd1d76d40
--- /dev/null
+++ b/drivers/ata/ahci_platform.c
@@ -0,0 +1,192 @@
+/*
+ * AHCI SATA platform driver
+ *
+ * Copyright 2004-2005 Red Hat, Inc.
+ * Jeff Garzik <jgarzik@pobox.com>
+ * Copyright 2010 MontaVista Software, LLC.
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/libata.h>
+#include <linux/ahci_platform.h>
+#include "ahci.h"
+
+static int __init ahci_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ahci_platform_data *pdata = dev->platform_data;
+ struct ata_port_info pi = {
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ };
+ const struct ata_port_info *ppi[] = { &pi, NULL };
+ struct ahci_host_priv *hpriv;
+ struct ata_host *host;
+ struct resource *mem;
+ int irq;
+ int n_ports;
+ int i;
+ int rc;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(dev, "no mmio space\n");
+ return -EINVAL;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(dev, "no irq\n");
+ return -EINVAL;
+ }
+
+ if (pdata && pdata->init) {
+ rc = pdata->init(dev);
+ if (rc)
+ return rc;
+ }
+
+ if (pdata && pdata->ata_port_info)
+ pi = *pdata->ata_port_info;
+
+ hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
+ if (!hpriv) {
+ rc = -ENOMEM;
+ goto err0;
+ }
+
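+ /* pi.private_data, when set by the platform's ata_port_info, carries extra AHCI_HFLAG_* bits */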
+ hpriv->flags |= (unsigned long)pi.private_data;
+
+ hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
+ if (!hpriv->mmio) {
+ dev_err(dev, "can't map %pR\n", mem);
+ rc = -ENOMEM;
+ goto err0;
+ }
+
+ ahci_save_initial_config(dev, hpriv,
+ pdata ? pdata->force_port_map : 0,
+ pdata ? pdata->mask_port_map : 0);
+
+ /* prepare host */
+ if (hpriv->cap & HOST_CAP_NCQ)
+ pi.flags |= ATA_FLAG_NCQ;
+
+ if (hpriv->cap & HOST_CAP_PMP)
+ pi.flags |= ATA_FLAG_PMP;
+
+ ahci_set_em_messages(hpriv, &pi);
+
+ /* CAP.NP sometimes indicates the index of the last enabled
+ * port, at other times, that of the last possible port, so
+ * determining the maximum port number requires looking at
+ * both CAP.NP and port_map.
+ */
+ n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
+
+ host = ata_host_alloc_pinfo(dev, ppi, n_ports);
+ if (!host) {
+ rc = -ENOMEM;
+ goto err0;
+ }
+
+ host->private_data = hpriv;
+
+ if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
+ host->flags |= ATA_HOST_PARALLEL_SCAN;
+ else
+ printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
+
+ if (pi.flags & ATA_FLAG_EM)
+ ahci_reset_em(host);
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ ata_port_desc(ap, "mmio %pR", mem);
+ ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);
+
+ /* set initial link pm policy */
+ ap->pm_policy = NOT_AVAILABLE;
+
+ /* set enclosure management message type */
+ if (ap->flags & ATA_FLAG_EM)
+ ap->em_message_type = ahci_em_messages;
+
+ /* disabled/not-implemented port */
+ if (!(hpriv->port_map & (1 << i)))
+ ap->ops = &ata_dummy_port_ops;
+ }
+
+ rc = ahci_reset_controller(host);
+ if (rc)
+ goto err0;
+
+ ahci_init_controller(host);
+ ahci_print_info(host, "platform");
+
+ rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED,
+ &ahci_sht);
+ if (rc)
+ goto err0;
+
+ return 0;
+err0:
+ if (pdata && pdata->exit)
+ pdata->exit(dev);
+ return rc;
+}
+
+static int __devexit ahci_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ahci_platform_data *pdata = dev->platform_data;
+ struct ata_host *host = dev_get_drvdata(dev);
+
+ ata_host_detach(host);
+
+ if (pdata && pdata->exit)
+ pdata->exit(dev);
+
+ return 0;
+}
+
+static struct platform_driver ahci_driver = {
+ .probe = ahci_probe,
+ .remove = __devexit_p(ahci_remove),
+ .driver = {
+ .name = "ahci",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ahci_init(void)
+{
+ return platform_driver_probe(&ahci_driver, ahci_probe);
+}
+module_init(ahci_init);
+
+static void __exit ahci_exit(void)
+{
+ platform_driver_unregister(&ahci_driver);
+}
+module_exit(ahci_exit);
+
+MODULE_DESCRIPTION("AHCI SATA platform driver");
+MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ahci");
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 83bc49fac9bb..ec52fc618763 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -43,7 +43,7 @@
* driver the list of errata that are relevant is below, going back to
* PIIX4. Older device documentation is now a bit tricky to find.
*
- * The chipsets all follow very much the same design. The orginal Triton
+ * The chipsets all follow very much the same design. The original Triton
* series chipsets do _not_ support independant device timings, but this
* is fixed in Triton II. With the odd mobile exception the chips then
* change little except in gaining more modes until SATA arrives. This
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
new file mode 100644
index 000000000000..34fc57d0a9b8
--- /dev/null
+++ b/drivers/ata/libahci.c
@@ -0,0 +1,2110 @@
+/*
+ * libahci.c - Common AHCI SATA low-level routines
+ *
+ * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Please ALWAYS copy linux-ide@vger.kernel.org
+ * on emails.
+ *
+ * Copyright 2004-2005 Red Hat, Inc.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * AHCI hardware documentation:
+ * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
+ * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <linux/libata.h>
+#include "ahci.h"
+
+static int ahci_skip_host_reset;
+int ahci_ignore_sss;
+EXPORT_SYMBOL_GPL(ahci_ignore_sss);
+
+module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
+MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
+
+module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
+MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
+
+static int ahci_enable_alpm(struct ata_port *ap,
+ enum link_pm policy);
+static void ahci_disable_alpm(struct ata_port *ap);
+static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
+static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
+ size_t size);
+static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
+ ssize_t size);
+
+
+
+static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
+static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
+static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
+static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
+static int ahci_port_start(struct ata_port *ap);
+static void ahci_port_stop(struct ata_port *ap);
+static void ahci_qc_prep(struct ata_queued_cmd *qc);
+static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc);
+static void ahci_freeze(struct ata_port *ap);
+static void ahci_thaw(struct ata_port *ap);
+static void ahci_enable_fbs(struct ata_port *ap);
+static void ahci_disable_fbs(struct ata_port *ap);
+static void ahci_pmp_attach(struct ata_port *ap);
+static void ahci_pmp_detach(struct ata_port *ap);
+static int ahci_softreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
+static int ahci_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
+static void ahci_postreset(struct ata_link *link, unsigned int *class);
+static void ahci_error_handler(struct ata_port *ap);
+static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
+static int ahci_port_resume(struct ata_port *ap);
+static void ahci_dev_config(struct ata_device *dev);
+static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
+ u32 opts);
+#ifdef CONFIG_PM
+static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
+#endif
+static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
+static ssize_t ahci_activity_store(struct ata_device *dev,
+ enum sw_activity val);
+static void ahci_init_sw_activity(struct ata_link *link);
+
+static ssize_t ahci_show_host_caps(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t ahci_show_host_cap2(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t ahci_show_host_version(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t ahci_show_port_cmd(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
+static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
+static DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
+static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
+
+static struct device_attribute *ahci_shost_attrs[] = {
+ &dev_attr_link_power_management_policy,
+ &dev_attr_em_message_type,
+ &dev_attr_em_message,
+ &dev_attr_ahci_host_caps,
+ &dev_attr_ahci_host_cap2,
+ &dev_attr_ahci_host_version,
+ &dev_attr_ahci_port_cmd,
+ NULL
+};
+
+static struct device_attribute *ahci_sdev_attrs[] = {
+ &dev_attr_sw_activity,
+ &dev_attr_unload_heads,
+ NULL
+};
+
+struct scsi_host_template ahci_sht = {
+ ATA_NCQ_SHT("ahci"),
+ .can_queue = AHCI_MAX_CMDS - 1,
+ .sg_tablesize = AHCI_MAX_SG,
+ .dma_boundary = AHCI_DMA_BOUNDARY,
+ .shost_attrs = ahci_shost_attrs,
+ .sdev_attrs = ahci_sdev_attrs,
+};
+EXPORT_SYMBOL_GPL(ahci_sht);
+
+struct ata_port_operations ahci_ops = {
+ .inherits = &sata_pmp_port_ops,
+
+ .qc_defer = ahci_pmp_qc_defer,
+ .qc_prep = ahci_qc_prep,
+ .qc_issue = ahci_qc_issue,
+ .qc_fill_rtf = ahci_qc_fill_rtf,
+
+ .freeze = ahci_freeze,
+ .thaw = ahci_thaw,
+ .softreset = ahci_softreset,
+ .hardreset = ahci_hardreset,
+ .postreset = ahci_postreset,
+ .pmp_softreset = ahci_softreset,
+ .error_handler = ahci_error_handler,
+ .post_internal_cmd = ahci_post_internal_cmd,
+ .dev_config = ahci_dev_config,
+
+ .scr_read = ahci_scr_read,
+ .scr_write = ahci_scr_write,
+ .pmp_attach = ahci_pmp_attach,
+ .pmp_detach = ahci_pmp_detach,
+
+ .enable_pm = ahci_enable_alpm,
+ .disable_pm = ahci_disable_alpm,
+ .em_show = ahci_led_show,
+ .em_store = ahci_led_store,
+ .sw_activity_show = ahci_activity_show,
+ .sw_activity_store = ahci_activity_store,
+#ifdef CONFIG_PM
+ .port_suspend = ahci_port_suspend,
+ .port_resume = ahci_port_resume,
+#endif
+ .port_start = ahci_port_start,
+ .port_stop = ahci_port_stop,
+};
+EXPORT_SYMBOL_GPL(ahci_ops);
+
+int ahci_em_messages = 1;
+EXPORT_SYMBOL_GPL(ahci_em_messages);
+module_param(ahci_em_messages, int, 0444);
+/* add other LED protocol types when they become supported */
+MODULE_PARM_DESC(ahci_em_messages,
+ "Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED");
+
+static void ahci_enable_ahci(void __iomem *mmio)
+{
+ int i;
+ u32 tmp;
+
+ /* turn on AHCI_EN */
+ tmp = readl(mmio + HOST_CTL);
+ if (tmp & HOST_AHCI_EN)
+ return;
+
+ /* Some controllers need AHCI_EN to be written multiple times.
+ * Try a few times before giving up.
+ */
+ for (i = 0; i < 5; i++) {
+ tmp |= HOST_AHCI_EN;
+ writel(tmp, mmio + HOST_CTL);
+ tmp = readl(mmio + HOST_CTL); /* flush && sanity check */
+ if (tmp & HOST_AHCI_EN)
+ return;
+ msleep(10);
+ }
+
+ WARN_ON(1);
+}
+
+static ssize_t ahci_show_host_caps(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+
+ return sprintf(buf, "%x\n", hpriv->cap);
+}
+
+static ssize_t ahci_show_host_cap2(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+
+ return sprintf(buf, "%x\n", hpriv->cap2);
+}
+
+static ssize_t ahci_show_host_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+
+ return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION));
+}
+
+static ssize_t ahci_show_port_cmd(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ void __iomem *port_mmio = ahci_port_base(ap);
+
+ return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
+}
+
+/**
+ * ahci_save_initial_config - Save and fixup initial config values
+ * @dev: target AHCI device
+ * @hpriv: host private area to store config values
+ * @force_port_map: force port map to a specified value
+ * @mask_port_map: mask out particular bits from port map
+ *
+ * Some registers containing configuration info might be setup by
+ * BIOS and might be cleared on reset. This function saves the
+ * initial values of those registers into @hpriv such that they
+ * can be restored after controller reset.
+ *
+ * If inconsistent, config values are fixed up by this function.
+ *
+ * LOCKING:
+ * None.
+ */
+void ahci_save_initial_config(struct device *dev,
+ struct ahci_host_priv *hpriv,
+ unsigned int force_port_map,
+ unsigned int mask_port_map)
+{
+ void __iomem *mmio = hpriv->mmio;
+ u32 cap, cap2, vers, port_map;
+ int i;
+
+ /* make sure AHCI mode is enabled before accessing CAP */
+ ahci_enable_ahci(mmio);
+
+ /* Values prefixed with saved_ are written back to host after
+ * reset. Values without are used for driver operation.
+ */
+ hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
+ hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
+
+ /* CAP2 register is only defined for AHCI 1.2 and later */
+ vers = readl(mmio + HOST_VERSION);
+ if ((vers >> 16) > 1 ||
+ ((vers >> 16) == 1 && (vers & 0xFFFF) >= 0x200))
+ hpriv->saved_cap2 = cap2 = readl(mmio + HOST_CAP2);
+ else
+ hpriv->saved_cap2 = cap2 = 0;
+
+ /* some chips have errata preventing 64bit use */
+ if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
+ dev_printk(KERN_INFO, dev,
+ "controller can't do 64bit DMA, forcing 32bit\n");
+ cap &= ~HOST_CAP_64;
+ }
+
+ if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
+ dev_printk(KERN_INFO, dev,
+ "controller can't do NCQ, turning off CAP_NCQ\n");
+ cap &= ~HOST_CAP_NCQ;
+ }
+
+ if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
+ dev_printk(KERN_INFO, dev,
+ "controller can do NCQ, turning on CAP_NCQ\n");
+ cap |= HOST_CAP_NCQ;
+ }
+
+ if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
+ dev_printk(KERN_INFO, dev,
+ "controller can't do PMP, turning off CAP_PMP\n");
+ cap &= ~HOST_CAP_PMP;
+ }
+
+ if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) {
+ dev_printk(KERN_INFO, dev,
+ "controller can't do SNTF, turning off CAP_SNTF\n");
+ cap &= ~HOST_CAP_SNTF;
+ }
+
+ if (force_port_map && port_map != force_port_map) {
+ dev_printk(KERN_INFO, dev, "forcing port_map 0x%x -> 0x%x\n",
+ port_map, force_port_map);
+ port_map = force_port_map;
+ }
+
+ if (mask_port_map) {
+ dev_printk(KERN_ERR, dev, "masking port_map 0x%x -> 0x%x\n",
+ port_map,
+ port_map & mask_port_map);
+ port_map &= mask_port_map;
+ }
+
+ /* cross check port_map and cap.n_ports */
+ if (port_map) {
+ int map_ports = 0;
+
+ for (i = 0; i < AHCI_MAX_PORTS; i++)
+ if (port_map & (1 << i))
+ map_ports++;
+
+ /* If PI has more ports than n_ports, whine, clear
+ * port_map and let it be generated from n_ports.
+ */
+ if (map_ports > ahci_nr_ports(cap)) {
+ dev_printk(KERN_WARNING, dev,
+ "implemented port map (0x%x) contains more "
+ "ports than nr_ports (%u), using nr_ports\n",
+ port_map, ahci_nr_ports(cap));
+ port_map = 0;
+ }
+ }
+
+ /* fabricate port_map from cap.nr_ports */
+ if (!port_map) {
+ port_map = (1 << ahci_nr_ports(cap)) - 1;
+ dev_printk(KERN_WARNING, dev,
+ "forcing PORTS_IMPL to 0x%x\n", port_map);
+
+ /* write the fixed up value to the PI register */
+ hpriv->saved_port_map = port_map;
+ }
+
+ /* record values to use during operation */
+ hpriv->cap = cap;
+ hpriv->cap2 = cap2;
+ hpriv->port_map = port_map;
+}
+EXPORT_SYMBOL_GPL(ahci_save_initial_config);
+
+/**
+ * ahci_restore_initial_config - Restore initial config
+ * @host: target ATA host
+ *
+ * Restore initial config stored by ahci_save_initial_config().
+ *
+ * LOCKING:
+ * None.
+ */
+static void ahci_restore_initial_config(struct ata_host *host)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+
+ writel(hpriv->saved_cap, mmio + HOST_CAP);
+ if (hpriv->saved_cap2)
+ writel(hpriv->saved_cap2, mmio + HOST_CAP2);
+ writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
+ (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
+}
+
+static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
+{
+ static const int offset[] = {
+ [SCR_STATUS] = PORT_SCR_STAT,
+ [SCR_CONTROL] = PORT_SCR_CTL,
+ [SCR_ERROR] = PORT_SCR_ERR,
+ [SCR_ACTIVE] = PORT_SCR_ACT,
+ [SCR_NOTIFICATION] = PORT_SCR_NTF,
+ };
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+
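+ /* SCR_NOTIFICATION is only usable when CAP.SNTF is set; returning 0 marks a register as unavailable */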
+ if (sc_reg < ARRAY_SIZE(offset) &&
+ (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
+ return offset[sc_reg];
+ return 0;
+}
+
+static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
+{
+ void __iomem *port_mmio = ahci_port_base(link->ap);
+ int offset = ahci_scr_offset(link->ap, sc_reg);
+
+ if (offset) {
+ *val = readl(port_mmio + offset);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
+{
+ void __iomem *port_mmio = ahci_port_base(link->ap);
+ int offset = ahci_scr_offset(link->ap, sc_reg);
+
+ if (offset) {
+ writel(val, port_mmio + offset);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int ahci_is_device_present(void __iomem *port_mmio)
+{
+ u8 status = readl(port_mmio + PORT_TFDATA) & 0xff;
+
+ /* Make sure PxTFD.STS.BSY and PxTFD.STS.DRQ are 0 */
+ if (status & (ATA_BUSY | ATA_DRQ))
+ return 0;
+
+ /* Make sure PxSSTS.DET is 3h */
+ status = readl(port_mmio + PORT_SCR_STAT) & 0xf;
+ if (status != 3)
+ return 0;
+ return 1;
+}
+
+void ahci_start_engine(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 tmp;
+
+ if (!ahci_is_device_present(port_mmio))
+ return;
+
+ /* start DMA */
+ tmp = readl(port_mmio + PORT_CMD);
+ tmp |= PORT_CMD_START;
+ writel(tmp, port_mmio + PORT_CMD);
+ readl(port_mmio + PORT_CMD); /* flush */
+}
+EXPORT_SYMBOL_GPL(ahci_start_engine);
+
+int ahci_stop_engine(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 tmp;
+
+ tmp = readl(port_mmio + PORT_CMD);
+
+ /* check if the HBA is idle */
+ if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
+ return 0;
+
+ /* setting HBA to idle */
+ tmp &= ~PORT_CMD_START;
+ writel(tmp, port_mmio + PORT_CMD);
+
+ /* wait for engine to stop. This could be as long as 500 msec */
+ tmp = ata_wait_register(port_mmio + PORT_CMD,
+ PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
+ if (tmp & PORT_CMD_LIST_ON)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ahci_stop_engine);
+
+static void ahci_start_fis_rx(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ struct ahci_port_priv *pp = ap->private_data;
+ u32 tmp;
+
+ /* program the command list and FIS RX buffer addresses */
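+ /* the (x >> 16) >> 16 split avoids an undefined 32-bit shift when dma_addr_t is only 32 bits wide */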
+ if (hpriv->cap & HOST_CAP_64)
+ writel((pp->cmd_slot_dma >> 16) >> 16,
+ port_mmio + PORT_LST_ADDR_HI);
+ writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
+
+ if (hpriv->cap & HOST_CAP_64)
+ writel((pp->rx_fis_dma >> 16) >> 16,
+ port_mmio + PORT_FIS_ADDR_HI);
+ writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
+
+ /* enable FIS reception */
+ tmp = readl(port_mmio + PORT_CMD);
+ tmp |= PORT_CMD_FIS_RX;
+ writel(tmp, port_mmio + PORT_CMD);
+
+ /* flush */
+ readl(port_mmio + PORT_CMD);
+}
+
+static int ahci_stop_fis_rx(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 tmp;
+
+ /* disable FIS reception */
+ tmp = readl(port_mmio + PORT_CMD);
+ tmp &= ~PORT_CMD_FIS_RX;
+ writel(tmp, port_mmio + PORT_CMD);
+
+ /* wait for completion, spec says 500ms, give it 1000 */
+ tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
+ PORT_CMD_FIS_ON, 10, 1000);
+ if (tmp & PORT_CMD_FIS_ON)
+ return -EBUSY;
+
+ return 0;
+}
+
+static void ahci_power_up(struct ata_port *ap)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 cmd;
+
+ cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
+
+ /* spin up device */
+ if (hpriv->cap & HOST_CAP_SSS) {
+ cmd |= PORT_CMD_SPIN_UP;
+ writel(cmd, port_mmio + PORT_CMD);
+ }
+
+ /* wake up link */
+ writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
+}
+
+static void ahci_disable_alpm(struct ata_port *ap)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 cmd;
+ struct ahci_port_priv *pp = ap->private_data;
+
+ /* IPM bits should be disabled by libata-core */
+ /* get the existing command bits */
+ cmd = readl(port_mmio + PORT_CMD);
+
+ /* disable ALPM and ASP */
+ cmd &= ~PORT_CMD_ASP;
+ cmd &= ~PORT_CMD_ALPE;
+
+ /* force the interface back to active */
+ cmd |= PORT_CMD_ICC_ACTIVE;
+
+ /* write out new cmd value */
+ writel(cmd, port_mmio + PORT_CMD);
+ cmd = readl(port_mmio + PORT_CMD);
+
+ /* wait 10ms to be sure we've come out of any low power state */
+ msleep(10);
+
+ /* clear out any PhyRdy stuff from interrupt status */
+ writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
+
+ /* go ahead and clean out PhyRdy Change from Serror too */
+ ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
+
+ /*
+ * Clear flag to indicate that we should ignore all PhyRdy
+ * state changes
+ */
+ hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
+
+ /*
+ * Enable interrupts on Phy Ready.
+ */
+ pp->intr_mask |= PORT_IRQ_PHYRDY;
+ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+
+ /*
+ * don't change the link pm policy - we can be called
+ * just to turn off link pm temporarily
+ */
+}
+
+static int ahci_enable_alpm(struct ata_port *ap,
+ enum link_pm policy)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 cmd;
+ struct ahci_port_priv *pp = ap->private_data;
+ u32 asp;
+
+ /* Make sure the host is capable of link power management */
+ if (!(hpriv->cap & HOST_CAP_ALPM))
+ return -EINVAL;
+
+ switch (policy) {
+ case MAX_PERFORMANCE:
+ case NOT_AVAILABLE:
+ /*
+ * if we came here with NOT_AVAILABLE,
+ * it just means this is the first time we
+ * have tried to enable - default to max performance,
+ * and let the user go to lower power modes on request.
+ */
+ ahci_disable_alpm(ap);
+ return 0;
+ case MIN_POWER:
+ /* configure HBA to enter SLUMBER */
+ asp = PORT_CMD_ASP;
+ break;
+ case MEDIUM_POWER:
+ /* configure HBA to enter PARTIAL */
+ asp = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * Disable interrupts on Phy Ready. This keeps us from
+ * getting woken up due to spurious phy ready interrupts
+ * TBD - Hot plug should be done via polling now, is
+ * that even supported?
+ */
+ pp->intr_mask &= ~PORT_IRQ_PHYRDY;
+ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+
+ /*
+ * Set a flag to indicate that we should ignore all PhyRdy
+ * state changes since these can happen now whenever we
+ * change link state
+ */
+ hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
+
+ /* get the existing command bits */
+ cmd = readl(port_mmio + PORT_CMD);
+
+ /*
+ * Set ASP based on Policy
+ */
+ cmd |= asp;
+
+ /*
+ * Setting this bit will instruct the HBA to aggressively
+ * enter a lower power link state when it's appropriate and
+ * based on the value set above for ASP
+ */
+ cmd |= PORT_CMD_ALPE;
+
+ /* write out new cmd value */
+ writel(cmd, port_mmio + PORT_CMD);
+ cmd = readl(port_mmio + PORT_CMD);
+
+ /* IPM bits should be set by libata-core */
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static void ahci_power_down(struct ata_port *ap)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 cmd, scontrol;
+
+ if (!(hpriv->cap & HOST_CAP_SSS))
+ return;
+
+ /* put device into listen mode, first set PxSCTL.DET to 0 */
+ scontrol = readl(port_mmio + PORT_SCR_CTL);
+ scontrol &= ~0xf;
+ writel(scontrol, port_mmio + PORT_SCR_CTL);
+
+ /* then set PxCMD.SUD to 0 */
+ cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
+ cmd &= ~PORT_CMD_SPIN_UP;
+ writel(cmd, port_mmio + PORT_CMD);
+}
+#endif
+
+static void ahci_start_port(struct ata_port *ap)
+{
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ata_link *link;
+ struct ahci_em_priv *emp;
+ ssize_t rc;
+ int i;
+
+ /* enable FIS reception */
+ ahci_start_fis_rx(ap);
+
+ /* enable DMA */
+ ahci_start_engine(ap);
+
+ /* turn on LEDs */
+ if (ap->flags & ATA_FLAG_EM) {
+ ata_for_each_link(link, ap, EDGE) {
+ emp = &pp->em_priv[link->pmp];
+
+ /* EM Transmit bit may be busy during init */
+ for (i = 0; i < EM_MAX_RETRY; i++) {
+ rc = ahci_transmit_led_message(ap,
+ emp->led_state,
+ 4);
+ if (rc == -EBUSY)
+ msleep(1);
+ else
+ break;
+ }
+ }
+ }
+
+ if (ap->flags & ATA_FLAG_SW_ACTIVITY)
+ ata_for_each_link(link, ap, EDGE)
+ ahci_init_sw_activity(link);
+
+}
+
+static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
+{
+ int rc;
+
+ /* disable DMA */
+ rc = ahci_stop_engine(ap);
+ if (rc) {
+ *emsg = "failed to stop engine";
+ return rc;
+ }
+
+ /* disable FIS reception */
+ rc = ahci_stop_fis_rx(ap);
+ if (rc) {
+ *emsg = "failed stop FIS RX";
+ return rc;
+ }
+
+ return 0;
+}
+
+int ahci_reset_controller(struct ata_host *host)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ u32 tmp;
+
+ /* we must be in AHCI mode, before using anything
+ * AHCI-specific, such as HOST_RESET.
+ */
+ ahci_enable_ahci(mmio);
+
+ /* global controller reset */
+ if (!ahci_skip_host_reset) {
+ tmp = readl(mmio + HOST_CTL);
+ if ((tmp & HOST_RESET) == 0) {
+ writel(tmp | HOST_RESET, mmio + HOST_CTL);
+ readl(mmio + HOST_CTL); /* flush */
+ }
+
+ /*
+ * To perform a host reset, the OS sets HOST_RESET
+ * and polls until the bit reads back as "0".
+ * The reset must complete within 1 second, or
+ * the hardware should be considered fried.
+ */
+ tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
+ HOST_RESET, 10, 1000);
+
+ if (tmp & HOST_RESET) {
+ dev_printk(KERN_ERR, host->dev,
+ "controller reset failed (0x%x)\n", tmp);
+ return -EIO;
+ }
+
+ /* turn on AHCI mode */
+ ahci_enable_ahci(mmio);
+
+ /* Some registers might be cleared on reset. Restore
+ * initial values.
+ */
+ ahci_restore_initial_config(host);
+ } else
+ dev_printk(KERN_INFO, host->dev,
+ "skipping global host reset\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ahci_reset_controller);
+
+static void ahci_sw_activity(struct ata_link *link)
+{
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
+
+ if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
+ return;
+
+ emp->activity++;
+ if (!timer_pending(&emp->timer))
+ mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
+}
+
+static void ahci_sw_activity_blink(unsigned long arg)
+{
+ struct ata_link *link = (struct ata_link *)arg;
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
+ unsigned long led_message = emp->led_state;
+ u32 activity_led_state;
+ unsigned long flags;
+
+ led_message &= EM_MSG_LED_VALUE;
+ led_message |= ap->port_no | (link->pmp << 8);
+
+ /* check to see if we've had activity. If so,
+ * toggle state of LED and reset timer. If not,
+ * turn LED to desired idle state.
+ */
+ spin_lock_irqsave(ap->lock, flags);
+ if (emp->saved_activity != emp->activity) {
+ emp->saved_activity = emp->activity;
+ /* get the current LED state */
+ activity_led_state = led_message & EM_MSG_LED_VALUE_ON;
+
+ if (activity_led_state)
+ activity_led_state = 0;
+ else
+ activity_led_state = 1;
+
+ /* clear old state */
+ led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
+
+ /* toggle state */
+ led_message |= (activity_led_state << 16);
+ mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
+ } else {
+ /* switch to idle */
+ led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
+ if (emp->blink_policy == BLINK_OFF)
+ led_message |= (1 << 16);
+ }
+ spin_unlock_irqrestore(ap->lock, flags);
+ ahci_transmit_led_message(ap, led_message, 4);
+}
+
+static void ahci_init_sw_activity(struct ata_link *link)
+{
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
+
+ /* init activity stats, setup timer */
+ emp->saved_activity = emp->activity = 0;
+ setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);
+
+ /* check our blink policy and set flag for link if it's enabled */
+ if (emp->blink_policy)
+ link->flags |= ATA_LFLAG_SW_ACTIVITY;
+}
+
+int ahci_reset_em(struct ata_host *host)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ u32 em_ctl;
+
+ em_ctl = readl(mmio + HOST_EM_CTL);
+ if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
+ return -EINVAL;
+
+ writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ahci_reset_em);
+
+static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
+ ssize_t size)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ struct ahci_port_priv *pp = ap->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ u32 em_ctl;
+ u32 message[] = {0, 0};
+ unsigned long flags;
+ int pmp;
+ struct ahci_em_priv *emp;
+
+ /* get the slot number from the message */
+ pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
+ if (pmp < EM_MAX_SLOTS)
+ emp = &pp->em_priv[pmp];
+ else
+ return -EINVAL;
+
+ spin_lock_irqsave(ap->lock, flags);
+
+ /*
+ * if we are still busy transmitting a previous message,
+ * do not allow a new one to be sent
+ */
+ em_ctl = readl(mmio + HOST_EM_CTL);
+ if (em_ctl & EM_CTL_TM) {
+ spin_unlock_irqrestore(ap->lock, flags);
+ return -EBUSY;
+ }
+
+ /*
+ * create message header - this is all zero except for
+ * the message size, which is 4 bytes.
+ */
+ message[0] |= (4 << 8);
+
+ /* ignore 0:4 of byte zero, fill in port info yourself */
+ message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);
+
+ /* write message to EM_LOC */
+ writel(message[0], mmio + hpriv->em_loc);
+ writel(message[1], mmio + hpriv->em_loc+4);
+
+ /* save off new led state for port/slot */
+ emp->led_state = state;
+
+ /*
+ * tell hardware to transmit the message
+ */
+ writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
+
+ spin_unlock_irqrestore(ap->lock, flags);
+ return size;
+}
+
+static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
+{
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ata_link *link;
+ struct ahci_em_priv *emp;
+ int rc = 0;
+
+ ata_for_each_link(link, ap, EDGE) {
+ emp = &pp->em_priv[link->pmp];
+ rc += sprintf(buf, "%lx\n", emp->led_state);
+ }
+ return rc;
+}
+
+static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
+ size_t size)
+{
+ int state;
+ int pmp;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_em_priv *emp;
+
+ state = simple_strtoul(buf, NULL, 0);
+
+ /* get the slot number from the message */
+ pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
+ if (pmp < EM_MAX_SLOTS)
+ emp = &pp->em_priv[pmp];
+ else
+ return -EINVAL;
+
+ /* mask off the activity bits if we are in sw_activity
+ * mode, user should turn off sw_activity before setting
+ * activity led through em_message
+ */
+ if (emp->blink_policy)
+ state &= ~EM_MSG_LED_VALUE_ACTIVITY;
+
+ return ahci_transmit_led_message(ap, state, size);
+}
+
+static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
+{
+ struct ata_link *link = dev->link;
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
+ u32 port_led_state = emp->led_state;
+
+ /* save the desired Activity LED behavior */
+ if (val == OFF) {
+ /* clear LFLAG */
+ link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
+
+ /* set the LED to OFF */
+ port_led_state &= EM_MSG_LED_VALUE_OFF;
+ port_led_state |= (ap->port_no | (link->pmp << 8));
+ ahci_transmit_led_message(ap, port_led_state, 4);
+ } else {
+ link->flags |= ATA_LFLAG_SW_ACTIVITY;
+ if (val == BLINK_OFF) {
+ /* set LED to ON for idle */
+ port_led_state &= EM_MSG_LED_VALUE_OFF;
+ port_led_state |= (ap->port_no | (link->pmp << 8));
+ port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
+ ahci_transmit_led_message(ap, port_led_state, 4);
+ }
+ }
+ emp->blink_policy = val;
+ return 0;
+}
+
+static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
+{
+ struct ata_link *link = dev->link;
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
+
+ /* display the saved value of activity behavior for this
+ * disk.
+ */
+ return sprintf(buf, "%d\n", emp->blink_policy);
+}
+
+static void ahci_port_init(struct device *dev, struct ata_port *ap,
+ int port_no, void __iomem *mmio,
+ void __iomem *port_mmio)
+{
+ const char *emsg = NULL;
+ int rc;
+ u32 tmp;
+
+ /* make sure port is not active */
+ rc = ahci_deinit_port(ap, &emsg);
+ if (rc)
+ dev_warn(dev, "%s (%d)\n", emsg, rc);
+
+ /* clear SError */
+ tmp = readl(port_mmio + PORT_SCR_ERR);
+ VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
+ writel(tmp, port_mmio + PORT_SCR_ERR);
+
+ /* clear port IRQ */
+ tmp = readl(port_mmio + PORT_IRQ_STAT);
+ VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
+ if (tmp)
+ writel(tmp, port_mmio + PORT_IRQ_STAT);
+
+ writel(1 << port_no, mmio + HOST_IRQ_STAT);
+}
+
+void ahci_init_controller(struct ata_host *host)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ int i;
+ void __iomem *port_mmio;
+ u32 tmp;
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ port_mmio = ahci_port_base(ap);
+ if (ata_port_is_dummy(ap))
+ continue;
+
+ ahci_port_init(host->dev, ap, i, mmio, port_mmio);
+ }
+
+ tmp = readl(mmio + HOST_CTL);
+ VPRINTK("HOST_CTL 0x%x\n", tmp);
+ writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
+ tmp = readl(mmio + HOST_CTL);
+ VPRINTK("HOST_CTL 0x%x\n", tmp);
+}
+EXPORT_SYMBOL_GPL(ahci_init_controller);
+
+static void ahci_dev_config(struct ata_device *dev)
+{
+ struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
+
+ if (hpriv->flags & AHCI_HFLAG_SECT255) {
+ dev->max_sectors = 255;
+ ata_dev_printk(dev, KERN_INFO,
+ "SB600 AHCI: limiting to 255 sectors per cmd\n");
+ }
+}
+
+static unsigned int ahci_dev_classify(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ata_taskfile tf;
+ u32 tmp;
+
+ tmp = readl(port_mmio + PORT_SIG);
+ tf.lbah = (tmp >> 24) & 0xff;
+ tf.lbam = (tmp >> 16) & 0xff;
+ tf.lbal = (tmp >> 8) & 0xff;
+ tf.nsect = (tmp) & 0xff;
+
+ return ata_dev_classify(&tf);
+}
+
+static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
+ u32 opts)
+{
+ dma_addr_t cmd_tbl_dma;
+
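+ /* each command tag owns its own AHCI_CMD_TBL_SZ-sized slice of the command table area */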
+ cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
+
+ pp->cmd_slot[tag].opts = cpu_to_le32(opts);
+ pp->cmd_slot[tag].status = 0;
+ pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
+ pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
+}
+
+int ahci_kick_engine(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
+ u32 tmp;
+ int busy, rc;
+
+ /* stop engine */
+ rc = ahci_stop_engine(ap);
+ if (rc)
+ goto out_restart;
+
+ /* need to do CLO?
+ * always do CLO if PMP is attached (AHCI-1.3 9.2)
+ */
+ busy = status & (ATA_BUSY | ATA_DRQ);
+ if (!busy && !sata_pmp_attached(ap)) {
+ rc = 0;
+ goto out_restart;
+ }
+
+ if (!(hpriv->cap & HOST_CAP_CLO)) {
+ rc = -EOPNOTSUPP;
+ goto out_restart;
+ }
+
+ /* perform CLO */
+ tmp = readl(port_mmio + PORT_CMD);
+ tmp |= PORT_CMD_CLO;
+ writel(tmp, port_mmio + PORT_CMD);
+
+ rc = 0;
+ tmp = ata_wait_register(port_mmio + PORT_CMD,
+ PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
+ if (tmp & PORT_CMD_CLO)
+ rc = -EIO;
+
+ /* restart engine */
+ out_restart:
+ ahci_start_engine(ap);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ahci_kick_engine);
+
+static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
+ struct ata_taskfile *tf, int is_cmd, u16 flags,
+ unsigned long timeout_msec)
+{
+ const u32 cmd_fis_len = 5; /* five dwords */
+ struct ahci_port_priv *pp = ap->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u8 *fis = pp->cmd_tbl;
+ u32 tmp;
+
+ /* prep the command */
+ ata_tf_to_fis(tf, pmp, is_cmd, fis);
+ ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
+
+ /* issue & wait */
+ writel(1, port_mmio + PORT_CMD_ISSUE);
+
+ if (timeout_msec) {
+ tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
+ 1, timeout_msec);
+ if (tmp & 0x1) {
+ ahci_kick_engine(ap);
+ return -EBUSY;
+ }
+ } else
+ readl(port_mmio + PORT_CMD_ISSUE); /* flush */
+
+ return 0;
+}
+
+int ahci_do_softreset(struct ata_link *link, unsigned int *class,
+ int pmp, unsigned long deadline,
+ int (*check_ready)(struct ata_link *link))
+{
+ struct ata_port *ap = link->ap;
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ const char *reason = NULL;
+ unsigned long now, msecs;
+ struct ata_taskfile tf;
+ int rc;
+
+ DPRINTK("ENTER\n");
+
+ /* prepare for SRST (AHCI-1.1 10.4.1) */
+ rc = ahci_kick_engine(ap);
+ if (rc && rc != -EOPNOTSUPP)
+ ata_link_printk(link, KERN_WARNING,
+ "failed to reset engine (errno=%d)\n", rc);
+
+ ata_tf_init(link->device, &tf);
+
+ /* issue the first D2H Register FIS */
+ msecs = 0;
+ now = jiffies;
+ if (time_after(deadline, now))
+ msecs = jiffies_to_msecs(deadline - now);
+
+ tf.ctl |= ATA_SRST;
+ if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
+ AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
+ rc = -EIO;
+ reason = "1st FIS failed";
+ goto fail;
+ }
+
+ /* spec says at least 5us, but be generous and sleep for 1ms */
+ msleep(1);
+
+ /* issue the second D2H Register FIS */
+ tf.ctl &= ~ATA_SRST;
+ ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
+
+ /* wait for link to become ready */
+ rc = ata_wait_after_reset(link, deadline, check_ready);
+ if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) {
+ /*
+ * Workaround for cases where link online status can't
+ * be trusted. Treat device readiness timeout as link
+ * offline.
+ */
+ ata_link_printk(link, KERN_INFO,
+ "device not ready, treating as offline\n");
+ *class = ATA_DEV_NONE;
+ } else if (rc) {
+ /* link occupied, -ENODEV too is an error */
+ reason = "device not ready";
+ goto fail;
+ } else
+ *class = ahci_dev_classify(ap);
+
+ DPRINTK("EXIT, class=%u\n", *class);
+ return 0;
+
+ fail:
+ ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
+ return rc;
+}
+
+int ahci_check_ready(struct ata_link *link)
+{
+ void __iomem *port_mmio = ahci_port_base(link->ap);
+ u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
+
+ return ata_check_ready(status);
+}
+EXPORT_SYMBOL_GPL(ahci_check_ready);
+
+static int ahci_softreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ int pmp = sata_srst_pmp(link);
+
+ DPRINTK("ENTER\n");
+
+ return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
+}
+EXPORT_SYMBOL_GPL(ahci_do_softreset);
+
+static int ahci_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+ struct ata_taskfile tf;
+ bool online;
+ int rc;
+
+ DPRINTK("ENTER\n");
+
+ ahci_stop_engine(ap);
+
+ /* clear D2H reception area to properly wait for D2H FIS */
+ ata_tf_init(link->device, &tf);
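+ /* 0x80 is ATA_BUSY: keep the scratch status busy until a real D2H FIS overwrites it */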
+ tf.command = 0x80;
+ ata_tf_to_fis(&tf, 0, 0, d2h_fis);
+
+ rc = sata_link_hardreset(link, timing, deadline, &online,
+ ahci_check_ready);
+
+ ahci_start_engine(ap);
+
+ if (online)
+ *class = ahci_dev_classify(ap);
+
+ DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
+ return rc;
+}
+
+static void ahci_postreset(struct ata_link *link, unsigned int *class)
+{
+ struct ata_port *ap = link->ap;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 new_tmp, tmp;
+
+ ata_std_postreset(link, class);
+
+ /* Make sure port's ATAPI bit is set appropriately */
+ new_tmp = tmp = readl(port_mmio + PORT_CMD);
+ if (*class == ATA_DEV_ATAPI)
+ new_tmp |= PORT_CMD_ATAPI;
+ else
+ new_tmp &= ~PORT_CMD_ATAPI;
+ if (new_tmp != tmp) {
+ writel(new_tmp, port_mmio + PORT_CMD);
+ readl(port_mmio + PORT_CMD); /* flush */
+ }
+}
+
+static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
+{
+ struct scatterlist *sg;
+ struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
+ unsigned int si;
+
+ VPRINTK("ENTER\n");
+
+ /*
+ * Next, the S/G list.
+ */
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ dma_addr_t addr = sg_dma_address(sg);
+ u32 sg_len = sg_dma_len(sg);
+
+ ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
+ ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
+ ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
+ }
+
+ return si;
+}
+
+static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+
+ if (!sata_pmp_attached(ap) || pp->fbs_enabled)
+ return ata_std_qc_defer(qc);
+ else
+ return sata_pmp_qc_defer_cmd_switch(qc);
+}
+
+static void ahci_qc_prep(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ int is_atapi = ata_is_atapi(qc->tf.protocol);
+ void *cmd_tbl;
+ u32 opts;
+ const u32 cmd_fis_len = 5; /* five dwords */
+ unsigned int n_elem;
+
+ /*
+ * Fill in command table information. First, the header,
+ * a SATA Register - Host to Device command FIS.
+ */
+ cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
+
+ ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
+ if (is_atapi) {
+ memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
+ memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
+ }
+
+ n_elem = 0;
+ if (qc->flags & ATA_QCFLAG_DMAMAP)
+ n_elem = ahci_fill_sg(qc, cmd_tbl);
+
+ /*
+ * Fill in command slot information.
+ */
+ opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
+ if (qc->tf.flags & ATA_TFLAG_WRITE)
+ opts |= AHCI_CMD_WRITE;
+ if (is_atapi)
+ opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
+
+ ahci_fill_cmd_slot(pp, qc->tag, opts);
+}
+
+static void ahci_fbs_dec_intr(struct ata_port *ap)
+{
+ struct ahci_port_priv *pp = ap->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 fbs = readl(port_mmio + PORT_FBS);
+ int retries = 3;
+
+ DPRINTK("ENTER\n");
+ BUG_ON(!pp->fbs_enabled);
+
+ /* time to wait for DEC is not specified by AHCI spec,
+ * add a retry loop for safety.
+ */
+ writel(fbs | PORT_FBS_DEC, port_mmio + PORT_FBS);
+ fbs = readl(port_mmio + PORT_FBS);
+ while ((fbs & PORT_FBS_DEC) && retries--) {
+ udelay(1);
+ fbs = readl(port_mmio + PORT_FBS);
+ }
+
+ if (fbs & PORT_FBS_DEC)
+ dev_printk(KERN_ERR, ap->host->dev,
+ "failed to clear device error\n");
+}
+
+static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ata_eh_info *host_ehi = &ap->link.eh_info;
+ struct ata_link *link = NULL;
+ struct ata_queued_cmd *active_qc;
+ struct ata_eh_info *active_ehi;
+ bool fbs_need_dec = false;
+ u32 serror;
+
+ /* determine active link with error */
+ if (pp->fbs_enabled) {
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 fbs = readl(port_mmio + PORT_FBS);
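+ /* PxFBS.DWE names the PMP device that reported the error */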
+ int pmp = fbs >> PORT_FBS_DWE_OFFSET;
+
+ if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links) &&
+ ata_link_online(&ap->pmp_link[pmp])) {
+ link = &ap->pmp_link[pmp];
+ fbs_need_dec = true;
+ }
+
+ } else
+ ata_for_each_link(link, ap, EDGE)
+ if (ata_link_active(link))
+ break;
+
+ if (!link)
+ link = &ap->link;
+
+ active_qc = ata_qc_from_tag(ap, link->active_tag);
+ active_ehi = &link->eh_info;
+
+ /* record irq stat */
+ ata_ehi_clear_desc(host_ehi);
+ ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
+
+ /* AHCI needs SError cleared; otherwise, it might lock up */
+ ahci_scr_read(&ap->link, SCR_ERROR, &serror);
+ ahci_scr_write(&ap->link, SCR_ERROR, serror);
+ host_ehi->serror |= serror;
+
+ /* some controllers set IRQ_IF_ERR on device errors, ignore it */
+ if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
+ irq_stat &= ~PORT_IRQ_IF_ERR;
+
+ if (irq_stat & PORT_IRQ_TF_ERR) {
+ /* If qc is active, charge it; otherwise, the active
+ * link. There's no active qc on NCQ errors. It will
+ * be determined by EH by reading log page 10h.
+ */
+ if (active_qc)
+ active_qc->err_mask |= AC_ERR_DEV;
+ else
+ active_ehi->err_mask |= AC_ERR_DEV;
+
+ if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
+ host_ehi->serror &= ~SERR_INTERNAL;
+ }
+
+ if (irq_stat & PORT_IRQ_UNK_FIS) {
+ u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
+
+ active_ehi->err_mask |= AC_ERR_HSM;
+ active_ehi->action |= ATA_EH_RESET;
+ ata_ehi_push_desc(active_ehi,
+ "unknown FIS %08x %08x %08x %08x" ,
+ unk[0], unk[1], unk[2], unk[3]);
+ }
+
+ if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
+ active_ehi->err_mask |= AC_ERR_HSM;
+ active_ehi->action |= ATA_EH_RESET;
+ ata_ehi_push_desc(active_ehi, "incorrect PMP");
+ }
+
+ if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
+ host_ehi->err_mask |= AC_ERR_HOST_BUS;
+ host_ehi->action |= ATA_EH_RESET;
+ ata_ehi_push_desc(host_ehi, "host bus error");
+ }
+
+ if (irq_stat & PORT_IRQ_IF_ERR) {
+ if (fbs_need_dec)
+ active_ehi->err_mask |= AC_ERR_DEV;
+ else {
+ host_ehi->err_mask |= AC_ERR_ATA_BUS;
+ host_ehi->action |= ATA_EH_RESET;
+ }
+
+ ata_ehi_push_desc(host_ehi, "interface fatal error");
+ }
+
+ if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
+ ata_ehi_hotplugged(host_ehi);
+ ata_ehi_push_desc(host_ehi, "%s",
+ irq_stat & PORT_IRQ_CONNECT ?
+ "connection status changed" : "PHY RDY changed");
+ }
+
+ /* okay, let's hand over to EH */
+
+ if (irq_stat & PORT_IRQ_FREEZE)
+ ata_port_freeze(ap);
+ else if (fbs_need_dec) {
+ ata_link_abort(link);
+ ahci_fbs_dec_intr(ap);
+ } else
+ ata_port_abort(ap);
+}
+
+static void ahci_port_intr(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
+ u32 status, qc_active = 0;
+ int rc;
+
+ status = readl(port_mmio + PORT_IRQ_STAT);
+ writel(status, port_mmio + PORT_IRQ_STAT);
+
+ /* ignore BAD_PMP while resetting */
+ if (unlikely(resetting))
+ status &= ~PORT_IRQ_BAD_PMP;
+
+ /* If we are getting PhyRdy, this is just a power state
+ * change; clear it out, plus the PhyRdy/Comm Wake bits
+ * from SError.
+ */
+ if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
+ (status & PORT_IRQ_PHYRDY)) {
+ status &= ~PORT_IRQ_PHYRDY;
+ ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
+ }
+
+ if (unlikely(status & PORT_IRQ_ERROR)) {
+ ahci_error_intr(ap, status);
+ return;
+ }
+
+ if (status & PORT_IRQ_SDB_FIS) {
+ /* If SNotification is available, leave notification
+ * handling to sata_async_notification(). If not,
+ * emulate it by snooping SDB FIS RX area.
+ *
+ * Snooping the FIS RX area is probably cheaper than
+ * poking SNotification, but some controllers that
+ * implement SNotification, ICH9 for example, don't
+ * store the AN SDB FIS in the receive area.
+ */
+ if (hpriv->cap & HOST_CAP_SNTF)
+ sata_async_notification(ap);
+ else {
+ /* If the 'N' bit in word 0 of the FIS is set,
+ * we just received asynchronous notification.
+ * Tell libata about it.
+ *
+ * Lack of SNotification should not appear in
+ * ahci 1.2, so the workaround is unnecessary
+ * when FBS is enabled.
+ */
+ if (pp->fbs_enabled)
+ WARN_ON_ONCE(1);
+ else {
+ const __le32 *f = pp->rx_fis + RX_FIS_SDB;
+ u32 f0 = le32_to_cpu(f[0]);
+ if (f0 & (1 << 15))
+ sata_async_notification(ap);
+ }
+ }
+ }
+
+ /* pp->active_link is not reliable once FBS is enabled; both
+ * PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because
+ * NCQ and non-NCQ commands may be in flight at the same time.
+ */
+ if (pp->fbs_enabled) {
+ if (ap->qc_active) {
+ qc_active = readl(port_mmio + PORT_SCR_ACT);
+ qc_active |= readl(port_mmio + PORT_CMD_ISSUE);
+ }
+ } else {
+ /* pp->active_link is valid iff any command is in flight */
+ if (ap->qc_active && pp->active_link->sactive)
+ qc_active = readl(port_mmio + PORT_SCR_ACT);
+ else
+ qc_active = readl(port_mmio + PORT_CMD_ISSUE);
+ }
+
+ rc = ata_qc_complete_multiple(ap, qc_active);
+
+ /* while resetting, invalid completions are expected */
+ if (unlikely(rc < 0 && !resetting)) {
+ ehi->err_mask |= AC_ERR_HSM;
+ ehi->action |= ATA_EH_RESET;
+ ata_port_freeze(ap);
+ }
+}
+
+irqreturn_t ahci_interrupt(int irq, void *dev_instance)
+{
+ struct ata_host *host = dev_instance;
+ struct ahci_host_priv *hpriv;
+ unsigned int i, handled = 0;
+ void __iomem *mmio;
+ u32 irq_stat, irq_masked;
+
+ VPRINTK("ENTER\n");
+
+ hpriv = host->private_data;
+ mmio = hpriv->mmio;
+
+ /* sigh. 0xffffffff is a valid return from h/w */
+ irq_stat = readl(mmio + HOST_IRQ_STAT);
+ if (!irq_stat)
+ return IRQ_NONE;
+
+ irq_masked = irq_stat & hpriv->port_map;
+
+ spin_lock(&host->lock);
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap;
+
+ if (!(irq_masked & (1 << i)))
+ continue;
+
+ ap = host->ports[i];
+ if (ap) {
+ ahci_port_intr(ap);
+ VPRINTK("port %u\n", i);
+ } else {
+ VPRINTK("port %u (no irq)\n", i);
+ if (ata_ratelimit())
+ dev_printk(KERN_WARNING, host->dev,
+ "interrupt on disabled port %u\n", i);
+ }
+
+ handled = 1;
+ }
+
+ /* HOST_IRQ_STAT behaves as a level-triggered latch, meaning that
+ * it should be cleared after all the port events are cleared;
+ * otherwise, it will raise a spurious interrupt after each
+ * valid one. See section 10.6.2 of AHCI 1.1 for more
+ * information.
+ *
+ * Also, use the unmasked value to clear the interrupt, as a spurious
+ * pending event on a dummy port might cause a screaming IRQ.
+ */
+ writel(irq_stat, mmio + HOST_IRQ_STAT);
+
+ spin_unlock(&host->lock);
+
+ VPRINTK("EXIT\n");
+
+ return IRQ_RETVAL(handled);
+}
+EXPORT_SYMBOL_GPL(ahci_interrupt);
+
+static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ahci_port_priv *pp = ap->private_data;
+
+ /* Keep track of the currently active link. It will be used
+ * in the completion path to determine whether the NCQ phase is
+ * in progress.
+ */
+ pp->active_link = qc->dev->link;
+
+ if (qc->tf.protocol == ATA_PROT_NCQ)
+ writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
+
+ if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) {
+ u32 fbs = readl(port_mmio + PORT_FBS);
+ fbs &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
+ fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
+ writel(fbs, port_mmio + PORT_FBS);
+ pp->fbs_last_dev = qc->dev->link->pmp;
+ }
+
+ writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
+
+ ahci_sw_activity(qc->dev->link);
+
+ return 0;
+}
+
+static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
+{
+ struct ahci_port_priv *pp = qc->ap->private_data;
+ u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+
+ if (pp->fbs_enabled)
+ d2h_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;
+
+ ata_tf_from_fis(d2h_fis, &qc->result_tf);
+ return true;
+}
+
+static void ahci_freeze(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+
+ /* turn IRQ off */
+ writel(0, port_mmio + PORT_IRQ_MASK);
+}
+
+static void ahci_thaw(struct ata_port *ap)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 tmp;
+ struct ahci_port_priv *pp = ap->private_data;
+
+ /* clear IRQ */
+ tmp = readl(port_mmio + PORT_IRQ_STAT);
+ writel(tmp, port_mmio + PORT_IRQ_STAT);
+ writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
+
+ /* turn IRQ back on */
+ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+}
+
+static void ahci_error_handler(struct ata_port *ap)
+{
+ if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
+ /* restart engine */
+ ahci_stop_engine(ap);
+ ahci_start_engine(ap);
+ }
+
+ sata_pmp_error_handler(ap);
+}
+
+static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+
+ /* make DMA engine forget about the failed command */
+ if (qc->flags & ATA_QCFLAG_FAILED)
+ ahci_kick_engine(ap);
+}
+
+static void ahci_enable_fbs(struct ata_port *ap)
+{
+ struct ahci_port_priv *pp = ap->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 fbs;
+ int rc;
+
+ if (!pp->fbs_supported)
+ return;
+
+ fbs = readl(port_mmio + PORT_FBS);
+ if (fbs & PORT_FBS_EN) {
+ pp->fbs_enabled = true;
+ pp->fbs_last_dev = -1; /* initialization */
+ return;
+ }
+
+ rc = ahci_stop_engine(ap);
+ if (rc)
+ return;
+
+ writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
+ fbs = readl(port_mmio + PORT_FBS);
+ if (fbs & PORT_FBS_EN) {
+ dev_printk(KERN_INFO, ap->host->dev, "FBS is enabled.\n");
+ pp->fbs_enabled = true;
+ pp->fbs_last_dev = -1; /* initialization */
+ } else
+ dev_printk(KERN_ERR, ap->host->dev, "Failed to enable FBS\n");
+
+ ahci_start_engine(ap);
+}
+
+static void ahci_disable_fbs(struct ata_port *ap)
+{
+ struct ahci_port_priv *pp = ap->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 fbs;
+ int rc;
+
+ if (!pp->fbs_supported)
+ return;
+
+ fbs = readl(port_mmio + PORT_FBS);
+ if ((fbs & PORT_FBS_EN) == 0) {
+ pp->fbs_enabled = false;
+ return;
+ }
+
+ rc = ahci_stop_engine(ap);
+ if (rc)
+ return;
+
+ writel(fbs & ~PORT_FBS_EN, port_mmio + PORT_FBS);
+ fbs = readl(port_mmio + PORT_FBS);
+ if (fbs & PORT_FBS_EN)
+ dev_printk(KERN_ERR, ap->host->dev, "Failed to disable FBS\n");
+ else {
+ dev_printk(KERN_INFO, ap->host->dev, "FBS is disabled.\n");
+ pp->fbs_enabled = false;
+ }
+
+ ahci_start_engine(ap);
+}
+
+static void ahci_pmp_attach(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ahci_port_priv *pp = ap->private_data;
+ u32 cmd;
+
+ cmd = readl(port_mmio + PORT_CMD);
+ cmd |= PORT_CMD_PMP;
+ writel(cmd, port_mmio + PORT_CMD);
+
+ ahci_enable_fbs(ap);
+
+ pp->intr_mask |= PORT_IRQ_BAD_PMP;
+ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+}
+
+static void ahci_pmp_detach(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ahci_port_priv *pp = ap->private_data;
+ u32 cmd;
+
+ ahci_disable_fbs(ap);
+
+ cmd = readl(port_mmio + PORT_CMD);
+ cmd &= ~PORT_CMD_PMP;
+ writel(cmd, port_mmio + PORT_CMD);
+
+ pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
+ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+}
+
+static int ahci_port_resume(struct ata_port *ap)
+{
+ ahci_power_up(ap);
+ ahci_start_port(ap);
+
+ if (sata_pmp_attached(ap))
+ ahci_pmp_attach(ap);
+ else
+ ahci_pmp_detach(ap);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
+{
+ const char *emsg = NULL;
+ int rc;
+
+ rc = ahci_deinit_port(ap, &emsg);
+ if (rc == 0)
+ ahci_power_down(ap);
+ else {
+ ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
+ ahci_start_port(ap);
+ }
+
+ return rc;
+}
+#endif
+
+static int ahci_port_start(struct ata_port *ap)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ struct device *dev = ap->host->dev;
+ struct ahci_port_priv *pp;
+ void *mem;
+ dma_addr_t mem_dma;
+ size_t dma_sz, rx_fis_sz;
+
+ pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
+ if (!pp)
+ return -ENOMEM;
+
+ /* check FBS capability */
+ if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) {
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 cmd = readl(port_mmio + PORT_CMD);
+ if (cmd & PORT_CMD_FBSCP)
+ pp->fbs_supported = true;
+ else
+ dev_printk(KERN_WARNING, dev,
+ "The port is not capable of FBS\n");
+ }
+
+ if (pp->fbs_supported) {
+ dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ;
+ rx_fis_sz = AHCI_RX_FIS_SZ * 16;
+ } else {
+ dma_sz = AHCI_PORT_PRIV_DMA_SZ;
+ rx_fis_sz = AHCI_RX_FIS_SZ;
+ }
+
+ mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+ memset(mem, 0, dma_sz);
+
+ /*
+ * First item in chunk of DMA memory: 32-slot command table,
+ * 32 bytes each in size
+ */
+ pp->cmd_slot = mem;
+ pp->cmd_slot_dma = mem_dma;
+
+ mem += AHCI_CMD_SLOT_SZ;
+ mem_dma += AHCI_CMD_SLOT_SZ;
+
+ /*
+ * Second item: Received-FIS area
+ */
+ pp->rx_fis = mem;
+ pp->rx_fis_dma = mem_dma;
+
+ mem += rx_fis_sz;
+ mem_dma += rx_fis_sz;
+
+ /*
+ * Third item: data area for storing a single command
+ * and its scatter-gather table
+ */
+ pp->cmd_tbl = mem;
+ pp->cmd_tbl_dma = mem_dma;
+
+ /*
+ * Save off initial list of interrupts to be enabled.
+ * This could be changed later
+ */
+ pp->intr_mask = DEF_PORT_IRQ;
+
+ ap->private_data = pp;
+
+ /* engage engines, captain */
+ return ahci_port_resume(ap);
+}
+
+static void ahci_port_stop(struct ata_port *ap)
+{
+ const char *emsg = NULL;
+ int rc;
+
+ /* de-initialize port */
+ rc = ahci_deinit_port(ap, &emsg);
+ if (rc)
+ ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
+}
+
+void ahci_print_info(struct ata_host *host, const char *scc_s)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ u32 vers, cap, cap2, impl, speed;
+ const char *speed_s;
+
+ vers = readl(mmio + HOST_VERSION);
+ cap = hpriv->cap;
+ cap2 = hpriv->cap2;
+ impl = hpriv->port_map;
+
+ speed = (cap >> 20) & 0xf;
+ if (speed == 1)
+ speed_s = "1.5";
+ else if (speed == 2)
+ speed_s = "3";
+ else if (speed == 3)
+ speed_s = "6";
+ else
+ speed_s = "?";
+
+ dev_info(host->dev,
+ "AHCI %02x%02x.%02x%02x "
+ "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
+ ,
+
+ (vers >> 24) & 0xff,
+ (vers >> 16) & 0xff,
+ (vers >> 8) & 0xff,
+ vers & 0xff,
+
+ ((cap >> 8) & 0x1f) + 1,
+ (cap & 0x1f) + 1,
+ speed_s,
+ impl,
+ scc_s);
+
+ dev_info(host->dev,
+ "flags: "
+ "%s%s%s%s%s%s%s"
+ "%s%s%s%s%s%s%s"
+ "%s%s%s%s%s%s\n"
+ ,
+
+ cap & HOST_CAP_64 ? "64bit " : "",
+ cap & HOST_CAP_NCQ ? "ncq " : "",
+ cap & HOST_CAP_SNTF ? "sntf " : "",
+ cap & HOST_CAP_MPS ? "ilck " : "",
+ cap & HOST_CAP_SSS ? "stag " : "",
+ cap & HOST_CAP_ALPM ? "pm " : "",
+ cap & HOST_CAP_LED ? "led " : "",
+ cap & HOST_CAP_CLO ? "clo " : "",
+ cap & HOST_CAP_ONLY ? "only " : "",
+ cap & HOST_CAP_PMP ? "pmp " : "",
+ cap & HOST_CAP_FBS ? "fbs " : "",
+ cap & HOST_CAP_PIO_MULTI ? "pio " : "",
+ cap & HOST_CAP_SSC ? "slum " : "",
+ cap & HOST_CAP_PART ? "part " : "",
+ cap & HOST_CAP_CCC ? "ccc " : "",
+ cap & HOST_CAP_EMS ? "ems " : "",
+ cap & HOST_CAP_SXS ? "sxs " : "",
+ cap2 & HOST_CAP2_APST ? "apst " : "",
+ cap2 & HOST_CAP2_NVMHCI ? "nvmp " : "",
+ cap2 & HOST_CAP2_BOH ? "boh " : ""
+ );
+}
+EXPORT_SYMBOL_GPL(ahci_print_info);
+
+void ahci_set_em_messages(struct ahci_host_priv *hpriv,
+ struct ata_port_info *pi)
+{
+ u8 messages;
+ void __iomem *mmio = hpriv->mmio;
+ u32 em_loc = readl(mmio + HOST_EM_LOC);
+ u32 em_ctl = readl(mmio + HOST_EM_CTL);
+
+ if (!ahci_em_messages || !(hpriv->cap & HOST_CAP_EMS))
+ return;
+
+ messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
+
+ /* we only support LED message type right now */
+ if ((messages & 0x01) && (ahci_em_messages == 1)) {
+ /* store em_loc */
+ hpriv->em_loc = ((em_loc >> 16) * 4);
+ pi->flags |= ATA_FLAG_EM;
+ if (!(em_ctl & EM_CTL_ALHD))
+ pi->flags |= ATA_FLAG_SW_ACTIVITY;
+ }
+}
+EXPORT_SYMBOL_GPL(ahci_set_em_messages);
+
+MODULE_AUTHOR("Jeff Garzik");
+MODULE_DESCRIPTION("Common AHCI SATA low-level routines");
+MODULE_LICENSE("GPL");
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 49cffb6094a3..86f405b4831c 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -65,6 +65,7 @@
#include <linux/libata.h>
#include <asm/byteorder.h>
#include <linux/cdrom.h>
+#include <linux/ratelimit.h>
#include "libata.h"
@@ -3631,9 +3632,15 @@ int ata_wait_ready(struct ata_link *link, unsigned long deadline,
int (*check_ready)(struct ata_link *link))
{
unsigned long start = jiffies;
- unsigned long nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
+ unsigned long nodev_deadline;
int warned = 0;
+ /* choose which 0xff timeout to use; see the comment in libata.h */
+ if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
+ nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
+ else
+ nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
+
/* Slave readiness can't be tested separately from master. On
* M/S emulation configuration, this function should be called
* only on the master and it will handle both master and slave.
@@ -3651,12 +3658,12 @@ int ata_wait_ready(struct ata_link *link, unsigned long deadline,
if (ready > 0)
return 0;
- /* -ENODEV could be transient. Ignore -ENODEV if link
+ /*
+ * -ENODEV could be transient. Ignore -ENODEV if link
* is online. Also, some SATA devices take a long
- * time to clear 0xff after reset. For example,
- * HHD424020F7SV00 iVDR needs >= 800ms while Quantum
- * GoVault needs even more than that. Wait for
- * ATA_TMOUT_FF_WAIT on -ENODEV if link isn't offline.
+ * time to clear 0xff after reset. Wait for
+ * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
+ * offline.
*
* Note that some PATA controllers (pata_ali) explode
* if status register is read more than once when
@@ -6700,25 +6707,11 @@ static void __exit ata_exit(void)
subsys_initcall(ata_init);
module_exit(ata_exit);
-static unsigned long ratelimit_time;
-static DEFINE_SPINLOCK(ata_ratelimit_lock);
+static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
int ata_ratelimit(void)
{
- int rc;
- unsigned long flags;
-
- spin_lock_irqsave(&ata_ratelimit_lock, flags);
-
- if (time_after(jiffies, ratelimit_time)) {
- rc = 1;
- ratelimit_time = jiffies + (HZ/5);
- } else
- rc = 0;
-
- spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
-
- return rc;
+ return __ratelimit(&ratelimit);
}
/**
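
The ata_ratelimit() conversion above swaps the open-coded jiffies check for the generic ratelimit helper. A minimal sketch of the same pattern, assuming only the standard <linux/ratelimit.h> API used by the patch:

	#include <linux/ratelimit.h>
	#include <linux/printk.h>

	/* Allow at most one message per HZ/5 jiffies (burst of 1), matching
	 * the behaviour of the removed hand-rolled ratelimit_time logic. */
	static DEFINE_RATELIMIT_STATE(demo_rs, HZ / 5, 1);

	static void demo_warn(void)
	{
		if (__ratelimit(&demo_rs))
			printk(KERN_WARNING "throttled warning\n");
	}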
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 00305f41ed86..224faabd7b7e 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -231,10 +231,14 @@ static const char *sata_pmp_spec_rev_str(const u32 *gscr)
return "<unknown>";
}
+#define PMP_GSCR_SII_POL 129
+
static int sata_pmp_configure(struct ata_device *dev, int print_info)
{
struct ata_port *ap = dev->link->ap;
u32 *gscr = dev->gscr;
+ u16 vendor = sata_pmp_gscr_vendor(gscr);
+ u16 devid = sata_pmp_gscr_devid(gscr);
unsigned int err_mask = 0;
const char *reason;
int nr_ports, rc;
@@ -260,12 +264,34 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info)
goto fail;
}
+ /* Disable sending Early R_OK.
+ * With "cached read" HDD testing and multiple ports busy on a SATA
+ * host controller, the 3726 PMP will very rarely drop a deferred
+ * R_OK that was intended for the host. The symptom is that all
+ * five drives under test time out, get reset, and recover.
+ */
+ if (vendor == 0x1095 && devid == 0x3726) {
+ u32 reg;
+
+ err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, &reg);
+ if (err_mask) {
+ rc = -EIO;
+ reason = "failed to read Sil3726 Private Register";
+ goto fail;
+ }
+ reg &= ~0x1;
+ err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg);
+ if (err_mask) {
+ rc = -EIO;
+ reason = "failed to write Sil3726 Private Register";
+ goto fail;
+ }
+ }
+
if (print_info) {
ata_dev_printk(dev, KERN_INFO, "Port Multiplier %s, "
"0x%04x:0x%04x r%d, %d ports, feat 0x%x/0x%x\n",
- sata_pmp_spec_rev_str(gscr),
- sata_pmp_gscr_vendor(gscr),
- sata_pmp_gscr_devid(gscr),
+ sata_pmp_spec_rev_str(gscr), vendor, devid,
sata_pmp_gscr_rev(gscr),
nr_ports, gscr[SATA_PMP_GSCR_FEAT_EN],
gscr[SATA_PMP_GSCR_FEAT]);
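
The Sil3726 quirk above is a read-modify-write of a vendor-private GSCR register through the same PMP accessors the patch uses. A generic sketch of that pattern (illustrative; the helper name is hypothetical and error handling is simplified):

	/* Clear @bits in PMP GSCR register @reg reached via @ap's host link. */
	static int pmp_gscr_clear_bits(struct ata_port *ap, int reg, u32 bits)
	{
		u32 val;

		if (sata_pmp_read(&ap->link, reg, &val))
			return -EIO;
		if (sata_pmp_write(&ap->link, reg, val & ~bits))
			return -EIO;
		return 0;
	}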
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index e3877b6843c9..b31389605bee 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -2631,100 +2631,6 @@ u8 ata_bmdma_status(struct ata_port *ap)
}
EXPORT_SYMBOL_GPL(ata_bmdma_status);
-/**
- * ata_bus_reset - reset host port and associated ATA channel
- * @ap: port to reset
- *
- * This is typically the first time we actually start issuing
- * commands to the ATA channel. We wait for BSY to clear, then
- * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
- * result. Determine what devices, if any, are on the channel
- * by looking at the device 0/1 error register. Look at the signature
- * stored in each device's taskfile registers, to determine if
- * the device is ATA or ATAPI.
- *
- * LOCKING:
- * PCI/etc. bus probe sem.
- * Obtains host lock.
- *
- * SIDE EFFECTS:
- * Sets ATA_FLAG_DISABLED if bus reset fails.
- *
- * DEPRECATED:
- * This function is only for drivers which still use old EH and
- * will be removed soon.
- */
-void ata_bus_reset(struct ata_port *ap)
-{
- struct ata_device *device = ap->link.device;
- struct ata_ioports *ioaddr = &ap->ioaddr;
- unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
- u8 err;
- unsigned int dev0, dev1 = 0, devmask = 0;
- int rc;
-
- DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
-
- /* determine if device 0/1 are present */
- if (ap->flags & ATA_FLAG_SATA_RESET)
- dev0 = 1;
- else {
- dev0 = ata_devchk(ap, 0);
- if (slave_possible)
- dev1 = ata_devchk(ap, 1);
- }
-
- if (dev0)
- devmask |= (1 << 0);
- if (dev1)
- devmask |= (1 << 1);
-
- /* select device 0 again */
- ap->ops->sff_dev_select(ap, 0);
-
- /* issue bus reset */
- if (ap->flags & ATA_FLAG_SRST) {
- rc = ata_bus_softreset(ap, devmask,
- ata_deadline(jiffies, 40000));
- if (rc && rc != -ENODEV)
- goto err_out;
- }
-
- /*
- * determine by signature whether we have ATA or ATAPI devices
- */
- device[0].class = ata_sff_dev_classify(&device[0], dev0, &err);
- if ((slave_possible) && (err != 0x81))
- device[1].class = ata_sff_dev_classify(&device[1], dev1, &err);
-
- /* is double-select really necessary? */
- if (device[1].class != ATA_DEV_NONE)
- ap->ops->sff_dev_select(ap, 1);
- if (device[0].class != ATA_DEV_NONE)
- ap->ops->sff_dev_select(ap, 0);
-
- /* if no devices were detected, disable this port */
- if ((device[0].class == ATA_DEV_NONE) &&
- (device[1].class == ATA_DEV_NONE))
- goto err_out;
-
- if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
- /* set up device control for ATA_FLAG_SATA_RESET */
- iowrite8(ap->ctl, ioaddr->ctl_addr);
- ap->last_ctl = ap->ctl;
- }
-
- DPRINTK("EXIT\n");
- return;
-
-err_out:
- ata_port_printk(ap, KERN_ERR, "disabling port\n");
- ata_port_disable(ap);
-
- DPRINTK("EXIT\n");
-}
-EXPORT_SYMBOL_GPL(ata_bus_reset);
-
#ifdef CONFIG_PCI
/**
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 9f5b053611dd..96b11b604ae0 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -64,13 +64,13 @@ struct mpc52xx_ata_priv {
/* ATAPI-4 PIO specs (in ns) */
-static const int ataspec_t0[5] = {600, 383, 240, 180, 120};
-static const int ataspec_t1[5] = { 70, 50, 30, 30, 25};
-static const int ataspec_t2_8[5] = {290, 290, 290, 80, 70};
-static const int ataspec_t2_16[5] = {165, 125, 100, 80, 70};
-static const int ataspec_t2i[5] = { 0, 0, 0, 70, 25};
-static const int ataspec_t4[5] = { 30, 20, 15, 10, 10};
-static const int ataspec_ta[5] = { 35, 35, 35, 35, 35};
+static const u16 ataspec_t0[5] = {600, 383, 240, 180, 120};
+static const u16 ataspec_t1[5] = { 70, 50, 30, 30, 25};
+static const u16 ataspec_t2_8[5] = {290, 290, 290, 80, 70};
+static const u16 ataspec_t2_16[5] = {165, 125, 100, 80, 70};
+static const u16 ataspec_t2i[5] = { 0, 0, 0, 70, 25};
+static const u16 ataspec_t4[5] = { 30, 20, 15, 10, 10};
+static const u16 ataspec_ta[5] = { 35, 35, 35, 35, 35};
#define CALC_CLKCYC(c,v) ((((v)+(c)-1)/(c)))
@@ -78,13 +78,13 @@ static const int ataspec_ta[5] = { 35, 35, 35, 35, 35};
/* ATAPI-4 MDMA specs (in clocks) */
struct mdmaspec {
- u32 t0M;
- u32 td;
- u32 th;
- u32 tj;
- u32 tkw;
- u32 tm;
- u32 tn;
+ u8 t0M;
+ u8 td;
+ u8 th;
+ u8 tj;
+ u8 tkw;
+ u8 tm;
+ u8 tn;
};
static const struct mdmaspec mdmaspec66[3] = {
@@ -101,23 +101,23 @@ static const struct mdmaspec mdmaspec132[3] = {
/* ATAPI-4 UDMA specs (in clocks) */
struct udmaspec {
- u32 tcyc;
- u32 t2cyc;
- u32 tds;
- u32 tdh;
- u32 tdvs;
- u32 tdvh;
- u32 tfs;
- u32 tli;
- u32 tmli;
- u32 taz;
- u32 tzah;
- u32 tenv;
- u32 tsr;
- u32 trfs;
- u32 trp;
- u32 tack;
- u32 tss;
+ u8 tcyc;
+ u8 t2cyc;
+ u8 tds;
+ u8 tdh;
+ u8 tdvs;
+ u8 tdvh;
+ u8 tfs;
+ u8 tli;
+ u8 tmli;
+ u8 taz;
+ u8 tzah;
+ u8 tenv;
+ u8 tsr;
+ u8 trfs;
+ u8 trp;
+ u8 tack;
+ u8 tss;
};
static const struct udmaspec udmaspec66[6] = {
@@ -270,7 +270,7 @@ mpc52xx_ata_compute_pio_timings(struct mpc52xx_ata_priv *priv, int dev, int pio)
{
struct mpc52xx_ata_timings *timing = &priv->timings[dev];
unsigned int ipb_period = priv->ipb_period;
- unsigned int t0, t1, t2_8, t2_16, t2i, t4, ta;
+ u32 t0, t1, t2_8, t2_16, t2i, t4, ta;
if ((pio < 0) || (pio > 4))
return -EINVAL;
@@ -299,8 +299,8 @@ mpc52xx_ata_compute_mdma_timings(struct mpc52xx_ata_priv *priv, int dev,
if (speed < 0 || speed > 2)
return -EINVAL;
- t->mdma1 = (s->t0M << 24) | (s->td << 16) | (s->tkw << 8) | (s->tm);
- t->mdma2 = (s->th << 24) | (s->tj << 16) | (s->tn << 8);
+ t->mdma1 = ((u32)s->t0M << 24) | ((u32)s->td << 16) | ((u32)s->tkw << 8) | s->tm;
+ t->mdma2 = ((u32)s->th << 24) | ((u32)s->tj << 16) | ((u32)s->tn << 8);
t->using_udma = 0;
return 0;
@@ -316,11 +316,11 @@ mpc52xx_ata_compute_udma_timings(struct mpc52xx_ata_priv *priv, int dev,
if (speed < 0 || speed > 2)
return -EINVAL;
- t->udma1 = (s->t2cyc << 24) | (s->tcyc << 16) | (s->tds << 8) | s->tdh;
- t->udma2 = (s->tdvs << 24) | (s->tdvh << 16) | (s->tfs << 8) | s->tli;
- t->udma3 = (s->tmli << 24) | (s->taz << 16) | (s->tenv << 8) | s->tsr;
- t->udma4 = (s->tss << 24) | (s->trfs << 16) | (s->trp << 8) | s->tack;
- t->udma5 = (s->tzah << 24);
+ t->udma1 = ((u32)s->t2cyc << 24) | ((u32)s->tcyc << 16) | ((u32)s->tds << 8) | s->tdh;
+ t->udma2 = ((u32)s->tdvs << 24) | ((u32)s->tdvh << 16) | ((u32)s->tfs << 8) | s->tli;
+ t->udma3 = ((u32)s->tmli << 24) | ((u32)s->taz << 16) | ((u32)s->tenv << 8) | s->tsr;
+ t->udma4 = ((u32)s->tss << 24) | ((u32)s->trfs << 16) | ((u32)s->trp << 8) | s->tack;
+ t->udma5 = (u32)s->tzah << 24;
t->using_udma = 1;
return 0;
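
The explicit (u32) casts above matter because the new u8 fields are promoted to signed int before the shift, and shifting a value of 0x80 or more into bit 31 of an int overflows. A self-contained sketch of the packing this could be factored into (hypothetical helper, not in the patch):

	#include <linux/types.h>

	/* Pack four 8-bit timing fields into one 32-bit register value
	 * without going through a possibly overflowing signed-int shift. */
	static inline u32 pack_timing32(u8 b3, u8 b2, u8 b1, u8 b0)
	{
		return ((u32)b3 << 24) | ((u32)b2 << 16) | ((u32)b1 << 8) | b0;
	}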
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index d94b8f0bd743..aa39bda6441a 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -45,16 +45,6 @@
#define DRV_NAME "pata_pcmcia"
#define DRV_VERSION "0.3.5"
-/*
- * Private data structure to glue stuff together
- */
-
-struct ata_pcmcia_info {
- struct pcmcia_device *pdev;
- int ndev;
- dev_node_t node;
-};
-
/**
* pcmcia_set_mode - PCMCIA specific mode setup
* @link: link
@@ -248,7 +238,6 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
{
struct ata_host *host;
struct ata_port *ap;
- struct ata_pcmcia_info *info;
struct pcmcia_config_check *stk = NULL;
int is_kme = 0, ret = -ENOMEM, p;
unsigned long io_base, ctl_base;
@@ -256,19 +245,10 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
int n_ports = 1;
struct ata_port_operations *ops = &pcmcia_port_ops;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (info == NULL)
- return -ENOMEM;
-
- /* Glue stuff together. FIXME: We may be able to get rid of info with care */
- info->pdev = pdev;
- pdev->priv = info;
-
/* Set up attributes in order to probe card and get resources */
pdev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
pdev->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
pdev->io.IOAddrLines = 3;
- pdev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
pdev->conf.Attributes = CONF_ENABLE_IRQ;
pdev->conf.IntType = INT_MEMORY_AND_IO;
@@ -293,8 +273,7 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
}
io_base = pdev->io.BasePort1;
ctl_base = stk->ctl_base;
- ret = pcmcia_request_irq(pdev, &pdev->irq);
- if (ret)
+ if (!pdev->irq)
goto failed;
ret = pcmcia_request_configuration(pdev, &pdev->conf);
@@ -344,21 +323,19 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
}
/* activate */
- ret = ata_host_activate(host, pdev->irq.AssignedIRQ, ata_sff_interrupt,
+ ret = ata_host_activate(host, pdev->irq, ata_sff_interrupt,
IRQF_SHARED, &pcmcia_sht);
if (ret)
goto failed;
- info->ndev = 1;
+ pdev->priv = host;
kfree(stk);
return 0;
failed:
kfree(stk);
- info->ndev = 0;
pcmcia_disable_device(pdev);
out1:
- kfree(info);
return ret;
}
@@ -372,20 +349,12 @@ out1:
static void pcmcia_remove_one(struct pcmcia_device *pdev)
{
- struct ata_pcmcia_info *info = pdev->priv;
- struct device *dev = &pdev->dev;
-
- if (info != NULL) {
- /* If we have attached the device to the ATA layer, detach it */
- if (info->ndev) {
- struct ata_host *host = dev_get_drvdata(dev);
- ata_host_detach(host);
- }
- info->ndev = 0;
- pdev->priv = NULL;
- }
+ struct ata_host *host = pdev->priv;
+
+ if (host)
+ ata_host_detach(host);
+
pcmcia_disable_device(pdev);
- kfree(info);
}
static struct pcmcia_device_id pcmcia_devices[] = {
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index b86712167eb8..b9101818b47b 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -68,7 +68,7 @@ static int atmtcp_send_control(struct atm_vcc *vcc,int type,
*(struct atm_vcc **) &new_msg->vcc = vcc;
old_test = test_bit(flag,&vcc->flags);
out_vcc->push(out_vcc,skb);
- add_wait_queue(sk_atm(vcc)->sk_sleep, &wait);
+ add_wait_queue(sk_sleep(sk_atm(vcc)), &wait);
while (test_bit(flag,&vcc->flags) == old_test) {
mb();
out_vcc = PRIV(vcc->dev) ? PRIV(vcc->dev)->vcc : NULL;
@@ -80,7 +80,7 @@ static int atmtcp_send_control(struct atm_vcc *vcc,int type,
schedule();
}
set_current_state(TASK_RUNNING);
- remove_wait_queue(sk_atm(vcc)->sk_sleep, &wait);
+ remove_wait_queue(sk_sleep(sk_atm(vcc)), &wait);
return error;
}
@@ -105,7 +105,7 @@ static int atmtcp_recv_control(const struct atmtcp_control *msg)
msg->type);
return -EINVAL;
}
- wake_up(sk_atm(vcc)->sk_sleep);
+ wake_up(sk_sleep(sk_atm(vcc)));
return 0;
}
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 719ec5a0dca5..90a5a7cac740 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -1131,7 +1131,7 @@ DPRINTK("doing direct send\n"); /* @@@ well, this doesn't work anyway */
if (i == -1)
put_dma(tx->index,eni_dev->dma,&j,(unsigned long)
skb->data,
- skb->len - skb->data_len);
+ skb_headlen(skb));
else
put_dma(tx->index,eni_dev->dma,&j,(unsigned long)
skb_shinfo(skb)->frags[i].page + skb_shinfo(skb)->frags[i].page_offset,
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index c213e0da0343..56c2e99e458f 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -2664,8 +2664,8 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
#ifdef USE_SCATTERGATHER
tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
- skb->len - skb->data_len, PCI_DMA_TODEVICE);
- tpd->iovec[slot].len = skb->len - skb->data_len;
+ skb_headlen(skb), PCI_DMA_TODEVICE);
+ tpd->iovec[slot].len = skb_headlen(skb);
++slot;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
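
Both ATM conversions above rely on skb_headlen() being exactly the length of the linear part of the skb. As a reminder, this mirrors the definition in <linux/skbuff.h>:

	#include <linux/skbuff.h>

	/* skb_headlen(skb) is the linear (non-paged) byte count of the skb. */
	static inline unsigned int headlen_equiv(const struct sk_buff *skb)
	{
		return skb->len - skb->data_len;
	}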
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index f35719aab3c1..251acea3d359 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -186,7 +186,7 @@ static ssize_t print_cpus_offline(struct sysdev_class *class,
/* display offline cpus < nr_cpu_ids */
if (!alloc_cpumask_var(&offline, GFP_KERNEL))
return -ENOMEM;
- cpumask_complement(offline, cpu_online_mask);
+ cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
n = cpulist_scnprintf(buf, len, offline);
free_cpumask_var(offline);
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 985da11174e7..4c70b9148b28 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -130,6 +130,17 @@ static ssize_t firmware_loading_show(struct device *dev,
return sprintf(buf, "%d\n", loading);
}
+static void firmware_free_data(const struct firmware *fw)
+{
+ int i;
+ vunmap(fw->data);
+ if (fw->pages) {
+ for (i = 0; i < PFN_UP(fw->size); i++)
+ __free_page(fw->pages[i]);
+ kfree(fw->pages);
+ }
+}
+
/* Some architectures don't have PAGE_KERNEL_RO */
#ifndef PAGE_KERNEL_RO
#define PAGE_KERNEL_RO PAGE_KERNEL
@@ -162,21 +173,21 @@ static ssize_t firmware_loading_store(struct device *dev,
mutex_unlock(&fw_lock);
break;
}
- vfree(fw_priv->fw->data);
- fw_priv->fw->data = NULL;
+ firmware_free_data(fw_priv->fw);
+ memset(fw_priv->fw, 0, sizeof(struct firmware));
+ /* If the pages are not owned by 'struct firmware' */
for (i = 0; i < fw_priv->nr_pages; i++)
__free_page(fw_priv->pages[i]);
kfree(fw_priv->pages);
fw_priv->pages = NULL;
fw_priv->page_array_size = 0;
fw_priv->nr_pages = 0;
- fw_priv->fw->size = 0;
set_bit(FW_STATUS_LOADING, &fw_priv->status);
mutex_unlock(&fw_lock);
break;
case 0:
if (test_bit(FW_STATUS_LOADING, &fw_priv->status)) {
- vfree(fw_priv->fw->data);
+ vunmap(fw_priv->fw->data);
fw_priv->fw->data = vmap(fw_priv->pages,
fw_priv->nr_pages,
0, PAGE_KERNEL_RO);
@@ -184,7 +195,10 @@ static ssize_t firmware_loading_store(struct device *dev,
dev_err(dev, "%s: vmap() failed\n", __func__);
goto err;
}
- /* Pages will be freed by vfree() */
+ /* Pages are now owned by 'struct firmware' */
+ fw_priv->fw->pages = fw_priv->pages;
+ fw_priv->pages = NULL;
+
fw_priv->page_array_size = 0;
fw_priv->nr_pages = 0;
complete(&fw_priv->completion);
@@ -578,7 +592,7 @@ release_firmware(const struct firmware *fw)
if (fw->data == builtin->data)
goto free_fw;
}
- vfree(fw->data);
+ firmware_free_data(fw);
free_fw:
kfree(fw);
}
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 4b4b565c835f..e753eb0e44d8 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -967,17 +967,17 @@ static int platform_pm_restore_noirq(struct device *dev)
int __weak platform_pm_runtime_suspend(struct device *dev)
{
- return -ENOSYS;
+ return pm_generic_runtime_suspend(dev);
};
int __weak platform_pm_runtime_resume(struct device *dev)
{
- return -ENOSYS;
+ return pm_generic_runtime_resume(dev);
};
int __weak platform_pm_runtime_idle(struct device *dev)
{
- return -ENOSYS;
+ return pm_generic_runtime_idle(dev);
};
#else /* !CONFIG_PM_RUNTIME */
@@ -1254,6 +1254,26 @@ static int __init early_platform_driver_probe_id(char *class_str,
}
if (match) {
+ /*
+ * Set up a sensible init_name to enable
+ * dev_name() and others to be used before the
+ * rest of the driver core is initialized.
+ */
+ if (!match->dev.init_name) {
+ if (match->id != -1)
+ match->dev.init_name =
+ kasprintf(GFP_KERNEL, "%s.%d",
+ match->name,
+ match->id);
+ else
+ match->dev.init_name =
+ kasprintf(GFP_KERNEL, "%s",
+ match->name);
+
+ if (!match->dev.init_name)
+ return -ENOMEM;
+ }
+
if (epdrv->pdrv->probe(match))
pr_warning("%s: unable to probe %s early.\n",
class_str, match->name);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 626dd147b75f..b0ec0e9f27e9 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -229,14 +229,16 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
if (retval) {
dev->power.runtime_status = RPM_ACTIVE;
- pm_runtime_cancel_pending(dev);
-
if (retval == -EAGAIN || retval == -EBUSY) {
- notify = true;
+ if (dev->power.timer_expires == 0)
+ notify = true;
dev->power.runtime_error = 0;
+ } else {
+ pm_runtime_cancel_pending(dev);
}
} else {
dev->power.runtime_status = RPM_SUSPENDED;
+ pm_runtime_deactivate_timer(dev);
if (dev->parent) {
parent = dev->parent;
@@ -659,8 +661,6 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
if (dev->power.runtime_status == RPM_SUSPENDED)
retval = 1;
- else if (dev->power.runtime_status == RPM_SUSPENDING)
- retval = -EINPROGRESS;
else if (atomic_read(&dev->power.usage_count) > 0
|| dev->power.disable_depth > 0)
retval = -EAGAIN;
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 86fd9373447e..a4c33bc51257 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -5,6 +5,7 @@
#include <linux/device.h>
#include <linux/string.h>
#include <linux/pm_runtime.h>
+#include <asm/atomic.h>
#include "power.h"
/*
@@ -143,7 +144,59 @@ wake_store(struct device * dev, struct device_attribute *attr,
static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store);
-#ifdef CONFIG_PM_SLEEP_ADVANCED_DEBUG
+#ifdef CONFIG_PM_ADVANCED_DEBUG
+#ifdef CONFIG_PM_RUNTIME
+
+static ssize_t rtpm_usagecount_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", atomic_read(&dev->power.usage_count));
+}
+
+static ssize_t rtpm_children_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", dev->power.ignore_children ?
+ 0 : atomic_read(&dev->power.child_count));
+}
+
+static ssize_t rtpm_enabled_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ if ((dev->power.disable_depth) && (dev->power.runtime_auto == false))
+ return sprintf(buf, "disabled & forbidden\n");
+ else if (dev->power.disable_depth)
+ return sprintf(buf, "disabled\n");
+ else if (dev->power.runtime_auto == false)
+ return sprintf(buf, "forbidden\n");
+ return sprintf(buf, "enabled\n");
+}
+
+static ssize_t rtpm_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ if (dev->power.runtime_error)
+ return sprintf(buf, "error\n");
+ switch (dev->power.runtime_status) {
+ case RPM_SUSPENDED:
+ return sprintf(buf, "suspended\n");
+ case RPM_SUSPENDING:
+ return sprintf(buf, "suspending\n");
+ case RPM_RESUMING:
+ return sprintf(buf, "resuming\n");
+ case RPM_ACTIVE:
+ return sprintf(buf, "active\n");
+ }
+ return -EIO;
+}
+
+static DEVICE_ATTR(runtime_usage, 0444, rtpm_usagecount_show, NULL);
+static DEVICE_ATTR(runtime_active_kids, 0444, rtpm_children_show, NULL);
+static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
+static DEVICE_ATTR(runtime_enabled, 0444, rtpm_enabled_show, NULL);
+
+#endif
+
static ssize_t async_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -170,15 +223,21 @@ static ssize_t async_store(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(async, 0644, async_show, async_store);
-#endif /* CONFIG_PM_SLEEP_ADVANCED_DEBUG */
+#endif /* CONFIG_PM_ADVANCED_DEBUG */
static struct attribute * power_attrs[] = {
#ifdef CONFIG_PM_RUNTIME
&dev_attr_control.attr,
#endif
&dev_attr_wakeup.attr,
-#ifdef CONFIG_PM_SLEEP_ADVANCED_DEBUG
+#ifdef CONFIG_PM_ADVANCED_DEBUG
&dev_attr_async.attr,
+#ifdef CONFIG_PM_RUNTIME
+ &dev_attr_runtime_usage.attr,
+ &dev_attr_runtime_active_kids.attr,
+ &dev_attr_runtime_status.attr,
+ &dev_attr_runtime_enabled.attr,
+#endif
#endif
NULL,
};
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 77bfce52e9ca..de277689da61 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -76,6 +76,17 @@ config BLK_DEV_XD
It's pretty unlikely that you have one of these: say N.
+config GDROM
+ tristate "SEGA Dreamcast GD-ROM drive"
+ depends on SH_DREAMCAST
+ help
+ A standard SEGA Dreamcast comes with a modified CD ROM drive called a
+ "GD-ROM" by SEGA to signify it is capable of reading special disks
+ with up to 1 GB of data. This drive will also read standard CD ROM
+ disks. Select this option to access any disks in your GD ROM drive.
+ Most users will want to say "Y" here.
+ You can also build this as a module which will be called gdrom.
+
config PARIDE
tristate "Parallel port IDE device support"
depends on PARPORT_PC
@@ -103,17 +114,6 @@ config PARIDE
"MicroSolutions backpack protocol", "DataStor Commuter protocol"
etc.).
-config GDROM
- tristate "SEGA Dreamcast GD-ROM drive"
- depends on SH_DREAMCAST
- help
- A standard SEGA Dreamcast comes with a modified CD ROM drive called a
- "GD-ROM" by SEGA to signify it is capable of reading special disks
- with up to 1 GB of data. This drive will also read standard CD ROM
- disks. Select this option to access any disks in your GD ROM drive.
- Most users will want to say "Y" here.
- You can also build this as a module which will be called gdrom.
-
source "drivers/block/paride/Kconfig"
config BLK_CPQ_DA
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 5674bd01d96d..b1e5c624b110 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -987,8 +987,12 @@ aoecmd_cfg_rsp(struct sk_buff *skb)
}
sysminor = SYSMINOR(aoemajor, h->minor);
- if (sysminor * AOE_PARTITIONS + AOE_PARTITIONS > MINORMASK) {
- printk(KERN_INFO "aoe: e%ld.%d: minor number too large\n",
+ if (h->minor >= NPERSHELF) {
+ printk(KERN_INFO "aoe: e%ld.%d: AoE minor address too large\n",
+ aoemajor, (int) h->minor);
+ return;
+ } else if (sysminor * AOE_PARTITIONS + AOE_PARTITIONS > MINORMASK) {
+ printk(KERN_INFO "aoe: e%ld.%d: AoE major address too large\n",
aoemajor, (int) h->minor);
return;
}
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index eb5ff0531cfb..51ceaee98f9f 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1588,7 +1588,6 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
c->Request = ioc->Request;
if (ioc->buf_size > 0) {
- int i;
for (i = 0; i < sg_used; i++) {
temp64.val =
pci_map_single(host->pdev, buff[i],
@@ -2434,7 +2433,7 @@ static int deregister_disk(ctlr_info_t *h, int drv_index,
/* if it was the last disk, find the new highest lun */
if (clear_all && recalculate_highest_lun) {
- int i, newhighest = -1;
+ int newhighest = -1;
for (i = 0; i <= h->highest_lun; i++) {
/* if the disk has size > 0, it is available */
if (h->drv[i] && h->drv[i]->heads)
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index e5e86a781820..d6f1ae342b1d 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -2251,7 +2251,8 @@ static inline void drbd_md_flush(struct drbd_conf *mdev)
if (test_bit(MD_NO_BARRIER, &mdev->flags))
return;
- r = blkdev_issue_flush(mdev->ldev->md_bdev, NULL);
+ r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL,
+ BLKDEV_IFL_WAIT);
if (r) {
set_bit(MD_NO_BARRIER, &mdev->flags);
dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 3f096e7959b4..c786023001d2 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -946,7 +946,8 @@ static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct d
int rv;
if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
- rv = blkdev_issue_flush(mdev->ldev->backing_bdev, NULL);
+ rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
+ NULL, BLKDEV_IFL_WAIT);
if (rv) {
dev_err(DEV, "local disk flush failed with status %d\n", rv);
/* would rather check on EOPNOTSUPP, but that is not reliable.
diff --git a/drivers/block/hd.c b/drivers/block/hd.c
index 034e6dfc878c..81c78b3ce2df 100644
--- a/drivers/block/hd.c
+++ b/drivers/block/hd.c
@@ -164,12 +164,12 @@ unsigned long read_timer(void)
unsigned long t, flags;
int i;
- spin_lock_irqsave(&i8253_lock, flags);
+ raw_spin_lock_irqsave(&i8253_lock, flags);
t = jiffies * 11932;
outb_p(0, 0x43);
i = inb_p(0x40);
i |= inb(0x40) << 8;
- spin_unlock_irqrestore(&i8253_lock, flags);
+ raw_spin_unlock_irqrestore(&i8253_lock, flags);
return(t - i);
}
#endif
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 2138a7ae050c..135615607e71 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -50,7 +50,7 @@ static void blk_done(struct virtqueue *vq)
unsigned long flags;
spin_lock_irqsave(&vblk->lock, flags);
- while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) {
+ while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
int error;
switch (vbr->status) {
@@ -70,6 +70,8 @@ static void blk_done(struct virtqueue *vq)
vbr->req->sense_len = vbr->in_hdr.sense_len;
vbr->req->errors = vbr->in_hdr.errors;
}
+ if (blk_special_request(vbr->req))
+ vbr->req->errors = (error != 0);
__blk_end_request_all(vbr->req, error);
list_del(&vbr->list);
@@ -103,6 +105,11 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
vbr->out_hdr.sector = 0;
vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
break;
+ case REQ_TYPE_SPECIAL:
+ vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID;
+ vbr->out_hdr.sector = 0;
+ vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
+ break;
case REQ_TYPE_LINUX_BLOCK:
if (req->cmd[0] == REQ_LB_OP_FLUSH) {
vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
@@ -151,7 +158,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
}
}
- if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) {
+ if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) {
mempool_free(vbr, vblk->pool);
return false;
}
@@ -180,7 +187,7 @@ static void do_virtblk_request(struct request_queue *q)
}
if (issued)
- vblk->vq->vq_ops->kick(vblk->vq);
+ virtqueue_kick(vblk->vq);
}
static void virtblk_prepare_flush(struct request_queue *q, struct request *req)
@@ -189,12 +196,45 @@ static void virtblk_prepare_flush(struct request_queue *q, struct request *req)
req->cmd[0] = REQ_LB_OP_FLUSH;
}
+/* Return the id (serial number) string for *disk in *id_str. */
+static int virtblk_get_id(struct gendisk *disk, char *id_str)
+{
+ struct virtio_blk *vblk = disk->private_data;
+ struct request *req;
+ struct bio *bio;
+
+ bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES,
+ GFP_KERNEL);
+ if (IS_ERR(bio))
+ return PTR_ERR(bio);
+
+ req = blk_make_request(vblk->disk->queue, bio, GFP_KERNEL);
+ if (IS_ERR(req)) {
+ bio_put(bio);
+ return PTR_ERR(req);
+ }
+
+ req->cmd_type = REQ_TYPE_SPECIAL;
+ return blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
+}
+
static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned cmd, unsigned long data)
{
struct gendisk *disk = bdev->bd_disk;
struct virtio_blk *vblk = disk->private_data;
+ if (cmd == 'VBID') {
+ void __user *usr_data = (void __user *)data;
+ char id_str[VIRTIO_BLK_ID_BYTES];
+ int err;
+
+ err = virtblk_get_id(disk, id_str);
+ if (!err && copy_to_user(usr_data, id_str, VIRTIO_BLK_ID_BYTES))
+ err = -EFAULT;
+ return err;
+ }
/*
* Only allow the generic SCSI ioctls if the host can support it.
*/
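
A userspace usage sketch for the new ID ioctl added above. Assumptions, not from the patch: the multi-character 'VBID' command value is taken as-is from the code above, and VIRTIO_BLK_ID_BYTES is taken to be 20 bytes; both would normally come from a shared header.

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	#define VIRTIO_BLK_ID_BYTES 20	/* assumption: matches the kernel header */

	int main(void)
	{
		char id[VIRTIO_BLK_ID_BYTES + 1] = { 0 };
		int fd = open("/dev/vda", O_RDONLY);

		if (fd < 0 || ioctl(fd, 'VBID', id) < 0)
			return 1;
		printf("serial: %s\n", id);
		close(fd);
		return 0;
	}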
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index d9bf87ca9e83..6f907ebed2d5 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -65,7 +65,6 @@ MODULE_LICENSE("GPL");
typedef struct bluecard_info_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
struct hci_dev *hdev;
@@ -869,9 +868,6 @@ static int bluecard_probe(struct pcmcia_device *link)
link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
link->io.NumPorts1 = 8;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
-
- link->irq.Handler = bluecard_interrupt;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -908,9 +904,9 @@ static int bluecard_config(struct pcmcia_device *link)
if (i != 0)
goto failed;
- i = pcmcia_request_irq(link, &link->irq);
+ i = pcmcia_request_irq(link, bluecard_interrupt);
if (i != 0)
- link->irq.AssignedIRQ = 0;
+ goto failed;
i = pcmcia_request_configuration(link, &link->conf);
if (i != 0)
@@ -919,9 +915,6 @@ static int bluecard_config(struct pcmcia_device *link)
if (bluecard_open(info) != 0)
goto failed;
- strcpy(info->node.dev_name, info->hdev->name);
- link->dev_node = &info->node;
-
return 0;
failed:
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 027cb8bf650f..21e05fdc9121 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -72,7 +72,6 @@ MODULE_FIRMWARE("BT3CPCC.bin");
typedef struct bt3c_info_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
struct hci_dev *hdev;
@@ -661,9 +660,6 @@ static int bt3c_probe(struct pcmcia_device *link)
link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
link->io.NumPorts1 = 8;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
-
- link->irq.Handler = bt3c_interrupt;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -743,9 +739,9 @@ static int bt3c_config(struct pcmcia_device *link)
goto failed;
found_port:
- i = pcmcia_request_irq(link, &link->irq);
+ i = pcmcia_request_irq(link, &bt3c_interrupt);
if (i != 0)
- link->irq.AssignedIRQ = 0;
+ goto failed;
i = pcmcia_request_configuration(link, &link->conf);
if (i != 0)
@@ -754,9 +750,6 @@ found_port:
if (bt3c_open(info) != 0)
goto failed;
- strcpy(info->node.dev_name, info->hdev->name);
- link->dev_node = &info->node;
-
return 0;
failed:
diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
index 204727586ee9..bed0ba630235 100644
--- a/drivers/bluetooth/btmrvl_drv.h
+++ b/drivers/bluetooth/btmrvl_drv.h
@@ -42,6 +42,8 @@ struct btmrvl_device {
void *card;
struct hci_dev *hcidev;
+ u8 dev_type;
+
u8 tx_dnld_rdy;
u8 psmode;
@@ -88,8 +90,11 @@ struct btmrvl_private {
#define BT_CMD_HOST_SLEEP_ENABLE 0x5A
#define BT_CMD_MODULE_CFG_REQ 0x5B
-/* Sub-commands: Module Bringup/Shutdown Request */
+/* Sub-commands: Module Bringup/Shutdown Request/Response */
#define MODULE_BRINGUP_REQ 0xF1
+#define MODULE_BROUGHT_UP 0x00
+#define MODULE_ALREADY_UP 0x0C
+
#define MODULE_SHUTDOWN_REQ 0xF2
#define BT_EVENT_POWER_STATE 0x20
@@ -123,6 +128,7 @@ struct btmrvl_event {
/* Prototype of global function */
+int btmrvl_register_hdev(struct btmrvl_private *priv);
struct btmrvl_private *btmrvl_add_card(void *card);
int btmrvl_remove_card(struct btmrvl_private *priv);
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 53a43adf2e21..ee37ef0caee2 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -66,7 +66,7 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
{
struct btmrvl_adapter *adapter = priv->adapter;
struct btmrvl_event *event;
- u8 ret = 0;
+ int ret = 0;
event = (struct btmrvl_event *) skb->data;
if (event->ec != 0xff) {
@@ -112,8 +112,17 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
case BT_CMD_MODULE_CFG_REQ:
if (priv->btmrvl_dev.sendcmdflag &&
event->data[1] == MODULE_BRINGUP_REQ) {
- BT_DBG("EVENT:%s", (event->data[2]) ?
- "Bring-up failed" : "Bring-up succeed");
+ BT_DBG("EVENT:%s",
+ ((event->data[2] == MODULE_BROUGHT_UP) ||
+ (event->data[2] == MODULE_ALREADY_UP)) ?
+ "Bring-up succeed" : "Bring-up failed");
+
+ if (event->length > 3)
+ priv->btmrvl_dev.dev_type = event->data[3];
+ else
+ priv->btmrvl_dev.dev_type = HCI_BREDR;
+
+ BT_DBG("dev_type: %d", priv->btmrvl_dev.dev_type);
} else if (priv->btmrvl_dev.sendcmdflag &&
event->data[1] == MODULE_SHUTDOWN_REQ) {
BT_DBG("EVENT:%s", (event->data[2]) ?
@@ -522,47 +531,20 @@ static int btmrvl_service_main_thread(void *data)
return 0;
}
-struct btmrvl_private *btmrvl_add_card(void *card)
+int btmrvl_register_hdev(struct btmrvl_private *priv)
{
struct hci_dev *hdev = NULL;
- struct btmrvl_private *priv;
int ret;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- BT_ERR("Can not allocate priv");
- goto err_priv;
- }
-
- priv->adapter = kzalloc(sizeof(*priv->adapter), GFP_KERNEL);
- if (!priv->adapter) {
- BT_ERR("Allocate buffer for btmrvl_adapter failed!");
- goto err_adapter;
- }
-
- btmrvl_init_adapter(priv);
-
hdev = hci_alloc_dev();
if (!hdev) {
BT_ERR("Can not allocate HCI device");
goto err_hdev;
}
- BT_DBG("Starting kthread...");
- priv->main_thread.priv = priv;
- spin_lock_init(&priv->driver_lock);
-
- init_waitqueue_head(&priv->main_thread.wait_q);
- priv->main_thread.task = kthread_run(btmrvl_service_main_thread,
- &priv->main_thread, "btmrvl_main_service");
-
priv->btmrvl_dev.hcidev = hdev;
- priv->btmrvl_dev.card = card;
-
hdev->driver_data = priv;
- priv->btmrvl_dev.tx_dnld_rdy = true;
-
hdev->bus = HCI_SDIO;
hdev->open = btmrvl_open;
hdev->close = btmrvl_close;
@@ -572,6 +554,10 @@ struct btmrvl_private *btmrvl_add_card(void *card)
hdev->ioctl = btmrvl_ioctl;
hdev->owner = THIS_MODULE;
+ btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ);
+
+ hdev->dev_type = priv->btmrvl_dev.dev_type;
+
ret = hci_register_dev(hdev);
if (ret < 0) {
BT_ERR("Can not register HCI device");
@@ -582,16 +568,52 @@ struct btmrvl_private *btmrvl_add_card(void *card)
btmrvl_debugfs_init(hdev);
#endif
- return priv;
+ return 0;
err_hci_register_dev:
- /* Stop the thread servicing the interrupts */
- kthread_stop(priv->main_thread.task);
-
hci_free_dev(hdev);
err_hdev:
+ /* Stop the thread servicing the interrupts */
+ kthread_stop(priv->main_thread.task);
+
btmrvl_free_adapter(priv);
+ kfree(priv);
+
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(btmrvl_register_hdev);
+
+struct btmrvl_private *btmrvl_add_card(void *card)
+{
+ struct btmrvl_private *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ BT_ERR("Can not allocate priv");
+ goto err_priv;
+ }
+
+ priv->adapter = kzalloc(sizeof(*priv->adapter), GFP_KERNEL);
+ if (!priv->adapter) {
+ BT_ERR("Allocate buffer for btmrvl_adapter failed!");
+ goto err_adapter;
+ }
+
+ btmrvl_init_adapter(priv);
+
+ BT_DBG("Starting kthread...");
+ priv->main_thread.priv = priv;
+ spin_lock_init(&priv->driver_lock);
+
+ init_waitqueue_head(&priv->main_thread.wait_q);
+ priv->main_thread.task = kthread_run(btmrvl_service_main_thread,
+ &priv->main_thread, "btmrvl_main_service");
+
+ priv->btmrvl_dev.card = card;
+ priv->btmrvl_dev.tx_dnld_rdy = true;
+
+ return priv;
err_adapter:
kfree(priv);
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 0dba76aa2232..df0773ebd9e4 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -931,7 +931,12 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
priv->hw_host_to_card = btmrvl_sdio_host_to_card;
priv->hw_wakeup_firmware = btmrvl_sdio_wakeup_fw;
- btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ);
+ if (btmrvl_register_hdev(priv)) {
+ BT_ERR("Register hdev failed!");
+ ret = -ENODEV;
+ goto disable_host_int;
+ }
+
priv->btmrvl_dev.psmode = 1;
btmrvl_enable_ps(priv);
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index 60c0953d7d00..4ed7288f99db 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -67,7 +67,6 @@ MODULE_LICENSE("GPL");
typedef struct btuart_info_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
struct hci_dev *hdev;
@@ -590,9 +589,6 @@ static int btuart_probe(struct pcmcia_device *link)
link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
link->io.NumPorts1 = 8;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
-
- link->irq.Handler = btuart_interrupt;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -672,9 +668,9 @@ static int btuart_config(struct pcmcia_device *link)
goto failed;
found_port:
- i = pcmcia_request_irq(link, &link->irq);
+ i = pcmcia_request_irq(link, btuart_interrupt);
if (i != 0)
- link->irq.AssignedIRQ = 0;
+ goto failed;
i = pcmcia_request_configuration(link, &link->conf);
if (i != 0)
@@ -683,9 +679,6 @@ found_port:
if (btuart_open(info) != 0)
goto failed;
- strcpy(info->node.dev_name, info->hdev->name);
- link->dev_node = &info->node;
-
return 0;
failed:
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 17788317c51a..ef044d55cb25 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -67,7 +67,6 @@ MODULE_LICENSE("GPL");
typedef struct dtl1_info_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
struct hci_dev *hdev;
@@ -575,9 +574,6 @@ static int dtl1_probe(struct pcmcia_device *link)
link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
link->io.NumPorts1 = 8;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
-
- link->irq.Handler = dtl1_interrupt;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -621,9 +617,9 @@ static int dtl1_config(struct pcmcia_device *link)
if (pcmcia_loop_config(link, dtl1_confcheck, NULL) < 0)
goto failed;
- i = pcmcia_request_irq(link, &link->irq);
+ i = pcmcia_request_irq(link, dtl1_interrupt);
if (i != 0)
- link->irq.AssignedIRQ = 0;
+ goto failed;
i = pcmcia_request_configuration(link, &link->conf);
if (i != 0)
@@ -632,9 +628,6 @@ static int dtl1_config(struct pcmcia_device *link)
if (dtl1_open(info) != 0)
goto failed;
- strcpy(info->node.dev_name, info->hdev->name);
- link->dev_node = &info->node;
-
return 0;
failed:
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index c0ce8134814e..3f038f5308a4 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -246,7 +246,7 @@ static int h4_recv(struct hci_uart *hu, void *data, int count)
BT_ERR("Can't allocate mem for new packet");
h4->rx_state = H4_W4_PACKET_TYPE;
h4->rx_count = 0;
- return 0;
+ return -ENOMEM;
}
h4->rx_skb->dev = (void *) hu->hdev;
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index 5c65014635be..fb8445c7365e 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -402,7 +402,7 @@ static int ll_recv(struct hci_uart *hu, void *data, int count)
continue;
case HCILL_W4_EVENT_HDR:
- eh = (struct hci_event_hdr *) ll->rx_skb->data;
+ eh = hci_event_hdr(ll->rx_skb);
BT_DBG("Event header: evt 0x%2.2x plen %d", eh->evt, eh->plen);
@@ -410,7 +410,7 @@ static int ll_recv(struct hci_uart *hu, void *data, int count)
continue;
case HCILL_W4_ACL_HDR:
- ah = (struct hci_acl_hdr *) ll->rx_skb->data;
+ ah = hci_acl_hdr(ll->rx_skb);
dlen = __le16_to_cpu(ah->dlen);
BT_DBG("ACL header: dlen %d", dlen);
@@ -419,7 +419,7 @@ static int ll_recv(struct hci_uart *hu, void *data, int count)
continue;
case HCILL_W4_SCO_HDR:
- sh = (struct hci_sco_hdr *) ll->rx_skb->data;
+ sh = hci_sco_hdr(ll->rx_skb);
BT_DBG("SCO header: dlen %d", sh->dlen);
@@ -491,7 +491,7 @@ static int ll_recv(struct hci_uart *hu, void *data, int count)
BT_ERR("Can't allocate mem for new packet");
ll->rx_state = HCILL_W4_PACKET_TYPE;
ll->rx_count = 0;
- return 0;
+ return -ENOMEM;
}
ll->rx_skb->dev = (void *) hu->hdev;
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index bb0aefdb4267..3aa7b2a54b6f 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -157,7 +157,7 @@ static inline ssize_t vhci_put_user(struct vhci_data *data,
break;
case HCI_SCODATA_PKT:
- data->hdev->stat.cmd_tx++;
+ data->hdev->stat.sco_tx++;
break;
};
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 870f12cfed93..120490949997 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -178,86 +178,6 @@ struct agp_bridge_data {
#define PGE_EMPTY(b, p) (!(p) || (p) == (unsigned long) (b)->scratch_page)
-/* Intel registers */
-#define INTEL_APSIZE 0xb4
-#define INTEL_ATTBASE 0xb8
-#define INTEL_AGPCTRL 0xb0
-#define INTEL_NBXCFG 0x50
-#define INTEL_ERRSTS 0x91
-
-/* Intel i830 registers */
-#define I830_GMCH_CTRL 0x52
-#define I830_GMCH_ENABLED 0x4
-#define I830_GMCH_MEM_MASK 0x1
-#define I830_GMCH_MEM_64M 0x1
-#define I830_GMCH_MEM_128M 0
-#define I830_GMCH_GMS_MASK 0x70
-#define I830_GMCH_GMS_DISABLED 0x00
-#define I830_GMCH_GMS_LOCAL 0x10
-#define I830_GMCH_GMS_STOLEN_512 0x20
-#define I830_GMCH_GMS_STOLEN_1024 0x30
-#define I830_GMCH_GMS_STOLEN_8192 0x40
-#define I830_RDRAM_CHANNEL_TYPE 0x03010
-#define I830_RDRAM_ND(x) (((x) & 0x20) >> 5)
-#define I830_RDRAM_DDT(x) (((x) & 0x18) >> 3)
-
-/* This one is for I830MP w. an external graphic card */
-#define INTEL_I830_ERRSTS 0x92
-
-/* Intel 855GM/852GM registers */
-#define I855_GMCH_GMS_MASK 0xF0
-#define I855_GMCH_GMS_STOLEN_0M 0x0
-#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4)
-#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4)
-#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4)
-#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4)
-#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4)
-#define I85X_CAPID 0x44
-#define I85X_VARIANT_MASK 0x7
-#define I85X_VARIANT_SHIFT 5
-#define I855_GME 0x0
-#define I855_GM 0x4
-#define I852_GME 0x2
-#define I852_GM 0x5
-
-/* Intel i845 registers */
-#define INTEL_I845_AGPM 0x51
-#define INTEL_I845_ERRSTS 0xc8
-
-/* Intel i860 registers */
-#define INTEL_I860_MCHCFG 0x50
-#define INTEL_I860_ERRSTS 0xc8
-
-/* Intel i810 registers */
-#define I810_GMADDR 0x10
-#define I810_MMADDR 0x14
-#define I810_PTE_BASE 0x10000
-#define I810_PTE_MAIN_UNCACHED 0x00000000
-#define I810_PTE_LOCAL 0x00000002
-#define I810_PTE_VALID 0x00000001
-#define I830_PTE_SYSTEM_CACHED 0x00000006
-#define I810_SMRAM_MISCC 0x70
-#define I810_GFX_MEM_WIN_SIZE 0x00010000
-#define I810_GFX_MEM_WIN_32M 0x00010000
-#define I810_GMS 0x000000c0
-#define I810_GMS_DISABLE 0x00000000
-#define I810_PGETBL_CTL 0x2020
-#define I810_PGETBL_ENABLED 0x00000001
-#define I965_PGETBL_SIZE_MASK 0x0000000e
-#define I965_PGETBL_SIZE_512KB (0 << 1)
-#define I965_PGETBL_SIZE_256KB (1 << 1)
-#define I965_PGETBL_SIZE_128KB (2 << 1)
-#define I965_PGETBL_SIZE_1MB (3 << 1)
-#define I965_PGETBL_SIZE_2MB (4 << 1)
-#define I965_PGETBL_SIZE_1_5MB (5 << 1)
-#define G33_PGETBL_SIZE_MASK (3 << 8)
-#define G33_PGETBL_SIZE_1M (1 << 8)
-#define G33_PGETBL_SIZE_2M (2 << 8)
-
-#define I810_DRAM_CTL 0x3000
-#define I810_DRAM_ROW_0 0x00000001
-#define I810_DRAM_ROW_0_SDRAM 0x00000001
-
struct agp_device_ids {
unsigned short device_id; /* first, to make table easier to read */
enum chipset_type chipset;
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
index d2ce68f27e4b..fd793519ea2b 100644
--- a/drivers/char/agp/ali-agp.c
+++ b/drivers/char/agp/ali-agp.c
@@ -204,6 +204,7 @@ static const struct agp_bridge_driver ali_generic_bridge = {
.aperture_sizes = ali_generic_sizes,
.size_type = U32_APER_SIZE,
.num_aperture_sizes = 7,
+ .needs_scratch_page = true,
.configure = ali_configure,
.fetch_size = ali_fetch_size,
.cleanup = ali_cleanup,
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index a7637d72cef6..b6b1568314c8 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -142,6 +142,7 @@ static int amd_create_gatt_table(struct agp_bridge_data *bridge)
{
struct aper_size_info_lvl2 *value;
struct amd_page_map page_dir;
+ unsigned long __iomem *cur_gatt;
unsigned long addr;
int retval;
u32 temp;
@@ -178,6 +179,13 @@ static int amd_create_gatt_table(struct agp_bridge_data *bridge)
readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */
}
+ for (i = 0; i < value->num_entries; i++) {
+ addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+ cur_gatt = GET_GATT(addr);
+ writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
+ readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */
+ }
+
return 0;
}
@@ -375,6 +383,7 @@ static const struct agp_bridge_driver amd_irongate_driver = {
.aperture_sizes = amd_irongate_sizes,
.size_type = LVL2_APER_SIZE,
.num_aperture_sizes = 7,
+ .needs_scratch_page = true,
.configure = amd_irongate_configure,
.fetch_size = amd_irongate_fetch_size,
.cleanup = amd_irongate_cleanup,
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index fd50ead59c79..73703b115cd2 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -210,6 +210,7 @@ static const struct agp_bridge_driver amd_8151_driver = {
.aperture_sizes = amd_8151_sizes,
.size_type = U32_APER_SIZE,
.num_aperture_sizes = 7,
+ .needs_scratch_page = true,
.configure = amd_8151_configure,
.fetch_size = amd64_fetch_size,
.cleanup = amd64_cleanup,
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index 3b2ecbe86ebe..dc30e2243494 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -341,6 +341,7 @@ static int ati_create_gatt_table(struct agp_bridge_data *bridge)
{
struct aper_size_info_lvl2 *value;
struct ati_page_map page_dir;
+ unsigned long __iomem *cur_gatt;
unsigned long addr;
int retval;
u32 temp;
@@ -395,6 +396,12 @@ static int ati_create_gatt_table(struct agp_bridge_data *bridge)
readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */
}
+ for (i = 0; i < value->num_entries; i++) {
+ addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+ cur_gatt = GET_GATT(addr);
+ writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
+ }
+
return 0;
}
@@ -415,6 +422,7 @@ static const struct agp_bridge_driver ati_generic_bridge = {
.aperture_sizes = ati_generic_sizes,
.size_type = LVL2_APER_SIZE,
.num_aperture_sizes = 7,
+ .needs_scratch_page = true,
.configure = ati_configure,
.fetch_size = ati_fetch_size,
.cleanup = ati_cleanup,
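
The ali, amd-k7, amd64 and ati hunks above share one theme: the bridge drivers now set .needs_scratch_page, and the level-2 GATT creators point every entry at the bridge's scratch page so stray GART reads hit a harmless dummy page rather than stale mappings. For reference, a rough sketch (field names approximate) of what the generic backend does when .needs_scratch_page is set:

    /* Rough sketch: allocate one dummy page at init time and keep its
     * masked bus address in bridge->scratch_page so chipset drivers can
     * plug it into unused (GA)TT entries. */
    if (bridge->driver->needs_scratch_page) {
            struct page *page = bridge->driver->agp_alloc_page(bridge);

            if (!page)
                    return -ENOMEM;
            bridge->scratch_page =
                    bridge->driver->mask_memory(bridge, page_to_phys(page), 0);
    }
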
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c
index 793f39ea9618..aa109cbe0e6e 100644
--- a/drivers/char/agp/efficeon-agp.c
+++ b/drivers/char/agp/efficeon-agp.c
@@ -28,6 +28,7 @@
#include <linux/page-flags.h>
#include <linux/mm.h>
#include "agp.h"
+#include "intel-agp.h"
/*
* The real differences to the generic AGP code is
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index fb86708e47ed..4b51982fd23a 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -1214,7 +1214,7 @@ struct agp_memory *agp_generic_alloc_user(size_t page_count, int type)
return NULL;
for (i = 0; i < page_count; i++)
- new->pages[i] = 0;
+ new->pages[i] = NULL;
new->page_count = 0;
new->type = type;
new->num_scratch_pages = pages;
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index aa4248efc5d8..d836a71bf06d 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -11,1531 +11,13 @@
#include <linux/agp_backend.h>
#include <asm/smp.h>
#include "agp.h"
+#include "intel-agp.h"
+
+#include "intel-gtt.c"
int intel_agp_enabled;
EXPORT_SYMBOL(intel_agp_enabled);
-/*
- * If we have Intel graphics, we're not going to have anything other than
- * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
- * on the Intel IOMMU support (CONFIG_DMAR).
- * Only newer chipsets need to bother with this, of course.
- */
-#ifdef CONFIG_DMAR
-#define USE_PCI_DMA_API 1
-#endif
-
-#define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588
-#define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a
-#define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970
-#define PCI_DEVICE_ID_INTEL_82946GZ_IG 0x2972
-#define PCI_DEVICE_ID_INTEL_82G35_HB 0x2980
-#define PCI_DEVICE_ID_INTEL_82G35_IG 0x2982
-#define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990
-#define PCI_DEVICE_ID_INTEL_82965Q_IG 0x2992
-#define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0
-#define PCI_DEVICE_ID_INTEL_82965G_IG 0x29A2
-#define PCI_DEVICE_ID_INTEL_82965GM_HB 0x2A00
-#define PCI_DEVICE_ID_INTEL_82965GM_IG 0x2A02
-#define PCI_DEVICE_ID_INTEL_82965GME_HB 0x2A10
-#define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12
-#define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC
-#define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE
-#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB 0xA010
-#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG 0xA011
-#define PCI_DEVICE_ID_INTEL_PINEVIEW_HB 0xA000
-#define PCI_DEVICE_ID_INTEL_PINEVIEW_IG 0xA001
-#define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0
-#define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2
-#define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0
-#define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2
-#define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0
-#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2
-#define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40
-#define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42
-#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40
-#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42
-#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00
-#define PCI_DEVICE_ID_INTEL_EAGLELAKE_IG 0x2E02
-#define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10
-#define PCI_DEVICE_ID_INTEL_Q45_IG 0x2E12
-#define PCI_DEVICE_ID_INTEL_G45_HB 0x2E20
-#define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22
-#define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30
-#define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32
-#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040
-#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042
-#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044
-#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
-#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a
-#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106
-
-/* cover 915 and 945 variants */
-#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB)
-
-#define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB)
-
-#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
-
-#define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
-
-#define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
-
-#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
- IS_SNB)
-
-extern int agp_memory_reserved;
-
-
-/* Intel 815 register */
-#define INTEL_815_APCONT 0x51
-#define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF
-
-/* Intel i820 registers */
-#define INTEL_I820_RDCR 0x51
-#define INTEL_I820_ERRSTS 0xc8
-
-/* Intel i840 registers */
-#define INTEL_I840_MCHCFG 0x50
-#define INTEL_I840_ERRSTS 0xc8
-
-/* Intel i850 registers */
-#define INTEL_I850_MCHCFG 0x50
-#define INTEL_I850_ERRSTS 0xc8
-
-/* intel 915G registers */
-#define I915_GMADDR 0x18
-#define I915_MMADDR 0x10
-#define I915_PTEADDR 0x1C
-#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
-#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
-#define G33_GMCH_GMS_STOLEN_128M (0x8 << 4)
-#define G33_GMCH_GMS_STOLEN_256M (0x9 << 4)
-#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4)
-#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4)
-#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
-#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
-
-#define I915_IFPADDR 0x60
-
-/* Intel 965G registers */
-#define I965_MSAC 0x62
-#define I965_IFPADDR 0x70
-
-/* Intel 7505 registers */
-#define INTEL_I7505_APSIZE 0x74
-#define INTEL_I7505_NCAPID 0x60
-#define INTEL_I7505_NISTAT 0x6c
-#define INTEL_I7505_ATTBASE 0x78
-#define INTEL_I7505_ERRSTS 0x42
-#define INTEL_I7505_AGPCTRL 0x70
-#define INTEL_I7505_MCHCFG 0x50
-
-#define SNB_GMCH_CTRL 0x50
-#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
-#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
-#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
-#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
-#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
-#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
-#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
-#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
-#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
-#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
-#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
-#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
-#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
-#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
-#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
-#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
-#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
-#define SNB_GTT_SIZE_0M (0 << 8)
-#define SNB_GTT_SIZE_1M (1 << 8)
-#define SNB_GTT_SIZE_2M (2 << 8)
-#define SNB_GTT_SIZE_MASK (3 << 8)
-
-static const struct aper_size_info_fixed intel_i810_sizes[] =
-{
- {64, 16384, 4},
- /* The 32M mode still requires a 64k gatt */
- {32, 8192, 4}
-};
-
-#define AGP_DCACHE_MEMORY 1
-#define AGP_PHYS_MEMORY 2
-#define INTEL_AGP_CACHED_MEMORY 3
-
-static struct gatt_mask intel_i810_masks[] =
-{
- {.mask = I810_PTE_VALID, .type = 0},
- {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
- {.mask = I810_PTE_VALID, .type = 0},
- {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
- .type = INTEL_AGP_CACHED_MEMORY}
-};
-
-static struct _intel_private {
- struct pci_dev *pcidev; /* device one */
- u8 __iomem *registers;
- u32 __iomem *gtt; /* I915G */
- int num_dcache_entries;
- /* gtt_entries is the number of gtt entries that are already mapped
- * to stolen memory. Stolen memory is larger than the memory mapped
- * through gtt_entries, as it includes some reserved space for the BIOS
- * popup and for the GTT.
- */
- int gtt_entries; /* i830+ */
- int gtt_total_size;
- union {
- void __iomem *i9xx_flush_page;
- void *i8xx_flush_page;
- };
- struct page *i8xx_page;
- struct resource ifp_resource;
- int resource_valid;
-} intel_private;
-
-#ifdef USE_PCI_DMA_API
-static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
-{
- *ret = pci_map_page(intel_private.pcidev, page, 0,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(intel_private.pcidev, *ret))
- return -EINVAL;
- return 0;
-}
-
-static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
-{
- pci_unmap_page(intel_private.pcidev, dma,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-}
-
-static void intel_agp_free_sglist(struct agp_memory *mem)
-{
- struct sg_table st;
-
- st.sgl = mem->sg_list;
- st.orig_nents = st.nents = mem->page_count;
-
- sg_free_table(&st);
-
- mem->sg_list = NULL;
- mem->num_sg = 0;
-}
-
-static int intel_agp_map_memory(struct agp_memory *mem)
-{
- struct sg_table st;
- struct scatterlist *sg;
- int i;
-
- DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
-
- if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
- return -ENOMEM;
-
- mem->sg_list = sg = st.sgl;
-
- for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
- sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);
-
- mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
- mem->page_count, PCI_DMA_BIDIRECTIONAL);
- if (unlikely(!mem->num_sg)) {
- intel_agp_free_sglist(mem);
- return -ENOMEM;
- }
- return 0;
-}
-
-static void intel_agp_unmap_memory(struct agp_memory *mem)
-{
- DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
-
- pci_unmap_sg(intel_private.pcidev, mem->sg_list,
- mem->page_count, PCI_DMA_BIDIRECTIONAL);
- intel_agp_free_sglist(mem);
-}
-
-static void intel_agp_insert_sg_entries(struct agp_memory *mem,
- off_t pg_start, int mask_type)
-{
- struct scatterlist *sg;
- int i, j;
-
- j = pg_start;
-
- WARN_ON(!mem->num_sg);
-
- if (mem->num_sg == mem->page_count) {
- for_each_sg(mem->sg_list, sg, mem->page_count, i) {
- writel(agp_bridge->driver->mask_memory(agp_bridge,
- sg_dma_address(sg), mask_type),
- intel_private.gtt+j);
- j++;
- }
- } else {
- /* sg may merge pages, but we have to separate
- * per-page addr for GTT */
- unsigned int len, m;
-
- for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
- len = sg_dma_len(sg) / PAGE_SIZE;
- for (m = 0; m < len; m++) {
- writel(agp_bridge->driver->mask_memory(agp_bridge,
- sg_dma_address(sg) + m * PAGE_SIZE,
- mask_type),
- intel_private.gtt+j);
- j++;
- }
- }
- }
- readl(intel_private.gtt+j-1);
-}
-
-#else
-
-static void intel_agp_insert_sg_entries(struct agp_memory *mem,
- off_t pg_start, int mask_type)
-{
- int i, j;
- u32 cache_bits = 0;
-
- if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
- {
- cache_bits = I830_PTE_SYSTEM_CACHED;
- }
-
- for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
- writel(agp_bridge->driver->mask_memory(agp_bridge,
- page_to_phys(mem->pages[i]), mask_type),
- intel_private.gtt+j);
- }
-
- readl(intel_private.gtt+j-1);
-}
-
-#endif
-
-static int intel_i810_fetch_size(void)
-{
- u32 smram_miscc;
- struct aper_size_info_fixed *values;
-
- pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc);
- values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
-
- if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
- dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n");
- return 0;
- }
- if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
- agp_bridge->previous_size =
- agp_bridge->current_size = (void *) (values + 1);
- agp_bridge->aperture_size_idx = 1;
- return values[1].size;
- } else {
- agp_bridge->previous_size =
- agp_bridge->current_size = (void *) (values);
- agp_bridge->aperture_size_idx = 0;
- return values[0].size;
- }
-
- return 0;
-}
-
-static int intel_i810_configure(void)
-{
- struct aper_size_info_fixed *current_size;
- u32 temp;
- int i;
-
- current_size = A_SIZE_FIX(agp_bridge->current_size);
-
- if (!intel_private.registers) {
- pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
- temp &= 0xfff80000;
-
- intel_private.registers = ioremap(temp, 128 * 4096);
- if (!intel_private.registers) {
- dev_err(&intel_private.pcidev->dev,
- "can't remap memory\n");
- return -ENOMEM;
- }
- }
-
- if ((readl(intel_private.registers+I810_DRAM_CTL)
- & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
- /* This will need to be dynamically assigned */
- dev_info(&intel_private.pcidev->dev,
- "detected 4MB dedicated video ram\n");
- intel_private.num_dcache_entries = 1024;
- }
- pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
- writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
- readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
-
- if (agp_bridge->driver->needs_scratch_page) {
- for (i = 0; i < current_size->num_entries; i++) {
- writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
- }
- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting. */
- }
- global_cache_flush();
- return 0;
-}
-
-static void intel_i810_cleanup(void)
-{
- writel(0, intel_private.registers+I810_PGETBL_CTL);
- readl(intel_private.registers); /* PCI Posting. */
- iounmap(intel_private.registers);
-}
-
-static void intel_i810_tlbflush(struct agp_memory *mem)
-{
- return;
-}
-
-static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
-{
- return;
-}
-
-/* Exists to support ARGB cursors */
-static struct page *i8xx_alloc_pages(void)
-{
- struct page *page;
-
- page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
- if (page == NULL)
- return NULL;
-
- if (set_pages_uc(page, 4) < 0) {
- set_pages_wb(page, 4);
- __free_pages(page, 2);
- return NULL;
- }
- get_page(page);
- atomic_inc(&agp_bridge->current_memory_agp);
- return page;
-}
-
-static void i8xx_destroy_pages(struct page *page)
-{
- if (page == NULL)
- return;
-
- set_pages_wb(page, 4);
- put_page(page);
- __free_pages(page, 2);
- atomic_dec(&agp_bridge->current_memory_agp);
-}
-
-static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
- int type)
-{
- if (type < AGP_USER_TYPES)
- return type;
- else if (type == AGP_USER_CACHED_MEMORY)
- return INTEL_AGP_CACHED_MEMORY;
- else
- return 0;
-}
-
-static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
- int type)
-{
- int i, j, num_entries;
- void *temp;
- int ret = -EINVAL;
- int mask_type;
-
- if (mem->page_count == 0)
- goto out;
-
- temp = agp_bridge->current_size;
- num_entries = A_SIZE_FIX(temp)->num_entries;
-
- if ((pg_start + mem->page_count) > num_entries)
- goto out_err;
-
-
- for (j = pg_start; j < (pg_start + mem->page_count); j++) {
- if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
- ret = -EBUSY;
- goto out_err;
- }
- }
-
- if (type != mem->type)
- goto out_err;
-
- mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
-
- switch (mask_type) {
- case AGP_DCACHE_MEMORY:
- if (!mem->is_flushed)
- global_cache_flush();
- for (i = pg_start; i < (pg_start + mem->page_count); i++) {
- writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
- intel_private.registers+I810_PTE_BASE+(i*4));
- }
- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
- break;
- case AGP_PHYS_MEMORY:
- case AGP_NORMAL_MEMORY:
- if (!mem->is_flushed)
- global_cache_flush();
- for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
- writel(agp_bridge->driver->mask_memory(agp_bridge,
- page_to_phys(mem->pages[i]), mask_type),
- intel_private.registers+I810_PTE_BASE+(j*4));
- }
- readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
- break;
- default:
- goto out_err;
- }
-
- agp_bridge->driver->tlb_flush(mem);
-out:
- ret = 0;
-out_err:
- mem->is_flushed = true;
- return ret;
-}
-
-static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
- int type)
-{
- int i;
-
- if (mem->page_count == 0)
- return 0;
-
- for (i = pg_start; i < (mem->page_count + pg_start); i++) {
- writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
- }
- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
-
- agp_bridge->driver->tlb_flush(mem);
- return 0;
-}
-
-/*
- * The i810/i830 requires a physical address to program its mouse
- * pointer into hardware.
- * However the Xserver still writes to it through the agp aperture.
- */
-static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
-{
- struct agp_memory *new;
- struct page *page;
-
- switch (pg_count) {
- case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
- break;
- case 4:
- /* kludge to get 4 physical pages for ARGB cursor */
- page = i8xx_alloc_pages();
- break;
- default:
- return NULL;
- }
-
- if (page == NULL)
- return NULL;
-
- new = agp_create_memory(pg_count);
- if (new == NULL)
- return NULL;
-
- new->pages[0] = page;
- if (pg_count == 4) {
- /* kludge to get 4 physical pages for ARGB cursor */
- new->pages[1] = new->pages[0] + 1;
- new->pages[2] = new->pages[1] + 1;
- new->pages[3] = new->pages[2] + 1;
- }
- new->page_count = pg_count;
- new->num_scratch_pages = pg_count;
- new->type = AGP_PHYS_MEMORY;
- new->physical = page_to_phys(new->pages[0]);
- return new;
-}
-
-static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
-{
- struct agp_memory *new;
-
- if (type == AGP_DCACHE_MEMORY) {
- if (pg_count != intel_private.num_dcache_entries)
- return NULL;
-
- new = agp_create_memory(1);
- if (new == NULL)
- return NULL;
-
- new->type = AGP_DCACHE_MEMORY;
- new->page_count = pg_count;
- new->num_scratch_pages = 0;
- agp_free_page_array(new);
- return new;
- }
- if (type == AGP_PHYS_MEMORY)
- return alloc_agpphysmem_i8xx(pg_count, type);
- return NULL;
-}
-
-static void intel_i810_free_by_type(struct agp_memory *curr)
-{
- agp_free_key(curr->key);
- if (curr->type == AGP_PHYS_MEMORY) {
- if (curr->page_count == 4)
- i8xx_destroy_pages(curr->pages[0]);
- else {
- agp_bridge->driver->agp_destroy_page(curr->pages[0],
- AGP_PAGE_DESTROY_UNMAP);
- agp_bridge->driver->agp_destroy_page(curr->pages[0],
- AGP_PAGE_DESTROY_FREE);
- }
- agp_free_page_array(curr);
- }
- kfree(curr);
-}
-
-static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
- dma_addr_t addr, int type)
-{
- /* Type checking must be done elsewhere */
- return addr | bridge->driver->masks[type].mask;
-}
-
-static struct aper_size_info_fixed intel_i830_sizes[] =
-{
- {128, 32768, 5},
- /* The 64M mode still requires a 128k gatt */
- {64, 16384, 5},
- {256, 65536, 6},
- {512, 131072, 7},
-};
-
-static void intel_i830_init_gtt_entries(void)
-{
- u16 gmch_ctrl;
- int gtt_entries = 0;
- u8 rdct;
- int local = 0;
- static const int ddt[4] = { 0, 16, 32, 64 };
- int size; /* reserved space (in kb) at the top of stolen memory */
-
- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
-
- if (IS_I965) {
- u32 pgetbl_ctl;
- pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
-
- /* The 965 has a field telling us the size of the GTT,
- * which may be larger than what is necessary to map the
- * aperture.
- */
- switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
- case I965_PGETBL_SIZE_128KB:
- size = 128;
- break;
- case I965_PGETBL_SIZE_256KB:
- size = 256;
- break;
- case I965_PGETBL_SIZE_512KB:
- size = 512;
- break;
- case I965_PGETBL_SIZE_1MB:
- size = 1024;
- break;
- case I965_PGETBL_SIZE_2MB:
- size = 2048;
- break;
- case I965_PGETBL_SIZE_1_5MB:
- size = 1024 + 512;
- break;
- default:
- dev_info(&intel_private.pcidev->dev,
- "unknown page table size, assuming 512KB\n");
- size = 512;
- }
- size += 4; /* add in BIOS popup space */
- } else if (IS_G33 && !IS_PINEVIEW) {
- /* G33's GTT size defined in gmch_ctrl */
- switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
- case G33_PGETBL_SIZE_1M:
- size = 1024;
- break;
- case G33_PGETBL_SIZE_2M:
- size = 2048;
- break;
- default:
- dev_info(&agp_bridge->dev->dev,
- "unknown page table size 0x%x, assuming 512KB\n",
- (gmch_ctrl & G33_PGETBL_SIZE_MASK));
- size = 512;
- }
- size += 4;
- } else if (IS_G4X || IS_PINEVIEW) {
- /* On 4 series hardware, GTT stolen is separate from graphics
- * stolen, ignore it in stolen gtt entries counting. However,
- * 4KB of the stolen memory doesn't get mapped to the GTT.
- */
- size = 4;
- } else {
- /* On previous hardware, the GTT size was just what was
- * required to map the aperture.
- */
- size = agp_bridge->driver->fetch_size() + 4;
- }
-
- if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
- switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
- case I830_GMCH_GMS_STOLEN_512:
- gtt_entries = KB(512) - KB(size);
- break;
- case I830_GMCH_GMS_STOLEN_1024:
- gtt_entries = MB(1) - KB(size);
- break;
- case I830_GMCH_GMS_STOLEN_8192:
- gtt_entries = MB(8) - KB(size);
- break;
- case I830_GMCH_GMS_LOCAL:
- rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
- gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
- MB(ddt[I830_RDRAM_DDT(rdct)]);
- local = 1;
- break;
- default:
- gtt_entries = 0;
- break;
- }
- } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
- /*
- * SandyBridge has new memory control reg at 0x50.w
- */
- u16 snb_gmch_ctl;
- pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
- case SNB_GMCH_GMS_STOLEN_32M:
- gtt_entries = MB(32) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_64M:
- gtt_entries = MB(64) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_96M:
- gtt_entries = MB(96) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_128M:
- gtt_entries = MB(128) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_160M:
- gtt_entries = MB(160) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_192M:
- gtt_entries = MB(192) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_224M:
- gtt_entries = MB(224) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_256M:
- gtt_entries = MB(256) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_288M:
- gtt_entries = MB(288) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_320M:
- gtt_entries = MB(320) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_352M:
- gtt_entries = MB(352) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_384M:
- gtt_entries = MB(384) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_416M:
- gtt_entries = MB(416) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_448M:
- gtt_entries = MB(448) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_480M:
- gtt_entries = MB(480) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_512M:
- gtt_entries = MB(512) - KB(size);
- break;
- }
- } else {
- switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
- case I855_GMCH_GMS_STOLEN_1M:
- gtt_entries = MB(1) - KB(size);
- break;
- case I855_GMCH_GMS_STOLEN_4M:
- gtt_entries = MB(4) - KB(size);
- break;
- case I855_GMCH_GMS_STOLEN_8M:
- gtt_entries = MB(8) - KB(size);
- break;
- case I855_GMCH_GMS_STOLEN_16M:
- gtt_entries = MB(16) - KB(size);
- break;
- case I855_GMCH_GMS_STOLEN_32M:
- gtt_entries = MB(32) - KB(size);
- break;
- case I915_GMCH_GMS_STOLEN_48M:
- /* Check it's really I915G */
- if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
- gtt_entries = MB(48) - KB(size);
- else
- gtt_entries = 0;
- break;
- case I915_GMCH_GMS_STOLEN_64M:
- /* Check it's really I915G */
- if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
- gtt_entries = MB(64) - KB(size);
- else
- gtt_entries = 0;
- break;
- case G33_GMCH_GMS_STOLEN_128M:
- if (IS_G33 || IS_I965 || IS_G4X)
- gtt_entries = MB(128) - KB(size);
- else
- gtt_entries = 0;
- break;
- case G33_GMCH_GMS_STOLEN_256M:
- if (IS_G33 || IS_I965 || IS_G4X)
- gtt_entries = MB(256) - KB(size);
- else
- gtt_entries = 0;
- break;
- case INTEL_GMCH_GMS_STOLEN_96M:
- if (IS_I965 || IS_G4X)
- gtt_entries = MB(96) - KB(size);
- else
- gtt_entries = 0;
- break;
- case INTEL_GMCH_GMS_STOLEN_160M:
- if (IS_I965 || IS_G4X)
- gtt_entries = MB(160) - KB(size);
- else
- gtt_entries = 0;
- break;
- case INTEL_GMCH_GMS_STOLEN_224M:
- if (IS_I965 || IS_G4X)
- gtt_entries = MB(224) - KB(size);
- else
- gtt_entries = 0;
- break;
- case INTEL_GMCH_GMS_STOLEN_352M:
- if (IS_I965 || IS_G4X)
- gtt_entries = MB(352) - KB(size);
- else
- gtt_entries = 0;
- break;
- default:
- gtt_entries = 0;
- break;
- }
- }
- if (gtt_entries > 0) {
- dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
- gtt_entries / KB(1), local ? "local" : "stolen");
- gtt_entries /= KB(4);
- } else {
- dev_info(&agp_bridge->dev->dev,
- "no pre-allocated video memory detected\n");
- gtt_entries = 0;
- }
-
- intel_private.gtt_entries = gtt_entries;
-}
-
-static void intel_i830_fini_flush(void)
-{
- kunmap(intel_private.i8xx_page);
- intel_private.i8xx_flush_page = NULL;
- unmap_page_from_agp(intel_private.i8xx_page);
-
- __free_page(intel_private.i8xx_page);
- intel_private.i8xx_page = NULL;
-}
-
-static void intel_i830_setup_flush(void)
-{
- /* return if we've already set the flush mechanism up */
- if (intel_private.i8xx_page)
- return;
-
- intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
- if (!intel_private.i8xx_page)
- return;
-
- intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
- if (!intel_private.i8xx_flush_page)
- intel_i830_fini_flush();
-}
-
-/* The chipset_flush interface needs to get data that has already been
- * flushed out of the CPU all the way out to main memory, because the GPU
- * doesn't snoop those buffers.
- *
- * The 8xx series doesn't have the same lovely interface for flushing the
- * chipset write buffers that the later chips do. According to the 865
- * specs, it's 64 octwords, or 1KB. So, to get those previous things in
- * that buffer out, we just fill 1KB and clflush it out, on the assumption
- * that it'll push whatever was in there out. It appears to work.
- */
-static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
-{
- unsigned int *pg = intel_private.i8xx_flush_page;
-
- memset(pg, 0, 1024);
-
- if (cpu_has_clflush)
- clflush_cache_range(pg, 1024);
- else if (wbinvd_on_all_cpus() != 0)
- printk(KERN_ERR "Timed out waiting for cache flush.\n");
-}
-
-/* The intel i830 automatically initializes the agp aperture during POST.
- * Use the memory already set aside for in the GTT.
- */
-static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
-{
- int page_order;
- struct aper_size_info_fixed *size;
- int num_entries;
- u32 temp;
-
- size = agp_bridge->current_size;
- page_order = size->page_order;
- num_entries = size->num_entries;
- agp_bridge->gatt_table_real = NULL;
-
- pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
- temp &= 0xfff80000;
-
- intel_private.registers = ioremap(temp, 128 * 4096);
- if (!intel_private.registers)
- return -ENOMEM;
-
- temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
- global_cache_flush(); /* FIXME: ?? */
-
- /* we have to call this as early as possible after the MMIO base address is known */
- intel_i830_init_gtt_entries();
-
- agp_bridge->gatt_table = NULL;
-
- agp_bridge->gatt_bus_addr = temp;
-
- return 0;
-}
-
-/* Return the gatt table to a sane state. Use the top of stolen
- * memory for the GTT.
- */
-static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
-{
- return 0;
-}
-
-static int intel_i830_fetch_size(void)
-{
- u16 gmch_ctrl;
- struct aper_size_info_fixed *values;
-
- values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
-
- if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
- agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
- /* 855GM/852GM/865G has 128MB aperture size */
- agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
- agp_bridge->aperture_size_idx = 0;
- return values[0].size;
- }
-
- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
-
- if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
- agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
- agp_bridge->aperture_size_idx = 0;
- return values[0].size;
- } else {
- agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + 1);
- agp_bridge->aperture_size_idx = 1;
- return values[1].size;
- }
-
- return 0;
-}
-
-static int intel_i830_configure(void)
-{
- struct aper_size_info_fixed *current_size;
- u32 temp;
- u16 gmch_ctrl;
- int i;
-
- current_size = A_SIZE_FIX(agp_bridge->current_size);
-
- pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-
- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
- gmch_ctrl |= I830_GMCH_ENABLED;
- pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
-
- writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
- readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
-
- if (agp_bridge->driver->needs_scratch_page) {
- for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
- writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
- }
- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. */
- }
-
- global_cache_flush();
-
- intel_i830_setup_flush();
- return 0;
-}
-
-static void intel_i830_cleanup(void)
-{
- iounmap(intel_private.registers);
-}
-
-static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
- int type)
-{
- int i, j, num_entries;
- void *temp;
- int ret = -EINVAL;
- int mask_type;
-
- if (mem->page_count == 0)
- goto out;
-
- temp = agp_bridge->current_size;
- num_entries = A_SIZE_FIX(temp)->num_entries;
-
- if (pg_start < intel_private.gtt_entries) {
- dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
- "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
- pg_start, intel_private.gtt_entries);
-
- dev_info(&intel_private.pcidev->dev,
- "trying to insert into local/stolen memory\n");
- goto out_err;
- }
-
- if ((pg_start + mem->page_count) > num_entries)
- goto out_err;
-
- /* The i830 can't check the GTT for entries since it's read only,
- * depend on the caller to make the correct offset decisions.
- */
-
- if (type != mem->type)
- goto out_err;
-
- mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
-
- if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
- mask_type != INTEL_AGP_CACHED_MEMORY)
- goto out_err;
-
- if (!mem->is_flushed)
- global_cache_flush();
-
- for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
- writel(agp_bridge->driver->mask_memory(agp_bridge,
- page_to_phys(mem->pages[i]), mask_type),
- intel_private.registers+I810_PTE_BASE+(j*4));
- }
- readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
- agp_bridge->driver->tlb_flush(mem);
-
-out:
- ret = 0;
-out_err:
- mem->is_flushed = true;
- return ret;
-}
-
-static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
- int type)
-{
- int i;
-
- if (mem->page_count == 0)
- return 0;
-
- if (pg_start < intel_private.gtt_entries) {
- dev_info(&intel_private.pcidev->dev,
- "trying to disable local/stolen memory\n");
- return -EINVAL;
- }
-
- for (i = pg_start; i < (mem->page_count + pg_start); i++) {
- writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
- }
- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
-
- agp_bridge->driver->tlb_flush(mem);
- return 0;
-}
-
-static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
-{
- if (type == AGP_PHYS_MEMORY)
- return alloc_agpphysmem_i8xx(pg_count, type);
- /* always return NULL for other allocation types for now */
- return NULL;
-}
-
-static int intel_alloc_chipset_flush_resource(void)
-{
- int ret;
- ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
- PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
- pcibios_align_resource, agp_bridge->dev);
-
- return ret;
-}
-
-static void intel_i915_setup_chipset_flush(void)
-{
- int ret;
- u32 temp;
-
- pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp);
- if (!(temp & 0x1)) {
- intel_alloc_chipset_flush_resource();
- intel_private.resource_valid = 1;
- pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
- } else {
- temp &= ~1;
-
- intel_private.resource_valid = 1;
- intel_private.ifp_resource.start = temp;
- intel_private.ifp_resource.end = temp + PAGE_SIZE;
- ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
- /* some BIOSes reserve this area in a pnp some don't */
- if (ret)
- intel_private.resource_valid = 0;
- }
-}
-
-static void intel_i965_g33_setup_chipset_flush(void)
-{
- u32 temp_hi, temp_lo;
- int ret;
-
- pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi);
- pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo);
-
- if (!(temp_lo & 0x1)) {
-
- intel_alloc_chipset_flush_resource();
-
- intel_private.resource_valid = 1;
- pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4,
- upper_32_bits(intel_private.ifp_resource.start));
- pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
- } else {
- u64 l64;
-
- temp_lo &= ~0x1;
- l64 = ((u64)temp_hi << 32) | temp_lo;
-
- intel_private.resource_valid = 1;
- intel_private.ifp_resource.start = l64;
- intel_private.ifp_resource.end = l64 + PAGE_SIZE;
- ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
- /* some BIOSes reserve this area in a pnp some don't */
- if (ret)
- intel_private.resource_valid = 0;
- }
-}
-
-static void intel_i9xx_setup_flush(void)
-{
- /* return if already configured */
- if (intel_private.ifp_resource.start)
- return;
-
- if (IS_SNB)
- return;
-
- /* setup a resource for this object */
- intel_private.ifp_resource.name = "Intel Flush Page";
- intel_private.ifp_resource.flags = IORESOURCE_MEM;
-
- /* Setup chipset flush for 915 */
- if (IS_I965 || IS_G33 || IS_G4X) {
- intel_i965_g33_setup_chipset_flush();
- } else {
- intel_i915_setup_chipset_flush();
- }
-
- if (intel_private.ifp_resource.start) {
- intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
- if (!intel_private.i9xx_flush_page)
- dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing");
- }
-}
-
-static int intel_i915_configure(void)
-{
- struct aper_size_info_fixed *current_size;
- u32 temp;
- u16 gmch_ctrl;
- int i;
-
- current_size = A_SIZE_FIX(agp_bridge->current_size);
-
- pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp);
-
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-
- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
- gmch_ctrl |= I830_GMCH_ENABLED;
- pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
-
- writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
- readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
-
- if (agp_bridge->driver->needs_scratch_page) {
- for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
- writel(agp_bridge->scratch_page, intel_private.gtt+i);
- }
- readl(intel_private.gtt+i-1); /* PCI Posting. */
- }
-
- global_cache_flush();
-
- intel_i9xx_setup_flush();
-
- return 0;
-}
-
-static void intel_i915_cleanup(void)
-{
- if (intel_private.i9xx_flush_page)
- iounmap(intel_private.i9xx_flush_page);
- if (intel_private.resource_valid)
- release_resource(&intel_private.ifp_resource);
- intel_private.ifp_resource.start = 0;
- intel_private.resource_valid = 0;
- iounmap(intel_private.gtt);
- iounmap(intel_private.registers);
-}
-
-static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
-{
- if (intel_private.i9xx_flush_page)
- writel(1, intel_private.i9xx_flush_page);
-}
-
-static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
- int type)
-{
- int num_entries;
- void *temp;
- int ret = -EINVAL;
- int mask_type;
-
- if (mem->page_count == 0)
- goto out;
-
- temp = agp_bridge->current_size;
- num_entries = A_SIZE_FIX(temp)->num_entries;
-
- if (pg_start < intel_private.gtt_entries) {
- dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
- "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
- pg_start, intel_private.gtt_entries);
-
- dev_info(&intel_private.pcidev->dev,
- "trying to insert into local/stolen memory\n");
- goto out_err;
- }
-
- if ((pg_start + mem->page_count) > num_entries)
- goto out_err;
-
- /* The i915 can't check the GTT for entries since it's read only;
- * depend on the caller to make the correct offset decisions.
- */
-
- if (type != mem->type)
- goto out_err;
-
- mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
-
- if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
- mask_type != INTEL_AGP_CACHED_MEMORY)
- goto out_err;
-
- if (!mem->is_flushed)
- global_cache_flush();
-
- intel_agp_insert_sg_entries(mem, pg_start, mask_type);
- agp_bridge->driver->tlb_flush(mem);
-
- out:
- ret = 0;
- out_err:
- mem->is_flushed = true;
- return ret;
-}
-
-static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
- int type)
-{
- int i;
-
- if (mem->page_count == 0)
- return 0;
-
- if (pg_start < intel_private.gtt_entries) {
- dev_info(&intel_private.pcidev->dev,
- "trying to disable local/stolen memory\n");
- return -EINVAL;
- }
-
- for (i = pg_start; i < (mem->page_count + pg_start); i++)
- writel(agp_bridge->scratch_page, intel_private.gtt+i);
-
- readl(intel_private.gtt+i-1);
-
- agp_bridge->driver->tlb_flush(mem);
- return 0;
-}
-
-/* Return the aperture size by just checking the resource length. The effect
- * described in the spec of the MSAC registers is just changing of the
- * resource size.
- */
-static int intel_i9xx_fetch_size(void)
-{
- int num_sizes = ARRAY_SIZE(intel_i830_sizes);
- int aper_size; /* size in megabytes */
- int i;
-
- aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1);
-
- for (i = 0; i < num_sizes; i++) {
- if (aper_size == intel_i830_sizes[i].size) {
- agp_bridge->current_size = intel_i830_sizes + i;
- agp_bridge->previous_size = agp_bridge->current_size;
- return aper_size;
- }
- }
-
- return 0;
-}
-
-/* The intel i915 automatically initializes the agp aperture during POST.
- * Use the memory already set aside for in the GTT.
- */
-static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
-{
- int page_order;
- struct aper_size_info_fixed *size;
- int num_entries;
- u32 temp, temp2;
- int gtt_map_size = 256 * 1024;
-
- size = agp_bridge->current_size;
- page_order = size->page_order;
- num_entries = size->num_entries;
- agp_bridge->gatt_table_real = NULL;
-
- pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
- pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);
-
- if (IS_G33)
- gtt_map_size = 1024 * 1024; /* 1M on G33 */
- intel_private.gtt = ioremap(temp2, gtt_map_size);
- if (!intel_private.gtt)
- return -ENOMEM;
-
- intel_private.gtt_total_size = gtt_map_size / 4;
-
- temp &= 0xfff80000;
-
- intel_private.registers = ioremap(temp, 128 * 4096);
- if (!intel_private.registers) {
- iounmap(intel_private.gtt);
- return -ENOMEM;
- }
-
- temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
- global_cache_flush(); /* FIXME: ? */
-
- /* we have to call this as early as possible after the MMIO base address is known */
- intel_i830_init_gtt_entries();
-
- agp_bridge->gatt_table = NULL;
-
- agp_bridge->gatt_bus_addr = temp;
-
- return 0;
-}
-
-/*
- * The i965 supports 36-bit physical addresses, but to keep
- * the format of the GTT the same, the bits that don't fit
- * in a 32-bit word are shifted down to bits 4..7.
- *
- * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
- * is always zero on 32-bit architectures, so no need to make
- * this conditional.
- */
-static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
- dma_addr_t addr, int type)
-{
- /* Shift high bits down */
- addr |= (addr >> 28) & 0xf0;
-
- /* Type checking must be done elsewhere */
- return addr | bridge->driver->masks[type].mask;
-}
-
-static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
-{
- u16 snb_gmch_ctl;
-
- switch (agp_bridge->dev->device) {
- case PCI_DEVICE_ID_INTEL_GM45_HB:
- case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
- case PCI_DEVICE_ID_INTEL_Q45_HB:
- case PCI_DEVICE_ID_INTEL_G45_HB:
- case PCI_DEVICE_ID_INTEL_G41_HB:
- case PCI_DEVICE_ID_INTEL_B43_HB:
- case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
- case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
- case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
- case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
- *gtt_offset = *gtt_size = MB(2);
- break;
- case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
- case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
- *gtt_offset = MB(2);
-
- pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
- default:
- case SNB_GTT_SIZE_0M:
- printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
- *gtt_size = MB(0);
- break;
- case SNB_GTT_SIZE_1M:
- *gtt_size = MB(1);
- break;
- case SNB_GTT_SIZE_2M:
- *gtt_size = MB(2);
- break;
- }
- break;
- default:
- *gtt_offset = *gtt_size = KB(512);
- }
-}
-
-/* The intel i965 automatically initializes the agp aperture during POST.
- * Use the memory already set aside for in the GTT.
- */
-static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
-{
- int page_order;
- struct aper_size_info_fixed *size;
- int num_entries;
- u32 temp;
- int gtt_offset, gtt_size;
-
- size = agp_bridge->current_size;
- page_order = size->page_order;
- num_entries = size->num_entries;
- agp_bridge->gatt_table_real = NULL;
-
- pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
-
- temp &= 0xfff00000;
-
- intel_i965_get_gtt_range(&gtt_offset, &gtt_size);
-
- intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size);
-
- if (!intel_private.gtt)
- return -ENOMEM;
-
- intel_private.gtt_total_size = gtt_size / 4;
-
- intel_private.registers = ioremap(temp, 128 * 4096);
- if (!intel_private.registers) {
- iounmap(intel_private.gtt);
- return -ENOMEM;
- }
-
- temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
- global_cache_flush(); /* FIXME: ? */
-
- /* we have to call this as early as possible after the MMIO base address is known */
- intel_i830_init_gtt_entries();
-
- agp_bridge->gatt_table = NULL;
-
- agp_bridge->gatt_bus_addr = temp;
-
- return 0;
-}
-
-
static int intel_fetch_size(void)
{
int i;
@@ -1982,6 +464,7 @@ static const struct agp_bridge_driver intel_generic_driver = {
.aperture_sizes = intel_generic_sizes,
.size_type = U16_APER_SIZE,
.num_aperture_sizes = 7,
+ .needs_scratch_page = true,
.configure = intel_configure,
.fetch_size = intel_fetch_size,
.cleanup = intel_cleanup,
@@ -2003,38 +486,12 @@ static const struct agp_bridge_driver intel_generic_driver = {
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
-static const struct agp_bridge_driver intel_810_driver = {
- .owner = THIS_MODULE,
- .aperture_sizes = intel_i810_sizes,
- .size_type = FIXED_APER_SIZE,
- .num_aperture_sizes = 2,
- .needs_scratch_page = true,
- .configure = intel_i810_configure,
- .fetch_size = intel_i810_fetch_size,
- .cleanup = intel_i810_cleanup,
- .tlb_flush = intel_i810_tlbflush,
- .mask_memory = intel_i810_mask_memory,
- .masks = intel_i810_masks,
- .agp_enable = intel_i810_agp_enable,
- .cache_flush = global_cache_flush,
- .create_gatt_table = agp_generic_create_gatt_table,
- .free_gatt_table = agp_generic_free_gatt_table,
- .insert_memory = intel_i810_insert_entries,
- .remove_memory = intel_i810_remove_entries,
- .alloc_by_type = intel_i810_alloc_by_type,
- .free_by_type = intel_i810_free_by_type,
- .agp_alloc_page = agp_generic_alloc_page,
- .agp_alloc_pages = agp_generic_alloc_pages,
- .agp_destroy_page = agp_generic_destroy_page,
- .agp_destroy_pages = agp_generic_destroy_pages,
- .agp_type_to_mask_type = agp_generic_type_to_mask_type,
-};
-
static const struct agp_bridge_driver intel_815_driver = {
.owner = THIS_MODULE,
.aperture_sizes = intel_815_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 2,
+ .needs_scratch_page = true,
.configure = intel_815_configure,
.fetch_size = intel_815_fetch_size,
.cleanup = intel_8xx_cleanup,
@@ -2056,39 +513,12 @@ static const struct agp_bridge_driver intel_815_driver = {
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
-static const struct agp_bridge_driver intel_830_driver = {
- .owner = THIS_MODULE,
- .aperture_sizes = intel_i830_sizes,
- .size_type = FIXED_APER_SIZE,
- .num_aperture_sizes = 4,
- .needs_scratch_page = true,
- .configure = intel_i830_configure,
- .fetch_size = intel_i830_fetch_size,
- .cleanup = intel_i830_cleanup,
- .tlb_flush = intel_i810_tlbflush,
- .mask_memory = intel_i810_mask_memory,
- .masks = intel_i810_masks,
- .agp_enable = intel_i810_agp_enable,
- .cache_flush = global_cache_flush,
- .create_gatt_table = intel_i830_create_gatt_table,
- .free_gatt_table = intel_i830_free_gatt_table,
- .insert_memory = intel_i830_insert_entries,
- .remove_memory = intel_i830_remove_entries,
- .alloc_by_type = intel_i830_alloc_by_type,
- .free_by_type = intel_i810_free_by_type,
- .agp_alloc_page = agp_generic_alloc_page,
- .agp_alloc_pages = agp_generic_alloc_pages,
- .agp_destroy_page = agp_generic_destroy_page,
- .agp_destroy_pages = agp_generic_destroy_pages,
- .agp_type_to_mask_type = intel_i830_type_to_mask_type,
- .chipset_flush = intel_i830_chipset_flush,
-};
-
static const struct agp_bridge_driver intel_820_driver = {
.owner = THIS_MODULE,
.aperture_sizes = intel_8xx_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 7,
+ .needs_scratch_page = true,
.configure = intel_820_configure,
.fetch_size = intel_8xx_fetch_size,
.cleanup = intel_820_cleanup,
@@ -2115,6 +545,7 @@ static const struct agp_bridge_driver intel_830mp_driver = {
.aperture_sizes = intel_830mp_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 4,
+ .needs_scratch_page = true,
.configure = intel_830mp_configure,
.fetch_size = intel_8xx_fetch_size,
.cleanup = intel_8xx_cleanup,
@@ -2141,6 +572,7 @@ static const struct agp_bridge_driver intel_840_driver = {
.aperture_sizes = intel_8xx_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 7,
+ .needs_scratch_page = true,
.configure = intel_840_configure,
.fetch_size = intel_8xx_fetch_size,
.cleanup = intel_8xx_cleanup,
@@ -2167,6 +599,7 @@ static const struct agp_bridge_driver intel_845_driver = {
.aperture_sizes = intel_8xx_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 7,
+ .needs_scratch_page = true,
.configure = intel_845_configure,
.fetch_size = intel_8xx_fetch_size,
.cleanup = intel_8xx_cleanup,
@@ -2193,6 +626,7 @@ static const struct agp_bridge_driver intel_850_driver = {
.aperture_sizes = intel_8xx_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 7,
+ .needs_scratch_page = true,
.configure = intel_850_configure,
.fetch_size = intel_8xx_fetch_size,
.cleanup = intel_8xx_cleanup,
@@ -2219,6 +653,7 @@ static const struct agp_bridge_driver intel_860_driver = {
.aperture_sizes = intel_8xx_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 7,
+ .needs_scratch_page = true,
.configure = intel_860_configure,
.fetch_size = intel_8xx_fetch_size,
.cleanup = intel_8xx_cleanup,
@@ -2240,79 +675,12 @@ static const struct agp_bridge_driver intel_860_driver = {
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
-static const struct agp_bridge_driver intel_915_driver = {
- .owner = THIS_MODULE,
- .aperture_sizes = intel_i830_sizes,
- .size_type = FIXED_APER_SIZE,
- .num_aperture_sizes = 4,
- .needs_scratch_page = true,
- .configure = intel_i915_configure,
- .fetch_size = intel_i9xx_fetch_size,
- .cleanup = intel_i915_cleanup,
- .tlb_flush = intel_i810_tlbflush,
- .mask_memory = intel_i810_mask_memory,
- .masks = intel_i810_masks,
- .agp_enable = intel_i810_agp_enable,
- .cache_flush = global_cache_flush,
- .create_gatt_table = intel_i915_create_gatt_table,
- .free_gatt_table = intel_i830_free_gatt_table,
- .insert_memory = intel_i915_insert_entries,
- .remove_memory = intel_i915_remove_entries,
- .alloc_by_type = intel_i830_alloc_by_type,
- .free_by_type = intel_i810_free_by_type,
- .agp_alloc_page = agp_generic_alloc_page,
- .agp_alloc_pages = agp_generic_alloc_pages,
- .agp_destroy_page = agp_generic_destroy_page,
- .agp_destroy_pages = agp_generic_destroy_pages,
- .agp_type_to_mask_type = intel_i830_type_to_mask_type,
- .chipset_flush = intel_i915_chipset_flush,
-#ifdef USE_PCI_DMA_API
- .agp_map_page = intel_agp_map_page,
- .agp_unmap_page = intel_agp_unmap_page,
- .agp_map_memory = intel_agp_map_memory,
- .agp_unmap_memory = intel_agp_unmap_memory,
-#endif
-};
-
-static const struct agp_bridge_driver intel_i965_driver = {
- .owner = THIS_MODULE,
- .aperture_sizes = intel_i830_sizes,
- .size_type = FIXED_APER_SIZE,
- .num_aperture_sizes = 4,
- .needs_scratch_page = true,
- .configure = intel_i915_configure,
- .fetch_size = intel_i9xx_fetch_size,
- .cleanup = intel_i915_cleanup,
- .tlb_flush = intel_i810_tlbflush,
- .mask_memory = intel_i965_mask_memory,
- .masks = intel_i810_masks,
- .agp_enable = intel_i810_agp_enable,
- .cache_flush = global_cache_flush,
- .create_gatt_table = intel_i965_create_gatt_table,
- .free_gatt_table = intel_i830_free_gatt_table,
- .insert_memory = intel_i915_insert_entries,
- .remove_memory = intel_i915_remove_entries,
- .alloc_by_type = intel_i830_alloc_by_type,
- .free_by_type = intel_i810_free_by_type,
- .agp_alloc_page = agp_generic_alloc_page,
- .agp_alloc_pages = agp_generic_alloc_pages,
- .agp_destroy_page = agp_generic_destroy_page,
- .agp_destroy_pages = agp_generic_destroy_pages,
- .agp_type_to_mask_type = intel_i830_type_to_mask_type,
- .chipset_flush = intel_i915_chipset_flush,
-#ifdef USE_PCI_DMA_API
- .agp_map_page = intel_agp_map_page,
- .agp_unmap_page = intel_agp_unmap_page,
- .agp_map_memory = intel_agp_map_memory,
- .agp_unmap_memory = intel_agp_unmap_memory,
-#endif
-};
-
static const struct agp_bridge_driver intel_7505_driver = {
.owner = THIS_MODULE,
.aperture_sizes = intel_8xx_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 7,
+ .needs_scratch_page = true,
.configure = intel_7505_configure,
.fetch_size = intel_8xx_fetch_size,
.cleanup = intel_8xx_cleanup,
@@ -2334,40 +702,6 @@ static const struct agp_bridge_driver intel_7505_driver = {
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
-static const struct agp_bridge_driver intel_g33_driver = {
- .owner = THIS_MODULE,
- .aperture_sizes = intel_i830_sizes,
- .size_type = FIXED_APER_SIZE,
- .num_aperture_sizes = 4,
- .needs_scratch_page = true,
- .configure = intel_i915_configure,
- .fetch_size = intel_i9xx_fetch_size,
- .cleanup = intel_i915_cleanup,
- .tlb_flush = intel_i810_tlbflush,
- .mask_memory = intel_i965_mask_memory,
- .masks = intel_i810_masks,
- .agp_enable = intel_i810_agp_enable,
- .cache_flush = global_cache_flush,
- .create_gatt_table = intel_i915_create_gatt_table,
- .free_gatt_table = intel_i830_free_gatt_table,
- .insert_memory = intel_i915_insert_entries,
- .remove_memory = intel_i915_remove_entries,
- .alloc_by_type = intel_i830_alloc_by_type,
- .free_by_type = intel_i810_free_by_type,
- .agp_alloc_page = agp_generic_alloc_page,
- .agp_alloc_pages = agp_generic_alloc_pages,
- .agp_destroy_page = agp_generic_destroy_page,
- .agp_destroy_pages = agp_generic_destroy_pages,
- .agp_type_to_mask_type = intel_i830_type_to_mask_type,
- .chipset_flush = intel_i915_chipset_flush,
-#ifdef USE_PCI_DMA_API
- .agp_map_page = intel_agp_map_page,
- .agp_unmap_page = intel_agp_unmap_page,
- .agp_map_memory = intel_agp_map_memory,
- .agp_unmap_memory = intel_agp_unmap_memory,
-#endif
-};
-
static int find_gmch(u16 device)
{
struct pci_dev *gmch_device;
@@ -2392,103 +726,137 @@ static int find_gmch(u16 device)
static const struct intel_driver_description {
unsigned int chip_id;
unsigned int gmch_chip_id;
- unsigned int multi_gmch_chip; /* if we have more gfx chip type on this HB. */
char *name;
const struct agp_bridge_driver *driver;
const struct agp_bridge_driver *gmch_driver;
} intel_agp_chipsets[] = {
- { PCI_DEVICE_ID_INTEL_82443LX_0, 0, 0, "440LX", &intel_generic_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82443BX_0, 0, 0, "440BX", &intel_generic_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82443GX_0, 0, 0, "440GX", &intel_generic_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82810_MC1, PCI_DEVICE_ID_INTEL_82810_IG1, 0, "i810",
+ { PCI_DEVICE_ID_INTEL_82443LX_0, 0, "440LX", &intel_generic_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_82443BX_0, 0, "440BX", &intel_generic_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_82443GX_0, 0, "440GX", &intel_generic_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_82810_MC1, PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
NULL, &intel_810_driver },
- { PCI_DEVICE_ID_INTEL_82810_MC3, PCI_DEVICE_ID_INTEL_82810_IG3, 0, "i810",
+ { PCI_DEVICE_ID_INTEL_82810_MC3, PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
NULL, &intel_810_driver },
- { PCI_DEVICE_ID_INTEL_82810E_MC, PCI_DEVICE_ID_INTEL_82810E_IG, 0, "i810",
+ { PCI_DEVICE_ID_INTEL_82810E_MC, PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
NULL, &intel_810_driver },
- { PCI_DEVICE_ID_INTEL_82815_MC, PCI_DEVICE_ID_INTEL_82815_CGC, 0, "i815",
+ { PCI_DEVICE_ID_INTEL_82815_MC, PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
&intel_815_driver, &intel_810_driver },
- { PCI_DEVICE_ID_INTEL_82820_HB, 0, 0, "i820", &intel_820_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82820_UP_HB, 0, 0, "i820", &intel_820_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82830_HB, PCI_DEVICE_ID_INTEL_82830_CGC, 0, "830M",
+ { PCI_DEVICE_ID_INTEL_82820_HB, 0, "i820", &intel_820_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_82820_UP_HB, 0, "i820", &intel_820_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_82830_HB, PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
&intel_830mp_driver, &intel_830_driver },
- { PCI_DEVICE_ID_INTEL_82840_HB, 0, 0, "i840", &intel_840_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82845_HB, 0, 0, "845G", &intel_845_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, 0, "830M",
+ { PCI_DEVICE_ID_INTEL_82840_HB, 0, "i840", &intel_840_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_82845_HB, 0, "845G", &intel_845_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
&intel_845_driver, &intel_830_driver },
- { PCI_DEVICE_ID_INTEL_82850_HB, 0, 0, "i850", &intel_850_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, 0, "854",
+ { PCI_DEVICE_ID_INTEL_82850_HB, 0, "i850", &intel_850_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, "854",
&intel_845_driver, &intel_830_driver },
- { PCI_DEVICE_ID_INTEL_82855PM_HB, 0, 0, "855PM", &intel_845_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, 0, "855GM",
+ { PCI_DEVICE_ID_INTEL_82855PM_HB, 0, "855PM", &intel_845_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
&intel_845_driver, &intel_830_driver },
- { PCI_DEVICE_ID_INTEL_82860_HB, 0, 0, "i860", &intel_860_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, 0, "865",
+ { PCI_DEVICE_ID_INTEL_82860_HB, 0, "i860", &intel_860_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, "865",
&intel_845_driver, &intel_830_driver },
- { PCI_DEVICE_ID_INTEL_82875_HB, 0, 0, "i875", &intel_845_driver, NULL },
- { PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, 0, "E7221 (i915)",
+ { PCI_DEVICE_ID_INTEL_82875_HB, 0, "i875", &intel_845_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
NULL, &intel_915_driver },
- { PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, 0, "915G",
+ { PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
NULL, &intel_915_driver },
- { PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, 0, "915GM",
+ { PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
NULL, &intel_915_driver },
- { PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, 0, "945G",
+ { PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
NULL, &intel_915_driver },
- { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, 0, "945GM",
+ { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
NULL, &intel_915_driver },
- { PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, 0, "945GME",
+ { PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
NULL, &intel_915_driver },
- { PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, 0, "946GZ",
+ { PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, 0, "G35",
+ { PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, 0, "965Q",
+ { PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, 0, "965G",
+ { PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, 0, "965GM",
+ { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, 0, "965GME/GLE",
+ { PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_7505_0, 0, 0, "E7505", &intel_7505_driver, NULL },
- { PCI_DEVICE_ID_INTEL_7205_0, 0, 0, "E7205", &intel_7505_driver, NULL },
- { PCI_DEVICE_ID_INTEL_G33_HB, PCI_DEVICE_ID_INTEL_G33_IG, 0, "G33",
+ { PCI_DEVICE_ID_INTEL_7505_0, 0, "E7505", &intel_7505_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_7205_0, 0, "E7205", &intel_7505_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_G33_HB, PCI_DEVICE_ID_INTEL_G33_IG, "G33",
NULL, &intel_g33_driver },
- { PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, 0, "Q35",
+ { PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
NULL, &intel_g33_driver },
- { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
+ { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
NULL, &intel_g33_driver },
- { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, 0, "GMA3150",
+ { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
NULL, &intel_g33_driver },
- { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, 0, "GMA3150",
+ { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
NULL, &intel_g33_driver },
- { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0,
+ { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG,
"GM45", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, 0,
+ { PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG,
"Eaglelake", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, 0,
+ { PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG,
"Q45/Q43", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, 0,
+ { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG,
"G45/G43", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG, 0,
+ { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG,
"B43", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0,
+ { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG,
"G41", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, 0,
+ { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
"HD Graphics", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
+ { PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
"HD Graphics", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
+ { PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
"HD Graphics", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
+ { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
"HD Graphics", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG, 0,
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG,
"Sandybridge", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG, 0,
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG,
"Sandybridge", NULL, &intel_i965_driver },
- { 0, 0, 0, NULL, NULL, NULL }
+ { 0, 0, NULL, NULL, NULL }
};
+static int __devinit intel_gmch_probe(struct pci_dev *pdev,
+ struct agp_bridge_data *bridge)
+{
+ int i;
+ bridge->driver = NULL;
+
+ for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
+ if ((intel_agp_chipsets[i].gmch_chip_id != 0) &&
+ find_gmch(intel_agp_chipsets[i].gmch_chip_id)) {
+ bridge->driver =
+ intel_agp_chipsets[i].gmch_driver;
+ break;
+ }
+ }
+
+ if (!bridge->driver)
+ return 0;
+
+ bridge->dev_private_data = &intel_private;
+ bridge->dev = pdev;
+
+ dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
+
+ if (bridge->driver->mask_memory == intel_i965_mask_memory) {
+ if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36)))
+ dev_err(&intel_private.pcidev->dev,
+ "set gfx device dma mask 36bit failed!\n");
+ else
+ pci_set_consistent_dma_mask(intel_private.pcidev,
+ DMA_BIT_MASK(36));
+ }
+
+ return 1;
+}
+
static int __devinit agp_intel_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -2503,22 +871,18 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
if (!bridge)
return -ENOMEM;
+ bridge->capndx = cap_ptr;
+
+ if (intel_gmch_probe(pdev, bridge))
+ goto found_gmch;
+
for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
/* In case that multiple models of gfx chip may
stand on same host bridge type, this can be
sure we detect the right IGD. */
if (pdev->device == intel_agp_chipsets[i].chip_id) {
- if ((intel_agp_chipsets[i].gmch_chip_id != 0) &&
- find_gmch(intel_agp_chipsets[i].gmch_chip_id)) {
- bridge->driver =
- intel_agp_chipsets[i].gmch_driver;
- break;
- } else if (intel_agp_chipsets[i].multi_gmch_chip) {
- continue;
- } else {
- bridge->driver = intel_agp_chipsets[i].driver;
- break;
- }
+ bridge->driver = intel_agp_chipsets[i].driver;
+ break;
}
}
@@ -2530,18 +894,16 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
return -ENODEV;
}
- if (bridge->driver == NULL) {
- /* bridge has no AGP and no IGD detected */
+ if (!bridge->driver) {
if (cap_ptr)
dev_warn(&pdev->dev, "can't find bridge device (chip_id: %04x)\n",
- intel_agp_chipsets[i].gmch_chip_id);
+ intel_agp_chipsets[i].gmch_chip_id);
agp_put_bridge(bridge);
return -ENODEV;
}
bridge->dev = pdev;
- bridge->capndx = cap_ptr;
- bridge->dev_private_data = &intel_private;
+ bridge->dev_private_data = NULL;
dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
@@ -2577,15 +939,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
&bridge->mode);
}
- if (bridge->driver->mask_memory == intel_i965_mask_memory) {
- if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36)))
- dev_err(&intel_private.pcidev->dev,
- "set gfx device dma mask 36bit failed!\n");
- else
- pci_set_consistent_dma_mask(intel_private.pcidev,
- DMA_BIT_MASK(36));
- }
-
+found_gmch:
pci_set_drvdata(pdev, bridge);
err = agp_add_bridge(bridge);
if (!err)
@@ -2611,22 +965,7 @@ static int agp_intel_resume(struct pci_dev *pdev)
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
int ret_val;
- if (bridge->driver == &intel_generic_driver)
- intel_configure();
- else if (bridge->driver == &intel_850_driver)
- intel_850_configure();
- else if (bridge->driver == &intel_845_driver)
- intel_845_configure();
- else if (bridge->driver == &intel_830mp_driver)
- intel_830mp_configure();
- else if (bridge->driver == &intel_915_driver)
- intel_i915_configure();
- else if (bridge->driver == &intel_830_driver)
- intel_i830_configure();
- else if (bridge->driver == &intel_810_driver)
- intel_i810_configure();
- else if (bridge->driver == &intel_i965_driver)
- intel_i915_configure();
+ bridge->driver->configure();
ret_val = agp_rebind_memory();
if (ret_val != 0)
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
new file mode 100644
index 000000000000..2547465d4658
--- /dev/null
+++ b/drivers/char/agp/intel-agp.h
@@ -0,0 +1,239 @@
+/*
+ * Common Intel AGPGART and GTT definitions.
+ */
+
+/* Intel registers */
+#define INTEL_APSIZE 0xb4
+#define INTEL_ATTBASE 0xb8
+#define INTEL_AGPCTRL 0xb0
+#define INTEL_NBXCFG 0x50
+#define INTEL_ERRSTS 0x91
+
+/* Intel i830 registers */
+#define I830_GMCH_CTRL 0x52
+#define I830_GMCH_ENABLED 0x4
+#define I830_GMCH_MEM_MASK 0x1
+#define I830_GMCH_MEM_64M 0x1
+#define I830_GMCH_MEM_128M 0
+#define I830_GMCH_GMS_MASK 0x70
+#define I830_GMCH_GMS_DISABLED 0x00
+#define I830_GMCH_GMS_LOCAL 0x10
+#define I830_GMCH_GMS_STOLEN_512 0x20
+#define I830_GMCH_GMS_STOLEN_1024 0x30
+#define I830_GMCH_GMS_STOLEN_8192 0x40
+#define I830_RDRAM_CHANNEL_TYPE 0x03010
+#define I830_RDRAM_ND(x) (((x) & 0x20) >> 5)
+#define I830_RDRAM_DDT(x) (((x) & 0x18) >> 3)
+
+/* This one is for the I830MP with an external graphics card */
+#define INTEL_I830_ERRSTS 0x92
+
+/* Intel 855GM/852GM registers */
+#define I855_GMCH_GMS_MASK 0xF0
+#define I855_GMCH_GMS_STOLEN_0M 0x0
+#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4)
+#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4)
+#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4)
+#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4)
+#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4)
+#define I85X_CAPID 0x44
+#define I85X_VARIANT_MASK 0x7
+#define I85X_VARIANT_SHIFT 5
+#define I855_GME 0x0
+#define I855_GM 0x4
+#define I852_GME 0x2
+#define I852_GM 0x5
+
+/* Intel i845 registers */
+#define INTEL_I845_AGPM 0x51
+#define INTEL_I845_ERRSTS 0xc8
+
+/* Intel i860 registers */
+#define INTEL_I860_MCHCFG 0x50
+#define INTEL_I860_ERRSTS 0xc8
+
+/* Intel i810 registers */
+#define I810_GMADDR 0x10
+#define I810_MMADDR 0x14
+#define I810_PTE_BASE 0x10000
+#define I810_PTE_MAIN_UNCACHED 0x00000000
+#define I810_PTE_LOCAL 0x00000002
+#define I810_PTE_VALID 0x00000001
+#define I830_PTE_SYSTEM_CACHED 0x00000006
+#define I810_SMRAM_MISCC 0x70
+#define I810_GFX_MEM_WIN_SIZE 0x00010000
+#define I810_GFX_MEM_WIN_32M 0x00010000
+#define I810_GMS 0x000000c0
+#define I810_GMS_DISABLE 0x00000000
+#define I810_PGETBL_CTL 0x2020
+#define I810_PGETBL_ENABLED 0x00000001
+#define I965_PGETBL_SIZE_MASK 0x0000000e
+#define I965_PGETBL_SIZE_512KB (0 << 1)
+#define I965_PGETBL_SIZE_256KB (1 << 1)
+#define I965_PGETBL_SIZE_128KB (2 << 1)
+#define I965_PGETBL_SIZE_1MB (3 << 1)
+#define I965_PGETBL_SIZE_2MB (4 << 1)
+#define I965_PGETBL_SIZE_1_5MB (5 << 1)
+#define G33_PGETBL_SIZE_MASK (3 << 8)
+#define G33_PGETBL_SIZE_1M (1 << 8)
+#define G33_PGETBL_SIZE_2M (2 << 8)
+
+#define I810_DRAM_CTL 0x3000
+#define I810_DRAM_ROW_0 0x00000001
+#define I810_DRAM_ROW_0_SDRAM 0x00000001
+
+/* Intel 815 register */
+#define INTEL_815_APCONT 0x51
+#define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF
+
+/* Intel i820 registers */
+#define INTEL_I820_RDCR 0x51
+#define INTEL_I820_ERRSTS 0xc8
+
+/* Intel i840 registers */
+#define INTEL_I840_MCHCFG 0x50
+#define INTEL_I840_ERRSTS 0xc8
+
+/* Intel i850 registers */
+#define INTEL_I850_MCHCFG 0x50
+#define INTEL_I850_ERRSTS 0xc8
+
+/* intel 915G registers */
+#define I915_GMADDR 0x18
+#define I915_MMADDR 0x10
+#define I915_PTEADDR 0x1C
+#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
+#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
+#define G33_GMCH_GMS_STOLEN_128M (0x8 << 4)
+#define G33_GMCH_GMS_STOLEN_256M (0x9 << 4)
+#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4)
+#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4)
+#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
+#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
+
+#define I915_IFPADDR 0x60
+
+/* Intel 965G registers */
+#define I965_MSAC 0x62
+#define I965_IFPADDR 0x70
+
+/* Intel 7505 registers */
+#define INTEL_I7505_APSIZE 0x74
+#define INTEL_I7505_NCAPID 0x60
+#define INTEL_I7505_NISTAT 0x6c
+#define INTEL_I7505_ATTBASE 0x78
+#define INTEL_I7505_ERRSTS 0x42
+#define INTEL_I7505_AGPCTRL 0x70
+#define INTEL_I7505_MCHCFG 0x50
+
+#define SNB_GMCH_CTRL 0x50
+#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
+#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
+#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
+#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
+#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
+#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
+#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
+#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
+#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
+#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
+#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
+#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
+#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
+#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
+#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
+#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
+#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
+#define SNB_GTT_SIZE_0M (0 << 8)
+#define SNB_GTT_SIZE_1M (1 << 8)
+#define SNB_GTT_SIZE_2M (2 << 8)
+#define SNB_GTT_SIZE_MASK (3 << 8)
+
+/* pci devices ids */
+#define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588
+#define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a
+#define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970
+#define PCI_DEVICE_ID_INTEL_82946GZ_IG 0x2972
+#define PCI_DEVICE_ID_INTEL_82G35_HB 0x2980
+#define PCI_DEVICE_ID_INTEL_82G35_IG 0x2982
+#define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990
+#define PCI_DEVICE_ID_INTEL_82965Q_IG 0x2992
+#define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0
+#define PCI_DEVICE_ID_INTEL_82965G_IG 0x29A2
+#define PCI_DEVICE_ID_INTEL_82965GM_HB 0x2A00
+#define PCI_DEVICE_ID_INTEL_82965GM_IG 0x2A02
+#define PCI_DEVICE_ID_INTEL_82965GME_HB 0x2A10
+#define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12
+#define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC
+#define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB 0xA010
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG 0xA011
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_HB 0xA000
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_IG 0xA001
+#define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0
+#define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2
+#define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0
+#define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2
+#define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0
+#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2
+#define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40
+#define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42
+#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40
+#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42
+#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00
+#define PCI_DEVICE_ID_INTEL_EAGLELAKE_IG 0x2E02
+#define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10
+#define PCI_DEVICE_ID_INTEL_Q45_IG 0x2E12
+#define PCI_DEVICE_ID_INTEL_G45_HB 0x2E20
+#define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22
+#define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30
+#define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106
+
+/* cover 915 and 945 variants */
+#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB)
+
+#define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB)
+
+#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
+
+#define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
+
+#define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
+
+#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
+ IS_SNB)
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
new file mode 100644
index 000000000000..e8ea6825822c
--- /dev/null
+++ b/drivers/char/agp/intel-gtt.c
@@ -0,0 +1,1516 @@
+/*
+ * Intel GTT (Graphics Translation Table) routines
+ *
+ * Caveat: This driver implements the linux agp interface, but this is far from
+ * an agp driver! GTT support ended up here for purely historical reasons: The
+ * old userspace intel graphics drivers needed an interface to map memory into
+ * the GTT. And the drm provides a default interface for graphics devices sitting
+ * on an agp port. So it made sense to fake the GTT support as an agp port to
+ * avoid having to create a new api.
+ *
+ * With gem this does not make much sense anymore; it just needlessly complicates
+ * the code. But as long as the old graphics stack is still supported, it's stuck
+ * here.
+ *
+ * /fairy-tale-mode off
+ */
+
+/*
+ * If we have Intel graphics, we're not going to have anything other than
+ * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
+ * on the Intel IOMMU support (CONFIG_DMAR).
+ * Only newer chipsets need to bother with this, of course.
+ */
+#ifdef CONFIG_DMAR
+#define USE_PCI_DMA_API 1
+#endif
+
+static const struct aper_size_info_fixed intel_i810_sizes[] =
+{
+ {64, 16384, 4},
+ /* The 32M mode still requires a 64k gatt */
+ {32, 8192, 4}
+};
+
+#define AGP_DCACHE_MEMORY 1
+#define AGP_PHYS_MEMORY 2
+#define INTEL_AGP_CACHED_MEMORY 3
+
+static struct gatt_mask intel_i810_masks[] =
+{
+ {.mask = I810_PTE_VALID, .type = 0},
+ {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
+ {.mask = I810_PTE_VALID, .type = 0},
+ {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
+ .type = INTEL_AGP_CACHED_MEMORY}
+};
+
+static struct _intel_private {
+ struct pci_dev *pcidev; /* device one */
+ u8 __iomem *registers;
+ u32 __iomem *gtt; /* I915G */
+ int num_dcache_entries;
+ /* gtt_entries is the number of gtt entries that are already mapped
+ * to stolen memory. Stolen memory is larger than the memory mapped
+ * through gtt_entries, as it includes some reserved space for the BIOS
+ * popup and for the GTT (see the worked example after this struct).
+ */
+ int gtt_entries; /* i830+ */
+ int gtt_total_size;
+ union {
+ void __iomem *i9xx_flush_page;
+ void *i8xx_flush_page;
+ };
+ struct page *i8xx_page;
+ struct resource ifp_resource;
+ int resource_valid;
+} intel_private;
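+
+/* For example: an i830 with a 128MB aperture needs a 128KB GTT plus a
+ * 4KB BIOS popup, i.e. 132KB of reserved space, so 8MB of stolen memory
+ * gives (MB(8) - KB(132)) / KB(4) = 2015 gtt_entries. See
+ * intel_i830_init_gtt_entries() below for the actual calculation.
+ */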
+
+#ifdef USE_PCI_DMA_API
+static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
+{
+ *ret = pci_map_page(intel_private.pcidev, page, 0,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(intel_private.pcidev, *ret))
+ return -EINVAL;
+ return 0;
+}
+
+static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
+{
+ pci_unmap_page(intel_private.pcidev, dma,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+}
+
+static void intel_agp_free_sglist(struct agp_memory *mem)
+{
+ struct sg_table st;
+
+ st.sgl = mem->sg_list;
+ st.orig_nents = st.nents = mem->page_count;
+
+ sg_free_table(&st);
+
+ mem->sg_list = NULL;
+ mem->num_sg = 0;
+}
+
+static int intel_agp_map_memory(struct agp_memory *mem)
+{
+ struct sg_table st;
+ struct scatterlist *sg;
+ int i;
+
+ DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
+
+ if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
+ return -ENOMEM;
+
+ mem->sg_list = sg = st.sgl;
+
+ for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
+ sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);
+
+ mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
+ mem->page_count, PCI_DMA_BIDIRECTIONAL);
+ if (unlikely(!mem->num_sg)) {
+ intel_agp_free_sglist(mem);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void intel_agp_unmap_memory(struct agp_memory *mem)
+{
+ DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
+
+ pci_unmap_sg(intel_private.pcidev, mem->sg_list,
+ mem->page_count, PCI_DMA_BIDIRECTIONAL);
+ intel_agp_free_sglist(mem);
+}
+
+static void intel_agp_insert_sg_entries(struct agp_memory *mem,
+ off_t pg_start, int mask_type)
+{
+ struct scatterlist *sg;
+ int i, j;
+
+ j = pg_start;
+
+ WARN_ON(!mem->num_sg);
+
+ if (mem->num_sg == mem->page_count) {
+ for_each_sg(mem->sg_list, sg, mem->page_count, i) {
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
+ sg_dma_address(sg), mask_type),
+ intel_private.gtt+j);
+ j++;
+ }
+ } else {
+ /* sg may merge pages, but we have to separate
+ * per-page addr for GTT */
+ unsigned int len, m;
+
+ for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
+ len = sg_dma_len(sg) / PAGE_SIZE;
+ for (m = 0; m < len; m++) {
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
+ sg_dma_address(sg) + m * PAGE_SIZE,
+ mask_type),
+ intel_private.gtt+j);
+ j++;
+ }
+ }
+ }
+ readl(intel_private.gtt+j-1);
+}
+
+#else
+
+static void intel_agp_insert_sg_entries(struct agp_memory *mem,
+ off_t pg_start, int mask_type)
+{
+ int i, j;
+ u32 cache_bits = 0;
+
+ if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
+ {
+ cache_bits = I830_PTE_SYSTEM_CACHED;
+ }
+
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
+ page_to_phys(mem->pages[i]), mask_type),
+ intel_private.gtt+j);
+ }
+
+ readl(intel_private.gtt+j-1);
+}
+
+#endif
+
+static int intel_i810_fetch_size(void)
+{
+ u32 smram_miscc;
+ struct aper_size_info_fixed *values;
+
+ pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc);
+ values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
+
+ if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
+ dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n");
+ return 0;
+ }
+ if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
+ agp_bridge->current_size = (void *) (values + 1);
+ agp_bridge->aperture_size_idx = 1;
+ return values[1].size;
+ } else {
+ agp_bridge->current_size = (void *) (values);
+ agp_bridge->aperture_size_idx = 0;
+ return values[0].size;
+ }
+
+ return 0;
+}
+
+static int intel_i810_configure(void)
+{
+ struct aper_size_info_fixed *current_size;
+ u32 temp;
+ int i;
+
+ current_size = A_SIZE_FIX(agp_bridge->current_size);
+
+ if (!intel_private.registers) {
+ pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
+ temp &= 0xfff80000;
+
+ intel_private.registers = ioremap(temp, 128 * 4096);
+ if (!intel_private.registers) {
+ dev_err(&intel_private.pcidev->dev,
+ "can't remap memory\n");
+ return -ENOMEM;
+ }
+ }
+
+ if ((readl(intel_private.registers+I810_DRAM_CTL)
+ & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
+ /* This will need to be dynamically assigned */
+ dev_info(&intel_private.pcidev->dev,
+ "detected 4MB dedicated video ram\n");
+ intel_private.num_dcache_entries = 1024;
+ }
+ pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
+ readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
+
+ if (agp_bridge->driver->needs_scratch_page) {
+ for (i = 0; i < current_size->num_entries; i++) {
+ writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
+ }
+ readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting. */
+ }
+ global_cache_flush();
+ return 0;
+}
+
+static void intel_i810_cleanup(void)
+{
+ writel(0, intel_private.registers+I810_PGETBL_CTL);
+ readl(intel_private.registers); /* PCI Posting. */
+ iounmap(intel_private.registers);
+}
+
+static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+ return;
+}
+
+/* Exists to support ARGB cursors */
+static struct page *i8xx_alloc_pages(void)
+{
+ struct page *page;
+
+ page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
+ if (page == NULL)
+ return NULL;
+
+ if (set_pages_uc(page, 4) < 0) {
+ set_pages_wb(page, 4);
+ __free_pages(page, 2);
+ return NULL;
+ }
+ get_page(page);
+ atomic_inc(&agp_bridge->current_memory_agp);
+ return page;
+}
+
+static void i8xx_destroy_pages(struct page *page)
+{
+ if (page == NULL)
+ return;
+
+ set_pages_wb(page, 4);
+ put_page(page);
+ __free_pages(page, 2);
+ atomic_dec(&agp_bridge->current_memory_agp);
+}
+
+static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
+ int type)
+{
+ if (type < AGP_USER_TYPES)
+ return type;
+ else if (type == AGP_USER_CACHED_MEMORY)
+ return INTEL_AGP_CACHED_MEMORY;
+ else
+ return 0;
+}
+
+static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ int i, j, num_entries;
+ void *temp;
+ int ret = -EINVAL;
+ int mask_type;
+
+ if (mem->page_count == 0)
+ goto out;
+
+ temp = agp_bridge->current_size;
+ num_entries = A_SIZE_FIX(temp)->num_entries;
+
+ if ((pg_start + mem->page_count) > num_entries)
+ goto out_err;
+
+
+ for (j = pg_start; j < (pg_start + mem->page_count); j++) {
+ if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
+ ret = -EBUSY;
+ goto out_err;
+ }
+ }
+
+ if (type != mem->type)
+ goto out_err;
+
+ mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
+
+ switch (mask_type) {
+ case AGP_DCACHE_MEMORY:
+ if (!mem->is_flushed)
+ global_cache_flush();
+ for (i = pg_start; i < (pg_start + mem->page_count); i++) {
+ writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
+ intel_private.registers+I810_PTE_BASE+(i*4));
+ }
+ readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
+ break;
+ case AGP_PHYS_MEMORY:
+ case AGP_NORMAL_MEMORY:
+ if (!mem->is_flushed)
+ global_cache_flush();
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
+ page_to_phys(mem->pages[i]), mask_type),
+ intel_private.registers+I810_PTE_BASE+(j*4));
+ }
+ readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
+ break;
+ default:
+ goto out_err;
+ }
+
+out:
+ ret = 0;
+out_err:
+ mem->is_flushed = true;
+ return ret;
+}
+
+static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ int i;
+
+ if (mem->page_count == 0)
+ return 0;
+
+ for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+ writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
+ }
+ readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
+
+ return 0;
+}
+
+/*
+ * The i810/i830 requires a physical address to program its mouse
+ * pointer into hardware.
+ * However the Xserver still writes to it through the agp aperture.
+ */
+static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
+{
+ struct agp_memory *new;
+ struct page *page;
+
+ switch (pg_count) {
+ case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
+ break;
+ case 4:
+ /* kludge to get 4 physical pages for ARGB cursor */
+ page = i8xx_alloc_pages();
+ break;
+ default:
+ return NULL;
+ }
+
+ if (page == NULL)
+ return NULL;
+
+ new = agp_create_memory(pg_count);
+ if (new == NULL)
+ return NULL;
+
+ new->pages[0] = page;
+ if (pg_count == 4) {
+ /* kludge to get 4 physical pages for ARGB cursor */
+ new->pages[1] = new->pages[0] + 1;
+ new->pages[2] = new->pages[1] + 1;
+ new->pages[3] = new->pages[2] + 1;
+ }
+ new->page_count = pg_count;
+ new->num_scratch_pages = pg_count;
+ new->type = AGP_PHYS_MEMORY;
+ new->physical = page_to_phys(new->pages[0]);
+ return new;
+}
+
+static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
+{
+ struct agp_memory *new;
+
+ if (type == AGP_DCACHE_MEMORY) {
+ if (pg_count != intel_private.num_dcache_entries)
+ return NULL;
+
+ new = agp_create_memory(1);
+ if (new == NULL)
+ return NULL;
+
+ new->type = AGP_DCACHE_MEMORY;
+ new->page_count = pg_count;
+ new->num_scratch_pages = 0;
+ agp_free_page_array(new);
+ return new;
+ }
+ if (type == AGP_PHYS_MEMORY)
+ return alloc_agpphysmem_i8xx(pg_count, type);
+ return NULL;
+}
+
+static void intel_i810_free_by_type(struct agp_memory *curr)
+{
+ agp_free_key(curr->key);
+ if (curr->type == AGP_PHYS_MEMORY) {
+ if (curr->page_count == 4)
+ i8xx_destroy_pages(curr->pages[0]);
+ else {
+ agp_bridge->driver->agp_destroy_page(curr->pages[0],
+ AGP_PAGE_DESTROY_UNMAP);
+ agp_bridge->driver->agp_destroy_page(curr->pages[0],
+ AGP_PAGE_DESTROY_FREE);
+ }
+ agp_free_page_array(curr);
+ }
+ kfree(curr);
+}
+
+static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
+ dma_addr_t addr, int type)
+{
+ /* Type checking must be done elsewhere */
+ return addr | bridge->driver->masks[type].mask;
+}
+
+static struct aper_size_info_fixed intel_i830_sizes[] =
+{
+ {128, 32768, 5},
+ /* The 64M mode still requires a 128k gatt */
+ {64, 16384, 5},
+ {256, 65536, 6},
+ {512, 131072, 7},
+};
+
+static void intel_i830_init_gtt_entries(void)
+{
+ u16 gmch_ctrl;
+ int gtt_entries = 0;
+ u8 rdct;
+ int local = 0;
+ static const int ddt[4] = { 0, 16, 32, 64 };
+ int size; /* reserved space (in kb) at the top of stolen memory */
+
+ pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
+
+ if (IS_I965) {
+ u32 pgetbl_ctl;
+ pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
+
+ /* The 965 has a field telling us the size of the GTT,
+ * which may be larger than what is necessary to map the
+ * aperture.
+ */
+ switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
+ case I965_PGETBL_SIZE_128KB:
+ size = 128;
+ break;
+ case I965_PGETBL_SIZE_256KB:
+ size = 256;
+ break;
+ case I965_PGETBL_SIZE_512KB:
+ size = 512;
+ break;
+ case I965_PGETBL_SIZE_1MB:
+ size = 1024;
+ break;
+ case I965_PGETBL_SIZE_2MB:
+ size = 2048;
+ break;
+ case I965_PGETBL_SIZE_1_5MB:
+ size = 1024 + 512;
+ break;
+ default:
+ dev_info(&intel_private.pcidev->dev,
+ "unknown page table size, assuming 512KB\n");
+ size = 512;
+ }
+ size += 4; /* add in BIOS popup space */
+ } else if (IS_G33 && !IS_PINEVIEW) {
+ /* G33's GTT size defined in gmch_ctrl */
+ switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
+ case G33_PGETBL_SIZE_1M:
+ size = 1024;
+ break;
+ case G33_PGETBL_SIZE_2M:
+ size = 2048;
+ break;
+ default:
+ dev_info(&agp_bridge->dev->dev,
+ "unknown page table size 0x%x, assuming 512KB\n",
+ (gmch_ctrl & G33_PGETBL_SIZE_MASK));
+ size = 512;
+ }
+ size += 4;
+ } else if (IS_G4X || IS_PINEVIEW) {
+ /* On 4 series hardware, GTT stolen is separate from graphics
+ * stolen; ignore it when counting stolen gtt entries. However,
+ * 4KB of the stolen memory doesn't get mapped to the GTT.
+ */
+ size = 4;
+ } else {
+ /* On previous hardware, the GTT size was just what was
+ * required to map the aperture.
+ */
+ size = agp_bridge->driver->fetch_size() + 4;
+ }
+
+ if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
+ switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
+ case I830_GMCH_GMS_STOLEN_512:
+ gtt_entries = KB(512) - KB(size);
+ break;
+ case I830_GMCH_GMS_STOLEN_1024:
+ gtt_entries = MB(1) - KB(size);
+ break;
+ case I830_GMCH_GMS_STOLEN_8192:
+ gtt_entries = MB(8) - KB(size);
+ break;
+ case I830_GMCH_GMS_LOCAL:
+ rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
+ gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
+ MB(ddt[I830_RDRAM_DDT(rdct)]);
+ local = 1;
+ break;
+ default:
+ gtt_entries = 0;
+ break;
+ }
+ } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
+ /*
+ * SandyBridge has new memory control reg at 0x50.w
+ */
+ u16 snb_gmch_ctl;
+ pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+ switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
+ case SNB_GMCH_GMS_STOLEN_32M:
+ gtt_entries = MB(32) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_64M:
+ gtt_entries = MB(64) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_96M:
+ gtt_entries = MB(96) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_128M:
+ gtt_entries = MB(128) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_160M:
+ gtt_entries = MB(160) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_192M:
+ gtt_entries = MB(192) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_224M:
+ gtt_entries = MB(224) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_256M:
+ gtt_entries = MB(256) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_288M:
+ gtt_entries = MB(288) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_320M:
+ gtt_entries = MB(320) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_352M:
+ gtt_entries = MB(352) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_384M:
+ gtt_entries = MB(384) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_416M:
+ gtt_entries = MB(416) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_448M:
+ gtt_entries = MB(448) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_480M:
+ gtt_entries = MB(480) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_512M:
+ gtt_entries = MB(512) - KB(size);
+ break;
+ }
+ } else {
+ switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
+ case I855_GMCH_GMS_STOLEN_1M:
+ gtt_entries = MB(1) - KB(size);
+ break;
+ case I855_GMCH_GMS_STOLEN_4M:
+ gtt_entries = MB(4) - KB(size);
+ break;
+ case I855_GMCH_GMS_STOLEN_8M:
+ gtt_entries = MB(8) - KB(size);
+ break;
+ case I855_GMCH_GMS_STOLEN_16M:
+ gtt_entries = MB(16) - KB(size);
+ break;
+ case I855_GMCH_GMS_STOLEN_32M:
+ gtt_entries = MB(32) - KB(size);
+ break;
+ case I915_GMCH_GMS_STOLEN_48M:
+ /* Check it's really I915G */
+ if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
+ gtt_entries = MB(48) - KB(size);
+ else
+ gtt_entries = 0;
+ break;
+ case I915_GMCH_GMS_STOLEN_64M:
+ /* Check it's really I915G */
+ if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
+ gtt_entries = MB(64) - KB(size);
+ else
+ gtt_entries = 0;
+ break;
+ case G33_GMCH_GMS_STOLEN_128M:
+ if (IS_G33 || IS_I965 || IS_G4X)
+ gtt_entries = MB(128) - KB(size);
+ else
+ gtt_entries = 0;
+ break;
+ case G33_GMCH_GMS_STOLEN_256M:
+ if (IS_G33 || IS_I965 || IS_G4X)
+ gtt_entries = MB(256) - KB(size);
+ else
+ gtt_entries = 0;
+ break;
+ case INTEL_GMCH_GMS_STOLEN_96M:
+ if (IS_I965 || IS_G4X)
+ gtt_entries = MB(96) - KB(size);
+ else
+ gtt_entries = 0;
+ break;
+ case INTEL_GMCH_GMS_STOLEN_160M:
+ if (IS_I965 || IS_G4X)
+ gtt_entries = MB(160) - KB(size);
+ else
+ gtt_entries = 0;
+ break;
+ case INTEL_GMCH_GMS_STOLEN_224M:
+ if (IS_I965 || IS_G4X)
+ gtt_entries = MB(224) - KB(size);
+ else
+ gtt_entries = 0;
+ break;
+ case INTEL_GMCH_GMS_STOLEN_352M:
+ if (IS_I965 || IS_G4X)
+ gtt_entries = MB(352) - KB(size);
+ else
+ gtt_entries = 0;
+ break;
+ default:
+ gtt_entries = 0;
+ break;
+ }
+ }
+ if (gtt_entries > 0) {
+ dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
+ gtt_entries / KB(1), local ? "local" : "stolen");
+ gtt_entries /= KB(4);
+ } else {
+ dev_info(&agp_bridge->dev->dev,
+ "no pre-allocated video memory detected\n");
+ gtt_entries = 0;
+ }
+
+ intel_private.gtt_entries = gtt_entries;
+}
+
+static void intel_i830_fini_flush(void)
+{
+ kunmap(intel_private.i8xx_page);
+ intel_private.i8xx_flush_page = NULL;
+ unmap_page_from_agp(intel_private.i8xx_page);
+
+ __free_page(intel_private.i8xx_page);
+ intel_private.i8xx_page = NULL;
+}
+
+static void intel_i830_setup_flush(void)
+{
+ /* return if we've already set the flush mechanism up */
+ if (intel_private.i8xx_page)
+ return;
+
+ intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
+ if (!intel_private.i8xx_page)
+ return;
+
+ intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
+ if (!intel_private.i8xx_flush_page)
+ intel_i830_fini_flush();
+}
+
+/* The chipset_flush interface needs to get data that has already been
+ * flushed out of the CPU all the way out to main memory, because the GPU
+ * doesn't snoop those buffers.
+ *
+ * The 8xx series doesn't have the same lovely interface for flushing the
+ * chipset write buffers that the later chips do. According to the 865
+ * specs, it's 64 octwords, or 1KB. So, to get those previous things in
+ * that buffer out, we just fill 1KB and clflush it out, on the assumption
+ * that it'll push whatever was in there out. It appears to work.
+ */
+static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
+{
+ unsigned int *pg = intel_private.i8xx_flush_page;
+
+ memset(pg, 0, 1024);
+
+ if (cpu_has_clflush)
+ clflush_cache_range(pg, 1024);
+ else if (wbinvd_on_all_cpus() != 0)
+ printk(KERN_ERR "Timed out waiting for cache flush.\n");
+}
+
+/* The intel i830 automatically initializes the agp aperture during POST.
+ * Use the memory already set aside for the GTT.
+ */
+static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
+{
+ int page_order;
+ struct aper_size_info_fixed *size;
+ int num_entries;
+ u32 temp;
+
+ size = agp_bridge->current_size;
+ page_order = size->page_order;
+ num_entries = size->num_entries;
+ agp_bridge->gatt_table_real = NULL;
+
+ pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
+ temp &= 0xfff80000;
+
+ intel_private.registers = ioremap(temp, 128 * 4096);
+ if (!intel_private.registers)
+ return -ENOMEM;
+
+ temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
+ global_cache_flush(); /* FIXME: ?? */
+
+ /* we have to call this as early as possible after the MMIO base address is known */
+ intel_i830_init_gtt_entries();
+
+ agp_bridge->gatt_table = NULL;
+
+ agp_bridge->gatt_bus_addr = temp;
+
+ return 0;
+}
+
+/* Return the gatt table to a sane state. Use the top of stolen
+ * memory for the GTT.
+ */
+static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
+{
+ return 0;
+}
+
+static int intel_i830_fetch_size(void)
+{
+ u16 gmch_ctrl;
+ struct aper_size_info_fixed *values;
+
+ values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
+
+ if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
+ agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
+ /* 855GM/852GM/865G has 128MB aperture size */
+ agp_bridge->current_size = (void *) values;
+ agp_bridge->aperture_size_idx = 0;
+ return values[0].size;
+ }
+
+ pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
+
+ if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
+ agp_bridge->current_size = (void *) values;
+ agp_bridge->aperture_size_idx = 0;
+ return values[0].size;
+ } else {
+ agp_bridge->current_size = (void *) (values + 1);
+ agp_bridge->aperture_size_idx = 1;
+ return values[1].size;
+ }
+
+ return 0;
+}
+
+static int intel_i830_configure(void)
+{
+ struct aper_size_info_fixed *current_size;
+ u32 temp;
+ u16 gmch_ctrl;
+ int i;
+
+ current_size = A_SIZE_FIX(agp_bridge->current_size);
+
+ pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
+ gmch_ctrl |= I830_GMCH_ENABLED;
+ pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
+
+ writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
+ readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
+
+ if (agp_bridge->driver->needs_scratch_page) {
+ for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
+ writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
+ }
+ readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. */
+ }
+
+ global_cache_flush();
+
+ intel_i830_setup_flush();
+ return 0;
+}
+
+static void intel_i830_cleanup(void)
+{
+ iounmap(intel_private.registers);
+}
+
+static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ int i, j, num_entries;
+ void *temp;
+ int ret = -EINVAL;
+ int mask_type;
+
+ if (mem->page_count == 0)
+ goto out;
+
+ temp = agp_bridge->current_size;
+ num_entries = A_SIZE_FIX(temp)->num_entries;
+
+ if (pg_start < intel_private.gtt_entries) {
+ dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
+ "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
+ pg_start, intel_private.gtt_entries);
+
+ dev_info(&intel_private.pcidev->dev,
+ "trying to insert into local/stolen memory\n");
+ goto out_err;
+ }
+
+ if ((pg_start + mem->page_count) > num_entries)
+ goto out_err;
+
+ /* The i830 can't check the GTT for entries since it's read only;
+ * depend on the caller to make the correct offset decisions.
+ */
+
+ if (type != mem->type)
+ goto out_err;
+
+ mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
+
+ if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
+ mask_type != INTEL_AGP_CACHED_MEMORY)
+ goto out_err;
+
+ if (!mem->is_flushed)
+ global_cache_flush();
+
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
+ page_to_phys(mem->pages[i]), mask_type),
+ intel_private.registers+I810_PTE_BASE+(j*4));
+ }
+ readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
+
+out:
+ ret = 0;
+out_err:
+ mem->is_flushed = true;
+ return ret;
+}
+
+static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ int i;
+
+ if (mem->page_count == 0)
+ return 0;
+
+ if (pg_start < intel_private.gtt_entries) {
+ dev_info(&intel_private.pcidev->dev,
+ "trying to disable local/stolen memory\n");
+ return -EINVAL;
+ }
+
+ for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+ writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
+ }
+ readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
+
+ return 0;
+}
+
+static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
+{
+ if (type == AGP_PHYS_MEMORY)
+ return alloc_agpphysmem_i8xx(pg_count, type);
+ /* always return NULL for other allocation types for now */
+ return NULL;
+}
+
+static int intel_alloc_chipset_flush_resource(void)
+{
+ int ret;
+ ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
+ PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
+ pcibios_align_resource, agp_bridge->dev);
+
+ return ret;
+}
+
+static void intel_i915_setup_chipset_flush(void)
+{
+ int ret;
+ u32 temp;
+
+ pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp);
+ if (!(temp & 0x1)) {
+ intel_alloc_chipset_flush_resource();
+ intel_private.resource_valid = 1;
+ pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
+ } else {
+ temp &= ~1;
+
+ intel_private.resource_valid = 1;
+ intel_private.ifp_resource.start = temp;
+ intel_private.ifp_resource.end = temp + PAGE_SIZE;
+ ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
+ /* some BIOSes reserve this area in a pnp resource, some don't */
+ if (ret)
+ intel_private.resource_valid = 0;
+ }
+}
+
+static void intel_i965_g33_setup_chipset_flush(void)
+{
+ u32 temp_hi, temp_lo;
+ int ret;
+
+ pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi);
+ pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo);
+
+ if (!(temp_lo & 0x1)) {
+
+ intel_alloc_chipset_flush_resource();
+
+ intel_private.resource_valid = 1;
+ pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4,
+ upper_32_bits(intel_private.ifp_resource.start));
+ pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
+ } else {
+ u64 l64;
+
+ temp_lo &= ~0x1;
+ l64 = ((u64)temp_hi << 32) | temp_lo;
+
+ intel_private.resource_valid = 1;
+ intel_private.ifp_resource.start = l64;
+ intel_private.ifp_resource.end = l64 + PAGE_SIZE;
+ ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
+ /* some BIOSes reserve this area in a pnp resource, some don't */
+ if (ret)
+ intel_private.resource_valid = 0;
+ }
+}
+
+static void intel_i9xx_setup_flush(void)
+{
+ /* return if already configured */
+ if (intel_private.ifp_resource.start)
+ return;
+
+ if (IS_SNB)
+ return;
+
+ /* setup a resource for this object */
+ intel_private.ifp_resource.name = "Intel Flush Page";
+ intel_private.ifp_resource.flags = IORESOURCE_MEM;
+
+ /* Setup chipset flush for 915 */
+ if (IS_I965 || IS_G33 || IS_G4X) {
+ intel_i965_g33_setup_chipset_flush();
+ } else {
+ intel_i915_setup_chipset_flush();
+ }
+
+ if (intel_private.ifp_resource.start) {
+ intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
+ if (!intel_private.i9xx_flush_page)
+ dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing");
+ }
+}
+
+static int intel_i915_configure(void)
+{
+ struct aper_size_info_fixed *current_size;
+ u32 temp;
+ u16 gmch_ctrl;
+ int i;
+
+ current_size = A_SIZE_FIX(agp_bridge->current_size);
+
+ pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp);
+
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
+ gmch_ctrl |= I830_GMCH_ENABLED;
+ pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
+
+ writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
+ readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
+
+ if (agp_bridge->driver->needs_scratch_page) {
+ for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
+ writel(agp_bridge->scratch_page, intel_private.gtt+i);
+ }
+ readl(intel_private.gtt+i-1); /* PCI Posting. */
+ }
+
+ global_cache_flush();
+
+ intel_i9xx_setup_flush();
+
+ return 0;
+}
+
+static void intel_i915_cleanup(void)
+{
+ if (intel_private.i9xx_flush_page)
+ iounmap(intel_private.i9xx_flush_page);
+ if (intel_private.resource_valid)
+ release_resource(&intel_private.ifp_resource);
+ intel_private.ifp_resource.start = 0;
+ intel_private.resource_valid = 0;
+ iounmap(intel_private.gtt);
+ iounmap(intel_private.registers);
+}
+
+static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
+{
+ if (intel_private.i9xx_flush_page)
+ writel(1, intel_private.i9xx_flush_page);
+}
+
+static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ int num_entries;
+ void *temp;
+ int ret = -EINVAL;
+ int mask_type;
+
+ if (mem->page_count == 0)
+ goto out;
+
+ temp = agp_bridge->current_size;
+ num_entries = A_SIZE_FIX(temp)->num_entries;
+
+ if (pg_start < intel_private.gtt_entries) {
+ dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
+ "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
+ pg_start, intel_private.gtt_entries);
+
+ dev_info(&intel_private.pcidev->dev,
+ "trying to insert into local/stolen memory\n");
+ goto out_err;
+ }
+
+ if ((pg_start + mem->page_count) > num_entries)
+ goto out_err;
+
+ /* The i915 can't check the GTT for entries since it's read only;
+ * depend on the caller to make the correct offset decisions.
+ */
+
+ if (type != mem->type)
+ goto out_err;
+
+ mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
+
+ if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
+ mask_type != INTEL_AGP_CACHED_MEMORY)
+ goto out_err;
+
+ if (!mem->is_flushed)
+ global_cache_flush();
+
+ intel_agp_insert_sg_entries(mem, pg_start, mask_type);
+
+ out:
+ ret = 0;
+ out_err:
+ mem->is_flushed = true;
+ return ret;
+}
+
+static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ int i;
+
+ if (mem->page_count == 0)
+ return 0;
+
+ if (pg_start < intel_private.gtt_entries) {
+ dev_info(&intel_private.pcidev->dev,
+ "trying to disable local/stolen memory\n");
+ return -EINVAL;
+ }
+
+ for (i = pg_start; i < (mem->page_count + pg_start); i++)
+ writel(agp_bridge->scratch_page, intel_private.gtt+i);
+
+ readl(intel_private.gtt+i-1);
+
+ return 0;
+}
+
+/* Return the aperture size by just checking the resource length. The effect
+ * of the MSAC registers described in the spec is simply a change of the
+ * resource size.
+ */
+static int intel_i9xx_fetch_size(void)
+{
+ int num_sizes = ARRAY_SIZE(intel_i830_sizes);
+ int aper_size; /* size in megabytes */
+ int i;
+
+ aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1);
+
+ for (i = 0; i < num_sizes; i++) {
+ if (aper_size == intel_i830_sizes[i].size) {
+ agp_bridge->current_size = intel_i830_sizes + i;
+ return aper_size;
+ }
+ }
+
+ return 0;
+}
+
+/* The intel i915 automatically initializes the agp aperture during POST.
+ * Use the memory already set aside for the GTT.
+ */
+static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
+{
+ int page_order;
+ struct aper_size_info_fixed *size;
+ int num_entries;
+ u32 temp, temp2;
+ int gtt_map_size = 256 * 1024;
+
+ size = agp_bridge->current_size;
+ page_order = size->page_order;
+ num_entries = size->num_entries;
+ agp_bridge->gatt_table_real = NULL;
+
+ pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
+ pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);
+
+ if (IS_G33)
+ gtt_map_size = 1024 * 1024; /* 1M on G33 */
+ intel_private.gtt = ioremap(temp2, gtt_map_size);
+ if (!intel_private.gtt)
+ return -ENOMEM;
+
+ intel_private.gtt_total_size = gtt_map_size / 4;
+
+ temp &= 0xfff80000;
+
+ intel_private.registers = ioremap(temp, 128 * 4096);
+ if (!intel_private.registers) {
+ iounmap(intel_private.gtt);
+ return -ENOMEM;
+ }
+
+ temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
+ global_cache_flush(); /* FIXME: ? */
+
+ /* we have to call this as early as possible after the MMIO base address is known */
+ intel_i830_init_gtt_entries();
+
+ agp_bridge->gatt_table = NULL;
+
+ agp_bridge->gatt_bus_addr = temp;
+
+ return 0;
+}
+
+/*
+ * The i965 supports 36-bit physical addresses, but to keep
+ * the format of the GTT the same, the bits that don't fit
+ * in a 32-bit word are shifted down to bits 4..7.
+ *
+ * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
+ * is always zero on 32-bit architectures, so no need to make
+ * this conditional.
+ */
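+/* For example: a page at physical address 0x8_0000_0000 has only bit 35
+ * set; (addr >> 28) & 0xf0 folds that into bit 7, so the 32-bit PTE
+ * written to the GTT carries the high address bits in bits 4..7 next to
+ * the low address bits and the type mask.
+ */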
+static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
+ dma_addr_t addr, int type)
+{
+ /* Shift high bits down */
+ addr |= (addr >> 28) & 0xf0;
+
+ /* Type checking must be done elsewhere */
+ return addr | bridge->driver->masks[type].mask;
+}
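
As a worked illustration of the bit-packing described in the comment above, here is a
stand-alone userspace sketch with an illustrative address; the low "valid" bit is an
assumption for the example and is not taken from this patch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t addr = 0x340000000ULL;      /* a 36-bit physical page address */
	uint32_t pte;

	addr |= (addr >> 28) & 0xf0;         /* bits 32..35 folded into bits 4..7 */
	pte = (uint32_t)addr | 0x1;          /* assumed "valid" bit from the mask */

	printf("pte = 0x%08x\n", pte);       /* prints pte = 0x40000031 */
	return 0;
}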
+
+static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
+{
+ u16 snb_gmch_ctl;
+
+ switch (agp_bridge->dev->device) {
+ case PCI_DEVICE_ID_INTEL_GM45_HB:
+ case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
+ case PCI_DEVICE_ID_INTEL_Q45_HB:
+ case PCI_DEVICE_ID_INTEL_G45_HB:
+ case PCI_DEVICE_ID_INTEL_G41_HB:
+ case PCI_DEVICE_ID_INTEL_B43_HB:
+ case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
+ case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
+ case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
+ case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
+ *gtt_offset = *gtt_size = MB(2);
+ break;
+ case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
+ case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
+ *gtt_offset = MB(2);
+
+ pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+ switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
+ default:
+ case SNB_GTT_SIZE_0M:
+ printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
+ *gtt_size = MB(0);
+ break;
+ case SNB_GTT_SIZE_1M:
+ *gtt_size = MB(1);
+ break;
+ case SNB_GTT_SIZE_2M:
+ *gtt_size = MB(2);
+ break;
+ }
+ break;
+ default:
+ *gtt_offset = *gtt_size = KB(512);
+ }
+}
+
+/* The intel i965 automatically initializes the agp aperture during POST.
+ * Use the memory already set aside for the GTT.
+ */
+static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
+{
+ int page_order;
+ struct aper_size_info_fixed *size;
+ int num_entries;
+ u32 temp;
+ int gtt_offset, gtt_size;
+
+ size = agp_bridge->current_size;
+ page_order = size->page_order;
+ num_entries = size->num_entries;
+ agp_bridge->gatt_table_real = NULL;
+
+ pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
+
+ temp &= 0xfff00000;
+
+ intel_i965_get_gtt_range(&gtt_offset, &gtt_size);
+
+ intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size);
+
+ if (!intel_private.gtt)
+ return -ENOMEM;
+
+ intel_private.gtt_total_size = gtt_size / 4;
+
+ intel_private.registers = ioremap(temp, 128 * 4096);
+ if (!intel_private.registers) {
+ iounmap(intel_private.gtt);
+ return -ENOMEM;
+ }
+
+ temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
+ global_cache_flush(); /* FIXME: ? */
+
+ /* we have to call this as early as possible after the MMIO base address is known */
+ intel_i830_init_gtt_entries();
+
+ agp_bridge->gatt_table = NULL;
+
+ agp_bridge->gatt_bus_addr = temp;
+
+ return 0;
+}
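
A worked example of the sizing arithmetic above (stand-alone sketch; the 2 MB figure
is the GM45/Ironlake case returned by intel_i965_get_gtt_range() earlier in this hunk):

#include <stdio.h>

int main(void)
{
	long gtt_size = 2 * 1024 * 1024;                 /* bytes of GTT space */
	long entries  = gtt_size / 4;                    /* one 32-bit PTE each: 524288 */
	long long mappable = (long long)entries * 4096;  /* one 4 KB page per PTE */

	printf("%ld PTEs map %lld bytes (2 GB) of aperture\n", entries, mappable);
	return 0;
}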
+
+static const struct agp_bridge_driver intel_810_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_i810_sizes,
+ .size_type = FIXED_APER_SIZE,
+ .num_aperture_sizes = 2,
+ .needs_scratch_page = true,
+ .configure = intel_i810_configure,
+ .fetch_size = intel_i810_fetch_size,
+ .cleanup = intel_i810_cleanup,
+ .mask_memory = intel_i810_mask_memory,
+ .masks = intel_i810_masks,
+ .agp_enable = intel_i810_agp_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = intel_i810_insert_entries,
+ .remove_memory = intel_i810_remove_entries,
+ .alloc_by_type = intel_i810_alloc_by_type,
+ .free_by_type = intel_i810_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+static const struct agp_bridge_driver intel_830_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_i830_sizes,
+ .size_type = FIXED_APER_SIZE,
+ .num_aperture_sizes = 4,
+ .needs_scratch_page = true,
+ .configure = intel_i830_configure,
+ .fetch_size = intel_i830_fetch_size,
+ .cleanup = intel_i830_cleanup,
+ .mask_memory = intel_i810_mask_memory,
+ .masks = intel_i810_masks,
+ .agp_enable = intel_i810_agp_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = intel_i830_create_gatt_table,
+ .free_gatt_table = intel_i830_free_gatt_table,
+ .insert_memory = intel_i830_insert_entries,
+ .remove_memory = intel_i830_remove_entries,
+ .alloc_by_type = intel_i830_alloc_by_type,
+ .free_by_type = intel_i810_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = intel_i830_type_to_mask_type,
+ .chipset_flush = intel_i830_chipset_flush,
+};
+
+static const struct agp_bridge_driver intel_915_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_i830_sizes,
+ .size_type = FIXED_APER_SIZE,
+ .num_aperture_sizes = 4,
+ .needs_scratch_page = true,
+ .configure = intel_i915_configure,
+ .fetch_size = intel_i9xx_fetch_size,
+ .cleanup = intel_i915_cleanup,
+ .mask_memory = intel_i810_mask_memory,
+ .masks = intel_i810_masks,
+ .agp_enable = intel_i810_agp_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = intel_i915_create_gatt_table,
+ .free_gatt_table = intel_i830_free_gatt_table,
+ .insert_memory = intel_i915_insert_entries,
+ .remove_memory = intel_i915_remove_entries,
+ .alloc_by_type = intel_i830_alloc_by_type,
+ .free_by_type = intel_i810_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = intel_i830_type_to_mask_type,
+ .chipset_flush = intel_i915_chipset_flush,
+#ifdef USE_PCI_DMA_API
+ .agp_map_page = intel_agp_map_page,
+ .agp_unmap_page = intel_agp_unmap_page,
+ .agp_map_memory = intel_agp_map_memory,
+ .agp_unmap_memory = intel_agp_unmap_memory,
+#endif
+};
+
+static const struct agp_bridge_driver intel_i965_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_i830_sizes,
+ .size_type = FIXED_APER_SIZE,
+ .num_aperture_sizes = 4,
+ .needs_scratch_page = true,
+ .configure = intel_i915_configure,
+ .fetch_size = intel_i9xx_fetch_size,
+ .cleanup = intel_i915_cleanup,
+ .mask_memory = intel_i965_mask_memory,
+ .masks = intel_i810_masks,
+ .agp_enable = intel_i810_agp_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = intel_i965_create_gatt_table,
+ .free_gatt_table = intel_i830_free_gatt_table,
+ .insert_memory = intel_i915_insert_entries,
+ .remove_memory = intel_i915_remove_entries,
+ .alloc_by_type = intel_i830_alloc_by_type,
+ .free_by_type = intel_i810_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = intel_i830_type_to_mask_type,
+ .chipset_flush = intel_i915_chipset_flush,
+#ifdef USE_PCI_DMA_API
+ .agp_map_page = intel_agp_map_page,
+ .agp_unmap_page = intel_agp_unmap_page,
+ .agp_map_memory = intel_agp_map_memory,
+ .agp_unmap_memory = intel_agp_unmap_memory,
+#endif
+};
+
+static const struct agp_bridge_driver intel_g33_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_i830_sizes,
+ .size_type = FIXED_APER_SIZE,
+ .num_aperture_sizes = 4,
+ .needs_scratch_page = true,
+ .configure = intel_i915_configure,
+ .fetch_size = intel_i9xx_fetch_size,
+ .cleanup = intel_i915_cleanup,
+ .mask_memory = intel_i965_mask_memory,
+ .masks = intel_i810_masks,
+ .agp_enable = intel_i810_agp_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = intel_i915_create_gatt_table,
+ .free_gatt_table = intel_i830_free_gatt_table,
+ .insert_memory = intel_i915_insert_entries,
+ .remove_memory = intel_i915_remove_entries,
+ .alloc_by_type = intel_i830_alloc_by_type,
+ .free_by_type = intel_i810_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = intel_i830_type_to_mask_type,
+ .chipset_flush = intel_i915_chipset_flush,
+#ifdef USE_PCI_DMA_API
+ .agp_map_page = intel_agp_map_page,
+ .agp_unmap_page = intel_agp_unmap_page,
+ .agp_map_memory = intel_agp_map_memory,
+ .agp_unmap_memory = intel_agp_unmap_memory,
+#endif
+};
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
index 10f24e349a26..b9734a978186 100644
--- a/drivers/char/agp/nvidia-agp.c
+++ b/drivers/char/agp/nvidia-agp.c
@@ -310,6 +310,7 @@ static const struct agp_bridge_driver nvidia_driver = {
.aperture_sizes = nvidia_generic_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 5,
+ .needs_scratch_page = true,
.configure = nvidia_configure,
.fetch_size = nvidia_fetch_size,
.cleanup = nvidia_cleanup,
diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c
index 6c3837a0184d..b53d5f4e9cb2 100644
--- a/drivers/char/agp/sis-agp.c
+++ b/drivers/char/agp/sis-agp.c
@@ -125,6 +125,7 @@ static struct agp_bridge_driver sis_driver = {
.aperture_sizes = sis_generic_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 7,
+ .needs_scratch_page = true,
.configure = sis_configure,
.fetch_size = sis_fetch_size,
.cleanup = sis_cleanup,
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
index 6f48931ac1ce..95db71360d24 100644
--- a/drivers/char/agp/uninorth-agp.c
+++ b/drivers/char/agp/uninorth-agp.c
@@ -28,6 +28,7 @@
*/
static int uninorth_rev;
static int is_u3;
+static u32 scratch_value;
#define DEFAULT_APERTURE_SIZE 256
#define DEFAULT_APERTURE_STRING "256"
@@ -172,7 +173,7 @@ static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, int ty
gp = (u32 *) &agp_bridge->gatt_table[pg_start];
for (i = 0; i < mem->page_count; ++i) {
- if (gp[i]) {
+ if (gp[i] != scratch_value) {
dev_info(&agp_bridge->dev->dev,
"uninorth_insert_memory: entry 0x%x occupied (%x)\n",
i, gp[i]);
@@ -214,8 +215,9 @@ int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
return 0;
gp = (u32 *) &agp_bridge->gatt_table[pg_start];
- for (i = 0; i < mem->page_count; ++i)
- gp[i] = 0;
+ for (i = 0; i < mem->page_count; ++i) {
+ gp[i] = scratch_value;
+ }
mb();
uninorth_tlbflush(mem);
@@ -421,8 +423,13 @@ static int uninorth_create_gatt_table(struct agp_bridge_data *bridge)
bridge->gatt_bus_addr = virt_to_phys(table);
+ if (is_u3)
+ scratch_value = (page_to_phys(agp_bridge->scratch_page_page) >> PAGE_SHIFT) | 0x80000000UL;
+ else
+ scratch_value = cpu_to_le32((page_to_phys(agp_bridge->scratch_page_page) & 0xFFFFF000UL) |
+ 0x1UL);
for (i = 0; i < num_entries; i++)
- bridge->gatt_table[i] = 0;
+ bridge->gatt_table[i] = scratch_value;
return 0;
@@ -519,6 +526,7 @@ const struct agp_bridge_driver uninorth_agp_driver = {
.agp_destroy_pages = agp_generic_destroy_pages,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
.cant_use_aperture = true,
+ .needs_scratch_page = true,
};
const struct agp_bridge_driver u3_agp_driver = {
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c
index d3bd243867fc..df67e80019d2 100644
--- a/drivers/char/agp/via-agp.c
+++ b/drivers/char/agp/via-agp.c
@@ -175,6 +175,7 @@ static const struct agp_bridge_driver via_agp3_driver = {
.aperture_sizes = agp3_generic_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 10,
+ .needs_scratch_page = true,
.configure = via_configure_agp3,
.fetch_size = via_fetch_size_agp3,
.cleanup = via_cleanup_agp3,
@@ -201,6 +202,7 @@ static const struct agp_bridge_driver via_driver = {
.aperture_sizes = via_generic_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 9,
+ .needs_scratch_page = true,
.configure = via_configure,
.fetch_size = via_fetch_size,
.cleanup = via_cleanup,
diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c
index 7fef305774de..89d871ef8c2f 100644
--- a/drivers/char/bsr.c
+++ b/drivers/char/bsr.c
@@ -253,7 +253,7 @@ static int bsr_add_node(struct device_node *bn)
cur->bsr_device = device_create(bsr_class, NULL, cur->bsr_dev,
cur, cur->bsr_name);
- if (!cur->bsr_device) {
+ if (IS_ERR(cur->bsr_device)) {
printk(KERN_ERR "device_create failed for %s\n",
cur->bsr_name);
cdev_del(&cur->bsr_cdev);
diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
index 5a80ad68ef22..7b01bc609de3 100644
--- a/drivers/char/hvc_iucv.c
+++ b/drivers/char/hvc_iucv.c
@@ -1149,7 +1149,7 @@ out_err:
* Note: If it is called early in the boot process, @val is stored and
* parsed later in hvc_iucv_init().
*/
-static int param_set_vmidfilter(const char *val, struct kernel_param *kp)
+static int param_set_vmidfilter(const char *val, const struct kernel_param *kp)
{
int rc;
@@ -1176,7 +1176,7 @@ static int param_set_vmidfilter(const char *val, struct kernel_param *kp)
* The function stores the filter as a comma-separated list of z/VM user IDs
* in @buffer. Typically, sysfs routines call this function for attr show.
*/
-static int param_get_vmidfilter(char *buffer, struct kernel_param *kp)
+static int param_get_vmidfilter(char *buffer, const struct kernel_param *kp)
{
int rc;
size_t index, len;
@@ -1203,6 +1203,11 @@ static int param_get_vmidfilter(char *buffer, struct kernel_param *kp)
#define param_check_vmidfilter(name, p) __param_check(name, p, void)
+static struct kernel_param_ops param_ops_vmidfilter = {
+ .set = param_set_vmidfilter,
+ .get = param_get_vmidfilter,
+};
+
/**
* hvc_iucv_init() - z/VM IUCV HVC device driver initialization
*/
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index 64fe0a793efd..75f1cbd61c17 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -32,7 +32,7 @@ static bool busy;
static void random_recv_done(struct virtqueue *vq)
{
/* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */
- if (!vq->vq_ops->get_buf(vq, &data_avail))
+ if (!virtqueue_get_buf(vq, &data_avail))
return;
complete(&have_data);
@@ -46,10 +46,10 @@ static void register_buffer(u8 *buf, size_t size)
sg_init_one(&sg, buf, size);
/* There should always be room for one buffer. */
- if (vq->vq_ops->add_buf(vq, &sg, 0, 1, buf) < 0)
+ if (virtqueue_add_buf(vq, &sg, 0, 1, buf) < 0)
BUG();
- vq->vq_ops->kick(vq);
+ virtqueue_kick(vq);
}
static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
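
The conversion above (and the virtio_console changes later in this patch) replaces the
indirect vq->vq_ops->...() calls with the virtqueue_*() wrappers. A minimal sketch of
the resulting produce/kick/reap cycle, using a hypothetical helper that is not part of
this patch (assumes <linux/virtio.h> and <linux/scatterlist.h>):

static void example_send_and_reap(struct virtqueue *vq, void *buf, size_t len)
{
	struct scatterlist sg;
	unsigned int used_len;

	sg_init_one(&sg, buf, len);

	/* queue one out-buffer; buf is the token handed back by get_buf */
	if (virtqueue_add_buf(vq, &sg, 1, 0, buf) < 0)
		return;

	virtqueue_kick(vq);			/* notify the host */

	while (!virtqueue_get_buf(vq, &used_len))
		cpu_relax();			/* spin until the host consumes it */
}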
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index a4d57e31f713..45999d81004f 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -196,7 +196,7 @@ static void ipmi_unregister_watchdog(int ipmi_intf);
*/
static int start_now;
-static int set_param_int(const char *val, struct kernel_param *kp)
+static int set_param_timeout(const char *val, const struct kernel_param *kp)
{
char *endp;
int l;
@@ -215,10 +215,11 @@ static int set_param_int(const char *val, struct kernel_param *kp)
return rv;
}
-static int get_param_int(char *buffer, struct kernel_param *kp)
-{
- return sprintf(buffer, "%i", *((int *)kp->arg));
-}
+static struct kernel_param_ops param_ops_timeout = {
+ .set = set_param_timeout,
+ .get = param_get_int,
+};
+#define param_check_timeout param_check_int
typedef int (*action_fn)(const char *intval, char *outval);
@@ -227,7 +228,7 @@ static int preaction_op(const char *inval, char *outval);
static int preop_op(const char *inval, char *outval);
static void check_parms(void);
-static int set_param_str(const char *val, struct kernel_param *kp)
+static int set_param_str(const char *val, const struct kernel_param *kp)
{
action_fn fn = (action_fn) kp->arg;
int rv = 0;
@@ -251,7 +252,7 @@ static int set_param_str(const char *val, struct kernel_param *kp)
return rv;
}
-static int get_param_str(char *buffer, struct kernel_param *kp)
+static int get_param_str(char *buffer, const struct kernel_param *kp)
{
action_fn fn = (action_fn) kp->arg;
int rv;
@@ -263,7 +264,7 @@ static int get_param_str(char *buffer, struct kernel_param *kp)
}
-static int set_param_wdog_ifnum(const char *val, struct kernel_param *kp)
+static int set_param_wdog_ifnum(const char *val, const struct kernel_param *kp)
{
int rv = param_set_int(val, kp);
if (rv)
@@ -276,27 +277,38 @@ static int set_param_wdog_ifnum(const char *val, struct kernel_param *kp)
return 0;
}
-module_param_call(ifnum_to_use, set_param_wdog_ifnum, get_param_int,
- &ifnum_to_use, 0644);
+static struct kernel_param_ops param_ops_wdog_ifnum = {
+ .set = set_param_wdog_ifnum,
+ .get = param_get_int,
+};
+
+#define param_check_wdog_ifnum param_check_int
+
+static struct kernel_param_ops param_ops_str = {
+ .set = set_param_str,
+ .get = get_param_str,
+};
+
+module_param(ifnum_to_use, wdog_ifnum, 0644);
MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the watchdog "
"timer. Setting to -1 defaults to the first registered "
"interface");
-module_param_call(timeout, set_param_int, get_param_int, &timeout, 0644);
+module_param(timeout, timeout, 0644);
MODULE_PARM_DESC(timeout, "Timeout value in seconds.");
-module_param_call(pretimeout, set_param_int, get_param_int, &pretimeout, 0644);
+module_param(pretimeout, timeout, 0644);
MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds.");
-module_param_call(action, set_param_str, get_param_str, action_op, 0644);
+module_param_cb(action, &param_ops_str, action_op, 0644);
MODULE_PARM_DESC(action, "Timeout action. One of: "
"reset, none, power_cycle, power_off.");
-module_param_call(preaction, set_param_str, get_param_str, preaction_op, 0644);
+module_param_cb(preaction, &param_ops_str, preaction_op, 0644);
MODULE_PARM_DESC(preaction, "Pretimeout action. One of: "
"pre_none, pre_smi, pre_nmi, pre_int.");
-module_param_call(preop, set_param_str, get_param_str, preop_op, 0644);
+module_param_cb(preop, &param_ops_str, preop_op, 0644);
MODULE_PARM_DESC(preop, "Pretimeout driver operation. One of: "
"preop_none, preop_panic, preop_give_data.");
diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
index ada25bb8941e..54109dc9240c 100644
--- a/drivers/char/keyboard.c
+++ b/drivers/char/keyboard.c
@@ -24,6 +24,8 @@
* 21-08-02: Converted to input API, major cleanup. (Vojtech Pavlik)
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/consolemap.h>
#include <linux/module.h>
#include <linux/sched.h>
@@ -38,7 +40,6 @@
#include <linux/kbd_kern.h>
#include <linux/kbd_diacr.h>
#include <linux/vt_kern.h>
-#include <linux/sysrq.h>
#include <linux/input.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
@@ -82,8 +83,7 @@ void compute_shiftstate(void);
typedef void (k_handler_fn)(struct vc_data *vc, unsigned char value,
char up_flag);
static k_handler_fn K_HANDLERS;
-k_handler_fn *k_handler[16] = { K_HANDLERS };
-EXPORT_SYMBOL_GPL(k_handler);
+static k_handler_fn *k_handler[16] = { K_HANDLERS };
#define FN_HANDLERS\
fn_null, fn_enter, fn_show_ptregs, fn_show_mem,\
@@ -133,7 +133,7 @@ static struct input_handler kbd_handler;
static DEFINE_SPINLOCK(kbd_event_lock);
static unsigned long key_down[BITS_TO_LONGS(KEY_CNT)]; /* keyboard key bitmap */
static unsigned char shift_down[NR_SHIFT]; /* shift state counters.. */
-static int dead_key_next;
+static bool dead_key_next;
static int npadch = -1; /* -1 or number assembled on pad */
static unsigned int diacr;
static char rep; /* flag telling character repeat */
@@ -147,22 +147,6 @@ static struct ledptr {
unsigned char valid:1;
} ledptrs[3];
-/* Simple translation table for the SysRq keys */
-
-#ifdef CONFIG_MAGIC_SYSRQ
-unsigned char kbd_sysrq_xlate[KEY_MAX + 1] =
- "\000\0331234567890-=\177\t" /* 0x00 - 0x0f */
- "qwertyuiop[]\r\000as" /* 0x10 - 0x1f */
- "dfghjkl;'`\000\\zxcv" /* 0x20 - 0x2f */
- "bnm,./\000*\000 \000\201\202\203\204\205" /* 0x30 - 0x3f */
- "\206\207\210\211\212\000\000789-456+1" /* 0x40 - 0x4f */
- "230\177\000\000\213\214\000\000\000\000\000\000\000\000\000\000" /* 0x50 - 0x5f */
- "\r\000/"; /* 0x60 - 0x6f */
-static int sysrq_down;
-static int sysrq_alt_use;
-#endif
-static int sysrq_alt;
-
/*
* Notifier list for console keyboard events
*/
@@ -361,8 +345,8 @@ static void to_utf8(struct vc_data *vc, uint c)
/* 110***** 10****** */
put_queue(vc, 0xc0 | (c >> 6));
put_queue(vc, 0x80 | (c & 0x3f));
- } else if (c < 0x10000) {
- if (c >= 0xD800 && c < 0xE000)
+ } else if (c < 0x10000) {
+ if (c >= 0xD800 && c < 0xE000)
return;
if (c == 0xFFFF)
return;
@@ -370,7 +354,7 @@ static void to_utf8(struct vc_data *vc, uint c)
put_queue(vc, 0xe0 | (c >> 12));
put_queue(vc, 0x80 | ((c >> 6) & 0x3f));
put_queue(vc, 0x80 | (c & 0x3f));
- } else if (c < 0x110000) {
+ } else if (c < 0x110000) {
/* 11110*** 10****** 10****** 10****** */
put_queue(vc, 0xf0 | (c >> 18));
put_queue(vc, 0x80 | ((c >> 12) & 0x3f));
@@ -469,6 +453,7 @@ static void fn_enter(struct vc_data *vc)
}
diacr = 0;
}
+
put_queue(vc, 13);
if (vc_kbd_mode(kbd, VC_CRLF))
put_queue(vc, 10);
@@ -478,6 +463,7 @@ static void fn_caps_toggle(struct vc_data *vc)
{
if (rep)
return;
+
chg_vc_kbd_led(kbd, VC_CAPSLOCK);
}
@@ -485,12 +471,14 @@ static void fn_caps_on(struct vc_data *vc)
{
if (rep)
return;
+
set_vc_kbd_led(kbd, VC_CAPSLOCK);
}
static void fn_show_ptregs(struct vc_data *vc)
{
struct pt_regs *regs = get_irq_regs();
+
if (regs)
show_regs(regs);
}
@@ -515,7 +503,7 @@ static void fn_hold(struct vc_data *vc)
static void fn_num(struct vc_data *vc)
{
- if (vc_kbd_mode(kbd,VC_APPLIC))
+ if (vc_kbd_mode(kbd, VC_APPLIC))
applkey(vc, 'P', 1);
else
fn_bare_num(vc);
@@ -610,7 +598,7 @@ static void fn_boot_it(struct vc_data *vc)
static void fn_compose(struct vc_data *vc)
{
- dead_key_next = 1;
+ dead_key_next = true;
}
static void fn_spawn_con(struct vc_data *vc)
@@ -657,7 +645,7 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
static void k_lowercase(struct vc_data *vc, unsigned char value, char up_flag)
{
- printk(KERN_ERR "keyboard.c: k_lowercase was called - impossible\n");
+ pr_err("k_lowercase was called - impossible\n");
}
static void k_unicode(struct vc_data *vc, unsigned int value, char up_flag)
@@ -669,7 +657,7 @@ static void k_unicode(struct vc_data *vc, unsigned int value, char up_flag)
value = handle_diacr(vc, value);
if (dead_key_next) {
- dead_key_next = 0;
+ dead_key_next = false;
diacr = value;
return;
}
@@ -691,6 +679,7 @@ static void k_deadunicode(struct vc_data *vc, unsigned int value, char up_flag)
{
if (up_flag)
return;
+
diacr = (diacr ? handle_diacr(vc, value) : value);
}
@@ -710,29 +699,28 @@ static void k_dead2(struct vc_data *vc, unsigned char value, char up_flag)
static void k_dead(struct vc_data *vc, unsigned char value, char up_flag)
{
static const unsigned char ret_diacr[NR_DEAD] = {'`', '\'', '^', '~', '"', ',' };
- value = ret_diacr[value];
- k_deadunicode(vc, value, up_flag);
+
+ k_deadunicode(vc, ret_diacr[value], up_flag);
}
static void k_cons(struct vc_data *vc, unsigned char value, char up_flag)
{
if (up_flag)
return;
+
set_console(value);
}
static void k_fn(struct vc_data *vc, unsigned char value, char up_flag)
{
- unsigned v;
-
if (up_flag)
return;
- v = value;
- if (v < ARRAY_SIZE(func_table)) {
+
+ if ((unsigned)value < ARRAY_SIZE(func_table)) {
if (func_table[value])
puts_queue(vc, func_table[value]);
} else
- printk(KERN_ERR "k_fn called with value=%d\n", value);
+ pr_err("k_fn called with value=%d\n", value);
}
static void k_cur(struct vc_data *vc, unsigned char value, char up_flag)
@@ -741,6 +729,7 @@ static void k_cur(struct vc_data *vc, unsigned char value, char up_flag)
if (up_flag)
return;
+
applkey(vc, cur_chars[value], vc_kbd_mode(kbd, VC_CKMODE));
}
@@ -758,43 +747,45 @@ static void k_pad(struct vc_data *vc, unsigned char value, char up_flag)
return;
}
- if (!vc_kbd_led(kbd, VC_NUMLOCK))
+ if (!vc_kbd_led(kbd, VC_NUMLOCK)) {
+
switch (value) {
- case KVAL(K_PCOMMA):
- case KVAL(K_PDOT):
- k_fn(vc, KVAL(K_REMOVE), 0);
- return;
- case KVAL(K_P0):
- k_fn(vc, KVAL(K_INSERT), 0);
- return;
- case KVAL(K_P1):
- k_fn(vc, KVAL(K_SELECT), 0);
- return;
- case KVAL(K_P2):
- k_cur(vc, KVAL(K_DOWN), 0);
- return;
- case KVAL(K_P3):
- k_fn(vc, KVAL(K_PGDN), 0);
- return;
- case KVAL(K_P4):
- k_cur(vc, KVAL(K_LEFT), 0);
- return;
- case KVAL(K_P6):
- k_cur(vc, KVAL(K_RIGHT), 0);
- return;
- case KVAL(K_P7):
- k_fn(vc, KVAL(K_FIND), 0);
- return;
- case KVAL(K_P8):
- k_cur(vc, KVAL(K_UP), 0);
- return;
- case KVAL(K_P9):
- k_fn(vc, KVAL(K_PGUP), 0);
- return;
- case KVAL(K_P5):
- applkey(vc, 'G', vc_kbd_mode(kbd, VC_APPLIC));
- return;
+ case KVAL(K_PCOMMA):
+ case KVAL(K_PDOT):
+ k_fn(vc, KVAL(K_REMOVE), 0);
+ return;
+ case KVAL(K_P0):
+ k_fn(vc, KVAL(K_INSERT), 0);
+ return;
+ case KVAL(K_P1):
+ k_fn(vc, KVAL(K_SELECT), 0);
+ return;
+ case KVAL(K_P2):
+ k_cur(vc, KVAL(K_DOWN), 0);
+ return;
+ case KVAL(K_P3):
+ k_fn(vc, KVAL(K_PGDN), 0);
+ return;
+ case KVAL(K_P4):
+ k_cur(vc, KVAL(K_LEFT), 0);
+ return;
+ case KVAL(K_P6):
+ k_cur(vc, KVAL(K_RIGHT), 0);
+ return;
+ case KVAL(K_P7):
+ k_fn(vc, KVAL(K_FIND), 0);
+ return;
+ case KVAL(K_P8):
+ k_cur(vc, KVAL(K_UP), 0);
+ return;
+ case KVAL(K_P9):
+ k_fn(vc, KVAL(K_PGUP), 0);
+ return;
+ case KVAL(K_P5):
+ applkey(vc, 'G', vc_kbd_mode(kbd, VC_APPLIC));
+ return;
}
+ }
put_queue(vc, pad_chars[value]);
if (value == KVAL(K_PENTER) && vc_kbd_mode(kbd, VC_CRLF))
@@ -880,6 +871,7 @@ static void k_lock(struct vc_data *vc, unsigned char value, char up_flag)
{
if (up_flag || rep)
return;
+
chg_vc_kbd_lock(kbd, value);
}
@@ -888,6 +880,7 @@ static void k_slock(struct vc_data *vc, unsigned char value, char up_flag)
k_shift(vc, value, up_flag);
if (up_flag || rep)
return;
+
chg_vc_kbd_slock(kbd, value);
/* try to make Alt, oops, AltGr and such work */
if (!key_maps[kbd->lockstate ^ kbd->slockstate]) {
@@ -925,12 +918,12 @@ static void k_brlcommit(struct vc_data *vc, unsigned int pattern, char up_flag)
static void k_brl(struct vc_data *vc, unsigned char value, char up_flag)
{
- static unsigned pressed,committing;
+ static unsigned pressed, committing;
static unsigned long releasestart;
if (kbd->kbdmode != VC_UNICODE) {
if (!up_flag)
- printk("keyboard mode must be unicode for braille patterns\n");
+ pr_warning("keyboard mode must be unicode for braille patterns\n");
return;
}
@@ -942,32 +935,28 @@ static void k_brl(struct vc_data *vc, unsigned char value, char up_flag)
if (value > 8)
return;
- if (up_flag) {
- if (brl_timeout) {
- if (!committing ||
- time_after(jiffies,
- releasestart + msecs_to_jiffies(brl_timeout))) {
- committing = pressed;
- releasestart = jiffies;
- }
- pressed &= ~(1 << (value - 1));
- if (!pressed) {
- if (committing) {
- k_brlcommit(vc, committing, 0);
- committing = 0;
- }
- }
- } else {
- if (committing) {
- k_brlcommit(vc, committing, 0);
- committing = 0;
- }
- pressed &= ~(1 << (value - 1));
- }
- } else {
+ if (!up_flag) {
pressed |= 1 << (value - 1);
if (!brl_timeout)
committing = pressed;
+ } else if (brl_timeout) {
+ if (!committing ||
+ time_after(jiffies,
+ releasestart + msecs_to_jiffies(brl_timeout))) {
+ committing = pressed;
+ releasestart = jiffies;
+ }
+ pressed &= ~(1 << (value - 1));
+ if (!pressed && committing) {
+ k_brlcommit(vc, committing, 0);
+ committing = 0;
+ }
+ } else {
+ if (committing) {
+ k_brlcommit(vc, committing, 0);
+ committing = 0;
+ }
+ pressed &= ~(1 << (value - 1));
}
}
@@ -988,6 +977,7 @@ void setledstate(struct kbd_struct *kbd, unsigned int led)
kbd->ledmode = LED_SHOW_IOCTL;
} else
kbd->ledmode = LED_SHOW_FLAGS;
+
set_leds();
}
@@ -1075,7 +1065,7 @@ static const unsigned short x86_keycodes[256] =
332,340,365,342,343,344,345,346,356,270,341,368,369,370,371,372 };
#ifdef CONFIG_SPARC
-static int sparc_l1_a_state = 0;
+static int sparc_l1_a_state;
extern void sun_do_break(void);
#endif
@@ -1085,52 +1075,54 @@ static int emulate_raw(struct vc_data *vc, unsigned int keycode,
int code;
switch (keycode) {
- case KEY_PAUSE:
- put_queue(vc, 0xe1);
- put_queue(vc, 0x1d | up_flag);
- put_queue(vc, 0x45 | up_flag);
- break;
- case KEY_HANGEUL:
- if (!up_flag)
- put_queue(vc, 0xf2);
- break;
+ case KEY_PAUSE:
+ put_queue(vc, 0xe1);
+ put_queue(vc, 0x1d | up_flag);
+ put_queue(vc, 0x45 | up_flag);
+ break;
- case KEY_HANJA:
- if (!up_flag)
- put_queue(vc, 0xf1);
- break;
+ case KEY_HANGEUL:
+ if (!up_flag)
+ put_queue(vc, 0xf2);
+ break;
- case KEY_SYSRQ:
- /*
- * Real AT keyboards (that's what we're trying
- * to emulate here emit 0xe0 0x2a 0xe0 0x37 when
- * pressing PrtSc/SysRq alone, but simply 0x54
- * when pressing Alt+PrtSc/SysRq.
- */
- if (sysrq_alt) {
- put_queue(vc, 0x54 | up_flag);
- } else {
- put_queue(vc, 0xe0);
- put_queue(vc, 0x2a | up_flag);
- put_queue(vc, 0xe0);
- put_queue(vc, 0x37 | up_flag);
- }
- break;
+ case KEY_HANJA:
+ if (!up_flag)
+ put_queue(vc, 0xf1);
+ break;
- default:
- if (keycode > 255)
- return -1;
+ case KEY_SYSRQ:
+ /*
+ * Real AT keyboards (that's what we're trying
+ * to emulate here emit 0xe0 0x2a 0xe0 0x37 when
+		 * to emulate here) emit 0xe0 0x2a 0xe0 0x37 when
+ * when pressing Alt+PrtSc/SysRq.
+ */
+ if (test_bit(KEY_LEFTALT, key_down) ||
+ test_bit(KEY_RIGHTALT, key_down)) {
+ put_queue(vc, 0x54 | up_flag);
+ } else {
+ put_queue(vc, 0xe0);
+ put_queue(vc, 0x2a | up_flag);
+ put_queue(vc, 0xe0);
+ put_queue(vc, 0x37 | up_flag);
+ }
+ break;
- code = x86_keycodes[keycode];
- if (!code)
- return -1;
+ default:
+ if (keycode > 255)
+ return -1;
- if (code & 0x100)
- put_queue(vc, 0xe0);
- put_queue(vc, (code & 0x7f) | up_flag);
+ code = x86_keycodes[keycode];
+ if (!code)
+ return -1;
- break;
+ if (code & 0x100)
+ put_queue(vc, 0xe0);
+ put_queue(vc, (code & 0x7f) | up_flag);
+
+ break;
}
return 0;
@@ -1153,6 +1145,7 @@ static int emulate_raw(struct vc_data *vc, unsigned int keycode, unsigned char u
static void kbd_rawcode(unsigned char data)
{
struct vc_data *vc = vc_cons[fg_console].d;
+
kbd = kbd_table + vc->vc_num;
if (kbd->kbdmode == VC_RAW)
put_queue(vc, data);
@@ -1162,10 +1155,12 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw)
{
struct vc_data *vc = vc_cons[fg_console].d;
unsigned short keysym, *key_map;
- unsigned char type, raw_mode;
+ unsigned char type;
+ bool raw_mode;
struct tty_struct *tty;
int shift_final;
struct keyboard_notifier_param param = { .vc = vc, .value = keycode, .down = down };
+ int rc;
tty = vc->vc_tty;
@@ -1176,8 +1171,6 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw)
kbd = kbd_table + vc->vc_num;
- if (keycode == KEY_LEFTALT || keycode == KEY_RIGHTALT)
- sysrq_alt = down ? keycode : 0;
#ifdef CONFIG_SPARC
if (keycode == KEY_STOP)
sparc_l1_a_state = down;
@@ -1185,29 +1178,16 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw)
rep = (down == 2);
- if ((raw_mode = (kbd->kbdmode == VC_RAW)) && !hw_raw)
+ raw_mode = (kbd->kbdmode == VC_RAW);
+ if (raw_mode && !hw_raw)
if (emulate_raw(vc, keycode, !down << 7))
if (keycode < BTN_MISC && printk_ratelimit())
- printk(KERN_WARNING "keyboard.c: can't emulate rawmode for keycode %d\n", keycode);
+ pr_warning("can't emulate rawmode for keycode %d\n",
+ keycode);
-#ifdef CONFIG_MAGIC_SYSRQ /* Handle the SysRq Hack */
- if (keycode == KEY_SYSRQ && (sysrq_down || (down == 1 && sysrq_alt))) {
- if (!sysrq_down) {
- sysrq_down = down;
- sysrq_alt_use = sysrq_alt;
- }
- return;
- }
- if (sysrq_down && !down && keycode == sysrq_alt_use)
- sysrq_down = 0;
- if (sysrq_down && down && !rep) {
- handle_sysrq(kbd_sysrq_xlate[keycode], tty);
- return;
- }
-#endif
#ifdef CONFIG_SPARC
if (keycode == KEY_A && sparc_l1_a_state) {
- sparc_l1_a_state = 0;
+ sparc_l1_a_state = false;
sun_do_break();
}
#endif
@@ -1229,7 +1209,7 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw)
put_queue(vc, (keycode >> 7) | 0x80);
put_queue(vc, keycode | 0x80);
}
- raw_mode = 1;
+ raw_mode = true;
}
if (down)
@@ -1252,29 +1232,32 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw)
param.ledstate = kbd->ledflagstate;
key_map = key_maps[shift_final];
- if (atomic_notifier_call_chain(&keyboard_notifier_list, KBD_KEYCODE, &param) == NOTIFY_STOP || !key_map) {
- atomic_notifier_call_chain(&keyboard_notifier_list, KBD_UNBOUND_KEYCODE, &param);
+ rc = atomic_notifier_call_chain(&keyboard_notifier_list,
+ KBD_KEYCODE, &param);
+ if (rc == NOTIFY_STOP || !key_map) {
+ atomic_notifier_call_chain(&keyboard_notifier_list,
+ KBD_UNBOUND_KEYCODE, &param);
compute_shiftstate();
kbd->slockstate = 0;
return;
}
- if (keycode >= NR_KEYS)
- if (keycode >= KEY_BRL_DOT1 && keycode <= KEY_BRL_DOT8)
- keysym = U(K(KT_BRL, keycode - KEY_BRL_DOT1 + 1));
- else
- return;
- else
+ if (keycode < NR_KEYS)
keysym = key_map[keycode];
+ else if (keycode >= KEY_BRL_DOT1 && keycode <= KEY_BRL_DOT8)
+ keysym = U(K(KT_BRL, keycode - KEY_BRL_DOT1 + 1));
+ else
+ return;
type = KTYP(keysym);
if (type < 0xf0) {
param.value = keysym;
- if (atomic_notifier_call_chain(&keyboard_notifier_list, KBD_UNICODE, &param) == NOTIFY_STOP)
- return;
- if (down && !raw_mode)
- to_utf8(vc, keysym);
+ rc = atomic_notifier_call_chain(&keyboard_notifier_list,
+ KBD_UNICODE, &param);
+ if (rc != NOTIFY_STOP)
+ if (down && !raw_mode)
+ to_utf8(vc, keysym);
return;
}
@@ -1288,9 +1271,11 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw)
keysym = key_map[keycode];
}
}
- param.value = keysym;
- if (atomic_notifier_call_chain(&keyboard_notifier_list, KBD_KEYSYM, &param) == NOTIFY_STOP)
+ param.value = keysym;
+ rc = atomic_notifier_call_chain(&keyboard_notifier_list,
+ KBD_KEYSYM, &param);
+ if (rc == NOTIFY_STOP)
return;
if (raw_mode && type != KT_SPEC && type != KT_SHIFT)
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 90b199f97bec..e7956acf2ad6 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -106,7 +106,6 @@ static int major; /* major number we get from the kernel */
struct cm4000_dev {
struct pcmcia_device *p_dev;
- dev_node_t node; /* OS node (major,minor) */
unsigned char atr[MAX_ATR];
unsigned char rbuf[512];
@@ -884,8 +883,7 @@ static void monitor_card(unsigned long p)
/* slow down warning, but prompt immediately after insertion */
if (dev->cwarn == 0 || dev->cwarn == 10) {
set_bit(IS_BAD_CARD, &dev->flags);
- printk(KERN_WARNING MODULE_NAME ": device %s: ",
- dev->node.dev_name);
+ dev_warn(&dev->p_dev->dev, MODULE_NAME ": ");
if (test_bit(IS_BAD_CSUM, &dev->flags)) {
DEBUGP(4, dev, "ATR checksum (0x%.2x, should "
"be zero) failed\n", dev->atr_csum);
@@ -1781,11 +1779,6 @@ static int cm4000_config(struct pcmcia_device * link, int devno)
goto cs_release;
dev = link->priv;
- sprintf(dev->node.dev_name, DEVICE_NAME "%d", devno);
- dev->node.major = major;
- dev->node.minor = devno;
- dev->node.next = NULL;
- link->dev_node = &dev->node;
return 0;
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index a6a70e476bea..c0775c844e08 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -72,7 +72,6 @@ static struct class *cmx_class;
struct reader_dev {
struct pcmcia_device *p_dev;
- dev_node_t node;
wait_queue_head_t devq;
wait_queue_head_t poll_wait;
wait_queue_head_t read_wait;
@@ -568,10 +567,6 @@ static int reader_config(struct pcmcia_device *link, int devno)
}
dev = link->priv;
- sprintf(dev->node.dev_name, DEVICE_NAME "%d", devno);
- dev->node.major = major;
- dev->node.minor = devno;
- dev->node.next = &dev->node;
DEBUGP(2, dev, "device " DEVICE_NAME "%d at 0x%.4x-0x%.4x\n", devno,
link->io.BasePort1, link->io.BasePort1+link->io.NumPorts1);
diff --git a/drivers/char/pcmcia/ipwireless/main.c b/drivers/char/pcmcia/ipwireless/main.c
index dff24dae1485..63c32e3f23ba 100644
--- a/drivers/char/pcmcia/ipwireless/main.c
+++ b/drivers/char/pcmcia/ipwireless/main.c
@@ -195,9 +195,6 @@ static int config_ipwireless(struct ipw_dev *ipw)
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = ipwireless_interrupt;
-
INIT_WORK(&ipw->work_reboot, signalled_reboot_work);
ipwireless_init_hardware_v1(ipw->hardware, link->io.BasePort1,
@@ -205,8 +202,7 @@ static int config_ipwireless(struct ipw_dev *ipw)
ipw->is_v2_card, signalled_reboot_callback,
ipw);
- ret = pcmcia_request_irq(link, &link->irq);
-
+ ret = pcmcia_request_irq(link, ipwireless_interrupt);
if (ret != 0)
goto exit;
@@ -217,7 +213,7 @@ static int config_ipwireless(struct ipw_dev *ipw)
(unsigned int) link->io.BasePort1,
(unsigned int) (link->io.BasePort1 +
link->io.NumPorts1 - 1),
- (unsigned int) link->irq.AssignedIRQ);
+ (unsigned int) link->irq);
if (ipw->attr_memory && ipw->common_memory)
printk(KERN_INFO IPWIRELESS_PCCARD_NAME
": attr memory 0x%08lx-0x%08lx, common memory 0x%08lx-0x%08lx\n",
@@ -232,8 +228,7 @@ static int config_ipwireless(struct ipw_dev *ipw)
if (!ipw->network)
goto exit;
- ipw->tty = ipwireless_tty_create(ipw->hardware, ipw->network,
- ipw->nodes);
+ ipw->tty = ipwireless_tty_create(ipw->hardware, ipw->network);
if (!ipw->tty)
goto exit;
@@ -248,8 +243,6 @@ static int config_ipwireless(struct ipw_dev *ipw)
if (ret != 0)
goto exit;
- link->dev_node = &ipw->nodes[0];
-
return 0;
exit:
@@ -271,8 +264,6 @@ exit:
static void release_ipwireless(struct ipw_dev *ipw)
{
- pcmcia_disable_device(ipw->link);
-
if (ipw->common_memory) {
release_mem_region(ipw->request_common_memory.Base,
ipw->request_common_memory.Size);
@@ -288,7 +279,6 @@ static void release_ipwireless(struct ipw_dev *ipw)
if (ipw->attr_memory)
pcmcia_release_window(ipw->link, ipw->handle_attr_memory);
- /* Break the link with Card Services */
pcmcia_disable_device(ipw->link);
}
@@ -313,9 +303,6 @@ static int ipwireless_attach(struct pcmcia_device *link)
ipw->link = link;
link->priv = ipw;
- /* Link this device into our device list. */
- link->dev_node = &ipw->nodes[0];
-
ipw->hardware = ipwireless_hardware_create();
if (!ipw->hardware) {
kfree(ipw);
diff --git a/drivers/char/pcmcia/ipwireless/main.h b/drivers/char/pcmcia/ipwireless/main.h
index 0e0363af9ab2..96d0ef31b172 100644
--- a/drivers/char/pcmcia/ipwireless/main.h
+++ b/drivers/char/pcmcia/ipwireless/main.h
@@ -54,7 +54,6 @@ struct ipw_dev {
void __iomem *common_memory;
win_req_t request_common_memory;
- dev_node_t nodes[2];
/* Reference to attribute memory, containing CIS data */
void *attribute_memory;
diff --git a/drivers/char/pcmcia/ipwireless/tty.c b/drivers/char/pcmcia/ipwireless/tty.c
index 2bb7874a6899..1a2c2c3b068f 100644
--- a/drivers/char/pcmcia/ipwireless/tty.c
+++ b/drivers/char/pcmcia/ipwireless/tty.c
@@ -487,7 +487,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty, struct file *file,
return tty_mode_ioctl(linux_tty, file, cmd , arg);
}
-static int add_tty(dev_node_t *nodesp, int j,
+static int add_tty(int j,
struct ipw_hardware *hardware,
struct ipw_network *network, int channel_idx,
int secondary_channel_idx, int tty_type)
@@ -510,19 +510,13 @@ static int add_tty(dev_node_t *nodesp, int j,
ipwireless_associate_network_tty(network,
secondary_channel_idx,
ttys[j]);
- if (nodesp != NULL) {
- sprintf(nodesp->dev_name, "ttyIPWp%d", j);
- nodesp->major = ipw_tty_driver->major;
- nodesp->minor = j + ipw_tty_driver->minor_start;
- }
if (get_tty(j + ipw_tty_driver->minor_start) == ttys[j])
report_registering(ttys[j]);
return 0;
}
struct ipw_tty *ipwireless_tty_create(struct ipw_hardware *hardware,
- struct ipw_network *network,
- dev_node_t *nodes)
+ struct ipw_network *network)
{
int i, j;
@@ -539,26 +533,23 @@ struct ipw_tty *ipwireless_tty_create(struct ipw_hardware *hardware,
if (allfree) {
j = i;
- if (add_tty(&nodes[0], j, hardware, network,
+ if (add_tty(j, hardware, network,
IPW_CHANNEL_DIALLER, IPW_CHANNEL_RAS,
TTYTYPE_MODEM))
return NULL;
j += IPWIRELESS_PCMCIA_MINOR_RANGE;
- if (add_tty(&nodes[1], j, hardware, network,
+ if (add_tty(j, hardware, network,
IPW_CHANNEL_DIALLER, -1,
TTYTYPE_MONITOR))
return NULL;
j += IPWIRELESS_PCMCIA_MINOR_RANGE;
- if (add_tty(NULL, j, hardware, network,
+ if (add_tty(j, hardware, network,
IPW_CHANNEL_RAS, -1,
TTYTYPE_RAS_RAW))
return NULL;
- nodes[0].next = &nodes[1];
- nodes[1].next = NULL;
-
return ttys[i];
}
}
diff --git a/drivers/char/pcmcia/ipwireless/tty.h b/drivers/char/pcmcia/ipwireless/tty.h
index b0deb9168b6b..4da6c201f727 100644
--- a/drivers/char/pcmcia/ipwireless/tty.h
+++ b/drivers/char/pcmcia/ipwireless/tty.h
@@ -34,8 +34,7 @@ int ipwireless_tty_init(void);
void ipwireless_tty_release(void);
struct ipw_tty *ipwireless_tty_create(struct ipw_hardware *hw,
- struct ipw_network *net,
- dev_node_t *nodes);
+ struct ipw_network *net);
void ipwireless_tty_free(struct ipw_tty *tty);
void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
unsigned int length);
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index c31a0d913d37..308903ec8bf8 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -220,7 +220,6 @@ typedef struct _mgslpc_info {
/* PCMCIA support */
struct pcmcia_device *p_dev;
- dev_node_t node;
int stop;
/* SPPP/Cisco HDLC device parts */
@@ -552,10 +551,6 @@ static int mgslpc_probe(struct pcmcia_device *link)
/* Initialize the struct pcmcia_device structure */
- /* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = NULL;
-
link->conf.Attributes = 0;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -608,9 +603,7 @@ static int mgslpc_config(struct pcmcia_device *link)
link->conf.ConfigIndex = 8;
link->conf.Present = PRESENT_OPTION;
- link->irq.Handler = mgslpc_isr;
-
- ret = pcmcia_request_irq(link, &link->irq);
+ ret = pcmcia_request_irq(link, mgslpc_isr);
if (ret)
goto failed;
ret = pcmcia_request_configuration(link, &link->conf);
@@ -618,17 +611,12 @@ static int mgslpc_config(struct pcmcia_device *link)
goto failed;
info->io_base = link->io.BasePort1;
- info->irq_level = link->irq.AssignedIRQ;
-
- /* add to linked list of devices */
- sprintf(info->node.dev_name, "mgslpc0");
- info->node.major = info->node.minor = 0;
- link->dev_node = &info->node;
+ info->irq_level = link->irq;
- printk(KERN_INFO "%s: index 0x%02x:",
- info->node.dev_name, link->conf.ConfigIndex);
+ dev_info(&link->dev, "index 0x%02x:",
+ link->conf.ConfigIndex);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
- printk(", irq %d", link->irq.AssignedIRQ);
+ printk(", irq %d", link->irq);
if (link->io.NumPorts1)
printk(", io 0x%04x-0x%04x", link->io.BasePort1,
link->io.BasePort1+link->io.NumPorts1-1);
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 59de2525d303..5d15630a5830 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -1,7 +1,4 @@
-/* -*- linux-c -*-
- *
- * $Id: sysrq.c,v 1.15 1998/08/23 14:56:41 mj Exp $
- *
+/*
* Linux Magic System Request Key Hacks
*
* (c) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
@@ -10,8 +7,13 @@
* (c) 2000 Crutcher Dunnavant <crutcher+kernel@datastacks.com>
* overhauled to use key registration
* based upon discusions in irc://irc.openprojects.net/#kernelnewbies
+ *
+ * Copyright (c) 2010 Dmitry Torokhov
+ * Input handler conversion
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
@@ -39,33 +41,34 @@
#include <linux/hrtimer.h>
#include <linux/oom.h>
#include <linux/slab.h>
+#include <linux/input.h>
#include <asm/ptrace.h>
#include <asm/irq_regs.h>
/* Whether we react on sysrq keys or just ignore them */
-int __read_mostly __sysrq_enabled = 1;
-
-static int __read_mostly sysrq_always_enabled;
+static int __read_mostly sysrq_enabled = 1;
+static bool __read_mostly sysrq_always_enabled;
-int sysrq_on(void)
+static bool sysrq_on(void)
{
- return __sysrq_enabled || sysrq_always_enabled;
+ return sysrq_enabled || sysrq_always_enabled;
}
/*
* A value of 1 means 'all', other nonzero values are an op mask:
*/
-static inline int sysrq_on_mask(int mask)
+static bool sysrq_on_mask(int mask)
{
- return sysrq_always_enabled || __sysrq_enabled == 1 ||
- (__sysrq_enabled & mask);
+ return sysrq_always_enabled ||
+ sysrq_enabled == 1 ||
+ (sysrq_enabled & mask);
}
static int __init sysrq_always_enabled_setup(char *str)
{
- sysrq_always_enabled = 1;
- printk(KERN_INFO "debug: sysrq always enabled.\n");
+ sysrq_always_enabled = true;
+ pr_info("sysrq always enabled.\n");
return 1;
}
@@ -76,6 +79,7 @@ __setup("sysrq_always_enabled", sysrq_always_enabled_setup);
static void sysrq_handle_loglevel(int key, struct tty_struct *tty)
{
int i;
+
i = key - '0';
console_loglevel = 7;
printk("Loglevel set to %d\n", i);
@@ -101,7 +105,7 @@ static struct sysrq_key_op sysrq_SAK_op = {
.enable_mask = SYSRQ_ENABLE_KEYBOARD,
};
#else
-#define sysrq_SAK_op (*(struct sysrq_key_op *)0)
+#define sysrq_SAK_op (*(struct sysrq_key_op *)NULL)
#endif
#ifdef CONFIG_VT
@@ -119,7 +123,7 @@ static struct sysrq_key_op sysrq_unraw_op = {
.enable_mask = SYSRQ_ENABLE_KEYBOARD,
};
#else
-#define sysrq_unraw_op (*(struct sysrq_key_op *)0)
+#define sysrq_unraw_op (*(struct sysrq_key_op *)NULL)
#endif /* CONFIG_VT */
static void sysrq_handle_crash(int key, struct tty_struct *tty)
@@ -195,7 +199,7 @@ static struct sysrq_key_op sysrq_showlocks_op = {
.action_msg = "Show Locks Held",
};
#else
-#define sysrq_showlocks_op (*(struct sysrq_key_op *)0)
+#define sysrq_showlocks_op (*(struct sysrq_key_op *)NULL)
#endif
#ifdef CONFIG_SMP
@@ -289,7 +293,7 @@ static struct sysrq_key_op sysrq_showstate_blocked_op = {
static void sysrq_ftrace_dump(int key, struct tty_struct *tty)
{
- ftrace_dump();
+ ftrace_dump(DUMP_ALL);
}
static struct sysrq_key_op sysrq_ftrace_dump_op = {
.handler = sysrq_ftrace_dump,
@@ -298,7 +302,7 @@ static struct sysrq_key_op sysrq_ftrace_dump_op = {
.enable_mask = SYSRQ_ENABLE_DUMP,
};
#else
-#define sysrq_ftrace_dump_op (*(struct sysrq_key_op *)0)
+#define sysrq_ftrace_dump_op (*(struct sysrq_key_op *)NULL)
#endif
static void sysrq_handle_showmem(int key, struct tty_struct *tty)
@@ -477,6 +481,7 @@ struct sysrq_key_op *__sysrq_get_key_op(int key)
i = sysrq_key_table_key2index(key);
if (i != -1)
op_p = sysrq_key_table[i];
+
return op_p;
}
@@ -488,11 +493,7 @@ static void __sysrq_put_key_op(int key, struct sysrq_key_op *op_p)
sysrq_key_table[i] = op_p;
}
-/*
- * This is the non-locking version of handle_sysrq. It must/can only be called
- * by sysrq key handlers, as they are inside of the lock
- */
-void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
+static void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
{
struct sysrq_key_op *op_p;
int orig_log_level;
@@ -544,10 +545,6 @@ void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
}
-/*
- * This function is called by the keyboard handler when SysRq is pressed
- * and any other keycode arrives.
- */
void handle_sysrq(int key, struct tty_struct *tty)
{
if (sysrq_on())
@@ -555,10 +552,177 @@ void handle_sysrq(int key, struct tty_struct *tty)
}
EXPORT_SYMBOL(handle_sysrq);
+#ifdef CONFIG_INPUT
+
+/* Simple translation table for the SysRq keys */
+static const unsigned char sysrq_xlate[KEY_MAX + 1] =
+ "\000\0331234567890-=\177\t" /* 0x00 - 0x0f */
+ "qwertyuiop[]\r\000as" /* 0x10 - 0x1f */
+ "dfghjkl;'`\000\\zxcv" /* 0x20 - 0x2f */
+ "bnm,./\000*\000 \000\201\202\203\204\205" /* 0x30 - 0x3f */
+ "\206\207\210\211\212\000\000789-456+1" /* 0x40 - 0x4f */
+ "230\177\000\000\213\214\000\000\000\000\000\000\000\000\000\000" /* 0x50 - 0x5f */
+ "\r\000/"; /* 0x60 - 0x6f */
+
+static bool sysrq_down;
+static int sysrq_alt_use;
+static int sysrq_alt;
+
+static bool sysrq_filter(struct input_handle *handle, unsigned int type,
+ unsigned int code, int value)
+{
+ if (type != EV_KEY)
+ goto out;
+
+ switch (code) {
+
+ case KEY_LEFTALT:
+ case KEY_RIGHTALT:
+ if (value)
+ sysrq_alt = code;
+ else if (sysrq_down && code == sysrq_alt_use)
+ sysrq_down = false;
+ break;
+
+ case KEY_SYSRQ:
+ if (value == 1 && sysrq_alt) {
+ sysrq_down = true;
+ sysrq_alt_use = sysrq_alt;
+ }
+ break;
+
+ default:
+ if (sysrq_down && value && value != 2)
+ __handle_sysrq(sysrq_xlate[code], NULL, 1);
+ break;
+ }
+
+out:
+ return sysrq_down;
+}
+
+static int sysrq_connect(struct input_handler *handler,
+ struct input_dev *dev,
+ const struct input_device_id *id)
+{
+ struct input_handle *handle;
+ int error;
+
+ sysrq_down = false;
+ sysrq_alt = 0;
+
+ handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
+ if (!handle)
+ return -ENOMEM;
+
+ handle->dev = dev;
+ handle->handler = handler;
+ handle->name = "sysrq";
+
+ error = input_register_handle(handle);
+ if (error) {
+ pr_err("Failed to register input sysrq handler, error %d\n",
+ error);
+ goto err_free;
+ }
+
+ error = input_open_device(handle);
+ if (error) {
+ pr_err("Failed to open input device, error %d\n", error);
+ goto err_unregister;
+ }
+
+ return 0;
+
+ err_unregister:
+ input_unregister_handle(handle);
+ err_free:
+ kfree(handle);
+ return error;
+}
+
+static void sysrq_disconnect(struct input_handle *handle)
+{
+ input_close_device(handle);
+ input_unregister_handle(handle);
+ kfree(handle);
+}
+
+/*
+ * We are matching on KEY_LEFTALT instead of KEY_SYSRQ because not all
+ * keyboards have the SysRq key predefined, and the user may add it to the
+ * keymap later; but we expect all such keyboards to have a left alt key.
+ */
+static const struct input_device_id sysrq_ids[] = {
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
+ INPUT_DEVICE_ID_MATCH_KEYBIT,
+ .evbit = { BIT_MASK(EV_KEY) },
+ .keybit = { BIT_MASK(KEY_LEFTALT) },
+ },
+ { },
+};
+
+static struct input_handler sysrq_handler = {
+ .filter = sysrq_filter,
+ .connect = sysrq_connect,
+ .disconnect = sysrq_disconnect,
+ .name = "sysrq",
+ .id_table = sysrq_ids,
+};
+
+static bool sysrq_handler_registered;
+
+static inline void sysrq_register_handler(void)
+{
+ int error;
+
+ error = input_register_handler(&sysrq_handler);
+ if (error)
+ pr_err("Failed to register input handler, error %d", error);
+ else
+ sysrq_handler_registered = true;
+}
+
+static inline void sysrq_unregister_handler(void)
+{
+ if (sysrq_handler_registered) {
+ input_unregister_handler(&sysrq_handler);
+ sysrq_handler_registered = false;
+ }
+}
+
+#else
+
+static inline void sysrq_register_handler(void)
+{
+}
+
+static inline void sysrq_unregister_handler(void)
+{
+}
+
+#endif /* CONFIG_INPUT */
+
+int sysrq_toggle_support(int enable_mask)
+{
+ bool was_enabled = sysrq_on();
+
+ sysrq_enabled = enable_mask;
+
+ if (was_enabled != sysrq_on()) {
+ if (sysrq_on())
+ sysrq_register_handler();
+ else
+ sysrq_unregister_handler();
+ }
+
+ return 0;
+}
+
static int __sysrq_swap_key_ops(int key, struct sysrq_key_op *insert_op_p,
struct sysrq_key_op *remove_op_p)
{
-
int retval;
unsigned long flags;
@@ -599,6 +763,7 @@ static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
return -EFAULT;
__handle_sysrq(c, NULL, 0);
}
+
return count;
}
@@ -606,10 +771,28 @@ static const struct file_operations proc_sysrq_trigger_operations = {
.write = write_sysrq_trigger,
};
+static void sysrq_init_procfs(void)
+{
+ if (!proc_create("sysrq-trigger", S_IWUSR, NULL,
+ &proc_sysrq_trigger_operations))
+ pr_err("Failed to register proc interface\n");
+}
+
+#else
+
+static inline void sysrq_init_procfs(void)
+{
+}
+
+#endif /* CONFIG_PROC_FS */
+
static int __init sysrq_init(void)
{
- proc_create("sysrq-trigger", S_IWUSR, NULL, &proc_sysrq_trigger_operations);
+ sysrq_init_procfs();
+
+ if (sysrq_on())
+ sysrq_register_handler();
+
return 0;
}
module_init(sysrq_init);
-#endif
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 068c816e6942..05ad4a17a28f 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -1068,6 +1068,27 @@ void tpm_remove_hardware(struct device *dev)
}
EXPORT_SYMBOL_GPL(tpm_remove_hardware);
+#define TPM_ORD_SAVESTATE cpu_to_be32(152)
+#define SAVESTATE_RESULT_SIZE 10
+
+static struct tpm_input_header savestate_header = {
+ .tag = TPM_TAG_RQU_COMMAND,
+ .length = cpu_to_be32(10),
+ .ordinal = TPM_ORD_SAVESTATE
+};
+
+/* Bug workaround - some TPMs don't flush the most
+ * recently changed PCR on suspend, so force the flush
+ * with an extend to the selected _unused_ non-volatile PCR.
+ */
+static int tpm_suspend_pcr;
+static int __init tpm_suspend_setup(char *str)
+{
+ get_option(&str, &tpm_suspend_pcr);
+ return 1;
+}
+__setup("tpm_suspend_pcr=", tpm_suspend_setup);
+
/*
* We are about to suspend. Save the TPM state
* so that it can be restored.
@@ -1075,17 +1096,29 @@ EXPORT_SYMBOL_GPL(tpm_remove_hardware);
int tpm_pm_suspend(struct device *dev, pm_message_t pm_state)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
- u8 savestate[] = {
- 0, 193, /* TPM_TAG_RQU_COMMAND */
- 0, 0, 0, 10, /* blob length (in bytes) */
- 0, 0, 0, 152 /* TPM_ORD_SaveState */
- };
+ struct tpm_cmd_t cmd;
+ int rc;
+
+ u8 dummy_hash[TPM_DIGEST_SIZE] = { 0 };
if (chip == NULL)
return -ENODEV;
- tpm_transmit(chip, savestate, sizeof(savestate));
- return 0;
+	/* for a buggy TPM, flush PCRs with an extend to the selected dummy PCR */
+ if (tpm_suspend_pcr) {
+ cmd.header.in = pcrextend_header;
+ cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(tpm_suspend_pcr);
+ memcpy(cmd.params.pcrextend_in.hash, dummy_hash,
+ TPM_DIGEST_SIZE);
+ rc = transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE,
+ "extending dummy pcr before suspend");
+ }
+
+ /* now do the actual savestate */
+ cmd.header.in = savestate_header;
+ rc = transmit_cmd(chip, &cmd, SAVESTATE_RESULT_SIZE,
+ "sending savestate before suspend");
+ return rc;
}
EXPORT_SYMBOL_GPL(tpm_pm_suspend);
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 6da962c9b21c..d71f0fc34b46 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1875,6 +1875,7 @@ got_driver:
*/
if (filp->f_op == &hung_up_tty_fops)
filp->f_op = &tty_fops;
+ unlock_kernel();
goto retry_open;
}
unlock_kernel();
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 196428c2287a..458d907e3621 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -33,35 +33,6 @@
#include <linux/workqueue.h>
#include "hvc_console.h"
-/* Moved here from .h file in order to disable MULTIPORT. */
-#define VIRTIO_CONSOLE_F_MULTIPORT 1 /* Does host provide multiple ports? */
-
-struct virtio_console_multiport_conf {
- struct virtio_console_config config;
- /* max. number of ports this device can hold */
- __u32 max_nr_ports;
- /* number of ports added so far */
- __u32 nr_ports;
-} __attribute__((packed));
-
-/*
- * A message that's passed between the Host and the Guest for a
- * particular port.
- */
-struct virtio_console_control {
- __u32 id; /* Port number */
- __u16 event; /* The kind of control event (see below) */
- __u16 value; /* Extra information for the key */
-};
-
-/* Some events for control messages */
-#define VIRTIO_CONSOLE_PORT_READY 0
-#define VIRTIO_CONSOLE_CONSOLE_PORT 1
-#define VIRTIO_CONSOLE_RESIZE 2
-#define VIRTIO_CONSOLE_PORT_OPEN 3
-#define VIRTIO_CONSOLE_PORT_NAME 4
-#define VIRTIO_CONSOLE_PORT_REMOVE 5
-
/*
* This is a global struct for storing common data for all the devices
* this driver handles.
@@ -107,6 +78,9 @@ struct console {
/* The hvc device associated with this console port */
struct hvc_struct *hvc;
+ /* The size of the console */
+ struct winsize ws;
+
/*
* This number identifies the number that we used to register
* with hvc in hvc_instantiate() and hvc_alloc(); this is the
@@ -139,7 +113,6 @@ struct ports_device {
* notification
*/
struct work_struct control_work;
- struct work_struct config_work;
struct list_head ports;
@@ -150,7 +123,7 @@ struct ports_device {
spinlock_t cvq_lock;
/* The current config space is stored here */
- struct virtio_console_multiport_conf config;
+ struct virtio_console_config config;
/* The virtio device we're associated with */
struct virtio_device *vdev;
@@ -189,6 +162,9 @@ struct port {
*/
spinlock_t inbuf_lock;
+ /* Protect the operations on the out_vq. */
+ spinlock_t outvq_lock;
+
/* The IO vqs for this port */
struct virtqueue *in_vq, *out_vq;
@@ -214,6 +190,8 @@ struct port {
/* The 'id' to identify the port with the Host */
u32 id;
+ bool outvq_full;
+
/* Is the host device open */
bool host_connected;
@@ -328,7 +306,7 @@ static void *get_inbuf(struct port *port)
unsigned int len;
vq = port->in_vq;
- buf = vq->vq_ops->get_buf(vq, &len);
+ buf = virtqueue_get_buf(vq, &len);
if (buf) {
buf->len = len;
buf->offset = 0;
@@ -349,8 +327,8 @@ static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
sg_init_one(sg, buf->buf, buf->size);
- ret = vq->vq_ops->add_buf(vq, sg, 0, 1, buf);
- vq->vq_ops->kick(vq);
+ ret = virtqueue_add_buf(vq, sg, 0, 1, buf);
+ virtqueue_kick(vq);
return ret;
}
@@ -366,7 +344,7 @@ static void discard_port_data(struct port *port)
if (port->inbuf)
buf = port->inbuf;
else
- buf = vq->vq_ops->get_buf(vq, &len);
+ buf = virtqueue_get_buf(vq, &len);
ret = 0;
while (buf) {
@@ -374,7 +352,7 @@ static void discard_port_data(struct port *port)
ret++;
free_buf(buf);
}
- buf = vq->vq_ops->get_buf(vq, &len);
+ buf = virtqueue_get_buf(vq, &len);
}
port->inbuf = NULL;
if (ret)
@@ -403,57 +381,96 @@ out:
return ret;
}
-static ssize_t send_control_msg(struct port *port, unsigned int event,
- unsigned int value)
+static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
+ unsigned int event, unsigned int value)
{
struct scatterlist sg[1];
struct virtio_console_control cpkt;
struct virtqueue *vq;
unsigned int len;
- if (!use_multiport(port->portdev))
+ if (!use_multiport(portdev))
return 0;
- cpkt.id = port->id;
+ cpkt.id = port_id;
cpkt.event = event;
cpkt.value = value;
- vq = port->portdev->c_ovq;
+ vq = portdev->c_ovq;
sg_init_one(sg, &cpkt, sizeof(cpkt));
- if (vq->vq_ops->add_buf(vq, sg, 1, 0, &cpkt) >= 0) {
- vq->vq_ops->kick(vq);
- while (!vq->vq_ops->get_buf(vq, &len))
+ if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt) >= 0) {
+ virtqueue_kick(vq);
+ while (!virtqueue_get_buf(vq, &len))
cpu_relax();
}
return 0;
}
-static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count)
+static ssize_t send_control_msg(struct port *port, unsigned int event,
+ unsigned int value)
+{
+ return __send_control_msg(port->portdev, port->id, event, value);
+}
+
+/* Callers must take the port->outvq_lock */
+static void reclaim_consumed_buffers(struct port *port)
+{
+ void *buf;
+ unsigned int len;
+
+ while ((buf = virtqueue_get_buf(port->out_vq, &len))) {
+ kfree(buf);
+ port->outvq_full = false;
+ }
+}
+
+static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count,
+ bool nonblock)
{
struct scatterlist sg[1];
struct virtqueue *out_vq;
ssize_t ret;
+ unsigned long flags;
unsigned int len;
out_vq = port->out_vq;
+ spin_lock_irqsave(&port->outvq_lock, flags);
+
+ reclaim_consumed_buffers(port);
+
sg_init_one(sg, in_buf, in_count);
- ret = out_vq->vq_ops->add_buf(out_vq, sg, 1, 0, in_buf);
+ ret = virtqueue_add_buf(out_vq, sg, 1, 0, in_buf);
/* Tell Host to go! */
- out_vq->vq_ops->kick(out_vq);
+ virtqueue_kick(out_vq);
if (ret < 0) {
in_count = 0;
- goto fail;
+ goto done;
}
- /* Wait till the host acknowledges it pushed out the data we sent. */
- while (!out_vq->vq_ops->get_buf(out_vq, &len))
+ if (ret == 0)
+ port->outvq_full = true;
+
+ if (nonblock)
+ goto done;
+
+ /*
+ * Wait till the host acknowledges it pushed out the data we
+ * sent. This is done for ports in blocking mode or for data
+ * from the hvc_console; the tty operations are performed with
+ * spinlocks held so we can't sleep here.
+ */
+ while (!virtqueue_get_buf(out_vq, &len))
cpu_relax();
-fail:
- /* We're expected to return the amount of data we wrote */
+done:
+ spin_unlock_irqrestore(&port->outvq_lock, flags);
+ /*
+ * We're expected to return the amount of data we wrote -- all
+ * of it
+ */
return in_count;
}
@@ -503,9 +520,28 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
}
/* The condition that must be true for polling to end */
-static bool wait_is_over(struct port *port)
+static bool will_read_block(struct port *port)
+{
+ return !port_has_data(port) && port->host_connected;
+}
+
+static bool will_write_block(struct port *port)
{
- return port_has_data(port) || !port->host_connected;
+ bool ret;
+
+ if (!port->host_connected)
+ return true;
+
+ spin_lock_irq(&port->outvq_lock);
+ /*
+ * Check if the Host has consumed any buffers since we last
+ * sent data (this is only applicable for nonblocking ports).
+ */
+ reclaim_consumed_buffers(port);
+ ret = port->outvq_full;
+ spin_unlock_irq(&port->outvq_lock);
+
+ return ret;
}
static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
@@ -528,7 +564,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
return -EAGAIN;
ret = wait_event_interruptible(port->waitqueue,
- wait_is_over(port));
+ !will_read_block(port));
if (ret < 0)
return ret;
}
@@ -554,9 +590,22 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
struct port *port;
char *buf;
ssize_t ret;
+ bool nonblock;
port = filp->private_data;
+ nonblock = filp->f_flags & O_NONBLOCK;
+
+ if (will_write_block(port)) {
+ if (nonblock)
+ return -EAGAIN;
+
+ ret = wait_event_interruptible(port->waitqueue,
+ !will_write_block(port));
+ if (ret < 0)
+ return ret;
+ }
+
count = min((size_t)(32 * 1024), count);
buf = kmalloc(count, GFP_KERNEL);
@@ -569,9 +618,14 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
goto free_buf;
}
- ret = send_buf(port, buf, count);
+ ret = send_buf(port, buf, count, nonblock);
+
+ if (nonblock && ret > 0)
+ goto out;
+
free_buf:
kfree(buf);
+out:
return ret;
}
@@ -586,7 +640,7 @@ static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
ret = 0;
if (port->inbuf)
ret |= POLLIN | POLLRDNORM;
- if (port->host_connected)
+ if (!will_write_block(port))
ret |= POLLOUT;
if (!port->host_connected)
ret |= POLLHUP;
@@ -610,6 +664,10 @@ static int port_fops_release(struct inode *inode, struct file *filp)
spin_unlock_irq(&port->inbuf_lock);
+ spin_lock_irq(&port->outvq_lock);
+ reclaim_consumed_buffers(port);
+ spin_unlock_irq(&port->outvq_lock);
+
return 0;
}
@@ -638,6 +696,15 @@ static int port_fops_open(struct inode *inode, struct file *filp)
port->guest_connected = true;
spin_unlock_irq(&port->inbuf_lock);
+ spin_lock_irq(&port->outvq_lock);
+ /*
+ * We may have missed reclaiming a few buffers in the window
+ * between the port being closed earlier and being opened
+ * again now.
+ */
+ reclaim_consumed_buffers(port);
+ spin_unlock_irq(&port->outvq_lock);
+
/* Notify host of port being opened */
send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1);
@@ -676,9 +743,9 @@ static int put_chars(u32 vtermno, const char *buf, int count)
port = find_port_by_vtermno(vtermno);
if (!port)
- return 0;
+ return -EPIPE;
- return send_buf(port, (void *)buf, count);
+ return send_buf(port, (void *)buf, count, false);
}
/*
@@ -692,9 +759,13 @@ static int get_chars(u32 vtermno, char *buf, int count)
{
struct port *port;
+ /* If we've not set up the port yet, we have no input to give. */
+ if (unlikely(early_put_chars))
+ return 0;
+
port = find_port_by_vtermno(vtermno);
if (!port)
- return 0;
+ return -EPIPE;
/* If we don't have an input queue yet, we can't get input. */
BUG_ON(!port->in_vq);
@@ -705,22 +776,14 @@ static int get_chars(u32 vtermno, char *buf, int count)
static void resize_console(struct port *port)
{
struct virtio_device *vdev;
- struct winsize ws;
/* The port could have been hot-unplugged */
- if (!port)
+ if (!port || !is_console_port(port))
return;
vdev = port->portdev->vdev;
- if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)) {
- vdev->config->get(vdev,
- offsetof(struct virtio_console_config, cols),
- &ws.ws_col, sizeof(u16));
- vdev->config->get(vdev,
- offsetof(struct virtio_console_config, rows),
- &ws.ws_row, sizeof(u16));
- hvc_resize(port->cons.hvc, ws);
- }
+ if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE))
+ hvc_resize(port->cons.hvc, port->cons.ws);
}
/* We set the configuration at this point, since we now have a tty */
@@ -804,6 +867,13 @@ int init_port_console(struct port *port)
spin_unlock_irq(&pdrvdata_lock);
port->guest_connected = true;
+ /*
+ * Start using the new console output if this is the first
+ * console to come up.
+ */
+ if (early_put_chars)
+ early_put_chars = NULL;
+
/* Notify host of port being opened */
send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
@@ -859,6 +929,8 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf,
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"host_connected: %d\n", port->host_connected);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "outvq_full: %d\n", port->outvq_full);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
"is_console: %s\n",
is_console_port(port) ? "yes" : "no");
out_offset += snprintf(buf + out_offset, out_count - out_offset,
@@ -875,6 +947,153 @@ static const struct file_operations port_debugfs_ops = {
.read = debugfs_read,
};
+static void set_console_size(struct port *port, u16 rows, u16 cols)
+{
+ if (!port || !is_console_port(port))
+ return;
+
+ port->cons.ws.ws_row = rows;
+ port->cons.ws.ws_col = cols;
+}
+
+static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
+{
+ struct port_buffer *buf;
+ unsigned int nr_added_bufs;
+ int ret;
+
+ nr_added_bufs = 0;
+ do {
+ buf = alloc_buf(PAGE_SIZE);
+ if (!buf)
+ break;
+
+ spin_lock_irq(lock);
+ ret = add_inbuf(vq, buf);
+ if (ret < 0) {
+ spin_unlock_irq(lock);
+ free_buf(buf);
+ break;
+ }
+ nr_added_bufs++;
+ spin_unlock_irq(lock);
+ } while (ret > 0);
+
+ return nr_added_bufs;
+}
+
+static int add_port(struct ports_device *portdev, u32 id)
+{
+ char debugfs_name[16];
+ struct port *port;
+ struct port_buffer *buf;
+ dev_t devt;
+ unsigned int nr_added_bufs;
+ int err;
+
+ port = kmalloc(sizeof(*port), GFP_KERNEL);
+ if (!port) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ port->portdev = portdev;
+ port->id = id;
+
+ port->name = NULL;
+ port->inbuf = NULL;
+ port->cons.hvc = NULL;
+
+ port->cons.ws.ws_row = port->cons.ws.ws_col = 0;
+
+ port->host_connected = port->guest_connected = false;
+
+ port->outvq_full = false;
+
+ port->in_vq = portdev->in_vqs[port->id];
+ port->out_vq = portdev->out_vqs[port->id];
+
+ cdev_init(&port->cdev, &port_fops);
+
+ devt = MKDEV(portdev->chr_major, id);
+ err = cdev_add(&port->cdev, devt, 1);
+ if (err < 0) {
+ dev_err(&port->portdev->vdev->dev,
+ "Error %d adding cdev for port %u\n", err, id);
+ goto free_port;
+ }
+ port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev,
+ devt, port, "vport%up%u",
+ port->portdev->drv_index, id);
+ if (IS_ERR(port->dev)) {
+ err = PTR_ERR(port->dev);
+ dev_err(&port->portdev->vdev->dev,
+ "Error %d creating device for port %u\n",
+ err, id);
+ goto free_cdev;
+ }
+
+ spin_lock_init(&port->inbuf_lock);
+ spin_lock_init(&port->outvq_lock);
+ init_waitqueue_head(&port->waitqueue);
+
+ /* Fill the in_vq with buffers so the host can send us data. */
+ nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock);
+ if (!nr_added_bufs) {
+ dev_err(port->dev, "Error allocating inbufs\n");
+ err = -ENOMEM;
+ goto free_device;
+ }
+
+ /*
+ * If we're not using multiport support, this has to be a console port
+ */
+ if (!use_multiport(port->portdev)) {
+ err = init_port_console(port);
+ if (err)
+ goto free_inbufs;
+ }
+
+ spin_lock_irq(&portdev->ports_lock);
+ list_add_tail(&port->list, &port->portdev->ports);
+ spin_unlock_irq(&portdev->ports_lock);
+
+ /*
+ * Tell the Host we're set so that it can send us various
+ * configuration parameters for this port (eg, port name,
+ * caching, whether this is a console port, etc.)
+ */
+ send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
+
+ if (pdrvdata.debugfs_dir) {
+ /*
+ * Finally, create the debugfs file that we can use to
+ * inspect a port's state at any time
+ */
+ sprintf(debugfs_name, "vport%up%u",
+ port->portdev->drv_index, id);
+ port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
+ pdrvdata.debugfs_dir,
+ port,
+ &port_debugfs_ops);
+ }
+ return 0;
+
+free_inbufs:
+ while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
+ free_buf(buf);
+free_device:
+ device_destroy(pdrvdata.class, port->dev->devt);
+free_cdev:
+ cdev_del(&port->cdev);
+free_port:
+ kfree(port);
+fail:
+ /* The host might want to notify management sw about port add failure */
+ send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 0);
+ return err;
+}
+
/* Remove all port-specific data. */
static int remove_port(struct port *port)
{
@@ -888,7 +1107,18 @@ static int remove_port(struct port *port)
spin_lock_irq(&pdrvdata_lock);
list_del(&port->cons.list);
spin_unlock_irq(&pdrvdata_lock);
+#if 0
+ /*
+ * hvc_remove() not called as removing one hvc port
+ * results in other hvc ports getting frozen.
+ *
+ * Once this is resolved in hvc, this functionality
+ * will be enabled. Till that is done, the -EPIPE
+ * return from get_chars() above will help
+ * hvc_console.c to clean up on ports we remove here.
+ */
hvc_remove(port->cons.hvc);
+#endif
}
if (port->guest_connected)
send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0);
@@ -900,8 +1130,10 @@ static int remove_port(struct port *port)
/* Remove unused data this port might have received. */
discard_port_data(port);
+ reclaim_consumed_buffers(port);
+
/* Remove buffers we queued up for the Host to send us data in. */
- while ((buf = port->in_vq->vq_ops->detach_unused_buf(port->in_vq)))
+ while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
free_buf(buf);
kfree(port->name);
@@ -924,7 +1156,7 @@ static void handle_control_message(struct ports_device *portdev,
cpkt = (struct virtio_console_control *)(buf->buf + buf->offset);
port = find_port_by_id(portdev, cpkt->id);
- if (!port) {
+ if (!port && cpkt->event != VIRTIO_CONSOLE_PORT_ADD) {
/* No valid header at start of buffer. Drop it. */
dev_dbg(&portdev->vdev->dev,
"Invalid index %u in control packet\n", cpkt->id);
@@ -932,6 +1164,24 @@ static void handle_control_message(struct ports_device *portdev,
}
switch (cpkt->event) {
+ case VIRTIO_CONSOLE_PORT_ADD:
+ if (port) {
+ dev_dbg(&portdev->vdev->dev,
+ "Port %u already added\n", port->id);
+ send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
+ break;
+ }
+ if (cpkt->id >= portdev->config.max_nr_ports) {
+ dev_warn(&portdev->vdev->dev,
+ "Request for adding port with out-of-bound id %u, max. supported id: %u\n",
+ cpkt->id, portdev->config.max_nr_ports - 1);
+ break;
+ }
+ add_port(portdev, cpkt->id);
+ break;
+ case VIRTIO_CONSOLE_PORT_REMOVE:
+ remove_port(port);
+ break;
case VIRTIO_CONSOLE_CONSOLE_PORT:
if (!cpkt->value)
break;
@@ -944,15 +1194,34 @@ static void handle_control_message(struct ports_device *portdev,
* have to notify the host first.
*/
break;
- case VIRTIO_CONSOLE_RESIZE:
+ case VIRTIO_CONSOLE_RESIZE: {
+ struct {
+ __u16 rows;
+ __u16 cols;
+ } size;
+
if (!is_console_port(port))
break;
+
+ memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt),
+ sizeof(size));
+ set_console_size(port, size.rows, size.cols);
+
port->cons.hvc->irq_requested = 1;
resize_console(port);
break;
+ }
case VIRTIO_CONSOLE_PORT_OPEN:
port->host_connected = cpkt->value;
wake_up_interruptible(&port->waitqueue);
+ /*
+ * If the host port got closed and the host had any
+ * unconsumed buffers, we'll be able to reclaim them
+ * now.
+ */
+ spin_lock_irq(&port->outvq_lock);
+ reclaim_consumed_buffers(port);
+ spin_unlock_irq(&port->outvq_lock);
break;
case VIRTIO_CONSOLE_PORT_NAME:
/*
@@ -990,32 +1259,6 @@ static void handle_control_message(struct ports_device *portdev,
kobject_uevent(&port->dev->kobj, KOBJ_CHANGE);
}
break;
- case VIRTIO_CONSOLE_PORT_REMOVE:
- /*
- * Hot unplug the port. We don't decrement nr_ports
- * since we don't want to deal with extra complexities
- * of using the lowest-available port id: We can just
- * pick up the nr_ports number as the id and not have
- * userspace send it to us. This helps us in two
- * ways:
- *
- * - We don't need to have a 'port_id' field in the
- * config space when a port is hot-added. This is a
- * good thing as we might queue up multiple hotplug
- * requests issued in our workqueue.
- *
- * - Another way to deal with this would have been to
- * use a bitmap of the active ports and select the
- * lowest non-active port from that map. That
- * bloats the already tight config space and we
- * would end up artificially limiting the
- * max. number of ports to sizeof(bitmap). Right
- * now we can support 2^32 ports (as the port id is
- * stored in a u32 type).
- *
- */
- remove_port(port);
- break;
}
}
@@ -1030,7 +1273,7 @@ static void control_work_handler(struct work_struct *work)
vq = portdev->c_ivq;
spin_lock(&portdev->cvq_lock);
- while ((buf = vq->vq_ops->get_buf(vq, &len))) {
+ while ((buf = virtqueue_get_buf(vq, &len))) {
spin_unlock(&portdev->cvq_lock);
buf->len = len;
@@ -1092,204 +1335,29 @@ static void config_intr(struct virtio_device *vdev)
struct ports_device *portdev;
portdev = vdev->priv;
- if (use_multiport(portdev)) {
- /* Handle port hot-add */
- schedule_work(&portdev->config_work);
- }
- /*
- * We'll use this way of resizing only for legacy support.
- * For newer userspace (VIRTIO_CONSOLE_F_MULTPORT+), use
- * control messages to indicate console size changes so that
- * it can be done per-port
- */
- resize_console(find_port_by_id(portdev, 0));
-}
-
-static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
-{
- struct port_buffer *buf;
- unsigned int nr_added_bufs;
- int ret;
-
- nr_added_bufs = 0;
- do {
- buf = alloc_buf(PAGE_SIZE);
- if (!buf)
- break;
-
- spin_lock_irq(lock);
- ret = add_inbuf(vq, buf);
- if (ret < 0) {
- spin_unlock_irq(lock);
- free_buf(buf);
- break;
- }
- nr_added_bufs++;
- spin_unlock_irq(lock);
- } while (ret > 0);
-
- return nr_added_bufs;
-}
-
-static int add_port(struct ports_device *portdev, u32 id)
-{
- char debugfs_name[16];
- struct port *port;
- struct port_buffer *buf;
- dev_t devt;
- unsigned int nr_added_bufs;
- int err;
-
- port = kmalloc(sizeof(*port), GFP_KERNEL);
- if (!port) {
- err = -ENOMEM;
- goto fail;
- }
-
- port->portdev = portdev;
- port->id = id;
-
- port->name = NULL;
- port->inbuf = NULL;
- port->cons.hvc = NULL;
-
- port->host_connected = port->guest_connected = false;
-
- port->in_vq = portdev->in_vqs[port->id];
- port->out_vq = portdev->out_vqs[port->id];
-
- cdev_init(&port->cdev, &port_fops);
-
- devt = MKDEV(portdev->chr_major, id);
- err = cdev_add(&port->cdev, devt, 1);
- if (err < 0) {
- dev_err(&port->portdev->vdev->dev,
- "Error %d adding cdev for port %u\n", err, id);
- goto free_port;
- }
- port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev,
- devt, port, "vport%up%u",
- port->portdev->drv_index, id);
- if (IS_ERR(port->dev)) {
- err = PTR_ERR(port->dev);
- dev_err(&port->portdev->vdev->dev,
- "Error %d creating device for port %u\n",
- err, id);
- goto free_cdev;
- }
-
- spin_lock_init(&port->inbuf_lock);
- init_waitqueue_head(&port->waitqueue);
-
- /* Fill the in_vq with buffers so the host can send us data. */
- nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock);
- if (!nr_added_bufs) {
- dev_err(port->dev, "Error allocating inbufs\n");
- err = -ENOMEM;
- goto free_device;
- }
-
- /*
- * If we're not using multiport support, this has to be a console port
- */
- if (!use_multiport(port->portdev)) {
- err = init_port_console(port);
- if (err)
- goto free_inbufs;
- }
-
- spin_lock_irq(&portdev->ports_lock);
- list_add_tail(&port->list, &port->portdev->ports);
- spin_unlock_irq(&portdev->ports_lock);
-
- /*
- * Tell the Host we're set so that it can send us various
- * configuration parameters for this port (eg, port name,
- * caching, whether this is a console port, etc.)
- */
- send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
-
- if (pdrvdata.debugfs_dir) {
- /*
- * Finally, create the debugfs file that we can use to
- * inspect a port's state at any time
- */
- sprintf(debugfs_name, "vport%up%u",
- port->portdev->drv_index, id);
- port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
- pdrvdata.debugfs_dir,
- port,
- &port_debugfs_ops);
- }
- return 0;
-
-free_inbufs:
- while ((buf = port->in_vq->vq_ops->detach_unused_buf(port->in_vq)))
- free_buf(buf);
-free_device:
- device_destroy(pdrvdata.class, port->dev->devt);
-free_cdev:
- cdev_del(&port->cdev);
-free_port:
- kfree(port);
-fail:
- return err;
-}
-/*
- * The workhandler for config-space updates.
- *
- * This is called when ports are hot-added.
- */
-static void config_work_handler(struct work_struct *work)
-{
- struct virtio_console_multiport_conf virtconconf;
- struct ports_device *portdev;
- struct virtio_device *vdev;
- int err;
+ if (!use_multiport(portdev)) {
+ struct port *port;
+ u16 rows, cols;
- portdev = container_of(work, struct ports_device, config_work);
+ vdev->config->get(vdev,
+ offsetof(struct virtio_console_config, cols),
+ &cols, sizeof(u16));
+ vdev->config->get(vdev,
+ offsetof(struct virtio_console_config, rows),
+ &rows, sizeof(u16));
- vdev = portdev->vdev;
- vdev->config->get(vdev,
- offsetof(struct virtio_console_multiport_conf,
- nr_ports),
- &virtconconf.nr_ports,
- sizeof(virtconconf.nr_ports));
+ port = find_port_by_id(portdev, 0);
+ set_console_size(port, rows, cols);
- if (portdev->config.nr_ports == virtconconf.nr_ports) {
/*
- * Port 0 got hot-added. Since we already did all the
- * other initialisation for it, just tell the Host
- * that the port is ready if we find the port. In
- * case the port was hot-removed earlier, we call
- * add_port to add the port.
+ * We'll use this way of resizing only for legacy
+ * support. For newer userspace
+ * (VIRTIO_CONSOLE_F_MULTIPORT+), use control messages
+ * to indicate console size changes so that it can be
+ * done per-port.
*/
- struct port *port;
-
- port = find_port_by_id(portdev, 0);
- if (!port)
- add_port(portdev, 0);
- else
- send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
- return;
- }
- if (virtconconf.nr_ports > portdev->config.max_nr_ports) {
- dev_warn(&vdev->dev,
- "More ports specified (%u) than allowed (%u)",
- portdev->config.nr_ports + 1,
- portdev->config.max_nr_ports);
- return;
- }
- if (virtconconf.nr_ports < portdev->config.nr_ports)
- return;
-
- /* Hot-add ports */
- while (virtconconf.nr_ports - portdev->config.nr_ports) {
- err = add_port(portdev, portdev->config.nr_ports);
- if (err)
- break;
- portdev->config.nr_ports++;
+ resize_console(port);
}
}
@@ -1414,7 +1482,6 @@ static const struct file_operations portdev_fops = {
static int __devinit virtcons_probe(struct virtio_device *vdev)
{
struct ports_device *portdev;
- u32 i;
int err;
bool multiport;
@@ -1443,37 +1510,19 @@ static int __devinit virtcons_probe(struct virtio_device *vdev)
}
multiport = false;
- portdev->config.nr_ports = 1;
portdev->config.max_nr_ports = 1;
-#if 0 /* Multiport is not quite ready yet --RR */
if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT)) {
multiport = true;
vdev->features[0] |= 1 << VIRTIO_CONSOLE_F_MULTIPORT;
- vdev->config->get(vdev,
- offsetof(struct virtio_console_multiport_conf,
- nr_ports),
- &portdev->config.nr_ports,
- sizeof(portdev->config.nr_ports));
- vdev->config->get(vdev,
- offsetof(struct virtio_console_multiport_conf,
- max_nr_ports),
+ vdev->config->get(vdev, offsetof(struct virtio_console_config,
+ max_nr_ports),
&portdev->config.max_nr_ports,
sizeof(portdev->config.max_nr_ports));
- if (portdev->config.nr_ports > portdev->config.max_nr_ports) {
- dev_warn(&vdev->dev,
- "More ports (%u) specified than allowed (%u). Will init %u ports.",
- portdev->config.nr_ports,
- portdev->config.max_nr_ports,
- portdev->config.max_nr_ports);
-
- portdev->config.nr_ports = portdev->config.max_nr_ports;
- }
}
/* Let the Host know we support multiple ports.*/
vdev->config->finalize_features(vdev);
-#endif
err = init_vqs(portdev);
if (err < 0) {
@@ -1489,7 +1538,6 @@ static int __devinit virtcons_probe(struct virtio_device *vdev)
spin_lock_init(&portdev->cvq_lock);
INIT_WORK(&portdev->control_work, &control_work_handler);
- INIT_WORK(&portdev->config_work, &config_work_handler);
nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock);
if (!nr_added_bufs) {
@@ -1498,13 +1546,16 @@ static int __devinit virtcons_probe(struct virtio_device *vdev)
err = -ENOMEM;
goto free_vqs;
}
+ } else {
+ /*
+ * For backward compatibility: Create a console port
+ * if we're running on older host.
+ */
+ add_port(portdev, 0);
}
- for (i = 0; i < portdev->config.nr_ports; i++)
- add_port(portdev, i);
-
- /* Start using the new console output. */
- early_put_chars = NULL;
+ __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
+ VIRTIO_CONSOLE_DEVICE_READY, 1);
return 0;
free_vqs:
@@ -1516,6 +1567,9 @@ free_chrdev:
free:
kfree(portdev);
fail:
+ /* The host might want to notify mgmt sw about device add failure */
+ __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
+ VIRTIO_CONSOLE_DEVICE_READY, 0);
return err;
}
@@ -1529,17 +1583,16 @@ static void virtcons_remove(struct virtio_device *vdev)
portdev = vdev->priv;
cancel_work_sync(&portdev->control_work);
- cancel_work_sync(&portdev->config_work);
list_for_each_entry_safe(port, port2, &portdev->ports, list)
remove_port(port);
unregister_chrdev(portdev->chr_major, "virtio-portsdev");
- while ((buf = portdev->c_ivq->vq_ops->get_buf(portdev->c_ivq, &len)))
+ while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
free_buf(buf);
- while ((buf = portdev->c_ivq->vq_ops->detach_unused_buf(portdev->c_ivq)))
+ while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
free_buf(buf);
vdev->config->del_vqs(vdev);
@@ -1556,6 +1609,7 @@ static struct virtio_device_id id_table[] = {
static unsigned int features[] = {
VIRTIO_CONSOLE_F_SIZE,
+ VIRTIO_CONSOLE_F_MULTIPORT,
};
static struct virtio_driver virtio_console = {
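With the virtio_console changes above, a port's character device honours O_NONBLOCK: writes return -EAGAIN while the out_vq is full, and poll() reports POLLOUT only once the host has consumed buffers. A minimal userspace sketch of driving such a port that way; the /dev/vport0p1 path is illustrative only:

/*
 * Hedged sketch: poll-driven nonblocking write to a virtio serial port.
 * Assumes the driver created /dev/vport0p1; adjust to the real node.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct pollfd pfd;
	const char msg[] = "hello host\n";
	int fd = open("/dev/vport0p1", O_RDWR | O_NONBLOCK);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	pfd.fd = fd;
	pfd.events = POLLOUT;
	/* POLLOUT is reported only while the out_vq still has room. */
	if (poll(&pfd, 1, 5000) > 0 && (pfd.revents & POLLOUT)) {
		if (write(fd, msg, strlen(msg)) < 0)
			perror("write");	/* -EAGAIN once the vq fills up */
	}
	close(fd);
	return 0;
}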
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c
index b314a999aabe..d7be69f13154 100644
--- a/drivers/clocksource/cs5535-clockevt.c
+++ b/drivers/clocksource/cs5535-clockevt.c
@@ -154,14 +154,14 @@ static int __init cs5535_mfgpt_init(void)
if (cs5535_mfgpt_setup_irq(timer, MFGPT_CMP2, &timer_irq)) {
printk(KERN_ERR DRV_NAME ": Could not set up IRQ %d\n",
timer_irq);
- return -EIO;
+ goto err_timer;
}
/* And register it with the kernel */
ret = setup_irq(timer_irq, &mfgptirq);
if (ret) {
printk(KERN_ERR DRV_NAME ": Unable to set up the interrupt.\n");
- goto err;
+ goto err_irq;
}
/* Set the clock scale and enable the event mode for CMP2 */
@@ -184,8 +184,10 @@ static int __init cs5535_mfgpt_init(void)
return 0;
-err:
+err_irq:
cs5535_mfgpt_release_irq(cs5535_event_clock, MFGPT_CMP2, &timer_irq);
+err_timer:
+ cs5535_mfgpt_free_timer(cs5535_event_clock);
printk(KERN_ERR DRV_NAME ": Unable to set up the MFGPT clock source\n");
return -EIO;
}
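The cs5535-clockevt fix restores the usual unwind ordering: each error label releases, in reverse order, exactly what was acquired before the failing step, so the MFGPT timer is no longer leaked when IRQ setup fails. A hedged, self-contained sketch of the idiom with illustrative stand-in functions:

/*
 * Hedged sketch of the goto-unwind idiom; all functions here are
 * illustrative stand-ins, not part of the driver above.
 */
#include <stdio.h>

static int claim_timer(void)     { puts("timer claimed");  return 0; }
static int claim_irq(void)       { puts("irq claimed");    return -1; } /* simulate failure */
static int register_device(void) { puts("device added");   return 0; }
static void release_irq(void)    { puts("irq released"); }
static void release_timer(void)  { puts("timer released"); }

static int example_init(void)
{
	int err;

	err = claim_timer();
	if (err)
		goto out;
	err = claim_irq();
	if (err)
		goto err_timer;		/* timer claimed, IRQ not: free the timer */
	err = register_device();
	if (err)
		goto err_irq;		/* undo both earlier steps */
	return 0;

err_irq:
	release_irq();
err_timer:
	release_timer();
out:
	return err;
}

int main(void)
{
	return example_init() ? 1 : 0;
}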
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 744f748cc84b..f6677cb19789 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -150,13 +150,12 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
{
- struct sh_timer_config *cfg = p->pdev->dev.platform_data;
int ret;
/* enable clock */
ret = clk_enable(p->clk);
if (ret) {
- pr_err("sh_cmt: cannot enable clock \"%s\"\n", cfg->clk);
+ dev_err(&p->pdev->dev, "cannot enable clock\n");
return ret;
}
@@ -279,7 +278,7 @@ static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,
delay = 1;
if (!delay)
- pr_warning("sh_cmt: too long delay\n");
+ dev_warn(&p->pdev->dev, "too long delay\n");
} while (delay);
}
@@ -289,7 +288,7 @@ static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
unsigned long flags;
if (delta > p->max_match_value)
- pr_warning("sh_cmt: delta out of range\n");
+ dev_warn(&p->pdev->dev, "delta out of range\n");
spin_lock_irqsave(&p->lock, flags);
p->next_match_value = delta;
@@ -451,7 +450,7 @@ static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
cs->resume = sh_cmt_clocksource_resume;
cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
- pr_info("sh_cmt: %s used as clock source\n", cs->name);
+ dev_info(&p->pdev->dev, "used as clock source\n");
clocksource_register(cs);
return 0;
}
@@ -497,13 +496,11 @@ static void sh_cmt_clock_event_mode(enum clock_event_mode mode,
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
- pr_info("sh_cmt: %s used for periodic clock events\n",
- ced->name);
+ dev_info(&p->pdev->dev, "used for periodic clock events\n");
sh_cmt_clock_event_start(p, 1);
break;
case CLOCK_EVT_MODE_ONESHOT:
- pr_info("sh_cmt: %s used for oneshot clock events\n",
- ced->name);
+ dev_info(&p->pdev->dev, "used for oneshot clock events\n");
sh_cmt_clock_event_start(p, 0);
break;
case CLOCK_EVT_MODE_SHUTDOWN:
@@ -544,7 +541,7 @@ static void sh_cmt_register_clockevent(struct sh_cmt_priv *p,
ced->set_next_event = sh_cmt_clock_event_next;
ced->set_mode = sh_cmt_clock_event_mode;
- pr_info("sh_cmt: %s used for clock events\n", ced->name);
+ dev_info(&p->pdev->dev, "used for clock events\n");
clockevents_register_device(ced);
}
@@ -601,22 +598,27 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
/* map memory, let mapbase point to our channel */
p->mapbase = ioremap_nocache(res->start, resource_size(res));
if (p->mapbase == NULL) {
- pr_err("sh_cmt: failed to remap I/O memory\n");
+ dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
goto err0;
}
/* request irq using setup_irq() (too early for request_irq()) */
- p->irqaction.name = cfg->name;
+ p->irqaction.name = dev_name(&p->pdev->dev);
p->irqaction.handler = sh_cmt_interrupt;
p->irqaction.dev_id = p;
- p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
+ p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
+ IRQF_IRQPOLL | IRQF_NOBALANCING;
/* get hold of clock */
- p->clk = clk_get(&p->pdev->dev, cfg->clk);
+ p->clk = clk_get(&p->pdev->dev, "cmt_fck");
if (IS_ERR(p->clk)) {
- pr_err("sh_cmt: cannot get clock \"%s\"\n", cfg->clk);
- ret = PTR_ERR(p->clk);
- goto err1;
+ dev_warn(&p->pdev->dev, "using deprecated clock lookup\n");
+ p->clk = clk_get(&p->pdev->dev, cfg->clk);
+ if (IS_ERR(p->clk)) {
+ dev_err(&p->pdev->dev, "cannot get clock\n");
+ ret = PTR_ERR(p->clk);
+ goto err1;
+ }
}
if (resource_size(res) == 6) {
@@ -629,17 +631,17 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
p->clear_bits = ~0xc000;
}
- ret = sh_cmt_register(p, cfg->name,
+ ret = sh_cmt_register(p, (char *)dev_name(&p->pdev->dev),
cfg->clockevent_rating,
cfg->clocksource_rating);
if (ret) {
- pr_err("sh_cmt: registration failed\n");
+ dev_err(&p->pdev->dev, "registration failed\n");
goto err1;
}
ret = setup_irq(irq, &p->irqaction);
if (ret) {
- pr_err("sh_cmt: failed to request irq %d\n", irq);
+ dev_err(&p->pdev->dev, "failed to request irq %d\n", irq);
goto err1;
}
@@ -654,11 +656,10 @@ err0:
static int __devinit sh_cmt_probe(struct platform_device *pdev)
{
struct sh_cmt_priv *p = platform_get_drvdata(pdev);
- struct sh_timer_config *cfg = pdev->dev.platform_data;
int ret;
if (p) {
- pr_info("sh_cmt: %s kept as earlytimer\n", cfg->name);
+ dev_info(&pdev->dev, "kept as earlytimer\n");
return 0;
}
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 5fb78bfd73bb..ef7a5be8a09f 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -119,13 +119,12 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start)
static int sh_mtu2_enable(struct sh_mtu2_priv *p)
{
- struct sh_timer_config *cfg = p->pdev->dev.platform_data;
int ret;
/* enable clock */
ret = clk_enable(p->clk);
if (ret) {
- pr_err("sh_mtu2: cannot enable clock \"%s\"\n", cfg->clk);
+ dev_err(&p->pdev->dev, "cannot enable clock\n");
return ret;
}
@@ -194,8 +193,7 @@ static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
- pr_info("sh_mtu2: %s used for periodic clock events\n",
- ced->name);
+ dev_info(&p->pdev->dev, "used for periodic clock events\n");
sh_mtu2_enable(p);
break;
case CLOCK_EVT_MODE_UNUSED:
@@ -222,13 +220,13 @@ static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p,
ced->cpumask = cpumask_of(0);
ced->set_mode = sh_mtu2_clock_event_mode;
- pr_info("sh_mtu2: %s used for clock events\n", ced->name);
+ dev_info(&p->pdev->dev, "used for clock events\n");
clockevents_register_device(ced);
ret = setup_irq(p->irqaction.irq, &p->irqaction);
if (ret) {
- pr_err("sh_mtu2: failed to request irq %d\n",
- p->irqaction.irq);
+ dev_err(&p->pdev->dev, "failed to request irq %d\n",
+ p->irqaction.irq);
return;
}
}
@@ -274,26 +272,32 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
/* map memory, let mapbase point to our channel */
p->mapbase = ioremap_nocache(res->start, resource_size(res));
if (p->mapbase == NULL) {
- pr_err("sh_mtu2: failed to remap I/O memory\n");
+ dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
goto err0;
}
/* setup data for setup_irq() (too early for request_irq()) */
- p->irqaction.name = cfg->name;
+ p->irqaction.name = dev_name(&p->pdev->dev);
p->irqaction.handler = sh_mtu2_interrupt;
p->irqaction.dev_id = p;
p->irqaction.irq = irq;
- p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
+ p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
+ IRQF_IRQPOLL | IRQF_NOBALANCING;
/* get hold of clock */
- p->clk = clk_get(&p->pdev->dev, cfg->clk);
+ p->clk = clk_get(&p->pdev->dev, "mtu2_fck");
if (IS_ERR(p->clk)) {
- pr_err("sh_mtu2: cannot get clock \"%s\"\n", cfg->clk);
- ret = PTR_ERR(p->clk);
- goto err1;
+ dev_warn(&p->pdev->dev, "using deprecated clock lookup\n");
+ p->clk = clk_get(&p->pdev->dev, cfg->clk);
+ if (IS_ERR(p->clk)) {
+ dev_err(&p->pdev->dev, "cannot get clock\n");
+ ret = PTR_ERR(p->clk);
+ goto err1;
+ }
}
- return sh_mtu2_register(p, cfg->name, cfg->clockevent_rating);
+ return sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev),
+ cfg->clockevent_rating);
err1:
iounmap(p->mapbase);
err0:
@@ -303,11 +307,10 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
static int __devinit sh_mtu2_probe(struct platform_device *pdev)
{
struct sh_mtu2_priv *p = platform_get_drvdata(pdev);
- struct sh_timer_config *cfg = pdev->dev.platform_data;
int ret;
if (p) {
- pr_info("sh_mtu2: %s kept as earlytimer\n", cfg->name);
+ dev_info(&pdev->dev, "kept as earlytimer\n");
return 0;
}
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index fc9ff1e5b770..8e44e14ec4c2 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -107,13 +107,12 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
static int sh_tmu_enable(struct sh_tmu_priv *p)
{
- struct sh_timer_config *cfg = p->pdev->dev.platform_data;
int ret;
/* enable clock */
ret = clk_enable(p->clk);
if (ret) {
- pr_err("sh_tmu: cannot enable clock \"%s\"\n", cfg->clk);
+ dev_err(&p->pdev->dev, "cannot enable clock\n");
return ret;
}
@@ -229,7 +228,7 @@ static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
cs->disable = sh_tmu_clocksource_disable;
cs->mask = CLOCKSOURCE_MASK(32);
cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
- pr_info("sh_tmu: %s used as clock source\n", cs->name);
+ dev_info(&p->pdev->dev, "used as clock source\n");
clocksource_register(cs);
return 0;
}
@@ -277,13 +276,11 @@ static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
- pr_info("sh_tmu: %s used for periodic clock events\n",
- ced->name);
+ dev_info(&p->pdev->dev, "used for periodic clock events\n");
sh_tmu_clock_event_start(p, 1);
break;
case CLOCK_EVT_MODE_ONESHOT:
- pr_info("sh_tmu: %s used for oneshot clock events\n",
- ced->name);
+ dev_info(&p->pdev->dev, "used for oneshot clock events\n");
sh_tmu_clock_event_start(p, 0);
break;
case CLOCK_EVT_MODE_UNUSED:
@@ -324,13 +321,13 @@ static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
ced->set_next_event = sh_tmu_clock_event_next;
ced->set_mode = sh_tmu_clock_event_mode;
- pr_info("sh_tmu: %s used for clock events\n", ced->name);
+ dev_info(&p->pdev->dev, "used for clock events\n");
clockevents_register_device(ced);
ret = setup_irq(p->irqaction.irq, &p->irqaction);
if (ret) {
- pr_err("sh_tmu: failed to request irq %d\n",
- p->irqaction.irq);
+ dev_err(&p->pdev->dev, "failed to request irq %d\n",
+ p->irqaction.irq);
return;
}
}
@@ -379,26 +376,31 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
/* map memory, let mapbase point to our channel */
p->mapbase = ioremap_nocache(res->start, resource_size(res));
if (p->mapbase == NULL) {
- pr_err("sh_tmu: failed to remap I/O memory\n");
+ dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
goto err0;
}
/* setup data for setup_irq() (too early for request_irq()) */
- p->irqaction.name = cfg->name;
+ p->irqaction.name = dev_name(&p->pdev->dev);
p->irqaction.handler = sh_tmu_interrupt;
p->irqaction.dev_id = p;
p->irqaction.irq = irq;
- p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
+ p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
+ IRQF_IRQPOLL | IRQF_NOBALANCING;
/* get hold of clock */
- p->clk = clk_get(&p->pdev->dev, cfg->clk);
+ p->clk = clk_get(&p->pdev->dev, "tmu_fck");
if (IS_ERR(p->clk)) {
- pr_err("sh_tmu: cannot get clock \"%s\"\n", cfg->clk);
- ret = PTR_ERR(p->clk);
- goto err1;
+ dev_warn(&p->pdev->dev, "using deprecated clock lookup\n");
+ p->clk = clk_get(&p->pdev->dev, cfg->clk);
+ if (IS_ERR(p->clk)) {
+ dev_err(&p->pdev->dev, "cannot get clock\n");
+ ret = PTR_ERR(p->clk);
+ goto err1;
+ }
}
- return sh_tmu_register(p, cfg->name,
+ return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
cfg->clockevent_rating,
cfg->clocksource_rating);
err1:
@@ -410,11 +412,10 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
static int __devinit sh_tmu_probe(struct platform_device *pdev)
{
struct sh_tmu_priv *p = platform_get_drvdata(pdev);
- struct sh_timer_config *cfg = pdev->dev.platform_data;
int ret;
if (p) {
- pr_info("sh_tmu: %s kept as earlytimer\n", cfg->name);
+ dev_info(&pdev->dev, "kept as earlytimer\n");
return 0;
}
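The three SuperH timer drivers above now try a fixed clock connection id first ("cmt_fck", "mtu2_fck", "tmu_fck") and fall back to the name from platform data only with a deprecation warning. A hedged sketch of that lookup pattern factored into a helper; the helper name and arguments are illustrative, the real drivers open-code it:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/*
 * Illustrative helper only: try the new fixed connection id first and
 * fall back to the deprecated platform-data clock name, as above.
 */
static struct clk *get_timer_clk(struct device *dev, const char *new_id,
				 const char *legacy_id)
{
	struct clk *clk = clk_get(dev, new_id);

	if (!IS_ERR(clk))
		return clk;

	dev_warn(dev, "using deprecated clock lookup\n");
	return clk_get(dev, legacy_id);	/* may still be ERR_PTR() */
}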
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 75d293eeb3ee..6ba37c10700d 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -68,7 +68,7 @@ static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
#define lock_policy_rwsem(mode, cpu) \
-int lock_policy_rwsem_##mode \
+static int lock_policy_rwsem_##mode \
(int cpu) \
{ \
int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
@@ -83,26 +83,22 @@ int lock_policy_rwsem_##mode \
}
lock_policy_rwsem(read, cpu);
-EXPORT_SYMBOL_GPL(lock_policy_rwsem_read);
lock_policy_rwsem(write, cpu);
-EXPORT_SYMBOL_GPL(lock_policy_rwsem_write);
-void unlock_policy_rwsem_read(int cpu)
+static void unlock_policy_rwsem_read(int cpu)
{
int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
BUG_ON(policy_cpu == -1);
up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
}
-EXPORT_SYMBOL_GPL(unlock_policy_rwsem_read);
-void unlock_policy_rwsem_write(int cpu)
+static void unlock_policy_rwsem_write(int cpu)
{
int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
BUG_ON(policy_cpu == -1);
up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
}
-EXPORT_SYMBOL_GPL(unlock_policy_rwsem_write);
/* internal prototypes */
@@ -662,32 +658,20 @@ static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}
-#define define_one_ro(_name) \
-static struct freq_attr _name = \
-__ATTR(_name, 0444, show_##_name, NULL)
-
-#define define_one_ro0400(_name) \
-static struct freq_attr _name = \
-__ATTR(_name, 0400, show_##_name, NULL)
-
-#define define_one_rw(_name) \
-static struct freq_attr _name = \
-__ATTR(_name, 0644, show_##_name, store_##_name)
-
-define_one_ro0400(cpuinfo_cur_freq);
-define_one_ro(cpuinfo_min_freq);
-define_one_ro(cpuinfo_max_freq);
-define_one_ro(cpuinfo_transition_latency);
-define_one_ro(scaling_available_governors);
-define_one_ro(scaling_driver);
-define_one_ro(scaling_cur_freq);
-define_one_ro(bios_limit);
-define_one_ro(related_cpus);
-define_one_ro(affected_cpus);
-define_one_rw(scaling_min_freq);
-define_one_rw(scaling_max_freq);
-define_one_rw(scaling_governor);
-define_one_rw(scaling_setspeed);
+cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
+cpufreq_freq_attr_ro(cpuinfo_min_freq);
+cpufreq_freq_attr_ro(cpuinfo_max_freq);
+cpufreq_freq_attr_ro(cpuinfo_transition_latency);
+cpufreq_freq_attr_ro(scaling_available_governors);
+cpufreq_freq_attr_ro(scaling_driver);
+cpufreq_freq_attr_ro(scaling_cur_freq);
+cpufreq_freq_attr_ro(bios_limit);
+cpufreq_freq_attr_ro(related_cpus);
+cpufreq_freq_attr_ro(affected_cpus);
+cpufreq_freq_attr_rw(scaling_min_freq);
+cpufreq_freq_attr_rw(scaling_max_freq);
+cpufreq_freq_attr_rw(scaling_governor);
+cpufreq_freq_attr_rw(scaling_setspeed);
static struct attribute *default_attrs[] = {
&cpuinfo_min_freq.attr,
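The local define_one_ro/define_one_rw macros removed above show what the shared cpufreq_freq_attr_* helpers must expand to. A sketch of equivalent definitions, assuming the common header mirrors the old local form (the authoritative versions live in include/linux/cpufreq.h and may differ):

/*
 * Hedged sketch modelled on the removed local macros; not the
 * verbatim header definitions.
 */
#define cpufreq_freq_attr_ro(_name)			\
static struct freq_attr _name =				\
__ATTR(_name, 0444, show_##_name, NULL)

#define cpufreq_freq_attr_ro_perm(_name, _perm)	\
static struct freq_attr _name =				\
__ATTR(_name, _perm, show_##_name, NULL)

#define cpufreq_freq_attr_rw(_name)			\
static struct freq_attr _name =				\
__ATTR(_name, 0644, show_##_name, store_##_name)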
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 3a147874a465..526bfbf69611 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -178,12 +178,8 @@ static ssize_t show_sampling_rate_min(struct kobject *kobj,
return sprintf(buf, "%u\n", min_sampling_rate);
}
-#define define_one_ro(_name) \
-static struct global_attr _name = \
-__ATTR(_name, 0444, show_##_name, NULL)
-
-define_one_ro(sampling_rate_max);
-define_one_ro(sampling_rate_min);
+define_one_global_ro(sampling_rate_max);
+define_one_global_ro(sampling_rate_min);
/* cpufreq_conservative Governor Tunables */
#define show_one(file_name, object) \
@@ -221,12 +217,8 @@ show_one_old(freq_step);
show_one_old(sampling_rate_min);
show_one_old(sampling_rate_max);
-#define define_one_ro_old(object, _name) \
-static struct freq_attr object = \
-__ATTR(_name, 0444, show_##_name##_old, NULL)
-
-define_one_ro_old(sampling_rate_min_old, sampling_rate_min);
-define_one_ro_old(sampling_rate_max_old, sampling_rate_max);
+cpufreq_freq_attr_ro_old(sampling_rate_min);
+cpufreq_freq_attr_ro_old(sampling_rate_max);
/*** delete after deprecation time ***/
@@ -364,16 +356,12 @@ static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
return count;
}
-#define define_one_rw(_name) \
-static struct global_attr _name = \
-__ATTR(_name, 0644, show_##_name, store_##_name)
-
-define_one_rw(sampling_rate);
-define_one_rw(sampling_down_factor);
-define_one_rw(up_threshold);
-define_one_rw(down_threshold);
-define_one_rw(ignore_nice_load);
-define_one_rw(freq_step);
+define_one_global_rw(sampling_rate);
+define_one_global_rw(sampling_down_factor);
+define_one_global_rw(up_threshold);
+define_one_global_rw(down_threshold);
+define_one_global_rw(ignore_nice_load);
+define_one_global_rw(freq_step);
static struct attribute *dbs_attributes[] = {
&sampling_rate_max.attr,
@@ -409,16 +397,12 @@ write_one_old(down_threshold);
write_one_old(ignore_nice_load);
write_one_old(freq_step);
-#define define_one_rw_old(object, _name) \
-static struct freq_attr object = \
-__ATTR(_name, 0644, show_##_name##_old, store_##_name##_old)
-
-define_one_rw_old(sampling_rate_old, sampling_rate);
-define_one_rw_old(sampling_down_factor_old, sampling_down_factor);
-define_one_rw_old(up_threshold_old, up_threshold);
-define_one_rw_old(down_threshold_old, down_threshold);
-define_one_rw_old(ignore_nice_load_old, ignore_nice_load);
-define_one_rw_old(freq_step_old, freq_step);
+cpufreq_freq_attr_rw_old(sampling_rate);
+cpufreq_freq_attr_rw_old(sampling_down_factor);
+cpufreq_freq_attr_rw_old(up_threshold);
+cpufreq_freq_attr_rw_old(down_threshold);
+cpufreq_freq_attr_rw_old(ignore_nice_load);
+cpufreq_freq_attr_rw_old(freq_step);
static struct attribute *dbs_attributes_old[] = {
&sampling_rate_max_old.attr,
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index bd444dc93cf2..7b5093664e49 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -73,6 +73,7 @@ enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
struct cpu_dbs_info_s {
cputime64_t prev_cpu_idle;
+ cputime64_t prev_cpu_iowait;
cputime64_t prev_cpu_wall;
cputime64_t prev_cpu_nice;
struct cpufreq_policy *cur_policy;
@@ -108,6 +109,7 @@ static struct dbs_tuners {
unsigned int down_differential;
unsigned int ignore_nice;
unsigned int powersave_bias;
+ unsigned int io_is_busy;
} dbs_tuners_ins = {
.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
@@ -148,6 +150,16 @@ static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
return idle_time;
}
+static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall)
+{
+ u64 iowait_time = get_cpu_iowait_time_us(cpu, wall);
+
+ if (iowait_time == -1ULL)
+ return 0;
+
+ return iowait_time;
+}
+
/*
* Find right freq to be set now with powersave_bias on.
* Returns the freq_hi to be used right now and will set freq_hi_jiffies,
@@ -234,12 +246,8 @@ static ssize_t show_sampling_rate_min(struct kobject *kobj,
return sprintf(buf, "%u\n", min_sampling_rate);
}
-#define define_one_ro(_name) \
-static struct global_attr _name = \
-__ATTR(_name, 0444, show_##_name, NULL)
-
-define_one_ro(sampling_rate_max);
-define_one_ro(sampling_rate_min);
+define_one_global_ro(sampling_rate_max);
+define_one_global_ro(sampling_rate_min);
/* cpufreq_ondemand Governor Tunables */
#define show_one(file_name, object) \
@@ -249,6 +257,7 @@ static ssize_t show_##file_name \
return sprintf(buf, "%u\n", dbs_tuners_ins.object); \
}
show_one(sampling_rate, sampling_rate);
+show_one(io_is_busy, io_is_busy);
show_one(up_threshold, up_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(powersave_bias, powersave_bias);
@@ -274,12 +283,8 @@ show_one_old(powersave_bias);
show_one_old(sampling_rate_min);
show_one_old(sampling_rate_max);
-#define define_one_ro_old(object, _name) \
-static struct freq_attr object = \
-__ATTR(_name, 0444, show_##_name##_old, NULL)
-
-define_one_ro_old(sampling_rate_min_old, sampling_rate_min);
-define_one_ro_old(sampling_rate_max_old, sampling_rate_max);
+cpufreq_freq_attr_ro_old(sampling_rate_min);
+cpufreq_freq_attr_ro_old(sampling_rate_max);
/*** delete after deprecation time ***/
@@ -299,6 +304,23 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
return count;
}
+static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ mutex_lock(&dbs_mutex);
+ dbs_tuners_ins.io_is_busy = !!input;
+ mutex_unlock(&dbs_mutex);
+
+ return count;
+}
+
static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
const char *buf, size_t count)
{
@@ -376,14 +398,11 @@ static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
return count;
}
-#define define_one_rw(_name) \
-static struct global_attr _name = \
-__ATTR(_name, 0644, show_##_name, store_##_name)
-
-define_one_rw(sampling_rate);
-define_one_rw(up_threshold);
-define_one_rw(ignore_nice_load);
-define_one_rw(powersave_bias);
+define_one_global_rw(sampling_rate);
+define_one_global_rw(io_is_busy);
+define_one_global_rw(up_threshold);
+define_one_global_rw(ignore_nice_load);
+define_one_global_rw(powersave_bias);
static struct attribute *dbs_attributes[] = {
&sampling_rate_max.attr,
@@ -392,6 +411,7 @@ static struct attribute *dbs_attributes[] = {
&up_threshold.attr,
&ignore_nice_load.attr,
&powersave_bias.attr,
+ &io_is_busy.attr,
NULL
};
@@ -415,14 +435,10 @@ write_one_old(up_threshold);
write_one_old(ignore_nice_load);
write_one_old(powersave_bias);
-#define define_one_rw_old(object, _name) \
-static struct freq_attr object = \
-__ATTR(_name, 0644, show_##_name##_old, store_##_name##_old)
-
-define_one_rw_old(sampling_rate_old, sampling_rate);
-define_one_rw_old(up_threshold_old, up_threshold);
-define_one_rw_old(ignore_nice_load_old, ignore_nice_load);
-define_one_rw_old(powersave_bias_old, powersave_bias);
+cpufreq_freq_attr_rw_old(sampling_rate);
+cpufreq_freq_attr_rw_old(up_threshold);
+cpufreq_freq_attr_rw_old(ignore_nice_load);
+cpufreq_freq_attr_rw_old(powersave_bias);
static struct attribute *dbs_attributes_old[] = {
&sampling_rate_max_old.attr,
@@ -443,6 +459,17 @@ static struct attribute_group dbs_attr_group_old = {
/************************** sysfs end ************************/
+static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
+{
+ if (dbs_tuners_ins.powersave_bias)
+ freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H);
+ else if (p->cur == p->max)
+ return;
+
+ __cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ?
+ CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
+}
+
static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
unsigned int max_load_freq;
@@ -470,14 +497,15 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
for_each_cpu(j, policy->cpus) {
struct cpu_dbs_info_s *j_dbs_info;
- cputime64_t cur_wall_time, cur_idle_time;
- unsigned int idle_time, wall_time;
+ cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
+ unsigned int idle_time, wall_time, iowait_time;
unsigned int load, load_freq;
int freq_avg;
j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
+ cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time);
wall_time = (unsigned int) cputime64_sub(cur_wall_time,
j_dbs_info->prev_cpu_wall);
@@ -487,6 +515,10 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
j_dbs_info->prev_cpu_idle);
j_dbs_info->prev_cpu_idle = cur_idle_time;
+ iowait_time = (unsigned int) cputime64_sub(cur_iowait_time,
+ j_dbs_info->prev_cpu_iowait);
+ j_dbs_info->prev_cpu_iowait = cur_iowait_time;
+
if (dbs_tuners_ins.ignore_nice) {
cputime64_t cur_nice;
unsigned long cur_nice_jiffies;
@@ -504,6 +536,16 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
idle_time += jiffies_to_usecs(cur_nice_jiffies);
}
+ /*
+ * For the purpose of ondemand, waiting for disk IO is an
+ * indication that you're performance critical, and not that
+ * the system is actually idle. So subtract the iowait time
+ * from the cpu idle time.
+ */
+
+ if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time)
+ idle_time -= iowait_time;
+
if (unlikely(!wall_time || wall_time < idle_time))
continue;
@@ -520,19 +562,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
/* Check for frequency increase */
if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
- /* if we are already at full speed then break out early */
- if (!dbs_tuners_ins.powersave_bias) {
- if (policy->cur == policy->max)
- return;
-
- __cpufreq_driver_target(policy, policy->max,
- CPUFREQ_RELATION_H);
- } else {
- int freq = powersave_bias_target(policy, policy->max,
- CPUFREQ_RELATION_H);
- __cpufreq_driver_target(policy, freq,
- CPUFREQ_RELATION_L);
- }
+ dbs_freq_increase(policy, policy->max);
return;
}
@@ -579,7 +609,9 @@ static void do_dbs_timer(struct work_struct *work)
/* We want all CPUs to do sampling nearly on same jiffy */
int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
- delay -= jiffies % delay;
+ if (num_online_cpus() > 1)
+ delay -= jiffies % delay;
+
mutex_lock(&dbs_info->timer_mutex);
/* Common NORMAL_SAMPLE setup */
@@ -604,7 +636,9 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
{
/* We want all CPUs to do sampling nearly on same jiffy */
int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
- delay -= jiffies % delay;
+
+ if (num_online_cpus() > 1)
+ delay -= jiffies % delay;
dbs_info->sample_type = DBS_NORMAL_SAMPLE;
INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
@@ -617,6 +651,29 @@ static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
cancel_delayed_work_sync(&dbs_info->work);
}
+/*
+ * Not all CPUs want IO time to be accounted as busy; this depends on how
+ * efficient idling at a higher frequency/voltage is.
+ * Pavel Machek says this is not so for various generations of AMD and old
+ * Intel systems.
+ * Mike Chan (androidlcom) claims this is also not true for ARM.
+ * Because of this, whitelist specific known CPU series by default, and
+ * leave all others up to the user.
+ */
+static int should_io_be_busy(void)
+{
+#if defined(CONFIG_X86)
+ /*
+ * For Intel, Core 2 (model 15) and later have an efficient idle.
+ */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+ boot_cpu_data.x86 == 6 &&
+ boot_cpu_data.x86_model >= 15)
+ return 1;
+#endif
+ return 0;
+}
+
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
unsigned int event)
{
@@ -679,6 +736,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
dbs_tuners_ins.sampling_rate =
max(min_sampling_rate,
latency * LATENCY_MULTIPLIER);
+ dbs_tuners_ins.io_is_busy = should_io_be_busy();
}
mutex_unlock(&dbs_mutex);
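The io_is_busy tunable added above only changes how the idle sample is interpreted: when set, time spent waiting on IO is subtracted from idle before the load percentage is derived, so IO-bound CPUs look busy and get ramped up. A small standalone sketch of that arithmetic, with illustrative numbers:

/*
 * Hedged sketch of the ondemand load calculation with and without
 * io_is_busy; plain userspace arithmetic, values are illustrative.
 */
#include <stdio.h>

static unsigned int load_percent(unsigned int wall_us, unsigned int idle_us,
				 unsigned int iowait_us, int io_is_busy)
{
	if (io_is_busy && idle_us >= iowait_us)
		idle_us -= iowait_us;	/* IO wait treated as busy time */
	if (!wall_us || wall_us < idle_us)
		return 0;
	return 100 * (wall_us - idle_us) / wall_us;
}

int main(void)
{
	/* 10 ms window, 6 ms idle of which 4 ms was iowait */
	printf("io treated as idle: %u%%\n", load_percent(10000, 6000, 4000, 0));
	printf("io treated as busy: %u%%\n", load_percent(10000, 6000, 4000, 1));
	return 0;
}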
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index 1c1ceb4f218f..12c98900dcf8 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -67,7 +67,7 @@ static int ladder_select_state(struct cpuidle_device *dev)
struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
struct ladder_device_state *last_state;
int last_residency, last_idx = ldev->last_state_idx;
- int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
+ int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
/* Special case when user has set very strict latency requirement */
if (unlikely(latency_req == 0)) {
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index f8e57c6303f2..b81ad9c731ae 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -182,7 +182,7 @@ static u64 div_round64(u64 dividend, u32 divisor)
static int menu_select(struct cpuidle_device *dev)
{
struct menu_device *data = &__get_cpu_var(menu_devices);
- int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
+ int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
int i;
int multiplier;
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index b08403d7d1ca..9073aa05123e 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -222,4 +222,13 @@ config CRYPTO_DEV_PPC4XX
help
This option allows you to have support for AMCC crypto acceleration.
+config CRYPTO_DEV_OMAP_SHAM
+ tristate "Support for OMAP SHA1/MD5 hw accelerator"
+ depends on ARCH_OMAP2 || ARCH_OMAP3
+ select CRYPTO_SHA1
+ select CRYPTO_MD5
+ help
+ OMAP processors have a SHA1/MD5 hw accelerator. Select this if you
+ want to use the OMAP module for SHA1/MD5 algorithms.
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 6ffcb3f7f942..c9494e163402 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -6,3 +6,5 @@ obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
+obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
+
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index c7a5a43ba691..09389dd2f96b 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -15,14 +15,14 @@
#include <crypto/algapi.h>
#include <crypto/aes.h>
-#include <asm/io.h>
-#include <asm/delay.h>
+#include <linux/io.h>
+#include <linux/delay.h>
#include "geode-aes.h"
/* Static structures */
-static void __iomem * _iobase;
+static void __iomem *_iobase;
static spinlock_t lock;
/* Write a 128 bit field (either a writable key or IV) */
@@ -30,7 +30,7 @@ static inline void
_writefield(u32 offset, void *value)
{
int i;
- for(i = 0; i < 4; i++)
+ for (i = 0; i < 4; i++)
iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4));
}
@@ -39,7 +39,7 @@ static inline void
_readfield(u32 offset, void *value)
{
int i;
- for(i = 0; i < 4; i++)
+ for (i = 0; i < 4; i++)
((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4));
}
@@ -59,7 +59,7 @@ do_crypt(void *src, void *dst, int len, u32 flags)
do {
status = ioread32(_iobase + AES_INTR_REG);
cpu_relax();
- } while(!(status & AES_INTRA_PENDING) && --counter);
+ } while (!(status & AES_INTRA_PENDING) && --counter);
/* Clear the event */
iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);
@@ -317,7 +317,7 @@ geode_cbc_decrypt(struct blkcipher_desc *desc,
err = blkcipher_walk_virt(desc, &walk);
op->iv = walk.iv;
- while((nbytes = walk.nbytes)) {
+ while ((nbytes = walk.nbytes)) {
op->src = walk.src.virt.addr,
op->dst = walk.dst.virt.addr;
op->mode = AES_MODE_CBC;
@@ -349,7 +349,7 @@ geode_cbc_encrypt(struct blkcipher_desc *desc,
err = blkcipher_walk_virt(desc, &walk);
op->iv = walk.iv;
- while((nbytes = walk.nbytes)) {
+ while ((nbytes = walk.nbytes)) {
op->src = walk.src.virt.addr,
op->dst = walk.dst.virt.addr;
op->mode = AES_MODE_CBC;
@@ -429,7 +429,7 @@ geode_ecb_decrypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
- while((nbytes = walk.nbytes)) {
+ while ((nbytes = walk.nbytes)) {
op->src = walk.src.virt.addr,
op->dst = walk.dst.virt.addr;
op->mode = AES_MODE_ECB;
@@ -459,7 +459,7 @@ geode_ecb_encrypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
- while((nbytes = walk.nbytes)) {
+ while ((nbytes = walk.nbytes)) {
op->src = walk.src.virt.addr,
op->dst = walk.dst.virt.addr;
op->mode = AES_MODE_ECB;
@@ -518,11 +518,12 @@ static int __devinit
geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int ret;
-
- if ((ret = pci_enable_device(dev)))
+ ret = pci_enable_device(dev);
+ if (ret)
return ret;
- if ((ret = pci_request_regions(dev, "geode-aes")))
+ ret = pci_request_regions(dev, "geode-aes");
+ if (ret)
goto eenable;
_iobase = pci_iomap(dev, 0, 0);
@@ -537,13 +538,16 @@ geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
/* Clear any pending activity */
iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);
- if ((ret = crypto_register_alg(&geode_alg)))
+ ret = crypto_register_alg(&geode_alg);
+ if (ret)
goto eiomap;
- if ((ret = crypto_register_alg(&geode_ecb_alg)))
+ ret = crypto_register_alg(&geode_ecb_alg);
+ if (ret)
goto ealg;
- if ((ret = crypto_register_alg(&geode_cbc_alg)))
+ ret = crypto_register_alg(&geode_cbc_alg);
+ if (ret)
goto eecb;
printk(KERN_NOTICE "geode-aes: GEODE AES engine enabled.\n");
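The geode-aes hunks above are style cleanups: assignments move out of if-conditions
and whitespace follows checkpatch. A minimal sketch of the resulting error-handling
pattern; example_probe() and its labels are illustrative and not part of the patch:

	#include <linux/pci.h>

	static int example_probe(struct pci_dev *dev)
	{
		int ret;

		/* one call per statement, then test the result */
		ret = pci_enable_device(dev);
		if (ret)
			return ret;

		ret = pci_request_regions(dev, "example");
		if (ret)
			goto err_disable;

		return 0;

		/* unwind in the reverse order of acquisition */
	err_disable:
		pci_disable_device(dev);
		return ret;
	}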
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 6f29012bcc43..18a436cafc10 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -15,8 +15,14 @@
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
#include "mv_cesa.h"
+
+#define MV_CESA "MV-CESA:"
+#define MAX_HW_HASH_SIZE 0xFFFF
+
/*
* STM:
* /---------------------------------------\
@@ -39,10 +45,12 @@ enum engine_status {
* @dst_sg_it: sg iterator for dst
* @sg_src_left: bytes left in src to process (scatter list)
* @src_start: offset to add to src start position (scatter list)
- * @crypt_len: length of current crypt process
+ * @crypt_len: length of current hw crypt/hash process
+ * @hw_nbytes: total bytes to process in hw for this request
+ * @copy_back: whether to copy data back (crypt) or not (hash)
* @sg_dst_left: bytes left dst to process in this scatter list
* @dst_start: offset to add to dst start position (scatter list)
- * @total_req_bytes: total number of bytes processed (request).
+ * @hw_processed_bytes: number of bytes processed by hw (request).
*
* sg helpers are used to iterate over the scatterlist. Since the size of the
* SRAM may be less than the scatter size, this struct is used to keep
@@ -51,15 +59,19 @@ enum engine_status {
struct req_progress {
struct sg_mapping_iter src_sg_it;
struct sg_mapping_iter dst_sg_it;
+ void (*complete) (void);
+ void (*process) (int is_first);
/* src mostly */
int sg_src_left;
int src_start;
int crypt_len;
+ int hw_nbytes;
/* dst mostly */
+ int copy_back;
int sg_dst_left;
int dst_start;
- int total_req_bytes;
+ int hw_processed_bytes;
};
struct crypto_priv {
@@ -72,10 +84,12 @@ struct crypto_priv {
spinlock_t lock;
struct crypto_queue queue;
enum engine_status eng_st;
- struct ablkcipher_request *cur_req;
+ struct crypto_async_request *cur_req;
struct req_progress p;
int max_req_size;
int sram_size;
+ int has_sha1;
+ int has_hmac_sha1;
};
static struct crypto_priv *cpg;
@@ -97,6 +111,31 @@ struct mv_req_ctx {
int decrypt;
};
+enum hash_op {
+ COP_SHA1,
+ COP_HMAC_SHA1
+};
+
+struct mv_tfm_hash_ctx {
+ struct crypto_shash *fallback;
+ struct crypto_shash *base_hash;
+ u32 ivs[2 * SHA1_DIGEST_SIZE / 4];
+ int count_add;
+ enum hash_op op;
+};
+
+struct mv_req_hash_ctx {
+ u64 count;
+ u32 state[SHA1_DIGEST_SIZE / 4];
+ u8 buffer[SHA1_BLOCK_SIZE];
+ int first_hash; /* marks that we don't have previous state */
+ int last_chunk; /* marks that this is the 'final' request */
+ int extra_bytes; /* unprocessed bytes in buffer */
+ enum hash_op op;
+ int count_add;
+ struct scatterlist dummysg;
+};
+
static void compute_aes_dec_key(struct mv_ctx *ctx)
{
struct crypto_aes_ctx gen_aes_key;
@@ -144,32 +183,51 @@ static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static void setup_data_in(struct ablkcipher_request *req)
+static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
{
int ret;
- void *buf;
+ void *sbuf;
+ int copied = 0;
- if (!cpg->p.sg_src_left) {
- ret = sg_miter_next(&cpg->p.src_sg_it);
- BUG_ON(!ret);
- cpg->p.sg_src_left = cpg->p.src_sg_it.length;
- cpg->p.src_start = 0;
- }
-
- cpg->p.crypt_len = min(cpg->p.sg_src_left, cpg->max_req_size);
-
- buf = cpg->p.src_sg_it.addr;
- buf += cpg->p.src_start;
+ while (1) {
+ if (!p->sg_src_left) {
+ ret = sg_miter_next(&p->src_sg_it);
+ BUG_ON(!ret);
+ p->sg_src_left = p->src_sg_it.length;
+ p->src_start = 0;
+ }
- memcpy(cpg->sram + SRAM_DATA_IN_START, buf, cpg->p.crypt_len);
+ sbuf = p->src_sg_it.addr + p->src_start;
+
+ if (p->sg_src_left <= len - copied) {
+ memcpy(dbuf + copied, sbuf, p->sg_src_left);
+ copied += p->sg_src_left;
+ p->sg_src_left = 0;
+ if (copied >= len)
+ break;
+ } else {
+ int copy_len = len - copied;
+ memcpy(dbuf + copied, sbuf, copy_len);
+ p->src_start += copy_len;
+ p->sg_src_left -= copy_len;
+ break;
+ }
+ }
+}
- cpg->p.sg_src_left -= cpg->p.crypt_len;
- cpg->p.src_start += cpg->p.crypt_len;
+static void setup_data_in(void)
+{
+ struct req_progress *p = &cpg->p;
+ int data_in_sram =
+ min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size);
+ copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len,
+ data_in_sram - p->crypt_len);
+ p->crypt_len = data_in_sram;
}
static void mv_process_current_q(int first_block)
{
- struct ablkcipher_request *req = cpg->cur_req;
+ struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
struct sec_accel_config op;
@@ -179,6 +237,7 @@ static void mv_process_current_q(int first_block)
op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
break;
case COP_AES_CBC:
+ default:
op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
@@ -211,7 +270,7 @@ static void mv_process_current_q(int first_block)
ENC_P_DST(SRAM_DATA_OUT_START);
op.enc_key_p = SRAM_DATA_KEY_P;
- setup_data_in(req);
+ setup_data_in();
op.enc_len = cpg->p.crypt_len;
memcpy(cpg->sram + SRAM_CONFIG, &op,
sizeof(struct sec_accel_config));
@@ -228,91 +287,294 @@ static void mv_process_current_q(int first_block)
static void mv_crypto_algo_completion(void)
{
- struct ablkcipher_request *req = cpg->cur_req;
+ struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+ sg_miter_stop(&cpg->p.src_sg_it);
+ sg_miter_stop(&cpg->p.dst_sg_it);
+
if (req_ctx->op != COP_AES_CBC)
return ;
memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
}
+static void mv_process_hash_current(int first_block)
+{
+ struct ahash_request *req = ahash_request_cast(cpg->cur_req);
+ struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
+ struct req_progress *p = &cpg->p;
+ struct sec_accel_config op = { 0 };
+ int is_last;
+
+ switch (req_ctx->op) {
+ case COP_SHA1:
+ default:
+ op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1;
+ break;
+ case COP_HMAC_SHA1:
+ op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
+ break;
+ }
+
+ op.mac_src_p = MAC_SRC_DATA_P(SRAM_DATA_IN_START) |
+ MAC_SRC_TOTAL_LEN((u32)req_ctx->count);
+
+ setup_data_in();
+
+ op.mac_digest =
+ MAC_DIGEST_P(SRAM_DIGEST_BUF) | MAC_FRAG_LEN(p->crypt_len);
+ op.mac_iv =
+ MAC_INNER_IV_P(SRAM_HMAC_IV_IN) |
+ MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT);
+
+ is_last = req_ctx->last_chunk
+ && (p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes)
+ && (req_ctx->count <= MAX_HW_HASH_SIZE);
+ if (req_ctx->first_hash) {
+ if (is_last)
+ op.config |= CFG_NOT_FRAG;
+ else
+ op.config |= CFG_FIRST_FRAG;
+
+ req_ctx->first_hash = 0;
+ } else {
+ if (is_last)
+ op.config |= CFG_LAST_FRAG;
+ else
+ op.config |= CFG_MID_FRAG;
+ }
+
+ memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
+
+ writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
+ /* GO */
+ writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
+
+ /*
+ * XXX: add timer if the interrupt does not occur for some mystery
+ * reason
+ */
+}
+
+static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx,
+ struct shash_desc *desc)
+{
+ int i;
+ struct sha1_state shash_state;
+
+ shash_state.count = ctx->count + ctx->count_add;
+ for (i = 0; i < 5; i++)
+ shash_state.state[i] = ctx->state[i];
+ memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer));
+ return crypto_shash_import(desc, &shash_state);
+}
+
+static int mv_hash_final_fallback(struct ahash_request *req)
+{
+ const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
+ struct {
+ struct shash_desc shash;
+ char ctx[crypto_shash_descsize(tfm_ctx->fallback)];
+ } desc;
+ int rc;
+
+ desc.shash.tfm = tfm_ctx->fallback;
+ desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ if (unlikely(req_ctx->first_hash)) {
+ crypto_shash_init(&desc.shash);
+ crypto_shash_update(&desc.shash, req_ctx->buffer,
+ req_ctx->extra_bytes);
+ } else {
+ /* only SHA1 for now....
+ */
+ rc = mv_hash_import_sha1_ctx(req_ctx, &desc.shash);
+ if (rc)
+ goto out;
+ }
+ rc = crypto_shash_final(&desc.shash, req->result);
+out:
+ return rc;
+}
+
+static void mv_hash_algo_completion(void)
+{
+ struct ahash_request *req = ahash_request_cast(cpg->cur_req);
+ struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
+
+ if (ctx->extra_bytes)
+ copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
+ sg_miter_stop(&cpg->p.src_sg_it);
+
+ ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
+ ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
+ ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
+ ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
+ ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
+
+ if (likely(ctx->last_chunk)) {
+ if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
+ memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
+ crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+ } else
+ mv_hash_final_fallback(req);
+ }
+}
+
static void dequeue_complete_req(void)
{
- struct ablkcipher_request *req = cpg->cur_req;
+ struct crypto_async_request *req = cpg->cur_req;
void *buf;
int ret;
+ cpg->p.hw_processed_bytes += cpg->p.crypt_len;
+ if (cpg->p.copy_back) {
+ int need_copy_len = cpg->p.crypt_len;
+ int sram_offset = 0;
+ do {
+ int dst_copy;
+
+ if (!cpg->p.sg_dst_left) {
+ ret = sg_miter_next(&cpg->p.dst_sg_it);
+ BUG_ON(!ret);
+ cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
+ cpg->p.dst_start = 0;
+ }
- cpg->p.total_req_bytes += cpg->p.crypt_len;
- do {
- int dst_copy;
-
- if (!cpg->p.sg_dst_left) {
- ret = sg_miter_next(&cpg->p.dst_sg_it);
- BUG_ON(!ret);
- cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
- cpg->p.dst_start = 0;
- }
-
- buf = cpg->p.dst_sg_it.addr;
- buf += cpg->p.dst_start;
+ buf = cpg->p.dst_sg_it.addr;
+ buf += cpg->p.dst_start;
- dst_copy = min(cpg->p.crypt_len, cpg->p.sg_dst_left);
+ dst_copy = min(need_copy_len, cpg->p.sg_dst_left);
- memcpy(buf, cpg->sram + SRAM_DATA_OUT_START, dst_copy);
+ memcpy(buf,
+ cpg->sram + SRAM_DATA_OUT_START + sram_offset,
+ dst_copy);
+ sram_offset += dst_copy;
+ cpg->p.sg_dst_left -= dst_copy;
+ need_copy_len -= dst_copy;
+ cpg->p.dst_start += dst_copy;
+ } while (need_copy_len > 0);
+ }
- cpg->p.sg_dst_left -= dst_copy;
- cpg->p.crypt_len -= dst_copy;
- cpg->p.dst_start += dst_copy;
- } while (cpg->p.crypt_len > 0);
+ cpg->p.crypt_len = 0;
BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
- if (cpg->p.total_req_bytes < req->nbytes) {
+ if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
/* process next scatter list entry */
cpg->eng_st = ENGINE_BUSY;
- mv_process_current_q(0);
+ cpg->p.process(0);
} else {
- sg_miter_stop(&cpg->p.src_sg_it);
- sg_miter_stop(&cpg->p.dst_sg_it);
- mv_crypto_algo_completion();
+ cpg->p.complete();
cpg->eng_st = ENGINE_IDLE;
- req->base.complete(&req->base, 0);
+ local_bh_disable();
+ req->complete(req, 0);
+ local_bh_enable();
}
}
static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
{
int i = 0;
-
- do {
- total_bytes -= sl[i].length;
- i++;
-
- } while (total_bytes > 0);
+ size_t cur_len;
+
+ while (1) {
+ cur_len = sl[i].length;
+ ++i;
+ if (total_bytes > cur_len)
+ total_bytes -= cur_len;
+ else
+ break;
+ }
return i;
}
-static void mv_enqueue_new_req(struct ablkcipher_request *req)
+static void mv_start_new_crypt_req(struct ablkcipher_request *req)
{
+ struct req_progress *p = &cpg->p;
int num_sgs;
- cpg->cur_req = req;
- memset(&cpg->p, 0, sizeof(struct req_progress));
+ cpg->cur_req = &req->base;
+ memset(p, 0, sizeof(struct req_progress));
+ p->hw_nbytes = req->nbytes;
+ p->complete = mv_crypto_algo_completion;
+ p->process = mv_process_current_q;
+ p->copy_back = 1;
num_sgs = count_sgs(req->src, req->nbytes);
- sg_miter_start(&cpg->p.src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
+ sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
num_sgs = count_sgs(req->dst, req->nbytes);
- sg_miter_start(&cpg->p.dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
+ sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
+
mv_process_current_q(1);
}
+static void mv_start_new_hash_req(struct ahash_request *req)
+{
+ struct req_progress *p = &cpg->p;
+ struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
+ const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
+ int num_sgs, hw_bytes, old_extra_bytes, rc;
+ cpg->cur_req = &req->base;
+ memset(p, 0, sizeof(struct req_progress));
+ hw_bytes = req->nbytes + ctx->extra_bytes;
+ old_extra_bytes = ctx->extra_bytes;
+
+ if (unlikely(ctx->extra_bytes)) {
+ memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
+ ctx->extra_bytes);
+ p->crypt_len = ctx->extra_bytes;
+ }
+
+ memcpy(cpg->sram + SRAM_HMAC_IV_IN, tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
+
+ if (unlikely(!ctx->first_hash)) {
+ writel(ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
+ writel(ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
+ writel(ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
+ writel(ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
+ writel(ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
+ }
+
+ ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
+ if (ctx->extra_bytes != 0
+ && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
+ hw_bytes -= ctx->extra_bytes;
+ else
+ ctx->extra_bytes = 0;
+
+ num_sgs = count_sgs(req->src, req->nbytes);
+ sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
+
+ if (hw_bytes) {
+ p->hw_nbytes = hw_bytes;
+ p->complete = mv_hash_algo_completion;
+ p->process = mv_process_hash_current;
+
+ mv_process_hash_current(1);
+ } else {
+ copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
+ ctx->extra_bytes - old_extra_bytes);
+ sg_miter_stop(&p->src_sg_it);
+ if (ctx->last_chunk)
+ rc = mv_hash_final_fallback(req);
+ else
+ rc = 0;
+ cpg->eng_st = ENGINE_IDLE;
+ local_bh_disable();
+ req->base.complete(&req->base, rc);
+ local_bh_enable();
+ }
+}
+
static int queue_manag(void *data)
{
cpg->eng_st = ENGINE_IDLE;
do {
- struct ablkcipher_request *req;
struct crypto_async_request *async_req = NULL;
struct crypto_async_request *backlog;
@@ -338,9 +600,18 @@ static int queue_manag(void *data)
}
if (async_req) {
- req = container_of(async_req,
- struct ablkcipher_request, base);
- mv_enqueue_new_req(req);
+ if (async_req->tfm->__crt_alg->cra_type !=
+ &crypto_ahash_type) {
+ struct ablkcipher_request *req =
+ container_of(async_req,
+ struct ablkcipher_request,
+ base);
+ mv_start_new_crypt_req(req);
+ } else {
+ struct ahash_request *req =
+ ahash_request_cast(async_req);
+ mv_start_new_hash_req(req);
+ }
async_req = NULL;
}
@@ -350,13 +621,13 @@ static int queue_manag(void *data)
return 0;
}
-static int mv_handle_req(struct ablkcipher_request *req)
+static int mv_handle_req(struct crypto_async_request *req)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&cpg->lock, flags);
- ret = ablkcipher_enqueue_request(&cpg->queue, req);
+ ret = crypto_enqueue_request(&cpg->queue, req);
spin_unlock_irqrestore(&cpg->lock, flags);
wake_up_process(cpg->queue_th);
return ret;
@@ -369,7 +640,7 @@ static int mv_enc_aes_ecb(struct ablkcipher_request *req)
req_ctx->op = COP_AES_ECB;
req_ctx->decrypt = 0;
- return mv_handle_req(req);
+ return mv_handle_req(&req->base);
}
static int mv_dec_aes_ecb(struct ablkcipher_request *req)
@@ -381,7 +652,7 @@ static int mv_dec_aes_ecb(struct ablkcipher_request *req)
req_ctx->decrypt = 1;
compute_aes_dec_key(ctx);
- return mv_handle_req(req);
+ return mv_handle_req(&req->base);
}
static int mv_enc_aes_cbc(struct ablkcipher_request *req)
@@ -391,7 +662,7 @@ static int mv_enc_aes_cbc(struct ablkcipher_request *req)
req_ctx->op = COP_AES_CBC;
req_ctx->decrypt = 0;
- return mv_handle_req(req);
+ return mv_handle_req(&req->base);
}
static int mv_dec_aes_cbc(struct ablkcipher_request *req)
@@ -403,7 +674,7 @@ static int mv_dec_aes_cbc(struct ablkcipher_request *req)
req_ctx->decrypt = 1;
compute_aes_dec_key(ctx);
- return mv_handle_req(req);
+ return mv_handle_req(&req->base);
}
static int mv_cra_init(struct crypto_tfm *tfm)
@@ -412,6 +683,215 @@ static int mv_cra_init(struct crypto_tfm *tfm)
return 0;
}
+static void mv_init_hash_req_ctx(struct mv_req_hash_ctx *ctx, int op,
+ int is_last, unsigned int req_len,
+ int count_add)
+{
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->op = op;
+ ctx->count = req_len;
+ ctx->first_hash = 1;
+ ctx->last_chunk = is_last;
+ ctx->count_add = count_add;
+}
+
+static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last,
+ unsigned req_len)
+{
+ ctx->last_chunk = is_last;
+ ctx->count += req_len;
+}
+
+static int mv_hash_init(struct ahash_request *req)
+{
+ const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
+ mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0,
+ tfm_ctx->count_add);
+ return 0;
+}
+
+static int mv_hash_update(struct ahash_request *req)
+{
+ if (!req->nbytes)
+ return 0;
+
+ mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes);
+ return mv_handle_req(&req->base);
+}
+
+static int mv_hash_final(struct ahash_request *req)
+{
+ struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
+ /* dummy buffer of 4 bytes */
+ sg_init_one(&ctx->dummysg, ctx->buffer, 4);
+ /* I think I'm allowed to do that... */
+ ahash_request_set_crypt(req, &ctx->dummysg, req->result, 0);
+ mv_update_hash_req_ctx(ctx, 1, 0);
+ return mv_handle_req(&req->base);
+}
+
+static int mv_hash_finup(struct ahash_request *req)
+{
+ if (!req->nbytes)
+ return mv_hash_final(req);
+
+ mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
+ return mv_handle_req(&req->base);
+}
+
+static int mv_hash_digest(struct ahash_request *req)
+{
+ const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
+ mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1,
+ req->nbytes, tfm_ctx->count_add);
+ return mv_handle_req(&req->base);
+}
+
+static void mv_hash_init_ivs(struct mv_tfm_hash_ctx *ctx, const void *istate,
+ const void *ostate)
+{
+ const struct sha1_state *isha1_state = istate, *osha1_state = ostate;
+ int i;
+ for (i = 0; i < 5; i++) {
+ ctx->ivs[i] = cpu_to_be32(isha1_state->state[i]);
+ ctx->ivs[i + 5] = cpu_to_be32(osha1_state->state[i]);
+ }
+}
+
+static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 * key,
+ unsigned int keylen)
+{
+ int rc;
+ struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(&tfm->base);
+ int bs, ds, ss;
+
+ if (!ctx->base_hash)
+ return 0;
+
+ rc = crypto_shash_setkey(ctx->fallback, key, keylen);
+ if (rc)
+ return rc;
+
+ /* Can't see a way to extract the ipad/opad from the fallback tfm
+ so I'm basically copying code from the hmac module */
+ bs = crypto_shash_blocksize(ctx->base_hash);
+ ds = crypto_shash_digestsize(ctx->base_hash);
+ ss = crypto_shash_statesize(ctx->base_hash);
+
+ {
+ struct {
+ struct shash_desc shash;
+ char ctx[crypto_shash_descsize(ctx->base_hash)];
+ } desc;
+ unsigned int i;
+ char ipad[ss];
+ char opad[ss];
+
+ desc.shash.tfm = ctx->base_hash;
+ desc.shash.flags = crypto_shash_get_flags(ctx->base_hash) &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ if (keylen > bs) {
+ int err;
+
+ err = crypto_shash_digest(&desc.shash, key, keylen, ipad);
+ if (err)
+ return err;
+
+ keylen = ds;
+ } else
+ memcpy(ipad, key, keylen);
+
+ memset(ipad + keylen, 0, bs - keylen);
+ memcpy(opad, ipad, bs);
+
+ for (i = 0; i < bs; i++) {
+ ipad[i] ^= 0x36;
+ opad[i] ^= 0x5c;
+ }
+
+ rc = crypto_shash_init(&desc.shash) ? :
+ crypto_shash_update(&desc.shash, ipad, bs) ? :
+ crypto_shash_export(&desc.shash, ipad) ? :
+ crypto_shash_init(&desc.shash) ? :
+ crypto_shash_update(&desc.shash, opad, bs) ? :
+ crypto_shash_export(&desc.shash, opad);
+
+ if (rc == 0)
+ mv_hash_init_ivs(ctx, ipad, opad);
+
+ return rc;
+ }
+}
+
+static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
+ enum hash_op op, int count_add)
+{
+ const char *fallback_driver_name = tfm->__crt_alg->cra_name;
+ struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_shash *fallback_tfm = NULL;
+ struct crypto_shash *base_hash = NULL;
+ int err = -ENOMEM;
+
+ ctx->op = op;
+ ctx->count_add = count_add;
+
+ /* Allocate a fallback and abort if it failed. */
+ fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(fallback_tfm)) {
+ printk(KERN_WARNING MV_CESA
+ "Fallback driver '%s' could not be loaded!\n",
+ fallback_driver_name);
+ err = PTR_ERR(fallback_tfm);
+ goto out;
+ }
+ ctx->fallback = fallback_tfm;
+
+ if (base_hash_name) {
+ /* Allocate a hash to compute the ipad/opad of hmac. */
+ base_hash = crypto_alloc_shash(base_hash_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(base_hash)) {
+ printk(KERN_WARNING MV_CESA
+ "Base driver '%s' could not be loaded!\n",
+ base_hash_name);
+ err = PTR_ERR(base_hash);
+ goto err_bad_base;
+ }
+ }
+ ctx->base_hash = base_hash;
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct mv_req_hash_ctx) +
+ crypto_shash_descsize(ctx->fallback));
+ return 0;
+err_bad_base:
+ crypto_free_shash(fallback_tfm);
+out:
+ return err;
+}
+
+static void mv_cra_hash_exit(struct crypto_tfm *tfm)
+{
+ struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_shash(ctx->fallback);
+ if (ctx->base_hash)
+ crypto_free_shash(ctx->base_hash);
+}
+
+static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm)
+{
+ return mv_cra_hash_init(tfm, NULL, COP_SHA1, 0);
+}
+
+static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
+{
+ return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
+}
+
irqreturn_t crypto_int(int irq, void *priv)
{
u32 val;
@@ -474,6 +954,53 @@ struct crypto_alg mv_aes_alg_cbc = {
},
};
+struct ahash_alg mv_sha1_alg = {
+ .init = mv_hash_init,
+ .update = mv_hash_update,
+ .final = mv_hash_final,
+ .finup = mv_hash_finup,
+ .digest = mv_hash_digest,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "mv-sha1",
+ .cra_priority = 300,
+ .cra_flags =
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
+ .cra_init = mv_cra_hash_sha1_init,
+ .cra_exit = mv_cra_hash_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }
+};
+
+struct ahash_alg mv_hmac_sha1_alg = {
+ .init = mv_hash_init,
+ .update = mv_hash_update,
+ .final = mv_hash_final,
+ .finup = mv_hash_finup,
+ .digest = mv_hash_digest,
+ .setkey = mv_hash_setkey,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "hmac(sha1)",
+ .cra_driver_name = "mv-hmac-sha1",
+ .cra_priority = 300,
+ .cra_flags =
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
+ .cra_init = mv_cra_hash_hmac_sha1_init,
+ .cra_exit = mv_cra_hash_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }
+};
+
static int mv_probe(struct platform_device *pdev)
{
struct crypto_priv *cp;
@@ -482,7 +1009,7 @@ static int mv_probe(struct platform_device *pdev)
int ret;
if (cpg) {
- printk(KERN_ERR "Second crypto dev?\n");
+ printk(KERN_ERR MV_CESA "Second crypto dev?\n");
return -EEXIST;
}
@@ -546,6 +1073,21 @@ static int mv_probe(struct platform_device *pdev)
ret = crypto_register_alg(&mv_aes_alg_cbc);
if (ret)
goto err_unreg_ecb;
+
+ ret = crypto_register_ahash(&mv_sha1_alg);
+ if (ret == 0)
+ cpg->has_sha1 = 1;
+ else
+ printk(KERN_WARNING MV_CESA "Could not register sha1 driver\n");
+
+ ret = crypto_register_ahash(&mv_hmac_sha1_alg);
+ if (ret == 0) {
+ cpg->has_hmac_sha1 = 1;
+ } else {
+ printk(KERN_WARNING MV_CESA
+ "Could not register hmac-sha1 driver\n");
+ }
+
return 0;
err_unreg_ecb:
crypto_unregister_alg(&mv_aes_alg_ecb);
@@ -570,6 +1112,10 @@ static int mv_remove(struct platform_device *pdev)
crypto_unregister_alg(&mv_aes_alg_ecb);
crypto_unregister_alg(&mv_aes_alg_cbc);
+ if (cp->has_sha1)
+ crypto_unregister_ahash(&mv_sha1_alg);
+ if (cp->has_hmac_sha1)
+ crypto_unregister_ahash(&mv_hmac_sha1_alg);
kthread_stop(cp->queue_th);
free_irq(cp->irq, cp);
memset(cp->sram, 0, cp->sram_size);
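With the mv_cesa changes above, the engine now exposes "mv-sha1" and "mv-hmac-sha1"
ahash implementations next to the existing AES modes. A hedged sketch of how a kernel
caller might drive any such asynchronous hash through the generic API; sha1_demo() and
demo_complete() are illustrative names, and buf is assumed to be a linear (kmalloc'd)
buffer suitable for a scatterlist:

	#include <crypto/hash.h>
	#include <linux/completion.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>

	struct demo_wait {
		struct completion done;
		int err;
	};

	static void demo_complete(struct crypto_async_request *req, int err)
	{
		struct demo_wait *w = req->data;

		if (err == -EINPROGRESS)
			return;		/* request was backlogged, keep waiting */
		w->err = err;
		complete(&w->done);
	}

	static int sha1_demo(const void *buf, unsigned int len, u8 *digest)
	{
		struct crypto_ahash *tfm;
		struct ahash_request *req;
		struct scatterlist sg;
		struct demo_wait w;
		int err;

		init_completion(&w.done);

		/* picks mv-sha1 when it has the highest priority */
		tfm = crypto_alloc_ahash("sha1", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		req = ahash_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			err = -ENOMEM;
			goto out_free_tfm;
		}

		sg_init_one(&sg, buf, len);
		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   demo_complete, &w);
		ahash_request_set_crypt(req, &sg, digest, len);

		err = crypto_ahash_digest(req);
		if (err == -EINPROGRESS || err == -EBUSY) {
			wait_for_completion(&w.done);
			err = w.err;
		}

		ahash_request_free(req);
	out_free_tfm:
		crypto_free_ahash(tfm);
		return err;
	}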
diff --git a/drivers/crypto/mv_cesa.h b/drivers/crypto/mv_cesa.h
index c3e25d3bb171..08fcb1116d90 100644
--- a/drivers/crypto/mv_cesa.h
+++ b/drivers/crypto/mv_cesa.h
@@ -1,6 +1,10 @@
#ifndef __MV_CRYPTO_H__
#define DIGEST_INITIAL_VAL_A 0xdd00
+#define DIGEST_INITIAL_VAL_B 0xdd04
+#define DIGEST_INITIAL_VAL_C 0xdd08
+#define DIGEST_INITIAL_VAL_D 0xdd0c
+#define DIGEST_INITIAL_VAL_E 0xdd10
#define DES_CMD_REG 0xdd58
#define SEC_ACCEL_CMD 0xde00
@@ -70,6 +74,10 @@ struct sec_accel_config {
#define CFG_AES_LEN_128 (0 << 24)
#define CFG_AES_LEN_192 (1 << 24)
#define CFG_AES_LEN_256 (2 << 24)
+#define CFG_NOT_FRAG (0 << 30)
+#define CFG_FIRST_FRAG (1 << 30)
+#define CFG_LAST_FRAG (2 << 30)
+#define CFG_MID_FRAG (3 << 30)
u32 enc_p;
#define ENC_P_SRC(x) (x)
@@ -90,7 +98,11 @@ struct sec_accel_config {
#define MAC_SRC_TOTAL_LEN(x) ((x) << 16)
u32 mac_digest;
+#define MAC_DIGEST_P(x) (x)
+#define MAC_FRAG_LEN(x) ((x) << 16)
u32 mac_iv;
+#define MAC_INNER_IV_P(x) (x)
+#define MAC_OUTER_IV_P(x) ((x) << 16)
}__attribute__ ((packed));
/*
* /-----------\ 0
@@ -101,19 +113,37 @@ struct sec_accel_config {
* | IV IN | 4 * 4
* |-----------| 0x40 (inplace)
* | IV BUF | 4 * 4
- * |-----------| 0x50
+ * |-----------| 0x80
* | DATA IN | 16 * x (max ->max_req_size)
- * |-----------| 0x50 (inplace operation)
+ * |-----------| 0x80 (inplace operation)
* | DATA OUT | 16 * x (max ->max_req_size)
* \-----------/ SRAM size
*/
+
+ /* Hashing memory map:
+ * /-----------\ 0
+ * | ACCEL CFG | 4 * 8
+ * |-----------| 0x20
+ * | Inner IV | 5 * 4
+ * |-----------| 0x34
+ * | Outer IV | 5 * 4
+ * |-----------| 0x48
+ * | Output BUF| 5 * 4
+ * |-----------| 0x80
+ * | DATA IN | 64 * x (max ->max_req_size)
+ * \-----------/ SRAM size
+ */
#define SRAM_CONFIG 0x00
#define SRAM_DATA_KEY_P 0x20
#define SRAM_DATA_IV 0x40
#define SRAM_DATA_IV_BUF 0x40
-#define SRAM_DATA_IN_START 0x50
-#define SRAM_DATA_OUT_START 0x50
+#define SRAM_DATA_IN_START 0x80
+#define SRAM_DATA_OUT_START 0x80
+
+#define SRAM_HMAC_IV_IN 0x20
+#define SRAM_HMAC_IV_OUT 0x34
+#define SRAM_DIGEST_BUF 0x48
-#define SRAM_CFG_SPACE 0x50
+#define SRAM_CFG_SPACE 0x80
#endif
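The mv_cesa.h layout change moves DATA IN/OUT from 0x50 to 0x80 so that the hashing
map (inner/outer HMAC IVs at 0x20/0x34, digest buffer at 0x48) fits below the data
area shared with the cipher path. A small illustrative sanity check, assuming
mv_cesa.h is included; it is not part of the patch:

	#include <linux/kernel.h>
	#include <crypto/sha.h>

	static inline void mv_cesa_layout_check(void)
	{
		/* each IV/digest slot holds 5 * 4 = SHA1_DIGEST_SIZE bytes */
		BUILD_BUG_ON(SRAM_HMAC_IV_OUT < SRAM_HMAC_IV_IN + SHA1_DIGEST_SIZE);
		BUILD_BUG_ON(SRAM_DIGEST_BUF < SRAM_HMAC_IV_OUT + SHA1_DIGEST_SIZE);
		BUILD_BUG_ON(SRAM_DATA_IN_START < SRAM_DIGEST_BUF + SHA1_DIGEST_SIZE);
		BUILD_BUG_ON(SRAM_CFG_SPACE != SRAM_DATA_IN_START);
	}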
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
new file mode 100644
index 000000000000..8b034337793f
--- /dev/null
+++ b/drivers/crypto/omap-sham.c
@@ -0,0 +1,1259 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for OMAP SHA1/MD5 HW acceleration.
+ *
+ * Copyright (c) 2010 Nokia Corporation
+ * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Some ideas are from old omap-sha1-md5.c driver.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/version.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <linux/cryptohash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+#include <crypto/sha.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+
+#include <plat/cpu.h>
+#include <plat/dma.h>
+#include <mach/irqs.h>
+
+#define SHA_REG_DIGEST(x) (0x00 + ((x) * 0x04))
+#define SHA_REG_DIN(x) (0x1C + ((x) * 0x04))
+
+#define SHA1_MD5_BLOCK_SIZE SHA1_BLOCK_SIZE
+#define MD5_DIGEST_SIZE 16
+
+#define SHA_REG_DIGCNT 0x14
+
+#define SHA_REG_CTRL 0x18
+#define SHA_REG_CTRL_LENGTH (0xFFFFFFFF << 5)
+#define SHA_REG_CTRL_CLOSE_HASH (1 << 4)
+#define SHA_REG_CTRL_ALGO_CONST (1 << 3)
+#define SHA_REG_CTRL_ALGO (1 << 2)
+#define SHA_REG_CTRL_INPUT_READY (1 << 1)
+#define SHA_REG_CTRL_OUTPUT_READY (1 << 0)
+
+#define SHA_REG_REV 0x5C
+#define SHA_REG_REV_MAJOR 0xF0
+#define SHA_REG_REV_MINOR 0x0F
+
+#define SHA_REG_MASK 0x60
+#define SHA_REG_MASK_DMA_EN (1 << 3)
+#define SHA_REG_MASK_IT_EN (1 << 2)
+#define SHA_REG_MASK_SOFTRESET (1 << 1)
+#define SHA_REG_AUTOIDLE (1 << 0)
+
+#define SHA_REG_SYSSTATUS 0x64
+#define SHA_REG_SYSSTATUS_RESETDONE (1 << 0)
+
+#define DEFAULT_TIMEOUT_INTERVAL HZ
+
+#define FLAGS_FIRST 0x0001
+#define FLAGS_FINUP 0x0002
+#define FLAGS_FINAL 0x0004
+#define FLAGS_FAST 0x0008
+#define FLAGS_SHA1 0x0010
+#define FLAGS_DMA_ACTIVE 0x0020
+#define FLAGS_OUTPUT_READY 0x0040
+#define FLAGS_CLEAN 0x0080
+#define FLAGS_INIT 0x0100
+#define FLAGS_CPU 0x0200
+#define FLAGS_HMAC 0x0400
+
+/* 3rd byte */
+#define FLAGS_BUSY 16
+
+#define OP_UPDATE 1
+#define OP_FINAL 2
+
+struct omap_sham_dev;
+
+struct omap_sham_reqctx {
+ struct omap_sham_dev *dd;
+ unsigned long flags;
+ unsigned long op;
+
+ size_t digcnt;
+ u8 *buffer;
+ size_t bufcnt;
+ size_t buflen;
+ dma_addr_t dma_addr;
+
+ /* walk state */
+ struct scatterlist *sg;
+ unsigned int offset; /* offset in current sg */
+ unsigned int total; /* total request */
+};
+
+struct omap_sham_hmac_ctx {
+ struct crypto_shash *shash;
+ u8 ipad[SHA1_MD5_BLOCK_SIZE];
+ u8 opad[SHA1_MD5_BLOCK_SIZE];
+};
+
+struct omap_sham_ctx {
+ struct omap_sham_dev *dd;
+
+ unsigned long flags;
+
+ /* fallback stuff */
+ struct crypto_shash *fallback;
+
+ struct omap_sham_hmac_ctx base[0];
+};
+
+#define OMAP_SHAM_QUEUE_LENGTH 1
+
+struct omap_sham_dev {
+ struct list_head list;
+ unsigned long phys_base;
+ struct device *dev;
+ void __iomem *io_base;
+ int irq;
+ struct clk *iclk;
+ spinlock_t lock;
+ int dma;
+ int dma_lch;
+ struct tasklet_struct done_task;
+ struct tasklet_struct queue_task;
+
+ unsigned long flags;
+ struct crypto_queue queue;
+ struct ahash_request *req;
+};
+
+struct omap_sham_drv {
+ struct list_head dev_list;
+ spinlock_t lock;
+ unsigned long flags;
+};
+
+static struct omap_sham_drv sham = {
+ .dev_list = LIST_HEAD_INIT(sham.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(sham.lock),
+};
+
+static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
+{
+ return __raw_readl(dd->io_base + offset);
+}
+
+static inline void omap_sham_write(struct omap_sham_dev *dd,
+ u32 offset, u32 value)
+{
+ __raw_writel(value, dd->io_base + offset);
+}
+
+static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
+ u32 value, u32 mask)
+{
+ u32 val;
+
+ val = omap_sham_read(dd, address);
+ val &= ~mask;
+ val |= value;
+ omap_sham_write(dd, address, val);
+}
+
+static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
+{
+ unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;
+
+ while (!(omap_sham_read(dd, offset) & bit)) {
+ if (time_is_before_jiffies(timeout))
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void omap_sham_copy_hash(struct ahash_request *req, int out)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ u32 *hash = (u32 *)req->result;
+ int i;
+
+ if (likely(ctx->flags & FLAGS_SHA1)) {
+ /* SHA1 results are in big endian */
+ for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
+ if (out)
+ hash[i] = be32_to_cpu(omap_sham_read(ctx->dd,
+ SHA_REG_DIGEST(i)));
+ else
+ omap_sham_write(ctx->dd, SHA_REG_DIGEST(i),
+ cpu_to_be32(hash[i]));
+ } else {
+ /* MD5 results are in little endian */
+ for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++)
+ if (out)
+ hash[i] = le32_to_cpu(omap_sham_read(ctx->dd,
+ SHA_REG_DIGEST(i)));
+ else
+ omap_sham_write(ctx->dd, SHA_REG_DIGEST(i),
+ cpu_to_le32(hash[i]));
+ }
+}
+
+static int omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
+ int final, int dma)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+ u32 val = length << 5, mask;
+
+ if (unlikely(!ctx->digcnt)) {
+
+ clk_enable(dd->iclk);
+
+ if (!(dd->flags & FLAGS_INIT)) {
+ omap_sham_write_mask(dd, SHA_REG_MASK,
+ SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);
+
+ if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
+ SHA_REG_SYSSTATUS_RESETDONE))
+ return -ETIMEDOUT;
+
+ dd->flags |= FLAGS_INIT;
+ }
+ } else {
+ omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt);
+ }
+
+ omap_sham_write_mask(dd, SHA_REG_MASK,
+ SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
+ SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
+ /*
+ * Setting ALGO_CONST only for the first iteration
+ * and CLOSE_HASH only for the last one.
+ */
+ if (ctx->flags & FLAGS_SHA1)
+ val |= SHA_REG_CTRL_ALGO;
+ if (!ctx->digcnt)
+ val |= SHA_REG_CTRL_ALGO_CONST;
+ if (final)
+ val |= SHA_REG_CTRL_CLOSE_HASH;
+
+ mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
+ SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;
+
+ omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
+
+ return 0;
+}
+
+static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
+ size_t length, int final)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+ int err, count, len32;
+ const u32 *buffer = (const u32 *)buf;
+
+ dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
+ ctx->digcnt, length, final);
+
+ err = omap_sham_write_ctrl(dd, length, final, 0);
+ if (err)
+ return err;
+
+ if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY))
+ return -ETIMEDOUT;
+
+ ctx->digcnt += length;
+
+ if (final)
+ ctx->flags |= FLAGS_FINAL; /* catch last interrupt */
+
+ len32 = DIV_ROUND_UP(length, sizeof(u32));
+
+ for (count = 0; count < len32; count++)
+ omap_sham_write(dd, SHA_REG_DIN(count), buffer[count]);
+
+ return -EINPROGRESS;
+}
+
+static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
+ size_t length, int final)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+ int err, len32;
+
+ dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
+ ctx->digcnt, length, final);
+
+ /* flush cache entries related to our page */
+ if (dma_addr == ctx->dma_addr)
+ dma_sync_single_for_device(dd->dev, dma_addr, length,
+ DMA_TO_DEVICE);
+
+ len32 = DIV_ROUND_UP(length, sizeof(u32));
+
+ omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32,
+ 1, OMAP_DMA_SYNC_PACKET, dd->dma, OMAP_DMA_DST_SYNC);
+
+ omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC,
+ dma_addr, 0, 0);
+
+ err = omap_sham_write_ctrl(dd, length, final, 1);
+ if (err)
+ return err;
+
+ ctx->digcnt += length;
+
+ if (final)
+ ctx->flags |= FLAGS_FINAL; /* catch last interrupt */
+
+ dd->flags |= FLAGS_DMA_ACTIVE;
+
+ omap_start_dma(dd->dma_lch);
+
+ return -EINPROGRESS;
+}
+
+static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx,
+ const u8 *data, size_t length)
+{
+ size_t count = min(length, ctx->buflen - ctx->bufcnt);
+
+ count = min(count, ctx->total);
+ if (count <= 0)
+ return 0;
+ memcpy(ctx->buffer + ctx->bufcnt, data, count);
+ ctx->bufcnt += count;
+
+ return count;
+}
+
+static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx)
+{
+ size_t count;
+
+ while (ctx->sg) {
+ count = omap_sham_append_buffer(ctx,
+ sg_virt(ctx->sg) + ctx->offset,
+ ctx->sg->length - ctx->offset);
+ if (!count)
+ break;
+ ctx->offset += count;
+ ctx->total -= count;
+ if (ctx->offset == ctx->sg->length) {
+ ctx->sg = sg_next(ctx->sg);
+ if (ctx->sg)
+ ctx->offset = 0;
+ else
+ ctx->total = 0;
+ }
+ }
+
+ return 0;
+}
+
+static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+ unsigned int final;
+ size_t count;
+
+ if (!ctx->total)
+ return 0;
+
+ omap_sham_append_sg(ctx);
+
+ final = (ctx->flags & FLAGS_FINUP) && !ctx->total;
+
+ dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
+ ctx->bufcnt, ctx->digcnt, final);
+
+ if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
+ count = ctx->bufcnt;
+ ctx->bufcnt = 0;
+ return omap_sham_xmit_dma(dd, ctx->dma_addr, count, final);
+ }
+
+ return 0;
+}
+
+static int omap_sham_update_dma_fast(struct omap_sham_dev *dd)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+ unsigned int length;
+
+ ctx->flags |= FLAGS_FAST;
+
+ length = min(ctx->total, sg_dma_len(ctx->sg));
+ ctx->total = length;
+
+ if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
+ dev_err(dd->dev, "dma_map_sg error\n");
+ return -EINVAL;
+ }
+
+ ctx->total -= length;
+
+ return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, 1);
+}
+
+static int omap_sham_update_cpu(struct omap_sham_dev *dd)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+ int bufcnt;
+
+ omap_sham_append_sg(ctx);
+ bufcnt = ctx->bufcnt;
+ ctx->bufcnt = 0;
+
+ return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
+}
+
+static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+
+ omap_stop_dma(dd->dma_lch);
+ if (ctx->flags & FLAGS_FAST)
+ dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
+
+ return 0;
+}
+
+static void omap_sham_cleanup(struct ahash_request *req)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ struct omap_sham_dev *dd = ctx->dd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->lock, flags);
+ if (ctx->flags & FLAGS_CLEAN) {
+ spin_unlock_irqrestore(&dd->lock, flags);
+ return;
+ }
+ ctx->flags |= FLAGS_CLEAN;
+ spin_unlock_irqrestore(&dd->lock, flags);
+
+ if (ctx->digcnt)
+ clk_disable(dd->iclk);
+
+ if (ctx->dma_addr)
+ dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
+ DMA_TO_DEVICE);
+
+ if (ctx->buffer)
+ free_page((unsigned long)ctx->buffer);
+
+ dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
+}
+
+static int omap_sham_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ struct omap_sham_dev *dd = NULL, *tmp;
+
+ spin_lock_bh(&sham.lock);
+ if (!tctx->dd) {
+ list_for_each_entry(tmp, &sham.dev_list, list) {
+ dd = tmp;
+ break;
+ }
+ tctx->dd = dd;
+ } else {
+ dd = tctx->dd;
+ }
+ spin_unlock_bh(&sham.lock);
+
+ ctx->dd = dd;
+
+ ctx->flags = 0;
+
+ ctx->flags |= FLAGS_FIRST;
+
+ dev_dbg(dd->dev, "init: digest size: %d\n",
+ crypto_ahash_digestsize(tfm));
+
+ if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE)
+ ctx->flags |= FLAGS_SHA1;
+
+ ctx->bufcnt = 0;
+ ctx->digcnt = 0;
+
+ ctx->buflen = PAGE_SIZE;
+ ctx->buffer = (void *)__get_free_page(
+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!ctx->buffer)
+ return -ENOMEM;
+
+ ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
+ dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
+ free_page((unsigned long)ctx->buffer);
+ return -EINVAL;
+ }
+
+ if (tctx->flags & FLAGS_HMAC) {
+ struct omap_sham_hmac_ctx *bctx = tctx->base;
+
+ memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE);
+ ctx->bufcnt = SHA1_MD5_BLOCK_SIZE;
+ ctx->flags |= FLAGS_HMAC;
+ }
+
+ return 0;
+
+}
+
+static int omap_sham_update_req(struct omap_sham_dev *dd)
+{
+ struct ahash_request *req = dd->req;
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ int err;
+
+ dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
+ ctx->total, ctx->digcnt, (ctx->flags & FLAGS_FINUP) != 0);
+
+ if (ctx->flags & FLAGS_CPU)
+ err = omap_sham_update_cpu(dd);
+ else if (ctx->flags & FLAGS_FAST)
+ err = omap_sham_update_dma_fast(dd);
+ else
+ err = omap_sham_update_dma_slow(dd);
+
+ /* wait for DMA completion before we can take more data */
+ dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);
+
+ return err;
+}
+
+static int omap_sham_final_req(struct omap_sham_dev *dd)
+{
+ struct ahash_request *req = dd->req;
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ int err = 0, use_dma = 1;
+
+ if (ctx->bufcnt <= 64)
+ /* faster to handle last block with cpu */
+ use_dma = 0;
+
+ if (use_dma)
+ err = omap_sham_xmit_dma(dd, ctx->dma_addr, ctx->bufcnt, 1);
+ else
+ err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1);
+
+ ctx->bufcnt = 0;
+
+ if (err != -EINPROGRESS)
+ omap_sham_cleanup(req);
+
+ dev_dbg(dd->dev, "final_req: err: %d\n", err);
+
+ return err;
+}
+
+static int omap_sham_finish_req_hmac(struct ahash_request *req)
+{
+ struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+ struct omap_sham_hmac_ctx *bctx = tctx->base;
+ int bs = crypto_shash_blocksize(bctx->shash);
+ int ds = crypto_shash_digestsize(bctx->shash);
+ struct {
+ struct shash_desc shash;
+ char ctx[crypto_shash_descsize(bctx->shash)];
+ } desc;
+
+ desc.shash.tfm = bctx->shash;
+ desc.shash.flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */
+
+ return crypto_shash_init(&desc.shash) ?:
+ crypto_shash_update(&desc.shash, bctx->opad, bs) ?:
+ crypto_shash_finup(&desc.shash, req->result, ds, req->result);
+}
+
+static void omap_sham_finish_req(struct ahash_request *req, int err)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+
+ if (!err) {
+ omap_sham_copy_hash(ctx->dd->req, 1);
+ if (ctx->flags & FLAGS_HMAC)
+ err = omap_sham_finish_req_hmac(req);
+ }
+
+ if (ctx->flags & FLAGS_FINAL)
+ omap_sham_cleanup(req);
+
+ clear_bit(FLAGS_BUSY, &ctx->dd->flags);
+
+ if (req->base.complete)
+ req->base.complete(&req->base, err);
+}
+
+static int omap_sham_handle_queue(struct omap_sham_dev *dd)
+{
+ struct crypto_async_request *async_req, *backlog;
+ struct omap_sham_reqctx *ctx;
+ struct ahash_request *req, *prev_req;
+ unsigned long flags;
+ int err = 0;
+
+ if (test_and_set_bit(FLAGS_BUSY, &dd->flags))
+ return 0;
+
+ spin_lock_irqsave(&dd->lock, flags);
+ backlog = crypto_get_backlog(&dd->queue);
+ async_req = crypto_dequeue_request(&dd->queue);
+ if (!async_req)
+ clear_bit(FLAGS_BUSY, &dd->flags);
+ spin_unlock_irqrestore(&dd->lock, flags);
+
+ if (!async_req)
+ return 0;
+
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+
+ req = ahash_request_cast(async_req);
+
+ prev_req = dd->req;
+ dd->req = req;
+
+ ctx = ahash_request_ctx(req);
+
+ dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
+ ctx->op, req->nbytes);
+
+ if (req != prev_req && ctx->digcnt)
+ /* request has changed - restore hash */
+ omap_sham_copy_hash(req, 0);
+
+ if (ctx->op == OP_UPDATE) {
+ err = omap_sham_update_req(dd);
+ if (err != -EINPROGRESS && (ctx->flags & FLAGS_FINUP))
+ /* no final() after finup() */
+ err = omap_sham_final_req(dd);
+ } else if (ctx->op == OP_FINAL) {
+ err = omap_sham_final_req(dd);
+ }
+
+ if (err != -EINPROGRESS) {
+ /* done_task will not finish it, so do it here */
+ omap_sham_finish_req(req, err);
+ tasklet_schedule(&dd->queue_task);
+ }
+
+ dev_dbg(dd->dev, "exit, err: %d\n", err);
+
+ return err;
+}
+
+static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+ struct omap_sham_dev *dd = tctx->dd;
+ unsigned long flags;
+ int err;
+
+ ctx->op = op;
+
+ spin_lock_irqsave(&dd->lock, flags);
+ err = ahash_enqueue_request(&dd->queue, req);
+ spin_unlock_irqrestore(&dd->lock, flags);
+
+ omap_sham_handle_queue(dd);
+
+ return err;
+}
+
+static int omap_sham_update(struct ahash_request *req)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+
+ if (!req->nbytes)
+ return 0;
+
+ ctx->total = req->nbytes;
+ ctx->sg = req->src;
+ ctx->offset = 0;
+
+ if (ctx->flags & FLAGS_FINUP) {
+ if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) {
+ /*
+ * OMAP HW accel works only with buffers >= 9 bytes;
+ * will switch to the software fallback in final().
+ * final() has the same request and data.
+ */
+ omap_sham_append_sg(ctx);
+ return 0;
+ } else if (ctx->bufcnt + ctx->total <= 64) {
+ ctx->flags |= FLAGS_CPU;
+ } else if (!ctx->bufcnt && sg_is_last(ctx->sg)) {
+ /* maybe we can use faster functions */
+ int aligned = IS_ALIGNED((u32)ctx->sg->offset,
+ sizeof(u32));
+
+ if (aligned && (ctx->flags & FLAGS_FIRST))
+ /* digest: first and final */
+ ctx->flags |= FLAGS_FAST;
+
+ ctx->flags &= ~FLAGS_FIRST;
+ }
+ } else if (ctx->bufcnt + ctx->total <= ctx->buflen) {
+ /* if not finup -> not fast */
+ omap_sham_append_sg(ctx);
+ return 0;
+ }
+
+ return omap_sham_enqueue(req, OP_UPDATE);
+}
+
+static int omap_sham_shash_digest(struct crypto_shash *shash, u32 flags,
+ const u8 *data, unsigned int len, u8 *out)
+{
+ struct {
+ struct shash_desc shash;
+ char ctx[crypto_shash_descsize(shash)];
+ } desc;
+
+ desc.shash.tfm = shash;
+ desc.shash.flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_shash_digest(&desc.shash, data, len, out);
+}
+
+static int omap_sham_final_shash(struct ahash_request *req)
+{
+ struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+
+ return omap_sham_shash_digest(tctx->fallback, req->base.flags,
+ ctx->buffer, ctx->bufcnt, req->result);
+}
+
+static int omap_sham_final(struct ahash_request *req)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ int err = 0;
+
+ ctx->flags |= FLAGS_FINUP;
+
+ /* OMAP HW accel works only with buffers >= 9 bytes */
+ /* HMAC is always >= 9 bytes because of the ipad */
+ if ((ctx->digcnt + ctx->bufcnt) < 9)
+ err = omap_sham_final_shash(req);
+ else if (ctx->bufcnt)
+ return omap_sham_enqueue(req, OP_FINAL);
+
+ omap_sham_cleanup(req);
+
+ return err;
+}
+
+static int omap_sham_finup(struct ahash_request *req)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ int err1, err2;
+
+ ctx->flags |= FLAGS_FINUP;
+
+ err1 = omap_sham_update(req);
+ if (err1 == -EINPROGRESS)
+ return err1;
+ /*
+ * final() always has to be called to clean up resources,
+ * even if update() failed, except for -EINPROGRESS
+ */
+ err2 = omap_sham_final(req);
+
+ return err1 ?: err2;
+}
+
+static int omap_sham_digest(struct ahash_request *req)
+{
+ return omap_sham_init(req) ?: omap_sham_finup(req);
+}
+
+static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct omap_sham_hmac_ctx *bctx = tctx->base;
+ int bs = crypto_shash_blocksize(bctx->shash);
+ int ds = crypto_shash_digestsize(bctx->shash);
+ int err, i;
+ err = crypto_shash_setkey(tctx->fallback, key, keylen);
+ if (err)
+ return err;
+
+ if (keylen > bs) {
+ err = omap_sham_shash_digest(bctx->shash,
+ crypto_shash_get_flags(bctx->shash),
+ key, keylen, bctx->ipad);
+ if (err)
+ return err;
+ keylen = ds;
+ } else {
+ memcpy(bctx->ipad, key, keylen);
+ }
+
+ memset(bctx->ipad + keylen, 0, bs - keylen);
+ memcpy(bctx->opad, bctx->ipad, bs);
+
+ for (i = 0; i < bs; i++) {
+ bctx->ipad[i] ^= 0x36;
+ bctx->opad[i] ^= 0x5c;
+ }
+
+ return err;
+}
+
+static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
+{
+ struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
+ const char *alg_name = crypto_tfm_alg_name(tfm);
+
+ /* Allocate a fallback and abort if it failed. */
+ tctx->fallback = crypto_alloc_shash(alg_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(tctx->fallback)) {
+ pr_err("omap-sham: fallback driver '%s' "
+ "could not be loaded.\n", alg_name);
+ return PTR_ERR(tctx->fallback);
+ }
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct omap_sham_reqctx));
+
+ if (alg_base) {
+ struct omap_sham_hmac_ctx *bctx = tctx->base;
+ tctx->flags |= FLAGS_HMAC;
+ bctx->shash = crypto_alloc_shash(alg_base, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(bctx->shash)) {
+ pr_err("omap-sham: base driver '%s' "
+ "could not be loaded.\n", alg_base);
+ crypto_free_shash(tctx->fallback);
+ return PTR_ERR(bctx->shash);
+ }
+
+ }
+
+ return 0;
+}
+
+static int omap_sham_cra_init(struct crypto_tfm *tfm)
+{
+ return omap_sham_cra_init_alg(tfm, NULL);
+}
+
+static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
+{
+ return omap_sham_cra_init_alg(tfm, "sha1");
+}
+
+static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
+{
+ return omap_sham_cra_init_alg(tfm, "md5");
+}
+
+static void omap_sham_cra_exit(struct crypto_tfm *tfm)
+{
+ struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_shash(tctx->fallback);
+ tctx->fallback = NULL;
+
+ if (tctx->flags & FLAGS_HMAC) {
+ struct omap_sham_hmac_ctx *bctx = tctx->base;
+ crypto_free_shash(bctx->shash);
+ }
+}
+
+static struct ahash_alg algs[] = {
+{
+ .init = omap_sham_init,
+ .update = omap_sham_update,
+ .final = omap_sham_final,
+ .finup = omap_sham_finup,
+ .digest = omap_sham_digest,
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "omap-sha1",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct omap_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = omap_sham_cra_init,
+ .cra_exit = omap_sham_cra_exit,
+ }
+},
+{
+ .init = omap_sham_init,
+ .update = omap_sham_update,
+ .final = omap_sham_final,
+ .finup = omap_sham_finup,
+ .digest = omap_sham_digest,
+ .halg.digestsize = MD5_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "md5",
+ .cra_driver_name = "omap-md5",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct omap_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = omap_sham_cra_init,
+ .cra_exit = omap_sham_cra_exit,
+ }
+},
+{
+ .init = omap_sham_init,
+ .update = omap_sham_update,
+ .final = omap_sham_final,
+ .finup = omap_sham_finup,
+ .digest = omap_sham_digest,
+ .setkey = omap_sham_setkey,
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(sha1)",
+ .cra_driver_name = "omap-hmac-sha1",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct omap_sham_ctx) +
+ sizeof(struct omap_sham_hmac_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = omap_sham_cra_sha1_init,
+ .cra_exit = omap_sham_cra_exit,
+ }
+},
+{
+ .init = omap_sham_init,
+ .update = omap_sham_update,
+ .final = omap_sham_final,
+ .finup = omap_sham_finup,
+ .digest = omap_sham_digest,
+ .setkey = omap_sham_setkey,
+ .halg.digestsize = MD5_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(md5)",
+ .cra_driver_name = "omap-hmac-md5",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct omap_sham_ctx) +
+ sizeof(struct omap_sham_hmac_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = omap_sham_cra_md5_init,
+ .cra_exit = omap_sham_cra_exit,
+ }
+}
+};
+
+static void omap_sham_done_task(unsigned long data)
+{
+ struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
+ struct ahash_request *req = dd->req;
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ int ready = 1;
+
+ if (ctx->flags & FLAGS_OUTPUT_READY) {
+ ctx->flags &= ~FLAGS_OUTPUT_READY;
+ ready = 1;
+ }
+
+ if (dd->flags & FLAGS_DMA_ACTIVE) {
+ dd->flags &= ~FLAGS_DMA_ACTIVE;
+ omap_sham_update_dma_stop(dd);
+ omap_sham_update_dma_slow(dd);
+ }
+
+ if (ready && !(dd->flags & FLAGS_DMA_ACTIVE)) {
+ dev_dbg(dd->dev, "update done\n");
+ /* finish current request */
+ omap_sham_finish_req(req, 0);
+ /* start new request */
+ omap_sham_handle_queue(dd);
+ }
+}
+
+static void omap_sham_queue_task(unsigned long data)
+{
+ struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
+
+ omap_sham_handle_queue(dd);
+}
+
+static irqreturn_t omap_sham_irq(int irq, void *dev_id)
+{
+ struct omap_sham_dev *dd = dev_id;
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+
+ if (!ctx) {
+ dev_err(dd->dev, "unknown interrupt.\n");
+ return IRQ_HANDLED;
+ }
+
+ if (unlikely(ctx->flags & FLAGS_FINAL))
+ /* final -> allow device to go to power-saving mode */
+ omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);
+
+ omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
+ SHA_REG_CTRL_OUTPUT_READY);
+ omap_sham_read(dd, SHA_REG_CTRL);
+
+ ctx->flags |= FLAGS_OUTPUT_READY;
+ tasklet_schedule(&dd->done_task);
+
+ return IRQ_HANDLED;
+}
+
+static void omap_sham_dma_callback(int lch, u16 ch_status, void *data)
+{
+ struct omap_sham_dev *dd = data;
+
+ if (likely(lch == dd->dma_lch))
+ tasklet_schedule(&dd->done_task);
+}
+
+static int omap_sham_dma_init(struct omap_sham_dev *dd)
+{
+ int err;
+
+ dd->dma_lch = -1;
+
+ err = omap_request_dma(dd->dma, dev_name(dd->dev),
+ omap_sham_dma_callback, dd, &dd->dma_lch);
+ if (err) {
+ dev_err(dd->dev, "Unable to request DMA channel\n");
+ return err;
+ }
+ omap_set_dma_dest_params(dd->dma_lch, 0,
+ OMAP_DMA_AMODE_CONSTANT,
+ dd->phys_base + SHA_REG_DIN(0), 0, 16);
+
+ omap_set_dma_dest_burst_mode(dd->dma_lch,
+ OMAP_DMA_DATA_BURST_16);
+
+ return 0;
+}
+
+static void omap_sham_dma_cleanup(struct omap_sham_dev *dd)
+{
+ if (dd->dma_lch >= 0) {
+ omap_free_dma(dd->dma_lch);
+ dd->dma_lch = -1;
+ }
+}
+
+static int __devinit omap_sham_probe(struct platform_device *pdev)
+{
+ struct omap_sham_dev *dd;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int err, i, j;
+
+ dd = kzalloc(sizeof(struct omap_sham_dev), GFP_KERNEL);
+ if (dd == NULL) {
+ dev_err(dev, "unable to alloc data struct.\n");
+ err = -ENOMEM;
+ goto data_err;
+ }
+ dd->dev = dev;
+ platform_set_drvdata(pdev, dd);
+
+ INIT_LIST_HEAD(&dd->list);
+ spin_lock_init(&dd->lock);
+ tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
+ tasklet_init(&dd->queue_task, omap_sham_queue_task, (unsigned long)dd);
+ crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);
+
+ dd->irq = -1;
+
+ /* Get the base address */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "no MEM resource info\n");
+ err = -ENODEV;
+ goto res_err;
+ }
+ dd->phys_base = res->start;
+
+ /* Get the DMA */
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (!res) {
+ dev_err(dev, "no DMA resource info\n");
+ err = -ENODEV;
+ goto res_err;
+ }
+ dd->dma = res->start;
+
+ /* Get the IRQ */
+ dd->irq = platform_get_irq(pdev, 0);
+ if (dd->irq < 0) {
+ dev_err(dev, "no IRQ resource info\n");
+ err = dd->irq;
+ goto res_err;
+ }
+
+ err = request_irq(dd->irq, omap_sham_irq,
+ IRQF_TRIGGER_LOW, dev_name(dev), dd);
+ if (err) {
+ dev_err(dev, "unable to request irq.\n");
+ goto res_err;
+ }
+
+ err = omap_sham_dma_init(dd);
+ if (err)
+ goto dma_err;
+
+ /* Initializing the clock */
+ dd->iclk = clk_get(dev, "ick");
+ if (!dd->iclk) {
+ dev_err(dev, "clock initialization failed.\n");
+ err = -ENODEV;
+ goto clk_err;
+ }
+
+ dd->io_base = ioremap(dd->phys_base, SZ_4K);
+ if (!dd->io_base) {
+ dev_err(dev, "can't ioremap\n");
+ err = -ENOMEM;
+ goto io_err;
+ }
+
+ clk_enable(dd->iclk);
+ dev_info(dev, "hw accel on OMAP rev %u.%u\n",
+ (omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MAJOR) >> 4,
+ omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MINOR);
+ clk_disable(dd->iclk);
+
+ spin_lock(&sham.lock);
+ list_add_tail(&dd->list, &sham.dev_list);
+ spin_unlock(&sham.lock);
+
+ for (i = 0; i < ARRAY_SIZE(algs); i++) {
+ err = crypto_register_ahash(&algs[i]);
+ if (err)
+ goto err_algs;
+ }
+
+ return 0;
+
+err_algs:
+ for (j = 0; j < i; j++)
+ crypto_unregister_ahash(&algs[j]);
+ iounmap(dd->io_base);
+io_err:
+ clk_put(dd->iclk);
+clk_err:
+ omap_sham_dma_cleanup(dd);
+dma_err:
+ if (dd->irq >= 0)
+ free_irq(dd->irq, dd);
+res_err:
+ kfree(dd);
+ dd = NULL;
+data_err:
+ dev_err(dev, "initialization failed.\n");
+
+ return err;
+}
+
+static int __devexit omap_sham_remove(struct platform_device *pdev)
+{
+ struct omap_sham_dev *dd;
+ int i;
+
+ dd = platform_get_drvdata(pdev);
+ if (!dd)
+ return -ENODEV;
+ spin_lock(&sham.lock);
+ list_del(&dd->list);
+ spin_unlock(&sham.lock);
+ for (i = 0; i < ARRAY_SIZE(algs); i++)
+ crypto_unregister_ahash(&algs[i]);
+ tasklet_kill(&dd->done_task);
+ tasklet_kill(&dd->queue_task);
+ iounmap(dd->io_base);
+ clk_put(dd->iclk);
+ omap_sham_dma_cleanup(dd);
+ if (dd->irq >= 0)
+ free_irq(dd->irq, dd);
+ kfree(dd);
+ dd = NULL;
+
+ return 0;
+}
+
+static struct platform_driver omap_sham_driver = {
+ .probe = omap_sham_probe,
+ .remove = omap_sham_remove,
+ .driver = {
+ .name = "omap-sham",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init omap_sham_mod_init(void)
+{
+ pr_info("loading %s driver\n", "omap-sham");
+
+ if (!cpu_class_is_omap2() ||
+ omap_type() != OMAP2_DEVICE_TYPE_SEC) {
+ pr_err("Unsupported cpu\n");
+ return -ENODEV;
+ }
+
+ return platform_driver_register(&omap_sham_driver);
+}
+
+static void __exit omap_sham_mod_exit(void)
+{
+ platform_driver_unregister(&omap_sham_driver);
+}
+
+module_init(omap_sham_mod_init);
+module_exit(omap_sham_mod_exit);
+
+MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Dmitry Kasatkin");
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index c27f80e5d531..dab6f17fbbc7 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -128,7 +128,7 @@ config TXX9_DMAC
config SH_DMAE
tristate "Renesas SuperH DMAC support"
- depends on SUPERH && SH_DMA
+ depends on (SUPERH && SH_DMA) || (ARM && ARCH_SHMOBILE)
depends on !SH_DMA_API
select DMA_ENGINE
help
@@ -141,6 +141,13 @@ config COH901318
help
Enable support for ST-Ericsson COH 901 318 DMA.
+config STE_DMA40
+ bool "ST-Ericsson DMA40 support"
+ depends on ARCH_U8500
+ select DMA_ENGINE
+ help
+ Support for ST-Ericsson DMA40 controller
+
config AMCC_PPC440SPE_ADMA
tristate "AMCC PPC440SPe ADMA support"
depends on 440SPe || 440SP
@@ -149,6 +156,13 @@ config AMCC_PPC440SPE_ADMA
help
Enable support for the AMCC PPC440SPe RAID engines.
+config TIMB_DMA
+ tristate "Timberdale FPGA DMA support"
+ depends on MFD_TIMBERDALE || HAS_IOMEM
+ select DMA_ENGINE
+ help
+ Enable support for the Timberdale FPGA DMA engine.
+
config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
bool
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 22bba3d5e2b6..20881426c1ac 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -20,3 +20,5 @@ obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
obj-$(CONFIG_SH_DMAE) += shdma.o
obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
+obj-$(CONFIG_TIMB_DMA) += timb_dma.o
+obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 278cf5bceef2..6693b130c4ff 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -760,13 +760,17 @@ err_desc_get:
return NULL;
}
-static void atc_terminate_all(struct dma_chan *chan)
+static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
struct at_dma *atdma = to_at_dma(chan->device);
struct at_desc *desc, *_desc;
LIST_HEAD(list);
+ /* Only supports DMA_TERMINATE_ALL */
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
/*
* This is only called when something went wrong elsewhere, so
* we don't really care about the data. Just disable the
@@ -790,32 +794,30 @@ static void atc_terminate_all(struct dma_chan *chan)
/* Flush all pending and queued descriptors */
list_for_each_entry_safe(desc, _desc, &list, desc_node)
atc_chain_complete(atchan, desc);
+
+ return 0;
}
/**
- * atc_is_tx_complete - poll for transaction completion
+ * atc_tx_status - poll for transaction completion
* @chan: DMA channel
* @cookie: transaction identifier to check status of
- * @done: if not %NULL, updated with last completed transaction
- * @used: if not %NULL, updated with last used transaction
+ * @txstate: if not %NULL updated with transaction state
*
- * If @done and @used are passed in, upon return they reflect the driver
+ * If @txstate is passed in, upon return it reflects the driver
* internal state and can be used with dma_async_is_complete() to check
* the status of multiple cookies without re-checking hardware state.
*/
static enum dma_status
-atc_is_tx_complete(struct dma_chan *chan,
+atc_tx_status(struct dma_chan *chan,
dma_cookie_t cookie,
- dma_cookie_t *done, dma_cookie_t *used)
+ struct dma_tx_state *txstate)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
dma_cookie_t last_used;
dma_cookie_t last_complete;
enum dma_status ret;
- dev_vdbg(chan2dev(chan), "is_tx_complete: %d (d%d, u%d)\n",
- cookie, done ? *done : 0, used ? *used : 0);
-
spin_lock_bh(&atchan->lock);
last_complete = atchan->completed_cookie;
@@ -833,10 +835,10 @@ atc_is_tx_complete(struct dma_chan *chan,
spin_unlock_bh(&atchan->lock);
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
+ dev_vdbg(chan2dev(chan), "tx_status: %d (d%d, u%d)\n",
+ cookie, last_complete ? last_complete : 0,
+ last_used ? last_used : 0);
return ret;
}
@@ -1082,7 +1084,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
/* set base routines */
atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
- atdma->dma_common.device_is_tx_complete = atc_is_tx_complete;
+ atdma->dma_common.device_tx_status = atc_tx_status;
atdma->dma_common.device_issue_pending = atc_issue_pending;
atdma->dma_common.dev = &pdev->dev;
@@ -1092,7 +1094,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
- atdma->dma_common.device_terminate_all = atc_terminate_all;
+ atdma->dma_common.device_control = atc_control;
}
dma_writel(atdma, EN, AT_DMA_ENABLE);
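Not part of the patch: a minimal sketch of the calling convention this series converts drivers to, where the old device_terminate_all hook becomes a DMA_TERMINATE_ALL command on the generic device_control hook; "chan" is a placeholder for a channel obtained from dma_request_channel().

/*
 * Illustrative sketch only (not from the patch): issuing the new
 * DMA_TERMINATE_ALL command through device_control(). Drivers that only
 * support termination, like at_hdmac here, return -ENXIO for any other
 * command.
 */
#include <linux/dmaengine.h>

static int example_terminate_all(struct dma_chan *chan)
{
	return chan->device->device_control(chan, DMA_TERMINATE_ALL);
}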
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 1656fdcdb6c2..4233440741a2 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -37,7 +37,7 @@ struct coh901318_desc {
struct list_head node;
struct scatterlist *sg;
unsigned int sg_len;
- struct coh901318_lli *data;
+ struct coh901318_lli *lli;
enum dma_data_direction dir;
unsigned long flags;
};
@@ -283,7 +283,7 @@ static int coh901318_start(struct coh901318_chan *cohc)
}
static int coh901318_prep_linked_list(struct coh901318_chan *cohc,
- struct coh901318_lli *data)
+ struct coh901318_lli *lli)
{
int channel = cohc->id;
void __iomem *virtbase = cohc->base->virtbase;
@@ -292,18 +292,18 @@ static int coh901318_prep_linked_list(struct coh901318_chan *cohc,
COH901318_CX_STAT_SPACING*channel) &
COH901318_CX_STAT_ACTIVE);
- writel(data->src_addr,
+ writel(lli->src_addr,
virtbase + COH901318_CX_SRC_ADDR +
COH901318_CX_SRC_ADDR_SPACING * channel);
- writel(data->dst_addr, virtbase +
+ writel(lli->dst_addr, virtbase +
COH901318_CX_DST_ADDR +
COH901318_CX_DST_ADDR_SPACING * channel);
- writel(data->link_addr, virtbase + COH901318_CX_LNK_ADDR +
+ writel(lli->link_addr, virtbase + COH901318_CX_LNK_ADDR +
COH901318_CX_LNK_ADDR_SPACING * channel);
- writel(data->control, virtbase + COH901318_CX_CTRL +
+ writel(lli->control, virtbase + COH901318_CX_CTRL +
COH901318_CX_CTRL_SPACING * channel);
return 0;
@@ -408,33 +408,107 @@ coh901318_first_queued(struct coh901318_chan *cohc)
return d;
}
+static inline u32 coh901318_get_bytes_in_lli(struct coh901318_lli *in_lli)
+{
+ struct coh901318_lli *lli = in_lli;
+ u32 bytes = 0;
+
+ while (lli) {
+ bytes += lli->control & COH901318_CX_CTRL_TC_VALUE_MASK;
+ lli = lli->virt_link_addr;
+ }
+ return bytes;
+}
+
/*
- * DMA start/stop controls
+ * Get the number of bytes left to transfer on this channel;
+ * it is unwise to call this before stopping the channel for
+ * absolute measures, but for a rough guess you can still call
+ * it.
*/
-u32 coh901318_get_bytes_left(struct dma_chan *chan)
+static u32 coh901318_get_bytes_left(struct dma_chan *chan)
{
- unsigned long flags;
- u32 ret;
struct coh901318_chan *cohc = to_coh901318_chan(chan);
+ struct coh901318_desc *cohd;
+ struct list_head *pos;
+ unsigned long flags;
+ u32 left = 0;
+ int i = 0;
spin_lock_irqsave(&cohc->lock, flags);
- /* Read transfer count value */
- ret = readl(cohc->base->virtbase +
- COH901318_CX_CTRL+COH901318_CX_CTRL_SPACING *
- cohc->id) & COH901318_CX_CTRL_TC_VALUE_MASK;
+ /*
+ * If there are many queued jobs, we iterate and add the
+ * size of them all. We take a special look on the first
+ * job though, since it is probably active.
+ */
+ list_for_each(pos, &cohc->active) {
+ /*
+ * The first job in the list will be working on the
+ * hardware. The job can be stopped but still active,
+ * so that the transfer counter is somewhere inside
+ * the buffer.
+ */
+ cohd = list_entry(pos, struct coh901318_desc, node);
+
+ if (i == 0) {
+ struct coh901318_lli *lli;
+ dma_addr_t ladd;
+
+ /* Read current transfer count value */
+ left = readl(cohc->base->virtbase +
+ COH901318_CX_CTRL +
+ COH901318_CX_CTRL_SPACING * cohc->id) &
+ COH901318_CX_CTRL_TC_VALUE_MASK;
+
+ /* See if the transfer is linked... */
+ ladd = readl(cohc->base->virtbase +
+ COH901318_CX_LNK_ADDR +
+ COH901318_CX_LNK_ADDR_SPACING *
+ cohc->id) &
+ ~COH901318_CX_LNK_LINK_IMMEDIATE;
+ /* Single transaction */
+ if (!ladd)
+ continue;
+
+ /*
+ * Linked transaction, follow the lli, find the
+ * currently processing lli, and proceed to the next
+ */
+ lli = cohd->lli;
+ while (lli && lli->link_addr != ladd)
+ lli = lli->virt_link_addr;
+
+ if (lli)
+ lli = lli->virt_link_addr;
+
+ /*
+ * Follow remaining lli links around to count the total
+ * number of bytes left
+ */
+ left += coh901318_get_bytes_in_lli(lli);
+ } else {
+ left += coh901318_get_bytes_in_lli(cohd->lli);
+ }
+ i++;
+ }
+
+ /* Also count bytes in the queued jobs */
+ list_for_each(pos, &cohc->queue) {
+ cohd = list_entry(pos, struct coh901318_desc, node);
+ left += coh901318_get_bytes_in_lli(cohd->lli);
+ }
spin_unlock_irqrestore(&cohc->lock, flags);
- return ret;
+ return left;
}
-EXPORT_SYMBOL(coh901318_get_bytes_left);
-
-/* Stops a transfer without losing data. Enables power save.
- Use this function in conjunction with coh901318_continue(..)
-*/
-void coh901318_stop(struct dma_chan *chan)
+/*
+ * Pauses a transfer without losing data. Enables power save.
+ * Use this function in conjunction with coh901318_resume.
+ */
+static void coh901318_pause(struct dma_chan *chan)
{
u32 val;
unsigned long flags;
@@ -475,12 +549,11 @@ void coh901318_stop(struct dma_chan *chan)
spin_unlock_irqrestore(&cohc->lock, flags);
}
-EXPORT_SYMBOL(coh901318_stop);
-/* Continues a transfer that has been stopped via 300_dma_stop(..).
+/* Resumes a transfer that has been paused via coh901318_pause().
Power save is handled.
*/
-void coh901318_continue(struct dma_chan *chan)
+static void coh901318_resume(struct dma_chan *chan)
{
u32 val;
unsigned long flags;
@@ -506,7 +579,6 @@ void coh901318_continue(struct dma_chan *chan)
spin_unlock_irqrestore(&cohc->lock, flags);
}
-EXPORT_SYMBOL(coh901318_continue);
bool coh901318_filter_id(struct dma_chan *chan, void *chan_id)
{
@@ -565,29 +637,30 @@ static int coh901318_config(struct coh901318_chan *cohc,
*/
static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc)
{
- struct coh901318_desc *cohd_que;
+ struct coh901318_desc *cohd;
- /* start queued jobs, if any
+ /*
+ * start queued jobs, if any
* TODO: transmit all queued jobs in one go
*/
- cohd_que = coh901318_first_queued(cohc);
+ cohd = coh901318_first_queued(cohc);
- if (cohd_que != NULL) {
+ if (cohd != NULL) {
/* Remove from queue */
- coh901318_desc_remove(cohd_que);
+ coh901318_desc_remove(cohd);
/* initiate DMA job */
cohc->busy = 1;
- coh901318_desc_submit(cohc, cohd_que);
+ coh901318_desc_submit(cohc, cohd);
- coh901318_prep_linked_list(cohc, cohd_que->data);
+ coh901318_prep_linked_list(cohc, cohd->lli);
- /* start dma job */
+ /* start dma job on this channel */
coh901318_start(cohc);
}
- return cohd_que;
+ return cohd;
}
/*
@@ -622,7 +695,7 @@ static void dma_tasklet(unsigned long data)
cohc->completed = cohd_fin->desc.cookie;
/* release the lli allocation and remove the descriptor */
- coh901318_lli_free(&cohc->base->pool, &cohd_fin->data);
+ coh901318_lli_free(&cohc->base->pool, &cohd_fin->lli);
/* return desc to free-list */
coh901318_desc_remove(cohd_fin);
@@ -666,23 +739,44 @@ static void dma_tasklet(unsigned long data)
/* called from interrupt context */
static void dma_tc_handle(struct coh901318_chan *cohc)
{
- BUG_ON(!cohc->allocated && (list_empty(&cohc->active) ||
- list_empty(&cohc->queue)));
-
- if (!cohc->allocated)
+ /*
+ * If the channel is not allocated, then we shouldn't have
+ * any TC interrupts on it.
+ */
+ if (!cohc->allocated) {
+ dev_err(COHC_2_DEV(cohc), "spurious interrupt from "
+ "unallocated channel\n");
return;
+ }
spin_lock(&cohc->lock);
+ /*
+ * When we reach this point, at least one queue item
+ * should have been moved over from cohc->queue to
+ * cohc->active and run to completion, that is why we're
+ * getting a terminal count interrupt is it not?
+ * If you get this BUG() the most probable cause is that
+ * the individual nodes in the lli chain have IRQ enabled,
+ * so check your platform config for lli chain ctrl.
+ */
+ BUG_ON(list_empty(&cohc->active));
+
cohc->nbr_active_done++;
+ /*
+ * This attempts to take a job from cohc->queue, put it
+ * into cohc->active and start it.
+ */
if (coh901318_queue_start(cohc) == NULL)
cohc->busy = 0;
- BUG_ON(list_empty(&cohc->active));
-
spin_unlock(&cohc->lock);
+ /*
+ * This tasklet will remove items from cohc->active
+ * and thus terminate them.
+ */
if (cohc_chan_conf(cohc)->priority_high)
tasklet_hi_schedule(&cohc->tasklet);
else
@@ -809,6 +903,7 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id)
static int coh901318_alloc_chan_resources(struct dma_chan *chan)
{
struct coh901318_chan *cohc = to_coh901318_chan(chan);
+ unsigned long flags;
dev_vdbg(COHC_2_DEV(cohc), "[%s] DMA channel %d\n",
__func__, cohc->id);
@@ -816,11 +911,15 @@ static int coh901318_alloc_chan_resources(struct dma_chan *chan)
if (chan->client_count > 1)
return -EBUSY;
+ spin_lock_irqsave(&cohc->lock, flags);
+
coh901318_config(cohc, NULL);
cohc->allocated = 1;
cohc->completed = chan->cookie = 1;
+ spin_unlock_irqrestore(&cohc->lock, flags);
+
return 1;
}
@@ -843,7 +942,7 @@ coh901318_free_chan_resources(struct dma_chan *chan)
spin_unlock_irqrestore(&cohc->lock, flags);
- chan->device->device_terminate_all(chan);
+ chan->device->device_control(chan, DMA_TERMINATE_ALL);
}
@@ -870,7 +969,7 @@ static struct dma_async_tx_descriptor *
coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
size_t size, unsigned long flags)
{
- struct coh901318_lli *data;
+ struct coh901318_lli *lli;
struct coh901318_desc *cohd;
unsigned long flg;
struct coh901318_chan *cohc = to_coh901318_chan(chan);
@@ -892,23 +991,23 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
if ((lli_len << MAX_DMA_PACKET_SIZE_SHIFT) < size)
lli_len++;
- data = coh901318_lli_alloc(&cohc->base->pool, lli_len);
+ lli = coh901318_lli_alloc(&cohc->base->pool, lli_len);
- if (data == NULL)
+ if (lli == NULL)
goto err;
ret = coh901318_lli_fill_memcpy(
- &cohc->base->pool, data, src, size, dest,
+ &cohc->base->pool, lli, src, size, dest,
cohc_chan_param(cohc)->ctrl_lli_chained,
ctrl_last);
if (ret)
goto err;
- COH_DBG(coh901318_list_print(cohc, data));
+ COH_DBG(coh901318_list_print(cohc, lli));
/* Pick a descriptor to handle this transfer */
cohd = coh901318_desc_get(cohc);
- cohd->data = data;
+ cohd->lli = lli;
cohd->flags = flags;
cohd->desc.tx_submit = coh901318_tx_submit;
@@ -926,7 +1025,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
unsigned long flags)
{
struct coh901318_chan *cohc = to_coh901318_chan(chan);
- struct coh901318_lli *data;
+ struct coh901318_lli *lli;
struct coh901318_desc *cohd;
const struct coh901318_params *params;
struct scatterlist *sg;
@@ -999,13 +1098,13 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
}
pr_debug("Allocate %d lli:s for this transfer\n", len);
- data = coh901318_lli_alloc(&cohc->base->pool, len);
+ lli = coh901318_lli_alloc(&cohc->base->pool, len);
- if (data == NULL)
+ if (lli == NULL)
goto err_dma_alloc;
- /* initiate allocated data list */
- ret = coh901318_lli_fill_sg(&cohc->base->pool, data, sgl, sg_len,
+ /* initiate allocated lli list */
+ ret = coh901318_lli_fill_sg(&cohc->base->pool, lli, sgl, sg_len,
cohc_dev_addr(cohc),
ctrl_chained,
ctrl,
@@ -1014,14 +1113,14 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (ret)
goto err_lli_fill;
- COH_DBG(coh901318_list_print(cohc, data));
+ COH_DBG(coh901318_list_print(cohc, lli));
/* Pick a descriptor to handle this transfer */
cohd = coh901318_desc_get(cohc);
cohd->dir = direction;
cohd->flags = flags;
cohd->desc.tx_submit = coh901318_tx_submit;
- cohd->data = data;
+ cohd->lli = lli;
spin_unlock_irqrestore(&cohc->lock, flg);
@@ -1035,9 +1134,8 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
}
static enum dma_status
-coh901318_is_tx_complete(struct dma_chan *chan,
- dma_cookie_t cookie, dma_cookie_t *done,
- dma_cookie_t *used)
+coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
{
struct coh901318_chan *cohc = to_coh901318_chan(chan);
dma_cookie_t last_used;
@@ -1049,10 +1147,10 @@ coh901318_is_tx_complete(struct dma_chan *chan,
ret = dma_async_is_complete(cookie, last_complete, last_used);
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used,
+ coh901318_get_bytes_left(chan));
+ if (ret == DMA_IN_PROGRESS && cohc->stopped)
+ ret = DMA_PAUSED;
return ret;
}
@@ -1065,23 +1163,41 @@ coh901318_issue_pending(struct dma_chan *chan)
spin_lock_irqsave(&cohc->lock, flags);
- /* Busy means that pending jobs are already being processed */
+ /*
+ * Busy means that pending jobs are already being processed,
+ * and then there is no point in starting the queue: the
+ * terminal count interrupt on the channel will take the next
+ * job on the queue and execute it anyway.
+ */
if (!cohc->busy)
coh901318_queue_start(cohc);
spin_unlock_irqrestore(&cohc->lock, flags);
}
-static void
-coh901318_terminate_all(struct dma_chan *chan)
+static int
+coh901318_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
{
unsigned long flags;
struct coh901318_chan *cohc = to_coh901318_chan(chan);
struct coh901318_desc *cohd;
void __iomem *virtbase = cohc->base->virtbase;
- coh901318_stop(chan);
+ if (cmd == DMA_PAUSE) {
+ coh901318_pause(chan);
+ return 0;
+ }
+
+ if (cmd == DMA_RESUME) {
+ coh901318_resume(chan);
+ return 0;
+ }
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
+ /* The remainder of this function terminates the transfer */
+ coh901318_pause(chan);
spin_lock_irqsave(&cohc->lock, flags);
/* Clear any pending BE or TC interrupt */
@@ -1099,7 +1215,7 @@ coh901318_terminate_all(struct dma_chan *chan)
while ((cohd = coh901318_first_active_get(cohc))) {
/* release the lli allocation*/
- coh901318_lli_free(&cohc->base->pool, &cohd->data);
+ coh901318_lli_free(&cohc->base->pool, &cohd->lli);
/* return desc to free-list */
coh901318_desc_remove(cohd);
@@ -1108,7 +1224,7 @@ coh901318_terminate_all(struct dma_chan *chan)
while ((cohd = coh901318_first_queued(cohc))) {
/* release the lli allocation*/
- coh901318_lli_free(&cohc->base->pool, &cohd->data);
+ coh901318_lli_free(&cohc->base->pool, &cohd->lli);
/* return desc to free-list */
coh901318_desc_remove(cohd);
@@ -1120,6 +1236,8 @@ coh901318_terminate_all(struct dma_chan *chan)
cohc->busy = 0;
spin_unlock_irqrestore(&cohc->lock, flags);
+
+ return 0;
}
void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
struct coh901318_base *base)
@@ -1235,9 +1353,9 @@ static int __init coh901318_probe(struct platform_device *pdev)
base->dma_slave.device_alloc_chan_resources = coh901318_alloc_chan_resources;
base->dma_slave.device_free_chan_resources = coh901318_free_chan_resources;
base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg;
- base->dma_slave.device_is_tx_complete = coh901318_is_tx_complete;
+ base->dma_slave.device_tx_status = coh901318_tx_status;
base->dma_slave.device_issue_pending = coh901318_issue_pending;
- base->dma_slave.device_terminate_all = coh901318_terminate_all;
+ base->dma_slave.device_control = coh901318_control;
base->dma_slave.dev = &pdev->dev;
err = dma_async_device_register(&base->dma_slave);
@@ -1255,9 +1373,9 @@ static int __init coh901318_probe(struct platform_device *pdev)
base->dma_memcpy.device_alloc_chan_resources = coh901318_alloc_chan_resources;
base->dma_memcpy.device_free_chan_resources = coh901318_free_chan_resources;
base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy;
- base->dma_memcpy.device_is_tx_complete = coh901318_is_tx_complete;
+ base->dma_memcpy.device_tx_status = coh901318_tx_status;
base->dma_memcpy.device_issue_pending = coh901318_issue_pending;
- base->dma_memcpy.device_terminate_all = coh901318_terminate_all;
+ base->dma_memcpy.device_control = coh901318_control;
base->dma_memcpy.dev = &pdev->dev;
/*
* This controller can only access address at even 32bit boundaries,
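Not part of the patch: under the same device_control hook, coh901318_control() above also accepts DMA_PAUSE and DMA_RESUME, so a client can halt a channel, inspect it, and continue; a minimal sketch follows, with "chan" again a placeholder channel.

/*
 * Illustrative sketch only (not from the patch): pause a channel, do
 * something while it is stopped, then resume it.
 */
#include <linux/dmaengine.h>

static void example_pause_resume(struct dma_chan *chan)
{
	if (chan->device->device_control(chan, DMA_PAUSE) == 0) {
		/* e.g. read the residue via device_tx_status() while paused */
		chan->device->device_control(chan, DMA_RESUME);
	}
}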
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index d18b5d069d7e..9d88ab74b767 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -695,11 +695,11 @@ int dma_async_device_register(struct dma_device *device)
BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
!device->device_prep_slave_sg);
BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
- !device->device_terminate_all);
+ !device->device_control);
BUG_ON(!device->device_alloc_chan_resources);
BUG_ON(!device->device_free_chan_resources);
- BUG_ON(!device->device_is_tx_complete);
+ BUG_ON(!device->device_tx_status);
BUG_ON(!device->device_issue_pending);
BUG_ON(!device->dev);
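Not part of the patch: with device_is_tx_complete gone, clients poll completion through device_tx_status and a struct dma_tx_state, whose residue field the driver fills via dma_set_tx_state(); a minimal sketch with placeholder parameters.

/*
 * Illustrative sketch only (not from the patch): polling completion through
 * the device_tx_status() hook that dma_async_device_register() now checks
 * for. The residue is whatever the driver reported via dma_set_tx_state().
 */
#include <linux/dmaengine.h>

static enum dma_status example_poll(struct dma_chan *chan, dma_cookie_t cookie,
				    u32 *residue)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = chan->device->device_tx_status(chan, cookie, &state);
	if (residue)
		*residue = state.residue;
	return status;
}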
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index d28369f7afd2..18fb5b41cedf 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -781,13 +781,17 @@ err_desc_get:
return NULL;
}
-static void dwc_terminate_all(struct dma_chan *chan)
+static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(chan->device);
struct dw_desc *desc, *_desc;
LIST_HEAD(list);
+ /* Only supports DMA_TERMINATE_ALL */
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
/*
* This is only called when something went wrong elsewhere, so
* we don't really care about the data. Just disable the
@@ -810,12 +814,14 @@ static void dwc_terminate_all(struct dma_chan *chan)
/* Flush all pending and queued descriptors */
list_for_each_entry_safe(desc, _desc, &list, desc_node)
dwc_descriptor_complete(dwc, desc);
+
+ return 0;
}
static enum dma_status
-dwc_is_tx_complete(struct dma_chan *chan,
- dma_cookie_t cookie,
- dma_cookie_t *done, dma_cookie_t *used)
+dwc_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
dma_cookie_t last_used;
@@ -835,10 +841,7 @@ dwc_is_tx_complete(struct dma_chan *chan,
ret = dma_async_is_complete(cookie, last_complete, last_used);
}
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
return ret;
}
@@ -1338,9 +1341,9 @@ static int __init dw_probe(struct platform_device *pdev)
dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
- dw->dma.device_terminate_all = dwc_terminate_all;
+ dw->dma.device_control = dwc_control;
- dw->dma.device_is_tx_complete = dwc_is_tx_complete;
+ dw->dma.device_tx_status = dwc_tx_status;
dw->dma.device_issue_pending = dwc_issue_pending;
dma_writel(dw, CFG, DW_CFG_DMA_EN);
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 88f470f0d820..6a853e992a1c 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -775,13 +775,18 @@ fail:
return NULL;
}
-static void fsl_dma_device_terminate_all(struct dma_chan *dchan)
+static int fsl_dma_device_control(struct dma_chan *dchan,
+ enum dma_ctrl_cmd cmd)
{
struct fsldma_chan *chan;
unsigned long flags;
+ /* Only supports DMA_TERMINATE_ALL */
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
if (!dchan)
- return;
+ return -EINVAL;
chan = to_fsl_chan(dchan);
@@ -795,6 +800,8 @@ static void fsl_dma_device_terminate_all(struct dma_chan *dchan)
fsldma_free_desc_list(chan, &chan->ld_running);
spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+ return 0;
}
/**
@@ -965,13 +972,12 @@ static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
}
/**
- * fsl_dma_is_complete - Determine the DMA status
+ * fsl_tx_status - Determine the DMA status
* @chan : Freescale DMA channel
*/
-static enum dma_status fsl_dma_is_complete(struct dma_chan *dchan,
+static enum dma_status fsl_tx_status(struct dma_chan *dchan,
dma_cookie_t cookie,
- dma_cookie_t *done,
- dma_cookie_t *used)
+ struct dma_tx_state *txstate)
{
struct fsldma_chan *chan = to_fsl_chan(dchan);
dma_cookie_t last_used;
@@ -982,11 +988,7 @@ static enum dma_status fsl_dma_is_complete(struct dma_chan *dchan,
last_used = dchan->cookie;
last_complete = chan->completed_cookie;
- if (done)
- *done = last_complete;
-
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
return dma_async_is_complete(cookie, last_complete, last_used);
}
@@ -1330,10 +1332,10 @@ static int __devinit fsldma_of_probe(struct of_device *op,
fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
- fdev->common.device_is_tx_complete = fsl_dma_is_complete;
+ fdev->common.device_tx_status = fsl_tx_status;
fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
- fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
+ fdev->common.device_control = fsl_dma_device_control;
fdev->common.dev = &op->dev;
dev_set_drvdata(&op->dev, fdev);
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 3e5a8005c62b..c9213ead4a26 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -727,18 +727,18 @@ static void ioat1_timer_event(unsigned long data)
}
enum dma_status
-ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie,
- dma_cookie_t *done, dma_cookie_t *used)
+ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
{
struct ioat_chan_common *chan = to_chan_common(c);
struct ioatdma_device *device = chan->device;
- if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
+ if (ioat_tx_status(c, cookie, txstate) == DMA_SUCCESS)
return DMA_SUCCESS;
device->cleanup_fn((unsigned long) c);
- return ioat_is_complete(c, cookie, done, used);
+ return ioat_tx_status(c, cookie, txstate);
}
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
@@ -858,7 +858,7 @@ int __devinit ioat_dma_self_test(struct ioatdma_device *device)
tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
if (tmo == 0 ||
- dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL)
+ dma->device_tx_status(dma_chan, cookie, NULL)
!= DMA_SUCCESS) {
dev_err(dev, "Self-test copy timed out, disabling\n");
err = -ENODEV;
@@ -1199,7 +1199,7 @@ int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca)
dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
- dma->device_is_tx_complete = ioat_is_dma_complete;
+ dma->device_tx_status = ioat_dma_tx_status;
err = ioat_probe(device);
if (err)
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 86b97ac8774e..26f48ef94c5c 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -142,15 +142,14 @@ static inline struct ioat_dma_chan *to_ioat_chan(struct dma_chan *c)
}
/**
- * ioat_is_complete - poll the status of an ioat transaction
+ * ioat_tx_status - poll the status of an ioat transaction
* @c: channel handle
* @cookie: transaction identifier
- * @done: if set, updated with last completed transaction
- * @used: if set, updated with last used transaction
+ * @txstate: if set, updated with the transaction state
*/
static inline enum dma_status
-ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie,
- dma_cookie_t *done, dma_cookie_t *used)
+ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
{
struct ioat_chan_common *chan = to_chan_common(c);
dma_cookie_t last_used;
@@ -159,10 +158,7 @@ ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie,
last_used = c->cookie;
last_complete = chan->completed_cookie;
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
return dma_async_is_complete(cookie, last_complete, last_used);
}
@@ -338,8 +334,8 @@ struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev,
unsigned long ioat_get_current_completion(struct ioat_chan_common *chan);
void ioat_init_channel(struct ioatdma_device *device,
struct ioat_chan_common *chan, int idx);
-enum dma_status ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie,
- dma_cookie_t *done, dma_cookie_t *used);
+enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+ struct dma_tx_state *txstate);
void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
size_t len, struct ioat_dma_descriptor *hw);
bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index b5ae56c211e6..2d02f6d6fbe4 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -855,7 +855,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
dma->device_issue_pending = ioat2_issue_pending;
dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
dma->device_free_chan_resources = ioat2_free_chan_resources;
- dma->device_is_tx_complete = ioat_is_dma_complete;
+ dma->device_tx_status = ioat_tx_status;
err = ioat_probe(device);
if (err)
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 6740e319c9cf..720c3ed04e54 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -439,17 +439,17 @@ static void ioat3_timer_event(unsigned long data)
}
static enum dma_status
-ioat3_is_complete(struct dma_chan *c, dma_cookie_t cookie,
- dma_cookie_t *done, dma_cookie_t *used)
+ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
- if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
+ if (ioat_tx_status(c, cookie, txstate) == DMA_SUCCESS)
return DMA_SUCCESS;
ioat3_cleanup_poll(ioat);
- return ioat_is_complete(c, cookie, done, used);
+ return ioat_tx_status(c, cookie, txstate);
}
static struct dma_async_tx_descriptor *
@@ -977,7 +977,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device)
tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
- if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_err(dev, "Self-test xor timed out\n");
err = -ENODEV;
goto free_resources;
@@ -1031,7 +1031,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device)
tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
- if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_err(dev, "Self-test validate timed out\n");
err = -ENODEV;
goto free_resources;
@@ -1072,7 +1072,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device)
tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
- if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_err(dev, "Self-test memset timed out\n");
err = -ENODEV;
goto free_resources;
@@ -1115,7 +1115,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device)
tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
- if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_err(dev, "Self-test 2nd validate timed out\n");
err = -ENODEV;
goto free_resources;
@@ -1259,11 +1259,11 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
if (is_raid_device) {
- dma->device_is_tx_complete = ioat3_is_complete;
+ dma->device_tx_status = ioat3_tx_status;
device->cleanup_fn = ioat3_cleanup_event;
device->timer_fn = ioat3_timer_event;
} else {
- dma->device_is_tx_complete = ioat_is_dma_complete;
+ dma->device_tx_status = ioat_dma_tx_status;
device->cleanup_fn = ioat2_cleanup_event;
device->timer_fn = ioat2_timer_event;
}
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 1ebc801678b0..161c452923b8 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -894,14 +894,14 @@ static void iop_adma_free_chan_resources(struct dma_chan *chan)
}
/**
- * iop_adma_is_complete - poll the status of an ADMA transaction
+ * iop_adma_status - poll the status of an ADMA transaction
* @chan: ADMA channel handle
* @cookie: ADMA transaction identifier
+ * @txstate: a holder for the current state of the channel or NULL
*/
-static enum dma_status iop_adma_is_complete(struct dma_chan *chan,
+static enum dma_status iop_adma_status(struct dma_chan *chan,
dma_cookie_t cookie,
- dma_cookie_t *done,
- dma_cookie_t *used)
+ struct dma_tx_state *txstate)
{
struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
dma_cookie_t last_used;
@@ -910,12 +910,7 @@ static enum dma_status iop_adma_is_complete(struct dma_chan *chan,
last_used = chan->cookie;
last_complete = iop_chan->completed_cookie;
-
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
-
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
ret = dma_async_is_complete(cookie, last_complete, last_used);
if (ret == DMA_SUCCESS)
return ret;
@@ -924,11 +919,7 @@ static enum dma_status iop_adma_is_complete(struct dma_chan *chan,
last_used = chan->cookie;
last_complete = iop_chan->completed_cookie;
-
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
return dma_async_is_complete(cookie, last_complete, last_used);
}
@@ -1043,7 +1034,7 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(1);
- if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
+ if (iop_adma_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
dev_printk(KERN_ERR, dma_chan->device->dev,
"Self-test copy timed out, disabling\n");
@@ -1143,7 +1134,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(8);
- if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
+ if (iop_adma_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
dev_printk(KERN_ERR, dma_chan->device->dev,
"Self-test xor timed out, disabling\n");
@@ -1190,7 +1181,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(8);
- if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_printk(KERN_ERR, dma_chan->device->dev,
"Self-test zero sum timed out, disabling\n");
err = -ENODEV;
@@ -1214,7 +1205,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(8);
- if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_printk(KERN_ERR, dma_chan->device->dev,
"Self-test memset timed out, disabling\n");
err = -ENODEV;
@@ -1246,7 +1237,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(8);
- if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_printk(KERN_ERR, dma_chan->device->dev,
"Self-test non-zero sum timed out, disabling\n");
err = -ENODEV;
@@ -1341,7 +1332,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(8);
- if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
+ if (iop_adma_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
dev_err(dev, "Self-test pq timed out, disabling\n");
err = -ENODEV;
@@ -1378,7 +1369,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(8);
- if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
+ if (iop_adma_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n");
err = -ENODEV;
@@ -1410,7 +1401,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(8);
- if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
+ if (iop_adma_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n");
err = -ENODEV;
@@ -1508,7 +1499,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
/* set base routines */
dma_dev->device_alloc_chan_resources = iop_adma_alloc_chan_resources;
dma_dev->device_free_chan_resources = iop_adma_free_chan_resources;
- dma_dev->device_is_tx_complete = iop_adma_is_complete;
+ dma_dev->device_tx_status = iop_adma_status;
dma_dev->device_issue_pending = iop_adma_issue_pending;
dma_dev->dev = &pdev->dev;
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 2a446397c884..246a6143e4a7 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1472,13 +1472,17 @@ static void idmac_issue_pending(struct dma_chan *chan)
*/
}
-static void __idmac_terminate_all(struct dma_chan *chan)
+static int __idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
{
struct idmac_channel *ichan = to_idmac_chan(chan);
struct idmac *idmac = to_idmac(chan->device);
unsigned long flags;
int i;
+ /* Only supports DMA_TERMINATE_ALL */
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
ipu_disable_channel(idmac, ichan,
ichan->status >= IPU_CHANNEL_ENABLED);
@@ -1505,17 +1509,22 @@ static void __idmac_terminate_all(struct dma_chan *chan)
tasklet_enable(&to_ipu(idmac)->tasklet);
ichan->status = IPU_CHANNEL_INITIALIZED;
+
+ return 0;
}
-static void idmac_terminate_all(struct dma_chan *chan)
+static int idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
{
struct idmac_channel *ichan = to_idmac_chan(chan);
+ int ret;
mutex_lock(&ichan->chan_mutex);
- __idmac_terminate_all(chan);
+ ret = __idmac_control(chan, cmd);
mutex_unlock(&ichan->chan_mutex);
+
+ return ret;
}
#ifdef DEBUG
@@ -1607,7 +1616,7 @@ static void idmac_free_chan_resources(struct dma_chan *chan)
mutex_lock(&ichan->chan_mutex);
- __idmac_terminate_all(chan);
+ __idmac_control(chan, DMA_TERMINATE_ALL);
if (ichan->status > IPU_CHANNEL_FREE) {
#ifdef DEBUG
@@ -1637,15 +1646,12 @@ static void idmac_free_chan_resources(struct dma_chan *chan)
tasklet_schedule(&to_ipu(idmac)->tasklet);
}
-static enum dma_status idmac_is_tx_complete(struct dma_chan *chan,
- dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used)
+static enum dma_status idmac_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
{
struct idmac_channel *ichan = to_idmac_chan(chan);
- if (done)
- *done = ichan->completed;
- if (used)
- *used = chan->cookie;
+ dma_set_tx_state(txstate, ichan->completed, chan->cookie, 0);
if (cookie != chan->cookie)
return DMA_ERROR;
return DMA_SUCCESS;
@@ -1664,12 +1670,12 @@ static int __init ipu_idmac_init(struct ipu *ipu)
dma->dev = ipu->dev;
dma->device_alloc_chan_resources = idmac_alloc_chan_resources;
dma->device_free_chan_resources = idmac_free_chan_resources;
- dma->device_is_tx_complete = idmac_is_tx_complete;
+ dma->device_tx_status = idmac_tx_status;
dma->device_issue_pending = idmac_issue_pending;
/* Compulsory for DMA_SLAVE fields */
dma->device_prep_slave_sg = idmac_prep_slave_sg;
- dma->device_terminate_all = idmac_terminate_all;
+ dma->device_control = idmac_control;
INIT_LIST_HEAD(&dma->channels);
for (i = 0; i < IPU_CHANNELS_NUM; i++) {
@@ -1703,7 +1709,7 @@ static void __exit ipu_idmac_exit(struct ipu *ipu)
for (i = 0; i < IPU_CHANNELS_NUM; i++) {
struct idmac_channel *ichan = ipu->channel + i;
- idmac_terminate_all(&ichan->dma_chan);
+ idmac_control(&ichan->dma_chan, DMA_TERMINATE_ALL);
idmac_prep_slave_sg(&ichan->dma_chan, NULL, 0, DMA_NONE, 0);
}
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index bbbd58566625..84326c961d4b 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -541,8 +541,8 @@ static void mpc_dma_issue_pending(struct dma_chan *chan)
/* Check request completion status */
static enum dma_status
-mpc_dma_is_tx_complete(struct dma_chan *chan, dma_cookie_t cookie,
- dma_cookie_t *done, dma_cookie_t *used)
+mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
{
struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
unsigned long flags;
@@ -554,12 +554,7 @@ mpc_dma_is_tx_complete(struct dma_chan *chan, dma_cookie_t cookie,
last_complete = mchan->completed_cookie;
spin_unlock_irqrestore(&mchan->lock, flags);
- if (done)
- *done = last_complete;
-
- if (used)
- *used = last_used;
-
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
return dma_async_is_complete(cookie, last_complete, last_used);
}
@@ -694,7 +689,7 @@ static int __devinit mpc_dma_probe(struct of_device *op,
dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
dma->device_free_chan_resources = mpc_dma_free_chan_resources;
dma->device_issue_pending = mpc_dma_issue_pending;
- dma->device_is_tx_complete = mpc_dma_is_tx_complete;
+ dma->device_tx_status = mpc_dma_tx_status;
dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
INIT_LIST_HEAD(&dma->channels);
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index e2fd34da64f2..86c5ae9fde34 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -810,14 +810,14 @@ static void mv_xor_free_chan_resources(struct dma_chan *chan)
}
/**
- * mv_xor_is_complete - poll the status of an XOR transaction
+ * mv_xor_status - poll the status of an XOR transaction
* @chan: XOR channel handle
* @cookie: XOR transaction identifier
+ * @txstate: XOR transactions state holder (or NULL)
*/
-static enum dma_status mv_xor_is_complete(struct dma_chan *chan,
+static enum dma_status mv_xor_status(struct dma_chan *chan,
dma_cookie_t cookie,
- dma_cookie_t *done,
- dma_cookie_t *used)
+ struct dma_tx_state *txstate)
{
struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
dma_cookie_t last_used;
@@ -827,10 +827,7 @@ static enum dma_status mv_xor_is_complete(struct dma_chan *chan,
last_used = chan->cookie;
last_complete = mv_chan->completed_cookie;
mv_chan->is_complete_cookie = cookie;
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
ret = dma_async_is_complete(cookie, last_complete, last_used);
if (ret == DMA_SUCCESS) {
@@ -842,11 +839,7 @@ static enum dma_status mv_xor_is_complete(struct dma_chan *chan,
last_used = chan->cookie;
last_complete = mv_chan->completed_cookie;
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
-
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
return dma_async_is_complete(cookie, last_complete, last_used);
}
@@ -975,7 +968,7 @@ static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
async_tx_ack(tx);
msleep(1);
- if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) !=
+ if (mv_xor_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
dev_printk(KERN_ERR, dma_chan->device->dev,
"Self-test copy timed out, disabling\n");
@@ -1073,7 +1066,7 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
async_tx_ack(tx);
msleep(8);
- if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) !=
+ if (mv_xor_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
dev_printk(KERN_ERR, dma_chan->device->dev,
"Self-test xor timed out, disabling\n");
@@ -1168,7 +1161,7 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
/* set base routines */
dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
- dma_dev->device_is_tx_complete = mv_xor_is_complete;
+ dma_dev->device_tx_status = mv_xor_status;
dma_dev->device_issue_pending = mv_xor_issue_pending;
dma_dev->dev = &pdev->dev;
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index d44626fa35ad..c6079fcca13f 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -3935,12 +3935,13 @@ static void ppc440spe_adma_free_chan_resources(struct dma_chan *chan)
}
/**
- * ppc440spe_adma_is_complete - poll the status of an ADMA transaction
+ * ppc440spe_adma_tx_status - poll the status of an ADMA transaction
* @chan: ADMA channel handle
* @cookie: ADMA transaction identifier
+ * @txstate: a holder for the current state of the channel
*/
-static enum dma_status ppc440spe_adma_is_complete(struct dma_chan *chan,
- dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used)
+static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
{
struct ppc440spe_adma_chan *ppc440spe_chan;
dma_cookie_t last_used;
@@ -3951,10 +3952,7 @@ static enum dma_status ppc440spe_adma_is_complete(struct dma_chan *chan,
last_used = chan->cookie;
last_complete = ppc440spe_chan->completed_cookie;
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
ret = dma_async_is_complete(cookie, last_complete, last_used);
if (ret == DMA_SUCCESS)
@@ -3965,10 +3963,7 @@ static enum dma_status ppc440spe_adma_is_complete(struct dma_chan *chan,
last_used = chan->cookie;
last_complete = ppc440spe_chan->completed_cookie;
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
return dma_async_is_complete(cookie, last_complete, last_used);
}
@@ -4180,7 +4175,7 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
ppc440spe_adma_alloc_chan_resources;
adev->common.device_free_chan_resources =
ppc440spe_adma_free_chan_resources;
- adev->common.device_is_tx_complete = ppc440spe_adma_is_complete;
+ adev->common.device_tx_status = ppc440spe_adma_tx_status;
adev->common.device_issue_pending = ppc440spe_adma_issue_pending;
/* Set prep routines based on capability */
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 6f25a20de99f..207bf7e30901 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -26,8 +26,7 @@
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-
-#include <asm/dmaengine.h>
+#include <linux/sh_dma.h>
#include "shdma.h"
@@ -45,7 +44,7 @@ enum sh_dmae_desc_status {
#define LOG2_DEFAULT_XFER_SIZE 2
/* A bitmask with bits enough for enum sh_dmae_slave_chan_id */
-static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)];
+static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
@@ -190,7 +189,7 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
struct sh_dmae_device, common);
struct sh_dmae_pdata *pdata = shdev->pdata;
- struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
+ const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16);
int shift = chan_pdata->dmars_bit;
@@ -266,8 +265,8 @@ static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
return NULL;
}
-static struct sh_dmae_slave_config *sh_dmae_find_slave(
- struct sh_dmae_chan *sh_chan, enum sh_dmae_slave_chan_id slave_id)
+static const struct sh_dmae_slave_config *sh_dmae_find_slave(
+ struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
{
struct dma_device *dma_dev = sh_chan->common.device;
struct sh_dmae_device *shdev = container_of(dma_dev,
@@ -275,11 +274,11 @@ static struct sh_dmae_slave_config *sh_dmae_find_slave(
struct sh_dmae_pdata *pdata = shdev->pdata;
int i;
- if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER)
+ if (param->slave_id >= SH_DMA_SLAVE_NUMBER)
return NULL;
for (i = 0; i < pdata->slave_num; i++)
- if (pdata->slave[i].slave_id == slave_id)
+ if (pdata->slave[i].slave_id == param->slave_id)
return pdata->slave + i;
return NULL;
@@ -299,9 +298,9 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
* never runs concurrently with itself or free_chan_resources.
*/
if (param) {
- struct sh_dmae_slave_config *cfg;
+ const struct sh_dmae_slave_config *cfg;
- cfg = sh_dmae_find_slave(sh_chan, param->slave_id);
+ cfg = sh_dmae_find_slave(sh_chan, param);
if (!cfg) {
ret = -EINVAL;
goto efindslave;
@@ -574,12 +573,14 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
{
struct sh_dmae_slave *param;
struct sh_dmae_chan *sh_chan;
+ dma_addr_t slave_addr;
if (!chan)
return NULL;
sh_chan = to_sh_chan(chan);
param = chan->private;
+ slave_addr = param->config->addr;
/* Someone calling slave DMA on a public channel? */
if (!param || !sg_len) {
@@ -592,16 +593,20 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
* if (param != NULL), this is a successfully requested slave channel,
* therefore param->config != NULL too.
*/
- return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &param->config->addr,
+ return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr,
direction, flags);
}
-static void sh_dmae_terminate_all(struct dma_chan *chan)
+static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
{
struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
+ /* Only supports DMA_TERMINATE_ALL */
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
if (!chan)
- return;
+ return -EINVAL;
dmae_halt(sh_chan);
@@ -617,6 +622,8 @@ static void sh_dmae_terminate_all(struct dma_chan *chan)
spin_unlock_bh(&sh_chan->desc_lock);
sh_dmae_chan_ld_cleanup(sh_chan, true);
+
+ return 0;
}
static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
@@ -748,10 +755,9 @@ static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
sh_chan_xfer_ld_queue(sh_chan);
}
-static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
+static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
dma_cookie_t cookie,
- dma_cookie_t *done,
- dma_cookie_t *used)
+ struct dma_tx_state *txstate)
{
struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
dma_cookie_t last_used;
@@ -763,12 +769,7 @@ static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
last_used = chan->cookie;
last_complete = sh_chan->completed_cookie;
BUG_ON(last_complete < 0);
-
- if (done)
- *done = last_complete;
-
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
spin_lock_bh(&sh_chan->desc_lock);
@@ -810,7 +811,7 @@ static irqreturn_t sh_dmae_interrupt(int irq, void *data)
return ret;
}
-#if defined(CONFIG_CPU_SH4)
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
static irqreturn_t sh_dmae_err(int irq, void *data)
{
struct sh_dmae_device *shdev = (struct sh_dmae_device *)data;
@@ -873,7 +874,7 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
int irq, unsigned long flags)
{
int err;
- struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
+ const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
struct platform_device *pdev = to_platform_device(shdev->common.dev);
struct sh_dmae_chan *new_sh_chan;
@@ -1040,18 +1041,18 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
= sh_dmae_alloc_chan_resources;
shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
- shdev->common.device_is_tx_complete = sh_dmae_is_complete;
+ shdev->common.device_tx_status = sh_dmae_tx_status;
shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
/* Compulsory for DMA_SLAVE fields */
shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
- shdev->common.device_terminate_all = sh_dmae_terminate_all;
+ shdev->common.device_control = sh_dmae_control;
shdev->common.dev = &pdev->dev;
/* Default transfer size of 32 bytes requires 32-byte alignment */
shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;
-#if defined(CONFIG_CPU_SH4)
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
if (!chanirq_res)
@@ -1076,7 +1077,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
#else
chanirq_res = errirq_res;
-#endif /* CONFIG_CPU_SH4 */
+#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */
if (chanirq_res->start == chanirq_res->end &&
!platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
@@ -1123,7 +1124,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
chan_probe_err:
sh_dmae_chan_remove(shdev);
eirqres:
-#if defined(CONFIG_CPU_SH4)
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
free_irq(errirq, shdev);
eirq_err:
#endif
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 153609a1e96c..4021275a0a43 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -17,8 +17,8 @@
#include <linux/interrupt.h>
#include <linux/list.h>
-#include <asm/dmaengine.h>
-
+#define SH_DMAC_MAX_CHANNELS 6
+#define SH_DMA_SLAVE_NUMBER 256
#define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */
struct device;
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
new file mode 100644
index 000000000000..f86b99f92c72
--- /dev/null
+++ b/drivers/dma/ste_dma40.c
@@ -0,0 +1,2718 @@
+/*
+ * drivers/dma/ste_dma40.c
+ *
+ * Copyright (C) ST-Ericsson 2007-2010
+ * License terms: GNU General Public License (GPL) version 2
+ * Author: Per Friden <per.friden@stericsson.com>
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/amba/dma.h>
+
+#include <plat/ste_dma40.h>
+
+#include "ste_dma40_ll.h"
+
+#define D40_NAME "dma40"
+
+#define D40_PHY_CHAN -1
+
+/* For masking out/in 2 bit channel positions */
+#define D40_CHAN_POS(chan) (2 * (chan / 2))
+#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
+
+/* Maximum iterations taken before giving up suspending a channel */
+#define D40_SUSPEND_MAX_IT 500
+
+#define D40_ALLOC_FREE (1 << 31)
+#define D40_ALLOC_PHY (1 << 30)
+#define D40_ALLOC_LOG_FREE 0
+
+/* The number of free d40_desc to keep in memory before starting
+ * to kfree() them */
+#define D40_DESC_CACHE_SIZE 50
+
+/* Hardware designer of the block */
+#define D40_PERIPHID2_DESIGNER 0x8
+
+/**
+ * enum d40_command - The different commands and/or statuses.
+ *
+ * @D40_DMA_STOP: DMA channel command STOP or status STOPPED,
+ * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
+ * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
+ * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
+ */
+enum d40_command {
+ D40_DMA_STOP = 0,
+ D40_DMA_RUN = 1,
+ D40_DMA_SUSPEND_REQ = 2,
+ D40_DMA_SUSPENDED = 3
+};
+
+/**
+ * struct d40_lli_pool - Structure for keeping LLIs in memory
+ *
+ * @base: Pointer to memory area when the pre_alloc_lli's are not large
+ * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
+ * pre_alloc_lli is used.
+ * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
+ * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
+ * one buffer to one buffer.
+ */
+struct d40_lli_pool {
+ void *base;
+ int size;
+ /* Space for dst and src, plus an extra for padding */
+ u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
+};
+
+/**
+ * struct d40_desc - A descriptor is one DMA job.
+ *
+ * @lli_phy: LLI settings for physical channel. Both src and dst
+ * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
+ * lli_len equals one.
+ * @lli_log: Same as above but for logical channels.
+ * @lli_pool: The pool with two entries pre-allocated.
+ * @lli_len: Number of LLI's in lli_pool
+ * @lli_tcount: Number of LLIs processed in the transfer. When it equals
+ * lli_len, this transfer job is done.
+ * @txd: DMA engine struct. Used, among other things, for communication
+ * during a transfer.
+ * @node: List entry.
+ * @dir: The transfer direction of this job.
+ * @is_in_client_list: true if the client owns this descriptor.
+ *
+ * This descriptor is used for both logical and physical transfers.
+ */
+
+struct d40_desc {
+ /* LLI physical */
+ struct d40_phy_lli_bidir lli_phy;
+ /* LLI logical */
+ struct d40_log_lli_bidir lli_log;
+
+ struct d40_lli_pool lli_pool;
+ u32 lli_len;
+ u32 lli_tcount;
+
+ struct dma_async_tx_descriptor txd;
+ struct list_head node;
+
+ enum dma_data_direction dir;
+ bool is_in_client_list;
+};
+
+/**
+ * struct d40_lcla_pool - LCLA pool settings and data.
+ *
+ * @base: The virtual address of LCLA.
+ * @phy: Physical base address of LCLA.
+ * @base_size: size of lcla.
+ * @lock: Lock to protect the content in this struct.
+ * @alloc_map: Mapping between physical channel and LCLA entries.
+ * @num_blocks: The number of entries in alloc_map. Equal to the
+ * number of physical channels.
+ */
+struct d40_lcla_pool {
+ void *base;
+ dma_addr_t phy;
+ resource_size_t base_size;
+ spinlock_t lock;
+ u32 *alloc_map;
+ int num_blocks;
+};
+
+/**
+ * struct d40_phy_res - struct for handling eventlines mapped to physical
+ * channels.
+ *
+ * @lock: A lock protecting this entity.
+ * @num: The physical channel number of this entity.
+ * @allocated_src: Bit map showing which src event lines are mapped to
+ * this physical channel. Can also be free or physically allocated.
+ * @allocated_dst: Same as for src, but for dst.
+ * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
+ * event line numbers. allocated_src and allocated_dst cannot both be
+ * allocated to a physical channel, since the interrupt handler would then
+ * have no way of figuring out which one the interrupt belongs to.
+ */
+struct d40_phy_res {
+ spinlock_t lock;
+ int num;
+ u32 allocated_src;
+ u32 allocated_dst;
+};
+
+struct d40_base;
+
+/**
+ * struct d40_chan - Struct that describes a channel.
+ *
+ * @lock: A spinlock to protect this struct.
+ * @log_num: The logical channel number, if any, of this channel.
+ * @completed: Starts with 1; after the first interrupt it is set to the dma engine's
+ * current cookie.
+ * @pending_tx: The number of pending transfers. Used between interrupt handler
+ * and tasklet.
+ * @busy: Set to true when transfer is ongoing on this channel.
+ * @phy_chan: Pointer to physical channel which this instance runs on.
+ * @chan: DMA engine handle.
+ * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
+ * transfer and call client callback.
+ * @client: Client-owned descriptor list.
+ * @active: Active descriptor.
+ * @queue: Queued jobs.
+ * @free: List of free descriptors, ready to be reused.
+ * @free_len: Number of descriptors in the free list.
+ * @dma_cfg: The client configuration of this dma channel.
+ * @base: Pointer to the device instance struct.
+ * @src_def_cfg: Default cfg register setting for src.
+ * @dst_def_cfg: Default cfg register setting for dst.
+ * @log_def: Default logical channel settings.
+ * @lcla: Space for one dst/src pair for logical channel transfers.
+ * @lcpa: Pointer to dst and src lcpa settings.
+ *
+ * This struct can either "be" a logical or a physical channel.
+ */
+struct d40_chan {
+ spinlock_t lock;
+ int log_num;
+ /* ID of the most recent completed transfer */
+ int completed;
+ int pending_tx;
+ bool busy;
+ struct d40_phy_res *phy_chan;
+ struct dma_chan chan;
+ struct tasklet_struct tasklet;
+ struct list_head client;
+ struct list_head active;
+ struct list_head queue;
+ struct list_head free;
+ int free_len;
+ struct stedma40_chan_cfg dma_cfg;
+ struct d40_base *base;
+ /* Default register configurations */
+ u32 src_def_cfg;
+ u32 dst_def_cfg;
+ struct d40_def_lcsp log_def;
+ struct d40_lcla_elem lcla;
+ struct d40_log_lli_full *lcpa;
+ /* AMBA extensions */
+ dma_addr_t amba_addr;
+ enum dma_data_direction amba_direction;
+};
+
+/**
+ * struct d40_base - The big global struct, one for each probed instance.
+ *
+ * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
+ * @execmd_lock: Lock for execute command usage since several channels share
+ * the same physical register.
+ * @dev: The device structure.
+ * @virtbase: The virtual base address of the DMA controller's registers.
+ * @clk: Pointer to the DMA clock structure.
+ * @phy_start: Physical memory start of the DMA registers.
+ * @phy_size: Size of the DMA register map.
+ * @irq: The IRQ number.
+ * @num_phy_chans: The number of physical channels. Read from HW. This
+ * is the number of available channels for this driver, not counting "Secure
+ * mode" allocated physical channels.
+ * @num_log_chans: The number of logical channels. Calculated from
+ * num_phy_chans.
+ * @dma_both: dma_device channels that can do both memcpy and slave transfers.
+ * @dma_slave: dma_device channels that can only do slave transfers.
+ * @dma_memcpy: dma_device channels that can only do memcpy transfers.
+ * @phy_chans: Room for all possible physical channels in system.
+ * @log_chans: Room for all possible logical channels in system.
+ * @lookup_log_chans: Used to map interrupt number to logical channel. Points
+ * to log_chans entries.
+ * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
+ * to phy_chans entries.
+ * @plat_data: Pointer to provided platform_data which is the driver
+ * configuration.
+ * @phy_res: Vector containing all physical channels.
+ * @lcla_pool: lcla pool settings and data.
+ * @lcpa_base: The virtual mapped address of LCPA.
+ * @phy_lcpa: The physical address of the LCPA.
+ * @lcpa_size: The size of the LCPA area.
+ */
+struct d40_base {
+ spinlock_t interrupt_lock;
+ spinlock_t execmd_lock;
+ struct device *dev;
+ void __iomem *virtbase;
+ struct clk *clk;
+ phys_addr_t phy_start;
+ resource_size_t phy_size;
+ int irq;
+ int num_phy_chans;
+ int num_log_chans;
+ struct dma_device dma_both;
+ struct dma_device dma_slave;
+ struct dma_device dma_memcpy;
+ struct d40_chan *phy_chans;
+ struct d40_chan *log_chans;
+ struct d40_chan **lookup_log_chans;
+ struct d40_chan **lookup_phy_chans;
+ struct stedma40_platform_data *plat_data;
+ /* Physical half channels */
+ struct d40_phy_res *phy_res;
+ struct d40_lcla_pool lcla_pool;
+ void *lcpa_base;
+ dma_addr_t phy_lcpa;
+ resource_size_t lcpa_size;
+};
+
+/**
+ * struct d40_interrupt_lookup - lookup table for interrupt handler
+ *
+ * @src: Interrupt mask register.
+ * @clr: Interrupt clear register.
+ * @is_error: true if this is an error interrupt.
+ * @offset: start delta into lookup_log_chans in d40_base. If equal to
+ * D40_PHY_CHAN, lookup_phy_chans shall be used instead.
+ */
+struct d40_interrupt_lookup {
+ u32 src;
+ u32 clr;
+ bool is_error;
+ int offset;
+};
+
+/**
+ * struct d40_reg_val - simple lookup struct
+ *
+ * @reg: The register.
+ * @val: The value that belongs to the register in reg.
+ */
+struct d40_reg_val {
+ unsigned int reg;
+ unsigned int val;
+};
+
+static int d40_pool_lli_alloc(struct d40_desc *d40d,
+ int lli_len, bool is_log)
+{
+ u32 align;
+ void *base;
+
+ if (is_log)
+ align = sizeof(struct d40_log_lli);
+ else
+ align = sizeof(struct d40_phy_lli);
+
+ if (lli_len == 1) {
+ base = d40d->lli_pool.pre_alloc_lli;
+ d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
+ d40d->lli_pool.base = NULL;
+ } else {
+ d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);
+
+ base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
+ d40d->lli_pool.base = base;
+
+ if (d40d->lli_pool.base == NULL)
+ return -ENOMEM;
+ }
+
+ if (is_log) {
+ d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
+ align);
+ d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
+ align);
+ } else {
+ d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
+ align);
+ d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
+ align);
+
+ d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src);
+ d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst);
+ }
+
+ return 0;
+}
+
+static void d40_pool_lli_free(struct d40_desc *d40d)
+{
+ kfree(d40d->lli_pool.base);
+ d40d->lli_pool.base = NULL;
+ d40d->lli_pool.size = 0;
+ d40d->lli_log.src = NULL;
+ d40d->lli_log.dst = NULL;
+ d40d->lli_phy.src = NULL;
+ d40d->lli_phy.dst = NULL;
+ d40d->lli_phy.src_addr = 0;
+ d40d->lli_phy.dst_addr = 0;
+}
+
+static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c,
+ struct d40_desc *desc)
+{
+ dma_cookie_t cookie = d40c->chan.cookie;
+
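+ /* Cookies are kept positive; wrap back to 1 on signed overflow */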
+ if (++cookie < 0)
+ cookie = 1;
+
+ d40c->chan.cookie = cookie;
+ desc->txd.cookie = cookie;
+
+ return cookie;
+}
+
+static void d40_desc_reset(struct d40_desc *d40d)
+{
+ d40d->lli_tcount = 0;
+}
+
+static void d40_desc_remove(struct d40_desc *d40d)
+{
+ list_del(&d40d->node);
+}
+
+static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
+{
+ struct d40_desc *desc;
+ struct d40_desc *d;
+ struct d40_desc *_d;
+
+ if (!list_empty(&d40c->client)) {
+ list_for_each_entry_safe(d, _d, &d40c->client, node)
+ if (async_tx_test_ack(&d->txd)) {
+ d40_pool_lli_free(d);
+ d40_desc_remove(d);
+ desc = d;
+ goto out;
+ }
+ }
+
+ if (list_empty(&d40c->free)) {
+ /* No free descriptors to reuse, so allocate a new one */
+ desc = kzalloc(sizeof(struct d40_desc), GFP_NOWAIT);
+ if (desc == NULL)
+ goto out;
+ INIT_LIST_HEAD(&desc->node);
+ } else {
+ /* Reuse an old desc. */
+ desc = list_first_entry(&d40c->free,
+ struct d40_desc,
+ node);
+ list_del(&desc->node);
+ d40c->free_len--;
+ }
+out:
+ return desc;
+}
+
+static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
+{
+ if (d40c->free_len < D40_DESC_CACHE_SIZE) {
+ list_add_tail(&d40d->node, &d40c->free);
+ d40c->free_len++;
+ } else
+ kfree(d40d);
+}
+
+static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
+{
+ list_add_tail(&desc->node, &d40c->active);
+}
+
+static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
+{
+ struct d40_desc *d;
+
+ if (list_empty(&d40c->active))
+ return NULL;
+
+ d = list_first_entry(&d40c->active,
+ struct d40_desc,
+ node);
+ return d;
+}
+
+static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
+{
+ list_add_tail(&desc->node, &d40c->queue);
+}
+
+static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
+{
+ struct d40_desc *d;
+
+ if (list_empty(&d40c->queue))
+ return NULL;
+
+ d = list_first_entry(&d40c->queue,
+ struct d40_desc,
+ node);
+ return d;
+}
+
+/* Support functions for logical channels */
+
+static int d40_lcla_id_get(struct d40_chan *d40c,
+ struct d40_lcla_pool *pool)
+{
+ int src_id = 0;
+ int dst_id = 0;
+ struct d40_log_lli *lcla_lidx_base =
+ pool->base + d40c->phy_chan->num * 1024;
+ int i;
+ int lli_per_log = d40c->base->plat_data->llis_per_log;
+
+ if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
+ return 0;
+
+ if (pool->num_blocks > 32)
+ return -EINVAL;
+
+ spin_lock(&pool->lock);
+
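+ /* Find the first free LCLA entry for this physical channel (src side) */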
+ for (i = 0; i < pool->num_blocks; i++) {
+ if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
+ pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
+ break;
+ }
+ }
+ src_id = i;
+ if (src_id >= pool->num_blocks)
+ goto err;
+
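+ /* Continue the search for a second free entry to use for the dst side */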
+ for (; i < pool->num_blocks; i++) {
+ if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
+ pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
+ break;
+ }
+ }
+
+ dst_id = i;
+ if (dst_id == src_id)
+ goto err;
+
+ d40c->lcla.src_id = src_id;
+ d40c->lcla.dst_id = dst_id;
+ d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
+ d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;
+
+
+ spin_unlock(&pool->lock);
+ return 0;
+err:
+ spin_unlock(&pool->lock);
+ return -EINVAL;
+}
+
+static void d40_lcla_id_put(struct d40_chan *d40c,
+ struct d40_lcla_pool *pool,
+ int id)
+{
+ if (id < 0)
+ return;
+
+ d40c->lcla.src_id = -1;
+ d40c->lcla.dst_id = -1;
+
+ spin_lock(&pool->lock);
+ pool->alloc_map[d40c->phy_chan->num] &= (~(0x1 << id));
+ spin_unlock(&pool->lock);
+}
+
+static int d40_channel_execute_command(struct d40_chan *d40c,
+ enum d40_command command)
+{
+ int status, i;
+ void __iomem *active_reg;
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&d40c->base->execmd_lock, flags);
+
+ if (d40c->phy_chan->num % 2 == 0)
+ active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
+ else
+ active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
+
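+ /* A suspend request is a no-op if the channel is already stopped or suspended */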
+ if (command == D40_DMA_SUSPEND_REQ) {
+ status = (readl(active_reg) &
+ D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
+ D40_CHAN_POS(d40c->phy_chan->num);
+
+ if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
+ goto done;
+ }
+
+ writel(command << D40_CHAN_POS(d40c->phy_chan->num), active_reg);
+
+ if (command == D40_DMA_SUSPEND_REQ) {
+
+ for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
+ status = (readl(active_reg) &
+ D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
+ D40_CHAN_POS(d40c->phy_chan->num);
+
+ cpu_relax();
+ /*
+ * Reduce the number of bus accesses while
+ * waiting for the DMA to suspend.
+ */
+ udelay(3);
+
+ if (status == D40_DMA_STOP ||
+ status == D40_DMA_SUSPENDED)
+ break;
+ }
+
+ if (i == D40_SUSPEND_MAX_IT) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s]: unable to suspend the chl %d (log: %d) status %x\n",
+ __func__, d40c->phy_chan->num, d40c->log_num,
+ status);
+ dump_stack();
+ ret = -EBUSY;
+ }
+
+ }
+done:
+ spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
+ return ret;
+}
+
+static void d40_term_all(struct d40_chan *d40c)
+{
+ struct d40_desc *d40d;
+ struct d40_desc *d;
+ struct d40_desc *_d;
+
+ /* Release active descriptors */
+ while ((d40d = d40_first_active_get(d40c))) {
+ d40_desc_remove(d40d);
+
+ /* Return desc to free-list */
+ d40_desc_free(d40c, d40d);
+ }
+
+ /* Release queued descriptors waiting for transfer */
+ while ((d40d = d40_first_queued(d40c))) {
+ d40_desc_remove(d40d);
+
+ /* Return desc to free-list */
+ d40_desc_free(d40c, d40d);
+ }
+
+ /* Release client owned descriptors */
+ if (!list_empty(&d40c->client))
+ list_for_each_entry_safe(d, _d, &d40c->client, node) {
+ d40_pool_lli_free(d);
+ d40_desc_remove(d);
+ /* Return desc to free-list */
+ d40_desc_free(d40c, d);
+ }
+
+ d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
+ d40c->lcla.src_id);
+ d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
+ d40c->lcla.dst_id);
+
+ d40c->pending_tx = 0;
+ d40c->busy = false;
+}
+
+static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
+{
+ u32 val;
+ unsigned long flags;
+
+ if (do_enable)
+ val = D40_ACTIVATE_EVENTLINE;
+ else
+ val = D40_DEACTIVATE_EVENTLINE;
+
+ spin_lock_irqsave(&d40c->phy_chan->lock, flags);
+
+ /* Enable/disable the event line connected to the device (or memcpy) */
+ if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
+ (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
+ u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
+
+ writel((val << D40_EVENTLINE_POS(event)) |
+ ~D40_EVENTLINE_MASK(event),
+ d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SSLNK);
+ }
+ if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
+ u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
+
+ writel((val << D40_EVENTLINE_POS(event)) |
+ ~D40_EVENTLINE_MASK(event),
+ d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SDLNK);
+ }
+
+ spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
+}
+
+static bool d40_chan_has_events(struct d40_chan *d40c)
+{
+ u32 val = 0;
+
+ /* If SSLNK or SDLNK is zero all events are disabled */
+ if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
+ (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
+ val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SSLNK);
+
+ if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM)
+ val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SDLNK);
+ return (bool) val;
+}
+
+static void d40_config_enable_lidx(struct d40_chan *d40c)
+{
+ /* Set LIDX for lcla */
+ writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
+ D40_SREG_ELEM_LOG_LIDX_MASK,
+ d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);
+
+ writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
+ D40_SREG_ELEM_LOG_LIDX_MASK,
+ d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
+}
+
+static int d40_config_write(struct d40_chan *d40c)
+{
+ u32 addr_base;
+ u32 var;
+ int res;
+
+ res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
+ if (res)
+ return res;
+
+ /* Odd addresses are even addresses + 4 */
+ addr_base = (d40c->phy_chan->num % 2) * 4;
+ /* Setup channel mode to logical or physical */
+ var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
+ D40_CHAN_POS(d40c->phy_chan->num);
+ writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
+
+ /* Setup operational mode option register */
+ var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
+ 0x3) << D40_CHAN_POS(d40c->phy_chan->num);
+
+ writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
+
+ if (d40c->log_num != D40_PHY_CHAN) {
+ /* Set default config for CFG reg */
+ writel(d40c->src_def_cfg,
+ d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SSCFG);
+ writel(d40c->dst_def_cfg,
+ d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SDCFG);
+
+ d40_config_enable_lidx(d40c);
+ }
+ return res;
+}
+
+static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
+{
+
+ if (d40d->lli_phy.dst && d40d->lli_phy.src) {
+ d40_phy_lli_write(d40c->base->virtbase,
+ d40c->phy_chan->num,
+ d40d->lli_phy.dst,
+ d40d->lli_phy.src);
+ d40d->lli_tcount = d40d->lli_len;
+ } else if (d40d->lli_log.dst && d40d->lli_log.src) {
+ u32 lli_len;
+ struct d40_log_lli *src = d40d->lli_log.src;
+ struct d40_log_lli *dst = d40d->lli_log.dst;
+
+ src += d40d->lli_tcount;
+ dst += d40d->lli_tcount;
+
+ if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
+ lli_len = d40d->lli_len;
+ else
+ lli_len = d40c->base->plat_data->llis_per_log;
+ d40d->lli_tcount += lli_len;
+ d40_log_lli_write(d40c->lcpa, d40c->lcla.src,
+ d40c->lcla.dst,
+ dst, src,
+ d40c->base->plat_data->llis_per_log);
+ }
+}
+
+static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct d40_chan *d40c = container_of(tx->chan,
+ struct d40_chan,
+ chan);
+ struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
+ unsigned long flags;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ tx->cookie = d40_assign_cookie(d40c, d40d);
+
+ d40_desc_queue(d40c, d40d);
+
+ spin_unlock_irqrestore(&d40c->lock, flags);
+
+ return tx->cookie;
+}
+
+static int d40_start(struct d40_chan *d40c)
+{
+ int err;
+
+ if (d40c->log_num != D40_PHY_CHAN) {
+ err = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
+ if (err)
+ return err;
+ d40_config_set_event(d40c, true);
+ }
+
+ err = d40_channel_execute_command(d40c, D40_DMA_RUN);
+
+ return err;
+}
+
+static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
+{
+ struct d40_desc *d40d;
+ int err;
+
+ /* Start queued jobs, if any */
+ d40d = d40_first_queued(d40c);
+
+ if (d40d != NULL) {
+ d40c->busy = true;
+
+ /* Remove from queue */
+ d40_desc_remove(d40d);
+
+ /* Add to active queue */
+ d40_desc_submit(d40c, d40d);
+
+ /* Initiate DMA job */
+ d40_desc_load(d40c, d40d);
+
+ /* Start dma job */
+ err = d40_start(d40c);
+
+ if (err)
+ return NULL;
+ }
+
+ return d40d;
+}
+
+/* called from interrupt context */
+static void dma_tc_handle(struct d40_chan *d40c)
+{
+ struct d40_desc *d40d;
+
+ if (!d40c->phy_chan)
+ return;
+
+ /* Get first active entry from list */
+ d40d = d40_first_active_get(d40c);
+
+ if (d40d == NULL)
+ return;
+
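+ /* Not all LLIs are loaded yet: load the next set and restart the channel */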
+ if (d40d->lli_tcount < d40d->lli_len) {
+
+ d40_desc_load(d40c, d40d);
+ /* Start dma job */
+ (void) d40_start(d40c);
+ return;
+ }
+
+ if (d40_queue_start(d40c) == NULL)
+ d40c->busy = false;
+
+ d40c->pending_tx++;
+ tasklet_schedule(&d40c->tasklet);
+
+}
+
+static void dma_tasklet(unsigned long data)
+{
+ struct d40_chan *d40c = (struct d40_chan *) data;
+ struct d40_desc *d40d_fin;
+ unsigned long flags;
+ dma_async_tx_callback callback;
+ void *callback_param;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ /* Get first active entry from list */
+ d40d_fin = d40_first_active_get(d40c);
+
+ if (d40d_fin == NULL)
+ goto err;
+
+ d40c->completed = d40d_fin->txd.cookie;
+
+ /*
+ * If terminating a channel, pending_tx is set to zero.
+ * This prevents any finished active jobs from being returned to the client.
+ */
+ if (d40c->pending_tx == 0) {
+ spin_unlock_irqrestore(&d40c->lock, flags);
+ return;
+ }
+
+ /* Callback to client */
+ callback = d40d_fin->txd.callback;
+ callback_param = d40d_fin->txd.callback_param;
+
+ if (async_tx_test_ack(&d40d_fin->txd)) {
+ d40_pool_lli_free(d40d_fin);
+ d40_desc_remove(d40d_fin);
+ /* Return desc to free-list */
+ d40_desc_free(d40c, d40d_fin);
+ } else {
+ d40_desc_reset(d40d_fin);
+ if (!d40d_fin->is_in_client_list) {
+ d40_desc_remove(d40d_fin);
+ list_add_tail(&d40d_fin->node, &d40c->client);
+ d40d_fin->is_in_client_list = true;
+ }
+ }
+
+ d40c->pending_tx--;
+
+ if (d40c->pending_tx)
+ tasklet_schedule(&d40c->tasklet);
+
+ spin_unlock_irqrestore(&d40c->lock, flags);
+
+ if (callback)
+ callback(callback_param);
+
+ return;
+
+ err:
+ /* Rescue manoeuvre if receiving double interrupts */
+ if (d40c->pending_tx > 0)
+ d40c->pending_tx--;
+ spin_unlock_irqrestore(&d40c->lock, flags);
+}
+
+static irqreturn_t d40_handle_interrupt(int irq, void *data)
+{
+ static const struct d40_interrupt_lookup il[] = {
+ {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
+ {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
+ {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
+ {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
+ {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
+ {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
+ {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
+ {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
+ {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
+ {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
+ };
+
+ int i;
+ u32 regs[ARRAY_SIZE(il)];
+ u32 tmp;
+ u32 idx;
+ u32 row;
+ long chan = -1;
+ struct d40_chan *d40c;
+ unsigned long flags;
+ struct d40_base *base = data;
+
+ spin_lock_irqsave(&base->interrupt_lock, flags);
+
+ /* Read interrupt status of both logical and physical channels */
+ for (i = 0; i < ARRAY_SIZE(il); i++)
+ regs[i] = readl(base->virtbase + il[i].src);
+
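+ /* Walk every set status bit across all logical and physical interrupt registers */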
+ for (;;) {
+
+ chan = find_next_bit((unsigned long *)regs,
+ BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);
+
+ /* No more set bits found? */
+ if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
+ break;
+
+ row = chan / BITS_PER_LONG;
+ idx = chan & (BITS_PER_LONG - 1);
+
+ /* ACK interrupt */
+ tmp = readl(base->virtbase + il[row].clr);
+ tmp |= 1 << idx;
+ writel(tmp, base->virtbase + il[row].clr);
+
+ if (il[row].offset == D40_PHY_CHAN)
+ d40c = base->lookup_phy_chans[idx];
+ else
+ d40c = base->lookup_log_chans[il[row].offset + idx];
+ spin_lock(&d40c->lock);
+
+ if (!il[row].is_error)
+ dma_tc_handle(d40c);
+ else
+ dev_err(base->dev, "[%s] IRQ chan: %ld offset %d idx %d\n",
+ __func__, chan, il[row].offset, idx);
+
+ spin_unlock(&d40c->lock);
+ }
+
+ spin_unlock_irqrestore(&base->interrupt_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+
+static int d40_validate_conf(struct d40_chan *d40c,
+ struct stedma40_chan_cfg *conf)
+{
+ int res = 0;
+ u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
+ u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
+ bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
+ == STEDMA40_CHANNEL_IN_LOG_MODE;
+
+ if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH &&
+ dst_event_group == STEDMA40_DEV_DST_MEMORY) {
+ dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
+ __func__);
+ res = -EINVAL;
+ }
+
+ if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM &&
+ src_event_group == STEDMA40_DEV_SRC_MEMORY) {
+ dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
+ __func__);
+ res = -EINVAL;
+ }
+
+ if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
+ dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] No event line\n", __func__);
+ res = -EINVAL;
+ }
+
+ if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
+ (src_event_group != dst_event_group)) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Invalid event group\n", __func__);
+ res = -EINVAL;
+ }
+
+ if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
+ /*
+ * The DMAC HW supports it, but support will only be added
+ * to this driver if any dma client requires it.
+ */
+ dev_err(&d40c->chan.dev->device,
+ "[%s] periph to periph not supported\n",
+ __func__);
+ res = -EINVAL;
+ }
+
+ return res;
+}
+
+static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
+ int log_event_line)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&phy->lock, flags);
+ if (!log_event_line) {
+ /* Physical interrupts are masked per physical full channel */
+ if (phy->allocated_src == D40_ALLOC_FREE &&
+ phy->allocated_dst == D40_ALLOC_FREE) {
+ phy->allocated_dst = D40_ALLOC_PHY;
+ phy->allocated_src = D40_ALLOC_PHY;
+ goto found;
+ } else
+ goto not_found;
+ }
+
+ /* Logical channel */
+ if (is_src) {
+ if (phy->allocated_src == D40_ALLOC_PHY)
+ goto not_found;
+
+ if (phy->allocated_src == D40_ALLOC_FREE)
+ phy->allocated_src = D40_ALLOC_LOG_FREE;
+
+ if (!(phy->allocated_src & (1 << log_event_line))) {
+ phy->allocated_src |= 1 << log_event_line;
+ goto found;
+ } else
+ goto not_found;
+ } else {
+ if (phy->allocated_dst == D40_ALLOC_PHY)
+ goto not_found;
+
+ if (phy->allocated_dst == D40_ALLOC_FREE)
+ phy->allocated_dst = D40_ALLOC_LOG_FREE;
+
+ if (!(phy->allocated_dst & (1 << log_event_line))) {
+ phy->allocated_dst |= 1 << log_event_line;
+ goto found;
+ } else
+ goto not_found;
+ }
+
+not_found:
+ spin_unlock_irqrestore(&phy->lock, flags);
+ return false;
+found:
+ spin_unlock_irqrestore(&phy->lock, flags);
+ return true;
+}
+
+static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
+ int log_event_line)
+{
+ unsigned long flags;
+ bool is_free = false;
+
+ spin_lock_irqsave(&phy->lock, flags);
+ if (!log_event_line) {
+ /* Physical interrupts are masked per physical full channel */
+ phy->allocated_dst = D40_ALLOC_FREE;
+ phy->allocated_src = D40_ALLOC_FREE;
+ is_free = true;
+ goto out;
+ }
+
+ /* Logical channel */
+ if (is_src) {
+ phy->allocated_src &= ~(1 << log_event_line);
+ if (phy->allocated_src == D40_ALLOC_LOG_FREE)
+ phy->allocated_src = D40_ALLOC_FREE;
+ } else {
+ phy->allocated_dst &= ~(1 << log_event_line);
+ if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
+ phy->allocated_dst = D40_ALLOC_FREE;
+ }
+
+ is_free = ((phy->allocated_src | phy->allocated_dst) ==
+ D40_ALLOC_FREE);
+
+out:
+ spin_unlock_irqrestore(&phy->lock, flags);
+
+ return is_free;
+}
+
+static int d40_allocate_channel(struct d40_chan *d40c)
+{
+ int dev_type;
+ int event_group;
+ int event_line;
+ struct d40_phy_res *phys;
+ int i;
+ int j;
+ int log_num;
+ bool is_src;
+ bool is_log = (d40c->dma_cfg.channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
+ == STEDMA40_CHANNEL_IN_LOG_MODE;
+
+
+ phys = d40c->base->phy_res;
+
+ if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
+ dev_type = d40c->dma_cfg.src_dev_type;
+ log_num = 2 * dev_type;
+ is_src = true;
+ } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
+ d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
+ /* dst event lines are used for logical memcpy */
+ dev_type = d40c->dma_cfg.dst_dev_type;
+ log_num = 2 * dev_type + 1;
+ is_src = false;
+ } else
+ return -EINVAL;
+
+ event_group = D40_TYPE_TO_GROUP(dev_type);
+ event_line = D40_TYPE_TO_EVENT(dev_type);
+
+ if (!is_log) {
+ if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
+ /* Find physical half channel */
+ for (i = 0; i < d40c->base->num_phy_chans; i++) {
+
+ if (d40_alloc_mask_set(&phys[i], is_src, 0))
+ goto found_phy;
+ }
+ } else
+ for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
+ int phy_num = j + event_group * 2;
+ for (i = phy_num; i < phy_num + 2; i++) {
+ if (d40_alloc_mask_set(&phys[i],
+ is_src, 0))
+ goto found_phy;
+ }
+ }
+ return -EINVAL;
+found_phy:
+ d40c->phy_chan = &phys[i];
+ d40c->log_num = D40_PHY_CHAN;
+ goto out;
+ }
+ if (dev_type == -1)
+ return -EINVAL;
+
+ /* Find logical channel */
+ for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
+ int phy_num = j + event_group * 2;
+ /*
+ * Spread logical channels across all available physical channels
+ * rather than packing every logical channel onto the first
+ * available phy channel.
+ */
+ if (is_src) {
+ for (i = phy_num; i < phy_num + 2; i++) {
+ if (d40_alloc_mask_set(&phys[i], is_src,
+ event_line))
+ goto found_log;
+ }
+ } else {
+ for (i = phy_num + 1; i >= phy_num; i--) {
+ if (d40_alloc_mask_set(&phys[i], is_src,
+ event_line))
+ goto found_log;
+ }
+ }
+ }
+ return -EINVAL;
+
+found_log:
+ d40c->phy_chan = &phys[i];
+ d40c->log_num = log_num;
+out:
+
+ if (is_log)
+ d40c->base->lookup_log_chans[d40c->log_num] = d40c;
+ else
+ d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
+
+ return 0;
+
+}
+
+static int d40_config_chan(struct d40_chan *d40c,
+ struct stedma40_chan_cfg *info)
+{
+
+ /* Fill in basic CFG register values */
+ d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
+ &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);
+
+ if (d40c->log_num != D40_PHY_CHAN) {
+ d40_log_cfg(&d40c->dma_cfg,
+ &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
+
+ if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
+ d40c->lcpa = d40c->base->lcpa_base +
+ d40c->dma_cfg.src_dev_type * 32;
+ else
+ d40c->lcpa = d40c->base->lcpa_base +
+ d40c->dma_cfg.dst_dev_type * 32 + 16;
+ }
+
+ /* Write channel configuration to the DMA */
+ return d40_config_write(d40c);
+}
+
+static int d40_config_memcpy(struct d40_chan *d40c)
+{
+ dma_cap_mask_t cap = d40c->chan.device->cap_mask;
+
+ if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
+ d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
+ d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
+ d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
+ memcpy[d40c->chan.chan_id];
+
+ } else if (dma_has_cap(DMA_MEMCPY, cap) &&
+ dma_has_cap(DMA_SLAVE, cap)) {
+ d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
+ } else {
+ dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+static int d40_free_dma(struct d40_chan *d40c)
+{
+
+ int res = 0;
+ u32 event, dir;
+ struct d40_phy_res *phy = d40c->phy_chan;
+ bool is_src;
+
+ /* Terminate all queued and active transfers */
+ d40_term_all(d40c);
+
+ if (phy == NULL) {
+ dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (phy->allocated_src == D40_ALLOC_FREE &&
+ phy->allocated_dst == D40_ALLOC_FREE) {
+ dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
+ __func__);
+ return -EINVAL;
+ }
+
+
+ res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
+ if (res) {
+ dev_err(&d40c->chan.dev->device, "[%s] suspend\n",
+ __func__);
+ return res;
+ }
+
+ if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
+ d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
+ event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
+ dir = D40_CHAN_REG_SDLNK;
+ is_src = false;
+ } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
+ event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
+ dir = D40_CHAN_REG_SSLNK;
+ is_src = true;
+ } else {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Unknown direction\n", __func__);
+ return -EINVAL;
+ }
+
+ if (d40c->log_num != D40_PHY_CHAN) {
+ /*
+ * Release the logical channel and deactivate the event line
+ * while the physical resource is suspended.
+ */
+ writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) &
+ D40_EVENTLINE_MASK(event),
+ d40c->base->virtbase + D40_DREG_PCBASE +
+ phy->num * D40_DREG_PCDELTA + dir);
+
+ d40c->base->lookup_log_chans[d40c->log_num] = NULL;
+
+ /*
+ * Check if there are more logical allocations
+ * on this phy channel.
+ */
+ if (!d40_alloc_mask_free(phy, is_src, event)) {
+ /* Resume the other logical channels if any */
+ if (d40_chan_has_events(d40c)) {
+ res = d40_channel_execute_command(d40c,
+ D40_DMA_RUN);
+ if (res) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Executing RUN command\n",
+ __func__);
+ return res;
+ }
+ }
+ return 0;
+ }
+ } else
+ d40_alloc_mask_free(phy, is_src, 0);
+
+ /* Release physical channel */
+ res = d40_channel_execute_command(d40c, D40_DMA_STOP);
+ if (res) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Failed to stop channel\n", __func__);
+ return res;
+ }
+ d40c->phy_chan = NULL;
+ /* Invalidate channel type */
+ d40c->dma_cfg.channel_type = 0;
+ d40c->base->lookup_phy_chans[phy->num] = NULL;
+
+ return 0;
+
+
+}
+
+static int d40_pause(struct dma_chan *chan)
+{
+ struct d40_chan *d40c =
+ container_of(chan, struct d40_chan, chan);
+ int res;
+
+ unsigned long flags;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
+ if (res == 0) {
+ if (d40c->log_num != D40_PHY_CHAN) {
+ d40_config_set_event(d40c, false);
+ /* Resume the other logical channels if any */
+ if (d40_chan_has_events(d40c))
+ res = d40_channel_execute_command(d40c,
+ D40_DMA_RUN);
+ }
+ }
+
+ spin_unlock_irqrestore(&d40c->lock, flags);
+ return res;
+}
+
+static bool d40_tx_is_linked(struct d40_chan *d40c)
+{
+ bool is_link;
+
+ if (d40c->log_num != D40_PHY_CHAN)
+ is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
+ else
+ is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SDLNK) &
+ D40_SREG_LNK_PHYS_LNK_MASK;
+ return is_link;
+}
+
+static u32 d40_residue(struct d40_chan *d40c)
+{
+ u32 num_elt;
+
+ if (d40c->log_num != D40_PHY_CHAN)
+ num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
+ >> D40_MEM_LCSP2_ECNT_POS;
+ else
+ num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SDELT) &
+ D40_SREG_ELEM_PHY_ECNT_MASK) >> D40_SREG_ELEM_PHY_ECNT_POS;
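+ /* ECNT holds the number of elements left; convert to bytes via the dst data width */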
+ return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
+}
+
+static int d40_resume(struct dma_chan *chan)
+{
+ struct d40_chan *d40c =
+ container_of(chan, struct d40_chan, chan);
+ int res = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ if (d40c->log_num != D40_PHY_CHAN) {
+ res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
+ if (res)
+ goto out;
+
+ /* If there are bytes left to transfer or a linked tx, resume the job */
+ if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
+ d40_config_set_event(d40c, true);
+ res = d40_channel_execute_command(d40c, D40_DMA_RUN);
+ }
+ } else if (d40_residue(d40c) || d40_tx_is_linked(d40c))
+ res = d40_channel_execute_command(d40c, D40_DMA_RUN);
+
+out:
+ spin_unlock_irqrestore(&d40c->lock, flags);
+ return res;
+}
+
+static u32 stedma40_residue(struct dma_chan *chan)
+{
+ struct d40_chan *d40c =
+ container_of(chan, struct d40_chan, chan);
+ u32 bytes_left;
+ unsigned long flags;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+ bytes_left = d40_residue(d40c);
+ spin_unlock_irqrestore(&d40c->lock, flags);
+
+ return bytes_left;
+}
+
+/* Public DMA functions in addition to the DMA engine framework */
+
+int stedma40_set_psize(struct dma_chan *chan,
+ int src_psize,
+ int dst_psize)
+{
+ struct d40_chan *d40c =
+ container_of(chan, struct d40_chan, chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ if (d40c->log_num != D40_PHY_CHAN) {
+ d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
+ d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
+ d40c->log_def.lcsp1 |= src_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
+ d40c->log_def.lcsp3 |= dst_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
+ goto out;
+ }
+
+ if (src_psize == STEDMA40_PSIZE_PHY_1)
+ d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
+ else {
+ d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
+ d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
+ D40_SREG_CFG_PSIZE_POS);
+ d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS;
+ }
+
+ if (dst_psize == STEDMA40_PSIZE_PHY_1)
+ d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
+ else {
+ d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
+ d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
+ D40_SREG_CFG_PSIZE_POS);
+ d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS;
+ }
+out:
+ spin_unlock_irqrestore(&d40c->lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(stedma40_set_psize);
+
+struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
+ struct scatterlist *sgl_dst,
+ struct scatterlist *sgl_src,
+ unsigned int sgl_len,
+ unsigned long flags)
+{
+ int res;
+ struct d40_desc *d40d;
+ struct d40_chan *d40c = container_of(chan, struct d40_chan,
+ chan);
+ unsigned long flg;
+ int lli_max = d40c->base->plat_data->llis_per_log;
+
+
+ spin_lock_irqsave(&d40c->lock, flg);
+ d40d = d40_desc_get(d40c);
+
+ if (d40d == NULL)
+ goto err;
+
+ memset(d40d, 0, sizeof(struct d40_desc));
+ d40d->lli_len = sgl_len;
+
+ d40d->txd.flags = flags;
+
+ if (d40c->log_num != D40_PHY_CHAN) {
+ if (sgl_len > 1)
+ /*
+ * Check if there is space available in lcla. If not,
+ * split the list into 1-length segments and run only
+ * in lcpa space.
+ */
+ if (d40_lcla_id_get(d40c,
+ &d40c->base->lcla_pool) != 0)
+ lli_max = 1;
+
+ if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Out of memory\n", __func__);
+ goto err;
+ }
+
+ (void) d40_log_sg_to_lli(d40c->lcla.src_id,
+ sgl_src,
+ sgl_len,
+ d40d->lli_log.src,
+ d40c->log_def.lcsp1,
+ d40c->dma_cfg.src_info.data_width,
+ flags & DMA_PREP_INTERRUPT, lli_max,
+ d40c->base->plat_data->llis_per_log);
+
+ (void) d40_log_sg_to_lli(d40c->lcla.dst_id,
+ sgl_dst,
+ sgl_len,
+ d40d->lli_log.dst,
+ d40c->log_def.lcsp3,
+ d40c->dma_cfg.dst_info.data_width,
+ flags & DMA_PREP_INTERRUPT, lli_max,
+ d40c->base->plat_data->llis_per_log);
+
+
+ } else {
+ if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Out of memory\n", __func__);
+ goto err;
+ }
+
+ res = d40_phy_sg_to_lli(sgl_src,
+ sgl_len,
+ 0,
+ d40d->lli_phy.src,
+ d40d->lli_phy.src_addr,
+ d40c->src_def_cfg,
+ d40c->dma_cfg.src_info.data_width,
+ d40c->dma_cfg.src_info.psize,
+ true);
+
+ if (res < 0)
+ goto err;
+
+ res = d40_phy_sg_to_lli(sgl_dst,
+ sgl_len,
+ 0,
+ d40d->lli_phy.dst,
+ d40d->lli_phy.dst_addr,
+ d40c->dst_def_cfg,
+ d40c->dma_cfg.dst_info.data_width,
+ d40c->dma_cfg.dst_info.psize,
+ true);
+
+ if (res < 0)
+ goto err;
+
+ (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
+ d40d->lli_pool.size, DMA_TO_DEVICE);
+ }
+
+ dma_async_tx_descriptor_init(&d40d->txd, chan);
+
+ d40d->txd.tx_submit = d40_tx_submit;
+
+ spin_unlock_irqrestore(&d40c->lock, flg);
+
+ return &d40d->txd;
+err:
+ spin_unlock_irqrestore(&d40c->lock, flg);
+ return NULL;
+}
+EXPORT_SYMBOL(stedma40_memcpy_sg);
+
+bool stedma40_filter(struct dma_chan *chan, void *data)
+{
+ struct stedma40_chan_cfg *info = data;
+ struct d40_chan *d40c =
+ container_of(chan, struct d40_chan, chan);
+ int err;
+
+ if (data) {
+ err = d40_validate_conf(d40c, info);
+ if (!err)
+ d40c->dma_cfg = *info;
+ } else
+ err = d40_config_memcpy(d40c);
+
+ return err == 0;
+}
+EXPORT_SYMBOL(stedma40_filter);
+
+/* DMA ENGINE functions */
+static int d40_alloc_chan_resources(struct dma_chan *chan)
+{
+ int err;
+ unsigned long flags;
+ struct d40_chan *d40c =
+ container_of(chan, struct d40_chan, chan);
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ d40c->completed = chan->cookie = 1;
+
+ /*
+ * If no dma configuration is set (channel_type == 0)
+ * use default configuration
+ */
+ if (d40c->dma_cfg.channel_type == 0) {
+ err = d40_config_memcpy(d40c);
+ if (err)
+ goto err_alloc;
+ }
+
+ err = d40_allocate_channel(d40c);
+ if (err) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Failed to allocate channel\n", __func__);
+ goto err_alloc;
+ }
+
+ err = d40_config_chan(d40c, &d40c->dma_cfg);
+ if (err) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Failed to configure channel\n",
+ __func__);
+ goto err_config;
+ }
+
+ spin_unlock_irqrestore(&d40c->lock, flags);
+ return 0;
+
+ err_config:
+ (void) d40_free_dma(d40c);
+ err_alloc:
+ spin_unlock_irqrestore(&d40c->lock, flags);
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Channel allocation failed\n", __func__);
+ return -EINVAL;
+}
+
+static void d40_free_chan_resources(struct dma_chan *chan)
+{
+ struct d40_chan *d40c =
+ container_of(chan, struct d40_chan, chan);
+ int err;
+ unsigned long flags;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ err = d40_free_dma(d40c);
+
+ if (err)
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Failed to free channel\n", __func__);
+ spin_unlock_irqrestore(&d40c->lock, flags);
+}
+
+static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
+ dma_addr_t dst,
+ dma_addr_t src,
+ size_t size,
+ unsigned long flags)
+{
+ struct d40_desc *d40d;
+ struct d40_chan *d40c = container_of(chan, struct d40_chan,
+ chan);
+ unsigned long flg;
+ int err = 0;
+
+ spin_lock_irqsave(&d40c->lock, flg);
+ d40d = d40_desc_get(d40c);
+
+ if (d40d == NULL) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Descriptor is NULL\n", __func__);
+ goto err;
+ }
+
+ memset(d40d, 0, sizeof(struct d40_desc));
+
+ d40d->txd.flags = flags;
+
+ dma_async_tx_descriptor_init(&d40d->txd, chan);
+
+ d40d->txd.tx_submit = d40_tx_submit;
+
+ if (d40c->log_num != D40_PHY_CHAN) {
+
+ if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Out of memory\n", __func__);
+ goto err;
+ }
+ d40d->lli_len = 1;
+
+ d40_log_fill_lli(d40d->lli_log.src,
+ src,
+ size,
+ 0,
+ d40c->log_def.lcsp1,
+ d40c->dma_cfg.src_info.data_width,
+ true, true);
+
+ d40_log_fill_lli(d40d->lli_log.dst,
+ dst,
+ size,
+ 0,
+ d40c->log_def.lcsp3,
+ d40c->dma_cfg.dst_info.data_width,
+ true, true);
+
+ } else {
+
+ if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Out of memory\n", __func__);
+ goto err;
+ }
+
+ err = d40_phy_fill_lli(d40d->lli_phy.src,
+ src,
+ size,
+ d40c->dma_cfg.src_info.psize,
+ 0,
+ d40c->src_def_cfg,
+ true,
+ d40c->dma_cfg.src_info.data_width,
+ false);
+ if (err)
+ goto err_fill_lli;
+
+ err = d40_phy_fill_lli(d40d->lli_phy.dst,
+ dst,
+ size,
+ d40c->dma_cfg.dst_info.psize,
+ 0,
+ d40c->dst_def_cfg,
+ true,
+ d40c->dma_cfg.dst_info.data_width,
+ false);
+
+ if (err)
+ goto err_fill_lli;
+
+ (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
+ d40d->lli_pool.size, DMA_TO_DEVICE);
+ }
+
+ spin_unlock_irqrestore(&d40c->lock, flg);
+ return &d40d->txd;
+
+err_fill_lli:
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Failed filling in PHY LLI\n", __func__);
+ d40_pool_lli_free(d40d);
+err:
+ spin_unlock_irqrestore(&d40c->lock, flg);
+ return NULL;
+}
+
+static int d40_prep_slave_sg_log(struct d40_desc *d40d,
+ struct d40_chan *d40c,
+ struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_data_direction direction,
+ unsigned long flags)
+{
+ dma_addr_t dev_addr = 0;
+ int total_size;
+ int lli_max = d40c->base->plat_data->llis_per_log;
+
+ if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Out of memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ d40d->lli_len = sg_len;
+ d40d->lli_tcount = 0;
+
+ if (sg_len > 1)
+ /*
+ * Check if there is space available in lcla.
+ * If not, split the list into 1-length segments
+ * and run only in lcpa space.
+ */
+ if (d40_lcla_id_get(d40c, &d40c->base->lcla_pool) != 0)
+ lli_max = 1;
+
+ if (direction == DMA_FROM_DEVICE) {
+ if (d40c->amba_addr)
+ dev_addr = d40c->amba_addr;
+ else
+ dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
+ total_size = d40_log_sg_to_dev(&d40c->lcla,
+ sgl, sg_len,
+ &d40d->lli_log,
+ &d40c->log_def,
+ d40c->dma_cfg.src_info.data_width,
+ d40c->dma_cfg.dst_info.data_width,
+ direction,
+ flags & DMA_PREP_INTERRUPT,
+ dev_addr, lli_max,
+ d40c->base->plat_data->llis_per_log);
+ } else if (direction == DMA_TO_DEVICE) {
+ if (d40c->amba_addr)
+ dev_addr = d40c->amba_addr;
+ else
+ dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
+ total_size = d40_log_sg_to_dev(&d40c->lcla,
+ sgl, sg_len,
+ &d40d->lli_log,
+ &d40c->log_def,
+ d40c->dma_cfg.src_info.data_width,
+ d40c->dma_cfg.dst_info.data_width,
+ direction,
+ flags & DMA_PREP_INTERRUPT,
+ dev_addr, lli_max,
+ d40c->base->plat_data->llis_per_log);
+ } else
+ return -EINVAL;
+ if (total_size < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
+ struct d40_chan *d40c,
+ struct scatterlist *sgl,
+ unsigned int sgl_len,
+ enum dma_data_direction direction,
+ unsigned long flags)
+{
+ dma_addr_t src_dev_addr;
+ dma_addr_t dst_dev_addr;
+ int res;
+
+ if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Out of memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ d40d->lli_len = sgl_len;
+ d40d->lli_tcount = 0;
+
+ if (direction == DMA_FROM_DEVICE) {
+ dst_dev_addr = 0;
+ src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
+ } else if (direction == DMA_TO_DEVICE) {
+ dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
+ src_dev_addr = 0;
+ } else
+ return -EINVAL;
+
+ res = d40_phy_sg_to_lli(sgl,
+ sgl_len,
+ src_dev_addr,
+ d40d->lli_phy.src,
+ d40d->lli_phy.src_addr,
+ d40c->src_def_cfg,
+ d40c->dma_cfg.src_info.data_width,
+ d40c->dma_cfg.src_info.psize,
+ true);
+ if (res < 0)
+ return res;
+
+ res = d40_phy_sg_to_lli(sgl,
+ sgl_len,
+ dst_dev_addr,
+ d40d->lli_phy.dst,
+ d40d->lli_phy.dst_addr,
+ d40c->dst_def_cfg,
+ d40c->dma_cfg.dst_info.data_width,
+ d40c->dma_cfg.dst_info.psize,
+ true);
+ if (res < 0)
+ return res;
+
+ (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
+ d40d->lli_pool.size, DMA_TO_DEVICE);
+ return 0;
+}
+
+static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
+ struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_data_direction direction,
+ unsigned long flags)
+{
+ struct d40_desc *d40d;
+ struct d40_chan *d40c = container_of(chan, struct d40_chan,
+ chan);
+ unsigned long flg;
+ int err;
+
+ if (d40c->dma_cfg.pre_transfer)
+ d40c->dma_cfg.pre_transfer(chan,
+ d40c->dma_cfg.pre_transfer_data,
+ sg_dma_len(sgl));
+
+ spin_lock_irqsave(&d40c->lock, flg);
+ d40d = d40_desc_get(d40c);
+ spin_unlock_irqrestore(&d40c->lock, flg);
+
+ if (d40d == NULL)
+ return NULL;
+
+ memset(d40d, 0, sizeof(struct d40_desc));
+
+ if (d40c->log_num != D40_PHY_CHAN)
+ err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
+ direction, flags);
+ else
+ err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
+ direction, flags);
+ if (err) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Failed to prepare %s slave sg job: %d\n",
+ __func__,
+ d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
+ return NULL;
+ }
+
+ d40d->txd.flags = flags;
+
+ dma_async_tx_descriptor_init(&d40d->txd, chan);
+
+ d40d->txd.tx_submit = d40_tx_submit;
+
+ return &d40d->txd;
+}
+
+static enum dma_status d40_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
+ void __iomem *active_reg;
+ dma_cookie_t last_used;
+ dma_cookie_t last_complete;
+ u32 status;
+ int ret;
+
+ last_complete = d40c->completed;
+ last_used = chan->cookie;
+
+ /* check for pause first */
+ if (d40c->phy_chan->num % 2 == 0)
+ active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
+ else
+ active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
+
+ status = (readl(active_reg) &
+ D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
+ D40_CHAN_POS(d40c->phy_chan->num);
+
+ if (status == D40_DMA_SUSPENDED)
+ ret = DMA_PAUSED;
+ else
+ ret = dma_async_is_complete(cookie, last_complete, last_used);
+
+ if (txstate) {
+ txstate->last = last_complete;
+ txstate->used = last_used;
+ txstate->residue = stedma40_residue(chan);
+ }
+
+ return ret;
+}
+
+static void d40_issue_pending(struct dma_chan *chan)
+{
+ struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ /* Busy means that pending jobs are already being processed */
+ if (!d40c->busy)
+ (void) d40_queue_start(d40c);
+
+ spin_unlock_irqrestore(&d40c->lock, flags);
+}
+
+static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
+{
+ unsigned long flags;
+ struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
+
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ spin_lock_irqsave(&d40c->lock, flags);
+ d40_term_all(d40c);
+ spin_unlock_irqrestore(&d40c->lock, flags);
+ return 0;
+ case DMA_PAUSE:
+ return d40_pause(chan);
+ case DMA_RESUME:
+ return d40_resume(chan);
+ }
+
+ /* Other commands are unimplemented */
+ return -ENXIO;
+}
+
+/* PrimeCell DMA extension */
+void dma_set_ambaconfig(struct dma_chan *chan,
+ struct amba_dma_channel_config *config)
+{
+ struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
+ struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
+ enum stedma40_periph_data_width addr_width;
+ int psize;
+
+ switch (config->addr_width) {
+ case 1:
+ addr_width = STEDMA40_BYTE_WIDTH;
+ break;
+ case 2:
+ addr_width = STEDMA40_HALFWORD_WIDTH;
+ break;
+ case 4:
+ addr_width = STEDMA40_WORD_WIDTH;
+ break;
+ case 8:
+ addr_width = STEDMA40_DOUBLEWORD_WIDTH;
+ break;
+ default:
+ dev_err(d40c->base->dev,
+ "illegal peripheral address width requested (%d)\n",
+ config->addr_width);
+ return;
+ }
+
+ if (config->maxburst >= 16)
+ psize = STEDMA40_PSIZE_LOG_16;
+ else if (config->maxburst >= 8)
+ psize = STEDMA40_PSIZE_LOG_8;
+ else if (config->maxburst >= 4)
+ psize = STEDMA40_PSIZE_LOG_4;
+ else
+ psize = STEDMA40_PSIZE_LOG_1;
+
+ if (config->direction == DMA_FROM_DEVICE) {
+ dma_addr_t dev_addr_rx =
+ d40c->base->plat_data->dev_rx[cfg->src_dev_type];
+
+ if (dev_addr_rx)
+ dev_warn(d40c->base->dev,
+ "channel has a pre-wired RX address %08x "
+ "overriding with %08x\n",
+ dev_addr_rx, config->addr);
+ if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
+ dev_warn(d40c->base->dev,
+ "channel was not configured for peripheral "
+ "to memory transfer (%d) overriding\n",
+ cfg->dir);
+ cfg->dir = STEDMA40_PERIPH_TO_MEM;
+ } else if (config->direction == DMA_TO_DEVICE) {
+ dma_addr_t dev_addr_tx =
+ d40c->base->plat_data->dev_tx[cfg->dst_dev_type];
+
+ if (dev_addr_tx)
+ dev_warn(d40c->base->dev,
+ "channel has a pre-wired TX address %08x "
+ "overriding with %08x\n",
+ dev_addr_tx, config->addr);
+ if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
+ dev_warn(d40c->base->dev,
+ "channel was not configured for memory "
+ "to peripheral transfer (%d) overriding\n",
+ cfg->dir);
+ cfg->dir = STEDMA40_MEM_TO_PERIPH;
+ } else {
+ dev_err(d40c->base->dev,
+ "unrecognized channel direction %d\n",
+ config->direction);
+ return;
+ }
+
+ /* Set up all the endpoint configs */
+ cfg->src_info.data_width = addr_width;
+ cfg->src_info.psize = psize;
+ cfg->src_info.endianess = STEDMA40_LITTLE_ENDIAN;
+ cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
+ cfg->dst_info.data_width = addr_width;
+ cfg->dst_info.psize = psize;
+ cfg->dst_info.endianess = STEDMA40_LITTLE_ENDIAN;
+ cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
+
+ /* These settings will take precedence later */
+ d40c->amba_addr = config->addr;
+ d40c->amba_direction = config->direction;
+ dev_dbg(d40c->base->dev,
+ "configured channel %s for %s, data width %d, "
+ "maxburst %d bytes, LE, no flow control\n",
+ dma_chan_name(chan),
+ (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
+ config->addr_width,
+ config->maxburst);
+}
+
+/* Initialization functions */
+
+static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
+ struct d40_chan *chans, int offset,
+ int num_chans)
+{
+ int i = 0;
+ struct d40_chan *d40c;
+
+ INIT_LIST_HEAD(&dma->channels);
+
+ for (i = offset; i < offset + num_chans; i++) {
+ d40c = &chans[i];
+ d40c->base = base;
+ d40c->chan.device = dma;
+
+ /* Invalidate lcla element */
+ d40c->lcla.src_id = -1;
+ d40c->lcla.dst_id = -1;
+
+ spin_lock_init(&d40c->lock);
+
+ d40c->log_num = D40_PHY_CHAN;
+
+ INIT_LIST_HEAD(&d40c->free);
+ INIT_LIST_HEAD(&d40c->active);
+ INIT_LIST_HEAD(&d40c->queue);
+ INIT_LIST_HEAD(&d40c->client);
+
+ d40c->free_len = 0;
+
+ tasklet_init(&d40c->tasklet, dma_tasklet,
+ (unsigned long) d40c);
+
+ list_add_tail(&d40c->chan.device_node,
+ &dma->channels);
+ }
+}
+
+static int __init d40_dmaengine_init(struct d40_base *base,
+ int num_reserved_chans)
+{
+ int err;
+
+ d40_chan_init(base, &base->dma_slave, base->log_chans,
+ 0, base->num_log_chans);
+
+ dma_cap_zero(base->dma_slave.cap_mask);
+ dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
+
+ base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
+ base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
+ base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
+ base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
+ base->dma_slave.device_tx_status = d40_tx_status;
+ base->dma_slave.device_issue_pending = d40_issue_pending;
+ base->dma_slave.device_control = d40_control;
+ base->dma_slave.dev = base->dev;
+
+ err = dma_async_device_register(&base->dma_slave);
+
+ if (err) {
+ dev_err(base->dev,
+ "[%s] Failed to register slave channels\n",
+ __func__);
+ goto failure1;
+ }
+
+ d40_chan_init(base, &base->dma_memcpy, base->log_chans,
+ base->num_log_chans, base->plat_data->memcpy_len);
+
+ dma_cap_zero(base->dma_memcpy.cap_mask);
+ dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
+
+ base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
+ base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
+ base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
+ base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
+ base->dma_memcpy.device_tx_status = d40_tx_status;
+ base->dma_memcpy.device_issue_pending = d40_issue_pending;
+ base->dma_memcpy.device_control = d40_control;
+ base->dma_memcpy.dev = base->dev;
+ /*
+ * This controller can only access addresses at even
+ * 32-bit boundaries, i.e. 2^2 byte alignment.
+ */
+ base->dma_memcpy.copy_align = 2;
+
+ err = dma_async_device_register(&base->dma_memcpy);
+
+ if (err) {
+ dev_err(base->dev,
+ "[%s] Failed to regsiter memcpy only channels\n",
+ __func__);
+ goto failure2;
+ }
+
+ d40_chan_init(base, &base->dma_both, base->phy_chans,
+ 0, num_reserved_chans);
+
+ dma_cap_zero(base->dma_both.cap_mask);
+ dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
+ dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
+
+ base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
+ base->dma_both.device_free_chan_resources = d40_free_chan_resources;
+ base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
+ base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
+ base->dma_both.device_tx_status = d40_tx_status;
+ base->dma_both.device_issue_pending = d40_issue_pending;
+ base->dma_both.device_control = d40_control;
+ base->dma_both.dev = base->dev;
+ base->dma_both.copy_align = 2;
+ err = dma_async_device_register(&base->dma_both);
+
+ if (err) {
+ dev_err(base->dev,
+ "[%s] Failed to register logical and physical capable channels\n",
+ __func__);
+ goto failure3;
+ }
+ return 0;
+failure3:
+ dma_async_device_unregister(&base->dma_memcpy);
+failure2:
+ dma_async_device_unregister(&base->dma_slave);
+failure1:
+ return err;
+}
+
+/* Initialization functions. */
+
+static int __init d40_phy_res_init(struct d40_base *base)
+{
+ int i;
+ int num_phy_chans_avail = 0;
+ u32 val[2];
+ int odd_even_bit = -2;
+
+ val[0] = readl(base->virtbase + D40_DREG_PRSME);
+ val[1] = readl(base->virtbase + D40_DREG_PRSMO);
+
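+ /* Even channels are described in PRSME and odd channels in PRSMO, two bits each */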
+ for (i = 0; i < base->num_phy_chans; i++) {
+ base->phy_res[i].num = i;
+ odd_even_bit += 2 * ((i % 2) == 0);
+ if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
+ /* Mark security only channels as occupied */
+ base->phy_res[i].allocated_src = D40_ALLOC_PHY;
+ base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
+ } else {
+ base->phy_res[i].allocated_src = D40_ALLOC_FREE;
+ base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
+ num_phy_chans_avail++;
+ }
+ spin_lock_init(&base->phy_res[i].lock);
+ }
+ dev_info(base->dev, "%d of %d physical DMA channels available\n",
+ num_phy_chans_avail, base->num_phy_chans);
+
+ /* Verify settings extended vs standard */
+ val[0] = readl(base->virtbase + D40_DREG_PRTYP);
+
+ for (i = 0; i < base->num_phy_chans; i++) {
+
+ if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
+ (val[0] & 0x3) != 1)
+ dev_info(base->dev,
+ "[%s] INFO: channel %d is misconfigured (%d)\n",
+ __func__, i, val[0] & 0x3);
+
+ val[0] = val[0] >> 2;
+ }
+
+ return num_phy_chans_avail;
+}
+
+static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
+{
+ static const struct d40_reg_val dma_id_regs[] = {
+ /* Peripheral Id */
+ { .reg = D40_DREG_PERIPHID0, .val = 0x0040},
+ { .reg = D40_DREG_PERIPHID1, .val = 0x0000},
+ /*
+ * D40_DREG_PERIPHID2 Depends on HW revision:
+ * MOP500/HREF ED has 0x0008,
+ * ? has 0x0018,
+ * HREF V1 has 0x0028
+ */
+ { .reg = D40_DREG_PERIPHID3, .val = 0x0000},
+
+ /* PCell Id */
+ { .reg = D40_DREG_CELLID0, .val = 0x000d},
+ { .reg = D40_DREG_CELLID1, .val = 0x00f0},
+ { .reg = D40_DREG_CELLID2, .val = 0x0005},
+ { .reg = D40_DREG_CELLID3, .val = 0x00b1}
+ };
+ struct stedma40_platform_data *plat_data;
+ struct clk *clk = NULL;
+ void __iomem *virtbase = NULL;
+ struct resource *res = NULL;
+ struct d40_base *base = NULL;
+ int num_log_chans = 0;
+ int num_phy_chans;
+ int i;
+
+ clk = clk_get(&pdev->dev, NULL);
+
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "[%s] No matching clock found\n",
+ __func__);
+ goto failure;
+ }
+
+ clk_enable(clk);
+
+ /* Get IO for DMAC base address */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
+ if (!res)
+ goto failure;
+
+ if (request_mem_region(res->start, resource_size(res),
+ D40_NAME " I/O base") == NULL)
+ goto failure;
+
+ virtbase = ioremap(res->start, resource_size(res));
+ if (!virtbase)
+ goto failure;
+
+ /* HW version check */
+ for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
+ if (dma_id_regs[i].val !=
+ readl(virtbase + dma_id_regs[i].reg)) {
+ dev_err(&pdev->dev,
+ "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
+ __func__,
+ dma_id_regs[i].val,
+ dma_id_regs[i].reg,
+ readl(virtbase + dma_id_regs[i].reg));
+ goto failure;
+ }
+ }
+
+ i = readl(virtbase + D40_DREG_PERIPHID2);
+
+ if ((i & 0xf) != D40_PERIPHID2_DESIGNER) {
+ dev_err(&pdev->dev,
+ "[%s] Unknown designer! Got %x wanted %x\n",
+ __func__, i & 0xf, D40_PERIPHID2_DESIGNER);
+ goto failure;
+ }
+
+ /* The number of physical channels on this HW */
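+ /* ICFG[2:0] appears to encode (number of channels / 4) - 1, i.e. 4..32 */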
+ num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
+
+ dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
+ (i >> 4) & 0xf, res->start);
+
+ plat_data = pdev->dev.platform_data;
+
+ /* Count the number of logical channels in use */
+ for (i = 0; i < plat_data->dev_len; i++)
+ if (plat_data->dev_rx[i] != 0)
+ num_log_chans++;
+
+ for (i = 0; i < plat_data->dev_len; i++)
+ if (plat_data->dev_tx[i] != 0)
+ num_log_chans++;
+
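+ /*
+ * d40_base and all channel descriptors are carved out of one
+ * allocation: phy_chans starts right after the (aligned) base struct
+ * and log_chans follows the physical channels, see below.
+ */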
+ base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
+ (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
+ sizeof(struct d40_chan), GFP_KERNEL);
+
+ if (base == NULL) {
+ dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
+ goto failure;
+ }
+
+ base->clk = clk;
+ base->num_phy_chans = num_phy_chans;
+ base->num_log_chans = num_log_chans;
+ base->phy_start = res->start;
+ base->phy_size = resource_size(res);
+ base->virtbase = virtbase;
+ base->plat_data = plat_data;
+ base->dev = &pdev->dev;
+ base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
+ base->log_chans = &base->phy_chans[num_phy_chans];
+
+ base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
+ GFP_KERNEL);
+ if (!base->phy_res)
+ goto failure;
+
+ base->lookup_phy_chans = kzalloc(num_phy_chans *
+ sizeof(struct d40_chan *),
+ GFP_KERNEL);
+ if (!base->lookup_phy_chans)
+ goto failure;
+
+ if (num_log_chans + plat_data->memcpy_len) {
+ /*
+ * The maximum number of logical channels equals the number of
+ * event lines for all src and dst devices.
+ */
+ base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
+ sizeof(struct d40_chan *),
+ GFP_KERNEL);
+ if (!base->lookup_log_chans)
+ goto failure;
+ }
+ base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32),
+ GFP_KERNEL);
+ if (!base->lcla_pool.alloc_map)
+ goto failure;
+
+ return base;
+
+failure:
+ if (clk) {
+ clk_disable(clk);
+ clk_put(clk);
+ }
+ if (virtbase)
+ iounmap(virtbase);
+ if (res)
+ release_mem_region(res->start,
+ resource_size(res));
+
+ if (base) {
+ kfree(base->lcla_pool.alloc_map);
+ kfree(base->lookup_log_chans);
+ kfree(base->lookup_phy_chans);
+ kfree(base->phy_res);
+ kfree(base);
+ }
+
+ return NULL;
+}
+
+static void __init d40_hw_init(struct d40_base *base)
+{
+
+ static const struct d40_reg_val dma_init_reg[] = {
+ /* Clock every part of the DMA block from start */
+ { .reg = D40_DREG_GCC, .val = 0x0000ff01},
+
+ /* Interrupts on all logical channels */
+ { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
+ };
+ int i;
+ u32 prmseo[2] = {0, 0};
+ u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
+ u32 pcmis = 0;
+ u32 pcicr = 0;
+
+ for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
+ writel(dma_init_reg[i].val,
+ base->virtbase + dma_init_reg[i].reg);
+
+ /* Configure all our dma channels to default settings */
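+ /*
+ * Channels are walked from the highest number down so that, after the
+ * repeated left shifts, the lowest numbered channel ends up in the
+ * least significant bits of each register. Secure (PHY allocated)
+ * channels keep their two ACTIVE bits set and are otherwise skipped.
+ */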
+ for (i = 0; i < base->num_phy_chans; i++) {
+
+ activeo[i % 2] = activeo[i % 2] << 2;
+
+ if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
+ == D40_ALLOC_PHY) {
+ activeo[i % 2] |= 3;
+ continue;
+ }
+
+ /* Enable interrupt # */
+ pcmis = (pcmis << 1) | 1;
+
+ /* Clear interrupt # */
+ pcicr = (pcicr << 1) | 1;
+
+ /* Set channel to physical mode */
+ prmseo[i % 2] = prmseo[i % 2] << 2;
+ prmseo[i % 2] |= 1;
+
+ }
+
+ writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
+ writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
+ writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
+ writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
+
+ /* Write which interrupt to enable */
+ writel(pcmis, base->virtbase + D40_DREG_PCMIS);
+
+ /* Write which interrupt to clear */
+ writel(pcicr, base->virtbase + D40_DREG_PCICR);
+
+}
+
+static int __init d40_probe(struct platform_device *pdev)
+{
+ int err;
+ int ret = -ENOENT;
+ struct d40_base *base;
+ struct resource *res = NULL;
+ int num_reserved_chans;
+ u32 val;
+
+ base = d40_hw_detect_init(pdev);
+
+ if (!base)
+ goto failure;
+
+ num_reserved_chans = d40_phy_res_init(base);
+
+ platform_set_drvdata(pdev, base);
+
+ spin_lock_init(&base->interrupt_lock);
+ spin_lock_init(&base->execmd_lock);
+
+ /* Get IO for logical channel parameter address */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
+ if (!res) {
+ ret = -ENOENT;
+ dev_err(&pdev->dev,
+ "[%s] No \"lcpa\" memory resource\n",
+ __func__);
+ goto failure;
+ }
+ base->lcpa_size = resource_size(res);
+ base->phy_lcpa = res->start;
+
+ if (request_mem_region(res->start, resource_size(res),
+ D40_NAME " I/O lcpa") == NULL) {
+ ret = -EBUSY;
+ dev_err(&pdev->dev,
+ "[%s] Failed to request LCPA region 0x%x-0x%x\n",
+ __func__, res->start, res->end);
+ goto failure;
+ }
+
+ /* We make use of ESRAM memory for this. */
+ val = readl(base->virtbase + D40_DREG_LCPA);
+ if (res->start != val && val != 0) {
+ dev_warn(&pdev->dev,
+ "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
+ __func__, val, res->start);
+ } else
+ writel(res->start, base->virtbase + D40_DREG_LCPA);
+
+ base->lcpa_base = ioremap(res->start, resource_size(res));
+ if (!base->lcpa_base) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev,
+ "[%s] Failed to ioremap LCPA region\n",
+ __func__);
+ goto failure;
+ }
+ /* Get IO for logical channel link address */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcla");
+ if (!res) {
+ ret = -ENOENT;
+ dev_err(&pdev->dev,
+ "[%s] No \"lcla\" resource defined\n",
+ __func__);
+ goto failure;
+ }
+
+ base->lcla_pool.base_size = resource_size(res);
+ base->lcla_pool.phy = res->start;
+
+ if (request_mem_region(res->start, resource_size(res),
+ D40_NAME " I/O lcla") == NULL) {
+ ret = -EBUSY;
+ dev_err(&pdev->dev,
+ "[%s] Failed to request LCLA region 0x%x-0x%x\n",
+ __func__, res->start, res->end);
+ goto failure;
+ }
+ val = readl(base->virtbase + D40_DREG_LCLA);
+ if (res->start != val && val != 0) {
+ dev_warn(&pdev->dev,
+ "[%s] Mismatch LCLA dma 0x%x, def 0x%x\n",
+ __func__, val, res->start);
+ } else
+ writel(res->start, base->virtbase + D40_DREG_LCLA);
+
+ base->lcla_pool.base = ioremap(res->start, resource_size(res));
+ if (!base->lcla_pool.base) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev,
+ "[%s] Failed to ioremap LCLA 0x%x-0x%x\n",
+ __func__, res->start, res->end);
+ goto failure;
+ }
+
+ spin_lock_init(&base->lcla_pool.lock);
+
+ base->lcla_pool.num_blocks = base->num_phy_chans;
+
+ base->irq = platform_get_irq(pdev, 0);
+
+ ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
+
+ if (ret) {
+ dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__);
+ goto failure;
+ }
+
+ err = d40_dmaengine_init(base, num_reserved_chans);
+ if (err)
+ goto failure;
+
+ d40_hw_init(base);
+
+ dev_info(base->dev, "initialized\n");
+ return 0;
+
+failure:
+ if (base) {
+ if (base->virtbase)
+ iounmap(base->virtbase);
+ if (base->lcla_pool.phy)
+ release_mem_region(base->lcla_pool.phy,
+ base->lcla_pool.base_size);
+ if (base->phy_lcpa)
+ release_mem_region(base->phy_lcpa,
+ base->lcpa_size);
+ if (base->phy_start)
+ release_mem_region(base->phy_start,
+ base->phy_size);
+ if (base->clk) {
+ clk_disable(base->clk);
+ clk_put(base->clk);
+ }
+
+ kfree(base->lcla_pool.alloc_map);
+ kfree(base->lookup_log_chans);
+ kfree(base->lookup_phy_chans);
+ kfree(base->phy_res);
+ kfree(base);
+ }
+
+ dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
+ return ret;
+}
+
+static struct platform_driver d40_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = D40_NAME,
+ },
+};
+
+int __init stedma40_init(void)
+{
+ return platform_driver_probe(&d40_driver, d40_probe);
+}
+arch_initcall(stedma40_init);
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
new file mode 100644
index 000000000000..561fdd8a80c1
--- /dev/null
+++ b/drivers/dma/ste_dma40_ll.c
@@ -0,0 +1,454 @@
+/*
+ * drivers/dma/ste_dma40_ll.c
+ *
+ * Copyright (C) ST-Ericsson 2007-2010
+ * License terms: GNU General Public License (GPL) version 2
+ * Author: Per Friden <per.friden@stericsson.com>
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
+ */
+
+#include <linux/kernel.h>
+#include <plat/ste_dma40.h>
+
+#include "ste_dma40_ll.h"
+
+/* Sets up proper LCSP1 and LCSP3 register for a logical channel */
+void d40_log_cfg(struct stedma40_chan_cfg *cfg,
+ u32 *lcsp1, u32 *lcsp3)
+{
+ u32 l3 = 0; /* dst */
+ u32 l1 = 0; /* src */
+
+ /* src is mem? -> increase address pos */
+ if (cfg->dir == STEDMA40_MEM_TO_PERIPH ||
+ cfg->dir == STEDMA40_MEM_TO_MEM)
+ l1 |= 1 << D40_MEM_LCSP1_SCFG_INCR_POS;
+
+ /* dst is mem? -> increase address pos */
+ if (cfg->dir == STEDMA40_PERIPH_TO_MEM ||
+ cfg->dir == STEDMA40_MEM_TO_MEM)
+ l3 |= 1 << D40_MEM_LCSP3_DCFG_INCR_POS;
+
+ /* src is hw? -> master port 1 */
+ if (cfg->dir == STEDMA40_PERIPH_TO_MEM ||
+ cfg->dir == STEDMA40_PERIPH_TO_PERIPH)
+ l1 |= 1 << D40_MEM_LCSP1_SCFG_MST_POS;
+
+ /* dst is hw? -> master port 1 */
+ if (cfg->dir == STEDMA40_MEM_TO_PERIPH ||
+ cfg->dir == STEDMA40_PERIPH_TO_PERIPH)
+ l3 |= 1 << D40_MEM_LCSP3_DCFG_MST_POS;
+
+ l3 |= 1 << D40_MEM_LCSP3_DCFG_TIM_POS;
+ l3 |= 1 << D40_MEM_LCSP3_DCFG_EIM_POS;
+ l3 |= cfg->dst_info.psize << D40_MEM_LCSP3_DCFG_PSIZE_POS;
+ l3 |= cfg->dst_info.data_width << D40_MEM_LCSP3_DCFG_ESIZE_POS;
+ l3 |= 1 << D40_MEM_LCSP3_DTCP_POS;
+
+ l1 |= 1 << D40_MEM_LCSP1_SCFG_EIM_POS;
+ l1 |= cfg->src_info.psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
+ l1 |= cfg->src_info.data_width << D40_MEM_LCSP1_SCFG_ESIZE_POS;
+ l1 |= 1 << D40_MEM_LCSP1_STCP_POS;
+
+ *lcsp1 = l1;
+ *lcsp3 = l3;
+
+}
+
+/* Sets up SRC and DST CFG register for both logical and physical channels */
+void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
+ u32 *src_cfg, u32 *dst_cfg, bool is_log)
+{
+ u32 src = 0;
+ u32 dst = 0;
+
+ if (!is_log) {
+ /* Physical channel */
+ if ((cfg->dir == STEDMA40_PERIPH_TO_MEM) ||
+ (cfg->dir == STEDMA40_PERIPH_TO_PERIPH)) {
+ /* Set master port to 1 */
+ src |= 1 << D40_SREG_CFG_MST_POS;
+ src |= D40_TYPE_TO_EVENT(cfg->src_dev_type);
+
+ if (cfg->src_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL)
+ src |= 1 << D40_SREG_CFG_PHY_TM_POS;
+ else
+ src |= 3 << D40_SREG_CFG_PHY_TM_POS;
+ }
+ if ((cfg->dir == STEDMA40_MEM_TO_PERIPH) ||
+ (cfg->dir == STEDMA40_PERIPH_TO_PERIPH)) {
+ /* Set master port to 1 */
+ dst |= 1 << D40_SREG_CFG_MST_POS;
+ dst |= D40_TYPE_TO_EVENT(cfg->dst_dev_type);
+
+ if (cfg->dst_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL)
+ dst |= 1 << D40_SREG_CFG_PHY_TM_POS;
+ else
+ dst |= 3 << D40_SREG_CFG_PHY_TM_POS;
+ }
+ /* Interrupt on end of transfer for destination */
+ dst |= 1 << D40_SREG_CFG_TIM_POS;
+
+ /* Generate interrupt on error */
+ src |= 1 << D40_SREG_CFG_EIM_POS;
+ dst |= 1 << D40_SREG_CFG_EIM_POS;
+
+ /* PSIZE */
+ if (cfg->src_info.psize != STEDMA40_PSIZE_PHY_1) {
+ src |= 1 << D40_SREG_CFG_PHY_PEN_POS;
+ src |= cfg->src_info.psize << D40_SREG_CFG_PSIZE_POS;
+ }
+ if (cfg->dst_info.psize != STEDMA40_PSIZE_PHY_1) {
+ dst |= 1 << D40_SREG_CFG_PHY_PEN_POS;
+ dst |= cfg->dst_info.psize << D40_SREG_CFG_PSIZE_POS;
+ }
+
+ /* Element size */
+ src |= cfg->src_info.data_width << D40_SREG_CFG_ESIZE_POS;
+ dst |= cfg->dst_info.data_width << D40_SREG_CFG_ESIZE_POS;
+
+ } else {
+ /* Logical channel */
+ dst |= 1 << D40_SREG_CFG_LOG_GIM_POS;
+ src |= 1 << D40_SREG_CFG_LOG_GIM_POS;
+ }
+
+ if (cfg->channel_type & STEDMA40_HIGH_PRIORITY_CHANNEL) {
+ src |= 1 << D40_SREG_CFG_PRI_POS;
+ dst |= 1 << D40_SREG_CFG_PRI_POS;
+ }
+
+ src |= cfg->src_info.endianess << D40_SREG_CFG_LBE_POS;
+ dst |= cfg->dst_info.endianess << D40_SREG_CFG_LBE_POS;
+
+ *src_cfg = src;
+ *dst_cfg = dst;
+}
+
+int d40_phy_fill_lli(struct d40_phy_lli *lli,
+ dma_addr_t data,
+ u32 data_size,
+ int psize,
+ dma_addr_t next_lli,
+ u32 reg_cfg,
+ bool term_int,
+ u32 data_width,
+ bool is_device)
+{
+ int num_elems;
+
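+ /* Burst size in elements: 1 for PSIZE_PHY_1, otherwise 2 << psize */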
+ if (psize == STEDMA40_PSIZE_PHY_1)
+ num_elems = 1;
+ else
+ num_elems = 2 << psize;
+
+ /*
+ * Size is 16 bit. data_width is 8, 16, 32 or 64 bit.
+ * Blocks larger than 64 KiB must be split.
+ */
+ if (data_size > (0xffff << data_width))
+ return -EINVAL;
+
+ /* Must be aligned */
+ if (!IS_ALIGNED(data, 0x1 << data_width))
+ return -EINVAL;
+
+ /* Transfer size can't be smaller than (num_elms * elem_size) */
+ if (data_size < num_elems * (0x1 << data_width))
+ return -EINVAL;
+
+ /* The number of elements, i.e. how many chunks */
+ lli->reg_elt = (data_size >> data_width) << D40_SREG_ELEM_PHY_ECNT_POS;
+
+ /*
+ * Distance to the next element-sized entry.
+ * Usually the size of the element unless you want gaps.
+ */
+ if (!is_device)
+ lli->reg_elt |= (0x1 << data_width) <<
+ D40_SREG_ELEM_PHY_EIDX_POS;
+
+ /* Where the data is */
+ lli->reg_ptr = data;
+ lli->reg_cfg = reg_cfg;
+
+ /* If this scatter list entry is the last one, no next link */
+ if (next_lli == 0)
+ lli->reg_lnk = 0x1 << D40_SREG_LNK_PHY_TCP_POS;
+ else
+ lli->reg_lnk = next_lli;
+
+ /* Set/clear interrupt generation on this link item.*/
+ if (term_int)
+ lli->reg_cfg |= 0x1 << D40_SREG_CFG_TIM_POS;
+ else
+ lli->reg_cfg &= ~(0x1 << D40_SREG_CFG_TIM_POS);
+
+ /* Post link */
+ lli->reg_lnk |= 0 << D40_SREG_LNK_PHY_PRE_POS;
+
+ return 0;
+}
+
+int d40_phy_sg_to_lli(struct scatterlist *sg,
+ int sg_len,
+ dma_addr_t target,
+ struct d40_phy_lli *lli,
+ dma_addr_t lli_phys,
+ u32 reg_cfg,
+ u32 data_width,
+ int psize,
+ bool term_int)
+{
+ int total_size = 0;
+ int i;
+ struct scatterlist *current_sg = sg;
+ dma_addr_t next_lli_phys;
+ dma_addr_t dst;
+ int err = 0;
+
+ for_each_sg(sg, current_sg, sg_len, i) {
+
+ total_size += sg_dma_len(current_sg);
+
+ /* If this scatter list entry is the last one, no next link */
+ if (sg_len - 1 == i)
+ next_lli_phys = 0;
+ else
+ next_lli_phys = ALIGN(lli_phys + (i + 1) *
+ sizeof(struct d40_phy_lli),
+ D40_LLI_ALIGN);
+
+ if (target)
+ dst = target;
+ else
+ dst = sg_phys(current_sg);
+
+ err = d40_phy_fill_lli(&lli[i],
+ dst,
+ sg_dma_len(current_sg),
+ psize,
+ next_lli_phys,
+ reg_cfg,
+ !next_lli_phys,
+ data_width,
+ target == dst);
+ if (err)
+ goto err;
+ }
+
+ return total_size;
+ err:
+ return err;
+}
+
+
+void d40_phy_lli_write(void __iomem *virtbase,
+ u32 phy_chan_num,
+ struct d40_phy_lli *lli_dst,
+ struct d40_phy_lli *lli_src)
+{
+
+ writel(lli_src->reg_cfg, virtbase + D40_DREG_PCBASE +
+ phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSCFG);
+ writel(lli_src->reg_elt, virtbase + D40_DREG_PCBASE +
+ phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
+ writel(lli_src->reg_ptr, virtbase + D40_DREG_PCBASE +
+ phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSPTR);
+ writel(lli_src->reg_lnk, virtbase + D40_DREG_PCBASE +
+ phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSLNK);
+
+ writel(lli_dst->reg_cfg, virtbase + D40_DREG_PCBASE +
+ phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDCFG);
+ writel(lli_dst->reg_elt, virtbase + D40_DREG_PCBASE +
+ phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);
+ writel(lli_dst->reg_ptr, virtbase + D40_DREG_PCBASE +
+ phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDPTR);
+ writel(lli_dst->reg_lnk, virtbase + D40_DREG_PCBASE +
+ phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDLNK);
+
+}
+
+/* DMA logical lli operations */
+
+void d40_log_fill_lli(struct d40_log_lli *lli,
+ dma_addr_t data, u32 data_size,
+ u32 lli_next_off, u32 reg_cfg,
+ u32 data_width,
+ bool term_int, bool addr_inc)
+{
+ lli->lcsp13 = reg_cfg;
+
+ /* The number of elements to transfer */
+ lli->lcsp02 = ((data_size >> data_width) <<
+ D40_MEM_LCSP0_ECNT_POS) & D40_MEM_LCSP0_ECNT_MASK;
+ /* 16 LSBs address of the current element */
+ lli->lcsp02 |= data & D40_MEM_LCSP0_SPTR_MASK;
+ /* 16 MSBs address of the current element */
+ lli->lcsp13 |= data & D40_MEM_LCSP1_SPTR_MASK;
+
+ if (addr_inc)
+ lli->lcsp13 |= D40_MEM_LCSP1_SCFG_INCR_MASK;
+
+ lli->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK;
+ /* If this scatter list entry is the last one, no next link */
+ lli->lcsp13 |= (lli_next_off << D40_MEM_LCSP1_SLOS_POS) &
+ D40_MEM_LCSP1_SLOS_MASK;
+
+ if (term_int)
+ lli->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK;
+ else
+ lli->lcsp13 &= ~D40_MEM_LCSP1_SCFG_TIM_MASK;
+}
+
+int d40_log_sg_to_dev(struct d40_lcla_elem *lcla,
+ struct scatterlist *sg,
+ int sg_len,
+ struct d40_log_lli_bidir *lli,
+ struct d40_def_lcsp *lcsp,
+ u32 src_data_width,
+ u32 dst_data_width,
+ enum dma_data_direction direction,
+ bool term_int, dma_addr_t dev_addr, int max_len,
+ int llis_per_log)
+{
+ int total_size = 0;
+ struct scatterlist *current_sg = sg;
+ int i;
+ u32 next_lli_off_dst;
+ u32 next_lli_off_src;
+
+ next_lli_off_src = 0;
+ next_lli_off_dst = 0;
+
+ for_each_sg(sg, current_sg, sg_len, i) {
+ total_size += sg_dma_len(current_sg);
+
+ /*
+ * If this scatter list entry is the last one or
+ * max length, terminate link.
+ */
+ if (sg_len - 1 == i || ((i+1) % max_len == 0)) {
+ next_lli_off_src = 0;
+ next_lli_off_dst = 0;
+ } else {
+ if (next_lli_off_dst == 0 &&
+ next_lli_off_src == 0) {
+ /* The first lli will be at next_lli_off */
+ next_lli_off_dst = (lcla->dst_id *
+ llis_per_log + 1);
+ next_lli_off_src = (lcla->src_id *
+ llis_per_log + 1);
+ } else {
+ next_lli_off_dst++;
+ next_lli_off_src++;
+ }
+ }
+
+ if (direction == DMA_TO_DEVICE) {
+ d40_log_fill_lli(&lli->src[i],
+ sg_phys(current_sg),
+ sg_dma_len(current_sg),
+ next_lli_off_src,
+ lcsp->lcsp1, src_data_width,
+ term_int && !next_lli_off_src,
+ true);
+ d40_log_fill_lli(&lli->dst[i],
+ dev_addr,
+ sg_dma_len(current_sg),
+ next_lli_off_dst,
+ lcsp->lcsp3, dst_data_width,
+ /* No next == terminal interrupt */
+ term_int && !next_lli_off_dst,
+ false);
+ } else {
+ d40_log_fill_lli(&lli->dst[i],
+ sg_phys(current_sg),
+ sg_dma_len(current_sg),
+ next_lli_off_dst,
+ lcsp->lcsp3, dst_data_width,
+ /* No next == terminal interrupt */
+ term_int && !next_lli_off_dst,
+ true);
+ d40_log_fill_lli(&lli->src[i],
+ dev_addr,
+ sg_dma_len(current_sg),
+ next_lli_off_src,
+ lcsp->lcsp1, src_data_width,
+ term_int && !next_lli_off_src,
+ false);
+ }
+ }
+ return total_size;
+}
+
+int d40_log_sg_to_lli(int lcla_id,
+ struct scatterlist *sg,
+ int sg_len,
+ struct d40_log_lli *lli_sg,
+ u32 lcsp13, /* src or dst*/
+ u32 data_width,
+ bool term_int, int max_len, int llis_per_log)
+{
+ int total_size = 0;
+ struct scatterlist *current_sg = sg;
+ int i;
+ u32 next_lli_off = 0;
+
+ for_each_sg(sg, current_sg, sg_len, i) {
+ total_size += sg_dma_len(current_sg);
+
+ /*
+ * If this scatter list entry is the last one or
+ * max length, terminate link.
+ */
+ if (sg_len - 1 == i || ((i+1) % max_len == 0))
+ next_lli_off = 0;
+ else {
+ if (next_lli_off == 0)
+ /* The first lli will be at next_lli_off */
+ next_lli_off = lcla_id * llis_per_log + 1;
+ else
+ next_lli_off++;
+ }
+
+ d40_log_fill_lli(&lli_sg[i],
+ sg_phys(current_sg),
+ sg_dma_len(current_sg),
+ next_lli_off,
+ lcsp13, data_width,
+ term_int && !next_lli_off,
+ true);
+ }
+ return total_size;
+}
+
+void d40_log_lli_write(struct d40_log_lli_full *lcpa,
+ struct d40_log_lli *lcla_src,
+ struct d40_log_lli *lcla_dst,
+ struct d40_log_lli *lli_dst,
+ struct d40_log_lli *lli_src,
+ int llis_per_log)
+{
+ u32 slos = 0;
+ u32 dlos = 0;
+ int i;
+
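+ /*
+ * The first src/dst LLI pair is written directly into the LCPA; any
+ * following pairs go into the LCLA until a zero SLOS/DLOS link offset
+ * marks the end of the chain.
+ */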
+ lcpa->lcsp0 = lli_src->lcsp02;
+ lcpa->lcsp1 = lli_src->lcsp13;
+ lcpa->lcsp2 = lli_dst->lcsp02;
+ lcpa->lcsp3 = lli_dst->lcsp13;
+
+ slos = lli_src->lcsp13 & D40_MEM_LCSP1_SLOS_MASK;
+ dlos = lli_dst->lcsp13 & D40_MEM_LCSP3_DLOS_MASK;
+
+ for (i = 0; (i < llis_per_log) && slos && dlos; i++) {
+ writel(lli_src[i+1].lcsp02, &lcla_src[i].lcsp02);
+ writel(lli_src[i+1].lcsp13, &lcla_src[i].lcsp13);
+ writel(lli_dst[i+1].lcsp02, &lcla_dst[i].lcsp02);
+ writel(lli_dst[i+1].lcsp13, &lcla_dst[i].lcsp13);
+
+ slos = lli_src[i+1].lcsp13 & D40_MEM_LCSP1_SLOS_MASK;
+ dlos = lli_dst[i+1].lcsp13 & D40_MEM_LCSP3_DLOS_MASK;
+ }
+}
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
new file mode 100644
index 000000000000..2029280cb332
--- /dev/null
+++ b/drivers/dma/ste_dma40_ll.h
@@ -0,0 +1,354 @@
+/*
+ * drivers/dma/ste_dma40_ll.h
+ *
+ * Copyright (C) ST-Ericsson 2007-2010
+ * License terms: GNU General Public License (GPL) version 2
+ * Author: Per Friden <per.friden@stericsson.com>
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
+ */
+#ifndef STE_DMA40_LL_H
+#define STE_DMA40_LL_H
+
+#define D40_DREG_PCBASE 0x400
+#define D40_DREG_PCDELTA (8 * 4)
+#define D40_LLI_ALIGN 16 /* LLI alignment must be 16 bytes. */
+
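+/*
+ * A device type number packs the event group (type / 16) and the
+ * event line within that group (type % 16).
+ */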
+#define D40_TYPE_TO_GROUP(type) (type / 16)
+#define D40_TYPE_TO_EVENT(type) (type % 16)
+
+/* Most bits of the CFG register are the same in log as in phy mode */
+#define D40_SREG_CFG_MST_POS 15
+#define D40_SREG_CFG_TIM_POS 14
+#define D40_SREG_CFG_EIM_POS 13
+#define D40_SREG_CFG_LOG_INCR_POS 12
+#define D40_SREG_CFG_PHY_PEN_POS 12
+#define D40_SREG_CFG_PSIZE_POS 10
+#define D40_SREG_CFG_ESIZE_POS 8
+#define D40_SREG_CFG_PRI_POS 7
+#define D40_SREG_CFG_LBE_POS 6
+#define D40_SREG_CFG_LOG_GIM_POS 5
+#define D40_SREG_CFG_LOG_MFU_POS 4
+#define D40_SREG_CFG_PHY_TM_POS 4
+#define D40_SREG_CFG_PHY_EVTL_POS 0
+
+
+/* Standard channel parameters - basic mode (element register) */
+#define D40_SREG_ELEM_PHY_ECNT_POS 16
+#define D40_SREG_ELEM_PHY_EIDX_POS 0
+
+#define D40_SREG_ELEM_PHY_ECNT_MASK (0xFFFF << D40_SREG_ELEM_PHY_ECNT_POS)
+
+/* Standard channel parameters - basic mode (Link register) */
+#define D40_SREG_LNK_PHY_TCP_POS 0
+#define D40_SREG_LNK_PHY_LMP_POS 1
+#define D40_SREG_LNK_PHY_PRE_POS 2
+/*
+ * Source/destination link address. Contains the
+ * 29-bit, word-aligned byte address of the reload area.
+ */
+#define D40_SREG_LNK_PHYS_LNK_MASK 0xFFFFFFF8UL
+
+/* Standard basic channel logical mode */
+
+/* Element register */
+#define D40_SREG_ELEM_LOG_ECNT_POS 16
+#define D40_SREG_ELEM_LOG_LIDX_POS 8
+#define D40_SREG_ELEM_LOG_LOS_POS 1
+#define D40_SREG_ELEM_LOG_TCP_POS 0
+
+#define D40_SREG_ELEM_LOG_LIDX_MASK (0xFF << D40_SREG_ELEM_LOG_LIDX_POS)
+
+/* Link register */
+#define D40_DEACTIVATE_EVENTLINE 0x0
+#define D40_ACTIVATE_EVENTLINE 0x1
+#define D40_EVENTLINE_POS(i) (2 * i)
+#define D40_EVENTLINE_MASK(i) (0x3 << D40_EVENTLINE_POS(i))
+
+/* Standard basic channel logical params in memory */
+
+/* LCSP0 */
+#define D40_MEM_LCSP0_ECNT_POS 16
+#define D40_MEM_LCSP0_SPTR_POS 0
+
+#define D40_MEM_LCSP0_ECNT_MASK (0xFFFF << D40_MEM_LCSP0_ECNT_POS)
+#define D40_MEM_LCSP0_SPTR_MASK (0xFFFF << D40_MEM_LCSP0_SPTR_POS)
+
+/* LCSP1 */
+#define D40_MEM_LCSP1_SPTR_POS 16
+#define D40_MEM_LCSP1_SCFG_MST_POS 15
+#define D40_MEM_LCSP1_SCFG_TIM_POS 14
+#define D40_MEM_LCSP1_SCFG_EIM_POS 13
+#define D40_MEM_LCSP1_SCFG_INCR_POS 12
+#define D40_MEM_LCSP1_SCFG_PSIZE_POS 10
+#define D40_MEM_LCSP1_SCFG_ESIZE_POS 8
+#define D40_MEM_LCSP1_SLOS_POS 1
+#define D40_MEM_LCSP1_STCP_POS 0
+
+#define D40_MEM_LCSP1_SPTR_MASK (0xFFFF << D40_MEM_LCSP1_SPTR_POS)
+#define D40_MEM_LCSP1_SCFG_TIM_MASK (0x1 << D40_MEM_LCSP1_SCFG_TIM_POS)
+#define D40_MEM_LCSP1_SCFG_INCR_MASK (0x1 << D40_MEM_LCSP1_SCFG_INCR_POS)
+#define D40_MEM_LCSP1_SCFG_PSIZE_MASK (0x3 << D40_MEM_LCSP1_SCFG_PSIZE_POS)
+#define D40_MEM_LCSP1_SLOS_MASK (0x7F << D40_MEM_LCSP1_SLOS_POS)
+#define D40_MEM_LCSP1_STCP_MASK (0x1 << D40_MEM_LCSP1_STCP_POS)
+
+/* LCSP2 */
+#define D40_MEM_LCSP2_ECNT_POS 16
+
+#define D40_MEM_LCSP2_ECNT_MASK (0xFFFF << D40_MEM_LCSP2_ECNT_POS)
+
+/* LCSP3 */
+#define D40_MEM_LCSP3_DCFG_MST_POS 15
+#define D40_MEM_LCSP3_DCFG_TIM_POS 14
+#define D40_MEM_LCSP3_DCFG_EIM_POS 13
+#define D40_MEM_LCSP3_DCFG_INCR_POS 12
+#define D40_MEM_LCSP3_DCFG_PSIZE_POS 10
+#define D40_MEM_LCSP3_DCFG_ESIZE_POS 8
+#define D40_MEM_LCSP3_DLOS_POS 1
+#define D40_MEM_LCSP3_DTCP_POS 0
+
+#define D40_MEM_LCSP3_DLOS_MASK (0x7F << D40_MEM_LCSP3_DLOS_POS)
+#define D40_MEM_LCSP3_DTCP_MASK (0x1 << D40_MEM_LCSP3_DTCP_POS)
+
+
+/* Standard channel parameter register offsets */
+#define D40_CHAN_REG_SSCFG 0x00
+#define D40_CHAN_REG_SSELT 0x04
+#define D40_CHAN_REG_SSPTR 0x08
+#define D40_CHAN_REG_SSLNK 0x0C
+#define D40_CHAN_REG_SDCFG 0x10
+#define D40_CHAN_REG_SDELT 0x14
+#define D40_CHAN_REG_SDPTR 0x18
+#define D40_CHAN_REG_SDLNK 0x1C
+
+/* DMA Register Offsets */
+#define D40_DREG_GCC 0x000
+#define D40_DREG_PRTYP 0x004
+#define D40_DREG_PRSME 0x008
+#define D40_DREG_PRSMO 0x00C
+#define D40_DREG_PRMSE 0x010
+#define D40_DREG_PRMSO 0x014
+#define D40_DREG_PRMOE 0x018
+#define D40_DREG_PRMOO 0x01C
+#define D40_DREG_LCPA 0x020
+#define D40_DREG_LCLA 0x024
+#define D40_DREG_ACTIVE 0x050
+#define D40_DREG_ACTIVO 0x054
+#define D40_DREG_FSEB1 0x058
+#define D40_DREG_FSEB2 0x05C
+#define D40_DREG_PCMIS 0x060
+#define D40_DREG_PCICR 0x064
+#define D40_DREG_PCTIS 0x068
+#define D40_DREG_PCEIS 0x06C
+#define D40_DREG_LCMIS0 0x080
+#define D40_DREG_LCMIS1 0x084
+#define D40_DREG_LCMIS2 0x088
+#define D40_DREG_LCMIS3 0x08C
+#define D40_DREG_LCICR0 0x090
+#define D40_DREG_LCICR1 0x094
+#define D40_DREG_LCICR2 0x098
+#define D40_DREG_LCICR3 0x09C
+#define D40_DREG_LCTIS0 0x0A0
+#define D40_DREG_LCTIS1 0x0A4
+#define D40_DREG_LCTIS2 0x0A8
+#define D40_DREG_LCTIS3 0x0AC
+#define D40_DREG_LCEIS0 0x0B0
+#define D40_DREG_LCEIS1 0x0B4
+#define D40_DREG_LCEIS2 0x0B8
+#define D40_DREG_LCEIS3 0x0BC
+#define D40_DREG_STFU 0xFC8
+#define D40_DREG_ICFG 0xFCC
+#define D40_DREG_PERIPHID0 0xFE0
+#define D40_DREG_PERIPHID1 0xFE4
+#define D40_DREG_PERIPHID2 0xFE8
+#define D40_DREG_PERIPHID3 0xFEC
+#define D40_DREG_CELLID0 0xFF0
+#define D40_DREG_CELLID1 0xFF4
+#define D40_DREG_CELLID2 0xFF8
+#define D40_DREG_CELLID3 0xFFC
+
+/* LLI related structures */
+
+/**
+ * struct d40_phy_lli - The basic configuration registers for each physical
+ * channel.
+ *
+ * @reg_cfg: The configuration register.
+ * @reg_elt: The element register.
+ * @reg_ptr: The pointer register.
+ * @reg_lnk: The link register.
+ *
+ * These registers are set up for both physical and logical transfers.
+ * Note that the bits in each register have different meanings in logical
+ * and physical (standard) mode.
+ *
+ * This struct must be 16-byte aligned and may only contain physical registers
+ * since it will be directly accessed by the DMA.
+ */
+struct d40_phy_lli {
+ u32 reg_cfg;
+ u32 reg_elt;
+ u32 reg_ptr;
+ u32 reg_lnk;
+};
+
+/**
+ * struct d40_phy_lli_bidir - struct for a transfer.
+ *
+ * @src: Register settings for src channel.
+ * @dst: Register settings for dst channel.
+ * @dst_addr: Physical destination address.
+ * @src_addr: Physical source address.
+ *
+ * All DMA transfers have a source and a destination.
+ */
+
+struct d40_phy_lli_bidir {
+ struct d40_phy_lli *src;
+ struct d40_phy_lli *dst;
+ dma_addr_t dst_addr;
+ dma_addr_t src_addr;
+};
+
+
+/**
+ * struct d40_log_lli - logical lli configuration
+ *
+ * @lcsp02: Either maps to register lcsp0 if src or lcsp2 if dst.
+ * @lcsp13: Either maps to register lcsp1 if src or lcsp3 if dst.
+ *
+ * This struct must be 8-byte aligned since it will be accessed directly by
+ * the DMA. Never add any non-hw-mapped registers to this struct.
+ */
+
+struct d40_log_lli {
+ u32 lcsp02;
+ u32 lcsp13;
+};
+
+/**
+ * struct d40_log_lli_bidir - For both src and dst
+ *
+ * @src: pointer to src lli configuration.
+ * @dst: pointer to dst lli configuration.
+ *
+ * You always have a src and a dst when doing DMA transfers.
+ */
+
+struct d40_log_lli_bidir {
+ struct d40_log_lli *src;
+ struct d40_log_lli *dst;
+};
+
+/**
+ * struct d40_log_lli_full - LCPA layout
+ *
+ * @lcsp0: Logical Channel Standard Param 0 - Src.
+ * @lcsp1: Logical Channel Standard Param 1 - Src.
+ * @lcsp2: Logical Channel Standard Param 2 - Dst.
+ * @lcsp3: Logical Channel Standard Param 3 - Dst.
+ *
+ * This struct maps to LCPA physical memory layout. Must map to
+ * the hw.
+ */
+struct d40_log_lli_full {
+ u32 lcsp0;
+ u32 lcsp1;
+ u32 lcsp2;
+ u32 lcsp3;
+};
+
+/**
+ * struct d40_def_lcsp - Default LCSP1 and LCSP3 settings
+ *
+ * @lcsp3: The default configuration for dst.
+ * @lcsp1: The default configuration for src.
+ */
+struct d40_def_lcsp {
+ u32 lcsp3;
+ u32 lcsp1;
+};
+
+/**
+ * struct d40_lcla_elem - Info for one LCLA element.
+ *
+ * @src_id: logical channel src id
+ * @dst_id: logical channel dst id
+ * @src: LCPA-formatted src parameters
+ * @dst: LCPA-formatted dst parameters
+ *
+ */
+struct d40_lcla_elem {
+ int src_id;
+ int dst_id;
+ struct d40_log_lli *src;
+ struct d40_log_lli *dst;
+};
+
+/* Physical channels */
+
+void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
+ u32 *src_cfg, u32 *dst_cfg, bool is_log);
+
+void d40_log_cfg(struct stedma40_chan_cfg *cfg,
+ u32 *lcsp1, u32 *lcsp3);
+
+int d40_phy_sg_to_lli(struct scatterlist *sg,
+ int sg_len,
+ dma_addr_t target,
+ struct d40_phy_lli *lli,
+ dma_addr_t lli_phys,
+ u32 reg_cfg,
+ u32 data_width,
+ int psize,
+ bool term_int);
+
+int d40_phy_fill_lli(struct d40_phy_lli *lli,
+ dma_addr_t data,
+ u32 data_size,
+ int psize,
+ dma_addr_t next_lli,
+ u32 reg_cfg,
+ bool term_int,
+ u32 data_width,
+ bool is_device);
+
+void d40_phy_lli_write(void __iomem *virtbase,
+ u32 phy_chan_num,
+ struct d40_phy_lli *lli_dst,
+ struct d40_phy_lli *lli_src);
+
+/* Logical channels */
+
+void d40_log_fill_lli(struct d40_log_lli *lli,
+ dma_addr_t data, u32 data_size,
+ u32 lli_next_off, u32 reg_cfg,
+ u32 data_width,
+ bool term_int, bool addr_inc);
+
+int d40_log_sg_to_dev(struct d40_lcla_elem *lcla,
+ struct scatterlist *sg,
+ int sg_len,
+ struct d40_log_lli_bidir *lli,
+ struct d40_def_lcsp *lcsp,
+ u32 src_data_width,
+ u32 dst_data_width,
+ enum dma_data_direction direction,
+ bool term_int, dma_addr_t dev_addr, int max_len,
+ int llis_per_log);
+
+void d40_log_lli_write(struct d40_log_lli_full *lcpa,
+ struct d40_log_lli *lcla_src,
+ struct d40_log_lli *lcla_dst,
+ struct d40_log_lli *lli_dst,
+ struct d40_log_lli *lli_src,
+ int llis_per_log);
+
+int d40_log_sg_to_lli(int lcla_id,
+ struct scatterlist *sg,
+ int sg_len,
+ struct d40_log_lli *lli_sg,
+ u32 lcsp13, /* src or dst*/
+ u32 data_width,
+ bool term_int, int max_len, int llis_per_log);
+
+#endif /* STE_DMA40_LL_H */
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
new file mode 100644
index 000000000000..44b346d8d319
--- /dev/null
+++ b/drivers/dma/timb_dma.c
@@ -0,0 +1,859 @@
+/*
+ * timb_dma.c timberdale FPGA DMA driver
+ * Copyright (c) 2010 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* Supports:
+ * Timberdale FPGA DMA engine
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <linux/timb_dma.h>
+
+#define DRIVER_NAME "timb-dma"
+
+/* Global DMA registers */
+#define TIMBDMA_ACR 0x34
+#define TIMBDMA_32BIT_ADDR 0x01
+
+#define TIMBDMA_ISR 0x080000
+#define TIMBDMA_IPR 0x080004
+#define TIMBDMA_IER 0x080008
+
+/* Channel specific registers */
+/* RX instances base addresses are 0x00, 0x40, 0x80 ...
+ * TX instances base addresses are 0x18, 0x58, 0x98 ...
+ */
+#define TIMBDMA_INSTANCE_OFFSET 0x40
+#define TIMBDMA_INSTANCE_TX_OFFSET 0x18
+
+/* RX registers, relative the instance base */
+#define TIMBDMA_OFFS_RX_DHAR 0x00
+#define TIMBDMA_OFFS_RX_DLAR 0x04
+#define TIMBDMA_OFFS_RX_LR 0x0C
+#define TIMBDMA_OFFS_RX_BLR 0x10
+#define TIMBDMA_OFFS_RX_ER 0x14
+#define TIMBDMA_RX_EN 0x01
+/* Bytes per row, a video-specific register
+ * which is placed after the TX registers.
+ */
+#define TIMBDMA_OFFS_RX_BPRR 0x30
+
+/* TX registers, relative the instance base */
+#define TIMBDMA_OFFS_TX_DHAR 0x00
+#define TIMBDMA_OFFS_TX_DLAR 0x04
+#define TIMBDMA_OFFS_TX_BLR 0x0C
+#define TIMBDMA_OFFS_TX_LR 0x14
+
+
+#define TIMB_DMA_DESC_SIZE 8
+
+struct timb_dma_desc {
+ struct list_head desc_node;
+ struct dma_async_tx_descriptor txd;
+ u8 *desc_list;
+ unsigned int desc_list_len;
+ bool interrupt;
+};
+
+struct timb_dma_chan {
+ struct dma_chan chan;
+ void __iomem *membase;
+ spinlock_t lock; /* Used to protect data structures,
+ especially the lists and descriptors,
+ from races between the tasklet and calls
+ from above */
+ dma_cookie_t last_completed_cookie;
+ bool ongoing;
+ struct list_head active_list;
+ struct list_head queue;
+ struct list_head free_list;
+ unsigned int bytes_per_line;
+ enum dma_data_direction direction;
+ unsigned int descs; /* Descriptors to allocate */
+ unsigned int desc_elems; /* number of elems per descriptor */
+};
+
+struct timb_dma {
+ struct dma_device dma;
+ void __iomem *membase;
+ struct tasklet_struct tasklet;
+ struct timb_dma_chan channels[0];
+};
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+ return &chan->dev->device;
+}
+
+static struct device *chan2dmadev(struct dma_chan *chan)
+{
+ return chan2dev(chan)->parent->parent;
+}
+
+static struct timb_dma *tdchantotd(struct timb_dma_chan *td_chan)
+{
+ int id = td_chan->chan.chan_id;
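+ /*
+ * channels[] is a flexible array at the end of struct timb_dma, so
+ * stepping back over the preceding channels and the struct itself
+ * recovers the containing timb_dma.
+ */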
+ return (struct timb_dma *)((u8 *)td_chan -
+ id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
+}
+
+/* Must be called with the spinlock held */
+static void __td_enable_chan_irq(struct timb_dma_chan *td_chan)
+{
+ int id = td_chan->chan.chan_id;
+ struct timb_dma *td = tdchantotd(td_chan);
+ u32 ier;
+
+ /* enable interrupt for this channel */
+ ier = ioread32(td->membase + TIMBDMA_IER);
+ ier |= 1 << id;
+ dev_dbg(chan2dev(&td_chan->chan), "Enabling irq: %d, IER: 0x%x\n", id,
+ ier);
+ iowrite32(ier, td->membase + TIMBDMA_IER);
+}
+
+/* Should be called with the spinlock held */
+static bool __td_dma_done_ack(struct timb_dma_chan *td_chan)
+{
+ int id = td_chan->chan.chan_id;
+ struct timb_dma *td = (struct timb_dma *)((u8 *)td_chan -
+ id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
+ u32 isr;
+ bool done = false;
+
+ dev_dbg(chan2dev(&td_chan->chan), "Checking irq: %d, td: %p\n", id, td);
+
+ isr = ioread32(td->membase + TIMBDMA_ISR) & (1 << id);
+ if (isr) {
+ iowrite32(isr, td->membase + TIMBDMA_ISR);
+ done = true;
+ }
+
+ return done;
+}
+
+static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc,
+ bool single)
+{
+ dma_addr_t addr;
+ int len;
+
+ addr = (dma_desc[7] << 24) | (dma_desc[6] << 16) | (dma_desc[5] << 8) |
+ dma_desc[4];
+
+ len = (dma_desc[3] << 8) | dma_desc[2];
+
+ if (single)
+ dma_unmap_single(chan2dev(&td_chan->chan), addr, len,
+ td_chan->direction);
+ else
+ dma_unmap_page(chan2dev(&td_chan->chan), addr, len,
+ td_chan->direction);
+}
+
+static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single)
+{
+ struct timb_dma_chan *td_chan = container_of(td_desc->txd.chan,
+ struct timb_dma_chan, chan);
+ u8 *descs;
+
+ for (descs = td_desc->desc_list; ; descs += TIMB_DMA_DESC_SIZE) {
+ __td_unmap_desc(td_chan, descs, single);
+ if (descs[0] & 0x02)
+ break;
+ }
+}
+
+static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc,
+ struct scatterlist *sg, bool last)
+{
+ if (sg_dma_len(sg) > USHORT_MAX) {
+ dev_err(chan2dev(&td_chan->chan), "Too big sg element\n");
+ return -EINVAL;
+ }
+
+ /* length must be word aligned */
+ if (sg_dma_len(sg) % sizeof(u32)) {
+ dev_err(chan2dev(&td_chan->chan), "Incorrect length: %d\n",
+ sg_dma_len(sg));
+ return -EINVAL;
+ }
+
+ dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: %p\n",
+ dma_desc, (void *)sg_dma_address(sg));
+
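+ /*
+ * 8-byte HW descriptor layout as built here: bytes 4-7 hold the
+ * 32-bit buffer address (LSB first), bytes 2-3 the 16-bit length
+ * (LSB first), byte 1 is reserved and byte 0 carries control flags.
+ */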
+ dma_desc[7] = (sg_dma_address(sg) >> 24) & 0xff;
+ dma_desc[6] = (sg_dma_address(sg) >> 16) & 0xff;
+ dma_desc[5] = (sg_dma_address(sg) >> 8) & 0xff;
+ dma_desc[4] = (sg_dma_address(sg) >> 0) & 0xff;
+
+ dma_desc[3] = (sg_dma_len(sg) >> 8) & 0xff;
+ dma_desc[2] = (sg_dma_len(sg) >> 0) & 0xff;
+
+ dma_desc[1] = 0x00;
+ dma_desc[0] = 0x21 | (last ? 0x02 : 0); /* tran, valid */
+
+ return 0;
+}
+
+/* Must be called with the spinlock held */
+static void __td_start_dma(struct timb_dma_chan *td_chan)
+{
+ struct timb_dma_desc *td_desc;
+
+ if (td_chan->ongoing) {
+ dev_err(chan2dev(&td_chan->chan),
+ "Transfer already ongoing\n");
+ return;
+ }
+
+ td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
+ desc_node);
+
+ dev_dbg(chan2dev(&td_chan->chan),
+ "td_chan: %p, chan: %d, membase: %p\n",
+ td_chan, td_chan->chan.chan_id, td_chan->membase);
+
+ if (td_chan->direction == DMA_FROM_DEVICE) {
+
+ /* descriptor address */
+ iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR);
+ iowrite32(td_desc->txd.phys, td_chan->membase +
+ TIMBDMA_OFFS_RX_DLAR);
+ /* Bytes per line */
+ iowrite32(td_chan->bytes_per_line, td_chan->membase +
+ TIMBDMA_OFFS_RX_BPRR);
+ /* enable RX */
+ iowrite32(TIMBDMA_RX_EN, td_chan->membase + TIMBDMA_OFFS_RX_ER);
+ } else {
+ /* address high */
+ iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DHAR);
+ iowrite32(td_desc->txd.phys, td_chan->membase +
+ TIMBDMA_OFFS_TX_DLAR);
+ }
+
+ td_chan->ongoing = true;
+
+ if (td_desc->interrupt)
+ __td_enable_chan_irq(td_chan);
+}
+
+static void __td_finish(struct timb_dma_chan *td_chan)
+{
+ dma_async_tx_callback callback;
+ void *param;
+ struct dma_async_tx_descriptor *txd;
+ struct timb_dma_desc *td_desc;
+
+ /* can happen if the descriptor is canceled */
+ if (list_empty(&td_chan->active_list))
+ return;
+
+ td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
+ desc_node);
+ txd = &td_desc->txd;
+
+ dev_dbg(chan2dev(&td_chan->chan), "descriptor %u complete\n",
+ txd->cookie);
+
+ /* make sure to stop the transfer */
+ if (td_chan->direction == DMA_FROM_DEVICE)
+ iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER);
+/* Currently no support for stopping DMA transfers
+ else
+ iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR);
+*/
+ td_chan->last_completed_cookie = txd->cookie;
+ td_chan->ongoing = false;
+
+ callback = txd->callback;
+ param = txd->callback_param;
+
+ list_move(&td_desc->desc_node, &td_chan->free_list);
+
+ if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
+ __td_unmap_descs(td_desc,
+ txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE);
+
+ /*
+ * The API requires that no submissions are done from a
+ * callback, so we don't need to drop the lock here
+ */
+ if (callback)
+ callback(param);
+}
+
+static u32 __td_ier_mask(struct timb_dma *td)
+{
+ int i;
+ u32 ret = 0;
+
+ for (i = 0; i < td->dma.chancnt; i++) {
+ struct timb_dma_chan *td_chan = td->channels + i;
+ if (td_chan->ongoing) {
+ struct timb_dma_desc *td_desc =
+ list_entry(td_chan->active_list.next,
+ struct timb_dma_desc, desc_node);
+ if (td_desc->interrupt)
+ ret |= 1 << i;
+ }
+ }
+
+ return ret;
+}
+
+static void __td_start_next(struct timb_dma_chan *td_chan)
+{
+ struct timb_dma_desc *td_desc;
+
+ BUG_ON(list_empty(&td_chan->queue));
+ BUG_ON(td_chan->ongoing);
+
+ td_desc = list_entry(td_chan->queue.next, struct timb_dma_desc,
+ desc_node);
+
+ dev_dbg(chan2dev(&td_chan->chan), "%s: started %u\n",
+ __func__, td_desc->txd.cookie);
+
+ list_move(&td_desc->desc_node, &td_chan->active_list);
+ __td_start_dma(td_chan);
+}
+
+static dma_cookie_t td_tx_submit(struct dma_async_tx_descriptor *txd)
+{
+ struct timb_dma_desc *td_desc = container_of(txd, struct timb_dma_desc,
+ txd);
+ struct timb_dma_chan *td_chan = container_of(txd->chan,
+ struct timb_dma_chan, chan);
+ dma_cookie_t cookie;
+
+ spin_lock_bh(&td_chan->lock);
+
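+ /*
+ * Cookies increase monotonically and wrap back to 1; values <= 0
+ * are reserved by the dmaengine API for error/status codes.
+ */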
+ cookie = txd->chan->cookie;
+ if (++cookie < 0)
+ cookie = 1;
+ txd->chan->cookie = cookie;
+ txd->cookie = cookie;
+
+ if (list_empty(&td_chan->active_list)) {
+ dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__,
+ txd->cookie);
+ list_add_tail(&td_desc->desc_node, &td_chan->active_list);
+ __td_start_dma(td_chan);
+ } else {
+ dev_dbg(chan2dev(txd->chan), "tx_submit: queued %u\n",
+ txd->cookie);
+
+ list_add_tail(&td_desc->desc_node, &td_chan->queue);
+ }
+
+ spin_unlock_bh(&td_chan->lock);
+
+ return cookie;
+}
+
+static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan)
+{
+ struct dma_chan *chan = &td_chan->chan;
+ struct timb_dma_desc *td_desc;
+ int err;
+
+ td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL);
+ if (!td_desc) {
+ dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
+ goto err;
+ }
+
+ td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE;
+
+ td_desc->desc_list = kzalloc(td_desc->desc_list_len, GFP_KERNEL);
+ if (!td_desc->desc_list) {
+ dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
+ goto err;
+ }
+
+ dma_async_tx_descriptor_init(&td_desc->txd, chan);
+ td_desc->txd.tx_submit = td_tx_submit;
+ td_desc->txd.flags = DMA_CTRL_ACK;
+
+ td_desc->txd.phys = dma_map_single(chan2dmadev(chan),
+ td_desc->desc_list, td_desc->desc_list_len, DMA_TO_DEVICE);
+
+ err = dma_mapping_error(chan2dmadev(chan), td_desc->txd.phys);
+ if (err) {
+ dev_err(chan2dev(chan), "DMA mapping error: %d\n", err);
+ goto err;
+ }
+
+ return td_desc;
+err:
+ kfree(td_desc->desc_list);
+ kfree(td_desc);
+
+ return NULL;
+
+}
+
+static void td_free_desc(struct timb_dma_desc *td_desc)
+{
+ dev_dbg(chan2dev(td_desc->txd.chan), "Freeing desc: %p\n", td_desc);
+ dma_unmap_single(chan2dmadev(td_desc->txd.chan), td_desc->txd.phys,
+ td_desc->desc_list_len, DMA_TO_DEVICE);
+
+ kfree(td_desc->desc_list);
+ kfree(td_desc);
+}
+
+static void td_desc_put(struct timb_dma_chan *td_chan,
+ struct timb_dma_desc *td_desc)
+{
+ dev_dbg(chan2dev(&td_chan->chan), "Putting desc: %p\n", td_desc);
+
+ spin_lock_bh(&td_chan->lock);
+ list_add(&td_desc->desc_node, &td_chan->free_list);
+ spin_unlock_bh(&td_chan->lock);
+}
+
+static struct timb_dma_desc *td_desc_get(struct timb_dma_chan *td_chan)
+{
+ struct timb_dma_desc *td_desc, *_td_desc;
+ struct timb_dma_desc *ret = NULL;
+
+ spin_lock_bh(&td_chan->lock);
+ list_for_each_entry_safe(td_desc, _td_desc, &td_chan->free_list,
+ desc_node) {
+ if (async_tx_test_ack(&td_desc->txd)) {
+ list_del(&td_desc->desc_node);
+ ret = td_desc;
+ break;
+ }
+ dev_dbg(chan2dev(&td_chan->chan), "desc %p not ACKed\n",
+ td_desc);
+ }
+ spin_unlock_bh(&td_chan->lock);
+
+ return ret;
+}
+
+static int td_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct timb_dma_chan *td_chan =
+ container_of(chan, struct timb_dma_chan, chan);
+ int i;
+
+ dev_dbg(chan2dev(chan), "%s: entry\n", __func__);
+
+ BUG_ON(!list_empty(&td_chan->free_list));
+ for (i = 0; i < td_chan->descs; i++) {
+ struct timb_dma_desc *td_desc = td_alloc_init_desc(td_chan);
+ if (!td_desc) {
+ if (i)
+ break;
+ else {
+ dev_err(chan2dev(chan),
+ "Couldnt allocate any descriptors\n");
+ return -ENOMEM;
+ }
+ }
+
+ td_desc_put(td_chan, td_desc);
+ }
+
+ spin_lock_bh(&td_chan->lock);
+ td_chan->last_completed_cookie = 1;
+ chan->cookie = 1;
+ spin_unlock_bh(&td_chan->lock);
+
+ return 0;
+}
+
+static void td_free_chan_resources(struct dma_chan *chan)
+{
+ struct timb_dma_chan *td_chan =
+ container_of(chan, struct timb_dma_chan, chan);
+ struct timb_dma_desc *td_desc, *_td_desc;
+ LIST_HEAD(list);
+
+ dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
+
+ /* check that all descriptors are free */
+ BUG_ON(!list_empty(&td_chan->active_list));
+ BUG_ON(!list_empty(&td_chan->queue));
+
+ spin_lock_bh(&td_chan->lock);
+ list_splice_init(&td_chan->free_list, &list);
+ spin_unlock_bh(&td_chan->lock);
+
+ list_for_each_entry_safe(td_desc, _td_desc, &list, desc_node) {
+ dev_dbg(chan2dev(chan), "%s: Freeing desc: %p\n", __func__,
+ td_desc);
+ td_free_desc(td_desc);
+ }
+}
+
+static enum dma_status td_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct timb_dma_chan *td_chan =
+ container_of(chan, struct timb_dma_chan, chan);
+ dma_cookie_t last_used;
+ dma_cookie_t last_complete;
+ int ret;
+
+ dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
+
+ last_complete = td_chan->last_completed_cookie;
+ last_used = chan->cookie;
+
+ ret = dma_async_is_complete(cookie, last_complete, last_used);
+
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
+
+ dev_dbg(chan2dev(chan),
+ "%s: exit, ret: %d, last_complete: %d, last_used: %d\n",
+ __func__, ret, last_complete, last_used);
+
+ return ret;
+}
+
+static void td_issue_pending(struct dma_chan *chan)
+{
+ struct timb_dma_chan *td_chan =
+ container_of(chan, struct timb_dma_chan, chan);
+
+ dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
+ spin_lock_bh(&td_chan->lock);
+
+ if (!list_empty(&td_chan->active_list))
+ /* transfer ongoing */
+ if (__td_dma_done_ack(td_chan))
+ __td_finish(td_chan);
+
+ if (list_empty(&td_chan->active_list) && !list_empty(&td_chan->queue))
+ __td_start_next(td_chan);
+
+ spin_unlock_bh(&td_chan->lock);
+}
+
+static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
+ struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_data_direction direction, unsigned long flags)
+{
+ struct timb_dma_chan *td_chan =
+ container_of(chan, struct timb_dma_chan, chan);
+ struct timb_dma_desc *td_desc;
+ struct scatterlist *sg;
+ unsigned int i;
+ unsigned int desc_usage = 0;
+
+ if (!sgl || !sg_len) {
+ dev_err(chan2dev(chan), "%s: No SG list\n", __func__);
+ return NULL;
+ }
+
+ /* even channels are for RX, odd for TX */
+ if (td_chan->direction != direction) {
+ dev_err(chan2dev(chan),
+ "Requesting channel in wrong direction\n");
+ return NULL;
+ }
+
+ td_desc = td_desc_get(td_chan);
+ if (!td_desc) {
+ dev_err(chan2dev(chan), "Not enough descriptors available\n");
+ return NULL;
+ }
+
+ td_desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ int err;
+ if (desc_usage > td_desc->desc_list_len) {
+ dev_err(chan2dev(chan), "No descriptor space\n");
+ return NULL;
+ }
+
+ err = td_fill_desc(td_chan, td_desc->desc_list + desc_usage, sg,
+ i == (sg_len - 1));
+ if (err) {
+ dev_err(chan2dev(chan), "Failed to update desc: %d\n",
+ err);
+ td_desc_put(td_chan, td_desc);
+ return NULL;
+ }
+ desc_usage += TIMB_DMA_DESC_SIZE;
+ }
+
+ dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
+ td_desc->desc_list_len, DMA_TO_DEVICE);
+
+ return &td_desc->txd;
+}
+
+static int td_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
+{
+ struct timb_dma_chan *td_chan =
+ container_of(chan, struct timb_dma_chan, chan);
+ struct timb_dma_desc *td_desc, *_td_desc;
+
+ dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
+
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
+ /* first the easy part, put the queue into the free list */
+ spin_lock_bh(&td_chan->lock);
+ list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue,
+ desc_node)
+ list_move(&td_desc->desc_node, &td_chan->free_list);
+
+ /* now tear down the running transfer */
+ __td_finish(td_chan);
+ spin_unlock_bh(&td_chan->lock);
+
+ return 0;
+}
+
+static void td_tasklet(unsigned long data)
+{
+ struct timb_dma *td = (struct timb_dma *)data;
+ u32 isr;
+ u32 ipr;
+ u32 ier;
+ int i;
+
+ isr = ioread32(td->membase + TIMBDMA_ISR);
+ ipr = isr & __td_ier_mask(td);
+
+ /* ack the interrupts */
+ iowrite32(ipr, td->membase + TIMBDMA_ISR);
+
+ for (i = 0; i < td->dma.chancnt; i++)
+ if (ipr & (1 << i)) {
+ struct timb_dma_chan *td_chan = td->channels + i;
+ spin_lock(&td_chan->lock);
+ __td_finish(td_chan);
+ if (!list_empty(&td_chan->queue))
+ __td_start_next(td_chan);
+ spin_unlock(&td_chan->lock);
+ }
+
+ ier = __td_ier_mask(td);
+ iowrite32(ier, td->membase + TIMBDMA_IER);
+}
+
+
+static irqreturn_t td_irq(int irq, void *devid)
+{
+ struct timb_dma *td = devid;
+ u32 ipr = ioread32(td->membase + TIMBDMA_IPR);
+
+ if (ipr) {
+ /* disable interrupts, will be re-enabled in tasklet */
+ iowrite32(0, td->membase + TIMBDMA_IER);
+
+ tasklet_schedule(&td->tasklet);
+
+ return IRQ_HANDLED;
+ } else
+ return IRQ_NONE;
+}
+
+
+static int __devinit td_probe(struct platform_device *pdev)
+{
+ struct timb_dma_platform_data *pdata = pdev->dev.platform_data;
+ struct timb_dma *td;
+ struct resource *iomem;
+ int irq;
+ int err;
+ int i;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform data\n");
+ return -EINVAL;
+ }
+
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iomem)
+ return -EINVAL;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ if (!request_mem_region(iomem->start, resource_size(iomem),
+ DRIVER_NAME))
+ return -EBUSY;
+
+ td = kzalloc(sizeof(struct timb_dma) +
+ sizeof(struct timb_dma_chan) * pdata->nr_channels, GFP_KERNEL);
+ if (!td) {
+ err = -ENOMEM;
+ goto err_release_region;
+ }
+
+ dev_dbg(&pdev->dev, "Allocated TD: %p\n", td);
+
+ td->membase = ioremap(iomem->start, resource_size(iomem));
+ if (!td->membase) {
+ dev_err(&pdev->dev, "Failed to remap I/O memory\n");
+ err = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ /* 32bit addressing */
+ iowrite32(TIMBDMA_32BIT_ADDR, td->membase + TIMBDMA_ACR);
+
+ /* disable and clear any interrupts */
+ iowrite32(0x0, td->membase + TIMBDMA_IER);
+ iowrite32(0xFFFFFFFF, td->membase + TIMBDMA_ISR);
+
+ tasklet_init(&td->tasklet, td_tasklet, (unsigned long)td);
+
+ err = request_irq(irq, td_irq, IRQF_SHARED, DRIVER_NAME, td);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to request IRQ\n");
+ goto err_tasklet_kill;
+ }
+
+ td->dma.device_alloc_chan_resources = td_alloc_chan_resources;
+ td->dma.device_free_chan_resources = td_free_chan_resources;
+ td->dma.device_tx_status = td_tx_status;
+ td->dma.device_issue_pending = td_issue_pending;
+
+ dma_cap_set(DMA_SLAVE, td->dma.cap_mask);
+ dma_cap_set(DMA_PRIVATE, td->dma.cap_mask);
+ td->dma.device_prep_slave_sg = td_prep_slave_sg;
+ td->dma.device_control = td_control;
+
+ td->dma.dev = &pdev->dev;
+
+ INIT_LIST_HEAD(&td->dma.channels);
+
+ for (i = 0; i < pdata->nr_channels; i++, td->dma.chancnt++) {
+ struct timb_dma_chan *td_chan = &td->channels[i];
+ struct timb_dma_platform_data_channel *pchan =
+ pdata->channels + i;
+
+ /* even channels are RX, odd are TX */
+ if (((i % 2) && pchan->rx) || (!(i % 2) && !pchan->rx)) {
+ dev_err(&pdev->dev, "Wrong channel configuration\n");
+ err = -EINVAL;
+ goto err_tasklet_kill;
+ }
+
+ td_chan->chan.device = &td->dma;
+ td_chan->chan.cookie = 1;
+ td_chan->chan.chan_id = i;
+ spin_lock_init(&td_chan->lock);
+ INIT_LIST_HEAD(&td_chan->active_list);
+ INIT_LIST_HEAD(&td_chan->queue);
+ INIT_LIST_HEAD(&td_chan->free_list);
+
+ td_chan->descs = pchan->descriptors;
+ td_chan->desc_elems = pchan->descriptor_elements;
+ td_chan->bytes_per_line = pchan->bytes_per_line;
+ td_chan->direction = pchan->rx ? DMA_FROM_DEVICE :
+ DMA_TO_DEVICE;
+
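+ /*
+ * Each RX/TX pair shares one register instance: RX registers sit at
+ * the instance base, TX registers at +TIMBDMA_INSTANCE_TX_OFFSET, per
+ * the register map described at the top of this file.
+ */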
+ td_chan->membase = td->membase +
+ (i / 2) * TIMBDMA_INSTANCE_OFFSET +
+ (pchan->rx ? 0 : TIMBDMA_INSTANCE_TX_OFFSET);
+
+ dev_dbg(&pdev->dev, "Chan: %d, membase: %p\n",
+ i, td_chan->membase);
+
+ list_add_tail(&td_chan->chan.device_node, &td->dma.channels);
+ }
+
+ err = dma_async_device_register(&td->dma);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register async device\n");
+ goto err_free_irq;
+ }
+
+ platform_set_drvdata(pdev, td);
+
+ dev_dbg(&pdev->dev, "Probe result: %d\n", err);
+ return err;
+
+err_free_irq:
+ free_irq(irq, td);
+err_tasklet_kill:
+ tasklet_kill(&td->tasklet);
+ iounmap(td->membase);
+err_free_mem:
+ kfree(td);
+err_release_region:
+ release_mem_region(iomem->start, resource_size(iomem));
+
+ return err;
+
+}
+
+static int __devexit td_remove(struct platform_device *pdev)
+{
+ struct timb_dma *td = platform_get_drvdata(pdev);
+ struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ int irq = platform_get_irq(pdev, 0);
+
+ dma_async_device_unregister(&td->dma);
+ free_irq(irq, td);
+ tasklet_kill(&td->tasklet);
+ iounmap(td->membase);
+ kfree(td);
+ release_mem_region(iomem->start, resource_size(iomem));
+
+ platform_set_drvdata(pdev, NULL);
+
+ dev_dbg(&pdev->dev, "Removed...\n");
+ return 0;
+}
+
+static struct platform_driver td_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = td_probe,
+ .remove = __exit_p(td_remove),
+};
+
+static int __init td_init(void)
+{
+ return platform_driver_register(&td_driver);
+}
+module_init(td_init);
+
+static void __exit td_exit(void)
+{
+ platform_driver_unregister(&td_driver);
+}
+module_exit(td_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Timberdale DMA controller driver");
+MODULE_AUTHOR("Pelagicore AB <info@pelagicore.com>");
+MODULE_ALIAS("platform:"DRIVER_NAME);
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 75fcf1ac8bb7..1275c96e4c1e 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -938,12 +938,16 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
return &first->txd;
}
-static void txx9dmac_terminate_all(struct dma_chan *chan)
+static int txx9dmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
{
struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
struct txx9dmac_desc *desc, *_desc;
LIST_HEAD(list);
+ /* Only supports DMA_TERMINATE_ALL */
+ if (cmd != DMA_TERMINATE_ALL)
+ return -EINVAL;
+
dev_vdbg(chan2dev(chan), "terminate_all\n");
spin_lock_bh(&dc->lock);
@@ -958,12 +962,13 @@ static void txx9dmac_terminate_all(struct dma_chan *chan)
/* Flush all pending and queued descriptors */
list_for_each_entry_safe(desc, _desc, &list, desc_node)
txx9dmac_descriptor_complete(dc, desc);
+
+ return 0;
}
static enum dma_status
-txx9dmac_is_tx_complete(struct dma_chan *chan,
- dma_cookie_t cookie,
- dma_cookie_t *done, dma_cookie_t *used)
+txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
{
struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
dma_cookie_t last_used;
@@ -985,10 +990,7 @@ txx9dmac_is_tx_complete(struct dma_chan *chan,
ret = dma_async_is_complete(cookie, last_complete, last_used);
}
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
return ret;
}
@@ -1153,8 +1155,8 @@ static int __init txx9dmac_chan_probe(struct platform_device *pdev)
dc->dma.dev = &pdev->dev;
dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources;
dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources;
- dc->dma.device_terminate_all = txx9dmac_terminate_all;
- dc->dma.device_is_tx_complete = txx9dmac_is_tx_complete;
+ dc->dma.device_control = txx9dmac_control;
+ dc->dma.device_tx_status = txx9dmac_tx_status;
dc->dma.device_issue_pending = txx9dmac_issue_pending;
if (pdata && pdata->memcpy_chan == ch) {
dc->dma.device_prep_dma_memcpy = txx9dmac_prep_dma_memcpy;
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 55c9c59b3f71..aedef7941b22 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -69,6 +69,9 @@ config EDAC_MM_EDAC
occurred so that a particular failing memory module can be
replaced. If unsure, select 'Y'.
+config EDAC_MCE
+ bool
+
config EDAC_AMD64
tristate "AMD64 (Opteron, Athlon64) K8, F10h, F11h"
depends on EDAC_MM_EDAC && K8_NB && X86_64 && PCI && EDAC_DECODE_MCE
@@ -166,6 +169,16 @@ config EDAC_I5400
Support for error detection and correction the Intel
i5400 MCH chipset (Seaburg).
+config EDAC_I7CORE
+ tristate "Intel i7 Core (Nehalem) processors"
+ depends on EDAC_MM_EDAC && PCI && X86
+ select EDAC_MCE
+ help
+ Support for error detection and correction on the Intel
+ i7 Core (Nehalem) Integrated Memory Controller that exists on
+ newer processors like i7 Core, i7 Core Extreme, Xeon 35xx
+ and Xeon 55xx processors.
+
config EDAC_I82860
tristate "Intel 82860"
depends on EDAC_MM_EDAC && PCI && X86_32
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index bc5dc232a0fb..ca6b1bb24ccc 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -8,6 +8,7 @@
obj-$(CONFIG_EDAC) := edac_stub.o
obj-$(CONFIG_EDAC_MM_EDAC) += edac_core.o
+obj-$(CONFIG_EDAC_MCE) += edac_mce.o
edac_core-objs := edac_mc.o edac_device.o edac_mc_sysfs.o edac_pci_sysfs.o
edac_core-objs += edac_module.o edac_device_sysfs.o
@@ -23,6 +24,7 @@ obj-$(CONFIG_EDAC_CPC925) += cpc925_edac.o
obj-$(CONFIG_EDAC_I5000) += i5000_edac.o
obj-$(CONFIG_EDAC_I5100) += i5100_edac.o
obj-$(CONFIG_EDAC_I5400) += i5400_edac.o
+obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o
obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o
obj-$(CONFIG_EDAC_E752X) += e752x_edac.o
obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 001b2e797fb3..efca9343d26a 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -341,12 +341,30 @@ struct csrow_info {
struct channel_info *channels;
};
+struct mcidev_sysfs_group {
+ const char *name; /* group name */
+ struct mcidev_sysfs_attribute *mcidev_attr; /* group attributes */
+};
+
+struct mcidev_sysfs_group_kobj {
+ struct list_head list; /* list for all instances within a mc */
+
+ struct kobject kobj; /* kobj for the group */
+
+ struct mcidev_sysfs_group *grp; /* group description table */
+ struct mem_ctl_info *mci; /* the parent */
+};
+
/* mcidev_sysfs_attribute structure
* used for driver sysfs attributes and in mem_ctl_info
* sysfs top level entries
*/
struct mcidev_sysfs_attribute {
- struct attribute attr;
+ /* Use either attr or grp, not both */
+ struct attribute attr;
+ struct mcidev_sysfs_group *grp; /* Points to a group of attributes */
+
+ /* Ops for show/store values at the attribute - not used on group */
ssize_t (*show)(struct mem_ctl_info *,char *);
ssize_t (*store)(struct mem_ctl_info *, const char *,size_t);
};
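+
+/*
+ * Illustrative (hypothetical) use of the attr/grp pair above: a driver can
+ * mix plain attributes and attribute groups in a single table, terminated
+ * by an all-NULL entry, e.g.:
+ *
+ *	static struct mcidev_sysfs_attribute my_attrs[] = {
+ *		{ .attr = { .name = "foo", .mode = S_IRUGO }, .show = foo_show },
+ *		{ .grp  = &my_group },
+ *		{ .attr = { .name = NULL } }
+ *	};
+ */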
@@ -424,6 +442,9 @@ struct mem_ctl_info {
/* edac sysfs device control */
struct kobject edac_mci_kobj;
+ /* list for all grp instances within a mc */
+ struct list_head grp_kobj_list;
+
/* Additional top controller level attributes, but specified
* by the low level driver.
*
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 418b65f1a1da..c200c2fd43ea 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -557,6 +557,8 @@ static ssize_t mcidev_show(struct kobject *kobj, struct attribute *attr,
struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
+ debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info);
+
if (mcidev_attr->show)
return mcidev_attr->show(mem_ctl_info, buffer);
@@ -569,6 +571,8 @@ static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
+ debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info);
+
if (mcidev_attr->store)
return mcidev_attr->store(mem_ctl_info, buffer, count);
@@ -726,28 +730,118 @@ void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci)
#define EDAC_DEVICE_SYMLINK "device"
+#define grp_to_mci(k) (container_of(k, struct mcidev_sysfs_group_kobj, kobj)->mci)
+
+/* MCI show/store functions for top most object */
+static ssize_t inst_grp_show(struct kobject *kobj, struct attribute *attr,
+ char *buffer)
+{
+ struct mem_ctl_info *mem_ctl_info = grp_to_mci(kobj);
+ struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
+
+ debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info);
+
+ if (mcidev_attr->show)
+ return mcidev_attr->show(mem_ctl_info, buffer);
+
+ return -EIO;
+}
+
+static ssize_t inst_grp_store(struct kobject *kobj, struct attribute *attr,
+ const char *buffer, size_t count)
+{
+ struct mem_ctl_info *mem_ctl_info = grp_to_mci(kobj);
+ struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
+
+ debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info);
+
+ if (mcidev_attr->store)
+ return mcidev_attr->store(mem_ctl_info, buffer, count);
+
+ return -EIO;
+}
+
+/* No memory to release for this kobj */
+static void edac_inst_grp_release(struct kobject *kobj)
+{
+ struct mcidev_sysfs_group_kobj *grp;
+ struct mem_ctl_info *mci;
+
+ debugf1("%s()\n", __func__);
+
+ grp = container_of(kobj, struct mcidev_sysfs_group_kobj, kobj);
+ mci = grp->mci;
+
+ kobject_put(&mci->edac_mci_kobj);
+}
+
+/* Intermediate show/store table */
+static struct sysfs_ops inst_grp_ops = {
+ .show = inst_grp_show,
+ .store = inst_grp_store
+};
+
+/* the kobj_type instance for an instance group */
+static struct kobj_type ktype_inst_grp = {
+ .release = edac_inst_grp_release,
+ .sysfs_ops = &inst_grp_ops,
+};
+
+
/*
* edac_create_mci_instance_attributes
- * create MC driver specific attributes at the topmost level
- * directory of this mci instance.
+ * create MC driver specific attributes below a specified kobj.
+ * This routine calls itself recursively, in order to create an entire
+ * object tree.
*/
-static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci)
+static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci,
+ struct mcidev_sysfs_attribute *sysfs_attrib,
+ struct kobject *kobj)
{
int err;
- struct mcidev_sysfs_attribute *sysfs_attrib;
- /* point to the start of the array and iterate over it
- * adding each attribute listed to this mci instance's kobject
- */
- sysfs_attrib = mci->mc_driver_sysfs_attributes;
+ debugf1("%s()\n", __func__);
+
+ while (sysfs_attrib) {
+ if (sysfs_attrib->grp) {
+ struct mcidev_sysfs_group_kobj *grp_kobj;
+
+ grp_kobj = kzalloc(sizeof(*grp_kobj), GFP_KERNEL);
+ if (!grp_kobj)
+ return -ENOMEM;
+
+ list_add_tail(&grp_kobj->list, &mci->grp_kobj_list);
+
+ grp_kobj->grp = sysfs_attrib->grp;
+ grp_kobj->mci = mci;
+
+ debugf0("%s() grp %s, mci %p\n", __func__,
+ sysfs_attrib->grp->name, mci);
+
+ err = kobject_init_and_add(&grp_kobj->kobj,
+ &ktype_inst_grp,
+ &mci->edac_mci_kobj,
+ sysfs_attrib->grp->name);
+ if (err)
+ return err;
+
+ err = edac_create_mci_instance_attributes(mci,
+ grp_kobj->grp->mcidev_attr,
+ &grp_kobj->kobj);
+
+ if (err)
+ return err;
+ } else if (sysfs_attrib->attr.name) {
+ debugf0("%s() file %s\n", __func__,
+ sysfs_attrib->attr.name);
+
+ err = sysfs_create_file(kobj, &sysfs_attrib->attr);
+ } else
+ break;
- while (sysfs_attrib && sysfs_attrib->attr.name) {
- err = sysfs_create_file(&mci->edac_mci_kobj,
- (struct attribute*) sysfs_attrib);
if (err) {
return err;
}
-
sysfs_attrib++;
}
@@ -759,21 +853,44 @@ static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci)
* remove MC driver specific attributes at the topmost level
* directory of this mci instance.
*/
-static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci)
+static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci,
+ struct mcidev_sysfs_attribute *sysfs_attrib,
+ struct kobject *kobj, int count)
{
- struct mcidev_sysfs_attribute *sysfs_attrib;
+ struct mcidev_sysfs_group_kobj *grp_kobj, *tmp;
- /* point to the start of the array and iterate over it
- * adding each attribute listed to this mci instance's kobject
- */
- sysfs_attrib = mci->mc_driver_sysfs_attributes;
+ debugf1("%s()\n", __func__);
- /* loop if there are attributes and until we hit a NULL entry */
- while (sysfs_attrib && sysfs_attrib->attr.name) {
- sysfs_remove_file(&mci->edac_mci_kobj,
- (struct attribute *) sysfs_attrib);
+ /*
+ * Loop while there are attributes, until we hit a NULL entry.
+ * Remove all the attributes first.
+ */
+ while (sysfs_attrib) {
+ if (sysfs_attrib->grp) {
+ list_for_each_entry(grp_kobj, &mci->grp_kobj_list,
+ list)
+ if (grp_kobj->grp == sysfs_attrib->grp)
+ edac_remove_mci_instance_attributes(mci,
+ grp_kobj->grp->mcidev_attr,
+ &grp_kobj->kobj, count + 1);
+ } else if (sysfs_attrib->attr.name) {
+ debugf0("%s() file %s\n", __func__,
+ sysfs_attrib->attr.name);
+ sysfs_remove_file(kobj, &sysfs_attrib->attr);
+ } else
+ break;
sysfs_attrib++;
}
+
+ /*
+ * Now that all attributes have been removed, it is safe to remove all groups
+ */
+ if (!count)
+ list_for_each_entry_safe(grp_kobj, tmp, &mci->grp_kobj_list,
+ list) {
+ debugf0("%s() grp %s\n", __func__, grp_kobj->grp->name);
+ kobject_put(&grp_kobj->kobj);
+ }
}
@@ -794,6 +911,8 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
debugf0("%s() idx=%d\n", __func__, mci->mc_idx);
+ INIT_LIST_HEAD(&mci->grp_kobj_list);
+
/* create a symlink for the device */
err = sysfs_create_link(kobj_mci, &mci->dev->kobj,
EDAC_DEVICE_SYMLINK);
@@ -806,7 +925,9 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
* then create them now for the driver.
*/
if (mci->mc_driver_sysfs_attributes) {
- err = edac_create_mci_instance_attributes(mci);
+ err = edac_create_mci_instance_attributes(mci,
+ mci->mc_driver_sysfs_attributes,
+ &mci->edac_mci_kobj);
if (err) {
debugf1("%s() failure to create mci attributes\n",
__func__);
@@ -841,7 +962,8 @@ fail1:
}
/* remove the mci instance's attributes, if any */
- edac_remove_mci_instance_attributes(mci);
+ edac_remove_mci_instance_attributes(mci,
+ mci->mc_driver_sysfs_attributes, &mci->edac_mci_kobj, 0);
/* remove the symlink */
sysfs_remove_link(kobj_mci, EDAC_DEVICE_SYMLINK);
@@ -875,8 +997,9 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
debugf0("%s() remove_mci_instance\n", __func__);
/* remove this mci instance's attribtes */
- edac_remove_mci_instance_attributes(mci);
-
+ edac_remove_mci_instance_attributes(mci,
+ mci->mc_driver_sysfs_attributes,
+ &mci->edac_mci_kobj, 0);
debugf0("%s() unregister this mci kobj\n", __func__);
/* unregister this instance's kobject */
diff --git a/drivers/edac/edac_mce.c b/drivers/edac/edac_mce.c
new file mode 100644
index 000000000000..9ccdc5b140e7
--- /dev/null
+++ b/drivers/edac/edac_mce.c
@@ -0,0 +1,61 @@
+/* Provides edac interface to mcelog events
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License version 2.
+ *
+ * Copyright (c) 2009 by:
+ * Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * Red Hat Inc. http://www.redhat.com
+ */
+
+#include <linux/module.h>
+#include <linux/edac_mce.h>
+#include <asm/mce.h>
+
+int edac_mce_enabled;
+EXPORT_SYMBOL_GPL(edac_mce_enabled);
+
+
+/*
+ * Extension interface
+ */
+
+static LIST_HEAD(edac_mce_list);
+static DEFINE_MUTEX(edac_mce_lock);
+
+int edac_mce_register(struct edac_mce *edac_mce)
+{
+ mutex_lock(&edac_mce_lock);
+ list_add_tail(&edac_mce->list, &edac_mce_list);
+ mutex_unlock(&edac_mce_lock);
+ return 0;
+}
+EXPORT_SYMBOL(edac_mce_register);
+
+void edac_mce_unregister(struct edac_mce *edac_mce)
+{
+ mutex_lock(&edac_mce_lock);
+ list_del(&edac_mce->list);
+ mutex_unlock(&edac_mce_lock);
+}
+EXPORT_SYMBOL(edac_mce_unregister);
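+
+/*
+ * Typical (illustrative) usage by an EDAC driver: fill a struct edac_mce
+ * with a private pointer and a check_error() callback, then register it so
+ * that edac_mce_parse() can hand mcelog events over. The callback name
+ * below is hypothetical:
+ *
+ *	pvt->edac_mce.priv = mci;
+ *	pvt->edac_mce.check_error = my_mce_check_error;
+ *	edac_mce_register(&pvt->edac_mce);
+ */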
+
+int edac_mce_parse(struct mce *mce)
+{
+ struct edac_mce *edac_mce;
+
+ list_for_each_entry(edac_mce, &edac_mce_list, list) {
+ if (edac_mce->check_error(edac_mce->priv, mce))
+ return 1;
+ }
+
+ /* Nobody queued the error */
+ return 0;
+}
+EXPORT_SYMBOL_GPL(edac_mce_parse);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
+MODULE_DESCRIPTION("EDAC Driver for mcelog captured errors");
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
new file mode 100644
index 000000000000..4d6ecf1291b8
--- /dev/null
+++ b/drivers/edac/i7core_edac.c
@@ -0,0 +1,2021 @@
+/* Intel 7 core Memory Controller kernel module (Nehalem)
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License version 2 only.
+ *
+ * Copyright (c) 2009 by:
+ * Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * Red Hat Inc. http://www.redhat.com
+ *
+ * Forked and adapted from the i5400_edac driver
+ *
+ * Based on the following public Intel datasheets:
+ * Intel Core i7 Processor Extreme Edition and Intel Core i7 Processor
+ * Datasheet, Volume 2:
+ * http://download.intel.com/design/processor/datashts/320835.pdf
+ * Intel Xeon Processor 5500 Series Datasheet Volume 2
+ * http://www.intel.com/Assets/PDF/datasheet/321322.pdf
+ * also available at:
+ * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/edac.h>
+#include <linux/mmzone.h>
+#include <linux/edac_mce.h>
+#include <linux/smp.h>
+#include <asm/processor.h>
+
+#include "edac_core.h"
+
+/*
+ * This is used for Nehalem-EP and Nehalem-EX devices, where the non-core
+ * registers start at bus 255, and are not reported by BIOS.
+ * We have currently only found devices with 2 sockets. In order to support
+ * more QPI (Quick Path Interconnect) sockets, just increment this number.
+ */
+#define MAX_SOCKET_BUSES 2
+
+
+/*
+ * Alter this version for the module when modifications are made
+ */
+#define I7CORE_REVISION " Ver: 1.0.0 " __DATE__
+#define EDAC_MOD_STR "i7core_edac"
+
+/*
+ * Debug macros
+ */
+#define i7core_printk(level, fmt, arg...) \
+ edac_printk(level, "i7core", fmt, ##arg)
+
+#define i7core_mc_printk(mci, level, fmt, arg...) \
+ edac_mc_chipset_printk(mci, level, "i7core", fmt, ##arg)
+
+/*
+ * i7core Memory Controller Registers
+ */
+
+ /* OFFSETS for Device 0 Function 0 */
+
+#define MC_CFG_CONTROL 0x90
+
+ /* OFFSETS for Device 3 Function 0 */
+
+#define MC_CONTROL 0x48
+#define MC_STATUS 0x4c
+#define MC_MAX_DOD 0x64
+
+/*
+ * OFFSETS for Device 3 Function 4, as indicated in the Xeon 5500 datasheet:
+ * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
+ */
+
+#define MC_TEST_ERR_RCV1 0x60
+ #define DIMM2_COR_ERR(r) ((r) & 0x7fff)
+
+#define MC_TEST_ERR_RCV0 0x64
+ #define DIMM1_COR_ERR(r) (((r) >> 16) & 0x7fff)
+ #define DIMM0_COR_ERR(r) ((r) & 0x7fff)
+
+/* OFFSETS for Device 3 Function 2, as indicated in the Xeon 5500 datasheet */
+#define MC_COR_ECC_CNT_0 0x80
+#define MC_COR_ECC_CNT_1 0x84
+#define MC_COR_ECC_CNT_2 0x88
+#define MC_COR_ECC_CNT_3 0x8c
+#define MC_COR_ECC_CNT_4 0x90
+#define MC_COR_ECC_CNT_5 0x94
+
+#define DIMM_TOP_COR_ERR(r) (((r) >> 16) & 0x7fff)
+#define DIMM_BOT_COR_ERR(r) ((r) & 0x7fff)
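+/*
+ * Each MC_COR_ECC_CNT register packs two 15-bit corrected-error counters:
+ * one in the bottom half (bits 14:0) and one in the top half (bits 30:16).
+ */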
+
+
+ /* OFFSETS for Devices 4,5 and 6 Function 0 */
+
+#define MC_CHANNEL_DIMM_INIT_PARAMS 0x58
+ #define THREE_DIMMS_PRESENT (1 << 24)
+ #define SINGLE_QUAD_RANK_PRESENT (1 << 23)
+ #define QUAD_RANK_PRESENT (1 << 22)
+ #define REGISTERED_DIMM (1 << 15)
+
+#define MC_CHANNEL_MAPPER 0x60
+ #define RDLCH(r, ch) ((((r) >> (3 + (ch * 6))) & 0x07) - 1)
+ #define WRLCH(r, ch) ((((r) >> (ch * 6)) & 0x07) - 1)
+
+#define MC_CHANNEL_RANK_PRESENT 0x7c
+ #define RANK_PRESENT_MASK 0xffff
+
+#define MC_CHANNEL_ADDR_MATCH 0xf0
+#define MC_CHANNEL_ERROR_MASK 0xf8
+#define MC_CHANNEL_ERROR_INJECT 0xfc
+ #define INJECT_ADDR_PARITY 0x10
+ #define INJECT_ECC 0x08
+ #define MASK_CACHELINE 0x06
+ #define MASK_FULL_CACHELINE 0x06
+ #define MASK_MSB32_CACHELINE 0x04
+ #define MASK_LSB32_CACHELINE 0x02
+ #define NO_MASK_CACHELINE 0x00
+ #define REPEAT_EN 0x01
+
+ /* OFFSETS for Devices 4,5 and 6 Function 1 */
+
+#define MC_DOD_CH_DIMM0 0x48
+#define MC_DOD_CH_DIMM1 0x4c
+#define MC_DOD_CH_DIMM2 0x50
+ #define RANKOFFSET_MASK ((1 << 12) | (1 << 11) | (1 << 10))
+ #define RANKOFFSET(x) ((x & RANKOFFSET_MASK) >> 10)
+ #define DIMM_PRESENT_MASK (1 << 9)
+ #define DIMM_PRESENT(x) (((x) & DIMM_PRESENT_MASK) >> 9)
+ #define MC_DOD_NUMBANK_MASK ((1 << 8) | (1 << 7))
+ #define MC_DOD_NUMBANK(x) (((x) & MC_DOD_NUMBANK_MASK) >> 7)
+ #define MC_DOD_NUMRANK_MASK ((1 << 6) | (1 << 5))
+ #define MC_DOD_NUMRANK(x) (((x) & MC_DOD_NUMRANK_MASK) >> 5)
+ #define MC_DOD_NUMROW_MASK ((1 << 4) | (1 << 3) | (1 << 2))
+ #define MC_DOD_NUMROW(x) (((x) & MC_DOD_NUMROW_MASK) >> 2)
+ #define MC_DOD_NUMCOL_MASK 3
+ #define MC_DOD_NUMCOL(x) ((x) & MC_DOD_NUMCOL_MASK)
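+
+/*
+ * Worked decode example (values are illustrative): dimm_dod = 0x2aa means
+ * DIMM_PRESENT = 1, MC_DOD_NUMBANK = 1 (8 banks), MC_DOD_NUMRANK = 1
+ * (2 ranks), MC_DOD_NUMROW = 2 (16384 rows) and MC_DOD_NUMCOL = 2 (4096
+ * cols); get_dimm_config() below would then size the DIMM as
+ * (16384 * 4096 * 8 * 2) >> (20 - 3) = 8192 MB.
+ */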
+
+#define MC_RANK_PRESENT 0x7c
+
+#define MC_SAG_CH_0 0x80
+#define MC_SAG_CH_1 0x84
+#define MC_SAG_CH_2 0x88
+#define MC_SAG_CH_3 0x8c
+#define MC_SAG_CH_4 0x90
+#define MC_SAG_CH_5 0x94
+#define MC_SAG_CH_6 0x98
+#define MC_SAG_CH_7 0x9c
+
+#define MC_RIR_LIMIT_CH_0 0x40
+#define MC_RIR_LIMIT_CH_1 0x44
+#define MC_RIR_LIMIT_CH_2 0x48
+#define MC_RIR_LIMIT_CH_3 0x4C
+#define MC_RIR_LIMIT_CH_4 0x50
+#define MC_RIR_LIMIT_CH_5 0x54
+#define MC_RIR_LIMIT_CH_6 0x58
+#define MC_RIR_LIMIT_CH_7 0x5C
+#define MC_RIR_LIMIT_MASK ((1 << 10) - 1)
+
+#define MC_RIR_WAY_CH 0x80
+ #define MC_RIR_WAY_OFFSET_MASK (((1 << 14) - 1) & ~0x7)
+ #define MC_RIR_WAY_RANK_MASK 0x7
+
+/*
+ * i7core structs
+ */
+
+#define NUM_CHANS 3
+#define MAX_DIMMS 3 /* Max DIMMS per channel */
+#define MAX_MCR_FUNC 4
+#define MAX_CHAN_FUNC 3
+
+struct i7core_info {
+ u32 mc_control;
+ u32 mc_status;
+ u32 max_dod;
+ u32 ch_map;
+};
+
+
+struct i7core_inject {
+ int enable;
+
+ u32 section;
+ u32 type;
+ u32 eccmask;
+
+ /* Error address mask */
+ int channel, dimm, rank, bank, page, col;
+};
+
+struct i7core_channel {
+ u32 ranks;
+ u32 dimms;
+};
+
+struct pci_id_descr {
+ int dev;
+ int func;
+ int dev_id;
+ int optional;
+};
+
+struct i7core_dev {
+ struct list_head list;
+ u8 socket;
+ struct pci_dev **pdev;
+ int n_devs;
+ struct mem_ctl_info *mci;
+};
+
+struct i7core_pvt {
+ struct pci_dev *pci_noncore;
+ struct pci_dev *pci_mcr[MAX_MCR_FUNC + 1];
+ struct pci_dev *pci_ch[NUM_CHANS][MAX_CHAN_FUNC + 1];
+
+ struct i7core_dev *i7core_dev;
+
+ struct i7core_info info;
+ struct i7core_inject inject;
+ struct i7core_channel channel[NUM_CHANS];
+
+ int channels; /* Number of active channels */
+
+ int ce_count_available;
+ int csrow_map[NUM_CHANS][MAX_DIMMS];
+
+ /* ECC corrected error counts per udimm */
+ unsigned long udimm_ce_count[MAX_DIMMS];
+ int udimm_last_ce_count[MAX_DIMMS];
+ /* ECC corrected error counts per rdimm */
+ unsigned long rdimm_ce_count[NUM_CHANS][MAX_DIMMS];
+ int rdimm_last_ce_count[NUM_CHANS][MAX_DIMMS];
+
+ unsigned int is_registered;
+
+ /* mcelog glue */
+ struct edac_mce edac_mce;
+
+ /* Fifo double buffers */
+ struct mce mce_entry[MCE_LOG_LEN];
+ struct mce mce_outentry[MCE_LOG_LEN];
+
+ /* Fifo in/out counters */
+ unsigned mce_in, mce_out;
+
+ /* Count of errors that were lost because the ring buffer was full */
+ unsigned mce_overrun;
+};
+
+/* Static vars */
+static LIST_HEAD(i7core_edac_list);
+static DEFINE_MUTEX(i7core_edac_lock);
+
+#define PCI_DESCR(device, function, device_id) \
+ .dev = (device), \
+ .func = (function), \
+ .dev_id = (device_id)
+
+struct pci_id_descr pci_dev_descr_i7core[] = {
+ /* Memory controller */
+ { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR) },
+ { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_I7_MC_TAD) },
+ /* Exists only for RDIMM */
+ { PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_I7_MC_RAS), .optional = 1 },
+ { PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_I7_MC_TEST) },
+
+ /* Channel 0 */
+ { PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL) },
+ { PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR) },
+ { PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH0_RANK) },
+ { PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH0_TC) },
+
+ /* Channel 1 */
+ { PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL) },
+ { PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH1_ADDR) },
+ { PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH1_RANK) },
+ { PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH1_TC) },
+
+ /* Channel 2 */
+ { PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL) },
+ { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR) },
+ { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK) },
+ { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC) },
+
+ /* Generic Non-core registers */
+ /*
+ * This is the PCI device on i7core and on Xeon 35xx (8086:2c41)
+ * On Xeon 55xx, however, it has a different id (8086:2c40). So,
+ * the probing code needs to test for the other address in case of
+ * failure of this one
+ */
+ { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_I7_NONCORE) },
+
+};
+
+struct pci_id_descr pci_dev_descr_lynnfield[] = {
+ { PCI_DESCR( 3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR) },
+ { PCI_DESCR( 3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD) },
+ { PCI_DESCR( 3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST) },
+
+ { PCI_DESCR( 4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL) },
+ { PCI_DESCR( 4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR) },
+ { PCI_DESCR( 4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK) },
+ { PCI_DESCR( 4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC) },
+
+ { PCI_DESCR( 5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL) },
+ { PCI_DESCR( 5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR) },
+ { PCI_DESCR( 5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK) },
+ { PCI_DESCR( 5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC) },
+
+ /*
+ * This PCI device has an alternate address on some
+ * processors like Core i7 860
+ */
+ { PCI_DESCR( 0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE) },
+};
+
+/*
+ * pci_device_id table for which devices we are looking for
+ */
+static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
+ {0,} /* 0 terminated list. */
+};
+
+static struct edac_pci_ctl_info *i7core_pci;
+
+/****************************************************************************
+ Ancillary status routines
+ ****************************************************************************/
+
+ /* MC_CONTROL bits */
+#define CH_ACTIVE(pvt, ch) ((pvt)->info.mc_control & (1 << (8 + ch)))
+#define ECCx8(pvt) ((pvt)->info.mc_control & (1 << 1))
+
+ /* MC_STATUS bits */
+#define ECC_ENABLED(pvt) ((pvt)->info.mc_status & (1 << 4))
+#define CH_DISABLED(pvt, ch) ((pvt)->info.mc_status & (1 << ch))
+
+ /* MC_MAX_DOD read functions */
+static inline int numdimms(u32 dimms)
+{
+ return (dimms & 0x3) + 1;
+}
+
+static inline int numrank(u32 rank)
+{
+ static int ranks[4] = { 1, 2, 4, -EINVAL };
+
+ return ranks[rank & 0x3];
+}
+
+static inline int numbank(u32 bank)
+{
+ static int banks[4] = { 4, 8, 16, -EINVAL };
+
+ return banks[bank & 0x3];
+}
+
+static inline int numrow(u32 row)
+{
+ static int rows[8] = {
+ 1 << 12, 1 << 13, 1 << 14, 1 << 15,
+ 1 << 16, -EINVAL, -EINVAL, -EINVAL,
+ };
+
+ return rows[row & 0x7];
+}
+
+static inline int numcol(u32 col)
+{
+ static int cols[8] = {
+ 1 << 10, 1 << 11, 1 << 12, -EINVAL,
+ };
+ return cols[col & 0x3];
+}
+
+static struct i7core_dev *get_i7core_dev(u8 socket)
+{
+ struct i7core_dev *i7core_dev;
+
+ list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
+ if (i7core_dev->socket == socket)
+ return i7core_dev;
+ }
+
+ return NULL;
+}
+
+/****************************************************************************
+ Memory check routines
+ ****************************************************************************/
+static struct pci_dev *get_pdev_slot_func(u8 socket, unsigned slot,
+ unsigned func)
+{
+ struct i7core_dev *i7core_dev = get_i7core_dev(socket);
+ int i;
+
+ if (!i7core_dev)
+ return NULL;
+
+ for (i = 0; i < i7core_dev->n_devs; i++) {
+ if (!i7core_dev->pdev[i])
+ continue;
+
+ if (PCI_SLOT(i7core_dev->pdev[i]->devfn) == slot &&
+ PCI_FUNC(i7core_dev->pdev[i]->devfn) == func) {
+ return i7core_dev->pdev[i];
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * i7core_get_active_channels() - gets the number of channels and csrows
+ * @socket: Quick Path Interconnect socket
+ * @channels: Number of channels that will be returned
+ * @csrows: Number of csrows found
+ *
+ * Since the EDAC core needs to know in advance the number of available
+ * channels and csrows in order to allocate memory for them, discovery is
+ * done in two similar steps. The first step, implemented by this function,
+ * counts the csrows/channels present on one socket, so that the mci
+ * components can be sized properly.
+ *
+ * It should be noted that none of the currently available datasheets explain
+ * or even mention how csrows are seen by the memory controller, so we need
+ * to add a fake description for csrows: this driver maps one DIMM to one
+ * csrow.
+ */
+static int i7core_get_active_channels(u8 socket, unsigned *channels,
+ unsigned *csrows)
+{
+ struct pci_dev *pdev = NULL;
+ int i, j;
+ u32 status, control;
+
+ *channels = 0;
+ *csrows = 0;
+
+ pdev = get_pdev_slot_func(socket, 3, 0);
+ if (!pdev) {
+ i7core_printk(KERN_ERR, "Couldn't find socket %d fn 3.0!!!\n",
+ socket);
+ return -ENODEV;
+ }
+
+ /* Device 3 function 0 reads */
+ pci_read_config_dword(pdev, MC_STATUS, &status);
+ pci_read_config_dword(pdev, MC_CONTROL, &control);
+
+ for (i = 0; i < NUM_CHANS; i++) {
+ u32 dimm_dod[3];
+ /* Check if the channel is active */
+ if (!(control & (1 << (8 + i))))
+ continue;
+
+ /* Check if the channel is disabled */
+ if (status & (1 << i))
+ continue;
+
+ pdev = get_pdev_slot_func(socket, i + 4, 1);
+ if (!pdev) {
+ i7core_printk(KERN_ERR, "Couldn't find socket %d "
+ "fn %d.%d!!!\n",
+ socket, i + 4, 1);
+ return -ENODEV;
+ }
+ /* Devices 4-6 function 1 */
+ pci_read_config_dword(pdev,
+ MC_DOD_CH_DIMM0, &dimm_dod[0]);
+ pci_read_config_dword(pdev,
+ MC_DOD_CH_DIMM1, &dimm_dod[1]);
+ pci_read_config_dword(pdev,
+ MC_DOD_CH_DIMM2, &dimm_dod[2]);
+
+ (*channels)++;
+
+ for (j = 0; j < 3; j++) {
+ if (!DIMM_PRESENT(dimm_dod[j]))
+ continue;
+ (*csrows)++;
+ }
+ }
+
+ debugf0("Number of active channels on socket %d: %d\n",
+ socket, *channels);
+
+ return 0;
+}
+
+static int get_dimm_config(struct mem_ctl_info *mci, int *csrow)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ struct csrow_info *csr;
+ struct pci_dev *pdev;
+ int i, j;
+ unsigned long last_page = 0;
+ enum edac_type mode;
+ enum mem_type mtype;
+
+ /* Get data from the MC register, function 0 */
+ pdev = pvt->pci_mcr[0];
+ if (!pdev)
+ return -ENODEV;
+
+ /* Device 3 function 0 reads */
+ pci_read_config_dword(pdev, MC_CONTROL, &pvt->info.mc_control);
+ pci_read_config_dword(pdev, MC_STATUS, &pvt->info.mc_status);
+ pci_read_config_dword(pdev, MC_MAX_DOD, &pvt->info.max_dod);
+ pci_read_config_dword(pdev, MC_CHANNEL_MAPPER, &pvt->info.ch_map);
+
+ debugf0("QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n",
+ pvt->i7core_dev->socket, pvt->info.mc_control, pvt->info.mc_status,
+ pvt->info.max_dod, pvt->info.ch_map);
+
+ if (ECC_ENABLED(pvt)) {
+ debugf0("ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4);
+ if (ECCx8(pvt))
+ mode = EDAC_S8ECD8ED;
+ else
+ mode = EDAC_S4ECD4ED;
+ } else {
+ debugf0("ECC disabled\n");
+ mode = EDAC_NONE;
+ }
+
+ /* FIXME: need to handle the error codes */
+ debugf0("DOD Max limits: DIMMS: %d, %d-ranked, %d-banked "
+ "x%x x 0x%x\n",
+ numdimms(pvt->info.max_dod),
+ numrank(pvt->info.max_dod >> 2),
+ numbank(pvt->info.max_dod >> 4),
+ numrow(pvt->info.max_dod >> 6),
+ numcol(pvt->info.max_dod >> 9));
+
+ for (i = 0; i < NUM_CHANS; i++) {
+ u32 data, dimm_dod[3], value[8];
+
+ if (!pvt->pci_ch[i][0])
+ continue;
+
+ if (!CH_ACTIVE(pvt, i)) {
+ debugf0("Channel %i is not active\n", i);
+ continue;
+ }
+ if (CH_DISABLED(pvt, i)) {
+ debugf0("Channel %i is disabled\n", i);
+ continue;
+ }
+
+ /* Devices 4-6 function 0 */
+ pci_read_config_dword(pvt->pci_ch[i][0],
+ MC_CHANNEL_DIMM_INIT_PARAMS, &data);
+
+ pvt->channel[i].ranks = (data & QUAD_RANK_PRESENT) ?
+ 4 : 2;
+
+ if (data & REGISTERED_DIMM)
+ mtype = MEM_RDDR3;
+ else
+ mtype = MEM_DDR3;
+#if 0
+ if (data & THREE_DIMMS_PRESENT)
+ pvt->channel[i].dimms = 3;
+ else if (data & SINGLE_QUAD_RANK_PRESENT)
+ pvt->channel[i].dimms = 1;
+ else
+ pvt->channel[i].dimms = 2;
+#endif
+
+ /* Devices 4-6 function 1 */
+ pci_read_config_dword(pvt->pci_ch[i][1],
+ MC_DOD_CH_DIMM0, &dimm_dod[0]);
+ pci_read_config_dword(pvt->pci_ch[i][1],
+ MC_DOD_CH_DIMM1, &dimm_dod[1]);
+ pci_read_config_dword(pvt->pci_ch[i][1],
+ MC_DOD_CH_DIMM2, &dimm_dod[2]);
+
+ debugf0("Ch%d phy rd%d, wr%d (0x%08x): "
+ "%d ranks, %cDIMMs\n",
+ i,
+ RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
+ data,
+ pvt->channel[i].ranks,
+ (data & REGISTERED_DIMM) ? 'R' : 'U');
+
+ for (j = 0; j < 3; j++) {
+ u32 banks, ranks, rows, cols;
+ u32 size, npages;
+
+ if (!DIMM_PRESENT(dimm_dod[j]))
+ continue;
+
+ banks = numbank(MC_DOD_NUMBANK(dimm_dod[j]));
+ ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j]));
+ rows = numrow(MC_DOD_NUMROW(dimm_dod[j]));
+ cols = numcol(MC_DOD_NUMCOL(dimm_dod[j]));
+
+ /* DDR3 has 8 I/O banks */
+ size = (rows * cols * banks * ranks) >> (20 - 3);
+
+ pvt->channel[i].dimms++;
+
+ debugf0("\tdimm %d %d Mb offset: %x, "
+ "bank: %d, rank: %d, row: %#x, col: %#x\n",
+ j, size,
+ RANKOFFSET(dimm_dod[j]),
+ banks, ranks, rows, cols);
+
+#if PAGE_SHIFT > 20
+ npages = size >> (PAGE_SHIFT - 20);
+#else
+ npages = size << (20 - PAGE_SHIFT);
+#endif
+
+ csr = &mci->csrows[*csrow];
+ csr->first_page = last_page + 1;
+ last_page += npages;
+ csr->last_page = last_page;
+ csr->nr_pages = npages;
+
+ csr->page_mask = 0;
+ csr->grain = 8;
+ csr->csrow_idx = *csrow;
+ csr->nr_channels = 1;
+
+ csr->channels[0].chan_idx = i;
+ csr->channels[0].ce_count = 0;
+
+ pvt->csrow_map[i][j] = *csrow;
+
+ switch (banks) {
+ case 4:
+ csr->dtype = DEV_X4;
+ break;
+ case 8:
+ csr->dtype = DEV_X8;
+ break;
+ case 16:
+ csr->dtype = DEV_X16;
+ break;
+ default:
+ csr->dtype = DEV_UNKNOWN;
+ }
+
+ csr->edac_mode = mode;
+ csr->mtype = mtype;
+
+ (*csrow)++;
+ }
+
+ pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]);
+ pci_read_config_dword(pdev, MC_SAG_CH_1, &value[1]);
+ pci_read_config_dword(pdev, MC_SAG_CH_2, &value[2]);
+ pci_read_config_dword(pdev, MC_SAG_CH_3, &value[3]);
+ pci_read_config_dword(pdev, MC_SAG_CH_4, &value[4]);
+ pci_read_config_dword(pdev, MC_SAG_CH_5, &value[5]);
+ pci_read_config_dword(pdev, MC_SAG_CH_6, &value[6]);
+ pci_read_config_dword(pdev, MC_SAG_CH_7, &value[7]);
+ debugf1("\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i);
+ for (j = 0; j < 8; j++)
+ debugf1("\t\t%#x\t%#x\t%#x\n",
+ (value[j] >> 27) & 0x1,
+ (value[j] >> 24) & 0x7,
+ (value[j] & ((1 << 24) - 1)));
+ }
+
+ return 0;
+}
+
+/****************************************************************************
+ Error insertion routines
+ ****************************************************************************/
+
+/* The i7core has independent error injection features per channel.
+ However, to keep the code simpler, we don't allow enabling error injection
+ on more than one channel.
+ Also, since a change to an inject parameter only takes effect at enable
+ time, error injection is disabled on every write to the sysfs nodes that
+ control the error injection parameters.
+ */
+static int disable_inject(struct mem_ctl_info *mci)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+
+ pvt->inject.enable = 0;
+
+ if (!pvt->pci_ch[pvt->inject.channel][0])
+ return -ENODEV;
+
+ pci_write_config_dword(pvt->pci_ch[pvt->inject.channel][0],
+ MC_CHANNEL_ERROR_INJECT, 0);
+
+ return 0;
+}
+
+/*
+ * i7core_inject_section
+ *
+ * accept and store the error injection inject.section value
+ * bit 0 - refers to the lower 32-byte half cacheline
+ * bit 1 - refers to the upper 32-byte half cacheline
+ */
+static ssize_t i7core_inject_section_store(struct mem_ctl_info *mci,
+ const char *data, size_t count)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ unsigned long value;
+ int rc;
+
+ if (pvt->inject.enable)
+ disable_inject(mci);
+
+ rc = strict_strtoul(data, 10, &value);
+ if ((rc < 0) || (value > 3))
+ return -EIO;
+
+ pvt->inject.section = (u32) value;
+ return count;
+}
+
+static ssize_t i7core_inject_section_show(struct mem_ctl_info *mci,
+ char *data)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ return sprintf(data, "0x%08x\n", pvt->inject.section);
+}
+
+/*
+ * i7core_inject_type
+ *
+ * accept and store the error injection inject.type value
+ * bit 0 - repeat enable - Enable error repetition
+ * bit 1 - inject ECC error
+ * bit 2 - inject parity error
+ */
+static ssize_t i7core_inject_type_store(struct mem_ctl_info *mci,
+ const char *data, size_t count)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ unsigned long value;
+ int rc;
+
+ if (pvt->inject.enable)
+ disable_inject(mci);
+
+ rc = strict_strtoul(data, 10, &value);
+ if ((rc < 0) || (value > 7))
+ return -EIO;
+
+ pvt->inject.type = (u32) value;
+ return count;
+}
+
+static ssize_t i7core_inject_type_show(struct mem_ctl_info *mci,
+ char *data)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ return sprintf(data, "0x%08x\n", pvt->inject.type);
+}
+
+/*
+ * i7core_inject_eccmask_store
+ *
+ * The type of error (UE/CE) will depend on the inject.eccmask value:
+ * Any bits set to a 1 will flip the corresponding ECC bit
+ * Correctable errors can be injected by flipping 1 bit or the bits within
+ * a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
+ * 23:16 and 31:24). Flipping bits in two symbol pairs will cause an
+ * uncorrectable error to be injected.
+ */
+static ssize_t i7core_inject_eccmask_store(struct mem_ctl_info *mci,
+ const char *data, size_t count)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ unsigned long value;
+ int rc;
+
+ if (pvt->inject.enable)
+ disable_inject(mci);
+
+ rc = strict_strtoul(data, 10, &value);
+ if (rc < 0)
+ return -EIO;
+
+ pvt->inject.eccmask = (u32) value;
+ return count;
+}
+
+static ssize_t i7core_inject_eccmask_show(struct mem_ctl_info *mci,
+ char *data)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ return sprintf(data, "0x%08x\n", pvt->inject.eccmask);
+}
+
+/*
+ * i7core_addrmatch
+ *
+ * Generates the show/store handlers for the address-match criteria
+ * (channel, dimm, rank, bank, page and col) used to select where the
+ * error will be injected. Writing "any" to one of these nodes disables
+ * matching on that field.
+ */
+
+#define DECLARE_ADDR_MATCH(param, limit) \
+static ssize_t i7core_inject_store_##param( \
+ struct mem_ctl_info *mci, \
+ const char *data, size_t count) \
+{ \
+ struct i7core_pvt *pvt; \
+ long value; \
+ int rc; \
+ \
+ debugf1("%s()\n", __func__); \
+ pvt = mci->pvt_info; \
+ \
+ if (pvt->inject.enable) \
+ disable_inject(mci); \
+ \
+ if (!strcasecmp(data, "any") || !strcasecmp(data, "any\n"))\
+ value = -1; \
+ else { \
+ rc = strict_strtoul(data, 10, &value); \
+ if ((rc < 0) || (value >= limit)) \
+ return -EIO; \
+ } \
+ \
+ pvt->inject.param = value; \
+ \
+ return count; \
+} \
+ \
+static ssize_t i7core_inject_show_##param( \
+ struct mem_ctl_info *mci, \
+ char *data) \
+{ \
+ struct i7core_pvt *pvt; \
+ \
+ pvt = mci->pvt_info; \
+ debugf1("%s() pvt=%p\n", __func__, pvt); \
+ if (pvt->inject.param < 0) \
+ return sprintf(data, "any\n"); \
+ else \
+ return sprintf(data, "%d\n", pvt->inject.param);\
+}
+
+#define ATTR_ADDR_MATCH(param) \
+ { \
+ .attr = { \
+ .name = #param, \
+ .mode = (S_IRUGO | S_IWUSR) \
+ }, \
+ .show = i7core_inject_show_##param, \
+ .store = i7core_inject_store_##param, \
+ }
+
+DECLARE_ADDR_MATCH(channel, 3);
+DECLARE_ADDR_MATCH(dimm, 3);
+DECLARE_ADDR_MATCH(rank, 4);
+DECLARE_ADDR_MATCH(bank, 32);
+DECLARE_ADDR_MATCH(page, 0x10000);
+DECLARE_ADDR_MATCH(col, 0x4000);
+
+static int write_and_test(struct pci_dev *dev, int where, u32 val)
+{
+ u32 read;
+ int count;
+
+ debugf0("setting pci %02x:%02x.%x reg=%02x value=%08x\n",
+ dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
+ where, val);
+
+ for (count = 0; count < 10; count++) {
+ if (count)
+ msleep(100);
+ pci_write_config_dword(dev, where, val);
+ pci_read_config_dword(dev, where, &read);
+
+ if (read == val)
+ return 0;
+ }
+
+ i7core_printk(KERN_ERR, "Error during set pci %02x:%02x.%x reg=%02x "
+ "write=%08x. Read=%08x\n",
+ dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
+ where, val, read);
+
+ return -EINVAL;
+}
+
+/*
+ * This routine prepares the Memory Controller for error injection.
+ * The error will be injected when some process tries to write to the
+ * memory that matches the given criteria.
+ * The criteria can be set in terms of a mask where dimm, rank, bank, page
+ * and col can be specified.
+ * A -1 value for any of the mask items will make the MCU ignore
+ * that matching criterion for error injection.
+ *
+ * It should be noted that the error will only happen after a write operation
+ * to memory that matches the condition. If REPEAT_EN is not enabled in the
+ * inject mask, it will produce just one error. Otherwise, it will repeat
+ * until the inject mask is cleared.
+ *
+ * FIXME: This routine assumes that the MAXNUMDIMMS value of MC_MAX_DOD
+ * is reliable enough to check whether the MC is using all
+ * three channels. However, this is not clear in the datasheet.
+ */
+static ssize_t i7core_inject_enable_store(struct mem_ctl_info *mci,
+ const char *data, size_t count)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ u32 injectmask;
+ u64 mask = 0;
+ int rc;
+ long enable;
+
+ if (!pvt->pci_ch[pvt->inject.channel][0])
+ return 0;
+
+ rc = strict_strtoul(data, 10, &enable);
+ if ((rc < 0))
+ return 0;
+
+ if (enable) {
+ pvt->inject.enable = 1;
+ } else {
+ disable_inject(mci);
+ return count;
+ }
+
+ /* Sets pvt->inject.dimm mask */
+ if (pvt->inject.dimm < 0)
+ mask |= 1LL << 41;
+ else {
+ if (pvt->channel[pvt->inject.channel].dimms > 2)
+ mask |= (pvt->inject.dimm & 0x3LL) << 35;
+ else
+ mask |= (pvt->inject.dimm & 0x1LL) << 36;
+ }
+
+ /* Sets pvt->inject.rank mask */
+ if (pvt->inject.rank < 0)
+ mask |= 1LL << 40;
+ else {
+ if (pvt->channel[pvt->inject.channel].dimms > 2)
+ mask |= (pvt->inject.rank & 0x1LL) << 34;
+ else
+ mask |= (pvt->inject.rank & 0x3LL) << 34;
+ }
+
+ /* Sets pvt->inject.bank mask */
+ if (pvt->inject.bank < 0)
+ mask |= 1LL << 39;
+ else
+ mask |= (pvt->inject.bank & 0x15LL) << 30;
+
+ /* Sets pvt->inject.page mask */
+ if (pvt->inject.page < 0)
+ mask |= 1LL << 38;
+ else
+ mask |= (pvt->inject.page & 0xffff) << 14;
+
+ /* Sets pvt->inject.column mask */
+ if (pvt->inject.col < 0)
+ mask |= 1LL << 37;
+ else
+ mask |= (pvt->inject.col & 0x3fff);
+
+ /*
+ * bit 0: REPEAT_EN
+ * bits 1-2: MASK_HALF_CACHELINE
+ * bit 3: INJECT_ECC
+ * bit 4: INJECT_ADDR_PARITY
+ */
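+ /*
+ * Worked example (illustrative): inject.type = 3 (repeat + ECC) and
+ * inject.section = 3 (both cacheline halves) yield
+ * injectmask = 0x01 | (0x3 << 1) | (0x2 << 2) = 0x0f.
+ */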
+
+ injectmask = (pvt->inject.type & 1) |
+ (pvt->inject.section & 0x3) << 1 |
+ (pvt->inject.type & 0x6) << (3 - 1);
+
+ /* Unlock writes to registers - this register is write only */
+ pci_write_config_dword(pvt->pci_noncore,
+ MC_CFG_CONTROL, 0x2);
+
+ write_and_test(pvt->pci_ch[pvt->inject.channel][0],
+ MC_CHANNEL_ADDR_MATCH, mask);
+ write_and_test(pvt->pci_ch[pvt->inject.channel][0],
+ MC_CHANNEL_ADDR_MATCH + 4, mask >> 32L);
+
+ write_and_test(pvt->pci_ch[pvt->inject.channel][0],
+ MC_CHANNEL_ERROR_MASK, pvt->inject.eccmask);
+
+ write_and_test(pvt->pci_ch[pvt->inject.channel][0],
+ MC_CHANNEL_ERROR_INJECT, injectmask);
+
+ /*
+ * This is something undocumented, based on my tests:
+ * without writing 8 to this register, errors aren't injected. Not sure
+ * why.
+ */
+ pci_write_config_dword(pvt->pci_noncore,
+ MC_CFG_CONTROL, 8);
+
+ debugf0("Error inject addr match 0x%016llx, ecc 0x%08x,"
+ " inject 0x%08x\n",
+ mask, pvt->inject.eccmask, injectmask);
+
+
+ return count;
+}
+
+static ssize_t i7core_inject_enable_show(struct mem_ctl_info *mci,
+ char *data)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ u32 injectmask;
+
+ if (!pvt->pci_ch[pvt->inject.channel][0])
+ return 0;
+
+ pci_read_config_dword(pvt->pci_ch[pvt->inject.channel][0],
+ MC_CHANNEL_ERROR_INJECT, &injectmask);
+
+ debugf0("Inject error read: 0x%018x\n", injectmask);
+
+ if (injectmask & 0x0c)
+ pvt->inject.enable = 1;
+
+ return sprintf(data, "%d\n", pvt->inject.enable);
+}
+
+#define DECLARE_COUNTER(param) \
+static ssize_t i7core_show_counter_##param( \
+ struct mem_ctl_info *mci, \
+ char *data) \
+{ \
+ struct i7core_pvt *pvt = mci->pvt_info; \
+ \
+ debugf1("%s() \n", __func__); \
+ if (!pvt->ce_count_available || (pvt->is_registered)) \
+ return sprintf(data, "data unavailable\n"); \
+ return sprintf(data, "%lu\n", \
+ pvt->udimm_ce_count[param]); \
+}
+
+#define ATTR_COUNTER(param) \
+ { \
+ .attr = { \
+ .name = __stringify(udimm##param), \
+ .mode = (S_IRUGO | S_IWUSR) \
+ }, \
+ .show = i7core_show_counter_##param \
+ }
+
+DECLARE_COUNTER(0);
+DECLARE_COUNTER(1);
+DECLARE_COUNTER(2);
+
+/*
+ * Sysfs struct
+ */
+
+
+static struct mcidev_sysfs_attribute i7core_addrmatch_attrs[] = {
+ ATTR_ADDR_MATCH(channel),
+ ATTR_ADDR_MATCH(dimm),
+ ATTR_ADDR_MATCH(rank),
+ ATTR_ADDR_MATCH(bank),
+ ATTR_ADDR_MATCH(page),
+ ATTR_ADDR_MATCH(col),
+ { .attr = { .name = NULL } }
+};
+
+static struct mcidev_sysfs_group i7core_inject_addrmatch = {
+ .name = "inject_addrmatch",
+ .mcidev_attr = i7core_addrmatch_attrs,
+};
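+
+/*
+ * The group above is instantiated by the EDAC core (see
+ * edac_create_mci_instance_attributes): a kobject named "inject_addrmatch"
+ * is created below the mci kobject and populated with the attributes, so
+ * the nodes end up roughly at (exact path depends on the EDAC sysfs layout):
+ *
+ *	/sys/devices/system/edac/mc/mc<N>/inject_addrmatch/{channel,dimm,
+ *	rank,bank,page,col}
+ */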
+
+static struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = {
+ ATTR_COUNTER(0),
+ ATTR_COUNTER(1),
+ ATTR_COUNTER(2),
+ { .attr = { .name = NULL } }
+};
+
+static struct mcidev_sysfs_group i7core_udimm_counters = {
+ .name = "all_channel_counts",
+ .mcidev_attr = i7core_udimm_counters_attrs,
+};
+
+static struct mcidev_sysfs_attribute i7core_sysfs_attrs[] = {
+ {
+ .attr = {
+ .name = "inject_section",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = i7core_inject_section_show,
+ .store = i7core_inject_section_store,
+ }, {
+ .attr = {
+ .name = "inject_type",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = i7core_inject_type_show,
+ .store = i7core_inject_type_store,
+ }, {
+ .attr = {
+ .name = "inject_eccmask",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = i7core_inject_eccmask_show,
+ .store = i7core_inject_eccmask_store,
+ }, {
+ .grp = &i7core_inject_addrmatch,
+ }, {
+ .attr = {
+ .name = "inject_enable",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = i7core_inject_enable_show,
+ .store = i7core_inject_enable_store,
+ },
+ { .attr = { .name = NULL } }, /* Reserved for udimm counters */
+ { .attr = { .name = NULL } }
+};
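+
+/*
+ * Illustrative injection sequence using the nodes above (paths assume the
+ * standard EDAC sysfs layout):
+ *
+ *	echo 3 > .../mc/mc0/inject_type			# repeat + ECC error
+ *	echo 1 > .../mc/mc0/inject_eccmask		# flip one ECC bit (CE)
+ *	echo 0 > .../mc/mc0/inject_addrmatch/channel
+ *	echo any > .../mc/mc0/inject_addrmatch/dimm
+ *	echo 1 > .../mc/mc0/inject_enable
+ */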
+
+/****************************************************************************
+ Device initialization routines: put/get, init/exit
+ ****************************************************************************/
+
+/*
+ * i7core_put_devices 'put' all the devices that we have
+ * reserved via 'get'
+ */
+static void i7core_put_devices(struct i7core_dev *i7core_dev)
+{
+ int i;
+
+ debugf0(__FILE__ ": %s()\n", __func__);
+ for (i = 0; i < i7core_dev->n_devs; i++) {
+ struct pci_dev *pdev = i7core_dev->pdev[i];
+ if (!pdev)
+ continue;
+ debugf0("Removing dev %02x:%02x.%d\n",
+ pdev->bus->number,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+ pci_dev_put(pdev);
+ }
+ kfree(i7core_dev->pdev);
+ list_del(&i7core_dev->list);
+ kfree(i7core_dev);
+}
+
+static void i7core_put_all_devices(void)
+{
+ struct i7core_dev *i7core_dev, *tmp;
+
+ list_for_each_entry_safe(i7core_dev, tmp, &i7core_edac_list, list)
+ i7core_put_devices(i7core_dev);
+}
+
+static void i7core_xeon_pci_fixup(int dev_id)
+{
+ struct pci_dev *pdev = NULL;
+ int i;
+ /*
+ * On Xeon 55xx, the Intel QuickPath Arch Generic Non-core PCI buses
+ * aren't announced by ACPI, so we need to use a legacy scan probe
+ * to detect them
+ */
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL);
+ if (unlikely(!pdev)) {
+ for (i = 0; i < MAX_SOCKET_BUSES; i++)
+ pcibios_scan_specific_bus(255-i);
+ }
+}
+
+/*
+ * i7core_get_devices Find and perform 'get' operation on the MCH's
+ * device/functions we want to reference for this driver
+ *
+ * Need to 'get' device 16 func 1 and func 2
+ */
+int i7core_get_onedevice(struct pci_dev **prev, int devno,
+ struct pci_id_descr *dev_descr, unsigned n_devs)
+{
+ struct i7core_dev *i7core_dev;
+
+ struct pci_dev *pdev = NULL;
+ u8 bus = 0;
+ u8 socket = 0;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ dev_descr->dev_id, *prev);
+
+ /*
+ * On Xeon 55xx, the Intel QuickPath Arch Generic Non-core device
+ * is at PCI ID 8086:2c40, instead of 8086:2c41. So, we need
+ * to probe for the alternate address in case of failure
+ */
+ if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev)
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev);
+
+ if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev)
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT,
+ *prev);
+
+ if (!pdev) {
+ if (*prev) {
+ *prev = pdev;
+ return 0;
+ }
+
+ if (dev_descr->optional)
+ return 0;
+
+ i7core_printk(KERN_ERR,
+ "Device not found: dev %02x.%d PCI ID %04x:%04x\n",
+ dev_descr->dev, dev_descr->func,
+ PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
+
+ /* End of list, leave */
+ return -ENODEV;
+ }
+ bus = pdev->bus->number;
+
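+ /*
+ * The non-core buses count down from 255 (see MAX_SOCKET_BUSES), so
+ * socket = 255 - bus. On some systems the non-core devices appear on
+ * bus 0x3f instead; treat that as socket 0.
+ */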
+ if (bus == 0x3f)
+ socket = 0;
+ else
+ socket = 255 - bus;
+
+ i7core_dev = get_i7core_dev(socket);
+ if (!i7core_dev) {
+ i7core_dev = kzalloc(sizeof(*i7core_dev), GFP_KERNEL);
+ if (!i7core_dev)
+ return -ENOMEM;
+ i7core_dev->pdev = kzalloc(sizeof(*i7core_dev->pdev) * n_devs,
+ GFP_KERNEL);
+ if (!i7core_dev->pdev)
+ return -ENOMEM;
+ i7core_dev->socket = socket;
+ i7core_dev->n_devs = n_devs;
+ list_add_tail(&i7core_dev->list, &i7core_edac_list);
+ }
+
+ if (i7core_dev->pdev[devno]) {
+ i7core_printk(KERN_ERR,
+ "Duplicated device for "
+ "dev %02x:%02x.%d PCI ID %04x:%04x\n",
+ bus, dev_descr->dev, dev_descr->func,
+ PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
+ pci_dev_put(pdev);
+ return -ENODEV;
+ }
+
+ i7core_dev->pdev[devno] = pdev;
+
+ /* Sanity check */
+ if (unlikely(PCI_SLOT(pdev->devfn) != dev_descr->dev ||
+ PCI_FUNC(pdev->devfn) != dev_descr->func)) {
+ i7core_printk(KERN_ERR,
+ "Device PCI ID %04x:%04x "
+ "has dev %02x:%02x.%d instead of dev %02x:%02x.%d\n",
+ PCI_VENDOR_ID_INTEL, dev_descr->dev_id,
+ bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
+ bus, dev_descr->dev, dev_descr->func);
+ return -ENODEV;
+ }
+
+ /* Be sure that the device is enabled */
+ if (unlikely(pci_enable_device(pdev) < 0)) {
+ i7core_printk(KERN_ERR,
+ "Couldn't enable "
+ "dev %02x:%02x.%d PCI ID %04x:%04x\n",
+ bus, dev_descr->dev, dev_descr->func,
+ PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
+ return -ENODEV;
+ }
+
+ debugf0("Detected socket %d dev %02x:%02x.%d PCI ID %04x:%04x\n",
+ socket, bus, dev_descr->dev,
+ dev_descr->func,
+ PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
+
+ *prev = pdev;
+
+ return 0;
+}
+
+static int i7core_get_devices(struct pci_id_descr dev_descr[], unsigned n_devs)
+{
+ int i, rc;
+ struct pci_dev *pdev = NULL;
+
+ for (i = 0; i < n_devs; i++) {
+ pdev = NULL;
+ do {
+ rc = i7core_get_onedevice(&pdev, i, &dev_descr[i],
+ n_devs);
+ if (rc < 0) {
+ i7core_put_all_devices();
+ return -ENODEV;
+ }
+ } while (pdev);
+ }
+
+ return 0;
+}
+
+static int mci_bind_devs(struct mem_ctl_info *mci,
+ struct i7core_dev *i7core_dev)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ struct pci_dev *pdev;
+ int i, func, slot;
+
+ /* Associates i7core_dev and mci for future usage */
+ pvt->i7core_dev = i7core_dev;
+ i7core_dev->mci = mci;
+
+ pvt->is_registered = 0;
+ for (i = 0; i < i7core_dev->n_devs; i++) {
+ pdev = i7core_dev->pdev[i];
+ if (!pdev)
+ continue;
+
+ func = PCI_FUNC(pdev->devfn);
+ slot = PCI_SLOT(pdev->devfn);
+ if (slot == 3) {
+ if (unlikely(func > MAX_MCR_FUNC))
+ goto error;
+ pvt->pci_mcr[func] = pdev;
+ } else if (likely(slot >= 4 && slot < 4 + NUM_CHANS)) {
+ if (unlikely(func > MAX_CHAN_FUNC))
+ goto error;
+ pvt->pci_ch[slot - 4][func] = pdev;
+ } else if (!slot && !func)
+ pvt->pci_noncore = pdev;
+ else
+ goto error;
+
+ debugf0("Associated fn %d.%d, dev = %p, socket %d\n",
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
+ pdev, i7core_dev->socket);
+
+ if (PCI_SLOT(pdev->devfn) == 3 &&
+ PCI_FUNC(pdev->devfn) == 2)
+ pvt->is_registered = 1;
+ }
+
+ /*
+ * Add extra nodes to count errors on udimms.
+ * For registered memory, this is not needed, since the counters
+ * are already displayed at the standard locations
+ */
+ if (!pvt->is_registered)
+ i7core_sysfs_attrs[ARRAY_SIZE(i7core_sysfs_attrs)-2].grp =
+ &i7core_udimm_counters;
+
+ return 0;
+
+error:
+ i7core_printk(KERN_ERR, "Device %d, function %d "
+ "is out of the expected range\n",
+ slot, func);
+ return -EINVAL;
+}
+
+/****************************************************************************
+ Error check routines
+ ****************************************************************************/
+static void i7core_rdimm_update_csrow(struct mem_ctl_info *mci,
+ int chan, int dimm, int add)
+{
+ char *msg;
+ struct i7core_pvt *pvt = mci->pvt_info;
+ int row = pvt->csrow_map[chan][dimm], i;
+
+ for (i = 0; i < add; i++) {
+ msg = kasprintf(GFP_KERNEL, "Corrected error "
+ "(Socket=%d channel=%d dimm=%d)",
+ pvt->i7core_dev->socket, chan, dimm);
+
+ edac_mc_handle_fbd_ce(mci, row, 0, msg);
+ kfree (msg);
+ }
+}
+
+static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci,
+ int chan, int new0, int new1, int new2)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ int add0 = 0, add1 = 0, add2 = 0;
+ /* Updates CE counters if it is not the first time here */
+ if (pvt->ce_count_available) {
+ /* Updates CE counters */
+
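+ /*
+ * The hardware counters are 15 bits wide (see the 0x7fff masks above),
+ * so a negative delta means the counter wrapped around; e.g.
+ * last = 0x7ffe, new = 0x0003 gives add = 0x0003 - 0x7ffe + 0x7fff = 4.
+ */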
+ add2 = new2 - pvt->rdimm_last_ce_count[chan][2];
+ add1 = new1 - pvt->rdimm_last_ce_count[chan][1];
+ add0 = new0 - pvt->rdimm_last_ce_count[chan][0];
+
+ if (add2 < 0)
+ add2 += 0x7fff;
+ pvt->rdimm_ce_count[chan][2] += add2;
+
+ if (add1 < 0)
+ add1 += 0x7fff;
+ pvt->rdimm_ce_count[chan][1] += add1;
+
+ if (add0 < 0)
+ add0 += 0x7fff;
+ pvt->rdimm_ce_count[chan][0] += add0;
+ } else
+ pvt->ce_count_available = 1;
+
+ /* Store the new values */
+ pvt->rdimm_last_ce_count[chan][2] = new2;
+ pvt->rdimm_last_ce_count[chan][1] = new1;
+ pvt->rdimm_last_ce_count[chan][0] = new0;
+
+ /* update the EDAC core */
+ if (add0 != 0)
+ i7core_rdimm_update_csrow(mci, chan, 0, add0);
+ if (add1 != 0)
+ i7core_rdimm_update_csrow(mci, chan, 1, add1);
+ if (add2 != 0)
+ i7core_rdimm_update_csrow(mci, chan, 2, add2);
+
+}
+
+static void i7core_rdimm_check_mc_ecc_err(struct mem_ctl_info *mci)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ u32 rcv[3][2];
+ int i, new0, new1, new2;
+
+ /*Read DEV 3: FUN 2: MC_COR_ECC_CNT regs directly*/
+ pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_0,
+ &rcv[0][0]);
+ pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_1,
+ &rcv[0][1]);
+ pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_2,
+ &rcv[1][0]);
+ pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_3,
+ &rcv[1][1]);
+ pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_4,
+ &rcv[2][0]);
+ pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_5,
+ &rcv[2][1]);
+ for (i = 0 ; i < 3; i++) {
+ debugf3("MC_COR_ECC_CNT%d = 0x%x; MC_COR_ECC_CNT%d = 0x%x\n",
+ (i * 2), rcv[i][0], (i * 2) + 1, rcv[i][1]);
+ /*if the channel has 3 dimms*/
+ if (pvt->channel[i].dimms > 2) {
+ new0 = DIMM_BOT_COR_ERR(rcv[i][0]);
+ new1 = DIMM_TOP_COR_ERR(rcv[i][0]);
+ new2 = DIMM_BOT_COR_ERR(rcv[i][1]);
+ } else {
+ new0 = DIMM_TOP_COR_ERR(rcv[i][0]) +
+ DIMM_BOT_COR_ERR(rcv[i][0]);
+ new1 = DIMM_TOP_COR_ERR(rcv[i][1]) +
+ DIMM_BOT_COR_ERR(rcv[i][1]);
+ new2 = 0;
+ }
+
+ i7core_rdimm_update_ce_count(mci, i, new0, new1, new2);
+ }
+}
+
+/* This function is based on the device 3 function 4 registers as described on:
+ * Intel Xeon Processor 5500 Series Datasheet Volume 2
+ * http://www.intel.com/Assets/PDF/datasheet/321322.pdf
+ * also available at:
+ * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
+ */
+static void i7core_udimm_check_mc_ecc_err(struct mem_ctl_info *mci)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ u32 rcv1, rcv0;
+ int new0, new1, new2;
+
+ if (!pvt->pci_mcr[4]) {
+ debugf0("%s MCR registers not found\n", __func__);
+ return;
+ }
+
+ /* Corrected test errors */
+ pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV1, &rcv1);
+ pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV0, &rcv0);
+
+ /* Store the new values */
+ new2 = DIMM2_COR_ERR(rcv1);
+ new1 = DIMM1_COR_ERR(rcv0);
+ new0 = DIMM0_COR_ERR(rcv0);
+
+ /* Updates CE counters if it is not the first time here */
+ if (pvt->ce_count_available) {
+ /* Updates CE counters */
+ int add0, add1, add2;
+
+ add2 = new2 - pvt->udimm_last_ce_count[2];
+ add1 = new1 - pvt->udimm_last_ce_count[1];
+ add0 = new0 - pvt->udimm_last_ce_count[0];
+
+ if (add2 < 0)
+ add2 += 0x7fff;
+ pvt->udimm_ce_count[2] += add2;
+
+ if (add1 < 0)
+ add1 += 0x7fff;
+ pvt->udimm_ce_count[1] += add1;
+
+ if (add0 < 0)
+ add0 += 0x7fff;
+ pvt->udimm_ce_count[0] += add0;
+
+ if (add0 | add1 | add2)
+ i7core_printk(KERN_ERR, "New Corrected error(s): "
+ "dimm0: +%d, dimm1: +%d, dimm2 +%d\n",
+ add0, add1, add2);
+ } else
+ pvt->ce_count_available = 1;
+
+ /* Store the new values */
+ pvt->udimm_last_ce_count[2] = new2;
+ pvt->udimm_last_ce_count[1] = new1;
+ pvt->udimm_last_ce_count[0] = new0;
+}
+
+/*
+ * According to tables E-11 and E-12 of chapter E.3.3 of the Intel 64 and IA-32
+ * Architectures Software Developer’s Manual Volume 3B,
+ * Nehalem is defined as family 0x06, model 0x1a.
+ *
+ * The MCA registers used here are the following ones:
+ * struct mce field MCA Register
+ * m->status MSR_IA32_MC8_STATUS
+ * m->addr MSR_IA32_MC8_ADDR
+ * m->misc MSR_IA32_MC8_MISC
+ * In the case of Nehalem, the error information is carried in the .status and
+ * .misc fields
+ */
+static void i7core_mce_output_error(struct mem_ctl_info *mci,
+ struct mce *m)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ char *type, *optype, *err, *msg;
+ unsigned long error = m->status & 0x1ff0000l;
+ u32 optypenum = (m->status >> 4) & 0x07;
+ u32 core_err_cnt = (m->status >> 38) & 0x7fff;
+ u32 dimm = (m->misc >> 16) & 0x3;
+ u32 channel = (m->misc >> 18) & 0x3;
+ u32 syndrome = m->misc >> 32;
+ u32 errnum = find_first_bit(&error, 32);
+ int csrow;
+
+ if (m->mcgstatus & 1)
+ type = "FATAL";
+ else
+ type = "NON_FATAL";
+
+ switch (optypenum) {
+ case 0:
+ optype = "generic undef request";
+ break;
+ case 1:
+ optype = "read error";
+ break;
+ case 2:
+ optype = "write error";
+ break;
+ case 3:
+ optype = "addr/cmd error";
+ break;
+ case 4:
+ optype = "scrubbing error";
+ break;
+ default:
+ optype = "reserved";
+ break;
+ }
+
+ switch (errnum) {
+ case 16:
+ err = "read ECC error";
+ break;
+ case 17:
+ err = "RAS ECC error";
+ break;
+ case 18:
+ err = "write parity error";
+ break;
+ case 19:
+ err = "redundacy loss";
+ break;
+ case 20:
+ err = "reserved";
+ break;
+ case 21:
+ err = "memory range error";
+ break;
+ case 22:
+ err = "RTID out of range";
+ break;
+ case 23:
+ err = "address parity error";
+ break;
+ case 24:
+ err = "byte enable parity error";
+ break;
+ default:
+ err = "unknown";
+ }
+
+ /* FIXME: should convert addr into bank and rank information */
+ msg = kasprintf(GFP_ATOMIC,
+ "%s (addr = 0x%08llx, cpu=%d, Dimm=%d, Channel=%d, "
+ "syndrome=0x%08x, count=%d, Err=%08llx:%08llx (%s: %s))\n",
+ type, (long long) m->addr, m->cpu, dimm, channel,
+ syndrome, core_err_cnt, (long long)m->status,
+ (long long)m->misc, optype, err);
+
+ debugf0("%s", msg);
+
+ csrow = pvt->csrow_map[channel][dimm];
+
+ /* Call the helper to output message */
+ if (m->mcgstatus & 1)
+ edac_mc_handle_fbd_ue(mci, csrow, 0,
+ 0 /* FIXME: should be channel here */, msg);
+ else if (!pvt->is_registered)
+ edac_mc_handle_fbd_ce(mci, csrow,
+ 0 /* FIXME: should be channel here */, msg);
+
+ kfree(msg);
+}
+
+/*
+ * i7core_check_error Retrieve and process errors reported by the
+ * hardware. Called by the Core module.
+ */
+static void i7core_check_error(struct mem_ctl_info *mci)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ int i;
+ unsigned count = 0;
+ struct mce *m;
+
+ /*
+ * MCE first step: Copy all mce errors into a temporary buffer
+ * We use double buffering here, to reduce the risk of
+ * losing an error.
+ */
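+ /*
+ * Ring buffer convention: mce_out is only advanced by the NMI-time
+ * producer (i7core_mce_check_error) and mce_in only by this consumer,
+ * so the ring works without locks, relying on the smp_rmb()/smp_wmb()
+ * barriers below.
+ */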
+ smp_rmb();
+ count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in)
+ % MCE_LOG_LEN;
+ if (!count)
+ return;
+
+ m = pvt->mce_outentry;
+ if (pvt->mce_in + count > MCE_LOG_LEN) {
+ unsigned l = MCE_LOG_LEN - pvt->mce_in;
+
+ memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l);
+ smp_wmb();
+ pvt->mce_in = 0;
+ count -= l;
+ m += l;
+ }
+ memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count);
+ smp_wmb();
+ pvt->mce_in += count;
+
+ smp_rmb();
+ if (pvt->mce_overrun) {
+ i7core_printk(KERN_ERR, "Lost %d memory errors\n",
+ pvt->mce_overrun);
+ smp_wmb();
+ pvt->mce_overrun = 0;
+ }
+
+ /*
+ * MCE second step: parse errors and display
+ */
+ for (i = 0; i < count; i++)
+ i7core_mce_output_error(mci, &pvt->mce_outentry[i]);
+
+ /*
+ * Now, let's increment CE error counts
+ */
+ if (!pvt->is_registered)
+ i7core_udimm_check_mc_ecc_err(mci);
+ else
+ i7core_rdimm_check_mc_ecc_err(mci);
+}
+
+/*
+ * i7core_mce_check_error Replicates mcelog routine to get errors
+ * This routine simply queues mcelog errors, and
+ * returns. The error itself should be handled later
+ * by i7core_check_error.
+ * WARNING: As this routine should be called at NMI time, extra care should
+ * be taken to avoid deadlocks, and to be as fast as possible.
+ */
+static int i7core_mce_check_error(void *priv, struct mce *mce)
+{
+ struct mem_ctl_info *mci = priv;
+ struct i7core_pvt *pvt = mci->pvt_info;
+
+ /*
+ * Just let mcelog handle it if the error is
+ * outside the memory controller
+ */
+ if (((mce->status & 0xffff) >> 7) != 1)
+ return 0;
+
+ /* Bank 8 registers are the only ones that we know how to handle */
+ if (mce->bank != 8)
+ return 0;
+
+#ifdef CONFIG_SMP
+ /* Only handle it if it is from the right memory controller */
+ if (cpu_data(mce->cpu).phys_proc_id != pvt->i7core_dev->socket)
+ return 0;
+#endif
+
+ smp_rmb();
+ if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
+ smp_wmb();
+ pvt->mce_overrun++;
+ return 0;
+ }
+
+ /* Copy the memory error into the ring buffer */
+ memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce));
+ smp_wmb();
+ pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN;
+
+ /* Handle fatal errors immediately */
+ if (mce->mcgstatus & 1)
+ i7core_check_error(mci);
+
+ /* Advise mcelog that the error was handled */
+ return 1;
+}
+
+static int i7core_register_mci(struct i7core_dev *i7core_dev,
+ int num_channels, int num_csrows)
+{
+ struct mem_ctl_info *mci;
+ struct i7core_pvt *pvt;
+ int csrow = 0;
+ int rc;
+
+ /* allocate a new MC control structure */
+ mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels,
+ i7core_dev->socket);
+ if (unlikely(!mci))
+ return -ENOMEM;
+
+ debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
+
+ /* record ptr to the generic device */
+ mci->dev = &i7core_dev->pdev[0]->dev;
+
+ pvt = mci->pvt_info;
+ memset(pvt, 0, sizeof(*pvt));
+
+ /*
+ * FIXME: how to handle RDDR3 at MCI level? It is possible to have
+ * Mixed RDDR3/UDDR3 with Nehalem, provided that they are on different
+ * memory channels
+ */
+ mci->mtype_cap = MEM_FLAG_DDR3;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE;
+ mci->edac_cap = EDAC_FLAG_NONE;
+ mci->mod_name = "i7core_edac.c";
+ mci->mod_ver = I7CORE_REVISION;
+ mci->ctl_name = kasprintf(GFP_KERNEL, "i7 core #%d",
+ i7core_dev->socket);
+ mci->dev_name = pci_name(i7core_dev->pdev[0]);
+ mci->ctl_page_to_phys = NULL;
+ mci->mc_driver_sysfs_attributes = i7core_sysfs_attrs;
+ /* Set the function pointer to an actual operation function */
+ mci->edac_check = i7core_check_error;
+
+ /* Store pci devices at mci for faster access */
+ rc = mci_bind_devs(mci, i7core_dev);
+ if (unlikely(rc < 0))
+ goto fail;
+
+ /* Get dimm basic config */
+ get_dimm_config(mci, &csrow);
+
+ /* add this new MC control structure to EDAC's list of MCs */
+ if (unlikely(edac_mc_add_mc(mci))) {
+ debugf0("MC: " __FILE__
+ ": %s(): failed edac_mc_add_mc()\n", __func__);
+ /* FIXME: perhaps some code should go here that disables error
+ * reporting if we just enabled it
+ */
+
+ rc = -EINVAL;
+ goto fail;
+ }
+
+ /* allocating generic PCI control info */
+ i7core_pci = edac_pci_create_generic_ctl(&i7core_dev->pdev[0]->dev,
+ EDAC_MOD_STR);
+ if (unlikely(!i7core_pci)) {
+ printk(KERN_WARNING
+ "%s(): Unable to create PCI control\n",
+ __func__);
+ printk(KERN_WARNING
+ "%s(): PCI error report via EDAC not setup\n",
+ __func__);
+ }
+
+ /* Default error mask is any memory */
+ pvt->inject.channel = 0;
+ pvt->inject.dimm = -1;
+ pvt->inject.rank = -1;
+ pvt->inject.bank = -1;
+ pvt->inject.page = -1;
+ pvt->inject.col = -1;
+
+ /* Register with edac_mce in order to receive memory errors */
+ pvt->edac_mce.priv = mci;
+ pvt->edac_mce.check_error = i7core_mce_check_error;
+
+ rc = edac_mce_register(&pvt->edac_mce);
+ if (unlikely(rc < 0)) {
+ debugf0("MC: " __FILE__
+ ": %s(): failed edac_mce_register()\n", __func__);
+ }
+
+ return 0;
+
+fail:
+ edac_mc_free(mci);
+ return rc;
+}
+
+/*
+ * i7core_probe Probe for ONE instance of device to see if it is
+ * present.
+ * return:
+ * 0 if a device was found
+ * < 0 for an error code
+ */
+static int __devinit i7core_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int dev_idx = id->driver_data;
+ int rc;
+ struct i7core_dev *i7core_dev;
+
+ /*
+ * All memory controllers are allocated at the first pass.
+ */
+ if (unlikely(dev_idx >= 1))
+ return -EINVAL;
+
+ /* get the pci devices we want to reserve for our use */
+ mutex_lock(&i7core_edac_lock);
+
+ if (pdev->device == PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0) {
+ printk(KERN_INFO "i7core_edac: detected a "
+ "Lynnfield processor\n");
+ rc = i7core_get_devices(pci_dev_descr_lynnfield,
+ ARRAY_SIZE(pci_dev_descr_lynnfield));
+ } else {
+ printk(KERN_INFO "i7core_edac: detected a "
+ "Nehalem/Nehalem-EP processor\n");
+ rc = i7core_get_devices(pci_dev_descr_i7core,
+ ARRAY_SIZE(pci_dev_descr_i7core));
+ }
+
+ if (unlikely(rc < 0))
+ goto fail0;
+
+ list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
+ int channels;
+ int csrows;
+
+ /* Check the number of active and not disabled channels */
+ rc = i7core_get_active_channels(i7core_dev->socket,
+ &channels, &csrows);
+ if (unlikely(rc < 0))
+ goto fail1;
+
+ rc = i7core_register_mci(i7core_dev, channels, csrows);
+ if (unlikely(rc < 0))
+ goto fail1;
+ }
+
+ i7core_printk(KERN_INFO, "Driver loaded.\n");
+
+ mutex_unlock(&i7core_edac_lock);
+ return 0;
+
+fail1:
+ i7core_put_all_devices();
+fail0:
+ mutex_unlock(&i7core_edac_lock);
+ return rc;
+}
+
+/*
+ * i7core_remove destructor for one instance of device
+ *
+ */
+static void __devexit i7core_remove(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+ struct i7core_dev *i7core_dev, *tmp;
+
+ debugf0(__FILE__ ": %s()\n", __func__);
+
+ if (i7core_pci)
+ edac_pci_release_generic_ctl(i7core_pci);
+
+ /*
+ * there is a problem here: the pdev value passed in for removal will be wrong,
+ * since it will point to the X58 register used to detect that the machine
+ * is a Nehalem or newer design. However, due to the way several PCI
+ * devices are grouped together to provide MC functionality, we need
+ * to use a different method for releasing the devices.
+ */
+
+ mutex_lock(&i7core_edac_lock);
+ list_for_each_entry_safe(i7core_dev, tmp, &i7core_edac_list, list) {
+ mci = edac_mc_del_mc(&i7core_dev->pdev[0]->dev);
+ if (mci) {
+ struct i7core_pvt *pvt = mci->pvt_info;
+
+ i7core_dev = pvt->i7core_dev;
+ edac_mce_unregister(&pvt->edac_mce);
+ kfree(mci->ctl_name);
+ edac_mc_free(mci);
+ i7core_put_devices(i7core_dev);
+ } else {
+ i7core_printk(KERN_ERR,
+ "Couldn't find mci for socket %d\n",
+ i7core_dev->socket);
+ }
+ }
+ mutex_unlock(&i7core_edac_lock);
+}
+
+MODULE_DEVICE_TABLE(pci, i7core_pci_tbl);
+
+/*
+ * i7core_driver pci_driver structure for this module
+ *
+ */
+static struct pci_driver i7core_driver = {
+ .name = "i7core_edac",
+ .probe = i7core_probe,
+ .remove = __devexit_p(i7core_remove),
+ .id_table = i7core_pci_tbl,
+};
+
+/*
+ * i7core_init Module entry function
+ * Try to initialize this module for its devices
+ */
+static int __init i7core_init(void)
+{
+ int pci_rc;
+
+ debugf2("MC: " __FILE__ ": %s()\n", __func__);
+
+ /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+ opstate_init();
+
+ i7core_xeon_pci_fixup(pci_dev_descr_i7core[0].dev_id);
+
+ pci_rc = pci_register_driver(&i7core_driver);
+
+ if (pci_rc >= 0)
+ return 0;
+
+ i7core_printk(KERN_ERR, "Failed to register device with error %d.\n",
+ pci_rc);
+
+ return pci_rc;
+}
+
+/*
+ * i7core_exit() Module exit function
+ * Unregister the driver
+ */
+static void __exit i7core_exit(void)
+{
+ debugf2("MC: " __FILE__ ": %s()\n", __func__);
+ pci_unregister_driver(&i7core_driver);
+}
+
+module_init(i7core_init);
+module_exit(i7core_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
+MODULE_DESCRIPTION("MC Driver for Intel i7 Core memory controllers - "
+ I7CORE_REVISION);
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
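
For orientation, a minimal user-space sketch of the index arithmetic behind the MCE ring buffer used above: i7core_mce_check_error() is the producer (it fills mce_entry[] and advances mce_out at NMI time) and i7core_check_error() is the consumer (it drains entries starting at mce_in from the polling path). The demo_*() helpers and DEMO_LOG_LEN below are hypothetical stand-ins for the driver's fields; they only illustrate how one slot is effectively kept free so that a full ring can be distinguished from an empty one.

    #include <stdio.h>

    #define DEMO_LOG_LEN 32                 /* mirrors MCE_LOG_LEN in the driver */

    /* entries queued but not yet consumed, as computed in i7core_check_error() */
    static unsigned demo_pending(unsigned mce_out, unsigned mce_in)
    {
            return (mce_out + DEMO_LOG_LEN - mce_in) % DEMO_LOG_LEN;
    }

    /* "ring full" test used in i7core_mce_check_error() before queueing */
    static int demo_full(unsigned mce_out, unsigned mce_in)
    {
            return (mce_out + 1) % DEMO_LOG_LEN == mce_in;
    }

    int main(void)
    {
            unsigned mce_in = 30, mce_out = 2;      /* producer has wrapped around */

            printf("pending=%u full=%d\n",
                   demo_pending(mce_out, mce_in), demo_full(mce_out, mce_in));
            return 0;
    }
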
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 5045156c5313..42cf911b73cf 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -63,7 +63,7 @@ static size_t config_rom_length = 1 + 4 + 1 + 1;
#define BIB_CRC(v) ((v) << 0)
#define BIB_CRC_LENGTH(v) ((v) << 16)
#define BIB_INFO_LENGTH(v) ((v) << 24)
-
+#define BIB_BUS_NAME 0x31333934 /* "1394" */
#define BIB_LINK_SPEED(v) ((v) << 0)
#define BIB_GENERATION(v) ((v) << 4)
#define BIB_MAX_ROM(v) ((v) << 8)
@@ -73,7 +73,8 @@ static size_t config_rom_length = 1 + 4 + 1 + 1;
#define BIB_BMC ((1) << 28)
#define BIB_ISC ((1) << 29)
#define BIB_CMC ((1) << 30)
-#define BIB_IMC ((1) << 31)
+#define BIB_IRMC ((1) << 31)
+#define NODE_CAPABILITIES 0x0c0083c0 /* per IEEE 1394 clause 8.3.2.6.5.2 */
static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
{
@@ -91,18 +92,18 @@ static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
config_rom[0] = cpu_to_be32(
BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0));
- config_rom[1] = cpu_to_be32(0x31333934);
+ config_rom[1] = cpu_to_be32(BIB_BUS_NAME);
config_rom[2] = cpu_to_be32(
BIB_LINK_SPEED(card->link_speed) |
BIB_GENERATION(card->config_rom_generation++ % 14 + 2) |
BIB_MAX_ROM(2) |
BIB_MAX_RECEIVE(card->max_receive) |
- BIB_BMC | BIB_ISC | BIB_CMC | BIB_IMC);
+ BIB_BMC | BIB_ISC | BIB_CMC | BIB_IRMC);
config_rom[3] = cpu_to_be32(card->guid >> 32);
config_rom[4] = cpu_to_be32(card->guid);
/* Generate root directory. */
- config_rom[6] = cpu_to_be32(0x0c0083c0); /* node capabilities */
+ config_rom[6] = cpu_to_be32(NODE_CAPABILITIES);
i = 7;
j = 7 + descriptor_count;
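
The BIB_BUS_NAME value introduced above is simply the bus name "1394" packed as four ASCII bytes, most significant byte first. A tiny stand-alone check (hypothetical, not part of the patch) makes that explicit:

    #include <stdio.h>

    int main(void)
    {
            unsigned int bus_name = 0x31333934;     /* BIB_BUS_NAME */

            /* prints "1394": 0x31 '1', 0x33 '3', 0x39 '9', 0x34 '4' */
            printf("%c%c%c%c\n",
                   (bus_name >> 24) & 0xff, (bus_name >> 16) & 0xff,
                   (bus_name >> 8) & 0xff, bus_name & 0xff);
            return 0;
    }
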
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 14a34d99eea2..5bf106b9d791 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -227,7 +227,7 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
list_add_tail(&client->link, &device->client_list);
mutex_unlock(&device->client_list_mutex);
- return 0;
+ return nonseekable_open(inode, file);
}
static void queue_event(struct client *client, struct event *event,
@@ -1496,13 +1496,13 @@ static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
const struct file_operations fw_device_ops = {
.owner = THIS_MODULE,
+ .llseek = no_llseek,
.open = fw_device_op_open,
.read = fw_device_op_read,
.unlocked_ioctl = fw_device_op_ioctl,
- .poll = fw_device_op_poll,
- .release = fw_device_op_release,
.mmap = fw_device_op_mmap,
-
+ .release = fw_device_op_release,
+ .poll = fw_device_op_poll,
#ifdef CONFIG_COMPAT
.compat_ioctl = fw_device_op_compat_ioctl,
#endif
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 673b03f8b4ec..901669876c25 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -229,6 +229,23 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
packet->payload_mapped = false;
}
+static int allocate_tlabel(struct fw_card *card)
+{
+ int tlabel;
+
+ tlabel = card->current_tlabel;
+ while (card->tlabel_mask & (1ULL << tlabel)) {
+ tlabel = (tlabel + 1) & 0x3f;
+ if (tlabel == card->current_tlabel)
+ return -EBUSY;
+ }
+
+ card->current_tlabel = (tlabel + 1) & 0x3f;
+ card->tlabel_mask |= 1ULL << tlabel;
+
+ return tlabel;
+}
+
/**
* This function provides low-level access to the IEEE1394 transaction
* logic. Most C programs would use either fw_read(), fw_write() or
@@ -290,16 +307,13 @@ void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
spin_lock_irqsave(&card->lock, flags);
- tlabel = card->current_tlabel;
- if (card->tlabel_mask & (1ULL << tlabel)) {
+ tlabel = allocate_tlabel(card);
+ if (tlabel < 0) {
spin_unlock_irqrestore(&card->lock, flags);
callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data);
return;
}
- card->current_tlabel = (card->current_tlabel + 1) & 0x3f;
- card->tlabel_mask |= (1ULL << tlabel);
-
t->node_id = destination_id;
t->tlabel = tlabel;
t->callback = callback;
@@ -828,7 +842,7 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
list_for_each_entry(t, &card->transaction_list, link) {
if (t->node_id == source && t->tlabel == tlabel) {
list_del(&t->link);
- card->tlabel_mask &= ~(1 << t->tlabel);
+ card->tlabel_mask &= ~(1ULL << t->tlabel);
break;
}
}
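
A stand-alone model of the allocate_tlabel() helper factored out above may be useful: transaction labels are 6-bit values handed out round-robin, with a 64-bit mask tracking the labels still in flight, which is also why the clearing in fw_core_handle_response() had to become ~(1ULL << t->tlabel). The demo_ prefix and the plain -1 error return (instead of -EBUSY) are stand-ins for the driver's card state.

    #include <stdio.h>

    static int demo_allocate_tlabel(unsigned long long *mask, int *current_tlabel)
    {
            int tlabel = *current_tlabel;

            /* scan forward from the last label handed out, wrapping at 64 */
            while (*mask & (1ULL << tlabel)) {
                    tlabel = (tlabel + 1) & 0x3f;
                    if (tlabel == *current_tlabel)
                            return -1;              /* all 64 labels in flight */
            }

            *current_tlabel = (tlabel + 1) & 0x3f;
            *mask |= 1ULL << tlabel;

            return tlabel;
    }

    int main(void)
    {
            unsigned long long mask = 0;
            int cur = 62, i;

            /* allocates 62, 63, 0, 1 -- note the wrap from 63 back to 0 */
            for (i = 0; i < 4; i++)
                    printf("tlabel %d\n", demo_allocate_tlabel(&mask, &cur));
            return 0;
    }
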
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index fb0321300cce..7a9759bf6837 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -27,7 +27,12 @@ struct fw_packet;
#define PHY_LINK_ACTIVE 0x80
#define PHY_CONTENDER 0x40
#define PHY_BUS_RESET 0x40
+#define PHY_EXTENDED_REGISTERS 0xe0
#define PHY_BUS_SHORT_RESET 0x40
+#define PHY_INT_STATUS_BITS 0x3c
+#define PHY_ENABLE_ACCEL 0x02
+#define PHY_ENABLE_MULTI 0x01
+#define PHY_PAGE_SELECT 0xe0
#define BANDWIDTH_AVAILABLE_INITIAL 4915
#define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31)
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 94b16e0340ae..9f627e758cfc 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -236,13 +236,15 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
#define QUIRK_CYCLE_TIMER 1
#define QUIRK_RESET_PACKET 2
#define QUIRK_BE_HEADERS 4
+#define QUIRK_NO_1394A 8
/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
unsigned short vendor, device, flags;
} ohci_quirks[] = {
{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, QUIRK_CYCLE_TIMER |
- QUIRK_RESET_PACKET},
+ QUIRK_RESET_PACKET |
+ QUIRK_NO_1394A},
{PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET},
{PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
{PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
@@ -257,15 +259,16 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
", nonatomic cycle timer = " __stringify(QUIRK_CYCLE_TIMER)
", reset packet generation = " __stringify(QUIRK_RESET_PACKET)
", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS)
+ ", no 1394a enhancements = " __stringify(QUIRK_NO_1394A)
")");
-#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
-
#define OHCI_PARAM_DEBUG_AT_AR 1
#define OHCI_PARAM_DEBUG_SELFIDS 2
#define OHCI_PARAM_DEBUG_IRQS 4
#define OHCI_PARAM_DEBUG_BUSRESETS 8 /* only effective before chip init */
+#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
+
static int param_debug;
module_param_named(debug, param_debug, int, 0644);
MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
@@ -438,9 +441,10 @@ static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
#else
-#define log_irqs(evt)
-#define log_selfids(node_id, generation, self_id_count, sid)
-#define log_ar_at_event(dir, speed, header, evt)
+#define param_debug 0
+static inline void log_irqs(u32 evt) {}
+static inline void log_selfids(int node_id, int generation, int self_id_count, u32 *s) {}
+static inline void log_ar_at_event(char dir, int speed, u32 *header, int evt) {}
#endif /* CONFIG_FIREWIRE_OHCI_DEBUG */
@@ -460,27 +464,71 @@ static inline void flush_writes(const struct fw_ohci *ohci)
reg_read(ohci, OHCI1394_Version);
}
-static int ohci_update_phy_reg(struct fw_card *card, int addr,
- int clear_bits, int set_bits)
+static int read_phy_reg(struct fw_ohci *ohci, int addr)
{
- struct fw_ohci *ohci = fw_ohci(card);
- u32 val, old;
+ u32 val;
+ int i;
reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
- flush_writes(ohci);
- msleep(2);
- val = reg_read(ohci, OHCI1394_PhyControl);
- if ((val & OHCI1394_PhyControl_ReadDone) == 0) {
- fw_error("failed to set phy reg bits.\n");
- return -EBUSY;
+ for (i = 0; i < 10; i++) {
+ val = reg_read(ohci, OHCI1394_PhyControl);
+ if (val & OHCI1394_PhyControl_ReadDone)
+ return OHCI1394_PhyControl_ReadData(val);
+
+ msleep(1);
}
+ fw_error("failed to read phy reg\n");
+
+ return -EBUSY;
+}
+
+static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
+{
+ int i;
- old = OHCI1394_PhyControl_ReadData(val);
- old = (old & ~clear_bits) | set_bits;
reg_write(ohci, OHCI1394_PhyControl,
- OHCI1394_PhyControl_Write(addr, old));
+ OHCI1394_PhyControl_Write(addr, val));
+ for (i = 0; i < 100; i++) {
+ val = reg_read(ohci, OHCI1394_PhyControl);
+ if (!(val & OHCI1394_PhyControl_WritePending))
+ return 0;
- return 0;
+ msleep(1);
+ }
+ fw_error("failed to write phy reg\n");
+
+ return -EBUSY;
+}
+
+static int ohci_update_phy_reg(struct fw_card *card, int addr,
+ int clear_bits, int set_bits)
+{
+ struct fw_ohci *ohci = fw_ohci(card);
+ int ret;
+
+ ret = read_phy_reg(ohci, addr);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The interrupt status bits are cleared by writing a one bit.
+ * Avoid clearing them unless explicitly requested in set_bits.
+ */
+ if (addr == 5)
+ clear_bits |= PHY_INT_STATUS_BITS;
+
+ return write_phy_reg(ohci, addr, (ret & ~clear_bits) | set_bits);
+}
+
+static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
+{
+ int ret;
+
+ ret = ohci_update_phy_reg(&ohci->card, 7, PHY_PAGE_SELECT, page << 5);
+ if (ret < 0)
+ return ret;
+
+ return read_phy_reg(ohci, addr);
}
static int ar_context_add_page(struct ar_context *ctx)
@@ -1351,7 +1399,7 @@ static void bus_reset_tasklet(unsigned long data)
* was set up before this reset, the old one is now no longer
* in use and we can free it. Update the config rom pointers
* to point to the current config rom and clear the
- * next_config_rom pointer so a new udpate can take place.
+ * next_config_rom pointer so a new update can take place.
*/
if (ohci->next_config_rom != NULL) {
@@ -1495,13 +1543,64 @@ static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length)
memset(&dest[length], 0, CONFIG_ROM_SIZE - size);
}
+static int configure_1394a_enhancements(struct fw_ohci *ohci)
+{
+ bool enable_1394a;
+ int ret, clear, set, offset;
+
+ /* Check if the driver should configure link and PHY. */
+ if (!(reg_read(ohci, OHCI1394_HCControlSet) &
+ OHCI1394_HCControl_programPhyEnable))
+ return 0;
+
+ /* Paranoia: check whether the PHY supports 1394a, too. */
+ enable_1394a = false;
+ ret = read_phy_reg(ohci, 2);
+ if (ret < 0)
+ return ret;
+ if ((ret & PHY_EXTENDED_REGISTERS) == PHY_EXTENDED_REGISTERS) {
+ ret = read_paged_phy_reg(ohci, 1, 8);
+ if (ret < 0)
+ return ret;
+ if (ret >= 1)
+ enable_1394a = true;
+ }
+
+ if (ohci->quirks & QUIRK_NO_1394A)
+ enable_1394a = false;
+
+ /* Configure PHY and link consistently. */
+ if (enable_1394a) {
+ clear = 0;
+ set = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
+ } else {
+ clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
+ set = 0;
+ }
+ ret = ohci_update_phy_reg(&ohci->card, 5, clear, set);
+ if (ret < 0)
+ return ret;
+
+ if (enable_1394a)
+ offset = OHCI1394_HCControlSet;
+ else
+ offset = OHCI1394_HCControlClear;
+ reg_write(ohci, offset, OHCI1394_HCControl_aPhyEnhanceEnable);
+
+ /* Clean up: configuration has been taken care of. */
+ reg_write(ohci, OHCI1394_HCControlClear,
+ OHCI1394_HCControl_programPhyEnable);
+
+ return 0;
+}
+
static int ohci_enable(struct fw_card *card,
const __be32 *config_rom, size_t length)
{
struct fw_ohci *ohci = fw_ohci(card);
struct pci_dev *dev = to_pci_dev(card->device);
u32 lps;
- int i;
+ int i, ret;
if (software_reset(ohci)) {
fw_error("Failed to reset ohci card.\n");
@@ -1565,10 +1664,14 @@ static int ohci_enable(struct fw_card *card,
if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
+ ret = configure_1394a_enhancements(ohci);
+ if (ret < 0)
+ return ret;
+
/* Activate link_on bit and contender bit in our self ID packets.*/
- if (ohci_update_phy_reg(card, 4, 0,
- PHY_LINK_ACTIVE | PHY_CONTENDER) < 0)
- return -EIO;
+ ret = ohci_update_phy_reg(card, 4, 0, PHY_LINK_ACTIVE | PHY_CONTENDER);
+ if (ret < 0)
+ return ret;
/*
* When the link is not yet enabled, the atomic config rom
@@ -2304,7 +2407,7 @@ static const struct fw_card_driver ohci_driver = {
};
#ifdef CONFIG_PPC_PMAC
-static void ohci_pmac_on(struct pci_dev *dev)
+static void pmac_ohci_on(struct pci_dev *dev)
{
if (machine_is(powermac)) {
struct device_node *ofn = pci_device_to_OF_node(dev);
@@ -2316,7 +2419,7 @@ static void ohci_pmac_on(struct pci_dev *dev)
}
}
-static void ohci_pmac_off(struct pci_dev *dev)
+static void pmac_ohci_off(struct pci_dev *dev)
{
if (machine_is(powermac)) {
struct device_node *ofn = pci_device_to_OF_node(dev);
@@ -2328,15 +2431,15 @@ static void ohci_pmac_off(struct pci_dev *dev)
}
}
#else
-#define ohci_pmac_on(dev)
-#define ohci_pmac_off(dev)
+static inline void pmac_ohci_on(struct pci_dev *dev) {}
+static inline void pmac_ohci_off(struct pci_dev *dev) {}
#endif /* CONFIG_PPC_PMAC */
static int __devinit pci_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
{
struct fw_ohci *ohci;
- u32 bus_options, max_receive, link_speed, version;
+ u32 bus_options, max_receive, link_speed, version, link_enh;
u64 guid;
int i, err, n_ir, n_it;
size_t size;
@@ -2349,7 +2452,7 @@ static int __devinit pci_probe(struct pci_dev *dev,
fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);
- ohci_pmac_on(dev);
+ pmac_ohci_on(dev);
err = pci_enable_device(dev);
if (err) {
@@ -2389,6 +2492,23 @@ static int __devinit pci_probe(struct pci_dev *dev,
if (param_quirks)
ohci->quirks = param_quirks;
+ /* TI OHCI-Lynx and compatible: set recommended configuration bits. */
+ if (dev->vendor == PCI_VENDOR_ID_TI) {
+ pci_read_config_dword(dev, PCI_CFG_TI_LinkEnh, &link_enh);
+
+ /* adjust latency of ATx FIFO: use 1.7 KB threshold */
+ link_enh &= ~TI_LinkEnh_atx_thresh_mask;
+ link_enh |= TI_LinkEnh_atx_thresh_1_7K;
+
+ /* use priority arbitration for asynchronous responses */
+ link_enh |= TI_LinkEnh_enab_unfair;
+
+ /* required for aPhyEnhanceEnable to work */
+ link_enh |= TI_LinkEnh_enab_accel;
+
+ pci_write_config_dword(dev, PCI_CFG_TI_LinkEnh, link_enh);
+ }
+
ar_context_init(&ohci->ar_request_ctx, ohci,
OHCI1394_AsReqRcvContextControlSet);
@@ -2466,7 +2586,7 @@ static int __devinit pci_probe(struct pci_dev *dev,
pci_disable_device(dev);
fail_free:
kfree(&ohci->card);
- ohci_pmac_off(dev);
+ pmac_ohci_off(dev);
fail:
if (err == -ENOMEM)
fw_error("Out of memory\n");
@@ -2509,7 +2629,7 @@ static void pci_remove(struct pci_dev *dev)
pci_release_region(dev, 0);
pci_disable_device(dev);
kfree(&ohci->card);
- ohci_pmac_off(dev);
+ pmac_ohci_off(dev);
fw_notify("Removed fw-ohci device.\n");
}
@@ -2530,7 +2650,7 @@ static int pci_suspend(struct pci_dev *dev, pm_message_t state)
err = pci_set_power_state(dev, pci_choose_state(dev, state));
if (err)
fw_error("pci_set_power_state failed with %d\n", err);
- ohci_pmac_off(dev);
+ pmac_ohci_off(dev);
return 0;
}
@@ -2540,7 +2660,7 @@ static int pci_resume(struct pci_dev *dev)
struct fw_ohci *ohci = pci_get_drvdata(dev);
int err;
- ohci_pmac_on(dev);
+ pmac_ohci_on(dev);
pci_set_power_state(dev, PCI_D0);
pci_restore_state(dev);
err = pci_enable_device(dev);
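
A minimal sketch of the bounded-polling pattern that the new read_phy_reg()/write_phy_reg() helpers above use in place of the old fixed msleep(2): the PhyControl register is re-read until the done/pending bit changes state or a retry budget is exhausted. The MMIO access is stubbed with a plain variable here, and everything prefixed demo_/fake_ is hypothetical.

    #include <stdio.h>

    #define DEMO_READ_DONE    0x80000000u           /* OHCI1394_PhyControl_ReadDone */
    #define DEMO_READ_DATA(v) (((v) & 0x00ff0000u) >> 16)

    static unsigned int fake_phy_control;           /* stands in for the MMIO register */

    static int demo_read_phy_reg(void)
    {
            int i;

            for (i = 0; i < 10; i++) {
                    unsigned int val = fake_phy_control;

                    if (val & DEMO_READ_DONE)
                            return DEMO_READ_DATA(val);
                    /* the driver does msleep(1) here before retrying */
            }
            return -1;      /* the driver returns -EBUSY and logs an error */
    }

    int main(void)
    {
            /* pretend the PHY answered with 0x3f in the data field */
            fake_phy_control = DEMO_READ_DONE | (0x3f << 16);
            printf("phy reg = %d\n", demo_read_phy_reg());
            return 0;
    }
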
diff --git a/drivers/firewire/ohci.h b/drivers/firewire/ohci.h
index ba492d85c516..3bc9a5d744eb 100644
--- a/drivers/firewire/ohci.h
+++ b/drivers/firewire/ohci.h
@@ -67,7 +67,7 @@
#define OHCI1394_PhyControl_ReadDone 0x80000000
#define OHCI1394_PhyControl_ReadData(r) (((r) & 0x00ff0000) >> 16)
#define OHCI1394_PhyControl_Write(addr, data) (((addr) << 8) | (data) | 0x00004000)
-#define OHCI1394_PhyControl_WriteDone 0x00004000
+#define OHCI1394_PhyControl_WritePending 0x00004000
#define OHCI1394_IsochronousCycleTimer 0x0F0
#define OHCI1394_AsReqFilterHiSet 0x100
#define OHCI1394_AsReqFilterHiClear 0x104
@@ -154,4 +154,12 @@
#define OHCI1394_phy_tcode 0xe
+/* TI extensions */
+
+#define PCI_CFG_TI_LinkEnh 0xf4
+#define TI_LinkEnh_enab_accel 0x00000002
+#define TI_LinkEnh_enab_unfair 0x00000080
+#define TI_LinkEnh_atx_thresh_mask 0x00003000
+#define TI_LinkEnh_atx_thresh_1_7K 0x00001000
+
#endif /* _FIREWIRE_OHCI_H */
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 1b03ba1d0834..a6c670b8ce52 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -122,8 +122,17 @@ config ISCSI_IBFT_FIND
is necessary for iSCSI Boot Firmware Table Attributes module to work
properly.
+config ISCSI_BOOT_SYSFS
+ tristate "iSCSI Boot Sysfs Interface"
+ default n
+ help
+ This option enables support for exposing iSCSI boot information
+ via sysfs to userspace. If you wish to export this information,
+ say Y. Otherwise, say N.
+
config ISCSI_IBFT
tristate "iSCSI Boot Firmware Table Attributes module"
+ select ISCSI_BOOT_SYSFS
depends on ISCSI_IBFT_FIND
default n
help
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 1c3c17343dbe..5fe7e1662922 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -10,4 +10,5 @@ obj-$(CONFIG_DCDBAS) += dcdbas.o
obj-$(CONFIG_DMIID) += dmi-id.o
obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o
obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o
+obj-$(CONFIG_ISCSI_BOOT_SYSFS) += iscsi_boot_sysfs.o
obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o
diff --git a/drivers/firmware/iscsi_boot_sysfs.c b/drivers/firmware/iscsi_boot_sysfs.c
new file mode 100644
index 000000000000..df6bff7366cf
--- /dev/null
+++ b/drivers/firmware/iscsi_boot_sysfs.c
@@ -0,0 +1,481 @@
+/*
+ * Export the iSCSI boot info to userland via sysfs.
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2010 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2.0 as published by
+ * the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/capability.h>
+#include <linux/iscsi_boot_sysfs.h>
+
+
+MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>");
+MODULE_DESCRIPTION("sysfs interface and helpers to export iSCSI boot information");
+MODULE_LICENSE("GPL");
+/*
+ * The kobject and attribute structures.
+ */
+struct iscsi_boot_attr {
+ struct attribute attr;
+ int type;
+ ssize_t (*show) (void *data, int type, char *buf);
+};
+
+/*
+ * The routine called for all sysfs attributes.
+ */
+static ssize_t iscsi_boot_show_attribute(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct iscsi_boot_kobj *boot_kobj =
+ container_of(kobj, struct iscsi_boot_kobj, kobj);
+ struct iscsi_boot_attr *boot_attr =
+ container_of(attr, struct iscsi_boot_attr, attr);
+ ssize_t ret = -EIO;
+ char *str = buf;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (boot_kobj->show)
+ ret = boot_kobj->show(boot_kobj->data, boot_attr->type, str);
+ return ret;
+}
+
+static const struct sysfs_ops iscsi_boot_attr_ops = {
+ .show = iscsi_boot_show_attribute,
+};
+
+static void iscsi_boot_kobj_release(struct kobject *kobj)
+{
+ struct iscsi_boot_kobj *boot_kobj =
+ container_of(kobj, struct iscsi_boot_kobj, kobj);
+
+ kfree(boot_kobj->data);
+ kfree(boot_kobj);
+}
+
+static struct kobj_type iscsi_boot_ktype = {
+ .release = iscsi_boot_kobj_release,
+ .sysfs_ops = &iscsi_boot_attr_ops,
+};
+
+#define iscsi_boot_rd_attr(fnname, sysfs_name, attr_type) \
+static struct iscsi_boot_attr iscsi_boot_attr_##fnname = { \
+ .attr = { .name = __stringify(sysfs_name), .mode = 0444 }, \
+ .type = attr_type, \
+}
+
+/* Target attrs */
+iscsi_boot_rd_attr(tgt_index, index, ISCSI_BOOT_TGT_INDEX);
+iscsi_boot_rd_attr(tgt_flags, flags, ISCSI_BOOT_TGT_FLAGS);
+iscsi_boot_rd_attr(tgt_ip, ip-addr, ISCSI_BOOT_TGT_IP_ADDR);
+iscsi_boot_rd_attr(tgt_port, port, ISCSI_BOOT_TGT_PORT);
+iscsi_boot_rd_attr(tgt_lun, lun, ISCSI_BOOT_TGT_LUN);
+iscsi_boot_rd_attr(tgt_chap, chap-type, ISCSI_BOOT_TGT_CHAP_TYPE);
+iscsi_boot_rd_attr(tgt_nic, nic-assoc, ISCSI_BOOT_TGT_NIC_ASSOC);
+iscsi_boot_rd_attr(tgt_name, target-name, ISCSI_BOOT_TGT_NAME);
+iscsi_boot_rd_attr(tgt_chap_name, chap-name, ISCSI_BOOT_TGT_CHAP_NAME);
+iscsi_boot_rd_attr(tgt_chap_secret, chap-secret, ISCSI_BOOT_TGT_CHAP_SECRET);
+iscsi_boot_rd_attr(tgt_chap_rev_name, rev-chap-name,
+ ISCSI_BOOT_TGT_REV_CHAP_NAME);
+iscsi_boot_rd_attr(tgt_chap_rev_secret, rev-chap-name-secret,
+ ISCSI_BOOT_TGT_REV_CHAP_SECRET);
+
+static struct attribute *target_attrs[] = {
+ &iscsi_boot_attr_tgt_index.attr,
+ &iscsi_boot_attr_tgt_flags.attr,
+ &iscsi_boot_attr_tgt_ip.attr,
+ &iscsi_boot_attr_tgt_port.attr,
+ &iscsi_boot_attr_tgt_lun.attr,
+ &iscsi_boot_attr_tgt_chap.attr,
+ &iscsi_boot_attr_tgt_nic.attr,
+ &iscsi_boot_attr_tgt_name.attr,
+ &iscsi_boot_attr_tgt_chap_name.attr,
+ &iscsi_boot_attr_tgt_chap_secret.attr,
+ &iscsi_boot_attr_tgt_chap_rev_name.attr,
+ &iscsi_boot_attr_tgt_chap_rev_secret.attr,
+ NULL
+};
+
+static mode_t iscsi_boot_tgt_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
+{
+ struct iscsi_boot_kobj *boot_kobj =
+ container_of(kobj, struct iscsi_boot_kobj, kobj);
+
+ if (attr == &iscsi_boot_attr_tgt_index.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_TGT_INDEX);
+ else if (attr == &iscsi_boot_attr_tgt_flags.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_TGT_FLAGS);
+ else if (attr == &iscsi_boot_attr_tgt_ip.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_TGT_IP_ADDR);
+ else if (attr == &iscsi_boot_attr_tgt_port.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_TGT_PORT);
+ else if (attr == &iscsi_boot_attr_tgt_lun.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_TGT_LUN);
+ else if (attr == &iscsi_boot_attr_tgt_chap.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_TGT_CHAP_TYPE);
+ else if (attr == &iscsi_boot_attr_tgt_nic.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_TGT_NIC_ASSOC);
+ else if (attr == &iscsi_boot_attr_tgt_name.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_TGT_NAME);
+ else if (attr == &iscsi_boot_attr_tgt_chap_name.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_TGT_CHAP_NAME);
+ else if (attr == &iscsi_boot_attr_tgt_chap_secret.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_TGT_CHAP_SECRET);
+ else if (attr == &iscsi_boot_attr_tgt_chap_rev_name.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_TGT_REV_CHAP_NAME);
+ else if (attr == &iscsi_boot_attr_tgt_chap_rev_secret.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_TGT_REV_CHAP_SECRET);
+ return 0;
+}
+
+static struct attribute_group iscsi_boot_target_attr_group = {
+ .attrs = target_attrs,
+ .is_visible = iscsi_boot_tgt_attr_is_visible,
+};
+
+/* Ethernet attrs */
+iscsi_boot_rd_attr(eth_index, index, ISCSI_BOOT_ETH_INDEX);
+iscsi_boot_rd_attr(eth_flags, flags, ISCSI_BOOT_ETH_FLAGS);
+iscsi_boot_rd_attr(eth_ip, ip-addr, ISCSI_BOOT_ETH_IP_ADDR);
+iscsi_boot_rd_attr(eth_subnet, subnet-mask, ISCSI_BOOT_ETH_SUBNET_MASK);
+iscsi_boot_rd_attr(eth_origin, origin, ISCSI_BOOT_ETH_ORIGIN);
+iscsi_boot_rd_attr(eth_gateway, gateway, ISCSI_BOOT_ETH_GATEWAY);
+iscsi_boot_rd_attr(eth_primary_dns, primary-dns, ISCSI_BOOT_ETH_PRIMARY_DNS);
+iscsi_boot_rd_attr(eth_secondary_dns, secondary-dns,
+ ISCSI_BOOT_ETH_SECONDARY_DNS);
+iscsi_boot_rd_attr(eth_dhcp, dhcp, ISCSI_BOOT_ETH_DHCP);
+iscsi_boot_rd_attr(eth_vlan, vlan, ISCSI_BOOT_ETH_VLAN);
+iscsi_boot_rd_attr(eth_mac, mac, ISCSI_BOOT_ETH_MAC);
+iscsi_boot_rd_attr(eth_hostname, hostname, ISCSI_BOOT_ETH_HOSTNAME);
+
+static struct attribute *ethernet_attrs[] = {
+ &iscsi_boot_attr_eth_index.attr,
+ &iscsi_boot_attr_eth_flags.attr,
+ &iscsi_boot_attr_eth_ip.attr,
+ &iscsi_boot_attr_eth_subnet.attr,
+ &iscsi_boot_attr_eth_origin.attr,
+ &iscsi_boot_attr_eth_gateway.attr,
+ &iscsi_boot_attr_eth_primary_dns.attr,
+ &iscsi_boot_attr_eth_secondary_dns.attr,
+ &iscsi_boot_attr_eth_dhcp.attr,
+ &iscsi_boot_attr_eth_vlan.attr,
+ &iscsi_boot_attr_eth_mac.attr,
+ &iscsi_boot_attr_eth_hostname.attr,
+ NULL
+};
+
+static mode_t iscsi_boot_eth_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
+{
+ struct iscsi_boot_kobj *boot_kobj =
+ container_of(kobj, struct iscsi_boot_kobj, kobj);
+
+ if (attr == &iscsi_boot_attr_eth_index.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_ETH_INDEX);
+ else if (attr == &iscsi_boot_attr_eth_flags.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_ETH_FLAGS);
+ else if (attr == &iscsi_boot_attr_eth_ip.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_ETH_IP_ADDR);
+ else if (attr == &iscsi_boot_attr_eth_subnet.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_ETH_SUBNET_MASK);
+ else if (attr == &iscsi_boot_attr_eth_origin.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_ETH_ORIGIN);
+ else if (attr == &iscsi_boot_attr_eth_gateway.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_ETH_GATEWAY);
+ else if (attr == &iscsi_boot_attr_eth_primary_dns.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_ETH_PRIMARY_DNS);
+ else if (attr == &iscsi_boot_attr_eth_secondary_dns.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_ETH_SECONDARY_DNS);
+ else if (attr == &iscsi_boot_attr_eth_dhcp.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_ETH_DHCP);
+ else if (attr == &iscsi_boot_attr_eth_vlan.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_ETH_VLAN);
+ else if (attr == &iscsi_boot_attr_eth_mac.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_ETH_MAC);
+ else if (attr == &iscsi_boot_attr_eth_hostname.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_ETH_HOSTNAME);
+ return 0;
+}
+
+static struct attribute_group iscsi_boot_ethernet_attr_group = {
+ .attrs = ethernet_attrs,
+ .is_visible = iscsi_boot_eth_attr_is_visible,
+};
+
+/* Initiator attrs */
+iscsi_boot_rd_attr(ini_index, index, ISCSI_BOOT_INI_INDEX);
+iscsi_boot_rd_attr(ini_flags, flags, ISCSI_BOOT_INI_FLAGS);
+iscsi_boot_rd_attr(ini_isns, isns-server, ISCSI_BOOT_INI_ISNS_SERVER);
+iscsi_boot_rd_attr(ini_slp, slp-server, ISCSI_BOOT_INI_SLP_SERVER);
+iscsi_boot_rd_attr(ini_primary_radius, pri-radius-server,
+ ISCSI_BOOT_INI_PRI_RADIUS_SERVER);
+iscsi_boot_rd_attr(ini_secondary_radius, sec-radius-server,
+ ISCSI_BOOT_INI_SEC_RADIUS_SERVER);
+iscsi_boot_rd_attr(ini_name, initiator-name, ISCSI_BOOT_INI_INITIATOR_NAME);
+
+static struct attribute *initiator_attrs[] = {
+ &iscsi_boot_attr_ini_index.attr,
+ &iscsi_boot_attr_ini_flags.attr,
+ &iscsi_boot_attr_ini_isns.attr,
+ &iscsi_boot_attr_ini_slp.attr,
+ &iscsi_boot_attr_ini_primary_radius.attr,
+ &iscsi_boot_attr_ini_secondary_radius.attr,
+ &iscsi_boot_attr_ini_name.attr,
+ NULL
+};
+
+static mode_t iscsi_boot_ini_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
+{
+ struct iscsi_boot_kobj *boot_kobj =
+ container_of(kobj, struct iscsi_boot_kobj, kobj);
+
+ if (attr == &iscsi_boot_attr_ini_index.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_INI_INDEX);
+ if (attr == &iscsi_boot_attr_ini_flags.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_INI_FLAGS);
+ if (attr == &iscsi_boot_attr_ini_isns.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_INI_ISNS_SERVER);
+ if (attr == &iscsi_boot_attr_ini_slp.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_INI_SLP_SERVER);
+ if (attr == &iscsi_boot_attr_ini_primary_radius.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_INI_PRI_RADIUS_SERVER);
+ if (attr == &iscsi_boot_attr_ini_secondary_radius.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_INI_SEC_RADIUS_SERVER);
+ if (attr == &iscsi_boot_attr_ini_name.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_INI_INITIATOR_NAME);
+
+ return 0;
+}
+
+static struct attribute_group iscsi_boot_initiator_attr_group = {
+ .attrs = initiator_attrs,
+ .is_visible = iscsi_boot_ini_attr_is_visible,
+};
+
+static struct iscsi_boot_kobj *
+iscsi_boot_create_kobj(struct iscsi_boot_kset *boot_kset,
+ struct attribute_group *attr_group,
+ const char *name, int index, void *data,
+ ssize_t (*show) (void *data, int type, char *buf),
+ mode_t (*is_visible) (void *data, int type))
+{
+ struct iscsi_boot_kobj *boot_kobj;
+
+ boot_kobj = kzalloc(sizeof(*boot_kobj), GFP_KERNEL);
+ if (!boot_kobj)
+ return NULL;
+ INIT_LIST_HEAD(&boot_kobj->list);
+
+ boot_kobj->kobj.kset = boot_kset->kset;
+ if (kobject_init_and_add(&boot_kobj->kobj, &iscsi_boot_ktype,
+ NULL, name, index)) {
+ kfree(boot_kobj);
+ return NULL;
+ }
+ boot_kobj->data = data;
+ boot_kobj->show = show;
+ boot_kobj->is_visible = is_visible;
+
+ if (sysfs_create_group(&boot_kobj->kobj, attr_group)) {
+ /*
+ * We do not want to free the data here because the caller
+ * will assume that, since the creation call failed,
+ * the boot kobj was not set up and the normal release
+ * path (which would free it) is not being run.
+ */
+ boot_kobj->data = NULL;
+ kobject_put(&boot_kobj->kobj);
+ return NULL;
+ }
+ boot_kobj->attr_group = attr_group;
+
+ kobject_uevent(&boot_kobj->kobj, KOBJ_ADD);
+ /* Nothing broke, so let's add it to the list. */
+ list_add_tail(&boot_kobj->list, &boot_kset->kobj_list);
+ return boot_kobj;
+}
+
+static void iscsi_boot_remove_kobj(struct iscsi_boot_kobj *boot_kobj)
+{
+ list_del(&boot_kobj->list);
+ sysfs_remove_group(&boot_kobj->kobj, boot_kobj->attr_group);
+ kobject_put(&boot_kobj->kobj);
+}
+
+/**
+ * iscsi_boot_create_target() - create boot target sysfs dir
+ * @boot_kset: boot kset
+ * @index: the target id
+ * @data: driver specific data for target
+ * @show: attr show function
+ * @is_visible: attr visibility function
+ *
+ * Note: The boot sysfs lib will free the data passed in for the caller
+ * when all refs to the target kobject have been released.
+ */
+struct iscsi_boot_kobj *
+iscsi_boot_create_target(struct iscsi_boot_kset *boot_kset, int index,
+ void *data,
+ ssize_t (*show) (void *data, int type, char *buf),
+ mode_t (*is_visible) (void *data, int type))
+{
+ return iscsi_boot_create_kobj(boot_kset, &iscsi_boot_target_attr_group,
+ "target%d", index, data, show, is_visible);
+}
+EXPORT_SYMBOL_GPL(iscsi_boot_create_target);
+
+/**
+ * iscsi_boot_create_initiator() - create boot initiator sysfs dir
+ * @boot_kset: boot kset
+ * @index: the initiator id
+ * @data: driver specific data
+ * @show: attr show function
+ * @is_visible: attr visibility function
+ *
+ * Note: The boot sysfs lib will free the data passed in for the caller
+ * when all refs to the initiator kobject have been released.
+ */
+struct iscsi_boot_kobj *
+iscsi_boot_create_initiator(struct iscsi_boot_kset *boot_kset, int index,
+ void *data,
+ ssize_t (*show) (void *data, int type, char *buf),
+ mode_t (*is_visible) (void *data, int type))
+{
+ return iscsi_boot_create_kobj(boot_kset,
+ &iscsi_boot_initiator_attr_group,
+ "initiator", index, data, show,
+ is_visible);
+}
+EXPORT_SYMBOL_GPL(iscsi_boot_create_initiator);
+
+/**
+ * iscsi_boot_create_ethernet() - create boot ethernet sysfs dir
+ * @boot_kset: boot kset
+ * @index: the ethernet device id
+ * @data: driver specific data
+ * @show: attr show function
+ * @is_visible: attr visibility function
+ *
+ * Note: The boot sysfs lib will free the data passed in for the caller
+ * when all refs to the ethernet kobject have been released.
+ */
+struct iscsi_boot_kobj *
+iscsi_boot_create_ethernet(struct iscsi_boot_kset *boot_kset, int index,
+ void *data,
+ ssize_t (*show) (void *data, int type, char *buf),
+ mode_t (*is_visible) (void *data, int type))
+{
+ return iscsi_boot_create_kobj(boot_kset,
+ &iscsi_boot_ethernet_attr_group,
+ "ethernet%d", index, data, show,
+ is_visible);
+}
+EXPORT_SYMBOL_GPL(iscsi_boot_create_ethernet);
+
+/**
+ * iscsi_boot_create_kset() - creates root sysfs tree
+ * @set_name: name of root dir
+ */
+struct iscsi_boot_kset *iscsi_boot_create_kset(const char *set_name)
+{
+ struct iscsi_boot_kset *boot_kset;
+
+ boot_kset = kzalloc(sizeof(*boot_kset), GFP_KERNEL);
+ if (!boot_kset)
+ return NULL;
+
+ boot_kset->kset = kset_create_and_add(set_name, NULL, firmware_kobj);
+ if (!boot_kset->kset) {
+ kfree(boot_kset);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&boot_kset->kobj_list);
+ return boot_kset;
+}
+EXPORT_SYMBOL_GPL(iscsi_boot_create_kset);
+
+/**
+ * iscsi_boot_create_host_kset() - creates root sysfs tree for a scsi host
+ * @hostno: host number of scsi host
+ */
+struct iscsi_boot_kset *iscsi_boot_create_host_kset(unsigned int hostno)
+{
+ struct iscsi_boot_kset *boot_kset;
+ char *set_name;
+
+ set_name = kasprintf(GFP_KERNEL, "iscsi_boot%u", hostno);
+ if (!set_name)
+ return NULL;
+
+ boot_kset = iscsi_boot_create_kset(set_name);
+ kfree(set_name);
+ return boot_kset;
+}
+EXPORT_SYMBOL_GPL(iscsi_boot_create_host_kset);
+
+/**
+ * iscsi_boot_destroy_kset() - destroy kset and kobjects under it
+ * @boot_kset: boot kset
+ *
+ * This will remove the kset and kobjects and attrs under it.
+ */
+void iscsi_boot_destroy_kset(struct iscsi_boot_kset *boot_kset)
+{
+ struct iscsi_boot_kobj *boot_kobj, *tmp_kobj;
+
+ list_for_each_entry_safe(boot_kobj, tmp_kobj,
+ &boot_kset->kobj_list, list)
+ iscsi_boot_remove_kobj(boot_kobj);
+
+ kset_unregister(boot_kset->kset);
+}
+EXPORT_SYMBOL_GPL(iscsi_boot_destroy_kset);
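
As a hedged illustration of how a caller is expected to use the interface exported above (the iscsi_ibft.c conversion later in this patch is the real in-tree user), here is a minimal hypothetical module; the demo_* names and the 16-byte data blob are made up. Note that the library frees the per-kobject data on the final reference drop, so the caller only frees it on an early error path.

    #include <linux/module.h>
    #include <linux/slab.h>
    #include <linux/iscsi_boot_sysfs.h>

    static struct iscsi_boot_kset *demo_kset;

    /* format one ISCSI_BOOT_TGT_* attribute from the per-target @data blob */
    static ssize_t demo_show(void *data, int type, char *buf)
    {
            return sprintf(buf, "%d\n", type);
    }

    /* expose every attribute read-only */
    static mode_t demo_is_visible(void *data, int type)
    {
            return S_IRUGO;
    }

    static int __init demo_init(void)
    {
            void *data = kzalloc(16, GFP_KERNEL);   /* freed by the kobject release */

            if (!data)
                    return -ENOMEM;

            demo_kset = iscsi_boot_create_kset("demo_iscsi_boot");
            if (!demo_kset)
                    goto free_data;

            if (!iscsi_boot_create_target(demo_kset, 0, data,
                                          demo_show, demo_is_visible))
                    goto free_kset;
            return 0;

    free_kset:
            iscsi_boot_destroy_kset(demo_kset);
    free_data:
            kfree(data);
            return -ENOMEM;
    }

    static void __exit demo_exit(void)
    {
            /* removes the kobjects; the library frees @data on the final put */
            iscsi_boot_destroy_kset(demo_kset);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
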
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index ed2801c378de..4f04ec0410a0 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2007 Red Hat, Inc.
+ * Copyright 2007-2010 Red Hat, Inc.
* by Peter Jones <pjones@redhat.com>
* Copyright 2008 IBM, Inc.
* by Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
@@ -19,6 +19,9 @@
*
* Changelog:
*
+ * 06 Jan 2010 - Peter Jones <pjones@redhat.com>
+ * New changelog entries are in the git log from now on. Not here.
+ *
* 14 Mar 2008 - Konrad Rzeszutek <ketuzsezr@darnok.org>
* Updated comments and copyrights. (v0.4.9)
*
@@ -78,9 +81,11 @@
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/types.h>
+#include <linux/acpi.h>
+#include <linux/iscsi_boot_sysfs.h>
-#define IBFT_ISCSI_VERSION "0.4.9"
-#define IBFT_ISCSI_DATE "2008-Mar-14"
+#define IBFT_ISCSI_VERSION "0.5.0"
+#define IBFT_ISCSI_DATE "2010-Feb-25"
MODULE_AUTHOR("Peter Jones <pjones@redhat.com> and \
Konrad Rzeszutek <ketuzsezr@darnok.org>");
@@ -166,108 +171,20 @@ enum ibft_id {
};
/*
- * We do not support the other types, hence the usage of NULL.
- * This maps to the enum ibft_id.
- */
-static const char *ibft_id_names[] =
- {NULL, NULL, "initiator", "ethernet%d", "target%d", NULL, NULL};
-
-/*
- * The text attributes names for each of the kobjects.
-*/
-enum ibft_eth_properties_enum {
- ibft_eth_index,
- ibft_eth_flags,
- ibft_eth_ip_addr,
- ibft_eth_subnet_mask,
- ibft_eth_origin,
- ibft_eth_gateway,
- ibft_eth_primary_dns,
- ibft_eth_secondary_dns,
- ibft_eth_dhcp,
- ibft_eth_vlan,
- ibft_eth_mac,
- /* ibft_eth_pci_bdf - this is replaced by link to the device itself. */
- ibft_eth_hostname,
- ibft_eth_end_marker,
-};
-
-static const char *ibft_eth_properties[] =
- {"index", "flags", "ip-addr", "subnet-mask", "origin", "gateway",
- "primary-dns", "secondary-dns", "dhcp", "vlan", "mac", "hostname",
- NULL};
-
-enum ibft_tgt_properties_enum {
- ibft_tgt_index,
- ibft_tgt_flags,
- ibft_tgt_ip_addr,
- ibft_tgt_port,
- ibft_tgt_lun,
- ibft_tgt_chap_type,
- ibft_tgt_nic_assoc,
- ibft_tgt_name,
- ibft_tgt_chap_name,
- ibft_tgt_chap_secret,
- ibft_tgt_rev_chap_name,
- ibft_tgt_rev_chap_secret,
- ibft_tgt_end_marker,
-};
-
-static const char *ibft_tgt_properties[] =
- {"index", "flags", "ip-addr", "port", "lun", "chap-type", "nic-assoc",
- "target-name", "chap-name", "chap-secret", "rev-chap-name",
- "rev-chap-name-secret", NULL};
-
-enum ibft_initiator_properties_enum {
- ibft_init_index,
- ibft_init_flags,
- ibft_init_isns_server,
- ibft_init_slp_server,
- ibft_init_pri_radius_server,
- ibft_init_sec_radius_server,
- ibft_init_initiator_name,
- ibft_init_end_marker,
-};
-
-static const char *ibft_initiator_properties[] =
- {"index", "flags", "isns-server", "slp-server", "pri-radius-server",
- "sec-radius-server", "initiator-name", NULL};
-
-/*
* The kobject and attribute structures.
*/
struct ibft_kobject {
- struct ibft_table_header *header;
+ struct acpi_table_ibft *header;
union {
struct ibft_initiator *initiator;
struct ibft_nic *nic;
struct ibft_tgt *tgt;
struct ibft_hdr *hdr;
};
- struct kobject kobj;
- struct list_head node;
};
-struct ibft_attribute {
- struct attribute attr;
- ssize_t (*show) (struct ibft_kobject *entry,
- struct ibft_attribute *attr, char *buf);
- union {
- struct ibft_initiator *initiator;
- struct ibft_nic *nic;
- struct ibft_tgt *tgt;
- struct ibft_hdr *hdr;
- };
- struct kobject *kobj;
- int type; /* The enum of the type. This can be any value of:
- ibft_eth_properties_enum, ibft_tgt_properties_enum,
- or ibft_initiator_properties_enum. */
- struct list_head node;
-};
-
-static LIST_HEAD(ibft_attr_list);
-static LIST_HEAD(ibft_kobject_list);
+static struct iscsi_boot_kset *boot_kset;
static const char nulls[16];
@@ -306,35 +223,27 @@ static ssize_t sprintf_string(char *str, int len, char *buf)
static int ibft_verify_hdr(char *t, struct ibft_hdr *hdr, int id, int length)
{
if (hdr->id != id) {
- printk(KERN_ERR "iBFT error: We expected the " \
+ printk(KERN_ERR "iBFT error: We expected the %s " \
"field header.id to have %d but " \
- "found %d instead!\n", id, hdr->id);
+ "found %d instead!\n", t, id, hdr->id);
return -ENODEV;
}
if (hdr->length != length) {
- printk(KERN_ERR "iBFT error: We expected the " \
+ printk(KERN_ERR "iBFT error: We expected the %s " \
"field header.length to have %d but " \
- "found %d instead!\n", length, hdr->length);
+ "found %d instead!\n", t, length, hdr->length);
return -ENODEV;
}
return 0;
}
-static void ibft_release(struct kobject *kobj)
-{
- struct ibft_kobject *ibft =
- container_of(kobj, struct ibft_kobject, kobj);
- kfree(ibft);
-}
-
/*
* Routines for parsing the iBFT data to be human readable.
*/
-static ssize_t ibft_attr_show_initiator(struct ibft_kobject *entry,
- struct ibft_attribute *attr,
- char *buf)
+static ssize_t ibft_attr_show_initiator(void *data, int type, char *buf)
{
+ struct ibft_kobject *entry = data;
struct ibft_initiator *initiator = entry->initiator;
void *ibft_loc = entry->header;
char *str = buf;
@@ -342,26 +251,26 @@ static ssize_t ibft_attr_show_initiator(struct ibft_kobject *entry,
if (!initiator)
return 0;
- switch (attr->type) {
- case ibft_init_index:
+ switch (type) {
+ case ISCSI_BOOT_INI_INDEX:
str += sprintf(str, "%d\n", initiator->hdr.index);
break;
- case ibft_init_flags:
+ case ISCSI_BOOT_INI_FLAGS:
str += sprintf(str, "%d\n", initiator->hdr.flags);
break;
- case ibft_init_isns_server:
+ case ISCSI_BOOT_INI_ISNS_SERVER:
str += sprintf_ipaddr(str, initiator->isns_server);
break;
- case ibft_init_slp_server:
+ case ISCSI_BOOT_INI_SLP_SERVER:
str += sprintf_ipaddr(str, initiator->slp_server);
break;
- case ibft_init_pri_radius_server:
+ case ISCSI_BOOT_INI_PRI_RADIUS_SERVER:
str += sprintf_ipaddr(str, initiator->pri_radius_server);
break;
- case ibft_init_sec_radius_server:
+ case ISCSI_BOOT_INI_SEC_RADIUS_SERVER:
str += sprintf_ipaddr(str, initiator->sec_radius_server);
break;
- case ibft_init_initiator_name:
+ case ISCSI_BOOT_INI_INITIATOR_NAME:
str += sprintf_string(str, initiator->initiator_name_len,
(char *)ibft_loc +
initiator->initiator_name_off);
@@ -373,10 +282,9 @@ static ssize_t ibft_attr_show_initiator(struct ibft_kobject *entry,
return str - buf;
}
-static ssize_t ibft_attr_show_nic(struct ibft_kobject *entry,
- struct ibft_attribute *attr,
- char *buf)
+static ssize_t ibft_attr_show_nic(void *data, int type, char *buf)
{
+ struct ibft_kobject *entry = data;
struct ibft_nic *nic = entry->nic;
void *ibft_loc = entry->header;
char *str = buf;
@@ -385,42 +293,42 @@ static ssize_t ibft_attr_show_nic(struct ibft_kobject *entry,
if (!nic)
return 0;
- switch (attr->type) {
- case ibft_eth_index:
+ switch (type) {
+ case ISCSI_BOOT_ETH_INDEX:
str += sprintf(str, "%d\n", nic->hdr.index);
break;
- case ibft_eth_flags:
+ case ISCSI_BOOT_ETH_FLAGS:
str += sprintf(str, "%d\n", nic->hdr.flags);
break;
- case ibft_eth_ip_addr:
+ case ISCSI_BOOT_ETH_IP_ADDR:
str += sprintf_ipaddr(str, nic->ip_addr);
break;
- case ibft_eth_subnet_mask:
+ case ISCSI_BOOT_ETH_SUBNET_MASK:
val = cpu_to_be32(~((1 << (32-nic->subnet_mask_prefix))-1));
str += sprintf(str, "%pI4", &val);
break;
- case ibft_eth_origin:
+ case ISCSI_BOOT_ETH_ORIGIN:
str += sprintf(str, "%d\n", nic->origin);
break;
- case ibft_eth_gateway:
+ case ISCSI_BOOT_ETH_GATEWAY:
str += sprintf_ipaddr(str, nic->gateway);
break;
- case ibft_eth_primary_dns:
+ case ISCSI_BOOT_ETH_PRIMARY_DNS:
str += sprintf_ipaddr(str, nic->primary_dns);
break;
- case ibft_eth_secondary_dns:
+ case ISCSI_BOOT_ETH_SECONDARY_DNS:
str += sprintf_ipaddr(str, nic->secondary_dns);
break;
- case ibft_eth_dhcp:
+ case ISCSI_BOOT_ETH_DHCP:
str += sprintf_ipaddr(str, nic->dhcp);
break;
- case ibft_eth_vlan:
+ case ISCSI_BOOT_ETH_VLAN:
str += sprintf(str, "%d\n", nic->vlan);
break;
- case ibft_eth_mac:
+ case ISCSI_BOOT_ETH_MAC:
str += sprintf(str, "%pM\n", nic->mac);
break;
- case ibft_eth_hostname:
+ case ISCSI_BOOT_ETH_HOSTNAME:
str += sprintf_string(str, nic->hostname_len,
(char *)ibft_loc + nic->hostname_off);
break;
@@ -431,10 +339,9 @@ static ssize_t ibft_attr_show_nic(struct ibft_kobject *entry,
return str - buf;
};
-static ssize_t ibft_attr_show_target(struct ibft_kobject *entry,
- struct ibft_attribute *attr,
- char *buf)
+static ssize_t ibft_attr_show_target(void *data, int type, char *buf)
{
+ struct ibft_kobject *entry = data;
struct ibft_tgt *tgt = entry->tgt;
void *ibft_loc = entry->header;
char *str = buf;
@@ -443,48 +350,48 @@ static ssize_t ibft_attr_show_target(struct ibft_kobject *entry,
if (!tgt)
return 0;
- switch (attr->type) {
- case ibft_tgt_index:
+ switch (type) {
+ case ISCSI_BOOT_TGT_INDEX:
str += sprintf(str, "%d\n", tgt->hdr.index);
break;
- case ibft_tgt_flags:
+ case ISCSI_BOOT_TGT_FLAGS:
str += sprintf(str, "%d\n", tgt->hdr.flags);
break;
- case ibft_tgt_ip_addr:
+ case ISCSI_BOOT_TGT_IP_ADDR:
str += sprintf_ipaddr(str, tgt->ip_addr);
break;
- case ibft_tgt_port:
+ case ISCSI_BOOT_TGT_PORT:
str += sprintf(str, "%d\n", tgt->port);
break;
- case ibft_tgt_lun:
+ case ISCSI_BOOT_TGT_LUN:
for (i = 0; i < 8; i++)
str += sprintf(str, "%x", (u8)tgt->lun[i]);
str += sprintf(str, "\n");
break;
- case ibft_tgt_nic_assoc:
+ case ISCSI_BOOT_TGT_NIC_ASSOC:
str += sprintf(str, "%d\n", tgt->nic_assoc);
break;
- case ibft_tgt_chap_type:
+ case ISCSI_BOOT_TGT_CHAP_TYPE:
str += sprintf(str, "%d\n", tgt->chap_type);
break;
- case ibft_tgt_name:
+ case ISCSI_BOOT_TGT_NAME:
str += sprintf_string(str, tgt->tgt_name_len,
(char *)ibft_loc + tgt->tgt_name_off);
break;
- case ibft_tgt_chap_name:
+ case ISCSI_BOOT_TGT_CHAP_NAME:
str += sprintf_string(str, tgt->chap_name_len,
(char *)ibft_loc + tgt->chap_name_off);
break;
- case ibft_tgt_chap_secret:
+ case ISCSI_BOOT_TGT_CHAP_SECRET:
str += sprintf_string(str, tgt->chap_secret_len,
(char *)ibft_loc + tgt->chap_secret_off);
break;
- case ibft_tgt_rev_chap_name:
+ case ISCSI_BOOT_TGT_REV_CHAP_NAME:
str += sprintf_string(str, tgt->rev_chap_name_len,
(char *)ibft_loc +
tgt->rev_chap_name_off);
break;
- case ibft_tgt_rev_chap_secret:
+ case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
str += sprintf_string(str, tgt->rev_chap_secret_len,
(char *)ibft_loc +
tgt->rev_chap_secret_off);
@@ -496,52 +403,19 @@ static ssize_t ibft_attr_show_target(struct ibft_kobject *entry,
return str - buf;
}
-/*
- * The routine called for all sysfs attributes.
- */
-static ssize_t ibft_show_attribute(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct ibft_kobject *dev =
- container_of(kobj, struct ibft_kobject, kobj);
- struct ibft_attribute *ibft_attr =
- container_of(attr, struct ibft_attribute, attr);
- ssize_t ret = -EIO;
- char *str = buf;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
-
- if (ibft_attr->show)
- ret = ibft_attr->show(dev, ibft_attr, str);
-
- return ret;
-}
-
-static const struct sysfs_ops ibft_attr_ops = {
- .show = ibft_show_attribute,
-};
-
-static struct kobj_type ibft_ktype = {
- .release = ibft_release,
- .sysfs_ops = &ibft_attr_ops,
-};
-
-static struct kset *ibft_kset;
-
static int __init ibft_check_device(void)
{
int len;
u8 *pos;
u8 csum = 0;
- len = ibft_addr->length;
+ len = ibft_addr->header.length;
/* Sanity checking of iBFT. */
- if (ibft_addr->revision != 1) {
+ if (ibft_addr->header.revision != 1) {
printk(KERN_ERR "iBFT module supports only revision 1, " \
- "while this is %d.\n", ibft_addr->revision);
+ "while this is %d.\n",
+ ibft_addr->header.revision);
return -ENOENT;
}
for (pos = (u8 *)ibft_addr; pos < (u8 *)ibft_addr + len; pos++)
@@ -556,12 +430,149 @@ static int __init ibft_check_device(void)
}
/*
+ * Helper routines to determine whether an entry is valid
+ * in the corresponding iBFT structure.
+ */
+static mode_t ibft_check_nic_for(void *data, int type)
+{
+ struct ibft_kobject *entry = data;
+ struct ibft_nic *nic = entry->nic;
+ mode_t rc = 0;
+
+ switch (type) {
+ case ISCSI_BOOT_ETH_INDEX:
+ case ISCSI_BOOT_ETH_FLAGS:
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_ETH_IP_ADDR:
+ if (memcmp(nic->ip_addr, nulls, sizeof(nic->ip_addr)))
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_ETH_SUBNET_MASK:
+ if (nic->subnet_mask_prefix)
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_ETH_ORIGIN:
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_ETH_GATEWAY:
+ if (memcmp(nic->gateway, nulls, sizeof(nic->gateway)))
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_ETH_PRIMARY_DNS:
+ if (memcmp(nic->primary_dns, nulls,
+ sizeof(nic->primary_dns)))
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_ETH_SECONDARY_DNS:
+ if (memcmp(nic->secondary_dns, nulls,
+ sizeof(nic->secondary_dns)))
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_ETH_DHCP:
+ if (memcmp(nic->dhcp, nulls, sizeof(nic->dhcp)))
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_ETH_VLAN:
+ case ISCSI_BOOT_ETH_MAC:
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_ETH_HOSTNAME:
+ if (nic->hostname_off)
+ rc = S_IRUGO;
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+static mode_t __init ibft_check_tgt_for(void *data, int type)
+{
+ struct ibft_kobject *entry = data;
+ struct ibft_tgt *tgt = entry->tgt;
+ mode_t rc = 0;
+
+ switch (type) {
+ case ISCSI_BOOT_TGT_INDEX:
+ case ISCSI_BOOT_TGT_FLAGS:
+ case ISCSI_BOOT_TGT_IP_ADDR:
+ case ISCSI_BOOT_TGT_PORT:
+ case ISCSI_BOOT_TGT_LUN:
+ case ISCSI_BOOT_TGT_NIC_ASSOC:
+ case ISCSI_BOOT_TGT_CHAP_TYPE:
+ rc = S_IRUGO;
+ case ISCSI_BOOT_TGT_NAME:
+ if (tgt->tgt_name_len)
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_TGT_CHAP_NAME:
+ case ISCSI_BOOT_TGT_CHAP_SECRET:
+ if (tgt->chap_name_len)
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_TGT_REV_CHAP_NAME:
+ case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
+ if (tgt->rev_chap_name_len)
+ rc = S_IRUGO;
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+static mode_t __init ibft_check_initiator_for(void *data, int type)
+{
+ struct ibft_kobject *entry = data;
+ struct ibft_initiator *init = entry->initiator;
+ mode_t rc = 0;
+
+ switch (type) {
+ case ISCSI_BOOT_INI_INDEX:
+ case ISCSI_BOOT_INI_FLAGS:
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_INI_ISNS_SERVER:
+ if (memcmp(init->isns_server, nulls,
+ sizeof(init->isns_server)))
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_INI_SLP_SERVER:
+ if (memcmp(init->slp_server, nulls,
+ sizeof(init->slp_server)))
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_INI_PRI_RADIUS_SERVER:
+ if (memcmp(init->pri_radius_server, nulls,
+ sizeof(init->pri_radius_server)))
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_INI_SEC_RADIUS_SERVER:
+ if (memcmp(init->sec_radius_server, nulls,
+ sizeof(init->sec_radius_server)))
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_INI_INITIATOR_NAME:
+ if (init->initiator_name_len)
+ rc = S_IRUGO;
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+/*
* Helper function for ibft_register_kobjects.
*/
-static int __init ibft_create_kobject(struct ibft_table_header *header,
- struct ibft_hdr *hdr,
- struct list_head *list)
+static int __init ibft_create_kobject(struct acpi_table_ibft *header,
+ struct ibft_hdr *hdr)
{
+ struct iscsi_boot_kobj *boot_kobj = NULL;
struct ibft_kobject *ibft_kobj = NULL;
struct ibft_nic *nic = (struct ibft_nic *)hdr;
struct pci_dev *pci_dev;
@@ -578,14 +589,47 @@ static int __init ibft_create_kobject(struct ibft_table_header *header,
case id_initiator:
rc = ibft_verify_hdr("initiator", hdr, id_initiator,
sizeof(*ibft_kobj->initiator));
+ if (rc)
+ break;
+
+ boot_kobj = iscsi_boot_create_initiator(boot_kset, hdr->index,
+ ibft_kobj,
+ ibft_attr_show_initiator,
+ ibft_check_initiator_for);
+ if (!boot_kobj) {
+ rc = -ENOMEM;
+ goto free_ibft_obj;
+ }
break;
case id_nic:
rc = ibft_verify_hdr("ethernet", hdr, id_nic,
sizeof(*ibft_kobj->nic));
+ if (rc)
+ break;
+
+ boot_kobj = iscsi_boot_create_ethernet(boot_kset, hdr->index,
+ ibft_kobj,
+ ibft_attr_show_nic,
+ ibft_check_nic_for);
+ if (!boot_kobj) {
+ rc = -ENOMEM;
+ goto free_ibft_obj;
+ }
break;
case id_target:
rc = ibft_verify_hdr("target", hdr, id_target,
sizeof(*ibft_kobj->tgt));
+ if (rc)
+ break;
+
+ boot_kobj = iscsi_boot_create_target(boot_kset, hdr->index,
+ ibft_kobj,
+ ibft_attr_show_target,
+ ibft_check_tgt_for);
+ if (!boot_kobj) {
+ rc = -ENOMEM;
+ goto free_ibft_obj;
+ }
break;
case id_reserved:
case id_control:
@@ -596,29 +640,17 @@ static int __init ibft_create_kobject(struct ibft_table_header *header,
default:
printk(KERN_ERR "iBFT has unknown structure type (%d). " \
"Report this bug to %.6s!\n", hdr->id,
- header->oem_id);
+ header->header.oem_id);
rc = 1;
break;
}
if (rc) {
/* Skip adding this kobject, but exit with non-fatal error. */
- kfree(ibft_kobj);
- goto out_invalid_struct;
+ rc = 0;
+ goto free_ibft_obj;
}
- ibft_kobj->kobj.kset = ibft_kset;
-
- rc = kobject_init_and_add(&ibft_kobj->kobj, &ibft_ktype,
- NULL, ibft_id_names[hdr->id], hdr->index);
-
- if (rc) {
- kfree(ibft_kobj);
- goto out;
- }
-
- kobject_uevent(&ibft_kobj->kobj, KOBJ_ADD);
-
if (hdr->id == id_nic) {
/*
* We don't search for the device in other domains than
@@ -629,19 +661,16 @@ static int __init ibft_create_kobject(struct ibft_table_header *header,
pci_dev = pci_get_bus_and_slot((nic->pci_bdf & 0xff00) >> 8,
(nic->pci_bdf & 0xff));
if (pci_dev) {
- rc = sysfs_create_link(&ibft_kobj->kobj,
+ rc = sysfs_create_link(&boot_kobj->kobj,
&pci_dev->dev.kobj, "device");
pci_dev_put(pci_dev);
}
}
+ return 0;
- /* Nothing broke so lets add it to the list. */
- list_add_tail(&ibft_kobj->node, list);
-out:
+free_ibft_obj:
+ kfree(ibft_kobj);
return rc;
-out_invalid_struct:
- /* Unsupported structs are skipped. */
- return 0;
}
/*
@@ -649,8 +678,7 @@ out_invalid_struct:
* found add them on the passed-in list. We do not support the other
* fields at this point, so they are skipped.
*/
-static int __init ibft_register_kobjects(struct ibft_table_header *header,
- struct list_head *list)
+static int __init ibft_register_kobjects(struct acpi_table_ibft *header)
{
struct ibft_control *control = NULL;
void *ptr, *end;
@@ -660,7 +688,7 @@ static int __init ibft_register_kobjects(struct ibft_table_header *header,
control = (void *)header + sizeof(*header);
end = (void *)control + control->hdr.length;
- eot_offset = (void *)header + header->length - (void *)control;
+ eot_offset = (void *)header + header->header.length - (void *)control;
rc = ibft_verify_hdr("control", (struct ibft_hdr *)control, id_control,
sizeof(*control));
@@ -672,10 +700,10 @@ static int __init ibft_register_kobjects(struct ibft_table_header *header,
}
for (ptr = &control->initiator_off; ptr < end; ptr += sizeof(u16)) {
offset = *(u16 *)ptr;
- if (offset && offset < header->length && offset < eot_offset) {
+ if (offset && offset < header->header.length &&
+ offset < eot_offset) {
rc = ibft_create_kobject(header,
- (void *)header + offset,
- list);
+ (void *)header + offset);
if (rc)
break;
}
@@ -684,240 +712,28 @@ static int __init ibft_register_kobjects(struct ibft_table_header *header,
return rc;
}
-static void ibft_unregister(struct list_head *attr_list,
- struct list_head *kobj_list)
+static void ibft_unregister(void)
{
- struct ibft_kobject *data = NULL, *n;
- struct ibft_attribute *attr = NULL, *m;
-
- list_for_each_entry_safe(attr, m, attr_list, node) {
- sysfs_remove_file(attr->kobj, &attr->attr);
- list_del(&attr->node);
- kfree(attr);
+ struct iscsi_boot_kobj *boot_kobj, *tmp_kobj;
+ struct ibft_kobject *ibft_kobj;
+
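+ /* Only the pci "device" symlinks need explicit removal here;
+ * iscsi_boot_destroy_kset() tears down the kobjects themselves. */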
+ list_for_each_entry_safe(boot_kobj, tmp_kobj,
+ &boot_kset->kobj_list, list) {
+ ibft_kobj = boot_kobj->data;
+ if (ibft_kobj->hdr->id == id_nic)
+ sysfs_remove_link(&boot_kobj->kobj, "device");
};
- list_del_init(attr_list);
-
- list_for_each_entry_safe(data, n, kobj_list, node) {
- list_del(&data->node);
- if (data->hdr->id == id_nic)
- sysfs_remove_link(&data->kobj, "device");
- kobject_put(&data->kobj);
- };
- list_del_init(kobj_list);
}
-static int __init ibft_create_attribute(struct ibft_kobject *kobj_data,
- int type,
- const char *name,
- ssize_t (*show)(struct ibft_kobject *,
- struct ibft_attribute*,
- char *buf),
- struct list_head *list)
+static void ibft_cleanup(void)
{
- struct ibft_attribute *attr = NULL;
- struct ibft_hdr *hdr = kobj_data->hdr;
-
- attr = kmalloc(sizeof(*attr), GFP_KERNEL);
- if (!attr)
- return -ENOMEM;
-
- attr->attr.name = name;
- attr->attr.mode = S_IRUSR;
-
- attr->hdr = hdr;
- attr->show = show;
- attr->kobj = &kobj_data->kobj;
- attr->type = type;
-
- list_add_tail(&attr->node, list);
-
- return 0;
-}
-
-/*
- * Helper routiners to check to determine if the entry is valid
- * in the proper iBFT structure.
- */
-static int __init ibft_check_nic_for(struct ibft_nic *nic, int entry)
-{
- int rc = 0;
-
- switch (entry) {
- case ibft_eth_index:
- case ibft_eth_flags:
- rc = 1;
- break;
- case ibft_eth_ip_addr:
- if (memcmp(nic->ip_addr, nulls, sizeof(nic->ip_addr)))
- rc = 1;
- break;
- case ibft_eth_subnet_mask:
- if (nic->subnet_mask_prefix)
- rc = 1;
- break;
- case ibft_eth_origin:
- rc = 1;
- break;
- case ibft_eth_gateway:
- if (memcmp(nic->gateway, nulls, sizeof(nic->gateway)))
- rc = 1;
- break;
- case ibft_eth_primary_dns:
- if (memcmp(nic->primary_dns, nulls,
- sizeof(nic->primary_dns)))
- rc = 1;
- break;
- case ibft_eth_secondary_dns:
- if (memcmp(nic->secondary_dns, nulls,
- sizeof(nic->secondary_dns)))
- rc = 1;
- break;
- case ibft_eth_dhcp:
- if (memcmp(nic->dhcp, nulls, sizeof(nic->dhcp)))
- rc = 1;
- break;
- case ibft_eth_vlan:
- case ibft_eth_mac:
- rc = 1;
- break;
- case ibft_eth_hostname:
- if (nic->hostname_off)
- rc = 1;
- break;
- default:
- break;
- }
-
- return rc;
+ ibft_unregister();
+ iscsi_boot_destroy_kset(boot_kset);
}
-static int __init ibft_check_tgt_for(struct ibft_tgt *tgt, int entry)
-{
- int rc = 0;
-
- switch (entry) {
- case ibft_tgt_index:
- case ibft_tgt_flags:
- case ibft_tgt_ip_addr:
- case ibft_tgt_port:
- case ibft_tgt_lun:
- case ibft_tgt_nic_assoc:
- case ibft_tgt_chap_type:
- rc = 1;
- case ibft_tgt_name:
- if (tgt->tgt_name_len)
- rc = 1;
- break;
- case ibft_tgt_chap_name:
- case ibft_tgt_chap_secret:
- if (tgt->chap_name_len)
- rc = 1;
- break;
- case ibft_tgt_rev_chap_name:
- case ibft_tgt_rev_chap_secret:
- if (tgt->rev_chap_name_len)
- rc = 1;
- break;
- default:
- break;
- }
-
- return rc;
-}
-
-static int __init ibft_check_initiator_for(struct ibft_initiator *init,
- int entry)
-{
- int rc = 0;
-
- switch (entry) {
- case ibft_init_index:
- case ibft_init_flags:
- rc = 1;
- break;
- case ibft_init_isns_server:
- if (memcmp(init->isns_server, nulls,
- sizeof(init->isns_server)))
- rc = 1;
- break;
- case ibft_init_slp_server:
- if (memcmp(init->slp_server, nulls,
- sizeof(init->slp_server)))
- rc = 1;
- break;
- case ibft_init_pri_radius_server:
- if (memcmp(init->pri_radius_server, nulls,
- sizeof(init->pri_radius_server)))
- rc = 1;
- break;
- case ibft_init_sec_radius_server:
- if (memcmp(init->sec_radius_server, nulls,
- sizeof(init->sec_radius_server)))
- rc = 1;
- break;
- case ibft_init_initiator_name:
- if (init->initiator_name_len)
- rc = 1;
- break;
- default:
- break;
- }
-
- return rc;
-}
-
-/*
- * Register the attributes for all of the kobjects.
- */
-static int __init ibft_register_attributes(struct list_head *kobject_list,
- struct list_head *attr_list)
+static void __exit ibft_exit(void)
{
- int rc = 0, i = 0;
- struct ibft_kobject *data = NULL;
- struct ibft_attribute *attr = NULL, *m;
-
- list_for_each_entry(data, kobject_list, node) {
- switch (data->hdr->id) {
- case id_nic:
- for (i = 0; i < ibft_eth_end_marker && !rc; i++)
- if (ibft_check_nic_for(data->nic, i))
- rc = ibft_create_attribute(data, i,
- ibft_eth_properties[i],
- ibft_attr_show_nic, attr_list);
- break;
- case id_target:
- for (i = 0; i < ibft_tgt_end_marker && !rc; i++)
- if (ibft_check_tgt_for(data->tgt, i))
- rc = ibft_create_attribute(data, i,
- ibft_tgt_properties[i],
- ibft_attr_show_target,
- attr_list);
- break;
- case id_initiator:
- for (i = 0; i < ibft_init_end_marker && !rc; i++)
- if (ibft_check_initiator_for(
- data->initiator, i))
- rc = ibft_create_attribute(data, i,
- ibft_initiator_properties[i],
- ibft_attr_show_initiator,
- attr_list);
- break;
- default:
- break;
- }
- if (rc)
- break;
- }
- list_for_each_entry_safe(attr, m, attr_list, node) {
- rc = sysfs_create_file(attr->kobj, &attr->attr);
- if (rc) {
- list_del(&attr->node);
- kfree(attr);
- break;
- }
- }
-
- return rc;
+ ibft_cleanup();
}
/*
@@ -927,26 +743,20 @@ static int __init ibft_init(void)
{
int rc = 0;
- ibft_kset = kset_create_and_add("ibft", NULL, firmware_kobj);
- if (!ibft_kset)
- return -ENOMEM;
-
if (ibft_addr) {
printk(KERN_INFO "iBFT detected at 0x%llx.\n",
(u64)isa_virt_to_bus(ibft_addr));
rc = ibft_check_device();
if (rc)
- goto out_firmware_unregister;
+ return rc;
- /* Scan the IBFT for data and register the kobjects. */
- rc = ibft_register_kobjects(ibft_addr, &ibft_kobject_list);
- if (rc)
- goto out_free;
+ boot_kset = iscsi_boot_create_kset("ibft");
+ if (!boot_kset)
+ return -ENOMEM;
- /* Register the attributes */
- rc = ibft_register_attributes(&ibft_kobject_list,
- &ibft_attr_list);
+ /* Scan the IBFT for data and register the kobjects. */
+ rc = ibft_register_kobjects(ibft_addr);
if (rc)
goto out_free;
} else
@@ -955,17 +765,9 @@ static int __init ibft_init(void)
return 0;
out_free:
- ibft_unregister(&ibft_attr_list, &ibft_kobject_list);
-out_firmware_unregister:
- kset_unregister(ibft_kset);
+ ibft_cleanup();
return rc;
}
-static void __exit ibft_exit(void)
-{
- ibft_unregister(&ibft_attr_list, &ibft_kobject_list);
- kset_unregister(ibft_kset);
-}
-
module_init(ibft_init);
module_exit(ibft_exit);
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c
index d6470ef36e4a..82a7a1566290 100644
--- a/drivers/firmware/iscsi_ibft_find.c
+++ b/drivers/firmware/iscsi_ibft_find.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2007 Red Hat, Inc.
+ * Copyright 2007-2010 Red Hat, Inc.
* by Peter Jones <pjones@redhat.com>
* Copyright 2007 IBM, Inc.
* by Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
@@ -22,6 +22,7 @@
#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/device.h>
+#include <linux/efi.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/limits.h>
@@ -30,13 +31,15 @@
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/types.h>
+#include <linux/acpi.h>
+#include <linux/iscsi_ibft.h>
#include <asm/mmzone.h>
/*
* Physical location of iSCSI Boot Format Table.
*/
-struct ibft_table_header *ibft_addr;
+struct acpi_table_ibft *ibft_addr;
EXPORT_SYMBOL_GPL(ibft_addr);
#define IBFT_SIGN "iBFT"
@@ -46,19 +49,20 @@ EXPORT_SYMBOL_GPL(ibft_addr);
#define VGA_MEM 0xA0000 /* VGA buffer */
#define VGA_SIZE 0x20000 /* 128kB */
+#ifdef CONFIG_ACPI
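+/* Called by acpi_table_parse(); the table is already mapped, so just record it. */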
+static int __init acpi_find_ibft(struct acpi_table_header *header)
+{
+ ibft_addr = (struct acpi_table_ibft *)header;
+ return 0;
+}
+#endif /* CONFIG_ACPI */
-/*
- * Routine used to find the iSCSI Boot Format Table. The logical
- * kernel address is set in the ibft_addr global variable.
- */
-unsigned long __init find_ibft_region(unsigned long *sizep)
+static int __init find_ibft_in_mem(void)
{
unsigned long pos;
unsigned int len = 0;
void *virt;
- ibft_addr = NULL;
-
for (pos = IBFT_START; pos < IBFT_END; pos += 16) {
/* The table can't be inside the VGA BIOS reserved space,
* so skip that area */
@@ -72,14 +76,42 @@ unsigned long __init find_ibft_region(unsigned long *sizep)
/* if the length of the table extends past 1M,
* the table cannot be valid. */
if (pos + len <= (IBFT_END-1)) {
- ibft_addr = (struct ibft_table_header *)virt;
+ ibft_addr = (struct acpi_table_ibft *)virt;
break;
}
}
}
+ return len;
+}
+/*
+ * Routine used to find the iSCSI Boot Format Table. The logical
+ * kernel address is set in the ibft_addr global variable.
+ */
+unsigned long __init find_ibft_region(unsigned long *sizep)
+{
+
+ ibft_addr = NULL;
+
+#ifdef CONFIG_ACPI
+ /*
+ * One spec says "IBFT", the other says "iBFT". We have to check
+ * for both.
+ */
+ if (!ibft_addr)
+ acpi_table_parse(ACPI_SIG_IBFT, acpi_find_ibft);
+ if (!ibft_addr)
+ acpi_table_parse("iBFT", acpi_find_ibft);
+#endif /* CONFIG_ACPI */
+
+ /* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will
+ * only use ACPI for this */
+
+ if (!ibft_addr && !efi_enabled)
+ find_ibft_in_mem();
+
if (ibft_addr) {
- *sizep = PAGE_ALIGN(len);
- return pos;
+ *sizep = PAGE_ALIGN(ibft_addr->header.length);
+ return (u64)isa_virt_to_bus(ibft_addr);
}
*sizep = 0;
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index fee678f74a19..bde1bad7785e 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -188,6 +188,13 @@ config GPIO_PCF857X
This driver provides an in-kernel interface to those GPIOs using
platform-neutral GPIO calls.
+config GPIO_TC35892
+ bool "TC35892 GPIOs"
+ depends on MFD_TC35892
+ help
+ This enables support for the GPIOs found on the TC35892
+ I/O Expander.
+
config GPIO_TWL4030
tristate "TWL4030, TWL5030, and TPS659x0 GPIOs"
depends on TWL4030_CORE
@@ -275,6 +282,14 @@ config GPIO_TIMBERDALE
---help---
Add support for the GPIO IP in the timberdale FPGA.
+config GPIO_RDC321X
+ tristate "RDC R-321x GPIO support"
+ depends on PCI && GPIOLIB
+ select MFD_RDC321X
+ help
+ Support for the RDC R321x SoC GPIOs over southbridge
+ PCI configuration space.
+
comment "SPI GPIO expanders:"
config GPIO_MAX7301
@@ -310,4 +325,14 @@ config GPIO_UCB1400
To compile this driver as a module, choose M here: the
module will be called ucb1400_gpio.
+comment "MODULbus GPIO expanders:"
+
+config GPIO_JANZ_TTL
+ tristate "Janz VMOD-TTL Digital IO Module"
+ depends on MFD_JANZ_CMODIO
+ help
+ This enables support for the Janz VMOD-TTL Digital IO module.
+ This driver provides support for driving the pins in output
+ mode only. Input mode is not supported.
+
endif
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 10f3f8d958b1..51c3cdd41b5a 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o
obj-$(CONFIG_GPIO_PCA953X) += pca953x.o
obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o
obj-$(CONFIG_GPIO_PL061) += pl061.o
+obj-$(CONFIG_GPIO_TC35892) += tc35892-gpio.o
obj-$(CONFIG_GPIO_TIMBERDALE) += timbgpio.o
obj-$(CONFIG_GPIO_TWL4030) += twl4030-gpio.o
obj-$(CONFIG_GPIO_UCB1400) += ucb1400_gpio.o
@@ -27,4 +28,6 @@ obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o
obj-$(CONFIG_GPIO_WM831X) += wm831x-gpio.o
obj-$(CONFIG_GPIO_WM8350) += wm8350-gpiolib.o
obj-$(CONFIG_GPIO_WM8994) += wm8994-gpio.o
-obj-$(CONFIG_GPIO_SCH) += sch_gpio.o
\ No newline at end of file
+obj-$(CONFIG_GPIO_SCH) += sch_gpio.o
+obj-$(CONFIG_GPIO_RDC321X) += rdc321x-gpio.o
+obj-$(CONFIG_GPIO_JANZ_TTL) += janz-ttl.o
diff --git a/drivers/gpio/janz-ttl.c b/drivers/gpio/janz-ttl.c
new file mode 100644
index 000000000000..813ac077e5d7
--- /dev/null
+++ b/drivers/gpio/janz-ttl.c
@@ -0,0 +1,258 @@
+/*
+ * Janz MODULbus VMOD-TTL GPIO Driver
+ *
+ * Copyright (c) 2010 Ira W. Snyder <iws@ovro.caltech.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+
+#include <linux/mfd/janz.h>
+
+#define DRV_NAME "janz-ttl"
+
+#define PORTA_DIRECTION 0x23
+#define PORTB_DIRECTION 0x2B
+#define PORTC_DIRECTION 0x06
+#define PORTA_IOCTL 0x24
+#define PORTB_IOCTL 0x2C
+#define PORTC_IOCTL 0x07
+
+#define MASTER_INT_CTL 0x00
+#define MASTER_CONF_CTL 0x01
+
+#define CONF_PAE (1 << 2)
+#define CONF_PBE (1 << 7)
+#define CONF_PCE (1 << 4)
+
+struct ttl_control_regs {
+ __be16 portc;
+ __be16 portb;
+ __be16 porta;
+ __be16 control;
+};
+
+struct ttl_module {
+ struct gpio_chip gpio;
+
+ /* base address of registers */
+ struct ttl_control_regs __iomem *regs;
+
+ u8 portc_shadow;
+ u8 portb_shadow;
+ u8 porta_shadow;
+
+ spinlock_t lock;
+};
+
+static int ttl_get_value(struct gpio_chip *gpio, unsigned offset)
+{
+ struct ttl_module *mod = dev_get_drvdata(gpio->dev);
+ u8 *shadow;
+ int ret;
+
+ if (offset < 8) {
+ shadow = &mod->porta_shadow;
+ } else if (offset < 16) {
+ shadow = &mod->portb_shadow;
+ offset -= 8;
+ } else {
+ shadow = &mod->portc_shadow;
+ offset -= 16;
+ }
+
+ spin_lock(&mod->lock);
+ ret = *shadow & (1 << offset);
+ spin_unlock(&mod->lock);
+ return ret;
+}
+
+static void ttl_set_value(struct gpio_chip *gpio, unsigned offset, int value)
+{
+ struct ttl_module *mod = dev_get_drvdata(gpio->dev);
+ void __iomem *port;
+ u8 *shadow;
+
+ if (offset < 8) {
+ port = &mod->regs->porta;
+ shadow = &mod->porta_shadow;
+ } else if (offset < 16) {
+ port = &mod->regs->portb;
+ shadow = &mod->portb_shadow;
+ offset -= 8;
+ } else {
+ port = &mod->regs->portc;
+ shadow = &mod->portc_shadow;
+ offset -= 16;
+ }
+
+ spin_lock(&mod->lock);
+ if (value)
+ *shadow |= (1 << offset);
+ else
+ *shadow &= ~(1 << offset);
+
+ iowrite16be(*shadow, port);
+ spin_unlock(&mod->lock);
+}
+
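+/*
+ * Registers are accessed indirectly: write the register index to the
+ * control word, then write the value.
+ */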
+static void __devinit ttl_write_reg(struct ttl_module *mod, u8 reg, u16 val)
+{
+ iowrite16be(reg, &mod->regs->control);
+ iowrite16be(val, &mod->regs->control);
+}
+
+static void __devinit ttl_setup_device(struct ttl_module *mod)
+{
+ /* reset the device to a known state */
+ iowrite16be(0x0000, &mod->regs->control);
+ iowrite16be(0x0001, &mod->regs->control);
+ iowrite16be(0x0000, &mod->regs->control);
+
+ /* put all ports in open-drain mode */
+ ttl_write_reg(mod, PORTA_IOCTL, 0x00ff);
+ ttl_write_reg(mod, PORTB_IOCTL, 0x00ff);
+ ttl_write_reg(mod, PORTC_IOCTL, 0x000f);
+
+ /* set all ports as outputs */
+ ttl_write_reg(mod, PORTA_DIRECTION, 0x0000);
+ ttl_write_reg(mod, PORTB_DIRECTION, 0x0000);
+ ttl_write_reg(mod, PORTC_DIRECTION, 0x0000);
+
+ /* set all ports to drive zeroes */
+ iowrite16be(0x0000, &mod->regs->porta);
+ iowrite16be(0x0000, &mod->regs->portb);
+ iowrite16be(0x0000, &mod->regs->portc);
+
+ /* enable all ports */
+ ttl_write_reg(mod, MASTER_CONF_CTL, CONF_PAE | CONF_PBE | CONF_PCE);
+}
+
+static int __devinit ttl_probe(struct platform_device *pdev)
+{
+ struct janz_platform_data *pdata;
+ struct device *dev = &pdev->dev;
+ struct ttl_module *mod;
+ struct gpio_chip *gpio;
+ struct resource *res;
+ int ret;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(dev, "no platform data\n");
+ ret = -ENXIO;
+ goto out_return;
+ }
+
+ mod = kzalloc(sizeof(*mod), GFP_KERNEL);
+ if (!mod) {
+ dev_err(dev, "unable to allocate private data\n");
+ ret = -ENOMEM;
+ goto out_return;
+ }
+
+ platform_set_drvdata(pdev, mod);
+ spin_lock_init(&mod->lock);
+
+ /* get access to the MODULbus registers for this module */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "MODULbus registers not found\n");
+ ret = -ENODEV;
+ goto out_free_mod;
+ }
+
+ mod->regs = ioremap(res->start, resource_size(res));
+ if (!mod->regs) {
+ dev_err(dev, "MODULbus registers not ioremap\n");
+ ret = -ENOMEM;
+ goto out_free_mod;
+ }
+
+ ttl_setup_device(mod);
+
+ /* Initialize the GPIO data structures */
+ gpio = &mod->gpio;
+ gpio->dev = &pdev->dev;
+ gpio->label = pdev->name;
+ gpio->get = ttl_get_value;
+ gpio->set = ttl_set_value;
+ gpio->owner = THIS_MODULE;
+
+ /* request dynamic allocation */
+ gpio->base = -1;
+ gpio->ngpio = 20;
+
+ ret = gpiochip_add(gpio);
+ if (ret) {
+ dev_err(dev, "unable to add GPIO chip\n");
+ goto out_iounmap_regs;
+ }
+
+ dev_info(&pdev->dev, "module %d: registered GPIO device\n",
+ pdata->modno);
+ return 0;
+
+out_iounmap_regs:
+ iounmap(mod->regs);
+out_free_mod:
+ kfree(mod);
+out_return:
+ return ret;
+}
+
+static int __devexit ttl_remove(struct platform_device *pdev)
+{
+ struct ttl_module *mod = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = gpiochip_remove(&mod->gpio);
+ if (ret) {
+ dev_err(dev, "unable to remove GPIO chip\n");
+ return ret;
+ }
+
+ iounmap(mod->regs);
+ kfree(mod);
+ return 0;
+}
+
+static struct platform_driver ttl_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = ttl_probe,
+ .remove = __devexit_p(ttl_remove),
+};
+
+static int __init ttl_init(void)
+{
+ return platform_driver_register(&ttl_driver);
+}
+
+static void __exit ttl_exit(void)
+{
+ platform_driver_unregister(&ttl_driver);
+}
+
+MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
+MODULE_DESCRIPTION("Janz MODULbus VMOD-TTL Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:janz-ttl");
+
+module_init(ttl_init);
+module_exit(ttl_exit);
diff --git a/drivers/gpio/rdc321x-gpio.c b/drivers/gpio/rdc321x-gpio.c
new file mode 100644
index 000000000000..e344907da5b0
--- /dev/null
+++ b/drivers/gpio/rdc321x-gpio.c
@@ -0,0 +1,246 @@
+/*
+ * RDC321x GPIO driver
+ *
+ * Copyright (C) 2008, Volker Weiss <dev@tintuc.de>
+ * Copyright (C) 2007-2010 Florian Fainelli <florian@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+#include <linux/gpio.h>
+#include <linux/mfd/rdc321x.h>
+#include <linux/slab.h>
+
+struct rdc321x_gpio {
+ spinlock_t lock;
+ struct pci_dev *sb_pdev;
+ u32 data_reg[2];
+ int reg1_ctrl_base;
+ int reg1_data_base;
+ int reg2_ctrl_base;
+ int reg2_data_base;
+ struct gpio_chip chip;
+};
+
+/* read GPIO pin */
+static int rdc_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
+{
+ struct rdc321x_gpio *gpch;
+ u32 value = 0;
+ int reg;
+
+ gpch = container_of(chip, struct rdc321x_gpio, chip);
+ reg = gpio < 32 ? gpch->reg1_data_base : gpch->reg2_data_base;
+
+ spin_lock(&gpch->lock);
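+ /* rewrite the cached output state, then read the register back to sample the pins */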
+ pci_write_config_dword(gpch->sb_pdev, reg,
+ gpch->data_reg[gpio < 32 ? 0 : 1]);
+ pci_read_config_dword(gpch->sb_pdev, reg, &value);
+ spin_unlock(&gpch->lock);
+
+ return (1 << (gpio & 0x1f)) & value ? 1 : 0;
+}
+
+static void rdc_gpio_set_value_impl(struct gpio_chip *chip,
+ unsigned gpio, int value)
+{
+ struct rdc321x_gpio *gpch;
+ int reg = (gpio < 32) ? 0 : 1;
+
+ gpch = container_of(chip, struct rdc321x_gpio, chip);
+
+ if (value)
+ gpch->data_reg[reg] |= 1 << (gpio & 0x1f);
+ else
+ gpch->data_reg[reg] &= ~(1 << (gpio & 0x1f));
+
+ pci_write_config_dword(gpch->sb_pdev,
+ reg ? gpch->reg2_data_base : gpch->reg1_data_base,
+ gpch->data_reg[reg]);
+}
+
+/* set GPIO pin to value */
+static void rdc_gpio_set_value(struct gpio_chip *chip,
+ unsigned gpio, int value)
+{
+ struct rdc321x_gpio *gpch;
+
+ gpch = container_of(chip, struct rdc321x_gpio, chip);
+ spin_lock(&gpch->lock);
+ rdc_gpio_set_value_impl(chip, gpio, value);
+ spin_unlock(&gpch->lock);
+}
+
+static int rdc_gpio_config(struct gpio_chip *chip,
+ unsigned gpio, int value)
+{
+ struct rdc321x_gpio *gpch;
+ int err;
+ u32 reg;
+
+ gpch = container_of(chip, struct rdc321x_gpio, chip);
+
+ spin_lock(&gpch->lock);
+ err = pci_read_config_dword(gpch->sb_pdev, gpio < 32 ?
+ gpch->reg1_ctrl_base : gpch->reg2_ctrl_base, &reg);
+ if (err)
+ goto unlock;
+
+ reg |= 1 << (gpio & 0x1f);
+
+ err = pci_write_config_dword(gpch->sb_pdev, gpio < 32 ?
+ gpch->reg1_ctrl_base : gpch->reg2_ctrl_base, reg);
+ if (err)
+ goto unlock;
+
+ rdc_gpio_set_value_impl(chip, gpio, value);
+
+unlock:
+ spin_unlock(&gpch->lock);
+
+ return err;
+}
+
+/* configure GPIO pin as input */
+static int rdc_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
+{
+ return rdc_gpio_config(chip, gpio, 1);
+}
+
+/*
+ * Cache the initial value of both GPIO data registers
+ */
+static int __devinit rdc321x_gpio_probe(struct platform_device *pdev)
+{
+ int err;
+ struct resource *r;
+ struct rdc321x_gpio *rdc321x_gpio_dev;
+ struct rdc321x_gpio_pdata *pdata;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data supplied\n");
+ return -ENODEV;
+ }
+
+ rdc321x_gpio_dev = kzalloc(sizeof(struct rdc321x_gpio), GFP_KERNEL);
+ if (!rdc321x_gpio_dev) {
+ dev_err(&pdev->dev, "failed to allocate private data\n");
+ return -ENOMEM;
+ }
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gpio-reg1");
+ if (!r) {
+ dev_err(&pdev->dev, "failed to get gpio-reg1 resource\n");
+ err = -ENODEV;
+ goto out_free;
+ }
+
+ spin_lock_init(&rdc321x_gpio_dev->lock);
+ rdc321x_gpio_dev->sb_pdev = pdata->sb_pdev;
+ rdc321x_gpio_dev->reg1_ctrl_base = r->start;
+ rdc321x_gpio_dev->reg1_data_base = r->start + 0x4;
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gpio-reg2");
+ if (!r) {
+ dev_err(&pdev->dev, "failed to get gpio-reg2 resource\n");
+ err = -ENODEV;
+ goto out_free;
+ }
+
+ rdc321x_gpio_dev->reg2_ctrl_base = r->start;
+ rdc321x_gpio_dev->reg2_data_base = r->start + 0x4;
+
+ rdc321x_gpio_dev->chip.label = "rdc321x-gpio";
+ rdc321x_gpio_dev->chip.direction_input = rdc_gpio_direction_input;
+ rdc321x_gpio_dev->chip.direction_output = rdc_gpio_config;
+ rdc321x_gpio_dev->chip.get = rdc_gpio_get_value;
+ rdc321x_gpio_dev->chip.set = rdc_gpio_set_value;
+ rdc321x_gpio_dev->chip.base = 0;
+ rdc321x_gpio_dev->chip.ngpio = pdata->max_gpios;
+
+ platform_set_drvdata(pdev, rdc321x_gpio_dev);
+
+ /* This might not be what others (BIOS, bootloader, etc.)
+ wrote to these registers before, but it's a good guess. Still
+ better than just using 0xffffffff. */
+ err = pci_read_config_dword(rdc321x_gpio_dev->sb_pdev,
+ rdc321x_gpio_dev->reg1_data_base,
+ &rdc321x_gpio_dev->data_reg[0]);
+ if (err)
+ goto out_drvdata;
+
+ err = pci_read_config_dword(rdc321x_gpio_dev->sb_pdev,
+ rdc321x_gpio_dev->reg2_data_base,
+ &rdc321x_gpio_dev->data_reg[1]);
+ if (err)
+ goto out_drvdata;
+
+ dev_info(&pdev->dev, "registering %d GPIOs\n",
+ rdc321x_gpio_dev->chip.ngpio);
+ return gpiochip_add(&rdc321x_gpio_dev->chip);
+
+out_drvdata:
+ platform_set_drvdata(pdev, NULL);
+out_free:
+ kfree(rdc321x_gpio_dev);
+ return err;
+}
+
+static int __devexit rdc321x_gpio_remove(struct platform_device *pdev)
+{
+ int ret;
+ struct rdc321x_gpio *rdc321x_gpio_dev = platform_get_drvdata(pdev);
+
+ ret = gpiochip_remove(&rdc321x_gpio_dev->chip);
+ if (ret)
+ dev_err(&pdev->dev, "failed to unregister chip\n");
+
+ kfree(rdc321x_gpio_dev);
+ platform_set_drvdata(pdev, NULL);
+
+ return ret;
+}
+
+static struct platform_driver rdc321x_gpio_driver = {
+ .driver.name = "rdc321x-gpio",
+ .driver.owner = THIS_MODULE,
+ .probe = rdc321x_gpio_probe,
+ .remove = __devexit_p(rdc321x_gpio_remove),
+};
+
+static int __init rdc321x_gpio_init(void)
+{
+ return platform_driver_register(&rdc321x_gpio_driver);
+}
+
+static void __exit rdc321x_gpio_exit(void)
+{
+ platform_driver_unregister(&rdc321x_gpio_driver);
+}
+
+module_init(rdc321x_gpio_init);
+module_exit(rdc321x_gpio_exit);
+
+MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
+MODULE_DESCRIPTION("RDC321x GPIO driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:rdc321x-gpio");
diff --git a/drivers/gpio/tc35892-gpio.c b/drivers/gpio/tc35892-gpio.c
new file mode 100644
index 000000000000..1be6288780de
--- /dev/null
+++ b/drivers/gpio/tc35892-gpio.c
@@ -0,0 +1,381 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License, version 2
+ * Author: Hanumath Prasad <hanumath.prasad@stericsson.com> for ST-Ericsson
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/tc35892.h>
+
+/*
+ * These registers are modified under the irq bus lock and cached to avoid
+ * unnecessary writes in bus_sync_unlock.
+ */
+enum { REG_IBE, REG_IEV, REG_IS, REG_IE };
+
+#define CACHE_NR_REGS 4
+#define CACHE_NR_BANKS 3
+
+struct tc35892_gpio {
+ struct gpio_chip chip;
+ struct tc35892 *tc35892;
+ struct device *dev;
+ struct mutex irq_lock;
+
+ int irq_base;
+
+ /* Caches of interrupt control registers for bus_lock */
+ u8 regs[CACHE_NR_REGS][CACHE_NR_BANKS];
+ u8 oldregs[CACHE_NR_REGS][CACHE_NR_BANKS];
+};
+
+static inline struct tc35892_gpio *to_tc35892_gpio(struct gpio_chip *chip)
+{
+ return container_of(chip, struct tc35892_gpio, chip);
+}
+
+static int tc35892_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct tc35892_gpio *tc35892_gpio = to_tc35892_gpio(chip);
+ struct tc35892 *tc35892 = tc35892_gpio->tc35892;
+ u8 reg = TC35892_GPIODATA0 + (offset / 8) * 2;
+ u8 mask = 1 << (offset % 8);
+ int ret;
+
+ ret = tc35892_reg_read(tc35892, reg);
+ if (ret < 0)
+ return ret;
+
+ return ret & mask;
+}
+
+static void tc35892_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
+{
+ struct tc35892_gpio *tc35892_gpio = to_tc35892_gpio(chip);
+ struct tc35892 *tc35892 = tc35892_gpio->tc35892;
+ u8 reg = TC35892_GPIODATA0 + (offset / 8) * 2;
+ unsigned pos = offset % 8;
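+ /* write the data byte and the per-bit mask byte (the register that follows) in one block transfer */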
+ u8 data[] = {!!val << pos, 1 << pos};
+
+ tc35892_block_write(tc35892, reg, ARRAY_SIZE(data), data);
+}
+
+static int tc35892_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int val)
+{
+ struct tc35892_gpio *tc35892_gpio = to_tc35892_gpio(chip);
+ struct tc35892 *tc35892 = tc35892_gpio->tc35892;
+ u8 reg = TC35892_GPIODIR0 + offset / 8;
+ unsigned pos = offset % 8;
+
+ tc35892_gpio_set(chip, offset, val);
+
+ return tc35892_set_bits(tc35892, reg, 1 << pos, 1 << pos);
+}
+
+static int tc35892_gpio_direction_input(struct gpio_chip *chip,
+ unsigned offset)
+{
+ struct tc35892_gpio *tc35892_gpio = to_tc35892_gpio(chip);
+ struct tc35892 *tc35892 = tc35892_gpio->tc35892;
+ u8 reg = TC35892_GPIODIR0 + offset / 8;
+ unsigned pos = offset % 8;
+
+ return tc35892_set_bits(tc35892, reg, 1 << pos, 0);
+}
+
+static int tc35892_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct tc35892_gpio *tc35892_gpio = to_tc35892_gpio(chip);
+
+ return tc35892_gpio->irq_base + offset;
+}
+
+static struct gpio_chip template_chip = {
+ .label = "tc35892",
+ .owner = THIS_MODULE,
+ .direction_input = tc35892_gpio_direction_input,
+ .get = tc35892_gpio_get,
+ .direction_output = tc35892_gpio_direction_output,
+ .set = tc35892_gpio_set,
+ .to_irq = tc35892_gpio_to_irq,
+ .can_sleep = 1,
+};
+
+static int tc35892_gpio_irq_set_type(unsigned int irq, unsigned int type)
+{
+ struct tc35892_gpio *tc35892_gpio = get_irq_chip_data(irq);
+ int offset = irq - tc35892_gpio->irq_base;
+ int regoffset = offset / 8;
+ int mask = 1 << (offset % 8);
+
+ if (type == IRQ_TYPE_EDGE_BOTH) {
+ tc35892_gpio->regs[REG_IBE][regoffset] |= mask;
+ return 0;
+ }
+
+ tc35892_gpio->regs[REG_IBE][regoffset] &= ~mask;
+
+ if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_LEVEL_HIGH)
+ tc35892_gpio->regs[REG_IS][regoffset] |= mask;
+ else
+ tc35892_gpio->regs[REG_IS][regoffset] &= ~mask;
+
+ if (type == IRQ_TYPE_EDGE_RISING || type == IRQ_TYPE_LEVEL_HIGH)
+ tc35892_gpio->regs[REG_IEV][regoffset] |= mask;
+ else
+ tc35892_gpio->regs[REG_IEV][regoffset] &= ~mask;
+
+ return 0;
+}
+
+static void tc35892_gpio_irq_lock(unsigned int irq)
+{
+ struct tc35892_gpio *tc35892_gpio = get_irq_chip_data(irq);
+
+ mutex_lock(&tc35892_gpio->irq_lock);
+}
+
+static void tc35892_gpio_irq_sync_unlock(unsigned int irq)
+{
+ struct tc35892_gpio *tc35892_gpio = get_irq_chip_data(irq);
+ struct tc35892 *tc35892 = tc35892_gpio->tc35892;
+ static const u8 regmap[] = {
+ [REG_IBE] = TC35892_GPIOIBE0,
+ [REG_IEV] = TC35892_GPIOIEV0,
+ [REG_IS] = TC35892_GPIOIS0,
+ [REG_IE] = TC35892_GPIOIE0,
+ };
+ int i, j;
+
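+ /* flush only the cached register values that changed while the bus lock was held */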
+ for (i = 0; i < CACHE_NR_REGS; i++) {
+ for (j = 0; j < CACHE_NR_BANKS; j++) {
+ u8 old = tc35892_gpio->oldregs[i][j];
+ u8 new = tc35892_gpio->regs[i][j];
+
+ if (new == old)
+ continue;
+
+ tc35892_gpio->oldregs[i][j] = new;
+ tc35892_reg_write(tc35892, regmap[i] + j * 8, new);
+ }
+ }
+
+ mutex_unlock(&tc35892_gpio->irq_lock);
+}
+
+static void tc35892_gpio_irq_mask(unsigned int irq)
+{
+ struct tc35892_gpio *tc35892_gpio = get_irq_chip_data(irq);
+ int offset = irq - tc35892_gpio->irq_base;
+ int regoffset = offset / 8;
+ int mask = 1 << (offset % 8);
+
+ tc35892_gpio->regs[REG_IE][regoffset] &= ~mask;
+}
+
+static void tc35892_gpio_irq_unmask(unsigned int irq)
+{
+ struct tc35892_gpio *tc35892_gpio = get_irq_chip_data(irq);
+ int offset = irq - tc35892_gpio->irq_base;
+ int regoffset = offset / 8;
+ int mask = 1 << (offset % 8);
+
+ tc35892_gpio->regs[REG_IE][regoffset] |= mask;
+}
+
+static struct irq_chip tc35892_gpio_irq_chip = {
+ .name = "tc35892-gpio",
+ .bus_lock = tc35892_gpio_irq_lock,
+ .bus_sync_unlock = tc35892_gpio_irq_sync_unlock,
+ .mask = tc35892_gpio_irq_mask,
+ .unmask = tc35892_gpio_irq_unmask,
+ .set_type = tc35892_gpio_irq_set_type,
+};
+
+static irqreturn_t tc35892_gpio_irq(int irq, void *dev)
+{
+ struct tc35892_gpio *tc35892_gpio = dev;
+ struct tc35892 *tc35892 = tc35892_gpio->tc35892;
+ u8 status[CACHE_NR_BANKS];
+ int ret;
+ int i;
+
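+ /* read the masked interrupt status for all banks in one transfer */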
+ ret = tc35892_block_read(tc35892, TC35892_GPIOMIS0,
+ ARRAY_SIZE(status), status);
+ if (ret < 0)
+ return IRQ_NONE;
+
+ for (i = 0; i < ARRAY_SIZE(status); i++) {
+ unsigned int stat = status[i];
+ if (!stat)
+ continue;
+
+ while (stat) {
+ int bit = __ffs(stat);
+ int line = i * 8 + bit;
+
+ handle_nested_irq(tc35892_gpio->irq_base + line);
+ stat &= ~(1 << bit);
+ }
+
+ tc35892_reg_write(tc35892, TC35892_GPIOIC0 + i, status[i]);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int tc35892_gpio_irq_init(struct tc35892_gpio *tc35892_gpio)
+{
+ int base = tc35892_gpio->irq_base;
+ int irq;
+
+ for (irq = base; irq < base + tc35892_gpio->chip.ngpio; irq++) {
+ set_irq_chip_data(irq, tc35892_gpio);
+ set_irq_chip_and_handler(irq, &tc35892_gpio_irq_chip,
+ handle_simple_irq);
+ set_irq_nested_thread(irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ set_irq_noprobe(irq);
+#endif
+ }
+
+ return 0;
+}
+
+static void tc35892_gpio_irq_remove(struct tc35892_gpio *tc35892_gpio)
+{
+ int base = tc35892_gpio->irq_base;
+ int irq;
+
+ for (irq = base; irq < base + tc35892_gpio->chip.ngpio; irq++) {
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, 0);
+#endif
+ set_irq_chip_and_handler(irq, NULL, NULL);
+ set_irq_chip_data(irq, NULL);
+ }
+}
+
+static int __devinit tc35892_gpio_probe(struct platform_device *pdev)
+{
+ struct tc35892 *tc35892 = dev_get_drvdata(pdev->dev.parent);
+ struct tc35892_gpio_platform_data *pdata;
+ struct tc35892_gpio *tc35892_gpio;
+ int ret;
+ int irq;
+
+ pdata = tc35892->pdata->gpio;
+ if (!pdata)
+ return -ENODEV;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ tc35892_gpio = kzalloc(sizeof(struct tc35892_gpio), GFP_KERNEL);
+ if (!tc35892_gpio)
+ return -ENOMEM;
+
+ mutex_init(&tc35892_gpio->irq_lock);
+
+ tc35892_gpio->dev = &pdev->dev;
+ tc35892_gpio->tc35892 = tc35892;
+
+ tc35892_gpio->chip = template_chip;
+ tc35892_gpio->chip.ngpio = tc35892->num_gpio;
+ tc35892_gpio->chip.dev = &pdev->dev;
+ tc35892_gpio->chip.base = pdata->gpio_base;
+
+ tc35892_gpio->irq_base = tc35892->irq_base + TC35892_INT_GPIO(0);
+
+ /* Bring the GPIO module out of reset */
+ ret = tc35892_set_bits(tc35892, TC35892_RSTCTRL,
+ TC35892_RSTCTRL_GPIRST, 0);
+ if (ret < 0)
+ goto out_free;
+
+ ret = tc35892_gpio_irq_init(tc35892_gpio);
+ if (ret)
+ goto out_free;
+
+ ret = request_threaded_irq(irq, NULL, tc35892_gpio_irq, IRQF_ONESHOT,
+ "tc35892-gpio", tc35892_gpio);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get irq: %d\n", ret);
+ goto out_removeirq;
+ }
+
+ ret = gpiochip_add(&tc35892_gpio->chip);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to add gpiochip: %d\n", ret);
+ goto out_freeirq;
+ }
+
+ platform_set_drvdata(pdev, tc35892_gpio);
+
+ return 0;
+
+out_freeirq:
+ free_irq(irq, tc35892_gpio);
+out_removeirq:
+ tc35892_gpio_irq_remove(tc35892_gpio);
+out_free:
+ kfree(tc35892_gpio);
+ return ret;
+}
+
+static int __devexit tc35892_gpio_remove(struct platform_device *pdev)
+{
+ struct tc35892_gpio *tc35892_gpio = platform_get_drvdata(pdev);
+ int irq = platform_get_irq(pdev, 0);
+ int ret;
+
+ ret = gpiochip_remove(&tc35892_gpio->chip);
+ if (ret < 0) {
+ dev_err(tc35892_gpio->dev,
+ "unable to remove gpiochip: %d\n", ret);
+ return ret;
+ }
+
+ free_irq(irq, tc35892_gpio);
+ tc35892_gpio_irq_remove(tc35892_gpio);
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(tc35892_gpio);
+
+ return 0;
+}
+
+static struct platform_driver tc35892_gpio_driver = {
+ .driver.name = "tc35892-gpio",
+ .driver.owner = THIS_MODULE,
+ .probe = tc35892_gpio_probe,
+ .remove = __devexit_p(tc35892_gpio_remove),
+};
+
+static int __init tc35892_gpio_init(void)
+{
+ return platform_driver_register(&tc35892_gpio_driver);
+}
+subsys_initcall(tc35892_gpio_init);
+
+static void __exit tc35892_gpio_exit(void)
+{
+ platform_driver_unregister(&tc35892_gpio_driver);
+}
+module_exit(tc35892_gpio_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TC35892 GPIO driver");
+MODULE_AUTHOR("Hanumath Prasad, Rabin Vincent");
diff --git a/drivers/gpio/wm8994-gpio.c b/drivers/gpio/wm8994-gpio.c
index 7607cc61e1dd..2ac9a16d3daa 100644
--- a/drivers/gpio/wm8994-gpio.c
+++ b/drivers/gpio/wm8994-gpio.c
@@ -81,6 +81,18 @@ static void wm8994_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset, WM8994_GPN_LVL, value);
}
+static int wm8994_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
+ struct wm8994 *wm8994 = wm8994_gpio->wm8994;
+
+ if (!wm8994->irq_base)
+ return -EINVAL;
+
+ return wm8994->irq_base + offset;
+}
+
#ifdef CONFIG_DEBUG_FS
static void wm8994_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 305c59003963..be5aa7d5206b 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -23,6 +23,7 @@ config DRM_KMS_HELPER
depends on DRM
select FB
select FRAMEBUFFER_CONSOLE if !EMBEDDED
+ select SLOW_WORK
help
FB and CRTC helpers for KMS drivers.
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index f7ba82ebf65a..2092e7bb788f 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -961,7 +961,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
dma->buflist[i + dma->buf_count] = &entry->buflist[i];
}
- /* No allocations failed, so now we can replace the orginal pagelist
+ /* No allocations failed, so now we can replace the original pagelist
* with the new one.
*/
if (dma->page_count) {
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 61b9bcfdf040..994d23beeb1d 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -34,6 +34,7 @@
#include "drm.h"
#include "drmP.h"
#include "drm_crtc.h"
+#include "drm_edid.h"
struct drm_prop_enum_list {
int type;
@@ -494,7 +495,6 @@ void drm_connector_cleanup(struct drm_connector *connector)
list_for_each_entry_safe(mode, t, &connector->user_modes, head)
drm_mode_remove(connector, mode);
- kfree(connector->fb_helper_private);
mutex_lock(&dev->mode_config.mutex);
drm_mode_object_put(dev, &connector->base);
list_del(&connector->head);
@@ -858,7 +858,6 @@ void drm_mode_config_init(struct drm_device *dev)
mutex_init(&dev->mode_config.mutex);
mutex_init(&dev->mode_config.idr_mutex);
INIT_LIST_HEAD(&dev->mode_config.fb_list);
- INIT_LIST_HEAD(&dev->mode_config.fb_kernel_list);
INIT_LIST_HEAD(&dev->mode_config.crtc_list);
INIT_LIST_HEAD(&dev->mode_config.connector_list);
INIT_LIST_HEAD(&dev->mode_config.encoder_list);
@@ -2350,7 +2349,7 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
struct edid *edid)
{
struct drm_device *dev = connector->dev;
- int ret = 0;
+ int ret = 0, size;
if (connector->edid_blob_ptr)
drm_property_destroy_blob(dev, connector->edid_blob_ptr);
@@ -2362,7 +2361,9 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
return ret;
}
- connector->edid_blob_ptr = drm_property_create_blob(connector->dev, 128, edid);
+ size = EDID_LENGTH * (1 + edid->extensions);
+ connector->edid_blob_ptr = drm_property_create_blob(connector->dev,
+ size, edid);
ret = drm_connector_property_set_value(connector,
dev->mode_config.edid_property,
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 51103aa469f8..b142ac260d97 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -55,7 +55,7 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
}
/**
- * drm_helper_probe_connector_modes - get complete set of display modes
+ * drm_helper_probe_single_connector_modes - get complete set of display modes
* @dev: DRM device
* @maxX: max width for modes
* @maxY: max height for modes
@@ -154,21 +154,6 @@ prune:
}
EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
-int drm_helper_probe_connector_modes(struct drm_device *dev, uint32_t maxX,
- uint32_t maxY)
-{
- struct drm_connector *connector;
- int count = 0;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- count += drm_helper_probe_single_connector_modes(connector,
- maxX, maxY);
- }
-
- return count;
-}
-EXPORT_SYMBOL(drm_helper_probe_connector_modes);
-
/**
* drm_helper_encoder_in_use - check if a given encoder is in use
* @encoder: encoder to check
@@ -263,302 +248,6 @@ void drm_helper_disable_unused_functions(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_helper_disable_unused_functions);
-static struct drm_display_mode *drm_has_preferred_mode(struct drm_connector *connector, int width, int height)
-{
- struct drm_display_mode *mode;
-
- list_for_each_entry(mode, &connector->modes, head) {
- if (drm_mode_width(mode) > width ||
- drm_mode_height(mode) > height)
- continue;
- if (mode->type & DRM_MODE_TYPE_PREFERRED)
- return mode;
- }
- return NULL;
-}
-
-static bool drm_has_cmdline_mode(struct drm_connector *connector)
-{
- struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
- struct drm_fb_helper_cmdline_mode *cmdline_mode;
-
- if (!fb_help_conn)
- return false;
-
- cmdline_mode = &fb_help_conn->cmdline_mode;
- return cmdline_mode->specified;
-}
-
-static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_connector *connector, int width, int height)
-{
- struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
- struct drm_fb_helper_cmdline_mode *cmdline_mode;
- struct drm_display_mode *mode = NULL;
-
- if (!fb_help_conn)
- return mode;
-
- cmdline_mode = &fb_help_conn->cmdline_mode;
- if (cmdline_mode->specified == false)
- return mode;
-
- /* attempt to find a matching mode in the list of modes
- * we have gotten so far, if not add a CVT mode that conforms
- */
- if (cmdline_mode->rb || cmdline_mode->margins)
- goto create_mode;
-
- list_for_each_entry(mode, &connector->modes, head) {
- /* check width/height */
- if (mode->hdisplay != cmdline_mode->xres ||
- mode->vdisplay != cmdline_mode->yres)
- continue;
-
- if (cmdline_mode->refresh_specified) {
- if (mode->vrefresh != cmdline_mode->refresh)
- continue;
- }
-
- if (cmdline_mode->interlace) {
- if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
- continue;
- }
- return mode;
- }
-
-create_mode:
- mode = drm_cvt_mode(connector->dev, cmdline_mode->xres,
- cmdline_mode->yres,
- cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
- cmdline_mode->rb, cmdline_mode->interlace,
- cmdline_mode->margins);
- drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
- list_add(&mode->head, &connector->modes);
- return mode;
-}
-
-static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
-{
- bool enable;
-
- if (strict) {
- enable = connector->status == connector_status_connected;
- } else {
- enable = connector->status != connector_status_disconnected;
- }
- return enable;
-}
-
-static void drm_enable_connectors(struct drm_device *dev, bool *enabled)
-{
- bool any_enabled = false;
- struct drm_connector *connector;
- int i = 0;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- enabled[i] = drm_connector_enabled(connector, true);
- DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
- enabled[i] ? "yes" : "no");
- any_enabled |= enabled[i];
- i++;
- }
-
- if (any_enabled)
- return;
-
- i = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- enabled[i] = drm_connector_enabled(connector, false);
- i++;
- }
-}
-
-static bool drm_target_preferred(struct drm_device *dev,
- struct drm_display_mode **modes,
- bool *enabled, int width, int height)
-{
- struct drm_connector *connector;
- int i = 0;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-
- if (enabled[i] == false) {
- i++;
- continue;
- }
-
- DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
- connector->base.id);
-
- /* got for command line mode first */
- modes[i] = drm_pick_cmdline_mode(connector, width, height);
- if (!modes[i]) {
- DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
- connector->base.id);
- modes[i] = drm_has_preferred_mode(connector, width, height);
- }
- /* No preferred modes, pick one off the list */
- if (!modes[i] && !list_empty(&connector->modes)) {
- list_for_each_entry(modes[i], &connector->modes, head)
- break;
- }
- DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
- "none");
- i++;
- }
- return true;
-}
-
-static int drm_pick_crtcs(struct drm_device *dev,
- struct drm_crtc **best_crtcs,
- struct drm_display_mode **modes,
- int n, int width, int height)
-{
- int c, o;
- struct drm_connector *connector;
- struct drm_connector_helper_funcs *connector_funcs;
- struct drm_encoder *encoder;
- struct drm_crtc *best_crtc;
- int my_score, best_score, score;
- struct drm_crtc **crtcs, *crtc;
-
- if (n == dev->mode_config.num_connector)
- return 0;
- c = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (c == n)
- break;
- c++;
- }
-
- best_crtcs[n] = NULL;
- best_crtc = NULL;
- best_score = drm_pick_crtcs(dev, best_crtcs, modes, n+1, width, height);
- if (modes[n] == NULL)
- return best_score;
-
- crtcs = kmalloc(dev->mode_config.num_connector *
- sizeof(struct drm_crtc *), GFP_KERNEL);
- if (!crtcs)
- return best_score;
-
- my_score = 1;
- if (connector->status == connector_status_connected)
- my_score++;
- if (drm_has_cmdline_mode(connector))
- my_score++;
- if (drm_has_preferred_mode(connector, width, height))
- my_score++;
-
- connector_funcs = connector->helper_private;
- encoder = connector_funcs->best_encoder(connector);
- if (!encoder)
- goto out;
-
- connector->encoder = encoder;
-
- /* select a crtc for this connector and then attempt to configure
- remaining connectors */
- c = 0;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-
- if ((encoder->possible_crtcs & (1 << c)) == 0) {
- c++;
- continue;
- }
-
- for (o = 0; o < n; o++)
- if (best_crtcs[o] == crtc)
- break;
-
- if (o < n) {
- /* ignore cloning for now */
- c++;
- continue;
- }
-
- crtcs[n] = crtc;
- memcpy(crtcs, best_crtcs, n * sizeof(struct drm_crtc *));
- score = my_score + drm_pick_crtcs(dev, crtcs, modes, n + 1,
- width, height);
- if (score > best_score) {
- best_crtc = crtc;
- best_score = score;
- memcpy(best_crtcs, crtcs,
- dev->mode_config.num_connector *
- sizeof(struct drm_crtc *));
- }
- c++;
- }
-out:
- kfree(crtcs);
- return best_score;
-}
-
-static void drm_setup_crtcs(struct drm_device *dev)
-{
- struct drm_crtc **crtcs;
- struct drm_display_mode **modes;
- struct drm_encoder *encoder;
- struct drm_connector *connector;
- bool *enabled;
- int width, height;
- int i, ret;
-
- DRM_DEBUG_KMS("\n");
-
- width = dev->mode_config.max_width;
- height = dev->mode_config.max_height;
-
- /* clean out all the encoder/crtc combos */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- encoder->crtc = NULL;
- }
-
- crtcs = kcalloc(dev->mode_config.num_connector,
- sizeof(struct drm_crtc *), GFP_KERNEL);
- modes = kcalloc(dev->mode_config.num_connector,
- sizeof(struct drm_display_mode *), GFP_KERNEL);
- enabled = kcalloc(dev->mode_config.num_connector,
- sizeof(bool), GFP_KERNEL);
-
- drm_enable_connectors(dev, enabled);
-
- ret = drm_target_preferred(dev, modes, enabled, width, height);
- if (!ret)
- DRM_ERROR("Unable to find initial modes\n");
-
- DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height);
-
- drm_pick_crtcs(dev, crtcs, modes, 0, width, height);
-
- i = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct drm_display_mode *mode = modes[i];
- struct drm_crtc *crtc = crtcs[i];
-
- if (connector->encoder == NULL) {
- i++;
- continue;
- }
-
- if (mode && crtc) {
- DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
- mode->name, crtc->base.id);
- crtc->desired_mode = mode;
- connector->encoder->crtc = crtc;
- } else {
- connector->encoder->crtc = NULL;
- connector->encoder = NULL;
- }
- i++;
- }
-
- kfree(crtcs);
- kfree(modes);
- kfree(enabled);
-}
-
/**
* drm_encoder_crtc_ok - can a given crtc drive a given encoder?
* @encoder: encoder to test
@@ -936,10 +625,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
ret = -EINVAL;
goto fail;
}
- /* TODO are these needed? */
- set->crtc->desired_x = set->x;
- set->crtc->desired_y = set->y;
- set->crtc->desired_mode = set->mode;
}
drm_helper_disable_unused_functions(dev);
} else if (fb_changed) {
@@ -984,63 +669,6 @@ fail:
}
EXPORT_SYMBOL(drm_crtc_helper_set_config);
-bool drm_helper_plugged_event(struct drm_device *dev)
-{
- DRM_DEBUG_KMS("\n");
-
- drm_helper_probe_connector_modes(dev, dev->mode_config.max_width,
- dev->mode_config.max_height);
-
- drm_setup_crtcs(dev);
-
- /* alert the driver fb layer */
- dev->mode_config.funcs->fb_changed(dev);
-
- /* FIXME: send hotplug event */
- return true;
-}
-/**
- * drm_initial_config - setup a sane initial connector configuration
- * @dev: DRM device
- *
- * LOCKING:
- * Called at init time, must take mode config lock.
- *
- * Scan the CRTCs and connectors and try to put together an initial setup.
- * At the moment, this is a cloned configuration across all heads with
- * a new framebuffer object as the backing store.
- *
- * RETURNS:
- * Zero if everything went ok, nonzero otherwise.
- */
-bool drm_helper_initial_config(struct drm_device *dev)
-{
- int count = 0;
-
- /* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(dev);
-
- drm_fb_helper_parse_command_line(dev);
-
- count = drm_helper_probe_connector_modes(dev,
- dev->mode_config.max_width,
- dev->mode_config.max_height);
-
- /*
- * we shouldn't end up with no modes here.
- */
- if (count == 0)
- printk(KERN_INFO "No connectors reported connected with modes\n");
-
- drm_setup_crtcs(dev);
-
- /* alert the driver fb layer */
- dev->mode_config.funcs->fb_changed(dev);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_helper_initial_config);
-
static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
{
int dpms = DRM_MODE_DPMS_OFF;
@@ -1123,27 +751,6 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
}
EXPORT_SYMBOL(drm_helper_connector_dpms);
-/**
- * drm_hotplug_stage_two
- * @dev DRM device
- * @connector hotpluged connector
- *
- * LOCKING.
- * Caller must hold mode config lock, function might grab struct lock.
- *
- * Stage two of a hotplug.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_helper_hotplug_stage_two(struct drm_device *dev)
-{
- drm_helper_plugged_event(dev);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_helper_hotplug_stage_two);
-
int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
struct drm_mode_fb_cmd *mode_cmd)
{
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 18f41d7061f0..71886749fa2c 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -2,6 +2,7 @@
* Copyright (c) 2006 Luc Verhaegen (quirks list)
* Copyright (c) 2007-2008 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
+ * Copyright 2010 Red Hat, Inc.
*
* DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from
* FB layer.
@@ -33,10 +34,9 @@
#include "drmP.h"
#include "drm_edid.h"
-/*
- * TODO:
- * - support EDID 1.4 (incl. CE blocks)
- */
+#define EDID_EST_TIMINGS 16
+#define EDID_STD_TIMINGS 8
+#define EDID_DETAILED_TIMINGS 4
/*
* EDID blocks out in the wild have a variety of bugs, try to collect
@@ -65,7 +65,8 @@
#define LEVEL_DMT 0
#define LEVEL_GTF 1
-#define LEVEL_CVT 2
+#define LEVEL_GTF2 2
+#define LEVEL_CVT 3
static struct edid_quirk {
char *vendor;
@@ -109,36 +110,38 @@ static struct edid_quirk {
{ "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
};
+/*** DDC fetch and block validation ***/
-/* Valid EDID header has these bytes */
static const u8 edid_header[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
};
-/**
- * drm_edid_is_valid - sanity check EDID data
- * @edid: EDID data
- *
- * Sanity check the EDID block by looking at the header, the version number
- * and the checksum. Return 0 if the EDID doesn't check out, or 1 if it's
- * valid.
+/*
+ * Sanity check the EDID block (base or extension). Return 0 if the block
+ * doesn't check out, or 1 if it's valid.
*/
-bool drm_edid_is_valid(struct edid *edid)
+static bool
+drm_edid_block_valid(u8 *raw_edid)
{
- int i, score = 0;
+ int i;
u8 csum = 0;
- u8 *raw_edid = (u8 *)edid;
+ struct edid *edid = (struct edid *)raw_edid;
- for (i = 0; i < sizeof(edid_header); i++)
- if (raw_edid[i] == edid_header[i])
- score++;
+ if (raw_edid[0] == 0x00) {
+ int score = 0;
- if (score == 8) ;
- else if (score >= 6) {
- DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
- memcpy(raw_edid, edid_header, sizeof(edid_header));
- } else
- goto bad;
+ for (i = 0; i < sizeof(edid_header); i++)
+ if (raw_edid[i] == edid_header[i])
+ score++;
+
+ if (score == 8) ;
+ else if (score >= 6) {
+ DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
+ memcpy(raw_edid, edid_header, sizeof(edid_header));
+ } else {
+ goto bad;
+ }
+ }
for (i = 0; i < EDID_LENGTH; i++)
csum += raw_edid[i];
@@ -147,13 +150,21 @@ bool drm_edid_is_valid(struct edid *edid)
goto bad;
}
- if (edid->version != 1) {
- DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
- goto bad;
- }
+ /* per-block-type checks */
+ switch (raw_edid[0]) {
+ case 0: /* base */
+ if (edid->version != 1) {
+ DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
+ goto bad;
+ }
+
+ if (edid->revision > 4)
+ DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
+ break;
- if (edid->revision > 4)
- DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
+ default:
+ break;
+ }
return 1;
@@ -165,8 +176,158 @@ bad:
}
return 0;
}
+
+/**
+ * drm_edid_is_valid - sanity check EDID data
+ * @edid: EDID data
+ *
+ * Sanity-check an entire EDID record (including extensions)
+ */
+bool drm_edid_is_valid(struct edid *edid)
+{
+ int i;
+ u8 *raw = (u8 *)edid;
+
+ if (!edid)
+ return false;
+
+ for (i = 0; i <= edid->extensions; i++)
+ if (!drm_edid_block_valid(raw + i * EDID_LENGTH))
+ return false;
+
+ return true;
+}
EXPORT_SYMBOL(drm_edid_is_valid);
+#define DDC_ADDR 0x50
+#define DDC_SEGMENT_ADDR 0x30
+/**
+ * Get EDID information via I2C.
+ *
+ * \param adapter : i2c device adaptor
+ * \param buf : EDID data buffer to be filled
+ * \param len : EDID data buffer length
+ * \return 0 on success or -1 on failure.
+ *
+ * Try to fetch EDID information by calling i2c driver function.
+ */
+static int
+drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
+ int block, int len)
+{
+ unsigned char start = block * EDID_LENGTH;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = DDC_ADDR,
+ .flags = 0,
+ .len = 1,
+ .buf = &start,
+ }, {
+ .addr = DDC_ADDR,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = buf + start,
+ }
+ };
+
+ if (i2c_transfer(adapter, msgs, 2) == 2)
+ return 0;
+
+ return -1;
+}
+
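For context, the same two-message transaction (write the byte offset to address 0x50, then read the data back) can be driven from user space through i2c-dev. A rough sketch: the /dev/i2c-1 path is only a guess at the right bus, and blocks beyond 1 would additionally need the segment pointer at 0x30 that DDC_SEGMENT_ADDR refers to.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

#define DDC_ADDR    0x50
#define EDID_LENGTH 128

/* Read EDID block 'block' (0 or 1) into buf, mirroring drm_do_probe_ddc_edid(). */
static int read_edid_block(int fd, unsigned char *buf, int block)
{
	unsigned char start = block * EDID_LENGTH;
	struct i2c_msg msgs[2] = {
		{ .addr = DDC_ADDR, .flags = 0,        .len = 1,           .buf = &start },
		{ .addr = DDC_ADDR, .flags = I2C_M_RD, .len = EDID_LENGTH, .buf = buf },
	};
	struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };

	return ioctl(fd, I2C_RDWR, &xfer) < 0 ? -1 : 0;
}

int main(void)
{
	unsigned char edid[EDID_LENGTH];
	int fd = open("/dev/i2c-1", O_RDWR);	/* bus number is a guess */

	if (fd < 0 || read_edid_block(fd, edid, 0))
		return 1;
	printf("EDID %d.%d, %d extension block(s)\n", edid[18], edid[19], edid[126]);
	close(fd);
	return 0;
}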
+static u8 *
+drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
+{
+ int i, j = 0;
+ u8 *block, *new;
+
+ if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
+ return NULL;
+
+ /* base block fetch */
+ for (i = 0; i < 4; i++) {
+ if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
+ goto out;
+ if (drm_edid_block_valid(block))
+ break;
+ }
+ if (i == 4)
+ goto carp;
+
+ /* if there's no extensions, we're done */
+ if (block[0x7e] == 0)
+ return block;
+
+ new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
+ if (!new)
+ goto out;
+ block = new;
+
+ for (j = 1; j <= block[0x7e]; j++) {
+ for (i = 0; i < 4; i++) {
+ if (drm_do_probe_ddc_edid(adapter, block, j,
+ EDID_LENGTH))
+ goto out;
+ if (drm_edid_block_valid(block + j * EDID_LENGTH))
+ break;
+ }
+ if (i == 4)
+ goto carp;
+ }
+
+ return block;
+
+carp:
+ dev_warn(&connector->dev->pdev->dev, "%s: EDID block %d invalid.\n",
+ drm_get_connector_name(connector), j);
+
+out:
+ kfree(block);
+ return NULL;
+}
+
+/**
+ * Probe DDC presence.
+ *
+ * \param adapter : i2c device adaptor
+ * \return true if the DDC probe succeeded, false otherwise.
+ */
+static bool
+drm_probe_ddc(struct i2c_adapter *adapter)
+{
+ unsigned char out;
+
+ return (drm_do_probe_ddc_edid(adapter, &out, 0, 1) == 0);
+}
+
+/**
+ * drm_get_edid - get EDID data, if available
+ * @connector: connector we're probing
+ * @adapter: i2c adapter to use for DDC
+ *
+ * Poke the given i2c channel to grab EDID data if possible. If found,
+ * attach it to the connector.
+ *
+ * Return edid data or NULL if we couldn't find any.
+ */
+struct edid *drm_get_edid(struct drm_connector *connector,
+ struct i2c_adapter *adapter)
+{
+ struct edid *edid = NULL;
+
+ if (drm_probe_ddc(adapter))
+ edid = (struct edid *)drm_do_get_edid(connector, adapter);
+
+ connector->display_info.raw_edid = (char *)edid;
+
+ return edid;
+
+}
+EXPORT_SYMBOL(drm_get_edid);
+
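With fetching and validation now behind drm_get_edid(), a KMS driver's ->get_modes() hook reduces to roughly the shape below. This is a sketch only; example_get_ddc_adapter() stands in for whatever driver-specific lookup returns the connector's DDC i2c adapter.

/* Sketch of a connector ->get_modes() callback built on the helpers above. */
static int example_connector_get_modes(struct drm_connector *connector)
{
	struct i2c_adapter *ddc = example_get_ddc_adapter(connector); /* hypothetical */
	struct edid *edid;
	int count = 0;

	edid = drm_get_edid(connector, ddc);
	if (edid) {
		count = drm_add_edid_modes(connector, edid);
		kfree(edid);
	}
	return count;
}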
+/*** EDID parsing ***/
+
/**
* edid_vendor - match a string against EDID's obfuscated vendor field
* @edid: EDID to match
@@ -517,6 +678,110 @@ static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
return mode;
}
+typedef void detailed_cb(struct detailed_timing *timing, void *closure);
+
+static void
+drm_for_each_detailed_block(u8 *raw_edid, detailed_cb *cb, void *closure)
+{
+ int i;
+ struct edid *edid = (struct edid *)raw_edid;
+
+ if (edid == NULL)
+ return;
+
+ for (i = 0; i < EDID_DETAILED_TIMINGS; i++)
+ cb(&(edid->detailed_timings[i]), closure);
+
+ /* XXX extension block walk */
+}
+
+static void
+is_rb(struct detailed_timing *t, void *data)
+{
+ u8 *r = (u8 *)t;
+ if (r[3] == EDID_DETAIL_MONITOR_RANGE)
+ if (r[15] & 0x10)
+ *(bool *)data = true;
+}
+
+/* EDID 1.4 defines this explicitly. For EDID 1.3, we guess, badly. */
+static bool
+drm_monitor_supports_rb(struct edid *edid)
+{
+ if (edid->revision >= 4) {
+ bool ret = false;
+ drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
+ return ret;
+ }
+
+ return ((edid->input & DRM_EDID_INPUT_DIGITAL) != 0);
+}
+
+static void
+find_gtf2(struct detailed_timing *t, void *data)
+{
+ u8 *r = (u8 *)t;
+ if (r[3] == EDID_DETAIL_MONITOR_RANGE && r[10] == 0x02)
+ *(u8 **)data = r;
+}
+
+/* Secondary GTF curve kicks in above some break frequency */
+static int
+drm_gtf2_hbreak(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? (r[12] * 2) : 0;
+}
+
+static int
+drm_gtf2_2c(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? r[13] : 0;
+}
+
+static int
+drm_gtf2_m(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? (r[15] << 8) + r[14] : 0;
+}
+
+static int
+drm_gtf2_k(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? r[16] : 0;
+}
+
+static int
+drm_gtf2_2j(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? r[17] : 0;
+}
+
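Read together, these accessors imply the layout of a secondary-GTF range descriptor (a monitor-range block whose byte 10 is 0x02): the hsync break frequency is stored in 2 kHz units, and C and J are stored doubled, which is why the helpers are named _2c and _2j. A small decode sketch under that reading, which is an interpretation of the accessor names rather than something spelled out in this patch:

/* Illustrative decode of the secondary GTF fields, matching drm_gtf2_*(). */
struct gtf2_params {
	int hbreak_khz;	/* r[12] * 2: break frequency in kHz */
	int c2;		/* r[13]: 2 * C */
	int m;		/* r[14] | r[15] << 8 */
	int k;		/* r[16] */
	int j2;		/* r[17]: 2 * J */
};

static void decode_gtf2(const unsigned char *r, struct gtf2_params *p)
{
	p->hbreak_khz = r[12] * 2;
	p->c2 = r[13];
	p->m = (r[15] << 8) | r[14];
	p->k = r[16];
	p->j2 = r[17];
}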
+/**
+ * standard_timing_level - get std. timing level (CVT/GTF2/GTF/DMT)
+ * @edid: EDID block to scan
+ */
+static int standard_timing_level(struct edid *edid)
+{
+ if (edid->revision >= 2) {
+ if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
+ return LEVEL_CVT;
+ if (drm_gtf2_hbreak(edid))
+ return LEVEL_GTF2;
+ return LEVEL_GTF;
+ }
+ return LEVEL_DMT;
+}
+
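A standard timing code is just two bytes, and the arithmetic drm_mode_std() applies to them is small enough to check by hand. A stand-alone sketch, ignoring the EDID 1.2 case where aspect code 0 meant 1:1 rather than 16:10:

#include <stdio.h>

/* Decode one EDID standard timing code the way drm_mode_std() does. */
static void decode_std_timing(unsigned char hsize_byte, unsigned char vfreq_aspect)
{
	int hsize = (hsize_byte + 31) * 8;
	int vrefresh = (vfreq_aspect & 0x3f) + 60;
	int vsize;

	switch (vfreq_aspect >> 6) {
	case 0:  vsize = hsize * 10 / 16; break;	/* 16:10 */
	case 1:  vsize = hsize * 3 / 4;   break;	/* 4:3  */
	case 2:  vsize = hsize * 4 / 5;   break;	/* 5:4  */
	default: vsize = hsize * 9 / 16;  break;	/* 16:9 */
	}
	printf("%dx%d@%d\n", hsize, vsize, vrefresh);
}

int main(void)
{
	decode_std_timing(0xd1, 0xc0);	/* prints 1920x1080@60 */
	return 0;
}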
/*
* 0 is reserved. The spec says 0x01 fill for unused timings. Some old
* monitors fill with ascii space (0x20) instead.
@@ -536,22 +801,20 @@ bad_std_timing(u8 a, u8 b)
*
* Take the standard timing params (in this case width, aspect, and refresh)
* and convert them into a real mode using CVT/GTF/DMT.
- *
- * Punts for now, but should eventually use the FB layer's CVT based mode
- * generation code.
*/
-struct drm_display_mode *drm_mode_std(struct drm_device *dev,
- struct std_timing *t,
- int revision,
- int timing_level)
+static struct drm_display_mode *
+drm_mode_std(struct drm_connector *connector, struct edid *edid,
+ struct std_timing *t, int revision)
{
- struct drm_display_mode *mode;
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *m, *mode = NULL;
int hsize, vsize;
int vrefresh_rate;
unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK)
>> EDID_TIMING_ASPECT_SHIFT;
unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
>> EDID_TIMING_VFREQ_SHIFT;
+ int timing_level = standard_timing_level(edid);
if (bad_std_timing(t->hsize, t->vfreq_aspect))
return NULL;
@@ -572,16 +835,36 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
vsize = (hsize * 4) / 5;
else
vsize = (hsize * 9) / 16;
- /* HDTV hack */
- if (hsize == 1360 && vsize == 765 && vrefresh_rate == 60) {
- mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
+
+ /* HDTV hack, part 1 */
+ if (vrefresh_rate == 60 &&
+ ((hsize == 1360 && vsize == 765) ||
+ (hsize == 1368 && vsize == 769))) {
+ hsize = 1366;
+ vsize = 768;
+ }
+
+ /*
+ * If this connector already has a mode for this size and refresh
+ * rate (because it came from detailed or CVT info), use that
+ * instead. This way we don't have to guess at interlace or
+ * reduced blanking.
+ */
+ list_for_each_entry(m, &connector->probed_modes, head)
+ if (m->hdisplay == hsize && m->vdisplay == vsize &&
+ drm_mode_vrefresh(m) == vrefresh_rate)
+ return NULL;
+
+ /* HDTV hack, part 2 */
+ if (hsize == 1366 && vsize == 768 && vrefresh_rate == 60) {
+ mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0,
false);
mode->hdisplay = 1366;
mode->vsync_start = mode->vsync_start - 1;
mode->vsync_end = mode->vsync_end - 1;
return mode;
}
- mode = NULL;
+
/* check whether it can be found in default mode table */
mode = drm_find_dmt(dev, hsize, vsize, vrefresh_rate);
if (mode)
@@ -593,6 +876,23 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
case LEVEL_GTF:
mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
break;
+ case LEVEL_GTF2:
+ /*
+ * This is potentially wrong if there's ever a monitor with
+ * more than one ranges section, each claiming a different
+ * secondary GTF curve. Please don't do that.
+ */
+ mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
+ if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) {
+ kfree(mode);
+ mode = drm_gtf_mode_complex(dev, hsize, vsize,
+ vrefresh_rate, 0, 0,
+ drm_gtf2_m(edid),
+ drm_gtf2_2c(edid),
+ drm_gtf2_k(edid),
+ drm_gtf2_2j(edid));
+ }
+ break;
case LEVEL_CVT:
mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
false);
@@ -716,10 +1016,10 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
if (mode->vsync_end > mode->vtotal)
mode->vtotal = mode->vsync_end + 1;
- drm_mode_set_name(mode);
-
drm_mode_do_interlace_quirk(mode, pt);
+ drm_mode_set_name(mode);
+
if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
}
@@ -802,10 +1102,6 @@ static struct drm_display_mode edid_est_modes[] = {
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
};
-#define EDID_EST_TIMINGS 16
-#define EDID_STD_TIMINGS 8
-#define EDID_DETAILED_TIMINGS 4
-
/**
* add_established_modes - get est. modes from EDID and add them
* @edid: EDID block to scan
@@ -833,19 +1129,6 @@ static int add_established_modes(struct drm_connector *connector, struct edid *e
return modes;
}
-/**
- * stanard_timing_level - get std. timing level(CVT/GTF/DMT)
- * @edid: EDID block to scan
- */
-static int standard_timing_level(struct edid *edid)
-{
- if (edid->revision >= 2) {
- if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
- return LEVEL_CVT;
- return LEVEL_GTF;
- }
- return LEVEL_DMT;
-}
/**
* add_standard_modes - get std. modes from EDID and add them
@@ -856,22 +1139,14 @@ static int standard_timing_level(struct edid *edid)
*/
static int add_standard_modes(struct drm_connector *connector, struct edid *edid)
{
- struct drm_device *dev = connector->dev;
int i, modes = 0;
- int timing_level;
-
- timing_level = standard_timing_level(edid);
for (i = 0; i < EDID_STD_TIMINGS; i++) {
- struct std_timing *t = &edid->standard_timings[i];
struct drm_display_mode *newmode;
- /* If std timings bytes are 1, 1 it's empty */
- if (t->hsize == 1 && t->vfreq_aspect == 1)
- continue;
-
- newmode = drm_mode_std(dev, &edid->standard_timings[i],
- edid->revision, timing_level);
+ newmode = drm_mode_std(connector, edid,
+ &edid->standard_timings[i],
+ edid->revision);
if (newmode) {
drm_mode_probed_add(connector, newmode);
modes++;
@@ -881,36 +1156,86 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
return modes;
}
-/*
- * XXX fix this for:
- * - GTF secondary curve formula
- * - EDID 1.4 range offsets
- * - CVT extended bits
- */
static bool
-mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing)
+mode_is_rb(struct drm_display_mode *mode)
{
- struct detailed_data_monitor_range *range;
- int hsync, vrefresh;
-
- range = &timing->data.other_data.data.range;
+ return (mode->htotal - mode->hdisplay == 160) &&
+ (mode->hsync_end - mode->hdisplay == 80) &&
+ (mode->hsync_end - mode->hsync_start == 32) &&
+ (mode->vsync_start - mode->vdisplay == 3);
+}
+static bool
+mode_in_hsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
+{
+ int hsync, hmin, hmax;
+
+ hmin = t[7];
+ if (edid->revision >= 4)
+ hmin += ((t[4] & 0x04) ? 255 : 0);
+ hmax = t[8];
+ if (edid->revision >= 4)
+ hmax += ((t[4] & 0x08) ? 255 : 0);
hsync = drm_mode_hsync(mode);
- vrefresh = drm_mode_vrefresh(mode);
- if (hsync < range->min_hfreq_khz || hsync > range->max_hfreq_khz)
+ return (hsync <= hmax && hsync >= hmin);
+}
+
+static bool
+mode_in_vsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
+{
+ int vsync, vmin, vmax;
+
+ vmin = t[5];
+ if (edid->revision >= 4)
+ vmin += ((t[4] & 0x01) ? 255 : 0);
+ vmax = t[6];
+ if (edid->revision >= 4)
+ vmax += ((t[4] & 0x02) ? 255 : 0);
+ vsync = drm_mode_vrefresh(mode);
+
+ return (vsync <= vmax && vsync >= vmin);
+}
+
+static u32
+range_pixel_clock(struct edid *edid, u8 *t)
+{
+ /* unspecified */
+ if (t[9] == 0 || t[9] == 255)
+ return 0;
+
+ /* 1.4 with CVT support gives us real precision, yay */
+ if (edid->revision >= 4 && t[10] == 0x04)
+ return (t[9] * 10000) - ((t[12] >> 2) * 250);
+
+ /* 1.3 is pathetic, so fuzz up a bit */
+ return t[9] * 10000 + 5001;
+}
+
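A quick numeric check of range_pixel_clock(), which works in kHz throughout: a 1.3-style descriptor advertising 150 MHz comes back fuzzed up by half of its 10 MHz granularity, while a 1.4 CVT descriptor can subtract a precise correction in 0.25 MHz steps. An illustrative stand-alone restatement:

#include <stdio.h>

/* Max dotclock in kHz from a range descriptor t[], 0 if unspecified. */
static unsigned int range_max_clock_khz(int edid_rev, const unsigned char *t)
{
	if (t[9] == 0 || t[9] == 255)
		return 0;
	if (edid_rev >= 4 && t[10] == 0x04)		/* CVT range descriptor */
		return t[9] * 10000 - (t[12] >> 2) * 250;
	return t[9] * 10000 + 5001;			/* 10 MHz units, fuzz up */
}

int main(void)
{
	unsigned char t13[18] = { 0 }, t14[18] = { 0 };

	t13[9] = 15;					/* 150 MHz, EDID 1.3 */
	t14[9] = 15; t14[10] = 0x04; t14[12] = 0x28;	/* 150 MHz - 2.5 MHz, EDID 1.4 CVT */
	printf("1.3: %u kHz\n", range_max_clock_khz(3, t13));	/* 155001 */
	printf("1.4: %u kHz\n", range_max_clock_khz(4, t14));	/* 147500 */
	return 0;
}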
+static bool
+mode_in_range(struct drm_display_mode *mode, struct edid *edid,
+ struct detailed_timing *timing)
+{
+ u32 max_clock;
+ u8 *t = (u8 *)timing;
+
+ if (!mode_in_hsync_range(mode, edid, t))
return false;
- if (vrefresh < range->min_vfreq || vrefresh > range->max_vfreq)
+ if (!mode_in_vsync_range(mode, edid, t))
return false;
- if (range->pixel_clock_mhz && range->pixel_clock_mhz != 0xff) {
- /* be forgiving since it's in units of 10MHz */
- int max_clock = range->pixel_clock_mhz * 10 + 9;
- max_clock *= 1000;
+ if ((max_clock = range_pixel_clock(edid, t)))
if (mode->clock > max_clock)
return false;
- }
+
+ /* 1.4 max horizontal check */
+ if (edid->revision >= 4 && t[10] == 0x04)
+ if (t[13] && mode->hdisplay > 8 * (t[13] + (256 * (t[12]&0x3))))
+ return false;
+
+ if (mode_is_rb(mode) && !drm_monitor_supports_rb(edid))
+ return false;
return true;
}
@@ -919,15 +1244,16 @@ mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing)
* XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
* need to account for them.
*/
-static int drm_gtf_modes_for_range(struct drm_connector *connector,
- struct detailed_timing *timing)
+static int
+drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
+ struct detailed_timing *timing)
{
int i, modes = 0;
struct drm_display_mode *newmode;
struct drm_device *dev = connector->dev;
for (i = 0; i < drm_num_dmt_modes; i++) {
- if (mode_in_range(drm_dmt_modes + i, timing)) {
+ if (mode_in_range(drm_dmt_modes + i, edid, timing)) {
newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
if (newmode) {
drm_mode_probed_add(connector, newmode);
@@ -988,13 +1314,100 @@ static int drm_cvt_modes(struct drm_connector *connector,
return modes;
}
+static const struct {
+ short w;
+ short h;
+ short r;
+ short rb;
+} est3_modes[] = {
+ /* byte 6 */
+ { 640, 350, 85, 0 },
+ { 640, 400, 85, 0 },
+ { 720, 400, 85, 0 },
+ { 640, 480, 85, 0 },
+ { 848, 480, 60, 0 },
+ { 800, 600, 85, 0 },
+ { 1024, 768, 85, 0 },
+ { 1152, 864, 75, 0 },
+ /* byte 7 */
+ { 1280, 768, 60, 1 },
+ { 1280, 768, 60, 0 },
+ { 1280, 768, 75, 0 },
+ { 1280, 768, 85, 0 },
+ { 1280, 960, 60, 0 },
+ { 1280, 960, 85, 0 },
+ { 1280, 1024, 60, 0 },
+ { 1280, 1024, 85, 0 },
+ /* byte 8 */
+ { 1360, 768, 60, 0 },
+ { 1440, 900, 60, 1 },
+ { 1440, 900, 60, 0 },
+ { 1440, 900, 75, 0 },
+ { 1440, 900, 85, 0 },
+ { 1400, 1050, 60, 1 },
+ { 1400, 1050, 60, 0 },
+ { 1400, 1050, 75, 0 },
+ /* byte 9 */
+ { 1400, 1050, 85, 0 },
+ { 1680, 1050, 60, 1 },
+ { 1680, 1050, 60, 0 },
+ { 1680, 1050, 75, 0 },
+ { 1680, 1050, 85, 0 },
+ { 1600, 1200, 60, 0 },
+ { 1600, 1200, 65, 0 },
+ { 1600, 1200, 70, 0 },
+ /* byte 10 */
+ { 1600, 1200, 75, 0 },
+ { 1600, 1200, 85, 0 },
+ { 1792, 1344, 60, 0 },
+ { 1792, 1344, 85, 0 },
+ { 1856, 1392, 60, 0 },
+ { 1856, 1392, 75, 0 },
+ { 1920, 1200, 60, 1 },
+ { 1920, 1200, 60, 0 },
+ /* byte 11 */
+ { 1920, 1200, 75, 0 },
+ { 1920, 1200, 85, 0 },
+ { 1920, 1440, 60, 0 },
+ { 1920, 1440, 75, 0 },
+};
+static const int num_est3_modes = sizeof(est3_modes) / sizeof(est3_modes[0]);
+
+static int
+drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
+{
+ int i, j, m, modes = 0;
+ struct drm_display_mode *mode;
+ u8 *est = ((u8 *)timing) + 5;
+
+ for (i = 0; i < 6; i++) {
+ for (j = 7; j > 0; j--) {
+ m = (i * 8) + (7 - j);
+ if (m >= num_est3_modes)
+ break;
+ if (est[i] & (1 << j)) {
+ mode = drm_find_dmt(connector->dev,
+ est3_modes[m].w,
+ est3_modes[m].h,
+ est3_modes[m].r
+ /*, est3_modes[m].rb */);
+ if (mode) {
+ drm_mode_probed_add(connector, mode);
+ modes++;
+ }
+ }
+ }
+ }
+
+ return modes;
+}
+
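The established timings III bitmap packs one mode per bit, most significant bit first, across bytes 6-11 of the descriptor; that is what the m = (i * 8) + (7 - j) index in drm_est3_modes() encodes. A stand-alone sketch with the table truncated to byte 6:

#include <stdio.h>

static const struct { short w, h, r; } est3[] = {
	{ 640, 350, 85 }, { 640, 400, 85 }, { 720, 400, 85 }, { 640, 480, 85 },
	{ 848, 480, 60 }, { 800, 600, 85 }, { 1024, 768, 85 }, { 1152, 864, 75 },
	/* bytes 7-11 continue as in the kernel table above */
};

int main(void)
{
	unsigned char byte6 = 0x88;	/* bit 7 = 640x350@85, bit 3 = 848x480@60 */
	int j;

	for (j = 7; j > 0; j--)
		if (byte6 & (1 << j))
			printf("%dx%d@%d\n", est3[7 - j].w, est3[7 - j].h, est3[7 - j].r);
	return 0;
}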
static int add_detailed_modes(struct drm_connector *connector,
struct detailed_timing *timing,
struct edid *edid, u32 quirks, int preferred)
{
int i, modes = 0;
struct detailed_non_pixel *data = &timing->data.other_data;
- int timing_level = standard_timing_level(edid);
int gtf = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
struct drm_display_mode *newmode;
struct drm_device *dev = connector->dev;
@@ -1015,7 +1428,8 @@ static int add_detailed_modes(struct drm_connector *connector,
switch (data->type) {
case EDID_DETAIL_MONITOR_RANGE:
if (gtf)
- modes += drm_gtf_modes_for_range(connector, timing);
+ modes += drm_gtf_modes_for_range(connector, edid,
+ timing);
break;
case EDID_DETAIL_STD_MODES:
/* Six modes per detailed section */
@@ -1024,8 +1438,8 @@ static int add_detailed_modes(struct drm_connector *connector,
struct drm_display_mode *newmode;
std = &data->data.timings[i];
- newmode = drm_mode_std(dev, std, edid->revision,
- timing_level);
+ newmode = drm_mode_std(connector, edid, std,
+ edid->revision);
if (newmode) {
drm_mode_probed_add(connector, newmode);
modes++;
@@ -1035,6 +1449,9 @@ static int add_detailed_modes(struct drm_connector *connector,
case EDID_DETAIL_CVT_3BYTE:
modes += drm_cvt_modes(connector, timing);
break;
+ case EDID_DETAIL_EST_TIMINGS:
+ modes += drm_est3_modes(connector, timing);
+ break;
default:
break;
}
@@ -1058,7 +1475,10 @@ static int add_detailed_info(struct drm_connector *connector,
for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
struct detailed_timing *timing = &edid->detailed_timings[i];
- int preferred = (i == 0) && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
+ int preferred = (i == 0);
+
+ if (preferred && edid->version == 1 && edid->revision < 4)
+ preferred = (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
/* In 1.0, only timings are allowed */
if (!timing->pixel_clock && edid->version == 1 &&
@@ -1088,39 +1508,23 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
int i, modes = 0;
char *edid_ext = NULL;
struct detailed_timing *timing;
- int edid_ext_num;
int start_offset, end_offset;
int timing_level;
- if (edid->version == 1 && edid->revision < 3) {
- /* If the EDID version is less than 1.3, there is no
- * extension EDID.
- */
+ if (edid->version == 1 && edid->revision < 3)
return 0;
- }
- if (!edid->extensions) {
- /* if there is no extension EDID, it is unnecessary to
- * parse the E-EDID to get detailed info
- */
+ if (!edid->extensions)
return 0;
- }
-
- /* Chose real EDID extension number */
- edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ?
- DRM_MAX_EDID_EXT_NUM : edid->extensions;
/* Find CEA extension */
- for (i = 0; i < edid_ext_num; i++) {
+ for (i = 0; i < edid->extensions; i++) {
edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
- /* This block is CEA extension */
if (edid_ext[0] == 0x02)
break;
}
- if (i == edid_ext_num) {
- /* if there is no additional timing EDID block, return */
+ if (i == edid->extensions)
return 0;
- }
/* Get the start offset of detailed timing block */
start_offset = edid_ext[2];
@@ -1144,123 +1548,6 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
return modes;
}
-#define DDC_ADDR 0x50
-/**
- * Get EDID information via I2C.
- *
- * \param adapter : i2c device adaptor
- * \param buf : EDID data buffer to be filled
- * \param len : EDID data buffer length
- * \return 0 on success or -1 on failure.
- *
- * Try to fetch EDID information by calling i2c driver function.
- */
-int drm_do_probe_ddc_edid(struct i2c_adapter *adapter,
- unsigned char *buf, int len)
-{
- unsigned char start = 0x0;
- struct i2c_msg msgs[] = {
- {
- .addr = DDC_ADDR,
- .flags = 0,
- .len = 1,
- .buf = &start,
- }, {
- .addr = DDC_ADDR,
- .flags = I2C_M_RD,
- .len = len,
- .buf = buf,
- }
- };
-
- if (i2c_transfer(adapter, msgs, 2) == 2)
- return 0;
-
- return -1;
-}
-EXPORT_SYMBOL(drm_do_probe_ddc_edid);
-
-static int drm_ddc_read_edid(struct drm_connector *connector,
- struct i2c_adapter *adapter,
- char *buf, int len)
-{
- int i;
-
- for (i = 0; i < 4; i++) {
- if (drm_do_probe_ddc_edid(adapter, buf, len))
- return -1;
- if (drm_edid_is_valid((struct edid *)buf))
- return 0;
- }
-
- /* repeated checksum failures; warn, but carry on */
- dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
- drm_get_connector_name(connector));
- return -1;
-}
-
-/**
- * drm_get_edid - get EDID data, if available
- * @connector: connector we're probing
- * @adapter: i2c adapter to use for DDC
- *
- * Poke the given connector's i2c channel to grab EDID data if possible.
- *
- * Return edid data or NULL if we couldn't find any.
- */
-struct edid *drm_get_edid(struct drm_connector *connector,
- struct i2c_adapter *adapter)
-{
- int ret;
- struct edid *edid;
-
- edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1),
- GFP_KERNEL);
- if (edid == NULL) {
- dev_warn(&connector->dev->pdev->dev,
- "Failed to allocate EDID\n");
- goto end;
- }
-
- /* Read first EDID block */
- ret = drm_ddc_read_edid(connector, adapter,
- (unsigned char *)edid, EDID_LENGTH);
- if (ret != 0)
- goto clean_up;
-
- /* There are EDID extensions to be read */
- if (edid->extensions != 0) {
- int edid_ext_num = edid->extensions;
-
- if (edid_ext_num > DRM_MAX_EDID_EXT_NUM) {
- dev_warn(&connector->dev->pdev->dev,
- "The number of extension(%d) is "
- "over max (%d), actually read number (%d)\n",
- edid_ext_num, DRM_MAX_EDID_EXT_NUM,
- DRM_MAX_EDID_EXT_NUM);
- /* Reset EDID extension number to be read */
- edid_ext_num = DRM_MAX_EDID_EXT_NUM;
- }
- /* Read EDID including extensions too */
- ret = drm_ddc_read_edid(connector, adapter, (char *)edid,
- EDID_LENGTH * (edid_ext_num + 1));
- if (ret != 0)
- goto clean_up;
-
- }
-
- connector->display_info.raw_edid = (char *)edid;
- goto end;
-
-clean_up:
- kfree(edid);
- edid = NULL;
-end:
- return edid;
-
-}
-EXPORT_SYMBOL(drm_get_edid);
-
#define HDMI_IDENTIFIER 0x000C03
#define VENDOR_BLOCK 0x03
/**
@@ -1273,7 +1560,7 @@ EXPORT_SYMBOL(drm_get_edid);
bool drm_detect_hdmi_monitor(struct edid *edid)
{
char *edid_ext = NULL;
- int i, hdmi_id, edid_ext_num;
+ int i, hdmi_id;
int start_offset, end_offset;
bool is_hdmi = false;
@@ -1281,19 +1568,15 @@ bool drm_detect_hdmi_monitor(struct edid *edid)
if (edid == NULL || edid->extensions == 0)
goto end;
- /* Chose real EDID extension number */
- edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ?
- DRM_MAX_EDID_EXT_NUM : edid->extensions;
-
/* Find CEA extension */
- for (i = 0; i < edid_ext_num; i++) {
+ for (i = 0; i < edid->extensions; i++) {
edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
/* This block is CEA extension */
if (edid_ext[0] == 0x02)
break;
}
- if (i == edid_ext_num)
+ if (i == edid->extensions)
goto end;
/* Data block offset in CEA extension block */
@@ -1348,10 +1631,24 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
quirks = edid_get_quirks(edid);
- num_modes += add_established_modes(connector, edid);
- num_modes += add_standard_modes(connector, edid);
+ /*
+ * EDID spec says modes should be preferred in this order:
+ * - preferred detailed mode
+ * - other detailed modes from base block
+ * - detailed modes from extension blocks
+ * - CVT 3-byte code modes
+ * - standard timing codes
+ * - established timing codes
+ * - modes inferred from GTF or CVT range information
+ *
+ * We don't quite implement this yet, but we're close.
+ *
+ * XXX order for additional mode types in extension blocks?
+ */
num_modes += add_detailed_info(connector, edid, quirks);
num_modes += add_detailed_info_eedid(connector, edid, quirks);
+ num_modes += add_standard_modes(connector, edid);
+ num_modes += add_established_modes(connector, edid);
if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
edid_fixup_preferred(connector, quirks);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 288ea2f32772..b28e56382e86 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -42,15 +42,35 @@ MODULE_LICENSE("GPL and additional rights");
static LIST_HEAD(kernel_fb_helper_list);
-int drm_fb_helper_add_connector(struct drm_connector *connector)
+static struct slow_work_ops output_status_change_ops;
+
+/* simple single crtc case helper function */
+int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
{
- connector->fb_helper_private = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
- if (!connector->fb_helper_private)
- return -ENOMEM;
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_connector *connector;
+ int i;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct drm_fb_helper_connector *fb_helper_connector;
+
+ fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
+ if (!fb_helper_connector)
+ goto fail;
+
+ fb_helper_connector->connector = connector;
+ fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
+ }
return 0;
+fail:
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ kfree(fb_helper->connector_info[i]);
+ fb_helper->connector_info[i] = NULL;
+ }
+ fb_helper->connector_count = 0;
+ return -ENOMEM;
}
-EXPORT_SYMBOL(drm_fb_helper_add_connector);
+EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
/**
* drm_fb_helper_connector_parse_command_line - parse command line for connector
@@ -65,7 +85,7 @@ EXPORT_SYMBOL(drm_fb_helper_add_connector);
*
* enable/enable Digital/disable bit at the end
*/
-static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *connector,
+static bool drm_fb_helper_connector_parse_command_line(struct drm_fb_helper_connector *fb_helper_conn,
const char *mode_option)
{
const char *name;
@@ -75,13 +95,13 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0;
int i;
enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
- struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
struct drm_fb_helper_cmdline_mode *cmdline_mode;
+ struct drm_connector *connector = fb_helper_conn->connector;
- if (!fb_help_conn)
+ if (!fb_helper_conn)
return false;
- cmdline_mode = &fb_help_conn->cmdline_mode;
+ cmdline_mode = &fb_helper_conn->cmdline_mode;
if (!mode_option)
mode_option = fb_mode_option;
@@ -204,18 +224,21 @@ done:
return true;
}
-int drm_fb_helper_parse_command_line(struct drm_device *dev)
+static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
{
- struct drm_connector *connector;
+ struct drm_fb_helper_connector *fb_helper_conn;
+ int i;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ for (i = 0; i < fb_helper->connector_count; i++) {
char *option = NULL;
+ fb_helper_conn = fb_helper->connector_info[i];
+
/* do something on return - turn off connector maybe */
- if (fb_get_options(drm_get_connector_name(connector), &option))
+ if (fb_get_options(drm_get_connector_name(fb_helper_conn->connector), &option))
continue;
- drm_fb_helper_connector_parse_command_line(connector, option);
+ drm_fb_helper_connector_parse_command_line(fb_helper_conn, option);
}
return 0;
}
@@ -293,6 +316,7 @@ static void drm_fb_helper_on(struct fb_info *info)
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct drm_crtc *crtc;
+ struct drm_crtc_helper_funcs *crtc_funcs;
struct drm_encoder *encoder;
int i;
@@ -300,33 +324,28 @@ static void drm_fb_helper_on(struct fb_info *info)
* For each CRTC in this fb, turn the crtc on then,
* find all associated encoders and turn them on.
*/
+ mutex_lock(&dev->mode_config.mutex);
for (i = 0; i < fb_helper->crtc_count; i++) {
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct drm_crtc_helper_funcs *crtc_funcs =
- crtc->helper_private;
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+ crtc_funcs = crtc->helper_private;
- /* Only mess with CRTCs in this fb */
- if (crtc->base.id != fb_helper->crtc_info[i].crtc_id ||
- !crtc->enabled)
- continue;
+ if (!crtc->enabled)
+ continue;
+
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
- mutex_lock(&dev->mode_config.mutex);
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
- mutex_unlock(&dev->mode_config.mutex);
- /* Found a CRTC on this fb, now find encoders */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc == crtc) {
- struct drm_encoder_helper_funcs *encoder_funcs;
+ /* Found a CRTC on this fb, now find encoders */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc == crtc) {
+ struct drm_encoder_helper_funcs *encoder_funcs;
- encoder_funcs = encoder->helper_private;
- mutex_lock(&dev->mode_config.mutex);
- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
- mutex_unlock(&dev->mode_config.mutex);
- }
+ encoder_funcs = encoder->helper_private;
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}
}
}
+ mutex_unlock(&dev->mode_config.mutex);
}
static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
@@ -334,6 +353,7 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct drm_crtc *crtc;
+ struct drm_crtc_helper_funcs *crtc_funcs;
struct drm_encoder *encoder;
int i;
@@ -341,32 +361,26 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
* For each CRTC in this fb, find all associated encoders
* and turn them off, then turn off the CRTC.
*/
+ mutex_lock(&dev->mode_config.mutex);
for (i = 0; i < fb_helper->crtc_count; i++) {
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct drm_crtc_helper_funcs *crtc_funcs =
- crtc->helper_private;
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+ crtc_funcs = crtc->helper_private;
- /* Only mess with CRTCs in this fb */
- if (crtc->base.id != fb_helper->crtc_info[i].crtc_id ||
- !crtc->enabled)
- continue;
+ if (!crtc->enabled)
+ continue;
- /* Found a CRTC on this fb, now find encoders */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc == crtc) {
- struct drm_encoder_helper_funcs *encoder_funcs;
+ /* Found a CRTC on this fb, now find encoders */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc == crtc) {
+ struct drm_encoder_helper_funcs *encoder_funcs;
- encoder_funcs = encoder->helper_private;
- mutex_lock(&dev->mode_config.mutex);
- encoder_funcs->dpms(encoder, dpms_mode);
- mutex_unlock(&dev->mode_config.mutex);
- }
+ encoder_funcs = encoder->helper_private;
+ encoder_funcs->dpms(encoder, dpms_mode);
}
- mutex_lock(&dev->mode_config.mutex);
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
- mutex_unlock(&dev->mode_config.mutex);
}
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
}
+ mutex_unlock(&dev->mode_config.mutex);
}
int drm_fb_helper_blank(int blank, struct fb_info *info)
@@ -401,50 +415,89 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
{
int i;
+ for (i = 0; i < helper->connector_count; i++)
+ kfree(helper->connector_info[i]);
+ kfree(helper->connector_info);
for (i = 0; i < helper->crtc_count; i++)
kfree(helper->crtc_info[i].mode_set.connectors);
kfree(helper->crtc_info);
}
-int drm_fb_helper_init_crtc_count(struct drm_fb_helper *helper, int crtc_count, int max_conn_count)
+int drm_fb_helper_init(struct drm_device *dev,
+ struct drm_fb_helper *fb_helper,
+ int crtc_count, int max_conn_count,
+ bool polled)
{
- struct drm_device *dev = helper->dev;
struct drm_crtc *crtc;
int ret = 0;
int i;
- helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
- if (!helper->crtc_info)
+ fb_helper->dev = dev;
+ fb_helper->poll_enabled = polled;
+
+ slow_work_register_user(THIS_MODULE);
+ delayed_slow_work_init(&fb_helper->output_status_change_slow_work,
+ &output_status_change_ops);
+
+ INIT_LIST_HEAD(&fb_helper->kernel_fb_list);
+
+ fb_helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
+ if (!fb_helper->crtc_info)
return -ENOMEM;
- helper->crtc_count = crtc_count;
+ fb_helper->crtc_count = crtc_count;
+ fb_helper->connector_info = kcalloc(dev->mode_config.num_connector, sizeof(struct drm_fb_helper_connector *), GFP_KERNEL);
+ if (!fb_helper->connector_info) {
+ kfree(fb_helper->crtc_info);
+ return -ENOMEM;
+ }
+ fb_helper->connector_count = 0;
for (i = 0; i < crtc_count; i++) {
- helper->crtc_info[i].mode_set.connectors =
+ fb_helper->crtc_info[i].mode_set.connectors =
kcalloc(max_conn_count,
sizeof(struct drm_connector *),
GFP_KERNEL);
- if (!helper->crtc_info[i].mode_set.connectors) {
+ if (!fb_helper->crtc_info[i].mode_set.connectors) {
ret = -ENOMEM;
goto out_free;
}
- helper->crtc_info[i].mode_set.num_connectors = 0;
+ fb_helper->crtc_info[i].mode_set.num_connectors = 0;
}
i = 0;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- helper->crtc_info[i].crtc_id = crtc->base.id;
- helper->crtc_info[i].mode_set.crtc = crtc;
+ fb_helper->crtc_info[i].crtc_id = crtc->base.id;
+ fb_helper->crtc_info[i].mode_set.crtc = crtc;
i++;
}
- helper->conn_limit = max_conn_count;
+ fb_helper->conn_limit = max_conn_count;
return 0;
out_free:
- drm_fb_helper_crtc_free(helper);
+ drm_fb_helper_crtc_free(fb_helper);
return -ENOMEM;
}
-EXPORT_SYMBOL(drm_fb_helper_init_crtc_count);
+EXPORT_SYMBOL(drm_fb_helper_init);
+
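Together with drm_fb_helper_single_add_all_connectors() and drm_fb_helper_initial_config() elsewhere in this patch, a driver's fbdev setup under the reworked API looks roughly like the sketch below. The funcs table, CRTC/connector counts and bpp are driver-specific placeholders, not values taken from any real driver.

/* Sketch of a driver's fbdev-emulation setup with the reworked helper API. */
static int example_fbdev_init(struct drm_device *dev)
{
	struct drm_fb_helper *helper;
	int ret;

	helper = kzalloc(sizeof(*helper), GFP_KERNEL);
	if (!helper)
		return -ENOMEM;

	helper->funcs = &example_fb_helper_funcs;	/* hypothetical; supplies .fb_probe */

	ret = drm_fb_helper_init(dev, helper, 2 /* crtcs */, 4 /* conns per crtc */,
				 false /* no polling */);
	if (ret) {
		kfree(helper);
		return ret;
	}

	drm_fb_helper_single_add_all_connectors(helper);
	drm_fb_helper_initial_config(helper, 32 /* bpp */);
	return 0;
}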
+void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
+{
+ if (!list_empty(&fb_helper->kernel_fb_list)) {
+ list_del(&fb_helper->kernel_fb_list);
+ if (list_empty(&kernel_fb_helper_list)) {
+ printk(KERN_INFO "unregistered panic notifier\n");
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &paniced);
+ unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
+ }
+ }
+
+ drm_fb_helper_crtc_free(fb_helper);
+
+ delayed_slow_work_cancel(&fb_helper->output_status_change_slow_work);
+ slow_work_unregister_user(THIS_MODULE);
+}
+EXPORT_SYMBOL(drm_fb_helper_fini);
static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, u16 regno, struct fb_info *info)
@@ -508,20 +561,15 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
- struct drm_device *dev = fb_helper->dev;
+ struct drm_crtc_helper_funcs *crtc_funcs;
u16 *red, *green, *blue, *transp;
struct drm_crtc *crtc;
int i, rc = 0;
int start;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- for (i = 0; i < fb_helper->crtc_count; i++) {
- if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
- break;
- }
- if (i == fb_helper->crtc_count)
- continue;
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+ crtc_funcs = crtc->helper_private;
red = cmap->red;
green = cmap->green;
@@ -549,41 +597,6 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_setcmap);
-int drm_fb_helper_setcolreg(unsigned regno,
- unsigned red,
- unsigned green,
- unsigned blue,
- unsigned transp,
- struct fb_info *info)
-{
- struct drm_fb_helper *fb_helper = info->par;
- struct drm_device *dev = fb_helper->dev;
- struct drm_crtc *crtc;
- int i;
- int ret;
-
- if (regno > 255)
- return 1;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- for (i = 0; i < fb_helper->crtc_count; i++) {
- if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
- break;
- }
- if (i == fb_helper->crtc_count)
- continue;
-
- ret = setcolreg(crtc, red, green, blue, regno, info);
- if (ret)
- return ret;
-
- crtc_funcs->load_lut(crtc);
- }
- return 0;
-}
-EXPORT_SYMBOL(drm_fb_helper_setcolreg);
-
int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
@@ -687,23 +700,21 @@ int drm_fb_helper_set_par(struct fb_info *info)
return -EINVAL;
}
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-
- for (i = 0; i < fb_helper->crtc_count; i++) {
- if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
- break;
- }
- if (i == fb_helper->crtc_count)
- continue;
-
- if (crtc->fb == fb_helper->crtc_info[i].mode_set.fb) {
- mutex_lock(&dev->mode_config.mutex);
- ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set);
+ mutex_lock(&dev->mode_config.mutex);
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+ ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set);
+ if (ret) {
mutex_unlock(&dev->mode_config.mutex);
- if (ret)
- return ret;
+ return ret;
}
}
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (fb_helper->delayed_hotplug) {
+ fb_helper->delayed_hotplug = false;
+ delayed_slow_work_enqueue(&fb_helper->output_status_change_slow_work, 0);
+ }
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_set_par);
@@ -718,14 +729,9 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
int ret = 0;
int i;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- for (i = 0; i < fb_helper->crtc_count; i++) {
- if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
- break;
- }
-
- if (i == fb_helper->crtc_count)
- continue;
+ mutex_lock(&dev->mode_config.mutex);
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
modeset = &fb_helper->crtc_info[i].mode_set;
@@ -733,181 +739,122 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
modeset->y = var->yoffset;
if (modeset->num_connectors) {
- mutex_lock(&dev->mode_config.mutex);
ret = crtc->funcs->set_config(modeset);
- mutex_unlock(&dev->mode_config.mutex);
if (!ret) {
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
}
}
}
+ mutex_unlock(&dev->mode_config.mutex);
return ret;
}
EXPORT_SYMBOL(drm_fb_helper_pan_display);
-int drm_fb_helper_single_fb_probe(struct drm_device *dev,
- int preferred_bpp,
- int (*fb_create)(struct drm_device *dev,
- uint32_t fb_width,
- uint32_t fb_height,
- uint32_t surface_width,
- uint32_t surface_height,
- uint32_t surface_depth,
- uint32_t surface_bpp,
- struct drm_framebuffer **fb_ptr))
+int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
+ int preferred_bpp)
{
- struct drm_crtc *crtc;
- struct drm_connector *connector;
- unsigned int fb_width = (unsigned)-1, fb_height = (unsigned)-1;
- unsigned int surface_width = 0, surface_height = 0;
int new_fb = 0;
int crtc_count = 0;
- int ret, i, conn_count = 0;
+ int i;
struct fb_info *info;
- struct drm_framebuffer *fb;
- struct drm_mode_set *modeset = NULL;
- struct drm_fb_helper *fb_helper;
- uint32_t surface_depth = 24, surface_bpp = 32;
+ struct drm_fb_helper_surface_size sizes;
+ int gamma_size = 0;
+
+ memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size));
+ sizes.surface_depth = 24;
+ sizes.surface_bpp = 32;
+ sizes.fb_width = (unsigned)-1;
+ sizes.fb_height = (unsigned)-1;
/* if driver picks 8 or 16 by default use that
for both depth/bpp */
- if (preferred_bpp != surface_bpp) {
- surface_depth = surface_bpp = preferred_bpp;
+ if (preferred_bpp != sizes.surface_bpp) {
+ sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
}
/* first up get a count of crtcs now in use and new min/maxes width/heights */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
-
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
struct drm_fb_helper_cmdline_mode *cmdline_mode;
- if (!fb_help_conn)
- continue;
-
- cmdline_mode = &fb_help_conn->cmdline_mode;
+ cmdline_mode = &fb_helper_conn->cmdline_mode;
if (cmdline_mode->bpp_specified) {
switch (cmdline_mode->bpp) {
case 8:
- surface_depth = surface_bpp = 8;
+ sizes.surface_depth = sizes.surface_bpp = 8;
break;
case 15:
- surface_depth = 15;
- surface_bpp = 16;
+ sizes.surface_depth = 15;
+ sizes.surface_bpp = 16;
break;
case 16:
- surface_depth = surface_bpp = 16;
+ sizes.surface_depth = sizes.surface_bpp = 16;
break;
case 24:
- surface_depth = surface_bpp = 24;
+ sizes.surface_depth = sizes.surface_bpp = 24;
break;
case 32:
- surface_depth = 24;
- surface_bpp = 32;
+ sizes.surface_depth = 24;
+ sizes.surface_bpp = 32;
break;
}
break;
}
}
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (drm_helper_crtc_in_use(crtc)) {
- if (crtc->desired_mode) {
- if (crtc->desired_mode->hdisplay < fb_width)
- fb_width = crtc->desired_mode->hdisplay;
-
- if (crtc->desired_mode->vdisplay < fb_height)
- fb_height = crtc->desired_mode->vdisplay;
-
- if (crtc->desired_mode->hdisplay > surface_width)
- surface_width = crtc->desired_mode->hdisplay;
-
- if (crtc->desired_mode->vdisplay > surface_height)
- surface_height = crtc->desired_mode->vdisplay;
- }
+ crtc_count = 0;
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ struct drm_display_mode *desired_mode;
+ desired_mode = fb_helper->crtc_info[i].desired_mode;
+
+ if (desired_mode) {
+ if (gamma_size == 0)
+ gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
+ if (desired_mode->hdisplay < sizes.fb_width)
+ sizes.fb_width = desired_mode->hdisplay;
+ if (desired_mode->vdisplay < sizes.fb_height)
+ sizes.fb_height = desired_mode->vdisplay;
+ if (desired_mode->hdisplay > sizes.surface_width)
+ sizes.surface_width = desired_mode->hdisplay;
+ if (desired_mode->vdisplay > sizes.surface_height)
+ sizes.surface_height = desired_mode->vdisplay;
crtc_count++;
}
}
- if (crtc_count == 0 || fb_width == -1 || fb_height == -1) {
+ if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
/* hmm everyone went away - assume VGA cable just fell out
and will come back later. */
- return 0;
- }
-
- /* do we have an fb already? */
- if (list_empty(&dev->mode_config.fb_kernel_list)) {
- ret = (*fb_create)(dev, fb_width, fb_height, surface_width,
- surface_height, surface_depth, surface_bpp,
- &fb);
- if (ret)
- return -EINVAL;
- new_fb = 1;
- } else {
- fb = list_first_entry(&dev->mode_config.fb_kernel_list,
- struct drm_framebuffer, filp_head);
-
- /* if someone hotplugs something bigger than we have already allocated, we are pwned.
- As really we can't resize an fbdev that is in the wild currently due to fbdev
- not really being designed for the lower layers moving stuff around under it.
- - so in the grand style of things - punt. */
- if ((fb->width < surface_width) ||
- (fb->height < surface_height)) {
- DRM_ERROR("Framebuffer not large enough to scale console onto.\n");
- return -EINVAL;
- }
+ DRM_ERROR("Cannot find any crtc or sizes - going 1024x768\n");
+ sizes.fb_width = sizes.surface_width = 1024;
+ sizes.fb_height = sizes.surface_height = 768;
}
- info = fb->fbdev;
- fb_helper = info->par;
+ /* push down into drivers */
+ new_fb = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
+ if (new_fb < 0)
+ return new_fb;
- crtc_count = 0;
- /* okay we need to setup new connector sets in the crtcs */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- modeset = &fb_helper->crtc_info[crtc_count].mode_set;
- modeset->fb = fb;
- conn_count = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->encoder)
- if (connector->encoder->crtc == modeset->crtc) {
- modeset->connectors[conn_count] = connector;
- conn_count++;
- if (conn_count > fb_helper->conn_limit)
- BUG();
- }
- }
+ info = fb_helper->fbdev;
- for (i = conn_count; i < fb_helper->conn_limit; i++)
- modeset->connectors[i] = NULL;
-
- modeset->crtc = crtc;
- crtc_count++;
-
- modeset->num_connectors = conn_count;
- if (modeset->crtc->desired_mode) {
- if (modeset->mode)
- drm_mode_destroy(dev, modeset->mode);
- modeset->mode = drm_mode_duplicate(dev,
- modeset->crtc->desired_mode);
- }
+ /* set the fb pointer */
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
}
- fb_helper->crtc_count = crtc_count;
- fb_helper->fb = fb;
if (new_fb) {
info->var.pixclock = 0;
- ret = fb_alloc_cmap(&info->cmap, modeset->crtc->gamma_size, 0);
- if (ret)
- return ret;
if (register_framebuffer(info) < 0) {
- fb_dealloc_cmap(&info->cmap);
return -EINVAL;
}
+
+ printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
+ info->fix.id);
+
} else {
drm_fb_helper_set_par(info);
}
- printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
- info->fix.id);
/* Switch back to kernel console on panic */
/* multi card linked list maybe */
@@ -917,25 +864,13 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev,
&paniced);
register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
}
- list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
+ if (new_fb)
+ list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
+
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_single_fb_probe);
-void drm_fb_helper_free(struct drm_fb_helper *helper)
-{
- list_del(&helper->kernel_fb_list);
- if (list_empty(&kernel_fb_helper_list)) {
- printk(KERN_INFO "unregistered panic notifier\n");
- atomic_notifier_chain_unregister(&panic_notifier_list,
- &paniced);
- unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
- }
- drm_fb_helper_crtc_free(helper);
- fb_dealloc_cmap(&helper->fb->fbdev->cmap);
-}
-EXPORT_SYMBOL(drm_fb_helper_free);
-
void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
uint32_t depth)
{
@@ -954,10 +889,11 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
}
EXPORT_SYMBOL(drm_fb_helper_fill_fix);
-void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb,
+void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
uint32_t fb_width, uint32_t fb_height)
{
- info->pseudo_palette = fb->pseudo_palette;
+ struct drm_framebuffer *fb = fb_helper->fb;
+ info->pseudo_palette = fb_helper->pseudo_palette;
info->var.xres_virtual = fb->width;
info->var.yres_virtual = fb->height;
info->var.bits_per_pixel = fb->bits_per_pixel;
@@ -1025,3 +961,454 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb,
info->var.yres = fb_height;
}
EXPORT_SYMBOL(drm_fb_helper_fill_var);
+
+static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper,
+ uint32_t maxX,
+ uint32_t maxY)
+{
+ struct drm_connector *connector;
+ int count = 0;
+ int i;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ connector = fb_helper->connector_info[i]->connector;
+ count += connector->funcs->fill_modes(connector, maxX, maxY);
+ }
+
+ return count;
+}
+
+static struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height)
+{
+ struct drm_display_mode *mode;
+
+ list_for_each_entry(mode, &fb_connector->connector->modes, head) {
+ if (drm_mode_width(mode) > width ||
+ drm_mode_height(mode) > height)
+ continue;
+ if (mode->type & DRM_MODE_TYPE_PREFERRED)
+ return mode;
+ }
+ return NULL;
+}
+
+static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
+{
+ struct drm_fb_helper_cmdline_mode *cmdline_mode;
+ cmdline_mode = &fb_connector->cmdline_mode;
+ return cmdline_mode->specified;
+}
+
+static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
+ int width, int height)
+{
+ struct drm_fb_helper_cmdline_mode *cmdline_mode;
+ struct drm_display_mode *mode = NULL;
+
+ cmdline_mode = &fb_helper_conn->cmdline_mode;
+ if (cmdline_mode->specified == false)
+ return mode;
+
+ /* attempt to find a matching mode in the list of modes
+ * we have probed so far; if none matches, add a CVT mode that conforms
+ */
+ if (cmdline_mode->rb || cmdline_mode->margins)
+ goto create_mode;
+
+ list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
+ /* check width/height */
+ if (mode->hdisplay != cmdline_mode->xres ||
+ mode->vdisplay != cmdline_mode->yres)
+ continue;
+
+ if (cmdline_mode->refresh_specified) {
+ if (mode->vrefresh != cmdline_mode->refresh)
+ continue;
+ }
+
+ if (cmdline_mode->interlace) {
+ if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
+ continue;
+ }
+ return mode;
+ }
+
+create_mode:
+ mode = drm_cvt_mode(fb_helper_conn->connector->dev, cmdline_mode->xres,
+ cmdline_mode->yres,
+ cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
+ cmdline_mode->rb, cmdline_mode->interlace,
+ cmdline_mode->margins);
+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+ list_add(&mode->head, &fb_helper_conn->connector->modes);
+ return mode;
+}
+
+static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
+{
+ bool enable;
+
+ if (strict) {
+ enable = connector->status == connector_status_connected;
+ } else {
+ enable = connector->status != connector_status_disconnected;
+ }
+ return enable;
+}
+
+static void drm_enable_connectors(struct drm_fb_helper *fb_helper,
+ bool *enabled)
+{
+ bool any_enabled = false;
+ struct drm_connector *connector;
+ int i = 0;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ connector = fb_helper->connector_info[i]->connector;
+ enabled[i] = drm_connector_enabled(connector, true);
+ DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
+ enabled[i] ? "yes" : "no");
+ any_enabled |= enabled[i];
+ }
+
+ if (any_enabled)
+ return;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ connector = fb_helper->connector_info[i]->connector;
+ enabled[i] = drm_connector_enabled(connector, false);
+ }
+}
+
+static bool drm_target_preferred(struct drm_fb_helper *fb_helper,
+ struct drm_display_mode **modes,
+ bool *enabled, int width, int height)
+{
+ struct drm_fb_helper_connector *fb_helper_conn;
+ int i;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ fb_helper_conn = fb_helper->connector_info[i];
+
+ if (enabled[i] == false)
+ continue;
+
+ DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
+ fb_helper_conn->connector->base.id);
+
+ /* go for command line mode first */
+ modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
+ if (!modes[i]) {
+ DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
+ fb_helper_conn->connector->base.id);
+ modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height);
+ }
+ /* No preferred modes, pick one off the list */
+ if (!modes[i] && !list_empty(&fb_helper_conn->connector->modes)) {
+ list_for_each_entry(modes[i], &fb_helper_conn->connector->modes, head)
+ break;
+ }
+ DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
+ "none");
+ }
+ return true;
+}
+
+static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_crtc **best_crtcs,
+ struct drm_display_mode **modes,
+ int n, int width, int height)
+{
+ int c, o;
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_connector *connector;
+ struct drm_connector_helper_funcs *connector_funcs;
+ struct drm_encoder *encoder;
+ struct drm_fb_helper_crtc *best_crtc;
+ int my_score, best_score, score;
+ struct drm_fb_helper_crtc **crtcs, *crtc;
+ struct drm_fb_helper_connector *fb_helper_conn;
+
+ if (n == fb_helper->connector_count)
+ return 0;
+
+ fb_helper_conn = fb_helper->connector_info[n];
+ connector = fb_helper_conn->connector;
+
+ best_crtcs[n] = NULL;
+ best_crtc = NULL;
+ best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height);
+ if (modes[n] == NULL)
+ return best_score;
+
+ crtcs = kzalloc(dev->mode_config.num_connector *
+ sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
+ if (!crtcs)
+ return best_score;
+
+ my_score = 1;
+ if (connector->status == connector_status_connected)
+ my_score++;
+ if (drm_has_cmdline_mode(fb_helper_conn))
+ my_score++;
+ if (drm_has_preferred_mode(fb_helper_conn, width, height))
+ my_score++;
+
+ connector_funcs = connector->helper_private;
+ encoder = connector_funcs->best_encoder(connector);
+ if (!encoder)
+ goto out;
+
+ /* select a crtc for this connector and then attempt to configure
+ remaining connectors */
+ for (c = 0; c < fb_helper->crtc_count; c++) {
+ crtc = &fb_helper->crtc_info[c];
+
+ if ((encoder->possible_crtcs & (1 << c)) == 0) {
+ continue;
+ }
+
+ for (o = 0; o < n; o++)
+ if (best_crtcs[o] == crtc)
+ break;
+
+ if (o < n) {
+ /* ignore cloning for now */
+ continue;
+ }
+
+ crtcs[n] = crtc;
+ memcpy(crtcs, best_crtcs, n * sizeof(struct drm_fb_helper_crtc *));
+ score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1,
+ width, height);
+ if (score > best_score) {
+ best_crtc = crtc;
+ best_score = score;
+ memcpy(best_crtcs, crtcs,
+ dev->mode_config.num_connector *
+ sizeof(struct drm_fb_helper_crtc *));
+ }
+ }
+out:
+ kfree(crtcs);
+ return best_score;
+}
+
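drm_pick_crtcs() above is an exhaustive recursive search: for connector n it tries every CRTC its encoder can reach, recurses over the remaining connectors, and keeps the assignment with the highest total score (my_score rewards a connected status, a command-line mode and a preferred mode). Stripped of the DRM types, the shape of the algorithm is roughly the stand-alone sketch below; the score and possible tables are made-up inputs.

#include <stdio.h>
#include <string.h>

#define N_CONN 3
#define N_CRTC 2

/* score[i]: how much we want connector i lit; possible[i][c]: can crtc c drive it? */
static const int score[N_CONN] = { 3, 2, 1 };
static const int possible[N_CONN][N_CRTC] = { { 1, 1 }, { 0, 1 }, { 1, 0 } };

/* Best total score for connectors n.. given best[0..n-1]; fills best[n..]. */
static int pick(int n, int best[N_CONN])
{
	int scratch[N_CONN];
	int c, o, s, best_score;

	if (n == N_CONN)
		return 0;

	best[n] = -1;				/* option: leave connector n dark */
	best_score = pick(n + 1, best);

	for (c = 0; c < N_CRTC; c++) {
		if (!possible[n][c])
			continue;
		for (o = 0; o < n; o++)		/* crtc already claimed? (no cloning) */
			if (best[o] == c)
				break;
		if (o < n)
			continue;

		memcpy(scratch, best, sizeof(scratch));
		scratch[n] = c;
		s = score[n] + pick(n + 1, scratch);
		if (s > best_score) {
			best_score = s;
			memcpy(best, scratch, sizeof(scratch));
		}
	}
	return best_score;
}

int main(void)
{
	int best[N_CONN];
	int total = pick(0, best);

	/* prints: total score 5: crtc assignment 0 1 -1 */
	printf("total score %d: crtc assignment %d %d %d\n",
	       total, best[0], best[1], best[2]);
	return 0;
}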
+static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
+{
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_fb_helper_crtc **crtcs;
+ struct drm_display_mode **modes;
+ struct drm_encoder *encoder;
+ struct drm_mode_set *modeset;
+ bool *enabled;
+ int width, height;
+ int i, ret;
+
+ DRM_DEBUG_KMS("\n");
+
+ width = dev->mode_config.max_width;
+ height = dev->mode_config.max_height;
+
+ /* clean out all the encoder/crtc combos */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ encoder->crtc = NULL;
+ }
+
+ crtcs = kcalloc(dev->mode_config.num_connector,
+ sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
+ modes = kcalloc(dev->mode_config.num_connector,
+ sizeof(struct drm_display_mode *), GFP_KERNEL);
+ enabled = kcalloc(dev->mode_config.num_connector,
+ sizeof(bool), GFP_KERNEL);
+
+ drm_enable_connectors(fb_helper, enabled);
+
+ ret = drm_target_preferred(fb_helper, modes, enabled, width, height);
+ if (!ret)
+ DRM_ERROR("Unable to find initial modes\n");
+
+ DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height);
+
+ drm_pick_crtcs(fb_helper, crtcs, modes, 0, width, height);
+
+ /* need to set the modesets up here for use later */
+ /* fill out the connector<->crtc mappings into the modesets */
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ modeset = &fb_helper->crtc_info[i].mode_set;
+ modeset->num_connectors = 0;
+ }
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ struct drm_display_mode *mode = modes[i];
+ struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
+ modeset = &fb_crtc->mode_set;
+
+ if (mode && fb_crtc) {
+ DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
+ mode->name, fb_crtc->mode_set.crtc->base.id);
+ fb_crtc->desired_mode = mode;
+ if (modeset->mode)
+ drm_mode_destroy(dev, modeset->mode);
+ modeset->mode = drm_mode_duplicate(dev,
+ fb_crtc->desired_mode);
+ modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
+ }
+ }
+
+ kfree(crtcs);
+ kfree(modes);
+ kfree(enabled);
+}
+
+/**
+ * drm_fb_helper_initial_config - setup a sane initial connector configuration
+ * @fb_helper: fb_helper device struct
+ * @bpp_sel: bpp value to use for the framebuffer configuration
+ *
+ * LOCKING:
+ * Called at init time, must take mode config lock.
+ *
+ * Scan the CRTCs and connectors and try to put together an initial setup.
+ * At the moment, this is a cloned configuration across all heads with
+ * a new framebuffer object as the backing store.
+ *
+ * RETURNS:
+ * Zero if everything went ok, nonzero otherwise.
+ */
+bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
+{
+ struct drm_device *dev = fb_helper->dev;
+ int count = 0;
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(fb_helper->dev);
+
+ drm_fb_helper_parse_command_line(fb_helper);
+
+ count = drm_fb_helper_probe_connector_modes(fb_helper,
+ dev->mode_config.max_width,
+ dev->mode_config.max_height);
+ /*
+ * we shouldn't end up with no modes here.
+ */
+ if (count == 0) {
+ if (fb_helper->poll_enabled) {
+ delayed_slow_work_enqueue(&fb_helper->output_status_change_slow_work,
+ 5*HZ);
+ printk(KERN_INFO "No connectors reported connected with modes - started polling\n");
+ } else
+ printk(KERN_INFO "No connectors reported connected with modes\n");
+ }
+ drm_setup_crtcs(fb_helper);
+
+ return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
+}
+EXPORT_SYMBOL(drm_fb_helper_initial_config);
+
+/* we got a hotplug irq - need to update fbcon */
+void drm_helper_fb_hpd_irq_event(struct drm_fb_helper *fb_helper)
+{
+ /* if we don't have the fbdev registered yet do nothing */
+ if (!fb_helper->fbdev)
+ return;
+
+ /* schedule a slow work asap */
+ delayed_slow_work_enqueue(&fb_helper->output_status_change_slow_work, 0);
+}
+EXPORT_SYMBOL(drm_helper_fb_hpd_irq_event);
+
+bool drm_helper_fb_hotplug_event(struct drm_fb_helper *fb_helper, bool polled)
+{
+ int count = 0;
+ int ret;
+ u32 max_width, max_height, bpp_sel;
+
+ if (!fb_helper->fb)
+ return false;
+ DRM_DEBUG_KMS("\n");
+
+ max_width = fb_helper->fb->width;
+ max_height = fb_helper->fb->height;
+ bpp_sel = fb_helper->fb->bits_per_pixel;
+
+ count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
+ max_height);
+ if (fb_helper->poll_enabled && !polled) {
+ if (count) {
+ delayed_slow_work_cancel(&fb_helper->output_status_change_slow_work);
+ } else {
+ ret = delayed_slow_work_enqueue(&fb_helper->output_status_change_slow_work, 5*HZ);
+ }
+ }
+ drm_setup_crtcs(fb_helper);
+
+ return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
+}
+EXPORT_SYMBOL(drm_helper_fb_hotplug_event);
+
+/*
+ * delayed work queue execution function
+ * - check if fbdev is actually in use on the gpu
+ * - if not set delayed flag and repoll if necessary
+ * - check for connector status change
+ * - repoll if 0 modes found
+ * - call driver output status changed notifier
+ */
+static void output_status_change_execute(struct slow_work *work)
+{
+ struct delayed_slow_work *delayed_work = container_of(work, struct delayed_slow_work, work);
+ struct drm_fb_helper *fb_helper = container_of(delayed_work, struct drm_fb_helper, output_status_change_slow_work);
+ struct drm_connector *connector;
+ enum drm_connector_status old_status, status;
+ bool repoll, changed = false;
+ int ret;
+ int i;
+ bool bound = false, crtcs_bound = false;
+ struct drm_crtc *crtc;
+
+ repoll = fb_helper->poll_enabled;
+
+ /* first of all check the fbcon framebuffer is actually bound to any crtc */
+ /* take into account that no crtc at all may be bound */
+ list_for_each_entry(crtc, &fb_helper->dev->mode_config.crtc_list, head) {
+ if (crtc->fb)
+ crtcs_bound = true;
+ if (crtc->fb == fb_helper->fb)
+ bound = true;
+ }
+
+ if (bound == false && crtcs_bound) {
+ fb_helper->delayed_hotplug = true;
+ goto requeue;
+ }
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ connector = fb_helper->connector_info[i]->connector;
+ old_status = connector->status;
+ status = connector->funcs->detect(connector);
+ if (old_status != status) {
+ changed = true;
+ }
+ if (status == connector_status_connected && repoll) {
+ DRM_DEBUG("%s is connected - stop polling\n", drm_get_connector_name(connector));
+ repoll = false;
+ }
+ }
+
+ if (changed) {
+ if (fb_helper->funcs->fb_output_status_changed)
+ fb_helper->funcs->fb_output_status_changed(fb_helper);
+ }
+
+requeue:
+ if (repoll) {
+ ret = delayed_slow_work_enqueue(delayed_work, 5*HZ);
+ if (ret)
+ DRM_ERROR("delayed enqueue failed %d\n", ret);
+ }
+}
+
+static struct slow_work_ops output_status_change_ops = {
+ .execute = output_status_change_execute,
+};
+
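
For illustration, a driver is expected to feed its hotplug interrupt into the slow-work item above and to resize/reconfigure the fbdev console from the status-changed callback. A minimal sketch, assuming a hypothetical driver whose IRQ handler receives its struct drm_fb_helper; the example_* names are made up, only drm_helper_fb_hpd_irq_event(), drm_helper_fb_hotplug_event() and the fb_output_status_changed hook are the real entry points introduced here:

/* Hypothetical driver glue; only the drm_helper_* calls are real. */
static void example_fb_output_status_changed(struct drm_fb_helper *fb_helper)
{
	/* Re-probe connectors and reconfigure the fbdev console. */
	drm_helper_fb_hotplug_event(fb_helper, true);
}

static irqreturn_t example_hotplug_irq(int irq, void *arg)
{
	struct drm_fb_helper *fb_helper = arg;

	/* Defer the heavy lifting to the slow-work item set up above. */
	drm_helper_fb_hpd_irq_event(fb_helper);
	return IRQ_HANDLED;
}
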
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index aa89d4b0b4c4..33dad3fa6043 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -124,6 +124,31 @@ drm_gem_destroy(struct drm_device *dev)
}
/**
+ * Initialize an already allocated GEM object of the specified size with
+ * shmfs backing store.
+ */
+int drm_gem_object_init(struct drm_device *dev,
+ struct drm_gem_object *obj, size_t size)
+{
+ BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+
+ obj->dev = dev;
+ obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
+ if (IS_ERR(obj->filp))
+ return -ENOMEM;
+
+ kref_init(&obj->refcount);
+ kref_init(&obj->handlecount);
+ obj->size = size;
+
+ atomic_inc(&dev->object_count);
+ atomic_add(obj->size, &dev->object_memory);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_gem_object_init);
+
+/**
* Allocate a GEM object of the specified size with shmfs backing store
*/
struct drm_gem_object *
@@ -131,28 +156,22 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
struct drm_gem_object *obj;
- BUG_ON((size & (PAGE_SIZE - 1)) != 0);
-
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj)
goto free;
- obj->dev = dev;
- obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
- if (IS_ERR(obj->filp))
+ if (drm_gem_object_init(dev, obj, size) != 0)
goto free;
- kref_init(&obj->refcount);
- kref_init(&obj->handlecount);
- obj->size = size;
if (dev->driver->gem_init_object != NULL &&
dev->driver->gem_init_object(obj) != 0) {
goto fput;
}
- atomic_inc(&dev->object_count);
- atomic_add(obj->size, &dev->object_memory);
return obj;
fput:
+ /* Object_init mangles the global counters - readjust them. */
+ atomic_dec(&dev->object_count);
+ atomic_sub(obj->size, &dev->object_memory);
fput(obj->filp);
free:
kfree(obj);
@@ -403,15 +422,15 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
idr_destroy(&file_private->object_idr);
}
-static void
-drm_gem_object_free_common(struct drm_gem_object *obj)
+void
+drm_gem_object_release(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
fput(obj->filp);
atomic_dec(&dev->object_count);
atomic_sub(obj->size, &dev->object_memory);
- kfree(obj);
}
+EXPORT_SYMBOL(drm_gem_object_release);
/**
* Called after the last reference to the object has been lost.
@@ -429,8 +448,6 @@ drm_gem_object_free(struct kref *kref)
if (dev->driver->gem_free_object != NULL)
dev->driver->gem_free_object(obj);
-
- drm_gem_object_free_common(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
@@ -453,8 +470,6 @@ drm_gem_object_free_unlocked(struct kref *kref)
dev->driver->gem_free_object(obj);
mutex_unlock(&dev->struct_mutex);
}
-
- drm_gem_object_free_common(obj);
}
EXPORT_SYMBOL(drm_gem_object_free_unlocked);
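
The two new exports above let a driver embed struct drm_gem_object inside its own buffer object instead of allocating it separately. A minimal sketch of that pattern, with hypothetical example_bo names, mirroring what the i915 hunks later in this patch do with drm_i915_gem_object:

struct example_bo {
	struct drm_gem_object base;	/* embedded, no longer a separate allocation */
	/* driver-private state follows */
};

static struct drm_gem_object *example_bo_create(struct drm_device *dev,
						size_t size)
{
	struct example_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);

	if (!bo)
		return NULL;
	if (drm_gem_object_init(dev, &bo->base, size) != 0) {
		kfree(bo);
		return NULL;
	}
	return &bo->base;
}

/* Would be wired up as the driver's ->gem_free_object() callback. */
static void example_gem_free_object(struct drm_gem_object *obj)
{
	struct example_bo *bo = container_of(obj, struct example_bo, base);

	drm_gem_object_release(obj);	/* drop shmem backing and counters */
	kfree(bo);
}
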
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 76d63394c776..f1f473ea97d3 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -258,8 +258,10 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP;
/* 18/16. Find actual vertical frame frequency */
/* ignore - just set the mode flag for interlaced */
- if (interlaced)
+ if (interlaced) {
drm_mode->vtotal *= 2;
+ drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ }
/* Fill the mode line name */
drm_mode_set_name(drm_mode);
if (reduced)
@@ -268,43 +270,35 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
else
drm_mode->flags |= (DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_NHSYNC);
- if (interlaced)
- drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
- return drm_mode;
+ return drm_mode;
}
EXPORT_SYMBOL(drm_cvt_mode);
/**
- * drm_gtf_mode - create the modeline based on GTF algorithm
+ * drm_gtf_mode_complex - create the modeline based on full GTF algorithm
*
* @dev :drm device
* @hdisplay :hdisplay size
* @vdisplay :vdisplay size
* @vrefresh :vrefresh rate.
* @interlaced :whether the interlace is supported
- * @margins :whether the margin is supported
+ * @margins :desired margin size
+ * @GTF_[MCKJ] :extended GTF formula parameters
*
* LOCKING.
* none.
*
- * return the modeline based on GTF algorithm
- *
- * This function is to create the modeline based on the GTF algorithm.
- * Generalized Timing Formula is derived from:
- * GTF Spreadsheet by Andy Morrish (1/5/97)
- * available at http://www.vesa.org
+ * return the modeline based on full GTF algorithm.
*
- * And it is copied from the file of xserver/hw/xfree86/modes/xf86gtf.c.
- * What I have done is to translate it by using integer calculation.
- * I also refer to the function of fb_get_mode in the file of
- * drivers/video/fbmon.c
+ * GTF feature blocks specify C and J in multiples of 0.5, so we pass them
+ * in here multiplied by two. For a C of 40, pass in 80.
*/
-struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay,
- int vdisplay, int vrefresh,
- bool interlaced, int margins)
-{
- /* 1) top/bottom margin size (% of height) - default: 1.8, */
+struct drm_display_mode *
+drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay,
+ int vrefresh, bool interlaced, int margins,
+ int GTF_M, int GTF_2C, int GTF_K, int GTF_2J)
+{ /* 1) top/bottom margin size (% of height) - default: 1.8, */
#define GTF_MARGIN_PERCENTAGE 18
/* 2) character cell horizontal granularity (pixels) - default 8 */
#define GTF_CELL_GRAN 8
@@ -316,17 +310,9 @@ struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay,
#define H_SYNC_PERCENT 8
/* min time of vsync + back porch (microsec) */
#define MIN_VSYNC_PLUS_BP 550
- /* blanking formula gradient */
-#define GTF_M 600
- /* blanking formula offset */
-#define GTF_C 40
- /* blanking formula scaling factor */
-#define GTF_K 128
- /* blanking formula scaling factor */
-#define GTF_J 20
/* C' and M' are part of the Blanking Duty Cycle computation */
-#define GTF_C_PRIME (((GTF_C - GTF_J) * GTF_K / 256) + GTF_J)
-#define GTF_M_PRIME (GTF_K * GTF_M / 256)
+#define GTF_C_PRIME ((((GTF_2C - GTF_2J) * GTF_K / 256) + GTF_2J) / 2)
+#define GTF_M_PRIME (GTF_K * GTF_M / 256)
struct drm_display_mode *drm_mode;
unsigned int hdisplay_rnd, vdisplay_rnd, vfieldrate_rqd;
int top_margin, bottom_margin;
@@ -460,17 +446,61 @@ struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay,
drm_mode->clock = pixel_freq;
- drm_mode_set_name(drm_mode);
- drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
-
if (interlaced) {
drm_mode->vtotal *= 2;
drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
}
+ drm_mode_set_name(drm_mode);
+ if (GTF_M == 600 && GTF_2C == 80 && GTF_K == 128 && GTF_2J == 40)
+ drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
+ else
+ drm_mode->flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC;
+
return drm_mode;
}
+EXPORT_SYMBOL(drm_gtf_mode_complex);
+
+/**
+ * drm_gtf_mode - create the modeline based on GTF algorithm
+ *
+ * @dev :drm device
+ * @hdisplay :hdisplay size
+ * @vdisplay :vdisplay size
+ * @vrefresh :vrefresh rate.
+ * @interlaced :whether the interlace is supported
+ * @margins :whether the margin is supported
+ *
+ * LOCKING.
+ * none.
+ *
+ * return the modeline based on GTF algorithm
+ *
+ * This function creates a modeline based on the GTF algorithm.
+ * The Generalized Timing Formula is derived from:
+ * GTF Spreadsheet by Andy Morrish (1/5/97)
+ * available at http://www.vesa.org
+ *
+ * It is adapted from xserver/hw/xfree86/modes/xf86gtf.c, translated to
+ * integer calculations, with reference to fb_get_mode() in
+ * drivers/video/fbmon.c
+ *
+ * Standard GTF parameters:
+ * M = 600
+ * C = 40
+ * K = 128
+ * J = 20
+ */
+struct drm_display_mode *
+drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh,
+ bool lace, int margins)
+{
+ return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh, lace,
+ margins, 600, 40 * 2, 128, 20 * 2);
+}
EXPORT_SYMBOL(drm_gtf_mode);
+
/**
* drm_mode_set_name - set the name on a mode
* @mode: name will be set in this mode
@@ -482,8 +512,11 @@ EXPORT_SYMBOL(drm_gtf_mode);
*/
void drm_mode_set_name(struct drm_display_mode *mode)
{
- snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d", mode->hdisplay,
- mode->vdisplay);
+ bool interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+
+ snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d%s",
+ mode->hdisplay, mode->vdisplay,
+ interlaced ? "i" : "");
}
EXPORT_SYMBOL(drm_mode_set_name);
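
For reference, the wrapper relationship established above means a conventional GTF request and its explicit-parameter form are interchangeable. A small sketch (the 1024x768@60 request and the example_ function name are illustrative only; the parameter values are the standard M/C/K/J documented in the comments):

/* The two calls below produce equivalent 1024x768@60 modes. */
static struct drm_display_mode *example_gtf_1024x768(struct drm_device *dev)
{
	/* Default-parameter shorthand. */
	struct drm_display_mode *mode = drm_gtf_mode(dev, 1024, 768, 60,
						     false, 0);
	/* Explicit form: M=600, C=40 (passed as 2C=80), K=128, J=20 (2J=40). */
	struct drm_display_mode *same = drm_gtf_mode_complex(dev, 1024, 768, 60,
							     false, 0, 600,
							     40 * 2, 128,
							     20 * 2);

	drm_mode_destroy(dev, same);
	return mode;
}
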
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 25bbd30ed7af..3a3a451d0bf8 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -333,7 +333,7 @@ static struct device_attribute connector_attrs_opt1[] = {
static struct bin_attribute edid_attr = {
.attr.name = "edid",
.attr.mode = 0444,
- .size = 128,
+ .size = 0,
.read = edid_show,
};
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 288fc50627e2..0d6ff640e1c6 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -70,16 +70,6 @@ struct intel_dvo_dev_ops {
void (*dpms)(struct intel_dvo_device *dvo, int mode);
/*
- * Saves the output's state for restoration on VT switch.
- */
- void (*save)(struct intel_dvo_device *dvo);
-
- /*
- * Restore's the output's state at VT switch.
- */
- void (*restore)(struct intel_dvo_device *dvo);
-
- /*
* Callback for testing a video mode for a given output.
*
* This function should only check for cases where a mode can't
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 1184c14ba87d..14d59804acd7 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -159,16 +159,7 @@
#define CH7017_BANG_LIMIT_CONTROL 0x7f
struct ch7017_priv {
- uint8_t save_hapi;
- uint8_t save_vali;
- uint8_t save_valo;
- uint8_t save_ailo;
- uint8_t save_lvds_pll_vco;
- uint8_t save_feedback_div;
- uint8_t save_lvds_control_2;
- uint8_t save_outputs_enable;
- uint8_t save_lvds_power_down;
- uint8_t save_power_management;
+ uint8_t dummy;
};
static void ch7017_dump_regs(struct intel_dvo_device *dvo);
@@ -401,39 +392,6 @@ do { \
DUMP(CH7017_LVDS_POWER_DOWN);
}
-static void ch7017_save(struct intel_dvo_device *dvo)
-{
- struct ch7017_priv *priv = dvo->dev_priv;
-
- ch7017_read(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, &priv->save_hapi);
- ch7017_read(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, &priv->save_valo);
- ch7017_read(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, &priv->save_ailo);
- ch7017_read(dvo, CH7017_LVDS_PLL_VCO_CONTROL, &priv->save_lvds_pll_vco);
- ch7017_read(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, &priv->save_feedback_div);
- ch7017_read(dvo, CH7017_LVDS_CONTROL_2, &priv->save_lvds_control_2);
- ch7017_read(dvo, CH7017_OUTPUTS_ENABLE, &priv->save_outputs_enable);
- ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &priv->save_lvds_power_down);
- ch7017_read(dvo, CH7017_POWER_MANAGEMENT, &priv->save_power_management);
-}
-
-static void ch7017_restore(struct intel_dvo_device *dvo)
-{
- struct ch7017_priv *priv = dvo->dev_priv;
-
- /* Power down before changing mode */
- ch7017_dpms(dvo, DRM_MODE_DPMS_OFF);
-
- ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, priv->save_hapi);
- ch7017_write(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, priv->save_valo);
- ch7017_write(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, priv->save_ailo);
- ch7017_write(dvo, CH7017_LVDS_PLL_VCO_CONTROL, priv->save_lvds_pll_vco);
- ch7017_write(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, priv->save_feedback_div);
- ch7017_write(dvo, CH7017_LVDS_CONTROL_2, priv->save_lvds_control_2);
- ch7017_write(dvo, CH7017_OUTPUTS_ENABLE, priv->save_outputs_enable);
- ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, priv->save_lvds_power_down);
- ch7017_write(dvo, CH7017_POWER_MANAGEMENT, priv->save_power_management);
-}
-
static void ch7017_destroy(struct intel_dvo_device *dvo)
{
struct ch7017_priv *priv = dvo->dev_priv;
@@ -451,7 +409,5 @@ struct intel_dvo_dev_ops ch7017_ops = {
.mode_set = ch7017_mode_set,
.dpms = ch7017_dpms,
.dump_regs = ch7017_dump_regs,
- .save = ch7017_save,
- .restore = ch7017_restore,
.destroy = ch7017_destroy,
};
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index d56ff5cc22b2..6f1944b24441 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -92,21 +92,10 @@ static struct ch7xxx_id_struct {
{ CH7301_VID, "CH7301" },
};
-struct ch7xxx_reg_state {
- uint8_t regs[CH7xxx_NUM_REGS];
-};
-
struct ch7xxx_priv {
bool quiet;
-
- struct ch7xxx_reg_state save_reg;
- struct ch7xxx_reg_state mode_reg;
- uint8_t save_TCTL, save_TPCP, save_TPD, save_TPVT;
- uint8_t save_TLPF, save_TCT, save_PM, save_IDF;
};
-static void ch7xxx_save(struct intel_dvo_device *dvo);
-
static char *ch7xxx_get_id(uint8_t vid)
{
int i;
@@ -312,42 +301,17 @@ static void ch7xxx_dpms(struct intel_dvo_device *dvo, int mode)
static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
{
- struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
int i;
for (i = 0; i < CH7xxx_NUM_REGS; i++) {
+ uint8_t val;
if ((i % 8) == 0 )
DRM_LOG_KMS("\n %02X: ", i);
- DRM_LOG_KMS("%02X ", ch7xxx->mode_reg.regs[i]);
+ ch7xxx_readb(dvo, i, &val);
+ DRM_LOG_KMS("%02X ", val);
}
}
-static void ch7xxx_save(struct intel_dvo_device *dvo)
-{
- struct ch7xxx_priv *ch7xxx= dvo->dev_priv;
-
- ch7xxx_readb(dvo, CH7xxx_TCTL, &ch7xxx->save_TCTL);
- ch7xxx_readb(dvo, CH7xxx_TPCP, &ch7xxx->save_TPCP);
- ch7xxx_readb(dvo, CH7xxx_TPD, &ch7xxx->save_TPD);
- ch7xxx_readb(dvo, CH7xxx_TPVT, &ch7xxx->save_TPVT);
- ch7xxx_readb(dvo, CH7xxx_TLPF, &ch7xxx->save_TLPF);
- ch7xxx_readb(dvo, CH7xxx_PM, &ch7xxx->save_PM);
- ch7xxx_readb(dvo, CH7xxx_IDF, &ch7xxx->save_IDF);
-}
-
-static void ch7xxx_restore(struct intel_dvo_device *dvo)
-{
- struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
-
- ch7xxx_writeb(dvo, CH7xxx_TCTL, ch7xxx->save_TCTL);
- ch7xxx_writeb(dvo, CH7xxx_TPCP, ch7xxx->save_TPCP);
- ch7xxx_writeb(dvo, CH7xxx_TPD, ch7xxx->save_TPD);
- ch7xxx_writeb(dvo, CH7xxx_TPVT, ch7xxx->save_TPVT);
- ch7xxx_writeb(dvo, CH7xxx_TLPF, ch7xxx->save_TLPF);
- ch7xxx_writeb(dvo, CH7xxx_IDF, ch7xxx->save_IDF);
- ch7xxx_writeb(dvo, CH7xxx_PM, ch7xxx->save_PM);
-}
-
static void ch7xxx_destroy(struct intel_dvo_device *dvo)
{
struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
@@ -365,7 +329,5 @@ struct intel_dvo_dev_ops ch7xxx_ops = {
.mode_set = ch7xxx_mode_set,
.dpms = ch7xxx_dpms,
.dump_regs = ch7xxx_dump_regs,
- .save = ch7xxx_save,
- .restore = ch7xxx_restore,
.destroy = ch7xxx_destroy,
};
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 24169e528f0f..a2ec3f487202 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -153,9 +153,6 @@ struct ivch_priv {
bool quiet;
uint16_t width, height;
-
- uint16_t save_VR01;
- uint16_t save_VR40;
};
@@ -405,22 +402,6 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo)
DRM_LOG_KMS("VR8F: 0x%04x\n", val);
}
-static void ivch_save(struct intel_dvo_device *dvo)
-{
- struct ivch_priv *priv = dvo->dev_priv;
-
- ivch_read(dvo, VR01, &priv->save_VR01);
- ivch_read(dvo, VR40, &priv->save_VR40);
-}
-
-static void ivch_restore(struct intel_dvo_device *dvo)
-{
- struct ivch_priv *priv = dvo->dev_priv;
-
- ivch_write(dvo, VR01, priv->save_VR01);
- ivch_write(dvo, VR40, priv->save_VR40);
-}
-
static void ivch_destroy(struct intel_dvo_device *dvo)
{
struct ivch_priv *priv = dvo->dev_priv;
@@ -434,8 +415,6 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
struct intel_dvo_dev_ops ivch_ops= {
.init = ivch_init,
.dpms = ivch_dpms,
- .save = ivch_save,
- .restore = ivch_restore,
.mode_valid = ivch_mode_valid,
.mode_set = ivch_mode_set,
.detect = ivch_detect,
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index 0001c13f0a80..9b8e6765cf26 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -58,17 +58,9 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#define SIL164_REGC 0x0c
-struct sil164_save_rec {
- uint8_t reg8;
- uint8_t reg9;
- uint8_t regc;
-};
-
struct sil164_priv {
//I2CDevRec d;
bool quiet;
- struct sil164_save_rec save_regs;
- struct sil164_save_rec mode_regs;
};
#define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr))
@@ -252,34 +244,6 @@ static void sil164_dump_regs(struct intel_dvo_device *dvo)
DRM_LOG_KMS("SIL164_REGC: 0x%02x\n", val);
}
-static void sil164_save(struct intel_dvo_device *dvo)
-{
- struct sil164_priv *sil= dvo->dev_priv;
-
- if (!sil164_readb(dvo, SIL164_REG8, &sil->save_regs.reg8))
- return;
-
- if (!sil164_readb(dvo, SIL164_REG9, &sil->save_regs.reg9))
- return;
-
- if (!sil164_readb(dvo, SIL164_REGC, &sil->save_regs.regc))
- return;
-
- return;
-}
-
-static void sil164_restore(struct intel_dvo_device *dvo)
-{
- struct sil164_priv *sil = dvo->dev_priv;
-
- /* Restore it powered down initially */
- sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8 & ~0x1);
-
- sil164_writeb(dvo, SIL164_REG9, sil->save_regs.reg9);
- sil164_writeb(dvo, SIL164_REGC, sil->save_regs.regc);
- sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8);
-}
-
static void sil164_destroy(struct intel_dvo_device *dvo)
{
struct sil164_priv *sil = dvo->dev_priv;
@@ -297,7 +261,5 @@ struct intel_dvo_dev_ops sil164_ops = {
.mode_set = sil164_mode_set,
.dpms = sil164_dpms,
.dump_regs = sil164_dump_regs,
- .save = sil164_save,
- .restore = sil164_restore,
.destroy = sil164_destroy,
};
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index c7c391bc116a..66c697bc9b22 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -86,16 +86,8 @@
#define TFP410_V_RES_LO 0x3C
#define TFP410_V_RES_HI 0x3D
-struct tfp410_save_rec {
- uint8_t ctl1;
- uint8_t ctl2;
-};
-
struct tfp410_priv {
bool quiet;
-
- struct tfp410_save_rec saved_reg;
- struct tfp410_save_rec mode_reg;
};
static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
@@ -293,28 +285,6 @@ static void tfp410_dump_regs(struct intel_dvo_device *dvo)
DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val);
}
-static void tfp410_save(struct intel_dvo_device *dvo)
-{
- struct tfp410_priv *tfp = dvo->dev_priv;
-
- if (!tfp410_readb(dvo, TFP410_CTL_1, &tfp->saved_reg.ctl1))
- return;
-
- if (!tfp410_readb(dvo, TFP410_CTL_2, &tfp->saved_reg.ctl2))
- return;
-}
-
-static void tfp410_restore(struct intel_dvo_device *dvo)
-{
- struct tfp410_priv *tfp = dvo->dev_priv;
-
- /* Restore it powered down initially */
- tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1 & ~0x1);
-
- tfp410_writeb(dvo, TFP410_CTL_2, tfp->saved_reg.ctl2);
- tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1);
-}
-
static void tfp410_destroy(struct intel_dvo_device *dvo)
{
struct tfp410_priv *tfp = dvo->dev_priv;
@@ -332,7 +302,5 @@ struct intel_dvo_dev_ops tfp410_ops = {
.mode_set = tfp410_mode_set,
.dpms = tfp410_dpms,
.dump_regs = tfp410_dump_regs,
- .save = tfp410_save,
- .restore = tfp410_restore,
.destroy = tfp410_destroy,
};
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a0b8447b06e7..213aa3f67314 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -96,19 +96,18 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
spin_lock(lock);
list_for_each_entry(obj_priv, head, list)
{
- struct drm_gem_object *obj = obj_priv->obj;
-
seq_printf(m, " %p: %s %8zd %08x %08x %d%s%s",
- obj,
+ &obj_priv->base,
get_pin_flag(obj_priv),
- obj->size,
- obj->read_domains, obj->write_domain,
+ obj_priv->base.size,
+ obj_priv->base.read_domains,
+ obj_priv->base.write_domain,
obj_priv->last_rendering_seqno,
obj_priv->dirty ? " dirty" : "",
obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : "");
- if (obj->name)
- seq_printf(m, " (name: %d)", obj->name);
+ if (obj_priv->base.name)
+ seq_printf(m, " (name: %d)", obj_priv->base.name);
if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
seq_printf(m, " (fence: %d)", obj_priv->fence_reg);
if (obj_priv->gtt_space != NULL)
@@ -289,7 +288,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
spin_lock(&dev_priv->mm.active_list_lock);
list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
- obj = obj_priv->obj;
+ obj = &obj_priv->base;
if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
ret = i915_gem_object_get_pages(obj, 0);
if (ret) {
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index c3cfafcbfe7d..4c03ee536fec 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1504,7 +1504,7 @@ static int i915_load_modeset_init(struct drm_device *dev,
I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
- drm_helper_initial_config(dev);
+ intel_fbdev_init(dev);
return 0;
@@ -1591,7 +1591,7 @@ static void i915_get_mem_freq(struct drm_device *dev)
*/
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv;
resource_size_t base, size;
int ret = 0, mmio_bar;
uint32_t agp_size, prealloc_size, prealloc_start;
@@ -1723,6 +1723,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
/* Start out suspended */
dev_priv->mm.suspended = 1;
+ intel_detect_pch(dev);
+
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = i915_load_modeset_init(dev, prealloc_start,
prealloc_size, agp_size);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index cc03537bb883..5c51e45ab68d 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -188,6 +188,35 @@ const static struct pci_device_id pciidlist[] = {
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif
+#define INTEL_PCH_DEVICE_ID_MASK 0xff00
+#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
+
+void intel_detect_pch (struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct pci_dev *pch;
+
+ /*
+ * The reason to probe the ISA bridge instead of Dev31:Fun0 is that
+ * it makes graphics device passthrough easier for a VMM, which then
+ * only needs to expose the ISA bridge to let the driver know the
+ * real hardware underneath. This is a requirement from the
+ * virtualization team.
+ */
+ pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
+ if (pch) {
+ if (pch->vendor == PCI_VENDOR_ID_INTEL) {
+ int id;
+ id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+
+ if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_CPT;
+ DRM_DEBUG_KMS("Found CougarPoint PCH\n");
+ }
+ }
+ pci_dev_put(pch);
+ }
+}
+
static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6e4790065d9e..ae43f0e8f41a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -222,6 +222,13 @@ enum no_fbc_reason {
FBC_NOT_TILED, /* buffer not tiled */
};
+enum intel_pch {
+ PCH_IBX, /* Ibexpeak PCH */
+ PCH_CPT, /* Cougarpoint PCH */
+};
+
+struct intel_fbdev;
+
typedef struct drm_i915_private {
struct drm_device *dev;
@@ -335,6 +342,9 @@ typedef struct drm_i915_private {
/* Display functions */
struct drm_i915_display_funcs display;
+ /* PCH chipset type */
+ enum intel_pch pch_type;
+
/* Register state */
bool modeset_on_lid;
u8 saveLBB;
@@ -637,11 +647,14 @@ typedef struct drm_i915_private {
struct drm_mm_node *compressed_fb;
struct drm_mm_node *compressed_llb;
+
+ /* fbdev registered on this device */
+ struct intel_fbdev *fbdev;
} drm_i915_private_t;
/** driver private structure attached to each drm_gem_object */
struct drm_i915_gem_object {
- struct drm_gem_object *obj;
+ struct drm_gem_object base;
/** Current space allocated to this object in the GTT, if any. */
struct drm_mm_node *gtt_space;
@@ -740,7 +753,7 @@ struct drm_i915_gem_object {
atomic_t pending_flip;
};
-#define to_intel_bo(x) ((struct drm_i915_gem_object *) (x)->driver_private)
+#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
/**
* Request queue structure.
@@ -902,6 +915,8 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
int i915_gem_init_object(struct drm_gem_object *obj);
+struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
+ size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
void i915_gem_object_unpin(struct drm_gem_object *obj);
@@ -999,6 +1014,9 @@ extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void i8xx_disable_fbc(struct drm_device *dev);
extern void g4x_disable_fbc(struct drm_device *dev);
+extern void intel_detect_pch (struct drm_device *dev);
+extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
+
/**
* Lock test for when it's just for synchronization of ring access.
*
@@ -1130,7 +1148,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
#define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \
- !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev))
+ !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev) && \
+ !IS_GEN6(dev))
#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
/* dsparb controlled by hw only */
#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
@@ -1144,6 +1163,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
IS_GEN6(dev))
#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))
+#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
+#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
+
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
#endif
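
The to_intel_bo() change above swaps the driver_private dereference for a container_of() over the embedded base object, so conversions in both directions are simple pointer adjustments. A sketch of the two directions (the example_ helpers are hypothetical; only to_intel_bo() and the base member are from the patch):

static inline struct drm_gem_object *
example_to_gem(struct drm_i915_gem_object *obj_priv)
{
	return &obj_priv->base;		/* i915 wrapper -> GEM object */
}

static inline struct drm_i915_gem_object *
example_to_i915(struct drm_gem_object *obj)
{
	return to_intel_bo(obj);	/* GEM object -> i915 wrapper */
}
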
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ef3d91dda71a..09a7074ab878 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -124,7 +124,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
args->size = roundup(args->size, PAGE_SIZE);
/* Allocate the new object */
- obj = drm_gem_object_alloc(dev, args->size);
+ obj = i915_gem_alloc_object(dev, args->size);
if (obj == NULL)
return -ENOMEM;
@@ -1566,7 +1566,7 @@ i915_gem_process_flushing_list(struct drm_device *dev,
list_for_each_entry_safe(obj_priv, next,
&dev_priv->mm.gpu_write_list,
gpu_write_list) {
- struct drm_gem_object *obj = obj_priv->obj;
+ struct drm_gem_object *obj = &obj_priv->base;
if ((obj->write_domain & flush_domains) ==
obj->write_domain) {
@@ -1745,7 +1745,7 @@ i915_gem_retire_request(struct drm_device *dev,
obj_priv = list_first_entry(&dev_priv->mm.active_list,
struct drm_i915_gem_object,
list);
- obj = obj_priv->obj;
+ obj = &obj_priv->base;
/* If the seqno being retired doesn't match the oldest in the
* list, then the oldest in the list must still be newer than
@@ -2119,7 +2119,7 @@ i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
/* Try to find the smallest clean object */
list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
- struct drm_gem_object *obj = obj_priv->obj;
+ struct drm_gem_object *obj = &obj_priv->base;
if (obj->size >= min_size) {
if ((!obj_priv->dirty ||
i915_gem_object_is_purgeable(obj_priv)) &&
@@ -2253,7 +2253,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
/* Find an object that we can immediately reuse */
list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
- obj = obj_priv->obj;
+ obj = &obj_priv->base;
if (obj->size >= min_size)
break;
@@ -2487,7 +2487,7 @@ static int i915_find_fence_reg(struct drm_device *dev)
i = I915_FENCE_REG_NONE;
list_for_each_entry(obj_priv, &dev_priv->mm.fence_list,
fence_list) {
- obj = obj_priv->obj;
+ obj = &obj_priv->base;
if (obj_priv->pin_count)
continue;
@@ -4471,34 +4471,40 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
return 0;
}
-int i915_gem_init_object(struct drm_gem_object *obj)
+struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
+ size_t size)
{
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
- obj_priv = kzalloc(sizeof(*obj_priv), GFP_KERNEL);
- if (obj_priv == NULL)
- return -ENOMEM;
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (obj == NULL)
+ return NULL;
- /*
- * We've just allocated pages from the kernel,
- * so they've just been written by the CPU with
- * zeros. They'll need to be clflushed before we
- * use them with the GPU.
- */
- obj->write_domain = I915_GEM_DOMAIN_CPU;
- obj->read_domains = I915_GEM_DOMAIN_CPU;
+ if (drm_gem_object_init(dev, &obj->base, size) != 0) {
+ kfree(obj);
+ return NULL;
+ }
- obj_priv->agp_type = AGP_USER_MEMORY;
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- obj->driver_private = obj_priv;
- obj_priv->obj = obj;
- obj_priv->fence_reg = I915_FENCE_REG_NONE;
- INIT_LIST_HEAD(&obj_priv->list);
- INIT_LIST_HEAD(&obj_priv->gpu_write_list);
- INIT_LIST_HEAD(&obj_priv->fence_list);
- obj_priv->madv = I915_MADV_WILLNEED;
+ obj->agp_type = AGP_USER_MEMORY;
- trace_i915_gem_object_create(obj);
+ obj->base.driver_private = NULL;
+ obj->fence_reg = I915_FENCE_REG_NONE;
+ INIT_LIST_HEAD(&obj->list);
+ INIT_LIST_HEAD(&obj->gpu_write_list);
+ INIT_LIST_HEAD(&obj->fence_list);
+ obj->madv = I915_MADV_WILLNEED;
+
+ trace_i915_gem_object_create(&obj->base);
+
+ return &obj->base;
+}
+
+int i915_gem_init_object(struct drm_gem_object *obj)
+{
+ BUG();
return 0;
}
@@ -4521,9 +4527,11 @@ void i915_gem_free_object(struct drm_gem_object *obj)
if (obj_priv->mmap_offset)
i915_gem_free_mmap_offset(obj);
+ drm_gem_object_release(obj);
+
kfree(obj_priv->page_cpu_valid);
kfree(obj_priv->bit_17);
- kfree(obj->driver_private);
+ kfree(obj_priv);
}
/** Unbinds all inactive objects. */
@@ -4536,9 +4544,9 @@ i915_gem_evict_from_inactive_list(struct drm_device *dev)
struct drm_gem_object *obj;
int ret;
- obj = list_first_entry(&dev_priv->mm.inactive_list,
- struct drm_i915_gem_object,
- list)->obj;
+ obj = &list_first_entry(&dev_priv->mm.inactive_list,
+ struct drm_i915_gem_object,
+ list)->base;
ret = i915_gem_object_unbind(obj);
if (ret != 0) {
@@ -4653,7 +4661,7 @@ i915_gem_init_hws(struct drm_device *dev)
if (!I915_NEED_GFX_HWS(dev))
return 0;
- obj = drm_gem_object_alloc(dev, 4096);
+ obj = i915_gem_alloc_object(dev, 4096);
if (obj == NULL) {
DRM_ERROR("Failed to allocate status page\n");
ret = -ENOMEM;
@@ -4764,7 +4772,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
if (ret != 0)
return ret;
- obj = drm_gem_object_alloc(dev, 128 * 1024);
+ obj = i915_gem_alloc_object(dev, 128 * 1024);
if (obj == NULL) {
DRM_ERROR("Failed to allocate ringbuffer\n");
i915_gem_cleanup_hws(dev);
@@ -5229,7 +5237,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
&dev_priv->mm.inactive_list,
list) {
if (i915_gem_object_is_purgeable(obj_priv)) {
- i915_gem_object_unbind(obj_priv->obj);
+ i915_gem_object_unbind(&obj_priv->base);
if (--nr_to_scan <= 0)
break;
}
@@ -5258,7 +5266,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
&dev_priv->mm.inactive_list,
list) {
if (nr_to_scan > 0) {
- i915_gem_object_unbind(obj_priv->obj);
+ i915_gem_object_unbind(&obj_priv->base);
nr_to_scan--;
} else
cnt++;
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 35507cf53fa3..80f380b1d951 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -39,7 +39,7 @@ i915_verify_inactive(struct drm_device *dev, char *file, int line)
struct drm_i915_gem_object *obj_priv;
list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
- obj = obj_priv->obj;
+ obj = &obj_priv->base;
if (obj_priv->pin_count || obj_priv->active ||
(obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
I915_GEM_DOMAIN_GTT)))
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 2b8b969d0c15..a7e4b1f27497 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -169,9 +169,13 @@ void intel_enable_asle (struct drm_device *dev)
if (HAS_PCH_SPLIT(dev))
ironlake_enable_display_irq(dev_priv, DE_GSE);
- else
+ else {
i915_enable_pipestat(dev_priv, 1,
I915_LEGACY_BLC_EVENT_ENABLE);
+ if (IS_I965G(dev))
+ i915_enable_pipestat(dev_priv, 0,
+ I915_LEGACY_BLC_EVENT_ENABLE);
+ }
}
/**
@@ -256,17 +260,18 @@ static void i915_hotplug_work_func(struct work_struct *work)
hotplug_work);
struct drm_device *dev = dev_priv->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *connector;
+ struct drm_encoder *encoder;
- if (mode_config->num_connector) {
- list_for_each_entry(connector, &mode_config->connector_list, head) {
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ if (mode_config->num_encoder) {
+ list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
if (intel_encoder->hot_plug)
(*intel_encoder->hot_plug) (intel_encoder);
}
}
/* Just fire off a uevent and let userspace tell us what to do */
+ intelfb_hotplug(dev, false);
drm_sysfs_hotplug_event(dev);
}
@@ -608,7 +613,7 @@ static void i915_capture_error_state(struct drm_device *dev)
batchbuffer[1] = NULL;
count = 0;
list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
- struct drm_gem_object *obj = obj_priv->obj;
+ struct drm_gem_object *obj = &obj_priv->base;
if (batchbuffer[0] == NULL &&
bbaddr >= obj_priv->gtt_offset &&
@@ -644,7 +649,7 @@ static void i915_capture_error_state(struct drm_device *dev)
if (error->active_bo) {
int i = 0;
list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
- struct drm_gem_object *obj = obj_priv->obj;
+ struct drm_gem_object *obj = &obj_priv->base;
error->active_bo[i].size = obj->size;
error->active_bo[i].name = obj->name;
@@ -946,7 +951,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
intel_finish_page_flip(dev, 1);
}
- if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
+ if ((pipea_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
+ (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
(iir & I915_ASLE_INTERRUPT))
opregion_asle_intr(dev);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 4cbc5210fd30..c754808cdebb 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1764,6 +1764,14 @@
#define DP_LINK_TRAIN_MASK (3 << 28)
#define DP_LINK_TRAIN_SHIFT 28
+/* CPT Link training mode */
+#define DP_LINK_TRAIN_PAT_1_CPT (0 << 8)
+#define DP_LINK_TRAIN_PAT_2_CPT (1 << 8)
+#define DP_LINK_TRAIN_PAT_IDLE_CPT (2 << 8)
+#define DP_LINK_TRAIN_OFF_CPT (3 << 8)
+#define DP_LINK_TRAIN_MASK_CPT (7 << 8)
+#define DP_LINK_TRAIN_SHIFT_CPT 8
+
/* Signal voltages. These are mostly controlled by the other end */
#define DP_VOLTAGE_0_4 (0 << 25)
#define DP_VOLTAGE_0_6 (1 << 25)
@@ -1988,15 +1996,24 @@
#define DSPFW1 0x70034
#define DSPFW_SR_SHIFT 23
+#define DSPFW_SR_MASK (0x1ff<<23)
#define DSPFW_CURSORB_SHIFT 16
+#define DSPFW_CURSORB_MASK (0x3f<<16)
#define DSPFW_PLANEB_SHIFT 8
+#define DSPFW_PLANEB_MASK (0x7f<<8)
+#define DSPFW_PLANEA_MASK (0x7f)
#define DSPFW2 0x70038
#define DSPFW_CURSORA_MASK 0x00003f00
#define DSPFW_CURSORA_SHIFT 8
+#define DSPFW_PLANEC_MASK (0x7f)
#define DSPFW3 0x7003c
#define DSPFW_HPLL_SR_EN (1<<31)
#define DSPFW_CURSOR_SR_SHIFT 24
#define PINEVIEW_SELF_REFRESH_EN (1<<30)
+#define DSPFW_CURSOR_SR_MASK (0x3f<<24)
+#define DSPFW_HPLL_CURSOR_SHIFT 16
+#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16)
+#define DSPFW_HPLL_SR_MASK (0x1ff)
/* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64
@@ -2023,6 +2040,43 @@
#define PINEVIEW_CURSOR_DFT_WM 0
#define PINEVIEW_CURSOR_GUARD_WM 5
+
+/* define the Watermark register on Ironlake */
+#define WM0_PIPEA_ILK 0x45100
+#define WM0_PIPE_PLANE_MASK (0x7f<<16)
+#define WM0_PIPE_PLANE_SHIFT 16
+#define WM0_PIPE_SPRITE_MASK (0x3f<<8)
+#define WM0_PIPE_SPRITE_SHIFT 8
+#define WM0_PIPE_CURSOR_MASK (0x1f)
+
+#define WM0_PIPEB_ILK 0x45104
+#define WM1_LP_ILK 0x45108
+#define WM1_LP_SR_EN (1<<31)
+#define WM1_LP_LATENCY_SHIFT 24
+#define WM1_LP_LATENCY_MASK (0x7f<<24)
+#define WM1_LP_SR_MASK (0x1ff<<8)
+#define WM1_LP_SR_SHIFT 8
+#define WM1_LP_CURSOR_MASK (0x3f)
+
+/* Memory latency timer register */
+#define MLTR_ILK 0x11222
+/* the unit of memory self-refresh latency time is 0.5us */
+#define ILK_SRLT_MASK 0x3f
+
+/* define the fifo size on Ironlake */
+#define ILK_DISPLAY_FIFO 128
+#define ILK_DISPLAY_MAXWM 64
+#define ILK_DISPLAY_DFTWM 8
+
+#define ILK_DISPLAY_SR_FIFO 512
+#define ILK_DISPLAY_MAX_SRWM 0x1ff
+#define ILK_DISPLAY_DFT_SRWM 0x3f
+#define ILK_CURSOR_SR_FIFO 64
+#define ILK_CURSOR_MAX_SRWM 0x3f
+#define ILK_CURSOR_DFT_SRWM 8
+
+#define ILK_FIFO_LINE_SIZE 64
+
/*
* The two pipe frame counter registers are not synchronized, so
* reading a stable value is somewhat tricky. The following code
@@ -2304,8 +2358,15 @@
#define GTIIR 0x44018
#define GTIER 0x4401c
+#define ILK_DISPLAY_CHICKEN2 0x42004
+#define ILK_DPARB_GATE (1<<22)
+#define ILK_VSDPFD_FULL (1<<21)
+#define ILK_DSPCLK_GATE 0x42020
+#define ILK_DPARB_CLK_GATE (1<<5)
+
#define DISP_ARB_CTL 0x45000
#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
+#define DISP_FBC_WM_DIS (1<<15)
/* PCH */
@@ -2316,6 +2377,11 @@
#define SDE_PORTB_HOTPLUG (1 << 8)
#define SDE_SDVOB_HOTPLUG (1 << 6)
#define SDE_HOTPLUG_MASK (0xf << 8)
+/* CPT */
+#define SDE_CRT_HOTPLUG_CPT (1 << 19)
+#define SDE_PORTD_HOTPLUG_CPT (1 << 23)
+#define SDE_PORTC_HOTPLUG_CPT (1 << 22)
+#define SDE_PORTB_HOTPLUG_CPT (1 << 21)
#define SDEISR 0xc4000
#define SDEIMR 0xc4004
@@ -2407,6 +2473,17 @@
#define PCH_SSC4_PARMS 0xc6210
#define PCH_SSC4_AUX_PARMS 0xc6214
+#define PCH_DPLL_SEL 0xc7000
+#define TRANSA_DPLL_ENABLE (1<<3)
+#define TRANSA_DPLLB_SEL (1<<0)
+#define TRANSA_DPLLA_SEL 0
+#define TRANSB_DPLL_ENABLE (1<<7)
+#define TRANSB_DPLLB_SEL (1<<4)
+#define TRANSB_DPLLA_SEL (0)
+#define TRANSC_DPLL_ENABLE (1<<11)
+#define TRANSC_DPLLB_SEL (1<<8)
+#define TRANSC_DPLLA_SEL (0)
+
/* transcoder */
#define TRANS_HTOTAL_A 0xe0000
@@ -2493,6 +2570,19 @@
#define FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1<<22)
#define FDI_LINK_TRAIN_PRE_EMPHASIS_2X (2<<22)
#define FDI_LINK_TRAIN_PRE_EMPHASIS_3X (3<<22)
+/* ILK always uses 400mV 0dB for voltage swing and pre-emphasis level.
+ SNB has different settings. */
+/* SNB A-stepping */
+#define FDI_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22)
+#define FDI_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22)
+#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22)
+#define FDI_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22)
+/* SNB B-stepping */
+#define FDI_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22)
+#define FDI_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22)
+#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22)
+#define FDI_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22)
+#define FDI_LINK_TRAIN_VOL_EMP_MASK (0x3f<<22)
#define FDI_DP_PORT_WIDTH_X1 (0<<19)
#define FDI_DP_PORT_WIDTH_X2 (1<<19)
#define FDI_DP_PORT_WIDTH_X3 (2<<19)
@@ -2525,6 +2615,13 @@
#define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6)
#define FDI_SEL_RAWCLK (0<<4)
#define FDI_SEL_PCDCLK (1<<4)
+/* CPT */
+#define FDI_AUTO_TRAINING (1<<10)
+#define FDI_LINK_TRAIN_PATTERN_1_CPT (0<<8)
+#define FDI_LINK_TRAIN_PATTERN_2_CPT (1<<8)
+#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2<<8)
+#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8)
+#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8)
#define FDI_RXA_MISC 0xf0010
#define FDI_RXB_MISC 0xf1010
@@ -2596,6 +2693,9 @@
#define HSYNC_ACTIVE_HIGH (1 << 3)
#define PORT_DETECTED (1 << 2)
+/* PCH SDVOB multiplex with HDMIB */
+#define PCH_SDVOB HDMIB
+
#define HDMIC 0xe1150
#define HDMID 0xe1160
@@ -2653,4 +2753,42 @@
#define PCH_DPD_AUX_CH_DATA4 0xe4320
#define PCH_DPD_AUX_CH_DATA5 0xe4324
+/* CPT */
+#define PORT_TRANS_A_SEL_CPT 0
+#define PORT_TRANS_B_SEL_CPT (1<<29)
+#define PORT_TRANS_C_SEL_CPT (2<<29)
+#define PORT_TRANS_SEL_MASK (3<<29)
+
+#define TRANS_DP_CTL_A 0xe0300
+#define TRANS_DP_CTL_B 0xe1300
+#define TRANS_DP_CTL_C 0xe2300
+#define TRANS_DP_OUTPUT_ENABLE (1<<31)
+#define TRANS_DP_PORT_SEL_B (0<<29)
+#define TRANS_DP_PORT_SEL_C (1<<29)
+#define TRANS_DP_PORT_SEL_D (2<<29)
+#define TRANS_DP_PORT_SEL_MASK (3<<29)
+#define TRANS_DP_AUDIO_ONLY (1<<26)
+#define TRANS_DP_ENH_FRAMING (1<<18)
+#define TRANS_DP_8BPC (0<<9)
+#define TRANS_DP_10BPC (1<<9)
+#define TRANS_DP_6BPC (2<<9)
+#define TRANS_DP_12BPC (3<<9)
+#define TRANS_DP_VSYNC_ACTIVE_HIGH (1<<4)
+#define TRANS_DP_VSYNC_ACTIVE_LOW 0
+#define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3)
+#define TRANS_DP_HSYNC_ACTIVE_LOW 0
+
+/* SNB eDP training params */
+/* SNB A-stepping */
+#define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22)
+#define EDP_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22)
+#define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22)
+/* SNB B-stepping */
+#define EDP_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22)
+#define EDP_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22)
+#define EDP_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22)
+#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22)
+
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index ac0d1a73ac22..60a5800fba6e 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -600,14 +600,16 @@ void i915_save_display(struct drm_device *dev)
}
/* FIXME: save TV & SDVO state */
- /* FBC state */
- if (IS_GM45(dev)) {
- dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
- } else {
- dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
- dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
- dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
- dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+ /* Only save FBC state on the platform that supports FBC */
+ if (I915_HAS_FBC(dev)) {
+ if (IS_GM45(dev)) {
+ dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
+ } else {
+ dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
+ dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
+ dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
+ dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+ }
}
/* VGA state */
@@ -702,18 +704,19 @@ void i915_restore_display(struct drm_device *dev)
}
/* FIXME: restore TV & SDVO state */
- /* FBC info */
- if (IS_GM45(dev)) {
- g4x_disable_fbc(dev);
- I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
- } else {
- i8xx_disable_fbc(dev);
- I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
- I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
- I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
- I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
+ /* Only restore FBC info on the platform that supports FBC */
+ if (I915_HAS_FBC(dev)) {
+ if (IS_GM45(dev)) {
+ g4x_disable_fbc(dev);
+ I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
+ } else {
+ i8xx_disable_fbc(dev);
+ I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
+ I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
+ I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
+ I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
+ }
}
-
/* VGA state */
if (IS_IRONLAKE(dev))
I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 01840d9bc38f..303815321c79 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -115,7 +115,7 @@ TRACE_EVENT(i915_gem_object_get_fence,
__entry->obj, __entry->fence, __entry->tiling_mode)
);
-TRACE_EVENT(i915_gem_object_unbind,
+DECLARE_EVENT_CLASS(i915_gem_object,
TP_PROTO(struct drm_gem_object *obj),
@@ -132,21 +132,18 @@ TRACE_EVENT(i915_gem_object_unbind,
TP_printk("obj=%p", __entry->obj)
);
-TRACE_EVENT(i915_gem_object_destroy,
+DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind,
TP_PROTO(struct drm_gem_object *obj),
- TP_ARGS(obj),
+ TP_ARGS(obj)
+);
- TP_STRUCT__entry(
- __field(struct drm_gem_object *, obj)
- ),
+DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
- TP_fast_assign(
- __entry->obj = obj;
- ),
+ TP_PROTO(struct drm_gem_object *obj),
- TP_printk("obj=%p", __entry->obj)
+ TP_ARGS(obj)
);
/* batch tracing */
@@ -197,8 +194,7 @@ TRACE_EVENT(i915_gem_request_flush,
__entry->flush_domains, __entry->invalidate_domains)
);
-
-TRACE_EVENT(i915_gem_request_complete,
+DECLARE_EVENT_CLASS(i915_gem_request,
TP_PROTO(struct drm_device *dev, u32 seqno),
@@ -217,64 +213,35 @@ TRACE_EVENT(i915_gem_request_complete,
TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
);
-TRACE_EVENT(i915_gem_request_retire,
+DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
TP_PROTO(struct drm_device *dev, u32 seqno),
- TP_ARGS(dev, seqno),
-
- TP_STRUCT__entry(
- __field(u32, dev)
- __field(u32, seqno)
- ),
-
- TP_fast_assign(
- __entry->dev = dev->primary->index;
- __entry->seqno = seqno;
- ),
-
- TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+ TP_ARGS(dev, seqno)
);
-TRACE_EVENT(i915_gem_request_wait_begin,
+DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
TP_PROTO(struct drm_device *dev, u32 seqno),
- TP_ARGS(dev, seqno),
-
- TP_STRUCT__entry(
- __field(u32, dev)
- __field(u32, seqno)
- ),
-
- TP_fast_assign(
- __entry->dev = dev->primary->index;
- __entry->seqno = seqno;
- ),
-
- TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+ TP_ARGS(dev, seqno)
);
-TRACE_EVENT(i915_gem_request_wait_end,
+DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_begin,
TP_PROTO(struct drm_device *dev, u32 seqno),
- TP_ARGS(dev, seqno),
+ TP_ARGS(dev, seqno)
+);
- TP_STRUCT__entry(
- __field(u32, dev)
- __field(u32, seqno)
- ),
+DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
- TP_fast_assign(
- __entry->dev = dev->primary->index;
- __entry->seqno = seqno;
- ),
+ TP_PROTO(struct drm_device *dev, u32 seqno),
- TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+ TP_ARGS(dev, seqno)
);
-TRACE_EVENT(i915_ring_wait_begin,
+DECLARE_EVENT_CLASS(i915_ring,
TP_PROTO(struct drm_device *dev),
@@ -291,21 +258,18 @@ TRACE_EVENT(i915_ring_wait_begin,
TP_printk("dev=%u", __entry->dev)
);
-TRACE_EVENT(i915_ring_wait_end,
+DEFINE_EVENT(i915_ring, i915_ring_wait_begin,
TP_PROTO(struct drm_device *dev),
- TP_ARGS(dev),
+ TP_ARGS(dev)
+);
- TP_STRUCT__entry(
- __field(u32, dev)
- ),
+DEFINE_EVENT(i915_ring, i915_ring_wait_end,
- TP_fast_assign(
- __entry->dev = dev->primary->index;
- ),
+ TP_PROTO(struct drm_device *dev),
- TP_printk("dev=%u", __entry->dev)
+ TP_ARGS(dev)
);
#endif /* _I915_TRACE_H_ */
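
The tracepoint rework above follows the DECLARE_EVENT_CLASS/DEFINE_EVENT pattern: the class carries the TP_STRUCT__entry/TP_fast_assign/TP_printk boilerplate once, and each concrete event becomes a short definition. A minimal sketch with hypothetical event names, shaped after the i915_ring class in this hunk:

DECLARE_EVENT_CLASS(example_dev_event,

	    TP_PROTO(struct drm_device *dev),

	    TP_ARGS(dev),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     ),

	    TP_fast_assign(
			   __entry->dev = dev->primary->index;
			   ),

	    TP_printk("dev=%u", __entry->dev)
);

DEFINE_EVENT(example_dev_event, example_wait_begin,

	    TP_PROTO(struct drm_device *dev),

	    TP_ARGS(dev)
);

The emitted trace data is unchanged; only the per-event boilerplate is folded into the shared class.
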
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 759c2ef72eff..26756cd34e3c 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -136,11 +136,17 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
adpa |= ADPA_VSYNC_ACTIVE_HIGH;
if (intel_crtc->pipe == 0) {
- adpa |= ADPA_PIPE_A_SELECT;
+ if (HAS_PCH_CPT(dev))
+ adpa |= PORT_TRANS_A_SEL_CPT;
+ else
+ adpa |= ADPA_PIPE_A_SELECT;
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT_A, 0);
} else {
- adpa |= ADPA_PIPE_B_SELECT;
+ if (HAS_PCH_CPT(dev))
+ adpa |= PORT_TRANS_B_SEL_CPT;
+ else
+ adpa |= ADPA_PIPE_B_SELECT;
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT_B, 0);
}
@@ -152,15 +158,21 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 adpa;
+ u32 adpa, temp;
bool ret;
- adpa = I915_READ(PCH_ADPA);
+ temp = adpa = I915_READ(PCH_ADPA);
- adpa &= ~ADPA_CRT_HOTPLUG_MASK;
- /* disable HPD first */
- I915_WRITE(PCH_ADPA, adpa);
- (void)I915_READ(PCH_ADPA);
+ if (HAS_PCH_CPT(dev)) {
+ /* Disable DAC before force detect */
+ I915_WRITE(PCH_ADPA, adpa & ~ADPA_DAC_ENABLE);
+ (void)I915_READ(PCH_ADPA);
+ } else {
+ adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+ /* disable HPD first */
+ I915_WRITE(PCH_ADPA, adpa);
+ (void)I915_READ(PCH_ADPA);
+ }
adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
ADPA_CRT_HOTPLUG_WARMUP_10MS |
@@ -176,6 +188,11 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0)
;
+ if (HAS_PCH_CPT(dev)) {
+ I915_WRITE(PCH_ADPA, temp);
+ (void)I915_READ(PCH_ADPA);
+ }
+
/* Check the status to see if both blue and green are on now */
adpa = I915_READ(PCH_ADPA);
adpa &= ADPA_CRT_HOTPLUG_MONITOR_MASK;
@@ -245,9 +262,9 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
return false;
}
-static bool intel_crt_detect_ddc(struct drm_connector *connector)
+static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
/* CRT should always be at 0, but check anyway */
if (intel_encoder->type != INTEL_OUTPUT_ANALOG)
@@ -387,8 +404,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
static enum drm_connector_status intel_crt_detect(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct drm_encoder *encoder = &intel_encoder->enc;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct drm_crtc *crtc;
int dpms_mode;
enum drm_connector_status status;
@@ -400,18 +417,19 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
return connector_status_disconnected;
}
- if (intel_crt_detect_ddc(connector))
+ if (intel_crt_detect_ddc(encoder))
return connector_status_connected;
/* for pre-945g platforms use load detect */
if (encoder->crtc && encoder->crtc->enabled) {
status = intel_crt_load_detect(encoder->crtc, intel_encoder);
} else {
- crtc = intel_get_load_detect_pipe(intel_encoder,
+ crtc = intel_get_load_detect_pipe(intel_encoder, connector,
NULL, &dpms_mode);
if (crtc) {
status = intel_crt_load_detect(crtc, intel_encoder);
- intel_release_load_detect_pipe(intel_encoder, dpms_mode);
+ intel_release_load_detect_pipe(intel_encoder,
+ connector, dpms_mode);
} else
status = connector_status_unknown;
}
@@ -421,9 +439,6 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
static void intel_crt_destroy(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-
- intel_i2c_destroy(intel_encoder->ddc_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
@@ -432,29 +447,27 @@ static void intel_crt_destroy(struct drm_connector *connector)
static int intel_crt_get_modes(struct drm_connector *connector)
{
int ret;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct i2c_adapter *ddcbus;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct i2c_adapter *ddc_bus;
struct drm_device *dev = connector->dev;
- ret = intel_ddc_get_modes(intel_encoder);
+ ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
if (ret || !IS_G4X(dev))
goto end;
- ddcbus = intel_encoder->ddc_bus;
/* Try to probe digital port for output in DVI-I -> VGA mode. */
- intel_encoder->ddc_bus =
- intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D");
+ ddc_bus = intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D");
- if (!intel_encoder->ddc_bus) {
- intel_encoder->ddc_bus = ddcbus;
+ if (!ddc_bus) {
dev_printk(KERN_ERR, &connector->dev->pdev->dev,
"DDC bus registration failed for CRTDDC_D.\n");
goto end;
}
/* Try to get modes by GPIOD port */
- ret = intel_ddc_get_modes(intel_encoder);
- intel_i2c_destroy(ddcbus);
+ ret = intel_ddc_get_modes(connector, ddc_bus);
+ intel_i2c_destroy(ddc_bus);
end:
return ret;
@@ -491,12 +504,16 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
.mode_valid = intel_crt_mode_valid,
.get_modes = intel_crt_get_modes,
- .best_encoder = intel_best_encoder,
+ .best_encoder = intel_attached_encoder,
};
static void intel_crt_enc_destroy(struct drm_encoder *encoder)
{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+ intel_i2c_destroy(intel_encoder->ddc_bus);
drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_crt_enc_funcs = {
@@ -507,6 +524,7 @@ void intel_crt_init(struct drm_device *dev)
{
struct drm_connector *connector;
struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 i2c_reg;
@@ -514,14 +532,20 @@ void intel_crt_init(struct drm_device *dev)
if (!intel_encoder)
return;
- connector = &intel_encoder->base;
- drm_connector_init(dev, &intel_encoder->base,
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_encoder);
+ return;
+ }
+
+ connector = &intel_connector->base;
+ drm_connector_init(dev, &intel_connector->base,
&intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs,
DRM_MODE_ENCODER_DAC);
- drm_mode_connector_attach_encoder(&intel_encoder->base,
+ drm_mode_connector_attach_encoder(&intel_connector->base,
&intel_encoder->enc);
/* Set up the DDC bus. */
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index c7502b6b1600..85a0d1c30901 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -742,12 +742,11 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *l_entry;
+ struct drm_encoder *l_entry;
- list_for_each_entry(l_entry, &mode_config->connector_list, head) {
- if (l_entry->encoder &&
- l_entry->encoder->crtc == crtc) {
- struct intel_encoder *intel_encoder = to_intel_encoder(l_entry);
+ list_for_each_entry(l_entry, &mode_config->encoder_list, head) {
+ if (l_entry && l_entry->crtc == crtc) {
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(l_entry);
if (intel_encoder->type == type)
return true;
}
@@ -755,23 +754,6 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
return false;
}
-static struct drm_connector *
-intel_pipe_get_connector (struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *l_entry, *ret = NULL;
-
- list_for_each_entry(l_entry, &mode_config->connector_list, head) {
- if (l_entry->encoder &&
- l_entry->encoder->crtc == crtc) {
- ret = l_entry;
- break;
- }
- }
- return ret;
-}
-
#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
* Returns whether the given set of divisors are valid for a given refclk with
@@ -905,9 +887,9 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
memset(best_clock, 0, sizeof(*best_clock));
max_n = limit->n.max;
- /* based on hardware requriment prefer smaller n to precision */
+ /* based on hardware requirement, prefer smaller n to precision */
for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
- /* based on hardware requirment prefere larger m1,m2 */
+ /* based on hardware requirement, prefer larger m1, m2 */
for (clock.m1 = limit->m1.max;
clock.m1 >= limit->m1.min; clock.m1--) {
for (clock.m2 = limit->m2.max;
@@ -1510,6 +1492,219 @@ static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
udelay(500);
}
+/* The FDI link training functions for ILK/Ibexpeak. */
+static void ironlake_fdi_link_train(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
+ int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
+ int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
+ int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
+ u32 temp, tries = 0;
+
+ /* enable CPU FDI TX and PCH FDI RX */
+ temp = I915_READ(fdi_tx_reg);
+ temp |= FDI_TX_ENABLE;
+ temp &= ~(7 << 19);
+ temp |= (intel_crtc->fdi_lanes - 1) << 19;
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ I915_WRITE(fdi_tx_reg, temp);
+ I915_READ(fdi_tx_reg);
+
+ temp = I915_READ(fdi_rx_reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
+ I915_READ(fdi_rx_reg);
+ udelay(150);
+
+ /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+ for the training result */
+ temp = I915_READ(fdi_rx_imr_reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ I915_WRITE(fdi_rx_imr_reg, temp);
+ I915_READ(fdi_rx_imr_reg);
+ udelay(150);
+
+ for (;;) {
+ temp = I915_READ(fdi_rx_iir_reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if ((temp & FDI_RX_BIT_LOCK)) {
+ DRM_DEBUG_KMS("FDI train 1 done.\n");
+ I915_WRITE(fdi_rx_iir_reg,
+ temp | FDI_RX_BIT_LOCK);
+ break;
+ }
+
+ tries++;
+
+ if (tries > 5) {
+ DRM_DEBUG_KMS("FDI train 1 fail!\n");
+ break;
+ }
+ }
+
+ /* Train 2 */
+ temp = I915_READ(fdi_tx_reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ I915_WRITE(fdi_tx_reg, temp);
+
+ temp = I915_READ(fdi_rx_reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ I915_WRITE(fdi_rx_reg, temp);
+ udelay(150);
+
+ tries = 0;
+
+ for (;;) {
+ temp = I915_READ(fdi_rx_iir_reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_SYMBOL_LOCK) {
+ I915_WRITE(fdi_rx_iir_reg,
+ temp | FDI_RX_SYMBOL_LOCK);
+ DRM_DEBUG_KMS("FDI train 2 done.\n");
+ break;
+ }
+
+ tries++;
+
+ if (tries > 5) {
+ DRM_DEBUG_KMS("FDI train 2 fail!\n");
+ break;
+ }
+ }
+
+ DRM_DEBUG_KMS("FDI train done\n");
+}
+
+static int snb_b_fdi_train_param[] = {
+ FDI_LINK_TRAIN_400MV_0DB_SNB_B,
+ FDI_LINK_TRAIN_400MV_6DB_SNB_B,
+ FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
+ FDI_LINK_TRAIN_800MV_0DB_SNB_B,
+};
+
+/* The FDI link training functions for SNB/Cougarpoint. */
+static void gen6_fdi_link_train(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
+ int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
+ int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
+ int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
+ u32 temp, i;
+
+ /* enable CPU FDI TX and PCH FDI RX */
+ temp = I915_READ(fdi_tx_reg);
+ temp |= FDI_TX_ENABLE;
+ temp &= ~(7 << 19);
+ temp |= (intel_crtc->fdi_lanes - 1) << 19;
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ /* SNB-B */
+ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+ I915_WRITE(fdi_tx_reg, temp);
+ I915_READ(fdi_tx_reg);
+
+ temp = I915_READ(fdi_rx_reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ }
+ I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
+ I915_READ(fdi_rx_reg);
+ udelay(150);
+
+ /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+ for the training result */
+ temp = I915_READ(fdi_rx_imr_reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ I915_WRITE(fdi_rx_imr_reg, temp);
+ I915_READ(fdi_rx_imr_reg);
+ udelay(150);
+
+ for (i = 0; i < 4; i++) {
+ temp = I915_READ(fdi_tx_reg);
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[i];
+ I915_WRITE(fdi_tx_reg, temp);
+ udelay(500);
+
+ temp = I915_READ(fdi_rx_iir_reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_BIT_LOCK) {
+ I915_WRITE(fdi_rx_iir_reg,
+ temp | FDI_RX_BIT_LOCK);
+ DRM_DEBUG_KMS("FDI train 1 done.\n");
+ break;
+ }
+ }
+ if (i == 4)
+ DRM_DEBUG_KMS("FDI train 1 fail!\n");
+
+ /* Train 2 */
+ temp = I915_READ(fdi_tx_reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ if (IS_GEN6(dev)) {
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ /* SNB-B */
+ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+ }
+ I915_WRITE(fdi_tx_reg, temp);
+
+ temp = I915_READ(fdi_rx_reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ }
+ I915_WRITE(fdi_rx_reg, temp);
+ udelay(150);
+
+ for (i = 0; i < 4; i++) {
+ temp = I915_READ(fdi_tx_reg);
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[i];
+ I915_WRITE(fdi_tx_reg, temp);
+ udelay(500);
+
+ temp = I915_READ(fdi_rx_iir_reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_SYMBOL_LOCK) {
+ I915_WRITE(fdi_rx_iir_reg,
+ temp | FDI_RX_SYMBOL_LOCK);
+ DRM_DEBUG_KMS("FDI train 2 done.\n");
+ break;
+ }
+ }
+ if (i == 4)
+ DRM_DEBUG_KMS("FDI train 2 fail!\n");
+
+ DRM_DEBUG_KMS("FDI train done.\n");
+}
+
static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
@@ -1523,8 +1718,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
- int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
- int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1;
int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ;
@@ -1541,8 +1734,9 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B;
int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B;
int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
+ int trans_dpll_sel = (pipe == 0) ? 0 : 1;
u32 temp;
- int tries = 5, j, n;
+ int n;
u32 pipe_bpc;
temp = I915_READ(pipeconf_reg);
@@ -1569,12 +1763,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
/* enable eDP PLL */
ironlake_enable_pll_edp(crtc);
} else {
- /* enable PCH DPLL */
- temp = I915_READ(pch_dpll_reg);
- if ((temp & DPLL_VCO_ENABLE) == 0) {
- I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE);
- I915_READ(pch_dpll_reg);
- }
/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
temp = I915_READ(fdi_rx_reg);
@@ -1584,9 +1772,15 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
*/
temp &= ~(0x7 << 16);
temp |= (pipe_bpc << 11);
- I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE |
- FDI_SEL_PCDCLK |
- FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */
+ temp &= ~(7 << 19);
+ temp |= (intel_crtc->fdi_lanes - 1) << 19;
+ I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
+ I915_READ(fdi_rx_reg);
+ udelay(200);
+
+ /* Switch from Rawclk to PCDclk */
+ temp = I915_READ(fdi_rx_reg);
+ I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
I915_READ(fdi_rx_reg);
udelay(200);
@@ -1629,91 +1823,32 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
}
if (!HAS_eDP) {
- /* enable CPU FDI TX and PCH FDI RX */
- temp = I915_READ(fdi_tx_reg);
- temp |= FDI_TX_ENABLE;
- temp |= FDI_DP_PORT_WIDTH_X4; /* default */
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- I915_WRITE(fdi_tx_reg, temp);
- I915_READ(fdi_tx_reg);
-
- temp = I915_READ(fdi_rx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
- I915_READ(fdi_rx_reg);
-
- udelay(150);
-
- /* Train FDI. */
- /* umask FDI RX Interrupt symbol_lock and bit_lock bit
- for train result */
- temp = I915_READ(fdi_rx_imr_reg);
- temp &= ~FDI_RX_SYMBOL_LOCK;
- temp &= ~FDI_RX_BIT_LOCK;
- I915_WRITE(fdi_rx_imr_reg, temp);
- I915_READ(fdi_rx_imr_reg);
- udelay(150);
+ /* For PCH output, train the FDI link */
+ if (IS_GEN6(dev))
+ gen6_fdi_link_train(crtc);
+ else
+ ironlake_fdi_link_train(crtc);
- temp = I915_READ(fdi_rx_iir_reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
-
- if ((temp & FDI_RX_BIT_LOCK) == 0) {
- for (j = 0; j < tries; j++) {
- temp = I915_READ(fdi_rx_iir_reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n",
- temp);
- if (temp & FDI_RX_BIT_LOCK)
- break;
- udelay(200);
- }
- if (j != tries)
- I915_WRITE(fdi_rx_iir_reg,
- temp | FDI_RX_BIT_LOCK);
- else
- DRM_DEBUG_KMS("train 1 fail\n");
- } else {
- I915_WRITE(fdi_rx_iir_reg,
- temp | FDI_RX_BIT_LOCK);
- DRM_DEBUG_KMS("train 1 ok 2!\n");
+ /* enable PCH DPLL */
+ temp = I915_READ(pch_dpll_reg);
+ if ((temp & DPLL_VCO_ENABLE) == 0) {
+ I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE);
+ I915_READ(pch_dpll_reg);
}
- temp = I915_READ(fdi_tx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_2;
- I915_WRITE(fdi_tx_reg, temp);
-
- temp = I915_READ(fdi_rx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_2;
- I915_WRITE(fdi_rx_reg, temp);
-
- udelay(150);
+ udelay(200);
- temp = I915_READ(fdi_rx_iir_reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
-
- if ((temp & FDI_RX_SYMBOL_LOCK) == 0) {
- for (j = 0; j < tries; j++) {
- temp = I915_READ(fdi_rx_iir_reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n",
- temp);
- if (temp & FDI_RX_SYMBOL_LOCK)
- break;
- udelay(200);
- }
- if (j != tries) {
- I915_WRITE(fdi_rx_iir_reg,
- temp | FDI_RX_SYMBOL_LOCK);
- DRM_DEBUG_KMS("train 2 ok 1!\n");
- } else
- DRM_DEBUG_KMS("train 2 fail\n");
- } else {
- I915_WRITE(fdi_rx_iir_reg,
- temp | FDI_RX_SYMBOL_LOCK);
- DRM_DEBUG_KMS("train 2 ok 2!\n");
+ if (HAS_PCH_CPT(dev)) {
+ /* Be sure PCH DPLL SEL is set */
+ temp = I915_READ(PCH_DPLL_SEL);
+ if (trans_dpll_sel == 0 &&
+ (temp & TRANSA_DPLL_ENABLE) == 0)
+ temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
+ else if (trans_dpll_sel == 1 &&
+ (temp & TRANSB_DPLL_ENABLE) == 0)
+ temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+ I915_WRITE(PCH_DPLL_SEL, temp);
+ I915_READ(PCH_DPLL_SEL);
}
- DRM_DEBUG_KMS("train done\n");
/* set transcoder timing */
I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg));
@@ -1724,6 +1859,60 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg));
I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg));
+ /* enable normal train */
+ temp = I915_READ(fdi_tx_reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE |
+ FDI_TX_ENHANCE_FRAME_ENABLE);
+ I915_READ(fdi_tx_reg);
+
+ temp = I915_READ(fdi_rx_reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_NONE;
+ }
+ I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+ I915_READ(fdi_rx_reg);
+
+ /* wait one idle pattern time */
+ udelay(100);
+
+ /* For PCH DP, enable TRANS_DP_CTL */
+ if (HAS_PCH_CPT(dev) &&
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+ int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B;
+ int reg;
+
+ reg = I915_READ(trans_dp_ctl);
+ reg &= ~TRANS_DP_PORT_SEL_MASK;
+ reg = TRANS_DP_OUTPUT_ENABLE |
+ TRANS_DP_ENH_FRAMING |
+ TRANS_DP_VSYNC_ACTIVE_HIGH |
+ TRANS_DP_HSYNC_ACTIVE_HIGH;
+
+ switch (intel_trans_dp_port_sel(crtc)) {
+ case PCH_DP_B:
+ reg |= TRANS_DP_PORT_SEL_B;
+ break;
+ case PCH_DP_C:
+ reg |= TRANS_DP_PORT_SEL_C;
+ break;
+ case PCH_DP_D:
+ reg |= TRANS_DP_PORT_SEL_D;
+ break;
+ default:
+ DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
+ reg |= TRANS_DP_PORT_SEL_B;
+ break;
+ }
+
+ I915_WRITE(trans_dp_ctl, reg);
+ POSTING_READ(trans_dp_ctl);
+ }
+
/* enable PCH transcoder */
temp = I915_READ(transconf_reg);
/*
@@ -1738,23 +1927,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0)
;
- /* enable normal */
-
- temp = I915_READ(fdi_tx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE |
- FDI_TX_ENHANCE_FRAME_ENABLE);
- I915_READ(fdi_tx_reg);
-
- temp = I915_READ(fdi_rx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- I915_WRITE(fdi_rx_reg, temp | FDI_LINK_TRAIN_NONE |
- FDI_RX_ENHANCE_FRAME_ENABLE);
- I915_READ(fdi_rx_reg);
-
- /* wait one idle pattern time */
- udelay(100);
-
}
intel_crtc_load_lut(crtc);
@@ -1805,6 +1977,8 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_READ(pf_ctl_reg);
}
I915_WRITE(pf_win_size, 0);
+ POSTING_READ(pf_win_size);
+
/* disable CPU FDI tx and PCH FDI rx */
temp = I915_READ(fdi_tx_reg);
@@ -1825,11 +1999,18 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
I915_WRITE(fdi_tx_reg, temp);
+ POSTING_READ(fdi_tx_reg);
temp = I915_READ(fdi_rx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ }
I915_WRITE(fdi_rx_reg, temp);
+ POSTING_READ(fdi_rx_reg);
udelay(100);
@@ -1859,6 +2040,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
}
+
temp = I915_READ(transconf_reg);
/* BPC in transcoder is consistent with that in pipeconf */
temp &= ~PIPE_BPC_MASK;
@@ -1867,35 +2049,53 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_READ(transconf_reg);
udelay(100);
+ if (HAS_PCH_CPT(dev)) {
+ /* disable TRANS_DP_CTL */
+ int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B;
+ int reg;
+
+ reg = I915_READ(trans_dp_ctl);
+ reg &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
+ I915_WRITE(trans_dp_ctl, reg);
+ POSTING_READ(trans_dp_ctl);
+
+ /* disable DPLL_SEL */
+ temp = I915_READ(PCH_DPLL_SEL);
+ if (trans_dpll_sel == 0)
+ temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
+ else
+ temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+ I915_WRITE(PCH_DPLL_SEL, temp);
+ I915_READ(PCH_DPLL_SEL);
+
+ }
+
/* disable PCH DPLL */
temp = I915_READ(pch_dpll_reg);
- if ((temp & DPLL_VCO_ENABLE) != 0) {
- I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
- I915_READ(pch_dpll_reg);
- }
+ I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
+ I915_READ(pch_dpll_reg);
if (HAS_eDP) {
ironlake_disable_pll_edp(crtc);
}
+ /* Switch from PCDclk to Rawclk */
temp = I915_READ(fdi_rx_reg);
temp &= ~FDI_SEL_PCDCLK;
I915_WRITE(fdi_rx_reg, temp);
I915_READ(fdi_rx_reg);
+ /* Disable CPU FDI TX PLL */
+ temp = I915_READ(fdi_tx_reg);
+ I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE);
+ I915_READ(fdi_tx_reg);
+ udelay(100);
+
temp = I915_READ(fdi_rx_reg);
temp &= ~FDI_RX_PLL_ENABLE;
I915_WRITE(fdi_rx_reg, temp);
I915_READ(fdi_rx_reg);
- /* Disable CPU FDI TX PLL */
- temp = I915_READ(fdi_tx_reg);
- if ((temp & FDI_TX_PLL_ENABLE) != 0) {
- I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE);
- I915_READ(fdi_tx_reg);
- udelay(100);
- }
-
/* Wait for the clocks to turn off. */
udelay(100);
break;
@@ -2331,6 +2531,30 @@ static struct intel_watermark_params i830_wm_info = {
I830_FIFO_LINE_SIZE
};
+static struct intel_watermark_params ironlake_display_wm_info = {
+ ILK_DISPLAY_FIFO,
+ ILK_DISPLAY_MAXWM,
+ ILK_DISPLAY_DFTWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+
+static struct intel_watermark_params ironlake_display_srwm_info = {
+ ILK_DISPLAY_SR_FIFO,
+ ILK_DISPLAY_MAX_SRWM,
+ ILK_DISPLAY_DFT_SRWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+
+static struct intel_watermark_params ironlake_cursor_srwm_info = {
+ ILK_CURSOR_SR_FIFO,
+ ILK_CURSOR_MAX_SRWM,
+ ILK_CURSOR_DFT_SRWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+
/**
* intel_calculate_wm - calculate watermark level
* @clock_in_khz: pixel clock
@@ -2449,66 +2673,6 @@ static void pineview_disable_cxsr(struct drm_device *dev)
DRM_INFO("Big FIFO is disabled\n");
}
-static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock,
- int pixel_size)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 reg;
- unsigned long wm;
- struct cxsr_latency *latency;
-
- latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq,
- dev_priv->mem_freq);
- if (!latency) {
- DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
- pineview_disable_cxsr(dev);
- return;
- }
-
- /* Display SR */
- wm = intel_calculate_wm(clock, &pineview_display_wm, pixel_size,
- latency->display_sr);
- reg = I915_READ(DSPFW1);
- reg &= 0x7fffff;
- reg |= wm << 23;
- I915_WRITE(DSPFW1, reg);
- DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
-
- /* cursor SR */
- wm = intel_calculate_wm(clock, &pineview_cursor_wm, pixel_size,
- latency->cursor_sr);
- reg = I915_READ(DSPFW3);
- reg &= ~(0x3f << 24);
- reg |= (wm & 0x3f) << 24;
- I915_WRITE(DSPFW3, reg);
-
- /* Display HPLL off SR */
- wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
- latency->display_hpll_disable, I915_FIFO_LINE_SIZE);
- reg = I915_READ(DSPFW3);
- reg &= 0xfffffe00;
- reg |= wm & 0x1ff;
- I915_WRITE(DSPFW3, reg);
-
- /* cursor HPLL off SR */
- wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, pixel_size,
- latency->cursor_hpll_disable);
- reg = I915_READ(DSPFW3);
- reg &= ~(0x3f << 16);
- reg |= (wm & 0x3f) << 16;
- I915_WRITE(DSPFW3, reg);
- DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
-
- /* activate cxsr */
- reg = I915_READ(DSPFW3);
- reg |= PINEVIEW_SELF_REFRESH_EN;
- I915_WRITE(DSPFW3, reg);
-
- DRM_INFO("Big FIFO is enabled\n");
-
- return;
-}
-
/*
* Latency for FIFO fetches is dependent on several factors:
* - memory configuration (speed, channels)
@@ -2593,6 +2757,71 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
return size;
}
+static void pineview_update_wm(struct drm_device *dev, int planea_clock,
+ int planeb_clock, int sr_hdisplay, int pixel_size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg;
+ unsigned long wm;
+ struct cxsr_latency *latency;
+ int sr_clock;
+
+ latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq,
+ dev_priv->mem_freq);
+ if (!latency) {
+ DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+ pineview_disable_cxsr(dev);
+ return;
+ }
+
+ if (!planea_clock || !planeb_clock) {
+ sr_clock = planea_clock ? planea_clock : planeb_clock;
+
+ /* Display SR */
+ wm = intel_calculate_wm(sr_clock, &pineview_display_wm,
+ pixel_size, latency->display_sr);
+ reg = I915_READ(DSPFW1);
+ reg &= ~DSPFW_SR_MASK;
+ reg |= wm << DSPFW_SR_SHIFT;
+ I915_WRITE(DSPFW1, reg);
+ DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
+
+ /* cursor SR */
+ wm = intel_calculate_wm(sr_clock, &pineview_cursor_wm,
+ pixel_size, latency->cursor_sr);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_CURSOR_SR_MASK;
+ reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
+ I915_WRITE(DSPFW3, reg);
+
+ /* Display HPLL off SR */
+ wm = intel_calculate_wm(sr_clock, &pineview_display_hplloff_wm,
+ pixel_size, latency->display_hpll_disable);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_HPLL_SR_MASK;
+ reg |= wm & DSPFW_HPLL_SR_MASK;
+ I915_WRITE(DSPFW3, reg);
+
+ /* cursor HPLL off SR */
+ wm = intel_calculate_wm(sr_clock, &pineview_cursor_hplloff_wm,
+ pixel_size, latency->cursor_hpll_disable);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_HPLL_CURSOR_MASK;
+ reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
+ I915_WRITE(DSPFW3, reg);
+ DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
+
+ /* activate cxsr */
+ reg = I915_READ(DSPFW3);
+ reg |= PINEVIEW_SELF_REFRESH_EN;
+ I915_WRITE(DSPFW3, reg);
+ DRM_DEBUG_KMS("Self-refresh is enabled\n");
+ } else {
+ pineview_disable_cxsr(dev);
+ DRM_DEBUG_KMS("Self-refresh is disabled\n");
+ }
+}
+
static void g4x_update_wm(struct drm_device *dev, int planea_clock,
int planeb_clock, int sr_hdisplay, int pixel_size)
{
@@ -2813,6 +3042,108 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
I915_WRITE(FW_BLC, fwater_lo);
}
+#define ILK_LP0_PLANE_LATENCY 700
+
+static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
+ int planeb_clock, int sr_hdisplay, int pixel_size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
+ int sr_wm, cursor_wm;
+ unsigned long line_time_us;
+ int sr_clock, entries_required;
+ u32 reg_value;
+
+ /* Calculate and update the watermark for plane A */
+ if (planea_clock) {
+ entries_required = ((planea_clock / 1000) * pixel_size *
+ ILK_LP0_PLANE_LATENCY) / 1000;
+ entries_required = DIV_ROUND_UP(entries_required,
+ ironlake_display_wm_info.cacheline_size);
+ planea_wm = entries_required +
+ ironlake_display_wm_info.guard_size;
+
+ if (planea_wm > (int)ironlake_display_wm_info.max_wm)
+ planea_wm = ironlake_display_wm_info.max_wm;
+
+ cursora_wm = 16;
+ reg_value = I915_READ(WM0_PIPEA_ILK);
+ reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) |
+ (cursora_wm & WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEA_ILK, reg_value);
+ DRM_DEBUG_KMS("FIFO watermarks For pipe A - plane %d, "
+ "cursor: %d\n", planea_wm, cursora_wm);
+ }
+ /* Calculate and update the watermark for plane B */
+ if (planeb_clock) {
+ entries_required = ((planeb_clock / 1000) * pixel_size *
+ ILK_LP0_PLANE_LATENCY) / 1000;
+ entries_required = DIV_ROUND_UP(entries_required,
+ ironlake_display_wm_info.cacheline_size);
+ planeb_wm = entries_required +
+ ironlake_display_wm_info.guard_size;
+
+ if (planeb_wm > (int)ironlake_display_wm_info.max_wm)
+ planeb_wm = ironlake_display_wm_info.max_wm;
+
+ cursorb_wm = 16;
+ reg_value = I915_READ(WM0_PIPEB_ILK);
+ reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) |
+ (cursorb_wm & WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEB_ILK, reg_value);
+ DRM_DEBUG_KMS("FIFO watermarks For pipe B - plane %d, "
+ "cursor: %d\n", planeb_wm, cursorb_wm);
+ }
+
+ /*
+ * Calculate and update the self-refresh watermark only when one
+ * display plane is used.
+ */
+ if (!planea_clock || !planeb_clock) {
+ int line_count;
+ /* Read the self-refresh latency. The unit is 0.5us */
+ int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK;
+
+ sr_clock = planea_clock ? planea_clock : planeb_clock;
+ line_time_us = ((sr_hdisplay * 1000) / sr_clock);
+
+ /* Use ns/us then divide to preserve precision */
+ line_count = ((ilk_sr_latency * 500) / line_time_us + 1000)
+ / 1000;
+
+ /* calculate the self-refresh watermark for display plane */
+ entries_required = line_count * sr_hdisplay * pixel_size;
+ entries_required = DIV_ROUND_UP(entries_required,
+ ironlake_display_srwm_info.cacheline_size);
+ sr_wm = entries_required +
+ ironlake_display_srwm_info.guard_size;
+
+ /* calculate the self-refresh watermark for display cursor */
+ entries_required = line_count * pixel_size * 64;
+ entries_required = DIV_ROUND_UP(entries_required,
+ ironlake_cursor_srwm_info.cacheline_size);
+ cursor_wm = entries_required +
+ ironlake_cursor_srwm_info.guard_size;
+
+ /* configure watermark and enable self-refresh */
+ reg_value = I915_READ(WM1_LP_ILK);
+ reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK |
+ WM1_LP_CURSOR_MASK);
+ reg_value |= WM1_LP_SR_EN |
+ (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
+ (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm;
+
+ I915_WRITE(WM1_LP_ILK, reg_value);
+ DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
+ "cursor %d\n", sr_wm, cursor_wm);
+
+ } else {
+ /* Turn off self refresh if both pipes are enabled */
+ I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
+ }
+}
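/*
 * Illustrative arithmetic for the LP0 watermark above (example figures;
 * ILK_FIFO_LINE_SIZE is assumed to be 64 bytes here): with
 * planea_clock = 148500 kHz and pixel_size = 4,
 * entries_required = (148500 / 1000) * 4 * 700 / 1000 = 414 bytes,
 * DIV_ROUND_UP(414, 64) = 7 cachelines, plus the guard size of 2,
 * gives planea_wm = 9. For the self-refresh case, sr_hdisplay = 1920
 * at the same clock gives line_time_us = 1920 * 1000 / 148500 = 12,
 * and an MLTR_ILK latency field of, say, 4 (2us in 0.5us units)
 * yields line_count = (4 * 500 / 12 + 1000) / 1000 = 1; sr_wm and
 * cursor_wm then follow the same round-up-and-add-guard pattern.
 */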
/**
* intel_update_watermarks - update FIFO watermark values based on current modes
*
@@ -2882,12 +3213,6 @@ static void intel_update_watermarks(struct drm_device *dev)
if (enabled <= 0)
return;
- /* Single plane configs can enable self refresh */
- if (enabled == 1 && IS_PINEVIEW(dev))
- pineview_enable_cxsr(dev, sr_clock, pixel_size);
- else if (IS_PINEVIEW(dev))
- pineview_disable_cxsr(dev);
-
dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
sr_hdisplay, pixel_size);
}
@@ -2924,7 +3249,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
bool is_edp = false;
struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct intel_encoder *intel_encoder = NULL;
const intel_limit_t *limit;
int ret;
struct fdi_m_n m_n = {0};
@@ -2935,6 +3261,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0;
int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
+ int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
+ int trans_dpll_sel = (pipe == 0) ? 0 : 1;
int lvds_reg = LVDS;
u32 temp;
int sdvo_pixel_multiply;
@@ -2942,12 +3270,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
drm_vblank_pre_modeset(dev, pipe);
- list_for_each_entry(connector, &mode_config->connector_list, head) {
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ list_for_each_entry(encoder, &mode_config->encoder_list, head) {
- if (!connector->encoder || connector->encoder->crtc != crtc)
+ if (!encoder || encoder->crtc != crtc)
continue;
+ intel_encoder = enc_to_intel_encoder(encoder);
+
switch (intel_encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
@@ -3043,14 +3372,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* FDI link */
if (HAS_PCH_SPLIT(dev)) {
- int lane, link_bw, bpp;
+ int lane = 0, link_bw, bpp;
/* eDP doesn't require FDI link, so just set DP M/N
according to current link config */
if (is_edp) {
- struct drm_connector *edp;
target_clock = mode->clock;
- edp = intel_pipe_get_connector(crtc);
- intel_edp_link_config(to_intel_encoder(edp),
+ intel_edp_link_config(intel_encoder,
&lane, &link_bw);
} else {
/* DP over FDI requires target mode clock
@@ -3059,7 +3386,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
target_clock = mode->clock;
else
target_clock = adjusted_mode->clock;
- lane = 4;
link_bw = 270000;
}
@@ -3111,6 +3437,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
bpp = 24;
}
+ if (!lane) {
+ /*
+ * Account for spread spectrum to avoid
+ * oversubscribing the link. Max center spread
+ * is 2.5%; use 5% for safety's sake.
+ */
+ u32 bps = target_clock * bpp * 21 / 20;
+ lane = bps / (link_bw * 8) + 1;
+ }
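/*
 * Worked example of the lane calculation above (illustrative numbers,
 * with target_clock and link_bw in kHz as in the surrounding code):
 * for a 1080p mode with target_clock = 148500, bpp = 24 and
 * link_bw = 270000, bps = 148500 * 24 * 21 / 20 = 3742200 kbit/s
 * including the 5% margin; each lane carries link_bw * 8 =
 * 2160000 kbit/s, so lane = 3742200 / 2160000 + 1 = 2 FDI lanes.
 */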
+
+ intel_crtc->fdi_lanes = lane;
+
ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
}
@@ -3265,11 +3603,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
}
- dspcntr |= DISPLAY_PLANE_ENABLE;
- pipeconf |= PIPEACONF_ENABLE;
- dpll |= DPLL_VCO_ENABLE;
-
-
/* Disable the panel fitter if it was on our pipe */
if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe)
I915_WRITE(PFIT_CONTROL, 0);
@@ -3292,6 +3625,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
udelay(150);
}
+ /* enable transcoder DPLL */
+ if (HAS_PCH_CPT(dev)) {
+ temp = I915_READ(PCH_DPLL_SEL);
+ if (trans_dpll_sel == 0)
+ temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
+ else
+ temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+ I915_WRITE(PCH_DPLL_SEL, temp);
+ I915_READ(PCH_DPLL_SEL);
+ udelay(150);
+ }
+
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
* This is an exception to the general rule that mode_set doesn't turn
* things on.
@@ -3303,7 +3648,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
lvds_reg = PCH_LVDS;
lvds = I915_READ(lvds_reg);
- lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT;
+ lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+ if (pipe == 1) {
+ if (HAS_PCH_CPT(dev))
+ lvds |= PORT_TRANS_B_SEL_CPT;
+ else
+ lvds |= LVDS_PIPEB_SELECT;
+ } else {
+ if (HAS_PCH_CPT(dev))
+ lvds &= ~PORT_TRANS_SEL_MASK;
+ else
+ lvds &= ~LVDS_PIPEB_SELECT;
+ }
/* set the corresponding LVDS_BORDER bit */
lvds |= dev_priv->lvds_border_bits;
/* Set the B0-B3 data pairs corresponding to whether we're going to
@@ -3337,6 +3693,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
if (is_dp)
intel_dp_set_m_n(crtc, mode, adjusted_mode);
+ else if (HAS_PCH_SPLIT(dev)) {
+ /* For non-DP output, clear any trans DP clock recovery setting. */
+ if (pipe == 0) {
+ I915_WRITE(TRANSA_DATA_M1, 0);
+ I915_WRITE(TRANSA_DATA_N1, 0);
+ I915_WRITE(TRANSA_DP_LINK_M1, 0);
+ I915_WRITE(TRANSA_DP_LINK_N1, 0);
+ } else {
+ I915_WRITE(TRANSB_DATA_M1, 0);
+ I915_WRITE(TRANSB_DATA_N1, 0);
+ I915_WRITE(TRANSB_DP_LINK_M1, 0);
+ I915_WRITE(TRANSB_DP_LINK_N1, 0);
+ }
+ }
if (!is_edp) {
I915_WRITE(fp_reg, fp);
@@ -3411,6 +3781,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* enable FDI RX PLL too */
temp = I915_READ(fdi_rx_reg);
I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
+ I915_READ(fdi_rx_reg);
+ udelay(200);
+
+ /* enable FDI TX PLL too */
+ temp = I915_READ(fdi_tx_reg);
+ I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
+ I915_READ(fdi_tx_reg);
+
+ /* enable FDI RX PCDCLK */
+ temp = I915_READ(fdi_rx_reg);
+ I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
+ I915_READ(fdi_rx_reg);
udelay(200);
}
}
@@ -3671,6 +4053,7 @@ static struct drm_display_mode load_detect_mode = {
};
struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
+ struct drm_connector *connector,
struct drm_display_mode *mode,
int *dpms_mode)
{
@@ -3729,7 +4112,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
}
encoder->crtc = crtc;
- intel_encoder->base.encoder = encoder;
+ connector->encoder = encoder;
intel_encoder->load_detect_temp = true;
intel_crtc = to_intel_crtc(crtc);
@@ -3755,7 +4138,8 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
return crtc;
}
-void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, int dpms_mode)
+void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
+ struct drm_connector *connector, int dpms_mode)
{
struct drm_encoder *encoder = &intel_encoder->enc;
struct drm_device *dev = encoder->dev;
@@ -3765,7 +4149,7 @@ void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, int dpm
if (intel_encoder->load_detect_temp) {
encoder->crtc = NULL;
- intel_encoder->base.encoder = NULL;
+ connector->encoder = NULL;
intel_encoder->load_detect_temp = false;
crtc->enabled = drm_helper_crtc_in_use(crtc);
drm_helper_disable_unused_functions(dev);
@@ -4392,14 +4776,14 @@ struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
return crtc;
}
-static int intel_connector_clones(struct drm_device *dev, int type_mask)
+static int intel_encoder_clones(struct drm_device *dev, int type_mask)
{
int index_mask = 0;
- struct drm_connector *connector;
+ struct drm_encoder *encoder;
int entry = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
if (type_mask & intel_encoder->clone_mask)
index_mask |= (1 << entry);
entry++;
@@ -4411,7 +4795,7 @@ static int intel_connector_clones(struct drm_device *dev, int type_mask)
static void intel_setup_outputs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_connector *connector;
+ struct drm_encoder *encoder;
intel_crt_init(dev);
@@ -4426,9 +4810,8 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_dp_init(dev, DP_A);
if (I915_READ(HDMIB) & PORT_DETECTED) {
- /* check SDVOB */
- /* found = intel_sdvo_init(dev, HDMIB); */
- found = 0;
+ /* PCH SDVOB is multiplexed with HDMIB */
+ found = intel_sdvo_init(dev, PCH_SDVOB);
if (!found)
intel_hdmi_init(dev, HDMIB);
if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
@@ -4494,12 +4877,11 @@ static void intel_setup_outputs(struct drm_device *dev)
if (SUPPORTS_TV(dev))
intel_tv_init(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct drm_encoder *encoder = &intel_encoder->enc;
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
encoder->possible_crtcs = intel_encoder->crtc_mask;
- encoder->possible_clones = intel_connector_clones(dev,
+ encoder->possible_clones = intel_encoder_clones(dev,
intel_encoder->clone_mask);
}
}
@@ -4507,10 +4889,6 @@ static void intel_setup_outputs(struct drm_device *dev)
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_device *dev = fb->dev;
-
- if (fb->fbdev)
- intelfb_remove(dev, fb);
drm_framebuffer_cleanup(fb);
drm_gem_object_unreference_unlocked(intel_fb->obj);
@@ -4533,18 +4911,13 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
.create_handle = intel_user_framebuffer_create_handle,
};
-int intel_framebuffer_create(struct drm_device *dev,
- struct drm_mode_fb_cmd *mode_cmd,
- struct drm_framebuffer **fb,
- struct drm_gem_object *obj)
+int intel_framebuffer_init(struct drm_device *dev,
+ struct intel_framebuffer *intel_fb,
+ struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_gem_object *obj)
{
- struct intel_framebuffer *intel_fb;
int ret;
- intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
- if (!intel_fb)
- return -ENOMEM;
-
ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
if (ret) {
DRM_ERROR("framebuffer init failed %d\n", ret);
@@ -4552,40 +4925,40 @@ int intel_framebuffer_create(struct drm_device *dev,
}
drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
-
intel_fb->obj = obj;
-
- *fb = &intel_fb->base;
-
return 0;
}
-
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
struct drm_file *filp,
struct drm_mode_fb_cmd *mode_cmd)
{
struct drm_gem_object *obj;
- struct drm_framebuffer *fb;
+ struct intel_framebuffer *intel_fb;
int ret;
obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle);
if (!obj)
return NULL;
- ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj);
+ intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ if (!intel_fb)
+ return NULL;
+
+ ret = intel_framebuffer_init(dev, intel_fb,
+ mode_cmd, obj);
if (ret) {
drm_gem_object_unreference_unlocked(obj);
+ kfree(intel_fb);
return NULL;
}
- return fb;
+ return &intel_fb->base;
}
static const struct drm_mode_config_funcs intel_mode_funcs = {
.fb_create = intel_user_framebuffer_create,
- .fb_changed = intelfb_probe,
};
static struct drm_gem_object *
@@ -4594,7 +4967,7 @@ intel_alloc_power_context(struct drm_device *dev)
struct drm_gem_object *pwrctx;
int ret;
- pwrctx = drm_gem_object_alloc(dev, 4096);
+ pwrctx = i915_gem_alloc_object(dev, 4096);
if (!pwrctx) {
DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
return NULL;
@@ -4732,6 +5105,25 @@ void intel_init_clock_gating(struct drm_device *dev)
}
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+
+ /*
+ * According to the spec the following bits should be set in
+ * order to enable memory self-refresh
+ * The bit 22/21 of 0x42004
+ * The bit 5 of 0x42020
+ * The bit 15 of 0x45000
+ */
+ if (IS_IRONLAKE(dev)) {
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ (I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_DPARB_GATE | ILK_VSDPFD_FULL));
+ I915_WRITE(ILK_DSPCLK_GATE,
+ (I915_READ(ILK_DSPCLK_GATE) |
+ ILK_DPARB_CLK_GATE));
+ I915_WRITE(DISP_ARB_CTL,
+ (I915_READ(DISP_ARB_CTL) |
+ DISP_FBC_WM_DIS));
+ }
return;
} else if (IS_G4X(dev)) {
uint32_t dspclk_gate;
@@ -4847,9 +5239,31 @@ static void intel_init_display(struct drm_device *dev)
i830_get_display_clock_speed;
/* For FIFO watermark updates */
- if (HAS_PCH_SPLIT(dev))
- dev_priv->display.update_wm = NULL;
- else if (IS_G4X(dev))
+ if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
+ if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
+ dev_priv->display.update_wm = ironlake_update_wm;
+ else {
+ DRM_DEBUG_KMS("Failed to get proper latency. "
+ "Disable CxSR\n");
+ dev_priv->display.update_wm = NULL;
+ }
+ } else
+ dev_priv->display.update_wm = NULL;
+ } else if (IS_PINEVIEW(dev)) {
+ if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
+ dev_priv->fsb_freq,
+ dev_priv->mem_freq)) {
+ DRM_INFO("failed to find known CxSR latency "
+ "(found fsb freq %d, mem freq %d), "
+ "disabling CxSR\n",
+ dev_priv->fsb_freq, dev_priv->mem_freq);
+ /* Disable CxSR and never update its watermark again */
+ pineview_disable_cxsr(dev);
+ dev_priv->display.update_wm = NULL;
+ } else
+ dev_priv->display.update_wm = pineview_update_wm;
+ } else if (IS_G4X(dev))
dev_priv->display.update_wm = g4x_update_wm;
else if (IS_I965G(dev))
dev_priv->display.update_wm = i965_update_wm;
@@ -4923,13 +5337,6 @@ void intel_modeset_init(struct drm_device *dev)
(unsigned long)dev);
intel_setup_overlay(dev);
-
- if (IS_PINEVIEW(dev) && !intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
- dev_priv->fsb_freq,
- dev_priv->mem_freq))
- DRM_INFO("failed to find known CxSR latency "
- "(found fsb freq %d, mem freq %d), disabling CxSR\n",
- dev_priv->fsb_freq, dev_priv->mem_freq);
}
void intel_modeset_cleanup(struct drm_device *dev)
@@ -4940,6 +5347,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
+ intel_fbdev_fini(dev);
+
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
/* Skip inactive CRTCs */
if (!crtc->fb)
@@ -4974,14 +5383,29 @@ void intel_modeset_cleanup(struct drm_device *dev)
}
-/* current intel driver doesn't take advantage of encoders
- always give back the encoder for the connector
-*/
-struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
+/*
+ * Return which encoder is currently attached to the connector.
+ */
+struct drm_encoder *intel_attached_encoder (struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+ int i;
+
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] == 0)
+ break;
+
+ obj = drm_mode_object_find(connector->dev,
+ connector->encoder_ids[i],
+ DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ continue;
- return &intel_encoder->enc;
+ encoder = obj_to_encoder(obj);
+ return encoder;
+ }
+ return NULL;
}
/*
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 77e40cfcf216..f6299bb788e5 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -48,8 +48,6 @@ struct intel_dp_priv {
uint32_t output_reg;
uint32_t DP;
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
- uint32_t save_DP;
- uint8_t save_link_configuration[DP_LINK_CONFIGURATION_SIZE];
bool has_audio;
int dpms_mode;
uint8_t link_bw;
@@ -141,7 +139,8 @@ static int
intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder));
int max_lanes = intel_dp_max_lane_count(intel_encoder);
@@ -215,7 +214,7 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder,
{
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint32_t output_reg = dp_priv->output_reg;
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ch_ctl = output_reg + 0x10;
uint32_t ch_data = ch_ctl + 4;
@@ -224,19 +223,27 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder,
uint32_t ctl;
uint32_t status;
uint32_t aux_clock_divider;
- int try;
+ int try, precharge;
/* The clock divider is based off the hrawclk,
* and would like to run at 2MHz. So, take the
* hrawclk value and divide by 2 and use that
*/
- if (IS_eDP(intel_encoder))
- aux_clock_divider = 225; /* eDP input clock at 450Mhz */
- else if (HAS_PCH_SPLIT(dev))
+ if (IS_eDP(intel_encoder)) {
+ if (IS_GEN6(dev))
+ aux_clock_divider = 200; /* SNB eDP input clock at 400MHz */
+ else
+ aux_clock_divider = 225; /* eDP input clock at 450MHz */
+ } else if (HAS_PCH_SPLIT(dev))
aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */
else
aux_clock_divider = intel_hrawclk(dev) / 2;
+ if (IS_GEN6(dev))
+ precharge = 3;
+ else
+ precharge = 5;
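/*
 * Quick sanity check on the dividers chosen above (illustrative): the
 * AUX channel targets a roughly 2MHz reference, so 450MHz / 225 = 2MHz
 * for eDP, 400MHz / 200 = 2MHz for SNB eDP, and the fixed 125MHz PCH
 * rawclk / 62 comes out at about 2.02MHz.
 */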
+
/* Must try at least 3 times according to DP spec */
for (try = 0; try < 5; try++) {
/* Load the send data into the aux channel data registers */
@@ -249,7 +256,7 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder,
ctl = (DP_AUX_CH_CTL_SEND_BUSY |
DP_AUX_CH_CTL_TIME_OUT_400us |
(send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
- (5 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
DP_AUX_CH_CTL_DONE |
DP_AUX_CH_CTL_TIME_OUT_ERROR |
@@ -465,7 +472,8 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
}
static int
-intel_dp_i2c_init(struct intel_encoder *intel_encoder, const char *name)
+intel_dp_i2c_init(struct intel_encoder *intel_encoder,
+ struct intel_connector *intel_connector, const char *name)
{
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
@@ -480,7 +488,7 @@ intel_dp_i2c_init(struct intel_encoder *intel_encoder, const char *name)
strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1);
dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0';
dp_priv->adapter.algo_data = &dp_priv->algo;
- dp_priv->adapter.dev.parent = &intel_encoder->base.kdev;
+ dp_priv->adapter.dev.parent = &intel_connector->base.kdev;
return i2c_dp_aux_add_bus(&dp_priv->adapter);
}
@@ -555,7 +563,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *connector;
+ struct drm_encoder *encoder;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int lane_count = 4;
@@ -564,13 +572,16 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
/*
* Find the lane count in the intel_encoder private
*/
- list_for_each_entry(connector, &mode_config->connector_list, head) {
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+ list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+ struct intel_encoder *intel_encoder;
+ struct intel_dp_priv *dp_priv;
- if (!connector->encoder || connector->encoder->crtc != crtc)
+ if (!encoder || encoder->crtc != crtc)
continue;
+ intel_encoder = enc_to_intel_encoder(encoder);
+ dp_priv = intel_encoder->dev_priv;
+
if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
lane_count = dp_priv->lane_count;
break;
@@ -626,16 +637,24 @@ static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ struct drm_device *dev = encoder->dev;
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
struct drm_crtc *crtc = intel_encoder->enc.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- dp_priv->DP = (DP_LINK_TRAIN_OFF |
- DP_VOLTAGE_0_4 |
- DP_PRE_EMPHASIS_0 |
- DP_SYNC_VS_HIGH |
- DP_SYNC_HS_HIGH);
+ dp_priv->DP = (DP_VOLTAGE_0_4 |
+ DP_PRE_EMPHASIS_0);
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ dp_priv->DP |= DP_SYNC_HS_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ dp_priv->DP |= DP_SYNC_VS_HIGH;
+
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+ dp_priv->DP |= DP_LINK_TRAIN_OFF_CPT;
+ else
+ dp_priv->DP |= DP_LINK_TRAIN_OFF;
switch (dp_priv->lane_count) {
case 1:
@@ -664,7 +683,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
dp_priv->DP |= DP_ENHANCED_FRAMING;
}
- if (intel_crtc->pipe == 1)
+ /* CPT DP's pipe select is decided in TRANS_DP_CTL */
+ if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
dp_priv->DP |= DP_PIPEB_SELECT;
if (IS_eDP(intel_encoder)) {
@@ -704,7 +724,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
{
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dp_reg = I915_READ(dp_priv->output_reg);
@@ -749,20 +769,6 @@ intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
return link_status[r - DP_LANE0_1_STATUS];
}
-static void
-intel_dp_save(struct drm_connector *connector)
-{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct drm_device *dev = intel_encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
-
- dp_priv->save_DP = I915_READ(dp_priv->output_reg);
- intel_dp_aux_native_read(intel_encoder, DP_LINK_BW_SET,
- dp_priv->save_link_configuration,
- sizeof (dp_priv->save_link_configuration));
-}
-
static uint8_t
intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane)
@@ -892,6 +898,25 @@ intel_dp_signal_levels(uint8_t train_set, int lane_count)
return signal_levels;
}
+/* Gen6's DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_gen6_edp_signal_levels(uint8_t train_set)
+{
+ switch (train_set & (DP_TRAIN_VOLTAGE_SWING_MASK|DP_TRAIN_PRE_EMPHASIS_MASK)) {
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_400MV_0DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+ return EDP_LINK_TRAIN_400MV_6DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_600MV_3_5DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_800MV_0DB_SNB_B;
+ default:
+ DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level\n");
+ return EDP_LINK_TRAIN_400MV_0DB_SNB_B;
+ }
+}
+
static uint8_t
intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane)
@@ -948,7 +973,7 @@ intel_dp_set_link_train(struct intel_encoder *intel_encoder,
uint8_t train_set[4],
bool first)
{
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
int ret;
@@ -974,7 +999,7 @@ static void
intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE])
{
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint8_t train_set[4];
@@ -985,23 +1010,38 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
bool channel_eq = false;
bool first = true;
int tries;
+ u32 reg;
/* Write the link configuration data */
- intel_dp_aux_native_write(intel_encoder, 0x100,
+ intel_dp_aux_native_write(intel_encoder, DP_LINK_BW_SET,
link_configuration, DP_LINK_CONFIGURATION_SIZE);
DP |= DP_PORT_EN;
- DP &= ~DP_LINK_TRAIN_MASK;
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+ DP &= ~DP_LINK_TRAIN_MASK_CPT;
+ else
+ DP &= ~DP_LINK_TRAIN_MASK;
memset(train_set, 0, 4);
voltage = 0xff;
tries = 0;
clock_recovery = false;
for (;;) {
/* Use train_set[0] to set the voltage and pre emphasis values */
- uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
- DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+ uint32_t signal_levels;
+ if (IS_GEN6(dev) && IS_eDP(intel_encoder)) {
+ signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
+ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
+ } else {
+ signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
+ DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+ }
- if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_1,
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+ reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
+ else
+ reg = DP | DP_LINK_TRAIN_PAT_1;
+
+ if (!intel_dp_set_link_train(intel_encoder, reg,
DP_TRAINING_PATTERN_1, train_set, first))
break;
first = false;
@@ -1041,11 +1081,23 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
channel_eq = false;
for (;;) {
/* Use train_set[0] to set the voltage and pre emphasis values */
- uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
- DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+ uint32_t signal_levels;
+
+ if (IS_GEN6(dev) && IS_eDP(intel_encoder)) {
+ signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
+ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
+ } else {
+ signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
+ DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+ }
+
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+ reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
+ else
+ reg = DP | DP_LINK_TRAIN_PAT_2;
/* channel eq pattern */
- if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_2,
+ if (!intel_dp_set_link_train(intel_encoder, reg,
DP_TRAINING_PATTERN_2, train_set,
false))
break;
@@ -1068,7 +1120,12 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
++tries;
}
- I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF);
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+ reg = DP | DP_LINK_TRAIN_OFF_CPT;
+ else
+ reg = DP | DP_LINK_TRAIN_OFF;
+
+ I915_WRITE(dp_priv->output_reg, reg);
POSTING_READ(dp_priv->output_reg);
intel_dp_aux_native_write_1(intel_encoder,
DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
@@ -1077,7 +1134,7 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
static void
intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
{
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
@@ -1090,9 +1147,15 @@ intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
udelay(100);
}
- DP &= ~DP_LINK_TRAIN_MASK;
- I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
- POSTING_READ(dp_priv->output_reg);
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) {
+ DP &= ~DP_LINK_TRAIN_MASK_CPT;
+ I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
+ POSTING_READ(dp_priv->output_reg);
+ } else {
+ DP &= ~DP_LINK_TRAIN_MASK;
+ I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
+ POSTING_READ(dp_priv->output_reg);
+ }
udelay(17000);
@@ -1102,18 +1165,6 @@ intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
POSTING_READ(dp_priv->output_reg);
}
-static void
-intel_dp_restore(struct drm_connector *connector)
-{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
-
- if (dp_priv->save_DP & DP_PORT_EN)
- intel_dp_link_train(intel_encoder, dp_priv->save_DP, dp_priv->save_link_configuration);
- else
- intel_dp_link_down(intel_encoder, dp_priv->save_DP);
-}
-
/*
* According to DP spec
* 5.1.2:
@@ -1144,7 +1195,8 @@ intel_dp_check_link_status(struct intel_encoder *intel_encoder)
static enum drm_connector_status
ironlake_dp_detect(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
enum drm_connector_status status;
@@ -1168,8 +1220,9 @@ ironlake_dp_detect(struct drm_connector *connector)
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint32_t temp, bit;
@@ -1180,16 +1233,6 @@ intel_dp_detect(struct drm_connector *connector)
if (HAS_PCH_SPLIT(dev))
return ironlake_dp_detect(connector);
- temp = I915_READ(PORT_HOTPLUG_EN);
-
- I915_WRITE(PORT_HOTPLUG_EN,
- temp |
- DPB_HOTPLUG_INT_EN |
- DPC_HOTPLUG_INT_EN |
- DPD_HOTPLUG_INT_EN);
-
- POSTING_READ(PORT_HOTPLUG_EN);
-
switch (dp_priv->output_reg) {
case DP_B:
bit = DPB_HOTPLUG_INT_STATUS;
@@ -1222,15 +1265,16 @@ intel_dp_detect(struct drm_connector *connector)
static int intel_dp_get_modes(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
/* We should parse the EDID data and find out if it has an audio sink
*/
- ret = intel_ddc_get_modes(intel_encoder);
+ ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
if (ret)
return ret;
@@ -1249,13 +1293,9 @@ static int intel_dp_get_modes(struct drm_connector *connector)
static void
intel_dp_destroy (struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-
- if (intel_encoder->i2c_bus)
- intel_i2c_destroy(intel_encoder->i2c_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
- kfree(intel_encoder);
+ kfree(connector);
}
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
@@ -1268,8 +1308,6 @@ static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
static const struct drm_connector_funcs intel_dp_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .save = intel_dp_save,
- .restore = intel_dp_restore,
.detect = intel_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = intel_dp_destroy,
@@ -1278,12 +1316,17 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
.get_modes = intel_dp_get_modes,
.mode_valid = intel_dp_mode_valid,
- .best_encoder = intel_best_encoder,
+ .best_encoder = intel_attached_encoder,
};
static void intel_dp_enc_destroy(struct drm_encoder *encoder)
{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+ if (intel_encoder->i2c_bus)
+ intel_i2c_destroy(intel_encoder->i2c_bus);
drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
@@ -1299,12 +1342,35 @@ intel_dp_hot_plug(struct intel_encoder *intel_encoder)
intel_dp_check_link_status(intel_encoder);
}
+/* Return which DP Port should be selected for Transcoder DP control */
+int
+intel_trans_dp_port_sel (struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_encoder *encoder;
+ struct intel_encoder *intel_encoder = NULL;
+
+ list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+ if (!encoder || encoder->crtc != crtc)
+ continue;
+
+ intel_encoder = enc_to_intel_encoder(encoder);
+ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+ return dp_priv->output_reg;
+ }
+ }
+ return -1;
+}
+
void
intel_dp_init(struct drm_device *dev, int output_reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
struct intel_dp_priv *dp_priv;
const char *name = NULL;
@@ -1313,9 +1379,15 @@ intel_dp_init(struct drm_device *dev, int output_reg)
if (!intel_encoder)
return;
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_encoder);
+ return;
+ }
+
dp_priv = (struct intel_dp_priv *)(intel_encoder + 1);
- connector = &intel_encoder->base;
+ connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_dp_connector_funcs,
DRM_MODE_CONNECTOR_DisplayPort);
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
@@ -1349,7 +1421,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs);
- drm_mode_connector_attach_encoder(&intel_encoder->base,
+ drm_mode_connector_attach_encoder(&intel_connector->base,
&intel_encoder->enc);
drm_sysfs_connector_add(connector);
@@ -1378,7 +1450,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
break;
}
- intel_dp_i2c_init(intel_encoder, name);
+ intel_dp_i2c_init(intel_encoder, intel_connector, name);
intel_encoder->ddc_bus = &dp_priv->adapter;
intel_encoder->hot_plug = intel_dp_hot_plug;
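intel_trans_dp_port_sel() above walks the CRTC's encoders and hands back the attached DisplayPort encoder's output register, or -1 when the CRTC drives no DP output. A minimal caller sketch; the helper name below is made up and the call site is not part of this patch:

/* Hypothetical caller (illustrative only): skip transcoder DP
 * programming when the CRTC has no DisplayPort encoder. */
static bool example_crtc_drives_dp(struct drm_crtc *crtc)
{
	return intel_trans_dp_port_sel(crtc) != -1;
}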
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index e30253755f12..3230e8d2ea43 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -96,8 +96,6 @@ struct intel_framebuffer {
struct intel_encoder {
- struct drm_connector base;
-
struct drm_encoder enc;
int type;
struct i2c_adapter *i2c_bus;
@@ -110,6 +108,11 @@ struct intel_encoder {
int clone_mask;
};
+struct intel_connector {
+ struct drm_connector base;
+ void *dev_priv;
+};
+
struct intel_crtc;
struct intel_overlay {
struct drm_device *dev;
@@ -149,17 +152,18 @@ struct intel_crtc {
bool lowfreq_avail;
struct intel_overlay *overlay;
struct intel_unpin_work *unpin_work;
+ int fdi_lanes;
};
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
-#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
+#define to_intel_connector(x) container_of(x, struct intel_connector, base)
#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
const char *name);
void intel_i2c_destroy(struct i2c_adapter *adapter);
-int intel_ddc_get_modes(struct intel_encoder *intel_encoder);
+int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
extern bool intel_ddc_probe(struct intel_encoder *intel_encoder);
void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
void intel_i2c_reset_gmbus(struct drm_device *dev);
@@ -183,7 +187,7 @@ extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_encoder_prepare (struct drm_encoder *encoder);
extern void intel_encoder_commit (struct drm_encoder *encoder);
-extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
+extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector);
extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc);
@@ -192,17 +196,16 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
extern void intel_wait_for_vblank(struct drm_device *dev);
extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
+ struct drm_connector *connector,
struct drm_display_mode *mode,
int *dpms_mode);
extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
+ struct drm_connector *connector,
int dpms_mode);
extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB);
extern int intel_sdvo_supports_hotplug(struct drm_connector *connector);
extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable);
-extern int intelfb_probe(struct drm_device *dev);
-extern int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
-extern int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc);
extern void intelfb_restore(void);
extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
@@ -212,10 +215,12 @@ extern void intel_init_clock_gating(struct drm_device *dev);
extern void ironlake_enable_drps(struct drm_device *dev);
extern void ironlake_disable_drps(struct drm_device *dev);
-extern int intel_framebuffer_create(struct drm_device *dev,
- struct drm_mode_fb_cmd *mode_cmd,
- struct drm_framebuffer **fb,
- struct drm_gem_object *obj);
+extern int intel_framebuffer_init(struct drm_device *dev,
+ struct intel_framebuffer *ifb,
+ struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_gem_object *obj);
+extern int intel_fbdev_init(struct drm_device *dev);
+extern void intel_fbdev_fini(struct drm_device *dev);
extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
@@ -229,4 +234,6 @@ extern int intel_overlay_put_image(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int intel_overlay_attrs(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+
+void intelfb_hotplug(struct drm_device *dev, bool polled);
#endif /* __INTEL_DRV_H__ */
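With drm_connector split out of struct intel_encoder into the new struct intel_connector, connector callbacks can no longer use the removed to_intel_encoder() on a connector; they go through intel_attached_encoder() and enc_to_intel_encoder() instead. A minimal sketch of that lookup, using only the declarations above (the helper name is invented):

/* Illustrative only: the connector-to-encoder lookup the following
 * files switch to after the split. */
static struct intel_encoder *
example_attached_intel_encoder(struct drm_connector *connector)
{
	struct drm_encoder *encoder = intel_attached_encoder(connector);

	return enc_to_intel_encoder(encoder);
}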
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index ebf213c96b9c..227feca7cf8d 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -96,39 +96,11 @@ static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
}
}
-static void intel_dvo_save(struct drm_connector *connector)
-{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_dvo_device *dvo = intel_encoder->dev_priv;
-
- /* Each output should probably just save the registers it touches,
- * but for now, use more overkill.
- */
- dev_priv->saveDVOA = I915_READ(DVOA);
- dev_priv->saveDVOB = I915_READ(DVOB);
- dev_priv->saveDVOC = I915_READ(DVOC);
-
- dvo->dev_ops->save(dvo);
-}
-
-static void intel_dvo_restore(struct drm_connector *connector)
-{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_dvo_device *dvo = intel_encoder->dev_priv;
-
- dvo->dev_ops->restore(dvo);
-
- I915_WRITE(DVOA, dev_priv->saveDVOA);
- I915_WRITE(DVOB, dev_priv->saveDVOB);
- I915_WRITE(DVOC, dev_priv->saveDVOC);
-}
-
static int intel_dvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dvo_device *dvo = intel_encoder->dev_priv;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -241,7 +213,8 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
*/
static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dvo_device *dvo = intel_encoder->dev_priv;
return dvo->dev_ops->detect(dvo);
@@ -249,7 +222,8 @@ static enum drm_connector_status intel_dvo_detect(struct drm_connector *connecto
static int intel_dvo_get_modes(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dvo_device *dvo = intel_encoder->dev_priv;
/* We should probably have an i2c driver get_modes function for those
@@ -257,7 +231,7 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
* (TV-out, for example), but for now with just TMDS and LVDS,
* that's not the case.
*/
- intel_ddc_get_modes(intel_encoder);
+ intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
if (!list_empty(&connector->probed_modes))
return 1;
@@ -275,38 +249,10 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
static void intel_dvo_destroy (struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_dvo_device *dvo = intel_encoder->dev_priv;
-
- if (dvo) {
- if (dvo->dev_ops->destroy)
- dvo->dev_ops->destroy(dvo);
- if (dvo->panel_fixed_mode)
- kfree(dvo->panel_fixed_mode);
- /* no need, in i830_dvoices[] now */
- //kfree(dvo);
- }
- if (intel_encoder->i2c_bus)
- intel_i2c_destroy(intel_encoder->i2c_bus);
- if (intel_encoder->ddc_bus)
- intel_i2c_destroy(intel_encoder->ddc_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
- kfree(intel_encoder);
-}
-
-#ifdef RANDR_GET_CRTC_INTERFACE
-static struct drm_crtc *intel_dvo_get_crtc(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_dvo_device *dvo = intel_encoder->dev_priv;
- int pipe = !!(I915_READ(dvo->dvo_reg) & SDVO_PIPE_B_SELECT);
-
- return intel_pipe_to_crtc(pScrn, pipe);
+ kfree(connector);
}
-#endif
static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
.dpms = intel_dvo_dpms,
@@ -318,8 +264,6 @@ static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .save = intel_dvo_save,
- .restore = intel_dvo_restore,
.detect = intel_dvo_detect,
.destroy = intel_dvo_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -328,12 +272,26 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = {
static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
.mode_valid = intel_dvo_mode_valid,
.get_modes = intel_dvo_get_modes,
- .best_encoder = intel_best_encoder,
+ .best_encoder = intel_attached_encoder,
};
static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
+
+ if (dvo) {
+ if (dvo->dev_ops->destroy)
+ dvo->dev_ops->destroy(dvo);
+ if (dvo->panel_fixed_mode)
+ kfree(dvo->panel_fixed_mode);
+ }
+ if (intel_encoder->i2c_bus)
+ intel_i2c_destroy(intel_encoder->i2c_bus);
+ if (intel_encoder->ddc_bus)
+ intel_i2c_destroy(intel_encoder->ddc_bus);
drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
@@ -352,7 +310,8 @@ intel_dvo_get_current_mode (struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dvo_device *dvo = intel_encoder->dev_priv;
uint32_t dvo_reg = dvo->dvo_reg;
uint32_t dvo_val = I915_READ(dvo_reg);
@@ -384,6 +343,7 @@ intel_dvo_get_current_mode (struct drm_connector *connector)
void intel_dvo_init(struct drm_device *dev)
{
struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
struct intel_dvo_device *dvo;
struct i2c_adapter *i2cbus = NULL;
int ret = 0;
@@ -393,6 +353,12 @@ void intel_dvo_init(struct drm_device *dev)
if (!intel_encoder)
return;
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_encoder);
+ return;
+ }
+
/* Set up the DDC bus */
intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
if (!intel_encoder->ddc_bus)
@@ -400,7 +366,7 @@ void intel_dvo_init(struct drm_device *dev)
/* Now, try to find a controller */
for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
- struct drm_connector *connector = &intel_encoder->base;
+ struct drm_connector *connector = &intel_connector->base;
int gpio;
dvo = &intel_dvo_devices[i];
@@ -471,7 +437,7 @@ void intel_dvo_init(struct drm_device *dev)
drm_encoder_helper_add(&intel_encoder->enc,
&intel_dvo_helper_funcs);
- drm_mode_connector_attach_encoder(&intel_encoder->base,
+ drm_mode_connector_attach_encoder(&intel_connector->base,
&intel_encoder->enc);
if (dvo->type == INTEL_DVO_CHIP_LVDS) {
/* For our LVDS chipsets, we should hopefully be able
@@ -496,4 +462,5 @@ void intel_dvo_init(struct drm_device *dev)
intel_i2c_destroy(i2cbus);
free_intel:
kfree(intel_encoder);
+ kfree(intel_connector);
}
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 8a0b3bcdc7b1..b04e0a86bf9a 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -44,9 +44,10 @@
#include "i915_drm.h"
#include "i915_drv.h"
-struct intelfb_par {
+struct intel_fbdev {
struct drm_fb_helper helper;
- struct intel_framebuffer *intel_fb;
+ struct intel_framebuffer ifb;
+ struct list_head fbdev_list;
struct drm_display_mode *our_mode;
};
@@ -54,7 +55,6 @@ static struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_setcolreg = drm_fb_helper_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
@@ -63,62 +63,12 @@ static struct fb_ops intelfb_ops = {
.fb_setcmap = drm_fb_helper_setcmap,
};
-static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
- .gamma_set = intel_crtc_fb_gamma_set,
- .gamma_get = intel_crtc_fb_gamma_get,
-};
-
-
-/**
- * Currently it is assumed that the old framebuffer is reused.
- *
- * LOCKING
- * caller should hold the mode config lock.
- *
- */
-int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc)
-{
- struct fb_info *info;
- struct drm_framebuffer *fb;
- struct drm_display_mode *mode = crtc->desired_mode;
-
- fb = crtc->fb;
- if (!fb)
- return 1;
-
- info = fb->fbdev;
- if (!info)
- return 1;
-
- if (!mode)
- return 1;
-
- info->var.xres = mode->hdisplay;
- info->var.right_margin = mode->hsync_start - mode->hdisplay;
- info->var.hsync_len = mode->hsync_end - mode->hsync_start;
- info->var.left_margin = mode->htotal - mode->hsync_end;
- info->var.yres = mode->vdisplay;
- info->var.lower_margin = mode->vsync_start - mode->vdisplay;
- info->var.vsync_len = mode->vsync_end - mode->vsync_start;
- info->var.upper_margin = mode->vtotal - mode->vsync_end;
- info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100;
- /* avoid overflow */
- info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;
-
- return 0;
-}
-EXPORT_SYMBOL(intelfb_resize);
-
-static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
- uint32_t fb_height, uint32_t surface_width,
- uint32_t surface_height,
- uint32_t surface_depth, uint32_t surface_bpp,
- struct drm_framebuffer **fb_p)
+static int intelfb_create(struct intel_fbdev *ifbdev,
+ struct drm_fb_helper_surface_size *sizes)
{
+ struct drm_device *dev = ifbdev->helper.dev;
struct fb_info *info;
- struct intelfb_par *par;
struct drm_framebuffer *fb;
- struct intel_framebuffer *intel_fb;
struct drm_mode_fb_cmd mode_cmd;
struct drm_gem_object *fbo = NULL;
struct drm_i915_gem_object *obj_priv;
@@ -126,19 +76,19 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1;
/* we don't do packed 24bpp */
- if (surface_bpp == 24)
- surface_bpp = 32;
+ if (sizes->surface_bpp == 24)
+ sizes->surface_bpp = 32;
- mode_cmd.width = surface_width;
- mode_cmd.height = surface_height;
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
- mode_cmd.bpp = surface_bpp;
+ mode_cmd.bpp = sizes->surface_bpp;
mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64);
- mode_cmd.depth = surface_depth;
+ mode_cmd.depth = sizes->surface_depth;
size = mode_cmd.pitch * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);
- fbo = drm_gem_object_alloc(dev, size);
+ fbo = i915_gem_alloc_object(dev, size);
if (!fbo) {
DRM_ERROR("failed to allocate framebuffer\n");
ret = -ENOMEM;
@@ -157,39 +107,26 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
/* Flush everything out, we'll be doing GTT only from now on */
i915_gem_object_set_to_gtt_domain(fbo, 1);
- ret = intel_framebuffer_create(dev, &mode_cmd, &fb, fbo);
- if (ret) {
- DRM_ERROR("failed to allocate fb.\n");
- goto out_unpin;
- }
-
- list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
-
- intel_fb = to_intel_framebuffer(fb);
- *fb_p = fb;
-
- info = framebuffer_alloc(sizeof(struct intelfb_par), device);
+ info = framebuffer_alloc(0, device);
if (!info) {
ret = -ENOMEM;
goto out_unpin;
}
- par = info->par;
+ info->par = ifbdev;
- par->helper.funcs = &intel_fb_helper_funcs;
- par->helper.dev = dev;
- ret = drm_fb_helper_init_crtc_count(&par->helper, 2,
- INTELFB_CONN_LIMIT);
- if (ret)
- goto out_unref;
+ intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, fbo);
+
+ fb = &ifbdev->ifb.base;
+
+ ifbdev->helper.fb = fb;
+ ifbdev->helper.fbdev = info;
strcpy(info->fix.id, "inteldrmfb");
info->flags = FBINFO_DEFAULT;
-
info->fbops = &intelfb_ops;
-
/* setup aperture base/size for vesafb takeover */
info->aperture_base = dev->mode_config.fb_base;
if (IS_I9XX(dev))
@@ -208,12 +145,18 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
ret = -ENOSPC;
goto out_unpin;
}
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_unpin;
+ }
info->screen_size = size;
// memset(info->screen_base, 0, size);
drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
- drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
+ drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
/* FIXME: we really shouldn't expose mmio space at all */
info->fix.mmio_start = pci_resource_start(dev->pdev, mmio_bar);
@@ -225,14 +168,10 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
info->pixmap.flags = FB_PIXMAP_SYSTEM;
info->pixmap.scan_align = 1;
- fb->fbdev = info;
-
- par->intel_fb = intel_fb;
-
- /* To allow resizeing without swapping buffers */
DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
- intel_fb->base.width, intel_fb->base.height,
- obj_priv->gtt_offset, fbo);
+ fb->width, fb->height,
+ obj_priv->gtt_offset, fbo);
+
mutex_unlock(&dev->struct_mutex);
vga_switcheroo_client_fb_set(dev->pdev, info);
@@ -247,35 +186,86 @@ out:
return ret;
}
-int intelfb_probe(struct drm_device *dev)
+static int intel_fb_find_or_create_single(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
{
+ struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper;
+ int new_fb = 0;
int ret;
- DRM_DEBUG_KMS("\n");
- ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create);
- return ret;
+ if (!helper->fb) {
+ ret = intelfb_create(ifbdev, sizes);
+ if (ret)
+ return ret;
+ new_fb = 1;
+ }
+ return new_fb;
}
-EXPORT_SYMBOL(intelfb_probe);
-int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
+void intelfb_hotplug(struct drm_device *dev, bool polled)
{
- struct fb_info *info;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_helper_fb_hpd_irq_event(&dev_priv->fbdev->helper);
+}
- if (!fb)
- return -EINVAL;
+static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
+ .gamma_set = intel_crtc_fb_gamma_set,
+ .gamma_get = intel_crtc_fb_gamma_get,
+ .fb_probe = intel_fb_find_or_create_single,
+};
- info = fb->fbdev;
+int intel_fbdev_destroy(struct drm_device *dev,
+ struct intel_fbdev *ifbdev)
+{
+ struct fb_info *info;
+ struct intel_framebuffer *ifb = &ifbdev->ifb;
- if (info) {
- struct intelfb_par *par = info->par;
+ if (ifbdev->helper.fbdev) {
+ info = ifbdev->helper.fbdev;
unregister_framebuffer(info);
iounmap(info->screen_base);
- if (info->par)
- drm_fb_helper_free(&par->helper);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
}
+ drm_fb_helper_fini(&ifbdev->helper);
+
+ drm_framebuffer_cleanup(&ifb->base);
+ if (ifb->obj)
+ drm_gem_object_unreference_unlocked(ifb->obj);
+
+ return 0;
+}
+
+int intel_fbdev_init(struct drm_device *dev)
+{
+ struct intel_fbdev *ifbdev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
+ if (!ifbdev)
+ return -ENOMEM;
+
+ dev_priv->fbdev = ifbdev;
+ ifbdev->helper.funcs = &intel_fb_helper_funcs;
+
+ drm_fb_helper_init(dev, &ifbdev->helper, 2,
+ INTELFB_CONN_LIMIT, false);
+
+ drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
+ drm_fb_helper_initial_config(&ifbdev->helper, 32);
return 0;
}
-EXPORT_SYMBOL(intelfb_remove);
+
+void intel_fbdev_fini(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ if (!dev_priv->fbdev)
+ return;
+
+ intel_fbdev_destroy(dev, dev_priv->fbdev);
+ kfree(dev_priv->fbdev);
+ dev_priv->fbdev = NULL;
+}
MODULE_LICENSE("GPL and additional rights");
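The removed intelfb_probe()/intelfb_remove()/intelfb_resize() exports are replaced by intel_fbdev_init() and intel_fbdev_fini(). A lifecycle sketch, assuming the driver's modeset load and unload paths call them once each; those call sites are not shown in this hunk and the wrapper names are invented:

/* Assumed pairing at driver load/unload (illustrative only). */
static int example_load_fbdev(struct drm_device *dev)
{
	return intel_fbdev_init(dev);	/* allocates dev_priv->fbdev */
}

static void example_unload_fbdev(struct drm_device *dev)
{
	intel_fbdev_fini(dev);		/* no-op if fbdev was never set up */
}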
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 48cade0cf7b1..8a1c4eddc030 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -39,7 +39,6 @@
struct intel_hdmi_priv {
u32 sdvox_reg;
- u32 save_SDVOX;
bool has_hdmi_sink;
};
@@ -63,8 +62,12 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
if (hdmi_priv->has_hdmi_sink)
sdvox |= SDVO_AUDIO_ENABLE;
- if (intel_crtc->pipe == 1)
- sdvox |= SDVO_PIPE_B_SELECT;
+ if (intel_crtc->pipe == 1) {
+ if (HAS_PCH_CPT(dev))
+ sdvox |= PORT_TRANS_B_SEL_CPT;
+ else
+ sdvox |= SDVO_PIPE_B_SELECT;
+ }
I915_WRITE(hdmi_priv->sdvox_reg, sdvox);
POSTING_READ(hdmi_priv->sdvox_reg);
@@ -106,27 +109,6 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
}
}
-static void intel_hdmi_save(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
-
- hdmi_priv->save_SDVOX = I915_READ(hdmi_priv->sdvox_reg);
-}
-
-static void intel_hdmi_restore(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
-
- I915_WRITE(hdmi_priv->sdvox_reg, hdmi_priv->save_SDVOX);
- POSTING_READ(hdmi_priv->sdvox_reg);
-}
-
static int intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
@@ -151,13 +133,14 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
struct edid *edid = NULL;
enum drm_connector_status status = connector_status_disconnected;
hdmi_priv->has_hdmi_sink = false;
- edid = drm_get_edid(&intel_encoder->base,
+ edid = drm_get_edid(connector,
intel_encoder->ddc_bus);
if (edid) {
@@ -165,7 +148,7 @@ intel_hdmi_detect(struct drm_connector *connector)
status = connector_status_connected;
hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
}
- intel_encoder->base.display_info.raw_edid = NULL;
+ connector->display_info.raw_edid = NULL;
kfree(edid);
}
@@ -174,24 +157,21 @@ intel_hdmi_detect(struct drm_connector *connector)
static int intel_hdmi_get_modes(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
/* We should parse the EDID data and find out if it's an HDMI sink so
* we can send audio to it.
*/
- return intel_ddc_get_modes(intel_encoder);
+ return intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
}
static void intel_hdmi_destroy(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-
- if (intel_encoder->i2c_bus)
- intel_i2c_destroy(intel_encoder->i2c_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
- kfree(intel_encoder);
+ kfree(connector);
}
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
@@ -204,8 +184,6 @@ static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .save = intel_hdmi_save,
- .restore = intel_hdmi_restore,
.detect = intel_hdmi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = intel_hdmi_destroy,
@@ -214,12 +192,17 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
.get_modes = intel_hdmi_get_modes,
.mode_valid = intel_hdmi_mode_valid,
- .best_encoder = intel_best_encoder,
+ .best_encoder = intel_attached_encoder,
};
static void intel_hdmi_enc_destroy(struct drm_encoder *encoder)
{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+ if (intel_encoder->i2c_bus)
+ intel_i2c_destroy(intel_encoder->i2c_bus);
drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
@@ -231,15 +214,23 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
struct intel_hdmi_priv *hdmi_priv;
intel_encoder = kcalloc(sizeof(struct intel_encoder) +
sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL);
if (!intel_encoder)
return;
+
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_encoder);
+ return;
+ }
+
hdmi_priv = (struct intel_hdmi_priv *)(intel_encoder + 1);
- connector = &intel_encoder->base;
+ connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
@@ -285,7 +276,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs);
- drm_mode_connector_attach_encoder(&intel_encoder->base,
+ drm_mode_connector_attach_encoder(&intel_connector->base,
&intel_encoder->enc);
drm_sysfs_connector_add(connector);
@@ -303,6 +294,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
err_connector:
drm_connector_cleanup(connector);
kfree(intel_encoder);
+ kfree(intel_connector);
return;
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b66806a37d37..6a1accd83aec 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -139,75 +139,6 @@ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
/* XXX: We never power down the LVDS pairs. */
}
-static void intel_lvds_save(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
- u32 pwm_ctl_reg;
-
- if (HAS_PCH_SPLIT(dev)) {
- pp_on_reg = PCH_PP_ON_DELAYS;
- pp_off_reg = PCH_PP_OFF_DELAYS;
- pp_ctl_reg = PCH_PP_CONTROL;
- pp_div_reg = PCH_PP_DIVISOR;
- pwm_ctl_reg = BLC_PWM_CPU_CTL;
- } else {
- pp_on_reg = PP_ON_DELAYS;
- pp_off_reg = PP_OFF_DELAYS;
- pp_ctl_reg = PP_CONTROL;
- pp_div_reg = PP_DIVISOR;
- pwm_ctl_reg = BLC_PWM_CTL;
- }
-
- dev_priv->savePP_ON = I915_READ(pp_on_reg);
- dev_priv->savePP_OFF = I915_READ(pp_off_reg);
- dev_priv->savePP_CONTROL = I915_READ(pp_ctl_reg);
- dev_priv->savePP_DIVISOR = I915_READ(pp_div_reg);
- dev_priv->saveBLC_PWM_CTL = I915_READ(pwm_ctl_reg);
- dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
- BACKLIGHT_DUTY_CYCLE_MASK);
-
- /*
- * If the light is off at server startup, just make it full brightness
- */
- if (dev_priv->backlight_duty_cycle == 0)
- dev_priv->backlight_duty_cycle =
- intel_lvds_get_max_backlight(dev);
-}
-
-static void intel_lvds_restore(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
- u32 pwm_ctl_reg;
-
- if (HAS_PCH_SPLIT(dev)) {
- pp_on_reg = PCH_PP_ON_DELAYS;
- pp_off_reg = PCH_PP_OFF_DELAYS;
- pp_ctl_reg = PCH_PP_CONTROL;
- pp_div_reg = PCH_PP_DIVISOR;
- pwm_ctl_reg = BLC_PWM_CPU_CTL;
- } else {
- pp_on_reg = PP_ON_DELAYS;
- pp_off_reg = PP_OFF_DELAYS;
- pp_ctl_reg = PP_CONTROL;
- pp_div_reg = PP_DIVISOR;
- pwm_ctl_reg = BLC_PWM_CTL;
- }
-
- I915_WRITE(pwm_ctl_reg, dev_priv->saveBLC_PWM_CTL);
- I915_WRITE(pp_on_reg, dev_priv->savePP_ON);
- I915_WRITE(pp_off_reg, dev_priv->savePP_OFF);
- I915_WRITE(pp_div_reg, dev_priv->savePP_DIVISOR);
- I915_WRITE(pp_ctl_reg, dev_priv->savePP_CONTROL);
- if (dev_priv->savePP_CONTROL & POWER_TARGET_ON)
- intel_lvds_set_power(dev, true);
- else
- intel_lvds_set_power(dev, false);
-}
-
static int intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
@@ -635,12 +566,13 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect
static int intel_lvds_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
if (dev_priv->lvds_edid_good) {
- ret = intel_ddc_get_modes(intel_encoder);
+ ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
if (ret)
return ret;
@@ -717,11 +649,8 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
static void intel_lvds_destroy(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct drm_i915_private *dev_priv = dev->dev_private;
- if (intel_encoder->ddc_bus)
- intel_i2c_destroy(intel_encoder->ddc_bus);
if (dev_priv->lid_notifier.notifier_call)
acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
drm_sysfs_connector_remove(connector);
@@ -734,13 +663,14 @@ static int intel_lvds_set_property(struct drm_connector *connector,
uint64_t value)
{
struct drm_device *dev = connector->dev;
- struct intel_encoder *intel_encoder =
- to_intel_encoder(connector);
if (property == dev->mode_config.scaling_mode_property &&
connector->encoder) {
struct drm_crtc *crtc = connector->encoder->crtc;
+ struct drm_encoder *encoder = connector->encoder;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
+
if (value == DRM_MODE_SCALE_NONE) {
DRM_DEBUG_KMS("no scaling not supported\n");
return 0;
@@ -774,13 +704,11 @@ static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
.get_modes = intel_lvds_get_modes,
.mode_valid = intel_lvds_mode_valid,
- .best_encoder = intel_best_encoder,
+ .best_encoder = intel_attached_encoder,
};
static const struct drm_connector_funcs intel_lvds_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .save = intel_lvds_save,
- .restore = intel_lvds_restore,
.detect = intel_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_lvds_set_property,
@@ -790,7 +718,12 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
static void intel_lvds_enc_destroy(struct drm_encoder *encoder)
{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+ if (intel_encoder->ddc_bus)
+ intel_i2c_destroy(intel_encoder->ddc_bus);
drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
@@ -979,6 +912,7 @@ void intel_lvds_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
@@ -1012,19 +946,27 @@ void intel_lvds_init(struct drm_device *dev)
return;
}
- connector = &intel_encoder->base;
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_encoder);
+ return;
+ }
+
+ connector = &intel_connector->base;
encoder = &intel_encoder->enc;
- drm_connector_init(dev, &intel_encoder->base, &intel_lvds_connector_funcs,
+ drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs,
DRM_MODE_ENCODER_LVDS);
- drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc);
+ drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
intel_encoder->type = INTEL_OUTPUT_LVDS;
intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
intel_encoder->crtc_mask = (1 << 1);
+ if (IS_I965G(dev))
+ intel_encoder->crtc_mask |= (1 << 0);
drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
@@ -1039,7 +981,7 @@ void intel_lvds_init(struct drm_device *dev)
* the initial panel fitting mode will be FULL_SCREEN.
*/
- drm_connector_attach_property(&intel_encoder->base,
+ drm_connector_attach_property(&intel_connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN;
@@ -1067,7 +1009,7 @@ void intel_lvds_init(struct drm_device *dev)
*/
dev_priv->lvds_edid_good = true;
- if (!intel_ddc_get_modes(intel_encoder))
+ if (!intel_ddc_get_modes(connector, intel_encoder->ddc_bus))
dev_priv->lvds_edid_good = false;
list_for_each_entry(scan, &connector->probed_modes, head) {
@@ -1151,4 +1093,5 @@ failed:
drm_connector_cleanup(connector);
drm_encoder_cleanup(encoder);
kfree(intel_encoder);
+ kfree(intel_connector);
}
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 8e5c83b2d120..4b1fd3d9c73c 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -54,9 +54,9 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder)
}
};
- intel_i2c_quirk_set(intel_encoder->base.dev, true);
+ intel_i2c_quirk_set(intel_encoder->enc.dev, true);
ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2);
- intel_i2c_quirk_set(intel_encoder->base.dev, false);
+ intel_i2c_quirk_set(intel_encoder->enc.dev, false);
if (ret == 2)
return true;
@@ -66,22 +66,23 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder)
/**
* intel_ddc_get_modes - get modelist from monitor
* @connector: DRM connector device to use
+ * @adapter: i2c adapter to fetch the EDID over
*
* Fetch the EDID information from @connector using the DDC bus.
*/
-int intel_ddc_get_modes(struct intel_encoder *intel_encoder)
+int intel_ddc_get_modes(struct drm_connector *connector,
+ struct i2c_adapter *adapter)
{
struct edid *edid;
int ret = 0;
- intel_i2c_quirk_set(intel_encoder->base.dev, true);
- edid = drm_get_edid(&intel_encoder->base, intel_encoder->ddc_bus);
- intel_i2c_quirk_set(intel_encoder->base.dev, false);
+ intel_i2c_quirk_set(connector->dev, true);
+ edid = drm_get_edid(connector, adapter);
+ intel_i2c_quirk_set(connector->dev, false);
if (edid) {
- drm_mode_connector_update_edid_property(&intel_encoder->base,
- edid);
- ret = drm_add_edid_modes(&intel_encoder->base, edid);
- intel_encoder->base.display_info.raw_edid = NULL;
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ connector->display_info.raw_edid = NULL;
kfree(edid);
}
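intel_ddc_get_modes() now takes the connector and an explicit i2c adapter instead of an intel_encoder, which is what lets the SDVO code retry on its analog DDC bus. A sketch of a ->get_modes() hook after the change, mirroring the callers updated elsewhere in this patch (the function name is illustrative):

/* Illustrative ->get_modes() hook using the new signature. */
static int example_get_modes(struct drm_connector *connector)
{
	struct drm_encoder *encoder = intel_attached_encoder(connector);
	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);

	return intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
}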
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 6d524a1fc271..b0e17b06eb6e 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -373,7 +373,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
/* never have the overlay hw on without showing a frame */
BUG_ON(!overlay->vid_bo);
- obj = overlay->vid_bo->obj;
+ obj = &overlay->vid_bo->base;
i915_gem_object_unpin(obj);
drm_gem_object_unreference(obj);
@@ -411,7 +411,7 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
switch (overlay->hw_wedged) {
case RELEASE_OLD_VID:
- obj = overlay->old_vid_bo->obj;
+ obj = &overlay->old_vid_bo->base;
i915_gem_object_unpin(obj);
drm_gem_object_unreference(obj);
overlay->old_vid_bo = NULL;
@@ -467,7 +467,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
if (ret != 0)
return ret;
- obj = overlay->old_vid_bo->obj;
+ obj = &overlay->old_vid_bo->base;
i915_gem_object_unpin(obj);
drm_gem_object_unreference(obj);
overlay->old_vid_bo = NULL;
@@ -1341,7 +1341,7 @@ void intel_setup_overlay(struct drm_device *dev)
return;
overlay->dev = dev;
- reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE);
+ reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
if (!reg_bo)
goto out_free;
overlay->reg_bo = to_intel_bo(reg_bo);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 87d953664cb0..42ceb15da689 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -36,7 +36,18 @@
#include "i915_drm.h"
#include "i915_drv.h"
#include "intel_sdvo_regs.h"
-#include <linux/dmi.h>
+
+#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
+#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
+#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
+#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0)
+
+#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
+ SDVO_TV_MASK)
+
+#define IS_TV(c) (c->output_flag & SDVO_TV_MASK)
+#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
+
static char *tv_format_names[] = {
"NTSC_M" , "NTSC_J" , "NTSC_443",
@@ -86,12 +97,6 @@ struct intel_sdvo_priv {
/* This is for current tv format name */
char *tv_format_name;
- /* This contains all current supported TV format */
- char *tv_format_supported[TV_FORMAT_NUM];
- int format_supported_num;
- struct drm_property *tv_format_property;
- struct drm_property *tv_format_name_property[TV_FORMAT_NUM];
-
/**
* This is set if we treat the device as HDMI, instead of DVI.
*/
@@ -112,12 +117,6 @@ struct intel_sdvo_priv {
*/
struct drm_display_mode *sdvo_lvds_fixed_mode;
- /**
- * Returned SDTV resolutions allowed for the current format, if the
- * device reported it.
- */
- struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions;
-
/*
* supported encoding mode, used to determine whether HDMI is
* supported
@@ -130,11 +129,24 @@ struct intel_sdvo_priv {
/* Mac mini hack -- use the same DDC as the analog connector */
struct i2c_adapter *analog_ddc_bus;
- int save_sdvo_mult;
- u16 save_active_outputs;
- struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2;
- struct intel_sdvo_dtd save_output_dtd[16];
- u32 save_SDVOX;
+};
+
+struct intel_sdvo_connector {
+ /* Mark the type of connector */
+ uint16_t output_flag;
+
+ /* This contains all current supported TV format */
+ char *tv_format_supported[TV_FORMAT_NUM];
+ int format_supported_num;
+ struct drm_property *tv_format_property;
+ struct drm_property *tv_format_name_property[TV_FORMAT_NUM];
+
+ /**
+ * Returned SDTV resolutions allowed for the current format, if the
+ * device reported it.
+ */
+ struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions;
+
/* add the property for the SDVO-TV */
struct drm_property *left_property;
struct drm_property *right_property;
@@ -162,7 +174,12 @@ struct intel_sdvo_priv {
};
static bool
-intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags);
+intel_sdvo_output_setup(struct intel_encoder *intel_encoder,
+ uint16_t flags);
+static void
+intel_sdvo_tv_create_property(struct drm_connector *connector, int type);
+static void
+intel_sdvo_create_enhance_property(struct drm_connector *connector);
/**
* Writes the SDVOB or SDVOC with the given value, but always writes both
@@ -171,12 +188,18 @@ intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags);
*/
static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val)
{
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
u32 bval = val, cval = val;
int i;
+ if (sdvo_priv->sdvo_reg == PCH_SDVOB) {
+ I915_WRITE(sdvo_priv->sdvo_reg, val);
+ I915_READ(sdvo_priv->sdvo_reg);
+ return;
+ }
+
if (sdvo_priv->sdvo_reg == SDVOB) {
cval = I915_READ(SDVOC);
} else {
@@ -353,7 +376,8 @@ static const struct _sdvo_cmd_name {
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
};
-#define SDVO_NAME(dev_priv) ((dev_priv)->sdvo_reg == SDVOB ? "SDVOB" : "SDVOC")
+#define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB)
+#define SDVO_NAME(dev_priv) (IS_SDVOB((dev_priv)->sdvo_reg) ? "SDVOB" : "SDVOC")
#define SDVO_PRIV(encoder) ((struct intel_sdvo_priv *) (encoder)->dev_priv)
static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd,
@@ -563,17 +587,6 @@ static bool intel_sdvo_get_trained_inputs(struct intel_encoder *intel_encoder, b
return true;
}
-static bool intel_sdvo_get_active_outputs(struct intel_encoder *intel_encoder,
- u16 *outputs)
-{
- u8 status;
-
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder, outputs, sizeof(*outputs));
-
- return (status == SDVO_CMD_STATUS_SUCCESS);
-}
-
static bool intel_sdvo_set_active_outputs(struct intel_encoder *intel_encoder,
u16 outputs)
{
@@ -646,40 +659,6 @@ static bool intel_sdvo_set_target_output(struct intel_encoder *intel_encoder,
return (status == SDVO_CMD_STATUS_SUCCESS);
}
-static bool intel_sdvo_get_timing(struct intel_encoder *intel_encoder, u8 cmd,
- struct intel_sdvo_dtd *dtd)
-{
- u8 status;
-
- intel_sdvo_write_cmd(intel_encoder, cmd, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder, &dtd->part1,
- sizeof(dtd->part1));
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- intel_sdvo_write_cmd(intel_encoder, cmd + 1, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder, &dtd->part2,
- sizeof(dtd->part2));
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- return true;
-}
-
-static bool intel_sdvo_get_input_timing(struct intel_encoder *intel_encoder,
- struct intel_sdvo_dtd *dtd)
-{
- return intel_sdvo_get_timing(intel_encoder,
- SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd);
-}
-
-static bool intel_sdvo_get_output_timing(struct intel_encoder *intel_encoder,
- struct intel_sdvo_dtd *dtd)
-{
- return intel_sdvo_get_timing(intel_encoder,
- SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd);
-}
-
static bool intel_sdvo_set_timing(struct intel_encoder *intel_encoder, u8 cmd,
struct intel_sdvo_dtd *dtd)
{
@@ -767,23 +746,6 @@ static bool intel_sdvo_get_preferred_input_timing(struct intel_encoder *intel_en
return false;
}
-static int intel_sdvo_get_clock_rate_mult(struct intel_encoder *intel_encoder)
-{
- u8 response, status;
-
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder, &response, 1);
-
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Couldn't get SDVO clock rate multiplier\n");
- return SDVO_CLOCK_RATE_MULT_1X;
- } else {
- DRM_DEBUG_KMS("Current clock rate multiplier: %d\n", response);
- }
-
- return response;
-}
-
static bool intel_sdvo_set_clock_rate_mult(struct intel_encoder *intel_encoder, u8 val)
{
u8 status;
@@ -1071,7 +1033,7 @@ static void intel_sdvo_set_tv_format(struct intel_encoder *intel_encoder)
memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ?
sizeof(format) : sizeof(format_map));
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format_map,
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format,
sizeof(format));
status = intel_sdvo_read_response(intel_encoder, NULL, 0);
@@ -1101,7 +1063,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
/* Set output timings */
intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
intel_sdvo_set_target_output(intel_encoder,
- dev_priv->controlled_output);
+ dev_priv->attached_output);
intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
/* Set the input timing to the screen. Assume always input 0. */
@@ -1139,7 +1101,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
dev_priv->sdvo_lvds_fixed_mode);
intel_sdvo_set_target_output(intel_encoder,
- dev_priv->controlled_output);
+ dev_priv->attached_output);
intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
/* Set the input timing to the screen. Assume always input 0. */
@@ -1204,7 +1166,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
* channel on the motherboard. In a two-input device, the first input
* will be SDVOB and the second SDVOC.
*/
- in_out.in0 = sdvo_priv->controlled_output;
+ in_out.in0 = sdvo_priv->attached_output;
in_out.in1 = 0;
intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_IN_OUT_MAP,
@@ -1230,7 +1192,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) {
/* Set the output timing to the screen */
intel_sdvo_set_target_output(intel_encoder,
- sdvo_priv->controlled_output);
+ sdvo_priv->attached_output);
intel_sdvo_set_output_timing(intel_encoder, &input_dtd);
}
@@ -1352,107 +1314,16 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
if (0)
intel_sdvo_set_encoder_power_state(intel_encoder, mode);
- intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->controlled_output);
+ intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->attached_output);
}
return;
}
-static void intel_sdvo_save(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
- int o;
-
- sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(intel_encoder);
- intel_sdvo_get_active_outputs(intel_encoder, &sdvo_priv->save_active_outputs);
-
- if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
- intel_sdvo_set_target_input(intel_encoder, true, false);
- intel_sdvo_get_input_timing(intel_encoder,
- &sdvo_priv->save_input_dtd_1);
- }
-
- if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
- intel_sdvo_set_target_input(intel_encoder, false, true);
- intel_sdvo_get_input_timing(intel_encoder,
- &sdvo_priv->save_input_dtd_2);
- }
-
- for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++)
- {
- u16 this_output = (1 << o);
- if (sdvo_priv->caps.output_flags & this_output)
- {
- intel_sdvo_set_target_output(intel_encoder, this_output);
- intel_sdvo_get_output_timing(intel_encoder,
- &sdvo_priv->save_output_dtd[o]);
- }
- }
- if (sdvo_priv->is_tv) {
- /* XXX: Save TV format/enhancements. */
- }
-
- sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->sdvo_reg);
-}
-
-static void intel_sdvo_restore(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
- int o;
- int i;
- bool input1, input2;
- u8 status;
-
- intel_sdvo_set_active_outputs(intel_encoder, 0);
-
- for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++)
- {
- u16 this_output = (1 << o);
- if (sdvo_priv->caps.output_flags & this_output) {
- intel_sdvo_set_target_output(intel_encoder, this_output);
- intel_sdvo_set_output_timing(intel_encoder, &sdvo_priv->save_output_dtd[o]);
- }
- }
-
- if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
- intel_sdvo_set_target_input(intel_encoder, true, false);
- intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_1);
- }
-
- if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
- intel_sdvo_set_target_input(intel_encoder, false, true);
- intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_2);
- }
-
- intel_sdvo_set_clock_rate_mult(intel_encoder, sdvo_priv->save_sdvo_mult);
-
- if (sdvo_priv->is_tv) {
- /* XXX: Restore TV format/enhancements. */
- }
-
- intel_sdvo_write_sdvox(intel_encoder, sdvo_priv->save_SDVOX);
-
- if (sdvo_priv->save_SDVOX & SDVO_ENABLE)
- {
- for (i = 0; i < 2; i++)
- intel_wait_for_vblank(dev);
- status = intel_sdvo_get_trained_inputs(intel_encoder, &input1, &input2);
- if (status == SDVO_CMD_STATUS_SUCCESS && !input1)
- DRM_DEBUG_KMS("First %s output reported failure to "
- "sync\n", SDVO_NAME(sdvo_priv));
- }
-
- intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->save_active_outputs);
-}
-
static int intel_sdvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -1490,6 +1361,8 @@ static bool intel_sdvo_get_capabilities(struct intel_encoder *intel_encoder, str
return true;
}
+/* Unused; kept under #if 0 for reference. */
+#if 0
struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB)
{
struct drm_connector *connector = NULL;
@@ -1560,6 +1433,7 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
intel_sdvo_read_response(intel_encoder, &response, 2);
}
+#endif
static bool
intel_sdvo_multifunc_encoder(struct intel_encoder *intel_encoder)
@@ -1598,12 +1472,17 @@ static struct drm_connector *
intel_find_analog_connector(struct drm_device *dev)
{
struct drm_connector *connector;
+ struct drm_encoder *encoder;
struct intel_encoder *intel_encoder;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- intel_encoder = to_intel_encoder(connector);
- if (intel_encoder->type == INTEL_OUTPUT_ANALOG)
- return connector;
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ intel_encoder = enc_to_intel_encoder(encoder);
+ if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector && encoder == intel_attached_encoder(connector))
+ return connector;
+ }
+ }
}
return NULL;
}
@@ -1627,12 +1506,13 @@ intel_analog_is_connected(struct drm_device *dev)
enum drm_connector_status
intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
enum drm_connector_status status = connector_status_connected;
struct edid *edid = NULL;
- edid = drm_get_edid(&intel_encoder->base,
+ edid = drm_get_edid(connector,
intel_encoder->ddc_bus);
/* This is only applied to SDVO cards with multiple outputs */
@@ -1646,7 +1526,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
*/
while(temp_ddc > 1) {
sdvo_priv->ddc_bus = temp_ddc;
- edid = drm_get_edid(&intel_encoder->base,
+ edid = drm_get_edid(connector,
intel_encoder->ddc_bus);
if (edid) {
/*
@@ -1666,8 +1546,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
*/
if (edid == NULL &&
sdvo_priv->analog_ddc_bus &&
- !intel_analog_is_connected(intel_encoder->base.dev))
- edid = drm_get_edid(&intel_encoder->base,
+ !intel_analog_is_connected(connector->dev))
+ edid = drm_get_edid(connector,
sdvo_priv->analog_ddc_bus);
if (edid != NULL) {
/* Don't report the output as connected if it's a DVI-I
@@ -1682,7 +1562,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
}
kfree(edid);
- intel_encoder->base.display_info.raw_edid = NULL;
+ connector->display_info.raw_edid = NULL;
} else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1))
status = connector_status_disconnected;
@@ -1694,8 +1574,12 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
{
uint16_t response;
u8 status;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
+ enum drm_connector_status ret;
intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
@@ -1713,24 +1597,41 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
if (response == 0)
return connector_status_disconnected;
- if (intel_sdvo_multifunc_encoder(intel_encoder) &&
- sdvo_priv->attached_output != response) {
- if (sdvo_priv->controlled_output != response &&
- intel_sdvo_output_setup(intel_encoder, response) != true)
- return connector_status_unknown;
- sdvo_priv->attached_output = response;
+ sdvo_priv->attached_output = response;
+
+ if ((sdvo_connector->output_flag & response) == 0)
+ ret = connector_status_disconnected;
+ else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1))
+ ret = intel_sdvo_hdmi_sink_detect(connector, response);
+ else
+ ret = connector_status_connected;
+
+ /* May need to update encoder flags, e.g. the TV clock for SDVO TV. */
+ if (ret == connector_status_connected) {
+ sdvo_priv->is_tv = false;
+ sdvo_priv->is_lvds = false;
+ intel_encoder->needs_tv_clock = false;
+
+ if (response & SDVO_TV_MASK) {
+ sdvo_priv->is_tv = true;
+ intel_encoder->needs_tv_clock = true;
+ }
+ if (response & SDVO_LVDS_MASK)
+ sdvo_priv->is_lvds = true;
}
- return intel_sdvo_hdmi_sink_detect(connector, response);
+
+ return ret;
}
static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
int num_modes;
/* set the bus switch and get the modes */
- num_modes = intel_ddc_get_modes(intel_encoder);
+ num_modes = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
/*
* Mac mini hack. On this device, the DVI-I connector shares one DDC
@@ -1740,17 +1641,10 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
*/
if (num_modes == 0 &&
sdvo_priv->analog_ddc_bus &&
- !intel_analog_is_connected(intel_encoder->base.dev)) {
- struct i2c_adapter *digital_ddc_bus;
-
+ !intel_analog_is_connected(connector->dev)) {
/* Switch to the analog ddc bus and try that
*/
- digital_ddc_bus = intel_encoder->ddc_bus;
- intel_encoder->ddc_bus = sdvo_priv->analog_ddc_bus;
-
- (void) intel_ddc_get_modes(intel_encoder);
-
- intel_encoder->ddc_bus = digital_ddc_bus;
+ (void) intel_ddc_get_modes(connector, sdvo_priv->analog_ddc_bus);
}
}
@@ -1821,8 +1715,9 @@ struct drm_display_mode sdvo_tv_modes[] = {
static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
{
- struct intel_encoder *output = to_intel_encoder(connector);
- struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
struct intel_sdvo_sdtv_resolution_request tv_res;
uint32_t reply = 0, format_map = 0;
int i;
@@ -1842,11 +1737,11 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
sizeof(format_map) ? sizeof(format_map) :
sizeof(struct intel_sdvo_sdtv_resolution_request));
- intel_sdvo_set_target_output(output, sdvo_priv->controlled_output);
+ intel_sdvo_set_target_output(intel_encoder, sdvo_priv->attached_output);
- intel_sdvo_write_cmd(output, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
&tv_res, sizeof(tv_res));
- status = intel_sdvo_read_response(output, &reply, 3);
+ status = intel_sdvo_read_response(intel_encoder, &reply, 3);
if (status != SDVO_CMD_STATUS_SUCCESS)
return;
@@ -1863,7 +1758,8 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
struct drm_display_mode *newmode;
@@ -1873,7 +1769,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
* Assume that the preferred modes are
* arranged in priority order.
*/
- intel_ddc_get_modes(intel_encoder);
+ intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
if (list_empty(&connector->probed_modes) == false)
goto end;
@@ -1902,12 +1798,12 @@ end:
static int intel_sdvo_get_modes(struct drm_connector *connector)
{
- struct intel_encoder *output = to_intel_encoder(connector);
- struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
- if (sdvo_priv->is_tv)
+ if (IS_TV(sdvo_connector))
intel_sdvo_get_tv_modes(connector);
- else if (sdvo_priv->is_lvds == true)
+ else if (IS_LVDS(sdvo_connector))
intel_sdvo_get_lvds_modes(connector);
else
intel_sdvo_get_ddc_modes(connector);
@@ -1920,11 +1816,11 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
static
void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv;
struct drm_device *dev = connector->dev;
- if (sdvo_priv->is_tv) {
+ if (IS_TV(sdvo_priv)) {
if (sdvo_priv->left_property)
drm_property_destroy(dev, sdvo_priv->left_property);
if (sdvo_priv->right_property)
@@ -1937,8 +1833,6 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
drm_property_destroy(dev, sdvo_priv->hpos_property);
if (sdvo_priv->vpos_property)
drm_property_destroy(dev, sdvo_priv->vpos_property);
- }
- if (sdvo_priv->is_tv) {
if (sdvo_priv->saturation_property)
drm_property_destroy(dev,
sdvo_priv->saturation_property);
@@ -1948,7 +1842,7 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
if (sdvo_priv->hue_property)
drm_property_destroy(dev, sdvo_priv->hue_property);
}
- if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
+ if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) {
if (sdvo_priv->brightness_property)
drm_property_destroy(dev,
sdvo_priv->brightness_property);
@@ -1958,31 +1852,17 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
static void intel_sdvo_destroy(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
- if (intel_encoder->i2c_bus)
- intel_i2c_destroy(intel_encoder->i2c_bus);
- if (intel_encoder->ddc_bus)
- intel_i2c_destroy(intel_encoder->ddc_bus);
- if (sdvo_priv->analog_ddc_bus)
- intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
-
- if (sdvo_priv->sdvo_lvds_fixed_mode != NULL)
- drm_mode_destroy(connector->dev,
- sdvo_priv->sdvo_lvds_fixed_mode);
-
- if (sdvo_priv->tv_format_property)
+ if (sdvo_connector->tv_format_property)
drm_property_destroy(connector->dev,
- sdvo_priv->tv_format_property);
-
- if (sdvo_priv->is_tv || sdvo_priv->is_lvds)
- intel_sdvo_destroy_enhance_property(connector);
+ sdvo_connector->tv_format_property);
+ intel_sdvo_destroy_enhance_property(connector);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
-
- kfree(intel_encoder);
+ kfree(connector);
}
static int
@@ -1990,9 +1870,11 @@ intel_sdvo_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
- struct drm_encoder *encoder = &intel_encoder->enc;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
struct drm_crtc *crtc = encoder->crtc;
int ret = 0;
bool changed = false;
@@ -2003,101 +1885,101 @@ intel_sdvo_set_property(struct drm_connector *connector,
if (ret < 0)
goto out;
- if (property == sdvo_priv->tv_format_property) {
+ if (property == sdvo_connector->tv_format_property) {
if (val >= TV_FORMAT_NUM) {
ret = -EINVAL;
goto out;
}
if (sdvo_priv->tv_format_name ==
- sdvo_priv->tv_format_supported[val])
+ sdvo_connector->tv_format_supported[val])
goto out;
- sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[val];
+ sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[val];
changed = true;
}
- if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
+ if (IS_TV(sdvo_connector) || IS_LVDS(sdvo_connector)) {
cmd = 0;
temp_value = val;
- if (sdvo_priv->left_property == property) {
+ if (sdvo_connector->left_property == property) {
drm_connector_property_set_value(connector,
- sdvo_priv->right_property, val);
- if (sdvo_priv->left_margin == temp_value)
+ sdvo_connector->right_property, val);
+ if (sdvo_connector->left_margin == temp_value)
goto out;
- sdvo_priv->left_margin = temp_value;
- sdvo_priv->right_margin = temp_value;
- temp_value = sdvo_priv->max_hscan -
- sdvo_priv->left_margin;
+ sdvo_connector->left_margin = temp_value;
+ sdvo_connector->right_margin = temp_value;
+ temp_value = sdvo_connector->max_hscan -
+ sdvo_connector->left_margin;
cmd = SDVO_CMD_SET_OVERSCAN_H;
- } else if (sdvo_priv->right_property == property) {
+ } else if (sdvo_connector->right_property == property) {
drm_connector_property_set_value(connector,
- sdvo_priv->left_property, val);
- if (sdvo_priv->right_margin == temp_value)
+ sdvo_connector->left_property, val);
+ if (sdvo_connector->right_margin == temp_value)
goto out;
- sdvo_priv->left_margin = temp_value;
- sdvo_priv->right_margin = temp_value;
- temp_value = sdvo_priv->max_hscan -
- sdvo_priv->left_margin;
+ sdvo_connector->left_margin = temp_value;
+ sdvo_connector->right_margin = temp_value;
+ temp_value = sdvo_connector->max_hscan -
+ sdvo_connector->left_margin;
cmd = SDVO_CMD_SET_OVERSCAN_H;
- } else if (sdvo_priv->top_property == property) {
+ } else if (sdvo_connector->top_property == property) {
drm_connector_property_set_value(connector,
- sdvo_priv->bottom_property, val);
- if (sdvo_priv->top_margin == temp_value)
+ sdvo_connector->bottom_property, val);
+ if (sdvo_connector->top_margin == temp_value)
goto out;
- sdvo_priv->top_margin = temp_value;
- sdvo_priv->bottom_margin = temp_value;
- temp_value = sdvo_priv->max_vscan -
- sdvo_priv->top_margin;
+ sdvo_connector->top_margin = temp_value;
+ sdvo_connector->bottom_margin = temp_value;
+ temp_value = sdvo_connector->max_vscan -
+ sdvo_connector->top_margin;
cmd = SDVO_CMD_SET_OVERSCAN_V;
- } else if (sdvo_priv->bottom_property == property) {
+ } else if (sdvo_connector->bottom_property == property) {
drm_connector_property_set_value(connector,
- sdvo_priv->top_property, val);
- if (sdvo_priv->bottom_margin == temp_value)
+ sdvo_connector->top_property, val);
+ if (sdvo_connector->bottom_margin == temp_value)
goto out;
- sdvo_priv->top_margin = temp_value;
- sdvo_priv->bottom_margin = temp_value;
- temp_value = sdvo_priv->max_vscan -
- sdvo_priv->top_margin;
+ sdvo_connector->top_margin = temp_value;
+ sdvo_connector->bottom_margin = temp_value;
+ temp_value = sdvo_connector->max_vscan -
+ sdvo_connector->top_margin;
cmd = SDVO_CMD_SET_OVERSCAN_V;
- } else if (sdvo_priv->hpos_property == property) {
- if (sdvo_priv->cur_hpos == temp_value)
+ } else if (sdvo_connector->hpos_property == property) {
+ if (sdvo_connector->cur_hpos == temp_value)
goto out;
cmd = SDVO_CMD_SET_POSITION_H;
- sdvo_priv->cur_hpos = temp_value;
- } else if (sdvo_priv->vpos_property == property) {
- if (sdvo_priv->cur_vpos == temp_value)
+ sdvo_connector->cur_hpos = temp_value;
+ } else if (sdvo_connector->vpos_property == property) {
+ if (sdvo_connector->cur_vpos == temp_value)
goto out;
cmd = SDVO_CMD_SET_POSITION_V;
- sdvo_priv->cur_vpos = temp_value;
- } else if (sdvo_priv->saturation_property == property) {
- if (sdvo_priv->cur_saturation == temp_value)
+ sdvo_connector->cur_vpos = temp_value;
+ } else if (sdvo_connector->saturation_property == property) {
+ if (sdvo_connector->cur_saturation == temp_value)
goto out;
cmd = SDVO_CMD_SET_SATURATION;
- sdvo_priv->cur_saturation = temp_value;
- } else if (sdvo_priv->contrast_property == property) {
- if (sdvo_priv->cur_contrast == temp_value)
+ sdvo_connector->cur_saturation = temp_value;
+ } else if (sdvo_connector->contrast_property == property) {
+ if (sdvo_connector->cur_contrast == temp_value)
goto out;
cmd = SDVO_CMD_SET_CONTRAST;
- sdvo_priv->cur_contrast = temp_value;
- } else if (sdvo_priv->hue_property == property) {
- if (sdvo_priv->cur_hue == temp_value)
+ sdvo_connector->cur_contrast = temp_value;
+ } else if (sdvo_connector->hue_property == property) {
+ if (sdvo_connector->cur_hue == temp_value)
goto out;
cmd = SDVO_CMD_SET_HUE;
- sdvo_priv->cur_hue = temp_value;
- } else if (sdvo_priv->brightness_property == property) {
- if (sdvo_priv->cur_brightness == temp_value)
+ sdvo_connector->cur_hue = temp_value;
+ } else if (sdvo_connector->brightness_property == property) {
+ if (sdvo_connector->cur_brightness == temp_value)
goto out;
cmd = SDVO_CMD_SET_BRIGHTNESS;
- sdvo_priv->cur_brightness = temp_value;
+ sdvo_connector->cur_brightness = temp_value;
}
if (cmd) {
intel_sdvo_write_cmd(intel_encoder, cmd, &temp_value, 2);
@@ -2127,8 +2009,6 @@ static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .save = intel_sdvo_save,
- .restore = intel_sdvo_restore,
.detect = intel_sdvo_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_sdvo_set_property,
@@ -2138,12 +2018,27 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
.get_modes = intel_sdvo_get_modes,
.mode_valid = intel_sdvo_mode_valid,
- .best_encoder = intel_best_encoder,
+ .best_encoder = intel_attached_encoder,
};
static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+
+ if (intel_encoder->i2c_bus)
+ intel_i2c_destroy(intel_encoder->i2c_bus);
+ if (intel_encoder->ddc_bus)
+ intel_i2c_destroy(intel_encoder->ddc_bus);
+ if (sdvo_priv->analog_ddc_bus)
+ intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
+
+ if (sdvo_priv->sdvo_lvds_fixed_mode != NULL)
+ drm_mode_destroy(encoder->dev,
+ sdvo_priv->sdvo_lvds_fixed_mode);
+
drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
@@ -2196,12 +2091,15 @@ intel_sdvo_select_ddc_bus(struct intel_sdvo_priv *dev_priv)
}
static bool
-intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output)
+intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output, int device)
{
struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
uint8_t status;
- intel_sdvo_set_target_output(output, sdvo_priv->controlled_output);
+ if (device == 0)
+ intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS0);
+ else
+ intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS1);
intel_sdvo_write_cmd(output, SDVO_CMD_GET_ENCODE, NULL, 0);
status = intel_sdvo_read_response(output, &sdvo_priv->is_hdmi, 1);
@@ -2214,15 +2112,13 @@ static struct intel_encoder *
intel_sdvo_chan_to_intel_encoder(struct intel_i2c_chan *chan)
{
struct drm_device *dev = chan->drm_dev;
- struct drm_connector *connector;
+ struct drm_encoder *encoder;
struct intel_encoder *intel_encoder = NULL;
- list_for_each_entry(connector,
- &dev->mode_config.connector_list, head) {
- if (to_intel_encoder(connector)->ddc_bus == &chan->adapter) {
- intel_encoder = to_intel_encoder(connector);
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ intel_encoder = enc_to_intel_encoder(encoder);
+ if (intel_encoder->ddc_bus == &chan->adapter)
break;
- }
}
return intel_encoder;
}
@@ -2259,7 +2155,7 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
struct drm_i915_private *dev_priv = dev->dev_private;
struct sdvo_device_mapping *my_mapping, *other_mapping;
- if (sdvo_reg == SDVOB) {
+ if (IS_SDVOB(sdvo_reg)) {
my_mapping = &dev_priv->sdvo_mappings[0];
other_mapping = &dev_priv->sdvo_mappings[1];
} else {
@@ -2284,120 +2180,235 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
/* No SDVO device info is found for another DVO port,
* so use mapping assumption we had before BIOS parsing.
*/
- if (sdvo_reg == SDVOB)
+ if (IS_SDVOB(sdvo_reg))
return 0x70;
else
return 0x72;
}
-static int intel_sdvo_bad_tv_callback(const struct dmi_system_id *id)
+static bool
+intel_sdvo_connector_alloc (struct intel_connector **ret)
{
- DRM_DEBUG_KMS("Ignoring bad SDVO TV connector for %s\n", id->ident);
- return 1;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *sdvo_connector;
+
+ *ret = kzalloc(sizeof(*intel_connector) +
+ sizeof(*sdvo_connector), GFP_KERNEL);
+ if (!*ret)
+ return false;
+
+ intel_connector = *ret;
+ sdvo_connector = (struct intel_sdvo_connector *)(intel_connector + 1);
+ intel_connector->dev_priv = sdvo_connector;
+
+ return true;
}
-static struct dmi_system_id intel_sdvo_bad_tv[] = {
- {
- .callback = intel_sdvo_bad_tv_callback,
- .ident = "IntelG45/ICH10R/DME1737",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "IBM CORPORATION"),
- DMI_MATCH(DMI_PRODUCT_NAME, "4800784"),
- },
- },
+static void
+intel_sdvo_connector_create (struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ drm_connector_init(encoder->dev, connector, &intel_sdvo_connector_funcs,
+ connector->connector_type);
- { } /* terminating entry */
-};
+ drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+ drm_sysfs_connector_add(connector);
+}
static bool
-intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags)
+intel_sdvo_dvi_init(struct intel_encoder *intel_encoder, int device)
{
- struct drm_connector *connector = &intel_encoder->base;
struct drm_encoder *encoder = &intel_encoder->enc;
struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
- bool ret = true, registered = false;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *sdvo_connector;
+
+ if (!intel_sdvo_connector_alloc(&intel_connector))
+ return false;
+
+ sdvo_connector = intel_connector->dev_priv;
+
+ if (device == 0) {
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS0;
+ sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
+ } else if (device == 1) {
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS1;
+ sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
+ }
+
+ connector = &intel_connector->base;
+ encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
+ connector->connector_type = DRM_MODE_CONNECTOR_DVID;
+
+ if (intel_sdvo_get_supp_encode(intel_encoder, &sdvo_priv->encode)
+ && intel_sdvo_get_digital_encoding_mode(intel_encoder, device)
+ && sdvo_priv->is_hdmi) {
+ /* enable hdmi encoding mode if supported */
+ intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI);
+ intel_sdvo_set_colorimetry(intel_encoder,
+ SDVO_COLORIMETRY_RGB256);
+ connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+ }
+ intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ (1 << INTEL_ANALOG_CLONE_BIT);
+
+ intel_sdvo_connector_create(encoder, connector);
+
+ return true;
+}
+
+static bool
+intel_sdvo_tv_init(struct intel_encoder *intel_encoder, int type)
+{
+ struct drm_encoder *encoder = &intel_encoder->enc;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *sdvo_connector;
+
+ if (!intel_sdvo_connector_alloc(&intel_connector))
+ return false;
+
+ connector = &intel_connector->base;
+ encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+ sdvo_connector = intel_connector->dev_priv;
+
+ sdvo_priv->controlled_output |= type;
+ sdvo_connector->output_flag = type;
+
+ sdvo_priv->is_tv = true;
+ intel_encoder->needs_tv_clock = true;
+ intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+
+ intel_sdvo_connector_create(encoder, connector);
+
+ intel_sdvo_tv_create_property(connector, type);
+
+ intel_sdvo_create_enhance_property(connector);
+
+ return true;
+}
+
+static bool
+intel_sdvo_analog_init(struct intel_encoder *intel_encoder, int device)
+{
+ struct drm_encoder *encoder = &intel_encoder->enc;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *sdvo_connector;
+
+ if (!intel_sdvo_connector_alloc(&intel_connector))
+ return false;
+
+ connector = &intel_connector->base;
+ encoder->encoder_type = DRM_MODE_ENCODER_DAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+ sdvo_connector = intel_connector->dev_priv;
+
+ if (device == 0) {
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB0;
+ sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
+ } else if (device == 1) {
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB1;
+ sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
+ }
+
+ intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ (1 << INTEL_ANALOG_CLONE_BIT);
+
+ intel_sdvo_connector_create(encoder, connector);
+ return true;
+}
+
+static bool
+intel_sdvo_lvds_init(struct intel_encoder *intel_encoder, int device)
+{
+ struct drm_encoder *encoder = &intel_encoder->enc;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *sdvo_connector;
+
+ if (!intel_sdvo_connector_alloc(&intel_connector))
+ return false;
+
+ connector = &intel_connector->base;
+ encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
+ connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
+ sdvo_connector = intel_connector->dev_priv;
+
+ sdvo_priv->is_lvds = true;
+
+ if (device == 0) {
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS0;
+ sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
+ } else if (device == 1) {
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS1;
+ sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
+ }
+
+ intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
+ (1 << INTEL_SDVO_LVDS_CLONE_BIT);
+
+ intel_sdvo_connector_create(encoder, connector);
+ intel_sdvo_create_enhance_property(connector);
+ return true;
+}
+
+static bool
+intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags)
+{
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
sdvo_priv->is_tv = false;
intel_encoder->needs_tv_clock = false;
sdvo_priv->is_lvds = false;
- if (device_is_registered(&connector->kdev)) {
- drm_sysfs_connector_remove(connector);
- registered = true;
- }
+ /* In SDVO, the XXX1 function block may not exist unless the XXX0 function block is present. */
- if (flags &
- (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) {
- if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0)
- sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS0;
- else
- sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1;
-
- encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
- connector->connector_type = DRM_MODE_CONNECTOR_DVID;
-
- if (intel_sdvo_get_supp_encode(intel_encoder,
- &sdvo_priv->encode) &&
- intel_sdvo_get_digital_encoding_mode(intel_encoder) &&
- sdvo_priv->is_hdmi) {
- /* enable hdmi encoding mode if supported */
- intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI);
- intel_sdvo_set_colorimetry(intel_encoder,
- SDVO_COLORIMETRY_RGB256);
- connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
- intel_encoder->clone_mask =
- (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT);
- }
- } else if ((flags & SDVO_OUTPUT_SVID0) &&
- !dmi_check_system(intel_sdvo_bad_tv)) {
-
- sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0;
- encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
- connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
- sdvo_priv->is_tv = true;
- intel_encoder->needs_tv_clock = true;
- intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
- } else if (flags & SDVO_OUTPUT_RGB0) {
-
- sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0;
- encoder->encoder_type = DRM_MODE_ENCODER_DAC;
- connector->connector_type = DRM_MODE_CONNECTOR_VGA;
- intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT);
- } else if (flags & SDVO_OUTPUT_RGB1) {
-
- sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1;
- encoder->encoder_type = DRM_MODE_ENCODER_DAC;
- connector->connector_type = DRM_MODE_CONNECTOR_VGA;
- intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT);
- } else if (flags & SDVO_OUTPUT_CVBS0) {
-
- sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0;
- encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
- connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
- sdvo_priv->is_tv = true;
- intel_encoder->needs_tv_clock = true;
- intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
- } else if (flags & SDVO_OUTPUT_LVDS0) {
-
- sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
- encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
- connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
- sdvo_priv->is_lvds = true;
- intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
- (1 << INTEL_SDVO_LVDS_CLONE_BIT);
- } else if (flags & SDVO_OUTPUT_LVDS1) {
-
- sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1;
- encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
- connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
- sdvo_priv->is_lvds = true;
- intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
- (1 << INTEL_SDVO_LVDS_CLONE_BIT);
- } else {
+ if (flags & SDVO_OUTPUT_TMDS0)
+ if (!intel_sdvo_dvi_init(intel_encoder, 0))
+ return false;
+
+ if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
+ if (!intel_sdvo_dvi_init(intel_encoder, 1))
+ return false;
+
+ /* TV has no XXX1 function block */
+ if (flags & SDVO_OUTPUT_SVID0)
+ if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_SVID0))
+ return false;
+
+ if (flags & SDVO_OUTPUT_CVBS0)
+ if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_CVBS0))
+ return false;
+ if (flags & SDVO_OUTPUT_RGB0)
+ if (!intel_sdvo_analog_init(intel_encoder, 0))
+ return false;
+
+ if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
+ if (!intel_sdvo_analog_init(intel_encoder, 1))
+ return false;
+
+ if (flags & SDVO_OUTPUT_LVDS0)
+ if (!intel_sdvo_lvds_init(intel_encoder, 0))
+ return false;
+
+ if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
+ if (!intel_sdvo_lvds_init(intel_encoder, 1))
+ return false;
+
+ if ((flags & SDVO_OUTPUT_MASK) == 0) {
unsigned char bytes[2];
sdvo_priv->controlled_output = 0;
@@ -2405,28 +2416,25 @@ intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags)
DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
SDVO_NAME(sdvo_priv),
bytes[0], bytes[1]);
- ret = false;
+ return false;
}
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
- if (ret && registered)
- ret = drm_sysfs_connector_add(connector) == 0 ? true : false;
-
-
- return ret;
-
+ return true;
}
-static void intel_sdvo_tv_create_property(struct drm_connector *connector)
+static void intel_sdvo_tv_create_property(struct drm_connector *connector, int type)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
struct intel_sdvo_tv_format format;
uint32_t format_map, i;
uint8_t status;
- intel_sdvo_set_target_output(intel_encoder,
- sdvo_priv->controlled_output);
+ intel_sdvo_set_target_output(intel_encoder, type);
intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0);
@@ -2441,35 +2449,37 @@ static void intel_sdvo_tv_create_property(struct drm_connector *connector)
if (format_map == 0)
return;
- sdvo_priv->format_supported_num = 0;
+ sdvo_connector->format_supported_num = 0;
for (i = 0 ; i < TV_FORMAT_NUM; i++)
if (format_map & (1 << i)) {
- sdvo_priv->tv_format_supported
- [sdvo_priv->format_supported_num++] =
+ sdvo_connector->tv_format_supported
+ [sdvo_connector->format_supported_num++] =
tv_format_names[i];
}
- sdvo_priv->tv_format_property =
+ sdvo_connector->tv_format_property =
drm_property_create(
connector->dev, DRM_MODE_PROP_ENUM,
- "mode", sdvo_priv->format_supported_num);
+ "mode", sdvo_connector->format_supported_num);
- for (i = 0; i < sdvo_priv->format_supported_num; i++)
+ for (i = 0; i < sdvo_connector->format_supported_num; i++)
drm_property_add_enum(
- sdvo_priv->tv_format_property, i,
- i, sdvo_priv->tv_format_supported[i]);
+ sdvo_connector->tv_format_property, i,
+ i, sdvo_connector->tv_format_supported[i]);
- sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[0];
+ sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[0];
drm_connector_attach_property(
- connector, sdvo_priv->tv_format_property, 0);
+ connector, sdvo_connector->tv_format_property, 0);
}
static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv;
struct intel_sdvo_enhancements_reply sdvo_data;
struct drm_device *dev = connector->dev;
uint8_t status;
@@ -2488,7 +2498,7 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
DRM_DEBUG_KMS("No enhancement is supported\n");
return;
}
- if (sdvo_priv->is_tv) {
+ if (IS_TV(sdvo_priv)) {
/* when horizontal overscan is supported, add the left/right
* property
*/
@@ -2636,8 +2646,6 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
"default %d, current %d\n",
data_value[0], data_value[1], response);
}
- }
- if (sdvo_priv->is_tv) {
if (sdvo_data.saturation) {
intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_MAX_SATURATION, NULL, 0);
@@ -2733,7 +2741,7 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
data_value[0], data_value[1], response);
}
}
- if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
+ if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) {
if (sdvo_data.brightness) {
intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0);
@@ -2773,12 +2781,11 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_connector *connector;
struct intel_encoder *intel_encoder;
struct intel_sdvo_priv *sdvo_priv;
-
u8 ch[0x40];
int i;
+ u32 i2c_reg, ddc_reg, analog_ddc_reg;
intel_encoder = kcalloc(sizeof(struct intel_encoder)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
if (!intel_encoder) {
@@ -2791,11 +2798,21 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
intel_encoder->dev_priv = sdvo_priv;
intel_encoder->type = INTEL_OUTPUT_SDVO;
+ if (HAS_PCH_SPLIT(dev)) {
+ i2c_reg = PCH_GPIOE;
+ ddc_reg = PCH_GPIOE;
+ analog_ddc_reg = PCH_GPIOA;
+ } else {
+ i2c_reg = GPIOE;
+ ddc_reg = GPIOE;
+ analog_ddc_reg = GPIOA;
+ }
+
/* setup the DDC bus. */
- if (sdvo_reg == SDVOB)
- intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
+ if (IS_SDVOB(sdvo_reg))
+ intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOB");
else
- intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
+ intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOC");
if (!intel_encoder->i2c_bus)
goto err_inteloutput;
@@ -2809,20 +2826,20 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
for (i = 0; i < 0x40; i++) {
if (!intel_sdvo_read_byte(intel_encoder, i, &ch[i])) {
DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
- sdvo_reg == SDVOB ? 'B' : 'C');
+ IS_SDVOB(sdvo_reg) ? 'B' : 'C');
goto err_i2c;
}
}
/* setup the DDC bus. */
- if (sdvo_reg == SDVOB) {
- intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
- sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
+ if (IS_SDVOB(sdvo_reg)) {
+ intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOB DDC BUS");
+ sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
"SDVOB/VGA DDC BUS");
dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
} else {
- intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
- sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
+ intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOC DDC BUS");
+ sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
"SDVOC/VGA DDC BUS");
dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
}
@@ -2833,40 +2850,20 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
/* Wrap with our custom algo which switches to DDC mode */
intel_encoder->ddc_bus->algo = &intel_sdvo_i2c_bit_algo;
+ /* encoder type will be decided later */
+ drm_encoder_init(dev, &intel_encoder->enc, &intel_sdvo_enc_funcs, 0);
+ drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs);
+
/* In default case sdvo lvds is false */
intel_sdvo_get_capabilities(intel_encoder, &sdvo_priv->caps);
if (intel_sdvo_output_setup(intel_encoder,
sdvo_priv->caps.output_flags) != true) {
DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
- sdvo_reg == SDVOB ? 'B' : 'C');
+ IS_SDVOB(sdvo_reg) ? 'B' : 'C');
goto err_i2c;
}
-
- connector = &intel_encoder->base;
- drm_connector_init(dev, connector, &intel_sdvo_connector_funcs,
- connector->connector_type);
-
- drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
- connector->interlace_allowed = 0;
- connector->doublescan_allowed = 0;
- connector->display_info.subpixel_order = SubPixelHorizontalRGB;
-
- drm_encoder_init(dev, &intel_encoder->enc,
- &intel_sdvo_enc_funcs, intel_encoder->enc.encoder_type);
-
- drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs);
-
- drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc);
- if (sdvo_priv->is_tv)
- intel_sdvo_tv_create_property(connector);
-
- if (sdvo_priv->is_tv || sdvo_priv->is_lvds)
- intel_sdvo_create_enhance_property(connector);
-
- drm_sysfs_connector_add(connector);
-
intel_sdvo_select_ddc_bus(sdvo_priv);
/* Set the input timing to the screen. Assume always input 0. */
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index d7d39b2327df..6d553c29d106 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -916,143 +916,6 @@ intel_tv_dpms(struct drm_encoder *encoder, int mode)
}
}
-static void
-intel_tv_save(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
- int i;
-
- tv_priv->save_TV_H_CTL_1 = I915_READ(TV_H_CTL_1);
- tv_priv->save_TV_H_CTL_2 = I915_READ(TV_H_CTL_2);
- tv_priv->save_TV_H_CTL_3 = I915_READ(TV_H_CTL_3);
- tv_priv->save_TV_V_CTL_1 = I915_READ(TV_V_CTL_1);
- tv_priv->save_TV_V_CTL_2 = I915_READ(TV_V_CTL_2);
- tv_priv->save_TV_V_CTL_3 = I915_READ(TV_V_CTL_3);
- tv_priv->save_TV_V_CTL_4 = I915_READ(TV_V_CTL_4);
- tv_priv->save_TV_V_CTL_5 = I915_READ(TV_V_CTL_5);
- tv_priv->save_TV_V_CTL_6 = I915_READ(TV_V_CTL_6);
- tv_priv->save_TV_V_CTL_7 = I915_READ(TV_V_CTL_7);
- tv_priv->save_TV_SC_CTL_1 = I915_READ(TV_SC_CTL_1);
- tv_priv->save_TV_SC_CTL_2 = I915_READ(TV_SC_CTL_2);
- tv_priv->save_TV_SC_CTL_3 = I915_READ(TV_SC_CTL_3);
-
- tv_priv->save_TV_CSC_Y = I915_READ(TV_CSC_Y);
- tv_priv->save_TV_CSC_Y2 = I915_READ(TV_CSC_Y2);
- tv_priv->save_TV_CSC_U = I915_READ(TV_CSC_U);
- tv_priv->save_TV_CSC_U2 = I915_READ(TV_CSC_U2);
- tv_priv->save_TV_CSC_V = I915_READ(TV_CSC_V);
- tv_priv->save_TV_CSC_V2 = I915_READ(TV_CSC_V2);
- tv_priv->save_TV_CLR_KNOBS = I915_READ(TV_CLR_KNOBS);
- tv_priv->save_TV_CLR_LEVEL = I915_READ(TV_CLR_LEVEL);
- tv_priv->save_TV_WIN_POS = I915_READ(TV_WIN_POS);
- tv_priv->save_TV_WIN_SIZE = I915_READ(TV_WIN_SIZE);
- tv_priv->save_TV_FILTER_CTL_1 = I915_READ(TV_FILTER_CTL_1);
- tv_priv->save_TV_FILTER_CTL_2 = I915_READ(TV_FILTER_CTL_2);
- tv_priv->save_TV_FILTER_CTL_3 = I915_READ(TV_FILTER_CTL_3);
-
- for (i = 0; i < 60; i++)
- tv_priv->save_TV_H_LUMA[i] = I915_READ(TV_H_LUMA_0 + (i <<2));
- for (i = 0; i < 60; i++)
- tv_priv->save_TV_H_CHROMA[i] = I915_READ(TV_H_CHROMA_0 + (i <<2));
- for (i = 0; i < 43; i++)
- tv_priv->save_TV_V_LUMA[i] = I915_READ(TV_V_LUMA_0 + (i <<2));
- for (i = 0; i < 43; i++)
- tv_priv->save_TV_V_CHROMA[i] = I915_READ(TV_V_CHROMA_0 + (i <<2));
-
- tv_priv->save_TV_DAC = I915_READ(TV_DAC);
- tv_priv->save_TV_CTL = I915_READ(TV_CTL);
-}
-
-static void
-intel_tv_restore(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
- struct drm_crtc *crtc = connector->encoder->crtc;
- struct intel_crtc *intel_crtc;
- int i;
-
- /* FIXME: No CRTC? */
- if (!crtc)
- return;
-
- intel_crtc = to_intel_crtc(crtc);
- I915_WRITE(TV_H_CTL_1, tv_priv->save_TV_H_CTL_1);
- I915_WRITE(TV_H_CTL_2, tv_priv->save_TV_H_CTL_2);
- I915_WRITE(TV_H_CTL_3, tv_priv->save_TV_H_CTL_3);
- I915_WRITE(TV_V_CTL_1, tv_priv->save_TV_V_CTL_1);
- I915_WRITE(TV_V_CTL_2, tv_priv->save_TV_V_CTL_2);
- I915_WRITE(TV_V_CTL_3, tv_priv->save_TV_V_CTL_3);
- I915_WRITE(TV_V_CTL_4, tv_priv->save_TV_V_CTL_4);
- I915_WRITE(TV_V_CTL_5, tv_priv->save_TV_V_CTL_5);
- I915_WRITE(TV_V_CTL_6, tv_priv->save_TV_V_CTL_6);
- I915_WRITE(TV_V_CTL_7, tv_priv->save_TV_V_CTL_7);
- I915_WRITE(TV_SC_CTL_1, tv_priv->save_TV_SC_CTL_1);
- I915_WRITE(TV_SC_CTL_2, tv_priv->save_TV_SC_CTL_2);
- I915_WRITE(TV_SC_CTL_3, tv_priv->save_TV_SC_CTL_3);
-
- I915_WRITE(TV_CSC_Y, tv_priv->save_TV_CSC_Y);
- I915_WRITE(TV_CSC_Y2, tv_priv->save_TV_CSC_Y2);
- I915_WRITE(TV_CSC_U, tv_priv->save_TV_CSC_U);
- I915_WRITE(TV_CSC_U2, tv_priv->save_TV_CSC_U2);
- I915_WRITE(TV_CSC_V, tv_priv->save_TV_CSC_V);
- I915_WRITE(TV_CSC_V2, tv_priv->save_TV_CSC_V2);
- I915_WRITE(TV_CLR_KNOBS, tv_priv->save_TV_CLR_KNOBS);
- I915_WRITE(TV_CLR_LEVEL, tv_priv->save_TV_CLR_LEVEL);
-
- {
- int pipeconf_reg = (intel_crtc->pipe == 0) ?
- PIPEACONF : PIPEBCONF;
- int dspcntr_reg = (intel_crtc->plane == 0) ?
- DSPACNTR : DSPBCNTR;
- int pipeconf = I915_READ(pipeconf_reg);
- int dspcntr = I915_READ(dspcntr_reg);
- int dspbase_reg = (intel_crtc->plane == 0) ?
- DSPAADDR : DSPBADDR;
- /* Pipe must be off here */
- I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
-
- if (!IS_I9XX(dev)) {
- /* Wait for vblank for the disable to take effect */
- intel_wait_for_vblank(dev);
- }
-
- I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE);
- /* Wait for vblank for the disable to take effect. */
- intel_wait_for_vblank(dev);
-
- /* Filter ctl must be set before TV_WIN_SIZE */
- I915_WRITE(TV_FILTER_CTL_1, tv_priv->save_TV_FILTER_CTL_1);
- I915_WRITE(TV_FILTER_CTL_2, tv_priv->save_TV_FILTER_CTL_2);
- I915_WRITE(TV_FILTER_CTL_3, tv_priv->save_TV_FILTER_CTL_3);
- I915_WRITE(TV_WIN_POS, tv_priv->save_TV_WIN_POS);
- I915_WRITE(TV_WIN_SIZE, tv_priv->save_TV_WIN_SIZE);
- I915_WRITE(pipeconf_reg, pipeconf);
- I915_WRITE(dspcntr_reg, dspcntr);
- /* Flush the plane changes */
- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
- }
-
- for (i = 0; i < 60; i++)
- I915_WRITE(TV_H_LUMA_0 + (i <<2), tv_priv->save_TV_H_LUMA[i]);
- for (i = 0; i < 60; i++)
- I915_WRITE(TV_H_CHROMA_0 + (i <<2), tv_priv->save_TV_H_CHROMA[i]);
- for (i = 0; i < 43; i++)
- I915_WRITE(TV_V_LUMA_0 + (i <<2), tv_priv->save_TV_V_LUMA[i]);
- for (i = 0; i < 43; i++)
- I915_WRITE(TV_V_CHROMA_0 + (i <<2), tv_priv->save_TV_V_CHROMA[i]);
-
- I915_WRITE(TV_DAC, tv_priv->save_TV_DAC);
- I915_WRITE(TV_CTL, tv_priv->save_TV_CTL);
-}
-
static const struct tv_mode *
intel_tv_mode_lookup (char *tv_format)
{
@@ -1078,7 +941,8 @@ intel_tv_mode_find (struct intel_encoder *intel_encoder)
static enum drm_mode_status
intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
/* Ensure TV refresh is close to desired refresh */
@@ -1441,7 +1305,8 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder
*/
static void intel_tv_find_better_format(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
int i;
@@ -1475,9 +1340,9 @@ intel_tv_detect(struct drm_connector *connector)
{
struct drm_crtc *crtc;
struct drm_display_mode mode;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
- struct drm_encoder *encoder = &intel_encoder->enc;
int dpms_mode;
int type = tv_priv->type;
@@ -1487,10 +1352,12 @@ intel_tv_detect(struct drm_connector *connector)
if (encoder->crtc && encoder->crtc->enabled) {
type = intel_tv_detect_type(encoder->crtc, intel_encoder);
} else {
- crtc = intel_get_load_detect_pipe(intel_encoder, &mode, &dpms_mode);
+ crtc = intel_get_load_detect_pipe(intel_encoder, connector,
+ &mode, &dpms_mode);
if (crtc) {
type = intel_tv_detect_type(crtc, intel_encoder);
- intel_release_load_detect_pipe(intel_encoder, dpms_mode);
+ intel_release_load_detect_pipe(intel_encoder, connector,
+ dpms_mode);
} else
type = -1;
}
@@ -1525,7 +1392,8 @@ static void
intel_tv_chose_preferred_modes(struct drm_connector *connector,
struct drm_display_mode *mode_ptr)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
@@ -1550,7 +1418,8 @@ static int
intel_tv_get_modes(struct drm_connector *connector)
{
struct drm_display_mode *mode_ptr;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
int j, count = 0;
u64 tmp;
@@ -1604,11 +1473,9 @@ intel_tv_get_modes(struct drm_connector *connector)
static void
intel_tv_destroy (struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
- kfree(intel_encoder);
+ kfree(connector);
}
@@ -1617,9 +1484,9 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
uint64_t val)
{
struct drm_device *dev = connector->dev;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
- struct drm_encoder *encoder = &intel_encoder->enc;
struct drm_crtc *crtc = encoder->crtc;
int ret = 0;
bool changed = false;
@@ -1676,8 +1543,6 @@ static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
static const struct drm_connector_funcs intel_tv_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .save = intel_tv_save,
- .restore = intel_tv_restore,
.detect = intel_tv_detect,
.destroy = intel_tv_destroy,
.set_property = intel_tv_set_property,
@@ -1687,12 +1552,15 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = {
static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
.mode_valid = intel_tv_mode_valid,
.get_modes = intel_tv_get_modes,
- .best_encoder = intel_best_encoder,
+ .best_encoder = intel_attached_encoder,
};
static void intel_tv_enc_destroy(struct drm_encoder *encoder)
{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_tv_enc_funcs = {
@@ -1741,6 +1609,7 @@ intel_tv_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
struct intel_tv_priv *tv_priv;
u32 tv_dac_on, tv_dac_off, save_tv_dac;
char **tv_format_names;
@@ -1786,7 +1655,13 @@ intel_tv_init(struct drm_device *dev)
return;
}
- connector = &intel_encoder->base;
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_encoder);
+ return;
+ }
+
+ connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_tv_connector_funcs,
DRM_MODE_CONNECTOR_SVIDEO);
@@ -1794,7 +1669,7 @@ intel_tv_init(struct drm_device *dev)
drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs,
DRM_MODE_ENCODER_TVDAC);
- drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc);
+ drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
tv_priv = (struct intel_tv_priv *)(intel_encoder + 1);
intel_encoder->type = INTEL_OUTPUT_TVOUT;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 957d17629840..fb164efada3b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -225,7 +225,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
nouveau_bo_placement_set(nvbo, memtype, 0);
- ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
+ ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
if (ret == 0) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
@@ -261,7 +261,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
- ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
+ ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
if (ret == 0) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
@@ -391,25 +391,16 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
break;
case TTM_PL_VRAM:
man->flags = TTM_MEMTYPE_FLAG_FIXED |
- TTM_MEMTYPE_FLAG_MAPPABLE |
- TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
+ TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
-
- man->io_addr = NULL;
- man->io_offset = drm_get_resource_start(dev, 1);
- man->io_size = drm_get_resource_len(dev, 1);
- if (man->io_size > dev_priv->vram_size)
- man->io_size = dev_priv->vram_size;
-
man->gpu_offset = dev_priv->vm_vram_base;
break;
case TTM_PL_TT:
switch (dev_priv->gart_info.type) {
case NOUVEAU_GART_AGP:
- man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
- TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED;
man->default_caching = TTM_PL_FLAG_UNCACHED;
break;
@@ -424,10 +415,6 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
dev_priv->gart_info.type);
return -EINVAL;
}
-
- man->io_offset = dev_priv->gart_info.aper_base;
- man->io_size = dev_priv->gart_info.aper_size;
- man->io_addr = NULL;
man->gpu_offset = dev_priv->vm_gart_base;
break;
default:
@@ -462,7 +449,8 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
- struct nouveau_bo *nvbo, bool evict, bool no_wait,
+ struct nouveau_bo *nvbo, bool evict,
+ bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct nouveau_fence *fence = NULL;
@@ -473,7 +461,7 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
return ret;
ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
- evict, no_wait, new_mem);
+ evict, no_wait_reserve, no_wait_gpu, new_mem);
if (nvbo->channel && nvbo->channel != chan)
ret = nouveau_fence_wait(fence, NULL, false, false);
nouveau_fence_unref((void *)&fence);
@@ -497,7 +485,8 @@ nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
- int no_wait, struct ttm_mem_reg *new_mem)
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
@@ -575,12 +564,13 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
dst_offset += (PAGE_SIZE * line_count);
}
- return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait, new_mem);
+ return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem);
}
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait, struct ttm_mem_reg *new_mem)
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
struct ttm_placement placement;
@@ -593,7 +583,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
- ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
+ ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
if (ret)
return ret;
@@ -601,11 +591,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
goto out;
- ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem);
+ ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
if (ret)
goto out;
- ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
+ ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
out:
if (tmp_mem.mm_node) {
spin_lock(&bo->bdev->glob->lru_lock);
@@ -618,7 +608,8 @@ out:
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait, struct ttm_mem_reg *new_mem)
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
struct ttm_placement placement;
@@ -631,15 +622,15 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
- ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
+ ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
if (ret)
return ret;
- ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem);
+ ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
if (ret)
goto out;
- ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
+ ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
if (ret)
goto out;
@@ -706,7 +697,8 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait, struct ttm_mem_reg *new_mem)
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -721,7 +713,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
/* Software copy if the card isn't up and running yet. */
if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
!dev_priv->channel) {
- ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
goto out;
}
@@ -735,17 +727,17 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
/* Hardware assisted copy. */
if (new_mem->mem_type == TTM_PL_SYSTEM)
- ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem);
+ ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
else if (old_mem->mem_type == TTM_PL_SYSTEM)
- ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem);
+ ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
else
- ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
+ ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
if (!ret)
goto out;
/* Fallback to software copy. */
- ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
out:
if (ret)
@@ -762,6 +754,55 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
return 0;
}
+static int
+nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+ struct drm_device *dev = dev_priv->dev;
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* System memory */
+ return 0;
+ case TTM_PL_TT:
+#if __OS_HAS_AGP
+ if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+ mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ mem->bus.base = dev_priv->gart_info.aper_base;
+ mem->bus.is_iomem = true;
+ }
+#endif
+ break;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ mem->bus.base = drm_get_resource_start(dev, 1);
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void
+nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int
+nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
+{
+ return 0;
+}
+
struct ttm_bo_driver nouveau_bo_driver = {
.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
.invalidate_caches = nouveau_bo_invalidate_caches,
@@ -774,5 +815,8 @@ struct ttm_bo_driver nouveau_bo_driver = {
.sync_obj_flush = nouveau_fence_flush,
.sync_obj_unref = nouveau_fence_unref,
.sync_obj_ref = nouveau_fence_ref,
+ .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
+ .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
+ .io_mem_free = &nouveau_ttm_io_mem_free,
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index a251886a0ce6..7933de4aff2e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -33,6 +33,8 @@
#include "drmP.h"
#include "nouveau_drv.h"
+#include <ttm/ttm_page_alloc.h>
+
static int
nouveau_debugfs_channel_info(struct seq_file *m, void *data)
{
@@ -159,6 +161,7 @@ static struct drm_info_list nouveau_debugfs_list[] = {
{ "chipset", nouveau_debugfs_chipset_info, 0, NULL },
{ "memory", nouveau_debugfs_memory_info, 0, NULL },
{ "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
+ { "ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL },
};
#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index cf1c5c0a0abe..9d7928f40fdf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -34,10 +34,6 @@ static void
nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
{
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
- struct drm_device *dev = drm_fb->dev;
-
- if (drm_fb->fbdev)
- nouveau_fbcon_remove(dev, drm_fb);
if (fb->nvbo)
drm_gem_object_unreference_unlocked(fb->nvbo->gem);
@@ -61,27 +57,20 @@ static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
.create_handle = nouveau_user_framebuffer_create_handle,
};
-struct drm_framebuffer *
-nouveau_framebuffer_create(struct drm_device *dev, struct nouveau_bo *nvbo,
- struct drm_mode_fb_cmd *mode_cmd)
+int
+nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb,
+ struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo)
{
- struct nouveau_framebuffer *fb;
int ret;
- fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
- if (!fb)
- return NULL;
-
- ret = drm_framebuffer_init(dev, &fb->base, &nouveau_framebuffer_funcs);
+ ret = drm_framebuffer_init(dev, &nouveau_fb->base, &nouveau_framebuffer_funcs);
if (ret) {
- kfree(fb);
- return NULL;
+ return ret;
}
- drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
-
- fb->nvbo = nvbo;
- return &fb->base;
+ drm_helper_mode_fill_fb_struct(&nouveau_fb->base, mode_cmd);
+ nouveau_fb->nvbo = nvbo;
+ return 0;
}
static struct drm_framebuffer *
@@ -89,24 +78,28 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
struct drm_mode_fb_cmd *mode_cmd)
{
- struct drm_framebuffer *fb;
+ struct nouveau_framebuffer *nouveau_fb;
struct drm_gem_object *gem;
+ int ret;
gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
if (!gem)
return NULL;
- fb = nouveau_framebuffer_create(dev, nouveau_gem_object(gem), mode_cmd);
- if (!fb) {
+ nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
+ if (!nouveau_fb)
+ return NULL;
+
+ ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem));
+ if (ret) {
drm_gem_object_unreference(gem);
return NULL;
}
- return fb;
+ return &nouveau_fb->base;
}
const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
.fb_create = nouveau_user_framebuffer_create,
- .fb_changed = nouveau_fbcon_probe,
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 1de974acbc65..c6079e36669d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -153,7 +153,6 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_channel *chan;
struct drm_crtc *crtc;
- uint32_t fbdev_flags;
int ret, i;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
@@ -163,8 +162,7 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
return 0;
NV_INFO(dev, "Disabling fbcon acceleration...\n");
- fbdev_flags = dev_priv->fbdev_info->flags;
- dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_save_disable_accel(dev);
NV_INFO(dev, "Unpinning framebuffer(s)...\n");
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -230,9 +228,9 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
}
acquire_console_sem();
- fb_set_suspend(dev_priv->fbdev_info, 1);
+ nouveau_fbcon_set_suspend(dev, 1);
release_console_sem();
- dev_priv->fbdev_info->flags = fbdev_flags;
+ nouveau_fbcon_restore_accel(dev);
return 0;
out_abort:
@@ -250,14 +248,12 @@ nouveau_pci_resume(struct pci_dev *pdev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine *engine = &dev_priv->engine;
struct drm_crtc *crtc;
- uint32_t fbdev_flags;
int ret, i;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
- fbdev_flags = dev_priv->fbdev_info->flags;
- dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_save_disable_accel(dev);
NV_INFO(dev, "We're back, enabling device...\n");
pci_set_power_state(pdev, PCI_D0);
@@ -332,13 +328,14 @@ nouveau_pci_resume(struct pci_dev *pdev)
}
acquire_console_sem();
- fb_set_suspend(dev_priv->fbdev_info, 0);
+ nouveau_fbcon_set_suspend(dev, 0);
release_console_sem();
- nouveau_fbcon_zfill(dev);
+ nouveau_fbcon_zfill_all(dev);
drm_helper_resume_force_mode(dev);
- dev_priv->fbdev_info->flags = fbdev_flags;
+
+ nouveau_fbcon_restore_accel(dev);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index ace630aa89e1..5b47b79f45e8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -535,6 +535,7 @@ struct drm_nouveau_private {
struct fb_info *fbdev_info;
+ int fifo_alloc_count;
struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];
struct nouveau_engine engine;
@@ -621,6 +622,8 @@ struct drm_nouveau_private {
struct {
struct dentry *channel_root;
} debugfs;
+
+ struct nouveau_fbdev *nfbdev;
};
static inline struct drm_nouveau_private *
diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h
index 4a3f31aa1949..d432134b71e0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fb.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fb.h
@@ -40,8 +40,6 @@ nouveau_framebuffer(struct drm_framebuffer *fb)
extern const struct drm_mode_config_funcs nouveau_mode_config_funcs;
-struct drm_framebuffer *
-nouveau_framebuffer_create(struct drm_device *, struct nouveau_bo *,
- struct drm_mode_fb_cmd *);
-
+int nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb,
+ struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo);
#endif /* __NOUVEAU_FB_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 8e7dc1d4912a..f29fa8c117ce 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -52,8 +52,8 @@
static int
nouveau_fbcon_sync(struct fb_info *info)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
int ret, i;
@@ -97,7 +97,6 @@ static struct fb_ops nouveau_fbcon_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_setcolreg = drm_fb_helper_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
@@ -111,7 +110,6 @@ static struct fb_ops nv04_fbcon_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_setcolreg = drm_fb_helper_setcolreg,
.fb_fillrect = nv04_fbcon_fillrect,
.fb_copyarea = nv04_fbcon_copyarea,
.fb_imageblit = nv04_fbcon_imageblit,
@@ -125,7 +123,6 @@ static struct fb_ops nv50_fbcon_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_setcolreg = drm_fb_helper_setcolreg,
.fb_fillrect = nv50_fbcon_fillrect,
.fb_copyarea = nv50_fbcon_copyarea,
.fb_imageblit = nv50_fbcon_imageblit,
@@ -155,11 +152,6 @@ static void nouveau_fbcon_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
*blue = nv_crtc->lut.b[regno];
}
-static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
- .gamma_set = nouveau_fbcon_gamma_set,
- .gamma_get = nouveau_fbcon_gamma_get
-};
-
#if defined(__i386__) || defined(__x86_64__)
static bool
nouveau_fbcon_has_vesafb_or_efifb(struct drm_device *dev)
@@ -198,11 +190,10 @@ not_fb:
}
#endif
-void
-nouveau_fbcon_zfill(struct drm_device *dev)
+static void
+nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct fb_info *info = dev_priv->fbdev_info;
+ struct fb_info *info = nfbdev->helper.fbdev;
struct fb_fillrect rect;
/* Clear the entire fbcon. The drm will program every connector
@@ -218,14 +209,12 @@ nouveau_fbcon_zfill(struct drm_device *dev)
}
static int
-nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
- uint32_t fb_height, uint32_t surface_width,
- uint32_t surface_height, uint32_t surface_depth,
- uint32_t surface_bpp, struct drm_framebuffer **pfb)
+nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
+ struct drm_fb_helper_surface_size *sizes)
{
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct fb_info *info;
- struct nouveau_fbcon_par *par;
struct drm_framebuffer *fb;
struct nouveau_framebuffer *nouveau_fb;
struct nouveau_bo *nvbo;
@@ -233,13 +222,13 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
struct device *device = &dev->pdev->dev;
int size, ret;
- mode_cmd.width = surface_width;
- mode_cmd.height = surface_height;
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
- mode_cmd.bpp = surface_bpp;
+ mode_cmd.bpp = sizes->surface_bpp;
mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
mode_cmd.pitch = roundup(mode_cmd.pitch, 256);
- mode_cmd.depth = surface_depth;
+ mode_cmd.depth = sizes->surface_depth;
size = mode_cmd.pitch * mode_cmd.height;
size = roundup(size, PAGE_SIZE);
@@ -268,31 +257,28 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
mutex_lock(&dev->struct_mutex);
- fb = nouveau_framebuffer_create(dev, nvbo, &mode_cmd);
- if (!fb) {
+ info = framebuffer_alloc(0, device);
+ if (!info) {
ret = -ENOMEM;
- NV_ERROR(dev, "failed to allocate fb.\n");
goto out_unref;
}
- list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
-
- nouveau_fb = nouveau_framebuffer(fb);
- *pfb = fb;
-
- info = framebuffer_alloc(sizeof(struct nouveau_fbcon_par), device);
- if (!info) {
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
ret = -ENOMEM;
goto out_unref;
}
- par = info->par;
- par->helper.funcs = &nouveau_fbcon_helper_funcs;
- par->helper.dev = dev;
- ret = drm_fb_helper_init_crtc_count(&par->helper, 2, 4);
- if (ret)
- goto out_unref;
- dev_priv->fbdev_info = info;
+ info->par = nfbdev;
+
+ nouveau_framebuffer_init(dev, &nfbdev->nouveau_fb, &mode_cmd, nvbo);
+
+ nouveau_fb = &nfbdev->nouveau_fb;
+ fb = &nouveau_fb->base;
+
+ /* setup helper */
+ nfbdev->helper.fb = fb;
+ nfbdev->helper.fbdev = info;
strcpy(info->fix.id, "nouveaufb");
if (nouveau_nofbaccel)
@@ -310,7 +296,7 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
info->screen_size = size;
drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
- drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
+ drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height);
/* FIXME: we really shouldn't expose mmio space at all */
info->fix.mmio_start = pci_resource_start(dev->pdev, 1);
@@ -343,11 +329,6 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
info->pixmap.flags = FB_PIXMAP_SYSTEM;
info->pixmap.scan_align = 1;
- fb->fbdev = info;
-
- par->nouveau_fb = nouveau_fb;
- par->dev = dev;
-
if (dev_priv->channel && !nouveau_nofbaccel) {
switch (dev_priv->card_type) {
case NV_50:
@@ -361,7 +342,7 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
};
}
- nouveau_fbcon_zfill(dev);
+ nouveau_fbcon_zfill(dev, nfbdev);
/* To allow resizing without swapping buffers */
NV_INFO(dev, "allocated %dx%d fb: 0x%lx, bo %p\n",
@@ -379,44 +360,129 @@ out:
return ret;
}
-int
-nouveau_fbcon_probe(struct drm_device *dev)
+static int
+nouveau_fbcon_find_or_create_single(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
{
- NV_DEBUG_KMS(dev, "\n");
+ struct nouveau_fbdev *nfbdev = (struct nouveau_fbdev *)helper;
+ int new_fb = 0;
+ int ret;
+
+ if (!helper->fb) {
+ ret = nouveau_fbcon_create(nfbdev, sizes);
+ if (ret)
+ return ret;
+ new_fb = 1;
+ }
+ return new_fb;
+}
- return drm_fb_helper_single_fb_probe(dev, 32, nouveau_fbcon_create);
+void nouveau_fbcon_hotplug(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ drm_helper_fb_hpd_irq_event(&dev_priv->nfbdev->helper);
+}
+
+static void nouveau_fbcon_output_status_changed(struct drm_fb_helper *fb_helper)
+{
+ drm_helper_fb_hotplug_event(fb_helper, true);
}
int
-nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb)
+nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
{
- struct nouveau_framebuffer *nouveau_fb = nouveau_framebuffer(fb);
+ struct nouveau_framebuffer *nouveau_fb = &nfbdev->nouveau_fb;
struct fb_info *info;
- if (!fb)
- return -EINVAL;
-
- info = fb->fbdev;
- if (info) {
- struct nouveau_fbcon_par *par = info->par;
-
+ if (nfbdev->helper.fbdev) {
+ info = nfbdev->helper.fbdev;
unregister_framebuffer(info);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+
+ if (nouveau_fb->nvbo) {
nouveau_bo_unmap(nouveau_fb->nvbo);
drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
nouveau_fb->nvbo = NULL;
- if (par)
- drm_fb_helper_free(&par->helper);
- framebuffer_release(info);
}
-
+ drm_fb_helper_fini(&nfbdev->helper);
+ drm_framebuffer_cleanup(&nouveau_fb->base);
return 0;
}
void nouveau_fbcon_gpu_lockup(struct fb_info *info)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
info->flags |= FBINFO_HWACCEL_DISABLED;
}
+
+static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
+ .gamma_set = nouveau_fbcon_gamma_set,
+ .gamma_get = nouveau_fbcon_gamma_get,
+ .fb_probe = nouveau_fbcon_find_or_create_single,
+ .fb_output_status_changed = nouveau_fbcon_output_status_changed,
+};
+
+
+int nouveau_fbcon_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fbdev *nfbdev;
+
+ nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
+ if (!nfbdev)
+ return -ENOMEM;
+
+ nfbdev->dev = dev;
+ dev_priv->nfbdev = nfbdev;
+ nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs;
+
+ drm_fb_helper_init(dev, &nfbdev->helper,
+ 2, 4, true);
+ drm_fb_helper_single_add_all_connectors(&nfbdev->helper);
+ drm_fb_helper_initial_config(&nfbdev->helper, 32);
+ return 0;
+}
+
+void nouveau_fbcon_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->nfbdev)
+ return;
+
+ nouveau_fbcon_destroy(dev, dev_priv->nfbdev);
+ kfree(dev_priv->nfbdev);
+ dev_priv->nfbdev = NULL;
+}
+
+void nouveau_fbcon_save_disable_accel(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ dev_priv->nfbdev->saved_flags = dev_priv->nfbdev->helper.fbdev->flags;
+ dev_priv->nfbdev->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
+}
+
+void nouveau_fbcon_restore_accel(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ dev_priv->nfbdev->helper.fbdev->flags = dev_priv->nfbdev->saved_flags;
+}
+
+void nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ fb_set_suspend(dev_priv->nfbdev->helper.fbdev, state);
+}
+
+void nouveau_fbcon_zfill_all(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ nouveau_fbcon_zfill(dev, dev_priv->nfbdev);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index f9c34e1a8c11..bf8e00d4de65 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -29,16 +29,16 @@
#include "drm_fb_helper.h"
-struct nouveau_fbcon_par {
+#include "nouveau_fb.h"
+struct nouveau_fbdev {
struct drm_fb_helper helper;
+ struct nouveau_framebuffer nouveau_fb;
+ struct list_head fbdev_list;
struct drm_device *dev;
- struct nouveau_framebuffer *nouveau_fb;
+ unsigned int saved_flags;
};
-int nouveau_fbcon_probe(struct drm_device *dev);
-int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb);
void nouveau_fbcon_restore(void);
-void nouveau_fbcon_zfill(struct drm_device *dev);
void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
@@ -50,5 +50,14 @@ void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
int nv50_fbcon_accel_init(struct fb_info *info);
void nouveau_fbcon_gpu_lockup(struct fb_info *info);
+
+int nouveau_fbcon_init(struct drm_device *dev);
+void nouveau_fbcon_fini(struct drm_device *dev);
+void nouveau_fbcon_set_suspend(struct drm_device *dev, int state);
+void nouveau_fbcon_zfill_all(struct drm_device *dev);
+void nouveau_fbcon_save_disable_accel(struct drm_device *dev);
+void nouveau_fbcon_restore_accel(struct drm_device *dev);
+
+void nouveau_fbcon_hotplug(struct drm_device *dev);
#endif /* __NV50_FBCON_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 1bc0b38a5167..69c76cf93407 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -57,6 +57,9 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
}
ttm_bo_unref(&bo);
+
+ drm_gem_object_release(gem);
+ kfree(gem);
}
int
@@ -382,7 +385,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
nvbo->channel = chan;
ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
- false, false);
+ false, false, false);
nvbo->channel = NULL;
if (unlikely(ret)) {
NV_ERROR(dev, "fail ttm_validate\n");
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 13e73cee4c44..53360f156063 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -1204,7 +1204,7 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *)arg;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t status, fbdev_flags = 0;
+ uint32_t status;
unsigned long flags;
status = nv_rd32(dev, NV03_PMC_INTR_0);
@@ -1213,11 +1213,6 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- if (dev_priv->fbdev_info) {
- fbdev_flags = dev_priv->fbdev_info->flags;
- dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
- }
-
if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
nouveau_fifo_irq_handler(dev);
status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
@@ -1247,9 +1242,6 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
if (status)
NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status);
- if (dev_priv->fbdev_info)
- dev_priv->fbdev_info->flags = fbdev_flags;
-
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
return IRQ_HANDLED;
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index e1710640a278..92100a9678ba 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -34,6 +34,7 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
+#include "nouveau_fbcon.h"
#include "nv50_display.h"
static void nouveau_stub_takedown(struct drm_device *dev) {}
@@ -516,7 +517,7 @@ nouveau_card_init(struct drm_device *dev)
dev_priv->init_state = NOUVEAU_CARD_INIT_DONE;
if (drm_core_check_feature(dev, DRIVER_MODESET))
- drm_helper_initial_config(dev);
+ nouveau_fbcon_init(dev);
return 0;
@@ -563,6 +564,7 @@ static void nouveau_card_takedown(struct drm_device *dev)
NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);
if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) {
+
nouveau_backlight_exit(dev);
if (dev_priv->channel) {
@@ -794,6 +796,7 @@ int nouveau_unload(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ nouveau_fbcon_fini(dev);
if (dev_priv->card_type >= NV_50)
nv50_display_destroy(dev);
else
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 813b25cec726..603090ee6ac7 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -30,8 +30,8 @@
void
nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
@@ -57,8 +57,8 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
void
nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
@@ -91,8 +91,8 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
void
nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
uint32_t fg;
@@ -179,8 +179,8 @@ nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle)
int
nv04_fbcon_accel_init(struct fb_info *info)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
const int sub = NvSubCtxSurf2D;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 649db4c1b690..f9b304866e66 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -29,6 +29,7 @@
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
#include "nouveau_fb.h"
+#include "nouveau_fbcon.h"
#include "drm_crtc_helper.h"
static void
@@ -945,6 +946,8 @@ nv50_display_irq_hotplug_bh(struct work_struct *work)
nv_wr32(dev, 0xe054, nv_rd32(dev, 0xe054));
if (dev_priv->chipset >= 0x90)
nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074));
+
+ nouveau_fbcon_hotplug(dev);
}
void
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index a8c70e7e9184..6bf025c6fc6f 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -6,8 +6,8 @@
void
nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
@@ -49,8 +49,8 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
void
nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
@@ -84,8 +84,8 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
void
nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
uint32_t width, dwords, *data = (uint32_t *)image->data;
@@ -152,8 +152,8 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
int
nv50_fbcon_accel_init(struct fb_info *info)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
struct nouveau_gpuobj *eng2d = NULL;
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 27e2c715be11..1bc72c3190a9 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -3780,7 +3780,7 @@ typedef struct _ATOM_ASIC_SS_ASSIGNMENT
UCHAR ucReserved[2];
}ATOM_ASIC_SS_ASSIGNMENT;
-//Define ucClockIndication, SW uses the IDs below to search if the SS is requried/enabled on a clock branch/signal type.
+//Define ucClockIndication, SW uses the IDs below to search if the SS is required/enabled on a clock branch/signal type.
//SS is not required or enabled if a match is not found.
#define ASIC_INTERNAL_MEMORY_SS 1
#define ASIC_INTERNAL_ENGINE_SS 2
@@ -5742,6 +5742,9 @@ typedef struct _ATOM_PPLIB_THERMALCONTROLLER
#define ATOM_PP_THERMALCONTROLLER_RV6xx 7
#define ATOM_PP_THERMALCONTROLLER_RV770 8
#define ATOM_PP_THERMALCONTROLLER_ADT7473 9
+#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11
+#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
+#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller
typedef struct _ATOM_PPLIB_STATE
{
@@ -5749,6 +5752,26 @@ typedef struct _ATOM_PPLIB_STATE
UCHAR ucClockStateIndices[1]; // variable-sized
} ATOM_PPLIB_STATE;
+typedef struct _ATOM_PPLIB_FANTABLE
+{
+ UCHAR ucFanTableFormat; // Change this if the table format changes or version changes so that the other fields are not the same.
+ UCHAR ucTHyst; // Temperature hysteresis. Integer.
+ USHORT usTMin; // The temperature, in 0.01 centigrades, below which we just run at a minimal PWM.
+ USHORT usTMed; // The middle temperature where we change slopes.
+ USHORT usTHigh; // The high point above TMed for adjusting the second slope.
+ USHORT usPWMMin; // The minimum PWM value in percent (0.01% increments).
+ USHORT usPWMMed; // The PWM value (in percent) at TMed.
+ USHORT usPWMHigh; // The PWM value at THigh.
+} ATOM_PPLIB_FANTABLE;
+
+typedef struct _ATOM_PPLIB_EXTENDEDHEADER
+{
+ USHORT usSize;
+ ULONG ulMaxEngineClock; // For Overdrive.
+ ULONG ulMaxMemoryClock; // For Overdrive.
+ // Add extra system parameters here, always adjust size to include all fields.
+} ATOM_PPLIB_EXTENDEDHEADER;
+
//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
@@ -5762,6 +5785,12 @@ typedef struct _ATOM_PPLIB_STATE
#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
+#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096
+#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000 // Go to boot state on alerts, e.g. on an AC->DC transition.
+#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000 // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition).
+#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000 // Does the driver control VDDCI independently from VDDC.
+#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000 // Enable the 'regulator hot' feature.
+#define ATOM_PP_PLATFORM_CAP_BACO 0x00020000 // Does the driver support BACO state.
typedef struct _ATOM_PPLIB_POWERPLAYTABLE
{
@@ -5797,6 +5826,21 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE
} ATOM_PPLIB_POWERPLAYTABLE;
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE2
+{
+ ATOM_PPLIB_POWERPLAYTABLE basicTable;
+ UCHAR ucNumCustomThermalPolicy;
+ USHORT usCustomThermalPolicyArrayOffset;
+}ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
+{
+ ATOM_PPLIB_POWERPLAYTABLE2 basicTable2;
+ USHORT usFormatID; // To be used ONLY by PPGen.
+ USHORT usFanTableOffset;
+ USHORT usExtendendedHeaderOffset;
+} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
+
//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007
#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0
@@ -5816,7 +5860,9 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE
#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400
#define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800
#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000
-// remaining 3 bits are reserved
+#define ATOM_PPLIB_CLASSIFICATION_HD2STATE 0x2000
+#define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000
+#define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000
//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001
@@ -5840,9 +5886,15 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE
#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000
#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000
+#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000
#define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000
-#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000
+// memory-related flags
+#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF 0x000010000
+
+//M3 Arb //2bits, current 3 sets of parameters in total
+#define ATOM_PPLIB_M3ARB_MASK 0x00060000
+#define ATOM_PPLIB_M3ARB_SHIFT 17
// Contained in an array starting at the offset
// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
@@ -5860,6 +5912,9 @@ typedef struct _ATOM_PPLIB_NONCLOCK_INFO
// Contained in an array starting at the offset
// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
+#define ATOM_PPLIB_NONCLOCKINFO_VER1 12
+#define ATOM_PPLIB_NONCLOCKINFO_VER2 24
+
typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
{
USHORT usEngineClockLow;
@@ -5882,6 +5937,23 @@ typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4
#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8
#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16
+#define ATOM_PPLIB_R600_FLAGS_LOWPOWER 32 // On the RV770 use 'low power' setting (sequencer S0).
+
+typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO
+{
+ USHORT usEngineClockLow;
+ UCHAR ucEngineClockHigh;
+
+ USHORT usMemoryClockLow;
+ UCHAR ucMemoryClockHigh;
+
+ USHORT usVDDC;
+ USHORT usVDDCI;
+ USHORT usUnused;
+
+ ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
+
+} ATOM_PPLIB_EVERGREEN_CLOCK_INFO;
typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
@@ -5895,7 +5967,7 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
UCHAR ucPadding; // For proper alignment and size.
USHORT usVDDC; // For the 780, use: None, Low, High, Variable
UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16}
- UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could be bigger as display BW requriement.
+ UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could be bigger as display BW requirement.
USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
ULONG ulFlags;
} ATOM_PPLIB_RS780_CLOCK_INFO;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index a87990b3ae84..3feca6aec4c4 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -249,17 +249,13 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
if (ASIC_IS_DCE3(rdev))
atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
atombios_blank_crtc(crtc, ATOM_DISABLE);
- /* XXX re-enable when interrupt support is added */
- if (!ASIC_IS_DCE4(rdev))
- drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
+ drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
radeon_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- /* XXX re-enable when interrupt support is added */
- if (!ASIC_IS_DCE4(rdev))
- drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
+ drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
atombios_blank_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev))
atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index e8f447e20507..b3d168fb89e5 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -28,39 +28,194 @@
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
-#include "rv770d.h"
+#include "evergreend.h"
#include "atom.h"
#include "avivod.h"
#include "evergreen_reg.h"
+#define EVERGREEN_PFP_UCODE_SIZE 1120
+#define EVERGREEN_PM4_UCODE_SIZE 1376
+
static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
bool connected = false;
- /* XXX */
+
+ switch (hpd) {
+ case RADEON_HPD_1:
+ if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_2:
+ if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_3:
+ if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_4:
+ if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_5:
+ if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_6:
+ if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ default:
+ break;
+ }
+
return connected;
}
void evergreen_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd)
{
- /* XXX */
+ u32 tmp;
+ bool connected = evergreen_hpd_sense(rdev, hpd);
+
+ switch (hpd) {
+ case RADEON_HPD_1:
+ tmp = RREG32(DC_HPD1_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_2:
+ tmp = RREG32(DC_HPD2_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_3:
+ tmp = RREG32(DC_HPD3_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_4:
+ tmp = RREG32(DC_HPD4_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_5:
+ tmp = RREG32(DC_HPD5_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_6:
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ break;
+ default:
+ break;
+ }
}
void evergreen_hpd_init(struct radeon_device *rdev)
{
- /* XXX */
+ struct drm_device *dev = rdev->ddev;
+ struct drm_connector *connector;
+ u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
+ DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(DC_HPD1_CONTROL, tmp);
+ rdev->irq.hpd[0] = true;
+ break;
+ case RADEON_HPD_2:
+ WREG32(DC_HPD2_CONTROL, tmp);
+ rdev->irq.hpd[1] = true;
+ break;
+ case RADEON_HPD_3:
+ WREG32(DC_HPD3_CONTROL, tmp);
+ rdev->irq.hpd[2] = true;
+ break;
+ case RADEON_HPD_4:
+ WREG32(DC_HPD4_CONTROL, tmp);
+ rdev->irq.hpd[3] = true;
+ break;
+ case RADEON_HPD_5:
+ WREG32(DC_HPD5_CONTROL, tmp);
+ rdev->irq.hpd[4] = true;
+ break;
+ case RADEON_HPD_6:
+ WREG32(DC_HPD6_CONTROL, tmp);
+ rdev->irq.hpd[5] = true;
+ break;
+ default:
+ break;
+ }
+ }
+ if (rdev->irq.installed)
+ evergreen_irq_set(rdev);
}
-
-void evergreen_bandwidth_update(struct radeon_device *rdev)
+void evergreen_hpd_fini(struct radeon_device *rdev)
{
- /* XXX */
+ struct drm_device *dev = rdev->ddev;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(DC_HPD1_CONTROL, 0);
+ rdev->irq.hpd[0] = false;
+ break;
+ case RADEON_HPD_2:
+ WREG32(DC_HPD2_CONTROL, 0);
+ rdev->irq.hpd[1] = false;
+ break;
+ case RADEON_HPD_3:
+ WREG32(DC_HPD3_CONTROL, 0);
+ rdev->irq.hpd[2] = false;
+ break;
+ case RADEON_HPD_4:
+ WREG32(DC_HPD4_CONTROL, 0);
+ rdev->irq.hpd[3] = false;
+ break;
+ case RADEON_HPD_5:
+ WREG32(DC_HPD5_CONTROL, 0);
+ rdev->irq.hpd[4] = false;
+ break;
+ case RADEON_HPD_6:
+ WREG32(DC_HPD6_CONTROL, 0);
+ rdev->irq.hpd[5] = false;
+ break;
+ default:
+ break;
+ }
+ }
}
-void evergreen_hpd_fini(struct radeon_device *rdev)
+void evergreen_bandwidth_update(struct radeon_device *rdev)
{
/* XXX */
}
@@ -83,10 +238,31 @@ static int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
/*
* GART
*/
+void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
+{
+ unsigned i;
+ u32 tmp;
+
+ WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ /* read MC_STATUS */
+ tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
+ tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
+ if (tmp == 2) {
+ printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
+ return;
+ }
+ if (tmp) {
+ return;
+ }
+ udelay(1);
+ }
+}
+
int evergreen_pcie_gart_enable(struct radeon_device *rdev)
{
u32 tmp;
- int r, i;
+ int r;
if (rdev->gart.table.vram.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
@@ -121,10 +297,9 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(rdev->dummy_page.addr >> 12));
- for (i = 1; i < 7; i++)
- WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+ WREG32(VM_CONTEXT1_CNTL, 0);
- r600_pcie_gart_tlb_flush(rdev);
+ evergreen_pcie_gart_tlb_flush(rdev);
rdev->gart.ready = true;
return 0;
}
@@ -132,11 +307,11 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
void evergreen_pcie_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
- int i, r;
+ int r;
/* Disable all tables */
- for (i = 0; i < 7; i++)
- WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+ WREG32(VM_CONTEXT0_CNTL, 0);
+ WREG32(VM_CONTEXT1_CNTL, 0);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
@@ -173,7 +348,6 @@ void evergreen_pcie_gart_fini(struct radeon_device *rdev)
void evergreen_agp_enable(struct radeon_device *rdev)
{
u32 tmp;
- int i;
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
@@ -193,8 +367,8 @@ void evergreen_agp_enable(struct radeon_device *rdev)
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
- for (i = 0; i < 7; i++)
- WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+ WREG32(VM_CONTEXT0_CNTL, 0);
+ WREG32(VM_CONTEXT1_CNTL, 0);
}
static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
@@ -400,40 +574,656 @@ static void evergreen_mc_program(struct radeon_device *rdev)
rv515_vga_render_disable(rdev);
}
-#if 0
/*
* CP.
*/
-static void evergreen_cp_stop(struct radeon_device *rdev)
-{
- /* XXX */
-}
-
static int evergreen_cp_load_microcode(struct radeon_device *rdev)
{
- /* XXX */
+ const __be32 *fw_data;
+ int i;
+
+ if (!rdev->me_fw || !rdev->pfp_fw)
+ return -EINVAL;
+ r700_cp_stop(rdev);
+ WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));
+
+ fw_data = (const __be32 *)rdev->pfp_fw->data;
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
+ WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+
+ fw_data = (const __be32 *)rdev->me_fw->data;
+ WREG32(CP_ME_RAM_WADDR, 0);
+ for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
+ WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ WREG32(CP_ME_RAM_WADDR, 0);
+ WREG32(CP_ME_RAM_RADDR, 0);
return 0;
}
+int evergreen_cp_resume(struct radeon_device *rdev)
+{
+ u32 tmp;
+ u32 rb_bufsz;
+ int r;
+
+ /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
+ WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
+ SOFT_RESET_PA |
+ SOFT_RESET_SH |
+ SOFT_RESET_VGT |
+ SOFT_RESET_SX));
+ RREG32(GRBM_SOFT_RESET);
+ mdelay(15);
+ WREG32(GRBM_SOFT_RESET, 0);
+ RREG32(GRBM_SOFT_RESET);
+
+ /* Set ring buffer size */
+ rb_bufsz = drm_order(rdev->cp.ring_size / 8);
+ tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+ tmp |= BUF_SWAP_32BIT;
+#endif
+ WREG32(CP_RB_CNTL, tmp);
+ WREG32(CP_SEM_WAIT_TIMER, 0x4);
+
+ /* Set the write pointer delay */
+ WREG32(CP_RB_WPTR_DELAY, 0);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
+ WREG32(CP_RB_RPTR_WR, 0);
+ WREG32(CP_RB_WPTR, 0);
+ WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
+ WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
+ mdelay(1);
+ WREG32(CP_RB_CNTL, tmp);
+
+ WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
+ WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
+
+ rdev->cp.rptr = RREG32(CP_RB_RPTR);
+ rdev->cp.wptr = RREG32(CP_RB_WPTR);
+
+ r600_cp_start(rdev);
+ rdev->cp.ready = true;
+ r = radeon_ring_test(rdev);
+ if (r) {
+ rdev->cp.ready = false;
+ return r;
+ }
+ return 0;
+}
/*
* Core functions
*/
-static u32 evergreen_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
+static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
+ u32 num_tile_pipes,
u32 num_backends,
u32 backend_disable_mask)
{
u32 backend_map = 0;
+ u32 enabled_backends_mask = 0;
+ u32 enabled_backends_count = 0;
+ u32 cur_pipe;
+ u32 swizzle_pipe[EVERGREEN_MAX_PIPES];
+ u32 cur_backend = 0;
+ u32 i;
+ bool force_no_swizzle;
+
+ if (num_tile_pipes > EVERGREEN_MAX_PIPES)
+ num_tile_pipes = EVERGREEN_MAX_PIPES;
+ if (num_tile_pipes < 1)
+ num_tile_pipes = 1;
+ if (num_backends > EVERGREEN_MAX_BACKENDS)
+ num_backends = EVERGREEN_MAX_BACKENDS;
+ if (num_backends < 1)
+ num_backends = 1;
+
+ for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
+ if (((backend_disable_mask >> i) & 1) == 0) {
+ enabled_backends_mask |= (1 << i);
+ ++enabled_backends_count;
+ }
+ if (enabled_backends_count == num_backends)
+ break;
+ }
+
+ if (enabled_backends_count == 0) {
+ enabled_backends_mask = 1;
+ enabled_backends_count = 1;
+ }
+
+ if (enabled_backends_count != num_backends)
+ num_backends = enabled_backends_count;
+
+ memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES);
+ switch (rdev->family) {
+ case CHIP_CEDAR:
+ case CHIP_REDWOOD:
+ force_no_swizzle = false;
+ break;
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ case CHIP_JUNIPER:
+ default:
+ force_no_swizzle = true;
+ break;
+ }
+ if (force_no_swizzle) {
+ bool last_backend_enabled = false;
+
+ force_no_swizzle = false;
+ for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
+ if (((enabled_backends_mask >> i) & 1) == 1) {
+ if (last_backend_enabled)
+ force_no_swizzle = true;
+ last_backend_enabled = true;
+ } else
+ last_backend_enabled = false;
+ }
+ }
+
+ switch (num_tile_pipes) {
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ DRM_ERROR("odd number of pipes!\n");
+ break;
+ case 2:
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ break;
+ case 4:
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 1;
+ swizzle_pipe[3] = 3;
+ }
+ break;
+ case 6:
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ swizzle_pipe[5] = 5;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 1;
+ swizzle_pipe[4] = 3;
+ swizzle_pipe[5] = 5;
+ }
+ break;
+ case 8:
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ swizzle_pipe[5] = 5;
+ swizzle_pipe[6] = 6;
+ swizzle_pipe[7] = 7;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 6;
+ swizzle_pipe[4] = 1;
+ swizzle_pipe[5] = 3;
+ swizzle_pipe[6] = 5;
+ swizzle_pipe[7] = 7;
+ }
+ break;
+ }
+
+ for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
+ while (((1 << cur_backend) & enabled_backends_mask) == 0)
+ cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
+
+ backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
+
+ cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
+ }
return backend_map;
}
-#endif
static void evergreen_gpu_init(struct radeon_device *rdev)
{
- /* XXX */
+ u32 cc_rb_backend_disable = 0;
+ u32 cc_gc_shader_pipe_config;
+ u32 gb_addr_config = 0;
+ u32 mc_shared_chmap, mc_arb_ramcfg;
+ u32 gb_backend_map;
+ u32 grbm_gfx_index;
+ u32 sx_debug_1;
+ u32 smx_dc_ctl0;
+ u32 sq_config;
+ u32 sq_lds_resource_mgmt;
+ u32 sq_gpr_resource_mgmt_1;
+ u32 sq_gpr_resource_mgmt_2;
+ u32 sq_gpr_resource_mgmt_3;
+ u32 sq_thread_resource_mgmt;
+ u32 sq_thread_resource_mgmt_2;
+ u32 sq_stack_resource_mgmt_1;
+ u32 sq_stack_resource_mgmt_2;
+ u32 sq_stack_resource_mgmt_3;
+ u32 vgt_cache_invalidation;
+ u32 hdp_host_path_cntl;
+ int i, j, num_shader_engines, ps_thread_count;
+
+ switch (rdev->family) {
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ rdev->config.evergreen.num_ses = 2;
+ rdev->config.evergreen.max_pipes = 4;
+ rdev->config.evergreen.max_tile_pipes = 8;
+ rdev->config.evergreen.max_simds = 10;
+ rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 248;
+ rdev->config.evergreen.max_gs_threads = 32;
+ rdev->config.evergreen.max_stack_entries = 512;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 256;
+ rdev->config.evergreen.sx_max_export_pos_size = 64;
+ rdev->config.evergreen.sx_max_export_smx_size = 192;
+ rdev->config.evergreen.max_hw_contexts = 8;
+ rdev->config.evergreen.sq_num_cf_insts = 2;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ break;
+ case CHIP_JUNIPER:
+ rdev->config.evergreen.num_ses = 1;
+ rdev->config.evergreen.max_pipes = 4;
+ rdev->config.evergreen.max_tile_pipes = 4;
+ rdev->config.evergreen.max_simds = 10;
+ rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 248;
+ rdev->config.evergreen.max_gs_threads = 32;
+ rdev->config.evergreen.max_stack_entries = 512;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 256;
+ rdev->config.evergreen.sx_max_export_pos_size = 64;
+ rdev->config.evergreen.sx_max_export_smx_size = 192;
+ rdev->config.evergreen.max_hw_contexts = 8;
+ rdev->config.evergreen.sq_num_cf_insts = 2;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ break;
+ case CHIP_REDWOOD:
+ rdev->config.evergreen.num_ses = 1;
+ rdev->config.evergreen.max_pipes = 4;
+ rdev->config.evergreen.max_tile_pipes = 4;
+ rdev->config.evergreen.max_simds = 5;
+ rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 248;
+ rdev->config.evergreen.max_gs_threads = 32;
+ rdev->config.evergreen.max_stack_entries = 256;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 256;
+ rdev->config.evergreen.sx_max_export_pos_size = 64;
+ rdev->config.evergreen.sx_max_export_smx_size = 192;
+ rdev->config.evergreen.max_hw_contexts = 8;
+ rdev->config.evergreen.sq_num_cf_insts = 2;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ break;
+ case CHIP_CEDAR:
+ default:
+ rdev->config.evergreen.num_ses = 1;
+ rdev->config.evergreen.max_pipes = 2;
+ rdev->config.evergreen.max_tile_pipes = 2;
+ rdev->config.evergreen.max_simds = 2;
+ rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 192;
+ rdev->config.evergreen.max_gs_threads = 16;
+ rdev->config.evergreen.max_stack_entries = 256;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 128;
+ rdev->config.evergreen.sx_max_export_pos_size = 32;
+ rdev->config.evergreen.sx_max_export_smx_size = 96;
+ rdev->config.evergreen.max_hw_contexts = 4;
+ rdev->config.evergreen.sq_num_cf_insts = 1;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ break;
+ }
+
+ /* Initialize HDP */
+ for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+ WREG32((0x2c14 + j), 0x00000000);
+ WREG32((0x2c18 + j), 0x00000000);
+ WREG32((0x2c1c + j), 0x00000000);
+ WREG32((0x2c20 + j), 0x00000000);
+ WREG32((0x2c24 + j), 0x00000000);
+ }
+
+ WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+
+ cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;
+
+ cc_gc_shader_pipe_config |=
+ INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes)
+ & EVERGREEN_MAX_PIPES_MASK);
+ cc_gc_shader_pipe_config |=
+ INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds)
+ & EVERGREEN_MAX_SIMDS_MASK);
+
+ cc_rb_backend_disable =
+ BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends)
+ & EVERGREEN_MAX_BACKENDS_MASK);
+
+
+ mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
+ mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+
+ switch (rdev->config.evergreen.max_tile_pipes) {
+ case 1:
+ default:
+ gb_addr_config |= NUM_PIPES(0);
+ break;
+ case 2:
+ gb_addr_config |= NUM_PIPES(1);
+ break;
+ case 4:
+ gb_addr_config |= NUM_PIPES(2);
+ break;
+ case 8:
+ gb_addr_config |= NUM_PIPES(3);
+ break;
+ }
+
+ gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
+ gb_addr_config |= BANK_INTERLEAVE_SIZE(0);
+ gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1);
+ gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1);
+ gb_addr_config |= NUM_GPUS(0); /* Hemlock? */
+ gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
+
+ if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2)
+ gb_addr_config |= ROW_SIZE(2);
+ else
+ gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT);
+
+ if (rdev->ddev->pdev->device == 0x689e) {
+ u32 efuse_straps_4;
+ u32 efuse_straps_3;
+ u8 efuse_box_bit_131_124;
+
+ WREG32(RCU_IND_INDEX, 0x204);
+ efuse_straps_4 = RREG32(RCU_IND_DATA);
+ WREG32(RCU_IND_INDEX, 0x203);
+ efuse_straps_3 = RREG32(RCU_IND_DATA);
+ efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28));
+
+ switch(efuse_box_bit_131_124) {
+ case 0x00:
+ gb_backend_map = 0x76543210;
+ break;
+ case 0x55:
+ gb_backend_map = 0x77553311;
+ break;
+ case 0x56:
+ gb_backend_map = 0x77553300;
+ break;
+ case 0x59:
+ gb_backend_map = 0x77552211;
+ break;
+ case 0x66:
+ gb_backend_map = 0x77443300;
+ break;
+ case 0x99:
+ gb_backend_map = 0x66552211;
+ break;
+ case 0x5a:
+ gb_backend_map = 0x77552200;
+ break;
+ case 0xaa:
+ gb_backend_map = 0x66442200;
+ break;
+ case 0x95:
+ gb_backend_map = 0x66553311;
+ break;
+ default:
+ DRM_ERROR("bad backend map, using default\n");
+ gb_backend_map =
+ evergreen_get_tile_pipe_to_backend_map(rdev,
+ rdev->config.evergreen.max_tile_pipes,
+ rdev->config.evergreen.max_backends,
+ ((EVERGREEN_MAX_BACKENDS_MASK <<
+ rdev->config.evergreen.max_backends) &
+ EVERGREEN_MAX_BACKENDS_MASK));
+ break;
+ }
+ } else if (rdev->ddev->pdev->device == 0x68b9) {
+ u32 efuse_straps_3;
+ u8 efuse_box_bit_127_124;
+
+ WREG32(RCU_IND_INDEX, 0x203);
+ efuse_straps_3 = RREG32(RCU_IND_DATA);
+ efuse_box_bit_127_124 = (u8)(efuse_straps_3 & 0xF0000000) >> 28;
+
+ switch(efuse_box_bit_127_124) {
+ case 0x0:
+ gb_backend_map = 0x00003210;
+ break;
+ case 0x5:
+ case 0x6:
+ case 0x9:
+ case 0xa:
+ gb_backend_map = 0x00003311;
+ break;
+ default:
+ DRM_ERROR("bad backend map, using default\n");
+ gb_backend_map =
+ evergreen_get_tile_pipe_to_backend_map(rdev,
+ rdev->config.evergreen.max_tile_pipes,
+ rdev->config.evergreen.max_backends,
+ ((EVERGREEN_MAX_BACKENDS_MASK <<
+ rdev->config.evergreen.max_backends) &
+ EVERGREEN_MAX_BACKENDS_MASK));
+ break;
+ }
+ } else
+ gb_backend_map =
+ evergreen_get_tile_pipe_to_backend_map(rdev,
+ rdev->config.evergreen.max_tile_pipes,
+ rdev->config.evergreen.max_backends,
+ ((EVERGREEN_MAX_BACKENDS_MASK <<
+ rdev->config.evergreen.max_backends) &
+ EVERGREEN_MAX_BACKENDS_MASK));
+
+ WREG32(GB_BACKEND_MAP, gb_backend_map);
+ WREG32(GB_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+ WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+
+ num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
+ grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
+
+ for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
+ u32 rb = cc_rb_backend_disable | (0xf0 << 16);
+ u32 sp = cc_gc_shader_pipe_config;
+ u32 gfx = grbm_gfx_index | SE_INDEX(i);
+
+ if (i == num_shader_engines) {
+ rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK);
+ sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK);
+ }
+
+ WREG32(GRBM_GFX_INDEX, gfx);
+ WREG32(RLC_GFX_INDEX, gfx);
+
+ WREG32(CC_RB_BACKEND_DISABLE, rb);
+ WREG32(CC_SYS_RB_BACKEND_DISABLE, rb);
+ WREG32(GC_USER_RB_BACKEND_DISABLE, rb);
+ WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
+ }
+
+ grbm_gfx_index |= SE_BROADCAST_WRITES;
+ WREG32(GRBM_GFX_INDEX, grbm_gfx_index);
+ WREG32(RLC_GFX_INDEX, grbm_gfx_index);
+
+ WREG32(CGTS_SYS_TCC_DISABLE, 0);
+ WREG32(CGTS_TCC_DISABLE, 0);
+ WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
+ WREG32(CGTS_USER_TCC_DISABLE, 0);
+
+ /* set HW defaults for 3D engine */
+ WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
+ ROQ_IB2_START(0x2b)));
+
+ WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
+
+ WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
+ SYNC_GRADIENT |
+ SYNC_WALKER |
+ SYNC_ALIGNER));
+
+ sx_debug_1 = RREG32(SX_DEBUG_1);
+ sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
+ WREG32(SX_DEBUG_1, sx_debug_1);
+
+
+ smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
+ smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
+ smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
+ WREG32(SMX_DC_CTL0, smx_dc_ctl0);
+
+ WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
+ POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
+ SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
+
+ WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
+ SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
+ SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));
+
+ WREG32(VGT_NUM_INSTANCES, 1);
+ WREG32(SPI_CONFIG_CNTL, 0);
+ WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
+ WREG32(CP_PERFMON_CNTL, 0);
+
+ WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
+ FETCH_FIFO_HIWATER(0x4) |
+ DONE_FIFO_HIWATER(0xe0) |
+ ALU_UPDATE_FIFO_HIWATER(0x8)));
+
+ sq_config = RREG32(SQ_CONFIG);
+ sq_config &= ~(PS_PRIO(3) |
+ VS_PRIO(3) |
+ GS_PRIO(3) |
+ ES_PRIO(3));
+ sq_config |= (VC_ENABLE |
+ EXPORT_SRC_C |
+ PS_PRIO(0) |
+ VS_PRIO(1) |
+ GS_PRIO(2) |
+ ES_PRIO(3));
+
+ if (rdev->family == CHIP_CEDAR)
+ /* no vertex cache */
+ sq_config &= ~VC_ENABLE;
+
+ sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);
+
+ sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2))* 12 / 32);
+ sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
+ sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
+ sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
+ sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
+ sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
+ sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
+
+ if (rdev->family == CHIP_CEDAR)
+ ps_thread_count = 96;
+ else
+ ps_thread_count = 128;
+
+ sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
+ sq_thread_resource_mgmt |= NUM_VS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
+ sq_thread_resource_mgmt |= NUM_GS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
+ sq_thread_resource_mgmt |= NUM_ES_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
+ sq_thread_resource_mgmt_2 = NUM_HS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
+ sq_thread_resource_mgmt_2 |= NUM_LS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
+
+ sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+ sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+ sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+ sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+ sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+ sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+
+ WREG32(SQ_CONFIG, sq_config);
+ WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
+ WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
+ WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3);
+ WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
+ WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2);
+ WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
+ WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
+ WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3);
+ WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
+ WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt);
+
+ WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
+ FORCE_EOV_MAX_REZ_CNT(255)));
+
+ if (rdev->family == CHIP_CEDAR)
+ vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
+ else
+ vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
+ vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
+ WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
+
+ WREG32(VGT_GS_VERTEX_REUSE, 16);
+ WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+
+ WREG32(CB_PERF_CTR0_SEL_0, 0);
+ WREG32(CB_PERF_CTR0_SEL_1, 0);
+ WREG32(CB_PERF_CTR1_SEL_0, 0);
+ WREG32(CB_PERF_CTR1_SEL_1, 0);
+ WREG32(CB_PERF_CTR2_SEL_0, 0);
+ WREG32(CB_PERF_CTR2_SEL_1, 0);
+ WREG32(CB_PERF_CTR3_SEL_0, 0);
+ WREG32(CB_PERF_CTR3_SEL_1, 0);
+
+ hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
+ WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+ WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
+
+ udelay(50);
+
}
int evergreen_mc_init(struct radeon_device *rdev)
@@ -476,26 +1266,616 @@ int evergreen_mc_init(struct radeon_device *rdev)
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
rdev->mc.visible_vram_size = rdev->mc.aper_size;
- /* FIXME remove this once we support unmappable VRAM */
- if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
- rdev->mc.mc_vram_size = rdev->mc.aper_size;
- rdev->mc.real_vram_size = rdev->mc.aper_size;
- }
r600_vram_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
return 0;
}
-int evergreen_gpu_reset(struct radeon_device *rdev)
+bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
{
/* FIXME: implement for evergreen */
+ return false;
+}
+
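+/* Soft reset sequence: dump the status registers, stop the memory controller,
+ * halt CP fetch/prefetch, then pulse the GRBM (gfx block) and SRBM (system
+ * block) soft reset lines. The ASIC is re-posted via atombios afterwards as
+ * the GPU often ends up in an incoherent state.
+ */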
+static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
+{
+ struct evergreen_mc_save save;
+ u32 srbm_reset = 0;
+ u32 grbm_reset = 0;
+
+ dev_info(rdev->dev, "GPU softreset \n");
+ dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
+ RREG32(GRBM_STATUS));
+ dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
+ RREG32(GRBM_STATUS_SE0));
+ dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
+ RREG32(GRBM_STATUS_SE1));
+ dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
+ RREG32(SRBM_STATUS));
+ evergreen_mc_stop(rdev, &save);
+ if (evergreen_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+ }
+ /* Disable CP parsing/prefetching */
+ WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+
+ /* reset all the gfx blocks */
+ grbm_reset = (SOFT_RESET_CP |
+ SOFT_RESET_CB |
+ SOFT_RESET_DB |
+ SOFT_RESET_PA |
+ SOFT_RESET_SC |
+ SOFT_RESET_SPI |
+ SOFT_RESET_SH |
+ SOFT_RESET_SX |
+ SOFT_RESET_TC |
+ SOFT_RESET_TA |
+ SOFT_RESET_VC |
+ SOFT_RESET_VGT);
+
+ dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
+ WREG32(GRBM_SOFT_RESET, grbm_reset);
+ (void)RREG32(GRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(GRBM_SOFT_RESET, 0);
+ (void)RREG32(GRBM_SOFT_RESET);
+
+ /* reset all the system blocks */
+ srbm_reset = SRBM_SOFT_RESET_ALL_MASK;
+
+ dev_info(rdev->dev, " SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
+ WREG32(SRBM_SOFT_RESET, srbm_reset);
+ (void)RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+ (void)RREG32(SRBM_SOFT_RESET);
+ /* Wait a little for things to settle down */
+ udelay(50);
+ dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
+ RREG32(GRBM_STATUS));
+ dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
+ RREG32(GRBM_STATUS_SE0));
+ dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
+ RREG32(GRBM_STATUS_SE1));
+ dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
+ RREG32(SRBM_STATUS));
+ /* After reset we need to reinit the asic as the GPU often ends up in an
+ * incoherent state.
+ */
+ atom_asic_init(rdev->mode_info.atom_context);
+ evergreen_mc_resume(rdev, &save);
+ return 0;
+}
+
+int evergreen_asic_reset(struct radeon_device *rdev)
+{
+ return evergreen_gpu_soft_reset(rdev);
+}
+
+/* Interrupts */
+
+u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
+{
+ switch (crtc) {
+ case 0:
+ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC0_REGISTER_OFFSET);
+ case 1:
+ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC1_REGISTER_OFFSET);
+ case 2:
+ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC2_REGISTER_OFFSET);
+ case 3:
+ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC3_REGISTER_OFFSET);
+ case 4:
+ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC4_REGISTER_OFFSET);
+ case 5:
+ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC5_REGISTER_OFFSET);
+ default:
+ return 0;
+ }
+}
+
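+/* Mask every interrupt source we manage (CP, GRBM, CRTC vblank/vline,
+ * pageflip, DAC autodetect and HPD), preserving only the HPD polarity bits.
+ */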
+void evergreen_disable_interrupt_state(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ WREG32(CP_INT_CNTL, 0);
+ WREG32(GRBM_INT_CNTL, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+
+ WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
+ WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
+
+ tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+
+}
+
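+/* Build the interrupt enable masks from the state tracked in rdev->irq
+ * (SW/CP ring interrupt, per-CRTC vblank, per-connector hotplug) and write
+ * them out in one pass.
+ */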
+int evergreen_irq_set(struct radeon_device *rdev)
+{
+ u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+ u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
+ u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
+
+ if (!rdev->irq.installed) {
+ WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+ return -EINVAL;
+ }
+ /* don't enable anything if the ih is disabled */
+ if (!rdev->ih.enabled) {
+ r600_disable_interrupts(rdev);
+ /* force the active interrupt state to all disabled */
+ evergreen_disable_interrupt_state(rdev);
+ return 0;
+ }
+
+ hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+
+ if (rdev->irq.sw_int) {
+ DRM_DEBUG("evergreen_irq_set: sw int\n");
+ cp_int_cntl |= RB_INT_ENABLE;
+ }
+ if (rdev->irq.crtc_vblank_int[0]) {
+ DRM_DEBUG("evergreen_irq_set: vblank 0\n");
+ crtc1 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[1]) {
+ DRM_DEBUG("evergreen_irq_set: vblank 1\n");
+ crtc2 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[2]) {
+ DRM_DEBUG("evergreen_irq_set: vblank 2\n");
+ crtc3 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[3]) {
+ DRM_DEBUG("evergreen_irq_set: vblank 3\n");
+ crtc4 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[4]) {
+ DRM_DEBUG("evergreen_irq_set: vblank 4\n");
+ crtc5 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[5]) {
+ DRM_DEBUG("evergreen_irq_set: vblank 5\n");
+ crtc6 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.hpd[0]) {
+ DRM_DEBUG("evergreen_irq_set: hpd 1\n");
+ hpd1 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[1]) {
+ DRM_DEBUG("evergreen_irq_set: hpd 2\n");
+ hpd2 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[2]) {
+ DRM_DEBUG("evergreen_irq_set: hpd 3\n");
+ hpd3 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[3]) {
+ DRM_DEBUG("evergreen_irq_set: hpd 4\n");
+ hpd4 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[4]) {
+ DRM_DEBUG("evergreen_irq_set: hpd 5\n");
+ hpd5 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[5]) {
+ DRM_DEBUG("evergreen_irq_set: hpd 6\n");
+ hpd6 |= DC_HPDx_INT_EN;
+ }
+
+ WREG32(CP_INT_CNTL, cp_int_cntl);
+
+ WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
+ WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
+ WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
+ WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
+ WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
+ WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
+
+ WREG32(DC_HPD1_INT_CONTROL, hpd1);
+ WREG32(DC_HPD2_INT_CONTROL, hpd2);
+ WREG32(DC_HPD3_INT_CONTROL, hpd3);
+ WREG32(DC_HPD4_INT_CONTROL, hpd4);
+ WREG32(DC_HPD5_INT_CONTROL, hpd5);
+ WREG32(DC_HPD6_INT_CONTROL, hpd6);
+
return 0;
}
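+
+/* Read the six DISP_INTERRUPT_STATUS registers and acknowledge any pending
+ * vblank, vline and hot-plug sources so they can fire again.
+ */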
+static inline void evergreen_irq_ack(struct radeon_device *rdev,
+ u32 *disp_int,
+ u32 *disp_int_cont,
+ u32 *disp_int_cont2,
+ u32 *disp_int_cont3,
+ u32 *disp_int_cont4,
+ u32 *disp_int_cont5)
+{
+ u32 tmp;
+
+ *disp_int = RREG32(DISP_INTERRUPT_STATUS);
+ *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+ *disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
+ *disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
+ *disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
+ *disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
+
+ if (*disp_int & LB_D1_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
+ if (*disp_int & LB_D1_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
+
+ if (*disp_int_cont & LB_D2_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
+ if (*disp_int_cont & LB_D2_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
+
+ if (*disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
+ if (*disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
+
+ if (*disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
+ if (*disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
+
+ if (*disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
+ if (*disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
+
+ if (*disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
+ if (*disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
+
+ if (*disp_int & DC_HPD1_INTERRUPT) {
+ tmp = RREG32(DC_HPD1_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ }
+ if (*disp_int_cont & DC_HPD2_INTERRUPT) {
+ tmp = RREG32(DC_HPD2_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ }
+ if (*disp_int_cont2 & DC_HPD3_INTERRUPT) {
+ tmp = RREG32(DC_HPD3_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ }
+ if (*disp_int_cont3 & DC_HPD4_INTERRUPT) {
+ tmp = RREG32(DC_HPD4_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ }
+ if (*disp_int_cont4 & DC_HPD5_INTERRUPT) {
+ tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ }
+ if (*disp_int_cont5 & DC_HPD6_INTERRUPT) {
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ }
+}
+
+void evergreen_irq_disable(struct radeon_device *rdev)
+{
+ u32 disp_int, disp_int_cont, disp_int_cont2;
+ u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;
+
+ r600_disable_interrupts(rdev);
+ /* Wait and acknowledge irq */
+ mdelay(1);
+ evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2,
+ &disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
+ evergreen_disable_interrupt_state(rdev);
+}
+
+static void evergreen_irq_suspend(struct radeon_device *rdev)
+{
+ evergreen_irq_disable(rdev);
+ r600_rlc_stop(rdev);
+}
+
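+/* Fetch the interrupt handler (IH) ring write pointer from the hardware and
+ * recover from a ring overflow by skipping to the oldest vector that has not
+ * been overwritten.
+ */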
+static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
+{
+ u32 wptr, tmp;
+
+ /* XXX use writeback */
+ wptr = RREG32(IH_RB_WPTR);
+
+ if (wptr & RB_OVERFLOW) {
+ /* When a ring buffer overflow happens, start parsing interrupts
+ * from the last not overwritten vector (wptr + 16). Hopefully
+ * this should allow us to catch up.
+ */
+ dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
+ wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
+ rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
+ tmp = RREG32(IH_RB_CNTL);
+ tmp |= IH_WPTR_OVERFLOW_CLEAR;
+ WREG32(IH_RB_CNTL, tmp);
+ }
+ return (wptr & rdev->ih.ptr_mask);
+}
+
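+/* Drain the IH ring: each vector is 16 bytes, src_id selects the block
+ * (CRTC vblank/vline, HPD hotplug, CP) and src_data the sub-event. We loop
+ * until rptr catches up with wptr, re-check wptr in case new vectors arrived
+ * while processing, then write the final rptr back to the hardware.
+ */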
+int evergreen_irq_process(struct radeon_device *rdev)
+{
+ u32 wptr = evergreen_get_ih_wptr(rdev);
+ u32 rptr = rdev->ih.rptr;
+ u32 src_id, src_data;
+ u32 ring_index;
+ u32 disp_int, disp_int_cont, disp_int_cont2;
+ u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;
+ unsigned long flags;
+ bool queue_hotplug = false;
+
+ DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+ if (!rdev->ih.enabled)
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&rdev->ih.lock, flags);
+
+ if (rptr == wptr) {
+ spin_unlock_irqrestore(&rdev->ih.lock, flags);
+ return IRQ_NONE;
+ }
+ if (rdev->shutdown) {
+ spin_unlock_irqrestore(&rdev->ih.lock, flags);
+ return IRQ_NONE;
+ }
+
+restart_ih:
+ /* display interrupts */
+ evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2,
+ &disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
+
+ rdev->ih.wptr = wptr;
+ while (rptr != wptr) {
+ /* wptr/rptr are in bytes! */
+ ring_index = rptr / 4;
+ src_id = rdev->ih.ring[ring_index] & 0xff;
+ src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
+
+ switch (src_id) {
+ case 1: /* D1 vblank/vline */
+ switch (src_data) {
+ case 0: /* D1 vblank */
+ if (disp_int & LB_D1_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 0);
+ wake_up(&rdev->irq.vblank_queue);
+ disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D1 vblank\n");
+ }
+ break;
+ case 1: /* D1 vline */
+ if (disp_int & LB_D1_VLINE_INTERRUPT) {
+ disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D1 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 2: /* D2 vblank/vline */
+ switch (src_data) {
+ case 0: /* D2 vblank */
+ if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 1);
+ wake_up(&rdev->irq.vblank_queue);
+ disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D2 vblank\n");
+ }
+ break;
+ case 1: /* D2 vline */
+ if (disp_int_cont & LB_D2_VLINE_INTERRUPT) {
+ disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D2 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 3: /* D3 vblank/vline */
+ switch (src_data) {
+ case 0: /* D3 vblank */
+ if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 2);
+ wake_up(&rdev->irq.vblank_queue);
+ disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D3 vblank\n");
+ }
+ break;
+ case 1: /* D3 vline */
+ if (disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
+ disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D3 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 4: /* D4 vblank/vline */
+ switch (src_data) {
+ case 0: /* D4 vblank */
+ if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 3);
+ wake_up(&rdev->irq.vblank_queue);
+ disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D4 vblank\n");
+ }
+ break;
+ case 1: /* D4 vline */
+ if (disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
+ disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D4 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 5: /* D5 vblank/vline */
+ switch (src_data) {
+ case 0: /* D5 vblank */
+ if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 4);
+ wake_up(&rdev->irq.vblank_queue);
+ disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D5 vblank\n");
+ }
+ break;
+ case 1: /* D5 vline */
+ if (disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
+ disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D5 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 6: /* D6 vblank/vline */
+ switch (src_data) {
+ case 0: /* D6 vblank */
+ if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 5);
+ wake_up(&rdev->irq.vblank_queue);
+ disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D6 vblank\n");
+ }
+ break;
+ case 1: /* D6 vline */
+ if (disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
+ disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D6 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 42: /* HPD hotplug */
+ switch (src_data) {
+ case 0:
+ if (disp_int & DC_HPD1_INTERRUPT) {
+ disp_int &= ~DC_HPD1_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD1\n");
+ }
+ break;
+ case 1:
+ if (disp_int_cont & DC_HPD2_INTERRUPT) {
+ disp_int_cont &= ~DC_HPD2_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD2\n");
+ }
+ break;
+ case 2:
+ if (disp_int_cont2 & DC_HPD3_INTERRUPT) {
+ disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD3\n");
+ }
+ break;
+ case 3:
+ if (disp_int_cont3 & DC_HPD4_INTERRUPT) {
+ disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD4\n");
+ }
+ break;
+ case 4:
+ if (disp_int_cont4 & DC_HPD5_INTERRUPT) {
+ disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD5\n");
+ }
+ break;
+ case 5:
+ if (disp_int_cont5 & DC_HPD6_INTERRUPT) {
+ disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD6\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 176: /* CP_INT in ring buffer */
+ case 177: /* CP_INT in IB1 */
+ case 178: /* CP_INT in IB2 */
+ DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
+ radeon_fence_process(rdev);
+ break;
+ case 181: /* CP EOP event */
+ DRM_DEBUG("IH: CP EOP\n");
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+
+ /* wptr/rptr are in bytes! */
+ rptr += 16;
+ rptr &= rdev->ih.ptr_mask;
+ }
+ /* make sure wptr hasn't changed while processing */
+ wptr = evergreen_get_ih_wptr(rdev);
+ if (wptr != rdev->ih.wptr)
+ goto restart_ih;
+ if (queue_hotplug)
+ queue_work(rdev->wq, &rdev->hotplug_work);
+ rdev->ih.rptr = rptr;
+ WREG32(IH_RB_RPTR, rdev->ih.rptr);
+ spin_unlock_irqrestore(&rdev->ih.lock, flags);
+ return IRQ_HANDLED;
+}
+
static int evergreen_startup(struct radeon_device *rdev)
{
-#if 0
int r;
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
@@ -505,17 +1885,15 @@ static int evergreen_startup(struct radeon_device *rdev)
return r;
}
}
-#endif
+
evergreen_mc_program(rdev);
-#if 0
if (rdev->flags & RADEON_IS_AGP) {
- evergreem_agp_enable(rdev);
+ evergreen_agp_enable(rdev);
} else {
r = evergreen_pcie_gart_enable(rdev);
if (r)
return r;
}
-#endif
evergreen_gpu_init(rdev);
#if 0
if (!rdev->r600_blit.shader_obj) {
@@ -536,6 +1914,7 @@ static int evergreen_startup(struct radeon_device *rdev)
DRM_ERROR("failed to pin blit object %d\n", r);
return r;
}
+#endif
/* Enable IRQ */
r = r600_irq_init(rdev);
@@ -544,7 +1923,7 @@ static int evergreen_startup(struct radeon_device *rdev)
radeon_irq_kms_fini(rdev);
return r;
}
- r600_irq_set(rdev);
+ evergreen_irq_set(rdev);
r = radeon_ring_init(rdev, rdev->cp.ring_size);
if (r)
@@ -552,12 +1931,12 @@ static int evergreen_startup(struct radeon_device *rdev)
r = evergreen_cp_load_microcode(rdev);
if (r)
return r;
- r = r600_cp_resume(rdev);
+ r = evergreen_cp_resume(rdev);
if (r)
return r;
/* write back buffer are not vital so don't worry about failure */
r600_wb_enable(rdev);
-#endif
+
return 0;
}
@@ -582,13 +1961,13 @@ int evergreen_resume(struct radeon_device *rdev)
DRM_ERROR("r600 startup failed on resume\n");
return r;
}
-#if 0
+
r = r600_ib_test(rdev);
if (r) {
DRM_ERROR("radeon: failled testing IB (%d).\n", r);
return r;
}
-#endif
+
return r;
}
@@ -597,12 +1976,14 @@ int evergreen_suspend(struct radeon_device *rdev)
{
#if 0
int r;
-
+#endif
/* FIXME: we should wait for ring to be empty */
r700_cp_stop(rdev);
rdev->cp.ready = false;
+ evergreen_irq_suspend(rdev);
r600_wb_disable(rdev);
evergreen_pcie_gart_disable(rdev);
+#if 0
/* unpin shaders bo */
r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
if (likely(r == 0)) {
@@ -702,7 +2083,7 @@ int evergreen_init(struct radeon_device *rdev)
r = radeon_bo_init(rdev);
if (r)
return r;
-#if 0
+
r = radeon_irq_kms_init(rdev);
if (r)
return r;
@@ -716,14 +2097,16 @@ int evergreen_init(struct radeon_device *rdev)
r = r600_pcie_gart_init(rdev);
if (r)
return r;
-#endif
+
rdev->accel_working = false;
r = evergreen_startup(rdev);
if (r) {
- evergreen_suspend(rdev);
- /*r600_wb_fini(rdev);*/
- /*radeon_ring_fini(rdev);*/
- /*evergreen_pcie_gart_fini(rdev);*/
+ dev_err(rdev->dev, "disabling GPU acceleration\n");
+ r700_cp_fini(rdev);
+ r600_wb_fini(rdev);
+ r600_irq_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ evergreen_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
if (rdev->accel_working) {
@@ -744,15 +2127,12 @@ int evergreen_init(struct radeon_device *rdev)
void evergreen_fini(struct radeon_device *rdev)
{
radeon_pm_fini(rdev);
- evergreen_suspend(rdev);
-#if 0
- r600_blit_fini(rdev);
+ /*r600_blit_fini(rdev);*/
+ r700_cp_fini(rdev);
+ r600_wb_fini(rdev);
r600_irq_fini(rdev);
radeon_irq_kms_fini(rdev);
- radeon_ring_fini(rdev);
- r600_wb_fini(rdev);
evergreen_pcie_gart_fini(rdev);
-#endif
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_clocks_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
new file mode 100644
index 000000000000..93e9e17ad54a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -0,0 +1,556 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef EVERGREEND_H
+#define EVERGREEND_H
+
+#define EVERGREEN_MAX_SH_GPRS 256
+#define EVERGREEN_MAX_TEMP_GPRS 16
+#define EVERGREEN_MAX_SH_THREADS 256
+#define EVERGREEN_MAX_SH_STACK_ENTRIES 4096
+#define EVERGREEN_MAX_FRC_EOV_CNT 16384
+#define EVERGREEN_MAX_BACKENDS 8
+#define EVERGREEN_MAX_BACKENDS_MASK 0xFF
+#define EVERGREEN_MAX_SIMDS 16
+#define EVERGREEN_MAX_SIMDS_MASK 0xFFFF
+#define EVERGREEN_MAX_PIPES 8
+#define EVERGREEN_MAX_PIPES_MASK 0xFF
+#define EVERGREEN_MAX_LDS_NUM 0xFFFF
+
+/* Registers */
+
+#define RCU_IND_INDEX 0x100
+#define RCU_IND_DATA 0x104
+
+#define GRBM_GFX_INDEX 0x802C
+#define INSTANCE_INDEX(x) ((x) << 0)
+#define SE_INDEX(x) ((x) << 16)
+#define INSTANCE_BROADCAST_WRITES (1 << 30)
+#define SE_BROADCAST_WRITES (1 << 31)
+#define RLC_GFX_INDEX 0x3fC4
+#define CC_GC_SHADER_PIPE_CONFIG 0x8950
+#define WRITE_DIS (1 << 0)
+#define CC_RB_BACKEND_DISABLE 0x98F4
+#define BACKEND_DISABLE(x) ((x) << 16)
+#define GB_ADDR_CONFIG 0x98F8
+#define NUM_PIPES(x) ((x) << 0)
+#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4)
+#define BANK_INTERLEAVE_SIZE(x) ((x) << 8)
+#define NUM_SHADER_ENGINES(x) ((x) << 12)
+#define SHADER_ENGINE_TILE_SIZE(x) ((x) << 16)
+#define NUM_GPUS(x) ((x) << 20)
+#define MULTI_GPU_TILE_SIZE(x) ((x) << 24)
+#define ROW_SIZE(x) ((x) << 28)
+#define GB_BACKEND_MAP 0x98FC
+#define DMIF_ADDR_CONFIG 0xBD4
+#define HDP_ADDR_CONFIG 0x2F48
+
+#define CC_SYS_RB_BACKEND_DISABLE 0x3F88
+#define GC_USER_RB_BACKEND_DISABLE 0x9B7C
+
+#define CGTS_SYS_TCC_DISABLE 0x3F90
+#define CGTS_TCC_DISABLE 0x9148
+#define CGTS_USER_SYS_TCC_DISABLE 0x3F94
+#define CGTS_USER_TCC_DISABLE 0x914C
+
+#define CONFIG_MEMSIZE 0x5428
+
+#define CP_ME_CNTL 0x86D8
+#define CP_ME_HALT (1 << 28)
+#define CP_PFP_HALT (1 << 26)
+#define CP_ME_RAM_DATA 0xC160
+#define CP_ME_RAM_RADDR 0xC158
+#define CP_ME_RAM_WADDR 0xC15C
+#define CP_MEQ_THRESHOLDS 0x8764
+#define STQ_SPLIT(x) ((x) << 0)
+#define CP_PERFMON_CNTL 0x87FC
+#define CP_PFP_UCODE_ADDR 0xC150
+#define CP_PFP_UCODE_DATA 0xC154
+#define CP_QUEUE_THRESHOLDS 0x8760
+#define ROQ_IB1_START(x) ((x) << 0)
+#define ROQ_IB2_START(x) ((x) << 8)
+#define CP_RB_BASE 0xC100
+#define CP_RB_CNTL 0xC104
+#define RB_BUFSZ(x) ((x) << 0)
+#define RB_BLKSZ(x) ((x) << 8)
+#define RB_NO_UPDATE (1 << 27)
+#define RB_RPTR_WR_ENA (1 << 31)
+#define BUF_SWAP_32BIT (2 << 16)
+#define CP_RB_RPTR 0x8700
+#define CP_RB_RPTR_ADDR 0xC10C
+#define CP_RB_RPTR_ADDR_HI 0xC110
+#define CP_RB_RPTR_WR 0xC108
+#define CP_RB_WPTR 0xC114
+#define CP_RB_WPTR_ADDR 0xC118
+#define CP_RB_WPTR_ADDR_HI 0xC11C
+#define CP_RB_WPTR_DELAY 0x8704
+#define CP_SEM_WAIT_TIMER 0x85BC
+#define CP_DEBUG 0xC1FC
+
+
+#define GC_USER_SHADER_PIPE_CONFIG 0x8954
+#define INACTIVE_QD_PIPES(x) ((x) << 8)
+#define INACTIVE_QD_PIPES_MASK 0x0000FF00
+#define INACTIVE_SIMDS(x) ((x) << 16)
+#define INACTIVE_SIMDS_MASK 0x00FF0000
+
+#define GRBM_CNTL 0x8000
+#define GRBM_READ_TIMEOUT(x) ((x) << 0)
+#define GRBM_SOFT_RESET 0x8020
+#define SOFT_RESET_CP (1 << 0)
+#define SOFT_RESET_CB (1 << 1)
+#define SOFT_RESET_DB (1 << 3)
+#define SOFT_RESET_PA (1 << 5)
+#define SOFT_RESET_SC (1 << 6)
+#define SOFT_RESET_SPI (1 << 8)
+#define SOFT_RESET_SH (1 << 9)
+#define SOFT_RESET_SX (1 << 10)
+#define SOFT_RESET_TC (1 << 11)
+#define SOFT_RESET_TA (1 << 12)
+#define SOFT_RESET_VC (1 << 13)
+#define SOFT_RESET_VGT (1 << 14)
+
+#define GRBM_STATUS 0x8010
+#define CMDFIFO_AVAIL_MASK 0x0000000F
+#define SRBM_RQ_PENDING (1 << 5)
+#define CF_RQ_PENDING (1 << 7)
+#define PF_RQ_PENDING (1 << 8)
+#define GRBM_EE_BUSY (1 << 10)
+#define SX_CLEAN (1 << 11)
+#define DB_CLEAN (1 << 12)
+#define CB_CLEAN (1 << 13)
+#define TA_BUSY (1 << 14)
+#define VGT_BUSY_NO_DMA (1 << 16)
+#define VGT_BUSY (1 << 17)
+#define SX_BUSY (1 << 20)
+#define SH_BUSY (1 << 21)
+#define SPI_BUSY (1 << 22)
+#define SC_BUSY (1 << 24)
+#define PA_BUSY (1 << 25)
+#define DB_BUSY (1 << 26)
+#define CP_COHERENCY_BUSY (1 << 28)
+#define CP_BUSY (1 << 29)
+#define CB_BUSY (1 << 30)
+#define GUI_ACTIVE (1 << 31)
+#define GRBM_STATUS_SE0 0x8014
+#define GRBM_STATUS_SE1 0x8018
+#define SE_SX_CLEAN (1 << 0)
+#define SE_DB_CLEAN (1 << 1)
+#define SE_CB_CLEAN (1 << 2)
+#define SE_TA_BUSY (1 << 25)
+#define SE_SX_BUSY (1 << 26)
+#define SE_SPI_BUSY (1 << 27)
+#define SE_SH_BUSY (1 << 28)
+#define SE_SC_BUSY (1 << 29)
+#define SE_DB_BUSY (1 << 30)
+#define SE_CB_BUSY (1 << 31)
+
+#define HDP_HOST_PATH_CNTL 0x2C00
+#define HDP_NONSURFACE_BASE 0x2C04
+#define HDP_NONSURFACE_INFO 0x2C08
+#define HDP_NONSURFACE_SIZE 0x2C0C
+#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
+#define HDP_TILING_CONFIG 0x2F3C
+
+#define MC_SHARED_CHMAP 0x2004
+#define NOOFCHAN_SHIFT 12
+#define NOOFCHAN_MASK 0x00003000
+
+#define MC_ARB_RAMCFG 0x2760
+#define NOOFBANK_SHIFT 0
+#define NOOFBANK_MASK 0x00000003
+#define NOOFRANK_SHIFT 2
+#define NOOFRANK_MASK 0x00000004
+#define NOOFROWS_SHIFT 3
+#define NOOFROWS_MASK 0x00000038
+#define NOOFCOLS_SHIFT 6
+#define NOOFCOLS_MASK 0x000000C0
+#define CHANSIZE_SHIFT 8
+#define CHANSIZE_MASK 0x00000100
+#define BURSTLENGTH_SHIFT 9
+#define BURSTLENGTH_MASK 0x00000200
+#define CHANSIZE_OVERRIDE (1 << 11)
+#define MC_VM_AGP_TOP 0x2028
+#define MC_VM_AGP_BOT 0x202C
+#define MC_VM_AGP_BASE 0x2030
+#define MC_VM_FB_LOCATION 0x2024
+#define MC_VM_MB_L1_TLB0_CNTL 0x2234
+#define MC_VM_MB_L1_TLB1_CNTL 0x2238
+#define MC_VM_MB_L1_TLB2_CNTL 0x223C
+#define MC_VM_MB_L1_TLB3_CNTL 0x2240
+#define ENABLE_L1_TLB (1 << 0)
+#define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1)
+#define SYSTEM_ACCESS_MODE_PA_ONLY (0 << 3)
+#define SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 3)
+#define SYSTEM_ACCESS_MODE_IN_SYS (2 << 3)
+#define SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 3)
+#define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5)
+#define EFFECTIVE_L1_TLB_SIZE(x) ((x)<<15)
+#define EFFECTIVE_L1_QUEUE_SIZE(x) ((x)<<18)
+#define MC_VM_MD_L1_TLB0_CNTL 0x2654
+#define MC_VM_MD_L1_TLB1_CNTL 0x2658
+#define MC_VM_MD_L1_TLB2_CNTL 0x265C
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C
+#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
+#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
+
+#define PA_CL_ENHANCE 0x8A14
+#define CLIP_VTX_REORDER_ENA (1 << 0)
+#define NUM_CLIP_SEQ(x) ((x) << 1)
+#define PA_SC_AA_CONFIG 0x28C04
+#define PA_SC_CLIPRECT_RULE 0x2820C
+#define PA_SC_EDGERULE 0x28230
+#define PA_SC_FIFO_SIZE 0x8BCC
+#define SC_PRIM_FIFO_SIZE(x) ((x) << 0)
+#define SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 12)
+#define SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 20)
+#define PA_SC_FORCE_EOV_MAX_CNTS 0x8B24
+#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0)
+#define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16)
+#define PA_SC_LINE_STIPPLE 0x28A0C
+#define PA_SC_LINE_STIPPLE_STATE 0x8B10
+
+#define SCRATCH_REG0 0x8500
+#define SCRATCH_REG1 0x8504
+#define SCRATCH_REG2 0x8508
+#define SCRATCH_REG3 0x850C
+#define SCRATCH_REG4 0x8510
+#define SCRATCH_REG5 0x8514
+#define SCRATCH_REG6 0x8518
+#define SCRATCH_REG7 0x851C
+#define SCRATCH_UMSK 0x8540
+#define SCRATCH_ADDR 0x8544
+
+#define SMX_DC_CTL0 0xA020
+#define USE_HASH_FUNCTION (1 << 0)
+#define NUMBER_OF_SETS(x) ((x) << 1)
+#define FLUSH_ALL_ON_EVENT (1 << 10)
+#define STALL_ON_EVENT (1 << 11)
+#define SMX_EVENT_CTL 0xA02C
+#define ES_FLUSH_CTL(x) ((x) << 0)
+#define GS_FLUSH_CTL(x) ((x) << 3)
+#define ACK_FLUSH_CTL(x) ((x) << 6)
+#define SYNC_FLUSH_CTL (1 << 8)
+
+#define SPI_CONFIG_CNTL 0x9100
+#define GPR_WRITE_PRIORITY(x) ((x) << 0)
+#define SPI_CONFIG_CNTL_1 0x913C
+#define VTX_DONE_DELAY(x) ((x) << 0)
+#define INTERP_ONE_PRIM_PER_ROW (1 << 4)
+#define SPI_INPUT_Z 0x286D8
+#define SPI_PS_IN_CONTROL_0 0x286CC
+#define NUM_INTERP(x) ((x)<<0)
+#define POSITION_ENA (1<<8)
+#define POSITION_CENTROID (1<<9)
+#define POSITION_ADDR(x) ((x)<<10)
+#define PARAM_GEN(x) ((x)<<15)
+#define PARAM_GEN_ADDR(x) ((x)<<19)
+#define BARYC_SAMPLE_CNTL(x) ((x)<<26)
+#define PERSP_GRADIENT_ENA (1<<28)
+#define LINEAR_GRADIENT_ENA (1<<29)
+#define POSITION_SAMPLE (1<<30)
+#define BARYC_AT_SAMPLE_ENA (1<<31)
+
+#define SQ_CONFIG 0x8C00
+#define VC_ENABLE (1 << 0)
+#define EXPORT_SRC_C (1 << 1)
+#define CS_PRIO(x) ((x) << 18)
+#define LS_PRIO(x) ((x) << 20)
+#define HS_PRIO(x) ((x) << 22)
+#define PS_PRIO(x) ((x) << 24)
+#define VS_PRIO(x) ((x) << 26)
+#define GS_PRIO(x) ((x) << 28)
+#define ES_PRIO(x) ((x) << 30)
+#define SQ_GPR_RESOURCE_MGMT_1 0x8C04
+#define NUM_PS_GPRS(x) ((x) << 0)
+#define NUM_VS_GPRS(x) ((x) << 16)
+#define NUM_CLAUSE_TEMP_GPRS(x) ((x) << 28)
+#define SQ_GPR_RESOURCE_MGMT_2 0x8C08
+#define NUM_GS_GPRS(x) ((x) << 0)
+#define NUM_ES_GPRS(x) ((x) << 16)
+#define SQ_GPR_RESOURCE_MGMT_3 0x8C0C
+#define NUM_HS_GPRS(x) ((x) << 0)
+#define NUM_LS_GPRS(x) ((x) << 16)
+#define SQ_THREAD_RESOURCE_MGMT 0x8C18
+#define NUM_PS_THREADS(x) ((x) << 0)
+#define NUM_VS_THREADS(x) ((x) << 8)
+#define NUM_GS_THREADS(x) ((x) << 16)
+#define NUM_ES_THREADS(x) ((x) << 24)
+#define SQ_THREAD_RESOURCE_MGMT_2 0x8C1C
+#define NUM_HS_THREADS(x) ((x) << 0)
+#define NUM_LS_THREADS(x) ((x) << 8)
+#define SQ_STACK_RESOURCE_MGMT_1 0x8C20
+#define NUM_PS_STACK_ENTRIES(x) ((x) << 0)
+#define NUM_VS_STACK_ENTRIES(x) ((x) << 16)
+#define SQ_STACK_RESOURCE_MGMT_2 0x8C24
+#define NUM_GS_STACK_ENTRIES(x) ((x) << 0)
+#define NUM_ES_STACK_ENTRIES(x) ((x) << 16)
+#define SQ_STACK_RESOURCE_MGMT_3 0x8C28
+#define NUM_HS_STACK_ENTRIES(x) ((x) << 0)
+#define NUM_LS_STACK_ENTRIES(x) ((x) << 16)
+#define SQ_DYN_GPR_CNTL_PS_FLUSH_REQ 0x8D8C
+#define SQ_LDS_RESOURCE_MGMT 0x8E2C
+
+#define SQ_MS_FIFO_SIZES 0x8CF0
+#define CACHE_FIFO_SIZE(x) ((x) << 0)
+#define FETCH_FIFO_HIWATER(x) ((x) << 8)
+#define DONE_FIFO_HIWATER(x) ((x) << 16)
+#define ALU_UPDATE_FIFO_HIWATER(x) ((x) << 24)
+
+#define SX_DEBUG_1 0x9058
+#define ENABLE_NEW_SMX_ADDRESS (1 << 16)
+#define SX_EXPORT_BUFFER_SIZES 0x900C
+#define COLOR_BUFFER_SIZE(x) ((x) << 0)
+#define POSITION_BUFFER_SIZE(x) ((x) << 8)
+#define SMX_BUFFER_SIZE(x) ((x) << 16)
+#define SX_MISC 0x28350
+
+#define CB_PERF_CTR0_SEL_0 0x9A20
+#define CB_PERF_CTR0_SEL_1 0x9A24
+#define CB_PERF_CTR1_SEL_0 0x9A28
+#define CB_PERF_CTR1_SEL_1 0x9A2C
+#define CB_PERF_CTR2_SEL_0 0x9A30
+#define CB_PERF_CTR2_SEL_1 0x9A34
+#define CB_PERF_CTR3_SEL_0 0x9A38
+#define CB_PERF_CTR3_SEL_1 0x9A3C
+
+#define TA_CNTL_AUX 0x9508
+#define DISABLE_CUBE_WRAP (1 << 0)
+#define DISABLE_CUBE_ANISO (1 << 1)
+#define SYNC_GRADIENT (1 << 24)
+#define SYNC_WALKER (1 << 25)
+#define SYNC_ALIGNER (1 << 26)
+
+#define VGT_CACHE_INVALIDATION 0x88C4
+#define CACHE_INVALIDATION(x) ((x) << 0)
+#define VC_ONLY 0
+#define TC_ONLY 1
+#define VC_AND_TC 2
+#define AUTO_INVLD_EN(x) ((x) << 6)
+#define NO_AUTO 0
+#define ES_AUTO 1
+#define GS_AUTO 2
+#define ES_AND_GS_AUTO 3
+#define VGT_GS_VERTEX_REUSE 0x88D4
+#define VGT_NUM_INSTANCES 0x8974
+#define VGT_OUT_DEALLOC_CNTL 0x28C5C
+#define DEALLOC_DIST_MASK 0x0000007F
+#define VGT_VERTEX_REUSE_BLOCK_CNTL 0x28C58
+#define VTX_REUSE_DEPTH_MASK 0x000000FF
+
+#define VM_CONTEXT0_CNTL 0x1410
+#define ENABLE_CONTEXT (1 << 0)
+#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
+#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
+#define VM_CONTEXT1_CNTL 0x1414
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155C
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518
+#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
+#define REQUEST_TYPE(x) (((x) & 0xf) << 0)
+#define RESPONSE_TYPE_MASK 0x000000F0
+#define RESPONSE_TYPE_SHIFT 4
+#define VM_L2_CNTL 0x1400
+#define ENABLE_L2_CACHE (1 << 0)
+#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
+#define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9)
+#define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 14)
+#define VM_L2_CNTL2 0x1404
+#define INVALIDATE_ALL_L1_TLBS (1 << 0)
+#define INVALIDATE_L2_CACHE (1 << 1)
+#define VM_L2_CNTL3 0x1408
+#define BANK_SELECT(x) ((x) << 0)
+#define CACHE_UPDATE_MODE(x) ((x) << 6)
+#define VM_L2_STATUS 0x140C
+#define L2_BUSY (1 << 0)
+
+#define WAIT_UNTIL 0x8040
+
+#define SRBM_STATUS 0x0E50
+#define SRBM_SOFT_RESET 0x0E60
+#define SRBM_SOFT_RESET_ALL_MASK 0x00FEEFA6
+#define SOFT_RESET_BIF (1 << 1)
+#define SOFT_RESET_CG (1 << 2)
+#define SOFT_RESET_DC (1 << 5)
+#define SOFT_RESET_GRBM (1 << 8)
+#define SOFT_RESET_HDP (1 << 9)
+#define SOFT_RESET_IH (1 << 10)
+#define SOFT_RESET_MC (1 << 11)
+#define SOFT_RESET_RLC (1 << 13)
+#define SOFT_RESET_ROM (1 << 14)
+#define SOFT_RESET_SEM (1 << 15)
+#define SOFT_RESET_VMC (1 << 17)
+#define SOFT_RESET_TST (1 << 21)
+#define SOFT_RESET_REGBB (1 << 22)
+#define SOFT_RESET_ORB (1 << 23)
+
+#define IH_RB_CNTL 0x3e00
+# define IH_RB_ENABLE (1 << 0)
+# define IH_IB_SIZE(x) ((x) << 1) /* log2 */
+# define IH_RB_FULL_DRAIN_ENABLE (1 << 6)
+# define IH_WPTR_WRITEBACK_ENABLE (1 << 8)
+# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */
+# define IH_WPTR_OVERFLOW_ENABLE (1 << 16)
+# define IH_WPTR_OVERFLOW_CLEAR (1 << 31)
+#define IH_RB_BASE 0x3e04
+#define IH_RB_RPTR 0x3e08
+#define IH_RB_WPTR 0x3e0c
+# define RB_OVERFLOW (1 << 0)
+# define WPTR_OFFSET_MASK 0x3fffc
+#define IH_RB_WPTR_ADDR_HI 0x3e10
+#define IH_RB_WPTR_ADDR_LO 0x3e14
+#define IH_CNTL 0x3e18
+# define ENABLE_INTR (1 << 0)
+# define IH_MC_SWAP(x) ((x) << 2)
+# define IH_MC_SWAP_NONE 0
+# define IH_MC_SWAP_16BIT 1
+# define IH_MC_SWAP_32BIT 2
+# define IH_MC_SWAP_64BIT 3
+# define RPTR_REARM (1 << 4)
+# define MC_WRREQ_CREDIT(x) ((x) << 15)
+# define MC_WR_CLEAN_CNT(x) ((x) << 20)
+
+#define CP_INT_CNTL 0xc124
+# define CNTX_BUSY_INT_ENABLE (1 << 19)
+# define CNTX_EMPTY_INT_ENABLE (1 << 20)
+# define SCRATCH_INT_ENABLE (1 << 25)
+# define TIME_STAMP_INT_ENABLE (1 << 26)
+# define IB2_INT_ENABLE (1 << 29)
+# define IB1_INT_ENABLE (1 << 30)
+# define RB_INT_ENABLE (1 << 31)
+#define CP_INT_STATUS 0xc128
+# define SCRATCH_INT_STAT (1 << 25)
+# define TIME_STAMP_INT_STAT (1 << 26)
+# define IB2_INT_STAT (1 << 29)
+# define IB1_INT_STAT (1 << 30)
+# define RB_INT_STAT (1 << 31)
+
+#define GRBM_INT_CNTL 0x8060
+# define RDERR_INT_ENABLE (1 << 0)
+# define GUI_IDLE_INT_ENABLE (1 << 19)
+
+/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
+#define CRTC_STATUS_FRAME_COUNT 0x6e98
+
+/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */
+#define VLINE_STATUS 0x6bb8
+# define VLINE_OCCURRED (1 << 0)
+# define VLINE_ACK (1 << 4)
+# define VLINE_STAT (1 << 12)
+# define VLINE_INTERRUPT (1 << 16)
+# define VLINE_INTERRUPT_TYPE (1 << 17)
+/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */
+#define VBLANK_STATUS 0x6bbc
+# define VBLANK_OCCURRED (1 << 0)
+# define VBLANK_ACK (1 << 4)
+# define VBLANK_STAT (1 << 12)
+# define VBLANK_INTERRUPT (1 << 16)
+# define VBLANK_INTERRUPT_TYPE (1 << 17)
+
+/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */
+#define INT_MASK 0x6b40
+# define VBLANK_INT_MASK (1 << 0)
+# define VLINE_INT_MASK (1 << 4)
+
+#define DISP_INTERRUPT_STATUS 0x60f4
+# define LB_D1_VLINE_INTERRUPT (1 << 2)
+# define LB_D1_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD1_INTERRUPT (1 << 17)
+# define DC_HPD1_RX_INTERRUPT (1 << 18)
+# define DACA_AUTODETECT_INTERRUPT (1 << 22)
+# define DACB_AUTODETECT_INTERRUPT (1 << 23)
+# define DC_I2C_SW_DONE_INTERRUPT (1 << 24)
+# define DC_I2C_HW_DONE_INTERRUPT (1 << 25)
+#define DISP_INTERRUPT_STATUS_CONTINUE 0x60f8
+# define LB_D2_VLINE_INTERRUPT (1 << 2)
+# define LB_D2_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD2_INTERRUPT (1 << 17)
+# define DC_HPD2_RX_INTERRUPT (1 << 18)
+# define DISP_TIMER_INTERRUPT (1 << 24)
+#define DISP_INTERRUPT_STATUS_CONTINUE2 0x60fc
+# define LB_D3_VLINE_INTERRUPT (1 << 2)
+# define LB_D3_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD3_INTERRUPT (1 << 17)
+# define DC_HPD3_RX_INTERRUPT (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE3 0x6100
+# define LB_D4_VLINE_INTERRUPT (1 << 2)
+# define LB_D4_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD4_INTERRUPT (1 << 17)
+# define DC_HPD4_RX_INTERRUPT (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE4 0x614c
+# define LB_D5_VLINE_INTERRUPT (1 << 2)
+# define LB_D5_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD5_INTERRUPT (1 << 17)
+# define DC_HPD5_RX_INTERRUPT (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE5 0x6150
+# define LB_D6_VLINE_INTERRUPT (1 << 2)
+# define LB_D6_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD6_INTERRUPT (1 << 17)
+# define DC_HPD6_RX_INTERRUPT (1 << 18)
+
+/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
+#define GRPH_INT_STATUS 0x6858
+# define GRPH_PFLIP_INT_OCCURRED (1 << 0)
+# define GRPH_PFLIP_INT_CLEAR (1 << 8)
+/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
+#define GRPH_INT_CONTROL 0x685c
+# define GRPH_PFLIP_INT_MASK (1 << 0)
+# define GRPH_PFLIP_INT_TYPE (1 << 8)
+
+#define DACA_AUTODETECT_INT_CONTROL 0x66c8
+#define DACB_AUTODETECT_INT_CONTROL 0x67c8
+
+#define DC_HPD1_INT_STATUS 0x601c
+#define DC_HPD2_INT_STATUS 0x6028
+#define DC_HPD3_INT_STATUS 0x6034
+#define DC_HPD4_INT_STATUS 0x6040
+#define DC_HPD5_INT_STATUS 0x604c
+#define DC_HPD6_INT_STATUS 0x6058
+# define DC_HPDx_INT_STATUS (1 << 0)
+# define DC_HPDx_SENSE (1 << 1)
+# define DC_HPDx_RX_INT_STATUS (1 << 8)
+
+#define DC_HPD1_INT_CONTROL 0x6020
+#define DC_HPD2_INT_CONTROL 0x602c
+#define DC_HPD3_INT_CONTROL 0x6038
+#define DC_HPD4_INT_CONTROL 0x6044
+#define DC_HPD5_INT_CONTROL 0x6050
+#define DC_HPD6_INT_CONTROL 0x605c
+# define DC_HPDx_INT_ACK (1 << 0)
+# define DC_HPDx_INT_POLARITY (1 << 8)
+# define DC_HPDx_INT_EN (1 << 16)
+# define DC_HPDx_RX_INT_ACK (1 << 20)
+# define DC_HPDx_RX_INT_EN (1 << 24)
+
+#define DC_HPD1_CONTROL 0x6024
+#define DC_HPD2_CONTROL 0x6030
+#define DC_HPD3_CONTROL 0x603c
+#define DC_HPD4_CONTROL 0x6048
+#define DC_HPD5_CONTROL 0x6054
+#define DC_HPD6_CONTROL 0x6060
+# define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0)
+# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
+# define DC_HPDx_EN (1 << 28)
+
+#endif
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index cf60c0b3ef15..4de41b0ad5ce 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -663,26 +663,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
if (r100_debugfs_cp_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for CP !\n");
}
- /* Reset CP */
- tmp = RREG32(RADEON_CP_CSQ_STAT);
- if ((tmp & (1 << 31))) {
- DRM_INFO("radeon: cp busy (0x%08X) resetting\n", tmp);
- WREG32(RADEON_CP_CSQ_MODE, 0);
- WREG32(RADEON_CP_CSQ_CNTL, 0);
- WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
- tmp = RREG32(RADEON_RBBM_SOFT_RESET);
- mdelay(2);
- WREG32(RADEON_RBBM_SOFT_RESET, 0);
- tmp = RREG32(RADEON_RBBM_SOFT_RESET);
- mdelay(2);
- tmp = RREG32(RADEON_CP_CSQ_STAT);
- if ((tmp & (1 << 31))) {
- DRM_INFO("radeon: cp reset failed (0x%08X)\n", tmp);
- }
- } else {
- DRM_INFO("radeon: cp idle (0x%08X)\n", tmp);
- }
-
if (!rdev->me_fw) {
r = r100_cp_init_microcode(rdev);
if (r) {
@@ -787,39 +767,6 @@ void r100_cp_disable(struct radeon_device *rdev)
}
}
-int r100_cp_reset(struct radeon_device *rdev)
-{
- uint32_t tmp;
- bool reinit_cp;
- int i;
-
- reinit_cp = rdev->cp.ready;
- rdev->cp.ready = false;
- WREG32(RADEON_CP_CSQ_MODE, 0);
- WREG32(RADEON_CP_CSQ_CNTL, 0);
- WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
- (void)RREG32(RADEON_RBBM_SOFT_RESET);
- udelay(200);
- WREG32(RADEON_RBBM_SOFT_RESET, 0);
- /* Wait to prevent race in RBBM_STATUS */
- mdelay(1);
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = RREG32(RADEON_RBBM_STATUS);
- if (!(tmp & (1 << 16))) {
- DRM_INFO("CP reset succeed (RBBM_STATUS=0x%08X)\n",
- tmp);
- if (reinit_cp) {
- return r100_cp_init(rdev, rdev->cp.ring_size);
- }
- return 0;
- }
- DRM_UDELAY(1);
- }
- tmp = RREG32(RADEON_RBBM_STATUS);
- DRM_ERROR("Failed to reset CP (RBBM_STATUS=0x%08X)!\n", tmp);
- return -1;
-}
-
void r100_cp_commit(struct radeon_device *rdev)
{
WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
@@ -1733,76 +1680,163 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev)
return -1;
}
-void r100_gpu_init(struct radeon_device *rdev)
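+/* Record the current CP read pointer and timestamp for lockup tracking. */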
+void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
{
- /* TODO: anythings to do here ? pipes ? */
- r100_hdp_reset(rdev);
+ lockup->last_cp_rptr = cp->rptr;
+ lockup->last_jiffies = jiffies;
}
-void r100_hdp_reset(struct radeon_device *rdev)
+/**
+ * r100_gpu_cp_is_lockup() - check whether the CP is locked up by recording information
+ * @rdev: radeon device structure
+ * @lockup: r100_gpu_lockup structure holding CP lockup tracking information
+ * @cp: radeon_cp structure holding CP information
+ *
+ * We don't need to initialize the lockup tracking information: we will either
+ * see the CP rptr move to a different value or see jiffies wrap around, and
+ * both cases force a refresh of the tracking information.
+ *
+ * A possible false positive is when we get called after a long while and
+ * last_cp_rptr happens to equal the current CP rptr; unlikely, but it can
+ * happen. To avoid this, if the elapsed time since the last call is more than
+ * 3 seconds we return false and only update the tracking information. Because
+ * of this the caller must call r100_gpu_cp_is_lockup several times within a
+ * few seconds for a lockup to be reported; the fencing code should be
+ * cautious about that.
+ *
+ * The caller should write to the ring to force the CP to do something, so we
+ * don't get a false positive when the CP simply has nothing to do.
+ *
+ **/
+bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
+{
+ unsigned long cjiffies, elapsed;
+
+ cjiffies = jiffies;
+ if (!time_after(cjiffies, lockup->last_jiffies)) {
+ /* likely a wrap around */
+ lockup->last_cp_rptr = cp->rptr;
+ lockup->last_jiffies = jiffies;
+ return false;
+ }
+ if (cp->rptr != lockup->last_cp_rptr) {
+ /* CP is still working no lockup */
+ lockup->last_cp_rptr = cp->rptr;
+ lockup->last_jiffies = jiffies;
+ return false;
+ }
+ elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
+ if (elapsed >= 3000) {
+ /* Very likely the improbable case where the current rptr equals an rptr
+ * recorded a long while ago; treat it as a false positive and update the
+ * tracking information so we get called again later.
+ */
+ lockup->last_cp_rptr = cp->rptr;
+ lockup->last_jiffies = jiffies;
+ return false;
+ }
+ if (elapsed >= 1000) {
+ dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
+ return true;
+ }
+ /* give a chance to the GPU ... */
+ return false;
+}
+
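+/* If the GUI is idle the GPU cannot be locked up; otherwise push two NOP
+ * packets so the CP has work to do, then let r100_gpu_cp_is_lockup() decide
+ * based on whether rptr made progress since the last call.
+ */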
+bool r100_gpu_is_lockup(struct radeon_device *rdev)
{
- uint32_t tmp;
+ u32 rbbm_status;
+ int r;
- tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
- tmp |= (7 << 28);
- WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
- (void)RREG32(RADEON_HOST_PATH_CNTL);
- udelay(200);
- WREG32(RADEON_RBBM_SOFT_RESET, 0);
- WREG32(RADEON_HOST_PATH_CNTL, tmp);
- (void)RREG32(RADEON_HOST_PATH_CNTL);
+ rbbm_status = RREG32(R_000E40_RBBM_STATUS);
+ if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
+ r100_gpu_lockup_update(&rdev->config.r100.lockup, &rdev->cp);
+ return false;
+ }
+ /* force CP activities */
+ r = radeon_ring_lock(rdev, 2);
+ if (!r) {
+ /* PACKET2 NOP */
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_unlock_commit(rdev);
+ }
+ rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+ return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, &rdev->cp);
}
-int r100_rb2d_reset(struct radeon_device *rdev)
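+/* Disable PCI bus mastering both through BUS_CNTL and the PCI command word
+ * so stray DMA does not interfere with the reset.
+ */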
+void r100_bm_disable(struct radeon_device *rdev)
{
- uint32_t tmp;
- int i;
+ u32 tmp;
- WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2);
- (void)RREG32(RADEON_RBBM_SOFT_RESET);
- udelay(200);
- WREG32(RADEON_RBBM_SOFT_RESET, 0);
- /* Wait to prevent race in RBBM_STATUS */
+ /* disable bus mastering */
+ tmp = RREG32(R_000030_BUS_CNTL);
+ WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044);
+ mdelay(1);
+ WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042);
+ mdelay(1);
+ WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
+ tmp = RREG32(RADEON_BUS_CNTL);
+ mdelay(1);
+ pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
+ pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
mdelay(1);
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = RREG32(RADEON_RBBM_STATUS);
- if (!(tmp & (1 << 26))) {
- DRM_INFO("RB2D reset succeed (RBBM_STATUS=0x%08X)\n",
- tmp);
- return 0;
- }
- DRM_UDELAY(1);
- }
- tmp = RREG32(RADEON_RBBM_STATUS);
- DRM_ERROR("Failed to reset RB2D (RBBM_STATUS=0x%08X)!\n", tmp);
- return -1;
}
-int r100_gpu_reset(struct radeon_device *rdev)
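+/* Full ASIC reset: save MC and PCI state, stop the CP and bus mastering,
+ * pulse RBBM soft reset for the SE/RE/PP/RB blocks and then the CP, restore
+ * PCI state and bus mastering, and finally check the busy bits to see
+ * whether the reset actually succeeded.
+ */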
+int r100_asic_reset(struct radeon_device *rdev)
{
- uint32_t status;
+ struct r100_mc_save save;
+ u32 status, tmp;
- /* reset order likely matter */
- status = RREG32(RADEON_RBBM_STATUS);
- /* reset HDP */
- r100_hdp_reset(rdev);
- /* reset rb2d */
- if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
- r100_rb2d_reset(rdev);
+ r100_mc_stop(rdev, &save);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ if (!G_000E40_GUI_ACTIVE(status)) {
+ return 0;
}
- /* TODO: reset 3D engine */
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* stop CP */
+ WREG32(RADEON_CP_CSQ_CNTL, 0);
+ tmp = RREG32(RADEON_CP_RB_CNTL);
+ WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+ WREG32(RADEON_CP_RB_RPTR_WR, 0);
+ WREG32(RADEON_CP_RB_WPTR, 0);
+ WREG32(RADEON_CP_RB_CNTL, tmp);
+ /* save PCI state */
+ pci_save_state(rdev->pdev);
+ /* disable bus mastering */
+ r100_bm_disable(rdev);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) |
+ S_0000F0_SOFT_RESET_RE(1) |
+ S_0000F0_SOFT_RESET_PP(1) |
+ S_0000F0_SOFT_RESET_RB(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* reset CP */
- status = RREG32(RADEON_RBBM_STATUS);
- if (status & (1 << 16)) {
- r100_cp_reset(rdev);
- }
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* restore PCI & busmastering */
+ pci_restore_state(rdev->pdev);
+ r100_enable_bm(rdev);
/* Check if GPU is idle */
- status = RREG32(RADEON_RBBM_STATUS);
- if (status & RADEON_RBBM_ACTIVE) {
- DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
+ if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
+ G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
+ dev_err(rdev->dev, "failed to reset GPU\n");
+ rdev->gpu_lockup = true;
return -1;
}
- DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
+ r100_mc_resume(rdev, &save);
+ dev_info(rdev->dev, "GPU reset succeed\n");
return 0;
}
@@ -2002,11 +2036,6 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
else
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
}
- /* FIXME remove this once we support unmappable VRAM */
- if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
- rdev->mc.mc_vram_size = rdev->mc.aper_size;
- rdev->mc.real_vram_size = rdev->mc.aper_size;
- }
}
void r100_vga_set_state(struct radeon_device *rdev, bool state)
@@ -3399,7 +3428,7 @@ static int r100_startup(struct radeon_device *rdev)
/* Resume clock */
r100_clock_startup(rdev);
/* Initialize GPU configuration (# pipes, ...) */
- r100_gpu_init(rdev);
+// r100_gpu_init(rdev);
/* Initialize GART (initialize after TTM so we can allocate
* memory through TTM but finalize after TTM) */
r100_enable_bm(rdev);
@@ -3436,7 +3465,7 @@ int r100_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
r100_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -3505,7 +3534,7 @@ int r100_init(struct radeon_device *rdev)
return r;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h
index df29a630c466..de8abd104ab7 100644
--- a/drivers/gpu/drm/radeon/r100d.h
+++ b/drivers/gpu/drm/radeon/r100d.h
@@ -74,6 +74,134 @@
#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
/* Registers */
+#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
+#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
+#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
+#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
+#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
+#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
+#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
+#define S_0000F0_SOFT_RESET_SE(x) (((x) & 0x1) << 2)
+#define G_0000F0_SOFT_RESET_SE(x) (((x) >> 2) & 0x1)
+#define C_0000F0_SOFT_RESET_SE 0xFFFFFFFB
+#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
+#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
+#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
+#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
+#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
+#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
+#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
+#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
+#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
+#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
+#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
+#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
+#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
+#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
+#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
+#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
+#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
+#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
+#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
+#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
+#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
+#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
+#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
+#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
+#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
+#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
+#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
+#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
+#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
+#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
+#define R_000030_BUS_CNTL 0x000030
+#define S_000030_BUS_DBL_RESYNC(x) (((x) & 0x1) << 0)
+#define G_000030_BUS_DBL_RESYNC(x) (((x) >> 0) & 0x1)
+#define C_000030_BUS_DBL_RESYNC 0xFFFFFFFE
+#define S_000030_BUS_MSTR_RESET(x) (((x) & 0x1) << 1)
+#define G_000030_BUS_MSTR_RESET(x) (((x) >> 1) & 0x1)
+#define C_000030_BUS_MSTR_RESET 0xFFFFFFFD
+#define S_000030_BUS_FLUSH_BUF(x) (((x) & 0x1) << 2)
+#define G_000030_BUS_FLUSH_BUF(x) (((x) >> 2) & 0x1)
+#define C_000030_BUS_FLUSH_BUF 0xFFFFFFFB
+#define S_000030_BUS_STOP_REQ_DIS(x) (((x) & 0x1) << 3)
+#define G_000030_BUS_STOP_REQ_DIS(x) (((x) >> 3) & 0x1)
+#define C_000030_BUS_STOP_REQ_DIS 0xFFFFFFF7
+#define S_000030_BUS_PM4_READ_COMBINE_EN(x) (((x) & 0x1) << 4)
+#define G_000030_BUS_PM4_READ_COMBINE_EN(x) (((x) >> 4) & 0x1)
+#define C_000030_BUS_PM4_READ_COMBINE_EN 0xFFFFFFEF
+#define S_000030_BUS_WRT_COMBINE_EN(x) (((x) & 0x1) << 5)
+#define G_000030_BUS_WRT_COMBINE_EN(x) (((x) >> 5) & 0x1)
+#define C_000030_BUS_WRT_COMBINE_EN 0xFFFFFFDF
+#define S_000030_BUS_MASTER_DIS(x) (((x) & 0x1) << 6)
+#define G_000030_BUS_MASTER_DIS(x) (((x) >> 6) & 0x1)
+#define C_000030_BUS_MASTER_DIS 0xFFFFFFBF
+#define S_000030_BIOS_ROM_WRT_EN(x) (((x) & 0x1) << 7)
+#define G_000030_BIOS_ROM_WRT_EN(x) (((x) >> 7) & 0x1)
+#define C_000030_BIOS_ROM_WRT_EN 0xFFFFFF7F
+#define S_000030_BM_DAC_CRIPPLE(x) (((x) & 0x1) << 8)
+#define G_000030_BM_DAC_CRIPPLE(x) (((x) >> 8) & 0x1)
+#define C_000030_BM_DAC_CRIPPLE 0xFFFFFEFF
+#define S_000030_BUS_NON_PM4_READ_COMBINE_EN(x) (((x) & 0x1) << 9)
+#define G_000030_BUS_NON_PM4_READ_COMBINE_EN(x) (((x) >> 9) & 0x1)
+#define C_000030_BUS_NON_PM4_READ_COMBINE_EN 0xFFFFFDFF
+#define S_000030_BUS_XFERD_DISCARD_EN(x) (((x) & 0x1) << 10)
+#define G_000030_BUS_XFERD_DISCARD_EN(x) (((x) >> 10) & 0x1)
+#define C_000030_BUS_XFERD_DISCARD_EN 0xFFFFFBFF
+#define S_000030_BUS_SGL_READ_DISABLE(x) (((x) & 0x1) << 11)
+#define G_000030_BUS_SGL_READ_DISABLE(x) (((x) >> 11) & 0x1)
+#define C_000030_BUS_SGL_READ_DISABLE 0xFFFFF7FF
+#define S_000030_BIOS_DIS_ROM(x) (((x) & 0x1) << 12)
+#define G_000030_BIOS_DIS_ROM(x) (((x) >> 12) & 0x1)
+#define C_000030_BIOS_DIS_ROM 0xFFFFEFFF
+#define S_000030_BUS_PCI_READ_RETRY_EN(x) (((x) & 0x1) << 13)
+#define G_000030_BUS_PCI_READ_RETRY_EN(x) (((x) >> 13) & 0x1)
+#define C_000030_BUS_PCI_READ_RETRY_EN 0xFFFFDFFF
+#define S_000030_BUS_AGP_AD_STEPPING_EN(x) (((x) & 0x1) << 14)
+#define G_000030_BUS_AGP_AD_STEPPING_EN(x) (((x) >> 14) & 0x1)
+#define C_000030_BUS_AGP_AD_STEPPING_EN 0xFFFFBFFF
+#define S_000030_BUS_PCI_WRT_RETRY_EN(x) (((x) & 0x1) << 15)
+#define G_000030_BUS_PCI_WRT_RETRY_EN(x) (((x) >> 15) & 0x1)
+#define C_000030_BUS_PCI_WRT_RETRY_EN 0xFFFF7FFF
+#define S_000030_BUS_RETRY_WS(x) (((x) & 0xF) << 16)
+#define G_000030_BUS_RETRY_WS(x) (((x) >> 16) & 0xF)
+#define C_000030_BUS_RETRY_WS 0xFFF0FFFF
+#define S_000030_BUS_MSTR_RD_MULT(x) (((x) & 0x1) << 20)
+#define G_000030_BUS_MSTR_RD_MULT(x) (((x) >> 20) & 0x1)
+#define C_000030_BUS_MSTR_RD_MULT 0xFFEFFFFF
+#define S_000030_BUS_MSTR_RD_LINE(x) (((x) & 0x1) << 21)
+#define G_000030_BUS_MSTR_RD_LINE(x) (((x) >> 21) & 0x1)
+#define C_000030_BUS_MSTR_RD_LINE 0xFFDFFFFF
+#define S_000030_BUS_SUSPEND(x) (((x) & 0x1) << 22)
+#define G_000030_BUS_SUSPEND(x) (((x) >> 22) & 0x1)
+#define C_000030_BUS_SUSPEND 0xFFBFFFFF
+#define S_000030_LAT_16X(x) (((x) & 0x1) << 23)
+#define G_000030_LAT_16X(x) (((x) >> 23) & 0x1)
+#define C_000030_LAT_16X 0xFF7FFFFF
+#define S_000030_BUS_RD_DISCARD_EN(x) (((x) & 0x1) << 24)
+#define G_000030_BUS_RD_DISCARD_EN(x) (((x) >> 24) & 0x1)
+#define C_000030_BUS_RD_DISCARD_EN 0xFEFFFFFF
+#define S_000030_ENFRCWRDY(x) (((x) & 0x1) << 25)
+#define G_000030_ENFRCWRDY(x) (((x) >> 25) & 0x1)
+#define C_000030_ENFRCWRDY 0xFDFFFFFF
+#define S_000030_BUS_MSTR_WS(x) (((x) & 0x1) << 26)
+#define G_000030_BUS_MSTR_WS(x) (((x) >> 26) & 0x1)
+#define C_000030_BUS_MSTR_WS 0xFBFFFFFF
+#define S_000030_BUS_PARKING_DIS(x) (((x) & 0x1) << 27)
+#define G_000030_BUS_PARKING_DIS(x) (((x) >> 27) & 0x1)
+#define C_000030_BUS_PARKING_DIS 0xF7FFFFFF
+#define S_000030_BUS_MSTR_DISCONNECT_EN(x) (((x) & 0x1) << 28)
+#define G_000030_BUS_MSTR_DISCONNECT_EN(x) (((x) >> 28) & 0x1)
+#define C_000030_BUS_MSTR_DISCONNECT_EN 0xEFFFFFFF
+#define S_000030_SERR_EN(x) (((x) & 0x1) << 29)
+#define G_000030_SERR_EN(x) (((x) >> 29) & 0x1)
+#define C_000030_SERR_EN 0xDFFFFFFF
+#define S_000030_BUS_READ_BURST(x) (((x) & 0x1) << 30)
+#define G_000030_BUS_READ_BURST(x) (((x) >> 30) & 0x1)
+#define C_000030_BUS_READ_BURST 0xBFFFFFFF
+#define S_000030_BUS_RDY_READ_DLY(x) (((x) & 0x1) << 31)
+#define G_000030_BUS_RDY_READ_DLY(x) (((x) >> 31) & 0x1)
+#define C_000030_BUS_RDY_READ_DLY 0x7FFFFFFF
#define R_000040_GEN_INT_CNTL 0x000040
#define S_000040_CRTC_VBLANK(x) (((x) & 0x1) << 0)
#define G_000040_CRTC_VBLANK(x) (((x) >> 0) & 0x1)
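The macro triples above follow the driver's register-field convention: S_FIELD(v) shifts a value into place, G_FIELD(reg) extracts it, and C_FIELD is the AND-mask that clears it. A minimal sketch of how such a triple is typically consumed, using the BUS_CNTL defines above and the driver's RREG32()/WREG32() accessors (the helper name is hypothetical, not part of the patch):

static void example_force_bus_master(struct radeon_device *rdev)
{
	u32 bus_cntl = RREG32(R_000030_BUS_CNTL);

	bus_cntl &= C_000030_BUS_MASTER_DIS;	/* clear the field */
	bus_cntl |= S_000030_BUS_MASTER_DIS(0);	/* insert the new value */
	WREG32(R_000030_BUS_CNTL, bus_cntl);

	/* G_* extracts the field again, e.g. to verify the write */
	if (G_000030_BUS_MASTER_DIS(RREG32(R_000030_BUS_CNTL)))
		dev_warn(rdev->dev, "bus mastering is still disabled\n");
}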
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index a5ff8076b423..6d9569e002f7 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -27,8 +27,9 @@
*/
#include <linux/seq_file.h>
#include <linux/slab.h>
-#include "drmP.h"
-#include "drm.h"
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc_helper.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
@@ -151,6 +152,10 @@ void rv370_pcie_gart_disable(struct radeon_device *rdev)
u32 tmp;
int r;
+ WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0);
+ WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0);
+ WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
+ WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
@@ -323,7 +328,6 @@ void r300_gpu_init(struct radeon_device *rdev)
{
uint32_t gb_tile_config, tmp;
- r100_hdp_reset(rdev);
if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
(rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) {
/* r300,r350 */
@@ -375,89 +379,85 @@ void r300_gpu_init(struct radeon_device *rdev)
rdev->num_gb_pipes, rdev->num_z_pipes);
}
-int r300_ga_reset(struct radeon_device *rdev)
+bool r300_gpu_is_lockup(struct radeon_device *rdev)
{
- uint32_t tmp;
- bool reinit_cp;
- int i;
+ u32 rbbm_status;
+ int r;
- reinit_cp = rdev->cp.ready;
- rdev->cp.ready = false;
- for (i = 0; i < rdev->usec_timeout; i++) {
- WREG32(RADEON_CP_CSQ_MODE, 0);
- WREG32(RADEON_CP_CSQ_CNTL, 0);
- WREG32(RADEON_RBBM_SOFT_RESET, 0x32005);
- (void)RREG32(RADEON_RBBM_SOFT_RESET);
- udelay(200);
- WREG32(RADEON_RBBM_SOFT_RESET, 0);
- /* Wait to prevent race in RBBM_STATUS */
- mdelay(1);
- tmp = RREG32(RADEON_RBBM_STATUS);
- if (tmp & ((1 << 20) | (1 << 26))) {
- DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)", tmp);
- /* GA still busy soft reset it */
- WREG32(0x429C, 0x200);
- WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
- WREG32(R300_RE_SCISSORS_TL, 0);
- WREG32(R300_RE_SCISSORS_BR, 0);
- WREG32(0x24AC, 0);
- }
- /* Wait to prevent race in RBBM_STATUS */
- mdelay(1);
- tmp = RREG32(RADEON_RBBM_STATUS);
- if (!(tmp & ((1 << 20) | (1 << 26)))) {
- break;
- }
+ rbbm_status = RREG32(R_000E40_RBBM_STATUS);
+ if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
+ r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
+ return false;
}
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = RREG32(RADEON_RBBM_STATUS);
- if (!(tmp & ((1 << 20) | (1 << 26)))) {
- DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n",
- tmp);
- if (reinit_cp) {
- return r100_cp_init(rdev, rdev->cp.ring_size);
- }
- return 0;
- }
- DRM_UDELAY(1);
+ /* force CP activities */
+ r = radeon_ring_lock(rdev, 2);
+ if (!r) {
+ /* PACKET2 NOP */
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_unlock_commit(rdev);
}
- tmp = RREG32(RADEON_RBBM_STATUS);
- DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
- return -1;
+ rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+ return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
}
-int r300_gpu_reset(struct radeon_device *rdev)
+int r300_asic_reset(struct radeon_device *rdev)
{
- uint32_t status;
-
- /* reset order likely matter */
- status = RREG32(RADEON_RBBM_STATUS);
- /* reset HDP */
- r100_hdp_reset(rdev);
- /* reset rb2d */
- if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
- r100_rb2d_reset(rdev);
- }
- /* reset GA */
- if (status & ((1 << 20) | (1 << 26))) {
- r300_ga_reset(rdev);
- }
- /* reset CP */
- status = RREG32(RADEON_RBBM_STATUS);
- if (status & (1 << 16)) {
- r100_cp_reset(rdev);
+ struct r100_mc_save save;
+ u32 status, tmp;
+
+ r100_mc_stop(rdev, &save);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ if (!G_000E40_GUI_ACTIVE(status)) {
+ return 0;
}
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* stop CP */
+ WREG32(RADEON_CP_CSQ_CNTL, 0);
+ tmp = RREG32(RADEON_CP_RB_CNTL);
+ WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+ WREG32(RADEON_CP_RB_RPTR_WR, 0);
+ WREG32(RADEON_CP_RB_WPTR, 0);
+ WREG32(RADEON_CP_RB_CNTL, tmp);
+ /* save PCI state */
+ pci_save_state(rdev->pdev);
+ /* disable bus mastering */
+ r100_bm_disable(rdev);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
+ S_0000F0_SOFT_RESET_GA(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* resetting the CP seems to be problematic: sometimes it ends up
+ * hard-locking the machine, but it's necessary for a successful
+ * reset. More testing is needed on R3XX/R4XX to find a reliable
+ * solution (if any).
+ */
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* restore PCI & busmastering */
+ pci_restore_state(rdev->pdev);
+ r100_enable_bm(rdev);
/* Check if GPU is idle */
- status = RREG32(RADEON_RBBM_STATUS);
- if (status & RADEON_RBBM_ACTIVE) {
- DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
+ if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
+ dev_err(rdev->dev, "failed to reset GPU\n");
+ rdev->gpu_lockup = true;
return -1;
}
- DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
+ r100_mc_resume(rdev, &save);
+ dev_info(rdev->dev, "GPU reset succeeded\n");
return 0;
}
-
/*
* r300,r350,rv350,rv380 VRAM info
*/
@@ -1316,7 +1316,7 @@ int r300_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
r300_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -1387,7 +1387,7 @@ int r300_init(struct radeon_device *rdev)
return r;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
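The r300.c changes above replace the old blind GA/CP soft-reset path with a lockup detector plus a full ASIC reset: r300_gpu_is_lockup() forces some CP work (two PACKET2 NOPs), samples the ring read pointer, and lets r100_gpu_cp_is_lockup() decide whether the CP is still making progress. That helper is only declared in this patch; a minimal sketch of the idea, with an assumed 10-second threshold (illustrative only, not the actual implementation):

bool example_cp_is_lockup(struct radeon_device *rdev,
			  struct r100_gpu_lockup *lockup,
			  struct radeon_cp *cp)
{
	unsigned long elapsed;

	if (cp->rptr != lockup->last_cp_rptr) {
		/* CP consumed ring entries since the last check: no lockup */
		lockup->last_cp_rptr = cp->rptr;
		lockup->last_jiffies = jiffies;
		return false;
	}
	elapsed = jiffies_to_msecs(jiffies - lockup->last_jiffies);
	if (elapsed >= 10 * 1000) {
		dev_err(rdev->dev, "GPU lockup, CP stuck for %lu ms\n", elapsed);
		return true;
	}
	/* read pointer unchanged, but the timeout has not expired yet */
	return false;
}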
diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h
index 4c73114f0de9..968a33317fbf 100644
--- a/drivers/gpu/drm/radeon/r300d.h
+++ b/drivers/gpu/drm/radeon/r300d.h
@@ -209,7 +209,52 @@
#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31)
#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
-
+#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
+#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
+#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
+#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
+#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
+#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
+#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
+#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2)
+#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1)
+#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB
+#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
+#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
+#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
+#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
+#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
+#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
+#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
+#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
+#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
+#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
+#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
+#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
+#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
+#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
+#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
+#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
+#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
+#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
+#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
+#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
+#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
+#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
+#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
+#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
+#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
+#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
+#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
+#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
+#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
+#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
+#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13)
+#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1)
+#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF
+#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14)
+#define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1)
+#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF
#define R_00000D_SCLK_CNTL 0x00000D
#define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0)
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index c2bda4ad62e7..be092d243f84 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -241,7 +241,7 @@ int r420_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
r420_clock_resume(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -322,7 +322,7 @@ int r420_init(struct radeon_device *rdev)
}
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 3c44b8d39318..870111e26bd1 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -53,7 +53,6 @@ static void r520_gpu_init(struct radeon_device *rdev)
{
unsigned pipe_select_current, gb_pipe_select, tmp;
- r100_hdp_reset(rdev);
rv515_vga_render_disable(rdev);
/*
* DST_PIPE_CONFIG 0x170C
@@ -209,7 +208,7 @@ int r520_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
rv515_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -246,7 +245,7 @@ int r520_init(struct radeon_device *rdev)
return -EINVAL;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 8f3454e2056a..2ec423c3f3f8 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -44,6 +44,9 @@
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024
+#define EVERGREEN_PFP_UCODE_SIZE 1120
+#define EVERGREEN_PM4_UCODE_SIZE 1376
+#define EVERGREEN_RLC_UCODE_SIZE 768
/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
@@ -68,6 +71,18 @@ MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
+MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
+MODULE_FIRMWARE("radeon/CEDAR_me.bin");
+MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
int r600_debugfs_mc_info_init(struct radeon_device *rdev);
@@ -75,6 +90,7 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev);
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
+void r600_irq_disable(struct radeon_device *rdev);
/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
@@ -714,11 +730,6 @@ int r600_mc_init(struct radeon_device *rdev)
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.visible_vram_size = rdev->mc.aper_size;
- /* FIXME remove this once we support unmappable VRAM */
- if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
- rdev->mc.mc_vram_size = rdev->mc.aper_size;
- rdev->mc.real_vram_size = rdev->mc.aper_size;
- }
r600_vram_gtt_location(rdev, &rdev->mc);
if (rdev->flags & RADEON_IS_IGP)
@@ -750,7 +761,6 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
- u32 srbm_reset = 0;
u32 tmp;
dev_info(rdev->dev, "GPU softreset \n");
@@ -765,7 +775,7 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
/* Disable CP parsing/prefetching */
- WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff));
+ WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
/* Check if any of the rendering block is busy and reset it */
if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
(RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
@@ -784,72 +794,56 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
S_008020_SOFT_RESET_VGT(1);
dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32(R_008020_GRBM_SOFT_RESET, tmp);
- (void)RREG32(R_008020_GRBM_SOFT_RESET);
- udelay(50);
+ RREG32(R_008020_GRBM_SOFT_RESET);
+ mdelay(15);
WREG32(R_008020_GRBM_SOFT_RESET, 0);
- (void)RREG32(R_008020_GRBM_SOFT_RESET);
}
/* Reset CP (we always reset CP) */
tmp = S_008020_SOFT_RESET_CP(1);
dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32(R_008020_GRBM_SOFT_RESET, tmp);
- (void)RREG32(R_008020_GRBM_SOFT_RESET);
- udelay(50);
+ RREG32(R_008020_GRBM_SOFT_RESET);
+ mdelay(15);
WREG32(R_008020_GRBM_SOFT_RESET, 0);
- (void)RREG32(R_008020_GRBM_SOFT_RESET);
- /* Reset others GPU block if necessary */
- if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
- if (G_000E50_GRBM_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_GRBM(1);
- if (G_000E50_HI_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_IH(1);
- if (G_000E50_VMC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_VMC(1);
- if (G_000E50_MCB_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_MC(1);
- if (G_000E50_MCDZ_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_MC(1);
- if (G_000E50_MCDY_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_MC(1);
- if (G_000E50_MCDX_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_MC(1);
- if (G_000E50_MCDW_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_MC(1);
- if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
- if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_SEM(1);
- if (G_000E50_BIF_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_BIF(1);
- dev_info(rdev->dev, " R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
- WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
- (void)RREG32(R_000E60_SRBM_SOFT_RESET);
- udelay(50);
- WREG32(R_000E60_SRBM_SOFT_RESET, 0);
- (void)RREG32(R_000E60_SRBM_SOFT_RESET);
- WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
- (void)RREG32(R_000E60_SRBM_SOFT_RESET);
- udelay(50);
- WREG32(R_000E60_SRBM_SOFT_RESET, 0);
- (void)RREG32(R_000E60_SRBM_SOFT_RESET);
/* Wait a little for things to settle down */
- udelay(50);
+ mdelay(1);
dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
RREG32(R_008010_GRBM_STATUS));
dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
RREG32(R_008014_GRBM_STATUS2));
dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
RREG32(R_000E50_SRBM_STATUS));
- /* After reset we need to reinit the asic as GPU often endup in an
- * incoherent state.
- */
- atom_asic_init(rdev->mode_info.atom_context);
rv515_mc_resume(rdev, &save);
return 0;
}
-int r600_gpu_reset(struct radeon_device *rdev)
+bool r600_gpu_is_lockup(struct radeon_device *rdev)
+{
+ u32 srbm_status;
+ u32 grbm_status;
+ u32 grbm_status2;
+ int r;
+
+ srbm_status = RREG32(R_000E50_SRBM_STATUS);
+ grbm_status = RREG32(R_008010_GRBM_STATUS);
+ grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
+ if (!G_008010_GUI_ACTIVE(grbm_status)) {
+ r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
+ return false;
+ }
+ /* force CP activities */
+ r = radeon_ring_lock(rdev, 2);
+ if (!r) {
+ /* PACKET2 NOP */
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_unlock_commit(rdev);
+ }
+ rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
+ return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
+}
+
+int r600_asic_reset(struct radeon_device *rdev)
{
return r600_gpu_soft_reset(rdev);
}
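The r600_gpu_soft_reset() changes above (like r300_asic_reset() earlier) rely on the same soft-reset "pulse": assert the reset bits, read the register back to flush the posted write, wait, release the reset, then wait again before re-reading status. A hypothetical helper capturing that pattern (not part of the patch; reg/reset_bits are whatever *_SOFT_RESET pair the caller targets):

static void example_soft_reset_pulse(struct radeon_device *rdev,
				     u32 reg, u32 reset_bits, unsigned int hold_ms)
{
	WREG32(reg, reset_bits);
	RREG32(reg);		/* read back to flush the posted write */
	mdelay(hold_ms);	/* keep the blocks in reset for a while */
	WREG32(reg, 0);		/* release the reset */
	mdelay(1);		/* let status settle before re-reading it */
}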
@@ -1467,10 +1461,31 @@ int r600_init_microcode(struct radeon_device *rdev)
chip_name = "RV710";
rlc_chip_name = "R700";
break;
+ case CHIP_CEDAR:
+ chip_name = "CEDAR";
+ rlc_chip_name = "CEDAR";
+ break;
+ case CHIP_REDWOOD:
+ chip_name = "REDWOOD";
+ rlc_chip_name = "REDWOOD";
+ break;
+ case CHIP_JUNIPER:
+ chip_name = "JUNIPER";
+ rlc_chip_name = "JUNIPER";
+ break;
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ chip_name = "CYPRESS";
+ rlc_chip_name = "CYPRESS";
+ break;
default: BUG();
}
- if (rdev->family >= CHIP_RV770) {
+ if (rdev->family >= CHIP_CEDAR) {
+ pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+ me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+ rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
+ } else if (rdev->family >= CHIP_RV770) {
pfp_req_size = R700_PFP_UCODE_SIZE * 4;
me_req_size = R700_PM4_UCODE_SIZE * 4;
rlc_req_size = R700_RLC_UCODE_SIZE * 4;
@@ -1584,12 +1599,15 @@ int r600_cp_start(struct radeon_device *rdev)
}
radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
radeon_ring_write(rdev, 0x1);
- if (rdev->family < CHIP_RV770) {
- radeon_ring_write(rdev, 0x3);
- radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
- } else {
+ if (rdev->family >= CHIP_CEDAR) {
+ radeon_ring_write(rdev, 0x0);
+ radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
+ } else if (rdev->family >= CHIP_RV770) {
radeon_ring_write(rdev, 0x0);
radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
+ } else {
+ radeon_ring_write(rdev, 0x3);
+ radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
}
radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(rdev, 0);
@@ -2290,10 +2308,11 @@ static void r600_ih_ring_fini(struct radeon_device *rdev)
}
}
-static void r600_rlc_stop(struct radeon_device *rdev)
+void r600_rlc_stop(struct radeon_device *rdev)
{
- if (rdev->family >= CHIP_RV770) {
+ if ((rdev->family >= CHIP_RV770) &&
+ (rdev->family <= CHIP_RV740)) {
/* r7xx asics need to soft reset RLC before halting */
WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
RREG32(SRBM_SOFT_RESET);
@@ -2330,7 +2349,12 @@ static int r600_rlc_init(struct radeon_device *rdev)
WREG32(RLC_UCODE_CNTL, 0);
fw_data = (const __be32 *)rdev->rlc_fw->data;
- if (rdev->family >= CHIP_RV770) {
+ if (rdev->family >= CHIP_CEDAR) {
+ for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
+ WREG32(RLC_UCODE_ADDR, i);
+ WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+ }
+ } else if (rdev->family >= CHIP_RV770) {
for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
WREG32(RLC_UCODE_ADDR, i);
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
@@ -2360,7 +2384,7 @@ static void r600_enable_interrupts(struct radeon_device *rdev)
rdev->ih.enabled = true;
}
-static void r600_disable_interrupts(struct radeon_device *rdev)
+void r600_disable_interrupts(struct radeon_device *rdev)
{
u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
u32 ih_cntl = RREG32(IH_CNTL);
@@ -2475,7 +2499,10 @@ int r600_irq_init(struct radeon_device *rdev)
WREG32(IH_CNTL, ih_cntl);
/* force the active interrupt state to all disabled */
- r600_disable_interrupt_state(rdev);
+ if (rdev->family >= CHIP_CEDAR)
+ evergreen_disable_interrupt_state(rdev);
+ else
+ r600_disable_interrupt_state(rdev);
/* enable irqs */
r600_enable_interrupts(rdev);
@@ -2485,7 +2512,7 @@ int r600_irq_init(struct radeon_device *rdev)
void r600_irq_suspend(struct radeon_device *rdev)
{
- r600_disable_interrupts(rdev);
+ r600_irq_disable(rdev);
r600_rlc_stop(rdev);
}
@@ -2500,6 +2527,7 @@ int r600_irq_set(struct radeon_device *rdev)
u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
u32 mode_int = 0;
u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
+ u32 hdmi1, hdmi2;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
@@ -2513,7 +2541,9 @@ int r600_irq_set(struct radeon_device *rdev)
return 0;
}
+ hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
if (ASIC_IS_DCE3(rdev)) {
+ hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
@@ -2523,6 +2553,7 @@ int r600_irq_set(struct radeon_device *rdev)
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
}
} else {
+ hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
@@ -2564,10 +2595,20 @@ int r600_irq_set(struct radeon_device *rdev)
DRM_DEBUG("r600_irq_set: hpd 6\n");
hpd6 |= DC_HPDx_INT_EN;
}
+ if (rdev->irq.hdmi[0]) {
+ DRM_DEBUG("r600_irq_set: hdmi 1\n");
+ hdmi1 |= R600_HDMI_INT_EN;
+ }
+ if (rdev->irq.hdmi[1]) {
+ DRM_DEBUG("r600_irq_set: hdmi 2\n");
+ hdmi2 |= R600_HDMI_INT_EN;
+ }
WREG32(CP_INT_CNTL, cp_int_cntl);
WREG32(DxMODE_INT_MASK, mode_int);
+ WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
if (ASIC_IS_DCE3(rdev)) {
+ WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
WREG32(DC_HPD1_INT_CONTROL, hpd1);
WREG32(DC_HPD2_INT_CONTROL, hpd2);
WREG32(DC_HPD3_INT_CONTROL, hpd3);
@@ -2577,6 +2618,7 @@ int r600_irq_set(struct radeon_device *rdev)
WREG32(DC_HPD6_INT_CONTROL, hpd6);
}
} else {
+ WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
@@ -2660,6 +2702,18 @@ static inline void r600_irq_ack(struct radeon_device *rdev,
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
}
+ if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
+ WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
+ }
+ if (ASIC_IS_DCE3(rdev)) {
+ if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
+ WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
+ }
+ } else {
+ if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
+ WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
+ }
+ }
}
void r600_irq_disable(struct radeon_device *rdev)
@@ -2713,6 +2767,8 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
* 19 1 FP Hot plug detection B
* 19 2 DAC A auto-detection
* 19 3 DAC B auto-detection
+ * 21 4 HDMI block A
+ * 21 5 HDMI block B
* 176 - CP_INT RB
* 177 - CP_INT IB1
* 178 - CP_INT IB2
@@ -2852,6 +2908,10 @@ restart_ih:
break;
}
break;
+ case 21: /* HDMI */
+ DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
+ r600_audio_schedule_polling(rdev);
+ break;
case 176: /* CP_INT in ring buffer */
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
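The HDMI interrupt plumbing above relies on the driver's WREG32_P() masked write: the register is read, the bits selected by the mask are kept, and the rest are replaced by the new value. As a sketch, a call such as WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK) is roughly equivalent to the open-coded sequence below (assumed from the driver's usual WREG32_P definition, so verify against radeon.h; the helper name is illustrative):

static void example_hdmi_ack_irq(struct radeon_device *rdev, u32 hdmi_block)
{
	u32 tmp = RREG32(hdmi_block + R600_HDMI_CNTL);

	/* every other bit of CNTL is preserved, INT_ACK is written as 1 */
	tmp &= ~R600_HDMI_INT_ACK;
	tmp |= R600_HDMI_INT_ACK;
	WREG32(hdmi_block + R600_HDMI_CNTL, tmp);
}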
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 1d898051c631..2b26553c352c 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -44,7 +44,7 @@ static int r600_audio_chipset_supported(struct radeon_device *rdev)
/*
* current number of channels
*/
-static int r600_audio_channels(struct radeon_device *rdev)
+int r600_audio_channels(struct radeon_device *rdev)
{
return (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0x7) + 1;
}
@@ -52,7 +52,7 @@ static int r600_audio_channels(struct radeon_device *rdev)
/*
* current bits per sample
*/
-static int r600_audio_bits_per_sample(struct radeon_device *rdev)
+int r600_audio_bits_per_sample(struct radeon_device *rdev)
{
uint32_t value = (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0xF0) >> 4;
switch (value) {
@@ -71,7 +71,7 @@ static int r600_audio_bits_per_sample(struct radeon_device *rdev)
/*
* current sampling rate in HZ
*/
-static int r600_audio_rate(struct radeon_device *rdev)
+int r600_audio_rate(struct radeon_device *rdev)
{
uint32_t value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
uint32_t result;
@@ -90,7 +90,7 @@ static int r600_audio_rate(struct radeon_device *rdev)
/*
* iec 60958 status bits
*/
-static uint8_t r600_audio_status_bits(struct radeon_device *rdev)
+uint8_t r600_audio_status_bits(struct radeon_device *rdev)
{
return RREG32(R600_AUDIO_STATUS_BITS) & 0xff;
}
@@ -98,12 +98,21 @@ static uint8_t r600_audio_status_bits(struct radeon_device *rdev)
/*
* iec 60958 category code
*/
-static uint8_t r600_audio_category_code(struct radeon_device *rdev)
+uint8_t r600_audio_category_code(struct radeon_device *rdev)
{
return (RREG32(R600_AUDIO_STATUS_BITS) >> 8) & 0xff;
}
/*
+ * schedule next audio update event
+ */
+void r600_audio_schedule_polling(struct radeon_device *rdev)
+{
+ mod_timer(&rdev->audio_timer,
+ jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL));
+}
+
+/*
* update all hdmi interfaces with current audio parameters
*/
static void r600_audio_update_hdmi(unsigned long param)
@@ -118,7 +127,7 @@ static void r600_audio_update_hdmi(unsigned long param)
uint8_t category_code = r600_audio_category_code(rdev);
struct drm_encoder *encoder;
- int changes = 0;
+ int changes = 0, still_going = 0;
changes |= channels != rdev->audio_channels;
changes |= rate != rdev->audio_rate;
@@ -135,15 +144,13 @@ static void r600_audio_update_hdmi(unsigned long param)
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ still_going |= radeon_encoder->audio_polling_active;
if (changes || r600_hdmi_buffer_status_changed(encoder))
- r600_hdmi_update_audio_settings(
- encoder, channels,
- rate, bps, status_bits,
- category_code);
+ r600_hdmi_update_audio_settings(encoder);
}
- mod_timer(&rdev->audio_timer,
- jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL));
+ if (still_going) r600_audio_schedule_polling(rdev);
}
/*
@@ -176,9 +183,34 @@ int r600_audio_init(struct radeon_device *rdev)
r600_audio_update_hdmi,
(unsigned long)rdev);
+ return 0;
+}
+
+/*
+ * enable the polling timer, to check for status changes
+ */
+void r600_audio_enable_polling(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+ DRM_DEBUG("r600_audio_enable_polling: %d", radeon_encoder->audio_polling_active);
+ if (radeon_encoder->audio_polling_active)
+ return;
+
+ radeon_encoder->audio_polling_active = 1;
mod_timer(&rdev->audio_timer, jiffies + 1);
+}
- return 0;
+/*
+ * disable the polling timer, so we get no more status updates
+ */
+void r600_audio_disable_polling(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ DRM_DEBUG("r600_audio_disable_polling: %d", radeon_encoder->audio_polling_active);
+ radeon_encoder->audio_polling_active = 0;
}
/*
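With this change the audio timer only re-arms itself while at least one encoder still has audio_polling_active set, so polling stops on its own once every active encoder is serviced by the HDMI interrupt instead. A compressed sketch of that self-rearming pattern (the encoder scan is elided; the callback signature matches the timer API this code uses):

static void example_audio_poll_cb(unsigned long data)
{
	struct radeon_device *rdev = (struct radeon_device *)data;
	bool still_needed = false;

	/* ... read audio state, update the HDMI blocks, and set
	 * still_needed if any encoder has audio_polling_active ... */

	if (still_needed)
		r600_audio_schedule_polling(rdev);
	/* otherwise do not re-arm: the timer simply stops firing */
}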
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index f6c6c77db7e0..d13622ae74e9 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -447,6 +447,9 @@ int r600_blit_init(struct radeon_device *rdev)
u32 packet2s[16];
int num_packet2s = 0;
+ /* don't reinitialize blit */
+ if (rdev->r600_blit.shader_obj)
+ return 0;
mutex_init(&rdev->r600_blit.mutex);
rdev->r600_blit.state_offset = 0;
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 2616b822ba68..40b1aca6d1f4 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -290,17 +290,15 @@ void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
if (!offset)
return;
- if (r600_hdmi_is_audio_buffer_filled(encoder)) {
- /* disable audio workaround and start delivering of audio frames */
- WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001);
+ if (!radeon_encoder->hdmi_audio_workaround ||
+ r600_hdmi_is_audio_buffer_filled(encoder)) {
- } else if (radeon_encoder->hdmi_audio_workaround) {
- /* enable audio workaround and start delivering of audio frames */
- WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001);
+ /* disable audio workaround */
+ WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001);
} else {
- /* disable audio workaround and stop delivering of audio frames */
- WREG32_P(offset+R600_HDMI_CNTL, 0x00000000, ~0x00001001);
+ /* enable audio workaround */
+ WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001);
}
}
@@ -345,25 +343,23 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
/* audio packets per line, does anyone know how to calc this ? */
WREG32_P(offset+R600_HDMI_CNTL, 0x00040000, ~0x001F0000);
-
- /* update? reset? don't realy know */
- WREG32_P(offset+R600_HDMI_CNTL, 0x14000000, ~0x14000000);
}
/*
* update settings with current parameters from audio engine
*/
-void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
- int channels,
- int rate,
- int bps,
- uint8_t status_bits,
- uint8_t category_code)
+void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+ int channels = r600_audio_channels(rdev);
+ int rate = r600_audio_rate(rdev);
+ int bps = r600_audio_bits_per_sample(rdev);
+ uint8_t status_bits = r600_audio_status_bits(rdev);
+ uint8_t category_code = r600_audio_category_code(rdev);
+
uint32_t iec;
if (!offset)
@@ -415,9 +411,6 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
r600_hdmi_audioinfoframe(encoder, channels-1, 0, 0, 0, 0, 0, 0, 0);
r600_hdmi_audio_workaround(encoder);
-
- /* update? reset? don't realy know */
- WREG32_P(offset+R600_HDMI_CNTL, 0x04000000, ~0x04000000);
}
static int r600_hdmi_find_free_block(struct drm_device *dev)
@@ -486,6 +479,7 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t offset;
if (ASIC_IS_DCE4(rdev))
return;
@@ -499,10 +493,10 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
}
}
+ offset = radeon_encoder->hdmi_offset;
if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1);
} else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
- int offset = radeon_encoder->hdmi_offset;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
WREG32_P(AVIVO_TMDSA_CNTL, 0x4, ~0x4);
@@ -518,6 +512,21 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
}
}
+ if (rdev->irq.installed
+ && rdev->family != CHIP_RS600
+ && rdev->family != CHIP_RS690
+ && rdev->family != CHIP_RS740) {
+
+ /* if irq is available use it */
+ rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = true;
+ radeon_irq_set(rdev);
+
+ r600_audio_disable_polling(encoder);
+ } else {
+ /* if not fallback to polling */
+ r600_audio_enable_polling(encoder);
+ }
+
DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n",
radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
}
@@ -530,22 +539,30 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint8_t offset;
if (ASIC_IS_DCE4(rdev))
return;
- if (!radeon_encoder->hdmi_offset) {
+ offset = radeon_encoder->hdmi_offset;
+ if (!offset) {
dev_err(rdev->dev, "Disabling not enabled HDMI\n");
return;
}
DRM_DEBUG("Disabling HDMI interface @ 0x%04X for encoder 0x%x\n",
- radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
+ offset, radeon_encoder->encoder_id);
+
+ /* disable irq */
+ rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = false;
+ radeon_irq_set(rdev);
+
+ /* disable polling */
+ r600_audio_disable_polling(encoder);
if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1);
} else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
- int offset = radeon_encoder->hdmi_offset;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
WREG32_P(AVIVO_TMDSA_CNTL, 0, ~0x4);
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index 7b1d22370f6e..d84612ae47e0 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -157,33 +157,36 @@
#define R600_HDMI_BLOCK3 0x7800
/* HDMI registers */
-#define R600_HDMI_ENABLE 0x00
-#define R600_HDMI_STATUS 0x04
-#define R600_HDMI_CNTL 0x08
-#define R600_HDMI_UNKNOWN_0 0x0C
-#define R600_HDMI_AUDIOCNTL 0x10
-#define R600_HDMI_VIDEOCNTL 0x14
-#define R600_HDMI_VERSION 0x18
-#define R600_HDMI_UNKNOWN_1 0x28
-#define R600_HDMI_VIDEOINFOFRAME_0 0x54
-#define R600_HDMI_VIDEOINFOFRAME_1 0x58
-#define R600_HDMI_VIDEOINFOFRAME_2 0x5c
-#define R600_HDMI_VIDEOINFOFRAME_3 0x60
-#define R600_HDMI_32kHz_CTS 0xac
-#define R600_HDMI_32kHz_N 0xb0
-#define R600_HDMI_44_1kHz_CTS 0xb4
-#define R600_HDMI_44_1kHz_N 0xb8
-#define R600_HDMI_48kHz_CTS 0xbc
-#define R600_HDMI_48kHz_N 0xc0
-#define R600_HDMI_AUDIOINFOFRAME_0 0xcc
-#define R600_HDMI_AUDIOINFOFRAME_1 0xd0
-#define R600_HDMI_IEC60958_1 0xd4
-#define R600_HDMI_IEC60958_2 0xd8
-#define R600_HDMI_UNKNOWN_2 0xdc
-#define R600_HDMI_AUDIO_DEBUG_0 0xe0
-#define R600_HDMI_AUDIO_DEBUG_1 0xe4
-#define R600_HDMI_AUDIO_DEBUG_2 0xe8
-#define R600_HDMI_AUDIO_DEBUG_3 0xec
+#define R600_HDMI_ENABLE 0x00
+#define R600_HDMI_STATUS 0x04
+# define R600_HDMI_INT_PENDING (1 << 29)
+#define R600_HDMI_CNTL 0x08
+# define R600_HDMI_INT_EN (1 << 28)
+# define R600_HDMI_INT_ACK (1 << 29)
+#define R600_HDMI_UNKNOWN_0 0x0C
+#define R600_HDMI_AUDIOCNTL 0x10
+#define R600_HDMI_VIDEOCNTL 0x14
+#define R600_HDMI_VERSION 0x18
+#define R600_HDMI_UNKNOWN_1 0x28
+#define R600_HDMI_VIDEOINFOFRAME_0 0x54
+#define R600_HDMI_VIDEOINFOFRAME_1 0x58
+#define R600_HDMI_VIDEOINFOFRAME_2 0x5c
+#define R600_HDMI_VIDEOINFOFRAME_3 0x60
+#define R600_HDMI_32kHz_CTS 0xac
+#define R600_HDMI_32kHz_N 0xb0
+#define R600_HDMI_44_1kHz_CTS 0xb4
+#define R600_HDMI_44_1kHz_N 0xb8
+#define R600_HDMI_48kHz_CTS 0xbc
+#define R600_HDMI_48kHz_N 0xc0
+#define R600_HDMI_AUDIOINFOFRAME_0 0xcc
+#define R600_HDMI_AUDIOINFOFRAME_1 0xd0
+#define R600_HDMI_IEC60958_1 0xd4
+#define R600_HDMI_IEC60958_2 0xd8
+#define R600_HDMI_UNKNOWN_2 0xdc
+#define R600_HDMI_AUDIO_DEBUG_0 0xe0
+#define R600_HDMI_AUDIO_DEBUG_1 0xe4
+#define R600_HDMI_AUDIO_DEBUG_2 0xe8
+#define R600_HDMI_AUDIO_DEBUG_3 0xec
/* HDMI additional config base register addresses */
#define R600_HDMI_CONFIG1 0x7600
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 034218c3dbbb..ab29d972a167 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -99,6 +99,7 @@ extern int radeon_hw_i2c;
* symbol;
*/
#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
+#define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2)
/* RADEON_IB_POOL_SIZE must be a power of 2 */
#define RADEON_IB_POOL_SIZE 16
#define RADEON_DEBUGFS_MAX_NUM_FILES 32
@@ -182,7 +183,8 @@ struct radeon_fence_driver {
uint32_t scratch_reg;
atomic_t seq;
uint32_t last_seq;
- unsigned long count_timeout;
+ unsigned long last_jiffies;
+ unsigned long last_timeout;
wait_queue_head_t queue;
rwlock_t lock;
struct list_head created;
@@ -197,7 +199,6 @@ struct radeon_fence {
struct list_head list;
/* protected by radeon_fence.lock */
uint32_t seq;
- unsigned long timeout;
bool emited;
bool signaled;
};
@@ -371,10 +372,12 @@ struct radeon_irq {
bool installed;
bool sw_int;
/* FIXME: use a define max crtc rather than hardcode it */
- bool crtc_vblank_int[2];
+ bool crtc_vblank_int[6];
wait_queue_head_t vblank_queue;
/* FIXME: use defines for max hpd/dacs */
bool hpd[6];
+ /* FIXME: use defines for max HDMI blocks */
+ bool hdmi[2];
spinlock_t sw_lock;
int sw_refcount;
};
@@ -746,7 +749,8 @@ struct radeon_asic {
int (*resume)(struct radeon_device *rdev);
int (*suspend)(struct radeon_device *rdev);
void (*vga_set_state)(struct radeon_device *rdev, bool state);
- int (*gpu_reset)(struct radeon_device *rdev);
+ bool (*gpu_is_lockup)(struct radeon_device *rdev);
+ int (*asic_reset)(struct radeon_device *rdev);
void (*gart_tlb_flush)(struct radeon_device *rdev);
int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
int (*cp_init)(struct radeon_device *rdev, unsigned ring_size);
@@ -804,39 +808,72 @@ struct radeon_asic {
/*
* Asic structures
*/
+struct r100_gpu_lockup {
+ unsigned long last_jiffies;
+ u32 last_cp_rptr;
+};
+
struct r100_asic {
- const unsigned *reg_safe_bm;
- unsigned reg_safe_bm_size;
- u32 hdp_cntl;
+ const unsigned *reg_safe_bm;
+ unsigned reg_safe_bm_size;
+ u32 hdp_cntl;
+ struct r100_gpu_lockup lockup;
};
struct r300_asic {
- const unsigned *reg_safe_bm;
- unsigned reg_safe_bm_size;
- u32 resync_scratch;
- u32 hdp_cntl;
+ const unsigned *reg_safe_bm;
+ unsigned reg_safe_bm_size;
+ u32 resync_scratch;
+ u32 hdp_cntl;
+ struct r100_gpu_lockup lockup;
};
struct r600_asic {
- unsigned max_pipes;
- unsigned max_tile_pipes;
- unsigned max_simds;
- unsigned max_backends;
- unsigned max_gprs;
- unsigned max_threads;
- unsigned max_stack_entries;
- unsigned max_hw_contexts;
- unsigned max_gs_threads;
- unsigned sx_max_export_size;
- unsigned sx_max_export_pos_size;
- unsigned sx_max_export_smx_size;
- unsigned sq_num_cf_insts;
- unsigned tiling_nbanks;
- unsigned tiling_npipes;
- unsigned tiling_group_size;
+ unsigned max_pipes;
+ unsigned max_tile_pipes;
+ unsigned max_simds;
+ unsigned max_backends;
+ unsigned max_gprs;
+ unsigned max_threads;
+ unsigned max_stack_entries;
+ unsigned max_hw_contexts;
+ unsigned max_gs_threads;
+ unsigned sx_max_export_size;
+ unsigned sx_max_export_pos_size;
+ unsigned sx_max_export_smx_size;
+ unsigned sq_num_cf_insts;
+ unsigned tiling_nbanks;
+ unsigned tiling_npipes;
+ unsigned tiling_group_size;
+ struct r100_gpu_lockup lockup;
};
struct rv770_asic {
+ unsigned max_pipes;
+ unsigned max_tile_pipes;
+ unsigned max_simds;
+ unsigned max_backends;
+ unsigned max_gprs;
+ unsigned max_threads;
+ unsigned max_stack_entries;
+ unsigned max_hw_contexts;
+ unsigned max_gs_threads;
+ unsigned sx_max_export_size;
+ unsigned sx_max_export_pos_size;
+ unsigned sx_max_export_smx_size;
+ unsigned sq_num_cf_insts;
+ unsigned sx_num_of_sets;
+ unsigned sc_prim_fifo_size;
+ unsigned sc_hiz_tile_fifo_size;
+ unsigned sc_earlyz_tile_fifo_fize;
+ unsigned tiling_nbanks;
+ unsigned tiling_npipes;
+ unsigned tiling_group_size;
+ struct r100_gpu_lockup lockup;
+};
+
+struct evergreen_asic {
+ unsigned num_ses;
unsigned max_pipes;
unsigned max_tile_pipes;
unsigned max_simds;
@@ -853,7 +890,7 @@ struct rv770_asic {
unsigned sx_num_of_sets;
unsigned sc_prim_fifo_size;
unsigned sc_hiz_tile_fifo_size;
- unsigned sc_earlyz_tile_fifo_fize;
+ unsigned sc_earlyz_tile_fifo_size;
unsigned tiling_nbanks;
unsigned tiling_npipes;
unsigned tiling_group_size;
@@ -864,6 +901,7 @@ union radeon_asic_config {
struct r100_asic r100;
struct r600_asic r600;
struct rv770_asic rv770;
+ struct evergreen_asic evergreen;
};
/*
@@ -927,9 +965,6 @@ struct radeon_device {
bool is_atom_bios;
uint16_t bios_header_start;
struct radeon_bo *stollen_vga_memory;
- struct fb_info *fbdev_info;
- struct radeon_bo *fbdev_rbo;
- struct radeon_framebuffer *fbdev_rfb;
/* Register mmio */
resource_size_t rmmio_base;
resource_size_t rmmio_size;
@@ -1145,7 +1180,8 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
#define radeon_cs_parse(p) rdev->asic->cs_parse((p))
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
-#define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev))
+#define radeon_gpu_is_lockup(rdev) (rdev)->asic->gpu_is_lockup((rdev))
+#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
#define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev))
@@ -1176,6 +1212,7 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
/* Common functions */
/* AGP */
+extern int radeon_gpu_reset(struct radeon_device *rdev);
extern void radeon_agp_disable(struct radeon_device *rdev);
extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
extern void radeon_gart_restore(struct radeon_device *rdev);
@@ -1200,6 +1237,8 @@ extern int radeon_resume_kms(struct drm_device *dev);
extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
+extern void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp);
+extern bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp);
/* rv200,rv250,rv280 */
extern void r200_set_safe_registers(struct radeon_device *rdev);
@@ -1260,6 +1299,7 @@ extern void rs690_line_buffer_adjust(struct radeon_device *rdev,
extern void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern bool r600_card_posted(struct radeon_device *rdev);
extern void r600_cp_stop(struct radeon_device *rdev);
+extern int r600_cp_start(struct radeon_device *rdev);
extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
extern int r600_cp_resume(struct radeon_device *rdev);
extern void r600_cp_fini(struct radeon_device *rdev);
@@ -1276,29 +1316,39 @@ extern void r600_scratch_init(struct radeon_device *rdev);
extern int r600_blit_init(struct radeon_device *rdev);
extern void r600_blit_fini(struct radeon_device *rdev);
extern int r600_init_microcode(struct radeon_device *rdev);
-extern int r600_gpu_reset(struct radeon_device *rdev);
+extern int r600_asic_reset(struct radeon_device *rdev);
/* r600 irq */
extern int r600_irq_init(struct radeon_device *rdev);
extern void r600_irq_fini(struct radeon_device *rdev);
extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
extern int r600_irq_set(struct radeon_device *rdev);
extern void r600_irq_suspend(struct radeon_device *rdev);
+extern void r600_disable_interrupts(struct radeon_device *rdev);
+extern void r600_rlc_stop(struct radeon_device *rdev);
/* r600 audio */
extern int r600_audio_init(struct radeon_device *rdev);
extern int r600_audio_tmds_index(struct drm_encoder *encoder);
extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
+extern int r600_audio_channels(struct radeon_device *rdev);
+extern int r600_audio_bits_per_sample(struct radeon_device *rdev);
+extern int r600_audio_rate(struct radeon_device *rdev);
+extern uint8_t r600_audio_status_bits(struct radeon_device *rdev);
+extern uint8_t r600_audio_category_code(struct radeon_device *rdev);
+extern void r600_audio_schedule_polling(struct radeon_device *rdev);
+extern void r600_audio_enable_polling(struct drm_encoder *encoder);
+extern void r600_audio_disable_polling(struct drm_encoder *encoder);
extern void r600_audio_fini(struct radeon_device *rdev);
extern void r600_hdmi_init(struct drm_encoder *encoder);
extern void r600_hdmi_enable(struct drm_encoder *encoder);
extern void r600_hdmi_disable(struct drm_encoder *encoder);
extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
-extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
- int channels,
- int rate,
- int bps,
- uint8_t status_bits,
- uint8_t category_code);
+extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
+
+extern void r700_cp_stop(struct radeon_device *rdev);
+extern void r700_cp_fini(struct radeon_device *rdev);
+extern void evergreen_disable_interrupt_state(struct radeon_device *rdev);
+extern int evergreen_irq_set(struct radeon_device *rdev);
/* evergreen */
struct evergreen_mc_save {
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index a4b4bc9fa322..f835333c1b69 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -134,7 +134,8 @@ static struct radeon_asic r100_asic = {
.suspend = &r100_suspend,
.resume = &r100_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r100_gpu_reset,
+ .gpu_is_lockup = &r100_gpu_is_lockup,
+ .asic_reset = &r100_asic_reset,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -172,7 +173,8 @@ static struct radeon_asic r200_asic = {
.suspend = &r100_suspend,
.resume = &r100_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r100_gpu_reset,
+ .gpu_is_lockup = &r100_gpu_is_lockup,
+ .asic_reset = &r100_asic_reset,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -209,7 +211,8 @@ static struct radeon_asic r300_asic = {
.suspend = &r300_suspend,
.resume = &r300_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r300_gpu_reset,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &r300_asic_reset,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -247,7 +250,8 @@ static struct radeon_asic r300_asic_pcie = {
.suspend = &r300_suspend,
.resume = &r300_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r300_gpu_reset,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &r300_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -284,7 +288,8 @@ static struct radeon_asic r420_asic = {
.suspend = &r420_suspend,
.resume = &r420_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r300_gpu_reset,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &r300_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -322,7 +327,8 @@ static struct radeon_asic rs400_asic = {
.suspend = &rs400_suspend,
.resume = &rs400_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r300_gpu_reset,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &r300_asic_reset,
.gart_tlb_flush = &rs400_gart_tlb_flush,
.gart_set_page = &rs400_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -360,7 +366,8 @@ static struct radeon_asic rs600_asic = {
.suspend = &rs600_suspend,
.resume = &rs600_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r300_gpu_reset,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rs600_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -398,7 +405,8 @@ static struct radeon_asic rs690_asic = {
.suspend = &rs690_suspend,
.resume = &rs690_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r300_gpu_reset,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rs400_gart_tlb_flush,
.gart_set_page = &rs400_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -436,7 +444,8 @@ static struct radeon_asic rv515_asic = {
.suspend = &rv515_suspend,
.resume = &rv515_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &rv515_gpu_reset,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -474,7 +483,8 @@ static struct radeon_asic r520_asic = {
.suspend = &rv515_suspend,
.resume = &r520_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &rv515_gpu_reset,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -513,7 +523,8 @@ static struct radeon_asic r600_asic = {
.resume = &r600_resume,
.cp_commit = &r600_cp_commit,
.vga_set_state = &r600_vga_set_state,
- .gpu_reset = &r600_gpu_reset,
+ .gpu_is_lockup = &r600_gpu_is_lockup,
+ .asic_reset = &r600_asic_reset,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
@@ -549,8 +560,9 @@ static struct radeon_asic rs780_asic = {
.suspend = &r600_suspend,
.resume = &r600_resume,
.cp_commit = &r600_cp_commit,
+ .gpu_is_lockup = &r600_gpu_is_lockup,
.vga_set_state = &r600_vga_set_state,
- .gpu_reset = &r600_gpu_reset,
+ .asic_reset = &r600_asic_reset,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
@@ -586,7 +598,8 @@ static struct radeon_asic rv770_asic = {
.suspend = &rv770_suspend,
.resume = &rv770_resume,
.cp_commit = &r600_cp_commit,
- .gpu_reset = &rv770_gpu_reset,
+ .asic_reset = &r600_asic_reset,
+ .gpu_is_lockup = &r600_gpu_is_lockup,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
@@ -622,16 +635,17 @@ static struct radeon_asic evergreen_asic = {
.fini = &evergreen_fini,
.suspend = &evergreen_suspend,
.resume = &evergreen_resume,
- .cp_commit = NULL,
- .gpu_reset = &evergreen_gpu_reset,
+ .cp_commit = &r600_cp_commit,
+ .gpu_is_lockup = &evergreen_gpu_is_lockup,
+ .asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
+ .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
- .ring_test = NULL,
- .ring_ib_execute = NULL,
- .irq_set = NULL,
- .irq_process = NULL,
- .get_vblank_counter = NULL,
+ .ring_test = &r600_ring_test,
+ .ring_ib_execute = &r600_ring_ib_execute,
+ .irq_set = &evergreen_irq_set,
+ .irq_process = &evergreen_irq_process,
+ .get_vblank_counter = &evergreen_get_vblank_counter,
.fence_ring_emit = NULL,
.cs_parse = NULL,
.copy_blit = NULL,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index a0b8280663d1..ef2c7ba1bdc9 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -60,7 +60,8 @@ int r100_resume(struct radeon_device *rdev);
uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void r100_vga_set_state(struct radeon_device *rdev, bool state);
-int r100_gpu_reset(struct radeon_device *rdev);
+bool r100_gpu_is_lockup(struct radeon_device *rdev);
+int r100_asic_reset(struct radeon_device *rdev);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
@@ -110,8 +111,6 @@ void r100_vram_init_sizes(struct radeon_device *rdev);
void r100_wb_disable(struct radeon_device *rdev);
void r100_wb_fini(struct radeon_device *rdev);
int r100_wb_init(struct radeon_device *rdev);
-void r100_hdp_reset(struct radeon_device *rdev);
-int r100_rb2d_reset(struct radeon_device *rdev);
int r100_cp_reset(struct radeon_device *rdev);
void r100_vga_render_disable(struct radeon_device *rdev);
int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
@@ -126,7 +125,7 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p,
unsigned idx);
void r100_enable_bm(struct radeon_device *rdev);
void r100_set_common_regs(struct radeon_device *rdev);
-
+void r100_bm_disable(struct radeon_device *rdev);
/*
* r200,rv250,rs300,rv280
*/
@@ -134,7 +133,7 @@ extern int r200_copy_dma(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_pages,
- struct radeon_fence *fence);
+ struct radeon_fence *fence);
/*
* r300,r350,rv350,rv380
@@ -143,7 +142,8 @@ extern int r300_init(struct radeon_device *rdev);
extern void r300_fini(struct radeon_device *rdev);
extern int r300_suspend(struct radeon_device *rdev);
extern int r300_resume(struct radeon_device *rdev);
-extern int r300_gpu_reset(struct radeon_device *rdev);
+extern bool r300_gpu_is_lockup(struct radeon_device *rdev);
+extern int r300_asic_reset(struct radeon_device *rdev);
extern void r300_ring_start(struct radeon_device *rdev);
extern void r300_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
@@ -178,6 +178,7 @@ void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
/*
* rs600.
*/
+extern int rs600_asic_reset(struct radeon_device *rdev);
extern int rs600_init(struct radeon_device *rdev);
extern void rs600_fini(struct radeon_device *rdev);
extern int rs600_suspend(struct radeon_device *rdev);
@@ -212,7 +213,6 @@ void rs690_bandwidth_update(struct radeon_device *rdev);
*/
int rv515_init(struct radeon_device *rdev);
void rv515_fini(struct radeon_device *rdev);
-int rv515_gpu_reset(struct radeon_device *rdev);
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rv515_ring_start(struct radeon_device *rdev);
@@ -252,7 +252,8 @@ int r600_copy_dma(struct radeon_device *rdev,
struct radeon_fence *fence);
int r600_irq_process(struct radeon_device *rdev);
int r600_irq_set(struct radeon_device *rdev);
-int r600_gpu_reset(struct radeon_device *rdev);
+bool r600_gpu_is_lockup(struct radeon_device *rdev);
+int r600_asic_reset(struct radeon_device *rdev);
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
@@ -276,20 +277,25 @@ int rv770_init(struct radeon_device *rdev);
void rv770_fini(struct radeon_device *rdev);
int rv770_suspend(struct radeon_device *rdev);
int rv770_resume(struct radeon_device *rdev);
-int rv770_gpu_reset(struct radeon_device *rdev);
/*
* evergreen
*/
+void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
int evergreen_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
int evergreen_suspend(struct radeon_device *rdev);
int evergreen_resume(struct radeon_device *rdev);
-int evergreen_gpu_reset(struct radeon_device *rdev);
+bool evergreen_gpu_is_lockup(struct radeon_device *rdev);
+int evergreen_asic_reset(struct radeon_device *rdev);
void evergreen_bandwidth_update(struct radeon_device *rdev);
void evergreen_hpd_init(struct radeon_device *rdev);
void evergreen_hpd_fini(struct radeon_device *rdev);
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void evergreen_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
+u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc);
+int evergreen_irq_set(struct radeon_device *rdev);
+int evergreen_irq_process(struct radeon_device *rdev);
+
#endif
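
The prototypes above replace the single gpu_reset hook with a cheap lockup probe plus an explicit ASIC reset, selected per chip through the radeon_asic function tables. Below is a minimal stand-alone C sketch of that split; the struct and function names are invented for the example, only the bool-probe / int-reset shape comes from the header.

#include <stdio.h>
#include <stdbool.h>

struct asic_funcs {
    bool (*gpu_is_lockup)(void);   /* "is the GPU stuck?" - cheap, no side effects */
    int  (*asic_reset)(void);      /* heavy hammer, only used when it really is stuck */
};

static bool fake_is_lockup(void) { return true; }
static int  fake_reset(void)     { puts("resetting ASIC"); return 0; }

static const struct asic_funcs demo_asic = {
    .gpu_is_lockup = fake_is_lockup,
    .asic_reset    = fake_reset,
};

int main(void)
{
    /* a fence-wait timeout would land here: probe first, reset only if needed */
    if (demo_asic.gpu_is_lockup())
        return demo_asic.asic_reset();
    return 0;
}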
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 9916d825401c..1d05debdd604 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1462,6 +1462,10 @@ static const char *pp_lib_thermal_controller_names[] = {
"RV6xx",
"RV770",
"ADT7473",
+ "External GPIO",
+ "Evergreen",
+ "ADT7473 with internal",
+
};
union power_info {
@@ -1707,15 +1711,21 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
break;
}
}
- } else if (frev == 4) {
+ } else {
/* add the i2c bus for thermal/fan chip */
/* no support for internal controller yet */
if (power_info->info_4.sThermalController.ucType > 0) {
if ((power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) ||
- (power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV770)) {
+ (power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV770) ||
+ (power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN)) {
DRM_INFO("Internal thermal controller %s fan control\n",
(power_info->info_4.sThermalController.ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ } else if ((power_info->info_4.sThermalController.ucType ==
+ ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
+ (power_info->info_4.sThermalController.ucType ==
+ ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL)) {
+ DRM_INFO("Special thermal controller config\n");
} else {
DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
pp_lib_thermal_controller_names[power_info->info_4.sThermalController.ucType],
@@ -1763,6 +1773,36 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
clock_info->usVDDC;
mode_index++;
+ } else if (ASIC_IS_DCE4(rdev)) {
+ struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *clock_info =
+ (struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *)
+ (mode_info->atom_context->bios +
+ data_offset +
+ le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
+ (power_state->ucClockStateIndices[j] *
+ power_info->info_4.ucClockInfoSize));
+ sclk = le16_to_cpu(clock_info->usEngineClockLow);
+ sclk |= clock_info->ucEngineClockHigh << 16;
+ mclk = le16_to_cpu(clock_info->usMemoryClockLow);
+ mclk |= clock_info->ucMemoryClockHigh << 16;
+ rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
+ rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+ /* skip invalid modes */
+ if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
+ (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
+ continue;
+ /* skip overclock modes for now */
+ if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk >
+ rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
+ (rdev->pm.power_state[state_index].clock_info[mode_index].sclk >
+ rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
+ continue;
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+ VOLTAGE_SW;
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
+ clock_info->usVDDC;
+ /* XXX usVDDCI */
+ mode_index++;
} else {
struct _ATOM_PPLIB_R600_CLOCK_INFO *clock_info =
(struct _ATOM_PPLIB_R600_CLOCK_INFO *)
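
The Evergreen branch above rebuilds each clock from a 16-bit low word plus a high byte, then drops entries whose clocks are zero or sit well above the board defaults. A small stand-alone sketch of that filtering follows; the margin constant and the sample numbers are assumptions for the demo, not the driver's values.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define OVERCLOCK_MARGIN 15000 /* assumed margin for the demo, in the table's 10 kHz units */

static uint32_t assemble_clock(uint16_t low, uint8_t high)
{
    /* the BIOS table splits each clock into a low word and a high byte */
    return (uint32_t)low | ((uint32_t)high << 16);
}

static bool mode_usable(uint32_t sclk, uint32_t mclk,
                        uint32_t def_sclk, uint32_t def_mclk)
{
    if (sclk == 0 || mclk == 0)
        return false;                           /* invalid entry */
    if (sclk > def_sclk + OVERCLOCK_MARGIN ||
        mclk > def_mclk + OVERCLOCK_MARGIN)
        return false;                           /* overclocked entry, skipped */
    return true;
}

int main(void)
{
    uint32_t sclk = assemble_clock(0x5dc0, 0x01);   /* 89536 */
    uint32_t mclk = assemble_clock(0x9c40, 0x01);   /* 105536 */

    printf("sclk=%u mclk=%u usable=%d\n", sclk, mclk,
           mode_usable(sclk, mclk, 85000, 100000));
    return 0;
}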
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 37db8adb2748..0f1fd9254e30 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -450,17 +450,17 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
{
int edid_info;
struct edid *edid;
+ unsigned char *raw;
edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE);
if (!edid_info)
return false;
- edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1),
- GFP_KERNEL);
+ raw = rdev->bios + edid_info;
+ edid = kmalloc(EDID_LENGTH * (raw[0x7e] + 1), GFP_KERNEL);
if (edid == NULL)
return false;
- memcpy((unsigned char *)edid,
- (unsigned char *)(rdev->bios + edid_info), EDID_LENGTH);
+ memcpy((unsigned char *)edid, raw, EDID_LENGTH * (raw[0x7e] + 1));
if (!drm_edid_is_valid(edid)) {
kfree(edid);
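
The hardcoded-EDID path now sizes its copy from the extension-count byte at offset 0x7e instead of a fixed maximum number of extension blocks. A tiny stand-alone sketch of that size calculation, assuming the standard 128-byte EDID block length:

#include <stdio.h>
#include <stddef.h>

#define EDID_BLOCK_LEN 128   /* same value as the kernel's EDID_LENGTH */

static size_t edid_total_size(const unsigned char *base_block)
{
    /* byte 0x7e of the base block counts the extension blocks that follow it */
    return (size_t)EDID_BLOCK_LEN * (base_block[0x7e] + 1);
}

int main(void)
{
    unsigned char edid[EDID_BLOCK_LEN] = {0};

    edid[0x7e] = 1; /* one CEA extension block, for example */
    printf("EDID occupies %zu bytes\n", edid_total_size(edid)); /* prints 256 */
    return 0;
}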
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 4559a53d5e57..40a24c941f20 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1041,7 +1041,6 @@ radeon_add_atom_connector(struct drm_device *dev,
struct radeon_connector_atom_dig *radeon_dig_connector;
uint32_t subpixel_order = SubPixelNone;
bool shared_ddc = false;
- int ret;
/* fixme - tv/cv/din */
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
@@ -1076,9 +1075,7 @@ radeon_add_atom_connector(struct drm_device *dev,
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
if (!radeon_connector->ddc_bus)
@@ -1091,9 +1088,7 @@ radeon_add_atom_connector(struct drm_device *dev,
break;
case DRM_MODE_CONNECTOR_DVIA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
if (!radeon_connector->ddc_bus)
@@ -1113,9 +1108,7 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
if (!radeon_connector->ddc_bus)
@@ -1141,9 +1134,7 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "HDMI");
if (!radeon_connector->ddc_bus)
@@ -1163,9 +1154,7 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
if (i2c_bus->valid) {
/* add DP i2c bus */
if (connector_type == DRM_MODE_CONNECTOR_eDP)
@@ -1191,9 +1180,7 @@ radeon_add_atom_connector(struct drm_device *dev,
case DRM_MODE_CONNECTOR_9PinDIN:
if (radeon_tv == 1) {
drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
radeon_connector->dac_load_detect = true;
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
@@ -1211,9 +1198,7 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
if (!radeon_connector->ddc_bus)
@@ -1250,7 +1235,6 @@ radeon_add_legacy_connector(struct drm_device *dev,
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
uint32_t subpixel_order = SubPixelNone;
- int ret;
/* fixme - tv/cv/din */
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
@@ -1278,9 +1262,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
if (!radeon_connector->ddc_bus)
@@ -1293,9 +1275,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
break;
case DRM_MODE_CONNECTOR_DVIA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
if (!radeon_connector->ddc_bus)
@@ -1309,9 +1289,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
if (!radeon_connector->ddc_bus)
@@ -1330,9 +1308,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
case DRM_MODE_CONNECTOR_9PinDIN:
if (radeon_tv == 1) {
drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
radeon_connector->dac_load_detect = true;
/* RS400,RC410,RS480 chipset seems to report a lot
* of false positive on load detect, we haven't yet
@@ -1351,9 +1327,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
break;
case DRM_MODE_CONNECTOR_LVDS:
drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
if (!radeon_connector->ddc_bus)
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index f9b0fe002c0a..ae0fb7356e62 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -220,10 +220,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
int r;
mutex_lock(&rdev->cs_mutex);
- if (rdev->gpu_lockup) {
- mutex_unlock(&rdev->cs_mutex);
- return -EINVAL;
- }
/* initialize parser */
memset(&parser, 0, sizeof(struct radeon_cs_parser));
parser.filp = filp;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 7b629e305560..26217ffe0355 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -671,7 +671,7 @@ int radeon_device_init(struct radeon_device *rdev,
/* Acceleration not working on AGP card try again
* with fallback to PCI or PCIE GART
*/
- radeon_gpu_reset(rdev);
+ radeon_asic_reset(rdev);
radeon_fini(rdev);
radeon_agp_disable(rdev);
r = radeon_init(rdev);
@@ -691,6 +691,8 @@ void radeon_device_fini(struct radeon_device *rdev)
{
DRM_INFO("radeon: finishing device.\n");
rdev->shutdown = true;
+ /* evict vram memory */
+ radeon_bo_evict_vram(rdev);
radeon_fini(rdev);
destroy_workqueue(rdev->wq);
vga_switcheroo_unregister_client(rdev->pdev);
@@ -728,9 +730,10 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
continue;
}
robj = rfb->obj->driver_private;
- if (robj != rdev->fbdev_rbo) {
+ /* don't unpin kernel fb objects */
+ if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
r = radeon_bo_reserve(robj, false);
- if (unlikely(r == 0)) {
+ if (r == 0) {
radeon_bo_unpin(robj);
radeon_bo_unreserve(robj);
}
@@ -755,7 +758,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
pci_set_power_state(dev->pdev, PCI_D3hot);
}
acquire_console_sem();
- fb_set_suspend(rdev->fbdev_info, 1);
+ radeon_fbdev_set_suspend(rdev, 1);
release_console_sem();
return 0;
}
@@ -779,7 +782,7 @@ int radeon_resume_kms(struct drm_device *dev)
radeon_agp_resume(rdev);
radeon_resume(rdev);
radeon_restore_bios_scratch_regs(rdev);
- fb_set_suspend(rdev->fbdev_info, 0);
+ radeon_fbdev_set_suspend(rdev, 0);
release_console_sem();
/* reset hpd state */
@@ -789,6 +792,26 @@ int radeon_resume_kms(struct drm_device *dev)
return 0;
}
+int radeon_gpu_reset(struct radeon_device *rdev)
+{
+ int r;
+
+ radeon_save_bios_scratch_regs(rdev);
+ radeon_suspend(rdev);
+
+ r = radeon_asic_reset(rdev);
+ if (!r) {
+ dev_info(rdev->dev, "GPU reset succeed\n");
+ radeon_resume(rdev);
+ radeon_restore_bios_scratch_regs(rdev);
+ drm_helper_resume_force_mode(rdev->ddev);
+ return 0;
+ }
+ /* bad news, how to tell it to userspace ? */
+ dev_info(rdev->dev, "GPU reset failed\n");
+ return r;
+}
+
/*
* Debugfs
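
radeon_gpu_reset() above wraps the per-ASIC reset in a suspend/resume bracket and only restores state when the reset reports success. The stand-alone model below mirrors just that control flow; every function in it is a stub for the example, not a driver entry point.

#include <stdio.h>

static int asic_reset_stub(void) { return 0; /* pretend the reset worked */ }

static int gpu_reset_model(void)
{
    int r;

    /* 1. save state the reset would clobber, stop the engines */
    printf("save scratch registers, suspend engines\n");

    /* 2. hit the ASIC reset */
    r = asic_reset_stub();
    if (r == 0) {
        /* 3. only on success: bring the engines back and restore state */
        printf("resume engines, restore scratch registers, force modeset\n");
        return 0;
    }

    /* 4. failure: nothing sensible left to do except report it */
    printf("GPU reset failed\n");
    return r;
}

int main(void) { return gpu_reset_model(); }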
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index bb1c122cad21..ce5163ed1fa6 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -831,10 +831,6 @@ void radeon_compute_pll(struct radeon_pll *pll,
static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
- struct drm_device *dev = fb->dev;
-
- if (fb->fbdev)
- radeonfb_remove(dev, fb);
if (radeon_fb->obj)
drm_gem_object_unreference_unlocked(radeon_fb->obj);
@@ -856,21 +852,15 @@ static const struct drm_framebuffer_funcs radeon_fb_funcs = {
.create_handle = radeon_user_framebuffer_create_handle,
};
-struct drm_framebuffer *
-radeon_framebuffer_create(struct drm_device *dev,
- struct drm_mode_fb_cmd *mode_cmd,
- struct drm_gem_object *obj)
+void
+radeon_framebuffer_init(struct drm_device *dev,
+ struct radeon_framebuffer *rfb,
+ struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_gem_object *obj)
{
- struct radeon_framebuffer *radeon_fb;
-
- radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
- if (radeon_fb == NULL) {
- return NULL;
- }
- drm_framebuffer_init(dev, &radeon_fb->base, &radeon_fb_funcs);
- drm_helper_mode_fill_fb_struct(&radeon_fb->base, mode_cmd);
- radeon_fb->obj = obj;
- return &radeon_fb->base;
+ rfb->obj = obj;
+ drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
+ drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
}
static struct drm_framebuffer *
@@ -879,6 +869,7 @@ radeon_user_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd *mode_cmd)
{
struct drm_gem_object *obj;
+ struct radeon_framebuffer *radeon_fb;
obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
if (obj == NULL) {
@@ -886,12 +877,19 @@ radeon_user_framebuffer_create(struct drm_device *dev,
"can't create framebuffer\n", mode_cmd->handle);
return NULL;
}
- return radeon_framebuffer_create(dev, mode_cmd, obj);
+
+ radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
+ if (radeon_fb == NULL) {
+ return NULL;
+ }
+
+ radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
+
+ return &radeon_fb->base;
}
static const struct drm_mode_config_funcs radeon_mode_funcs = {
.fb_create = radeon_user_framebuffer_create,
- .fb_changed = radeonfb_probe,
};
struct drm_prop_enum_list {
@@ -1031,12 +1029,14 @@ int radeon_modeset_init(struct radeon_device *rdev)
}
/* initialize hpd */
radeon_hpd_init(rdev);
- drm_helper_initial_config(rdev->ddev);
+
+ radeon_fbdev_init(rdev);
return 0;
}
void radeon_modeset_fini(struct radeon_device *rdev)
{
+ radeon_fbdev_fini(rdev);
kfree(rdev->mode_info.bios_hardcoded_edid);
if (rdev->mode_info.mode_config_initialized) {
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index c5ddaf58563a..fc9a38aadf97 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -1546,10 +1546,49 @@ static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig;
radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_DISABLE);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ if (ASIC_IS_DCE4(rdev))
+ /* disable the transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ else {
+ /* disable the encoder and transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ atombios_dig_encoder_setup(encoder, ATOM_DISABLE);
+ }
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ atombios_ddia_setup(encoder, ATOM_DISABLE);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ atombios_external_tmds_setup(encoder, ATOM_DISABLE);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ atombios_dac_setup(encoder, ATOM_DISABLE);
+ if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+ atombios_tv_setup(encoder, ATOM_DISABLE);
+ break;
+ }
+
if (radeon_encoder_is_digital(encoder)) {
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
r600_hdmi_disable(encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 9ac57a09784b..fcb5b52727b0 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -23,10 +23,6 @@
* Authors:
* David Airlie
*/
- /*
- * Modularization
- */
-
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fb.h>
@@ -42,17 +38,21 @@
#include <linux/vga_switcheroo.h>
-struct radeon_fb_device {
+/* object hierarchy -
+ this contains a helper + a radeon fb
+ the helper contains a pointer to radeon framebuffer baseclass.
+*/
+struct radeon_fbdev {
struct drm_fb_helper helper;
- struct radeon_framebuffer *rfb;
- struct radeon_device *rdev;
+ struct radeon_framebuffer rfb;
+ struct list_head fbdev_list;
+ struct radeon_device *rdev;
};
static struct fb_ops radeonfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_setcolreg = drm_fb_helper_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
@@ -61,45 +61,6 @@ static struct fb_ops radeonfb_ops = {
.fb_setcmap = drm_fb_helper_setcmap,
};
-/**
- * Currently it is assumed that the old framebuffer is reused.
- *
- * LOCKING
- * caller should hold the mode config lock.
- *
- */
-int radeonfb_resize(struct drm_device *dev, struct drm_crtc *crtc)
-{
- struct fb_info *info;
- struct drm_framebuffer *fb;
- struct drm_display_mode *mode = crtc->desired_mode;
-
- fb = crtc->fb;
- if (fb == NULL) {
- return 1;
- }
- info = fb->fbdev;
- if (info == NULL) {
- return 1;
- }
- if (mode == NULL) {
- return 1;
- }
- info->var.xres = mode->hdisplay;
- info->var.right_margin = mode->hsync_start - mode->hdisplay;
- info->var.hsync_len = mode->hsync_end - mode->hsync_start;
- info->var.left_margin = mode->htotal - mode->hsync_end;
- info->var.yres = mode->vdisplay;
- info->var.lower_margin = mode->vsync_start - mode->vdisplay;
- info->var.vsync_len = mode->vsync_end - mode->vsync_start;
- info->var.upper_margin = mode->vtotal - mode->vsync_end;
- info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100;
- /* avoid overflow */
- info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;
-
- return 0;
-}
-EXPORT_SYMBOL(radeonfb_resize);
static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled)
{
@@ -125,57 +86,44 @@ static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled)
return aligned;
}
-static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
- .gamma_set = radeon_crtc_fb_gamma_set,
- .gamma_get = radeon_crtc_fb_gamma_get,
-};
+static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
+{
+ struct radeon_bo *rbo = gobj->driver_private;
+ int ret;
+
+ ret = radeon_bo_reserve(rbo, false);
+ if (likely(ret == 0)) {
+ radeon_bo_kunmap(rbo);
+ radeon_bo_unreserve(rbo);
+ }
+ drm_gem_object_unreference_unlocked(gobj);
+}
-int radeonfb_create(struct drm_device *dev,
- uint32_t fb_width, uint32_t fb_height,
- uint32_t surface_width, uint32_t surface_height,
- uint32_t surface_depth, uint32_t surface_bpp,
- struct drm_framebuffer **fb_p)
+static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
+ struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_gem_object **gobj_p)
{
- struct radeon_device *rdev = dev->dev_private;
- struct fb_info *info;
- struct radeon_fb_device *rfbdev;
- struct drm_framebuffer *fb = NULL;
- struct radeon_framebuffer *rfb;
- struct drm_mode_fb_cmd mode_cmd;
+ struct radeon_device *rdev = rfbdev->rdev;
struct drm_gem_object *gobj = NULL;
struct radeon_bo *rbo = NULL;
- struct device *device = &rdev->pdev->dev;
- int size, aligned_size, ret;
- u64 fb_gpuaddr;
- void *fbptr = NULL;
- unsigned long tmp;
bool fb_tiled = false; /* useful for testing */
u32 tiling_flags = 0;
+ int ret;
+ int aligned_size, size;
- mode_cmd.width = surface_width;
- mode_cmd.height = surface_height;
-
- /* avivo can't scanout real 24bpp */
- if ((surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
- surface_bpp = 32;
-
- mode_cmd.bpp = surface_bpp;
/* need to align pitch with crtc limits */
- mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8);
- mode_cmd.depth = surface_depth;
+ mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);
- size = mode_cmd.pitch * mode_cmd.height;
+ size = mode_cmd->pitch * mode_cmd->height;
aligned_size = ALIGN(size, PAGE_SIZE);
-
ret = radeon_gem_object_create(rdev, aligned_size, 0,
- RADEON_GEM_DOMAIN_VRAM,
- false, ttm_bo_type_kernel,
- &gobj);
+ RADEON_GEM_DOMAIN_VRAM,
+ false, ttm_bo_type_kernel,
+ &gobj);
if (ret) {
- printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n",
- surface_width, surface_height);
- ret = -ENOMEM;
- goto out;
+ printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
+ aligned_size);
+ return -ENOMEM;
}
rbo = gobj->driver_private;
@@ -183,7 +131,7 @@ int radeonfb_create(struct drm_device *dev,
tiling_flags = RADEON_TILING_MACRO;
#ifdef __BIG_ENDIAN
- switch (mode_cmd.bpp) {
+ switch (mode_cmd->bpp) {
case 32:
tiling_flags |= RADEON_TILING_SWAP_32BIT;
break;
@@ -196,57 +144,81 @@ int radeonfb_create(struct drm_device *dev,
if (tiling_flags) {
ret = radeon_bo_set_tiling_flags(rbo,
- tiling_flags | RADEON_TILING_SURFACE,
- mode_cmd.pitch);
+ tiling_flags | RADEON_TILING_SURFACE,
+ mode_cmd->pitch);
if (ret)
dev_err(rdev->dev, "FB failed to set tiling flags\n");
}
- mutex_lock(&rdev->ddev->struct_mutex);
- fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj);
- if (fb == NULL) {
- DRM_ERROR("failed to allocate fb.\n");
- ret = -ENOMEM;
- goto out_unref;
- }
+
+
ret = radeon_bo_reserve(rbo, false);
if (unlikely(ret != 0))
goto out_unref;
- ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
+ ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, NULL);
if (ret) {
radeon_bo_unreserve(rbo);
goto out_unref;
}
if (fb_tiled)
radeon_bo_check_tiling(rbo, 0, 0);
- ret = radeon_bo_kmap(rbo, &fbptr);
+ ret = radeon_bo_kmap(rbo, NULL);
radeon_bo_unreserve(rbo);
if (ret) {
goto out_unref;
}
- list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list);
+ *gobj_p = gobj;
+ return 0;
+out_unref:
+ radeonfb_destroy_pinned_object(gobj);
+ *gobj_p = NULL;
+ return ret;
+}
+
+static int radeonfb_create(struct radeon_fbdev *rfbdev,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct radeon_device *rdev = rfbdev->rdev;
+ struct fb_info *info;
+ struct drm_framebuffer *fb = NULL;
+ struct drm_mode_fb_cmd mode_cmd;
+ struct drm_gem_object *gobj = NULL;
+ struct radeon_bo *rbo = NULL;
+ struct device *device = &rdev->pdev->dev;
+ int ret;
+ unsigned long tmp;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+
+ /* avivo can't scanout real 24bpp */
+ if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
+ sizes->surface_bpp = 32;
+
+ mode_cmd.bpp = sizes->surface_bpp;
+ mode_cmd.depth = sizes->surface_depth;
- *fb_p = fb;
- rfb = to_radeon_framebuffer(fb);
- rdev->fbdev_rfb = rfb;
- rdev->fbdev_rbo = rbo;
+ ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
+ rbo = gobj->driver_private;
- info = framebuffer_alloc(sizeof(struct radeon_fb_device), device);
+ /* okay we have an object now allocate the framebuffer */
+ info = framebuffer_alloc(0, device);
if (info == NULL) {
ret = -ENOMEM;
goto out_unref;
}
- rdev->fbdev_info = info;
- rfbdev = info->par;
- rfbdev->helper.funcs = &radeon_fb_helper_funcs;
- rfbdev->helper.dev = dev;
- ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, rdev->num_crtc,
- RADEONFB_CONN_LIMIT);
- if (ret)
- goto out_unref;
+ info->par = rfbdev;
- memset_io(fbptr, 0x0, aligned_size);
+ radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
+
+ fb = &rfbdev->rfb.base;
+
+ /* setup helper */
+ rfbdev->helper.fb = fb;
+ rfbdev->helper.fbdev = info;
+
+ memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo));
strcpy(info->fix.id, "radeondrmfb");
@@ -255,13 +227,13 @@ int radeonfb_create(struct drm_device *dev,
info->flags = FBINFO_DEFAULT;
info->fbops = &radeonfb_ops;
- tmp = fb_gpuaddr - rdev->mc.vram_start;
+ tmp = radeon_bo_gpu_offset(rbo) - rdev->mc.vram_start;
info->fix.smem_start = rdev->mc.aper_base + tmp;
- info->fix.smem_len = size;
- info->screen_base = fbptr;
- info->screen_size = size;
+ info->fix.smem_len = radeon_bo_size(rbo);
+ info->screen_base = rbo->kptr;
+ info->screen_size = radeon_bo_size(rbo);
- drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
+ drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
/* setup aperture base/size for vesafb takeover */
info->aperture_base = rdev->ddev->mode_config.fb_base;
@@ -274,44 +246,55 @@ int radeonfb_create(struct drm_device *dev,
info->pixmap.access_align = 32;
info->pixmap.flags = FB_PIXMAP_SYSTEM;
info->pixmap.scan_align = 1;
+
if (info->screen_base == NULL) {
ret = -ENOSPC;
goto out_unref;
}
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_unref;
+ }
+
DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
DRM_INFO("vram apper at 0x%lX\n", (unsigned long)rdev->mc.aper_base);
- DRM_INFO("size %lu\n", (unsigned long)size);
+ DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo));
DRM_INFO("fb depth is %d\n", fb->depth);
DRM_INFO(" pitch is %d\n", fb->pitch);
- fb->fbdev = info;
- rfbdev->rfb = rfb;
- rfbdev->rdev = rdev;
-
- mutex_unlock(&rdev->ddev->struct_mutex);
vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
return 0;
out_unref:
if (rbo) {
- ret = radeon_bo_reserve(rbo, false);
- if (likely(ret == 0)) {
- radeon_bo_kunmap(rbo);
- radeon_bo_unreserve(rbo);
- }
+
}
if (fb && ret) {
- list_del(&fb->filp_head);
drm_gem_object_unreference(gobj);
drm_framebuffer_cleanup(fb);
kfree(fb);
}
- drm_gem_object_unreference(gobj);
- mutex_unlock(&rdev->ddev->struct_mutex);
-out:
return ret;
}
+static int radeon_fb_find_or_create_single(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper;
+ int new_fb = 0;
+ int ret;
+
+ if (!helper->fb) {
+ ret = radeonfb_create(rfbdev, sizes);
+ if (ret)
+ return ret;
+ new_fb = 1;
+ }
+ return new_fb;
+}
+
static char *mode_option;
int radeon_parse_options(char *options)
{
@@ -328,46 +311,111 @@ int radeon_parse_options(char *options)
return 0;
}
-int radeonfb_probe(struct drm_device *dev)
+void radeonfb_hotplug(struct drm_device *dev, bool polled)
{
struct radeon_device *rdev = dev->dev_private;
- int bpp_sel = 32;
- /* select 8 bpp console on RN50 or 16MB cards */
- if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
- bpp_sel = 8;
+ drm_helper_fb_hpd_irq_event(&rdev->mode_info.rfbdev->helper);
+}
- return drm_fb_helper_single_fb_probe(dev, bpp_sel, &radeonfb_create);
+static void radeon_fb_output_status_changed(struct drm_fb_helper *fb_helper)
+{
+ drm_helper_fb_hotplug_event(fb_helper, true);
}
-int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
+static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
{
struct fb_info *info;
- struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb);
+ struct radeon_framebuffer *rfb = &rfbdev->rfb;
struct radeon_bo *rbo;
int r;
- if (!fb) {
- return -EINVAL;
+ if (rfbdev->helper.fbdev) {
+ info = rfbdev->helper.fbdev;
+
+ unregister_framebuffer(info);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
}
- info = fb->fbdev;
- if (info) {
- struct radeon_fb_device *rfbdev = info->par;
+
+ if (rfb->obj) {
rbo = rfb->obj->driver_private;
- unregister_framebuffer(info);
r = radeon_bo_reserve(rbo, false);
if (likely(r == 0)) {
radeon_bo_kunmap(rbo);
radeon_bo_unpin(rbo);
radeon_bo_unreserve(rbo);
}
- drm_fb_helper_free(&rfbdev->helper);
- framebuffer_release(info);
+ drm_gem_object_unreference_unlocked(rfb->obj);
}
+ drm_fb_helper_fini(&rfbdev->helper);
+ drm_framebuffer_cleanup(&rfb->base);
- printk(KERN_INFO "unregistered panic notifier\n");
+ return 0;
+}
+static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
+ .gamma_set = radeon_crtc_fb_gamma_set,
+ .gamma_get = radeon_crtc_fb_gamma_get,
+ .fb_probe = radeon_fb_find_or_create_single,
+ .fb_output_status_changed = radeon_fb_output_status_changed,
+};
+
+int radeon_fbdev_init(struct radeon_device *rdev)
+{
+ struct radeon_fbdev *rfbdev;
+ int bpp_sel = 32;
+
+ /* select 8 bpp console on RN50 or 16MB cards */
+ if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
+ bpp_sel = 8;
+
+ rfbdev = kzalloc(sizeof(struct radeon_fbdev), GFP_KERNEL);
+ if (!rfbdev)
+ return -ENOMEM;
+
+ rfbdev->rdev = rdev;
+ rdev->mode_info.rfbdev = rfbdev;
+ rfbdev->helper.funcs = &radeon_fb_helper_funcs;
+
+ drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
+ rdev->num_crtc,
+ RADEONFB_CONN_LIMIT, true);
+ drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
+ drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
return 0;
+
+}
+
+void radeon_fbdev_fini(struct radeon_device *rdev)
+{
+ if (!rdev->mode_info.rfbdev)
+ return;
+
+ radeon_fbdev_destroy(rdev->ddev, rdev->mode_info.rfbdev);
+ kfree(rdev->mode_info.rfbdev);
+ rdev->mode_info.rfbdev = NULL;
+}
+
+void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
+{
+ fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
+}
+
+int radeon_fbdev_total_size(struct radeon_device *rdev)
+{
+ struct radeon_bo *robj;
+ int size = 0;
+
+ robj = rdev->mode_info.rfbdev->rfb.obj->driver_private;
+ size += radeon_bo_size(robj);
+ return size;
+}
+
+bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
+{
+ if (robj == rdev->mode_info.rfbdev->rfb.obj->driver_private)
+ return true;
+ return false;
}
-EXPORT_SYMBOL(radeonfb_remove);
-MODULE_LICENSE("GPL");
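
Earlier in this file the framebuffer path computes the scanout pitch as an aligned width times (bpp + 1) / 8 bytes per pixel. A rough stand-alone illustration, with a 64-pixel alignment assumed purely for the example (the real alignment comes from radeon_align_pitch and varies by ASIC and tiling):

#include <stdio.h>

static unsigned align_pitch_pixels(unsigned width, unsigned align)
{
    /* round the width up to the next multiple of align (align must be a power of two) */
    return (width + align - 1) & ~(align - 1);
}

static unsigned pitch_bytes(unsigned width, unsigned bpp)
{
    unsigned cpp = (bpp + 1) / 8;               /* bytes per pixel; 15 bpp still needs 2 */
    return align_pitch_pixels(width, 64) * cpp; /* 64-pixel alignment assumed here */
}

int main(void)
{
    printf("1366x768 @32bpp pitch = %u bytes\n", pitch_bytes(1366, 32));
    return 0;
}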
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index d90f95b405c5..b1f9a81b5d1d 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -58,7 +58,6 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
radeon_fence_ring_emit(rdev, fence);
fence->emited = true;
- fence->timeout = jiffies + ((2000 * HZ) / 1000);
list_del(&fence->list);
list_add_tail(&fence->list, &rdev->fence_drv.emited);
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
@@ -71,15 +70,34 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
struct list_head *i, *n;
uint32_t seq;
bool wake = false;
+ unsigned long cjiffies;
- if (rdev == NULL) {
- return true;
- }
- if (rdev->shutdown) {
- return true;
- }
seq = RREG32(rdev->fence_drv.scratch_reg);
- rdev->fence_drv.last_seq = seq;
+ if (seq != rdev->fence_drv.last_seq) {
+ rdev->fence_drv.last_seq = seq;
+ rdev->fence_drv.last_jiffies = jiffies;
+ rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+ } else {
+ cjiffies = jiffies;
+ if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) {
+ cjiffies -= rdev->fence_drv.last_jiffies;
+ if (time_after(rdev->fence_drv.last_timeout, cjiffies)) {
+ /* update the timeout */
+ rdev->fence_drv.last_timeout -= cjiffies;
+ } else {
+ /* the 500ms timeout is elapsed we should test
+ * for GPU lockup
+ */
+ rdev->fence_drv.last_timeout = 1;
+ }
+ } else {
+ /* wrap around update last jiffies, we will just wait
+ * a little longer
+ */
+ rdev->fence_drv.last_jiffies = cjiffies;
+ }
+ return false;
+ }
n = NULL;
list_for_each(i, &rdev->fence_drv.emited) {
fence = list_entry(i, struct radeon_fence, list);
@@ -171,9 +189,8 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
struct radeon_device *rdev;
- unsigned long cur_jiffies;
- unsigned long timeout;
- bool expired = false;
+ unsigned long irq_flags, timeout;
+ u32 seq;
int r;
if (fence == NULL) {
@@ -184,21 +201,18 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
if (radeon_fence_signaled(fence)) {
return 0;
}
-
+ timeout = rdev->fence_drv.last_timeout;
retry:
- cur_jiffies = jiffies;
- timeout = HZ / 100;
- if (time_after(fence->timeout, cur_jiffies)) {
- timeout = fence->timeout - cur_jiffies;
- }
-
+ /* save current sequence used to check for GPU lockup */
+ seq = rdev->fence_drv.last_seq;
if (intr) {
radeon_irq_kms_sw_irq_get(rdev);
r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
radeon_fence_signaled(fence), timeout);
radeon_irq_kms_sw_irq_put(rdev);
- if (unlikely(r < 0))
+ if (unlikely(r < 0)) {
return r;
+ }
} else {
radeon_irq_kms_sw_irq_get(rdev);
r = wait_event_timeout(rdev->fence_drv.queue,
@@ -206,38 +220,36 @@ retry:
radeon_irq_kms_sw_irq_put(rdev);
}
if (unlikely(!radeon_fence_signaled(fence))) {
- if (unlikely(r == 0)) {
- expired = true;
+ /* we were interrupted for some reason and fence isn't
+ * signaled yet, resume wait
+ */
+ if (r) {
+ timeout = r;
+ goto retry;
}
- if (unlikely(expired)) {
- timeout = 1;
- if (time_after(cur_jiffies, fence->timeout)) {
- timeout = cur_jiffies - fence->timeout;
- }
- timeout = jiffies_to_msecs(timeout);
- if (timeout > 500) {
- DRM_ERROR("fence(%p:0x%08X) %lums timeout "
- "going to reset GPU\n",
- fence, fence->seq, timeout);
- radeon_gpu_reset(rdev);
- WREG32(rdev->fence_drv.scratch_reg, fence->seq);
- }
+ /* don't protect read access to rdev->fence_drv.last_seq
+ * if we experiencing a lockup the value doesn't change
+ */
+ if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
+ /* good news we believe it's a lockup */
+ WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", fence->seq, seq);
+ /* FIXME: what should we do ? marking everyone
+ * as signaled for now
+ */
+ rdev->gpu_lockup = true;
+ r = radeon_gpu_reset(rdev);
+ if (r)
+ return r;
+ WREG32(rdev->fence_drv.scratch_reg, fence->seq);
+ rdev->gpu_lockup = false;
}
+ timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+ write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+ rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+ rdev->fence_drv.last_jiffies = jiffies;
+ write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
goto retry;
}
- if (unlikely(expired)) {
- rdev->fence_drv.count_timeout++;
- cur_jiffies = jiffies;
- timeout = 1;
- if (time_after(cur_jiffies, fence->timeout)) {
- timeout = cur_jiffies - fence->timeout;
- }
- timeout = jiffies_to_msecs(timeout);
- DRM_ERROR("fence(%p:0x%08X) %lums timeout\n",
- fence, fence->seq, timeout);
- DRM_ERROR("last signaled fence(0x%08X)\n",
- rdev->fence_drv.last_seq);
- }
return 0;
}
@@ -333,7 +345,6 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
INIT_LIST_HEAD(&rdev->fence_drv.created);
INIT_LIST_HEAD(&rdev->fence_drv.emited);
INIT_LIST_HEAD(&rdev->fence_drv.signaled);
- rdev->fence_drv.count_timeout = 0;
init_waitqueue_head(&rdev->fence_drv.queue);
rdev->fence_drv.initialized = true;
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
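
The reworked poll/wait logic above drops the per-fence absolute timeout: it keeps a countdown that is refilled whenever the fence scratch register advances, and only when that budget runs dry does the wait path ask the ASIC whether the GPU is locked up. A simplified userspace model of that bookkeeping, where plain integer ticks stand in for jiffies and LOCKUP_BUDGET for RADEON_FENCE_JIFFIES_TIMEOUT:

#include <stdio.h>
#include <stdbool.h>

#define LOCKUP_BUDGET 50 /* arbitrary ticks standing in for the jiffies timeout */

struct fence_state {
    unsigned last_seq;
    unsigned last_tick;
    unsigned budget;
};

/* returns true when the caller should run its GPU-lockup check */
static bool fence_poll(struct fence_state *s, unsigned seq, unsigned now)
{
    if (seq != s->last_seq) {
        /* progress: remember it and refill the budget */
        s->last_seq = seq;
        s->last_tick = now;
        s->budget = LOCKUP_BUDGET;
        return false;
    }
    if (now > s->last_tick) {
        unsigned elapsed = now - s->last_tick;
        s->last_tick = now;
        if (elapsed < s->budget) {
            s->budget -= elapsed;   /* still within the waiting budget */
            return false;
        }
        s->budget = 1;              /* budget spent: suspect a lockup */
        return true;
    }
    return false;                   /* clock unchanged or wrapped, keep waiting */
}

int main(void)
{
    struct fence_state s = { .last_seq = 0, .last_tick = 0, .budget = LOCKUP_BUDGET };
    unsigned seq = 1;               /* the sequence never advances past 1 here */

    for (unsigned t = 1; t <= 120; t += 10)
        if (fence_poll(&s, seq, t))
            printf("tick %u: time to check for a GPU lockup\n", t);
    return 0;
}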
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 1770d3c07fd0..e65b90317fab 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -173,7 +173,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
int i, j;
if (!rdev->gart.ready) {
- DRM_ERROR("trying to bind memory to unitialized GART !\n");
+ WARN(1, "trying to bind memory to unitialized GART !\n");
return -EINVAL;
}
t = offset / RADEON_GPU_PAGE_SIZE;
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index ef92d147d8f0..a72a3ee5d69b 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -44,6 +44,9 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
if (robj) {
radeon_bo_unref(&robj);
}
+
+ drm_gem_object_release(gobj);
+ kfree(gobj);
}
int radeon_gem_object_create(struct radeon_device *rdev, int size,
@@ -158,8 +161,7 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
args->vram_visible = rdev->mc.real_vram_size;
if (rdev->stollen_vga_memory)
args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
- if (rdev->fbdev_rbo)
- args->vram_visible -= radeon_bo_size(rdev->fbdev_rbo);
+ args->vram_visible -= radeon_fbdev_total_size(rdev);
args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 -
RADEON_IB_POOL_SIZE*64*1024;
return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index a212041e8b0b..a95907aa7eae 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -55,6 +55,8 @@ static void radeon_hotplug_work_func(struct work_struct *work)
radeon_connector_hotplug(connector);
}
/* Just fire off a uevent and let userspace tell us what to do */
+ radeonfb_hotplug(dev, false);
+
drm_sysfs_hotplug_event(dev);
}
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 5413fcd63086..a2bc31465e4f 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -39,6 +39,7 @@
#include <linux/i2c-algo-bit.h>
#include "radeon_fixed.h"
+struct radeon_bo;
struct radeon_device;
#define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base)
@@ -202,6 +203,8 @@ enum radeon_dvo_chip {
DVO_SIL1178,
};
+struct radeon_fbdev;
+
struct radeon_mode_info {
struct atom_context *atom_context;
struct card_info *atom_card_info;
@@ -218,6 +221,9 @@ struct radeon_mode_info {
struct drm_property *tmds_pll_property;
/* hardcoded DFP edid from BIOS */
struct edid *bios_hardcoded_edid;
+
+ /* pointer to fbdev info structure */
+ struct radeon_fbdev *rfbdev;
};
#define MAX_H_CODE_TIMING_LEN 32
@@ -339,6 +345,7 @@ struct radeon_encoder {
enum radeon_rmx_type rmx_type;
struct drm_display_mode native_mode;
void *enc_priv;
+ int audio_polling_active;
int hdmi_offset;
int hdmi_config_offset;
int hdmi_audio_workaround;
@@ -532,11 +539,10 @@ extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
-struct drm_framebuffer *radeon_framebuffer_create(struct drm_device *dev,
- struct drm_mode_fb_cmd *mode_cmd,
- struct drm_gem_object *obj);
-
-int radeonfb_probe(struct drm_device *dev);
+void radeon_framebuffer_init(struct drm_device *dev,
+ struct radeon_framebuffer *rfb,
+ struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_gem_object *obj);
int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev);
@@ -575,4 +581,12 @@ void radeon_legacy_tv_adjust_pll2(struct drm_encoder *encoder,
void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
+
+/* fbdev layer */
+int radeon_fbdev_init(struct radeon_device *rdev);
+void radeon_fbdev_fini(struct radeon_device *rdev);
+void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state);
+int radeon_fbdev_total_size(struct radeon_device *rdev);
+bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj);
+void radeonfb_hotplug(struct drm_device *dev, bool polled);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 122774742bd5..6a8617bac142 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -192,7 +192,7 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
}
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
if (likely(r == 0)) {
bo->pin_count = 1;
if (gpu_addr != NULL)
@@ -216,7 +216,7 @@ int radeon_bo_unpin(struct radeon_bo *bo)
return 0;
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
if (unlikely(r != 0))
dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
return r;
@@ -331,7 +331,7 @@ int radeon_bo_list_validate(struct list_head *head)
lobj->rdomain);
}
r = ttm_bo_validate(&bo->tbo, &bo->placement,
- true, false);
+ true, false, false);
if (unlikely(r))
return r;
}
@@ -499,11 +499,33 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo,
radeon_bo_check_tiling(rbo, 0, 1);
}
-void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
+ struct radeon_device *rdev;
struct radeon_bo *rbo;
+ unsigned long offset, size;
+ int r;
+
if (!radeon_ttm_bo_is_radeon_bo(bo))
- return;
+ return 0;
rbo = container_of(bo, struct radeon_bo, tbo);
radeon_bo_check_tiling(rbo, 0, 0);
+ rdev = rbo->rdev;
+ if (bo->mem.mem_type == TTM_PL_VRAM) {
+ size = bo->mem.num_pages << PAGE_SHIFT;
+ offset = bo->mem.mm_node->start << PAGE_SHIFT;
+ if ((offset + size) > rdev->mc.visible_vram_size) {
+ /* hurrah the memory is not visible ! */
+ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
+ rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+ r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
+ if (unlikely(r != 0))
+ return r;
+ offset = bo->mem.mm_node->start << PAGE_SHIFT;
+ /* this should not happen */
+ if ((offset + size) > rdev->mc.visible_vram_size)
+ return -EINVAL;
+ }
+ }
+ return 0;
}
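
The fault-reserve notify above now checks whether a VRAM buffer actually lies inside the CPU-visible part of VRAM before letting it be mapped, re-validating it into the visible range if it does not. The core test is just an interval check, sketched stand-alone below:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

static bool bo_is_cpu_visible(uint64_t offset, uint64_t size,
                              uint64_t visible_vram_size)
{
    /* the whole buffer must sit below the end of the CPU-visible aperture */
    return offset + size <= visible_vram_size;
}

int main(void)
{
    uint64_t visible = 256ULL << 20; /* e.g. a 256 MiB CPU-visible aperture */

    printf("%d\n", bo_is_cpu_visible(128ULL << 20, 4096, visible)); /* 1: mappable */
    printf("%d\n", bo_is_cpu_visible(300ULL << 20, 4096, visible)); /* 0: must be moved first */
    return 0;
}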
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 7ab43de1e244..353998dc2c03 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -168,6 +168,6 @@ extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
bool force_drop);
extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
-extern void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 40ab6d9c3736..cc5316dcf580 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -424,7 +424,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
if ((*cmd & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
(*cmd & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
- offset = *cmd << 10;
+ offset = *cmd3 << 10;
if (radeon_check_and_fixup_offset
(dev_priv, file_priv, &offset)) {
DRM_ERROR("Invalid second packet offset\n");
@@ -2895,9 +2895,12 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data,
return rv;
rv = drm_buffer_copy_from_user(cmdbuf->buffer, buffer,
cmdbuf->bufsz);
- if (rv)
+ if (rv) {
+ drm_buffer_free(cmdbuf->buffer);
return rv;
- }
+ }
+ } else
+ goto done;
orig_nbox = cmdbuf->nbox;
@@ -2905,8 +2908,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data,
int temp;
temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf);
- if (cmdbuf->bufsz != 0)
- drm_buffer_free(cmdbuf->buffer);
+ drm_buffer_free(cmdbuf->buffer);
return temp;
}
@@ -3012,16 +3014,15 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data,
}
}
- if (cmdbuf->bufsz != 0)
- drm_buffer_free(cmdbuf->buffer);
+ drm_buffer_free(cmdbuf->buffer);
+ done:
DRM_DEBUG("DONE\n");
COMMIT_RING();
return 0;
err:
- if (cmdbuf->bufsz != 0)
- drm_buffer_free(cmdbuf->buffer);
+ drm_buffer_free(cmdbuf->buffer);
return -EINVAL;
}
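
The packet3 fix above reads the second pitch/offset value from dword 3 of the command (it was previously taken from dword 0) and turns the register's shifted form back into a byte offset with "<< 10". A tiny stand-alone illustration; the packet contents here are made up and trimmed to the single field the example uses:

#include <stdio.h>
#include <stdint.h>

static uint32_t second_offset_bytes(const uint32_t *packet)
{
    uint32_t cmd3 = packet[3];      /* dword 3, not dword 0, carries the second offset */
    return cmd3 << 10;              /* field is stored with 1 KiB granularity */
}

int main(void)
{
    uint32_t packet[4] = { 0xC0001A00, 0x0, 0x0, 0x40 }; /* hypothetical packet, offset field = 0x40 */

    printf("second offset = 0x%x bytes\n", second_offset_bytes(packet)); /* 0x10000 */
    return 0;
}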
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index d031b6863082..af98f45954b3 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -33,6 +33,7 @@
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
+#include <ttm/ttm_page_alloc.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <linux/seq_file.h>
@@ -162,34 +163,21 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
(unsigned)type);
return -EINVAL;
}
- man->io_offset = rdev->mc.agp_base;
- man->io_size = rdev->mc.gtt_size;
- man->io_addr = NULL;
if (!rdev->ddev->agp->cant_use_aperture)
- man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
- TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
- } else
-#endif
- {
- man->io_offset = 0;
- man->io_size = 0;
- man->io_addr = NULL;
}
+#endif
break;
case TTM_PL_VRAM:
/* "On-card" video ram */
man->gpu_offset = rdev->mc.vram_start;
man->flags = TTM_MEMTYPE_FLAG_FIXED |
- TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
- man->io_addr = NULL;
- man->io_offset = rdev->mc.aper_base;
- man->io_size = rdev->mc.aper_size;
break;
default:
DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
@@ -244,9 +232,9 @@ static void radeon_move_null(struct ttm_buffer_object *bo,
}
static int radeon_move_blit(struct ttm_buffer_object *bo,
- bool evict, int no_wait,
- struct ttm_mem_reg *new_mem,
- struct ttm_mem_reg *old_mem)
+ bool evict, int no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem,
+ struct ttm_mem_reg *old_mem)
{
struct radeon_device *rdev;
uint64_t old_start, new_start;
@@ -290,13 +278,14 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
/* FIXME: handle copy error */
r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
- evict, no_wait, new_mem);
+ evict, no_wait_reserve, no_wait_gpu, new_mem);
radeon_fence_unref(&fence);
return r;
}
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
- bool evict, bool interruptible, bool no_wait,
+ bool evict, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
@@ -317,7 +306,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
placement.busy_placement = &placements;
placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
- interruptible, no_wait);
+ interruptible, no_wait_reserve, no_wait_gpu);
if (unlikely(r)) {
return r;
}
@@ -331,11 +320,11 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
if (unlikely(r)) {
goto out_cleanup;
}
- r = radeon_move_blit(bo, true, no_wait, &tmp_mem, old_mem);
+ r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
- r = ttm_bo_move_ttm(bo, true, no_wait, new_mem);
+ r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out_cleanup:
if (tmp_mem.mm_node) {
struct ttm_bo_global *glob = rdev->mman.bdev.glob;
@@ -349,7 +338,8 @@ out_cleanup:
}
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
- bool evict, bool interruptible, bool no_wait,
+ bool evict, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
@@ -369,15 +359,15 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
placement.num_busy_placement = 1;
placement.busy_placement = &placements;
placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
- r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait);
+ r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu);
if (unlikely(r)) {
return r;
}
- r = ttm_bo_move_ttm(bo, true, no_wait, &tmp_mem);
+ r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
- r = radeon_move_blit(bo, true, no_wait, new_mem, old_mem);
+ r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -394,8 +384,9 @@ out_cleanup:
}
static int radeon_bo_move(struct ttm_buffer_object *bo,
- bool evict, bool interruptible, bool no_wait,
- struct ttm_mem_reg *new_mem)
+ bool evict, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
struct ttm_mem_reg *old_mem = &bo->mem;
@@ -422,23 +413,66 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
if (old_mem->mem_type == TTM_PL_VRAM &&
new_mem->mem_type == TTM_PL_SYSTEM) {
r = radeon_move_vram_ram(bo, evict, interruptible,
- no_wait, new_mem);
+ no_wait_reserve, no_wait_gpu, new_mem);
} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
new_mem->mem_type == TTM_PL_VRAM) {
r = radeon_move_ram_vram(bo, evict, interruptible,
- no_wait, new_mem);
+ no_wait_reserve, no_wait_gpu, new_mem);
} else {
- r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem);
+ r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
}
if (r) {
memcpy:
- r = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
}
-
return r;
}
+static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct radeon_device *rdev = radeon_get_rdev(bdev);
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* system memory */
+ return 0;
+ case TTM_PL_TT:
+#if __OS_HAS_AGP
+ if (rdev->flags & RADEON_IS_AGP) {
+ /* RADEON_IS_AGP is set only if AGP is active */
+ mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ mem->bus.base = rdev->mc.agp_base;
+ mem->bus.is_iomem = true;
+ }
+#endif
+ break;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ /* check if it's visible */
+ if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
+ return -EINVAL;
+ mem->bus.base = rdev->mc.aper_base;
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
bool lazy, bool interruptible)
{
@@ -479,6 +513,8 @@ static struct ttm_bo_driver radeon_bo_driver = {
.sync_obj_ref = &radeon_sync_obj_ref,
.move_notify = &radeon_bo_move_notify,
.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
+ .io_mem_reserve = &radeon_ttm_io_mem_reserve,
+ .io_mem_free = &radeon_ttm_io_mem_free,
};
int radeon_ttm_init(struct radeon_device *rdev)
@@ -745,8 +781,8 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES];
- static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES][32];
+ static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+1];
+ static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+1][32];
unsigned i;
for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
@@ -763,7 +799,13 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager;
}
- return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES);
+ /* Add ttm page pool to debugfs */
+ sprintf(radeon_mem_types_names[i], "ttm_page_pool");
+ radeon_mem_types_list[i].name = radeon_mem_types_names[i];
+ radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
+ radeon_mem_types_list[i].driver_features = 0;
+ radeon_mem_types_list[i].data = NULL;
+ return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES+1);
#endif
return 0;
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 1a41cb268b72..dc76fe76eb25 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -243,8 +243,6 @@ int rs400_mc_wait_for_idle(struct radeon_device *rdev)
void rs400_gpu_init(struct radeon_device *rdev)
{
- /* FIXME: HDP same place on rs400 ? */
- r100_hdp_reset(rdev);
/* FIXME: is this correct ? */
r420_pipes_init(rdev);
if (rs400_mc_wait_for_idle(rdev)) {
@@ -433,7 +431,7 @@ int rs400_resume(struct radeon_device *rdev)
/* setup MC before calling post tables */
rs400_mc_program(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -497,7 +495,7 @@ int rs400_init(struct radeon_device *rdev)
return r;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index a81bc7a21e14..5e3f21861f45 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -147,6 +147,78 @@ void rs600_hpd_fini(struct radeon_device *rdev)
}
}
+void rs600_bm_disable(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ /* disable bus mastering */
+ pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
+ pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
+ mdelay(1);
+}
+
+int rs600_asic_reset(struct radeon_device *rdev)
+{
+ u32 status, tmp;
+
+ struct rv515_mc_save save;
+
+ /* Stops all mc clients */
+ rv515_mc_stop(rdev, &save);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ if (!G_000E40_GUI_ACTIVE(status)) {
+ return 0;
+ }
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* stop CP */
+ WREG32(RADEON_CP_CSQ_CNTL, 0);
+ tmp = RREG32(RADEON_CP_RB_CNTL);
+ WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+ WREG32(RADEON_CP_RB_RPTR_WR, 0);
+ WREG32(RADEON_CP_RB_WPTR, 0);
+ WREG32(RADEON_CP_RB_CNTL, tmp);
+ pci_save_state(rdev->pdev);
+ /* disable bus mastering */
+ rs600_bm_disable(rdev);
+ /* reset GA+VAP */
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
+ S_0000F0_SOFT_RESET_GA(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* reset CP */
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* reset MC */
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_MC(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* restore PCI & busmastering */
+ pci_restore_state(rdev->pdev);
+ /* Check if GPU is idle */
+ if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
+ dev_err(rdev->dev, "failed to reset GPU\n");
+ rdev->gpu_lockup = true;
+ return -1;
+ }
+ rv515_mc_resume(rdev, &save);
+ dev_info(rdev->dev, "GPU reset succeed\n");
+ return 0;
+}
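
The reset sequence above builds its soft-reset requests from the S_0000F0_SOFT_RESET_*() helpers added to rs600d.h below; each helper places a single bit, so resetting several blocks is just an OR of helpers. A sketch of the pattern, with values that follow directly from those definitions:

/* GA is bit 13 (0x2000) and VAP is bit 2 (0x0004), so this writes 0x2004 */
u32 reset_mask = S_0000F0_SOFT_RESET_GA(1) | S_0000F0_SOFT_RESET_VAP(1);

WREG32(R_0000F0_RBBM_SOFT_RESET, reset_mask);
(void)RREG32(R_0000F0_RBBM_SOFT_RESET);		/* post the write */
mdelay(500);
WREG32(R_0000F0_RBBM_SOFT_RESET, 0);		/* release the blocks */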
+
/*
* GART.
*/
@@ -454,7 +526,6 @@ int rs600_mc_wait_for_idle(struct radeon_device *rdev)
void rs600_gpu_init(struct radeon_device *rdev)
{
- r100_hdp_reset(rdev);
r420_pipes_init(rdev);
/* Wait for mc idle */
if (rs600_mc_wait_for_idle(rdev))
@@ -601,7 +672,7 @@ int rs600_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
rv515_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -664,7 +735,7 @@ int rs600_init(struct radeon_device *rdev)
return -EINVAL;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h
index e52d2695510b..08c4bebd3011 100644
--- a/drivers/gpu/drm/radeon/rs600d.h
+++ b/drivers/gpu/drm/radeon/rs600d.h
@@ -178,6 +178,52 @@
#define S_000074_MC_IND_DATA(x) (((x) & 0xFFFFFFFF) << 0)
#define G_000074_MC_IND_DATA(x) (((x) >> 0) & 0xFFFFFFFF)
#define C_000074_MC_IND_DATA 0x00000000
+#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
+#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
+#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
+#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
+#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
+#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
+#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
+#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2)
+#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1)
+#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB
+#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
+#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
+#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
+#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
+#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
+#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
+#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
+#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
+#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
+#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
+#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
+#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
+#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
+#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
+#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
+#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
+#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
+#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
+#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
+#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
+#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
+#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
+#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
+#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
+#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
+#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
+#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
+#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
+#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
+#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
+#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13)
+#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1)
+#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF
+#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14)
+#define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1)
+#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF
#define R_000134_HDP_FB_LOCATION 0x000134
#define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0)
#define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF)
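
These generated register headers follow one convention: S_* shifts a field value into place, G_* extracts it, and C_* is the mask that clears it. A hedged read-modify-write sketch using the new soft-reset field definitions:

u32 reg = RREG32(R_0000F0_RBBM_SOFT_RESET);

if (G_0000F0_SOFT_RESET_CP(reg))	/* field currently set? */
	reg &= C_0000F0_SOFT_RESET_CP;	/* clear it ... */
reg |= S_0000F0_SOFT_RESET_MC(1);	/* ... and request an MC reset instead */
WREG32(R_0000F0_RBBM_SOFT_RESET, reg);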
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index bbf3da790fd5..56a0aec84af2 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -48,8 +48,6 @@ static int rs690_mc_wait_for_idle(struct radeon_device *rdev)
static void rs690_gpu_init(struct radeon_device *rdev)
{
- /* FIXME: HDP same place on rs690 ? */
- r100_hdp_reset(rdev);
/* FIXME: is this correct ? */
r420_pipes_init(rdev);
if (rs690_mc_wait_for_idle(rdev)) {
@@ -653,7 +651,7 @@ int rs690_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
rv515_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -717,7 +715,7 @@ int rs690_init(struct radeon_device *rdev)
return -EINVAL;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 9035121f4b58..c513473d72ae 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -147,16 +147,11 @@ void rv515_gpu_init(struct radeon_device *rdev)
{
unsigned pipe_select_current, gb_pipe_select, tmp;
- r100_hdp_reset(rdev);
- r100_rb2d_reset(rdev);
-
if (r100_gui_wait_for_idle(rdev)) {
printk(KERN_WARNING "Failed to wait GUI idle while "
"reseting GPU. Bad things might happen.\n");
}
-
rv515_vga_render_disable(rdev);
-
r420_pipes_init(rdev);
gb_pipe_select = RREG32(0x402C);
tmp = RREG32(0x170C);
@@ -174,91 +169,6 @@ void rv515_gpu_init(struct radeon_device *rdev)
}
}
-int rv515_ga_reset(struct radeon_device *rdev)
-{
- uint32_t tmp;
- bool reinit_cp;
- int i;
-
- reinit_cp = rdev->cp.ready;
- rdev->cp.ready = false;
- for (i = 0; i < rdev->usec_timeout; i++) {
- WREG32(CP_CSQ_MODE, 0);
- WREG32(CP_CSQ_CNTL, 0);
- WREG32(RBBM_SOFT_RESET, 0x32005);
- (void)RREG32(RBBM_SOFT_RESET);
- udelay(200);
- WREG32(RBBM_SOFT_RESET, 0);
- /* Wait to prevent race in RBBM_STATUS */
- mdelay(1);
- tmp = RREG32(RBBM_STATUS);
- if (tmp & ((1 << 20) | (1 << 26))) {
- DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)\n", tmp);
- /* GA still busy soft reset it */
- WREG32(0x429C, 0x200);
- WREG32(VAP_PVS_STATE_FLUSH_REG, 0);
- WREG32(0x43E0, 0);
- WREG32(0x43E4, 0);
- WREG32(0x24AC, 0);
- }
- /* Wait to prevent race in RBBM_STATUS */
- mdelay(1);
- tmp = RREG32(RBBM_STATUS);
- if (!(tmp & ((1 << 20) | (1 << 26)))) {
- break;
- }
- }
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = RREG32(RBBM_STATUS);
- if (!(tmp & ((1 << 20) | (1 << 26)))) {
- DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n",
- tmp);
- DRM_INFO("GA_IDLE=0x%08X\n", RREG32(0x425C));
- DRM_INFO("RB3D_RESET_STATUS=0x%08X\n", RREG32(0x46f0));
- DRM_INFO("ISYNC_CNTL=0x%08X\n", RREG32(0x1724));
- if (reinit_cp) {
- return r100_cp_init(rdev, rdev->cp.ring_size);
- }
- return 0;
- }
- DRM_UDELAY(1);
- }
- tmp = RREG32(RBBM_STATUS);
- DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
- return -1;
-}
-
-int rv515_gpu_reset(struct radeon_device *rdev)
-{
- uint32_t status;
-
- /* reset order likely matter */
- status = RREG32(RBBM_STATUS);
- /* reset HDP */
- r100_hdp_reset(rdev);
- /* reset rb2d */
- if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
- r100_rb2d_reset(rdev);
- }
- /* reset GA */
- if (status & ((1 << 20) | (1 << 26))) {
- rv515_ga_reset(rdev);
- }
- /* reset CP */
- status = RREG32(RBBM_STATUS);
- if (status & (1 << 16)) {
- r100_cp_reset(rdev);
- }
- /* Check if GPU is idle */
- status = RREG32(RBBM_STATUS);
- if (status & (1 << 31)) {
- DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
- return -1;
- }
- DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
- return 0;
-}
-
static void rv515_vram_get_type(struct radeon_device *rdev)
{
uint32_t tmp;
@@ -335,7 +245,7 @@ static int rv515_debugfs_ga_info(struct seq_file *m, void *data)
tmp = RREG32(0x2140);
seq_printf(m, "VAP_CNTL_STATUS 0x%08x\n", tmp);
- radeon_gpu_reset(rdev);
+ radeon_asic_reset(rdev);
tmp = RREG32(0x425C);
seq_printf(m, "GA_IDLE 0x%08x\n", tmp);
return 0;
@@ -503,7 +413,7 @@ int rv515_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
rv515_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -573,7 +483,7 @@ int rv515_init(struct radeon_device *rdev)
return -EINVAL;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
diff --git a/drivers/gpu/drm/radeon/rv515d.h b/drivers/gpu/drm/radeon/rv515d.h
index fc216e49384d..590309a710b1 100644
--- a/drivers/gpu/drm/radeon/rv515d.h
+++ b/drivers/gpu/drm/radeon/rv515d.h
@@ -217,6 +217,52 @@
#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
/* Registers */
+#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
+#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
+#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
+#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
+#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
+#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
+#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
+#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2)
+#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1)
+#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB
+#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
+#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
+#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
+#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
+#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
+#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
+#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
+#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
+#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
+#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
+#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
+#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
+#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
+#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
+#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
+#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
+#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
+#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
+#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
+#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
+#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
+#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
+#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
+#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
+#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
+#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
+#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
+#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
+#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
+#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
+#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13)
+#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1)
+#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF
+#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14)
+#define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1)
+#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF
#define R_0000F8_CONFIG_MEMSIZE 0x0000F8
#define S_0000F8_CONFIG_MEMSIZE(x) (((x) & 0xFFFFFFFF) << 0)
#define G_0000F8_CONFIG_MEMSIZE(x) (((x) >> 0) & 0xFFFFFFFF)
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 97958a64df1a..a74683e18612 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -237,7 +237,6 @@ void r700_cp_stop(struct radeon_device *rdev)
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
}
-
static int rv770_cp_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
@@ -272,6 +271,11 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
return 0;
}
+void r700_cp_fini(struct radeon_device *rdev)
+{
+ r700_cp_stop(rdev);
+ radeon_ring_fini(rdev);
+}
/*
* Core functions
@@ -906,23 +910,12 @@ int rv770_mc_init(struct radeon_device *rdev)
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.visible_vram_size = rdev->mc.aper_size;
- /* FIXME remove this once we support unmappable VRAM */
- if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
- rdev->mc.mc_vram_size = rdev->mc.aper_size;
- rdev->mc.real_vram_size = rdev->mc.aper_size;
- }
r600_vram_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
return 0;
}
-int rv770_gpu_reset(struct radeon_device *rdev)
-{
- /* FIXME: implement any rv770 specific bits */
- return r600_gpu_reset(rdev);
-}
-
static int rv770_startup(struct radeon_device *rdev)
{
int r;
@@ -1132,7 +1125,7 @@ int rv770_init(struct radeon_device *rdev)
r = rv770_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
- r600_cp_fini(rdev);
+ r700_cp_fini(rdev);
r600_wb_fini(rdev);
r600_irq_fini(rdev);
radeon_irq_kms_fini(rdev);
@@ -1166,7 +1159,7 @@ void rv770_fini(struct radeon_device *rdev)
{
radeon_pm_fini(rdev);
r600_blit_fini(rdev);
- r600_cp_fini(rdev);
+ r700_cp_fini(rdev);
r600_wb_fini(rdev);
r600_irq_fini(rdev);
radeon_irq_kms_fini(rdev);
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index 1e138f5bae09..4256e2006476 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -4,6 +4,6 @@
ccflags-y := -Iinclude/drm
ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \
- ttm_object.o ttm_lock.o ttm_execbuf_util.o
+ ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o
obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 0e3754a3a303..3b51f93f6045 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -79,8 +79,6 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type);
printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags);
printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset);
- printk(KERN_ERR TTM_PFX " io_offset: 0x%08lX\n", man->io_offset);
- printk(KERN_ERR TTM_PFX " io_size: %ld\n", man->io_size);
printk(KERN_ERR TTM_PFX " size: %llu\n", man->size);
printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n",
man->available_caching);
@@ -357,7 +355,8 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem,
- bool evict, bool interruptible, bool no_wait)
+ bool evict, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
@@ -402,12 +401,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
- ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
+ ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
else if (bdev->driver->move)
ret = bdev->driver->move(bo, evict, interruptible,
- no_wait, mem);
+ no_wait_reserve, no_wait_gpu, mem);
else
- ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);
if (ret)
goto out_err;
@@ -606,7 +605,7 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo)
EXPORT_SYMBOL(ttm_bo_unref);
static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
- bool no_wait)
+ bool no_wait_reserve, bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob;
@@ -615,7 +614,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
int ret = 0;
spin_lock(&bo->lock);
- ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+ ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
spin_unlock(&bo->lock);
if (unlikely(ret != 0)) {
@@ -631,6 +630,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
evict_mem = bo->mem;
evict_mem.mm_node = NULL;
+ evict_mem.bus.io_reserved = false;
placement.fpfn = 0;
placement.lpfn = 0;
@@ -638,7 +638,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
placement.num_busy_placement = 0;
bdev->driver->evict_flags(bo, &placement);
ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
- no_wait);
+ no_wait_reserve, no_wait_gpu);
if (ret) {
if (ret != -ERESTARTSYS) {
printk(KERN_ERR TTM_PFX
@@ -650,7 +650,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
}
ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
- no_wait);
+ no_wait_reserve, no_wait_gpu);
if (ret) {
if (ret != -ERESTARTSYS)
printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
@@ -670,7 +670,8 @@ out:
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
uint32_t mem_type,
- bool interruptible, bool no_wait)
+ bool interruptible, bool no_wait_reserve,
+ bool no_wait_gpu)
{
struct ttm_bo_global *glob = bdev->glob;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
@@ -687,11 +688,11 @@ retry:
bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
kref_get(&bo->list_kref);
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+ ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);
if (unlikely(ret == -EBUSY)) {
spin_unlock(&glob->lru_lock);
- if (likely(!no_wait))
+ if (likely(!no_wait_gpu))
ret = ttm_bo_wait_unreserved(bo, interruptible);
kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -713,7 +714,7 @@ retry:
while (put_count--)
kref_put(&bo->list_kref, ttm_bo_ref_bug);
- ret = ttm_bo_evict(bo, interruptible, no_wait);
+ ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
ttm_bo_unreserve(bo);
kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -764,7 +765,9 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
uint32_t mem_type,
struct ttm_placement *placement,
struct ttm_mem_reg *mem,
- bool interruptible, bool no_wait)
+ bool interruptible,
+ bool no_wait_reserve,
+ bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bdev->glob;
@@ -785,7 +788,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
}
spin_unlock(&glob->lru_lock);
ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
- no_wait);
+ no_wait_reserve, no_wait_gpu);
if (unlikely(ret != 0))
return ret;
} while (1);
@@ -855,7 +858,8 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_mem_reg *mem,
- bool interruptible, bool no_wait)
+ bool interruptible, bool no_wait_reserve,
+ bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man;
@@ -952,7 +956,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
}
ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
- interruptible, no_wait);
+ interruptible, no_wait_reserve, no_wait_gpu);
if (ret == 0 && mem->mm_node) {
mem->placement = cur_flags;
mem->mm_node->private = bo;
@@ -978,7 +982,8 @@ EXPORT_SYMBOL(ttm_bo_wait_cpu);
int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
- bool interruptible, bool no_wait)
+ bool interruptible, bool no_wait_reserve,
+ bool no_wait_gpu)
{
struct ttm_bo_global *glob = bo->glob;
int ret = 0;
@@ -992,20 +997,21 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
* instead of doing it here.
*/
spin_lock(&bo->lock);
- ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+ ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
spin_unlock(&bo->lock);
if (ret)
return ret;
mem.num_pages = bo->num_pages;
mem.size = mem.num_pages << PAGE_SHIFT;
mem.page_alignment = bo->mem.page_alignment;
+ mem.bus.io_reserved = false;
/*
* Determine where to move the buffer.
*/
- ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
+ ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
if (ret)
goto out_unlock;
- ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
+ ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
out_unlock:
if (ret && mem.mm_node) {
spin_lock(&glob->lru_lock);
@@ -1039,7 +1045,8 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
- bool interruptible, bool no_wait)
+ bool interruptible, bool no_wait_reserve,
+ bool no_wait_gpu)
{
int ret;
@@ -1054,7 +1061,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
*/
ret = ttm_bo_mem_compat(placement, &bo->mem);
if (ret < 0) {
- ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
+ ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
if (ret)
return ret;
} else {
@@ -1153,6 +1160,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
bo->mem.num_pages = bo->num_pages;
bo->mem.mm_node = NULL;
bo->mem.page_alignment = page_alignment;
+ bo->mem.bus.io_reserved = false;
bo->buffer_start = buffer_start & PAGE_MASK;
bo->priv_flags = 0;
bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
@@ -1175,7 +1183,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
goto out_err;
}
- ret = ttm_bo_validate(bo, placement, interruptible, false);
+ ret = ttm_bo_validate(bo, placement, interruptible, false, false);
if (ret)
goto out_err;
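
Splitting no_wait into no_wait_reserve and no_wait_gpu changes every exported entry point, so callers grow one boolean in lockstep. A hedged before/after sketch for a driver call-site (the bo and placement variables are illustrative):

/* before this patch: a single no_wait flag */
ret = ttm_bo_validate(bo, placement, false /*interruptible*/, false /*no_wait*/);

/* after: reserve waiting and GPU waiting are controlled independently */
ret = ttm_bo_validate(bo, placement, false /*interruptible*/,
		      false /*no_wait_reserve*/, false /*no_wait_gpu*/);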
@@ -1249,7 +1257,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
spin_lock(&glob->lru_lock);
while (!list_empty(&man->lru)) {
spin_unlock(&glob->lru_lock);
- ret = ttm_mem_evict_first(bdev, mem_type, false, false);
+ ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
if (ret) {
if (allow_errors) {
return ret;
@@ -1553,26 +1561,6 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
return true;
}
-int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem,
- unsigned long *bus_base,
- unsigned long *bus_offset, unsigned long *bus_size)
-{
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-
- *bus_size = 0;
- if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
- return -EINVAL;
-
- if (ttm_mem_reg_is_pci(bdev, mem)) {
- *bus_offset = mem->mm_node->start << PAGE_SHIFT;
- *bus_size = mem->num_pages << PAGE_SHIFT;
- *bus_base = man->io_offset;
- }
-
- return 0;
-}
-
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -1581,8 +1569,8 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
if (!bdev->dev_mapping)
return;
-
unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
+ ttm_mem_io_free(bdev, &bo->mem);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);
@@ -1811,7 +1799,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
evict_mem.mem_type = TTM_PL_SYSTEM;
ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
- false, false);
+ false, false, false);
if (unlikely(ret != 0))
goto out;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index d764e82e799b..13012a1f1486 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -50,7 +50,8 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
}
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
- bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+ bool evict, bool no_wait_reserve,
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
struct ttm_tt *ttm = bo->ttm;
struct ttm_mem_reg *old_mem = &bo->mem;
@@ -81,30 +82,51 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
+int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+ int ret;
+
+ if (!mem->bus.io_reserved) {
+ mem->bus.io_reserved = true;
+ ret = bdev->driver->io_mem_reserve(bdev, mem);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+ return 0;
+}
+
+void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+ if (bdev->driver->io_mem_reserve) {
+ if (mem->bus.io_reserved) {
+ mem->bus.io_reserved = false;
+ bdev->driver->io_mem_free(bdev, mem);
+ }
+ }
+}
+
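ttm_mem_io_reserve()/ttm_mem_io_free() bracket every use of a buffer's I/O aperture and are called from the ioremap, kmap and fault paths below; drivers normally do not call them directly. A minimal sketch of the pairing under that assumption (the function name is made up):

static void __iomem *map_aperture_sketch(struct ttm_bo_device *bdev,
					 struct ttm_buffer_object *bo)
{
	if (ttm_mem_io_reserve(bdev, &bo->mem))
		return NULL;
	if (!bo->mem.bus.is_iomem) {
		ttm_mem_io_free(bdev, &bo->mem);
		return NULL;		/* backed by system pages instead */
	}
	return ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset,
			       bo->mem.bus.size);
}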
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
void **virtual)
{
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
- unsigned long bus_offset;
- unsigned long bus_size;
- unsigned long bus_base;
int ret;
void *addr;
*virtual = NULL;
- ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
- if (ret || bus_size == 0)
+ ret = ttm_mem_io_reserve(bdev, mem);
+ if (ret || !mem->bus.is_iomem)
return ret;
- if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
- addr = (void *)(((u8 *) man->io_addr) + bus_offset);
- else {
+ if (mem->bus.addr) {
+ addr = mem->bus.addr;
+ } else {
if (mem->placement & TTM_PL_FLAG_WC)
- addr = ioremap_wc(bus_base + bus_offset, bus_size);
+ addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
else
- addr = ioremap_nocache(bus_base + bus_offset, bus_size);
- if (!addr)
+ addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
+ if (!addr) {
+ ttm_mem_io_free(bdev, mem);
return -ENOMEM;
+ }
}
*virtual = addr;
return 0;
@@ -117,8 +139,9 @@ void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
man = &bdev->man[mem->mem_type];
- if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
+ if (virtual && mem->bus.addr == NULL)
iounmap(virtual);
+ ttm_mem_io_free(bdev, mem);
}
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -208,7 +231,8 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
}
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
- bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+ bool evict, bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
@@ -369,26 +393,23 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
EXPORT_SYMBOL(ttm_io_prot);
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
- unsigned long bus_base,
- unsigned long bus_offset,
- unsigned long bus_size,
+ unsigned long offset,
+ unsigned long size,
struct ttm_bo_kmap_obj *map)
{
- struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_reg *mem = &bo->mem;
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
- if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
+ if (bo->mem.bus.addr) {
map->bo_kmap_type = ttm_bo_map_premapped;
- map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
+ map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
} else {
map->bo_kmap_type = ttm_bo_map_iomap;
if (mem->placement & TTM_PL_FLAG_WC)
- map->virtual = ioremap_wc(bus_base + bus_offset,
- bus_size);
+ map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
+ size);
else
- map->virtual = ioremap_nocache(bus_base + bus_offset,
- bus_size);
+ map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
+ size);
}
return (!map->virtual) ? -ENOMEM : 0;
}
@@ -441,13 +462,12 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
unsigned long start_page, unsigned long num_pages,
struct ttm_bo_kmap_obj *map)
{
+ unsigned long offset, size;
int ret;
- unsigned long bus_base;
- unsigned long bus_offset;
- unsigned long bus_size;
BUG_ON(!list_empty(&bo->swap));
map->virtual = NULL;
+ map->bo = bo;
if (num_pages > bo->num_pages)
return -EINVAL;
if (start_page > bo->num_pages)
@@ -456,16 +476,15 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
return -EPERM;
#endif
- ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
- &bus_offset, &bus_size);
+ ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
if (ret)
return ret;
- if (bus_size == 0) {
+ if (!bo->mem.bus.is_iomem) {
return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
} else {
- bus_offset += start_page << PAGE_SHIFT;
- bus_size = num_pages << PAGE_SHIFT;
- return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
+ offset = start_page << PAGE_SHIFT;
+ size = num_pages << PAGE_SHIFT;
+ return ttm_bo_ioremap(bo, offset, size, map);
}
}
EXPORT_SYMBOL(ttm_bo_kmap);
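
Callers of ttm_bo_kmap() are unchanged by this rework; only the internals now go through ttm_mem_io_reserve(). A hedged usage sketch for an already-reserved buffer object:

struct ttm_bo_kmap_obj map;
bool is_iomem;
void *ptr;
int r;

r = ttm_bo_kmap(bo, 0, 1, &map);	/* map the first page */
if (r)
	return r;
ptr = ttm_kmap_obj_virtual(&map, &is_iomem);
/* ... CPU access through ptr, iomem accessors if is_iomem ... */
ttm_bo_kunmap(&map);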
@@ -477,6 +496,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
switch (map->bo_kmap_type) {
case ttm_bo_map_iomap:
iounmap(map->virtual);
+ ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
break;
case ttm_bo_map_vmap:
vunmap(map->virtual);
@@ -494,39 +514,11 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
}
EXPORT_SYMBOL(ttm_bo_kunmap);
-int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
- unsigned long dst_offset,
- unsigned long *pfn, pgprot_t *prot)
-{
- struct ttm_mem_reg *mem = &bo->mem;
- struct ttm_bo_device *bdev = bo->bdev;
- unsigned long bus_offset;
- unsigned long bus_size;
- unsigned long bus_base;
- int ret;
- ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
- &bus_size);
- if (ret)
- return -EINVAL;
- if (bus_size != 0)
- *pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
- else
- if (!bo->ttm)
- return -EINVAL;
- else
- *pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
- dst_offset >>
- PAGE_SHIFT));
- *prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
- PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);
-
- return 0;
-}
-
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
void *sync_obj,
void *sync_obj_arg,
- bool evict, bool no_wait,
+ bool evict, bool no_wait_reserve,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 668dbe8b8dd3..fe6cb77899f4 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -74,9 +74,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
vma->vm_private_data;
struct ttm_bo_device *bdev = bo->bdev;
- unsigned long bus_base;
- unsigned long bus_offset;
- unsigned long bus_size;
unsigned long page_offset;
unsigned long page_last;
unsigned long pfn;
@@ -84,7 +81,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
struct page *page;
int ret;
int i;
- bool is_iomem;
unsigned long address = (unsigned long)vmf->virtual_address;
int retval = VM_FAULT_NOPAGE;
@@ -101,8 +97,21 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return VM_FAULT_NOPAGE;
}
- if (bdev->driver->fault_reserve_notify)
- bdev->driver->fault_reserve_notify(bo);
+ if (bdev->driver->fault_reserve_notify) {
+ ret = bdev->driver->fault_reserve_notify(bo);
+ switch (ret) {
+ case 0:
+ break;
+ case -EBUSY:
+ set_need_resched();
+ case -ERESTARTSYS:
+ retval = VM_FAULT_NOPAGE;
+ goto out_unlock;
+ default:
+ retval = VM_FAULT_SIGBUS;
+ goto out_unlock;
+ }
+ }
/*
* Wait for buffer data in transit, due to a pipelined
@@ -122,15 +131,12 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
spin_unlock(&bo->lock);
- ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset,
- &bus_size);
- if (unlikely(ret != 0)) {
+ ret = ttm_mem_io_reserve(bdev, &bo->mem);
+ if (ret) {
retval = VM_FAULT_SIGBUS;
goto out_unlock;
}
- is_iomem = (bus_size != 0);
-
page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
bo->vm_node->start - vma->vm_pgoff;
page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
@@ -154,8 +160,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
* vma->vm_page_prot when the object changes caching policy, with
* the correct locks held.
*/
-
- if (is_iomem) {
+ if (bo->mem.bus.is_iomem) {
vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
vma->vm_page_prot);
} else {
@@ -171,10 +176,8 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
*/
for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
-
- if (is_iomem)
- pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
- page_offset;
+ if (bo->mem.bus.is_iomem)
+ pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
else {
page = ttm_tt_get_page(ttm, page_offset);
if (unlikely(!page && i == 0)) {
@@ -198,7 +201,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
retval =
(ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
goto out_unlock;
-
}
address += PAGE_SIZE;
@@ -221,8 +223,7 @@ static void ttm_bo_vm_open(struct vm_area_struct *vma)
static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
- struct ttm_buffer_object *bo =
- (struct ttm_buffer_object *)vma->vm_private_data;
+ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
ttm_bo_unref(&bo);
vma->vm_private_data = NULL;
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 801b702566e6..e70ddd82dc02 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -27,6 +27,7 @@
#include "ttm/ttm_memory.h"
#include "ttm/ttm_module.h"
+#include "ttm/ttm_page_alloc.h"
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/wait.h>
@@ -393,6 +394,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
"Zone %7s: Available graphics memory: %llu kiB.\n",
zone->name, (unsigned long long) zone->max_mem >> 10);
}
+ ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
return 0;
out_no_zone:
ttm_mem_global_release(glob);
@@ -405,6 +407,9 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
unsigned int i;
struct ttm_mem_zone *zone;
+ /* let the page allocator first stop the shrink work. */
+ ttm_page_alloc_fini();
+
flush_workqueue(glob->swap_queue);
destroy_workqueue(glob->swap_queue);
glob->swap_queue = NULL;
@@ -412,7 +417,7 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
zone = glob->zones[i];
kobject_del(&zone->kobj);
kobject_put(&zone->kobj);
- }
+ }
kobject_del(&glob->kobj);
kobject_put(&glob->kobj);
}
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
new file mode 100644
index 000000000000..0d9a42c2394f
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -0,0 +1,845 @@
+/*
+ * Copyright (c) Red Hat Inc.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>
+ * Jerome Glisse <jglisse@redhat.com>
+ * Pauli Nieminen <suokkos@gmail.com>
+ */
+
+/* simple list based uncached page pool
+ * - Pool collects recently freed pages for reuse
+ * - Use page->lru to keep a free list
+ * - doesn't track currently in use pages
+ */
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/highmem.h>
+#include <linux/mm_types.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/seq_file.h> /* for seq_printf */
+#include <linux/slab.h>
+
+#include <asm/atomic.h>
+#include <asm/agp.h>
+
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_page_alloc.h"
+
+
+#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
+#define SMALL_ALLOCATION 16
+#define FREE_ALL_PAGES (~0U)
+/* times are in msecs */
+#define PAGE_FREE_INTERVAL 1000
+
+/**
+ * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
+ *
+ * @lock: Protects the shared pool from concurrent access. Must be used with
+ * irqsave/irqrestore variants because the pool allocator may be called from
+ * delayed work.
+ * @fill_lock: Prevent concurrent calls to fill.
+ * @list: Pool of free uc/wc pages for fast reuse.
+ * @gfp_flags: Flags to pass for alloc_page.
+ * @npages: Number of pages in pool.
+ */
+struct ttm_page_pool {
+ spinlock_t lock;
+ bool fill_lock;
+ struct list_head list;
+ int gfp_flags;
+ unsigned npages;
+ char *name;
+ unsigned long nfrees;
+ unsigned long nrefills;
+};
+
+/**
+ * Limits for the pool. They are handled without locks because the only place
+ * where they may change is the sysfs store. They won't have an immediate
+ * effect anyway, so forcing serialization of accesses to them is pointless.
+ */
+
+struct ttm_pool_opts {
+ unsigned alloc_size;
+ unsigned max_size;
+ unsigned small;
+};
+
+#define NUM_POOLS 4
+
+/**
+ * struct ttm_pool_manager - Holds memory pools for fast allocation
+ *
+ * Manager is a read-only object for the pool code, so it doesn't need locking.
+ *
+ * @free_interval: minimum number of jiffies between freeing pages from a pool.
+ * @page_alloc_inited: reference counting for pool allocation.
+ * @work: Work that is used to shrink the pool. Work is only run when there are
+ * some pages to free.
+ * @small_allocation: Limit in number of pages below which an allocation counts
+ * as small.
+ *
+ * @pools: All pool objects in use.
+ **/
+struct ttm_pool_manager {
+ struct kobject kobj;
+ struct shrinker mm_shrink;
+ atomic_t page_alloc_inited;
+ struct ttm_pool_opts options;
+
+ union {
+ struct ttm_page_pool pools[NUM_POOLS];
+ struct {
+ struct ttm_page_pool wc_pool;
+ struct ttm_page_pool uc_pool;
+ struct ttm_page_pool wc_pool_dma32;
+ struct ttm_page_pool uc_pool_dma32;
+ } ;
+ };
+};
+
+static struct attribute ttm_page_pool_max = {
+ .name = "pool_max_size",
+ .mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_small = {
+ .name = "pool_small_allocation",
+ .mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_alloc_size = {
+ .name = "pool_allocation_size",
+ .mode = S_IRUGO | S_IWUSR
+};
+
+static struct attribute *ttm_pool_attrs[] = {
+ &ttm_page_pool_max,
+ &ttm_page_pool_small,
+ &ttm_page_pool_alloc_size,
+ NULL
+};
+
+static void ttm_pool_kobj_release(struct kobject *kobj)
+{
+ struct ttm_pool_manager *m =
+ container_of(kobj, struct ttm_pool_manager, kobj);
+ (void)m;
+}
+
+static ssize_t ttm_pool_store(struct kobject *kobj,
+ struct attribute *attr, const char *buffer, size_t size)
+{
+ struct ttm_pool_manager *m =
+ container_of(kobj, struct ttm_pool_manager, kobj);
+ int chars;
+ unsigned val;
+ chars = sscanf(buffer, "%u", &val);
+ if (chars == 0)
+ return size;
+
+ /* Convert kb to number of pages */
+ val = val / (PAGE_SIZE >> 10);
+
+ if (attr == &ttm_page_pool_max)
+ m->options.max_size = val;
+ else if (attr == &ttm_page_pool_small)
+ m->options.small = val;
+ else if (attr == &ttm_page_pool_alloc_size) {
+ if (val > NUM_PAGES_TO_ALLOC*8) {
+ printk(KERN_ERR "[ttm] Setting allocation size to %lu "
+ "is not allowed. Recomended size is "
+ "%lu\n",
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+ return size;
+ } else if (val > NUM_PAGES_TO_ALLOC) {
+ printk(KERN_WARNING "[ttm] Setting allocation size to "
+ "larger than %lu is not recomended.\n",
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+ }
+ m->options.alloc_size = val;
+ }
+
+ return size;
+}
+
+static ssize_t ttm_pool_show(struct kobject *kobj,
+ struct attribute *attr, char *buffer)
+{
+ struct ttm_pool_manager *m =
+ container_of(kobj, struct ttm_pool_manager, kobj);
+ unsigned val = 0;
+
+ if (attr == &ttm_page_pool_max)
+ val = m->options.max_size;
+ else if (attr == &ttm_page_pool_small)
+ val = m->options.small;
+ else if (attr == &ttm_page_pool_alloc_size)
+ val = m->options.alloc_size;
+
+ val = val * (PAGE_SIZE >> 10);
+
+ return snprintf(buffer, PAGE_SIZE, "%u\n", val);
+}
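
The store/show pair above round-trips through kilobytes rather than pages. A worked example, assuming 4 KiB pages:

/* store: user writes "8192" (kiB) -> 8192 / (PAGE_SIZE >> 10) = 8192 / 4 = 2048 pages */
/* show:  2048 pages * (PAGE_SIZE >> 10) = 8192, printed back as kiB                   */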
+
+static const struct sysfs_ops ttm_pool_sysfs_ops = {
+ .show = &ttm_pool_show,
+ .store = &ttm_pool_store,
+};
+
+static struct kobj_type ttm_pool_kobj_type = {
+ .release = &ttm_pool_kobj_release,
+ .sysfs_ops = &ttm_pool_sysfs_ops,
+ .default_attrs = ttm_pool_attrs,
+};
+
+static struct ttm_pool_manager _manager = {
+ .page_alloc_inited = ATOMIC_INIT(0)
+};
+
+#ifndef CONFIG_X86
+static int set_pages_array_wb(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ unmap_page_from_agp(pages[i]);
+#endif
+ return 0;
+}
+
+static int set_pages_array_wc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ map_page_into_agp(pages[i]);
+#endif
+ return 0;
+}
+
+static int set_pages_array_uc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ map_page_into_agp(pages[i]);
+#endif
+ return 0;
+}
+#endif
+
+/**
+ * Select the right pool for the requested caching state and ttm flags. */
+static struct ttm_page_pool *ttm_get_pool(int flags,
+ enum ttm_caching_state cstate)
+{
+ int pool_index;
+
+ if (cstate == tt_cached)
+ return NULL;
+
+ if (cstate == tt_wc)
+ pool_index = 0x0;
+ else
+ pool_index = 0x1;
+
+ if (flags & TTM_PAGE_FLAG_DMA32)
+ pool_index |= 0x2;
+
+ return &_manager.pools[pool_index];
+}
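
Worked examples of the index computed above (the union in struct ttm_pool_manager lays the pools out in exactly this order):

/*
 *   tt_wc,       !TTM_PAGE_FLAG_DMA32  -> 0x0 -> wc_pool
 *   tt_uncached, !TTM_PAGE_FLAG_DMA32  -> 0x1 -> uc_pool
 *   tt_wc,        TTM_PAGE_FLAG_DMA32  -> 0x2 -> wc_pool_dma32
 *   tt_uncached,  TTM_PAGE_FLAG_DMA32  -> 0x3 -> uc_pool_dma32
 *   tt_cached                          -> NULL (plain alloc_page/__free_page)
 */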
+
+/* set memory back to wb and free the pages. */
+static void ttm_pages_put(struct page *pages[], unsigned npages)
+{
+ unsigned i;
+ if (set_pages_array_wb(pages, npages))
+ printk(KERN_ERR "[ttm] Failed to set %d pages to wb!\n",
+ npages);
+ for (i = 0; i < npages; ++i)
+ __free_page(pages[i]);
+}
+
+static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
+ unsigned freed_pages)
+{
+ pool->npages -= freed_pages;
+ pool->nfrees += freed_pages;
+}
+
+/**
+ * Free pages from pool.
+ *
+ * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
+ * pages in one go.
+ *
+ * @pool: pool to free the pages from
+ * @nr_free: number of pages to free; pass FREE_ALL_PAGES to free everything
+ **/
+static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
+{
+ unsigned long irq_flags;
+ struct page *p;
+ struct page **pages_to_free;
+ unsigned freed_pages = 0,
+ npages_to_free = nr_free;
+
+ if (NUM_PAGES_TO_ALLOC < nr_free)
+ npages_to_free = NUM_PAGES_TO_ALLOC;
+
+ pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
+ GFP_KERNEL);
+ if (!pages_to_free) {
+ printk(KERN_ERR "Failed to allocate memory for pool free operation.\n");
+ return 0;
+ }
+
+restart:
+ spin_lock_irqsave(&pool->lock, irq_flags);
+
+ list_for_each_entry_reverse(p, &pool->list, lru) {
+ if (freed_pages >= npages_to_free)
+ break;
+
+ pages_to_free[freed_pages++] = p;
+ /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
+ if (freed_pages >= NUM_PAGES_TO_ALLOC) {
+ /* remove range of pages from the pool */
+ __list_del(p->lru.prev, &pool->list);
+
+ ttm_pool_update_free_locked(pool, freed_pages);
+ /**
+ * Because changing page caching is costly
+ * we unlock the pool to prevent stalling.
+ */
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+ ttm_pages_put(pages_to_free, freed_pages);
+ if (likely(nr_free != FREE_ALL_PAGES))
+ nr_free -= freed_pages;
+
+ if (NUM_PAGES_TO_ALLOC >= nr_free)
+ npages_to_free = nr_free;
+ else
+ npages_to_free = NUM_PAGES_TO_ALLOC;
+
+ freed_pages = 0;
+
+ /* pages still left to free, so restart the processing */
+ if (nr_free)
+ goto restart;
+
+ /* Not allowed to fall through or break because the
+ * following code is inside the spinlock while we are
+ * outside it here.
+ */
+ goto out;
+
+ }
+ }
+
+ /* remove range of pages from the pool */
+ if (freed_pages) {
+ __list_del(&p->lru, &pool->list);
+
+ ttm_pool_update_free_locked(pool, freed_pages);
+ nr_free -= freed_pages;
+ }
+
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+ if (freed_pages)
+ ttm_pages_put(pages_to_free, freed_pages);
+out:
+ kfree(pages_to_free);
+ return nr_free;
+}
+
+/* Get a good estimate of how many pages are free in the pools */
+static int ttm_pool_get_num_unused_pages(void)
+{
+ unsigned i;
+ int total = 0;
+ for (i = 0; i < NUM_POOLS; ++i)
+ total += _manager.pools[i].npages;
+
+ return total;
+}
+
+/**
+ * Callback for mm to request the pool to reduce the number of pages held.
+ */
+static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask)
+{
+ static atomic_t start_pool = ATOMIC_INIT(0);
+ unsigned i;
+ unsigned pool_offset = atomic_add_return(1, &start_pool);
+ struct ttm_page_pool *pool;
+
+ pool_offset = pool_offset % NUM_POOLS;
+ /* select start pool in round robin fashion */
+ for (i = 0; i < NUM_POOLS; ++i) {
+ unsigned nr_free = shrink_pages;
+ if (shrink_pages == 0)
+ break;
+ pool = &_manager.pools[(i + pool_offset)%NUM_POOLS];
+ shrink_pages = ttm_page_pool_free(pool, nr_free);
+ }
+ /* return estimated number of unused pages in pool */
+ return ttm_pool_get_num_unused_pages();
+}
+
+static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
+{
+ manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
+ manager->mm_shrink.seeks = 1;
+ register_shrinker(&manager->mm_shrink);
+}
+
+static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
+{
+ unregister_shrinker(&manager->mm_shrink);
+}
+
+static int ttm_set_pages_caching(struct page **pages,
+ enum ttm_caching_state cstate, unsigned cpages)
+{
+ int r = 0;
+ /* Set page caching */
+ switch (cstate) {
+ case tt_uncached:
+ r = set_pages_array_uc(pages, cpages);
+ if (r)
+ printk(KERN_ERR "[ttm] Failed to set %d pages to uc!\n",
+ cpages);
+ break;
+ case tt_wc:
+ r = set_pages_array_wc(pages, cpages);
+ if (r)
+ printk(KERN_ERR "[ttm] Failed to set %d pages to wc!\n",
+ cpages);
+ break;
+ default:
+ break;
+ }
+ return r;
+}
+
+/**
+ * Free the pages that failed to change caching state. If there are any
+ * pages that have already changed their caching state, put them back into
+ * the pool.
+ */
+static void ttm_handle_caching_state_failure(struct list_head *pages,
+ int ttm_flags, enum ttm_caching_state cstate,
+ struct page **failed_pages, unsigned cpages)
+{
+ unsigned i;
+ /* Failed pages have to be freed */
+ for (i = 0; i < cpages; ++i) {
+ list_del(&failed_pages[i]->lru);
+ __free_page(failed_pages[i]);
+ }
+}
+
+/**
+ * Allocate new pages with correct caching.
+ *
+ * This function is reentrant if the caller updates count depending on the
+ * number of pages returned in the pages list.
+ */
+static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
+ int ttm_flags, enum ttm_caching_state cstate, unsigned count)
+{
+ struct page **caching_array;
+ struct page *p;
+ int r = 0;
+ unsigned i, cpages;
+ unsigned max_cpages = min(count,
+ (unsigned)(PAGE_SIZE/sizeof(struct page *)));
+
+ /* allocate array for page caching change */
+ caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
+
+ if (!caching_array) {
+ printk(KERN_ERR "[ttm] unable to allocate table for new pages.");
+ return -ENOMEM;
+ }
+
+ for (i = 0, cpages = 0; i < count; ++i) {
+ p = alloc_page(gfp_flags);
+
+ if (!p) {
+ printk(KERN_ERR "[ttm] unable to get page %u\n", i);
+
+ /* store already allocated pages in the pool after
+ * setting the caching state */
+ if (cpages) {
+ r = ttm_set_pages_caching(caching_array, cstate, cpages);
+ if (r)
+ ttm_handle_caching_state_failure(pages,
+ ttm_flags, cstate,
+ caching_array, cpages);
+ }
+ r = -ENOMEM;
+ goto out;
+ }
+
+#ifdef CONFIG_HIGHMEM
+ /* gfp flags of a highmem page should never be dma32, so we
+ * should be fine in that case
+ */
+ if (!PageHighMem(p))
+#endif
+ {
+ caching_array[cpages++] = p;
+ if (cpages == max_cpages) {
+
+ r = ttm_set_pages_caching(caching_array,
+ cstate, cpages);
+ if (r) {
+ ttm_handle_caching_state_failure(pages,
+ ttm_flags, cstate,
+ caching_array, cpages);
+ goto out;
+ }
+ cpages = 0;
+ }
+ }
+
+ list_add(&p->lru, pages);
+ }
+
+ if (cpages) {
+ r = ttm_set_pages_caching(caching_array, cstate, cpages);
+ if (r)
+ ttm_handle_caching_state_failure(pages,
+ ttm_flags, cstate,
+ caching_array, cpages);
+ }
+out:
+ kfree(caching_array);
+
+ return r;
+}
+
+/**
+ * Fill the given pool if there aren't enough pages and the requested number
+ * of pages is small.
+ */
+static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
+ int ttm_flags, enum ttm_caching_state cstate, unsigned count,
+ unsigned long *irq_flags)
+{
+ struct page *p;
+ int r;
+ unsigned cpages = 0;
+ /**
+ * Only allow one pool fill operation at a time.
+ * If the pool doesn't have enough pages for the allocation, new pages are
+ * allocated from outside the pool.
+ */
+ if (pool->fill_lock)
+ return;
+
+ pool->fill_lock = true;
+
+ /* If the allocation request is small and there are not enough
+ * pages in the pool, fill the pool first */
+ if (count < _manager.options.small
+ && count > pool->npages) {
+ struct list_head new_pages;
+ unsigned alloc_size = _manager.options.alloc_size;
+
+ /**
+ * Can't change page caching if in irqsave context. We have to
+ * drop the pool->lock.
+ */
+ spin_unlock_irqrestore(&pool->lock, *irq_flags);
+
+ INIT_LIST_HEAD(&new_pages);
+ r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
+ cstate, alloc_size);
+ spin_lock_irqsave(&pool->lock, *irq_flags);
+
+ if (!r) {
+ list_splice(&new_pages, &pool->list);
+ ++pool->nrefills;
+ pool->npages += alloc_size;
+ } else {
+ printk(KERN_ERR "[ttm] Failed to fill pool (%p).", pool);
+ /* If we have any pages left put them to the pool. */
+ list_for_each_entry(p, &pool->list, lru) {
+ ++cpages;
+ }
+ list_splice(&new_pages, &pool->list);
+ pool->npages += cpages;
+ }
+
+ }
+ pool->fill_lock = false;
+}
+
+/**
+ * Cut count number of pages from the pool and put them on the return list.
+ *
+ * @return count of pages still to allocate to fill the request.
+ */
+static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
+ struct list_head *pages, int ttm_flags,
+ enum ttm_caching_state cstate, unsigned count)
+{
+ unsigned long irq_flags;
+ struct list_head *p;
+ unsigned i;
+
+ spin_lock_irqsave(&pool->lock, irq_flags);
+ ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);
+
+ if (count >= pool->npages) {
+ /* take all pages from the pool */
+ list_splice_init(&pool->list, pages);
+ count -= pool->npages;
+ pool->npages = 0;
+ goto out;
+ }
+ /* find the last page to include for the requested number of pages. Walk
+ * from whichever end of the list is closer to halve the search space. */
+ if (count <= pool->npages/2) {
+ i = 0;
+ list_for_each(p, &pool->list) {
+ if (++i == count)
+ break;
+ }
+ } else {
+ i = pool->npages + 1;
+ list_for_each_prev(p, &pool->list) {
+ if (--i == count)
+ break;
+ }
+ }
+ /* Cut count number of pages from pool */
+ list_cut_position(pages, &pool->list, p);
+ pool->npages -= count;
+ count = 0;
+out:
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+ return count;
+}
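
A worked example of the split search above, assuming pool->npages == 100:

/*
 *   count == 10 (<= npages/2): walk forward and stop at the 10th entry.
 *   count == 90 (>  npages/2): start at i = 101 and walk backward until
 *                              --i == 90, i.e. 11 steps instead of 90.
 * list_cut_position() then moves everything up to and including that entry
 * onto the caller's pages list.
 */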
+
+/*
+ * On success the pages list will hold count correctly
+ * cached pages.
+ */
+int ttm_get_pages(struct list_head *pages, int flags,
+ enum ttm_caching_state cstate, unsigned count)
+{
+ struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+ struct page *p = NULL;
+ int gfp_flags = 0;
+ int r;
+
+ /* set zero flag for page allocation if required */
+ if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+ gfp_flags |= __GFP_ZERO;
+
+ /* No pool for cached pages */
+ if (pool == NULL) {
+ if (flags & TTM_PAGE_FLAG_DMA32)
+ gfp_flags |= GFP_DMA32;
+ else
+ gfp_flags |= __GFP_HIGHMEM;
+
+ for (r = 0; r < count; ++r) {
+ p = alloc_page(gfp_flags);
+ if (!p) {
+
+ printk(KERN_ERR "[ttm] unable to allocate page.");
+ return -ENOMEM;
+ }
+
+ list_add(&p->lru, pages);
+ }
+ return 0;
+ }
+
+
+ /* combine zero flag to pool flags */
+ gfp_flags |= pool->gfp_flags;
+
+ /* First we take pages from the pool */
+ count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count);
+
+ /* clear the pages coming from the pool if requested */
+ if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
+ list_for_each_entry(p, pages, lru) {
+ clear_page(page_address(p));
+ }
+ }
+
+ /* If the pool didn't have enough pages, allocate new ones. */
+ if (count > 0) {
+ /* ttm_alloc_new_pages doesn't reference pool so we can run
+ * multiple requests in parallel.
+ **/
+ r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count);
+ if (r) {
+ /* If there are any pages in the list, put them back into
+ * the pool. */
+ printk(KERN_ERR "[ttm] Failed to allocate extra pages "
+ "for large request.");
+ ttm_put_pages(pages, 0, flags, cstate);
+ return r;
+ }
+ }
+
+
+ return 0;
+}
+
+/* Put all pages in pages list to correct pool to wait for reuse */
+void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
+ enum ttm_caching_state cstate)
+{
+ unsigned long irq_flags;
+ struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+ struct page *p, *tmp;
+
+ if (pool == NULL) {
+ /* No pool for this memory type so free the pages */
+
+ list_for_each_entry_safe(p, tmp, pages, lru) {
+ __free_page(p);
+ }
+ /* Make the pages list empty */
+ INIT_LIST_HEAD(pages);
+ return;
+ }
+ if (page_count == 0) {
+ list_for_each_entry_safe(p, tmp, pages, lru) {
+ ++page_count;
+ }
+ }
+
+ spin_lock_irqsave(&pool->lock, irq_flags);
+ list_splice_init(pages, &pool->list);
+ pool->npages += page_count;
+ /* Check that we don't go over the pool limit */
+ page_count = 0;
+ if (pool->npages > _manager.options.max_size) {
+ page_count = pool->npages - _manager.options.max_size;
+ /* free at least NUM_PAGES_TO_ALLOC pages
+ * to reduce the number of calls to set_memory_wb */
+ if (page_count < NUM_PAGES_TO_ALLOC)
+ page_count = NUM_PAGES_TO_ALLOC;
+ }
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+ if (page_count)
+ ttm_page_pool_free(pool, page_count);
+}
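A minimal caller sketch, illustrative only and not part of the patch: the function name, the page count and the flag/caching combination are invented, and the TTM headers added/used by this series are assumed to be included. It shows how the two entry points pair up: pages are obtained with ttm_get_pages() and handed back with ttm_put_pages() so they can be reused without re-doing caching transitions.

/* Hypothetical caller, for illustration only. */
static int example_use_pool(void)
{
	struct list_head pages;
	int ret;

	INIT_LIST_HEAD(&pages);

	/* Ask for four zeroed, cached pages; with no pool for cached
	 * memory this falls back to plain alloc_page() internally. */
	ret = ttm_get_pages(&pages, TTM_PAGE_FLAG_ZERO_ALLOC, tt_cached, 4);
	if (ret)
		return ret;

	/* ... map and use the pages ... */

	/* Hand all four pages back; they are either pooled for reuse
	 * or freed, depending on the flags/caching combination. */
	ttm_put_pages(&pages, 4, TTM_PAGE_FLAG_ZERO_ALLOC, tt_cached);
	return 0;
}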
+
+static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
+ char *name)
+{
+ spin_lock_init(&pool->lock);
+ pool->fill_lock = false;
+ INIT_LIST_HEAD(&pool->list);
+ pool->npages = pool->nfrees = 0;
+ pool->gfp_flags = flags;
+ pool->name = name;
+}
+
+int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
+{
+ int ret;
+ if (atomic_add_return(1, &_manager.page_alloc_inited) > 1)
+ return 0;
+
+ printk(KERN_INFO "[ttm] Initializing pool allocator.\n");
+
+ ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, "wc");
+
+ ttm_page_pool_init_locked(&_manager.uc_pool, GFP_HIGHUSER, "uc");
+
+ ttm_page_pool_init_locked(&_manager.wc_pool_dma32, GFP_USER | GFP_DMA32,
+ "wc dma");
+
+ ttm_page_pool_init_locked(&_manager.uc_pool_dma32, GFP_USER | GFP_DMA32,
+ "uc dma");
+
+ _manager.options.max_size = max_pages;
+ _manager.options.small = SMALL_ALLOCATION;
+ _manager.options.alloc_size = NUM_PAGES_TO_ALLOC;
+
+ kobject_init(&_manager.kobj, &ttm_pool_kobj_type);
+ ret = kobject_add(&_manager.kobj, &glob->kobj, "pool");
+ if (unlikely(ret != 0)) {
+ kobject_put(&_manager.kobj);
+ return ret;
+ }
+
+ ttm_pool_mm_shrink_init(&_manager);
+
+ return 0;
+}
+
+void ttm_page_alloc_fini(void)
+{
+ int i;
+
+ if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0)
+ return;
+
+ printk(KERN_INFO "[ttm] Finilizing pool allocator.\n");
+ ttm_pool_mm_shrink_fini(&_manager);
+
+ for (i = 0; i < NUM_POOLS; ++i)
+ ttm_page_pool_free(&_manager.pools[i], FREE_ALL_PAGES);
+
+ kobject_put(&_manager.kobj);
+}
+
+int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
+{
+ struct ttm_page_pool *p;
+ unsigned i;
+ char *h[] = {"pool", "refills", "pages freed", "size"};
+ if (atomic_read(&_manager.page_alloc_inited) == 0) {
+ seq_printf(m, "No pool allocator running.\n");
+ return 0;
+ }
+ seq_printf(m, "%6s %12s %13s %8s\n",
+ h[0], h[1], h[2], h[3]);
+ for (i = 0; i < NUM_POOLS; ++i) {
+ p = &_manager.pools[i];
+
+ seq_printf(m, "%6s %12ld %13ld %8d\n",
+ p->name, p->nrefills,
+ p->nfrees, p->npages);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ttm_page_alloc_debugfs);
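For reference, the seq_printf() format strings above produce a fixed-width table roughly like the following; the pool names come from ttm_page_pool_init_locked(), while the counts and the row order (which depends on _manager.pools[], not shown here) are invented:

  pool      refills   pages freed     size
    wc            3             0      192
    uc            1             0       64
wc dma            0             0        0
uc dma            0             0        0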
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index d5fd5b8faeb3..a7bab87a548b 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -39,6 +39,7 @@
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
+#include "ttm/ttm_page_alloc.h"
static int ttm_tt_swapin(struct ttm_tt *ttm);
@@ -56,21 +57,6 @@ static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
ttm->pages = NULL;
}
-static struct page *ttm_tt_alloc_page(unsigned page_flags)
-{
- gfp_t gfp_flags = GFP_USER;
-
- if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
- gfp_flags |= __GFP_ZERO;
-
- if (page_flags & TTM_PAGE_FLAG_DMA32)
- gfp_flags |= __GFP_DMA32;
- else
- gfp_flags |= __GFP_HIGHMEM;
-
- return alloc_page(gfp_flags);
-}
-
static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
{
int write;
@@ -111,15 +97,21 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
struct page *p;
+ struct list_head h;
struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
int ret;
while (NULL == (p = ttm->pages[index])) {
- p = ttm_tt_alloc_page(ttm->page_flags);
- if (!p)
+ INIT_LIST_HEAD(&h);
+
+ ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1);
+
+ if (ret != 0)
return NULL;
+ p = list_first_entry(&h, struct page, lru);
+
ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
if (unlikely(ret != 0))
goto out_err;
@@ -228,10 +220,10 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
if (ttm->caching_state == c_state)
return 0;
- if (c_state != tt_cached) {
- ret = ttm_tt_populate(ttm);
- if (unlikely(ret != 0))
- return ret;
+ if (ttm->state == tt_unpopulated) {
+ /* Change caching but don't populate */
+ ttm->caching_state = c_state;
+ return 0;
}
if (ttm->caching_state == tt_cached)
@@ -282,13 +274,17 @@ EXPORT_SYMBOL(ttm_tt_set_placement_caching);
static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
int i;
+ unsigned count = 0;
+ struct list_head h;
struct page *cur_page;
struct ttm_backend *be = ttm->be;
+ INIT_LIST_HEAD(&h);
+
if (be)
be->func->clear(be);
- (void)ttm_tt_set_caching(ttm, tt_cached);
for (i = 0; i < ttm->num_pages; ++i) {
+
cur_page = ttm->pages[i];
ttm->pages[i] = NULL;
if (cur_page) {
@@ -298,9 +294,11 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
"Leaking pages.\n");
ttm_mem_global_free_page(ttm->glob->mem_glob,
cur_page);
- __free_page(cur_page);
+ list_add(&cur_page->lru, &h);
+ count++;
}
}
+ ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state);
ttm->state = tt_unpopulated;
ttm->first_himem_page = ttm->num_pages;
ttm->last_lomem_page = -1;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 825ebe3d89d5..c4f5114aee7c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -137,9 +137,6 @@ int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
- struct vmw_private *dev_priv =
- container_of(bdev, struct vmw_private, bdev);
-
switch (type) {
case TTM_PL_SYSTEM:
/* System memory */
@@ -151,11 +148,7 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
case TTM_PL_VRAM:
/* "On-card" video ram */
man->gpu_offset = 0;
- man->io_offset = dev_priv->vram_start;
- man->io_size = dev_priv->vram_size;
- man->flags = TTM_MEMTYPE_FLAG_FIXED |
- TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | TTM_MEMTYPE_FLAG_MAPPABLE;
- man->io_addr = NULL;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_WC;
break;
@@ -193,6 +186,42 @@ static void vmw_swap_notify(struct ttm_buffer_object *bo)
vmw_dmabuf_gmr_unbind(bo);
}
+static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
+
+ mem->bus.addr = NULL;
+ mem->bus.is_iomem = false;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* System memory */
+ return 0;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ mem->bus.base = dev_priv->vram_start;
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
+{
+ return 0;
+}
+
/**
* FIXME: We're using the old vmware polling method to sync.
* Do this with fences instead.
@@ -248,5 +277,8 @@ struct ttm_bo_driver vmw_bo_driver = {
.sync_obj_unref = vmw_sync_obj_unref,
.sync_obj_ref = vmw_sync_obj_ref,
.move_notify = vmw_move_notify,
- .swap_notify = vmw_swap_notify
+ .swap_notify = vmw_swap_notify,
+ .fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
+ .io_mem_reserve = &vmw_ttm_io_mem_reserve,
+ .io_mem_free = &vmw_ttm_io_mem_free,
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 0897359b3e4e..dbd36b8910cf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -570,7 +570,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
* Put BO in VRAM, only if there is space.
*/
- ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false);
+ ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false, false);
if (unlikely(ret == -ERESTARTSYS))
return ret;
@@ -590,7 +590,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
* previous contents.
*/
- ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
+ ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index a93367041cdc..80125ffc4e28 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -628,7 +628,7 @@ int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
if (unlikely(ret != 0))
return ret;
- ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false);
+ ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false, false);
ttm_bo_unreserve(bo);
return ret;
@@ -652,7 +652,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
if (unlikely(ret != 0))
goto err_unlock;
- ret = ttm_bo_validate(bo, &ne_placement, false, false);
+ ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
ttm_bo_unreserve(bo);
err_unlock:
ttm_write_unlock(&vmw_priv->active_master->lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 31f9afed0a63..bbc7c4c30bc7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -752,14 +752,8 @@ err_not_scanout:
return NULL;
}
-static int vmw_kms_fb_changed(struct drm_device *dev)
-{
- return 0;
-}
-
static struct drm_mode_config_funcs vmw_kms_funcs = {
.fb_create = vmw_kms_fb_create,
- .fb_changed = vmw_kms_fb_changed,
};
int vmw_kms_init(struct vmw_private *dev_priv)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 5b6eabeb7f51..ad566c85b075 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -118,7 +118,7 @@ static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
if (pin)
overlay_placement = &vmw_vram_ne_placement;
- ret = ttm_bo_validate(bo, overlay_placement, interruptible, false);
+ ret = ttm_bo_validate(bo, overlay_placement, interruptible, false, false);
ttm_bo_unreserve(bo);
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 71d4c0703629..219f5682adfe 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -86,6 +86,12 @@ config HID_BELKIN
---help---
Support for Belkin Flip KVM and Wireless keyboard.
+config HID_CANDO
+ tristate "Cando dual touch panel"
+ depends on USB_HID
+ ---help---
+ Support for Cando dual touch panel.
+
config HID_CHERRY
tristate "Cherry" if EMBEDDED
depends on USB_HID
@@ -122,6 +128,12 @@ config DRAGONRISE_FF
Say Y here if you want to enable force feedback support for DragonRise Inc.
game controllers.
+config HID_EGALAX
+ tristate "eGalax multi-touch panel"
+ depends on USB_HID
+ ---help---
+ Support for the eGalax dual-touch panel
+
config HID_EZKEY
tristate "Ezkey" if EMBEDDED
depends on USB_HID
@@ -262,12 +274,76 @@ config HID_PETALYNX
---help---
Support for Petalynx Maxter remote control.
+config HID_PICOLCD
+ tristate "PicoLCD (graphic version)"
+ depends on USB_HID
+ ---help---
+ This provides support for Minibox PicoLCD devices; currently
+ only the graphical ones are supported.
+
+ This includes support for the following device features:
+ - Keypad
+ - Switching between Firmware and Flash mode
+ - EEProm / Flash access (via debugfs)
+ Features selectively enabled:
+ - Framebuffer for monochrome 256x64 display
+ - Backlight control
+ - Contrast control
+ - General purpose outputs
+ Features that are not (yet) supported:
+ - IR
+
+config HID_PICOLCD_FB
+ bool "Framebuffer support" if EMBEDDED
+ default !EMBEDDED
+ depends on HID_PICOLCD
+ depends on HID_PICOLCD=FB || FB=y
+ select FB_DEFERRED_IO
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select FB_SYS_FOPS
+ ---help---
+ Provide access to PicoLCD's 256x64 monochrome display via a
+ framebuffer device.
+
+config HID_PICOLCD_BACKLIGHT
+ bool "Backlight control" if EMBEDDED
+ default !EMBEDDED
+ depends on HID_PICOLCD
+ depends on HID_PICOLCD=BACKLIGHT_CLASS_DEVICE || BACKLIGHT_CLASS_DEVICE=y
+ ---help---
+ Provide access to PicoLCD's backlight control via backlight
+ class.
+
+config HID_PICOLCD_LCD
+ bool "Contrast control" if EMBEDDED
+ default !EMBEDDED
+ depends on HID_PICOLCD
+ depends on HID_PICOLCD=LCD_CLASS_DEVICE || LCD_CLASS_DEVICE=y
+ ---help---
+ Provide access to PicoLCD's LCD contrast via lcd class.
+
+config HID_PICOLCD_LEDS
+ bool "GPO via leds class" if EMBEDDED
+ default !EMBEDDED
+ depends on HID_PICOLCD
+ depends on HID_PICOLCD=LEDS_CLASS || LEDS_CLASS=y
+ ---help---
+ Provide access to PicoLCD's GPO pins via leds class.
+
config HID_QUANTA
tristate "Quanta Optical Touch"
depends on USB_HID
---help---
Support for Quanta Optical Touch dual-touch panels.
+config HID_ROCCAT_KONE
+ tristate "Roccat Kone Mouse support"
+ depends on USB_HID
+ ---help---
+ Support for Roccat Kone mouse.
+
config HID_SAMSUNG
tristate "Samsung" if EMBEDDED
depends on USB_HID
@@ -332,7 +408,7 @@ config HID_TOPSEED
depends on USB_HID
default !EMBEDDED
---help---
- Say Y if you have a TopSeed Cyberlink remote control.
+ Say Y if you have a TopSeed Cyberlink or BTC Emprex remote control.
config HID_THRUSTMASTER
tristate "ThrustMaster devices support" if EMBEDDED
@@ -357,6 +433,14 @@ config HID_WACOM
---help---
Support for Wacom Graphire Bluetooth tablet.
+config HID_WACOM_POWER_SUPPLY
+ bool "Wacom Bluetooth devices power supply status support"
+ depends on HID_WACOM
+ select POWER_SUPPLY
+ ---help---
+ Say Y here if you want to enable power supply status monitoring for
+ Wacom Bluetooth devices.
+
config HID_ZEROPLUS
tristate "Zeroplus based game controller support" if EMBEDDED
depends on USB_HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 0b2618f092ca..a5868d032e37 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -26,10 +26,12 @@ obj-$(CONFIG_HID_3M_PCT) += hid-3m-pct.o
obj-$(CONFIG_HID_A4TECH) += hid-a4tech.o
obj-$(CONFIG_HID_APPLE) += hid-apple.o
obj-$(CONFIG_HID_BELKIN) += hid-belkin.o
+obj-$(CONFIG_HID_CANDO) += hid-cando.o
obj-$(CONFIG_HID_CHERRY) += hid-cherry.o
obj-$(CONFIG_HID_CHICONY) += hid-chicony.o
obj-$(CONFIG_HID_CYPRESS) += hid-cypress.o
obj-$(CONFIG_HID_DRAGONRISE) += hid-drff.o
+obj-$(CONFIG_HID_EGALAX) += hid-egalax.o
obj-$(CONFIG_HID_EZKEY) += hid-ezkey.o
obj-$(CONFIG_HID_GYRATION) += hid-gyration.o
obj-$(CONFIG_HID_KENSINGTON) += hid-kensington.o
@@ -44,6 +46,8 @@ obj-$(CONFIG_HID_ORTEK) += hid-ortek.o
obj-$(CONFIG_HID_QUANTA) += hid-quanta.o
obj-$(CONFIG_HID_PANTHERLORD) += hid-pl.o
obj-$(CONFIG_HID_PETALYNX) += hid-petalynx.o
+obj-$(CONFIG_HID_PICOLCD) += hid-picolcd.o
+obj-$(CONFIG_HID_ROCCAT_KONE) += hid-roccat-kone.o
obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o
obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o
obj-$(CONFIG_HID_SONY) += hid-sony.o
diff --git a/drivers/hid/hid-3m-pct.c b/drivers/hid/hid-3m-pct.c
index c31e0be8ccea..2a0d56b7a02b 100644
--- a/drivers/hid/hid-3m-pct.c
+++ b/drivers/hid/hid-3m-pct.c
@@ -1,7 +1,7 @@
/*
* HID driver for 3M PCT multitouch panels
*
- * Copyright (c) 2009 Stephane Chatty <chatty@enac.fr>
+ * Copyright (c) 2009-2010 Stephane Chatty <chatty@enac.fr>
*
*/
@@ -25,7 +25,7 @@ MODULE_LICENSE("GPL");
#include "hid-ids.h"
struct mmm_finger {
- __s32 x, y;
+ __s32 x, y, w, h;
__u8 rank;
bool touch, valid;
};
@@ -82,7 +82,18 @@ static int mmm_input_mapping(struct hid_device *hdev, struct hid_input *hi,
/* touchscreen emulation */
hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
return 1;
+ case HID_DG_WIDTH:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_TOUCH_MAJOR);
+ return 1;
+ case HID_DG_HEIGHT:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_TOUCH_MINOR);
+ input_set_abs_params(hi->input, ABS_MT_ORIENTATION,
+ 1, 1, 0, 0);
+ return 1;
case HID_DG_CONTACTID:
+ field->logical_maximum = 59;
hid_map_usage(hi, usage, bit, max,
EV_ABS, ABS_MT_TRACKING_ID);
return 1;
@@ -128,9 +139,15 @@ static void mmm_filter_event(struct mmm_data *md, struct input_dev *input)
/* this finger is just placeholder data, ignore */
} else if (f->touch) {
/* this finger is on the screen */
+ int wide = (f->w > f->h);
input_event(input, EV_ABS, ABS_MT_TRACKING_ID, i);
input_event(input, EV_ABS, ABS_MT_POSITION_X, f->x);
input_event(input, EV_ABS, ABS_MT_POSITION_Y, f->y);
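+ /* Report the larger of the contact's width/height as
+ * ABS_MT_TOUCH_MAJOR, the smaller as ABS_MT_TOUCH_MINOR, and use
+ * ABS_MT_ORIENTATION to flag which axis the major lies along. */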
+ input_event(input, EV_ABS, ABS_MT_ORIENTATION, wide);
+ input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR,
+ wide ? f->w : f->h);
+ input_event(input, EV_ABS, ABS_MT_TOUCH_MINOR,
+ wide ? f->h : f->w);
input_mt_sync(input);
/*
* touchscreen emulation: maintain the age rank
@@ -197,6 +214,14 @@ static int mmm_event(struct hid_device *hid, struct hid_field *field,
case HID_DG_CONFIDENCE:
md->valid = value;
break;
+ case HID_DG_WIDTH:
+ if (md->valid)
+ md->f[md->curid].w = value;
+ break;
+ case HID_DG_HEIGHT:
+ if (md->valid)
+ md->f[md->curid].h = value;
+ break;
case HID_DG_CONTACTID:
if (md->valid) {
md->curid = value;
@@ -255,6 +280,7 @@ static void mmm_remove(struct hid_device *hdev)
static const struct hid_device_id mmm_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M1968) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M2256) },
{ }
};
MODULE_DEVICE_TABLE(hid, mmm_devices);
@@ -287,5 +313,4 @@ static void __exit mmm_exit(void)
module_init(mmm_init);
module_exit(mmm_exit);
-MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-cando.c b/drivers/hid/hid-cando.c
new file mode 100644
index 000000000000..4267a6fdc277
--- /dev/null
+++ b/drivers/hid/hid-cando.c
@@ -0,0 +1,272 @@
+/*
+ * HID driver for Cando dual-touch panels
+ *
+ * Copyright (c) 2010 Stephane Chatty <chatty@enac.fr>
+ *
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>");
+MODULE_DESCRIPTION("Cando dual-touch panel");
+MODULE_LICENSE("GPL");
+
+#include "hid-ids.h"
+
+struct cando_data {
+ __u16 x, y;
+ __u8 id;
+ __s8 oldest; /* id of the oldest finger in previous frame */
+ bool valid; /* valid finger data, or just placeholder? */
+ bool first; /* is this the first finger in this frame? */
+ __s8 firstid; /* id of the first finger in the frame */
+ __u16 firstx, firsty; /* (x, y) of the first finger in the frame */
+};
+
+static int cando_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ switch (usage->hid & HID_USAGE_PAGE) {
+
+ case HID_UP_GENDESK:
+ switch (usage->hid) {
+ case HID_GD_X:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_POSITION_X);
+ /* touchscreen emulation */
+ input_set_abs_params(hi->input, ABS_X,
+ field->logical_minimum,
+ field->logical_maximum, 0, 0);
+ return 1;
+ case HID_GD_Y:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_POSITION_Y);
+ /* touchscreen emulation */
+ input_set_abs_params(hi->input, ABS_Y,
+ field->logical_minimum,
+ field->logical_maximum, 0, 0);
+ return 1;
+ }
+ return 0;
+
+ case HID_UP_DIGITIZER:
+ switch (usage->hid) {
+ case HID_DG_TIPSWITCH:
+ case HID_DG_CONTACTMAX:
+ return -1;
+ case HID_DG_INRANGE:
+ /* touchscreen emulation */
+ hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
+ return 1;
+ case HID_DG_CONTACTID:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_TRACKING_ID);
+ return 1;
+ }
+ return 0;
+ }
+
+ return 0;
+}
+
+static int cando_input_mapped(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ if (usage->type == EV_KEY || usage->type == EV_ABS)
+ clear_bit(usage->code, *bit);
+
+ return 0;
+}
+
+/*
+ * this function is called when a whole finger has been parsed,
+ * so that it can decide what to send to the input layer.
+ */
+static void cando_filter_event(struct cando_data *td, struct input_dev *input)
+{
+ td->first = !td->first; /* touchscreen emulation */
+
+ if (!td->valid) {
+ /*
+ * touchscreen emulation: if this is the second finger and
+ * the first was valid, the first was the oldest; if the
+ * first was not valid and there was a valid finger in the
+ * previous frame, this is a release.
+ */
+ if (td->first) {
+ td->firstid = -1;
+ } else if (td->firstid >= 0) {
+ input_event(input, EV_ABS, ABS_X, td->firstx);
+ input_event(input, EV_ABS, ABS_Y, td->firsty);
+ td->oldest = td->firstid;
+ } else if (td->oldest >= 0) {
+ input_event(input, EV_KEY, BTN_TOUCH, 0);
+ td->oldest = -1;
+ }
+
+ return;
+ }
+
+ input_event(input, EV_ABS, ABS_MT_TRACKING_ID, td->id);
+ input_event(input, EV_ABS, ABS_MT_POSITION_X, td->x);
+ input_event(input, EV_ABS, ABS_MT_POSITION_Y, td->y);
+
+ input_mt_sync(input);
+
+ /*
+ * touchscreen emulation: if there was no touching finger previously,
+ * emit touch event
+ */
+ if (td->oldest < 0) {
+ input_event(input, EV_KEY, BTN_TOUCH, 1);
+ td->oldest = td->id;
+ }
+
+ /*
+ * touchscreen emulation: if this is the first finger, wait for the
+ * second; the oldest is then the second finger if it was already the
+ * oldest or if there was no valid first, and the first finger otherwise.
+ */
+ if (td->first) {
+ td->firstx = td->x;
+ td->firsty = td->y;
+ td->firstid = td->id;
+ } else {
+ int x, y, oldest;
+ if (td->id == td->oldest || td->firstid < 0) {
+ x = td->x;
+ y = td->y;
+ oldest = td->id;
+ } else {
+ x = td->firstx;
+ y = td->firsty;
+ oldest = td->firstid;
+ }
+ input_event(input, EV_ABS, ABS_X, x);
+ input_event(input, EV_ABS, ABS_Y, y);
+ td->oldest = oldest;
+ }
+}
+
+
+static int cando_event(struct hid_device *hid, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ struct cando_data *td = hid_get_drvdata(hid);
+
+ if (hid->claimed & HID_CLAIMED_INPUT) {
+ struct input_dev *input = field->hidinput->input;
+
+ switch (usage->hid) {
+ case HID_DG_INRANGE:
+ td->valid = value;
+ break;
+ case HID_DG_CONTACTID:
+ td->id = value;
+ break;
+ case HID_GD_X:
+ td->x = value;
+ break;
+ case HID_GD_Y:
+ td->y = value;
+ cando_filter_event(td, input);
+ break;
+ case HID_DG_TIPSWITCH:
+ /* avoid interference from generic hidinput handling */
+ break;
+
+ default:
+ /* fallback to the generic hidinput handling */
+ return 0;
+ }
+ }
+
+ /* we have handled the hidinput part, now remains hiddev */
+ if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
+ hid->hiddev_hid_event(hid, field, usage, value);
+
+ return 1;
+}
+
+static int cando_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+ struct cando_data *td;
+
+ td = kmalloc(sizeof(struct cando_data), GFP_KERNEL);
+ if (!td) {
+ dev_err(&hdev->dev, "cannot allocate Cando Touch data\n");
+ return -ENOMEM;
+ }
+ hid_set_drvdata(hdev, td);
+ td->first = false;
+ td->oldest = -1;
+ td->valid = false;
+
+ ret = hid_parse(hdev);
+ if (!ret)
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+
+ if (ret)
+ kfree(td);
+
+ return ret;
+}
+
+static void cando_remove(struct hid_device *hdev)
+{
+ hid_hw_stop(hdev);
+ kfree(hid_get_drvdata(hdev));
+ hid_set_drvdata(hdev, NULL);
+}
+
+static const struct hid_device_id cando_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
+ USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
+ USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, cando_devices);
+
+static const struct hid_usage_id cando_grabbed_usages[] = {
+ { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID },
+ { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
+};
+
+static struct hid_driver cando_driver = {
+ .name = "cando-touch",
+ .id_table = cando_devices,
+ .probe = cando_probe,
+ .remove = cando_remove,
+ .input_mapping = cando_input_mapping,
+ .input_mapped = cando_input_mapped,
+ .usage_table = cando_grabbed_usages,
+ .event = cando_event,
+};
+
+static int __init cando_init(void)
+{
+ return hid_register_driver(&cando_driver);
+}
+
+static void __exit cando_exit(void)
+{
+ hid_unregister_driver(&cando_driver);
+}
+
+module_init(cando_init);
+module_exit(cando_exit);
+
diff --git a/drivers/hid/hid-cherry.c b/drivers/hid/hid-cherry.c
index 7e597d7f770f..24663a8717b1 100644
--- a/drivers/hid/hid-cherry.c
+++ b/drivers/hid/hid-cherry.c
@@ -59,6 +59,7 @@ static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
static const struct hid_device_id ch_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
{ }
};
MODULE_DEVICE_TABLE(hid, ch_devices);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 2e2aa759d230..dc3a011a6795 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -940,13 +940,8 @@ static void hid_output_field(struct hid_field *field, __u8 *data)
unsigned count = field->report_count;
unsigned offset = field->report_offset;
unsigned size = field->report_size;
- unsigned bitsused = offset + count * size;
unsigned n;
- /* make sure the unused bits in the last byte are zeros */
- if (count > 0 && size > 0 && (bitsused % 8) != 0)
- data[(bitsused-1)/8] &= (1 << (bitsused % 8)) - 1;
-
for (n = 0; n < count; n++) {
if (field->logical_minimum < 0) /* signed values */
implement(data, offset + n * size, size, s32ton(field->value[n], size));
@@ -966,6 +961,7 @@ void hid_output_report(struct hid_report *report, __u8 *data)
if (report->id > 0)
*data++ = report->id;
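+ /* Zero the whole output payload up front so any unused bits end up
+ * cleared, instead of masking the trailing byte per field. */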
+ memset(data, 0, ((report->size - 1) >> 3) + 1);
for (n = 0; n < report->maxfield; n++)
hid_output_field(report->field[n], data);
}
@@ -1043,13 +1039,8 @@ void hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event)
hid->hiddev_report_event(hid, report);
- if (hid->claimed & HID_CLAIMED_HIDRAW) {
- /* numbered reports need to be passed with the report num */
- if (report_enum->numbered)
- hidraw_report_event(hid, data - 1, size + 1);
- else
- hidraw_report_event(hid, data, size);
- }
+ if (hid->claimed & HID_CLAIMED_HIDRAW)
+ hidraw_report_event(hid, data, size);
for (a = 0; a < report->maxfield; a++)
hid_input_field(hid, report->field[a], cdata, interrupt);
@@ -1091,35 +1082,28 @@ int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int i
buf = kmalloc(sizeof(char) * HID_DEBUG_BUFSIZE, GFP_ATOMIC);
- if (!buf) {
- report = hid_get_report(report_enum, data);
+ if (!buf)
goto nomem;
- }
-
- snprintf(buf, HID_DEBUG_BUFSIZE - 1,
- "\nreport (size %u) (%snumbered)\n", size, report_enum->numbered ? "" : "un");
- hid_debug_event(hid, buf);
-
- report = hid_get_report(report_enum, data);
- if (!report) {
- kfree(buf);
- return -1;
- }
/* dump the report */
snprintf(buf, HID_DEBUG_BUFSIZE - 1,
- "report %d (size %u) = ", report->id, size);
+ "\nreport (size %u) (%snumbered) = ", size, report_enum->numbered ? "" : "un");
hid_debug_event(hid, buf);
+
for (i = 0; i < size; i++) {
snprintf(buf, HID_DEBUG_BUFSIZE - 1,
" %02x", data[i]);
hid_debug_event(hid, buf);
}
hid_debug_event(hid, "\n");
-
kfree(buf);
nomem:
+ report = hid_get_report(report_enum, data);
+
+ if (!report)
+ return -1;
+
if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) {
ret = hdrv->raw_event(hid, report, data, size);
if (ret != 0)
@@ -1172,6 +1156,8 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
unsigned int i;
int len;
+ if (hdev->quirks & HID_QUIRK_HIDDEV_FORCE)
+ connect_mask |= (HID_CONNECT_HIDDEV_FORCE | HID_CONNECT_HIDDEV);
if (hdev->bus != BUS_USB)
connect_mask &= ~HID_CONNECT_HIDDEV;
if (hid_hiddev(hdev))
@@ -1251,6 +1237,7 @@ EXPORT_SYMBOL_GPL(hid_disconnect);
/* a list of devices for which there is a specialized driver on HID bus */
static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M1968) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M2256) },
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ATV_IRCONTROL) },
@@ -1295,13 +1282,18 @@ static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) },
@@ -1335,6 +1327,8 @@ static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K) },
@@ -1346,6 +1340,7 @@ static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
@@ -1363,6 +1358,7 @@ static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
@@ -1761,7 +1757,7 @@ int hid_add_device(struct hid_device *hdev)
/* we need to kill them here, otherwise they will stay allocated to
* wait for coming driver */
- if (hid_ignore(hdev))
+ if (!(hdev->quirks & HID_QUIRK_NO_IGNORE) && hid_ignore(hdev))
return -ENODEV;
/* XXX hack, any other cleaner solution after the driver core
@@ -1769,11 +1765,12 @@ int hid_add_device(struct hid_device *hdev)
dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
hdev->vendor, hdev->product, atomic_inc_return(&id));
+ hid_debug_register(hdev, dev_name(&hdev->dev));
ret = device_add(&hdev->dev);
if (!ret)
hdev->status |= HID_STAT_ADDED;
-
- hid_debug_register(hdev, dev_name(&hdev->dev));
+ else
+ hid_debug_unregister(hdev);
return ret;
}
diff --git a/drivers/hid/hid-egalax.c b/drivers/hid/hid-egalax.c
new file mode 100644
index 000000000000..f44bdc084cb2
--- /dev/null
+++ b/drivers/hid/hid-egalax.c
@@ -0,0 +1,281 @@
+/*
+ * HID driver for eGalax dual-touch panels
+ *
+ * Copyright (c) 2010 Stephane Chatty <chatty@enac.fr>
+ *
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/slab.h>
+#include "usbhid/usbhid.h"
+
+MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>");
+MODULE_DESCRIPTION("eGalax dual-touch panel");
+MODULE_LICENSE("GPL");
+
+#include "hid-ids.h"
+
+struct egalax_data {
+ __u16 x, y, z;
+ __u8 id;
+ bool first; /* is this the first finger in the frame? */
+ bool valid; /* valid finger data, or just placeholder? */
+ bool activity; /* at least one active finger previously? */
+ __u16 lastx, lasty; /* latest valid (x, y) in the frame */
+};
+
+static int egalax_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ switch (usage->hid & HID_USAGE_PAGE) {
+
+ case HID_UP_GENDESK:
+ switch (usage->hid) {
+ case HID_GD_X:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_POSITION_X);
+ /* touchscreen emulation */
+ input_set_abs_params(hi->input, ABS_X,
+ field->logical_minimum,
+ field->logical_maximum, 0, 0);
+ return 1;
+ case HID_GD_Y:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_POSITION_Y);
+ /* touchscreen emulation */
+ input_set_abs_params(hi->input, ABS_Y,
+ field->logical_minimum,
+ field->logical_maximum, 0, 0);
+ return 1;
+ }
+ return 0;
+
+ case HID_UP_DIGITIZER:
+ switch (usage->hid) {
+ case HID_DG_TIPSWITCH:
+ /* touchscreen emulation */
+ hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
+ return 1;
+ case HID_DG_INRANGE:
+ case HID_DG_CONFIDENCE:
+ case HID_DG_CONTACTCOUNT:
+ case HID_DG_CONTACTMAX:
+ return -1;
+ case HID_DG_CONTACTID:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_TRACKING_ID);
+ return 1;
+ case HID_DG_TIPPRESSURE:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_PRESSURE);
+ return 1;
+ }
+ return 0;
+ }
+
+ /* ignore others (from other reports we won't get anyway) */
+ return -1;
+}
+
+static int egalax_input_mapped(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ if (usage->type == EV_KEY || usage->type == EV_ABS)
+ clear_bit(usage->code, *bit);
+
+ return 0;
+}
+
+/*
+ * this function is called when a whole finger has been parsed,
+ * so that it can decide what to send to the input layer.
+ */
+static void egalax_filter_event(struct egalax_data *td, struct input_dev *input)
+{
+ td->first = !td->first; /* touchscreen emulation */
+
+ if (td->valid) {
+ /* emit multitouch events */
+ input_event(input, EV_ABS, ABS_MT_TRACKING_ID, td->id);
+ input_event(input, EV_ABS, ABS_MT_POSITION_X, td->x);
+ input_event(input, EV_ABS, ABS_MT_POSITION_Y, td->y);
+ input_event(input, EV_ABS, ABS_MT_PRESSURE, td->z);
+
+ input_mt_sync(input);
+
+ /*
+ * touchscreen emulation: store (x, y) as
+ * the last valid values in this frame
+ */
+ td->lastx = td->x;
+ td->lasty = td->y;
+ }
+
+ /*
+ * touchscreen emulation: if this is the second finger and at least
+ * one in this frame is valid, the latest valid in the frame is
+ * the oldest on the panel, the one we want for single touch
+ */
+ if (!td->first && td->activity) {
+ input_event(input, EV_ABS, ABS_X, td->lastx);
+ input_event(input, EV_ABS, ABS_Y, td->lasty);
+ }
+
+ if (!td->valid) {
+ /*
+ * touchscreen emulation: if the first finger is invalid
+ * and there previously was finger activity, this is a release
+ */
+ if (td->first && td->activity) {
+ input_event(input, EV_KEY, BTN_TOUCH, 0);
+ td->activity = false;
+ }
+ return;
+ }
+
+
+ /* touchscreen emulation: if no previous activity, emit touch event */
+ if (!td->activity) {
+ input_event(input, EV_KEY, BTN_TOUCH, 1);
+ td->activity = true;
+ }
+}
+
+
+static int egalax_event(struct hid_device *hid, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ struct egalax_data *td = hid_get_drvdata(hid);
+
+ if (hid->claimed & HID_CLAIMED_INPUT) {
+ struct input_dev *input = field->hidinput->input;
+
+ switch (usage->hid) {
+ case HID_DG_INRANGE:
+ case HID_DG_CONFIDENCE:
+ /* avoid interference from generic hidinput handling */
+ break;
+ case HID_DG_TIPSWITCH:
+ td->valid = value;
+ break;
+ case HID_DG_TIPPRESSURE:
+ td->z = value;
+ break;
+ case HID_DG_CONTACTID:
+ td->id = value;
+ break;
+ case HID_GD_X:
+ td->x = value;
+ break;
+ case HID_GD_Y:
+ td->y = value;
+ /* this is the last field in a finger */
+ egalax_filter_event(td, input);
+ break;
+ case HID_DG_CONTACTCOUNT:
+ /* touch emulation: this is the last field in a frame */
+ td->first = false;
+ break;
+
+ default:
+ /* fallback to the generic hidinput handling */
+ return 0;
+ }
+ }
+
+ /* we have handled the hidinput part, now remains hiddev */
+ if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
+ hid->hiddev_hid_event(hid, field, usage, value);
+
+ return 1;
+}
+
+static int egalax_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+ struct egalax_data *td;
+ struct hid_report *report;
+
+ td = kmalloc(sizeof(struct egalax_data), GFP_KERNEL);
+ if (!td) {
+ dev_err(&hdev->dev, "cannot allocate eGalax data\n");
+ return -ENOMEM;
+ }
+ hid_set_drvdata(hdev, td);
+
+ ret = hid_parse(hdev);
+ if (ret)
+ goto end;
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (ret)
+ goto end;
+
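+ /* Presumably switches the panel into its dual-touch reporting mode:
+ * feature report 5 with its first value set to 2 (device-specific,
+ * not documented in this patch). */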
+ report = hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[5];
+ if (report) {
+ report->field[0]->value[0] = 2;
+ usbhid_submit_report(hdev, report, USB_DIR_OUT);
+ }
+
+end:
+ if (ret)
+ kfree(td);
+
+ return ret;
+}
+
+static void egalax_remove(struct hid_device *hdev)
+{
+ hid_hw_stop(hdev);
+ kfree(hid_get_drvdata(hdev));
+ hid_set_drvdata(hdev, NULL);
+}
+
+static const struct hid_device_id egalax_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, egalax_devices);
+
+static const struct hid_usage_id egalax_grabbed_usages[] = {
+ { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID },
+ { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
+};
+
+static struct hid_driver egalax_driver = {
+ .name = "egalax-touch",
+ .id_table = egalax_devices,
+ .probe = egalax_probe,
+ .remove = egalax_remove,
+ .input_mapping = egalax_input_mapping,
+ .input_mapped = egalax_input_mapped,
+ .usage_table = egalax_grabbed_usages,
+ .event = egalax_event,
+};
+
+static int __init egalax_init(void)
+{
+ return hid_register_driver(&egalax_driver);
+}
+
+static void __exit egalax_exit(void)
+{
+ hid_unregister_driver(&egalax_driver);
+}
+
+module_init(egalax_init);
+module_exit(egalax_exit);
+
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 797e06470356..12dd40d628e3 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -20,6 +20,7 @@
#define USB_VENDOR_ID_3M 0x0596
#define USB_DEVICE_ID_3M1968 0x0500
+#define USB_DEVICE_ID_3M2256 0x0502
#define USB_VENDOR_ID_A4TECH 0x09da
#define USB_DEVICE_ID_A4TECH_WCP32PU 0x0006
@@ -123,6 +124,13 @@
#define USB_VENDOR_ID_BERKSHIRE 0x0c98
#define USB_DEVICE_ID_BERKSHIRE_PCWD 0x1140
+#define USB_VENDOR_ID_BTC 0x046e
+#define USB_DEVICE_ID_BTC_EMPREX_REMOTE 0x5578
+
+#define USB_VENDOR_ID_CANDO 0x2087
+#define USB_DEVICE_ID_CANDO_MULTI_TOUCH 0x0a01
+#define USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6 0x0b03
+
#define USB_VENDOR_ID_CH 0x068e
#define USB_DEVICE_ID_CH_PRO_PEDALS 0x00f2
#define USB_DEVICE_ID_CH_COMBATSTICK 0x00f4
@@ -131,6 +139,7 @@
#define USB_VENDOR_ID_CHERRY 0x046a
#define USB_DEVICE_ID_CHERRY_CYMOTION 0x0023
+#define USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR 0x0027
#define USB_VENDOR_ID_CHIC 0x05fe
#define USB_DEVICE_ID_CHIC_GAMEPAD 0x0014
@@ -170,6 +179,10 @@
#define USB_VENDOR_ID_DRAGONRISE 0x0079
+#define USB_VENDOR_ID_DWAV 0x0EEF
+#define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH 0x480d
+
#define USB_VENDOR_ID_ELO 0x04E7
#define USB_DEVICE_ID_ELO_TS2700 0x0020
@@ -341,6 +354,8 @@
#define USB_VENDOR_ID_MICROCHIP 0x04d8
#define USB_DEVICE_ID_PICKIT1 0x0032
#define USB_DEVICE_ID_PICKIT2 0x0033
+#define USB_DEVICE_ID_PICOLCD 0xc002
+#define USB_DEVICE_ID_PICOLCD_BOOTLOADER 0xf002
#define USB_VENDOR_ID_MICROSOFT 0x045e
#define USB_DEVICE_ID_SIDEWINDER_GV 0x003b
@@ -399,6 +414,9 @@
#define USB_VENDOR_ID_PRODIGE 0x05af
#define USB_DEVICE_ID_PRODIGE_CORDLESS 0x3062
+#define USB_VENDOR_ID_ROCCAT 0x1e7d
+#define USB_DEVICE_ID_ROCCAT_KONE 0x2ced
+
#define USB_VENDOR_ID_SAITEK 0x06a3
#define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17
@@ -456,6 +474,7 @@
#define USB_VENDOR_ID_WACOM 0x056a
#define USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH 0x81
+#define USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH 0xbd
#define USB_VENDOR_ID_WISEGROUP 0x0925
#define USB_DEVICE_ID_SMARTJOY_PLUS 0x0005
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index 3677c9037a11..f6433d8050a9 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -126,6 +126,9 @@ static int lg_wireless_mapping(struct hid_input *hi, struct hid_usage *usage,
case 0x1004: lg_map_key_clear(KEY_VIDEO); break;
case 0x1005: lg_map_key_clear(KEY_AUDIO); break;
case 0x100a: lg_map_key_clear(KEY_DOCUMENTS); break;
+ /* The following two entries are Playlist 1 and 2 on the MX3200 */
+ case 0x100f: lg_map_key_clear(KEY_FN_1); break;
+ case 0x1010: lg_map_key_clear(KEY_FN_2); break;
case 0x1011: lg_map_key_clear(KEY_PREVIOUSSONG); break;
case 0x1012: lg_map_key_clear(KEY_NEXTSONG); break;
case 0x1013: lg_map_key_clear(KEY_CAMERA); break;
@@ -137,6 +140,7 @@ static int lg_wireless_mapping(struct hid_input *hi, struct hid_usage *usage,
case 0x1019: lg_map_key_clear(KEY_PROG1); break;
case 0x101a: lg_map_key_clear(KEY_PROG2); break;
case 0x101b: lg_map_key_clear(KEY_PROG3); break;
+ case 0x101c: lg_map_key_clear(KEY_CYCLEWINDOWS); break;
case 0x101f: lg_map_key_clear(KEY_ZOOMIN); break;
case 0x1020: lg_map_key_clear(KEY_ZOOMOUT); break;
case 0x1021: lg_map_key_clear(KEY_ZOOMRESET); break;
@@ -147,6 +151,11 @@ static int lg_wireless_mapping(struct hid_input *hi, struct hid_usage *usage,
case 0x1029: lg_map_key_clear(KEY_SHUFFLE); break;
case 0x102a: lg_map_key_clear(KEY_BACK); break;
case 0x102b: lg_map_key_clear(KEY_CYCLEWINDOWS); break;
+ case 0x102d: lg_map_key_clear(KEY_WWW); break;
+ /* The following two are 'Start/answer call' and 'End/reject call'
+ on the MX3200 */
+ case 0x1031: lg_map_key_clear(KEY_OK); break;
+ case 0x1032: lg_map_key_clear(KEY_CANCEL); break;
case 0x1041: lg_map_key_clear(KEY_BATTERY); break;
case 0x1042: lg_map_key_clear(KEY_WORDPROCESSOR); break;
case 0x1043: lg_map_key_clear(KEY_SPREADSHEET); break;
diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
index 9b24fc510712..b6b0caeeac58 100644
--- a/drivers/hid/hid-ntrig.c
+++ b/drivers/hid/hid-ntrig.c
@@ -1,8 +1,8 @@
/*
* HID driver for N-Trig touchscreens
*
- * Copyright (c) 2008 Rafi Rubin
- * Copyright (c) 2009 Stephane Chatty
+ * Copyright (c) 2008-2010 Rafi Rubin
+ * Copyright (c) 2009-2010 Stephane Chatty
*
*/
@@ -15,6 +15,8 @@
#include <linux/device.h>
#include <linux/hid.h>
+#include <linux/usb.h>
+#include "usbhid/usbhid.h"
#include <linux/module.h>
#include <linux/slab.h>
@@ -22,20 +24,350 @@
#define NTRIG_DUPLICATE_USAGES 0x001
-#define nt_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \
- EV_KEY, (c))
+static unsigned int min_width;
+module_param(min_width, uint, 0644);
+MODULE_PARM_DESC(min_width, "Minimum touch contact width to accept.");
+
+static unsigned int min_height;
+module_param(min_height, uint, 0644);
+MODULE_PARM_DESC(min_height, "Minimum touch contact height to accept.");
+
+static unsigned int activate_slack = 1;
+module_param(activate_slack, uint, 0644);
+MODULE_PARM_DESC(activate_slack, "Number of touch frames to ignore at "
+ "the start of touch input.");
+
+static unsigned int deactivate_slack = 4;
+module_param(deactivate_slack, uint, 0644);
+MODULE_PARM_DESC(deactivate_slack, "Number of empty frames to ignore before "
+ "deactivating touch.");
+
+static unsigned int activation_width = 64;
+module_param(activation_width, uint, 0644);
+MODULE_PARM_DESC(activation_width, "Width threshold to immediately start "
+ "processing touch events.");
+
+static unsigned int activation_height = 32;
+module_param(activation_height, uint, 0644);
+MODULE_PARM_DESC(activation_height, "Height threshold to immediately start "
+ "processing touch events.");
struct ntrig_data {
/* Incoming raw values for a single contact */
__u16 x, y, w, h;
__u16 id;
- __u8 confidence;
+
+ bool tipswitch;
+ bool confidence;
+ bool first_contact_touch;
bool reading_mt;
- __u8 first_contact_confidence;
__u8 mt_footer[4];
__u8 mt_foot_count;
+
+ /* The current activation state. */
+ __s8 act_state;
+
+ /* Empty frames to ignore before recognizing the end of activity */
+ __s8 deactivate_slack;
+
+ /* Frames to ignore before acknowledging the start of activity */
+ __s8 activate_slack;
+
+ /* Minimum size contact to accept */
+ __u16 min_width;
+ __u16 min_height;
+
+ /* Threshold to override activation slack */
+ __u16 activation_width;
+ __u16 activation_height;
+
+ __u16 sensor_logical_width;
+ __u16 sensor_logical_height;
+ __u16 sensor_physical_width;
+ __u16 sensor_physical_height;
+};
+
+
+static ssize_t show_phys_width(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", nd->sensor_physical_width);
+}
+
+static DEVICE_ATTR(sensor_physical_width, S_IRUGO, show_phys_width, NULL);
+
+static ssize_t show_phys_height(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", nd->sensor_physical_height);
+}
+
+static DEVICE_ATTR(sensor_physical_height, S_IRUGO, show_phys_height, NULL);
+
+static ssize_t show_log_width(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", nd->sensor_logical_width);
+}
+
+static DEVICE_ATTR(sensor_logical_width, S_IRUGO, show_log_width, NULL);
+
+static ssize_t show_log_height(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", nd->sensor_logical_height);
+}
+
+static DEVICE_ATTR(sensor_logical_height, S_IRUGO, show_log_height, NULL);
+
+static ssize_t show_min_width(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", nd->min_width *
+ nd->sensor_physical_width /
+ nd->sensor_logical_width);
+}
+
+static ssize_t set_min_width(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ unsigned long val;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val > nd->sensor_physical_width)
+ return -EINVAL;
+
+ nd->min_width = val * nd->sensor_logical_width /
+ nd->sensor_physical_width;
+
+ return count;
+}
+
+static DEVICE_ATTR(min_width, S_IWUSR | S_IRUGO, show_min_width, set_min_width);
+
+static ssize_t show_min_height(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", nd->min_height *
+ nd->sensor_physical_height /
+ nd->sensor_logical_height);
+}
+
+static ssize_t set_min_height(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ unsigned long val;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val > nd->sensor_physical_height)
+ return -EINVAL;
+
+ nd->min_height = val * nd->sensor_logical_height /
+ nd->sensor_physical_height;
+
+ return count;
+}
+
+static DEVICE_ATTR(min_height, S_IWUSR | S_IRUGO, show_min_height,
+ set_min_height);
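A worked example of the unit conversion used by these attributes, with invented sensor sizes: assuming sensor_physical_width = 250 and sensor_logical_width = 9600, writing 12 to min_width stores 12 * 9600 / 250 = 460 in logical units, and reading the attribute back yields 460 * 250 / 9600 = 11, so small round-trip differences from integer truncation are expected.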
+
+static ssize_t show_activate_slack(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", nd->activate_slack);
+}
+
+static ssize_t set_activate_slack(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ unsigned long val;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val > 0x7f)
+ return -EINVAL;
+
+ nd->activate_slack = val;
+
+ return count;
+}
+
+static DEVICE_ATTR(activate_slack, S_IWUSR | S_IRUGO, show_activate_slack,
+ set_activate_slack);
+
+static ssize_t show_activation_width(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", nd->activation_width *
+ nd->sensor_physical_width /
+ nd->sensor_logical_width);
+}
+
+static ssize_t set_activation_width(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ unsigned long val;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val > nd->sensor_physical_width)
+ return -EINVAL;
+
+ nd->activation_width = val * nd->sensor_logical_width /
+ nd->sensor_physical_width;
+
+ return count;
+}
+
+static DEVICE_ATTR(activation_width, S_IWUSR | S_IRUGO, show_activation_width,
+ set_activation_width);
+
+static ssize_t show_activation_height(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", nd->activation_height *
+ nd->sensor_physical_height /
+ nd->sensor_logical_height);
+}
+
+static ssize_t set_activation_height(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ unsigned long val;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val > nd->sensor_physical_height)
+ return -EINVAL;
+
+ nd->activation_height = val * nd->sensor_logical_height /
+ nd->sensor_physical_height;
+
+ return count;
+}
+
+static DEVICE_ATTR(activation_height, S_IWUSR | S_IRUGO,
+ show_activation_height, set_activation_height);
+
+static ssize_t show_deactivate_slack(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", -nd->deactivate_slack);
+}
+
+static ssize_t set_deactivate_slack(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ unsigned long val;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ /*
+ * No more than 8 terminal frames have been observed so far
+ * and higher slack is highly likely to leave the single
+ * touch emulation stuck down.
+ */
+ if (val > 7)
+ return -EINVAL;
+
+ nd->deactivate_slack = -val;
+
+ return count;
+}
+
+static DEVICE_ATTR(deactivate_slack, S_IWUSR | S_IRUGO, show_deactivate_slack,
+ set_deactivate_slack);
+
+static struct attribute *sysfs_attrs[] = {
+ &dev_attr_sensor_physical_width.attr,
+ &dev_attr_sensor_physical_height.attr,
+ &dev_attr_sensor_logical_width.attr,
+ &dev_attr_sensor_logical_height.attr,
+ &dev_attr_min_height.attr,
+ &dev_attr_min_width.attr,
+ &dev_attr_activate_slack.attr,
+ &dev_attr_activation_width.attr,
+ &dev_attr_activation_height.attr,
+ &dev_attr_deactivate_slack.attr,
+ NULL
+};
+
+static struct attribute_group ntrig_attribute_group = {
+ .attrs = sysfs_attrs
};
/*
@@ -48,6 +380,8 @@ static int ntrig_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
/* No special mappings needed for the pen and single touch */
if (field->physical)
return 0;
@@ -61,6 +395,21 @@ static int ntrig_input_mapping(struct hid_device *hdev, struct hid_input *hi,
input_set_abs_params(hi->input, ABS_X,
field->logical_minimum,
field->logical_maximum, 0, 0);
+
+ if (!nd->sensor_logical_width) {
+ nd->sensor_logical_width =
+ field->logical_maximum -
+ field->logical_minimum;
+ nd->sensor_physical_width =
+ field->physical_maximum -
+ field->physical_minimum;
+ nd->activation_width = activation_width *
+ nd->sensor_logical_width /
+ nd->sensor_physical_width;
+ nd->min_width = min_width *
+ nd->sensor_logical_width /
+ nd->sensor_physical_width;
+ }
return 1;
case HID_GD_Y:
hid_map_usage(hi, usage, bit, max,
@@ -68,6 +417,21 @@ static int ntrig_input_mapping(struct hid_device *hdev, struct hid_input *hi,
input_set_abs_params(hi->input, ABS_Y,
field->logical_minimum,
field->logical_maximum, 0, 0);
+
+ if (!nd->sensor_logical_height) {
+ nd->sensor_logical_height =
+ field->logical_maximum -
+ field->logical_minimum;
+ nd->sensor_physical_height =
+ field->physical_maximum -
+ field->physical_minimum;
+ nd->activation_height = activation_height *
+ nd->sensor_logical_height /
+ nd->sensor_physical_height;
+ nd->min_height = min_height *
+ nd->sensor_logical_height /
+ nd->sensor_physical_height;
+ }
return 1;
}
return 0;
@@ -139,9 +503,10 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field,
case 0xff000001:
/* Tag indicating the start of a multitouch group */
nd->reading_mt = 1;
- nd->first_contact_confidence = 0;
+ nd->first_contact_touch = 0;
break;
case HID_DG_TIPSWITCH:
+ nd->tipswitch = value;
/* Prevent emission of touch until validated */
return 1;
case HID_DG_CONFIDENCE:
@@ -169,8 +534,14 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field,
* to emit a normal (X, Y) position
*/
if (!nd->reading_mt) {
+ /*
+ * TipSwitch indicates the presence of a
+ * finger in single touch mode.
+ */
+ input_report_key(input, BTN_TOUCH,
+ nd->tipswitch);
input_report_key(input, BTN_TOOL_DOUBLETAP,
- (nd->confidence != 0));
+ nd->tipswitch);
input_event(input, EV_ABS, ABS_X, nd->x);
input_event(input, EV_ABS, ABS_Y, nd->y);
}
@@ -193,28 +564,89 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field,
if (nd->mt_foot_count != 4)
break;
- /* Pen activity signal, trigger end of touch. */
+ /* Pen activity signal. */
if (nd->mt_footer[2]) {
+ /*
+ * When the pen deactivates touch, we see a
+ * bogus frame with ContactCount > 0.
+ * We can save a bit of work by ensuring act_state < 0
+ * even if deactivation slack is turned off.
+ */
+ nd->act_state = deactivate_slack - 1;
nd->confidence = 0;
break;
}
- /* If the contact was invalid */
- if (!(nd->confidence && nd->mt_footer[0])
- || nd->w <= 250
- || nd->h <= 190) {
- nd->confidence = 0;
+ /*
+ * The first footer value indicates the presence of a
+ * finger.
+ */
+ if (nd->mt_footer[0]) {
+ /*
+ * We do not want to process contacts under
+ * the size threshold, but do not want to
+ * ignore them for activation state
+ */
+ if (nd->w < nd->min_width ||
+ nd->h < nd->min_height)
+ nd->confidence = 0;
+ } else
break;
+
+ if (nd->act_state > 0) {
+ /*
+ * Contact meets the activation size threshold
+ */
+ if (nd->w >= nd->activation_width &&
+ nd->h >= nd->activation_height) {
+ if (nd->id)
+ /*
+ * first contact, activate now
+ */
+ nd->act_state = 0;
+ else {
+ /*
+ * avoid corrupting this frame
+ * but ensure next frame will
+ * be active
+ */
+ nd->act_state = 1;
+ break;
+ }
+ } else
+ /*
+ * Defer adjusting the activation state
+ * until the end of the frame.
+ */
+ break;
}
+ /* Discarding this contact */
+ if (!nd->confidence)
+ break;
+
/* emit a normal (X, Y) for the first point only */
if (nd->id == 0) {
- nd->first_contact_confidence = nd->confidence;
+ /*
+ * TipSwitch is superfluous in multitouch
+ * mode. The footer events tell us
+ * if there is a finger on the screen or
+ * not.
+ */
+ nd->first_contact_touch = nd->confidence;
input_event(input, EV_ABS, ABS_X, nd->x);
input_event(input, EV_ABS, ABS_Y, nd->y);
}
+
+ /* Emit MT events */
input_event(input, EV_ABS, ABS_MT_POSITION_X, nd->x);
input_event(input, EV_ABS, ABS_MT_POSITION_Y, nd->y);
+
+ /*
+ * Translate from height and width to size
+ * and orientation.
+ */
if (nd->w > nd->h) {
input_event(input, EV_ABS,
ABS_MT_ORIENTATION, 1);
@@ -234,41 +666,98 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field,
break;
case HID_DG_CONTACTCOUNT: /* End of a multitouch group */
- if (!nd->reading_mt)
+ if (!nd->reading_mt) /* Just to be sure */
break;
nd->reading_mt = 0;
- if (nd->first_contact_confidence) {
- switch (value) {
- case 0: /* for single touch devices */
- case 1:
- input_report_key(input,
- BTN_TOOL_DOUBLETAP, 1);
- break;
- case 2:
- input_report_key(input,
- BTN_TOOL_TRIPLETAP, 1);
+
+ /*
+ * Activation state machine logic:
+ *
+ * Fundamental states:
+ * state > 0: Inactive
+ * state <= 0: Active
+ * state < -deactivate_slack:
+ * Pen termination of touch
+ *
+ * Specific values of interest
+ * state == activate_slack
+ * no valid input since the last reset
+ *
+ * state == 0
+ * general operational state
+ *
+ * state == -deactivate_slack
+ * read sufficient empty frames to accept
+ * the end of input and reset
+ */
+
+ if (nd->act_state > 0) { /* Currently inactive */
+ if (value)
+ /*
+ * Consider each live contact as
+ * evidence of intentional activity.
+ */
+ nd->act_state = (nd->act_state > value)
+ ? nd->act_state - value
+ : 0;
+ else
+ /*
+ * Empty frame before we hit the
+ * activity threshold, reset.
+ */
+ nd->act_state = nd->activate_slack;
+
+ /*
+ * Entered this block inactive and no
+ * coordinates sent this frame, so hold off
+ * on button state.
+ */
+ break;
+ } else { /* Currently active */
+ if (value && nd->act_state >=
+ nd->deactivate_slack)
+ /*
+ * Live point: clear accumulated
+ * deactivation count.
+ */
+ nd->act_state = 0;
+ else if (nd->act_state <= nd->deactivate_slack)
+ /*
+ * We've consumed the deactivation
+ * slack, time to deactivate and reset.
+ */
+ nd->act_state =
+ nd->activate_slack;
+ else { /* Move towards deactivation */
+ nd->act_state--;
break;
- case 3:
- default:
- input_report_key(input,
- BTN_TOOL_QUADTAP, 1);
}
+ }
+
+ if (nd->first_contact_touch && nd->act_state <= 0) {
+ /*
+ * Check to see if we're ready to start
+ * emitting touch events.
+ *
+ * Note: activation slack will decrease over
+ * the course of the frame, and it will be
+ * inconsistent from the start to the end of
+ * the frame. However if the frame starts
+ * with slack, first_contact_touch will still
+ * be 0 and we will not get to this point.
+ */
+ input_report_key(input, BTN_TOOL_DOUBLETAP, 1);
input_report_key(input, BTN_TOUCH, 1);
} else {
- input_report_key(input,
- BTN_TOOL_DOUBLETAP, 0);
- input_report_key(input,
- BTN_TOOL_TRIPLETAP, 0);
- input_report_key(input,
- BTN_TOOL_QUADTAP, 0);
+ input_report_key(input, BTN_TOOL_DOUBLETAP, 0);
input_report_key(input, BTN_TOUCH, 0);
}
break;
default:
- /* fallback to the generic hidinput handling */
+ /* fall back to the generic hidinput handling */
return 0;
}
}
@@ -286,6 +775,7 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
struct ntrig_data *nd;
struct hid_input *hidinput;
struct input_dev *input;
+ struct hid_report *report;
if (id->driver_data)
hdev->quirks |= HID_QUIRK_MULTI_INPUT;
@@ -297,6 +787,16 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
nd->reading_mt = 0;
+ nd->min_width = 0;
+ nd->min_height = 0;
+ nd->activate_slack = activate_slack;
+ nd->act_state = activate_slack;
+ nd->deactivate_slack = -deactivate_slack;
+ nd->sensor_logical_width = 0;
+ nd->sensor_logical_height = 0;
+ nd->sensor_physical_width = 0;
+ nd->sensor_physical_height = 0;
+
hid_set_drvdata(hdev, nd);
ret = hid_parse(hdev);
@@ -327,13 +827,7 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
__clear_bit(BTN_TOOL_PEN, input->keybit);
__clear_bit(BTN_TOOL_FINGER, input->keybit);
__clear_bit(BTN_0, input->keybit);
- /*
- * A little something special to enable
- * two and three finger taps.
- */
__set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
- __set_bit(BTN_TOOL_TRIPLETAP, input->keybit);
- __set_bit(BTN_TOOL_QUADTAP, input->keybit);
/*
* The physical touchscreen (single touch)
* input has a value for physical, whereas
@@ -349,6 +843,14 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
}
+ /* This is needed for devices with more recent firmware versions */
+ report = hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0x0a];
+ if (report)
+ usbhid_submit_report(hdev, report, USB_DIR_OUT);
+
+ ret = sysfs_create_group(&hdev->dev.kobj,
+ &ntrig_attribute_group);
+
return 0;
err_free:
kfree(nd);
@@ -357,6 +859,8 @@ err_free:
static void ntrig_remove(struct hid_device *hdev)
{
+ sysfs_remove_group(&hdev->dev.kobj,
+ &ntrig_attribute_group);
hid_hw_stop(hdev);
kfree(hid_get_drvdata(hdev));
}
diff --git a/drivers/hid/hid-picolcd.c b/drivers/hid/hid-picolcd.c
new file mode 100644
index 000000000000..95253b310541
--- /dev/null
+++ b/drivers/hid/hid-picolcd.c
@@ -0,0 +1,2631 @@
+/***************************************************************************
+ * Copyright (C) 2010 by Bruno Prémont <bonbons@linux-vserver.org> *
+ * *
+ * Based on Logitech G13 driver (v0.4) *
+ * Copyright (C) 2009 by Rick L. Vinyard, Jr. <rvinyard@cs.nmsu.edu> *
+ * *
+ * This program is free software: you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License as published by *
+ * the Free Software Foundation, version 2 of the License. *
+ * *
+ * This driver is distributed in the hope that it will be useful, but *
+ * WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
+ * General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this software. If not see <http://www.gnu.org/licenses/>. *
+ ***************************************************************************/
+
+#include <linux/hid.h>
+#include <linux/hid-debug.h>
+#include <linux/input.h>
+#include "hid-ids.h"
+#include "usbhid/usbhid.h"
+#include <linux/usb.h>
+
+#include <linux/fb.h>
+#include <linux/vmalloc.h>
+#include <linux/backlight.h>
+#include <linux/lcd.h>
+
+#include <linux/leds.h>
+
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+
+#include <linux/completion.h>
+#include <linux/uaccess.h>
+
+#define PICOLCD_NAME "PicoLCD (graphic)"
+
+/* Report numbers */
+#define REPORT_ERROR_CODE 0x10 /* LCD: IN[16] */
+#define ERR_SUCCESS 0x00
+#define ERR_PARAMETER_MISSING 0x01
+#define ERR_DATA_MISSING 0x02
+#define ERR_BLOCK_READ_ONLY 0x03
+#define ERR_BLOCK_NOT_ERASABLE 0x04
+#define ERR_BLOCK_TOO_BIG 0x05
+#define ERR_SECTION_OVERFLOW 0x06
+#define ERR_INVALID_CMD_LEN 0x07
+#define ERR_INVALID_DATA_LEN 0x08
+#define REPORT_KEY_STATE 0x11 /* LCD: IN[2] */
+#define REPORT_IR_DATA 0x21 /* LCD: IN[63] */
+#define REPORT_EE_DATA 0x32 /* LCD: IN[63] */
+#define REPORT_MEMORY 0x41 /* LCD: IN[63] */
+#define REPORT_LED_STATE 0x81 /* LCD: OUT[1] */
+#define REPORT_BRIGHTNESS 0x91 /* LCD: OUT[1] */
+#define REPORT_CONTRAST 0x92 /* LCD: OUT[1] */
+#define REPORT_RESET 0x93 /* LCD: OUT[2] */
+#define REPORT_LCD_CMD 0x94 /* LCD: OUT[63] */
+#define REPORT_LCD_DATA 0x95 /* LCD: OUT[63] */
+#define REPORT_LCD_CMD_DATA 0x96 /* LCD: OUT[63] */
+#define REPORT_EE_READ 0xa3 /* LCD: OUT[63] */
+#define REPORT_EE_WRITE 0xa4 /* LCD: OUT[63] */
+#define REPORT_ERASE_MEMORY 0xb2 /* LCD: OUT[2] */
+#define REPORT_READ_MEMORY 0xb3 /* LCD: OUT[3] */
+#define REPORT_WRITE_MEMORY 0xb4 /* LCD: OUT[63] */
+#define REPORT_SPLASH_RESTART 0xc1 /* LCD: OUT[1] */
+#define REPORT_EXIT_KEYBOARD 0xef /* LCD: OUT[2] */
+#define REPORT_VERSION 0xf1 /* LCD: IN[2],OUT[1] Bootloader: IN[2],OUT[1] */
+#define REPORT_BL_ERASE_MEMORY 0xf2 /* Bootloader: IN[36],OUT[4] */
+#define REPORT_BL_READ_MEMORY 0xf3 /* Bootloader: IN[36],OUT[4] */
+#define REPORT_BL_WRITE_MEMORY 0xf4 /* Bootloader: IN[36],OUT[36] */
+#define REPORT_DEVID 0xf5 /* LCD: IN[5], OUT[1] Bootloader: IN[5],OUT[1] */
+#define REPORT_SPLASH_SIZE 0xf6 /* LCD: IN[4], OUT[1] */
+#define REPORT_HOOK_VERSION 0xf7 /* LCD: IN[2], OUT[1] */
+#define REPORT_EXIT_FLASHER 0xff /* Bootloader: OUT[2] */
+
+#ifdef CONFIG_HID_PICOLCD_FB
+/* Framebuffer
+ *
+ * The PicoLCD uses a Topway LCD module of 256x64 pixels.
+ * This display area is tiled over 4 controllers with 8 tiles
+ * each. Each tile has 8x64 pixels, each data byte representing
+ * a 1-bit wide vertical line of the tile.
+ *
+ * The display can be updated at a tile granularity.
+ *
+ * Chip 1 Chip 2 Chip 3 Chip 4
+ * +----------------+----------------+----------------+----------------+
+ * | Tile 1 | Tile 1 | Tile 1 | Tile 1 |
+ * +----------------+----------------+----------------+----------------+
+ * | Tile 2 | Tile 2 | Tile 2 | Tile 2 |
+ * +----------------+----------------+----------------+----------------+
+ * ...
+ * +----------------+----------------+----------------+----------------+
+ * | Tile 8 | Tile 8 | Tile 8 | Tile 8 |
+ * +----------------+----------------+----------------+----------------+
+ */
+#define PICOLCDFB_NAME "picolcdfb"
+#define PICOLCDFB_WIDTH (256)
+#define PICOLCDFB_HEIGHT (64)
+#define PICOLCDFB_SIZE (PICOLCDFB_WIDTH * PICOLCDFB_HEIGHT / 8)
+
+#define PICOLCDFB_UPDATE_RATE_LIMIT 10
+#define PICOLCDFB_UPDATE_RATE_DEFAULT 2
+
+/* Framebuffer visual structures */
+static const struct fb_fix_screeninfo picolcdfb_fix = {
+ .id = PICOLCDFB_NAME,
+ .type = FB_TYPE_PACKED_PIXELS,
+ .visual = FB_VISUAL_MONO01,
+ .xpanstep = 0,
+ .ypanstep = 0,
+ .ywrapstep = 0,
+ .line_length = PICOLCDFB_WIDTH / 8,
+ .accel = FB_ACCEL_NONE,
+};
+
+static const struct fb_var_screeninfo picolcdfb_var = {
+ .xres = PICOLCDFB_WIDTH,
+ .yres = PICOLCDFB_HEIGHT,
+ .xres_virtual = PICOLCDFB_WIDTH,
+ .yres_virtual = PICOLCDFB_HEIGHT,
+ .width = 103,
+ .height = 26,
+ .bits_per_pixel = 1,
+ .grayscale = 1,
+};
+#endif /* CONFIG_HID_PICOLCD_FB */
+
+/* Input device
+ *
+ * The PicoLCD has an IR receiver header, a built-in keypad with 5 keys
+ * and a header for a 4x4 key matrix. The built-in keys are part of the matrix.
+ */
+static const unsigned short def_keymap[] = {
+ KEY_RESERVED, /* none */
+ KEY_BACK, /* col 4 + row 1 */
+ KEY_HOMEPAGE, /* col 3 + row 1 */
+ KEY_RESERVED, /* col 2 + row 1 */
+ KEY_RESERVED, /* col 1 + row 1 */
+ KEY_SCROLLUP, /* col 4 + row 2 */
+ KEY_OK, /* col 3 + row 2 */
+ KEY_SCROLLDOWN, /* col 2 + row 2 */
+ KEY_RESERVED, /* col 1 + row 2 */
+ KEY_RESERVED, /* col 4 + row 3 */
+ KEY_RESERVED, /* col 3 + row 3 */
+ KEY_RESERVED, /* col 2 + row 3 */
+ KEY_RESERVED, /* col 1 + row 3 */
+ KEY_RESERVED, /* col 4 + row 4 */
+ KEY_RESERVED, /* col 3 + row 4 */
+ KEY_RESERVED, /* col 2 + row 4 */
+ KEY_RESERVED, /* col 1 + row 4 */
+};
+#define PICOLCD_KEYS ARRAY_SIZE(def_keymap)
+
+/* Description of in-progress IO operation, used for operations
+ * that trigger a response from the device */
+struct picolcd_pending {
+ struct hid_report *out_report;
+ struct hid_report *in_report;
+ struct completion ready;
+ int raw_size;
+ u8 raw_data[64];
+};
+
+/* Per device data structure */
+struct picolcd_data {
+ struct hid_device *hdev;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debug_reset;
+ struct dentry *debug_eeprom;
+ struct dentry *debug_flash;
+ struct mutex mutex_flash;
+ int addr_sz;
+#endif
+ u8 version[2];
+ unsigned short opmode_delay;
+ /* input stuff */
+ u8 pressed_keys[2];
+ struct input_dev *input_keys;
+ struct input_dev *input_cir;
+ unsigned short keycode[PICOLCD_KEYS];
+
+#ifdef CONFIG_HID_PICOLCD_FB
+ /* Framebuffer stuff */
+ u8 fb_update_rate;
+ u8 fb_bpp;
+ u8 *fb_vbitmap; /* local copy of what was sent to PicoLCD */
+ u8 *fb_bitmap; /* framebuffer */
+ struct fb_info *fb_info;
+ struct fb_deferred_io fb_defio;
+#endif /* CONFIG_HID_PICOLCD_FB */
+#ifdef CONFIG_HID_PICOLCD_LCD
+ struct lcd_device *lcd;
+ u8 lcd_contrast;
+#endif /* CONFIG_HID_PICOLCD_LCD */
+#ifdef CONFIG_HID_PICOLCD_BACKLIGHT
+ struct backlight_device *backlight;
+ u8 lcd_brightness;
+ u8 lcd_power;
+#endif /* CONFIG_HID_PICOLCD_BACKLIGHT */
+#ifdef CONFIG_HID_PICOLCD_LEDS
+ /* LED stuff */
+ u8 led_state;
+ struct led_classdev *led[8];
+#endif /* CONFIG_HID_PICOLCD_LEDS */
+
+ /* Housekeeping stuff */
+ spinlock_t lock;
+ struct mutex mutex;
+ struct picolcd_pending *pending;
+ int status;
+#define PICOLCD_BOOTLOADER 1
+#define PICOLCD_FAILED 2
+#define PICOLCD_READY_FB 4
+};
+
+
+/* Find a given report */
+#define picolcd_in_report(id, dev) picolcd_report(id, dev, HID_INPUT_REPORT)
+#define picolcd_out_report(id, dev) picolcd_report(id, dev, HID_OUTPUT_REPORT)
+
+static struct hid_report *picolcd_report(int id, struct hid_device *hdev, int dir)
+{
+ struct list_head *feature_report_list = &hdev->report_enum[dir].report_list;
+ struct hid_report *report = NULL;
+
+ list_for_each_entry(report, feature_report_list, list) {
+ if (report->id == id)
+ return report;
+ }
+ dev_warn(&hdev->dev, "No report with id 0x%x found\n", id);
+ return NULL;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void picolcd_debug_out_report(struct picolcd_data *data,
+ struct hid_device *hdev, struct hid_report *report);
+#define usbhid_submit_report(a, b, c) \
+ do { \
+ picolcd_debug_out_report(hid_get_drvdata(a), a, b); \
+ usbhid_submit_report(a, b, c); \
+ } while (0)
+#endif
+
+/* Submit a report and wait for a reply from the device - if the device
+ * fades away or does not respond in time, return NULL */
+static struct picolcd_pending *picolcd_send_and_wait(struct hid_device *hdev,
+ int report_id, const u8 *raw_data, int size)
+{
+ struct picolcd_data *data = hid_get_drvdata(hdev);
+ struct picolcd_pending *work;
+ struct hid_report *report = picolcd_out_report(report_id, hdev);
+ unsigned long flags;
+ int i, j, k;
+
+ if (!report || !data)
+ return NULL;
+ if (data->status & PICOLCD_FAILED)
+ return NULL;
+ work = kzalloc(sizeof(*work), GFP_KERNEL);
+ if (!work)
+ return NULL;
+
+ init_completion(&work->ready);
+ work->out_report = report;
+ work->in_report = NULL;
+ work->raw_size = 0;
+
+ mutex_lock(&data->mutex);
+ spin_lock_irqsave(&data->lock, flags);
+ for (i = k = 0; i < report->maxfield; i++)
+ for (j = 0; j < report->field[i]->report_count; j++) {
+ hid_set_field(report->field[i], j, k < size ? raw_data[k] : 0);
+ k++;
+ }
+ data->pending = work;
+ usbhid_submit_report(data->hdev, report, USB_DIR_OUT);
+ spin_unlock_irqrestore(&data->lock, flags);
+ wait_for_completion_interruptible_timeout(&work->ready, HZ*2);
+ spin_lock_irqsave(&data->lock, flags);
+ data->pending = NULL;
+ spin_unlock_irqrestore(&data->lock, flags);
+ mutex_unlock(&data->mutex);
+ return work;
+}
+
+#ifdef CONFIG_HID_PICOLCD_FB
+/* Send a given tile to PicoLCD */
+static int picolcd_fb_send_tile(struct hid_device *hdev, int chip, int tile)
+{
+ struct picolcd_data *data = hid_get_drvdata(hdev);
+ struct hid_report *report1 = picolcd_out_report(REPORT_LCD_CMD_DATA, hdev);
+ struct hid_report *report2 = picolcd_out_report(REPORT_LCD_DATA, hdev);
+ unsigned long flags;
+ u8 *tdata;
+ int i;
+
+ if (!report1 || report1->maxfield != 1 || !report2 || report2->maxfield != 1)
+ return -ENODEV;
+
+ spin_lock_irqsave(&data->lock, flags);
+ hid_set_field(report1->field[0], 0, chip << 2);
+ hid_set_field(report1->field[0], 1, 0x02);
+ hid_set_field(report1->field[0], 2, 0x00);
+ hid_set_field(report1->field[0], 3, 0x00);
+ hid_set_field(report1->field[0], 4, 0xb8 | tile);
+ hid_set_field(report1->field[0], 5, 0x00);
+ hid_set_field(report1->field[0], 6, 0x00);
+ hid_set_field(report1->field[0], 7, 0x40);
+ hid_set_field(report1->field[0], 8, 0x00);
+ hid_set_field(report1->field[0], 9, 0x00);
+ hid_set_field(report1->field[0], 10, 32);
+
+ hid_set_field(report2->field[0], 0, (chip << 2) | 0x01);
+ hid_set_field(report2->field[0], 1, 0x00);
+ hid_set_field(report2->field[0], 2, 0x00);
+ hid_set_field(report2->field[0], 3, 32);
+
+ tdata = data->fb_vbitmap + (tile * 4 + chip) * 64;
+ for (i = 0; i < 64; i++)
+ if (i < 32)
+ hid_set_field(report1->field[0], 11 + i, tdata[i]);
+ else
+ hid_set_field(report2->field[0], 4 + i - 32, tdata[i]);
+
+ usbhid_submit_report(data->hdev, report1, USB_DIR_OUT);
+ usbhid_submit_report(data->hdev, report2, USB_DIR_OUT);
+ spin_unlock_irqrestore(&data->lock, flags);
+ return 0;
+}
+
+/* Translate a single tile */
+static int picolcd_fb_update_tile(u8 *vbitmap, const u8 *bitmap, int bpp,
+ int chip, int tile)
+{
+ int i, b, changed = 0;
+ u8 tdata[64];
+ u8 *vdata = vbitmap + (tile * 4 + chip) * 64;
+
+ if (bpp == 1) {
+ for (b = 7; b >= 0; b--) {
+ const u8 *bdata = bitmap + tile * 256 + chip * 8 + b * 32;
+ for (i = 0; i < 64; i++) {
+ tdata[i] <<= 1;
+ tdata[i] |= (bdata[i/8] >> (7 - i % 8)) & 0x01;
+ }
+ }
+ } else if (bpp == 8) {
+ for (b = 7; b >= 0; b--) {
+ const u8 *bdata = bitmap + (tile * 256 + chip * 8 + b * 32) * 8;
+ for (i = 0; i < 64; i++) {
+ tdata[i] <<= 1;
+ tdata[i] |= (bdata[i] & 0x80) ? 0x01 : 0x00;
+ }
+ }
+ } else {
+ /* Oops, we should never get here! */
+ WARN_ON(1);
+ return 0;
+ }
+
+ for (i = 0; i < 64; i++)
+ if (tdata[i] != vdata[i]) {
+ changed = 1;
+ vdata[i] = tdata[i];
+ }
+ return changed;
+}
+
+/* Reconfigure LCD display */
+static int picolcd_fb_reset(struct picolcd_data *data, int clear)
+{
+ struct hid_report *report = picolcd_out_report(REPORT_LCD_CMD, data->hdev);
+ int i, j;
+ unsigned long flags;
+ static const u8 mapcmd[8] = { 0x00, 0x02, 0x00, 0x64, 0x3f, 0x00, 0x64, 0xc0 };
+
+ if (!report || report->maxfield != 1)
+ return -ENODEV;
+
+ spin_lock_irqsave(&data->lock, flags);
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < report->field[0]->maxusage; j++)
+ if (j == 0)
+ hid_set_field(report->field[0], j, i << 2);
+ else if (j < sizeof(mapcmd))
+ hid_set_field(report->field[0], j, mapcmd[j]);
+ else
+ hid_set_field(report->field[0], j, 0);
+ usbhid_submit_report(data->hdev, report, USB_DIR_OUT);
+ }
+
+ data->status |= PICOLCD_READY_FB;
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ if (data->fb_bitmap) {
+ if (clear) {
+ memset(data->fb_vbitmap, 0xff, PICOLCDFB_SIZE);
+ memset(data->fb_bitmap, 0, PICOLCDFB_SIZE*data->fb_bpp);
+ } else {
+ /* invert 1 byte in each tile to force resend */
+ for (i = 0; i < PICOLCDFB_SIZE; i += 64)
+ data->fb_vbitmap[i] = ~data->fb_vbitmap[i];
+ }
+ }
+
+ /* schedule first output of framebuffer */
+ if (data->fb_info)
+ schedule_delayed_work(&data->fb_info->deferred_work, 0);
+
+ return 0;
+}
+
+/* Update fb_vbitmap from the screen_base and send changed tiles to device */
+static void picolcd_fb_update(struct picolcd_data *data)
+{
+ int chip, tile, n;
+ unsigned long flags;
+
+ spin_lock_irqsave(&data->lock, flags);
+ if (!(data->status & PICOLCD_READY_FB)) {
+ spin_unlock_irqrestore(&data->lock, flags);
+ picolcd_fb_reset(data, 0);
+ } else {
+ spin_unlock_irqrestore(&data->lock, flags);
+ }
+
+ /*
+ * Translate the framebuffer into the format needed by the PicoLCD.
+ * See display layout above.
+ * Do this one tile after the other and push those tiles that changed.
+ *
+ * Wait for our IO to complete as otherwise we might flood the queue!
+ */
+ n = 0;
+ for (chip = 0; chip < 4; chip++)
+ for (tile = 0; tile < 8; tile++)
+ if (picolcd_fb_update_tile(data->fb_vbitmap,
+ data->fb_bitmap, data->fb_bpp, chip, tile)) {
+ n += 2;
+ if (n >= HID_OUTPUT_FIFO_SIZE / 2) {
+ usbhid_wait_io(data->hdev);
+ n = 0;
+ }
+ picolcd_fb_send_tile(data->hdev, chip, tile);
+ }
+ if (n)
+ usbhid_wait_io(data->hdev);
+}
+
+/* Stub to call the system default and update the image on the picoLCD */
+static void picolcd_fb_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+ if (!info->par)
+ return;
+ sys_fillrect(info, rect);
+
+ schedule_delayed_work(&info->deferred_work, 0);
+}
+
+/* Stub to call the system default and update the image on the picoLCD */
+static void picolcd_fb_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+ if (!info->par)
+ return;
+ sys_copyarea(info, area);
+
+ schedule_delayed_work(&info->deferred_work, 0);
+}
+
+/* Stub to call the system default and update the image on the picoLCD */
+static void picolcd_fb_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+ if (!info->par)
+ return;
+ sys_imageblit(info, image);
+
+ schedule_delayed_work(&info->deferred_work, 0);
+}
+
+/*
+ * This is the slow path from userspace: the caller can seek and write to
+ * the fb. It's inefficient to do anything less than a full screen draw.
+ */
+static ssize_t picolcd_fb_write(struct fb_info *info, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ ssize_t ret;
+ if (!info->par)
+ return -ENODEV;
+ ret = fb_sys_write(info, buf, count, ppos);
+ if (ret >= 0)
+ schedule_delayed_work(&info->deferred_work, 0);
+ return ret;
+}
+
+static int picolcd_fb_blank(int blank, struct fb_info *info)
+{
+ if (!info->par)
+ return -ENODEV;
+ /* We let fb notification do this for us via lcd/backlight device */
+ return 0;
+}
+
+static void picolcd_fb_destroy(struct fb_info *info)
+{
+ struct picolcd_data *data = info->par;
+ info->par = NULL;
+ if (data)
+ data->fb_info = NULL;
+ fb_deferred_io_cleanup(info);
+ framebuffer_release(info);
+}
+
+static int picolcd_fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+ __u32 bpp = var->bits_per_pixel;
+ __u32 activate = var->activate;
+
+ /* only allow 1/8 bit depth (8-bit is grayscale) */
+ *var = picolcdfb_var;
+ var->activate = activate;
+ if (bpp >= 8)
+ var->bits_per_pixel = 8;
+ else
+ var->bits_per_pixel = 1;
+ return 0;
+}
+
+static int picolcd_set_par(struct fb_info *info)
+{
+ struct picolcd_data *data = info->par;
+ u8 *o_fb, *n_fb;
+ if (info->var.bits_per_pixel == data->fb_bpp)
+ return 0;
+ /* switch between 1/8 bit depths */
+ if (info->var.bits_per_pixel != 1 && info->var.bits_per_pixel != 8)
+ return -EINVAL;
+
+ o_fb = data->fb_bitmap;
+ n_fb = vmalloc(PICOLCDFB_SIZE*info->var.bits_per_pixel);
+ if (!n_fb)
+ return -ENOMEM;
+
+ fb_deferred_io_cleanup(info);
+ /* translate FB content to new bits-per-pixel */
+ if (info->var.bits_per_pixel == 1) {
+ int i, b;
+ for (i = 0; i < PICOLCDFB_SIZE; i++) {
+ u8 p = 0;
+ for (b = 0; b < 8; b++) {
+ p <<= 1;
+ p |= o_fb[i*8+b] ? 0x01 : 0x00;
+ }
+ n_fb[i] = p; /* store the converted byte */
+ }
+ info->fix.visual = FB_VISUAL_MONO01;
+ info->fix.line_length = PICOLCDFB_WIDTH / 8;
+ } else {
+ int i;
+ for (i = 0; i < PICOLCDFB_SIZE * 8; i++)
+ n_fb[i] = o_fb[i/8] & (0x01 << (7 - i % 8)) ? 0xff : 0x00;
+ info->fix.visual = FB_VISUAL_TRUECOLOR;
+ info->fix.line_length = PICOLCDFB_WIDTH;
+ }
+
+ data->fb_bitmap = n_fb;
+ data->fb_bpp = info->var.bits_per_pixel;
+ info->screen_base = (char __force __iomem *)n_fb;
+ info->fix.smem_start = (unsigned long)n_fb;
+ info->fix.smem_len = PICOLCDFB_SIZE*data->fb_bpp;
+ fb_deferred_io_init(info);
+ vfree(o_fb);
+ return 0;
+}
+
+/* Note this can't be const because of struct fb_info definition */
+static struct fb_ops picolcdfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_destroy = picolcd_fb_destroy,
+ .fb_read = fb_sys_read,
+ .fb_write = picolcd_fb_write,
+ .fb_blank = picolcd_fb_blank,
+ .fb_fillrect = picolcd_fb_fillrect,
+ .fb_copyarea = picolcd_fb_copyarea,
+ .fb_imageblit = picolcd_fb_imageblit,
+ .fb_check_var = picolcd_fb_check_var,
+ .fb_set_par = picolcd_set_par,
+};
+
+
+/* Callback from deferred IO workqueue */
+static void picolcd_fb_deferred_io(struct fb_info *info, struct list_head *pagelist)
+{
+ picolcd_fb_update(info->par);
+}
+
+static const struct fb_deferred_io picolcd_fb_defio = {
+ .delay = HZ / PICOLCDFB_UPDATE_RATE_DEFAULT,
+ .deferred_io = picolcd_fb_deferred_io,
+};
+
+
+/*
+ * The "fb_update_rate" sysfs attribute
+ */
+static ssize_t picolcd_fb_update_rate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct picolcd_data *data = dev_get_drvdata(dev);
+ unsigned i, fb_update_rate = data->fb_update_rate;
+ size_t ret = 0;
+
+ for (i = 1; i <= PICOLCDFB_UPDATE_RATE_LIMIT; i++)
+ if (ret >= PAGE_SIZE)
+ break;
+ else if (i == fb_update_rate)
+ ret += snprintf(buf+ret, PAGE_SIZE-ret, "[%u] ", i);
+ else
+ ret += snprintf(buf+ret, PAGE_SIZE-ret, "%u ", i);
+ if (ret > 0)
+ buf[min(ret, (size_t)PAGE_SIZE)-1] = '\n';
+ return ret;
+}
+
+static ssize_t picolcd_fb_update_rate_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct picolcd_data *data = dev_get_drvdata(dev);
+ int i;
+ unsigned u;
+
+ if (count < 1 || count > 10)
+ return -EINVAL;
+
+ i = sscanf(buf, "%u", &u);
+ if (i != 1)
+ return -EINVAL;
+
+ if (u > PICOLCDFB_UPDATE_RATE_LIMIT)
+ return -ERANGE;
+ else if (u == 0)
+ u = PICOLCDFB_UPDATE_RATE_DEFAULT;
+
+ data->fb_update_rate = u;
+ data->fb_defio.delay = HZ / data->fb_update_rate;
+ return count;
+}
+
+static DEVICE_ATTR(fb_update_rate, 0666, picolcd_fb_update_rate_show,
+ picolcd_fb_update_rate_store);
+
+/* initialize Framebuffer device */
+static int picolcd_init_framebuffer(struct picolcd_data *data)
+{
+ struct device *dev = &data->hdev->dev;
+ struct fb_info *info = NULL;
+ int error = -ENOMEM;
+ u8 *fb_vbitmap = NULL;
+ u8 *fb_bitmap = NULL;
+
+ fb_bitmap = vmalloc(PICOLCDFB_SIZE*picolcdfb_var.bits_per_pixel);
+ if (fb_bitmap == NULL) {
+ dev_err(dev, "can't get a free page for framebuffer\n");
+ goto err_nomem;
+ }
+
+ fb_vbitmap = kmalloc(PICOLCDFB_SIZE, GFP_KERNEL);
+ if (fb_vbitmap == NULL) {
+ dev_err(dev, "can't alloc vbitmap image buffer\n");
+ goto err_nomem;
+ }
+
+ data->fb_update_rate = PICOLCDFB_UPDATE_RATE_DEFAULT;
+ data->fb_defio = picolcd_fb_defio;
+ info = framebuffer_alloc(0, dev);
+ if (info == NULL) {
+ dev_err(dev, "failed to allocate a framebuffer\n");
+ goto err_nomem;
+ }
+
+ info->fbdefio = &data->fb_defio;
+ info->screen_base = (char __force __iomem *)fb_bitmap;
+ info->fbops = &picolcdfb_ops;
+ info->var = picolcdfb_var;
+ info->fix = picolcdfb_fix;
+ info->fix.smem_len = PICOLCDFB_SIZE;
+ info->fix.smem_start = (unsigned long)fb_bitmap;
+ info->par = data;
+ info->flags = FBINFO_FLAG_DEFAULT;
+
+ data->fb_vbitmap = fb_vbitmap;
+ data->fb_bitmap = fb_bitmap;
+ data->fb_bpp = picolcdfb_var.bits_per_pixel;
+ error = picolcd_fb_reset(data, 1);
+ if (error) {
+ dev_err(dev, "failed to configure display\n");
+ goto err_cleanup;
+ }
+ error = device_create_file(dev, &dev_attr_fb_update_rate);
+ if (error) {
+ dev_err(dev, "failed to create sysfs attributes\n");
+ goto err_cleanup;
+ }
+ data->fb_info = info;
+ error = register_framebuffer(info);
+ if (error) {
+ dev_err(dev, "failed to register framebuffer\n");
+ goto err_sysfs;
+ }
+ fb_deferred_io_init(info);
+ /* schedule first output of framebuffer */
+ schedule_delayed_work(&info->deferred_work, 0);
+ return 0;
+
+err_sysfs:
+ device_remove_file(dev, &dev_attr_fb_update_rate);
+err_cleanup:
+ data->fb_vbitmap = NULL;
+ data->fb_bitmap = NULL;
+ data->fb_bpp = 0;
+ data->fb_info = NULL;
+
+err_nomem:
+ framebuffer_release(info);
+ vfree(fb_bitmap);
+ kfree(fb_vbitmap);
+ return error;
+}
+
+static void picolcd_exit_framebuffer(struct picolcd_data *data)
+{
+ struct fb_info *info = data->fb_info;
+ u8 *fb_vbitmap = data->fb_vbitmap;
+ u8 *fb_bitmap = data->fb_bitmap;
+
+ if (!info)
+ return;
+
+ data->fb_vbitmap = NULL;
+ data->fb_bitmap = NULL;
+ data->fb_bpp = 0;
+ data->fb_info = NULL;
+ device_remove_file(&data->hdev->dev, &dev_attr_fb_update_rate);
+ fb_deferred_io_cleanup(info);
+ unregister_framebuffer(info);
+ vfree(fb_bitmap);
+ kfree(fb_vbitmap);
+}
+
+#define picolcd_fbinfo(d) ((d)->fb_info)
+#else
+static inline int picolcd_fb_reset(struct picolcd_data *data, int clear)
+{
+ return 0;
+}
+static inline int picolcd_init_framebuffer(struct picolcd_data *data)
+{
+ return 0;
+}
+static inline void picolcd_exit_framebuffer(struct picolcd_data *data)
+{
+}
+#define picolcd_fbinfo(d) NULL
+#endif /* CONFIG_HID_PICOLCD_FB */
+
+#ifdef CONFIG_HID_PICOLCD_BACKLIGHT
+/*
+ * backlight class device
+ */
+static int picolcd_get_brightness(struct backlight_device *bdev)
+{
+ struct picolcd_data *data = bl_get_data(bdev);
+ return data->lcd_brightness;
+}
+
+static int picolcd_set_brightness(struct backlight_device *bdev)
+{
+ struct picolcd_data *data = bl_get_data(bdev);
+ struct hid_report *report = picolcd_out_report(REPORT_BRIGHTNESS, data->hdev);
+ unsigned long flags;
+
+ if (!report || report->maxfield != 1 || report->field[0]->report_count != 1)
+ return -ENODEV;
+
+ data->lcd_brightness = bdev->props.brightness & 0x0ff;
+ data->lcd_power = bdev->props.power;
+ spin_lock_irqsave(&data->lock, flags);
+ hid_set_field(report->field[0], 0, data->lcd_power == FB_BLANK_UNBLANK ? data->lcd_brightness : 0);
+ usbhid_submit_report(data->hdev, report, USB_DIR_OUT);
+ spin_unlock_irqrestore(&data->lock, flags);
+ return 0;
+}
+
+static int picolcd_check_bl_fb(struct backlight_device *bdev, struct fb_info *fb)
+{
+ return fb && fb == picolcd_fbinfo((struct picolcd_data *)bl_get_data(bdev));
+}
+
+static const struct backlight_ops picolcd_blops = {
+ .update_status = picolcd_set_brightness,
+ .get_brightness = picolcd_get_brightness,
+ .check_fb = picolcd_check_bl_fb,
+};
+
+static int picolcd_init_backlight(struct picolcd_data *data, struct hid_report *report)
+{
+ struct device *dev = &data->hdev->dev;
+ struct backlight_device *bdev;
+ struct backlight_properties props;
+ if (!report)
+ return -ENODEV;
+ if (report->maxfield != 1 || report->field[0]->report_count != 1 ||
+ report->field[0]->report_size != 8) {
+ dev_err(dev, "unsupported BRIGHTNESS report");
+ return -EINVAL;
+ }
+
+ memset(&props, 0, sizeof(props));
+ props.max_brightness = 0xff;
+ bdev = backlight_device_register(dev_name(dev), dev, data,
+ &picolcd_blops, &props);
+ if (IS_ERR(bdev)) {
+ dev_err(dev, "failed to register backlight\n");
+ return PTR_ERR(bdev);
+ }
+ bdev->props.brightness = 0xff;
+ data->lcd_brightness = 0xff;
+ data->backlight = bdev;
+ picolcd_set_brightness(bdev);
+ return 0;
+}
+
+static void picolcd_exit_backlight(struct picolcd_data *data)
+{
+ struct backlight_device *bdev = data->backlight;
+
+ data->backlight = NULL;
+ if (bdev)
+ backlight_device_unregister(bdev);
+}
+
+static inline int picolcd_resume_backlight(struct picolcd_data *data)
+{
+ if (!data->backlight)
+ return 0;
+ return picolcd_set_brightness(data->backlight);
+}
+
+#ifdef CONFIG_PM
+static void picolcd_suspend_backlight(struct picolcd_data *data)
+{
+ int bl_power = data->lcd_power;
+ if (!data->backlight)
+ return;
+
+ data->backlight->props.power = FB_BLANK_POWERDOWN;
+ picolcd_set_brightness(data->backlight);
+ data->lcd_power = data->backlight->props.power = bl_power;
+}
+#endif /* CONFIG_PM */
+#else
+static inline int picolcd_init_backlight(struct picolcd_data *data,
+ struct hid_report *report)
+{
+ return 0;
+}
+static inline void picolcd_exit_backlight(struct picolcd_data *data)
+{
+}
+static inline int picolcd_resume_backlight(struct picolcd_data *data)
+{
+ return 0;
+}
+static inline void picolcd_suspend_backlight(struct picolcd_data *data)
+{
+}
+#endif /* CONFIG_HID_PICOLCD_BACKLIGHT */
+
+#ifdef CONFIG_HID_PICOLCD_LCD
+/*
+ * lcd class device
+ */
+static int picolcd_get_contrast(struct lcd_device *ldev)
+{
+ struct picolcd_data *data = lcd_get_data(ldev);
+ return data->lcd_contrast;
+}
+
+static int picolcd_set_contrast(struct lcd_device *ldev, int contrast)
+{
+ struct picolcd_data *data = lcd_get_data(ldev);
+ struct hid_report *report = picolcd_out_report(REPORT_CONTRAST, data->hdev);
+ unsigned long flags;
+
+ if (!report || report->maxfield != 1 || report->field[0]->report_count != 1)
+ return -ENODEV;
+
+ data->lcd_contrast = contrast & 0x0ff;
+ spin_lock_irqsave(&data->lock, flags);
+ hid_set_field(report->field[0], 0, data->lcd_contrast);
+ usbhid_submit_report(data->hdev, report, USB_DIR_OUT);
+ spin_unlock_irqrestore(&data->lock, flags);
+ return 0;
+}
+
+static int picolcd_check_lcd_fb(struct lcd_device *ldev, struct fb_info *fb)
+{
+ return fb && fb == picolcd_fbinfo((struct picolcd_data *)lcd_get_data(ldev));
+}
+
+static struct lcd_ops picolcd_lcdops = {
+ .get_contrast = picolcd_get_contrast,
+ .set_contrast = picolcd_set_contrast,
+ .check_fb = picolcd_check_lcd_fb,
+};
+
+static int picolcd_init_lcd(struct picolcd_data *data, struct hid_report *report)
+{
+ struct device *dev = &data->hdev->dev;
+ struct lcd_device *ldev;
+
+ if (!report)
+ return -ENODEV;
+ if (report->maxfield != 1 || report->field[0]->report_count != 1 ||
+ report->field[0]->report_size != 8) {
+ dev_err(dev, "unsupported CONTRAST report");
+ return -EINVAL;
+ }
+
+ ldev = lcd_device_register(dev_name(dev), dev, data, &picolcd_lcdops);
+ if (IS_ERR(ldev)) {
+ dev_err(dev, "failed to register LCD\n");
+ return PTR_ERR(ldev);
+ }
+ ldev->props.max_contrast = 0x0ff;
+ data->lcd_contrast = 0xe5;
+ data->lcd = ldev;
+ picolcd_set_contrast(ldev, 0xe5);
+ return 0;
+}
+
+static void picolcd_exit_lcd(struct picolcd_data *data)
+{
+ struct lcd_device *ldev = data->lcd;
+
+ data->lcd = NULL;
+ if (ldev)
+ lcd_device_unregister(ldev);
+}
+
+static inline int picolcd_resume_lcd(struct picolcd_data *data)
+{
+ if (!data->lcd)
+ return 0;
+ return picolcd_set_contrast(data->lcd, data->lcd_contrast);
+}
+#else
+static inline int picolcd_init_lcd(struct picolcd_data *data,
+ struct hid_report *report)
+{
+ return 0;
+}
+static inline void picolcd_exit_lcd(struct picolcd_data *data)
+{
+}
+static inline int picolcd_resume_lcd(struct picolcd_data *data)
+{
+ return 0;
+}
+#endif /* CONFIG_HID_PICOLCD_LCD */
+
+#ifdef CONFIG_HID_PICOLCD_LEDS
+/*
+ * LED class device
+ */
+static void picolcd_leds_set(struct picolcd_data *data)
+{
+ struct hid_report *report;
+ unsigned long flags;
+
+ if (!data->led[0])
+ return;
+ report = picolcd_out_report(REPORT_LED_STATE, data->hdev);
+ if (!report || report->maxfield != 1 || report->field[0]->report_count != 1)
+ return;
+
+ spin_lock_irqsave(&data->lock, flags);
+ hid_set_field(report->field[0], 0, data->led_state);
+ usbhid_submit_report(data->hdev, report, USB_DIR_OUT);
+ spin_unlock_irqrestore(&data->lock, flags);
+}
+
+static void picolcd_led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct device *dev;
+ struct hid_device *hdev;
+ struct picolcd_data *data;
+ int i, state = 0;
+
+ dev = led_cdev->dev->parent;
+ hdev = container_of(dev, struct hid_device, dev);
+ data = hid_get_drvdata(hdev);
+ for (i = 0; i < 8; i++) {
+ if (led_cdev != data->led[i])
+ continue;
+ state = (data->led_state >> i) & 1;
+ if (value == LED_OFF && state) {
+ data->led_state &= ~(1 << i);
+ picolcd_leds_set(data);
+ } else if (value != LED_OFF && !state) {
+ data->led_state |= 1 << i;
+ picolcd_leds_set(data);
+ }
+ break;
+ }
+}
+
+static enum led_brightness picolcd_led_get_brightness(struct led_classdev *led_cdev)
+{
+ struct device *dev;
+ struct hid_device *hdev;
+ struct picolcd_data *data;
+ int i, value = 0;
+
+ dev = led_cdev->dev->parent;
+ hdev = container_of(dev, struct hid_device, dev);
+ data = hid_get_drvdata(hdev);
+ for (i = 0; i < 8; i++)
+ if (led_cdev == data->led[i]) {
+ value = (data->led_state >> i) & 1;
+ break;
+ }
+ return value ? LED_FULL : LED_OFF;
+}
+
+static int picolcd_init_leds(struct picolcd_data *data, struct hid_report *report)
+{
+ struct device *dev = &data->hdev->dev;
+ struct led_classdev *led;
+ size_t name_sz = strlen(dev_name(dev)) + 8;
+ char *name;
+ int i, ret = 0;
+
+ if (!report)
+ return -ENODEV;
+ if (report->maxfield != 1 || report->field[0]->report_count != 1 ||
+ report->field[0]->report_size != 8) {
+ dev_err(dev, "unsupported LED_STATE report");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < 8; i++) {
+ led = kzalloc(sizeof(struct led_classdev)+name_sz, GFP_KERNEL);
+ if (!led) {
+ dev_err(dev, "can't allocate memory for LED %d\n", i);
+ ret = -ENOMEM;
+ goto err;
+ }
+ name = (void *)(&led[1]);
+ snprintf(name, name_sz, "%s::GPO%d", dev_name(dev), i);
+ led->name = name;
+ led->brightness = 0;
+ led->max_brightness = 1;
+ led->brightness_get = picolcd_led_get_brightness;
+ led->brightness_set = picolcd_led_set_brightness;
+
+ data->led[i] = led;
+ ret = led_classdev_register(dev, data->led[i]);
+ if (ret) {
+ data->led[i] = NULL;
+ kfree(led);
+ dev_err(dev, "can't register LED %d\n", i);
+ goto err;
+ }
+ }
+ return 0;
+err:
+ for (i = 0; i < 8; i++)
+ if (data->led[i]) {
+ led = data->led[i];
+ data->led[i] = NULL;
+ led_classdev_unregister(led);
+ kfree(led);
+ }
+ return ret;
+}
+
+static void picolcd_exit_leds(struct picolcd_data *data)
+{
+ struct led_classdev *led;
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ led = data->led[i];
+ data->led[i] = NULL;
+ if (!led)
+ continue;
+ led_classdev_unregister(led);
+ kfree(led);
+ }
+}
+
+#else
+static inline int picolcd_init_leds(struct picolcd_data *data,
+ struct hid_report *report)
+{
+ return 0;
+}
+static inline void picolcd_exit_leds(struct picolcd_data *data)
+{
+}
+static inline int picolcd_leds_set(struct picolcd_data *data)
+{
+ return 0;
+}
+#endif /* CONFIG_HID_PICOLCD_LEDS */
+
+/*
+ * input class device
+ */
+static int picolcd_raw_keypad(struct picolcd_data *data,
+ struct hid_report *report, u8 *raw_data, int size)
+{
+ /*
+ * Keypad event
+ * First and second data bytes list currently pressed keys,
+ * 0x00 means no key and at most 2 keys may be pressed at the same time
+ */
+ int i, j;
+
+ /* determine newly pressed keys */
+ for (i = 0; i < size; i++) {
+ unsigned int key_code;
+ if (raw_data[i] == 0)
+ continue;
+ for (j = 0; j < sizeof(data->pressed_keys); j++)
+ if (data->pressed_keys[j] == raw_data[i])
+ goto key_already_down;
+ for (j = 0; j < sizeof(data->pressed_keys); j++)
+ if (data->pressed_keys[j] == 0) {
+ data->pressed_keys[j] = raw_data[i];
+ break;
+ }
+ input_event(data->input_keys, EV_MSC, MSC_SCAN, raw_data[i]);
+ if (raw_data[i] < PICOLCD_KEYS)
+ key_code = data->keycode[raw_data[i]];
+ else
+ key_code = KEY_UNKNOWN;
+ if (key_code != KEY_UNKNOWN) {
+ dbg_hid(PICOLCD_NAME " got key press for %u:%d",
+ raw_data[i], key_code);
+ input_report_key(data->input_keys, key_code, 1);
+ }
+ input_sync(data->input_keys);
+key_already_down:
+ continue;
+ }
+
+ /* determine newly released keys */
+ for (j = 0; j < sizeof(data->pressed_keys); j++) {
+ unsigned int key_code;
+ if (data->pressed_keys[j] == 0)
+ continue;
+ for (i = 0; i < size; i++)
+ if (data->pressed_keys[j] == raw_data[i])
+ goto key_still_down;
+ input_event(data->input_keys, EV_MSC, MSC_SCAN, data->pressed_keys[j]);
+ if (data->pressed_keys[j] < PICOLCD_KEYS)
+ key_code = data->keycode[data->pressed_keys[j]];
+ else
+ key_code = KEY_UNKNOWN;
+ if (key_code != KEY_UNKNOWN) {
+ dbg_hid(PICOLCD_NAME " got key release for %u:%d",
+ data->pressed_keys[j], key_code);
+ input_report_key(data->input_keys, key_code, 0);
+ }
+ input_sync(data->input_keys);
+ data->pressed_keys[j] = 0;
+key_still_down:
+ continue;
+ }
+ return 1;
+}
+
+static int picolcd_raw_cir(struct picolcd_data *data,
+ struct hid_report *report, u8 *raw_data, int size)
+{
+ /* Need understanding of CIR data format to implement ... */
+ return 1;
+}
+
+static int picolcd_check_version(struct hid_device *hdev)
+{
+ struct picolcd_data *data = hid_get_drvdata(hdev);
+ struct picolcd_pending *verinfo;
+ int ret = 0;
+
+ if (!data)
+ return -ENODEV;
+
+ verinfo = picolcd_send_and_wait(hdev, REPORT_VERSION, NULL, 0);
+ if (!verinfo) {
+ dev_err(&hdev->dev, "no version response from PicoLCD");
+ return -ENODEV;
+ }
+
+ if (verinfo->raw_size == 2) {
+ data->version[0] = verinfo->raw_data[1];
+ data->version[1] = verinfo->raw_data[0];
+ if (data->status & PICOLCD_BOOTLOADER) {
+ dev_info(&hdev->dev, "PicoLCD, bootloader version %d.%d\n",
+ verinfo->raw_data[1], verinfo->raw_data[0]);
+ } else {
+ dev_info(&hdev->dev, "PicoLCD, firmware version %d.%d\n",
+ verinfo->raw_data[1], verinfo->raw_data[0]);
+ }
+ } else {
+ dev_err(&hdev->dev, "confused, got unexpected version response from PicoLCD\n");
+ ret = -EINVAL;
+ }
+ kfree(verinfo);
+ return ret;
+}
+
+/*
+ * Reset our device and wait for answer to VERSION request
+ */
+static int picolcd_reset(struct hid_device *hdev)
+{
+ struct picolcd_data *data = hid_get_drvdata(hdev);
+ struct hid_report *report = picolcd_out_report(REPORT_RESET, hdev);
+ unsigned long flags;
+ int error;
+
+ if (!data || !report || report->maxfield != 1)
+ return -ENODEV;
+
+ spin_lock_irqsave(&data->lock, flags);
+ if (hdev->product == USB_DEVICE_ID_PICOLCD_BOOTLOADER)
+ data->status |= PICOLCD_BOOTLOADER;
+
+ /* perform the reset */
+ hid_set_field(report->field[0], 0, 1);
+ usbhid_submit_report(hdev, report, USB_DIR_OUT);
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ error = picolcd_check_version(hdev);
+ if (error)
+ return error;
+
+ picolcd_resume_lcd(data);
+ picolcd_resume_backlight(data);
+#ifdef CONFIG_HID_PICOLCD_FB
+ if (data->fb_info)
+ schedule_delayed_work(&data->fb_info->deferred_work, 0);
+#endif /* CONFIG_HID_PICOLCD_FB */
+
+ picolcd_leds_set(data);
+ return 0;
+}
+
+/*
+ * The "operation_mode" sysfs attribute
+ */
+static ssize_t picolcd_operation_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct picolcd_data *data = dev_get_drvdata(dev);
+
+ if (data->status & PICOLCD_BOOTLOADER)
+ return snprintf(buf, PAGE_SIZE, "[bootloader] lcd\n");
+ else
+ return snprintf(buf, PAGE_SIZE, "bootloader [lcd]\n");
+}
+
+static ssize_t picolcd_operation_mode_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct picolcd_data *data = dev_get_drvdata(dev);
+ struct hid_report *report = NULL;
+ size_t cnt = count;
+ int timeout = data->opmode_delay;
+ unsigned long flags;
+
+ if (cnt >= 3 && strncmp("lcd", buf, 3) == 0) {
+ if (data->status & PICOLCD_BOOTLOADER)
+ report = picolcd_out_report(REPORT_EXIT_FLASHER, data->hdev);
+ buf += 3;
+ cnt -= 3;
+ } else if (cnt >= 10 && strncmp("bootloader", buf, 10) == 0) {
+ if (!(data->status & PICOLCD_BOOTLOADER))
+ report = picolcd_out_report(REPORT_EXIT_KEYBOARD, data->hdev);
+ buf += 10;
+ cnt -= 10;
+ }
+ if (!report)
+ return -EINVAL;
+
+ while (cnt > 0 && (buf[cnt-1] == '\n' || buf[cnt-1] == '\r'))
+ cnt--;
+ if (cnt != 0)
+ return -EINVAL;
+
+ spin_lock_irqsave(&data->lock, flags);
+ hid_set_field(report->field[0], 0, timeout & 0xff);
+ hid_set_field(report->field[0], 1, (timeout >> 8) & 0xff);
+ usbhid_submit_report(data->hdev, report, USB_DIR_OUT);
+ spin_unlock_irqrestore(&data->lock, flags);
+ return count;
+}
+
+static DEVICE_ATTR(operation_mode, 0644, picolcd_operation_mode_show,
+ picolcd_operation_mode_store);
+
+/*
+ * The "operation_mode_delay" sysfs attribute
+ */
+static ssize_t picolcd_operation_mode_delay_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct picolcd_data *data = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%hu\n", data->opmode_delay);
+}
+
+static ssize_t picolcd_operation_mode_delay_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct picolcd_data *data = dev_get_drvdata(dev);
+ unsigned u;
+ if (sscanf(buf, "%u", &u) != 1)
+ return -EINVAL;
+ if (u > 30000)
+ return -EINVAL;
+ else
+ data->opmode_delay = u;
+ return count;
+}
+
+static DEVICE_ATTR(operation_mode_delay, 0644, picolcd_operation_mode_delay_show,
+ picolcd_operation_mode_delay_store);
+
+
+#ifdef CONFIG_DEBUG_FS
+/*
+ * The "reset" file
+ */
+static int picolcd_debug_reset_show(struct seq_file *f, void *p)
+{
+ if (picolcd_fbinfo((struct picolcd_data *)f->private))
+ seq_printf(f, "all fb\n");
+ else
+ seq_printf(f, "all\n");
+ return 0;
+}
+
+static int picolcd_debug_reset_open(struct inode *inode, struct file *f)
+{
+ return single_open(f, picolcd_debug_reset_show, inode->i_private);
+}
+
+static ssize_t picolcd_debug_reset_write(struct file *f, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct picolcd_data *data = ((struct seq_file *)f->private_data)->private;
+ char buf[32];
+ size_t cnt = min(count, sizeof(buf)-1);
+ if (copy_from_user(buf, user_buf, cnt))
+ return -EFAULT;
+
+ while (cnt > 0 && (buf[cnt-1] == ' ' || buf[cnt-1] == '\n'))
+ cnt--;
+ buf[cnt] = '\0';
+ if (strcmp(buf, "all") == 0) {
+ picolcd_reset(data->hdev);
+ picolcd_fb_reset(data, 1);
+ } else if (strcmp(buf, "fb") == 0) {
+ picolcd_fb_reset(data, 1);
+ } else {
+ return -EINVAL;
+ }
+ return count;
+}
+
+static const struct file_operations picolcd_debug_reset_fops = {
+ .owner = THIS_MODULE,
+ .open = picolcd_debug_reset_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = picolcd_debug_reset_write,
+ .release = single_release,
+};
+
+/*
+ * The "eeprom" file
+ */
+static int picolcd_debug_eeprom_open(struct inode *i, struct file *f)
+{
+ f->private_data = i->i_private;
+ return 0;
+}
+
+static ssize_t picolcd_debug_eeprom_read(struct file *f, char __user *u,
+ size_t s, loff_t *off)
+{
+ struct picolcd_data *data = f->private_data;
+ struct picolcd_pending *resp;
+ u8 raw_data[3];
+ ssize_t ret = -EIO;
+
+ if (s == 0)
+ return -EINVAL;
+ if (*off > 0x0ff)
+ return 0;
+
+ /* prepare buffer with info about what we want to read (addr & len) */
+ raw_data[0] = *off & 0xff;
+ raw_data[1] = (*off >> 8) & 0xff;
+ raw_data[2] = s < 20 ? s : 20;
+ if (*off + raw_data[2] > 0xff)
+ raw_data[2] = 0x100 - *off;
+ resp = picolcd_send_and_wait(data->hdev, REPORT_EE_READ, raw_data,
+ sizeof(raw_data));
+ if (!resp)
+ return -EIO;
+
+ if (resp->in_report && resp->in_report->id == REPORT_EE_DATA) {
+ /* successful read :) */
+ ret = resp->raw_data[2];
+ if (ret > s)
+ ret = s;
+ if (copy_to_user(u, resp->raw_data+3, ret))
+ ret = -EFAULT;
+ else
+ *off += ret;
+ } /* anything else is some kind of IO error */
+
+ kfree(resp);
+ return ret;
+}
+
+static ssize_t picolcd_debug_eeprom_write(struct file *f, const char __user *u,
+ size_t s, loff_t *off)
+{
+ struct picolcd_data *data = f->private_data;
+ struct picolcd_pending *resp;
+ ssize_t ret = -EIO;
+ u8 raw_data[23];
+
+ if (s == 0)
+ return -EINVAL;
+ if (*off > 0x0ff)
+ return -ENOSPC;
+
+ memset(raw_data, 0, sizeof(raw_data));
+ raw_data[0] = *off & 0xff;
+ raw_data[1] = (*off >> 8) & 0xff;
+ raw_data[2] = s < 20 ? s : 20;
+ if (*off + raw_data[2] > 0xff)
+ raw_data[2] = 0x100 - *off;
+
+ if (copy_from_user(raw_data+3, u, raw_data[2]))
+ return -EFAULT;
+ resp = picolcd_send_and_wait(data->hdev, REPORT_EE_WRITE, raw_data,
+ sizeof(raw_data));
+
+ if (!resp)
+ return -EIO;
+
+ if (resp->in_report && resp->in_report->id == REPORT_EE_DATA) {
+ /* check if written data matches */
+ if (memcmp(raw_data, resp->raw_data, 3+raw_data[2]) == 0) {
+ *off += raw_data[2];
+ ret = raw_data[2];
+ }
+ }
+ kfree(resp);
+ return ret;
+}
+
+/*
+ * Notes:
+ * - read/write happens in chunks of at most 20 bytes; it's up to userspace
+ * to loop in order to get more data.
+ * - on write errors for an otherwise correct write request, the bytes
+ * that should have been written are left in an undefined state.
+ */
+static const struct file_operations picolcd_debug_eeprom_fops = {
+ .owner = THIS_MODULE,
+ .open = picolcd_debug_eeprom_open,
+ .read = picolcd_debug_eeprom_read,
+ .write = picolcd_debug_eeprom_write,
+ .llseek = generic_file_llseek,
+};
+
+/*
+ * The "flash" file
+ */
+static int picolcd_debug_flash_open(struct inode *i, struct file *f)
+{
+ f->private_data = i->i_private;
+ return 0;
+}
+
+/* record a flash address to buf (bounds check to be done by caller) */
+static int _picolcd_flash_setaddr(struct picolcd_data *data, u8 *buf, long off)
+{
+ buf[0] = off & 0xff;
+ buf[1] = (off >> 8) & 0xff;
+ if (data->addr_sz == 3)
+ buf[2] = (off >> 16) & 0xff;
+ return data->addr_sz == 2 ? 2 : 3;
+}
+
+/* read a given size of data (bounds check to be done by caller) */
+static ssize_t _picolcd_flash_read(struct picolcd_data *data, int report_id,
+ char __user *u, size_t s, loff_t *off)
+{
+ struct picolcd_pending *resp;
+ u8 raw_data[4];
+ ssize_t ret = 0;
+ int len_off, err = -EIO;
+
+ while (s > 0) {
+ err = -EIO;
+ len_off = _picolcd_flash_setaddr(data, raw_data, *off);
+ raw_data[len_off] = s > 32 ? 32 : s;
+ resp = picolcd_send_and_wait(data->hdev, report_id, raw_data, len_off+1);
+ if (!resp || !resp->in_report)
+ goto skip;
+ if (resp->in_report->id == REPORT_MEMORY ||
+ resp->in_report->id == REPORT_BL_READ_MEMORY) {
+ if (memcmp(raw_data, resp->raw_data, len_off+1) != 0)
+ goto skip;
+ if (copy_to_user(u+ret, resp->raw_data+len_off+1, raw_data[len_off])) {
+ err = -EFAULT;
+ goto skip;
+ }
+ *off += raw_data[len_off];
+ s -= raw_data[len_off];
+ ret += raw_data[len_off];
+ err = 0;
+ }
+skip:
+ kfree(resp);
+ if (err)
+ return ret > 0 ? ret : err;
+ }
+ return ret;
+}
+
+static ssize_t picolcd_debug_flash_read(struct file *f, char __user *u,
+ size_t s, loff_t *off)
+{
+ struct picolcd_data *data = f->private_data;
+
+ if (s == 0)
+ return -EINVAL;
+ if (*off > 0x05fff)
+ return 0;
+ if (*off + s > 0x05fff)
+ s = 0x06000 - *off;
+
+ if (data->status & PICOLCD_BOOTLOADER)
+ return _picolcd_flash_read(data, REPORT_BL_READ_MEMORY, u, s, off);
+ else
+ return _picolcd_flash_read(data, REPORT_READ_MEMORY, u, s, off);
+}
+
+/* erase a block aligned to a 64-byte boundary */
+static ssize_t _picolcd_flash_erase64(struct picolcd_data *data, int report_id,
+ loff_t *off)
+{
+ struct picolcd_pending *resp;
+ u8 raw_data[3];
+ int len_off;
+ ssize_t ret = -EIO;
+
+ if (*off & 0x3f)
+ return -EINVAL;
+
+ len_off = _picolcd_flash_setaddr(data, raw_data, *off);
+ resp = picolcd_send_and_wait(data->hdev, report_id, raw_data, len_off);
+ if (!resp || !resp->in_report)
+ goto skip;
+ if (resp->in_report->id == REPORT_MEMORY ||
+ resp->in_report->id == REPORT_BL_ERASE_MEMORY) {
+ if (memcmp(raw_data, resp->raw_data, len_off) != 0)
+ goto skip;
+ ret = 0;
+ }
+skip:
+ kfree(resp);
+ return ret;
+}
+
+/* write a given size of data (bounds check to be done by caller) */
+static ssize_t _picolcd_flash_write(struct picolcd_data *data, int report_id,
+ const char __user *u, size_t s, loff_t *off)
+{
+ struct picolcd_pending *resp;
+ u8 raw_data[36];
+ ssize_t ret = 0;
+ int len_off, err = -EIO;
+
+ while (s > 0) {
+ err = -EIO;
+ len_off = _picolcd_flash_setaddr(data, raw_data, *off);
+ raw_data[len_off] = s > 32 ? 32 : s;
+ if (copy_from_user(raw_data+len_off+1, u, raw_data[len_off])) {
+ err = -EFAULT;
+ goto skip;
+ }
+ resp = picolcd_send_and_wait(data->hdev, report_id, raw_data,
+ len_off+1+raw_data[len_off]);
+ if (!resp || !resp->in_report)
+ goto skip;
+ if (resp->in_report->id == REPORT_MEMORY ||
+ resp->in_report->id == REPORT_BL_WRITE_MEMORY) {
+ if (memcmp(raw_data, resp->raw_data, len_off+1+raw_data[len_off]) != 0)
+ goto skip;
+ *off += raw_data[len_off];
+ s -= raw_data[len_off];
+ ret += raw_data[len_off];
+ err = 0;
+ }
+skip:
+ kfree(resp);
+ if (err)
+ break;
+ }
+ return ret > 0 ? ret : err;
+}
+
+static ssize_t picolcd_debug_flash_write(struct file *f, const char __user *u,
+ size_t s, loff_t *off)
+{
+ struct picolcd_data *data = f->private_data;
+ ssize_t err, ret = 0;
+ int report_erase, report_write;
+
+ if (s == 0)
+ return -EINVAL;
+ if (*off > 0x5fff)
+ return -ENOSPC;
+ if (s & 0x3f)
+ return -EINVAL;
+ if (*off & 0x3f)
+ return -EINVAL;
+
+ if (data->status & PICOLCD_BOOTLOADER) {
+ report_erase = REPORT_BL_ERASE_MEMORY;
+ report_write = REPORT_BL_WRITE_MEMORY;
+ } else {
+ report_erase = REPORT_ERASE_MEMORY;
+ report_write = REPORT_WRITE_MEMORY;
+ }
+ mutex_lock(&data->mutex_flash);
+ while (s > 0) {
+ err = _picolcd_flash_erase64(data, report_erase, off);
+ if (err)
+ break;
+ err = _picolcd_flash_write(data, report_write, u, 64, off);
+ if (err < 0)
+ break;
+ ret += err;
+ *off += err;
+ s -= err;
+ if (err != 64)
+ break;
+ }
+ mutex_unlock(&data->mutex_flash);
+ return ret > 0 ? ret : err;
+}
+
+/*
+ * Notes:
+ * - concurrent writing is prevented by a mutex and all writes must be
+ * n*64 bytes and 64-byte aligned, each write being preceded by an
+ * ERASE which erases a 64-byte block.
+ * If less than requested was written, or an error is returned for an
+ * otherwise correct write request, the next 64-byte block which should
+ * have been written is left in an undefined state (mostly: original,
+ * erased, or (half-)written with a write error)
+ * - reading can happen without special restrictions
+ */
+static const struct file_operations picolcd_debug_flash_fops = {
+ .owner = THIS_MODULE,
+ .open = picolcd_debug_flash_open,
+ .read = picolcd_debug_flash_read,
+ .write = picolcd_debug_flash_write,
+ .llseek = generic_file_llseek,
+};
+
+
+/*
+ * Helper code for HID report level dumping/debugging
+ */
+static const char *error_codes[] = {
+ "success", "parameter missing", "data_missing", "block readonly",
+ "block not erasable", "block too big", "section overflow",
+ "invalid command length", "invalid data length",
+};
+
+static void dump_buff_as_hex(char *dst, size_t dst_sz, const u8 *data,
+ const size_t data_len)
+{
+ int i, j;
+ for (i = j = 0; i < data_len && j + 3 < dst_sz; i++) {
+ dst[j++] = hex_asc[(data[i] >> 4) & 0x0f];
+ dst[j++] = hex_asc[data[i] & 0x0f];
+ dst[j++] = ' ';
+ }
+ if (j < dst_sz) {
+ dst[j--] = '\0';
+ dst[j] = '\n';
+ } else
+ dst[j] = '\0';
+}
+
+static void picolcd_debug_out_report(struct picolcd_data *data,
+ struct hid_device *hdev, struct hid_report *report)
+{
+ u8 raw_data[70];
+ int raw_size = (report->size >> 3) + 1;
+ char *buff;
+#define BUFF_SZ 256
+
+ /* Avoid unnecessary overhead if debugfs is disabled */
+ if (!hdev->debug_events)
+ return;
+
+ buff = kmalloc(BUFF_SZ, GFP_ATOMIC);
+ if (!buff)
+ return;
+
+ snprintf(buff, BUFF_SZ, "\nout report %d (size %d) = ",
+ report->id, raw_size);
+ hid_debug_event(hdev, buff);
+ if (raw_size + 5 > sizeof(raw_data)) {
+ hid_debug_event(hdev, " TOO BIG\n");
+ kfree(buff);
+ return;
+ } else {
+ raw_data[0] = report->id;
+ hid_output_report(report, raw_data);
+ dump_buff_as_hex(buff, BUFF_SZ, raw_data, raw_size);
+ hid_debug_event(hdev, buff);
+ }
+
+ switch (report->id) {
+ case REPORT_LED_STATE:
+ /* 1 data byte with GPO state */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_LED_STATE", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tGPO state: 0x%02x\n", raw_data[1]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_BRIGHTNESS:
+ /* 1 data byte with brightness */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_BRIGHTNESS", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tBrightness: 0x%02x\n", raw_data[1]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_CONTRAST:
+ /* 1 data byte with contrast */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_CONTRAST", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tContrast: 0x%02x\n", raw_data[1]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_RESET:
+ /* 2 data bytes with reset duration in ms */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_RESET", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tDuration: 0x%02x%02x (%dms)\n",
+ raw_data[2], raw_data[1], raw_data[2] << 8 | raw_data[1]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_LCD_CMD:
+ /* 63 data bytes with LCD commands */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_LCD_CMD", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ /* TODO: format decoding */
+ break;
+ case REPORT_LCD_DATA:
+ /* 63 data bytes with LCD data */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_LCD_CMD", report->id, raw_size-1);
+ /* TODO: format decoding */
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_LCD_CMD_DATA:
+ /* 63 data bytes with LCD commands and data */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_LCD_CMD", report->id, raw_size-1);
+ /* TODO: format decoding */
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_EE_READ:
+ /* 3 data bytes with read area description */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_EE_READ", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n",
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_EE_WRITE:
+ /* 3+1..20 data bytes with write area description */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_EE_WRITE", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n",
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]);
+ hid_debug_event(hdev, buff);
+ if (raw_data[3] == 0) {
+ snprintf(buff, BUFF_SZ, "\tNo data\n");
+ } else if (raw_data[3] + 4 <= raw_size) {
+ snprintf(buff, BUFF_SZ, "\tData: ");
+ hid_debug_event(hdev, buff);
+ dump_buff_as_hex(buff, BUFF_SZ, raw_data+4, raw_data[3]);
+ } else {
+ snprintf(buff, BUFF_SZ, "\tData overflowed\n");
+ }
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_ERASE_MEMORY:
+ case REPORT_BL_ERASE_MEMORY:
+ /* 3 data bytes with pointer inside erase block */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_ERASE_MEMORY", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ switch (data->addr_sz) {
+ case 2:
+ snprintf(buff, BUFF_SZ, "\tAddress inside 64 byte block: 0x%02x%02x\n",
+ raw_data[2], raw_data[1]);
+ break;
+ case 3:
+ snprintf(buff, BUFF_SZ, "\tAddress inside 64 byte block: 0x%02x%02x%02x\n",
+ raw_data[3], raw_data[2], raw_data[1]);
+ break;
+ default:
+ snprintf(buff, BUFF_SZ, "\tNot supported\n");
+ }
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_READ_MEMORY:
+ case REPORT_BL_READ_MEMORY:
+ /* 4 data bytes with read area description */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_READ_MEMORY", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ switch (data->addr_sz) {
+ case 2:
+ snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n",
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]);
+ break;
+ case 3:
+ snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x%02x\n",
+ raw_data[3], raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[4]);
+ break;
+ default:
+ snprintf(buff, BUFF_SZ, "\tNot supported\n");
+ }
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_WRITE_MEMORY:
+ case REPORT_BL_WRITE_MEMORY:
+ /* 4+1..32 data bytes with write area description */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_WRITE_MEMORY", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ switch (data->addr_sz) {
+ case 2:
+ snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n",
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]);
+ hid_debug_event(hdev, buff);
+ if (raw_data[3] == 0) {
+ snprintf(buff, BUFF_SZ, "\tNo data\n");
+ } else if (raw_data[3] + 4 <= raw_size) {
+ snprintf(buff, BUFF_SZ, "\tData: ");
+ hid_debug_event(hdev, buff);
+ dump_buff_as_hex(buff, BUFF_SZ, raw_data+4, raw_data[3]);
+ } else {
+ snprintf(buff, BUFF_SZ, "\tData overflowed\n");
+ }
+ break;
+ case 3:
+ snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x%02x\n",
+ raw_data[3], raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[4]);
+ hid_debug_event(hdev, buff);
+ if (raw_data[4] == 0) {
+ snprintf(buff, BUFF_SZ, "\tNo data\n");
+ } else if (raw_data[4] + 5 <= raw_size) {
+ snprintf(buff, BUFF_SZ, "\tData: ");
+ hid_debug_event(hdev, buff);
+ dump_buff_as_hex(buff, BUFF_SZ, raw_data+5, raw_data[4]);
+ } else {
+ snprintf(buff, BUFF_SZ, "\tData overflowed\n");
+ }
+ break;
+ default:
+ snprintf(buff, BUFF_SZ, "\tNot supported\n");
+ }
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_SPLASH_RESTART:
+ /* TODO */
+ break;
+ case REPORT_EXIT_KEYBOARD:
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_EXIT_KEYBOARD", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tRestart delay: %dms (0x%02x%02x)\n",
+ raw_data[1] | (raw_data[2] << 8),
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_VERSION:
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_VERSION", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_DEVID:
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_DEVID", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_SPLASH_SIZE:
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_SPLASH_SIZE", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_HOOK_VERSION:
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_HOOK_VERSION", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_EXIT_FLASHER:
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_VERSION", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tRestart delay: %dms (0x%02x%02x)\n",
+ raw_data[1] | (raw_data[2] << 8),
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ break;
+ default:
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "<unknown>", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ break;
+ }
+ wake_up_interruptible(&hdev->debug_wait);
+ kfree(buff);
+}
+
+static void picolcd_debug_raw_event(struct picolcd_data *data,
+ struct hid_device *hdev, struct hid_report *report,
+ u8 *raw_data, int size)
+{
+ char *buff;
+
+#define BUFF_SZ 256
+ /* Avoid unnecessary overhead if debugfs is disabled */
+ if (!hdev->debug_events)
+ return;
+
+ buff = kmalloc(BUFF_SZ, GFP_ATOMIC);
+ if (!buff)
+ return;
+
+ switch (report->id) {
+ case REPORT_ERROR_CODE:
+ /* 2 data bytes with affected report and error code */
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_ERROR_CODE", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ if (raw_data[2] < ARRAY_SIZE(error_codes))
+ snprintf(buff, BUFF_SZ, "\tError code 0x%02x (%s) in reply to report 0x%02x\n",
+ raw_data[2], error_codes[raw_data[2]], raw_data[1]);
+ else
+ snprintf(buff, BUFF_SZ, "\tError code 0x%02x in reply to report 0x%02x\n",
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_KEY_STATE:
+ /* 2 data bytes with key state */
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_KEY_STATE", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ if (raw_data[1] == 0)
+ snprintf(buff, BUFF_SZ, "\tNo key pressed\n");
+ else if (raw_data[2] == 0)
+ snprintf(buff, BUFF_SZ, "\tOne key pressed: 0x%02x (%d)\n",
+ raw_data[1], raw_data[1]);
+ else
+ snprintf(buff, BUFF_SZ, "\tTwo keys pressed: 0x%02x (%d), 0x%02x (%d)\n",
+ raw_data[1], raw_data[1], raw_data[2], raw_data[2]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_IR_DATA:
+ /* Up to 20 bytes of IR scancode data */
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_IR_DATA", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ if (raw_data[1] == 0) {
+ snprintf(buff, BUFF_SZ, "\tUnexpectedly 0 data length\n");
+ hid_debug_event(hdev, buff);
+ } else if (raw_data[1] + 1 <= size) {
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n\tIR Data: ",
+ raw_data[1]-1);
+ hid_debug_event(hdev, buff);
+ dump_buff_as_hex(buff, BUFF_SZ, raw_data+2, raw_data[1]-1);
+ hid_debug_event(hdev, buff);
+ } else {
+ snprintf(buff, BUFF_SZ, "\tOverflowing data length: %d\n",
+ raw_data[1]-1);
+ hid_debug_event(hdev, buff);
+ }
+ break;
+ case REPORT_EE_DATA:
+ /* Data buffer in response to REPORT_EE_READ or REPORT_EE_WRITE */
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_EE_DATA", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n",
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]);
+ hid_debug_event(hdev, buff);
+ if (raw_data[3] == 0) {
+ snprintf(buff, BUFF_SZ, "\tNo data\n");
+ hid_debug_event(hdev, buff);
+ } else if (raw_data[3] + 4 <= size) {
+ snprintf(buff, BUFF_SZ, "\tData: ");
+ hid_debug_event(hdev, buff);
+ dump_buff_as_hex(buff, BUFF_SZ, raw_data+4, raw_data[3]);
+ hid_debug_event(hdev, buff);
+ } else {
+ snprintf(buff, BUFF_SZ, "\tData overflowed\n");
+ hid_debug_event(hdev, buff);
+ }
+ break;
+ case REPORT_MEMORY:
+ /* Data buffer in response to REPORT_READ_MEMORY or REPORT_WRITE_MEMORY */
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_MEMORY", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ switch (data->addr_sz) {
+ case 2:
+ snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n",
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]);
+ hid_debug_event(hdev, buff);
+ if (raw_data[3] == 0) {
+ snprintf(buff, BUFF_SZ, "\tNo data\n");
+ } else if (raw_data[3] + 4 <= size) {
+ snprintf(buff, BUFF_SZ, "\tData: ");
+ hid_debug_event(hdev, buff);
+ dump_buff_as_hex(buff, BUFF_SZ, raw_data+4, raw_data[3]);
+ } else {
+ snprintf(buff, BUFF_SZ, "\tData overflowed\n");
+ }
+ break;
+ case 3:
+ snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x%02x\n",
+ raw_data[3], raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[4]);
+ hid_debug_event(hdev, buff);
+ if (raw_data[4] == 0) {
+ snprintf(buff, BUFF_SZ, "\tNo data\n");
+ } else if (raw_data[4] + 5 <= size) {
+ snprintf(buff, BUFF_SZ, "\tData: ");
+ hid_debug_event(hdev, buff);
+ dump_buff_as_hex(buff, BUFF_SZ, raw_data+5, raw_data[4]);
+ } else {
+ snprintf(buff, BUFF_SZ, "\tData overflowed\n");
+ }
+ break;
+ default:
+ snprintf(buff, BUFF_SZ, "\tNot supported\n");
+ }
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_VERSION:
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_VERSION", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tFirmware version: %d.%d\n",
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_BL_ERASE_MEMORY:
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_BL_ERASE_MEMORY", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ /* TODO */
+ break;
+ case REPORT_BL_READ_MEMORY:
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_BL_READ_MEMORY", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ /* TODO */
+ break;
+ case REPORT_BL_WRITE_MEMORY:
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_BL_WRITE_MEMORY", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ /* TODO */
+ break;
+ case REPORT_DEVID:
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_DEVID", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tSerial: 0x%02x%02x%02x%02x\n",
+ raw_data[1], raw_data[2], raw_data[3], raw_data[4]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tType: 0x%02x\n",
+ raw_data[5]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_SPLASH_SIZE:
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_SPLASH_SIZE", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tTotal splash space: %d\n",
+ (raw_data[2] << 8) | raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tUsed splash space: %d\n",
+ (raw_data[4] << 8) | raw_data[3]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_HOOK_VERSION:
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_HOOK_VERSION", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tFirmware version: %d.%d\n",
+ raw_data[1], raw_data[2]);
+ hid_debug_event(hdev, buff);
+ break;
+ default:
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "<unknown>", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ break;
+ }
+ wake_up_interruptible(&hdev->debug_wait);
+ kfree(buff);
+}
+
+static void picolcd_init_devfs(struct picolcd_data *data,
+ struct hid_report *eeprom_r, struct hid_report *eeprom_w,
+ struct hid_report *flash_r, struct hid_report *flash_w,
+ struct hid_report *reset)
+{
+ struct hid_device *hdev = data->hdev;
+
+ mutex_init(&data->mutex_flash);
+
+ /* reset */
+ if (reset)
+ data->debug_reset = debugfs_create_file("reset", 0600,
+ hdev->debug_dir, data, &picolcd_debug_reset_fops);
+
+ /* eeprom */
+ if (eeprom_r || eeprom_w)
+ data->debug_eeprom = debugfs_create_file("eeprom",
+ (eeprom_w ? S_IWUSR : 0) | (eeprom_r ? S_IRUSR : 0),
+ hdev->debug_dir, data, &picolcd_debug_eeprom_fops);
+
+ /* flash */
+ if (flash_r && flash_r->maxfield == 1 && flash_r->field[0]->report_size == 8)
+ data->addr_sz = flash_r->field[0]->report_count - 1;
+ else
+ data->addr_sz = -1;
+ if (data->addr_sz == 2 || data->addr_sz == 3) {
+ data->debug_flash = debugfs_create_file("flash",
+ (flash_w ? S_IWUSR : 0) | (flash_r ? S_IRUSR : 0),
+ hdev->debug_dir, data, &picolcd_debug_flash_fops);
+ } else if (flash_r || flash_w)
+ dev_warn(&hdev->dev, "Unexpected FLASH access reports, "
+ "please submit rdesc for review\n");
+}
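+
+/*
+ * Example (assumed report layout, for illustration only): a flash read
+ * report with maxfield == 1, report_size == 8 and report_count == 4
+ * (3 address bytes plus 1 length byte) yields addr_sz == 3, so the "flash"
+ * debugfs file is created; report_count == 3 would give addr_sz == 2.
+ * Any other layout, with flash reports present, only triggers the warning
+ * above.
+ */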
+
+static void picolcd_exit_devfs(struct picolcd_data *data)
+{
+ struct dentry *dent;
+
+ dent = data->debug_reset;
+ data->debug_reset = NULL;
+ if (dent)
+ debugfs_remove(dent);
+ dent = data->debug_eeprom;
+ data->debug_eeprom = NULL;
+ if (dent)
+ debugfs_remove(dent);
+ dent = data->debug_flash;
+ data->debug_flash = NULL;
+ if (dent)
+ debugfs_remove(dent);
+ mutex_destroy(&data->mutex_flash);
+}
+#else
+static inline void picolcd_debug_raw_event(struct picolcd_data *data,
+ struct hid_device *hdev, struct hid_report *report,
+ u8 *raw_data, int size)
+{
+}
+static inline void picolcd_init_devfs(struct picolcd_data *data,
+ struct hid_report *eeprom_r, struct hid_report *eeprom_w,
+ struct hid_report *flash_r, struct hid_report *flash_w,
+ struct hid_report *reset)
+{
+}
+static inline void picolcd_exit_devfs(struct picolcd_data *data)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+/*
+ * Handle raw report as sent by device
+ */
+static int picolcd_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *raw_data, int size)
+{
+ struct picolcd_data *data = hid_get_drvdata(hdev);
+ unsigned long flags;
+ int ret = 0;
+
+ if (!data)
+ return 1;
+
+ if (report->id == REPORT_KEY_STATE) {
+ if (data->input_keys)
+ ret = picolcd_raw_keypad(data, report, raw_data+1, size-1);
+ } else if (report->id == REPORT_IR_DATA) {
+ if (data->input_cir)
+ ret = picolcd_raw_cir(data, report, raw_data+1, size-1);
+ } else {
+ spin_lock_irqsave(&data->lock, flags);
+ /*
+ * We let the caller of picolcd_send_and_wait() check if the
+ * report we got is one of the expected ones or not.
+ */
+ if (data->pending) {
+ memcpy(data->pending->raw_data, raw_data+1, size-1);
+ data->pending->raw_size = size-1;
+ data->pending->in_report = report;
+ complete(&data->pending->ready);
+ }
+ spin_unlock_irqrestore(&data->lock, flags);
+ }
+
+ picolcd_debug_raw_event(data, hdev, report, raw_data, size);
+ return 1;
+}
+
+#ifdef CONFIG_PM
+static int picolcd_suspend(struct hid_device *hdev, pm_message_t message)
+{
+ if (message.event & PM_EVENT_AUTO)
+ return 0;
+
+ picolcd_suspend_backlight(hid_get_drvdata(hdev));
+ dbg_hid(PICOLCD_NAME " device ready for suspend\n");
+ return 0;
+}
+
+static int picolcd_resume(struct hid_device *hdev)
+{
+ int ret;
+ ret = picolcd_resume_backlight(hid_get_drvdata(hdev));
+ if (ret)
+ dbg_hid(PICOLCD_NAME " restoring backlight failed: %d\n", ret);
+ return 0;
+}
+
+static int picolcd_reset_resume(struct hid_device *hdev)
+{
+ int ret;
+ ret = picolcd_reset(hdev);
+ if (ret)
+ dbg_hid(PICOLCD_NAME " resetting our device failed: %d\n", ret);
+ ret = picolcd_fb_reset(hid_get_drvdata(hdev), 0);
+ if (ret)
+ dbg_hid(PICOLCD_NAME " restoring framebuffer content failed: %d\n", ret);
+ ret = picolcd_resume_lcd(hid_get_drvdata(hdev));
+ if (ret)
+ dbg_hid(PICOLCD_NAME " restoring lcd failed: %d\n", ret);
+ ret = picolcd_resume_backlight(hid_get_drvdata(hdev));
+ if (ret)
+ dbg_hid(PICOLCD_NAME " restoring backlight failed: %d\n", ret);
+ picolcd_leds_set(hid_get_drvdata(hdev));
+ return 0;
+}
+#endif
+
+/* initialize keypad input device */
+static int picolcd_init_keys(struct picolcd_data *data,
+ struct hid_report *report)
+{
+ struct hid_device *hdev = data->hdev;
+ struct input_dev *idev;
+ int error, i;
+
+ if (!report)
+ return -ENODEV;
+ if (report->maxfield != 1 || report->field[0]->report_count != 2 ||
+ report->field[0]->report_size != 8) {
+ dev_err(&hdev->dev, "unsupported KEY_STATE report");
+ return -EINVAL;
+ }
+
+ idev = input_allocate_device();
+ if (idev == NULL) {
+ dev_err(&hdev->dev, "failed to allocate input device");
+ return -ENOMEM;
+ }
+ input_set_drvdata(idev, hdev);
+ memcpy(data->keycode, def_keymap, sizeof(def_keymap));
+ idev->name = hdev->name;
+ idev->phys = hdev->phys;
+ idev->uniq = hdev->uniq;
+ idev->id.bustype = hdev->bus;
+ idev->id.vendor = hdev->vendor;
+ idev->id.product = hdev->product;
+ idev->id.version = hdev->version;
+ idev->dev.parent = hdev->dev.parent;
+ idev->keycode = &data->keycode;
+ idev->keycodemax = PICOLCD_KEYS;
+ idev->keycodesize = sizeof(data->keycode[0]);
+ input_set_capability(idev, EV_MSC, MSC_SCAN);
+ set_bit(EV_REP, idev->evbit);
+ for (i = 0; i < PICOLCD_KEYS; i++)
+ input_set_capability(idev, EV_KEY, data->keycode[i]);
+ error = input_register_device(idev);
+ if (error) {
+ dev_err(&hdev->dev, "error registering the input device");
+ input_free_device(idev);
+ return error;
+ }
+ data->input_keys = idev;
+ return 0;
+}
+
+static void picolcd_exit_keys(struct picolcd_data *data)
+{
+ struct input_dev *idev = data->input_keys;
+
+ data->input_keys = NULL;
+ if (idev)
+ input_unregister_device(idev);
+}
+
+/* initialize CIR input device */
+static inline int picolcd_init_cir(struct picolcd_data *data, struct hid_report *report)
+{
+ /* support not implemented yet */
+ return 0;
+}
+
+static inline void picolcd_exit_cir(struct picolcd_data *data)
+{
+}
+
+static int picolcd_probe_lcd(struct hid_device *hdev, struct picolcd_data *data)
+{
+ int error;
+
+ error = picolcd_check_version(hdev);
+ if (error)
+ return error;
+
+ if (data->version[0] != 0 || data->version[1] != 3)
+ dev_info(&hdev->dev, "Device with untested firmware revision, "
+ "please submit /sys/kernel/debug/hid/%s/rdesc for this device.\n",
+ dev_name(&hdev->dev));
+
+ /* Setup keypad input device */
+ error = picolcd_init_keys(data, picolcd_in_report(REPORT_KEY_STATE, hdev));
+ if (error)
+ goto err;
+
+ /* Setup CIR input device */
+ error = picolcd_init_cir(data, picolcd_in_report(REPORT_IR_DATA, hdev));
+ if (error)
+ goto err;
+
+ /* Set up the framebuffer device */
+ error = picolcd_init_framebuffer(data);
+ if (error)
+ goto err;
+
+ /* Setup lcd class device */
+ error = picolcd_init_lcd(data, picolcd_out_report(REPORT_CONTRAST, hdev));
+ if (error)
+ goto err;
+
+ /* Setup backlight class device */
+ error = picolcd_init_backlight(data, picolcd_out_report(REPORT_BRIGHTNESS, hdev));
+ if (error)
+ goto err;
+
+ /* Setup the LED class devices */
+ error = picolcd_init_leds(data, picolcd_out_report(REPORT_LED_STATE, hdev));
+ if (error)
+ goto err;
+
+ picolcd_init_devfs(data, picolcd_out_report(REPORT_EE_READ, hdev),
+ picolcd_out_report(REPORT_EE_WRITE, hdev),
+ picolcd_out_report(REPORT_READ_MEMORY, hdev),
+ picolcd_out_report(REPORT_WRITE_MEMORY, hdev),
+ picolcd_out_report(REPORT_RESET, hdev));
+ return 0;
+err:
+ picolcd_exit_leds(data);
+ picolcd_exit_backlight(data);
+ picolcd_exit_lcd(data);
+ picolcd_exit_framebuffer(data);
+ picolcd_exit_cir(data);
+ picolcd_exit_keys(data);
+ return error;
+}
+
+static int picolcd_probe_bootloader(struct hid_device *hdev, struct picolcd_data *data)
+{
+ int error;
+
+ error = picolcd_check_version(hdev);
+ if (error)
+ return error;
+
+ if (data->version[0] != 1 || data->version[1] != 0)
+ dev_info(&hdev->dev, "Device with untested bootloader revision, "
+ "please submit /sys/kernel/debug/hid/%s/rdesc for this device.\n",
+ dev_name(&hdev->dev));
+
+ picolcd_init_devfs(data, NULL, NULL,
+ picolcd_out_report(REPORT_BL_READ_MEMORY, hdev),
+ picolcd_out_report(REPORT_BL_WRITE_MEMORY, hdev), NULL);
+ return 0;
+}
+
+static int picolcd_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ struct picolcd_data *data;
+ int error = -ENOMEM;
+
+ dbg_hid(PICOLCD_NAME " hardware probe...\n");
+
+ /*
+ * Let's allocate the picolcd data structure, set some reasonable
+ * defaults, and associate it with the device
+ */
+ data = kzalloc(sizeof(struct picolcd_data), GFP_KERNEL);
+ if (data == NULL) {
+ dev_err(&hdev->dev, "can't allocate space for Minibox PicoLCD device data\n");
+ error = -ENOMEM;
+ goto err_no_cleanup;
+ }
+
+ spin_lock_init(&data->lock);
+ mutex_init(&data->mutex);
+ data->hdev = hdev;
+ data->opmode_delay = 5000;
+ if (hdev->product == USB_DEVICE_ID_PICOLCD_BOOTLOADER)
+ data->status |= PICOLCD_BOOTLOADER;
+ hid_set_drvdata(hdev, data);
+
+ /* Parse the device reports and start it up */
+ error = hid_parse(hdev);
+ if (error) {
+ dev_err(&hdev->dev, "device report parse failed\n");
+ goto err_cleanup_data;
+ }
+
+ /* We don't use hidinput but hid_hw_start() fails if nothing is
+ * claimed. So spoof claimed input. */
+ hdev->claimed = HID_CLAIMED_INPUT;
+ error = hid_hw_start(hdev, 0);
+ hdev->claimed = 0;
+ if (error) {
+ dev_err(&hdev->dev, "hardware start failed\n");
+ goto err_cleanup_data;
+ }
+
+ error = hdev->ll_driver->open(hdev);
+ if (error) {
+ dev_err(&hdev->dev, "failed to open input interrupt pipe for key and IR events\n");
+ goto err_cleanup_hid_hw;
+ }
+
+ error = device_create_file(&hdev->dev, &dev_attr_operation_mode_delay);
+ if (error) {
+ dev_err(&hdev->dev, "failed to create sysfs attributes\n");
+ goto err_cleanup_hid_ll;
+ }
+
+ error = device_create_file(&hdev->dev, &dev_attr_operation_mode);
+ if (error) {
+ dev_err(&hdev->dev, "failed to create sysfs attributes\n");
+ goto err_cleanup_sysfs1;
+ }
+
+ if (data->status & PICOLCD_BOOTLOADER)
+ error = picolcd_probe_bootloader(hdev, data);
+ else
+ error = picolcd_probe_lcd(hdev, data);
+ if (error)
+ goto err_cleanup_sysfs2;
+
+ dbg_hid(PICOLCD_NAME " activated and initialized\n");
+ return 0;
+
+err_cleanup_sysfs2:
+ device_remove_file(&hdev->dev, &dev_attr_operation_mode);
+err_cleanup_sysfs1:
+ device_remove_file(&hdev->dev, &dev_attr_operation_mode_delay);
+err_cleanup_hid_ll:
+ hdev->ll_driver->close(hdev);
+err_cleanup_hid_hw:
+ hid_hw_stop(hdev);
+err_cleanup_data:
+ kfree(data);
+err_no_cleanup:
+ hid_set_drvdata(hdev, NULL);
+
+ return error;
+}
+
+static void picolcd_remove(struct hid_device *hdev)
+{
+ struct picolcd_data *data = hid_get_drvdata(hdev);
+ unsigned long flags;
+
+ dbg_hid(PICOLCD_NAME " hardware remove...\n");
+ spin_lock_irqsave(&data->lock, flags);
+ data->status |= PICOLCD_FAILED;
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ picolcd_exit_devfs(data);
+ device_remove_file(&hdev->dev, &dev_attr_operation_mode);
+ device_remove_file(&hdev->dev, &dev_attr_operation_mode_delay);
+ hdev->ll_driver->close(hdev);
+ hid_hw_stop(hdev);
+ hid_set_drvdata(hdev, NULL);
+
+ /* Shortcut potential pending reply that will never arrive */
+ spin_lock_irqsave(&data->lock, flags);
+ if (data->pending)
+ complete(&data->pending->ready);
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ /* Cleanup LED */
+ picolcd_exit_leds(data);
+ /* Clean up the framebuffer */
+ picolcd_exit_backlight(data);
+ picolcd_exit_lcd(data);
+ picolcd_exit_framebuffer(data);
+ /* Cleanup input */
+ picolcd_exit_cir(data);
+ picolcd_exit_keys(data);
+
+ mutex_destroy(&data->mutex);
+ /* Finally, clean up the picolcd data itself */
+ kfree(data);
+}
+
+static const struct hid_device_id picolcd_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, picolcd_devices);
+
+static struct hid_driver picolcd_driver = {
+ .name = "hid-picolcd",
+ .id_table = picolcd_devices,
+ .probe = picolcd_probe,
+ .remove = picolcd_remove,
+ .raw_event = picolcd_raw_event,
+#ifdef CONFIG_PM
+ .suspend = picolcd_suspend,
+ .resume = picolcd_resume,
+ .reset_resume = picolcd_reset_resume,
+#endif
+};
+
+static int __init picolcd_init(void)
+{
+ return hid_register_driver(&picolcd_driver);
+}
+
+static void __exit picolcd_exit(void)
+{
+ hid_unregister_driver(&picolcd_driver);
+}
+
+module_init(picolcd_init);
+module_exit(picolcd_exit);
+MODULE_DESCRIPTION("Minibox graphics PicoLCD Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
new file mode 100644
index 000000000000..7b1178496621
--- /dev/null
+++ b/drivers/hid/hid-roccat-kone.c
@@ -0,0 +1,1007 @@
+/*
+ * Roccat Kone driver for Linux
+ *
+ * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/*
+ * Roccat Kone is a gamer mouse which consists of a mouse part and a keyboard
+ * part. The keyboard part enables the mouse to execute stored macros with mixed
+ * key- and button-events.
+ *
+ * TODO implement on-the-fly polling-rate change
+ * The windows driver has the ability to change the polling rate of the
+ * device on the press of a mousebutton.
+ * Is it possible to remove and reinstall the urb in raw-event- or any
+ * other handler, or to defer this action to be executed somewhere else?
+ *
+ * TODO implement notification mechanism for overlong macro execution
+ * If user wants to execute an overlong macro only the names of macroset
+ * and macro are given. Should userland tap hidraw or is there an
+ * additional streaming mechanism?
+ *
+ * TODO is it possible to overwrite group for sysfs attributes via udev?
+ */
+
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/hid.h>
+#include <linux/usb.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "hid-ids.h"
+#include "hid-roccat-kone.h"
+
+static void kone_set_settings_checksum(struct kone_settings *settings)
+{
+ uint16_t checksum = 0;
+ unsigned char *address = (unsigned char *)settings;
+ int i;
+
+ for (i = 0; i < sizeof(struct kone_settings) - 2; ++i, ++address)
+ checksum += *address;
+ settings->checksum = cpu_to_le16(checksum);
+}
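+
+/*
+ * Worked example (illustrative): if the first sizeof(struct kone_settings) - 2
+ * bytes of a settings blob sum to 0x1234, the trailing checksum field is set
+ * to 0x1234 and ends up on the wire as the bytes 0x34 0x12 (little-endian).
+ */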
+
+/*
+ * Checks success after writing data to mouse
+ * On success returns 0
+ * On failure returns errno
+ */
+static int kone_check_write(struct usb_device *usb_dev)
+{
+ int len;
+ unsigned char *data;
+
+ data = kmalloc(1, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ do {
+ /*
+ * Mouse needs 50 msecs until it says ok, but there are
+ * 30 more msecs needed for next write to work.
+ */
+ msleep(80);
+
+ len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
+ USB_REQ_CLEAR_FEATURE,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE |
+ USB_DIR_IN,
+ kone_command_confirm_write, 0, data, 1,
+ USB_CTRL_SET_TIMEOUT);
+
+ if (len != 1) {
+ kfree(data);
+ return -EIO;
+ }
+
+ /*
+ * value of 3 seems to mean something like
+ * "not finished yet, but it looks good"
+ * So check again after a moment.
+ */
+ } while (*data == 3);
+
+ if (*data == 1) { /* everything alright */
+ kfree(data);
+ return 0;
+ } else { /* unknown answer */
+ dev_err(&usb_dev->dev, "got retval %d when checking write\n",
+ *data);
+ kfree(data);
+ return -EIO;
+ }
+}
+
+/*
+ * Reads settings from the mouse and stores them in @buf
+ * @buf has to be allocated with GFP_KERNEL
+ * On success returns 0
+ * On failure returns errno
+ */
+static int kone_get_settings(struct usb_device *usb_dev,
+ struct kone_settings *buf)
+{
+ int len;
+
+ len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
+ USB_REQ_CLEAR_FEATURE,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+ kone_command_settings, 0, buf,
+ sizeof(struct kone_settings), USB_CTRL_SET_TIMEOUT);
+
+ if (len != sizeof(struct kone_settings))
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * Writes settings from @buf to mouse
+ * On success returns 0
+ * On failure returns errno
+ */
+static int kone_set_settings(struct usb_device *usb_dev,
+ struct kone_settings const *settings)
+{
+ int len;
+
+ len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
+ USB_REQ_SET_CONFIGURATION,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
+ kone_command_settings, 0, (char *)settings,
+ sizeof(struct kone_settings),
+ USB_CTRL_SET_TIMEOUT);
+
+ if (len != sizeof(struct kone_settings))
+ return -EIO;
+
+ if (kone_check_write(usb_dev))
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * Reads profile data from mouse and stores it in @buf
+ * @number: profile number to read
+ * On success returns 0
+ * On failure returns errno
+ */
+static int kone_get_profile(struct usb_device *usb_dev,
+ struct kone_profile *buf, int number)
+{
+ int len;
+
+ if (number < 1 || number > 5)
+ return -EINVAL;
+
+ len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
+ USB_REQ_CLEAR_FEATURE,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+ kone_command_profile, number, buf,
+ sizeof(struct kone_profile), USB_CTRL_SET_TIMEOUT);
+
+ if (len != sizeof(struct kone_profile))
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * Writes profile data to mouse.
+ * @number: profile number to write
+ * On success returns 0
+ * On failure returns errno
+ */
+static int kone_set_profile(struct usb_device *usb_dev,
+ struct kone_profile const *profile, int number)
+{
+ int len;
+
+ if (number < 1 || number > 5)
+ return -EINVAL;
+
+ len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
+ USB_REQ_SET_CONFIGURATION,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
+ kone_command_profile, number, (char *)profile,
+ sizeof(struct kone_profile),
+ USB_CTRL_SET_TIMEOUT);
+
+ if (len != sizeof(struct kone_profile))
+ return -EIO;
+
+ if (kone_check_write(usb_dev))
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * Reads value of "fast-clip-weight" and stores it in @result
+ * On success returns 0
+ * On failure returns errno
+ */
+static int kone_get_weight(struct usb_device *usb_dev, int *result)
+{
+ int len;
+ uint8_t *data;
+
+ data = kmalloc(1, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
+ USB_REQ_CLEAR_FEATURE,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+ kone_command_weight, 0, data, 1, USB_CTRL_SET_TIMEOUT);
+
+ if (len != 1) {
+ kfree(data);
+ return -EIO;
+ }
+ *result = (int)*data;
+ kfree(data);
+ return 0;
+}
+
+/*
+ * Reads firmware_version of mouse and stores it in @result
+ * On success returns 0
+ * On failure returns errno
+ */
+static int kone_get_firmware_version(struct usb_device *usb_dev, int *result)
+{
+ int len;
+ unsigned char *data;
+
+ data = kmalloc(2, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
+ USB_REQ_CLEAR_FEATURE,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+ kone_command_firmware_version, 0, data, 2,
+ USB_CTRL_SET_TIMEOUT);
+
+ if (len != 2) {
+ kfree(data);
+ return -EIO;
+ }
+ *result = le16_to_cpu(*(__le16 *)data);
+ kfree(data);
+ return 0;
+}
+
+static ssize_t kone_sysfs_read_settings(struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+
+ if (off >= sizeof(struct kone_settings))
+ return 0;
+
+ if (off + count > sizeof(struct kone_settings))
+ count = sizeof(struct kone_settings) - off;
+
+ mutex_lock(&kone->kone_lock);
+ memcpy(buf, ((char const *)&kone->settings) + off, count);
+ mutex_unlock(&kone->kone_lock);
+
+ return count;
+}
+
+/*
+ * Writing settings automatically activates startup_profile.
+ * This function keeps values in kone_device up to date and assumes that in
+ * case of error the old data is still valid
+ */
+static ssize_t kone_sysfs_write_settings(struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval = 0, difference;
+
+ /* I need to get my data in one piece */
+ if (off != 0 || count != sizeof(struct kone_settings))
+ return -EINVAL;
+
+ mutex_lock(&kone->kone_lock);
+ difference = memcmp(buf, &kone->settings, sizeof(struct kone_settings));
+ if (difference) {
+ retval = kone_set_settings(usb_dev,
+ (struct kone_settings const *)buf);
+ if (!retval)
+ memcpy(&kone->settings, buf,
+ sizeof(struct kone_settings));
+ }
+ mutex_unlock(&kone->kone_lock);
+
+ if (retval)
+ return retval;
+
+ /*
+ * If we get here, treat settings as okay and update actual values
+ * according to startup_profile
+ */
+ kone->actual_profile = kone->settings.startup_profile;
+ kone->actual_dpi = kone->profiles[kone->actual_profile - 1].startup_dpi;
+
+ return sizeof(struct kone_settings);
+}
+
+static ssize_t kone_sysfs_read_profilex(struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count, int number) {
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+
+ if (off >= sizeof(struct kone_profile))
+ return 0;
+
+ if (off + count > sizeof(struct kone_profile))
+ count = sizeof(struct kone_profile) - off;
+
+ mutex_lock(&kone->kone_lock);
+ memcpy(buf, ((char const *)&kone->profiles[number - 1]) + off, count);
+ mutex_unlock(&kone->kone_lock);
+
+ return count;
+}
+
+static ssize_t kone_sysfs_read_profile1(struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 1);
+}
+
+static ssize_t kone_sysfs_read_profile2(struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 2);
+}
+
+static ssize_t kone_sysfs_read_profile3(struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 3);
+}
+
+static ssize_t kone_sysfs_read_profile4(struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 4);
+}
+
+static ssize_t kone_sysfs_read_profile5(struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 5);
+}
+
+/* Writes data only if different to stored data */
+static ssize_t kone_sysfs_write_profilex(struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count, int number) {
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ struct kone_profile *profile;
+ int retval = 0, difference;
+
+ /* I need to get my data in one piece */
+ if (off != 0 || count != sizeof(struct kone_profile))
+ return -EINVAL;
+
+ profile = &kone->profiles[number - 1];
+
+ mutex_lock(&kone->kone_lock);
+ difference = memcmp(buf, profile, sizeof(struct kone_profile));
+ if (difference) {
+ retval = kone_set_profile(usb_dev,
+ (struct kone_profile const *)buf, number);
+ if (!retval)
+ memcpy(profile, buf, sizeof(struct kone_profile));
+ }
+ mutex_unlock(&kone->kone_lock);
+
+ if (retval)
+ return retval;
+
+ return sizeof(struct kone_profile);
+}
+
+static ssize_t kone_sysfs_write_profile1(struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 1);
+}
+
+static ssize_t kone_sysfs_write_profile2(struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 2);
+}
+
+static ssize_t kone_sysfs_write_profile3(struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 3);
+}
+
+static ssize_t kone_sysfs_write_profile4(struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 4);
+}
+
+static ssize_t kone_sysfs_write_profile5(struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 5);
+}
+
+static ssize_t kone_sysfs_show_actual_profile(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ return snprintf(buf, PAGE_SIZE, "%d\n", kone->actual_profile);
+}
+
+static ssize_t kone_sysfs_show_actual_dpi(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ return snprintf(buf, PAGE_SIZE, "%d\n", kone->actual_dpi);
+}
+
+/* weight is read each time, since we don't get informed when it's changed */
+static ssize_t kone_sysfs_show_weight(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int weight = 0;
+ int retval;
+
+ mutex_lock(&kone->kone_lock);
+ retval = kone_get_weight(usb_dev, &weight);
+ mutex_unlock(&kone->kone_lock);
+
+ if (retval)
+ return retval;
+ return snprintf(buf, PAGE_SIZE, "%d\n", weight);
+}
+
+static ssize_t kone_sysfs_show_firmware_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ return snprintf(buf, PAGE_SIZE, "%d\n", kone->firmware_version);
+}
+
+static ssize_t kone_sysfs_show_tcu(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ return snprintf(buf, PAGE_SIZE, "%d\n", kone->settings.tcu);
+}
+
+static int kone_tcu_command(struct usb_device *usb_dev, int number)
+{
+ int len;
+ char *value;
+
+ value = kmalloc(1, GFP_KERNEL);
+ if (!value)
+ return -ENOMEM;
+
+ *value = number;
+
+ len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
+ USB_REQ_SET_CONFIGURATION,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
+ kone_command_calibrate, 0, value, 1,
+ USB_CTRL_SET_TIMEOUT);
+
+ kfree(value);
+ return ((len != 1) ? -EIO : 0);
+}
+
+/*
+ * Calibrating the tcu is the only action that changes settings data inside the
+ * mouse, so this data needs to be reread
+ */
+static ssize_t kone_sysfs_set_tcu(struct device *dev,
+ struct device_attribute *attr, char const *buf, size_t size)
+{
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
+ unsigned long state;
+
+ retval = strict_strtoul(buf, 10, &state);
+ if (retval)
+ return retval;
+
+ if (state != 0 && state != 1)
+ return -EINVAL;
+
+ mutex_lock(&kone->kone_lock);
+
+ if (state == 1) { /* state activate */
+ retval = kone_tcu_command(usb_dev, 1);
+ if (retval)
+ goto exit_unlock;
+ retval = kone_tcu_command(usb_dev, 2);
+ if (retval)
+ goto exit_unlock;
+ ssleep(5); /* tcu needs this time for calibration */
+ retval = kone_tcu_command(usb_dev, 3);
+ if (retval)
+ goto exit_unlock;
+ retval = kone_tcu_command(usb_dev, 0);
+ if (retval)
+ goto exit_unlock;
+ retval = kone_tcu_command(usb_dev, 4);
+ if (retval)
+ goto exit_unlock;
+ /*
+ * Kone needs this time to settle things.
+ * Reading settings too early will result in invalid data.
+ * Roccat's driver waits 1 sec, maybe this time could be
+ * shortened.
+ */
+ ssleep(1);
+ }
+
+ /* calibration changes values in settings, so reread */
+ retval = kone_get_settings(usb_dev, &kone->settings);
+ if (retval)
+ goto exit_no_settings;
+
+ /* only write settings back if activation state is different */
+ if (kone->settings.tcu != state) {
+ kone->settings.tcu = state;
+ kone_set_settings_checksum(&kone->settings);
+
+ retval = kone_set_settings(usb_dev, &kone->settings);
+ if (retval) {
+ dev_err(&usb_dev->dev, "couldn't set tcu state\n");
+ /*
+ * try to reread valid settings into buffer overwriting
+ * first error code
+ */
+ retval = kone_get_settings(usb_dev, &kone->settings);
+ if (retval)
+ goto exit_no_settings;
+ goto exit_unlock;
+ }
+ }
+
+ retval = size;
+ goto exit_unlock;
+exit_no_settings:
+ dev_err(&usb_dev->dev, "couldn't read settings\n");
+exit_unlock:
+ mutex_unlock(&kone->kone_lock);
+ return retval;
+}
+
+static ssize_t kone_sysfs_show_startup_profile(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ return snprintf(buf, PAGE_SIZE, "%d\n", kone->settings.startup_profile);
+}
+
+static ssize_t kone_sysfs_set_startup_profile(struct device *dev,
+ struct device_attribute *attr, char const *buf, size_t size)
+{
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
+ unsigned long new_startup_profile;
+
+ retval = strict_strtoul(buf, 10, &new_startup_profile);
+ if (retval)
+ return retval;
+
+ if (new_startup_profile < 1 || new_startup_profile > 5)
+ return -EINVAL;
+
+ mutex_lock(&kone->kone_lock);
+
+ kone->settings.startup_profile = new_startup_profile;
+ kone_set_settings_checksum(&kone->settings);
+
+ retval = kone_set_settings(usb_dev, &kone->settings);
+
+ mutex_unlock(&kone->kone_lock);
+
+ if (retval)
+ return retval;
+
+ /* changing the startup profile immediately activates this profile */
+ kone->actual_profile = new_startup_profile;
+ kone->actual_dpi = kone->profiles[kone->actual_profile - 1].startup_dpi;
+
+ return size;
+}
+
+/*
+ * This file is used by userland software to find devices that are handled by
+ * this driver. This provides a consistent way across current and older
+ * kernels, where this driver replaced usbhid instead of generic-usb.
+ * Driver capabilities are determined by version number.
+ */
+static ssize_t kone_sysfs_show_driver_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, DRIVER_VERSION "\n");
+}
+
+/*
+ * Read actual dpi settings.
+ * Returns raw value for further processing. Refer to enum kone_polling_rates to
+ * get real value.
+ */
+static DEVICE_ATTR(actual_dpi, 0440, kone_sysfs_show_actual_dpi, NULL);
+
+static DEVICE_ATTR(actual_profile, 0440, kone_sysfs_show_actual_profile, NULL);
+
+/*
+ * The mouse can be equipped with one of four supplied weights from 5 to 20
+ * grams, which are recognized and whose value can be read out.
+ * This returns the raw value reported by the mouse for easy evaluation by
+ * software. Refer to enum kone_weights to get the corresponding real weight.
+ */
+static DEVICE_ATTR(weight, 0440, kone_sysfs_show_weight, NULL);
+
+/*
+ * Prints firmware version stored in mouse as integer.
+ * The raw value reported by the mouse is returned for easy evaluation; to get
+ * the real version number the decimal point has to be shifted 2 positions to
+ * the left. E.g. a value of 138 means 1.38.
+ */
+static DEVICE_ATTR(firmware_version, 0440,
+ kone_sysfs_show_firmware_version, NULL);
+
+/*
+ * Prints state of Tracking Control Unit as number where 0 = off and 1 = on
+ * Writing 0 deactivates tcu and writing 1 calibrates and activates the tcu
+ */
+static DEVICE_ATTR(tcu, 0660, kone_sysfs_show_tcu, kone_sysfs_set_tcu);
+
+/* Prints and takes the number of the profile the mouse starts with */
+static DEVICE_ATTR(startup_profile, 0660,
+ kone_sysfs_show_startup_profile,
+ kone_sysfs_set_startup_profile);
+
+static DEVICE_ATTR(kone_driver_version, 0440,
+ kone_sysfs_show_driver_version, NULL);
+
+static struct attribute *kone_attributes[] = {
+ &dev_attr_actual_dpi.attr,
+ &dev_attr_actual_profile.attr,
+ &dev_attr_weight.attr,
+ &dev_attr_firmware_version.attr,
+ &dev_attr_tcu.attr,
+ &dev_attr_startup_profile.attr,
+ &dev_attr_kone_driver_version.attr,
+ NULL
+};
+
+static struct attribute_group kone_attribute_group = {
+ .attrs = kone_attributes
+};
+
+static struct bin_attribute kone_settings_attr = {
+ .attr = { .name = "settings", .mode = 0660 },
+ .size = sizeof(struct kone_settings),
+ .read = kone_sysfs_read_settings,
+ .write = kone_sysfs_write_settings
+};
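+
+/*
+ * Illustrative user-space sketch, not part of the driver: the binary
+ * "settings" attribute can be read piecewise but must be written in one
+ * piece at offset 0, as enforced in kone_sysfs_write_settings(). The sysfs
+ * path below is an assumption; it depends on the USB topology.
+ *
+ *	struct kone_settings s;
+ *	int fd = open(".../<usb-interface>/settings", O_RDWR);
+ *	pread(fd, &s, sizeof(s), 0);	// fetch current settings
+ *	s.startup_profile = 2;		// pick another startup profile
+ *	// recompute s.checksum as kone_set_settings_checksum() does
+ *	pwrite(fd, &s, sizeof(s), 0);	// whole struct, at offset 0
+ *	close(fd);
+ */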
+
+static struct bin_attribute kone_profile1_attr = {
+ .attr = { .name = "profile1", .mode = 0660 },
+ .size = sizeof(struct kone_profile),
+ .read = kone_sysfs_read_profile1,
+ .write = kone_sysfs_write_profile1
+};
+
+static struct bin_attribute kone_profile2_attr = {
+ .attr = { .name = "profile2", .mode = 0660 },
+ .size = sizeof(struct kone_profile),
+ .read = kone_sysfs_read_profile2,
+ .write = kone_sysfs_write_profile2
+};
+
+static struct bin_attribute kone_profile3_attr = {
+ .attr = { .name = "profile3", .mode = 0660 },
+ .size = sizeof(struct kone_profile),
+ .read = kone_sysfs_read_profile3,
+ .write = kone_sysfs_write_profile3
+};
+
+static struct bin_attribute kone_profile4_attr = {
+ .attr = { .name = "profile4", .mode = 0660 },
+ .size = sizeof(struct kone_profile),
+ .read = kone_sysfs_read_profile4,
+ .write = kone_sysfs_write_profile4
+};
+
+static struct bin_attribute kone_profile5_attr = {
+ .attr = { .name = "profile5", .mode = 0660 },
+ .size = sizeof(struct kone_profile),
+ .read = kone_sysfs_read_profile5,
+ .write = kone_sysfs_write_profile5
+};
+
+static int kone_create_sysfs_attributes(struct usb_interface *intf)
+{
+ int retval;
+
+ retval = sysfs_create_group(&intf->dev.kobj, &kone_attribute_group);
+ if (retval)
+ goto exit_1;
+
+ retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_settings_attr);
+ if (retval)
+ goto exit_2;
+
+ retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_profile1_attr);
+ if (retval)
+ goto exit_3;
+
+ retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_profile2_attr);
+ if (retval)
+ goto exit_4;
+
+ retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_profile3_attr);
+ if (retval)
+ goto exit_5;
+
+ retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_profile4_attr);
+ if (retval)
+ goto exit_6;
+
+ retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_profile5_attr);
+ if (retval)
+ goto exit_7;
+
+ return 0;
+
+exit_7:
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile4_attr);
+exit_6:
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile3_attr);
+exit_5:
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile2_attr);
+exit_4:
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile1_attr);
+exit_3:
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_settings_attr);
+exit_2:
+ sysfs_remove_group(&intf->dev.kobj, &kone_attribute_group);
+exit_1:
+ return retval;
+}
+
+static void kone_remove_sysfs_attributes(struct usb_interface *intf)
+{
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile5_attr);
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile4_attr);
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile3_attr);
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile2_attr);
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile1_attr);
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_settings_attr);
+ sysfs_remove_group(&intf->dev.kobj, &kone_attribute_group);
+}
+
+static int kone_init_kone_device_struct(struct usb_device *usb_dev,
+ struct kone_device *kone)
+{
+ uint i;
+ int retval;
+
+ mutex_init(&kone->kone_lock);
+
+ for (i = 0; i < 5; ++i) {
+ retval = kone_get_profile(usb_dev, &kone->profiles[i], i + 1);
+ if (retval)
+ return retval;
+ }
+
+ retval = kone_get_settings(usb_dev, &kone->settings);
+ if (retval)
+ return retval;
+
+ retval = kone_get_firmware_version(usb_dev, &kone->firmware_version);
+ if (retval)
+ return retval;
+
+ kone->actual_profile = kone->settings.startup_profile;
+ kone->actual_dpi = kone->profiles[kone->actual_profile - 1].startup_dpi;
+
+ return 0;
+}
+
+/*
+ * Since the IGNORE_MOUSE quirk moved to hid-apple, there is no way to bind
+ * only to the mouse part if usbhid is compiled into the kernel and kone is
+ * compiled as a module.
+ * Special behaviour is bound only to the mouse part since only mouse events
+ * contain additional notifications.
+ */
+static int kone_init_specials(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct usb_device *usb_dev = interface_to_usbdev(intf);
+ struct kone_device *kone;
+ int retval;
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ == USB_INTERFACE_PROTOCOL_MOUSE) {
+
+ kone = kzalloc(sizeof(*kone), GFP_KERNEL);
+ if (!kone) {
+ dev_err(&hdev->dev, "can't alloc device descriptor\n");
+ return -ENOMEM;
+ }
+ hid_set_drvdata(hdev, kone);
+
+ retval = kone_init_kone_device_struct(usb_dev, kone);
+ if (retval) {
+ dev_err(&hdev->dev,
+ "couldn't init struct kone_device\n");
+ goto exit_free;
+ }
+ retval = kone_create_sysfs_attributes(intf);
+ if (retval) {
+ dev_err(&hdev->dev, "cannot create sysfs files\n");
+ goto exit_free;
+ }
+ } else {
+ hid_set_drvdata(hdev, NULL);
+ }
+
+ return 0;
+exit_free:
+ kfree(kone);
+ return retval;
+}
+
+
+static void kone_remove_specials(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ == USB_INTERFACE_PROTOCOL_MOUSE) {
+ kone_remove_sysfs_attributes(intf);
+ kfree(hid_get_drvdata(hdev));
+ }
+}
+
+static int kone_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int retval;
+
+ retval = hid_parse(hdev);
+ if (retval) {
+ dev_err(&hdev->dev, "parse failed\n");
+ goto exit;
+ }
+
+ retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (retval) {
+ dev_err(&hdev->dev, "hw start failed\n");
+ goto exit;
+ }
+
+ retval = kone_init_specials(hdev);
+ if (retval) {
+ dev_err(&hdev->dev, "couldn't install mouse\n");
+ goto exit_stop;
+ }
+
+ return 0;
+
+exit_stop:
+ hid_hw_stop(hdev);
+exit:
+ return retval;
+}
+
+static void kone_remove(struct hid_device *hdev)
+{
+ kone_remove_specials(hdev);
+ hid_hw_stop(hdev);
+}
+
+/*
+ * Called for both the keyboard and the mouse part.
+ * Only the mouse part gets information about special events in its extended
+ * event structure.
+ */
+static int kone_raw_event(struct hid_device *hdev, struct hid_report *report,
+ u8 *data, int size)
+{
+ struct kone_device *kone = hid_get_drvdata(hdev);
+ struct kone_mouse_event *event = (struct kone_mouse_event *)data;
+
+ /* keyboard events are always processed by default handler */
+ if (size != sizeof(struct kone_mouse_event))
+ return 0;
+
+ /*
+ * Firmware 1.38 introduced new behaviour for tilt buttons.
+ * Pressed tilt button is reported in each movement event.
+ * Workaround sends only one event per press.
+ */
+ if (kone->last_tilt_state == event->tilt)
+ event->tilt = 0;
+ else
+ kone->last_tilt_state = event->tilt;
+
+ /*
+ * handle special events and keep actual profile and dpi values
+ * up to date
+ */
+ switch (event->event) {
+ case kone_mouse_event_osd_dpi:
+ dev_dbg(&hdev->dev, "osd dpi event. actual dpi %d\n",
+ event->value);
+ return 1; /* return 1 if event was handled */
+ case kone_mouse_event_switch_dpi:
+ kone->actual_dpi = event->value;
+ dev_dbg(&hdev->dev, "switched dpi to %d\n", event->value);
+ return 1;
+ case kone_mouse_event_osd_profile:
+ dev_dbg(&hdev->dev, "osd profile event. actual profile %d\n",
+ event->value);
+ return 1;
+ case kone_mouse_event_switch_profile:
+ kone->actual_profile = event->value;
+ kone->actual_dpi = kone->profiles[kone->actual_profile - 1].
+ startup_dpi;
+ dev_dbg(&hdev->dev, "switched profile to %d\n", event->value);
+ return 1;
+ case kone_mouse_event_call_overlong_macro:
+ dev_dbg(&hdev->dev, "overlong macro called, button %d %s/%s\n",
+ event->macro_key,
+ kone->profiles[kone->actual_profile - 1].
+ button_infos[event->macro_key].macro_set_name,
+ kone->profiles[kone->actual_profile - 1].
+ button_infos[event->macro_key].macro_name
+ );
+ return 1;
+ }
+
+ return 0; /* do further processing */
+}
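+
+/*
+ * Illustrative example: a 12-byte event with event == kone_mouse_event_switch_dpi
+ * and value == 3 updates kone->actual_dpi to 3 and is consumed (return 1),
+ * while plain movement events fall through and return 0 so the default HID
+ * handler processes them.
+ */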
+
+static const struct hid_device_id kone_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(hid, kone_devices);
+
+static struct hid_driver kone_driver = {
+ .name = "kone",
+ .id_table = kone_devices,
+ .probe = kone_probe,
+ .remove = kone_remove,
+ .raw_event = kone_raw_event
+};
+
+static int kone_init(void)
+{
+ return hid_register_driver(&kone_driver);
+}
+
+static void kone_exit(void)
+{
+ hid_unregister_driver(&kone_driver);
+}
+
+module_init(kone_init);
+module_exit(kone_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE(DRIVER_LICENSE);
diff --git a/drivers/hid/hid-roccat-kone.h b/drivers/hid/hid-roccat-kone.h
new file mode 100644
index 000000000000..ee6898c9d92c
--- /dev/null
+++ b/drivers/hid/hid-roccat-kone.h
@@ -0,0 +1,214 @@
+#ifndef __HID_ROCCAT_KONE_H
+#define __HID_ROCCAT_KONE_H
+
+/*
+ * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/types.h>
+
+#define DRIVER_VERSION "v0.3.0"
+#define DRIVER_AUTHOR "Stefan Achatz"
+#define DRIVER_DESC "USB Roccat Kone driver"
+#define DRIVER_LICENSE "GPL v2"
+
+#pragma pack(push)
+#pragma pack(1)
+
+struct kone_keystroke {
+ uint8_t key;
+ uint8_t action;
+ uint16_t period; /* in milliseconds */
+};
+
+enum kone_keystroke_buttons {
+ kone_keystroke_button_1 = 0xf0, /* left mouse button */
+ kone_keystroke_button_2 = 0xf1, /* right mouse button */
+ kone_keystroke_button_3 = 0xf2, /* wheel */
+ kone_keystroke_button_9 = 0xf3, /* side button up */
+ kone_keystroke_button_8 = 0xf4 /* side button down */
+};
+
+enum kone_keystroke_actions {
+ kone_keystroke_action_press = 0,
+ kone_keystroke_action_release = 1
+};
+
+struct kone_button_info {
+ uint8_t number; /* range 1-8 */
+ uint8_t type;
+ uint8_t macro_type; /* 0 = short, 1 = overlong */
+ uint8_t macro_set_name[16]; /* can be max 15 chars long */
+ uint8_t macro_name[16]; /* can be max 15 chars long */
+ uint8_t count;
+ struct kone_keystroke keystrokes[20];
+};
+
+enum kone_button_info_types {
+ /* valid button types until firmware 1.32 */
+ kone_button_info_type_button_1 = 0x1, /* click (left mouse button) */
+ kone_button_info_type_button_2 = 0x2, /* menu (right mouse button) */
+ kone_button_info_type_button_3 = 0x3, /* scroll (wheel) */
+ kone_button_info_type_double_click = 0x4,
+ kone_button_info_type_key = 0x5,
+ kone_button_info_type_macro = 0x6,
+ kone_button_info_type_off = 0x7,
+ /* TODO clarify function and rename */
+ kone_button_info_type_osd_xy_prescaling = 0x8,
+ kone_button_info_type_osd_dpi = 0x9,
+ kone_button_info_type_osd_profile = 0xa,
+ kone_button_info_type_button_9 = 0xb, /* ie forward */
+ kone_button_info_type_button_8 = 0xc, /* ie backward */
+ kone_button_info_type_dpi_up = 0xd, /* internal */
+ kone_button_info_type_dpi_down = 0xe, /* internal */
+ kone_button_info_type_button_7 = 0xf, /* tilt left */
+ kone_button_info_type_button_6 = 0x10, /* tilt right */
+ kone_button_info_type_profile_up = 0x11, /* internal */
+ kone_button_info_type_profile_down = 0x12, /* internal */
+ /* additional valid button types since firmware 1.38 */
+ kone_button_info_type_multimedia_open_player = 0x20,
+ kone_button_info_type_multimedia_next_track = 0x21,
+ kone_button_info_type_multimedia_prev_track = 0x22,
+ kone_button_info_type_multimedia_play_pause = 0x23,
+ kone_button_info_type_multimedia_stop = 0x24,
+ kone_button_info_type_multimedia_mute = 0x25,
+ kone_button_info_type_multimedia_volume_up = 0x26,
+ kone_button_info_type_multimedia_volume_down = 0x27
+};
+
+struct kone_light_info {
+ uint8_t number; /* number of light 1-5 */
+ uint8_t mod; /* 1 = on, 2 = off */
+ uint8_t red; /* range 0x00-0xff */
+ uint8_t green; /* range 0x00-0xff */
+ uint8_t blue; /* range 0x00-0xff */
+};
+
+struct kone_profile {
+ uint16_t size; /* always 975 */
+ uint16_t unused; /* always 0 */
+
+ /*
+ * range 1-5
+ * This number does not need to correspond to the location where the
+ * profile is saved
+ */
+ uint8_t profile; /* range 1-5 */
+
+ uint16_t main_sensitivity; /* range 100-1000 */
+ uint8_t xy_sensitivity_enabled; /* 1 = on, 2 = off */
+ uint16_t x_sensitivity; /* range 100-1000 */
+ uint16_t y_sensitivity; /* range 100-1000 */
+ uint8_t dpi_rate; /* bit 1 = 800, ... */
+ uint8_t startup_dpi; /* range 1-6 */
+ uint8_t polling_rate; /* 1 = 125Hz, 2 = 500Hz, 3 = 1000Hz */
+ /* kone has no dcu
+ * value is always 2 in firmwares <= 1.32 and
+ * 1 in firmwares > 1.32
+ */
+ uint8_t dcu_flag;
+ uint8_t light_effect_1; /* range 1-3 */
+ uint8_t light_effect_2; /* range 1-5 */
+ uint8_t light_effect_3; /* range 1-4 */
+ uint8_t light_effect_speed; /* range 0-255 */
+
+ struct kone_light_info light_infos[5];
+ struct kone_button_info button_infos[8];
+
+ uint16_t checksum; /* holds checksum of struct */
+};
+
+enum kone_polling_rates {
+ kone_polling_rate_125 = 1,
+ kone_polling_rate_500 = 2,
+ kone_polling_rate_1000 = 3
+};
+
+struct kone_settings {
+ uint16_t size; /* always 36 */
+ uint8_t startup_profile; /* 1-5 */
+ uint8_t unknown1;
+ uint8_t tcu; /* 0 = off, 1 = on */
+ uint8_t unknown2[23];
+ uint8_t calibration_data[4];
+ uint8_t unknown3[2];
+ uint16_t checksum;
+};
+
+/*
+ * 12 byte mouse event read by interrupt_read
+ */
+struct kone_mouse_event {
+ uint8_t report_number; /* always 1 */
+ uint8_t button;
+ uint16_t x;
+ uint16_t y;
+ uint8_t wheel; /* up = 1, down = -1 */
+ uint8_t tilt; /* right = 1, left = -1 */
+ uint8_t unknown;
+ uint8_t event;
+ uint8_t value; /* press = 0, release = 1 */
+ uint8_t macro_key; /* 0 to 8 */
+};
+
+enum kone_mouse_events {
+ /* osd events are thought to be displayed on screen */
+ kone_mouse_event_osd_dpi = 0xa0,
+ kone_mouse_event_osd_profile = 0xb0,
+ /* TODO clarify meaning and occurrence of kone_mouse_event_calibration */
+ kone_mouse_event_calibration = 0xc0,
+ kone_mouse_event_call_overlong_macro = 0xe0,
+ /* switch events notify if the user changed values with a mouse button click */
+ kone_mouse_event_switch_dpi = 0xf0,
+ kone_mouse_event_switch_profile = 0xf1
+};
+
+enum kone_commands {
+ kone_command_profile = 0x5a,
+ kone_command_settings = 0x15a,
+ kone_command_firmware_version = 0x25a,
+ kone_command_weight = 0x45a,
+ kone_command_calibrate = 0x55a,
+ kone_command_confirm_write = 0x65a,
+ kone_command_firmware = 0xe5a
+};
+
+#pragma pack(pop)
+
+struct kone_device {
+ /*
+ * Storing actual values when we get informed about changes since there
+ * is no way of getting this information from the device on demand
+ */
+ int actual_profile, actual_dpi;
+ /* Used for neutralizing abnormal tilt button behaviour */
+ int last_tilt_state;
+ /*
+ * It's unlikely that multiple sysfs attributes are accessed at a time,
+ * so only one mutex is used to secure hardware access and profiles and
+ * settings of this struct.
+ */
+ struct mutex kone_lock;
+
+ /*
+ * Storing the data here reduces IO and ensures that data is available
+ * when it's needed (e.g. in the interrupt handler).
+ */
+ struct kone_profile profiles[5];
+ struct kone_settings settings;
+
+ /*
+ * The firmware version doesn't change unless a firmware update is
+ * implemented, so it's read only once
+ */
+ int firmware_version;
+};
+
+#endif
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 7502a4b2fa86..402d5574b574 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -76,7 +76,7 @@ static int sony_set_operational_usb(struct hid_device *hdev)
static int sony_set_operational_bt(struct hid_device *hdev)
{
- unsigned char buf[] = { 0x53, 0xf4, 0x42, 0x03, 0x00, 0x00 };
+ unsigned char buf[] = { 0xf4, 0x42, 0x03, 0x00, 0x00 };
return hdev->hid_output_raw_report(hdev, buf, sizeof(buf), HID_FEATURE_REPORT);
}
diff --git a/drivers/hid/hid-topseed.c b/drivers/hid/hid-topseed.c
index 6925eda1081a..2eebdcc57bcf 100644
--- a/drivers/hid/hid-topseed.c
+++ b/drivers/hid/hid-topseed.c
@@ -3,6 +3,9 @@
*
* Copyright (c) 2008 Lev Babiev
* based on hid-cherry driver
+ *
+ * Modified to also support BTC "Emprex 3009URF III Vista MCE Remote" by
+ * Wayne Thomas 2010.
*/
/*
@@ -24,23 +27,29 @@ static int ts_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
- if ((usage->hid & HID_USAGE_PAGE) != 0x0ffbc0000)
+ if ((usage->hid & HID_USAGE_PAGE) != HID_UP_LOGIVENDOR)
return 0;
switch (usage->hid & HID_USAGE) {
- case 0x00d: ts_map_key_clear(KEY_HOME); break;
- case 0x024: ts_map_key_clear(KEY_MENU); break;
- case 0x025: ts_map_key_clear(KEY_TV); break;
- case 0x048: ts_map_key_clear(KEY_RED); break;
- case 0x047: ts_map_key_clear(KEY_GREEN); break;
- case 0x049: ts_map_key_clear(KEY_YELLOW); break;
- case 0x04a: ts_map_key_clear(KEY_BLUE); break;
- case 0x04b: ts_map_key_clear(KEY_ANGLE); break;
- case 0x04c: ts_map_key_clear(KEY_LANGUAGE); break;
- case 0x04d: ts_map_key_clear(KEY_SUBTITLE); break;
- case 0x031: ts_map_key_clear(KEY_AUDIO); break;
- case 0x032: ts_map_key_clear(KEY_TEXT); break;
- case 0x033: ts_map_key_clear(KEY_CHANNEL); break;
+ case 0x00d: ts_map_key_clear(KEY_MEDIA); break;
+ case 0x024: ts_map_key_clear(KEY_MENU); break;
+ case 0x025: ts_map_key_clear(KEY_TV); break;
+ case 0x031: ts_map_key_clear(KEY_AUDIO); break;
+ case 0x032: ts_map_key_clear(KEY_TEXT); break;
+ case 0x033: ts_map_key_clear(KEY_CHANNEL); break;
+ case 0x047: ts_map_key_clear(KEY_MP3); break;
+ case 0x048: ts_map_key_clear(KEY_TV2); break;
+ case 0x049: ts_map_key_clear(KEY_CAMERA); break;
+ case 0x04a: ts_map_key_clear(KEY_VIDEO); break;
+ case 0x04b: ts_map_key_clear(KEY_ANGLE); break;
+ case 0x04c: ts_map_key_clear(KEY_LANGUAGE); break;
+ case 0x04d: ts_map_key_clear(KEY_SUBTITLE); break;
+ case 0x050: ts_map_key_clear(KEY_RADIO); break;
+ case 0x05a: ts_map_key_clear(KEY_TEXT); break;
+ case 0x05b: ts_map_key_clear(KEY_RED); break;
+ case 0x05c: ts_map_key_clear(KEY_GREEN); break;
+ case 0x05d: ts_map_key_clear(KEY_YELLOW); break;
+ case 0x05e: ts_map_key_clear(KEY_BLUE); break;
default:
return 0;
}
@@ -50,6 +59,7 @@ static int ts_input_mapping(struct hid_device *hdev, struct hid_input *hi,
static const struct hid_device_id ts_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
{ }
};
MODULE_DEVICE_TABLE(hid, ts_devices);
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c
index f7700cf49721..1e051f1171e4 100644
--- a/drivers/hid/hid-wacom.c
+++ b/drivers/hid/hid-wacom.c
@@ -22,14 +22,159 @@
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/slab.h>
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+#include <linux/power_supply.h>
+#endif
#include "hid-ids.h"
struct wacom_data {
__u16 tool;
unsigned char butstate;
+ unsigned char high_speed;
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+ int battery_capacity;
+ struct power_supply battery;
+ struct power_supply ac;
+#endif
};
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+/* percent of battery capacity, 0 means AC online */
+static unsigned short batcap[8] = { 1, 15, 25, 35, 50, 70, 100, 0 };
+
+static enum power_supply_property wacom_battery_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_CAPACITY
+};
+
+static enum power_supply_property wacom_ac_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE
+};
+
+static int wacom_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct wacom_data *wdata = container_of(psy,
+ struct wacom_data, battery);
+ int power_state = batcap[wdata->battery_capacity];
+ int ret = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ /* show 100% battery capacity when charging */
+ if (power_state == 0)
+ val->intval = 100;
+ else
+ val->intval = power_state;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int wacom_ac_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct wacom_data *wdata = container_of(psy, struct wacom_data, ac);
+ int power_state = batcap[wdata->battery_capacity];
+ int ret = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ /* fall through */
+ case POWER_SUPPLY_PROP_ONLINE:
+ if (power_state == 0)
+ val->intval = 1;
+ else
+ val->intval = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+#endif
+
+static void wacom_poke(struct hid_device *hdev, u8 speed)
+{
+ struct wacom_data *wdata = hid_get_drvdata(hdev);
+ int limit, ret;
+ char rep_data[2];
+
+ rep_data[0] = 0x03; rep_data[1] = 0x00;
+ limit = 3;
+ do {
+ ret = hdev->hid_output_raw_report(hdev, rep_data, 2,
+ HID_FEATURE_REPORT);
+ } while (ret < 0 && limit-- > 0);
+
+ if (ret >= 0) {
+ if (speed == 0)
+ rep_data[0] = 0x05;
+ else
+ rep_data[0] = 0x06;
+
+ rep_data[1] = 0x00;
+ limit = 3;
+ do {
+ ret = hdev->hid_output_raw_report(hdev, rep_data, 2,
+ HID_FEATURE_REPORT);
+ } while (ret < 0 && limit-- > 0);
+
+ if (ret >= 0) {
+ wdata->high_speed = speed;
+ return;
+ }
+ }
+
+ /*
+ * Note that if the raw queries fail, it's not a hard failure and it
+ * is safe to continue
+ */
+ dev_warn(&hdev->dev, "failed to poke device, command %d, err %d\n",
+ rep_data[0], ret);
+ return;
+}
+
+static ssize_t wacom_show_speed(struct device *dev,
+ struct device_attribute
+ *attr, char *buf)
+{
+ struct wacom_data *wdata = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%i\n", wdata->high_speed);
+}
+
+static ssize_t wacom_store_speed(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ int new_speed;
+
+ if (sscanf(buf, "%1d", &new_speed) != 1)
+ return -EINVAL;
+
+ if (new_speed == 0 || new_speed == 1) {
+ wacom_poke(hdev, new_speed);
+ return strnlen(buf, PAGE_SIZE);
+ } else
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(speed, S_IRUGO | S_IWUGO,
+ wacom_show_speed, wacom_store_speed);
+
static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
u8 *raw_data, int size)
{
@@ -148,6 +293,12 @@ static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
input_sync(input);
}
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+ /* Store current battery capacity */
+ rw = (data[7] >> 2 & 0x07);
+ if (rw != wdata->battery_capacity)
+ wdata->battery_capacity = rw;
+#endif
return 1;
}
@@ -157,9 +308,7 @@ static int wacom_probe(struct hid_device *hdev,
struct hid_input *hidinput;
struct input_dev *input;
struct wacom_data *wdata;
- char rep_data[2];
int ret;
- int limit;
wdata = kzalloc(sizeof(*wdata), GFP_KERNEL);
if (wdata == NULL) {
@@ -182,31 +331,53 @@ static int wacom_probe(struct hid_device *hdev,
goto err_free;
}
- /*
- * Note that if the raw queries fail, it's not a hard failure and it
- * is safe to continue
- */
+ ret = device_create_file(&hdev->dev, &dev_attr_speed);
+ if (ret)
+ dev_warn(&hdev->dev,
+ "can't create sysfs speed attribute err: %d\n", ret);
- /* Set Wacom mode2 */
- rep_data[0] = 0x03; rep_data[1] = 0x00;
- limit = 3;
- do {
- ret = hdev->hid_output_raw_report(hdev, rep_data, 2,
- HID_FEATURE_REPORT);
- } while (ret < 0 && limit-- > 0);
- if (ret < 0)
- dev_warn(&hdev->dev, "failed to poke device #1, %d\n", ret);
+ /* Set Wacom mode 2 with high reporting speed */
+ wacom_poke(hdev, 1);
- /* 0x06 - high reporting speed, 0x05 - low speed */
- rep_data[0] = 0x06; rep_data[1] = 0x00;
- limit = 3;
- do {
- ret = hdev->hid_output_raw_report(hdev, rep_data, 2,
- HID_FEATURE_REPORT);
- } while (ret < 0 && limit-- > 0);
- if (ret < 0)
- dev_warn(&hdev->dev, "failed to poke device #2, %d\n", ret);
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+ wdata->battery.properties = wacom_battery_props;
+ wdata->battery.num_properties = ARRAY_SIZE(wacom_battery_props);
+ wdata->battery.get_property = wacom_battery_get_property;
+ wdata->battery.name = "wacom_battery";
+ wdata->battery.type = POWER_SUPPLY_TYPE_BATTERY;
+ wdata->battery.use_for_apm = 0;
+ ret = power_supply_register(&hdev->dev, &wdata->battery);
+ if (ret) {
+ dev_warn(&hdev->dev,
+ "can't create sysfs battery attribute, err: %d\n", ret);
+ /*
+ * battery attribute is not critical for the tablet, but if it
+ * failed then there is no need to create the ac attribute
+ */
+ goto move_on;
+ }
+
+ wdata->ac.properties = wacom_ac_props;
+ wdata->ac.num_properties = ARRAY_SIZE(wacom_ac_props);
+ wdata->ac.get_property = wacom_ac_get_property;
+ wdata->ac.name = "wacom_ac";
+ wdata->ac.type = POWER_SUPPLY_TYPE_MAINS;
+ wdata->ac.use_for_apm = 0;
+
+ ret = power_supply_register(&hdev->dev, &wdata->ac);
+ if (ret) {
+ dev_warn(&hdev->dev,
+ "can't create ac battery attribute, err: %d\n", ret);
+ /*
+ * ac attribute is not critical for the tablet, but if it
+ * failed then we don't want the battery attribute to exist
+ */
+ power_supply_unregister(&wdata->battery);
+ }
+
+move_on:
+#endif
hidinput = list_entry(hdev->inputs.next, struct hid_input, list);
input = hidinput->input;
@@ -251,13 +422,21 @@ err_free:
static void wacom_remove(struct hid_device *hdev)
{
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+ struct wacom_data *wdata = hid_get_drvdata(hdev);
+#endif
hid_hw_stop(hdev);
+
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+ power_supply_unregister(&wdata->battery);
+ power_supply_unregister(&wdata->ac);
+#endif
kfree(hid_get_drvdata(hdev));
}
static const struct hid_device_id wacom_devices[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) },
-
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH) },
{ }
};
MODULE_DEVICE_TABLE(hid, wacom_devices);
@@ -277,7 +456,6 @@ static int __init wacom_init(void)
ret = hid_register_driver(&wacom_driver);
if (ret)
printk(KERN_ERR "can't register wacom driver\n");
- printk(KERN_ERR "wacom driver registered\n");
return ret;
}
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 6eadf1a9b3cc..3ccd47850677 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -106,38 +106,48 @@ out:
static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
unsigned int minor = iminor(file->f_path.dentry->d_inode);
- /* FIXME: What stops hidraw_table going NULL */
- struct hid_device *dev = hidraw_table[minor]->hid;
+ struct hid_device *dev;
__u8 *buf;
int ret = 0;
- if (!dev->hid_output_raw_report)
- return -ENODEV;
+ mutex_lock(&minors_lock);
+ dev = hidraw_table[minor]->hid;
+
+ if (!dev->hid_output_raw_report) {
+ ret = -ENODEV;
+ goto out;
+ }
if (count > HID_MAX_BUFFER_SIZE) {
printk(KERN_WARNING "hidraw: pid %d passed too large report\n",
task_pid_nr(current));
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
if (count < 2) {
printk(KERN_WARNING "hidraw: pid %d passed too short report\n",
task_pid_nr(current));
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
buf = kmalloc(count * sizeof(__u8), GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
+ if (!buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
if (copy_from_user(buf, buffer, count)) {
ret = -EFAULT;
- goto out;
+ goto out_free;
}
ret = dev->hid_output_raw_report(dev, buf, count, HID_OUTPUT_REPORT);
-out:
+out_free:
kfree(buf);
+out:
+ mutex_unlock(&minors_lock);
return ret;
}
@@ -165,11 +175,8 @@ static int hidraw_open(struct inode *inode, struct file *file)
goto out;
}
- lock_kernel();
mutex_lock(&minors_lock);
if (!hidraw_table[minor]) {
- printk(KERN_EMERG "hidraw device with minor %d doesn't exist\n",
- minor);
kfree(list);
err = -ENODEV;
goto out_unlock;
@@ -197,7 +204,6 @@ static int hidraw_open(struct inode *inode, struct file *file)
out_unlock:
mutex_unlock(&minors_lock);
- unlock_kernel();
out:
return err;
@@ -209,11 +215,8 @@ static int hidraw_release(struct inode * inode, struct file * file)
struct hidraw *dev;
struct hidraw_list *list = file->private_data;
- if (!hidraw_table[minor]) {
- printk(KERN_EMERG "hidraw device with minor %d doesn't exist\n",
- minor);
+ if (!hidraw_table[minor])
return -ENODEV;
- }
list_del(&list->node);
dev = hidraw_table[minor];
@@ -238,11 +241,12 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
struct inode *inode = file->f_path.dentry->d_inode;
unsigned int minor = iminor(inode);
long ret = 0;
- /* FIXME: What stops hidraw_table going NULL */
- struct hidraw *dev = hidraw_table[minor];
+ struct hidraw *dev;
void __user *user_arg = (void __user*) arg;
- lock_kernel();
+ mutex_lock(&minors_lock);
+ dev = hidraw_table[minor];
+
switch (cmd) {
case HIDIOCGRDESCSIZE:
if (put_user(dev->hid->rsize, (int __user *)arg))
@@ -311,11 +315,11 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
-EFAULT : len;
break;
}
- }
+ }
ret = -ENOTTY;
}
- unlock_kernel();
+ mutex_unlock(&minors_lock);
return ret;
}
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 56d06cd8075b..8893affa5dd4 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -623,6 +623,7 @@ int usbhid_wait_io(struct hid_device *hid)
return 0;
}
+EXPORT_SYMBOL_GPL(usbhid_wait_io);
static int hid_set_idle(struct usb_device *dev, int ifnum, int report, int idle)
{
@@ -999,13 +1000,6 @@ static int usbhid_start(struct hid_device *hid)
}
}
- init_waitqueue_head(&usbhid->wait);
- INIT_WORK(&usbhid->reset_work, hid_reset);
- INIT_WORK(&usbhid->restart_work, __usbhid_restart_queues);
- setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid);
-
- spin_lock_init(&usbhid->lock);
-
usbhid->urbctrl = usb_alloc_urb(0, GFP_KERNEL);
if (!usbhid->urbctrl) {
ret = -ENOMEM;
@@ -1026,12 +1020,15 @@ static int usbhid_start(struct hid_device *hid)
/* Some keyboards don't work until their LEDs have been set.
* Since BIOSes do set the LEDs, it must be safe for any device
* that supports the keyboard boot protocol.
+ * In addition, enable remote wakeup by default for all keyboard
+ * devices supporting the boot protocol.
*/
if (interface->desc.bInterfaceSubClass == USB_INTERFACE_SUBCLASS_BOOT &&
interface->desc.bInterfaceProtocol ==
- USB_INTERFACE_PROTOCOL_KEYBOARD)
+ USB_INTERFACE_PROTOCOL_KEYBOARD) {
usbhid_set_leds(hid);
-
+ device_set_wakeup_enable(&dev->dev, 1);
+ }
return 0;
fail:
@@ -1140,6 +1137,7 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
hid->vendor = le16_to_cpu(dev->descriptor.idVendor);
hid->product = le16_to_cpu(dev->descriptor.idProduct);
hid->name[0] = 0;
+ hid->quirks = usbhid_lookup_quirk(hid->vendor, hid->product);
if (intf->cur_altsetting->desc.bInterfaceProtocol ==
USB_INTERFACE_PROTOCOL_MOUSE)
hid->type = HID_TYPE_USBMOUSE;
@@ -1179,6 +1177,12 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
usbhid->intf = intf;
usbhid->ifnum = interface->desc.bInterfaceNumber;
+ init_waitqueue_head(&usbhid->wait);
+ INIT_WORK(&usbhid->reset_work, hid_reset);
+ INIT_WORK(&usbhid->restart_work, __usbhid_restart_queues);
+ setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid);
+ spin_lock_init(&usbhid->lock);
+
ret = hid_add_device(hid);
if (ret) {
if (ret != -ENODEV)
@@ -1290,6 +1294,11 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
{
set_bit(HID_REPORTED_IDLE, &usbhid->iofl);
spin_unlock_irq(&usbhid->lock);
+ if (hid->driver && hid->driver->suspend) {
+ status = hid->driver->suspend(hid, message);
+ if (status < 0)
+ return status;
+ }
} else {
usbhid_mark_busy(usbhid);
spin_unlock_irq(&usbhid->lock);
@@ -1297,6 +1306,11 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
}
} else {
+ if (hid->driver && hid->driver->suspend) {
+ status = hid->driver->suspend(hid, message);
+ if (status < 0)
+ return status;
+ }
spin_lock_irq(&usbhid->lock);
set_bit(HID_REPORTED_IDLE, &usbhid->iofl);
spin_unlock_irq(&usbhid->lock);
@@ -1351,6 +1365,11 @@ static int hid_resume(struct usb_interface *intf)
hid_io_error(hid);
usbhid_restart_queues(usbhid);
+ if (status >= 0 && hid->driver && hid->driver->resume) {
+ int ret = hid->driver->resume(hid);
+ if (ret < 0)
+ status = ret;
+ }
dev_dbg(&intf->dev, "resume status %d\n", status);
return 0;
}
@@ -1359,9 +1378,16 @@ static int hid_reset_resume(struct usb_interface *intf)
{
struct hid_device *hid = usb_get_intfdata(intf);
struct usbhid_device *usbhid = hid->driver_data;
+ int status;
clear_bit(HID_REPORTED_IDLE, &usbhid->iofl);
- return hid_post_reset(intf);
+ status = hid_post_reset(intf);
+ if (status >= 0 && hid->driver && hid->driver->reset_resume) {
+ int ret = hid->driver->reset_resume(hid);
+ if (ret < 0)
+ status = ret;
+ }
+ return status;
}
#endif /* CONFIG_PM */
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 1152f9b5fd44..dfe0b38cf468 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -33,6 +33,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR, HID_QUIRK_BADPAD },
{ USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD, HID_QUIRK_BADPAD },
{ USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD },
+ { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 433602aed468..c24d2fa3e3b6 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -267,6 +267,7 @@ static int hiddev_open(struct inode *inode, struct file *file)
struct hiddev_list *list;
int res, i;
+ /* See comment in hiddev_connect() for BKL explanation */
lock_kernel();
i = iminor(inode) - HIDDEV_MINOR_BASE;
@@ -894,8 +895,22 @@ int hiddev_connect(struct hid_device *hid, unsigned int force)
hiddev->hid = hid;
hiddev->exist = 1;
- /* when lock_kernel() usage is fixed in usb_open(),
- * we could also fix it here */
+ /*
+ * The BKL here is used to avoid a race after usb_register_dev().
+ * Once the device node has been created, open() could happen on it.
+ * The code below will then fail, as hiddev_table hasn't been
+ * updated.
+ *
+ * The obvious fix -- introducing a mutex to guard hiddev_table[] --
+ * doesn't work, as usb_open() and usb_register_dev() both take
+ * minor_rwsem, so we'd have an ABBA deadlock.
+ *
+ * Before BKL pushdown, usb_open() had been acquiring it in the right
+ * order, so _open() could safely rely on it to protect against this race.
+ * Now the order is different, but an ABBA deadlock still doesn't occur
+ * as the BKL is dropped on schedule() (i.e. while sleeping on
+ * minor_rwsem). Fugly.
+ */
lock_kernel();
retval = usb_register_dev(usbhid->intf, &hiddev_class);
if (retval) {
diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c
index f843443ba5c3..b2fd0b00de92 100644
--- a/drivers/hid/usbhid/usbkbd.c
+++ b/drivers/hid/usbhid/usbkbd.c
@@ -313,6 +313,7 @@ static int usb_kbd_probe(struct usb_interface *iface,
goto fail2;
usb_set_intfdata(iface, kbd);
+ device_set_wakeup_enable(&dev->dev, 1);
return 0;
fail2:
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 9be8e1754a0b..0bd32069b82f 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -447,13 +447,14 @@ config SENSORS_IT87
will be called it87.
config SENSORS_LM63
- tristate "National Semiconductor LM63"
+ tristate "National Semiconductor LM63 and LM64"
depends on I2C
help
- If you say yes here you get support for the National Semiconductor
- LM63 remote diode digital temperature sensor with integrated fan
- control. Such chips are found on the Tyan S4882 (Thunder K8QS Pro)
- motherboard, among others.
+ If you say yes here you get support for the National
+ Semiconductor LM63 and LM64 remote diode digital temperature
+ sensors with integrated fan control. Such chips are found
+ on the Tyan S4882 (Thunder K8QS Pro) motherboard, among
+ others.
This driver can also be built as a module. If so, the module
will be called lm63.
@@ -749,6 +750,16 @@ config SENSORS_DME1737
This driver can also be built as a module. If so, the module
will be called dme1737.
+config SENSORS_EMC1403
+ tristate "SMSC EMC1403 thermal sensor"
+ depends on I2C
+ help
+ If you say yes here you get support for the SMSC EMC1403
+ temperature monitoring chip.
+
+ Threshold values can be configured using sysfs.
+ Data from the different diodes are accessible via sysfs.
+
config SENSORS_SMSC47M1
tristate "SMSC LPC47M10x and compatibles"
help
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 4aa1a3d112ad..1e6b08301d19 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_SENSORS_ATXP1) += atxp1.o
obj-$(CONFIG_SENSORS_CORETEMP) += coretemp.o
obj-$(CONFIG_SENSORS_DME1737) += dme1737.o
obj-$(CONFIG_SENSORS_DS1621) += ds1621.o
+obj-$(CONFIG_SENSORS_EMC1403) += emc1403.o
obj-$(CONFIG_SENSORS_F71805F) += f71805f.o
obj-$(CONFIG_SENSORS_F71882FG) += f71882fg.o
obj-$(CONFIG_SENSORS_F75375S) += f75375s.o
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index 1644b92e7cc4..15c1a9616af3 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -36,6 +36,7 @@
#define ADM1031_REG_FAN_DIV(nr) (0x20 + (nr))
#define ADM1031_REG_PWM (0x22)
#define ADM1031_REG_FAN_MIN(nr) (0x10 + (nr))
+#define ADM1031_REG_FAN_FILTER (0x23)
#define ADM1031_REG_TEMP_OFFSET(nr) (0x0d + (nr))
#define ADM1031_REG_TEMP_MAX(nr) (0x14 + 4 * (nr))
@@ -61,6 +62,9 @@
#define ADM1031_CONF2_TACH2_ENABLE 0x08
#define ADM1031_CONF2_TEMP_ENABLE(chan) (0x10 << (chan))
+#define ADM1031_UPDATE_RATE_MASK 0x1c
+#define ADM1031_UPDATE_RATE_SHIFT 2
+
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
@@ -75,6 +79,7 @@ struct adm1031_data {
int chip_type;
char valid; /* !=0 if following fields are valid */
unsigned long last_updated; /* In jiffies */
+ unsigned int update_rate; /* In milliseconds */
/* The chan_select_table contains the possible configurations for
* auto fan control.
*/
@@ -738,6 +743,57 @@ static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, show_alarm, NULL, 12);
static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 13);
static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 14);
+/* Update Rate */
+static const unsigned int update_rates[] = {
+ 16000, 8000, 4000, 2000, 1000, 500, 250, 125,
+};
+
+static ssize_t show_update_rate(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct adm1031_data *data = i2c_get_clientdata(client);
+
+ return sprintf(buf, "%u\n", data->update_rate);
+}
+
+static ssize_t set_update_rate(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct adm1031_data *data = i2c_get_clientdata(client);
+ unsigned long val;
+ int i, err;
+ u8 reg;
+
+ err = strict_strtoul(buf, 10, &val);
+ if (err)
+ return err;
+
+ /* find the nearest update rate from the table */
+ for (i = 0; i < ARRAY_SIZE(update_rates) - 1; i++) {
+ if (val >= update_rates[i])
+ break;
+ }
+ /* if not found, we point to the last entry (lowest update rate) */
+
+ /* set the new update rate while preserving other settings */
+ reg = adm1031_read_value(client, ADM1031_REG_FAN_FILTER);
+ reg &= ~ADM1031_UPDATE_RATE_MASK;
+ reg |= i << ADM1031_UPDATE_RATE_SHIFT;
+ adm1031_write_value(client, ADM1031_REG_FAN_FILTER, reg);
+
+ mutex_lock(&data->update_lock);
+ data->update_rate = update_rates[i];
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static DEVICE_ATTR(update_rate, S_IRUGO | S_IWUSR, show_update_rate,
+ set_update_rate);
+
static struct attribute *adm1031_attributes[] = {
&sensor_dev_attr_fan1_input.dev_attr.attr,
&sensor_dev_attr_fan1_div.dev_attr.attr,
@@ -774,6 +830,7 @@ static struct attribute *adm1031_attributes[] = {
&sensor_dev_attr_auto_fan1_min_pwm.dev_attr.attr,
+ &dev_attr_update_rate.attr,
&dev_attr_alarms.attr,
NULL
@@ -900,6 +957,7 @@ static void adm1031_init_client(struct i2c_client *client)
{
unsigned int read_val;
unsigned int mask;
+ int i;
struct adm1031_data *data = i2c_get_clientdata(client);
mask = (ADM1031_CONF2_PWM1_ENABLE | ADM1031_CONF2_TACH1_ENABLE);
@@ -919,18 +977,24 @@ static void adm1031_init_client(struct i2c_client *client)
ADM1031_CONF1_MONITOR_ENABLE);
}
+ /* Read the chip's update rate */
+ mask = ADM1031_UPDATE_RATE_MASK;
+ read_val = adm1031_read_value(client, ADM1031_REG_FAN_FILTER);
+ i = (read_val & mask) >> ADM1031_UPDATE_RATE_SHIFT;
+ data->update_rate = update_rates[i];
}
static struct adm1031_data *adm1031_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct adm1031_data *data = i2c_get_clientdata(client);
+ unsigned long next_update;
int chan;
mutex_lock(&data->update_lock);
- if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
- || !data->valid) {
+ next_update = data->last_updated + msecs_to_jiffies(data->update_rate);
+ if (time_after(jiffies, next_update) || !data->valid) {
dev_dbg(&client->dev, "Starting adm1031 update\n");
for (chan = 0;
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 0f28d91f29d8..b6598aa557a0 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -148,6 +148,20 @@ static const char *temperature_sensors_sets[][41] = {
/* Set 18: MacBook Pro 2,2 */
{ "TB0T", "TC0D", "TC0P", "TG0H", "TG0P", "TG0T", "TM0P", "TTF0",
"Th0H", "Th1H", "Tm0P", "Ts0P", NULL },
+/* Set 19: MacBook Pro 5,3 */
+ { "TB0T", "TB1T", "TB2T", "TB3T", "TC0D", "TC0F", "TC0P", "TG0D",
+ "TG0F", "TG0H", "TG0P", "TG0T", "TN0D", "TN0P", "TTF0", "Th2H",
+ "Tm0P", "Ts0P", "Ts0S", NULL },
+/* Set 20: MacBook Pro 5,4 */
+ { "TB0T", "TB1T", "TB2T", "TB3T", "TC0D", "TC0F", "TC0P", "TN0D",
+ "TN0P", "TTF0", "Th2H", "Ts0P", "Ts0S", NULL },
+/* Set 21: MacBook Pro 6,2 */
+ { "TB0T", "TB1T", "TB2T", "TC0C", "TC0D", "TC0P", "TC1C", "TG0D",
+ "TG0P", "TG0T", "TMCD", "TP0P", "TPCD", "Th1H", "Th2H", "Tm0P",
+ "Ts0P", "Ts0S", NULL },
+/* Set 22: MacBook Pro 7,1 */
+ { "TB0T", "TB1T", "TB2T", "TC0D", "TC0P", "TN0D", "TN0P", "TN0S",
+ "TN1D", "TN1F", "TN1G", "TN1S", "Th1H", "Ts0P", "Ts0S", NULL },
};
/* List of keys used to read/write fan speeds */
@@ -195,6 +209,9 @@ static unsigned int applesmc_accelerometer;
/* Indicates whether this computer has light sensors and keyboard backlight. */
static unsigned int applesmc_light;
+/* The number of fans handled by the driver */
+static unsigned int fans_handled;
+
/* Indicates which temperature sensors set to use. */
static unsigned int applesmc_temperature_set;
@@ -643,6 +660,17 @@ out:
return snprintf(sysfsbuf, PAGE_SIZE, "(%d,%d)\n", left, right);
}
+/* Displays sensor key as label */
+static ssize_t applesmc_show_sensor_label(struct device *dev,
+ struct device_attribute *devattr, char *sysfsbuf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ const char *key =
+ temperature_sensors_sets[applesmc_temperature_set][attr->index];
+
+ return snprintf(sysfsbuf, PAGE_SIZE, "%s\n", key);
+}
+
/* Displays degree Celsius * 1000 */
static ssize_t applesmc_show_temperature(struct device *dev,
struct device_attribute *devattr, char *sysfsbuf)
@@ -1110,6 +1138,86 @@ static const struct attribute_group fan_attribute_groups[] = {
/*
* Temperature sensors sysfs entries.
*/
+static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp4_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp5_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp6_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp7_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 6);
+static SENSOR_DEVICE_ATTR(temp8_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 7);
+static SENSOR_DEVICE_ATTR(temp9_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 8);
+static SENSOR_DEVICE_ATTR(temp10_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 9);
+static SENSOR_DEVICE_ATTR(temp11_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 10);
+static SENSOR_DEVICE_ATTR(temp12_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 11);
+static SENSOR_DEVICE_ATTR(temp13_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 12);
+static SENSOR_DEVICE_ATTR(temp14_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 13);
+static SENSOR_DEVICE_ATTR(temp15_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 14);
+static SENSOR_DEVICE_ATTR(temp16_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 15);
+static SENSOR_DEVICE_ATTR(temp17_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 16);
+static SENSOR_DEVICE_ATTR(temp18_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 17);
+static SENSOR_DEVICE_ATTR(temp19_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 18);
+static SENSOR_DEVICE_ATTR(temp20_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 19);
+static SENSOR_DEVICE_ATTR(temp21_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 20);
+static SENSOR_DEVICE_ATTR(temp22_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 21);
+static SENSOR_DEVICE_ATTR(temp23_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 22);
+static SENSOR_DEVICE_ATTR(temp24_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 23);
+static SENSOR_DEVICE_ATTR(temp25_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 24);
+static SENSOR_DEVICE_ATTR(temp26_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 25);
+static SENSOR_DEVICE_ATTR(temp27_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 26);
+static SENSOR_DEVICE_ATTR(temp28_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 27);
+static SENSOR_DEVICE_ATTR(temp29_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 28);
+static SENSOR_DEVICE_ATTR(temp30_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 29);
+static SENSOR_DEVICE_ATTR(temp31_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 30);
+static SENSOR_DEVICE_ATTR(temp32_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 31);
+static SENSOR_DEVICE_ATTR(temp33_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 32);
+static SENSOR_DEVICE_ATTR(temp34_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 33);
+static SENSOR_DEVICE_ATTR(temp35_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 34);
+static SENSOR_DEVICE_ATTR(temp36_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 35);
+static SENSOR_DEVICE_ATTR(temp37_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 36);
+static SENSOR_DEVICE_ATTR(temp38_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 37);
+static SENSOR_DEVICE_ATTR(temp39_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 38);
+static SENSOR_DEVICE_ATTR(temp40_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 39);
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
applesmc_show_temperature, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO,
@@ -1191,6 +1299,50 @@ static SENSOR_DEVICE_ATTR(temp39_input, S_IRUGO,
static SENSOR_DEVICE_ATTR(temp40_input, S_IRUGO,
applesmc_show_temperature, NULL, 39);
+static struct attribute *label_attributes[] = {
+ &sensor_dev_attr_temp1_label.dev_attr.attr,
+ &sensor_dev_attr_temp2_label.dev_attr.attr,
+ &sensor_dev_attr_temp3_label.dev_attr.attr,
+ &sensor_dev_attr_temp4_label.dev_attr.attr,
+ &sensor_dev_attr_temp5_label.dev_attr.attr,
+ &sensor_dev_attr_temp6_label.dev_attr.attr,
+ &sensor_dev_attr_temp7_label.dev_attr.attr,
+ &sensor_dev_attr_temp8_label.dev_attr.attr,
+ &sensor_dev_attr_temp9_label.dev_attr.attr,
+ &sensor_dev_attr_temp10_label.dev_attr.attr,
+ &sensor_dev_attr_temp11_label.dev_attr.attr,
+ &sensor_dev_attr_temp12_label.dev_attr.attr,
+ &sensor_dev_attr_temp13_label.dev_attr.attr,
+ &sensor_dev_attr_temp14_label.dev_attr.attr,
+ &sensor_dev_attr_temp15_label.dev_attr.attr,
+ &sensor_dev_attr_temp16_label.dev_attr.attr,
+ &sensor_dev_attr_temp17_label.dev_attr.attr,
+ &sensor_dev_attr_temp18_label.dev_attr.attr,
+ &sensor_dev_attr_temp19_label.dev_attr.attr,
+ &sensor_dev_attr_temp20_label.dev_attr.attr,
+ &sensor_dev_attr_temp21_label.dev_attr.attr,
+ &sensor_dev_attr_temp22_label.dev_attr.attr,
+ &sensor_dev_attr_temp23_label.dev_attr.attr,
+ &sensor_dev_attr_temp24_label.dev_attr.attr,
+ &sensor_dev_attr_temp25_label.dev_attr.attr,
+ &sensor_dev_attr_temp26_label.dev_attr.attr,
+ &sensor_dev_attr_temp27_label.dev_attr.attr,
+ &sensor_dev_attr_temp28_label.dev_attr.attr,
+ &sensor_dev_attr_temp29_label.dev_attr.attr,
+ &sensor_dev_attr_temp30_label.dev_attr.attr,
+ &sensor_dev_attr_temp31_label.dev_attr.attr,
+ &sensor_dev_attr_temp32_label.dev_attr.attr,
+ &sensor_dev_attr_temp33_label.dev_attr.attr,
+ &sensor_dev_attr_temp34_label.dev_attr.attr,
+ &sensor_dev_attr_temp35_label.dev_attr.attr,
+ &sensor_dev_attr_temp36_label.dev_attr.attr,
+ &sensor_dev_attr_temp37_label.dev_attr.attr,
+ &sensor_dev_attr_temp38_label.dev_attr.attr,
+ &sensor_dev_attr_temp39_label.dev_attr.attr,
+ &sensor_dev_attr_temp40_label.dev_attr.attr,
+ NULL
+};
+
static struct attribute *temperature_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp2_input.dev_attr.attr,
@@ -1238,6 +1390,10 @@ static struct attribute *temperature_attributes[] = {
static const struct attribute_group temperature_attributes_group =
{ .attrs = temperature_attributes };
+static const struct attribute_group label_attributes_group = {
+ .attrs = label_attributes
+};
+
/* Module stuff */
/*
@@ -1360,6 +1516,14 @@ static __initdata struct dmi_match_data applesmc_dmi_data[] = {
{ .accelerometer = 0, .light = 0, .temperature_set = 17 },
/* MacBook Pro 2,2: accelerometer, backlight and temperature set 18 */
{ .accelerometer = 1, .light = 1, .temperature_set = 18 },
+/* MacBook Pro 5,3: accelerometer, backlight and temperature set 19 */
+ { .accelerometer = 1, .light = 1, .temperature_set = 19 },
+/* MacBook Pro 5,4: accelerometer, backlight and temperature set 20 */
+ { .accelerometer = 1, .light = 1, .temperature_set = 20 },
+/* MacBook Pro 6,2: accelerometer, backlight and temperature set 21 */
+ { .accelerometer = 1, .light = 1, .temperature_set = 21 },
+/* MacBook Pro 7,1: accelerometer, backlight and temperature set 22 */
+ { .accelerometer = 1, .light = 1, .temperature_set = 22 },
};
/* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1".
@@ -1373,6 +1537,22 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = {
DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir") },
&applesmc_dmi_data[7]},
+ { applesmc_dmi_match, "Apple MacBook Pro 7", {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro7") },
+ &applesmc_dmi_data[22]},
+ { applesmc_dmi_match, "Apple MacBook Pro 5,4", {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,4") },
+ &applesmc_dmi_data[20]},
+ { applesmc_dmi_match, "Apple MacBook Pro 5,3", {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,3") },
+ &applesmc_dmi_data[19]},
+ { applesmc_dmi_match, "Apple MacBook Pro 6", {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro6") },
+ &applesmc_dmi_data[21]},
{ applesmc_dmi_match, "Apple MacBook Pro 5", {
DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5") },
@@ -1492,45 +1672,31 @@ static int __init applesmc_init(void)
/* create fan files */
count = applesmc_get_fan_count();
- if (count < 0) {
+ if (count < 0)
printk(KERN_ERR "applesmc: Cannot get the number of fans.\n");
- } else {
+ else
printk(KERN_INFO "applesmc: %d fans found.\n", count);
- switch (count) {
- default:
- printk(KERN_WARNING "applesmc: More than 4 fans found,"
- " but at most 4 fans are supported"
- " by the driver.\n");
- case 4:
- ret = sysfs_create_group(&pdev->dev.kobj,
- &fan_attribute_groups[3]);
- if (ret)
- goto out_key_enumeration;
- case 3:
- ret = sysfs_create_group(&pdev->dev.kobj,
- &fan_attribute_groups[2]);
- if (ret)
- goto out_key_enumeration;
- case 2:
- ret = sysfs_create_group(&pdev->dev.kobj,
- &fan_attribute_groups[1]);
- if (ret)
- goto out_key_enumeration;
- case 1:
- ret = sysfs_create_group(&pdev->dev.kobj,
- &fan_attribute_groups[0]);
- if (ret)
- goto out_fan_1;
- case 0:
- ;
- }
+ if (count > 4) {
+ count = 4;
+ printk(KERN_WARNING "applesmc: More than 4 fans found,"
+ " but at most 4 fans are supported"
+ " by the driver.\n");
+ }
+
+ while (fans_handled < count) {
+ ret = sysfs_create_group(&pdev->dev.kobj,
+ &fan_attribute_groups[fans_handled]);
+ if (ret)
+ goto out_fans;
+ fans_handled++;
}
for (i = 0;
temperature_sensors_sets[applesmc_temperature_set][i] != NULL;
i++) {
- if (temperature_attributes[i] == NULL) {
+ if (temperature_attributes[i] == NULL ||
+ label_attributes[i] == NULL) {
printk(KERN_ERR "applesmc: More temperature sensors "
"in temperature_sensors_sets (at least %i)"
"than available sysfs files in "
@@ -1542,6 +1708,10 @@ static int __init applesmc_init(void)
temperature_attributes[i]);
if (ret)
goto out_temperature;
+ ret = sysfs_create_file(&pdev->dev.kobj,
+ label_attributes[i]);
+ if (ret)
+ goto out_temperature;
}
if (applesmc_accelerometer) {
@@ -1592,11 +1762,12 @@ out_accelerometer:
if (applesmc_accelerometer)
applesmc_release_accelerometer();
out_temperature:
+ sysfs_remove_group(&pdev->dev.kobj, &label_attributes_group);
sysfs_remove_group(&pdev->dev.kobj, &temperature_attributes_group);
- sysfs_remove_group(&pdev->dev.kobj, &fan_attribute_groups[0]);
-out_fan_1:
- sysfs_remove_group(&pdev->dev.kobj, &fan_attribute_groups[1]);
-out_key_enumeration:
+out_fans:
+ while (fans_handled)
+ sysfs_remove_group(&pdev->dev.kobj,
+ &fan_attribute_groups[--fans_handled]);
sysfs_remove_group(&pdev->dev.kobj, &key_enumeration_group);
out_name:
sysfs_remove_file(&pdev->dev.kobj, &dev_attr_name.attr);
@@ -1621,9 +1792,11 @@ static void __exit applesmc_exit(void)
}
if (applesmc_accelerometer)
applesmc_release_accelerometer();
+ sysfs_remove_group(&pdev->dev.kobj, &label_attributes_group);
sysfs_remove_group(&pdev->dev.kobj, &temperature_attributes_group);
- sysfs_remove_group(&pdev->dev.kobj, &fan_attribute_groups[0]);
- sysfs_remove_group(&pdev->dev.kobj, &fan_attribute_groups[1]);
+ while (fans_handled)
+ sysfs_remove_group(&pdev->dev.kobj,
+ &fan_attribute_groups[--fans_handled]);
sysfs_remove_group(&pdev->dev.kobj, &key_enumeration_group);
sysfs_remove_file(&pdev->dev.kobj, &dev_attr_name.attr);
platform_device_unregister(pdev);
diff --git a/drivers/hwmon/asc7621.c b/drivers/hwmon/asc7621.c
index 7f948105d8ad..0f388adc6187 100644
--- a/drivers/hwmon/asc7621.c
+++ b/drivers/hwmon/asc7621.c
@@ -268,8 +268,11 @@ static ssize_t store_fan16(struct device *dev,
if (strict_strtol(buf, 10, &reqval))
return -EINVAL;
+ /* If a minimum RPM of zero is requested, then we set the register to
+ 0xffff. This value allows the fan to be stopped completely without
+ generating an alarm. */
reqval =
- (SENSORS_LIMIT((reqval) <= 0 ? 0 : 5400000 / (reqval), 0, 65534));
+ (reqval <= 0 ? 0xffff : SENSORS_LIMIT(5400000 / reqval, 0, 0xfffe));
mutex_lock(&data->update_lock);
data->reg[param->msb[0]] = (reqval >> 8) & 0xff;
@@ -285,8 +288,9 @@ static ssize_t store_fan16(struct device *dev,
* Voltages are scaled in the device so that the nominal voltage
* is 3/4ths of the 0-255 range (i.e. 192).
* If all voltages are 'normal' then all voltage registers will
- * read 0xC0. This doesn't help us if we don't have a point of refernce.
- * The data sheet however provides us with the full scale value for each
+ * read 0xC0.
+ *
+ * The data sheet provides us with the 3/4 scale value for each voltage
* which is stored in in_scaling. The sda->index parameter value provides
* the index into in_scaling.
*
@@ -295,7 +299,7 @@ static ssize_t store_fan16(struct device *dev,
*/
static int asc7621_in_scaling[] = {
- 3320, 3000, 4380, 6640, 16000
+ 2500, 2250, 3300, 5000, 12000
};
static ssize_t show_in10(struct device *dev, struct device_attribute *attr,
@@ -306,19 +310,12 @@ static ssize_t show_in10(struct device *dev, struct device_attribute *attr,
u8 nr = sda->index;
mutex_lock(&data->update_lock);
- regval = (data->reg[param->msb[0]] * asc7621_in_scaling[nr]) / 256;
-
- /* The LSB value is a 2-bit scaling of the MSB's LSbit value.
- * I.E. If the maximim voltage for this input is 6640 millivolts then
- * a MSB register value of 0 = 0mv and 255 = 6640mv.
- * A 1 step change therefore represents 25.9mv (6640 / 256).
- * The extra 2-bits therefore represent increments of 6.48mv.
- */
- regval += ((asc7621_in_scaling[nr] / 256) / 4) *
- (data->reg[param->lsb[0]] >> 6);
-
+ regval = (data->reg[param->msb[0]] << 8) | (data->reg[param->lsb[0]]);
mutex_unlock(&data->update_lock);
+ /* The LSB value is a 2-bit scaling of the MSB's LSbit value. */
+ regval = (regval >> 6) * asc7621_in_scaling[nr] / (0xc0 << 2);
+
return sprintf(buf, "%u\n", regval);
}
@@ -331,7 +328,7 @@ static ssize_t show_in8(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%u\n",
((data->reg[param->msb[0]] *
- asc7621_in_scaling[nr]) / 256));
+ asc7621_in_scaling[nr]) / 0xc0));
}
static ssize_t store_in8(struct device *dev, struct device_attribute *attr,
@@ -344,9 +341,11 @@ static ssize_t store_in8(struct device *dev, struct device_attribute *attr,
if (strict_strtol(buf, 10, &reqval))
return -EINVAL;
- reqval = SENSORS_LIMIT(reqval, 0, asc7621_in_scaling[nr]);
+ reqval = SENSORS_LIMIT(reqval, 0, 0xffff);
+
+ reqval = reqval * 0xc0 / asc7621_in_scaling[nr];
- reqval = (reqval * 255 + 128) / asc7621_in_scaling[nr];
+ reqval = SENSORS_LIMIT(reqval, 0, 0xff);
mutex_lock(&data->update_lock);
data->reg[param->msb[0]] = reqval;
@@ -846,11 +845,11 @@ static struct asc7621_param asc7621_params[] = {
PWRITE(in3_max, 3, PRI_LOW, 0x4b, 0, 0, 0, in8),
PWRITE(in4_max, 4, PRI_LOW, 0x4d, 0, 0, 0, in8),
- PREAD(in0_alarm, 0, PRI_LOW, 0x41, 0, 0x01, 0, bitmask),
- PREAD(in1_alarm, 1, PRI_LOW, 0x41, 0, 0x01, 1, bitmask),
- PREAD(in2_alarm, 2, PRI_LOW, 0x41, 0, 0x01, 2, bitmask),
- PREAD(in3_alarm, 3, PRI_LOW, 0x41, 0, 0x01, 3, bitmask),
- PREAD(in4_alarm, 4, PRI_LOW, 0x42, 0, 0x01, 0, bitmask),
+ PREAD(in0_alarm, 0, PRI_HIGH, 0x41, 0, 0x01, 0, bitmask),
+ PREAD(in1_alarm, 1, PRI_HIGH, 0x41, 0, 0x01, 1, bitmask),
+ PREAD(in2_alarm, 2, PRI_HIGH, 0x41, 0, 0x01, 2, bitmask),
+ PREAD(in3_alarm, 3, PRI_HIGH, 0x41, 0, 0x01, 3, bitmask),
+ PREAD(in4_alarm, 4, PRI_HIGH, 0x42, 0, 0x01, 0, bitmask),
PREAD(fan1_input, 0, PRI_HIGH, 0x29, 0x28, 0, 0, fan16),
PREAD(fan2_input, 1, PRI_HIGH, 0x2b, 0x2a, 0, 0, fan16),
@@ -862,10 +861,10 @@ static struct asc7621_param asc7621_params[] = {
PWRITE(fan3_min, 2, PRI_LOW, 0x59, 0x58, 0, 0, fan16),
PWRITE(fan4_min, 3, PRI_LOW, 0x5b, 0x5a, 0, 0, fan16),
- PREAD(fan1_alarm, 0, PRI_LOW, 0x42, 0, 0x01, 0, bitmask),
- PREAD(fan2_alarm, 1, PRI_LOW, 0x42, 0, 0x01, 1, bitmask),
- PREAD(fan3_alarm, 2, PRI_LOW, 0x42, 0, 0x01, 2, bitmask),
- PREAD(fan4_alarm, 3, PRI_LOW, 0x42, 0, 0x01, 3, bitmask),
+ PREAD(fan1_alarm, 0, PRI_HIGH, 0x42, 0, 0x01, 2, bitmask),
+ PREAD(fan2_alarm, 1, PRI_HIGH, 0x42, 0, 0x01, 3, bitmask),
+ PREAD(fan3_alarm, 2, PRI_HIGH, 0x42, 0, 0x01, 4, bitmask),
+ PREAD(fan4_alarm, 3, PRI_HIGH, 0x42, 0, 0x01, 5, bitmask),
PREAD(temp1_input, 0, PRI_HIGH, 0x25, 0x10, 0, 0, temp10),
PREAD(temp2_input, 1, PRI_HIGH, 0x26, 0x15, 0, 0, temp10),
@@ -886,10 +885,10 @@ static struct asc7621_param asc7621_params[] = {
PWRITE(temp3_max, 2, PRI_LOW, 0x53, 0, 0, 0, temp8),
PWRITE(temp4_max, 3, PRI_LOW, 0x35, 0, 0, 0, temp8),
- PREAD(temp1_alarm, 0, PRI_LOW, 0x41, 0, 0x01, 4, bitmask),
- PREAD(temp2_alarm, 1, PRI_LOW, 0x41, 0, 0x01, 5, bitmask),
- PREAD(temp3_alarm, 2, PRI_LOW, 0x41, 0, 0x01, 6, bitmask),
- PREAD(temp4_alarm, 3, PRI_LOW, 0x43, 0, 0x01, 0, bitmask),
+ PREAD(temp1_alarm, 0, PRI_HIGH, 0x41, 0, 0x01, 4, bitmask),
+ PREAD(temp2_alarm, 1, PRI_HIGH, 0x41, 0, 0x01, 5, bitmask),
+ PREAD(temp3_alarm, 2, PRI_HIGH, 0x41, 0, 0x01, 6, bitmask),
+ PREAD(temp4_alarm, 3, PRI_HIGH, 0x43, 0, 0x01, 0, bitmask),
PWRITE(temp1_source, 0, PRI_LOW, 0x02, 0, 0x07, 4, bitmask),
PWRITE(temp2_source, 1, PRI_LOW, 0x02, 0, 0x07, 0, bitmask),
@@ -898,7 +897,7 @@ static struct asc7621_param asc7621_params[] = {
PWRITE(temp1_smoothing_enable, 0, PRI_LOW, 0x62, 0, 0x01, 3, bitmask),
PWRITE(temp2_smoothing_enable, 1, PRI_LOW, 0x63, 0, 0x01, 7, bitmask),
- PWRITE(temp3_smoothing_enable, 2, PRI_LOW, 0x64, 0, 0x01, 3, bitmask),
+ PWRITE(temp3_smoothing_enable, 2, PRI_LOW, 0x63, 0, 0x01, 3, bitmask),
PWRITE(temp4_smoothing_enable, 3, PRI_LOW, 0x3c, 0, 0x01, 3, bitmask),
PWRITE(temp1_smoothing_time, 0, PRI_LOW, 0x62, 0, 0x07, 0, temp_st),
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index 16c420240724..653db1bda934 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -1411,6 +1411,13 @@ static int __init atk0110_init(void)
{
int ret;
+ /* Make sure it's safe to access the device through ACPI */
+ if (!acpi_resources_are_enforced()) {
+ pr_err("atk: Resources not safely usable due to "
+ "acpi_enforce_resources kernel parameter\n");
+ return -EBUSY;
+ }
+
ret = acpi_bus_register_driver(&atk_driver);
if (ret)
pr_info("atk: acpi_bus_register_driver failed: %d\n", ret);
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
new file mode 100644
index 000000000000..28cca347ccaa
--- /dev/null
+++ b/drivers/hwmon/emc1403.c
@@ -0,0 +1,344 @@
+/*
+ * emc1403.c - SMSC Thermal Driver
+ *
+ * Copyright (C) 2008 Intel Corp
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * TODO
+ * - cache alarm and critical limit registers
+ * - add emc1404 support
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/sysfs.h>
+#include <linux/mutex.h>
+
+#define THERMAL_PID_REG 0xfd
+#define THERMAL_SMSC_ID_REG 0xfe
+#define THERMAL_REVISION_REG 0xff
+
+struct thermal_data {
+ struct device *hwmon_dev;
+ struct mutex mutex;
+ /* Cache the hyst value so we don't keep re-reading it. In theory
+ we could cache it forever as nobody else should be writing it. */
+ u8 cached_hyst;
+ unsigned long hyst_valid;
+};
+
+static ssize_t show_temp(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
+ int retval = i2c_smbus_read_byte_data(client, sda->index);
+
+ if (retval < 0)
+ return retval;
+ return sprintf(buf, "%d000\n", retval);
+}
+
+static ssize_t show_bit(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct sensor_device_attribute_2 *sda = to_sensor_dev_attr_2(attr);
+ int retval = i2c_smbus_read_byte_data(client, sda->nr);
+
+ if (retval < 0)
+ return retval;
+ retval &= sda->index;
+ return sprintf(buf, "%d\n", retval ? 1 : 0);
+}
+
+static ssize_t store_temp(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
+ struct i2c_client *client = to_i2c_client(dev);
+ unsigned long val;
+ int retval;
+
+ if (strict_strtoul(buf, 10, &val))
+ return -EINVAL;
+ retval = i2c_smbus_write_byte_data(client, sda->index,
+ DIV_ROUND_CLOSEST(val, 1000));
+ if (retval < 0)
+ return retval;
+ return count;
+}
+
+static ssize_t show_hyst(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct thermal_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
+ int retval;
+ int hyst;
+
+ retval = i2c_smbus_read_byte_data(client, sda->index);
+ if (retval < 0)
+ return retval;
+
+ if (time_after(jiffies, data->hyst_valid)) {
+ hyst = i2c_smbus_read_byte_data(client, 0x21);
+ if (hyst < 0)
+ return retval;
+ data->cached_hyst = hyst;
+ data->hyst_valid = jiffies + HZ;
+ }
+ return sprintf(buf, "%d000\n", retval - data->cached_hyst);
+}
+
+static ssize_t store_hyst(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct thermal_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
+ int retval;
+ int hyst;
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val))
+ return -EINVAL;
+
+ mutex_lock(&data->mutex);
+ retval = i2c_smbus_read_byte_data(client, sda->index);
+ if (retval < 0)
+ goto fail;
+
+ hyst = val - retval * 1000;
+ hyst = DIV_ROUND_CLOSEST(hyst, 1000);
+ if (hyst < 0 || hyst > 255) {
+ retval = -ERANGE;
+ goto fail;
+ }
+
+ retval = i2c_smbus_write_byte_data(client, 0x21, hyst);
+ if (retval == 0) {
+ retval = count;
+ data->cached_hyst = hyst;
+ data->hyst_valid = jiffies + HZ;
+ }
+fail:
+ mutex_unlock(&data->mutex);
+ return retval;
+}
+
+/*
+ * Sensors. We pass the actual i2c register to the methods.
+ */
+
+static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x06);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x05);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x20);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0x00);
+static SENSOR_DEVICE_ATTR_2(temp1_min_alarm, S_IRUGO,
+ show_bit, NULL, 0x36, 0x01);
+static SENSOR_DEVICE_ATTR_2(temp1_max_alarm, S_IRUGO,
+ show_bit, NULL, 0x35, 0x01);
+static SENSOR_DEVICE_ATTR_2(temp1_crit_alarm, S_IRUGO,
+ show_bit, NULL, 0x37, 0x01);
+static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO | S_IWUSR,
+ show_hyst, store_hyst, 0x20);
+
+static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x08);
+static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x07);
+static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x19);
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 0x01);
+static SENSOR_DEVICE_ATTR_2(temp2_min_alarm, S_IRUGO,
+ show_bit, NULL, 0x36, 0x02);
+static SENSOR_DEVICE_ATTR_2(temp2_max_alarm, S_IRUGO,
+ show_bit, NULL, 0x35, 0x02);
+static SENSOR_DEVICE_ATTR_2(temp2_crit_alarm, S_IRUGO,
+ show_bit, NULL, 0x37, 0x02);
+static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO | S_IWUSR,
+ show_hyst, store_hyst, 0x19);
+
+static SENSOR_DEVICE_ATTR(temp3_min, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x16);
+static SENSOR_DEVICE_ATTR(temp3_max, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x15);
+static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x1A);
+static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 0x23);
+static SENSOR_DEVICE_ATTR_2(temp3_min_alarm, S_IRUGO,
+ show_bit, NULL, 0x36, 0x04);
+static SENSOR_DEVICE_ATTR_2(temp3_max_alarm, S_IRUGO,
+ show_bit, NULL, 0x35, 0x04);
+static SENSOR_DEVICE_ATTR_2(temp3_crit_alarm, S_IRUGO,
+ show_bit, NULL, 0x37, 0x04);
+static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO | S_IWUSR,
+ show_hyst, store_hyst, 0x1A);
+
+static struct attribute *mid_att_thermal[] = {
+ &sensor_dev_attr_temp1_min.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp2_min.dev_attr.attr,
+ &sensor_dev_attr_temp2_max.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit.dev_attr.attr,
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp3_min.dev_attr.attr,
+ &sensor_dev_attr_temp3_max.dev_attr.attr,
+ &sensor_dev_attr_temp3_crit.dev_attr.attr,
+ &sensor_dev_attr_temp3_input.dev_attr.attr,
+ &sensor_dev_attr_temp3_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp3_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group m_thermal_gr = {
+ .attrs = mid_att_thermal
+};
+
+static int emc1403_detect(struct i2c_client *client,
+ struct i2c_board_info *info)
+{
+ int id;
+ /* Check that the thermal chip is an SMSC EMC1403 */
+
+ id = i2c_smbus_read_byte_data(client, THERMAL_SMSC_ID_REG);
+ if (id != 0x5d)
+ return -ENODEV;
+
+ /* Note: 0x25 is the EMC1404, which is very similar; this
+ driver could be extended to support it */
+ id = i2c_smbus_read_byte_data(client, THERMAL_PID_REG);
+ if (id != 0x21)
+ return -ENODEV;
+
+ id = i2c_smbus_read_byte_data(client, THERMAL_REVISION_REG);
+ if (id != 0x01)
+ return -ENODEV;
+
+ strlcpy(info->type, "emc1403", I2C_NAME_SIZE);
+ return 0;
+}
+
+static int emc1403_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int res;
+ struct thermal_data *data;
+
+ data = kzalloc(sizeof(struct thermal_data), GFP_KERNEL);
+ if (data == NULL) {
+ dev_warn(&client->dev, "out of memory");
+ return -ENOMEM;
+ }
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->mutex);
+ data->hyst_valid = jiffies - 1; /* Expired */
+
+ res = sysfs_create_group(&client->dev.kobj, &m_thermal_gr);
+ if (res) {
+ dev_warn(&client->dev, "create group failed\n");
+ goto thermal_error1;
+ }
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ res = PTR_ERR(data->hwmon_dev);
+ dev_warn(&client->dev, "register hwmon dev failed\n");
+ goto thermal_error2;
+ }
+ dev_info(&client->dev, "EMC1403 Thermal chip found\n");
+ return res;
+
+thermal_error2:
+ sysfs_remove_group(&client->dev.kobj, &m_thermal_gr);
+thermal_error1:
+ kfree(data);
+ return res;
+}
+
+static int emc1403_remove(struct i2c_client *client)
+{
+ struct thermal_data *data = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&client->dev.kobj, &m_thermal_gr);
+ kfree(data);
+ return 0;
+}
+
+static const unsigned short emc1403_address_list[] = {
+ 0x18, 0x2a, 0x4c, 0x4d, I2C_CLIENT_END
+};
+
+static const struct i2c_device_id emc1403_idtable[] = {
+ { "emc1403", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, emc1403_idtable);
+
+static struct i2c_driver sensor_emc1403 = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "emc1403",
+ },
+ .detect = emc1403_detect,
+ .probe = emc1403_probe,
+ .remove = emc1403_remove,
+ .id_table = emc1403_idtable,
+ .address_list = emc1403_address_list,
+};
+
+static int __init sensor_emc1403_init(void)
+{
+ return i2c_add_driver(&sensor_emc1403);
+}
+
+static void __exit sensor_emc1403_exit(void)
+{
+ i2c_del_driver(&sensor_emc1403);
+}
+
+module_init(sensor_emc1403_init);
+module_exit(sensor_emc1403_exit);
+
+MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com");
+MODULE_DESCRIPTION("emc1403 Thermal Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index a95fa4256caa..537841ef44b9 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -856,21 +856,19 @@ static inline int superio_inb(int base, int reg)
static int superio_inw(int base, int reg)
{
int val;
- outb(reg++, base);
- val = inb(base + 1) << 8;
- outb(reg, base);
- val |= inb(base + 1);
+ val = superio_inb(base, reg) << 8;
+ val |= superio_inb(base, reg + 1);
return val;
}
static inline void superio_enter(int base)
{
/* according to the datasheet the key must be sent twice! */
- outb( SIO_UNLOCK_KEY, base);
- outb( SIO_UNLOCK_KEY, base);
+ outb(SIO_UNLOCK_KEY, base);
+ outb(SIO_UNLOCK_KEY, base);
}
-static inline void superio_select( int base, int ld)
+static inline void superio_select(int base, int ld)
{
outb(SIO_REG_LDSEL, base);
outb(ld, base + 1);
@@ -905,10 +903,8 @@ static u16 f71882fg_read16(struct f71882fg_data *data, u8 reg)
{
u16 val;
- outb(reg++, data->addr + ADDR_REG_OFFSET);
- val = inb(data->addr + DATA_REG_OFFSET) << 8;
- outb(reg, data->addr + ADDR_REG_OFFSET);
- val |= inb(data->addr + DATA_REG_OFFSET);
+ val = f71882fg_read8(data, reg) << 8;
+ val |= f71882fg_read8(data, reg + 1);
return val;
}
@@ -921,10 +917,8 @@ static void f71882fg_write8(struct f71882fg_data *data, u8 reg, u8 val)
static void f71882fg_write16(struct f71882fg_data *data, u8 reg, u16 val)
{
- outb(reg++, data->addr + ADDR_REG_OFFSET);
- outb(val >> 8, data->addr + DATA_REG_OFFSET);
- outb(reg, data->addr + ADDR_REG_OFFSET);
- outb(val & 255, data->addr + DATA_REG_OFFSET);
+ f71882fg_write8(data, reg, val >> 8);
+ f71882fg_write8(data, reg + 1, val & 0xff);
}
static u16 f71882fg_read_temp(struct f71882fg_data *data, int nr)
@@ -945,7 +939,7 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
mutex_lock(&data->update_lock);
/* Update once every 60 seconds */
- if ( time_after(jiffies, data->last_limits + 60 * HZ ) ||
+ if (time_after(jiffies, data->last_limits + 60 * HZ) ||
!data->valid) {
if (data->type == f71882fg || data->type == f71889fg) {
data->in1_max =
@@ -1127,8 +1121,12 @@ static ssize_t store_fan_full_speed(struct device *dev,
const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- long val = simple_strtol(buf, NULL, 10);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
val = SENSORS_LIMIT(val, 23, 1500000);
val = fan_to_reg(val);
@@ -1157,8 +1155,12 @@ static ssize_t store_fan_beep(struct device *dev, struct device_attribute
*devattr, const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- unsigned long val = simple_strtoul(buf, NULL, 10);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ unsigned long val;
+
+ err = strict_strtoul(buf, 10, &val);
+ if (err)
+ return err;
mutex_lock(&data->update_lock);
data->fan_beep = f71882fg_read8(data, F71882FG_REG_FAN_BEEP);
@@ -1206,7 +1208,14 @@ static ssize_t store_in_max(struct device *dev, struct device_attribute
*devattr, const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- long val = simple_strtol(buf, NULL, 10) / 8;
+ int err;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val /= 8;
val = SENSORS_LIMIT(val, 0, 255);
mutex_lock(&data->update_lock);
@@ -1233,8 +1242,12 @@ static ssize_t store_in_beep(struct device *dev, struct device_attribute
*devattr, const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- unsigned long val = simple_strtoul(buf, NULL, 10);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ unsigned long val;
+
+ err = strict_strtoul(buf, 10, &val);
+ if (err)
+ return err;
mutex_lock(&data->update_lock);
data->in_beep = f71882fg_read8(data, F71882FG_REG_IN_BEEP);
@@ -1299,8 +1312,14 @@ static ssize_t store_temp_max(struct device *dev, struct device_attribute
*devattr, const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- long val = simple_strtol(buf, NULL, 10) / 1000;
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val /= 1000;
val = SENSORS_LIMIT(val, 0, 255);
mutex_lock(&data->update_lock);
@@ -1333,10 +1352,16 @@ static ssize_t store_temp_max_hyst(struct device *dev, struct device_attribute
*devattr, const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- long val = simple_strtol(buf, NULL, 10) / 1000;
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
ssize_t ret = count;
u8 reg;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val /= 1000;
mutex_lock(&data->update_lock);
@@ -1372,8 +1397,14 @@ static ssize_t store_temp_crit(struct device *dev, struct device_attribute
*devattr, const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- long val = simple_strtol(buf, NULL, 10) / 1000;
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val /= 1000;
val = SENSORS_LIMIT(val, 0, 255);
mutex_lock(&data->update_lock);
@@ -1427,8 +1458,12 @@ static ssize_t store_temp_beep(struct device *dev, struct device_attribute
*devattr, const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- unsigned long val = simple_strtoul(buf, NULL, 10);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ unsigned long val;
+
+ err = strict_strtoul(buf, 10, &val);
+ if (err)
+ return err;
mutex_lock(&data->update_lock);
data->temp_beep = f71882fg_read8(data, F71882FG_REG_TEMP_BEEP);
@@ -1490,8 +1525,13 @@ static ssize_t store_pwm(struct device *dev,
size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- long val = simple_strtol(buf, NULL, 10);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
+
val = SENSORS_LIMIT(val, 0, 255);
mutex_lock(&data->update_lock);
@@ -1551,8 +1591,12 @@ static ssize_t store_pwm_enable(struct device *dev, struct device_attribute
*devattr, const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- long val = simple_strtol(buf, NULL, 10);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
/* Special case for F8000 pwm channel 3 which only does auto mode */
if (data->type == f8000 && nr == 2 && val != 2)
@@ -1626,9 +1670,14 @@ static ssize_t store_pwm_auto_point_pwm(struct device *dev,
const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int pwm = to_sensor_dev_attr_2(devattr)->index;
+ int err, pwm = to_sensor_dev_attr_2(devattr)->index;
int point = to_sensor_dev_attr_2(devattr)->nr;
- long val = simple_strtol(buf, NULL, 10);
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
+
val = SENSORS_LIMIT(val, 0, 255);
mutex_lock(&data->update_lock);
@@ -1674,10 +1723,16 @@ static ssize_t store_pwm_auto_point_temp_hyst(struct device *dev,
const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
int point = to_sensor_dev_attr_2(devattr)->nr;
- long val = simple_strtol(buf, NULL, 10) / 1000;
u8 reg;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val /= 1000;
mutex_lock(&data->update_lock);
data->pwm_auto_point_temp[nr][point] =
@@ -1716,8 +1771,12 @@ static ssize_t store_pwm_interpolate(struct device *dev,
const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- unsigned long val = simple_strtoul(buf, NULL, 10);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ unsigned long val;
+
+ err = strict_strtoul(buf, 10, &val);
+ if (err)
+ return err;
mutex_lock(&data->update_lock);
data->pwm_auto_point_mapping[nr] =
@@ -1752,8 +1811,12 @@ static ssize_t store_pwm_auto_point_channel(struct device *dev,
const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- long val = simple_strtol(buf, NULL, 10);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
switch (val) {
case 1:
@@ -1798,9 +1861,15 @@ static ssize_t store_pwm_auto_point_temp(struct device *dev,
const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int pwm = to_sensor_dev_attr_2(devattr)->index;
+ int err, pwm = to_sensor_dev_attr_2(devattr)->index;
int point = to_sensor_dev_attr_2(devattr)->nr;
- long val = simple_strtol(buf, NULL, 10) / 1000;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val /= 1000;
if (data->type == f71889fg)
val = SENSORS_LIMIT(val, -128, 127);
@@ -2109,6 +2178,13 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
int err = -ENODEV;
u16 devid;
+ /* Don't step on other drivers' I/O space by accident */
+ if (!request_region(sioaddr, 2, DRVNAME)) {
+ printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n",
+ (int)sioaddr);
+ return -EBUSY;
+ }
+
superio_enter(sioaddr);
devid = superio_inw(sioaddr, SIO_REG_MANID);
@@ -2151,8 +2227,7 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
}
*address = superio_inw(sioaddr, SIO_REG_ADDR);
- if (*address == 0)
- {
+ if (*address == 0) {
printk(KERN_WARNING DRVNAME ": Base address not set\n");
goto exit;
}
@@ -2164,6 +2239,7 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
(int)superio_inb(sioaddr, SIO_REG_DEVREV));
exit:
superio_exit(sioaddr);
+ release_region(sioaddr, 2);
return err;
}
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index bf81aff7051d..776aeb3019d2 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -53,7 +53,7 @@
* Address is fully defined internally and cannot be changed.
*/
-static const unsigned short normal_i2c[] = { 0x4c, I2C_CLIENT_END };
+static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END };
/*
* The LM63 registers
@@ -131,12 +131,15 @@ static struct lm63_data *lm63_update_device(struct device *dev);
static int lm63_detect(struct i2c_client *client, struct i2c_board_info *info);
static void lm63_init_client(struct i2c_client *client);
+enum chips { lm63, lm64 };
+
/*
* Driver data (common to all clients)
*/
static const struct i2c_device_id lm63_id[] = {
- { "lm63", 0 },
+ { "lm63", lm63 },
+ { "lm64", lm64 },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm63_id);
@@ -422,6 +425,7 @@ static int lm63_detect(struct i2c_client *new_client,
struct i2c_adapter *adapter = new_client->adapter;
u8 man_id, chip_id, reg_config1, reg_config2;
u8 reg_alert_status, reg_alert_mask;
+ int address = new_client->addr;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
@@ -439,7 +443,6 @@ static int lm63_detect(struct i2c_client *new_client,
LM63_REG_ALERT_MASK);
if (man_id != 0x01 /* National Semiconductor */
- || chip_id != 0x41 /* LM63 */
|| (reg_config1 & 0x18) != 0x00
|| (reg_config2 & 0xF8) != 0x00
|| (reg_alert_status & 0x20) != 0x00
@@ -450,7 +453,12 @@ static int lm63_detect(struct i2c_client *new_client,
return -ENODEV;
}
- strlcpy(info->type, "lm63", I2C_NAME_SIZE);
+ if (chip_id == 0x41 && address == 0x4c)
+ strlcpy(info->type, "lm63", I2C_NAME_SIZE);
+ else if (chip_id == 0x51 && (address == 0x18 || address == 0x4e))
+ strlcpy(info->type, "lm64", I2C_NAME_SIZE);
+ else
+ return -ENODEV;
return 0;
}
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 7cc2708871ab..760ef72eea56 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -982,7 +982,8 @@ static struct lm90_data *lm90_update_device(struct device *dev)
mutex_lock(&data->update_lock);
- if (time_after(jiffies, data->last_updated + HZ * 2) || !data->valid) {
+ if (time_after(jiffies, data->last_updated + HZ / 2 + HZ / 10)
+ || !data->valid) {
u8 h, l;
dev_dbg(&client->dev, "Updating lm90 data.\n");
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index d14a1af9f550..ad8d535235c5 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -92,17 +92,6 @@ static const u8 TMP411_TEMP_HIGHEST_LSB[2] = { 0x33, 0x37 };
#define TMP411_DEVICE_ID 0x12
/*
- * Functions declarations
- */
-
-static int tmp401_probe(struct i2c_client *client,
- const struct i2c_device_id *id);
-static int tmp401_detect(struct i2c_client *client,
- struct i2c_board_info *info);
-static int tmp401_remove(struct i2c_client *client);
-static struct tmp401_data *tmp401_update_device(struct device *dev);
-
-/*
* Driver data (common to all clients)
*/
@@ -113,18 +102,6 @@ static const struct i2c_device_id tmp401_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tmp401_id);
-static struct i2c_driver tmp401_driver = {
- .class = I2C_CLASS_HWMON,
- .driver = {
- .name = "tmp401",
- },
- .probe = tmp401_probe,
- .remove = tmp401_remove,
- .id_table = tmp401_id,
- .detect = tmp401_detect,
- .address_list = normal_i2c,
-};
-
/*
* Client data (each client gets its own)
*/
@@ -194,6 +171,71 @@ static u8 tmp401_crit_temp_to_register(long temp, u8 config)
return (temp + 500) / 1000;
}
+static struct tmp401_data *tmp401_update_device_reg16(
+ struct i2c_client *client, struct tmp401_data *data)
+{
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ /*
+ * High byte must be read first immediately followed
+ * by the low byte
+ */
+ data->temp[i] = i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_MSB[i]) << 8;
+ data->temp[i] |= i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_LSB[i]);
+ data->temp_low[i] = i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_LOW_LIMIT_MSB_READ[i]) << 8;
+ data->temp_low[i] |= i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_LOW_LIMIT_LSB[i]);
+ data->temp_high[i] = i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_HIGH_LIMIT_MSB_READ[i]) << 8;
+ data->temp_high[i] |= i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_HIGH_LIMIT_LSB[i]);
+ data->temp_crit[i] = i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_CRIT_LIMIT[i]);
+
+ if (data->kind == tmp411) {
+ data->temp_lowest[i] = i2c_smbus_read_byte_data(client,
+ TMP411_TEMP_LOWEST_MSB[i]) << 8;
+ data->temp_lowest[i] |= i2c_smbus_read_byte_data(
+ client, TMP411_TEMP_LOWEST_LSB[i]);
+
+ data->temp_highest[i] = i2c_smbus_read_byte_data(
+ client, TMP411_TEMP_HIGHEST_MSB[i]) << 8;
+ data->temp_highest[i] |= i2c_smbus_read_byte_data(
+ client, TMP411_TEMP_HIGHEST_LSB[i]);
+ }
+ }
+ return data;
+}
+
+static struct tmp401_data *tmp401_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct tmp401_data *data = i2c_get_clientdata(client);
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
+ data->status = i2c_smbus_read_byte_data(client, TMP401_STATUS);
+ data->config = i2c_smbus_read_byte_data(client,
+ TMP401_CONFIG_READ);
+ tmp401_update_device_reg16(client, data);
+
+ data->temp_crit_hyst = i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_CRIT_HYST);
+
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+
+ mutex_unlock(&data->update_lock);
+
+ return data;
+}
+
static ssize_t show_temp_value(struct device *dev,
struct device_attribute *devattr, char *buf)
{
@@ -420,30 +462,36 @@ static ssize_t reset_temp_history(struct device *dev,
}
static struct sensor_device_attribute tmp401_attr[] = {
- SENSOR_ATTR(temp1_input, 0444, show_temp_value, NULL, 0),
- SENSOR_ATTR(temp1_min, 0644, show_temp_min, store_temp_min, 0),
- SENSOR_ATTR(temp1_max, 0644, show_temp_max, store_temp_max, 0),
- SENSOR_ATTR(temp1_crit, 0644, show_temp_crit, store_temp_crit, 0),
- SENSOR_ATTR(temp1_crit_hyst, 0644, show_temp_crit_hyst,
+ SENSOR_ATTR(temp1_input, S_IRUGO, show_temp_value, NULL, 0),
+ SENSOR_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp_min,
+ store_temp_min, 0),
+ SENSOR_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp_max,
+ store_temp_max, 0),
+ SENSOR_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp_crit,
+ store_temp_crit, 0),
+ SENSOR_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_temp_crit_hyst,
store_temp_crit_hyst, 0),
- SENSOR_ATTR(temp1_min_alarm, 0444, show_status, NULL,
+ SENSOR_ATTR(temp1_min_alarm, S_IRUGO, show_status, NULL,
TMP401_STATUS_LOCAL_LOW),
- SENSOR_ATTR(temp1_max_alarm, 0444, show_status, NULL,
+ SENSOR_ATTR(temp1_max_alarm, S_IRUGO, show_status, NULL,
TMP401_STATUS_LOCAL_HIGH),
- SENSOR_ATTR(temp1_crit_alarm, 0444, show_status, NULL,
+ SENSOR_ATTR(temp1_crit_alarm, S_IRUGO, show_status, NULL,
TMP401_STATUS_LOCAL_CRIT),
- SENSOR_ATTR(temp2_input, 0444, show_temp_value, NULL, 1),
- SENSOR_ATTR(temp2_min, 0644, show_temp_min, store_temp_min, 1),
- SENSOR_ATTR(temp2_max, 0644, show_temp_max, store_temp_max, 1),
- SENSOR_ATTR(temp2_crit, 0644, show_temp_crit, store_temp_crit, 1),
- SENSOR_ATTR(temp2_crit_hyst, 0444, show_temp_crit_hyst, NULL, 1),
- SENSOR_ATTR(temp2_fault, 0444, show_status, NULL,
+ SENSOR_ATTR(temp2_input, S_IRUGO, show_temp_value, NULL, 1),
+ SENSOR_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp_min,
+ store_temp_min, 1),
+ SENSOR_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp_max,
+ store_temp_max, 1),
+ SENSOR_ATTR(temp2_crit, S_IWUSR | S_IRUGO, show_temp_crit,
+ store_temp_crit, 1),
+ SENSOR_ATTR(temp2_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL, 1),
+ SENSOR_ATTR(temp2_fault, S_IRUGO, show_status, NULL,
TMP401_STATUS_REMOTE_OPEN),
- SENSOR_ATTR(temp2_min_alarm, 0444, show_status, NULL,
+ SENSOR_ATTR(temp2_min_alarm, S_IRUGO, show_status, NULL,
TMP401_STATUS_REMOTE_LOW),
- SENSOR_ATTR(temp2_max_alarm, 0444, show_status, NULL,
+ SENSOR_ATTR(temp2_max_alarm, S_IRUGO, show_status, NULL,
TMP401_STATUS_REMOTE_HIGH),
- SENSOR_ATTR(temp2_crit_alarm, 0444, show_status, NULL,
+ SENSOR_ATTR(temp2_crit_alarm, S_IRUGO, show_status, NULL,
TMP401_STATUS_REMOTE_CRIT),
};
@@ -455,11 +503,11 @@ static struct sensor_device_attribute tmp401_attr[] = {
* and remote channels.
*/
static struct sensor_device_attribute tmp411_attr[] = {
- SENSOR_ATTR(temp1_highest, 0444, show_temp_highest, NULL, 0),
- SENSOR_ATTR(temp1_lowest, 0444, show_temp_lowest, NULL, 0),
- SENSOR_ATTR(temp2_highest, 0444, show_temp_highest, NULL, 1),
- SENSOR_ATTR(temp2_lowest, 0444, show_temp_lowest, NULL, 1),
- SENSOR_ATTR(temp_reset_history, 0200, NULL, reset_temp_history, 0),
+ SENSOR_ATTR(temp1_highest, S_IRUGO, show_temp_highest, NULL, 0),
+ SENSOR_ATTR(temp1_lowest, S_IRUGO, show_temp_lowest, NULL, 0),
+ SENSOR_ATTR(temp2_highest, S_IRUGO, show_temp_highest, NULL, 1),
+ SENSOR_ATTR(temp2_lowest, S_IRUGO, show_temp_lowest, NULL, 1),
+ SENSOR_ATTR(temp_reset_history, S_IWUSR, NULL, reset_temp_history, 0),
};
/*
@@ -529,6 +577,27 @@ static int tmp401_detect(struct i2c_client *client,
return 0;
}
+static int tmp401_remove(struct i2c_client *client)
+{
+ struct tmp401_data *data = i2c_get_clientdata(client);
+ int i;
+
+ if (data->hwmon_dev)
+ hwmon_device_unregister(data->hwmon_dev);
+
+ for (i = 0; i < ARRAY_SIZE(tmp401_attr); i++)
+ device_remove_file(&client->dev, &tmp401_attr[i].dev_attr);
+
+ if (data->kind == tmp411) {
+ for (i = 0; i < ARRAY_SIZE(tmp411_attr); i++)
+ device_remove_file(&client->dev,
+ &tmp411_attr[i].dev_attr);
+ }
+
+ kfree(data);
+ return 0;
+}
+
static int tmp401_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -581,91 +650,17 @@ exit_remove:
return err;
}
-static int tmp401_remove(struct i2c_client *client)
-{
- struct tmp401_data *data = i2c_get_clientdata(client);
- int i;
-
- if (data->hwmon_dev)
- hwmon_device_unregister(data->hwmon_dev);
-
- for (i = 0; i < ARRAY_SIZE(tmp401_attr); i++)
- device_remove_file(&client->dev, &tmp401_attr[i].dev_attr);
-
- if (data->kind == tmp411) {
- for (i = 0; i < ARRAY_SIZE(tmp411_attr); i++)
- device_remove_file(&client->dev,
- &tmp411_attr[i].dev_attr);
- }
-
- kfree(data);
- return 0;
-}
-
-static struct tmp401_data *tmp401_update_device_reg16(
- struct i2c_client *client, struct tmp401_data *data)
-{
- int i;
-
- for (i = 0; i < 2; i++) {
- /*
- * High byte must be read first immediately followed
- * by the low byte
- */
- data->temp[i] = i2c_smbus_read_byte_data(client,
- TMP401_TEMP_MSB[i]) << 8;
- data->temp[i] |= i2c_smbus_read_byte_data(client,
- TMP401_TEMP_LSB[i]);
- data->temp_low[i] = i2c_smbus_read_byte_data(client,
- TMP401_TEMP_LOW_LIMIT_MSB_READ[i]) << 8;
- data->temp_low[i] |= i2c_smbus_read_byte_data(client,
- TMP401_TEMP_LOW_LIMIT_LSB[i]);
- data->temp_high[i] = i2c_smbus_read_byte_data(client,
- TMP401_TEMP_HIGH_LIMIT_MSB_READ[i]) << 8;
- data->temp_high[i] |= i2c_smbus_read_byte_data(client,
- TMP401_TEMP_HIGH_LIMIT_LSB[i]);
- data->temp_crit[i] = i2c_smbus_read_byte_data(client,
- TMP401_TEMP_CRIT_LIMIT[i]);
-
- if (data->kind == tmp411) {
- data->temp_lowest[i] = i2c_smbus_read_byte_data(client,
- TMP411_TEMP_LOWEST_MSB[i]) << 8;
- data->temp_lowest[i] |= i2c_smbus_read_byte_data(
- client, TMP411_TEMP_LOWEST_LSB[i]);
-
- data->temp_highest[i] = i2c_smbus_read_byte_data(
- client, TMP411_TEMP_HIGHEST_MSB[i]) << 8;
- data->temp_highest[i] |= i2c_smbus_read_byte_data(
- client, TMP411_TEMP_HIGHEST_LSB[i]);
- }
- }
- return data;
-}
-
-static struct tmp401_data *tmp401_update_device(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct tmp401_data *data = i2c_get_clientdata(client);
-
- mutex_lock(&data->update_lock);
-
- if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
- data->status = i2c_smbus_read_byte_data(client, TMP401_STATUS);
- data->config = i2c_smbus_read_byte_data(client,
- TMP401_CONFIG_READ);
- tmp401_update_device_reg16(client, data);
-
- data->temp_crit_hyst = i2c_smbus_read_byte_data(client,
- TMP401_TEMP_CRIT_HYST);
-
- data->last_updated = jiffies;
- data->valid = 1;
- }
-
- mutex_unlock(&data->update_lock);
-
- return data;
-}
+static struct i2c_driver tmp401_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "tmp401",
+ },
+ .probe = tmp401_probe,
+ .remove = tmp401_remove,
+ .id_table = tmp401_id,
+ .detect = tmp401_detect,
+ .address_list = normal_i2c,
+};
static int __init tmp401_init(void)
{
diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c
index dcdaf8e675bf..2b9a8f54bb2c 100644
--- a/drivers/i2c/algos/i2c-algo-pca.c
+++ b/drivers/i2c/algos/i2c-algo-pca.c
@@ -109,13 +109,13 @@ static void pca_stop(struct i2c_algo_pca_data *adap)
* returns after the address has been sent
*/
static int pca_address(struct i2c_algo_pca_data *adap,
- struct i2c_msg *msg)
+ struct i2c_msg *msg)
{
int sta = pca_get_con(adap);
int addr;
- addr = ( (0x7f & msg->addr) << 1 );
- if (msg->flags & I2C_M_RD )
+ addr = ((0x7f & msg->addr) << 1);
+ if (msg->flags & I2C_M_RD)
addr |= 1;
DEB2("=== SLAVE ADDRESS %#04x+%c=%#04x\n",
msg->addr, msg->flags & I2C_M_RD ? 'R' : 'W', addr);
@@ -134,7 +134,7 @@ static int pca_address(struct i2c_algo_pca_data *adap,
* Returns after the byte has been transmitted
*/
static int pca_tx_byte(struct i2c_algo_pca_data *adap,
- __u8 b)
+ __u8 b)
{
int sta = pca_get_con(adap);
DEB2("=== WRITE %#04x\n", b);
@@ -164,13 +164,13 @@ static void pca_rx_byte(struct i2c_algo_pca_data *adap,
* Returns after next byte has arrived.
*/
static int pca_rx_ack(struct i2c_algo_pca_data *adap,
- int ack)
+ int ack)
{
int sta = pca_get_con(adap);
sta &= ~(I2C_PCA_CON_STO|I2C_PCA_CON_STA|I2C_PCA_CON_SI|I2C_PCA_CON_AA);
- if ( ack )
+ if (ack)
sta |= I2C_PCA_CON_AA;
pca_set_con(adap, sta);
@@ -178,12 +178,12 @@ static int pca_rx_ack(struct i2c_algo_pca_data *adap,
}
static int pca_xfer(struct i2c_adapter *i2c_adap,
- struct i2c_msg *msgs,
- int num)
+ struct i2c_msg *msgs,
+ int num)
{
- struct i2c_algo_pca_data *adap = i2c_adap->algo_data;
- struct i2c_msg *msg = NULL;
- int curmsg;
+ struct i2c_algo_pca_data *adap = i2c_adap->algo_data;
+ struct i2c_msg *msg = NULL;
+ int curmsg;
int numbytes = 0;
int state;
int ret;
@@ -202,21 +202,21 @@ static int pca_xfer(struct i2c_adapter *i2c_adap,
DEB1("{{{ XFER %d messages\n", num);
- if (i2c_debug>=2) {
+ if (i2c_debug >= 2) {
for (curmsg = 0; curmsg < num; curmsg++) {
int addr, i;
msg = &msgs[curmsg];
addr = (0x7f & msg->addr) ;
- if (msg->flags & I2C_M_RD )
+ if (msg->flags & I2C_M_RD)
printk(KERN_INFO " [%02d] RD %d bytes from %#02x [%#02x, ...]\n",
- curmsg, msg->len, addr, (addr<<1) | 1);
+ curmsg, msg->len, addr, (addr << 1) | 1);
else {
printk(KERN_INFO " [%02d] WR %d bytes to %#02x [%#02x%s",
- curmsg, msg->len, addr, addr<<1,
+ curmsg, msg->len, addr, addr << 1,
msg->len == 0 ? "" : ", ");
- for(i=0; i < msg->len; i++)
+ for (i = 0; i < msg->len; i++)
printk("%#04x%s", msg->buf[i], i == msg->len - 1 ? "" : ", ");
printk("]\n");
}
@@ -305,7 +305,7 @@ static int pca_xfer(struct i2c_adapter *i2c_adap,
goto out;
case 0x58: /* Data byte has been received; NOT ACK has been returned */
- if ( numbytes == msg->len - 1 ) {
+ if (numbytes == msg->len - 1) {
pca_rx_byte(adap, &msg->buf[numbytes], 0);
curmsg++; numbytes = 0;
if (curmsg == num)
@@ -352,7 +352,7 @@ static int pca_xfer(struct i2c_adapter *i2c_adap,
static u32 pca_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
static const struct i2c_algorithm pca_algo = {
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 9c6170cd9aac..dec387d3f04d 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -529,7 +529,7 @@ config I2C_SH7760
config I2C_SH_MOBILE
tristate "SuperH Mobile I2C Controller"
- depends on SUPERH
+ depends on SUPERH || ARCH_SHMOBILE
help
If you say yes to this option, support will be included for the
built-in I2C interface on the Renesas SH-Mobile processor.
@@ -564,7 +564,7 @@ config I2C_STU300
config I2C_VERSATILE
tristate "ARM Versatile/Realview I2C bus support"
- depends on ARCH_VERSATILE || ARCH_REALVIEW
+ depends on ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS
select I2C_ALGOBIT
help
Say yes if you want to support the I2C serial bus on ARMs Versatile
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
index bd8f1e4d9e6c..906a3ca50db6 100644
--- a/drivers/i2c/busses/i2c-ali1535.c
+++ b/drivers/i2c/busses/i2c-ali1535.c
@@ -60,7 +60,7 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
/* ALI1535 SMBus address offsets */
diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c
index 659f63f5e4af..b14f6d68221d 100644
--- a/drivers/i2c/busses/i2c-ali15x3.c
+++ b/drivers/i2c/busses/i2c-ali15x3.c
@@ -67,7 +67,7 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
/* ALI15X3 SMBus address offsets */
#define SMBHSTSTS (0 + ali15x3_smba)
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index c5a9fa488e7f..03bcd07c4697 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -43,7 +43,7 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
/* AMD756 SMBus address offsets */
#define SMB_ADDR_OFFSET 0xE0
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index 2fbef27b6cd6..af1e5e254b7b 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -18,7 +18,7 @@
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/slab.h>
-#include <asm/io.h>
+#include <linux/io.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR ("Vojtech Pavlik <vojtech@suse.cz>");
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index 06e1ecb4919f..305c07504f7e 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -23,8 +23,7 @@
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
#include <mach/at91_twi.h>
#include <mach/board.h>
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index f1e14dd590c9..fb26e5c67515 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -25,8 +25,6 @@
#include <asm/portmux.h>
#include <asm/irq.h>
-#define POLL_TIMEOUT (2 * HZ)
-
/* SMBus mode*/
#define TWI_I2C_MODE_STANDARD 1
#define TWI_I2C_MODE_STANDARDSUB 2
@@ -44,8 +42,6 @@ struct bfin_twi_iface {
int cur_mode;
int manual_stop;
int result;
- int timeout_count;
- struct timer_list timeout_timer;
struct i2c_adapter adap;
struct completion complete;
struct i2c_msg *pmsg;
@@ -85,14 +81,15 @@ static const u16 pin_req[2][3] = {
{P_TWI1_SCL, P_TWI1_SDA, 0},
};
-static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface)
+static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface,
+ unsigned short twi_int_status)
{
- unsigned short twi_int_status = read_INT_STAT(iface);
unsigned short mast_stat = read_MASTER_STAT(iface);
if (twi_int_status & XMTSERV) {
/* Transmit next data */
if (iface->writeNum > 0) {
+ SSYNC();
write_XMT_DATA8(iface, *(iface->transPtr++));
iface->writeNum--;
}
@@ -114,10 +111,6 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface)
write_MASTER_CTL(iface,
(read_MASTER_CTL(iface) | RSTART) & ~MDIR);
}
- SSYNC();
- /* Clear status */
- write_INT_STAT(iface, XMTSERV);
- SSYNC();
}
if (twi_int_status & RCVSERV) {
if (iface->readNum > 0) {
@@ -139,7 +132,6 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface)
} else if (iface->manual_stop) {
write_MASTER_CTL(iface,
read_MASTER_CTL(iface) | STOP);
- SSYNC();
} else if (iface->cur_mode == TWI_I2C_MODE_REPEAT &&
iface->cur_msg + 1 < iface->msg_num) {
if (iface->pmsg[iface->cur_msg + 1].flags & I2C_M_RD)
@@ -148,44 +140,37 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface)
else
write_MASTER_CTL(iface,
(read_MASTER_CTL(iface) | RSTART) & ~MDIR);
- SSYNC();
}
- /* Clear interrupt source */
- write_INT_STAT(iface, RCVSERV);
- SSYNC();
}
if (twi_int_status & MERR) {
- write_INT_STAT(iface, MERR);
write_INT_MASK(iface, 0);
write_MASTER_STAT(iface, 0x3e);
write_MASTER_CTL(iface, 0);
- SSYNC();
iface->result = -EIO;
- /* if both err and complete int stats are set, return proper
- * results.
+
+ if (mast_stat & LOSTARB)
+ dev_dbg(&iface->adap.dev, "Lost Arbitration\n");
+ if (mast_stat & ANAK)
+ dev_dbg(&iface->adap.dev, "Address Not Acknowledged\n");
+ if (mast_stat & DNAK)
+ dev_dbg(&iface->adap.dev, "Data Not Acknowledged\n");
+ if (mast_stat & BUFRDERR)
+ dev_dbg(&iface->adap.dev, "Buffer Read Error\n");
+ if (mast_stat & BUFWRERR)
+ dev_dbg(&iface->adap.dev, "Buffer Write Error\n");
+
+ /* If it is a quick transfer, carrying only the address and
+ * no data, this is not an error; return 1.
+ */
- if (twi_int_status & MCOMP) {
- write_INT_STAT(iface, MCOMP);
- write_INT_MASK(iface, 0);
- write_MASTER_CTL(iface, 0);
- SSYNC();
- /* If it is a quick transfer, only address bug no data,
- * not an err, return 1.
- */
- if (iface->writeNum == 0 && (mast_stat & BUFRDERR))
- iface->result = 1;
- /* If address not acknowledged return -1,
- * else return 0.
- */
- else if (!(mast_stat & ANAK))
- iface->result = 0;
- }
+ if (iface->cur_mode == TWI_I2C_MODE_STANDARD &&
+ iface->transPtr == NULL &&
+ (twi_int_status & MCOMP) && (mast_stat & DNAK))
+ iface->result = 1;
+
complete(&iface->complete);
return;
}
if (twi_int_status & MCOMP) {
- write_INT_STAT(iface, MCOMP);
- SSYNC();
if (iface->cur_mode == TWI_I2C_MODE_COMBINED) {
if (iface->readNum == 0) {
/* set the read number to 1 and ask for manual
@@ -207,7 +192,6 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface)
/* remove restart bit and enable master receive */
write_MASTER_CTL(iface,
read_MASTER_CTL(iface) & ~RSTART);
- SSYNC();
} else if (iface->cur_mode == TWI_I2C_MODE_REPEAT &&
iface->cur_msg+1 < iface->msg_num) {
iface->cur_msg++;
@@ -226,7 +210,6 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface)
write_XMT_DATA8(iface,
*(iface->transPtr++));
iface->writeNum--;
- SSYNC();
}
}
@@ -244,15 +227,13 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface)
/* remove restart bit and enable master receive */
write_MASTER_CTL(iface,
read_MASTER_CTL(iface) & ~RSTART);
- SSYNC();
} else {
iface->result = 1;
write_INT_MASK(iface, 0);
write_MASTER_CTL(iface, 0);
- SSYNC();
- complete(&iface->complete);
}
}
+ complete(&iface->complete);
}
/* Interrupt handler */
@@ -260,38 +241,26 @@ static irqreturn_t bfin_twi_interrupt_entry(int irq, void *dev_id)
{
struct bfin_twi_iface *iface = dev_id;
unsigned long flags;
+ unsigned short twi_int_status;
spin_lock_irqsave(&iface->lock, flags);
- del_timer(&iface->timeout_timer);
- bfin_twi_handle_interrupt(iface);
- spin_unlock_irqrestore(&iface->lock, flags);
- return IRQ_HANDLED;
-}
-
-static void bfin_twi_timeout(unsigned long data)
-{
- struct bfin_twi_iface *iface = (struct bfin_twi_iface *)data;
- unsigned long flags;
-
- spin_lock_irqsave(&iface->lock, flags);
- bfin_twi_handle_interrupt(iface);
- if (iface->result == 0) {
- iface->timeout_count--;
- if (iface->timeout_count > 0) {
- iface->timeout_timer.expires = jiffies + POLL_TIMEOUT;
- add_timer(&iface->timeout_timer);
- } else {
- iface->result = -1;
- complete(&iface->complete);
- }
+ while (1) {
+ twi_int_status = read_INT_STAT(iface);
+ if (!twi_int_status)
+ break;
+ /* Clear interrupt status */
+ write_INT_STAT(iface, twi_int_status);
+ bfin_twi_handle_interrupt(iface, twi_int_status);
+ SSYNC();
}
spin_unlock_irqrestore(&iface->lock, flags);
+ return IRQ_HANDLED;
}
/*
- * Generic i2c master transfer entrypoint
+ * One i2c master transfer
*/
-static int bfin_twi_master_xfer(struct i2c_adapter *adap,
+static int bfin_twi_do_master_xfer(struct i2c_adapter *adap,
struct i2c_msg *msgs, int num)
{
struct bfin_twi_iface *iface = adap->algo_data;
@@ -319,7 +288,6 @@ static int bfin_twi_master_xfer(struct i2c_adapter *adap,
iface->transPtr = pmsg->buf;
iface->writeNum = iface->readNum = pmsg->len;
iface->result = 0;
- iface->timeout_count = 10;
init_completion(&(iface->complete));
/* Set Transmit device address */
write_MASTER_ADDR(iface, pmsg->addr);
@@ -358,30 +326,41 @@ static int bfin_twi_master_xfer(struct i2c_adapter *adap,
iface->manual_stop = 1;
}
- iface->timeout_timer.expires = jiffies + POLL_TIMEOUT;
- add_timer(&iface->timeout_timer);
-
/* Master enable */
write_MASTER_CTL(iface, read_MASTER_CTL(iface) | MEN |
((iface->read_write == I2C_SMBUS_READ) ? MDIR : 0) |
((CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ > 100) ? FAST : 0));
SSYNC();
- wait_for_completion(&iface->complete);
-
- rc = iface->result;
+ while (!iface->result) {
+ if (!wait_for_completion_timeout(&iface->complete,
+ adap->timeout)) {
+ iface->result = -1;
+ dev_err(&adap->dev, "master transfer timeout\n");
+ }
+ }
- if (rc == 1)
- return num;
+ if (iface->result == 1)
+ rc = iface->cur_msg + 1;
else
- return rc;
+ rc = iface->result;
+
+ return rc;
}
/*
- * SMBus type transfer entrypoint
+ * Generic i2c master transfer entrypoint
*/
+static int bfin_twi_master_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num)
+{
+ return bfin_twi_do_master_xfer(adap, msgs, num);
+}
-int bfin_twi_smbus_xfer(struct i2c_adapter *adap, u16 addr,
+/*
+ * One I2C SMBus transfer
+ */
+int bfin_twi_do_smbus_xfer(struct i2c_adapter *adap, u16 addr,
unsigned short flags, char read_write,
u8 command, int size, union i2c_smbus_data *data)
{
@@ -469,7 +448,6 @@ int bfin_twi_smbus_xfer(struct i2c_adapter *adap, u16 addr,
iface->manual_stop = 0;
iface->read_write = read_write;
iface->command = command;
- iface->timeout_count = 10;
init_completion(&(iface->complete));
/* FIFO Initiation. Data in FIFO should be discarded before
@@ -486,9 +464,6 @@ int bfin_twi_smbus_xfer(struct i2c_adapter *adap, u16 addr,
write_MASTER_ADDR(iface, addr);
SSYNC();
- iface->timeout_timer.expires = jiffies + POLL_TIMEOUT;
- add_timer(&iface->timeout_timer);
-
switch (iface->cur_mode) {
case TWI_I2C_MODE_STANDARDSUB:
write_XMT_DATA8(iface, iface->command);
@@ -550,10 +525,8 @@ int bfin_twi_smbus_xfer(struct i2c_adapter *adap, u16 addr,
else if (iface->readNum > 255) {
write_MASTER_CTL(iface, 0xff << 6);
iface->manual_stop = 1;
- } else {
- del_timer(&iface->timeout_timer);
+ } else
break;
- }
}
}
write_INT_MASK(iface, MCOMP | MERR |
@@ -569,7 +542,13 @@ int bfin_twi_smbus_xfer(struct i2c_adapter *adap, u16 addr,
}
SSYNC();
- wait_for_completion(&iface->complete);
+ while (!iface->result) {
+ if (!wait_for_completion_timeout(&iface->complete,
+ adap->timeout)) {
+ iface->result = -1;
+ dev_err(&adap->dev, "smbus transfer timeout\n");
+ }
+ }
rc = (iface->result >= 0) ? 0 : -1;
@@ -577,6 +556,17 @@ int bfin_twi_smbus_xfer(struct i2c_adapter *adap, u16 addr,
}
/*
+ * Generic I2C SMBus transfer entrypoint
+ */
+int bfin_twi_smbus_xfer(struct i2c_adapter *adap, u16 addr,
+ unsigned short flags, char read_write,
+ u8 command, int size, union i2c_smbus_data *data)
+{
+ return bfin_twi_do_smbus_xfer(adap, addr, flags,
+ read_write, command, size, data);
+}
+
+/*
* Return what the adapter supports
*/
static u32 bfin_twi_functionality(struct i2c_adapter *adap)
@@ -667,10 +657,6 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
goto out_error_no_irq;
}
- init_timer(&(iface->timeout_timer));
- iface->timeout_timer.function = bfin_twi_timeout;
- iface->timeout_timer.data = (unsigned long)iface;
-
p_adap = &iface->adap;
p_adap->nr = pdev->id;
strlcpy(p_adap->name, pdev->name, sizeof(p_adap->name));
@@ -678,6 +664,8 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
p_adap->algo_data = iface;
p_adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
p_adap->dev.parent = &pdev->dev;
+ p_adap->timeout = 5 * HZ;
+ p_adap->retries = 3;
rc = peripheral_request_list(pin_req[pdev->id], "i2c-bfin-twi");
if (rc) {
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index 9c2e10082b79..16948db38973 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -441,7 +441,7 @@ static int __devinit cpm_i2c_setup(struct cpm_i2c *cpm)
init_waitqueue_head(&cpm->i2c_wait);
cpm->irq = of_irq_to_resource(ofdev->node, 0, NULL);
- if (cpm->irq == NO_IRQ)
+ if (!cpm->irq)
return -EINVAL;
/* Install interrupt handler. */
diff --git a/drivers/i2c/busses/i2c-elektor.c b/drivers/i2c/busses/i2c-elektor.c
index 612255614a66..e5b1a3bf5b80 100644
--- a/drivers/i2c/busses/i2c-elektor.c
+++ b/drivers/i2c/busses/i2c-elektor.c
@@ -37,8 +37,8 @@
#include <linux/isa.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-pcf.h>
+#include <linux/io.h>
-#include <asm/io.h>
#include <asm/irq.h>
#include "../algos/i2c-algo-pcf.h"
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index c21077d248af..d9aa9a649e35 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -211,7 +211,7 @@ static int __init i2c_gpio_init(void)
return ret;
}
-module_init(i2c_gpio_init);
+subsys_initcall(i2c_gpio_init);
static void __exit i2c_gpio_exit(void)
{
diff --git a/drivers/i2c/busses/i2c-hydra.c b/drivers/i2c/busses/i2c-hydra.c
index c767295ad1fb..9ff1695d8458 100644
--- a/drivers/i2c/busses/i2c-hydra.c
+++ b/drivers/i2c/busses/i2c-hydra.c
@@ -28,7 +28,7 @@
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/init.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/hydra.h>
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 299b918455a3..f4b21f2bb8ed 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -138,6 +138,17 @@ static struct pci_dev *I801_dev;
#define FEATURE_I2C_BLOCK_READ (1 << 3)
static unsigned int i801_features;
+static const char *i801_feature_names[] = {
+ "SMBus PEC",
+ "Block buffer",
+ "Block process call",
+ "I2C block read",
+};
+
+static unsigned int disable_features;
+module_param(disable_features, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(disable_features, "Disable selected driver features");
+
/* Make sure the SMBus host is ready to start transmitting.
Return 0 if it is, -EBUSY if it is not. */
static int i801_check_pre(void)
@@ -341,9 +352,8 @@ static int i801_block_transaction_byte_by_byte(union i2c_smbus_data *data,
do {
msleep(1);
status = inb_p(SMBHSTSTS);
- }
- while ((!(status & SMBHSTSTS_BYTE_DONE))
- && (timeout++ < MAX_TIMEOUT));
+ } while ((!(status & SMBHSTSTS_BYTE_DONE))
+ && (timeout++ < MAX_TIMEOUT));
result = i801_check_post(status, timeout > MAX_TIMEOUT);
if (result < 0)
@@ -440,9 +450,9 @@ static int i801_block_transaction(union i2c_smbus_data *data, char read_write,
}
/* Return negative errno on error. */
-static s32 i801_access(struct i2c_adapter * adap, u16 addr,
+static s32 i801_access(struct i2c_adapter *adap, u16 addr,
unsigned short flags, char read_write, u8 command,
- int size, union i2c_smbus_data * data)
+ int size, union i2c_smbus_data *data)
{
int hwpec;
int block = 0;
@@ -511,7 +521,7 @@ static s32 i801_access(struct i2c_adapter * adap, u16 addr,
else
outb_p(inb_p(SMBAUXCTL) & (~SMBAUXCTL_CRC), SMBAUXCTL);
- if(block)
+ if (block)
ret = i801_block_transaction(data, read_write, size, hwpec);
else
ret = i801_transaction(xact | ENABLE_INT9);
@@ -523,9 +533,9 @@ static s32 i801_access(struct i2c_adapter * adap, u16 addr,
outb_p(inb_p(SMBAUXCTL) & ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B),
SMBAUXCTL);
- if(block)
+ if (block)
return ret;
- if(ret)
+ if (ret)
return ret;
if ((read_write == I2C_SMBUS_WRITE) || (xact == I801_QUICK))
return 0;
@@ -585,7 +595,7 @@ static const struct pci_device_id i801_ids[] = {
{ 0, }
};
-MODULE_DEVICE_TABLE (pci, i801_ids);
+MODULE_DEVICE_TABLE(pci, i801_ids);
#if defined CONFIG_INPUT_APANEL || defined CONFIG_INPUT_APANEL_MODULE
static unsigned char apanel_addr;
@@ -689,10 +699,11 @@ static void __devinit dmi_check_onboard_devices(const struct dmi_header *dm,
}
#endif
-static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int __devinit i801_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
{
unsigned char temp;
- int err;
+ int err, i;
#if defined CONFIG_SENSORS_FSCHMD || defined CONFIG_SENSORS_FSCHMD_MODULE
const char *vendor;
#endif
@@ -700,26 +711,28 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
I801_dev = dev;
i801_features = 0;
switch (dev->device) {
- case PCI_DEVICE_ID_INTEL_82801EB_3:
- case PCI_DEVICE_ID_INTEL_ESB_4:
- case PCI_DEVICE_ID_INTEL_ICH6_16:
- case PCI_DEVICE_ID_INTEL_ICH7_17:
- case PCI_DEVICE_ID_INTEL_ESB2_17:
- case PCI_DEVICE_ID_INTEL_ICH8_5:
- case PCI_DEVICE_ID_INTEL_ICH9_6:
- case PCI_DEVICE_ID_INTEL_TOLAPAI_1:
- case PCI_DEVICE_ID_INTEL_ICH10_4:
- case PCI_DEVICE_ID_INTEL_ICH10_5:
- case PCI_DEVICE_ID_INTEL_PCH_SMBUS:
- case PCI_DEVICE_ID_INTEL_CPT_SMBUS:
+ default:
i801_features |= FEATURE_I2C_BLOCK_READ;
/* fall through */
case PCI_DEVICE_ID_INTEL_82801DB_3:
i801_features |= FEATURE_SMBUS_PEC;
i801_features |= FEATURE_BLOCK_BUFFER;
+ /* fall through */
+ case PCI_DEVICE_ID_INTEL_82801CA_3:
+ case PCI_DEVICE_ID_INTEL_82801BA_2:
+ case PCI_DEVICE_ID_INTEL_82801AB_3:
+ case PCI_DEVICE_ID_INTEL_82801AA_3:
break;
}
+ /* Disable features on user request */
+ for (i = 0; i < ARRAY_SIZE(i801_feature_names); i++) {
+ if (i801_features & disable_features & (1 << i))
+ dev_notice(&dev->dev, "%s disabled by user\n",
+ i801_feature_names[i]);
+ }
+ i801_features &= ~disable_features;
+
err = pci_enable_device(dev);
if (err) {
dev_err(&dev->dev, "Failed to enable SMBus PCI device (%d)\n",
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index b1bc6e277d2a..f8ccc0fe95a8 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -39,7 +39,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/irq.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/i2c.h>
#include <linux/i2c-id.h>
#include <linux/of_platform.h>
@@ -668,12 +668,12 @@ static int __devinit iic_request_irq(struct of_device *ofdev,
int irq;
if (iic_force_poll)
- return NO_IRQ;
+ return 0;
irq = irq_of_parse_and_map(np, 0);
- if (irq == NO_IRQ) {
+ if (!irq) {
dev_err(&ofdev->dev, "irq_of_parse_and_map failed\n");
- return NO_IRQ;
+ return 0;
}
/* Disable interrupts until we finish initialization, assumes
@@ -683,7 +683,7 @@ static int __devinit iic_request_irq(struct of_device *ofdev,
if (request_irq(irq, iic_handler, 0, "IBM IIC", dev)) {
dev_err(&ofdev->dev, "request_irq %d failed\n", irq);
/* Fallback to the polling mode */
- return NO_IRQ;
+ return 0;
}
return irq;
@@ -719,7 +719,7 @@ static int __devinit iic_probe(struct of_device *ofdev,
init_waitqueue_head(&dev->wq);
dev->irq = iic_request_irq(ofdev, dev);
- if (dev->irq == NO_IRQ)
+ if (!dev->irq)
dev_warn(&ofdev->dev, "using polling mode\n");
/* Board specific settings */
@@ -766,7 +766,7 @@ static int __devinit iic_probe(struct of_device *ofdev,
return 0;
error_cleanup:
- if (dev->irq != NO_IRQ) {
+ if (dev->irq) {
iic_interrupt_mode(dev, 0);
free_irq(dev->irq, dev);
}
@@ -790,7 +790,7 @@ static int __devexit iic_remove(struct of_device *ofdev)
i2c_del_adapter(&dev->adap);
- if (dev->irq != NO_IRQ) {
+ if (dev->irq) {
iic_interrupt_mode(dev, 0);
free_irq(dev->irq, dev);
}
diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
index 5901707fc66a..112c61f7b8cd 100644
--- a/drivers/i2c/busses/i2c-iop3xx.c
+++ b/drivers/i2c/busses/i2c-iop3xx.c
@@ -38,8 +38,7 @@
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
#include "i2c-iop3xx.h"
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index f1321f763789..e86cef300c7d 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -118,7 +118,7 @@ static int i2c_wait(struct mpc_i2c *i2c, unsigned timeout, int writing)
u32 x;
int result = 0;
- if (i2c->irq == NO_IRQ) {
+ if (!i2c->irq) {
while (!(readb(i2c->base + MPC_I2C_SR) & CSR_MIF)) {
schedule();
if (time_after(jiffies, orig_jiffies + timeout)) {
@@ -568,7 +568,7 @@ static int __devinit fsl_i2c_probe(struct of_device *op,
}
i2c->irq = irq_of_parse_and_map(op->node, 0);
- if (i2c->irq != NO_IRQ) { /* i2c->irq = NO_IRQ implies polling */
+ if (i2c->irq) { /* no i2c->irq implies polling */
result = request_irq(i2c->irq, mpc_i2c_isr,
IRQF_SHARED, "i2c-mpc", i2c);
if (result < 0) {
@@ -627,7 +627,7 @@ static int __devexit fsl_i2c_remove(struct of_device *op)
i2c_del_adapter(&i2c->adap);
dev_set_drvdata(&op->dev, NULL);
- if (i2c->irq != NO_IRQ)
+ if (i2c->irq)
free_irq(i2c->irq, i2c);
irq_dispose_mapping(i2c->irq);
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 3623a4499084..16242063144f 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -17,8 +17,7 @@
#include <linux/interrupt.h>
#include <linux/mv643xx_i2c.h>
#include <linux/platform_device.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
/* Register defines */
#define MV64XXX_I2C_REG_SLAVE_ADDR 0x00
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index 4a48dd4ef787..b23dfe9f2787 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -57,7 +57,7 @@
#include <linux/dmi.h>
#include <linux/acpi.h>
#include <linux/slab.h>
-#include <asm/io.h>
+#include <linux/io.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR ("Hans-Frieder Vogt <hfvogt@gmx.net>");
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index a4f8d33fa389..73de8ade10b1 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -704,7 +704,8 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg)
case I2C_IT_MTD:
case I2C_IT_MTDWS:
if (dev->cli.operation == I2C_READ) {
- while (!readl(dev->virtbase + I2C_RISR) & I2C_IT_RXFE) {
+ while (!(readl(dev->virtbase + I2C_RISR)
+ & I2C_IT_RXFE)) {
if (dev->cli.count == 0)
break;
*dev->cli.buffer =
@@ -914,6 +915,7 @@ static int __devinit nmk_i2c_probe(struct platform_device *pdev)
static int __devexit nmk_i2c_remove(struct platform_device *pdev)
{
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct nmk_i2c_dev *dev = platform_get_drvdata(pdev);
i2c_del_adapter(&dev->adap);
@@ -924,6 +926,8 @@ static int __devexit nmk_i2c_remove(struct platform_device *pdev)
i2c_clr_bit(dev->virtbase + I2C_CR, I2C_CR_PE);
free_irq(dev->irq, dev);
iounmap(dev->virtbase);
+ if (res)
+ release_mem_region(res->start, resource_size(res));
clk_disable(dev->clk);
clk_put(dev->clk);
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index b4ed4ca802ed..0070371b29f3 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -19,7 +19,7 @@
#include <linux/wait.h>
#include <linux/i2c-ocores.h>
#include <linux/slab.h>
-#include <asm/io.h>
+#include <linux/io.h>
struct ocores_i2c {
void __iomem *base;
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 389ac6032a7b..7674efb55378 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -38,6 +38,7 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/i2c-omap.h>
/* I2C controller revisions */
#define OMAP_I2C_REV_2 0x20
@@ -45,29 +46,37 @@
/* I2C controller revisions present on specific hardware */
#define OMAP_I2C_REV_ON_2430 0x36
#define OMAP_I2C_REV_ON_3430 0x3C
+#define OMAP_I2C_REV_ON_4430 0x40
/* timeout waiting for the controller to respond */
#define OMAP_I2C_TIMEOUT (msecs_to_jiffies(1000))
-#define OMAP_I2C_REV_REG 0x00
-#define OMAP_I2C_IE_REG 0x01
-#define OMAP_I2C_STAT_REG 0x02
-#define OMAP_I2C_IV_REG 0x03
/* For OMAP3 I2C_IV has changed to I2C_WE (wakeup enable) */
-#define OMAP_I2C_WE_REG 0x03
-#define OMAP_I2C_SYSS_REG 0x04
-#define OMAP_I2C_BUF_REG 0x05
-#define OMAP_I2C_CNT_REG 0x06
-#define OMAP_I2C_DATA_REG 0x07
-#define OMAP_I2C_SYSC_REG 0x08
-#define OMAP_I2C_CON_REG 0x09
-#define OMAP_I2C_OA_REG 0x0a
-#define OMAP_I2C_SA_REG 0x0b
-#define OMAP_I2C_PSC_REG 0x0c
-#define OMAP_I2C_SCLL_REG 0x0d
-#define OMAP_I2C_SCLH_REG 0x0e
-#define OMAP_I2C_SYSTEST_REG 0x0f
-#define OMAP_I2C_BUFSTAT_REG 0x10
+enum {
+ OMAP_I2C_REV_REG = 0,
+ OMAP_I2C_IE_REG,
+ OMAP_I2C_STAT_REG,
+ OMAP_I2C_IV_REG,
+ OMAP_I2C_WE_REG,
+ OMAP_I2C_SYSS_REG,
+ OMAP_I2C_BUF_REG,
+ OMAP_I2C_CNT_REG,
+ OMAP_I2C_DATA_REG,
+ OMAP_I2C_SYSC_REG,
+ OMAP_I2C_CON_REG,
+ OMAP_I2C_OA_REG,
+ OMAP_I2C_SA_REG,
+ OMAP_I2C_PSC_REG,
+ OMAP_I2C_SCLL_REG,
+ OMAP_I2C_SCLH_REG,
+ OMAP_I2C_SYSTEST_REG,
+ OMAP_I2C_BUFSTAT_REG,
+ OMAP_I2C_REVNB_LO,
+ OMAP_I2C_REVNB_HI,
+ OMAP_I2C_IRQSTATUS_RAW,
+ OMAP_I2C_IRQENABLE_SET,
+ OMAP_I2C_IRQENABLE_CLR,
+};
/* I2C Interrupt Enable Register (OMAP_I2C_IE): */
#define OMAP_I2C_IE_XDR (1 << 14) /* TX Buffer drain int enable */
@@ -157,6 +166,9 @@
#define SYSC_IDLEMODE_SMART 0x2
#define SYSC_CLOCKACTIVITY_FCLK 0x2
+/* Errata definitions */
+#define I2C_OMAP_ERRATA_I207 (1 << 0)
+#define I2C_OMAP3_1P153 (1 << 1)
struct omap_i2c_dev {
struct device *dev;
@@ -167,9 +179,13 @@ struct omap_i2c_dev {
struct clk *fclk; /* Functional clock */
struct completion cmd_complete;
struct resource *ioarea;
+ u32 latency; /* maximum mpu wkup latency */
+ void (*set_mpu_wkup_lat)(struct device *dev,
+ long latency);
u32 speed; /* Speed of bus in Khz */
u16 cmd_err;
u8 *buf;
+ u8 *regs;
size_t buf_len;
struct i2c_adapter adapter;
u8 fifo_size; /* use as flag and value
@@ -186,17 +202,67 @@ struct omap_i2c_dev {
u16 bufstate;
u16 syscstate;
u16 westate;
+ u16 errata;
+};
+
+static const u8 reg_map[] = {
+ [OMAP_I2C_REV_REG] = 0x00,
+ [OMAP_I2C_IE_REG] = 0x01,
+ [OMAP_I2C_STAT_REG] = 0x02,
+ [OMAP_I2C_IV_REG] = 0x03,
+ [OMAP_I2C_WE_REG] = 0x03,
+ [OMAP_I2C_SYSS_REG] = 0x04,
+ [OMAP_I2C_BUF_REG] = 0x05,
+ [OMAP_I2C_CNT_REG] = 0x06,
+ [OMAP_I2C_DATA_REG] = 0x07,
+ [OMAP_I2C_SYSC_REG] = 0x08,
+ [OMAP_I2C_CON_REG] = 0x09,
+ [OMAP_I2C_OA_REG] = 0x0a,
+ [OMAP_I2C_SA_REG] = 0x0b,
+ [OMAP_I2C_PSC_REG] = 0x0c,
+ [OMAP_I2C_SCLL_REG] = 0x0d,
+ [OMAP_I2C_SCLH_REG] = 0x0e,
+ [OMAP_I2C_SYSTEST_REG] = 0x0f,
+ [OMAP_I2C_BUFSTAT_REG] = 0x10,
+};
+
+static const u8 omap4_reg_map[] = {
+ [OMAP_I2C_REV_REG] = 0x04,
+ [OMAP_I2C_IE_REG] = 0x2c,
+ [OMAP_I2C_STAT_REG] = 0x28,
+ [OMAP_I2C_IV_REG] = 0x34,
+ [OMAP_I2C_WE_REG] = 0x34,
+ [OMAP_I2C_SYSS_REG] = 0x90,
+ [OMAP_I2C_BUF_REG] = 0x94,
+ [OMAP_I2C_CNT_REG] = 0x98,
+ [OMAP_I2C_DATA_REG] = 0x9c,
+ [OMAP_I2C_SYSC_REG] = 0x20,
+ [OMAP_I2C_CON_REG] = 0xa4,
+ [OMAP_I2C_OA_REG] = 0xa8,
+ [OMAP_I2C_SA_REG] = 0xac,
+ [OMAP_I2C_PSC_REG] = 0xb0,
+ [OMAP_I2C_SCLL_REG] = 0xb4,
+ [OMAP_I2C_SCLH_REG] = 0xb8,
+ [OMAP_I2C_SYSTEST_REG] = 0xbc,
+ [OMAP_I2C_BUFSTAT_REG] = 0xc0,
+ [OMAP_I2C_REVNB_LO] = 0x00,
+ [OMAP_I2C_REVNB_HI] = 0x04,
+ [OMAP_I2C_IRQSTATUS_RAW] = 0x24,
+ [OMAP_I2C_IRQENABLE_SET] = 0x2c,
+ [OMAP_I2C_IRQENABLE_CLR] = 0x30,
};
static inline void omap_i2c_write_reg(struct omap_i2c_dev *i2c_dev,
int reg, u16 val)
{
- __raw_writew(val, i2c_dev->base + (reg << i2c_dev->reg_shift));
+ __raw_writew(val, i2c_dev->base +
+ (i2c_dev->regs[reg] << i2c_dev->reg_shift));
}
static inline u16 omap_i2c_read_reg(struct omap_i2c_dev *i2c_dev, int reg)
{
- return __raw_readw(i2c_dev->base + (reg << i2c_dev->reg_shift));
+ return __raw_readw(i2c_dev->base +
+ (i2c_dev->regs[reg] << i2c_dev->reg_shift));
}
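
The two accessors above resolve a logical register index through the per-SoC map and then apply reg_shift, so one enum serves both the legacy OMAP layout and the flat OMAP4 layout; per the probe changes further down, reg_shift is 2 on most legacy OMAPs and 0 on OMAP4. A standalone sketch of the resulting address computation, using the STAT offsets from the two tables above (everything else here is illustrative, not driver code):

	#include <stdio.h>

	int main(void)
	{
		/* offsets taken from reg_map[] and omap4_reg_map[] above */
		unsigned char legacy_stat = 0x02, omap4_stat = 0x28;
		/* reg_shift: 2 on legacy OMAP, 0 on OMAP4 (see probe code) */
		int legacy_shift = 2, omap4_shift = 0;

		printf("legacy STAT offset: 0x%02x\n", legacy_stat << legacy_shift);
		printf("omap4  STAT offset: 0x%02x\n", omap4_stat << omap4_shift);
		return 0;
	}

OMAP4 stores absolute byte offsets in its table, which is why reg_shift is forced to 0 there.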
static int __init omap_i2c_get_clocks(struct omap_i2c_dev *dev)
@@ -265,7 +331,11 @@ static void omap_i2c_idle(struct omap_i2c_dev *dev)
WARN_ON(dev->idle);
dev->iestate = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
- omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, 0);
+ if (dev->rev >= OMAP_I2C_REV_ON_4430)
+ omap_i2c_write_reg(dev, OMAP_I2C_IRQENABLE_CLR, 1);
+ else
+ omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, 0);
+
if (dev->rev < OMAP_I2C_REV_2) {
iv = omap_i2c_read_reg(dev, OMAP_I2C_IV_REG); /* Read clears */
} else {
@@ -330,7 +400,9 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
* REVISIT: Some wkup sources might not be needed.
*/
dev->westate = OMAP_I2C_WE_ALL;
- omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, dev->westate);
+ if (dev->rev < OMAP_I2C_REV_ON_4430)
+ omap_i2c_write_reg(dev, OMAP_I2C_WE_REG,
+ dev->westate);
}
}
omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
@@ -357,7 +429,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
psc = fclk_rate / 12000000;
}
- if (cpu_is_omap2430() || cpu_is_omap34xx()) {
+ if (!(cpu_class_is_omap1() || cpu_is_omap2420())) {
/*
* HSI2C controller internal clk rate should be 19.2 Mhz for
@@ -430,6 +502,11 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
/* Take the I2C module out of reset: */
omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN);
+ dev->errata = 0;
+
+ if (cpu_is_omap2430() || cpu_is_omap34xx())
+ dev->errata |= I2C_OMAP_ERRATA_I207;
+
/* Enable interrupts */
dev->iestate = (OMAP_I2C_IE_XRDY | OMAP_I2C_IE_RRDY |
OMAP_I2C_IE_ARDY | OMAP_I2C_IE_NACK |
@@ -539,8 +616,12 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
* REVISIT: We should abort the transfer on signals, but the bus goes
* into arbitration and we're currently unable to recover from it.
*/
+ if (dev->set_mpu_wkup_lat != NULL)
+ dev->set_mpu_wkup_lat(dev->dev, dev->latency);
r = wait_for_completion_timeout(&dev->cmd_complete,
OMAP_I2C_TIMEOUT);
+ if (dev->set_mpu_wkup_lat != NULL)
+ dev->set_mpu_wkup_lat(dev->dev, -1);
dev->buf_len = 0;
if (r < 0)
return r;
@@ -623,6 +704,34 @@ omap_i2c_ack_stat(struct omap_i2c_dev *dev, u16 stat)
omap_i2c_write_reg(dev, OMAP_I2C_STAT_REG, stat);
}
+static inline void i2c_omap_errata_i207(struct omap_i2c_dev *dev, u16 stat)
+{
+ /*
+ * I2C Errata (Errata Nos. OMAP2: 1.67, OMAP3: 1.8);
+ * not applicable to OMAP4.
+ * Under certain rare conditions, RDR can be set again
+ * while the bus is busy; in that case the interrupt must be
+ * ignored and cleared.
+ */
+ if (stat & OMAP_I2C_STAT_RDR) {
+ /* Step 1: If RDR is set, clear it */
+ omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR);
+
+ /* Step 2: */
+ if (!(omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG)
+ & OMAP_I2C_STAT_BB)) {
+
+ /* Step 3: */
+ if (omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG)
+ & OMAP_I2C_STAT_RDR) {
+ omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR);
+ dev_dbg(dev->dev, "RDR when bus is busy.\n");
+ }
+
+ }
+ }
+}
+
/* rev1 devices are apparently only on some 15xx */
#ifdef CONFIG_ARCH_OMAP15XX
@@ -684,6 +793,35 @@ omap_i2c_rev1_isr(int this_irq, void *dev_id)
#define omap_i2c_rev1_isr NULL
#endif
+/*
+ * OMAP3430 Errata 1.153: When an XRDY/XDR is hit, wait for XUDF before writing
+ * data to DATA_REG. Otherwise some data bytes can be lost while transferring
+ * them from the memory to the I2C interface.
+ */
+static int errata_omap3_1p153(struct omap_i2c_dev *dev, u16 *stat, int *err)
+{
+ unsigned long timeout = 10000;
+
+ while (--timeout && !(*stat & OMAP_I2C_STAT_XUDF)) {
+ if (*stat & (OMAP_I2C_STAT_NACK | OMAP_I2C_STAT_AL)) {
+ omap_i2c_ack_stat(dev, *stat & (OMAP_I2C_STAT_XRDY |
+ OMAP_I2C_STAT_XDR));
+ *err |= OMAP_I2C_STAT_XUDF;
+ return -ETIMEDOUT;
+ }
+
+ cpu_relax();
+ *stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG);
+ }
+
+ if (!timeout) {
+ dev_err(dev->dev, "timeout waiting on XUDF bit\n");
+ return 0;
+ }
+
+ return 0;
+}
+
static irqreturn_t
omap_i2c_isr(int this_irq, void *dev_id)
{
@@ -733,6 +871,10 @@ complete:
}
if (stat & (OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR)) {
u8 num_bytes = 1;
+
+ if (dev->errata & I2C_OMAP_ERRATA_I207)
+ i2c_omap_errata_i207(dev, stat);
+
if (dev->fifo_size) {
if (stat & OMAP_I2C_STAT_RRDY)
num_bytes = dev->fifo_size;
@@ -747,9 +889,12 @@ complete:
if (dev->buf_len) {
*dev->buf++ = w;
dev->buf_len--;
- /* Data reg from 2430 is 8 bit wide */
- if (!cpu_is_omap2430() &&
- !cpu_is_omap34xx()) {
+ /*
+ * Data reg in 2430, omap3 and
+ * omap4 is 8 bit wide
+ */
+ if (cpu_class_is_omap1() ||
+ cpu_is_omap2420()) {
if (dev->buf_len) {
*dev->buf++ = w >> 8;
dev->buf_len--;
@@ -787,9 +932,12 @@ complete:
if (dev->buf_len) {
w = *dev->buf++;
dev->buf_len--;
- /* Data reg from 2430 is 8 bit wide */
- if (!cpu_is_omap2430() &&
- !cpu_is_omap34xx()) {
+ /*
+ * Data reg in 2430, omap3 and
+ * omap4 is 8 bit wide
+ */
+ if (cpu_class_is_omap1() ||
+ cpu_is_omap2420()) {
if (dev->buf_len) {
w |= *dev->buf++ << 8;
dev->buf_len--;
@@ -807,25 +955,9 @@ complete:
break;
}
- /*
- * OMAP3430 Errata 1.153: When an XRDY/XDR
- * is hit, wait for XUDF before writing data
- * to DATA_REG. Otherwise some data bytes can
- * be lost while transferring them from the
- * memory to the I2C interface.
- */
-
- if (dev->rev <= OMAP_I2C_REV_ON_3430) {
- while (!(stat & OMAP_I2C_STAT_XUDF)) {
- if (stat & (OMAP_I2C_STAT_NACK | OMAP_I2C_STAT_AL)) {
- omap_i2c_ack_stat(dev, stat & (OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR));
- err |= OMAP_I2C_STAT_XUDF;
- goto complete;
- }
- cpu_relax();
- stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG);
- }
- }
+ if ((dev->errata & I2C_OMAP3_1P153) &&
+ errata_omap3_1p153(dev, &stat, &err))
+ goto complete;
omap_i2c_write_reg(dev, OMAP_I2C_DATA_REG, w);
}
@@ -857,6 +989,7 @@ omap_i2c_probe(struct platform_device *pdev)
struct omap_i2c_dev *dev;
struct i2c_adapter *adap;
struct resource *mem, *irq, *ioarea;
+ struct omap_i2c_bus_platform_data *pdata = pdev->dev.platform_data;
irq_handler_t isr;
int r;
u32 speed = 0;
@@ -886,10 +1019,13 @@ omap_i2c_probe(struct platform_device *pdev)
goto err_release_region;
}
- if (pdev->dev.platform_data != NULL)
- speed = *(u32 *)pdev->dev.platform_data;
- else
- speed = 100; /* Defualt speed */
+ if (pdata != NULL) {
+ speed = pdata->clkrate;
+ dev->set_mpu_wkup_lat = pdata->set_mpu_wkup_lat;
+ } else {
+ speed = 100; /* Default speed */
+ dev->set_mpu_wkup_lat = NULL;
+ }
dev->speed = speed;
dev->idle = 1;
@@ -905,17 +1041,27 @@ omap_i2c_probe(struct platform_device *pdev)
if (cpu_is_omap7xx())
dev->reg_shift = 1;
+ else if (cpu_is_omap44xx())
+ dev->reg_shift = 0;
else
dev->reg_shift = 2;
if ((r = omap_i2c_get_clocks(dev)) != 0)
goto err_iounmap;
+ if (cpu_is_omap44xx())
+ dev->regs = (u8 *) omap4_reg_map;
+ else
+ dev->regs = (u8 *) reg_map;
+
omap_i2c_unidle(dev);
dev->rev = omap_i2c_read_reg(dev, OMAP_I2C_REV_REG) & 0xff;
- if (cpu_is_omap2430() || cpu_is_omap34xx()) {
+ if (dev->rev <= OMAP_I2C_REV_ON_3430)
+ dev->errata |= I2C_OMAP3_1P153;
+
+ if (!(cpu_class_is_omap1() || cpu_is_omap2420())) {
u16 s;
/* Set up the fifo size - Get total size */
@@ -927,8 +1073,17 @@ omap_i2c_probe(struct platform_device *pdev)
* size. This is to ensure that we can handle the status on int
* call back latencies.
*/
- dev->fifo_size = (dev->fifo_size / 2);
- dev->b_hw = 1; /* Enable hardware fixes */
+ if (dev->rev >= OMAP_I2C_REV_ON_4430) {
+ dev->fifo_size = 0;
+ dev->b_hw = 0; /* Disable hardware fixes */
+ } else {
+ dev->fifo_size = (dev->fifo_size / 2);
+ dev->b_hw = 1; /* Enable hardware fixes */
+ }
+ /* calculate wakeup latency constraint for MPU */
+ if (dev->set_mpu_wkup_lat != NULL)
+ dev->latency = (1000000 * dev->fifo_size) /
+ (1000 * speed / 8);
}
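
The latency computed above is the time, in microseconds, needed to drain a full FIFO at the configured bus speed: speed is in kHz, so 1000 * speed / 8 approximates bytes per second. A worked example with assumed numbers (an 8-byte FIFO at 400 kHz fast mode; both values are hypothetical):

	#include <stdio.h>

	int main(void)
	{
		unsigned int fifo_size = 8;	/* bytes, assumed */
		unsigned int speed = 400;	/* kHz, assumed (fast mode) */

		/* same formula as the driver: microseconds to drain the FIFO */
		unsigned int latency = (1000000 * fifo_size) / (1000 * speed / 8);

		printf("mpu wakeup latency constraint: %u us\n", latency);	/* prints 160 */
		return 0;
	}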
/* reset ASAP, clearing any IRQs */
diff --git a/drivers/i2c/busses/i2c-parport-light.c b/drivers/i2c/busses/i2c-parport-light.c
index 5f41ec0f72d2..fc5fbd1012c9 100644
--- a/drivers/i2c/busses/i2c-parport-light.c
+++ b/drivers/i2c/busses/i2c-parport-light.c
@@ -33,7 +33,7 @@
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/i2c-smbus.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include "i2c-parport.h"
#define DEFAULT_BASE 0x378
diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c
index 846583ed4763..0eb1515541e7 100644
--- a/drivers/i2c/busses/i2c-parport.c
+++ b/drivers/i2c/busses/i2c-parport.c
@@ -137,7 +137,7 @@ static int parport_getsda(void *data)
copied. The attaching code will set getscl to NULL for adapters that
cannot read SCL back, and will also make the data field point to
the parallel port structure. */
-static struct i2c_algo_bit_data parport_algo_data = {
+static const struct i2c_algo_bit_data parport_algo_data = {
.setsda = parport_setsda,
.setscl = parport_setscl,
.getsda = parport_getsda,
diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c
index d3d4a4b43a1d..4174101660c9 100644
--- a/drivers/i2c/busses/i2c-pasemi.c
+++ b/drivers/i2c/busses/i2c-pasemi.c
@@ -25,7 +25,7 @@
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <asm/io.h>
+#include <linux/io.h>
static struct pci_driver pasemi_smb_driver;
diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c
index f7346a9bd95f..bbd77603a417 100644
--- a/drivers/i2c/busses/i2c-pca-isa.c
+++ b/drivers/i2c/busses/i2c-pca-isa.c
@@ -30,8 +30,8 @@
#include <linux/isa.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-pca.h>
+#include <linux/io.h>
-#include <asm/io.h>
#include <asm/irq.h>
#define DRIVER "i2c-pca-isa"
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index 5b2213df5ed0..ef5c78487eb7 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -23,9 +23,9 @@
#include <linux/i2c-algo-pca.h>
#include <linux/i2c-pca-platform.h>
#include <linux/gpio.h>
+#include <linux/io.h>
#include <asm/irq.h>
-#include <asm/io.h>
struct i2c_pca_pf_data {
void __iomem *reg_base;
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index ee9da6fcf69a..6d14ac2e3c41 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -39,7 +39,7 @@
#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
/* PIIX4 SMBus address offsets */
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index 7b57d5f267e1..dfa7ae9c1b8e 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -33,7 +33,7 @@
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/delay.h>
-#include <asm/io.h>
+#include <linux/io.h>
#define DRV_NAME "pmcmsptwi"
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 14d249f5ed3f..525f045d50de 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -34,9 +34,9 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <asm/irq.h>
-#include <asm/io.h>
#include <plat/i2c.h>
/*
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index d27072b2249f..72902e0bbfa7 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -35,9 +35,9 @@
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <asm/irq.h>
-#include <asm/io.h>
#include <plat/regs-iic.h>
#include <plat/iic.h>
@@ -482,7 +482,8 @@ static int s3c24xx_i2c_set_master(struct s3c24xx_i2c *i2c)
static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
struct i2c_msg *msgs, int num)
{
- unsigned long timeout;
+ unsigned long iicstat, timeout;
+ int spins = 20;
int ret;
if (i2c->suspended)
@@ -521,7 +522,21 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
/* ensure the stop has been through the bus */
- msleep(1);
+ dev_dbg(i2c->dev, "waiting for bus idle\n");
+
+ /* first, try busy waiting briefly */
+ do {
+ iicstat = readl(i2c->regs + S3C2410_IICSTAT);
+ } while ((iicstat & S3C2410_IICSTAT_START) && --spins);
+
+ /* if that timed out, sleep */
+ if (!spins) {
+ msleep(1);
+ iicstat = readl(i2c->regs + S3C2410_IICSTAT);
+ }
+
+ if (iicstat & S3C2410_IICSTAT_START)
+ dev_warn(i2c->dev, "timeout waiting for bus idle\n");
out:
return ret;
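
The hunk above replaces an unconditional msleep(1) with a short busy-wait, sleeping only if the STOP condition is still pending; that removes a guaranteed millisecond of latency from every transfer. A standalone sketch of the same spin-then-sleep pattern (bus_stop_pending() and the simulated register are hypothetical stand-ins for reading S3C2410_IICSTAT):

	#include <stdbool.h>
	#include <stdio.h>
	#include <unistd.h>

	/* pretend the STOP stays pending for the first few reads */
	static int busy_reads = 5;

	static bool bus_stop_pending(void)
	{
		return busy_reads-- > 0;
	}

	static void wait_bus_idle(void)
	{
		int spins = 20;

		/* fast path: poll a few times, the STOP usually clears quickly */
		while (bus_stop_pending() && --spins)
			;

		/* slow path: sleep once, then warn if the bus is still busy */
		if (!spins && bus_stop_pending()) {
			usleep(1000);
			if (bus_stop_pending())
				fprintf(stderr, "timeout waiting for bus idle\n");
		}
	}

	int main(void)
	{
		wait_bus_idle();
		return 0;
	}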
diff --git a/drivers/i2c/busses/i2c-s6000.c b/drivers/i2c/busses/i2c-s6000.c
index c91359f4965c..cadc0216e02f 100644
--- a/drivers/i2c/busses/i2c-s6000.c
+++ b/drivers/i2c/busses/i2c-s6000.c
@@ -36,8 +36,8 @@
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/io.h>
-#include <asm/io.h>
#include "i2c-s6000.h"
#define DRV_NAME "i2c-s6000"
diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
index b9680f50f541..4f93da31d3ad 100644
--- a/drivers/i2c/busses/i2c-sh7760.c
+++ b/drivers/i2c/busses/i2c-sh7760.c
@@ -16,10 +16,10 @@
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <asm/clock.h>
#include <asm/i2c-sh7760.h>
-#include <asm/io.h>
/* register offsets */
#define I2CSCR 0x0 /* slave ctrl */
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index ffb405d7c6f2..598c49acaeb5 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -119,8 +119,10 @@ struct sh_mobile_i2c_data {
struct i2c_adapter adap;
struct clk *clk;
+ u_int8_t icic;
u_int8_t iccl;
u_int8_t icch;
+ u_int8_t flags;
spinlock_t lock;
wait_queue_head_t wait;
@@ -129,15 +131,17 @@ struct sh_mobile_i2c_data {
int sr;
};
+#define IIC_FLAG_HAS_ICIC67 (1 << 0)
+
#define NORMAL_SPEED 100000 /* FAST_SPEED 400000 */
/* Register offsets */
-#define ICDR(pd) (pd->reg + 0x00)
-#define ICCR(pd) (pd->reg + 0x04)
-#define ICSR(pd) (pd->reg + 0x08)
-#define ICIC(pd) (pd->reg + 0x0c)
-#define ICCL(pd) (pd->reg + 0x10)
-#define ICCH(pd) (pd->reg + 0x14)
+#define ICDR 0x00
+#define ICCR 0x04
+#define ICSR 0x08
+#define ICIC 0x0c
+#define ICCL 0x10
+#define ICCH 0x14
/* Register bits */
#define ICCR_ICE 0x80
@@ -155,11 +159,32 @@ struct sh_mobile_i2c_data {
#define ICSR_WAIT 0x02
#define ICSR_DTE 0x01
+#define ICIC_ICCLB8 0x80
+#define ICIC_ICCHB8 0x40
#define ICIC_ALE 0x08
#define ICIC_TACKE 0x04
#define ICIC_WAITE 0x02
#define ICIC_DTEE 0x01
+static void iic_wr(struct sh_mobile_i2c_data *pd, int offs, unsigned char data)
+{
+ if (offs == ICIC)
+ data |= pd->icic;
+
+ iowrite8(data, pd->reg + offs);
+}
+
+static unsigned char iic_rd(struct sh_mobile_i2c_data *pd, int offs)
+{
+ return ioread8(pd->reg + offs);
+}
+
+static void iic_set_clr(struct sh_mobile_i2c_data *pd, int offs,
+ unsigned char set, unsigned char clr)
+{
+ iic_wr(pd, offs, (iic_rd(pd, offs) | set) & ~clr);
+}
+
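
The point of routing every access through iic_wr() is the ICIC special case above: the cached pd->icic bits (ICIC_ICCLB8/ICIC_ICCHB8, the ninth clock bits) are OR-ed into every ICIC write, so the existing paths that write 0 or a plain interrupt mask to ICIC no longer clobber them. A tiny standalone illustration of that merge (the values are made up):

	#include <stdio.h>

	int main(void)
	{
		unsigned char icic_cache = 0x80;	/* ICIC_ICCLB8 kept by the driver */
		unsigned char requested = 0x0f;		/* interrupt mask a caller writes */

		/* what iic_wr() actually puts into the ICIC register */
		printf("ICIC write: 0x%02x\n", requested | icic_cache);	/* 0x8f */
		return 0;
	}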
static void activate_ch(struct sh_mobile_i2c_data *pd)
{
unsigned long i2c_clk;
@@ -187,6 +212,14 @@ static void activate_ch(struct sh_mobile_i2c_data *pd)
else
pd->iccl = (u_int8_t)(num/denom);
+ /* one more bit of ICCL in ICIC */
+ if (pd->flags & IIC_FLAG_HAS_ICIC67) {
+ if ((num/denom) > 0xff)
+ pd->icic |= ICIC_ICCLB8;
+ else
+ pd->icic &= ~ICIC_ICCLB8;
+ }
+
/* Calculate the value for icch. From the data sheet:
icch = (p clock / transfer rate) * (H / (L + H)) */
num = i2c_clk * 4;
@@ -196,25 +229,33 @@ static void activate_ch(struct sh_mobile_i2c_data *pd)
else
pd->icch = (u_int8_t)(num/denom);
+ /* one more bit of ICCH in ICIC */
+ if (pd->flags & IIC_FLAG_HAS_ICIC67) {
+ if ((num/denom) > 0xff)
+ pd->icic |= ICIC_ICCHB8;
+ else
+ pd->icic &= ~ICIC_ICCHB8;
+ }
+
/* Enable channel and configure rx ack */
- iowrite8(ioread8(ICCR(pd)) | ICCR_ICE, ICCR(pd));
+ iic_set_clr(pd, ICCR, ICCR_ICE, 0);
/* Mask all interrupts */
- iowrite8(0, ICIC(pd));
+ iic_wr(pd, ICIC, 0);
/* Set the clock */
- iowrite8(pd->iccl, ICCL(pd));
- iowrite8(pd->icch, ICCH(pd));
+ iic_wr(pd, ICCL, pd->iccl);
+ iic_wr(pd, ICCH, pd->icch);
}
static void deactivate_ch(struct sh_mobile_i2c_data *pd)
{
/* Clear/disable interrupts */
- iowrite8(0, ICSR(pd));
- iowrite8(0, ICIC(pd));
+ iic_wr(pd, ICSR, 0);
+ iic_wr(pd, ICIC, 0);
/* Disable channel */
- iowrite8(ioread8(ICCR(pd)) & ~ICCR_ICE, ICCR(pd));
+ iic_set_clr(pd, ICCR, 0, ICCR_ICE);
/* Disable clock and mark device as idle */
clk_disable(pd->clk);
@@ -233,35 +274,35 @@ static unsigned char i2c_op(struct sh_mobile_i2c_data *pd,
switch (op) {
case OP_START: /* issue start and trigger DTE interrupt */
- iowrite8(0x94, ICCR(pd));
+ iic_wr(pd, ICCR, 0x94);
break;
case OP_TX_FIRST: /* disable DTE interrupt and write data */
- iowrite8(ICIC_WAITE | ICIC_ALE | ICIC_TACKE, ICIC(pd));
- iowrite8(data, ICDR(pd));
+ iic_wr(pd, ICIC, ICIC_WAITE | ICIC_ALE | ICIC_TACKE);
+ iic_wr(pd, ICDR, data);
break;
case OP_TX: /* write data */
- iowrite8(data, ICDR(pd));
+ iic_wr(pd, ICDR, data);
break;
case OP_TX_STOP: /* write data and issue a stop afterwards */
- iowrite8(data, ICDR(pd));
- iowrite8(0x90, ICCR(pd));
+ iic_wr(pd, ICDR, data);
+ iic_wr(pd, ICCR, 0x90);
break;
case OP_TX_TO_RX: /* select read mode */
- iowrite8(0x81, ICCR(pd));
+ iic_wr(pd, ICCR, 0x81);
break;
case OP_RX: /* just read data */
- ret = ioread8(ICDR(pd));
+ ret = iic_rd(pd, ICDR);
break;
case OP_RX_STOP: /* enable DTE interrupt, issue stop */
- iowrite8(ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE,
- ICIC(pd));
- iowrite8(0xc0, ICCR(pd));
+ iic_wr(pd, ICIC,
+ ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE);
+ iic_wr(pd, ICCR, 0xc0);
break;
case OP_RX_STOP_DATA: /* enable DTE interrupt, read data, issue stop */
- iowrite8(ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE,
- ICIC(pd));
- ret = ioread8(ICDR(pd));
- iowrite8(0xc0, ICCR(pd));
+ iic_wr(pd, ICIC,
+ ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE);
+ ret = iic_rd(pd, ICDR);
+ iic_wr(pd, ICCR, 0xc0);
break;
}
@@ -367,7 +408,7 @@ static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id)
unsigned char sr;
int wakeup;
- sr = ioread8(ICSR(pd));
+ sr = iic_rd(pd, ICSR);
pd->sr |= sr; /* remember state */
dev_dbg(pd->dev, "i2c_isr 0x%02x 0x%02x %s %d %d!\n", sr, pd->sr,
@@ -376,7 +417,7 @@ static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id)
if (sr & (ICSR_AL | ICSR_TACK)) {
/* don't interrupt transaction - continue to issue stop */
- iowrite8(sr & ~(ICSR_AL | ICSR_TACK), ICSR(pd));
+ iic_wr(pd, ICSR, sr & ~(ICSR_AL | ICSR_TACK));
wakeup = 0;
} else if (pd->msg->flags & I2C_M_RD)
wakeup = sh_mobile_i2c_isr_rx(pd);
@@ -384,7 +425,7 @@ static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id)
wakeup = sh_mobile_i2c_isr_tx(pd);
if (sr & ICSR_WAIT) /* TODO: add delay here to support slow acks */
- iowrite8(sr & ~ICSR_WAIT, ICSR(pd));
+ iic_wr(pd, ICSR, sr & ~ICSR_WAIT);
if (wakeup) {
pd->sr |= SW_DONE;
@@ -402,21 +443,21 @@ static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg)
}
/* Initialize channel registers */
- iowrite8(ioread8(ICCR(pd)) & ~ICCR_ICE, ICCR(pd));
+ iic_set_clr(pd, ICCR, 0, ICCR_ICE);
/* Enable channel and configure rx ack */
- iowrite8(ioread8(ICCR(pd)) | ICCR_ICE, ICCR(pd));
+ iic_set_clr(pd, ICCR, ICCR_ICE, 0);
/* Set the clock */
- iowrite8(pd->iccl, ICCL(pd));
- iowrite8(pd->icch, ICCH(pd));
+ iic_wr(pd, ICCL, pd->iccl);
+ iic_wr(pd, ICCH, pd->icch);
pd->msg = usr_msg;
pd->pos = -1;
pd->sr = 0;
/* Enable all interrupts to begin with */
- iowrite8(ICIC_WAITE | ICIC_ALE | ICIC_TACKE | ICIC_DTEE, ICIC(pd));
+ iic_wr(pd, ICIC, ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE);
return 0;
}
@@ -451,7 +492,7 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
retry_count = 1000;
again:
- val = ioread8(ICSR(pd));
+ val = iic_rd(pd, ICSR);
dev_dbg(pd->dev, "val 0x%02x pd->sr 0x%02x\n", val, pd->sr);
@@ -576,6 +617,12 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
goto err_irq;
}
+ /* The IIC blocks on SH-Mobile ARM processors
+ * come with two new bits in ICIC.
+ */
+ if (size > 0x17)
+ pd->flags |= IIC_FLAG_HAS_ICIC67;
+
/* Enable Runtime PM for this device.
*
* Also tell the Runtime PM core to ignore children
diff --git a/drivers/i2c/busses/i2c-sibyte.c b/drivers/i2c/busses/i2c-sibyte.c
index 98b1ec489159..3d76a188e42f 100644
--- a/drivers/i2c/busses/i2c-sibyte.c
+++ b/drivers/i2c/busses/i2c-sibyte.c
@@ -22,7 +22,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/i2c.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_smbus.h>
diff --git a/drivers/i2c/busses/i2c-simtec.c b/drivers/i2c/busses/i2c-simtec.c
index 78b06107342c..2fc08fbf67a2 100644
--- a/drivers/i2c/busses/i2c-simtec.c
+++ b/drivers/i2c/busses/i2c-simtec.c
@@ -24,12 +24,11 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
-#include <asm/io.h>
-
struct simtec_i2c_data {
struct resource *ioarea;
void __iomem *reg;
diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c
index 55a71370c79b..437586611d4a 100644
--- a/drivers/i2c/busses/i2c-sis5595.c
+++ b/drivers/i2c/busses/i2c-sis5595.c
@@ -61,7 +61,7 @@
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
static int blacklist[] = {
PCI_DEVICE_ID_SI_540,
diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
index 2309c7f1bde2..e6f539e26f65 100644
--- a/drivers/i2c/busses/i2c-sis630.c
+++ b/drivers/i2c/busses/i2c-sis630.c
@@ -53,7 +53,7 @@
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
/* SIS630 SMBus registers */
#define SMB_STS 0x80 /* status */
diff --git a/drivers/i2c/busses/i2c-sis96x.c b/drivers/i2c/busses/i2c-sis96x.c
index d43d8f8943dd..86837f0c4cb9 100644
--- a/drivers/i2c/busses/i2c-sis96x.c
+++ b/drivers/i2c/busses/i2c-sis96x.c
@@ -38,7 +38,7 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
/* base address register in PCI config space */
#define SIS96x_BAR 0x04
diff --git a/drivers/i2c/busses/i2c-stub.c b/drivers/i2c/busses/i2c-stub.c
index 0c770eabe85e..b1b3447942c9 100644
--- a/drivers/i2c/busses/i2c-stub.c
+++ b/drivers/i2c/busses/i2c-stub.c
@@ -29,13 +29,16 @@
#include <linux/i2c.h>
#define MAX_CHIPS 10
+#define STUB_FUNC (I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | \
+ I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | \
+ I2C_FUNC_SMBUS_I2C_BLOCK)
static unsigned short chip_addr[MAX_CHIPS];
module_param_array(chip_addr, ushort, NULL, S_IRUGO);
MODULE_PARM_DESC(chip_addr,
"Chip addresses (up to 10, between 0x03 and 0x77)");
-static unsigned long functionality = ~0UL;
+static unsigned long functionality = STUB_FUNC;
module_param(functionality, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(functionality, "Override functionality bitfield");
@@ -156,9 +159,7 @@ static s32 stub_xfer(struct i2c_adapter * adap, u16 addr, unsigned short flags,
static u32 stub_func(struct i2c_adapter *adapter)
{
- return (I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
- I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
- I2C_FUNC_SMBUS_I2C_BLOCK) & functionality;
+ return STUB_FUNC & functionality;
}
static const struct i2c_algorithm smbus_algorithm = {
diff --git a/drivers/i2c/busses/i2c-versatile.c b/drivers/i2c/busses/i2c-versatile.c
index 5c473833d948..60556012312f 100644
--- a/drivers/i2c/busses/i2c-versatile.c
+++ b/drivers/i2c/busses/i2c-versatile.c
@@ -15,8 +15,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
#define I2C_CONTROL 0x00
#define I2C_CONTROLS 0x00
diff --git a/drivers/i2c/busses/i2c-via.c b/drivers/i2c/busses/i2c-via.c
index de78283bddbe..7799fe5bda88 100644
--- a/drivers/i2c/busses/i2c-via.c
+++ b/drivers/i2c/busses/i2c-via.c
@@ -25,7 +25,7 @@
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
-#include <asm/io.h>
+#include <linux/io.h>
/* Power management registers */
#define PM_CFG_REVID 0x08 /* silicon revision code */
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index d57292e5dae0..4c6fff5f330d 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -51,7 +51,7 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
static struct pci_dev *vt596_pdev;
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index 684395b6f3e2..4cb4bb009950 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -32,7 +32,7 @@
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/scx200.h>
@@ -552,7 +552,7 @@ static int __init scx200_create_isa(const char *text, unsigned long base,
* the name and the BAR where the I/O address resource is located. ISA
* devices are flagged with a bar value of -1 */
-static struct pci_device_id scx200_pci[] = {
+static const struct pci_device_id scx200_pci[] __initconst = {
{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SCx200_BRIDGE),
.driver_data = 0 },
{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE),
diff --git a/drivers/i2c/busses/scx200_i2c.c b/drivers/i2c/busses/scx200_i2c.c
index 42df0eca43d5..7ee0d502ceab 100644
--- a/drivers/i2c/busses/scx200_i2c.c
+++ b/drivers/i2c/busses/scx200_i2c.c
@@ -27,7 +27,7 @@
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/scx200_gpio.h>
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index c2258a51fe0c..e0f833cca3f1 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -159,107 +159,131 @@ static void i2c_device_shutdown(struct device *dev)
driver->shutdown(client);
}
-#ifdef CONFIG_SUSPEND
-static int i2c_device_pm_suspend(struct device *dev)
+#ifdef CONFIG_PM_SLEEP
+static int i2c_legacy_suspend(struct device *dev, pm_message_t mesg)
{
- const struct dev_pm_ops *pm;
+ struct i2c_client *client = i2c_verify_client(dev);
+ struct i2c_driver *driver;
- if (!dev->driver)
+ if (!client || !dev->driver)
return 0;
- pm = dev->driver->pm;
- if (!pm || !pm->suspend)
+ driver = to_i2c_driver(dev->driver);
+ if (!driver->suspend)
return 0;
- return pm->suspend(dev);
+ return driver->suspend(client, mesg);
}
-static int i2c_device_pm_resume(struct device *dev)
+static int i2c_legacy_resume(struct device *dev)
{
- const struct dev_pm_ops *pm;
+ struct i2c_client *client = i2c_verify_client(dev);
+ struct i2c_driver *driver;
- if (!dev->driver)
+ if (!client || !dev->driver)
return 0;
- pm = dev->driver->pm;
- if (!pm || !pm->resume)
+ driver = to_i2c_driver(dev->driver);
+ if (!driver->resume)
return 0;
- return pm->resume(dev);
+ return driver->resume(client);
}
-#else
-#define i2c_device_pm_suspend NULL
-#define i2c_device_pm_resume NULL
-#endif
-#ifdef CONFIG_PM_RUNTIME
-static int i2c_device_runtime_suspend(struct device *dev)
+static int i2c_device_pm_suspend(struct device *dev)
{
- const struct dev_pm_ops *pm;
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- if (!dev->driver)
- return 0;
- pm = dev->driver->pm;
- if (!pm || !pm->runtime_suspend)
+ if (pm_runtime_suspended(dev))
return 0;
- return pm->runtime_suspend(dev);
-}
-static int i2c_device_runtime_resume(struct device *dev)
-{
- const struct dev_pm_ops *pm;
+ if (pm)
+ return pm->suspend ? pm->suspend(dev) : 0;
- if (!dev->driver)
- return 0;
- pm = dev->driver->pm;
- if (!pm || !pm->runtime_resume)
- return 0;
- return pm->runtime_resume(dev);
+ return i2c_legacy_suspend(dev, PMSG_SUSPEND);
}
-static int i2c_device_runtime_idle(struct device *dev)
+static int i2c_device_pm_resume(struct device *dev)
{
- const struct dev_pm_ops *pm = NULL;
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int ret;
- if (dev->driver)
- pm = dev->driver->pm;
- if (pm && pm->runtime_idle) {
- ret = pm->runtime_idle(dev);
- if (ret)
- return ret;
+ if (pm)
+ ret = pm->resume ? pm->resume(dev) : 0;
+ else
+ ret = i2c_legacy_resume(dev);
+
+ if (!ret) {
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
}
- return pm_runtime_suspend(dev);
+ return ret;
}
-#else
-#define i2c_device_runtime_suspend NULL
-#define i2c_device_runtime_resume NULL
-#define i2c_device_runtime_idle NULL
-#endif
-static int i2c_device_suspend(struct device *dev, pm_message_t mesg)
+static int i2c_device_pm_freeze(struct device *dev)
{
- struct i2c_client *client = i2c_verify_client(dev);
- struct i2c_driver *driver;
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- if (!client || !dev->driver)
+ if (pm_runtime_suspended(dev))
return 0;
- driver = to_i2c_driver(dev->driver);
- if (!driver->suspend)
- return 0;
- return driver->suspend(client, mesg);
+
+ if (pm)
+ return pm->freeze ? pm->freeze(dev) : 0;
+
+ return i2c_legacy_suspend(dev, PMSG_FREEZE);
}
-static int i2c_device_resume(struct device *dev)
+static int i2c_device_pm_thaw(struct device *dev)
{
- struct i2c_client *client = i2c_verify_client(dev);
- struct i2c_driver *driver;
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- if (!client || !dev->driver)
+ if (pm_runtime_suspended(dev))
return 0;
- driver = to_i2c_driver(dev->driver);
- if (!driver->resume)
+
+ if (pm)
+ return pm->thaw ? pm->thaw(dev) : 0;
+
+ return i2c_legacy_resume(dev);
+}
+
+static int i2c_device_pm_poweroff(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ if (pm_runtime_suspended(dev))
return 0;
- return driver->resume(client);
+
+ if (pm)
+ return pm->poweroff ? pm->poweroff(dev) : 0;
+
+ return i2c_legacy_suspend(dev, PMSG_HIBERNATE);
}
+static int i2c_device_pm_restore(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int ret;
+
+ if (pm)
+ ret = pm->restore ? pm->restore(dev) : 0;
+ else
+ ret = i2c_legacy_resume(dev);
+
+ if (!ret) {
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ }
+
+ return ret;
+}
+#else /* !CONFIG_PM_SLEEP */
+#define i2c_device_pm_suspend NULL
+#define i2c_device_pm_resume NULL
+#define i2c_device_pm_freeze NULL
+#define i2c_device_pm_thaw NULL
+#define i2c_device_pm_poweroff NULL
+#define i2c_device_pm_restore NULL
+#endif /* !CONFIG_PM_SLEEP */
+
static void i2c_client_dev_release(struct device *dev)
{
kfree(to_i2c_client(dev));
@@ -301,9 +325,15 @@ static const struct attribute_group *i2c_dev_attr_groups[] = {
static const struct dev_pm_ops i2c_device_pm_ops = {
.suspend = i2c_device_pm_suspend,
.resume = i2c_device_pm_resume,
- .runtime_suspend = i2c_device_runtime_suspend,
- .runtime_resume = i2c_device_runtime_resume,
- .runtime_idle = i2c_device_runtime_idle,
+ .freeze = i2c_device_pm_freeze,
+ .thaw = i2c_device_pm_thaw,
+ .poweroff = i2c_device_pm_poweroff,
+ .restore = i2c_device_pm_restore,
+ SET_RUNTIME_PM_OPS(
+ pm_generic_runtime_suspend,
+ pm_generic_runtime_resume,
+ pm_generic_runtime_idle
+ )
};
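
With the bus-level suspend/resume hooks removed below, the core now reaches client drivers either through dev_pm_ops or, as a fallback, through the old i2c_driver suspend/resume callbacks via i2c_legacy_suspend()/i2c_legacy_resume(). A minimal sketch of the dev_pm_ops side of a client driver (all foo_* names are hypothetical):

	#include <linux/i2c.h>
	#include <linux/pm.h>

	static int foo_suspend(struct device *dev)
	{
		/* quiesce the chip; dev is the client's struct device */
		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		/* restore chip state */
		return 0;
	}

	static const struct dev_pm_ops foo_pm_ops = {
		.suspend = foo_suspend,
		.resume  = foo_resume,
	};

	static struct i2c_driver foo_driver = {
		.driver = {
			.name	= "foo",
			.pm	= &foo_pm_ops,
		},
		/* .probe, .remove and .id_table omitted from this sketch */
	};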
struct bus_type i2c_bus_type = {
@@ -312,8 +342,6 @@ struct bus_type i2c_bus_type = {
.probe = i2c_device_probe,
.remove = i2c_device_remove,
.shutdown = i2c_device_shutdown,
- .suspend = i2c_device_suspend,
- .resume = i2c_device_resume,
.pm = &i2c_device_pm_ops,
};
EXPORT_SYMBOL_GPL(i2c_bus_type);
@@ -390,6 +418,9 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
client->dev.parent = &client->adapter->dev;
client->dev.bus = &i2c_bus_type;
client->dev.type = &i2c_client_type;
+#ifdef CONFIG_OF
+ client->dev.of_node = info->of_node;
+#endif
dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap),
client->addr);
@@ -1193,10 +1224,10 @@ EXPORT_SYMBOL(i2c_transfer);
*
* Returns negative errno, or else the number of bytes written.
*/
-int i2c_master_send(struct i2c_client *client,const char *buf ,int count)
+int i2c_master_send(struct i2c_client *client, const char *buf, int count)
{
int ret;
- struct i2c_adapter *adap=client->adapter;
+ struct i2c_adapter *adap = client->adapter;
struct i2c_msg msg;
msg.addr = client->addr;
@@ -1220,9 +1251,9 @@ EXPORT_SYMBOL(i2c_master_send);
*
* Returns negative errno, or else the number of bytes read.
*/
-int i2c_master_recv(struct i2c_client *client, char *buf ,int count)
+int i2c_master_recv(struct i2c_client *client, char *buf, int count)
{
- struct i2c_adapter *adap=client->adapter;
+ struct i2c_adapter *adap = client->adapter;
struct i2c_msg msg;
int ret;
@@ -1424,7 +1455,7 @@ i2c_new_probed_device(struct i2c_adapter *adap,
}
EXPORT_SYMBOL_GPL(i2c_new_probed_device);
-struct i2c_adapter* i2c_get_adapter(int id)
+struct i2c_adapter *i2c_get_adapter(int id)
{
struct i2c_adapter *adapter;
@@ -1451,7 +1482,7 @@ static u8 crc8(u16 data)
{
int i;
- for(i = 0; i < 8; i++) {
+ for (i = 0; i < 8; i++) {
if (data & 0x8000)
data = data ^ POLY;
data = data << 1;
@@ -1464,7 +1495,7 @@ static u8 i2c_smbus_pec(u8 crc, u8 *p, size_t count)
{
int i;
- for(i = 0; i < count; i++)
+ for (i = 0; i < count; i++)
crc = crc8((crc ^ p[i]) << 8);
return crc;
}
@@ -1534,7 +1565,7 @@ EXPORT_SYMBOL(i2c_smbus_read_byte);
*/
s32 i2c_smbus_write_byte(struct i2c_client *client, u8 value)
{
- return i2c_smbus_xfer(client->adapter,client->addr,client->flags,
+ return i2c_smbus_xfer(client->adapter, client->addr, client->flags,
I2C_SMBUS_WRITE, value, I2C_SMBUS_BYTE, NULL);
}
EXPORT_SYMBOL(i2c_smbus_write_byte);
@@ -1572,9 +1603,9 @@ s32 i2c_smbus_write_byte_data(struct i2c_client *client, u8 command, u8 value)
{
union i2c_smbus_data data;
data.byte = value;
- return i2c_smbus_xfer(client->adapter,client->addr,client->flags,
- I2C_SMBUS_WRITE,command,
- I2C_SMBUS_BYTE_DATA,&data);
+ return i2c_smbus_xfer(client->adapter, client->addr, client->flags,
+ I2C_SMBUS_WRITE, command,
+ I2C_SMBUS_BYTE_DATA, &data);
}
EXPORT_SYMBOL(i2c_smbus_write_byte_data);
@@ -1611,9 +1642,9 @@ s32 i2c_smbus_write_word_data(struct i2c_client *client, u8 command, u16 value)
{
union i2c_smbus_data data;
data.word = value;
- return i2c_smbus_xfer(client->adapter,client->addr,client->flags,
- I2C_SMBUS_WRITE,command,
- I2C_SMBUS_WORD_DATA,&data);
+ return i2c_smbus_xfer(client->adapter, client->addr, client->flags,
+ I2C_SMBUS_WRITE, command,
+ I2C_SMBUS_WORD_DATA, &data);
}
EXPORT_SYMBOL(i2c_smbus_write_word_data);
@@ -1690,9 +1721,9 @@ s32 i2c_smbus_write_block_data(struct i2c_client *client, u8 command,
length = I2C_SMBUS_BLOCK_MAX;
data.block[0] = length;
memcpy(&data.block[1], values, length);
- return i2c_smbus_xfer(client->adapter,client->addr,client->flags,
- I2C_SMBUS_WRITE,command,
- I2C_SMBUS_BLOCK_DATA,&data);
+ return i2c_smbus_xfer(client->adapter, client->addr, client->flags,
+ I2C_SMBUS_WRITE, command,
+ I2C_SMBUS_BLOCK_DATA, &data);
}
EXPORT_SYMBOL(i2c_smbus_write_block_data);
@@ -1734,10 +1765,10 @@ EXPORT_SYMBOL(i2c_smbus_write_i2c_block_data);
/* Simulate a SMBus command using the i2c protocol
No checking of parameters is done! */
-static s32 i2c_smbus_xfer_emulated(struct i2c_adapter * adapter, u16 addr,
- unsigned short flags,
- char read_write, u8 command, int size,
- union i2c_smbus_data * data)
+static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
+ unsigned short flags,
+ char read_write, u8 command, int size,
+ union i2c_smbus_data *data)
{
/* So we need to generate a series of msgs. In the case of writing, we
need to use only one message; when reading, we need two. We initialize
@@ -1745,7 +1776,7 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter * adapter, u16 addr,
simpler. */
unsigned char msgbuf0[I2C_SMBUS_BLOCK_MAX+3];
unsigned char msgbuf1[I2C_SMBUS_BLOCK_MAX+2];
- int num = read_write == I2C_SMBUS_READ?2:1;
+ int num = read_write == I2C_SMBUS_READ ? 2 : 1;
struct i2c_msg msg[2] = { { addr, flags, 1, msgbuf0 },
{ addr, flags | I2C_M_RD, 0, msgbuf1 }
};
@@ -1754,7 +1785,7 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter * adapter, u16 addr,
int status;
msgbuf0[0] = command;
- switch(size) {
+ switch (size) {
case I2C_SMBUS_QUICK:
msg[0].len = 0;
/* Special case: The read/write field is used as data */
@@ -1781,7 +1812,7 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter * adapter, u16 addr,
if (read_write == I2C_SMBUS_READ)
msg[1].len = 2;
else {
- msg[0].len=3;
+ msg[0].len = 3;
msgbuf0[1] = data->word & 0xff;
msgbuf0[2] = data->word >> 8;
}
@@ -1874,26 +1905,26 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter * adapter, u16 addr,
}
if (read_write == I2C_SMBUS_READ)
- switch(size) {
- case I2C_SMBUS_BYTE:
- data->byte = msgbuf0[0];
- break;
- case I2C_SMBUS_BYTE_DATA:
- data->byte = msgbuf1[0];
- break;
- case I2C_SMBUS_WORD_DATA:
- case I2C_SMBUS_PROC_CALL:
- data->word = msgbuf1[0] | (msgbuf1[1] << 8);
- break;
- case I2C_SMBUS_I2C_BLOCK_DATA:
- for (i = 0; i < data->block[0]; i++)
- data->block[i+1] = msgbuf1[i];
- break;
- case I2C_SMBUS_BLOCK_DATA:
- case I2C_SMBUS_BLOCK_PROC_CALL:
- for (i = 0; i < msgbuf1[0] + 1; i++)
- data->block[i] = msgbuf1[i];
- break;
+ switch (size) {
+ case I2C_SMBUS_BYTE:
+ data->byte = msgbuf0[0];
+ break;
+ case I2C_SMBUS_BYTE_DATA:
+ data->byte = msgbuf1[0];
+ break;
+ case I2C_SMBUS_WORD_DATA:
+ case I2C_SMBUS_PROC_CALL:
+ data->word = msgbuf1[0] | (msgbuf1[1] << 8);
+ break;
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+ for (i = 0; i < data->block[0]; i++)
+ data->block[i+1] = msgbuf1[i];
+ break;
+ case I2C_SMBUS_BLOCK_DATA:
+ case I2C_SMBUS_BLOCK_PROC_CALL:
+ for (i = 0; i < msgbuf1[0] + 1; i++)
+ data->block[i] = msgbuf1[i];
+ break;
}
return 0;
}
@@ -1938,7 +1969,7 @@ s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, unsigned short flags,
}
rt_mutex_unlock(&adapter->bus_lock);
} else
- res = i2c_smbus_xfer_emulated(adapter,addr,flags,read_write,
+ res = i2c_smbus_xfer_emulated(adapter, addr, flags, read_write,
command, protocol, data);
return res;
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index f4110aa49600..e0694e4d86c7 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -35,7 +35,7 @@
#include <linux/i2c.h>
#include <linux/i2c-dev.h>
#include <linux/jiffies.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
static struct i2c_driver i2cdev_driver;
@@ -132,45 +132,45 @@ static DEVICE_ATTR(name, S_IRUGO, show_adapter_name, NULL);
* needed by those system calls and by this SMBus interface.
*/
-static ssize_t i2cdev_read (struct file *file, char __user *buf, size_t count,
- loff_t *offset)
+static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count,
+ loff_t *offset)
{
char *tmp;
int ret;
- struct i2c_client *client = (struct i2c_client *)file->private_data;
+ struct i2c_client *client = file->private_data;
if (count > 8192)
count = 8192;
- tmp = kmalloc(count,GFP_KERNEL);
- if (tmp==NULL)
+ tmp = kmalloc(count, GFP_KERNEL);
+ if (tmp == NULL)
return -ENOMEM;
pr_debug("i2c-dev: i2c-%d reading %zu bytes.\n",
iminor(file->f_path.dentry->d_inode), count);
- ret = i2c_master_recv(client,tmp,count);
+ ret = i2c_master_recv(client, tmp, count);
if (ret >= 0)
- ret = copy_to_user(buf,tmp,count)?-EFAULT:ret;
+ ret = copy_to_user(buf, tmp, count) ? -EFAULT : ret;
kfree(tmp);
return ret;
}
-static ssize_t i2cdev_write (struct file *file, const char __user *buf, size_t count,
- loff_t *offset)
+static ssize_t i2cdev_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *offset)
{
int ret;
char *tmp;
- struct i2c_client *client = (struct i2c_client *)file->private_data;
+ struct i2c_client *client = file->private_data;
if (count > 8192)
count = 8192;
- tmp = kmalloc(count,GFP_KERNEL);
- if (tmp==NULL)
+ tmp = kmalloc(count, GFP_KERNEL);
+ if (tmp == NULL)
return -ENOMEM;
- if (copy_from_user(tmp,buf,count)) {
+ if (copy_from_user(tmp, buf, count)) {
kfree(tmp);
return -EFAULT;
}
@@ -178,7 +178,7 @@ static ssize_t i2cdev_write (struct file *file, const char __user *buf, size_t c
pr_debug("i2c-dev: i2c-%d writing %zu bytes.\n",
iminor(file->f_path.dentry->d_inode), count);
- ret = i2c_master_send(client,tmp,count);
+ ret = i2c_master_send(client, tmp, count);
kfree(tmp);
return ret;
}
@@ -369,13 +369,13 @@ static noinline int i2cdev_ioctl_smbus(struct i2c_client *client,
static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- struct i2c_client *client = (struct i2c_client *)file->private_data;
+ struct i2c_client *client = file->private_data;
unsigned long funcs;
dev_dbg(&client->adapter->dev, "ioctl, cmd=0x%02x, arg=0x%02lx\n",
cmd, arg);
- switch ( cmd ) {
+ switch (cmd) {
case I2C_SLAVE:
case I2C_SLAVE_FORCE:
/* NOTE: devices set up to work with "new style" drivers
@@ -601,7 +601,7 @@ static void __exit i2c_dev_exit(void)
{
i2c_del_driver(&i2cdev_driver);
class_destroy(i2c_dev_class);
- unregister_chrdev(I2C_MAJOR,"i2c");
+ unregister_chrdev(I2C_MAJOR, "i2c");
}
MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and "
diff --git a/drivers/ide/cmd640.c b/drivers/ide/cmd640.c
index d2b8b272bc27..cb10201a15ed 100644
--- a/drivers/ide/cmd640.c
+++ b/drivers/ide/cmd640.c
@@ -633,12 +633,10 @@ static void __init cmd640_init_dev(ide_drive_t *drive)
static int cmd640_test_irq(ide_hwif_t *hwif)
{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
int irq_reg = hwif->channel ? ARTTIM23 : CFR;
- u8 irq_stat, irq_mask = hwif->channel ? ARTTIM23_IDE23INTR :
+ u8 irq_mask = hwif->channel ? ARTTIM23_IDE23INTR :
CFR_IDE01INTR;
-
- pci_read_config_byte(dev, irq_reg, &irq_stat);
+ u8 irq_stat = get_cmd640_reg(irq_reg);
return (irq_stat & irq_mask) ? 1 : 0;
}
diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c
index b85450865ff0..0b7815d2581c 100644
--- a/drivers/ide/ide-cs.c
+++ b/drivers/ide/ide-cs.c
@@ -65,8 +65,7 @@ MODULE_LICENSE("Dual MPL/GPL");
typedef struct ide_info_t {
struct pcmcia_device *p_dev;
struct ide_host *host;
- int ndev;
- dev_node_t node;
+ int ndev;
} ide_info_t;
static void ide_release(struct pcmcia_device *);
@@ -102,7 +101,6 @@ static int ide_probe(struct pcmcia_device *link)
link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
link->io.IOAddrLines = 3;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -285,8 +283,7 @@ static int ide_config(struct pcmcia_device *link)
io_base = link->io.BasePort1;
ctl_base = stk->ctl_base;
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
+ if (!link->irq)
goto failed;
ret = pcmcia_request_configuration(link, &link->conf);
if (ret)
@@ -299,24 +296,21 @@ static int ide_config(struct pcmcia_device *link)
if (is_kme)
outb(0x81, ctl_base+1);
- host = idecs_register(io_base, ctl_base, link->irq.AssignedIRQ, link);
+ host = idecs_register(io_base, ctl_base, link->irq, link);
if (host == NULL && link->io.NumPorts1 == 0x20) {
outb(0x02, ctl_base + 0x10);
host = idecs_register(io_base + 0x10, ctl_base + 0x10,
- link->irq.AssignedIRQ, link);
+ link->irq, link);
}
if (host == NULL)
goto failed;
info->ndev = 1;
- sprintf(info->node.dev_name, "hd%c", 'a' + host->ports[0]->index * 2);
- info->node.major = host->ports[0]->major;
- info->node.minor = 0;
info->host = host;
- link->dev_node = &info->node;
- printk(KERN_INFO "ide-cs: %s: Vpp = %d.%d\n",
- info->node.dev_name, link->conf.Vpp / 10, link->conf.Vpp % 10);
+ dev_info(&link->dev, "ide-cs: hd%c: Vpp = %d.%d\n",
+ 'a' + host->ports[0]->index * 2,
+ link->conf.Vpp / 10, link->conf.Vpp % 10);
kfree(stk);
return 0;
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 06b14bc9a1d4..d4136908f916 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -449,7 +449,6 @@ ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
ide_hwif_t *hwif = drive->hwif;
const struct ide_dma_ops *dma_ops = hwif->dma_ops;
struct ide_cmd *cmd = &hwif->cmd;
- struct request *rq;
ide_startstop_t ret = ide_stopped;
/*
@@ -487,14 +486,10 @@ ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
ide_dma_off_quietly(drive);
/*
- * un-busy drive etc and make sure request is sane
+ * make sure request is sane
*/
- rq = hwif->rq;
- if (rq) {
- hwif->rq = NULL;
- rq->errors = 0;
- ide_requeue_and_plug(drive, rq);
- }
+ if (hwif->rq)
+ hwif->rq->errors = 0;
return ret;
}
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 3cb9c4e056ff..fa896210ed7b 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -177,7 +177,7 @@ EXPORT_SYMBOL_GPL(ide_pci_clk);
module_param_named(pci_clock, ide_pci_clk, int, 0);
MODULE_PARM_DESC(pci_clock, "PCI bus clock frequency (in MHz)");
-static int ide_set_dev_param_mask(const char *s, struct kernel_param *kp)
+static int ide_set_dev_param_mask(const char *s, const struct kernel_param *kp)
{
int a, b, i, j = 1;
unsigned int *dev_param_mask = (unsigned int *)kp->arg;
@@ -200,34 +200,40 @@ static int ide_set_dev_param_mask(const char *s, struct kernel_param *kp)
return 0;
}
+static struct kernel_param_ops param_ops_ide_dev_mask = {
+ .set = ide_set_dev_param_mask
+};
+
+#define param_check_ide_dev_mask(name, p) param_check_uint(name, p)
+
static unsigned int ide_nodma;
-module_param_call(nodma, ide_set_dev_param_mask, NULL, &ide_nodma, 0);
+module_param_named(nodma, ide_nodma, ide_dev_mask, 0);
MODULE_PARM_DESC(nodma, "disallow DMA for a device");
static unsigned int ide_noflush;
-module_param_call(noflush, ide_set_dev_param_mask, NULL, &ide_noflush, 0);
+module_param_named(noflush, ide_noflush, ide_dev_mask, 0);
MODULE_PARM_DESC(noflush, "disable flush requests for a device");
static unsigned int ide_nohpa;
-module_param_call(nohpa, ide_set_dev_param_mask, NULL, &ide_nohpa, 0);
+module_param_named(nohpa, ide_nohpa, ide_dev_mask, 0);
MODULE_PARM_DESC(nohpa, "disable Host Protected Area for a device");
static unsigned int ide_noprobe;
-module_param_call(noprobe, ide_set_dev_param_mask, NULL, &ide_noprobe, 0);
+module_param_named(noprobe, ide_noprobe, ide_dev_mask, 0);
MODULE_PARM_DESC(noprobe, "skip probing for a device");
static unsigned int ide_nowerr;
-module_param_call(nowerr, ide_set_dev_param_mask, NULL, &ide_nowerr, 0);
+module_param_named(nowerr, ide_nowerr, ide_dev_mask, 0);
MODULE_PARM_DESC(nowerr, "ignore the ATA_DF bit for a device");
static unsigned int ide_cdroms;
-module_param_call(cdrom, ide_set_dev_param_mask, NULL, &ide_cdroms, 0);
+module_param_named(cdrom, ide_cdroms, ide_dev_mask, 0);
MODULE_PARM_DESC(cdrom, "force device as a CD-ROM");
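
The conversion above moves from module_param_call() to the kernel_param_ops scheme: module_param_named(nodma, ide_nodma, ide_dev_mask, 0) expands to references to param_ops_ide_dev_mask and param_check_ide_dev_mask, which is why both are defined first. A minimal sketch of the same pattern for an invented parameter type (all foo_* names are hypothetical; only a setter is provided, mirroring the IDE code):

	#include <linux/kernel.h>
	#include <linux/module.h>
	#include <linux/moduleparam.h>

	static unsigned int foo_mask;

	/* custom parser: accept "N" and set bit N in the backing mask */
	static int foo_mask_set(const char *val, const struct kernel_param *kp)
	{
		unsigned int bit;

		if (sscanf(val, "%u", &bit) != 1 || bit > 31)
			return -EINVAL;
		*(unsigned int *)kp->arg |= 1 << bit;
		return 0;
	}

	static struct kernel_param_ops param_ops_foo_mask = {
		.set = foo_mask_set,
	};

	#define param_check_foo_mask(name, p) param_check_uint(name, p)

	module_param_named(foo, foo_mask, foo_mask, 0);
	MODULE_PARM_DESC(foo, "set bit N in a hypothetical device mask");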
struct chs_geom {
diff --git a/drivers/ide/ide_platform.c b/drivers/ide/ide_platform.c
index 42965b3e30b9..542603b394e4 100644
--- a/drivers/ide/ide_platform.c
+++ b/drivers/ide/ide_platform.c
@@ -95,6 +95,7 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
plat_ide_setup_ports(&hw, base, alt_base, pdata, res_irq->start);
hw.dev = &pdev->dev;
+ d.irq_flags = res_irq->flags;
if (mmio)
d.host_flags |= IDE_HFLAG_MMIO;
diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
index c5f3841af360..3a35ec6193d2 100644
--- a/drivers/ide/pdc202xx_old.c
+++ b/drivers/ide/pdc202xx_old.c
@@ -93,13 +93,13 @@ static int pdc202xx_test_irq(ide_hwif_t *hwif)
* bit 7: error, bit 6: interrupting,
* bit 5: FIFO full, bit 4: FIFO empty
*/
- return ((sc1d & 0x50) == 0x50) ? 1 : 0;
+ return (sc1d & 0x40) ? 1 : 0;
} else {
/*
* bit 3: error, bit 2: interrupting,
* bit 1: FIFO full, bit 0: FIFO empty
*/
- return ((sc1d & 0x05) == 0x05) ? 1 : 0;
+ return (sc1d & 0x04) ? 1 : 0;
}
}
@@ -241,6 +241,7 @@ static const struct ide_port_ops pdc20246_port_ops = {
static const struct ide_port_ops pdc2026x_port_ops = {
.set_pio_mode = pdc202xx_set_pio_mode,
.set_dma_mode = pdc202xx_set_mode,
+ .test_irq = pdc202xx_test_irq,
.cable_detect = pdc2026x_cable_detect,
};
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
index 9fd4a0d3206e..adaefabc40e9 100644
--- a/drivers/ieee1394/dv1394.c
+++ b/drivers/ieee1394/dv1394.c
@@ -1824,7 +1824,7 @@ static int dv1394_open(struct inode *inode, struct file *file)
"and will not be available in the new firewire driver stack. "
"Try libraw1394 based programs instead.\n", current->comm);
- return 0;
+ return nonseekable_open(inode, file);
}
@@ -2153,17 +2153,18 @@ static struct cdev dv1394_cdev;
static const struct file_operations dv1394_fops=
{
.owner = THIS_MODULE,
- .poll = dv1394_poll,
+ .poll = dv1394_poll,
.unlocked_ioctl = dv1394_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = dv1394_compat_ioctl,
#endif
.mmap = dv1394_mmap,
.open = dv1394_open,
- .write = dv1394_write,
- .read = dv1394_read,
+ .write = dv1394_write,
+ .read = dv1394_read,
.release = dv1394_release,
- .fasync = dv1394_fasync,
+ .fasync = dv1394_fasync,
+ .llseek = no_llseek,
};
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index 8aa56ac07e29..b563d5e9fa2e 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -2834,7 +2834,7 @@ static int raw1394_open(struct inode *inode, struct file *file)
file->private_data = fi;
- return 0;
+ return nonseekable_open(inode, file);
}
static int raw1394_release(struct inode *inode, struct file *file)
@@ -3035,6 +3035,7 @@ static const struct file_operations raw1394_fops = {
.poll = raw1394_poll,
.open = raw1394_open,
.release = raw1394_release,
+ .llseek = no_llseek,
};
static int __init init_raw1394(void)
diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c
index 949064a05675..a42bd6893bcf 100644
--- a/drivers/ieee1394/video1394.c
+++ b/drivers/ieee1394/video1394.c
@@ -1239,7 +1239,7 @@ static int video1394_open(struct inode *inode, struct file *file)
ctx->current_ctx = NULL;
file->private_data = ctx;
- return 0;
+ return nonseekable_open(inode, file);
}
static int video1394_release(struct inode *inode, struct file *file)
@@ -1287,7 +1287,8 @@ static const struct file_operations video1394_fops=
.poll = video1394_poll,
.mmap = video1394_mmap,
.open = video1394_open,
- .release = video1394_release
+ .release = video1394_release,
+ .llseek = no_llseek,
};
/*** HOTPLUG STUFF **********************************************************/
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 975adce5f40c..330d2a423362 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -46,6 +46,7 @@ source "drivers/infiniband/hw/ipath/Kconfig"
source "drivers/infiniband/hw/ehca/Kconfig"
source "drivers/infiniband/hw/amso1100/Kconfig"
source "drivers/infiniband/hw/cxgb3/Kconfig"
+source "drivers/infiniband/hw/cxgb4/Kconfig"
source "drivers/infiniband/hw/mlx4/Kconfig"
source "drivers/infiniband/hw/nes/Kconfig"
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index ed35e4496241..0c4e589d746e 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_INFINIBAND_IPATH) += hw/ipath/
obj-$(CONFIG_INFINIBAND_EHCA) += hw/ehca/
obj-$(CONFIG_INFINIBAND_AMSO1100) += hw/amso1100/
obj-$(CONFIG_INFINIBAND_CXGB3) += hw/cxgb3/
+obj-$(CONFIG_INFINIBAND_CXGB4) += hw/cxgb4/
obj-$(CONFIG_MLX4_INFINIBAND) += hw/mlx4/
obj-$(CONFIG_INFINIBAND_NES) += hw/nes/
obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 6d777069d86d..6ae418e81d82 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -79,7 +79,6 @@ static DEFINE_IDR(sdp_ps);
static DEFINE_IDR(tcp_ps);
static DEFINE_IDR(udp_ps);
static DEFINE_IDR(ipoib_ps);
-static int next_port;
struct cma_device {
struct list_head list;
@@ -1970,47 +1969,33 @@ err1:
static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
{
- struct rdma_bind_list *bind_list;
- int port, ret, low, high;
-
- bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
- if (!bind_list)
- return -ENOMEM;
-
-retry:
- /* FIXME: add proper port randomization per like inet_csk_get_port */
- do {
- ret = idr_get_new_above(ps, bind_list, next_port, &port);
- } while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));
-
- if (ret)
- goto err1;
+ static unsigned int last_used_port;
+ int low, high, remaining;
+ unsigned int rover;
inet_get_local_port_range(&low, &high);
- if (port > high) {
- if (next_port != low) {
- idr_remove(ps, port);
- next_port = low;
- goto retry;
- }
- ret = -EADDRNOTAVAIL;
- goto err2;
+ remaining = (high - low) + 1;
+ rover = net_random() % remaining + low;
+retry:
+ if (last_used_port != rover &&
+ !idr_find(ps, (unsigned short) rover)) {
+ int ret = cma_alloc_port(ps, id_priv, rover);
+ /*
+ * Remember the previously used port number in order to avoid
+ * re-using the same port immediately after it is closed.
+ */
+ if (!ret)
+ last_used_port = rover;
+ if (ret != -EADDRNOTAVAIL)
+ return ret;
}
-
- if (port == high)
- next_port = low;
- else
- next_port = port + 1;
-
- bind_list->ps = ps;
- bind_list->port = (unsigned short) port;
- cma_bind_port(bind_list, id_priv);
- return 0;
-err2:
- idr_remove(ps, port);
-err1:
- kfree(bind_list);
- return ret;
+ if (--remaining) {
+ rover++;
+ if ((rover < low) || (rover > high))
+ rover = low;
+ goto retry;
+ }
+ return -EADDRNOTAVAIL;
}
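
The rewritten allocator above starts from a random "rover" within the local port range, walks forward with wrap-around until a bind succeeds, and remembers the last port handed out so it is not reused immediately. A standalone user-space sketch of that search loop (try_bind() and the hard-coded range stand in for cma_alloc_port() and inet_get_local_port_range()):

	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <time.h>

	static bool port_in_use[65536];		/* stand-in for the idr lookups */

	static int try_bind(unsigned int port)
	{
		if (port_in_use[port])
			return -1;		/* -EADDRNOTAVAIL in the kernel code */
		port_in_use[port] = true;
		return 0;
	}

	static int alloc_any_port(unsigned int low, unsigned int high)
	{
		static unsigned int last_used_port;
		unsigned int remaining = high - low + 1;
		unsigned int rover = rand() % remaining + low;

		while (remaining--) {
			if (rover != last_used_port && try_bind(rover) == 0) {
				last_used_port = rover;
				return rover;
			}
			if (++rover > high)
				rover = low;
		}
		return -1;			/* every port in the range is taken */
	}

	int main(void)
	{
		srand(time(NULL));
		printf("got port %d\n", alloc_any_port(32768, 61000));
		return 0;
	}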
static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
@@ -2995,12 +2980,7 @@ static void cma_remove_one(struct ib_device *device)
static int __init cma_init(void)
{
- int ret, low, high, remaining;
-
- get_random_bytes(&next_port, sizeof next_port);
- inet_get_local_port_range(&low, &high);
- remaining = (high - low) + 1;
- next_port = ((unsigned int) next_port % remaining) + low;
+ int ret;
cma_wq = create_singlethread_workqueue("rdma_cm");
if (!cma_wq)
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 512b1c43460c..46474842cfe9 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -1181,7 +1181,7 @@ static int ib_ucm_open(struct inode *inode, struct file *filp)
file->filp = filp;
file->device = container_of(inode->i_cdev, struct ib_ucm_device, cdev);
- return 0;
+ return nonseekable_open(inode, filp);
}
static int ib_ucm_close(struct inode *inode, struct file *filp)
@@ -1229,6 +1229,7 @@ static const struct file_operations ucm_fops = {
.release = ib_ucm_close,
.write = ib_ucm_write,
.poll = ib_ucm_poll,
+ .llseek = no_llseek,
};
static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 46185084121e..ac7edc24165c 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1220,7 +1220,8 @@ static int ucma_open(struct inode *inode, struct file *filp)
filp->private_data = file;
file->filp = filp;
- return 0;
+
+ return nonseekable_open(inode, filp);
}
static int ucma_close(struct inode *inode, struct file *filp)
@@ -1250,6 +1251,7 @@ static const struct file_operations ucma_fops = {
.release = ucma_close,
.write = ucma_write,
.poll = ucma_poll,
+ .llseek = no_llseek,
};
static struct miscdevice ucma_misc = {
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index e7db054fb1c8..6babb72b39fc 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -781,7 +781,7 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
{
struct ib_umad_port *port;
struct ib_umad_file *file;
- int ret = 0;
+ int ret;
port = container_of(inode->i_cdev, struct ib_umad_port, cdev);
if (port)
@@ -814,6 +814,8 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
list_add_tail(&file->port_list, &port->file_list);
+ ret = nonseekable_open(inode, filp);
+
out:
mutex_unlock(&port->file_mutex);
return ret;
@@ -866,7 +868,8 @@ static const struct file_operations umad_fops = {
.compat_ioctl = ib_umad_compat_ioctl,
#endif
.open = ib_umad_open,
- .release = ib_umad_close
+ .release = ib_umad_close,
+ .llseek = no_llseek,
};
static int ib_umad_sm_open(struct inode *inode, struct file *filp)
@@ -903,7 +906,7 @@ static int ib_umad_sm_open(struct inode *inode, struct file *filp)
filp->private_data = port;
- return 0;
+ return nonseekable_open(inode, filp);
fail:
kref_put(&port->umad_dev->ref, ib_umad_release_dev);
@@ -933,7 +936,8 @@ static int ib_umad_sm_close(struct inode *inode, struct file *filp)
static const struct file_operations umad_sm_fops = {
.owner = THIS_MODULE,
.open = ib_umad_sm_open,
- .release = ib_umad_sm_close
+ .release = ib_umad_sm_close,
+ .llseek = no_llseek,
};
static struct ib_client umad_client = {
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index fb3526254426..bd2e6e181a47 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -41,6 +41,7 @@
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/sched.h>
+#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/cdev.h>
#include <linux/anon_inodes.h>
@@ -369,7 +370,8 @@ static const struct file_operations uverbs_event_fops = {
.read = ib_uverbs_event_read,
.poll = ib_uverbs_event_poll,
.release = ib_uverbs_event_close,
- .fasync = ib_uverbs_event_fasync
+ .fasync = ib_uverbs_event_fasync,
+ .llseek = no_llseek,
};
void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
@@ -623,7 +625,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
filp->private_data = file;
- return 0;
+ return nonseekable_open(inode, filp);
err_module:
module_put(dev->ib_dev->owner);
@@ -651,7 +653,8 @@ static const struct file_operations uverbs_fops = {
.owner = THIS_MODULE,
.write = ib_uverbs_write,
.open = ib_uverbs_open,
- .release = ib_uverbs_close
+ .release = ib_uverbs_close,
+ .llseek = no_llseek,
};
static const struct file_operations uverbs_mmap_fops = {
@@ -659,7 +662,8 @@ static const struct file_operations uverbs_mmap_fops = {
.write = ib_uverbs_write,
.mmap = ib_uverbs_mmap,
.open = ib_uverbs_open,
- .release = ib_uverbs_close
+ .release = ib_uverbs_close,
+ .llseek = no_llseek,
};
static struct ib_client uverbs_client = {
diff --git a/drivers/infiniband/hw/amso1100/c2.h b/drivers/infiniband/hw/amso1100/c2.h
index f7ff66f98361..6ae698e68775 100644
--- a/drivers/infiniband/hw/amso1100/c2.h
+++ b/drivers/infiniband/hw/amso1100/c2.h
@@ -250,7 +250,7 @@ struct c2_array {
struct sp_chunk {
struct sp_chunk *next;
dma_addr_t dma_addr;
- DECLARE_PCI_UNMAP_ADDR(mapping);
+ DEFINE_DMA_UNMAP_ADDR(mapping);
u16 head;
u16 shared_ptr[0];
};
diff --git a/drivers/infiniband/hw/amso1100/c2_alloc.c b/drivers/infiniband/hw/amso1100/c2_alloc.c
index d4f5f5d42e90..78d247ec6961 100644
--- a/drivers/infiniband/hw/amso1100/c2_alloc.c
+++ b/drivers/infiniband/hw/amso1100/c2_alloc.c
@@ -49,7 +49,7 @@ static int c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask,
return -ENOMEM;
new_head->dma_addr = dma_addr;
- pci_unmap_addr_set(new_head, mapping, new_head->dma_addr);
+ dma_unmap_addr_set(new_head, mapping, new_head->dma_addr);
new_head->next = NULL;
new_head->head = 0;
@@ -81,7 +81,7 @@ void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root)
while (root) {
next = root->next;
dma_free_coherent(&c2dev->pcidev->dev, PAGE_SIZE, root,
- pci_unmap_addr(root, mapping));
+ dma_unmap_addr(root, mapping));
root = next;
}
}
diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c
index f7b0fc23f413..49e0e8533f74 100644
--- a/drivers/infiniband/hw/amso1100/c2_cq.c
+++ b/drivers/infiniband/hw/amso1100/c2_cq.c
@@ -257,7 +257,7 @@ int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
{
dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq->msg_size,
- mq->msg_pool.host, pci_unmap_addr(mq, mapping));
+ mq->msg_pool.host, dma_unmap_addr(mq, mapping));
}
static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
@@ -278,7 +278,7 @@ static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
NULL, /* peer (currently unknown) */
C2_MQ_HOST_TARGET);
- pci_unmap_addr_set(mq, mapping, mq->host_dma);
+ dma_unmap_addr_set(mq, mapping, mq->host_dma);
return 0;
}
diff --git a/drivers/infiniband/hw/amso1100/c2_mq.h b/drivers/infiniband/hw/amso1100/c2_mq.h
index acede007b94a..fc1b9a7cec4b 100644
--- a/drivers/infiniband/hw/amso1100/c2_mq.h
+++ b/drivers/infiniband/hw/amso1100/c2_mq.h
@@ -71,7 +71,7 @@ struct c2_mq {
u8 __iomem *adapter;
} msg_pool;
dma_addr_t host_dma;
- DECLARE_PCI_UNMAP_ADDR(mapping);
+ DEFINE_DMA_UNMAP_ADDR(mapping);
u16 hint_count;
u16 priv;
struct c2_mq_shared __iomem *peer;
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.h b/drivers/infiniband/hw/amso1100/c2_provider.h
index 1076df2ee96a..bf189987711f 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.h
+++ b/drivers/infiniband/hw/amso1100/c2_provider.h
@@ -50,7 +50,7 @@
struct c2_buf_list {
void *buf;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
};
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index 78c4bcc6ef60..85cfae4cad71 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -524,7 +524,7 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
err = -ENOMEM;
goto bail1;
}
- pci_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
+ dma_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
pr_debug("%s rep_vq va %p dma %llx\n", __func__, q1_pages,
(unsigned long long) c2dev->rep_vq.host_dma);
c2_mq_rep_init(&c2dev->rep_vq,
@@ -545,7 +545,7 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
err = -ENOMEM;
goto bail2;
}
- pci_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
+ dma_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
pr_debug("%s aeq va %p dma %llx\n", __func__, q2_pages,
(unsigned long long) c2dev->aeq.host_dma);
c2_mq_rep_init(&c2dev->aeq,
@@ -596,11 +596,11 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
bail3:
dma_free_coherent(&c2dev->pcidev->dev,
c2dev->aeq.q_size * c2dev->aeq.msg_size,
- q2_pages, pci_unmap_addr(&c2dev->aeq, mapping));
+ q2_pages, dma_unmap_addr(&c2dev->aeq, mapping));
bail2:
dma_free_coherent(&c2dev->pcidev->dev,
c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
- q1_pages, pci_unmap_addr(&c2dev->rep_vq, mapping));
+ q1_pages, dma_unmap_addr(&c2dev->rep_vq, mapping));
bail1:
c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
bail0:
@@ -637,13 +637,13 @@ void __devexit c2_rnic_term(struct c2_dev *c2dev)
dma_free_coherent(&c2dev->pcidev->dev,
c2dev->aeq.q_size * c2dev->aeq.msg_size,
c2dev->aeq.msg_pool.host,
- pci_unmap_addr(&c2dev->aeq, mapping));
+ dma_unmap_addr(&c2dev->aeq, mapping));
/* Free the verbs reply queue */
dma_free_coherent(&c2dev->pcidev->dev,
c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
c2dev->rep_vq.msg_pool.host,
- pci_unmap_addr(&c2dev->rep_vq, mapping));
+ dma_unmap_addr(&c2dev->rep_vq, mapping));
/* Free the MQ shared pointer pool */
c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
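
The DECLARE_PCI_UNMAP_ADDR() to DEFINE_DMA_UNMAP_ADDR() conversions above move these drivers from the PCI-specific unmap-state helpers to the generic DMA API equivalents; on configurations that do not need the saved address the field still compiles away to nothing. A minimal kernel-style sketch of how the macros are normally used together (struct and function names here are hypothetical, not taken from these drivers):

	#include <linux/dma-mapping.h>

	struct example_buf {
		void	*data;
		DEFINE_DMA_UNMAP_ADDR(mapping);	/* present only when the arch needs it */
		DEFINE_DMA_UNMAP_LEN(len);
	};

	static int example_map(struct device *dev, struct example_buf *b, size_t size)
	{
		dma_addr_t dma = dma_map_single(dev, b->data, size, DMA_TO_DEVICE);

		if (dma_mapping_error(dev, dma))
			return -EIO;
		dma_unmap_addr_set(b, mapping, dma);	/* stash for the later unmap */
		dma_unmap_len_set(b, len, size);
		return 0;
	}

	static void example_unmap(struct device *dev, struct example_buf *b)
	{
		dma_unmap_single(dev, dma_unmap_addr(b, mapping),
				 dma_unmap_len(b, len), DMA_TO_DEVICE);
	}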
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 35f286f1ad1e..005b7b52bc1e 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -174,7 +174,7 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
kfree(cq->sw_queue);
return -ENOMEM;
}
- pci_unmap_addr_set(cq, mapping, cq->dma_addr);
+ dma_unmap_addr_set(cq, mapping, cq->dma_addr);
memset(cq->queue, 0, size);
setup.id = cq->cqid;
setup.base_addr = (u64) (cq->dma_addr);
@@ -297,7 +297,7 @@ int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain,
goto err4;
memset(wq->queue, 0, depth * sizeof(union t3_wr));
- pci_unmap_addr_set(wq, mapping, wq->dma_addr);
+ dma_unmap_addr_set(wq, mapping, wq->dma_addr);
wq->doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
if (!kernel_domain)
wq->udb = (u64)rdev_p->rnic_info.udbell_physbase +
@@ -325,7 +325,7 @@ int cxio_destroy_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
(1UL << (cq->size_log2))
* sizeof(struct t3_cqe), cq->queue,
- pci_unmap_addr(cq, mapping));
+ dma_unmap_addr(cq, mapping));
cxio_hal_put_cqid(rdev_p->rscp, cq->cqid);
return err;
}
@@ -336,7 +336,7 @@ int cxio_destroy_qp(struct cxio_rdev *rdev_p, struct t3_wq *wq,
dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
(1UL << (wq->size_log2))
* sizeof(union t3_wr), wq->queue,
- pci_unmap_addr(wq, mapping));
+ dma_unmap_addr(wq, mapping));
kfree(wq->sq);
cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, (1UL << wq->rq_size_log2));
kfree(wq->rq);
@@ -537,7 +537,7 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
err = -ENOMEM;
goto err;
}
- pci_unmap_addr_set(&rdev_p->ctrl_qp, mapping,
+ dma_unmap_addr_set(&rdev_p->ctrl_qp, mapping,
rdev_p->ctrl_qp.dma_addr);
rdev_p->ctrl_qp.doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
memset(rdev_p->ctrl_qp.workq, 0,
@@ -583,7 +583,7 @@ static int cxio_hal_destroy_ctrl_qp(struct cxio_rdev *rdev_p)
dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
(1UL << T3_CTRL_QP_SIZE_LOG2)
* sizeof(union t3_wr), rdev_p->ctrl_qp.workq,
- pci_unmap_addr(&rdev_p->ctrl_qp, mapping));
+ dma_unmap_addr(&rdev_p->ctrl_qp, mapping));
return cxio_hal_clear_qp_ctx(rdev_p, T3_CTRL_QP_ID);
}
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
index 073373c2c560..8f0caf7d4482 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h
@@ -71,7 +71,7 @@ struct cxio_hal_ctrl_qp {
wait_queue_head_t waitq;/* wait for RspQ/CQE msg */
union t3_wr *workq; /* the work request queue */
dma_addr_t dma_addr; /* pci bus address of the workq */
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
void __iomem *doorbell;
};
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index 15073b2da1c5..e5ddb63e7d23 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -691,7 +691,7 @@ struct t3_swrq {
struct t3_wq {
union t3_wr *queue; /* DMA accessable memory */
dma_addr_t dma_addr; /* DMA address for HW */
- DECLARE_PCI_UNMAP_ADDR(mapping) /* unmap kruft */
+ DEFINE_DMA_UNMAP_ADDR(mapping); /* unmap kruft */
u32 error; /* 1 once we go to ERROR */
u32 qpid;
u32 wptr; /* idx to next available WR slot */
@@ -718,7 +718,7 @@ struct t3_cq {
u32 wptr;
u32 size_log2;
dma_addr_t dma_addr;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
struct t3_cqe *queue;
struct t3_cqe *sw_queue;
u32 sw_rptr;
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
index 63f975f3e30f..8e77dc543dd1 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ b/drivers/infiniband/hw/cxgb3/iwch.c
@@ -47,8 +47,6 @@ MODULE_DESCRIPTION("Chelsio T3 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
-cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS];
-
static void open_rnic_dev(struct t3cdev *);
static void close_rnic_dev(struct t3cdev *);
static void iwch_event_handler(struct t3cdev *, u32, u32);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 4fef03296276..ebfb117ba68b 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -102,12 +102,9 @@ static unsigned int cong_flavor = 1;
module_param(cong_flavor, uint, 0644);
MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)");
-static void process_work(struct work_struct *work);
static struct workqueue_struct *workq;
-static DECLARE_WORK(skb_work, process_work);
static struct sk_buff_head rxq;
-static cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS];
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
@@ -151,7 +148,7 @@ int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2
return -EIO;
}
error = l2t_send(tdev, skb, l2e);
- if (error)
+ if (error < 0)
kfree_skb(skb);
return error;
}
@@ -167,7 +164,7 @@ int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
return -EIO;
}
error = cxgb3_ofld_send(tdev, skb);
- if (error)
+ if (error < 0)
kfree_skb(skb);
return error;
}
@@ -302,27 +299,6 @@ static void release_ep_resources(struct iwch_ep *ep)
put_ep(&ep->com);
}
-static void process_work(struct work_struct *work)
-{
- struct sk_buff *skb = NULL;
- void *ep;
- struct t3cdev *tdev;
- int ret;
-
- while ((skb = skb_dequeue(&rxq))) {
- ep = *((void **) (skb->cb));
- tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
- ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
- if (ret & CPL_RET_BUF_DONE)
- kfree_skb(skb);
-
- /*
- * ep was referenced in sched(), and is freed here.
- */
- put_ep((struct iwch_ep_common *)ep);
- }
-}
-
static int status2errno(int status)
{
switch (status) {
@@ -2157,7 +2133,49 @@ int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
/*
* All the CM events are handled on a work queue to have a safe context.
+ * These are the real handlers that are called from the work queue.
*/
+static const cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS] = {
+ [CPL_ACT_ESTABLISH] = act_establish,
+ [CPL_ACT_OPEN_RPL] = act_open_rpl,
+ [CPL_RX_DATA] = rx_data,
+ [CPL_TX_DMA_ACK] = tx_ack,
+ [CPL_ABORT_RPL_RSS] = abort_rpl,
+ [CPL_ABORT_RPL] = abort_rpl,
+ [CPL_PASS_OPEN_RPL] = pass_open_rpl,
+ [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
+ [CPL_PASS_ACCEPT_REQ] = pass_accept_req,
+ [CPL_PASS_ESTABLISH] = pass_establish,
+ [CPL_PEER_CLOSE] = peer_close,
+ [CPL_ABORT_REQ_RSS] = peer_abort,
+ [CPL_CLOSE_CON_RPL] = close_con_rpl,
+ [CPL_RDMA_TERMINATE] = terminate,
+ [CPL_RDMA_EC_STATUS] = ec_status,
+};
+
+static void process_work(struct work_struct *work)
+{
+ struct sk_buff *skb = NULL;
+ void *ep;
+ struct t3cdev *tdev;
+ int ret;
+
+ while ((skb = skb_dequeue(&rxq))) {
+ ep = *((void **) (skb->cb));
+ tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
+ ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
+ if (ret & CPL_RET_BUF_DONE)
+ kfree_skb(skb);
+
+ /*
+ * ep was referenced in sched(), and is freed here.
+ */
+ put_ep((struct iwch_ep_common *)ep);
+ }
+}
+
+static DECLARE_WORK(skb_work, process_work);
+
static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
struct iwch_ep_common *epc = ctx;
@@ -2189,6 +2207,29 @@ static int set_tcb_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
return CPL_RET_BUF_DONE;
}
+/*
+ * All upcalls from the T3 Core go to sched() to schedule the
+ * processing on a work queue.
+ */
+cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS] = {
+ [CPL_ACT_ESTABLISH] = sched,
+ [CPL_ACT_OPEN_RPL] = sched,
+ [CPL_RX_DATA] = sched,
+ [CPL_TX_DMA_ACK] = sched,
+ [CPL_ABORT_RPL_RSS] = sched,
+ [CPL_ABORT_RPL] = sched,
+ [CPL_PASS_OPEN_RPL] = sched,
+ [CPL_CLOSE_LISTSRV_RPL] = sched,
+ [CPL_PASS_ACCEPT_REQ] = sched,
+ [CPL_PASS_ESTABLISH] = sched,
+ [CPL_PEER_CLOSE] = sched,
+ [CPL_CLOSE_CON_RPL] = sched,
+ [CPL_ABORT_REQ_RSS] = sched,
+ [CPL_RDMA_TERMINATE] = sched,
+ [CPL_RDMA_EC_STATUS] = sched,
+ [CPL_SET_TCB_RPL] = set_tcb_rpl,
+};
+
int __init iwch_cm_init(void)
{
skb_queue_head_init(&rxq);
@@ -2197,46 +2238,6 @@ int __init iwch_cm_init(void)
if (!workq)
return -ENOMEM;
- /*
- * All upcalls from the T3 Core go to sched() to
- * schedule the processing on a work queue.
- */
- t3c_handlers[CPL_ACT_ESTABLISH] = sched;
- t3c_handlers[CPL_ACT_OPEN_RPL] = sched;
- t3c_handlers[CPL_RX_DATA] = sched;
- t3c_handlers[CPL_TX_DMA_ACK] = sched;
- t3c_handlers[CPL_ABORT_RPL_RSS] = sched;
- t3c_handlers[CPL_ABORT_RPL] = sched;
- t3c_handlers[CPL_PASS_OPEN_RPL] = sched;
- t3c_handlers[CPL_CLOSE_LISTSRV_RPL] = sched;
- t3c_handlers[CPL_PASS_ACCEPT_REQ] = sched;
- t3c_handlers[CPL_PASS_ESTABLISH] = sched;
- t3c_handlers[CPL_PEER_CLOSE] = sched;
- t3c_handlers[CPL_CLOSE_CON_RPL] = sched;
- t3c_handlers[CPL_ABORT_REQ_RSS] = sched;
- t3c_handlers[CPL_RDMA_TERMINATE] = sched;
- t3c_handlers[CPL_RDMA_EC_STATUS] = sched;
- t3c_handlers[CPL_SET_TCB_RPL] = set_tcb_rpl;
-
- /*
- * These are the real handlers that are called from a
- * work queue.
- */
- work_handlers[CPL_ACT_ESTABLISH] = act_establish;
- work_handlers[CPL_ACT_OPEN_RPL] = act_open_rpl;
- work_handlers[CPL_RX_DATA] = rx_data;
- work_handlers[CPL_TX_DMA_ACK] = tx_ack;
- work_handlers[CPL_ABORT_RPL_RSS] = abort_rpl;
- work_handlers[CPL_ABORT_RPL] = abort_rpl;
- work_handlers[CPL_PASS_OPEN_RPL] = pass_open_rpl;
- work_handlers[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl;
- work_handlers[CPL_PASS_ACCEPT_REQ] = pass_accept_req;
- work_handlers[CPL_PASS_ESTABLISH] = pass_establish;
- work_handlers[CPL_PEER_CLOSE] = peer_close;
- work_handlers[CPL_ABORT_REQ_RSS] = peer_abort;
- work_handlers[CPL_CLOSE_CON_RPL] = close_con_rpl;
- work_handlers[CPL_RDMA_TERMINATE] = terminate;
- work_handlers[CPL_RDMA_EC_STATUS] = ec_status;
return 0;
}
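
In iwch_cm.c the handler tables are now const and filled in at compile time with C99 designated initializers instead of being assigned one entry at a time in iwch_cm_init(). A small self-contained illustration of the same idiom, using made-up opcodes and handlers rather than the CPL values:

	#include <stdio.h>

	enum { OP_OPEN, OP_CLOSE, OP_DATA, NUM_OPS };

	typedef int (*op_handler)(int arg);

	static int handle_open(int arg)  { printf("open %d\n", arg);  return 0; }
	static int handle_close(int arg) { printf("close %d\n", arg); return 0; }
	static int handle_data(int arg)  { printf("data %d\n", arg);  return 0; }

	/* const dispatch table built at compile time with designated initializers */
	static const op_handler handlers[NUM_OPS] = {
		[OP_OPEN]  = handle_open,
		[OP_CLOSE] = handle_close,
		[OP_DATA]  = handle_data,
	};

	int main(void)
	{
		return handlers[OP_DATA](42);
	}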
diff --git a/drivers/infiniband/hw/cxgb4/Kconfig b/drivers/infiniband/hw/cxgb4/Kconfig
new file mode 100644
index 000000000000..ccb85eaaad75
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/Kconfig
@@ -0,0 +1,18 @@
+config INFINIBAND_CXGB4
+ tristate "Chelsio T4 RDMA Driver"
+ depends on CHELSIO_T4 && INET
+ select GENERIC_ALLOCATOR
+ ---help---
+ This is an iWARP/RDMA driver for the Chelsio T4 1GbE and
+ 10GbE adapters.
+
+ For general information about Chelsio and our products, visit
+ our website at <http://www.chelsio.com>.
+
+ For customer support, please visit our customer support page at
+ <http://www.chelsio.com/support.htm>.
+
+ Please send feedback to <linux-bugs@chelsio.com>.
+
+ To compile this driver as a module, choose M here: the module
+ will be called iw_cxgb4.
diff --git a/drivers/infiniband/hw/cxgb4/Makefile b/drivers/infiniband/hw/cxgb4/Makefile
new file mode 100644
index 000000000000..e31a499f0172
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/Makefile
@@ -0,0 +1,5 @@
+EXTRA_CFLAGS += -Idrivers/net/cxgb4
+
+obj-$(CONFIG_INFINIBAND_CXGB4) += iw_cxgb4.o
+
+iw_cxgb4-y := device.o cm.o provider.o mem.o cq.o qp.o resource.o ev.o
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
new file mode 100644
index 000000000000..30ce0a8eca09
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -0,0 +1,2374 @@
+/*
+ * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/timer.h>
+#include <linux/notifier.h>
+#include <linux/inetdevice.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+
+#include <net/neighbour.h>
+#include <net/netevent.h>
+#include <net/route.h>
+
+#include "iw_cxgb4.h"
+
+static char *states[] = {
+ "idle",
+ "listen",
+ "connecting",
+ "mpa_wait_req",
+ "mpa_req_sent",
+ "mpa_req_rcvd",
+ "mpa_rep_sent",
+ "fpdu_mode",
+ "aborting",
+ "closing",
+ "moribund",
+ "dead",
+ NULL,
+};
+
+int c4iw_max_read_depth = 8;
+module_param(c4iw_max_read_depth, int, 0644);
+MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");
+
+static int enable_tcp_timestamps;
+module_param(enable_tcp_timestamps, int, 0644);
+MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");
+
+static int enable_tcp_sack;
+module_param(enable_tcp_sack, int, 0644);
+MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");
+
+static int enable_tcp_window_scaling = 1;
+module_param(enable_tcp_window_scaling, int, 0644);
+MODULE_PARM_DESC(enable_tcp_window_scaling,
+ "Enable tcp window scaling (default=1)");
+
+int c4iw_debug;
+module_param(c4iw_debug, int, 0644);
+MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");
+
+static int peer2peer;
+module_param(peer2peer, int, 0644);
+MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");
+
+static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
+module_param(p2p_type, int, 0644);
+MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
+ "1=RDMA_READ 0=RDMA_WRITE (default 1)");
+
+static int ep_timeout_secs = 60;
+module_param(ep_timeout_secs, int, 0644);
+MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
+ "in seconds (default=60)");
+
+static int mpa_rev = 1;
+module_param(mpa_rev, int, 0644);
+MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
+ "1 is spec compliant. (default=1)");
+
+static int markers_enabled;
+module_param(markers_enabled, int, 0644);
+MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");
+
+static int crc_enabled = 1;
+module_param(crc_enabled, int, 0644);
+MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");
+
+static int rcv_win = 256 * 1024;
+module_param(rcv_win, int, 0644);
+MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");
+
+static int snd_win = 32 * 1024;
+module_param(snd_win, int, 0644);
+MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");
+
+static struct workqueue_struct *workq;
+
+static struct sk_buff_head rxq;
+
+static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
+static void ep_timeout(unsigned long arg);
+static void connect_reply_upcall(struct c4iw_ep *ep, int status);
+
+static LIST_HEAD(timeout_list);
+static spinlock_t timeout_lock;
+
+static void start_ep_timer(struct c4iw_ep *ep)
+{
+ PDBG("%s ep %p\n", __func__, ep);
+ if (timer_pending(&ep->timer)) {
+ PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
+ del_timer_sync(&ep->timer);
+ } else
+ c4iw_get_ep(&ep->com);
+ ep->timer.expires = jiffies + ep_timeout_secs * HZ;
+ ep->timer.data = (unsigned long)ep;
+ ep->timer.function = ep_timeout;
+ add_timer(&ep->timer);
+}
+
+static void stop_ep_timer(struct c4iw_ep *ep)
+{
+ PDBG("%s ep %p\n", __func__, ep);
+ if (!timer_pending(&ep->timer)) {
+ printk(KERN_ERR "%s timer stopped when its not running! "
+ "ep %p state %u\n", __func__, ep, ep->com.state);
+ WARN_ON(1);
+ return;
+ }
+ del_timer_sync(&ep->timer);
+ c4iw_put_ep(&ep->com);
+}
+
+static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
+ struct l2t_entry *l2e)
+{
+ int error = 0;
+
+ if (c4iw_fatal_error(rdev)) {
+ kfree_skb(skb);
+ PDBG("%s - device in error state - dropping\n", __func__);
+ return -EIO;
+ }
+ error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
+ if (error < 0)
+ kfree_skb(skb);
+ return error;
+}
+
+int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
+{
+ int error = 0;
+
+ if (c4iw_fatal_error(rdev)) {
+ kfree_skb(skb);
+ PDBG("%s - device in error state - dropping\n", __func__);
+ return -EIO;
+ }
+ error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
+ if (error < 0)
+ kfree_skb(skb);
+ return error;
+}
+
+static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
+{
+ struct cpl_tid_release *req;
+
+ skb = get_skb(skb, sizeof *req, GFP_KERNEL);
+ if (!skb)
+ return;
+ req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
+ INIT_TP_WR(req, hwtid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
+ set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
+ c4iw_ofld_send(rdev, skb);
+ return;
+}
+
+static void set_emss(struct c4iw_ep *ep, u16 opt)
+{
+ ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40;
+ ep->mss = ep->emss;
+ if (GET_TCPOPT_TSTAMP(opt))
+ ep->emss -= 12;
+ if (ep->emss < 128)
+ ep->emss = 128;
+ PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
+ ep->mss, ep->emss);
+}
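+
The set_emss() helper above derives the effective MSS from the MTU table entry selected by the TCP options: subtract 40 bytes (presumably the IP and TCP headers), another 12 if timestamps were negotiated, and clamp to a floor of 128. A stand-alone version of the same arithmetic, with example MTU values:

	#include <stdio.h>

	static unsigned int emss_from_mtu(unsigned int mtu, int tstamps_enabled)
	{
		unsigned int emss = mtu - 40;	/* IP (20) + TCP (20) headers */

		if (tstamps_enabled)
			emss -= 12;		/* TCP timestamp option */
		if (emss < 128)
			emss = 128;		/* floor used by the driver */
		return emss;
	}

	int main(void)
	{
		printf("mtu 1500, no timestamps -> emss %u\n", emss_from_mtu(1500, 0)); /* 1460 */
		printf("mtu 1500, timestamps    -> emss %u\n", emss_from_mtu(1500, 1)); /* 1448 */
		printf("mtu 100,  timestamps    -> emss %u\n", emss_from_mtu(100, 1));  /* clamped to 128 */
		return 0;
	}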
+
+static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
+{
+ unsigned long flags;
+ enum c4iw_ep_state state;
+
+ spin_lock_irqsave(&epc->lock, flags);
+ state = epc->state;
+ spin_unlock_irqrestore(&epc->lock, flags);
+ return state;
+}
+
+static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
+{
+ epc->state = new;
+}
+
+static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&epc->lock, flags);
+ PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
+ __state_set(epc, new);
+ spin_unlock_irqrestore(&epc->lock, flags);
+ return;
+}
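+
The state_read()/state_set() pair above wraps every endpoint state access in the endpoint lock so handlers running on different CPUs see a consistent state machine. A user-space analogue of the same guarded-state pattern, with a pthread spinlock standing in for the kernel spinlock and illustrative state names:

	#include <pthread.h>
	#include <stdio.h>

	enum ep_state { IDLE, CONNECTING, FPDU_MODE, DEAD };

	struct ep {
		pthread_spinlock_t lock;
		enum ep_state state;
	};

	static enum ep_state ep_state_read(struct ep *ep)
	{
		enum ep_state s;

		pthread_spin_lock(&ep->lock);
		s = ep->state;
		pthread_spin_unlock(&ep->lock);
		return s;
	}

	static void ep_state_set(struct ep *ep, enum ep_state new_state)
	{
		pthread_spin_lock(&ep->lock);
		ep->state = new_state;
		pthread_spin_unlock(&ep->lock);
	}

	int main(void)	/* build with: cc -pthread */
	{
		struct ep ep = { .state = IDLE };

		pthread_spin_init(&ep.lock, PTHREAD_PROCESS_PRIVATE);
		ep_state_set(&ep, CONNECTING);
		printf("state %d\n", ep_state_read(&ep));
		pthread_spin_destroy(&ep.lock);
		return 0;
	}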
+
+static void *alloc_ep(int size, gfp_t gfp)
+{
+ struct c4iw_ep_common *epc;
+
+ epc = kzalloc(size, gfp);
+ if (epc) {
+ kref_init(&epc->kref);
+ spin_lock_init(&epc->lock);
+ init_waitqueue_head(&epc->waitq);
+ }
+ PDBG("%s alloc ep %p\n", __func__, epc);
+ return epc;
+}
+
+void _c4iw_free_ep(struct kref *kref)
+{
+ struct c4iw_ep *ep;
+
+ ep = container_of(kref, struct c4iw_ep, com.kref);
+ PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
+ if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
+ cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
+ dst_release(ep->dst);
+ cxgb4_l2t_release(ep->l2t);
+ }
+ kfree(ep);
+}
+
+static void release_ep_resources(struct c4iw_ep *ep)
+{
+ set_bit(RELEASE_RESOURCES, &ep->com.flags);
+ c4iw_put_ep(&ep->com);
+}
+
+static int status2errno(int status)
+{
+ switch (status) {
+ case CPL_ERR_NONE:
+ return 0;
+ case CPL_ERR_CONN_RESET:
+ return -ECONNRESET;
+ case CPL_ERR_ARP_MISS:
+ return -EHOSTUNREACH;
+ case CPL_ERR_CONN_TIMEDOUT:
+ return -ETIMEDOUT;
+ case CPL_ERR_TCAM_FULL:
+ return -ENOMEM;
+ case CPL_ERR_CONN_EXIST:
+ return -EADDRINUSE;
+ default:
+ return -EIO;
+ }
+}
+
+/*
+ * Try and reuse skbs already allocated...
+ */
+static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
+{
+ if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
+ skb_trim(skb, 0);
+ skb_get(skb);
+ skb_reset_transport_header(skb);
+ } else {
+ skb = alloc_skb(len, gfp);
+ }
+ return skb;
+}
+
+static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip,
+ __be32 peer_ip, __be16 local_port,
+ __be16 peer_port, u8 tos)
+{
+ struct rtable *rt;
+ struct flowi fl = {
+ .oif = 0,
+ .nl_u = {
+ .ip4_u = {
+ .daddr = peer_ip,
+ .saddr = local_ip,
+ .tos = tos}
+ },
+ .proto = IPPROTO_TCP,
+ .uli_u = {
+ .ports = {
+ .sport = local_port,
+ .dport = peer_port}
+ }
+ };
+
+ if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
+ return NULL;
+ return rt;
+}
+
+static void arp_failure_discard(void *handle, struct sk_buff *skb)
+{
+ PDBG("%s c4iw_dev %p\n", __func__, handle);
+ kfree_skb(skb);
+}
+
+/*
+ * Handle an ARP failure for an active open.
+ */
+static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
+{
+ printk(KERN_ERR MOD "ARP failure duing connect\n");
+ kfree_skb(skb);
+}
+
+/*
+ * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant
+ * and send it along.
+ */
+static void abort_arp_failure(void *handle, struct sk_buff *skb)
+{
+ struct c4iw_rdev *rdev = handle;
+ struct cpl_abort_req *req = cplhdr(skb);
+
+ PDBG("%s rdev %p\n", __func__, rdev);
+ req->cmd = CPL_ABORT_NO_RST;
+ c4iw_ofld_send(rdev, skb);
+}
+
+static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
+{
+ unsigned int flowclen = 80;
+ struct fw_flowc_wr *flowc;
+ int i;
+
+ skb = get_skb(skb, flowclen, GFP_KERNEL);
+ flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
+
+ flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) |
+ FW_FLOWC_WR_NPARAMS(8));
+ flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen,
+ 16)) | FW_WR_FLOWID(ep->hwtid));
+
+ flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
+ flowc->mnemval[0].val = cpu_to_be32(0);
+ flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
+ flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
+ flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
+ flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
+ flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
+ flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
+ flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
+ flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
+ flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
+ flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
+ flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
+ flowc->mnemval[6].val = cpu_to_be32(snd_win);
+ flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
+ flowc->mnemval[7].val = cpu_to_be32(ep->emss);
+ /* Pad WR to 16 byte boundary */
+ flowc->mnemval[8].mnemonic = 0;
+ flowc->mnemval[8].val = 0;
+ for (i = 0; i < 9; i++) {
+ flowc->mnemval[i].r4[0] = 0;
+ flowc->mnemval[i].r4[1] = 0;
+ flowc->mnemval[i].r4[2] = 0;
+ }
+
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
+ c4iw_ofld_send(&ep->com.dev->rdev, skb);
+}
+
+static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
+{
+ struct cpl_close_con_req *req;
+ struct sk_buff *skb;
+ int wrlen = roundup(sizeof *req, 16);
+
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ skb = get_skb(NULL, wrlen, gfp);
+ if (!skb) {
+ printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
+ return -ENOMEM;
+ }
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
+ t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
+ req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
+ memset(req, 0, wrlen);
+ INIT_TP_WR(req, ep->hwtid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
+ ep->hwtid));
+ return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+}
+
+static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
+{
+ struct cpl_abort_req *req;
+ int wrlen = roundup(sizeof *req, 16);
+
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ skb = get_skb(skb, wrlen, gfp);
+ if (!skb) {
+ printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
+ __func__);
+ return -ENOMEM;
+ }
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
+ t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure);
+ req = (struct cpl_abort_req *) skb_put(skb, wrlen);
+ memset(req, 0, wrlen);
+ INIT_TP_WR(req, ep->hwtid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
+ req->cmd = CPL_ABORT_SEND_RST;
+ return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+}
+
+static int send_connect(struct c4iw_ep *ep)
+{
+ struct cpl_act_open_req *req;
+ struct sk_buff *skb;
+ u64 opt0;
+ u32 opt2;
+ unsigned int mtu_idx;
+ int wscale;
+ int wrlen = roundup(sizeof *req, 16);
+
+ PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);
+
+ skb = get_skb(NULL, wrlen, GFP_KERNEL);
+ if (!skb) {
+ printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
+ __func__);
+ return -ENOMEM;
+ }
+ set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->txq_idx);
+
+ cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
+ wscale = compute_wscale(rcv_win);
+ opt0 = KEEP_ALIVE(1) |
+ WND_SCALE(wscale) |
+ MSS_IDX(mtu_idx) |
+ L2T_IDX(ep->l2t->idx) |
+ TX_CHAN(ep->tx_chan) |
+ SMAC_SEL(ep->smac_idx) |
+ DSCP(ep->tos) |
+ RCV_BUFSIZ(rcv_win>>10);
+ opt2 = RX_CHANNEL(0) |
+ RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
+ if (enable_tcp_timestamps)
+ opt2 |= TSTAMPS_EN(1);
+ if (enable_tcp_sack)
+ opt2 |= SACK_EN(1);
+ if (wscale && enable_tcp_window_scaling)
+ opt2 |= WND_SCALE_EN(1);
+ t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
+
+ req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
+ INIT_TP_WR(req, 0);
+ OPCODE_TID(req) = cpu_to_be32(
+ MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid)));
+ req->local_port = ep->com.local_addr.sin_port;
+ req->peer_port = ep->com.remote_addr.sin_port;
+ req->local_ip = ep->com.local_addr.sin_addr.s_addr;
+ req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
+ req->opt0 = cpu_to_be64(opt0);
+ req->params = 0;
+ req->opt2 = cpu_to_be32(opt2);
+ return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+}
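+
send_connect() above packs the connection parameters into opt0/opt2; the window-scale value comes from compute_wscale(rcv_win), i.e. the smallest shift that lets the 16-bit TCP window field cover the configured receive window. The helper itself is defined elsewhere in the driver, so the version below is an assumption about its formula, shown only to make the WND_SCALE() field concrete:

	#include <stdio.h>

	/* assumed formula: smallest shift so that (65535 << wscale) covers the window */
	static int compute_wscale(int win)
	{
		int wscale = 0;

		while (wscale < 14 && (65535 << wscale) < win)
			wscale++;
		return wscale;
	}

	int main(void)
	{
		printf("rcv_win 256KB -> wscale %d\n", compute_wscale(256 * 1024)); /* 3 */
		printf("rcv_win 32KB  -> wscale %d\n", compute_wscale(32 * 1024));  /* 0 */
		return 0;
	}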
+
+static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb)
+{
+ int mpalen, wrlen;
+ struct fw_ofld_tx_data_wr *req;
+ struct mpa_message *mpa;
+
+ PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
+
+ BUG_ON(skb_cloned(skb));
+
+ mpalen = sizeof(*mpa) + ep->plen;
+ wrlen = roundup(mpalen + sizeof *req, 16);
+ skb = get_skb(skb, wrlen, GFP_KERNEL);
+ if (!skb) {
+ connect_reply_upcall(ep, -ENOMEM);
+ return;
+ }
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
+
+ req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
+ memset(req, 0, wrlen);
+ req->op_to_immdlen = cpu_to_be32(
+ FW_WR_OP(FW_OFLD_TX_DATA_WR) |
+ FW_WR_COMPL(1) |
+ FW_WR_IMMDLEN(mpalen));
+ req->flowid_len16 = cpu_to_be32(
+ FW_WR_FLOWID(ep->hwtid) |
+ FW_WR_LEN16(wrlen >> 4));
+ req->plen = cpu_to_be32(mpalen);
+ req->tunnel_to_proxy = cpu_to_be32(
+ FW_OFLD_TX_DATA_WR_FLUSH(1) |
+ FW_OFLD_TX_DATA_WR_SHOVE(1));
+
+ mpa = (struct mpa_message *)(req + 1);
+ memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
+ mpa->flags = (crc_enabled ? MPA_CRC : 0) |
+ (markers_enabled ? MPA_MARKERS : 0);
+ mpa->private_data_size = htons(ep->plen);
+ mpa->revision = mpa_rev;
+
+ if (ep->plen)
+ memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);
+
+ /*
+ * Reference the mpa skb. This ensures the data area
+ * will remain in memory until the hw acks the tx.
+ * Function fw4_ack() will deref it.
+ */
+ skb_get(skb);
+ t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
+ BUG_ON(ep->mpa_skb);
+ ep->mpa_skb = skb;
+ c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+ start_ep_timer(ep);
+ state_set(&ep->com, MPA_REQ_SENT);
+ ep->mpa_attr.initiator = 1;
+ return;
+}
+
+static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
+{
+ int mpalen, wrlen;
+ struct fw_ofld_tx_data_wr *req;
+ struct mpa_message *mpa;
+ struct sk_buff *skb;
+
+ PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
+
+ mpalen = sizeof(*mpa) + plen;
+ wrlen = roundup(mpalen + sizeof *req, 16);
+
+ skb = get_skb(NULL, wrlen, GFP_KERNEL);
+ if (!skb) {
+ printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
+ return -ENOMEM;
+ }
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
+
+ req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
+ memset(req, 0, wrlen);
+ req->op_to_immdlen = cpu_to_be32(
+ FW_WR_OP(FW_OFLD_TX_DATA_WR) |
+ FW_WR_COMPL(1) |
+ FW_WR_IMMDLEN(mpalen));
+ req->flowid_len16 = cpu_to_be32(
+ FW_WR_FLOWID(ep->hwtid) |
+ FW_WR_LEN16(wrlen >> 4));
+ req->plen = cpu_to_be32(mpalen);
+ req->tunnel_to_proxy = cpu_to_be32(
+ FW_OFLD_TX_DATA_WR_FLUSH(1) |
+ FW_OFLD_TX_DATA_WR_SHOVE(1));
+
+ mpa = (struct mpa_message *)(req + 1);
+ memset(mpa, 0, sizeof(*mpa));
+ memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
+ mpa->flags = MPA_REJECT;
+ mpa->revision = mpa_rev;
+ mpa->private_data_size = htons(plen);
+ if (plen)
+ memcpy(mpa->private_data, pdata, plen);
+
+ /*
+ * Reference the mpa skb again. This ensures the data area
+ * will remain in memory until the hw acks the tx.
+ * Function fw4_ack() will deref it.
+ */
+ skb_get(skb);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
+ t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
+ BUG_ON(ep->mpa_skb);
+ ep->mpa_skb = skb;
+ return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+}
+
+static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
+{
+ int mpalen, wrlen;
+ struct fw_ofld_tx_data_wr *req;
+ struct mpa_message *mpa;
+ struct sk_buff *skb;
+
+ PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
+
+ mpalen = sizeof(*mpa) + plen;
+ wrlen = roundup(mpalen + sizeof *req, 16);
+
+ skb = get_skb(NULL, wrlen, GFP_KERNEL);
+ if (!skb) {
+ printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
+ return -ENOMEM;
+ }
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
+
+ req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
+ memset(req, 0, wrlen);
+ req->op_to_immdlen = cpu_to_be32(
+ FW_WR_OP(FW_OFLD_TX_DATA_WR) |
+ FW_WR_COMPL(1) |
+ FW_WR_IMMDLEN(mpalen));
+ req->flowid_len16 = cpu_to_be32(
+ FW_WR_FLOWID(ep->hwtid) |
+ FW_WR_LEN16(wrlen >> 4));
+ req->plen = cpu_to_be32(mpalen);
+ req->tunnel_to_proxy = cpu_to_be32(
+ FW_OFLD_TX_DATA_WR_FLUSH(1) |
+ FW_OFLD_TX_DATA_WR_SHOVE(1));
+
+ mpa = (struct mpa_message *)(req + 1);
+ memset(mpa, 0, sizeof(*mpa));
+ memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
+ mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
+ (markers_enabled ? MPA_MARKERS : 0);
+ mpa->revision = mpa_rev;
+ mpa->private_data_size = htons(plen);
+ if (plen)
+ memcpy(mpa->private_data, pdata, plen);
+
+ /*
+ * Reference the mpa skb. This ensures the data area
+ * will remain in memory until the hw acks the tx.
+ * Function fw4_ack() will deref it.
+ */
+ skb_get(skb);
+ t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
+ ep->mpa_skb = skb;
+ state_set(&ep->com, MPA_REP_SENT);
+ return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+}
+
+static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct c4iw_ep *ep;
+ struct cpl_act_establish *req = cplhdr(skb);
+ unsigned int tid = GET_TID(req);
+ unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
+ struct tid_info *t = dev->rdev.lldi.tids;
+
+ ep = lookup_atid(t, atid);
+
+ PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
+ be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));
+
+ dst_confirm(ep->dst);
+
+ /* setup the hwtid for this connection */
+ ep->hwtid = tid;
+ cxgb4_insert_tid(t, ep, tid);
+
+ ep->snd_seq = be32_to_cpu(req->snd_isn);
+ ep->rcv_seq = be32_to_cpu(req->rcv_isn);
+
+ set_emss(ep, ntohs(req->tcp_opt));
+
+ /* dealloc the atid */
+ cxgb4_free_atid(t, atid);
+
+ /* start MPA negotiation */
+ send_flowc(ep, NULL);
+ send_mpa_req(ep, skb);
+
+ return 0;
+}
+
+static void close_complete_upcall(struct c4iw_ep *ep)
+{
+ struct iw_cm_event event;
+
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ memset(&event, 0, sizeof(event));
+ event.event = IW_CM_EVENT_CLOSE;
+ if (ep->com.cm_id) {
+ PDBG("close complete delivered ep %p cm_id %p tid %u\n",
+ ep, ep->com.cm_id, ep->hwtid);
+ ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+ ep->com.cm_id->rem_ref(ep->com.cm_id);
+ ep->com.cm_id = NULL;
+ ep->com.qp = NULL;
+ }
+}
+
+static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
+{
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ close_complete_upcall(ep);
+ state_set(&ep->com, ABORTING);
+ return send_abort(ep, skb, gfp);
+}
+
+static void peer_close_upcall(struct c4iw_ep *ep)
+{
+ struct iw_cm_event event;
+
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ memset(&event, 0, sizeof(event));
+ event.event = IW_CM_EVENT_DISCONNECT;
+ if (ep->com.cm_id) {
+ PDBG("peer close delivered ep %p cm_id %p tid %u\n",
+ ep, ep->com.cm_id, ep->hwtid);
+ ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+ }
+}
+
+static void peer_abort_upcall(struct c4iw_ep *ep)
+{
+ struct iw_cm_event event;
+
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ memset(&event, 0, sizeof(event));
+ event.event = IW_CM_EVENT_CLOSE;
+ event.status = -ECONNRESET;
+ if (ep->com.cm_id) {
+ PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
+ ep->com.cm_id, ep->hwtid);
+ ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+ ep->com.cm_id->rem_ref(ep->com.cm_id);
+ ep->com.cm_id = NULL;
+ ep->com.qp = NULL;
+ }
+}
+
+static void connect_reply_upcall(struct c4iw_ep *ep, int status)
+{
+ struct iw_cm_event event;
+
+ PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
+ memset(&event, 0, sizeof(event));
+ event.event = IW_CM_EVENT_CONNECT_REPLY;
+ event.status = status;
+ event.local_addr = ep->com.local_addr;
+ event.remote_addr = ep->com.remote_addr;
+
+ if ((status == 0) || (status == -ECONNREFUSED)) {
+ event.private_data_len = ep->plen;
+ event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
+ }
+ if (ep->com.cm_id) {
+ PDBG("%s ep %p tid %u status %d\n", __func__, ep,
+ ep->hwtid, status);
+ ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+ }
+ if (status < 0) {
+ ep->com.cm_id->rem_ref(ep->com.cm_id);
+ ep->com.cm_id = NULL;
+ ep->com.qp = NULL;
+ }
+}
+
+static void connect_request_upcall(struct c4iw_ep *ep)
+{
+ struct iw_cm_event event;
+
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ memset(&event, 0, sizeof(event));
+ event.event = IW_CM_EVENT_CONNECT_REQUEST;
+ event.local_addr = ep->com.local_addr;
+ event.remote_addr = ep->com.remote_addr;
+ event.private_data_len = ep->plen;
+ event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
+ event.provider_data = ep;
+ if (state_read(&ep->parent_ep->com) != DEAD) {
+ c4iw_get_ep(&ep->com);
+ ep->parent_ep->com.cm_id->event_handler(
+ ep->parent_ep->com.cm_id,
+ &event);
+ }
+ c4iw_put_ep(&ep->parent_ep->com);
+ ep->parent_ep = NULL;
+}
+
+static void established_upcall(struct c4iw_ep *ep)
+{
+ struct iw_cm_event event;
+
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ memset(&event, 0, sizeof(event));
+ event.event = IW_CM_EVENT_ESTABLISHED;
+ if (ep->com.cm_id) {
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+ }
+}
+
+static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
+{
+ struct cpl_rx_data_ack *req;
+ struct sk_buff *skb;
+ int wrlen = roundup(sizeof *req, 16);
+
+ PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
+ skb = get_skb(NULL, wrlen, GFP_KERNEL);
+ if (!skb) {
+ printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
+ return 0;
+ }
+
+ req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
+ memset(req, 0, wrlen);
+ INIT_TP_WR(req, ep->hwtid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
+ ep->hwtid));
+ req->credit_dack = cpu_to_be32(credits);
+ set_wr_txq(skb, CPL_PRIORITY_ACK, ep->txq_idx);
+ c4iw_ofld_send(&ep->com.dev->rdev, skb);
+ return credits;
+}
+
+static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
+{
+ struct mpa_message *mpa;
+ u16 plen;
+ struct c4iw_qp_attributes attrs;
+ enum c4iw_qp_attr_mask mask;
+ int err;
+
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+
+ /*
+ * Stop mpa timer. If it expired, then the state has
+ * changed and we bail since ep_timeout already aborted
+ * the connection.
+ */
+ stop_ep_timer(ep);
+ if (state_read(&ep->com) != MPA_REQ_SENT)
+ return;
+
+ /*
+ * If we get more than the supported amount of private data
+ * then we must fail this connection.
+ */
+ if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ /*
+ * copy the new data into our accumulation buffer.
+ */
+ skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
+ skb->len);
+ ep->mpa_pkt_len += skb->len;
+
+ /*
+ * if we don't even have the mpa message, then bail.
+ */
+ if (ep->mpa_pkt_len < sizeof(*mpa))
+ return;
+ mpa = (struct mpa_message *) ep->mpa_pkt;
+
+ /* Validate MPA header. */
+ if (mpa->revision != mpa_rev) {
+ err = -EPROTO;
+ goto err;
+ }
+ if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
+ err = -EPROTO;
+ goto err;
+ }
+
+ plen = ntohs(mpa->private_data_size);
+
+ /*
+ * Fail if there's too much private data.
+ */
+ if (plen > MPA_MAX_PRIVATE_DATA) {
+ err = -EPROTO;
+ goto err;
+ }
+
+ /*
+ * If plen does not account for pkt size
+ */
+ if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
+ err = -EPROTO;
+ goto err;
+ }
+
+ ep->plen = (u8) plen;
+
+ /*
+ * If we don't have all the pdata yet, then bail.
+ * We'll continue processing when more data arrives.
+ */
+ if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
+ return;
+
+ if (mpa->flags & MPA_REJECT) {
+ err = -ECONNREFUSED;
+ goto err;
+ }
+
+ /*
+ * If we get here we have accumulated the entire mpa
+ * start reply message including private data. And
+ * the MPA header is valid.
+ */
+ state_set(&ep->com, FPDU_MODE);
+ ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
+ ep->mpa_attr.recv_marker_enabled = markers_enabled;
+ ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
+ ep->mpa_attr.version = mpa_rev;
+ ep->mpa_attr.p2p_type = peer2peer ? p2p_type :
+ FW_RI_INIT_P2PTYPE_DISABLED;
+ PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
+ "xmit_marker_enabled=%d, version=%d\n", __func__,
+ ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
+ ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
+
+ attrs.mpa_attr = ep->mpa_attr;
+ attrs.max_ird = ep->ird;
+ attrs.max_ord = ep->ord;
+ attrs.llp_stream_handle = ep;
+ attrs.next_state = C4IW_QP_STATE_RTS;
+
+ mask = C4IW_QP_ATTR_NEXT_STATE |
+ C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
+ C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;
+
+ /* bind QP and TID with INIT_WR */
+ err = c4iw_modify_qp(ep->com.qp->rhp,
+ ep->com.qp, mask, &attrs, 1);
+ if (err)
+ goto err;
+ goto out;
+err:
+ abort_connection(ep, skb, GFP_KERNEL);
+out:
+ connect_reply_upcall(ep, err);
+ return;
+}
+
+static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
+{
+ struct mpa_message *mpa;
+ u16 plen;
+
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+
+ if (state_read(&ep->com) != MPA_REQ_WAIT)
+ return;
+
+ /*
+ * If we get more than the supported amount of private data
+ * then we must fail this connection.
+ */
+ if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
+ stop_ep_timer(ep);
+ abort_connection(ep, skb, GFP_KERNEL);
+ return;
+ }
+
+ PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
+
+ /*
+ * Copy the new data into our accumulation buffer.
+ */
+ skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
+ skb->len);
+ ep->mpa_pkt_len += skb->len;
+
+ /*
+ * If we don't even have the mpa message, then bail.
+ * We'll continue processing when more data arrives.
+ */
+ if (ep->mpa_pkt_len < sizeof(*mpa))
+ return;
+
+ PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
+ stop_ep_timer(ep);
+ mpa = (struct mpa_message *) ep->mpa_pkt;
+
+ /*
+ * Validate MPA Header.
+ */
+ if (mpa->revision != mpa_rev) {
+ abort_connection(ep, skb, GFP_KERNEL);
+ return;
+ }
+
+ if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
+ abort_connection(ep, skb, GFP_KERNEL);
+ return;
+ }
+
+ plen = ntohs(mpa->private_data_size);
+
+ /*
+ * Fail if there's too much private data.
+ */
+ if (plen > MPA_MAX_PRIVATE_DATA) {
+ abort_connection(ep, skb, GFP_KERNEL);
+ return;
+ }
+
+ /*
+ * If plen does not account for pkt size
+ */
+ if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
+ abort_connection(ep, skb, GFP_KERNEL);
+ return;
+ }
+ ep->plen = (u8) plen;
+
+ /*
+ * If we don't have all the pdata yet, then bail.
+ */
+ if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
+ return;
+
+ /*
+ * If we get here we have accumulated the entire mpa
+ * start request message including private data.
+ */
+ ep->mpa_attr.initiator = 0;
+ ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
+ ep->mpa_attr.recv_marker_enabled = markers_enabled;
+ ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
+ ep->mpa_attr.version = mpa_rev;
+ ep->mpa_attr.p2p_type = peer2peer ? p2p_type :
+ FW_RI_INIT_P2PTYPE_DISABLED;
+ PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
+ "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
+ ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
+ ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
+ ep->mpa_attr.p2p_type);
+
+ state_set(&ep->com, MPA_REQ_RCVD);
+
+ /* drive upcall */
+ connect_request_upcall(ep);
+ return;
+}
+
+static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct c4iw_ep *ep;
+ struct cpl_rx_data *hdr = cplhdr(skb);
+ unsigned int dlen = ntohs(hdr->len);
+ unsigned int tid = GET_TID(hdr);
+ struct tid_info *t = dev->rdev.lldi.tids;
+
+ ep = lookup_tid(t, tid);
+ PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
+ skb_pull(skb, sizeof(*hdr));
+ skb_trim(skb, dlen);
+
+ ep->rcv_seq += dlen;
+ BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));
+
+ /* update RX credits */
+ update_rx_credits(ep, dlen);
+
+ switch (state_read(&ep->com)) {
+ case MPA_REQ_SENT:
+ process_mpa_reply(ep, skb);
+ break;
+ case MPA_REQ_WAIT:
+ process_mpa_request(ep, skb);
+ break;
+ case MPA_REP_SENT:
+ break;
+ default:
+ printk(KERN_ERR MOD "%s Unexpected streaming data."
+ " ep %p state %d tid %u\n",
+ __func__, ep, state_read(&ep->com), ep->hwtid);
+
+ /*
+ * The ep will timeout and inform the ULP of the failure.
+ * See ep_timeout().
+ */
+ break;
+ }
+ return 0;
+}
+
+static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct c4iw_ep *ep;
+ struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
+ unsigned long flags;
+ int release = 0;
+ unsigned int tid = GET_TID(rpl);
+ struct tid_info *t = dev->rdev.lldi.tids;
+
+ ep = lookup_tid(t, tid);
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ BUG_ON(!ep);
+ spin_lock_irqsave(&ep->com.lock, flags);
+ switch (ep->com.state) {
+ case ABORTING:
+ __state_set(&ep->com, DEAD);
+ release = 1;
+ break;
+ default:
+ printk(KERN_ERR "%s ep %p state %d\n",
+ __func__, ep, ep->com.state);
+ break;
+ }
+ spin_unlock_irqrestore(&ep->com.lock, flags);
+
+ if (release)
+ release_ep_resources(ep);
+ return 0;
+}
+
+/*
+ * Return whether a failed active open has allocated a TID
+ */
+static inline int act_open_has_tid(int status)
+{
+ return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
+ status != CPL_ERR_ARP_MISS;
+}
+
+static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct c4iw_ep *ep;
+ struct cpl_act_open_rpl *rpl = cplhdr(skb);
+ unsigned int atid = GET_TID_TID(GET_AOPEN_ATID(
+ ntohl(rpl->atid_status)));
+ struct tid_info *t = dev->rdev.lldi.tids;
+ int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status));
+
+ ep = lookup_atid(t, atid);
+
+ PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
+ status, status2errno(status));
+
+ if (status == CPL_ERR_RTX_NEG_ADVICE) {
+ printk(KERN_WARNING MOD "Connection problems for atid %u\n",
+ atid);
+ return 0;
+ }
+
+ connect_reply_upcall(ep, status2errno(status));
+ state_set(&ep->com, DEAD);
+
+ if (status && act_open_has_tid(status))
+ cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));
+
+ cxgb4_free_atid(t, atid);
+ dst_release(ep->dst);
+ cxgb4_l2t_release(ep->l2t);
+ c4iw_put_ep(&ep->com);
+
+ return 0;
+}
+
+static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct cpl_pass_open_rpl *rpl = cplhdr(skb);
+ struct tid_info *t = dev->rdev.lldi.tids;
+ unsigned int stid = GET_TID(rpl);
+ struct c4iw_listen_ep *ep = lookup_stid(t, stid);
+
+ if (!ep) {
+ printk(KERN_ERR MOD "stid %d lookup failure!\n", stid);
+ return 0;
+ }
+ PDBG("%s ep %p status %d error %d\n", __func__, ep,
+ rpl->status, status2errno(rpl->status));
+ ep->com.rpl_err = status2errno(rpl->status);
+ ep->com.rpl_done = 1;
+ wake_up(&ep->com.waitq);
+
+ return 0;
+}
+
+static int listen_stop(struct c4iw_listen_ep *ep)
+{
+ struct sk_buff *skb;
+ struct cpl_close_listsvr_req *req;
+
+ PDBG("%s ep %p\n", __func__, ep);
+ skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
+ if (!skb) {
+ printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
+ return -ENOMEM;
+ }
+ req = (struct cpl_close_listsvr_req *) skb_put(skb, sizeof(*req));
+ INIT_TP_WR(req, 0);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ,
+ ep->stid));
+ req->reply_ctrl = cpu_to_be16(
+ QUEUENO(ep->com.dev->rdev.lldi.rxq_ids[0]));
+ set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
+ return c4iw_ofld_send(&ep->com.dev->rdev, skb);
+}
+
+static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
+ struct tid_info *t = dev->rdev.lldi.tids;
+ unsigned int stid = GET_TID(rpl);
+ struct c4iw_listen_ep *ep = lookup_stid(t, stid);
+
+ PDBG("%s ep %p\n", __func__, ep);
+ ep->com.rpl_err = status2errno(rpl->status);
+ ep->com.rpl_done = 1;
+ wake_up(&ep->com.waitq);
+ return 0;
+}
+
+static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
+ struct cpl_pass_accept_req *req)
+{
+ struct cpl_pass_accept_rpl *rpl;
+ unsigned int mtu_idx;
+ u64 opt0;
+ u32 opt2;
+ int wscale;
+
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ BUG_ON(skb_cloned(skb));
+ skb_trim(skb, sizeof(*rpl));
+ skb_get(skb);
+ cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
+ wscale = compute_wscale(rcv_win);
+ opt0 = KEEP_ALIVE(1) |
+ WND_SCALE(wscale) |
+ MSS_IDX(mtu_idx) |
+ L2T_IDX(ep->l2t->idx) |
+ TX_CHAN(ep->tx_chan) |
+ SMAC_SEL(ep->smac_idx) |
+ DSCP(ep->tos) |
+ RCV_BUFSIZ(rcv_win>>10);
+ opt2 = RX_CHANNEL(0) |
+ RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
+
+ if (enable_tcp_timestamps && req->tcpopt.tstamp)
+ opt2 |= TSTAMPS_EN(1);
+ if (enable_tcp_sack && req->tcpopt.sack)
+ opt2 |= SACK_EN(1);
+ if (wscale && enable_tcp_window_scaling)
+ opt2 |= WND_SCALE_EN(1);
+
+ rpl = cplhdr(skb);
+ INIT_TP_WR(rpl, ep->hwtid);
+ OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
+ ep->hwtid));
+ rpl->opt0 = cpu_to_be64(opt0);
+ rpl->opt2 = cpu_to_be32(opt2);
+ set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->txq_idx);
+ c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+
+ return;
+}
+
+static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip,
+ struct sk_buff *skb)
+{
+ PDBG("%s c4iw_dev %p tid %u peer_ip %x\n", __func__, dev, hwtid,
+ peer_ip);
+ BUG_ON(skb_cloned(skb));
+ skb_trim(skb, sizeof(struct cpl_tid_release));
+ skb_get(skb);
+ release_tid(&dev->rdev, hwtid, skb);
+ return;
+}
+
+static void get_4tuple(struct cpl_pass_accept_req *req,
+ __be32 *local_ip, __be32 *peer_ip,
+ __be16 *local_port, __be16 *peer_port)
+{
+ int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len));
+ int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len));
+ struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
+ struct tcphdr *tcp = (struct tcphdr *)
+ ((u8 *)(req + 1) + eth_len + ip_len);
+
+ PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
+ ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
+ ntohs(tcp->dest));
+
+ *peer_ip = ip->saddr;
+ *local_ip = ip->daddr;
+ *peer_port = tcp->source;
+ *local_port = tcp->dest;
+
+ return;
+}
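+
get_4tuple() above recovers the connection 4-tuple from the raw Ethernet/IP/TCP headers that the hardware appends to the CPL_PASS_ACCEPT_REQ message. The same parsing done on a plain packet buffer in user space, with the header lengths passed in directly instead of being decoded from the hdr_len word (demo only; strict-alignment machines would need memcpy here):

	#include <stdio.h>
	#include <stdint.h>
	#include <arpa/inet.h>
	#include <netinet/ip.h>
	#include <netinet/tcp.h>

	struct four_tuple {
		uint32_t local_ip, peer_ip;	/* network byte order */
		uint16_t local_port, peer_port;	/* network byte order */
	};

	static void parse_4tuple(const uint8_t *pkt, int eth_len, int ip_len,
				 struct four_tuple *t)
	{
		const struct iphdr *ip = (const struct iphdr *)(pkt + eth_len);
		const struct tcphdr *tcp = (const struct tcphdr *)(pkt + eth_len + ip_len);

		t->peer_ip    = ip->saddr;	/* sender of the SYN is the peer */
		t->local_ip   = ip->daddr;
		t->peer_port  = tcp->source;
		t->local_port = tcp->dest;
	}

	int main(void)
	{
		/* 14-byte Ethernet + 20-byte IP + 20-byte TCP, mostly zeroed for the demo */
		uint8_t pkt[54] = {0};
		struct iphdr *ip = (struct iphdr *)(pkt + 14);
		struct tcphdr *tcp = (struct tcphdr *)(pkt + 34);
		struct four_tuple t;

		ip->saddr = htonl(0xc0a80001);	/* 192.168.0.1 */
		ip->daddr = htonl(0xc0a80002);	/* 192.168.0.2 */
		tcp->source = htons(40000);
		tcp->dest = htons(5001);

		parse_4tuple(pkt, 14, 20, &t);
		printf("peer %x:%u -> local %x:%u\n",
		       ntohl(t.peer_ip), ntohs(t.peer_port),
		       ntohl(t.local_ip), ntohs(t.local_port));
		return 0;
	}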
+
+static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct c4iw_ep *child_ep, *parent_ep;
+ struct cpl_pass_accept_req *req = cplhdr(skb);
+ unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
+ struct tid_info *t = dev->rdev.lldi.tids;
+ unsigned int hwtid = GET_TID(req);
+ struct dst_entry *dst;
+ struct l2t_entry *l2t;
+ struct rtable *rt;
+ __be32 local_ip, peer_ip;
+ __be16 local_port, peer_port;
+ struct net_device *pdev;
+ u32 tx_chan, smac_idx;
+ u16 rss_qid;
+ u32 mtu;
+ int step;
+ int txq_idx;
+
+ parent_ep = lookup_stid(t, stid);
+ PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
+
+ get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port);
+
+ if (state_read(&parent_ep->com) != LISTEN) {
+ printk(KERN_ERR "%s - listening ep not in LISTEN\n",
+ __func__);
+ goto reject;
+ }
+
+ /* Find output route */
+ rt = find_route(dev, local_ip, peer_ip, local_port, peer_port,
+ GET_POPEN_TOS(ntohl(req->tos_stid)));
+ if (!rt) {
+ printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
+ __func__);
+ goto reject;
+ }
+ dst = &rt->u.dst;
+ if (dst->neighbour->dev->flags & IFF_LOOPBACK) {
+ pdev = ip_dev_find(&init_net, peer_ip);
+ BUG_ON(!pdev);
+ l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour,
+ pdev, 0);
+ mtu = pdev->mtu;
+ tx_chan = cxgb4_port_chan(pdev);
+ smac_idx = tx_chan << 1;
+ step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
+ txq_idx = cxgb4_port_idx(pdev) * step;
+ step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
+ rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step];
+ dev_put(pdev);
+ } else {
+ l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour,
+ dst->neighbour->dev, 0);
+ mtu = dst_mtu(dst);
+ tx_chan = cxgb4_port_chan(dst->neighbour->dev);
+ smac_idx = tx_chan << 1;
+ step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
+ txq_idx = cxgb4_port_idx(dst->neighbour->dev) * step;
+ step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
+ rss_qid = dev->rdev.lldi.rxq_ids[
+ cxgb4_port_idx(dst->neighbour->dev) * step];
+ }
+ if (!l2t) {
+ printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
+ __func__);
+ dst_release(dst);
+ goto reject;
+ }
+
+ child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
+ if (!child_ep) {
+ printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
+ __func__);
+ cxgb4_l2t_release(l2t);
+ dst_release(dst);
+ goto reject;
+ }
+ state_set(&child_ep->com, CONNECTING);
+ child_ep->com.dev = dev;
+ child_ep->com.cm_id = NULL;
+ child_ep->com.local_addr.sin_family = PF_INET;
+ child_ep->com.local_addr.sin_port = local_port;
+ child_ep->com.local_addr.sin_addr.s_addr = local_ip;
+ child_ep->com.remote_addr.sin_family = PF_INET;
+ child_ep->com.remote_addr.sin_port = peer_port;
+ child_ep->com.remote_addr.sin_addr.s_addr = peer_ip;
+ c4iw_get_ep(&parent_ep->com);
+ child_ep->parent_ep = parent_ep;
+ child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
+ child_ep->l2t = l2t;
+ child_ep->dst = dst;
+ child_ep->hwtid = hwtid;
+ child_ep->tx_chan = tx_chan;
+ child_ep->smac_idx = smac_idx;
+ child_ep->rss_qid = rss_qid;
+ child_ep->mtu = mtu;
+ child_ep->txq_idx = txq_idx;
+
+ PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
+ tx_chan, smac_idx, rss_qid);
+
+ init_timer(&child_ep->timer);
+ cxgb4_insert_tid(t, child_ep, hwtid);
+ accept_cr(child_ep, peer_ip, skb, req);
+ goto out;
+reject:
+ reject_cr(dev, hwtid, peer_ip, skb);
+out:
+ return 0;
+}
+
+static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct c4iw_ep *ep;
+ struct cpl_pass_establish *req = cplhdr(skb);
+ struct tid_info *t = dev->rdev.lldi.tids;
+ unsigned int tid = GET_TID(req);
+
+ ep = lookup_tid(t, tid);
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ ep->snd_seq = be32_to_cpu(req->snd_isn);
+ ep->rcv_seq = be32_to_cpu(req->rcv_isn);
+
+ set_emss(ep, ntohs(req->tcp_opt));
+
+ dst_confirm(ep->dst);
+ state_set(&ep->com, MPA_REQ_WAIT);
+ start_ep_timer(ep);
+ send_flowc(ep, skb);
+
+ return 0;
+}
+
+static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct cpl_peer_close *hdr = cplhdr(skb);
+ struct c4iw_ep *ep;
+ struct c4iw_qp_attributes attrs;
+ unsigned long flags;
+ int disconnect = 1;
+ int release = 0;
+ int closing = 0;
+ struct tid_info *t = dev->rdev.lldi.tids;
+ unsigned int tid = GET_TID(hdr);
+ int start_timer = 0;
+ int stop_timer = 0;
+
+ ep = lookup_tid(t, tid);
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ dst_confirm(ep->dst);
+
+ spin_lock_irqsave(&ep->com.lock, flags);
+ switch (ep->com.state) {
+ case MPA_REQ_WAIT:
+ __state_set(&ep->com, CLOSING);
+ break;
+ case MPA_REQ_SENT:
+ __state_set(&ep->com, CLOSING);
+ connect_reply_upcall(ep, -ECONNRESET);
+ break;
+ case MPA_REQ_RCVD:
+
+ /*
+ * We're going to mark this endpoint DEAD, but keep
+ * the reference on it until the ULP accepts or
+ * rejects the CR. Also wake up anyone waiting
+ * in rdma connection migration (see c4iw_accept_cr()).
+ */
+ __state_set(&ep->com, CLOSING);
+ ep->com.rpl_done = 1;
+ ep->com.rpl_err = -ECONNRESET;
+ PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
+ wake_up(&ep->com.waitq);
+ break;
+ case MPA_REP_SENT:
+ __state_set(&ep->com, CLOSING);
+ ep->com.rpl_done = 1;
+ ep->com.rpl_err = -ECONNRESET;
+ PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
+ wake_up(&ep->com.waitq);
+ break;
+ case FPDU_MODE:
+ start_timer = 1;
+ __state_set(&ep->com, CLOSING);
+ closing = 1;
+ peer_close_upcall(ep);
+ break;
+ case ABORTING:
+ disconnect = 0;
+ break;
+ case CLOSING:
+ __state_set(&ep->com, MORIBUND);
+ disconnect = 0;
+ break;
+ case MORIBUND:
+ stop_timer = 1;
+ if (ep->com.cm_id && ep->com.qp) {
+ attrs.next_state = C4IW_QP_STATE_IDLE;
+ c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+ C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+ }
+ close_complete_upcall(ep);
+ __state_set(&ep->com, DEAD);
+ release = 1;
+ disconnect = 0;
+ break;
+ case DEAD:
+ disconnect = 0;
+ break;
+ default:
+ BUG_ON(1);
+ }
+ spin_unlock_irqrestore(&ep->com.lock, flags);
+ if (closing) {
+ attrs.next_state = C4IW_QP_STATE_CLOSING;
+ c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+ C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+ }
+ if (start_timer)
+ start_ep_timer(ep);
+ if (stop_timer)
+ stop_ep_timer(ep);
+ if (disconnect)
+ c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
+ if (release)
+ release_ep_resources(ep);
+ return 0;
+}
+
+/*
+ * Returns whether an ABORT_REQ_RSS message is a negative advice.
+ */
+static int is_neg_adv_abort(unsigned int status)
+{
+ return status == CPL_ERR_RTX_NEG_ADVICE ||
+ status == CPL_ERR_PERSIST_NEG_ADVICE;
+}
+
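+/*
+ * Handle CPL_ABORT_REQ_RSS from the peer.  Negative advice aborts are
+ * transient and ignored.  Otherwise the ULP is notified according to the
+ * connection state (the QP is moved to ERROR if one is bound), the
+ * endpoint is marked DEAD unless an abort was already in progress, and a
+ * CPL_ABORT_RPL carrying CPL_ABORT_NO_RST is returned to the hardware.
+ */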
+static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct cpl_abort_req_rss *req = cplhdr(skb);
+ struct c4iw_ep *ep;
+ struct cpl_abort_rpl *rpl;
+ struct sk_buff *rpl_skb;
+ struct c4iw_qp_attributes attrs;
+ int ret;
+ int release = 0;
+ unsigned long flags;
+ struct tid_info *t = dev->rdev.lldi.tids;
+ unsigned int tid = GET_TID(req);
+ int stop_timer = 0;
+
+ ep = lookup_tid(t, tid);
+ if (is_neg_adv_abort(req->status)) {
+ PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
+ ep->hwtid);
+ return 0;
+ }
+ spin_lock_irqsave(&ep->com.lock, flags);
+ PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
+ ep->com.state);
+ switch (ep->com.state) {
+ case CONNECTING:
+ break;
+ case MPA_REQ_WAIT:
+ stop_timer = 1;
+ break;
+ case MPA_REQ_SENT:
+ stop_timer = 1;
+ connect_reply_upcall(ep, -ECONNRESET);
+ break;
+ case MPA_REP_SENT:
+ ep->com.rpl_done = 1;
+ ep->com.rpl_err = -ECONNRESET;
+ PDBG("waking up ep %p\n", ep);
+ wake_up(&ep->com.waitq);
+ break;
+ case MPA_REQ_RCVD:
+
+ /*
+ * We're going to mark this endpoint DEAD, but keep
+ * the reference on it until the ULP accepts or
+ * rejects the CR. Also wake up anyone waiting
+ * in rdma connection migration (see c4iw_accept_cr()).
+ */
+ ep->com.rpl_done = 1;
+ ep->com.rpl_err = -ECONNRESET;
+ PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
+ wake_up(&ep->com.waitq);
+ break;
+ case MORIBUND:
+ case CLOSING:
+ stop_timer = 1;
+ /*FALLTHROUGH*/
+ case FPDU_MODE:
+ if (ep->com.cm_id && ep->com.qp) {
+ attrs.next_state = C4IW_QP_STATE_ERROR;
+ ret = c4iw_modify_qp(ep->com.qp->rhp,
+ ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
+ &attrs, 1);
+ if (ret)
+ printk(KERN_ERR MOD
+ "%s - qp <- error failed!\n",
+ __func__);
+ }
+ peer_abort_upcall(ep);
+ break;
+ case ABORTING:
+ break;
+ case DEAD:
+ PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
+ spin_unlock_irqrestore(&ep->com.lock, flags);
+ return 0;
+ default:
+ BUG_ON(1);
+ break;
+ }
+ dst_confirm(ep->dst);
+ if (ep->com.state != ABORTING) {
+ __state_set(&ep->com, DEAD);
+ release = 1;
+ }
+ spin_unlock_irqrestore(&ep->com.lock, flags);
+
+ rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
+ if (!rpl_skb) {
+ printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
+ __func__);
+ release = 1;
+ goto out;
+ }
+ set_wr_txq(rpl_skb, CPL_PRIORITY_DATA, ep->txq_idx);
+ rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
+ INIT_TP_WR(rpl, ep->hwtid);
+ OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
+ rpl->cmd = CPL_ABORT_NO_RST;
+ c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
+out:
+ if (stop_timer)
+ stop_ep_timer(ep);
+ if (release)
+ release_ep_resources(ep);
+ return 0;
+}
+
+static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct c4iw_ep *ep;
+ struct c4iw_qp_attributes attrs;
+ struct cpl_close_con_rpl *rpl = cplhdr(skb);
+ unsigned long flags;
+ int release = 0;
+ struct tid_info *t = dev->rdev.lldi.tids;
+ unsigned int tid = GET_TID(rpl);
+ int stop_timer = 0;
+
+ ep = lookup_tid(t, tid);
+
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ BUG_ON(!ep);
+
+ /* The cm_id may be null if we failed to connect */
+ spin_lock_irqsave(&ep->com.lock, flags);
+ switch (ep->com.state) {
+ case CLOSING:
+ __state_set(&ep->com, MORIBUND);
+ break;
+ case MORIBUND:
+ stop_timer = 1;
+ if ((ep->com.cm_id) && (ep->com.qp)) {
+ attrs.next_state = C4IW_QP_STATE_IDLE;
+ c4iw_modify_qp(ep->com.qp->rhp,
+ ep->com.qp,
+ C4IW_QP_ATTR_NEXT_STATE,
+ &attrs, 1);
+ }
+ close_complete_upcall(ep);
+ __state_set(&ep->com, DEAD);
+ release = 1;
+ break;
+ case ABORTING:
+ case DEAD:
+ break;
+ default:
+ BUG_ON(1);
+ break;
+ }
+ spin_unlock_irqrestore(&ep->com.lock, flags);
+ if (stop_timer)
+ stop_ep_timer(ep);
+ if (release)
+ release_ep_resources(ep);
+ return 0;
+}
+
+static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct c4iw_ep *ep;
+ struct cpl_rdma_terminate *term = cplhdr(skb);
+ struct tid_info *t = dev->rdev.lldi.tids;
+ unsigned int tid = GET_TID(term);
+
+ ep = lookup_tid(t, tid);
+
+ if (state_read(&ep->com) != FPDU_MODE)
+ return 0;
+
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ skb_pull(skb, sizeof *term);
+ PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
+ skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
+ skb->len);
+ ep->com.qp->attr.terminate_msg_len = skb->len;
+ ep->com.qp->attr.is_terminate_local = 0;
+ return 0;
+}
+
+/*
+ * Upcall from the adapter indicating data has been transmitted.
+ * For us it's just the single MPA request or reply. We can now free
+ * the skb holding the mpa message.
+ */
+static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct c4iw_ep *ep;
+ struct cpl_fw4_ack *hdr = cplhdr(skb);
+ u8 credits = hdr->credits;
+ unsigned int tid = GET_TID(hdr);
+ struct tid_info *t = dev->rdev.lldi.tids;
+
+ ep = lookup_tid(t, tid);
+ PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
+ if (credits == 0) {
+ PDBG(KERN_ERR "%s 0 credit ack ep %p tid %u state %u\n",
+ __func__, ep, ep->hwtid, state_read(&ep->com));
+ return 0;
+ }
+
+ dst_confirm(ep->dst);
+ if (ep->mpa_skb) {
+ PDBG("%s last streaming msg ack ep %p tid %u state %u "
+ "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
+ state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
+ kfree_skb(ep->mpa_skb);
+ ep->mpa_skb = NULL;
+ }
+ return 0;
+}
+
+int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
+{
+ int err;
+ struct c4iw_ep *ep = to_ep(cm_id);
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+
+ if (state_read(&ep->com) == DEAD) {
+ c4iw_put_ep(&ep->com);
+ return -ECONNRESET;
+ }
+ BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
+ if (mpa_rev == 0)
+ abort_connection(ep, NULL, GFP_KERNEL);
+ else {
+ err = send_mpa_reject(ep, pdata, pdata_len);
+ err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
+ }
+ c4iw_put_ep(&ep->com);
+ return 0;
+}
+
+int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+{
+ int err;
+ struct c4iw_qp_attributes attrs;
+ enum c4iw_qp_attr_mask mask;
+ struct c4iw_ep *ep = to_ep(cm_id);
+ struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
+ struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
+
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ if (state_read(&ep->com) == DEAD) {
+ err = -ECONNRESET;
+ goto err;
+ }
+
+ BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
+ BUG_ON(!qp);
+
+ if ((conn_param->ord > c4iw_max_read_depth) ||
+ (conn_param->ird > c4iw_max_read_depth)) {
+ abort_connection(ep, NULL, GFP_KERNEL);
+ err = -EINVAL;
+ goto err;
+ }
+
+ cm_id->add_ref(cm_id);
+ ep->com.cm_id = cm_id;
+ ep->com.qp = qp;
+
+ ep->ird = conn_param->ird;
+ ep->ord = conn_param->ord;
+
+ if (peer2peer && ep->ird == 0)
+ ep->ird = 1;
+
+ PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
+
+ /* bind QP to EP and move to RTS */
+ attrs.mpa_attr = ep->mpa_attr;
+ attrs.max_ird = ep->ird;
+ attrs.max_ord = ep->ord;
+ attrs.llp_stream_handle = ep;
+ attrs.next_state = C4IW_QP_STATE_RTS;
+
+ /* bind QP and TID with INIT_WR */
+ mask = C4IW_QP_ATTR_NEXT_STATE |
+ C4IW_QP_ATTR_LLP_STREAM_HANDLE |
+ C4IW_QP_ATTR_MPA_ATTR |
+ C4IW_QP_ATTR_MAX_IRD |
+ C4IW_QP_ATTR_MAX_ORD;
+
+ err = c4iw_modify_qp(ep->com.qp->rhp,
+ ep->com.qp, mask, &attrs, 1);
+ if (err)
+ goto err1;
+ err = send_mpa_reply(ep, conn_param->private_data,
+ conn_param->private_data_len);
+ if (err)
+ goto err1;
+
+ state_set(&ep->com, FPDU_MODE);
+ established_upcall(ep);
+ c4iw_put_ep(&ep->com);
+ return 0;
+err1:
+ ep->com.cm_id = NULL;
+ ep->com.qp = NULL;
+ cm_id->rem_ref(cm_id);
+err:
+ c4iw_put_ep(&ep->com);
+ return err;
+}
+
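+/*
+ * Active open: allocate an endpoint and an ATID, resolve the route and
+ * L2T entry for the destination (loopback handled via ip_dev_find), pick
+ * the tx channel, SMAC index, txq and RSS queue from the egress port and
+ * then issue the connect request.  Failures unwind in reverse order.
+ */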
+int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+{
+ int err = 0;
+ struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
+ struct c4iw_ep *ep;
+ struct rtable *rt;
+ struct net_device *pdev;
+ int step;
+
+ if ((conn_param->ord > c4iw_max_read_depth) ||
+ (conn_param->ird > c4iw_max_read_depth)) {
+ err = -EINVAL;
+ goto out;
+ }
+ ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
+ if (!ep) {
+ printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
+ err = -ENOMEM;
+ goto out;
+ }
+ init_timer(&ep->timer);
+ ep->plen = conn_param->private_data_len;
+ if (ep->plen)
+ memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
+ conn_param->private_data, ep->plen);
+ ep->ird = conn_param->ird;
+ ep->ord = conn_param->ord;
+
+ if (peer2peer && ep->ord == 0)
+ ep->ord = 1;
+
+ cm_id->add_ref(cm_id);
+ ep->com.dev = dev;
+ ep->com.cm_id = cm_id;
+ ep->com.qp = get_qhp(dev, conn_param->qpn);
+ BUG_ON(!ep->com.qp);
+ PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
+ ep->com.qp, cm_id);
+
+ /*
+ * Allocate an active TID to initiate a TCP connection.
+ */
+ ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
+ if (ep->atid == -1) {
+ printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
+ err = -ENOMEM;
+ goto fail2;
+ }
+
+ PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__,
+ ntohl(cm_id->local_addr.sin_addr.s_addr),
+ ntohs(cm_id->local_addr.sin_port),
+ ntohl(cm_id->remote_addr.sin_addr.s_addr),
+ ntohs(cm_id->remote_addr.sin_port));
+
+ /* find a route */
+ rt = find_route(dev,
+ cm_id->local_addr.sin_addr.s_addr,
+ cm_id->remote_addr.sin_addr.s_addr,
+ cm_id->local_addr.sin_port,
+ cm_id->remote_addr.sin_port, 0);
+ if (!rt) {
+ printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
+ err = -EHOSTUNREACH;
+ goto fail3;
+ }
+ ep->dst = &rt->u.dst;
+
+ /* get a l2t entry */
+ if (ep->dst->neighbour->dev->flags & IFF_LOOPBACK) {
+ PDBG("%s LOOPBACK\n", __func__);
+ pdev = ip_dev_find(&init_net,
+ cm_id->remote_addr.sin_addr.s_addr);
+ ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
+ ep->dst->neighbour,
+ pdev, 0);
+ ep->mtu = pdev->mtu;
+ ep->tx_chan = cxgb4_port_chan(pdev);
+ ep->smac_idx = ep->tx_chan << 1;
+ step = ep->com.dev->rdev.lldi.ntxq /
+ ep->com.dev->rdev.lldi.nchan;
+ ep->txq_idx = cxgb4_port_idx(pdev) * step;
+ step = ep->com.dev->rdev.lldi.nrxq /
+ ep->com.dev->rdev.lldi.nchan;
+ ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
+ cxgb4_port_idx(pdev) * step];
+ dev_put(pdev);
+ } else {
+ ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
+ ep->dst->neighbour,
+ ep->dst->neighbour->dev, 0);
+ ep->mtu = dst_mtu(ep->dst);
+ ep->tx_chan = cxgb4_port_chan(ep->dst->neighbour->dev);
+ ep->smac_idx = ep->tx_chan << 1;
+ step = ep->com.dev->rdev.lldi.ntxq /
+ ep->com.dev->rdev.lldi.nchan;
+ ep->txq_idx = cxgb4_port_idx(ep->dst->neighbour->dev) * step;
+ step = ep->com.dev->rdev.lldi.nrxq /
+ ep->com.dev->rdev.lldi.nchan;
+ ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
+ cxgb4_port_idx(ep->dst->neighbour->dev) * step];
+ }
+ if (!ep->l2t) {
+ printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
+ err = -ENOMEM;
+ goto fail4;
+ }
+
+ PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
+ __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
+ ep->l2t->idx);
+
+ state_set(&ep->com, CONNECTING);
+ ep->tos = 0;
+ ep->com.local_addr = cm_id->local_addr;
+ ep->com.remote_addr = cm_id->remote_addr;
+
+ /* send connect request to rnic */
+ err = send_connect(ep);
+ if (!err)
+ goto out;
+
+ cxgb4_l2t_release(ep->l2t);
+fail4:
+ dst_release(ep->dst);
+fail3:
+ cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
+fail2:
+ cm_id->rem_ref(cm_id);
+ c4iw_put_ep(&ep->com);
+out:
+ return err;
+}
+
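+/*
+ * Create a hardware listening server: allocate a server TID (STID),
+ * program the server via cxgb4_create_server() and then sleep on
+ * com.waitq until pass_open_rpl() reports the result in rpl_err.
+ */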
+int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
+{
+ int err = 0;
+ struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
+ struct c4iw_listen_ep *ep;
+
+ might_sleep();
+
+ ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
+ if (!ep) {
+ printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
+ err = -ENOMEM;
+ goto fail1;
+ }
+ PDBG("%s ep %p\n", __func__, ep);
+ cm_id->add_ref(cm_id);
+ ep->com.cm_id = cm_id;
+ ep->com.dev = dev;
+ ep->backlog = backlog;
+ ep->com.local_addr = cm_id->local_addr;
+
+ /*
+ * Allocate a server TID.
+ */
+ ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
+ if (ep->stid == -1) {
+ printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
+ err = -ENOMEM;
+ goto fail2;
+ }
+
+ state_set(&ep->com, LISTEN);
+ err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid,
+ ep->com.local_addr.sin_addr.s_addr,
+ ep->com.local_addr.sin_port,
+ ep->com.dev->rdev.lldi.rxq_ids[0]);
+ if (err)
+ goto fail3;
+
+ /* wait for pass_open_rpl */
+ wait_event(ep->com.waitq, ep->com.rpl_done);
+ err = ep->com.rpl_err;
+ if (!err) {
+ cm_id->provider_data = ep;
+ goto out;
+ }
+fail3:
+ cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
+fail2:
+ cm_id->rem_ref(cm_id);
+ c4iw_put_ep(&ep->com);
+fail1:
+out:
+ return err;
+}
+
+int c4iw_destroy_listen(struct iw_cm_id *cm_id)
+{
+ int err;
+ struct c4iw_listen_ep *ep = to_listen_ep(cm_id);
+
+ PDBG("%s ep %p\n", __func__, ep);
+
+ might_sleep();
+ state_set(&ep->com, DEAD);
+ ep->com.rpl_done = 0;
+ ep->com.rpl_err = 0;
+ err = listen_stop(ep);
+ if (err)
+ goto done;
+ wait_event(ep->com.waitq, ep->com.rpl_done);
+ cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
+done:
+ err = ep->com.rpl_err;
+ cm_id->rem_ref(cm_id);
+ c4iw_put_ep(&ep->com);
+ return err;
+}
+
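+/*
+ * Start an orderly (half close) or abortive teardown depending on
+ * 'abrupt' and the current state.  A fatal adapter error short-circuits
+ * straight to DEAD.  Timers are started/stopped and the close/abort
+ * messages are sent after dropping ep->com.lock; a send failure is
+ * treated as fatal and releases the endpoint resources.
+ */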
+int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
+{
+ int ret = 0;
+ unsigned long flags;
+ int close = 0;
+ int fatal = 0;
+ struct c4iw_rdev *rdev;
+ int start_timer = 0;
+ int stop_timer = 0;
+
+ spin_lock_irqsave(&ep->com.lock, flags);
+
+ PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
+ states[ep->com.state], abrupt);
+
+ rdev = &ep->com.dev->rdev;
+ if (c4iw_fatal_error(rdev)) {
+ fatal = 1;
+ close_complete_upcall(ep);
+ ep->com.state = DEAD;
+ }
+ switch (ep->com.state) {
+ case MPA_REQ_WAIT:
+ case MPA_REQ_SENT:
+ case MPA_REQ_RCVD:
+ case MPA_REP_SENT:
+ case FPDU_MODE:
+ close = 1;
+ if (abrupt)
+ ep->com.state = ABORTING;
+ else {
+ ep->com.state = CLOSING;
+ start_timer = 1;
+ }
+ set_bit(CLOSE_SENT, &ep->com.flags);
+ break;
+ case CLOSING:
+ if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
+ close = 1;
+ if (abrupt) {
+ stop_timer = 1;
+ ep->com.state = ABORTING;
+ } else
+ ep->com.state = MORIBUND;
+ }
+ break;
+ case MORIBUND:
+ case ABORTING:
+ case DEAD:
+ PDBG("%s ignoring disconnect ep %p state %u\n",
+ __func__, ep, ep->com.state);
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ spin_unlock_irqrestore(&ep->com.lock, flags);
+ if (start_timer)
+ start_ep_timer(ep);
+ if (stop_timer)
+ stop_ep_timer(ep);
+ if (close) {
+ if (abrupt)
+ ret = abort_connection(ep, NULL, gfp);
+ else
+ ret = send_halfclose(ep, gfp);
+ if (ret)
+ fatal = 1;
+ }
+ if (fatal)
+ release_ep_resources(ep);
+ return ret;
+}
+
+/*
+ * These are the real handlers that are called from a
+ * work queue.
+ */
+static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
+ [CPL_ACT_ESTABLISH] = act_establish,
+ [CPL_ACT_OPEN_RPL] = act_open_rpl,
+ [CPL_RX_DATA] = rx_data,
+ [CPL_ABORT_RPL_RSS] = abort_rpl,
+ [CPL_ABORT_RPL] = abort_rpl,
+ [CPL_PASS_OPEN_RPL] = pass_open_rpl,
+ [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
+ [CPL_PASS_ACCEPT_REQ] = pass_accept_req,
+ [CPL_PASS_ESTABLISH] = pass_establish,
+ [CPL_PEER_CLOSE] = peer_close,
+ [CPL_ABORT_REQ_RSS] = peer_abort,
+ [CPL_CLOSE_CON_RPL] = close_con_rpl,
+ [CPL_RDMA_TERMINATE] = terminate,
+ [CPL_FW4_ACK] = fw4_ack
+};
+
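+/*
+ * Endpoint timer expiry, run from the work queue via
+ * process_timedout_eps().  Connections stalled in MPA negotiation or in
+ * a graceful close are moved to ABORTING (the QP goes to ERROR first for
+ * CLOSING/MORIBUND) and then aborted.
+ */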
+static void process_timeout(struct c4iw_ep *ep)
+{
+ struct c4iw_qp_attributes attrs;
+ int abort = 1;
+
+ spin_lock_irq(&ep->com.lock);
+ PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
+ ep->com.state);
+ switch (ep->com.state) {
+ case MPA_REQ_SENT:
+ __state_set(&ep->com, ABORTING);
+ connect_reply_upcall(ep, -ETIMEDOUT);
+ break;
+ case MPA_REQ_WAIT:
+ __state_set(&ep->com, ABORTING);
+ break;
+ case CLOSING:
+ case MORIBUND:
+ if (ep->com.cm_id && ep->com.qp) {
+ attrs.next_state = C4IW_QP_STATE_ERROR;
+ c4iw_modify_qp(ep->com.qp->rhp,
+ ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
+ &attrs, 1);
+ }
+ __state_set(&ep->com, ABORTING);
+ break;
+ default:
+ printk(KERN_ERR "%s unexpected state ep %p tid %u state %u\n",
+ __func__, ep, ep->hwtid, ep->com.state);
+ WARN_ON(1);
+ abort = 0;
+ }
+ spin_unlock_irq(&ep->com.lock);
+ if (abort)
+ abort_connection(ep, NULL, GFP_KERNEL);
+ c4iw_put_ep(&ep->com);
+}
+
+static void process_timedout_eps(void)
+{
+ struct c4iw_ep *ep;
+
+ spin_lock_irq(&timeout_lock);
+ while (!list_empty(&timeout_list)) {
+ struct list_head *tmp;
+
+ tmp = timeout_list.next;
+ list_del(tmp);
+ spin_unlock_irq(&timeout_lock);
+ ep = list_entry(tmp, struct c4iw_ep, entry);
+ process_timeout(ep);
+ spin_lock_irq(&timeout_lock);
+ }
+ spin_unlock_irq(&timeout_lock);
+}
+
+static void process_work(struct work_struct *work)
+{
+ struct sk_buff *skb = NULL;
+ struct c4iw_dev *dev;
+ struct cpl_act_establish *rpl;
+ unsigned int opcode;
+ int ret;
+
+ while ((skb = skb_dequeue(&rxq))) {
+ rpl = cplhdr(skb);
+ dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
+ opcode = rpl->ot.opcode;
+
+ BUG_ON(!work_handlers[opcode]);
+ ret = work_handlers[opcode](dev, skb);
+ if (!ret)
+ kfree_skb(skb);
+ }
+ process_timedout_eps();
+}
+
+static DECLARE_WORK(skb_work, process_work);
+
+static void ep_timeout(unsigned long arg)
+{
+ struct c4iw_ep *ep = (struct c4iw_ep *)arg;
+
+ spin_lock(&timeout_lock);
+ list_add_tail(&ep->entry, &timeout_list);
+ spin_unlock(&timeout_lock);
+ queue_work(workq, &skb_work);
+}
+
+/*
+ * All the CM events are handled on a work queue to have a safe context.
+ */
+static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+
+ /*
+ * Save dev in the skb->cb area.
+ */
+ *((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;
+
+ /*
+ * Queue the skb and schedule the worker thread.
+ */
+ skb_queue_tail(&rxq, skb);
+ queue_work(workq, &skb_work);
+ return 0;
+}
+
+static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
+
+ if (rpl->status != CPL_ERR_NONE) {
+ printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
+ "for tid %u\n", rpl->status, GET_TID(rpl));
+ }
+ return 0;
+}
+
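+/*
+ * CPL_FW6_MSG carries either a work request completion (type 1), whose
+ * cookie in data[1] points at the c4iw_wr_wait of the sleeping issuer to
+ * be woken with the status from data[0], or an async CQE (type 2) that
+ * is handed to c4iw_ev_dispatch().
+ */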
+static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct cpl_fw6_msg *rpl = cplhdr(skb);
+ struct c4iw_wr_wait *wr_waitp;
+ int ret;
+
+ PDBG("%s type %u\n", __func__, rpl->type);
+
+ switch (rpl->type) {
+ case 1:
+ ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
+ wr_waitp = (__force struct c4iw_wr_wait *)rpl->data[1];
+ PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
+ if (wr_waitp) {
+ wr_waitp->ret = ret;
+ wr_waitp->done = 1;
+ wake_up(&wr_waitp->wait);
+ }
+ break;
+ case 2:
+ c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
+ break;
+ default:
+ printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
+ rpl->type);
+ break;
+ }
+ return 0;
+}
+
+/*
+ * Most upcalls from the T4 Core go to sched() to
+ * schedule the processing on a work queue.
+ */
+c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
+ [CPL_ACT_ESTABLISH] = sched,
+ [CPL_ACT_OPEN_RPL] = sched,
+ [CPL_RX_DATA] = sched,
+ [CPL_ABORT_RPL_RSS] = sched,
+ [CPL_ABORT_RPL] = sched,
+ [CPL_PASS_OPEN_RPL] = sched,
+ [CPL_CLOSE_LISTSRV_RPL] = sched,
+ [CPL_PASS_ACCEPT_REQ] = sched,
+ [CPL_PASS_ESTABLISH] = sched,
+ [CPL_PEER_CLOSE] = sched,
+ [CPL_CLOSE_CON_RPL] = sched,
+ [CPL_ABORT_REQ_RSS] = sched,
+ [CPL_RDMA_TERMINATE] = sched,
+ [CPL_FW4_ACK] = sched,
+ [CPL_SET_TCB_RPL] = set_tcb_rpl,
+ [CPL_FW6_MSG] = fw6_msg
+};
+
+int __init c4iw_cm_init(void)
+{
+ spin_lock_init(&timeout_lock);
+ skb_queue_head_init(&rxq);
+
+ workq = create_singlethread_workqueue("iw_cxgb4");
+ if (!workq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void __exit c4iw_cm_term(void)
+{
+ WARN_ON(!list_empty(&timeout_list));
+ flush_workqueue(workq);
+ destroy_workqueue(workq);
+}
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
new file mode 100644
index 000000000000..fb1aafcc294f
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -0,0 +1,882 @@
+/*
+ * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "iw_cxgb4.h"
+
+static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
+ struct c4iw_dev_ucontext *uctx)
+{
+ struct fw_ri_res_wr *res_wr;
+ struct fw_ri_res *res;
+ int wr_len;
+ struct c4iw_wr_wait wr_wait;
+ struct sk_buff *skb;
+ int ret;
+
+ wr_len = sizeof *res_wr + sizeof *res;
+ skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+ if (!skb)
+ return -ENOMEM;
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
+
+ res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
+ memset(res_wr, 0, wr_len);
+ res_wr->op_nres = cpu_to_be32(
+ FW_WR_OP(FW_RI_RES_WR) |
+ V_FW_RI_RES_WR_NRES(1) |
+ FW_WR_COMPL(1));
+ res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
+ res_wr->cookie = (u64)&wr_wait;
+ res = res_wr->res;
+ res->u.cq.restype = FW_RI_RES_TYPE_CQ;
+ res->u.cq.op = FW_RI_RES_OP_RESET;
+ res->u.cq.iqid = cpu_to_be32(cq->cqid);
+
+ c4iw_init_wr_wait(&wr_wait);
+ ret = c4iw_ofld_send(rdev, skb);
+ if (!ret) {
+ wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
+ if (!wr_wait.done) {
+ printk(KERN_ERR MOD "Device %s not responding!\n",
+ pci_name(rdev->lldi.pdev));
+ rdev->flags = T4_FATAL_ERROR;
+ ret = -EIO;
+ } else
+ ret = wr_wait.ret;
+ }
+
+ kfree(cq->sw_queue);
+ dma_free_coherent(&(rdev->lldi.pdev->dev),
+ cq->memsize, cq->queue,
+ pci_unmap_addr(cq, mapping));
+ c4iw_put_cqid(rdev, cq->cqid, uctx);
+ return ret;
+}
+
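+/*
+ * Allocate a CQID and coherent queue memory, then program the CQ in the
+ * firmware with a FW_RI_RES_WR (type CQ, op WRITE) and block for up to
+ * C4IW_WR_TO for the reply, which arrives through fw6_msg() via the
+ * wr_wait cookie.  A timeout marks the adapter T4_FATAL_ERROR.
+ */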
+static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
+ struct c4iw_dev_ucontext *uctx)
+{
+ struct fw_ri_res_wr *res_wr;
+ struct fw_ri_res *res;
+ int wr_len;
+ int user = (uctx != &rdev->uctx);
+ struct c4iw_wr_wait wr_wait;
+ int ret;
+ struct sk_buff *skb;
+
+ cq->cqid = c4iw_get_cqid(rdev, uctx);
+ if (!cq->cqid) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ if (!user) {
+ cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
+ if (!cq->sw_queue) {
+ ret = -ENOMEM;
+ goto err2;
+ }
+ }
+ cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
+ &cq->dma_addr, GFP_KERNEL);
+ if (!cq->queue) {
+ ret = -ENOMEM;
+ goto err3;
+ }
+ pci_unmap_addr_set(cq, mapping, cq->dma_addr);
+ memset(cq->queue, 0, cq->memsize);
+
+ /* build fw_ri_res_wr */
+ wr_len = sizeof *res_wr + sizeof *res;
+
+ skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto err4;
+ }
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
+
+ res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
+ memset(res_wr, 0, wr_len);
+ res_wr->op_nres = cpu_to_be32(
+ FW_WR_OP(FW_RI_RES_WR) |
+ V_FW_RI_RES_WR_NRES(1) |
+ FW_WR_COMPL(1));
+ res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
+ res_wr->cookie = (u64)&wr_wait;
+ res = res_wr->res;
+ res->u.cq.restype = FW_RI_RES_TYPE_CQ;
+ res->u.cq.op = FW_RI_RES_OP_WRITE;
+ res->u.cq.iqid = cpu_to_be32(cq->cqid);
+ res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
+ V_FW_RI_RES_WR_IQANUS(0) |
+ V_FW_RI_RES_WR_IQANUD(1) |
+ F_FW_RI_RES_WR_IQANDST |
+ V_FW_RI_RES_WR_IQANDSTINDEX(*rdev->lldi.rxq_ids));
+ res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
+ F_FW_RI_RES_WR_IQDROPRSS |
+ V_FW_RI_RES_WR_IQPCIECH(2) |
+ V_FW_RI_RES_WR_IQINTCNTTHRESH(0) |
+ F_FW_RI_RES_WR_IQO |
+ V_FW_RI_RES_WR_IQESIZE(1));
+ res->u.cq.iqsize = cpu_to_be16(cq->size);
+ res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);
+
+ c4iw_init_wr_wait(&wr_wait);
+
+ ret = c4iw_ofld_send(rdev, skb);
+ if (ret)
+ goto err4;
+ PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
+ wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
+ if (!wr_wait.done) {
+ printk(KERN_ERR MOD "Device %s not responding!\n",
+ pci_name(rdev->lldi.pdev));
+ rdev->flags = T4_FATAL_ERROR;
+ ret = -EIO;
+ } else
+ ret = wr_wait.ret;
+ if (ret)
+ goto err4;
+
+ cq->gen = 1;
+ cq->gts = rdev->lldi.gts_reg;
+ cq->rdev = rdev;
+ if (user) {
+ cq->ugts = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
+ (cq->cqid << rdev->cqshift);
+ cq->ugts &= PAGE_MASK;
+ }
+ return 0;
+err4:
+ dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
+ pci_unmap_addr(cq, mapping));
+err3:
+ kfree(cq->sw_queue);
+err2:
+ c4iw_put_cqid(rdev, cq->cqid, uctx);
+err1:
+ return ret;
+}
+
+static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
+{
+ struct t4_cqe cqe;
+
+ PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
+ wq, cq, cq->sw_cidx, cq->sw_pidx);
+ memset(&cqe, 0, sizeof(cqe));
+ cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
+ V_CQE_OPCODE(FW_RI_SEND) |
+ V_CQE_TYPE(0) |
+ V_CQE_SWCQE(1) |
+ V_CQE_QPID(wq->rq.qid));
+ cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
+ cq->sw_queue[cq->sw_pidx] = cqe;
+ t4_swcq_produce(cq);
+}
+
+int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
+{
+ int flushed = 0;
+ int in_use = wq->rq.in_use - count;
+
+ BUG_ON(in_use < 0);
+ PDBG("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
+ wq, cq, wq->rq.in_use, count);
+ while (in_use--) {
+ insert_recv_cqe(wq, cq);
+ flushed++;
+ }
+ return flushed;
+}
+
+static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
+ struct t4_swsqe *swcqe)
+{
+ struct t4_cqe cqe;
+
+ PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
+ wq, cq, cq->sw_cidx, cq->sw_pidx);
+ memset(&cqe, 0, sizeof(cqe));
+ cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
+ V_CQE_OPCODE(swcqe->opcode) |
+ V_CQE_TYPE(1) |
+ V_CQE_SWCQE(1) |
+ V_CQE_QPID(wq->sq.qid));
+ CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
+ cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
+ cq->sw_queue[cq->sw_pidx] = cqe;
+ t4_swcq_produce(cq);
+}
+
+int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count)
+{
+ int flushed = 0;
+ struct t4_swsqe *swsqe = &wq->sq.sw_sq[wq->sq.cidx + count];
+ int in_use = wq->sq.in_use - count;
+
+ BUG_ON(in_use < 0);
+ while (in_use--) {
+ swsqe->signaled = 0;
+ insert_sq_cqe(wq, cq, swsqe);
+ swsqe++;
+ if (swsqe == (wq->sq.sw_sq + wq->sq.size))
+ swsqe = wq->sq.sw_sq;
+ flushed++;
+ }
+ return flushed;
+}
+
+/*
+ * Move all CQEs from the HWCQ into the SWCQ.
+ */
+void c4iw_flush_hw_cq(struct t4_cq *cq)
+{
+ struct t4_cqe *cqe = NULL, *swcqe;
+ int ret;
+
+ PDBG("%s cq %p cqid 0x%x\n", __func__, cq, cq->cqid);
+ ret = t4_next_hw_cqe(cq, &cqe);
+ while (!ret) {
+ PDBG("%s flushing hwcq cidx 0x%x swcq pidx 0x%x\n",
+ __func__, cq->cidx, cq->sw_pidx);
+ swcqe = &cq->sw_queue[cq->sw_pidx];
+ *swcqe = *cqe;
+ swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
+ t4_swcq_produce(cq);
+ t4_hwcq_consume(cq);
+ ret = t4_next_hw_cqe(cq, &cqe);
+ }
+}
+
+static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
+{
+ if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
+ return 0;
+
+ if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
+ return 0;
+
+ if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
+ return 0;
+
+ if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
+ return 0;
+ return 1;
+}
+
+void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
+{
+ struct t4_cqe *cqe;
+ u32 ptr;
+
+ *count = 0;
+ ptr = cq->sw_cidx;
+ while (ptr != cq->sw_pidx) {
+ cqe = &cq->sw_queue[ptr];
+ if ((SQ_TYPE(cqe) || ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) &&
+ wq->sq.oldest_read)) &&
+ (CQE_QPID(cqe) == wq->sq.qid))
+ (*count)++;
+ if (++ptr == cq->size)
+ ptr = 0;
+ }
+ PDBG("%s cq %p count %d\n", __func__, cq, *count);
+}
+
+void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
+{
+ struct t4_cqe *cqe;
+ u32 ptr;
+
+ *count = 0;
+ PDBG("%s count zero %d\n", __func__, *count);
+ ptr = cq->sw_cidx;
+ while (ptr != cq->sw_pidx) {
+ cqe = &cq->sw_queue[ptr];
+ if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
+ (CQE_QPID(cqe) == wq->rq.qid) && cqe_completes_wr(cqe, wq))
+ (*count)++;
+ if (++ptr == cq->size)
+ ptr = 0;
+ }
+ PDBG("%s cq %p count %d\n", __func__, cq, *count);
+}
+
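+/*
+ * Walk the software SQ from sq.cidx: unsignaled WRs are skipped and
+ * counted until a completed entry is found, whose CQE is then moved into
+ * the SW CQ (marked SWCQE) and the skipped unsignaled WRs are reclaimed
+ * from sq.in_use.
+ */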
+static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
+{
+ struct t4_swsqe *swsqe;
+ u16 ptr = wq->sq.cidx;
+ int count = wq->sq.in_use;
+ int unsignaled = 0;
+
+ swsqe = &wq->sq.sw_sq[ptr];
+ while (count--)
+ if (!swsqe->signaled) {
+ if (++ptr == wq->sq.size)
+ ptr = 0;
+ swsqe = &wq->sq.sw_sq[ptr];
+ unsignaled++;
+ } else if (swsqe->complete) {
+
+ /*
+ * Insert this completed cqe into the swcq.
+ */
+ PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
+ __func__, ptr, cq->sw_pidx);
+ swsqe->cqe.header |= htonl(V_CQE_SWCQE(1));
+ cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
+ t4_swcq_produce(cq);
+ swsqe->signaled = 0;
+ wq->sq.in_use -= unsignaled;
+ break;
+ } else
+ break;
+}
+
+static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
+ struct t4_cqe *read_cqe)
+{
+ read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
+ read_cqe->len = cpu_to_be32(wq->sq.oldest_read->read_len);
+ read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) |
+ V_CQE_SWCQE(SW_CQE(hw_cqe)) |
+ V_CQE_OPCODE(FW_RI_READ_REQ) |
+ V_CQE_TYPE(1));
+}
+
+/*
+ * Advance wq->sq.oldest_read to the next read wr in the SWSQ, or NULL.
+ */
+static void advance_oldest_read(struct t4_wq *wq)
+{
+
+ u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;
+
+ if (rptr == wq->sq.size)
+ rptr = 0;
+ while (rptr != wq->sq.pidx) {
+ wq->sq.oldest_read = &wq->sq.sw_sq[rptr];
+
+ if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
+ return;
+ if (++rptr == wq->sq.size)
+ rptr = 0;
+ }
+ wq->sq.oldest_read = NULL;
+}
+
+/*
+ * poll_cq
+ *
+ * Caller must:
+ * check the validity of the first CQE,
+ * supply the wq associated with the qpid.
+ *
+ * credit: cq credit to return to sge.
+ * cqe_flushed: 1 iff the CQE is flushed.
+ * cqe: copy of the polled CQE.
+ *
+ * return value:
+ * 0 CQE returned ok.
+ * -EAGAIN CQE skipped, try again.
+ * -EOVERFLOW CQ overflow detected.
+ */
+static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
+ u8 *cqe_flushed, u64 *cookie, u32 *credit)
+{
+ int ret = 0;
+ struct t4_cqe *hw_cqe, read_cqe;
+
+ *cqe_flushed = 0;
+ *credit = 0;
+ ret = t4_next_cqe(cq, &hw_cqe);
+ if (ret)
+ return ret;
+
+ PDBG("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x"
+ " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
+ __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
+ CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
+ CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
+ CQE_WRID_LOW(hw_cqe));
+
+ /*
+ * skip cqe's not affiliated with a QP.
+ */
+ if (wq == NULL) {
+ ret = -EAGAIN;
+ goto skip_cqe;
+ }
+
+ /*
+ * Gotta tweak READ completions:
+ * 1) the cqe doesn't contain the sq_wptr from the wr.
+ * 2) opcode not reflected from the wr.
+ * 3) read_len not reflected from the wr.
+ * 4) cq_type is RQ_TYPE not SQ_TYPE.
+ */
+ if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {
+
+ /*
+ * If this is an unsolicited read response, then the read
+ * was generated by the kernel driver as part of peer-2-peer
+ * connection setup. So ignore the completion.
+ */
+ if (!wq->sq.oldest_read) {
+ if (CQE_STATUS(hw_cqe))
+ t4_set_wq_in_error(wq);
+ ret = -EAGAIN;
+ goto skip_cqe;
+ }
+
+ /*
+ * Don't write to the HWCQ, so create a new read req CQE
+ * in local memory.
+ */
+ create_read_req_cqe(wq, hw_cqe, &read_cqe);
+ hw_cqe = &read_cqe;
+ advance_oldest_read(wq);
+ }
+
+ if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
+ *cqe_flushed = t4_wq_in_error(wq);
+ t4_set_wq_in_error(wq);
+ goto proc_cqe;
+ }
+
+ /*
+ * RECV completion.
+ */
+ if (RQ_TYPE(hw_cqe)) {
+
+ /*
+ * HW only validates 4 bits of MSN. So we must validate that
+ * the MSN in the SEND is the next expected MSN. If it's not,
+ * then we complete this with T4_ERR_MSN and mark the wq in
+ * error.
+ */
+
+ if (t4_rq_empty(wq)) {
+ t4_set_wq_in_error(wq);
+ ret = -EAGAIN;
+ goto skip_cqe;
+ }
+ if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
+ t4_set_wq_in_error(wq);
+ hw_cqe->header |= htonl(V_CQE_STATUS(T4_ERR_MSN));
+ goto proc_cqe;
+ }
+ goto proc_cqe;
+ }
+
+ /*
+ * If we get here it's a send completion.
+ *
+ * Handle out of order completion. These get stuffed
+ * in the SW SQ. Then the SW SQ is walked to move any
+ * now in-order completions into the SW CQ. This handles
+ * 2 cases:
+ * 1) reaping unsignaled WRs when the first subsequent
+ * signaled WR is completed.
+ * 2) out of order read completions.
+ */
+ if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
+ struct t4_swsqe *swsqe;
+
+ PDBG("%s out of order completion going in sw_sq at idx %u\n",
+ __func__, CQE_WRID_SQ_IDX(hw_cqe));
+ swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
+ swsqe->cqe = *hw_cqe;
+ swsqe->complete = 1;
+ ret = -EAGAIN;
+ goto flush_wq;
+ }
+
+proc_cqe:
+ *cqe = *hw_cqe;
+
+ /*
+ * Reap the associated WR(s) that are freed up with this
+ * completion.
+ */
+ if (SQ_TYPE(hw_cqe)) {
+ wq->sq.cidx = CQE_WRID_SQ_IDX(hw_cqe);
+ PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
+ *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
+ t4_sq_consume(wq);
+ } else {
+ PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);
+ *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
+ BUG_ON(t4_rq_empty(wq));
+ t4_rq_consume(wq);
+ }
+
+flush_wq:
+ /*
+ * Flush any completed cqes that are now in-order.
+ */
+ flush_completed_wrs(wq, cq);
+
+skip_cqe:
+ if (SW_CQE(hw_cqe)) {
+ PDBG("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
+ __func__, cq, cq->cqid, cq->sw_cidx);
+ t4_swcq_consume(cq);
+ } else {
+ PDBG("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
+ __func__, cq, cq->cqid, cq->cidx);
+ t4_hwcq_consume(cq);
+ }
+ return ret;
+}
+
+/*
+ * Get one cq entry from c4iw and map it to openib.
+ *
+ * Returns:
+ * 0 cqe returned
+ * -ENODATA CQ empty, nothing returned
+ * -EAGAIN caller must try again
+ * any other -errno fatal error
+ */
+static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
+{
+ struct c4iw_qp *qhp = NULL;
+ struct t4_cqe cqe = {0, 0}, *rd_cqe;
+ struct t4_wq *wq;
+ u32 credit = 0;
+ u8 cqe_flushed;
+ u64 cookie = 0;
+ int ret;
+
+ ret = t4_next_cqe(&chp->cq, &rd_cqe);
+
+ if (ret)
+ return ret;
+
+ qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
+ if (!qhp)
+ wq = NULL;
+ else {
+ spin_lock(&qhp->lock);
+ wq = &(qhp->wq);
+ }
+ ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
+ if (ret)
+ goto out;
+
+ wc->wr_id = cookie;
+ wc->qp = &qhp->ibqp;
+ wc->vendor_err = CQE_STATUS(&cqe);
+ wc->wc_flags = 0;
+
+ PDBG("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x "
+ "lo 0x%x cookie 0x%llx\n", __func__, CQE_QPID(&cqe),
+ CQE_TYPE(&cqe), CQE_OPCODE(&cqe), CQE_STATUS(&cqe), CQE_LEN(&cqe),
+ CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe), (unsigned long long)cookie);
+
+ if (CQE_TYPE(&cqe) == 0) {
+ if (!CQE_STATUS(&cqe))
+ wc->byte_len = CQE_LEN(&cqe);
+ else
+ wc->byte_len = 0;
+ wc->opcode = IB_WC_RECV;
+ if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
+ CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
+ wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
+ wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+ }
+ } else {
+ switch (CQE_OPCODE(&cqe)) {
+ case FW_RI_RDMA_WRITE:
+ wc->opcode = IB_WC_RDMA_WRITE;
+ break;
+ case FW_RI_READ_REQ:
+ wc->opcode = IB_WC_RDMA_READ;
+ wc->byte_len = CQE_LEN(&cqe);
+ break;
+ case FW_RI_SEND_WITH_INV:
+ case FW_RI_SEND_WITH_SE_INV:
+ wc->opcode = IB_WC_SEND;
+ wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+ break;
+ case FW_RI_SEND:
+ case FW_RI_SEND_WITH_SE:
+ wc->opcode = IB_WC_SEND;
+ break;
+ case FW_RI_BIND_MW:
+ wc->opcode = IB_WC_BIND_MW;
+ break;
+
+ case FW_RI_LOCAL_INV:
+ wc->opcode = IB_WC_LOCAL_INV;
+ break;
+ case FW_RI_FAST_REGISTER:
+ wc->opcode = IB_WC_FAST_REG_MR;
+ break;
+ default:
+ printk(KERN_ERR MOD "Unexpected opcode %d "
+ "in the CQE received for QPID=0x%0x\n",
+ CQE_OPCODE(&cqe), CQE_QPID(&cqe));
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (cqe_flushed)
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ else {
+
+ switch (CQE_STATUS(&cqe)) {
+ case T4_ERR_SUCCESS:
+ wc->status = IB_WC_SUCCESS;
+ break;
+ case T4_ERR_STAG:
+ wc->status = IB_WC_LOC_ACCESS_ERR;
+ break;
+ case T4_ERR_PDID:
+ wc->status = IB_WC_LOC_PROT_ERR;
+ break;
+ case T4_ERR_QPID:
+ case T4_ERR_ACCESS:
+ wc->status = IB_WC_LOC_ACCESS_ERR;
+ break;
+ case T4_ERR_WRAP:
+ wc->status = IB_WC_GENERAL_ERR;
+ break;
+ case T4_ERR_BOUND:
+ wc->status = IB_WC_LOC_LEN_ERR;
+ break;
+ case T4_ERR_INVALIDATE_SHARED_MR:
+ case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
+ wc->status = IB_WC_MW_BIND_ERR;
+ break;
+ case T4_ERR_CRC:
+ case T4_ERR_MARKER:
+ case T4_ERR_PDU_LEN_ERR:
+ case T4_ERR_OUT_OF_RQE:
+ case T4_ERR_DDP_VERSION:
+ case T4_ERR_RDMA_VERSION:
+ case T4_ERR_DDP_QUEUE_NUM:
+ case T4_ERR_MSN:
+ case T4_ERR_TBIT:
+ case T4_ERR_MO:
+ case T4_ERR_MSN_RANGE:
+ case T4_ERR_IRD_OVERFLOW:
+ case T4_ERR_OPCODE:
+ wc->status = IB_WC_FATAL_ERR;
+ break;
+ case T4_ERR_SWFLUSH:
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ break;
+ default:
+ printk(KERN_ERR MOD
+ "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
+ CQE_STATUS(&cqe), CQE_QPID(&cqe));
+ ret = -EINVAL;
+ }
+ }
+out:
+ if (wq)
+ spin_unlock(&qhp->lock);
+ return ret;
+}
+
+int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+ struct c4iw_cq *chp;
+ unsigned long flags;
+ int npolled;
+ int err = 0;
+
+ chp = to_c4iw_cq(ibcq);
+
+ spin_lock_irqsave(&chp->lock, flags);
+ for (npolled = 0; npolled < num_entries; ++npolled) {
+ do {
+ err = c4iw_poll_cq_one(chp, wc + npolled);
+ } while (err == -EAGAIN);
+ if (err)
+ break;
+ }
+ spin_unlock_irqrestore(&chp->lock, flags);
+ return !err || err == -ENODATA ? npolled : err;
+}
+
+int c4iw_destroy_cq(struct ib_cq *ib_cq)
+{
+ struct c4iw_cq *chp;
+ struct c4iw_ucontext *ucontext;
+
+ PDBG("%s ib_cq %p\n", __func__, ib_cq);
+ chp = to_c4iw_cq(ib_cq);
+
+ remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
+ atomic_dec(&chp->refcnt);
+ wait_event(chp->wait, !atomic_read(&chp->refcnt));
+
+ ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
+ : NULL;
+ destroy_cq(&chp->rhp->rdev, &chp->cq,
+ ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx);
+ kfree(chp);
+ return 0;
+}
+
+struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
+ int vector, struct ib_ucontext *ib_context,
+ struct ib_udata *udata)
+{
+ struct c4iw_dev *rhp;
+ struct c4iw_cq *chp;
+ struct c4iw_create_cq_resp uresp;
+ struct c4iw_ucontext *ucontext = NULL;
+ int ret;
+ size_t memsize;
+ struct c4iw_mm_entry *mm, *mm2;
+
+ PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
+
+ rhp = to_c4iw_dev(ibdev);
+
+ chp = kzalloc(sizeof(*chp), GFP_KERNEL);
+ if (!chp)
+ return ERR_PTR(-ENOMEM);
+
+ if (ib_context)
+ ucontext = to_c4iw_ucontext(ib_context);
+
+ /* account for the status page. */
+ entries++;
+
+ /*
+ * entries must be multiple of 16 for HW.
+ */
+ entries = roundup(entries, 16);
+ memsize = entries * sizeof *chp->cq.queue;
+
+ /*
+ * memsize must be a multiple of the page size if it's a user cq.
+ */
+ if (ucontext)
+ memsize = roundup(memsize, PAGE_SIZE);
+ chp->cq.size = entries;
+ chp->cq.memsize = memsize;
+
+ ret = create_cq(&rhp->rdev, &chp->cq,
+ ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+ if (ret)
+ goto err1;
+
+ chp->rhp = rhp;
+ chp->cq.size--; /* status page */
+ chp->ibcq.cqe = chp->cq.size;
+ spin_lock_init(&chp->lock);
+ atomic_set(&chp->refcnt, 1);
+ init_waitqueue_head(&chp->wait);
+ ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
+ if (ret)
+ goto err2;
+
+ if (ucontext) {
+ mm = kmalloc(sizeof *mm, GFP_KERNEL);
+ if (!mm) {
+ ret = -ENOMEM;
+ goto err3;
+ }
+ mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
+ if (!mm2) {
+ ret = -ENOMEM;
+ goto err4;
+ }
+
+ uresp.qid_mask = rhp->rdev.cqmask;
+ uresp.cqid = chp->cq.cqid;
+ uresp.size = chp->cq.size;
+ uresp.memsize = chp->cq.memsize;
+ spin_lock(&ucontext->mmap_lock);
+ uresp.key = ucontext->key;
+ ucontext->key += PAGE_SIZE;
+ uresp.gts_key = ucontext->key;
+ ucontext->key += PAGE_SIZE;
+ spin_unlock(&ucontext->mmap_lock);
+ ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
+ if (ret)
+ goto err5;
+
+ mm->key = uresp.key;
+ mm->addr = virt_to_phys(chp->cq.queue);
+ mm->len = chp->cq.memsize;
+ insert_mmap(ucontext, mm);
+
+ mm2->key = uresp.gts_key;
+ mm2->addr = chp->cq.ugts;
+ mm2->len = PAGE_SIZE;
+ insert_mmap(ucontext, mm2);
+ }
+ PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
+ __func__, chp->cq.cqid, chp, chp->cq.size,
+ chp->cq.memsize,
+ (unsigned long long) chp->cq.dma_addr);
+ return &chp->ibcq;
+err5:
+ kfree(mm2);
+err4:
+ kfree(mm);
+err3:
+ remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
+err2:
+ destroy_cq(&chp->rhp->rdev, &chp->cq,
+ ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+err1:
+ kfree(chp);
+ return ERR_PTR(ret);
+}
+
+int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
+{
+ return -ENOSYS;
+}
+
+int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
+{
+ struct c4iw_cq *chp;
+ int ret;
+ unsigned long flag;
+
+ chp = to_c4iw_cq(ibcq);
+ spin_lock_irqsave(&chp->lock, flag);
+ ret = t4_arm_cq(&chp->cq,
+ (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
+ spin_unlock_irqrestore(&chp->lock, flag);
+ if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
+ ret = 0;
+ return ret;
+}
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
new file mode 100644
index 000000000000..be23b5eab13b
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -0,0 +1,520 @@
+/*
+ * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/debugfs.h>
+
+#include <rdma/ib_verbs.h>
+
+#include "iw_cxgb4.h"
+
+#define DRV_VERSION "0.1"
+
+MODULE_AUTHOR("Steve Wise");
+MODULE_DESCRIPTION("Chelsio T4 RDMA Driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static LIST_HEAD(dev_list);
+static DEFINE_MUTEX(dev_mutex);
+
+static struct dentry *c4iw_debugfs_root;
+
+struct debugfs_qp_data {
+ struct c4iw_dev *devp;
+ char *buf;
+ int bufsize;
+ int pos;
+};
+
+static int count_qps(int id, void *p, void *data)
+{
+ struct c4iw_qp *qp = p;
+ int *countp = data;
+
+ if (id != qp->wq.sq.qid)
+ return 0;
+
+ *countp = *countp + 1;
+ return 0;
+}
+
+static int dump_qps(int id, void *p, void *data)
+{
+ struct c4iw_qp *qp = p;
+ struct debugfs_qp_data *qpd = data;
+ int space;
+ int cc;
+
+ if (id != qp->wq.sq.qid)
+ return 0;
+
+ space = qpd->bufsize - qpd->pos - 1;
+ if (space == 0)
+ return 1;
+
+ if (qp->ep)
+ cc = snprintf(qpd->buf + qpd->pos, space, "qp id %u state %u "
+ "ep tid %u state %u %pI4:%u->%pI4:%u\n",
+ qp->wq.sq.qid, (int)qp->attr.state,
+ qp->ep->hwtid, (int)qp->ep->com.state,
+ &qp->ep->com.local_addr.sin_addr.s_addr,
+ ntohs(qp->ep->com.local_addr.sin_port),
+ &qp->ep->com.remote_addr.sin_addr.s_addr,
+ ntohs(qp->ep->com.remote_addr.sin_port));
+ else
+ cc = snprintf(qpd->buf + qpd->pos, space, "qp id %u state %u\n",
+ qp->wq.sq.qid, (int)qp->attr.state);
+ if (cc < space)
+ qpd->pos += cc;
+ return 0;
+}
+
+static int qp_release(struct inode *inode, struct file *file)
+{
+ struct debugfs_qp_data *qpd = file->private_data;
+ if (!qpd) {
+ printk(KERN_INFO "%s null qpd?\n", __func__);
+ return 0;
+ }
+ kfree(qpd->buf);
+ kfree(qpd);
+ return 0;
+}
+
+static int qp_open(struct inode *inode, struct file *file)
+{
+ struct debugfs_qp_data *qpd;
+ int ret = 0;
+ int count = 1;
+
+ qpd = kmalloc(sizeof *qpd, GFP_KERNEL);
+ if (!qpd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ qpd->devp = inode->i_private;
+ qpd->pos = 0;
+
+ spin_lock_irq(&qpd->devp->lock);
+ idr_for_each(&qpd->devp->qpidr, count_qps, &count);
+ spin_unlock_irq(&qpd->devp->lock);
+
+ qpd->bufsize = count * 128;
+ qpd->buf = kmalloc(qpd->bufsize, GFP_KERNEL);
+ if (!qpd->buf) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ spin_lock_irq(&qpd->devp->lock);
+ idr_for_each(&qpd->devp->qpidr, dump_qps, qpd);
+ spin_unlock_irq(&qpd->devp->lock);
+
+ qpd->buf[qpd->pos++] = 0;
+ file->private_data = qpd;
+ goto out;
+err1:
+ kfree(qpd);
+out:
+ return ret;
+}
+
+static ssize_t qp_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct debugfs_qp_data *qpd = file->private_data;
+ loff_t pos = *ppos;
+ loff_t avail = qpd->pos;
+
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= avail)
+ return 0;
+ if (count > avail - pos)
+ count = avail - pos;
+
+ while (count) {
+ size_t len = 0;
+
+ len = min((int)count, (int)qpd->pos - (int)pos);
+ if (copy_to_user(buf, qpd->buf + pos, len))
+ return -EFAULT;
+ if (len == 0)
+ return -EINVAL;
+
+ buf += len;
+ pos += len;
+ count -= len;
+ }
+ count = pos - *ppos;
+ *ppos = pos;
+ return count;
+}
+
+static const struct file_operations qp_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = qp_open,
+ .release = qp_release,
+ .read = qp_read,
+};
+
+static int setup_debugfs(struct c4iw_dev *devp)
+{
+ struct dentry *de;
+
+ if (!devp->debugfs_root)
+ return -1;
+
+ de = debugfs_create_file("qps", S_IWUSR, devp->debugfs_root,
+ (void *)devp, &qp_debugfs_fops);
+ if (de && de->d_inode)
+ de->d_inode->i_size = 4096;
+ return 0;
+}
+
+void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
+ struct c4iw_dev_ucontext *uctx)
+{
+ struct list_head *pos, *nxt;
+ struct c4iw_qid_list *entry;
+
+ mutex_lock(&uctx->lock);
+ list_for_each_safe(pos, nxt, &uctx->qpids) {
+ entry = list_entry(pos, struct c4iw_qid_list, entry);
+ list_del_init(&entry->entry);
+ if (!(entry->qid & rdev->qpmask))
+ c4iw_put_resource(&rdev->resource.qid_fifo, entry->qid,
+ &rdev->resource.qid_fifo_lock);
+ kfree(entry);
+ }
+
+ list_for_each_safe(pos, nxt, &uctx->qpids) {
+ entry = list_entry(pos, struct c4iw_qid_list, entry);
+ list_del_init(&entry->entry);
+ kfree(entry);
+ }
+ mutex_unlock(&uctx->lock);
+}
+
+void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
+ struct c4iw_dev_ucontext *uctx)
+{
+ INIT_LIST_HEAD(&uctx->qpids);
+ INIT_LIST_HEAD(&uctx->cqids);
+ mutex_init(&uctx->lock);
+}
+
+/* Caller takes care of locking if needed */
+static int c4iw_rdev_open(struct c4iw_rdev *rdev)
+{
+ int err;
+
+ c4iw_init_dev_ucontext(rdev, &rdev->uctx);
+
+ /*
+ * qpshift is the number of bits to shift the qpid left in order
+ * to get the correct address of the doorbell for that qp.
+ */
+ rdev->qpshift = PAGE_SHIFT - ilog2(rdev->lldi.udb_density);
+ rdev->qpmask = rdev->lldi.udb_density - 1;
+ rdev->cqshift = PAGE_SHIFT - ilog2(rdev->lldi.ucq_density);
+ rdev->cqmask = rdev->lldi.ucq_density - 1;
+ PDBG("%s dev %s stag start 0x%0x size 0x%0x num stags %d "
+ "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x\n",
+ __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
+ rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
+ rdev->lldi.vr->pbl.start,
+ rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
+ rdev->lldi.vr->rq.size);
+ PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu "
+ "qpmask 0x%x cqshift %lu cqmask 0x%x\n",
+ (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
+ (void *)pci_resource_start(rdev->lldi.pdev, 2),
+ rdev->lldi.db_reg,
+ rdev->lldi.gts_reg,
+ rdev->qpshift, rdev->qpmask,
+ rdev->cqshift, rdev->cqmask);
+
+ if (c4iw_num_stags(rdev) == 0) {
+ err = -EINVAL;
+ goto err1;
+ }
+
+ err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
+ if (err) {
+ printk(KERN_ERR MOD "error %d initializing resources\n", err);
+ goto err1;
+ }
+ err = c4iw_pblpool_create(rdev);
+ if (err) {
+ printk(KERN_ERR MOD "error %d initializing pbl pool\n", err);
+ goto err2;
+ }
+ err = c4iw_rqtpool_create(rdev);
+ if (err) {
+ printk(KERN_ERR MOD "error %d initializing rqt pool\n", err);
+ goto err3;
+ }
+ return 0;
+err3:
+ c4iw_pblpool_destroy(rdev);
+err2:
+ c4iw_destroy_resource(&rdev->resource);
+err1:
+ return err;
+}
+
+static void c4iw_rdev_close(struct c4iw_rdev *rdev)
+{
+ c4iw_pblpool_destroy(rdev);
+ c4iw_rqtpool_destroy(rdev);
+ c4iw_destroy_resource(&rdev->resource);
+}
+
+static void c4iw_remove(struct c4iw_dev *dev)
+{
+ PDBG("%s c4iw_dev %p\n", __func__, dev);
+ cancel_delayed_work_sync(&dev->db_drop_task);
+ list_del(&dev->entry);
+ c4iw_unregister_device(dev);
+ c4iw_rdev_close(&dev->rdev);
+ idr_destroy(&dev->cqidr);
+ idr_destroy(&dev->qpidr);
+ idr_destroy(&dev->mmidr);
+ ib_dealloc_device(&dev->ibdev);
+}
+
+static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
+{
+ struct c4iw_dev *devp;
+ int ret;
+
+ devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
+ if (!devp) {
+ printk(KERN_ERR MOD "Cannot allocate ib device\n");
+ return NULL;
+ }
+ devp->rdev.lldi = *infop;
+
+ mutex_lock(&dev_mutex);
+
+ ret = c4iw_rdev_open(&devp->rdev);
+ if (ret) {
+ mutex_unlock(&dev_mutex);
+ printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
+ ib_dealloc_device(&devp->ibdev);
+ return NULL;
+ }
+
+ idr_init(&devp->cqidr);
+ idr_init(&devp->qpidr);
+ idr_init(&devp->mmidr);
+ spin_lock_init(&devp->lock);
+ list_add_tail(&devp->entry, &dev_list);
+ mutex_unlock(&dev_mutex);
+
+ if (c4iw_register_device(devp)) {
+ printk(KERN_ERR MOD "Unable to register device\n");
+ mutex_lock(&dev_mutex);
+ c4iw_remove(devp);
+ mutex_unlock(&dev_mutex);
+ }
+ if (c4iw_debugfs_root) {
+ devp->debugfs_root = debugfs_create_dir(
+ pci_name(devp->rdev.lldi.pdev),
+ c4iw_debugfs_root);
+ setup_debugfs(devp);
+ }
+ return devp;
+}
+
+static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
+{
+ struct c4iw_dev *dev;
+ static int vers_printed;
+ int i;
+
+ if (!vers_printed++)
+ printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n",
+ DRV_VERSION);
+
+ dev = c4iw_alloc(infop);
+ if (!dev)
+ goto out;
+
+ PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
+ __func__, pci_name(dev->rdev.lldi.pdev),
+ dev->rdev.lldi.nchan, dev->rdev.lldi.nrxq,
+ dev->rdev.lldi.ntxq, dev->rdev.lldi.nports);
+
+ for (i = 0; i < dev->rdev.lldi.nrxq; i++)
+ PDBG("rxqid[%u] %u\n", i, dev->rdev.lldi.rxq_ids[i]);
+
+ printk(KERN_INFO MOD "Initialized device %s\n",
+ pci_name(dev->rdev.lldi.pdev));
+out:
+ return dev;
+}
+
+static struct sk_buff *t4_pktgl_to_skb(const struct pkt_gl *gl,
+ unsigned int skb_len,
+ unsigned int pull_len)
+{
+ struct sk_buff *skb;
+ struct skb_shared_info *ssi;
+
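+ /*
+  * Small frames are copied entirely into the skb's linear data; larger
+  * frames keep the gather-list pages as frags and pull only the headers.
+  */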
+ if (gl->tot_len <= 512) {
+ skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
+ if (unlikely(!skb))
+ goto out;
+ __skb_put(skb, gl->tot_len);
+ skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
+ } else {
+ skb = alloc_skb(skb_len, GFP_ATOMIC);
+ if (unlikely(!skb))
+ goto out;
+ __skb_put(skb, pull_len);
+ skb_copy_to_linear_data(skb, gl->va, pull_len);
+
+ ssi = skb_shinfo(skb);
+ ssi->frags[0].page = gl->frags[0].page;
+ ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len;
+ ssi->frags[0].size = gl->frags[0].size - pull_len;
+ if (gl->nfrags > 1)
+ memcpy(&ssi->frags[1], &gl->frags[1],
+ (gl->nfrags - 1) * sizeof(skb_frag_t));
+ ssi->nr_frags = gl->nfrags;
+
+ skb->len = gl->tot_len;
+ skb->data_len = skb->len - pull_len;
+ skb->truesize += skb->data_len;
+
+ /* Get a reference for the last page, we don't own it */
+ get_page(gl->frags[gl->nfrags - 1].page);
+ }
+out:
+ return skb;
+}
+
+static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
+ const struct pkt_gl *gl)
+{
+ struct c4iw_dev *dev = handle;
+ struct sk_buff *skb;
+ const struct cpl_act_establish *rpl;
+ unsigned int opcode;
+
+ if (gl == NULL) {
+ /* 64B response descriptor: skip the leading 8-byte RSS header and omit the trailing rsp_ctrl */
+ unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;
+
+ skb = alloc_skb(256, GFP_ATOMIC);
+ if (!skb)
+ goto nomem;
+ __skb_put(skb, len);
+ skb_copy_to_linear_data(skb, &rsp[1], len);
+ } else if (gl == CXGB4_MSG_AN) {
+ const struct rsp_ctrl *rc = (void *)rsp;
+
+ u32 qid = be32_to_cpu(rc->pldbuflen_qid);
+ c4iw_ev_handler(dev, qid);
+ return 0;
+ } else {
+ skb = t4_pktgl_to_skb(gl, 128, 128);
+ if (unlikely(!skb))
+ goto nomem;
+ }
+
+ rpl = cplhdr(skb);
+ opcode = rpl->ot.opcode;
+
+ if (c4iw_handlers[opcode])
+ c4iw_handlers[opcode](dev, skb);
+ else
+ printk(KERN_INFO MOD "%s no handler opcode 0x%x...\n", __func__,
+ opcode);
+
+ return 0;
+nomem:
+ return -1;
+}
+
+static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
+{
+ PDBG("%s new_state %u\n", __func__, new_state);
+ return 0;
+}
+
+static struct cxgb4_uld_info c4iw_uld_info = {
+ .name = DRV_NAME,
+ .add = c4iw_uld_add,
+ .rx_handler = c4iw_uld_rx_handler,
+ .state_change = c4iw_uld_state_change,
+};
+
+static int __init c4iw_init_module(void)
+{
+ int err;
+
+ err = c4iw_cm_init();
+ if (err)
+ return err;
+
+ c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
+ if (!c4iw_debugfs_root)
+ printk(KERN_WARNING MOD
+ "could not create debugfs entry, continuing\n");
+
+ cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);
+
+ return 0;
+}
+
+static void __exit c4iw_exit_module(void)
+{
+ struct c4iw_dev *dev, *tmp;
+
+ cxgb4_unregister_uld(CXGB4_ULD_RDMA);
+
+ mutex_lock(&dev_mutex);
+ list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
+ c4iw_remove(dev);
+ }
+ mutex_unlock(&dev_mutex);
+
+ c4iw_cm_term();
+ debugfs_remove_recursive(c4iw_debugfs_root);
+}
+
+module_init(c4iw_init_module);
+module_exit(c4iw_exit_module);
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
new file mode 100644
index 000000000000..491e76a0327f
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/slab.h>
+#include <linux/mman.h>
+#include <net/sock.h>
+
+#include "iw_cxgb4.h"
+
+static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
+ struct c4iw_qp *qhp,
+ struct t4_cqe *err_cqe,
+ enum ib_event_type ib_event)
+{
+ struct ib_event event;
+ struct c4iw_qp_attributes attrs;
+
+ if ((qhp->attr.state == C4IW_QP_STATE_ERROR) ||
+ (qhp->attr.state == C4IW_QP_STATE_TERMINATE)) {
+ PDBG("%s AE received after RTS - "
+ "qp state %d qpid 0x%x status 0x%x\n", __func__,
+ qhp->attr.state, qhp->wq.sq.qid, CQE_STATUS(err_cqe));
+ return;
+ }
+
+ printk(KERN_ERR MOD "AE qpid 0x%x opcode %d status 0x%x "
+ "type %d wrid.hi 0x%x wrid.lo 0x%x\n",
+ CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
+ CQE_STATUS(err_cqe), CQE_TYPE(err_cqe),
+ CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));
+
+ if (qhp->attr.state == C4IW_QP_STATE_RTS) {
+ attrs.next_state = C4IW_QP_STATE_TERMINATE;
+ c4iw_modify_qp(qhp->rhp, qhp, C4IW_QP_ATTR_NEXT_STATE,
+ &attrs, 1);
+ }
+
+ event.event = ib_event;
+ event.device = chp->ibcq.device;
+ if (ib_event == IB_EVENT_CQ_ERR)
+ event.element.cq = &chp->ibcq;
+ else
+ event.element.qp = &qhp->ibqp;
+ if (qhp->ibqp.event_handler)
+ (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
+
+ (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
+}
+
+void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
+{
+ struct c4iw_cq *chp;
+ struct c4iw_qp *qhp;
+ u32 cqid;
+
+ spin_lock(&dev->lock);
+ qhp = get_qhp(dev, CQE_QPID(err_cqe));
+ if (!qhp) {
+ printk(KERN_ERR MOD "BAD AE qpid 0x%x opcode %d "
+ "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
+ CQE_QPID(err_cqe),
+ CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
+ CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
+ CQE_WRID_LOW(err_cqe));
+ spin_unlock(&dev->lock);
+ goto out;
+ }
+
+ if (SQ_TYPE(err_cqe))
+ cqid = qhp->attr.scq;
+ else
+ cqid = qhp->attr.rcq;
+ chp = get_chp(dev, cqid);
+ if (!chp) {
+ printk(KERN_ERR MOD "BAD AE cqid 0x%x qpid 0x%x opcode %d "
+ "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
+ cqid, CQE_QPID(err_cqe),
+ CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
+ CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
+ CQE_WRID_LOW(err_cqe));
+ spin_unlock(&dev->lock);
+ goto out;
+ }
+
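+ /*
+  * Take references on the QP and CQ so neither can be freed while the
+  * event is posted after the device lock is dropped.
+  */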
+ c4iw_qp_add_ref(&qhp->ibqp);
+ atomic_inc(&chp->refcnt);
+ spin_unlock(&dev->lock);
+
+ /* Bad incoming write */
+ if (RQ_TYPE(err_cqe) &&
+ (CQE_OPCODE(err_cqe) == FW_RI_RDMA_WRITE)) {
+ post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_REQ_ERR);
+ goto done;
+ }
+
+ switch (CQE_STATUS(err_cqe)) {
+
+ /* Completion Events */
+ case T4_ERR_SUCCESS:
+ printk(KERN_ERR MOD "AE with status 0!\n");
+ break;
+
+ case T4_ERR_STAG:
+ case T4_ERR_PDID:
+ case T4_ERR_QPID:
+ case T4_ERR_ACCESS:
+ case T4_ERR_WRAP:
+ case T4_ERR_BOUND:
+ case T4_ERR_INVALIDATE_SHARED_MR:
+ case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
+ post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_ACCESS_ERR);
+ break;
+
+ /* Device Fatal Errors */
+ case T4_ERR_ECC:
+ case T4_ERR_ECC_PSTAG:
+ case T4_ERR_INTERNAL_ERR:
+ post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_DEVICE_FATAL);
+ break;
+
+ /* QP Fatal Errors */
+ case T4_ERR_OUT_OF_RQE:
+ case T4_ERR_PBL_ADDR_BOUND:
+ case T4_ERR_CRC:
+ case T4_ERR_MARKER:
+ case T4_ERR_PDU_LEN_ERR:
+ case T4_ERR_DDP_VERSION:
+ case T4_ERR_RDMA_VERSION:
+ case T4_ERR_OPCODE:
+ case T4_ERR_DDP_QUEUE_NUM:
+ case T4_ERR_MSN:
+ case T4_ERR_TBIT:
+ case T4_ERR_MO:
+ case T4_ERR_MSN_GAP:
+ case T4_ERR_MSN_RANGE:
+ case T4_ERR_RQE_ADDR_BOUND:
+ case T4_ERR_IRD_OVERFLOW:
+ post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
+ break;
+
+ default:
+ printk(KERN_ERR MOD "Unknown T4 status 0x%x QPID 0x%x\n",
+ CQE_STATUS(err_cqe), qhp->wq.sq.qid);
+ post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
+ break;
+ }
+done:
+ if (atomic_dec_and_test(&chp->refcnt))
+ wake_up(&chp->wait);
+ c4iw_qp_rem_ref(&qhp->ibqp);
+out:
+ return;
+}
+
+int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
+{
+ struct c4iw_cq *chp;
+
+ chp = get_chp(dev, qid);
+ if (chp)
+ (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
+ else
+ PDBG("%s unknown cqid 0x%x\n", __func__, qid);
+ return 0;
+}
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
new file mode 100644
index 000000000000..a6269981e815
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -0,0 +1,745 @@
+/*
+ * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __IW_CXGB4_H__
+#define __IW_CXGB4_H__
+
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/idr.h>
+#include <linux/workqueue.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/inet.h>
+#include <linux/wait.h>
+#include <linux/kref.h>
+#include <linux/timer.h>
+#include <linux/io.h>
+#include <linux/kfifo.h>
+
+#include <asm/byteorder.h>
+
+#include <net/net_namespace.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/iw_cm.h>
+
+#include "cxgb4.h"
+#include "cxgb4_uld.h"
+#include "l2t.h"
+#include "user.h"
+
+#define DRV_NAME "iw_cxgb4"
+#define MOD DRV_NAME ":"
+
+extern int c4iw_debug;
+#define PDBG(fmt, args...) \
+do { \
+ if (c4iw_debug) \
+ printk(MOD fmt, ## args); \
+} while (0)
+
+#include "t4.h"
+
+#define PBL_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->pbl.start)
+#define RQT_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->rq.start)
+
+static inline void *cplhdr(struct sk_buff *skb)
+{
+ return skb->data;
+}
+
+#define C4IW_WR_TO (10*HZ)
+
+struct c4iw_wr_wait {
+ wait_queue_head_t wait;
+ int done;
+ int ret;
+};
+
+static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
+{
+ wr_waitp->ret = 0;
+ wr_waitp->done = 0;
+ init_waitqueue_head(&wr_waitp->wait);
+}
+
+struct c4iw_resource {
+ struct kfifo tpt_fifo;
+ spinlock_t tpt_fifo_lock;
+ struct kfifo qid_fifo;
+ spinlock_t qid_fifo_lock;
+ struct kfifo pdid_fifo;
+ spinlock_t pdid_fifo_lock;
+};
+
+struct c4iw_qid_list {
+ struct list_head entry;
+ u32 qid;
+};
+
+struct c4iw_dev_ucontext {
+ struct list_head qpids;
+ struct list_head cqids;
+ struct mutex lock;
+};
+
+enum c4iw_rdev_flags {
+ T4_FATAL_ERROR = (1<<0),
+};
+
+struct c4iw_rdev {
+ struct c4iw_resource resource;
+ unsigned long qpshift;
+ u32 qpmask;
+ unsigned long cqshift;
+ u32 cqmask;
+ struct c4iw_dev_ucontext uctx;
+ struct gen_pool *pbl_pool;
+ struct gen_pool *rqt_pool;
+ u32 flags;
+ struct cxgb4_lld_info lldi;
+};
+
+static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
+{
+ return rdev->flags & T4_FATAL_ERROR;
+}
+
+static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
+{
+ return min((int)T4_MAX_NUM_STAG, (int)(rdev->lldi.vr->stag.size >> 5));
+}
+
+struct c4iw_dev {
+ struct ib_device ibdev;
+ struct c4iw_rdev rdev;
+ u32 device_cap_flags;
+ struct idr cqidr;
+ struct idr qpidr;
+ struct idr mmidr;
+ spinlock_t lock;
+ struct list_head entry;
+ struct delayed_work db_drop_task;
+ struct dentry *debugfs_root;
+};
+
+static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct c4iw_dev, ibdev);
+}
+
+static inline struct c4iw_dev *rdev_to_c4iw_dev(struct c4iw_rdev *rdev)
+{
+ return container_of(rdev, struct c4iw_dev, rdev);
+}
+
+static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
+{
+ return idr_find(&rhp->cqidr, cqid);
+}
+
+static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid)
+{
+ return idr_find(&rhp->qpidr, qpid);
+}
+
+static inline struct c4iw_mr *get_mhp(struct c4iw_dev *rhp, u32 mmid)
+{
+ return idr_find(&rhp->mmidr, mmid);
+}
+
+static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
+ void *handle, u32 id)
+{
+ int ret;
+ int newid;
+
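+ /*
+  * idr_pre_get() preallocates outside the lock; retry if another thread
+  * consumed the preallocation and idr_get_new_above() returns -EAGAIN.
+  */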
+ do {
+ if (!idr_pre_get(idr, GFP_KERNEL))
+ return -ENOMEM;
+ spin_lock_irq(&rhp->lock);
+ ret = idr_get_new_above(idr, handle, id, &newid);
+ BUG_ON(newid != id);
+ spin_unlock_irq(&rhp->lock);
+ } while (ret == -EAGAIN);
+
+ return ret;
+}
+
+static inline void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id)
+{
+ spin_lock_irq(&rhp->lock);
+ idr_remove(idr, id);
+ spin_unlock_irq(&rhp->lock);
+}
+
+struct c4iw_pd {
+ struct ib_pd ibpd;
+ u32 pdid;
+ struct c4iw_dev *rhp;
+};
+
+static inline struct c4iw_pd *to_c4iw_pd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct c4iw_pd, ibpd);
+}
+
+struct tpt_attributes {
+ u64 len;
+ u64 va_fbo;
+ enum fw_ri_mem_perms perms;
+ u32 stag;
+ u32 pdid;
+ u32 qpid;
+ u32 pbl_addr;
+ u32 pbl_size;
+ u32 state:1;
+ u32 type:2;
+ u32 rsvd:1;
+ u32 remote_invaliate_disable:1;
+ u32 zbva:1;
+ u32 mw_bind_enable:1;
+ u32 page_size:5;
+};
+
+struct c4iw_mr {
+ struct ib_mr ibmr;
+ struct ib_umem *umem;
+ struct c4iw_dev *rhp;
+ u64 kva;
+ struct tpt_attributes attr;
+};
+
+static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct c4iw_mr, ibmr);
+}
+
+struct c4iw_mw {
+ struct ib_mw ibmw;
+ struct c4iw_dev *rhp;
+ u64 kva;
+ struct tpt_attributes attr;
+};
+
+static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
+{
+ return container_of(ibmw, struct c4iw_mw, ibmw);
+}
+
+struct c4iw_fr_page_list {
+ struct ib_fast_reg_page_list ibpl;
+ DECLARE_PCI_UNMAP_ADDR(mapping);
+ dma_addr_t dma_addr;
+ struct c4iw_dev *dev;
+ int size;
+};
+
+static inline struct c4iw_fr_page_list *to_c4iw_fr_page_list(
+ struct ib_fast_reg_page_list *ibpl)
+{
+ return container_of(ibpl, struct c4iw_fr_page_list, ibpl);
+}
+
+struct c4iw_cq {
+ struct ib_cq ibcq;
+ struct c4iw_dev *rhp;
+ struct t4_cq cq;
+ spinlock_t lock;
+ atomic_t refcnt;
+ wait_queue_head_t wait;
+};
+
+static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct c4iw_cq, ibcq);
+}
+
+struct c4iw_mpa_attributes {
+ u8 initiator;
+ u8 recv_marker_enabled;
+ u8 xmit_marker_enabled;
+ u8 crc_enabled;
+ u8 version;
+ u8 p2p_type;
+};
+
+struct c4iw_qp_attributes {
+ u32 scq;
+ u32 rcq;
+ u32 sq_num_entries;
+ u32 rq_num_entries;
+ u32 sq_max_sges;
+ u32 sq_max_sges_rdma_write;
+ u32 rq_max_sges;
+ u32 state;
+ u8 enable_rdma_read;
+ u8 enable_rdma_write;
+ u8 enable_bind;
+ u8 enable_mmid0_fastreg;
+ u32 max_ord;
+ u32 max_ird;
+ u32 pd;
+ u32 next_state;
+ char terminate_buffer[52];
+ u32 terminate_msg_len;
+ u8 is_terminate_local;
+ struct c4iw_mpa_attributes mpa_attr;
+ struct c4iw_ep *llp_stream_handle;
+};
+
+struct c4iw_qp {
+ struct ib_qp ibqp;
+ struct c4iw_dev *rhp;
+ struct c4iw_ep *ep;
+ struct c4iw_qp_attributes attr;
+ struct t4_wq wq;
+ spinlock_t lock;
+ atomic_t refcnt;
+ wait_queue_head_t wait;
+ struct timer_list timer;
+};
+
+static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct c4iw_qp, ibqp);
+}
+
+struct c4iw_ucontext {
+ struct ib_ucontext ibucontext;
+ struct c4iw_dev_ucontext uctx;
+ u32 key;
+ spinlock_t mmap_lock;
+ struct list_head mmaps;
+};
+
+static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
+{
+ return container_of(c, struct c4iw_ucontext, ibucontext);
+}
+
+struct c4iw_mm_entry {
+ struct list_head entry;
+ u64 addr;
+ u32 key;
+ unsigned len;
+};
+
+static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
+ u32 key, unsigned len)
+{
+ struct list_head *pos, *nxt;
+ struct c4iw_mm_entry *mm;
+
+ spin_lock(&ucontext->mmap_lock);
+ list_for_each_safe(pos, nxt, &ucontext->mmaps) {
+
+ mm = list_entry(pos, struct c4iw_mm_entry, entry);
+ if (mm->key == key && mm->len == len) {
+ list_del_init(&mm->entry);
+ spin_unlock(&ucontext->mmap_lock);
+ PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
+ key, (unsigned long long) mm->addr, mm->len);
+ return mm;
+ }
+ }
+ spin_unlock(&ucontext->mmap_lock);
+ return NULL;
+}
+
+static inline void insert_mmap(struct c4iw_ucontext *ucontext,
+ struct c4iw_mm_entry *mm)
+{
+ spin_lock(&ucontext->mmap_lock);
+ PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
+ mm->key, (unsigned long long) mm->addr, mm->len);
+ list_add_tail(&mm->entry, &ucontext->mmaps);
+ spin_unlock(&ucontext->mmap_lock);
+}
+
+enum c4iw_qp_attr_mask {
+ C4IW_QP_ATTR_NEXT_STATE = 1 << 0,
+ C4IW_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
+ C4IW_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
+ C4IW_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
+ C4IW_QP_ATTR_MAX_ORD = 1 << 11,
+ C4IW_QP_ATTR_MAX_IRD = 1 << 12,
+ C4IW_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
+ C4IW_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
+ C4IW_QP_ATTR_MPA_ATTR = 1 << 24,
+ C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
+ C4IW_QP_ATTR_VALID_MODIFY = (C4IW_QP_ATTR_ENABLE_RDMA_READ |
+ C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
+ C4IW_QP_ATTR_MAX_ORD |
+ C4IW_QP_ATTR_MAX_IRD |
+ C4IW_QP_ATTR_LLP_STREAM_HANDLE |
+ C4IW_QP_ATTR_STREAM_MSG_BUFFER |
+ C4IW_QP_ATTR_MPA_ATTR |
+ C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE)
+};
+
+int c4iw_modify_qp(struct c4iw_dev *rhp,
+ struct c4iw_qp *qhp,
+ enum c4iw_qp_attr_mask mask,
+ struct c4iw_qp_attributes *attrs,
+ int internal);
+
+enum c4iw_qp_state {
+ C4IW_QP_STATE_IDLE,
+ C4IW_QP_STATE_RTS,
+ C4IW_QP_STATE_ERROR,
+ C4IW_QP_STATE_TERMINATE,
+ C4IW_QP_STATE_CLOSING,
+ C4IW_QP_STATE_TOT
+};
+
+static inline int c4iw_convert_state(enum ib_qp_state ib_state)
+{
+ switch (ib_state) {
+ case IB_QPS_RESET:
+ case IB_QPS_INIT:
+ return C4IW_QP_STATE_IDLE;
+ case IB_QPS_RTS:
+ return C4IW_QP_STATE_RTS;
+ case IB_QPS_SQD:
+ return C4IW_QP_STATE_CLOSING;
+ case IB_QPS_SQE:
+ return C4IW_QP_STATE_TERMINATE;
+ case IB_QPS_ERR:
+ return C4IW_QP_STATE_ERROR;
+ default:
+ return -1;
+ }
+}
+
+static inline u32 c4iw_ib_to_tpt_access(int a)
+{
+ return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
+ (a & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0) |
+ (a & IB_ACCESS_LOCAL_WRITE ? FW_RI_MEM_ACCESS_LOCAL_WRITE : 0) |
+ FW_RI_MEM_ACCESS_LOCAL_READ;
+}
+
+static inline u32 c4iw_ib_to_tpt_bind_access(int acc)
+{
+ return (acc & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
+ (acc & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0);
+}
+
+enum c4iw_mmid_state {
+ C4IW_STAG_STATE_VALID,
+ C4IW_STAG_STATE_INVALID
+};
+
+#define C4IW_NODE_DESC "cxgb4 Chelsio Communications"
+
+#define MPA_KEY_REQ "MPA ID Req Frame"
+#define MPA_KEY_REP "MPA ID Rep Frame"
+
+#define MPA_MAX_PRIVATE_DATA 256
+#define MPA_REJECT 0x20
+#define MPA_CRC 0x40
+#define MPA_MARKERS 0x80
+#define MPA_FLAGS_MASK 0xE0
+
+#define c4iw_put_ep(ep) { \
+ PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__, \
+ ep, atomic_read(&((ep)->kref.refcount))); \
+ WARN_ON(atomic_read(&((ep)->kref.refcount)) < 1); \
+ kref_put(&((ep)->kref), _c4iw_free_ep); \
+}
+
+#define c4iw_get_ep(ep) { \
+ PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __func__, __LINE__, \
+ ep, atomic_read(&((ep)->kref.refcount))); \
+ kref_get(&((ep)->kref)); \
+}
+void _c4iw_free_ep(struct kref *kref);
+
+struct mpa_message {
+ u8 key[16];
+ u8 flags;
+ u8 revision;
+ __be16 private_data_size;
+ u8 private_data[0];
+};
+
+struct terminate_message {
+ u8 layer_etype;
+ u8 ecode;
+ __be16 hdrct_rsvd;
+ u8 len_hdrs[0];
+};
+
+#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)
+
+enum c4iw_layers_types {
+ LAYER_RDMAP = 0x00,
+ LAYER_DDP = 0x10,
+ LAYER_MPA = 0x20,
+ RDMAP_LOCAL_CATA = 0x00,
+ RDMAP_REMOTE_PROT = 0x01,
+ RDMAP_REMOTE_OP = 0x02,
+ DDP_LOCAL_CATA = 0x00,
+ DDP_TAGGED_ERR = 0x01,
+ DDP_UNTAGGED_ERR = 0x02,
+ DDP_LLP = 0x03
+};
+
+enum c4iw_rdma_ecodes {
+ RDMAP_INV_STAG = 0x00,
+ RDMAP_BASE_BOUNDS = 0x01,
+ RDMAP_ACC_VIOL = 0x02,
+ RDMAP_STAG_NOT_ASSOC = 0x03,
+ RDMAP_TO_WRAP = 0x04,
+ RDMAP_INV_VERS = 0x05,
+ RDMAP_INV_OPCODE = 0x06,
+ RDMAP_STREAM_CATA = 0x07,
+ RDMAP_GLOBAL_CATA = 0x08,
+ RDMAP_CANT_INV_STAG = 0x09,
+ RDMAP_UNSPECIFIED = 0xff
+};
+
+enum c4iw_ddp_ecodes {
+ DDPT_INV_STAG = 0x00,
+ DDPT_BASE_BOUNDS = 0x01,
+ DDPT_STAG_NOT_ASSOC = 0x02,
+ DDPT_TO_WRAP = 0x03,
+ DDPT_INV_VERS = 0x04,
+ DDPU_INV_QN = 0x01,
+ DDPU_INV_MSN_NOBUF = 0x02,
+ DDPU_INV_MSN_RANGE = 0x03,
+ DDPU_INV_MO = 0x04,
+ DDPU_MSG_TOOBIG = 0x05,
+ DDPU_INV_VERS = 0x06
+};
+
+enum c4iw_mpa_ecodes {
+ MPA_CRC_ERR = 0x02,
+ MPA_MARKER_ERR = 0x03
+};
+
+enum c4iw_ep_state {
+ IDLE = 0,
+ LISTEN,
+ CONNECTING,
+ MPA_REQ_WAIT,
+ MPA_REQ_SENT,
+ MPA_REQ_RCVD,
+ MPA_REP_SENT,
+ FPDU_MODE,
+ ABORTING,
+ CLOSING,
+ MORIBUND,
+ DEAD,
+};
+
+enum c4iw_ep_flags {
+ PEER_ABORT_IN_PROGRESS = 0,
+ ABORT_REQ_IN_PROGRESS = 1,
+ RELEASE_RESOURCES = 2,
+ CLOSE_SENT = 3,
+};
+
+struct c4iw_ep_common {
+ struct iw_cm_id *cm_id;
+ struct c4iw_qp *qp;
+ struct c4iw_dev *dev;
+ enum c4iw_ep_state state;
+ struct kref kref;
+ spinlock_t lock;
+ struct sockaddr_in local_addr;
+ struct sockaddr_in remote_addr;
+ wait_queue_head_t waitq;
+ int rpl_done;
+ int rpl_err;
+ unsigned long flags;
+};
+
+struct c4iw_listen_ep {
+ struct c4iw_ep_common com;
+ unsigned int stid;
+ int backlog;
+};
+
+struct c4iw_ep {
+ struct c4iw_ep_common com;
+ struct c4iw_ep *parent_ep;
+ struct timer_list timer;
+ struct list_head entry;
+ unsigned int atid;
+ u32 hwtid;
+ u32 snd_seq;
+ u32 rcv_seq;
+ struct l2t_entry *l2t;
+ struct dst_entry *dst;
+ struct sk_buff *mpa_skb;
+ struct c4iw_mpa_attributes mpa_attr;
+ u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA];
+ unsigned int mpa_pkt_len;
+ u32 ird;
+ u32 ord;
+ u32 smac_idx;
+ u32 tx_chan;
+ u32 mtu;
+ u16 mss;
+ u16 emss;
+ u16 plen;
+ u16 rss_qid;
+ u16 txq_idx;
+ u8 tos;
+};
+
+static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
+{
+ return cm_id->provider_data;
+}
+
+static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
+{
+ return cm_id->provider_data;
+}
+
+static inline int compute_wscale(int win)
+{
+ int wscale = 0;
+
+ while (wscale < 14 && (65535<<wscale) < win)
+ wscale++;
+ return wscale;
+}
+
+typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct sk_buff *skb);
+
+int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
+ struct l2t_entry *l2t);
+void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qpid,
+ struct c4iw_dev_ucontext *uctx);
+u32 c4iw_get_resource(struct kfifo *fifo, spinlock_t *lock);
+void c4iw_put_resource(struct kfifo *fifo, u32 entry, spinlock_t *lock);
+int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
+int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
+int c4iw_pblpool_create(struct c4iw_rdev *rdev);
+int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
+void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
+void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
+void c4iw_destroy_resource(struct c4iw_resource *rscp);
+int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
+int c4iw_register_device(struct c4iw_dev *dev);
+void c4iw_unregister_device(struct c4iw_dev *dev);
+int __init c4iw_cm_init(void);
+void __exit c4iw_cm_term(void);
+void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
+ struct c4iw_dev_ucontext *uctx);
+void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
+ struct c4iw_dev_ucontext *uctx);
+int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr);
+int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr);
+int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
+ struct ib_mw_bind *mw_bind);
+int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
+int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
+int c4iw_destroy_listen(struct iw_cm_id *cm_id);
+int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
+int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
+void c4iw_qp_add_ref(struct ib_qp *qp);
+void c4iw_qp_rem_ref(struct ib_qp *qp);
+void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *page_list);
+struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(
+ struct ib_device *device,
+ int page_list_len);
+struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth);
+int c4iw_dealloc_mw(struct ib_mw *mw);
+struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd);
+struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
+ u64 length, u64 virt, int acc,
+ struct ib_udata *udata);
+struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
+struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
+ struct ib_phys_buf *buffer_list,
+ int num_phys_buf,
+ int acc,
+ u64 *iova_start);
+int c4iw_reregister_phys_mem(struct ib_mr *mr,
+ int mr_rereg_mask,
+ struct ib_pd *pd,
+ struct ib_phys_buf *buffer_list,
+ int num_phys_buf,
+ int acc, u64 *iova_start);
+int c4iw_dereg_mr(struct ib_mr *ib_mr);
+int c4iw_destroy_cq(struct ib_cq *ib_cq);
+struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
+ int vector,
+ struct ib_ucontext *ib_context,
+ struct ib_udata *udata);
+int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
+int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
+int c4iw_destroy_qp(struct ib_qp *ib_qp);
+struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
+ struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata);
+int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata);
+struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn);
+u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size);
+void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
+u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
+void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
+int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);
+void c4iw_flush_hw_cq(struct t4_cq *cq);
+void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
+void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
+int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
+int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
+int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count);
+int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
+u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
+int c4iw_post_zb_read(struct c4iw_qp *qhp);
+int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
+u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
+void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
+ struct c4iw_dev_ucontext *uctx);
+u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
+void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
+ struct c4iw_dev_ucontext *uctx);
+void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);
+
+extern struct cxgb4_client t4c_client;
+extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
+extern int c4iw_max_read_depth;
+
+#endif
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
new file mode 100644
index 000000000000..e54ff6d25691
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -0,0 +1,811 @@
+/*
+ * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_umem.h>
+#include <asm/atomic.h>
+
+#include "iw_cxgb4.h"
+
+#define T4_ULPTX_MIN_IO 32
+#define C4IW_MAX_INLINE_SIZE 96
+
+static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
+ void *data)
+{
+ struct sk_buff *skb;
+ struct ulp_mem_io *req;
+ struct ulptx_idata *sc;
+ u8 wr_len, *to_dp, *from_dp;
+ int copy_len, num_wqe, i, ret = 0;
+ struct c4iw_wr_wait wr_wait;
+
+ addr &= 0x7FFFFFF;
+ PDBG("%s addr 0x%x len %u\n", __func__, addr, len);
+ num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
+ c4iw_init_wr_wait(&wr_wait);
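+ /*
+  * Issue one ULP_TX memory-write work request per C4IW_MAX_INLINE_SIZE
+  * chunk; only the last WR requests a completion, which wakes wr_wait.
+  */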
+ for (i = 0; i < num_wqe; i++) {
+
+ copy_len = len > C4IW_MAX_INLINE_SIZE ? C4IW_MAX_INLINE_SIZE :
+ len;
+ wr_len = roundup(sizeof *req + sizeof *sc +
+ roundup(copy_len, T4_ULPTX_MIN_IO), 16);
+
+ skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+ if (!skb)
+ return -ENOMEM;
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
+
+ req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
+ memset(req, 0, wr_len);
+ INIT_ULPTX_WR(req, wr_len, 0, 0);
+
+ if (i == (num_wqe-1)) {
+ req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
+ FW_WR_COMPL(1));
+ req->wr.wr_lo = (__force __be64)&wr_wait;
+ } else
+ req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR));
+ req->wr.wr_mid = cpu_to_be32(
+ FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
+
+ req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE) | (1<<23));
+ req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(
+ DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
+ req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr),
+ 16));
+ req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr + i * 3));
+
+ sc = (struct ulptx_idata *)(req + 1);
+ sc->cmd_more = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_IMM));
+ sc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));
+
+ to_dp = (u8 *)(sc + 1);
+ from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
+ if (data)
+ memcpy(to_dp, from_dp, copy_len);
+ else
+ memset(to_dp, 0, copy_len);
+ if (copy_len % T4_ULPTX_MIN_IO)
+ memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
+ (copy_len % T4_ULPTX_MIN_IO));
+ ret = c4iw_ofld_send(rdev, skb);
+ if (ret)
+ return ret;
+ len -= C4IW_MAX_INLINE_SIZE;
+ }
+
+ wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
+ if (!wr_wait.done) {
+ printk(KERN_ERR MOD "Device %s not responding!\n",
+ pci_name(rdev->lldi.pdev));
+ rdev->flags |= T4_FATAL_ERROR;
+ ret = -EIO;
+ } else
+ ret = wr_wait.ret;
+ return ret;
+}
+
+/*
+ * Build and write a TPT entry.
+ * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
+ * pbl_size and pbl_addr
+ * OUT: stag index
+ */
+static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
+ u32 *stag, u8 stag_state, u32 pdid,
+ enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
+ int bind_enabled, u32 zbva, u64 to,
+ u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr)
+{
+ int err;
+ struct fw_ri_tpte tpt;
+ u32 stag_idx;
+ static atomic_t key;
+
+ if (c4iw_fatal_error(rdev))
+ return -EIO;
+
+ stag_state = stag_state > 0;
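+ /* A stag is (TPT index << 8) | 8-bit key; extract the TPT index. */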
+ stag_idx = (*stag) >> 8;
+
+ if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
+ stag_idx = c4iw_get_resource(&rdev->resource.tpt_fifo,
+ &rdev->resource.tpt_fifo_lock);
+ if (!stag_idx)
+ return -ENOMEM;
+ *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
+ }
+ PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
+ __func__, stag_state, type, pdid, stag_idx);
+
+ /* write TPT entry */
+ if (reset_tpt_entry)
+ memset(&tpt, 0, sizeof(tpt));
+ else {
+ tpt.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID |
+ V_FW_RI_TPTE_STAGKEY((*stag & M_FW_RI_TPTE_STAGKEY)) |
+ V_FW_RI_TPTE_STAGSTATE(stag_state) |
+ V_FW_RI_TPTE_STAGTYPE(type) | V_FW_RI_TPTE_PDID(pdid));
+ tpt.locread_to_qpid = cpu_to_be32(V_FW_RI_TPTE_PERM(perm) |
+ (bind_enabled ? F_FW_RI_TPTE_MWBINDEN : 0) |
+ V_FW_RI_TPTE_ADDRTYPE((zbva ? FW_RI_ZERO_BASED_TO :
+ FW_RI_VA_BASED_TO))|
+ V_FW_RI_TPTE_PS(page_size));
+ tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
+ V_FW_RI_TPTE_PBLADDR(PBL_OFF(rdev, pbl_addr)>>3));
+ tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
+ tpt.va_hi = cpu_to_be32((u32)(to >> 32));
+ tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
+ tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
+ tpt.len_hi = cpu_to_be32((u32)(len >> 32));
+ }
+ err = write_adapter_mem(rdev, stag_idx +
+ (rdev->lldi.vr->stag.start >> 5),
+ sizeof(tpt), &tpt);
+
+ if (reset_tpt_entry)
+ c4iw_put_resource(&rdev->resource.tpt_fifo, stag_idx,
+ &rdev->resource.tpt_fifo_lock);
+ return err;
+}
+
+static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
+ u32 pbl_addr, u32 pbl_size)
+{
+ int err;
+
+ PDBG("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
+ __func__, pbl_addr, rdev->lldi.vr->pbl.start,
+ pbl_size);
+
+ err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl);
+ return err;
+}
+
+static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
+ u32 pbl_addr)
+{
+ return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
+ pbl_size, pbl_addr);
+}
+
+static int allocate_window(struct c4iw_rdev *rdev, u32 *stag, u32 pdid)
+{
+ *stag = T4_STAG_UNSET;
+ return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
+ 0UL, 0, 0, 0, 0);
+}
+
+static int deallocate_window(struct c4iw_rdev *rdev, u32 stag)
+{
+ return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
+ 0);
+}
+
+static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
+ u32 pbl_size, u32 pbl_addr)
+{
+ *stag = T4_STAG_UNSET;
+ return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
+ 0UL, 0, 0, pbl_size, pbl_addr);
+}
+
+static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
+{
+ u32 mmid;
+
+ mhp->attr.state = 1;
+ mhp->attr.stag = stag;
+ mmid = stag >> 8;
+ mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
+ PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
+ return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
+}
+
+static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
+ struct c4iw_mr *mhp, int shift)
+{
+ u32 stag = T4_STAG_UNSET;
+ int ret;
+
+ ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
+ FW_RI_STAG_NSMR, mhp->attr.perms,
+ mhp->attr.mw_bind_enable, mhp->attr.zbva,
+ mhp->attr.va_fbo, mhp->attr.len, shift - 12,
+ mhp->attr.pbl_size, mhp->attr.pbl_addr);
+ if (ret)
+ return ret;
+
+ ret = finish_mem_reg(mhp, stag);
+ if (ret)
+ dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
+ mhp->attr.pbl_addr);
+ return ret;
+}
+
+static int reregister_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
+ struct c4iw_mr *mhp, int shift, int npages)
+{
+ u32 stag;
+ int ret;
+
+ if (npages > mhp->attr.pbl_size)
+ return -ENOMEM;
+
+ stag = mhp->attr.stag;
+ ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
+ FW_RI_STAG_NSMR, mhp->attr.perms,
+ mhp->attr.mw_bind_enable, mhp->attr.zbva,
+ mhp->attr.va_fbo, mhp->attr.len, shift - 12,
+ mhp->attr.pbl_size, mhp->attr.pbl_addr);
+ if (ret)
+ return ret;
+
+ ret = finish_mem_reg(mhp, stag);
+ if (ret)
+ dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
+ mhp->attr.pbl_addr);
+
+ return ret;
+}
+
+static int alloc_pbl(struct c4iw_mr *mhp, int npages)
+{
+ mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
+ npages << 3);
+
+ if (!mhp->attr.pbl_addr)
+ return -ENOMEM;
+
+ mhp->attr.pbl_size = npages;
+
+ return 0;
+}
+
+static int build_phys_page_list(struct ib_phys_buf *buffer_list,
+ int num_phys_buf, u64 *iova_start,
+ u64 *total_size, int *npages,
+ int *shift, __be64 **page_list)
+{
+ u64 mask;
+ int i, j, n;
+
+ mask = 0;
+ *total_size = 0;
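+ /*
+  * Accumulate buffer addresses and sizes into a mask so the largest
+  * page shift covering every buffer can be found below.
+  */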
+ for (i = 0; i < num_phys_buf; ++i) {
+ if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
+ return -EINVAL;
+ if (i != 0 && i != num_phys_buf - 1 &&
+ (buffer_list[i].size & ~PAGE_MASK))
+ return -EINVAL;
+ *total_size += buffer_list[i].size;
+ if (i > 0)
+ mask |= buffer_list[i].addr;
+ else
+ mask |= buffer_list[i].addr & PAGE_MASK;
+ if (i != num_phys_buf - 1)
+ mask |= buffer_list[i].addr + buffer_list[i].size;
+ else
+ mask |= (buffer_list[i].addr + buffer_list[i].size +
+ PAGE_SIZE - 1) & PAGE_MASK;
+ }
+
+ if (*total_size > 0xFFFFFFFFULL)
+ return -ENOMEM;
+
+ /* Find largest page shift we can use to cover buffers */
+ for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift))
+ if ((1ULL << *shift) & mask)
+ break;
+
+ buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1);
+ buffer_list[0].addr &= ~0ull << *shift;
+
+ *npages = 0;
+ for (i = 0; i < num_phys_buf; ++i)
+ *npages += (buffer_list[i].size +
+ (1ULL << *shift) - 1) >> *shift;
+
+ if (!*npages)
+ return -EINVAL;
+
+ *page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL);
+ if (!*page_list)
+ return -ENOMEM;
+
+ n = 0;
+ for (i = 0; i < num_phys_buf; ++i)
+ for (j = 0;
+ j < (buffer_list[i].size + (1ULL << *shift) - 1) >> *shift;
+ ++j)
+ (*page_list)[n++] = cpu_to_be64(buffer_list[i].addr +
+ ((u64) j << *shift));
+
+ PDBG("%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d\n",
+ __func__, (unsigned long long)*iova_start,
+ (unsigned long long)mask, *shift, (unsigned long long)*total_size,
+ *npages);
+
+ return 0;
+
+}
+
+int c4iw_reregister_phys_mem(struct ib_mr *mr, int mr_rereg_mask,
+ struct ib_pd *pd, struct ib_phys_buf *buffer_list,
+ int num_phys_buf, int acc, u64 *iova_start)
+{
+
+ struct c4iw_mr mh, *mhp;
+ struct c4iw_pd *php;
+ struct c4iw_dev *rhp;
+ __be64 *page_list = NULL;
+ int shift = 0;
+ u64 total_size;
+ int npages;
+ int ret;
+
+ PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);
+
+ /* Fail if any memory windows are still bound to this MR */
+ if (atomic_read(&mr->usecnt))
+ return -EINVAL;
+
+ mhp = to_c4iw_mr(mr);
+ rhp = mhp->rhp;
+ php = to_c4iw_pd(mr->pd);
+
+ /* make sure we are on the same adapter */
+ if (rhp != php->rhp)
+ return -EINVAL;
+
+ memcpy(&mh, mhp, sizeof *mhp);
+
+ if (mr_rereg_mask & IB_MR_REREG_PD)
+ php = to_c4iw_pd(pd);
+ if (mr_rereg_mask & IB_MR_REREG_ACCESS) {
+ mh.attr.perms = c4iw_ib_to_tpt_access(acc);
+ mh.attr.mw_bind_enable = (acc & IB_ACCESS_MW_BIND) ==
+ IB_ACCESS_MW_BIND;
+ }
+ if (mr_rereg_mask & IB_MR_REREG_TRANS) {
+ ret = build_phys_page_list(buffer_list, num_phys_buf,
+ iova_start,
+ &total_size, &npages,
+ &shift, &page_list);
+ if (ret)
+ return ret;
+ }
+
+ ret = reregister_mem(rhp, php, &mh, shift, npages);
+ kfree(page_list);
+ if (ret)
+ return ret;
+ if (mr_rereg_mask & IB_MR_REREG_PD)
+ mhp->attr.pdid = php->pdid;
+ if (mr_rereg_mask & IB_MR_REREG_ACCESS)
+ mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
+ if (mr_rereg_mask & IB_MR_REREG_TRANS) {
+ mhp->attr.zbva = 0;
+ mhp->attr.va_fbo = *iova_start;
+ mhp->attr.page_size = shift - 12;
+ mhp->attr.len = (u32) total_size;
+ mhp->attr.pbl_size = npages;
+ }
+
+ return 0;
+}
+
+struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
+ struct ib_phys_buf *buffer_list,
+ int num_phys_buf, int acc, u64 *iova_start)
+{
+ __be64 *page_list;
+ int shift;
+ u64 total_size;
+ int npages;
+ struct c4iw_dev *rhp;
+ struct c4iw_pd *php;
+ struct c4iw_mr *mhp;
+ int ret;
+
+ PDBG("%s ib_pd %p\n", __func__, pd);
+ php = to_c4iw_pd(pd);
+ rhp = php->rhp;
+
+ mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
+ if (!mhp)
+ return ERR_PTR(-ENOMEM);
+
+ mhp->rhp = rhp;
+
+ /* First check that we have enough alignment */
+ if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (num_phys_buf > 1 &&
+ ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
+ &total_size, &npages, &shift,
+ &page_list);
+ if (ret)
+ goto err;
+
+ ret = alloc_pbl(mhp, npages);
+ if (ret) {
+ kfree(page_list);
+ goto err_pbl;
+ }
+
+ ret = write_pbl(&mhp->rhp->rdev, page_list, mhp->attr.pbl_addr,
+ npages);
+ kfree(page_list);
+ if (ret)
+ goto err_pbl;
+
+ mhp->attr.pdid = php->pdid;
+ mhp->attr.zbva = 0;
+
+ mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
+ mhp->attr.va_fbo = *iova_start;
+ mhp->attr.page_size = shift - 12;
+
+ mhp->attr.len = (u32) total_size;
+ mhp->attr.pbl_size = npages;
+ ret = register_mem(rhp, php, mhp, shift);
+ if (ret)
+ goto err_pbl;
+
+ return &mhp->ibmr;
+
+err_pbl:
+ c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
+ mhp->attr.pbl_size << 3);
+
+err:
+ kfree(mhp);
+ return ERR_PTR(ret);
+
+}
+
+struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
+{
+ struct c4iw_dev *rhp;
+ struct c4iw_pd *php;
+ struct c4iw_mr *mhp;
+ int ret;
+ u32 stag = T4_STAG_UNSET;
+
+ PDBG("%s ib_pd %p\n", __func__, pd);
+ php = to_c4iw_pd(pd);
+ rhp = php->rhp;
+
+ mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
+ if (!mhp)
+ return ERR_PTR(-ENOMEM);
+
+ mhp->rhp = rhp;
+ mhp->attr.pdid = php->pdid;
+ mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
+ mhp->attr.mw_bind_enable = (acc&IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND;
+ mhp->attr.zbva = 0;
+ mhp->attr.va_fbo = 0;
+ mhp->attr.page_size = 0;
+ mhp->attr.len = ~0UL;
+ mhp->attr.pbl_size = 0;
+
+ ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
+ FW_RI_STAG_NSMR, mhp->attr.perms,
+ mhp->attr.mw_bind_enable, 0, 0, ~0UL, 0, 0, 0);
+ if (ret)
+ goto err1;
+
+ ret = finish_mem_reg(mhp, stag);
+ if (ret)
+ goto err2;
+ return &mhp->ibmr;
+err2:
+ dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
+ mhp->attr.pbl_addr);
+err1:
+ kfree(mhp);
+ return ERR_PTR(ret);
+}
+
+struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt, int acc, struct ib_udata *udata)
+{
+ __be64 *pages;
+ int shift, n, len;
+ int i, j, k;
+ int err = 0;
+ struct ib_umem_chunk *chunk;
+ struct c4iw_dev *rhp;
+ struct c4iw_pd *php;
+ struct c4iw_mr *mhp;
+
+ PDBG("%s ib_pd %p\n", __func__, pd);
+
+ if (length == ~0ULL)
+ return ERR_PTR(-EINVAL);
+
+ if ((length + start) < start)
+ return ERR_PTR(-EINVAL);
+
+ php = to_c4iw_pd(pd);
+ rhp = php->rhp;
+ mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
+ if (!mhp)
+ return ERR_PTR(-ENOMEM);
+
+ mhp->rhp = rhp;
+
+ mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
+ if (IS_ERR(mhp->umem)) {
+ err = PTR_ERR(mhp->umem);
+ kfree(mhp);
+ return ERR_PTR(err);
+ }
+
+ shift = ffs(mhp->umem->page_size) - 1;
+
+ n = 0;
+ list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
+ n += chunk->nents;
+
+ err = alloc_pbl(mhp, n);
+ if (err)
+ goto err;
+
+ pages = (__be64 *) __get_free_page(GFP_KERNEL);
+ if (!pages) {
+ err = -ENOMEM;
+ goto err_pbl;
+ }
+
+ i = n = 0;
+
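+ /*
+  * Walk the umem chunks, packing page DMA addresses into a scratch page
+  * and flushing it to the adapter PBL whenever it fills up.
+  */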
+ list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
+ for (j = 0; j < chunk->nmap; ++j) {
+ len = sg_dma_len(&chunk->page_list[j]) >> shift;
+ for (k = 0; k < len; ++k) {
+ pages[i++] = cpu_to_be64(sg_dma_address(
+ &chunk->page_list[j]) +
+ mhp->umem->page_size * k);
+ if (i == PAGE_SIZE / sizeof *pages) {
+ err = write_pbl(&mhp->rhp->rdev,
+ pages,
+ mhp->attr.pbl_addr + (n << 3), i);
+ if (err)
+ goto pbl_done;
+ n += i;
+ i = 0;
+ }
+ }
+ }
+
+ if (i)
+ err = write_pbl(&mhp->rhp->rdev, pages,
+ mhp->attr.pbl_addr + (n << 3), i);
+
+pbl_done:
+ free_page((unsigned long) pages);
+ if (err)
+ goto err_pbl;
+
+ mhp->attr.pdid = php->pdid;
+ mhp->attr.zbva = 0;
+ mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
+ mhp->attr.va_fbo = virt;
+ mhp->attr.page_size = shift - 12;
+ mhp->attr.len = (u32) length;
+
+ err = register_mem(rhp, php, mhp, shift);
+ if (err)
+ goto err_pbl;
+
+ return &mhp->ibmr;
+
+err_pbl:
+ c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
+ mhp->attr.pbl_size << 3);
+
+err:
+ ib_umem_release(mhp->umem);
+ kfree(mhp);
+ return ERR_PTR(err);
+}
+
+struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd)
+{
+ struct c4iw_dev *rhp;
+ struct c4iw_pd *php;
+ struct c4iw_mw *mhp;
+ u32 mmid;
+ u32 stag = 0;
+ int ret;
+
+ php = to_c4iw_pd(pd);
+ rhp = php->rhp;
+ mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
+ if (!mhp)
+ return ERR_PTR(-ENOMEM);
+ ret = allocate_window(&rhp->rdev, &stag, php->pdid);
+ if (ret) {
+ kfree(mhp);
+ return ERR_PTR(ret);
+ }
+ mhp->rhp = rhp;
+ mhp->attr.pdid = php->pdid;
+ mhp->attr.type = FW_RI_STAG_MW;
+ mhp->attr.stag = stag;
+ mmid = (stag) >> 8;
+ mhp->ibmw.rkey = stag;
+ if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
+ deallocate_window(&rhp->rdev, mhp->attr.stag);
+ kfree(mhp);
+ return ERR_PTR(-ENOMEM);
+ }
+ PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
+ return &(mhp->ibmw);
+}
+
+int c4iw_dealloc_mw(struct ib_mw *mw)
+{
+ struct c4iw_dev *rhp;
+ struct c4iw_mw *mhp;
+ u32 mmid;
+
+ mhp = to_c4iw_mw(mw);
+ rhp = mhp->rhp;
+ mmid = (mw->rkey) >> 8;
+ deallocate_window(&rhp->rdev, mhp->attr.stag);
+ remove_handle(rhp, &rhp->mmidr, mmid);
+ kfree(mhp);
+ PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
+ return 0;
+}
+
+struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
+{
+ struct c4iw_dev *rhp;
+ struct c4iw_pd *php;
+ struct c4iw_mr *mhp;
+ u32 mmid;
+ u32 stag = 0;
+ int ret = 0;
+
+ php = to_c4iw_pd(pd);
+ rhp = php->rhp;
+ mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
+ if (!mhp)
+ goto err;
+
+ mhp->rhp = rhp;
+ ret = alloc_pbl(mhp, pbl_depth);
+ if (ret)
+ goto err1;
+ mhp->attr.pbl_size = pbl_depth;
+ ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
+ mhp->attr.pbl_size, mhp->attr.pbl_addr);
+ if (ret)
+ goto err2;
+ mhp->attr.pdid = php->pdid;
+ mhp->attr.type = FW_RI_STAG_NSMR;
+ mhp->attr.stag = stag;
+ mhp->attr.state = 1;
+ mmid = (stag) >> 8;
+ mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
+ if (insert_handle(rhp, &rhp->mmidr, mhp, mmid))
+ goto err3;
+
+ PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
+ return &(mhp->ibmr);
+err3:
+ dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
+ mhp->attr.pbl_addr);
+err2:
+ c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
+ mhp->attr.pbl_size << 3);
+err1:
+ kfree(mhp);
+err:
+ return ERR_PTR(ret);
+}
+
+struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
+ int page_list_len)
+{
+ struct c4iw_fr_page_list *c4pl;
+ struct c4iw_dev *dev = to_c4iw_dev(device);
+ dma_addr_t dma_addr;
+ int size = sizeof *c4pl + page_list_len * sizeof(u64);
+
+ if (page_list_len > T4_MAX_FR_DEPTH)
+ return ERR_PTR(-EINVAL);
+
+ c4pl = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev, size,
+ &dma_addr, GFP_KERNEL);
+ if (!c4pl)
+ return ERR_PTR(-ENOMEM);
+
+ pci_unmap_addr_set(c4pl, mapping, dma_addr);
+ c4pl->dma_addr = dma_addr;
+ c4pl->dev = dev;
+ c4pl->size = size;
+ c4pl->ibpl.page_list = (u64 *)(c4pl + 1);
+ c4pl->ibpl.max_page_list_len = page_list_len;
+
+ return &c4pl->ibpl;
+}
+
+void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl)
+{
+ struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl);
+
+ dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev, c4pl->size,
+ c4pl, pci_unmap_addr(c4pl, mapping));
+}
+
+int c4iw_dereg_mr(struct ib_mr *ib_mr)
+{
+ struct c4iw_dev *rhp;
+ struct c4iw_mr *mhp;
+ u32 mmid;
+
+ PDBG("%s ib_mr %p\n", __func__, ib_mr);
+ /* Fail if any memory windows are still bound to this MR */
+ if (atomic_read(&ib_mr->usecnt))
+ return -EINVAL;
+
+ mhp = to_c4iw_mr(ib_mr);
+ rhp = mhp->rhp;
+ mmid = mhp->attr.stag >> 8;
+ dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
+ mhp->attr.pbl_addr);
+ if (mhp->attr.pbl_size)
+ c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
+ mhp->attr.pbl_size << 3);
+ remove_handle(rhp, &rhp->mmidr, mmid);
+ if (mhp->kva)
+ kfree((void *) (unsigned long) mhp->kva);
+ if (mhp->umem)
+ ib_umem_release(mhp->umem);
+ PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
+ kfree(mhp);
+ return 0;
+}
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
new file mode 100644
index 000000000000..dfc49020bb9c
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -0,0 +1,518 @@
+/*
+ * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/rtnetlink.h>
+#include <linux/inetdevice.h>
+#include <linux/io.h>
+
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+
+#include <rdma/iw_cm.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_user_verbs.h>
+
+#include "iw_cxgb4.h"
+
+static int fastreg_support;
+module_param(fastreg_support, int, 0644);
+MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=0)");
+
+static int c4iw_modify_port(struct ib_device *ibdev,
+ u8 port, int port_modify_mask,
+ struct ib_port_modify *props)
+{
+ return -ENOSYS;
+}
+
+static struct ib_ah *c4iw_ah_create(struct ib_pd *pd,
+ struct ib_ah_attr *ah_attr)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static int c4iw_ah_destroy(struct ib_ah *ah)
+{
+ return -ENOSYS;
+}
+
+static int c4iw_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+ return -ENOSYS;
+}
+
+static int c4iw_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+ return -ENOSYS;
+}
+
+static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
+ u8 port_num, struct ib_wc *in_wc,
+ struct ib_grh *in_grh, struct ib_mad *in_mad,
+ struct ib_mad *out_mad)
+{
+ return -ENOSYS;
+}
+
+static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
+{
+ struct c4iw_dev *rhp = to_c4iw_dev(context->device);
+ struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
+ struct c4iw_mm_entry *mm, *tmp;
+
+ PDBG("%s context %p\n", __func__, context);
+ list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
+ kfree(mm);
+ c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
+ kfree(ucontext);
+ return 0;
+}
+
+static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
+ struct ib_udata *udata)
+{
+ struct c4iw_ucontext *context;
+ struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
+
+ PDBG("%s ibdev %p\n", __func__, ibdev);
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return ERR_PTR(-ENOMEM);
+ c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
+ INIT_LIST_HEAD(&context->mmaps);
+ spin_lock_init(&context->mmap_lock);
+ return &context->ibucontext;
+}
+
+static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+{
+ int len = vma->vm_end - vma->vm_start;
+ u32 key = vma->vm_pgoff << PAGE_SHIFT;
+ struct c4iw_rdev *rdev;
+ int ret = 0;
+ struct c4iw_mm_entry *mm;
+ struct c4iw_ucontext *ucontext;
+ u64 addr;
+
+ PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
+ key, len);
+
+ if (vma->vm_start & (PAGE_SIZE-1))
+ return -EINVAL;
+
+ rdev = &(to_c4iw_dev(context->device)->rdev);
+ ucontext = to_c4iw_ucontext(context);
+
+ mm = remove_mmap(ucontext, key, len);
+ if (!mm)
+ return -EINVAL;
+ addr = mm->addr;
+ kfree(mm);
+
+ if ((addr >= pci_resource_start(rdev->lldi.pdev, 2)) &&
+ (addr < (pci_resource_start(rdev->lldi.pdev, 2) +
+ pci_resource_len(rdev->lldi.pdev, 2)))) {
+
+ /*
+ * Map T4 DB register.
+ */
+ if (vma->vm_flags & VM_READ)
+ return -EPERM;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+ vma->vm_flags &= ~VM_MAYREAD;
+ ret = io_remap_pfn_range(vma, vma->vm_start,
+ addr >> PAGE_SHIFT,
+ len, vma->vm_page_prot);
+ } else {
+
+ /*
+ * Map WQ or CQ contig dma memory...
+ */
+ ret = remap_pfn_range(vma, vma->vm_start,
+ addr >> PAGE_SHIFT,
+ len, vma->vm_page_prot);
+ }
+
+ return ret;
+}
+
+static int c4iw_deallocate_pd(struct ib_pd *pd)
+{
+ struct c4iw_dev *rhp;
+ struct c4iw_pd *php;
+
+ php = to_c4iw_pd(pd);
+ rhp = php->rhp;
+ PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
+ c4iw_put_resource(&rhp->rdev.resource.pdid_fifo, php->pdid,
+ &rhp->rdev.resource.pdid_fifo_lock);
+ kfree(php);
+ return 0;
+}
+
+static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct c4iw_pd *php;
+ u32 pdid;
+ struct c4iw_dev *rhp;
+
+ PDBG("%s ibdev %p\n", __func__, ibdev);
+ rhp = to_c4iw_dev(ibdev);
+ pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_fifo,
+ &rhp->rdev.resource.pdid_fifo_lock);
+ if (!pdid)
+ return ERR_PTR(-EINVAL);
+ php = kzalloc(sizeof(*php), GFP_KERNEL);
+ if (!php) {
+ c4iw_put_resource(&rhp->rdev.resource.pdid_fifo, pdid,
+ &rhp->rdev.resource.pdid_fifo_lock);
+ return ERR_PTR(-ENOMEM);
+ }
+ php->pdid = pdid;
+ php->rhp = rhp;
+ if (context) {
+ if (ib_copy_to_udata(udata, &php->pdid, sizeof(u32))) {
+ c4iw_deallocate_pd(&php->ibpd);
+ return ERR_PTR(-EFAULT);
+ }
+ }
+ PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
+ return &php->ibpd;
+}
+
+static int c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+ u16 *pkey)
+{
+ PDBG("%s ibdev %p\n", __func__, ibdev);
+ *pkey = 0;
+ return 0;
+}
+
+static int c4iw_query_gid(struct ib_device *ibdev, u8 port, int index,
+ union ib_gid *gid)
+{
+ struct c4iw_dev *dev;
+
+ PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
+ __func__, ibdev, port, index, gid);
+ dev = to_c4iw_dev(ibdev);
+ BUG_ON(port == 0);
+ memset(&(gid->raw[0]), 0, sizeof(gid->raw));
+ memcpy(&(gid->raw[0]), dev->rdev.lldi.ports[port-1]->dev_addr, 6);
+ return 0;
+}
+
+static int c4iw_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *props)
+{
+
+ struct c4iw_dev *dev;
+ PDBG("%s ibdev %p\n", __func__, ibdev);
+
+ dev = to_c4iw_dev(ibdev);
+ memset(props, 0, sizeof *props);
+ memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
+ props->hw_ver = dev->rdev.lldi.adapter_type;
+ props->fw_ver = dev->rdev.lldi.fw_vers;
+ props->device_cap_flags = dev->device_cap_flags;
+ props->page_size_cap = T4_PAGESIZE_MASK;
+ props->vendor_id = (u32)dev->rdev.lldi.pdev->vendor;
+ props->vendor_part_id = (u32)dev->rdev.lldi.pdev->device;
+ props->max_mr_size = T4_MAX_MR_SIZE;
+ props->max_qp = T4_MAX_NUM_QP;
+ props->max_qp_wr = T4_MAX_QP_DEPTH;
+ props->max_sge = T4_MAX_RECV_SGE;
+ props->max_sge_rd = 1;
+ props->max_qp_rd_atom = c4iw_max_read_depth;
+ props->max_qp_init_rd_atom = c4iw_max_read_depth;
+ props->max_cq = T4_MAX_NUM_CQ;
+ props->max_cqe = T4_MAX_CQ_DEPTH;
+ props->max_mr = c4iw_num_stags(&dev->rdev);
+ props->max_pd = T4_MAX_NUM_PD;
+ props->local_ca_ack_delay = 0;
+ props->max_fast_reg_page_list_len = T4_MAX_FR_DEPTH;
+
+ return 0;
+}
+
+static int c4iw_query_port(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props)
+{
+ struct c4iw_dev *dev;
+ struct net_device *netdev;
+ struct in_device *inetdev;
+
+ PDBG("%s ibdev %p\n", __func__, ibdev);
+
+ dev = to_c4iw_dev(ibdev);
+ netdev = dev->rdev.lldi.ports[port-1];
+
+ memset(props, 0, sizeof(struct ib_port_attr));
+ props->max_mtu = IB_MTU_4096;
+ if (netdev->mtu >= 4096)
+ props->active_mtu = IB_MTU_4096;
+ else if (netdev->mtu >= 2048)
+ props->active_mtu = IB_MTU_2048;
+ else if (netdev->mtu >= 1024)
+ props->active_mtu = IB_MTU_1024;
+ else if (netdev->mtu >= 512)
+ props->active_mtu = IB_MTU_512;
+ else
+ props->active_mtu = IB_MTU_256;
+
+ if (!netif_carrier_ok(netdev))
+ props->state = IB_PORT_DOWN;
+ else {
+ inetdev = in_dev_get(netdev);
+ if (inetdev) {
+ if (inetdev->ifa_list)
+ props->state = IB_PORT_ACTIVE;
+ else
+ props->state = IB_PORT_INIT;
+ in_dev_put(inetdev);
+ } else
+ props->state = IB_PORT_INIT;
+ }
+
+ props->port_cap_flags =
+ IB_PORT_CM_SUP |
+ IB_PORT_SNMP_TUNNEL_SUP |
+ IB_PORT_REINIT_SUP |
+ IB_PORT_DEVICE_MGMT_SUP |
+ IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
+ props->gid_tbl_len = 1;
+ props->pkey_tbl_len = 1;
+ props->active_width = 2;
+ props->active_speed = 2;
+ props->max_msg_sz = -1;
+
+ return 0;
+}
+
+static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
+ ibdev.dev);
+ PDBG("%s dev 0x%p\n", __func__, dev);
+ return sprintf(buf, "%d\n", c4iw_dev->rdev.lldi.adapter_type);
+}
+
+static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
+ ibdev.dev);
+ PDBG("%s dev 0x%p\n", __func__, dev);
+
+ return sprintf(buf, "%u.%u.%u.%u\n",
+ FW_HDR_FW_VER_MAJOR_GET(c4iw_dev->rdev.lldi.fw_vers),
+ FW_HDR_FW_VER_MINOR_GET(c4iw_dev->rdev.lldi.fw_vers),
+ FW_HDR_FW_VER_MICRO_GET(c4iw_dev->rdev.lldi.fw_vers),
+ FW_HDR_FW_VER_BUILD_GET(c4iw_dev->rdev.lldi.fw_vers));
+}
+
+static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
+ ibdev.dev);
+ struct ethtool_drvinfo info;
+ struct net_device *lldev = c4iw_dev->rdev.lldi.ports[0];
+
+ PDBG("%s dev 0x%p\n", __func__, dev);
+ lldev->ethtool_ops->get_drvinfo(lldev, &info);
+ return sprintf(buf, "%s\n", info.driver);
+}
+
+static ssize_t show_board(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
+ ibdev.dev);
+ PDBG("%s dev 0x%p\n", __func__, dev);
+ return sprintf(buf, "%x.%x\n", c4iw_dev->rdev.lldi.pdev->vendor,
+ c4iw_dev->rdev.lldi.pdev->device);
+}
+
+static int c4iw_get_mib(struct ib_device *ibdev,
+ union rdma_protocol_stats *stats)
+{
+ return -ENOSYS;
+}
+
+static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
+static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
+static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
+static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
+
+static struct device_attribute *c4iw_class_attributes[] = {
+ &dev_attr_hw_rev,
+ &dev_attr_fw_ver,
+ &dev_attr_hca_type,
+ &dev_attr_board_id,
+};
+
+int c4iw_register_device(struct c4iw_dev *dev)
+{
+ int ret;
+ int i;
+
+ PDBG("%s c4iw_dev %p\n", __func__, dev);
+ BUG_ON(!dev->rdev.lldi.ports[0]);
+ strlcpy(dev->ibdev.name, "cxgb4_%d", IB_DEVICE_NAME_MAX);
+ memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
+ memcpy(&dev->ibdev.node_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
+ dev->ibdev.owner = THIS_MODULE;
+ dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
+ if (fastreg_support)
+ dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
+ dev->ibdev.local_dma_lkey = 0;
+ dev->ibdev.uverbs_cmd_mask =
+ (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
+ (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_REG_MR) |
+ (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
+ (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
+ (1ull << IB_USER_VERBS_CMD_POST_SEND) |
+ (1ull << IB_USER_VERBS_CMD_POST_RECV);
+ dev->ibdev.node_type = RDMA_NODE_RNIC;
+ memcpy(dev->ibdev.node_desc, C4IW_NODE_DESC, sizeof(C4IW_NODE_DESC));
+ dev->ibdev.phys_port_cnt = dev->rdev.lldi.nports;
+ dev->ibdev.num_comp_vectors = 1;
+ dev->ibdev.dma_device = &(dev->rdev.lldi.pdev->dev);
+ dev->ibdev.query_device = c4iw_query_device;
+ dev->ibdev.query_port = c4iw_query_port;
+ dev->ibdev.modify_port = c4iw_modify_port;
+ dev->ibdev.query_pkey = c4iw_query_pkey;
+ dev->ibdev.query_gid = c4iw_query_gid;
+ dev->ibdev.alloc_ucontext = c4iw_alloc_ucontext;
+ dev->ibdev.dealloc_ucontext = c4iw_dealloc_ucontext;
+ dev->ibdev.mmap = c4iw_mmap;
+ dev->ibdev.alloc_pd = c4iw_allocate_pd;
+ dev->ibdev.dealloc_pd = c4iw_deallocate_pd;
+ dev->ibdev.create_ah = c4iw_ah_create;
+ dev->ibdev.destroy_ah = c4iw_ah_destroy;
+ dev->ibdev.create_qp = c4iw_create_qp;
+ dev->ibdev.modify_qp = c4iw_ib_modify_qp;
+ dev->ibdev.destroy_qp = c4iw_destroy_qp;
+ dev->ibdev.create_cq = c4iw_create_cq;
+ dev->ibdev.destroy_cq = c4iw_destroy_cq;
+ dev->ibdev.resize_cq = c4iw_resize_cq;
+ dev->ibdev.poll_cq = c4iw_poll_cq;
+ dev->ibdev.get_dma_mr = c4iw_get_dma_mr;
+ dev->ibdev.reg_phys_mr = c4iw_register_phys_mem;
+ dev->ibdev.rereg_phys_mr = c4iw_reregister_phys_mem;
+ dev->ibdev.reg_user_mr = c4iw_reg_user_mr;
+ dev->ibdev.dereg_mr = c4iw_dereg_mr;
+ dev->ibdev.alloc_mw = c4iw_alloc_mw;
+ dev->ibdev.bind_mw = c4iw_bind_mw;
+ dev->ibdev.dealloc_mw = c4iw_dealloc_mw;
+ dev->ibdev.alloc_fast_reg_mr = c4iw_alloc_fast_reg_mr;
+ dev->ibdev.alloc_fast_reg_page_list = c4iw_alloc_fastreg_pbl;
+ dev->ibdev.free_fast_reg_page_list = c4iw_free_fastreg_pbl;
+ dev->ibdev.attach_mcast = c4iw_multicast_attach;
+ dev->ibdev.detach_mcast = c4iw_multicast_detach;
+ dev->ibdev.process_mad = c4iw_process_mad;
+ dev->ibdev.req_notify_cq = c4iw_arm_cq;
+ dev->ibdev.post_send = c4iw_post_send;
+ dev->ibdev.post_recv = c4iw_post_receive;
+ dev->ibdev.get_protocol_stats = c4iw_get_mib;
+
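+ /* Hook up the iWARP connection manager ops used by the RDMA CM. */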
+ dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
+ if (!dev->ibdev.iwcm)
+ return -ENOMEM;
+
+ dev->ibdev.iwcm->connect = c4iw_connect;
+ dev->ibdev.iwcm->accept = c4iw_accept_cr;
+ dev->ibdev.iwcm->reject = c4iw_reject_cr;
+ dev->ibdev.iwcm->create_listen = c4iw_create_listen;
+ dev->ibdev.iwcm->destroy_listen = c4iw_destroy_listen;
+ dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref;
+ dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
+ dev->ibdev.iwcm->get_qp = c4iw_get_qp;
+
+ ret = ib_register_device(&dev->ibdev);
+ if (ret)
+ goto bail1;
+
+ for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i) {
+ ret = device_create_file(&dev->ibdev.dev,
+ c4iw_class_attributes[i]);
+ if (ret)
+ goto bail2;
+ }
+ return 0;
+bail2:
+ ib_unregister_device(&dev->ibdev);
+bail1:
+ kfree(dev->ibdev.iwcm);
+ return ret;
+}
+
+void c4iw_unregister_device(struct c4iw_dev *dev)
+{
+ int i;
+
+ PDBG("%s c4iw_dev %p\n", __func__, dev);
+ for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i)
+ device_remove_file(&dev->ibdev.dev,
+ c4iw_class_attributes[i]);
+ ib_unregister_device(&dev->ibdev);
+ kfree(dev->ibdev.iwcm);
+ return;
+}
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
new file mode 100644
index 000000000000..83a01dc0c4c1
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -0,0 +1,1577 @@
+/*
+ * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "iw_cxgb4.h"
+
+static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
+ struct c4iw_dev_ucontext *uctx)
+{
+ /*
+ * uP clears EQ contexts when the connection exits rdma mode,
+ * so no need to post a RESET WR for these EQs.
+ */
+ dma_free_coherent(&(rdev->lldi.pdev->dev),
+ wq->rq.memsize, wq->rq.queue,
+ pci_unmap_addr(&wq->rq, mapping));
+ dma_free_coherent(&(rdev->lldi.pdev->dev),
+ wq->sq.memsize, wq->sq.queue,
+ pci_unmap_addr(&wq->sq, mapping));
+ c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
+ kfree(wq->rq.sw_rq);
+ kfree(wq->sq.sw_sq);
+ c4iw_put_qpid(rdev, wq->rq.qid, uctx);
+ c4iw_put_qpid(rdev, wq->sq.qid, uctx);
+ return 0;
+}
+
+static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
+ struct t4_cq *rcq, struct t4_cq *scq,
+ struct c4iw_dev_ucontext *uctx)
+{
+ int user = (uctx != &rdev->uctx);
+ struct fw_ri_res_wr *res_wr;
+ struct fw_ri_res *res;
+ int wr_len;
+ struct c4iw_wr_wait wr_wait;
+ struct sk_buff *skb;
+ int ret;
+ int eqsize;
+
+ wq->sq.qid = c4iw_get_qpid(rdev, uctx);
+ if (!wq->sq.qid)
+ return -ENOMEM;
+
+ wq->rq.qid = c4iw_get_qpid(rdev, uctx);
+ if (!wq->rq.qid)
+ goto err1;
+
+ if (!user) {
+ wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
+ GFP_KERNEL);
+ if (!wq->sq.sw_sq)
+ goto err2;
+
+ wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
+ GFP_KERNEL);
+ if (!wq->rq.sw_rq)
+ goto err3;
+ }
+
+ /*
+ * RQT must be a power of 2.
+ */
+ wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
+ wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
+ if (!wq->rq.rqt_hwaddr)
+ goto err4;
+
+ wq->sq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
+ wq->sq.memsize, &(wq->sq.dma_addr),
+ GFP_KERNEL);
+ if (!wq->sq.queue)
+ goto err5;
+ memset(wq->sq.queue, 0, wq->sq.memsize);
+ pci_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
+
+ wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
+ wq->rq.memsize, &(wq->rq.dma_addr),
+ GFP_KERNEL);
+ if (!wq->rq.queue)
+ goto err6;
+ PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
+ __func__, wq->sq.queue,
+ (unsigned long long)virt_to_phys(wq->sq.queue),
+ wq->rq.queue,
+ (unsigned long long)virt_to_phys(wq->rq.queue));
+ memset(wq->rq.queue, 0, wq->rq.memsize);
+ pci_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
+
+ wq->db = rdev->lldi.db_reg;
+ wq->gts = rdev->lldi.gts_reg;
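+ /*
+ * For user-mode queues, compute the page-aligned BAR2 doorbell/GTS
+ * addresses that the library mmaps via the keys returned in the
+ * create_qp response.
+ */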
+ if (user) {
+ wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
+ (wq->sq.qid << rdev->qpshift);
+ wq->sq.udb &= PAGE_MASK;
+ wq->rq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
+ (wq->rq.qid << rdev->qpshift);
+ wq->rq.udb &= PAGE_MASK;
+ }
+ wq->rdev = rdev;
+ wq->rq.msn = 1;
+
+ /* build fw_ri_res_wr */
+ wr_len = sizeof *res_wr + 2 * sizeof *res;
+
+ skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto err7;
+ }
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
+
+ res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
+ memset(res_wr, 0, wr_len);
+ res_wr->op_nres = cpu_to_be32(
+ FW_WR_OP(FW_RI_RES_WR) |
+ V_FW_RI_RES_WR_NRES(2) |
+ FW_WR_COMPL(1));
+ res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
+ res_wr->cookie = (u64)&wr_wait;
+ res = res_wr->res;
+ res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
+ res->u.sqrq.op = FW_RI_RES_OP_WRITE;
+
+ /*
+ * eqsize is the number of 64B entries plus the status page size.
+ */
+ eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
+
+ res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
+ V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */
+ V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */
+ V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */
+ V_FW_RI_RES_WR_IQID(scq->cqid));
+ res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
+ V_FW_RI_RES_WR_DCAEN(0) |
+ V_FW_RI_RES_WR_DCACPU(0) |
+ V_FW_RI_RES_WR_FBMIN(3) |
+ V_FW_RI_RES_WR_FBMAX(3) |
+ V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
+ V_FW_RI_RES_WR_CIDXFTHRESH(0) |
+ V_FW_RI_RES_WR_EQSIZE(eqsize));
+ res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
+ res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
+ res++;
+ res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
+ res->u.sqrq.op = FW_RI_RES_OP_WRITE;
+
+ /*
+ * eqsize is the number of 64B entries plus the status page size.
+ */
+ eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
+ res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
+ V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */
+ V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */
+ V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */
+ V_FW_RI_RES_WR_IQID(rcq->cqid));
+ res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
+ V_FW_RI_RES_WR_DCAEN(0) |
+ V_FW_RI_RES_WR_DCACPU(0) |
+ V_FW_RI_RES_WR_FBMIN(3) |
+ V_FW_RI_RES_WR_FBMAX(3) |
+ V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
+ V_FW_RI_RES_WR_CIDXFTHRESH(0) |
+ V_FW_RI_RES_WR_EQSIZE(eqsize));
+ res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
+ res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
+
+ c4iw_init_wr_wait(&wr_wait);
+
+ ret = c4iw_ofld_send(rdev, skb);
+ if (ret)
+ goto err7;
+ wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
+ if (!wr_wait.done) {
+ printk(KERN_ERR MOD "Device %s not responding!\n",
+ pci_name(rdev->lldi.pdev));
+ rdev->flags = T4_FATAL_ERROR;
+ ret = -EIO;
+ } else
+ ret = wr_wait.ret;
+ if (ret)
+ goto err7;
+
+ PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
+ __func__, wq->sq.qid, wq->rq.qid, wq->db,
+ (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);
+
+ return 0;
+err7:
+ dma_free_coherent(&(rdev->lldi.pdev->dev),
+ wq->rq.memsize, wq->rq.queue,
+ pci_unmap_addr(&wq->rq, mapping));
+err6:
+ dma_free_coherent(&(rdev->lldi.pdev->dev),
+ wq->sq.memsize, wq->sq.queue,
+ pci_unmap_addr(&wq->sq, mapping));
+err5:
+ c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
+err4:
+ kfree(wq->rq.sw_rq);
+err3:
+ kfree(wq->sq.sw_sq);
+err2:
+ c4iw_put_qpid(rdev, wq->rq.qid, uctx);
+err1:
+ c4iw_put_qpid(rdev, wq->sq.qid, uctx);
+ return -ENOMEM;
+}
+
+static int build_rdma_send(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
+{
+ int i;
+ u32 plen;
+ int size;
+ u8 *datap;
+
+ if (wr->num_sge > T4_MAX_SEND_SGE)
+ return -EINVAL;
+ switch (wr->opcode) {
+ case IB_WR_SEND:
+ if (wr->send_flags & IB_SEND_SOLICITED)
+ wqe->send.sendop_pkd = cpu_to_be32(
+ V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
+ else
+ wqe->send.sendop_pkd = cpu_to_be32(
+ V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
+ wqe->send.stag_inv = 0;
+ break;
+ case IB_WR_SEND_WITH_INV:
+ if (wr->send_flags & IB_SEND_SOLICITED)
+ wqe->send.sendop_pkd = cpu_to_be32(
+ V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV));
+ else
+ wqe->send.sendop_pkd = cpu_to_be32(
+ V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV));
+ wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ plen = 0;
+ if (wr->num_sge) {
+ if (wr->send_flags & IB_SEND_INLINE) {
+ datap = (u8 *)wqe->send.u.immd_src[0].data;
+ for (i = 0; i < wr->num_sge; i++) {
+ if ((plen + wr->sg_list[i].length) >
+ T4_MAX_SEND_INLINE) {
+ return -EMSGSIZE;
+ }
+ plen += wr->sg_list[i].length;
+ memcpy(datap,
+ (void *)(unsigned long)wr->sg_list[i].addr,
+ wr->sg_list[i].length);
+ datap += wr->sg_list[i].length;
+ }
+ wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
+ wqe->send.u.immd_src[0].r1 = 0;
+ wqe->send.u.immd_src[0].r2 = 0;
+ wqe->send.u.immd_src[0].immdlen = cpu_to_be32(plen);
+ size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
+ plen;
+ } else {
+ for (i = 0; i < wr->num_sge; i++) {
+ if ((plen + wr->sg_list[i].length) < plen)
+ return -EMSGSIZE;
+ plen += wr->sg_list[i].length;
+ wqe->send.u.isgl_src[0].sge[i].stag =
+ cpu_to_be32(wr->sg_list[i].lkey);
+ wqe->send.u.isgl_src[0].sge[i].len =
+ cpu_to_be32(wr->sg_list[i].length);
+ wqe->send.u.isgl_src[0].sge[i].to =
+ cpu_to_be64(wr->sg_list[i].addr);
+ }
+ wqe->send.u.isgl_src[0].op = FW_RI_DATA_ISGL;
+ wqe->send.u.isgl_src[0].r1 = 0;
+ wqe->send.u.isgl_src[0].nsge = cpu_to_be16(wr->num_sge);
+ wqe->send.u.isgl_src[0].r2 = 0;
+ size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
+ wr->num_sge * sizeof(struct fw_ri_sge);
+ }
+ } else {
+ wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
+ wqe->send.u.immd_src[0].r1 = 0;
+ wqe->send.u.immd_src[0].r2 = 0;
+ wqe->send.u.immd_src[0].immdlen = 0;
+ size = sizeof wqe->send + sizeof(struct fw_ri_immd);
+ }
+ *len16 = DIV_ROUND_UP(size, 16);
+ wqe->send.plen = cpu_to_be32(plen);
+ return 0;
+}
+
+static int build_rdma_write(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
+{
+ int i;
+ u32 plen;
+ int size;
+ u8 *datap;
+
+ if (wr->num_sge > T4_MAX_WRITE_SGE)
+ return -EINVAL;
+ wqe->write.r2 = 0;
+ wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
+ wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
+ plen = 0;
+ if (wr->num_sge) {
+ if (wr->send_flags & IB_SEND_INLINE) {
+ datap = (u8 *)wqe->write.u.immd_src[0].data;
+ for (i = 0; i < wr->num_sge; i++) {
+ if ((plen + wr->sg_list[i].length) >
+ T4_MAX_WRITE_INLINE) {
+ return -EMSGSIZE;
+ }
+ plen += wr->sg_list[i].length;
+ memcpy(datap,
+ (void *)(unsigned long)wr->sg_list[i].addr,
+ wr->sg_list[i].length);
+ datap += wr->sg_list[i].length;
+ }
+ wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
+ wqe->write.u.immd_src[0].r1 = 0;
+ wqe->write.u.immd_src[0].r2 = 0;
+ wqe->write.u.immd_src[0].immdlen = cpu_to_be32(plen);
+ size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
+ plen;
+ } else {
+ for (i = 0; i < wr->num_sge; i++) {
+ if ((plen + wr->sg_list[i].length) < plen)
+ return -EMSGSIZE;
+ plen += wr->sg_list[i].length;
+ wqe->write.u.isgl_src[0].sge[i].stag =
+ cpu_to_be32(wr->sg_list[i].lkey);
+ wqe->write.u.isgl_src[0].sge[i].len =
+ cpu_to_be32(wr->sg_list[i].length);
+ wqe->write.u.isgl_src[0].sge[i].to =
+ cpu_to_be64(wr->sg_list[i].addr);
+ }
+ wqe->write.u.isgl_src[0].op = FW_RI_DATA_ISGL;
+ wqe->write.u.isgl_src[0].r1 = 0;
+ wqe->write.u.isgl_src[0].nsge =
+ cpu_to_be16(wr->num_sge);
+ wqe->write.u.isgl_src[0].r2 = 0;
+ size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
+ wr->num_sge * sizeof(struct fw_ri_sge);
+ }
+ } else {
+ wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
+ wqe->write.u.immd_src[0].r1 = 0;
+ wqe->write.u.immd_src[0].r2 = 0;
+ wqe->write.u.immd_src[0].immdlen = 0;
+ size = sizeof wqe->write + sizeof(struct fw_ri_immd);
+ }
+ *len16 = DIV_ROUND_UP(size, 16);
+ wqe->write.plen = cpu_to_be32(plen);
+ return 0;
+}
+
+static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
+{
+ if (wr->num_sge > 1)
+ return -EINVAL;
+ if (wr->num_sge) {
+ wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey);
+ wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr
+ >> 32));
+ wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr);
+ wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
+ wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
+ wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
+ >> 32));
+ wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
+ } else {
+ wqe->read.stag_src = cpu_to_be32(2);
+ wqe->read.to_src_hi = 0;
+ wqe->read.to_src_lo = 0;
+ wqe->read.stag_sink = cpu_to_be32(2);
+ wqe->read.plen = 0;
+ wqe->read.to_sink_hi = 0;
+ wqe->read.to_sink_lo = 0;
+ }
+ wqe->read.r2 = 0;
+ wqe->read.r5 = 0;
+ *len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
+ return 0;
+}
+
+static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
+ struct ib_recv_wr *wr, u8 *len16)
+{
+ int i;
+ int plen = 0;
+
+ for (i = 0; i < wr->num_sge; i++) {
+ if ((plen + wr->sg_list[i].length) < plen)
+ return -EMSGSIZE;
+ plen += wr->sg_list[i].length;
+ wqe->recv.isgl.sge[i].stag =
+ cpu_to_be32(wr->sg_list[i].lkey);
+ wqe->recv.isgl.sge[i].len =
+ cpu_to_be32(wr->sg_list[i].length);
+ wqe->recv.isgl.sge[i].to =
+ cpu_to_be64(wr->sg_list[i].addr);
+ }
+ for (; i < T4_MAX_RECV_SGE; i++) {
+ wqe->recv.isgl.sge[i].stag = 0;
+ wqe->recv.isgl.sge[i].len = 0;
+ wqe->recv.isgl.sge[i].to = 0;
+ }
+ wqe->recv.isgl.op = FW_RI_DATA_ISGL;
+ wqe->recv.isgl.r1 = 0;
+ wqe->recv.isgl.nsge = cpu_to_be16(wr->num_sge);
+ wqe->recv.isgl.r2 = 0;
+ *len16 = DIV_ROUND_UP(sizeof wqe->recv +
+ wr->num_sge * sizeof(struct fw_ri_sge), 16);
+ return 0;
+}
+
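+ /*
+ * Build a fast-register work request: small page lists are copied
+ * inline as immediate data, larger ones are referenced via a DSGL
+ * pointing at the DMA-mapped page list.
+ */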
+static int build_fastreg(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
+{
+
+ struct fw_ri_immd *imdp;
+ __be64 *p;
+ int i;
+ int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);
+
+ if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH)
+ return -EINVAL;
+
+ wqe->fr.qpbinde_to_dcacpu = 0;
+ wqe->fr.pgsz_shift = wr->wr.fast_reg.page_shift - 12;
+ wqe->fr.addr_type = FW_RI_VA_BASED_TO;
+ wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->wr.fast_reg.access_flags);
+ wqe->fr.len_hi = 0;
+ wqe->fr.len_lo = cpu_to_be32(wr->wr.fast_reg.length);
+ wqe->fr.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
+ wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
+ wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
+ 0xffffffff);
+ if (pbllen > T4_MAX_FR_IMMD) {
+ struct c4iw_fr_page_list *c4pl =
+ to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
+ struct fw_ri_dsgl *sglp;
+
+ sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
+ sglp->op = FW_RI_DATA_DSGL;
+ sglp->r1 = 0;
+ sglp->nsge = cpu_to_be16(1);
+ sglp->addr0 = cpu_to_be64(c4pl->dma_addr);
+ sglp->len0 = cpu_to_be32(pbllen);
+
+ *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *sglp, 16);
+ } else {
+ imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
+ imdp->op = FW_RI_DATA_IMMD;
+ imdp->r1 = 0;
+ imdp->r2 = 0;
+ imdp->immdlen = cpu_to_be32(pbllen);
+ p = (__be64 *)(imdp + 1);
+ for (i = 0; i < wr->wr.fast_reg.page_list_len; i++, p++)
+ *p = cpu_to_be64(
+ (u64)wr->wr.fast_reg.page_list->page_list[i]);
+ *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen,
+ 16);
+ }
+ return 0;
+}
+
+static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
+ u8 *len16)
+{
+ wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
+ wqe->inv.r2 = 0;
+ *len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
+ return 0;
+}
+
+void c4iw_qp_add_ref(struct ib_qp *qp)
+{
+ PDBG("%s ib_qp %p\n", __func__, qp);
+ atomic_inc(&(to_c4iw_qp(qp)->refcnt));
+}
+
+void c4iw_qp_rem_ref(struct ib_qp *qp)
+{
+ PDBG("%s ib_qp %p\n", __func__, qp);
+ if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt)))
+ wake_up(&(to_c4iw_qp(qp)->wait));
+}
+
+int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ int err = 0;
+ u8 len16 = 0;
+ enum fw_wr_opcodes fw_opcode = 0;
+ enum fw_ri_wr_flags fw_flags;
+ struct c4iw_qp *qhp;
+ union t4_wr *wqe;
+ u32 num_wrs;
+ struct t4_swsqe *swsqe;
+ unsigned long flag;
+ u16 idx = 0;
+
+ qhp = to_c4iw_qp(ibqp);
+ spin_lock_irqsave(&qhp->lock, flag);
+ if (t4_wq_in_error(&qhp->wq)) {
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ return -EINVAL;
+ }
+ num_wrs = t4_sq_avail(&qhp->wq);
+ if (num_wrs == 0) {
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ return -ENOMEM;
+ }
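+ /*
+ * Build each work request in place in the next software send queue
+ * slot, then ring the doorbell once for everything posted here.
+ */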
+ while (wr) {
+ if (num_wrs == 0) {
+ err = -ENOMEM;
+ *bad_wr = wr;
+ break;
+ }
+ wqe = &qhp->wq.sq.queue[qhp->wq.sq.pidx];
+ fw_flags = 0;
+ if (wr->send_flags & IB_SEND_SOLICITED)
+ fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
+ if (wr->send_flags & IB_SEND_SIGNALED)
+ fw_flags |= FW_RI_COMPLETION_FLAG;
+ swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
+ switch (wr->opcode) {
+ case IB_WR_SEND_WITH_INV:
+ case IB_WR_SEND:
+ if (wr->send_flags & IB_SEND_FENCE)
+ fw_flags |= FW_RI_READ_FENCE_FLAG;
+ fw_opcode = FW_RI_SEND_WR;
+ if (wr->opcode == IB_WR_SEND)
+ swsqe->opcode = FW_RI_SEND;
+ else
+ swsqe->opcode = FW_RI_SEND_WITH_INV;
+ err = build_rdma_send(wqe, wr, &len16);
+ break;
+ case IB_WR_RDMA_WRITE:
+ fw_opcode = FW_RI_RDMA_WRITE_WR;
+ swsqe->opcode = FW_RI_RDMA_WRITE;
+ err = build_rdma_write(wqe, wr, &len16);
+ break;
+ case IB_WR_RDMA_READ:
+ fw_opcode = FW_RI_RDMA_READ_WR;
+ swsqe->opcode = FW_RI_READ_REQ;
+ fw_flags = 0;
+ err = build_rdma_read(wqe, wr, &len16);
+ if (err)
+ break;
+ swsqe->read_len = wr->sg_list[0].length;
+ if (!qhp->wq.sq.oldest_read)
+ qhp->wq.sq.oldest_read = swsqe;
+ break;
+ case IB_WR_FAST_REG_MR:
+ fw_opcode = FW_RI_FR_NSMR_WR;
+ swsqe->opcode = FW_RI_FAST_REGISTER;
+ err = build_fastreg(wqe, wr, &len16);
+ break;
+ case IB_WR_LOCAL_INV:
+ fw_opcode = FW_RI_INV_LSTAG_WR;
+ swsqe->opcode = FW_RI_LOCAL_INV;
+ err = build_inv_stag(wqe, wr, &len16);
+ break;
+ default:
+ PDBG("%s post of type=%d TBD!\n", __func__,
+ wr->opcode);
+ err = -EINVAL;
+ }
+ if (err) {
+ *bad_wr = wr;
+ break;
+ }
+ swsqe->idx = qhp->wq.sq.pidx;
+ swsqe->complete = 0;
+ swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED);
+ swsqe->wr_id = wr->wr_id;
+
+ init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
+
+ PDBG("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
+ __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
+ swsqe->opcode, swsqe->read_len);
+ wr = wr->next;
+ num_wrs--;
+ t4_sq_produce(&qhp->wq);
+ idx++;
+ }
+ if (t4_wq_db_enabled(&qhp->wq))
+ t4_ring_sq_db(&qhp->wq, idx);
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ return err;
+}
+
+int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ int err = 0;
+ struct c4iw_qp *qhp;
+ union t4_recv_wr *wqe;
+ u32 num_wrs;
+ u8 len16 = 0;
+ unsigned long flag;
+ u16 idx = 0;
+
+ qhp = to_c4iw_qp(ibqp);
+ spin_lock_irqsave(&qhp->lock, flag);
+ if (t4_wq_in_error(&qhp->wq)) {
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ return -EINVAL;
+ }
+ num_wrs = t4_rq_avail(&qhp->wq);
+ if (num_wrs == 0) {
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ return -ENOMEM;
+ }
+ while (wr) {
+ if (wr->num_sge > T4_MAX_RECV_SGE) {
+ err = -EINVAL;
+ *bad_wr = wr;
+ break;
+ }
+ wqe = &qhp->wq.rq.queue[qhp->wq.rq.pidx];
+ if (num_wrs)
+ err = build_rdma_recv(qhp, wqe, wr, &len16);
+ else
+ err = -ENOMEM;
+ if (err) {
+ *bad_wr = wr;
+ break;
+ }
+
+ qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;
+
+ wqe->recv.opcode = FW_RI_RECV_WR;
+ wqe->recv.r1 = 0;
+ wqe->recv.wrid = qhp->wq.rq.pidx;
+ wqe->recv.r2[0] = 0;
+ wqe->recv.r2[1] = 0;
+ wqe->recv.r2[2] = 0;
+ wqe->recv.len16 = len16;
+ if (len16 < 5)
+ wqe->flits[8] = 0;
+
+ PDBG("%s cookie 0x%llx pidx %u\n", __func__,
+ (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
+ t4_rq_produce(&qhp->wq);
+ wr = wr->next;
+ num_wrs--;
+ idx++;
+ }
+ if (t4_wq_db_enabled(&qhp->wq))
+ t4_ring_rq_db(&qhp->wq, idx);
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ return err;
+}
+
+int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind)
+{
+ return -ENOSYS;
+}
+
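+ /*
+ * Map a T4 error CQE status onto the iWARP TERMINATE message
+ * layer/etype and error code fields.
+ */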
+static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
+ u8 *ecode)
+{
+ int status;
+ int tagged;
+ int opcode;
+ int rqtype;
+ int send_inv;
+
+ if (!err_cqe) {
+ *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
+ *ecode = 0;
+ return;
+ }
+
+ status = CQE_STATUS(err_cqe);
+ opcode = CQE_OPCODE(err_cqe);
+ rqtype = RQ_TYPE(err_cqe);
+ send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
+ (opcode == FW_RI_SEND_WITH_SE_INV);
+ tagged = (opcode == FW_RI_RDMA_WRITE) ||
+ (rqtype && (opcode == FW_RI_READ_RESP));
+
+ switch (status) {
+ case T4_ERR_STAG:
+ if (send_inv) {
+ *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
+ *ecode = RDMAP_CANT_INV_STAG;
+ } else {
+ *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
+ *ecode = RDMAP_INV_STAG;
+ }
+ break;
+ case T4_ERR_PDID:
+ *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
+ if ((opcode == FW_RI_SEND_WITH_INV) ||
+ (opcode == FW_RI_SEND_WITH_SE_INV))
+ *ecode = RDMAP_CANT_INV_STAG;
+ else
+ *ecode = RDMAP_STAG_NOT_ASSOC;
+ break;
+ case T4_ERR_QPID:
+ *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
+ *ecode = RDMAP_STAG_NOT_ASSOC;
+ break;
+ case T4_ERR_ACCESS:
+ *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
+ *ecode = RDMAP_ACC_VIOL;
+ break;
+ case T4_ERR_WRAP:
+ *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
+ *ecode = RDMAP_TO_WRAP;
+ break;
+ case T4_ERR_BOUND:
+ if (tagged) {
+ *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
+ *ecode = DDPT_BASE_BOUNDS;
+ } else {
+ *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
+ *ecode = RDMAP_BASE_BOUNDS;
+ }
+ break;
+ case T4_ERR_INVALIDATE_SHARED_MR:
+ case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
+ *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
+ *ecode = RDMAP_CANT_INV_STAG;
+ break;
+ case T4_ERR_ECC:
+ case T4_ERR_ECC_PSTAG:
+ case T4_ERR_INTERNAL_ERR:
+ *layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
+ *ecode = 0;
+ break;
+ case T4_ERR_OUT_OF_RQE:
+ *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
+ *ecode = DDPU_INV_MSN_NOBUF;
+ break;
+ case T4_ERR_PBL_ADDR_BOUND:
+ *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
+ *ecode = DDPT_BASE_BOUNDS;
+ break;
+ case T4_ERR_CRC:
+ *layer_type = LAYER_MPA|DDP_LLP;
+ *ecode = MPA_CRC_ERR;
+ break;
+ case T4_ERR_MARKER:
+ *layer_type = LAYER_MPA|DDP_LLP;
+ *ecode = MPA_MARKER_ERR;
+ break;
+ case T4_ERR_PDU_LEN_ERR:
+ *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
+ *ecode = DDPU_MSG_TOOBIG;
+ break;
+ case T4_ERR_DDP_VERSION:
+ if (tagged) {
+ *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
+ *ecode = DDPT_INV_VERS;
+ } else {
+ *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
+ *ecode = DDPU_INV_VERS;
+ }
+ break;
+ case T4_ERR_RDMA_VERSION:
+ *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
+ *ecode = RDMAP_INV_VERS;
+ break;
+ case T4_ERR_OPCODE:
+ *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
+ *ecode = RDMAP_INV_OPCODE;
+ break;
+ case T4_ERR_DDP_QUEUE_NUM:
+ *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
+ *ecode = DDPU_INV_QN;
+ break;
+ case T4_ERR_MSN:
+ case T4_ERR_MSN_GAP:
+ case T4_ERR_MSN_RANGE:
+ case T4_ERR_IRD_OVERFLOW:
+ *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
+ *ecode = DDPU_INV_MSN_RANGE;
+ break;
+ case T4_ERR_TBIT:
+ *layer_type = LAYER_DDP|DDP_LOCAL_CATA;
+ *ecode = 0;
+ break;
+ case T4_ERR_MO:
+ *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
+ *ecode = DDPU_INV_MO;
+ break;
+ default:
+ *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
+ *ecode = 0;
+ break;
+ }
+}
+
+int c4iw_post_zb_read(struct c4iw_qp *qhp)
+{
+ union t4_wr *wqe;
+ struct sk_buff *skb;
+ u8 len16;
+
+ PDBG("%s enter\n", __func__);
+ skb = alloc_skb(40, GFP_KERNEL);
+ if (!skb) {
+ printk(KERN_ERR "%s cannot send zb_read!!\n", __func__);
+ return -ENOMEM;
+ }
+ set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
+
+ wqe = (union t4_wr *)skb_put(skb, sizeof wqe->read);
+ memset(wqe, 0, sizeof wqe->read);
+ wqe->read.r2 = cpu_to_be64(0);
+ wqe->read.stag_sink = cpu_to_be32(1);
+ wqe->read.to_sink_hi = cpu_to_be32(0);
+ wqe->read.to_sink_lo = cpu_to_be32(1);
+ wqe->read.stag_src = cpu_to_be32(1);
+ wqe->read.plen = cpu_to_be32(0);
+ wqe->read.to_src_hi = cpu_to_be32(0);
+ wqe->read.to_src_lo = cpu_to_be32(1);
+ len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
+ init_wr_hdr(wqe, 0, FW_RI_RDMA_READ_WR, FW_RI_COMPLETION_FLAG, len16);
+
+ return c4iw_ofld_send(&qhp->rhp->rdev, skb);
+}
+
+static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
+ gfp_t gfp)
+{
+ struct fw_ri_wr *wqe;
+ struct sk_buff *skb;
+ struct terminate_message *term;
+
+ PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
+ qhp->ep->hwtid);
+
+ skb = alloc_skb(sizeof *wqe, gfp);
+ if (!skb)
+ return;
+ set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
+
+ wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
+ memset(wqe, 0, sizeof *wqe);
+ wqe->op_compl = cpu_to_be32(FW_WR_OP(FW_RI_INIT_WR));
+ wqe->flowid_len16 = cpu_to_be32(
+ FW_WR_FLOWID(qhp->ep->hwtid) |
+ FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
+
+ wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
+ wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
+ term = (struct terminate_message *)wqe->u.terminate.termmsg;
+ build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
+ c4iw_ofld_send(&qhp->rhp->rdev, skb);
+}
+
+/*
+ * Assumes qhp lock is held.
+ */
+static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
+ struct c4iw_cq *schp, unsigned long *flag)
+{
+ int count;
+ int flushed;
+
+ PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
+ /* take a ref on the qhp since we must release the lock */
+ atomic_inc(&qhp->refcnt);
+ spin_unlock_irqrestore(&qhp->lock, *flag);
+
+ /* locking hierarchy: cq lock first, then qp lock. */
+ spin_lock_irqsave(&rchp->lock, *flag);
+ spin_lock(&qhp->lock);
+ c4iw_flush_hw_cq(&rchp->cq);
+ c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
+ flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
+ spin_unlock(&qhp->lock);
+ spin_unlock_irqrestore(&rchp->lock, *flag);
+ if (flushed)
+ (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
+
+ /* locking hierarchy: cq lock first, then qp lock. */
+ spin_lock_irqsave(&schp->lock, *flag);
+ spin_lock(&qhp->lock);
+ c4iw_flush_hw_cq(&schp->cq);
+ c4iw_count_scqes(&schp->cq, &qhp->wq, &count);
+ flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
+ spin_unlock(&qhp->lock);
+ spin_unlock_irqrestore(&schp->lock, *flag);
+ if (flushed)
+ (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
+
+ /* deref */
+ if (atomic_dec_and_test(&qhp->refcnt))
+ wake_up(&qhp->wait);
+
+ spin_lock_irqsave(&qhp->lock, *flag);
+}
+
+static void flush_qp(struct c4iw_qp *qhp, unsigned long *flag)
+{
+ struct c4iw_cq *rchp, *schp;
+
+ rchp = get_chp(qhp->rhp, qhp->attr.rcq);
+ schp = get_chp(qhp->rhp, qhp->attr.scq);
+
+ if (qhp->ibqp.uobject) {
+ t4_set_wq_in_error(&qhp->wq);
+ t4_set_cq_in_error(&rchp->cq);
+ if (schp != rchp)
+ t4_set_cq_in_error(&schp->cq);
+ return;
+ }
+ __flush_qp(qhp, rchp, schp, flag);
+}
+
+static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
+{
+ struct fw_ri_wr *wqe;
+ int ret;
+ struct c4iw_wr_wait wr_wait;
+ struct sk_buff *skb;
+
+ PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
+ qhp->ep->hwtid);
+
+ skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
+ if (!skb)
+ return -ENOMEM;
+ set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
+
+ wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
+ memset(wqe, 0, sizeof *wqe);
+ wqe->op_compl = cpu_to_be32(
+ FW_WR_OP(FW_RI_INIT_WR) |
+ FW_WR_COMPL(1));
+ wqe->flowid_len16 = cpu_to_be32(
+ FW_WR_FLOWID(qhp->ep->hwtid) |
+ FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
+ wqe->cookie = (u64)&wr_wait;
+
+ wqe->u.fini.type = FW_RI_TYPE_FINI;
+ c4iw_init_wr_wait(&wr_wait);
+ ret = c4iw_ofld_send(&rhp->rdev, skb);
+ if (ret)
+ goto out;
+
+ wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
+ if (!wr_wait.done) {
+ printk(KERN_ERR MOD "Device %s not responding!\n",
+ pci_name(rhp->rdev.lldi.pdev));
+ rhp->rdev.flags = T4_FATAL_ERROR;
+ ret = -EIO;
+ } else {
+ ret = wr_wait.ret;
+ if (ret)
+ printk(KERN_WARNING MOD
+ "%s: Abnormal close qpid %d ret %u\n",
+ pci_name(rhp->rdev.lldi.pdev), qhp->wq.sq.qid,
+ ret);
+ }
+out:
+ PDBG("%s ret %d\n", __func__, ret);
+ return ret;
+}
+
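+ /*
+ * Format the "ready to receive" peer-to-peer message (a zero-length
+ * RDMA WRITE or READ request) used when this side is the MPA
+ * initiator.
+ */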
+static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
+{
+ memset(&init->u, 0, sizeof init->u);
+ switch (p2p_type) {
+ case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
+ init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
+ init->u.write.stag_sink = cpu_to_be32(1);
+ init->u.write.to_sink = cpu_to_be64(1);
+ init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
+ init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
+ sizeof(struct fw_ri_immd),
+ 16);
+ break;
+ case FW_RI_INIT_P2PTYPE_READ_REQ:
+ init->u.write.opcode = FW_RI_RDMA_READ_WR;
+ init->u.read.stag_src = cpu_to_be32(1);
+ init->u.read.to_src_lo = cpu_to_be32(1);
+ init->u.read.stag_sink = cpu_to_be32(1);
+ init->u.read.to_sink_lo = cpu_to_be32(1);
+ init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
+ break;
+ }
+}
+
+static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
+{
+ struct fw_ri_wr *wqe;
+ int ret;
+ struct c4iw_wr_wait wr_wait;
+ struct sk_buff *skb;
+
+ PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
+ qhp->ep->hwtid);
+
+ skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
+ if (!skb)
+ return -ENOMEM;
+ set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
+
+ wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
+ memset(wqe, 0, sizeof *wqe);
+ wqe->op_compl = cpu_to_be32(
+ FW_WR_OP(FW_RI_INIT_WR) |
+ FW_WR_COMPL(1));
+ wqe->flowid_len16 = cpu_to_be32(
+ FW_WR_FLOWID(qhp->ep->hwtid) |
+ FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
+
+ wqe->cookie = (u64)&wr_wait;
+
+ wqe->u.init.type = FW_RI_TYPE_INIT;
+ wqe->u.init.mpareqbit_p2ptype =
+ V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) |
+ V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type);
+ wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
+ if (qhp->attr.mpa_attr.recv_marker_enabled)
+ wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
+ if (qhp->attr.mpa_attr.xmit_marker_enabled)
+ wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
+ if (qhp->attr.mpa_attr.crc_enabled)
+ wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;
+
+ wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
+ FW_RI_QP_RDMA_WRITE_ENABLE |
+ FW_RI_QP_BIND_ENABLE;
+ if (!qhp->ibqp.uobject)
+ wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
+ FW_RI_QP_STAG0_ENABLE;
+ wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
+ wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
+ wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
+ wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
+ wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
+ wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
+ wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
+ wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
+ wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
+ wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
+ wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
+ wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
+ wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
+ rhp->rdev.lldi.vr->rq.start);
+ if (qhp->attr.mpa_attr.initiator)
+ build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
+
+ c4iw_init_wr_wait(&wr_wait);
+ ret = c4iw_ofld_send(&rhp->rdev, skb);
+ if (ret)
+ goto out;
+
+ wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
+ if (!wr_wait.done) {
+ printk(KERN_ERR MOD "Device %s not responding!\n",
+ pci_name(rhp->rdev.lldi.pdev));
+ rhp->rdev.flags = T4_FATAL_ERROR;
+ ret = -EIO;
+ } else
+ ret = wr_wait.ret;
+out:
+ PDBG("%s ret %d\n", __func__, ret);
+ return ret;
+}
+
+int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
+ enum c4iw_qp_attr_mask mask,
+ struct c4iw_qp_attributes *attrs,
+ int internal)
+{
+ int ret = 0;
+ struct c4iw_qp_attributes newattr = qhp->attr;
+ unsigned long flag;
+ int disconnect = 0;
+ int terminate = 0;
+ int abort = 0;
+ int free = 0;
+ struct c4iw_ep *ep = NULL;
+
+ PDBG("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n", __func__,
+ qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
+ (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
+
+ spin_lock_irqsave(&qhp->lock, flag);
+
+ /* Process attr changes if in IDLE */
+ if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
+ if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
+ ret = -EIO;
+ goto out;
+ }
+ if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
+ newattr.enable_rdma_read = attrs->enable_rdma_read;
+ if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
+ newattr.enable_rdma_write = attrs->enable_rdma_write;
+ if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
+ newattr.enable_bind = attrs->enable_bind;
+ if (mask & C4IW_QP_ATTR_MAX_ORD) {
+ if (attrs->max_ord > c4iw_max_read_depth) {
+ ret = -EINVAL;
+ goto out;
+ }
+ newattr.max_ord = attrs->max_ord;
+ }
+ if (mask & C4IW_QP_ATTR_MAX_IRD) {
+ if (attrs->max_ird > c4iw_max_read_depth) {
+ ret = -EINVAL;
+ goto out;
+ }
+ newattr.max_ird = attrs->max_ird;
+ }
+ qhp->attr = newattr;
+ }
+
+ if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
+ goto out;
+ if (qhp->attr.state == attrs->next_state)
+ goto out;
+
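+ /*
+ * QP state machine: the outer switch is the current state, the
+ * inner switch the requested next state.
+ */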
+ switch (qhp->attr.state) {
+ case C4IW_QP_STATE_IDLE:
+ switch (attrs->next_state) {
+ case C4IW_QP_STATE_RTS:
+ if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ qhp->attr.mpa_attr = attrs->mpa_attr;
+ qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
+ qhp->ep = qhp->attr.llp_stream_handle;
+ qhp->attr.state = C4IW_QP_STATE_RTS;
+
+ /*
+ * Ref the endpoint here and deref when we
+ * disassociate the endpoint from the QP. This
+ * happens in CLOSING->IDLE transition or *->ERROR
+ * transition.
+ */
+ c4iw_get_ep(&qhp->ep->com);
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ ret = rdma_init(rhp, qhp);
+ spin_lock_irqsave(&qhp->lock, flag);
+ if (ret)
+ goto err;
+ break;
+ case C4IW_QP_STATE_ERROR:
+ qhp->attr.state = C4IW_QP_STATE_ERROR;
+ flush_qp(qhp, &flag);
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+ break;
+ case C4IW_QP_STATE_RTS:
+ switch (attrs->next_state) {
+ case C4IW_QP_STATE_CLOSING:
+ BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
+ qhp->attr.state = C4IW_QP_STATE_CLOSING;
+ if (!internal) {
+ abort = 0;
+ disconnect = 1;
+ ep = qhp->ep;
+ c4iw_get_ep(&ep->com);
+ }
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ ret = rdma_fini(rhp, qhp);
+ spin_lock_irqsave(&qhp->lock, flag);
+ if (ret) {
+ ep = qhp->ep;
+ c4iw_get_ep(&ep->com);
+ disconnect = abort = 1;
+ goto err;
+ }
+ break;
+ case C4IW_QP_STATE_TERMINATE:
+ qhp->attr.state = C4IW_QP_STATE_TERMINATE;
+ if (qhp->ibqp.uobject)
+ t4_set_wq_in_error(&qhp->wq);
+ ep = qhp->ep;
+ c4iw_get_ep(&ep->com);
+ terminate = 1;
+ disconnect = 1;
+ break;
+ case C4IW_QP_STATE_ERROR:
+ qhp->attr.state = C4IW_QP_STATE_ERROR;
+ if (!internal) {
+ abort = 1;
+ disconnect = 1;
+ ep = qhp->ep;
+ c4iw_get_ep(&ep->com);
+ }
+ goto err;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+ break;
+ case C4IW_QP_STATE_CLOSING:
+ if (!internal) {
+ ret = -EINVAL;
+ goto out;
+ }
+ switch (attrs->next_state) {
+ case C4IW_QP_STATE_IDLE:
+ flush_qp(qhp, &flag);
+ qhp->attr.state = C4IW_QP_STATE_IDLE;
+ qhp->attr.llp_stream_handle = NULL;
+ c4iw_put_ep(&qhp->ep->com);
+ qhp->ep = NULL;
+ wake_up(&qhp->wait);
+ break;
+ case C4IW_QP_STATE_ERROR:
+ goto err;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+ break;
+ case C4IW_QP_STATE_ERROR:
+ if (attrs->next_state != C4IW_QP_STATE_IDLE) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ qhp->attr.state = C4IW_QP_STATE_IDLE;
+ break;
+ case C4IW_QP_STATE_TERMINATE:
+ if (!internal) {
+ ret = -EINVAL;
+ goto out;
+ }
+ goto err;
+ break;
+ default:
+ printk(KERN_ERR "%s in a bad state %d\n",
+ __func__, qhp->attr.state);
+ ret = -EINVAL;
+ goto err;
+ break;
+ }
+ goto out;
+err:
+ PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
+ qhp->wq.sq.qid);
+
+ /* disassociate the LLP connection */
+ qhp->attr.llp_stream_handle = NULL;
+ ep = qhp->ep;
+ qhp->ep = NULL;
+ qhp->attr.state = C4IW_QP_STATE_ERROR;
+ free = 1;
+ wake_up(&qhp->wait);
+ BUG_ON(!ep);
+ flush_qp(qhp, &flag);
+out:
+ spin_unlock_irqrestore(&qhp->lock, flag);
+
+ if (terminate)
+ post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);
+
+ /*
+ * If disconnect is 1, then we need to initiate a disconnect
+ * on the EP. This can be a normal close (RTS->CLOSING) or
+ * an abnormal close (RTS/CLOSING->ERROR).
+ */
+ if (disconnect) {
+ c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
+ GFP_KERNEL);
+ c4iw_put_ep(&ep->com);
+ }
+
+ /*
+ * If free is 1, then we've disassociated the EP from the QP
+ * and we need to dereference the EP.
+ */
+ if (free)
+ c4iw_put_ep(&ep->com);
+
+ PDBG("%s exit state %d\n", __func__, qhp->attr.state);
+ return ret;
+}
+
+int c4iw_destroy_qp(struct ib_qp *ib_qp)
+{
+ struct c4iw_dev *rhp;
+ struct c4iw_qp *qhp;
+ struct c4iw_qp_attributes attrs;
+ struct c4iw_ucontext *ucontext;
+
+ qhp = to_c4iw_qp(ib_qp);
+ rhp = qhp->rhp;
+
+ attrs.next_state = C4IW_QP_STATE_ERROR;
+ c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+ wait_event(qhp->wait, !qhp->ep);
+
+ remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
+ remove_handle(rhp, &rhp->qpidr, qhp->wq.rq.qid);
+ atomic_dec(&qhp->refcnt);
+ wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
+
+ ucontext = ib_qp->uobject ?
+ to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
+ destroy_qp(&rhp->rdev, &qhp->wq,
+ ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+
+ PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
+ kfree(qhp);
+ return 0;
+}
+
+struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata)
+{
+ struct c4iw_dev *rhp;
+ struct c4iw_qp *qhp;
+ struct c4iw_pd *php;
+ struct c4iw_cq *schp;
+ struct c4iw_cq *rchp;
+ struct c4iw_create_qp_resp uresp;
+ int sqsize, rqsize;
+ struct c4iw_ucontext *ucontext;
+ int ret;
+ struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4;
+
+ PDBG("%s ib_pd %p\n", __func__, pd);
+
+ if (attrs->qp_type != IB_QPT_RC)
+ return ERR_PTR(-EINVAL);
+
+ php = to_c4iw_pd(pd);
+ rhp = php->rhp;
+ schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
+ rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
+ if (!schp || !rchp)
+ return ERR_PTR(-EINVAL);
+
+ if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
+ return ERR_PTR(-EINVAL);
+
+ rqsize = roundup(attrs->cap.max_recv_wr + 1, 16);
+ if (rqsize > T4_MAX_RQ_SIZE)
+ return ERR_PTR(-E2BIG);
+
+ sqsize = roundup(attrs->cap.max_send_wr + 1, 16);
+ if (sqsize > T4_MAX_SQ_SIZE)
+ return ERR_PTR(-E2BIG);
+
+ ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;
+
+
+ qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
+ if (!qhp)
+ return ERR_PTR(-ENOMEM);
+ qhp->wq.sq.size = sqsize;
+ qhp->wq.sq.memsize = (sqsize + 1) * sizeof *qhp->wq.sq.queue;
+ qhp->wq.rq.size = rqsize;
+ qhp->wq.rq.memsize = (rqsize + 1) * sizeof *qhp->wq.rq.queue;
+
+ if (ucontext) {
+ qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
+ qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
+ }
+
+ PDBG("%s sqsize %u sqmemsize %zu rqsize %u rqmemsize %zu\n",
+ __func__, sqsize, qhp->wq.sq.memsize, rqsize, qhp->wq.rq.memsize);
+
+ ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
+ ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+ if (ret)
+ goto err1;
+
+ attrs->cap.max_recv_wr = rqsize - 1;
+ attrs->cap.max_send_wr = sqsize - 1;
+ attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;
+
+ qhp->rhp = rhp;
+ qhp->attr.pd = php->pdid;
+ qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
+ qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
+ qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
+ qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
+ qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
+ qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
+ qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
+ qhp->attr.state = C4IW_QP_STATE_IDLE;
+ qhp->attr.next_state = C4IW_QP_STATE_IDLE;
+ qhp->attr.enable_rdma_read = 1;
+ qhp->attr.enable_rdma_write = 1;
+ qhp->attr.enable_bind = 1;
+ qhp->attr.max_ord = 1;
+ qhp->attr.max_ird = 1;
+ spin_lock_init(&qhp->lock);
+ init_waitqueue_head(&qhp->wait);
+ atomic_set(&qhp->refcnt, 1);
+
+ ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
+ if (ret)
+ goto err2;
+
+ ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.rq.qid);
+ if (ret)
+ goto err3;
+
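+ /*
+ * For userspace QPs, return four mmap keys: the SQ and RQ queue
+ * memory plus the per-queue doorbell/GTS pages.
+ */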
+ if (udata) {
+ mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
+ if (!mm1) {
+ ret = -ENOMEM;
+ goto err4;
+ }
+ mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
+ if (!mm2) {
+ ret = -ENOMEM;
+ goto err5;
+ }
+ mm3 = kmalloc(sizeof *mm3, GFP_KERNEL);
+ if (!mm3) {
+ ret = -ENOMEM;
+ goto err6;
+ }
+ mm4 = kmalloc(sizeof *mm4, GFP_KERNEL);
+ if (!mm4) {
+ ret = -ENOMEM;
+ goto err7;
+ }
+
+ uresp.qid_mask = rhp->rdev.qpmask;
+ uresp.sqid = qhp->wq.sq.qid;
+ uresp.sq_size = qhp->wq.sq.size;
+ uresp.sq_memsize = qhp->wq.sq.memsize;
+ uresp.rqid = qhp->wq.rq.qid;
+ uresp.rq_size = qhp->wq.rq.size;
+ uresp.rq_memsize = qhp->wq.rq.memsize;
+ spin_lock(&ucontext->mmap_lock);
+ uresp.sq_key = ucontext->key;
+ ucontext->key += PAGE_SIZE;
+ uresp.rq_key = ucontext->key;
+ ucontext->key += PAGE_SIZE;
+ uresp.sq_db_gts_key = ucontext->key;
+ ucontext->key += PAGE_SIZE;
+ uresp.rq_db_gts_key = ucontext->key;
+ ucontext->key += PAGE_SIZE;
+ spin_unlock(&ucontext->mmap_lock);
+ ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
+ if (ret)
+ goto err8;
+ mm1->key = uresp.sq_key;
+ mm1->addr = virt_to_phys(qhp->wq.sq.queue);
+ mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
+ insert_mmap(ucontext, mm1);
+ mm2->key = uresp.rq_key;
+ mm2->addr = virt_to_phys(qhp->wq.rq.queue);
+ mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
+ insert_mmap(ucontext, mm2);
+ mm3->key = uresp.sq_db_gts_key;
+ mm3->addr = qhp->wq.sq.udb;
+ mm3->len = PAGE_SIZE;
+ insert_mmap(ucontext, mm3);
+ mm4->key = uresp.rq_db_gts_key;
+ mm4->addr = qhp->wq.rq.udb;
+ mm4->len = PAGE_SIZE;
+ insert_mmap(ucontext, mm4);
+ }
+ qhp->ibqp.qp_num = qhp->wq.sq.qid;
+ init_timer(&(qhp->timer));
+ PDBG("%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x\n",
+ __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
+ qhp->wq.sq.qid);
+ return &qhp->ibqp;
+err8:
+ kfree(mm4);
+err7:
+ kfree(mm3);
+err6:
+ kfree(mm2);
+err5:
+ kfree(mm1);
+err4:
+ remove_handle(rhp, &rhp->qpidr, qhp->wq.rq.qid);
+err3:
+ remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
+err2:
+ destroy_qp(&rhp->rdev, &qhp->wq,
+ ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+err1:
+ kfree(qhp);
+ return ERR_PTR(ret);
+}
+
+int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct c4iw_dev *rhp;
+ struct c4iw_qp *qhp;
+ enum c4iw_qp_attr_mask mask = 0;
+ struct c4iw_qp_attributes attrs;
+
+ PDBG("%s ib_qp %p\n", __func__, ibqp);
+
+ /* iwarp does not support the RTR state */
+ if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
+ attr_mask &= ~IB_QP_STATE;
+
+ /* Make sure we still have something left to do */
+ if (!attr_mask)
+ return 0;
+
+ memset(&attrs, 0, sizeof attrs);
+ qhp = to_c4iw_qp(ibqp);
+ rhp = qhp->rhp;
+
+ attrs.next_state = c4iw_convert_state(attr->qp_state);
+ attrs.enable_rdma_read = (attr->qp_access_flags &
+ IB_ACCESS_REMOTE_READ) ? 1 : 0;
+ attrs.enable_rdma_write = (attr->qp_access_flags &
+ IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
+ attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;
+
+
+ mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
+ mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
+ (C4IW_QP_ATTR_ENABLE_RDMA_READ |
+ C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
+ C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;
+
+ return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
+}
+
+struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
+{
+ PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
+ return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
+}
diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
new file mode 100644
index 000000000000..fb195d1d9015
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/resource.c
@@ -0,0 +1,417 @@
+/*
+ * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/* Crude resource management */
+#include <linux/kernel.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/kfifo.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/genalloc.h>
+#include "iw_cxgb4.h"
+
+#define RANDOM_SIZE 16
+
+static int __c4iw_init_resource_fifo(struct kfifo *fifo,
+ spinlock_t *fifo_lock,
+ u32 nr, u32 skip_low,
+ u32 skip_high,
+ int random)
+{
+ u32 i, j, entry = 0, idx;
+ u32 random_bytes;
+ u32 rarray[16];
+ spin_lock_init(fifo_lock);
+
+ if (kfifo_alloc(fifo, nr * sizeof(u32), GFP_KERNEL))
+ return -ENOMEM;
+
+ for (i = 0; i < skip_low + skip_high; i++)
+ kfifo_in(fifo, (unsigned char *) &entry, sizeof(u32));
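+ /*
+ * Optionally shuffle the order in which IDs are handed out,
+ * presumably so allocated values (e.g. STags) are harder to guess.
+ */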
+ if (random) {
+ j = 0;
+ random_bytes = random32();
+ for (i = 0; i < RANDOM_SIZE; i++)
+ rarray[i] = i + skip_low;
+ for (i = skip_low + RANDOM_SIZE; i < nr - skip_high; i++) {
+ if (j >= RANDOM_SIZE) {
+ j = 0;
+ random_bytes = random32();
+ }
+ idx = (random_bytes >> (j * 2)) & 0xF;
+ kfifo_in(fifo,
+ (unsigned char *) &rarray[idx],
+ sizeof(u32));
+ rarray[idx] = i;
+ j++;
+ }
+ for (i = 0; i < RANDOM_SIZE; i++)
+ kfifo_in(fifo,
+ (unsigned char *) &rarray[i],
+ sizeof(u32));
+ } else
+ for (i = skip_low; i < nr - skip_high; i++)
+ kfifo_in(fifo, (unsigned char *) &i, sizeof(u32));
+
+ for (i = 0; i < skip_low + skip_high; i++)
+ if (kfifo_out_locked(fifo, (unsigned char *) &entry,
+ sizeof(u32), fifo_lock))
+ break;
+ return 0;
+}
+
+static int c4iw_init_resource_fifo(struct kfifo *fifo, spinlock_t * fifo_lock,
+ u32 nr, u32 skip_low, u32 skip_high)
+{
+ return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
+ skip_high, 0);
+}
+
+static int c4iw_init_resource_fifo_random(struct kfifo *fifo,
+ spinlock_t *fifo_lock,
+ u32 nr, u32 skip_low, u32 skip_high)
+{
+ return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
+ skip_high, 1);
+}
+
+static int c4iw_init_qid_fifo(struct c4iw_rdev *rdev)
+{
+ u32 i;
+
+ spin_lock_init(&rdev->resource.qid_fifo_lock);
+
+ if (kfifo_alloc(&rdev->resource.qid_fifo, T4_MAX_QIDS * sizeof(u32),
+ GFP_KERNEL))
+ return -ENOMEM;
+
+ for (i = T4_QID_BASE; i < T4_QID_BASE + T4_MAX_QIDS; i++)
+ if (!(i & rdev->qpmask))
+ kfifo_in(&rdev->resource.qid_fifo,
+ (unsigned char *) &i, sizeof(u32));
+ return 0;
+}
+
+/* nr_* must be a power of 2 */
+int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
+{
+ int err = 0;
+ err = c4iw_init_resource_fifo_random(&rdev->resource.tpt_fifo,
+ &rdev->resource.tpt_fifo_lock,
+ nr_tpt, 1, 0);
+ if (err)
+ goto tpt_err;
+ err = c4iw_init_qid_fifo(rdev);
+ if (err)
+ goto qid_err;
+ err = c4iw_init_resource_fifo(&rdev->resource.pdid_fifo,
+ &rdev->resource.pdid_fifo_lock,
+ nr_pdid, 1, 0);
+ if (err)
+ goto pdid_err;
+ return 0;
+pdid_err:
+ kfifo_free(&rdev->resource.qid_fifo);
+qid_err:
+ kfifo_free(&rdev->resource.tpt_fifo);
+tpt_err:
+ return -ENOMEM;
+}
+
+/*
+ * returns 0 if no resource available
+ */
+u32 c4iw_get_resource(struct kfifo *fifo, spinlock_t *lock)
+{
+ u32 entry;
+ if (kfifo_out_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock))
+ return entry;
+ else
+ return 0;
+}
+
+void c4iw_put_resource(struct kfifo *fifo, u32 entry, spinlock_t *lock)
+{
+ PDBG("%s entry 0x%x\n", __func__, entry);
+ kfifo_in_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock);
+}
+
+u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
+{
+ struct c4iw_qid_list *entry;
+ u32 qid;
+ int i;
+
+ mutex_lock(&uctx->lock);
+ if (!list_empty(&uctx->cqids)) {
+ entry = list_entry(uctx->cqids.next, struct c4iw_qid_list,
+ entry);
+ list_del(&entry->entry);
+ qid = entry->qid;
+ kfree(entry);
+ } else {
+ qid = c4iw_get_resource(&rdev->resource.qid_fifo,
+ &rdev->resource.qid_fifo_lock);
+ if (!qid)
+ goto out;
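+ /* pre-allocate the remaining qids that share this db/gts page */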
+ for (i = qid+1; i & rdev->qpmask; i++) {
+ entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ if (!entry)
+ goto out;
+ entry->qid = i;
+ list_add_tail(&entry->entry, &uctx->cqids);
+ }
+
+ /*
+ * now put the same ids on the qp list since they all
+ * map to the same db/gts page.
+ */
+ entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ if (!entry)
+ goto out;
+ entry->qid = qid;
+ list_add_tail(&entry->entry, &uctx->qpids);
+ for (i = qid+1; i & rdev->qpmask; i++) {
+ entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ if (!entry)
+ goto out;
+ entry->qid = i;
+ list_add_tail(&entry->entry, &uctx->qpids);
+ }
+ }
+out:
+ mutex_unlock(&uctx->lock);
+ PDBG("%s qid 0x%x\n", __func__, qid);
+ return qid;
+}
+
+void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
+ struct c4iw_dev_ucontext *uctx)
+{
+ struct c4iw_qid_list *entry;
+
+ entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ if (!entry)
+ return;
+ PDBG("%s qid 0x%x\n", __func__, qid);
+ entry->qid = qid;
+ mutex_lock(&uctx->lock);
+ list_add_tail(&entry->entry, &uctx->cqids);
+ mutex_unlock(&uctx->lock);
+}
+
+u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
+{
+ struct c4iw_qid_list *entry;
+ u32 qid;
+ int i;
+
+ mutex_lock(&uctx->lock);
+ if (!list_empty(&uctx->qpids)) {
+ entry = list_entry(uctx->qpids.next, struct c4iw_qid_list,
+ entry);
+ list_del(&entry->entry);
+ qid = entry->qid;
+ kfree(entry);
+ } else {
+ qid = c4iw_get_resource(&rdev->resource.qid_fifo,
+ &rdev->resource.qid_fifo_lock);
+ if (!qid)
+ goto out;
+ for (i = qid+1; i & rdev->qpmask; i++) {
+ entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ if (!entry)
+ goto out;
+ entry->qid = i;
+ list_add_tail(&entry->entry, &uctx->qpids);
+ }
+
+ /*
+ * now put the same ids on the cq list since they all
+ * map to the same db/gts page.
+ */
+ entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ if (!entry)
+ goto out;
+ entry->qid = qid;
+ list_add_tail(&entry->entry, &uctx->cqids);
+ for (i = qid+1; i & rdev->qpmask; i++) {
+ entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ if (!entry)
+ goto out;
+ entry->qid = i;
+ list_add_tail(&entry->entry, &uctx->cqids);
+ }
+ }
+out:
+ mutex_unlock(&uctx->lock);
+ PDBG("%s qid 0x%x\n", __func__, qid);
+ return qid;
+}
+
+void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
+ struct c4iw_dev_ucontext *uctx)
+{
+ struct c4iw_qid_list *entry;
+
+ entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ if (!entry)
+ return;
+ PDBG("%s qid 0x%x\n", __func__, qid);
+ entry->qid = qid;
+ mutex_lock(&uctx->lock);
+ list_add_tail(&entry->entry, &uctx->qpids);
+ mutex_unlock(&uctx->lock);
+}
+
+void c4iw_destroy_resource(struct c4iw_resource *rscp)
+{
+ kfifo_free(&rscp->tpt_fifo);
+ kfifo_free(&rscp->qid_fifo);
+ kfifo_free(&rscp->pdid_fifo);
+}
+
+/*
+ * PBL Memory Manager. Uses Linux generic allocator.
+ */
+
+#define MIN_PBL_SHIFT 8 /* 256B == min PBL size (32 entries) */
+
+u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
+{
+ unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);
+ PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
+ return (u32)addr;
+}
+
+void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
+{
+ PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
+ gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
+}
+
+int c4iw_pblpool_create(struct c4iw_rdev *rdev)
+{
+ unsigned pbl_start, pbl_chunk, pbl_top;
+
+ rdev->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1);
+ if (!rdev->pbl_pool)
+ return -ENOMEM;
+
+ pbl_start = rdev->lldi.vr->pbl.start;
+ pbl_chunk = rdev->lldi.vr->pbl.size;
+ pbl_top = pbl_start + pbl_chunk;
+
+ while (pbl_start < pbl_top) {
+ pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk);
+ if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) {
+ PDBG("%s failed to add PBL chunk (%x/%x)\n",
+ __func__, pbl_start, pbl_chunk);
+ if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
+ printk(KERN_WARNING MOD
+ "Failed to add all PBL chunks (%x/%x)\n",
+ pbl_start,
+ pbl_top - pbl_start);
+ return 0;
+ }
+ pbl_chunk >>= 1;
+ } else {
+ PDBG("%s added PBL chunk (%x/%x)\n",
+ __func__, pbl_start, pbl_chunk);
+ pbl_start += pbl_chunk;
+ }
+ }
+
+ return 0;
+}
+
+void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
+{
+ gen_pool_destroy(rdev->pbl_pool);
+}
+
+/*
+ * RQT Memory Manager. Uses Linux generic allocator.
+ */
+
+#define MIN_RQT_SHIFT 10 /* 1KB == min RQT size (16 entries) */
+
+u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
+{
+ unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);
+ PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
+ return (u32)addr;
+}
+
+void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
+{
+ PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6);
+ gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
+}
+
+int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
+{
+ unsigned rqt_start, rqt_chunk, rqt_top;
+
+ rdev->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1);
+ if (!rdev->rqt_pool)
+ return -ENOMEM;
+
+ rqt_start = rdev->lldi.vr->rq.start;
+ rqt_chunk = rdev->lldi.vr->rq.size;
+ rqt_top = rqt_start + rqt_chunk;
+
+ while (rqt_start < rqt_top) {
+ rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk);
+ if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) {
+ PDBG("%s failed to add RQT chunk (%x/%x)\n",
+ __func__, rqt_start, rqt_chunk);
+ if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) {
+ printk(KERN_WARNING MOD
+ "Failed to add all RQT chunks (%x/%x)\n",
+ rqt_start, rqt_top - rqt_start);
+ return 0;
+ }
+ rqt_chunk >>= 1;
+ } else {
+ PDBG("%s added RQT chunk (%x/%x)\n",
+ __func__, rqt_start, rqt_chunk);
+ rqt_start += rqt_chunk;
+ }
+ }
+ return 0;
+}
+
+void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
+{
+ gen_pool_destroy(rdev->rqt_pool);
+}
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
new file mode 100644
index 000000000000..d0e8af352408
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -0,0 +1,550 @@
+/*
+ * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __T4_H__
+#define __T4_H__
+
+#include "t4_hw.h"
+#include "t4_regs.h"
+#include "t4_msg.h"
+#include "t4fw_ri_api.h"
+
+#define T4_QID_BASE 1024
+#define T4_MAX_QIDS 256
+#define T4_MAX_NUM_QP (1<<16)
+#define T4_MAX_NUM_CQ (1<<15)
+#define T4_MAX_NUM_PD (1<<15)
+#define T4_MAX_PBL_SIZE 256
+#define T4_MAX_RQ_SIZE 1024
+#define T4_MAX_SQ_SIZE 1024
+#define T4_MAX_QP_DEPTH (T4_MAX_RQ_SIZE-1)
+#define T4_MAX_CQ_DEPTH 8192
+#define T4_MAX_NUM_STAG (1<<15)
+#define T4_MAX_MR_SIZE (~0ULL - 1)
+#define T4_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
+#define T4_STAG_UNSET 0xffffffff
+#define T4_FW_MAJ 0
+#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1)
+
+struct t4_status_page {
+ __be32 rsvd1; /* flit 0 - hw owns */
+ __be16 rsvd2;
+ __be16 qid;
+ __be16 cidx;
+ __be16 pidx;
+ u8 qp_err; /* flit 1 - sw owns */
+ u8 db_off;
+};
+
+#define T4_EQ_SIZE 64
+
+#define T4_SQ_NUM_SLOTS 4
+#define T4_SQ_NUM_BYTES (T4_EQ_SIZE * T4_SQ_NUM_SLOTS)
+#define T4_MAX_SEND_SGE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
+ sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
+#define T4_MAX_SEND_INLINE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
+ sizeof(struct fw_ri_immd)))
+#define T4_MAX_WRITE_INLINE ((T4_SQ_NUM_BYTES - \
+ sizeof(struct fw_ri_rdma_write_wr) - \
+ sizeof(struct fw_ri_immd)))
+#define T4_MAX_WRITE_SGE ((T4_SQ_NUM_BYTES - \
+ sizeof(struct fw_ri_rdma_write_wr) - \
+ sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
+#define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \
+ sizeof(struct fw_ri_immd)))
+#define T4_MAX_FR_DEPTH 255
+
+#define T4_RQ_NUM_SLOTS 2
+#define T4_RQ_NUM_BYTES (T4_EQ_SIZE * T4_RQ_NUM_SLOTS)
+#define T4_MAX_RECV_SGE ((T4_RQ_NUM_BYTES - sizeof(struct fw_ri_recv_wr) - \
+ sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
+
+union t4_wr {
+ struct fw_ri_res_wr res;
+ struct fw_ri_wr ri;
+ struct fw_ri_rdma_write_wr write;
+ struct fw_ri_send_wr send;
+ struct fw_ri_rdma_read_wr read;
+ struct fw_ri_bind_mw_wr bind;
+ struct fw_ri_fr_nsmr_wr fr;
+ struct fw_ri_inv_lstag_wr inv;
+ struct t4_status_page status;
+ __be64 flits[T4_EQ_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS];
+};
+
+union t4_recv_wr {
+ struct fw_ri_recv_wr recv;
+ struct t4_status_page status;
+ __be64 flits[T4_EQ_SIZE / sizeof(__be64) * T4_RQ_NUM_SLOTS];
+};
+
+static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid,
+ enum fw_wr_opcodes opcode, u8 flags, u8 len16)
+{
+ int slots_used;
+
+ wqe->send.opcode = (u8)opcode;
+ wqe->send.flags = flags;
+ wqe->send.wrid = wrid;
+ wqe->send.r1[0] = 0;
+ wqe->send.r1[1] = 0;
+ wqe->send.r1[2] = 0;
+ wqe->send.len16 = len16;
+
+ slots_used = DIV_ROUND_UP(len16*16, T4_EQ_SIZE);
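+ /* zero the leading flit of each SQ slot this WR does not use */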
+ while (slots_used < T4_SQ_NUM_SLOTS) {
+ wqe->flits[slots_used * T4_EQ_SIZE / sizeof(__be64)] = 0;
+ slots_used++;
+ }
+}
+
+/* CQE/AE status codes */
+#define T4_ERR_SUCCESS 0x0
+#define T4_ERR_STAG 0x1 /* STAG invalid: either the */
+ /* STAG is out of range, is 0, */
+ /* or STAG_key mismatch */
+#define T4_ERR_PDID 0x2 /* PDID mismatch */
+#define T4_ERR_QPID 0x3 /* QPID mismatch */
+#define T4_ERR_ACCESS 0x4 /* Invalid access right */
+#define T4_ERR_WRAP 0x5 /* Wrap error */
+#define T4_ERR_BOUND 0x6 /* base and bounds violation */
+#define T4_ERR_INVALIDATE_SHARED_MR 0x7 /* attempt to invalidate a */
+ /* shared memory region */
+#define T4_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8 /* attempt to invalidate a */
+ /* MR with a bound MW */
+#define T4_ERR_ECC 0x9 /* ECC error detected */
+#define T4_ERR_ECC_PSTAG 0xA /* ECC error detected when */
+ /* reading PSTAG for a MW */
+ /* Invalidate */
+#define T4_ERR_PBL_ADDR_BOUND 0xB /* pbl addr out of bounds: */
+ /* software error */
+#define T4_ERR_SWFLUSH 0xC /* SW FLUSHED */
+#define T4_ERR_CRC 0x10 /* CRC error */
+#define T4_ERR_MARKER 0x11 /* Marker error */
+#define T4_ERR_PDU_LEN_ERR 0x12 /* invalid PDU length */
+#define T4_ERR_OUT_OF_RQE 0x13 /* out of RQE */
+#define T4_ERR_DDP_VERSION 0x14 /* wrong DDP version */
+#define T4_ERR_RDMA_VERSION 0x15 /* wrong RDMA version */
+#define T4_ERR_OPCODE 0x16 /* invalid rdma opcode */
+#define T4_ERR_DDP_QUEUE_NUM 0x17 /* invalid ddp queue number */
+#define T4_ERR_MSN 0x18 /* MSN error */
+#define T4_ERR_TBIT 0x19 /* tag bit not set correctly */
+#define T4_ERR_MO 0x1A /* MO not 0 for TERMINATE */
+ /* or READ_REQ */
+#define T4_ERR_MSN_GAP 0x1B
+#define T4_ERR_MSN_RANGE 0x1C
+#define T4_ERR_IRD_OVERFLOW 0x1D
+#define T4_ERR_RQE_ADDR_BOUND 0x1E /* RQE addr out of bounds: */
+ /* software error */
+#define T4_ERR_INTERNAL_ERR 0x1F /* internal error (opcode */
+ /* mismatch) */
+/*
+ * CQE defs
+ */
+struct t4_cqe {
+ __be32 header;
+ __be32 len;
+ union {
+ struct {
+ __be32 stag;
+ __be32 msn;
+ } rcqe;
+ struct {
+ u32 nada1;
+ u16 nada2;
+ u16 cidx;
+ } scqe;
+ struct {
+ __be32 wrid_hi;
+ __be32 wrid_low;
+ } gen;
+ } u;
+ __be64 reserved;
+ __be64 bits_type_ts;
+};
+
+/* macros for flit 0 of the cqe */
+
+#define S_CQE_QPID 12
+#define M_CQE_QPID 0xFFFFF
+#define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
+#define V_CQE_QPID(x) ((x)<<S_CQE_QPID)
+
+#define S_CQE_SWCQE 11
+#define M_CQE_SWCQE 0x1
+#define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
+#define V_CQE_SWCQE(x) ((x)<<S_CQE_SWCQE)
+
+#define S_CQE_STATUS 5
+#define M_CQE_STATUS 0x1F
+#define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
+#define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS)
+
+#define S_CQE_TYPE 4
+#define M_CQE_TYPE 0x1
+#define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
+#define V_CQE_TYPE(x) ((x)<<S_CQE_TYPE)
+
+#define S_CQE_OPCODE 0
+#define M_CQE_OPCODE 0xF
+#define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
+#define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE)
+
+#define SW_CQE(x) (G_CQE_SWCQE(be32_to_cpu((x)->header)))
+#define CQE_QPID(x) (G_CQE_QPID(be32_to_cpu((x)->header)))
+#define CQE_TYPE(x) (G_CQE_TYPE(be32_to_cpu((x)->header)))
+#define SQ_TYPE(x) (CQE_TYPE((x)))
+#define RQ_TYPE(x) (!CQE_TYPE((x)))
+#define CQE_STATUS(x) (G_CQE_STATUS(be32_to_cpu((x)->header)))
+#define CQE_OPCODE(x) (G_CQE_OPCODE(be32_to_cpu((x)->header)))
+
+#define CQE_SEND_OPCODE(x) ( \
+ (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND) || \
+ (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \
+ (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \
+ (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV))
+
+#define CQE_LEN(x) (be32_to_cpu((x)->len))
+
+/* used for RQ completion processing */
+#define CQE_WRID_STAG(x) (be32_to_cpu((x)->u.rcqe.stag))
+#define CQE_WRID_MSN(x) (be32_to_cpu((x)->u.rcqe.msn))
+
+/* used for SQ completion processing */
+#define CQE_WRID_SQ_IDX(x) ((x)->u.scqe.cidx)
+
+/* generic accessor macros */
+#define CQE_WRID_HI(x) ((x)->u.gen.wrid_hi)
+#define CQE_WRID_LOW(x) ((x)->u.gen.wrid_low)
+
+/* macros for flit 3 of the cqe */
+#define S_CQE_GENBIT 63
+#define M_CQE_GENBIT 0x1
+#define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
+#define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)
+
+#define S_CQE_OVFBIT 62
+#define M_CQE_OVFBIT 0x1
+#define G_CQE_OVFBIT(x) ((((x) >> S_CQE_OVFBIT)) & M_CQE_OVFBIT)
+
+#define S_CQE_IQTYPE 60
+#define M_CQE_IQTYPE 0x3
+#define G_CQE_IQTYPE(x) ((((x) >> S_CQE_IQTYPE)) & M_CQE_IQTYPE)
+
+#define M_CQE_TS 0x0fffffffffffffffULL
+#define G_CQE_TS(x) ((x) & M_CQE_TS)
+
+#define CQE_OVFBIT(x) ((unsigned)G_CQE_OVFBIT(be64_to_cpu((x)->bits_type_ts)))
+#define CQE_GENBIT(x) ((unsigned)G_CQE_GENBIT(be64_to_cpu((x)->bits_type_ts)))
+#define CQE_TS(x) (G_CQE_TS(be64_to_cpu((x)->bits_type_ts)))
+
+struct t4_swsqe {
+ u64 wr_id;
+ struct t4_cqe cqe;
+ int read_len;
+ int opcode;
+ int complete;
+ int signaled;
+ u16 idx;
+};
+
+struct t4_sq {
+ union t4_wr *queue;
+ dma_addr_t dma_addr;
+ DECLARE_PCI_UNMAP_ADDR(mapping);
+ struct t4_swsqe *sw_sq;
+ struct t4_swsqe *oldest_read;
+ u64 udb;
+ size_t memsize;
+ u32 qid;
+ u16 in_use;
+ u16 size;
+ u16 cidx;
+ u16 pidx;
+};
+
+struct t4_swrqe {
+ u64 wr_id;
+};
+
+struct t4_rq {
+ union t4_recv_wr *queue;
+ dma_addr_t dma_addr;
+ DECLARE_PCI_UNMAP_ADDR(mapping);
+ struct t4_swrqe *sw_rq;
+ u64 udb;
+ size_t memsize;
+ u32 qid;
+ u32 msn;
+ u32 rqt_hwaddr;
+ u16 rqt_size;
+ u16 in_use;
+ u16 size;
+ u16 cidx;
+ u16 pidx;
+};
+
+struct t4_wq {
+ struct t4_sq sq;
+ struct t4_rq rq;
+ void __iomem *db;
+ void __iomem *gts;
+ struct c4iw_rdev *rdev;
+};
+
+static inline int t4_rqes_posted(struct t4_wq *wq)
+{
+ return wq->rq.in_use;
+}
+
+static inline int t4_rq_empty(struct t4_wq *wq)
+{
+ return wq->rq.in_use == 0;
+}
+
+static inline int t4_rq_full(struct t4_wq *wq)
+{
+ return wq->rq.in_use == (wq->rq.size - 1);
+}
+
+static inline u32 t4_rq_avail(struct t4_wq *wq)
+{
+ return wq->rq.size - 1 - wq->rq.in_use;
+}
+
+static inline void t4_rq_produce(struct t4_wq *wq)
+{
+ wq->rq.in_use++;
+ if (++wq->rq.pidx == wq->rq.size)
+ wq->rq.pidx = 0;
+}
+
+static inline void t4_rq_consume(struct t4_wq *wq)
+{
+ wq->rq.in_use--;
+ wq->rq.msn++;
+ if (++wq->rq.cidx == wq->rq.size)
+ wq->rq.cidx = 0;
+}
+
+static inline int t4_sq_empty(struct t4_wq *wq)
+{
+ return wq->sq.in_use == 0;
+}
+
+static inline int t4_sq_full(struct t4_wq *wq)
+{
+ return wq->sq.in_use == (wq->sq.size - 1);
+}
+
+static inline u32 t4_sq_avail(struct t4_wq *wq)
+{
+ return wq->sq.size - 1 - wq->sq.in_use;
+}
+
+static inline void t4_sq_produce(struct t4_wq *wq)
+{
+ wq->sq.in_use++;
+ if (++wq->sq.pidx == wq->sq.size)
+ wq->sq.pidx = 0;
+}
+
+static inline void t4_sq_consume(struct t4_wq *wq)
+{
+ wq->sq.in_use--;
+ if (++wq->sq.cidx == wq->sq.size)
+ wq->sq.cidx = 0;
+}
+
+static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc)
+{
+ inc *= T4_SQ_NUM_SLOTS;
+ wmb();
+ writel(QID(wq->sq.qid) | PIDX(inc), wq->db);
+}
+
+static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc)
+{
+ inc *= T4_RQ_NUM_SLOTS;
+ wmb();
+ writel(QID(wq->rq.qid) | PIDX(inc), wq->db);
+}
+
+static inline int t4_wq_in_error(struct t4_wq *wq)
+{
+ return wq->sq.queue[wq->sq.size].status.qp_err;
+}
+
+static inline void t4_set_wq_in_error(struct t4_wq *wq)
+{
+ wq->sq.queue[wq->sq.size].status.qp_err = 1;
+ wq->rq.queue[wq->rq.size].status.qp_err = 1;
+}
+
+static inline void t4_disable_wq_db(struct t4_wq *wq)
+{
+ wq->sq.queue[wq->sq.size].status.db_off = 1;
+ wq->rq.queue[wq->rq.size].status.db_off = 1;
+}
+
+static inline void t4_enable_wq_db(struct t4_wq *wq)
+{
+ wq->sq.queue[wq->sq.size].status.db_off = 0;
+ wq->rq.queue[wq->rq.size].status.db_off = 0;
+}
+
+static inline int t4_wq_db_enabled(struct t4_wq *wq)
+{
+ return !wq->sq.queue[wq->sq.size].status.db_off;
+}
+
+struct t4_cq {
+ struct t4_cqe *queue;
+ dma_addr_t dma_addr;
+ DECLARE_PCI_UNMAP_ADDR(mapping);
+ struct t4_cqe *sw_queue;
+ void __iomem *gts;
+ struct c4iw_rdev *rdev;
+ u64 ugts;
+ size_t memsize;
+ u64 timestamp;
+ u32 cqid;
+ u16 size; /* including status page */
+ u16 cidx;
+ u16 sw_pidx;
+ u16 sw_cidx;
+ u16 sw_in_use;
+ u16 cidx_inc;
+ u8 gen;
+ u8 error;
+};
+
+static inline int t4_arm_cq(struct t4_cq *cq, int se)
+{
+ u32 val;
+ u16 inc;
+
+ do {
+ /*
+ * inc must be less than both the max update value -and-
+ * the size of the CQ.
+ */
+ inc = cq->cidx_inc <= CIDXINC_MASK ? cq->cidx_inc :
+ CIDXINC_MASK;
+ inc = inc <= (cq->size - 1) ? inc : (cq->size - 1);
+ if (inc == cq->cidx_inc)
+ val = SEINTARM(se) | CIDXINC(inc) | TIMERREG(6) |
+ INGRESSQID(cq->cqid);
+ else
+ val = SEINTARM(0) | CIDXINC(inc) | TIMERREG(7) |
+ INGRESSQID(cq->cqid);
+ cq->cidx_inc -= inc;
+ writel(val, cq->gts);
+ } while (cq->cidx_inc);
+ return 0;
+}
+
+static inline void t4_swcq_produce(struct t4_cq *cq)
+{
+ cq->sw_in_use++;
+ if (++cq->sw_pidx == cq->size)
+ cq->sw_pidx = 0;
+}
+
+static inline void t4_swcq_consume(struct t4_cq *cq)
+{
+ cq->sw_in_use--;
+ if (++cq->sw_cidx == cq->size)
+ cq->sw_cidx = 0;
+}
+
+static inline void t4_hwcq_consume(struct t4_cq *cq)
+{
+ cq->cidx_inc++;
+ if (++cq->cidx == cq->size) {
+ cq->cidx = 0;
+ cq->gen ^= 1;
+ }
+}
+
+static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
+{
+ return (CQE_GENBIT(cqe) == cq->gen);
+}
+
+static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
+{
+ int ret = 0;
+ u64 bits_type_ts = be64_to_cpu(cq->queue[cq->cidx].bits_type_ts);
+
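+ /* a newer timestamp with a stale gen bit means the hw wrapped and overwrote unread CQEs */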
+ if (G_CQE_GENBIT(bits_type_ts) == cq->gen) {
+ *cqe = &cq->queue[cq->cidx];
+ cq->timestamp = G_CQE_TS(bits_type_ts);
+ } else if (G_CQE_TS(bits_type_ts) > cq->timestamp)
+ ret = -EOVERFLOW;
+ else
+ ret = -ENODATA;
+ if (ret == -EOVERFLOW) {
+ printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid);
+ cq->error = 1;
+ }
+ return ret;
+}
+
+static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
+{
+ if (cq->sw_in_use)
+ return &cq->sw_queue[cq->sw_cidx];
+ return NULL;
+}
+
+static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
+{
+ int ret = 0;
+
+ if (cq->error)
+ ret = -ENODATA;
+ else if (cq->sw_in_use)
+ *cqe = &cq->sw_queue[cq->sw_cidx];
+ else
+ ret = t4_next_hw_cqe(cq, cqe);
+ return ret;
+}
+
+static inline int t4_cq_in_error(struct t4_cq *cq)
+{
+ return ((struct t4_status_page *)&cq->queue[cq->size])->qp_err;
+}
+
+static inline void t4_set_cq_in_error(struct t4_cq *cq)
+{
+ ((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1;
+}
+#endif
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
new file mode 100644
index 000000000000..fc706bd07fae
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -0,0 +1,829 @@
+/*
+ * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _T4FW_RI_API_H_
+#define _T4FW_RI_API_H_
+
+#include "t4fw_api.h"
+
+enum fw_ri_wr_opcode {
+ FW_RI_RDMA_WRITE = 0x0, /* IETF RDMAP v1.0 ... */
+ FW_RI_READ_REQ = 0x1,
+ FW_RI_READ_RESP = 0x2,
+ FW_RI_SEND = 0x3,
+ FW_RI_SEND_WITH_INV = 0x4,
+ FW_RI_SEND_WITH_SE = 0x5,
+ FW_RI_SEND_WITH_SE_INV = 0x6,
+ FW_RI_TERMINATE = 0x7,
+ FW_RI_RDMA_INIT = 0x8, /* CHELSIO RI specific ... */
+ FW_RI_BIND_MW = 0x9,
+ FW_RI_FAST_REGISTER = 0xa,
+ FW_RI_LOCAL_INV = 0xb,
+ FW_RI_QP_MODIFY = 0xc,
+ FW_RI_BYPASS = 0xd,
+ FW_RI_RECEIVE = 0xe,
+
+ FW_RI_SGE_EC_CR_RETURN = 0xf
+};
+
+enum fw_ri_wr_flags {
+ FW_RI_COMPLETION_FLAG = 0x01,
+ FW_RI_NOTIFICATION_FLAG = 0x02,
+ FW_RI_SOLICITED_EVENT_FLAG = 0x04,
+ FW_RI_READ_FENCE_FLAG = 0x08,
+ FW_RI_LOCAL_FENCE_FLAG = 0x10,
+ FW_RI_RDMA_READ_INVALIDATE = 0x20
+};
+
+enum fw_ri_mpa_attrs {
+ FW_RI_MPA_RX_MARKER_ENABLE = 0x01,
+ FW_RI_MPA_TX_MARKER_ENABLE = 0x02,
+ FW_RI_MPA_CRC_ENABLE = 0x04,
+ FW_RI_MPA_IETF_ENABLE = 0x08
+};
+
+enum fw_ri_qp_caps {
+ FW_RI_QP_RDMA_READ_ENABLE = 0x01,
+ FW_RI_QP_RDMA_WRITE_ENABLE = 0x02,
+ FW_RI_QP_BIND_ENABLE = 0x04,
+ FW_RI_QP_FAST_REGISTER_ENABLE = 0x08,
+ FW_RI_QP_STAG0_ENABLE = 0x10
+};
+
+enum fw_ri_addr_type {
+ FW_RI_ZERO_BASED_TO = 0x00,
+ FW_RI_VA_BASED_TO = 0x01
+};
+
+enum fw_ri_mem_perms {
+ FW_RI_MEM_ACCESS_REM_WRITE = 0x01,
+ FW_RI_MEM_ACCESS_REM_READ = 0x02,
+ FW_RI_MEM_ACCESS_REM = 0x03,
+ FW_RI_MEM_ACCESS_LOCAL_WRITE = 0x04,
+ FW_RI_MEM_ACCESS_LOCAL_READ = 0x08,
+ FW_RI_MEM_ACCESS_LOCAL = 0x0C
+};
+
+enum fw_ri_stag_type {
+ FW_RI_STAG_NSMR = 0x00,
+ FW_RI_STAG_SMR = 0x01,
+ FW_RI_STAG_MW = 0x02,
+ FW_RI_STAG_MW_RELAXED = 0x03
+};
+
+enum fw_ri_data_op {
+ FW_RI_DATA_IMMD = 0x81,
+ FW_RI_DATA_DSGL = 0x82,
+ FW_RI_DATA_ISGL = 0x83
+};
+
+enum fw_ri_sgl_depth {
+ FW_RI_SGL_DEPTH_MAX_SQ = 16,
+ FW_RI_SGL_DEPTH_MAX_RQ = 4
+};
+
+struct fw_ri_dsge_pair {
+ __be32 len[2];
+ __be64 addr[2];
+};
+
+struct fw_ri_dsgl {
+ __u8 op;
+ __u8 r1;
+ __be16 nsge;
+ __be32 len0;
+ __be64 addr0;
+#ifndef C99_NOT_SUPPORTED
+ struct fw_ri_dsge_pair sge[0];
+#endif
+};
+
+struct fw_ri_sge {
+ __be32 stag;
+ __be32 len;
+ __be64 to;
+};
+
+struct fw_ri_isgl {
+ __u8 op;
+ __u8 r1;
+ __be16 nsge;
+ __be32 r2;
+#ifndef C99_NOT_SUPPORTED
+ struct fw_ri_sge sge[0];
+#endif
+};
+
+struct fw_ri_immd {
+ __u8 op;
+ __u8 r1;
+ __be16 r2;
+ __be32 immdlen;
+#ifndef C99_NOT_SUPPORTED
+ __u8 data[0];
+#endif
+};
+
+struct fw_ri_tpte {
+ __be32 valid_to_pdid;
+ __be32 locread_to_qpid;
+ __be32 nosnoop_pbladdr;
+ __be32 len_lo;
+ __be32 va_hi;
+ __be32 va_lo_fbo;
+ __be32 dca_mwbcnt_pstag;
+ __be32 len_hi;
+};
+
+#define S_FW_RI_TPTE_VALID 31
+#define M_FW_RI_TPTE_VALID 0x1
+#define V_FW_RI_TPTE_VALID(x) ((x) << S_FW_RI_TPTE_VALID)
+#define G_FW_RI_TPTE_VALID(x) \
+ (((x) >> S_FW_RI_TPTE_VALID) & M_FW_RI_TPTE_VALID)
+#define F_FW_RI_TPTE_VALID V_FW_RI_TPTE_VALID(1U)
+
+#define S_FW_RI_TPTE_STAGKEY 23
+#define M_FW_RI_TPTE_STAGKEY 0xff
+#define V_FW_RI_TPTE_STAGKEY(x) ((x) << S_FW_RI_TPTE_STAGKEY)
+#define G_FW_RI_TPTE_STAGKEY(x) \
+ (((x) >> S_FW_RI_TPTE_STAGKEY) & M_FW_RI_TPTE_STAGKEY)
+
+#define S_FW_RI_TPTE_STAGSTATE 22
+#define M_FW_RI_TPTE_STAGSTATE 0x1
+#define V_FW_RI_TPTE_STAGSTATE(x) ((x) << S_FW_RI_TPTE_STAGSTATE)
+#define G_FW_RI_TPTE_STAGSTATE(x) \
+ (((x) >> S_FW_RI_TPTE_STAGSTATE) & M_FW_RI_TPTE_STAGSTATE)
+#define F_FW_RI_TPTE_STAGSTATE V_FW_RI_TPTE_STAGSTATE(1U)
+
+#define S_FW_RI_TPTE_STAGTYPE 20
+#define M_FW_RI_TPTE_STAGTYPE 0x3
+#define V_FW_RI_TPTE_STAGTYPE(x) ((x) << S_FW_RI_TPTE_STAGTYPE)
+#define G_FW_RI_TPTE_STAGTYPE(x) \
+ (((x) >> S_FW_RI_TPTE_STAGTYPE) & M_FW_RI_TPTE_STAGTYPE)
+
+#define S_FW_RI_TPTE_PDID 0
+#define M_FW_RI_TPTE_PDID 0xfffff
+#define V_FW_RI_TPTE_PDID(x) ((x) << S_FW_RI_TPTE_PDID)
+#define G_FW_RI_TPTE_PDID(x) \
+ (((x) >> S_FW_RI_TPTE_PDID) & M_FW_RI_TPTE_PDID)
+
+#define S_FW_RI_TPTE_PERM 28
+#define M_FW_RI_TPTE_PERM 0xf
+#define V_FW_RI_TPTE_PERM(x) ((x) << S_FW_RI_TPTE_PERM)
+#define G_FW_RI_TPTE_PERM(x) \
+ (((x) >> S_FW_RI_TPTE_PERM) & M_FW_RI_TPTE_PERM)
+
+#define S_FW_RI_TPTE_REMINVDIS 27
+#define M_FW_RI_TPTE_REMINVDIS 0x1
+#define V_FW_RI_TPTE_REMINVDIS(x) ((x) << S_FW_RI_TPTE_REMINVDIS)
+#define G_FW_RI_TPTE_REMINVDIS(x) \
+ (((x) >> S_FW_RI_TPTE_REMINVDIS) & M_FW_RI_TPTE_REMINVDIS)
+#define F_FW_RI_TPTE_REMINVDIS V_FW_RI_TPTE_REMINVDIS(1U)
+
+#define S_FW_RI_TPTE_ADDRTYPE 26
+#define M_FW_RI_TPTE_ADDRTYPE 1
+#define V_FW_RI_TPTE_ADDRTYPE(x) ((x) << S_FW_RI_TPTE_ADDRTYPE)
+#define G_FW_RI_TPTE_ADDRTYPE(x) \
+ (((x) >> S_FW_RI_TPTE_ADDRTYPE) & M_FW_RI_TPTE_ADDRTYPE)
+#define F_FW_RI_TPTE_ADDRTYPE V_FW_RI_TPTE_ADDRTYPE(1U)
+
+#define S_FW_RI_TPTE_MWBINDEN 25
+#define M_FW_RI_TPTE_MWBINDEN 0x1
+#define V_FW_RI_TPTE_MWBINDEN(x) ((x) << S_FW_RI_TPTE_MWBINDEN)
+#define G_FW_RI_TPTE_MWBINDEN(x) \
+ (((x) >> S_FW_RI_TPTE_MWBINDEN) & M_FW_RI_TPTE_MWBINDEN)
+#define F_FW_RI_TPTE_MWBINDEN V_FW_RI_TPTE_MWBINDEN(1U)
+
+#define S_FW_RI_TPTE_PS 20
+#define M_FW_RI_TPTE_PS 0x1f
+#define V_FW_RI_TPTE_PS(x) ((x) << S_FW_RI_TPTE_PS)
+#define G_FW_RI_TPTE_PS(x) \
+ (((x) >> S_FW_RI_TPTE_PS) & M_FW_RI_TPTE_PS)
+
+#define S_FW_RI_TPTE_QPID 0
+#define M_FW_RI_TPTE_QPID 0xfffff
+#define V_FW_RI_TPTE_QPID(x) ((x) << S_FW_RI_TPTE_QPID)
+#define G_FW_RI_TPTE_QPID(x) \
+ (((x) >> S_FW_RI_TPTE_QPID) & M_FW_RI_TPTE_QPID)
+
+#define S_FW_RI_TPTE_NOSNOOP 30
+#define M_FW_RI_TPTE_NOSNOOP 0x1
+#define V_FW_RI_TPTE_NOSNOOP(x) ((x) << S_FW_RI_TPTE_NOSNOOP)
+#define G_FW_RI_TPTE_NOSNOOP(x) \
+ (((x) >> S_FW_RI_TPTE_NOSNOOP) & M_FW_RI_TPTE_NOSNOOP)
+#define F_FW_RI_TPTE_NOSNOOP V_FW_RI_TPTE_NOSNOOP(1U)
+
+#define S_FW_RI_TPTE_PBLADDR 0
+#define M_FW_RI_TPTE_PBLADDR 0x1fffffff
+#define V_FW_RI_TPTE_PBLADDR(x) ((x) << S_FW_RI_TPTE_PBLADDR)
+#define G_FW_RI_TPTE_PBLADDR(x) \
+ (((x) >> S_FW_RI_TPTE_PBLADDR) & M_FW_RI_TPTE_PBLADDR)
+
+#define S_FW_RI_TPTE_DCA 24
+#define M_FW_RI_TPTE_DCA 0x1f
+#define V_FW_RI_TPTE_DCA(x) ((x) << S_FW_RI_TPTE_DCA)
+#define G_FW_RI_TPTE_DCA(x) \
+ (((x) >> S_FW_RI_TPTE_DCA) & M_FW_RI_TPTE_DCA)
+
+#define S_FW_RI_TPTE_MWBCNT_PSTAG 0
+#define M_FW_RI_TPTE_MWBCNT_PSTAG 0xffffff
+#define V_FW_RI_TPTE_MWBCNT_PSTAT(x) \
+ ((x) << S_FW_RI_TPTE_MWBCNT_PSTAG)
+#define G_FW_RI_TPTE_MWBCNT_PSTAG(x) \
+ (((x) >> S_FW_RI_TPTE_MWBCNT_PSTAG) & M_FW_RI_TPTE_MWBCNT_PSTAG)
+
+enum fw_ri_res_type {
+ FW_RI_RES_TYPE_SQ,
+ FW_RI_RES_TYPE_RQ,
+ FW_RI_RES_TYPE_CQ,
+};
+
+enum fw_ri_res_op {
+ FW_RI_RES_OP_WRITE,
+ FW_RI_RES_OP_RESET,
+};
+
+struct fw_ri_res {
+ union fw_ri_restype {
+ struct fw_ri_res_sqrq {
+ __u8 restype;
+ __u8 op;
+ __be16 r3;
+ __be32 eqid;
+ __be32 r4[2];
+ __be32 fetchszm_to_iqid;
+ __be32 dcaen_to_eqsize;
+ __be64 eqaddr;
+ } sqrq;
+ struct fw_ri_res_cq {
+ __u8 restype;
+ __u8 op;
+ __be16 r3;
+ __be32 iqid;
+ __be32 r4[2];
+ __be32 iqandst_to_iqandstindex;
+ __be16 iqdroprss_to_iqesize;
+ __be16 iqsize;
+ __be64 iqaddr;
+ __be32 iqns_iqro;
+ __be32 r6_lo;
+ __be64 r7;
+ } cq;
+ } u;
+};
+
+struct fw_ri_res_wr {
+ __be32 op_nres;
+ __be32 len16_pkd;
+ __u64 cookie;
+#ifndef C99_NOT_SUPPORTED
+ struct fw_ri_res res[0];
+#endif
+};
+
+#define S_FW_RI_RES_WR_NRES 0
+#define M_FW_RI_RES_WR_NRES 0xff
+#define V_FW_RI_RES_WR_NRES(x) ((x) << S_FW_RI_RES_WR_NRES)
+#define G_FW_RI_RES_WR_NRES(x) \
+ (((x) >> S_FW_RI_RES_WR_NRES) & M_FW_RI_RES_WR_NRES)
+
+#define S_FW_RI_RES_WR_FETCHSZM 26
+#define M_FW_RI_RES_WR_FETCHSZM 0x1
+#define V_FW_RI_RES_WR_FETCHSZM(x) ((x) << S_FW_RI_RES_WR_FETCHSZM)
+#define G_FW_RI_RES_WR_FETCHSZM(x) \
+ (((x) >> S_FW_RI_RES_WR_FETCHSZM) & M_FW_RI_RES_WR_FETCHSZM)
+#define F_FW_RI_RES_WR_FETCHSZM V_FW_RI_RES_WR_FETCHSZM(1U)
+
+#define S_FW_RI_RES_WR_STATUSPGNS 25
+#define M_FW_RI_RES_WR_STATUSPGNS 0x1
+#define V_FW_RI_RES_WR_STATUSPGNS(x) ((x) << S_FW_RI_RES_WR_STATUSPGNS)
+#define G_FW_RI_RES_WR_STATUSPGNS(x) \
+ (((x) >> S_FW_RI_RES_WR_STATUSPGNS) & M_FW_RI_RES_WR_STATUSPGNS)
+#define F_FW_RI_RES_WR_STATUSPGNS V_FW_RI_RES_WR_STATUSPGNS(1U)
+
+#define S_FW_RI_RES_WR_STATUSPGRO 24
+#define M_FW_RI_RES_WR_STATUSPGRO 0x1
+#define V_FW_RI_RES_WR_STATUSPGRO(x) ((x) << S_FW_RI_RES_WR_STATUSPGRO)
+#define G_FW_RI_RES_WR_STATUSPGRO(x) \
+ (((x) >> S_FW_RI_RES_WR_STATUSPGRO) & M_FW_RI_RES_WR_STATUSPGRO)
+#define F_FW_RI_RES_WR_STATUSPGRO V_FW_RI_RES_WR_STATUSPGRO(1U)
+
+#define S_FW_RI_RES_WR_FETCHNS 23
+#define M_FW_RI_RES_WR_FETCHNS 0x1
+#define V_FW_RI_RES_WR_FETCHNS(x) ((x) << S_FW_RI_RES_WR_FETCHNS)
+#define G_FW_RI_RES_WR_FETCHNS(x) \
+ (((x) >> S_FW_RI_RES_WR_FETCHNS) & M_FW_RI_RES_WR_FETCHNS)
+#define F_FW_RI_RES_WR_FETCHNS V_FW_RI_RES_WR_FETCHNS(1U)
+
+#define S_FW_RI_RES_WR_FETCHRO 22
+#define M_FW_RI_RES_WR_FETCHRO 0x1
+#define V_FW_RI_RES_WR_FETCHRO(x) ((x) << S_FW_RI_RES_WR_FETCHRO)
+#define G_FW_RI_RES_WR_FETCHRO(x) \
+ (((x) >> S_FW_RI_RES_WR_FETCHRO) & M_FW_RI_RES_WR_FETCHRO)
+#define F_FW_RI_RES_WR_FETCHRO V_FW_RI_RES_WR_FETCHRO(1U)
+
+#define S_FW_RI_RES_WR_HOSTFCMODE 20
+#define M_FW_RI_RES_WR_HOSTFCMODE 0x3
+#define V_FW_RI_RES_WR_HOSTFCMODE(x) ((x) << S_FW_RI_RES_WR_HOSTFCMODE)
+#define G_FW_RI_RES_WR_HOSTFCMODE(x) \
+ (((x) >> S_FW_RI_RES_WR_HOSTFCMODE) & M_FW_RI_RES_WR_HOSTFCMODE)
+
+#define S_FW_RI_RES_WR_CPRIO 19
+#define M_FW_RI_RES_WR_CPRIO 0x1
+#define V_FW_RI_RES_WR_CPRIO(x) ((x) << S_FW_RI_RES_WR_CPRIO)
+#define G_FW_RI_RES_WR_CPRIO(x) \
+ (((x) >> S_FW_RI_RES_WR_CPRIO) & M_FW_RI_RES_WR_CPRIO)
+#define F_FW_RI_RES_WR_CPRIO V_FW_RI_RES_WR_CPRIO(1U)
+
+#define S_FW_RI_RES_WR_ONCHIP 18
+#define M_FW_RI_RES_WR_ONCHIP 0x1
+#define V_FW_RI_RES_WR_ONCHIP(x) ((x) << S_FW_RI_RES_WR_ONCHIP)
+#define G_FW_RI_RES_WR_ONCHIP(x) \
+ (((x) >> S_FW_RI_RES_WR_ONCHIP) & M_FW_RI_RES_WR_ONCHIP)
+#define F_FW_RI_RES_WR_ONCHIP V_FW_RI_RES_WR_ONCHIP(1U)
+
+#define S_FW_RI_RES_WR_PCIECHN 16
+#define M_FW_RI_RES_WR_PCIECHN 0x3
+#define V_FW_RI_RES_WR_PCIECHN(x) ((x) << S_FW_RI_RES_WR_PCIECHN)
+#define G_FW_RI_RES_WR_PCIECHN(x) \
+ (((x) >> S_FW_RI_RES_WR_PCIECHN) & M_FW_RI_RES_WR_PCIECHN)
+
+#define S_FW_RI_RES_WR_IQID 0
+#define M_FW_RI_RES_WR_IQID 0xffff
+#define V_FW_RI_RES_WR_IQID(x) ((x) << S_FW_RI_RES_WR_IQID)
+#define G_FW_RI_RES_WR_IQID(x) \
+ (((x) >> S_FW_RI_RES_WR_IQID) & M_FW_RI_RES_WR_IQID)
+
+#define S_FW_RI_RES_WR_DCAEN 31
+#define M_FW_RI_RES_WR_DCAEN 0x1
+#define V_FW_RI_RES_WR_DCAEN(x) ((x) << S_FW_RI_RES_WR_DCAEN)
+#define G_FW_RI_RES_WR_DCAEN(x) \
+ (((x) >> S_FW_RI_RES_WR_DCAEN) & M_FW_RI_RES_WR_DCAEN)
+#define F_FW_RI_RES_WR_DCAEN V_FW_RI_RES_WR_DCAEN(1U)
+
+#define S_FW_RI_RES_WR_DCACPU 26
+#define M_FW_RI_RES_WR_DCACPU 0x1f
+#define V_FW_RI_RES_WR_DCACPU(x) ((x) << S_FW_RI_RES_WR_DCACPU)
+#define G_FW_RI_RES_WR_DCACPU(x) \
+ (((x) >> S_FW_RI_RES_WR_DCACPU) & M_FW_RI_RES_WR_DCACPU)
+
+#define S_FW_RI_RES_WR_FBMIN 23
+#define M_FW_RI_RES_WR_FBMIN 0x7
+#define V_FW_RI_RES_WR_FBMIN(x) ((x) << S_FW_RI_RES_WR_FBMIN)
+#define G_FW_RI_RES_WR_FBMIN(x) \
+ (((x) >> S_FW_RI_RES_WR_FBMIN) & M_FW_RI_RES_WR_FBMIN)
+
+#define S_FW_RI_RES_WR_FBMAX 20
+#define M_FW_RI_RES_WR_FBMAX 0x7
+#define V_FW_RI_RES_WR_FBMAX(x) ((x) << S_FW_RI_RES_WR_FBMAX)
+#define G_FW_RI_RES_WR_FBMAX(x) \
+ (((x) >> S_FW_RI_RES_WR_FBMAX) & M_FW_RI_RES_WR_FBMAX)
+
+#define S_FW_RI_RES_WR_CIDXFTHRESHO 19
+#define M_FW_RI_RES_WR_CIDXFTHRESHO 0x1
+#define V_FW_RI_RES_WR_CIDXFTHRESHO(x) ((x) << S_FW_RI_RES_WR_CIDXFTHRESHO)
+#define G_FW_RI_RES_WR_CIDXFTHRESHO(x) \
+ (((x) >> S_FW_RI_RES_WR_CIDXFTHRESHO) & M_FW_RI_RES_WR_CIDXFTHRESHO)
+#define F_FW_RI_RES_WR_CIDXFTHRESHO V_FW_RI_RES_WR_CIDXFTHRESHO(1U)
+
+#define S_FW_RI_RES_WR_CIDXFTHRESH 16
+#define M_FW_RI_RES_WR_CIDXFTHRESH 0x7
+#define V_FW_RI_RES_WR_CIDXFTHRESH(x) ((x) << S_FW_RI_RES_WR_CIDXFTHRESH)
+#define G_FW_RI_RES_WR_CIDXFTHRESH(x) \
+ (((x) >> S_FW_RI_RES_WR_CIDXFTHRESH) & M_FW_RI_RES_WR_CIDXFTHRESH)
+
+#define S_FW_RI_RES_WR_EQSIZE 0
+#define M_FW_RI_RES_WR_EQSIZE 0xffff
+#define V_FW_RI_RES_WR_EQSIZE(x) ((x) << S_FW_RI_RES_WR_EQSIZE)
+#define G_FW_RI_RES_WR_EQSIZE(x) \
+ (((x) >> S_FW_RI_RES_WR_EQSIZE) & M_FW_RI_RES_WR_EQSIZE)
+
+#define S_FW_RI_RES_WR_IQANDST 15
+#define M_FW_RI_RES_WR_IQANDST 0x1
+#define V_FW_RI_RES_WR_IQANDST(x) ((x) << S_FW_RI_RES_WR_IQANDST)
+#define G_FW_RI_RES_WR_IQANDST(x) \
+ (((x) >> S_FW_RI_RES_WR_IQANDST) & M_FW_RI_RES_WR_IQANDST)
+#define F_FW_RI_RES_WR_IQANDST V_FW_RI_RES_WR_IQANDST(1U)
+
+#define S_FW_RI_RES_WR_IQANUS 14
+#define M_FW_RI_RES_WR_IQANUS 0x1
+#define V_FW_RI_RES_WR_IQANUS(x) ((x) << S_FW_RI_RES_WR_IQANUS)
+#define G_FW_RI_RES_WR_IQANUS(x) \
+ (((x) >> S_FW_RI_RES_WR_IQANUS) & M_FW_RI_RES_WR_IQANUS)
+#define F_FW_RI_RES_WR_IQANUS V_FW_RI_RES_WR_IQANUS(1U)
+
+#define S_FW_RI_RES_WR_IQANUD 12
+#define M_FW_RI_RES_WR_IQANUD 0x3
+#define V_FW_RI_RES_WR_IQANUD(x) ((x) << S_FW_RI_RES_WR_IQANUD)
+#define G_FW_RI_RES_WR_IQANUD(x) \
+ (((x) >> S_FW_RI_RES_WR_IQANUD) & M_FW_RI_RES_WR_IQANUD)
+
+#define S_FW_RI_RES_WR_IQANDSTINDEX 0
+#define M_FW_RI_RES_WR_IQANDSTINDEX 0xfff
+#define V_FW_RI_RES_WR_IQANDSTINDEX(x) ((x) << S_FW_RI_RES_WR_IQANDSTINDEX)
+#define G_FW_RI_RES_WR_IQANDSTINDEX(x) \
+ (((x) >> S_FW_RI_RES_WR_IQANDSTINDEX) & M_FW_RI_RES_WR_IQANDSTINDEX)
+
+#define S_FW_RI_RES_WR_IQDROPRSS 15
+#define M_FW_RI_RES_WR_IQDROPRSS 0x1
+#define V_FW_RI_RES_WR_IQDROPRSS(x) ((x) << S_FW_RI_RES_WR_IQDROPRSS)
+#define G_FW_RI_RES_WR_IQDROPRSS(x) \
+ (((x) >> S_FW_RI_RES_WR_IQDROPRSS) & M_FW_RI_RES_WR_IQDROPRSS)
+#define F_FW_RI_RES_WR_IQDROPRSS V_FW_RI_RES_WR_IQDROPRSS(1U)
+
+#define S_FW_RI_RES_WR_IQGTSMODE 14
+#define M_FW_RI_RES_WR_IQGTSMODE 0x1
+#define V_FW_RI_RES_WR_IQGTSMODE(x) ((x) << S_FW_RI_RES_WR_IQGTSMODE)
+#define G_FW_RI_RES_WR_IQGTSMODE(x) \
+ (((x) >> S_FW_RI_RES_WR_IQGTSMODE) & M_FW_RI_RES_WR_IQGTSMODE)
+#define F_FW_RI_RES_WR_IQGTSMODE V_FW_RI_RES_WR_IQGTSMODE(1U)
+
+#define S_FW_RI_RES_WR_IQPCIECH 12
+#define M_FW_RI_RES_WR_IQPCIECH 0x3
+#define V_FW_RI_RES_WR_IQPCIECH(x) ((x) << S_FW_RI_RES_WR_IQPCIECH)
+#define G_FW_RI_RES_WR_IQPCIECH(x) \
+ (((x) >> S_FW_RI_RES_WR_IQPCIECH) & M_FW_RI_RES_WR_IQPCIECH)
+
+#define S_FW_RI_RES_WR_IQDCAEN 11
+#define M_FW_RI_RES_WR_IQDCAEN 0x1
+#define V_FW_RI_RES_WR_IQDCAEN(x) ((x) << S_FW_RI_RES_WR_IQDCAEN)
+#define G_FW_RI_RES_WR_IQDCAEN(x) \
+ (((x) >> S_FW_RI_RES_WR_IQDCAEN) & M_FW_RI_RES_WR_IQDCAEN)
+#define F_FW_RI_RES_WR_IQDCAEN V_FW_RI_RES_WR_IQDCAEN(1U)
+
+#define S_FW_RI_RES_WR_IQDCACPU 6
+#define M_FW_RI_RES_WR_IQDCACPU 0x1f
+#define V_FW_RI_RES_WR_IQDCACPU(x) ((x) << S_FW_RI_RES_WR_IQDCACPU)
+#define G_FW_RI_RES_WR_IQDCACPU(x) \
+ (((x) >> S_FW_RI_RES_WR_IQDCACPU) & M_FW_RI_RES_WR_IQDCACPU)
+
+#define S_FW_RI_RES_WR_IQINTCNTTHRESH 4
+#define M_FW_RI_RES_WR_IQINTCNTTHRESH 0x3
+#define V_FW_RI_RES_WR_IQINTCNTTHRESH(x) \
+ ((x) << S_FW_RI_RES_WR_IQINTCNTTHRESH)
+#define G_FW_RI_RES_WR_IQINTCNTTHRESH(x) \
+ (((x) >> S_FW_RI_RES_WR_IQINTCNTTHRESH) & M_FW_RI_RES_WR_IQINTCNTTHRESH)
+
+#define S_FW_RI_RES_WR_IQO 3
+#define M_FW_RI_RES_WR_IQO 0x1
+#define V_FW_RI_RES_WR_IQO(x) ((x) << S_FW_RI_RES_WR_IQO)
+#define G_FW_RI_RES_WR_IQO(x) \
+ (((x) >> S_FW_RI_RES_WR_IQO) & M_FW_RI_RES_WR_IQO)
+#define F_FW_RI_RES_WR_IQO V_FW_RI_RES_WR_IQO(1U)
+
+#define S_FW_RI_RES_WR_IQCPRIO 2
+#define M_FW_RI_RES_WR_IQCPRIO 0x1
+#define V_FW_RI_RES_WR_IQCPRIO(x) ((x) << S_FW_RI_RES_WR_IQCPRIO)
+#define G_FW_RI_RES_WR_IQCPRIO(x) \
+ (((x) >> S_FW_RI_RES_WR_IQCPRIO) & M_FW_RI_RES_WR_IQCPRIO)
+#define F_FW_RI_RES_WR_IQCPRIO V_FW_RI_RES_WR_IQCPRIO(1U)
+
+#define S_FW_RI_RES_WR_IQESIZE 0
+#define M_FW_RI_RES_WR_IQESIZE 0x3
+#define V_FW_RI_RES_WR_IQESIZE(x) ((x) << S_FW_RI_RES_WR_IQESIZE)
+#define G_FW_RI_RES_WR_IQESIZE(x) \
+ (((x) >> S_FW_RI_RES_WR_IQESIZE) & M_FW_RI_RES_WR_IQESIZE)
+
+#define S_FW_RI_RES_WR_IQNS 31
+#define M_FW_RI_RES_WR_IQNS 0x1
+#define V_FW_RI_RES_WR_IQNS(x) ((x) << S_FW_RI_RES_WR_IQNS)
+#define G_FW_RI_RES_WR_IQNS(x) \
+ (((x) >> S_FW_RI_RES_WR_IQNS) & M_FW_RI_RES_WR_IQNS)
+#define F_FW_RI_RES_WR_IQNS V_FW_RI_RES_WR_IQNS(1U)
+
+#define S_FW_RI_RES_WR_IQRO 30
+#define M_FW_RI_RES_WR_IQRO 0x1
+#define V_FW_RI_RES_WR_IQRO(x) ((x) << S_FW_RI_RES_WR_IQRO)
+#define G_FW_RI_RES_WR_IQRO(x) \
+ (((x) >> S_FW_RI_RES_WR_IQRO) & M_FW_RI_RES_WR_IQRO)
+#define F_FW_RI_RES_WR_IQRO V_FW_RI_RES_WR_IQRO(1U)
+
+struct fw_ri_rdma_write_wr {
+ __u8 opcode;
+ __u8 flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be64 r2;
+ __be32 plen;
+ __be32 stag_sink;
+ __be64 to_sink;
+#ifndef C99_NOT_SUPPORTED
+ union {
+ struct fw_ri_immd immd_src[0];
+ struct fw_ri_isgl isgl_src[0];
+ } u;
+#endif
+};
+
+struct fw_ri_send_wr {
+ __u8 opcode;
+ __u8 flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be32 sendop_pkd;
+ __be32 stag_inv;
+ __be32 plen;
+ __be32 r3;
+ __be64 r4;
+#ifndef C99_NOT_SUPPORTED
+ union {
+ struct fw_ri_immd immd_src[0];
+ struct fw_ri_isgl isgl_src[0];
+ } u;
+#endif
+};
+
+#define S_FW_RI_SEND_WR_SENDOP 0
+#define M_FW_RI_SEND_WR_SENDOP 0xf
+#define V_FW_RI_SEND_WR_SENDOP(x) ((x) << S_FW_RI_SEND_WR_SENDOP)
+#define G_FW_RI_SEND_WR_SENDOP(x) \
+ (((x) >> S_FW_RI_SEND_WR_SENDOP) & M_FW_RI_SEND_WR_SENDOP)
+
+struct fw_ri_rdma_read_wr {
+ __u8 opcode;
+ __u8 flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be64 r2;
+ __be32 stag_sink;
+ __be32 to_sink_hi;
+ __be32 to_sink_lo;
+ __be32 plen;
+ __be32 stag_src;
+ __be32 to_src_hi;
+ __be32 to_src_lo;
+ __be32 r5;
+};
+
+struct fw_ri_recv_wr {
+ __u8 opcode;
+ __u8 r1;
+ __u16 wrid;
+ __u8 r2[3];
+ __u8 len16;
+ struct fw_ri_isgl isgl;
+};
+
+struct fw_ri_bind_mw_wr {
+ __u8 opcode;
+ __u8 flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __u8 qpbinde_to_dcacpu;
+ __u8 pgsz_shift;
+ __u8 addr_type;
+ __u8 mem_perms;
+ __be32 stag_mr;
+ __be32 stag_mw;
+ __be32 r3;
+ __be64 len_mw;
+ __be64 va_fbo;
+ __be64 r4;
+};
+
+#define S_FW_RI_BIND_MW_WR_QPBINDE 6
+#define M_FW_RI_BIND_MW_WR_QPBINDE 0x1
+#define V_FW_RI_BIND_MW_WR_QPBINDE(x) ((x) << S_FW_RI_BIND_MW_WR_QPBINDE)
+#define G_FW_RI_BIND_MW_WR_QPBINDE(x) \
+ (((x) >> S_FW_RI_BIND_MW_WR_QPBINDE) & M_FW_RI_BIND_MW_WR_QPBINDE)
+#define F_FW_RI_BIND_MW_WR_QPBINDE V_FW_RI_BIND_MW_WR_QPBINDE(1U)
+
+#define S_FW_RI_BIND_MW_WR_NS 5
+#define M_FW_RI_BIND_MW_WR_NS 0x1
+#define V_FW_RI_BIND_MW_WR_NS(x) ((x) << S_FW_RI_BIND_MW_WR_NS)
+#define G_FW_RI_BIND_MW_WR_NS(x) \
+ (((x) >> S_FW_RI_BIND_MW_WR_NS) & M_FW_RI_BIND_MW_WR_NS)
+#define F_FW_RI_BIND_MW_WR_NS V_FW_RI_BIND_MW_WR_NS(1U)
+
+#define S_FW_RI_BIND_MW_WR_DCACPU 0
+#define M_FW_RI_BIND_MW_WR_DCACPU 0x1f
+#define V_FW_RI_BIND_MW_WR_DCACPU(x) ((x) << S_FW_RI_BIND_MW_WR_DCACPU)
+#define G_FW_RI_BIND_MW_WR_DCACPU(x) \
+ (((x) >> S_FW_RI_BIND_MW_WR_DCACPU) & M_FW_RI_BIND_MW_WR_DCACPU)
+
+struct fw_ri_fr_nsmr_wr {
+ __u8 opcode;
+ __u8 flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __u8 qpbinde_to_dcacpu;
+ __u8 pgsz_shift;
+ __u8 addr_type;
+ __u8 mem_perms;
+ __be32 stag;
+ __be32 len_hi;
+ __be32 len_lo;
+ __be32 va_hi;
+ __be32 va_lo_fbo;
+};
+
+#define S_FW_RI_FR_NSMR_WR_QPBINDE 6
+#define M_FW_RI_FR_NSMR_WR_QPBINDE 0x1
+#define V_FW_RI_FR_NSMR_WR_QPBINDE(x) ((x) << S_FW_RI_FR_NSMR_WR_QPBINDE)
+#define G_FW_RI_FR_NSMR_WR_QPBINDE(x) \
+ (((x) >> S_FW_RI_FR_NSMR_WR_QPBINDE) & M_FW_RI_FR_NSMR_WR_QPBINDE)
+#define F_FW_RI_FR_NSMR_WR_QPBINDE V_FW_RI_FR_NSMR_WR_QPBINDE(1U)
+
+#define S_FW_RI_FR_NSMR_WR_NS 5
+#define M_FW_RI_FR_NSMR_WR_NS 0x1
+#define V_FW_RI_FR_NSMR_WR_NS(x) ((x) << S_FW_RI_FR_NSMR_WR_NS)
+#define G_FW_RI_FR_NSMR_WR_NS(x) \
+ (((x) >> S_FW_RI_FR_NSMR_WR_NS) & M_FW_RI_FR_NSMR_WR_NS)
+#define F_FW_RI_FR_NSMR_WR_NS V_FW_RI_FR_NSMR_WR_NS(1U)
+
+#define S_FW_RI_FR_NSMR_WR_DCACPU 0
+#define M_FW_RI_FR_NSMR_WR_DCACPU 0x1f
+#define V_FW_RI_FR_NSMR_WR_DCACPU(x) ((x) << S_FW_RI_FR_NSMR_WR_DCACPU)
+#define G_FW_RI_FR_NSMR_WR_DCACPU(x) \
+ (((x) >> S_FW_RI_FR_NSMR_WR_DCACPU) & M_FW_RI_FR_NSMR_WR_DCACPU)
+
+struct fw_ri_inv_lstag_wr {
+ __u8 opcode;
+ __u8 flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be32 r2;
+ __be32 stag_inv;
+};
+
+enum fw_ri_type {
+ FW_RI_TYPE_INIT,
+ FW_RI_TYPE_FINI,
+ FW_RI_TYPE_TERMINATE
+};
+
+enum fw_ri_init_p2ptype {
+ FW_RI_INIT_P2PTYPE_RDMA_WRITE = FW_RI_RDMA_WRITE,
+ FW_RI_INIT_P2PTYPE_READ_REQ = FW_RI_READ_REQ,
+ FW_RI_INIT_P2PTYPE_SEND = FW_RI_SEND,
+ FW_RI_INIT_P2PTYPE_SEND_WITH_INV = FW_RI_SEND_WITH_INV,
+ FW_RI_INIT_P2PTYPE_SEND_WITH_SE = FW_RI_SEND_WITH_SE,
+ FW_RI_INIT_P2PTYPE_SEND_WITH_SE_INV = FW_RI_SEND_WITH_SE_INV,
+ FW_RI_INIT_P2PTYPE_DISABLED = 0xf,
+};
+
+struct fw_ri_wr {
+ __be32 op_compl;
+ __be32 flowid_len16;
+ __u64 cookie;
+ union fw_ri {
+ struct fw_ri_init {
+ __u8 type;
+ __u8 mpareqbit_p2ptype;
+ __u8 r4[2];
+ __u8 mpa_attrs;
+ __u8 qp_caps;
+ __be16 nrqe;
+ __be32 pdid;
+ __be32 qpid;
+ __be32 sq_eqid;
+ __be32 rq_eqid;
+ __be32 scqid;
+ __be32 rcqid;
+ __be32 ord_max;
+ __be32 ird_max;
+ __be32 iss;
+ __be32 irs;
+ __be32 hwrqsize;
+ __be32 hwrqaddr;
+ __be64 r5;
+ union fw_ri_init_p2p {
+ struct fw_ri_rdma_write_wr write;
+ struct fw_ri_rdma_read_wr read;
+ struct fw_ri_send_wr send;
+ } u;
+ } init;
+ struct fw_ri_fini {
+ __u8 type;
+ __u8 r3[7];
+ __be64 r4;
+ } fini;
+ struct fw_ri_terminate {
+ __u8 type;
+ __u8 r3[3];
+ __be32 immdlen;
+ __u8 termmsg[40];
+ } terminate;
+ } u;
+};
+
+#define S_FW_RI_WR_MPAREQBIT 7
+#define M_FW_RI_WR_MPAREQBIT 0x1
+#define V_FW_RI_WR_MPAREQBIT(x) ((x) << S_FW_RI_WR_MPAREQBIT)
+#define G_FW_RI_WR_MPAREQBIT(x) \
+ (((x) >> S_FW_RI_WR_MPAREQBIT) & M_FW_RI_WR_MPAREQBIT)
+#define F_FW_RI_WR_MPAREQBIT V_FW_RI_WR_MPAREQBIT(1U)
+
+#define S_FW_RI_WR_P2PTYPE 0
+#define M_FW_RI_WR_P2PTYPE 0xf
+#define V_FW_RI_WR_P2PTYPE(x) ((x) << S_FW_RI_WR_P2PTYPE)
+#define G_FW_RI_WR_P2PTYPE(x) \
+ (((x) >> S_FW_RI_WR_P2PTYPE) & M_FW_RI_WR_P2PTYPE)
+
+struct tcp_options {
+ __be16 mss;
+ __u8 wsf;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8:4;
+ __u8 unknown:1;
+ __u8:1;
+ __u8 sack:1;
+ __u8 tstamp:1;
+#else
+ __u8 tstamp:1;
+ __u8 sack:1;
+ __u8:1;
+ __u8 unknown:1;
+ __u8:4;
+#endif
+};
+
+struct cpl_pass_accept_req {
+ union opcode_tid ot;
+ __be16 rsvd;
+ __be16 len;
+ __be32 hdr_len;
+ __be16 vlan;
+ __be16 l2info;
+ __be32 tos_stid;
+ struct tcp_options tcpopt;
+};
+
+/* cpl_pass_accept_req.hdr_len fields */
+#define S_SYN_RX_CHAN 0
+#define M_SYN_RX_CHAN 0xF
+#define V_SYN_RX_CHAN(x) ((x) << S_SYN_RX_CHAN)
+#define G_SYN_RX_CHAN(x) (((x) >> S_SYN_RX_CHAN) & M_SYN_RX_CHAN)
+
+#define S_TCP_HDR_LEN 10
+#define M_TCP_HDR_LEN 0x3F
+#define V_TCP_HDR_LEN(x) ((x) << S_TCP_HDR_LEN)
+#define G_TCP_HDR_LEN(x) (((x) >> S_TCP_HDR_LEN) & M_TCP_HDR_LEN)
+
+#define S_IP_HDR_LEN 16
+#define M_IP_HDR_LEN 0x3FF
+#define V_IP_HDR_LEN(x) ((x) << S_IP_HDR_LEN)
+#define G_IP_HDR_LEN(x) (((x) >> S_IP_HDR_LEN) & M_IP_HDR_LEN)
+
+#define S_ETH_HDR_LEN 26
+#define M_ETH_HDR_LEN 0x1F
+#define V_ETH_HDR_LEN(x) ((x) << S_ETH_HDR_LEN)
+#define G_ETH_HDR_LEN(x) (((x) >> S_ETH_HDR_LEN) & M_ETH_HDR_LEN)
+
+/* cpl_pass_accept_req.l2info fields */
+#define S_SYN_MAC_IDX 0
+#define M_SYN_MAC_IDX 0x1FF
+#define V_SYN_MAC_IDX(x) ((x) << S_SYN_MAC_IDX)
+#define G_SYN_MAC_IDX(x) (((x) >> S_SYN_MAC_IDX) & M_SYN_MAC_IDX)
+
+#define S_SYN_XACT_MATCH 9
+#define V_SYN_XACT_MATCH(x) ((x) << S_SYN_XACT_MATCH)
+#define F_SYN_XACT_MATCH V_SYN_XACT_MATCH(1U)
+
+#define S_SYN_INTF 12
+#define M_SYN_INTF 0xF
+#define V_SYN_INTF(x) ((x) << S_SYN_INTF)
+#define G_SYN_INTF(x) (((x) >> S_SYN_INTF) & M_SYN_INTF)
+
+struct ulptx_idata {
+ __be32 cmd_more;
+ __be32 len;
+};
+
+#define S_ULPTX_NSGE 0
+#define M_ULPTX_NSGE 0xFFFF
+#define V_ULPTX_NSGE(x) ((x) << S_ULPTX_NSGE)
+#endif /* _T4FW_RI_API_H_ */
diff --git a/drivers/infiniband/hw/cxgb4/user.h b/drivers/infiniband/hw/cxgb4/user.h
new file mode 100644
index 000000000000..ed6414abde02
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/user.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __C4IW_USER_H__
+#define __C4IW_USER_H__
+
+#define C4IW_UVERBS_ABI_VERSION 1
+
+/*
+ * Make sure that all structs defined in this file remain laid out so
+ * that they pack the same way on 32-bit and 64-bit architectures (to
+ * avoid incompatibility between 32-bit userspace and 64-bit kernels).
+ * In particular do not use pointer types -- pass pointers in __u64
+ * instead.
+ */
+struct c4iw_create_cq_resp {
+ __u64 key;
+ __u64 gts_key;
+ __u64 memsize;
+ __u32 cqid;
+ __u32 size;
+ __u32 qid_mask;
+};
+
+struct c4iw_create_qp_resp {
+ __u64 sq_key;
+ __u64 rq_key;
+ __u64 sq_db_gts_key;
+ __u64 rq_db_gts_key;
+ __u64 sq_memsize;
+ __u64 rq_memsize;
+ __u32 sqid;
+ __u32 rqid;
+ __u32 sq_size;
+ __u32 rq_size;
+ __u32 qid_mask;
+};
+#endif
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6110.c b/drivers/infiniband/hw/ipath/ipath_iba6110.c
index 37d12e5efa49..1d7aea132a09 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6110.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6110.c
@@ -1474,7 +1474,7 @@ static void ipath_ht_quiet_serdes(struct ipath_devdata *dd)
/**
* ipath_pe_put_tid - write a TID in chip
* @dd: the infinipath device
- * @tidptr: pointer to the expected TID (in chip) to udpate
+ * @tidptr: pointer to the expected TID (in chip) to update
* @tidtype: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected
* @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
*
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6120.c b/drivers/infiniband/hw/ipath/ipath_iba6120.c
index fbf8c5379ea8..4b4a30b0dabd 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6120.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6120.c
@@ -1328,7 +1328,7 @@ bail:
/**
* ipath_pe_put_tid - write a TID in chip
* @dd: the infinipath device
- * @tidptr: pointer to the expected TID (in chip) to udpate
+ * @tidptr: pointer to the expected TID (in chip) to update
* @tidtype: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected
* @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
*
@@ -1394,7 +1394,7 @@ static void ipath_pe_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr,
/**
* ipath_pe_put_tid_2 - write a TID in chip, Revision 2 or higher
* @dd: the infinipath device
- * @tidptr: pointer to the expected TID (in chip) to udpate
+ * @tidptr: pointer to the expected TID (in chip) to update
* @tidtype: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected
* @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
*
diff --git a/drivers/infiniband/hw/ipath/ipath_iba7220.c b/drivers/infiniband/hw/ipath/ipath_iba7220.c
index a805402dd4ae..34b778ed97fc 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba7220.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba7220.c
@@ -1738,7 +1738,7 @@ bail:
/**
* ipath_7220_put_tid - write a TID to the chip
* @dd: the infinipath device
- * @tidptr: pointer to the expected TID (in chip) to udpate
+ * @tidptr: pointer to the expected TID (in chip) to update
* @tidtype: 0 for eager, 1 for expected
* @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
*
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index cc2ddd29ac57..5a219a2fdf16 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -661,6 +661,14 @@ repoll:
wc->opcode = IB_WC_FETCH_ADD;
wc->byte_len = 8;
break;
+ case MLX4_OPCODE_MASKED_ATOMIC_CS:
+ wc->opcode = IB_WC_MASKED_COMP_SWAP;
+ wc->byte_len = 8;
+ break;
+ case MLX4_OPCODE_MASKED_ATOMIC_FA:
+ wc->opcode = IB_WC_MASKED_FETCH_ADD;
+ wc->byte_len = 8;
+ break;
case MLX4_OPCODE_BIND_MW:
wc->opcode = IB_WC_BIND_MW;
break;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 01f2a3f93355..39051417054c 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -139,6 +139,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay;
props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
IB_ATOMIC_HCA : IB_ATOMIC_NONE;
+ props->masked_atomic_cap = IB_ATOMIC_HCA;
props->max_pkeys = dev->dev->caps.pkey_table_len[1];
props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 5643f4a8ffef..6a60827b2301 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -74,17 +74,19 @@ enum {
};
static const __be32 mlx4_ib_opcode[] = {
- [IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND),
- [IB_WR_LSO] = cpu_to_be32(MLX4_OPCODE_LSO),
- [IB_WR_SEND_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_SEND_IMM),
- [IB_WR_RDMA_WRITE] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
- [IB_WR_RDMA_WRITE_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
- [IB_WR_RDMA_READ] = cpu_to_be32(MLX4_OPCODE_RDMA_READ),
- [IB_WR_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
- [IB_WR_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
- [IB_WR_SEND_WITH_INV] = cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
- [IB_WR_LOCAL_INV] = cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
- [IB_WR_FAST_REG_MR] = cpu_to_be32(MLX4_OPCODE_FMR),
+ [IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND),
+ [IB_WR_LSO] = cpu_to_be32(MLX4_OPCODE_LSO),
+ [IB_WR_SEND_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_SEND_IMM),
+ [IB_WR_RDMA_WRITE] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
+ [IB_WR_RDMA_WRITE_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
+ [IB_WR_RDMA_READ] = cpu_to_be32(MLX4_OPCODE_RDMA_READ),
+ [IB_WR_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
+ [IB_WR_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
+ [IB_WR_SEND_WITH_INV] = cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
+ [IB_WR_LOCAL_INV] = cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
+ [IB_WR_FAST_REG_MR] = cpu_to_be32(MLX4_OPCODE_FMR),
+ [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS),
+ [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA),
};
static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
@@ -1407,6 +1409,9 @@ static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *
if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
+ } else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
+ aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
+ aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add_mask);
} else {
aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
aseg->compare = 0;
@@ -1414,6 +1419,15 @@ static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *
}
+static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
+ struct ib_send_wr *wr)
+{
+ aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
+ aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask);
+ aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
+ aseg->compare_mask = cpu_to_be64(wr->wr.atomic.compare_add_mask);
+}
+
static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
struct ib_send_wr *wr)
{
@@ -1567,6 +1581,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
switch (wr->opcode) {
case IB_WR_ATOMIC_CMP_AND_SWP:
case IB_WR_ATOMIC_FETCH_AND_ADD:
+ case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
wr->wr.atomic.rkey);
wqe += sizeof (struct mlx4_wqe_raddr_seg);
@@ -1579,6 +1594,19 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
break;
+ case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
+ set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
+ wr->wr.atomic.rkey);
+ wqe += sizeof (struct mlx4_wqe_raddr_seg);
+
+ set_masked_atomic_seg(wqe, wr);
+ wqe += sizeof (struct mlx4_wqe_masked_atomic_seg);
+
+ size += (sizeof (struct mlx4_wqe_raddr_seg) +
+ sizeof (struct mlx4_wqe_masked_atomic_seg)) / 16;
+
+ break;
+
case IB_WR_RDMA_READ:
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_WRITE_WITH_IMM:
diff --git a/drivers/infiniband/hw/mthca/mthca_allocator.c b/drivers/infiniband/hw/mthca/mthca_allocator.c
index c5ccc2daab60..b4e0cf4e95cd 100644
--- a/drivers/infiniband/hw/mthca/mthca_allocator.c
+++ b/drivers/infiniband/hw/mthca/mthca_allocator.c
@@ -211,7 +211,7 @@ int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
if (!buf->direct.buf)
return -ENOMEM;
- pci_unmap_addr_set(&buf->direct, mapping, t);
+ dma_unmap_addr_set(&buf->direct, mapping, t);
memset(buf->direct.buf, 0, size);
@@ -251,7 +251,7 @@ int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
goto err_free;
dma_list[i] = t;
- pci_unmap_addr_set(&buf->page_list[i], mapping, t);
+ dma_unmap_addr_set(&buf->page_list[i], mapping, t);
clear_page(buf->page_list[i].buf);
}
@@ -289,12 +289,12 @@ void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf,
if (is_direct)
dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
- pci_unmap_addr(&buf->direct, mapping));
+ dma_unmap_addr(&buf->direct, mapping));
else {
for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
buf->page_list[i].buf,
- pci_unmap_addr(&buf->page_list[i],
+ dma_unmap_addr(&buf->page_list[i],
mapping));
kfree(buf->page_list);
}
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 9388164b6053..8e8c728aff88 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -504,7 +504,7 @@ static int mthca_create_eq(struct mthca_dev *dev,
goto err_out_free_pages;
dma_list[i] = t;
- pci_unmap_addr_set(&eq->page_list[i], mapping, t);
+ dma_unmap_addr_set(&eq->page_list[i], mapping, t);
clear_page(eq->page_list[i].buf);
}
@@ -579,7 +579,7 @@ static int mthca_create_eq(struct mthca_dev *dev,
if (eq->page_list[i].buf)
dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
eq->page_list[i].buf,
- pci_unmap_addr(&eq->page_list[i],
+ dma_unmap_addr(&eq->page_list[i],
mapping));
mthca_free_mailbox(dev, mailbox);
@@ -629,7 +629,7 @@ static void mthca_free_eq(struct mthca_dev *dev,
for (i = 0; i < npages; ++i)
pci_free_consistent(dev->pdev, PAGE_SIZE,
eq->page_list[i].buf,
- pci_unmap_addr(&eq->page_list[i], mapping));
+ dma_unmap_addr(&eq->page_list[i], mapping));
kfree(eq->page_list);
mthca_free_mailbox(dev, mailbox);
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 90f4c4d2e983..596acc45569b 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -46,7 +46,7 @@
struct mthca_buf_list {
void *buf;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
};
union mthca_buf {
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index c36a3f514929..86acb7d57064 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -1297,7 +1297,7 @@ int nes_destroy_cqp(struct nes_device *nesdev)
/**
* nes_init_1g_phy
*/
-int nes_init_1g_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
+static int nes_init_1g_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
{
u32 counter = 0;
u16 phy_data;
@@ -1351,7 +1351,7 @@ int nes_init_1g_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
/**
* nes_init_2025_phy
*/
-int nes_init_2025_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
+static int nes_init_2025_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
{
u32 temp_phy_data = 0;
u32 temp_phy_data2 = 0;
@@ -2458,7 +2458,6 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
return;
}
nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_INTERRUPT;
- spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
/* ack the MAC interrupt */
mac_status = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (mac_index * 0x200));
@@ -2469,11 +2468,9 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
if (mac_status & (NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT)) {
nesdev->link_status_interrupts++;
- if (0 == (++nesadapter->link_interrupt_count[mac_index] % ((u16)NES_MAX_LINK_INTERRUPTS))) {
- spin_lock_irqsave(&nesadapter->phy_lock, flags);
+ if (0 == (++nesadapter->link_interrupt_count[mac_index] % ((u16)NES_MAX_LINK_INTERRUPTS)))
nes_reset_link(nesdev, mac_index);
- spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
- }
+
/* read the PHY interrupt status register */
if ((nesadapter->OneG_Mode) &&
(nesadapter->phy_type[mac_index] != NES_PHY_TYPE_PUMA_1G)) {
@@ -2587,6 +2584,7 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
break;
}
}
+ spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
if (phy_data & 0x0004) {
if (wide_ppm_offset &&
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index b7c813f4be43..e95e8d09ff38 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -877,7 +877,7 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
if (!mc_all_on) {
char *addrs;
int i;
- struct dev_mc_list *mcaddr;
+ struct netdev_hw_addr *ha;
addrs = kmalloc(ETH_ALEN * mc_count, GFP_ATOMIC);
if (!addrs) {
@@ -885,9 +885,8 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
goto unlock;
}
i = 0;
- netdev_for_each_mc_addr(mcaddr, netdev)
- memcpy(get_addr(addrs, i++),
- mcaddr->dmi_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(get_addr(addrs, i++), ha->addr, ETH_ALEN);
perfect_filter_register_address = NES_IDX_PERFECT_FILTER_LOW +
pft_entries_preallocated * 0x8;
@@ -1461,11 +1460,14 @@ static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd
et_cmd->transceiver = XCVR_INTERNAL;
et_cmd->phy_address = mac_index;
} else {
+ unsigned long flags;
et_cmd->supported = SUPPORTED_1000baseT_Full
| SUPPORTED_Autoneg;
et_cmd->advertising = ADVERTISED_1000baseT_Full
| ADVERTISED_Autoneg;
+ spin_lock_irqsave(&nesadapter->phy_lock, flags);
nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
+ spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
if (phy_data & 0x1000)
et_cmd->autoneg = AUTONEG_ENABLE;
else
@@ -1503,12 +1505,15 @@ static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter;
- u16 phy_data;
if ((nesadapter->OneG_Mode) &&
(nesadapter->phy_type[nesdev->mac_index] != NES_PHY_TYPE_PUMA_1G)) {
- nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index],
- &phy_data);
+ unsigned long flags;
+ u16 phy_data;
+ u8 phy_index = nesadapter->phy_index[nesdev->mac_index];
+
+ spin_lock_irqsave(&nesadapter->phy_lock, flags);
+ nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
if (et_cmd->autoneg) {
/* Turn on Full duplex, Autoneg, and restart autonegotiation */
phy_data |= 0x1300;
@@ -1516,8 +1521,8 @@ static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd
/* Turn off autoneg */
phy_data &= ~0x1000;
}
- nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index],
- phy_data);
+ nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data);
+ spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
}
return 0;
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
index 186623d86959..a9f5dd272f1a 100644
--- a/drivers/infiniband/hw/nes/nes_utils.c
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -381,12 +381,8 @@ static u16 nes_read16_eeprom(void __iomem *addr, u16 offset)
*/
void nes_write_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16 data)
{
- struct nes_adapter *nesadapter = nesdev->nesadapter;
u32 u32temp;
u32 counter;
- unsigned long flags;
-
- spin_lock_irqsave(&nesadapter->phy_lock, flags);
nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
0x50020000 | data | ((u32)phy_reg << 18) | ((u32)phy_addr << 23));
@@ -402,8 +398,6 @@ void nes_write_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u1
if (!(u32temp & 1))
nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n",
u32temp);
-
- spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
}
@@ -414,14 +408,11 @@ void nes_write_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u1
*/
void nes_read_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16 *data)
{
- struct nes_adapter *nesadapter = nesdev->nesadapter;
u32 u32temp;
u32 counter;
- unsigned long flags;
/* nes_debug(NES_DBG_PHY, "phy addr = %d, mac_index = %d\n",
phy_addr, nesdev->mac_index); */
- spin_lock_irqsave(&nesadapter->phy_lock, flags);
nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
0x60020000 | ((u32)phy_reg << 18) | ((u32)phy_addr << 23));
@@ -441,7 +432,6 @@ void nes_read_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16
} else {
*data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
}
- spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
}
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index e54f312e4bdc..925e1f2d1d55 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -374,7 +374,7 @@ static int alloc_fast_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
/*
* nes_alloc_fast_reg_mr
*/
-struct ib_mr *nes_alloc_fast_reg_mr(struct ib_pd *ibpd, int max_page_list_len)
+static struct ib_mr *nes_alloc_fast_reg_mr(struct ib_pd *ibpd, int max_page_list_len)
{
struct nes_pd *nespd = to_nespd(ibpd);
struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index d10b4ec68d28..40e858492f90 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -49,6 +49,25 @@ static u32 ipoib_get_rx_csum(struct net_device *dev)
!test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
}
+static int ipoib_set_tso(struct net_device *dev, u32 data)
+{
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+
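+ /* TSO needs datagram mode (connected mode off), scatter/gather and HCA UD TSO support */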
+ if (data) {
+ if (!test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) &&
+ (dev->features & NETIF_F_SG) &&
+ (priv->hca_caps & IB_DEVICE_UD_TSO)) {
+ dev->features |= NETIF_F_TSO;
+ } else {
+ ipoib_warn(priv, "can't set TSO on\n");
+ return -EOPNOTSUPP;
+ }
+ } else
+ dev->features &= ~NETIF_F_TSO;
+
+ return 0;
+}
+
static int ipoib_get_coalesce(struct net_device *dev,
struct ethtool_coalesce *coal)
{
@@ -131,6 +150,7 @@ static void ipoib_get_ethtool_stats(struct net_device *dev,
static const struct ethtool_ops ipoib_ethtool_ops = {
.get_drvinfo = ipoib_get_drvinfo,
.get_rx_csum = ipoib_get_rx_csum,
+ .set_tso = ipoib_set_tso,
.get_coalesce = ipoib_get_coalesce,
.set_coalesce = ipoib_set_coalesce,
.get_flags = ethtool_op_get_flags,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index b166bb75753d..3871ac663554 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -768,11 +768,8 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
}
}
-static int ipoib_mcast_addr_is_valid(const u8 *addr, unsigned int addrlen,
- const u8 *broadcast)
+static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast)
{
- if (addrlen != INFINIBAND_ALEN)
- return 0;
/* reserved QPN, prefix, scope */
if (memcmp(addr, broadcast, 6))
return 0;
@@ -787,7 +784,7 @@ void ipoib_mcast_restart_task(struct work_struct *work)
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, restart_task);
struct net_device *dev = priv->dev;
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
struct ipoib_mcast *mcast, *tmcast;
LIST_HEAD(remove_list);
unsigned long flags;
@@ -812,15 +809,13 @@ void ipoib_mcast_restart_task(struct work_struct *work)
clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);
/* Mark all of the entries that are found or don't exist */
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
union ib_gid mgid;
- if (!ipoib_mcast_addr_is_valid(mclist->dmi_addr,
- mclist->dmi_addrlen,
- dev->broadcast))
+ if (!ipoib_mcast_addr_is_valid(ha->addr, dev->broadcast))
continue;
- memcpy(mgid.raw, mclist->dmi_addr + 4, sizeof mgid);
+ memcpy(mgid.raw, ha->addr + 4, sizeof mgid);
mcast = __ipoib_mcast_find(dev, &mgid);
if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 2ee6c7a68bdc..b8a567328e9a 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -515,6 +515,8 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
struct input_absinfo abs;
struct ff_effect effect;
int __user *ip = (int __user *)p;
+ struct keycode_table_entry kt, *kt_p = p;
+ char scancode[16];
unsigned int i, t, u, v;
int error;
@@ -569,6 +571,43 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
return input_set_keycode(dev, t, v);
+ case EVIOCGKEYCODEBIG:
+ if (copy_from_user(&kt, kt_p, sizeof(kt)))
+ return -EFAULT;
+
+ if (kt.len > sizeof(scancode))
+ return -EINVAL;
+
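+ /* point kt.scancode at the local buffer so the lookup can fill in the scancode bytes */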
+ kt.scancode = scancode;
+
+ error = input_get_keycode_big(dev, &kt);
+ if (error)
+ return error;
+
+ if (copy_to_user(kt_p, &kt, sizeof(kt)))
+ return -EFAULT;
+
+ /* FIXME: probably need some compat32 code */
+ if (copy_to_user(kt_p->scancode, kt.scancode, kt.len))
+ return -EFAULT;
+
+ return 0;
+
+ case EVIOCSKEYCODEBIG:
+ if (copy_from_user(&kt, kt_p, sizeof(kt)))
+ return -EFAULT;
+
+ if (kt.len > sizeof(scancode))
+ return -EINVAL;
+
+ kt.scancode = scancode;
+
+ /* FIXME: probably need some compat32 code */
+ if (copy_from_user(kt.scancode, kt_p->scancode, kt.len))
+ return -EFAULT;
+
+ return input_set_keycode_big(dev, &kt);
+
case EVIOCRMFF:
return input_ff_erase(dev, (int)(unsigned long) p, file);
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index 7e18bcf05a66..46239e47a260 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -59,11 +59,11 @@ static unsigned int get_time_pit(void)
unsigned long flags;
unsigned int count;
- spin_lock_irqsave(&i8253_lock, flags);
+ raw_spin_lock_irqsave(&i8253_lock, flags);
outb_p(0x00, 0x43);
count = inb_p(0x40);
count |= inb_p(0x40) << 8;
- spin_unlock_irqrestore(&i8253_lock, flags);
+ raw_spin_unlock_irqrestore(&i8253_lock, flags);
return count;
}
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 9c79bd56b51a..e623edf6cebe 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -568,6 +568,11 @@ static void input_disconnect_device(struct input_dev *dev)
spin_unlock_irq(&dev->event_lock);
}
+/*
+ * These routines handle the default case where no [gs]etkeycode() is
+ * defined. In this case, an array indexed by the scancode is used.
+ */
+
static int input_fetch_keycode(struct input_dev *dev, int scancode)
{
switch (dev->keycodesize) {
@@ -582,27 +587,74 @@ static int input_fetch_keycode(struct input_dev *dev, int scancode)
}
}
-static int input_default_getkeycode(struct input_dev *dev,
- unsigned int scancode,
- unsigned int *keycode)
+/*
+ * Supports only 8, 16 and 32 bit scancodes. It wouldn't be that
+ * hard to write some machine-endian logic to support 24 bit scancodes,
+ * but it seemed overkill. Note also that, since there are in general
+ * fewer than 256 scancodes scattered over the scancode space, the code
+ * space is sparse even with 16 bits, which leads to memory and code
+ * inefficiency when retrieving the entire scancode space.
+ * So it is highly recommended to implement getkeycodebig/setkeycodebig
+ * instead of the plain table approach when more than 8 bits are
+ * needed for the scancode.
+ */
+static int input_fetch_scancode(struct keycode_table_entry *kt_entry,
+ u32 *scancode)
+{
+ switch (kt_entry->len) {
+ case 1:
+ *scancode = *((u8 *)kt_entry->scancode);
+ break;
+ case 2:
+ *scancode = *((u16 *)kt_entry->scancode);
+ break;
+ case 4:
+ *scancode = *((u32 *)kt_entry->scancode);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+
+static int input_default_getkeycode_from_index(struct input_dev *dev,
+ struct keycode_table_entry *kt_entry)
{
+ u32 scancode = kt_entry->index;
+
if (!dev->keycodesize)
return -EINVAL;
if (scancode >= dev->keycodemax)
return -EINVAL;
- *keycode = input_fetch_keycode(dev, scancode);
+ kt_entry->keycode = input_fetch_keycode(dev, scancode);
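+ /* also copy back the scancode bytes that correspond to this index */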
+ memcpy(kt_entry->scancode, &scancode, 4);
return 0;
}
+static int input_default_getkeycode_from_scancode(struct input_dev *dev,
+ struct keycode_table_entry *kt_entry)
+{
+ if (input_fetch_scancode(kt_entry, &kt_entry->index))
+ return -EINVAL;
+
+ return input_default_getkeycode_from_index(dev, kt_entry);
+}
+
+
static int input_default_setkeycode(struct input_dev *dev,
- unsigned int scancode,
- unsigned int keycode)
+ struct keycode_table_entry *kt_entry)
{
int old_keycode;
int i;
+ u32 scancode;
+
+ if (input_fetch_scancode(kt_entry, &scancode))
+ return -EINVAL;
if (scancode >= dev->keycodemax)
return -EINVAL;
@@ -610,32 +662,33 @@ static int input_default_setkeycode(struct input_dev *dev,
if (!dev->keycodesize)
return -EINVAL;
- if (dev->keycodesize < sizeof(keycode) && (keycode >> (dev->keycodesize * 8)))
+ if (dev->keycodesize < sizeof(dev->keycode) &&
+ (kt_entry->keycode >> (dev->keycodesize * 8)))
return -EINVAL;
switch (dev->keycodesize) {
case 1: {
u8 *k = (u8 *)dev->keycode;
old_keycode = k[scancode];
- k[scancode] = keycode;
+ k[scancode] = kt_entry->keycode;
break;
}
case 2: {
u16 *k = (u16 *)dev->keycode;
old_keycode = k[scancode];
- k[scancode] = keycode;
+ k[scancode] = kt_entry->keycode;
break;
}
default: {
u32 *k = (u32 *)dev->keycode;
old_keycode = k[scancode];
- k[scancode] = keycode;
+ k[scancode] = kt_entry->keycode;
break;
}
}
__clear_bit(old_keycode, dev->keybit);
- __set_bit(keycode, dev->keybit);
+ __set_bit(kt_entry->keycode, dev->keybit);
for (i = 0; i < dev->keycodemax; i++) {
if (input_fetch_keycode(dev, i) == old_keycode) {
@@ -648,6 +701,109 @@ static int input_default_setkeycode(struct input_dev *dev,
}
/**
+ * input_get_keycode_big - retrieve keycode currently mapped to a given scancode
+ * @dev: input device which keymap is being queried
+ * @kt_entry: keytable entry
+ *
+ * This function should be called by anyone interested in retrieving the
+ * current keymap. Presently the evdev handler uses it.
+ */
+int input_get_keycode_big(struct input_dev *dev,
+ struct keycode_table_entry *kt_entry)
+{
+ if (dev->getkeycode) {
+ u32 scancode = kt_entry->index;
+
+ /*
+ * Support for legacy drivers that don't implement the new
+ * ioctls
+ */
+ memcpy(kt_entry->scancode, &scancode, 4);
+ return dev->getkeycode(dev, scancode,
+ &kt_entry->keycode);
+ } else
+ return dev->getkeycodebig_from_index(dev, kt_entry);
+}
+EXPORT_SYMBOL(input_get_keycode_big);
+
+/**
+ * input_set_keycode_big - attribute a keycode to a given scancode
+ * @dev: input device which keymap is being queried
+ * @kt_entry: keytable entry
+ *
+ * This function should be called by anyone needing to update the current
+ * keymap. Presently keyboard and evdev handlers use it.
+ */
+int input_set_keycode_big(struct input_dev *dev,
+ struct keycode_table_entry *kt_entry)
+{
+ unsigned long flags;
+ int old_keycode;
+ int retval = -EINVAL;
+ u32 uninitialized_var(scancode);
+
+ if (kt_entry->keycode < 0 || kt_entry->keycode > KEY_MAX)
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ /*
+ * We need to know the old scancode, in order to generate a
+ * keyup effect, if the set operation happens successfully
+ */
+ if (dev->getkeycode) {
+ /*
+ * Support for legacy drivers that don't implement the new
+ * ioctls
+ */
+ if (!dev->setkeycode)
+ goto out;
+
+ if (input_fetch_scancode(kt_entry, &scancode))
+ return -EINVAL;
+
+ retval = dev->getkeycode(dev, scancode,
+ &old_keycode);
+ } else {
+ int new_keycode = kt_entry->keycode;
+
+ retval = dev->getkeycodebig_from_scancode(dev, kt_entry);
+ old_keycode = kt_entry->keycode;
+ kt_entry->keycode = new_keycode;
+ }
+
+ if (retval)
+ goto out;
+
+ if (dev->getkeycode)
+ retval = dev->setkeycode(dev, scancode,
+ kt_entry->keycode);
+ else
+ retval = dev->setkeycodebig(dev, kt_entry);
+ if (retval)
+ goto out;
+
+ /*
+ * Simulate keyup event if keycode is not present
+ * in the keymap anymore
+ */
+ if (test_bit(EV_KEY, dev->evbit) &&
+ !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
+ __test_and_clear_bit(old_keycode, dev->key)) {
+
+ input_pass_event(dev, EV_KEY, old_keycode, 0);
+ if (dev->sync)
+ input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
+ }
+
+ out:
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ return retval;
+}
+EXPORT_SYMBOL(input_set_keycode_big);
+
+/**
* input_get_keycode - retrieve keycode currently mapped to a given scancode
* @dev: input device which keymap is being queried
* @scancode: scancode (or its equivalent for device in question) for which
@@ -661,13 +817,34 @@ int input_get_keycode(struct input_dev *dev,
unsigned int scancode, unsigned int *keycode)
{
unsigned long flags;
- int retval;
- spin_lock_irqsave(&dev->event_lock, flags);
- retval = dev->getkeycode(dev, scancode, keycode);
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ if (dev->getkeycode) {
+ /*
+ * Use the legacy calls
+ */
+ return dev->getkeycode(dev, scancode, keycode);
+ } else {
+ int retval;
+ struct keycode_table_entry kt_entry;
- return retval;
+ /*
+ * Userspace is using a legacy call with a driver ported
+ * to the new way. This is a bad idea with long sparse
+ * tables, since most of the retrieved entries will be
+ * blank. It also only makes sense if the table size is
+ * smaller than 2^32.
+ */
+ memset(&kt_entry, 0, sizeof(kt_entry));
+ kt_entry.len = 4;
+ kt_entry.index = scancode;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ retval = dev->getkeycodebig_from_index(dev, &kt_entry);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ *keycode = kt_entry.keycode;
+ return retval;
+ }
}
EXPORT_SYMBOL(input_get_keycode);
@@ -692,13 +869,41 @@ int input_set_keycode(struct input_dev *dev,
spin_lock_irqsave(&dev->event_lock, flags);
- retval = dev->getkeycode(dev, scancode, &old_keycode);
- if (retval)
- goto out;
+ if (dev->getkeycode) {
+ /*
+ * Use the legacy calls
+ */
+ retval = dev->getkeycode(dev, scancode, &old_keycode);
+ if (retval)
+ goto out;
- retval = dev->setkeycode(dev, scancode, keycode);
- if (retval)
- goto out;
+ retval = dev->setkeycode(dev, scancode, keycode);
+ if (retval)
+ goto out;
+ } else {
+ struct keycode_table_entry kt_entry;
+
+ /*
+ * Userspace is using a legacy call with a driver ported
+ * to the new way. This is a bad idea with long sparse
+ * tables, since most of the retrieved entries will be
+ * blank. It also only makes sense if the table size is
+ * smaller than 2^32.
+ */
+ memset(&kt_entry, 0, sizeof(kt_entry));
+ kt_entry.len = 4;
+ kt_entry.scancode = (char *)&scancode;
+
+ retval = dev->getkeycodebig_from_scancode(dev, &kt_entry);
+ if (retval)
+ goto out;
+
+ kt_entry.keycode = keycode;
+
+ retval = dev->setkeycodebig(dev, &kt_entry);
+ if (retval)
+ goto out;
+ }
/* Make sure KEY_RESERVED did not get enabled. */
__clear_bit(KEY_RESERVED, dev->keybit);
@@ -1636,11 +1841,17 @@ int input_register_device(struct input_dev *dev)
dev->rep[REP_PERIOD] = 33;
}
- if (!dev->getkeycode)
- dev->getkeycode = input_default_getkeycode;
+ if (!dev->getkeycode) {
+ if (!dev->getkeycodebig_from_index)
+ dev->getkeycodebig_from_index = input_default_getkeycode_from_index;
+ if (!dev->getkeycodebig_from_scancode)
+ dev->getkeycodebig_from_scancode = input_default_getkeycode_from_scancode;
+ }
- if (!dev->setkeycode)
- dev->setkeycode = input_default_setkeycode;
+ if (dev->setkeycode) {
+ if (!dev->setkeycodebig)
+ dev->setkeycodebig = input_default_setkeycode;
+ }
dev_set_name(&dev->dev, "input%ld",
(unsigned long) atomic_inc_return(&input_no) - 1);
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 1c0b529c06aa..4afe0a3b4884 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -146,11 +146,11 @@ static unsigned int get_time_pit(void)
unsigned long flags;
unsigned int count;
- spin_lock_irqsave(&i8253_lock, flags);
+ raw_spin_lock_irqsave(&i8253_lock, flags);
outb_p(0x00, 0x43);
count = inb_p(0x40);
count |= inb_p(0x40) << 8;
- spin_unlock_irqrestore(&i8253_lock, flags);
+ raw_spin_unlock_irqrestore(&i8253_lock, flags);
return count;
}
diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c
index b1edd778639c..405febd94f24 100644
--- a/drivers/input/joystick/iforce/iforce-main.c
+++ b/drivers/input/joystick/iforce/iforce-main.c
@@ -54,6 +54,9 @@ static signed short btn_avb_wheel[] =
static signed short abs_joystick[] =
{ ABS_X, ABS_Y, ABS_THROTTLE, ABS_HAT0X, ABS_HAT0Y, -1 };
+static signed short abs_joystick_rudder[] =
+{ ABS_X, ABS_Y, ABS_THROTTLE, ABS_RUDDER, ABS_HAT0X, ABS_HAT0Y, -1 };
+
static signed short abs_avb_pegasus[] =
{ ABS_X, ABS_Y, ABS_THROTTLE, ABS_RUDDER, ABS_HAT0X, ABS_HAT0Y,
ABS_HAT1X, ABS_HAT1Y, -1 };
@@ -76,8 +79,9 @@ static struct iforce_device iforce_device[] = {
{ 0x061c, 0xc0a4, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, //?
{ 0x061c, 0xc084, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce },
{ 0x06f8, 0x0001, "Guillemot Race Leader Force Feedback", btn_wheel, abs_wheel, ff_iforce }, //?
+ { 0x06f8, 0x0001, "Guillemot Jet Leader Force Feedback", btn_joystick, abs_joystick_rudder, ff_iforce },
{ 0x06f8, 0x0004, "Guillemot Force Feedback Racing Wheel", btn_wheel, abs_wheel, ff_iforce }, //?
- { 0x06f8, 0x0004, "Gullemot Jet Leader 3D", btn_joystick, abs_joystick, ff_iforce }, //?
+ { 0x06f8, 0xa302, "Guillemot Jet Leader 3D", btn_joystick, abs_joystick, ff_iforce }, //?
{ 0x06d6, 0x29bc, "Trust Force Feedback Race Master", btn_wheel, abs_wheel, ff_iforce },
{ 0x0000, 0x0000, "Unknown I-Force Device [%04x:%04x]", btn_joystick, abs_joystick, ff_iforce }
};
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index b41303d3ec54..6c96631ae5d9 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -212,6 +212,7 @@ static struct usb_device_id iforce_usb_ids [] = {
{ USB_DEVICE(0x061c, 0xc0a4) }, /* ACT LABS Force RS */
{ USB_DEVICE(0x061c, 0xc084) }, /* ACT LABS Force RS */
{ USB_DEVICE(0x06f8, 0x0001) }, /* Guillemot Race Leader Force Feedback */
+ { USB_DEVICE(0x06f8, 0x0003) }, /* Guillemot Jet Leader Force Feedback */
{ USB_DEVICE(0x06f8, 0x0004) }, /* Guillemot Force Feedback Racing Wheel */
{ USB_DEVICE(0x06f8, 0xa302) }, /* Guillemot Jet Leader 3D */
{ } /* Terminating entry */
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 64c102355f53..3525f533e186 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -143,19 +143,6 @@ config KEYBOARD_BFIN
To compile this driver as a module, choose M here: the
module will be called bf54x-keys.
-config KEYBOARD_CORGI
- tristate "Corgi keyboard (deprecated)"
- depends on PXA_SHARPSL
- help
- Say Y here to enable the keyboard on the Sharp Zaurus SL-C7xx
- series of PDAs.
-
- This driver is now deprecated, use generic GPIO based matrix
- keyboard driver instead.
-
- To compile this driver as a module, choose M here: the
- module will be called corgikbd.
-
config KEYBOARD_LKKBD
tristate "DECstation/VAXstation LK201/LK401 keyboard"
select SERIO
@@ -192,6 +179,22 @@ config KEYBOARD_GPIO
To compile this driver as a module, choose M here: the
module will be called gpio_keys.
+config KEYBOARD_TCA6416
+ tristate "TCA6416 Keypad Support"
+ depends on I2C
+ help
+ This driver implements basic keypad functionality
+ for keys connected through a TCA6416 IO expander.
+
+ Say Y here if your device has keys connected to a
+ TCA6416 IO expander. Your board-specific setup logic
+ must also provide pin-mask details (which TCA6416 pins
+ are used for the keypad).
+
+ If enabled, the complete TCA6416 device will be managed
+ through this driver.
+
+
config KEYBOARD_MATRIX
tristate "GPIO driven matrix keypad support"
depends on GENERIC_GPIO
@@ -339,19 +342,6 @@ config KEYBOARD_PXA930_ROTARY
To compile this driver as a module, choose M here: the
module will be called pxa930_rotary.
-config KEYBOARD_SPITZ
- tristate "Spitz keyboard (deprecated)"
- depends on PXA_SHARPSL
- help
- Say Y here to enable the keyboard on the Sharp Zaurus SL-C1000,
- SL-C3000 and Sl-C3100 series of PDAs.
-
- This driver is now deprecated, use generic GPIO based matrix
- keyboard driver instead.
-
- To compile this driver as a module, choose M here: the
- module will be called spitzkbd.
-
config KEYBOARD_STOWAWAY
tristate "Stowaway keyboard"
select SERIO
@@ -414,28 +404,6 @@ config KEYBOARD_TWL4030
To compile this driver as a module, choose M here: the
module will be called twl4030_keypad.
-config KEYBOARD_TOSA
- tristate "Tosa keyboard (deprecated)"
- depends on MACH_TOSA
- help
- Say Y here to enable the keyboard on the Sharp Zaurus SL-6000x (Tosa)
-
- This driver is now deprecated, use generic GPIO based matrix
- keyboard driver instead.
-
- To compile this driver as a module, choose M here: the
- module will be called tosakbd.
-
-config KEYBOARD_TOSA_USE_EXT_KEYCODES
- bool "Tosa keyboard: use extended keycodes"
- depends on KEYBOARD_TOSA
- help
- Say Y here to enable the tosa keyboard driver to generate extended
- (>= 127) keycodes. Be aware, that they can't be correctly interpreted
- by either console keyboard driver or by Kdrive keybd driver.
-
- Say Y only if you know, what you are doing!
-
config KEYBOARD_XTKBD
tristate "XT keyboard"
select SERIO
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 706c6b5ed5f4..4596d0c6f922 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -11,10 +11,10 @@ obj-$(CONFIG_KEYBOARD_AMIGA) += amikbd.o
obj-$(CONFIG_KEYBOARD_ATARI) += atakbd.o
obj-$(CONFIG_KEYBOARD_ATKBD) += atkbd.o
obj-$(CONFIG_KEYBOARD_BFIN) += bf54x-keys.o
-obj-$(CONFIG_KEYBOARD_CORGI) += corgikbd.o
obj-$(CONFIG_KEYBOARD_DAVINCI) += davinci_keyscan.o
obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o
obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o
+obj-$(CONFIG_KEYBOARD_TCA6416) += tca6416-keypad.o
obj-$(CONFIG_KEYBOARD_HIL) += hil_kbd.o
obj-$(CONFIG_KEYBOARD_HIL_OLD) += hilkbd.o
obj-$(CONFIG_KEYBOARD_IMX) += imx_keypad.o
@@ -33,10 +33,8 @@ obj-$(CONFIG_KEYBOARD_PXA27x) += pxa27x_keypad.o
obj-$(CONFIG_KEYBOARD_PXA930_ROTARY) += pxa930_rotary.o
obj-$(CONFIG_KEYBOARD_QT2160) += qt2160.o
obj-$(CONFIG_KEYBOARD_SH_KEYSC) += sh_keysc.o
-obj-$(CONFIG_KEYBOARD_SPITZ) += spitzkbd.o
obj-$(CONFIG_KEYBOARD_STOWAWAY) += stowaway.o
obj-$(CONFIG_KEYBOARD_SUNKBD) += sunkbd.o
-obj-$(CONFIG_KEYBOARD_TOSA) += tosakbd.o
obj-$(CONFIG_KEYBOARD_TWL4030) += twl4030_keypad.o
obj-$(CONFIG_KEYBOARD_XTKBD) += xtkbd.o
obj-$(CONFIG_KEYBOARD_W90P910) += w90p910_keypad.o
diff --git a/drivers/input/keyboard/corgikbd.c b/drivers/input/keyboard/corgikbd.c
deleted file mode 100644
index 634af6a8e6b3..000000000000
--- a/drivers/input/keyboard/corgikbd.c
+++ /dev/null
@@ -1,414 +0,0 @@
-/*
- * Keyboard driver for Sharp Corgi models (SL-C7xx)
- *
- * Copyright (c) 2004-2005 Richard Purdie
- *
- * Based on xtkbd.c/locomkbd.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/input.h>
-#include <linux/interrupt.h>
-#include <linux/jiffies.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-
-#include <mach/corgi.h>
-#include <mach/pxa2xx-gpio.h>
-#include <asm/hardware/scoop.h>
-
-#define KB_ROWS 8
-#define KB_COLS 12
-#define KB_ROWMASK(r) (1 << (r))
-#define SCANCODE(r,c) ( ((r)<<4) + (c) + 1 )
-/* zero code, 124 scancodes */
-#define NR_SCANCODES ( SCANCODE(KB_ROWS-1,KB_COLS-1) +1 +1 )
-
-#define SCAN_INTERVAL (50) /* ms */
-#define HINGE_SCAN_INTERVAL (250) /* ms */
-
-#define CORGI_KEY_CALENDER KEY_F1
-#define CORGI_KEY_ADDRESS KEY_F2
-#define CORGI_KEY_FN KEY_F3
-#define CORGI_KEY_CANCEL KEY_F4
-#define CORGI_KEY_OFF KEY_SUSPEND
-#define CORGI_KEY_EXOK KEY_F5
-#define CORGI_KEY_EXCANCEL KEY_F6
-#define CORGI_KEY_EXJOGDOWN KEY_F7
-#define CORGI_KEY_EXJOGUP KEY_F8
-#define CORGI_KEY_JAP1 KEY_LEFTCTRL
-#define CORGI_KEY_JAP2 KEY_LEFTALT
-#define CORGI_KEY_MAIL KEY_F10
-#define CORGI_KEY_OK KEY_F11
-#define CORGI_KEY_MENU KEY_F12
-
-static unsigned char corgikbd_keycode[NR_SCANCODES] = {
- 0, /* 0 */
- 0, KEY_1, KEY_3, KEY_5, KEY_6, KEY_7, KEY_9, KEY_0, KEY_BACKSPACE, 0, 0, 0, 0, 0, 0, 0, /* 1-16 */
- 0, KEY_2, KEY_4, KEY_R, KEY_Y, KEY_8, KEY_I, KEY_O, KEY_P, 0, 0, 0, 0, 0, 0, 0, /* 17-32 */
- KEY_TAB, KEY_Q, KEY_E, KEY_T, KEY_G, KEY_U, KEY_J, KEY_K, 0, 0, 0, 0, 0, 0, 0, 0, /* 33-48 */
- CORGI_KEY_CALENDER, KEY_W, KEY_S, KEY_F, KEY_V, KEY_H, KEY_M, KEY_L, 0, KEY_RIGHTSHIFT, 0, 0, 0, 0, 0, 0, /* 49-64 */
- CORGI_KEY_ADDRESS, KEY_A, KEY_D, KEY_C, KEY_B, KEY_N, KEY_DOT, 0, KEY_ENTER, 0, KEY_LEFTSHIFT, 0, 0, 0, 0, 0, /* 65-80 */
- CORGI_KEY_MAIL, KEY_Z, KEY_X, KEY_MINUS, KEY_SPACE, KEY_COMMA, 0, KEY_UP, 0, 0, 0, CORGI_KEY_FN, 0, 0, 0, 0, /* 81-96 */
- KEY_SYSRQ, CORGI_KEY_JAP1, CORGI_KEY_JAP2, CORGI_KEY_CANCEL, CORGI_KEY_OK, CORGI_KEY_MENU, KEY_LEFT, KEY_DOWN, KEY_RIGHT, 0, 0, 0, 0, 0, 0, 0, /* 97-112 */
- CORGI_KEY_OFF, CORGI_KEY_EXOK, CORGI_KEY_EXCANCEL, CORGI_KEY_EXJOGDOWN, CORGI_KEY_EXJOGUP, 0, 0, 0, 0, 0, 0, 0, /* 113-124 */
-};
-
-
-struct corgikbd {
- unsigned char keycode[ARRAY_SIZE(corgikbd_keycode)];
- struct input_dev *input;
-
- spinlock_t lock;
- struct timer_list timer;
- struct timer_list htimer;
-
- unsigned int suspended;
- unsigned long suspend_jiffies;
-};
-
-#define KB_DISCHARGE_DELAY 10
-#define KB_ACTIVATE_DELAY 10
-
-/* Helper functions for reading the keyboard matrix
- * Note: We should really be using the generic gpio functions to alter
- * GPDR but it requires a function call per GPIO bit which is
- * excessive when we need to access 12 bits at once, multiple times.
- * These functions must be called within local_irq_save()/local_irq_restore()
- * or similar.
- */
-static inline void corgikbd_discharge_all(void)
-{
- /* STROBE All HiZ */
- GPCR2 = CORGI_GPIO_ALL_STROBE_BIT;
- GPDR2 &= ~CORGI_GPIO_ALL_STROBE_BIT;
-}
-
-static inline void corgikbd_activate_all(void)
-{
- /* STROBE ALL -> High */
- GPSR2 = CORGI_GPIO_ALL_STROBE_BIT;
- GPDR2 |= CORGI_GPIO_ALL_STROBE_BIT;
-
- udelay(KB_DISCHARGE_DELAY);
-
- /* Clear any interrupts we may have triggered when altering the GPIO lines */
- GEDR1 = CORGI_GPIO_HIGH_SENSE_BIT;
- GEDR2 = CORGI_GPIO_LOW_SENSE_BIT;
-}
-
-static inline void corgikbd_activate_col(int col)
-{
- /* STROBE col -> High, not col -> HiZ */
- GPSR2 = CORGI_GPIO_STROBE_BIT(col);
- GPDR2 = (GPDR2 & ~CORGI_GPIO_ALL_STROBE_BIT) | CORGI_GPIO_STROBE_BIT(col);
-}
-
-static inline void corgikbd_reset_col(int col)
-{
- /* STROBE col -> Low */
- GPCR2 = CORGI_GPIO_STROBE_BIT(col);
- /* STROBE col -> out, not col -> HiZ */
- GPDR2 = (GPDR2 & ~CORGI_GPIO_ALL_STROBE_BIT) | CORGI_GPIO_STROBE_BIT(col);
-}
-
-#define GET_ROWS_STATUS(c) (((GPLR1 & CORGI_GPIO_HIGH_SENSE_BIT) >> CORGI_GPIO_HIGH_SENSE_RSHIFT) | ((GPLR2 & CORGI_GPIO_LOW_SENSE_BIT) << CORGI_GPIO_LOW_SENSE_LSHIFT))
-
-/*
- * The corgi keyboard only generates interrupts when a key is pressed.
- * When a key is pressed, we enable a timer which then scans the
- * keyboard to detect when the key is released.
- */
-
-/* Scan the hardware keyboard and push any changes up through the input layer */
-static void corgikbd_scankeyboard(struct corgikbd *corgikbd_data)
-{
- unsigned int row, col, rowd;
- unsigned long flags;
- unsigned int num_pressed;
-
- if (corgikbd_data->suspended)
- return;
-
- spin_lock_irqsave(&corgikbd_data->lock, flags);
-
- num_pressed = 0;
- for (col = 0; col < KB_COLS; col++) {
- /*
- * Discharge the output driver capacitatance
- * in the keyboard matrix. (Yes it is significant..)
- */
-
- corgikbd_discharge_all();
- udelay(KB_DISCHARGE_DELAY);
-
- corgikbd_activate_col(col);
- udelay(KB_ACTIVATE_DELAY);
-
- rowd = GET_ROWS_STATUS(col);
- for (row = 0; row < KB_ROWS; row++) {
- unsigned int scancode, pressed;
-
- scancode = SCANCODE(row, col);
- pressed = rowd & KB_ROWMASK(row);
-
- input_report_key(corgikbd_data->input, corgikbd_data->keycode[scancode], pressed);
-
- if (pressed)
- num_pressed++;
-
- if (pressed && (corgikbd_data->keycode[scancode] == CORGI_KEY_OFF)
- && time_after(jiffies, corgikbd_data->suspend_jiffies + HZ)) {
- input_event(corgikbd_data->input, EV_PWR, CORGI_KEY_OFF, 1);
- corgikbd_data->suspend_jiffies=jiffies;
- }
- }
- corgikbd_reset_col(col);
- }
-
- corgikbd_activate_all();
-
- input_sync(corgikbd_data->input);
-
- /* if any keys are pressed, enable the timer */
- if (num_pressed)
- mod_timer(&corgikbd_data->timer, jiffies + msecs_to_jiffies(SCAN_INTERVAL));
-
- spin_unlock_irqrestore(&corgikbd_data->lock, flags);
-}
-
-/*
- * corgi keyboard interrupt handler.
- */
-static irqreturn_t corgikbd_interrupt(int irq, void *dev_id)
-{
- struct corgikbd *corgikbd_data = dev_id;
-
- if (!timer_pending(&corgikbd_data->timer)) {
- /** wait chattering delay **/
- udelay(20);
- corgikbd_scankeyboard(corgikbd_data);
- }
-
- return IRQ_HANDLED;
-}
-
-/*
- * corgi timer checking for released keys
- */
-static void corgikbd_timer_callback(unsigned long data)
-{
- struct corgikbd *corgikbd_data = (struct corgikbd *) data;
- corgikbd_scankeyboard(corgikbd_data);
-}
-
-/*
- * The hinge switches generate no interrupt so they need to be
- * monitored by a timer.
- *
- * We debounce the switches and pass them to the input system.
- *
- * gprr == 0x00 - Keyboard with Landscape Screen
- * 0x08 - No Keyboard with Portrait Screen
- * 0x0c - Keyboard and Screen Closed
- */
-
-#define READ_GPIO_BIT(x) (GPLR(x) & GPIO_bit(x))
-#define HINGE_STABLE_COUNT 2
-static int sharpsl_hinge_state;
-static int hinge_count;
-
-static void corgikbd_hinge_timer(unsigned long data)
-{
- struct corgikbd *corgikbd_data = (struct corgikbd *) data;
- unsigned long gprr;
- unsigned long flags;
-
- gprr = read_scoop_reg(&corgiscoop_device.dev, SCOOP_GPRR) & (CORGI_SCP_SWA | CORGI_SCP_SWB);
- gprr |= (READ_GPIO_BIT(CORGI_GPIO_AK_INT) != 0);
- if (gprr != sharpsl_hinge_state) {
- hinge_count = 0;
- sharpsl_hinge_state = gprr;
- } else if (hinge_count < HINGE_STABLE_COUNT) {
- hinge_count++;
- if (hinge_count >= HINGE_STABLE_COUNT) {
- spin_lock_irqsave(&corgikbd_data->lock, flags);
-
- input_report_switch(corgikbd_data->input, SW_LID, ((sharpsl_hinge_state & CORGI_SCP_SWA) != 0));
- input_report_switch(corgikbd_data->input, SW_TABLET_MODE, ((sharpsl_hinge_state & CORGI_SCP_SWB) != 0));
- input_report_switch(corgikbd_data->input, SW_HEADPHONE_INSERT, (READ_GPIO_BIT(CORGI_GPIO_AK_INT) != 0));
- input_sync(corgikbd_data->input);
-
- spin_unlock_irqrestore(&corgikbd_data->lock, flags);
- }
- }
- mod_timer(&corgikbd_data->htimer, jiffies + msecs_to_jiffies(HINGE_SCAN_INTERVAL));
-}
-
-#ifdef CONFIG_PM
-static int corgikbd_suspend(struct platform_device *dev, pm_message_t state)
-{
- int i;
- struct corgikbd *corgikbd = platform_get_drvdata(dev);
-
- corgikbd->suspended = 1;
- /* strobe 0 is the power key so this can't be made an input for
- powersaving therefore i = 1 */
- for (i = 1; i < CORGI_KEY_STROBE_NUM; i++)
- pxa_gpio_mode(CORGI_GPIO_KEY_STROBE(i) | GPIO_IN);
-
- return 0;
-}
-
-static int corgikbd_resume(struct platform_device *dev)
-{
- int i;
- struct corgikbd *corgikbd = platform_get_drvdata(dev);
-
- for (i = 1; i < CORGI_KEY_STROBE_NUM; i++)
- pxa_gpio_mode(CORGI_GPIO_KEY_STROBE(i) | GPIO_OUT | GPIO_DFLT_HIGH);
-
- /* Upon resume, ignore the suspend key for a short while */
- corgikbd->suspend_jiffies=jiffies;
- corgikbd->suspended = 0;
-
- return 0;
-}
-#else
-#define corgikbd_suspend NULL
-#define corgikbd_resume NULL
-#endif
-
-static int __devinit corgikbd_probe(struct platform_device *pdev)
-{
- struct corgikbd *corgikbd;
- struct input_dev *input_dev;
- int i, err = -ENOMEM;
-
- corgikbd = kzalloc(sizeof(struct corgikbd), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!corgikbd || !input_dev)
- goto fail;
-
- platform_set_drvdata(pdev, corgikbd);
-
- corgikbd->input = input_dev;
- spin_lock_init(&corgikbd->lock);
-
- /* Init Keyboard rescan timer */
- init_timer(&corgikbd->timer);
- corgikbd->timer.function = corgikbd_timer_callback;
- corgikbd->timer.data = (unsigned long) corgikbd;
-
- /* Init Hinge Timer */
- init_timer(&corgikbd->htimer);
- corgikbd->htimer.function = corgikbd_hinge_timer;
- corgikbd->htimer.data = (unsigned long) corgikbd;
-
- corgikbd->suspend_jiffies=jiffies;
-
- memcpy(corgikbd->keycode, corgikbd_keycode, sizeof(corgikbd->keycode));
-
- input_dev->name = "Corgi Keyboard";
- input_dev->phys = "corgikbd/input0";
- input_dev->id.bustype = BUS_HOST;
- input_dev->id.vendor = 0x0001;
- input_dev->id.product = 0x0001;
- input_dev->id.version = 0x0100;
- input_dev->dev.parent = &pdev->dev;
-
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) |
- BIT_MASK(EV_PWR) | BIT_MASK(EV_SW);
- input_dev->keycode = corgikbd->keycode;
- input_dev->keycodesize = sizeof(unsigned char);
- input_dev->keycodemax = ARRAY_SIZE(corgikbd_keycode);
-
- for (i = 0; i < ARRAY_SIZE(corgikbd_keycode); i++)
- set_bit(corgikbd->keycode[i], input_dev->keybit);
- clear_bit(0, input_dev->keybit);
- set_bit(SW_LID, input_dev->swbit);
- set_bit(SW_TABLET_MODE, input_dev->swbit);
- set_bit(SW_HEADPHONE_INSERT, input_dev->swbit);
-
- err = input_register_device(corgikbd->input);
- if (err)
- goto fail;
-
- mod_timer(&corgikbd->htimer, jiffies + msecs_to_jiffies(HINGE_SCAN_INTERVAL));
-
- /* Setup sense interrupts - RisingEdge Detect, sense lines as inputs */
- for (i = 0; i < CORGI_KEY_SENSE_NUM; i++) {
- pxa_gpio_mode(CORGI_GPIO_KEY_SENSE(i) | GPIO_IN);
- if (request_irq(CORGI_IRQ_GPIO_KEY_SENSE(i), corgikbd_interrupt,
- IRQF_DISABLED | IRQF_TRIGGER_RISING,
- "corgikbd", corgikbd))
- printk(KERN_WARNING "corgikbd: Can't get IRQ: %d!\n", i);
- }
-
- /* Set Strobe lines as outputs - set high */
- for (i = 0; i < CORGI_KEY_STROBE_NUM; i++)
- pxa_gpio_mode(CORGI_GPIO_KEY_STROBE(i) | GPIO_OUT | GPIO_DFLT_HIGH);
-
- /* Setup the headphone jack as an input */
- pxa_gpio_mode(CORGI_GPIO_AK_INT | GPIO_IN);
-
- return 0;
-
- fail: input_free_device(input_dev);
- kfree(corgikbd);
- return err;
-}
-
-static int __devexit corgikbd_remove(struct platform_device *pdev)
-{
- int i;
- struct corgikbd *corgikbd = platform_get_drvdata(pdev);
-
- for (i = 0; i < CORGI_KEY_SENSE_NUM; i++)
- free_irq(CORGI_IRQ_GPIO_KEY_SENSE(i), corgikbd);
-
- del_timer_sync(&corgikbd->htimer);
- del_timer_sync(&corgikbd->timer);
-
- input_unregister_device(corgikbd->input);
-
- kfree(corgikbd);
-
- return 0;
-}
-
-static struct platform_driver corgikbd_driver = {
- .probe = corgikbd_probe,
- .remove = __devexit_p(corgikbd_remove),
- .suspend = corgikbd_suspend,
- .resume = corgikbd_resume,
- .driver = {
- .name = "corgi-keyboard",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init corgikbd_init(void)
-{
- return platform_driver_register(&corgikbd_driver);
-}
-
-static void __exit corgikbd_exit(void)
-{
- platform_driver_unregister(&corgikbd_driver);
-}
-
-module_init(corgikbd_init);
-module_exit(corgikbd_exit);
-
-MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
-MODULE_DESCRIPTION("Corgi Keyboard Driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:corgi-keyboard");
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c
index 60ac4684f875..bc696931fed7 100644
--- a/drivers/input/keyboard/lm8323.c
+++ b/drivers/input/keyboard/lm8323.c
@@ -670,8 +670,6 @@ static int __devinit lm8323_probe(struct i2c_client *client,
goto fail1;
}
- i2c_set_clientdata(client, lm);
-
lm->client = client;
lm->idev = idev;
mutex_init(&lm->lock);
@@ -753,6 +751,8 @@ static int __devinit lm8323_probe(struct i2c_client *client,
goto fail4;
}
+ i2c_set_clientdata(client, lm);
+
device_init_wakeup(&client->dev, 1);
enable_irq_wake(client->irq);
@@ -778,6 +778,8 @@ static int __devexit lm8323_remove(struct i2c_client *client)
struct lm8323_chip *lm = i2c_get_clientdata(client);
int i;
+ i2c_set_clientdata(client, NULL);
+
disable_irq_wake(client->irq);
free_irq(client->irq, lm);
cancel_work_sync(&lm->work);
diff --git a/drivers/input/keyboard/spitzkbd.c b/drivers/input/keyboard/spitzkbd.c
deleted file mode 100644
index 13967422658c..000000000000
--- a/drivers/input/keyboard/spitzkbd.c
+++ /dev/null
@@ -1,496 +0,0 @@
-/*
- * Keyboard driver for Sharp Spitz, Borzoi and Akita (SL-Cxx00 series)
- *
- * Copyright (c) 2005 Richard Purdie
- *
- * Based on corgikbd.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/input.h>
-#include <linux/interrupt.h>
-#include <linux/jiffies.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-
-#include <mach/spitz.h>
-#include <mach/pxa2xx-gpio.h>
-
-#define KB_ROWS 7
-#define KB_COLS 11
-#define KB_ROWMASK(r) (1 << (r))
-#define SCANCODE(r,c) (((r)<<4) + (c) + 1)
-#define NR_SCANCODES ((KB_ROWS<<4) + 1)
-
-#define SCAN_INTERVAL (50) /* ms */
-#define HINGE_SCAN_INTERVAL (150) /* ms */
-
-#define SPITZ_KEY_CALENDER KEY_F1
-#define SPITZ_KEY_ADDRESS KEY_F2
-#define SPITZ_KEY_FN KEY_F3
-#define SPITZ_KEY_CANCEL KEY_F4
-#define SPITZ_KEY_EXOK KEY_F5
-#define SPITZ_KEY_EXCANCEL KEY_F6
-#define SPITZ_KEY_EXJOGDOWN KEY_F7
-#define SPITZ_KEY_EXJOGUP KEY_F8
-#define SPITZ_KEY_JAP1 KEY_LEFTALT
-#define SPITZ_KEY_JAP2 KEY_RIGHTCTRL
-#define SPITZ_KEY_SYNC KEY_F9
-#define SPITZ_KEY_MAIL KEY_F10
-#define SPITZ_KEY_OK KEY_F11
-#define SPITZ_KEY_MENU KEY_F12
-
-static unsigned char spitzkbd_keycode[NR_SCANCODES] = {
- 0, /* 0 */
- KEY_LEFTCTRL, KEY_1, KEY_3, KEY_5, KEY_6, KEY_7, KEY_9, KEY_0, KEY_BACKSPACE, SPITZ_KEY_EXOK, SPITZ_KEY_EXCANCEL, 0, 0, 0, 0, 0, /* 1-16 */
- 0, KEY_2, KEY_4, KEY_R, KEY_Y, KEY_8, KEY_I, KEY_O, KEY_P, SPITZ_KEY_EXJOGDOWN, SPITZ_KEY_EXJOGUP, 0, 0, 0, 0, 0, /* 17-32 */
- KEY_TAB, KEY_Q, KEY_E, KEY_T, KEY_G, KEY_U, KEY_J, KEY_K, 0, 0, 0, 0, 0, 0, 0, 0, /* 33-48 */
- SPITZ_KEY_ADDRESS, KEY_W, KEY_S, KEY_F, KEY_V, KEY_H, KEY_M, KEY_L, 0, KEY_RIGHTSHIFT, 0, 0, 0, 0, 0, 0, /* 49-64 */
- SPITZ_KEY_CALENDER, KEY_A, KEY_D, KEY_C, KEY_B, KEY_N, KEY_DOT, 0, KEY_ENTER, KEY_LEFTSHIFT, 0, 0, 0, 0, 0, 0, /* 65-80 */
- SPITZ_KEY_MAIL, KEY_Z, KEY_X, KEY_MINUS, KEY_SPACE, KEY_COMMA, 0, KEY_UP, 0, 0, SPITZ_KEY_FN, 0, 0, 0, 0, 0, /* 81-96 */
- KEY_SYSRQ, SPITZ_KEY_JAP1, SPITZ_KEY_JAP2, SPITZ_KEY_CANCEL, SPITZ_KEY_OK, SPITZ_KEY_MENU, KEY_LEFT, KEY_DOWN, KEY_RIGHT, 0, 0, 0, 0, 0, 0, 0 /* 97-112 */
-};
-
-static int spitz_strobes[] = {
- SPITZ_GPIO_KEY_STROBE0,
- SPITZ_GPIO_KEY_STROBE1,
- SPITZ_GPIO_KEY_STROBE2,
- SPITZ_GPIO_KEY_STROBE3,
- SPITZ_GPIO_KEY_STROBE4,
- SPITZ_GPIO_KEY_STROBE5,
- SPITZ_GPIO_KEY_STROBE6,
- SPITZ_GPIO_KEY_STROBE7,
- SPITZ_GPIO_KEY_STROBE8,
- SPITZ_GPIO_KEY_STROBE9,
- SPITZ_GPIO_KEY_STROBE10,
-};
-
-static int spitz_senses[] = {
- SPITZ_GPIO_KEY_SENSE0,
- SPITZ_GPIO_KEY_SENSE1,
- SPITZ_GPIO_KEY_SENSE2,
- SPITZ_GPIO_KEY_SENSE3,
- SPITZ_GPIO_KEY_SENSE4,
- SPITZ_GPIO_KEY_SENSE5,
- SPITZ_GPIO_KEY_SENSE6,
-};
-
-struct spitzkbd {
- unsigned char keycode[ARRAY_SIZE(spitzkbd_keycode)];
- struct input_dev *input;
- char phys[32];
-
- spinlock_t lock;
- struct timer_list timer;
- struct timer_list htimer;
-
- unsigned int suspended;
- unsigned long suspend_jiffies;
-};
-
-#define KB_DISCHARGE_DELAY 10
-#define KB_ACTIVATE_DELAY 10
-
-/* Helper functions for reading the keyboard matrix
- * Note: We should really be using the generic gpio functions to alter
- * GPDR but it requires a function call per GPIO bit which is
- * excessive when we need to access 11 bits at once, multiple times.
- * These functions must be called within local_irq_save()/local_irq_restore()
- * or similar.
- */
-static inline void spitzkbd_discharge_all(void)
-{
- /* STROBE All HiZ */
- GPCR0 = SPITZ_GPIO_G0_STROBE_BIT;
- GPDR0 &= ~SPITZ_GPIO_G0_STROBE_BIT;
- GPCR1 = SPITZ_GPIO_G1_STROBE_BIT;
- GPDR1 &= ~SPITZ_GPIO_G1_STROBE_BIT;
- GPCR2 = SPITZ_GPIO_G2_STROBE_BIT;
- GPDR2 &= ~SPITZ_GPIO_G2_STROBE_BIT;
- GPCR3 = SPITZ_GPIO_G3_STROBE_BIT;
- GPDR3 &= ~SPITZ_GPIO_G3_STROBE_BIT;
-}
-
-static inline void spitzkbd_activate_all(void)
-{
- /* STROBE ALL -> High */
- GPSR0 = SPITZ_GPIO_G0_STROBE_BIT;
- GPDR0 |= SPITZ_GPIO_G0_STROBE_BIT;
- GPSR1 = SPITZ_GPIO_G1_STROBE_BIT;
- GPDR1 |= SPITZ_GPIO_G1_STROBE_BIT;
- GPSR2 = SPITZ_GPIO_G2_STROBE_BIT;
- GPDR2 |= SPITZ_GPIO_G2_STROBE_BIT;
- GPSR3 = SPITZ_GPIO_G3_STROBE_BIT;
- GPDR3 |= SPITZ_GPIO_G3_STROBE_BIT;
-
- udelay(KB_DISCHARGE_DELAY);
-
- /* Clear any interrupts we may have triggered when altering the GPIO lines */
- GEDR0 = SPITZ_GPIO_G0_SENSE_BIT;
- GEDR1 = SPITZ_GPIO_G1_SENSE_BIT;
- GEDR2 = SPITZ_GPIO_G2_SENSE_BIT;
- GEDR3 = SPITZ_GPIO_G3_SENSE_BIT;
-}
-
-static inline void spitzkbd_activate_col(int col)
-{
- int gpio = spitz_strobes[col];
- GPDR0 &= ~SPITZ_GPIO_G0_STROBE_BIT;
- GPDR1 &= ~SPITZ_GPIO_G1_STROBE_BIT;
- GPDR2 &= ~SPITZ_GPIO_G2_STROBE_BIT;
- GPDR3 &= ~SPITZ_GPIO_G3_STROBE_BIT;
- GPSR(gpio) = GPIO_bit(gpio);
- GPDR(gpio) |= GPIO_bit(gpio);
-}
-
-static inline void spitzkbd_reset_col(int col)
-{
- int gpio = spitz_strobes[col];
- GPDR0 &= ~SPITZ_GPIO_G0_STROBE_BIT;
- GPDR1 &= ~SPITZ_GPIO_G1_STROBE_BIT;
- GPDR2 &= ~SPITZ_GPIO_G2_STROBE_BIT;
- GPDR3 &= ~SPITZ_GPIO_G3_STROBE_BIT;
- GPCR(gpio) = GPIO_bit(gpio);
- GPDR(gpio) |= GPIO_bit(gpio);
-}
-
-static inline int spitzkbd_get_row_status(int col)
-{
- return ((GPLR0 >> 12) & 0x01) | ((GPLR0 >> 16) & 0x02)
- | ((GPLR2 >> 25) & 0x04) | ((GPLR1 << 1) & 0x08)
- | ((GPLR1 >> 0) & 0x10) | ((GPLR1 >> 1) & 0x60);
-}
-
-/*
- * The spitz keyboard only generates interrupts when a key is pressed.
- * When a key is pressed, we enable a timer which then scans the
- * keyboard to detect when the key is released.
- */
-
-/* Scan the hardware keyboard and push any changes up through the input layer */
-static void spitzkbd_scankeyboard(struct spitzkbd *spitzkbd_data)
-{
- unsigned int row, col, rowd;
- unsigned long flags;
- unsigned int num_pressed, pwrkey = ((GPLR(SPITZ_GPIO_ON_KEY) & GPIO_bit(SPITZ_GPIO_ON_KEY)) != 0);
-
- if (spitzkbd_data->suspended)
- return;
-
- spin_lock_irqsave(&spitzkbd_data->lock, flags);
-
- num_pressed = 0;
- for (col = 0; col < KB_COLS; col++) {
- /*
- * Discharge the output driver capacitatance
- * in the keyboard matrix. (Yes it is significant..)
- */
-
- spitzkbd_discharge_all();
- udelay(KB_DISCHARGE_DELAY);
-
- spitzkbd_activate_col(col);
- udelay(KB_ACTIVATE_DELAY);
-
- rowd = spitzkbd_get_row_status(col);
- for (row = 0; row < KB_ROWS; row++) {
- unsigned int scancode, pressed;
-
- scancode = SCANCODE(row, col);
- pressed = rowd & KB_ROWMASK(row);
-
- input_report_key(spitzkbd_data->input, spitzkbd_data->keycode[scancode], pressed);
-
- if (pressed)
- num_pressed++;
- }
- spitzkbd_reset_col(col);
- }
-
- spitzkbd_activate_all();
-
- input_report_key(spitzkbd_data->input, SPITZ_KEY_SYNC, (GPLR(SPITZ_GPIO_SYNC) & GPIO_bit(SPITZ_GPIO_SYNC)) != 0 );
- input_report_key(spitzkbd_data->input, KEY_SUSPEND, pwrkey);
-
- if (pwrkey && time_after(jiffies, spitzkbd_data->suspend_jiffies + msecs_to_jiffies(1000))) {
- input_event(spitzkbd_data->input, EV_PWR, KEY_SUSPEND, 1);
- spitzkbd_data->suspend_jiffies = jiffies;
- }
-
- input_sync(spitzkbd_data->input);
-
- /* if any keys are pressed, enable the timer */
- if (num_pressed)
- mod_timer(&spitzkbd_data->timer, jiffies + msecs_to_jiffies(SCAN_INTERVAL));
-
- spin_unlock_irqrestore(&spitzkbd_data->lock, flags);
-}
-
-/*
- * spitz keyboard interrupt handler.
- */
-static irqreturn_t spitzkbd_interrupt(int irq, void *dev_id)
-{
- struct spitzkbd *spitzkbd_data = dev_id;
-
- if (!timer_pending(&spitzkbd_data->timer)) {
- /** wait chattering delay **/
- udelay(20);
- spitzkbd_scankeyboard(spitzkbd_data);
- }
-
- return IRQ_HANDLED;
-}
-
-/*
- * spitz timer checking for released keys
- */
-static void spitzkbd_timer_callback(unsigned long data)
-{
- struct spitzkbd *spitzkbd_data = (struct spitzkbd *) data;
-
- spitzkbd_scankeyboard(spitzkbd_data);
-}
-
-/*
- * The hinge switches generate an interrupt.
- * We debounce the switches and pass them to the input system.
- */
-
-static irqreturn_t spitzkbd_hinge_isr(int irq, void *dev_id)
-{
- struct spitzkbd *spitzkbd_data = dev_id;
-
- if (!timer_pending(&spitzkbd_data->htimer))
- mod_timer(&spitzkbd_data->htimer, jiffies + msecs_to_jiffies(HINGE_SCAN_INTERVAL));
-
- return IRQ_HANDLED;
-}
-
-#define HINGE_STABLE_COUNT 2
-static int sharpsl_hinge_state;
-static int hinge_count;
-
-static void spitzkbd_hinge_timer(unsigned long data)
-{
- struct spitzkbd *spitzkbd_data = (struct spitzkbd *) data;
- unsigned long state;
- unsigned long flags;
-
- state = GPLR(SPITZ_GPIO_SWA) & (GPIO_bit(SPITZ_GPIO_SWA)|GPIO_bit(SPITZ_GPIO_SWB));
- state |= (GPLR(SPITZ_GPIO_AK_INT) & GPIO_bit(SPITZ_GPIO_AK_INT));
- if (state != sharpsl_hinge_state) {
- hinge_count = 0;
- sharpsl_hinge_state = state;
- } else if (hinge_count < HINGE_STABLE_COUNT) {
- hinge_count++;
- }
-
- if (hinge_count >= HINGE_STABLE_COUNT) {
- spin_lock_irqsave(&spitzkbd_data->lock, flags);
-
- input_report_switch(spitzkbd_data->input, SW_LID, ((GPLR(SPITZ_GPIO_SWA) & GPIO_bit(SPITZ_GPIO_SWA)) != 0));
- input_report_switch(spitzkbd_data->input, SW_TABLET_MODE, ((GPLR(SPITZ_GPIO_SWB) & GPIO_bit(SPITZ_GPIO_SWB)) != 0));
- input_report_switch(spitzkbd_data->input, SW_HEADPHONE_INSERT, ((GPLR(SPITZ_GPIO_AK_INT) & GPIO_bit(SPITZ_GPIO_AK_INT)) != 0));
- input_sync(spitzkbd_data->input);
-
- spin_unlock_irqrestore(&spitzkbd_data->lock, flags);
- } else {
- mod_timer(&spitzkbd_data->htimer, jiffies + msecs_to_jiffies(HINGE_SCAN_INTERVAL));
- }
-}
-
-#ifdef CONFIG_PM
-static int spitzkbd_suspend(struct platform_device *dev, pm_message_t state)
-{
- int i;
- struct spitzkbd *spitzkbd = platform_get_drvdata(dev);
- spitzkbd->suspended = 1;
-
- /* Set Strobe lines as inputs - *except* strobe line 0 leave this
- enabled so we can detect a power button press for resume */
- for (i = 1; i < SPITZ_KEY_STROBE_NUM; i++)
- pxa_gpio_mode(spitz_strobes[i] | GPIO_IN);
-
- return 0;
-}
-
-static int spitzkbd_resume(struct platform_device *dev)
-{
- int i;
- struct spitzkbd *spitzkbd = platform_get_drvdata(dev);
-
- for (i = 0; i < SPITZ_KEY_STROBE_NUM; i++)
- pxa_gpio_mode(spitz_strobes[i] | GPIO_OUT | GPIO_DFLT_HIGH);
-
- /* Upon resume, ignore the suspend key for a short while */
- spitzkbd->suspend_jiffies = jiffies;
- spitzkbd->suspended = 0;
-
- return 0;
-}
-#else
-#define spitzkbd_suspend NULL
-#define spitzkbd_resume NULL
-#endif
-
-static int __devinit spitzkbd_probe(struct platform_device *dev)
-{
- struct spitzkbd *spitzkbd;
- struct input_dev *input_dev;
- int i, err = -ENOMEM;
-
- spitzkbd = kzalloc(sizeof(struct spitzkbd), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!spitzkbd || !input_dev)
- goto fail;
-
- platform_set_drvdata(dev, spitzkbd);
- strcpy(spitzkbd->phys, "spitzkbd/input0");
-
- spin_lock_init(&spitzkbd->lock);
-
- /* Init Keyboard rescan timer */
- init_timer(&spitzkbd->timer);
- spitzkbd->timer.function = spitzkbd_timer_callback;
- spitzkbd->timer.data = (unsigned long) spitzkbd;
-
- /* Init Hinge Timer */
- init_timer(&spitzkbd->htimer);
- spitzkbd->htimer.function = spitzkbd_hinge_timer;
- spitzkbd->htimer.data = (unsigned long) spitzkbd;
-
- spitzkbd->suspend_jiffies = jiffies;
-
- spitzkbd->input = input_dev;
-
- input_dev->name = "Spitz Keyboard";
- input_dev->phys = spitzkbd->phys;
- input_dev->dev.parent = &dev->dev;
-
- input_dev->id.bustype = BUS_HOST;
- input_dev->id.vendor = 0x0001;
- input_dev->id.product = 0x0001;
- input_dev->id.version = 0x0100;
-
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) |
- BIT_MASK(EV_PWR) | BIT_MASK(EV_SW);
- input_dev->keycode = spitzkbd->keycode;
- input_dev->keycodesize = sizeof(unsigned char);
- input_dev->keycodemax = ARRAY_SIZE(spitzkbd_keycode);
-
- memcpy(spitzkbd->keycode, spitzkbd_keycode, sizeof(spitzkbd->keycode));
- for (i = 0; i < ARRAY_SIZE(spitzkbd_keycode); i++)
- set_bit(spitzkbd->keycode[i], input_dev->keybit);
- clear_bit(0, input_dev->keybit);
- set_bit(KEY_SUSPEND, input_dev->keybit);
- set_bit(SW_LID, input_dev->swbit);
- set_bit(SW_TABLET_MODE, input_dev->swbit);
- set_bit(SW_HEADPHONE_INSERT, input_dev->swbit);
-
- err = input_register_device(input_dev);
- if (err)
- goto fail;
-
- mod_timer(&spitzkbd->htimer, jiffies + msecs_to_jiffies(HINGE_SCAN_INTERVAL));
-
- /* Setup sense interrupts - RisingEdge Detect, sense lines as inputs */
- for (i = 0; i < SPITZ_KEY_SENSE_NUM; i++) {
- pxa_gpio_mode(spitz_senses[i] | GPIO_IN);
- if (request_irq(IRQ_GPIO(spitz_senses[i]), spitzkbd_interrupt,
- IRQF_DISABLED|IRQF_TRIGGER_RISING,
- "Spitzkbd Sense", spitzkbd))
- printk(KERN_WARNING "spitzkbd: Can't get Sense IRQ: %d!\n", i);
- }
-
- /* Set Strobe lines as outputs - set high */
- for (i = 0; i < SPITZ_KEY_STROBE_NUM; i++)
- pxa_gpio_mode(spitz_strobes[i] | GPIO_OUT | GPIO_DFLT_HIGH);
-
- pxa_gpio_mode(SPITZ_GPIO_SYNC | GPIO_IN);
- pxa_gpio_mode(SPITZ_GPIO_ON_KEY | GPIO_IN);
- pxa_gpio_mode(SPITZ_GPIO_SWA | GPIO_IN);
- pxa_gpio_mode(SPITZ_GPIO_SWB | GPIO_IN);
-
- request_irq(SPITZ_IRQ_GPIO_SYNC, spitzkbd_interrupt,
- IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "Spitzkbd Sync", spitzkbd);
- request_irq(SPITZ_IRQ_GPIO_ON_KEY, spitzkbd_interrupt,
- IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "Spitzkbd PwrOn", spitzkbd);
- request_irq(SPITZ_IRQ_GPIO_SWA, spitzkbd_hinge_isr,
- IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "Spitzkbd SWA", spitzkbd);
- request_irq(SPITZ_IRQ_GPIO_SWB, spitzkbd_hinge_isr,
- IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "Spitzkbd SWB", spitzkbd);
- request_irq(SPITZ_IRQ_GPIO_AK_INT, spitzkbd_hinge_isr,
- IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "Spitzkbd HP", spitzkbd);
-
- return 0;
-
- fail: input_free_device(input_dev);
- kfree(spitzkbd);
- return err;
-}
-
-static int __devexit spitzkbd_remove(struct platform_device *dev)
-{
- int i;
- struct spitzkbd *spitzkbd = platform_get_drvdata(dev);
-
- for (i = 0; i < SPITZ_KEY_SENSE_NUM; i++)
- free_irq(IRQ_GPIO(spitz_senses[i]), spitzkbd);
-
- free_irq(SPITZ_IRQ_GPIO_SYNC, spitzkbd);
- free_irq(SPITZ_IRQ_GPIO_ON_KEY, spitzkbd);
- free_irq(SPITZ_IRQ_GPIO_SWA, spitzkbd);
- free_irq(SPITZ_IRQ_GPIO_SWB, spitzkbd);
- free_irq(SPITZ_IRQ_GPIO_AK_INT, spitzkbd);
-
- del_timer_sync(&spitzkbd->htimer);
- del_timer_sync(&spitzkbd->timer);
-
- input_unregister_device(spitzkbd->input);
-
- kfree(spitzkbd);
-
- return 0;
-}
-
-static struct platform_driver spitzkbd_driver = {
- .probe = spitzkbd_probe,
- .remove = __devexit_p(spitzkbd_remove),
- .suspend = spitzkbd_suspend,
- .resume = spitzkbd_resume,
- .driver = {
- .name = "spitz-keyboard",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init spitzkbd_init(void)
-{
- return platform_driver_register(&spitzkbd_driver);
-}
-
-static void __exit spitzkbd_exit(void)
-{
- platform_driver_unregister(&spitzkbd_driver);
-}
-
-module_init(spitzkbd_init);
-module_exit(spitzkbd_exit);
-
-MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
-MODULE_DESCRIPTION("Spitz Keyboard Driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:spitz-keyboard");
diff --git a/drivers/input/keyboard/tca6416-keypad.c b/drivers/input/keyboard/tca6416-keypad.c
new file mode 100644
index 000000000000..493c93f25e2a
--- /dev/null
+++ b/drivers/input/keyboard/tca6416-keypad.c
@@ -0,0 +1,349 @@
+/*
+ * Driver for keys on TCA6416 I2C IO expander
+ *
+ * Copyright (C) 2010 Texas Instruments
+ *
+ * Author : Sriramakrishnan.A.G. <srk@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/tca6416_keypad.h>
+
+#define TCA6416_INPUT 0
+#define TCA6416_OUTPUT 1
+#define TCA6416_INVERT 2
+#define TCA6416_DIRECTION 3
+
+static const struct i2c_device_id tca6416_id[] = {
+ { "tca6416-keys", 16, },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tca6416_id);
+
+struct tca6416_drv_data {
+ struct input_dev *input;
+ struct tca6416_button data[0];
+};
+
+struct tca6416_keypad_chip {
+ uint16_t reg_output;
+ uint16_t reg_direction;
+ uint16_t reg_input;
+
+ struct i2c_client *client;
+ struct input_dev *input;
+ struct delayed_work dwork;
+ u16 pinmask;
+ int irqnum;
+ bool use_polling;
+ struct tca6416_button buttons[0];
+};
+
+static int tca6416_write_reg(struct tca6416_keypad_chip *chip, int reg, u16 val)
+{
+ int error;
+
+ error = i2c_smbus_write_word_data(chip->client, reg << 1, val);
+ if (error < 0) {
+ dev_err(&chip->client->dev,
+ "%s failed, reg: %d, val: %d, error: %d\n",
+ __func__, reg, val, error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int tca6416_read_reg(struct tca6416_keypad_chip *chip, int reg, u16 *val)
+{
+ int retval;
+
+ retval = i2c_smbus_read_word_data(chip->client, reg << 1);
+ if (retval < 0) {
+ dev_err(&chip->client->dev, "%s failed, reg: %d, error: %d\n",
+ __func__, reg, retval);
+ return retval;
+ }
+
+ *val = (u16)retval;
+ return 0;
+}
+
+static void tca6416_keys_scan(struct tca6416_keypad_chip *chip)
+{
+ struct input_dev *input = chip->input;
+ u16 reg_val, val;
+ int error, i, pin_index;
+
+ error = tca6416_read_reg(chip, TCA6416_INPUT, &reg_val);
+ if (error)
+ return;
+
+ reg_val &= chip->pinmask;
+
+ /* Figure out which lines have changed */
+ val = reg_val ^ chip->reg_input;
+ chip->reg_input = reg_val;
+
+ for (i = 0, pin_index = 0; i < 16; i++) {
+ if (val & (1 << i)) {
+ struct tca6416_button *button = &chip->buttons[pin_index];
+ unsigned int type = button->type ?: EV_KEY;
+ int state = ((reg_val & (1 << i)) ? 1 : 0)
+ ^ button->active_low;
+
+ input_event(input, type, button->code, !!state);
+ input_sync(input);
+ }
+
+ if (chip->pinmask & (1 << i))
+ pin_index++;
+ }
+}
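+
+/*
+ * Worked example of the scan above (illustrative values, not taken from
+ * any board file): with pinmask = 0x000F (buttons on the four low pins),
+ * a previous reg_input of 0x0005 and a new reading of 0x0001, the XOR
+ * gives val = 0x0004, so only bit 2 changed.  That bit maps to
+ * buttons[2] (pin_index counts only the pins present in pinmask), and
+ * with active_low = 0 the new state is (0x0001 >> 2) & 1 = 0, i.e. the
+ * button is reported as released.
+ */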
+
+/*
+ * This is a threaded IRQ handler, so it can (and will) sleep.
+ */
+static irqreturn_t tca6416_keys_isr(int irq, void *dev_id)
+{
+ struct tca6416_keypad_chip *chip = dev_id;
+
+ tca6416_keys_scan(chip);
+
+ return IRQ_HANDLED;
+}
+
+static void tca6416_keys_work_func(struct work_struct *work)
+{
+ struct tca6416_keypad_chip *chip =
+ container_of(work, struct tca6416_keypad_chip, dwork.work);
+
+ tca6416_keys_scan(chip);
+ schedule_delayed_work(&chip->dwork, msecs_to_jiffies(100));
+}
+
+static int tca6416_keys_open(struct input_dev *dev)
+{
+ struct tca6416_keypad_chip *chip = input_get_drvdata(dev);
+
+ /* Get initial device state in case it has switches */
+ tca6416_keys_scan(chip);
+
+ if (chip->use_polling)
+ schedule_delayed_work(&chip->dwork, msecs_to_jiffies(100));
+ else
+ enable_irq(chip->irqnum);
+
+ return 0;
+}
+
+static void tca6416_keys_close(struct input_dev *dev)
+{
+ struct tca6416_keypad_chip *chip = input_get_drvdata(dev);
+
+ if (chip->use_polling)
+ cancel_delayed_work_sync(&chip->dwork);
+ else
+ disable_irq(chip->irqnum);
+}
+
+static int __devinit tca6416_setup_registers(struct tca6416_keypad_chip *chip)
+{
+ int error;
+
+ error = tca6416_read_reg(chip, TCA6416_OUTPUT, &chip->reg_output);
+ if (error)
+ return error;
+
+ error = tca6416_read_reg(chip, TCA6416_DIRECTION, &chip->reg_direction);
+ if (error)
+ return error;
+
+ /* ensure that keypad pins are set to input */
+ error = tca6416_write_reg(chip, TCA6416_DIRECTION,
+ chip->reg_direction | chip->pinmask);
+ if (error)
+ return error;
+
+ error = tca6416_read_reg(chip, TCA6416_DIRECTION, &chip->reg_direction);
+ if (error)
+ return error;
+
+ error = tca6416_read_reg(chip, TCA6416_INPUT, &chip->reg_input);
+ if (error)
+ return error;
+
+ chip->reg_input &= chip->pinmask;
+
+ return 0;
+}
+
+static int __devinit tca6416_keypad_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tca6416_keys_platform_data *pdata;
+ struct tca6416_keypad_chip *chip;
+ struct input_dev *input;
+ int error;
+ int i;
+
+ /* Check functionality */
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) {
+ dev_err(&client->dev, "%s adapter not supported\n",
+ dev_driver_string(&client->adapter->dev));
+ return -ENODEV;
+ }
+
+ pdata = client->dev.platform_data;
+ if (!pdata) {
+ dev_dbg(&client->dev, "no platform data\n");
+ return -EINVAL;
+ }
+
+ chip = kzalloc(sizeof(struct tca6416_keypad_chip) +
+ pdata->nbuttons * sizeof(struct tca6416_button),
+ GFP_KERNEL);
+ input = input_allocate_device();
+ if (!chip || !input) {
+ error = -ENOMEM;
+ goto fail1;
+ }
+
+ chip->client = client;
+ chip->input = input;
+ chip->pinmask = pdata->pinmask;
+ chip->use_polling = pdata->use_polling;
+
+ INIT_DELAYED_WORK(&chip->dwork, tca6416_keys_work_func);
+
+ input->phys = "tca6416-keys/input0";
+ input->name = client->name;
+ input->dev.parent = &client->dev;
+
+ input->open = tca6416_keys_open;
+ input->close = tca6416_keys_close;
+
+ input->id.bustype = BUS_HOST;
+ input->id.vendor = 0x0001;
+ input->id.product = 0x0001;
+ input->id.version = 0x0100;
+
+ /* Enable auto repeat feature of Linux input subsystem */
+ if (pdata->rep)
+ __set_bit(EV_REP, input->evbit);
+
+ for (i = 0; i < pdata->nbuttons; i++) {
+ unsigned int type;
+
+ chip->buttons[i] = pdata->buttons[i];
+ type = (pdata->buttons[i].type) ?: EV_KEY;
+ input_set_capability(input, type, pdata->buttons[i].code);
+ }
+
+ input_set_drvdata(input, chip);
+
+ /*
+ * Initialize cached registers from their original values.
+ * We can't share this chip with another i2c master.
+ */
+ error = tca6416_setup_registers(chip);
+ if (error)
+ goto fail1;
+
+ if (!chip->use_polling) {
+ if (pdata->irq_is_gpio)
+ chip->irqnum = gpio_to_irq(client->irq);
+ else
+ chip->irqnum = client->irq;
+
+ error = request_threaded_irq(chip->irqnum, NULL,
+ tca6416_keys_isr,
+ IRQF_TRIGGER_FALLING,
+ "tca6416-keypad", chip);
+ if (error) {
+ dev_dbg(&client->dev,
+ "Unable to claim irq %d; error %d\n",
+ chip->irqnum, error);
+ goto fail1;
+ }
+ disable_irq(chip->irqnum);
+ }
+
+ error = input_register_device(input);
+ if (error) {
+ dev_dbg(&client->dev,
+ "Unable to register input device, error: %d\n", error);
+ goto fail2;
+ }
+
+ i2c_set_clientdata(client, chip);
+
+ return 0;
+
+fail2:
+ if (!chip->use_polling) {
+ free_irq(chip->irqnum, chip);
+ enable_irq(chip->irqnum);
+ }
+fail1:
+ input_free_device(input);
+ kfree(chip);
+ return error;
+}
+
+static int __devexit tca6416_keypad_remove(struct i2c_client *client)
+{
+ struct tca6416_keypad_chip *chip = i2c_get_clientdata(client);
+
+ if (!chip->use_polling) {
+ free_irq(chip->irqnum, chip);
+ enable_irq(chip->irqnum);
+ }
+
+ input_unregister_device(chip->input);
+ kfree(chip);
+
+ i2c_set_clientdata(client, NULL);
+
+ return 0;
+}
+
+
+static struct i2c_driver tca6416_keypad_driver = {
+ .driver = {
+ .name = "tca6416-keypad",
+ },
+ .probe = tca6416_keypad_probe,
+ .remove = __devexit_p(tca6416_keypad_remove),
+ .id_table = tca6416_id,
+};
+
+static int __init tca6416_keypad_init(void)
+{
+ return i2c_add_driver(&tca6416_keypad_driver);
+}
+
+subsys_initcall(tca6416_keypad_init);
+
+static void __exit tca6416_keypad_exit(void)
+{
+ i2c_del_driver(&tca6416_keypad_driver);
+}
+module_exit(tca6416_keypad_exit);
+
+MODULE_AUTHOR("Sriramakrishnan <srk@ti.com>");
+MODULE_DESCRIPTION("Keypad driver over tca6146 IO expander");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/keyboard/tosakbd.c b/drivers/input/keyboard/tosakbd.c
deleted file mode 100644
index 3910f269cfc8..000000000000
--- a/drivers/input/keyboard/tosakbd.c
+++ /dev/null
@@ -1,431 +0,0 @@
-/*
- * Keyboard driver for Sharp Tosa models (SL-6000x)
- *
- * Copyright (c) 2005 Dirk Opfer
- * Copyright (c) 2007 Dmitry Baryshkov
- *
- * Based on xtkbd.c/locomkbd.c/corgikbd.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/input.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-
-#include <mach/gpio.h>
-#include <mach/tosa.h>
-
-#define KB_ROWMASK(r) (1 << (r))
-#define SCANCODE(r, c) (((r)<<4) + (c) + 1)
-#define NR_SCANCODES SCANCODE(TOSA_KEY_SENSE_NUM - 1, TOSA_KEY_STROBE_NUM - 1) + 1
-
-#define SCAN_INTERVAL (HZ/10)
-
-#define KB_DISCHARGE_DELAY 10
-#define KB_ACTIVATE_DELAY 10
-
-static unsigned short tosakbd_keycode[NR_SCANCODES] = {
-0,
-0, KEY_W, 0, 0, 0, KEY_K, KEY_BACKSPACE, KEY_P,
-0, 0, 0, 0, 0, 0, 0, 0,
-KEY_Q, KEY_E, KEY_T, KEY_Y, 0, KEY_O, KEY_I, KEY_COMMA,
-0, 0, 0, 0, 0, 0, 0, 0,
-KEY_A, KEY_D, KEY_G, KEY_U, 0, KEY_L, KEY_ENTER, KEY_DOT,
-0, 0, 0, 0, 0, 0, 0, 0,
-KEY_Z, KEY_C, KEY_V, KEY_J, TOSA_KEY_ADDRESSBOOK, TOSA_KEY_CANCEL, TOSA_KEY_CENTER, TOSA_KEY_OK,
-KEY_LEFTSHIFT, 0, 0, 0, 0, 0, 0, 0,
-KEY_S, KEY_R, KEY_B, KEY_N, TOSA_KEY_CALENDAR, TOSA_KEY_HOMEPAGE, KEY_LEFTCTRL, TOSA_KEY_LIGHT,
-0, KEY_RIGHTSHIFT, 0, 0, 0, 0, 0, 0,
-KEY_TAB, KEY_SLASH, KEY_H, KEY_M, TOSA_KEY_MENU, 0, KEY_UP, 0,
-0, 0, TOSA_KEY_FN, 0, 0, 0, 0, 0,
-KEY_X, KEY_F, KEY_SPACE, KEY_APOSTROPHE, TOSA_KEY_MAIL, KEY_LEFT, KEY_DOWN, KEY_RIGHT,
-0, 0, 0,
-};
-
-struct tosakbd {
- unsigned short keycode[ARRAY_SIZE(tosakbd_keycode)];
- struct input_dev *input;
- bool suspended;
- spinlock_t lock; /* protect kbd scanning */
- struct timer_list timer;
-};
-
-
-/* Helper functions for reading the keyboard matrix
- * Note: We should really be using the generic gpio functions to alter
- * GPDR but it requires a function call per GPIO bit which is
- * excessive when we need to access 12 bits at once, multiple times.
- * These functions must be called within local_irq_save()/local_irq_restore()
- * or similar.
- */
-#define GET_ROWS_STATUS(c) ((GPLR2 & TOSA_GPIO_ALL_SENSE_BIT) >> TOSA_GPIO_ALL_SENSE_RSHIFT)
-
-static inline void tosakbd_discharge_all(void)
-{
- /* STROBE All HiZ */
- GPCR1 = TOSA_GPIO_HIGH_STROBE_BIT;
- GPDR1 &= ~TOSA_GPIO_HIGH_STROBE_BIT;
- GPCR2 = TOSA_GPIO_LOW_STROBE_BIT;
- GPDR2 &= ~TOSA_GPIO_LOW_STROBE_BIT;
-}
-
-static inline void tosakbd_activate_all(void)
-{
- /* STROBE ALL -> High */
- GPSR1 = TOSA_GPIO_HIGH_STROBE_BIT;
- GPDR1 |= TOSA_GPIO_HIGH_STROBE_BIT;
- GPSR2 = TOSA_GPIO_LOW_STROBE_BIT;
- GPDR2 |= TOSA_GPIO_LOW_STROBE_BIT;
-
- udelay(KB_DISCHARGE_DELAY);
-
- /* STATE CLEAR */
- GEDR2 |= TOSA_GPIO_ALL_SENSE_BIT;
-}
-
-static inline void tosakbd_activate_col(int col)
-{
- if (col <= 5) {
- /* STROBE col -> High, not col -> HiZ */
- GPSR1 = TOSA_GPIO_STROBE_BIT(col);
- GPDR1 = (GPDR1 & ~TOSA_GPIO_HIGH_STROBE_BIT) | TOSA_GPIO_STROBE_BIT(col);
- } else {
- /* STROBE col -> High, not col -> HiZ */
- GPSR2 = TOSA_GPIO_STROBE_BIT(col);
- GPDR2 = (GPDR2 & ~TOSA_GPIO_LOW_STROBE_BIT) | TOSA_GPIO_STROBE_BIT(col);
- }
-}
-
-static inline void tosakbd_reset_col(int col)
-{
- if (col <= 5) {
- /* STROBE col -> Low */
- GPCR1 = TOSA_GPIO_STROBE_BIT(col);
- /* STROBE col -> out, not col -> HiZ */
- GPDR1 = (GPDR1 & ~TOSA_GPIO_HIGH_STROBE_BIT) | TOSA_GPIO_STROBE_BIT(col);
- } else {
- /* STROBE col -> Low */
- GPCR2 = TOSA_GPIO_STROBE_BIT(col);
- /* STROBE col -> out, not col -> HiZ */
- GPDR2 = (GPDR2 & ~TOSA_GPIO_LOW_STROBE_BIT) | TOSA_GPIO_STROBE_BIT(col);
- }
-}
-/*
- * The tosa keyboard only generates interrupts when a key is pressed.
- * So when a key is pressed, we enable a timer. This timer scans the
- * keyboard, and this is how we detect when the key is released.
- */
-
-/* Scan the hardware keyboard and push any changes up through the input layer */
-static void tosakbd_scankeyboard(struct platform_device *dev)
-{
- struct tosakbd *tosakbd = platform_get_drvdata(dev);
- unsigned int row, col, rowd;
- unsigned long flags;
- unsigned int num_pressed = 0;
-
- spin_lock_irqsave(&tosakbd->lock, flags);
-
- if (tosakbd->suspended)
- goto out;
-
- for (col = 0; col < TOSA_KEY_STROBE_NUM; col++) {
- /*
- * Discharge the output driver capacitatance
- * in the keyboard matrix. (Yes it is significant..)
- */
- tosakbd_discharge_all();
- udelay(KB_DISCHARGE_DELAY);
-
- tosakbd_activate_col(col);
- udelay(KB_ACTIVATE_DELAY);
-
- rowd = GET_ROWS_STATUS(col);
-
- for (row = 0; row < TOSA_KEY_SENSE_NUM; row++) {
- unsigned int scancode, pressed;
- scancode = SCANCODE(row, col);
- pressed = rowd & KB_ROWMASK(row);
-
- if (pressed && !tosakbd->keycode[scancode])
- dev_warn(&dev->dev,
- "unhandled scancode: 0x%02x\n",
- scancode);
-
- input_report_key(tosakbd->input,
- tosakbd->keycode[scancode],
- pressed);
- if (pressed)
- num_pressed++;
- }
-
- tosakbd_reset_col(col);
- }
-
- tosakbd_activate_all();
-
- input_sync(tosakbd->input);
-
- /* if any keys are pressed, enable the timer */
- if (num_pressed)
- mod_timer(&tosakbd->timer, jiffies + SCAN_INTERVAL);
-
- out:
- spin_unlock_irqrestore(&tosakbd->lock, flags);
-}
-
-/*
- * tosa keyboard interrupt handler.
- */
-static irqreturn_t tosakbd_interrupt(int irq, void *__dev)
-{
- struct platform_device *dev = __dev;
- struct tosakbd *tosakbd = platform_get_drvdata(dev);
-
- if (!timer_pending(&tosakbd->timer)) {
- /** wait chattering delay **/
- udelay(20);
- tosakbd_scankeyboard(dev);
- }
-
- return IRQ_HANDLED;
-}
-
-/*
- * tosa timer checking for released keys
- */
-static void tosakbd_timer_callback(unsigned long __dev)
-{
- struct platform_device *dev = (struct platform_device *)__dev;
-
- tosakbd_scankeyboard(dev);
-}
-
-#ifdef CONFIG_PM
-static int tosakbd_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct tosakbd *tosakbd = platform_get_drvdata(dev);
- unsigned long flags;
-
- spin_lock_irqsave(&tosakbd->lock, flags);
- tosakbd->suspended = true;
- spin_unlock_irqrestore(&tosakbd->lock, flags);
-
- del_timer_sync(&tosakbd->timer);
-
- return 0;
-}
-
-static int tosakbd_resume(struct platform_device *dev)
-{
- struct tosakbd *tosakbd = platform_get_drvdata(dev);
-
- tosakbd->suspended = false;
- tosakbd_scankeyboard(dev);
-
- return 0;
-}
-#else
-#define tosakbd_suspend NULL
-#define tosakbd_resume NULL
-#endif
-
-static int __devinit tosakbd_probe(struct platform_device *pdev) {
-
- int i;
- struct tosakbd *tosakbd;
- struct input_dev *input_dev;
- int error;
-
- tosakbd = kzalloc(sizeof(struct tosakbd), GFP_KERNEL);
- if (!tosakbd)
- return -ENOMEM;
-
- input_dev = input_allocate_device();
- if (!input_dev) {
- kfree(tosakbd);
- return -ENOMEM;
- }
-
- platform_set_drvdata(pdev, tosakbd);
-
- spin_lock_init(&tosakbd->lock);
-
- /* Init Keyboard rescan timer */
- init_timer(&tosakbd->timer);
- tosakbd->timer.function = tosakbd_timer_callback;
- tosakbd->timer.data = (unsigned long) pdev;
-
- tosakbd->input = input_dev;
-
- input_set_drvdata(input_dev, tosakbd);
- input_dev->name = "Tosa Keyboard";
- input_dev->phys = "tosakbd/input0";
- input_dev->dev.parent = &pdev->dev;
-
- input_dev->id.bustype = BUS_HOST;
- input_dev->id.vendor = 0x0001;
- input_dev->id.product = 0x0001;
- input_dev->id.version = 0x0100;
-
- input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_REP);
- input_dev->keycode = tosakbd->keycode;
- input_dev->keycodesize = sizeof(tosakbd->keycode[0]);
- input_dev->keycodemax = ARRAY_SIZE(tosakbd_keycode);
-
- memcpy(tosakbd->keycode, tosakbd_keycode, sizeof(tosakbd_keycode));
-
- for (i = 0; i < ARRAY_SIZE(tosakbd_keycode); i++)
- __set_bit(tosakbd->keycode[i], input_dev->keybit);
- __clear_bit(KEY_RESERVED, input_dev->keybit);
-
- /* Setup sense interrupts - RisingEdge Detect, sense lines as inputs */
- for (i = 0; i < TOSA_KEY_SENSE_NUM; i++) {
- int gpio = TOSA_GPIO_KEY_SENSE(i);
- int irq;
- error = gpio_request(gpio, "tosakbd");
- if (error < 0) {
- printk(KERN_ERR "tosakbd: failed to request GPIO %d, "
- " error %d\n", gpio, error);
- goto fail;
- }
-
- error = gpio_direction_input(TOSA_GPIO_KEY_SENSE(i));
- if (error < 0) {
- printk(KERN_ERR "tosakbd: failed to configure input"
- " direction for GPIO %d, error %d\n",
- gpio, error);
- gpio_free(gpio);
- goto fail;
- }
-
- irq = gpio_to_irq(gpio);
- if (irq < 0) {
- error = irq;
- printk(KERN_ERR "gpio-keys: Unable to get irq number"
- " for GPIO %d, error %d\n",
- gpio, error);
- gpio_free(gpio);
- goto fail;
- }
-
- error = request_irq(irq, tosakbd_interrupt,
- IRQF_DISABLED | IRQF_TRIGGER_RISING,
- "tosakbd", pdev);
-
- if (error) {
- printk("tosakbd: Can't get IRQ: %d: error %d!\n",
- irq, error);
- gpio_free(gpio);
- goto fail;
- }
- }
-
- /* Set Strobe lines as outputs - set high */
- for (i = 0; i < TOSA_KEY_STROBE_NUM; i++) {
- int gpio = TOSA_GPIO_KEY_STROBE(i);
- error = gpio_request(gpio, "tosakbd");
- if (error < 0) {
- printk(KERN_ERR "tosakbd: failed to request GPIO %d, "
- " error %d\n", gpio, error);
- goto fail2;
- }
-
- error = gpio_direction_output(gpio, 1);
- if (error < 0) {
- printk(KERN_ERR "tosakbd: failed to configure input"
- " direction for GPIO %d, error %d\n",
- gpio, error);
- gpio_free(gpio);
- goto fail2;
- }
-
- }
-
- error = input_register_device(input_dev);
- if (error) {
- printk(KERN_ERR "tosakbd: Unable to register input device, "
- "error: %d\n", error);
- goto fail2;
- }
-
- printk(KERN_INFO "input: Tosa Keyboard Registered\n");
-
- return 0;
-
-fail2:
- while (--i >= 0)
- gpio_free(TOSA_GPIO_KEY_STROBE(i));
-
- i = TOSA_KEY_SENSE_NUM;
-fail:
- while (--i >= 0) {
- free_irq(gpio_to_irq(TOSA_GPIO_KEY_SENSE(i)), pdev);
- gpio_free(TOSA_GPIO_KEY_SENSE(i));
- }
-
- platform_set_drvdata(pdev, NULL);
- input_free_device(input_dev);
- kfree(tosakbd);
-
- return error;
-}
-
-static int __devexit tosakbd_remove(struct platform_device *dev)
-{
- int i;
- struct tosakbd *tosakbd = platform_get_drvdata(dev);
-
- for (i = 0; i < TOSA_KEY_STROBE_NUM; i++)
- gpio_free(TOSA_GPIO_KEY_STROBE(i));
-
- for (i = 0; i < TOSA_KEY_SENSE_NUM; i++) {
- free_irq(gpio_to_irq(TOSA_GPIO_KEY_SENSE(i)), dev);
- gpio_free(TOSA_GPIO_KEY_SENSE(i));
- }
-
- del_timer_sync(&tosakbd->timer);
-
- input_unregister_device(tosakbd->input);
-
- kfree(tosakbd);
-
- return 0;
-}
-
-static struct platform_driver tosakbd_driver = {
- .probe = tosakbd_probe,
- .remove = __devexit_p(tosakbd_remove),
- .suspend = tosakbd_suspend,
- .resume = tosakbd_resume,
- .driver = {
- .name = "tosa-keyboard",
- .owner = THIS_MODULE,
- },
-};
-
-static int __devinit tosakbd_init(void)
-{
- return platform_driver_register(&tosakbd_driver);
-}
-
-static void __exit tosakbd_exit(void)
-{
- platform_driver_unregister(&tosakbd_driver);
-}
-
-module_init(tosakbd_init);
-module_exit(tosakbd_exit);
-
-MODULE_AUTHOR("Dirk Opfer <Dirk@Opfer-Online.de>");
-MODULE_DESCRIPTION("Tosa Keyboard Driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:tosa-keyboard");
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 23140a3bb8e0..48cdabec372a 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -22,6 +22,36 @@ config INPUT_88PM860X_ONKEY
To compile this driver as a module, choose M here: the module
will be called 88pm860x_onkey.
+config INPUT_AD714X
+ tristate "Analog Devices AD714x Capacitance Touch Sensor"
+ help
+ Say Y here if you want to support an AD7142/3/7/8/7A touch sensor.
+
+ You should select a bus connection too.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad714x.
+
+config INPUT_AD714X_I2C
+ tristate "support I2C bus connection"
+ depends on INPUT_AD714X && I2C
+ default y
+ help
+ Say Y here if you have AD7142/AD7147 hooked to an I2C bus.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad714x-i2c.
+
+config INPUT_AD714X_SPI
+ tristate "support SPI bus connection"
+ depends on INPUT_AD714X && SPI
+ default y
+ help
+ Say Y here if you have AD7142/AD7147 hooked to a SPI bus.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad714x-spi.
+
config INPUT_PCSPKR
tristate "PC Speaker support"
depends on PCSPKR_PLATFORM
@@ -277,6 +307,16 @@ config INPUT_PCF50633_PMU
Say Y to include support for delivering PMU events via input
layer on NXP PCF50633.
+config INPUT_PCF8574
+ tristate "PCF8574 Keypad input device"
+ depends on I2C && EXPERIMENTAL
+ help
+ Say Y here if you want to support a keypad connected via I2C
+ with a PCF8574.
+
+ To compile this driver as a module, choose M here: the
+ module will be called pcf8574_keypad.
+
config INPUT_GPIO_ROTARY_ENCODER
tristate "Rotary encoders connected to GPIO pins"
depends on GPIOLIB && GENERIC_GPIO
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 7e95a5d474dc..f9f577031e06 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -5,6 +5,9 @@
# Each configuration option enables a list of files.
obj-$(CONFIG_INPUT_88PM860X_ONKEY) += 88pm860x_onkey.o
+obj-$(CONFIG_INPUT_AD714X) += ad714x.o
+obj-$(CONFIG_INPUT_AD714X_I2C) += ad714x-i2c.o
+obj-$(CONFIG_INPUT_AD714X_SPI) += ad714x-spi.o
obj-$(CONFIG_INPUT_APANEL) += apanel.o
obj-$(CONFIG_INPUT_ATI_REMOTE) += ati_remote.o
obj-$(CONFIG_INPUT_ATI_REMOTE2) += ati_remote2.o
@@ -19,6 +22,7 @@ obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o
obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o
obj-$(CONFIG_INPUT_PCAP) += pcap_keys.o
obj-$(CONFIG_INPUT_PCF50633_PMU) += pcf50633-input.o
+obj-$(CONFIG_INPUT_PCF8574) += pcf8574_keypad.o
obj-$(CONFIG_INPUT_PCSPKR) += pcspkr.o
obj-$(CONFIG_INPUT_POWERMATE) += powermate.o
obj-$(CONFIG_INPUT_RB532_BUTTON) += rb532_button.o
diff --git a/drivers/input/misc/ad714x-i2c.c b/drivers/input/misc/ad714x-i2c.c
new file mode 100644
index 000000000000..e9adbe49f6a4
--- /dev/null
+++ b/drivers/input/misc/ad714x-i2c.c
@@ -0,0 +1,140 @@
+/*
+ * AD714X CapTouch Programmable Controller driver (I2C bus)
+ *
+ * Copyright 2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/input.h> /* BUS_I2C */
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include "ad714x.h"
+
+#ifdef CONFIG_PM
+static int ad714x_i2c_suspend(struct i2c_client *client, pm_message_t message)
+{
+ return ad714x_disable(i2c_get_clientdata(client));
+}
+
+static int ad714x_i2c_resume(struct i2c_client *client)
+{
+ return ad714x_enable(i2c_get_clientdata(client));
+}
+#else
+# define ad714x_i2c_suspend NULL
+# define ad714x_i2c_resume NULL
+#endif
+
+static int ad714x_i2c_write(struct device *dev, unsigned short reg,
+ unsigned short data)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int ret = 0;
+ u8 *_reg = (u8 *)&reg;
+ u8 *_data = (u8 *)&data;
+
+ u8 tx[4] = {
+ _reg[1],
+ _reg[0],
+ _data[1],
+ _data[0]
+ };
+
+ ret = i2c_master_send(client, tx, 4);
+ if (ret < 0)
+ dev_err(&client->dev, "I2C write error\n");
+
+ return ret;
+}
+
+static int ad714x_i2c_read(struct device *dev, unsigned short reg,
+ unsigned short *data)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int ret = 0;
+ u8 *_reg = (u8 *)&reg;
+ u8 *_data = (u8 *)data;
+
+ u8 tx[2] = {
+ _reg[1],
+ _reg[0]
+ };
+ u8 rx[2];
+
+ ret = i2c_master_send(client, tx, 2);
+ if (ret >= 0)
+ ret = i2c_master_recv(client, rx, 2);
+
+ if (unlikely(ret < 0)) {
+ dev_err(&client->dev, "I2C read error\n");
+ } else {
+ _data[0] = rx[1];
+ _data[1] = rx[0];
+ }
+
+ return ret;
+}
+
+static int __devinit ad714x_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ad714x_chip *chip;
+
+ chip = ad714x_probe(&client->dev, BUS_I2C, client->irq,
+ ad714x_i2c_read, ad714x_i2c_write);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ i2c_set_clientdata(client, chip);
+
+ return 0;
+}
+
+static int __devexit ad714x_i2c_remove(struct i2c_client *client)
+{
+ struct ad714x_chip *chip = i2c_get_clientdata(client);
+
+ ad714x_remove(chip);
+ i2c_set_clientdata(client, NULL);
+
+ return 0;
+}
+
+static const struct i2c_device_id ad714x_id[] = {
+ { "ad7142_captouch", 0 },
+ { "ad7143_captouch", 0 },
+ { "ad7147_captouch", 0 },
+ { "ad7147a_captouch", 0 },
+ { "ad7148_captouch", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ad714x_id);
+
+static struct i2c_driver ad714x_i2c_driver = {
+ .driver = {
+ .name = "ad714x_captouch",
+ },
+ .probe = ad714x_i2c_probe,
+ .remove = __devexit_p(ad714x_i2c_remove),
+ .suspend = ad714x_i2c_suspend,
+ .resume = ad714x_i2c_resume,
+ .id_table = ad714x_id,
+};
+
+static __init int ad714x_i2c_init(void)
+{
+ return i2c_add_driver(&ad714x_i2c_driver);
+}
+module_init(ad714x_i2c_init);
+
+static __exit void ad714x_i2c_exit(void)
+{
+ i2c_del_driver(&ad714x_i2c_driver);
+}
+module_exit(ad714x_i2c_exit);
+
+MODULE_DESCRIPTION("Analog Devices AD714X Capacitance Touch Sensor I2C Bus Driver");
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/ad714x-spi.c b/drivers/input/misc/ad714x-spi.c
new file mode 100644
index 000000000000..7f8dedfd1bfe
--- /dev/null
+++ b/drivers/input/misc/ad714x-spi.c
@@ -0,0 +1,103 @@
+/*
+ * AD714X CapTouch Programmable Controller driver (SPI bus)
+ *
+ * Copyright 2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/input.h>	/* BUS_SPI */
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+#include "ad714x.h"
+
+#define AD714x_SPI_CMD_PREFIX 0xE000 /* bits 15:11 */
+#define AD714x_SPI_READ BIT(10)
+
+#ifdef CONFIG_PM
+static int ad714x_spi_suspend(struct spi_device *spi, pm_message_t message)
+{
+ return ad714x_disable(spi_get_drvdata(spi));
+}
+
+static int ad714x_spi_resume(struct spi_device *spi)
+{
+ return ad714x_enable(spi_get_drvdata(spi));
+}
+#else
+# define ad714x_spi_suspend NULL
+# define ad714x_spi_resume NULL
+#endif
+
+static int ad714x_spi_read(struct device *dev, unsigned short reg,
+ unsigned short *data)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ unsigned short tx = AD714x_SPI_CMD_PREFIX | AD714x_SPI_READ | reg;
+
+ return spi_write_then_read(spi, (u8 *)&tx, 2, (u8 *)data, 2);
+}
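+
+/*
+ * Illustrative example (register number chosen for the sketch, not taken
+ * from the datasheet text): with the definitions above, a read of
+ * register 0x17 uses the 16-bit command word
+ * AD714x_SPI_CMD_PREFIX | AD714x_SPI_READ | 0x17 =
+ * 0xE000 | 0x0400 | 0x0017 = 0xE417.
+ */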
+
+static int ad714x_spi_write(struct device *dev, unsigned short reg,
+ unsigned short data)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ unsigned short tx[2] = {
+ AD714x_SPI_CMD_PREFIX | reg,
+ data
+ };
+
+ return spi_write(spi, (u8 *)tx, 4);
+}
+
+static int __devinit ad714x_spi_probe(struct spi_device *spi)
+{
+ struct ad714x_chip *chip;
+
+ chip = ad714x_probe(&spi->dev, BUS_SPI, spi->irq,
+ ad714x_spi_read, ad714x_spi_write);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ spi_set_drvdata(spi, chip);
+
+ return 0;
+}
+
+static int __devexit ad714x_spi_remove(struct spi_device *spi)
+{
+ struct ad714x_chip *chip = spi_get_drvdata(spi);
+
+ ad714x_remove(chip);
+ spi_set_drvdata(spi, NULL);
+
+ return 0;
+}
+
+static struct spi_driver ad714x_spi_driver = {
+ .driver = {
+ .name = "ad714x_captouch",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad714x_spi_probe,
+ .remove = __devexit_p(ad714x_spi_remove),
+ .suspend = ad714x_spi_suspend,
+ .resume = ad714x_spi_resume,
+};
+
+static __init int ad714x_spi_init(void)
+{
+ return spi_register_driver(&ad714x_spi_driver);
+}
+module_init(ad714x_spi_init);
+
+static __exit void ad714x_spi_exit(void)
+{
+ spi_unregister_driver(&ad714x_spi_driver);
+}
+module_exit(ad714x_spi_exit);
+
+MODULE_DESCRIPTION("Analog Devices AD714X Capacitance Touch Sensor SPI Bus Driver");
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/ad714x.c b/drivers/input/misc/ad714x.c
new file mode 100644
index 000000000000..0fe27baf5e72
--- /dev/null
+++ b/drivers/input/misc/ad714x.c
@@ -0,0 +1,1347 @@
+/*
+ * AD714X CapTouch Programmable Controller driver supporting AD7142/3/7/8/7A
+ *
+ * Copyright 2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/input/ad714x.h>
+#include "ad714x.h"
+
+#define AD714X_PWR_CTRL 0x0
+#define AD714X_STG_CAL_EN_REG 0x1
+#define AD714X_AMB_COMP_CTRL0_REG 0x2
+#define AD714X_PARTID_REG 0x17
+#define AD7142_PARTID 0xE620
+#define AD7143_PARTID 0xE630
+#define AD7147_PARTID 0x1470
+#define AD7148_PARTID 0x1480
+#define AD714X_STAGECFG_REG 0x80
+#define AD714X_SYSCFG_REG 0x0
+
+#define STG_LOW_INT_EN_REG 0x5
+#define STG_HIGH_INT_EN_REG 0x6
+#define STG_COM_INT_EN_REG 0x7
+#define STG_LOW_INT_STA_REG 0x8
+#define STG_HIGH_INT_STA_REG 0x9
+#define STG_COM_INT_STA_REG 0xA
+
+#define CDC_RESULT_S0 0xB
+#define CDC_RESULT_S1 0xC
+#define CDC_RESULT_S2 0xD
+#define CDC_RESULT_S3 0xE
+#define CDC_RESULT_S4 0xF
+#define CDC_RESULT_S5 0x10
+#define CDC_RESULT_S6 0x11
+#define CDC_RESULT_S7 0x12
+#define CDC_RESULT_S8 0x13
+#define CDC_RESULT_S9 0x14
+#define CDC_RESULT_S10 0x15
+#define CDC_RESULT_S11 0x16
+
+#define STAGE0_AMBIENT 0xF1
+#define STAGE1_AMBIENT 0x115
+#define STAGE2_AMBIENT 0x139
+#define STAGE3_AMBIENT 0x15D
+#define STAGE4_AMBIENT 0x181
+#define STAGE5_AMBIENT 0x1A5
+#define STAGE6_AMBIENT 0x1C9
+#define STAGE7_AMBIENT 0x1ED
+#define STAGE8_AMBIENT 0x211
+#define STAGE9_AMBIENT 0x234
+#define STAGE10_AMBIENT 0x259
+#define STAGE11_AMBIENT 0x27D
+
+#define PER_STAGE_REG_NUM 36
+#define STAGE_NUM 12
+#define STAGE_CFGREG_NUM 8
+#define SYS_CFGREG_NUM 8
+
+/*
+ * Driver state information used to maintain the software flow
+ */
+enum ad714x_device_state { IDLE, JITTER, ACTIVE, SPACE };
+
+struct ad714x_slider_drv {
+ int highest_stage;
+ int abs_pos;
+ int flt_pos;
+ enum ad714x_device_state state;
+ struct input_dev *input;
+};
+
+struct ad714x_wheel_drv {
+ int abs_pos;
+ int flt_pos;
+ int pre_mean_value;
+ int pre_highest_stage;
+ int pre_mean_value_no_offset;
+ int mean_value;
+ int mean_value_no_offset;
+ int pos_offset;
+ int pos_ratio;
+ int highest_stage;
+ enum ad714x_device_state state;
+ struct input_dev *input;
+};
+
+struct ad714x_touchpad_drv {
+ int x_highest_stage;
+ int x_flt_pos;
+ int x_abs_pos;
+ int y_highest_stage;
+ int y_flt_pos;
+ int y_abs_pos;
+ int left_ep;
+ int left_ep_val;
+ int right_ep;
+ int right_ep_val;
+ int top_ep;
+ int top_ep_val;
+ int bottom_ep;
+ int bottom_ep_val;
+ enum ad714x_device_state state;
+ struct input_dev *input;
+};
+
+struct ad714x_button_drv {
+ enum ad714x_device_state state;
+ /*
+ * Unlike slider/wheel/touchpad, all buttons point to
+ * the same input_dev instance
+ */
+ struct input_dev *input;
+};
+
+struct ad714x_driver_data {
+ struct ad714x_slider_drv *slider;
+ struct ad714x_wheel_drv *wheel;
+ struct ad714x_touchpad_drv *touchpad;
+ struct ad714x_button_drv *button;
+};
+
+/*
+ * Per-chip state that ties everything together; used as the private
+ * data of the SPI/I2C device
+ */
+struct ad714x_chip {
+ unsigned short h_state;
+ unsigned short l_state;
+ unsigned short c_state;
+ unsigned short adc_reg[STAGE_NUM];
+ unsigned short amb_reg[STAGE_NUM];
+ unsigned short sensor_val[STAGE_NUM];
+
+ struct ad714x_platform_data *hw;
+ struct ad714x_driver_data *sw;
+
+ int irq;
+ struct device *dev;
+ ad714x_read_t read;
+ ad714x_write_t write;
+
+ struct mutex mutex;
+
+ unsigned product;
+ unsigned version;
+};
+
+static void ad714x_use_com_int(struct ad714x_chip *ad714x,
+ int start_stage, int end_stage)
+{
+ unsigned short data;
+ unsigned short mask;
+
+ mask = ((1 << (end_stage + 1)) - 1) - (1 << start_stage);
+
+ ad714x->read(ad714x->dev, STG_COM_INT_EN_REG, &data);
+ data |= 1 << start_stage;
+ ad714x->write(ad714x->dev, STG_COM_INT_EN_REG, data);
+
+ ad714x->read(ad714x->dev, STG_HIGH_INT_EN_REG, &data);
+ data &= ~mask;
+ ad714x->write(ad714x->dev, STG_HIGH_INT_EN_REG, data);
+}
+
+static void ad714x_use_thr_int(struct ad714x_chip *ad714x,
+ int start_stage, int end_stage)
+{
+ unsigned short data;
+ unsigned short mask;
+
+ mask = ((1 << (end_stage + 1)) - 1) - (1 << start_stage);
+
+ ad714x->read(ad714x->dev, STG_COM_INT_EN_REG, &data);
+ data &= ~(1 << start_stage);
+ ad714x->write(ad714x->dev, STG_COM_INT_EN_REG, data);
+
+ ad714x->read(ad714x->dev, STG_HIGH_INT_EN_REG, &data);
+ data |= mask;
+ ad714x->write(ad714x->dev, STG_HIGH_INT_EN_REG, data);
+}
+
+static int ad714x_cal_highest_stage(struct ad714x_chip *ad714x,
+ int start_stage, int end_stage)
+{
+ int max_res = 0;
+ int max_idx = 0;
+ int i;
+
+ for (i = start_stage; i <= end_stage; i++) {
+ if (ad714x->sensor_val[i] > max_res) {
+ max_res = ad714x->sensor_val[i];
+ max_idx = i;
+ }
+ }
+
+ return max_idx;
+}
+
+static int ad714x_cal_abs_pos(struct ad714x_chip *ad714x,
+ int start_stage, int end_stage,
+ int highest_stage, int max_coord)
+{
+ int a_param, b_param;
+
+ if (highest_stage == start_stage) {
+ a_param = ad714x->sensor_val[start_stage + 1];
+ b_param = ad714x->sensor_val[start_stage] +
+ ad714x->sensor_val[start_stage + 1];
+ } else if (highest_stage == end_stage) {
+ a_param = ad714x->sensor_val[end_stage] *
+ (end_stage - start_stage) +
+ ad714x->sensor_val[end_stage - 1] *
+ (end_stage - start_stage - 1);
+ b_param = ad714x->sensor_val[end_stage] +
+ ad714x->sensor_val[end_stage - 1];
+ } else {
+ a_param = ad714x->sensor_val[highest_stage] *
+ (highest_stage - start_stage) +
+ ad714x->sensor_val[highest_stage - 1] *
+ (highest_stage - start_stage - 1) +
+ ad714x->sensor_val[highest_stage + 1] *
+ (highest_stage - start_stage + 1);
+ b_param = ad714x->sensor_val[highest_stage] +
+ ad714x->sensor_val[highest_stage - 1] +
+ ad714x->sensor_val[highest_stage + 1];
+ }
+
+ return (max_coord / (end_stage - start_stage)) * a_param / b_param;
+}
+
+/*
+ * One button can be connected to multiple positive and negative CDC inputs;
+ * multiple buttons can share the same positive/negative input of one CDC.
+ */
+static void ad714x_button_state_machine(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_button_plat *hw = &ad714x->hw->button[idx];
+ struct ad714x_button_drv *sw = &ad714x->sw->button[idx];
+
+ switch (sw->state) {
+ case IDLE:
+ if (((ad714x->h_state & hw->h_mask) == hw->h_mask) &&
+ ((ad714x->l_state & hw->l_mask) == hw->l_mask)) {
+ dev_dbg(ad714x->dev, "button %d touched\n", idx);
+ input_report_key(sw->input, hw->keycode, 1);
+ input_sync(sw->input);
+ sw->state = ACTIVE;
+ }
+ break;
+
+ case ACTIVE:
+ if (((ad714x->h_state & hw->h_mask) != hw->h_mask) ||
+ ((ad714x->l_state & hw->l_mask) != hw->l_mask)) {
+ dev_dbg(ad714x->dev, "button %d released\n", idx);
+ input_report_key(sw->input, hw->keycode, 0);
+ input_sync(sw->input);
+ sw->state = IDLE;
+ }
+ break;
+
+ default:
+ break;
+ }
+}
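+
+/*
+ * Illustrative example (hypothetical platform data, not from a real board):
+ * a button wired so that stages 2 and 3 must cross the high threshold would
+ * use h_mask = 0x000C and l_mask = 0.  It is reported pressed only once both
+ * bits are set in ad714x->h_state, and released as soon as either bit clears.
+ */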
+
+/*
+ * The response of a sensor is defined by the absolute number of codes
+ * between the current CDC value and the ambient value.
+ */
+static void ad714x_slider_cal_sensor_val(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_slider_plat *hw = &ad714x->hw->slider[idx];
+ int i;
+
+ for (i = hw->start_stage; i <= hw->end_stage; i++) {
+ ad714x->read(ad714x->dev, CDC_RESULT_S0 + i,
+ &ad714x->adc_reg[i]);
+ ad714x->read(ad714x->dev,
+ STAGE0_AMBIENT + i * PER_STAGE_REG_NUM,
+ &ad714x->amb_reg[i]);
+
+ ad714x->sensor_val[i] = abs(ad714x->adc_reg[i] -
+ ad714x->amb_reg[i]);
+ }
+}
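+
+/*
+ * Worked example (illustrative register values): with a CDC reading of
+ * 800 codes and an ambient value of 720 codes, the stage response is
+ * abs(800 - 720) = 80 codes.
+ */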
+
+static void ad714x_slider_cal_highest_stage(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_slider_plat *hw = &ad714x->hw->slider[idx];
+ struct ad714x_slider_drv *sw = &ad714x->sw->slider[idx];
+
+ sw->highest_stage = ad714x_cal_highest_stage(ad714x, hw->start_stage,
+ hw->end_stage);
+
+ dev_dbg(ad714x->dev, "slider %d highest_stage:%d\n", idx,
+ sw->highest_stage);
+}
+
+/*
+ * The formula is straightforward: it uses the sensor with the
+ * highest response and the 2 adjacent ones.
+ * When sensor 0 has the highest response, only sensor 0 and sensor 1
+ * are used in the calculation. Similarly, when the last sensor has the
+ * highest response, only the last sensor and the second-to-last one
+ * are used.
+ *
+ * For i= idx_of_peak_Sensor-1 to i= idx_of_peak_Sensor+1
+ * v += Sensor response(i)*i
+ * w += Sensor response(i)
+ * POS=(Number_of_Positions_Wanted/(Number_of_Sensors_Used-1)) *(v/w)
+ */
+static void ad714x_slider_cal_abs_pos(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_slider_plat *hw = &ad714x->hw->slider[idx];
+ struct ad714x_slider_drv *sw = &ad714x->sw->slider[idx];
+
+ sw->abs_pos = ad714x_cal_abs_pos(ad714x, hw->start_stage, hw->end_stage,
+ sw->highest_stage, hw->max_coord);
+
+ dev_dbg(ad714x->dev, "slider %d absolute position:%d\n", idx,
+ sw->abs_pos);
+}
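+
+/*
+ * Worked example of the formula above (illustrative values): for an
+ * 8-stage slider (start_stage 0, end_stage 7) with max_coord = 700 and
+ * responses of 100/300/100 codes on stages 2/3/4 (peak on stage 3),
+ * ad714x_cal_abs_pos() computes
+ *   a_param = 300*3 + 100*2 + 100*4 = 1500
+ *   b_param = 300 + 100 + 100 = 500
+ *   abs_pos = (700 / 7) * 1500 / 500 = 300
+ * i.e. 3/7 of the coordinate range, matching the peak position.
+ */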
+
+/*
+ * To minimise the impact of noise on the algorithm, ADI developed a
+ * routine that filters the CDC results after they have been read by the
+ * host processor.
+ * The filter used is an Infinite Impulse Response (IIR) filter implemented
+ * in firmware; it attenuates the noise on the CDC results:
+ * Filtered_CDC_result = (Filtered_CDC_result * (10 - Coefficient) +
+ * Latest_CDC_result * Coefficient)/10
+ */
+static void ad714x_slider_cal_flt_pos(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_slider_drv *sw = &ad714x->sw->slider[idx];
+
+ sw->flt_pos = (sw->flt_pos * (10 - 4) +
+ sw->abs_pos * 4)/10;
+
+ dev_dbg(ad714x->dev, "slider %d filter position:%d\n", idx,
+ sw->flt_pos);
+}
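+
+/*
+ * Worked example (illustrative values): with the coefficient of 4 used
+ * above, a previous filtered position of 100 and a new absolute position
+ * of 200 give flt_pos = (100 * 6 + 200 * 4) / 10 = 140.
+ */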
+
+static void ad714x_slider_use_com_int(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_slider_plat *hw = &ad714x->hw->slider[idx];
+
+ ad714x_use_com_int(ad714x, hw->start_stage, hw->end_stage);
+}
+
+static void ad714x_slider_use_thr_int(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_slider_plat *hw = &ad714x->hw->slider[idx];
+
+ ad714x_use_thr_int(ad714x, hw->start_stage, hw->end_stage);
+}
+
+static void ad714x_slider_state_machine(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_slider_plat *hw = &ad714x->hw->slider[idx];
+ struct ad714x_slider_drv *sw = &ad714x->sw->slider[idx];
+ unsigned short h_state, c_state;
+ unsigned short mask;
+
+ mask = ((1 << (hw->end_stage + 1)) - 1) - ((1 << hw->start_stage) - 1);
+
+ h_state = ad714x->h_state & mask;
+ c_state = ad714x->c_state & mask;
+
+ switch (sw->state) {
+ case IDLE:
+ if (h_state) {
+ sw->state = JITTER;
+ /* In End of Conversion interrupt mode, the AD714X
+ * continuously generates hardware interrupts.
+ */
+ ad714x_slider_use_com_int(ad714x, idx);
+ dev_dbg(ad714x->dev, "slider %d touched\n", idx);
+ }
+ break;
+
+ case JITTER:
+ if (c_state == mask) {
+ ad714x_slider_cal_sensor_val(ad714x, idx);
+ ad714x_slider_cal_highest_stage(ad714x, idx);
+ ad714x_slider_cal_abs_pos(ad714x, idx);
+ sw->flt_pos = sw->abs_pos;
+ sw->state = ACTIVE;
+ }
+ break;
+
+ case ACTIVE:
+ if (c_state == mask) {
+ if (h_state) {
+ ad714x_slider_cal_sensor_val(ad714x, idx);
+ ad714x_slider_cal_highest_stage(ad714x, idx);
+ ad714x_slider_cal_abs_pos(ad714x, idx);
+ ad714x_slider_cal_flt_pos(ad714x, idx);
+
+ input_report_abs(sw->input, ABS_X, sw->flt_pos);
+ input_report_key(sw->input, BTN_TOUCH, 1);
+ } else {
+ /* When the user lifts off the sensor, configure
+ * the AD714X back to threshold interrupt mode.
+ */
+ ad714x_slider_use_thr_int(ad714x, idx);
+ sw->state = IDLE;
+ input_report_key(sw->input, BTN_TOUCH, 0);
+ dev_dbg(ad714x->dev, "slider %d released\n",
+ idx);
+ }
+ input_sync(sw->input);
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+/*
+ * When the scroll wheel is activated, we compute the absolute position based
+ * on the sensor values. To calculate the position, we first determine the
+ * sensor that has the greatest response among the 8 sensors that constitute
+ * the scrollwheel. Then we determine the 2 sensors on either side of the
+ * sensor with the highest response and we apply weights to these sensors.
+ */
+static void ad714x_wheel_cal_highest_stage(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_wheel_plat *hw = &ad714x->hw->wheel[idx];
+ struct ad714x_wheel_drv *sw = &ad714x->sw->wheel[idx];
+
+ sw->pre_highest_stage = sw->highest_stage;
+ sw->highest_stage = ad714x_cal_highest_stage(ad714x, hw->start_stage,
+ hw->end_stage);
+
+ dev_dbg(ad714x->dev, "wheel %d highest_stage:%d\n", idx,
+ sw->highest_stage);
+}
+
+static void ad714x_wheel_cal_sensor_val(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_wheel_plat *hw = &ad714x->hw->wheel[idx];
+ int i;
+
+ for (i = hw->start_stage; i <= hw->end_stage; i++) {
+ ad714x->read(ad714x->dev, CDC_RESULT_S0 + i,
+ &ad714x->adc_reg[i]);
+ ad714x->read(ad714x->dev,
+ STAGE0_AMBIENT + i * PER_STAGE_REG_NUM,
+ &ad714x->amb_reg[i]);
+ if (ad714x->adc_reg[i] > ad714x->amb_reg[i])
+ ad714x->sensor_val[i] = ad714x->adc_reg[i] -
+ ad714x->amb_reg[i];
+ else
+ ad714x->sensor_val[i] = 0;
+ }
+}
+
+/*
+ * When the scroll wheel is activated, we compute the absolute position based
+ * on the sensor values. To calculate the position, we first determine the
+ * sensor that has the greatest response among the 8 sensors that constitute
+ * the scrollwheel. Then we determine the 2 sensors on either side of the
+ * sensor with the highest response and we apply weights to these sensors. The
+ * result of this computation gives us the mean value, which is defined by the
+ * following formula:
+ * For i= second_before_highest_stage to i= second_after_highest_stage
+ * v += Sensor response(i)*WEIGHT*(i+3)
+ * w += Sensor response(i)
+ * Mean_Value=v/w
+ * pos_on_scrollwheel = (Mean_Value - position_offset) / position_ratio
+ */
+
+#define WEIGHT_FACTOR 30
+/* This constant prevents the "PositionOffset" from reaching a big value */
+#define OFFSET_POSITION_CLAMP 120
+static void ad714x_wheel_cal_abs_pos(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_wheel_plat *hw = &ad714x->hw->wheel[idx];
+ struct ad714x_wheel_drv *sw = &ad714x->sw->wheel[idx];
+ int stage_num = hw->end_stage - hw->start_stage + 1;
+ int second_before, first_before, highest, first_after, second_after;
+ int a_param, b_param;
+
+ /* Calculate Mean value */
+
+ second_before = (sw->highest_stage + stage_num - 2) % stage_num;
+ first_before = (sw->highest_stage + stage_num - 1) % stage_num;
+ highest = sw->highest_stage;
+ first_after = (sw->highest_stage + stage_num + 1) % stage_num;
+ second_after = (sw->highest_stage + stage_num + 2) % stage_num;
+
+ if (((sw->highest_stage - hw->start_stage) > 1) &&
+ ((hw->end_stage - sw->highest_stage) > 1)) {
+ a_param = ad714x->sensor_val[second_before] *
+ (second_before - hw->start_stage + 3) +
+ ad714x->sensor_val[first_before] *
+ (second_before - hw->start_stage + 3) +
+ ad714x->sensor_val[highest] *
+ (second_before - hw->start_stage + 3) +
+ ad714x->sensor_val[first_after] *
+ (first_after - hw->start_stage + 3) +
+ ad714x->sensor_val[second_after] *
+ (second_after - hw->start_stage + 3);
+ } else {
+ a_param = ad714x->sensor_val[second_before] *
+ (second_before - hw->start_stage + 1) +
+ ad714x->sensor_val[first_before] *
+ (second_before - hw->start_stage + 2) +
+ ad714x->sensor_val[highest] *
+ (second_before - hw->start_stage + 3) +
+ ad714x->sensor_val[first_after] *
+ (first_after - hw->start_stage + 4) +
+ ad714x->sensor_val[second_after] *
+ (second_after - hw->start_stage + 5);
+ }
+ a_param *= WEIGHT_FACTOR;
+
+ b_param = ad714x->sensor_val[second_before] +
+ ad714x->sensor_val[first_before] +
+ ad714x->sensor_val[highest] +
+ ad714x->sensor_val[first_after] +
+ ad714x->sensor_val[second_after];
+
+ sw->pre_mean_value = sw->mean_value;
+ sw->mean_value = a_param / b_param;
+
+ /* Calculate the offset */
+
+ if ((sw->pre_highest_stage == hw->end_stage) &&
+ (sw->highest_stage == hw->start_stage))
+ sw->pos_offset = sw->mean_value;
+ else if ((sw->pre_highest_stage == hw->start_stage) &&
+ (sw->highest_stage == hw->end_stage))
+ sw->pos_offset = sw->pre_mean_value;
+
+ if (sw->pos_offset > OFFSET_POSITION_CLAMP)
+ sw->pos_offset = OFFSET_POSITION_CLAMP;
+
+ /* Calculate the mean value without the offset */
+
+ sw->pre_mean_value_no_offset = sw->mean_value_no_offset;
+ sw->mean_value_no_offset = sw->mean_value - sw->pos_offset;
+ if (sw->mean_value_no_offset < 0)
+ sw->mean_value_no_offset = 0;
+
+ /* Calculate ratio to scale down to NUMBER_OF_WANTED_POSITIONS */
+
+ if ((sw->pre_highest_stage == hw->end_stage) &&
+ (sw->highest_stage == hw->start_stage))
+ sw->pos_ratio = (sw->pre_mean_value_no_offset * 100) /
+ hw->max_coord;
+ else if ((sw->pre_highest_stage == hw->start_stage) &&
+ (sw->highest_stage == hw->end_stage))
+ sw->pos_ratio = (sw->mean_value_no_offset * 100) /
+ hw->max_coord;
+ sw->abs_pos = (sw->mean_value_no_offset * 100) / sw->pos_ratio;
+ if (sw->abs_pos > hw->max_coord)
+ sw->abs_pos = hw->max_coord;
+}
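+
+/*
+ * Worked example of the middle branch above (illustrative values): for an
+ * 8-stage wheel (stages 0..7) with the peak on stage 4 and responses of
+ * 100/300/100 codes on stages 3/4/5 (0 elsewhere),
+ *   a_param = 30 * (100*5 + 300*5 + 100*8) = 84000
+ *   b_param = 100 + 300 + 100 = 500
+ * so mean_value = 168; the final position is then derived from
+ * (mean_value_no_offset * 100) / pos_ratio and clamped to max_coord.
+ */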
+
+static void ad714x_wheel_cal_flt_pos(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_wheel_plat *hw = &ad714x->hw->wheel[idx];
+ struct ad714x_wheel_drv *sw = &ad714x->sw->wheel[idx];
+ if (((sw->pre_highest_stage == hw->end_stage) &&
+ (sw->highest_stage == hw->start_stage)) ||
+ ((sw->pre_highest_stage == hw->start_stage) &&
+ (sw->highest_stage == hw->end_stage)))
+ sw->flt_pos = sw->abs_pos;
+ else
+ sw->flt_pos = ((sw->flt_pos * 30) + (sw->abs_pos * 71)) / 100;
+
+ if (sw->flt_pos > hw->max_coord)
+ sw->flt_pos = hw->max_coord;
+}
+
+static void ad714x_wheel_use_com_int(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_wheel_plat *hw = &ad714x->hw->wheel[idx];
+
+ ad714x_use_com_int(ad714x, hw->start_stage, hw->end_stage);
+}
+
+static void ad714x_wheel_use_thr_int(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_wheel_plat *hw = &ad714x->hw->wheel[idx];
+
+ ad714x_use_thr_int(ad714x, hw->start_stage, hw->end_stage);
+}
+
+static void ad714x_wheel_state_machine(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_wheel_plat *hw = &ad714x->hw->wheel[idx];
+ struct ad714x_wheel_drv *sw = &ad714x->sw->wheel[idx];
+ unsigned short h_state, c_state;
+ unsigned short mask;
+
+ mask = ((1 << (hw->end_stage + 1)) - 1) - ((1 << hw->start_stage) - 1);
+
+ h_state = ad714x->h_state & mask;
+ c_state = ad714x->c_state & mask;
+
+ switch (sw->state) {
+ case IDLE:
+ if (h_state) {
+ sw->state = JITTER;
+ /* In End of Conversion interrupt mode, the AD714X
+ * continuously generates hardware interrupts.
+ */
+ ad714x_wheel_use_com_int(ad714x, idx);
+ dev_dbg(ad714x->dev, "wheel %d touched\n", idx);
+ }
+ break;
+
+ case JITTER:
+ if (c_state == mask) {
+ ad714x_wheel_cal_sensor_val(ad714x, idx);
+ ad714x_wheel_cal_highest_stage(ad714x, idx);
+ ad714x_wheel_cal_abs_pos(ad714x, idx);
+ sw->flt_pos = sw->abs_pos;
+ sw->state = ACTIVE;
+ }
+ break;
+
+ case ACTIVE:
+ if (c_state == mask) {
+ if (h_state) {
+ ad714x_wheel_cal_sensor_val(ad714x, idx);
+ ad714x_wheel_cal_highest_stage(ad714x, idx);
+ ad714x_wheel_cal_abs_pos(ad714x, idx);
+ ad714x_wheel_cal_flt_pos(ad714x, idx);
+
+ input_report_abs(sw->input, ABS_WHEEL,
+ sw->abs_pos);
+ input_report_key(sw->input, BTN_TOUCH, 1);
+ } else {
+ /* When the user lifts off the sensor, configure
+ * the AD714X back to threshold interrupt mode.
+ */
+ ad714x_wheel_use_thr_int(ad714x, idx);
+ sw->state = IDLE;
+ input_report_key(sw->input, BTN_TOUCH, 0);
+
+ dev_dbg(ad714x->dev, "wheel %d released\n",
+ idx);
+ }
+ input_sync(sw->input);
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void touchpad_cal_sensor_val(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_touchpad_plat *hw = &ad714x->hw->touchpad[idx];
+ int i;
+
+ for (i = hw->x_start_stage; i <= hw->x_end_stage; i++) {
+ ad714x->read(ad714x->dev, CDC_RESULT_S0 + i,
+ &ad714x->adc_reg[i]);
+ ad714x->read(ad714x->dev,
+ STAGE0_AMBIENT + i * PER_STAGE_REG_NUM,
+ &ad714x->amb_reg[i]);
+ if (ad714x->adc_reg[i] > ad714x->amb_reg[i])
+ ad714x->sensor_val[i] = ad714x->adc_reg[i] -
+ ad714x->amb_reg[i];
+ else
+ ad714x->sensor_val[i] = 0;
+ }
+}
+
+static void touchpad_cal_highest_stage(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_touchpad_plat *hw = &ad714x->hw->touchpad[idx];
+ struct ad714x_touchpad_drv *sw = &ad714x->sw->touchpad[idx];
+
+ sw->x_highest_stage = ad714x_cal_highest_stage(ad714x,
+ hw->x_start_stage, hw->x_end_stage);
+ sw->y_highest_stage = ad714x_cal_highest_stage(ad714x,
+ hw->y_start_stage, hw->y_end_stage);
+
+ dev_dbg(ad714x->dev,
+ "touchpad %d x_highest_stage:%d, y_highest_stage:%d\n",
+ idx, sw->x_highest_stage, sw->y_highest_stage);
+}
+
+/*
+ * If 2 fingers are touching the sensor then 2 peaks can be observed in the
+ * distribution.
+ * The arithmetic does not yet support computing absolute coordinates for
+ * multi-touch.
+ */
+static int touchpad_check_second_peak(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_touchpad_plat *hw = &ad714x->hw->touchpad[idx];
+ struct ad714x_touchpad_drv *sw = &ad714x->sw->touchpad[idx];
+ int i;
+
+ for (i = hw->x_start_stage; i < sw->x_highest_stage; i++) {
+ if ((ad714x->sensor_val[i] - ad714x->sensor_val[i + 1])
+ > (ad714x->sensor_val[i + 1] / 10))
+ return 1;
+ }
+
+ for (i = sw->x_highest_stage; i < hw->x_end_stage; i++) {
+ if ((ad714x->sensor_val[i + 1] - ad714x->sensor_val[i])
+ > (ad714x->sensor_val[i] / 10))
+ return 1;
+ }
+
+ for (i = hw->y_start_stage; i < sw->y_highest_stage; i++) {
+ if ((ad714x->sensor_val[i] - ad714x->sensor_val[i + 1])
+ > (ad714x->sensor_val[i + 1] / 10))
+ return 1;
+ }
+
+ for (i = sw->y_highest_stage; i < hw->y_end_stage; i++) {
+ if ((ad714x->sensor_val[i + 1] - ad714x->sensor_val[i])
+ > (ad714x->sensor_val[i] / 10))
+ return 1;
+ }
+
+ return 0;
+}
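+
+/*
+ * Worked example (illustrative values): on the way up to the peak the
+ * responses should be non-decreasing, so a drop of more than 10% of the
+ * next stage indicates a second peak.  With sensor_val[i] = 120 and
+ * sensor_val[i + 1] = 100, the first loop sees 120 - 100 = 20 > 100 / 10,
+ * the result is treated as a possible two-finger touch, and the position
+ * is not updated.
+ */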
+
+/*
+ * If only one finger is used to activate the touch pad then only 1 peak will be
+ * registered in the distribution. This peak and the 2 adjacent sensors will be
+ * used in the calculation of the absolute position. This will prevent hand
+ * shadows from affecting the absolute position calculation.
+ */
+static void touchpad_cal_abs_pos(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_touchpad_plat *hw = &ad714x->hw->touchpad[idx];
+ struct ad714x_touchpad_drv *sw = &ad714x->sw->touchpad[idx];
+
+ sw->x_abs_pos = ad714x_cal_abs_pos(ad714x, hw->x_start_stage,
+ hw->x_end_stage, sw->x_highest_stage, hw->x_max_coord);
+ sw->y_abs_pos = ad714x_cal_abs_pos(ad714x, hw->y_start_stage,
+ hw->y_end_stage, sw->y_highest_stage, hw->y_max_coord);
+
+ dev_dbg(ad714x->dev, "touchpad %d absolute position:(%d, %d)\n", idx,
+ sw->x_abs_pos, sw->y_abs_pos);
+}
+
+static void touchpad_cal_flt_pos(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_touchpad_drv *sw = &ad714x->sw->touchpad[idx];
+
+ sw->x_flt_pos = (sw->x_flt_pos * (10 - 4) +
+ sw->x_abs_pos * 4)/10;
+ sw->y_flt_pos = (sw->y_flt_pos * (10 - 4) +
+ sw->y_abs_pos * 4)/10;
+
+ dev_dbg(ad714x->dev, "touchpad %d filter position:(%d, %d)\n",
+ idx, sw->x_flt_pos, sw->y_flt_pos);
+}
+
+/*
+ * To prevent distortion from showing in the absolute position, it is
+ * necessary to detect the end points. When endpoints are detected, the
+ * driver stops updating the status variables with absolute positions.
+ * End points are detected on the 4 edges of the touchpad sensor. The
+ * method to detect them is the same for all 4.
+ * To detect the end points, the firmware computes the difference in
+ * percent between the sensor on the edge and the adjacent one. The
+ * difference is calculated in percent in order to make the end point
+ * detection independent of the pressure.
+ */
+
+#define LEFT_END_POINT_DETECTION_LEVEL 550
+#define RIGHT_END_POINT_DETECTION_LEVEL 750
+#define LEFT_RIGHT_END_POINT_DEAVTIVALION_LEVEL 850
+#define TOP_END_POINT_DETECTION_LEVEL 550
+#define BOTTOM_END_POINT_DETECTION_LEVEL 950
+#define TOP_BOTTOM_END_POINT_DEAVTIVALION_LEVEL 700
+static int touchpad_check_endpoint(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_touchpad_plat *hw = &ad714x->hw->touchpad[idx];
+ struct ad714x_touchpad_drv *sw = &ad714x->sw->touchpad[idx];
+ int percent_sensor_diff;
+
+ /* left endpoint detect */
+ percent_sensor_diff = (ad714x->sensor_val[hw->x_start_stage] -
+ ad714x->sensor_val[hw->x_start_stage + 1]) * 100 /
+ ad714x->sensor_val[hw->x_start_stage + 1];
+ if (!sw->left_ep) {
+ if (percent_sensor_diff >= LEFT_END_POINT_DETECTION_LEVEL) {
+ sw->left_ep = 1;
+ sw->left_ep_val =
+ ad714x->sensor_val[hw->x_start_stage + 1];
+ }
+ } else {
+ if ((percent_sensor_diff < LEFT_END_POINT_DETECTION_LEVEL) &&
+ (ad714x->sensor_val[hw->x_start_stage + 1] >
+ LEFT_RIGHT_END_POINT_DEAVTIVALION_LEVEL + sw->left_ep_val))
+ sw->left_ep = 0;
+ }
+
+ /* right endpoint detect */
+ percent_sensor_diff = (ad714x->sensor_val[hw->x_end_stage] -
+ ad714x->sensor_val[hw->x_end_stage - 1]) * 100 /
+ ad714x->sensor_val[hw->x_end_stage - 1];
+ if (!sw->right_ep) {
+ if (percent_sensor_diff >= RIGHT_END_POINT_DETECTION_LEVEL) {
+ sw->right_ep = 1;
+ sw->right_ep_val =
+ ad714x->sensor_val[hw->x_end_stage - 1];
+ }
+ } else {
+ if ((percent_sensor_diff < RIGHT_END_POINT_DETECTION_LEVEL) &&
+ (ad714x->sensor_val[hw->x_end_stage - 1] >
+ LEFT_RIGHT_END_POINT_DEAVTIVALION_LEVEL + sw->right_ep_val))
+ sw->right_ep = 0;
+ }
+
+ /* top endpoint detect */
+ percent_sensor_diff = (ad714x->sensor_val[hw->y_start_stage] -
+ ad714x->sensor_val[hw->y_start_stage + 1]) * 100 /
+ ad714x->sensor_val[hw->y_start_stage + 1];
+ if (!sw->top_ep) {
+ if (percent_sensor_diff >= TOP_END_POINT_DETECTION_LEVEL) {
+ sw->top_ep = 1;
+ sw->top_ep_val =
+ ad714x->sensor_val[hw->y_start_stage + 1];
+ }
+ } else {
+ if ((percent_sensor_diff < TOP_END_POINT_DETECTION_LEVEL) &&
+ (ad714x->sensor_val[hw->y_start_stage + 1] >
+ TOP_BOTTOM_END_POINT_DEACTIVATION_LEVEL + sw->top_ep_val))
+ sw->top_ep = 0;
+ }
+
+ /* bottom endpoint detect */
+ percent_sensor_diff = (ad714x->sensor_val[hw->y_end_stage] -
+ ad714x->sensor_val[hw->y_end_stage - 1]) * 100 /
+ ad714x->sensor_val[hw->y_end_stage - 1];
+ if (!sw->bottom_ep) {
+ if (percent_sensor_diff >= BOTTOM_END_POINT_DETECTION_LEVEL) {
+ sw->bottom_ep = 1;
+ sw->bottom_ep_val =
+ ad714x->sensor_val[hw->y_end_stage - 1];
+ }
+ } else {
+ if ((percent_sensor_diff < BOTTOM_END_POINT_DETECTION_LEVEL) &&
+ (ad714x->sensor_val[hw->y_end_stage - 1] >
+ TOP_BOTTOM_END_POINT_DEACTIVATION_LEVEL + sw->bottom_ep_val))
+ sw->bottom_ep = 0;
+ }
+
+ return sw->left_ep || sw->right_ep || sw->top_ep || sw->bottom_ep;
+}
+
+static void touchpad_use_com_int(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_touchpad_plat *hw = &ad714x->hw->touchpad[idx];
+
+ ad714x_use_com_int(ad714x, hw->x_start_stage, hw->x_end_stage);
+}
+
+static void touchpad_use_thr_int(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_touchpad_plat *hw = &ad714x->hw->touchpad[idx];
+
+ ad714x_use_thr_int(ad714x, hw->x_start_stage, hw->x_end_stage);
+ ad714x_use_thr_int(ad714x, hw->y_start_stage, hw->y_end_stage);
+}
+
+static void ad714x_touchpad_state_machine(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_touchpad_plat *hw = &ad714x->hw->touchpad[idx];
+ struct ad714x_touchpad_drv *sw = &ad714x->sw->touchpad[idx];
+ unsigned short h_state, c_state;
+ unsigned short mask;
+
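+ /* Build a mask with one bit set for each conversion stage assigned to
+ * this touchpad's X and Y axes, so only those bits of the interrupt
+ * status registers are examined below.
+ */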
+ mask = (((1 << (hw->x_end_stage + 1)) - 1) -
+ ((1 << hw->x_start_stage) - 1)) +
+ (((1 << (hw->y_end_stage + 1)) - 1) -
+ ((1 << hw->y_start_stage) - 1));
+
+ h_state = ad714x->h_state & mask;
+ c_state = ad714x->c_state & mask;
+
+ switch (sw->state) {
+ case IDLE:
+ if (h_state) {
+ sw->state = JITTER;
+ /* In End of Conversion interrupt mode, the AD714X
+ * continuously generates hardware interrupts.
+ */
+ touchpad_use_com_int(ad714x, idx);
+ dev_dbg(ad714x->dev, "touchpad %d touched\n", idx);
+ }
+ break;
+
+ case JITTER:
+ if (c_state == mask) {
+ touchpad_cal_sensor_val(ad714x, idx);
+ touchpad_cal_highest_stage(ad714x, idx);
+ if ((!touchpad_check_second_peak(ad714x, idx)) &&
+ (!touchpad_check_endpoint(ad714x, idx))) {
+ dev_dbg(ad714x->dev,
+ "touchpad%d, 2 fingers or endpoint\n",
+ idx);
+ touchpad_cal_abs_pos(ad714x, idx);
+ sw->x_flt_pos = sw->x_abs_pos;
+ sw->y_flt_pos = sw->y_abs_pos;
+ sw->state = ACTIVE;
+ }
+ }
+ break;
+
+ case ACTIVE:
+ if (c_state == mask) {
+ if (h_state) {
+ touchpad_cal_sensor_val(ad714x, idx);
+ touchpad_cal_highest_stage(ad714x, idx);
+ if ((!touchpad_check_second_peak(ad714x, idx))
+ && (!touchpad_check_endpoint(ad714x, idx))) {
+ touchpad_cal_abs_pos(ad714x, idx);
+ touchpad_cal_flt_pos(ad714x, idx);
+ input_report_abs(sw->input, ABS_X,
+ sw->x_flt_pos);
+ input_report_abs(sw->input, ABS_Y,
+ sw->y_flt_pos);
+ input_report_key(sw->input, BTN_TOUCH,
+ 1);
+ }
+ } else {
+ /* When the user lifts off the sensor, configure
+ * the AD714X back to threshold interrupt mode.
+ */
+ touchpad_use_thr_int(ad714x, idx);
+ sw->state = IDLE;
+ input_report_key(sw->input, BTN_TOUCH, 0);
+ dev_dbg(ad714x->dev, "touchpad %d released\n",
+ idx);
+ }
+ input_sync(sw->input);
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int ad714x_hw_detect(struct ad714x_chip *ad714x)
+{
+ unsigned short data;
+
+ ad714x->read(ad714x->dev, AD714X_PARTID_REG, &data);
+ switch (data & 0xFFF0) {
+ case AD7142_PARTID:
+ ad714x->product = 0x7142;
+ ad714x->version = data & 0xF;
+ dev_info(ad714x->dev, "found AD7142 captouch, rev:%d\n",
+ ad714x->version);
+ return 0;
+
+ case AD7143_PARTID:
+ ad714x->product = 0x7143;
+ ad714x->version = data & 0xF;
+ dev_info(ad714x->dev, "found AD7143 captouch, rev:%d\n",
+ ad714x->version);
+ return 0;
+
+ case AD7147_PARTID:
+ ad714x->product = 0x7147;
+ ad714x->version = data & 0xF;
+ dev_info(ad714x->dev, "found AD7147(A) captouch, rev:%d\n",
+ ad714x->version);
+ return 0;
+
+ case AD7148_PARTID:
+ ad714x->product = 0x7148;
+ ad714x->version = data & 0xF;
+ dev_info(ad714x->dev, "found AD7148 captouch, rev:%d\n",
+ ad714x->version);
+ return 0;
+
+ default:
+ dev_err(ad714x->dev,
+ "fail to detect AD714X captouch, read ID is %04x\n",
+ data);
+ return -ENODEV;
+ }
+}
+
+static void ad714x_hw_init(struct ad714x_chip *ad714x)
+{
+ int i, j;
+ unsigned short reg_base;
+ unsigned short data;
+
+ /* configuration CDC and interrupts */
+
+ for (i = 0; i < STAGE_NUM; i++) {
+ reg_base = AD714X_STAGECFG_REG + i * STAGE_CFGREG_NUM;
+ for (j = 0; j < STAGE_CFGREG_NUM; j++)
+ ad714x->write(ad714x->dev, reg_base + j,
+ ad714x->hw->stage_cfg_reg[i][j]);
+ }
+
+ for (i = 0; i < SYS_CFGREG_NUM; i++)
+ ad714x->write(ad714x->dev, AD714X_SYSCFG_REG + i,
+ ad714x->hw->sys_cfg_reg[i]);
+ for (i = 0; i < SYS_CFGREG_NUM; i++)
+ ad714x->read(ad714x->dev, AD714X_SYSCFG_REG + i,
+ &data);
+
+ ad714x->write(ad714x->dev, AD714X_STG_CAL_EN_REG, 0xFFF);
+
+ /* clear all interrupts */
+ ad714x->read(ad714x->dev, STG_LOW_INT_STA_REG, &data);
+ ad714x->read(ad714x->dev, STG_HIGH_INT_STA_REG, &data);
+ ad714x->read(ad714x->dev, STG_COM_INT_STA_REG, &data);
+}
+
+static irqreturn_t ad714x_interrupt_thread(int irq, void *data)
+{
+ struct ad714x_chip *ad714x = data;
+ int i;
+
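+ /* Read the three interrupt status registers once per interrupt and
+ * feed each configured sensor group's state machine.
+ */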
+ mutex_lock(&ad714x->mutex);
+
+ ad714x->read(ad714x->dev, STG_LOW_INT_STA_REG, &ad714x->l_state);
+ ad714x->read(ad714x->dev, STG_HIGH_INT_STA_REG, &ad714x->h_state);
+ ad714x->read(ad714x->dev, STG_COM_INT_STA_REG, &ad714x->c_state);
+
+ for (i = 0; i < ad714x->hw->button_num; i++)
+ ad714x_button_state_machine(ad714x, i);
+ for (i = 0; i < ad714x->hw->slider_num; i++)
+ ad714x_slider_state_machine(ad714x, i);
+ for (i = 0; i < ad714x->hw->wheel_num; i++)
+ ad714x_wheel_state_machine(ad714x, i);
+ for (i = 0; i < ad714x->hw->touchpad_num; i++)
+ ad714x_touchpad_state_machine(ad714x, i);
+
+ mutex_unlock(&ad714x->mutex);
+
+ return IRQ_HANDLED;
+}
+
+#define MAX_DEVICE_NUM 8
+struct ad714x_chip *ad714x_probe(struct device *dev, u16 bus_type, int irq,
+ ad714x_read_t read, ad714x_write_t write)
+{
+ int i, alloc_idx;
+ int error;
+ struct input_dev *input[MAX_DEVICE_NUM];
+
+ struct ad714x_platform_data *plat_data = dev->platform_data;
+ struct ad714x_chip *ad714x;
+ void *drv_mem;
+
+ struct ad714x_button_drv *bt_drv;
+ struct ad714x_slider_drv *sd_drv;
+ struct ad714x_wheel_drv *wl_drv;
+ struct ad714x_touchpad_drv *tp_drv;
+
+
+ if (irq <= 0) {
+ dev_err(dev, "IRQ not configured!\n");
+ error = -EINVAL;
+ goto err_out;
+ }
+
+ if (dev->platform_data == NULL) {
+ dev_err(dev, "platform data for ad714x doesn't exist\n");
+ error = -EINVAL;
+ goto err_out;
+ }
+
+ ad714x = kzalloc(sizeof(*ad714x) + sizeof(*ad714x->sw) +
+ sizeof(*sd_drv) * plat_data->slider_num +
+ sizeof(*wl_drv) * plat_data->wheel_num +
+ sizeof(*tp_drv) * plat_data->touchpad_num +
+ sizeof(*bt_drv) * plat_data->button_num, GFP_KERNEL);
+ if (!ad714x) {
+ error = -ENOMEM;
+ goto err_out;
+ }
+
+ ad714x->hw = plat_data;
+
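+ /* Carve the per-slider/wheel/touchpad/button driver state out of the
+ * single allocation made above, immediately after the chip structure.
+ */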
+ drv_mem = ad714x + 1;
+ ad714x->sw = drv_mem;
+ drv_mem += sizeof(*ad714x->sw);
+ ad714x->sw->slider = sd_drv = drv_mem;
+ drv_mem += sizeof(*sd_drv) * ad714x->hw->slider_num;
+ ad714x->sw->wheel = wl_drv = drv_mem;
+ drv_mem += sizeof(*wl_drv) * ad714x->hw->wheel_num;
+ ad714x->sw->touchpad = tp_drv = drv_mem;
+ drv_mem += sizeof(*tp_drv) * ad714x->hw->touchpad_num;
+ ad714x->sw->button = bt_drv = drv_mem;
+ drv_mem += sizeof(*bt_drv) * ad714x->hw->button_num;
+
+ ad714x->read = read;
+ ad714x->write = write;
+ ad714x->irq = irq;
+ ad714x->dev = dev;
+
+ error = ad714x_hw_detect(ad714x);
+ if (error)
+ goto err_free_mem;
+
+ /* initialize and request sw/hw resources */
+
+ ad714x_hw_init(ad714x);
+ mutex_init(&ad714x->mutex);
+
+ /*
+ * Allocate and register AD714X input device
+ */
+ alloc_idx = 0;
+
+ /* a slider uses one input_dev instance */
+ if (ad714x->hw->slider_num > 0) {
+ struct ad714x_slider_plat *sd_plat = ad714x->hw->slider;
+
+ for (i = 0; i < ad714x->hw->slider_num; i++) {
+ sd_drv[i].input = input[alloc_idx] = input_allocate_device();
+ if (!input[alloc_idx]) {
+ error = -ENOMEM;
+ goto err_free_dev;
+ }
+
+ __set_bit(EV_ABS, input[alloc_idx]->evbit);
+ __set_bit(EV_KEY, input[alloc_idx]->evbit);
+ __set_bit(ABS_X, input[alloc_idx]->absbit);
+ __set_bit(BTN_TOUCH, input[alloc_idx]->keybit);
+ input_set_abs_params(input[alloc_idx],
+ ABS_X, 0, sd_plat->max_coord, 0, 0);
+
+ input[alloc_idx]->id.bustype = bus_type;
+ input[alloc_idx]->id.product = ad714x->product;
+ input[alloc_idx]->id.version = ad714x->version;
+
+ error = input_register_device(input[alloc_idx]);
+ if (error)
+ goto err_free_dev;
+
+ alloc_idx++;
+ }
+ }
+
+ /* a wheel uses one input_dev instance */
+ if (ad714x->hw->wheel_num > 0) {
+ struct ad714x_wheel_plat *wl_plat = ad714x->hw->wheel;
+
+ for (i = 0; i < ad714x->hw->wheel_num; i++) {
+ wl_drv[i].input = input[alloc_idx] = input_allocate_device();
+ if (!input[alloc_idx]) {
+ error = -ENOMEM;
+ goto err_free_dev;
+ }
+
+ __set_bit(EV_KEY, input[alloc_idx]->evbit);
+ __set_bit(EV_ABS, input[alloc_idx]->evbit);
+ __set_bit(ABS_WHEEL, input[alloc_idx]->absbit);
+ __set_bit(BTN_TOUCH, input[alloc_idx]->keybit);
+ input_set_abs_params(input[alloc_idx],
+ ABS_WHEEL, 0, wl_plat->max_coord, 0, 0);
+
+ input[alloc_idx]->id.bustype = bus_type;
+ input[alloc_idx]->id.product = ad714x->product;
+ input[alloc_idx]->id.version = ad714x->version;
+
+ error = input_register_device(input[alloc_idx]);
+ if (error)
+ goto err_free_dev;
+
+ alloc_idx++;
+ }
+ }
+
+ /* a touchpad uses one input_dev instance */
+ if (ad714x->hw->touchpad_num > 0) {
+ struct ad714x_touchpad_plat *tp_plat = ad714x->hw->touchpad;
+
+ for (i = 0; i < ad714x->hw->touchpad_num; i++) {
+ tp_drv[i].input = input[alloc_idx] = input_allocate_device();
+ if (!input[alloc_idx]) {
+ error = -ENOMEM;
+ goto err_free_dev;
+ }
+
+ __set_bit(EV_ABS, input[alloc_idx]->evbit);
+ __set_bit(EV_KEY, input[alloc_idx]->evbit);
+ __set_bit(ABS_X, input[alloc_idx]->absbit);
+ __set_bit(ABS_Y, input[alloc_idx]->absbit);
+ __set_bit(BTN_TOUCH, input[alloc_idx]->keybit);
+ input_set_abs_params(input[alloc_idx],
+ ABS_X, 0, tp_plat->x_max_coord, 0, 0);
+ input_set_abs_params(input[alloc_idx],
+ ABS_Y, 0, tp_plat->y_max_coord, 0, 0);
+
+ input[alloc_idx]->id.bustype = bus_type;
+ input[alloc_idx]->id.product = ad714x->product;
+ input[alloc_idx]->id.version = ad714x->version;
+
+ error = input_register_device(input[alloc_idx]);
+ if (error)
+ goto err_free_dev;
+
+ alloc_idx++;
+ }
+ }
+
+ /* all buttons use one input node */
+ if (ad714x->hw->button_num > 0) {
+ struct ad714x_button_plat *bt_plat = ad714x->hw->button;
+
+ input[alloc_idx] = input_allocate_device();
+ if (!input[alloc_idx]) {
+ error = -ENOMEM;
+ goto err_free_dev;
+ }
+
+ __set_bit(EV_KEY, input[alloc_idx]->evbit);
+ for (i = 0; i < ad714x->hw->button_num; i++) {
+ bt_drv[i].input = input[alloc_idx];
+ __set_bit(bt_plat[i].keycode, input[alloc_idx]->keybit);
+ }
+
+ input[alloc_idx]->id.bustype = bus_type;
+ input[alloc_idx]->id.product = ad714x->product;
+ input[alloc_idx]->id.version = ad714x->version;
+
+ error = input_register_device(input[alloc_idx]);
+ if (error)
+ goto err_free_dev;
+
+ alloc_idx++;
+ }
+
+ error = request_threaded_irq(ad714x->irq, NULL, ad714x_interrupt_thread,
+ IRQF_TRIGGER_FALLING, "ad714x_captouch", ad714x);
+ if (error) {
+ dev_err(dev, "can't allocate irq %d\n", ad714x->irq);
+ goto err_unreg_dev;
+ }
+
+ return ad714x;
+
+ err_free_dev:
+ dev_err(dev, "failed to setup AD714x input device %i\n", alloc_idx);
+ input_free_device(input[alloc_idx]);
+ err_unreg_dev:
+ while (--alloc_idx >= 0)
+ input_unregister_device(input[alloc_idx]);
+ err_free_mem:
+ kfree(ad714x);
+ err_out:
+ return ERR_PTR(error);
+}
+EXPORT_SYMBOL(ad714x_probe);
+
+void ad714x_remove(struct ad714x_chip *ad714x)
+{
+ struct ad714x_platform_data *hw = ad714x->hw;
+ struct ad714x_driver_data *sw = ad714x->sw;
+ int i;
+
+ free_irq(ad714x->irq, ad714x);
+
+ /* unregister and free all input devices */
+
+ for (i = 0; i < hw->slider_num; i++)
+ input_unregister_device(sw->slider[i].input);
+
+ for (i = 0; i < hw->wheel_num; i++)
+ input_unregister_device(sw->wheel[i].input);
+
+ for (i = 0; i < hw->touchpad_num; i++)
+ input_unregister_device(sw->touchpad[i].input);
+
+ if (hw->button_num)
+ input_unregister_device(sw->button[0].input);
+
+ kfree(ad714x);
+}
+EXPORT_SYMBOL(ad714x_remove);
+
+#ifdef CONFIG_PM
+int ad714x_disable(struct ad714x_chip *ad714x)
+{
+ unsigned short data;
+
+ dev_dbg(ad714x->dev, "%s enter\n", __func__);
+
+ mutex_lock(&ad714x->mutex);
+
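+ /* Setting the two low-order bits of the power control register is
+ * assumed to select the chip's lowest-power (shutdown) mode; the
+ * original value is restored by ad714x_enable().
+ */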
+ data = ad714x->hw->sys_cfg_reg[AD714X_PWR_CTRL] | 0x3;
+ ad714x->write(ad714x->dev, AD714X_PWR_CTRL, data);
+
+ mutex_unlock(&ad714x->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(ad714x_disable);
+
+int ad714x_enable(struct ad714x_chip *ad714x)
+{
+ unsigned short data;
+
+ dev_dbg(ad714x->dev, "%s enter\n", __func__);
+
+ mutex_lock(&ad714x->mutex);
+
+ /* resume to non-shutdown mode */
+
+ ad714x->write(ad714x->dev, AD714X_PWR_CTRL,
+ ad714x->hw->sys_cfg_reg[AD714X_PWR_CTRL]);
+
+ /* make sure the interrupt output line is not left low after resume,
+ * otherwise we would never see another falling edge and never enter
+ * the interrupt handler again
+ */
+
+ ad714x->read(ad714x->dev, STG_LOW_INT_STA_REG, &data);
+ ad714x->read(ad714x->dev, STG_HIGH_INT_STA_REG, &data);
+ ad714x->read(ad714x->dev, STG_COM_INT_STA_REG, &data);
+
+ mutex_unlock(&ad714x->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(ad714x_enable);
+#endif
+
+MODULE_DESCRIPTION("Analog Devices AD714X Capacitance Touch Sensor Driver");
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/ad714x.h b/drivers/input/misc/ad714x.h
new file mode 100644
index 000000000000..45c54fb13f07
--- /dev/null
+++ b/drivers/input/misc/ad714x.h
@@ -0,0 +1,26 @@
+/*
+ * AD714X CapTouch Programmable Controller driver (bus interfaces)
+ *
+ * Copyright 2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef _AD714X_H_
+#define _AD714X_H_
+
+#include <linux/types.h>
+
+struct device;
+struct ad714x_chip;
+
+typedef int (*ad714x_read_t)(struct device *, unsigned short, unsigned short *);
+typedef int (*ad714x_write_t)(struct device *, unsigned short, unsigned short);
+
+int ad714x_disable(struct ad714x_chip *ad714x);
+int ad714x_enable(struct ad714x_chip *ad714x);
+struct ad714x_chip *ad714x_probe(struct device *dev, u16 bus_type, int irq,
+ ad714x_read_t read, ad714x_write_t write);
+void ad714x_remove(struct ad714x_chip *ad714x);
+
+#endif
diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c
index 2124b99378bb..babef88ba1e0 100644
--- a/drivers/input/misc/ati_remote2.c
+++ b/drivers/input/misc/ati_remote2.c
@@ -38,7 +38,8 @@ enum {
};
static int ati_remote2_set_mask(const char *val,
- struct kernel_param *kp, unsigned int max)
+ const struct kernel_param *kp,
+ unsigned int max)
{
unsigned long mask;
int ret;
@@ -59,28 +60,31 @@ static int ati_remote2_set_mask(const char *val,
}
static int ati_remote2_set_channel_mask(const char *val,
- struct kernel_param *kp)
+ const struct kernel_param *kp)
{
pr_debug("%s()\n", __func__);
return ati_remote2_set_mask(val, kp, ATI_REMOTE2_MAX_CHANNEL_MASK);
}
-static int ati_remote2_get_channel_mask(char *buffer, struct kernel_param *kp)
+static int ati_remote2_get_channel_mask(char *buffer,
+ const struct kernel_param *kp)
{
pr_debug("%s()\n", __func__);
return sprintf(buffer, "0x%04x", *(unsigned int *)kp->arg);
}
-static int ati_remote2_set_mode_mask(const char *val, struct kernel_param *kp)
+static int ati_remote2_set_mode_mask(const char *val,
+ const struct kernel_param *kp)
{
pr_debug("%s()\n", __func__);
return ati_remote2_set_mask(val, kp, ATI_REMOTE2_MAX_MODE_MASK);
}
-static int ati_remote2_get_mode_mask(char *buffer, struct kernel_param *kp)
+static int ati_remote2_get_mode_mask(char *buffer,
+ const struct kernel_param *kp)
{
pr_debug("%s()\n", __func__);
@@ -89,15 +93,19 @@ static int ati_remote2_get_mode_mask(char *buffer, struct kernel_param *kp)
static unsigned int channel_mask = ATI_REMOTE2_MAX_CHANNEL_MASK;
#define param_check_channel_mask(name, p) __param_check(name, p, unsigned int)
-#define param_set_channel_mask ati_remote2_set_channel_mask
-#define param_get_channel_mask ati_remote2_get_channel_mask
+static struct kernel_param_ops param_ops_channel_mask = {
+ .set = ati_remote2_set_channel_mask,
+ .get = ati_remote2_get_channel_mask,
+};
module_param(channel_mask, channel_mask, 0644);
MODULE_PARM_DESC(channel_mask, "Bitmask of channels to accept <15:Channel16>...<1:Channel2><0:Channel1>");
static unsigned int mode_mask = ATI_REMOTE2_MAX_MODE_MASK;
#define param_check_mode_mask(name, p) __param_check(name, p, unsigned int)
-#define param_set_mode_mask ati_remote2_set_mode_mask
-#define param_get_mode_mask ati_remote2_get_mode_mask
+static struct kernel_param_ops param_ops_mode_mask = {
+ .set = ati_remote2_set_mode_mask,
+ .get = ati_remote2_get_mode_mask,
+};
module_param(mode_mask, mode_mask, 0644);
MODULE_PARM_DESC(mode_mask, "Bitmask of modes to accept <4:PC><3:AUX4><2:AUX3><1:AUX2><0:AUX1>");
diff --git a/drivers/input/misc/pcf8574_keypad.c b/drivers/input/misc/pcf8574_keypad.c
new file mode 100644
index 000000000000..5c3ac4e0b055
--- /dev/null
+++ b/drivers/input/misc/pcf8574_keypad.c
@@ -0,0 +1,227 @@
+/*
+ * Driver for a keypad w/16 buttons connected to a PCF8574 I2C I/O expander
+ *
+ * Copyright 2005-2008 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#define DRV_NAME "pcf8574_keypad"
+
+static const unsigned char pcf8574_kp_btncode[] = {
+ [0] = KEY_RESERVED,
+ [1] = KEY_ENTER,
+ [2] = KEY_BACKSLASH,
+ [3] = KEY_0,
+ [4] = KEY_RIGHTBRACE,
+ [5] = KEY_C,
+ [6] = KEY_9,
+ [7] = KEY_8,
+ [8] = KEY_7,
+ [9] = KEY_B,
+ [10] = KEY_6,
+ [11] = KEY_5,
+ [12] = KEY_4,
+ [13] = KEY_A,
+ [14] = KEY_3,
+ [15] = KEY_2,
+ [16] = KEY_1
+};
+
+struct kp_data {
+ unsigned short btncode[ARRAY_SIZE(pcf8574_kp_btncode)];
+ struct input_dev *idev;
+ struct i2c_client *client;
+ char name[64];
+ char phys[32];
+ unsigned char laststate;
+};
+
+static short read_state(struct kp_data *lp)
+{
+ unsigned char x, y, a, b;
+
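+ /* Scan the key matrix in two passes: drive the low nibble of the
+ * PCF8574 port low and read back which line of the high nibble is
+ * pulled down (active low), then swap nibbles. The two bit positions
+ * found are combined into a single key number below.
+ */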
+ i2c_smbus_write_byte(lp->client, 240);
+ x = 0xF & (~(i2c_smbus_read_byte(lp->client) >> 4));
+
+ i2c_smbus_write_byte(lp->client, 15);
+ y = 0xF & (~i2c_smbus_read_byte(lp->client));
+
+ for (a = 0; x > 0; a++)
+ x = x >> 1;
+ for (b = 0; y > 0; b++)
+ y = y >> 1;
+
+ return ((a - 1) * 4) + b;
+}
+
+static irqreturn_t pcf8574_kp_irq_handler(int irq, void *dev_id)
+{
+ struct kp_data *lp = dev_id;
+ unsigned char nextstate = read_state(lp);
+
+ if (lp->laststate != nextstate) {
+ int key_down = nextstate < ARRAY_SIZE(lp->btncode);
+ unsigned short keycode = key_down ?
+ lp->btncode[nextstate] : lp->btncode[lp->laststate];
+
+ input_report_key(lp->idev, keycode, key_down);
+ input_sync(lp->idev);
+
+ lp->laststate = nextstate;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit pcf8574_kp_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ int i, ret;
+ struct input_dev *idev;
+ struct kp_data *lp;
+
+ if (i2c_smbus_write_byte(client, 240) < 0) {
+ dev_err(&client->dev, "probe: write fail\n");
+ return -ENODEV;
+ }
+
+ lp = kzalloc(sizeof(*lp), GFP_KERNEL);
+ if (!lp)
+ return -ENOMEM;
+
+ idev = input_allocate_device();
+ if (!idev) {
+ dev_err(&client->dev, "Can't allocate input device\n");
+ ret = -ENOMEM;
+ goto fail_allocate;
+ }
+
+ lp->idev = idev;
+ lp->client = client;
+
+ idev->evbit[0] = BIT_MASK(EV_KEY);
+ idev->keycode = lp->btncode;
+ idev->keycodesize = sizeof(lp->btncode[0]);
+ idev->keycodemax = ARRAY_SIZE(lp->btncode);
+
+ for (i = 0; i < ARRAY_SIZE(pcf8574_kp_btncode); i++) {
+ lp->btncode[i] = pcf8574_kp_btncode[i];
+ __set_bit(lp->btncode[i] & KEY_MAX, idev->keybit);
+ }
+
+ sprintf(lp->name, DRV_NAME);
+ sprintf(lp->phys, "kp_data/input0");
+
+ idev->name = lp->name;
+ idev->phys = lp->phys;
+ idev->id.bustype = BUS_I2C;
+ idev->id.vendor = 0x0001;
+ idev->id.product = 0x0001;
+ idev->id.version = 0x0100;
+
+ input_set_drvdata(idev, lp);
+
+ ret = input_register_device(idev);
+ if (ret) {
+ dev_err(&client->dev, "input_register_device() failed\n");
+ goto fail_register;
+ }
+
+ lp->laststate = read_state(lp);
+
+ ret = request_threaded_irq(client->irq, NULL, pcf8574_kp_irq_handler,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ DRV_NAME, lp);
+ if (ret) {
+ dev_err(&client->dev, "IRQ %d is not free\n", client->irq);
+ goto fail_irq;
+ }
+
+ i2c_set_clientdata(client, lp);
+ return 0;
+
+ fail_irq:
+ input_unregister_device(idev);
+ fail_register:
+ input_set_drvdata(idev, NULL);
+ input_free_device(idev);
+ fail_allocate:
+ kfree(lp);
+
+ return ret;
+}
+
+static int __devexit pcf8574_kp_remove(struct i2c_client *client)
+{
+ struct kp_data *lp = i2c_get_clientdata(client);
+
+ free_irq(client->irq, lp);
+
+ input_unregister_device(lp->idev);
+ kfree(lp);
+
+ i2c_set_clientdata(client, NULL);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int pcf8574_kp_resume(struct i2c_client *client)
+{
+ enable_irq(client->irq);
+
+ return 0;
+}
+
+static int pcf8574_kp_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+ disable_irq(client->irq);
+
+ return 0;
+}
+#else
+# define pcf8574_kp_resume NULL
+# define pcf8574_kp_suspend NULL
+#endif
+
+static const struct i2c_device_id pcf8574_kp_id[] = {
+ { DRV_NAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, pcf8574_kp_id);
+
+static struct i2c_driver pcf8574_kp_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = pcf8574_kp_probe,
+ .remove = __devexit_p(pcf8574_kp_remove),
+ .suspend = pcf8574_kp_suspend,
+ .resume = pcf8574_kp_resume,
+ .id_table = pcf8574_kp_id,
+};
+
+static int __init pcf8574_kp_init(void)
+{
+ return i2c_add_driver(&pcf8574_kp_driver);
+}
+module_init(pcf8574_kp_init);
+
+static void __exit pcf8574_kp_exit(void)
+{
+ i2c_del_driver(&pcf8574_kp_driver);
+}
+module_exit(pcf8574_kp_exit);
+
+MODULE_AUTHOR("Michael Hennerich");
+MODULE_DESCRIPTION("Keypad input driver for 16 keys connected to PCF8574");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c
index ea4e1fd12651..f080dd31499b 100644
--- a/drivers/input/misc/pcspkr.c
+++ b/drivers/input/misc/pcspkr.c
@@ -30,7 +30,7 @@ MODULE_ALIAS("platform:pcspkr");
#include <asm/i8253.h>
#else
#include <asm/8253pit.h>
-static DEFINE_SPINLOCK(i8253_lock);
+static DEFINE_RAW_SPINLOCK(i8253_lock);
#endif
static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
@@ -50,7 +50,7 @@ static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int c
if (value > 20 && value < 32767)
count = PIT_TICK_RATE / value;
- spin_lock_irqsave(&i8253_lock, flags);
+ raw_spin_lock_irqsave(&i8253_lock, flags);
if (count) {
/* set command for counter 2, 2 byte write */
@@ -65,7 +65,7 @@ static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int c
outb(inb_p(0x61) & 0xFC, 0x61);
}
- spin_unlock_irqrestore(&i8253_lock, flags);
+ raw_spin_unlock_irqrestore(&i8253_lock, flags);
return 0;
}
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index cbc807264940..9fc4cd0b2056 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -39,11 +39,13 @@ MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
static unsigned int psmouse_max_proto = PSMOUSE_AUTO;
-static int psmouse_set_maxproto(const char *val, struct kernel_param *kp);
-static int psmouse_get_maxproto(char *buffer, struct kernel_param *kp);
+static int psmouse_set_maxproto(const char *val, const struct kernel_param *);
+static int psmouse_get_maxproto(char *buffer, const struct kernel_param *kp);
+static struct kernel_param_ops param_ops_proto_abbrev = {
+ .set = psmouse_set_maxproto,
+ .get = psmouse_get_maxproto,
+};
#define param_check_proto_abbrev(name, p) __param_check(name, p, unsigned int)
-#define param_set_proto_abbrev psmouse_set_maxproto
-#define param_get_proto_abbrev psmouse_get_maxproto
module_param_named(proto, psmouse_max_proto, proto_abbrev, 0644);
MODULE_PARM_DESC(proto, "Highest protocol extension to probe (bare, imps, exps, any). Useful for KVM switches.");
@@ -1673,7 +1675,7 @@ static ssize_t psmouse_attr_set_resolution(struct psmouse *psmouse, void *data,
}
-static int psmouse_set_maxproto(const char *val, struct kernel_param *kp)
+static int psmouse_set_maxproto(const char *val, const struct kernel_param *kp)
{
const struct psmouse_protocol *proto;
@@ -1690,7 +1692,7 @@ static int psmouse_set_maxproto(const char *val, struct kernel_param *kp)
return 0;
}
-static int psmouse_get_maxproto(char *buffer, struct kernel_param *kp)
+static int psmouse_get_maxproto(char *buffer, const struct kernel_param *kp)
{
int type = *((unsigned int *)kp->arg);
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index 7e319d65ec57..f34f1dbeb577 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -209,4 +209,20 @@ config SERIO_ALTERA_PS2
To compile this driver as a module, choose M here: the
module will be called altera_ps2.
+config SERIO_AMS_DELTA
+ tristate "Amstrad Delta (E3) mailboard support"
+ depends on MACH_AMS_DELTA
+ default y
+ select AMS_DELTA_FIQ
+ ---help---
+ Say Y here if you have an E3 and want to use its mailboard,
+ or any standard AT keyboard connected to the mailboard port.
+
+ When used for the E3 mailboard, a non-standard key table
+ must be loaded from userspace, possibly using the keymap
+ helper utility provided by udev extras.
+
+ To compile this driver as a module, choose M here;
+ the module will be called ams_delta_serio.
+
endif
diff --git a/drivers/input/serio/Makefile b/drivers/input/serio/Makefile
index bf945f789d05..84c80bf7185e 100644
--- a/drivers/input/serio/Makefile
+++ b/drivers/input/serio/Makefile
@@ -21,5 +21,6 @@ obj-$(CONFIG_SERIO_PCIPS2) += pcips2.o
obj-$(CONFIG_SERIO_MACEPS2) += maceps2.o
obj-$(CONFIG_SERIO_LIBPS2) += libps2.o
obj-$(CONFIG_SERIO_RAW) += serio_raw.o
+obj-$(CONFIG_SERIO_AMS_DELTA) += ams_delta_serio.o
obj-$(CONFIG_SERIO_XILINX_XPS_PS2) += xilinx_ps2.o
obj-$(CONFIG_SERIO_ALTERA_PS2) += altera_ps2.o
diff --git a/drivers/input/serio/ams_delta_serio.c b/drivers/input/serio/ams_delta_serio.c
new file mode 100644
index 000000000000..8f1770e1e08b
--- /dev/null
+++ b/drivers/input/serio/ams_delta_serio.c
@@ -0,0 +1,177 @@
+/*
+ * Amstrad E3 (Delta) keyboard port driver
+ *
+ * Copyright (c) 2006 Matt Callow
+ * Copyright (c) 2010 Janusz Krzysztofik
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Thanks to Cliff Lawson for his help
+ *
+ * The Amstrad Delta keyboard (aka mailboard) uses normal PC-AT style serial
+ * transmission. The keyboard port is formed of two GPIO lines, for clock
+ * and data. Due to strict timing requirements of the interface,
+ * the serial data stream is read and processed by a FIQ handler.
+ * The resulting words are fetched by this driver from a circular buffer.
+ *
+ * Standard AT keyboard driver (atkbd) is used for handling the keyboard data.
+ * However, when used with the E3 mailboard that produces non-standard
+ * scancodes, a custom key table must be prepared and loaded from userspace.
+ */
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/serio.h>
+#include <linux/slab.h>
+
+#include <asm/mach-types.h>
+#include <plat/board-ams-delta.h>
+
+#include <mach/ams-delta-fiq.h>
+
+MODULE_AUTHOR("Matt Callow");
+MODULE_DESCRIPTION("AMS Delta (E3) keyboard port driver");
+MODULE_LICENSE("GPL");
+
+static struct serio *ams_delta_serio;
+
+static int check_data(int data)
+{
+ int i, parity = 0;
+
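+ /*
+ * Each word read from the FIQ circular buffer is a raw AT-style frame:
+ * bit 0 is the start bit, bits 1-8 hold the scancode, bit 9 is the
+ * (odd) parity bit and bit 10 is the stop bit.
+ */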
+ /* check valid stop bit */
+ if (!(data & 0x400)) {
+ dev_warn(&ams_delta_serio->dev,
+ "invalid stop bit, data=0x%X\n",
+ data);
+ return SERIO_FRAME;
+ }
+ /* calculate the parity */
+ for (i = 1; i < 10; i++) {
+ if (data & (1 << i))
+ parity++;
+ }
+ /* it should be odd */
+ if (!(parity & 0x01)) {
+ dev_warn(&ams_delta_serio->dev,
+ "paritiy check failed, data=0x%X parity=0x%X\n",
+ data, parity);
+ return SERIO_PARITY;
+ }
+ return 0;
+}
+
+static irqreturn_t ams_delta_serio_interrupt(int irq, void *dev_id)
+{
+ int *circ_buff = &fiq_buffer[FIQ_CIRC_BUFF];
+ int data, dfl;
+ u8 scancode;
+
+ fiq_buffer[FIQ_IRQ_PEND] = 0;
+
+ /*
+ * Read data from the circular buffer, check it
+ * and then pass it on the serio
+ */
+ while (fiq_buffer[FIQ_KEYS_CNT] > 0) {
+
+ data = circ_buff[fiq_buffer[FIQ_HEAD_OFFSET]++];
+ fiq_buffer[FIQ_KEYS_CNT]--;
+ if (fiq_buffer[FIQ_HEAD_OFFSET] == fiq_buffer[FIQ_BUF_LEN])
+ fiq_buffer[FIQ_HEAD_OFFSET] = 0;
+
+ dfl = check_data(data);
+ scancode = (u8) (data >> 1) & 0xFF;
+ serio_interrupt(ams_delta_serio, scancode, dfl);
+ }
+ return IRQ_HANDLED;
+}
+
+static int ams_delta_serio_open(struct serio *serio)
+{
+ /* enable keyboard */
+ ams_delta_latch2_write(AMD_DELTA_LATCH2_KEYBRD_PWR,
+ AMD_DELTA_LATCH2_KEYBRD_PWR);
+
+ return 0;
+}
+
+static void ams_delta_serio_close(struct serio *serio)
+{
+ /* disable keyboard */
+ ams_delta_latch2_write(AMD_DELTA_LATCH2_KEYBRD_PWR, 0);
+}
+
+static int __init ams_delta_serio_init(void)
+{
+ int err;
+
+ if (!machine_is_ams_delta())
+ return -ENODEV;
+
+ ams_delta_serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
+ if (!ams_delta_serio)
+ return -ENOMEM;
+
+ ams_delta_serio->id.type = SERIO_8042;
+ ams_delta_serio->open = ams_delta_serio_open;
+ ams_delta_serio->close = ams_delta_serio_close;
+ strlcpy(ams_delta_serio->name, "AMS DELTA keyboard adapter",
+ sizeof(ams_delta_serio->name));
+ strlcpy(ams_delta_serio->phys, "GPIO/serio0",
+ sizeof(ams_delta_serio->phys));
+
+ err = gpio_request(AMS_DELTA_GPIO_PIN_KEYBRD_DATA, "serio-data");
+ if (err) {
+ pr_err("ams_delta_serio: Couldn't request gpio pin for data\n");
+ goto serio;
+ }
+ gpio_direction_input(AMS_DELTA_GPIO_PIN_KEYBRD_DATA);
+
+ err = gpio_request(AMS_DELTA_GPIO_PIN_KEYBRD_CLK, "serio-clock");
+ if (err) {
+ pr_err("ams_delta_serio: couldn't request gpio pin for clock\n");
+ goto gpio_data;
+ }
+ gpio_direction_input(AMS_DELTA_GPIO_PIN_KEYBRD_CLK);
+
+ err = request_irq(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK),
+ ams_delta_serio_interrupt, IRQ_TYPE_EDGE_RISING,
+ "ams-delta-serio", 0);
+ if (err < 0) {
+ pr_err("ams_delta_serio: couldn't request gpio interrupt %d\n",
+ gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK));
+ goto gpio_clk;
+ }
+ /*
+ * Since GPIO register handling for keyboard clock pin is performed
+ * at FIQ level, switch back from edge to simple interrupt handler
+ * to avoid bad interaction.
+ */
+ set_irq_handler(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK),
+ handle_simple_irq);
+
+ serio_register_port(ams_delta_serio);
+ dev_info(&ams_delta_serio->dev, "%s\n", ams_delta_serio->name);
+
+ return 0;
+gpio_clk:
+ gpio_free(AMS_DELTA_GPIO_PIN_KEYBRD_CLK);
+gpio_data:
+ gpio_free(AMS_DELTA_GPIO_PIN_KEYBRD_DATA);
+serio:
+ kfree(ams_delta_serio);
+ return err;
+}
+module_init(ams_delta_serio_init);
+
+static void __exit ams_delta_serio_exit(void)
+{
+ serio_unregister_port(ams_delta_serio);
+ free_irq(OMAP_GPIO_IRQ(AMS_DELTA_GPIO_PIN_KEYBRD_CLK), 0);
+ gpio_free(AMS_DELTA_GPIO_PIN_KEYBRD_CLK);
+ gpio_free(AMS_DELTA_GPIO_PIN_KEYBRD_DATA);
+ kfree(ams_delta_serio);
+}
+module_exit(ams_delta_serio_exit);
diff --git a/drivers/input/tablet/acecad.c b/drivers/input/tablet/acecad.c
index 670c61c5a516..37d0539fefa0 100644
--- a/drivers/input/tablet/acecad.c
+++ b/drivers/input/tablet/acecad.c
@@ -66,18 +66,18 @@ static void usb_acecad_irq(struct urb *urb)
int prox, status;
switch (urb->status) {
- case 0:
- /* success */
- break;
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- /* this urb is terminated, clean up */
- dbg("%s - urb shutting down with status: %d", __func__, urb->status);
- return;
- default:
- dbg("%s - nonzero urb status received: %d", __func__, urb->status);
- goto resubmit;
+ case 0:
+ /* success */
+ break;
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ /* this urb is terminated, clean up */
+ dbg("%s - urb shutting down with status: %d", __func__, urb->status);
+ return;
+ default:
+ dbg("%s - nonzero urb status received: %d", __func__, urb->status);
+ goto resubmit;
}
prox = (data[0] & 0x04) >> 2;
@@ -135,7 +135,7 @@ static int usb_acecad_probe(struct usb_interface *intf, const struct usb_device_
struct usb_acecad *acecad;
struct input_dev *input_dev;
int pipe, maxp;
- int err = -ENOMEM;
+ int err;
if (interface->desc.bNumEndpoints != 1)
return -ENODEV;
@@ -193,40 +193,34 @@ static int usb_acecad_probe(struct usb_interface *intf, const struct usb_device_
input_dev->close = usb_acecad_close;
input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
- input_dev->absbit[0] = BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) |
- BIT_MASK(ABS_PRESSURE);
- input_dev->keybit[BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) |
- BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE);
input_dev->keybit[BIT_WORD(BTN_DIGI)] = BIT_MASK(BTN_TOOL_PEN) |
BIT_MASK(BTN_TOUCH) | BIT_MASK(BTN_STYLUS) |
BIT_MASK(BTN_STYLUS2);
switch (id->driver_info) {
- case 0:
- input_dev->absmax[ABS_X] = 5000;
- input_dev->absmax[ABS_Y] = 3750;
- input_dev->absmax[ABS_PRESSURE] = 512;
- if (!strlen(acecad->name))
- snprintf(acecad->name, sizeof(acecad->name),
- "USB Acecad Flair Tablet %04x:%04x",
- le16_to_cpu(dev->descriptor.idVendor),
- le16_to_cpu(dev->descriptor.idProduct));
- break;
- case 1:
- input_dev->absmax[ABS_X] = 3000;
- input_dev->absmax[ABS_Y] = 2250;
- input_dev->absmax[ABS_PRESSURE] = 1024;
- if (!strlen(acecad->name))
- snprintf(acecad->name, sizeof(acecad->name),
- "USB Acecad 302 Tablet %04x:%04x",
- le16_to_cpu(dev->descriptor.idVendor),
- le16_to_cpu(dev->descriptor.idProduct));
- break;
+ case 0:
+ input_set_abs_params(input_dev, ABS_X, 0, 5000, 4, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, 3750, 4, 0);
+ input_set_abs_params(input_dev, ABS_PRESSURE, 0, 512, 0, 0);
+ if (!strlen(acecad->name))
+ snprintf(acecad->name, sizeof(acecad->name),
+ "USB Acecad Flair Tablet %04x:%04x",
+ le16_to_cpu(dev->descriptor.idVendor),
+ le16_to_cpu(dev->descriptor.idProduct));
+ break;
+
+ case 1:
+ input_set_abs_params(input_dev, ABS_X, 0, 3000, 4, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, 2250, 4, 0);
+ input_set_abs_params(input_dev, ABS_PRESSURE, 0, 1024, 0, 0);
+ if (!strlen(acecad->name))
+ snprintf(acecad->name, sizeof(acecad->name),
+ "USB Acecad 302 Tablet %04x:%04x",
+ le16_to_cpu(dev->descriptor.idVendor),
+ le16_to_cpu(dev->descriptor.idProduct));
+ break;
}
- input_dev->absfuzz[ABS_X] = 4;
- input_dev->absfuzz[ABS_Y] = 4;
-
usb_fill_int_urb(acecad->irq, dev, pipe,
acecad->data, maxp > 8 ? 8 : maxp,
usb_acecad_irq, acecad, endpoint->bInterval);
@@ -252,13 +246,11 @@ static void usb_acecad_disconnect(struct usb_interface *intf)
struct usb_acecad *acecad = usb_get_intfdata(intf);
usb_set_intfdata(intf, NULL);
- if (acecad) {
- usb_kill_urb(acecad->irq);
- input_unregister_device(acecad->input);
- usb_free_urb(acecad->irq);
- usb_buffer_free(interface_to_usbdev(intf), 10, acecad->data, acecad->data_dma);
- kfree(acecad);
- }
+
+ input_unregister_device(acecad->input);
+ usb_free_urb(acecad->irq);
+ usb_buffer_free(acecad->usbdev, 8, acecad->data, acecad->data_dma);
+ kfree(acecad);
}
static struct usb_device_id usb_acecad_id_table [] = {
diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c
index 6682b17bf844..b9969f120247 100644
--- a/drivers/input/tablet/kbtab.c
+++ b/drivers/input/tablet/kbtab.c
@@ -34,10 +34,6 @@ struct kbtab {
struct input_dev *dev;
struct usb_device *usbdev;
struct urb *irq;
- int x, y;
- int button;
- int pressure;
- __u32 serial[2];
char phys[32];
};
@@ -46,6 +42,7 @@ static void kbtab_irq(struct urb *urb)
struct kbtab *kbtab = urb->context;
unsigned char *data = kbtab->data;
struct input_dev *dev = kbtab->dev;
+ int pressure;
int retval;
switch (urb->status) {
@@ -63,31 +60,27 @@ static void kbtab_irq(struct urb *urb)
goto exit;
}
- kbtab->x = get_unaligned_le16(&data[1]);
- kbtab->y = get_unaligned_le16(&data[3]);
-
- kbtab->pressure = (data[5]);
input_report_key(dev, BTN_TOOL_PEN, 1);
- input_report_abs(dev, ABS_X, kbtab->x);
- input_report_abs(dev, ABS_Y, kbtab->y);
+ input_report_abs(dev, ABS_X, get_unaligned_le16(&data[1]));
+ input_report_abs(dev, ABS_Y, get_unaligned_le16(&data[3]));
/*input_report_key(dev, BTN_TOUCH , data[0] & 0x01);*/
input_report_key(dev, BTN_RIGHT, data[0] & 0x02);
- if (-1 == kb_pressure_click) {
- input_report_abs(dev, ABS_PRESSURE, kbtab->pressure);
- } else {
- input_report_key(dev, BTN_LEFT, (kbtab->pressure > kb_pressure_click) ? 1 : 0);
- };
+ pressure = data[5];
+ if (kb_pressure_click == -1)
+ input_report_abs(dev, ABS_PRESSURE, pressure);
+ else
+ input_report_key(dev, BTN_LEFT, pressure > kb_pressure_click ? 1 : 0);
input_sync(dev);
exit:
- retval = usb_submit_urb (urb, GFP_ATOMIC);
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
- err ("%s - usb_submit_urb failed with result %d",
+ err("%s - usb_submit_urb failed with result %d",
__func__, retval);
}
@@ -153,13 +146,11 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
input_dev->open = kbtab_open;
input_dev->close = kbtab_close;
- input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS) |
- BIT_MASK(EV_MSC);
- input_dev->keybit[BIT_WORD(BTN_LEFT)] |= BIT_MASK(BTN_LEFT) |
- BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE);
- input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_PEN) |
- BIT_MASK(BTN_TOUCH);
- input_dev->mscbit[0] |= BIT_MASK(MSC_SERIAL);
+ input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+ input_dev->keybit[BIT_WORD(BTN_LEFT)] |=
+ BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_RIGHT);
+ input_dev->keybit[BIT_WORD(BTN_DIGI)] |=
+ BIT_MASK(BTN_TOOL_PEN) | BIT_MASK(BTN_TOUCH);
input_set_abs_params(input_dev, ABS_X, 0, 0x2000, 4, 0);
input_set_abs_params(input_dev, ABS_Y, 0, 0x1750, 4, 0);
input_set_abs_params(input_dev, ABS_PRESSURE, 0, 0xff, 0, 0);
@@ -182,7 +173,7 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
return 0;
fail3: usb_free_urb(kbtab->irq);
- fail2: usb_buffer_free(dev, 10, kbtab->data, kbtab->data_dma);
+ fail2: usb_buffer_free(dev, 8, kbtab->data, kbtab->data_dma);
fail1: input_free_device(input_dev);
kfree(kbtab);
return error;
@@ -193,13 +184,11 @@ static void kbtab_disconnect(struct usb_interface *intf)
struct kbtab *kbtab = usb_get_intfdata(intf);
usb_set_intfdata(intf, NULL);
- if (kbtab) {
- usb_kill_urb(kbtab->irq);
- input_unregister_device(kbtab->dev);
- usb_free_urb(kbtab->irq);
- usb_buffer_free(interface_to_usbdev(intf), 10, kbtab->data, kbtab->data_dma);
- kfree(kbtab);
- }
+
+ input_unregister_device(kbtab->dev);
+ usb_free_urb(kbtab->irq);
+ usb_buffer_free(kbtab->usbdev, 8, kbtab->data, kbtab->data_dma);
+ kfree(kbtab);
}
static struct usb_driver kbtab_driver = {
diff --git a/drivers/input/tablet/wacom.h b/drivers/input/tablet/wacom.h
index 8fef1b689c69..284dfaab6b8c 100644
--- a/drivers/input/tablet/wacom.h
+++ b/drivers/input/tablet/wacom.h
@@ -106,44 +106,18 @@ MODULE_LICENSE(DRIVER_LICENSE);
struct wacom {
dma_addr_t data_dma;
- struct input_dev *dev;
struct usb_device *usbdev;
struct usb_interface *intf;
struct urb *irq;
- struct wacom_wac *wacom_wac;
+ struct wacom_wac wacom_wac;
struct mutex lock;
- unsigned int open:1;
+ bool open;
char phys[32];
};
-struct wacom_combo {
- struct wacom *wacom;
- struct urb *urb;
-};
-
extern const struct usb_device_id wacom_ids[];
-extern int wacom_wac_irq(struct wacom_wac * wacom_wac, void * wcombo);
-extern void wacom_report_abs(void *wcombo, unsigned int abs_type, int abs_data);
-extern void wacom_report_rel(void *wcombo, unsigned int rel_type, int rel_data);
-extern void wacom_report_key(void *wcombo, unsigned int key_type, int key_data);
-extern void wacom_input_event(void *wcombo, unsigned int type, unsigned int code, int value);
-extern void wacom_input_sync(void *wcombo);
-extern void wacom_init_input_dev(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_g4(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_g(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_i3s(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_i3(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_i(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_i4s(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_i4(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_pl(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_pt(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_tpc(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_tpc2fg(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_mo(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_bee(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern __u16 wacom_le16_to_cpu(unsigned char *data);
-extern __u16 wacom_be16_to_cpu(unsigned char *data);
-
+void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len);
+void wacom_setup_input_capabilities(struct input_dev *input_dev,
+ struct wacom_wac *wacom_wac);
#endif
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index f46502589e4e..d90f4e00e51d 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -11,8 +11,8 @@
* (at your option) any later version.
*/
-#include "wacom.h"
#include "wacom_wac.h"
+#include "wacom.h"
/* defines to get HID report descriptor */
#define HID_DEVICET_HID (USB_TYPE_CLASS | 0x01)
@@ -70,15 +70,9 @@ static int usb_set_report(struct usb_interface *intf, unsigned char type,
buf, size, 1000);
}
-static struct input_dev * get_input_dev(struct wacom_combo *wcombo)
-{
- return wcombo->wacom->dev;
-}
-
static void wacom_sys_irq(struct urb *urb)
{
struct wacom *wacom = urb->context;
- struct wacom_combo wcombo;
int retval;
switch (urb->status) {
@@ -96,59 +90,16 @@ static void wacom_sys_irq(struct urb *urb)
goto exit;
}
- wcombo.wacom = wacom;
- wcombo.urb = urb;
-
- if (wacom_wac_irq(wacom->wacom_wac, (void *)&wcombo))
- input_sync(get_input_dev(&wcombo));
+ wacom_wac_irq(&wacom->wacom_wac, urb->actual_length);
exit:
usb_mark_last_busy(wacom->usbdev);
- retval = usb_submit_urb (urb, GFP_ATOMIC);
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
err ("%s - usb_submit_urb failed with result %d",
__func__, retval);
}
-void wacom_report_key(void *wcombo, unsigned int key_type, int key_data)
-{
- input_report_key(get_input_dev((struct wacom_combo *)wcombo), key_type, key_data);
-}
-
-void wacom_report_abs(void *wcombo, unsigned int abs_type, int abs_data)
-{
- input_report_abs(get_input_dev((struct wacom_combo *)wcombo), abs_type, abs_data);
-}
-
-void wacom_report_rel(void *wcombo, unsigned int rel_type, int rel_data)
-{
- input_report_rel(get_input_dev((struct wacom_combo *)wcombo), rel_type, rel_data);
-}
-
-void wacom_input_event(void *wcombo, unsigned int type, unsigned int code, int value)
-{
- input_event(get_input_dev((struct wacom_combo *)wcombo), type, code, value);
-}
-
-__u16 wacom_be16_to_cpu(unsigned char *data)
-{
- __u16 value;
- value = be16_to_cpu(*(__be16 *) data);
- return value;
-}
-
-__u16 wacom_le16_to_cpu(unsigned char *data)
-{
- __u16 value;
- value = le16_to_cpu(*(__le16 *) data);
- return value;
-}
-
-void wacom_input_sync(void *wcombo)
-{
- input_sync(get_input_dev((struct wacom_combo *)wcombo));
-}
-
static int wacom_open(struct input_dev *dev)
{
struct wacom *wacom = input_get_drvdata(dev);
@@ -168,7 +119,7 @@ static int wacom_open(struct input_dev *dev)
return -EIO;
}
- wacom->open = 1;
+ wacom->open = true;
wacom->intf->needs_remote_wakeup = 1;
mutex_unlock(&wacom->lock);
@@ -181,128 +132,11 @@ static void wacom_close(struct input_dev *dev)
mutex_lock(&wacom->lock);
usb_kill_urb(wacom->irq);
- wacom->open = 0;
+ wacom->open = false;
wacom->intf->needs_remote_wakeup = 0;
mutex_unlock(&wacom->lock);
}
-void input_dev_mo(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- input_dev->keybit[BIT_WORD(BTN_MISC)] |= BIT_MASK(BTN_1) |
- BIT_MASK(BTN_5);
- input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
-}
-
-void input_dev_g4(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- input_dev->evbit[0] |= BIT_MASK(EV_MSC);
- input_dev->mscbit[0] |= BIT_MASK(MSC_SERIAL);
- input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_FINGER);
- input_dev->keybit[BIT_WORD(BTN_MISC)] |= BIT_MASK(BTN_0) |
- BIT_MASK(BTN_4);
-}
-
-void input_dev_g(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- input_dev->evbit[0] |= BIT_MASK(EV_REL);
- input_dev->relbit[0] |= BIT_MASK(REL_WHEEL);
- input_dev->keybit[BIT_WORD(BTN_MOUSE)] |= BIT_MASK(BTN_LEFT) |
- BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE);
- input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_RUBBER) |
- BIT_MASK(BTN_TOOL_PEN) | BIT_MASK(BTN_STYLUS) |
- BIT_MASK(BTN_TOOL_MOUSE) | BIT_MASK(BTN_STYLUS2);
- input_set_abs_params(input_dev, ABS_DISTANCE,
- 0, wacom_wac->features.distance_max, 0, 0);
-}
-
-void input_dev_i3s(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_FINGER);
- input_dev->keybit[BIT_WORD(BTN_MISC)] |= BIT_MASK(BTN_0) |
- BIT_MASK(BTN_1) | BIT_MASK(BTN_2) | BIT_MASK(BTN_3);
- input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
- input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
-}
-
-void input_dev_i3(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- input_dev->keybit[BIT_WORD(BTN_MISC)] |= BIT_MASK(BTN_4) |
- BIT_MASK(BTN_5) | BIT_MASK(BTN_6) | BIT_MASK(BTN_7);
- input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);
-}
-
-void input_dev_i4s(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_FINGER);
- input_dev->keybit[BIT_WORD(BTN_MISC)] |= BIT_MASK(BTN_0) | BIT_MASK(BTN_1) | BIT_MASK(BTN_2) | BIT_MASK(BTN_3);
- input_dev->keybit[BIT_WORD(BTN_MISC)] |= BIT_MASK(BTN_4) | BIT_MASK(BTN_5) | BIT_MASK(BTN_6);
- input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
-}
-
-void input_dev_i4(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- input_dev->keybit[BIT_WORD(BTN_MISC)] |= BIT_MASK(BTN_7) | BIT_MASK(BTN_8);
-}
-
-void input_dev_bee(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- input_dev->keybit[BIT_WORD(BTN_MISC)] |= BIT_MASK(BTN_8) | BIT_MASK(BTN_9);
-}
-
-void input_dev_i(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- input_dev->evbit[0] |= BIT_MASK(EV_MSC) | BIT_MASK(EV_REL);
- input_dev->mscbit[0] |= BIT_MASK(MSC_SERIAL);
- input_dev->relbit[0] |= BIT_MASK(REL_WHEEL);
- input_dev->keybit[BIT_WORD(BTN_MOUSE)] |= BIT_MASK(BTN_LEFT) |
- BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE) |
- BIT_MASK(BTN_SIDE) | BIT_MASK(BTN_EXTRA);
- input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_RUBBER) |
- BIT_MASK(BTN_TOOL_PEN) | BIT_MASK(BTN_STYLUS) |
- BIT_MASK(BTN_TOOL_MOUSE) | BIT_MASK(BTN_TOOL_BRUSH) |
- BIT_MASK(BTN_TOOL_PENCIL) | BIT_MASK(BTN_TOOL_AIRBRUSH) |
- BIT_MASK(BTN_TOOL_LENS) | BIT_MASK(BTN_STYLUS2);
- input_set_abs_params(input_dev, ABS_DISTANCE,
- 0, wacom_wac->features.distance_max, 0, 0);
- input_set_abs_params(input_dev, ABS_WHEEL, 0, 1023, 0, 0);
- input_set_abs_params(input_dev, ABS_TILT_X, 0, 127, 0, 0);
- input_set_abs_params(input_dev, ABS_TILT_Y, 0, 127, 0, 0);
- input_set_abs_params(input_dev, ABS_RZ, -900, 899, 0, 0);
- input_set_abs_params(input_dev, ABS_THROTTLE, -1023, 1023, 0, 0);
-}
-
-void input_dev_pl(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_PEN) |
- BIT_MASK(BTN_STYLUS) | BIT_MASK(BTN_STYLUS2);
-}
-
-void input_dev_pt(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_RUBBER);
-}
-
-void input_dev_tpc(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- struct wacom_features *features = &wacom_wac->features;
-
- if (features->device_type == BTN_TOOL_DOUBLETAP ||
- features->device_type == BTN_TOOL_TRIPLETAP) {
- input_set_abs_params(input_dev, ABS_RX, 0, features->x_phy, 0, 0);
- input_set_abs_params(input_dev, ABS_RY, 0, features->y_phy, 0, 0);
- __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
- }
-}
-
-void input_dev_tpc2fg(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- if (wacom_wac->features.device_type == BTN_TOOL_TRIPLETAP) {
- input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_TRIPLETAP);
- input_dev->evbit[0] |= BIT_MASK(EV_MSC);
- input_dev->mscbit[0] |= BIT_MASK(MSC_SERIAL);
- }
-}
-
static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hid_desc,
struct wacom_features *features)
{
@@ -362,9 +196,9 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
features->device_type = BTN_TOOL_TRIPLETAP;
}
features->x_max =
- wacom_le16_to_cpu(&report[i + 3]);
+ get_unaligned_le16(&report[i + 3]);
features->x_phy =
- wacom_le16_to_cpu(&report[i + 6]);
+ get_unaligned_le16(&report[i + 6]);
features->unit = report[i + 9];
features->unitExpo = report[i + 11];
i += 12;
@@ -374,7 +208,7 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
features->pktlen = WACOM_PKGLEN_GRAPHIRE;
features->device_type = BTN_TOOL_PEN;
features->x_max =
- wacom_le16_to_cpu(&report[i + 3]);
+ get_unaligned_le16(&report[i + 3]);
i += 4;
}
} else if (usage == WCM_DIGITIZER) {
@@ -396,15 +230,15 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
features->pktlen = WACOM_PKGLEN_TPC2FG;
features->device_type = BTN_TOOL_TRIPLETAP;
features->y_max =
- wacom_le16_to_cpu(&report[i + 3]);
+ get_unaligned_le16(&report[i + 3]);
features->y_phy =
- wacom_le16_to_cpu(&report[i + 6]);
+ get_unaligned_le16(&report[i + 6]);
i += 7;
} else {
features->y_max =
features->x_max;
features->y_phy =
- wacom_le16_to_cpu(&report[i + 3]);
+ get_unaligned_le16(&report[i + 3]);
i += 4;
}
} else if (pen) {
@@ -413,7 +247,7 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
features->pktlen = WACOM_PKGLEN_GRAPHIRE;
features->device_type = BTN_TOOL_PEN;
features->y_max =
- wacom_le16_to_cpu(&report[i + 3]);
+ get_unaligned_le16(&report[i + 3]);
i += 4;
}
}
@@ -432,7 +266,7 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
case HID_USAGE_UNDEFINED:
if (usage == WCM_DESKTOP && finger) /* capacity */
features->pressure_max =
- wacom_le16_to_cpu(&report[i + 3]);
+ get_unaligned_le16(&report[i + 3]);
i += 4;
break;
}
@@ -528,6 +362,81 @@ static int wacom_retrieve_hid_descriptor(struct usb_interface *intf,
return error;
}
+struct wacom_usbdev_data {
+ struct list_head list;
+ struct kref kref;
+ struct usb_device *dev;
+ struct wacom_shared shared;
+};
+
+static LIST_HEAD(wacom_udev_list);
+static DEFINE_MUTEX(wacom_udev_list_lock);
+
+static struct wacom_usbdev_data *wacom_get_usbdev_data(struct usb_device *dev)
+{
+ struct wacom_usbdev_data *data;
+
+ list_for_each_entry(data, &wacom_udev_list, list) {
+ if (data->dev == dev) {
+ kref_get(&data->kref);
+ return data;
+ }
+ }
+
+ return NULL;
+}
+
+static int wacom_add_shared_data(struct wacom_wac *wacom,
+ struct usb_device *dev)
+{
+ struct wacom_usbdev_data *data;
+ int retval = 0;
+
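+ /* Interfaces on the same USB device (e.g. the pen and touch
+ * interfaces of one tablet) share a single wacom_shared instance,
+ * looked up here by usb_device and reference counted with a kref.
+ */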
+ mutex_lock(&wacom_udev_list_lock);
+
+ data = wacom_get_usbdev_data(dev);
+ if (!data) {
+ data = kzalloc(sizeof(struct wacom_usbdev_data), GFP_KERNEL);
+ if (!data) {
+ retval = -ENOMEM;
+ goto out;
+ }
+
+ kref_init(&data->kref);
+ data->dev = dev;
+ list_add_tail(&data->list, &wacom_udev_list);
+ }
+
+ wacom->shared = &data->shared;
+
+out:
+ mutex_unlock(&wacom_udev_list_lock);
+ return retval;
+}
+
+static void wacom_release_shared_data(struct kref *kref)
+{
+ struct wacom_usbdev_data *data =
+ container_of(kref, struct wacom_usbdev_data, kref);
+
+ mutex_lock(&wacom_udev_list_lock);
+ list_del(&data->list);
+ mutex_unlock(&wacom_udev_list_lock);
+
+ kfree(data);
+}
+
+static void wacom_remove_shared_data(struct wacom_wac *wacom)
+{
+ struct wacom_usbdev_data *data;
+
+ if (wacom->shared) {
+ data = container_of(wacom->shared, struct wacom_usbdev_data, shared);
+ kref_put(&data->kref, wacom_release_shared_data);
+ wacom->shared = NULL;
+ }
+}
+
static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
struct usb_device *dev = interface_to_usbdev(intf);
@@ -542,13 +451,13 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
return -EINVAL;
wacom = kzalloc(sizeof(struct wacom), GFP_KERNEL);
- wacom_wac = kzalloc(sizeof(struct wacom_wac), GFP_KERNEL);
input_dev = input_allocate_device();
- if (!wacom || !input_dev || !wacom_wac) {
+ if (!wacom || !input_dev) {
error = -ENOMEM;
goto fail1;
}
+ wacom_wac = &wacom->wacom_wac;
wacom_wac->features = *((struct wacom_features *)id->driver_info);
features = &wacom_wac->features;
if (features->pktlen > WACOM_PKGLEN_MAX) {
@@ -570,20 +479,12 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
}
wacom->usbdev = dev;
- wacom->dev = input_dev;
wacom->intf = intf;
mutex_init(&wacom->lock);
usb_make_path(dev, wacom->phys, sizeof(wacom->phys));
strlcat(wacom->phys, "/input0", sizeof(wacom->phys));
- usb_to_input_id(dev, &input_dev->id);
-
- input_dev->dev.parent = &intf->dev;
-
- input_set_drvdata(input_dev, wacom);
-
- input_dev->open = wacom_open;
- input_dev->close = wacom_close;
+ wacom_wac->input = input_dev;
endpoint = &intf->cur_altsetting->endpoint[0].desc;
@@ -600,20 +501,21 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
features->device_type == BTN_TOOL_PEN ?
" Pen" : " Finger",
sizeof(wacom_wac->name));
+
+ error = wacom_add_shared_data(wacom_wac, dev);
+ if (error)
+ goto fail3;
}
input_dev->name = wacom_wac->name;
- wacom->wacom_wac = wacom_wac;
-
- input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
- input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOUCH);
-
- input_set_abs_params(input_dev, ABS_X, 0, features->x_max, 4, 0);
- input_set_abs_params(input_dev, ABS_Y, 0, features->y_max, 4, 0);
- input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max, 0, 0);
- input_dev->absbit[BIT_WORD(ABS_MISC)] |= BIT_MASK(ABS_MISC);
+ input_dev->name = wacom_wac->name;
+ input_dev->dev.parent = &intf->dev;
+ input_dev->open = wacom_open;
+ input_dev->close = wacom_close;
+ usb_to_input_id(dev, &input_dev->id);
+ input_set_drvdata(input_dev, wacom);
- wacom_init_input_dev(input_dev, wacom_wac);
+ wacom_setup_input_capabilities(input_dev, wacom_wac);
usb_fill_int_urb(wacom->irq, dev,
usb_rcvintpipe(dev, endpoint->bEndpointAddress),
@@ -622,9 +524,9 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
wacom->irq->transfer_dma = wacom->data_dma;
wacom->irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
- error = input_register_device(wacom->dev);
+ error = input_register_device(input_dev);
if (error)
- goto fail3;
+ goto fail4;
/* Note that if query fails it is not a hard failure */
wacom_query_tablet_data(intf, features);
@@ -632,11 +534,11 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
usb_set_intfdata(intf, wacom);
return 0;
+ fail4: wacom_remove_shared_data(wacom_wac);
fail3: usb_free_urb(wacom->irq);
fail2: usb_buffer_free(dev, WACOM_PKGLEN_MAX, wacom_wac->data, wacom->data_dma);
fail1: input_free_device(input_dev);
kfree(wacom);
- kfree(wacom_wac);
return error;
}
@@ -647,11 +549,11 @@ static void wacom_disconnect(struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
usb_kill_urb(wacom->irq);
- input_unregister_device(wacom->dev);
+ input_unregister_device(wacom->wacom_wac.input);
usb_free_urb(wacom->irq);
usb_buffer_free(interface_to_usbdev(intf), WACOM_PKGLEN_MAX,
- wacom->wacom_wac->data, wacom->data_dma);
- kfree(wacom->wacom_wac);
+ wacom->wacom_wac.data, wacom->data_dma);
+ wacom_remove_shared_data(&wacom->wacom_wac);
kfree(wacom);
}
@@ -669,7 +571,7 @@ static int wacom_suspend(struct usb_interface *intf, pm_message_t message)
static int wacom_resume(struct usb_interface *intf)
{
struct wacom *wacom = usb_get_intfdata(intf);
- struct wacom_features *features = &wacom->wacom_wac->features;
+ struct wacom_features *features = &wacom->wacom_wac.features;
int rv;
mutex_lock(&wacom->lock);
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 4a852d815c68..847fd0135bcf 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -11,52 +11,58 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
-#include "wacom.h"
+
#include "wacom_wac.h"
+#include "wacom.h"
-static int wacom_penpartner_irq(struct wacom_wac *wacom, void *wcombo)
+static int wacom_penpartner_irq(struct wacom_wac *wacom)
{
unsigned char *data = wacom->data;
+ struct input_dev *input = wacom->input;
switch (data[0]) {
- case 1:
- if (data[5] & 0x80) {
- wacom->tool[0] = (data[5] & 0x20) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
- wacom->id[0] = (data[5] & 0x20) ? ERASER_DEVICE_ID : STYLUS_DEVICE_ID;
- wacom_report_key(wcombo, wacom->tool[0], 1);
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]); /* report tool id */
- wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[1]));
- wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[3]));
- wacom_report_abs(wcombo, ABS_PRESSURE, (signed char)data[6] + 127);
- wacom_report_key(wcombo, BTN_TOUCH, ((signed char)data[6] > -127));
- wacom_report_key(wcombo, BTN_STYLUS, (data[5] & 0x40));
- } else {
- wacom_report_key(wcombo, wacom->tool[0], 0);
- wacom_report_abs(wcombo, ABS_MISC, 0); /* report tool id */
- wacom_report_abs(wcombo, ABS_PRESSURE, -1);
- wacom_report_key(wcombo, BTN_TOUCH, 0);
- }
- break;
- case 2:
- wacom_report_key(wcombo, BTN_TOOL_PEN, 1);
- wacom_report_abs(wcombo, ABS_MISC, STYLUS_DEVICE_ID); /* report tool id */
- wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[1]));
- wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[3]));
- wacom_report_abs(wcombo, ABS_PRESSURE, (signed char)data[6] + 127);
- wacom_report_key(wcombo, BTN_TOUCH, ((signed char)data[6] > -80) && !(data[5] & 0x20));
- wacom_report_key(wcombo, BTN_STYLUS, (data[5] & 0x40));
- break;
- default:
- printk(KERN_INFO "wacom_penpartner_irq: received unknown report #%d\n", data[0]);
- return 0;
+ case 1:
+ if (data[5] & 0x80) {
+ wacom->tool[0] = (data[5] & 0x20) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
+ wacom->id[0] = (data[5] & 0x20) ? ERASER_DEVICE_ID : STYLUS_DEVICE_ID;
+ input_report_key(input, wacom->tool[0], 1);
+ input_report_abs(input, ABS_MISC, wacom->id[0]); /* report tool id */
+ input_report_abs(input, ABS_X, get_unaligned_le16(&data[1]));
+ input_report_abs(input, ABS_Y, get_unaligned_le16(&data[3]));
+ input_report_abs(input, ABS_PRESSURE, (signed char)data[6] + 127);
+ input_report_key(input, BTN_TOUCH, ((signed char)data[6] > -127));
+ input_report_key(input, BTN_STYLUS, (data[5] & 0x40));
+ } else {
+ input_report_key(input, wacom->tool[0], 0);
+ input_report_abs(input, ABS_MISC, 0); /* report tool id */
+ input_report_abs(input, ABS_PRESSURE, -1);
+ input_report_key(input, BTN_TOUCH, 0);
+ }
+ break;
+
+ case 2:
+ input_report_key(input, BTN_TOOL_PEN, 1);
+ input_report_abs(input, ABS_MISC, STYLUS_DEVICE_ID); /* report tool id */
+ input_report_abs(input, ABS_X, get_unaligned_le16(&data[1]));
+ input_report_abs(input, ABS_Y, get_unaligned_le16(&data[3]));
+ input_report_abs(input, ABS_PRESSURE, (signed char)data[6] + 127);
+ input_report_key(input, BTN_TOUCH, ((signed char)data[6] > -80) && !(data[5] & 0x20));
+ input_report_key(input, BTN_STYLUS, (data[5] & 0x40));
+ break;
+
+ default:
+ printk(KERN_INFO "wacom_penpartner_irq: received unknown report #%d\n", data[0]);
+ return 0;
}
+
return 1;
}
-static int wacom_pl_irq(struct wacom_wac *wacom, void *wcombo)
+static int wacom_pl_irq(struct wacom_wac *wacom)
{
struct wacom_features *features = &wacom->features;
unsigned char *data = wacom->data;
+ struct input_dev *input = wacom->input;
int prox, pressure;
if (data[0] != WACOM_REPORT_PENABLED) {
@@ -90,8 +96,8 @@ static int wacom_pl_irq(struct wacom_wac *wacom, void *wcombo)
/* was entered with stylus2 pressed */
if (wacom->tool[1] == BTN_TOOL_RUBBER && !(data[4] & 0x20)) {
/* report out proximity for previous tool */
- wacom_report_key(wcombo, wacom->tool[1], 0);
- wacom_input_sync(wcombo);
+ input_report_key(input, wacom->tool[1], 0);
+ input_sync(input);
wacom->tool[1] = BTN_TOOL_PEN;
return 0;
}
@@ -101,32 +107,33 @@ static int wacom_pl_irq(struct wacom_wac *wacom, void *wcombo)
wacom->tool[1] = BTN_TOOL_PEN;
wacom->id[0] = STYLUS_DEVICE_ID;
}
- wacom_report_key(wcombo, wacom->tool[1], prox); /* report in proximity for tool */
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]); /* report tool id */
- wacom_report_abs(wcombo, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
- wacom_report_abs(wcombo, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
- wacom_report_abs(wcombo, ABS_PRESSURE, pressure);
-
- wacom_report_key(wcombo, BTN_TOUCH, data[4] & 0x08);
- wacom_report_key(wcombo, BTN_STYLUS, data[4] & 0x10);
+ input_report_key(input, wacom->tool[1], prox); /* report in proximity for tool */
+ input_report_abs(input, ABS_MISC, wacom->id[0]); /* report tool id */
+ input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
+ input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
+ input_report_abs(input, ABS_PRESSURE, pressure);
+
+ input_report_key(input, BTN_TOUCH, data[4] & 0x08);
+ input_report_key(input, BTN_STYLUS, data[4] & 0x10);
/* Only allow the stylus2 button to be reported for the pen tool. */
- wacom_report_key(wcombo, BTN_STYLUS2, (wacom->tool[1] == BTN_TOOL_PEN) && (data[4] & 0x20));
+ input_report_key(input, BTN_STYLUS2, (wacom->tool[1] == BTN_TOOL_PEN) && (data[4] & 0x20));
} else {
/* report proximity-out of a (valid) tool */
if (wacom->tool[1] != BTN_TOOL_RUBBER) {
/* Unknown tool selected default to pen tool */
wacom->tool[1] = BTN_TOOL_PEN;
}
- wacom_report_key(wcombo, wacom->tool[1], prox);
+ input_report_key(input, wacom->tool[1], prox);
}
wacom->tool[0] = prox; /* Save proximity state */
return 1;
}
-static int wacom_ptu_irq(struct wacom_wac *wacom, void *wcombo)
+static int wacom_ptu_irq(struct wacom_wac *wacom)
{
unsigned char *data = wacom->data;
+ struct input_dev *input = wacom->input;
if (data[0] != WACOM_REPORT_PENABLED) {
printk(KERN_INFO "wacom_ptu_irq: received unknown report #%d\n", data[0]);
@@ -134,40 +141,41 @@ static int wacom_ptu_irq(struct wacom_wac *wacom, void *wcombo)
}
if (data[1] & 0x04) {
- wacom_report_key(wcombo, BTN_TOOL_RUBBER, data[1] & 0x20);
- wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x08);
+ input_report_key(input, BTN_TOOL_RUBBER, data[1] & 0x20);
+ input_report_key(input, BTN_TOUCH, data[1] & 0x08);
wacom->id[0] = ERASER_DEVICE_ID;
} else {
- wacom_report_key(wcombo, BTN_TOOL_PEN, data[1] & 0x20);
- wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x01);
+ input_report_key(input, BTN_TOOL_PEN, data[1] & 0x20);
+ input_report_key(input, BTN_TOUCH, data[1] & 0x01);
wacom->id[0] = STYLUS_DEVICE_ID;
}
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]); /* report tool id */
- wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[2]));
- wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[4]));
- wacom_report_abs(wcombo, ABS_PRESSURE, wacom_le16_to_cpu(&data[6]));
- wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02);
- wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x10);
+ input_report_abs(input, ABS_MISC, wacom->id[0]); /* report tool id */
+ input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2]));
+ input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4]));
+ input_report_abs(input, ABS_PRESSURE, le16_to_cpup((__le16 *)&data[6]));
+ input_report_key(input, BTN_STYLUS, data[1] & 0x02);
+ input_report_key(input, BTN_STYLUS2, data[1] & 0x10);
return 1;
}
-static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
+static int wacom_graphire_irq(struct wacom_wac *wacom)
{
struct wacom_features *features = &wacom->features;
unsigned char *data = wacom->data;
- int x, y, rw;
- static int penData = 0;
+ struct input_dev *input = wacom->input;
+ int prox;
+ int rw = 0;
+ int retval = 0;
if (data[0] != WACOM_REPORT_PENABLED) {
dbg("wacom_graphire_irq: received unknown report #%d", data[0]);
- return 0;
+ goto exit;
}
- if (data[1] & 0x80) {
- /* in prox and not a pad data */
- penData = 1;
-
- switch ((data[1] >> 5) & 3) {
+ prox = data[1] & 0x80;
+ if (prox || wacom->id[0]) {
+ if (prox) {
+ switch ((data[1] >> 5) & 3) {
case 0: /* Pen */
wacom->tool[0] = BTN_TOOL_PEN;
@@ -180,128 +188,89 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
break;
case 2: /* Mouse with wheel */
- wacom_report_key(wcombo, BTN_MIDDLE, data[1] & 0x04);
- if (features->type == WACOM_G4 || features->type == WACOM_MO) {
- rw = data[7] & 0x04 ? (data[7] & 0x03)-4 : (data[7] & 0x03);
- wacom_report_rel(wcombo, REL_WHEEL, -rw);
- } else
- wacom_report_rel(wcombo, REL_WHEEL, -(signed char) data[6]);
+ input_report_key(input, BTN_MIDDLE, data[1] & 0x04);
/* fall through */
case 3: /* Mouse without wheel */
wacom->tool[0] = BTN_TOOL_MOUSE;
wacom->id[0] = CURSOR_DEVICE_ID;
- wacom_report_key(wcombo, BTN_LEFT, data[1] & 0x01);
- wacom_report_key(wcombo, BTN_RIGHT, data[1] & 0x02);
- if (features->type == WACOM_G4 || features->type == WACOM_MO)
- wacom_report_abs(wcombo, ABS_DISTANCE, data[6] & 0x3f);
- else
- wacom_report_abs(wcombo, ABS_DISTANCE, data[7] & 0x3f);
break;
+ }
}
- x = wacom_le16_to_cpu(&data[2]);
- y = wacom_le16_to_cpu(&data[4]);
- wacom_report_abs(wcombo, ABS_X, x);
- wacom_report_abs(wcombo, ABS_Y, y);
+ input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2]));
+ input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4]));
if (wacom->tool[0] != BTN_TOOL_MOUSE) {
- wacom_report_abs(wcombo, ABS_PRESSURE, data[6] | ((data[7] & 0x01) << 8));
- wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x01);
- wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02);
- wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x04);
- }
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]); /* report tool id */
- wacom_report_key(wcombo, wacom->tool[0], 1);
- } else if (wacom->id[0]) {
- wacom_report_abs(wcombo, ABS_X, 0);
- wacom_report_abs(wcombo, ABS_Y, 0);
- if (wacom->tool[0] == BTN_TOOL_MOUSE) {
- wacom_report_key(wcombo, BTN_LEFT, 0);
- wacom_report_key(wcombo, BTN_RIGHT, 0);
- wacom_report_abs(wcombo, ABS_DISTANCE, 0);
+ input_report_abs(input, ABS_PRESSURE, data[6] | ((data[7] & 0x01) << 8));
+ input_report_key(input, BTN_TOUCH, data[1] & 0x01);
+ input_report_key(input, BTN_STYLUS, data[1] & 0x02);
+ input_report_key(input, BTN_STYLUS2, data[1] & 0x04);
} else {
- wacom_report_abs(wcombo, ABS_PRESSURE, 0);
- wacom_report_key(wcombo, BTN_TOUCH, 0);
- wacom_report_key(wcombo, BTN_STYLUS, 0);
- wacom_report_key(wcombo, BTN_STYLUS2, 0);
+ input_report_key(input, BTN_LEFT, data[1] & 0x01);
+ input_report_key(input, BTN_RIGHT, data[1] & 0x02);
+ if (features->type == WACOM_G4 ||
+ features->type == WACOM_MO) {
+ input_report_abs(input, ABS_DISTANCE, data[6] & 0x3f);
+ rw = (signed)(data[7] & 0x04) - (data[7] & 0x03);
+ } else {
+ input_report_abs(input, ABS_DISTANCE, data[7] & 0x3f);
+ rw = -(signed)data[6];
+ }
+ input_report_rel(input, REL_WHEEL, rw);
}
- wacom->id[0] = 0;
- wacom_report_abs(wcombo, ABS_MISC, 0); /* reset tool id */
- wacom_report_key(wcombo, wacom->tool[0], 0);
+
+ if (!prox)
+ wacom->id[0] = 0;
+ input_report_abs(input, ABS_MISC, wacom->id[0]); /* report tool id */
+ input_report_key(input, wacom->tool[0], prox);
+ input_sync(input); /* sync last event */
}
/* send pad data */
switch (features->type) {
- case WACOM_G4:
- if (data[7] & 0xf8) {
- if (penData) {
- wacom_input_sync(wcombo); /* sync last event */
- if (!wacom->id[0])
- penData = 0;
- }
+ case WACOM_G4:
+ prox = data[7] & 0xf8;
+ if (prox || wacom->id[1]) {
wacom->id[1] = PAD_DEVICE_ID;
- wacom_report_key(wcombo, BTN_0, (data[7] & 0x40));
- wacom_report_key(wcombo, BTN_4, (data[7] & 0x80));
+ input_report_key(input, BTN_0, (data[7] & 0x40));
+ input_report_key(input, BTN_4, (data[7] & 0x80));
rw = ((data[7] & 0x18) >> 3) - ((data[7] & 0x20) >> 3);
- wacom_report_rel(wcombo, REL_WHEEL, rw);
- wacom_report_key(wcombo, BTN_TOOL_FINGER, 0xf0);
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]);
- wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
- } else if (wacom->id[1]) {
- if (penData) {
- wacom_input_sync(wcombo); /* sync last event */
- if (!wacom->id[0])
- penData = 0;
- }
- wacom->id[1] = 0;
- wacom_report_key(wcombo, BTN_0, (data[7] & 0x40));
- wacom_report_key(wcombo, BTN_4, (data[7] & 0x80));
- wacom_report_rel(wcombo, REL_WHEEL, 0);
- wacom_report_key(wcombo, BTN_TOOL_FINGER, 0);
- wacom_report_abs(wcombo, ABS_MISC, 0);
- wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
+ input_report_rel(input, REL_WHEEL, rw);
+ input_report_key(input, BTN_TOOL_FINGER, 0xf0);
+ if (!prox)
+ wacom->id[1] = 0;
+ input_report_abs(input, ABS_MISC, wacom->id[1]);
+ input_event(input, EV_MSC, MSC_SERIAL, 0xf0);
+ retval = 1;
}
break;
- case WACOM_MO:
- if ((data[7] & 0xf8) || (data[8] & 0xff)) {
- if (penData) {
- wacom_input_sync(wcombo); /* sync last event */
- if (!wacom->id[0])
- penData = 0;
- }
+
+ case WACOM_MO:
+ prox = (data[7] & 0xf8) || data[8];
+ if (prox || wacom->id[1]) {
wacom->id[1] = PAD_DEVICE_ID;
- wacom_report_key(wcombo, BTN_0, (data[7] & 0x08));
- wacom_report_key(wcombo, BTN_1, (data[7] & 0x20));
- wacom_report_key(wcombo, BTN_4, (data[7] & 0x10));
- wacom_report_key(wcombo, BTN_5, (data[7] & 0x40));
- wacom_report_abs(wcombo, ABS_WHEEL, (data[8] & 0x7f));
- wacom_report_key(wcombo, BTN_TOOL_FINGER, 0xf0);
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]);
- wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
- } else if (wacom->id[1]) {
- if (penData) {
- wacom_input_sync(wcombo); /* sync last event */
- if (!wacom->id[0])
- penData = 0;
- }
- wacom->id[1] = 0;
- wacom_report_key(wcombo, BTN_0, (data[7] & 0x08));
- wacom_report_key(wcombo, BTN_1, (data[7] & 0x20));
- wacom_report_key(wcombo, BTN_4, (data[7] & 0x10));
- wacom_report_key(wcombo, BTN_5, (data[7] & 0x40));
- wacom_report_abs(wcombo, ABS_WHEEL, (data[8] & 0x7f));
- wacom_report_key(wcombo, BTN_TOOL_FINGER, 0);
- wacom_report_abs(wcombo, ABS_MISC, 0);
- wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
+ input_report_key(input, BTN_0, (data[7] & 0x08));
+ input_report_key(input, BTN_1, (data[7] & 0x20));
+ input_report_key(input, BTN_4, (data[7] & 0x10));
+ input_report_key(input, BTN_5, (data[7] & 0x40));
+ input_report_abs(input, ABS_WHEEL, (data[8] & 0x7f));
+ input_report_key(input, BTN_TOOL_FINGER, 0xf0);
+ if (!prox)
+ wacom->id[1] = 0;
+ input_report_abs(input, ABS_MISC, wacom->id[1]);
+ input_event(input, EV_MSC, MSC_SERIAL, 0xf0);
}
+ retval = 1;
break;
}
- return 1;
+exit:
+ return retval;
}
-static int wacom_intuos_inout(struct wacom_wac *wacom, void *wcombo)
+static int wacom_intuos_inout(struct wacom_wac *wacom)
{
struct wacom_features *features = &wacom->features;
unsigned char *data = wacom->data;
+ struct input_dev *input = wacom->input;
int idx = 0;
/* tool number */
@@ -316,64 +285,73 @@ static int wacom_intuos_inout(struct wacom_wac *wacom, void *wcombo)
(data[6] << 4) + (data[7] >> 4);
wacom->id[idx] = (data[2] << 4) | (data[3] >> 4);
+
switch (wacom->id[idx]) {
- case 0x812: /* Inking pen */
- case 0x801: /* Intuos3 Inking pen */
- case 0x20802: /* Intuos4 Classic Pen */
- case 0x012:
- wacom->tool[idx] = BTN_TOOL_PENCIL;
- break;
- case 0x822: /* Pen */
- case 0x842:
- case 0x852:
- case 0x823: /* Intuos3 Grip Pen */
- case 0x813: /* Intuos3 Classic Pen */
- case 0x885: /* Intuos3 Marker Pen */
- case 0x802: /* Intuos4 Grip Pen Eraser */
- case 0x804: /* Intuos4 Marker Pen */
- case 0x40802: /* Intuos4 Classic Pen */
- case 0x022:
- wacom->tool[idx] = BTN_TOOL_PEN;
- break;
- case 0x832: /* Stroke pen */
- case 0x032:
- wacom->tool[idx] = BTN_TOOL_BRUSH;
- break;
- case 0x007: /* Mouse 4D and 2D */
- case 0x09c:
- case 0x094:
- case 0x017: /* Intuos3 2D Mouse */
- case 0x806: /* Intuos4 Mouse */
- wacom->tool[idx] = BTN_TOOL_MOUSE;
- break;
- case 0x096: /* Lens cursor */
- case 0x097: /* Intuos3 Lens cursor */
- case 0x006: /* Intuos4 Lens cursor */
- wacom->tool[idx] = BTN_TOOL_LENS;
- break;
- case 0x82a: /* Eraser */
- case 0x85a:
- case 0x91a:
- case 0xd1a:
- case 0x0fa:
- case 0x82b: /* Intuos3 Grip Pen Eraser */
- case 0x81b: /* Intuos3 Classic Pen Eraser */
- case 0x91b: /* Intuos3 Airbrush Eraser */
- case 0x80c: /* Intuos4 Marker Pen Eraser */
- case 0x80a: /* Intuos4 Grip Pen Eraser */
- case 0x4080a: /* Intuos4 Classic Pen Eraser */
- case 0x90a: /* Intuos4 Airbrush Eraser */
- wacom->tool[idx] = BTN_TOOL_RUBBER;
- break;
- case 0xd12:
- case 0x912:
- case 0x112:
- case 0x913: /* Intuos3 Airbrush */
- case 0x902: /* Intuos4 Airbrush */
- wacom->tool[idx] = BTN_TOOL_AIRBRUSH;
- break;
- default: /* Unknown tool */
- wacom->tool[idx] = BTN_TOOL_PEN;
+ case 0x812: /* Inking pen */
+ case 0x801: /* Intuos3 Inking pen */
+ case 0x20802: /* Intuos4 Classic Pen */
+ case 0x012:
+ wacom->tool[idx] = BTN_TOOL_PENCIL;
+ break;
+
+ case 0x822: /* Pen */
+ case 0x842:
+ case 0x852:
+ case 0x823: /* Intuos3 Grip Pen */
+ case 0x813: /* Intuos3 Classic Pen */
+ case 0x885: /* Intuos3 Marker Pen */
+ case 0x802: /* Intuos4 Grip Pen Eraser */
+ case 0x804: /* Intuos4 Marker Pen */
+ case 0x40802: /* Intuos4 Classic Pen */
+ case 0x022:
+ wacom->tool[idx] = BTN_TOOL_PEN;
+ break;
+
+ case 0x832: /* Stroke pen */
+ case 0x032:
+ wacom->tool[idx] = BTN_TOOL_BRUSH;
+ break;
+
+ case 0x007: /* Mouse 4D and 2D */
+ case 0x09c:
+ case 0x094:
+ case 0x017: /* Intuos3 2D Mouse */
+ case 0x806: /* Intuos4 Mouse */
+ wacom->tool[idx] = BTN_TOOL_MOUSE;
+ break;
+
+ case 0x096: /* Lens cursor */
+ case 0x097: /* Intuos3 Lens cursor */
+ case 0x006: /* Intuos4 Lens cursor */
+ wacom->tool[idx] = BTN_TOOL_LENS;
+ break;
+
+ case 0x82a: /* Eraser */
+ case 0x85a:
+ case 0x91a:
+ case 0xd1a:
+ case 0x0fa:
+ case 0x82b: /* Intuos3 Grip Pen Eraser */
+ case 0x81b: /* Intuos3 Classic Pen Eraser */
+ case 0x91b: /* Intuos3 Airbrush Eraser */
+ case 0x80c: /* Intuos4 Marker Pen Eraser */
+ case 0x80a: /* Intuos4 Grip Pen Eraser */
+ case 0x4080a: /* Intuos4 Classic Pen Eraser */
+ case 0x90a: /* Intuos4 Airbrush Eraser */
+ wacom->tool[idx] = BTN_TOOL_RUBBER;
+ break;
+
+ case 0xd12:
+ case 0x912:
+ case 0x112:
+ case 0x913: /* Intuos3 Airbrush */
+ case 0x902: /* Intuos4 Airbrush */
+ wacom->tool[idx] = BTN_TOOL_AIRBRUSH;
+ break;
+
+ default: /* Unknown tool */
+ wacom->tool[idx] = BTN_TOOL_PEN;
+ break;
}
return 1;
}
@@ -384,41 +362,42 @@ static int wacom_intuos_inout(struct wacom_wac *wacom, void *wcombo)
* Reset all states otherwise we lose the initial states
* when in-prox next time
*/
- wacom_report_abs(wcombo, ABS_X, 0);
- wacom_report_abs(wcombo, ABS_Y, 0);
- wacom_report_abs(wcombo, ABS_DISTANCE, 0);
- wacom_report_abs(wcombo, ABS_TILT_X, 0);
- wacom_report_abs(wcombo, ABS_TILT_Y, 0);
+ input_report_abs(input, ABS_X, 0);
+ input_report_abs(input, ABS_Y, 0);
+ input_report_abs(input, ABS_DISTANCE, 0);
+ input_report_abs(input, ABS_TILT_X, 0);
+ input_report_abs(input, ABS_TILT_Y, 0);
if (wacom->tool[idx] >= BTN_TOOL_MOUSE) {
- wacom_report_key(wcombo, BTN_LEFT, 0);
- wacom_report_key(wcombo, BTN_MIDDLE, 0);
- wacom_report_key(wcombo, BTN_RIGHT, 0);
- wacom_report_key(wcombo, BTN_SIDE, 0);
- wacom_report_key(wcombo, BTN_EXTRA, 0);
- wacom_report_abs(wcombo, ABS_THROTTLE, 0);
- wacom_report_abs(wcombo, ABS_RZ, 0);
+ input_report_key(input, BTN_LEFT, 0);
+ input_report_key(input, BTN_MIDDLE, 0);
+ input_report_key(input, BTN_RIGHT, 0);
+ input_report_key(input, BTN_SIDE, 0);
+ input_report_key(input, BTN_EXTRA, 0);
+ input_report_abs(input, ABS_THROTTLE, 0);
+ input_report_abs(input, ABS_RZ, 0);
} else {
- wacom_report_abs(wcombo, ABS_PRESSURE, 0);
- wacom_report_key(wcombo, BTN_STYLUS, 0);
- wacom_report_key(wcombo, BTN_STYLUS2, 0);
- wacom_report_key(wcombo, BTN_TOUCH, 0);
- wacom_report_abs(wcombo, ABS_WHEEL, 0);
+ input_report_abs(input, ABS_PRESSURE, 0);
+ input_report_key(input, BTN_STYLUS, 0);
+ input_report_key(input, BTN_STYLUS2, 0);
+ input_report_key(input, BTN_TOUCH, 0);
+ input_report_abs(input, ABS_WHEEL, 0);
if (features->type >= INTUOS3S)
- wacom_report_abs(wcombo, ABS_Z, 0);
+ input_report_abs(input, ABS_Z, 0);
}
- wacom_report_key(wcombo, wacom->tool[idx], 0);
- wacom_report_abs(wcombo, ABS_MISC, 0); /* reset tool id */
- wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
+ input_report_key(input, wacom->tool[idx], 0);
+ input_report_abs(input, ABS_MISC, 0); /* reset tool id */
+ input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
wacom->id[idx] = 0;
return 2;
}
return 0;
}
-static void wacom_intuos_general(struct wacom_wac *wacom, void *wcombo)
+static void wacom_intuos_general(struct wacom_wac *wacom)
{
struct wacom_features *features = &wacom->features;
unsigned char *data = wacom->data;
+ struct input_dev *input = wacom->input;
unsigned int t;
/* general pen packet */
@@ -426,30 +405,30 @@ static void wacom_intuos_general(struct wacom_wac *wacom, void *wcombo)
t = (data[6] << 2) | ((data[7] >> 6) & 3);
if (features->type >= INTUOS4S && features->type <= INTUOS4L)
t = (t << 1) | (data[1] & 1);
- wacom_report_abs(wcombo, ABS_PRESSURE, t);
- wacom_report_abs(wcombo, ABS_TILT_X,
+ input_report_abs(input, ABS_PRESSURE, t);
+ input_report_abs(input, ABS_TILT_X,
((data[7] << 1) & 0x7e) | (data[8] >> 7));
- wacom_report_abs(wcombo, ABS_TILT_Y, data[8] & 0x7f);
- wacom_report_key(wcombo, BTN_STYLUS, data[1] & 2);
- wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 4);
- wacom_report_key(wcombo, BTN_TOUCH, t > 10);
+ input_report_abs(input, ABS_TILT_Y, data[8] & 0x7f);
+ input_report_key(input, BTN_STYLUS, data[1] & 2);
+ input_report_key(input, BTN_STYLUS2, data[1] & 4);
+ input_report_key(input, BTN_TOUCH, t > 10);
}
/* airbrush second packet */
if ((data[1] & 0xbc) == 0xb4) {
- wacom_report_abs(wcombo, ABS_WHEEL,
+ input_report_abs(input, ABS_WHEEL,
(data[6] << 2) | ((data[7] >> 6) & 3));
- wacom_report_abs(wcombo, ABS_TILT_X,
+ input_report_abs(input, ABS_TILT_X,
((data[7] << 1) & 0x7e) | (data[8] >> 7));
- wacom_report_abs(wcombo, ABS_TILT_Y, data[8] & 0x7f);
+ input_report_abs(input, ABS_TILT_Y, data[8] & 0x7f);
}
- return;
}
-static int wacom_intuos_irq(struct wacom_wac *wacom, void *wcombo)
+static int wacom_intuos_irq(struct wacom_wac *wacom)
{
struct wacom_features *features = &wacom->features;
unsigned char *data = wacom->data;
+ struct input_dev *input = wacom->input;
unsigned int t;
int idx = 0, result;
@@ -470,61 +449,61 @@ static int wacom_intuos_irq(struct wacom_wac *wacom, void *wcombo)
wacom->tool[1] = BTN_TOOL_FINGER;
if (features->type >= INTUOS4S && features->type <= INTUOS4L) {
- wacom_report_key(wcombo, BTN_0, (data[2] & 0x01));
- wacom_report_key(wcombo, BTN_1, (data[3] & 0x01));
- wacom_report_key(wcombo, BTN_2, (data[3] & 0x02));
- wacom_report_key(wcombo, BTN_3, (data[3] & 0x04));
- wacom_report_key(wcombo, BTN_4, (data[3] & 0x08));
- wacom_report_key(wcombo, BTN_5, (data[3] & 0x10));
- wacom_report_key(wcombo, BTN_6, (data[3] & 0x20));
+ input_report_key(input, BTN_0, (data[2] & 0x01));
+ input_report_key(input, BTN_1, (data[3] & 0x01));
+ input_report_key(input, BTN_2, (data[3] & 0x02));
+ input_report_key(input, BTN_3, (data[3] & 0x04));
+ input_report_key(input, BTN_4, (data[3] & 0x08));
+ input_report_key(input, BTN_5, (data[3] & 0x10));
+ input_report_key(input, BTN_6, (data[3] & 0x20));
if (data[1] & 0x80) {
- wacom_report_abs(wcombo, ABS_WHEEL, (data[1] & 0x7f));
+ input_report_abs(input, ABS_WHEEL, (data[1] & 0x7f));
} else {
/* Out of proximity, clear wheel value. */
- wacom_report_abs(wcombo, ABS_WHEEL, 0);
+ input_report_abs(input, ABS_WHEEL, 0);
}
if (features->type != INTUOS4S) {
- wacom_report_key(wcombo, BTN_7, (data[3] & 0x40));
- wacom_report_key(wcombo, BTN_8, (data[3] & 0x80));
+ input_report_key(input, BTN_7, (data[3] & 0x40));
+ input_report_key(input, BTN_8, (data[3] & 0x80));
}
if (data[1] | (data[2] & 0x01) | data[3]) {
- wacom_report_key(wcombo, wacom->tool[1], 1);
- wacom_report_abs(wcombo, ABS_MISC, PAD_DEVICE_ID);
+ input_report_key(input, wacom->tool[1], 1);
+ input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
} else {
- wacom_report_key(wcombo, wacom->tool[1], 0);
- wacom_report_abs(wcombo, ABS_MISC, 0);
+ input_report_key(input, wacom->tool[1], 0);
+ input_report_abs(input, ABS_MISC, 0);
}
} else {
- wacom_report_key(wcombo, BTN_0, (data[5] & 0x01));
- wacom_report_key(wcombo, BTN_1, (data[5] & 0x02));
- wacom_report_key(wcombo, BTN_2, (data[5] & 0x04));
- wacom_report_key(wcombo, BTN_3, (data[5] & 0x08));
- wacom_report_key(wcombo, BTN_4, (data[6] & 0x01));
- wacom_report_key(wcombo, BTN_5, (data[6] & 0x02));
- wacom_report_key(wcombo, BTN_6, (data[6] & 0x04));
- wacom_report_key(wcombo, BTN_7, (data[6] & 0x08));
- wacom_report_key(wcombo, BTN_8, (data[5] & 0x10));
- wacom_report_key(wcombo, BTN_9, (data[6] & 0x10));
- wacom_report_abs(wcombo, ABS_RX, ((data[1] & 0x1f) << 8) | data[2]);
- wacom_report_abs(wcombo, ABS_RY, ((data[3] & 0x1f) << 8) | data[4]);
+ input_report_key(input, BTN_0, (data[5] & 0x01));
+ input_report_key(input, BTN_1, (data[5] & 0x02));
+ input_report_key(input, BTN_2, (data[5] & 0x04));
+ input_report_key(input, BTN_3, (data[5] & 0x08));
+ input_report_key(input, BTN_4, (data[6] & 0x01));
+ input_report_key(input, BTN_5, (data[6] & 0x02));
+ input_report_key(input, BTN_6, (data[6] & 0x04));
+ input_report_key(input, BTN_7, (data[6] & 0x08));
+ input_report_key(input, BTN_8, (data[5] & 0x10));
+ input_report_key(input, BTN_9, (data[6] & 0x10));
+ input_report_abs(input, ABS_RX, ((data[1] & 0x1f) << 8) | data[2]);
+ input_report_abs(input, ABS_RY, ((data[3] & 0x1f) << 8) | data[4]);
if ((data[5] & 0x1f) | (data[6] & 0x1f) | (data[1] & 0x1f) |
data[2] | (data[3] & 0x1f) | data[4]) {
- wacom_report_key(wcombo, wacom->tool[1], 1);
- wacom_report_abs(wcombo, ABS_MISC, PAD_DEVICE_ID);
+ input_report_key(input, wacom->tool[1], 1);
+ input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
} else {
- wacom_report_key(wcombo, wacom->tool[1], 0);
- wacom_report_abs(wcombo, ABS_MISC, 0);
+ input_report_key(input, wacom->tool[1], 0);
+ input_report_abs(input, ABS_MISC, 0);
}
}
- wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xffffffff);
+ input_event(input, EV_MSC, MSC_SERIAL, 0xffffffff);
return 1;
}
/* process in/out prox events */
- result = wacom_intuos_inout(wacom, wcombo);
+ result = wacom_intuos_inout(wacom);
if (result)
- return result-1;
+ return result - 1;
/* don't proceed if we don't know the ID */
if (!wacom->id[idx])
@@ -545,17 +524,17 @@ static int wacom_intuos_irq(struct wacom_wac *wacom, void *wcombo)
return 0;
if (features->type >= INTUOS3S) {
- wacom_report_abs(wcombo, ABS_X, (data[2] << 9) | (data[3] << 1) | ((data[9] >> 1) & 1));
- wacom_report_abs(wcombo, ABS_Y, (data[4] << 9) | (data[5] << 1) | (data[9] & 1));
- wacom_report_abs(wcombo, ABS_DISTANCE, ((data[9] >> 2) & 0x3f));
+ input_report_abs(input, ABS_X, (data[2] << 9) | (data[3] << 1) | ((data[9] >> 1) & 1));
+ input_report_abs(input, ABS_Y, (data[4] << 9) | (data[5] << 1) | (data[9] & 1));
+ input_report_abs(input, ABS_DISTANCE, ((data[9] >> 2) & 0x3f));
} else {
- wacom_report_abs(wcombo, ABS_X, wacom_be16_to_cpu(&data[2]));
- wacom_report_abs(wcombo, ABS_Y, wacom_be16_to_cpu(&data[4]));
- wacom_report_abs(wcombo, ABS_DISTANCE, ((data[9] >> 3) & 0x1f));
+ input_report_abs(input, ABS_X, be16_to_cpup((__be16 *)&data[2]));
+ input_report_abs(input, ABS_Y, be16_to_cpup((__be16 *)&data[4]));
+ input_report_abs(input, ABS_DISTANCE, ((data[9] >> 3) & 0x1f));
}
/* process general packets */
- wacom_intuos_general(wacom, wcombo);
+ wacom_intuos_general(wacom);
/* 4D mouse, 2D mouse, marker pen rotation, tilt mouse, or Lens cursor packets */
if ((data[1] & 0xbc) == 0xa8 || (data[1] & 0xbe) == 0xb0 || (data[1] & 0xbc) == 0xac) {
@@ -567,174 +546,191 @@ static int wacom_intuos_irq(struct wacom_wac *wacom, void *wcombo)
t = (data[6] << 3) | ((data[7] >> 5) & 7);
t = (data[7] & 0x20) ? ((t > 900) ? ((t-1) / 2 - 1350) :
((t-1) / 2 + 450)) : (450 - t / 2) ;
- wacom_report_abs(wcombo, ABS_Z, t);
+ input_report_abs(input, ABS_Z, t);
} else {
/* 4D mouse rotation packet */
t = (data[6] << 3) | ((data[7] >> 5) & 7);
- wacom_report_abs(wcombo, ABS_RZ, (data[7] & 0x20) ?
+ input_report_abs(input, ABS_RZ, (data[7] & 0x20) ?
((t - 1) / 2) : -t / 2);
}
} else if (!(data[1] & 0x10) && features->type < INTUOS3S) {
/* 4D mouse packet */
- wacom_report_key(wcombo, BTN_LEFT, data[8] & 0x01);
- wacom_report_key(wcombo, BTN_MIDDLE, data[8] & 0x02);
- wacom_report_key(wcombo, BTN_RIGHT, data[8] & 0x04);
+ input_report_key(input, BTN_LEFT, data[8] & 0x01);
+ input_report_key(input, BTN_MIDDLE, data[8] & 0x02);
+ input_report_key(input, BTN_RIGHT, data[8] & 0x04);
- wacom_report_key(wcombo, BTN_SIDE, data[8] & 0x20);
- wacom_report_key(wcombo, BTN_EXTRA, data[8] & 0x10);
+ input_report_key(input, BTN_SIDE, data[8] & 0x20);
+ input_report_key(input, BTN_EXTRA, data[8] & 0x10);
t = (data[6] << 2) | ((data[7] >> 6) & 3);
- wacom_report_abs(wcombo, ABS_THROTTLE, (data[8] & 0x08) ? -t : t);
+ input_report_abs(input, ABS_THROTTLE, (data[8] & 0x08) ? -t : t);
} else if (wacom->tool[idx] == BTN_TOOL_MOUSE) {
/* I4 mouse */
if (features->type >= INTUOS4S && features->type <= INTUOS4L) {
- wacom_report_key(wcombo, BTN_LEFT, data[6] & 0x01);
- wacom_report_key(wcombo, BTN_MIDDLE, data[6] & 0x02);
- wacom_report_key(wcombo, BTN_RIGHT, data[6] & 0x04);
- wacom_report_rel(wcombo, REL_WHEEL, ((data[7] & 0x80) >> 7)
+ input_report_key(input, BTN_LEFT, data[6] & 0x01);
+ input_report_key(input, BTN_MIDDLE, data[6] & 0x02);
+ input_report_key(input, BTN_RIGHT, data[6] & 0x04);
+ input_report_rel(input, REL_WHEEL, ((data[7] & 0x80) >> 7)
- ((data[7] & 0x40) >> 6));
- wacom_report_key(wcombo, BTN_SIDE, data[6] & 0x08);
- wacom_report_key(wcombo, BTN_EXTRA, data[6] & 0x10);
+ input_report_key(input, BTN_SIDE, data[6] & 0x08);
+ input_report_key(input, BTN_EXTRA, data[6] & 0x10);
- wacom_report_abs(wcombo, ABS_TILT_X,
+ input_report_abs(input, ABS_TILT_X,
((data[7] << 1) & 0x7e) | (data[8] >> 7));
- wacom_report_abs(wcombo, ABS_TILT_Y, data[8] & 0x7f);
+ input_report_abs(input, ABS_TILT_Y, data[8] & 0x7f);
} else {
/* 2D mouse packet */
- wacom_report_key(wcombo, BTN_LEFT, data[8] & 0x04);
- wacom_report_key(wcombo, BTN_MIDDLE, data[8] & 0x08);
- wacom_report_key(wcombo, BTN_RIGHT, data[8] & 0x10);
- wacom_report_rel(wcombo, REL_WHEEL, (data[8] & 0x01)
+ input_report_key(input, BTN_LEFT, data[8] & 0x04);
+ input_report_key(input, BTN_MIDDLE, data[8] & 0x08);
+ input_report_key(input, BTN_RIGHT, data[8] & 0x10);
+ input_report_rel(input, REL_WHEEL, (data[8] & 0x01)
- ((data[8] & 0x02) >> 1));
/* I3 2D mouse side buttons */
if (features->type >= INTUOS3S && features->type <= INTUOS3L) {
- wacom_report_key(wcombo, BTN_SIDE, data[8] & 0x40);
- wacom_report_key(wcombo, BTN_EXTRA, data[8] & 0x20);
+ input_report_key(input, BTN_SIDE, data[8] & 0x40);
+ input_report_key(input, BTN_EXTRA, data[8] & 0x20);
}
}
} else if ((features->type < INTUOS3S || features->type == INTUOS3L ||
features->type == INTUOS4L) &&
wacom->tool[idx] == BTN_TOOL_LENS) {
/* Lens cursor packets */
- wacom_report_key(wcombo, BTN_LEFT, data[8] & 0x01);
- wacom_report_key(wcombo, BTN_MIDDLE, data[8] & 0x02);
- wacom_report_key(wcombo, BTN_RIGHT, data[8] & 0x04);
- wacom_report_key(wcombo, BTN_SIDE, data[8] & 0x10);
- wacom_report_key(wcombo, BTN_EXTRA, data[8] & 0x08);
+ input_report_key(input, BTN_LEFT, data[8] & 0x01);
+ input_report_key(input, BTN_MIDDLE, data[8] & 0x02);
+ input_report_key(input, BTN_RIGHT, data[8] & 0x04);
+ input_report_key(input, BTN_SIDE, data[8] & 0x10);
+ input_report_key(input, BTN_EXTRA, data[8] & 0x08);
}
}
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[idx]); /* report tool id */
- wacom_report_key(wcombo, wacom->tool[idx], 1);
- wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
+ input_report_abs(input, ABS_MISC, wacom->id[idx]); /* report tool id */
+ input_report_key(input, wacom->tool[idx], 1);
+ input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
return 1;
}
-static void wacom_tpc_finger_in(struct wacom_wac *wacom, void *wcombo, char *data, int idx)
+static void wacom_tpc_finger_in(struct wacom_wac *wacom, char *data, int idx)
{
- wacom_report_abs(wcombo, ABS_X,
- (data[2 + idx * 2] & 0xff) | ((data[3 + idx * 2] & 0x7f) << 8));
- wacom_report_abs(wcombo, ABS_Y,
- (data[6 + idx * 2] & 0xff) | ((data[7 + idx * 2] & 0x7f) << 8));
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
- wacom_report_key(wcombo, wacom->tool[idx], 1);
- if (idx)
- wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
- else
- wacom_report_key(wcombo, BTN_TOUCH, 1);
+ struct input_dev *input = wacom->input;
+ int finger = idx + 1;
+ int x = le16_to_cpup((__le16 *)&data[finger * 2]) & 0x7fff;
+ int y = le16_to_cpup((__le16 *)&data[4 + finger * 2]) & 0x7fff;
+
+ /*
+ * Work around input core suppressing "duplicate" events since
+ * we are abusing ABS_X/ABS_Y to transmit multi-finger data.
+ * This should go away once we switch to true multitouch
+ * protocol.
+ */
+ if (wacom->last_finger != finger) {
+ if (x == input->abs[ABS_X])
+ x++;
+
+ if (y == input->abs[ABS_Y])
+ y++;
+ }
+
+ input_report_abs(input, ABS_X, x);
+ input_report_abs(input, ABS_Y, y);
+ input_report_abs(input, ABS_MISC, wacom->id[0]);
+ input_report_key(input, wacom->tool[finger], 1);
+ if (!idx)
+ input_report_key(input, BTN_TOUCH, 1);
+ input_event(input, EV_MSC, MSC_SERIAL, finger);
+ input_sync(wacom->input);
+
+ wacom->last_finger = finger;
}
-static void wacom_tpc_touch_out(struct wacom_wac *wacom, void *wcombo, int idx)
+static void wacom_tpc_touch_out(struct wacom_wac *wacom, int idx)
{
- wacom_report_abs(wcombo, ABS_X, 0);
- wacom_report_abs(wcombo, ABS_Y, 0);
- wacom_report_abs(wcombo, ABS_MISC, 0);
- wacom_report_key(wcombo, wacom->tool[idx], 0);
- if (idx)
- wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
- else
- wacom_report_key(wcombo, BTN_TOUCH, 0);
- return;
+ struct input_dev *input = wacom->input;
+ int finger = idx + 1;
+
+ input_report_abs(input, ABS_X, 0);
+ input_report_abs(input, ABS_Y, 0);
+ input_report_abs(input, ABS_MISC, 0);
+ input_report_key(input, wacom->tool[finger], 0);
+ if (!idx)
+ input_report_key(input, BTN_TOUCH, 0);
+ input_event(input, EV_MSC, MSC_SERIAL, finger);
+ input_sync(input);
}
-static void wacom_tpc_touch_in(struct wacom_wac *wacom, void *wcombo)
+static void wacom_tpc_touch_in(struct wacom_wac *wacom, size_t len)
{
char *data = wacom->data;
- struct urb *urb = ((struct wacom_combo *)wcombo)->urb;
- static int firstFinger = 0;
- static int secondFinger = 0;
+ struct input_dev *input = wacom->input;
- wacom->tool[0] = BTN_TOOL_DOUBLETAP;
+ wacom->tool[1] = BTN_TOOL_DOUBLETAP;
wacom->id[0] = TOUCH_DEVICE_ID;
- wacom->tool[1] = BTN_TOOL_TRIPLETAP;
+ wacom->tool[2] = BTN_TOOL_TRIPLETAP;
+
+ if (len != WACOM_PKGLEN_TPC1FG) {
- if (urb->actual_length != WACOM_PKGLEN_TPC1FG) {
switch (data[0]) {
- case WACOM_REPORT_TPC1FG:
- wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[2]));
- wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[4]));
- wacom_report_abs(wcombo, ABS_PRESSURE, wacom_le16_to_cpu(&data[6]));
- wacom_report_key(wcombo, BTN_TOUCH, wacom_le16_to_cpu(&data[6]));
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
- wacom_report_key(wcombo, wacom->tool[0], 1);
- break;
- case WACOM_REPORT_TPC2FG:
- /* keep this byte to send proper out-prox event */
- wacom->id[1] = data[1] & 0x03;
-
- if (data[1] & 0x01) {
- wacom_tpc_finger_in(wacom, wcombo, data, 0);
- firstFinger = 1;
- } else if (firstFinger) {
- wacom_tpc_touch_out(wacom, wcombo, 0);
- }
- if (data[1] & 0x02) {
- /* sync first finger data */
- if (firstFinger)
- wacom_input_sync(wcombo);
+ case WACOM_REPORT_TPC1FG:
+ input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2]));
+ input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4]));
+ input_report_abs(input, ABS_PRESSURE, le16_to_cpup((__le16 *)&data[6]));
+ input_report_key(input, BTN_TOUCH, le16_to_cpup((__le16 *)&data[6]));
+ input_report_abs(input, ABS_MISC, wacom->id[0]);
+ input_report_key(input, wacom->tool[1], 1);
+ input_sync(input);
+ break;
- wacom_tpc_finger_in(wacom, wcombo, data, 1);
- secondFinger = 1;
- } else if (secondFinger) {
- /* sync first finger data */
- if (firstFinger)
- wacom_input_sync(wcombo);
+ case WACOM_REPORT_TPC2FG:
+ if (data[1] & 0x01)
+ wacom_tpc_finger_in(wacom, data, 0);
+ else if (wacom->id[1] & 0x01)
+ wacom_tpc_touch_out(wacom, 0);
- wacom_tpc_touch_out(wacom, wcombo, 1);
- secondFinger = 0;
- }
- if (!(data[1] & 0x01))
- firstFinger = 0;
- break;
+ if (data[1] & 0x02)
+ wacom_tpc_finger_in(wacom, data, 1);
+ else if (wacom->id[1] & 0x02)
+ wacom_tpc_touch_out(wacom, 1);
+ break;
}
} else {
- wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[1]));
- wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[3]));
- wacom_report_key(wcombo, BTN_TOUCH, 1);
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
- wacom_report_key(wcombo, wacom->tool[0], 1);
+ input_report_abs(input, ABS_X, get_unaligned_le16(&data[1]));
+ input_report_abs(input, ABS_Y, get_unaligned_le16(&data[3]));
+ input_report_key(input, BTN_TOUCH, 1);
+ input_report_abs(input, ABS_MISC, wacom->id[1]);
+ input_report_key(input, wacom->tool[1], 1);
+ input_sync(input);
}
- return;
}
-static int wacom_tpc_irq(struct wacom_wac *wacom, void *wcombo)
+static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
{
struct wacom_features *features = &wacom->features;
char *data = wacom->data;
- int prox = 0, pressure, idx = -1;
- static int stylusInProx, touchInProx = 1, touchOut;
- struct urb *urb = ((struct wacom_combo *)wcombo)->urb;
+ struct input_dev *input = wacom->input;
+ int prox = 0, pressure;
+ int retval = 0;
dbg("wacom_tpc_irq: received report #%d", data[0]);
- if (urb->actual_length == WACOM_PKGLEN_TPC1FG || /* single touch */
+ if (len == WACOM_PKGLEN_TPC1FG || /* single touch */
data[0] == WACOM_REPORT_TPC1FG || /* single touch */
data[0] == WACOM_REPORT_TPC2FG) { /* 2FG touch */
- if (urb->actual_length == WACOM_PKGLEN_TPC1FG) { /* with touch */
+
+ if (wacom->shared->stylus_in_proximity) {
+ if (wacom->id[1] & 0x01)
+ wacom_tpc_touch_out(wacom, 0);
+
+ if (wacom->id[1] & 0x02)
+ wacom_tpc_touch_out(wacom, 1);
+
+ wacom->id[1] = 0;
+ return 0;
+ }
+
+ if (len == WACOM_PKGLEN_TPC1FG) { /* with touch */
prox = data[0] & 0x01;
} else { /* with capacity */
if (data[0] == WACOM_REPORT_TPC1FG)
@@ -745,168 +741,264 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, void *wcombo)
prox = data[1] & 0x03;
}
- if (!stylusInProx) { /* stylus not in prox */
- if (prox) {
- if (touchInProx) {
- wacom_tpc_touch_in(wacom, wcombo);
- touchOut = 1;
- return 1;
- }
- } else {
+ if (prox) {
+ if (!wacom->id[1])
+ wacom->last_finger = 1;
+ wacom_tpc_touch_in(wacom, len);
+ } else {
+ if (data[0] == WACOM_REPORT_TPC2FG) {
/* 2FGT out-prox */
- if (data[0] == WACOM_REPORT_TPC2FG) {
- idx = (wacom->id[1] & 0x01) - 1;
- if (idx == 0) {
- wacom_tpc_touch_out(wacom, wcombo, idx);
- /* sync first finger event */
- if (wacom->id[1] & 0x02)
- wacom_input_sync(wcombo);
- }
- idx = (wacom->id[1] & 0x02) - 1;
- if (idx == 1)
- wacom_tpc_touch_out(wacom, wcombo, idx);
- } else /* one finger touch */
- wacom_tpc_touch_out(wacom, wcombo, 0);
- touchOut = 0;
- touchInProx = 1;
- return 1;
- }
- } else if (touchOut || !prox) { /* force touch out-prox */
- wacom_tpc_touch_out(wacom, wcombo, 0);
- touchOut = 0;
- touchInProx = 1;
- return 1;
+ if (wacom->id[1] & 0x01)
+ wacom_tpc_touch_out(wacom, 0);
+
+ if (wacom->id[1] & 0x02)
+ wacom_tpc_touch_out(wacom, 1);
+ } else
+ /* one finger touch */
+ wacom_tpc_touch_out(wacom, 0);
+
+ wacom->id[0] = 0;
}
+ /* keep prox bit to send proper out-prox event */
+ wacom->id[1] = prox;
} else if (data[0] == WACOM_REPORT_PENABLED) { /* Penabled */
prox = data[1] & 0x20;
- touchInProx = 0;
+ if (!wacom->shared->stylus_in_proximity) { /* first in prox */
+ /* Going into proximity select tool */
+ wacom->tool[0] = (data[1] & 0x0c) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
+ if (wacom->tool[0] == BTN_TOOL_PEN)
+ wacom->id[0] = STYLUS_DEVICE_ID;
+ else
+ wacom->id[0] = ERASER_DEVICE_ID;
- if (prox) { /* in prox */
- if (!wacom->id[0]) {
- /* Going into proximity select tool */
- wacom->tool[0] = (data[1] & 0x0c) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
- if (wacom->tool[0] == BTN_TOOL_PEN)
- wacom->id[0] = STYLUS_DEVICE_ID;
- else
- wacom->id[0] = ERASER_DEVICE_ID;
- }
- wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02);
- wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x10);
- wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[2]));
- wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[4]));
- pressure = ((data[7] & 0x01) << 8) | data[6];
- if (pressure < 0)
- pressure = features->pressure_max + pressure + 1;
- wacom_report_abs(wcombo, ABS_PRESSURE, pressure);
- wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x05);
- } else {
- wacom_report_abs(wcombo, ABS_X, 0);
- wacom_report_abs(wcombo, ABS_Y, 0);
- wacom_report_abs(wcombo, ABS_PRESSURE, 0);
- wacom_report_key(wcombo, BTN_STYLUS, 0);
- wacom_report_key(wcombo, BTN_STYLUS2, 0);
- wacom_report_key(wcombo, BTN_TOUCH, 0);
+ wacom->shared->stylus_in_proximity = true;
+ }
+ input_report_key(input, BTN_STYLUS, data[1] & 0x02);
+ input_report_key(input, BTN_STYLUS2, data[1] & 0x10);
+ input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2]));
+ input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4]));
+ pressure = ((data[7] & 0x01) << 8) | data[6];
+ if (pressure < 0)
+ pressure = features->pressure_max + pressure + 1;
+ input_report_abs(input, ABS_PRESSURE, pressure);
+ input_report_key(input, BTN_TOUCH, data[1] & 0x05);
+ if (!prox) { /* out-prox */
wacom->id[0] = 0;
- /* pen is out so touch can be enabled now */
- touchInProx = 1;
+ wacom->shared->stylus_in_proximity = false;
}
- wacom_report_key(wcombo, wacom->tool[0], prox);
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
- stylusInProx = prox;
- return 1;
+ input_report_key(input, wacom->tool[0], prox);
+ input_report_abs(input, ABS_MISC, wacom->id[0]);
+ retval = 1;
}
- return 0;
+ return retval;
}
-int wacom_wac_irq(struct wacom_wac *wacom_wac, void *wcombo)
+void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
{
+ bool sync;
+
switch (wacom_wac->features.type) {
- case PENPARTNER:
- return wacom_penpartner_irq(wacom_wac, wcombo);
-
- case PL:
- return wacom_pl_irq(wacom_wac, wcombo);
-
- case WACOM_G4:
- case GRAPHIRE:
- case WACOM_MO:
- return wacom_graphire_irq(wacom_wac, wcombo);
-
- case PTU:
- return wacom_ptu_irq(wacom_wac, wcombo);
-
- case INTUOS:
- case INTUOS3S:
- case INTUOS3:
- case INTUOS3L:
- case INTUOS4S:
- case INTUOS4:
- case INTUOS4L:
- case CINTIQ:
- case WACOM_BEE:
- return wacom_intuos_irq(wacom_wac, wcombo);
-
- case TABLETPC:
- case TABLETPC2FG:
- return wacom_tpc_irq(wacom_wac, wcombo);
-
- default:
- return 0;
+ case PENPARTNER:
+ sync = wacom_penpartner_irq(wacom_wac);
+ break;
+
+ case PL:
+ sync = wacom_pl_irq(wacom_wac);
+ break;
+
+ case WACOM_G4:
+ case GRAPHIRE:
+ case WACOM_MO:
+ sync = wacom_graphire_irq(wacom_wac);
+ break;
+
+ case PTU:
+ sync = wacom_ptu_irq(wacom_wac);
+ break;
+
+ case INTUOS:
+ case INTUOS3S:
+ case INTUOS3:
+ case INTUOS3L:
+ case INTUOS4S:
+ case INTUOS4:
+ case INTUOS4L:
+ case CINTIQ:
+ case WACOM_BEE:
+ sync = wacom_intuos_irq(wacom_wac);
+ break;
+
+ case TABLETPC:
+ case TABLETPC2FG:
+ sync = wacom_tpc_irq(wacom_wac, len);
+ break;
+
+ default:
+ sync = false;
+ break;
}
- return 0;
+
+ if (sync)
+ input_sync(wacom_wac->input);
+}
+
+static void wacom_setup_intuos(struct wacom_wac *wacom_wac)
+{
+ struct input_dev *input_dev = wacom_wac->input;
+
+ input_set_capability(input_dev, EV_MSC, MSC_SERIAL);
+ input_set_capability(input_dev, EV_REL, REL_WHEEL);
+
+ __set_bit(BTN_LEFT, input_dev->keybit);
+ __set_bit(BTN_RIGHT, input_dev->keybit);
+ __set_bit(BTN_MIDDLE, input_dev->keybit);
+ __set_bit(BTN_SIDE, input_dev->keybit);
+ __set_bit(BTN_EXTRA, input_dev->keybit);
+
+ __set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
+ __set_bit(BTN_TOOL_PEN, input_dev->keybit);
+ __set_bit(BTN_TOOL_MOUSE, input_dev->keybit);
+ __set_bit(BTN_TOOL_BRUSH, input_dev->keybit);
+ __set_bit(BTN_TOOL_PENCIL, input_dev->keybit);
+ __set_bit(BTN_TOOL_AIRBRUSH, input_dev->keybit);
+ __set_bit(BTN_TOOL_LENS, input_dev->keybit);
+ __set_bit(BTN_STYLUS, input_dev->keybit);
+ __set_bit(BTN_STYLUS2, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_DISTANCE,
+ 0, wacom_wac->features.distance_max, 0, 0);
+ input_set_abs_params(input_dev, ABS_WHEEL, 0, 1023, 0, 0);
+ input_set_abs_params(input_dev, ABS_TILT_X, 0, 127, 0, 0);
+ input_set_abs_params(input_dev, ABS_TILT_Y, 0, 127, 0, 0);
+ input_set_abs_params(input_dev, ABS_RZ, -900, 899, 0, 0);
+ input_set_abs_params(input_dev, ABS_THROTTLE, -1023, 1023, 0, 0);
}
-void wacom_init_input_dev(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
+void wacom_setup_input_capabilities(struct input_dev *input_dev,
+ struct wacom_wac *wacom_wac)
{
+ struct wacom_features *features = &wacom_wac->features;
+ int i;
+
+ input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+
+ __set_bit(BTN_TOUCH, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_X, 0, features->x_max, 4, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, features->y_max, 4, 0);
+ input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max, 0, 0);
+
+ __set_bit(ABS_MISC, input_dev->absbit);
+
switch (wacom_wac->features.type) {
- case WACOM_MO:
- input_dev_mo(input_dev, wacom_wac);
- case WACOM_G4:
- input_dev_g4(input_dev, wacom_wac);
- /* fall through */
- case GRAPHIRE:
- input_dev_g(input_dev, wacom_wac);
- break;
- case WACOM_BEE:
- input_dev_bee(input_dev, wacom_wac);
- case INTUOS3:
- case INTUOS3L:
- case CINTIQ:
- input_dev_i3(input_dev, wacom_wac);
- /* fall through */
- case INTUOS3S:
- input_dev_i3s(input_dev, wacom_wac);
- /* fall through */
- case INTUOS:
- input_dev_i(input_dev, wacom_wac);
- break;
- case INTUOS4:
- case INTUOS4L:
- input_dev_i4(input_dev, wacom_wac);
- /* fall through */
- case INTUOS4S:
- input_dev_i4s(input_dev, wacom_wac);
- input_dev_i(input_dev, wacom_wac);
- break;
- case TABLETPC2FG:
- input_dev_tpc2fg(input_dev, wacom_wac);
- /* fall through */
- case TABLETPC:
- input_dev_tpc(input_dev, wacom_wac);
- if (wacom_wac->features.device_type != BTN_TOOL_PEN)
- break; /* no need to process stylus stuff */
-
- /* fall through */
- case PL:
- case PTU:
- input_dev_pl(input_dev, wacom_wac);
- /* fall through */
- case PENPARTNER:
- input_dev_pt(input_dev, wacom_wac);
- break;
+ case WACOM_MO:
+ __set_bit(BTN_1, input_dev->keybit);
+ __set_bit(BTN_5, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
+ /* fall through */
+
+ case WACOM_G4:
+ input_set_capability(input_dev, EV_MSC, MSC_SERIAL);
+
+ __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
+ __set_bit(BTN_0, input_dev->keybit);
+ __set_bit(BTN_4, input_dev->keybit);
+ /* fall through */
+
+ case GRAPHIRE:
+ input_set_capability(input_dev, EV_REL, REL_WHEEL);
+
+ __set_bit(BTN_LEFT, input_dev->keybit);
+ __set_bit(BTN_RIGHT, input_dev->keybit);
+ __set_bit(BTN_MIDDLE, input_dev->keybit);
+
+ __set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
+ __set_bit(BTN_TOOL_PEN, input_dev->keybit);
+ __set_bit(BTN_TOOL_MOUSE, input_dev->keybit);
+ __set_bit(BTN_STYLUS, input_dev->keybit);
+ __set_bit(BTN_STYLUS2, input_dev->keybit);
+ break;
+
+ case WACOM_BEE:
+ __set_bit(BTN_8, input_dev->keybit);
+ __set_bit(BTN_9, input_dev->keybit);
+ /* fall through */
+
+ case INTUOS3:
+ case INTUOS3L:
+ case CINTIQ:
+ __set_bit(BTN_4, input_dev->keybit);
+ __set_bit(BTN_5, input_dev->keybit);
+ __set_bit(BTN_6, input_dev->keybit);
+ __set_bit(BTN_7, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);
+ /* fall through */
+
+ case INTUOS3S:
+ __set_bit(BTN_0, input_dev->keybit);
+ __set_bit(BTN_1, input_dev->keybit);
+ __set_bit(BTN_2, input_dev->keybit);
+ __set_bit(BTN_3, input_dev->keybit);
+
+ __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
+ input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+ /* fall through */
+
+ case INTUOS:
+ wacom_setup_intuos(wacom_wac);
+ break;
+
+ case INTUOS4:
+ case INTUOS4L:
+ __set_bit(BTN_7, input_dev->keybit);
+ __set_bit(BTN_8, input_dev->keybit);
+ /* fall through */
+
+ case INTUOS4S:
+ for (i = 0; i < 7; i++)
+ __set_bit(BTN_0 + i, input_dev->keybit);
+ __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+ wacom_setup_intuos(wacom_wac);
+ break;
+
+ case TABLETPC2FG:
+ if (features->device_type == BTN_TOOL_TRIPLETAP) {
+ __set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit);
+ input_set_capability(input_dev, EV_MSC, MSC_SERIAL);
+ }
+ /* fall through */
+
+ case TABLETPC:
+ if (features->device_type == BTN_TOOL_DOUBLETAP ||
+ features->device_type == BTN_TOOL_TRIPLETAP) {
+ input_set_abs_params(input_dev, ABS_RX, 0, features->x_phy, 0, 0);
+ input_set_abs_params(input_dev, ABS_RY, 0, features->y_phy, 0, 0);
+ __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
+ }
+
+ if (features->device_type != BTN_TOOL_PEN)
+ break; /* no need to process stylus stuff */
+
+ /* fall through */
+
+ case PL:
+ case PTU:
+ __set_bit(BTN_TOOL_PEN, input_dev->keybit);
+ __set_bit(BTN_STYLUS, input_dev->keybit);
+ __set_bit(BTN_STYLUS2, input_dev->keybit);
+ /* fall through */
+
+ case PENPARTNER:
+ __set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
+ break;
}
- return;
}
static const struct wacom_features wacom_features_0x00 =
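With this refactor the per-protocol handlers above write straight to wacom_wac->input and only return whether they queued events; wacom_wac_irq() then issues the single input_sync(). A minimal sketch of the calling side (the real URB completion handler lives in wacom_sys.c and is outside this hunk; the urb->context assignment and the error handling below are assumptions, not the patch's code):

static void example_wacom_sys_irq(struct urb *urb)
{
	struct wacom *wacom = urb->context;	/* assumed: probe passes the wacom instance as URB context */
	int retval;

	if (urb->status == 0)
		/* the interrupt URB's buffer is wacom_wac->data (see the probe/teardown hunks),
		 * so the payload can be dispatched directly */
		wacom_wac_irq(&wacom->wacom_wac, urb->actual_length);

	retval = usb_submit_urb(urb, GFP_ATOMIC);	/* re-arm the interrupt URB */
	if (retval)
		dev_err(&wacom->intf->dev, "usb_submit_urb failed: %d\n", retval);
}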
diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
index b50cf04e61a8..063f1af3204f 100644
--- a/drivers/input/tablet/wacom_wac.h
+++ b/drivers/input/tablet/wacom_wac.h
@@ -9,6 +9,8 @@
#ifndef WACOM_WAC_H
#define WACOM_WAC_H
+#include <linux/types.h>
+
/* maximum packet length for USB devices */
#define WACOM_PKGLEN_MAX 32
@@ -71,13 +73,20 @@ struct wacom_features {
unsigned char unitExpo;
};
+struct wacom_shared {
+ bool stylus_in_proximity;
+};
+
struct wacom_wac {
char name[64];
unsigned char *data;
- int tool[2];
- int id[2];
+ int tool[3];
+ int id[3];
__u32 serial[2];
+ int last_finger;
struct wacom_features features;
+ struct wacom_shared *shared;
+ struct input_dev *input;
};
#endif
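The wacom_shared member added here is what lets the pen and touch interfaces of one tablet coordinate: wacom_add_shared_data() (seen in the probe hunk) points both wacom_wac instances at the same structure, the pen handler raises stylus_in_proximity, and wacom_tpc_irq() skips finger packets while it is set. A small illustration of that relationship (not from the patch; it only assumes the definitions in this header):

/* pen and touch interface of the same tablet share one wacom_shared */
static struct wacom_shared example_shared;
static struct wacom_wac example_pen   = { .shared = &example_shared };
static struct wacom_wac example_touch = { .shared = &example_shared };

static void example_pen_in_prox(void)
{
	/* set on the first PENABLED in-prox packet */
	example_pen.shared->stylus_in_proximity = true;
}

static bool example_touch_allowed(void)
{
	/* wacom_tpc_irq() bails out of touch reporting while this is true */
	return !example_touch.shared->stylus_in_proximity;
}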
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 8a8fa4d2d6a8..4dbe220b6b09 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -99,22 +99,6 @@ config TOUCHSCREEN_BITSY
To compile this driver as a module, choose M here: the
module will be called h3600_ts_input.
-config TOUCHSCREEN_CORGI
- tristate "SharpSL (Corgi and Spitz series) touchscreen driver (DEPRECATED)"
- depends on PXA_SHARPSL
- select CORGI_SSP_DEPRECATED
- help
- Say Y here to enable the driver for the touchscreen on the
- Sharp SL-C7xx and SL-Cxx00 series of PDAs.
-
- If unsure, say N.
-
- To compile this driver as a module, choose M here: the
- module will be called corgi_ts.
-
- NOTE: this driver is deprecated, try enable SPI and generic
- ADS7846-based touchscreen driver.
-
config TOUCHSCREEN_DA9034
tristate "Touchscreen support for Dialog Semiconductor DA9034"
depends on PMIC_DA903X
@@ -135,6 +119,18 @@ config TOUCHSCREEN_DYNAPRO
To compile this driver as a module, choose M here: the
module will be called dynapro.
+config TOUCHSCREEN_HAMPSHIRE
+ tristate "Hampshire serial touchscreen"
+ select SERIO
+ help
+ Say Y here if you have a Hampshire serial touchscreen connected to
+ your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hampshire.
+
config TOUCHSCREEN_EETI
tristate "EETI touchscreen panel support"
depends on I2C
@@ -594,4 +590,17 @@ config TOUCHSCREEN_PCAP
To compile this driver as a module, choose M here: the
module will be called pcap_ts.
+
+config TOUCHSCREEN_TPS6507X
+ tristate "TPS6507x based touchscreens"
+ depends on I2C
+ help
+ Say Y here if you have a TPS6507x based touchscreen
+ controller.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tps6507x_ts.
+
endif
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 7fef7d5cca23..497964a7a214 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -12,8 +12,8 @@ obj-$(CONFIG_TOUCHSCREEN_AD7879) += ad7879.o
obj-$(CONFIG_TOUCHSCREEN_ADS7846) += ads7846.o
obj-$(CONFIG_TOUCHSCREEN_ATMEL_TSADCC) += atmel_tsadcc.o
obj-$(CONFIG_TOUCHSCREEN_BITSY) += h3600_ts_input.o
-obj-$(CONFIG_TOUCHSCREEN_CORGI) += corgi_ts.o
obj-$(CONFIG_TOUCHSCREEN_DYNAPRO) += dynapro.o
+obj-$(CONFIG_TOUCHSCREEN_HAMPSHIRE) += hampshire.o
obj-$(CONFIG_TOUCHSCREEN_GUNZE) += gunze.o
obj-$(CONFIG_TOUCHSCREEN_EETI) += eeti_ts.o
obj-$(CONFIG_TOUCHSCREEN_ELO) += elo.o
@@ -46,3 +46,4 @@ obj-$(CONFIG_TOUCHSCREEN_WM97XX_ATMEL) += atmel-wm97xx.o
obj-$(CONFIG_TOUCHSCREEN_WM97XX_MAINSTONE) += mainstone-wm97xx.o
obj-$(CONFIG_TOUCHSCREEN_WM97XX_ZYLONITE) += zylonite-wm97xx.o
obj-$(CONFIG_TOUCHSCREEN_W90X900) += w90p910_ts.o
+obj-$(CONFIG_TOUCHSCREEN_TPS6507X) += tps6507x-ts.o
diff --git a/drivers/input/touchscreen/corgi_ts.c b/drivers/input/touchscreen/corgi_ts.c
deleted file mode 100644
index 94a1919d439d..000000000000
--- a/drivers/input/touchscreen/corgi_ts.c
+++ /dev/null
@@ -1,385 +0,0 @@
-/*
- * Touchscreen driver for Sharp SL-C7xx and SL-Cxx00 models
- *
- * Copyright (c) 2004-2005 Richard Purdie
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/input.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/irq.h>
-
-#include <mach/sharpsl.h>
-#include <mach/hardware.h>
-#include <mach/pxa2xx-gpio.h>
-
-
-#define PWR_MODE_ACTIVE 0
-#define PWR_MODE_SUSPEND 1
-
-#define X_AXIS_MAX 3830
-#define X_AXIS_MIN 150
-#define Y_AXIS_MAX 3830
-#define Y_AXIS_MIN 190
-#define PRESSURE_MIN 0
-#define PRESSURE_MAX 15000
-
-struct ts_event {
- short pressure;
- short x;
- short y;
-};
-
-struct corgi_ts {
- struct input_dev *input;
- struct timer_list timer;
- struct ts_event tc;
- int pendown;
- int power_mode;
- int irq_gpio;
- struct corgits_machinfo *machinfo;
-};
-
-#ifdef CONFIG_PXA25x
-#define CCNT(a) asm volatile ("mrc p14, 0, %0, C1, C0, 0" : "=r"(a))
-#define PMNC_GET(x) asm volatile ("mrc p14, 0, %0, C0, C0, 0" : "=r"(x))
-#define PMNC_SET(x) asm volatile ("mcr p14, 0, %0, C0, C0, 0" : : "r"(x))
-#endif
-#ifdef CONFIG_PXA27x
-#define CCNT(a) asm volatile ("mrc p14, 0, %0, C1, C1, 0" : "=r"(a))
-#define PMNC_GET(x) asm volatile ("mrc p14, 0, %0, C0, C1, 0" : "=r"(x))
-#define PMNC_SET(x) asm volatile ("mcr p14, 0, %0, C0, C1, 0" : : "r"(x))
-#endif
-
-/* ADS7846 Touch Screen Controller bit definitions */
-#define ADSCTRL_PD0 (1u << 0) /* PD0 */
-#define ADSCTRL_PD1 (1u << 1) /* PD1 */
-#define ADSCTRL_DFR (1u << 2) /* SER/DFR */
-#define ADSCTRL_MOD (1u << 3) /* Mode */
-#define ADSCTRL_ADR_SH 4 /* Address setting */
-#define ADSCTRL_STS (1u << 7) /* Start Bit */
-
-/* External Functions */
-extern unsigned int get_clk_frequency_khz(int info);
-
-static unsigned long calc_waittime(struct corgi_ts *corgi_ts)
-{
- unsigned long hsync_invperiod = corgi_ts->machinfo->get_hsync_invperiod();
-
- if (hsync_invperiod)
- return get_clk_frequency_khz(0)*1000/hsync_invperiod;
- else
- return 0;
-}
-
-static int sync_receive_data_send_cmd(struct corgi_ts *corgi_ts, int doRecive, int doSend,
- unsigned int address, unsigned long wait_time)
-{
- unsigned long timer1 = 0, timer2, pmnc = 0;
- int pos = 0;
-
- if (wait_time && doSend) {
- PMNC_GET(pmnc);
- if (!(pmnc & 0x01))
- PMNC_SET(0x01);
-
- /* polling HSync */
- corgi_ts->machinfo->wait_hsync();
- /* get CCNT */
- CCNT(timer1);
- }
-
- if (doRecive)
- pos = corgi_ssp_ads7846_get();
-
- if (doSend) {
- int cmd = ADSCTRL_PD0 | ADSCTRL_PD1 | (address << ADSCTRL_ADR_SH) | ADSCTRL_STS;
- /* dummy command */
- corgi_ssp_ads7846_put(cmd);
- corgi_ssp_ads7846_get();
-
- if (wait_time) {
- /* Wait after HSync */
- CCNT(timer2);
- if (timer2-timer1 > wait_time) {
- /* too slow - timeout, try again */
- corgi_ts->machinfo->wait_hsync();
- /* get CCNT */
- CCNT(timer1);
- /* Wait after HSync */
- CCNT(timer2);
- }
- while (timer2 - timer1 < wait_time)
- CCNT(timer2);
- }
- corgi_ssp_ads7846_put(cmd);
- if (wait_time && !(pmnc & 0x01))
- PMNC_SET(pmnc);
- }
- return pos;
-}
-
-static int read_xydata(struct corgi_ts *corgi_ts)
-{
- unsigned int x, y, z1, z2;
- unsigned long flags, wait_time;
-
- /* critical section */
- local_irq_save(flags);
- corgi_ssp_ads7846_lock();
- wait_time = calc_waittime(corgi_ts);
-
- /* Y-axis */
- sync_receive_data_send_cmd(corgi_ts, 0, 1, 1u, wait_time);
-
- /* Y-axis */
- sync_receive_data_send_cmd(corgi_ts, 1, 1, 1u, wait_time);
-
- /* X-axis */
- y = sync_receive_data_send_cmd(corgi_ts, 1, 1, 5u, wait_time);
-
- /* Z1 */
- x = sync_receive_data_send_cmd(corgi_ts, 1, 1, 3u, wait_time);
-
- /* Z2 */
- z1 = sync_receive_data_send_cmd(corgi_ts, 1, 1, 4u, wait_time);
- z2 = sync_receive_data_send_cmd(corgi_ts, 1, 0, 4u, wait_time);
-
- /* Power-Down Enable */
- corgi_ssp_ads7846_put((1u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
- corgi_ssp_ads7846_get();
-
- corgi_ssp_ads7846_unlock();
- local_irq_restore(flags);
-
- if (x== 0 || y == 0 || z1 == 0 || (x * (z2 - z1) / z1) >= 15000) {
- corgi_ts->tc.pressure = 0;
- return 0;
- }
-
- corgi_ts->tc.x = x;
- corgi_ts->tc.y = y;
- corgi_ts->tc.pressure = (x * (z2 - z1)) / z1;
- return 1;
-}
-
-static void new_data(struct corgi_ts *corgi_ts)
-{
- struct input_dev *dev = corgi_ts->input;
-
- if (corgi_ts->power_mode != PWR_MODE_ACTIVE)
- return;
-
- if (!corgi_ts->tc.pressure && corgi_ts->pendown == 0)
- return;
-
- input_report_abs(dev, ABS_X, corgi_ts->tc.x);
- input_report_abs(dev, ABS_Y, corgi_ts->tc.y);
- input_report_abs(dev, ABS_PRESSURE, corgi_ts->tc.pressure);
- input_report_key(dev, BTN_TOUCH, corgi_ts->pendown);
- input_sync(dev);
-}
-
-static void ts_interrupt_main(struct corgi_ts *corgi_ts, int isTimer)
-{
- if ((GPLR(IRQ_TO_GPIO(corgi_ts->irq_gpio)) & GPIO_bit(IRQ_TO_GPIO(corgi_ts->irq_gpio))) == 0) {
- /* Disable Interrupt */
- set_irq_type(corgi_ts->irq_gpio, IRQ_TYPE_NONE);
- if (read_xydata(corgi_ts)) {
- corgi_ts->pendown = 1;
- new_data(corgi_ts);
- }
- mod_timer(&corgi_ts->timer, jiffies + HZ / 100);
- } else {
- if (corgi_ts->pendown == 1 || corgi_ts->pendown == 2) {
- mod_timer(&corgi_ts->timer, jiffies + HZ / 100);
- corgi_ts->pendown++;
- return;
- }
-
- if (corgi_ts->pendown) {
- corgi_ts->tc.pressure = 0;
- new_data(corgi_ts);
- }
-
- /* Enable Falling Edge */
- set_irq_type(corgi_ts->irq_gpio, IRQ_TYPE_EDGE_FALLING);
- corgi_ts->pendown = 0;
- }
-}
-
-static void corgi_ts_timer(unsigned long data)
-{
- struct corgi_ts *corgits_data = (struct corgi_ts *) data;
-
- ts_interrupt_main(corgits_data, 1);
-}
-
-static irqreturn_t ts_interrupt(int irq, void *dev_id)
-{
- struct corgi_ts *corgits_data = dev_id;
-
- ts_interrupt_main(corgits_data, 0);
- return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_PM
-static int corgits_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct corgi_ts *corgi_ts = platform_get_drvdata(dev);
-
- if (corgi_ts->pendown) {
- del_timer_sync(&corgi_ts->timer);
- corgi_ts->tc.pressure = 0;
- new_data(corgi_ts);
- corgi_ts->pendown = 0;
- }
- corgi_ts->power_mode = PWR_MODE_SUSPEND;
-
- corgi_ssp_ads7846_putget((1u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
-
- return 0;
-}
-
-static int corgits_resume(struct platform_device *dev)
-{
- struct corgi_ts *corgi_ts = platform_get_drvdata(dev);
-
- corgi_ssp_ads7846_putget((4u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
- /* Enable Falling Edge */
- set_irq_type(corgi_ts->irq_gpio, IRQ_TYPE_EDGE_FALLING);
- corgi_ts->power_mode = PWR_MODE_ACTIVE;
-
- return 0;
-}
-#else
-#define corgits_suspend NULL
-#define corgits_resume NULL
-#endif
-
-static int __devinit corgits_probe(struct platform_device *pdev)
-{
- struct corgi_ts *corgi_ts;
- struct input_dev *input_dev;
- int err = -ENOMEM;
-
- corgi_ts = kzalloc(sizeof(struct corgi_ts), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!corgi_ts || !input_dev)
- goto fail1;
-
- platform_set_drvdata(pdev, corgi_ts);
-
- corgi_ts->machinfo = pdev->dev.platform_data;
- corgi_ts->irq_gpio = platform_get_irq(pdev, 0);
-
- if (corgi_ts->irq_gpio < 0) {
- err = -ENODEV;
- goto fail1;
- }
-
- corgi_ts->input = input_dev;
-
- init_timer(&corgi_ts->timer);
- corgi_ts->timer.data = (unsigned long) corgi_ts;
- corgi_ts->timer.function = corgi_ts_timer;
-
- input_dev->name = "Corgi Touchscreen";
- input_dev->phys = "corgits/input0";
- input_dev->id.bustype = BUS_HOST;
- input_dev->id.vendor = 0x0001;
- input_dev->id.product = 0x0002;
- input_dev->id.version = 0x0100;
- input_dev->dev.parent = &pdev->dev;
-
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
- input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
- input_set_abs_params(input_dev, ABS_X, X_AXIS_MIN, X_AXIS_MAX, 0, 0);
- input_set_abs_params(input_dev, ABS_Y, Y_AXIS_MIN, Y_AXIS_MAX, 0, 0);
- input_set_abs_params(input_dev, ABS_PRESSURE, PRESSURE_MIN, PRESSURE_MAX, 0, 0);
-
- pxa_gpio_mode(IRQ_TO_GPIO(corgi_ts->irq_gpio) | GPIO_IN);
-
- /* Initiaize ADS7846 Difference Reference mode */
- corgi_ssp_ads7846_putget((1u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
- mdelay(5);
- corgi_ssp_ads7846_putget((3u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
- mdelay(5);
- corgi_ssp_ads7846_putget((4u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
- mdelay(5);
- corgi_ssp_ads7846_putget((5u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
- mdelay(5);
-
- if (request_irq(corgi_ts->irq_gpio, ts_interrupt, IRQF_DISABLED, "ts", corgi_ts)) {
- err = -EBUSY;
- goto fail1;
- }
-
- err = input_register_device(corgi_ts->input);
- if (err)
- goto fail2;
-
- corgi_ts->power_mode = PWR_MODE_ACTIVE;
-
- /* Enable Falling Edge */
- set_irq_type(corgi_ts->irq_gpio, IRQ_TYPE_EDGE_FALLING);
-
- return 0;
-
- fail2: free_irq(corgi_ts->irq_gpio, corgi_ts);
- fail1: input_free_device(input_dev);
- kfree(corgi_ts);
- return err;
-}
-
-static int __devexit corgits_remove(struct platform_device *pdev)
-{
- struct corgi_ts *corgi_ts = platform_get_drvdata(pdev);
-
- free_irq(corgi_ts->irq_gpio, corgi_ts);
- del_timer_sync(&corgi_ts->timer);
- corgi_ts->machinfo->put_hsync();
- input_unregister_device(corgi_ts->input);
- kfree(corgi_ts);
-
- return 0;
-}
-
-static struct platform_driver corgits_driver = {
- .probe = corgits_probe,
- .remove = __devexit_p(corgits_remove),
- .suspend = corgits_suspend,
- .resume = corgits_resume,
- .driver = {
- .name = "corgi-ts",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init corgits_init(void)
-{
- return platform_driver_register(&corgits_driver);
-}
-
-static void __exit corgits_exit(void)
-{
- platform_driver_unregister(&corgits_driver);
-}
-
-module_init(corgits_init);
-module_exit(corgits_exit);
-
-MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
-MODULE_DESCRIPTION("Corgi TouchScreen Driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:corgi-ts");
diff --git a/drivers/input/touchscreen/hampshire.c b/drivers/input/touchscreen/hampshire.c
new file mode 100644
index 000000000000..2da6cc31bb21
--- /dev/null
+++ b/drivers/input/touchscreen/hampshire.c
@@ -0,0 +1,205 @@
+/*
+ * Hampshire serial touchscreen driver
+ *
+ * Copyright (c) 2010 Adam Bennett
+ * Based on the dynapro driver (c) Tias Guns
+ *
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+/*
+ * 2010/04/08 Adam Bennett <abennett72@gmail.com>
+ * Copied dynapro.c and edited for Hampshire 4-byte protocol
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/serio.h>
+#include <linux/init.h>
+
+#define DRIVER_DESC "Hampshire serial touchscreen driver"
+
+MODULE_AUTHOR("Adam Bennett <abennett72@gmail.com>");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+
+/*
+ * Definitions & global arrays.
+ */
+
+#define HAMPSHIRE_FORMAT_TOUCH_BIT 0x40
+#define HAMPSHIRE_FORMAT_LENGTH 4
+#define HAMPSHIRE_RESPONSE_BEGIN_BYTE 0x80
+
+#define HAMPSHIRE_MIN_XC 0
+#define HAMPSHIRE_MAX_XC 0x1000
+#define HAMPSHIRE_MIN_YC 0
+#define HAMPSHIRE_MAX_YC 0x1000
+
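+/* Assemble the X/Y coordinates from bits spread across the 4-byte packet */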
+#define HAMPSHIRE_GET_XC(data) (((data[3] & 0x0c) >> 2) | (data[1] << 2) | ((data[0] & 0x38) << 6))
+#define HAMPSHIRE_GET_YC(data) ((data[3] & 0x03) | (data[2] << 2) | ((data[0] & 0x07) << 9))
+#define HAMPSHIRE_GET_TOUCHED(data) (HAMPSHIRE_FORMAT_TOUCH_BIT & data[0])
+
+/*
+ * Per-touchscreen data.
+ */
+
+struct hampshire {
+ struct input_dev *dev;
+ struct serio *serio;
+ int idx;
+ unsigned char data[HAMPSHIRE_FORMAT_LENGTH];
+ char phys[32];
+};
+
+static void hampshire_process_data(struct hampshire *phampshire)
+{
+ struct input_dev *dev = phampshire->dev;
+
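+ /* Report one input event once all four bytes of a packet have been collected */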
+ if (HAMPSHIRE_FORMAT_LENGTH == ++phampshire->idx) {
+ input_report_abs(dev, ABS_X, HAMPSHIRE_GET_XC(phampshire->data));
+ input_report_abs(dev, ABS_Y, HAMPSHIRE_GET_YC(phampshire->data));
+ input_report_key(dev, BTN_TOUCH,
+ HAMPSHIRE_GET_TOUCHED(phampshire->data));
+ input_sync(dev);
+
+ phampshire->idx = 0;
+ }
+}
+
+static irqreturn_t hampshire_interrupt(struct serio *serio,
+ unsigned char data, unsigned int flags)
+{
+ struct hampshire *phampshire = serio_get_drvdata(serio);
+
+ phampshire->data[phampshire->idx] = data;
+
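+ /* Only treat the buffer as a packet once a start byte has been seen */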
+ if (HAMPSHIRE_RESPONSE_BEGIN_BYTE & phampshire->data[0])
+ hampshire_process_data(phampshire);
+ else
+ dev_dbg(&serio->dev, "unknown/unsynchronized data: %x\n",
+ phampshire->data[0]);
+
+ return IRQ_HANDLED;
+}
+
+static void hampshire_disconnect(struct serio *serio)
+{
+ struct hampshire *phampshire = serio_get_drvdata(serio);
+
+ input_get_device(phampshire->dev);
+ input_unregister_device(phampshire->dev);
+ serio_close(serio);
+ serio_set_drvdata(serio, NULL);
+ input_put_device(phampshire->dev);
+ kfree(phampshire);
+}
+
+/*
+ * hampshire_connect() is the routine that is called when someone adds a
+ * new serio device that supports hampshire protocol and registers it as
+ * an input device. This is usually accomplished using inputattach.
+ */
+
+static int hampshire_connect(struct serio *serio, struct serio_driver *drv)
+{
+ struct hampshire *phampshire;
+ struct input_dev *input_dev;
+ int err;
+
+ phampshire = kzalloc(sizeof(struct hampshire), GFP_KERNEL);
+ input_dev = input_allocate_device();
+ if (!phampshire || !input_dev) {
+ err = -ENOMEM;
+ goto fail1;
+ }
+
+ phampshire->serio = serio;
+ phampshire->dev = input_dev;
+ snprintf(phampshire->phys, sizeof(phampshire->phys),
+ "%s/input0", serio->phys);
+
+ input_dev->name = "Hampshire Serial TouchScreen";
+ input_dev->phys = phampshire->phys;
+ input_dev->id.bustype = BUS_RS232;
+ input_dev->id.vendor = SERIO_HAMPSHIRE;
+ input_dev->id.product = 0;
+ input_dev->id.version = 0x0001;
+ input_dev->dev.parent = &serio->dev;
+ input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+ input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+ input_set_abs_params(phampshire->dev, ABS_X,
+ HAMPSHIRE_MIN_XC, HAMPSHIRE_MAX_XC, 0, 0);
+ input_set_abs_params(phampshire->dev, ABS_Y,
+ HAMPSHIRE_MIN_YC, HAMPSHIRE_MAX_YC, 0, 0);
+
+ serio_set_drvdata(serio, phampshire);
+
+ err = serio_open(serio, drv);
+ if (err)
+ goto fail2;
+
+ err = input_register_device(phampshire->dev);
+ if (err)
+ goto fail3;
+
+ return 0;
+
+ fail3: serio_close(serio);
+ fail2: serio_set_drvdata(serio, NULL);
+ fail1: input_free_device(input_dev);
+ kfree(phampshire);
+ return err;
+}
+
+/*
+ * The serio driver structure.
+ */
+
+static struct serio_device_id hampshire_serio_ids[] = {
+ {
+ .type = SERIO_RS232,
+ .proto = SERIO_HAMPSHIRE,
+ .id = SERIO_ANY,
+ .extra = SERIO_ANY,
+ },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(serio, hampshire_serio_ids);
+
+static struct serio_driver hampshire_drv = {
+ .driver = {
+ .name = "hampshire",
+ },
+ .description = DRIVER_DESC,
+ .id_table = hampshire_serio_ids,
+ .interrupt = hampshire_interrupt,
+ .connect = hampshire_connect,
+ .disconnect = hampshire_disconnect,
+};
+
+/*
+ * The functions for inserting/removing us as a module.
+ */
+
+static int __init hampshire_init(void)
+{
+ return serio_register_driver(&hampshire_drv);
+}
+
+static void __exit hampshire_exit(void)
+{
+ serio_unregister_driver(&hampshire_drv);
+}
+
+module_init(hampshire_init);
+module_exit(hampshire_exit);
diff --git a/drivers/input/touchscreen/s3c2410_ts.c b/drivers/input/touchscreen/s3c2410_ts.c
index 98a7d1279486..a2e122878f30 100644
--- a/drivers/input/touchscreen/s3c2410_ts.c
+++ b/drivers/input/touchscreen/s3c2410_ts.c
@@ -37,9 +37,9 @@
#include <plat/adc.h>
#include <plat/regs-adc.h>
+#include <plat/ts.h>
#include <mach/regs-gpio.h>
-#include <mach/ts.h>
#define TSC_SLEEP (S3C2410_ADCTSC_PULL_UP_DISABLE | S3C2410_ADCTSC_XY_PST(0))
diff --git a/drivers/input/touchscreen/tps6507x-ts.c b/drivers/input/touchscreen/tps6507x-ts.c
new file mode 100644
index 000000000000..5de80a1a730b
--- /dev/null
+++ b/drivers/input/touchscreen/tps6507x-ts.c
@@ -0,0 +1,400 @@
+/*
+ * drivers/input/touchscreen/tps6507x_ts.c
+ *
+ * Touchscreen driver for the tps6507x chip.
+ *
+ * Copyright (c) 2009 RidgeRun (todd.fischer@ridgerun.com)
+ *
+ * Credits:
+ *
+ * Using code from tsc2007, MtekVision Co., Ltd.
+ *
+ * For licensing details see kernel-base/COPYING
+ *
+ * TPS65070, TPS65073, TPS650731, and TPS650732 support
+ * 10 bit touch screen interface.
+ */
+
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/tps6507x.h>
+#include <linux/input/tps6507x-ts.h>
+#include <linux/delay.h>
+
+#define TSC_DEFAULT_POLL_PERIOD 30 /* ms */
+#define TPS_DEFAULT_MIN_PRESSURE 0x30
+#define MAX_10BIT ((1 << 10) - 1)
+
+#define TPS6507X_ADCONFIG_CONVERT_TS (TPS6507X_ADCONFIG_AD_ENABLE | \
+ TPS6507X_ADCONFIG_START_CONVERSION | \
+ TPS6507X_ADCONFIG_INPUT_REAL_TSC)
+#define TPS6507X_ADCONFIG_POWER_DOWN_TS (TPS6507X_ADCONFIG_INPUT_REAL_TSC)
+
+struct ts_event {
+ u16 x;
+ u16 y;
+ u16 pressure;
+};
+
+struct tps6507x_ts {
+ struct input_dev *input_dev;
+ struct device *dev;
+ char phys[32];
+ struct workqueue_struct *wq;
+ struct delayed_work work;
+ unsigned polling; /* polling is active */
+ struct ts_event tc;
+ struct tps6507x_dev *mfd;
+ u16 model;
+ unsigned pendown;
+ int irq;
+ void (*clear_penirq)(void);
+ unsigned long poll_period; /* ms */
+ u16 min_pressure;
+ int vref; /* non-zero to leave vref on */
+};
+
+static int tps6507x_read_u8(struct tps6507x_ts *tsc, u8 reg, u8 *data)
+{
+ int err;
+
+ err = tsc->mfd->read_dev(tsc->mfd, reg, 1, data);
+
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int tps6507x_write_u8(struct tps6507x_ts *tsc, u8 reg, u8 data)
+{
+ return tsc->mfd->write_dev(tsc->mfd, reg, 1, &data);
+}
+
+static s32 tps6507x_adc_conversion(struct tps6507x_ts *tsc,
+ u8 tsc_mode, u16 *value)
+{
+ s32 ret;
+ u8 adc_status;
+ u8 result;
+
+ /* Route input signal to A/D converter */
+
+ ret = tps6507x_write_u8(tsc, TPS6507X_REG_TSCMODE, tsc_mode);
+ if (ret) {
+ dev_err(tsc->dev, "TSC mode read failed\n");
+ goto err;
+ }
+
+ /* Start A/D conversion */
+
+ ret = tps6507x_write_u8(tsc, TPS6507X_REG_ADCONFIG,
+ TPS6507X_ADCONFIG_CONVERT_TS);
+ if (ret) {
+ dev_err(tsc->dev, "ADC config write failed\n");
+ return ret;
+ }
+
+ do {
+ ret = tps6507x_read_u8(tsc, TPS6507X_REG_ADCONFIG,
+ &adc_status);
+ if (ret) {
+ dev_err(tsc->dev, "ADC config read failed\n");
+ goto err;
+ }
+ } while (adc_status & TPS6507X_ADCONFIG_START_CONVERSION);
+
+ ret = tps6507x_read_u8(tsc, TPS6507X_REG_ADRESULT_2, &result);
+ if (ret) {
+ dev_err(tsc->dev, "ADC result 2 read failed\n");
+ goto err;
+ }
+
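+ /* ADRESULT_2 holds the upper bits of the 10-bit sample, ADRESULT_1 the low byte */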
+ *value = (result & TPS6507X_REG_ADRESULT_2_MASK) << 8;
+
+ ret = tps6507x_read_u8(tsc, TPS6507X_REG_ADRESULT_1, &result);
+ if (ret) {
+ dev_err(tsc->dev, "ADC result 1 read failed\n");
+ goto err;
+ }
+
+ *value |= result;
+
+ dev_dbg(tsc->dev, "TSC channel %d = 0x%X\n", tsc_mode, *value);
+
+err:
+ return ret;
+}
+
+/* Need to call tps6507x_adc_standby() after using the A/D converter for the
+ * touch screen interrupt to work properly.
+ */
+
+static s32 tps6507x_adc_standby(struct tps6507x_ts *tsc)
+{
+ s32 ret;
+ s32 loops = 0;
+ u8 val;
+
+ ret = tps6507x_write_u8(tsc, TPS6507X_REG_ADCONFIG,
+ TPS6507X_ADCONFIG_INPUT_TSC);
+ if (ret)
+ return ret;
+
+ ret = tps6507x_write_u8(tsc, TPS6507X_REG_TSCMODE,
+ TPS6507X_TSCMODE_STANDBY);
+ if (ret)
+ return ret;
+
+ ret = tps6507x_read_u8(tsc, TPS6507X_REG_INT, &val);
+ if (ret)
+ return ret;
+
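+ /* Wait for any pending touch-screen interrupt to clear before returning */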
+ while (val & TPS6507X_REG_TSC_INT) {
+ mdelay(10);
+ ret = tps6507x_read_u8(tsc, TPS6507X_REG_INT, &val);
+ if (ret)
+ return ret;
+ loops++;
+ }
+
+ return ret;
+}
+
+static void tps6507x_ts_handler(struct work_struct *work)
+{
+ struct tps6507x_ts *tsc = container_of(work,
+ struct tps6507x_ts, work.work);
+ struct input_dev *input_dev = tsc->input_dev;
+ int pendown;
+ int schd;
+ int poll = 0;
+ s32 ret;
+
+ ret = tps6507x_adc_conversion(tsc, TPS6507X_TSCMODE_PRESSURE,
+ &tsc->tc.pressure);
+ if (ret)
+ goto done;
+
+ pendown = tsc->tc.pressure > tsc->min_pressure;
+
+ if (unlikely(!pendown && tsc->pendown)) {
+ dev_dbg(tsc->dev, "UP\n");
+ input_report_key(input_dev, BTN_TOUCH, 0);
+ input_report_abs(input_dev, ABS_PRESSURE, 0);
+ input_sync(input_dev);
+ tsc->pendown = 0;
+ }
+
+ if (pendown) {
+
+ if (!tsc->pendown) {
+ dev_dbg(tsc->dev, "DOWN\n");
+ input_report_key(input_dev, BTN_TOUCH, 1);
+ } else
+ dev_dbg(tsc->dev, "still down\n");
+
+ ret = tps6507x_adc_conversion(tsc, TPS6507X_TSCMODE_X_POSITION,
+ &tsc->tc.x);
+ if (ret)
+ goto done;
+
+ ret = tps6507x_adc_conversion(tsc, TPS6507X_TSCMODE_Y_POSITION,
+ &tsc->tc.y);
+ if (ret)
+ goto done;
+
+ input_report_abs(input_dev, ABS_X, tsc->tc.x);
+ input_report_abs(input_dev, ABS_Y, tsc->tc.y);
+ input_report_abs(input_dev, ABS_PRESSURE, tsc->tc.pressure);
+ input_sync(input_dev);
+ tsc->pendown = 1;
+ poll = 1;
+ }
+
+done:
+ /* always poll if not using interrupts */
+ poll = 1;
+
+ if (poll) {
+ schd = queue_delayed_work(tsc->wq, &tsc->work,
+ tsc->poll_period * HZ / 1000);
+ if (schd)
+ tsc->polling = 1;
+ else {
+ tsc->polling = 0;
+ dev_err(tsc->dev, "re-schedule failed");
+ }
+ } else
+ tsc->polling = 0;
+
+ ret = tps6507x_adc_standby(tsc);
+}
+
+static int tps6507x_ts_probe(struct platform_device *pdev)
+{
+ int error;
+ struct tps6507x_ts *tsc;
+ struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent);
+ struct touchscreen_init_data *init_data;
+ struct input_dev *input_dev;
+ struct tps6507x_board *tps_board;
+ int schd;
+
+ /**
+ * tps_board points to pmic related constants
+ * coming from the board-evm file.
+ */
+
+ tps_board = (struct tps6507x_board *)tps6507x_dev->dev->platform_data;
+
+ if (!tps_board) {
+ dev_err(tps6507x_dev->dev,
+ "Could not find tps6507x platform data\n");
+ return -EIO;
+ }
+
+ /**
+ * init_data points to the touchscreen_init_data structure
+ * coming from the board-evm file.
+ */
+
+ init_data = tps_board->tps6507x_ts_init_data;
+
+ tsc = kzalloc(sizeof(struct tps6507x_ts), GFP_KERNEL);
+ if (!tsc) {
+ dev_err(tps6507x_dev->dev, "failed to allocate driver data\n");
+ error = -ENOMEM;
+ goto err0;
+ }
+
+ tps6507x_dev->ts = tsc;
+ tsc->mfd = tps6507x_dev;
+ tsc->dev = tps6507x_dev->dev;
+ input_dev = input_allocate_device();
+ if (!input_dev) {
+ dev_err(tsc->dev, "Failed to allocate input device.\n");
+ error = -ENOMEM;
+ goto err1;
+ }
+
+ input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+ input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+
+ input_set_abs_params(input_dev, ABS_X, 0, MAX_10BIT, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, MAX_10BIT, 0, 0);
+ input_set_abs_params(input_dev, ABS_PRESSURE, 0, MAX_10BIT, 0, 0);
+
+ input_dev->name = "TPS6507x Touchscreen";
+ input_dev->id.bustype = BUS_I2C;
+ input_dev->dev.parent = tsc->dev;
+
+ snprintf(tsc->phys, sizeof(tsc->phys),
+ "%s/input0", dev_name(tsc->dev));
+ input_dev->phys = tsc->phys;
+
+ dev_dbg(tsc->dev, "device: %s\n", input_dev->phys);
+
+ input_set_drvdata(input_dev, tsc);
+
+ tsc->input_dev = input_dev;
+
+ INIT_DELAYED_WORK(&tsc->work, tps6507x_ts_handler);
+ tsc->wq = create_workqueue("TPS6507x Touchscreen");
+
+ if (init_data) {
+ tsc->poll_period = init_data->poll_period;
+ tsc->vref = init_data->vref;
+ tsc->min_pressure = init_data->min_pressure;
+ input_dev->id.vendor = init_data->vendor;
+ input_dev->id.product = init_data->product;
+ input_dev->id.version = init_data->version;
+ } else {
+ tsc->poll_period = TSC_DEFAULT_POLL_PERIOD;
+ tsc->min_pressure = TPS_DEFAULT_MIN_PRESSURE;
+ }
+
+ error = tps6507x_adc_standby(tsc);
+ if (error)
+ goto err2;
+
+ error = input_register_device(input_dev);
+ if (error)
+ goto err2;
+
+ schd = queue_delayed_work(tsc->wq, &tsc->work,
+ tsc->poll_period * HZ / 1000);
+
+ if (schd)
+ tsc->polling = 1;
+ else {
+ tsc->polling = 0;
+ dev_err(tsc->dev, "schedule failed");
+ goto err2;
+ }
+
+ return 0;
+
+err2:
+ cancel_delayed_work(&tsc->work);
+ flush_workqueue(tsc->wq);
+ destroy_workqueue(tsc->wq);
+ tsc->wq = NULL;
+ input_free_device(input_dev);
+err1:
+ kfree(tsc);
+ tps6507x_dev->ts = NULL;
+err0:
+ return error;
+}
+
+static int __devexit tps6507x_ts_remove(struct platform_device *pdev)
+{
+ struct tps6507x_dev *tps6507x_dev = platform_get_drvdata(pdev);
+ struct tps6507x_ts *tsc = tps6507x_dev->ts;
+ struct input_dev *input_dev;
+
+ if (!tsc)
+ return 0;
+
+ input_dev = tsc->input_dev;
+
+ cancel_delayed_work(&tsc->work);
+ flush_workqueue(tsc->wq);
+ destroy_workqueue(tsc->wq);
+ tsc->wq = NULL;
+
+ input_unregister_device(input_dev);
+
+ tps6507x_dev->ts = NULL;
+ kfree(tsc);
+
+ return 0;
+}
+
+static struct platform_driver tps6507x_ts_driver = {
+ .driver = {
+ .name = "tps6507x-ts",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps6507x_ts_probe,
+ .remove = __devexit_p(tps6507x_ts_remove),
+};
+
+static int __init tps6507x_ts_init(void)
+{
+ return platform_driver_register(&tps6507x_ts_driver);
+}
+module_init(tps6507x_ts_init);
+
+static void __exit tps6507x_ts_exit(void)
+{
+ platform_driver_unregister(&tps6507x_ts_driver);
+}
+module_exit(tps6507x_ts_exit);
+
+MODULE_AUTHOR("Todd Fischer <todd.fischer@ridgerun.com>");
+MODULE_DESCRIPTION("TPS6507x - TouchScreen driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:tps6507x-tsc");
diff --git a/drivers/input/touchscreen/tsc2007.c b/drivers/input/touchscreen/tsc2007.c
index be23780e8a3e..769b479fcaa6 100644
--- a/drivers/input/touchscreen/tsc2007.c
+++ b/drivers/input/touchscreen/tsc2007.c
@@ -347,6 +347,8 @@ static int __devexit tsc2007_remove(struct i2c_client *client)
struct tsc2007 *ts = i2c_get_clientdata(client);
struct tsc2007_platform_data *pdata = client->dev.platform_data;
+ i2c_set_clientdata(client, NULL);
+
tsc2007_free_irq(ts);
if (pdata->exit_platform_hw)
diff --git a/drivers/isdn/hardware/avm/avm_cs.c b/drivers/isdn/hardware/avm/avm_cs.c
index 94b796d84053..f410d0eb2fef 100644
--- a/drivers/isdn/hardware/avm/avm_cs.c
+++ b/drivers/isdn/hardware/avm/avm_cs.c
@@ -13,7 +13,6 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ptrace.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tty.h>
#include <linux/serial.h>
@@ -61,31 +60,6 @@ static void avmcs_release(struct pcmcia_device *link);
static void avmcs_detach(struct pcmcia_device *p_dev);
-/*
- A linked list of "instances" of the skeleton device. Each actual
- PCMCIA card corresponds to one device instance, and is described
- by one struct pcmcia_device structure (defined in ds.h).
-
- You may not want to use a linked list for this -- for example, the
- memory card driver uses an array of struct pcmcia_device pointers, where minor
- device numbers are used to derive the corresponding array index.
-*/
-
-/*
- A driver needs to provide a dev_node_t structure for each device
- on a card. In some cases, there is only one device per card (for
- example, ethernet cards, modems). In other cases, there may be
- many actual or logical devices (SCSI adapters, memory cards with
- multiple partitions). The dev_node_t structures need to be kept
- in a linked list starting at the 'dev' field of a struct pcmcia_device
- structure. We allocate them in the card's private data structure,
- because they generally can't be allocated dynamically.
-*/
-
-typedef struct local_info_t {
- dev_node_t node;
-} local_info_t;
-
/*======================================================================
avmcs_attach() creates an "instance" of the driver, allocating
@@ -100,32 +74,19 @@ typedef struct local_info_t {
static int avmcs_probe(struct pcmcia_device *p_dev)
{
- local_info_t *local;
/* The io structure describes IO port mapping */
p_dev->io.NumPorts1 = 16;
p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
p_dev->io.NumPorts2 = 0;
- /* Interrupt setup */
- p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
-
/* General socket configuration */
p_dev->conf.Attributes = CONF_ENABLE_IRQ;
p_dev->conf.IntType = INT_MEMORY_AND_IO;
p_dev->conf.ConfigIndex = 1;
p_dev->conf.Present = PRESENT_OPTION;
- /* Allocate space for private device-specific data */
- local = kzalloc(sizeof(local_info_t), GFP_KERNEL);
- if (!local)
- goto err;
- p_dev->priv = local;
-
return avmcs_config(p_dev);
-
- err:
- return -ENOMEM;
} /* avmcs_attach */
/*======================================================================
@@ -140,7 +101,6 @@ static int avmcs_probe(struct pcmcia_device *p_dev)
static void avmcs_detach(struct pcmcia_device *link)
{
avmcs_release(link);
- kfree(link->priv);
} /* avmcs_detach */
/*======================================================================
@@ -171,14 +131,11 @@ static int avmcs_configcheck(struct pcmcia_device *p_dev,
static int avmcs_config(struct pcmcia_device *link)
{
- local_info_t *dev;
- int i;
+ int i = -1;
char devname[128];
int cardtype;
int (*addcard)(unsigned int port, unsigned irq);
- dev = link->priv;
-
devname[0] = 0;
if (link->prod_id[1])
strlcpy(devname, link->prod_id[1], sizeof(devname));
@@ -190,11 +147,7 @@ static int avmcs_config(struct pcmcia_device *link)
return -ENODEV;
do {
- /*
- * allocate an interrupt line
- */
- i = pcmcia_request_irq(link, &link->irq);
- if (i != 0) {
+ if (!link->irq) {
/* undo */
pcmcia_disable_device(link);
break;
@@ -211,15 +164,11 @@ static int avmcs_config(struct pcmcia_device *link)
} while (0);
- /* At this point, the dev_node_t structure(s) should be
- initialized and arranged in a linked list at link->dev. */
-
if (devname[0]) {
char *s = strrchr(devname, ' ');
if (!s)
s = devname;
else s++;
- strcpy(dev->node.dev_name, s);
if (strcmp("M1", s) == 0) {
cardtype = AVM_CARDTYPE_M1;
} else if (strcmp("M2", s) == 0) {
@@ -227,14 +176,8 @@ static int avmcs_config(struct pcmcia_device *link)
} else {
cardtype = AVM_CARDTYPE_B1;
}
- } else {
- strcpy(dev->node.dev_name, "b1");
+ } else
cardtype = AVM_CARDTYPE_B1;
- }
-
- dev->node.major = 64;
- dev->node.minor = 0;
- link->dev_node = &dev->node;
/* If any step failed, release any partially configured state */
if (i != 0) {
@@ -249,13 +192,12 @@ static int avmcs_config(struct pcmcia_device *link)
default:
case AVM_CARDTYPE_B1: addcard = b1pcmcia_addcard_b1; break;
}
- if ((i = (*addcard)(link->io.BasePort1, link->irq.AssignedIRQ)) < 0) {
- printk(KERN_ERR "avm_cs: failed to add AVM-%s-Controller at i/o %#x, irq %d\n",
- dev->node.dev_name, link->io.BasePort1, link->irq.AssignedIRQ);
- avmcs_release(link);
- return -ENODEV;
+ if ((i = (*addcard)(link->io.BasePort1, link->irq)) < 0) {
+ dev_err(&link->dev, "avm_cs: failed to add AVM-Controller at i/o %#x, irq %d\n",
+ link->io.BasePort1, link->irq);
+ avmcs_release(link);
+ return -ENODEV;
}
- dev->node.minor = i;
return 0;
} /* avmcs_config */
@@ -270,7 +212,7 @@ static int avmcs_config(struct pcmcia_device *link)
static void avmcs_release(struct pcmcia_device *link)
{
- b1pcmcia_delcard(link->io.BasePort1, link->irq.AssignedIRQ);
+ b1pcmcia_delcard(link->io.BasePort1, link->irq);
pcmcia_disable_device(link);
} /* avmcs_release */
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index 75e71b5d9215..095ed76ebe80 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -117,7 +117,7 @@
* NOTE: only one mode value must be given for every card.
* -> See hfc_multi.h for HFC_IO_MODE_* values
* By default, the IO mode is pci memory IO (MEMIO).
- * Some cards requre specific IO mode, so it cannot be changed.
+ * Some cards require specific IO mode, so it cannot be changed.
* It may be usefull to set IO mode to register io (REGIO) to solve
* PCI bridge problems.
* If unsure, don't give this parameter.
diff --git a/drivers/isdn/hisax/avma1_cs.c b/drivers/isdn/hisax/avma1_cs.c
index 8d1d63a02b34..a80a7617f16f 100644
--- a/drivers/isdn/hisax/avma1_cs.c
+++ b/drivers/isdn/hisax/avma1_cs.c
@@ -62,31 +62,6 @@ static void avma1cs_release(struct pcmcia_device *link);
static void avma1cs_detach(struct pcmcia_device *p_dev) __devexit ;
-/*
- A linked list of "instances" of the skeleton device. Each actual
- PCMCIA card corresponds to one device instance, and is described
- by one struct pcmcia_device structure (defined in ds.h).
-
- You may not want to use a linked list for this -- for example, the
- memory card driver uses an array of struct pcmcia_device pointers, where minor
- device numbers are used to derive the corresponding array index.
-*/
-
-/*
- A driver needs to provide a dev_node_t structure for each device
- on a card. In some cases, there is only one device per card (for
- example, ethernet cards, modems). In other cases, there may be
- many actual or logical devices (SCSI adapters, memory cards with
- multiple partitions). The dev_node_t structures need to be kept
- in a linked list starting at the 'dev' field of a struct pcmcia_device
- structure. We allocate them in the card's private data structure,
- because they generally can't be allocated dynamically.
-*/
-
-typedef struct local_info_t {
- dev_node_t node;
-} local_info_t;
-
/*======================================================================
avma1cs_attach() creates an "instance" of the driver, allocating
@@ -101,17 +76,8 @@ typedef struct local_info_t {
static int __devinit avma1cs_probe(struct pcmcia_device *p_dev)
{
- local_info_t *local;
-
dev_dbg(&p_dev->dev, "avma1cs_attach()\n");
- /* Allocate space for private device-specific data */
- local = kzalloc(sizeof(local_info_t), GFP_KERNEL);
- if (!local)
- return -ENOMEM;
-
- p_dev->priv = local;
-
/* The io structure describes IO port mapping */
p_dev->io.NumPorts1 = 16;
p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
@@ -119,9 +85,6 @@ static int __devinit avma1cs_probe(struct pcmcia_device *p_dev)
p_dev->io.Attributes2 = IO_DATA_PATH_WIDTH_16;
p_dev->io.IOAddrLines = 5;
- /* Interrupt setup */
- p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
-
/* General socket configuration */
p_dev->conf.Attributes = CONF_ENABLE_IRQ;
p_dev->conf.IntType = INT_MEMORY_AND_IO;
@@ -176,14 +139,11 @@ static int avma1cs_configcheck(struct pcmcia_device *p_dev,
static int __devinit avma1cs_config(struct pcmcia_device *link)
{
- local_info_t *dev;
- int i;
+ int i = -1;
char devname[128];
IsdnCard_t icard;
int busy = 0;
- dev = link->priv;
-
dev_dbg(&link->dev, "avma1cs_config(0x%p)\n", link);
devname[0] = 0;
@@ -197,8 +157,7 @@ static int __devinit avma1cs_config(struct pcmcia_device *link)
/*
* allocate an interrupt line
*/
- i = pcmcia_request_irq(link, &link->irq);
- if (i != 0) {
+ if (!link->irq) {
/* undo */
pcmcia_disable_device(link);
break;
@@ -215,14 +174,6 @@ static int __devinit avma1cs_config(struct pcmcia_device *link)
} while (0);
- /* At this point, the dev_node_t structure(s) should be
- initialized and arranged in a linked list at link->dev. */
-
- strcpy(dev->node.dev_name, "A1");
- dev->node.major = 45;
- dev->node.minor = 0;
- link->dev_node = &dev->node;
-
/* If any step failed, release any partially configured state */
if (i != 0) {
avma1cs_release(link);
@@ -230,9 +181,9 @@ static int __devinit avma1cs_config(struct pcmcia_device *link)
}
printk(KERN_NOTICE "avma1_cs: checking at i/o %#x, irq %d\n",
- link->io.BasePort1, link->irq.AssignedIRQ);
+ link->io.BasePort1, link->irq);
- icard.para[0] = link->irq.AssignedIRQ;
+ icard.para[0] = link->irq;
icard.para[1] = link->io.BasePort1;
icard.protocol = isdnprot;
icard.typ = ISDN_CTYPE_A1_PCMCIA;
@@ -243,7 +194,7 @@ static int __devinit avma1cs_config(struct pcmcia_device *link)
avma1cs_release(link);
return -ENODEV;
}
- dev->node.minor = i;
+ link->priv = (void *) (unsigned long) i;
return 0;
} /* avma1cs_config */
@@ -258,12 +209,12 @@ static int __devinit avma1cs_config(struct pcmcia_device *link)
static void avma1cs_release(struct pcmcia_device *link)
{
- local_info_t *local = link->priv;
+ unsigned long minor = (unsigned long) link->priv;
dev_dbg(&link->dev, "avma1cs_release(0x%p)\n", link);
/* now unregister function with hisax */
- HiSax_closecard(local->node.minor);
+ HiSax_closecard(minor);
pcmcia_disable_device(link);
} /* avma1cs_release */
diff --git a/drivers/isdn/hisax/elsa_cs.c b/drivers/isdn/hisax/elsa_cs.c
index c9f2279e21f5..218927e3a4ea 100644
--- a/drivers/isdn/hisax/elsa_cs.c
+++ b/drivers/isdn/hisax/elsa_cs.c
@@ -87,24 +87,8 @@ static void elsa_cs_release(struct pcmcia_device *link);
static void elsa_cs_detach(struct pcmcia_device *p_dev) __devexit;
-/*
- A driver needs to provide a dev_node_t structure for each device
- on a card. In some cases, there is only one device per card (for
- example, ethernet cards, modems). In other cases, there may be
- many actual or logical devices (SCSI adapters, memory cards with
- multiple partitions). The dev_node_t structures need to be kept
- in a linked list starting at the 'dev' field of a struct pcmcia_device
- structure. We allocate them in the card's private data structure,
- because they generally shouldn't be allocated dynamically.
- In this case, we also provide a flag to indicate if a device is
- "stopped" due to a power management event, or card ejection. The
- device IO routines can use a flag like this to throttle IO to a
- card that is not ready to accept it.
-*/
-
typedef struct local_info_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
int busy;
int cardnr;
} local_info_t;
@@ -136,10 +120,6 @@ static int __devinit elsa_cs_probe(struct pcmcia_device *link)
local->cardnr = -1;
- /* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = NULL;
-
/*
General socket configuration defaults can go here. In this
client, we assume very little, and rely on the CIS for almost
@@ -223,28 +203,18 @@ static int __devinit elsa_cs_config(struct pcmcia_device *link)
if (i != 0)
goto failed;
- i = pcmcia_request_irq(link, &link->irq);
- if (i != 0) {
- link->irq.AssignedIRQ = 0;
+ if (!link->irq)
goto failed;
- }
i = pcmcia_request_configuration(link, &link->conf);
if (i != 0)
goto failed;
- /* At this point, the dev_node_t structure(s) should be
- initialized and arranged in a linked list at link->dev. *//* */
- sprintf(dev->node.dev_name, "elsa");
- dev->node.major = dev->node.minor = 0x0;
-
- link->dev_node = &dev->node;
-
/* Finally, report what we've done */
- printk(KERN_INFO "%s: index 0x%02x: ",
- dev->node.dev_name, link->conf.ConfigIndex);
+ dev_info(&link->dev, "index 0x%02x: ",
+ link->conf.ConfigIndex);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
- printk(", irq %d", link->irq.AssignedIRQ);
+ printk(", irq %d", link->irq);
if (link->io.NumPorts1)
printk(", io 0x%04x-0x%04x", link->io.BasePort1,
link->io.BasePort1+link->io.NumPorts1-1);
@@ -253,7 +223,7 @@ static int __devinit elsa_cs_config(struct pcmcia_device *link)
link->io.BasePort2+link->io.NumPorts2-1);
printk("\n");
- icard.para[0] = link->irq.AssignedIRQ;
+ icard.para[0] = link->irq;
icard.para[1] = link->io.BasePort1;
icard.protocol = protocol;
icard.typ = ISDN_CTYPE_ELSA_PCMCIA;
diff --git a/drivers/isdn/hisax/hfc4s8s_l1.c b/drivers/isdn/hisax/hfc4s8s_l1.c
index 051b44e2556c..384d5118e325 100644
--- a/drivers/isdn/hisax/hfc4s8s_l1.c
+++ b/drivers/isdn/hisax/hfc4s8s_l1.c
@@ -310,7 +310,7 @@ wait_busy(hfc4s8s_hw * a)
/******************************************************/
/* function to read critical counter registers that */
-/* may be udpated by the chip during read */
+/* may be updated by the chip during read */
/******************************************************/
static u_char
Read_hfc8_stable(hfc4s8s_hw * hw, int reg)
diff --git a/drivers/isdn/hisax/hisax_fcpcipnp.c b/drivers/isdn/hisax/hisax_fcpcipnp.c
index 1925118122f8..8b0a7d86b30f 100644
--- a/drivers/isdn/hisax/hisax_fcpcipnp.c
+++ b/drivers/isdn/hisax/hisax_fcpcipnp.c
@@ -74,9 +74,10 @@ static struct pnp_device_id fcpnp_ids[] __devinitdata = {
.id = "AVM0900",
.driver_data = (unsigned long) "Fritz!Card PnP",
},
+ { .id = "" }
};
-MODULE_DEVICE_TABLE(isapnp, fcpnp_ids);
+MODULE_DEVICE_TABLE(pnp, fcpnp_ids);
#endif
static int protocol = 2; /* EURO-ISDN Default */
diff --git a/drivers/isdn/hisax/sedlbauer_cs.c b/drivers/isdn/hisax/sedlbauer_cs.c
index 71b3ddef03bb..1f4feaab21af 100644
--- a/drivers/isdn/hisax/sedlbauer_cs.c
+++ b/drivers/isdn/hisax/sedlbauer_cs.c
@@ -87,32 +87,8 @@ static void sedlbauer_release(struct pcmcia_device *link);
static void sedlbauer_detach(struct pcmcia_device *p_dev) __devexit;
-/*
- You'll also need to prototype all the functions that will actually
- be used to talk to your device. See 'memory_cs' for a good example
- of a fully self-sufficient driver; the other drivers rely more or
- less on other parts of the kernel.
-*/
-
-/*
- A driver needs to provide a dev_node_t structure for each device
- on a card. In some cases, there is only one device per card (for
- example, ethernet cards, modems). In other cases, there may be
- many actual or logical devices (SCSI adapters, memory cards with
- multiple partitions). The dev_node_t structures need to be kept
- in a linked list starting at the 'dev' field of a struct pcmcia_device
- structure. We allocate them in the card's private data structure,
- because they generally shouldn't be allocated dynamically.
-
- In this case, we also provide a flag to indicate if a device is
- "stopped" due to a power management event, or card ejection. The
- device IO routines can use a flag like this to throttle IO to a
- card that is not ready to accept it.
-*/
-
typedef struct local_info_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
int stop;
int cardnr;
} local_info_t;
@@ -143,10 +119,6 @@ static int __devinit sedlbauer_probe(struct pcmcia_device *link)
local->p_dev = link;
link->priv = local;
- /* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = NULL;
-
/*
General socket configuration defaults can go here. In this
client, we assume very little, and rely on the CIS for almost
@@ -227,9 +199,7 @@ static int sedlbauer_config_check(struct pcmcia_device *p_dev,
else if (dflt->vpp1.present & (1<<CISTPL_POWER_VNOM))
p_dev->conf.Vpp = dflt->vpp1.param[CISTPL_POWER_VNOM]/10000;
- /* Do we need to allocate an interrupt? */
- if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
- p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
+ p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
@@ -285,7 +255,6 @@ static int sedlbauer_config_check(struct pcmcia_device *p_dev,
static int __devinit sedlbauer_config(struct pcmcia_device *link)
{
- local_info_t *dev = link->priv;
win_req_t *req;
int ret;
IsdnCard_t icard;
@@ -313,17 +282,6 @@ static int __devinit sedlbauer_config(struct pcmcia_device *link)
goto failed;
/*
- Allocate an interrupt line. Note that this does not assign a
- handler to the interrupt, unless the 'Handler' member of the
- irq structure is initialized.
- */
- if (link->conf.Attributes & CONF_ENABLE_IRQ) {
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
- goto failed;
- }
-
- /*
This actually configures the PCMCIA socket -- setting up
the I/O windows and the interrupt mapping, and putting the
card and host interface into "Memory and IO" mode.
@@ -332,21 +290,13 @@ static int __devinit sedlbauer_config(struct pcmcia_device *link)
if (ret)
goto failed;
- /*
- At this point, the dev_node_t structure(s) need to be
- initialized and arranged in a linked list at link->dev.
- */
- sprintf(dev->node.dev_name, "sedlbauer");
- dev->node.major = dev->node.minor = 0;
- link->dev_node = &dev->node;
-
/* Finally, report what we've done */
- printk(KERN_INFO "%s: index 0x%02x:",
- dev->node.dev_name, link->conf.ConfigIndex);
+ dev_info(&link->dev, "index 0x%02x:",
+ link->conf.ConfigIndex);
if (link->conf.Vpp)
printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
- printk(", irq %d", link->irq.AssignedIRQ);
+ printk(", irq %d", link->irq);
if (link->io.NumPorts1)
printk(", io 0x%04x-0x%04x", link->io.BasePort1,
link->io.BasePort1+link->io.NumPorts1-1);
@@ -358,7 +308,7 @@ static int __devinit sedlbauer_config(struct pcmcia_device *link)
req->Base+req->Size-1);
printk("\n");
- icard.para[0] = link->irq.AssignedIRQ;
+ icard.para[0] = link->irq;
icard.para[1] = link->io.BasePort1;
icard.protocol = protocol;
icard.typ = ISDN_CTYPE_SEDLBAUER_PCMCIA;
diff --git a/drivers/isdn/hisax/teles_cs.c b/drivers/isdn/hisax/teles_cs.c
index d010a0da8e19..5771955cc532 100644
--- a/drivers/isdn/hisax/teles_cs.c
+++ b/drivers/isdn/hisax/teles_cs.c
@@ -68,34 +68,8 @@ static void teles_cs_release(struct pcmcia_device *link);
static void teles_detach(struct pcmcia_device *p_dev) __devexit ;
-/*
- A linked list of "instances" of the teles_cs device. Each actual
- PCMCIA card corresponds to one device instance, and is described
- by one struct pcmcia_device structure (defined in ds.h).
-
- You may not want to use a linked list for this -- for example, the
- memory card driver uses an array of struct pcmcia_device pointers, where minor
- device numbers are used to derive the corresponding array index.
-*/
-
-/*
- A driver needs to provide a dev_node_t structure for each device
- on a card. In some cases, there is only one device per card (for
- example, ethernet cards, modems). In other cases, there may be
- many actual or logical devices (SCSI adapters, memory cards with
- multiple partitions). The dev_node_t structures need to be kept
- in a linked list starting at the 'dev' field of a struct pcmcia_device
- structure. We allocate them in the card's private data structure,
- because they generally shouldn't be allocated dynamically.
- In this case, we also provide a flag to indicate if a device is
- "stopped" due to a power management event, or card ejection. The
- device IO routines can use a flag like this to throttle IO to a
- card that is not ready to accept it.
-*/
-
typedef struct local_info_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
int busy;
int cardnr;
} local_info_t;
@@ -126,10 +100,6 @@ static int __devinit teles_probe(struct pcmcia_device *link)
local->p_dev = link;
link->priv = local;
- /* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = NULL;
-
/*
General socket configuration defaults can go here. In this
client, we assume very little, and rely on the CIS for almost
@@ -213,28 +183,18 @@ static int __devinit teles_cs_config(struct pcmcia_device *link)
if (i != 0)
goto cs_failed;
- i = pcmcia_request_irq(link, &link->irq);
- if (i != 0) {
- link->irq.AssignedIRQ = 0;
+ if (!link->irq)
goto cs_failed;
- }
i = pcmcia_request_configuration(link, &link->conf);
if (i != 0)
goto cs_failed;
- /* At this point, the dev_node_t structure(s) should be
- initialized and arranged in a linked list at link->dev. *//* */
- sprintf(dev->node.dev_name, "teles");
- dev->node.major = dev->node.minor = 0x0;
-
- link->dev_node = &dev->node;
-
/* Finally, report what we've done */
- printk(KERN_INFO "%s: index 0x%02x:",
- dev->node.dev_name, link->conf.ConfigIndex);
+ dev_info(&link->dev, "index 0x%02x:",
+ link->conf.ConfigIndex);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
- printk(", irq %d", link->irq.AssignedIRQ);
+ printk(", irq %d", link->irq);
if (link->io.NumPorts1)
printk(", io 0x%04x-0x%04x", link->io.BasePort1,
link->io.BasePort1+link->io.NumPorts1-1);
@@ -243,7 +203,7 @@ static int __devinit teles_cs_config(struct pcmcia_device *link)
link->io.BasePort2+link->io.NumPorts2-1);
printk("\n");
- icard.para[0] = link->irq.AssignedIRQ;
+ icard.para[0] = link->irq;
icard.para[1] = link->io.BasePort1;
icard.protocol = protocol;
icard.typ = ISDN_CTYPE_TELESPCMCIA;
diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
index efcf1f9327e5..fd10d7c785d4 100644
--- a/drivers/isdn/i4l/isdn_x25iface.c
+++ b/drivers/isdn/i4l/isdn_x25iface.c
@@ -194,7 +194,7 @@ static int isdn_x25iface_receive(struct concap_proto *cprot, struct sk_buff *skb
if ( ( (ix25_pdata_t*) (cprot->proto_data) )
-> state == WAN_CONNECTED ){
if( skb_push(skb, 1)){
- skb -> data[0]=0x00;
+ skb->data[0] = X25_IFACE_DATA;
skb->protocol = x25_type_trans(skb, cprot->net_dev);
netif_rx(skb);
return 0;
@@ -224,7 +224,7 @@ static int isdn_x25iface_connect_ind(struct concap_proto *cprot)
skb = dev_alloc_skb(1);
if( skb ){
- *( skb_put(skb, 1) ) = 0x01;
+ *(skb_put(skb, 1)) = X25_IFACE_CONNECT;
skb->protocol = x25_type_trans(skb, cprot->net_dev);
netif_rx(skb);
return 0;
@@ -253,7 +253,7 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *cprot)
*state_p = WAN_DISCONNECTED;
skb = dev_alloc_skb(1);
if( skb ){
- *( skb_put(skb, 1) ) = 0x02;
+ *(skb_put(skb, 1)) = X25_IFACE_DISCONNECT;
skb->protocol = x25_type_trans(skb, cprot->net_dev);
netif_rx(skb);
return 0;
@@ -272,9 +272,10 @@ static int isdn_x25iface_xmit(struct concap_proto *cprot, struct sk_buff *skb)
unsigned char firstbyte = skb->data[0];
enum wan_states *state = &((ix25_pdata_t*)cprot->proto_data)->state;
int ret = 0;
- IX25DEBUG( "isdn_x25iface_xmit: %s first=%x state=%d \n", MY_DEVNAME(cprot -> net_dev), firstbyte, *state );
+ IX25DEBUG("isdn_x25iface_xmit: %s first=%x state=%d\n",
+ MY_DEVNAME(cprot->net_dev), firstbyte, *state);
switch ( firstbyte ){
- case 0x00: /* dl_data request */
+ case X25_IFACE_DATA:
if( *state == WAN_CONNECTED ){
skb_pull(skb, 1);
cprot -> net_dev -> trans_start = jiffies;
@@ -285,7 +286,7 @@ static int isdn_x25iface_xmit(struct concap_proto *cprot, struct sk_buff *skb)
}
illegal_state_warn( *state, firstbyte );
break;
- case 0x01: /* dl_connect request */
+ case X25_IFACE_CONNECT:
if( *state == WAN_DISCONNECTED ){
*state = WAN_CONNECTING;
ret = cprot -> dops -> connect_req(cprot);
@@ -298,7 +299,7 @@ static int isdn_x25iface_xmit(struct concap_proto *cprot, struct sk_buff *skb)
illegal_state_warn( *state, firstbyte );
}
break;
- case 0x02: /* dl_disconnect request */
+ case X25_IFACE_DISCONNECT:
switch ( *state ){
case WAN_DISCONNECTED:
/* Should not happen. However, give upper layer a
@@ -318,7 +319,7 @@ static int isdn_x25iface_xmit(struct concap_proto *cprot, struct sk_buff *skb)
illegal_state_warn( *state, firstbyte );
}
break;
- case 0x03: /* changing lapb parameters requested */
+ case X25_IFACE_PARAMS:
printk(KERN_WARNING "isdn_x25iface_xmit: setting of lapb"
" options not yet supported\n");
break;
diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c
index 16a60c06c96c..b7677106cff8 100644
--- a/drivers/leds/leds-88pm860x.c
+++ b/drivers/leds/leds-88pm860x.c
@@ -256,8 +256,10 @@ static int pm860x_led_probe(struct platform_device *pdev)
if (pdev->dev.parent->platform_data) {
pm860x_pdata = pdev->dev.parent->platform_data;
pdata = pm860x_pdata->led;
- } else
- pdata = NULL;
+ } else {
+ dev_err(&pdev->dev, "missing platform data\n");
+ return -EINVAL;
+ }
data = kzalloc(sizeof(struct pm860x_led), GFP_KERNEL);
if (data == NULL)
@@ -268,8 +270,11 @@ static int pm860x_led_probe(struct platform_device *pdev)
data->i2c = (chip->id == CHIP_PM8606) ? chip->client : chip->companion;
data->iset = pdata->iset;
data->port = __check_device(pdata, data->name);
- if (data->port < 0)
+ if (data->port < 0) {
+ dev_err(&pdev->dev, "check device failed\n");
+ kfree(data);
return -EINVAL;
+ }
data->current_brightness = 0;
data->cdev.name = data->name;
diff --git a/drivers/macintosh/macio-adb.c b/drivers/macintosh/macio-adb.c
index 79119f56e82d..bd6da7a9c55b 100644
--- a/drivers/macintosh/macio-adb.c
+++ b/drivers/macintosh/macio-adb.c
@@ -155,6 +155,7 @@ static int macio_adb_reset_bus(void)
while ((in_8(&adb->ctrl.r) & ADB_RST) != 0) {
if (--timeout == 0) {
out_8(&adb->ctrl.r, in_8(&adb->ctrl.r) & ~ADB_RST);
+ spin_unlock_irqrestore(&macio_lock, flags);
return -1;
}
}
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 26a303a1d1ab..4b7a0afb5c61 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -40,8 +40,7 @@ static struct macio_chip *macio_on_hold;
static int macio_bus_match(struct device *dev, struct device_driver *drv)
{
struct macio_dev * macio_dev = to_macio_device(dev);
- struct macio_driver * macio_drv = to_macio_driver(drv);
- const struct of_device_id * matches = macio_drv->match_table;
+ const struct of_device_id * matches = drv->of_match_table;
if (!matches)
return 0;
@@ -84,7 +83,7 @@ static int macio_device_probe(struct device *dev)
macio_dev_get(macio_dev);
- match = of_match_device(drv->match_table, &macio_dev->ofdev);
+ match = of_match_device(drv->driver.of_match_table, &macio_dev->ofdev);
if (match)
error = drv->probe(macio_dev, match);
if (error)
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 888448cf7f1f..c9da5c4c167d 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -1183,8 +1183,10 @@ static ssize_t smu_read_command(struct file *file, struct smu_private *pp,
return -EOVERFLOW;
spin_lock_irqsave(&pp->lock, flags);
if (pp->cmd.status == 1) {
- if (file->f_flags & O_NONBLOCK)
+ if (file->f_flags & O_NONBLOCK) {
+ spin_unlock_irqrestore(&pp->lock, flags);
return -EAGAIN;
+ }
add_wait_queue(&pp->wait, &wait);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index c42eeb43042d..16d82f17ae82 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -182,6 +182,7 @@ remove_thermostat(struct i2c_client *client)
thermostat = NULL;
+ i2c_set_clientdata(client, NULL);
kfree(th);
return 0;
@@ -399,6 +400,7 @@ static int probe_thermostat(struct i2c_client *client,
rc = read_reg(th, CONFIG_REG);
if (rc < 0) {
dev_err(&client->dev, "Thermostat failed to read config!\n");
+ i2c_set_clientdata(client, NULL);
kfree(th);
return -ENODEV;
}
diff --git a/drivers/macintosh/windfarm_pm81.c b/drivers/macintosh/windfarm_pm81.c
index 565d5b2adc95..749d174b0dc6 100644
--- a/drivers/macintosh/windfarm_pm81.c
+++ b/drivers/macintosh/windfarm_pm81.c
@@ -188,7 +188,7 @@ struct wf_smu_sys_fans_state {
};
/*
- * Configs for SMU Sytem Fan control loop
+ * Configs for SMU System Fan control loop
*/
static struct wf_smu_sys_fans_param wf_smu_sys_all_params[] = {
/* Model ID 2 */
@@ -757,10 +757,8 @@ static int __devexit wf_smu_remove(struct platform_device *ddev)
wf_put_control(cpufreq_clamp);
/* Destroy control loops state structures */
- if (wf_smu_sys_fans)
- kfree(wf_smu_sys_fans);
- if (wf_smu_cpu_fans)
- kfree(wf_smu_cpu_fans);
+ kfree(wf_smu_sys_fans);
+ kfree(wf_smu_cpu_fans);
return 0;
}
diff --git a/drivers/macintosh/windfarm_pm91.c b/drivers/macintosh/windfarm_pm91.c
index bea99168ff35..344273235124 100644
--- a/drivers/macintosh/windfarm_pm91.c
+++ b/drivers/macintosh/windfarm_pm91.c
@@ -687,12 +687,9 @@ static int __devexit wf_smu_remove(struct platform_device *ddev)
wf_put_control(cpufreq_clamp);
/* Destroy control loops state structures */
- if (wf_smu_slots_fans)
- kfree(wf_smu_cpu_fans);
- if (wf_smu_drive_fans)
- kfree(wf_smu_cpu_fans);
- if (wf_smu_cpu_fans)
- kfree(wf_smu_cpu_fans);
+ kfree(wf_smu_slots_fans);
+ kfree(wf_smu_drive_fans);
+ kfree(wf_smu_cpu_fans);
return 0;
}
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index acb3a4e404ff..5c3585708b46 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -100,8 +100,8 @@ config MD_RAID1
If unsure, say Y.
config MD_RAID10
- tristate "RAID-10 (mirrored striping) mode (EXPERIMENTAL)"
- depends on BLK_DEV_MD && EXPERIMENTAL
+ tristate "RAID-10 (mirrored striping) mode"
+ depends on BLK_DEV_MD
---help---
RAID-10 provides a combination of striping (RAID-0) and
mirroring (RAID-1) with easier configuration and more flexible
@@ -319,4 +319,10 @@ config DM_UEVENT
---help---
Generate udev events for DM events.
+config DM_FLAKEY
+ tristate "Flakey target (EXPERIMENTAL)"
+ depends on BLK_DEV_DM && EXPERIMENTAL
+ ---help---
+ A target that intermittently fails I/O for debugging purposes.
+
endif # MD
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index e355e7f6a536..1d2a97501477 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_BLK_DEV_MD) += md-mod.o
obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o
obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
obj-$(CONFIG_DM_DELAY) += dm-delay.o
+obj-$(CONFIG_DM_FLAKEY) += dm-flakey.o
obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o
obj-$(CONFIG_DM_MULTIPATH_QL) += dm-queue-length.o
obj-$(CONFIG_DM_MULTIPATH_ST) += dm-service-time.o
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 26ac8aad0b19..c9c6a345e17b 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -505,7 +505,7 @@ void bitmap_update_sb(struct bitmap *bitmap)
return;
}
spin_unlock_irqrestore(&bitmap->lock, flags);
- sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
+ sb = kmap_atomic(bitmap->sb_page, KM_USER0);
sb->events = cpu_to_le64(bitmap->mddev->events);
if (bitmap->mddev->events < bitmap->events_cleared) {
/* rocking back to read-only */
@@ -526,7 +526,7 @@ void bitmap_print_sb(struct bitmap *bitmap)
if (!bitmap || !bitmap->sb_page)
return;
- sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
+ sb = kmap_atomic(bitmap->sb_page, KM_USER0);
printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap));
printk(KERN_DEBUG " magic: %08x\n", le32_to_cpu(sb->magic));
printk(KERN_DEBUG " version: %d\n", le32_to_cpu(sb->version));
@@ -575,7 +575,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
return err;
}
- sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
+ sb = kmap_atomic(bitmap->sb_page, KM_USER0);
chunksize = le32_to_cpu(sb->chunksize);
daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
@@ -661,7 +661,7 @@ static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
return 0;
}
spin_unlock_irqrestore(&bitmap->lock, flags);
- sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
+ sb = kmap_atomic(bitmap->sb_page, KM_USER0);
old = le32_to_cpu(sb->state) & bits;
switch (op) {
case MASK_SET: sb->state |= cpu_to_le32(bits);
@@ -1292,9 +1292,14 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
if (!bitmap) return 0;
if (behind) {
+ int bw;
atomic_inc(&bitmap->behind_writes);
+ bw = atomic_read(&bitmap->behind_writes);
+ if (bw > bitmap->behind_writes_used)
+ bitmap->behind_writes_used = bw;
+
PRINTK(KERN_DEBUG "inc write-behind count %d/%d\n",
- atomic_read(&bitmap->behind_writes), bitmap->max_write_behind);
+ bw, bitmap->max_write_behind);
}
while (sectors) {
@@ -1351,7 +1356,8 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
{
if (!bitmap) return;
if (behind) {
- atomic_dec(&bitmap->behind_writes);
+ if (atomic_dec_and_test(&bitmap->behind_writes))
+ wake_up(&bitmap->behind_wait);
PRINTK(KERN_DEBUG "dec write-behind count %d/%d\n",
atomic_read(&bitmap->behind_writes), bitmap->max_write_behind);
}
@@ -1675,6 +1681,7 @@ int bitmap_create(mddev_t *mddev)
atomic_set(&bitmap->pending_writes, 0);
init_waitqueue_head(&bitmap->write_wait);
init_waitqueue_head(&bitmap->overflow_wait);
+ init_waitqueue_head(&bitmap->behind_wait);
bitmap->mddev = mddev;
@@ -2006,6 +2013,27 @@ static ssize_t can_clear_store(mddev_t *mddev, const char *buf, size_t len)
static struct md_sysfs_entry bitmap_can_clear =
__ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store);
+static ssize_t
+behind_writes_used_show(mddev_t *mddev, char *page)
+{
+ if (mddev->bitmap == NULL)
+ return sprintf(page, "0\n");
+ return sprintf(page, "%lu\n",
+ mddev->bitmap->behind_writes_used);
+}
+
+static ssize_t
+behind_writes_used_reset(mddev_t *mddev, const char *buf, size_t len)
+{
+ if (mddev->bitmap)
+ mddev->bitmap->behind_writes_used = 0;
+ return len;
+}
+
+static struct md_sysfs_entry max_backlog_used =
+__ATTR(max_backlog_used, S_IRUGO | S_IWUSR,
+ behind_writes_used_show, behind_writes_used_reset);
+
static struct attribute *md_bitmap_attrs[] = {
&bitmap_location.attr,
&bitmap_timeout.attr,
@@ -2013,6 +2041,7 @@ static struct attribute *md_bitmap_attrs[] = {
&bitmap_chunksize.attr,
&bitmap_metadata.attr,
&bitmap_can_clear.attr,
+ &max_backlog_used.attr,
NULL
};
struct attribute_group md_bitmap_group = {
diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h
index cb821d76d1b4..3797dea4723a 100644
--- a/drivers/md/bitmap.h
+++ b/drivers/md/bitmap.h
@@ -227,6 +227,7 @@ struct bitmap {
int allclean;
atomic_t behind_writes;
+ unsigned long behind_writes_used; /* highest actual value at runtime */
/*
* the bitmap daemon - periodically wakes up and sweeps the bitmap
@@ -239,6 +240,7 @@ struct bitmap {
atomic_t pending_writes; /* pending writes to the bitmap file */
wait_queue_head_t write_wait;
wait_queue_head_t overflow_wait;
+ wait_queue_head_t behind_wait;
struct sysfs_dirent *sysfs_can_clear;
};
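
A minimal user-space sketch of the counting pattern the hunks above introduce: an atomic in-flight counter whose peak is recorded (behind_writes_used) and whose final decrement is the point where md wakes behind_wait. The helper names below (behind_inc, behind_dec) are invented for illustration; this is C11, not kernel code.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int behind_writes;     /* in-flight write-behind requests */
static int behind_writes_used;       /* highest value observed, as in struct bitmap */

static void behind_inc(void)
{
	int bw = atomic_fetch_add(&behind_writes, 1) + 1;

	if (bw > behind_writes_used)     /* record the peak, like bitmap_startwrite() */
		behind_writes_used = bw;
}

static int behind_dec(void)
{
	/* returns 1 when the last behind write finished; md then wakes behind_wait */
	return atomic_fetch_sub(&behind_writes, 1) == 1;
}

int main(void)
{
	behind_inc();
	behind_inc();
	behind_dec();
	if (behind_dec())
		printf("all behind writes done, peak was %d\n", behind_writes_used);
	return 0;
}
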
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
new file mode 100644
index 000000000000..de7d19d33366
--- /dev/null
+++ b/drivers/md/dm-flakey.c
@@ -0,0 +1,214 @@
+/*
+ * Copyright (C) 2003 Sistina Software (UK) Limited.
+ * Copyright (C) 2004, 2010 Red Hat, Inc. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#include <linux/device-mapper.h>
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/bio.h>
+#include <linux/slab.h>
+
+#define DM_MSG_PREFIX "flakey"
+
+typedef typeof(jiffies) jiffy_t;
+
+/*
+ * Flakey: Used for testing only, simulates intermittent,
+ * catastrophic device failure.
+ */
+struct flakey_c {
+ struct dm_dev *dev;
+ jiffy_t start_time;
+ sector_t start;
+ unsigned up_interval;
+ unsigned down_interval;
+};
+
+/*
+ * Construct a flakey mapping: <dev_path> <offset> <up interval> <down interval>
+ */
+static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+ struct flakey_c *fc;
+ unsigned long long tmp;
+
+ if (argc != 4) {
+ ti->error = "dm-flakey: Invalid argument count";
+ return -EINVAL;
+ }
+
+ fc = kmalloc(sizeof(*fc), GFP_KERNEL);
+ if (!fc) {
+ ti->error = "dm-flakey: Cannot allocate linear context";
+ return -ENOMEM;
+ }
+ fc->start_time = jiffies;
+
+ if (sscanf(argv[1], "%llu", &tmp) != 1) {
+ ti->error = "dm-flakey: Invalid device sector";
+ goto bad;
+ }
+ fc->start = tmp;
+
+ if (sscanf(argv[2], "%u", &fc->up_interval) != 1) {
+ ti->error = "dm-flakey: Invalid up interval";
+ goto bad;
+ }
+
+ if (sscanf(argv[3], "%u", &fc->down_interval) != 1) {
+ ti->error = "dm-flakey: Invalid down interval";
+ goto bad;
+ }
+
+ if (!(fc->up_interval + fc->down_interval)) {
+ ti->error = "dm-flakey: Total (up + down) interval is zero";
+ goto bad;
+ }
+
+ if (fc->up_interval + fc->down_interval < fc->up_interval) {
+ ti->error = "dm-flakey: Interval overflow";
+ goto bad;
+ }
+
+ if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &fc->dev)) {
+ ti->error = "dm-flakey: Device lookup failed";
+ goto bad;
+ }
+
+ ti->num_flush_requests = 1;
+ ti->private = fc;
+ return 0;
+
+bad:
+ kfree(fc);
+ return -EINVAL;
+}
+
+static void flakey_dtr(struct dm_target *ti)
+{
+ struct flakey_c *fc = ti->private;
+
+ dm_put_device(ti, fc->dev);
+ kfree(fc);
+}
+
+static sector_t flakey_map_sector(struct dm_target *ti, sector_t bi_sector)
+{
+ struct flakey_c *fc = ti->private;
+
+ return fc->start + (bi_sector - ti->begin);
+}
+
+static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
+{
+ struct flakey_c *fc = ti->private;
+
+ bio->bi_bdev = fc->dev->bdev;
+ if (bio_sectors(bio))
+ bio->bi_sector = flakey_map_sector(ti, bio->bi_sector);
+}
+
+static int flakey_map(struct dm_target *ti, struct bio *bio,
+ union map_info *map_context)
+{
+ struct flakey_c *fc = ti->private;
+ unsigned elapsed;
+
+ /* Are we alive ? */
+ elapsed = (jiffies - fc->start_time) / HZ;
+ if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval)
+ return -EIO;
+
+ flakey_map_bio(ti, bio);
+
+ return DM_MAPIO_REMAPPED;
+}
+
+static int flakey_status(struct dm_target *ti, status_type_t type,
+ char *result, unsigned int maxlen)
+{
+ struct flakey_c *fc = ti->private;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ result[0] = '\0';
+ break;
+
+ case STATUSTYPE_TABLE:
+ snprintf(result, maxlen, "%s %llu %u %u", fc->dev->name,
+ (unsigned long long)fc->start, fc->up_interval,
+ fc->down_interval);
+ break;
+ }
+ return 0;
+}
+
+static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg)
+{
+ struct flakey_c *fc = ti->private;
+
+ return __blkdev_driver_ioctl(fc->dev->bdev, fc->dev->mode, cmd, arg);
+}
+
+static int flakey_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
+ struct bio_vec *biovec, int max_size)
+{
+ struct flakey_c *fc = ti->private;
+ struct request_queue *q = bdev_get_queue(fc->dev->bdev);
+
+ if (!q->merge_bvec_fn)
+ return max_size;
+
+ bvm->bi_bdev = fc->dev->bdev;
+ bvm->bi_sector = flakey_map_sector(ti, bvm->bi_sector);
+
+ return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
+}
+
+static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
+{
+ struct flakey_c *fc = ti->private;
+
+ return fn(ti, fc->dev, fc->start, ti->len, data);
+}
+
+static struct target_type flakey_target = {
+ .name = "flakey",
+ .version = {1, 1, 0},
+ .module = THIS_MODULE,
+ .ctr = flakey_ctr,
+ .dtr = flakey_dtr,
+ .map = flakey_map,
+ .status = flakey_status,
+ .ioctl = flakey_ioctl,
+ .merge = flakey_merge,
+ .iterate_devices = flakey_iterate_devices,
+};
+
+static int __init dm_flakey_init(void)
+{
+ int r = dm_register_target(&flakey_target);
+
+ if (r < 0)
+ DMERR("register failed %d", r);
+
+ return r;
+}
+
+static void __exit dm_flakey_exit(void)
+{
+ dm_unregister_target(&flakey_target);
+}
+
+/* Module hooks */
+module_init(dm_flakey_init);
+module_exit(dm_flakey_exit);
+
+MODULE_DESCRIPTION(DM_NAME " flakey target");
+MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
+MODULE_LICENSE("GPL");
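
A self-contained C model of the up/down decision in flakey_map() above. The device_is_live() helper and the sample table line are illustrative only, but the arithmetic mirrors the target: seconds since the table was loaded, folded into the up+down cycle and compared against the up interval.

#include <stdio.h>

/* 1 while the simulated device accepts I/O, 0 while it fails with -EIO */
static int device_is_live(unsigned elapsed_secs, unsigned up, unsigned down)
{
	return (elapsed_secs % (up + down)) < up;
}

int main(void)
{
	unsigned t;

	/* e.g. a table such as "0 1048576 flakey /dev/sdb 0 30 5": 30s up, 5s down */
	for (t = 0; t < 70; t += 10)
		printf("t=%2us -> %s\n", t, device_is_live(t, 30, 5) ? "ok" : "EIO");
	return 0;
}
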
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index d7500e1c26f2..df561636a0e1 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -774,8 +774,6 @@ static int dev_rename(struct dm_ioctl *param, size_t param_size)
if (r)
return r;
- param->data_size = 0;
-
return dm_hash_rename(param->event_nr, &param->flags, param->name,
new_name);
}
@@ -1593,6 +1591,7 @@ static long dm_compat_ctl_ioctl(struct file *file, uint command, ulong u)
#endif
static const struct file_operations _ctl_fops = {
+ .open = nonseekable_open,
.unlocked_ioctl = dm_ctl_ioctl,
.compat_ioctl = dm_compat_ctl_ioctl,
.owner = THIS_MODULE,
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 8e3850b98cca..1a8987884614 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -169,10 +169,9 @@ static void add_sector(conf_t *conf, sector_t start, int mode)
conf->nfaults = n+1;
}
-static int make_request(struct request_queue *q, struct bio *bio)
+static int make_request(mddev_t *mddev, struct bio *bio)
{
- mddev_t *mddev = q->queuedata;
- conf_t *conf = (conf_t*)mddev->private;
+ conf_t *conf = mddev->private;
int failit = 0;
if (bio_data_dir(bio) == WRITE) {
@@ -225,7 +224,7 @@ static int make_request(struct request_queue *q, struct bio *bio)
static void status(struct seq_file *seq, mddev_t *mddev)
{
- conf_t *conf = (conf_t*)mddev->private;
+ conf_t *conf = mddev->private;
int n;
if ((n=atomic_read(&conf->counters[WriteTransient])) != 0)
@@ -328,7 +327,7 @@ static int run(mddev_t *mddev)
static int stop(mddev_t *mddev)
{
- conf_t *conf = (conf_t *)mddev->private;
+ conf_t *conf = mddev->private;
kfree(conf);
mddev->private = NULL;
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 09437e958235..b619b6c27f0c 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -159,7 +159,8 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
sector_t sectors;
if (j < 0 || j >= raid_disks || disk->rdev) {
- printk("linear: disk numbering problem. Aborting!\n");
+ printk(KERN_ERR "md/linear:%s: disk numbering problem. Aborting!\n",
+ mdname(mddev));
goto out;
}
@@ -187,7 +188,8 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
}
if (cnt != raid_disks) {
- printk("linear: not enough drives present. Aborting!\n");
+ printk(KERN_ERR "md/linear:%s: not enough drives present. Aborting!\n",
+ mdname(mddev));
goto out;
}
@@ -286,25 +288,16 @@ static int linear_stop (mddev_t *mddev)
return 0;
}
-static int linear_make_request (struct request_queue *q, struct bio *bio)
+static int linear_make_request (mddev_t *mddev, struct bio *bio)
{
- const int rw = bio_data_dir(bio);
- mddev_t *mddev = q->queuedata;
dev_info_t *tmp_dev;
sector_t start_sector;
- int cpu;
if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
md_barrier_request(mddev, bio);
return 0;
}
- cpu = part_stat_lock();
- part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
- part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
- bio_sectors(bio));
- part_stat_unlock();
-
rcu_read_lock();
tmp_dev = which_dev(mddev, bio->bi_sector);
start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
@@ -314,12 +307,14 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
|| (bio->bi_sector < start_sector))) {
char b[BDEVNAME_SIZE];
- printk("linear_make_request: Sector %llu out of bounds on "
- "dev %s: %llu sectors, offset %llu\n",
- (unsigned long long)bio->bi_sector,
- bdevname(tmp_dev->rdev->bdev, b),
- (unsigned long long)tmp_dev->rdev->sectors,
- (unsigned long long)start_sector);
+ printk(KERN_ERR
+ "md/linear:%s: make_request: Sector %llu out of bounds on "
+ "dev %s: %llu sectors, offset %llu\n",
+ mdname(mddev),
+ (unsigned long long)bio->bi_sector,
+ bdevname(tmp_dev->rdev->bdev, b),
+ (unsigned long long)tmp_dev->rdev->sectors,
+ (unsigned long long)start_sector);
rcu_read_unlock();
bio_io_error(bio);
return 0;
@@ -336,9 +331,9 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
bp = bio_split(bio, end_sector - bio->bi_sector);
- if (linear_make_request(q, &bp->bio1))
+ if (linear_make_request(mddev, &bp->bio1))
generic_make_request(&bp->bio1);
- if (linear_make_request(q, &bp->bio2))
+ if (linear_make_request(mddev, &bp->bio2))
generic_make_request(&bp->bio2);
bio_pair_release(bp);
return 0;
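
The faulty.c and linear.c hunks above drop the per-personality statistics and the q->queuedata lookup; as the md.c diff below shows, ->make_request now receives the mddev directly and md_make_request() does the part_stat accounting once for all personalities. A compressed user-space sketch of that dispatcher pattern; all names here (bio_model, mddev_model, md_make_request_model) are invented stand-ins, not the md API.

#include <stdio.h>

struct bio_model { int rw; unsigned sectors; };
struct mddev_model;

struct personality {
	/* the hook now takes the device, not the request queue */
	int (*make_request)(struct mddev_model *mddev, struct bio_model *bio);
};

struct mddev_model {
	struct personality *pers;
	unsigned long ios[2], sectors[2];   /* stand-in for the part_stat counters */
};

static int linear_make_request_model(struct mddev_model *mddev, struct bio_model *bio)
{
	(void)mddev;
	return 0;                           /* pretend the bio was remapped */
}

/* accounting happens here, once, instead of in every personality */
static int md_make_request_model(struct mddev_model *mddev, struct bio_model *bio)
{
	int rv = mddev->pers->make_request(mddev, bio);

	mddev->ios[bio->rw]++;
	mddev->sectors[bio->rw] += bio->sectors;
	return rv;
}

int main(void)
{
	struct personality lin = { linear_make_request_model };
	struct mddev_model md = { &lin, {0, 0}, {0, 0} };
	struct bio_model bio = { 1, 8 };

	md_make_request_model(&md, &bio);
	printf("write ios=%lu sectors=%lu\n", md.ios[1], md.sectors[1]);
	return 0;
}
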
diff --git a/drivers/md/md.c b/drivers/md/md.c
index cefd63daff31..004a0b0d6610 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -215,8 +215,11 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
*/
static int md_make_request(struct request_queue *q, struct bio *bio)
{
+ const int rw = bio_data_dir(bio);
mddev_t *mddev = q->queuedata;
int rv;
+ int cpu;
+
if (mddev == NULL || mddev->pers == NULL) {
bio_io_error(bio);
return 0;
@@ -237,13 +240,27 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
}
atomic_inc(&mddev->active_io);
rcu_read_unlock();
- rv = mddev->pers->make_request(q, bio);
+
+ rv = mddev->pers->make_request(mddev, bio);
+
+ cpu = part_stat_lock();
+ part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+ part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+ bio_sectors(bio));
+ part_stat_unlock();
+
if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
wake_up(&mddev->sb_wait);
return rv;
}
+/* mddev_suspend makes sure no new requests are submitted
+ * to the device, and that any requests that have been submitted
+ * are completely handled.
+ * Once ->stop is called and completes, the module will be completely
+ * unused.
+ */
static void mddev_suspend(mddev_t *mddev)
{
BUG_ON(mddev->suspended);
@@ -251,13 +268,6 @@ static void mddev_suspend(mddev_t *mddev)
synchronize_rcu();
wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
mddev->pers->quiesce(mddev, 1);
- md_unregister_thread(mddev->thread);
- mddev->thread = NULL;
- /* we now know that no code is executing in the personality module,
- * except possibly the tail end of a ->bi_end_io function, but that
- * is certain to complete before the module has a chance to get
- * unloaded
- */
}
static void mddev_resume(mddev_t *mddev)
@@ -344,7 +354,7 @@ static void md_submit_barrier(struct work_struct *ws)
bio_endio(bio, 0);
else {
bio->bi_rw &= ~(1<<BIO_RW_BARRIER);
- if (mddev->pers->make_request(mddev->queue, bio))
+ if (mddev->pers->make_request(mddev, bio))
generic_make_request(bio);
mddev->barrier = POST_REQUEST_BARRIER;
submit_barriers(mddev);
@@ -406,6 +416,27 @@ static void mddev_put(mddev_t *mddev)
spin_unlock(&all_mddevs_lock);
}
+static void mddev_init(mddev_t *mddev)
+{
+ mutex_init(&mddev->open_mutex);
+ mutex_init(&mddev->reconfig_mutex);
+ mutex_init(&mddev->bitmap_info.mutex);
+ INIT_LIST_HEAD(&mddev->disks);
+ INIT_LIST_HEAD(&mddev->all_mddevs);
+ init_timer(&mddev->safemode_timer);
+ atomic_set(&mddev->active, 1);
+ atomic_set(&mddev->openers, 0);
+ atomic_set(&mddev->active_io, 0);
+ spin_lock_init(&mddev->write_lock);
+ atomic_set(&mddev->flush_pending, 0);
+ init_waitqueue_head(&mddev->sb_wait);
+ init_waitqueue_head(&mddev->recovery_wait);
+ mddev->reshape_position = MaxSector;
+ mddev->resync_min = 0;
+ mddev->resync_max = MaxSector;
+ mddev->level = LEVEL_NONE;
+}
+
static mddev_t * mddev_find(dev_t unit)
{
mddev_t *mddev, *new = NULL;
@@ -472,23 +503,7 @@ static mddev_t * mddev_find(dev_t unit)
else
new->md_minor = MINOR(unit) >> MdpMinorShift;
- mutex_init(&new->open_mutex);
- mutex_init(&new->reconfig_mutex);
- mutex_init(&new->bitmap_info.mutex);
- INIT_LIST_HEAD(&new->disks);
- INIT_LIST_HEAD(&new->all_mddevs);
- init_timer(&new->safemode_timer);
- atomic_set(&new->active, 1);
- atomic_set(&new->openers, 0);
- atomic_set(&new->active_io, 0);
- spin_lock_init(&new->write_lock);
- atomic_set(&new->flush_pending, 0);
- init_waitqueue_head(&new->sb_wait);
- init_waitqueue_head(&new->recovery_wait);
- new->reshape_position = MaxSector;
- new->resync_min = 0;
- new->resync_max = MaxSector;
- new->level = LEVEL_NONE;
+ mddev_init(new);
goto retry;
}
@@ -508,9 +523,36 @@ static inline int mddev_trylock(mddev_t * mddev)
return mutex_trylock(&mddev->reconfig_mutex);
}
-static inline void mddev_unlock(mddev_t * mddev)
+static struct attribute_group md_redundancy_group;
+
+static void mddev_unlock(mddev_t * mddev)
{
- mutex_unlock(&mddev->reconfig_mutex);
+ if (mddev->to_remove) {
+ /* These cannot be removed under reconfig_mutex as
+ * an access to the files will try to take reconfig_mutex
+ * while holding the file unremovable, which leads to
+ * a deadlock.
+ * So hold open_mutex instead - we are allowed to take
+ * it while holding reconfig_mutex, and md_run can
+ * use it to wait for the remove to complete.
+ */
+ struct attribute_group *to_remove = mddev->to_remove;
+ mddev->to_remove = NULL;
+ mutex_lock(&mddev->open_mutex);
+ mutex_unlock(&mddev->reconfig_mutex);
+
+ if (to_remove != &md_redundancy_group)
+ sysfs_remove_group(&mddev->kobj, to_remove);
+ if (mddev->pers == NULL ||
+ mddev->pers->sync_request == NULL) {
+ sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
+ if (mddev->sysfs_action)
+ sysfs_put(mddev->sysfs_action);
+ mddev->sysfs_action = NULL;
+ }
+ mutex_unlock(&mddev->open_mutex);
+ } else
+ mutex_unlock(&mddev->reconfig_mutex);
md_wakeup_thread(mddev->thread);
}
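
The mddev_unlock() rework above sidesteps a deadlock by postponing sysfs group removal until reconfig_mutex is dropped, holding open_mutex across the removal instead. A pthread sketch of that ordering under simplified assumptions; reconfig, open_lock and remove_group() are placeholders, not md symbols.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t reconfig = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t open_lock = PTHREAD_MUTEX_INITIALIZER;
static const char *to_remove;              /* set while reconfig is held */

static void remove_group(const char *name)
{
	/* stands in for sysfs_remove_group(): it can block on readers that
	 * themselves want reconfig, so it must not run under reconfig */
	printf("removing %s\n", name);
}

static void unlock_reconfig(void)
{
	if (to_remove) {
		const char *doomed = to_remove;

		to_remove = NULL;
		pthread_mutex_lock(&open_lock);   /* legal while reconfig is held */
		pthread_mutex_unlock(&reconfig);
		remove_group(doomed);             /* reconfig no longer held here */
		pthread_mutex_unlock(&open_lock);
	} else
		pthread_mutex_unlock(&reconfig);
}

int main(void)
{
	pthread_mutex_lock(&reconfig);
	to_remove = "md_redundancy_group";
	unlock_reconfig();
	return 0;
}
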
@@ -2365,6 +2407,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
return err;
sprintf(nm, "rd%d", rdev->raid_disk);
sysfs_remove_link(&rdev->mddev->kobj, nm);
+ rdev->raid_disk = -1;
set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
md_wakeup_thread(rdev->mddev->thread);
} else if (rdev->mddev->pers) {
@@ -2780,8 +2823,9 @@ static void analyze_sbs(mddev_t * mddev)
i = 0;
rdev_for_each(rdev, tmp, mddev) {
- if (rdev->desc_nr >= mddev->max_disks ||
- i > mddev->max_disks) {
+ if (mddev->max_disks &&
+ (rdev->desc_nr >= mddev->max_disks ||
+ i > mddev->max_disks)) {
printk(KERN_WARNING
"md: %s: %s: only %d devices permitted\n",
mdname(mddev), bdevname(rdev->bdev, b),
@@ -2897,9 +2941,10 @@ level_show(mddev_t *mddev, char *page)
static ssize_t
level_store(mddev_t *mddev, const char *buf, size_t len)
{
- char level[16];
+ char clevel[16];
ssize_t rv = len;
struct mdk_personality *pers;
+ long level;
void *priv;
mdk_rdev_t *rdev;
@@ -2932,19 +2977,22 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
}
/* Now find the new personality */
- if (len == 0 || len >= sizeof(level))
+ if (len == 0 || len >= sizeof(clevel))
return -EINVAL;
- strncpy(level, buf, len);
- if (level[len-1] == '\n')
+ strncpy(clevel, buf, len);
+ if (clevel[len-1] == '\n')
len--;
- level[len] = 0;
+ clevel[len] = 0;
+ if (strict_strtol(clevel, 10, &level))
+ level = LEVEL_NONE;
- request_module("md-%s", level);
+ if (request_module("md-%s", clevel) != 0)
+ request_module("md-level-%s", clevel);
spin_lock(&pers_lock);
- pers = find_pers(LEVEL_NONE, level);
+ pers = find_pers(level, clevel);
if (!pers || !try_module_get(pers->owner)) {
spin_unlock(&pers_lock);
- printk(KERN_WARNING "md: personality %s not loaded\n", level);
+ printk(KERN_WARNING "md: personality %s not loaded\n", clevel);
return -EINVAL;
}
spin_unlock(&pers_lock);
@@ -2957,7 +3005,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
if (!pers->takeover) {
module_put(pers->owner);
printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
- mdname(mddev), level);
+ mdname(mddev), clevel);
return -EINVAL;
}
@@ -2973,13 +3021,43 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
mddev->delta_disks = 0;
module_put(pers->owner);
printk(KERN_WARNING "md: %s: %s would not accept array\n",
- mdname(mddev), level);
+ mdname(mddev), clevel);
return PTR_ERR(priv);
}
/* Looks like we have a winner */
mddev_suspend(mddev);
mddev->pers->stop(mddev);
+
+ if (mddev->pers->sync_request == NULL &&
+ pers->sync_request != NULL) {
+ /* need to add the md_redundancy_group */
+ if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
+ printk(KERN_WARNING
+ "md: cannot register extra attributes for %s\n",
+ mdname(mddev));
+ mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
+ }
+ if (mddev->pers->sync_request != NULL &&
+ pers->sync_request == NULL) {
+ /* need to remove the md_redundancy_group */
+ if (mddev->to_remove == NULL)
+ mddev->to_remove = &md_redundancy_group;
+ }
+
+ if (mddev->pers->sync_request == NULL &&
+ mddev->external) {
+ /* We are converting from a no-redundancy array
+ * to a redundancy array and metadata is managed
+ * externally so we need to be sure that writes
+ * won't block due to a need to transition
+ * clean->dirty
+ * until external management is started.
+ */
+ mddev->in_sync = 0;
+ mddev->safemode_delay = 0;
+ mddev->safemode = 0;
+ }
module_put(mddev->pers->owner);
/* Invalidate devices that are now superfluous */
list_for_each_entry(rdev, &mddev->disks, same_set)
@@ -2994,11 +3072,20 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
mddev->layout = mddev->new_layout;
mddev->chunk_sectors = mddev->new_chunk_sectors;
mddev->delta_disks = 0;
+ if (mddev->pers->sync_request == NULL) {
+ /* this is now an array without redundancy, so
+ * it must always be in_sync
+ */
+ mddev->in_sync = 1;
+ del_timer_sync(&mddev->safemode_timer);
+ }
pers->run(mddev);
mddev_resume(mddev);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
+ sysfs_notify(&mddev->kobj, NULL, "level");
+ md_new_event(mddev);
return rv;
}
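
level_store() above now keeps the textual level in clevel[] and tries a numeric parse in parallel, so find_pers() can match either "5" or "raid5". A user-space sketch of that dual interpretation; strtol stands in for strict_strtol(), the LEVEL_NONE value mirrors md's sentinel, and parse_level() itself is invented for illustration.

#include <stdio.h>
#include <stdlib.h>

#define LEVEL_NONE (-1000000)

/* numeric level, or LEVEL_NONE when buf only names a level textually */
static long parse_level(const char *buf)
{
	char *end;
	long level = strtol(buf, &end, 10);

	if (end == buf || *end != '\0')
		return LEVEL_NONE;
	return level;
}

int main(void)
{
	const char *tests[] = { "5", "raid10", "0" };
	unsigned i;

	for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
		printf("%-7s -> %ld\n", tests[i], parse_level(tests[i]));
	return 0;
}
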
@@ -3237,6 +3324,7 @@ array_state_show(mddev_t *mddev, char *page)
}
static int do_md_stop(mddev_t * mddev, int ro, int is_open);
+static int md_set_readonly(mddev_t * mddev, int is_open);
static int do_md_run(mddev_t * mddev);
static int restart_array(mddev_t *mddev);
@@ -3267,7 +3355,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
break; /* not supported yet */
case readonly:
if (mddev->pers)
- err = do_md_stop(mddev, 1, 0);
+ err = md_set_readonly(mddev, 0);
else {
mddev->ro = 1;
set_disk_ro(mddev->gendisk, 1);
@@ -3277,7 +3365,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
case read_auto:
if (mddev->pers) {
if (mddev->ro == 0)
- err = do_md_stop(mddev, 1, 0);
+ err = md_set_readonly(mddev, 0);
else if (mddev->ro == 1)
err = restart_array(mddev);
if (err == 0) {
@@ -4082,15 +4170,6 @@ static void mddev_delayed_delete(struct work_struct *ws)
{
mddev_t *mddev = container_of(ws, mddev_t, del_work);
- if (mddev->private) {
- sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
- if (mddev->private != (void*)1)
- sysfs_remove_group(&mddev->kobj, mddev->private);
- if (mddev->sysfs_action)
- sysfs_put(mddev->sysfs_action);
- mddev->sysfs_action = NULL;
- mddev->private = NULL;
- }
sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
kobject_del(&mddev->kobj);
kobject_put(&mddev->kobj);
@@ -4234,11 +4313,10 @@ static void md_safemode_timeout(unsigned long data)
static int start_dirty_degraded;
-static int do_md_run(mddev_t * mddev)
+static int md_run(mddev_t *mddev)
{
int err;
mdk_rdev_t *rdev;
- struct gendisk *disk;
struct mdk_personality *pers;
if (list_empty(&mddev->disks))
@@ -4248,6 +4326,13 @@ static int do_md_run(mddev_t * mddev)
if (mddev->pers)
return -EBUSY;
+ /* These two calls synchronise us with the
+ * sysfs_remove_group calls in mddev_unlock,
+ * so they must have completed.
+ */
+ mutex_lock(&mddev->open_mutex);
+ mutex_unlock(&mddev->open_mutex);
+
/*
* Analyze all RAID superblock(s)
*/
@@ -4296,8 +4381,6 @@ static int do_md_run(mddev_t * mddev)
sysfs_notify_dirent(rdev->sysfs_state);
}
- disk = mddev->gendisk;
-
spin_lock(&pers_lock);
pers = find_pers(mddev->level, mddev->clevel);
if (!pers || !try_module_get(pers->owner)) {
@@ -4425,22 +4508,32 @@ static int do_md_run(mddev_t * mddev)
if (mddev->flags)
md_update_sb(mddev, 0);
- set_capacity(disk, mddev->array_sectors);
-
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
- revalidate_disk(mddev->gendisk);
- mddev->changed = 1;
md_new_event(mddev);
sysfs_notify_dirent(mddev->sysfs_state);
if (mddev->sysfs_action)
sysfs_notify_dirent(mddev->sysfs_action);
sysfs_notify(&mddev->kobj, NULL, "degraded");
- kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
return 0;
}
+static int do_md_run(mddev_t *mddev)
+{
+ int err;
+
+ err = md_run(mddev);
+ if (err)
+ goto out;
+
+ set_capacity(mddev->gendisk, mddev->array_sectors);
+ revalidate_disk(mddev->gendisk);
+ kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
+out:
+ return err;
+}
+
static int restart_array(mddev_t *mddev)
{
struct gendisk *disk = mddev->gendisk;
@@ -4491,9 +4584,109 @@ void restore_bitmap_write_access(struct file *file)
spin_unlock(&inode->i_lock);
}
+static void md_clean(mddev_t *mddev)
+{
+ mddev->array_sectors = 0;
+ mddev->external_size = 0;
+ mddev->dev_sectors = 0;
+ mddev->raid_disks = 0;
+ mddev->recovery_cp = 0;
+ mddev->resync_min = 0;
+ mddev->resync_max = MaxSector;
+ mddev->reshape_position = MaxSector;
+ mddev->external = 0;
+ mddev->persistent = 0;
+ mddev->level = LEVEL_NONE;
+ mddev->clevel[0] = 0;
+ mddev->flags = 0;
+ mddev->ro = 0;
+ mddev->metadata_type[0] = 0;
+ mddev->chunk_sectors = 0;
+ mddev->ctime = mddev->utime = 0;
+ mddev->layout = 0;
+ mddev->max_disks = 0;
+ mddev->events = 0;
+ mddev->delta_disks = 0;
+ mddev->new_level = LEVEL_NONE;
+ mddev->new_layout = 0;
+ mddev->new_chunk_sectors = 0;
+ mddev->curr_resync = 0;
+ mddev->resync_mismatches = 0;
+ mddev->suspend_lo = mddev->suspend_hi = 0;
+ mddev->sync_speed_min = mddev->sync_speed_max = 0;
+ mddev->recovery = 0;
+ mddev->in_sync = 0;
+ mddev->degraded = 0;
+ mddev->barriers_work = 0;
+ mddev->safemode = 0;
+ mddev->bitmap_info.offset = 0;
+ mddev->bitmap_info.default_offset = 0;
+ mddev->bitmap_info.chunksize = 0;
+ mddev->bitmap_info.daemon_sleep = 0;
+ mddev->bitmap_info.max_write_behind = 0;
+}
+
+static void md_stop_writes(mddev_t *mddev)
+{
+ if (mddev->sync_thread) {
+ set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ md_unregister_thread(mddev->sync_thread);
+ mddev->sync_thread = NULL;
+ }
+
+ del_timer_sync(&mddev->safemode_timer);
+
+ bitmap_flush(mddev);
+ md_super_wait(mddev);
+
+ if (!mddev->in_sync || mddev->flags) {
+ /* mark array as shutdown cleanly */
+ mddev->in_sync = 1;
+ md_update_sb(mddev, 1);
+ }
+}
+
+static void md_stop(mddev_t *mddev)
+{
+ md_stop_writes(mddev);
+
+ mddev->pers->stop(mddev);
+ if (mddev->pers->sync_request && mddev->to_remove == NULL)
+ mddev->to_remove = &md_redundancy_group;
+ module_put(mddev->pers->owner);
+ mddev->pers = NULL;
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+}
+
+static int md_set_readonly(mddev_t *mddev, int is_open)
+{
+ int err = 0;
+ mutex_lock(&mddev->open_mutex);
+ if (atomic_read(&mddev->openers) > is_open) {
+ printk("md: %s still in use.\n",mdname(mddev));
+ err = -EBUSY;
+ goto out;
+ }
+ if (mddev->pers) {
+ md_stop_writes(mddev);
+
+ err = -ENXIO;
+ if (mddev->ro==1)
+ goto out;
+ mddev->ro = 1;
+ set_disk_ro(mddev->gendisk, 1);
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ sysfs_notify_dirent(mddev->sysfs_state);
+ err = 0;
+ }
+out:
+ mutex_unlock(&mddev->open_mutex);
+ return err;
+}
+
/* mode:
* 0 - completely stop and dis-assemble array
- * 1 - switch to readonly
* 2 - stop but do not disassemble array
*/
static int do_md_stop(mddev_t * mddev, int mode, int is_open)
@@ -4508,64 +4701,32 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
err = -EBUSY;
} else if (mddev->pers) {
- if (mddev->sync_thread) {
- set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- md_unregister_thread(mddev->sync_thread);
- mddev->sync_thread = NULL;
- }
+ if (mddev->ro)
+ set_disk_ro(disk, 0);
- del_timer_sync(&mddev->safemode_timer);
+ md_stop(mddev);
+ mddev->queue->merge_bvec_fn = NULL;
+ mddev->queue->unplug_fn = NULL;
+ mddev->queue->backing_dev_info.congested_fn = NULL;
- switch(mode) {
- case 1: /* readonly */
- err = -ENXIO;
- if (mddev->ro==1)
- goto out;
- mddev->ro = 1;
- break;
- case 0: /* disassemble */
- case 2: /* stop */
- bitmap_flush(mddev);
- md_super_wait(mddev);
- if (mddev->ro)
- set_disk_ro(disk, 0);
-
- mddev->pers->stop(mddev);
- mddev->queue->merge_bvec_fn = NULL;
- mddev->queue->unplug_fn = NULL;
- mddev->queue->backing_dev_info.congested_fn = NULL;
- module_put(mddev->pers->owner);
- if (mddev->pers->sync_request && mddev->private == NULL)
- mddev->private = (void*)1;
- mddev->pers = NULL;
- /* tell userspace to handle 'inactive' */
- sysfs_notify_dirent(mddev->sysfs_state);
+ /* tell userspace to handle 'inactive' */
+ sysfs_notify_dirent(mddev->sysfs_state);
- list_for_each_entry(rdev, &mddev->disks, same_set)
- if (rdev->raid_disk >= 0) {
- char nm[20];
- sprintf(nm, "rd%d", rdev->raid_disk);
- sysfs_remove_link(&mddev->kobj, nm);
- }
+ list_for_each_entry(rdev, &mddev->disks, same_set)
+ if (rdev->raid_disk >= 0) {
+ char nm[20];
+ sprintf(nm, "rd%d", rdev->raid_disk);
+ sysfs_remove_link(&mddev->kobj, nm);
+ }
- set_capacity(disk, 0);
- mddev->changed = 1;
+ set_capacity(disk, 0);
+ revalidate_disk(disk);
- if (mddev->ro)
- mddev->ro = 0;
- }
- if (!mddev->in_sync || mddev->flags) {
- /* mark array as shutdown cleanly */
- mddev->in_sync = 1;
- md_update_sb(mddev, 1);
- }
- if (mode == 1)
- set_disk_ro(disk, 1);
- clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ if (mddev->ro)
+ mddev->ro = 0;
+
err = 0;
}
-out:
mutex_unlock(&mddev->open_mutex);
if (err)
return err;
@@ -4586,52 +4747,12 @@ out:
export_array(mddev);
- mddev->array_sectors = 0;
- mddev->external_size = 0;
- mddev->dev_sectors = 0;
- mddev->raid_disks = 0;
- mddev->recovery_cp = 0;
- mddev->resync_min = 0;
- mddev->resync_max = MaxSector;
- mddev->reshape_position = MaxSector;
- mddev->external = 0;
- mddev->persistent = 0;
- mddev->level = LEVEL_NONE;
- mddev->clevel[0] = 0;
- mddev->flags = 0;
- mddev->ro = 0;
- mddev->metadata_type[0] = 0;
- mddev->chunk_sectors = 0;
- mddev->ctime = mddev->utime = 0;
- mddev->layout = 0;
- mddev->max_disks = 0;
- mddev->events = 0;
- mddev->delta_disks = 0;
- mddev->new_level = LEVEL_NONE;
- mddev->new_layout = 0;
- mddev->new_chunk_sectors = 0;
- mddev->curr_resync = 0;
- mddev->resync_mismatches = 0;
- mddev->suspend_lo = mddev->suspend_hi = 0;
- mddev->sync_speed_min = mddev->sync_speed_max = 0;
- mddev->recovery = 0;
- mddev->in_sync = 0;
- mddev->changed = 0;
- mddev->degraded = 0;
- mddev->barriers_work = 0;
- mddev->safemode = 0;
- mddev->bitmap_info.offset = 0;
- mddev->bitmap_info.default_offset = 0;
- mddev->bitmap_info.chunksize = 0;
- mddev->bitmap_info.daemon_sleep = 0;
- mddev->bitmap_info.max_write_behind = 0;
+ md_clean(mddev);
kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
if (mddev->hold_active == UNTIL_STOP)
mddev->hold_active = 0;
- } else if (mddev->pers)
- printk(KERN_INFO "md: %s switched to read-only mode.\n",
- mdname(mddev));
+ }
err = 0;
blk_integrity_unregister(disk);
md_new_event(mddev);
@@ -5349,7 +5470,7 @@ static int update_raid_disks(mddev_t *mddev, int raid_disks)
if (mddev->pers->check_reshape == NULL)
return -EINVAL;
if (raid_disks <= 0 ||
- raid_disks >= mddev->max_disks)
+ (mddev->max_disks && raid_disks >= mddev->max_disks))
return -EINVAL;
if (mddev->sync_thread || mddev->reshape_position != MaxSector)
return -EBUSY;
@@ -5486,7 +5607,7 @@ static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
geo->heads = 2;
geo->sectors = 4;
- geo->cylinders = get_capacity(mddev->gendisk) / 8;
+ geo->cylinders = mddev->array_sectors / 8;
return 0;
}
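
md_getgeo() above derives the fake cylinder count from mddev->array_sectors instead of the gendisk capacity; with the fixed 2 heads and 4 sectors per track, cylinders is simply total_sectors / 8. A tiny sketch of that arithmetic, with hd_geometry_model as an invented stand-in for struct hd_geometry.

#include <stdio.h>

struct hd_geometry_model { unsigned heads, sectors; unsigned long long cylinders; };

static void fake_geometry(unsigned long long array_sectors, struct hd_geometry_model *g)
{
	g->heads = 2;
	g->sectors = 4;
	g->cylinders = array_sectors / (g->heads * g->sectors);   /* i.e. / 8 */
}

int main(void)
{
	struct hd_geometry_model g;

	fake_geometry(2097152ULL, &g);      /* a 1 GiB array in 512-byte sectors */
	printf("%u heads, %u sectors/track, %llu cylinders\n",
	       g.heads, g.sectors, g.cylinders);
	return 0;
}
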
@@ -5496,6 +5617,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
int err = 0;
void __user *argp = (void __user *)arg;
mddev_t *mddev = NULL;
+ int ro;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -5628,9 +5750,37 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
goto done_unlock;
case STOP_ARRAY_RO:
- err = do_md_stop(mddev, 1, 1);
+ err = md_set_readonly(mddev, 1);
goto done_unlock;
+ case BLKROSET:
+ if (get_user(ro, (int __user *)(arg))) {
+ err = -EFAULT;
+ goto done_unlock;
+ }
+ err = -EINVAL;
+
+ /* if the bdev is going readonly the value of mddev->ro
+ * does not matter, no writes are coming
+ */
+ if (ro)
+ goto done_unlock;
+
+ /* are we already prepared for writes? */
+ if (mddev->ro != 1)
+ goto done_unlock;
+
+ /* transitioning to readauto need only happen for
+ * arrays that call md_write_start
+ */
+ if (mddev->pers) {
+ err = restart_array(mddev);
+ if (err == 0) {
+ mddev->ro = 2;
+ set_disk_ro(mddev->gendisk, 0);
+ }
+ }
+ goto done_unlock;
}
/*
@@ -5751,7 +5901,6 @@ static int md_open(struct block_device *bdev, fmode_t mode)
atomic_inc(&mddev->openers);
mutex_unlock(&mddev->open_mutex);
- check_disk_change(bdev);
out:
return err;
}
@@ -5766,21 +5915,6 @@ static int md_release(struct gendisk *disk, fmode_t mode)
return 0;
}
-
-static int md_media_changed(struct gendisk *disk)
-{
- mddev_t *mddev = disk->private_data;
-
- return mddev->changed;
-}
-
-static int md_revalidate(struct gendisk *disk)
-{
- mddev_t *mddev = disk->private_data;
-
- mddev->changed = 0;
- return 0;
-}
static const struct block_device_operations md_fops =
{
.owner = THIS_MODULE,
@@ -5791,8 +5925,6 @@ static const struct block_device_operations md_fops =
.compat_ioctl = md_compat_ioctl,
#endif
.getgeo = md_getgeo,
- .media_changed = md_media_changed,
- .revalidate_disk= md_revalidate,
};
static int md_thread(void * arg)
@@ -5906,7 +6038,7 @@ void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
mddev->pers->error_handler(mddev,rdev);
if (mddev->degraded)
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
- set_bit(StateChanged, &rdev->flags);
+ sysfs_notify_dirent(rdev->sysfs_state);
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
@@ -6898,11 +7030,6 @@ void md_check_recovery(mddev_t *mddev)
if (mddev->flags)
md_update_sb(mddev, 0);
- list_for_each_entry(rdev, &mddev->disks, same_set)
- if (test_and_clear_bit(StateChanged, &rdev->flags))
- sysfs_notify_dirent(rdev->sysfs_state);
-
-
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
!test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
/* resync/recovery still happening */
@@ -7039,7 +7166,7 @@ static int md_notify_reboot(struct notifier_block *this,
* appears to still be in use. Hence
* the '100'.
*/
- do_md_stop(mddev, 1, 100);
+ md_set_readonly(mddev, 100);
mddev_unlock(mddev);
}
/*
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 8e4c75c00d46..a536f5458097 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -74,9 +74,6 @@ struct mdk_rdev_s
#define Blocked 8 /* An error occured on an externally
* managed array, don't allow writes
* until it is cleared */
-#define StateChanged 9 /* Faulty or Blocked has changed during
- * interrupt, so it needs to be
- * notified by the thread */
wait_queue_head_t blocked_wait;
int desc_nr; /* descriptor index in the superblock */
@@ -240,7 +237,6 @@ struct mddev_s
atomic_t active; /* general refcount */
atomic_t openers; /* number of active opens */
- int changed; /* true if we might need to reread partition info */
int degraded; /* whether md should consider
* adding a spare
*/
@@ -279,9 +275,6 @@ struct mddev_s
atomic_t writes_pending;
struct request_queue *queue; /* for plugging ... */
- atomic_t write_behind; /* outstanding async IO */
- unsigned int max_write_behind; /* 0 = sync */
-
struct bitmap *bitmap; /* the bitmap for the device */
struct {
struct file *file; /* the bitmap file */
@@ -305,6 +298,7 @@ struct mddev_s
atomic_t max_corr_read_errors; /* max read retries */
struct list_head all_mddevs;
+ struct attribute_group *to_remove;
/* Generic barrier handling.
* If there is a pending barrier request, all other
* writes are blocked while the devices are flushed.
@@ -336,7 +330,7 @@ struct mdk_personality
int level;
struct list_head list;
struct module *owner;
- int (*make_request)(struct request_queue *q, struct bio *bio);
+ int (*make_request)(mddev_t *mddev, struct bio *bio);
int (*run)(mddev_t *mddev);
int (*stop)(mddev_t *mddev);
void (*status)(struct seq_file *seq, mddev_t *mddev);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 789bf535d29c..410fb60699ac 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -85,7 +85,7 @@ static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err)
static void multipath_end_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- struct multipath_bh * mp_bh = (struct multipath_bh *)(bio->bi_private);
+ struct multipath_bh *mp_bh = bio->bi_private;
multipath_conf_t *conf = mp_bh->mddev->private;
mdk_rdev_t *rdev = conf->multipaths[mp_bh->path].rdev;
@@ -136,14 +136,11 @@ static void multipath_unplug(struct request_queue *q)
}
-static int multipath_make_request (struct request_queue *q, struct bio * bio)
+static int multipath_make_request(mddev_t *mddev, struct bio * bio)
{
- mddev_t *mddev = q->queuedata;
multipath_conf_t *conf = mddev->private;
struct multipath_bh * mp_bh;
struct multipath_info *multipath;
- const int rw = bio_data_dir(bio);
- int cpu;
if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
md_barrier_request(mddev, bio);
@@ -155,12 +152,6 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
mp_bh->master_bio = bio;
mp_bh->mddev = mddev;
- cpu = part_stat_lock();
- part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
- part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
- bio_sectors(bio));
- part_stat_unlock();
-
mp_bh->path = multipath_map(conf);
if (mp_bh->path < 0) {
bio_endio(bio, -EIO);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c3bec024612e..e70f004c99e8 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -23,15 +23,17 @@
#include <linux/slab.h>
#include "md.h"
#include "raid0.h"
+#include "raid5.h"
static void raid0_unplug(struct request_queue *q)
{
mddev_t *mddev = q->queuedata;
raid0_conf_t *conf = mddev->private;
mdk_rdev_t **devlist = conf->devlist;
+ int raid_disks = conf->strip_zone[0].nb_dev;
int i;
- for (i=0; i<mddev->raid_disks; i++) {
+ for (i=0; i < raid_disks; i++) {
struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);
blk_unplug(r_queue);
@@ -43,12 +45,13 @@ static int raid0_congested(void *data, int bits)
mddev_t *mddev = data;
raid0_conf_t *conf = mddev->private;
mdk_rdev_t **devlist = conf->devlist;
+ int raid_disks = conf->strip_zone[0].nb_dev;
int i, ret = 0;
if (mddev_congested(mddev, bits))
return 1;
- for (i = 0; i < mddev->raid_disks && !ret ; i++) {
+ for (i = 0; i < raid_disks && !ret ; i++) {
struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
ret |= bdi_congested(&q->backing_dev_info, bits);
@@ -66,16 +69,17 @@ static void dump_zones(mddev_t *mddev)
sector_t zone_start = 0;
char b[BDEVNAME_SIZE];
raid0_conf_t *conf = mddev->private;
+ int raid_disks = conf->strip_zone[0].nb_dev;
printk(KERN_INFO "******* %s configuration *********\n",
mdname(mddev));
h = 0;
for (j = 0; j < conf->nr_strip_zones; j++) {
printk(KERN_INFO "zone%d=[", j);
for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
- printk("%s/",
- bdevname(conf->devlist[j*mddev->raid_disks
+ printk(KERN_CONT "%s/",
+ bdevname(conf->devlist[j*raid_disks
+ k]->bdev, b));
- printk("]\n");
+ printk(KERN_CONT "]\n");
zone_size = conf->strip_zone[j].zone_end - zone_start;
printk(KERN_INFO " zone offset=%llukb "
@@ -88,7 +92,7 @@ static void dump_zones(mddev_t *mddev)
printk(KERN_INFO "**********************************\n\n");
}
-static int create_strip_zones(mddev_t *mddev)
+static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf)
{
int i, c, err;
sector_t curr_zone_end, sectors;
@@ -101,8 +105,9 @@ static int create_strip_zones(mddev_t *mddev)
if (!conf)
return -ENOMEM;
list_for_each_entry(rdev1, &mddev->disks, same_set) {
- printk(KERN_INFO "raid0: looking at %s\n",
- bdevname(rdev1->bdev,b));
+ printk(KERN_INFO "md/raid0:%s: looking at %s\n",
+ mdname(mddev),
+ bdevname(rdev1->bdev, b));
c = 0;
/* round size to chunk_size */
@@ -111,14 +116,16 @@ static int create_strip_zones(mddev_t *mddev)
rdev1->sectors = sectors * mddev->chunk_sectors;
list_for_each_entry(rdev2, &mddev->disks, same_set) {
- printk(KERN_INFO "raid0: comparing %s(%llu)",
+ printk(KERN_INFO "md/raid0:%s: comparing %s(%llu)",
+ mdname(mddev),
bdevname(rdev1->bdev,b),
(unsigned long long)rdev1->sectors);
- printk(KERN_INFO " with %s(%llu)\n",
+ printk(KERN_CONT " with %s(%llu)\n",
bdevname(rdev2->bdev,b),
(unsigned long long)rdev2->sectors);
if (rdev2 == rdev1) {
- printk(KERN_INFO "raid0: END\n");
+ printk(KERN_INFO "md/raid0:%s: END\n",
+ mdname(mddev));
break;
}
if (rdev2->sectors == rdev1->sectors) {
@@ -126,20 +133,24 @@ static int create_strip_zones(mddev_t *mddev)
* Not unique, don't count it as a new
* group
*/
- printk(KERN_INFO "raid0: EQUAL\n");
+ printk(KERN_INFO "md/raid0:%s: EQUAL\n",
+ mdname(mddev));
c = 1;
break;
}
- printk(KERN_INFO "raid0: NOT EQUAL\n");
+ printk(KERN_INFO "md/raid0:%s: NOT EQUAL\n",
+ mdname(mddev));
}
if (!c) {
- printk(KERN_INFO "raid0: ==> UNIQUE\n");
+ printk(KERN_INFO "md/raid0:%s: ==> UNIQUE\n",
+ mdname(mddev));
conf->nr_strip_zones++;
- printk(KERN_INFO "raid0: %d zones\n",
- conf->nr_strip_zones);
+ printk(KERN_INFO "md/raid0:%s: %d zones\n",
+ mdname(mddev), conf->nr_strip_zones);
}
}
- printk(KERN_INFO "raid0: FINAL %d zones\n", conf->nr_strip_zones);
+ printk(KERN_INFO "md/raid0:%s: FINAL %d zones\n",
+ mdname(mddev), conf->nr_strip_zones);
err = -ENOMEM;
conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
conf->nr_strip_zones, GFP_KERNEL);
@@ -162,14 +173,18 @@ static int create_strip_zones(mddev_t *mddev)
list_for_each_entry(rdev1, &mddev->disks, same_set) {
int j = rdev1->raid_disk;
+ if (mddev->level == 10)
+ /* taking over a raid10-n2 array */
+ j /= 2;
+
if (j < 0 || j >= mddev->raid_disks) {
- printk(KERN_ERR "raid0: bad disk number %d - "
- "aborting!\n", j);
+ printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
+ "aborting!\n", mdname(mddev), j);
goto abort;
}
if (dev[j]) {
- printk(KERN_ERR "raid0: multiple devices for %d - "
- "aborting!\n", j);
+ printk(KERN_ERR "md/raid0:%s: multiple devices for %d - "
+ "aborting!\n", mdname(mddev), j);
goto abort;
}
dev[j] = rdev1;
@@ -191,8 +206,8 @@ static int create_strip_zones(mddev_t *mddev)
cnt++;
}
if (cnt != mddev->raid_disks) {
- printk(KERN_ERR "raid0: too few disks (%d of %d) - "
- "aborting!\n", cnt, mddev->raid_disks);
+ printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
+ "aborting!\n", mdname(mddev), cnt, mddev->raid_disks);
goto abort;
}
zone->nb_dev = cnt;
@@ -208,39 +223,44 @@ static int create_strip_zones(mddev_t *mddev)
zone = conf->strip_zone + i;
dev = conf->devlist + i * mddev->raid_disks;
- printk(KERN_INFO "raid0: zone %d\n", i);
+ printk(KERN_INFO "md/raid0:%s: zone %d\n",
+ mdname(mddev), i);
zone->dev_start = smallest->sectors;
smallest = NULL;
c = 0;
for (j=0; j<cnt; j++) {
rdev = conf->devlist[j];
- printk(KERN_INFO "raid0: checking %s ...",
- bdevname(rdev->bdev, b));
+ printk(KERN_INFO "md/raid0:%s: checking %s ...",
+ mdname(mddev),
+ bdevname(rdev->bdev, b));
if (rdev->sectors <= zone->dev_start) {
- printk(KERN_INFO " nope.\n");
+ printk(KERN_CONT " nope.\n");
continue;
}
- printk(KERN_INFO " contained as device %d\n", c);
+ printk(KERN_CONT " contained as device %d\n", c);
dev[c] = rdev;
c++;
if (!smallest || rdev->sectors < smallest->sectors) {
smallest = rdev;
- printk(KERN_INFO " (%llu) is smallest!.\n",
- (unsigned long long)rdev->sectors);
+ printk(KERN_INFO "md/raid0:%s: (%llu) is smallest!.\n",
+ mdname(mddev),
+ (unsigned long long)rdev->sectors);
}
}
zone->nb_dev = c;
sectors = (smallest->sectors - zone->dev_start) * c;
- printk(KERN_INFO "raid0: zone->nb_dev: %d, sectors: %llu\n",
- zone->nb_dev, (unsigned long long)sectors);
+ printk(KERN_INFO "md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
+ mdname(mddev),
+ zone->nb_dev, (unsigned long long)sectors);
curr_zone_end += sectors;
zone->zone_end = curr_zone_end;
- printk(KERN_INFO "raid0: current zone start: %llu\n",
- (unsigned long long)smallest->sectors);
+ printk(KERN_INFO "md/raid0:%s: current zone start: %llu\n",
+ mdname(mddev),
+ (unsigned long long)smallest->sectors);
}
mddev->queue->unplug_fn = raid0_unplug;
mddev->queue->backing_dev_info.congested_fn = raid0_congested;
@@ -251,7 +271,7 @@ static int create_strip_zones(mddev_t *mddev)
* chunk size is a multiple of that sector size
*/
if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) {
- printk(KERN_ERR "%s chunk_size of %d not valid\n",
+ printk(KERN_ERR "md/raid0:%s: chunk_size of %d not valid\n",
mdname(mddev),
mddev->chunk_sectors << 9);
goto abort;
@@ -261,14 +281,15 @@ static int create_strip_zones(mddev_t *mddev)
blk_queue_io_opt(mddev->queue,
(mddev->chunk_sectors << 9) * mddev->raid_disks);
- printk(KERN_INFO "raid0: done.\n");
- mddev->private = conf;
+ printk(KERN_INFO "md/raid0:%s: done.\n", mdname(mddev));
+ *private_conf = conf;
+
return 0;
abort:
kfree(conf->strip_zone);
kfree(conf->devlist);
kfree(conf);
- mddev->private = NULL;
+ *private_conf = NULL;
return err;
}
@@ -319,10 +340,12 @@ static sector_t raid0_size(mddev_t *mddev, sector_t sectors, int raid_disks)
static int raid0_run(mddev_t *mddev)
{
+ raid0_conf_t *conf;
int ret;
if (mddev->chunk_sectors == 0) {
- printk(KERN_ERR "md/raid0: chunk size must be set.\n");
+ printk(KERN_ERR "md/raid0:%s: chunk size must be set.\n",
+ mdname(mddev));
return -EINVAL;
}
if (md_check_no_bitmap(mddev))
@@ -330,15 +353,27 @@ static int raid0_run(mddev_t *mddev)
blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
mddev->queue->queue_lock = &mddev->queue->__queue_lock;
- ret = create_strip_zones(mddev);
- if (ret < 0)
- return ret;
+ /* if private is not null, we are here after takeover */
+ if (mddev->private == NULL) {
+ ret = create_strip_zones(mddev, &conf);
+ if (ret < 0)
+ return ret;
+ mddev->private = conf;
+ }
+ conf = mddev->private;
+ if (conf->scale_raid_disks) {
+ int i;
+ for (i=0; i < conf->strip_zone[0].nb_dev; i++)
+ conf->devlist[i]->raid_disk /= conf->scale_raid_disks;
+ /* FIXME update sysfs rd links */
+ }
/* calculate array device size */
md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));
- printk(KERN_INFO "raid0 : md_size is %llu sectors.\n",
- (unsigned long long)mddev->array_sectors);
+ printk(KERN_INFO "md/raid0:%s: md_size is %llu sectors.\n",
+ mdname(mddev),
+ (unsigned long long)mddev->array_sectors);
/* calculate the max read-ahead size.
* For read-ahead of large files to be effective, we need to
* readahead at least twice a whole stripe. i.e. number of devices
@@ -402,6 +437,7 @@ static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone,
unsigned int sect_in_chunk;
sector_t chunk;
raid0_conf_t *conf = mddev->private;
+ int raid_disks = conf->strip_zone[0].nb_dev;
unsigned int chunk_sects = mddev->chunk_sectors;
if (is_power_of_2(chunk_sects)) {
@@ -424,7 +460,7 @@ static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone,
* + the position in the chunk
*/
*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
- return conf->devlist[(zone - conf->strip_zone)*mddev->raid_disks
+ return conf->devlist[(zone - conf->strip_zone)*raid_disks
+ sector_div(sector, zone->nb_dev)];
}
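
map_sector() above now indexes devlist[] by the zone's own disk count instead of mddev->raid_disks, which can be stale after a takeover. For orientation, a simplified single-zone, power-of-two-chunk model of the striping arithmetic; raid0_map_model() and its layout assumptions are illustrative, not the kernel's exact zone handling.

#include <stdio.h>

/* map an array sector to (device index, sector on that device), assuming one
 * zone that starts at device sector 0 and a power-of-two chunk size */
static void raid0_map_model(unsigned long long sector, unsigned chunk_sects,
			    unsigned nb_dev, unsigned *dev,
			    unsigned long long *dev_sector)
{
	unsigned long long chunk = sector / chunk_sects;      /* which chunk of the array */
	unsigned sect_in_chunk = sector & (chunk_sects - 1);  /* offset inside the chunk */

	*dev = chunk % nb_dev;                                /* chunks rotate across disks */
	*dev_sector = (chunk / nb_dev) * chunk_sects + sect_in_chunk;
}

int main(void)
{
	unsigned dev;
	unsigned long long dev_sector;

	raid0_map_model(1000, 128, 4, &dev, &dev_sector);     /* 64k chunks, 4 disks */
	printf("array sector 1000 -> disk %u, sector %llu\n", dev, dev_sector);
	return 0;
}
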
@@ -444,27 +480,18 @@ static inline int is_io_in_chunk_boundary(mddev_t *mddev,
}
}
-static int raid0_make_request(struct request_queue *q, struct bio *bio)
+static int raid0_make_request(mddev_t *mddev, struct bio *bio)
{
- mddev_t *mddev = q->queuedata;
unsigned int chunk_sects;
sector_t sector_offset;
struct strip_zone *zone;
mdk_rdev_t *tmp_dev;
- const int rw = bio_data_dir(bio);
- int cpu;
if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
md_barrier_request(mddev, bio);
return 0;
}
- cpu = part_stat_lock();
- part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
- part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
- bio_sectors(bio));
- part_stat_unlock();
-
chunk_sects = mddev->chunk_sectors;
if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
sector_t sector = bio->bi_sector;
@@ -482,9 +509,9 @@ static int raid0_make_request(struct request_queue *q, struct bio *bio)
else
bp = bio_split(bio, chunk_sects -
sector_div(sector, chunk_sects));
- if (raid0_make_request(q, &bp->bio1))
+ if (raid0_make_request(mddev, &bp->bio1))
generic_make_request(&bp->bio1);
- if (raid0_make_request(q, &bp->bio2))
+ if (raid0_make_request(mddev, &bp->bio2))
generic_make_request(&bp->bio2);
bio_pair_release(bp);
@@ -504,9 +531,10 @@ static int raid0_make_request(struct request_queue *q, struct bio *bio)
return 1;
bad_map:
- printk("raid0_make_request bug: can't convert block across chunks"
- " or bigger than %dk %llu %d\n", chunk_sects / 2,
- (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
+ printk("md/raid0:%s: make_request bug: can't convert block across chunks"
+ " or bigger than %dk %llu %d\n",
+ mdname(mddev), chunk_sects / 2,
+ (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
bio_io_error(bio);
return 0;
@@ -519,6 +547,7 @@ static void raid0_status(struct seq_file *seq, mddev_t *mddev)
int j, k, h;
char b[BDEVNAME_SIZE];
raid0_conf_t *conf = mddev->private;
+ int raid_disks = conf->strip_zone[0].nb_dev;
sector_t zone_size;
sector_t zone_start = 0;
@@ -529,7 +558,7 @@ static void raid0_status(struct seq_file *seq, mddev_t *mddev)
seq_printf(seq, "=[");
for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
seq_printf(seq, "%s/", bdevname(
- conf->devlist[j*mddev->raid_disks + k]
+ conf->devlist[j*raid_disks + k]
->bdev, b));
zone_size = conf->strip_zone[j].zone_end - zone_start;
@@ -544,6 +573,104 @@ static void raid0_status(struct seq_file *seq, mddev_t *mddev)
return;
}
+static void *raid0_takeover_raid5(mddev_t *mddev)
+{
+ mdk_rdev_t *rdev;
+ raid0_conf_t *priv_conf;
+
+ if (mddev->degraded != 1) {
+ printk(KERN_ERR "md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
+ mdname(mddev),
+ mddev->degraded);
+ return ERR_PTR(-EINVAL);
+ }
+
+ list_for_each_entry(rdev, &mddev->disks, same_set) {
+ /* check slot number for a disk */
+ if (rdev->raid_disk == mddev->raid_disks-1) {
+ printk(KERN_ERR "md/raid0:%s: raid5 must have missing parity disk!\n",
+ mdname(mddev));
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ /* Set new parameters */
+ mddev->new_level = 0;
+ mddev->new_chunk_sectors = mddev->chunk_sectors;
+ mddev->raid_disks--;
+ mddev->delta_disks = -1;
+ /* make sure it will be not marked as dirty */
+ mddev->recovery_cp = MaxSector;
+
+ create_strip_zones(mddev, &priv_conf);
+ return priv_conf;
+}
+
+static void *raid0_takeover_raid10(mddev_t *mddev)
+{
+ raid0_conf_t *priv_conf;
+
+ /* Check layout:
+ * - far_copies must be 1
+ * - near_copies must be 2
+ * - disks number must be even
+ * - all mirrors must be already degraded
+ */
+ if (mddev->layout != ((1 << 8) + 2)) {
+ printk(KERN_ERR "md/raid0:%s:: Raid0 cannot takover layout: 0x%x\n",
+ mdname(mddev),
+ mddev->layout);
+ return ERR_PTR(-EINVAL);
+ }
+ if (mddev->raid_disks & 1) {
+ printk(KERN_ERR "md/raid0:%s: Raid0 cannot takover Raid10 with odd disk number.\n",
+ mdname(mddev));
+ return ERR_PTR(-EINVAL);
+ }
+ if (mddev->degraded != (mddev->raid_disks>>1)) {
+ printk(KERN_ERR "md/raid0:%s: All mirrors must be already degraded!\n",
+ mdname(mddev));
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Set new parameters */
+ mddev->new_level = 0;
+ mddev->new_chunk_sectors = mddev->chunk_sectors;
+ mddev->delta_disks = - mddev->raid_disks / 2;
+ mddev->raid_disks += mddev->delta_disks;
+ mddev->degraded = 0;
+ /* make sure it will be not marked as dirty */
+ mddev->recovery_cp = MaxSector;
+
+ create_strip_zones(mddev, &priv_conf);
+ priv_conf->scale_raid_disks = 2;
+ return priv_conf;
+}
+
+static void *raid0_takeover(mddev_t *mddev)
+{
+ /* raid0 can take over:
+ * raid5 - providing it is Raid4 layout and one disk is faulty
+ * raid10 - assuming we have all necessary active disks
+ */
+ if (mddev->level == 5) {
+ if (mddev->layout == ALGORITHM_PARITY_N)
+ return raid0_takeover_raid5(mddev);
+
+ printk(KERN_ERR "md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
+ mdname(mddev), ALGORITHM_PARITY_N);
+ }
+
+ if (mddev->level == 10)
+ return raid0_takeover_raid10(mddev);
+
+ return ERR_PTR(-EINVAL);
+}
+
+static void raid0_quiesce(mddev_t *mddev, int state)
+{
+}
+
static struct mdk_personality raid0_personality=
{
.name = "raid0",
@@ -554,6 +681,8 @@ static struct mdk_personality raid0_personality=
.stop = raid0_stop,
.status = raid0_status,
.size = raid0_size,
+ .takeover = raid0_takeover,
+ .quiesce = raid0_quiesce,
};
static int __init raid0_init (void)
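
raid0_takeover_raid10() above only accepts a near-2, far-1 raid10 in which exactly one copy of every mirror pair has already failed. A compact sketch of those precondition checks; the function and constant names below are illustrative, not part of md.

#include <stdio.h>

/* raid10 layout word: low byte = near copies, next byte = far copies */
#define RAID10_NEAR2_FAR1 ((1 << 8) + 2)

static int raid10_to_raid0_ok(int layout, int raid_disks, int degraded)
{
	if (layout != RAID10_NEAR2_FAR1)
		return 0;                    /* must be n2, f1 */
	if (raid_disks & 1)
		return 0;                    /* even number of disks required */
	if (degraded != raid_disks / 2)
		return 0;                    /* exactly one survivor per mirror pair */
	return 1;
}

int main(void)
{
	printf("%d\n", raid10_to_raid0_ok(RAID10_NEAR2_FAR1, 4, 2));  /* 1: convertible */
	printf("%d\n", raid10_to_raid0_ok(RAID10_NEAR2_FAR1, 4, 1));  /* 0: still redundant */
	return 0;
}
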
diff --git a/drivers/md/raid0.h b/drivers/md/raid0.h
index 91f8e876ee64..d724e664ca4d 100644
--- a/drivers/md/raid0.h
+++ b/drivers/md/raid0.h
@@ -13,6 +13,9 @@ struct raid0_private_data
struct strip_zone *strip_zone;
mdk_rdev_t **devlist; /* lists of rdevs, pointed to by strip_zone->dev */
int nr_strip_zones;
+ int scale_raid_disks; /* divide rdev->raid_disks by this in run()
+ * to handle conversion from raid10
+ */
};
typedef struct raid0_private_data raid0_conf_t;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index e59b10e66edb..fd9bb308278a 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -263,7 +263,7 @@ static inline void update_head_pos(int disk, r1bio_t *r1_bio)
static void raid1_end_read_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
+ r1bio_t *r1_bio = bio->bi_private;
int mirror;
conf_t *conf = r1_bio->mddev->private;
@@ -297,7 +297,8 @@ static void raid1_end_read_request(struct bio *bio, int error)
*/
char b[BDEVNAME_SIZE];
if (printk_ratelimit())
- printk(KERN_ERR "raid1: %s: rescheduling sector %llu\n",
+ printk(KERN_ERR "md/raid1:%s: %s: rescheduling sector %llu\n",
+ mdname(conf->mddev),
bdevname(conf->mirrors[mirror].rdev->bdev,b), (unsigned long long)r1_bio->sector);
reschedule_retry(r1_bio);
}
@@ -308,7 +309,7 @@ static void raid1_end_read_request(struct bio *bio, int error)
static void raid1_end_write_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
+ r1bio_t *r1_bio = bio->bi_private;
int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
conf_t *conf = r1_bio->mddev->private;
struct bio *to_put = NULL;
@@ -418,7 +419,7 @@ static void raid1_end_write_request(struct bio *bio, int error)
*/
static int read_balance(conf_t *conf, r1bio_t *r1_bio)
{
- const unsigned long this_sector = r1_bio->sector;
+ const sector_t this_sector = r1_bio->sector;
int new_disk = conf->last_used, disk = new_disk;
int wonly_disk = -1;
const int sectors = r1_bio->sectors;
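
The read_balance() hunk above widens this_sector from unsigned long to sector_t: on a 32-bit kernel unsigned long holds at most 2^32 - 1, and 2^32 sectors x 512 bytes is only 2 TiB, so sector numbers in larger arrays would wrap, whereas sector_t is 64 bits on such configurations when large block device support is enabled; that is presumably the motivation for the change.
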
@@ -434,7 +435,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
retry:
if (conf->mddev->recovery_cp < MaxSector &&
(this_sector + sectors >= conf->next_resync)) {
- /* Choose the first operation device, for consistancy */
+ /* Choose the first operational device, for consistency */
new_disk = 0;
for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
@@ -774,9 +775,8 @@ do_sync_io:
return NULL;
}
-static int make_request(struct request_queue *q, struct bio * bio)
+static int make_request(mddev_t *mddev, struct bio * bio)
{
- mddev_t *mddev = q->queuedata;
conf_t *conf = mddev->private;
mirror_info_t *mirror;
r1bio_t *r1_bio;
@@ -788,7 +788,6 @@ static int make_request(struct request_queue *q, struct bio * bio)
struct page **behind_pages = NULL;
const int rw = bio_data_dir(bio);
const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
- int cpu;
bool do_barriers;
mdk_rdev_t *blocked_rdev;
@@ -834,12 +833,6 @@ static int make_request(struct request_queue *q, struct bio * bio)
bitmap = mddev->bitmap;
- cpu = part_stat_lock();
- part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
- part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
- bio_sectors(bio));
- part_stat_unlock();
-
/*
* make_request() can abort the operation when READA is being
* used and no empty request is available.
@@ -866,6 +859,15 @@ static int make_request(struct request_queue *q, struct bio * bio)
}
mirror = conf->mirrors + rdisk;
+ if (test_bit(WriteMostly, &mirror->rdev->flags) &&
+ bitmap) {
+ /* Reading from a write-mostly device must
+ * take care not to over-take any writes
+ * that are 'behind'
+ */
+ wait_event(bitmap->behind_wait,
+ atomic_read(&bitmap->behind_writes) == 0);
+ }
r1_bio->read_disk = rdisk;
read_bio = bio_clone(bio, GFP_NOIO);
@@ -942,10 +944,14 @@ static int make_request(struct request_queue *q, struct bio * bio)
set_bit(R1BIO_Degraded, &r1_bio->state);
}
- /* do behind I/O ? */
+ /* do behind I/O ?
+ * Not if there are too many, or cannot allocate memory,
+ * or a reader on WriteMostly is waiting for behind writes
+ * to flush */
if (bitmap &&
(atomic_read(&bitmap->behind_writes)
< mddev->bitmap_info.max_write_behind) &&
+ !waitqueue_active(&bitmap->behind_wait) &&
(behind_pages = alloc_behind_pages(bio)) != NULL)
set_bit(R1BIO_BehindIO, &r1_bio->state);
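
The raid1 changes above make a read from a WriteMostly device wait on bitmap->behind_wait until every behind write has drained, and skip starting new behind I/O while such a reader is queued. A pthread condition-variable sketch of that ordering; the helper names are invented and the waitqueue_active() style check is omitted.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t behind_wait = PTHREAD_COND_INITIALIZER;
static int behind_writes;

static void behind_write_start(void)
{
	pthread_mutex_lock(&lock);
	behind_writes++;
	pthread_mutex_unlock(&lock);
}

static void behind_write_end(void)
{
	pthread_mutex_lock(&lock);
	if (--behind_writes == 0)
		pthread_cond_broadcast(&behind_wait);    /* like wake_up(&bitmap->behind_wait) */
	pthread_mutex_unlock(&lock);
}

/* a read from a write-mostly device must not overtake behind writes */
static void read_from_write_mostly(void)
{
	pthread_mutex_lock(&lock);
	while (behind_writes)
		pthread_cond_wait(&behind_wait, &lock);  /* like wait_event(bitmap->behind_wait, ...) */
	pthread_mutex_unlock(&lock);
	printf("safe to read\n");
}

int main(void)
{
	behind_write_start();
	behind_write_end();
	read_from_write_mostly();
	return 0;
}
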
@@ -1070,21 +1076,22 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
} else
set_bit(Faulty, &rdev->flags);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
- printk(KERN_ALERT "raid1: Disk failure on %s, disabling device.\n"
- "raid1: Operation continuing on %d devices.\n",
- bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
+ printk(KERN_ALERT "md/raid1:%s: Disk failure on %s, disabling device.\n"
+ KERN_ALERT "md/raid1:%s: Operation continuing on %d devices.\n",
+ mdname(mddev), bdevname(rdev->bdev, b),
+ mdname(mddev), conf->raid_disks - mddev->degraded);
}
static void print_conf(conf_t *conf)
{
int i;
- printk("RAID1 conf printout:\n");
+ printk(KERN_DEBUG "RAID1 conf printout:\n");
if (!conf) {
- printk("(!conf)\n");
+ printk(KERN_DEBUG "(!conf)\n");
return;
}
- printk(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
+ printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
conf->raid_disks);
rcu_read_lock();
@@ -1092,7 +1099,7 @@ static void print_conf(conf_t *conf)
char b[BDEVNAME_SIZE];
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev)
- printk(" disk %d, wo:%d, o:%d, dev:%s\n",
+ printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
i, !test_bit(In_sync, &rdev->flags),
!test_bit(Faulty, &rdev->flags),
bdevname(rdev->bdev,b));
@@ -1223,7 +1230,7 @@ abort:
static void end_sync_read(struct bio *bio, int error)
{
- r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
+ r1bio_t *r1_bio = bio->bi_private;
int i;
for (i=r1_bio->mddev->raid_disks; i--; )
@@ -1246,7 +1253,7 @@ static void end_sync_read(struct bio *bio, int error)
static void end_sync_write(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
+ r1bio_t *r1_bio = bio->bi_private;
mddev_t *mddev = r1_bio->mddev;
conf_t *conf = mddev->private;
int i;
@@ -1453,9 +1460,10 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
char b[BDEVNAME_SIZE];
/* Cannot read from anywhere, array is toast */
md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
- printk(KERN_ALERT "raid1: %s: unrecoverable I/O read error"
+ printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error"
" for block %llu\n",
- bdevname(bio->bi_bdev,b),
+ mdname(mddev),
+ bdevname(bio->bi_bdev, b),
(unsigned long long)r1_bio->sector);
md_done_sync(mddev, r1_bio->sectors, 0);
put_buf(r1_bio);
@@ -1577,7 +1585,7 @@ static void fix_read_error(conf_t *conf, int read_disk,
else {
atomic_add(s, &rdev->corrected_errors);
printk(KERN_INFO
- "raid1:%s: read error corrected "
+ "md/raid1:%s: read error corrected "
"(%d sectors at %llu on %s)\n",
mdname(mddev), s,
(unsigned long long)(sect +
@@ -1682,8 +1690,9 @@ static void raid1d(mddev_t *mddev)
bio = r1_bio->bios[r1_bio->read_disk];
if ((disk=read_balance(conf, r1_bio)) == -1) {
- printk(KERN_ALERT "raid1: %s: unrecoverable I/O"
+ printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
" read error for block %llu\n",
+ mdname(mddev),
bdevname(bio->bi_bdev,b),
(unsigned long long)r1_bio->sector);
raid_end_bio_io(r1_bio);
@@ -1697,10 +1706,11 @@ static void raid1d(mddev_t *mddev)
r1_bio->bios[r1_bio->read_disk] = bio;
rdev = conf->mirrors[disk].rdev;
if (printk_ratelimit())
- printk(KERN_ERR "raid1: %s: redirecting sector %llu to"
- " another mirror\n",
- bdevname(rdev->bdev,b),
- (unsigned long long)r1_bio->sector);
+ printk(KERN_ERR "md/raid1:%s: redirecting sector %llu to"
+ " other mirror: %s\n",
+ mdname(mddev),
+ (unsigned long long)r1_bio->sector,
+ bdevname(rdev->bdev,b));
bio->bi_sector = r1_bio->sector + rdev->data_offset;
bio->bi_bdev = rdev->bdev;
bio->bi_end_io = raid1_end_read_request;
@@ -1755,13 +1765,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
int still_degraded = 0;
if (!conf->r1buf_pool)
- {
-/*
- printk("sync start - bitmap %p\n", mddev->bitmap);
-*/
if (init_resync(conf))
return 0;
- }
max_sector = mddev->dev_sectors;
if (sector_nr >= max_sector) {
@@ -2042,7 +2047,7 @@ static conf_t *setup_conf(mddev_t *mddev)
err = -EIO;
if (conf->last_used < 0) {
- printk(KERN_ERR "raid1: no operational mirrors for %s\n",
+ printk(KERN_ERR "md/raid1:%s: no operational mirrors\n",
mdname(mddev));
goto abort;
}
@@ -2050,7 +2055,7 @@ static conf_t *setup_conf(mddev_t *mddev)
conf->thread = md_register_thread(raid1d, mddev, NULL);
if (!conf->thread) {
printk(KERN_ERR
- "raid1: couldn't allocate thread for %s\n",
+ "md/raid1:%s: couldn't allocate thread\n",
mdname(mddev));
goto abort;
}
@@ -2076,12 +2081,12 @@ static int run(mddev_t *mddev)
mdk_rdev_t *rdev;
if (mddev->level != 1) {
- printk("raid1: %s: raid level not set to mirroring (%d)\n",
+ printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n",
mdname(mddev), mddev->level);
return -EIO;
}
if (mddev->reshape_position != MaxSector) {
- printk("raid1: %s: reshape_position set but not supported\n",
+ printk(KERN_ERR "md/raid1:%s: reshape_position set but not supported\n",
mdname(mddev));
return -EIO;
}
@@ -2124,11 +2129,11 @@ static int run(mddev_t *mddev)
mddev->recovery_cp = MaxSector;
if (mddev->recovery_cp != MaxSector)
- printk(KERN_NOTICE "raid1: %s is not clean"
+ printk(KERN_NOTICE "md/raid1:%s: not clean"
" -- starting background reconstruction\n",
mdname(mddev));
printk(KERN_INFO
- "raid1: raid set %s active with %d out of %d mirrors\n",
+ "md/raid1:%s: active with %d out of %d mirrors\n",
mdname(mddev), mddev->raid_disks - mddev->degraded,
mddev->raid_disks);
@@ -2152,15 +2157,14 @@ static int stop(mddev_t *mddev)
{
conf_t *conf = mddev->private;
struct bitmap *bitmap = mddev->bitmap;
- int behind_wait = 0;
/* wait for behind writes to complete */
- while (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
- behind_wait++;
- printk(KERN_INFO "raid1: behind writes in progress on device %s, waiting to stop (%d)\n", mdname(mddev), behind_wait);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ); /* wait a second */
+ if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
+ printk(KERN_INFO "md/raid1:%s: behind writes in progress - waiting to stop.\n",
+ mdname(mddev));
/* need to kick something here to make sure I/O goes? */
+ wait_event(bitmap->behind_wait,
+ atomic_read(&bitmap->behind_writes) == 0);
}
raise_barrier(conf);
@@ -2191,7 +2195,6 @@ static int raid1_resize(mddev_t *mddev, sector_t sectors)
if (mddev->array_sectors > raid1_size(mddev, sectors, 0))
return -EINVAL;
set_capacity(mddev->gendisk, mddev->array_sectors);
- mddev->changed = 1;
revalidate_disk(mddev->gendisk);
if (sectors > mddev->dev_sectors &&
mddev->recovery_cp == MaxSector) {
@@ -2286,9 +2289,9 @@ static int raid1_reshape(mddev_t *mddev)
if (sysfs_create_link(&mddev->kobj,
&rdev->kobj, nm))
printk(KERN_WARNING
- "md/raid1: cannot register "
- "%s for %s\n",
- nm, mdname(mddev));
+ "md/raid1:%s: cannot register "
+ "%s\n",
+ mdname(mddev), nm);
}
if (rdev)
newmirrors[d2++].rdev = rdev;
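The raid1.c hunks above converge on one mechanism: instead of the old poll-and-sleep loop, both the WriteMostly read path and stop() now block with wait_event() on bitmap->behind_wait until bitmap->behind_writes drains to zero. A minimal, self-contained sketch of that counter-plus-waitqueue pattern follows; it is an illustration only, using hypothetical demo_* names rather than the md bitmap structures, and is not part of the patch.

/*
 * Sketch (not part of the patch): writers bump an atomic counter while a
 * write-behind is in flight and wake the queue when the last one completes;
 * waiters sleep until the counter reads zero - the shape of the
 * bitmap->behind_wait / behind_writes pairing used above.
 */
#include <linux/atomic.h>
#include <linux/wait.h>

static atomic_t demo_behind = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(demo_behind_wait);

static void demo_behind_start(void)
{
	atomic_inc(&demo_behind);		/* one more write-behind in flight */
}

static void demo_behind_end(void)
{
	if (atomic_dec_and_test(&demo_behind))
		wake_up(&demo_behind_wait);	/* last completion wakes waiters */
}

static void demo_wait_for_behind(void)
{
	/* sleep until every in-flight write-behind has completed */
	wait_event(demo_behind_wait, atomic_read(&demo_behind) == 0);
}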
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index e2766d8251a1..a807be26fe24 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -24,6 +24,7 @@
#include <linux/seq_file.h>
#include "md.h"
#include "raid10.h"
+#include "raid0.h"
#include "bitmap.h"
/*
@@ -255,7 +256,7 @@ static inline void update_head_pos(int slot, r10bio_t *r10_bio)
static void raid10_end_read_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
+ r10bio_t *r10_bio = bio->bi_private;
int slot, dev;
conf_t *conf = r10_bio->mddev->private;
@@ -285,7 +286,8 @@ static void raid10_end_read_request(struct bio *bio, int error)
*/
char b[BDEVNAME_SIZE];
if (printk_ratelimit())
- printk(KERN_ERR "raid10: %s: rescheduling sector %llu\n",
+ printk(KERN_ERR "md/raid10:%s: %s: rescheduling sector %llu\n",
+ mdname(conf->mddev),
bdevname(conf->mirrors[dev].rdev->bdev,b), (unsigned long long)r10_bio->sector);
reschedule_retry(r10_bio);
}
@@ -296,7 +298,7 @@ static void raid10_end_read_request(struct bio *bio, int error)
static void raid10_end_write_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
+ r10bio_t *r10_bio = bio->bi_private;
int slot, dev;
conf_t *conf = r10_bio->mddev->private;
@@ -494,7 +496,7 @@ static int raid10_mergeable_bvec(struct request_queue *q,
*/
static int read_balance(conf_t *conf, r10bio_t *r10_bio)
{
- const unsigned long this_sector = r10_bio->sector;
+ const sector_t this_sector = r10_bio->sector;
int disk, slot, nslot;
const int sectors = r10_bio->sectors;
sector_t new_distance, current_distance;
@@ -601,7 +603,7 @@ static void unplug_slaves(mddev_t *mddev)
int i;
rcu_read_lock();
- for (i=0; i<mddev->raid_disks; i++) {
+ for (i=0; i < conf->raid_disks; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
@@ -635,7 +637,7 @@ static int raid10_congested(void *data, int bits)
if (mddev_congested(mddev, bits))
return 1;
rcu_read_lock();
- for (i = 0; i < mddev->raid_disks && ret == 0; i++) {
+ for (i = 0; i < conf->raid_disks && ret == 0; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct request_queue *q = bdev_get_queue(rdev->bdev);
@@ -788,14 +790,12 @@ static void unfreeze_array(conf_t *conf)
spin_unlock_irq(&conf->resync_lock);
}
-static int make_request(struct request_queue *q, struct bio * bio)
+static int make_request(mddev_t *mddev, struct bio * bio)
{
- mddev_t *mddev = q->queuedata;
conf_t *conf = mddev->private;
mirror_info_t *mirror;
r10bio_t *r10_bio;
struct bio *read_bio;
- int cpu;
int i;
int chunk_sects = conf->chunk_mask + 1;
const int rw = bio_data_dir(bio);
@@ -825,16 +825,16 @@ static int make_request(struct request_queue *q, struct bio * bio)
*/
bp = bio_split(bio,
chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
- if (make_request(q, &bp->bio1))
+ if (make_request(mddev, &bp->bio1))
generic_make_request(&bp->bio1);
- if (make_request(q, &bp->bio2))
+ if (make_request(mddev, &bp->bio2))
generic_make_request(&bp->bio2);
bio_pair_release(bp);
return 0;
bad_map:
- printk("raid10_make_request bug: can't convert block across chunks"
- " or bigger than %dk %llu %d\n", chunk_sects/2,
+ printk("md/raid10:%s: make_request bug: can't convert block across chunks"
+ " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
(unsigned long long)bio->bi_sector, bio->bi_size >> 10);
bio_io_error(bio);
@@ -850,12 +850,6 @@ static int make_request(struct request_queue *q, struct bio * bio)
*/
wait_barrier(conf);
- cpu = part_stat_lock();
- part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
- part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
- bio_sectors(bio));
- part_stat_unlock();
-
r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
r10_bio->master_bio = bio;
@@ -1039,9 +1033,10 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
}
set_bit(Faulty, &rdev->flags);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
- printk(KERN_ALERT "raid10: Disk failure on %s, disabling device.\n"
- "raid10: Operation continuing on %d devices.\n",
- bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
+ printk(KERN_ALERT "md/raid10:%s: Disk failure on %s, disabling device.\n"
+ KERN_ALERT "md/raid10:%s: Operation continuing on %d devices.\n",
+ mdname(mddev), bdevname(rdev->bdev, b),
+ mdname(mddev), conf->raid_disks - mddev->degraded);
}
static void print_conf(conf_t *conf)
@@ -1049,19 +1044,19 @@ static void print_conf(conf_t *conf)
int i;
mirror_info_t *tmp;
- printk("RAID10 conf printout:\n");
+ printk(KERN_DEBUG "RAID10 conf printout:\n");
if (!conf) {
- printk("(!conf)\n");
+ printk(KERN_DEBUG "(!conf)\n");
return;
}
- printk(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
+ printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
conf->raid_disks);
for (i = 0; i < conf->raid_disks; i++) {
char b[BDEVNAME_SIZE];
tmp = conf->mirrors + i;
if (tmp->rdev)
- printk(" disk %d, wo:%d, o:%d, dev:%s\n",
+ printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
i, !test_bit(In_sync, &tmp->rdev->flags),
!test_bit(Faulty, &tmp->rdev->flags),
bdevname(tmp->rdev->bdev,b));
@@ -1132,7 +1127,7 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
int mirror;
mirror_info_t *p;
int first = 0;
- int last = mddev->raid_disks - 1;
+ int last = conf->raid_disks - 1;
if (mddev->recovery_cp < MaxSector)
/* only hot-add to in-sync arrays, as recovery is
@@ -1224,7 +1219,7 @@ abort:
static void end_sync_read(struct bio *bio, int error)
{
- r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
+ r10bio_t *r10_bio = bio->bi_private;
conf_t *conf = r10_bio->mddev->private;
int i,d;
@@ -1261,7 +1256,7 @@ static void end_sync_read(struct bio *bio, int error)
static void end_sync_write(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
+ r10bio_t *r10_bio = bio->bi_private;
mddev_t *mddev = r10_bio->mddev;
conf_t *conf = mddev->private;
int i,d;
@@ -1510,13 +1505,14 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
if (cur_read_error_count > max_read_errors) {
rcu_read_unlock();
printk(KERN_NOTICE
- "raid10: %s: Raid device exceeded "
+ "md/raid10:%s: %s: Raid device exceeded "
"read_error threshold "
"[cur %d:max %d]\n",
+ mdname(mddev),
b, cur_read_error_count, max_read_errors);
printk(KERN_NOTICE
- "raid10: %s: Failing raid "
- "device\n", b);
+ "md/raid10:%s: %s: Failing raid "
+ "device\n", mdname(mddev), b);
md_error(mddev, conf->mirrors[d].rdev);
return;
}
@@ -1586,15 +1582,16 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
== 0) {
/* Well, this device is dead */
printk(KERN_NOTICE
- "raid10:%s: read correction "
+ "md/raid10:%s: read correction "
"write failed"
" (%d sectors at %llu on %s)\n",
mdname(mddev), s,
(unsigned long long)(sect+
rdev->data_offset),
bdevname(rdev->bdev, b));
- printk(KERN_NOTICE "raid10:%s: failing "
+ printk(KERN_NOTICE "md/raid10:%s: %s: failing "
"drive\n",
+ mdname(mddev),
bdevname(rdev->bdev, b));
md_error(mddev, rdev);
}
@@ -1622,20 +1619,21 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
READ) == 0) {
/* Well, this device is dead */
printk(KERN_NOTICE
- "raid10:%s: unable to read back "
+ "md/raid10:%s: unable to read back "
"corrected sectors"
" (%d sectors at %llu on %s)\n",
mdname(mddev), s,
(unsigned long long)(sect+
rdev->data_offset),
bdevname(rdev->bdev, b));
- printk(KERN_NOTICE "raid10:%s: failing drive\n",
+ printk(KERN_NOTICE "md/raid10:%s: %s: failing drive\n",
+ mdname(mddev),
bdevname(rdev->bdev, b));
md_error(mddev, rdev);
} else {
printk(KERN_INFO
- "raid10:%s: read error corrected"
+ "md/raid10:%s: read error corrected"
" (%d sectors at %llu on %s)\n",
mdname(mddev), s,
(unsigned long long)(sect+
@@ -1710,8 +1708,9 @@ static void raid10d(mddev_t *mddev)
mddev->ro ? IO_BLOCKED : NULL;
mirror = read_balance(conf, r10_bio);
if (mirror == -1) {
- printk(KERN_ALERT "raid10: %s: unrecoverable I/O"
+ printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
" read error for block %llu\n",
+ mdname(mddev),
bdevname(bio->bi_bdev,b),
(unsigned long long)r10_bio->sector);
raid_end_bio_io(r10_bio);
@@ -1721,8 +1720,9 @@ static void raid10d(mddev_t *mddev)
bio_put(bio);
rdev = conf->mirrors[mirror].rdev;
if (printk_ratelimit())
- printk(KERN_ERR "raid10: %s: redirecting sector %llu to"
+ printk(KERN_ERR "md/raid10:%s: %s: redirecting sector %llu to"
" another mirror\n",
+ mdname(mddev),
bdevname(rdev->bdev,b),
(unsigned long long)r10_bio->sector);
bio = bio_clone(r10_bio->master_bio, GFP_NOIO);
@@ -1980,7 +1980,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
r10_bio = rb2;
if (!test_and_set_bit(MD_RECOVERY_INTR,
&mddev->recovery))
- printk(KERN_INFO "raid10: %s: insufficient working devices for recovery.\n",
+ printk(KERN_INFO "md/raid10:%s: insufficient "
+ "working devices for recovery.\n",
mdname(mddev));
break;
}
@@ -2140,9 +2141,9 @@ raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
conf_t *conf = mddev->private;
if (!raid_disks)
- raid_disks = mddev->raid_disks;
+ raid_disks = conf->raid_disks;
if (!sectors)
- sectors = mddev->dev_sectors;
+ sectors = conf->dev_sectors;
size = sectors >> conf->chunk_shift;
sector_div(size, conf->far_copies);
@@ -2152,62 +2153,61 @@ raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
return size << conf->chunk_shift;
}
-static int run(mddev_t *mddev)
+
+static conf_t *setup_conf(mddev_t *mddev)
{
- conf_t *conf;
- int i, disk_idx, chunk_size;
- mirror_info_t *disk;
- mdk_rdev_t *rdev;
+ conf_t *conf = NULL;
int nc, fc, fo;
sector_t stride, size;
+ int err = -EINVAL;
if (mddev->chunk_sectors < (PAGE_SIZE >> 9) ||
!is_power_of_2(mddev->chunk_sectors)) {
- printk(KERN_ERR "md/raid10: chunk size must be "
- "at least PAGE_SIZE(%ld) and be a power of 2.\n", PAGE_SIZE);
- return -EINVAL;
+ printk(KERN_ERR "md/raid10:%s: chunk size must be "
+ "at least PAGE_SIZE(%ld) and be a power of 2.\n",
+ mdname(mddev), PAGE_SIZE);
+ goto out;
}
nc = mddev->layout & 255;
fc = (mddev->layout >> 8) & 255;
fo = mddev->layout & (1<<16);
+
if ((nc*fc) <2 || (nc*fc) > mddev->raid_disks ||
(mddev->layout >> 17)) {
- printk(KERN_ERR "raid10: %s: unsupported raid10 layout: 0x%8x\n",
+ printk(KERN_ERR "md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
mdname(mddev), mddev->layout);
goto out;
}
- /*
- * copy the already verified devices into our private RAID10
- * bookkeeping area. [whatever we allocate in run(),
- * should be freed in stop()]
- */
+
+ err = -ENOMEM;
conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
- mddev->private = conf;
- if (!conf) {
- printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
- mdname(mddev));
+ if (!conf)
goto out;
- }
+
conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
- GFP_KERNEL);
- if (!conf->mirrors) {
- printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
- mdname(mddev));
- goto out_free_conf;
- }
+ GFP_KERNEL);
+ if (!conf->mirrors)
+ goto out;
conf->tmppage = alloc_page(GFP_KERNEL);
if (!conf->tmppage)
- goto out_free_conf;
+ goto out;
+
conf->raid_disks = mddev->raid_disks;
conf->near_copies = nc;
conf->far_copies = fc;
conf->copies = nc*fc;
conf->far_offset = fo;
- conf->chunk_mask = mddev->chunk_sectors - 1;
- conf->chunk_shift = ffz(~mddev->chunk_sectors);
+ conf->chunk_mask = mddev->new_chunk_sectors - 1;
+ conf->chunk_shift = ffz(~mddev->new_chunk_sectors);
+
+ conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
+ r10bio_pool_free, conf);
+ if (!conf->r10bio_pool)
+ goto out;
+
size = mddev->dev_sectors >> conf->chunk_shift;
sector_div(size, fc);
size = size * conf->raid_disks;
@@ -2221,7 +2221,8 @@ static int run(mddev_t *mddev)
*/
stride += conf->raid_disks - 1;
sector_div(stride, conf->raid_disks);
- mddev->dev_sectors = stride << conf->chunk_shift;
+
+ conf->dev_sectors = stride << conf->chunk_shift;
if (fo)
stride = 1;
@@ -2229,18 +2230,63 @@ static int run(mddev_t *mddev)
sector_div(stride, fc);
conf->stride = stride << conf->chunk_shift;
- conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
- r10bio_pool_free, conf);
- if (!conf->r10bio_pool) {
- printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
- mdname(mddev));
- goto out_free_conf;
- }
- conf->mddev = mddev;
spin_lock_init(&conf->device_lock);
+ INIT_LIST_HEAD(&conf->retry_list);
+
+ spin_lock_init(&conf->resync_lock);
+ init_waitqueue_head(&conf->wait_barrier);
+
+ conf->thread = md_register_thread(raid10d, mddev, NULL);
+ if (!conf->thread)
+ goto out;
+
+ conf->scale_disks = 0;
+ conf->mddev = mddev;
+ return conf;
+
+ out:
+ printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
+ mdname(mddev));
+ if (conf) {
+ if (conf->r10bio_pool)
+ mempool_destroy(conf->r10bio_pool);
+ kfree(conf->mirrors);
+ safe_put_page(conf->tmppage);
+ kfree(conf);
+ }
+ return ERR_PTR(err);
+}
+
+static int run(mddev_t *mddev)
+{
+ conf_t *conf;
+ int i, disk_idx, chunk_size;
+ mirror_info_t *disk;
+ mdk_rdev_t *rdev;
+ sector_t size;
+
+ /*
+ * copy the already verified devices into our private RAID10
+ * bookkeeping area. [whatever we allocate in run(),
+ * should be freed in stop()]
+ */
+
+ if (mddev->private == NULL) {
+ conf = setup_conf(mddev);
+ if (IS_ERR(conf))
+ return PTR_ERR(conf);
+ mddev->private = conf;
+ }
+ conf = mddev->private;
+ if (!conf)
+ goto out;
+
mddev->queue->queue_lock = &conf->device_lock;
+ mddev->thread = conf->thread;
+ conf->thread = NULL;
+
chunk_size = mddev->chunk_sectors << 9;
blk_queue_io_min(mddev->queue, chunk_size);
if (conf->raid_disks % conf->near_copies)
@@ -2251,9 +2297,14 @@ static int run(mddev_t *mddev)
list_for_each_entry(rdev, &mddev->disks, same_set) {
disk_idx = rdev->raid_disk;
- if (disk_idx >= mddev->raid_disks
+ if (disk_idx >= conf->raid_disks
|| disk_idx < 0)
continue;
+ if (conf->scale_disks) {
+ disk_idx *= conf->scale_disks;
+ rdev->raid_disk = disk_idx;
+ /* MOVE 'rd%d' link !! */
+ }
disk = conf->mirrors + disk_idx;
disk->rdev = rdev;
@@ -2271,14 +2322,9 @@ static int run(mddev_t *mddev)
disk->head_position = 0;
}
- INIT_LIST_HEAD(&conf->retry_list);
-
- spin_lock_init(&conf->resync_lock);
- init_waitqueue_head(&conf->wait_barrier);
-
/* need to check that every block has at least one working mirror */
if (!enough(conf)) {
- printk(KERN_ERR "raid10: not enough operational mirrors for %s\n",
+ printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
mdname(mddev));
goto out_free_conf;
}
@@ -2297,28 +2343,20 @@ static int run(mddev_t *mddev)
}
}
-
- mddev->thread = md_register_thread(raid10d, mddev, NULL);
- if (!mddev->thread) {
- printk(KERN_ERR
- "raid10: couldn't allocate thread for %s\n",
- mdname(mddev));
- goto out_free_conf;
- }
-
if (mddev->recovery_cp != MaxSector)
- printk(KERN_NOTICE "raid10: %s is not clean"
+ printk(KERN_NOTICE "md/raid10:%s: not clean"
" -- starting background reconstruction\n",
mdname(mddev));
printk(KERN_INFO
- "raid10: raid set %s active with %d out of %d devices\n",
- mdname(mddev), mddev->raid_disks - mddev->degraded,
- mddev->raid_disks);
+ "md/raid10:%s: active with %d out of %d devices\n",
+ mdname(mddev), conf->raid_disks - mddev->degraded,
+ conf->raid_disks);
/*
* Ok, everything is just fine now
*/
- md_set_array_sectors(mddev, raid10_size(mddev, 0, 0));
- mddev->resync_max_sectors = raid10_size(mddev, 0, 0);
+ mddev->dev_sectors = conf->dev_sectors;
+ size = raid10_size(mddev, 0, 0);
+ md_set_array_sectors(mddev, size);
mddev->queue->unplug_fn = raid10_unplug;
mddev->queue->backing_dev_info.congested_fn = raid10_congested;
@@ -2336,7 +2374,7 @@ static int run(mddev_t *mddev)
mddev->queue->backing_dev_info.ra_pages = 2* stripe;
}
- if (conf->near_copies < mddev->raid_disks)
+ if (conf->near_copies < conf->raid_disks)
blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
md_integrity_register(mddev);
return 0;
@@ -2348,6 +2386,7 @@ out_free_conf:
kfree(conf->mirrors);
kfree(conf);
mddev->private = NULL;
+ md_unregister_thread(mddev->thread);
out:
return -EIO;
}
@@ -2384,6 +2423,61 @@ static void raid10_quiesce(mddev_t *mddev, int state)
}
}
+static void *raid10_takeover_raid0(mddev_t *mddev)
+{
+ mdk_rdev_t *rdev;
+ conf_t *conf;
+
+ if (mddev->degraded > 0) {
+ printk(KERN_ERR "md/raid10:%s: Error: degraded raid0!\n",
+ mdname(mddev));
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Update slot numbers to obtain
+ * degraded raid10 with missing mirrors
+ */
+ list_for_each_entry(rdev, &mddev->disks, same_set) {
+ rdev->raid_disk *= 2;
+ }
+
+ /* Set new parameters */
+ mddev->new_level = 10;
+ /* new layout: far_copies = 1, near_copies = 2 */
+ mddev->new_layout = (1<<8) + 2;
+ mddev->new_chunk_sectors = mddev->chunk_sectors;
+ mddev->delta_disks = mddev->raid_disks;
+ mddev->degraded = mddev->raid_disks;
+ mddev->raid_disks *= 2;
+ /* make sure it will be not marked as dirty */
+ mddev->recovery_cp = MaxSector;
+
+ conf = setup_conf(mddev);
+ conf->scale_disks = 2;
+ return conf;
+}
+
+static void *raid10_takeover(mddev_t *mddev)
+{
+ struct raid0_private_data *raid0_priv;
+
+ /* raid10 can take over:
+ * raid0 - providing it has only two drives
+ */
+ if (mddev->level == 0) {
+ /* for raid0 takeover only one zone is supported */
+ raid0_priv = mddev->private;
+ if (raid0_priv->nr_strip_zones > 1) {
+ printk(KERN_ERR "md/raid10:%s: cannot takeover raid 0"
+ " with more than one zone.\n",
+ mdname(mddev));
+ return ERR_PTR(-EINVAL);
+ }
+ return raid10_takeover_raid0(mddev);
+ }
+ return ERR_PTR(-EINVAL);
+}
+
static struct mdk_personality raid10_personality =
{
.name = "raid10",
@@ -2400,6 +2494,7 @@ static struct mdk_personality raid10_personality =
.sync_request = sync_request,
.quiesce = raid10_quiesce,
.size = raid10_size,
+ .takeover = raid10_takeover,
};
static int __init raid_init(void)
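Before moving on to raid10.h, a worked example may help with the takeover arithmetic introduced above: raid10_takeover_raid0() installs layout (1 << 8) + 2, doubles raid_disks, leaves half the slots degraded, and multiplies each rdev->raid_disk by two so the old raid0 members land in the even slots. The snippet below decodes the layout word the same way setup_conf() does; it is a standalone userspace illustration with an assumed two-disk raid0, not kernel code from the patch.

#include <stdio.h>

int main(void)
{
	int layout = (1 << 8) + 2;	/* value installed by the takeover */
	int nc = layout & 255;		/* near copies */
	int fc = (layout >> 8) & 255;	/* far copies */
	int fo = layout & (1 << 16);	/* far-offset flag */
	int old_disks = 2;		/* members of the original raid0 (assumed) */

	printf("near_copies=%d far_copies=%d far_offset=%d\n",
	       nc, fc, fo ? 1 : 0);
	printf("raid_disks=%d degraded=%d (odd slots start missing)\n",
	       old_disks * 2, old_disks);
	return 0;
}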
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 59cd1efb8d30..3824a087e17c 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -33,9 +33,16 @@ struct r10_private_data_s {
* 1 stripe.
*/
+ sector_t dev_sectors; /* temp copy of mddev->dev_sectors */
+
int chunk_shift; /* shift from chunks to sectors */
sector_t chunk_mask;
+ int scale_disks; /* When starting array, multiply
+ * each ->raid_disk by this.
+ * Need for raid0->raid10 migration
+ */
+
struct list_head retry_list;
/* queue pending writes and submit them on unplug */
struct bio_list pending_bio_list;
@@ -57,6 +64,11 @@ struct r10_private_data_s {
mempool_t *r10bio_pool;
mempool_t *r10buf_pool;
struct page *tmppage;
+
+ /* When taking over an array from a different personality, we store
+ * the new thread here until we fully activate the array.
+ */
+ struct mdk_thread_s *thread;
};
typedef struct r10_private_data_s conf_t;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 15348c393b5d..9ea17d6c799b 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -53,6 +53,7 @@
#include <linux/slab.h>
#include "md.h"
#include "raid5.h"
+#include "raid0.h"
#include "bitmap.h"
/*
@@ -1509,7 +1510,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
set_bit(R5_UPTODATE, &sh->dev[i].flags);
if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
rdev = conf->disks[i].rdev;
- printk_rl(KERN_INFO "raid5:%s: read error corrected"
+ printk_rl(KERN_INFO "md/raid:%s: read error corrected"
" (%lu sectors at %llu on %s)\n",
mdname(conf->mddev), STRIPE_SECTORS,
(unsigned long long)(sh->sector
@@ -1529,7 +1530,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
atomic_inc(&rdev->read_errors);
if (conf->mddev->degraded >= conf->max_degraded)
printk_rl(KERN_WARNING
- "raid5:%s: read error not correctable "
+ "md/raid:%s: read error not correctable "
"(sector %llu on %s).\n",
mdname(conf->mddev),
(unsigned long long)(sh->sector
@@ -1538,7 +1539,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
/* Oh, no!!! */
printk_rl(KERN_WARNING
- "raid5:%s: read error NOT corrected!! "
+ "md/raid:%s: read error NOT corrected!! "
"(sector %llu on %s).\n",
mdname(conf->mddev),
(unsigned long long)(sh->sector
@@ -1547,7 +1548,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
else if (atomic_read(&rdev->read_errors)
> conf->max_nr_stripes)
printk(KERN_WARNING
- "raid5:%s: Too many read errors, failing device %s.\n",
+ "md/raid:%s: Too many read errors, failing device %s.\n",
mdname(conf->mddev), bdn);
else
retry = 1;
@@ -1619,8 +1620,8 @@ static void raid5_build_block(struct stripe_head *sh, int i, int previous)
static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
char b[BDEVNAME_SIZE];
- raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
- pr_debug("raid5: error called\n");
+ raid5_conf_t *conf = mddev->private;
+ pr_debug("raid456: error called\n");
if (!test_bit(Faulty, &rdev->flags)) {
set_bit(MD_CHANGE_DEVS, &mddev->flags);
@@ -1636,9 +1637,13 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
}
set_bit(Faulty, &rdev->flags);
printk(KERN_ALERT
- "raid5: Disk failure on %s, disabling device.\n"
- "raid5: Operation continuing on %d devices.\n",
- bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
+ "md/raid:%s: Disk failure on %s, disabling device.\n"
+ KERN_ALERT
+ "md/raid:%s: Operation continuing on %d devices.\n",
+ mdname(mddev),
+ bdevname(rdev->bdev, b),
+ mdname(mddev),
+ conf->raid_disks - mddev->degraded);
}
}
@@ -1714,8 +1719,6 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
pd_idx = data_disks;
break;
default:
- printk(KERN_ERR "raid5: unsupported algorithm %d\n",
- algorithm);
BUG();
}
break;
@@ -1832,10 +1835,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
qd_idx = raid_disks - 1;
break;
-
default:
- printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
- algorithm);
BUG();
}
break;
@@ -1898,8 +1898,6 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
case ALGORITHM_PARITY_N:
break;
default:
- printk(KERN_ERR "raid5: unsupported algorithm %d\n",
- algorithm);
BUG();
}
break;
@@ -1958,8 +1956,6 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
i -= 1;
break;
default:
- printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
- algorithm);
BUG();
}
break;
@@ -1972,7 +1968,8 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
previous, &dummy1, &sh2);
if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
|| sh2.qd_idx != sh->qd_idx) {
- printk(KERN_ERR "compute_blocknr: map not correct\n");
+ printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n",
+ mdname(conf->mddev));
return 0;
}
return r_sector;
@@ -3709,10 +3706,10 @@ static void raid5_align_endio(struct bio *bi, int error)
bio_put(bi);
- mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata;
- conf = mddev->private;
rdev = (void*)raid_bi->bi_next;
raid_bi->bi_next = NULL;
+ mddev = rdev->mddev;
+ conf = mddev->private;
rdev_dec_pending(rdev, conf->mddev);
@@ -3749,9 +3746,8 @@ static int bio_fits_rdev(struct bio *bi)
}
-static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
+static int chunk_aligned_read(mddev_t *mddev, struct bio * raid_bio)
{
- mddev_t *mddev = q->queuedata;
raid5_conf_t *conf = mddev->private;
int dd_idx;
struct bio* align_bi;
@@ -3866,16 +3862,15 @@ static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf)
return sh;
}
-static int make_request(struct request_queue *q, struct bio * bi)
+static int make_request(mddev_t *mddev, struct bio * bi)
{
- mddev_t *mddev = q->queuedata;
raid5_conf_t *conf = mddev->private;
int dd_idx;
sector_t new_sector;
sector_t logical_sector, last_sector;
struct stripe_head *sh;
const int rw = bio_data_dir(bi);
- int cpu, remaining;
+ int remaining;
if (unlikely(bio_rw_flagged(bi, BIO_RW_BARRIER))) {
/* Drain all pending writes. We only really need
@@ -3890,15 +3885,9 @@ static int make_request(struct request_queue *q, struct bio * bi)
md_write_start(mddev, bi);
- cpu = part_stat_lock();
- part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
- part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
- bio_sectors(bi));
- part_stat_unlock();
-
if (rw == READ &&
mddev->reshape_position == MaxSector &&
- chunk_aligned_read(q,bi))
+ chunk_aligned_read(mddev,bi))
return 0;
logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
@@ -3946,7 +3935,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
new_sector = raid5_compute_sector(conf, logical_sector,
previous,
&dd_idx, NULL);
- pr_debug("raid5: make_request, sector %llu logical %llu\n",
+ pr_debug("raid456: make_request, sector %llu logical %llu\n",
(unsigned long long)new_sector,
(unsigned long long)logical_sector);
@@ -4054,7 +4043,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
* As the reads complete, handle_stripe will copy the data
* into the destination stripe and release that stripe.
*/
- raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
+ raid5_conf_t *conf = mddev->private;
struct stripe_head *sh;
sector_t first_sector, last_sector;
int raid_disks = conf->previous_raid_disks;
@@ -4263,7 +4252,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
/* FIXME go_faster isn't used */
static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
{
- raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
+ raid5_conf_t *conf = mddev->private;
struct stripe_head *sh;
sector_t max_sector = mddev->dev_sectors;
int sync_blocks;
@@ -4725,7 +4714,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
if (mddev->new_level != 5
&& mddev->new_level != 4
&& mddev->new_level != 6) {
- printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n",
+ printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n",
mdname(mddev), mddev->new_level);
return ERR_PTR(-EIO);
}
@@ -4733,12 +4722,12 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
&& !algorithm_valid_raid5(mddev->new_layout)) ||
(mddev->new_level == 6
&& !algorithm_valid_raid6(mddev->new_layout))) {
- printk(KERN_ERR "raid5: %s: layout %d not supported\n",
+ printk(KERN_ERR "md/raid:%s: layout %d not supported\n",
mdname(mddev), mddev->new_layout);
return ERR_PTR(-EIO);
}
if (mddev->new_level == 6 && mddev->raid_disks < 4) {
- printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n",
+ printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n",
mdname(mddev), mddev->raid_disks);
return ERR_PTR(-EINVAL);
}
@@ -4746,8 +4735,8 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
if (!mddev->new_chunk_sectors ||
(mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
!is_power_of_2(mddev->new_chunk_sectors)) {
- printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
- mddev->new_chunk_sectors << 9, mdname(mddev));
+ printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n",
+ mdname(mddev), mddev->new_chunk_sectors << 9);
return ERR_PTR(-EINVAL);
}
@@ -4789,7 +4778,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
if (raid5_alloc_percpu(conf) != 0)
goto abort;
- pr_debug("raid5: run(%s) called.\n", mdname(mddev));
+ pr_debug("raid456: run(%s) called.\n", mdname(mddev));
list_for_each_entry(rdev, &mddev->disks, same_set) {
raid_disk = rdev->raid_disk;
@@ -4802,9 +4791,9 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
if (test_bit(In_sync, &rdev->flags)) {
char b[BDEVNAME_SIZE];
- printk(KERN_INFO "raid5: device %s operational as raid"
- " disk %d\n", bdevname(rdev->bdev,b),
- raid_disk);
+ printk(KERN_INFO "md/raid:%s: device %s operational as raid"
+ " disk %d\n",
+ mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
} else
/* Cannot rely on bitmap to complete recovery */
conf->fullsync = 1;
@@ -4828,16 +4817,17 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
if (grow_stripes(conf, conf->max_nr_stripes)) {
printk(KERN_ERR
- "raid5: couldn't allocate %dkB for buffers\n", memory);
+ "md/raid:%s: couldn't allocate %dkB for buffers\n",
+ mdname(mddev), memory);
goto abort;
} else
- printk(KERN_INFO "raid5: allocated %dkB for %s\n",
- memory, mdname(mddev));
+ printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
+ mdname(mddev), memory);
conf->thread = md_register_thread(raid5d, mddev, NULL);
if (!conf->thread) {
printk(KERN_ERR
- "raid5: couldn't allocate thread for %s\n",
+ "md/raid:%s: couldn't allocate thread.\n",
mdname(mddev));
goto abort;
}
@@ -4888,7 +4878,7 @@ static int run(mddev_t *mddev)
sector_t reshape_offset = 0;
if (mddev->recovery_cp != MaxSector)
- printk(KERN_NOTICE "raid5: %s is not clean"
+ printk(KERN_NOTICE "md/raid:%s: not clean"
" -- starting background reconstruction\n",
mdname(mddev));
if (mddev->reshape_position != MaxSector) {
@@ -4902,7 +4892,7 @@ static int run(mddev_t *mddev)
int max_degraded = (mddev->level == 6 ? 2 : 1);
if (mddev->new_level != mddev->level) {
- printk(KERN_ERR "raid5: %s: unsupported reshape "
+ printk(KERN_ERR "md/raid:%s: unsupported reshape "
"required - aborting.\n",
mdname(mddev));
return -EINVAL;
@@ -4915,8 +4905,8 @@ static int run(mddev_t *mddev)
here_new = mddev->reshape_position;
if (sector_div(here_new, mddev->new_chunk_sectors *
(mddev->raid_disks - max_degraded))) {
- printk(KERN_ERR "raid5: reshape_position not "
- "on a stripe boundary\n");
+ printk(KERN_ERR "md/raid:%s: reshape_position not "
+ "on a stripe boundary\n", mdname(mddev));
return -EINVAL;
}
reshape_offset = here_new * mddev->new_chunk_sectors;
@@ -4937,8 +4927,9 @@ static int run(mddev_t *mddev)
if ((here_new * mddev->new_chunk_sectors !=
here_old * mddev->chunk_sectors) ||
mddev->ro == 0) {
- printk(KERN_ERR "raid5: in-place reshape must be started"
- " in read-only mode - aborting\n");
+ printk(KERN_ERR "md/raid:%s: in-place reshape must be started"
+ " in read-only mode - aborting\n",
+ mdname(mddev));
return -EINVAL;
}
} else if (mddev->delta_disks < 0
@@ -4947,11 +4938,13 @@ static int run(mddev_t *mddev)
: (here_new * mddev->new_chunk_sectors >=
here_old * mddev->chunk_sectors)) {
/* Reading from the same stripe as writing to - bad */
- printk(KERN_ERR "raid5: reshape_position too early for "
- "auto-recovery - aborting.\n");
+ printk(KERN_ERR "md/raid:%s: reshape_position too early for "
+ "auto-recovery - aborting.\n",
+ mdname(mddev));
return -EINVAL;
}
- printk(KERN_INFO "raid5: reshape will continue\n");
+ printk(KERN_INFO "md/raid:%s: reshape will continue\n",
+ mdname(mddev));
/* OK, we should be able to continue; */
} else {
BUG_ON(mddev->level != mddev->new_level);
@@ -4993,18 +4986,6 @@ static int run(mddev_t *mddev)
mddev->minor_version > 90)
rdev->recovery_offset = reshape_offset;
- printk("%d: w=%d pa=%d pr=%d m=%d a=%d r=%d op1=%d op2=%d\n",
- rdev->raid_disk, working_disks, conf->prev_algo,
- conf->previous_raid_disks, conf->max_degraded,
- conf->algorithm, conf->raid_disks,
- only_parity(rdev->raid_disk,
- conf->prev_algo,
- conf->previous_raid_disks,
- conf->max_degraded),
- only_parity(rdev->raid_disk,
- conf->algorithm,
- conf->raid_disks,
- conf->max_degraded));
if (rdev->recovery_offset < reshape_offset) {
/* We need to check old and new layout */
if (!only_parity(rdev->raid_disk,
@@ -5025,7 +5006,7 @@ static int run(mddev_t *mddev)
- working_disks);
if (mddev->degraded > conf->max_degraded) {
- printk(KERN_ERR "raid5: not enough operational devices for %s"
+ printk(KERN_ERR "md/raid:%s: not enough operational devices"
" (%d/%d failed)\n",
mdname(mddev), mddev->degraded, conf->raid_disks);
goto abort;
@@ -5039,32 +5020,32 @@ static int run(mddev_t *mddev)
mddev->recovery_cp != MaxSector) {
if (mddev->ok_start_degraded)
printk(KERN_WARNING
- "raid5: starting dirty degraded array: %s"
- "- data corruption possible.\n",
+ "md/raid:%s: starting dirty degraded array"
+ " - data corruption possible.\n",
mdname(mddev));
else {
printk(KERN_ERR
- "raid5: cannot start dirty degraded array for %s\n",
+ "md/raid:%s: cannot start dirty degraded array.\n",
mdname(mddev));
goto abort;
}
}
if (mddev->degraded == 0)
- printk("raid5: raid level %d set %s active with %d out of %d"
- " devices, algorithm %d\n", conf->level, mdname(mddev),
+ printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d"
+ " devices, algorithm %d\n", mdname(mddev), conf->level,
mddev->raid_disks-mddev->degraded, mddev->raid_disks,
mddev->new_layout);
else
- printk(KERN_ALERT "raid5: raid level %d set %s active with %d"
- " out of %d devices, algorithm %d\n", conf->level,
- mdname(mddev), mddev->raid_disks - mddev->degraded,
- mddev->raid_disks, mddev->new_layout);
+ printk(KERN_ALERT "md/raid:%s: raid level %d active with %d"
+ " out of %d devices, algorithm %d\n",
+ mdname(mddev), conf->level,
+ mddev->raid_disks - mddev->degraded,
+ mddev->raid_disks, mddev->new_layout);
print_raid5_conf(conf);
if (conf->reshape_progress != MaxSector) {
- printk("...ok start reshape thread\n");
conf->reshape_safe = conf->reshape_progress;
atomic_set(&conf->reshape_stripes, 0);
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
@@ -5087,9 +5068,11 @@ static int run(mddev_t *mddev)
}
/* Ok, everything is just fine now */
- if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
+ if (mddev->to_remove == &raid5_attrs_group)
+ mddev->to_remove = NULL;
+ else if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
printk(KERN_WARNING
- "raid5: failed to create sysfs attributes for %s\n",
+ "md/raid:%s: failed to create sysfs attributes.\n",
mdname(mddev));
mddev->queue->queue_lock = &conf->device_lock;
@@ -5119,22 +5102,21 @@ abort:
free_conf(conf);
}
mddev->private = NULL;
- printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev));
+ printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev));
return -EIO;
}
-
-
static int stop(mddev_t *mddev)
{
- raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
+ raid5_conf_t *conf = mddev->private;
md_unregister_thread(mddev->thread);
mddev->thread = NULL;
mddev->queue->backing_dev_info.congested_fn = NULL;
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
free_conf(conf);
- mddev->private = &raid5_attrs_group;
+ mddev->private = NULL;
+ mddev->to_remove = &raid5_attrs_group;
return 0;
}
@@ -5175,7 +5157,7 @@ static void printall(struct seq_file *seq, raid5_conf_t *conf)
static void status(struct seq_file *seq, mddev_t *mddev)
{
- raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
+ raid5_conf_t *conf = mddev->private;
int i;
seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
@@ -5197,21 +5179,22 @@ static void print_raid5_conf (raid5_conf_t *conf)
int i;
struct disk_info *tmp;
- printk("RAID5 conf printout:\n");
+ printk(KERN_DEBUG "RAID conf printout:\n");
if (!conf) {
printk("(conf==NULL)\n");
return;
}
- printk(" --- rd:%d wd:%d\n", conf->raid_disks,
- conf->raid_disks - conf->mddev->degraded);
+ printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level,
+ conf->raid_disks,
+ conf->raid_disks - conf->mddev->degraded);
for (i = 0; i < conf->raid_disks; i++) {
char b[BDEVNAME_SIZE];
tmp = conf->disks + i;
if (tmp->rdev)
- printk(" disk %d, o:%d, dev:%s\n",
- i, !test_bit(Faulty, &tmp->rdev->flags),
- bdevname(tmp->rdev->bdev,b));
+ printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n",
+ i, !test_bit(Faulty, &tmp->rdev->flags),
+ bdevname(tmp->rdev->bdev, b));
}
}
@@ -5334,7 +5317,6 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors)
raid5_size(mddev, sectors, mddev->raid_disks))
return -EINVAL;
set_capacity(mddev->gendisk, mddev->array_sectors);
- mddev->changed = 1;
revalidate_disk(mddev->gendisk);
if (sectors > mddev->dev_sectors && mddev->recovery_cp == MaxSector) {
mddev->recovery_cp = mddev->dev_sectors;
@@ -5360,7 +5342,8 @@ static int check_stripe_cache(mddev_t *mddev)
> conf->max_nr_stripes ||
((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
> conf->max_nr_stripes) {
- printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n",
+ printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes. Needed %lu\n",
+ mdname(mddev),
((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
/ STRIPE_SIZE)*4);
return 0;
@@ -5431,7 +5414,7 @@ static int raid5_start_reshape(mddev_t *mddev)
*/
if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
< mddev->array_sectors) {
- printk(KERN_ERR "md: %s: array size must be reduced "
+ printk(KERN_ERR "md/raid:%s: array size must be reduced "
"before number of disks\n", mdname(mddev));
return -EINVAL;
}
@@ -5469,9 +5452,9 @@ static int raid5_start_reshape(mddev_t *mddev)
if (sysfs_create_link(&mddev->kobj,
&rdev->kobj, nm))
printk(KERN_WARNING
- "raid5: failed to create "
- " link %s for %s\n",
- nm, mdname(mddev));
+ "md/raid:%s: failed to create "
+ " link %s\n",
+ mdname(mddev), nm);
} else
break;
}
@@ -5548,7 +5531,6 @@ static void raid5_finish_reshape(mddev_t *mddev)
if (mddev->delta_disks > 0) {
md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
set_capacity(mddev->gendisk, mddev->array_sectors);
- mddev->changed = 1;
revalidate_disk(mddev->gendisk);
} else {
int d;
@@ -5613,6 +5595,29 @@ static void raid5_quiesce(mddev_t *mddev, int state)
}
+static void *raid45_takeover_raid0(mddev_t *mddev, int level)
+{
+ struct raid0_private_data *raid0_priv = mddev->private;
+
+ /* for raid0 takeover only one zone is supported */
+ if (raid0_priv->nr_strip_zones > 1) {
+ printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n",
+ mdname(mddev));
+ return ERR_PTR(-EINVAL);
+ }
+
+ mddev->new_level = level;
+ mddev->new_layout = ALGORITHM_PARITY_N;
+ mddev->new_chunk_sectors = mddev->chunk_sectors;
+ mddev->raid_disks += 1;
+ mddev->delta_disks = 1;
+ /* make sure it will be not marked as dirty */
+ mddev->recovery_cp = MaxSector;
+
+ return setup_conf(mddev);
+}
+
+
static void *raid5_takeover_raid1(mddev_t *mddev)
{
int chunksect;
@@ -5737,12 +5742,13 @@ static int raid6_check_reshape(mddev_t *mddev)
static void *raid5_takeover(mddev_t *mddev)
{
/* raid5 can take over:
- * raid0 - if all devices are the same - make it a raid4 layout
+ * raid0 - if there is only one strip zone - make it a raid4 layout
* raid1 - if there are two drives. We need to know the chunk size
* raid4 - trivial - just use a raid4 layout.
* raid6 - Providing it is a *_6 layout
*/
-
+ if (mddev->level == 0)
+ return raid45_takeover_raid0(mddev, 5);
if (mddev->level == 1)
return raid5_takeover_raid1(mddev);
if (mddev->level == 4) {
@@ -5756,6 +5762,22 @@ static void *raid5_takeover(mddev_t *mddev)
return ERR_PTR(-EINVAL);
}
+static void *raid4_takeover(mddev_t *mddev)
+{
+ /* raid4 can take over:
+ * raid0 - if there is only one strip zone
+ * raid5 - if layout is right
+ */
+ if (mddev->level == 0)
+ return raid45_takeover_raid0(mddev, 4);
+ if (mddev->level == 5 &&
+ mddev->layout == ALGORITHM_PARITY_N) {
+ mddev->new_layout = 0;
+ mddev->new_level = 4;
+ return setup_conf(mddev);
+ }
+ return ERR_PTR(-EINVAL);
+}
static struct mdk_personality raid5_personality;
@@ -5871,6 +5893,7 @@ static struct mdk_personality raid4_personality =
.start_reshape = raid5_start_reshape,
.finish_reshape = raid5_finish_reshape,
.quiesce = raid5_quiesce,
+ .takeover = raid4_takeover,
};
static int __init raid5_init(void)
diff --git a/drivers/media/IR/Kconfig b/drivers/media/IR/Kconfig
index 4dde7d180a32..195c6cf359f6 100644
--- a/drivers/media/IR/Kconfig
+++ b/drivers/media/IR/Kconfig
@@ -7,3 +7,62 @@ config VIDEO_IR
tristate
depends on IR_CORE
default IR_CORE
+
+source "drivers/media/IR/keymaps/Kconfig"
+
+config IR_NEC_DECODER
+ tristate "Enable IR raw decoder for the NEC protocol"
+ depends on IR_CORE
+ default y
+
+ ---help---
+ Enable this option if you have an infrared remote control which
+ uses the NEC protocol, and you need software decoding support.
+
+config IR_RC5_DECODER
+ tristate "Enable IR raw decoder for the RC-5 protocol"
+ depends on IR_CORE
+ default y
+
+ ---help---
+ Enable this option if you have an infrared remote control which
+ uses the RC-5 protocol, and you need software decoding support.
+
+config IR_RC6_DECODER
+ tristate "Enable IR raw decoder for the RC6 protocol"
+ depends on IR_CORE
+ default y
+
+ ---help---
+ Enable this option if you have an infrared remote control which
+ uses the RC6 protocol, and you need software decoding support.
+
+config IR_JVC_DECODER
+ tristate "Enable IR raw decoder for the JVC protocol"
+ depends on IR_CORE
+ default y
+
+ ---help---
+ Enable this option if you have an infrared remote control which
+ uses the JVC protocol, and you need software decoding support.
+
+config IR_SONY_DECODER
+ tristate "Enable IR raw decoder for the Sony protocol"
+ depends on IR_CORE
+ default y
+
+ ---help---
+ Enable this option if you have an infrared remote control which
+ uses the Sony protocol, and you need software decoding support.
+
+config IR_IMON
+ tristate "SoundGraph iMON Receiver and Display"
+ depends on USB_ARCH_HAS_HCD
+ depends on IR_CORE
+ select USB
+ ---help---
+ Say Y here if you want to use a SoundGraph iMON (aka Antec Veris)
+ IR Receiver and/or LCD/VFD/VGA display.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imon.
diff --git a/drivers/media/IR/Makefile b/drivers/media/IR/Makefile
index 171890e7a41d..b998fcced2e4 100644
--- a/drivers/media/IR/Makefile
+++ b/drivers/media/IR/Makefile
@@ -1,5 +1,15 @@
-ir-common-objs := ir-functions.o ir-keymaps.o
-ir-core-objs := ir-keytable.o ir-sysfs.o
+ir-common-objs := ir-functions.o
+ir-core-objs := ir-keytable.o ir-sysfs.o ir-raw-event.o rc-map.o
+
+obj-y += keymaps/
obj-$(CONFIG_IR_CORE) += ir-core.o
obj-$(CONFIG_VIDEO_IR) += ir-common.o
+obj-$(CONFIG_IR_NEC_DECODER) += ir-nec-decoder.o
+obj-$(CONFIG_IR_RC5_DECODER) += ir-rc5-decoder.o
+obj-$(CONFIG_IR_RC6_DECODER) += ir-rc6-decoder.o
+obj-$(CONFIG_IR_JVC_DECODER) += ir-jvc-decoder.o
+obj-$(CONFIG_IR_SONY_DECODER) += ir-sony-decoder.o
+
+# stand-alone IR receivers/transmitters
+obj-$(CONFIG_IR_IMON) += imon.o
diff --git a/drivers/media/IR/imon.c b/drivers/media/IR/imon.c
new file mode 100644
index 000000000000..27743eb0916d
--- /dev/null
+++ b/drivers/media/IR/imon.c
@@ -0,0 +1,2397 @@
+/*
+ * imon.c: input and display driver for SoundGraph iMON IR/VFD/LCD
+ *
+ * Copyright(C) 2009 Jarod Wilson <jarod@wilsonet.com>
+ * Portions based on the original lirc_imon driver,
+ * Copyright(C) 2004 Venky Raju(dev@venky.ws)
+ *
+ * Huge thanks to R. Geoff Newbury for invaluable debugging on the
+ * 0xffdc iMON devices, and for sending me one to hack on, without
+ * which the support for them wouldn't be nearly as good. Thanks
+ * also to the numerous 0xffdc device owners that tested auto-config
+ * support for me and provided debug dumps from their devices.
+ *
+ * imon is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <linux/input.h>
+#include <linux/usb.h>
+#include <linux/usb/input.h>
+#include <media/ir-core.h>
+
+#include <linux/time.h>
+#include <linux/timer.h>
+
+#define MOD_AUTHOR "Jarod Wilson <jarod@wilsonet.com>"
+#define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display"
+#define MOD_NAME "imon"
+#define MOD_VERSION "0.9.1"
+
+#define DISPLAY_MINOR_BASE 144
+#define DEVICE_NAME "lcd%d"
+
+#define BUF_CHUNK_SIZE 8
+#define BUF_SIZE 128
+
+#define BIT_DURATION 250 /* each bit received is 250us */
+
+#define IMON_CLOCK_ENABLE_PACKETS 2
+#define IMON_KEY_RELEASE_OFFSET 1000
+
+/*** P R O T O T Y P E S ***/
+
+/* USB Callback prototypes */
+static int imon_probe(struct usb_interface *interface,
+ const struct usb_device_id *id);
+static void imon_disconnect(struct usb_interface *interface);
+static void usb_rx_callback_intf0(struct urb *urb);
+static void usb_rx_callback_intf1(struct urb *urb);
+static void usb_tx_callback(struct urb *urb);
+
+/* suspend/resume support */
+static int imon_resume(struct usb_interface *intf);
+static int imon_suspend(struct usb_interface *intf, pm_message_t message);
+
+/* Display file_operations function prototypes */
+static int display_open(struct inode *inode, struct file *file);
+static int display_close(struct inode *inode, struct file *file);
+
+/* VFD write operation */
+static ssize_t vfd_write(struct file *file, const char *buf,
+ size_t n_bytes, loff_t *pos);
+
+/* LCD file_operations override function prototypes */
+static ssize_t lcd_write(struct file *file, const char *buf,
+ size_t n_bytes, loff_t *pos);
+
+/*** G L O B A L S ***/
+
+struct imon_context {
+ struct device *dev;
+ struct ir_dev_props *props;
+ struct ir_input_dev *ir;
+ /* Newer devices have two interfaces */
+ struct usb_device *usbdev_intf0;
+ struct usb_device *usbdev_intf1;
+
+ bool display_supported; /* not all controllers do */
+ bool display_isopen; /* display port has been opened */
+ bool rf_isassociating; /* RF remote associating */
+ bool dev_present_intf0; /* USB device presence, interface 0 */
+ bool dev_present_intf1; /* USB device presence, interface 1 */
+
+ struct mutex lock; /* to lock this object */
+ wait_queue_head_t remove_ok; /* For unexpected USB disconnects */
+
+ struct usb_endpoint_descriptor *rx_endpoint_intf0;
+ struct usb_endpoint_descriptor *rx_endpoint_intf1;
+ struct usb_endpoint_descriptor *tx_endpoint;
+ struct urb *rx_urb_intf0;
+ struct urb *rx_urb_intf1;
+ struct urb *tx_urb;
+ bool tx_control;
+ unsigned char usb_rx_buf[8];
+ unsigned char usb_tx_buf[8];
+
+ struct tx_t {
+ unsigned char data_buf[35]; /* user data buffer */
+ struct completion finished; /* wait for write to finish */
+ bool busy; /* write in progress */
+ int status; /* status of tx completion */
+ } tx;
+
+ u16 vendor; /* usb vendor ID */
+ u16 product; /* usb product ID */
+
+ struct input_dev *idev; /* input device for remote */
+ struct input_dev *touch; /* input device for touchscreen */
+
+ u32 kc; /* current input keycode */
+ u32 last_keycode; /* last reported input keycode */
+ u64 ir_type; /* iMON or MCE (RC6) IR protocol? */
+ u8 mce_toggle_bit; /* last mce toggle bit */
+ bool release_code; /* some keys send a release code */
+
+ u8 display_type; /* store the display type */
+ bool pad_mouse; /* toggle kbd(0)/mouse(1) mode */
+
+ char name_idev[128]; /* input device name */
+ char phys_idev[64]; /* input device phys path */
+ struct timer_list itimer; /* input device timer, needed for rc6 */
+
+ char name_touch[128]; /* touch screen name */
+ char phys_touch[64]; /* touch screen phys path */
+ struct timer_list ttimer; /* touch screen timer */
+ int touch_x; /* x coordinate on touchscreen */
+ int touch_y; /* y coordinate on touchscreen */
+};
+
+#define TOUCH_TIMEOUT (HZ/30)
+
+/* vfd character device file operations */
+static const struct file_operations vfd_fops = {
+ .owner = THIS_MODULE,
+ .open = &display_open,
+ .write = &vfd_write,
+ .release = &display_close
+};
+
+/* lcd character device file operations */
+static const struct file_operations lcd_fops = {
+ .owner = THIS_MODULE,
+ .open = &display_open,
+ .write = &lcd_write,
+ .release = &display_close
+};
+
+enum {
+ IMON_DISPLAY_TYPE_AUTO = 0,
+ IMON_DISPLAY_TYPE_VFD = 1,
+ IMON_DISPLAY_TYPE_LCD = 2,
+ IMON_DISPLAY_TYPE_VGA = 3,
+ IMON_DISPLAY_TYPE_NONE = 4,
+};
+
+enum {
+ IMON_KEY_IMON = 0,
+ IMON_KEY_MCE = 1,
+ IMON_KEY_PANEL = 2,
+};
+
+/*
+ * USB Device ID for iMON USB Control Boards
+ *
+ * The Windows drivers contain 6 different inf files, more or less one for
+ * each new device until the 0x0034-0x0046 devices, which all use the same
+ * driver. Some of the devices in the 34-46 range haven't been definitively
+ * identified yet. Early devices have either a TriGem Computer, Inc. or a
+ * Samsung vendor ID (0x0aa8 and 0x04e8 respectively), while all later
+ * devices use the SoundGraph vendor ID (0x15c2). This driver only supports
+ * the ffdc and later devices, which do onboard decoding.
+ */
+static struct usb_device_id imon_usb_id_table[] = {
+ /*
+ * Several devices with this same device ID, all use iMON_PAD.inf
+ * SoundGraph iMON PAD (IR & VFD)
+ * SoundGraph iMON PAD (IR & LCD)
+ * SoundGraph iMON Knob (IR only)
+ */
+ { USB_DEVICE(0x15c2, 0xffdc) },
+
+ /*
+ * Newer devices, all driven by the latest iMON Windows driver, full
+ * list of device IDs extracted via 'strings Setup/data1.hdr |grep 15c2'
+ * Need user input to fill in details on unknown devices.
+ */
+ /* SoundGraph iMON OEM Touch LCD (IR & 7" VGA LCD) */
+ { USB_DEVICE(0x15c2, 0x0034) },
+ /* SoundGraph iMON OEM Touch LCD (IR & 4.3" VGA LCD) */
+ { USB_DEVICE(0x15c2, 0x0035) },
+ /* SoundGraph iMON OEM VFD (IR & VFD) */
+ { USB_DEVICE(0x15c2, 0x0036) },
+ /* device specifics unknown */
+ { USB_DEVICE(0x15c2, 0x0037) },
+ /* SoundGraph iMON OEM LCD (IR & LCD) */
+ { USB_DEVICE(0x15c2, 0x0038) },
+ /* SoundGraph iMON UltraBay (IR & LCD) */
+ { USB_DEVICE(0x15c2, 0x0039) },
+ /* device specifics unknown */
+ { USB_DEVICE(0x15c2, 0x003a) },
+ /* device specifics unknown */
+ { USB_DEVICE(0x15c2, 0x003b) },
+ /* SoundGraph iMON OEM Inside (IR only) */
+ { USB_DEVICE(0x15c2, 0x003c) },
+ /* device specifics unknown */
+ { USB_DEVICE(0x15c2, 0x003d) },
+ /* device specifics unknown */
+ { USB_DEVICE(0x15c2, 0x003e) },
+ /* device specifics unknown */
+ { USB_DEVICE(0x15c2, 0x003f) },
+ /* device specifics unknown */
+ { USB_DEVICE(0x15c2, 0x0040) },
+ /* SoundGraph iMON MINI (IR only) */
+ { USB_DEVICE(0x15c2, 0x0041) },
+ /* Antec Veris Multimedia Station EZ External (IR only) */
+ { USB_DEVICE(0x15c2, 0x0042) },
+ /* Antec Veris Multimedia Station Basic Internal (IR only) */
+ { USB_DEVICE(0x15c2, 0x0043) },
+ /* Antec Veris Multimedia Station Elite (IR & VFD) */
+ { USB_DEVICE(0x15c2, 0x0044) },
+ /* Antec Veris Multimedia Station Premiere (IR & LCD) */
+ { USB_DEVICE(0x15c2, 0x0045) },
+ /* device specifics unknown */
+ { USB_DEVICE(0x15c2, 0x0046) },
+ {}
+};
+
+/* USB Device data */
+static struct usb_driver imon_driver = {
+ .name = MOD_NAME,
+ .probe = imon_probe,
+ .disconnect = imon_disconnect,
+ .suspend = imon_suspend,
+ .resume = imon_resume,
+ .id_table = imon_usb_id_table,
+};
+
+static struct usb_class_driver imon_vfd_class = {
+ .name = DEVICE_NAME,
+ .fops = &vfd_fops,
+ .minor_base = DISPLAY_MINOR_BASE,
+};
+
+static struct usb_class_driver imon_lcd_class = {
+ .name = DEVICE_NAME,
+ .fops = &lcd_fops,
+ .minor_base = DISPLAY_MINOR_BASE,
+};
+
+/* imon receiver front panel/knob key table */
+static const struct {
+ u64 hw_code;
+ u32 keycode;
+} imon_panel_key_table[] = {
+ { 0x000000000f00ffeell, KEY_PROG1 }, /* Go */
+ { 0x000000001f00ffeell, KEY_AUDIO },
+ { 0x000000002000ffeell, KEY_VIDEO },
+ { 0x000000002100ffeell, KEY_CAMERA },
+ { 0x000000002700ffeell, KEY_DVD },
+ { 0x000000002300ffeell, KEY_TV },
+ { 0x000000000500ffeell, KEY_PREVIOUS },
+ { 0x000000000700ffeell, KEY_REWIND },
+ { 0x000000000400ffeell, KEY_STOP },
+ { 0x000000003c00ffeell, KEY_PLAYPAUSE },
+ { 0x000000000800ffeell, KEY_FASTFORWARD },
+ { 0x000000000600ffeell, KEY_NEXT },
+ { 0x000000010000ffeell, KEY_RIGHT },
+ { 0x000001000000ffeell, KEY_LEFT },
+ { 0x000000003d00ffeell, KEY_SELECT },
+ { 0x000100000000ffeell, KEY_VOLUMEUP },
+ { 0x010000000000ffeell, KEY_VOLUMEDOWN },
+ { 0x000000000100ffeell, KEY_MUTE },
+ /* iMON Knob values */
+ { 0x000100ffffffffeell, KEY_VOLUMEUP },
+ { 0x010000ffffffffeell, KEY_VOLUMEDOWN },
+ { 0x000008ffffffffeell, KEY_MUTE },
+};
+
+/* to prevent races between open() and disconnect(), probing, etc */
+static DEFINE_MUTEX(driver_lock);
+
+/* Module bookkeeping bits */
+MODULE_AUTHOR(MOD_AUTHOR);
+MODULE_DESCRIPTION(MOD_DESC);
+MODULE_VERSION(MOD_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(usb, imon_usb_id_table);
+
+static bool debug;
+module_param(debug, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug messages: 0=no, 1=yes (default: no)");
+
+/* lcd, vfd, vga or none? should be auto-detected, but can be overridden... */
+static int display_type;
+module_param(display_type, int, S_IRUGO);
+MODULE_PARM_DESC(display_type, "Type of attached display. 0=autodetect, "
+ "1=vfd, 2=lcd, 3=vga, 4=none (default: autodetect)");
+
+static int pad_stabilize = 1;
+module_param(pad_stabilize, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(pad_stabilize, "Apply stabilization algorithm to iMON PAD "
+ "presses in arrow key mode. 0=disable, 1=enable (default).");
+
+/*
+ * In certain use cases, mouse mode isn't really helpful, and could actually
+ * cause confusion, so allow disabling it when the IR device is open.
+ */
+static bool nomouse;
+module_param(nomouse, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(nomouse, "Disable mouse input device mode when IR device is "
+ "open. 0=don't disable, 1=disable. (default: don't disable)");
+
+/* threshold at which a pad push registers as an arrow key in kbd mode */
+static int pad_thresh;
+module_param(pad_thresh, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(pad_thresh, "Threshold at which a pad push registers as an "
+ "arrow key in kbd mode (default: 28)");
+
+
+static void free_imon_context(struct imon_context *ictx)
+{
+ struct device *dev = ictx->dev;
+
+ usb_free_urb(ictx->tx_urb);
+ usb_free_urb(ictx->rx_urb_intf0);
+ usb_free_urb(ictx->rx_urb_intf1);
+ kfree(ictx);
+
+ dev_dbg(dev, "%s: iMON context freed\n", __func__);
+}
+
+/**
+ * Called when the Display device (e.g. /dev/lcd0)
+ * is opened by the application.
+ */
+static int display_open(struct inode *inode, struct file *file)
+{
+ struct usb_interface *interface;
+ struct imon_context *ictx = NULL;
+ int subminor;
+ int retval = 0;
+
+ /* prevent races with disconnect */
+ mutex_lock(&driver_lock);
+
+ subminor = iminor(inode);
+ interface = usb_find_interface(&imon_driver, subminor);
+ if (!interface) {
+ err("%s: could not find interface for minor %d",
+ __func__, subminor);
+ retval = -ENODEV;
+ goto exit;
+ }
+ ictx = usb_get_intfdata(interface);
+
+ if (!ictx) {
+ err("%s: no context found for minor %d", __func__, subminor);
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ mutex_lock(&ictx->lock);
+
+ if (!ictx->display_supported) {
+ err("%s: display not supported by device", __func__);
+ retval = -ENODEV;
+ } else if (ictx->display_isopen) {
+ err("%s: display port is already open", __func__);
+ retval = -EBUSY;
+ } else {
+ ictx->display_isopen = 1;
+ file->private_data = ictx;
+ dev_dbg(ictx->dev, "display port opened\n");
+ }
+
+ mutex_unlock(&ictx->lock);
+
+exit:
+ mutex_unlock(&driver_lock);
+ return retval;
+}
+
+/**
+ * Called when the display device (e.g. /dev/lcd0)
+ * is closed by the application.
+ */
+static int display_close(struct inode *inode, struct file *file)
+{
+ struct imon_context *ictx = NULL;
+ int retval = 0;
+
+ ictx = (struct imon_context *)file->private_data;
+
+ if (!ictx) {
+ err("%s: no context for device", __func__);
+ return -ENODEV;
+ }
+
+ mutex_lock(&ictx->lock);
+
+ if (!ictx->display_supported) {
+ err("%s: display not supported by device", __func__);
+ retval = -ENODEV;
+ } else if (!ictx->display_isopen) {
+ err("%s: display is not open", __func__);
+ retval = -EIO;
+ } else {
+ ictx->display_isopen = 0;
+ dev_dbg(ictx->dev, "display port closed\n");
+ if (!ictx->dev_present_intf0) {
+ /*
+ * Device disconnected before close and IR port is not
+ * open. If IR port is open, context will be deleted by
+ * ir_close.
+ */
+ mutex_unlock(&ictx->lock);
+ free_imon_context(ictx);
+ return retval;
+ }
+ }
+
+ mutex_unlock(&ictx->lock);
+ return retval;
+}
+
+/**
+ * Sends a packet to the device -- this function must be called
+ * with ictx->lock held.
+ */
+static int send_packet(struct imon_context *ictx)
+{
+ unsigned int pipe;
+ unsigned long timeout;
+ int interval = 0;
+ int retval = 0;
+ struct usb_ctrlrequest *control_req = NULL;
+
+ /* Check if we need to use control or interrupt urb */
+ if (!ictx->tx_control) {
+ pipe = usb_sndintpipe(ictx->usbdev_intf0,
+ ictx->tx_endpoint->bEndpointAddress);
+ interval = ictx->tx_endpoint->bInterval;
+
+ usb_fill_int_urb(ictx->tx_urb, ictx->usbdev_intf0, pipe,
+ ictx->usb_tx_buf,
+ sizeof(ictx->usb_tx_buf),
+ usb_tx_callback, ictx, interval);
+
+ ictx->tx_urb->actual_length = 0;
+ } else {
+ /* fill request into kmalloc'ed space: */
+ control_req = kmalloc(sizeof(struct usb_ctrlrequest),
+ GFP_KERNEL);
+ if (control_req == NULL)
+ return -ENOMEM;
+
+ /* setup packet is '21 09 0200 0001 0008' */
+ control_req->bRequestType = 0x21;
+ control_req->bRequest = 0x09;
+ control_req->wValue = cpu_to_le16(0x0200);
+ control_req->wIndex = cpu_to_le16(0x0001);
+ control_req->wLength = cpu_to_le16(0x0008);
+
+ /* control pipe is endpoint 0x00 */
+ pipe = usb_sndctrlpipe(ictx->usbdev_intf0, 0);
+
+ /* build the control urb */
+ usb_fill_control_urb(ictx->tx_urb, ictx->usbdev_intf0,
+ pipe, (unsigned char *)control_req,
+ ictx->usb_tx_buf,
+ sizeof(ictx->usb_tx_buf),
+ usb_tx_callback, ictx);
+ ictx->tx_urb->actual_length = 0;
+ }
+
+ init_completion(&ictx->tx.finished);
+ ictx->tx.busy = 1;
+ smp_rmb(); /* ensure later readers know we're busy */
+
+ retval = usb_submit_urb(ictx->tx_urb, GFP_KERNEL);
+ if (retval) {
+ ictx->tx.busy = 0;
+ smp_rmb(); /* ensure later readers know we're not busy */
+ err("%s: error submitting urb(%d)", __func__, retval);
+ } else {
+ /* Wait for transmission to complete (or abort) */
+ mutex_unlock(&ictx->lock);
+ retval = wait_for_completion_interruptible(
+ &ictx->tx.finished);
+ if (retval)
+ err("%s: task interrupted", __func__);
+ mutex_lock(&ictx->lock);
+
+ retval = ictx->tx.status;
+ if (retval)
+ err("%s: packet tx failed (%d)", __func__, retval);
+ }
+
+ kfree(control_req);
+
+ /*
+ * Induce a mandatory 5ms delay before returning, as otherwise,
+ * send_packet can get called so rapidly as to overwhelm the device,
+ * particularly on faster systems and/or those with quirky usb.
+ */
+ timeout = msecs_to_jiffies(5);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(timeout);
+
+ return retval;
+}
+
+/**
+ * Sends an associate packet to the iMON 2.4G.
+ *
+ * This might not be such a good idea, since it has an ID collision with
+ * some versions of the "IR & VFD" combo. The only way to determine if it
+ * is an RF version is to look at the product description string (which
+ * we currently do not fetch).
+ */
+static int send_associate_24g(struct imon_context *ictx)
+{
+ int retval;
+ const unsigned char packet[8] = { 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x20 };
+
+ if (!ictx) {
+ err("%s: no context for device", __func__);
+ return -ENODEV;
+ }
+
+ if (!ictx->dev_present_intf0) {
+ err("%s: no iMON device present", __func__);
+ return -ENODEV;
+ }
+
+ memcpy(ictx->usb_tx_buf, packet, sizeof(packet));
+ retval = send_packet(ictx);
+
+ return retval;
+}
+
+/**
+ * Sends the packets to set up and show the clock on the iMON display.
+ *
+ * Arguments: year - last 2 digits of year, month - 1..12,
+ * day - 1..31, dow - day of the week (0-Sun...6-Sat),
+ * hour - 0..23, minute - 0..59, second - 0..59
+ */
+static int send_set_imon_clock(struct imon_context *ictx,
+ unsigned int year, unsigned int month,
+ unsigned int day, unsigned int dow,
+ unsigned int hour, unsigned int minute,
+ unsigned int second)
+{
+ unsigned char clock_enable_pkt[IMON_CLOCK_ENABLE_PACKETS][8];
+ int retval = 0;
+ int i;
+
+ if (!ictx) {
+ err("%s: no context for device", __func__);
+ return -ENODEV;
+ }
+
+ switch (ictx->display_type) {
+ case IMON_DISPLAY_TYPE_LCD:
+ clock_enable_pkt[0][0] = 0x80;
+ clock_enable_pkt[0][1] = year;
+ clock_enable_pkt[0][2] = month-1;
+ clock_enable_pkt[0][3] = day;
+ clock_enable_pkt[0][4] = hour;
+ clock_enable_pkt[0][5] = minute;
+ clock_enable_pkt[0][6] = second;
+
+ clock_enable_pkt[1][0] = 0x80;
+ clock_enable_pkt[1][1] = 0;
+ clock_enable_pkt[1][2] = 0;
+ clock_enable_pkt[1][3] = 0;
+ clock_enable_pkt[1][4] = 0;
+ clock_enable_pkt[1][5] = 0;
+ clock_enable_pkt[1][6] = 0;
+
+ if (ictx->product == 0xffdc) {
+ clock_enable_pkt[0][7] = 0x50;
+ clock_enable_pkt[1][7] = 0x51;
+ } else {
+ clock_enable_pkt[0][7] = 0x88;
+ clock_enable_pkt[1][7] = 0x8a;
+ }
+
+ break;
+
+ case IMON_DISPLAY_TYPE_VFD:
+ clock_enable_pkt[0][0] = year;
+ clock_enable_pkt[0][1] = month-1;
+ clock_enable_pkt[0][2] = day;
+ clock_enable_pkt[0][3] = dow;
+ clock_enable_pkt[0][4] = hour;
+ clock_enable_pkt[0][5] = minute;
+ clock_enable_pkt[0][6] = second;
+ clock_enable_pkt[0][7] = 0x40;
+
+ clock_enable_pkt[1][0] = 0;
+ clock_enable_pkt[1][1] = 0;
+ clock_enable_pkt[1][2] = 1;
+ clock_enable_pkt[1][3] = 0;
+ clock_enable_pkt[1][4] = 0;
+ clock_enable_pkt[1][5] = 0;
+ clock_enable_pkt[1][6] = 0;
+ clock_enable_pkt[1][7] = 0x42;
+
+ break;
+
+ default:
+ return -ENODEV;
+ }
+
+ for (i = 0; i < IMON_CLOCK_ENABLE_PACKETS; i++) {
+ memcpy(ictx->usb_tx_buf, clock_enable_pkt[i], 8);
+ retval = send_packet(ictx);
+ if (retval) {
+ err("%s: send_packet failed for packet %d",
+ __func__, i);
+ break;
+ }
+ }
+
+ return retval;
+}
+
+/**
+ * These are the sysfs functions to handle the association on the iMON 2.4G LT.
+ */
+static ssize_t show_associate_remote(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct imon_context *ictx = dev_get_drvdata(d);
+
+ if (!ictx)
+ return -ENODEV;
+
+ mutex_lock(&ictx->lock);
+ if (ictx->rf_isassociating)
+ strcpy(buf, "associating\n");
+ else
+ strcpy(buf, "closed\n");
+
+ dev_info(d, "Visit http://www.lirc.org/html/imon-24g.html for "
+ "instructions on how to associate your iMON 2.4G DT/LT "
+ "remote\n");
+ mutex_unlock(&ictx->lock);
+ return strlen(buf);
+}
+
+static ssize_t store_associate_remote(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct imon_context *ictx;
+
+ ictx = dev_get_drvdata(d);
+
+ if (!ictx)
+ return -ENODEV;
+
+ mutex_lock(&ictx->lock);
+ ictx->rf_isassociating = 1;
+ send_associate_24g(ictx);
+ mutex_unlock(&ictx->lock);
+
+ return count;
+}
+
+/**
+ * sysfs functions to control internal imon clock
+ */
+static ssize_t show_imon_clock(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct imon_context *ictx = dev_get_drvdata(d);
+ size_t len;
+
+ if (!ictx)
+ return -ENODEV;
+
+ mutex_lock(&ictx->lock);
+
+ if (!ictx->display_supported) {
+ len = snprintf(buf, PAGE_SIZE, "Not supported.");
+ } else {
+ len = snprintf(buf, PAGE_SIZE,
+ "To set the clock on your iMON display:\n"
+ "# date \"+%%y %%m %%d %%w %%H %%M %%S\" > imon_clock\n"
+ "%s", ictx->display_isopen ?
+ "\nNOTE: imon device must be closed\n" : "");
+ }
+
+ mutex_unlock(&ictx->lock);
+
+ return len;
+}
+
+static ssize_t store_imon_clock(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct imon_context *ictx = dev_get_drvdata(d);
+ ssize_t retval;
+ unsigned int year, month, day, dow, hour, minute, second;
+
+ if (!ictx)
+ return -ENODEV;
+
+ mutex_lock(&ictx->lock);
+
+ if (!ictx->display_supported) {
+ retval = -ENODEV;
+ goto exit;
+ } else if (ictx->display_isopen) {
+ retval = -EBUSY;
+ goto exit;
+ }
+
+ if (sscanf(buf, "%u %u %u %u %u %u %u", &year, &month, &day, &dow,
+ &hour, &minute, &second) != 7) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ if ((month < 1 || month > 12) ||
+ (day < 1 || day > 31) || (dow > 6) ||
+ (hour > 23) || (minute > 59) || (second > 59)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ retval = send_set_imon_clock(ictx, year, month, day, dow,
+ hour, minute, second);
+ if (retval)
+ goto exit;
+
+ retval = count;
+exit:
+ mutex_unlock(&ictx->lock);
+
+ return retval;
+}
+
+
+static DEVICE_ATTR(imon_clock, S_IWUSR | S_IRUGO, show_imon_clock,
+ store_imon_clock);
+
+static DEVICE_ATTR(associate_remote, S_IWUSR | S_IRUGO, show_associate_remote,
+ store_associate_remote);
+
+static struct attribute *imon_display_sysfs_entries[] = {
+ &dev_attr_imon_clock.attr,
+ NULL
+};
+
+static struct attribute_group imon_display_attribute_group = {
+ .attrs = imon_display_sysfs_entries
+};
+
+static struct attribute *imon_rf_sysfs_entries[] = {
+ &dev_attr_associate_remote.attr,
+ NULL
+};
+
+static struct attribute_group imon_rf_attribute_group = {
+ .attrs = imon_rf_sysfs_entries
+};
+
+/**
+ * Writes data to the VFD. The iMON VFD is 2x16 characters
+ * and requires data in 5 consecutive USB interrupt packets,
+ * each packet but the last carrying 7 bytes.
+ *
+ * I don't know if the VFD board supports features such as
+ * scrolling, clearing rows, blanking, etc., so the caller must
+ * provide a full screen of data. If fewer than 32 bytes are
+ * provided, spaces will be appended to generate a full screen.
+ */
+static ssize_t vfd_write(struct file *file, const char *buf,
+ size_t n_bytes, loff_t *pos)
+{
+ int i;
+ int offset;
+ int seq;
+ int retval = 0;
+ struct imon_context *ictx;
+ const unsigned char vfd_packet6[] = {
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF };
+
+ ictx = (struct imon_context *)file->private_data;
+ if (!ictx) {
+ err("%s: no context for device", __func__);
+ return -ENODEV;
+ }
+
+ mutex_lock(&ictx->lock);
+
+ if (!ictx->dev_present_intf0) {
+ err("%s: no iMON device present", __func__);
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ if (n_bytes <= 0 || n_bytes > 32) {
+ err("%s: invalid payload size", __func__);
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ if (copy_from_user(ictx->tx.data_buf, buf, n_bytes)) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ /* Pad with spaces */
+ for (i = n_bytes; i < 32; ++i)
+ ictx->tx.data_buf[i] = ' ';
+
+ for (i = 32; i < 35; ++i)
+ ictx->tx.data_buf[i] = 0xFF;
+
+ offset = 0;
+ seq = 0;
+
+ do {
+ memcpy(ictx->usb_tx_buf, ictx->tx.data_buf + offset, 7);
+ ictx->usb_tx_buf[7] = (unsigned char) seq;
+
+ retval = send_packet(ictx);
+ if (retval) {
+ err("%s: send packet failed for packet #%d",
+ __func__, seq/2);
+ goto exit;
+ } else {
+ seq += 2;
+ offset += 7;
+ }
+
+ } while (offset < 35);
+
+ /* Send packet #6 */
+ memcpy(ictx->usb_tx_buf, &vfd_packet6, sizeof(vfd_packet6));
+ ictx->usb_tx_buf[7] = (unsigned char) seq;
+ retval = send_packet(ictx);
+ if (retval)
+ err("%s: send packet failed for packet #%d",
+ __func__, seq / 2);
+
+exit:
+ mutex_unlock(&ictx->lock);
+
+ return (!retval) ? n_bytes : retval;
+}
+
+/**
+ * Writes data to the LCD. The iMON OEM LCD screen expects 8-byte
+ * packets, which we pass straight through from userspace, so each
+ * write must be exactly 8 bytes of raw packet data (the actual
+ * binary format is a bit complicated).
+ *
+ * The device itself is not a "traditional" text-mode display. It's
+ * actually a 16x96 pixel bitmap display. That means if you want to
+ * display text, you've got to have your own "font" and translate the
+ * text into bitmaps for display. This is really flexible (you can
+ * display whatever diacritics you need, and so on), but it's also
+ * a lot more complicated than most LCDs...
+ */
+static ssize_t lcd_write(struct file *file, const char *buf,
+ size_t n_bytes, loff_t *pos)
+{
+ int retval = 0;
+ struct imon_context *ictx;
+
+ ictx = (struct imon_context *)file->private_data;
+ if (!ictx) {
+ err("%s: no context for device", __func__);
+ return -ENODEV;
+ }
+
+ mutex_lock(&ictx->lock);
+
+ if (!ictx->display_supported) {
+ err("%s: no iMON display present", __func__);
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ if (n_bytes != 8) {
+ err("%s: invalid payload size: %d (expecting 8)",
+ __func__, (int) n_bytes);
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ if (copy_from_user(ictx->usb_tx_buf, buf, 8)) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ retval = send_packet(ictx);
+ if (retval) {
+ err("%s: send packet failed!", __func__);
+ goto exit;
+ } else {
+ dev_dbg(ictx->dev, "%s: write %d bytes to LCD\n",
+ __func__, (int) n_bytes);
+ }
+exit:
+ mutex_unlock(&ictx->lock);
+ return (!retval) ? n_bytes : retval;
+}
+
+/**
+ * Callback function for USB core API: transmit data
+ */
+static void usb_tx_callback(struct urb *urb)
+{
+ struct imon_context *ictx;
+
+ if (!urb)
+ return;
+ ictx = (struct imon_context *)urb->context;
+ if (!ictx)
+ return;
+
+ ictx->tx.status = urb->status;
+
+ /* notify waiters that write has finished */
+ ictx->tx.busy = 0;
+ smp_rmb(); /* ensure later readers know we're not busy */
+ complete(&ictx->tx.finished);
+}
+
+/**
+ * mce/rc6 keypresses have no distinct release code, so we use a timer
+ */
+static void imon_mce_timeout(unsigned long data)
+{
+ struct imon_context *ictx = (struct imon_context *)data;
+
+ input_report_key(ictx->idev, ictx->last_keycode, 0);
+ input_sync(ictx->idev);
+}
+
+/**
+ * report touchscreen input
+ */
+static void imon_touch_display_timeout(unsigned long data)
+{
+ struct imon_context *ictx = (struct imon_context *)data;
+
+ if (ictx->display_type != IMON_DISPLAY_TYPE_VGA)
+ return;
+
+ input_report_abs(ictx->touch, ABS_X, ictx->touch_x);
+ input_report_abs(ictx->touch, ABS_Y, ictx->touch_y);
+ input_report_key(ictx->touch, BTN_TOUCH, 0x00);
+ input_sync(ictx->touch);
+}
+
+/**
+ * iMON IR receivers support two different signal sets -- those used by
+ * the iMON remotes, and those used by the Windows MCE remotes (which is
+ * really just RC-6), but only one or the other at a time, as the signals
+ * are decoded onboard the receiver.
+ */
+int imon_ir_change_protocol(void *priv, u64 ir_type)
+{
+ int retval;
+ struct imon_context *ictx = priv;
+ struct device *dev = ictx->dev;
+ bool pad_mouse;
+ unsigned char ir_proto_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86 };
+
+ if (ir_type && !(ir_type & ictx->props->allowed_protos))
+ dev_warn(dev, "Looks like you're trying to use an IR protocol "
+ "this device does not support\n");
+
+ switch (ir_type) {
+ case IR_TYPE_RC6:
+ dev_dbg(dev, "Configuring IR receiver for MCE protocol\n");
+ ir_proto_packet[0] = 0x01;
+ pad_mouse = false;
+ init_timer(&ictx->itimer);
+ ictx->itimer.data = (unsigned long)ictx;
+ ictx->itimer.function = imon_mce_timeout;
+ break;
+ case IR_TYPE_UNKNOWN:
+ case IR_TYPE_OTHER:
+ dev_dbg(dev, "Configuring IR receiver for iMON protocol\n");
+ if (pad_stabilize)
+ pad_mouse = true;
+ else {
+ dev_dbg(dev, "PAD stabilize functionality disabled\n");
+ pad_mouse = false;
+ }
+ /* ir_proto_packet[0] = 0x00; // already the default */
+ ir_type = IR_TYPE_OTHER;
+ break;
+ default:
+ dev_warn(dev, "Unsupported IR protocol specified, overriding "
+ "to iMON IR protocol\n");
+ if (pad_stabilize)
+ pad_mouse = true;
+ else {
+ dev_dbg(dev, "PAD stabilize functionality disabled\n");
+ pad_mouse = false;
+ }
+ /* ir_proto_packet[0] = 0x00; // already the default */
+ ir_type = IR_TYPE_OTHER;
+ break;
+ }
+
+ memcpy(ictx->usb_tx_buf, &ir_proto_packet, sizeof(ir_proto_packet));
+
+ retval = send_packet(ictx);
+ if (retval)
+ goto out;
+
+ ictx->ir_type = ir_type;
+ ictx->pad_mouse = pad_mouse;
+
+out:
+ return retval;
+}
+
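+/*
+ * Rough difference between two timevals (a - b), in milliseconds;
+ * negative results (e.g. after a clock step) are replaced with 1000.
+ */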
+static inline int tv2int(const struct timeval *a, const struct timeval *b)
+{
+ int usecs = 0;
+ int sec = 0;
+
+ if (b->tv_usec > a->tv_usec) {
+ usecs = 1000000;
+ sec--;
+ }
+
+ usecs += a->tv_usec - b->tv_usec;
+
+ sec += a->tv_sec - b->tv_sec;
+ sec *= 1000;
+ usecs /= 1000;
+ sec += usecs;
+
+ if (sec < 0)
+ sec = 1000;
+
+ return sec;
+}
+
+/**
+ * The directional pad behaves a bit differently, depending on whether this is
+ * one of the older ffdc devices or a newer device. Newer devices appear to
+ * have a higher resolution matrix for more precise mouse movement, but it
+ * makes things overly sensitive in keyboard mode, so we do some interesting
+ * contortions to make it less touchy. Older devices run through the same
+ * routine with a shorter timeout and a smaller threshold.
+ */
+static int stabilize(int a, int b, u16 timeout, u16 threshold)
+{
+ struct timeval ct;
+ static struct timeval prev_time = {0, 0};
+ static struct timeval hit_time = {0, 0};
+ static int x, y, prev_result, hits;
+ int result = 0;
+ int msec, msec_hit;
+
+ do_gettimeofday(&ct);
+ msec = tv2int(&ct, &prev_time);
+ msec_hit = tv2int(&ct, &hit_time);
+
+ if (msec > 100) {
+ x = 0;
+ y = 0;
+ hits = 0;
+ }
+
+ x += a;
+ y += b;
+
+ prev_time = ct;
+
+ if (abs(x) > threshold || abs(y) > threshold) {
+ if (abs(y) > abs(x))
+ result = (y > 0) ? 0x7F : 0x80;
+ else
+ result = (x > 0) ? 0x7F00 : 0x8000;
+
+ x = 0;
+ y = 0;
+
+ if (result == prev_result) {
+ hits++;
+
+ if (hits > 3) {
+ switch (result) {
+ case 0x7F:
+ y = 17 * threshold / 30;
+ break;
+ case 0x80:
+ y -= 17 * threshold / 30;
+ break;
+ case 0x7F00:
+ x = 17 * threshold / 30;
+ break;
+ case 0x8000:
+ x -= 17 * threshold / 30;
+ break;
+ }
+ }
+
+ if (hits == 2 && msec_hit < timeout) {
+ result = 0;
+ hits = 1;
+ }
+ } else {
+ prev_result = result;
+ hits = 1;
+ hit_time = ct;
+ }
+ }
+
+ return result;
+}
+
+static u32 imon_remote_key_lookup(struct imon_context *ictx, u32 hw_code)
+{
+ u32 scancode = be32_to_cpu(hw_code);
+ u32 keycode;
+ u32 release;
+ bool is_release_code = false;
+
+ /* Look for the initial press of a button */
+ keycode = ir_g_keycode_from_table(ictx->idev, scancode);
+
+ /* Look for the release of a button */
+ if (keycode == KEY_RESERVED) {
+ release = scancode & ~0x4000;
+ keycode = ir_g_keycode_from_table(ictx->idev, release);
+ if (keycode != KEY_RESERVED)
+ is_release_code = true;
+ }
+
+ ictx->release_code = is_release_code;
+
+ return keycode;
+}
+
+static u32 imon_mce_key_lookup(struct imon_context *ictx, u32 hw_code)
+{
+ u32 scancode = be32_to_cpu(hw_code);
+ u32 keycode;
+
+#define MCE_KEY_MASK 0x7000
+#define MCE_TOGGLE_BIT 0x8000
+
+ /*
+ * On some receivers, mce keys decode to 0x8000f04xx and 0x8000f84xx
+ * (the toggle bit flipping between alternating key presses), while
+ * on other receivers, we see 0x8000f74xx and 0x8000ff4xx. To keep
+ * the table trim, we always OR in the bits to look up 0x8000ff4xx,
+ * but we can't OR them into all codes, as some keys are decoded in
+ * a different way without the same use of the toggle bit...
+ */
+ if ((scancode >> 24) & 0x80)
+ scancode = scancode | MCE_KEY_MASK | MCE_TOGGLE_BIT;
+
+ keycode = ir_g_keycode_from_table(ictx->idev, scancode);
+
+ return keycode;
+}
+
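+/* look up a front panel/knob press in imon_panel_key_table */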
+static u32 imon_panel_key_lookup(u64 hw_code)
+{
+ int i;
+ u64 code = be64_to_cpu(hw_code);
+ u32 keycode;
+
+ for (i = 0; i < ARRAY_SIZE(imon_panel_key_table); i++)
+ if (imon_panel_key_table[i].hw_code == (code | 0xffee))
+ break;
+
+ keycode = imon_panel_key_table[i % IMON_KEY_RELEASE_OFFSET].keycode;
+
+ return keycode;
+}
+
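+/*
+ * Translate pad/mouse-button packets into relative mouse events and
+ * button presses; returns true if the data was consumed as mouse input.
+ */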
+static bool imon_mouse_event(struct imon_context *ictx,
+ unsigned char *buf, int len)
+{
+ char rel_x = 0x00, rel_y = 0x00;
+ u8 right_shift = 1;
+ bool mouse_input = 1;
+ int dir = 0;
+
+ /* newer iMON device PAD or mouse button */
+ if (ictx->product != 0xffdc && (buf[0] & 0x01) && len == 5) {
+ rel_x = buf[2];
+ rel_y = buf[3];
+ right_shift = 1;
+ /* 0xffdc iMON PAD or mouse button input */
+ } else if (ictx->product == 0xffdc && (buf[0] & 0x40) &&
+ !((buf[1] & 0x01) || ((buf[1] >> 2) & 0x01))) {
+ rel_x = (buf[1] & 0x08) | (buf[1] & 0x10) >> 2 |
+ (buf[1] & 0x20) >> 4 | (buf[1] & 0x40) >> 6;
+ if (buf[0] & 0x02)
+ rel_x |= ~0x0f;
+ rel_x = rel_x + rel_x / 2;
+ rel_y = (buf[2] & 0x08) | (buf[2] & 0x10) >> 2 |
+ (buf[2] & 0x20) >> 4 | (buf[2] & 0x40) >> 6;
+ if (buf[0] & 0x01)
+ rel_y |= ~0x0f;
+ rel_y = rel_y + rel_y / 2;
+ right_shift = 2;
+ /* some ffdc devices decode mouse buttons differently... */
+ } else if (ictx->product == 0xffdc && (buf[0] == 0x68)) {
+ right_shift = 2;
+ /* ch+/- buttons, which we use for an emulated scroll wheel */
+ } else if (ictx->kc == KEY_CHANNELUP && (buf[2] & 0x40) != 0x40) {
+ dir = 1;
+ } else if (ictx->kc == KEY_CHANNELDOWN && (buf[2] & 0x40) != 0x40) {
+ dir = -1;
+ } else
+ mouse_input = 0;
+
+ if (mouse_input) {
+ dev_dbg(ictx->dev, "sending mouse data via input subsystem\n");
+
+ if (dir) {
+ input_report_rel(ictx->idev, REL_WHEEL, dir);
+ } else if (rel_x || rel_y) {
+ input_report_rel(ictx->idev, REL_X, rel_x);
+ input_report_rel(ictx->idev, REL_Y, rel_y);
+ } else {
+ input_report_key(ictx->idev, BTN_LEFT, buf[1] & 0x1);
+ input_report_key(ictx->idev, BTN_RIGHT,
+ buf[1] >> right_shift & 0x1);
+ }
+ input_sync(ictx->idev);
+ ictx->last_keycode = ictx->kc;
+ }
+
+ return mouse_input;
+}
+
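+/* report a touchscreen contact and re-arm the touch release timer */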
+static void imon_touch_event(struct imon_context *ictx, unsigned char *buf)
+{
+ mod_timer(&ictx->ttimer, jiffies + TOUCH_TIMEOUT);
+ ictx->touch_x = (buf[0] << 4) | (buf[1] >> 4);
+ ictx->touch_y = 0xfff - ((buf[2] << 4) | (buf[1] & 0xf));
+ input_report_abs(ictx->touch, ABS_X, ictx->touch_x);
+ input_report_abs(ictx->touch, ABS_Y, ictx->touch_y);
+ input_report_key(ictx->touch, BTN_TOUCH, 0x01);
+ input_sync(ictx->touch);
+}
+
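+/* down-sample directional pad coordinates into arrow key codes in ictx->kc */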
+static void imon_pad_to_keys(struct imon_context *ictx, unsigned char *buf)
+{
+ int dir = 0;
+ char rel_x = 0x00, rel_y = 0x00;
+ u16 timeout, threshold;
+ u64 temp_key;
+ u32 remote_key;
+
+ /*
+ * The imon directional pad functions more like a touchpad. Bytes 3 & 4
+ * contain a position coordinate (x,y), with each component ranging
+ * from -14 to 14. We want to down-sample this to only 4 discrete values
+ * for up/down/left/right arrow keys. Also, when you get too close to
+	 * diagonals, it has a tendency to jump back and forth, so let's try to
+	 * ignore the input when it gets too close to a diagonal.
+ */
+ if (ictx->product != 0xffdc) {
+ /* first, pad to 8 bytes so it conforms with everything else */
+ buf[5] = buf[6] = buf[7] = 0;
+ timeout = 500; /* in msecs */
+ /* (2*threshold) x (2*threshold) square */
+ threshold = pad_thresh ? pad_thresh : 28;
+ rel_x = buf[2];
+ rel_y = buf[3];
+
+ if (ictx->ir_type == IR_TYPE_OTHER && pad_stabilize) {
+ if ((buf[1] == 0) && ((rel_x != 0) || (rel_y != 0))) {
+ dir = stabilize((int)rel_x, (int)rel_y,
+ timeout, threshold);
+ if (!dir) {
+ ictx->kc = KEY_UNKNOWN;
+ return;
+ }
+ buf[2] = dir & 0xFF;
+ buf[3] = (dir >> 8) & 0xFF;
+ memcpy(&temp_key, buf, sizeof(temp_key));
+ remote_key = (u32) (le64_to_cpu(temp_key)
+ & 0xffffffff);
+ ictx->kc = imon_remote_key_lookup(ictx,
+ remote_key);
+ }
+ } else {
+ if (abs(rel_y) > abs(rel_x)) {
+ buf[2] = (rel_y > 0) ? 0x7F : 0x80;
+ buf[3] = 0;
+ ictx->kc = (rel_y > 0) ? KEY_DOWN : KEY_UP;
+ } else {
+ buf[2] = 0;
+ buf[3] = (rel_x > 0) ? 0x7F : 0x80;
+ ictx->kc = (rel_x > 0) ? KEY_RIGHT : KEY_LEFT;
+ }
+ }
+
+ /*
+ * Handle on-board decoded pad events for e.g. older VFD/iMON-Pad
+ * device (15c2:ffdc). The remote generates various codes from
+ * 0x68nnnnB7 to 0x6AnnnnB7, the left mouse button generates
+ * 0x688301b7 and the right one 0x688481b7. All other keys generate
+ * 0x2nnnnnnn. Position coordinate is encoded in buf[1] and buf[2] with
+	 * reversed endianness. Extract direction from buffer, rotate endianness,
+ * adjust sign and feed the values into stabilize(). The resulting codes
+ * will be 0x01008000, 0x01007F00, which match the newer devices.
+ */
+ } else {
+ timeout = 10; /* in msecs */
+ /* (2*threshold) x (2*threshold) square */
+ threshold = pad_thresh ? pad_thresh : 15;
+
+ /* buf[1] is x */
+ rel_x = (buf[1] & 0x08) | (buf[1] & 0x10) >> 2 |
+ (buf[1] & 0x20) >> 4 | (buf[1] & 0x40) >> 6;
+ if (buf[0] & 0x02)
+ rel_x |= ~0x10+1;
+ /* buf[2] is y */
+ rel_y = (buf[2] & 0x08) | (buf[2] & 0x10) >> 2 |
+ (buf[2] & 0x20) >> 4 | (buf[2] & 0x40) >> 6;
+ if (buf[0] & 0x01)
+ rel_y |= ~0x10+1;
+
+ buf[0] = 0x01;
+ buf[1] = buf[4] = buf[5] = buf[6] = buf[7] = 0;
+
+ if (ictx->ir_type == IR_TYPE_OTHER && pad_stabilize) {
+ dir = stabilize((int)rel_x, (int)rel_y,
+ timeout, threshold);
+ if (!dir) {
+ ictx->kc = KEY_UNKNOWN;
+ return;
+ }
+ buf[2] = dir & 0xFF;
+ buf[3] = (dir >> 8) & 0xFF;
+ memcpy(&temp_key, buf, sizeof(temp_key));
+ remote_key = (u32) (le64_to_cpu(temp_key) & 0xffffffff);
+ ictx->kc = imon_remote_key_lookup(ictx, remote_key);
+ } else {
+ if (abs(rel_y) > abs(rel_x)) {
+ buf[2] = (rel_y > 0) ? 0x7F : 0x80;
+ buf[3] = 0;
+ ictx->kc = (rel_y > 0) ? KEY_DOWN : KEY_UP;
+ } else {
+ buf[2] = 0;
+ buf[3] = (rel_x > 0) ? 0x7F : 0x80;
+ ictx->kc = (rel_x > 0) ? KEY_RIGHT : KEY_LEFT;
+ }
+ }
+ }
+}
+
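+/*
+ * Classify the decoded key data: returns 0 for a key release, 1 for an
+ * initial press, 2 for an MCE repeat, or a negative value if the data
+ * isn't valid key input.
+ */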
+static int imon_parse_press_type(struct imon_context *ictx,
+ unsigned char *buf, u8 ktype)
+{
+ int press_type = 0;
+ int rep_delay = ictx->idev->rep[REP_DELAY];
+ int rep_period = ictx->idev->rep[REP_PERIOD];
+
+ /* key release of 0x02XXXXXX key */
+ if (ictx->kc == KEY_RESERVED && buf[0] == 0x02 && buf[3] == 0x00)
+ ictx->kc = ictx->last_keycode;
+
+ /* mouse button release on (some) 0xffdc devices */
+ else if (ictx->kc == KEY_RESERVED && buf[0] == 0x68 && buf[1] == 0x82 &&
+ buf[2] == 0x81 && buf[3] == 0xb7)
+ ictx->kc = ictx->last_keycode;
+
+ /* mouse button release on (some other) 0xffdc devices */
+ else if (ictx->kc == KEY_RESERVED && buf[0] == 0x01 && buf[1] == 0x00 &&
+ buf[2] == 0x81 && buf[3] == 0xb7)
+ ictx->kc = ictx->last_keycode;
+
+ /* mce-specific button handling */
+ else if (ktype == IMON_KEY_MCE) {
+ /* initial press */
+ if (ictx->kc != ictx->last_keycode
+ || buf[2] != ictx->mce_toggle_bit) {
+ ictx->last_keycode = ictx->kc;
+ ictx->mce_toggle_bit = buf[2];
+ press_type = 1;
+ mod_timer(&ictx->itimer,
+ jiffies + msecs_to_jiffies(rep_delay));
+ /* repeat */
+ } else {
+ press_type = 2;
+ mod_timer(&ictx->itimer,
+ jiffies + msecs_to_jiffies(rep_period));
+ }
+
+ /* incoherent or irrelevant data */
+ } else if (ictx->kc == KEY_RESERVED)
+ press_type = -EINVAL;
+
+ /* key release of 0xXXXXXXb7 key */
+ else if (ictx->release_code)
+ press_type = 0;
+
+ /* this is a button press */
+ else
+ press_type = 1;
+
+ return press_type;
+}
+
+/**
+ * Process the incoming packet
+ */
+static void imon_incoming_packet(struct imon_context *ictx,
+ struct urb *urb, int intf)
+{
+ int len = urb->actual_length;
+ unsigned char *buf = urb->transfer_buffer;
+ struct device *dev = ictx->dev;
+ u32 kc;
+ bool norelease = 0;
+ int i;
+ u64 temp_key;
+ u64 panel_key = 0;
+ u32 remote_key = 0;
+ struct input_dev *idev = NULL;
+ int press_type = 0;
+ int msec;
+ struct timeval t;
+ static struct timeval prev_time = { 0, 0 };
+ u8 ktype = IMON_KEY_IMON;
+
+ idev = ictx->idev;
+
+ /* filter out junk data on the older 0xffdc imon devices */
+ if ((buf[0] == 0xff) && (buf[7] == 0xff))
+ return;
+
+ /* Figure out what key was pressed */
+ memcpy(&temp_key, buf, sizeof(temp_key));
+ if (len == 8 && buf[7] == 0xee) {
+ ktype = IMON_KEY_PANEL;
+ panel_key = le64_to_cpu(temp_key);
+ kc = imon_panel_key_lookup(panel_key);
+ } else {
+ remote_key = (u32) (le64_to_cpu(temp_key) & 0xffffffff);
+ if (ictx->ir_type == IR_TYPE_RC6) {
+ if (buf[0] == 0x80)
+ ktype = IMON_KEY_MCE;
+ kc = imon_mce_key_lookup(ictx, remote_key);
+ } else
+ kc = imon_remote_key_lookup(ictx, remote_key);
+ }
+
+ /* keyboard/mouse mode toggle button */
+ if (kc == KEY_KEYBOARD && !ictx->release_code) {
+ ictx->last_keycode = kc;
+ if (!nomouse) {
+ ictx->pad_mouse = ~(ictx->pad_mouse) & 0x1;
+ dev_dbg(dev, "toggling to %s mode\n",
+ ictx->pad_mouse ? "mouse" : "keyboard");
+ return;
+ } else {
+ ictx->pad_mouse = 0;
+ dev_dbg(dev, "mouse mode disabled, passing key value\n");
+ }
+ }
+
+ ictx->kc = kc;
+
+ /* send touchscreen events through input subsystem if touchpad data */
+ if (ictx->display_type == IMON_DISPLAY_TYPE_VGA && len == 8 &&
+ buf[7] == 0x86) {
+ imon_touch_event(ictx, buf);
+
+ /* look for mouse events with pad in mouse mode */
+ } else if (ictx->pad_mouse) {
+ if (imon_mouse_event(ictx, buf, len))
+ return;
+ }
+
+ /* Now for some special handling to convert pad input to arrow keys */
+ if (((len == 5) && (buf[0] == 0x01) && (buf[4] == 0x00)) ||
+ ((len == 8) && (buf[0] & 0x40) &&
+ !(buf[1] & 0x1 || buf[1] >> 2 & 0x1))) {
+ len = 8;
+ imon_pad_to_keys(ictx, buf);
+ norelease = 1;
+ }
+
+ if (debug) {
+ printk(KERN_INFO "intf%d decoded packet: ", intf);
+ for (i = 0; i < len; ++i)
+ printk("%02x ", buf[i]);
+ printk("\n");
+ }
+
+ press_type = imon_parse_press_type(ictx, buf, ktype);
+ if (press_type < 0)
+ goto not_input_data;
+
+ if (ictx->kc == KEY_UNKNOWN)
+ goto unknown_key;
+
+ /* KEY_MUTE repeats from MCE and knob need to be suppressed */
+ if ((ictx->kc == KEY_MUTE && ictx->kc == ictx->last_keycode)
+ && (buf[7] == 0xee || ktype == IMON_KEY_MCE)) {
+ do_gettimeofday(&t);
+ msec = tv2int(&t, &prev_time);
+ prev_time = t;
+ if (msec < idev->rep[REP_DELAY])
+ return;
+ }
+
+ input_report_key(idev, ictx->kc, press_type);
+ input_sync(idev);
+
+ /* panel keys and some remote keys don't generate a release */
+ if (panel_key || norelease) {
+ input_report_key(idev, ictx->kc, 0);
+ input_sync(idev);
+ }
+
+ ictx->last_keycode = ictx->kc;
+
+ return;
+
+unknown_key:
+ dev_info(dev, "%s: unknown keypress, code 0x%llx\n", __func__,
+ (panel_key ? be64_to_cpu(panel_key) :
+ be32_to_cpu(remote_key)));
+ return;
+
+not_input_data:
+ if (len != 8) {
+ dev_warn(dev, "imon %s: invalid incoming packet "
+ "size (len = %d, intf%d)\n", __func__, len, intf);
+ return;
+ }
+
+ /* iMON 2.4G associate frame */
+ if (buf[0] == 0x00 &&
+ buf[2] == 0xFF && /* REFID */
+ buf[3] == 0xFF &&
+ buf[4] == 0xFF &&
+ buf[5] == 0xFF && /* iMON 2.4G */
+ ((buf[6] == 0x4E && buf[7] == 0xDF) || /* LT */
+ (buf[6] == 0x5E && buf[7] == 0xDF))) { /* DT */
+ dev_warn(dev, "%s: remote associated refid=%02X\n",
+ __func__, buf[1]);
+ ictx->rf_isassociating = 0;
+ }
+}
+
+/**
+ * Callback function for USB core API: receive data
+ */
+static void usb_rx_callback_intf0(struct urb *urb)
+{
+ struct imon_context *ictx;
+ int intfnum = 0;
+
+ if (!urb)
+ return;
+
+ ictx = (struct imon_context *)urb->context;
+ if (!ictx)
+ return;
+
+ switch (urb->status) {
+ case -ENOENT: /* usbcore unlink successful! */
+ return;
+
+ case -ESHUTDOWN: /* transport endpoint was shut down */
+ break;
+
+ case 0:
+ imon_incoming_packet(ictx, urb, intfnum);
+ break;
+
+ default:
+ dev_warn(ictx->dev, "imon %s: status(%d): ignored\n",
+ __func__, urb->status);
+ break;
+ }
+
+ usb_submit_urb(ictx->rx_urb_intf0, GFP_ATOMIC);
+}
+
+static void usb_rx_callback_intf1(struct urb *urb)
+{
+ struct imon_context *ictx;
+ int intfnum = 1;
+
+ if (!urb)
+ return;
+
+ ictx = (struct imon_context *)urb->context;
+ if (!ictx)
+ return;
+
+ switch (urb->status) {
+ case -ENOENT: /* usbcore unlink successful! */
+ return;
+
+ case -ESHUTDOWN: /* transport endpoint was shut down */
+ break;
+
+ case 0:
+ imon_incoming_packet(ictx, urb, intfnum);
+ break;
+
+ default:
+ dev_warn(ictx->dev, "imon %s: status(%d): ignored\n",
+ __func__, urb->status);
+ break;
+ }
+
+ usb_submit_urb(ictx->rx_urb_intf1, GFP_ATOMIC);
+}
+
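+/* set up the input device for remote, mouse and front panel/knob events */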
+static struct input_dev *imon_init_idev(struct imon_context *ictx)
+{
+ struct input_dev *idev;
+ struct ir_dev_props *props;
+ struct ir_input_dev *ir;
+ int ret, i;
+
+ idev = input_allocate_device();
+ if (!idev) {
+ dev_err(ictx->dev, "remote input dev allocation failed\n");
+ goto idev_alloc_failed;
+ }
+
+ props = kzalloc(sizeof(struct ir_dev_props), GFP_KERNEL);
+ if (!props) {
+ dev_err(ictx->dev, "remote ir dev props allocation failed\n");
+ goto props_alloc_failed;
+ }
+
+ ir = kzalloc(sizeof(struct ir_input_dev), GFP_KERNEL);
+ if (!ir) {
+ dev_err(ictx->dev, "remote ir input dev allocation failed\n");
+ goto ir_dev_alloc_failed;
+ }
+
+ snprintf(ictx->name_idev, sizeof(ictx->name_idev),
+ "iMON Remote (%04x:%04x)", ictx->vendor, ictx->product);
+ idev->name = ictx->name_idev;
+
+ usb_make_path(ictx->usbdev_intf0, ictx->phys_idev,
+ sizeof(ictx->phys_idev));
+ strlcat(ictx->phys_idev, "/input0", sizeof(ictx->phys_idev));
+ idev->phys = ictx->phys_idev;
+
+ idev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) | BIT_MASK(EV_REL);
+
+ idev->keybit[BIT_WORD(BTN_MOUSE)] =
+ BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_RIGHT);
+ idev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y) |
+ BIT_MASK(REL_WHEEL);
+
+ /* panel and/or knob code support */
+ for (i = 0; i < ARRAY_SIZE(imon_panel_key_table); i++) {
+ u32 kc = imon_panel_key_table[i].keycode;
+ __set_bit(kc, idev->keybit);
+ }
+
+ props->priv = ictx;
+ props->driver_type = RC_DRIVER_SCANCODE;
+ /* IR_TYPE_OTHER maps to iMON PAD remote, IR_TYPE_RC6 to MCE remote */
+ props->allowed_protos = IR_TYPE_OTHER | IR_TYPE_RC6;
+ props->change_protocol = imon_ir_change_protocol;
+ ictx->props = props;
+
+ ictx->ir = ir;
+ memcpy(&ir->dev, ictx->dev, sizeof(struct device));
+
+ usb_to_input_id(ictx->usbdev_intf0, &idev->id);
+ idev->dev.parent = ictx->dev;
+
+ input_set_drvdata(idev, ir);
+
+ ret = ir_input_register(idev, RC_MAP_IMON_PAD, props, MOD_NAME);
+ if (ret < 0) {
+ dev_err(ictx->dev, "remote input dev register failed\n");
+ goto idev_register_failed;
+ }
+
+ return idev;
+
+idev_register_failed:
+ kfree(ir);
+ir_dev_alloc_failed:
+ kfree(props);
+props_alloc_failed:
+ input_free_device(idev);
+idev_alloc_failed:
+
+ return NULL;
+}
+
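+/* set up the input device for the VGA touchscreen on iMON Touch devices */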
+static struct input_dev *imon_init_touch(struct imon_context *ictx)
+{
+ struct input_dev *touch;
+ int ret;
+
+ touch = input_allocate_device();
+ if (!touch) {
+ dev_err(ictx->dev, "touchscreen input dev allocation failed\n");
+ goto touch_alloc_failed;
+ }
+
+ snprintf(ictx->name_touch, sizeof(ictx->name_touch),
+ "iMON USB Touchscreen (%04x:%04x)",
+ ictx->vendor, ictx->product);
+ touch->name = ictx->name_touch;
+
+ usb_make_path(ictx->usbdev_intf1, ictx->phys_touch,
+ sizeof(ictx->phys_touch));
+ strlcat(ictx->phys_touch, "/input1", sizeof(ictx->phys_touch));
+ touch->phys = ictx->phys_touch;
+
+ touch->evbit[0] =
+ BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+ touch->keybit[BIT_WORD(BTN_TOUCH)] =
+ BIT_MASK(BTN_TOUCH);
+ input_set_abs_params(touch, ABS_X,
+ 0x00, 0xfff, 0, 0);
+ input_set_abs_params(touch, ABS_Y,
+ 0x00, 0xfff, 0, 0);
+
+ input_set_drvdata(touch, ictx);
+
+ usb_to_input_id(ictx->usbdev_intf1, &touch->id);
+ touch->dev.parent = ictx->dev;
+ ret = input_register_device(touch);
+ if (ret < 0) {
+ dev_info(ictx->dev, "touchscreen input dev register failed\n");
+ goto touch_register_failed;
+ }
+
+ return touch;
+
+touch_register_failed:
+ input_free_device(ictx->touch);
+ mutex_unlock(&ictx->lock);
+
+touch_alloc_failed:
+ return NULL;
+}
+
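+/*
+ * Scan this interface for its IR (interrupt in) and display (interrupt out)
+ * endpoints; returns true only if the mandatory IR endpoint was found.
+ */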
+static bool imon_find_endpoints(struct imon_context *ictx,
+ struct usb_host_interface *iface_desc)
+{
+ struct usb_endpoint_descriptor *ep;
+ struct usb_endpoint_descriptor *rx_endpoint = NULL;
+ struct usb_endpoint_descriptor *tx_endpoint = NULL;
+ int ifnum = iface_desc->desc.bInterfaceNumber;
+ int num_endpts = iface_desc->desc.bNumEndpoints;
+ int i, ep_dir, ep_type;
+ bool ir_ep_found = 0;
+ bool display_ep_found = 0;
+ bool tx_control = 0;
+
+ /*
+ * Scan the endpoint list and set:
+ * first input endpoint = IR endpoint
+ * first output endpoint = display endpoint
+ */
+ for (i = 0; i < num_endpts && !(ir_ep_found && display_ep_found); ++i) {
+ ep = &iface_desc->endpoint[i].desc;
+ ep_dir = ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK;
+ ep_type = ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+
+ if (!ir_ep_found && ep_dir == USB_DIR_IN &&
+ ep_type == USB_ENDPOINT_XFER_INT) {
+
+ rx_endpoint = ep;
+ ir_ep_found = 1;
+ dev_dbg(ictx->dev, "%s: found IR endpoint\n", __func__);
+
+ } else if (!display_ep_found && ep_dir == USB_DIR_OUT &&
+ ep_type == USB_ENDPOINT_XFER_INT) {
+ tx_endpoint = ep;
+ display_ep_found = 1;
+ dev_dbg(ictx->dev, "%s: found display endpoint\n", __func__);
+ }
+ }
+
+ if (ifnum == 0) {
+ ictx->rx_endpoint_intf0 = rx_endpoint;
+ /*
+ * tx is used to send characters to lcd/vfd, associate RF
+ * remotes, set IR protocol, and maybe more...
+ */
+ ictx->tx_endpoint = tx_endpoint;
+ } else {
+ ictx->rx_endpoint_intf1 = rx_endpoint;
+ }
+
+ /*
+ * If we didn't find a display endpoint, this is probably one of the newer
+ * iMON devices that use a control urb instead of an interrupt OUT endpoint.
+ */
+ if (!display_ep_found) {
+ tx_control = 1;
+ display_ep_found = 1;
+ dev_dbg(ictx->dev, "%s: device uses control endpoint, not "
+ "interface OUT endpoint\n", __func__);
+ }
+
+ /*
+ * Some iMON receivers have no display. Unfortunately, it seems
+ * that SoundGraph recycles device IDs between devices both with
+ * and without... :\
+ */
+ if (ictx->display_type == IMON_DISPLAY_TYPE_NONE) {
+ display_ep_found = 0;
+ dev_dbg(ictx->dev, "%s: device has no display\n", __func__);
+ }
+
+ /*
+ * iMON Touch devices have a VGA touchscreen, but no "display", as
+ * that refers to e.g. /dev/lcd0 (a character device LCD or VFD).
+ */
+ if (ictx->display_type == IMON_DISPLAY_TYPE_VGA) {
+ display_ep_found = 0;
+ dev_dbg(ictx->dev, "%s: iMON Touch device found\n", __func__);
+ }
+
+ /* Input endpoint is mandatory */
+ if (!ir_ep_found)
+ err("%s: no valid input (IR) endpoint found.", __func__);
+
+ ictx->tx_control = tx_control;
+
+ if (display_ep_found)
+ ictx->display_supported = true;
+
+ return ir_ep_found;
+
+}
+
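+/* allocate the driver context and bring up the primary (IR) interface */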
+static struct imon_context *imon_init_intf0(struct usb_interface *intf)
+{
+ struct imon_context *ictx;
+ struct urb *rx_urb;
+ struct urb *tx_urb;
+ struct device *dev = &intf->dev;
+ struct usb_host_interface *iface_desc;
+ int ret = -ENOMEM;
+
+ ictx = kzalloc(sizeof(struct imon_context), GFP_KERNEL);
+ if (!ictx) {
+ dev_err(dev, "%s: kzalloc failed for context", __func__);
+ goto exit;
+ }
+ rx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!rx_urb) {
+ dev_err(dev, "%s: usb_alloc_urb failed for IR urb", __func__);
+ goto rx_urb_alloc_failed;
+ }
+ tx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!tx_urb) {
+ dev_err(dev, "%s: usb_alloc_urb failed for display urb",
+ __func__);
+ goto tx_urb_alloc_failed;
+ }
+
+ mutex_init(&ictx->lock);
+
+ mutex_lock(&ictx->lock);
+
+ ictx->dev = dev;
+ ictx->usbdev_intf0 = usb_get_dev(interface_to_usbdev(intf));
+ ictx->dev_present_intf0 = 1;
+ ictx->rx_urb_intf0 = rx_urb;
+ ictx->tx_urb = tx_urb;
+
+ ictx->vendor = le16_to_cpu(ictx->usbdev_intf0->descriptor.idVendor);
+ ictx->product = le16_to_cpu(ictx->usbdev_intf0->descriptor.idProduct);
+
+ ret = -ENODEV;
+ iface_desc = intf->cur_altsetting;
+ if (!imon_find_endpoints(ictx, iface_desc)) {
+ goto find_endpoint_failed;
+ }
+
+ ictx->idev = imon_init_idev(ictx);
+ if (!ictx->idev) {
+ dev_err(dev, "%s: input device setup failed\n", __func__);
+ goto idev_setup_failed;
+ }
+
+ usb_fill_int_urb(ictx->rx_urb_intf0, ictx->usbdev_intf0,
+ usb_rcvintpipe(ictx->usbdev_intf0,
+ ictx->rx_endpoint_intf0->bEndpointAddress),
+ ictx->usb_rx_buf, sizeof(ictx->usb_rx_buf),
+ usb_rx_callback_intf0, ictx,
+ ictx->rx_endpoint_intf0->bInterval);
+
+ ret = usb_submit_urb(ictx->rx_urb_intf0, GFP_KERNEL);
+ if (ret) {
+ err("%s: usb_submit_urb failed for intf0 (%d)",
+ __func__, ret);
+ goto urb_submit_failed;
+ }
+
+ return ictx;
+
+urb_submit_failed:
+ input_unregister_device(ictx->idev);
+ input_free_device(ictx->idev);
+idev_setup_failed:
+find_endpoint_failed:
+ mutex_unlock(&ictx->lock);
+ usb_free_urb(tx_urb);
+tx_urb_alloc_failed:
+ usb_free_urb(rx_urb);
+rx_urb_alloc_failed:
+ kfree(ictx);
+exit:
+ dev_err(dev, "unable to initialize intf0, err %d\n", ret);
+
+ return NULL;
+}
+
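+/* bring up the secondary interface, reusing the context from interface 0 */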
+static struct imon_context *imon_init_intf1(struct usb_interface *intf,
+ struct imon_context *ictx)
+{
+ struct urb *rx_urb;
+ struct usb_host_interface *iface_desc;
+ int ret = -ENOMEM;
+
+ rx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!rx_urb) {
+ err("%s: usb_alloc_urb failed for IR urb", __func__);
+ goto rx_urb_alloc_failed;
+ }
+
+ mutex_lock(&ictx->lock);
+
+ if (ictx->display_type == IMON_DISPLAY_TYPE_VGA) {
+ init_timer(&ictx->ttimer);
+ ictx->ttimer.data = (unsigned long)ictx;
+ ictx->ttimer.function = imon_touch_display_timeout;
+ }
+
+ ictx->usbdev_intf1 = usb_get_dev(interface_to_usbdev(intf));
+ ictx->dev_present_intf1 = 1;
+ ictx->rx_urb_intf1 = rx_urb;
+
+ ret = -ENODEV;
+ iface_desc = intf->cur_altsetting;
+ if (!imon_find_endpoints(ictx, iface_desc))
+ goto find_endpoint_failed;
+
+ if (ictx->display_type == IMON_DISPLAY_TYPE_VGA) {
+ ictx->touch = imon_init_touch(ictx);
+ if (!ictx->touch)
+ goto touch_setup_failed;
+ } else
+ ictx->touch = NULL;
+
+ usb_fill_int_urb(ictx->rx_urb_intf1, ictx->usbdev_intf1,
+ usb_rcvintpipe(ictx->usbdev_intf1,
+ ictx->rx_endpoint_intf1->bEndpointAddress),
+ ictx->usb_rx_buf, sizeof(ictx->usb_rx_buf),
+ usb_rx_callback_intf1, ictx,
+ ictx->rx_endpoint_intf1->bInterval);
+
+ ret = usb_submit_urb(ictx->rx_urb_intf1, GFP_KERNEL);
+
+ if (ret) {
+ err("%s: usb_submit_urb failed for intf1 (%d)",
+ __func__, ret);
+ goto urb_submit_failed;
+ }
+
+ return ictx;
+
+urb_submit_failed:
+ if (ictx->touch) {
+ input_unregister_device(ictx->touch);
+ input_free_device(ictx->touch);
+ }
+touch_setup_failed:
+find_endpoint_failed:
+ mutex_unlock(&ictx->lock);
+ usb_free_urb(rx_urb);
+rx_urb_alloc_failed:
+ dev_err(ictx->dev, "unable to initialize intf1, err %d\n", ret);
+
+ return NULL;
+}
+
+/*
+ * The 0x15c2:0xffdc device ID was used for umpteen different imon
+ * devices, and all of them constantly spew interrupts, even when there
+ * is no actual data to report. However, byte 6 of this buffer looks like
+ * its unique across device variants, so we're trying to key off that to
+ * figure out which display type (if any) and what IR protocol the device
+ * actually supports. These devices have their IR protocol hard-coded into
+ * their firmware, they can't be changed on the fly like the newer hardware.
+ */
+static void imon_get_ffdc_type(struct imon_context *ictx)
+{
+ u8 ffdc_cfg_byte = ictx->usb_rx_buf[6];
+ u8 detected_display_type = IMON_DISPLAY_TYPE_NONE;
+ u64 allowed_protos = IR_TYPE_OTHER;
+
+ switch (ffdc_cfg_byte) {
+ /* iMON Knob, no display, iMON IR + vol knob */
+ case 0x21:
+ dev_info(ictx->dev, "0xffdc iMON Knob, iMON IR");
+ ictx->display_supported = false;
+ break;
+ /* iMON VFD, no IR (does have vol knob tho) */
+ case 0x35:
+ dev_info(ictx->dev, "0xffdc iMON VFD + knob, no IR");
+ detected_display_type = IMON_DISPLAY_TYPE_VFD;
+ break;
+ /* iMON VFD, iMON IR */
+ case 0x24:
+ case 0x85:
+ dev_info(ictx->dev, "0xffdc iMON VFD, iMON IR");
+ detected_display_type = IMON_DISPLAY_TYPE_VFD;
+ break;
+ /* iMON LCD, MCE IR */
+ case 0x9f:
+ dev_info(ictx->dev, "0xffdc iMON LCD, MCE IR");
+ detected_display_type = IMON_DISPLAY_TYPE_LCD;
+ allowed_protos = IR_TYPE_RC6;
+ break;
+ default:
+ dev_info(ictx->dev, "Unknown 0xffdc device, "
+ "defaulting to VFD and iMON IR");
+ detected_display_type = IMON_DISPLAY_TYPE_VFD;
+ break;
+ }
+
+ printk(KERN_CONT " (id 0x%02x)\n", ffdc_cfg_byte);
+
+ ictx->display_type = detected_display_type;
+ ictx->props->allowed_protos = allowed_protos;
+ ictx->ir_type = allowed_protos;
+}
+
+static void imon_set_display_type(struct imon_context *ictx,
+ struct usb_interface *intf)
+{
+ u8 configured_display_type = IMON_DISPLAY_TYPE_VFD;
+
+ /*
+ * Try to auto-detect the type of display if the user hasn't set
+ * it by hand via the display_type modparam. Default is VFD.
+ */
+
+ if (display_type == IMON_DISPLAY_TYPE_AUTO) {
+ switch (ictx->product) {
+ case 0xffdc:
+ /* set in imon_get_ffdc_type() */
+ configured_display_type = ictx->display_type;
+ break;
+ case 0x0034:
+ case 0x0035:
+ configured_display_type = IMON_DISPLAY_TYPE_VGA;
+ break;
+ case 0x0038:
+ case 0x0039:
+ case 0x0045:
+ configured_display_type = IMON_DISPLAY_TYPE_LCD;
+ break;
+ case 0x003c:
+ case 0x0041:
+ case 0x0042:
+ case 0x0043:
+ configured_display_type = IMON_DISPLAY_TYPE_NONE;
+ ictx->display_supported = false;
+ break;
+ case 0x0036:
+ case 0x0044:
+ default:
+ configured_display_type = IMON_DISPLAY_TYPE_VFD;
+ break;
+ }
+ } else {
+ configured_display_type = display_type;
+ if (display_type == IMON_DISPLAY_TYPE_NONE)
+ ictx->display_supported = false;
+ else
+ ictx->display_supported = true;
+ dev_info(ictx->dev, "%s: overriding display type to %d via "
+ "modparam\n", __func__, display_type);
+ }
+
+ ictx->display_type = configured_display_type;
+}
+
+static void imon_init_display(struct imon_context *ictx,
+ struct usb_interface *intf)
+{
+ int ret;
+
+ dev_dbg(ictx->dev, "Registering iMON display with sysfs\n");
+
+ /* set up sysfs entry for built-in clock */
+ ret = sysfs_create_group(&intf->dev.kobj,
+ &imon_display_attribute_group);
+ if (ret)
+ dev_err(ictx->dev, "Could not create display sysfs "
+ "entries(%d)", ret);
+
+ if (ictx->display_type == IMON_DISPLAY_TYPE_LCD)
+ ret = usb_register_dev(intf, &imon_lcd_class);
+ else
+ ret = usb_register_dev(intf, &imon_vfd_class);
+ if (ret)
+ /* Not a fatal error, so ignore */
+ dev_info(ictx->dev, "could not get a minor number for "
+ "display\n");
+
+}
+
+/**
+ * Callback function for USB core API: Probe
+ */
+static int __devinit imon_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct usb_device *usbdev = NULL;
+ struct usb_host_interface *iface_desc = NULL;
+ struct usb_interface *first_if;
+ struct device *dev = &interface->dev;
+ int ifnum, code_length, sysfs_err;
+ int ret = 0;
+ struct imon_context *ictx = NULL;
+ struct imon_context *first_if_ctx = NULL;
+ u16 vendor, product;
+ const unsigned char fp_packet[] = { 0x40, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x88 };
+
+ code_length = BUF_CHUNK_SIZE * 8;
+
+ usbdev = usb_get_dev(interface_to_usbdev(interface));
+ iface_desc = interface->cur_altsetting;
+ ifnum = iface_desc->desc.bInterfaceNumber;
+ vendor = le16_to_cpu(usbdev->descriptor.idVendor);
+ product = le16_to_cpu(usbdev->descriptor.idProduct);
+
+ dev_dbg(dev, "%s: found iMON device (%04x:%04x, intf%d)\n",
+ __func__, vendor, product, ifnum);
+
+ /* prevent races probing devices w/multiple interfaces */
+ mutex_lock(&driver_lock);
+
+ first_if = usb_ifnum_to_if(usbdev, 0);
+ first_if_ctx = (struct imon_context *)usb_get_intfdata(first_if);
+
+ if (ifnum == 0) {
+ ictx = imon_init_intf0(interface);
+ if (!ictx) {
+ err("%s: failed to initialize context!\n", __func__);
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ if (product == 0xffdc) {
+ /* RF products *also* use 0xffdc... sigh... */
+ sysfs_err = sysfs_create_group(&interface->dev.kobj,
+ &imon_rf_attribute_group);
+ if (sysfs_err)
+ err("%s: Could not create RF sysfs entries(%d)",
+ __func__, sysfs_err);
+ }
+
+ } else {
+ /* this is the secondary interface on the device */
+ ictx = imon_init_intf1(interface, first_if_ctx);
+ if (!ictx) {
+ err("%s: failed to attach to context!\n", __func__);
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ }
+
+ usb_set_intfdata(interface, ictx);
+
+ if (ifnum == 0) {
+ /* Enable front-panel buttons and/or knobs */
+ memcpy(ictx->usb_tx_buf, &fp_packet, sizeof(fp_packet));
+ ret = send_packet(ictx);
+ /* Not fatal, but warn about it */
+ if (ret)
+ dev_info(dev, "failed to enable panel buttons "
+ "and/or knobs\n");
+
+ if (product == 0xffdc)
+ imon_get_ffdc_type(ictx);
+
+ imon_set_display_type(ictx, interface);
+
+ if (ictx->display_supported)
+ imon_init_display(ictx, interface);
+ }
+
+ /* set IR protocol/remote type */
+ ret = imon_ir_change_protocol(ictx, ictx->ir_type);
+ if (ret) {
+ dev_warn(dev, "%s: failed to set IR protocol, falling back "
+ "to standard iMON protocol mode\n", __func__);
+ ictx->ir_type = IR_TYPE_OTHER;
+ }
+
+ dev_info(dev, "iMON device (%04x:%04x, intf%d) on "
+ "usb<%d:%d> initialized\n", vendor, product, ifnum,
+ usbdev->bus->busnum, usbdev->devnum);
+
+ mutex_unlock(&ictx->lock);
+ mutex_unlock(&driver_lock);
+
+ return 0;
+
+fail:
+ mutex_unlock(&driver_lock);
+ dev_err(dev, "unable to register, err %d\n", ret);
+
+ return ret;
+}
+
+/**
+ * Callback function for USB core API: disconnect
+ */
+static void __devexit imon_disconnect(struct usb_interface *interface)
+{
+ struct imon_context *ictx;
+ struct device *dev;
+ int ifnum;
+
+ /* prevent races with multi-interface device probing and display_open */
+ mutex_lock(&driver_lock);
+
+ ictx = usb_get_intfdata(interface);
+ dev = ictx->dev;
+ ifnum = interface->cur_altsetting->desc.bInterfaceNumber;
+
+ mutex_lock(&ictx->lock);
+
+ /*
+ * sysfs_remove_group is safe to call even if sysfs_create_group
+ * hasn't been called
+ */
+ sysfs_remove_group(&interface->dev.kobj,
+ &imon_display_attribute_group);
+ sysfs_remove_group(&interface->dev.kobj,
+ &imon_rf_attribute_group);
+
+ usb_set_intfdata(interface, NULL);
+
+ /* Abort ongoing write */
+ if (ictx->tx.busy) {
+ usb_kill_urb(ictx->tx_urb);
+ complete_all(&ictx->tx.finished);
+ }
+
+ if (ifnum == 0) {
+ ictx->dev_present_intf0 = 0;
+ usb_kill_urb(ictx->rx_urb_intf0);
+ input_unregister_device(ictx->idev);
+ if (ictx->display_supported) {
+ if (ictx->display_type == IMON_DISPLAY_TYPE_LCD)
+ usb_deregister_dev(interface, &imon_lcd_class);
+ else
+ usb_deregister_dev(interface, &imon_vfd_class);
+ }
+ } else {
+ ictx->dev_present_intf1 = 0;
+ usb_kill_urb(ictx->rx_urb_intf1);
+ if (ictx->display_type == IMON_DISPLAY_TYPE_VGA)
+ input_unregister_device(ictx->touch);
+ }
+
+ if (!ictx->dev_present_intf0 && !ictx->dev_present_intf1) {
+ if (ictx->display_type == IMON_DISPLAY_TYPE_VGA)
+ del_timer_sync(&ictx->ttimer);
+ mutex_unlock(&ictx->lock);
+ if (!ictx->display_isopen)
+ free_imon_context(ictx);
+ } else {
+ if (ictx->ir_type == IR_TYPE_RC6)
+ del_timer_sync(&ictx->itimer);
+ mutex_unlock(&ictx->lock);
+ }
+
+ mutex_unlock(&driver_lock);
+
+ dev_dbg(dev, "%s: iMON device (intf%d) disconnected\n",
+ __func__, ifnum);
+}
+
+static int imon_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct imon_context *ictx = usb_get_intfdata(intf);
+ int ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
+
+ if (ifnum == 0)
+ usb_kill_urb(ictx->rx_urb_intf0);
+ else
+ usb_kill_urb(ictx->rx_urb_intf1);
+
+ return 0;
+}
+
+static int imon_resume(struct usb_interface *intf)
+{
+ int rc = 0;
+ struct imon_context *ictx = usb_get_intfdata(intf);
+ int ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
+
+ if (ifnum == 0) {
+ usb_fill_int_urb(ictx->rx_urb_intf0, ictx->usbdev_intf0,
+ usb_rcvintpipe(ictx->usbdev_intf0,
+ ictx->rx_endpoint_intf0->bEndpointAddress),
+ ictx->usb_rx_buf, sizeof(ictx->usb_rx_buf),
+ usb_rx_callback_intf0, ictx,
+ ictx->rx_endpoint_intf0->bInterval);
+
+ rc = usb_submit_urb(ictx->rx_urb_intf0, GFP_ATOMIC);
+
+ } else {
+ usb_fill_int_urb(ictx->rx_urb_intf1, ictx->usbdev_intf1,
+ usb_rcvintpipe(ictx->usbdev_intf1,
+ ictx->rx_endpoint_intf1->bEndpointAddress),
+ ictx->usb_rx_buf, sizeof(ictx->usb_rx_buf),
+ usb_rx_callback_intf1, ictx,
+ ictx->rx_endpoint_intf1->bInterval);
+
+ rc = usb_submit_urb(ictx->rx_urb_intf1, GFP_ATOMIC);
+ }
+
+ return rc;
+}
+
+static int __init imon_init(void)
+{
+ int rc;
+
+ rc = usb_register(&imon_driver);
+ if (rc) {
+ err("%s: usb register failed(%d)", __func__, rc);
+ rc = -ENODEV;
+ }
+
+ return rc;
+}
+
+static void __exit imon_exit(void)
+{
+ usb_deregister(&imon_driver);
+}
+
+module_init(imon_init);
+module_exit(imon_exit);
diff --git a/drivers/media/IR/ir-core-priv.h b/drivers/media/IR/ir-core-priv.h
new file mode 100644
index 000000000000..9a5e65a471a5
--- /dev/null
+++ b/drivers/media/IR/ir-core-priv.h
@@ -0,0 +1,126 @@
+/*
+ * Remote Controller core raw events header
+ *
+ * Copyright (C) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IR_RAW_EVENT
+#define _IR_RAW_EVENT
+
+#include <linux/slab.h>
+#include <media/ir-core.h>
+
+struct ir_raw_handler {
+ struct list_head list;
+
+ int (*decode)(struct input_dev *input_dev, struct ir_raw_event event);
+ int (*raw_register)(struct input_dev *input_dev);
+ int (*raw_unregister)(struct input_dev *input_dev);
+};
+
+struct ir_raw_event_ctrl {
+ struct work_struct rx_work; /* for the rx decoding workqueue */
+ struct kfifo kfifo; /* fifo for the pulse/space durations */
+ ktime_t last_event; /* when last event occurred */
+ enum raw_event_type last_type; /* last event type */
+ struct input_dev *input_dev; /* pointer to the parent input_dev */
+};
+
+/* macros for IR decoders */
+static inline bool geq_margin(unsigned d1, unsigned d2, unsigned margin)
+{
+ return d1 > (d2 - margin);
+}
+
+static inline bool eq_margin(unsigned d1, unsigned d2, unsigned margin)
+{
+ return ((d1 > (d2 - margin)) && (d1 < (d2 + margin)));
+}
+
+static inline bool is_transition(struct ir_raw_event *x, struct ir_raw_event *y)
+{
+ return x->pulse != y->pulse;
+}
+
+static inline void decrease_duration(struct ir_raw_event *ev, unsigned duration)
+{
+ if (duration > ev->duration)
+ ev->duration = 0;
+ else
+ ev->duration -= duration;
+}
+
+#define TO_US(duration) (((duration) + 500) / 1000)
+#define TO_STR(is_pulse) ((is_pulse) ? "pulse" : "space")
+#define IS_RESET(ev) (ev.duration == 0)
+
+/*
+ * Routines from ir-sysfs.c - Meant to be called only internally inside
+ * ir-core
+ */
+
+int ir_register_class(struct input_dev *input_dev);
+void ir_unregister_class(struct input_dev *input_dev);
+
+/*
+ * Routines from ir-raw-event.c to be used internally and by decoders
+ */
+int ir_raw_event_register(struct input_dev *input_dev);
+void ir_raw_event_unregister(struct input_dev *input_dev);
+int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler);
+void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler);
+void ir_raw_init(void);
+
+
+/*
+ * Decoder initialization code
+ *
+ * These load macros are called during ir-core init and automatically
+ * load the compiled decoders for use with IR raw events
+ */
+
+/* from ir-nec-decoder.c */
+#ifdef CONFIG_IR_NEC_DECODER_MODULE
+#define load_nec_decode() request_module("ir-nec-decoder")
+#else
+#define load_nec_decode() 0
+#endif
+
+/* from ir-rc5-decoder.c */
+#ifdef CONFIG_IR_RC5_DECODER_MODULE
+#define load_rc5_decode() request_module("ir-rc5-decoder")
+#else
+#define load_rc5_decode() 0
+#endif
+
+/* from ir-rc6-decoder.c */
+#ifdef CONFIG_IR_RC6_DECODER_MODULE
+#define load_rc6_decode() request_module("ir-rc6-decoder")
+#else
+#define load_rc6_decode() 0
+#endif
+
+/* from ir-jvc-decoder.c */
+#ifdef CONFIG_IR_JVC_DECODER_MODULE
+#define load_jvc_decode() request_module("ir-jvc-decoder")
+#else
+#define load_jvc_decode() 0
+#endif
+
+/* from ir-sony-decoder.c */
+#ifdef CONFIG_IR_SONY_DECODER_MODULE
+#define load_sony_decode() request_module("ir-sony-decoder")
+#else
+#define load_sony_decode() 0
+#endif
+
+#endif /* _IR_RAW_EVENT */
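To show how the declarations above are consumed, here is a minimal decoder skeleton modelled on the JVC decoder added below. It registers a struct ir_raw_handler whose decode callback is fed one pulse or space per call; everything prefixed example_ is a placeholder, not part of this patch.

#include <linux/module.h>
#include "ir-core-priv.h"

static int example_decode(struct input_dev *input_dev, struct ir_raw_event ev)
{
	if (IS_RESET(ev))
		return 0;	/* timeout/reset from the hardware: restart the state machine */

	/* Inspect ev.pulse and ev.duration (in ns) here and, once a full
	 * frame has been accumulated, report it with
	 * ir_keydown(input_dev, scancode, toggle) as the JVC decoder does. */
	return 0;
}

static int example_register(struct input_dev *input_dev)
{
	return 0;	/* per-device setup; the JVC decoder allocates state and a sysfs group here */
}

static int example_unregister(struct input_dev *input_dev)
{
	return 0;
}

static struct ir_raw_handler example_handler = {
	.decode		= example_decode,
	.raw_register	= example_register,
	.raw_unregister	= example_unregister,
};

static int __init example_decode_init(void)
{
	ir_raw_handler_register(&example_handler);
	return 0;
}

static void __exit example_decode_exit(void)
{
	ir_raw_handler_unregister(&example_handler);
}

module_init(example_decode_init);
module_exit(example_decode_exit);
MODULE_LICENSE("GPL");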
diff --git a/drivers/media/IR/ir-functions.c b/drivers/media/IR/ir-functions.c
index ab06919ad5fc..db591e421887 100644
--- a/drivers/media/IR/ir-functions.c
+++ b/drivers/media/IR/ir-functions.c
@@ -24,6 +24,7 @@
#include <linux/string.h>
#include <linux/jiffies.h>
#include <media/ir-common.h>
+#include "ir-core-priv.h"
/* -------------------------------------------------------------------------- */
diff --git a/drivers/media/IR/ir-jvc-decoder.c b/drivers/media/IR/ir-jvc-decoder.c
new file mode 100644
index 000000000000..0b804944cbb0
--- /dev/null
+++ b/drivers/media/IR/ir-jvc-decoder.c
@@ -0,0 +1,320 @@
+/* ir-jvc-decoder.c - handle JVC IR Pulse/Space protocol
+ *
+ * Copyright (C) 2010 by David Härdeman <david@hardeman.nu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitrev.h>
+#include "ir-core-priv.h"
+
+#define JVC_NBITS 16 /* dev(8) + func(8) */
+#define JVC_UNIT 525000 /* ns */
+#define JVC_HEADER_PULSE (16 * JVC_UNIT) /* lack of header -> repeat */
+#define JVC_HEADER_SPACE (8 * JVC_UNIT)
+#define JVC_BIT_PULSE (1 * JVC_UNIT)
+#define JVC_BIT_0_SPACE (1 * JVC_UNIT)
+#define JVC_BIT_1_SPACE (3 * JVC_UNIT)
+#define JVC_TRAILER_PULSE (1 * JVC_UNIT)
+#define JVC_TRAILER_SPACE (35 * JVC_UNIT)
+
+/* Used to register jvc_decoder clients */
+static LIST_HEAD(decoder_list);
+static DEFINE_SPINLOCK(decoder_lock);
+
+enum jvc_state {
+ STATE_INACTIVE,
+ STATE_HEADER_SPACE,
+ STATE_BIT_PULSE,
+ STATE_BIT_SPACE,
+ STATE_TRAILER_PULSE,
+ STATE_TRAILER_SPACE,
+};
+
+struct decoder_data {
+ struct list_head list;
+ struct ir_input_dev *ir_dev;
+	bool enabled;
+
+ /* State machine control */
+ enum jvc_state state;
+ u16 jvc_bits;
+ u16 jvc_old_bits;
+ unsigned count;
+ bool first;
+ bool toggle;
+};
+
+
+/**
+ * get_decoder_data() - gets decoder data
+ * @ir_dev:	the struct ir_input_dev of the device
+ *
+ * Returns the struct decoder_data that corresponds to a device
+ */
+static struct decoder_data *get_decoder_data(struct ir_input_dev *ir_dev)
+{
+	struct decoder_data *cur, *data = NULL;
+
+	spin_lock(&decoder_lock);
+	list_for_each_entry(cur, &decoder_list, list) {
+		if (cur->ir_dev == ir_dev) {
+			data = cur;
+			break;
+		}
+	}
+	spin_unlock(&decoder_lock);
+	return data;
+}
+
+static ssize_t store_enabled(struct device *d,
+ struct device_attribute *mattr,
+ const char *buf,
+ size_t len)
+{
+ unsigned long value;
+ struct ir_input_dev *ir_dev = dev_get_drvdata(d);
+ struct decoder_data *data = get_decoder_data(ir_dev);
+
+ if (!data)
+ return -EINVAL;
+
+ if (strict_strtoul(buf, 10, &value) || value > 1)
+ return -EINVAL;
+
+ data->enabled = value;
+
+ return len;
+}
+
+static ssize_t show_enabled(struct device *d,
+ struct device_attribute *mattr, char *buf)
+{
+ struct ir_input_dev *ir_dev = dev_get_drvdata(d);
+ struct decoder_data *data = get_decoder_data(ir_dev);
+
+ if (!data)
+ return -EINVAL;
+
+ if (data->enabled)
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
+static DEVICE_ATTR(enabled, S_IRUGO | S_IWUSR, show_enabled, store_enabled);
+
+static struct attribute *decoder_attributes[] = {
+ &dev_attr_enabled.attr,
+ NULL
+};
+
+static struct attribute_group decoder_attribute_group = {
+ .name = "jvc_decoder",
+ .attrs = decoder_attributes,
+};
+
+/**
+ * ir_jvc_decode() - Decode one JVC pulse or space
+ * @input_dev: the struct input_dev descriptor of the device
+ * @ev:		the struct ir_raw_event descriptor of the pulse/space
+ *
+ * This function returns -EINVAL if the pulse violates the state machine
+ */
+static int ir_jvc_decode(struct input_dev *input_dev, struct ir_raw_event ev)
+{
+ struct decoder_data *data;
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+
+ data = get_decoder_data(ir_dev);
+ if (!data)
+ return -EINVAL;
+
+ if (!data->enabled)
+ return 0;
+
+ if (IS_RESET(ev)) {
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+ if (!geq_margin(ev.duration, JVC_UNIT, JVC_UNIT / 2))
+ goto out;
+
+ IR_dprintk(2, "JVC decode started at state %d (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+
+ switch (data->state) {
+
+ case STATE_INACTIVE:
+ if (!ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, JVC_HEADER_PULSE, JVC_UNIT / 2))
+ break;
+
+ data->count = 0;
+ data->first = true;
+ data->toggle = !data->toggle;
+ data->state = STATE_HEADER_SPACE;
+ return 0;
+
+ case STATE_HEADER_SPACE:
+ if (ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, JVC_HEADER_SPACE, JVC_UNIT / 2))
+ break;
+
+ data->state = STATE_BIT_PULSE;
+ return 0;
+
+ case STATE_BIT_PULSE:
+ if (!ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, JVC_BIT_PULSE, JVC_UNIT / 2))
+ break;
+
+ data->state = STATE_BIT_SPACE;
+ return 0;
+
+ case STATE_BIT_SPACE:
+ if (ev.pulse)
+ break;
+
+ data->jvc_bits <<= 1;
+ if (eq_margin(ev.duration, JVC_BIT_1_SPACE, JVC_UNIT / 2)) {
+ data->jvc_bits |= 1;
+ decrease_duration(&ev, JVC_BIT_1_SPACE);
+ } else if (eq_margin(ev.duration, JVC_BIT_0_SPACE, JVC_UNIT / 2))
+ decrease_duration(&ev, JVC_BIT_0_SPACE);
+ else
+ break;
+ data->count++;
+
+ if (data->count == JVC_NBITS)
+ data->state = STATE_TRAILER_PULSE;
+ else
+ data->state = STATE_BIT_PULSE;
+ return 0;
+
+ case STATE_TRAILER_PULSE:
+ if (!ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, JVC_TRAILER_PULSE, JVC_UNIT / 2))
+ break;
+
+ data->state = STATE_TRAILER_SPACE;
+ return 0;
+
+ case STATE_TRAILER_SPACE:
+ if (ev.pulse)
+ break;
+
+ if (!geq_margin(ev.duration, JVC_TRAILER_SPACE, JVC_UNIT / 2))
+ break;
+
+ if (data->first) {
+ u32 scancode;
+ scancode = (bitrev8((data->jvc_bits >> 8) & 0xff) << 8) |
+ (bitrev8((data->jvc_bits >> 0) & 0xff) << 0);
+ IR_dprintk(1, "JVC scancode 0x%04x\n", scancode);
+ ir_keydown(input_dev, scancode, data->toggle);
+ data->first = false;
+ data->jvc_old_bits = data->jvc_bits;
+ } else if (data->jvc_bits == data->jvc_old_bits) {
+ IR_dprintk(1, "JVC repeat\n");
+ ir_repeat(input_dev);
+ } else {
+ IR_dprintk(1, "JVC invalid repeat msg\n");
+ break;
+ }
+
+ data->count = 0;
+ data->state = STATE_BIT_PULSE;
+ return 0;
+ }
+
+out:
+ IR_dprintk(1, "JVC decode failed at state %d (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->state = STATE_INACTIVE;
+ return -EINVAL;
+}
+
+static int ir_jvc_register(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ struct decoder_data *data;
+ int rc;
+
+ rc = sysfs_create_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+ if (rc < 0)
+ return rc;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ sysfs_remove_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+ return -ENOMEM;
+ }
+
+ data->ir_dev = ir_dev;
+ data->enabled = 1;
+
+ spin_lock(&decoder_lock);
+ list_add_tail(&data->list, &decoder_list);
+ spin_unlock(&decoder_lock);
+
+ return 0;
+}
+
+static int ir_jvc_unregister(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+	struct decoder_data *data;
+
+ data = get_decoder_data(ir_dev);
+ if (!data)
+ return 0;
+
+ sysfs_remove_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+
+ spin_lock(&decoder_lock);
+ list_del(&data->list);
+ spin_unlock(&decoder_lock);
+
+ return 0;
+}
+
+static struct ir_raw_handler jvc_handler = {
+ .decode = ir_jvc_decode,
+ .raw_register = ir_jvc_register,
+ .raw_unregister = ir_jvc_unregister,
+};
+
+static int __init ir_jvc_decode_init(void)
+{
+ ir_raw_handler_register(&jvc_handler);
+
+ printk(KERN_INFO "IR JVC protocol handler initialized\n");
+ return 0;
+}
+
+static void __exit ir_jvc_decode_exit(void)
+{
+ ir_raw_handler_unregister(&jvc_handler);
+}
+
+module_init(ir_jvc_decode_init);
+module_exit(ir_jvc_decode_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Härdeman <david@hardeman.nu>");
+MODULE_DESCRIPTION("JVC IR protocol decoder");
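To make the timing defines and the scancode assembly above concrete, the nominal JVC frame accepted by the state machine works out as follows (values derived from JVC_UNIT = 525000 ns; the bit-order note reflects the bitrev8() step in ir_jvc_decode() and the usual LSB-first JVC transmission order).

/*
 * Nominal JVC timing, derived from the defines above:
 *
 *   header pulse  : 16 * 525 us = 8400 us
 *   header space  :  8 * 525 us = 4200 us
 *   bit pulse     :  1 * 525 us =  525 us
 *   bit '0' space :  1 * 525 us =  525 us
 *   bit '1' space :  3 * 525 us = 1575 us
 *   trailer space : 35 * 525 us = 18375 us (minimum gap before the next
 *                   frame; repeat frames follow without a new header,
 *                   per the "lack of header -> repeat" note above)
 *
 * ir_jvc_decode() shifts the 16 payload bits (dev + func) into jvc_bits
 * MSB-first as they arrive, then bit-reverses each byte, so the reported
 * scancode is
 *   (bitrev8(raw dev byte) << 8) | bitrev8(raw func byte)
 * which restores the LSB-first order in which each byte is sent.
 */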
diff --git a/drivers/media/IR/ir-keymaps.c b/drivers/media/IR/ir-keymaps.c
deleted file mode 100644
index 0efdefe75f32..000000000000
--- a/drivers/media/IR/ir-keymaps.c
+++ /dev/null
@@ -1,3494 +0,0 @@
-/*
- Keytables for supported remote controls, used on drivers/media
- devices.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-
-/*
- * NOTICE FOR DEVELOPERS:
- * The IR mappings should be as close as possible to what's
- * specified at:
- * http://linuxtv.org/wiki/index.php/Remote_Controllers
- */
-#include <linux/module.h>
-
-#include <linux/input.h>
-#include <media/ir-common.h>
-
-/* empty keytable, can be used as placeholder for not-yet created keytables */
-static struct ir_scancode ir_codes_empty[] = {
- { 0x2a, KEY_COFFEE },
-};
-
-struct ir_scancode_table ir_codes_empty_table = {
- .scan = ir_codes_empty,
- .size = ARRAY_SIZE(ir_codes_empty),
-};
-EXPORT_SYMBOL_GPL(ir_codes_empty_table);
-
-/* Michal Majchrowicz <mmajchrowicz@gmail.com> */
-static struct ir_scancode ir_codes_proteus_2309[] = {
- /* numeric */
- { 0x00, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
-
- { 0x5c, KEY_POWER }, /* power */
- { 0x20, KEY_ZOOM }, /* full screen */
- { 0x0f, KEY_BACKSPACE }, /* recall */
- { 0x1b, KEY_ENTER }, /* mute */
- { 0x41, KEY_RECORD }, /* record */
- { 0x43, KEY_STOP }, /* stop */
- { 0x16, KEY_S },
- { 0x1a, KEY_POWER2 }, /* off */
- { 0x2e, KEY_RED },
- { 0x1f, KEY_CHANNELDOWN }, /* channel - */
- { 0x1c, KEY_CHANNELUP }, /* channel + */
- { 0x10, KEY_VOLUMEDOWN }, /* volume - */
- { 0x1e, KEY_VOLUMEUP }, /* volume + */
- { 0x14, KEY_F1 },
-};
-
-struct ir_scancode_table ir_codes_proteus_2309_table = {
- .scan = ir_codes_proteus_2309,
- .size = ARRAY_SIZE(ir_codes_proteus_2309),
-};
-EXPORT_SYMBOL_GPL(ir_codes_proteus_2309_table);
-
-/* Matt Jesson <dvb@jesson.eclipse.co.uk */
-static struct ir_scancode ir_codes_avermedia_dvbt[] = {
- { 0x28, KEY_0 }, /* '0' / 'enter' */
- { 0x22, KEY_1 }, /* '1' */
- { 0x12, KEY_2 }, /* '2' / 'up arrow' */
- { 0x32, KEY_3 }, /* '3' */
- { 0x24, KEY_4 }, /* '4' / 'left arrow' */
- { 0x14, KEY_5 }, /* '5' */
- { 0x34, KEY_6 }, /* '6' / 'right arrow' */
- { 0x26, KEY_7 }, /* '7' */
- { 0x16, KEY_8 }, /* '8' / 'down arrow' */
- { 0x36, KEY_9 }, /* '9' */
-
- { 0x20, KEY_LIST }, /* 'source' */
- { 0x10, KEY_TEXT }, /* 'teletext' */
- { 0x00, KEY_POWER }, /* 'power' */
- { 0x04, KEY_AUDIO }, /* 'audio' */
- { 0x06, KEY_ZOOM }, /* 'full screen' */
- { 0x18, KEY_VIDEO }, /* 'display' */
- { 0x38, KEY_SEARCH }, /* 'loop' */
- { 0x08, KEY_INFO }, /* 'preview' */
- { 0x2a, KEY_REWIND }, /* 'backward <<' */
- { 0x1a, KEY_FASTFORWARD }, /* 'forward >>' */
- { 0x3a, KEY_RECORD }, /* 'capture' */
- { 0x0a, KEY_MUTE }, /* 'mute' */
- { 0x2c, KEY_RECORD }, /* 'record' */
- { 0x1c, KEY_PAUSE }, /* 'pause' */
- { 0x3c, KEY_STOP }, /* 'stop' */
- { 0x0c, KEY_PLAY }, /* 'play' */
- { 0x2e, KEY_RED }, /* 'red' */
- { 0x01, KEY_BLUE }, /* 'blue' / 'cancel' */
- { 0x0e, KEY_YELLOW }, /* 'yellow' / 'ok' */
- { 0x21, KEY_GREEN }, /* 'green' */
- { 0x11, KEY_CHANNELDOWN }, /* 'channel -' */
- { 0x31, KEY_CHANNELUP }, /* 'channel +' */
- { 0x1e, KEY_VOLUMEDOWN }, /* 'volume -' */
- { 0x3e, KEY_VOLUMEUP }, /* 'volume +' */
-};
-
-struct ir_scancode_table ir_codes_avermedia_dvbt_table = {
- .scan = ir_codes_avermedia_dvbt,
- .size = ARRAY_SIZE(ir_codes_avermedia_dvbt),
-};
-EXPORT_SYMBOL_GPL(ir_codes_avermedia_dvbt_table);
-
-/* Mauro Carvalho Chehab <mchehab@infradead.org> */
-static struct ir_scancode ir_codes_avermedia_m135a[] = {
- { 0x00, KEY_POWER2 },
- { 0x2e, KEY_DOT }, /* '.' */
- { 0x01, KEY_MODE }, /* TV/FM */
-
- { 0x05, KEY_1 },
- { 0x06, KEY_2 },
- { 0x07, KEY_3 },
- { 0x09, KEY_4 },
- { 0x0a, KEY_5 },
- { 0x0b, KEY_6 },
- { 0x0d, KEY_7 },
- { 0x0e, KEY_8 },
- { 0x0f, KEY_9 },
- { 0x11, KEY_0 },
-
- { 0x13, KEY_RIGHT }, /* -> */
- { 0x12, KEY_LEFT }, /* <- */
-
- { 0x17, KEY_SLEEP }, /* Capturar Imagem */
- { 0x10, KEY_SHUFFLE }, /* Amostra */
-
-	/* FIXME: The keys below aren't ok */
-
- { 0x43, KEY_CHANNELUP },
- { 0x42, KEY_CHANNELDOWN },
- { 0x1f, KEY_VOLUMEUP },
- { 0x1e, KEY_VOLUMEDOWN },
- { 0x0c, KEY_ENTER },
-
- { 0x14, KEY_MUTE },
- { 0x08, KEY_AUDIO },
-
- { 0x03, KEY_TEXT },
- { 0x04, KEY_EPG },
- { 0x2b, KEY_TV2 }, /* TV2 */
-
- { 0x1d, KEY_RED },
- { 0x1c, KEY_YELLOW },
- { 0x41, KEY_GREEN },
- { 0x40, KEY_BLUE },
-
- { 0x1a, KEY_PLAYPAUSE },
- { 0x19, KEY_RECORD },
- { 0x18, KEY_PLAY },
- { 0x1b, KEY_STOP },
-};
-
-struct ir_scancode_table ir_codes_avermedia_m135a_table = {
- .scan = ir_codes_avermedia_m135a,
- .size = ARRAY_SIZE(ir_codes_avermedia_m135a),
-};
-EXPORT_SYMBOL_GPL(ir_codes_avermedia_m135a_table);
-
-/* Oldrich Jedlicka <oldium.pro@seznam.cz> */
-static struct ir_scancode ir_codes_avermedia_cardbus[] = {
- { 0x00, KEY_POWER },
- { 0x01, KEY_TUNER }, /* TV/FM */
- { 0x03, KEY_TEXT }, /* Teletext */
- { 0x04, KEY_EPG },
- { 0x05, KEY_1 },
- { 0x06, KEY_2 },
- { 0x07, KEY_3 },
- { 0x08, KEY_AUDIO },
- { 0x09, KEY_4 },
- { 0x0a, KEY_5 },
- { 0x0b, KEY_6 },
- { 0x0c, KEY_ZOOM }, /* Full screen */
- { 0x0d, KEY_7 },
- { 0x0e, KEY_8 },
- { 0x0f, KEY_9 },
- { 0x10, KEY_PAGEUP }, /* 16-CH PREV */
- { 0x11, KEY_0 },
- { 0x12, KEY_INFO },
- { 0x13, KEY_AGAIN }, /* CH RTN - channel return */
- { 0x14, KEY_MUTE },
- { 0x15, KEY_EDIT }, /* Autoscan */
- { 0x17, KEY_SAVE }, /* Screenshot */
- { 0x18, KEY_PLAYPAUSE },
- { 0x19, KEY_RECORD },
- { 0x1a, KEY_PLAY },
- { 0x1b, KEY_STOP },
- { 0x1c, KEY_FASTFORWARD },
- { 0x1d, KEY_REWIND },
- { 0x1e, KEY_VOLUMEDOWN },
- { 0x1f, KEY_VOLUMEUP },
- { 0x22, KEY_SLEEP }, /* Sleep */
- { 0x23, KEY_ZOOM }, /* Aspect */
- { 0x26, KEY_SCREEN }, /* Pos */
- { 0x27, KEY_ANGLE }, /* Size */
- { 0x28, KEY_SELECT }, /* Select */
- { 0x29, KEY_BLUE }, /* Blue/Picture */
- { 0x2a, KEY_BACKSPACE }, /* Back */
- { 0x2b, KEY_MEDIA }, /* PIP (Picture-in-picture) */
- { 0x2c, KEY_DOWN },
- { 0x2e, KEY_DOT },
- { 0x2f, KEY_TV }, /* Live TV */
- { 0x32, KEY_LEFT },
- { 0x33, KEY_CLEAR }, /* Clear */
- { 0x35, KEY_RED }, /* Red/TV */
- { 0x36, KEY_UP },
- { 0x37, KEY_HOME }, /* Home */
- { 0x39, KEY_GREEN }, /* Green/Video */
- { 0x3d, KEY_YELLOW }, /* Yellow/Music */
- { 0x3e, KEY_OK }, /* Ok */
- { 0x3f, KEY_RIGHT },
- { 0x40, KEY_NEXT }, /* Next */
- { 0x41, KEY_PREVIOUS }, /* Previous */
- { 0x42, KEY_CHANNELDOWN }, /* Channel down */
- { 0x43, KEY_CHANNELUP }, /* Channel up */
-};
-
-struct ir_scancode_table ir_codes_avermedia_cardbus_table = {
- .scan = ir_codes_avermedia_cardbus,
- .size = ARRAY_SIZE(ir_codes_avermedia_cardbus),
-};
-EXPORT_SYMBOL_GPL(ir_codes_avermedia_cardbus_table);
-
-/* Attila Kondoros <attila.kondoros@chello.hu> */
-static struct ir_scancode ir_codes_apac_viewcomp[] = {
-
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
- { 0x00, KEY_0 },
- { 0x17, KEY_LAST }, /* +100 */
- { 0x0a, KEY_LIST }, /* recall */
-
-
- { 0x1c, KEY_TUNER }, /* TV/FM */
- { 0x15, KEY_SEARCH }, /* scan */
- { 0x12, KEY_POWER }, /* power */
- { 0x1f, KEY_VOLUMEDOWN }, /* vol up */
- { 0x1b, KEY_VOLUMEUP }, /* vol down */
- { 0x1e, KEY_CHANNELDOWN }, /* chn up */
- { 0x1a, KEY_CHANNELUP }, /* chn down */
-
- { 0x11, KEY_VIDEO }, /* video */
- { 0x0f, KEY_ZOOM }, /* full screen */
- { 0x13, KEY_MUTE }, /* mute/unmute */
- { 0x10, KEY_TEXT }, /* min */
-
- { 0x0d, KEY_STOP }, /* freeze */
- { 0x0e, KEY_RECORD }, /* record */
- { 0x1d, KEY_PLAYPAUSE }, /* stop */
- { 0x19, KEY_PLAY }, /* play */
-
- { 0x16, KEY_GOTO }, /* osd */
- { 0x14, KEY_REFRESH }, /* default */
- { 0x0c, KEY_KPPLUS }, /* fine tune >>>> */
- { 0x18, KEY_KPMINUS }, /* fine tune <<<< */
-};
-
-struct ir_scancode_table ir_codes_apac_viewcomp_table = {
- .scan = ir_codes_apac_viewcomp,
- .size = ARRAY_SIZE(ir_codes_apac_viewcomp),
-};
-EXPORT_SYMBOL_GPL(ir_codes_apac_viewcomp_table);
-
-/* ---------------------------------------------------------------------- */
-
-static struct ir_scancode ir_codes_pixelview[] = {
-
- { 0x1e, KEY_POWER }, /* power */
- { 0x07, KEY_MEDIA }, /* source */
- { 0x1c, KEY_SEARCH }, /* scan */
-
-
- { 0x03, KEY_TUNER }, /* TV/FM */
-
- { 0x00, KEY_RECORD },
- { 0x08, KEY_STOP },
- { 0x11, KEY_PLAY },
-
- { 0x1a, KEY_PLAYPAUSE }, /* freeze */
- { 0x19, KEY_ZOOM }, /* zoom */
- { 0x0f, KEY_TEXT }, /* min */
-
- { 0x01, KEY_1 },
- { 0x0b, KEY_2 },
- { 0x1b, KEY_3 },
- { 0x05, KEY_4 },
- { 0x09, KEY_5 },
- { 0x15, KEY_6 },
- { 0x06, KEY_7 },
- { 0x0a, KEY_8 },
- { 0x12, KEY_9 },
- { 0x02, KEY_0 },
- { 0x10, KEY_LAST }, /* +100 */
- { 0x13, KEY_LIST }, /* recall */
-
- { 0x1f, KEY_CHANNELUP }, /* chn down */
- { 0x17, KEY_CHANNELDOWN }, /* chn up */
- { 0x16, KEY_VOLUMEUP }, /* vol down */
- { 0x14, KEY_VOLUMEDOWN }, /* vol up */
-
- { 0x04, KEY_KPMINUS }, /* <<< */
- { 0x0e, KEY_SETUP }, /* function */
- { 0x0c, KEY_KPPLUS }, /* >>> */
-
- { 0x0d, KEY_GOTO }, /* mts */
- { 0x1d, KEY_REFRESH }, /* reset */
- { 0x18, KEY_MUTE }, /* mute/unmute */
-};
-
-struct ir_scancode_table ir_codes_pixelview_table = {
- .scan = ir_codes_pixelview,
- .size = ARRAY_SIZE(ir_codes_pixelview),
-};
-EXPORT_SYMBOL_GPL(ir_codes_pixelview_table);
-
-/*
- Mauro Carvalho Chehab <mchehab@infradead.org>
- present on PV MPEG 8000GT
- */
-static struct ir_scancode ir_codes_pixelview_new[] = {
- { 0x3c, KEY_TIME }, /* Timeshift */
- { 0x12, KEY_POWER },
-
- { 0x3d, KEY_1 },
- { 0x38, KEY_2 },
- { 0x18, KEY_3 },
- { 0x35, KEY_4 },
- { 0x39, KEY_5 },
- { 0x15, KEY_6 },
- { 0x36, KEY_7 },
- { 0x3a, KEY_8 },
- { 0x1e, KEY_9 },
- { 0x3e, KEY_0 },
-
- { 0x1c, KEY_AGAIN }, /* LOOP */
- { 0x3f, KEY_MEDIA }, /* Source */
- { 0x1f, KEY_LAST }, /* +100 */
- { 0x1b, KEY_MUTE },
-
- { 0x17, KEY_CHANNELDOWN },
- { 0x16, KEY_CHANNELUP },
- { 0x10, KEY_VOLUMEUP },
- { 0x14, KEY_VOLUMEDOWN },
- { 0x13, KEY_ZOOM },
-
- { 0x19, KEY_CAMERA }, /* SNAPSHOT */
- { 0x1a, KEY_SEARCH }, /* scan */
-
- { 0x37, KEY_REWIND }, /* << */
- { 0x32, KEY_RECORD }, /* o (red) */
- { 0x33, KEY_FORWARD }, /* >> */
- { 0x11, KEY_STOP }, /* square */
- { 0x3b, KEY_PLAY }, /* > */
- { 0x30, KEY_PLAYPAUSE }, /* || */
-
- { 0x31, KEY_TV },
- { 0x34, KEY_RADIO },
-};
-
-struct ir_scancode_table ir_codes_pixelview_new_table = {
- .scan = ir_codes_pixelview_new,
- .size = ARRAY_SIZE(ir_codes_pixelview_new),
-};
-EXPORT_SYMBOL_GPL(ir_codes_pixelview_new_table);
-
-static struct ir_scancode ir_codes_nebula[] = {
- { 0x00, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
- { 0x0a, KEY_TV },
- { 0x0b, KEY_AUX },
- { 0x0c, KEY_DVD },
- { 0x0d, KEY_POWER },
- { 0x0e, KEY_MHP }, /* labelled 'Picture' */
- { 0x0f, KEY_AUDIO },
- { 0x10, KEY_INFO },
- { 0x11, KEY_F13 }, /* 16:9 */
- { 0x12, KEY_F14 }, /* 14:9 */
- { 0x13, KEY_EPG },
- { 0x14, KEY_EXIT },
- { 0x15, KEY_MENU },
- { 0x16, KEY_UP },
- { 0x17, KEY_DOWN },
- { 0x18, KEY_LEFT },
- { 0x19, KEY_RIGHT },
- { 0x1a, KEY_ENTER },
- { 0x1b, KEY_CHANNELUP },
- { 0x1c, KEY_CHANNELDOWN },
- { 0x1d, KEY_VOLUMEUP },
- { 0x1e, KEY_VOLUMEDOWN },
- { 0x1f, KEY_RED },
- { 0x20, KEY_GREEN },
- { 0x21, KEY_YELLOW },
- { 0x22, KEY_BLUE },
- { 0x23, KEY_SUBTITLE },
- { 0x24, KEY_F15 }, /* AD */
- { 0x25, KEY_TEXT },
- { 0x26, KEY_MUTE },
- { 0x27, KEY_REWIND },
- { 0x28, KEY_STOP },
- { 0x29, KEY_PLAY },
- { 0x2a, KEY_FASTFORWARD },
- { 0x2b, KEY_F16 }, /* chapter */
- { 0x2c, KEY_PAUSE },
- { 0x2d, KEY_PLAY },
- { 0x2e, KEY_RECORD },
- { 0x2f, KEY_F17 }, /* picture in picture */
- { 0x30, KEY_KPPLUS }, /* zoom in */
- { 0x31, KEY_KPMINUS }, /* zoom out */
- { 0x32, KEY_F18 }, /* capture */
- { 0x33, KEY_F19 }, /* web */
- { 0x34, KEY_EMAIL },
- { 0x35, KEY_PHONE },
- { 0x36, KEY_PC },
-};
-
-struct ir_scancode_table ir_codes_nebula_table = {
- .scan = ir_codes_nebula,
- .size = ARRAY_SIZE(ir_codes_nebula),
-};
-EXPORT_SYMBOL_GPL(ir_codes_nebula_table);
-
-/* DigitalNow DNTV Live DVB-T Remote */
-static struct ir_scancode ir_codes_dntv_live_dvb_t[] = {
- { 0x00, KEY_ESC }, /* 'go up a level?' */
- /* Keys 0 to 9 */
- { 0x0a, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
-
- { 0x0b, KEY_TUNER }, /* tv/fm */
- { 0x0c, KEY_SEARCH }, /* scan */
- { 0x0d, KEY_STOP },
- { 0x0e, KEY_PAUSE },
- { 0x0f, KEY_LIST }, /* source */
-
- { 0x10, KEY_MUTE },
- { 0x11, KEY_REWIND }, /* backward << */
- { 0x12, KEY_POWER },
- { 0x13, KEY_CAMERA }, /* snap */
- { 0x14, KEY_AUDIO }, /* stereo */
- { 0x15, KEY_CLEAR }, /* reset */
- { 0x16, KEY_PLAY },
- { 0x17, KEY_ENTER },
- { 0x18, KEY_ZOOM }, /* full screen */
- { 0x19, KEY_FASTFORWARD }, /* forward >> */
- { 0x1a, KEY_CHANNELUP },
- { 0x1b, KEY_VOLUMEUP },
- { 0x1c, KEY_INFO }, /* preview */
- { 0x1d, KEY_RECORD }, /* record */
- { 0x1e, KEY_CHANNELDOWN },
- { 0x1f, KEY_VOLUMEDOWN },
-};
-
-struct ir_scancode_table ir_codes_dntv_live_dvb_t_table = {
- .scan = ir_codes_dntv_live_dvb_t,
- .size = ARRAY_SIZE(ir_codes_dntv_live_dvb_t),
-};
-EXPORT_SYMBOL_GPL(ir_codes_dntv_live_dvb_t_table);
-
-/* ---------------------------------------------------------------------- */
-
-/* IO-DATA BCTV7E Remote */
-static struct ir_scancode ir_codes_iodata_bctv7e[] = {
- { 0x40, KEY_TV },
- { 0x20, KEY_RADIO }, /* FM */
- { 0x60, KEY_EPG },
- { 0x00, KEY_POWER },
-
- /* Keys 0 to 9 */
- { 0x44, KEY_0 }, /* 10 */
- { 0x50, KEY_1 },
- { 0x30, KEY_2 },
- { 0x70, KEY_3 },
- { 0x48, KEY_4 },
- { 0x28, KEY_5 },
- { 0x68, KEY_6 },
- { 0x58, KEY_7 },
- { 0x38, KEY_8 },
- { 0x78, KEY_9 },
-
- { 0x10, KEY_L }, /* Live */
- { 0x08, KEY_TIME }, /* Time Shift */
-
- { 0x18, KEY_PLAYPAUSE }, /* Play */
-
- { 0x24, KEY_ENTER }, /* 11 */
- { 0x64, KEY_ESC }, /* 12 */
- { 0x04, KEY_M }, /* Multi */
-
- { 0x54, KEY_VIDEO },
- { 0x34, KEY_CHANNELUP },
- { 0x74, KEY_VOLUMEUP },
- { 0x14, KEY_MUTE },
-
- { 0x4c, KEY_VCR }, /* SVIDEO */
- { 0x2c, KEY_CHANNELDOWN },
- { 0x6c, KEY_VOLUMEDOWN },
- { 0x0c, KEY_ZOOM },
-
- { 0x5c, KEY_PAUSE },
- { 0x3c, KEY_RED }, /* || (red) */
- { 0x7c, KEY_RECORD }, /* recording */
- { 0x1c, KEY_STOP },
-
- { 0x41, KEY_REWIND }, /* backward << */
- { 0x21, KEY_PLAY },
- { 0x61, KEY_FASTFORWARD }, /* forward >> */
- { 0x01, KEY_NEXT }, /* skip >| */
-};
-
-struct ir_scancode_table ir_codes_iodata_bctv7e_table = {
- .scan = ir_codes_iodata_bctv7e,
- .size = ARRAY_SIZE(ir_codes_iodata_bctv7e),
-};
-EXPORT_SYMBOL_GPL(ir_codes_iodata_bctv7e_table);
-
-/* ---------------------------------------------------------------------- */
-
-/* ADS Tech Instant TV DVB-T PCI Remote */
-static struct ir_scancode ir_codes_adstech_dvb_t_pci[] = {
- /* Keys 0 to 9 */
- { 0x4d, KEY_0 },
- { 0x57, KEY_1 },
- { 0x4f, KEY_2 },
- { 0x53, KEY_3 },
- { 0x56, KEY_4 },
- { 0x4e, KEY_5 },
- { 0x5e, KEY_6 },
- { 0x54, KEY_7 },
- { 0x4c, KEY_8 },
- { 0x5c, KEY_9 },
-
- { 0x5b, KEY_POWER },
- { 0x5f, KEY_MUTE },
- { 0x55, KEY_GOTO },
- { 0x5d, KEY_SEARCH },
- { 0x17, KEY_EPG }, /* Guide */
- { 0x1f, KEY_MENU },
- { 0x0f, KEY_UP },
- { 0x46, KEY_DOWN },
- { 0x16, KEY_LEFT },
- { 0x1e, KEY_RIGHT },
- { 0x0e, KEY_SELECT }, /* Enter */
- { 0x5a, KEY_INFO },
- { 0x52, KEY_EXIT },
- { 0x59, KEY_PREVIOUS },
- { 0x51, KEY_NEXT },
- { 0x58, KEY_REWIND },
- { 0x50, KEY_FORWARD },
- { 0x44, KEY_PLAYPAUSE },
- { 0x07, KEY_STOP },
- { 0x1b, KEY_RECORD },
- { 0x13, KEY_TUNER }, /* Live */
- { 0x0a, KEY_A },
- { 0x12, KEY_B },
- { 0x03, KEY_PROG1 }, /* 1 */
- { 0x01, KEY_PROG2 }, /* 2 */
- { 0x00, KEY_PROG3 }, /* 3 */
- { 0x06, KEY_DVD },
- { 0x48, KEY_AUX }, /* Photo */
- { 0x40, KEY_VIDEO },
- { 0x19, KEY_AUDIO }, /* Music */
- { 0x0b, KEY_CHANNELUP },
- { 0x08, KEY_CHANNELDOWN },
- { 0x15, KEY_VOLUMEUP },
- { 0x1c, KEY_VOLUMEDOWN },
-};
-
-struct ir_scancode_table ir_codes_adstech_dvb_t_pci_table = {
- .scan = ir_codes_adstech_dvb_t_pci,
- .size = ARRAY_SIZE(ir_codes_adstech_dvb_t_pci),
-};
-EXPORT_SYMBOL_GPL(ir_codes_adstech_dvb_t_pci_table);
-
-/* ---------------------------------------------------------------------- */
-
-/* MSI TV@nywhere MASTER remote */
-
-static struct ir_scancode ir_codes_msi_tvanywhere[] = {
- /* Keys 0 to 9 */
- { 0x00, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
-
- { 0x0c, KEY_MUTE },
- { 0x0f, KEY_SCREEN }, /* Full Screen */
-	{ 0x10, KEY_FN },	/* Function */
- { 0x11, KEY_TIME }, /* Time shift */
- { 0x12, KEY_POWER },
- { 0x13, KEY_MEDIA }, /* MTS */
- { 0x14, KEY_SLOW },
- { 0x16, KEY_REWIND }, /* backward << */
- { 0x17, KEY_ENTER }, /* Return */
- { 0x18, KEY_FASTFORWARD }, /* forward >> */
- { 0x1a, KEY_CHANNELUP },
- { 0x1b, KEY_VOLUMEUP },
- { 0x1e, KEY_CHANNELDOWN },
- { 0x1f, KEY_VOLUMEDOWN },
-};
-
-struct ir_scancode_table ir_codes_msi_tvanywhere_table = {
- .scan = ir_codes_msi_tvanywhere,
- .size = ARRAY_SIZE(ir_codes_msi_tvanywhere),
-};
-EXPORT_SYMBOL_GPL(ir_codes_msi_tvanywhere_table);
-
-/* ---------------------------------------------------------------------- */
-
-/*
- Keycodes for remote on the MSI TV@nywhere Plus. The controller IC on the card
- is marked "KS003". The controller is I2C at address 0x30, but does not seem
- to respond to probes until a read is performed from a valid device.
- I don't know why...
-
- Note: This remote may be of similar or identical design to the
- Pixelview remote (?). The raw codes and duplicate button codes
- appear to be the same.
-
- Henry Wong <henry@stuffedcow.net>
- Some changes to formatting and keycodes by Mark Schultz <n9xmj@yahoo.com>
-
-*/
-
-static struct ir_scancode ir_codes_msi_tvanywhere_plus[] = {
-
-/* ---- Remote Button Layout ----
-
- POWER SOURCE SCAN MUTE
- TV/FM 1 2 3
- |> 4 5 6
- <| 7 8 9
- ^^UP 0 + RECALL
- vvDN RECORD STOP PLAY
-
- MINIMIZE ZOOM
-
- CH+
- VOL- VOL+
- CH-
-
- SNAPSHOT MTS
-
- << FUNC >> RESET
-*/
-
- { 0x01, KEY_1 }, /* 1 */
- { 0x0b, KEY_2 }, /* 2 */
- { 0x1b, KEY_3 }, /* 3 */
- { 0x05, KEY_4 }, /* 4 */
- { 0x09, KEY_5 }, /* 5 */
- { 0x15, KEY_6 }, /* 6 */
- { 0x06, KEY_7 }, /* 7 */
- { 0x0a, KEY_8 }, /* 8 */
- { 0x12, KEY_9 }, /* 9 */
- { 0x02, KEY_0 }, /* 0 */
- { 0x10, KEY_KPPLUS }, /* + */
- { 0x13, KEY_AGAIN }, /* Recall */
-
- { 0x1e, KEY_POWER }, /* Power */
- { 0x07, KEY_TUNER }, /* Source */
- { 0x1c, KEY_SEARCH }, /* Scan */
- { 0x18, KEY_MUTE }, /* Mute */
-
- { 0x03, KEY_RADIO }, /* TV/FM */
- /* The next four keys are duplicates that appear to send the
- same IR code as Ch+, Ch-, >>, and << . The raw code assigned
- to them is the actual code + 0x20 - they will never be
- detected as such unless some way is discovered to distinguish
- these buttons from those that have the same code. */
- { 0x3f, KEY_RIGHT }, /* |> and Ch+ */
- { 0x37, KEY_LEFT }, /* <| and Ch- */
- { 0x2c, KEY_UP }, /* ^^Up and >> */
- { 0x24, KEY_DOWN }, /* vvDn and << */
-
- { 0x00, KEY_RECORD }, /* Record */
- { 0x08, KEY_STOP }, /* Stop */
- { 0x11, KEY_PLAY }, /* Play */
-
- { 0x0f, KEY_CLOSE }, /* Minimize */
- { 0x19, KEY_ZOOM }, /* Zoom */
- { 0x1a, KEY_CAMERA }, /* Snapshot */
- { 0x0d, KEY_LANGUAGE }, /* MTS */
-
- { 0x14, KEY_VOLUMEDOWN }, /* Vol- */
- { 0x16, KEY_VOLUMEUP }, /* Vol+ */
- { 0x17, KEY_CHANNELDOWN }, /* Ch- */
- { 0x1f, KEY_CHANNELUP }, /* Ch+ */
-
- { 0x04, KEY_REWIND }, /* << */
- { 0x0e, KEY_MENU }, /* Function */
- { 0x0c, KEY_FASTFORWARD }, /* >> */
- { 0x1d, KEY_RESTART }, /* Reset */
-};
-
-struct ir_scancode_table ir_codes_msi_tvanywhere_plus_table = {
- .scan = ir_codes_msi_tvanywhere_plus,
- .size = ARRAY_SIZE(ir_codes_msi_tvanywhere_plus),
-};
-EXPORT_SYMBOL_GPL(ir_codes_msi_tvanywhere_plus_table);
-
-/* ---------------------------------------------------------------------- */
-
-/* Cinergy 1400 DVB-T */
-static struct ir_scancode ir_codes_cinergy_1400[] = {
- { 0x01, KEY_POWER },
- { 0x02, KEY_1 },
- { 0x03, KEY_2 },
- { 0x04, KEY_3 },
- { 0x05, KEY_4 },
- { 0x06, KEY_5 },
- { 0x07, KEY_6 },
- { 0x08, KEY_7 },
- { 0x09, KEY_8 },
- { 0x0a, KEY_9 },
- { 0x0c, KEY_0 },
-
- { 0x0b, KEY_VIDEO },
- { 0x0d, KEY_REFRESH },
- { 0x0e, KEY_SELECT },
- { 0x0f, KEY_EPG },
- { 0x10, KEY_UP },
- { 0x11, KEY_LEFT },
- { 0x12, KEY_OK },
- { 0x13, KEY_RIGHT },
- { 0x14, KEY_DOWN },
- { 0x15, KEY_TEXT },
- { 0x16, KEY_INFO },
-
- { 0x17, KEY_RED },
- { 0x18, KEY_GREEN },
- { 0x19, KEY_YELLOW },
- { 0x1a, KEY_BLUE },
-
- { 0x1b, KEY_CHANNELUP },
- { 0x1c, KEY_VOLUMEUP },
- { 0x1d, KEY_MUTE },
- { 0x1e, KEY_VOLUMEDOWN },
- { 0x1f, KEY_CHANNELDOWN },
-
- { 0x40, KEY_PAUSE },
- { 0x4c, KEY_PLAY },
- { 0x58, KEY_RECORD },
- { 0x54, KEY_PREVIOUS },
- { 0x48, KEY_STOP },
- { 0x5c, KEY_NEXT },
-};
-
-struct ir_scancode_table ir_codes_cinergy_1400_table = {
- .scan = ir_codes_cinergy_1400,
- .size = ARRAY_SIZE(ir_codes_cinergy_1400),
-};
-EXPORT_SYMBOL_GPL(ir_codes_cinergy_1400_table);
-
-/* ---------------------------------------------------------------------- */
-
-/* AVERTV STUDIO 303 Remote */
-static struct ir_scancode ir_codes_avertv_303[] = {
- { 0x2a, KEY_1 },
- { 0x32, KEY_2 },
- { 0x3a, KEY_3 },
- { 0x4a, KEY_4 },
- { 0x52, KEY_5 },
- { 0x5a, KEY_6 },
- { 0x6a, KEY_7 },
- { 0x72, KEY_8 },
- { 0x7a, KEY_9 },
- { 0x0e, KEY_0 },
-
- { 0x02, KEY_POWER },
- { 0x22, KEY_VIDEO },
- { 0x42, KEY_AUDIO },
- { 0x62, KEY_ZOOM },
- { 0x0a, KEY_TV },
- { 0x12, KEY_CD },
- { 0x1a, KEY_TEXT },
-
- { 0x16, KEY_SUBTITLE },
- { 0x1e, KEY_REWIND },
- { 0x06, KEY_PRINT },
-
- { 0x2e, KEY_SEARCH },
- { 0x36, KEY_SLEEP },
- { 0x3e, KEY_SHUFFLE },
- { 0x26, KEY_MUTE },
-
- { 0x4e, KEY_RECORD },
- { 0x56, KEY_PAUSE },
- { 0x5e, KEY_STOP },
- { 0x46, KEY_PLAY },
-
- { 0x6e, KEY_RED },
- { 0x0b, KEY_GREEN },
- { 0x66, KEY_YELLOW },
- { 0x03, KEY_BLUE },
-
- { 0x76, KEY_LEFT },
- { 0x7e, KEY_RIGHT },
- { 0x13, KEY_DOWN },
- { 0x1b, KEY_UP },
-};
-
-struct ir_scancode_table ir_codes_avertv_303_table = {
- .scan = ir_codes_avertv_303,
- .size = ARRAY_SIZE(ir_codes_avertv_303),
-};
-EXPORT_SYMBOL_GPL(ir_codes_avertv_303_table);
-
-/* ---------------------------------------------------------------------- */
-
-/* DigitalNow DNTV Live! DVB-T Pro Remote */
-static struct ir_scancode ir_codes_dntv_live_dvbt_pro[] = {
- { 0x16, KEY_POWER },
- { 0x5b, KEY_HOME },
-
- { 0x55, KEY_TV }, /* live tv */
- { 0x58, KEY_TUNER }, /* digital Radio */
- { 0x5a, KEY_RADIO }, /* FM radio */
- { 0x59, KEY_DVD }, /* dvd menu */
- { 0x03, KEY_1 },
- { 0x01, KEY_2 },
- { 0x06, KEY_3 },
- { 0x09, KEY_4 },
- { 0x1d, KEY_5 },
- { 0x1f, KEY_6 },
- { 0x0d, KEY_7 },
- { 0x19, KEY_8 },
- { 0x1b, KEY_9 },
- { 0x0c, KEY_CANCEL },
- { 0x15, KEY_0 },
- { 0x4a, KEY_CLEAR },
- { 0x13, KEY_BACK },
- { 0x00, KEY_TAB },
- { 0x4b, KEY_UP },
- { 0x4e, KEY_LEFT },
- { 0x4f, KEY_OK },
- { 0x52, KEY_RIGHT },
- { 0x51, KEY_DOWN },
- { 0x1e, KEY_VOLUMEUP },
- { 0x0a, KEY_VOLUMEDOWN },
- { 0x02, KEY_CHANNELDOWN },
- { 0x05, KEY_CHANNELUP },
- { 0x11, KEY_RECORD },
- { 0x14, KEY_PLAY },
- { 0x4c, KEY_PAUSE },
- { 0x1a, KEY_STOP },
- { 0x40, KEY_REWIND },
- { 0x12, KEY_FASTFORWARD },
- { 0x41, KEY_PREVIOUSSONG }, /* replay |< */
- { 0x42, KEY_NEXTSONG }, /* skip >| */
- { 0x54, KEY_CAMERA }, /* capture */
- { 0x50, KEY_LANGUAGE }, /* sap */
- { 0x47, KEY_TV2 }, /* pip */
- { 0x4d, KEY_SCREEN },
- { 0x43, KEY_SUBTITLE },
- { 0x10, KEY_MUTE },
- { 0x49, KEY_AUDIO }, /* l/r */
- { 0x07, KEY_SLEEP },
- { 0x08, KEY_VIDEO }, /* a/v */
- { 0x0e, KEY_PREVIOUS }, /* recall */
- { 0x45, KEY_ZOOM }, /* zoom + */
- { 0x46, KEY_ANGLE }, /* zoom - */
- { 0x56, KEY_RED },
- { 0x57, KEY_GREEN },
- { 0x5c, KEY_YELLOW },
- { 0x5d, KEY_BLUE },
-};
-
-struct ir_scancode_table ir_codes_dntv_live_dvbt_pro_table = {
- .scan = ir_codes_dntv_live_dvbt_pro,
- .size = ARRAY_SIZE(ir_codes_dntv_live_dvbt_pro),
-};
-EXPORT_SYMBOL_GPL(ir_codes_dntv_live_dvbt_pro_table);
-
-static struct ir_scancode ir_codes_em_terratec[] = {
- { 0x01, KEY_CHANNEL },
- { 0x02, KEY_SELECT },
- { 0x03, KEY_MUTE },
- { 0x04, KEY_POWER },
- { 0x05, KEY_1 },
- { 0x06, KEY_2 },
- { 0x07, KEY_3 },
- { 0x08, KEY_CHANNELUP },
- { 0x09, KEY_4 },
- { 0x0a, KEY_5 },
- { 0x0b, KEY_6 },
- { 0x0c, KEY_CHANNELDOWN },
- { 0x0d, KEY_7 },
- { 0x0e, KEY_8 },
- { 0x0f, KEY_9 },
- { 0x10, KEY_VOLUMEUP },
- { 0x11, KEY_0 },
- { 0x12, KEY_MENU },
- { 0x13, KEY_PRINT },
- { 0x14, KEY_VOLUMEDOWN },
- { 0x16, KEY_PAUSE },
- { 0x18, KEY_RECORD },
- { 0x19, KEY_REWIND },
- { 0x1a, KEY_PLAY },
- { 0x1b, KEY_FORWARD },
- { 0x1c, KEY_BACKSPACE },
- { 0x1e, KEY_STOP },
- { 0x40, KEY_ZOOM },
-};
-
-struct ir_scancode_table ir_codes_em_terratec_table = {
- .scan = ir_codes_em_terratec,
- .size = ARRAY_SIZE(ir_codes_em_terratec),
-};
-EXPORT_SYMBOL_GPL(ir_codes_em_terratec_table);
-
-static struct ir_scancode ir_codes_pinnacle_grey[] = {
- { 0x3a, KEY_0 },
- { 0x31, KEY_1 },
- { 0x32, KEY_2 },
- { 0x33, KEY_3 },
- { 0x34, KEY_4 },
- { 0x35, KEY_5 },
- { 0x36, KEY_6 },
- { 0x37, KEY_7 },
- { 0x38, KEY_8 },
- { 0x39, KEY_9 },
-
- { 0x2f, KEY_POWER },
-
- { 0x2e, KEY_P },
- { 0x1f, KEY_L },
- { 0x2b, KEY_I },
-
- { 0x2d, KEY_SCREEN },
- { 0x1e, KEY_ZOOM },
- { 0x1b, KEY_VOLUMEUP },
- { 0x0f, KEY_VOLUMEDOWN },
- { 0x17, KEY_CHANNELUP },
- { 0x1c, KEY_CHANNELDOWN },
- { 0x25, KEY_INFO },
-
- { 0x3c, KEY_MUTE },
-
- { 0x3d, KEY_LEFT },
- { 0x3b, KEY_RIGHT },
-
- { 0x3f, KEY_UP },
- { 0x3e, KEY_DOWN },
- { 0x1a, KEY_ENTER },
-
- { 0x1d, KEY_MENU },
- { 0x19, KEY_AGAIN },
- { 0x16, KEY_PREVIOUSSONG },
- { 0x13, KEY_NEXTSONG },
- { 0x15, KEY_PAUSE },
- { 0x0e, KEY_REWIND },
- { 0x0d, KEY_PLAY },
- { 0x0b, KEY_STOP },
- { 0x07, KEY_FORWARD },
- { 0x27, KEY_RECORD },
- { 0x26, KEY_TUNER },
- { 0x29, KEY_TEXT },
- { 0x2a, KEY_MEDIA },
- { 0x18, KEY_EPG },
-};
-
-struct ir_scancode_table ir_codes_pinnacle_grey_table = {
- .scan = ir_codes_pinnacle_grey,
- .size = ARRAY_SIZE(ir_codes_pinnacle_grey),
-};
-EXPORT_SYMBOL_GPL(ir_codes_pinnacle_grey_table);
-
-static struct ir_scancode ir_codes_flyvideo[] = {
- { 0x0f, KEY_0 },
- { 0x03, KEY_1 },
- { 0x04, KEY_2 },
- { 0x05, KEY_3 },
- { 0x07, KEY_4 },
- { 0x08, KEY_5 },
- { 0x09, KEY_6 },
- { 0x0b, KEY_7 },
- { 0x0c, KEY_8 },
- { 0x0d, KEY_9 },
-
- { 0x0e, KEY_MODE }, /* Air/Cable */
- { 0x11, KEY_VIDEO }, /* Video */
- { 0x15, KEY_AUDIO }, /* Audio */
- { 0x00, KEY_POWER }, /* Power */
- { 0x18, KEY_TUNER }, /* AV Source */
- { 0x02, KEY_ZOOM }, /* Fullscreen */
- { 0x1a, KEY_LANGUAGE }, /* Stereo */
- { 0x1b, KEY_MUTE }, /* Mute */
- { 0x14, KEY_VOLUMEUP }, /* Volume + */
- { 0x17, KEY_VOLUMEDOWN },/* Volume - */
- { 0x12, KEY_CHANNELUP },/* Channel + */
- { 0x13, KEY_CHANNELDOWN },/* Channel - */
- { 0x06, KEY_AGAIN }, /* Recall */
- { 0x10, KEY_ENTER }, /* Enter */
-
- { 0x19, KEY_BACK }, /* Rewind ( <<< ) */
- { 0x1f, KEY_FORWARD }, /* Forward ( >>> ) */
- { 0x0a, KEY_ANGLE }, /* no label, may be used as the PAUSE button */
-};
-
-struct ir_scancode_table ir_codes_flyvideo_table = {
- .scan = ir_codes_flyvideo,
- .size = ARRAY_SIZE(ir_codes_flyvideo),
-};
-EXPORT_SYMBOL_GPL(ir_codes_flyvideo_table);
-
-static struct ir_scancode ir_codes_flydvb[] = {
- { 0x01, KEY_ZOOM }, /* Full Screen */
- { 0x00, KEY_POWER }, /* Power */
-
- { 0x03, KEY_1 },
- { 0x04, KEY_2 },
- { 0x05, KEY_3 },
- { 0x07, KEY_4 },
- { 0x08, KEY_5 },
- { 0x09, KEY_6 },
- { 0x0b, KEY_7 },
- { 0x0c, KEY_8 },
- { 0x0d, KEY_9 },
- { 0x06, KEY_AGAIN }, /* Recall */
- { 0x0f, KEY_0 },
- { 0x10, KEY_MUTE }, /* Mute */
- { 0x02, KEY_RADIO }, /* TV/Radio */
- { 0x1b, KEY_LANGUAGE }, /* SAP (Second Audio Program) */
-
- { 0x14, KEY_VOLUMEUP }, /* VOL+ */
- { 0x17, KEY_VOLUMEDOWN }, /* VOL- */
- { 0x12, KEY_CHANNELUP }, /* CH+ */
- { 0x13, KEY_CHANNELDOWN }, /* CH- */
- { 0x1d, KEY_ENTER }, /* Enter */
-
- { 0x1a, KEY_MODE }, /* PIP */
- { 0x18, KEY_TUNER }, /* Source */
-
- { 0x1e, KEY_RECORD }, /* Record/Pause */
- { 0x15, KEY_ANGLE }, /* Swap (no label on key) */
- { 0x1c, KEY_PAUSE }, /* Timeshift/Pause */
- { 0x19, KEY_BACK }, /* Rewind << */
- { 0x0a, KEY_PLAYPAUSE }, /* Play/Pause */
- { 0x1f, KEY_FORWARD }, /* Forward >> */
- { 0x16, KEY_PREVIOUS }, /* Back |<< */
- { 0x11, KEY_STOP }, /* Stop */
- { 0x0e, KEY_NEXT }, /* End >>| */
-};
-
-struct ir_scancode_table ir_codes_flydvb_table = {
- .scan = ir_codes_flydvb,
- .size = ARRAY_SIZE(ir_codes_flydvb),
-};
-EXPORT_SYMBOL_GPL(ir_codes_flydvb_table);
-
-static struct ir_scancode ir_codes_cinergy[] = {
- { 0x00, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
-
- { 0x0a, KEY_POWER },
- { 0x0b, KEY_PROG1 }, /* app */
- { 0x0c, KEY_ZOOM }, /* zoom/fullscreen */
- { 0x0d, KEY_CHANNELUP }, /* channel */
- { 0x0e, KEY_CHANNELDOWN }, /* channel- */
- { 0x0f, KEY_VOLUMEUP },
- { 0x10, KEY_VOLUMEDOWN },
- { 0x11, KEY_TUNER }, /* AV */
- { 0x12, KEY_NUMLOCK }, /* -/-- */
- { 0x13, KEY_AUDIO }, /* audio */
- { 0x14, KEY_MUTE },
- { 0x15, KEY_UP },
- { 0x16, KEY_DOWN },
- { 0x17, KEY_LEFT },
- { 0x18, KEY_RIGHT },
- { 0x19, BTN_LEFT, },
- { 0x1a, BTN_RIGHT, },
- { 0x1b, KEY_WWW }, /* text */
- { 0x1c, KEY_REWIND },
- { 0x1d, KEY_FORWARD },
- { 0x1e, KEY_RECORD },
- { 0x1f, KEY_PLAY },
- { 0x20, KEY_PREVIOUSSONG },
- { 0x21, KEY_NEXTSONG },
- { 0x22, KEY_PAUSE },
- { 0x23, KEY_STOP },
-};
-
-struct ir_scancode_table ir_codes_cinergy_table = {
- .scan = ir_codes_cinergy,
- .size = ARRAY_SIZE(ir_codes_cinergy),
-};
-EXPORT_SYMBOL_GPL(ir_codes_cinergy_table);
-
-/* Alfons Geser <a.geser@cox.net>
- * updates from Job D. R. Borges <jobdrb@ig.com.br> */
-static struct ir_scancode ir_codes_eztv[] = {
- { 0x12, KEY_POWER },
- { 0x01, KEY_TV }, /* DVR */
- { 0x15, KEY_DVD }, /* DVD */
- { 0x17, KEY_AUDIO }, /* music */
- /* DVR mode / DVD mode / music mode */
-
- { 0x1b, KEY_MUTE }, /* mute */
- { 0x02, KEY_LANGUAGE }, /* MTS/SAP / audio / autoseek */
- { 0x1e, KEY_SUBTITLE }, /* closed captioning / subtitle / seek */
- { 0x16, KEY_ZOOM }, /* full screen */
- { 0x1c, KEY_VIDEO }, /* video source / eject / delall */
- { 0x1d, KEY_RESTART }, /* playback / angle / del */
- { 0x2f, KEY_SEARCH }, /* scan / menu / playlist */
- { 0x30, KEY_CHANNEL }, /* CH surfing / bookmark / memo */
-
- { 0x31, KEY_HELP }, /* help */
- { 0x32, KEY_MODE }, /* num/memo */
- { 0x33, KEY_ESC }, /* cancel */
-
- { 0x0c, KEY_UP }, /* up */
- { 0x10, KEY_DOWN }, /* down */
- { 0x08, KEY_LEFT }, /* left */
- { 0x04, KEY_RIGHT }, /* right */
- { 0x03, KEY_SELECT }, /* select */
-
- { 0x1f, KEY_REWIND }, /* rewind */
- { 0x20, KEY_PLAYPAUSE },/* play/pause */
- { 0x29, KEY_FORWARD }, /* forward */
- { 0x14, KEY_AGAIN }, /* repeat */
- { 0x2b, KEY_RECORD }, /* recording */
- { 0x2c, KEY_STOP }, /* stop */
- { 0x2d, KEY_PLAY }, /* play */
- { 0x2e, KEY_CAMERA }, /* snapshot / shuffle */
-
- { 0x00, KEY_0 },
- { 0x05, KEY_1 },
- { 0x06, KEY_2 },
- { 0x07, KEY_3 },
- { 0x09, KEY_4 },
- { 0x0a, KEY_5 },
- { 0x0b, KEY_6 },
- { 0x0d, KEY_7 },
- { 0x0e, KEY_8 },
- { 0x0f, KEY_9 },
-
- { 0x2a, KEY_VOLUMEUP },
- { 0x11, KEY_VOLUMEDOWN },
- { 0x18, KEY_CHANNELUP },/* CH.tracking up */
- { 0x19, KEY_CHANNELDOWN },/* CH.tracking down */
-
- { 0x13, KEY_ENTER }, /* enter */
- { 0x21, KEY_DOT }, /* . (decimal dot) */
-};
-
-struct ir_scancode_table ir_codes_eztv_table = {
- .scan = ir_codes_eztv,
- .size = ARRAY_SIZE(ir_codes_eztv),
-};
-EXPORT_SYMBOL_GPL(ir_codes_eztv_table);
-
-/* Alex Hermann <gaaf@gmx.net> */
-static struct ir_scancode ir_codes_avermedia[] = {
- { 0x28, KEY_1 },
- { 0x18, KEY_2 },
- { 0x38, KEY_3 },
- { 0x24, KEY_4 },
- { 0x14, KEY_5 },
- { 0x34, KEY_6 },
- { 0x2c, KEY_7 },
- { 0x1c, KEY_8 },
- { 0x3c, KEY_9 },
- { 0x22, KEY_0 },
-
- { 0x20, KEY_TV }, /* TV/FM */
- { 0x10, KEY_CD }, /* CD */
- { 0x30, KEY_TEXT }, /* TELETEXT */
- { 0x00, KEY_POWER }, /* POWER */
-
- { 0x08, KEY_VIDEO }, /* VIDEO */
- { 0x04, KEY_AUDIO }, /* AUDIO */
- { 0x0c, KEY_ZOOM }, /* FULL SCREEN */
-
- { 0x12, KEY_SUBTITLE }, /* DISPLAY */
- { 0x32, KEY_REWIND }, /* LOOP */
- { 0x02, KEY_PRINT }, /* PREVIEW */
-
- { 0x2a, KEY_SEARCH }, /* AUTOSCAN */
- { 0x1a, KEY_SLEEP }, /* FREEZE */
- { 0x3a, KEY_CAMERA }, /* SNAPSHOT */
- { 0x0a, KEY_MUTE }, /* MUTE */
-
- { 0x26, KEY_RECORD }, /* RECORD */
- { 0x16, KEY_PAUSE }, /* PAUSE */
- { 0x36, KEY_STOP }, /* STOP */
- { 0x06, KEY_PLAY }, /* PLAY */
-
- { 0x2e, KEY_RED }, /* RED */
- { 0x21, KEY_GREEN }, /* GREEN */
- { 0x0e, KEY_YELLOW }, /* YELLOW */
- { 0x01, KEY_BLUE }, /* BLUE */
-
- { 0x1e, KEY_VOLUMEDOWN }, /* VOLUME- */
- { 0x3e, KEY_VOLUMEUP }, /* VOLUME+ */
- { 0x11, KEY_CHANNELDOWN }, /* CHANNEL/PAGE- */
- { 0x31, KEY_CHANNELUP } /* CHANNEL/PAGE+ */
-};
-
-struct ir_scancode_table ir_codes_avermedia_table = {
- .scan = ir_codes_avermedia,
- .size = ARRAY_SIZE(ir_codes_avermedia),
-};
-EXPORT_SYMBOL_GPL(ir_codes_avermedia_table);
-
-static struct ir_scancode ir_codes_videomate_tv_pvr[] = {
- { 0x14, KEY_MUTE },
- { 0x24, KEY_ZOOM },
-
- { 0x01, KEY_DVD },
- { 0x23, KEY_RADIO },
- { 0x00, KEY_TV },
-
- { 0x0a, KEY_REWIND },
- { 0x08, KEY_PLAYPAUSE },
- { 0x0f, KEY_FORWARD },
-
- { 0x02, KEY_PREVIOUS },
- { 0x07, KEY_STOP },
- { 0x06, KEY_NEXT },
-
- { 0x0c, KEY_UP },
- { 0x0e, KEY_DOWN },
- { 0x0b, KEY_LEFT },
- { 0x0d, KEY_RIGHT },
- { 0x11, KEY_OK },
-
- { 0x03, KEY_MENU },
- { 0x09, KEY_SETUP },
- { 0x05, KEY_VIDEO },
- { 0x22, KEY_CHANNEL },
-
- { 0x12, KEY_VOLUMEUP },
- { 0x15, KEY_VOLUMEDOWN },
- { 0x10, KEY_CHANNELUP },
- { 0x13, KEY_CHANNELDOWN },
-
- { 0x04, KEY_RECORD },
-
- { 0x16, KEY_1 },
- { 0x17, KEY_2 },
- { 0x18, KEY_3 },
- { 0x19, KEY_4 },
- { 0x1a, KEY_5 },
- { 0x1b, KEY_6 },
- { 0x1c, KEY_7 },
- { 0x1d, KEY_8 },
- { 0x1e, KEY_9 },
- { 0x1f, KEY_0 },
-
- { 0x20, KEY_LANGUAGE },
- { 0x21, KEY_SLEEP },
-};
-
-struct ir_scancode_table ir_codes_videomate_tv_pvr_table = {
- .scan = ir_codes_videomate_tv_pvr,
- .size = ARRAY_SIZE(ir_codes_videomate_tv_pvr),
-};
-EXPORT_SYMBOL_GPL(ir_codes_videomate_tv_pvr_table);
-
-/* Michael Tokarev <mjt@tls.msk.ru>
- http://www.corpit.ru/mjt/beholdTV/remote_control.jpg
- keytable is used by MANLI MTV00[0x0c] and BeholdTV 40[13] at
- least, and probably other cards too.
- The "ascii-art picture" below (in comments, first row
- is the keycode in hex, and subsequent row(s) shows
- the button labels (several variants when appropriate)
-   helps to decide which keycodes to assign to the buttons.
- */
-static struct ir_scancode ir_codes_manli[] = {
-
- /* 0x1c 0x12 *
- * FUNCTION POWER *
- * FM (|) *
- * */
- { 0x1c, KEY_RADIO }, /*XXX*/
- { 0x12, KEY_POWER },
-
- /* 0x01 0x02 0x03 *
- * 1 2 3 *
- * *
- * 0x04 0x05 0x06 *
- * 4 5 6 *
- * *
- * 0x07 0x08 0x09 *
- * 7 8 9 *
- * */
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
-
- /* 0x0a 0x00 0x17 *
- * RECALL 0 +100 *
- * PLUS *
- * */
- { 0x0a, KEY_AGAIN }, /*XXX KEY_REWIND? */
- { 0x00, KEY_0 },
- { 0x17, KEY_DIGITS }, /*XXX*/
-
- /* 0x14 0x10 *
- * MENU INFO *
- * OSD */
- { 0x14, KEY_MENU },
- { 0x10, KEY_INFO },
-
- /* 0x0b *
- * Up *
- * *
- * 0x18 0x16 0x0c *
- * Left Ok Right *
- * *
- * 0x015 *
- * Down *
- * */
- { 0x0b, KEY_UP },
- { 0x18, KEY_LEFT },
- { 0x16, KEY_OK }, /*XXX KEY_SELECT? KEY_ENTER? */
- { 0x0c, KEY_RIGHT },
- { 0x15, KEY_DOWN },
-
- /* 0x11 0x0d *
- * TV/AV MODE *
- * SOURCE STEREO *
- * */
- { 0x11, KEY_TV }, /*XXX*/
- { 0x0d, KEY_MODE }, /*XXX there's no KEY_STEREO */
-
- /* 0x0f 0x1b 0x1a *
- * AUDIO Vol+ Chan+ *
- * TIMESHIFT??? *
- * *
- * 0x0e 0x1f 0x1e *
- * SLEEP Vol- Chan- *
- * */
- { 0x0f, KEY_AUDIO },
- { 0x1b, KEY_VOLUMEUP },
- { 0x1a, KEY_CHANNELUP },
- { 0x0e, KEY_TIME },
- { 0x1f, KEY_VOLUMEDOWN },
- { 0x1e, KEY_CHANNELDOWN },
-
- /* 0x13 0x19 *
- * MUTE SNAPSHOT*
- * */
- { 0x13, KEY_MUTE },
- { 0x19, KEY_CAMERA },
-
- /* 0x1d unused ? */
-};
-
-struct ir_scancode_table ir_codes_manli_table = {
- .scan = ir_codes_manli,
- .size = ARRAY_SIZE(ir_codes_manli),
-};
-EXPORT_SYMBOL_GPL(ir_codes_manli_table);
-
-/* Mike Baikov <mike@baikov.com> */
-static struct ir_scancode ir_codes_gotview7135[] = {
-
- { 0x11, KEY_POWER },
- { 0x35, KEY_TV },
- { 0x1b, KEY_0 },
- { 0x29, KEY_1 },
- { 0x19, KEY_2 },
- { 0x39, KEY_3 },
- { 0x1f, KEY_4 },
- { 0x2c, KEY_5 },
- { 0x21, KEY_6 },
- { 0x24, KEY_7 },
- { 0x18, KEY_8 },
- { 0x2b, KEY_9 },
- { 0x3b, KEY_AGAIN }, /* LOOP */
- { 0x06, KEY_AUDIO },
- { 0x31, KEY_PRINT }, /* PREVIEW */
- { 0x3e, KEY_VIDEO },
- { 0x10, KEY_CHANNELUP },
- { 0x20, KEY_CHANNELDOWN },
- { 0x0c, KEY_VOLUMEDOWN },
- { 0x28, KEY_VOLUMEUP },
- { 0x08, KEY_MUTE },
- { 0x26, KEY_SEARCH }, /* SCAN */
- { 0x3f, KEY_CAMERA }, /* SNAPSHOT */
- { 0x12, KEY_RECORD },
- { 0x32, KEY_STOP },
- { 0x3c, KEY_PLAY },
- { 0x1d, KEY_REWIND },
- { 0x2d, KEY_PAUSE },
- { 0x0d, KEY_FORWARD },
- { 0x05, KEY_ZOOM }, /*FULL*/
-
- { 0x2a, KEY_F21 }, /* LIVE TIMESHIFT */
- { 0x0e, KEY_F22 }, /* MIN TIMESHIFT */
- { 0x1e, KEY_TIME }, /* TIMESHIFT */
- { 0x38, KEY_F24 }, /* NORMAL TIMESHIFT */
-};
-
-struct ir_scancode_table ir_codes_gotview7135_table = {
- .scan = ir_codes_gotview7135,
- .size = ARRAY_SIZE(ir_codes_gotview7135),
-};
-EXPORT_SYMBOL_GPL(ir_codes_gotview7135_table);
-
-static struct ir_scancode ir_codes_purpletv[] = {
- { 0x03, KEY_POWER },
- { 0x6f, KEY_MUTE },
- { 0x10, KEY_BACKSPACE }, /* Recall */
-
- { 0x11, KEY_0 },
- { 0x04, KEY_1 },
- { 0x05, KEY_2 },
- { 0x06, KEY_3 },
- { 0x08, KEY_4 },
- { 0x09, KEY_5 },
- { 0x0a, KEY_6 },
- { 0x0c, KEY_7 },
- { 0x0d, KEY_8 },
- { 0x0e, KEY_9 },
- { 0x12, KEY_DOT }, /* 100+ */
-
- { 0x07, KEY_VOLUMEUP },
- { 0x0b, KEY_VOLUMEDOWN },
- { 0x1a, KEY_KPPLUS },
- { 0x18, KEY_KPMINUS },
- { 0x15, KEY_UP },
- { 0x1d, KEY_DOWN },
- { 0x0f, KEY_CHANNELUP },
- { 0x13, KEY_CHANNELDOWN },
- { 0x48, KEY_ZOOM },
-
- { 0x1b, KEY_VIDEO }, /* Video source */
- { 0x1f, KEY_CAMERA }, /* Snapshot */
- { 0x49, KEY_LANGUAGE }, /* MTS Select */
- { 0x19, KEY_SEARCH }, /* Auto Scan */
-
- { 0x4b, KEY_RECORD },
- { 0x46, KEY_PLAY },
- { 0x45, KEY_PAUSE }, /* Pause */
- { 0x44, KEY_STOP },
- { 0x43, KEY_TIME }, /* Time Shift */
- { 0x17, KEY_CHANNEL }, /* SURF CH */
- { 0x40, KEY_FORWARD }, /* Forward ? */
- { 0x42, KEY_REWIND }, /* Backward ? */
-
-};
-
-struct ir_scancode_table ir_codes_purpletv_table = {
- .scan = ir_codes_purpletv,
- .size = ARRAY_SIZE(ir_codes_purpletv),
-};
-EXPORT_SYMBOL_GPL(ir_codes_purpletv_table);
-
-/* Mapping for the 28 key remote control as seen at
- http://www.sednacomputer.com/photo/cardbus-tv.jpg
- Pavel Mihaylov <bin@bash.info>
- Also for the remote bundled with Kozumi KTV-01C card */
-static struct ir_scancode ir_codes_pctv_sedna[] = {
- { 0x00, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
-
- { 0x0a, KEY_AGAIN }, /* Recall */
- { 0x0b, KEY_CHANNELUP },
- { 0x0c, KEY_VOLUMEUP },
- { 0x0d, KEY_MODE }, /* Stereo */
- { 0x0e, KEY_STOP },
- { 0x0f, KEY_PREVIOUSSONG },
- { 0x10, KEY_ZOOM },
- { 0x11, KEY_TUNER }, /* Source */
- { 0x12, KEY_POWER },
- { 0x13, KEY_MUTE },
- { 0x15, KEY_CHANNELDOWN },
- { 0x18, KEY_VOLUMEDOWN },
- { 0x19, KEY_CAMERA }, /* Snapshot */
- { 0x1a, KEY_NEXTSONG },
- { 0x1b, KEY_TIME }, /* Time Shift */
- { 0x1c, KEY_RADIO }, /* FM Radio */
- { 0x1d, KEY_RECORD },
- { 0x1e, KEY_PAUSE },
- /* additional codes for Kozumi's remote */
- { 0x14, KEY_INFO }, /* OSD */
- { 0x16, KEY_OK }, /* OK */
- { 0x17, KEY_DIGITS }, /* Plus */
- { 0x1f, KEY_PLAY }, /* Play */
-};
-
-struct ir_scancode_table ir_codes_pctv_sedna_table = {
- .scan = ir_codes_pctv_sedna,
- .size = ARRAY_SIZE(ir_codes_pctv_sedna),
-};
-EXPORT_SYMBOL_GPL(ir_codes_pctv_sedna_table);
-
-/* Mark Phalan <phalanm@o2.ie> */
-static struct ir_scancode ir_codes_pv951[] = {
- { 0x00, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
-
- { 0x12, KEY_POWER },
- { 0x10, KEY_MUTE },
- { 0x1f, KEY_VOLUMEDOWN },
- { 0x1b, KEY_VOLUMEUP },
- { 0x1a, KEY_CHANNELUP },
- { 0x1e, KEY_CHANNELDOWN },
- { 0x0e, KEY_PAGEUP },
- { 0x1d, KEY_PAGEDOWN },
- { 0x13, KEY_SOUND },
-
- { 0x18, KEY_KPPLUSMINUS }, /* CH +/- */
- { 0x16, KEY_SUBTITLE }, /* CC */
- { 0x0d, KEY_TEXT }, /* TTX */
- { 0x0b, KEY_TV }, /* AIR/CBL */
- { 0x11, KEY_PC }, /* PC/TV */
- { 0x17, KEY_OK }, /* CH RTN */
- { 0x19, KEY_MODE }, /* FUNC */
- { 0x0c, KEY_SEARCH }, /* AUTOSCAN */
-
- /* Not sure what to do with these ones! */
- { 0x0f, KEY_SELECT }, /* SOURCE */
- { 0x0a, KEY_KPPLUS }, /* +100 */
- { 0x14, KEY_EQUAL }, /* SYNC */
- { 0x1c, KEY_MEDIA }, /* PC/TV */
-};
-
-struct ir_scancode_table ir_codes_pv951_table = {
- .scan = ir_codes_pv951,
- .size = ARRAY_SIZE(ir_codes_pv951),
-};
-EXPORT_SYMBOL_GPL(ir_codes_pv951_table);
-
-/* generic RC5 keytable */
-/* see http://users.pandora.be/nenya/electronics/rc5/codes00.htm */
-/* used by old (black) Hauppauge remotes */
-static struct ir_scancode ir_codes_rc5_tv[] = {
- /* Keys 0 to 9 */
- { 0x00, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
-
- { 0x0b, KEY_CHANNEL }, /* channel / program (japan: 11) */
- { 0x0c, KEY_POWER }, /* standby */
- { 0x0d, KEY_MUTE }, /* mute / demute */
- { 0x0f, KEY_TV }, /* display */
- { 0x10, KEY_VOLUMEUP },
- { 0x11, KEY_VOLUMEDOWN },
- { 0x12, KEY_BRIGHTNESSUP },
- { 0x13, KEY_BRIGHTNESSDOWN },
- { 0x1e, KEY_SEARCH }, /* search + */
- { 0x20, KEY_CHANNELUP }, /* channel / program + */
- { 0x21, KEY_CHANNELDOWN }, /* channel / program - */
- { 0x22, KEY_CHANNEL }, /* alt / channel */
- { 0x23, KEY_LANGUAGE }, /* 1st / 2nd language */
- { 0x26, KEY_SLEEP }, /* sleeptimer */
- { 0x2e, KEY_MENU }, /* 2nd controls (USA: menu) */
- { 0x30, KEY_PAUSE },
- { 0x32, KEY_REWIND },
- { 0x33, KEY_GOTO },
- { 0x35, KEY_PLAY },
- { 0x36, KEY_STOP },
- { 0x37, KEY_RECORD }, /* recording */
- { 0x3c, KEY_TEXT }, /* teletext submode (Japan: 12) */
- { 0x3d, KEY_SUSPEND }, /* system standby */
-
-};
-
-struct ir_scancode_table ir_codes_rc5_tv_table = {
- .scan = ir_codes_rc5_tv,
- .size = ARRAY_SIZE(ir_codes_rc5_tv),
-};
-EXPORT_SYMBOL_GPL(ir_codes_rc5_tv_table);
-
-/* Table for Leadtek Winfast Remote Controls - used by both bttv and cx88 */
-static struct ir_scancode ir_codes_winfast[] = {
- /* Keys 0 to 9 */
- { 0x12, KEY_0 },
- { 0x05, KEY_1 },
- { 0x06, KEY_2 },
- { 0x07, KEY_3 },
- { 0x09, KEY_4 },
- { 0x0a, KEY_5 },
- { 0x0b, KEY_6 },
- { 0x0d, KEY_7 },
- { 0x0e, KEY_8 },
- { 0x0f, KEY_9 },
-
- { 0x00, KEY_POWER },
- { 0x1b, KEY_AUDIO }, /* Audio Source */
- { 0x02, KEY_TUNER }, /* TV/FM, not on Y0400052 */
- { 0x1e, KEY_VIDEO }, /* Video Source */
- { 0x16, KEY_INFO }, /* Display information */
- { 0x04, KEY_VOLUMEUP },
- { 0x08, KEY_VOLUMEDOWN },
- { 0x0c, KEY_CHANNELUP },
- { 0x10, KEY_CHANNELDOWN },
- { 0x03, KEY_ZOOM }, /* fullscreen */
- { 0x1f, KEY_TEXT }, /* closed caption/teletext */
- { 0x20, KEY_SLEEP },
- { 0x29, KEY_CLEAR }, /* boss key */
- { 0x14, KEY_MUTE },
- { 0x2b, KEY_RED },
- { 0x2c, KEY_GREEN },
- { 0x2d, KEY_YELLOW },
- { 0x2e, KEY_BLUE },
- { 0x18, KEY_KPPLUS }, /* fine tune + , not on Y040052 */
- { 0x19, KEY_KPMINUS }, /* fine tune - , not on Y040052 */
- { 0x2a, KEY_MEDIA }, /* PIP (Picture in picture */
- { 0x21, KEY_DOT },
- { 0x13, KEY_ENTER },
- { 0x11, KEY_LAST }, /* Recall (last channel */
- { 0x22, KEY_PREVIOUS },
- { 0x23, KEY_PLAYPAUSE },
- { 0x24, KEY_NEXT },
- { 0x25, KEY_TIME }, /* Time Shifting */
- { 0x26, KEY_STOP },
- { 0x27, KEY_RECORD },
- { 0x28, KEY_SAVE }, /* Screenshot */
- { 0x2f, KEY_MENU },
- { 0x30, KEY_CANCEL },
- { 0x31, KEY_CHANNEL }, /* Channel Surf */
- { 0x32, KEY_SUBTITLE },
- { 0x33, KEY_LANGUAGE },
- { 0x34, KEY_REWIND },
- { 0x35, KEY_FASTFORWARD },
- { 0x36, KEY_TV },
- { 0x37, KEY_RADIO }, /* FM */
- { 0x38, KEY_DVD },
-
- { 0x1a, KEY_MODE}, /* change to MCE mode on Y04G0051 */
- { 0x3e, KEY_F21 }, /* MCE +VOL, on Y04G0033 */
- { 0x3a, KEY_F22 }, /* MCE -VOL, on Y04G0033 */
- { 0x3b, KEY_F23 }, /* MCE +CH, on Y04G0033 */
- { 0x3f, KEY_F24 } /* MCE -CH, on Y04G0033 */
-};
-
-struct ir_scancode_table ir_codes_winfast_table = {
- .scan = ir_codes_winfast,
- .size = ARRAY_SIZE(ir_codes_winfast),
-};
-EXPORT_SYMBOL_GPL(ir_codes_winfast_table);
-
-static struct ir_scancode ir_codes_pinnacle_color[] = {
- { 0x59, KEY_MUTE },
- { 0x4a, KEY_POWER },
-
- { 0x18, KEY_TEXT },
- { 0x26, KEY_TV },
- { 0x3d, KEY_PRINT },
-
- { 0x48, KEY_RED },
- { 0x04, KEY_GREEN },
- { 0x11, KEY_YELLOW },
- { 0x00, KEY_BLUE },
-
- { 0x2d, KEY_VOLUMEUP },
- { 0x1e, KEY_VOLUMEDOWN },
-
- { 0x49, KEY_MENU },
-
- { 0x16, KEY_CHANNELUP },
- { 0x17, KEY_CHANNELDOWN },
-
- { 0x20, KEY_UP },
- { 0x21, KEY_DOWN },
- { 0x22, KEY_LEFT },
- { 0x23, KEY_RIGHT },
- { 0x0d, KEY_SELECT },
-
- { 0x08, KEY_BACK },
- { 0x07, KEY_REFRESH },
-
- { 0x2f, KEY_ZOOM },
- { 0x29, KEY_RECORD },
-
- { 0x4b, KEY_PAUSE },
- { 0x4d, KEY_REWIND },
- { 0x2e, KEY_PLAY },
- { 0x4e, KEY_FORWARD },
- { 0x53, KEY_PREVIOUS },
- { 0x4c, KEY_STOP },
- { 0x54, KEY_NEXT },
-
- { 0x69, KEY_0 },
- { 0x6a, KEY_1 },
- { 0x6b, KEY_2 },
- { 0x6c, KEY_3 },
- { 0x6d, KEY_4 },
- { 0x6e, KEY_5 },
- { 0x6f, KEY_6 },
- { 0x70, KEY_7 },
- { 0x71, KEY_8 },
- { 0x72, KEY_9 },
-
- { 0x74, KEY_CHANNEL },
- { 0x0a, KEY_BACKSPACE },
-};
-
-struct ir_scancode_table ir_codes_pinnacle_color_table = {
- .scan = ir_codes_pinnacle_color,
- .size = ARRAY_SIZE(ir_codes_pinnacle_color),
-};
-EXPORT_SYMBOL_GPL(ir_codes_pinnacle_color_table);
-
-/* Hauppauge: the newer, gray remotes (seems there are multiple
- * slightly different versions), shipped with cx88+ivtv cards.
- * almost rc5 coding, but some non-standard keys */
-static struct ir_scancode ir_codes_hauppauge_new[] = {
- /* Keys 0 to 9 */
- { 0x00, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
-
- { 0x0a, KEY_TEXT }, /* keypad asterisk as well */
- { 0x0b, KEY_RED }, /* red button */
- { 0x0c, KEY_RADIO },
- { 0x0d, KEY_MENU },
- { 0x0e, KEY_SUBTITLE }, /* also the # key */
- { 0x0f, KEY_MUTE },
- { 0x10, KEY_VOLUMEUP },
- { 0x11, KEY_VOLUMEDOWN },
- { 0x12, KEY_PREVIOUS }, /* previous channel */
- { 0x14, KEY_UP },
- { 0x15, KEY_DOWN },
- { 0x16, KEY_LEFT },
- { 0x17, KEY_RIGHT },
- { 0x18, KEY_VIDEO }, /* Videos */
- { 0x19, KEY_AUDIO }, /* Music */
- /* 0x1a: Pictures - presume this means
- "Multimedia Home Platform" -
- no "PICTURES" key in input.h
- */
- { 0x1a, KEY_MHP },
-
- { 0x1b, KEY_EPG }, /* Guide */
- { 0x1c, KEY_TV },
- { 0x1e, KEY_NEXTSONG }, /* skip >| */
- { 0x1f, KEY_EXIT }, /* back/exit */
- { 0x20, KEY_CHANNELUP }, /* channel / program + */
- { 0x21, KEY_CHANNELDOWN }, /* channel / program - */
- { 0x22, KEY_CHANNEL }, /* source (old black remote) */
- { 0x24, KEY_PREVIOUSSONG }, /* replay |< */
- { 0x25, KEY_ENTER }, /* OK */
- { 0x26, KEY_SLEEP }, /* minimize (old black remote) */
- { 0x29, KEY_BLUE }, /* blue key */
- { 0x2e, KEY_GREEN }, /* green button */
- { 0x30, KEY_PAUSE }, /* pause */
- { 0x32, KEY_REWIND }, /* backward << */
- { 0x34, KEY_FASTFORWARD }, /* forward >> */
- { 0x35, KEY_PLAY },
- { 0x36, KEY_STOP },
- { 0x37, KEY_RECORD }, /* recording */
- { 0x38, KEY_YELLOW }, /* yellow key */
- { 0x3b, KEY_SELECT }, /* top right button */
- { 0x3c, KEY_ZOOM }, /* full */
- { 0x3d, KEY_POWER }, /* system power (green button) */
-};
-
-struct ir_scancode_table ir_codes_hauppauge_new_table = {
- .scan = ir_codes_hauppauge_new,
- .size = ARRAY_SIZE(ir_codes_hauppauge_new),
-};
-EXPORT_SYMBOL_GPL(ir_codes_hauppauge_new_table);
-
-static struct ir_scancode ir_codes_npgtech[] = {
- { 0x1d, KEY_SWITCHVIDEOMODE }, /* switch inputs */
- { 0x2a, KEY_FRONT },
-
- { 0x3e, KEY_1 },
- { 0x02, KEY_2 },
- { 0x06, KEY_3 },
- { 0x0a, KEY_4 },
- { 0x0e, KEY_5 },
- { 0x12, KEY_6 },
- { 0x16, KEY_7 },
- { 0x1a, KEY_8 },
- { 0x1e, KEY_9 },
- { 0x3a, KEY_0 },
- { 0x22, KEY_NUMLOCK }, /* -/-- */
- { 0x20, KEY_REFRESH },
-
- { 0x03, KEY_BRIGHTNESSDOWN },
- { 0x28, KEY_AUDIO },
- { 0x3c, KEY_CHANNELUP },
- { 0x3f, KEY_VOLUMEDOWN },
- { 0x2e, KEY_MUTE },
- { 0x3b, KEY_VOLUMEUP },
- { 0x00, KEY_CHANNELDOWN },
- { 0x07, KEY_BRIGHTNESSUP },
- { 0x2c, KEY_TEXT },
-
- { 0x37, KEY_RECORD },
- { 0x17, KEY_PLAY },
- { 0x13, KEY_PAUSE },
- { 0x26, KEY_STOP },
- { 0x18, KEY_FASTFORWARD },
- { 0x14, KEY_REWIND },
- { 0x33, KEY_ZOOM },
- { 0x32, KEY_KEYBOARD },
- { 0x30, KEY_GOTO }, /* Pointing arrow */
- { 0x36, KEY_MACRO }, /* Maximize/Minimize (yellow) */
- { 0x0b, KEY_RADIO },
- { 0x10, KEY_POWER },
-
-};
-
-struct ir_scancode_table ir_codes_npgtech_table = {
- .scan = ir_codes_npgtech,
- .size = ARRAY_SIZE(ir_codes_npgtech),
-};
-EXPORT_SYMBOL_GPL(ir_codes_npgtech_table);
-
-/* Norwood Micro (non-Pro) TV Tuner
- By Peter Naulls <peter@chocky.org>
- Key comments are the functions given in the manual */
-static struct ir_scancode ir_codes_norwood[] = {
- /* Keys 0 to 9 */
- { 0x20, KEY_0 },
- { 0x21, KEY_1 },
- { 0x22, KEY_2 },
- { 0x23, KEY_3 },
- { 0x24, KEY_4 },
- { 0x25, KEY_5 },
- { 0x26, KEY_6 },
- { 0x27, KEY_7 },
- { 0x28, KEY_8 },
- { 0x29, KEY_9 },
-
- { 0x78, KEY_TUNER }, /* Video Source */
- { 0x2c, KEY_EXIT }, /* Open/Close software */
- { 0x2a, KEY_SELECT }, /* 2 Digit Select */
- { 0x69, KEY_AGAIN }, /* Recall */
-
- { 0x32, KEY_BRIGHTNESSUP }, /* Brightness increase */
- { 0x33, KEY_BRIGHTNESSDOWN }, /* Brightness decrease */
- { 0x6b, KEY_KPPLUS }, /* (not named >>>>>) */
- { 0x6c, KEY_KPMINUS }, /* (not named <<<<<) */
-
- { 0x2d, KEY_MUTE }, /* Mute */
- { 0x30, KEY_VOLUMEUP }, /* Volume up */
- { 0x31, KEY_VOLUMEDOWN }, /* Volume down */
- { 0x60, KEY_CHANNELUP }, /* Channel up */
- { 0x61, KEY_CHANNELDOWN }, /* Channel down */
-
- { 0x3f, KEY_RECORD }, /* Record */
- { 0x37, KEY_PLAY }, /* Play */
- { 0x36, KEY_PAUSE }, /* Pause */
- { 0x2b, KEY_STOP }, /* Stop */
- { 0x67, KEY_FASTFORWARD }, /* Forward */
- { 0x66, KEY_REWIND }, /* Rewind */
- { 0x3e, KEY_SEARCH }, /* Auto Scan */
- { 0x2e, KEY_CAMERA }, /* Capture Video */
- { 0x6d, KEY_MENU }, /* Show/Hide Control */
- { 0x2f, KEY_ZOOM }, /* Full Screen */
- { 0x34, KEY_RADIO }, /* FM */
- { 0x65, KEY_POWER }, /* Computer power */
-};
-
-struct ir_scancode_table ir_codes_norwood_table = {
- .scan = ir_codes_norwood,
- .size = ARRAY_SIZE(ir_codes_norwood),
-};
-EXPORT_SYMBOL_GPL(ir_codes_norwood_table);
-
-/* From reading the following remotes:
- * Zenith Universal 7 / TV Mode 807 / VCR Mode 837
- * Hauppauge (from NOVA-CI-s box product)
- * This is a "middle of the road" approach, differences are noted
- */
-static struct ir_scancode ir_codes_budget_ci_old[] = {
- { 0x00, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
- { 0x0a, KEY_ENTER },
- { 0x0b, KEY_RED },
- { 0x0c, KEY_POWER }, /* RADIO on Hauppauge */
- { 0x0d, KEY_MUTE },
- { 0x0f, KEY_A }, /* TV on Hauppauge */
- { 0x10, KEY_VOLUMEUP },
- { 0x11, KEY_VOLUMEDOWN },
- { 0x14, KEY_B },
- { 0x1c, KEY_UP },
- { 0x1d, KEY_DOWN },
- { 0x1e, KEY_OPTION }, /* RESERVED on Hauppauge */
- { 0x1f, KEY_BREAK },
- { 0x20, KEY_CHANNELUP },
- { 0x21, KEY_CHANNELDOWN },
- { 0x22, KEY_PREVIOUS }, /* Prev Ch on Zenith, SOURCE on Hauppauge */
- { 0x24, KEY_RESTART },
- { 0x25, KEY_OK },
- { 0x26, KEY_CYCLEWINDOWS }, /* MINIMIZE on Hauppauge */
- { 0x28, KEY_ENTER }, /* VCR mode on Zenith */
- { 0x29, KEY_PAUSE },
- { 0x2b, KEY_RIGHT },
- { 0x2c, KEY_LEFT },
- { 0x2e, KEY_MENU }, /* FULL SCREEN on Hauppauge */
- { 0x30, KEY_SLOW },
- { 0x31, KEY_PREVIOUS }, /* VCR mode on Zenith */
- { 0x32, KEY_REWIND },
- { 0x34, KEY_FASTFORWARD },
- { 0x35, KEY_PLAY },
- { 0x36, KEY_STOP },
- { 0x37, KEY_RECORD },
- { 0x38, KEY_TUNER }, /* TV/VCR on Zenith */
- { 0x3a, KEY_C },
- { 0x3c, KEY_EXIT },
- { 0x3d, KEY_POWER2 },
- { 0x3e, KEY_TUNER },
-};
-
-struct ir_scancode_table ir_codes_budget_ci_old_table = {
- .scan = ir_codes_budget_ci_old,
- .size = ARRAY_SIZE(ir_codes_budget_ci_old),
-};
-EXPORT_SYMBOL_GPL(ir_codes_budget_ci_old_table);
-
-/*
- * Marc Fargas <telenieko@telenieko.com>
- * This is the remote control that comes with the ASUS P7131,
- * which has a label saying "Model PC-39"
- */
-static struct ir_scancode ir_codes_asus_pc39[] = {
- /* Keys 0 to 9 */
- { 0x15, KEY_0 },
- { 0x29, KEY_1 },
- { 0x2d, KEY_2 },
- { 0x2b, KEY_3 },
- { 0x09, KEY_4 },
- { 0x0d, KEY_5 },
- { 0x0b, KEY_6 },
- { 0x31, KEY_7 },
- { 0x35, KEY_8 },
- { 0x33, KEY_9 },
-
- { 0x3e, KEY_RADIO }, /* radio */
- { 0x03, KEY_MENU }, /* dvd/menu */
- { 0x2a, KEY_VOLUMEUP },
- { 0x19, KEY_VOLUMEDOWN },
- { 0x37, KEY_UP },
- { 0x3b, KEY_DOWN },
- { 0x27, KEY_LEFT },
- { 0x2f, KEY_RIGHT },
- { 0x25, KEY_VIDEO }, /* video */
- { 0x39, KEY_AUDIO }, /* music */
-
- { 0x21, KEY_TV }, /* tv */
- { 0x1d, KEY_EXIT }, /* back */
- { 0x0a, KEY_CHANNELUP }, /* channel / program + */
- { 0x1b, KEY_CHANNELDOWN }, /* channel / program - */
- { 0x1a, KEY_ENTER }, /* enter */
-
- { 0x06, KEY_PAUSE }, /* play/pause */
- { 0x1e, KEY_PREVIOUS }, /* rew */
- { 0x26, KEY_NEXT }, /* forward */
- { 0x0e, KEY_REWIND }, /* backward << */
- { 0x3a, KEY_FASTFORWARD }, /* forward >> */
- { 0x36, KEY_STOP },
- { 0x2e, KEY_RECORD }, /* recording */
- { 0x16, KEY_POWER }, /* the button that reads "close" */
-
- { 0x11, KEY_ZOOM }, /* full screen */
- { 0x13, KEY_MACRO }, /* recall */
- { 0x23, KEY_HOME }, /* home */
- { 0x05, KEY_PVR }, /* picture */
- { 0x3d, KEY_MUTE }, /* mute */
- { 0x01, KEY_DVD }, /* dvd */
-};
-
-struct ir_scancode_table ir_codes_asus_pc39_table = {
- .scan = ir_codes_asus_pc39,
- .size = ARRAY_SIZE(ir_codes_asus_pc39),
-};
-EXPORT_SYMBOL_GPL(ir_codes_asus_pc39_table);
-
-
-/* Encore ENLTV-FM - black plastic, white front cover with white glowing buttons
- Juan Pablo Sormani <sorman@gmail.com> */
-static struct ir_scancode ir_codes_encore_enltv[] = {
-
- /* The Power button does nothing, not even in the Windows app,
-    although it sends data (used for BIOS wakeup?) */
- { 0x0d, KEY_MUTE },
-
- { 0x1e, KEY_TV },
- { 0x00, KEY_VIDEO },
- { 0x01, KEY_AUDIO }, /* music */
- { 0x02, KEY_MHP }, /* picture */
-
- { 0x1f, KEY_1 },
- { 0x03, KEY_2 },
- { 0x04, KEY_3 },
- { 0x05, KEY_4 },
- { 0x1c, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x1d, KEY_9 },
- { 0x0a, KEY_0 },
-
- { 0x09, KEY_LIST }, /* -/-- */
- { 0x0b, KEY_LAST }, /* recall */
-
- { 0x14, KEY_HOME }, /* win start menu */
- { 0x15, KEY_EXIT }, /* exit */
- { 0x16, KEY_CHANNELUP }, /* UP */
- { 0x12, KEY_CHANNELDOWN }, /* DOWN */
- { 0x0c, KEY_VOLUMEUP }, /* RIGHT */
- { 0x17, KEY_VOLUMEDOWN }, /* LEFT */
-
- { 0x18, KEY_ENTER }, /* OK */
-
- { 0x0e, KEY_ESC },
- { 0x13, KEY_CYCLEWINDOWS }, /* desktop */
- { 0x11, KEY_TAB },
- { 0x19, KEY_SWITCHVIDEOMODE }, /* switch */
-
- { 0x1a, KEY_MENU },
- { 0x1b, KEY_ZOOM }, /* fullscreen */
- { 0x44, KEY_TIME }, /* time shift */
- { 0x40, KEY_MODE }, /* source */
-
- { 0x5a, KEY_RECORD },
- { 0x42, KEY_PLAY }, /* play/pause */
- { 0x45, KEY_STOP },
- { 0x43, KEY_CAMERA }, /* camera icon */
-
- { 0x48, KEY_REWIND },
- { 0x4a, KEY_FASTFORWARD },
- { 0x49, KEY_PREVIOUS },
- { 0x4b, KEY_NEXT },
-
- { 0x4c, KEY_FAVORITES }, /* tv wall */
- { 0x4d, KEY_SOUND }, /* DVD sound */
- { 0x4e, KEY_LANGUAGE }, /* DVD lang */
- { 0x4f, KEY_TEXT }, /* DVD text */
-
- { 0x50, KEY_SLEEP }, /* shutdown */
- { 0x51, KEY_MODE }, /* stereo > main */
- { 0x52, KEY_SELECT }, /* stereo > sap */
- { 0x53, KEY_PROG1 }, /* teletext */
-
-
- { 0x59, KEY_RED }, /* AP1 */
- { 0x41, KEY_GREEN }, /* AP2 */
- { 0x47, KEY_YELLOW }, /* AP3 */
- { 0x57, KEY_BLUE }, /* AP4 */
-};
-
-struct ir_scancode_table ir_codes_encore_enltv_table = {
- .scan = ir_codes_encore_enltv,
- .size = ARRAY_SIZE(ir_codes_encore_enltv),
-};
-EXPORT_SYMBOL_GPL(ir_codes_encore_enltv_table);
-
-/* Encore ENLTV2-FM - silver plastic - "Wand Media" written at the bottom
- Mauro Carvalho Chehab <mchehab@infradead.org> */
-static struct ir_scancode ir_codes_encore_enltv2[] = {
- { 0x4c, KEY_POWER2 },
- { 0x4a, KEY_TUNER },
- { 0x40, KEY_1 },
- { 0x60, KEY_2 },
- { 0x50, KEY_3 },
- { 0x70, KEY_4 },
- { 0x48, KEY_5 },
- { 0x68, KEY_6 },
- { 0x58, KEY_7 },
- { 0x78, KEY_8 },
- { 0x44, KEY_9 },
- { 0x54, KEY_0 },
-
- { 0x64, KEY_LAST }, /* +100 */
- { 0x4e, KEY_AGAIN }, /* Recall */
-
- { 0x6c, KEY_SWITCHVIDEOMODE }, /* Video Source */
- { 0x5e, KEY_MENU },
- { 0x56, KEY_SCREEN },
- { 0x7a, KEY_SETUP },
-
- { 0x46, KEY_MUTE },
- { 0x5c, KEY_MODE }, /* Stereo */
- { 0x74, KEY_INFO },
- { 0x7c, KEY_CLEAR },
-
- { 0x55, KEY_UP },
- { 0x49, KEY_DOWN },
- { 0x7e, KEY_LEFT },
- { 0x59, KEY_RIGHT },
- { 0x6a, KEY_ENTER },
-
- { 0x42, KEY_VOLUMEUP },
- { 0x62, KEY_VOLUMEDOWN },
- { 0x52, KEY_CHANNELUP },
- { 0x72, KEY_CHANNELDOWN },
-
- { 0x41, KEY_RECORD },
- { 0x51, KEY_CAMERA }, /* Snapshot */
- { 0x75, KEY_TIME }, /* Timeshift */
- { 0x71, KEY_TV2 }, /* PIP */
-
- { 0x45, KEY_REWIND },
- { 0x6f, KEY_PAUSE },
- { 0x7d, KEY_FORWARD },
- { 0x79, KEY_STOP },
-};
-
-struct ir_scancode_table ir_codes_encore_enltv2_table = {
- .scan = ir_codes_encore_enltv2,
- .size = ARRAY_SIZE(ir_codes_encore_enltv2),
-};
-EXPORT_SYMBOL_GPL(ir_codes_encore_enltv2_table);
-
-/* for the Technotrend 1500 bundled remotes (grey and black): */
-static struct ir_scancode ir_codes_tt_1500[] = {
- { 0x01, KEY_POWER },
- { 0x02, KEY_SHUFFLE }, /* ? double-arrow key */
- { 0x03, KEY_1 },
- { 0x04, KEY_2 },
- { 0x05, KEY_3 },
- { 0x06, KEY_4 },
- { 0x07, KEY_5 },
- { 0x08, KEY_6 },
- { 0x09, KEY_7 },
- { 0x0a, KEY_8 },
- { 0x0b, KEY_9 },
- { 0x0c, KEY_0 },
- { 0x0d, KEY_UP },
- { 0x0e, KEY_LEFT },
- { 0x0f, KEY_OK },
- { 0x10, KEY_RIGHT },
- { 0x11, KEY_DOWN },
- { 0x12, KEY_INFO },
- { 0x13, KEY_EXIT },
- { 0x14, KEY_RED },
- { 0x15, KEY_GREEN },
- { 0x16, KEY_YELLOW },
- { 0x17, KEY_BLUE },
- { 0x18, KEY_MUTE },
- { 0x19, KEY_TEXT },
- { 0x1a, KEY_MODE }, /* ? TV/Radio */
- { 0x21, KEY_OPTION },
- { 0x22, KEY_EPG },
- { 0x23, KEY_CHANNELUP },
- { 0x24, KEY_CHANNELDOWN },
- { 0x25, KEY_VOLUMEUP },
- { 0x26, KEY_VOLUMEDOWN },
- { 0x27, KEY_SETUP },
- { 0x3a, KEY_RECORD }, /* these keys are only in the black remote */
- { 0x3b, KEY_PLAY },
- { 0x3c, KEY_STOP },
- { 0x3d, KEY_REWIND },
- { 0x3e, KEY_PAUSE },
- { 0x3f, KEY_FORWARD },
-};
-
-struct ir_scancode_table ir_codes_tt_1500_table = {
- .scan = ir_codes_tt_1500,
- .size = ARRAY_SIZE(ir_codes_tt_1500),
-};
-EXPORT_SYMBOL_GPL(ir_codes_tt_1500_table);
-
-/* DViCO FUSION HDTV MCE remote */
-static struct ir_scancode ir_codes_fusionhdtv_mce[] = {
-
- { 0x0b, KEY_1 },
- { 0x17, KEY_2 },
- { 0x1b, KEY_3 },
- { 0x07, KEY_4 },
- { 0x50, KEY_5 },
- { 0x54, KEY_6 },
- { 0x48, KEY_7 },
- { 0x4c, KEY_8 },
- { 0x58, KEY_9 },
- { 0x03, KEY_0 },
-
- { 0x5e, KEY_OK },
- { 0x51, KEY_UP },
- { 0x53, KEY_DOWN },
- { 0x5b, KEY_LEFT },
- { 0x5f, KEY_RIGHT },
-
- { 0x02, KEY_TV }, /* Labeled DTV on remote */
- { 0x0e, KEY_MP3 },
- { 0x1a, KEY_DVD },
- { 0x1e, KEY_FAVORITES }, /* Labeled CPF on remote */
- { 0x16, KEY_SETUP },
- { 0x46, KEY_POWER2 }, /* TV On/Off button on remote */
- { 0x0a, KEY_EPG }, /* Labeled Guide on remote */
-
- { 0x49, KEY_BACK },
- { 0x59, KEY_INFO }, /* Labeled MORE on remote */
- { 0x4d, KEY_MENU }, /* Labeled DVDMENU on remote */
- { 0x55, KEY_CYCLEWINDOWS }, /* Labeled ALT-TAB on remote */
-
- { 0x0f, KEY_PREVIOUSSONG }, /* Labeled |<< REPLAY on remote */
- { 0x12, KEY_NEXTSONG }, /* Labeled >>| SKIP on remote */
- { 0x42, KEY_ENTER }, /* Labeled START with a green
- MS windows logo on remote */
-
- { 0x15, KEY_VOLUMEUP },
- { 0x05, KEY_VOLUMEDOWN },
- { 0x11, KEY_CHANNELUP },
- { 0x09, KEY_CHANNELDOWN },
-
- { 0x52, KEY_CAMERA },
- { 0x5a, KEY_TUNER },
- { 0x19, KEY_OPEN },
-
- { 0x13, KEY_MODE }, /* 4:3 16:9 select */
- { 0x1f, KEY_ZOOM },
-
- { 0x43, KEY_REWIND },
- { 0x47, KEY_PLAYPAUSE },
- { 0x4f, KEY_FASTFORWARD },
- { 0x57, KEY_MUTE },
- { 0x0d, KEY_STOP },
- { 0x01, KEY_RECORD },
- { 0x4e, KEY_POWER },
-};
-
-struct ir_scancode_table ir_codes_fusionhdtv_mce_table = {
- .scan = ir_codes_fusionhdtv_mce,
- .size = ARRAY_SIZE(ir_codes_fusionhdtv_mce),
-};
-EXPORT_SYMBOL_GPL(ir_codes_fusionhdtv_mce_table);
-
-/* Pinnacle PCTV HD 800i mini remote */
-static struct ir_scancode ir_codes_pinnacle_pctv_hd[] = {
-
- { 0x0f, KEY_1 },
- { 0x15, KEY_2 },
- { 0x10, KEY_3 },
- { 0x18, KEY_4 },
- { 0x1b, KEY_5 },
- { 0x1e, KEY_6 },
- { 0x11, KEY_7 },
- { 0x21, KEY_8 },
- { 0x12, KEY_9 },
- { 0x27, KEY_0 },
-
- { 0x24, KEY_ZOOM },
- { 0x2a, KEY_SUBTITLE },
-
- { 0x00, KEY_MUTE },
- { 0x01, KEY_ENTER }, /* Pinnacle Logo */
- { 0x39, KEY_POWER },
-
- { 0x03, KEY_VOLUMEUP },
- { 0x09, KEY_VOLUMEDOWN },
- { 0x06, KEY_CHANNELUP },
- { 0x0c, KEY_CHANNELDOWN },
-
- { 0x2d, KEY_REWIND },
- { 0x30, KEY_PLAYPAUSE },
- { 0x33, KEY_FASTFORWARD },
- { 0x3c, KEY_STOP },
- { 0x36, KEY_RECORD },
- { 0x3f, KEY_EPG }, /* Labeled "?" */
-};
-
-struct ir_scancode_table ir_codes_pinnacle_pctv_hd_table = {
- .scan = ir_codes_pinnacle_pctv_hd,
- .size = ARRAY_SIZE(ir_codes_pinnacle_pctv_hd),
-};
-EXPORT_SYMBOL_GPL(ir_codes_pinnacle_pctv_hd_table);
-
-/*
- * Igor Kuznetsov <igk72@ya.ru>
- * Andrey J. Melnikov <temnota@kmv.ru>
- *
- * Keytable is used by BeholdTV 60x series, M6 series at
- * least, and probably other cards too.
- * The "ascii-art picture" below (in the comments: the first row
- * is the keycode in hex, and the subsequent row(s) show the
- * button labels, with several variants when appropriate)
- * helps to decide which keycodes to assign to the buttons.
- */
-static struct ir_scancode ir_codes_behold[] = {
-
- /* 0x1c 0x12 *
- * TV/FM POWER *
- * */
- { 0x1c, KEY_TUNER }, /* XXX KEY_TV / KEY_RADIO */
- { 0x12, KEY_POWER },
-
- /* 0x01 0x02 0x03 *
- * 1 2 3 *
- * *
- * 0x04 0x05 0x06 *
- * 4 5 6 *
- * *
- * 0x07 0x08 0x09 *
- * 7 8 9 *
- * */
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
-
- /* 0x0a 0x00 0x17 *
- * RECALL 0 MODE *
- * */
- { 0x0a, KEY_AGAIN },
- { 0x00, KEY_0 },
- { 0x17, KEY_MODE },
-
- /* 0x14 0x10 *
- * ASPECT FULLSCREEN *
- * */
- { 0x14, KEY_SCREEN },
- { 0x10, KEY_ZOOM },
-
- /* 0x0b *
- * Up *
- * *
- * 0x18 0x16 0x0c *
- * Left Ok Right *
- * *
- * 0x015 *
- * Down *
- * */
- { 0x0b, KEY_CHANNELUP },
- { 0x18, KEY_VOLUMEDOWN },
- { 0x16, KEY_OK }, /* XXX KEY_ENTER */
- { 0x0c, KEY_VOLUMEUP },
- { 0x15, KEY_CHANNELDOWN },
-
- /* 0x11 0x0d *
- * MUTE INFO *
- * */
- { 0x11, KEY_MUTE },
- { 0x0d, KEY_INFO },
-
- /* 0x0f 0x1b 0x1a *
- * RECORD PLAY/PAUSE STOP *
- * *
- * 0x0e 0x1f 0x1e *
- *TELETEXT AUDIO SOURCE *
- * RED YELLOW *
- * */
- { 0x0f, KEY_RECORD },
- { 0x1b, KEY_PLAYPAUSE },
- { 0x1a, KEY_STOP },
- { 0x0e, KEY_TEXT },
- { 0x1f, KEY_RED }, /*XXX KEY_AUDIO */
- { 0x1e, KEY_YELLOW }, /*XXX KEY_SOURCE */
-
- /* 0x1d 0x13 0x19 *
- * SLEEP PREVIEW DVB *
- * GREEN BLUE *
- * */
- { 0x1d, KEY_SLEEP },
- { 0x13, KEY_GREEN },
- { 0x19, KEY_BLUE }, /* XXX KEY_SAT */
-
- /* 0x58 0x5c *
- * FREEZE SNAPSHOT *
- * */
- { 0x58, KEY_SLOW },
- { 0x5c, KEY_CAMERA },
-
-};
-
-struct ir_scancode_table ir_codes_behold_table = {
- .scan = ir_codes_behold,
- .size = ARRAY_SIZE(ir_codes_behold),
-};
-EXPORT_SYMBOL_GPL(ir_codes_behold_table);
-
-/* Beholder Intl. Ltd. 2008
- * Dmitry Belimov d.belimov@google.com
- * Keytable is used by BeholdTV Columbus
- * The "ascii-art picture" below (in the comments: the first row
- * is the keycode in hex, and the subsequent row(s) show the
- * button labels, with several variants when appropriate)
- * helps to decide which keycodes to assign to the buttons.
- */
-static struct ir_scancode ir_codes_behold_columbus[] = {
-
- /* 0x13 0x11 0x1C 0x12 *
- * Mute Source TV/FM Power *
- * */
-
- { 0x13, KEY_MUTE },
- { 0x11, KEY_PROPS },
- { 0x1C, KEY_TUNER }, /* KEY_TV/KEY_RADIO */
- { 0x12, KEY_POWER },
-
- /* 0x01 0x02 0x03 0x0D *
- * 1 2 3 Stereo *
- * *
- * 0x04 0x05 0x06 0x19 *
- * 4 5 6 Snapshot *
- * *
- * 0x07 0x08 0x09 0x10 *
- * 7 8 9 Zoom *
- * */
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x0D, KEY_SETUP }, /* Setup key */
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x19, KEY_CAMERA }, /* Snapshot key */
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
- { 0x10, KEY_ZOOM },
-
- /* 0x0A 0x00 0x0B 0x0C *
- * RECALL 0 ChannelUp VolumeUp *
- * */
- { 0x0A, KEY_AGAIN },
- { 0x00, KEY_0 },
- { 0x0B, KEY_CHANNELUP },
- { 0x0C, KEY_VOLUMEUP },
-
- /* 0x1B 0x1D 0x15 0x18 *
- * Timeshift Record ChannelDown VolumeDown *
- * */
-
- { 0x1B, KEY_TIME },
- { 0x1D, KEY_RECORD },
- { 0x15, KEY_CHANNELDOWN },
- { 0x18, KEY_VOLUMEDOWN },
-
- /* 0x0E 0x1E 0x0F 0x1A *
- * Stop Pause Previous Next *
- * */
-
- { 0x0E, KEY_STOP },
- { 0x1E, KEY_PAUSE },
- { 0x0F, KEY_PREVIOUS },
- { 0x1A, KEY_NEXT },
-
-};
-
-struct ir_scancode_table ir_codes_behold_columbus_table = {
- .scan = ir_codes_behold_columbus,
- .size = ARRAY_SIZE(ir_codes_behold_columbus),
-};
-EXPORT_SYMBOL_GPL(ir_codes_behold_columbus_table);
-
-/*
- * Remote control for the Genius TVGO A11MCE
- * Adrian Pardini <pardo.bsso@gmail.com>
- */
-static struct ir_scancode ir_codes_genius_tvgo_a11mce[] = {
- /* Keys 0 to 9 */
- { 0x48, KEY_0 },
- { 0x09, KEY_1 },
- { 0x1d, KEY_2 },
- { 0x1f, KEY_3 },
- { 0x19, KEY_4 },
- { 0x1b, KEY_5 },
- { 0x11, KEY_6 },
- { 0x17, KEY_7 },
- { 0x12, KEY_8 },
- { 0x16, KEY_9 },
-
- { 0x54, KEY_RECORD }, /* recording */
- { 0x06, KEY_MUTE }, /* mute */
- { 0x10, KEY_POWER },
- { 0x40, KEY_LAST }, /* recall */
- { 0x4c, KEY_CHANNELUP }, /* channel / program + */
- { 0x00, KEY_CHANNELDOWN }, /* channel / program - */
- { 0x0d, KEY_VOLUMEUP },
- { 0x15, KEY_VOLUMEDOWN },
- { 0x4d, KEY_OK }, /* also labeled as Pause */
- { 0x1c, KEY_ZOOM }, /* full screen and Stop */
- { 0x02, KEY_MODE }, /* AV Source or Rewind */
- { 0x04, KEY_LIST }, /* -/-- */
- /* small arrows above numbers */
- { 0x1a, KEY_NEXT }, /* also Fast Forward */
- { 0x0e, KEY_PREVIOUS }, /* also Rewind */
- /* these are in a rather non-standard layout and have
-    an alternate name written on them */
- { 0x1e, KEY_UP }, /* Video Setting */
- { 0x0a, KEY_DOWN }, /* Video Default */
- { 0x05, KEY_CAMERA }, /* Snapshot */
- { 0x0c, KEY_RIGHT }, /* Hide Panel */
- /* Four buttons without label */
- { 0x49, KEY_RED },
- { 0x0b, KEY_GREEN },
- { 0x13, KEY_YELLOW },
- { 0x50, KEY_BLUE },
-};
-
-struct ir_scancode_table ir_codes_genius_tvgo_a11mce_table = {
- .scan = ir_codes_genius_tvgo_a11mce,
- .size = ARRAY_SIZE(ir_codes_genius_tvgo_a11mce),
-};
-EXPORT_SYMBOL_GPL(ir_codes_genius_tvgo_a11mce_table);
-
-/*
- * Remote control for Powercolor Real Angel 330
- * Daniel Fraga <fragabr@gmail.com>
- */
-static struct ir_scancode ir_codes_powercolor_real_angel[] = {
- { 0x38, KEY_SWITCHVIDEOMODE }, /* switch inputs */
- { 0x0c, KEY_MEDIA }, /* Turn ON/OFF App */
- { 0x00, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
- { 0x0a, KEY_DIGITS }, /* single, double, triple digit */
- { 0x29, KEY_PREVIOUS }, /* previous channel */
- { 0x12, KEY_BRIGHTNESSUP },
- { 0x13, KEY_BRIGHTNESSDOWN },
- { 0x2b, KEY_MODE }, /* stereo/mono */
- { 0x2c, KEY_TEXT }, /* teletext */
- { 0x20, KEY_CHANNELUP }, /* channel up */
- { 0x21, KEY_CHANNELDOWN }, /* channel down */
- { 0x10, KEY_VOLUMEUP }, /* volume up */
- { 0x11, KEY_VOLUMEDOWN }, /* volume down */
- { 0x0d, KEY_MUTE },
- { 0x1f, KEY_RECORD },
- { 0x17, KEY_PLAY },
- { 0x16, KEY_PAUSE },
- { 0x0b, KEY_STOP },
- { 0x27, KEY_FASTFORWARD },
- { 0x26, KEY_REWIND },
- { 0x1e, KEY_SEARCH }, /* autoscan */
- { 0x0e, KEY_CAMERA }, /* snapshot */
- { 0x2d, KEY_SETUP },
- { 0x0f, KEY_SCREEN }, /* full screen */
- { 0x14, KEY_RADIO }, /* FM radio */
- { 0x25, KEY_POWER }, /* power */
-};
-
-struct ir_scancode_table ir_codes_powercolor_real_angel_table = {
- .scan = ir_codes_powercolor_real_angel,
- .size = ARRAY_SIZE(ir_codes_powercolor_real_angel),
-};
-EXPORT_SYMBOL_GPL(ir_codes_powercolor_real_angel_table);
-
-/* Kworld Plus TV Analog Lite PCI IR
- Mauro Carvalho Chehab <mchehab@infradead.org>
- */
-static struct ir_scancode ir_codes_kworld_plus_tv_analog[] = {
- { 0x0c, KEY_PROG1 }, /* Kworld key */
- { 0x16, KEY_CLOSECD }, /* -> ) */
- { 0x1d, KEY_POWER2 },
-
- { 0x00, KEY_1 },
- { 0x01, KEY_2 },
- { 0x02, KEY_3 }, /* Two keys have the same code: 3 and left */
- { 0x03, KEY_4 }, /* Two keys have the same code: 4 and right */
- { 0x04, KEY_5 },
- { 0x05, KEY_6 },
- { 0x06, KEY_7 },
- { 0x07, KEY_8 },
- { 0x08, KEY_9 },
- { 0x0a, KEY_0 },
-
- { 0x09, KEY_AGAIN },
- { 0x14, KEY_MUTE },
-
- { 0x20, KEY_UP },
- { 0x21, KEY_DOWN },
- { 0x0b, KEY_ENTER },
-
- { 0x10, KEY_CHANNELUP },
- { 0x11, KEY_CHANNELDOWN },
-
- /* Couldn't map KEY_LEFT/KEY_RIGHT since those
-    conflict with the '3' and '4' scancodes.
-    It is not clear how the original driver handles this.
- */
-
- { 0x13, KEY_VOLUMEUP },
- { 0x12, KEY_VOLUMEDOWN },
-
- /* The lower part of the remote control.
-    There are several duplicated keycodes there,
-    most of which conflict with the digits.
-    Add mappings only for the unused scancodes.
-    Somehow the original driver can tell these keys apart,
-    but it does not seem to be done via a GPIO, nor is it
-    related to the time between keyup and keydown.
- */
- { 0x19, KEY_TIME}, /* Timeshift */
- { 0x1a, KEY_STOP},
- { 0x1b, KEY_RECORD},
-
- { 0x22, KEY_TEXT},
-
- { 0x15, KEY_AUDIO}, /* ((*)) */
- { 0x0f, KEY_ZOOM},
- { 0x1c, KEY_CAMERA}, /* snapshot */
-
- { 0x18, KEY_RED}, /* B */
- { 0x23, KEY_GREEN}, /* C */
-};
-struct ir_scancode_table ir_codes_kworld_plus_tv_analog_table = {
- .scan = ir_codes_kworld_plus_tv_analog,
- .size = ARRAY_SIZE(ir_codes_kworld_plus_tv_analog),
-};
-EXPORT_SYMBOL_GPL(ir_codes_kworld_plus_tv_analog_table);
-
-/* Kaiomy TVnPC U2
- Mauro Carvalho Chehab <mchehab@infradead.org>
- */
-static struct ir_scancode ir_codes_kaiomy[] = {
- { 0x43, KEY_POWER2},
- { 0x01, KEY_LIST},
- { 0x0b, KEY_ZOOM},
- { 0x03, KEY_POWER},
-
- { 0x04, KEY_1},
- { 0x08, KEY_2},
- { 0x02, KEY_3},
-
- { 0x0f, KEY_4},
- { 0x05, KEY_5},
- { 0x06, KEY_6},
-
- { 0x0c, KEY_7},
- { 0x0d, KEY_8},
- { 0x0a, KEY_9},
-
- { 0x11, KEY_0},
-
- { 0x09, KEY_CHANNELUP},
- { 0x07, KEY_CHANNELDOWN},
-
- { 0x0e, KEY_VOLUMEUP},
- { 0x13, KEY_VOLUMEDOWN},
-
- { 0x10, KEY_HOME},
- { 0x12, KEY_ENTER},
-
- { 0x14, KEY_RECORD},
- { 0x15, KEY_STOP},
- { 0x16, KEY_PLAY},
- { 0x17, KEY_MUTE},
-
- { 0x18, KEY_UP},
- { 0x19, KEY_DOWN},
- { 0x1a, KEY_LEFT},
- { 0x1b, KEY_RIGHT},
-
- { 0x1c, KEY_RED},
- { 0x1d, KEY_GREEN},
- { 0x1e, KEY_YELLOW},
- { 0x1f, KEY_BLUE},
-};
-struct ir_scancode_table ir_codes_kaiomy_table = {
- .scan = ir_codes_kaiomy,
- .size = ARRAY_SIZE(ir_codes_kaiomy),
-};
-EXPORT_SYMBOL_GPL(ir_codes_kaiomy_table);
-
-static struct ir_scancode ir_codes_avermedia_a16d[] = {
- { 0x20, KEY_LIST},
- { 0x00, KEY_POWER},
- { 0x28, KEY_1},
- { 0x18, KEY_2},
- { 0x38, KEY_3},
- { 0x24, KEY_4},
- { 0x14, KEY_5},
- { 0x34, KEY_6},
- { 0x2c, KEY_7},
- { 0x1c, KEY_8},
- { 0x3c, KEY_9},
- { 0x12, KEY_SUBTITLE},
- { 0x22, KEY_0},
- { 0x32, KEY_REWIND},
- { 0x3a, KEY_SHUFFLE},
- { 0x02, KEY_PRINT},
- { 0x11, KEY_CHANNELDOWN},
- { 0x31, KEY_CHANNELUP},
- { 0x0c, KEY_ZOOM},
- { 0x1e, KEY_VOLUMEDOWN},
- { 0x3e, KEY_VOLUMEUP},
- { 0x0a, KEY_MUTE},
- { 0x04, KEY_AUDIO},
- { 0x26, KEY_RECORD},
- { 0x06, KEY_PLAY},
- { 0x36, KEY_STOP},
- { 0x16, KEY_PAUSE},
- { 0x2e, KEY_REWIND},
- { 0x0e, KEY_FASTFORWARD},
- { 0x30, KEY_TEXT},
- { 0x21, KEY_GREEN},
- { 0x01, KEY_BLUE},
- { 0x08, KEY_EPG},
- { 0x2a, KEY_MENU},
-};
-struct ir_scancode_table ir_codes_avermedia_a16d_table = {
- .scan = ir_codes_avermedia_a16d,
- .size = ARRAY_SIZE(ir_codes_avermedia_a16d),
-};
-EXPORT_SYMBOL_GPL(ir_codes_avermedia_a16d_table);
-
-/* Encore ENLTV-FM v5.3
- Mauro Carvalho Chehab <mchehab@infradead.org>
- */
-static struct ir_scancode ir_codes_encore_enltv_fm53[] = {
- { 0x10, KEY_POWER2},
- { 0x06, KEY_MUTE},
-
- { 0x09, KEY_1},
- { 0x1d, KEY_2},
- { 0x1f, KEY_3},
- { 0x19, KEY_4},
- { 0x1b, KEY_5},
- { 0x11, KEY_6},
- { 0x17, KEY_7},
- { 0x12, KEY_8},
- { 0x16, KEY_9},
- { 0x48, KEY_0},
-
- { 0x04, KEY_LIST}, /* -/-- */
- { 0x40, KEY_LAST}, /* recall */
-
- { 0x02, KEY_MODE}, /* TV/AV */
- { 0x05, KEY_CAMERA}, /* SNAPSHOT */
-
- { 0x4c, KEY_CHANNELUP}, /* UP */
- { 0x00, KEY_CHANNELDOWN}, /* DOWN */
- { 0x0d, KEY_VOLUMEUP}, /* RIGHT */
- { 0x15, KEY_VOLUMEDOWN}, /* LEFT */
- { 0x49, KEY_ENTER}, /* OK */
-
- { 0x54, KEY_RECORD},
- { 0x4d, KEY_PLAY}, /* pause */
-
- { 0x1e, KEY_MENU}, /* video setting */
- { 0x0e, KEY_RIGHT}, /* <- */
- { 0x1a, KEY_LEFT}, /* -> */
-
- { 0x0a, KEY_CLEAR}, /* video default */
- { 0x0c, KEY_ZOOM}, /* hide panel */
- { 0x47, KEY_SLEEP}, /* shutdown */
-};
-struct ir_scancode_table ir_codes_encore_enltv_fm53_table = {
- .scan = ir_codes_encore_enltv_fm53,
- .size = ARRAY_SIZE(ir_codes_encore_enltv_fm53),
-};
-EXPORT_SYMBOL_GPL(ir_codes_encore_enltv_fm53_table);
-
-/* Zogis Real Audio 220 - 32 keys IR */
-static struct ir_scancode ir_codes_real_audio_220_32_keys[] = {
- { 0x1c, KEY_RADIO},
- { 0x12, KEY_POWER2},
-
- { 0x01, KEY_1},
- { 0x02, KEY_2},
- { 0x03, KEY_3},
- { 0x04, KEY_4},
- { 0x05, KEY_5},
- { 0x06, KEY_6},
- { 0x07, KEY_7},
- { 0x08, KEY_8},
- { 0x09, KEY_9},
- { 0x00, KEY_0},
-
- { 0x0c, KEY_VOLUMEUP},
- { 0x18, KEY_VOLUMEDOWN},
- { 0x0b, KEY_CHANNELUP},
- { 0x15, KEY_CHANNELDOWN},
- { 0x16, KEY_ENTER},
-
- { 0x11, KEY_LIST}, /* Source */
- { 0x0d, KEY_AUDIO}, /* stereo */
-
- { 0x0f, KEY_PREVIOUS}, /* Prev */
- { 0x1b, KEY_TIME}, /* Timeshift */
- { 0x1a, KEY_NEXT}, /* Next */
-
- { 0x0e, KEY_STOP},
- { 0x1f, KEY_PLAY},
- { 0x1e, KEY_PLAYPAUSE}, /* Pause */
-
- { 0x1d, KEY_RECORD},
- { 0x13, KEY_MUTE},
- { 0x19, KEY_CAMERA}, /* Snapshot */
-
-};
-struct ir_scancode_table ir_codes_real_audio_220_32_keys_table = {
- .scan = ir_codes_real_audio_220_32_keys,
- .size = ARRAY_SIZE(ir_codes_real_audio_220_32_keys),
-};
-EXPORT_SYMBOL_GPL(ir_codes_real_audio_220_32_keys_table);
-
-/* ATI TV Wonder HD 600 USB
- Devin Heitmueller <devin.heitmueller@gmail.com>
- */
-static struct ir_scancode ir_codes_ati_tv_wonder_hd_600[] = {
- { 0x00, KEY_RECORD}, /* Row 1 */
- { 0x01, KEY_PLAYPAUSE},
- { 0x02, KEY_STOP},
- { 0x03, KEY_POWER},
- { 0x04, KEY_PREVIOUS}, /* Row 2 */
- { 0x05, KEY_REWIND},
- { 0x06, KEY_FORWARD},
- { 0x07, KEY_NEXT},
- { 0x08, KEY_EPG}, /* Row 3 */
- { 0x09, KEY_HOME},
- { 0x0a, KEY_MENU},
- { 0x0b, KEY_CHANNELUP},
- { 0x0c, KEY_BACK}, /* Row 4 */
- { 0x0d, KEY_UP},
- { 0x0e, KEY_INFO},
- { 0x0f, KEY_CHANNELDOWN},
- { 0x10, KEY_LEFT}, /* Row 5 */
- { 0x11, KEY_SELECT},
- { 0x12, KEY_RIGHT},
- { 0x13, KEY_VOLUMEUP},
- { 0x14, KEY_LAST}, /* Row 6 */
- { 0x15, KEY_DOWN},
- { 0x16, KEY_MUTE},
- { 0x17, KEY_VOLUMEDOWN},
-};
-struct ir_scancode_table ir_codes_ati_tv_wonder_hd_600_table = {
- .scan = ir_codes_ati_tv_wonder_hd_600,
- .size = ARRAY_SIZE(ir_codes_ati_tv_wonder_hd_600),
-};
-EXPORT_SYMBOL_GPL(ir_codes_ati_tv_wonder_hd_600_table);
-
-/* DVBWorld remotes
- Igor M. Liplianin <liplianin@me.by>
- */
-static struct ir_scancode ir_codes_dm1105_nec[] = {
- { 0x0a, KEY_POWER2}, /* power */
- { 0x0c, KEY_MUTE}, /* mute */
- { 0x11, KEY_1},
- { 0x12, KEY_2},
- { 0x13, KEY_3},
- { 0x14, KEY_4},
- { 0x15, KEY_5},
- { 0x16, KEY_6},
- { 0x17, KEY_7},
- { 0x18, KEY_8},
- { 0x19, KEY_9},
- { 0x10, KEY_0},
- { 0x1c, KEY_CHANNELUP}, /* ch+ */
- { 0x0f, KEY_CHANNELDOWN}, /* ch- */
- { 0x1a, KEY_VOLUMEUP}, /* vol+ */
- { 0x0e, KEY_VOLUMEDOWN}, /* vol- */
- { 0x04, KEY_RECORD}, /* rec */
- { 0x09, KEY_CHANNEL}, /* fav */
- { 0x08, KEY_BACKSPACE}, /* rewind */
- { 0x07, KEY_FASTFORWARD}, /* fast */
- { 0x0b, KEY_PAUSE}, /* pause */
- { 0x02, KEY_ESC}, /* cancel */
- { 0x03, KEY_TAB}, /* tab */
- { 0x00, KEY_UP}, /* up */
- { 0x1f, KEY_ENTER}, /* ok */
- { 0x01, KEY_DOWN}, /* down */
- { 0x05, KEY_RECORD}, /* cap */
- { 0x06, KEY_STOP}, /* stop */
- { 0x40, KEY_ZOOM}, /* full */
- { 0x1e, KEY_TV}, /* tvmode */
- { 0x1b, KEY_B}, /* recall */
-};
-struct ir_scancode_table ir_codes_dm1105_nec_table = {
- .scan = ir_codes_dm1105_nec,
- .size = ARRAY_SIZE(ir_codes_dm1105_nec),
-};
-EXPORT_SYMBOL_GPL(ir_codes_dm1105_nec_table);
-
-static struct ir_scancode ir_codes_tevii_nec[] = {
- { 0x0a, KEY_POWER2},
- { 0x0c, KEY_MUTE},
- { 0x11, KEY_1},
- { 0x12, KEY_2},
- { 0x13, KEY_3},
- { 0x14, KEY_4},
- { 0x15, KEY_5},
- { 0x16, KEY_6},
- { 0x17, KEY_7},
- { 0x18, KEY_8},
- { 0x19, KEY_9},
- { 0x10, KEY_0},
- { 0x1c, KEY_MENU},
- { 0x0f, KEY_VOLUMEDOWN},
- { 0x1a, KEY_LAST},
- { 0x0e, KEY_OPEN},
- { 0x04, KEY_RECORD},
- { 0x09, KEY_VOLUMEUP},
- { 0x08, KEY_CHANNELUP},
- { 0x07, KEY_PVR},
- { 0x0b, KEY_TIME},
- { 0x02, KEY_RIGHT},
- { 0x03, KEY_LEFT},
- { 0x00, KEY_UP},
- { 0x1f, KEY_OK},
- { 0x01, KEY_DOWN},
- { 0x05, KEY_TUNER},
- { 0x06, KEY_CHANNELDOWN},
- { 0x40, KEY_PLAYPAUSE},
- { 0x1e, KEY_REWIND},
- { 0x1b, KEY_FAVORITES},
- { 0x1d, KEY_BACK},
- { 0x4d, KEY_FASTFORWARD},
- { 0x44, KEY_EPG},
- { 0x4c, KEY_INFO},
- { 0x41, KEY_AB},
- { 0x43, KEY_AUDIO},
- { 0x45, KEY_SUBTITLE},
- { 0x4a, KEY_LIST},
- { 0x46, KEY_F1},
- { 0x47, KEY_F2},
- { 0x5e, KEY_F3},
- { 0x5c, KEY_F4},
- { 0x52, KEY_F5},
- { 0x5a, KEY_F6},
- { 0x56, KEY_MODE},
- { 0x58, KEY_SWITCHVIDEOMODE},
-};
-struct ir_scancode_table ir_codes_tevii_nec_table = {
- .scan = ir_codes_tevii_nec,
- .size = ARRAY_SIZE(ir_codes_tevii_nec),
-};
-EXPORT_SYMBOL_GPL(ir_codes_tevii_nec_table);
-
-static struct ir_scancode ir_codes_tbs_nec[] = {
- { 0x04, KEY_POWER2}, /*power*/
- { 0x14, KEY_MUTE}, /*mute*/
- { 0x07, KEY_1},
- { 0x06, KEY_2},
- { 0x05, KEY_3},
- { 0x0b, KEY_4},
- { 0x0a, KEY_5},
- { 0x09, KEY_6},
- { 0x0f, KEY_7},
- { 0x0e, KEY_8},
- { 0x0d, KEY_9},
- { 0x12, KEY_0},
- { 0x16, KEY_CHANNELUP}, /*ch+*/
- { 0x11, KEY_CHANNELDOWN},/*ch-*/
- { 0x13, KEY_VOLUMEUP}, /*vol+*/
- { 0x0c, KEY_VOLUMEDOWN},/*vol-*/
- { 0x03, KEY_RECORD}, /*rec*/
- { 0x18, KEY_PAUSE}, /*pause*/
- { 0x19, KEY_OK}, /*ok*/
- { 0x1a, KEY_CAMERA}, /* snapshot */
- { 0x01, KEY_UP},
- { 0x10, KEY_LEFT},
- { 0x02, KEY_RIGHT},
- { 0x08, KEY_DOWN},
- { 0x15, KEY_FAVORITES},
- { 0x17, KEY_SUBTITLE},
- { 0x1d, KEY_ZOOM},
- { 0x1f, KEY_EXIT},
- { 0x1e, KEY_MENU},
- { 0x1c, KEY_EPG},
- { 0x00, KEY_PREVIOUS},
- { 0x1b, KEY_MODE},
-};
-struct ir_scancode_table ir_codes_tbs_nec_table = {
- .scan = ir_codes_tbs_nec,
- .size = ARRAY_SIZE(ir_codes_tbs_nec),
-};
-EXPORT_SYMBOL_GPL(ir_codes_tbs_nec_table);
-
-/* Terratec Cinergy Hybrid T USB XS
- Devin Heitmueller <dheitmueller@linuxtv.org>
- */
-static struct ir_scancode ir_codes_terratec_cinergy_xs[] = {
- { 0x41, KEY_HOME},
- { 0x01, KEY_POWER},
- { 0x42, KEY_MENU},
- { 0x02, KEY_1},
- { 0x03, KEY_2},
- { 0x04, KEY_3},
- { 0x43, KEY_SUBTITLE},
- { 0x05, KEY_4},
- { 0x06, KEY_5},
- { 0x07, KEY_6},
- { 0x44, KEY_TEXT},
- { 0x08, KEY_7},
- { 0x09, KEY_8},
- { 0x0a, KEY_9},
- { 0x45, KEY_DELETE},
- { 0x0b, KEY_TUNER},
- { 0x0c, KEY_0},
- { 0x0d, KEY_MODE},
- { 0x46, KEY_TV},
- { 0x47, KEY_DVD},
- { 0x49, KEY_VIDEO},
- { 0x4b, KEY_AUX},
- { 0x10, KEY_UP},
- { 0x11, KEY_LEFT},
- { 0x12, KEY_OK},
- { 0x13, KEY_RIGHT},
- { 0x14, KEY_DOWN},
- { 0x0f, KEY_EPG},
- { 0x16, KEY_INFO},
- { 0x4d, KEY_BACKSPACE},
- { 0x1c, KEY_VOLUMEUP},
- { 0x4c, KEY_PLAY},
- { 0x1b, KEY_CHANNELUP},
- { 0x1e, KEY_VOLUMEDOWN},
- { 0x1d, KEY_MUTE},
- { 0x1f, KEY_CHANNELDOWN},
- { 0x17, KEY_RED},
- { 0x18, KEY_GREEN},
- { 0x19, KEY_YELLOW},
- { 0x1a, KEY_BLUE},
- { 0x58, KEY_RECORD},
- { 0x48, KEY_STOP},
- { 0x40, KEY_PAUSE},
- { 0x54, KEY_LAST},
- { 0x4e, KEY_REWIND},
- { 0x4f, KEY_FASTFORWARD},
- { 0x5c, KEY_NEXT},
-};
-struct ir_scancode_table ir_codes_terratec_cinergy_xs_table = {
- .scan = ir_codes_terratec_cinergy_xs,
- .size = ARRAY_SIZE(ir_codes_terratec_cinergy_xs),
-};
-EXPORT_SYMBOL_GPL(ir_codes_terratec_cinergy_xs_table);
-
-/* EVGA inDtube
- Devin Heitmueller <devin.heitmueller@gmail.com>
- */
-static struct ir_scancode ir_codes_evga_indtube[] = {
- { 0x12, KEY_POWER},
- { 0x02, KEY_MODE}, /* TV */
- { 0x14, KEY_MUTE},
- { 0x1a, KEY_CHANNELUP},
- { 0x16, KEY_TV2}, /* PIP */
- { 0x1d, KEY_VOLUMEUP},
- { 0x05, KEY_CHANNELDOWN},
- { 0x0f, KEY_PLAYPAUSE},
- { 0x19, KEY_VOLUMEDOWN},
- { 0x1c, KEY_REWIND},
- { 0x0d, KEY_RECORD},
- { 0x18, KEY_FORWARD},
- { 0x1e, KEY_PREVIOUS},
- { 0x1b, KEY_STOP},
- { 0x1f, KEY_NEXT},
- { 0x13, KEY_CAMERA},
-};
-struct ir_scancode_table ir_codes_evga_indtube_table = {
- .scan = ir_codes_evga_indtube,
- .size = ARRAY_SIZE(ir_codes_evga_indtube),
-};
-EXPORT_SYMBOL_GPL(ir_codes_evga_indtube_table);
-
-static struct ir_scancode ir_codes_videomate_s350[] = {
- { 0x00, KEY_TV},
- { 0x01, KEY_DVD},
- { 0x04, KEY_RECORD},
- { 0x05, KEY_VIDEO}, /* TV/Video */
- { 0x07, KEY_STOP},
- { 0x08, KEY_PLAYPAUSE},
- { 0x0a, KEY_REWIND},
- { 0x0f, KEY_FASTFORWARD},
- { 0x10, KEY_CHANNELUP},
- { 0x12, KEY_VOLUMEUP},
- { 0x13, KEY_CHANNELDOWN},
- { 0x14, KEY_MUTE},
- { 0x15, KEY_VOLUMEDOWN},
- { 0x16, KEY_1},
- { 0x17, KEY_2},
- { 0x18, KEY_3},
- { 0x19, KEY_4},
- { 0x1a, KEY_5},
- { 0x1b, KEY_6},
- { 0x1c, KEY_7},
- { 0x1d, KEY_8},
- { 0x1e, KEY_9},
- { 0x1f, KEY_0},
- { 0x21, KEY_SLEEP},
- { 0x24, KEY_ZOOM},
- { 0x25, KEY_LAST}, /* Recall */
- { 0x26, KEY_SUBTITLE}, /* CC */
- { 0x27, KEY_LANGUAGE}, /* MTS */
- { 0x29, KEY_CHANNEL}, /* SURF */
- { 0x2b, KEY_A},
- { 0x2c, KEY_B},
- { 0x2f, KEY_CAMERA}, /* Snapshot */
- { 0x23, KEY_RADIO},
- { 0x02, KEY_PREVIOUSSONG},
- { 0x06, KEY_NEXTSONG},
- { 0x03, KEY_EPG},
- { 0x09, KEY_SETUP},
- { 0x22, KEY_BACKSPACE},
- { 0x0c, KEY_UP},
- { 0x0e, KEY_DOWN},
- { 0x0b, KEY_LEFT},
- { 0x0d, KEY_RIGHT},
- { 0x11, KEY_ENTER},
- { 0x20, KEY_TEXT},
-};
-struct ir_scancode_table ir_codes_videomate_s350_table = {
- .scan = ir_codes_videomate_s350,
- .size = ARRAY_SIZE(ir_codes_videomate_s350),
-};
-EXPORT_SYMBOL_GPL(ir_codes_videomate_s350_table);
-
-/* GADMEI UTV330+ RM008Z remote
- Shine Liu <shinel@foxmail.com>
- */
-static struct ir_scancode ir_codes_gadmei_rm008z[] = {
- { 0x14, KEY_POWER2}, /* POWER OFF */
- { 0x0c, KEY_MUTE}, /* MUTE */
-
- { 0x18, KEY_TV}, /* TV */
- { 0x0e, KEY_VIDEO}, /* AV */
- { 0x0b, KEY_AUDIO}, /* SV */
- { 0x0f, KEY_RADIO}, /* FM */
-
- { 0x00, KEY_1},
- { 0x01, KEY_2},
- { 0x02, KEY_3},
- { 0x03, KEY_4},
- { 0x04, KEY_5},
- { 0x05, KEY_6},
- { 0x06, KEY_7},
- { 0x07, KEY_8},
- { 0x08, KEY_9},
- { 0x09, KEY_0},
- { 0x0a, KEY_INFO}, /* OSD */
- { 0x1c, KEY_BACKSPACE}, /* LAST */
-
- { 0x0d, KEY_PLAY}, /* PLAY */
- { 0x1e, KEY_CAMERA}, /* SNAPSHOT */
- { 0x1a, KEY_RECORD}, /* RECORD */
- { 0x17, KEY_STOP}, /* STOP */
-
- { 0x1f, KEY_UP}, /* UP */
- { 0x44, KEY_DOWN}, /* DOWN */
- { 0x46, KEY_TAB}, /* BACK */
- { 0x4a, KEY_ZOOM}, /* FULLSCREEN */
-
- { 0x10, KEY_VOLUMEUP}, /* VOLUMEUP */
- { 0x11, KEY_VOLUMEDOWN}, /* VOLUMEDOWN */
- { 0x12, KEY_CHANNELUP}, /* CHANNELUP */
- { 0x13, KEY_CHANNELDOWN}, /* CHANNELDOWN */
- { 0x15, KEY_ENTER}, /* OK */
-};
-struct ir_scancode_table ir_codes_gadmei_rm008z_table = {
- .scan = ir_codes_gadmei_rm008z,
- .size = ARRAY_SIZE(ir_codes_gadmei_rm008z),
-};
-EXPORT_SYMBOL_GPL(ir_codes_gadmei_rm008z_table);
-
-/*************************************************************
- * COMPLETE SCANCODE TABLES
- * Instead of just a partial scancode, the tables below
- * contain the complete scancode and the receiver protocol
- *************************************************************/
-
-/*
- * Hauppauge: the newer, gray remotes (seems there are multiple
- * slightly different versions), shipped with cx88+ivtv cards.
- *
- * This table contains the complete RC5 code, instead of just the data part
- */
-static struct ir_scancode ir_codes_rc5_hauppauge_new[] = {
- /* Keys 0 to 9 */
- { 0x1e00, KEY_0 },
- { 0x1e01, KEY_1 },
- { 0x1e02, KEY_2 },
- { 0x1e03, KEY_3 },
- { 0x1e04, KEY_4 },
- { 0x1e05, KEY_5 },
- { 0x1e06, KEY_6 },
- { 0x1e07, KEY_7 },
- { 0x1e08, KEY_8 },
- { 0x1e09, KEY_9 },
-
- { 0x1e0a, KEY_TEXT }, /* keypad asterisk as well */
- { 0x1e0b, KEY_RED }, /* red button */
- { 0x1e0c, KEY_RADIO },
- { 0x1e0d, KEY_MENU },
- { 0x1e0e, KEY_SUBTITLE }, /* also the # key */
- { 0x1e0f, KEY_MUTE },
- { 0x1e10, KEY_VOLUMEUP },
- { 0x1e11, KEY_VOLUMEDOWN },
- { 0x1e12, KEY_PREVIOUS }, /* previous channel */
- { 0x1e14, KEY_UP },
- { 0x1e15, KEY_DOWN },
- { 0x1e16, KEY_LEFT },
- { 0x1e17, KEY_RIGHT },
- { 0x1e18, KEY_VIDEO }, /* Videos */
- { 0x1e19, KEY_AUDIO }, /* Music */
- /* 0x1e1a: Pictures - presume this means
- "Multimedia Home Platform" -
- no "PICTURES" key in input.h
- */
- { 0x1e1a, KEY_MHP },
-
- { 0x1e1b, KEY_EPG }, /* Guide */
- { 0x1e1c, KEY_TV },
- { 0x1e1e, KEY_NEXTSONG }, /* skip >| */
- { 0x1e1f, KEY_EXIT }, /* back/exit */
- { 0x1e20, KEY_CHANNELUP }, /* channel / program + */
- { 0x1e21, KEY_CHANNELDOWN }, /* channel / program - */
- { 0x1e22, KEY_CHANNEL }, /* source (old black remote) */
- { 0x1e24, KEY_PREVIOUSSONG }, /* replay |< */
- { 0x1e25, KEY_ENTER }, /* OK */
- { 0x1e26, KEY_SLEEP }, /* minimize (old black remote) */
- { 0x1e29, KEY_BLUE }, /* blue key */
- { 0x1e2e, KEY_GREEN }, /* green button */
- { 0x1e30, KEY_PAUSE }, /* pause */
- { 0x1e32, KEY_REWIND }, /* backward << */
- { 0x1e34, KEY_FASTFORWARD }, /* forward >> */
- { 0x1e35, KEY_PLAY },
- { 0x1e36, KEY_STOP },
- { 0x1e37, KEY_RECORD }, /* recording */
- { 0x1e38, KEY_YELLOW }, /* yellow key */
- { 0x1e3b, KEY_SELECT }, /* top right button */
- { 0x1e3c, KEY_ZOOM }, /* full */
- { 0x1e3d, KEY_POWER }, /* system power (green button) */
-};
-
-struct ir_scancode_table ir_codes_rc5_hauppauge_new_table = {
- .scan = ir_codes_rc5_hauppauge_new,
- .size = ARRAY_SIZE(ir_codes_rc5_hauppauge_new),
- .ir_type = IR_TYPE_RC5,
-};
-EXPORT_SYMBOL_GPL(ir_codes_rc5_hauppauge_new_table);
-
-/* Terratec Cinergy Hybrid T USB XS FM
- Mauro Carvalho Chehab <mchehab@redhat.com>
- */
-static struct ir_scancode ir_codes_nec_terratec_cinergy_xs[] = {
- { 0x1441, KEY_HOME},
- { 0x1401, KEY_POWER2},
-
- { 0x1442, KEY_MENU}, /* DVD menu */
- { 0x1443, KEY_SUBTITLE},
- { 0x1444, KEY_TEXT}, /* Teletext */
- { 0x1445, KEY_DELETE},
-
- { 0x1402, KEY_1},
- { 0x1403, KEY_2},
- { 0x1404, KEY_3},
- { 0x1405, KEY_4},
- { 0x1406, KEY_5},
- { 0x1407, KEY_6},
- { 0x1408, KEY_7},
- { 0x1409, KEY_8},
- { 0x140a, KEY_9},
- { 0x140c, KEY_0},
-
- { 0x140b, KEY_TUNER}, /* AV */
- { 0x140d, KEY_MODE}, /* A.B */
-
- { 0x1446, KEY_TV},
- { 0x1447, KEY_DVD},
- { 0x1449, KEY_VIDEO},
- { 0x144a, KEY_RADIO}, /* Music */
- { 0x144b, KEY_CAMERA}, /* PIC */
-
- { 0x1410, KEY_UP},
- { 0x1411, KEY_LEFT},
- { 0x1412, KEY_OK},
- { 0x1413, KEY_RIGHT},
- { 0x1414, KEY_DOWN},
-
- { 0x140f, KEY_EPG},
- { 0x1416, KEY_INFO},
- { 0x144d, KEY_BACKSPACE},
-
- { 0x141c, KEY_VOLUMEUP},
- { 0x141e, KEY_VOLUMEDOWN},
-
- { 0x144c, KEY_PLAY},
- { 0x141d, KEY_MUTE},
-
- { 0x141b, KEY_CHANNELUP},
- { 0x141f, KEY_CHANNELDOWN},
-
- { 0x1417, KEY_RED},
- { 0x1418, KEY_GREEN},
- { 0x1419, KEY_YELLOW},
- { 0x141a, KEY_BLUE},
-
- { 0x1458, KEY_RECORD},
- { 0x1448, KEY_STOP},
- { 0x1440, KEY_PAUSE},
-
- { 0x1454, KEY_LAST},
- { 0x144e, KEY_REWIND},
- { 0x144f, KEY_FASTFORWARD},
- { 0x145c, KEY_NEXT},
-};
-struct ir_scancode_table ir_codes_nec_terratec_cinergy_xs_table = {
- .scan = ir_codes_nec_terratec_cinergy_xs,
- .size = ARRAY_SIZE(ir_codes_nec_terratec_cinergy_xs),
- .ir_type = IR_TYPE_NEC,
-};
-EXPORT_SYMBOL_GPL(ir_codes_nec_terratec_cinergy_xs_table);
-
-
-/* Leadtek Winfast TV USB II Deluxe remote
- Magnus Alm <magnus.alm@gmail.com>
- */
-static struct ir_scancode ir_codes_winfast_usbii_deluxe[] = {
- { 0x62, KEY_0},
- { 0x75, KEY_1},
- { 0x76, KEY_2},
- { 0x77, KEY_3},
- { 0x79, KEY_4},
- { 0x7a, KEY_5},
- { 0x7b, KEY_6},
- { 0x7d, KEY_7},
- { 0x7e, KEY_8},
- { 0x7f, KEY_9},
-
- { 0x38, KEY_CAMERA}, /* SNAPSHOT */
- { 0x37, KEY_RECORD}, /* RECORD */
- { 0x35, KEY_TIME}, /* TIMESHIFT */
-
- { 0x74, KEY_VOLUMEUP}, /* VOLUMEUP */
- { 0x78, KEY_VOLUMEDOWN}, /* VOLUMEDOWN */
- { 0x64, KEY_MUTE}, /* MUTE */
-
- { 0x21, KEY_CHANNEL}, /* SURF */
- { 0x7c, KEY_CHANNELUP}, /* CHANNELUP */
- { 0x60, KEY_CHANNELDOWN}, /* CHANNELDOWN */
- { 0x61, KEY_LAST}, /* LAST CHANNEL (RECALL) */
-
- { 0x72, KEY_VIDEO}, /* INPUT MODES (TV/FM) */
-
- { 0x70, KEY_POWER2}, /* TV ON/OFF */
-
- { 0x39, KEY_CYCLEWINDOWS}, /* MINIMIZE (BOSS) */
- { 0x3a, KEY_NEW}, /* PIP */
- { 0x73, KEY_ZOOM}, /* FULLSCREEN */
-
- { 0x66, KEY_INFO}, /* OSD (DISPLAY) */
-
- { 0x31, KEY_DOT}, /* '.' */
- { 0x63, KEY_ENTER}, /* ENTER */
-
-};
-struct ir_scancode_table ir_codes_winfast_usbii_deluxe_table = {
- .scan = ir_codes_winfast_usbii_deluxe,
- .size = ARRAY_SIZE(ir_codes_winfast_usbii_deluxe),
-};
-EXPORT_SYMBOL_GPL(ir_codes_winfast_usbii_deluxe_table);
-
-/* Kworld 315U
- */
-static struct ir_scancode ir_codes_kworld_315u[] = {
- { 0x6143, KEY_POWER },
- { 0x6101, KEY_TUNER }, /* source */
- { 0x610b, KEY_ZOOM },
- { 0x6103, KEY_POWER2 }, /* shutdown */
-
- { 0x6104, KEY_1 },
- { 0x6108, KEY_2 },
- { 0x6102, KEY_3 },
- { 0x6109, KEY_CHANNELUP },
-
- { 0x610f, KEY_4 },
- { 0x6105, KEY_5 },
- { 0x6106, KEY_6 },
- { 0x6107, KEY_CHANNELDOWN },
-
- { 0x610c, KEY_7 },
- { 0x610d, KEY_8 },
- { 0x610a, KEY_9 },
- { 0x610e, KEY_VOLUMEUP },
-
- { 0x6110, KEY_LAST },
- { 0x6111, KEY_0 },
- { 0x6112, KEY_ENTER },
- { 0x6113, KEY_VOLUMEDOWN },
-
- { 0x6114, KEY_RECORD },
- { 0x6115, KEY_STOP },
- { 0x6116, KEY_PLAY },
- { 0x6117, KEY_MUTE },
-
- { 0x6118, KEY_UP },
- { 0x6119, KEY_DOWN },
- { 0x611a, KEY_LEFT },
- { 0x611b, KEY_RIGHT },
-
- { 0x611c, KEY_RED },
- { 0x611d, KEY_GREEN },
- { 0x611e, KEY_YELLOW },
- { 0x611f, KEY_BLUE },
-};
-
-struct ir_scancode_table ir_codes_kworld_315u_table = {
- .scan = ir_codes_kworld_315u,
- .size = ARRAY_SIZE(ir_codes_kworld_315u),
- .ir_type = IR_TYPE_NEC,
-};
-EXPORT_SYMBOL_GPL(ir_codes_kworld_315u_table);
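Every keymap removed above follows the same declaration pattern: a static array of struct ir_scancode entries wrapped in a struct ir_scancode_table, with .ir_type set only when the table carries complete scancodes for a specific protocol. As a rough, hypothetical sketch of that pattern (the "example" name and the scancodes below are invented for illustration and do not correspond to any real table in this patch):

/* Hypothetical keymap, for illustration only */
static struct ir_scancode ir_codes_example[] = {
	{ 0x00, KEY_0 },
	{ 0x01, KEY_1 },
	{ 0x10, KEY_VOLUMEUP },
	{ 0x11, KEY_VOLUMEDOWN },
	{ 0x1f, KEY_POWER },
};

struct ir_scancode_table ir_codes_example_table = {
	.scan = ir_codes_example,
	.size = ARRAY_SIZE(ir_codes_example),
	/* .ir_type = IR_TYPE_RC5,  only set for complete-scancode tables */
};
EXPORT_SYMBOL_GPL(ir_codes_example_table);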
diff --git a/drivers/media/IR/ir-keytable.c b/drivers/media/IR/ir-keytable.c
index bfca26d51827..9374a006f43d 100644
--- a/drivers/media/IR/ir-keytable.c
+++ b/drivers/media/IR/ir-keytable.c
@@ -1,4 +1,4 @@
-/* ir-register.c - handle IR scancode->keycode tables
+/* ir-keytable.c - handle IR scancode->keycode tables
*
* Copyright (C) 2009 by Mauro Carvalho Chehab <mchehab@redhat.com>
*
@@ -15,384 +15,408 @@
#include <linux/input.h>
#include <linux/slab.h>
-#include <media/ir-common.h>
+#include "ir-core-priv.h"
-#define IR_TAB_MIN_SIZE 32
-#define IR_TAB_MAX_SIZE 1024
+/* Sizes are in bytes, 256 bytes allows for 32 entries on x64 */
+#define IR_TAB_MIN_SIZE 256
+#define IR_TAB_MAX_SIZE 8192
+
+/* FIXME: IR_KEYPRESS_TIMEOUT should be protocol specific */
+#define IR_KEYPRESS_TIMEOUT 250
/**
- * ir_seek_table() - returns the element order on the table
- * @rc_tab: the ir_scancode_table with the keymap to be used
- * @scancode: the scancode that we're seeking
+ * ir_resize_table() - resizes a scancode table if necessary
+ * @rc_tab: the ir_scancode_table to resize
+ * @return: zero on success or a negative error code
*
- * This routine is used by the input routines when a key is pressed at the
- * IR. The scancode is received and needs to be converted into a keycode.
- * If the key is not found, it returns KEY_UNKNOWN. Otherwise, returns the
- * corresponding keycode from the table.
+ * This routine will shrink the ir_scancode_table if it has lots of
+ * unused entries and grow it if it is full.
*/
-static int ir_seek_table(struct ir_scancode_table *rc_tab, u32 scancode)
+static int ir_resize_table(struct ir_scancode_table *rc_tab)
{
- int rc;
- unsigned long flags;
- struct ir_scancode *keymap = rc_tab->scan;
+ unsigned int oldalloc = rc_tab->alloc;
+ unsigned int newalloc = oldalloc;
+ struct ir_scancode *oldscan = rc_tab->scan;
+ struct ir_scancode *newscan;
+
+ if (rc_tab->size == rc_tab->len) {
+ /* All entries in use -> grow keytable */
+ if (rc_tab->alloc >= IR_TAB_MAX_SIZE)
+ return -ENOMEM;
- spin_lock_irqsave(&rc_tab->lock, flags);
+ newalloc *= 2;
+ IR_dprintk(1, "Growing table to %u bytes\n", newalloc);
+ }
- /* FIXME: replace it by a binary search */
+ if ((rc_tab->len * 3 < rc_tab->size) && (oldalloc > IR_TAB_MIN_SIZE)) {
+ /* Less than 1/3 of entries in use -> shrink keytable */
+ newalloc /= 2;
+ IR_dprintk(1, "Shrinking table to %u bytes\n", newalloc);
+ }
- for (rc = 0; rc < rc_tab->size; rc++)
- if (keymap[rc].scancode == scancode)
- goto exit;
+ if (newalloc == oldalloc)
+ return 0;
- /* Not found */
- rc = -EINVAL;
+ newscan = kmalloc(newalloc, GFP_ATOMIC);
+ if (!newscan) {
+ IR_dprintk(1, "Failed to kmalloc %u bytes\n", newalloc);
+ return -ENOMEM;
+ }
-exit:
- spin_unlock_irqrestore(&rc_tab->lock, flags);
- return rc;
+ memcpy(newscan, rc_tab->scan, rc_tab->len * sizeof(struct ir_scancode));
+ rc_tab->scan = newscan;
+ rc_tab->alloc = newalloc;
+ rc_tab->size = rc_tab->alloc / sizeof(struct ir_scancode);
+ kfree(oldscan);
+ return 0;
}
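The resize policy above can be stated on its own: double the allocation when every entry is in use, halve it when fewer than a third of the entries are used, and stay within the IR_TAB_MIN_SIZE/IR_TAB_MAX_SIZE bounds. The standalone sketch below shows just that decision; the function and parameter names are invented for illustration and are not part of this patch:

/* Sketch of the grow/shrink decision made by ir_resize_table().
 * 'alloc' is the current allocation in bytes; 'used' and 'capacity'
 * are entry counts (rc_tab->len and rc_tab->size in the real code). */
static unsigned int next_table_alloc(unsigned int alloc, unsigned int used,
				     unsigned int capacity)
{
	if (used == capacity && alloc < IR_TAB_MAX_SIZE)
		return alloc * 2;	/* table is full: grow */
	if (used * 3 < capacity && alloc > IR_TAB_MIN_SIZE)
		return alloc / 2;	/* less than 1/3 in use: shrink */
	return alloc;			/* keep the current allocation */
}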
/**
- * ir_roundup_tablesize() - gets an optimum value for the table size
- * @n_elems: minimum number of entries to store keycodes
- *
- * This routine is used to choose the keycode table size.
+ * ir_do_setkeycode() - internal function to set a keycode in the
+ * scancode->keycode table
+ * @dev: the struct input_dev device descriptor
+ * @rc_tab: the struct ir_scancode_table to set the keycode in
+ * @scancode: the scancode for the ir command
+ * @keycode: the keycode for the ir command
+ * @resize: whether the keytable may be resized if necessary
+ * @return: -ENOMEM if the keycode could not be inserted, otherwise zero.
*
- * In order to have some empty space for new keycodes,
- * and knowing in advance that kmalloc allocates only power of two
- * segments, it optimizes the allocated space to have some spare space
- * for those new keycodes by using the maximum number of entries that
- * will effectively be allocated by kmalloc.
- * In order to reduce the quantity of table resizes, it has a minimum
- * table size of IR_TAB_MIN_SIZE.
+ * This routine is used internally to manipulate the scancode->keycode table.
+ * The caller has to hold @rc_tab->lock.
*/
-static int ir_roundup_tablesize(int n_elems)
+static int ir_do_setkeycode(struct input_dev *dev,
+ struct ir_scancode_table *rc_tab,
+ unsigned scancode, unsigned keycode,
+ bool resize)
{
- size_t size;
-
- if (n_elems < IR_TAB_MIN_SIZE)
- n_elems = IR_TAB_MIN_SIZE;
+ unsigned int i;
+ int old_keycode = KEY_RESERVED;
+ struct ir_input_dev *ir_dev = input_get_drvdata(dev);
/*
- * As kmalloc only allocates sizes of power of two, get as
- * much entries as possible for the allocated memory segment
+ * Unfortunately, some hardware-based IR decoders don't provide
+ * all bits for the complete IR code. In general, they provide only
+ * the command part of the IR code. Yet, as it is possible to replace
+ * the provided remote with another one, loading IR tables from other
+ * remotes needs to be allowed. So, apply the scancode mask, if the
+ * driver provides one.
*/
- size = roundup_pow_of_two(n_elems * sizeof(struct ir_scancode));
- n_elems = size / sizeof(struct ir_scancode);
+ if (ir_dev->props && ir_dev->props->scanmask) {
+ scancode &= ir_dev->props->scanmask;
+ }
- return n_elems;
+ /* First check if we already have a mapping for this ir command */
+ for (i = 0; i < rc_tab->len; i++) {
+ /* Keytable is sorted from lowest to highest scancode */
+ if (rc_tab->scan[i].scancode > scancode)
+ break;
+ else if (rc_tab->scan[i].scancode < scancode)
+ continue;
+
+ old_keycode = rc_tab->scan[i].keycode;
+ rc_tab->scan[i].keycode = keycode;
+
+ /* Did the user wish to remove the mapping? */
+ if (keycode == KEY_RESERVED || keycode == KEY_UNKNOWN) {
+ IR_dprintk(1, "#%d: Deleting scan 0x%04x\n",
+ i, scancode);
+ rc_tab->len--;
+ memmove(&rc_tab->scan[i], &rc_tab->scan[i + 1],
+ (rc_tab->len - i) * sizeof(struct ir_scancode));
+ }
+
+ /* Possibly shrink the keytable, failure is not a problem */
+ ir_resize_table(rc_tab);
+ break;
+ }
+
+ if (old_keycode == KEY_RESERVED && keycode != KEY_RESERVED) {
+ /* No previous mapping found, we might need to grow the table */
+ if (resize && ir_resize_table(rc_tab))
+ return -ENOMEM;
+
+ IR_dprintk(1, "#%d: New scan 0x%04x with key 0x%04x\n",
+ i, scancode, keycode);
+
+ /* i is the proper index to insert our new keycode */
+ memmove(&rc_tab->scan[i + 1], &rc_tab->scan[i],
+ (rc_tab->len - i) * sizeof(struct ir_scancode));
+ rc_tab->scan[i].scancode = scancode;
+ rc_tab->scan[i].keycode = keycode;
+ rc_tab->len++;
+ set_bit(keycode, dev->keybit);
+ } else {
+ IR_dprintk(1, "#%d: Replacing scan 0x%04x with key 0x%04x\n",
+ i, scancode, keycode);
+ /* A previous mapping was updated... */
+ clear_bit(old_keycode, dev->keybit);
+ /* ...but another scancode might use the same keycode */
+ for (i = 0; i < rc_tab->len; i++) {
+ if (rc_tab->scan[i].keycode == old_keycode) {
+ set_bit(old_keycode, dev->keybit);
+ break;
+ }
+ }
+ }
+
+ return 0;
}
/**
- * ir_copy_table() - copies a keytable, discarding the unused entries
- * @destin: destin table
- * @origin: origin table
+ * ir_setkeycode() - set a keycode in the scancode->keycode table
+ * @dev: the struct input_dev device descriptor
+ * @scancode: the desired scancode
+ * @keycode: the keycode to be set for that scancode
+ * @return: -ENOMEM if the keycode could not be inserted, otherwise zero.
*
- * Copies all entries where the keycode is not KEY_UNKNOWN/KEY_RESERVED
- * Also copies table size and table protocol.
- * NOTE: It shouldn't copy the lock field
+ * This routine is used to handle the evdev EVIOCSKEYCODE ioctl.
*/
-
-static int ir_copy_table(struct ir_scancode_table *destin,
- const struct ir_scancode_table *origin)
+static int ir_setkeycode(struct input_dev *dev,
+ unsigned int scancode, unsigned int keycode)
{
- int i, j = 0;
-
- for (i = 0; i < origin->size; i++) {
- if (origin->scan[i].keycode == KEY_UNKNOWN ||
- origin->scan[i].keycode == KEY_RESERVED)
- continue;
+ int rc;
+ unsigned long flags;
+ struct ir_input_dev *ir_dev = input_get_drvdata(dev);
+ struct ir_scancode_table *rc_tab = &ir_dev->rc_tab;
- memcpy(&destin->scan[j], &origin->scan[i], sizeof(struct ir_scancode));
- j++;
- }
- destin->size = j;
- destin->ir_type = origin->ir_type;
+ spin_lock_irqsave(&rc_tab->lock, flags);
+ rc = ir_do_setkeycode(dev, rc_tab, scancode, keycode, true);
+ spin_unlock_irqrestore(&rc_tab->lock, flags);
+ return rc;
+}
- IR_dprintk(1, "Copied %d scancodes to the new keycode table\n", destin->size);
+/**
+ * ir_setkeytable() - sets several entries in the scancode->keycode table
+ * @dev: the struct input_dev device descriptor
+ * @to: the struct ir_scancode_table to copy entries to
+ * @from: the struct ir_scancode_table to copy entries from
+ * @return: -ENOMEM if a keycode could not be inserted, otherwise zero.
+ *
+ * This routine is used to handle table initialization.
+ */
+static int ir_setkeytable(struct input_dev *dev,
+ struct ir_scancode_table *to,
+ const struct ir_scancode_table *from)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(dev);
+ struct ir_scancode_table *rc_tab = &ir_dev->rc_tab;
+ unsigned long flags;
+ unsigned int i;
+ int rc = 0;
- return 0;
+ spin_lock_irqsave(&rc_tab->lock, flags);
+ for (i = 0; i < from->size; i++) {
+ rc = ir_do_setkeycode(dev, to, from->scan[i].scancode,
+ from->scan[i].keycode, false);
+ if (rc)
+ break;
+ }
+ spin_unlock_irqrestore(&rc_tab->lock, flags);
+ return rc;
}
/**
- * ir_getkeycode() - get a keycode at the evdev scancode ->keycode table
+ * ir_getkeycode() - get a keycode from the scancode->keycode table
* @dev: the struct input_dev device descriptor
* @scancode: the desired scancode
- * @keycode: the keycode to be returned.
+ * @keycode: used to return the keycode, if found, or KEY_RESERVED
+ * @return: always returns zero.
*
* This routine is used to handle the evdev EVIOCGKEYCODE ioctl.
- * If the key is not found, returns -EINVAL, otherwise, returns 0.
*/
static int ir_getkeycode(struct input_dev *dev,
unsigned int scancode, unsigned int *keycode)
{
- int elem;
+ int start, end, mid;
+ unsigned long flags;
+ int key = KEY_RESERVED;
struct ir_input_dev *ir_dev = input_get_drvdata(dev);
struct ir_scancode_table *rc_tab = &ir_dev->rc_tab;
- elem = ir_seek_table(rc_tab, scancode);
- if (elem >= 0) {
- *keycode = rc_tab->scan[elem].keycode;
- return 0;
+ spin_lock_irqsave(&rc_tab->lock, flags);
+ start = 0;
+ end = rc_tab->len - 1;
+ while (start <= end) {
+ mid = (start + end) / 2;
+ if (rc_tab->scan[mid].scancode < scancode)
+ start = mid + 1;
+ else if (rc_tab->scan[mid].scancode > scancode)
+ end = mid - 1;
+ else {
+ key = rc_tab->scan[mid].keycode;
+ break;
+ }
}
+ spin_unlock_irqrestore(&rc_tab->lock, flags);
- /*
- * Scancode not found and table can't be expanded
- */
- if (elem < 0 && rc_tab->size == IR_TAB_MAX_SIZE)
- return -EINVAL;
+ if (key == KEY_RESERVED)
+ IR_dprintk(1, "unknown key for scancode 0x%04x\n",
+ scancode);
- /*
- * If is there extra space, returns KEY_RESERVED,
- * otherwise, input core won't let ir_setkeycode to work
- */
- *keycode = KEY_RESERVED;
+ *keycode = key;
return 0;
}
/**
- * ir_is_resize_needed() - Check if the table needs resizing
- * @table: keycode table that may need to resize
- * @n_elems: minimum number of entries to store keycodes
- *
- * Considering that kmalloc uses power of two storage areas, this
- * routine detects if the real allocated size will change. If not, it
- * just returns without doing anything. Otherwise, it will extend or
- * reduce the table size to meet the new needs.
+ * ir_g_keycode_from_table() - gets the keycode that corresponds to a scancode
+ * @input_dev: the struct input_dev descriptor of the device
+ * @scancode: the scancode that we're seeking
*
- * It returns 0 if no resize is needed, 1 otherwise.
+ * This routine is used by the input routines when a key is pressed on the
+ * IR remote control. The scancode is received and needs to be converted
+ * into a keycode. If the key is not found, it returns KEY_RESERVED;
+ * otherwise, it returns the corresponding keycode from the table.
*/
-static int ir_is_resize_needed(struct ir_scancode_table *table, int n_elems)
+u32 ir_g_keycode_from_table(struct input_dev *dev, u32 scancode)
{
- int cur_size = ir_roundup_tablesize(table->size);
- int new_size = ir_roundup_tablesize(n_elems);
-
- if (cur_size == new_size)
- return 0;
+ int keycode;
- /* Resize is needed */
- return 1;
+ ir_getkeycode(dev, scancode, &keycode);
+ if (keycode != KEY_RESERVED)
+ IR_dprintk(1, "%s: scancode 0x%04x keycode 0x%02x\n",
+ dev->name, scancode, keycode);
+ return keycode;
}
+EXPORT_SYMBOL_GPL(ir_g_keycode_from_table);
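As a usage note, a bridge driver that has decoded a scancode from its hardware would typically feed it through this helper before reporting it to the input layer. The sketch below is illustrative only: example_handle_scancode() is a hypothetical caller, not part of this patch, and in the real core the key release is generated later by the keyup timer rather than immediately.

/* Hypothetical caller, shown only to illustrate ir_g_keycode_from_table() */
static void example_handle_scancode(struct input_dev *dev, u32 scancode)
{
	u32 keycode = ir_g_keycode_from_table(dev, scancode);

	if (keycode == KEY_RESERVED)
		return;				/* unmapped button: ignore */

	input_report_key(dev, keycode, 1);	/* report the key press */
	input_sync(dev);
	input_report_key(dev, keycode, 0);	/* and an immediate release */
	input_sync(dev);
}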
/**
- * ir_delete_key() - remove a keycode from the table
- * @rc_tab: keycode table
- * @elem: element to be removed
+ * ir_keyup() - generates input event to cleanup a key press
+ * @ir: the struct ir_input_dev descriptor of the device
*
+ * This routine is used to signal that a key has been released on the
+ * remote control. It reports a keyup input event via input_report_key().
*/
-static void ir_delete_key(struct ir_scancode_table *rc_tab, int elem)
+static void ir_keyup(struct ir_input_dev *ir)
{
- unsigned long flags = 0;
- int newsize = rc_tab->size - 1;
- int resize = ir_is_resize_needed(rc_tab, newsize);
- struct ir_scancode *oldkeymap = rc_tab->scan;
- struct ir_scancode *newkeymap = NULL;
-
- if (resize)
- newkeymap = kzalloc(ir_roundup_tablesize(newsize) *
- sizeof(*newkeymap), GFP_ATOMIC);
-
- /* There's no memory for resize. Keep the old table */
- if (!resize || !newkeymap) {
- newkeymap = oldkeymap;
-
- /* We'll modify the live table. Lock it */
- spin_lock_irqsave(&rc_tab->lock, flags);
- }
+ if (!ir->keypressed)
+ return;
- /*
- * Copy the elements before the one that will be deleted
- * if (!resize), both oldkeymap and newkeymap points
- * to the same place, so, there's no need to copy
- */
- if (resize && elem > 0)
- memcpy(newkeymap, oldkeymap,
- elem * sizeof(*newkeymap));
+ IR_dprintk(1, "keyup key 0x%04x\n", ir->last_keycode);
+ input_report_key(ir->input_dev, ir->last_keycode, 0);
+ input_sync(ir->input_dev);
+ ir->keypressed = false;
+}
+
+/**
+ * ir_timer_keyup() - generates a keyup event after a timeout
+ * @cookie: a pointer to struct ir_input_dev passed to setup_timer()
+ *
+ * This routine will generate a keyup event some time after a keydown event
+ * is generated when no further activity has been detected.
+ */
+static void ir_timer_keyup(unsigned long cookie)
+{
+ struct ir_input_dev *ir = (struct ir_input_dev *)cookie;
+ unsigned long flags;
/*
- * Copy the other elements overwriting the element to be removed
- * This operation applies to both resize and non-resize case
+ * ir->keyup_jiffies is used to prevent a race condition if a
+ * hardware interrupt occurs at this point and the keyup timer
+ * event is moved further into the future as a result.
+ *
+ * The timer will then be reactivated and this function called
+ * again in the future. We need to exit gracefully in that case
+ * to allow the input subsystem to do its auto-repeat magic or
+ * a keyup event might follow immediately after the keydown.
*/
- if (elem < newsize)
- memcpy(&newkeymap[elem], &oldkeymap[elem + 1],
- (newsize - elem) * sizeof(*newkeymap));
-
- if (resize) {
- /*
- * As the copy happened to a temporary table, only here
- * it needs to lock while replacing the table pointers
- * to use the new table
- */
- spin_lock_irqsave(&rc_tab->lock, flags);
- rc_tab->size = newsize;
- rc_tab->scan = newkeymap;
- spin_unlock_irqrestore(&rc_tab->lock, flags);
-
- /* Frees the old keytable */
- kfree(oldkeymap);
- } else {
- rc_tab->size = newsize;
- spin_unlock_irqrestore(&rc_tab->lock, flags);
- }
+ spin_lock_irqsave(&ir->keylock, flags);
+ if (time_is_before_eq_jiffies(ir->keyup_jiffies))
+ ir_keyup(ir);
+ spin_unlock_irqrestore(&ir->keylock, flags);
}
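To make the race handling above concrete (timings are illustrative, not taken from the code): with a keypress timeout of 250 ms, a keydown at t = 0 arms the timer for t = 250 ms. If a repeat frame is decoded just as that timer expires, the decoder path takes keylock first and moves keyup_jiffies and the timer to t = 500 ms; when the already-expired timer callback finally runs, the deadline comparison finds keyup_jiffies still in the future and returns without calling ir_keyup(), so the key stays pressed and the input layer's auto-repeat keeps working. The re-armed timer then emits the keyup at the new deadline if nothing else arrives.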
/**
- * ir_insert_key() - insert a keycode at the table
- * @rc_tab: keycode table
- * @scancode: the desired scancode
- * @keycode: the keycode to be retorned.
+ * ir_repeat() - notifies the IR core that a key is still pressed
+ * @dev: the struct input_dev descriptor of the device
*
+ * This routine is used by IR decoders when a repeat message is received
+ * that does not include the bits needed to reproduce the original
+ * scancode.
*/
-static int ir_insert_key(struct ir_scancode_table *rc_tab,
- int scancode, int keycode)
+void ir_repeat(struct input_dev *dev)
{
unsigned long flags;
- int elem = rc_tab->size;
- int newsize = rc_tab->size + 1;
- int resize = ir_is_resize_needed(rc_tab, newsize);
- struct ir_scancode *oldkeymap = rc_tab->scan;
- struct ir_scancode *newkeymap;
-
- if (resize) {
- newkeymap = kzalloc(ir_roundup_tablesize(newsize) *
- sizeof(*newkeymap), GFP_ATOMIC);
- if (!newkeymap)
- return -ENOMEM;
+ struct ir_input_dev *ir = input_get_drvdata(dev);
- memcpy(newkeymap, oldkeymap,
- rc_tab->size * sizeof(*newkeymap));
- } else
- newkeymap = oldkeymap;
+ spin_lock_irqsave(&ir->keylock, flags);
- /* Stores the new code at the table */
- IR_dprintk(1, "#%d: New scan 0x%04x with key 0x%04x\n",
- rc_tab->size, scancode, keycode);
+ if (!ir->keypressed)
+ goto out;
- spin_lock_irqsave(&rc_tab->lock, flags);
- rc_tab->size = newsize;
- if (resize) {
- rc_tab->scan = newkeymap;
- kfree(oldkeymap);
- }
- newkeymap[elem].scancode = scancode;
- newkeymap[elem].keycode = keycode;
- spin_unlock_irqrestore(&rc_tab->lock, flags);
+ ir->keyup_jiffies = jiffies + msecs_to_jiffies(IR_KEYPRESS_TIMEOUT);
+ mod_timer(&ir->timer_keyup, ir->keyup_jiffies);
- return 0;
+out:
+ spin_unlock_irqrestore(&ir->keylock, flags);
}
+EXPORT_SYMBOL_GPL(ir_repeat);
/**
- * ir_setkeycode() - set a keycode at the evdev scancode ->keycode table
- * @dev: the struct input_dev device descriptor
- * @scancode: the desired scancode
- * @keycode: the keycode to be retorned.
+ * ir_keydown() - generates input event for a key press
+ * @dev: the struct input_dev descriptor of the device
+ * @scancode: the scancode that we're seeking
+ * @toggle: the toggle value (protocol dependent, if the protocol doesn't
+ * support toggle values, this should be set to zero)
*
- * This routine is used to handle evdev EVIOCSKEY ioctl.
- * There's one caveat here: how can we increase the size of the table?
- * If the key is not found, returns -EINVAL, otherwise, returns 0.
+ * This routine is used by the input routines when a key is pressed on the
+ * IR remote. It gets the keycode for a scancode and reports an input event via
+ * input_report_key().
*/
-static int ir_setkeycode(struct input_dev *dev,
- unsigned int scancode, unsigned int keycode)
+void ir_keydown(struct input_dev *dev, int scancode, u8 toggle)
{
- int rc = 0;
- struct ir_input_dev *ir_dev = input_get_drvdata(dev);
- struct ir_scancode_table *rc_tab = &ir_dev->rc_tab;
- struct ir_scancode *keymap = rc_tab->scan;
unsigned long flags;
+ struct ir_input_dev *ir = input_get_drvdata(dev);
- /*
- * Handle keycode table deletions
- *
- * If userspace is adding a KEY_UNKNOWN or KEY_RESERVED,
- * deal as a trial to remove an existing scancode attribution
- * if table become too big, reduce it to save space
- */
- if (keycode == KEY_UNKNOWN || keycode == KEY_RESERVED) {
- rc = ir_seek_table(rc_tab, scancode);
- if (rc < 0)
- return 0;
-
- IR_dprintk(1, "#%d: Deleting scan 0x%04x\n", rc, scancode);
- clear_bit(keymap[rc].keycode, dev->keybit);
- ir_delete_key(rc_tab, rc);
-
- return 0;
- }
+ u32 keycode = ir_g_keycode_from_table(dev, scancode);
- /*
- * Handle keycode replacements
- *
- * If the scancode exists, just replace by the new value
- */
- rc = ir_seek_table(rc_tab, scancode);
- if (rc >= 0) {
- IR_dprintk(1, "#%d: Replacing scan 0x%04x with key 0x%04x\n",
- rc, scancode, keycode);
+ spin_lock_irqsave(&ir->keylock, flags);
- clear_bit(keymap[rc].keycode, dev->keybit);
+ /* Repeat event? */
+ if (ir->keypressed &&
+ ir->last_scancode == scancode &&
+ ir->last_toggle == toggle)
+ goto set_timer;
- spin_lock_irqsave(&rc_tab->lock, flags);
- keymap[rc].keycode = keycode;
- spin_unlock_irqrestore(&rc_tab->lock, flags);
+ /* Release old keypress */
+ ir_keyup(ir);
- set_bit(keycode, dev->keybit);
+ ir->last_scancode = scancode;
+ ir->last_toggle = toggle;
+ ir->last_keycode = keycode;
- return 0;
- }
+ if (keycode == KEY_RESERVED)
+ goto out;
- /*
- * Handle new scancode inserts
- *
- * reallocate table if needed and insert a new keycode
- */
+ /* Register a keypress */
+ ir->keypressed = true;
+ IR_dprintk(1, "%s: key down event, key 0x%04x, scancode 0x%04x\n",
+ dev->name, keycode, scancode);
+ input_report_key(dev, ir->last_keycode, 1);
+ input_sync(dev);
- /* Avoid growing the table indefinitely */
- if (rc_tab->size + 1 > IR_TAB_MAX_SIZE)
- return -EINVAL;
-
- rc = ir_insert_key(rc_tab, scancode, keycode);
- if (rc < 0)
- return rc;
- set_bit(keycode, dev->keybit);
-
- return 0;
+set_timer:
+ ir->keyup_jiffies = jiffies + msecs_to_jiffies(IR_KEYPRESS_TIMEOUT);
+ mod_timer(&ir->timer_keyup, ir->keyup_jiffies);
+out:
+ spin_unlock_irqrestore(&ir->keylock, flags);
}
+EXPORT_SYMBOL_GPL(ir_keydown);
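For drivers whose hardware already delivers complete scancodes (rather than raw pulse/space timing), ir_keydown() is the only per-frame call needed. A minimal sketch, with the driver context, the scancode register and the assumption that ir_keydown() is declared in media/ir-core.h all invented for illustration:

/* Hypothetical sketch, not part of this patch. */
#include <linux/interrupt.h>
#include <linux/io.h>
#include <media/ir-core.h>

struct foo_ir {				/* invented driver context */
	struct input_dev *input_dev;	/* registered with ir-core */
	void __iomem *regs;		/* invented register window */
};

static irqreturn_t foo_ir_irq(int irq, void *dev_id)
{
	struct foo_ir *priv = dev_id;
	u32 scancode = readl(priv->regs);	/* invented scancode register */

	/* Translates scancode->keycode and reports the key press; the
	 * matching keyup is generated later by the ir-core keyup timer. */
	ir_keydown(priv->input_dev, scancode, 0 /* protocol has no toggle */);

	return IRQ_HANDLED;
}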
-/**
- * ir_g_keycode_from_table() - gets the keycode that corresponds to a scancode
- * @input_dev: the struct input_dev descriptor of the device
- * @scancode: the scancode that we're seeking
- *
- * This routine is used by the input routines when a key is pressed at the
- * IR. The scancode is received and needs to be converted into a keycode.
- * If the key is not found, it returns KEY_UNKNOWN. Otherwise, returns the
- * corresponding keycode from the table.
- */
-u32 ir_g_keycode_from_table(struct input_dev *dev, u32 scancode)
+static int ir_open(struct input_dev *input_dev)
{
- struct ir_input_dev *ir_dev = input_get_drvdata(dev);
- struct ir_scancode_table *rc_tab = &ir_dev->rc_tab;
- struct ir_scancode *keymap = rc_tab->scan;
- int elem;
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
- elem = ir_seek_table(rc_tab, scancode);
- if (elem >= 0) {
- IR_dprintk(1, "%s: scancode 0x%04x keycode 0x%02x\n",
- dev->name, scancode, keymap[elem].keycode);
-
- return rc_tab->scan[elem].keycode;
- }
+ return ir_dev->props->open(ir_dev->props->priv);
+}
- printk(KERN_INFO "%s: unknown key for scancode 0x%04x\n",
- dev->name, scancode);
+static void ir_close(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
- /* Reports userspace that an unknown keycode were got */
- return KEY_RESERVED;
+ ir_dev->props->close(ir_dev->props->priv);
}
-EXPORT_SYMBOL_GPL(ir_g_keycode_from_table);
/**
- * ir_input_register() - sets the IR keycode table and add the handlers
+ * __ir_input_register() - sets the IR keycode table and add the handlers
* for keymap table get/set
* @input_dev: the struct input_dev descriptor of the device
* @rc_tab: the struct ir_scancode_table table of scancode/keymap
@@ -402,13 +426,13 @@ EXPORT_SYMBOL_GPL(ir_g_keycode_from_table);
* It will register the input/evdev interface for the device and
* register the sysfs code for IR class
*/
-int ir_input_register(struct input_dev *input_dev,
+int __ir_input_register(struct input_dev *input_dev,
const struct ir_scancode_table *rc_tab,
- const struct ir_dev_props *props)
+ const struct ir_dev_props *props,
+ const char *driver_name)
{
struct ir_input_dev *ir_dev;
- struct ir_scancode *keymap = rc_tab->scan;
- int i, rc;
+ int rc;
if (rc_tab->scan == NULL || !rc_tab->size)
return -EINVAL;
@@ -417,57 +441,77 @@ int ir_input_register(struct input_dev *input_dev,
if (!ir_dev)
return -ENOMEM;
- spin_lock_init(&ir_dev->rc_tab.lock);
-
- ir_dev->rc_tab.size = ir_roundup_tablesize(rc_tab->size);
- ir_dev->rc_tab.scan = kzalloc(ir_dev->rc_tab.size *
- sizeof(struct ir_scancode), GFP_KERNEL);
- if (!ir_dev->rc_tab.scan) {
- kfree(ir_dev);
- return -ENOMEM;
+ ir_dev->driver_name = kasprintf(GFP_KERNEL, "%s", driver_name);
+ if (!ir_dev->driver_name) {
+ rc = -ENOMEM;
+ goto out_dev;
}
- IR_dprintk(1, "Allocated space for %d keycode entries (%zd bytes)\n",
- ir_dev->rc_tab.size,
- ir_dev->rc_tab.size * sizeof(ir_dev->rc_tab.scan));
+ input_dev->getkeycode = ir_getkeycode;
+ input_dev->setkeycode = ir_setkeycode;
+ input_set_drvdata(input_dev, ir_dev);
+ ir_dev->input_dev = input_dev;
- ir_copy_table(&ir_dev->rc_tab, rc_tab);
- ir_dev->props = props;
+ spin_lock_init(&ir_dev->rc_tab.lock);
+ spin_lock_init(&ir_dev->keylock);
+ setup_timer(&ir_dev->timer_keyup, ir_timer_keyup, (unsigned long)ir_dev);
+
+ ir_dev->rc_tab.name = rc_tab->name;
+ ir_dev->rc_tab.ir_type = rc_tab->ir_type;
+ ir_dev->rc_tab.alloc = roundup_pow_of_two(rc_tab->size *
+ sizeof(struct ir_scancode));
+ ir_dev->rc_tab.scan = kmalloc(ir_dev->rc_tab.alloc, GFP_KERNEL);
+ ir_dev->rc_tab.size = ir_dev->rc_tab.alloc / sizeof(struct ir_scancode);
+ if (props) {
+ ir_dev->props = props;
+ if (props->open)
+ input_dev->open = ir_open;
+ if (props->close)
+ input_dev->close = ir_close;
+ }
- /* set the bits for the keys */
- IR_dprintk(1, "key map size: %d\n", rc_tab->size);
- for (i = 0; i < rc_tab->size; i++) {
- IR_dprintk(1, "#%d: setting bit for keycode 0x%04x\n",
- i, keymap[i].keycode);
- set_bit(keymap[i].keycode, input_dev->keybit);
+ if (!ir_dev->rc_tab.scan) {
+ rc = -ENOMEM;
+ goto out_name;
}
- clear_bit(0, input_dev->keybit);
+
+ IR_dprintk(1, "Allocated space for %u keycode entries (%u bytes)\n",
+ ir_dev->rc_tab.size, ir_dev->rc_tab.alloc);
set_bit(EV_KEY, input_dev->evbit);
+ set_bit(EV_REP, input_dev->evbit);
- input_dev->getkeycode = ir_getkeycode;
- input_dev->setkeycode = ir_setkeycode;
- input_set_drvdata(input_dev, ir_dev);
+ if (ir_setkeytable(input_dev, &ir_dev->rc_tab, rc_tab)) {
+ rc = -ENOMEM;
+ goto out_table;
+ }
- rc = input_register_device(input_dev);
+ rc = ir_register_class(input_dev);
if (rc < 0)
- goto err;
+ goto out_table;
- rc = ir_register_class(input_dev);
- if (rc < 0) {
- input_unregister_device(input_dev);
- goto err;
+ if (props && props->driver_type == RC_DRIVER_IR_RAW) {
+ rc = ir_raw_event_register(input_dev);
+ if (rc < 0)
+ goto out_event;
}
+ IR_dprintk(1, "Registered input device on %s for %s remote.\n",
+ driver_name, rc_tab->name);
+
return 0;
-err:
- kfree(rc_tab->scan);
+out_event:
+ ir_unregister_class(input_dev);
+out_table:
+ kfree(ir_dev->rc_tab.scan);
+out_name:
+ kfree(ir_dev->driver_name);
+out_dev:
kfree(ir_dev);
- input_set_drvdata(input_dev, NULL);
return rc;
}
-EXPORT_SYMBOL_GPL(ir_input_register);
+EXPORT_SYMBOL_GPL(__ir_input_register);
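A minimal registration sketch follows. The keymap, keymap name and driver name are invented for illustration; drivers would normally reach this through an ir_input_register() wrapper that supplies the driver name, but the call added by this patch is shown directly, and only fields visible in this diff (scan, size, name, ir_type, driver_type, allowed_protos) are used:

/* Hypothetical sketch, not part of this patch. */
#include <linux/kernel.h>
#include <linux/input.h>
#include <media/ir-core.h>

static struct ir_scancode foo_keymap[] = {		/* invented keymap */
	{ .scancode = 0x0001, .keycode = KEY_POWER },
	{ .scancode = 0x0002, .keycode = KEY_MUTE },
};

static struct ir_scancode_table foo_rc_tab = {
	.scan    = foo_keymap,
	.size    = ARRAY_SIZE(foo_keymap),
	.ir_type = IR_TYPE_NEC,
	.name    = "rc-foo",				/* invented keymap name */
};

static struct ir_dev_props foo_props = {
	.driver_type    = RC_DRIVER_IR_RAW,	/* scancodes come from the raw decoders */
	.allowed_protos = IR_TYPE_NEC,
	/* .priv/.open/.close are optional and omitted here */
};

static int foo_ir_setup(struct input_dev *input_dev)
{
	/* Copies the keymap, wires up get/setkeycode, creates the sysfs
	 * class device and, for RC_DRIVER_IR_RAW, the raw event handling. */
	return __ir_input_register(input_dev, &foo_rc_tab, &foo_props, "foo-ir");
}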
/**
* ir_input_unregister() - unregisters IR and frees resources
@@ -475,9 +519,9 @@ EXPORT_SYMBOL_GPL(ir_input_register);
* This routine is used to free memory and de-register interfaces.
*/
-void ir_input_unregister(struct input_dev *dev)
+void ir_input_unregister(struct input_dev *input_dev)
{
- struct ir_input_dev *ir_dev = input_get_drvdata(dev);
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
struct ir_scancode_table *rc_tab;
if (!ir_dev)
@@ -485,15 +529,18 @@ void ir_input_unregister(struct input_dev *dev)
IR_dprintk(1, "Freed keycode table\n");
+ del_timer_sync(&ir_dev->timer_keyup);
+ if (ir_dev->props && ir_dev->props->driver_type == RC_DRIVER_IR_RAW)
+ ir_raw_event_unregister(input_dev);
rc_tab = &ir_dev->rc_tab;
rc_tab->size = 0;
kfree(rc_tab->scan);
rc_tab->scan = NULL;
- ir_unregister_class(dev);
+ ir_unregister_class(input_dev);
+ kfree(ir_dev->driver_name);
kfree(ir_dev);
- input_unregister_device(dev);
}
EXPORT_SYMBOL_GPL(ir_input_unregister);
diff --git a/drivers/media/IR/ir-nec-decoder.c b/drivers/media/IR/ir-nec-decoder.c
new file mode 100644
index 000000000000..ba79233112ef
--- /dev/null
+++ b/drivers/media/IR/ir-nec-decoder.c
@@ -0,0 +1,328 @@
+/* ir-nec-decoder.c - handle NEC IR Pulse/Space protocol
+ *
+ * Copyright (C) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitrev.h>
+#include "ir-core-priv.h"
+
+#define NEC_NBITS 32
+#define NEC_UNIT 562500 /* ns */
+#define NEC_HEADER_PULSE (16 * NEC_UNIT)
+#define NECX_HEADER_PULSE (8 * NEC_UNIT) /* Less common NEC variant */
+#define NEC_HEADER_SPACE (8 * NEC_UNIT)
+#define NEC_REPEAT_SPACE (4 * NEC_UNIT)
+#define NEC_BIT_PULSE (1 * NEC_UNIT)
+#define NEC_BIT_0_SPACE (1 * NEC_UNIT)
+#define NEC_BIT_1_SPACE (3 * NEC_UNIT)
+#define NEC_TRAILER_PULSE (1 * NEC_UNIT)
+#define NEC_TRAILER_SPACE (10 * NEC_UNIT) /* even longer in reality */
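For reference, with NEC_UNIT = 562.5 us these macros work out to the familiar NEC timings: a 16-unit header pulse of 9 ms, an 8-unit header space of 4.5 ms (2.25 ms for the repeat space), and bits encoded as a 562.5 us pulse followed by either a 562.5 us space ("0", 1.125 ms per bit) or a 1.6875 us * 1000 = 1.6875 ms space ("1", 2.25 ms per bit). Because plain NEC sends address and command together with their complements, the 32-bit body always contains 16 ones and 16 zeros, so it lasts a constant 16 * 2.25 + 16 * 1.125 = 54 ms and a whole frame takes roughly 68 ms including header and trailer pulse.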
+
+/* Used to register nec_decoder clients */
+static LIST_HEAD(decoder_list);
+static DEFINE_SPINLOCK(decoder_lock);
+
+enum nec_state {
+ STATE_INACTIVE,
+ STATE_HEADER_SPACE,
+ STATE_BIT_PULSE,
+ STATE_BIT_SPACE,
+ STATE_TRAILER_PULSE,
+ STATE_TRAILER_SPACE,
+};
+
+struct decoder_data {
+ struct list_head list;
+ struct ir_input_dev *ir_dev;
+ int enabled:1;
+
+ /* State machine control */
+ enum nec_state state;
+ u32 nec_bits;
+ unsigned count;
+};
+
+
+/**
+ * get_decoder_data() - gets decoder data
+ * @input_dev: input device
+ *
+ * Returns the struct decoder_data that corresponds to a device
+ */
+static struct decoder_data *get_decoder_data(struct ir_input_dev *ir_dev)
+{
+ struct decoder_data *data = NULL;
+
+ spin_lock(&decoder_lock);
+ list_for_each_entry(data, &decoder_list, list) {
+ if (data->ir_dev == ir_dev)
+ break;
+ }
+ spin_unlock(&decoder_lock);
+ return data;
+}
+
+static ssize_t store_enabled(struct device *d,
+ struct device_attribute *mattr,
+ const char *buf,
+ size_t len)
+{
+ unsigned long value;
+ struct ir_input_dev *ir_dev = dev_get_drvdata(d);
+ struct decoder_data *data = get_decoder_data(ir_dev);
+
+ if (!data)
+ return -EINVAL;
+
+ if (strict_strtoul(buf, 10, &value) || value > 1)
+ return -EINVAL;
+
+ data->enabled = value;
+
+ return len;
+}
+
+static ssize_t show_enabled(struct device *d,
+ struct device_attribute *mattr, char *buf)
+{
+ struct ir_input_dev *ir_dev = dev_get_drvdata(d);
+ struct decoder_data *data = get_decoder_data(ir_dev);
+
+ if (!data)
+ return -EINVAL;
+
+ if (data->enabled)
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
+static DEVICE_ATTR(enabled, S_IRUGO | S_IWUSR, show_enabled, store_enabled);
+
+static struct attribute *decoder_attributes[] = {
+ &dev_attr_enabled.attr,
+ NULL
+};
+
+static struct attribute_group decoder_attribute_group = {
+ .name = "nec_decoder",
+ .attrs = decoder_attributes,
+};
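The attribute group above gives each receiver a per-decoder switch in sysfs. Assuming the class device registers as rc0 under the /sys/class/rc hierarchy set up by the ir-sysfs.c changes further down in this patch, the NEC state machine could be disabled at run time by writing 0 to a file along the lines of /sys/class/rc/rc0/nec_decoder/enabled (the exact device name depends on registration order); store_enabled() accepts only the values 0 and 1.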
+
+/**
+ * ir_nec_decode() - Decode one NEC pulse or space
+ * @input_dev: the struct input_dev descriptor of the device
+ * @duration: the struct ir_raw_event descriptor of the pulse/space
+ *
+ * This function returns -EINVAL if the pulse violates the state machine
+ */
+static int ir_nec_decode(struct input_dev *input_dev, struct ir_raw_event ev)
+{
+ struct decoder_data *data;
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ u32 scancode;
+ u8 address, not_address, command, not_command;
+
+ data = get_decoder_data(ir_dev);
+ if (!data)
+ return -EINVAL;
+
+ if (!data->enabled)
+ return 0;
+
+ if (IS_RESET(ev)) {
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+ IR_dprintk(2, "NEC decode started at state %d (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+
+ switch (data->state) {
+
+ case STATE_INACTIVE:
+ if (!ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, NEC_HEADER_PULSE, NEC_UNIT / 2) &&
+ !eq_margin(ev.duration, NECX_HEADER_PULSE, NEC_UNIT / 2))
+ break;
+
+ data->count = 0;
+ data->state = STATE_HEADER_SPACE;
+ return 0;
+
+ case STATE_HEADER_SPACE:
+ if (ev.pulse)
+ break;
+
+ if (eq_margin(ev.duration, NEC_HEADER_SPACE, NEC_UNIT / 2)) {
+ data->state = STATE_BIT_PULSE;
+ return 0;
+ } else if (eq_margin(ev.duration, NEC_REPEAT_SPACE, NEC_UNIT / 2)) {
+ ir_repeat(input_dev);
+ IR_dprintk(1, "Repeat last key\n");
+ data->state = STATE_TRAILER_PULSE;
+ return 0;
+ }
+
+ break;
+
+ case STATE_BIT_PULSE:
+ if (!ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, NEC_BIT_PULSE, NEC_UNIT / 2))
+ break;
+
+ data->state = STATE_BIT_SPACE;
+ return 0;
+
+ case STATE_BIT_SPACE:
+ if (ev.pulse)
+ break;
+
+ data->nec_bits <<= 1;
+ if (eq_margin(ev.duration, NEC_BIT_1_SPACE, NEC_UNIT / 2))
+ data->nec_bits |= 1;
+ else if (!eq_margin(ev.duration, NEC_BIT_0_SPACE, NEC_UNIT / 2))
+ break;
+ data->count++;
+
+ if (data->count == NEC_NBITS)
+ data->state = STATE_TRAILER_PULSE;
+ else
+ data->state = STATE_BIT_PULSE;
+
+ return 0;
+
+ case STATE_TRAILER_PULSE:
+ if (!ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, NEC_TRAILER_PULSE, NEC_UNIT / 2))
+ break;
+
+ data->state = STATE_TRAILER_SPACE;
+ return 0;
+
+ case STATE_TRAILER_SPACE:
+ if (ev.pulse)
+ break;
+
+ if (!geq_margin(ev.duration, NEC_TRAILER_SPACE, NEC_UNIT / 2))
+ break;
+
+ address = bitrev8((data->nec_bits >> 24) & 0xff);
+ not_address = bitrev8((data->nec_bits >> 16) & 0xff);
+ command = bitrev8((data->nec_bits >> 8) & 0xff);
+ not_command = bitrev8((data->nec_bits >> 0) & 0xff);
+
+ if ((command ^ not_command) != 0xff) {
+ IR_dprintk(1, "NEC checksum error: received 0x%08x\n",
+ data->nec_bits);
+ break;
+ }
+
+ if ((address ^ not_address) != 0xff) {
+ /* Extended NEC */
+ scancode = address << 16 |
+ not_address << 8 |
+ command;
+ IR_dprintk(1, "NEC (Ext) scancode 0x%06x\n", scancode);
+ } else {
+ /* Normal NEC */
+ scancode = address << 8 | command;
+ IR_dprintk(1, "NEC scancode 0x%04x\n", scancode);
+ }
+
+ ir_keydown(input_dev, scancode, 0);
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+ IR_dprintk(1, "NEC decode failed at state %d (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->state = STATE_INACTIVE;
+ return -EINVAL;
+}
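As a worked example of the trailer-space handling above (values chosen for illustration): if the 32 received bits decode to address 0x04, not_address 0xFB, command 0x08 and not_command 0xF7, then command ^ not_command = 0x08 ^ 0xF7 = 0xFF, so the checksum passes, and address ^ not_address = 0xFF as well, so the frame is plain NEC and the reported scancode is address << 8 | command = 0x0408. Had address ^ not_address differed from 0xFF, the frame would be treated as extended NEC and all 24 bits (address, not_address, command) would form the scancode.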
+
+static int ir_nec_register(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ struct decoder_data *data;
+ int rc;
+
+ rc = sysfs_create_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+ if (rc < 0)
+ return rc;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ sysfs_remove_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+ return -ENOMEM;
+ }
+
+ data->ir_dev = ir_dev;
+ data->enabled = 1;
+
+ spin_lock(&decoder_lock);
+ list_add_tail(&data->list, &decoder_list);
+ spin_unlock(&decoder_lock);
+
+ return 0;
+}
+
+static int ir_nec_unregister(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ struct decoder_data *data;
+
+ data = get_decoder_data(ir_dev);
+ if (!data)
+ return 0;
+
+ sysfs_remove_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+
+ spin_lock(&decoder_lock);
+ list_del(&data->list);
+ spin_unlock(&decoder_lock);
+
+ return 0;
+}
+
+static struct ir_raw_handler nec_handler = {
+ .decode = ir_nec_decode,
+ .raw_register = ir_nec_register,
+ .raw_unregister = ir_nec_unregister,
+};
+
+static int __init ir_nec_decode_init(void)
+{
+ ir_raw_handler_register(&nec_handler);
+
+ printk(KERN_INFO "IR NEC protocol handler initialized\n");
+ return 0;
+}
+
+static void __exit ir_nec_decode_exit(void)
+{
+ ir_raw_handler_unregister(&nec_handler);
+}
+
+module_init(ir_nec_decode_init);
+module_exit(ir_nec_decode_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
+MODULE_DESCRIPTION("NEC IR protocol decoder");
diff --git a/drivers/media/IR/ir-raw-event.c b/drivers/media/IR/ir-raw-event.c
new file mode 100644
index 000000000000..ea68a3f2effa
--- /dev/null
+++ b/drivers/media/IR/ir-raw-event.c
@@ -0,0 +1,251 @@
+/* ir-raw-event.c - handle IR Pulse/Space event
+ *
+ * Copyright (C) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include "ir-core-priv.h"
+
+/* Define the max number of pulse/space transitions to buffer */
+#define MAX_IR_EVENT_SIZE 512
+
+/* Used to handle IR raw handler extensions */
+static LIST_HEAD(ir_raw_handler_list);
+static DEFINE_SPINLOCK(ir_raw_handler_lock);
+
+/**
+ * RUN_DECODER() - runs an operation on all IR decoders
+ * @ops: IR raw handler operation to be called
+ * @arg: arguments to be passed to the callback
+ *
+ * Calls ir_raw_handler::ops for all registered IR handlers. It prevents
+ * new decoder addition/removal while running, by holding the
+ * ir_raw_handler_lock spinlock. If a handler returns an error, the loop
+ * stops. Otherwise, it returns the sum of the return codes.
+ */
+#define RUN_DECODER(ops, ...) ({ \
+ struct ir_raw_handler *_ir_raw_handler; \
+ int _sumrc = 0, _rc; \
+ spin_lock(&ir_raw_handler_lock); \
+ list_for_each_entry(_ir_raw_handler, &ir_raw_handler_list, list) { \
+ if (_ir_raw_handler->ops) { \
+ _rc = _ir_raw_handler->ops(__VA_ARGS__); \
+ if (_rc < 0) \
+ break; \
+ _sumrc += _rc; \
+ } \
+ } \
+ spin_unlock(&ir_raw_handler_lock); \
+ _sumrc; \
+})
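Because the handler member name is passed as a macro argument, the same construct serves all three callbacks: RUN_DECODER(decode, input_dev, ev) calls every registered handler's ->decode(input_dev, ev), while RUN_DECODER(raw_register, input_dev) and RUN_DECODER(raw_unregister, input_dev) do the same for registration and teardown. In each case the result is the sum of the individual return codes, with iteration stopping at the first negative return.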
+
+#ifdef MODULE
+/* Used to load the decoders */
+static struct work_struct wq_load;
+#endif
+
+static void ir_raw_event_work(struct work_struct *work)
+{
+ struct ir_raw_event ev;
+ struct ir_raw_event_ctrl *raw =
+ container_of(work, struct ir_raw_event_ctrl, rx_work);
+
+ while (kfifo_out(&raw->kfifo, &ev, sizeof(ev)) == sizeof(ev))
+ RUN_DECODER(decode, raw->input_dev, ev);
+}
+
+int ir_raw_event_register(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir = input_get_drvdata(input_dev);
+ int rc;
+
+ ir->raw = kzalloc(sizeof(*ir->raw), GFP_KERNEL);
+ if (!ir->raw)
+ return -ENOMEM;
+
+ ir->raw->input_dev = input_dev;
+ INIT_WORK(&ir->raw->rx_work, ir_raw_event_work);
+
+ rc = kfifo_alloc(&ir->raw->kfifo, sizeof(s64) * MAX_IR_EVENT_SIZE,
+ GFP_KERNEL);
+ if (rc < 0) {
+ kfree(ir->raw);
+ ir->raw = NULL;
+ return rc;
+ }
+
+ rc = RUN_DECODER(raw_register, input_dev);
+ if (rc < 0) {
+ kfifo_free(&ir->raw->kfifo);
+ kfree(ir->raw);
+ ir->raw = NULL;
+ return rc;
+ }
+
+ return rc;
+}
+
+void ir_raw_event_unregister(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir = input_get_drvdata(input_dev);
+
+ if (!ir->raw)
+ return;
+
+ cancel_work_sync(&ir->raw->rx_work);
+ RUN_DECODER(raw_unregister, input_dev);
+
+ kfifo_free(&ir->raw->kfifo);
+ kfree(ir->raw);
+ ir->raw = NULL;
+}
+
+/**
+ * ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders
+ * @input_dev: the struct input_dev device descriptor
+ * @ev: the struct ir_raw_event descriptor of the pulse/space
+ *
+ * This routine (which may be called from an interrupt context) stores a
+ * pulse/space duration for the raw ir decoding state machines. Pulses are
+ * signalled as positive values and spaces as negative values. A zero value
+ * will reset the decoding state machines.
+ */
+int ir_raw_event_store(struct input_dev *input_dev, struct ir_raw_event *ev)
+{
+ struct ir_input_dev *ir = input_get_drvdata(input_dev);
+
+ if (!ir->raw)
+ return -EINVAL;
+
+ if (kfifo_in(&ir->raw->kfifo, ev, sizeof(*ev)) != sizeof(*ev))
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ir_raw_event_store);
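A sketch of how a hardware driver that measures complete mark/space durations might feed this interface. The helper name is invented; the ir_raw_event fields pulse and duration and the two ir-core calls are the ones added by this patch, with durations in nanoseconds as for the protocol macros above:

/* Hypothetical sketch, not part of this patch. */
#include <media/ir-core.h>

/* Push one measured mark/space to the raw decoders; may be called from
 * interrupt context, since the sample only goes into the kfifo here. */
static int foo_ir_push_sample(struct input_dev *input_dev,
			      bool pulse, u32 duration_ns)
{
	struct ir_raw_event ev = {
		.pulse    = pulse,		/* true = mark, false = space */
		.duration = duration_ns,	/* nanoseconds, like NEC_UNIT etc. */
	};

	return ir_raw_event_store(input_dev, &ev);
}

After queuing a batch of samples the driver calls ir_raw_event_handle(input_dev) to schedule the decoding work.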
+
+/**
+ * ir_raw_event_store_edge() - notify raw ir decoders of the start of a pulse/space
+ * @input_dev: the struct input_dev device descriptor
+ * @type: the type of the event that has occurred
+ *
+ * This routine (which may be called from an interrupt context) is used to
+ * store the beginning of an ir pulse or space (or the start/end of ir
+ * reception) for the raw ir decoding state machines. This is used by
+ * hardware which does not provide durations directly but only interrupts
+ * (or similar events) on state change.
+ */
+int ir_raw_event_store_edge(struct input_dev *input_dev, enum raw_event_type type)
+{
+ struct ir_input_dev *ir = input_get_drvdata(input_dev);
+ ktime_t now;
+ s64 delta; /* ns */
+ struct ir_raw_event ev;
+ int rc = 0;
+
+ if (!ir->raw)
+ return -EINVAL;
+
+ now = ktime_get();
+ delta = ktime_to_ns(ktime_sub(now, ir->raw->last_event));
+
+ /* Check for a long duration since the last event or whether we're
+ * being called for the first time; note that delta can't
+ * possibly be negative.
+ */
+ ev.duration = 0;
+ if (delta > IR_MAX_DURATION || !ir->raw->last_type)
+ type |= IR_START_EVENT;
+ else
+ ev.duration = delta;
+
+ if (type & IR_START_EVENT)
+ ir_raw_event_reset(input_dev);
+ else if (ir->raw->last_type & IR_SPACE) {
+ ev.pulse = false;
+ rc = ir_raw_event_store(input_dev, &ev);
+ } else if (ir->raw->last_type & IR_PULSE) {
+ ev.pulse = true;
+ rc = ir_raw_event_store(input_dev, &ev);
+ } else
+ return 0;
+
+ ir->raw->last_event = now;
+ ir->raw->last_type = type;
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
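For hardware that only signals level changes, the edge variant lets ir-core do the timekeeping. A hypothetical GPIO-based receiver (driver names and the active-low wiring are invented; IR_PULSE and IR_SPACE are the raw_event_type values this file tests in last_type) could do:

/* Hypothetical sketch, not part of this patch. */
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <media/ir-core.h>

struct foo_gpio_ir {			/* invented driver context */
	struct input_dev *input_dev;
	int gpio;			/* receiver data line, active low */
};

static irqreturn_t foo_gpio_ir_irq(int irq, void *dev_id)
{
	struct foo_gpio_ir *priv = dev_id;

	/* The argument describes the period that starts at this edge: a low
	 * level on an active-low receiver means a mark (pulse) just began. */
	ir_raw_event_store_edge(priv->input_dev,
				gpio_get_value(priv->gpio) ? IR_SPACE : IR_PULSE);

	/* Kick the decoding work handler. */
	ir_raw_event_handle(priv->input_dev);

	return IRQ_HANDLED;
}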
+
+/**
+ * ir_raw_event_handle() - schedules the decoding of stored ir data
+ * @input_dev: the struct input_dev device descriptor
+ *
+ * This routine will signal the workqueue to start decoding stored ir data.
+ */
+void ir_raw_event_handle(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir = input_get_drvdata(input_dev);
+
+ if (!ir->raw)
+ return;
+
+ schedule_work(&ir->raw->rx_work);
+}
+EXPORT_SYMBOL_GPL(ir_raw_event_handle);
+
+/*
+ * Extension interface - used to register the IR decoders
+ */
+
+int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
+{
+ spin_lock(&ir_raw_handler_lock);
+ list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
+ spin_unlock(&ir_raw_handler_lock);
+ return 0;
+}
+EXPORT_SYMBOL(ir_raw_handler_register);
+
+void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
+{
+ spin_lock(&ir_raw_handler_lock);
+ list_del(&ir_raw_handler->list);
+ spin_unlock(&ir_raw_handler_lock);
+}
+EXPORT_SYMBOL(ir_raw_handler_unregister);
+
+#ifdef MODULE
+static void init_decoders(struct work_struct *work)
+{
+ /* Load the decoder modules */
+
+ load_nec_decode();
+ load_rc5_decode();
+ load_rc6_decode();
+ load_jvc_decode();
+ load_sony_decode();
+
+ /* If needed, init code may be added here later. In that case,
+ the CONFIG_MODULE test in ir-core.h also needs to be changed.
+ */
+}
+#endif
+
+void ir_raw_init(void)
+{
+#ifdef MODULE
+ INIT_WORK(&wq_load, init_decoders);
+ schedule_work(&wq_load);
+#endif
+}
diff --git a/drivers/media/IR/ir-rc5-decoder.c b/drivers/media/IR/ir-rc5-decoder.c
new file mode 100644
index 000000000000..23cdb1b1a3bc
--- /dev/null
+++ b/drivers/media/IR/ir-rc5-decoder.c
@@ -0,0 +1,324 @@
+/* ir-rc5-decoder.c - handle RC5(x) IR Pulse/Space protocol
+ *
+ * Copyright (C) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * This code handles the 14-bit RC5 protocol and the 20-bit RC5x protocol.
+ * There are other variants that use a different number of bits;
+ * these are currently unsupported.
+ * The protocol uses a 36 kHz carrier, with a total of 14/20 bits, where
+ * the first two bits are start bits and the third is a toggle bit.
+ */
+
+#include "ir-core-priv.h"
+
+#define RC5_NBITS 14
+#define RC5X_NBITS 20
+#define CHECK_RC5X_NBITS 8
+#define RC5_UNIT 888888 /* ns */
+#define RC5_BIT_START (1 * RC5_UNIT)
+#define RC5_BIT_END (1 * RC5_UNIT)
+#define RC5X_SPACE (4 * RC5_UNIT)
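For orientation: RC5 is bi-phase (Manchester) coded, so every bit is one RC5_UNIT pulse plus one RC5_UNIT space in some order; with RC5_UNIT at roughly 888.9 us each bit lasts about 1.78 ms and a complete 14-bit RC5 frame about 24.9 ms. The decoder below therefore consumes events half a bit at a time (STATE_BIT_START/STATE_BIT_END) and uses decrease_duration() to split pulses or spaces that span several half-bit periods.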
+
+/* Used to register rc5_decoder clients */
+static LIST_HEAD(decoder_list);
+static DEFINE_SPINLOCK(decoder_lock);
+
+enum rc5_state {
+ STATE_INACTIVE,
+ STATE_BIT_START,
+ STATE_BIT_END,
+ STATE_CHECK_RC5X,
+ STATE_FINISHED,
+};
+
+struct decoder_data {
+ struct list_head list;
+ struct ir_input_dev *ir_dev;
+ int enabled:1;
+
+ /* State machine control */
+ enum rc5_state state;
+ u32 rc5_bits;
+ struct ir_raw_event prev_ev;
+ unsigned count;
+ unsigned wanted_bits;
+};
+
+
+/**
+ * get_decoder_data() - gets decoder data
+ * @input_dev: input device
+ *
+ * Returns the struct decoder_data that corresponds to a device
+ */
+
+static struct decoder_data *get_decoder_data(struct ir_input_dev *ir_dev)
+{
+ struct decoder_data *data = NULL;
+
+ spin_lock(&decoder_lock);
+ list_for_each_entry(data, &decoder_list, list) {
+ if (data->ir_dev == ir_dev)
+ break;
+ }
+ spin_unlock(&decoder_lock);
+ return data;
+}
+
+static ssize_t store_enabled(struct device *d,
+ struct device_attribute *mattr,
+ const char *buf,
+ size_t len)
+{
+ unsigned long value;
+ struct ir_input_dev *ir_dev = dev_get_drvdata(d);
+ struct decoder_data *data = get_decoder_data(ir_dev);
+
+ if (!data)
+ return -EINVAL;
+
+ if (strict_strtoul(buf, 10, &value) || value > 1)
+ return -EINVAL;
+
+ data->enabled = value;
+
+ return len;
+}
+
+static ssize_t show_enabled(struct device *d,
+ struct device_attribute *mattr, char *buf)
+{
+ struct ir_input_dev *ir_dev = dev_get_drvdata(d);
+ struct decoder_data *data = get_decoder_data(ir_dev);
+
+ if (!data)
+ return -EINVAL;
+
+ if (data->enabled)
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
+static DEVICE_ATTR(enabled, S_IRUGO | S_IWUSR, show_enabled, store_enabled);
+
+static struct attribute *decoder_attributes[] = {
+ &dev_attr_enabled.attr,
+ NULL
+};
+
+static struct attribute_group decoder_attribute_group = {
+ .name = "rc5_decoder",
+ .attrs = decoder_attributes,
+};
+
+/**
+ * ir_rc5_decode() - Decode one RC-5 pulse or space
+ * @input_dev: the struct input_dev descriptor of the device
+ * @ev: the struct ir_raw_event descriptor of the pulse/space
+ *
+ * This function returns -EINVAL if the pulse violates the state machine
+ */
+static int ir_rc5_decode(struct input_dev *input_dev, struct ir_raw_event ev)
+{
+ struct decoder_data *data;
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ u8 toggle;
+ u32 scancode;
+
+ data = get_decoder_data(ir_dev);
+ if (!data)
+ return -EINVAL;
+
+ if (!data->enabled)
+ return 0;
+
+ if (IS_RESET(ev)) {
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+ if (!geq_margin(ev.duration, RC5_UNIT, RC5_UNIT / 2))
+ goto out;
+
+again:
+ IR_dprintk(2, "RC5(x) decode started at state %i (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+
+ if (!geq_margin(ev.duration, RC5_UNIT, RC5_UNIT / 2))
+ return 0;
+
+ switch (data->state) {
+
+ case STATE_INACTIVE:
+ if (!ev.pulse)
+ break;
+
+ data->state = STATE_BIT_START;
+ data->count = 1;
+ /* We just need enough bits to get to STATE_CHECK_RC5X */
+ data->wanted_bits = RC5X_NBITS;
+ decrease_duration(&ev, RC5_BIT_START);
+ goto again;
+
+ case STATE_BIT_START:
+ if (!eq_margin(ev.duration, RC5_BIT_START, RC5_UNIT / 2))
+ break;
+
+ data->rc5_bits <<= 1;
+ if (!ev.pulse)
+ data->rc5_bits |= 1;
+ data->count++;
+ data->prev_ev = ev;
+ data->state = STATE_BIT_END;
+ return 0;
+
+ case STATE_BIT_END:
+ if (!is_transition(&ev, &data->prev_ev))
+ break;
+
+ if (data->count == data->wanted_bits)
+ data->state = STATE_FINISHED;
+ else if (data->count == CHECK_RC5X_NBITS)
+ data->state = STATE_CHECK_RC5X;
+ else
+ data->state = STATE_BIT_START;
+
+ decrease_duration(&ev, RC5_BIT_END);
+ goto again;
+
+ case STATE_CHECK_RC5X:
+ if (!ev.pulse && geq_margin(ev.duration, RC5X_SPACE, RC5_UNIT / 2)) {
+ /* RC5X */
+ data->wanted_bits = RC5X_NBITS;
+ decrease_duration(&ev, RC5X_SPACE);
+ } else {
+ /* RC5 */
+ data->wanted_bits = RC5_NBITS;
+ }
+ data->state = STATE_BIT_START;
+ goto again;
+
+ case STATE_FINISHED:
+ if (ev.pulse)
+ break;
+
+ if (data->wanted_bits == RC5X_NBITS) {
+ /* RC5X */
+ u8 xdata, command, system;
+ xdata = (data->rc5_bits & 0x0003F) >> 0;
+ command = (data->rc5_bits & 0x00FC0) >> 6;
+ system = (data->rc5_bits & 0x1F000) >> 12;
+ toggle = (data->rc5_bits & 0x20000) ? 1 : 0;
+ command += (data->rc5_bits & 0x01000) ? 0 : 0x40;
+ scancode = system << 16 | command << 8 | xdata;
+
+ IR_dprintk(1, "RC5X scancode 0x%06x (toggle: %u)\n",
+ scancode, toggle);
+
+ } else {
+ /* RC5 */
+ u8 command, system;
+ command = (data->rc5_bits & 0x0003F) >> 0;
+ system = (data->rc5_bits & 0x007C0) >> 6;
+ toggle = (data->rc5_bits & 0x00800) ? 1 : 0;
+ command += (data->rc5_bits & 0x01000) ? 0 : 0x40;
+ scancode = system << 8 | command;
+
+ IR_dprintk(1, "RC5 scancode 0x%04x (toggle: %u)\n",
+ scancode, toggle);
+ }
+
+ ir_keydown(input_dev, scancode, toggle);
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+out:
+ IR_dprintk(1, "RC5(x) decode failed at state %i (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->state = STATE_INACTIVE;
+ return -EINVAL;
+}
+
+static int ir_rc5_register(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ struct decoder_data *data;
+ int rc;
+
+ rc = sysfs_create_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+ if (rc < 0)
+ return rc;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ sysfs_remove_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+ return -ENOMEM;
+ }
+
+ data->ir_dev = ir_dev;
+ data->enabled = 1;
+
+ spin_lock(&decoder_lock);
+ list_add_tail(&data->list, &decoder_list);
+ spin_unlock(&decoder_lock);
+
+ return 0;
+}
+
+static int ir_rc5_unregister(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ struct decoder_data *data;
+
+ data = get_decoder_data(ir_dev);
+ if (!data)
+ return 0;
+
+ sysfs_remove_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+
+ spin_lock(&decoder_lock);
+ list_del(&data->list);
+ spin_unlock(&decoder_lock);
+
+ return 0;
+}
+
+static struct ir_raw_handler rc5_handler = {
+ .decode = ir_rc5_decode,
+ .raw_register = ir_rc5_register,
+ .raw_unregister = ir_rc5_unregister,
+};
+
+static int __init ir_rc5_decode_init(void)
+{
+ ir_raw_handler_register(&rc5_handler);
+
+ printk(KERN_INFO "IR RC5(x) protocol handler initialized\n");
+ return 0;
+}
+
+static void __exit ir_rc5_decode_exit(void)
+{
+ ir_raw_handler_unregister(&rc5_handler);
+}
+
+module_init(ir_rc5_decode_init);
+module_exit(ir_rc5_decode_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
+MODULE_DESCRIPTION("RC5(x) IR protocol decoder");
diff --git a/drivers/media/IR/ir-rc6-decoder.c b/drivers/media/IR/ir-rc6-decoder.c
new file mode 100644
index 000000000000..2bf479f4f1bc
--- /dev/null
+++ b/drivers/media/IR/ir-rc6-decoder.c
@@ -0,0 +1,419 @@
+/* ir-rc6-decoder.c - A decoder for the RC6 IR protocol
+ *
+ * Copyright (C) 2010 by David Härdeman <david@hardeman.nu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "ir-core-priv.h"
+
+/*
+ * This decoder currently supports:
+ * RC6-0-16 (standard toggle bit in header)
+ * RC6-6A-24 (no toggle bit)
+ * RC6-6A-32 (MCE version with toggle bit in body)
+ */
+
+#define RC6_UNIT 444444 /* ns */
+#define RC6_HEADER_NBITS 4 /* not including toggle bit */
+#define RC6_0_NBITS 16
+#define RC6_6A_SMALL_NBITS 24
+#define RC6_6A_LARGE_NBITS 32
+#define RC6_PREFIX_PULSE (6 * RC6_UNIT)
+#define RC6_PREFIX_SPACE (2 * RC6_UNIT)
+#define RC6_BIT_START (1 * RC6_UNIT)
+#define RC6_BIT_END (1 * RC6_UNIT)
+#define RC6_TOGGLE_START (2 * RC6_UNIT)
+#define RC6_TOGGLE_END (2 * RC6_UNIT)
+#define RC6_MODE_MASK 0x07 /* for the header bits */
+#define RC6_STARTBIT_MASK 0x08 /* for the header bits */
+#define RC6_6A_MCE_TOGGLE_MASK 0x8000 /* for the body bits */
+
+/* Used to register rc6_decoder clients */
+static LIST_HEAD(decoder_list);
+static DEFINE_SPINLOCK(decoder_lock);
+
+enum rc6_mode {
+ RC6_MODE_0,
+ RC6_MODE_6A,
+ RC6_MODE_UNKNOWN,
+};
+
+enum rc6_state {
+ STATE_INACTIVE,
+ STATE_PREFIX_SPACE,
+ STATE_HEADER_BIT_START,
+ STATE_HEADER_BIT_END,
+ STATE_TOGGLE_START,
+ STATE_TOGGLE_END,
+ STATE_BODY_BIT_START,
+ STATE_BODY_BIT_END,
+ STATE_FINISHED,
+};
+
+struct decoder_data {
+ struct list_head list;
+ struct ir_input_dev *ir_dev;
+ int enabled:1;
+
+ /* State machine control */
+ enum rc6_state state;
+ u8 header;
+ u32 body;
+ struct ir_raw_event prev_ev;
+ bool toggle;
+ unsigned count;
+ unsigned wanted_bits;
+};
+
+
+/**
+ * get_decoder_data() - gets decoder data
+ * @input_dev: input device
+ *
+ * Returns the struct decoder_data that corresponds to a device
+ */
+static struct decoder_data *get_decoder_data(struct ir_input_dev *ir_dev)
+{
+ struct decoder_data *data = NULL;
+
+ spin_lock(&decoder_lock);
+ list_for_each_entry(data, &decoder_list, list) {
+ if (data->ir_dev == ir_dev)
+ break;
+ }
+ spin_unlock(&decoder_lock);
+ return data;
+}
+
+static ssize_t store_enabled(struct device *d,
+ struct device_attribute *mattr,
+ const char *buf,
+ size_t len)
+{
+ unsigned long value;
+ struct ir_input_dev *ir_dev = dev_get_drvdata(d);
+ struct decoder_data *data = get_decoder_data(ir_dev);
+
+ if (!data)
+ return -EINVAL;
+
+ if (strict_strtoul(buf, 10, &value) || value > 1)
+ return -EINVAL;
+
+ data->enabled = value;
+
+ return len;
+}
+
+static ssize_t show_enabled(struct device *d,
+ struct device_attribute *mattr, char *buf)
+{
+ struct ir_input_dev *ir_dev = dev_get_drvdata(d);
+ struct decoder_data *data = get_decoder_data(ir_dev);
+
+ if (!data)
+ return -EINVAL;
+
+ if (data->enabled)
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
+static DEVICE_ATTR(enabled, S_IRUGO | S_IWUSR, show_enabled, store_enabled);
+
+static struct attribute *decoder_attributes[] = {
+ &dev_attr_enabled.attr,
+ NULL
+};
+
+static struct attribute_group decoder_attribute_group = {
+ .name = "rc6_decoder",
+ .attrs = decoder_attributes,
+};
+
+static enum rc6_mode rc6_mode(struct decoder_data *data)
+{
+ switch (data->header & RC6_MODE_MASK) {
+ case 0:
+ return RC6_MODE_0;
+ case 6:
+ if (!data->toggle)
+ return RC6_MODE_6A;
+ /* fall through */
+ default:
+ return RC6_MODE_UNKNOWN;
+ }
+}
+
+/**
+ * ir_rc6_decode() - Decode one RC6 pulse or space
+ * @input_dev: the struct input_dev descriptor of the device
+ * @ev: the struct ir_raw_event descriptor of the pulse/space
+ *
+ * This function returns -EINVAL if the pulse violates the state machine
+ */
+static int ir_rc6_decode(struct input_dev *input_dev, struct ir_raw_event ev)
+{
+ struct decoder_data *data;
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ u32 scancode;
+ u8 toggle;
+
+ data = get_decoder_data(ir_dev);
+ if (!data)
+ return -EINVAL;
+
+ if (!data->enabled)
+ return 0;
+
+ if (IS_RESET(ev)) {
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+ if (!geq_margin(ev.duration, RC6_UNIT, RC6_UNIT / 2))
+ goto out;
+
+again:
+ IR_dprintk(2, "RC6 decode started at state %i (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+
+ if (!geq_margin(ev.duration, RC6_UNIT, RC6_UNIT / 2))
+ return 0;
+
+ switch (data->state) {
+
+ case STATE_INACTIVE:
+ if (!ev.pulse)
+ break;
+
+ /* Note: larger margin on first pulse since each RC6_UNIT
+ is quite short and some hardware takes some time to
+ adjust to the signal */
+ if (!eq_margin(ev.duration, RC6_PREFIX_PULSE, RC6_UNIT))
+ break;
+
+ data->state = STATE_PREFIX_SPACE;
+ data->count = 0;
+ return 0;
+
+ case STATE_PREFIX_SPACE:
+ if (ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, RC6_PREFIX_SPACE, RC6_UNIT / 2))
+ break;
+
+ data->state = STATE_HEADER_BIT_START;
+ return 0;
+
+ case STATE_HEADER_BIT_START:
+ if (!eq_margin(ev.duration, RC6_BIT_START, RC6_UNIT / 2))
+ break;
+
+ data->header <<= 1;
+ if (ev.pulse)
+ data->header |= 1;
+ data->count++;
+ data->prev_ev = ev;
+ data->state = STATE_HEADER_BIT_END;
+ return 0;
+
+ case STATE_HEADER_BIT_END:
+ if (!is_transition(&ev, &data->prev_ev))
+ break;
+
+ if (data->count == RC6_HEADER_NBITS)
+ data->state = STATE_TOGGLE_START;
+ else
+ data->state = STATE_HEADER_BIT_START;
+
+ decrease_duration(&ev, RC6_BIT_END);
+ goto again;
+
+ case STATE_TOGGLE_START:
+ if (!eq_margin(ev.duration, RC6_TOGGLE_START, RC6_UNIT / 2))
+ break;
+
+ data->toggle = ev.pulse;
+ data->prev_ev = ev;
+ data->state = STATE_TOGGLE_END;
+ return 0;
+
+ case STATE_TOGGLE_END:
+ if (!is_transition(&ev, &data->prev_ev) ||
+ !geq_margin(ev.duration, RC6_TOGGLE_END, RC6_UNIT / 2))
+ break;
+
+ if (!(data->header & RC6_STARTBIT_MASK)) {
+ IR_dprintk(1, "RC6 invalid start bit\n");
+ break;
+ }
+
+ data->state = STATE_BODY_BIT_START;
+ data->prev_ev = ev;
+ decrease_duration(&ev, RC6_TOGGLE_END);
+ data->count = 0;
+
+ switch (rc6_mode(data)) {
+ case RC6_MODE_0:
+ data->wanted_bits = RC6_0_NBITS;
+ break;
+ case RC6_MODE_6A:
+ /* This might look weird, but we basically
+ check the value of the first body bit to
+ determine the number of bits in mode 6A */
+ if ((!ev.pulse && !geq_margin(ev.duration, RC6_UNIT, RC6_UNIT / 2)) ||
+ geq_margin(ev.duration, RC6_UNIT, RC6_UNIT / 2))
+ data->wanted_bits = RC6_6A_LARGE_NBITS;
+ else
+ data->wanted_bits = RC6_6A_SMALL_NBITS;
+ break;
+ default:
+ IR_dprintk(1, "RC6 unknown mode\n");
+ goto out;
+ }
+ goto again;
+
+ case STATE_BODY_BIT_START:
+ if (!eq_margin(ev.duration, RC6_BIT_START, RC6_UNIT / 2))
+ break;
+
+ data->body <<= 1;
+ if (ev.pulse)
+ data->body |= 1;
+ data->count++;
+ data->prev_ev = ev;
+
+ data->state = STATE_BODY_BIT_END;
+ return 0;
+
+ case STATE_BODY_BIT_END:
+ if (!is_transition(&ev, &data->prev_ev))
+ break;
+
+ if (data->count == data->wanted_bits)
+ data->state = STATE_FINISHED;
+ else
+ data->state = STATE_BODY_BIT_START;
+
+ decrease_duration(&ev, RC6_BIT_END);
+ goto again;
+
+ case STATE_FINISHED:
+ if (ev.pulse)
+ break;
+
+ switch (rc6_mode(data)) {
+ case RC6_MODE_0:
+ scancode = data->body & 0xffff;
+ toggle = data->toggle;
+ IR_dprintk(1, "RC6(0) scancode 0x%04x (toggle: %u)\n",
+ scancode, toggle);
+ break;
+ case RC6_MODE_6A:
+ if (data->wanted_bits == RC6_6A_LARGE_NBITS) {
+ toggle = data->body & RC6_6A_MCE_TOGGLE_MASK ? 1 : 0;
+ scancode = data->body & ~RC6_6A_MCE_TOGGLE_MASK;
+ } else {
+ toggle = 0;
+ scancode = data->body & 0xffffff;
+ }
+
+ IR_dprintk(1, "RC6(6A) scancode 0x%08x (toggle: %u)\n",
+ scancode, toggle);
+ break;
+ default:
+ IR_dprintk(1, "RC6 unknown mode\n");
+ goto out;
+ }
+
+ ir_keydown(input_dev, scancode, toggle);
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+out:
+ IR_dprintk(1, "RC6 decode failed at state %i (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->state = STATE_INACTIVE;
+ return -EINVAL;
+}
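A worked example of the mode-6A handling above (illustrative values): a 32-bit body of 0x800F8415 has bit 15 set, so toggle = 1 and the reported scancode is body & ~RC6_6A_MCE_TOGGLE_MASK = 0x800F0415; the same button with the toggle bit clear arrives as 0x800F0415 directly. Both presses therefore map to a single keymap entry, and only the toggle value tells a new press apart from a held key.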
+
+static int ir_rc6_register(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ struct decoder_data *data;
+ int rc;
+
+ rc = sysfs_create_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+ if (rc < 0)
+ return rc;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ sysfs_remove_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+ return -ENOMEM;
+ }
+
+ data->ir_dev = ir_dev;
+ data->enabled = 1;
+
+ spin_lock(&decoder_lock);
+ list_add_tail(&data->list, &decoder_list);
+ spin_unlock(&decoder_lock);
+
+ return 0;
+}
+
+static int ir_rc6_unregister(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ struct decoder_data *data;
+
+ data = get_decoder_data(ir_dev);
+ if (!data)
+ return 0;
+
+ sysfs_remove_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+
+ spin_lock(&decoder_lock);
+ list_del(&data->list);
+ spin_unlock(&decoder_lock);
+
+ return 0;
+}
+
+static struct ir_raw_handler rc6_handler = {
+ .decode = ir_rc6_decode,
+ .raw_register = ir_rc6_register,
+ .raw_unregister = ir_rc6_unregister,
+};
+
+static int __init ir_rc6_decode_init(void)
+{
+ ir_raw_handler_register(&rc6_handler);
+
+ printk(KERN_INFO "IR RC6 protocol handler initialized\n");
+ return 0;
+}
+
+static void __exit ir_rc6_decode_exit(void)
+{
+ ir_raw_handler_unregister(&rc6_handler);
+}
+
+module_init(ir_rc6_decode_init);
+module_exit(ir_rc6_decode_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Härdeman <david@hardeman.nu>");
+MODULE_DESCRIPTION("RC6 IR protocol decoder");
diff --git a/drivers/media/IR/ir-sony-decoder.c b/drivers/media/IR/ir-sony-decoder.c
new file mode 100644
index 000000000000..9f440c5c060d
--- /dev/null
+++ b/drivers/media/IR/ir-sony-decoder.c
@@ -0,0 +1,312 @@
+/* ir-sony-decoder.c - handle Sony IR Pulse/Space protocol
+ *
+ * Copyright (C) 2010 by David Härdeman <david@hardeman.nu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitrev.h>
+#include "ir-core-priv.h"
+
+#define SONY_UNIT 600000 /* ns */
+#define SONY_HEADER_PULSE (4 * SONY_UNIT)
+#define SONY_HEADER_SPACE (1 * SONY_UNIT)
+#define SONY_BIT_0_PULSE (1 * SONY_UNIT)
+#define SONY_BIT_1_PULSE (2 * SONY_UNIT)
+#define SONY_BIT_SPACE (1 * SONY_UNIT)
+#define SONY_TRAILER_SPACE (10 * SONY_UNIT) /* minimum */
+
+/* Used to register sony_decoder clients */
+static LIST_HEAD(decoder_list);
+static DEFINE_SPINLOCK(decoder_lock);
+
+enum sony_state {
+ STATE_INACTIVE,
+ STATE_HEADER_SPACE,
+ STATE_BIT_PULSE,
+ STATE_BIT_SPACE,
+ STATE_FINISHED,
+};
+
+struct decoder_data {
+ struct list_head list;
+ struct ir_input_dev *ir_dev;
+ int enabled:1;
+
+ /* State machine control */
+ enum sony_state state;
+ u32 sony_bits;
+ unsigned count;
+};
+
+
+/**
+ * get_decoder_data() - gets decoder data
+ * @input_dev: input device
+ *
+ * Returns the struct decoder_data that corresponds to a device
+ */
+static struct decoder_data *get_decoder_data(struct ir_input_dev *ir_dev)
+{
+ struct decoder_data *data = NULL;
+
+ spin_lock(&decoder_lock);
+ list_for_each_entry(data, &decoder_list, list) {
+ if (data->ir_dev == ir_dev)
+ break;
+ }
+ spin_unlock(&decoder_lock);
+ return data;
+}
+
+static ssize_t store_enabled(struct device *d,
+ struct device_attribute *mattr,
+ const char *buf,
+ size_t len)
+{
+ unsigned long value;
+ struct ir_input_dev *ir_dev = dev_get_drvdata(d);
+ struct decoder_data *data = get_decoder_data(ir_dev);
+
+ if (!data)
+ return -EINVAL;
+
+ if (strict_strtoul(buf, 10, &value) || value > 1)
+ return -EINVAL;
+
+ data->enabled = value;
+
+ return len;
+}
+
+static ssize_t show_enabled(struct device *d,
+ struct device_attribute *mattr, char *buf)
+{
+ struct ir_input_dev *ir_dev = dev_get_drvdata(d);
+ struct decoder_data *data = get_decoder_data(ir_dev);
+
+ if (!data)
+ return -EINVAL;
+
+ if (data->enabled)
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
+static DEVICE_ATTR(enabled, S_IRUGO | S_IWUSR, show_enabled, store_enabled);
+
+static struct attribute *decoder_attributes[] = {
+ &dev_attr_enabled.attr,
+ NULL
+};
+
+static struct attribute_group decoder_attribute_group = {
+ .name = "sony_decoder",
+ .attrs = decoder_attributes,
+};
+
+/**
+ * ir_sony_decode() - Decode one Sony pulse or space
+ * @input_dev: the struct input_dev descriptor of the device
+ * @ev: the struct ir_raw_event descriptor of the pulse/space
+ *
+ * This function returns -EINVAL if the pulse violates the state machine
+ */
+static int ir_sony_decode(struct input_dev *input_dev, struct ir_raw_event ev)
+{
+ struct decoder_data *data;
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ u32 scancode;
+ u8 device, subdevice, function;
+
+ data = get_decoder_data(ir_dev);
+ if (!data)
+ return -EINVAL;
+
+ if (!data->enabled)
+ return 0;
+
+ if (IS_RESET(ev)) {
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+ if (!geq_margin(ev.duration, SONY_UNIT, SONY_UNIT / 2))
+ goto out;
+
+ IR_dprintk(2, "Sony decode started at state %d (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+
+ switch (data->state) {
+
+ case STATE_INACTIVE:
+ if (!ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, SONY_HEADER_PULSE, SONY_UNIT / 2))
+ break;
+
+ data->count = 0;
+ data->state = STATE_HEADER_SPACE;
+ return 0;
+
+ case STATE_HEADER_SPACE:
+ if (ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, SONY_HEADER_SPACE, SONY_UNIT / 2))
+ break;
+
+ data->state = STATE_BIT_PULSE;
+ return 0;
+
+ case STATE_BIT_PULSE:
+ if (!ev.pulse)
+ break;
+
+ data->sony_bits <<= 1;
+ if (eq_margin(ev.duration, SONY_BIT_1_PULSE, SONY_UNIT / 2))
+ data->sony_bits |= 1;
+ else if (!eq_margin(ev.duration, SONY_BIT_0_PULSE, SONY_UNIT / 2))
+ break;
+
+ data->count++;
+ data->state = STATE_BIT_SPACE;
+ return 0;
+
+ case STATE_BIT_SPACE:
+ if (ev.pulse)
+ break;
+
+ if (!geq_margin(ev.duration, SONY_BIT_SPACE, SONY_UNIT / 2))
+ break;
+
+ decrease_duration(&ev, SONY_BIT_SPACE);
+
+ if (!geq_margin(ev.duration, SONY_UNIT, SONY_UNIT / 2)) {
+ data->state = STATE_BIT_PULSE;
+ return 0;
+ }
+
+ data->state = STATE_FINISHED;
+ /* Fall through */
+
+ case STATE_FINISHED:
+ if (ev.pulse)
+ break;
+
+ if (!geq_margin(ev.duration, SONY_TRAILER_SPACE, SONY_UNIT / 2))
+ break;
+
+ switch (data->count) {
+ case 12:
+ device = bitrev8((data->sony_bits << 3) & 0xF8);
+ subdevice = 0;
+ function = bitrev8((data->sony_bits >> 4) & 0xFE);
+ break;
+ case 15:
+ device = bitrev8((data->sony_bits >> 0) & 0xFF);
+ subdevice = 0;
+ function = bitrev8((data->sony_bits >> 7) & 0xFD);
+ break;
+ case 20:
+ device = bitrev8((data->sony_bits >> 5) & 0xF8);
+ subdevice = bitrev8((data->sony_bits >> 0) & 0xFF);
+ function = bitrev8((data->sony_bits >> 12) & 0xFE);
+ break;
+ default:
+ IR_dprintk(1, "Sony invalid bitcount %u\n", data->count);
+ goto out;
+ }
+
+ scancode = device << 16 | subdevice << 8 | function;
+ IR_dprintk(1, "Sony(%u) scancode 0x%05x\n", data->count, scancode);
+ ir_keydown(input_dev, scancode, 0);
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+out:
+ IR_dprintk(1, "Sony decode failed at state %d (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->state = STATE_INACTIVE;
+ return -EINVAL;
+}
+
+static int ir_sony_register(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ struct decoder_data *data;
+ int rc;
+
+ rc = sysfs_create_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+ if (rc < 0)
+ return rc;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ sysfs_remove_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+ return -ENOMEM;
+ }
+
+ data->ir_dev = ir_dev;
+ data->enabled = 1;
+
+ spin_lock(&decoder_lock);
+ list_add_tail(&data->list, &decoder_list);
+ spin_unlock(&decoder_lock);
+
+ return 0;
+}
+
+static int ir_sony_unregister(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ struct decoder_data *data;
+
+ data = get_decoder_data(ir_dev);
+ if (!data)
+ return 0;
+
+ sysfs_remove_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+
+ spin_lock(&decoder_lock);
+ list_del(&data->list);
+ spin_unlock(&decoder_lock);
+
+ return 0;
+}
+
+static struct ir_raw_handler sony_handler = {
+ .decode = ir_sony_decode,
+ .raw_register = ir_sony_register,
+ .raw_unregister = ir_sony_unregister,
+};
+
+static int __init ir_sony_decode_init(void)
+{
+ ir_raw_handler_register(&sony_handler);
+
+ printk(KERN_INFO "IR Sony protocol handler initialized\n");
+ return 0;
+}
+
+static void __exit ir_sony_decode_exit(void)
+{
+ ir_raw_handler_unregister(&sony_handler);
+}
+
+module_init(ir_sony_decode_init);
+module_exit(ir_sony_decode_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Härdeman <david@hardeman.nu>");
+MODULE_DESCRIPTION("Sony IR protocol decoder");
diff --git a/drivers/media/IR/ir-sysfs.c b/drivers/media/IR/ir-sysfs.c
index e14e6c486b52..d7da63e16c92 100644
--- a/drivers/media/IR/ir-sysfs.c
+++ b/drivers/media/IR/ir-sysfs.c
@@ -1,6 +1,6 @@
-/* ir-register.c - handle IR scancode->keycode tables
+/* ir-sysfs.c - sysfs interface for RC devices (/sys/class/rc)
*
- * Copyright (C) 2009 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (C) 2009-2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -15,15 +15,23 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/device.h>
-#include <media/ir-core.h>
+#include "ir-core-priv.h"
#define IRRCV_NUM_DEVICES 256
/* bit array to represent IR sysfs device number */
static unsigned long ir_core_dev_number;
-/* class for /sys/class/irrcv */
-static struct class *ir_input_class;
+/* class for /sys/class/rc */
+static char *ir_devnode(struct device *dev, mode_t *mode)
+{
+ return kasprintf(GFP_KERNEL, "rc/%s", dev_name(dev));
+}
+
+static struct class ir_input_class = {
+ .name = "rc",
+ .devnode = ir_devnode,
+};
/**
* show_protocol() - shows the current IR protocol
@@ -32,7 +40,7 @@ static struct class *ir_input_class;
* @buf: a pointer to the output buffer
*
 * This routine is a callback routine to read the IR protocol type.
- * it is trigged by reading /sys/class/irrcv/irrcv?/current_protocol.
+ * it is triggered by reading /sys/class/rc/rc?/current_protocol.
* It returns the protocol name, as understood by the driver.
*/
static ssize_t show_protocol(struct device *d,
@@ -48,13 +56,17 @@ static ssize_t show_protocol(struct device *d,
if (ir_type == IR_TYPE_UNKNOWN)
s = "Unknown";
else if (ir_type == IR_TYPE_RC5)
- s = "RC-5";
- else if (ir_type == IR_TYPE_PD)
- s = "Pulse/distance";
+ s = "rc-5";
else if (ir_type == IR_TYPE_NEC)
- s = "NEC";
+ s = "nec";
+ else if (ir_type == IR_TYPE_RC6)
+ s = "rc6";
+ else if (ir_type == IR_TYPE_JVC)
+ s = "jvc";
+ else if (ir_type == IR_TYPE_SONY)
+ s = "sony";
else
- s = "Other";
+ s = "other";
return sprintf(buf, "%s\n", s);
}
@@ -67,7 +79,7 @@ static ssize_t show_protocol(struct device *d,
* @len: length of the input buffer
*
* This routine is a callback routine for changing the IR protocol type.
- * it is trigged by reading /sys/class/irrcv/irrcv?/current_protocol.
+ * it is triggered by writing to /sys/class/rc/rc?/current_protocol.
 * It changes the IR protocol name, if the IR type is recognized
* by the driver.
* If an unknown protocol name is used, returns -EINVAL.
@@ -78,23 +90,24 @@ static ssize_t store_protocol(struct device *d,
size_t len)
{
struct ir_input_dev *ir_dev = dev_get_drvdata(d);
- u64 ir_type = IR_TYPE_UNKNOWN;
+ u64 ir_type = 0;
int rc = -EINVAL;
unsigned long flags;
char *buf;
- buf = strsep((char **) &data, "\n");
-
- if (!strcasecmp(buf, "rc-5"))
- ir_type = IR_TYPE_RC5;
- else if (!strcasecmp(buf, "pd"))
- ir_type = IR_TYPE_PD;
- else if (!strcasecmp(buf, "nec"))
- ir_type = IR_TYPE_NEC;
+ while ((buf = strsep((char **) &data, " \n")) != NULL) {
+ if (!strcasecmp(buf, "rc-5") || !strcasecmp(buf, "rc5"))
+ ir_type |= IR_TYPE_RC5;
+ if (!strcasecmp(buf, "nec"))
+ ir_type |= IR_TYPE_NEC;
+ if (!strcasecmp(buf, "jvc"))
+ ir_type |= IR_TYPE_JVC;
+ if (!strcasecmp(buf, "sony"))
+ ir_type |= IR_TYPE_SONY;
+ }
- if (ir_type == IR_TYPE_UNKNOWN) {
- IR_dprintk(1, "Error setting protocol to %lld\n",
- (long long)ir_type);
+ if (!ir_type) {
+ IR_dprintk(1, "Unknown protocol\n");
return -EINVAL;
}
@@ -112,25 +125,87 @@ static ssize_t store_protocol(struct device *d,
ir_dev->rc_tab.ir_type = ir_type;
spin_unlock_irqrestore(&ir_dev->rc_tab.lock, flags);
- IR_dprintk(1, "Current protocol is %lld\n",
+ IR_dprintk(1, "Current protocol(s) is(are) %lld\n",
(long long)ir_type);
return len;
}
+static ssize_t show_supported_protocols(struct device *d,
+ struct device_attribute *mattr, char *buf)
+{
+ char *orgbuf = buf;
+ struct ir_input_dev *ir_dev = dev_get_drvdata(d);
+
+ /* FIXME: doesn't support multiple protocols at the same time */
+ if (ir_dev->props->allowed_protos == IR_TYPE_UNKNOWN)
+ buf += sprintf(buf, "unknown ");
+ if (ir_dev->props->allowed_protos & IR_TYPE_RC5)
+ buf += sprintf(buf, "rc-5 ");
+ if (ir_dev->props->allowed_protos & IR_TYPE_NEC)
+ buf += sprintf(buf, "nec ");
+ if (buf == orgbuf)
+ buf += sprintf(buf, "other ");
+
+ buf += sprintf(buf - 1, "\n");
+
+ return buf - orgbuf;
+}
+
+#define ADD_HOTPLUG_VAR(fmt, val...) \
+ do { \
+ int err = add_uevent_var(env, fmt, val); \
+ if (err) \
+ return err; \
+ } while (0)
+
+static int ir_dev_uevent(struct device *device, struct kobj_uevent_env *env)
+{
+ struct ir_input_dev *ir_dev = dev_get_drvdata(device);
+
+ if (ir_dev->rc_tab.name)
+ ADD_HOTPLUG_VAR("NAME=%s", ir_dev->rc_tab.name);
+ if (ir_dev->driver_name)
+ ADD_HOTPLUG_VAR("DRV_NAME=%s", ir_dev->driver_name);
+
+ return 0;
+}
+
/*
 * Static device attribute struct with the sysfs attributes for IR devices
*/
-static DEVICE_ATTR(current_protocol, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(protocol, S_IRUGO | S_IWUSR,
show_protocol, store_protocol);
-static struct attribute *ir_dev_attrs[] = {
- &dev_attr_current_protocol.attr,
+static DEVICE_ATTR(supported_protocols, S_IRUGO | S_IWUSR,
+ show_supported_protocols, NULL);
+
+static struct attribute *ir_hw_dev_attrs[] = {
+ &dev_attr_protocol.attr,
+ &dev_attr_supported_protocols.attr,
NULL,
};
+static struct attribute_group ir_hw_dev_attr_grp = {
+ .attrs = ir_hw_dev_attrs,
+};
+
+static const struct attribute_group *ir_hw_dev_attr_groups[] = {
+ &ir_hw_dev_attr_grp,
+ NULL
+};
+
+static struct device_type rc_dev_type = {
+ .groups = ir_hw_dev_attr_groups,
+ .uevent = ir_dev_uevent,
+};
+
+static struct device_type ir_raw_dev_type = {
+ .uevent = ir_dev_uevent,
+};
+
/**
- * ir_register_class() - creates the sysfs for /sys/class/irrcv/irrcv?
+ * ir_register_class() - creates the sysfs for /sys/class/rc/rc?
* @input_dev: the struct input_dev descriptor of the device
*
 * This routine is used to register the sysfs code for IR class
@@ -138,8 +213,7 @@ static struct attribute *ir_dev_attrs[] = {
int ir_register_class(struct input_dev *input_dev)
{
int rc;
- struct kobject *kobj;
-
+ const char *path;
struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
int devno = find_first_zero_bit(&ir_core_dev_number,
IRRCV_NUM_DEVICES);
@@ -147,19 +221,36 @@ int ir_register_class(struct input_dev *input_dev)
if (unlikely(devno < 0))
return devno;
- ir_dev->attr.attrs = ir_dev_attrs;
- ir_dev->class_dev = device_create(ir_input_class, NULL,
- input_dev->dev.devt, ir_dev,
- "irrcv%d", devno);
- kobj = &ir_dev->class_dev->kobj;
-
- printk(KERN_WARNING "Creating IR device %s\n", kobject_name(kobj));
- rc = sysfs_create_group(kobj, &ir_dev->attr);
- if (unlikely(rc < 0)) {
- device_destroy(ir_input_class, input_dev->dev.devt);
- return -ENOMEM;
+ if (ir_dev->props->driver_type == RC_DRIVER_SCANCODE)
+ ir_dev->dev.type = &rc_dev_type;
+ else
+ ir_dev->dev.type = &ir_raw_dev_type;
+
+ ir_dev->dev.class = &ir_input_class;
+ ir_dev->dev.parent = input_dev->dev.parent;
+ dev_set_name(&ir_dev->dev, "rc%d", devno);
+ dev_set_drvdata(&ir_dev->dev, ir_dev);
+ rc = device_register(&ir_dev->dev);
+ if (rc)
+ return rc;
+
+
+ input_dev->dev.parent = &ir_dev->dev;
+ rc = input_register_device(input_dev);
+ if (rc < 0) {
+ device_del(&ir_dev->dev);
+ return rc;
}
+ __module_get(THIS_MODULE);
+
+ path = kobject_get_path(&ir_dev->dev.kobj, GFP_KERNEL);
+ printk(KERN_INFO "%s: %s as %s\n",
+ dev_name(&ir_dev->dev),
+ input_dev->name ? input_dev->name : "Unspecified device",
+ path ? path : "N/A");
+ kfree(path);
+
ir_dev->devno = devno;
set_bit(devno, &ir_core_dev_number);
@@ -168,7 +259,7 @@ int ir_register_class(struct input_dev *input_dev)
/**
 * ir_unregister_class() - removes the sysfs for
- * /sys/class/irrcv/irrcv?
+ * /sys/class/rc/rc?
* @input_dev: the struct input_dev descriptor of the device
*
 * This routine is used to unregister the sysfs code for IR class
@@ -176,36 +267,35 @@ int ir_register_class(struct input_dev *input_dev)
void ir_unregister_class(struct input_dev *input_dev)
{
struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
- struct kobject *kobj;
clear_bit(ir_dev->devno, &ir_core_dev_number);
+ input_unregister_device(input_dev);
+ device_del(&ir_dev->dev);
- kobj = &ir_dev->class_dev->kobj;
-
- sysfs_remove_group(kobj, &ir_dev->attr);
- device_destroy(ir_input_class, input_dev->dev.devt);
-
- kfree(ir_dev->attr.name);
+ module_put(THIS_MODULE);
}
/*
- * Init/exit code for the module. Basically, creates/removes /sys/class/irrcv
+ * Init/exit code for the module. Basically, creates/removes /sys/class/rc
*/
static int __init ir_core_init(void)
{
- ir_input_class = class_create(THIS_MODULE, "irrcv");
- if (IS_ERR(ir_input_class)) {
- printk(KERN_ERR "ir_core: unable to register irrcv class\n");
- return PTR_ERR(ir_input_class);
+ int rc = class_register(&ir_input_class);
+ if (rc) {
+ printk(KERN_ERR "ir_core: unable to register rc class\n");
+ return rc;
}
+ /* Initialize/load the decoders/keymap code that will be used */
+ ir_raw_init();
+
return 0;
}
static void __exit ir_core_exit(void)
{
- class_destroy(ir_input_class);
+ class_unregister(&ir_input_class);
}
module_init(ir_core_init);
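
As a usage illustration for the protocol attribute created above (DEVICE_ATTR(protocol, ...)), decoder selection can be driven from userspace by writing one or more protocol names, separated by spaces or a newline, to the sysfs node. A minimal sketch, assuming the first receiver appears as /sys/class/rc/rc0 and the attribute file is named "protocol" as in the DEVICE_ATTR declaration; the node path is an assumption for illustration and writing it requires sufficient privileges:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical node: first RC receiver registered by ir_register_class() */
	const char *node = "/sys/class/rc/rc0/protocol";
	/* Tokens accepted by store_protocol(): "rc-5"/"rc5", "nec", "jvc", "sony" */
	const char *protos = "rc-5 nec\n";
	int fd = open(node, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, protos, strlen(protos)) < 0)
		perror("write");
	close(fd);
	return 0;
}

Reading the same attribute back returns the current protocol name, as implemented by show_protocol().
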
diff --git a/drivers/media/IR/keymaps/Kconfig b/drivers/media/IR/keymaps/Kconfig
new file mode 100644
index 000000000000..14b22f58f823
--- /dev/null
+++ b/drivers/media/IR/keymaps/Kconfig
@@ -0,0 +1,15 @@
+config RC_MAP
+ tristate "Compile Remote Controller keymap modules"
+ depends on IR_CORE
+ default y
+
+ ---help---
+ This option enables the compilation of lots of Remote
+ Controller tables. They are short tables, but if you
+ don't use a remote controller, or prefer to load the
+ tables from userspace, you should disable it.
+
+ The ir-keytable program, available in the v4l-utils
+ package, can load the same RC maps from userspace.
+ It is available at
+ http://git.linuxtv.org/v4l-utils
diff --git a/drivers/media/IR/keymaps/Makefile b/drivers/media/IR/keymaps/Makefile
new file mode 100644
index 000000000000..ec25258a955f
--- /dev/null
+++ b/drivers/media/IR/keymaps/Makefile
@@ -0,0 +1,67 @@
+obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
+ rc-apac-viewcomp.o \
+ rc-asus-pc39.o \
+ rc-ati-tv-wonder-hd-600.o \
+ rc-avermedia-a16d.o \
+ rc-avermedia.o \
+ rc-avermedia-cardbus.o \
+ rc-avermedia-dvbt.o \
+ rc-avermedia-m135a-rm-jx.o \
+ rc-avertv-303.o \
+ rc-behold.o \
+ rc-behold-columbus.o \
+ rc-budget-ci-old.o \
+ rc-cinergy-1400.o \
+ rc-cinergy.o \
+ rc-dm1105-nec.o \
+ rc-dntv-live-dvb-t.o \
+ rc-dntv-live-dvbt-pro.o \
+ rc-empty.o \
+ rc-em-terratec.o \
+ rc-encore-enltv2.o \
+ rc-encore-enltv.o \
+ rc-encore-enltv-fm53.o \
+ rc-evga-indtube.o \
+ rc-eztv.o \
+ rc-flydvb.o \
+ rc-flyvideo.o \
+ rc-fusionhdtv-mce.o \
+ rc-gadmei-rm008z.o \
+ rc-genius-tvgo-a11mce.o \
+ rc-gotview7135.o \
+ rc-hauppauge-new.o \
+ rc-imon-mce.o \
+ rc-imon-pad.o \
+ rc-iodata-bctv7e.o \
+ rc-kaiomy.o \
+ rc-kworld-315u.o \
+ rc-kworld-plus-tv-analog.o \
+ rc-manli.o \
+ rc-msi-tvanywhere.o \
+ rc-msi-tvanywhere-plus.o \
+ rc-nebula.o \
+ rc-nec-terratec-cinergy-xs.o \
+ rc-norwood.o \
+ rc-npgtech.o \
+ rc-pctv-sedna.o \
+ rc-pinnacle-color.o \
+ rc-pinnacle-grey.o \
+ rc-pinnacle-pctv-hd.o \
+ rc-pixelview.o \
+ rc-pixelview-mk12.o \
+ rc-pixelview-new.o \
+ rc-powercolor-real-angel.o \
+ rc-proteus-2309.o \
+ rc-purpletv.o \
+ rc-pv951.o \
+ rc-rc5-hauppauge-new.o \
+ rc-rc5-tv.o \
+ rc-real-audio-220-32-keys.o \
+ rc-tbs-nec.o \
+ rc-terratec-cinergy-xs.o \
+ rc-tevii-nec.o \
+ rc-tt-1500.o \
+ rc-videomate-s350.o \
+ rc-videomate-tv-pvr.o \
+ rc-winfast.o \
+ rc-winfast-usbii-deluxe.o
diff --git a/drivers/media/IR/keymaps/rc-adstech-dvb-t-pci.c b/drivers/media/IR/keymaps/rc-adstech-dvb-t-pci.c
new file mode 100644
index 000000000000..b17283176ecd
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-adstech-dvb-t-pci.c
@@ -0,0 +1,89 @@
+/* adstech-dvb-t-pci.h - Keytable for adstech_dvb_t_pci Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* ADS Tech Instant TV DVB-T PCI Remote */
+
+static struct ir_scancode adstech_dvb_t_pci[] = {
+ /* Keys 0 to 9 */
+ { 0x4d, KEY_0 },
+ { 0x57, KEY_1 },
+ { 0x4f, KEY_2 },
+ { 0x53, KEY_3 },
+ { 0x56, KEY_4 },
+ { 0x4e, KEY_5 },
+ { 0x5e, KEY_6 },
+ { 0x54, KEY_7 },
+ { 0x4c, KEY_8 },
+ { 0x5c, KEY_9 },
+
+ { 0x5b, KEY_POWER },
+ { 0x5f, KEY_MUTE },
+ { 0x55, KEY_GOTO },
+ { 0x5d, KEY_SEARCH },
+ { 0x17, KEY_EPG }, /* Guide */
+ { 0x1f, KEY_MENU },
+ { 0x0f, KEY_UP },
+ { 0x46, KEY_DOWN },
+ { 0x16, KEY_LEFT },
+ { 0x1e, KEY_RIGHT },
+ { 0x0e, KEY_SELECT }, /* Enter */
+ { 0x5a, KEY_INFO },
+ { 0x52, KEY_EXIT },
+ { 0x59, KEY_PREVIOUS },
+ { 0x51, KEY_NEXT },
+ { 0x58, KEY_REWIND },
+ { 0x50, KEY_FORWARD },
+ { 0x44, KEY_PLAYPAUSE },
+ { 0x07, KEY_STOP },
+ { 0x1b, KEY_RECORD },
+ { 0x13, KEY_TUNER }, /* Live */
+ { 0x0a, KEY_A },
+ { 0x12, KEY_B },
+ { 0x03, KEY_PROG1 }, /* 1 */
+ { 0x01, KEY_PROG2 }, /* 2 */
+ { 0x00, KEY_PROG3 }, /* 3 */
+ { 0x06, KEY_DVD },
+ { 0x48, KEY_AUX }, /* Photo */
+ { 0x40, KEY_VIDEO },
+ { 0x19, KEY_AUDIO }, /* Music */
+ { 0x0b, KEY_CHANNELUP },
+ { 0x08, KEY_CHANNELDOWN },
+ { 0x15, KEY_VOLUMEUP },
+ { 0x1c, KEY_VOLUMEDOWN },
+};
+
+static struct rc_keymap adstech_dvb_t_pci_map = {
+ .map = {
+ .scan = adstech_dvb_t_pci,
+ .size = ARRAY_SIZE(adstech_dvb_t_pci),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_ADSTECH_DVB_T_PCI,
+ }
+};
+
+static int __init init_rc_map_adstech_dvb_t_pci(void)
+{
+ return ir_register_map(&adstech_dvb_t_pci_map);
+}
+
+static void __exit exit_rc_map_adstech_dvb_t_pci(void)
+{
+ ir_unregister_map(&adstech_dvb_t_pci_map);
+}
+
+module_init(init_rc_map_adstech_dvb_t_pci)
+module_exit(exit_rc_map_adstech_dvb_t_pci)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-apac-viewcomp.c b/drivers/media/IR/keymaps/rc-apac-viewcomp.c
new file mode 100644
index 000000000000..0ef2b562baf0
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-apac-viewcomp.c
@@ -0,0 +1,80 @@
+/* apac-viewcomp.h - Keytable for apac_viewcomp Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Attila Kondoros <attila.kondoros@chello.hu> */
+
+static struct ir_scancode apac_viewcomp[] = {
+
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+ { 0x00, KEY_0 },
+ { 0x17, KEY_LAST }, /* +100 */
+ { 0x0a, KEY_LIST }, /* recall */
+
+
+ { 0x1c, KEY_TUNER }, /* TV/FM */
+ { 0x15, KEY_SEARCH }, /* scan */
+ { 0x12, KEY_POWER }, /* power */
+ { 0x1f, KEY_VOLUMEDOWN }, /* vol up */
+ { 0x1b, KEY_VOLUMEUP }, /* vol down */
+ { 0x1e, KEY_CHANNELDOWN }, /* chn up */
+ { 0x1a, KEY_CHANNELUP }, /* chn down */
+
+ { 0x11, KEY_VIDEO }, /* video */
+ { 0x0f, KEY_ZOOM }, /* full screen */
+ { 0x13, KEY_MUTE }, /* mute/unmute */
+ { 0x10, KEY_TEXT }, /* min */
+
+ { 0x0d, KEY_STOP }, /* freeze */
+ { 0x0e, KEY_RECORD }, /* record */
+ { 0x1d, KEY_PLAYPAUSE }, /* stop */
+ { 0x19, KEY_PLAY }, /* play */
+
+ { 0x16, KEY_GOTO }, /* osd */
+ { 0x14, KEY_REFRESH }, /* default */
+ { 0x0c, KEY_KPPLUS }, /* fine tune >>>> */
+ { 0x18, KEY_KPMINUS }, /* fine tune <<<< */
+};
+
+static struct rc_keymap apac_viewcomp_map = {
+ .map = {
+ .scan = apac_viewcomp,
+ .size = ARRAY_SIZE(apac_viewcomp),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_APAC_VIEWCOMP,
+ }
+};
+
+static int __init init_rc_map_apac_viewcomp(void)
+{
+ return ir_register_map(&apac_viewcomp_map);
+}
+
+static void __exit exit_rc_map_apac_viewcomp(void)
+{
+ ir_unregister_map(&apac_viewcomp_map);
+}
+
+module_init(init_rc_map_apac_viewcomp)
+module_exit(exit_rc_map_apac_viewcomp)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-asus-pc39.c b/drivers/media/IR/keymaps/rc-asus-pc39.c
new file mode 100644
index 000000000000..2aa068cd6c75
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-asus-pc39.c
@@ -0,0 +1,91 @@
+/* asus-pc39.h - Keytable for asus_pc39 Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/*
+ * Marc Fargas <telenieko@telenieko.com>
+ * this is the remote control that comes with the asus p7131
+ * which has a label saying it is "Model PC-39"
+ */
+
+static struct ir_scancode asus_pc39[] = {
+ /* Keys 0 to 9 */
+ { 0x15, KEY_0 },
+ { 0x29, KEY_1 },
+ { 0x2d, KEY_2 },
+ { 0x2b, KEY_3 },
+ { 0x09, KEY_4 },
+ { 0x0d, KEY_5 },
+ { 0x0b, KEY_6 },
+ { 0x31, KEY_7 },
+ { 0x35, KEY_8 },
+ { 0x33, KEY_9 },
+
+ { 0x3e, KEY_RADIO }, /* radio */
+ { 0x03, KEY_MENU }, /* dvd/menu */
+ { 0x2a, KEY_VOLUMEUP },
+ { 0x19, KEY_VOLUMEDOWN },
+ { 0x37, KEY_UP },
+ { 0x3b, KEY_DOWN },
+ { 0x27, KEY_LEFT },
+ { 0x2f, KEY_RIGHT },
+ { 0x25, KEY_VIDEO }, /* video */
+ { 0x39, KEY_AUDIO }, /* music */
+
+ { 0x21, KEY_TV }, /* tv */
+ { 0x1d, KEY_EXIT }, /* back */
+ { 0x0a, KEY_CHANNELUP }, /* channel / program + */
+ { 0x1b, KEY_CHANNELDOWN }, /* channel / program - */
+ { 0x1a, KEY_ENTER }, /* enter */
+
+ { 0x06, KEY_PAUSE }, /* play/pause */
+ { 0x1e, KEY_PREVIOUS }, /* rew */
+ { 0x26, KEY_NEXT }, /* forward */
+ { 0x0e, KEY_REWIND }, /* backward << */
+ { 0x3a, KEY_FASTFORWARD }, /* forward >> */
+ { 0x36, KEY_STOP },
+ { 0x2e, KEY_RECORD }, /* recording */
+ { 0x16, KEY_POWER }, /* the button that reads "close" */
+
+ { 0x11, KEY_ZOOM }, /* full screen */
+ { 0x13, KEY_MACRO }, /* recall */
+ { 0x23, KEY_HOME }, /* home */
+ { 0x05, KEY_PVR }, /* picture */
+ { 0x3d, KEY_MUTE }, /* mute */
+ { 0x01, KEY_DVD }, /* dvd */
+};
+
+static struct rc_keymap asus_pc39_map = {
+ .map = {
+ .scan = asus_pc39,
+ .size = ARRAY_SIZE(asus_pc39),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_ASUS_PC39,
+ }
+};
+
+static int __init init_rc_map_asus_pc39(void)
+{
+ return ir_register_map(&asus_pc39_map);
+}
+
+static void __exit exit_rc_map_asus_pc39(void)
+{
+ ir_unregister_map(&asus_pc39_map);
+}
+
+module_init(init_rc_map_asus_pc39)
+module_exit(exit_rc_map_asus_pc39)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-ati-tv-wonder-hd-600.c b/drivers/media/IR/keymaps/rc-ati-tv-wonder-hd-600.c
new file mode 100644
index 000000000000..8edfd293d010
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-ati-tv-wonder-hd-600.c
@@ -0,0 +1,69 @@
+/* ati-tv-wonder-hd-600.h - Keytable for ati_tv_wonder_hd_600 Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* ATI TV Wonder HD 600 USB
+ Devin Heitmueller <devin.heitmueller@gmail.com>
+ */
+
+static struct ir_scancode ati_tv_wonder_hd_600[] = {
+ { 0x00, KEY_RECORD}, /* Row 1 */
+ { 0x01, KEY_PLAYPAUSE},
+ { 0x02, KEY_STOP},
+ { 0x03, KEY_POWER},
+ { 0x04, KEY_PREVIOUS}, /* Row 2 */
+ { 0x05, KEY_REWIND},
+ { 0x06, KEY_FORWARD},
+ { 0x07, KEY_NEXT},
+ { 0x08, KEY_EPG}, /* Row 3 */
+ { 0x09, KEY_HOME},
+ { 0x0a, KEY_MENU},
+ { 0x0b, KEY_CHANNELUP},
+ { 0x0c, KEY_BACK}, /* Row 4 */
+ { 0x0d, KEY_UP},
+ { 0x0e, KEY_INFO},
+ { 0x0f, KEY_CHANNELDOWN},
+ { 0x10, KEY_LEFT}, /* Row 5 */
+ { 0x11, KEY_SELECT},
+ { 0x12, KEY_RIGHT},
+ { 0x13, KEY_VOLUMEUP},
+ { 0x14, KEY_LAST}, /* Row 6 */
+ { 0x15, KEY_DOWN},
+ { 0x16, KEY_MUTE},
+ { 0x17, KEY_VOLUMEDOWN},
+};
+
+static struct rc_keymap ati_tv_wonder_hd_600_map = {
+ .map = {
+ .scan = ati_tv_wonder_hd_600,
+ .size = ARRAY_SIZE(ati_tv_wonder_hd_600),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_ATI_TV_WONDER_HD_600,
+ }
+};
+
+static int __init init_rc_map_ati_tv_wonder_hd_600(void)
+{
+ return ir_register_map(&ati_tv_wonder_hd_600_map);
+}
+
+static void __exit exit_rc_map_ati_tv_wonder_hd_600(void)
+{
+ ir_unregister_map(&ati_tv_wonder_hd_600_map);
+}
+
+module_init(init_rc_map_ati_tv_wonder_hd_600)
+module_exit(exit_rc_map_ati_tv_wonder_hd_600)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-avermedia-a16d.c b/drivers/media/IR/keymaps/rc-avermedia-a16d.c
new file mode 100644
index 000000000000..12f043587f2e
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-avermedia-a16d.c
@@ -0,0 +1,75 @@
+/* avermedia-a16d.h - Keytable for avermedia_a16d Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode avermedia_a16d[] = {
+ { 0x20, KEY_LIST},
+ { 0x00, KEY_POWER},
+ { 0x28, KEY_1},
+ { 0x18, KEY_2},
+ { 0x38, KEY_3},
+ { 0x24, KEY_4},
+ { 0x14, KEY_5},
+ { 0x34, KEY_6},
+ { 0x2c, KEY_7},
+ { 0x1c, KEY_8},
+ { 0x3c, KEY_9},
+ { 0x12, KEY_SUBTITLE},
+ { 0x22, KEY_0},
+ { 0x32, KEY_REWIND},
+ { 0x3a, KEY_SHUFFLE},
+ { 0x02, KEY_PRINT},
+ { 0x11, KEY_CHANNELDOWN},
+ { 0x31, KEY_CHANNELUP},
+ { 0x0c, KEY_ZOOM},
+ { 0x1e, KEY_VOLUMEDOWN},
+ { 0x3e, KEY_VOLUMEUP},
+ { 0x0a, KEY_MUTE},
+ { 0x04, KEY_AUDIO},
+ { 0x26, KEY_RECORD},
+ { 0x06, KEY_PLAY},
+ { 0x36, KEY_STOP},
+ { 0x16, KEY_PAUSE},
+ { 0x2e, KEY_REWIND},
+ { 0x0e, KEY_FASTFORWARD},
+ { 0x30, KEY_TEXT},
+ { 0x21, KEY_GREEN},
+ { 0x01, KEY_BLUE},
+ { 0x08, KEY_EPG},
+ { 0x2a, KEY_MENU},
+};
+
+static struct rc_keymap avermedia_a16d_map = {
+ .map = {
+ .scan = avermedia_a16d,
+ .size = ARRAY_SIZE(avermedia_a16d),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_AVERMEDIA_A16D,
+ }
+};
+
+static int __init init_rc_map_avermedia_a16d(void)
+{
+ return ir_register_map(&avermedia_a16d_map);
+}
+
+static void __exit exit_rc_map_avermedia_a16d(void)
+{
+ ir_unregister_map(&avermedia_a16d_map);
+}
+
+module_init(init_rc_map_avermedia_a16d)
+module_exit(exit_rc_map_avermedia_a16d)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-avermedia-cardbus.c b/drivers/media/IR/keymaps/rc-avermedia-cardbus.c
new file mode 100644
index 000000000000..2a945b02e8ca
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-avermedia-cardbus.c
@@ -0,0 +1,97 @@
+/* avermedia-cardbus.h - Keytable for avermedia_cardbus Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Oldrich Jedlicka <oldium.pro@seznam.cz> */
+
+static struct ir_scancode avermedia_cardbus[] = {
+ { 0x00, KEY_POWER },
+ { 0x01, KEY_TUNER }, /* TV/FM */
+ { 0x03, KEY_TEXT }, /* Teletext */
+ { 0x04, KEY_EPG },
+ { 0x05, KEY_1 },
+ { 0x06, KEY_2 },
+ { 0x07, KEY_3 },
+ { 0x08, KEY_AUDIO },
+ { 0x09, KEY_4 },
+ { 0x0a, KEY_5 },
+ { 0x0b, KEY_6 },
+ { 0x0c, KEY_ZOOM }, /* Full screen */
+ { 0x0d, KEY_7 },
+ { 0x0e, KEY_8 },
+ { 0x0f, KEY_9 },
+ { 0x10, KEY_PAGEUP }, /* 16-CH PREV */
+ { 0x11, KEY_0 },
+ { 0x12, KEY_INFO },
+ { 0x13, KEY_AGAIN }, /* CH RTN - channel return */
+ { 0x14, KEY_MUTE },
+ { 0x15, KEY_EDIT }, /* Autoscan */
+ { 0x17, KEY_SAVE }, /* Screenshot */
+ { 0x18, KEY_PLAYPAUSE },
+ { 0x19, KEY_RECORD },
+ { 0x1a, KEY_PLAY },
+ { 0x1b, KEY_STOP },
+ { 0x1c, KEY_FASTFORWARD },
+ { 0x1d, KEY_REWIND },
+ { 0x1e, KEY_VOLUMEDOWN },
+ { 0x1f, KEY_VOLUMEUP },
+ { 0x22, KEY_SLEEP }, /* Sleep */
+ { 0x23, KEY_ZOOM }, /* Aspect */
+ { 0x26, KEY_SCREEN }, /* Pos */
+ { 0x27, KEY_ANGLE }, /* Size */
+ { 0x28, KEY_SELECT }, /* Select */
+ { 0x29, KEY_BLUE }, /* Blue/Picture */
+ { 0x2a, KEY_BACKSPACE }, /* Back */
+ { 0x2b, KEY_MEDIA }, /* PIP (Picture-in-picture) */
+ { 0x2c, KEY_DOWN },
+ { 0x2e, KEY_DOT },
+ { 0x2f, KEY_TV }, /* Live TV */
+ { 0x32, KEY_LEFT },
+ { 0x33, KEY_CLEAR }, /* Clear */
+ { 0x35, KEY_RED }, /* Red/TV */
+ { 0x36, KEY_UP },
+ { 0x37, KEY_HOME }, /* Home */
+ { 0x39, KEY_GREEN }, /* Green/Video */
+ { 0x3d, KEY_YELLOW }, /* Yellow/Music */
+ { 0x3e, KEY_OK }, /* Ok */
+ { 0x3f, KEY_RIGHT },
+ { 0x40, KEY_NEXT }, /* Next */
+ { 0x41, KEY_PREVIOUS }, /* Previous */
+ { 0x42, KEY_CHANNELDOWN }, /* Channel down */
+ { 0x43, KEY_CHANNELUP }, /* Channel up */
+};
+
+static struct rc_keymap avermedia_cardbus_map = {
+ .map = {
+ .scan = avermedia_cardbus,
+ .size = ARRAY_SIZE(avermedia_cardbus),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_AVERMEDIA_CARDBUS,
+ }
+};
+
+static int __init init_rc_map_avermedia_cardbus(void)
+{
+ return ir_register_map(&avermedia_cardbus_map);
+}
+
+static void __exit exit_rc_map_avermedia_cardbus(void)
+{
+ ir_unregister_map(&avermedia_cardbus_map);
+}
+
+module_init(init_rc_map_avermedia_cardbus)
+module_exit(exit_rc_map_avermedia_cardbus)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-avermedia-dvbt.c b/drivers/media/IR/keymaps/rc-avermedia-dvbt.c
new file mode 100644
index 000000000000..39dde6222875
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-avermedia-dvbt.c
@@ -0,0 +1,78 @@
+/* avermedia-dvbt.h - Keytable for avermedia_dvbt Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Matt Jesson <dvb@jesson.eclipse.co.uk> */
+
+static struct ir_scancode avermedia_dvbt[] = {
+ { 0x28, KEY_0 }, /* '0' / 'enter' */
+ { 0x22, KEY_1 }, /* '1' */
+ { 0x12, KEY_2 }, /* '2' / 'up arrow' */
+ { 0x32, KEY_3 }, /* '3' */
+ { 0x24, KEY_4 }, /* '4' / 'left arrow' */
+ { 0x14, KEY_5 }, /* '5' */
+ { 0x34, KEY_6 }, /* '6' / 'right arrow' */
+ { 0x26, KEY_7 }, /* '7' */
+ { 0x16, KEY_8 }, /* '8' / 'down arrow' */
+ { 0x36, KEY_9 }, /* '9' */
+
+ { 0x20, KEY_LIST }, /* 'source' */
+ { 0x10, KEY_TEXT }, /* 'teletext' */
+ { 0x00, KEY_POWER }, /* 'power' */
+ { 0x04, KEY_AUDIO }, /* 'audio' */
+ { 0x06, KEY_ZOOM }, /* 'full screen' */
+ { 0x18, KEY_VIDEO }, /* 'display' */
+ { 0x38, KEY_SEARCH }, /* 'loop' */
+ { 0x08, KEY_INFO }, /* 'preview' */
+ { 0x2a, KEY_REWIND }, /* 'backward <<' */
+ { 0x1a, KEY_FASTFORWARD }, /* 'forward >>' */
+ { 0x3a, KEY_RECORD }, /* 'capture' */
+ { 0x0a, KEY_MUTE }, /* 'mute' */
+ { 0x2c, KEY_RECORD }, /* 'record' */
+ { 0x1c, KEY_PAUSE }, /* 'pause' */
+ { 0x3c, KEY_STOP }, /* 'stop' */
+ { 0x0c, KEY_PLAY }, /* 'play' */
+ { 0x2e, KEY_RED }, /* 'red' */
+ { 0x01, KEY_BLUE }, /* 'blue' / 'cancel' */
+ { 0x0e, KEY_YELLOW }, /* 'yellow' / 'ok' */
+ { 0x21, KEY_GREEN }, /* 'green' */
+ { 0x11, KEY_CHANNELDOWN }, /* 'channel -' */
+ { 0x31, KEY_CHANNELUP }, /* 'channel +' */
+ { 0x1e, KEY_VOLUMEDOWN }, /* 'volume -' */
+ { 0x3e, KEY_VOLUMEUP }, /* 'volume +' */
+};
+
+static struct rc_keymap avermedia_dvbt_map = {
+ .map = {
+ .scan = avermedia_dvbt,
+ .size = ARRAY_SIZE(avermedia_dvbt),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_AVERMEDIA_DVBT,
+ }
+};
+
+static int __init init_rc_map_avermedia_dvbt(void)
+{
+ return ir_register_map(&avermedia_dvbt_map);
+}
+
+static void __exit exit_rc_map_avermedia_dvbt(void)
+{
+ ir_unregister_map(&avermedia_dvbt_map);
+}
+
+module_init(init_rc_map_avermedia_dvbt)
+module_exit(exit_rc_map_avermedia_dvbt)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-avermedia-m135a-rm-jx.c b/drivers/media/IR/keymaps/rc-avermedia-m135a-rm-jx.c
new file mode 100644
index 000000000000..101e7ea85941
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-avermedia-m135a-rm-jx.c
@@ -0,0 +1,90 @@
+/* avermedia-m135a-rm-jx.h - Keytable for avermedia_m135a_rm_jx Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/*
+ * Avermedia M135A with IR model RM-JX
+ * The same codes exist on both Positivo (BR) and original IR
+ * Mauro Carvalho Chehab <mchehab@infradead.org>
+ */
+
+static struct ir_scancode avermedia_m135a_rm_jx[] = {
+ { 0x0200, KEY_POWER2 },
+ { 0x022e, KEY_DOT }, /* '.' */
+ { 0x0201, KEY_MODE }, /* TV/FM or SOURCE */
+
+ { 0x0205, KEY_1 },
+ { 0x0206, KEY_2 },
+ { 0x0207, KEY_3 },
+ { 0x0209, KEY_4 },
+ { 0x020a, KEY_5 },
+ { 0x020b, KEY_6 },
+ { 0x020d, KEY_7 },
+ { 0x020e, KEY_8 },
+ { 0x020f, KEY_9 },
+ { 0x0211, KEY_0 },
+
+ { 0x0213, KEY_RIGHT }, /* -> or L */
+ { 0x0212, KEY_LEFT }, /* <- or R */
+
+ { 0x0217, KEY_SLEEP }, /* Capturar Imagem or Snapshot */
+ { 0x0210, KEY_SHUFFLE }, /* Amostra or 16 chan prev */
+
+ { 0x0303, KEY_CHANNELUP },
+ { 0x0302, KEY_CHANNELDOWN },
+ { 0x021f, KEY_VOLUMEUP },
+ { 0x021e, KEY_VOLUMEDOWN },
+ { 0x020c, KEY_ENTER }, /* Full Screen */
+
+ { 0x0214, KEY_MUTE },
+ { 0x0208, KEY_AUDIO },
+
+ { 0x0203, KEY_TEXT }, /* Teletext */
+ { 0x0204, KEY_EPG },
+ { 0x022b, KEY_TV2 }, /* TV2 or PIP */
+
+ { 0x021d, KEY_RED },
+ { 0x021c, KEY_YELLOW },
+ { 0x0301, KEY_GREEN },
+ { 0x0300, KEY_BLUE },
+
+ { 0x021a, KEY_PLAYPAUSE },
+ { 0x0219, KEY_RECORD },
+ { 0x0218, KEY_PLAY },
+ { 0x021b, KEY_STOP },
+};
+
+static struct rc_keymap avermedia_m135a_rm_jx_map = {
+ .map = {
+ .scan = avermedia_m135a_rm_jx,
+ .size = ARRAY_SIZE(avermedia_m135a_rm_jx),
+ .ir_type = IR_TYPE_NEC,
+ .name = RC_MAP_AVERMEDIA_M135A_RM_JX,
+ }
+};
+
+static int __init init_rc_map_avermedia_m135a_rm_jx(void)
+{
+ return ir_register_map(&avermedia_m135a_rm_jx_map);
+}
+
+static void __exit exit_rc_map_avermedia_m135a_rm_jx(void)
+{
+ ir_unregister_map(&avermedia_m135a_rm_jx_map);
+}
+
+module_init(init_rc_map_avermedia_m135a_rm_jx)
+module_exit(exit_rc_map_avermedia_m135a_rm_jx)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-avermedia.c b/drivers/media/IR/keymaps/rc-avermedia.c
new file mode 100644
index 000000000000..21effd5bfb0d
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-avermedia.c
@@ -0,0 +1,86 @@
+/* avermedia.h - Keytable for avermedia Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Alex Hermann <gaaf@gmx.net> */
+
+static struct ir_scancode avermedia[] = {
+ { 0x28, KEY_1 },
+ { 0x18, KEY_2 },
+ { 0x38, KEY_3 },
+ { 0x24, KEY_4 },
+ { 0x14, KEY_5 },
+ { 0x34, KEY_6 },
+ { 0x2c, KEY_7 },
+ { 0x1c, KEY_8 },
+ { 0x3c, KEY_9 },
+ { 0x22, KEY_0 },
+
+ { 0x20, KEY_TV }, /* TV/FM */
+ { 0x10, KEY_CD }, /* CD */
+ { 0x30, KEY_TEXT }, /* TELETEXT */
+ { 0x00, KEY_POWER }, /* POWER */
+
+ { 0x08, KEY_VIDEO }, /* VIDEO */
+ { 0x04, KEY_AUDIO }, /* AUDIO */
+ { 0x0c, KEY_ZOOM }, /* FULL SCREEN */
+
+ { 0x12, KEY_SUBTITLE }, /* DISPLAY */
+ { 0x32, KEY_REWIND }, /* LOOP */
+ { 0x02, KEY_PRINT }, /* PREVIEW */
+
+ { 0x2a, KEY_SEARCH }, /* AUTOSCAN */
+ { 0x1a, KEY_SLEEP }, /* FREEZE */
+ { 0x3a, KEY_CAMERA }, /* SNAPSHOT */
+ { 0x0a, KEY_MUTE }, /* MUTE */
+
+ { 0x26, KEY_RECORD }, /* RECORD */
+ { 0x16, KEY_PAUSE }, /* PAUSE */
+ { 0x36, KEY_STOP }, /* STOP */
+ { 0x06, KEY_PLAY }, /* PLAY */
+
+ { 0x2e, KEY_RED }, /* RED */
+ { 0x21, KEY_GREEN }, /* GREEN */
+ { 0x0e, KEY_YELLOW }, /* YELLOW */
+ { 0x01, KEY_BLUE }, /* BLUE */
+
+ { 0x1e, KEY_VOLUMEDOWN }, /* VOLUME- */
+ { 0x3e, KEY_VOLUMEUP }, /* VOLUME+ */
+ { 0x11, KEY_CHANNELDOWN }, /* CHANNEL/PAGE- */
+ { 0x31, KEY_CHANNELUP } /* CHANNEL/PAGE+ */
+};
+
+static struct rc_keymap avermedia_map = {
+ .map = {
+ .scan = avermedia,
+ .size = ARRAY_SIZE(avermedia),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_AVERMEDIA,
+ }
+};
+
+static int __init init_rc_map_avermedia(void)
+{
+ return ir_register_map(&avermedia_map);
+}
+
+static void __exit exit_rc_map_avermedia(void)
+{
+ ir_unregister_map(&avermedia_map);
+}
+
+module_init(init_rc_map_avermedia)
+module_exit(exit_rc_map_avermedia)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-avertv-303.c b/drivers/media/IR/keymaps/rc-avertv-303.c
new file mode 100644
index 000000000000..971c59d6f9d6
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-avertv-303.c
@@ -0,0 +1,85 @@
+/* avertv-303.h - Keytable for avertv_303 Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* AVERTV STUDIO 303 Remote */
+
+static struct ir_scancode avertv_303[] = {
+ { 0x2a, KEY_1 },
+ { 0x32, KEY_2 },
+ { 0x3a, KEY_3 },
+ { 0x4a, KEY_4 },
+ { 0x52, KEY_5 },
+ { 0x5a, KEY_6 },
+ { 0x6a, KEY_7 },
+ { 0x72, KEY_8 },
+ { 0x7a, KEY_9 },
+ { 0x0e, KEY_0 },
+
+ { 0x02, KEY_POWER },
+ { 0x22, KEY_VIDEO },
+ { 0x42, KEY_AUDIO },
+ { 0x62, KEY_ZOOM },
+ { 0x0a, KEY_TV },
+ { 0x12, KEY_CD },
+ { 0x1a, KEY_TEXT },
+
+ { 0x16, KEY_SUBTITLE },
+ { 0x1e, KEY_REWIND },
+ { 0x06, KEY_PRINT },
+
+ { 0x2e, KEY_SEARCH },
+ { 0x36, KEY_SLEEP },
+ { 0x3e, KEY_SHUFFLE },
+ { 0x26, KEY_MUTE },
+
+ { 0x4e, KEY_RECORD },
+ { 0x56, KEY_PAUSE },
+ { 0x5e, KEY_STOP },
+ { 0x46, KEY_PLAY },
+
+ { 0x6e, KEY_RED },
+ { 0x0b, KEY_GREEN },
+ { 0x66, KEY_YELLOW },
+ { 0x03, KEY_BLUE },
+
+ { 0x76, KEY_LEFT },
+ { 0x7e, KEY_RIGHT },
+ { 0x13, KEY_DOWN },
+ { 0x1b, KEY_UP },
+};
+
+static struct rc_keymap avertv_303_map = {
+ .map = {
+ .scan = avertv_303,
+ .size = ARRAY_SIZE(avertv_303),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_AVERTV_303,
+ }
+};
+
+static int __init init_rc_map_avertv_303(void)
+{
+ return ir_register_map(&avertv_303_map);
+}
+
+static void __exit exit_rc_map_avertv_303(void)
+{
+ ir_unregister_map(&avertv_303_map);
+}
+
+module_init(init_rc_map_avertv_303)
+module_exit(exit_rc_map_avertv_303)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-behold-columbus.c b/drivers/media/IR/keymaps/rc-behold-columbus.c
new file mode 100644
index 000000000000..9f56c98fef5b
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-behold-columbus.c
@@ -0,0 +1,108 @@
+/* behold-columbus.h - Keytable for behold_columbus Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Beholder Intl. Ltd. 2008
+ * Dmitry Belimov d.belimov@google.com
+ * Keytable is used by BeholdTV Columbus
+ * The "ascii-art picture" below (in comments; the first row
+ * is the keycode in hex, and the subsequent row(s) show
+ * the button labels, with several variants when appropriate)
+ * helps to decide which keycodes to assign to the buttons.
+ */
+
+static struct ir_scancode behold_columbus[] = {
+
+ /* 0x13 0x11 0x1C 0x12 *
+ * Mute Source TV/FM Power *
+ * */
+
+ { 0x13, KEY_MUTE },
+ { 0x11, KEY_PROPS },
+ { 0x1C, KEY_TUNER }, /* KEY_TV/KEY_RADIO */
+ { 0x12, KEY_POWER },
+
+ /* 0x01 0x02 0x03 0x0D *
+ * 1 2 3 Stereo *
+ * *
+ * 0x04 0x05 0x06 0x19 *
+ * 4 5 6 Snapshot *
+ * *
+ * 0x07 0x08 0x09 0x10 *
+ * 7 8 9 Zoom *
+ * */
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x0D, KEY_SETUP }, /* Setup key */
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x19, KEY_CAMERA }, /* Snapshot key */
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+ { 0x10, KEY_ZOOM },
+
+ /* 0x0A 0x00 0x0B 0x0C *
+ * RECALL 0 ChannelUp VolumeUp *
+ * */
+ { 0x0A, KEY_AGAIN },
+ { 0x00, KEY_0 },
+ { 0x0B, KEY_CHANNELUP },
+ { 0x0C, KEY_VOLUMEUP },
+
+ /* 0x1B 0x1D 0x15 0x18 *
+ * Timeshift Record ChannelDown VolumeDown *
+ * */
+
+ { 0x1B, KEY_TIME },
+ { 0x1D, KEY_RECORD },
+ { 0x15, KEY_CHANNELDOWN },
+ { 0x18, KEY_VOLUMEDOWN },
+
+ /* 0x0E 0x1E 0x0F 0x1A *
+ * Stop Pause Previous Next *
+ * */
+
+ { 0x0E, KEY_STOP },
+ { 0x1E, KEY_PAUSE },
+ { 0x0F, KEY_PREVIOUS },
+ { 0x1A, KEY_NEXT },
+
+};
+
+static struct rc_keymap behold_columbus_map = {
+ .map = {
+ .scan = behold_columbus,
+ .size = ARRAY_SIZE(behold_columbus),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_BEHOLD_COLUMBUS,
+ }
+};
+
+static int __init init_rc_map_behold_columbus(void)
+{
+ return ir_register_map(&behold_columbus_map);
+}
+
+static void __exit exit_rc_map_behold_columbus(void)
+{
+ ir_unregister_map(&behold_columbus_map);
+}
+
+module_init(init_rc_map_behold_columbus)
+module_exit(exit_rc_map_behold_columbus)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-behold.c b/drivers/media/IR/keymaps/rc-behold.c
new file mode 100644
index 000000000000..abc140b2098b
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-behold.c
@@ -0,0 +1,141 @@
+/* behold.h - Keytable for behold Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/*
+ * Igor Kuznetsov <igk72@ya.ru>
+ * Andrey J. Melnikov <temnota@kmv.ru>
+ *
+ * Keytable is used by BeholdTV 60x series, M6 series at
+ * least, and probably other cards too.
+ * The "ascii-art picture" below (in comments; the first row
+ * is the keycode in hex, and the subsequent row(s) show
+ * the button labels, with several variants when appropriate)
+ * helps to decide which keycodes to assign to the buttons.
+ */
+
+static struct ir_scancode behold[] = {
+
+ /* 0x1c 0x12 *
+ * TV/FM POWER *
+ * */
+ { 0x1c, KEY_TUNER }, /* XXX KEY_TV / KEY_RADIO */
+ { 0x12, KEY_POWER },
+
+ /* 0x01 0x02 0x03 *
+ * 1 2 3 *
+ * *
+ * 0x04 0x05 0x06 *
+ * 4 5 6 *
+ * *
+ * 0x07 0x08 0x09 *
+ * 7 8 9 *
+ * */
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+
+ /* 0x0a 0x00 0x17 *
+ * RECALL 0 MODE *
+ * */
+ { 0x0a, KEY_AGAIN },
+ { 0x00, KEY_0 },
+ { 0x17, KEY_MODE },
+
+ /* 0x14 0x10 *
+ * ASPECT FULLSCREEN *
+ * */
+ { 0x14, KEY_SCREEN },
+ { 0x10, KEY_ZOOM },
+
+ /* 0x0b *
+ * Up *
+ * *
+ * 0x18 0x16 0x0c *
+ * Left Ok Right *
+ * *
+ * 0x015 *
+ * Down *
+ * */
+ { 0x0b, KEY_CHANNELUP },
+ { 0x18, KEY_VOLUMEDOWN },
+ { 0x16, KEY_OK }, /* XXX KEY_ENTER */
+ { 0x0c, KEY_VOLUMEUP },
+ { 0x15, KEY_CHANNELDOWN },
+
+ /* 0x11 0x0d *
+ * MUTE INFO *
+ * */
+ { 0x11, KEY_MUTE },
+ { 0x0d, KEY_INFO },
+
+ /* 0x0f 0x1b 0x1a *
+ * RECORD PLAY/PAUSE STOP *
+ * *
+ * 0x0e 0x1f 0x1e *
+ *TELETEXT AUDIO SOURCE *
+ * RED YELLOW *
+ * */
+ { 0x0f, KEY_RECORD },
+ { 0x1b, KEY_PLAYPAUSE },
+ { 0x1a, KEY_STOP },
+ { 0x0e, KEY_TEXT },
+ { 0x1f, KEY_RED }, /*XXX KEY_AUDIO */
+ { 0x1e, KEY_YELLOW }, /*XXX KEY_SOURCE */
+
+ /* 0x1d 0x13 0x19 *
+ * SLEEP PREVIEW DVB *
+ * GREEN BLUE *
+ * */
+ { 0x1d, KEY_SLEEP },
+ { 0x13, KEY_GREEN },
+ { 0x19, KEY_BLUE }, /* XXX KEY_SAT */
+
+ /* 0x58 0x5c *
+ * FREEZE SNAPSHOT *
+ * */
+ { 0x58, KEY_SLOW },
+ { 0x5c, KEY_CAMERA },
+
+};
+
+static struct rc_keymap behold_map = {
+ .map = {
+ .scan = behold,
+ .size = ARRAY_SIZE(behold),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_BEHOLD,
+ }
+};
+
+static int __init init_rc_map_behold(void)
+{
+ return ir_register_map(&behold_map);
+}
+
+static void __exit exit_rc_map_behold(void)
+{
+ ir_unregister_map(&behold_map);
+}
+
+module_init(init_rc_map_behold)
+module_exit(exit_rc_map_behold)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-budget-ci-old.c b/drivers/media/IR/keymaps/rc-budget-ci-old.c
new file mode 100644
index 000000000000..64c2ac913338
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-budget-ci-old.c
@@ -0,0 +1,92 @@
+/* budget-ci-old.h - Keytable for budget_ci_old Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* From reading the following remotes:
+ * Zenith Universal 7 / TV Mode 807 / VCR Mode 837
+ * Hauppauge (from NOVA-CI-s box product)
+ * This is a "middle of the road" approach; differences are noted
+ */
+
+static struct ir_scancode budget_ci_old[] = {
+ { 0x00, KEY_0 },
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+ { 0x0a, KEY_ENTER },
+ { 0x0b, KEY_RED },
+ { 0x0c, KEY_POWER }, /* RADIO on Hauppauge */
+ { 0x0d, KEY_MUTE },
+ { 0x0f, KEY_A }, /* TV on Hauppauge */
+ { 0x10, KEY_VOLUMEUP },
+ { 0x11, KEY_VOLUMEDOWN },
+ { 0x14, KEY_B },
+ { 0x1c, KEY_UP },
+ { 0x1d, KEY_DOWN },
+ { 0x1e, KEY_OPTION }, /* RESERVED on Hauppauge */
+ { 0x1f, KEY_BREAK },
+ { 0x20, KEY_CHANNELUP },
+ { 0x21, KEY_CHANNELDOWN },
+ { 0x22, KEY_PREVIOUS }, /* Prev Ch on Zenith, SOURCE on Hauppauge */
+ { 0x24, KEY_RESTART },
+ { 0x25, KEY_OK },
+ { 0x26, KEY_CYCLEWINDOWS }, /* MINIMIZE on Hauppauge */
+ { 0x28, KEY_ENTER }, /* VCR mode on Zenith */
+ { 0x29, KEY_PAUSE },
+ { 0x2b, KEY_RIGHT },
+ { 0x2c, KEY_LEFT },
+ { 0x2e, KEY_MENU }, /* FULL SCREEN on Hauppauge */
+ { 0x30, KEY_SLOW },
+ { 0x31, KEY_PREVIOUS }, /* VCR mode on Zenith */
+ { 0x32, KEY_REWIND },
+ { 0x34, KEY_FASTFORWARD },
+ { 0x35, KEY_PLAY },
+ { 0x36, KEY_STOP },
+ { 0x37, KEY_RECORD },
+ { 0x38, KEY_TUNER }, /* TV/VCR on Zenith */
+ { 0x3a, KEY_C },
+ { 0x3c, KEY_EXIT },
+ { 0x3d, KEY_POWER2 },
+ { 0x3e, KEY_TUNER },
+};
+
+static struct rc_keymap budget_ci_old_map = {
+ .map = {
+ .scan = budget_ci_old,
+ .size = ARRAY_SIZE(budget_ci_old),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_BUDGET_CI_OLD,
+ }
+};
+
+static int __init init_rc_map_budget_ci_old(void)
+{
+ return ir_register_map(&budget_ci_old_map);
+}
+
+static void __exit exit_rc_map_budget_ci_old(void)
+{
+ ir_unregister_map(&budget_ci_old_map);
+}
+
+module_init(init_rc_map_budget_ci_old)
+module_exit(exit_rc_map_budget_ci_old)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-cinergy-1400.c b/drivers/media/IR/keymaps/rc-cinergy-1400.c
new file mode 100644
index 000000000000..074f2c2c2c61
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-cinergy-1400.c
@@ -0,0 +1,84 @@
+/* cinergy-1400.h - Keytable for cinergy_1400 Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Cinergy 1400 DVB-T */
+
+static struct ir_scancode cinergy_1400[] = {
+ { 0x01, KEY_POWER },
+ { 0x02, KEY_1 },
+ { 0x03, KEY_2 },
+ { 0x04, KEY_3 },
+ { 0x05, KEY_4 },
+ { 0x06, KEY_5 },
+ { 0x07, KEY_6 },
+ { 0x08, KEY_7 },
+ { 0x09, KEY_8 },
+ { 0x0a, KEY_9 },
+ { 0x0c, KEY_0 },
+
+ { 0x0b, KEY_VIDEO },
+ { 0x0d, KEY_REFRESH },
+ { 0x0e, KEY_SELECT },
+ { 0x0f, KEY_EPG },
+ { 0x10, KEY_UP },
+ { 0x11, KEY_LEFT },
+ { 0x12, KEY_OK },
+ { 0x13, KEY_RIGHT },
+ { 0x14, KEY_DOWN },
+ { 0x15, KEY_TEXT },
+ { 0x16, KEY_INFO },
+
+ { 0x17, KEY_RED },
+ { 0x18, KEY_GREEN },
+ { 0x19, KEY_YELLOW },
+ { 0x1a, KEY_BLUE },
+
+ { 0x1b, KEY_CHANNELUP },
+ { 0x1c, KEY_VOLUMEUP },
+ { 0x1d, KEY_MUTE },
+ { 0x1e, KEY_VOLUMEDOWN },
+ { 0x1f, KEY_CHANNELDOWN },
+
+ { 0x40, KEY_PAUSE },
+ { 0x4c, KEY_PLAY },
+ { 0x58, KEY_RECORD },
+ { 0x54, KEY_PREVIOUS },
+ { 0x48, KEY_STOP },
+ { 0x5c, KEY_NEXT },
+};
+
+static struct rc_keymap cinergy_1400_map = {
+ .map = {
+ .scan = cinergy_1400,
+ .size = ARRAY_SIZE(cinergy_1400),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_CINERGY_1400,
+ }
+};
+
+static int __init init_rc_map_cinergy_1400(void)
+{
+ return ir_register_map(&cinergy_1400_map);
+}
+
+static void __exit exit_rc_map_cinergy_1400(void)
+{
+ ir_unregister_map(&cinergy_1400_map);
+}
+
+module_init(init_rc_map_cinergy_1400)
+module_exit(exit_rc_map_cinergy_1400)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-cinergy.c b/drivers/media/IR/keymaps/rc-cinergy.c
new file mode 100644
index 000000000000..cf84c3dba742
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-cinergy.c
@@ -0,0 +1,78 @@
+/* cinergy.h - Keytable for cinergy Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode cinergy[] = {
+ { 0x00, KEY_0 },
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+
+ { 0x0a, KEY_POWER },
+ { 0x0b, KEY_PROG1 }, /* app */
+ { 0x0c, KEY_ZOOM }, /* zoom/fullscreen */
+ { 0x0d, KEY_CHANNELUP }, /* channel */
+ { 0x0e, KEY_CHANNELDOWN }, /* channel- */
+ { 0x0f, KEY_VOLUMEUP },
+ { 0x10, KEY_VOLUMEDOWN },
+ { 0x11, KEY_TUNER }, /* AV */
+ { 0x12, KEY_NUMLOCK }, /* -/-- */
+ { 0x13, KEY_AUDIO }, /* audio */
+ { 0x14, KEY_MUTE },
+ { 0x15, KEY_UP },
+ { 0x16, KEY_DOWN },
+ { 0x17, KEY_LEFT },
+ { 0x18, KEY_RIGHT },
+ { 0x19, BTN_LEFT, },
+ { 0x1a, BTN_RIGHT, },
+ { 0x1b, KEY_WWW }, /* text */
+ { 0x1c, KEY_REWIND },
+ { 0x1d, KEY_FORWARD },
+ { 0x1e, KEY_RECORD },
+ { 0x1f, KEY_PLAY },
+ { 0x20, KEY_PREVIOUSSONG },
+ { 0x21, KEY_NEXTSONG },
+ { 0x22, KEY_PAUSE },
+ { 0x23, KEY_STOP },
+};
+
+static struct rc_keymap cinergy_map = {
+ .map = {
+ .scan = cinergy,
+ .size = ARRAY_SIZE(cinergy),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_CINERGY,
+ }
+};
+
+static int __init init_rc_map_cinergy(void)
+{
+ return ir_register_map(&cinergy_map);
+}
+
+static void __exit exit_rc_map_cinergy(void)
+{
+ ir_unregister_map(&cinergy_map);
+}
+
+module_init(init_rc_map_cinergy)
+module_exit(exit_rc_map_cinergy)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-dm1105-nec.c b/drivers/media/IR/keymaps/rc-dm1105-nec.c
new file mode 100644
index 000000000000..90684d0efea3
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-dm1105-nec.c
@@ -0,0 +1,76 @@
+/* dm1105-nec.h - Keytable for dm1105_nec Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* DVBWorld remotes
+ Igor M. Liplianin <liplianin@me.by>
+ */
+
+static struct ir_scancode dm1105_nec[] = {
+ { 0x0a, KEY_POWER2}, /* power */
+ { 0x0c, KEY_MUTE}, /* mute */
+ { 0x11, KEY_1},
+ { 0x12, KEY_2},
+ { 0x13, KEY_3},
+ { 0x14, KEY_4},
+ { 0x15, KEY_5},
+ { 0x16, KEY_6},
+ { 0x17, KEY_7},
+ { 0x18, KEY_8},
+ { 0x19, KEY_9},
+ { 0x10, KEY_0},
+ { 0x1c, KEY_CHANNELUP}, /* ch+ */
+ { 0x0f, KEY_CHANNELDOWN}, /* ch- */
+ { 0x1a, KEY_VOLUMEUP}, /* vol+ */
+ { 0x0e, KEY_VOLUMEDOWN}, /* vol- */
+ { 0x04, KEY_RECORD}, /* rec */
+ { 0x09, KEY_CHANNEL}, /* fav */
+ { 0x08, KEY_BACKSPACE}, /* rewind */
+ { 0x07, KEY_FASTFORWARD}, /* fast */
+ { 0x0b, KEY_PAUSE}, /* pause */
+ { 0x02, KEY_ESC}, /* cancel */
+ { 0x03, KEY_TAB}, /* tab */
+ { 0x00, KEY_UP}, /* up */
+ { 0x1f, KEY_ENTER}, /* ok */
+ { 0x01, KEY_DOWN}, /* down */
+ { 0x05, KEY_RECORD}, /* cap */
+ { 0x06, KEY_STOP}, /* stop */
+ { 0x40, KEY_ZOOM}, /* full */
+ { 0x1e, KEY_TV}, /* tvmode */
+ { 0x1b, KEY_B}, /* recall */
+};
+
+static struct rc_keymap dm1105_nec_map = {
+ .map = {
+ .scan = dm1105_nec,
+ .size = ARRAY_SIZE(dm1105_nec),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_DM1105_NEC,
+ }
+};
+
+static int __init init_rc_map_dm1105_nec(void)
+{
+ return ir_register_map(&dm1105_nec_map);
+}
+
+static void __exit exit_rc_map_dm1105_nec(void)
+{
+ ir_unregister_map(&dm1105_nec_map);
+}
+
+module_init(init_rc_map_dm1105_nec)
+module_exit(exit_rc_map_dm1105_nec)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-dntv-live-dvb-t.c b/drivers/media/IR/keymaps/rc-dntv-live-dvb-t.c
new file mode 100644
index 000000000000..8a4027af964a
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-dntv-live-dvb-t.c
@@ -0,0 +1,78 @@
+/* dntv-live-dvb-t.h - Keytable for dntv_live_dvb_t Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* DigitalNow DNTV Live DVB-T Remote */
+
+static struct ir_scancode dntv_live_dvb_t[] = {
+ { 0x00, KEY_ESC }, /* 'go up a level?' */
+ /* Keys 0 to 9 */
+ { 0x0a, KEY_0 },
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+
+ { 0x0b, KEY_TUNER }, /* tv/fm */
+ { 0x0c, KEY_SEARCH }, /* scan */
+ { 0x0d, KEY_STOP },
+ { 0x0e, KEY_PAUSE },
+ { 0x0f, KEY_LIST }, /* source */
+
+ { 0x10, KEY_MUTE },
+ { 0x11, KEY_REWIND }, /* backward << */
+ { 0x12, KEY_POWER },
+ { 0x13, KEY_CAMERA }, /* snap */
+ { 0x14, KEY_AUDIO }, /* stereo */
+ { 0x15, KEY_CLEAR }, /* reset */
+ { 0x16, KEY_PLAY },
+ { 0x17, KEY_ENTER },
+ { 0x18, KEY_ZOOM }, /* full screen */
+ { 0x19, KEY_FASTFORWARD }, /* forward >> */
+ { 0x1a, KEY_CHANNELUP },
+ { 0x1b, KEY_VOLUMEUP },
+ { 0x1c, KEY_INFO }, /* preview */
+ { 0x1d, KEY_RECORD }, /* record */
+ { 0x1e, KEY_CHANNELDOWN },
+ { 0x1f, KEY_VOLUMEDOWN },
+};
+
+static struct rc_keymap dntv_live_dvb_t_map = {
+ .map = {
+ .scan = dntv_live_dvb_t,
+ .size = ARRAY_SIZE(dntv_live_dvb_t),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_DNTV_LIVE_DVB_T,
+ }
+};
+
+static int __init init_rc_map_dntv_live_dvb_t(void)
+{
+ return ir_register_map(&dntv_live_dvb_t_map);
+}
+
+static void __exit exit_rc_map_dntv_live_dvb_t(void)
+{
+ ir_unregister_map(&dntv_live_dvb_t_map);
+}
+
+module_init(init_rc_map_dntv_live_dvb_t)
+module_exit(exit_rc_map_dntv_live_dvb_t)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-dntv-live-dvbt-pro.c b/drivers/media/IR/keymaps/rc-dntv-live-dvbt-pro.c
new file mode 100644
index 000000000000..6f4d60764d59
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-dntv-live-dvbt-pro.c
@@ -0,0 +1,97 @@
+/* dntv-live-dvbt-pro.h - Keytable for dntv_live_dvbt_pro Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* DigitalNow DNTV Live! DVB-T Pro Remote */
+
+static struct ir_scancode dntv_live_dvbt_pro[] = {
+ { 0x16, KEY_POWER },
+ { 0x5b, KEY_HOME },
+
+ { 0x55, KEY_TV }, /* live tv */
+ { 0x58, KEY_TUNER }, /* digital Radio */
+ { 0x5a, KEY_RADIO }, /* FM radio */
+ { 0x59, KEY_DVD }, /* dvd menu */
+ { 0x03, KEY_1 },
+ { 0x01, KEY_2 },
+ { 0x06, KEY_3 },
+ { 0x09, KEY_4 },
+ { 0x1d, KEY_5 },
+ { 0x1f, KEY_6 },
+ { 0x0d, KEY_7 },
+ { 0x19, KEY_8 },
+ { 0x1b, KEY_9 },
+ { 0x0c, KEY_CANCEL },
+ { 0x15, KEY_0 },
+ { 0x4a, KEY_CLEAR },
+ { 0x13, KEY_BACK },
+ { 0x00, KEY_TAB },
+ { 0x4b, KEY_UP },
+ { 0x4e, KEY_LEFT },
+ { 0x4f, KEY_OK },
+ { 0x52, KEY_RIGHT },
+ { 0x51, KEY_DOWN },
+ { 0x1e, KEY_VOLUMEUP },
+ { 0x0a, KEY_VOLUMEDOWN },
+ { 0x02, KEY_CHANNELDOWN },
+ { 0x05, KEY_CHANNELUP },
+ { 0x11, KEY_RECORD },
+ { 0x14, KEY_PLAY },
+ { 0x4c, KEY_PAUSE },
+ { 0x1a, KEY_STOP },
+ { 0x40, KEY_REWIND },
+ { 0x12, KEY_FASTFORWARD },
+ { 0x41, KEY_PREVIOUSSONG }, /* replay |< */
+ { 0x42, KEY_NEXTSONG }, /* skip >| */
+ { 0x54, KEY_CAMERA }, /* capture */
+ { 0x50, KEY_LANGUAGE }, /* sap */
+ { 0x47, KEY_TV2 }, /* pip */
+ { 0x4d, KEY_SCREEN },
+ { 0x43, KEY_SUBTITLE },
+ { 0x10, KEY_MUTE },
+ { 0x49, KEY_AUDIO }, /* l/r */
+ { 0x07, KEY_SLEEP },
+ { 0x08, KEY_VIDEO }, /* a/v */
+ { 0x0e, KEY_PREVIOUS }, /* recall */
+ { 0x45, KEY_ZOOM }, /* zoom + */
+ { 0x46, KEY_ANGLE }, /* zoom - */
+ { 0x56, KEY_RED },
+ { 0x57, KEY_GREEN },
+ { 0x5c, KEY_YELLOW },
+ { 0x5d, KEY_BLUE },
+};
+
+static struct rc_keymap dntv_live_dvbt_pro_map = {
+ .map = {
+ .scan = dntv_live_dvbt_pro,
+ .size = ARRAY_SIZE(dntv_live_dvbt_pro),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_DNTV_LIVE_DVBT_PRO,
+ }
+};
+
+static int __init init_rc_map_dntv_live_dvbt_pro(void)
+{
+ return ir_register_map(&dntv_live_dvbt_pro_map);
+}
+
+static void __exit exit_rc_map_dntv_live_dvbt_pro(void)
+{
+ ir_unregister_map(&dntv_live_dvbt_pro_map);
+}
+
+module_init(init_rc_map_dntv_live_dvbt_pro)
+module_exit(exit_rc_map_dntv_live_dvbt_pro)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-em-terratec.c b/drivers/media/IR/keymaps/rc-em-terratec.c
new file mode 100644
index 000000000000..3130c9c29e6b
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-em-terratec.c
@@ -0,0 +1,69 @@
+/* em-terratec.h - Keytable for em_terratec Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode em_terratec[] = {
+ { 0x01, KEY_CHANNEL },
+ { 0x02, KEY_SELECT },
+ { 0x03, KEY_MUTE },
+ { 0x04, KEY_POWER },
+ { 0x05, KEY_1 },
+ { 0x06, KEY_2 },
+ { 0x07, KEY_3 },
+ { 0x08, KEY_CHANNELUP },
+ { 0x09, KEY_4 },
+ { 0x0a, KEY_5 },
+ { 0x0b, KEY_6 },
+ { 0x0c, KEY_CHANNELDOWN },
+ { 0x0d, KEY_7 },
+ { 0x0e, KEY_8 },
+ { 0x0f, KEY_9 },
+ { 0x10, KEY_VOLUMEUP },
+ { 0x11, KEY_0 },
+ { 0x12, KEY_MENU },
+ { 0x13, KEY_PRINT },
+ { 0x14, KEY_VOLUMEDOWN },
+ { 0x16, KEY_PAUSE },
+ { 0x18, KEY_RECORD },
+ { 0x19, KEY_REWIND },
+ { 0x1a, KEY_PLAY },
+ { 0x1b, KEY_FORWARD },
+ { 0x1c, KEY_BACKSPACE },
+ { 0x1e, KEY_STOP },
+ { 0x40, KEY_ZOOM },
+};
+
+static struct rc_keymap em_terratec_map = {
+ .map = {
+ .scan = em_terratec,
+ .size = ARRAY_SIZE(em_terratec),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_EM_TERRATEC,
+ }
+};
+
+static int __init init_rc_map_em_terratec(void)
+{
+ return ir_register_map(&em_terratec_map);
+}
+
+static void __exit exit_rc_map_em_terratec(void)
+{
+ ir_unregister_map(&em_terratec_map);
+}
+
+module_init(init_rc_map_em_terratec)
+module_exit(exit_rc_map_em_terratec)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-empty.c b/drivers/media/IR/keymaps/rc-empty.c
new file mode 100644
index 000000000000..3b338d84b476
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-empty.c
@@ -0,0 +1,44 @@
+/* empty.h - Keytable for empty Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* empty keytable, can be used as placeholder for not-yet created keytables */
+
+static struct ir_scancode empty[] = {
+ { 0x2a, KEY_COFFEE },
+};
+
+static struct rc_keymap empty_map = {
+ .map = {
+ .scan = empty,
+ .size = ARRAY_SIZE(empty),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_EMPTY,
+ }
+};
+
+static int __init init_rc_map_empty(void)
+{
+ return ir_register_map(&empty_map);
+}
+
+static void __exit exit_rc_map_empty(void)
+{
+ ir_unregister_map(&empty_map);
+}
+
+module_init(init_rc_map_empty)
+module_exit(exit_rc_map_empty)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-encore-enltv-fm53.c b/drivers/media/IR/keymaps/rc-encore-enltv-fm53.c
new file mode 100644
index 000000000000..4b816967877e
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-encore-enltv-fm53.c
@@ -0,0 +1,81 @@
+/* encore-enltv-fm53.h - Keytable for encore_enltv_fm53 Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Encore ENLTV-FM v5.3
+ Mauro Carvalho Chehab <mchehab@infradead.org>
+ */
+
+static struct ir_scancode encore_enltv_fm53[] = {
+ { 0x10, KEY_POWER2},
+ { 0x06, KEY_MUTE},
+
+ { 0x09, KEY_1},
+ { 0x1d, KEY_2},
+ { 0x1f, KEY_3},
+ { 0x19, KEY_4},
+ { 0x1b, KEY_5},
+ { 0x11, KEY_6},
+ { 0x17, KEY_7},
+ { 0x12, KEY_8},
+ { 0x16, KEY_9},
+ { 0x48, KEY_0},
+
+ { 0x04, KEY_LIST}, /* -/-- */
+ { 0x40, KEY_LAST}, /* recall */
+
+ { 0x02, KEY_MODE}, /* TV/AV */
+ { 0x05, KEY_CAMERA}, /* SNAPSHOT */
+
+ { 0x4c, KEY_CHANNELUP}, /* UP */
+ { 0x00, KEY_CHANNELDOWN}, /* DOWN */
+ { 0x0d, KEY_VOLUMEUP}, /* RIGHT */
+ { 0x15, KEY_VOLUMEDOWN}, /* LEFT */
+ { 0x49, KEY_ENTER}, /* OK */
+
+ { 0x54, KEY_RECORD},
+ { 0x4d, KEY_PLAY}, /* pause */
+
+ { 0x1e, KEY_MENU}, /* video setting */
+ { 0x0e, KEY_RIGHT}, /* <- */
+ { 0x1a, KEY_LEFT}, /* -> */
+
+ { 0x0a, KEY_CLEAR}, /* video default */
+	{ 0x0c, KEY_ZOOM},	/* hide panel */
+ { 0x47, KEY_SLEEP}, /* shutdown */
+};
+
+static struct rc_keymap encore_enltv_fm53_map = {
+ .map = {
+ .scan = encore_enltv_fm53,
+ .size = ARRAY_SIZE(encore_enltv_fm53),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_ENCORE_ENLTV_FM53,
+ }
+};
+
+static int __init init_rc_map_encore_enltv_fm53(void)
+{
+ return ir_register_map(&encore_enltv_fm53_map);
+}
+
+static void __exit exit_rc_map_encore_enltv_fm53(void)
+{
+ ir_unregister_map(&encore_enltv_fm53_map);
+}
+
+module_init(init_rc_map_encore_enltv_fm53)
+module_exit(exit_rc_map_encore_enltv_fm53)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-encore-enltv.c b/drivers/media/IR/keymaps/rc-encore-enltv.c
new file mode 100644
index 000000000000..9fabffd28cc9
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-encore-enltv.c
@@ -0,0 +1,112 @@
+/* encore-enltv.h - Keytable for encore_enltv Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Encore ENLTV-FM - black plastic, white front cover with white glowing buttons
+ Juan Pablo Sormani <sorman@gmail.com> */
+
+static struct ir_scancode encore_enltv[] = {
+
+	/* The Power button does nothing, not even in the Windows app,
+	   although it sends data (used for BIOS wakeup?) */
+ { 0x0d, KEY_MUTE },
+
+ { 0x1e, KEY_TV },
+ { 0x00, KEY_VIDEO },
+ { 0x01, KEY_AUDIO }, /* music */
+ { 0x02, KEY_MHP }, /* picture */
+
+ { 0x1f, KEY_1 },
+ { 0x03, KEY_2 },
+ { 0x04, KEY_3 },
+ { 0x05, KEY_4 },
+ { 0x1c, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x1d, KEY_9 },
+ { 0x0a, KEY_0 },
+
+ { 0x09, KEY_LIST }, /* -/-- */
+ { 0x0b, KEY_LAST }, /* recall */
+
+ { 0x14, KEY_HOME }, /* win start menu */
+ { 0x15, KEY_EXIT }, /* exit */
+ { 0x16, KEY_CHANNELUP }, /* UP */
+ { 0x12, KEY_CHANNELDOWN }, /* DOWN */
+ { 0x0c, KEY_VOLUMEUP }, /* RIGHT */
+ { 0x17, KEY_VOLUMEDOWN }, /* LEFT */
+
+ { 0x18, KEY_ENTER }, /* OK */
+
+ { 0x0e, KEY_ESC },
+ { 0x13, KEY_CYCLEWINDOWS }, /* desktop */
+ { 0x11, KEY_TAB },
+ { 0x19, KEY_SWITCHVIDEOMODE }, /* switch */
+
+ { 0x1a, KEY_MENU },
+ { 0x1b, KEY_ZOOM }, /* fullscreen */
+ { 0x44, KEY_TIME }, /* time shift */
+ { 0x40, KEY_MODE }, /* source */
+
+ { 0x5a, KEY_RECORD },
+ { 0x42, KEY_PLAY }, /* play/pause */
+ { 0x45, KEY_STOP },
+ { 0x43, KEY_CAMERA }, /* camera icon */
+
+ { 0x48, KEY_REWIND },
+ { 0x4a, KEY_FASTFORWARD },
+ { 0x49, KEY_PREVIOUS },
+ { 0x4b, KEY_NEXT },
+
+ { 0x4c, KEY_FAVORITES }, /* tv wall */
+ { 0x4d, KEY_SOUND }, /* DVD sound */
+ { 0x4e, KEY_LANGUAGE }, /* DVD lang */
+ { 0x4f, KEY_TEXT }, /* DVD text */
+
+ { 0x50, KEY_SLEEP }, /* shutdown */
+ { 0x51, KEY_MODE }, /* stereo > main */
+ { 0x52, KEY_SELECT }, /* stereo > sap */
+ { 0x53, KEY_PROG1 }, /* teletext */
+
+
+ { 0x59, KEY_RED }, /* AP1 */
+ { 0x41, KEY_GREEN }, /* AP2 */
+ { 0x47, KEY_YELLOW }, /* AP3 */
+ { 0x57, KEY_BLUE }, /* AP4 */
+};
+
+static struct rc_keymap encore_enltv_map = {
+ .map = {
+ .scan = encore_enltv,
+ .size = ARRAY_SIZE(encore_enltv),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_ENCORE_ENLTV,
+ }
+};
+
+static int __init init_rc_map_encore_enltv(void)
+{
+ return ir_register_map(&encore_enltv_map);
+}
+
+static void __exit exit_rc_map_encore_enltv(void)
+{
+ ir_unregister_map(&encore_enltv_map);
+}
+
+module_init(init_rc_map_encore_enltv)
+module_exit(exit_rc_map_encore_enltv)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-encore-enltv2.c b/drivers/media/IR/keymaps/rc-encore-enltv2.c
new file mode 100644
index 000000000000..efefd5166618
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-encore-enltv2.c
@@ -0,0 +1,90 @@
+/* encore-enltv2.h - Keytable for encore_enltv2 Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Encore ENLTV2-FM - silver plastic - "Wand Media" written at the bottom
+ Mauro Carvalho Chehab <mchehab@infradead.org> */
+
+static struct ir_scancode encore_enltv2[] = {
+ { 0x4c, KEY_POWER2 },
+ { 0x4a, KEY_TUNER },
+ { 0x40, KEY_1 },
+ { 0x60, KEY_2 },
+ { 0x50, KEY_3 },
+ { 0x70, KEY_4 },
+ { 0x48, KEY_5 },
+ { 0x68, KEY_6 },
+ { 0x58, KEY_7 },
+ { 0x78, KEY_8 },
+ { 0x44, KEY_9 },
+ { 0x54, KEY_0 },
+
+ { 0x64, KEY_LAST }, /* +100 */
+ { 0x4e, KEY_AGAIN }, /* Recall */
+
+ { 0x6c, KEY_SWITCHVIDEOMODE }, /* Video Source */
+ { 0x5e, KEY_MENU },
+ { 0x56, KEY_SCREEN },
+ { 0x7a, KEY_SETUP },
+
+ { 0x46, KEY_MUTE },
+ { 0x5c, KEY_MODE }, /* Stereo */
+ { 0x74, KEY_INFO },
+ { 0x7c, KEY_CLEAR },
+
+ { 0x55, KEY_UP },
+ { 0x49, KEY_DOWN },
+ { 0x7e, KEY_LEFT },
+ { 0x59, KEY_RIGHT },
+ { 0x6a, KEY_ENTER },
+
+ { 0x42, KEY_VOLUMEUP },
+ { 0x62, KEY_VOLUMEDOWN },
+ { 0x52, KEY_CHANNELUP },
+ { 0x72, KEY_CHANNELDOWN },
+
+ { 0x41, KEY_RECORD },
+ { 0x51, KEY_CAMERA }, /* Snapshot */
+ { 0x75, KEY_TIME }, /* Timeshift */
+ { 0x71, KEY_TV2 }, /* PIP */
+
+ { 0x45, KEY_REWIND },
+ { 0x6f, KEY_PAUSE },
+ { 0x7d, KEY_FORWARD },
+ { 0x79, KEY_STOP },
+};
+
+static struct rc_keymap encore_enltv2_map = {
+ .map = {
+ .scan = encore_enltv2,
+ .size = ARRAY_SIZE(encore_enltv2),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_ENCORE_ENLTV2,
+ }
+};
+
+static int __init init_rc_map_encore_enltv2(void)
+{
+ return ir_register_map(&encore_enltv2_map);
+}
+
+static void __exit exit_rc_map_encore_enltv2(void)
+{
+ ir_unregister_map(&encore_enltv2_map);
+}
+
+module_init(init_rc_map_encore_enltv2)
+module_exit(exit_rc_map_encore_enltv2)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-evga-indtube.c b/drivers/media/IR/keymaps/rc-evga-indtube.c
new file mode 100644
index 000000000000..3f3fb13813b3
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-evga-indtube.c
@@ -0,0 +1,61 @@
+/* evga-indtube.h - Keytable for evga_indtube Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* EVGA inDtube
+ Devin Heitmueller <devin.heitmueller@gmail.com>
+ */
+
+static struct ir_scancode evga_indtube[] = {
+ { 0x12, KEY_POWER},
+ { 0x02, KEY_MODE}, /* TV */
+ { 0x14, KEY_MUTE},
+ { 0x1a, KEY_CHANNELUP},
+ { 0x16, KEY_TV2}, /* PIP */
+ { 0x1d, KEY_VOLUMEUP},
+ { 0x05, KEY_CHANNELDOWN},
+ { 0x0f, KEY_PLAYPAUSE},
+ { 0x19, KEY_VOLUMEDOWN},
+ { 0x1c, KEY_REWIND},
+ { 0x0d, KEY_RECORD},
+ { 0x18, KEY_FORWARD},
+ { 0x1e, KEY_PREVIOUS},
+ { 0x1b, KEY_STOP},
+ { 0x1f, KEY_NEXT},
+ { 0x13, KEY_CAMERA},
+};
+
+static struct rc_keymap evga_indtube_map = {
+ .map = {
+ .scan = evga_indtube,
+ .size = ARRAY_SIZE(evga_indtube),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_EVGA_INDTUBE,
+ }
+};
+
+static int __init init_rc_map_evga_indtube(void)
+{
+ return ir_register_map(&evga_indtube_map);
+}
+
+static void __exit exit_rc_map_evga_indtube(void)
+{
+ ir_unregister_map(&evga_indtube_map);
+}
+
+module_init(init_rc_map_evga_indtube)
+module_exit(exit_rc_map_evga_indtube)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-eztv.c b/drivers/media/IR/keymaps/rc-eztv.c
new file mode 100644
index 000000000000..660907a78db9
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-eztv.c
@@ -0,0 +1,96 @@
+/* eztv.h - Keytable for eztv Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Alfons Geser <a.geser@cox.net>
+ * updates from Job D. R. Borges <jobdrb@ig.com.br> */
+
+static struct ir_scancode eztv[] = {
+ { 0x12, KEY_POWER },
+ { 0x01, KEY_TV }, /* DVR */
+ { 0x15, KEY_DVD }, /* DVD */
+ { 0x17, KEY_AUDIO }, /* music */
+ /* DVR mode / DVD mode / music mode */
+
+ { 0x1b, KEY_MUTE }, /* mute */
+ { 0x02, KEY_LANGUAGE }, /* MTS/SAP / audio / autoseek */
+ { 0x1e, KEY_SUBTITLE }, /* closed captioning / subtitle / seek */
+ { 0x16, KEY_ZOOM }, /* full screen */
+ { 0x1c, KEY_VIDEO }, /* video source / eject / delall */
+ { 0x1d, KEY_RESTART }, /* playback / angle / del */
+ { 0x2f, KEY_SEARCH }, /* scan / menu / playlist */
+ { 0x30, KEY_CHANNEL }, /* CH surfing / bookmark / memo */
+
+ { 0x31, KEY_HELP }, /* help */
+ { 0x32, KEY_MODE }, /* num/memo */
+ { 0x33, KEY_ESC }, /* cancel */
+
+ { 0x0c, KEY_UP }, /* up */
+ { 0x10, KEY_DOWN }, /* down */
+ { 0x08, KEY_LEFT }, /* left */
+ { 0x04, KEY_RIGHT }, /* right */
+ { 0x03, KEY_SELECT }, /* select */
+
+ { 0x1f, KEY_REWIND }, /* rewind */
+ { 0x20, KEY_PLAYPAUSE },/* play/pause */
+ { 0x29, KEY_FORWARD }, /* forward */
+ { 0x14, KEY_AGAIN }, /* repeat */
+ { 0x2b, KEY_RECORD }, /* recording */
+ { 0x2c, KEY_STOP }, /* stop */
+ { 0x2d, KEY_PLAY }, /* play */
+ { 0x2e, KEY_CAMERA }, /* snapshot / shuffle */
+
+ { 0x00, KEY_0 },
+ { 0x05, KEY_1 },
+ { 0x06, KEY_2 },
+ { 0x07, KEY_3 },
+ { 0x09, KEY_4 },
+ { 0x0a, KEY_5 },
+ { 0x0b, KEY_6 },
+ { 0x0d, KEY_7 },
+ { 0x0e, KEY_8 },
+ { 0x0f, KEY_9 },
+
+ { 0x2a, KEY_VOLUMEUP },
+ { 0x11, KEY_VOLUMEDOWN },
+ { 0x18, KEY_CHANNELUP },/* CH.tracking up */
+ { 0x19, KEY_CHANNELDOWN },/* CH.tracking down */
+
+ { 0x13, KEY_ENTER }, /* enter */
+ { 0x21, KEY_DOT }, /* . (decimal dot) */
+};
+
+static struct rc_keymap eztv_map = {
+ .map = {
+ .scan = eztv,
+ .size = ARRAY_SIZE(eztv),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_EZTV,
+ }
+};
+
+static int __init init_rc_map_eztv(void)
+{
+ return ir_register_map(&eztv_map);
+}
+
+static void __exit exit_rc_map_eztv(void)
+{
+ ir_unregister_map(&eztv_map);
+}
+
+module_init(init_rc_map_eztv)
+module_exit(exit_rc_map_eztv)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-flydvb.c b/drivers/media/IR/keymaps/rc-flydvb.c
new file mode 100644
index 000000000000..a173c81035f4
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-flydvb.c
@@ -0,0 +1,77 @@
+/* flydvb.h - Keytable for flydvb Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode flydvb[] = {
+ { 0x01, KEY_ZOOM }, /* Full Screen */
+ { 0x00, KEY_POWER }, /* Power */
+
+ { 0x03, KEY_1 },
+ { 0x04, KEY_2 },
+ { 0x05, KEY_3 },
+ { 0x07, KEY_4 },
+ { 0x08, KEY_5 },
+ { 0x09, KEY_6 },
+ { 0x0b, KEY_7 },
+ { 0x0c, KEY_8 },
+ { 0x0d, KEY_9 },
+ { 0x06, KEY_AGAIN }, /* Recall */
+ { 0x0f, KEY_0 },
+ { 0x10, KEY_MUTE }, /* Mute */
+ { 0x02, KEY_RADIO }, /* TV/Radio */
+ { 0x1b, KEY_LANGUAGE }, /* SAP (Second Audio Program) */
+
+ { 0x14, KEY_VOLUMEUP }, /* VOL+ */
+ { 0x17, KEY_VOLUMEDOWN }, /* VOL- */
+ { 0x12, KEY_CHANNELUP }, /* CH+ */
+ { 0x13, KEY_CHANNELDOWN }, /* CH- */
+ { 0x1d, KEY_ENTER }, /* Enter */
+
+ { 0x1a, KEY_MODE }, /* PIP */
+ { 0x18, KEY_TUNER }, /* Source */
+
+ { 0x1e, KEY_RECORD }, /* Record/Pause */
+ { 0x15, KEY_ANGLE }, /* Swap (no label on key) */
+ { 0x1c, KEY_PAUSE }, /* Timeshift/Pause */
+ { 0x19, KEY_BACK }, /* Rewind << */
+ { 0x0a, KEY_PLAYPAUSE }, /* Play/Pause */
+ { 0x1f, KEY_FORWARD }, /* Forward >> */
+ { 0x16, KEY_PREVIOUS }, /* Back |<< */
+ { 0x11, KEY_STOP }, /* Stop */
+ { 0x0e, KEY_NEXT }, /* End >>| */
+};
+
+static struct rc_keymap flydvb_map = {
+ .map = {
+ .scan = flydvb,
+ .size = ARRAY_SIZE(flydvb),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_FLYDVB,
+ }
+};
+
+static int __init init_rc_map_flydvb(void)
+{
+ return ir_register_map(&flydvb_map);
+}
+
+static void __exit exit_rc_map_flydvb(void)
+{
+ ir_unregister_map(&flydvb_map);
+}
+
+module_init(init_rc_map_flydvb)
+module_exit(exit_rc_map_flydvb)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-flyvideo.c b/drivers/media/IR/keymaps/rc-flyvideo.c
new file mode 100644
index 000000000000..9c73043cbdba
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-flyvideo.c
@@ -0,0 +1,70 @@
+/* flyvideo.h - Keytable for flyvideo Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode flyvideo[] = {
+ { 0x0f, KEY_0 },
+ { 0x03, KEY_1 },
+ { 0x04, KEY_2 },
+ { 0x05, KEY_3 },
+ { 0x07, KEY_4 },
+ { 0x08, KEY_5 },
+ { 0x09, KEY_6 },
+ { 0x0b, KEY_7 },
+ { 0x0c, KEY_8 },
+ { 0x0d, KEY_9 },
+
+ { 0x0e, KEY_MODE }, /* Air/Cable */
+ { 0x11, KEY_VIDEO }, /* Video */
+ { 0x15, KEY_AUDIO }, /* Audio */
+ { 0x00, KEY_POWER }, /* Power */
+ { 0x18, KEY_TUNER }, /* AV Source */
+ { 0x02, KEY_ZOOM }, /* Fullscreen */
+ { 0x1a, KEY_LANGUAGE }, /* Stereo */
+ { 0x1b, KEY_MUTE }, /* Mute */
+ { 0x14, KEY_VOLUMEUP }, /* Volume + */
+ { 0x17, KEY_VOLUMEDOWN },/* Volume - */
+ { 0x12, KEY_CHANNELUP },/* Channel + */
+ { 0x13, KEY_CHANNELDOWN },/* Channel - */
+ { 0x06, KEY_AGAIN }, /* Recall */
+ { 0x10, KEY_ENTER }, /* Enter */
+
+ { 0x19, KEY_BACK }, /* Rewind ( <<< ) */
+ { 0x1f, KEY_FORWARD }, /* Forward ( >>> ) */
+ { 0x0a, KEY_ANGLE }, /* no label, may be used as the PAUSE button */
+};
+
+static struct rc_keymap flyvideo_map = {
+ .map = {
+ .scan = flyvideo,
+ .size = ARRAY_SIZE(flyvideo),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_FLYVIDEO,
+ }
+};
+
+static int __init init_rc_map_flyvideo(void)
+{
+ return ir_register_map(&flyvideo_map);
+}
+
+static void __exit exit_rc_map_flyvideo(void)
+{
+ ir_unregister_map(&flyvideo_map);
+}
+
+module_init(init_rc_map_flyvideo)
+module_exit(exit_rc_map_flyvideo)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-fusionhdtv-mce.c b/drivers/media/IR/keymaps/rc-fusionhdtv-mce.c
new file mode 100644
index 000000000000..cdb10389b10e
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-fusionhdtv-mce.c
@@ -0,0 +1,98 @@
+/* fusionhdtv-mce.h - Keytable for fusionhdtv_mce Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* DViCO FUSION HDTV MCE remote */
+
+static struct ir_scancode fusionhdtv_mce[] = {
+
+ { 0x0b, KEY_1 },
+ { 0x17, KEY_2 },
+ { 0x1b, KEY_3 },
+ { 0x07, KEY_4 },
+ { 0x50, KEY_5 },
+ { 0x54, KEY_6 },
+ { 0x48, KEY_7 },
+ { 0x4c, KEY_8 },
+ { 0x58, KEY_9 },
+ { 0x03, KEY_0 },
+
+ { 0x5e, KEY_OK },
+ { 0x51, KEY_UP },
+ { 0x53, KEY_DOWN },
+ { 0x5b, KEY_LEFT },
+ { 0x5f, KEY_RIGHT },
+
+ { 0x02, KEY_TV }, /* Labeled DTV on remote */
+ { 0x0e, KEY_MP3 },
+ { 0x1a, KEY_DVD },
+ { 0x1e, KEY_FAVORITES }, /* Labeled CPF on remote */
+ { 0x16, KEY_SETUP },
+ { 0x46, KEY_POWER2 }, /* TV On/Off button on remote */
+ { 0x0a, KEY_EPG }, /* Labeled Guide on remote */
+
+ { 0x49, KEY_BACK },
+ { 0x59, KEY_INFO }, /* Labeled MORE on remote */
+ { 0x4d, KEY_MENU }, /* Labeled DVDMENU on remote */
+ { 0x55, KEY_CYCLEWINDOWS }, /* Labeled ALT-TAB on remote */
+
+ { 0x0f, KEY_PREVIOUSSONG }, /* Labeled |<< REPLAY on remote */
+ { 0x12, KEY_NEXTSONG }, /* Labeled >>| SKIP on remote */
+ { 0x42, KEY_ENTER }, /* Labeled START with a green
+					   MS Windows logo on remote */
+
+ { 0x15, KEY_VOLUMEUP },
+ { 0x05, KEY_VOLUMEDOWN },
+ { 0x11, KEY_CHANNELUP },
+ { 0x09, KEY_CHANNELDOWN },
+
+ { 0x52, KEY_CAMERA },
+ { 0x5a, KEY_TUNER },
+ { 0x19, KEY_OPEN },
+
+ { 0x13, KEY_MODE }, /* 4:3 16:9 select */
+ { 0x1f, KEY_ZOOM },
+
+ { 0x43, KEY_REWIND },
+ { 0x47, KEY_PLAYPAUSE },
+ { 0x4f, KEY_FASTFORWARD },
+ { 0x57, KEY_MUTE },
+ { 0x0d, KEY_STOP },
+ { 0x01, KEY_RECORD },
+ { 0x4e, KEY_POWER },
+};
+
+static struct rc_keymap fusionhdtv_mce_map = {
+ .map = {
+ .scan = fusionhdtv_mce,
+ .size = ARRAY_SIZE(fusionhdtv_mce),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_FUSIONHDTV_MCE,
+ }
+};
+
+static int __init init_rc_map_fusionhdtv_mce(void)
+{
+ return ir_register_map(&fusionhdtv_mce_map);
+}
+
+static void __exit exit_rc_map_fusionhdtv_mce(void)
+{
+ ir_unregister_map(&fusionhdtv_mce_map);
+}
+
+module_init(init_rc_map_fusionhdtv_mce)
+module_exit(exit_rc_map_fusionhdtv_mce)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-gadmei-rm008z.c b/drivers/media/IR/keymaps/rc-gadmei-rm008z.c
new file mode 100644
index 000000000000..c16c0d1263ac
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-gadmei-rm008z.c
@@ -0,0 +1,81 @@
+/* gadmei-rm008z.h - Keytable for gadmei_rm008z Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* GADMEI UTV330+ RM008Z remote
+ Shine Liu <shinel@foxmail.com>
+ */
+
+static struct ir_scancode gadmei_rm008z[] = {
+ { 0x14, KEY_POWER2}, /* POWER OFF */
+ { 0x0c, KEY_MUTE}, /* MUTE */
+
+ { 0x18, KEY_TV}, /* TV */
+ { 0x0e, KEY_VIDEO}, /* AV */
+ { 0x0b, KEY_AUDIO}, /* SV */
+ { 0x0f, KEY_RADIO}, /* FM */
+
+ { 0x00, KEY_1},
+ { 0x01, KEY_2},
+ { 0x02, KEY_3},
+ { 0x03, KEY_4},
+ { 0x04, KEY_5},
+ { 0x05, KEY_6},
+ { 0x06, KEY_7},
+ { 0x07, KEY_8},
+ { 0x08, KEY_9},
+ { 0x09, KEY_0},
+ { 0x0a, KEY_INFO}, /* OSD */
+ { 0x1c, KEY_BACKSPACE}, /* LAST */
+
+ { 0x0d, KEY_PLAY}, /* PLAY */
+ { 0x1e, KEY_CAMERA}, /* SNAPSHOT */
+ { 0x1a, KEY_RECORD}, /* RECORD */
+ { 0x17, KEY_STOP}, /* STOP */
+
+ { 0x1f, KEY_UP}, /* UP */
+ { 0x44, KEY_DOWN}, /* DOWN */
+ { 0x46, KEY_TAB}, /* BACK */
+	{ 0x4a, KEY_ZOOM},	/* FULLSCREEN */
+
+ { 0x10, KEY_VOLUMEUP}, /* VOLUMEUP */
+ { 0x11, KEY_VOLUMEDOWN}, /* VOLUMEDOWN */
+ { 0x12, KEY_CHANNELUP}, /* CHANNELUP */
+ { 0x13, KEY_CHANNELDOWN}, /* CHANNELDOWN */
+ { 0x15, KEY_ENTER}, /* OK */
+};
+
+static struct rc_keymap gadmei_rm008z_map = {
+ .map = {
+ .scan = gadmei_rm008z,
+ .size = ARRAY_SIZE(gadmei_rm008z),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_GADMEI_RM008Z,
+ }
+};
+
+static int __init init_rc_map_gadmei_rm008z(void)
+{
+ return ir_register_map(&gadmei_rm008z_map);
+}
+
+static void __exit exit_rc_map_gadmei_rm008z(void)
+{
+ ir_unregister_map(&gadmei_rm008z_map);
+}
+
+module_init(init_rc_map_gadmei_rm008z)
+module_exit(exit_rc_map_gadmei_rm008z)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-genius-tvgo-a11mce.c b/drivers/media/IR/keymaps/rc-genius-tvgo-a11mce.c
new file mode 100644
index 000000000000..89f8e384e52a
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-genius-tvgo-a11mce.c
@@ -0,0 +1,84 @@
+/* genius-tvgo-a11mce.h - Keytable for genius_tvgo_a11mce Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/*
+ * Remote control for the Genius TVGO A11MCE
+ * Adrian Pardini <pardo.bsso@gmail.com>
+ */
+
+static struct ir_scancode genius_tvgo_a11mce[] = {
+ /* Keys 0 to 9 */
+ { 0x48, KEY_0 },
+ { 0x09, KEY_1 },
+ { 0x1d, KEY_2 },
+ { 0x1f, KEY_3 },
+ { 0x19, KEY_4 },
+ { 0x1b, KEY_5 },
+ { 0x11, KEY_6 },
+ { 0x17, KEY_7 },
+ { 0x12, KEY_8 },
+ { 0x16, KEY_9 },
+
+ { 0x54, KEY_RECORD }, /* recording */
+ { 0x06, KEY_MUTE }, /* mute */
+ { 0x10, KEY_POWER },
+ { 0x40, KEY_LAST }, /* recall */
+ { 0x4c, KEY_CHANNELUP }, /* channel / program + */
+ { 0x00, KEY_CHANNELDOWN }, /* channel / program - */
+ { 0x0d, KEY_VOLUMEUP },
+ { 0x15, KEY_VOLUMEDOWN },
+ { 0x4d, KEY_OK }, /* also labeled as Pause */
+	{ 0x1c, KEY_ZOOM },	/* full screen and Stop */
+	{ 0x02, KEY_MODE },	/* AV Source or Rewind */
+ { 0x04, KEY_LIST }, /* -/-- */
+ /* small arrows above numbers */
+ { 0x1a, KEY_NEXT }, /* also Fast Forward */
+ { 0x0e, KEY_PREVIOUS }, /* also Rewind */
+	/* these are in a rather non-standard layout and have
+	   an alternate name written on them */
+ { 0x1e, KEY_UP }, /* Video Setting */
+ { 0x0a, KEY_DOWN }, /* Video Default */
+ { 0x05, KEY_CAMERA }, /* Snapshot */
+ { 0x0c, KEY_RIGHT }, /* Hide Panel */
+ /* Four buttons without label */
+ { 0x49, KEY_RED },
+ { 0x0b, KEY_GREEN },
+ { 0x13, KEY_YELLOW },
+ { 0x50, KEY_BLUE },
+};
+
+static struct rc_keymap genius_tvgo_a11mce_map = {
+ .map = {
+ .scan = genius_tvgo_a11mce,
+ .size = ARRAY_SIZE(genius_tvgo_a11mce),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_GENIUS_TVGO_A11MCE,
+ }
+};
+
+static int __init init_rc_map_genius_tvgo_a11mce(void)
+{
+ return ir_register_map(&genius_tvgo_a11mce_map);
+}
+
+static void __exit exit_rc_map_genius_tvgo_a11mce(void)
+{
+ ir_unregister_map(&genius_tvgo_a11mce_map);
+}
+
+module_init(init_rc_map_genius_tvgo_a11mce)
+module_exit(exit_rc_map_genius_tvgo_a11mce)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-gotview7135.c b/drivers/media/IR/keymaps/rc-gotview7135.c
new file mode 100644
index 000000000000..52f025bb35f6
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-gotview7135.c
@@ -0,0 +1,79 @@
+/* gotview7135.h - Keytable for gotview7135 Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Mike Baikov <mike@baikov.com> */
+
+static struct ir_scancode gotview7135[] = {
+
+ { 0x11, KEY_POWER },
+ { 0x35, KEY_TV },
+ { 0x1b, KEY_0 },
+ { 0x29, KEY_1 },
+ { 0x19, KEY_2 },
+ { 0x39, KEY_3 },
+ { 0x1f, KEY_4 },
+ { 0x2c, KEY_5 },
+ { 0x21, KEY_6 },
+ { 0x24, KEY_7 },
+ { 0x18, KEY_8 },
+ { 0x2b, KEY_9 },
+ { 0x3b, KEY_AGAIN }, /* LOOP */
+ { 0x06, KEY_AUDIO },
+ { 0x31, KEY_PRINT }, /* PREVIEW */
+ { 0x3e, KEY_VIDEO },
+ { 0x10, KEY_CHANNELUP },
+ { 0x20, KEY_CHANNELDOWN },
+ { 0x0c, KEY_VOLUMEDOWN },
+ { 0x28, KEY_VOLUMEUP },
+ { 0x08, KEY_MUTE },
+ { 0x26, KEY_SEARCH }, /* SCAN */
+ { 0x3f, KEY_CAMERA }, /* SNAPSHOT */
+ { 0x12, KEY_RECORD },
+ { 0x32, KEY_STOP },
+ { 0x3c, KEY_PLAY },
+ { 0x1d, KEY_REWIND },
+ { 0x2d, KEY_PAUSE },
+ { 0x0d, KEY_FORWARD },
+	{ 0x05, KEY_ZOOM },	/* FULL */
+
+ { 0x2a, KEY_F21 }, /* LIVE TIMESHIFT */
+ { 0x0e, KEY_F22 }, /* MIN TIMESHIFT */
+ { 0x1e, KEY_TIME }, /* TIMESHIFT */
+ { 0x38, KEY_F24 }, /* NORMAL TIMESHIFT */
+};
+
+static struct rc_keymap gotview7135_map = {
+ .map = {
+ .scan = gotview7135,
+ .size = ARRAY_SIZE(gotview7135),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_GOTVIEW7135,
+ }
+};
+
+static int __init init_rc_map_gotview7135(void)
+{
+ return ir_register_map(&gotview7135_map);
+}
+
+static void __exit exit_rc_map_gotview7135(void)
+{
+ ir_unregister_map(&gotview7135_map);
+}
+
+module_init(init_rc_map_gotview7135)
+module_exit(exit_rc_map_gotview7135)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-hauppauge-new.c b/drivers/media/IR/keymaps/rc-hauppauge-new.c
new file mode 100644
index 000000000000..c6f8cd7c5186
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-hauppauge-new.c
@@ -0,0 +1,100 @@
+/* hauppauge-new.h - Keytable for hauppauge_new Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Hauppauge: the newer, gray remotes (there seem to be multiple
+ * slightly different versions), shipped with cx88+ivtv cards.
+ * Almost RC5 coding, but with some non-standard keys. */
+
+static struct ir_scancode hauppauge_new[] = {
+ /* Keys 0 to 9 */
+ { 0x00, KEY_0 },
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+
+ { 0x0a, KEY_TEXT }, /* keypad asterisk as well */
+ { 0x0b, KEY_RED }, /* red button */
+ { 0x0c, KEY_RADIO },
+ { 0x0d, KEY_MENU },
+ { 0x0e, KEY_SUBTITLE }, /* also the # key */
+ { 0x0f, KEY_MUTE },
+ { 0x10, KEY_VOLUMEUP },
+ { 0x11, KEY_VOLUMEDOWN },
+ { 0x12, KEY_PREVIOUS }, /* previous channel */
+ { 0x14, KEY_UP },
+ { 0x15, KEY_DOWN },
+ { 0x16, KEY_LEFT },
+ { 0x17, KEY_RIGHT },
+ { 0x18, KEY_VIDEO }, /* Videos */
+ { 0x19, KEY_AUDIO }, /* Music */
+ /* 0x1a: Pictures - presume this means
+ "Multimedia Home Platform" -
+ no "PICTURES" key in input.h
+ */
+ { 0x1a, KEY_MHP },
+
+ { 0x1b, KEY_EPG }, /* Guide */
+ { 0x1c, KEY_TV },
+ { 0x1e, KEY_NEXTSONG }, /* skip >| */
+ { 0x1f, KEY_EXIT }, /* back/exit */
+ { 0x20, KEY_CHANNELUP }, /* channel / program + */
+ { 0x21, KEY_CHANNELDOWN }, /* channel / program - */
+ { 0x22, KEY_CHANNEL }, /* source (old black remote) */
+ { 0x24, KEY_PREVIOUSSONG }, /* replay |< */
+ { 0x25, KEY_ENTER }, /* OK */
+ { 0x26, KEY_SLEEP }, /* minimize (old black remote) */
+ { 0x29, KEY_BLUE }, /* blue key */
+ { 0x2e, KEY_GREEN }, /* green button */
+ { 0x30, KEY_PAUSE }, /* pause */
+ { 0x32, KEY_REWIND }, /* backward << */
+ { 0x34, KEY_FASTFORWARD }, /* forward >> */
+ { 0x35, KEY_PLAY },
+ { 0x36, KEY_STOP },
+ { 0x37, KEY_RECORD }, /* recording */
+ { 0x38, KEY_YELLOW }, /* yellow key */
+ { 0x3b, KEY_SELECT }, /* top right button */
+ { 0x3c, KEY_ZOOM }, /* full */
+ { 0x3d, KEY_POWER }, /* system power (green button) */
+};
+
+static struct rc_keymap hauppauge_new_map = {
+ .map = {
+ .scan = hauppauge_new,
+ .size = ARRAY_SIZE(hauppauge_new),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_HAUPPAUGE_NEW,
+ }
+};
+
+static int __init init_rc_map_hauppauge_new(void)
+{
+ return ir_register_map(&hauppauge_new_map);
+}
+
+static void __exit exit_rc_map_hauppauge_new(void)
+{
+ ir_unregister_map(&hauppauge_new_map);
+}
+
+module_init(init_rc_map_hauppauge_new)
+module_exit(exit_rc_map_hauppauge_new)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-imon-mce.c b/drivers/media/IR/keymaps/rc-imon-mce.c
new file mode 100644
index 000000000000..e49f350e3a0d
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-imon-mce.c
@@ -0,0 +1,142 @@
+/* rc5-imon-mce.c - Keytable for Windows Media Center RC-6 remotes for use
+ * with the SoundGraph iMON/Antec Veris hardware IR decoder
+ *
+ * Copyright (c) 2010 by Jarod Wilson <jarod@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* mce-mode imon mce remote key table */
+static struct ir_scancode imon_mce[] = {
+ /* keys sorted mostly by frequency of use to optimize lookups */
+ { 0x800ff415, KEY_REWIND },
+ { 0x800ff414, KEY_FASTFORWARD },
+ { 0x800ff41b, KEY_PREVIOUS },
+ { 0x800ff41a, KEY_NEXT },
+
+ { 0x800ff416, KEY_PLAY },
+ { 0x800ff418, KEY_PAUSE },
+ { 0x800ff419, KEY_STOP },
+ { 0x800ff417, KEY_RECORD },
+
+ { 0x02000052, KEY_UP },
+ { 0x02000051, KEY_DOWN },
+ { 0x02000050, KEY_LEFT },
+ { 0x0200004f, KEY_RIGHT },
+
+ { 0x800ff41e, KEY_UP },
+ { 0x800ff41f, KEY_DOWN },
+ { 0x800ff420, KEY_LEFT },
+ { 0x800ff421, KEY_RIGHT },
+
+ /* 0x800ff40b also KEY_NUMERIC_POUND on some receivers */
+ { 0x800ff40b, KEY_ENTER },
+ { 0x02000028, KEY_ENTER },
+/* the OK and Enter buttons decode to the same value on some remotes
+ { 0x02000028, KEY_OK }, */
+ { 0x800ff422, KEY_OK },
+ { 0x0200002a, KEY_EXIT },
+ { 0x800ff423, KEY_EXIT },
+ { 0x02000029, KEY_DELETE },
+ /* 0x800ff40a also KEY_NUMERIC_STAR on some receivers */
+ { 0x800ff40a, KEY_DELETE },
+
+ { 0x800ff40e, KEY_MUTE },
+ { 0x800ff410, KEY_VOLUMEUP },
+ { 0x800ff411, KEY_VOLUMEDOWN },
+ { 0x800ff412, KEY_CHANNELUP },
+ { 0x800ff413, KEY_CHANNELDOWN },
+
+ { 0x0200001e, KEY_NUMERIC_1 },
+ { 0x0200001f, KEY_NUMERIC_2 },
+ { 0x02000020, KEY_NUMERIC_3 },
+ { 0x02000021, KEY_NUMERIC_4 },
+ { 0x02000022, KEY_NUMERIC_5 },
+ { 0x02000023, KEY_NUMERIC_6 },
+ { 0x02000024, KEY_NUMERIC_7 },
+ { 0x02000025, KEY_NUMERIC_8 },
+ { 0x02000026, KEY_NUMERIC_9 },
+ { 0x02000027, KEY_NUMERIC_0 },
+
+ { 0x800ff401, KEY_NUMERIC_1 },
+ { 0x800ff402, KEY_NUMERIC_2 },
+ { 0x800ff403, KEY_NUMERIC_3 },
+ { 0x800ff404, KEY_NUMERIC_4 },
+ { 0x800ff405, KEY_NUMERIC_5 },
+ { 0x800ff406, KEY_NUMERIC_6 },
+ { 0x800ff407, KEY_NUMERIC_7 },
+ { 0x800ff408, KEY_NUMERIC_8 },
+ { 0x800ff409, KEY_NUMERIC_9 },
+ { 0x800ff400, KEY_NUMERIC_0 },
+
+ { 0x02200025, KEY_NUMERIC_STAR },
+ { 0x02200020, KEY_NUMERIC_POUND },
+ /* 0x800ff41d also KEY_BLUE on some receivers */
+ { 0x800ff41d, KEY_NUMERIC_STAR },
+ /* 0x800ff41c also KEY_PREVIOUS on some receivers */
+ { 0x800ff41c, KEY_NUMERIC_POUND },
+
+ { 0x800ff446, KEY_TV },
+ { 0x800ff447, KEY_AUDIO }, /* My Music */
+ { 0x800ff448, KEY_PVR }, /* RecordedTV */
+ { 0x800ff449, KEY_CAMERA },
+ { 0x800ff44a, KEY_VIDEO },
+ /* 0x800ff424 also KEY_MENU on some receivers */
+ { 0x800ff424, KEY_DVD },
+ /* 0x800ff425 also KEY_GREEN on some receivers */
+ { 0x800ff425, KEY_TUNER }, /* LiveTV */
+ { 0x800ff450, KEY_RADIO },
+
+ { 0x800ff44c, KEY_LANGUAGE },
+ { 0x800ff427, KEY_ZOOM }, /* Aspect */
+
+ { 0x800ff45b, KEY_RED },
+ { 0x800ff45c, KEY_GREEN },
+ { 0x800ff45d, KEY_YELLOW },
+ { 0x800ff45e, KEY_BLUE },
+
+ { 0x800ff466, KEY_RED },
+ /* { 0x800ff425, KEY_GREEN }, */
+ { 0x800ff468, KEY_YELLOW },
+ /* { 0x800ff41d, KEY_BLUE }, */
+
+ { 0x800ff40f, KEY_INFO },
+ { 0x800ff426, KEY_EPG }, /* Guide */
+ { 0x800ff45a, KEY_SUBTITLE }, /* Caption/Teletext */
+ { 0x800ff44d, KEY_TITLE },
+
+ { 0x800ff40c, KEY_POWER },
+ { 0x800ff40d, KEY_PROG1 }, /* Windows MCE button */
+
+};
+
+static struct rc_keymap imon_mce_map = {
+ .map = {
+ .scan = imon_mce,
+ .size = ARRAY_SIZE(imon_mce),
+		/* it's RC6, but with a hardware decoder */
+ .ir_type = IR_TYPE_RC6,
+ .name = RC_MAP_IMON_MCE,
+ }
+};
+
+static int __init init_rc_map_imon_mce(void)
+{
+ return ir_register_map(&imon_mce_map);
+}
+
+static void __exit exit_rc_map_imon_mce(void)
+{
+ ir_unregister_map(&imon_mce_map);
+}
+
+module_init(init_rc_map_imon_mce)
+module_exit(exit_rc_map_imon_mce)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-imon-pad.c b/drivers/media/IR/keymaps/rc-imon-pad.c
new file mode 100644
index 000000000000..bc4db72f02e6
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-imon-pad.c
@@ -0,0 +1,156 @@
+/* rc5-imon-pad.c - Keytable for SoundGraph iMON PAD and Antec Veris
+ * RM-200 Remote Control
+ *
+ * Copyright (c) 2010 by Jarod Wilson <jarod@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/*
+ * standard imon remote key table, which isn't really entirely
+ * "standard", as different receivers decode the same key on the
+ * same remote to different hex codes, and the silkscreened names
+ * vary a bit between the SoundGraph and Antec remotes... ugh.
+ */
+static struct ir_scancode imon_pad[] = {
+ /* keys sorted mostly by frequency of use to optimize lookups */
+ { 0x2a8195b7, KEY_REWIND },
+ { 0x298315b7, KEY_REWIND },
+ { 0x2b8115b7, KEY_FASTFORWARD },
+ { 0x2b8315b7, KEY_FASTFORWARD },
+ { 0x2b9115b7, KEY_PREVIOUS },
+ { 0x298195b7, KEY_NEXT },
+
+ { 0x2a8115b7, KEY_PLAY },
+ { 0x2a8315b7, KEY_PLAY },
+ { 0x2a9115b7, KEY_PAUSE },
+ { 0x2b9715b7, KEY_STOP },
+ { 0x298115b7, KEY_RECORD },
+
+ { 0x01008000, KEY_UP },
+ { 0x01007f00, KEY_DOWN },
+ { 0x01000080, KEY_LEFT },
+ { 0x0100007f, KEY_RIGHT },
+
+ { 0x2aa515b7, KEY_UP },
+ { 0x289515b7, KEY_DOWN },
+ { 0x29a515b7, KEY_LEFT },
+ { 0x2ba515b7, KEY_RIGHT },
+
+ { 0x0200002c, KEY_SPACE }, /* Select/Space */
+ { 0x2a9315b7, KEY_SPACE }, /* Select/Space */
+ { 0x02000028, KEY_ENTER },
+ { 0x28a195b7, KEY_ENTER },
+ { 0x288195b7, KEY_EXIT },
+ { 0x02000029, KEY_ESC },
+ { 0x2bb715b7, KEY_ESC },
+ { 0x0200002a, KEY_BACKSPACE },
+ { 0x28a115b7, KEY_BACKSPACE },
+
+ { 0x2b9595b7, KEY_MUTE },
+ { 0x28a395b7, KEY_VOLUMEUP },
+ { 0x28a595b7, KEY_VOLUMEDOWN },
+ { 0x289395b7, KEY_CHANNELUP },
+ { 0x288795b7, KEY_CHANNELDOWN },
+
+ { 0x0200001e, KEY_NUMERIC_1 },
+ { 0x0200001f, KEY_NUMERIC_2 },
+ { 0x02000020, KEY_NUMERIC_3 },
+ { 0x02000021, KEY_NUMERIC_4 },
+ { 0x02000022, KEY_NUMERIC_5 },
+ { 0x02000023, KEY_NUMERIC_6 },
+ { 0x02000024, KEY_NUMERIC_7 },
+ { 0x02000025, KEY_NUMERIC_8 },
+ { 0x02000026, KEY_NUMERIC_9 },
+ { 0x02000027, KEY_NUMERIC_0 },
+
+ { 0x28b595b7, KEY_NUMERIC_1 },
+ { 0x2bb195b7, KEY_NUMERIC_2 },
+ { 0x28b195b7, KEY_NUMERIC_3 },
+ { 0x2a8595b7, KEY_NUMERIC_4 },
+ { 0x299595b7, KEY_NUMERIC_5 },
+ { 0x2aa595b7, KEY_NUMERIC_6 },
+ { 0x2b9395b7, KEY_NUMERIC_7 },
+ { 0x2a8515b7, KEY_NUMERIC_8 },
+ { 0x2aa115b7, KEY_NUMERIC_9 },
+ { 0x2ba595b7, KEY_NUMERIC_0 },
+
+ { 0x02200025, KEY_NUMERIC_STAR },
+ { 0x28b515b7, KEY_NUMERIC_STAR },
+ { 0x02200020, KEY_NUMERIC_POUND },
+ { 0x29a115b7, KEY_NUMERIC_POUND },
+
+ { 0x2b8515b7, KEY_VIDEO },
+ { 0x299195b7, KEY_AUDIO },
+ { 0x2ba115b7, KEY_CAMERA },
+ { 0x28a515b7, KEY_TV },
+ { 0x29a395b7, KEY_DVD },
+ { 0x29a295b7, KEY_DVD },
+
+ /* the Menu key between DVD and Subtitle on the RM-200... */
+ { 0x2ba385b7, KEY_MENU },
+ { 0x2ba395b7, KEY_MENU },
+
+ { 0x288515b7, KEY_BOOKMARKS },
+ { 0x2ab715b7, KEY_MEDIA }, /* Thumbnail */
+ { 0x298595b7, KEY_SUBTITLE },
+ { 0x2b8595b7, KEY_LANGUAGE },
+
+ { 0x29a595b7, KEY_ZOOM },
+ { 0x2aa395b7, KEY_SCREEN }, /* FullScreen */
+
+ { 0x299115b7, KEY_KEYBOARD },
+ { 0x299135b7, KEY_KEYBOARD },
+
+ { 0x01010000, BTN_LEFT },
+ { 0x01020000, BTN_RIGHT },
+ { 0x01010080, BTN_LEFT },
+ { 0x01020080, BTN_RIGHT },
+ { 0x688301b7, BTN_LEFT },
+ { 0x688481b7, BTN_RIGHT },
+
+ { 0x2a9395b7, KEY_CYCLEWINDOWS }, /* TaskSwitcher */
+ { 0x2b8395b7, KEY_TIME }, /* Timer */
+
+ { 0x289115b7, KEY_POWER },
+ { 0x29b195b7, KEY_EJECTCD }, /* the one next to play */
+ { 0x299395b7, KEY_EJECTCLOSECD }, /* eject (by TaskSw) */
+
+ { 0x02800000, KEY_CONTEXT_MENU }, /* Left Menu */
+	{ 0x2b8195b7, KEY_CONTEXT_MENU },	/* Left Menu */
+ { 0x02000065, KEY_COMPOSE }, /* RightMenu */
+ { 0x28b715b7, KEY_COMPOSE }, /* RightMenu */
+ { 0x2ab195b7, KEY_PROG1 }, /* Go or MultiMon */
+ { 0x29b715b7, KEY_DASHBOARD }, /* AppLauncher */
+};
+
+static struct rc_keymap imon_pad_map = {
+ .map = {
+ .scan = imon_pad,
+ .size = ARRAY_SIZE(imon_pad),
+ /* actual protocol details unknown, hardware decoder */
+ .ir_type = IR_TYPE_OTHER,
+ .name = RC_MAP_IMON_PAD,
+ }
+};
+
+static int __init init_rc_map_imon_pad(void)
+{
+ return ir_register_map(&imon_pad_map);
+}
+
+static void __exit exit_rc_map_imon_pad(void)
+{
+ ir_unregister_map(&imon_pad_map);
+}
+
+module_init(init_rc_map_imon_pad)
+module_exit(exit_rc_map_imon_pad)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-iodata-bctv7e.c b/drivers/media/IR/keymaps/rc-iodata-bctv7e.c
new file mode 100644
index 000000000000..ef6600259fc0
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-iodata-bctv7e.c
@@ -0,0 +1,88 @@
+/* iodata-bctv7e.h - Keytable for iodata_bctv7e Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* IO-DATA BCTV7E Remote */
+
+static struct ir_scancode iodata_bctv7e[] = {
+ { 0x40, KEY_TV },
+ { 0x20, KEY_RADIO }, /* FM */
+ { 0x60, KEY_EPG },
+ { 0x00, KEY_POWER },
+
+ /* Keys 0 to 9 */
+ { 0x44, KEY_0 }, /* 10 */
+ { 0x50, KEY_1 },
+ { 0x30, KEY_2 },
+ { 0x70, KEY_3 },
+ { 0x48, KEY_4 },
+ { 0x28, KEY_5 },
+ { 0x68, KEY_6 },
+ { 0x58, KEY_7 },
+ { 0x38, KEY_8 },
+ { 0x78, KEY_9 },
+
+ { 0x10, KEY_L }, /* Live */
+ { 0x08, KEY_TIME }, /* Time Shift */
+
+ { 0x18, KEY_PLAYPAUSE }, /* Play */
+
+ { 0x24, KEY_ENTER }, /* 11 */
+ { 0x64, KEY_ESC }, /* 12 */
+ { 0x04, KEY_M }, /* Multi */
+
+ { 0x54, KEY_VIDEO },
+ { 0x34, KEY_CHANNELUP },
+ { 0x74, KEY_VOLUMEUP },
+ { 0x14, KEY_MUTE },
+
+ { 0x4c, KEY_VCR }, /* SVIDEO */
+ { 0x2c, KEY_CHANNELDOWN },
+ { 0x6c, KEY_VOLUMEDOWN },
+ { 0x0c, KEY_ZOOM },
+
+ { 0x5c, KEY_PAUSE },
+ { 0x3c, KEY_RED }, /* || (red) */
+ { 0x7c, KEY_RECORD }, /* recording */
+ { 0x1c, KEY_STOP },
+
+ { 0x41, KEY_REWIND }, /* backward << */
+ { 0x21, KEY_PLAY },
+ { 0x61, KEY_FASTFORWARD }, /* forward >> */
+ { 0x01, KEY_NEXT }, /* skip >| */
+};
+
+static struct rc_keymap iodata_bctv7e_map = {
+ .map = {
+ .scan = iodata_bctv7e,
+ .size = ARRAY_SIZE(iodata_bctv7e),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_IODATA_BCTV7E,
+ }
+};
+
+static int __init init_rc_map_iodata_bctv7e(void)
+{
+ return ir_register_map(&iodata_bctv7e_map);
+}
+
+static void __exit exit_rc_map_iodata_bctv7e(void)
+{
+ ir_unregister_map(&iodata_bctv7e_map);
+}
+
+module_init(init_rc_map_iodata_bctv7e)
+module_exit(exit_rc_map_iodata_bctv7e)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-kaiomy.c b/drivers/media/IR/keymaps/rc-kaiomy.c
new file mode 100644
index 000000000000..4c7883ba0f15
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-kaiomy.c
@@ -0,0 +1,87 @@
+/* kaiomy.h - Keytable for kaiomy Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Kaiomy TVnPC U2
+ Mauro Carvalho Chehab <mchehab@infradead.org>
+ */
+
+static struct ir_scancode kaiomy[] = {
+ { 0x43, KEY_POWER2},
+ { 0x01, KEY_LIST},
+ { 0x0b, KEY_ZOOM},
+ { 0x03, KEY_POWER},
+
+ { 0x04, KEY_1},
+ { 0x08, KEY_2},
+ { 0x02, KEY_3},
+
+ { 0x0f, KEY_4},
+ { 0x05, KEY_5},
+ { 0x06, KEY_6},
+
+ { 0x0c, KEY_7},
+ { 0x0d, KEY_8},
+ { 0x0a, KEY_9},
+
+ { 0x11, KEY_0},
+
+ { 0x09, KEY_CHANNELUP},
+ { 0x07, KEY_CHANNELDOWN},
+
+ { 0x0e, KEY_VOLUMEUP},
+ { 0x13, KEY_VOLUMEDOWN},
+
+ { 0x10, KEY_HOME},
+ { 0x12, KEY_ENTER},
+
+ { 0x14, KEY_RECORD},
+ { 0x15, KEY_STOP},
+ { 0x16, KEY_PLAY},
+ { 0x17, KEY_MUTE},
+
+ { 0x18, KEY_UP},
+ { 0x19, KEY_DOWN},
+ { 0x1a, KEY_LEFT},
+ { 0x1b, KEY_RIGHT},
+
+ { 0x1c, KEY_RED},
+ { 0x1d, KEY_GREEN},
+ { 0x1e, KEY_YELLOW},
+ { 0x1f, KEY_BLUE},
+};
+
+static struct rc_keymap kaiomy_map = {
+ .map = {
+ .scan = kaiomy,
+ .size = ARRAY_SIZE(kaiomy),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_KAIOMY,
+ }
+};
+
+static int __init init_rc_map_kaiomy(void)
+{
+ return ir_register_map(&kaiomy_map);
+}
+
+static void __exit exit_rc_map_kaiomy(void)
+{
+ ir_unregister_map(&kaiomy_map);
+}
+
+module_init(init_rc_map_kaiomy)
+module_exit(exit_rc_map_kaiomy)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-kworld-315u.c b/drivers/media/IR/keymaps/rc-kworld-315u.c
new file mode 100644
index 000000000000..618c817374e6
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-kworld-315u.c
@@ -0,0 +1,83 @@
+/* kworld-315u.h - Keytable for kworld_315u Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Kworld 315U
+ */
+
+static struct ir_scancode kworld_315u[] = {
+ { 0x6143, KEY_POWER },
+ { 0x6101, KEY_TUNER }, /* source */
+ { 0x610b, KEY_ZOOM },
+ { 0x6103, KEY_POWER2 }, /* shutdown */
+
+ { 0x6104, KEY_1 },
+ { 0x6108, KEY_2 },
+ { 0x6102, KEY_3 },
+ { 0x6109, KEY_CHANNELUP },
+
+ { 0x610f, KEY_4 },
+ { 0x6105, KEY_5 },
+ { 0x6106, KEY_6 },
+ { 0x6107, KEY_CHANNELDOWN },
+
+ { 0x610c, KEY_7 },
+ { 0x610d, KEY_8 },
+ { 0x610a, KEY_9 },
+ { 0x610e, KEY_VOLUMEUP },
+
+ { 0x6110, KEY_LAST },
+ { 0x6111, KEY_0 },
+ { 0x6112, KEY_ENTER },
+ { 0x6113, KEY_VOLUMEDOWN },
+
+ { 0x6114, KEY_RECORD },
+ { 0x6115, KEY_STOP },
+ { 0x6116, KEY_PLAY },
+ { 0x6117, KEY_MUTE },
+
+ { 0x6118, KEY_UP },
+ { 0x6119, KEY_DOWN },
+ { 0x611a, KEY_LEFT },
+ { 0x611b, KEY_RIGHT },
+
+ { 0x611c, KEY_RED },
+ { 0x611d, KEY_GREEN },
+ { 0x611e, KEY_YELLOW },
+ { 0x611f, KEY_BLUE },
+};
+
+static struct rc_keymap kworld_315u_map = {
+ .map = {
+ .scan = kworld_315u,
+ .size = ARRAY_SIZE(kworld_315u),
+ .ir_type = IR_TYPE_NEC,
+ .name = RC_MAP_KWORLD_315U,
+ }
+};
+
+static int __init init_rc_map_kworld_315u(void)
+{
+ return ir_register_map(&kworld_315u_map);
+}
+
+static void __exit exit_rc_map_kworld_315u(void)
+{
+ ir_unregister_map(&kworld_315u_map);
+}
+
+module_init(init_rc_map_kworld_315u)
+module_exit(exit_rc_map_kworld_315u)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-kworld-plus-tv-analog.c b/drivers/media/IR/keymaps/rc-kworld-plus-tv-analog.c
new file mode 100644
index 000000000000..366732f1f7b7
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-kworld-plus-tv-analog.c
@@ -0,0 +1,99 @@
+/* kworld-plus-tv-analog.h - Keytable for kworld_plus_tv_analog Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Kworld Plus TV Analog Lite PCI IR
+ Mauro Carvalho Chehab <mchehab@infradead.org>
+ */
+
+static struct ir_scancode kworld_plus_tv_analog[] = {
+ { 0x0c, KEY_PROG1 }, /* Kworld key */
+ { 0x16, KEY_CLOSECD }, /* -> ) */
+ { 0x1d, KEY_POWER2 },
+
+ { 0x00, KEY_1 },
+ { 0x01, KEY_2 },
+ { 0x02, KEY_3 }, /* Two keys have the same code: 3 and left */
+	{ 0x03, KEY_4 },	/* Two keys have the same code: 4 and right */
+ { 0x04, KEY_5 },
+ { 0x05, KEY_6 },
+ { 0x06, KEY_7 },
+ { 0x07, KEY_8 },
+ { 0x08, KEY_9 },
+ { 0x0a, KEY_0 },
+
+ { 0x09, KEY_AGAIN },
+ { 0x14, KEY_MUTE },
+
+ { 0x20, KEY_UP },
+ { 0x21, KEY_DOWN },
+ { 0x0b, KEY_ENTER },
+
+ { 0x10, KEY_CHANNELUP },
+ { 0x11, KEY_CHANNELDOWN },
+
+	/* Couldn't map Key Left/Key Right since those
+	   conflict with the '3' and '4' scancodes.
+	   It is unclear what the original driver does.
+	 */
+
+ { 0x13, KEY_VOLUMEUP },
+ { 0x12, KEY_VOLUMEDOWN },
+
+	/* The lower part of the remote:
+	   there are several duplicated keycodes there,
+	   most of which conflict with the digits.
+	   Add mappings just for the unused scancodes.
+	   Somehow, the original driver has a way to tell
+	   them apart, but it does not seem to be done via
+	   some GPIO, nor is it related to the time between
+	   keyup and keydown.
+	 */
+ { 0x19, KEY_TIME}, /* Timeshift */
+ { 0x1a, KEY_STOP},
+ { 0x1b, KEY_RECORD},
+
+ { 0x22, KEY_TEXT},
+
+ { 0x15, KEY_AUDIO}, /* ((*)) */
+ { 0x0f, KEY_ZOOM},
+ { 0x1c, KEY_CAMERA}, /* snapshot */
+
+ { 0x18, KEY_RED}, /* B */
+ { 0x23, KEY_GREEN}, /* C */
+};
+
+static struct rc_keymap kworld_plus_tv_analog_map = {
+ .map = {
+ .scan = kworld_plus_tv_analog,
+ .size = ARRAY_SIZE(kworld_plus_tv_analog),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_KWORLD_PLUS_TV_ANALOG,
+ }
+};
+
+static int __init init_rc_map_kworld_plus_tv_analog(void)
+{
+ return ir_register_map(&kworld_plus_tv_analog_map);
+}
+
+static void __exit exit_rc_map_kworld_plus_tv_analog(void)
+{
+ ir_unregister_map(&kworld_plus_tv_analog_map);
+}
+
+module_init(init_rc_map_kworld_plus_tv_analog)
+module_exit(exit_rc_map_kworld_plus_tv_analog)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-manli.c b/drivers/media/IR/keymaps/rc-manli.c
new file mode 100644
index 000000000000..1e9fbfa90a1e
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-manli.c
@@ -0,0 +1,135 @@
+/* manli.h - Keytable for manli Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Michael Tokarev <mjt@tls.msk.ru>
+   http://www.corpit.ru/mjt/beholdTV/remote_control.jpg
+   This keytable is used by MANLI MTV00[0x0c] and BeholdTV 40[13] at
+   least, and probably by other cards too.
+   The "ascii-art picture" below (in the comments: the first row
+   is the keycode in hex, and the subsequent row(s) show
+   the button labels, with several variants when appropriate)
+   helps to decide which keycodes to assign to the buttons.
+ */
+
+static struct ir_scancode manli[] = {
+
+ /* 0x1c 0x12 *
+ * FUNCTION POWER *
+ * FM (|) *
+ * */
+ { 0x1c, KEY_RADIO }, /*XXX*/
+ { 0x12, KEY_POWER },
+
+ /* 0x01 0x02 0x03 *
+ * 1 2 3 *
+ * *
+ * 0x04 0x05 0x06 *
+ * 4 5 6 *
+ * *
+ * 0x07 0x08 0x09 *
+ * 7 8 9 *
+ * */
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+
+ /* 0x0a 0x00 0x17 *
+ * RECALL 0 +100 *
+ * PLUS *
+ * */
+ { 0x0a, KEY_AGAIN }, /*XXX KEY_REWIND? */
+ { 0x00, KEY_0 },
+ { 0x17, KEY_DIGITS }, /*XXX*/
+
+ /* 0x14 0x10 *
+ * MENU INFO *
+ * OSD */
+ { 0x14, KEY_MENU },
+ { 0x10, KEY_INFO },
+
+ /* 0x0b *
+ * Up *
+ * *
+ * 0x18 0x16 0x0c *
+ * Left Ok Right *
+ * *
+	 *         0x15          *
+ * Down *
+ * */
+ { 0x0b, KEY_UP },
+ { 0x18, KEY_LEFT },
+ { 0x16, KEY_OK }, /*XXX KEY_SELECT? KEY_ENTER? */
+ { 0x0c, KEY_RIGHT },
+ { 0x15, KEY_DOWN },
+
+ /* 0x11 0x0d *
+ * TV/AV MODE *
+ * SOURCE STEREO *
+ * */
+ { 0x11, KEY_TV }, /*XXX*/
+ { 0x0d, KEY_MODE }, /*XXX there's no KEY_STEREO */
+
+ /* 0x0f 0x1b 0x1a *
+ * AUDIO Vol+ Chan+ *
+ * TIMESHIFT??? *
+ * *
+ * 0x0e 0x1f 0x1e *
+ * SLEEP Vol- Chan- *
+ * */
+ { 0x0f, KEY_AUDIO },
+ { 0x1b, KEY_VOLUMEUP },
+ { 0x1a, KEY_CHANNELUP },
+ { 0x0e, KEY_TIME },
+ { 0x1f, KEY_VOLUMEDOWN },
+ { 0x1e, KEY_CHANNELDOWN },
+
+ /* 0x13 0x19 *
+ * MUTE SNAPSHOT*
+ * */
+ { 0x13, KEY_MUTE },
+ { 0x19, KEY_CAMERA },
+
+ /* 0x1d unused ? */
+};
+
+static struct rc_keymap manli_map = {
+ .map = {
+ .scan = manli,
+ .size = ARRAY_SIZE(manli),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_MANLI,
+ }
+};
+
+static int __init init_rc_map_manli(void)
+{
+ return ir_register_map(&manli_map);
+}
+
+static void __exit exit_rc_map_manli(void)
+{
+ ir_unregister_map(&manli_map);
+}
+
+module_init(init_rc_map_manli)
+module_exit(exit_rc_map_manli)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-msi-tvanywhere-plus.c b/drivers/media/IR/keymaps/rc-msi-tvanywhere-plus.c
new file mode 100644
index 000000000000..eb8e42c18ff9
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-msi-tvanywhere-plus.c
@@ -0,0 +1,123 @@
+/* msi-tvanywhere-plus.h - Keytable for msi_tvanywhere_plus Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/*
+  Keycodes for the remote on the MSI TV@nywhere Plus. The controller IC on the
+  card is marked "KS003". The controller sits on the I2C bus at address 0x30,
+  but does not seem to respond to probes until a read is performed from a
+  valid device. I don't know why...
+
+ Note: This remote may be of similar or identical design to the
+ Pixelview remote (?). The raw codes and duplicate button codes
+ appear to be the same.
+
+ Henry Wong <henry@stuffedcow.net>
+ Some changes to formatting and keycodes by Mark Schultz <n9xmj@yahoo.com>
+*/
+
+static struct ir_scancode msi_tvanywhere_plus[] = {
+
+/* ---- Remote Button Layout ----
+
+ POWER SOURCE SCAN MUTE
+ TV/FM 1 2 3
+ |> 4 5 6
+ <| 7 8 9
+ ^^UP 0 + RECALL
+ vvDN RECORD STOP PLAY
+
+ MINIMIZE ZOOM
+
+ CH+
+ VOL- VOL+
+ CH-
+
+ SNAPSHOT MTS
+
+ << FUNC >> RESET
+*/
+
+ { 0x01, KEY_1 }, /* 1 */
+ { 0x0b, KEY_2 }, /* 2 */
+ { 0x1b, KEY_3 }, /* 3 */
+ { 0x05, KEY_4 }, /* 4 */
+ { 0x09, KEY_5 }, /* 5 */
+ { 0x15, KEY_6 }, /* 6 */
+ { 0x06, KEY_7 }, /* 7 */
+ { 0x0a, KEY_8 }, /* 8 */
+ { 0x12, KEY_9 }, /* 9 */
+ { 0x02, KEY_0 }, /* 0 */
+ { 0x10, KEY_KPPLUS }, /* + */
+ { 0x13, KEY_AGAIN }, /* Recall */
+
+ { 0x1e, KEY_POWER }, /* Power */
+ { 0x07, KEY_TUNER }, /* Source */
+ { 0x1c, KEY_SEARCH }, /* Scan */
+ { 0x18, KEY_MUTE }, /* Mute */
+
+ { 0x03, KEY_RADIO }, /* TV/FM */
+ /* The next four keys are duplicates that appear to send the
+ same IR code as Ch+, Ch-, >>, and << . The raw code assigned
+ to them is the actual code + 0x20 - they will never be
+ detected as such unless some way is discovered to distinguish
+ these buttons from those that have the same code. */
+ { 0x3f, KEY_RIGHT }, /* |> and Ch+ */
+ { 0x37, KEY_LEFT }, /* <| and Ch- */
+ { 0x2c, KEY_UP }, /* ^^Up and >> */
+ { 0x24, KEY_DOWN }, /* vvDn and << */
+
+ { 0x00, KEY_RECORD }, /* Record */
+ { 0x08, KEY_STOP }, /* Stop */
+ { 0x11, KEY_PLAY }, /* Play */
+
+ { 0x0f, KEY_CLOSE }, /* Minimize */
+ { 0x19, KEY_ZOOM }, /* Zoom */
+ { 0x1a, KEY_CAMERA }, /* Snapshot */
+ { 0x0d, KEY_LANGUAGE }, /* MTS */
+
+ { 0x14, KEY_VOLUMEDOWN }, /* Vol- */
+ { 0x16, KEY_VOLUMEUP }, /* Vol+ */
+ { 0x17, KEY_CHANNELDOWN }, /* Ch- */
+ { 0x1f, KEY_CHANNELUP }, /* Ch+ */
+
+ { 0x04, KEY_REWIND }, /* << */
+ { 0x0e, KEY_MENU }, /* Function */
+ { 0x0c, KEY_FASTFORWARD }, /* >> */
+ { 0x1d, KEY_RESTART }, /* Reset */
+};
+
+static struct rc_keymap msi_tvanywhere_plus_map = {
+ .map = {
+ .scan = msi_tvanywhere_plus,
+ .size = ARRAY_SIZE(msi_tvanywhere_plus),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_MSI_TVANYWHERE_PLUS,
+ }
+};
+
+static int __init init_rc_map_msi_tvanywhere_plus(void)
+{
+ return ir_register_map(&msi_tvanywhere_plus_map);
+}
+
+static void __exit exit_rc_map_msi_tvanywhere_plus(void)
+{
+ ir_unregister_map(&msi_tvanywhere_plus_map);
+}
+
+module_init(init_rc_map_msi_tvanywhere_plus)
+module_exit(exit_rc_map_msi_tvanywhere_plus)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
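
The duplicate-key note in the keymap above relies on a simple convention: each duplicate button is stored as the real scancode plus 0x20. The following is a stand-alone sketch (plain user-space C, illustrative only, not part of the driver) that checks the arithmetic against the values in the table:

#include <assert.h>

int main(void)
{
	/* real scancode + 0x20 == duplicate scancode listed in the table above */
	assert(0x1f + 0x20 == 0x3f);	/* Ch+  and "|>"   */
	assert(0x17 + 0x20 == 0x37);	/* Ch-  and "<|"   */
	assert(0x0c + 0x20 == 0x2c);	/* >>   and "^^Up" */
	assert(0x04 + 0x20 == 0x24);	/* <<   and "vvDn" */
	return 0;
}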
diff --git a/drivers/media/IR/keymaps/rc-msi-tvanywhere.c b/drivers/media/IR/keymaps/rc-msi-tvanywhere.c
new file mode 100644
index 000000000000..ef411854f067
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-msi-tvanywhere.c
@@ -0,0 +1,69 @@
+/* msi-tvanywhere.h - Keytable for msi_tvanywhere Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* MSI TV@nywhere MASTER remote */
+
+static struct ir_scancode msi_tvanywhere[] = {
+ /* Keys 0 to 9 */
+ { 0x00, KEY_0 },
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+
+ { 0x0c, KEY_MUTE },
+ { 0x0f, KEY_SCREEN }, /* Full Screen */
+ { 0x10, KEY_FN }, /* Function */
+ { 0x11, KEY_TIME }, /* Time shift */
+ { 0x12, KEY_POWER },
+ { 0x13, KEY_MEDIA }, /* MTS */
+ { 0x14, KEY_SLOW },
+ { 0x16, KEY_REWIND }, /* backward << */
+ { 0x17, KEY_ENTER }, /* Return */
+ { 0x18, KEY_FASTFORWARD }, /* forward >> */
+ { 0x1a, KEY_CHANNELUP },
+ { 0x1b, KEY_VOLUMEUP },
+ { 0x1e, KEY_CHANNELDOWN },
+ { 0x1f, KEY_VOLUMEDOWN },
+};
+
+static struct rc_keymap msi_tvanywhere_map = {
+ .map = {
+ .scan = msi_tvanywhere,
+ .size = ARRAY_SIZE(msi_tvanywhere),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_MSI_TVANYWHERE,
+ }
+};
+
+static int __init init_rc_map_msi_tvanywhere(void)
+{
+ return ir_register_map(&msi_tvanywhere_map);
+}
+
+static void __exit exit_rc_map_msi_tvanywhere(void)
+{
+ ir_unregister_map(&msi_tvanywhere_map);
+}
+
+module_init(init_rc_map_msi_tvanywhere)
+module_exit(exit_rc_map_msi_tvanywhere)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-nebula.c b/drivers/media/IR/keymaps/rc-nebula.c
new file mode 100644
index 000000000000..ccc50eb402ec
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-nebula.c
@@ -0,0 +1,96 @@
+/* nebula.h - Keytable for nebula Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode nebula[] = {
+ { 0x00, KEY_0 },
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+ { 0x0a, KEY_TV },
+ { 0x0b, KEY_AUX },
+ { 0x0c, KEY_DVD },
+ { 0x0d, KEY_POWER },
+ { 0x0e, KEY_MHP }, /* labelled 'Picture' */
+ { 0x0f, KEY_AUDIO },
+ { 0x10, KEY_INFO },
+ { 0x11, KEY_F13 }, /* 16:9 */
+ { 0x12, KEY_F14 }, /* 14:9 */
+ { 0x13, KEY_EPG },
+ { 0x14, KEY_EXIT },
+ { 0x15, KEY_MENU },
+ { 0x16, KEY_UP },
+ { 0x17, KEY_DOWN },
+ { 0x18, KEY_LEFT },
+ { 0x19, KEY_RIGHT },
+ { 0x1a, KEY_ENTER },
+ { 0x1b, KEY_CHANNELUP },
+ { 0x1c, KEY_CHANNELDOWN },
+ { 0x1d, KEY_VOLUMEUP },
+ { 0x1e, KEY_VOLUMEDOWN },
+ { 0x1f, KEY_RED },
+ { 0x20, KEY_GREEN },
+ { 0x21, KEY_YELLOW },
+ { 0x22, KEY_BLUE },
+ { 0x23, KEY_SUBTITLE },
+ { 0x24, KEY_F15 }, /* AD */
+ { 0x25, KEY_TEXT },
+ { 0x26, KEY_MUTE },
+ { 0x27, KEY_REWIND },
+ { 0x28, KEY_STOP },
+ { 0x29, KEY_PLAY },
+ { 0x2a, KEY_FASTFORWARD },
+ { 0x2b, KEY_F16 }, /* chapter */
+ { 0x2c, KEY_PAUSE },
+ { 0x2d, KEY_PLAY },
+ { 0x2e, KEY_RECORD },
+ { 0x2f, KEY_F17 }, /* picture in picture */
+ { 0x30, KEY_KPPLUS }, /* zoom in */
+ { 0x31, KEY_KPMINUS }, /* zoom out */
+ { 0x32, KEY_F18 }, /* capture */
+ { 0x33, KEY_F19 }, /* web */
+ { 0x34, KEY_EMAIL },
+ { 0x35, KEY_PHONE },
+ { 0x36, KEY_PC },
+};
+
+static struct rc_keymap nebula_map = {
+ .map = {
+ .scan = nebula,
+ .size = ARRAY_SIZE(nebula),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_NEBULA,
+ }
+};
+
+static int __init init_rc_map_nebula(void)
+{
+ return ir_register_map(&nebula_map);
+}
+
+static void __exit exit_rc_map_nebula(void)
+{
+ ir_unregister_map(&nebula_map);
+}
+
+module_init(init_rc_map_nebula)
+module_exit(exit_rc_map_nebula)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-nec-terratec-cinergy-xs.c b/drivers/media/IR/keymaps/rc-nec-terratec-cinergy-xs.c
new file mode 100644
index 000000000000..e1b54d20db60
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-nec-terratec-cinergy-xs.c
@@ -0,0 +1,105 @@
+/* nec-terratec-cinergy-xs.h - Keytable for nec_terratec_cinergy_xs Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Terratec Cinergy Hybrid T USB XS FM
+ Mauro Carvalho Chehab <mchehab@redhat.com>
+ */
+
+static struct ir_scancode nec_terratec_cinergy_xs[] = {
+ { 0x1441, KEY_HOME},
+ { 0x1401, KEY_POWER2},
+
+ { 0x1442, KEY_MENU}, /* DVD menu */
+ { 0x1443, KEY_SUBTITLE},
+ { 0x1444, KEY_TEXT}, /* Teletext */
+ { 0x1445, KEY_DELETE},
+
+ { 0x1402, KEY_1},
+ { 0x1403, KEY_2},
+ { 0x1404, KEY_3},
+ { 0x1405, KEY_4},
+ { 0x1406, KEY_5},
+ { 0x1407, KEY_6},
+ { 0x1408, KEY_7},
+ { 0x1409, KEY_8},
+ { 0x140a, KEY_9},
+ { 0x140c, KEY_0},
+
+ { 0x140b, KEY_TUNER}, /* AV */
+ { 0x140d, KEY_MODE}, /* A.B */
+
+ { 0x1446, KEY_TV},
+ { 0x1447, KEY_DVD},
+ { 0x1449, KEY_VIDEO},
+ { 0x144a, KEY_RADIO}, /* Music */
+ { 0x144b, KEY_CAMERA}, /* PIC */
+
+ { 0x1410, KEY_UP},
+ { 0x1411, KEY_LEFT},
+ { 0x1412, KEY_OK},
+ { 0x1413, KEY_RIGHT},
+ { 0x1414, KEY_DOWN},
+
+ { 0x140f, KEY_EPG},
+ { 0x1416, KEY_INFO},
+ { 0x144d, KEY_BACKSPACE},
+
+ { 0x141c, KEY_VOLUMEUP},
+ { 0x141e, KEY_VOLUMEDOWN},
+
+ { 0x144c, KEY_PLAY},
+ { 0x141d, KEY_MUTE},
+
+ { 0x141b, KEY_CHANNELUP},
+ { 0x141f, KEY_CHANNELDOWN},
+
+ { 0x1417, KEY_RED},
+ { 0x1418, KEY_GREEN},
+ { 0x1419, KEY_YELLOW},
+ { 0x141a, KEY_BLUE},
+
+ { 0x1458, KEY_RECORD},
+ { 0x1448, KEY_STOP},
+ { 0x1440, KEY_PAUSE},
+
+ { 0x1454, KEY_LAST},
+ { 0x144e, KEY_REWIND},
+ { 0x144f, KEY_FASTFORWARD},
+ { 0x145c, KEY_NEXT},
+};
+
+static struct rc_keymap nec_terratec_cinergy_xs_map = {
+ .map = {
+ .scan = nec_terratec_cinergy_xs,
+ .size = ARRAY_SIZE(nec_terratec_cinergy_xs),
+ .ir_type = IR_TYPE_NEC,
+ .name = RC_MAP_NEC_TERRATEC_CINERGY_XS,
+ }
+};
+
+static int __init init_rc_map_nec_terratec_cinergy_xs(void)
+{
+ return ir_register_map(&nec_terratec_cinergy_xs_map);
+}
+
+static void __exit exit_rc_map_nec_terratec_cinergy_xs(void)
+{
+ ir_unregister_map(&nec_terratec_cinergy_xs_map);
+}
+
+module_init(init_rc_map_nec_terratec_cinergy_xs)
+module_exit(exit_rc_map_nec_terratec_cinergy_xs)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-norwood.c b/drivers/media/IR/keymaps/rc-norwood.c
new file mode 100644
index 000000000000..e5849a6b3f05
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-norwood.c
@@ -0,0 +1,85 @@
+/* norwood.h - Keytable for norwood Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Norwood Micro (non-Pro) TV Tuner
+ By Peter Naulls <peter@chocky.org>
+ Key comments are the functions given in the manual */
+
+static struct ir_scancode norwood[] = {
+ /* Keys 0 to 9 */
+ { 0x20, KEY_0 },
+ { 0x21, KEY_1 },
+ { 0x22, KEY_2 },
+ { 0x23, KEY_3 },
+ { 0x24, KEY_4 },
+ { 0x25, KEY_5 },
+ { 0x26, KEY_6 },
+ { 0x27, KEY_7 },
+ { 0x28, KEY_8 },
+ { 0x29, KEY_9 },
+
+ { 0x78, KEY_TUNER }, /* Video Source */
+ { 0x2c, KEY_EXIT }, /* Open/Close software */
+ { 0x2a, KEY_SELECT }, /* 2 Digit Select */
+ { 0x69, KEY_AGAIN }, /* Recall */
+
+ { 0x32, KEY_BRIGHTNESSUP }, /* Brightness increase */
+ { 0x33, KEY_BRIGHTNESSDOWN }, /* Brightness decrease */
+ { 0x6b, KEY_KPPLUS }, /* (not named >>>>>) */
+ { 0x6c, KEY_KPMINUS }, /* (not named <<<<<) */
+
+ { 0x2d, KEY_MUTE }, /* Mute */
+ { 0x30, KEY_VOLUMEUP }, /* Volume up */
+ { 0x31, KEY_VOLUMEDOWN }, /* Volume down */
+ { 0x60, KEY_CHANNELUP }, /* Channel up */
+ { 0x61, KEY_CHANNELDOWN }, /* Channel down */
+
+ { 0x3f, KEY_RECORD }, /* Record */
+ { 0x37, KEY_PLAY }, /* Play */
+ { 0x36, KEY_PAUSE }, /* Pause */
+ { 0x2b, KEY_STOP }, /* Stop */
+ { 0x67, KEY_FASTFORWARD }, /* Forward */
+ { 0x66, KEY_REWIND }, /* Rewind */
+ { 0x3e, KEY_SEARCH }, /* Auto Scan */
+ { 0x2e, KEY_CAMERA }, /* Capture Video */
+ { 0x6d, KEY_MENU }, /* Show/Hide Control */
+ { 0x2f, KEY_ZOOM }, /* Full Screen */
+ { 0x34, KEY_RADIO }, /* FM */
+ { 0x65, KEY_POWER }, /* Computer power */
+};
+
+static struct rc_keymap norwood_map = {
+ .map = {
+ .scan = norwood,
+ .size = ARRAY_SIZE(norwood),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_NORWOOD,
+ }
+};
+
+static int __init init_rc_map_norwood(void)
+{
+ return ir_register_map(&norwood_map);
+}
+
+static void __exit exit_rc_map_norwood(void)
+{
+ ir_unregister_map(&norwood_map);
+}
+
+module_init(init_rc_map_norwood)
+module_exit(exit_rc_map_norwood)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-npgtech.c b/drivers/media/IR/keymaps/rc-npgtech.c
new file mode 100644
index 000000000000..b9ece1e90296
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-npgtech.c
@@ -0,0 +1,80 @@
+/* npgtech.h - Keytable for npgtech Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode npgtech[] = {
+ { 0x1d, KEY_SWITCHVIDEOMODE }, /* switch inputs */
+ { 0x2a, KEY_FRONT },
+
+ { 0x3e, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x06, KEY_3 },
+ { 0x0a, KEY_4 },
+ { 0x0e, KEY_5 },
+ { 0x12, KEY_6 },
+ { 0x16, KEY_7 },
+ { 0x1a, KEY_8 },
+ { 0x1e, KEY_9 },
+ { 0x3a, KEY_0 },
+ { 0x22, KEY_NUMLOCK }, /* -/-- */
+ { 0x20, KEY_REFRESH },
+
+ { 0x03, KEY_BRIGHTNESSDOWN },
+ { 0x28, KEY_AUDIO },
+ { 0x3c, KEY_CHANNELUP },
+ { 0x3f, KEY_VOLUMEDOWN },
+ { 0x2e, KEY_MUTE },
+ { 0x3b, KEY_VOLUMEUP },
+ { 0x00, KEY_CHANNELDOWN },
+ { 0x07, KEY_BRIGHTNESSUP },
+ { 0x2c, KEY_TEXT },
+
+ { 0x37, KEY_RECORD },
+ { 0x17, KEY_PLAY },
+ { 0x13, KEY_PAUSE },
+ { 0x26, KEY_STOP },
+ { 0x18, KEY_FASTFORWARD },
+ { 0x14, KEY_REWIND },
+ { 0x33, KEY_ZOOM },
+ { 0x32, KEY_KEYBOARD },
+ { 0x30, KEY_GOTO }, /* Pointing arrow */
+ { 0x36, KEY_MACRO }, /* Maximize/Minimize (yellow) */
+ { 0x0b, KEY_RADIO },
+ { 0x10, KEY_POWER },
+
+};
+
+static struct rc_keymap npgtech_map = {
+ .map = {
+ .scan = npgtech,
+ .size = ARRAY_SIZE(npgtech),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_NPGTECH,
+ }
+};
+
+static int __init init_rc_map_npgtech(void)
+{
+ return ir_register_map(&npgtech_map);
+}
+
+static void __exit exit_rc_map_npgtech(void)
+{
+ ir_unregister_map(&npgtech_map);
+}
+
+module_init(init_rc_map_npgtech)
+module_exit(exit_rc_map_npgtech)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-pctv-sedna.c b/drivers/media/IR/keymaps/rc-pctv-sedna.c
new file mode 100644
index 000000000000..4129bb44a25b
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-pctv-sedna.c
@@ -0,0 +1,80 @@
+/* pctv-sedna.h - Keytable for pctv_sedna Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Mapping for the 28 key remote control as seen at
+ http://www.sednacomputer.com/photo/cardbus-tv.jpg
+ Pavel Mihaylov <bin@bash.info>
+ Also for the remote bundled with Kozumi KTV-01C card */
+
+static struct ir_scancode pctv_sedna[] = {
+ { 0x00, KEY_0 },
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+
+ { 0x0a, KEY_AGAIN }, /* Recall */
+ { 0x0b, KEY_CHANNELUP },
+ { 0x0c, KEY_VOLUMEUP },
+ { 0x0d, KEY_MODE }, /* Stereo */
+ { 0x0e, KEY_STOP },
+ { 0x0f, KEY_PREVIOUSSONG },
+ { 0x10, KEY_ZOOM },
+ { 0x11, KEY_TUNER }, /* Source */
+ { 0x12, KEY_POWER },
+ { 0x13, KEY_MUTE },
+ { 0x15, KEY_CHANNELDOWN },
+ { 0x18, KEY_VOLUMEDOWN },
+ { 0x19, KEY_CAMERA }, /* Snapshot */
+ { 0x1a, KEY_NEXTSONG },
+ { 0x1b, KEY_TIME }, /* Time Shift */
+ { 0x1c, KEY_RADIO }, /* FM Radio */
+ { 0x1d, KEY_RECORD },
+ { 0x1e, KEY_PAUSE },
+ /* additional codes for Kozumi's remote */
+ { 0x14, KEY_INFO }, /* OSD */
+ { 0x16, KEY_OK }, /* OK */
+ { 0x17, KEY_DIGITS }, /* Plus */
+ { 0x1f, KEY_PLAY }, /* Play */
+};
+
+static struct rc_keymap pctv_sedna_map = {
+ .map = {
+ .scan = pctv_sedna,
+ .size = ARRAY_SIZE(pctv_sedna),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_PCTV_SEDNA,
+ }
+};
+
+static int __init init_rc_map_pctv_sedna(void)
+{
+ return ir_register_map(&pctv_sedna_map);
+}
+
+static void __exit exit_rc_map_pctv_sedna(void)
+{
+ ir_unregister_map(&pctv_sedna_map);
+}
+
+module_init(init_rc_map_pctv_sedna)
+module_exit(exit_rc_map_pctv_sedna)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-pinnacle-color.c b/drivers/media/IR/keymaps/rc-pinnacle-color.c
new file mode 100644
index 000000000000..326e023ce126
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-pinnacle-color.c
@@ -0,0 +1,94 @@
+/* pinnacle-color.h - Keytable for pinnacle_color Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode pinnacle_color[] = {
+ { 0x59, KEY_MUTE },
+ { 0x4a, KEY_POWER },
+
+ { 0x18, KEY_TEXT },
+ { 0x26, KEY_TV },
+ { 0x3d, KEY_PRINT },
+
+ { 0x48, KEY_RED },
+ { 0x04, KEY_GREEN },
+ { 0x11, KEY_YELLOW },
+ { 0x00, KEY_BLUE },
+
+ { 0x2d, KEY_VOLUMEUP },
+ { 0x1e, KEY_VOLUMEDOWN },
+
+ { 0x49, KEY_MENU },
+
+ { 0x16, KEY_CHANNELUP },
+ { 0x17, KEY_CHANNELDOWN },
+
+ { 0x20, KEY_UP },
+ { 0x21, KEY_DOWN },
+ { 0x22, KEY_LEFT },
+ { 0x23, KEY_RIGHT },
+ { 0x0d, KEY_SELECT },
+
+ { 0x08, KEY_BACK },
+ { 0x07, KEY_REFRESH },
+
+ { 0x2f, KEY_ZOOM },
+ { 0x29, KEY_RECORD },
+
+ { 0x4b, KEY_PAUSE },
+ { 0x4d, KEY_REWIND },
+ { 0x2e, KEY_PLAY },
+ { 0x4e, KEY_FORWARD },
+ { 0x53, KEY_PREVIOUS },
+ { 0x4c, KEY_STOP },
+ { 0x54, KEY_NEXT },
+
+ { 0x69, KEY_0 },
+ { 0x6a, KEY_1 },
+ { 0x6b, KEY_2 },
+ { 0x6c, KEY_3 },
+ { 0x6d, KEY_4 },
+ { 0x6e, KEY_5 },
+ { 0x6f, KEY_6 },
+ { 0x70, KEY_7 },
+ { 0x71, KEY_8 },
+ { 0x72, KEY_9 },
+
+ { 0x74, KEY_CHANNEL },
+ { 0x0a, KEY_BACKSPACE },
+};
+
+static struct rc_keymap pinnacle_color_map = {
+ .map = {
+ .scan = pinnacle_color,
+ .size = ARRAY_SIZE(pinnacle_color),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_PINNACLE_COLOR,
+ }
+};
+
+static int __init init_rc_map_pinnacle_color(void)
+{
+ return ir_register_map(&pinnacle_color_map);
+}
+
+static void __exit exit_rc_map_pinnacle_color(void)
+{
+ ir_unregister_map(&pinnacle_color_map);
+}
+
+module_init(init_rc_map_pinnacle_color)
+module_exit(exit_rc_map_pinnacle_color)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-pinnacle-grey.c b/drivers/media/IR/keymaps/rc-pinnacle-grey.c
new file mode 100644
index 000000000000..14cb772515c6
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-pinnacle-grey.c
@@ -0,0 +1,89 @@
+/* pinnacle-grey.h - Keytable for pinnacle_grey Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode pinnacle_grey[] = {
+ { 0x3a, KEY_0 },
+ { 0x31, KEY_1 },
+ { 0x32, KEY_2 },
+ { 0x33, KEY_3 },
+ { 0x34, KEY_4 },
+ { 0x35, KEY_5 },
+ { 0x36, KEY_6 },
+ { 0x37, KEY_7 },
+ { 0x38, KEY_8 },
+ { 0x39, KEY_9 },
+
+ { 0x2f, KEY_POWER },
+
+ { 0x2e, KEY_P },
+ { 0x1f, KEY_L },
+ { 0x2b, KEY_I },
+
+ { 0x2d, KEY_SCREEN },
+ { 0x1e, KEY_ZOOM },
+ { 0x1b, KEY_VOLUMEUP },
+ { 0x0f, KEY_VOLUMEDOWN },
+ { 0x17, KEY_CHANNELUP },
+ { 0x1c, KEY_CHANNELDOWN },
+ { 0x25, KEY_INFO },
+
+ { 0x3c, KEY_MUTE },
+
+ { 0x3d, KEY_LEFT },
+ { 0x3b, KEY_RIGHT },
+
+ { 0x3f, KEY_UP },
+ { 0x3e, KEY_DOWN },
+ { 0x1a, KEY_ENTER },
+
+ { 0x1d, KEY_MENU },
+ { 0x19, KEY_AGAIN },
+ { 0x16, KEY_PREVIOUSSONG },
+ { 0x13, KEY_NEXTSONG },
+ { 0x15, KEY_PAUSE },
+ { 0x0e, KEY_REWIND },
+ { 0x0d, KEY_PLAY },
+ { 0x0b, KEY_STOP },
+ { 0x07, KEY_FORWARD },
+ { 0x27, KEY_RECORD },
+ { 0x26, KEY_TUNER },
+ { 0x29, KEY_TEXT },
+ { 0x2a, KEY_MEDIA },
+ { 0x18, KEY_EPG },
+};
+
+static struct rc_keymap pinnacle_grey_map = {
+ .map = {
+ .scan = pinnacle_grey,
+ .size = ARRAY_SIZE(pinnacle_grey),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_PINNACLE_GREY,
+ }
+};
+
+static int __init init_rc_map_pinnacle_grey(void)
+{
+ return ir_register_map(&pinnacle_grey_map);
+}
+
+static void __exit exit_rc_map_pinnacle_grey(void)
+{
+ ir_unregister_map(&pinnacle_grey_map);
+}
+
+module_init(init_rc_map_pinnacle_grey)
+module_exit(exit_rc_map_pinnacle_grey)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-pinnacle-pctv-hd.c b/drivers/media/IR/keymaps/rc-pinnacle-pctv-hd.c
new file mode 100644
index 000000000000..835bf4ef8de7
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-pinnacle-pctv-hd.c
@@ -0,0 +1,73 @@
+/* pinnacle-pctv-hd.h - Keytable for pinnacle_pctv_hd Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Pinnacle PCTV HD 800i mini remote */
+
+static struct ir_scancode pinnacle_pctv_hd[] = {
+
+ { 0x0f, KEY_1 },
+ { 0x15, KEY_2 },
+ { 0x10, KEY_3 },
+ { 0x18, KEY_4 },
+ { 0x1b, KEY_5 },
+ { 0x1e, KEY_6 },
+ { 0x11, KEY_7 },
+ { 0x21, KEY_8 },
+ { 0x12, KEY_9 },
+ { 0x27, KEY_0 },
+
+ { 0x24, KEY_ZOOM },
+ { 0x2a, KEY_SUBTITLE },
+
+ { 0x00, KEY_MUTE },
+ { 0x01, KEY_ENTER }, /* Pinnacle Logo */
+ { 0x39, KEY_POWER },
+
+ { 0x03, KEY_VOLUMEUP },
+ { 0x09, KEY_VOLUMEDOWN },
+ { 0x06, KEY_CHANNELUP },
+ { 0x0c, KEY_CHANNELDOWN },
+
+ { 0x2d, KEY_REWIND },
+ { 0x30, KEY_PLAYPAUSE },
+ { 0x33, KEY_FASTFORWARD },
+ { 0x3c, KEY_STOP },
+ { 0x36, KEY_RECORD },
+ { 0x3f, KEY_EPG }, /* Labeled "?" */
+};
+
+static struct rc_keymap pinnacle_pctv_hd_map = {
+ .map = {
+ .scan = pinnacle_pctv_hd,
+ .size = ARRAY_SIZE(pinnacle_pctv_hd),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_PINNACLE_PCTV_HD,
+ }
+};
+
+static int __init init_rc_map_pinnacle_pctv_hd(void)
+{
+ return ir_register_map(&pinnacle_pctv_hd_map);
+}
+
+static void __exit exit_rc_map_pinnacle_pctv_hd(void)
+{
+ ir_unregister_map(&pinnacle_pctv_hd_map);
+}
+
+module_init(init_rc_map_pinnacle_pctv_hd)
+module_exit(exit_rc_map_pinnacle_pctv_hd)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-pixelview-mk12.c b/drivers/media/IR/keymaps/rc-pixelview-mk12.c
new file mode 100644
index 000000000000..5a735d569a8b
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-pixelview-mk12.c
@@ -0,0 +1,83 @@
+/* rc-pixelview-mk12.h - Keytable for pixelview Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/*
+ * Keytable for MK-F12 IR remote provided together with Pixelview
+ * Ultra Pro Remote Controller. Uses NEC extended format.
+ */
+static struct ir_scancode pixelview_mk12[] = {
+ { 0x866b03, KEY_TUNER }, /* Timeshift */
+ { 0x866b1e, KEY_POWER2 }, /* power */
+
+ { 0x866b01, KEY_1 },
+ { 0x866b0b, KEY_2 },
+ { 0x866b1b, KEY_3 },
+ { 0x866b05, KEY_4 },
+ { 0x866b09, KEY_5 },
+ { 0x866b15, KEY_6 },
+ { 0x866b06, KEY_7 },
+ { 0x866b0a, KEY_8 },
+ { 0x866b12, KEY_9 },
+ { 0x866b02, KEY_0 },
+
+ { 0x866b13, KEY_AGAIN }, /* loop */
+ { 0x866b10, KEY_DIGITS }, /* +100 */
+
+ { 0x866b00, KEY_MEDIA }, /* source */
+ { 0x866b18, KEY_MUTE }, /* mute */
+ { 0x866b19, KEY_CAMERA }, /* snapshot */
+ { 0x866b1a, KEY_SEARCH }, /* scan */
+
+ { 0x866b16, KEY_CHANNELUP }, /* chn + */
+ { 0x866b14, KEY_CHANNELDOWN }, /* chn - */
+ { 0x866b1f, KEY_VOLUMEUP }, /* vol + */
+ { 0x866b17, KEY_VOLUMEDOWN }, /* vol - */
+ { 0x866b1c, KEY_ZOOM }, /* zoom */
+
+ { 0x866b04, KEY_REWIND },
+ { 0x866b0e, KEY_RECORD },
+ { 0x866b0c, KEY_FORWARD },
+
+ { 0x866b1d, KEY_STOP },
+ { 0x866b08, KEY_PLAY },
+ { 0x866b0f, KEY_PAUSE },
+
+ { 0x866b0d, KEY_TV },
+ { 0x866b07, KEY_RADIO }, /* FM */
+};
+
+static struct rc_keymap pixelview_map = {
+ .map = {
+ .scan = pixelview_mk12,
+ .size = ARRAY_SIZE(pixelview_mk12),
+ .ir_type = IR_TYPE_NEC,
+ .name = RC_MAP_PIXELVIEW_MK12,
+ }
+};
+
+static int __init init_rc_map_pixelview(void)
+{
+ return ir_register_map(&pixelview_map);
+}
+
+static void __exit exit_rc_map_pixelview(void)
+{
+ ir_unregister_map(&pixelview_map);
+}
+
+module_init(init_rc_map_pixelview)
+module_exit(exit_rc_map_pixelview)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
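
The header comment above notes that this keymap uses the NEC extended format, which is why each scancode is a 24-bit value such as 0x866bXX rather than a single command byte. Below is a small user-space sketch of how such a value splits into two address bytes and a command byte; the field layout is inferred from the 0x866bXX entries themselves and is not taken from this patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t scancode = 0x866b1e;			/* KEY_POWER2 entry above */
	uint8_t addr_high = (scancode >> 16) & 0xff;	/* 0x86 */
	uint8_t addr_low  = (scancode >> 8)  & 0xff;	/* 0x6b */
	uint8_t command   = scancode & 0xff;		/* 0x1e */

	printf("address 0x%02x%02x, command 0x%02x\n", addr_high, addr_low, command);
	return 0;
}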
diff --git a/drivers/media/IR/keymaps/rc-pixelview-new.c b/drivers/media/IR/keymaps/rc-pixelview-new.c
new file mode 100644
index 000000000000..7bbbbf5735e6
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-pixelview-new.c
@@ -0,0 +1,83 @@
+/* pixelview-new.h - Keytable for pixelview_new Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/*
+ Mauro Carvalho Chehab <mchehab@infradead.org>
+ present on PV MPEG 8000GT
+ */
+
+static struct ir_scancode pixelview_new[] = {
+ { 0x3c, KEY_TIME }, /* Timeshift */
+ { 0x12, KEY_POWER },
+
+ { 0x3d, KEY_1 },
+ { 0x38, KEY_2 },
+ { 0x18, KEY_3 },
+ { 0x35, KEY_4 },
+ { 0x39, KEY_5 },
+ { 0x15, KEY_6 },
+ { 0x36, KEY_7 },
+ { 0x3a, KEY_8 },
+ { 0x1e, KEY_9 },
+ { 0x3e, KEY_0 },
+
+ { 0x1c, KEY_AGAIN }, /* LOOP */
+ { 0x3f, KEY_MEDIA }, /* Source */
+ { 0x1f, KEY_LAST }, /* +100 */
+ { 0x1b, KEY_MUTE },
+
+ { 0x17, KEY_CHANNELDOWN },
+ { 0x16, KEY_CHANNELUP },
+ { 0x10, KEY_VOLUMEUP },
+ { 0x14, KEY_VOLUMEDOWN },
+ { 0x13, KEY_ZOOM },
+
+ { 0x19, KEY_CAMERA }, /* SNAPSHOT */
+ { 0x1a, KEY_SEARCH }, /* scan */
+
+ { 0x37, KEY_REWIND }, /* << */
+ { 0x32, KEY_RECORD }, /* o (red) */
+ { 0x33, KEY_FORWARD }, /* >> */
+ { 0x11, KEY_STOP }, /* square */
+ { 0x3b, KEY_PLAY }, /* > */
+ { 0x30, KEY_PLAYPAUSE }, /* || */
+
+ { 0x31, KEY_TV },
+ { 0x34, KEY_RADIO },
+};
+
+static struct rc_keymap pixelview_new_map = {
+ .map = {
+ .scan = pixelview_new,
+ .size = ARRAY_SIZE(pixelview_new),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_PIXELVIEW_NEW,
+ }
+};
+
+static int __init init_rc_map_pixelview_new(void)
+{
+ return ir_register_map(&pixelview_new_map);
+}
+
+static void __exit exit_rc_map_pixelview_new(void)
+{
+ ir_unregister_map(&pixelview_new_map);
+}
+
+module_init(init_rc_map_pixelview_new)
+module_exit(exit_rc_map_pixelview_new)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-pixelview.c b/drivers/media/IR/keymaps/rc-pixelview.c
new file mode 100644
index 000000000000..82ff12e182a0
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-pixelview.c
@@ -0,0 +1,82 @@
+/* pixelview.h - Keytable for pixelview Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode pixelview[] = {
+
+ { 0x1e, KEY_POWER }, /* power */
+ { 0x07, KEY_MEDIA }, /* source */
+ { 0x1c, KEY_SEARCH }, /* scan */
+
+
+ { 0x03, KEY_TUNER }, /* TV/FM */
+
+ { 0x00, KEY_RECORD },
+ { 0x08, KEY_STOP },
+ { 0x11, KEY_PLAY },
+
+ { 0x1a, KEY_PLAYPAUSE }, /* freeze */
+ { 0x19, KEY_ZOOM }, /* zoom */
+ { 0x0f, KEY_TEXT }, /* min */
+
+ { 0x01, KEY_1 },
+ { 0x0b, KEY_2 },
+ { 0x1b, KEY_3 },
+ { 0x05, KEY_4 },
+ { 0x09, KEY_5 },
+ { 0x15, KEY_6 },
+ { 0x06, KEY_7 },
+ { 0x0a, KEY_8 },
+ { 0x12, KEY_9 },
+ { 0x02, KEY_0 },
+ { 0x10, KEY_LAST }, /* +100 */
+ { 0x13, KEY_LIST }, /* recall */
+
+ { 0x1f, KEY_CHANNELUP }, /* chn down */
+ { 0x17, KEY_CHANNELDOWN }, /* chn up */
+ { 0x16, KEY_VOLUMEUP }, /* vol down */
+ { 0x14, KEY_VOLUMEDOWN }, /* vol up */
+
+ { 0x04, KEY_KPMINUS }, /* <<< */
+ { 0x0e, KEY_SETUP }, /* function */
+ { 0x0c, KEY_KPPLUS }, /* >>> */
+
+ { 0x0d, KEY_GOTO }, /* mts */
+ { 0x1d, KEY_REFRESH }, /* reset */
+ { 0x18, KEY_MUTE }, /* mute/unmute */
+};
+
+static struct rc_keymap pixelview_map = {
+ .map = {
+ .scan = pixelview,
+ .size = ARRAY_SIZE(pixelview),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_PIXELVIEW,
+ }
+};
+
+static int __init init_rc_map_pixelview(void)
+{
+ return ir_register_map(&pixelview_map);
+}
+
+static void __exit exit_rc_map_pixelview(void)
+{
+ ir_unregister_map(&pixelview_map);
+}
+
+module_init(init_rc_map_pixelview)
+module_exit(exit_rc_map_pixelview)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-powercolor-real-angel.c b/drivers/media/IR/keymaps/rc-powercolor-real-angel.c
new file mode 100644
index 000000000000..7cef8190a224
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-powercolor-real-angel.c
@@ -0,0 +1,81 @@
+/* powercolor-real-angel.h - Keytable for powercolor_real_angel Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/*
+ * Remote control for Powercolor Real Angel 330
+ * Daniel Fraga <fragabr@gmail.com>
+ */
+
+static struct ir_scancode powercolor_real_angel[] = {
+ { 0x38, KEY_SWITCHVIDEOMODE }, /* switch inputs */
+ { 0x0c, KEY_MEDIA }, /* Turn ON/OFF App */
+ { 0x00, KEY_0 },
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+ { 0x0a, KEY_DIGITS }, /* single, double, triple digit */
+ { 0x29, KEY_PREVIOUS }, /* previous channel */
+ { 0x12, KEY_BRIGHTNESSUP },
+ { 0x13, KEY_BRIGHTNESSDOWN },
+ { 0x2b, KEY_MODE }, /* stereo/mono */
+ { 0x2c, KEY_TEXT }, /* teletext */
+ { 0x20, KEY_CHANNELUP }, /* channel up */
+ { 0x21, KEY_CHANNELDOWN }, /* channel down */
+ { 0x10, KEY_VOLUMEUP }, /* volume up */
+ { 0x11, KEY_VOLUMEDOWN }, /* volume down */
+ { 0x0d, KEY_MUTE },
+ { 0x1f, KEY_RECORD },
+ { 0x17, KEY_PLAY },
+ { 0x16, KEY_PAUSE },
+ { 0x0b, KEY_STOP },
+ { 0x27, KEY_FASTFORWARD },
+ { 0x26, KEY_REWIND },
+ { 0x1e, KEY_SEARCH }, /* autoscan */
+ { 0x0e, KEY_CAMERA }, /* snapshot */
+ { 0x2d, KEY_SETUP },
+ { 0x0f, KEY_SCREEN }, /* full screen */
+ { 0x14, KEY_RADIO }, /* FM radio */
+ { 0x25, KEY_POWER }, /* power */
+};
+
+static struct rc_keymap powercolor_real_angel_map = {
+ .map = {
+ .scan = powercolor_real_angel,
+ .size = ARRAY_SIZE(powercolor_real_angel),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_POWERCOLOR_REAL_ANGEL,
+ }
+};
+
+static int __init init_rc_map_powercolor_real_angel(void)
+{
+ return ir_register_map(&powercolor_real_angel_map);
+}
+
+static void __exit exit_rc_map_powercolor_real_angel(void)
+{
+ ir_unregister_map(&powercolor_real_angel_map);
+}
+
+module_init(init_rc_map_powercolor_real_angel)
+module_exit(exit_rc_map_powercolor_real_angel)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-proteus-2309.c b/drivers/media/IR/keymaps/rc-proteus-2309.c
new file mode 100644
index 000000000000..22e92d39dee5
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-proteus-2309.c
@@ -0,0 +1,69 @@
+/* proteus-2309.h - Keytable for proteus_2309 Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Michal Majchrowicz <mmajchrowicz@gmail.com> */
+
+static struct ir_scancode proteus_2309[] = {
+ /* numeric */
+ { 0x00, KEY_0 },
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+
+ { 0x5c, KEY_POWER }, /* power */
+ { 0x20, KEY_ZOOM }, /* full screen */
+ { 0x0f, KEY_BACKSPACE }, /* recall */
+ { 0x1b, KEY_ENTER }, /* mute */
+ { 0x41, KEY_RECORD }, /* record */
+ { 0x43, KEY_STOP }, /* stop */
+ { 0x16, KEY_S },
+ { 0x1a, KEY_POWER2 }, /* off */
+ { 0x2e, KEY_RED },
+ { 0x1f, KEY_CHANNELDOWN }, /* channel - */
+ { 0x1c, KEY_CHANNELUP }, /* channel + */
+ { 0x10, KEY_VOLUMEDOWN }, /* volume - */
+ { 0x1e, KEY_VOLUMEUP }, /* volume + */
+ { 0x14, KEY_F1 },
+};
+
+static struct rc_keymap proteus_2309_map = {
+ .map = {
+ .scan = proteus_2309,
+ .size = ARRAY_SIZE(proteus_2309),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_PROTEUS_2309,
+ }
+};
+
+static int __init init_rc_map_proteus_2309(void)
+{
+ return ir_register_map(&proteus_2309_map);
+}
+
+static void __exit exit_rc_map_proteus_2309(void)
+{
+ ir_unregister_map(&proteus_2309_map);
+}
+
+module_init(init_rc_map_proteus_2309)
+module_exit(exit_rc_map_proteus_2309)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-purpletv.c b/drivers/media/IR/keymaps/rc-purpletv.c
new file mode 100644
index 000000000000..4e20fc2269f7
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-purpletv.c
@@ -0,0 +1,81 @@
+/* purpletv.h - Keytable for purpletv Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode purpletv[] = {
+ { 0x03, KEY_POWER },
+ { 0x6f, KEY_MUTE },
+ { 0x10, KEY_BACKSPACE }, /* Recall */
+
+ { 0x11, KEY_0 },
+ { 0x04, KEY_1 },
+ { 0x05, KEY_2 },
+ { 0x06, KEY_3 },
+ { 0x08, KEY_4 },
+ { 0x09, KEY_5 },
+ { 0x0a, KEY_6 },
+ { 0x0c, KEY_7 },
+ { 0x0d, KEY_8 },
+ { 0x0e, KEY_9 },
+ { 0x12, KEY_DOT }, /* 100+ */
+
+ { 0x07, KEY_VOLUMEUP },
+ { 0x0b, KEY_VOLUMEDOWN },
+ { 0x1a, KEY_KPPLUS },
+ { 0x18, KEY_KPMINUS },
+ { 0x15, KEY_UP },
+ { 0x1d, KEY_DOWN },
+ { 0x0f, KEY_CHANNELUP },
+ { 0x13, KEY_CHANNELDOWN },
+ { 0x48, KEY_ZOOM },
+
+ { 0x1b, KEY_VIDEO }, /* Video source */
+ { 0x1f, KEY_CAMERA }, /* Snapshot */
+ { 0x49, KEY_LANGUAGE }, /* MTS Select */
+ { 0x19, KEY_SEARCH }, /* Auto Scan */
+
+ { 0x4b, KEY_RECORD },
+ { 0x46, KEY_PLAY },
+ { 0x45, KEY_PAUSE }, /* Pause */
+ { 0x44, KEY_STOP },
+ { 0x43, KEY_TIME }, /* Time Shift */
+ { 0x17, KEY_CHANNEL }, /* SURF CH */
+ { 0x40, KEY_FORWARD }, /* Forward ? */
+ { 0x42, KEY_REWIND }, /* Backward ? */
+
+};
+
+static struct rc_keymap purpletv_map = {
+ .map = {
+ .scan = purpletv,
+ .size = ARRAY_SIZE(purpletv),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_PURPLETV,
+ }
+};
+
+static int __init init_rc_map_purpletv(void)
+{
+ return ir_register_map(&purpletv_map);
+}
+
+static void __exit exit_rc_map_purpletv(void)
+{
+ ir_unregister_map(&purpletv_map);
+}
+
+module_init(init_rc_map_purpletv)
+module_exit(exit_rc_map_purpletv)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-pv951.c b/drivers/media/IR/keymaps/rc-pv951.c
new file mode 100644
index 000000000000..36679e706cf3
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-pv951.c
@@ -0,0 +1,78 @@
+/* pv951.h - Keytable for pv951 Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Mark Phalan <phalanm@o2.ie> */
+
+static struct ir_scancode pv951[] = {
+ { 0x00, KEY_0 },
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+
+ { 0x12, KEY_POWER },
+ { 0x10, KEY_MUTE },
+ { 0x1f, KEY_VOLUMEDOWN },
+ { 0x1b, KEY_VOLUMEUP },
+ { 0x1a, KEY_CHANNELUP },
+ { 0x1e, KEY_CHANNELDOWN },
+ { 0x0e, KEY_PAGEUP },
+ { 0x1d, KEY_PAGEDOWN },
+ { 0x13, KEY_SOUND },
+
+ { 0x18, KEY_KPPLUSMINUS }, /* CH +/- */
+ { 0x16, KEY_SUBTITLE }, /* CC */
+ { 0x0d, KEY_TEXT }, /* TTX */
+ { 0x0b, KEY_TV }, /* AIR/CBL */
+ { 0x11, KEY_PC }, /* PC/TV */
+ { 0x17, KEY_OK }, /* CH RTN */
+ { 0x19, KEY_MODE }, /* FUNC */
+ { 0x0c, KEY_SEARCH }, /* AUTOSCAN */
+
+ /* Not sure what to do with these ones! */
+ { 0x0f, KEY_SELECT }, /* SOURCE */
+ { 0x0a, KEY_KPPLUS }, /* +100 */
+ { 0x14, KEY_EQUAL }, /* SYNC */
+ { 0x1c, KEY_MEDIA }, /* PC/TV */
+};
+
+static struct rc_keymap pv951_map = {
+ .map = {
+ .scan = pv951,
+ .size = ARRAY_SIZE(pv951),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_PV951,
+ }
+};
+
+static int __init init_rc_map_pv951(void)
+{
+ return ir_register_map(&pv951_map);
+}
+
+static void __exit exit_rc_map_pv951(void)
+{
+ ir_unregister_map(&pv951_map);
+}
+
+module_init(init_rc_map_pv951)
+module_exit(exit_rc_map_pv951)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-rc5-hauppauge-new.c b/drivers/media/IR/keymaps/rc-rc5-hauppauge-new.c
new file mode 100644
index 000000000000..cc6b8f548747
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-rc5-hauppauge-new.c
@@ -0,0 +1,103 @@
+/* rc5-hauppauge-new.h - Keytable for rc5_hauppauge_new Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/*
+ * Hauppauge: the newer, gray remotes (there seem to be multiple
+ * slightly different versions), shipped with cx88+ivtv cards.
+ *
+ * This table contains the complete RC5 code, instead of just the data part
+ */
+
+static struct ir_scancode rc5_hauppauge_new[] = {
+ /* Keys 0 to 9 */
+ { 0x1e00, KEY_0 },
+ { 0x1e01, KEY_1 },
+ { 0x1e02, KEY_2 },
+ { 0x1e03, KEY_3 },
+ { 0x1e04, KEY_4 },
+ { 0x1e05, KEY_5 },
+ { 0x1e06, KEY_6 },
+ { 0x1e07, KEY_7 },
+ { 0x1e08, KEY_8 },
+ { 0x1e09, KEY_9 },
+
+ { 0x1e0a, KEY_TEXT }, /* keypad asterisk as well */
+ { 0x1e0b, KEY_RED }, /* red button */
+ { 0x1e0c, KEY_RADIO },
+ { 0x1e0d, KEY_MENU },
+ { 0x1e0e, KEY_SUBTITLE }, /* also the # key */
+ { 0x1e0f, KEY_MUTE },
+ { 0x1e10, KEY_VOLUMEUP },
+ { 0x1e11, KEY_VOLUMEDOWN },
+ { 0x1e12, KEY_PREVIOUS }, /* previous channel */
+ { 0x1e14, KEY_UP },
+ { 0x1e15, KEY_DOWN },
+ { 0x1e16, KEY_LEFT },
+ { 0x1e17, KEY_RIGHT },
+ { 0x1e18, KEY_VIDEO }, /* Videos */
+ { 0x1e19, KEY_AUDIO }, /* Music */
+ /* 0x1e1a: the "Pictures" button - mapped to KEY_MHP
+ ("Multimedia Home Platform") because there is
+ no "PICTURES" key in input.h
+ */
+ { 0x1e1a, KEY_MHP },
+
+ { 0x1e1b, KEY_EPG }, /* Guide */
+ { 0x1e1c, KEY_TV },
+ { 0x1e1e, KEY_NEXTSONG }, /* skip >| */
+ { 0x1e1f, KEY_EXIT }, /* back/exit */
+ { 0x1e20, KEY_CHANNELUP }, /* channel / program + */
+ { 0x1e21, KEY_CHANNELDOWN }, /* channel / program - */
+ { 0x1e22, KEY_CHANNEL }, /* source (old black remote) */
+ { 0x1e24, KEY_PREVIOUSSONG }, /* replay |< */
+ { 0x1e25, KEY_ENTER }, /* OK */
+ { 0x1e26, KEY_SLEEP }, /* minimize (old black remote) */
+ { 0x1e29, KEY_BLUE }, /* blue key */
+ { 0x1e2e, KEY_GREEN }, /* green button */
+ { 0x1e30, KEY_PAUSE }, /* pause */
+ { 0x1e32, KEY_REWIND }, /* backward << */
+ { 0x1e34, KEY_FASTFORWARD }, /* forward >> */
+ { 0x1e35, KEY_PLAY },
+ { 0x1e36, KEY_STOP },
+ { 0x1e37, KEY_RECORD }, /* recording */
+ { 0x1e38, KEY_YELLOW }, /* yellow key */
+ { 0x1e3b, KEY_SELECT }, /* top right button */
+ { 0x1e3c, KEY_ZOOM }, /* full */
+ { 0x1e3d, KEY_POWER }, /* system power (green button) */
+};
+
+static struct rc_keymap rc5_hauppauge_new_map = {
+ .map = {
+ .scan = rc5_hauppauge_new,
+ .size = ARRAY_SIZE(rc5_hauppauge_new),
+ .ir_type = IR_TYPE_RC5,
+ .name = RC_MAP_RC5_HAUPPAUGE_NEW,
+ }
+};
+
+static int __init init_rc_map_rc5_hauppauge_new(void)
+{
+ return ir_register_map(&rc5_hauppauge_new_map);
+}
+
+static void __exit exit_rc_map_rc5_hauppauge_new(void)
+{
+ ir_unregister_map(&rc5_hauppauge_new_map);
+}
+
+module_init(init_rc_map_rc5_hauppauge_new)
+module_exit(exit_rc_map_rc5_hauppauge_new)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
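
As the comment above says, this table stores the complete RC5 code rather than only the data part: each entry carries the system address 0x1e in the high byte and the command in the low byte. A minimal sketch of that composition follows; rc5_full_code() is an illustrative helper, not a kernel API:

#include <stdint.h>

/* Compose a "complete" RC5 scancode as used in the table above:
 * system address in the high byte, command in the low byte. */
static uint16_t rc5_full_code(uint8_t system, uint8_t command)
{
	return (uint16_t)((system << 8) | command);
}

int main(void)
{
	return rc5_full_code(0x1e, 0x3d) == 0x1e3d ? 0 : 1;	/* KEY_POWER entry */
}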
diff --git a/drivers/media/IR/keymaps/rc-rc5-tv.c b/drivers/media/IR/keymaps/rc-rc5-tv.c
new file mode 100644
index 000000000000..73cce2f8ddfb
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-rc5-tv.c
@@ -0,0 +1,81 @@
+/* rc5-tv.h - Keytable for rc5_tv Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* generic RC5 keytable */
+/* see http://users.pandora.be/nenya/electronics/rc5/codes00.htm */
+/* used by old (black) Hauppauge remotes */
+
+static struct ir_scancode rc5_tv[] = {
+ /* Keys 0 to 9 */
+ { 0x00, KEY_0 },
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+
+ { 0x0b, KEY_CHANNEL }, /* channel / program (japan: 11) */
+ { 0x0c, KEY_POWER }, /* standby */
+ { 0x0d, KEY_MUTE }, /* mute / demute */
+ { 0x0f, KEY_TV }, /* display */
+ { 0x10, KEY_VOLUMEUP },
+ { 0x11, KEY_VOLUMEDOWN },
+ { 0x12, KEY_BRIGHTNESSUP },
+ { 0x13, KEY_BRIGHTNESSDOWN },
+ { 0x1e, KEY_SEARCH }, /* search + */
+ { 0x20, KEY_CHANNELUP }, /* channel / program + */
+ { 0x21, KEY_CHANNELDOWN }, /* channel / program - */
+ { 0x22, KEY_CHANNEL }, /* alt / channel */
+ { 0x23, KEY_LANGUAGE }, /* 1st / 2nd language */
+ { 0x26, KEY_SLEEP }, /* sleeptimer */
+ { 0x2e, KEY_MENU }, /* 2nd controls (USA: menu) */
+ { 0x30, KEY_PAUSE },
+ { 0x32, KEY_REWIND },
+ { 0x33, KEY_GOTO },
+ { 0x35, KEY_PLAY },
+ { 0x36, KEY_STOP },
+ { 0x37, KEY_RECORD }, /* recording */
+ { 0x3c, KEY_TEXT }, /* teletext submode (Japan: 12) */
+ { 0x3d, KEY_SUSPEND }, /* system standby */
+
+};
+
+static struct rc_keymap rc5_tv_map = {
+ .map = {
+ .scan = rc5_tv,
+ .size = ARRAY_SIZE(rc5_tv),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_RC5_TV,
+ }
+};
+
+static int __init init_rc_map_rc5_tv(void)
+{
+ return ir_register_map(&rc5_tv_map);
+}
+
+static void __exit exit_rc_map_rc5_tv(void)
+{
+ ir_unregister_map(&rc5_tv_map);
+}
+
+module_init(init_rc_map_rc5_tv)
+module_exit(exit_rc_map_rc5_tv)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-real-audio-220-32-keys.c b/drivers/media/IR/keymaps/rc-real-audio-220-32-keys.c
new file mode 100644
index 000000000000..ab1a6d2baf72
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-real-audio-220-32-keys.c
@@ -0,0 +1,78 @@
+/* real-audio-220-32-keys.h - Keytable for real_audio_220_32_keys Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Zogis Real Audio 220 - 32 keys IR */
+
+static struct ir_scancode real_audio_220_32_keys[] = {
+ { 0x1c, KEY_RADIO},
+ { 0x12, KEY_POWER2},
+
+ { 0x01, KEY_1},
+ { 0x02, KEY_2},
+ { 0x03, KEY_3},
+ { 0x04, KEY_4},
+ { 0x05, KEY_5},
+ { 0x06, KEY_6},
+ { 0x07, KEY_7},
+ { 0x08, KEY_8},
+ { 0x09, KEY_9},
+ { 0x00, KEY_0},
+
+ { 0x0c, KEY_VOLUMEUP},
+ { 0x18, KEY_VOLUMEDOWN},
+ { 0x0b, KEY_CHANNELUP},
+ { 0x15, KEY_CHANNELDOWN},
+ { 0x16, KEY_ENTER},
+
+ { 0x11, KEY_LIST}, /* Source */
+ { 0x0d, KEY_AUDIO}, /* stereo */
+
+ { 0x0f, KEY_PREVIOUS}, /* Prev */
+ { 0x1b, KEY_TIME}, /* Timeshift */
+ { 0x1a, KEY_NEXT}, /* Next */
+
+ { 0x0e, KEY_STOP},
+ { 0x1f, KEY_PLAY},
+ { 0x1e, KEY_PLAYPAUSE}, /* Pause */
+
+ { 0x1d, KEY_RECORD},
+ { 0x13, KEY_MUTE},
+ { 0x19, KEY_CAMERA}, /* Snapshot */
+
+};
+
+static struct rc_keymap real_audio_220_32_keys_map = {
+ .map = {
+ .scan = real_audio_220_32_keys,
+ .size = ARRAY_SIZE(real_audio_220_32_keys),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_REAL_AUDIO_220_32_KEYS,
+ }
+};
+
+static int __init init_rc_map_real_audio_220_32_keys(void)
+{
+ return ir_register_map(&real_audio_220_32_keys_map);
+}
+
+static void __exit exit_rc_map_real_audio_220_32_keys(void)
+{
+ ir_unregister_map(&real_audio_220_32_keys_map);
+}
+
+module_init(init_rc_map_real_audio_220_32_keys)
+module_exit(exit_rc_map_real_audio_220_32_keys)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-tbs-nec.c b/drivers/media/IR/keymaps/rc-tbs-nec.c
new file mode 100644
index 000000000000..3309631e6f80
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-tbs-nec.c
@@ -0,0 +1,73 @@
+/* tbs-nec.h - Keytable for tbs_nec Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode tbs_nec[] = {
+ { 0x04, KEY_POWER2}, /*power*/
+ { 0x14, KEY_MUTE}, /*mute*/
+ { 0x07, KEY_1},
+ { 0x06, KEY_2},
+ { 0x05, KEY_3},
+ { 0x0b, KEY_4},
+ { 0x0a, KEY_5},
+ { 0x09, KEY_6},
+ { 0x0f, KEY_7},
+ { 0x0e, KEY_8},
+ { 0x0d, KEY_9},
+ { 0x12, KEY_0},
+ { 0x16, KEY_CHANNELUP}, /*ch+*/
+ { 0x11, KEY_CHANNELDOWN},/*ch-*/
+ { 0x13, KEY_VOLUMEUP}, /*vol+*/
+ { 0x0c, KEY_VOLUMEDOWN},/*vol-*/
+ { 0x03, KEY_RECORD}, /*rec*/
+ { 0x18, KEY_PAUSE}, /*pause*/
+ { 0x19, KEY_OK}, /*ok*/
+ { 0x1a, KEY_CAMERA}, /* snapshot */
+ { 0x01, KEY_UP},
+ { 0x10, KEY_LEFT},
+ { 0x02, KEY_RIGHT},
+ { 0x08, KEY_DOWN},
+ { 0x15, KEY_FAVORITES},
+ { 0x17, KEY_SUBTITLE},
+ { 0x1d, KEY_ZOOM},
+ { 0x1f, KEY_EXIT},
+ { 0x1e, KEY_MENU},
+ { 0x1c, KEY_EPG},
+ { 0x00, KEY_PREVIOUS},
+ { 0x1b, KEY_MODE},
+};
+
+static struct rc_keymap tbs_nec_map = {
+ .map = {
+ .scan = tbs_nec,
+ .size = ARRAY_SIZE(tbs_nec),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_TBS_NEC,
+ }
+};
+
+static int __init init_rc_map_tbs_nec(void)
+{
+ return ir_register_map(&tbs_nec_map);
+}
+
+static void __exit exit_rc_map_tbs_nec(void)
+{
+ ir_unregister_map(&tbs_nec_map);
+}
+
+module_init(init_rc_map_tbs_nec)
+module_exit(exit_rc_map_tbs_nec)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-terratec-cinergy-xs.c b/drivers/media/IR/keymaps/rc-terratec-cinergy-xs.c
new file mode 100644
index 000000000000..5326a0b444c1
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-terratec-cinergy-xs.c
@@ -0,0 +1,92 @@
+/* terratec-cinergy-xs.h - Keytable for terratec_cinergy_xs Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Terratec Cinergy Hybrid T USB XS
+ Devin Heitmueller <dheitmueller@linuxtv.org>
+ */
+
+static struct ir_scancode terratec_cinergy_xs[] = {
+ { 0x41, KEY_HOME},
+ { 0x01, KEY_POWER},
+ { 0x42, KEY_MENU},
+ { 0x02, KEY_1},
+ { 0x03, KEY_2},
+ { 0x04, KEY_3},
+ { 0x43, KEY_SUBTITLE},
+ { 0x05, KEY_4},
+ { 0x06, KEY_5},
+ { 0x07, KEY_6},
+ { 0x44, KEY_TEXT},
+ { 0x08, KEY_7},
+ { 0x09, KEY_8},
+ { 0x0a, KEY_9},
+ { 0x45, KEY_DELETE},
+ { 0x0b, KEY_TUNER},
+ { 0x0c, KEY_0},
+ { 0x0d, KEY_MODE},
+ { 0x46, KEY_TV},
+ { 0x47, KEY_DVD},
+ { 0x49, KEY_VIDEO},
+ { 0x4b, KEY_AUX},
+ { 0x10, KEY_UP},
+ { 0x11, KEY_LEFT},
+ { 0x12, KEY_OK},
+ { 0x13, KEY_RIGHT},
+ { 0x14, KEY_DOWN},
+ { 0x0f, KEY_EPG},
+ { 0x16, KEY_INFO},
+ { 0x4d, KEY_BACKSPACE},
+ { 0x1c, KEY_VOLUMEUP},
+ { 0x4c, KEY_PLAY},
+ { 0x1b, KEY_CHANNELUP},
+ { 0x1e, KEY_VOLUMEDOWN},
+ { 0x1d, KEY_MUTE},
+ { 0x1f, KEY_CHANNELDOWN},
+ { 0x17, KEY_RED},
+ { 0x18, KEY_GREEN},
+ { 0x19, KEY_YELLOW},
+ { 0x1a, KEY_BLUE},
+ { 0x58, KEY_RECORD},
+ { 0x48, KEY_STOP},
+ { 0x40, KEY_PAUSE},
+ { 0x54, KEY_LAST},
+ { 0x4e, KEY_REWIND},
+ { 0x4f, KEY_FASTFORWARD},
+ { 0x5c, KEY_NEXT},
+};
+
+static struct rc_keymap terratec_cinergy_xs_map = {
+ .map = {
+ .scan = terratec_cinergy_xs,
+ .size = ARRAY_SIZE(terratec_cinergy_xs),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_TERRATEC_CINERGY_XS,
+ }
+};
+
+static int __init init_rc_map_terratec_cinergy_xs(void)
+{
+ return ir_register_map(&terratec_cinergy_xs_map);
+}
+
+static void __exit exit_rc_map_terratec_cinergy_xs(void)
+{
+ ir_unregister_map(&terratec_cinergy_xs_map);
+}
+
+module_init(init_rc_map_terratec_cinergy_xs)
+module_exit(exit_rc_map_terratec_cinergy_xs)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-tevii-nec.c b/drivers/media/IR/keymaps/rc-tevii-nec.c
new file mode 100644
index 000000000000..e30d411c07bb
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-tevii-nec.c
@@ -0,0 +1,88 @@
+/* tevii-nec.h - Keytable for tevii_nec Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode tevii_nec[] = {
+ { 0x0a, KEY_POWER2},
+ { 0x0c, KEY_MUTE},
+ { 0x11, KEY_1},
+ { 0x12, KEY_2},
+ { 0x13, KEY_3},
+ { 0x14, KEY_4},
+ { 0x15, KEY_5},
+ { 0x16, KEY_6},
+ { 0x17, KEY_7},
+ { 0x18, KEY_8},
+ { 0x19, KEY_9},
+ { 0x10, KEY_0},
+ { 0x1c, KEY_MENU},
+ { 0x0f, KEY_VOLUMEDOWN},
+ { 0x1a, KEY_LAST},
+ { 0x0e, KEY_OPEN},
+ { 0x04, KEY_RECORD},
+ { 0x09, KEY_VOLUMEUP},
+ { 0x08, KEY_CHANNELUP},
+ { 0x07, KEY_PVR},
+ { 0x0b, KEY_TIME},
+ { 0x02, KEY_RIGHT},
+ { 0x03, KEY_LEFT},
+ { 0x00, KEY_UP},
+ { 0x1f, KEY_OK},
+ { 0x01, KEY_DOWN},
+ { 0x05, KEY_TUNER},
+ { 0x06, KEY_CHANNELDOWN},
+ { 0x40, KEY_PLAYPAUSE},
+ { 0x1e, KEY_REWIND},
+ { 0x1b, KEY_FAVORITES},
+ { 0x1d, KEY_BACK},
+ { 0x4d, KEY_FASTFORWARD},
+ { 0x44, KEY_EPG},
+ { 0x4c, KEY_INFO},
+ { 0x41, KEY_AB},
+ { 0x43, KEY_AUDIO},
+ { 0x45, KEY_SUBTITLE},
+ { 0x4a, KEY_LIST},
+ { 0x46, KEY_F1},
+ { 0x47, KEY_F2},
+ { 0x5e, KEY_F3},
+ { 0x5c, KEY_F4},
+ { 0x52, KEY_F5},
+ { 0x5a, KEY_F6},
+ { 0x56, KEY_MODE},
+ { 0x58, KEY_SWITCHVIDEOMODE},
+};
+
+static struct rc_keymap tevii_nec_map = {
+ .map = {
+ .scan = tevii_nec,
+ .size = ARRAY_SIZE(tevii_nec),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_TEVII_NEC,
+ }
+};
+
+static int __init init_rc_map_tevii_nec(void)
+{
+ return ir_register_map(&tevii_nec_map);
+}
+
+static void __exit exit_rc_map_tevii_nec(void)
+{
+ ir_unregister_map(&tevii_nec_map);
+}
+
+module_init(init_rc_map_tevii_nec)
+module_exit(exit_rc_map_tevii_nec)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-tt-1500.c b/drivers/media/IR/keymaps/rc-tt-1500.c
new file mode 100644
index 000000000000..bc88de011d5d
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-tt-1500.c
@@ -0,0 +1,82 @@
+/* tt-1500.h - Keytable for tt_1500 Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* for the Technotrend 1500 bundled remotes (grey and black): */
+
+static struct ir_scancode tt_1500[] = {
+ { 0x01, KEY_POWER },
+ { 0x02, KEY_SHUFFLE }, /* ? double-arrow key */
+ { 0x03, KEY_1 },
+ { 0x04, KEY_2 },
+ { 0x05, KEY_3 },
+ { 0x06, KEY_4 },
+ { 0x07, KEY_5 },
+ { 0x08, KEY_6 },
+ { 0x09, KEY_7 },
+ { 0x0a, KEY_8 },
+ { 0x0b, KEY_9 },
+ { 0x0c, KEY_0 },
+ { 0x0d, KEY_UP },
+ { 0x0e, KEY_LEFT },
+ { 0x0f, KEY_OK },
+ { 0x10, KEY_RIGHT },
+ { 0x11, KEY_DOWN },
+ { 0x12, KEY_INFO },
+ { 0x13, KEY_EXIT },
+ { 0x14, KEY_RED },
+ { 0x15, KEY_GREEN },
+ { 0x16, KEY_YELLOW },
+ { 0x17, KEY_BLUE },
+ { 0x18, KEY_MUTE },
+ { 0x19, KEY_TEXT },
+ { 0x1a, KEY_MODE }, /* ? TV/Radio */
+ { 0x21, KEY_OPTION },
+ { 0x22, KEY_EPG },
+ { 0x23, KEY_CHANNELUP },
+ { 0x24, KEY_CHANNELDOWN },
+ { 0x25, KEY_VOLUMEUP },
+ { 0x26, KEY_VOLUMEDOWN },
+ { 0x27, KEY_SETUP },
+ { 0x3a, KEY_RECORD }, /* these keys are only in the black remote */
+ { 0x3b, KEY_PLAY },
+ { 0x3c, KEY_STOP },
+ { 0x3d, KEY_REWIND },
+ { 0x3e, KEY_PAUSE },
+ { 0x3f, KEY_FORWARD },
+};
+
+static struct rc_keymap tt_1500_map = {
+ .map = {
+ .scan = tt_1500,
+ .size = ARRAY_SIZE(tt_1500),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_TT_1500,
+ }
+};
+
+static int __init init_rc_map_tt_1500(void)
+{
+ return ir_register_map(&tt_1500_map);
+}
+
+static void __exit exit_rc_map_tt_1500(void)
+{
+ ir_unregister_map(&tt_1500_map);
+}
+
+module_init(init_rc_map_tt_1500)
+module_exit(exit_rc_map_tt_1500)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-videomate-s350.c b/drivers/media/IR/keymaps/rc-videomate-s350.c
new file mode 100644
index 000000000000..4df7fcd1d2fc
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-videomate-s350.c
@@ -0,0 +1,85 @@
+/* videomate-s350.h - Keytable for videomate_s350 Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode videomate_s350[] = {
+ { 0x00, KEY_TV},
+ { 0x01, KEY_DVD},
+ { 0x04, KEY_RECORD},
+ { 0x05, KEY_VIDEO}, /* TV/Video */
+ { 0x07, KEY_STOP},
+ { 0x08, KEY_PLAYPAUSE},
+ { 0x0a, KEY_REWIND},
+ { 0x0f, KEY_FASTFORWARD},
+ { 0x10, KEY_CHANNELUP},
+ { 0x12, KEY_VOLUMEUP},
+ { 0x13, KEY_CHANNELDOWN},
+ { 0x14, KEY_MUTE},
+ { 0x15, KEY_VOLUMEDOWN},
+ { 0x16, KEY_1},
+ { 0x17, KEY_2},
+ { 0x18, KEY_3},
+ { 0x19, KEY_4},
+ { 0x1a, KEY_5},
+ { 0x1b, KEY_6},
+ { 0x1c, KEY_7},
+ { 0x1d, KEY_8},
+ { 0x1e, KEY_9},
+ { 0x1f, KEY_0},
+ { 0x21, KEY_SLEEP},
+ { 0x24, KEY_ZOOM},
+ { 0x25, KEY_LAST}, /* Recall */
+ { 0x26, KEY_SUBTITLE}, /* CC */
+ { 0x27, KEY_LANGUAGE}, /* MTS */
+ { 0x29, KEY_CHANNEL}, /* SURF */
+ { 0x2b, KEY_A},
+ { 0x2c, KEY_B},
+ { 0x2f, KEY_CAMERA}, /* Snapshot */
+ { 0x23, KEY_RADIO},
+ { 0x02, KEY_PREVIOUSSONG},
+ { 0x06, KEY_NEXTSONG},
+ { 0x03, KEY_EPG},
+ { 0x09, KEY_SETUP},
+ { 0x22, KEY_BACKSPACE},
+ { 0x0c, KEY_UP},
+ { 0x0e, KEY_DOWN},
+ { 0x0b, KEY_LEFT},
+ { 0x0d, KEY_RIGHT},
+ { 0x11, KEY_ENTER},
+ { 0x20, KEY_TEXT},
+};
+
+static struct rc_keymap videomate_s350_map = {
+ .map = {
+ .scan = videomate_s350,
+ .size = ARRAY_SIZE(videomate_s350),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_VIDEOMATE_S350,
+ }
+};
+
+static int __init init_rc_map_videomate_s350(void)
+{
+ return ir_register_map(&videomate_s350_map);
+}
+
+static void __exit exit_rc_map_videomate_s350(void)
+{
+ ir_unregister_map(&videomate_s350_map);
+}
+
+module_init(init_rc_map_videomate_s350)
+module_exit(exit_rc_map_videomate_s350)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-videomate-tv-pvr.c b/drivers/media/IR/keymaps/rc-videomate-tv-pvr.c
new file mode 100644
index 000000000000..776b0a638d87
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-videomate-tv-pvr.c
@@ -0,0 +1,87 @@
+/* rc-videomate-tv-pvr.c - Keytable for videomate_tv_pvr Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode videomate_tv_pvr[] = {
+ { 0x14, KEY_MUTE },
+ { 0x24, KEY_ZOOM },
+
+ { 0x01, KEY_DVD },
+ { 0x23, KEY_RADIO },
+ { 0x00, KEY_TV },
+
+ { 0x0a, KEY_REWIND },
+ { 0x08, KEY_PLAYPAUSE },
+ { 0x0f, KEY_FORWARD },
+
+ { 0x02, KEY_PREVIOUS },
+ { 0x07, KEY_STOP },
+ { 0x06, KEY_NEXT },
+
+ { 0x0c, KEY_UP },
+ { 0x0e, KEY_DOWN },
+ { 0x0b, KEY_LEFT },
+ { 0x0d, KEY_RIGHT },
+ { 0x11, KEY_OK },
+
+ { 0x03, KEY_MENU },
+ { 0x09, KEY_SETUP },
+ { 0x05, KEY_VIDEO },
+ { 0x22, KEY_CHANNEL },
+
+ { 0x12, KEY_VOLUMEUP },
+ { 0x15, KEY_VOLUMEDOWN },
+ { 0x10, KEY_CHANNELUP },
+ { 0x13, KEY_CHANNELDOWN },
+
+ { 0x04, KEY_RECORD },
+
+ { 0x16, KEY_1 },
+ { 0x17, KEY_2 },
+ { 0x18, KEY_3 },
+ { 0x19, KEY_4 },
+ { 0x1a, KEY_5 },
+ { 0x1b, KEY_6 },
+ { 0x1c, KEY_7 },
+ { 0x1d, KEY_8 },
+ { 0x1e, KEY_9 },
+ { 0x1f, KEY_0 },
+
+ { 0x20, KEY_LANGUAGE },
+ { 0x21, KEY_SLEEP },
+};
+
+static struct rc_keymap videomate_tv_pvr_map = {
+ .map = {
+ .scan = videomate_tv_pvr,
+ .size = ARRAY_SIZE(videomate_tv_pvr),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_VIDEOMATE_TV_PVR,
+ }
+};
+
+static int __init init_rc_map_videomate_tv_pvr(void)
+{
+ return ir_register_map(&videomate_tv_pvr_map);
+}
+
+static void __exit exit_rc_map_videomate_tv_pvr(void)
+{
+ ir_unregister_map(&videomate_tv_pvr_map);
+}
+
+module_init(init_rc_map_videomate_tv_pvr)
+module_exit(exit_rc_map_videomate_tv_pvr)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-winfast-usbii-deluxe.c b/drivers/media/IR/keymaps/rc-winfast-usbii-deluxe.c
new file mode 100644
index 000000000000..9d2d550aaa90
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-winfast-usbii-deluxe.c
@@ -0,0 +1,82 @@
+/* rc-winfast-usbii-deluxe.c - Keytable for winfast_usbii_deluxe Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Leadtek Winfast TV USB II Deluxe remote
+ Magnus Alm <magnus.alm@gmail.com>
+ */
+
+static struct ir_scancode winfast_usbii_deluxe[] = {
+ { 0x62, KEY_0},
+ { 0x75, KEY_1},
+ { 0x76, KEY_2},
+ { 0x77, KEY_3},
+ { 0x79, KEY_4},
+ { 0x7a, KEY_5},
+ { 0x7b, KEY_6},
+ { 0x7d, KEY_7},
+ { 0x7e, KEY_8},
+ { 0x7f, KEY_9},
+
+ { 0x38, KEY_CAMERA}, /* SNAPSHOT */
+ { 0x37, KEY_RECORD}, /* RECORD */
+ { 0x35, KEY_TIME}, /* TIMESHIFT */
+
+ { 0x74, KEY_VOLUMEUP}, /* VOLUMEUP */
+ { 0x78, KEY_VOLUMEDOWN}, /* VOLUMEDOWN */
+ { 0x64, KEY_MUTE}, /* MUTE */
+
+ { 0x21, KEY_CHANNEL}, /* SURF */
+ { 0x7c, KEY_CHANNELUP}, /* CHANNELUP */
+ { 0x60, KEY_CHANNELDOWN}, /* CHANNELDOWN */
+ { 0x61, KEY_LAST}, /* LAST CHANNEL (RECALL) */
+
+ { 0x72, KEY_VIDEO}, /* INPUT MODES (TV/FM) */
+
+ { 0x70, KEY_POWER2}, /* TV ON/OFF */
+
+ { 0x39, KEY_CYCLEWINDOWS}, /* MINIMIZE (BOSS) */
+ { 0x3a, KEY_NEW}, /* PIP */
+ { 0x73, KEY_ZOOM}, /* FULLSCREEN */
+
+ { 0x66, KEY_INFO}, /* OSD (DISPLAY) */
+
+ { 0x31, KEY_DOT}, /* '.' */
+ { 0x63, KEY_ENTER}, /* ENTER */
+
+};
+
+static struct rc_keymap winfast_usbii_deluxe_map = {
+ .map = {
+ .scan = winfast_usbii_deluxe,
+ .size = ARRAY_SIZE(winfast_usbii_deluxe),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_WINFAST_USBII_DELUXE,
+ }
+};
+
+static int __init init_rc_map_winfast_usbii_deluxe(void)
+{
+ return ir_register_map(&winfast_usbii_deluxe_map);
+}
+
+static void __exit exit_rc_map_winfast_usbii_deluxe(void)
+{
+ ir_unregister_map(&winfast_usbii_deluxe_map);
+}
+
+module_init(init_rc_map_winfast_usbii_deluxe)
+module_exit(exit_rc_map_winfast_usbii_deluxe)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-winfast.c b/drivers/media/IR/keymaps/rc-winfast.c
new file mode 100644
index 000000000000..0e90a3bd9499
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-winfast.c
@@ -0,0 +1,102 @@
+/* rc-winfast.c - Keytable for winfast Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Table for Leadtek Winfast Remote Controls - used by both bttv and cx88 */
+
+static struct ir_scancode winfast[] = {
+ /* Keys 0 to 9 */
+ { 0x12, KEY_0 },
+ { 0x05, KEY_1 },
+ { 0x06, KEY_2 },
+ { 0x07, KEY_3 },
+ { 0x09, KEY_4 },
+ { 0x0a, KEY_5 },
+ { 0x0b, KEY_6 },
+ { 0x0d, KEY_7 },
+ { 0x0e, KEY_8 },
+ { 0x0f, KEY_9 },
+
+ { 0x00, KEY_POWER },
+ { 0x1b, KEY_AUDIO }, /* Audio Source */
+ { 0x02, KEY_TUNER }, /* TV/FM, not on Y0400052 */
+ { 0x1e, KEY_VIDEO }, /* Video Source */
+ { 0x16, KEY_INFO }, /* Display information */
+ { 0x04, KEY_VOLUMEUP },
+ { 0x08, KEY_VOLUMEDOWN },
+ { 0x0c, KEY_CHANNELUP },
+ { 0x10, KEY_CHANNELDOWN },
+ { 0x03, KEY_ZOOM }, /* fullscreen */
+ { 0x1f, KEY_TEXT }, /* closed caption/teletext */
+ { 0x20, KEY_SLEEP },
+ { 0x29, KEY_CLEAR }, /* boss key */
+ { 0x14, KEY_MUTE },
+ { 0x2b, KEY_RED },
+ { 0x2c, KEY_GREEN },
+ { 0x2d, KEY_YELLOW },
+ { 0x2e, KEY_BLUE },
+ { 0x18, KEY_KPPLUS }, /* fine tune + , not on Y040052 */
+ { 0x19, KEY_KPMINUS }, /* fine tune - , not on Y040052 */
+ { 0x2a, KEY_MEDIA }, /* PIP (Picture in Picture) */
+ { 0x21, KEY_DOT },
+ { 0x13, KEY_ENTER },
+ { 0x11, KEY_LAST }, /* Recall (last channel) */
+ { 0x22, KEY_PREVIOUS },
+ { 0x23, KEY_PLAYPAUSE },
+ { 0x24, KEY_NEXT },
+ { 0x25, KEY_TIME }, /* Time Shifting */
+ { 0x26, KEY_STOP },
+ { 0x27, KEY_RECORD },
+ { 0x28, KEY_SAVE }, /* Screenshot */
+ { 0x2f, KEY_MENU },
+ { 0x30, KEY_CANCEL },
+ { 0x31, KEY_CHANNEL }, /* Channel Surf */
+ { 0x32, KEY_SUBTITLE },
+ { 0x33, KEY_LANGUAGE },
+ { 0x34, KEY_REWIND },
+ { 0x35, KEY_FASTFORWARD },
+ { 0x36, KEY_TV },
+ { 0x37, KEY_RADIO }, /* FM */
+ { 0x38, KEY_DVD },
+
+ { 0x1a, KEY_MODE}, /* change to MCE mode on Y04G0051 */
+ { 0x3e, KEY_F21 }, /* MCE +VOL, on Y04G0033 */
+ { 0x3a, KEY_F22 }, /* MCE -VOL, on Y04G0033 */
+ { 0x3b, KEY_F23 }, /* MCE +CH, on Y04G0033 */
+ { 0x3f, KEY_F24 } /* MCE -CH, on Y04G0033 */
+};
+
+static struct rc_keymap winfast_map = {
+ .map = {
+ .scan = winfast,
+ .size = ARRAY_SIZE(winfast),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_WINFAST,
+ }
+};
+
+static int __init init_rc_map_winfast(void)
+{
+ return ir_register_map(&winfast_map);
+}
+
+static void __exit exit_rc_map_winfast(void)
+{
+ ir_unregister_map(&winfast_map);
+}
+
+module_init(init_rc_map_winfast)
+module_exit(exit_rc_map_winfast)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/rc-map.c b/drivers/media/IR/rc-map.c
new file mode 100644
index 000000000000..caf6a27b08c2
--- /dev/null
+++ b/drivers/media/IR/rc-map.c
@@ -0,0 +1,83 @@
+/* rc-map.c - handle IR keymap tables (scancode/keycode maps)
+ *
+ * Copyright (C) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <media/ir-core.h>
+#include <linux/spinlock.h>
+
+/* Used to handle IR raw handler extensions */
+static LIST_HEAD(rc_map_list);
+static DEFINE_SPINLOCK(rc_map_lock);
+
+static struct rc_keymap *seek_rc_map(const char *name)
+{
+ struct rc_keymap *map = NULL;
+
+ spin_lock(&rc_map_lock);
+ list_for_each_entry(map, &rc_map_list, list) {
+ if (!strcmp(name, map->map.name)) {
+ spin_unlock(&rc_map_lock);
+ return map;
+ }
+ }
+ spin_unlock(&rc_map_lock);
+
+ return NULL;
+}
+
+struct ir_scancode_table *get_rc_map(const char *name)
+{
+
+ struct rc_keymap *map;
+
+ map = seek_rc_map(name);
+#ifdef MODULE
+ if (!map) {
+ int rc = request_module(name);
+ if (rc < 0) {
+ printk(KERN_ERR "Couldn't load IR keymap %s\n", name);
+ return NULL;
+ }
+ msleep(20); /* Give the keymap module some time to register */
+
+ map = seek_rc_map(name);
+ }
+#endif
+ if (!map) {
+ printk(KERN_ERR "IR keymap %s not found\n", name);
+ return NULL;
+ }
+
+ printk(KERN_INFO "Registered IR keymap %s\n", map->map.name);
+
+ return &map->map;
+}
+EXPORT_SYMBOL_GPL(get_rc_map);
+
+int ir_register_map(struct rc_keymap *map)
+{
+ spin_lock(&rc_map_lock);
+ list_add_tail(&map->list, &rc_map_list);
+ spin_unlock(&rc_map_lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ir_register_map);
+
+void ir_unregister_map(struct rc_keymap *map)
+{
+ spin_lock(&rc_map_lock);
+ list_del(&map->list);
+ spin_unlock(&rc_map_lock);
+}
+EXPORT_SYMBOL_GPL(ir_unregister_map);
+
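
A minimal consumer sketch, not part of the patch: a bridge driver can look a keymap up by name through get_rc_map(), which may load the matching rc-* keymap module on demand. The scancode/keycode member names used below are an assumption based on how the tables are initialized, not a confirmed API.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <media/ir-core.h>
#include <media/rc-map.h>

static int example_dump_tevii_keymap(void)
{
	struct ir_scancode_table *tbl;
	unsigned int i;

	/* May trigger request_module() for the keymap when built modular */
	tbl = get_rc_map(RC_MAP_TEVII_NEC);
	if (!tbl)
		return -ENODEV;

	for (i = 0; i < tbl->size; i++)
		pr_debug("scancode 0x%02x -> keycode %u\n",
			 tbl->scan[i].scancode, tbl->scan[i].keycode);

	return 0;
}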
diff --git a/drivers/media/common/tuners/tuner-xc2028.c b/drivers/media/common/tuners/tuner-xc2028.c
index 96d61707f501..b6ce528e1889 100644
--- a/drivers/media/common/tuners/tuner-xc2028.c
+++ b/drivers/media/common/tuners/tuner-xc2028.c
@@ -100,6 +100,8 @@ struct xc2028_data {
if (size != _rc) \
tuner_info("i2c output error: rc = %d (should be %d)\n",\
_rc, (int)size); \
+ if (priv->ctrl.msleep) \
+ msleep(priv->ctrl.msleep); \
_rc; \
})
@@ -119,6 +121,8 @@ struct xc2028_data {
if (isize != _rc) \
tuner_err("i2c input error: rc = %d (should be %d)\n", \
_rc, (int)isize); \
+ if (priv->ctrl.msleep) \
+ msleep(priv->ctrl.msleep); \
_rc; \
})
@@ -129,8 +133,8 @@ struct xc2028_data {
(_rc = tuner_i2c_xfer_send(&priv->i2c_props, \
_val, sizeof(_val)))) { \
tuner_err("Error on line %d: %d\n", __LINE__, _rc); \
- } else \
- msleep(10); \
+ } else if (priv->ctrl.msleep) \
+ msleep(priv->ctrl.msleep); \
_rc; \
})
@@ -809,10 +813,20 @@ check_device:
hwmodel, (version & 0xf000) >> 12, (version & 0xf00) >> 8,
(version & 0xf0) >> 4, version & 0xf);
+
+ if (priv->ctrl.read_not_reliable)
+ goto read_not_reliable;
+
/* Check firmware version against what we downloaded. */
if (priv->firm_version != ((version & 0xf0) << 4 | (version & 0x0f))) {
- tuner_err("Incorrect readback of firmware version.\n");
- goto fail;
+ if (!priv->ctrl.read_not_reliable) {
+ tuner_err("Incorrect readback of firmware version.\n");
+ goto fail;
+ } else {
+ tuner_err("Returned an incorrect version. However, "
+ "read is not reliable enough. Ignoring it.\n");
+ hwmodel = 3028;
+ }
}
/* Check that the tuner hardware model remains consistent over time. */
@@ -826,6 +840,7 @@ check_device:
goto fail;
}
+read_not_reliable:
memcpy(&priv->cur_fw, &new_fw, sizeof(priv->cur_fw));
/*
@@ -996,6 +1011,8 @@ static int generic_set_freq(struct dvb_frontend *fe, u32 freq /* in HZ */,
The reset CLK is needed only with tm6000.
Driver should work fine even if this fails.
*/
+ if (priv->ctrl.msleep)
+ msleep(priv->ctrl.msleep);
do_tuner_callback(fe, XC2028_RESET_CLK, 1);
msleep(10);
diff --git a/drivers/media/common/tuners/tuner-xc2028.h b/drivers/media/common/tuners/tuner-xc2028.h
index a90c35d50add..9778c96a5006 100644
--- a/drivers/media/common/tuners/tuner-xc2028.h
+++ b/drivers/media/common/tuners/tuner-xc2028.h
@@ -33,12 +33,14 @@ enum firmware_type {
struct xc2028_ctrl {
char *fname;
int max_len;
+ int msleep;
unsigned int scode_table;
unsigned int mts :1;
unsigned int input1:1;
unsigned int vhfbw7:1;
unsigned int uhfbw8:1;
unsigned int disable_power_mgmt:1;
+ unsigned int read_not_reliable:1;
unsigned int demod;
enum firmware_type type:2;
};
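
The two fields added to struct xc2028_ctrl let a board slow every i2c transfer down (msleep) and skip the firmware-version readback check on tuners whose readback is unreliable (read_not_reliable). A hypothetical attach-time configuration follows; the firmware name, buffer length, and delay are illustrative assumptions, not taken from any real board profile.

#include "tuner-xc2028.h"

/* Illustrative only -- not a real board profile. */
static struct xc2028_ctrl example_xc2028_ctrl = {
	.fname             = "xc3028-v27.fw",	/* assumed firmware file name */
	.max_len           = 64,
	.msleep            = 10,	/* extra delay after each i2c transfer, in ms */
	.read_not_reliable = 1,		/* tolerate bogus firmware-version readback */
};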
diff --git a/drivers/media/dvb/bt8xx/dst.c b/drivers/media/dvb/bt8xx/dst.c
index 8b0cde38984d..248a2a9d8416 100644
--- a/drivers/media/dvb/bt8xx/dst.c
+++ b/drivers/media/dvb/bt8xx/dst.c
@@ -930,7 +930,6 @@ static int dst_fw_ver(struct dst_state *state)
dprintk(verbose, DST_INFO, 1, "Unsupported Command");
return -1;
}
- memset(&state->fw_version, '\0', 8);
memcpy(&state->fw_version, &state->rxbuffer, 8);
dprintk(verbose, DST_ERROR, 1, "Firmware Ver = %x.%x Build = %02x, on %x:%x, %x-%x-20%02x",
state->fw_version[0] >> 4, state->fw_version[0] & 0x0f,
@@ -1053,7 +1052,6 @@ static int dst_get_tuner_info(struct dst_state *state)
goto force;
}
}
- memset(&state->board_info, '\0', 8);
memcpy(&state->board_info, &state->rxbuffer, 8);
if (state->type_flags & DST_TYPE_HAS_MULTI_FE) {
dprintk(verbose, DST_ERROR, 1, "DST type has TS=188");
diff --git a/drivers/media/dvb/dm1105/dm1105.c b/drivers/media/dvb/dm1105/dm1105.c
index b6d46961a99e..b762e561a6d5 100644
--- a/drivers/media/dvb/dm1105/dm1105.c
+++ b/drivers/media/dvb/dm1105/dm1105.c
@@ -28,7 +28,7 @@
#include <linux/dma-mapping.h>
#include <linux/input.h>
#include <linux/slab.h>
-#include <media/ir-common.h>
+#include <media/ir-core.h>
#include "demux.h"
#include "dmxdev.h"
@@ -46,6 +46,8 @@
#include "z0194a.h"
#include "ds3000.h"
+#define MODULE_NAME "dm1105"
+
#define UNSET (-1U)
#define DM1105_BOARD_NOAUTO UNSET
@@ -265,7 +267,6 @@ static void dm1105_card_list(struct pci_dev *pci)
/* infrared remote control */
struct infrared {
struct input_dev *input_dev;
- struct ir_input_state ir;
char input_phys[32];
struct work_struct work;
u32 ir_command;
@@ -531,8 +532,7 @@ static void dm1105_emit_key(struct work_struct *work)
data = (ircom >> 8) & 0x7f;
- ir_input_keydown(ir->input_dev, &ir->ir, data);
- ir_input_nokey(ir->input_dev, &ir->ir);
+ ir_keydown(ir->input_dev, data, 0);
}
/* work handler */
@@ -594,8 +594,7 @@ static irqreturn_t dm1105_irq(int irq, void *dev_id)
int __devinit dm1105_ir_init(struct dm1105_dev *dm1105)
{
struct input_dev *input_dev;
- struct ir_scancode_table *ir_codes = &ir_codes_dm1105_nec_table;
- u64 ir_type = IR_TYPE_OTHER;
+ char *ir_codes = NULL;
int err = -ENOMEM;
input_dev = input_allocate_device();
@@ -606,12 +605,6 @@ int __devinit dm1105_ir_init(struct dm1105_dev *dm1105)
snprintf(dm1105->ir.input_phys, sizeof(dm1105->ir.input_phys),
"pci-%s/ir0", pci_name(dm1105->pdev));
- err = ir_input_init(input_dev, &dm1105->ir.ir, ir_type);
- if (err < 0) {
- input_free_device(input_dev);
- return err;
- }
-
input_dev->name = "DVB on-card IR receiver";
input_dev->phys = dm1105->ir.input_phys;
input_dev->id.bustype = BUS_PCI;
@@ -628,9 +621,13 @@ int __devinit dm1105_ir_init(struct dm1105_dev *dm1105)
INIT_WORK(&dm1105->ir.work, dm1105_emit_key);
- err = ir_input_register(input_dev, ir_codes, NULL);
+ err = ir_input_register(input_dev, ir_codes, NULL, MODULE_NAME);
+ if (err < 0) {
+ input_free_device(input_dev);
+ return err;
+ }
- return err;
+ return 0;
}
void __devexit dm1105_ir_exit(struct dm1105_dev *dm1105)
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index 67f189b7aa1f..977ddba3e235 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -426,7 +426,7 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
};
};
- if (demux->cnt_storage) {
+ if (demux->cnt_storage && dvb_demux_tscheck) {
/* check pkt counter */
if (pid < MAX_PID) {
if (buf[1] & 0x80)
@@ -1248,12 +1248,9 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux)
dvbdemux->feed[i].index = i;
}
- if (dvb_demux_tscheck) {
- dvbdemux->cnt_storage = vmalloc(MAX_PID + 1);
-
- if (!dvbdemux->cnt_storage)
- printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n");
- }
+ dvbdemux->cnt_storage = vmalloc(MAX_PID + 1);
+ if (!dvbdemux->cnt_storage)
+ printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n");
INIT_LIST_HEAD(&dvbdemux->frontend_list);
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
index 441c0642b30a..cccea412088b 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.c
+++ b/drivers/media/dvb/dvb-core/dvb_net.c
@@ -1109,14 +1109,14 @@ static int dvb_net_feed_stop(struct net_device *dev)
}
-static int dvb_set_mc_filter (struct net_device *dev, struct dev_mc_list *mc)
+static int dvb_set_mc_filter(struct net_device *dev, unsigned char *addr)
{
struct dvb_net_priv *priv = netdev_priv(dev);
if (priv->multi_num == DVB_NET_MULTICAST_MAX)
return -ENOMEM;
- memcpy(priv->multi_macs[priv->multi_num], mc->dmi_addr, 6);
+ memcpy(priv->multi_macs[priv->multi_num], addr, ETH_ALEN);
priv->multi_num++;
return 0;
@@ -1140,8 +1140,7 @@ static void wq_set_multicast_list (struct work_struct *work)
dprintk("%s: allmulti mode\n", dev->name);
priv->rx_mode = RX_MODE_ALL_MULTI;
} else if (!netdev_mc_empty(dev)) {
- int mci;
- struct dev_mc_list *mc;
+ struct netdev_hw_addr *ha;
dprintk("%s: set_mc_list, %d entries\n",
dev->name, netdev_mc_count(dev));
@@ -1149,11 +1148,8 @@ static void wq_set_multicast_list (struct work_struct *work)
priv->rx_mode = RX_MODE_MULTI;
priv->multi_num = 0;
- for (mci = 0, mc=dev->mc_list;
- mci < netdev_mc_count(dev);
- mc = mc->next, mci++) {
- dvb_set_mc_filter(dev, mc);
- }
+ netdev_for_each_mc_addr(ha, dev)
+ dvb_set_mc_filter(dev, ha->addr);
}
netif_addr_unlock_bh(dev);
diff --git a/drivers/media/dvb/dvb-usb/a800.c b/drivers/media/dvb/dvb-usb/a800.c
index 6247239982e9..b6cbb1dfc5f1 100644
--- a/drivers/media/dvb/dvb-usb/a800.c
+++ b/drivers/media/dvb/dvb-usb/a800.c
@@ -37,7 +37,7 @@ static int a800_identify_state(struct usb_device *udev, struct dvb_usb_device_pr
return 0;
}
-static struct dvb_usb_rc_key a800_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_a800_table[] = {
{ 0x0201, KEY_PROG1 }, /* SOURCE */
{ 0x0200, KEY_POWER }, /* POWER */
{ 0x0205, KEY_1 }, /* 1 */
@@ -147,8 +147,8 @@ static struct dvb_usb_device_properties a800_properties = {
.identify_state = a800_identify_state,
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = a800_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(a800_rc_keys),
+ .rc_key_map = ir_codes_a800_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_a800_table),
.rc_query = a800_rc_query,
.i2c_algo = &dibusb_i2c_algo,
diff --git a/drivers/media/dvb/dvb-usb/af9005-remote.c b/drivers/media/dvb/dvb-usb/af9005-remote.c
index f4379c650a19..b41fa873b04d 100644
--- a/drivers/media/dvb/dvb-usb/af9005-remote.c
+++ b/drivers/media/dvb/dvb-usb/af9005-remote.c
@@ -33,7 +33,7 @@ MODULE_PARM_DESC(debug,
#define deb_decode(args...) dprintk(dvb_usb_af9005_remote_debug,0x01,args)
-struct dvb_usb_rc_key af9005_rc_keys[] = {
+struct dvb_usb_rc_key ir_codes_af9005_table[] = {
{0x01b7, KEY_POWER},
{0x01a7, KEY_VOLUMEUP},
@@ -74,7 +74,7 @@ struct dvb_usb_rc_key af9005_rc_keys[] = {
{0x00d5, KEY_GOTO}, /* marked jump on the remote */
};
-int af9005_rc_keys_size = ARRAY_SIZE(af9005_rc_keys);
+int ir_codes_af9005_table_size = ARRAY_SIZE(ir_codes_af9005_table);
static int repeatable_keys[] = {
KEY_VOLUMEUP,
@@ -130,10 +130,10 @@ int af9005_rc_decode(struct dvb_usb_device *d, u8 * data, int len, u32 * event,
deb_decode("code != inverted code\n");
return 0;
}
- for (i = 0; i < af9005_rc_keys_size; i++) {
- if (rc5_custom(&af9005_rc_keys[i]) == cust
- && rc5_data(&af9005_rc_keys[i]) == dat) {
- *event = af9005_rc_keys[i].event;
+ for (i = 0; i < ir_codes_af9005_table_size; i++) {
+ if (rc5_custom(&ir_codes_af9005_table[i]) == cust
+ && rc5_data(&ir_codes_af9005_table[i]) == dat) {
+ *event = ir_codes_af9005_table[i].event;
*state = REMOTE_KEY_PRESSED;
deb_decode
("key pressed, event %x\n", *event);
@@ -146,8 +146,8 @@ int af9005_rc_decode(struct dvb_usb_device *d, u8 * data, int len, u32 * event,
return 0;
}
-EXPORT_SYMBOL(af9005_rc_keys);
-EXPORT_SYMBOL(af9005_rc_keys_size);
+EXPORT_SYMBOL(ir_codes_af9005_table);
+EXPORT_SYMBOL(ir_codes_af9005_table_size);
EXPORT_SYMBOL(af9005_rc_decode);
MODULE_AUTHOR("Luca Olivetti <luca@ventoso.org>");
diff --git a/drivers/media/dvb/dvb-usb/af9005.c b/drivers/media/dvb/dvb-usb/af9005.c
index ca5a0a4d2a47..cfd6107d5349 100644
--- a/drivers/media/dvb/dvb-usb/af9005.c
+++ b/drivers/media/dvb/dvb-usb/af9005.c
@@ -1109,8 +1109,8 @@ static int __init af9005_usb_module_init(void)
return result;
}
rc_decode = symbol_request(af9005_rc_decode);
- rc_keys = symbol_request(af9005_rc_keys);
- rc_keys_size = symbol_request(af9005_rc_keys_size);
+ rc_keys = symbol_request(ir_codes_af9005_table);
+ rc_keys_size = symbol_request(ir_codes_af9005_table_size);
if (rc_decode == NULL || rc_keys == NULL || rc_keys_size == NULL) {
err("af9005_rc_decode function not found, disabling remote");
af9005_properties.rc_query = NULL;
@@ -1128,9 +1128,9 @@ static void __exit af9005_usb_module_exit(void)
if (rc_decode != NULL)
symbol_put(af9005_rc_decode);
if (rc_keys != NULL)
- symbol_put(af9005_rc_keys);
+ symbol_put(ir_codes_af9005_table);
if (rc_keys_size != NULL)
- symbol_put(af9005_rc_keys_size);
+ symbol_put(ir_codes_af9005_table_size);
/* deregister this driver from the USB subsystem */
usb_deregister(&af9005_usb_driver);
}
diff --git a/drivers/media/dvb/dvb-usb/af9005.h b/drivers/media/dvb/dvb-usb/af9005.h
index 0bc48a012187..088e7083a39b 100644
--- a/drivers/media/dvb/dvb-usb/af9005.h
+++ b/drivers/media/dvb/dvb-usb/af9005.h
@@ -3490,7 +3490,7 @@ extern u8 regmask[8];
/* remote control decoder */
extern int af9005_rc_decode(struct dvb_usb_device *d, u8 * data, int len,
u32 * event, int *state);
-extern struct dvb_usb_rc_key af9005_rc_keys[];
-extern int af9005_rc_keys_size;
+extern struct dvb_usb_rc_key ir_codes_af9005_table[];
+extern int ir_codes_af9005_table_size;
#endif
diff --git a/drivers/media/dvb/dvb-usb/af9015.c b/drivers/media/dvb/dvb-usb/af9015.c
index 74d94e45324d..66c7c3ea7990 100644
--- a/drivers/media/dvb/dvb-usb/af9015.c
+++ b/drivers/media/dvb/dvb-usb/af9015.c
@@ -752,19 +752,19 @@ static const struct af9015_setup *af9015_setup_match(unsigned int id,
static const struct af9015_setup af9015_setup_modparam[] = {
{ AF9015_REMOTE_A_LINK_DTU_M,
- af9015_rc_keys_a_link, ARRAY_SIZE(af9015_rc_keys_a_link),
+ ir_codes_af9015_table_a_link, ARRAY_SIZE(ir_codes_af9015_table_a_link),
af9015_ir_table_a_link, ARRAY_SIZE(af9015_ir_table_a_link) },
{ AF9015_REMOTE_MSI_DIGIVOX_MINI_II_V3,
- af9015_rc_keys_msi, ARRAY_SIZE(af9015_rc_keys_msi),
+ ir_codes_af9015_table_msi, ARRAY_SIZE(ir_codes_af9015_table_msi),
af9015_ir_table_msi, ARRAY_SIZE(af9015_ir_table_msi) },
{ AF9015_REMOTE_MYGICTV_U718,
- af9015_rc_keys_mygictv, ARRAY_SIZE(af9015_rc_keys_mygictv),
+ ir_codes_af9015_table_mygictv, ARRAY_SIZE(ir_codes_af9015_table_mygictv),
af9015_ir_table_mygictv, ARRAY_SIZE(af9015_ir_table_mygictv) },
{ AF9015_REMOTE_DIGITTRADE_DVB_T,
- af9015_rc_keys_digittrade, ARRAY_SIZE(af9015_rc_keys_digittrade),
+ ir_codes_af9015_table_digittrade, ARRAY_SIZE(ir_codes_af9015_table_digittrade),
af9015_ir_table_digittrade, ARRAY_SIZE(af9015_ir_table_digittrade) },
{ AF9015_REMOTE_AVERMEDIA_KS,
- af9015_rc_keys_avermedia, ARRAY_SIZE(af9015_rc_keys_avermedia),
+ ir_codes_af9015_table_avermedia, ARRAY_SIZE(ir_codes_af9015_table_avermedia),
af9015_ir_table_avermedia_ks, ARRAY_SIZE(af9015_ir_table_avermedia_ks) },
{ }
};
@@ -772,32 +772,32 @@ static const struct af9015_setup af9015_setup_modparam[] = {
/* don't add new entries here anymore, use hashes instead */
static const struct af9015_setup af9015_setup_usbids[] = {
{ USB_VID_LEADTEK,
- af9015_rc_keys_leadtek, ARRAY_SIZE(af9015_rc_keys_leadtek),
+ ir_codes_af9015_table_leadtek, ARRAY_SIZE(ir_codes_af9015_table_leadtek),
af9015_ir_table_leadtek, ARRAY_SIZE(af9015_ir_table_leadtek) },
{ USB_VID_VISIONPLUS,
- af9015_rc_keys_twinhan, ARRAY_SIZE(af9015_rc_keys_twinhan),
+ ir_codes_af9015_table_twinhan, ARRAY_SIZE(ir_codes_af9015_table_twinhan),
af9015_ir_table_twinhan, ARRAY_SIZE(af9015_ir_table_twinhan) },
{ USB_VID_KWORLD_2, /* TODO: use correct rc keys */
- af9015_rc_keys_twinhan, ARRAY_SIZE(af9015_rc_keys_twinhan),
+ ir_codes_af9015_table_twinhan, ARRAY_SIZE(ir_codes_af9015_table_twinhan),
af9015_ir_table_kworld, ARRAY_SIZE(af9015_ir_table_kworld) },
{ USB_VID_AVERMEDIA,
- af9015_rc_keys_avermedia, ARRAY_SIZE(af9015_rc_keys_avermedia),
+ ir_codes_af9015_table_avermedia, ARRAY_SIZE(ir_codes_af9015_table_avermedia),
af9015_ir_table_avermedia, ARRAY_SIZE(af9015_ir_table_avermedia) },
{ USB_VID_MSI_2,
- af9015_rc_keys_msi_digivox_iii, ARRAY_SIZE(af9015_rc_keys_msi_digivox_iii),
+ ir_codes_af9015_table_msi_digivox_iii, ARRAY_SIZE(ir_codes_af9015_table_msi_digivox_iii),
af9015_ir_table_msi_digivox_iii, ARRAY_SIZE(af9015_ir_table_msi_digivox_iii) },
{ }
};
static const struct af9015_setup af9015_setup_hashes[] = {
{ 0xb8feb708,
- af9015_rc_keys_msi, ARRAY_SIZE(af9015_rc_keys_msi),
+ ir_codes_af9015_table_msi, ARRAY_SIZE(ir_codes_af9015_table_msi),
af9015_ir_table_msi, ARRAY_SIZE(af9015_ir_table_msi) },
{ 0xa3703d00,
- af9015_rc_keys_a_link, ARRAY_SIZE(af9015_rc_keys_a_link),
+ ir_codes_af9015_table_a_link, ARRAY_SIZE(ir_codes_af9015_table_a_link),
af9015_ir_table_a_link, ARRAY_SIZE(af9015_ir_table_a_link) },
{ 0x9b7dc64e,
- af9015_rc_keys_mygictv, ARRAY_SIZE(af9015_rc_keys_mygictv),
+ ir_codes_af9015_table_mygictv, ARRAY_SIZE(ir_codes_af9015_table_mygictv),
af9015_ir_table_mygictv, ARRAY_SIZE(af9015_ir_table_mygictv) },
{ }
};
@@ -836,8 +836,8 @@ static void af9015_set_remote_config(struct usb_device *udev,
} else if (udev->descriptor.idProduct ==
cpu_to_le16(USB_PID_TREKSTOR_DVBT)) {
table = &(const struct af9015_setup){ 0,
- af9015_rc_keys_trekstor,
- ARRAY_SIZE(af9015_rc_keys_trekstor),
+ ir_codes_af9015_table_trekstor,
+ ARRAY_SIZE(ir_codes_af9015_table_trekstor),
af9015_ir_table_trekstor,
ARRAY_SIZE(af9015_ir_table_trekstor)
};
@@ -1297,6 +1297,8 @@ static struct usb_device_id af9015_usb_table[] = {
{USB_DEVICE(USB_VID_KWORLD_2, USB_PID_SVEON_STV20)},
{USB_DEVICE(USB_VID_KWORLD_2, USB_PID_TINYTWIN_2)},
{USB_DEVICE(USB_VID_LEADTEK, USB_PID_WINFAST_DTV2000DS)},
+/* 30 */{USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_UB383_T)},
+ {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_395U_4)},
{0},
};
MODULE_DEVICE_TABLE(usb, af9015_usb_table);
@@ -1500,7 +1502,8 @@ static struct dvb_usb_device_properties af9015_properties[] = {
"(VS-DVB-T 395U)",
.cold_ids = {&af9015_usb_table[16],
&af9015_usb_table[17],
- &af9015_usb_table[18], NULL},
+ &af9015_usb_table[18],
+ &af9015_usb_table[31], NULL},
.warm_ids = {NULL},
},
{
@@ -1569,7 +1572,7 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.i2c_algo = &af9015_i2c_algo,
- .num_device_descs = 7, /* max 9 */
+ .num_device_descs = 8, /* max 9 */
.devices = {
{
.name = "AverMedia AVerTV Volar GPS 805 (A805)",
@@ -1608,6 +1611,12 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.cold_ids = {&af9015_usb_table[29], NULL},
.warm_ids = {NULL},
},
+ {
+ .name = "KWorld USB DVB-T Stick Mobile " \
+ "(UB383-T)",
+ .cold_ids = {&af9015_usb_table[30], NULL},
+ .warm_ids = {NULL},
+ },
}
},
};
diff --git a/drivers/media/dvb/dvb-usb/af9015.h b/drivers/media/dvb/dvb-usb/af9015.h
index ef36b1831490..63b2a4907b7e 100644
--- a/drivers/media/dvb/dvb-usb/af9015.h
+++ b/drivers/media/dvb/dvb-usb/af9015.h
@@ -123,7 +123,7 @@ enum af9015_remote {
/* LeadTek - Y04G0051 */
/* Leadtek WinFast DTV Dongle Gold */
-static struct dvb_usb_rc_key af9015_rc_keys_leadtek[] = {
+static struct dvb_usb_rc_key ir_codes_af9015_table_leadtek[] = {
{ 0x001e, KEY_1 },
{ 0x001f, KEY_2 },
{ 0x0020, KEY_3 },
@@ -227,7 +227,7 @@ static u8 af9015_ir_table_leadtek[] = {
};
/* TwinHan AzureWave AD-TU700(704J) */
-static struct dvb_usb_rc_key af9015_rc_keys_twinhan[] = {
+static struct dvb_usb_rc_key ir_codes_af9015_table_twinhan[] = {
{ 0x053f, KEY_POWER },
{ 0x0019, KEY_FAVORITES }, /* Favorite List */
{ 0x0004, KEY_TEXT }, /* Teletext */
@@ -338,7 +338,7 @@ static u8 af9015_ir_table_twinhan[] = {
};
/* A-Link DTU(m) */
-static struct dvb_usb_rc_key af9015_rc_keys_a_link[] = {
+static struct dvb_usb_rc_key ir_codes_af9015_table_a_link[] = {
{ 0x001e, KEY_1 },
{ 0x001f, KEY_2 },
{ 0x0020, KEY_3 },
@@ -381,7 +381,7 @@ static u8 af9015_ir_table_a_link[] = {
};
/* MSI DIGIVOX mini II V3.0 */
-static struct dvb_usb_rc_key af9015_rc_keys_msi[] = {
+static struct dvb_usb_rc_key ir_codes_af9015_table_msi[] = {
{ 0x001e, KEY_1 },
{ 0x001f, KEY_2 },
{ 0x0020, KEY_3 },
@@ -424,7 +424,7 @@ static u8 af9015_ir_table_msi[] = {
};
/* MYGICTV U718 */
-static struct dvb_usb_rc_key af9015_rc_keys_mygictv[] = {
+static struct dvb_usb_rc_key ir_codes_af9015_table_mygictv[] = {
{ 0x003d, KEY_SWITCHVIDEOMODE },
/* TV / AV */
{ 0x0545, KEY_POWER },
@@ -550,7 +550,7 @@ static u8 af9015_ir_table_kworld[] = {
};
/* AverMedia Volar X */
-static struct dvb_usb_rc_key af9015_rc_keys_avermedia[] = {
+static struct dvb_usb_rc_key ir_codes_af9015_table_avermedia[] = {
{ 0x053d, KEY_PROG1 }, /* SOURCE */
{ 0x0512, KEY_POWER }, /* POWER */
{ 0x051e, KEY_1 }, /* 1 */
@@ -656,7 +656,7 @@ static u8 af9015_ir_table_avermedia_ks[] = {
};
/* Digittrade DVB-T USB Stick */
-static struct dvb_usb_rc_key af9015_rc_keys_digittrade[] = {
+static struct dvb_usb_rc_key ir_codes_af9015_table_digittrade[] = {
{ 0x010f, KEY_LAST }, /* RETURN */
{ 0x0517, KEY_TEXT }, /* TELETEXT */
{ 0x0108, KEY_EPG }, /* EPG */
@@ -719,7 +719,7 @@ static u8 af9015_ir_table_digittrade[] = {
};
/* TREKSTOR DVB-T USB Stick */
-static struct dvb_usb_rc_key af9015_rc_keys_trekstor[] = {
+static struct dvb_usb_rc_key ir_codes_af9015_table_trekstor[] = {
{ 0x0704, KEY_AGAIN }, /* Home */
{ 0x0705, KEY_MUTE }, /* Mute */
{ 0x0706, KEY_UP }, /* Up */
@@ -782,7 +782,7 @@ static u8 af9015_ir_table_trekstor[] = {
};
/* MSI DIGIVOX mini III */
-static struct dvb_usb_rc_key af9015_rc_keys_msi_digivox_iii[] = {
+static struct dvb_usb_rc_key ir_codes_af9015_table_msi_digivox_iii[] = {
{ 0x0713, KEY_POWER }, /* [red power button] */
{ 0x073b, KEY_VIDEO }, /* Source */
{ 0x073e, KEY_ZOOM }, /* Zoom */
diff --git a/drivers/media/dvb/dvb-usb/anysee.c b/drivers/media/dvb/dvb-usb/anysee.c
index bb69f3719f9a..faca1ad88a67 100644
--- a/drivers/media/dvb/dvb-usb/anysee.c
+++ b/drivers/media/dvb/dvb-usb/anysee.c
@@ -399,7 +399,7 @@ static int anysee_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
return 0;
}
-static struct dvb_usb_rc_key anysee_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_anysee_table[] = {
{ 0x0100, KEY_0 },
{ 0x0101, KEY_1 },
{ 0x0102, KEY_2 },
@@ -518,8 +518,8 @@ static struct dvb_usb_device_properties anysee_properties = {
}
},
- .rc_key_map = anysee_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(anysee_rc_keys),
+ .rc_key_map = ir_codes_anysee_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_anysee_table),
.rc_query = anysee_rc_query,
.rc_interval = 200, /* windows driver uses 500ms */
diff --git a/drivers/media/dvb/dvb-usb/az6027.c b/drivers/media/dvb/dvb-usb/az6027.c
index d7290b2c0913..8934788bd83b 100644
--- a/drivers/media/dvb/dvb-usb/az6027.c
+++ b/drivers/media/dvb/dvb-usb/az6027.c
@@ -125,12 +125,12 @@ static const struct stb0899_s1_reg az6027_stb0899_s1_init_3[] = {
{ STB0899_RCOMPC , 0xc9 },
{ STB0899_AGC1CN , 0x01 },
{ STB0899_AGC1REF , 0x10 },
- { STB0899_RTC , 0x23 },
+ { STB0899_RTC , 0x23 },
{ STB0899_TMGCFG , 0x4e },
{ STB0899_AGC2REF , 0x34 },
{ STB0899_TLSR , 0x84 },
{ STB0899_CFD , 0xf7 },
- { STB0899_ACLC , 0x87 },
+ { STB0899_ACLC , 0x87 },
{ STB0899_BCLC , 0x94 },
{ STB0899_EQON , 0x41 },
{ STB0899_LDT , 0xf1 },
@@ -183,10 +183,10 @@ static const struct stb0899_s1_reg az6027_stb0899_s1_init_3[] = {
{ STB0899_ECNT3M , 0x0a },
{ STB0899_ECNT3L , 0xad },
{ STB0899_FECAUTO1 , 0x06 },
- { STB0899_FECM , 0x01 },
+ { STB0899_FECM , 0x01 },
{ STB0899_VTH12 , 0xb0 },
{ STB0899_VTH23 , 0x7a },
- { STB0899_VTH34 , 0x58 },
+ { STB0899_VTH34 , 0x58 },
{ STB0899_VTH56 , 0x38 },
{ STB0899_VTH67 , 0x34 },
{ STB0899_VTH78 , 0x24 },
@@ -195,7 +195,7 @@ static const struct stb0899_s1_reg az6027_stb0899_s1_init_3[] = {
{ STB0899_RSULC , 0xb1 }, /* DVB = 0xb1, DSS = 0xa1 */
{ STB0899_TSULC , 0x42 },
{ STB0899_RSLLC , 0x41 },
- { STB0899_TSLPL , 0x12 },
+ { STB0899_TSLPL , 0x12 },
{ STB0899_TSCFGH , 0x0c },
{ STB0899_TSCFGM , 0x00 },
{ STB0899_TSCFGL , 0x00 },
@@ -386,7 +386,7 @@ static int az6027_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
}
/* keys for the enclosed remote control */
-static struct dvb_usb_rc_key az6027_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_az6027_table[] = {
{ 0x01, KEY_1 },
{ 0x02, KEY_2 },
};
@@ -976,17 +976,14 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n
i++;
} else {
- if (msg[i].addr == 0xd0) {
- /* demod 16bit addr */
- req = 0xBD;
- index = (((msg[i].buf[0] << 8) & 0xff00) | (msg[i].buf[1] & 0x00ff));
- value = msg[i].addr + (2 << 8);
- length = msg[i].len - 2;
- len = msg[i].len - 2;
- for (j = 0; j < len; j++)
- data[j] = msg[i].buf[j + 2];
-
- }
+ /* demod 16bit addr */
+ req = 0xBD;
+ index = (((msg[i].buf[0] << 8) & 0xff00) | (msg[i].buf[1] & 0x00ff));
+ value = msg[i].addr + (2 << 8);
+ length = msg[i].len - 2;
+ len = msg[i].len - 2;
+ for (j = 0; j < len; j++)
+ data[j] = msg[i].buf[j + 2];
az6027_usb_out_op(d, req, value, index, data, length);
}
}
@@ -1059,8 +1056,10 @@ int az6027_identify_state(struct usb_device *udev,
static struct usb_device_id az6027_usb_table[] = {
{ USB_DEVICE(USB_VID_AZUREWAVE, USB_PID_AZUREWAVE_AZ6027) },
- { USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_DVBS2CI) },
- { USB_DEVICE(USB_VID_TECHNISAT, USB_PID_TECHNISAT_USB2_HDCI) },
+ { USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_DVBS2CI_V1) },
+ { USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_DVBS2CI_V2) },
+ { USB_DEVICE(USB_VID_TECHNISAT, USB_PID_TECHNISAT_USB2_HDCI_V1) },
+ { USB_DEVICE(USB_VID_TECHNISAT, USB_PID_TECHNISAT_USB2_HDCI_V2) },
{ },
};
@@ -1097,18 +1096,34 @@ static struct dvb_usb_device_properties az6027_properties = {
.power_ctrl = az6027_power_ctrl,
.read_mac_address = az6027_read_mac_addr,
*/
- .rc_key_map = az6027_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(az6027_rc_keys),
+ .rc_key_map = ir_codes_az6027_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_az6027_table),
.rc_interval = 400,
.rc_query = az6027_rc_query,
.i2c_algo = &az6027_i2c_algo,
- .num_device_descs = 1,
+ .num_device_descs = 5,
.devices = {
{
.name = "AZUREWAVE DVB-S/S2 USB2.0 (AZ6027)",
.cold_ids = { &az6027_usb_table[0], NULL },
.warm_ids = { NULL },
+ }, {
+ .name = "TERRATEC S7",
+ .cold_ids = { &az6027_usb_table[1], NULL },
+ .warm_ids = { NULL },
+ }, {
+ .name = "TERRATEC S7 MKII",
+ .cold_ids = { &az6027_usb_table[2], NULL },
+ .warm_ids = { NULL },
+ }, {
+ .name = "Technisat SkyStar USB 2 HD CI",
+ .cold_ids = { &az6027_usb_table[3], NULL },
+ .warm_ids = { NULL },
+ }, {
+ .name = "Technisat SkyStar USB 2 HD CI",
+ .cold_ids = { &az6027_usb_table[4], NULL },
+ .warm_ids = { NULL },
},
{ NULL },
}
diff --git a/drivers/media/dvb/dvb-usb/cinergyT2-core.c b/drivers/media/dvb/dvb-usb/cinergyT2-core.c
index e37ac4d48602..5a9c14bdc980 100644
--- a/drivers/media/dvb/dvb-usb/cinergyT2-core.c
+++ b/drivers/media/dvb/dvb-usb/cinergyT2-core.c
@@ -84,7 +84,7 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
return 0;
}
-static struct dvb_usb_rc_key cinergyt2_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_cinergyt2_table[] = {
{ 0x0401, KEY_POWER },
{ 0x0402, KEY_1 },
{ 0x0403, KEY_2 },
@@ -218,8 +218,8 @@ static struct dvb_usb_device_properties cinergyt2_properties = {
.power_ctrl = cinergyt2_power_ctrl,
.rc_interval = 50,
- .rc_key_map = cinergyt2_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(cinergyt2_rc_keys),
+ .rc_key_map = ir_codes_cinergyt2_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_cinergyt2_table),
.rc_query = cinergyt2_rc_query,
.generic_bulk_ctrl_endpoint = 1,
diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
index 960376da7d59..0eb490889162 100644
--- a/drivers/media/dvb/dvb-usb/cxusb.c
+++ b/drivers/media/dvb/dvb-usb/cxusb.c
@@ -461,7 +461,7 @@ static int cxusb_d680_dmb_rc_query(struct dvb_usb_device *d, u32 *event,
return 0;
}
-static struct dvb_usb_rc_key dvico_mce_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_dvico_mce_table[] = {
{ 0xfe02, KEY_TV },
{ 0xfe0e, KEY_MP3 },
{ 0xfe1a, KEY_DVD },
@@ -509,7 +509,7 @@ static struct dvb_usb_rc_key dvico_mce_rc_keys[] = {
{ 0xfe4e, KEY_POWER },
};
-static struct dvb_usb_rc_key dvico_portable_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_dvico_portable_table[] = {
{ 0xfc02, KEY_SETUP }, /* Profile */
{ 0xfc43, KEY_POWER2 },
{ 0xfc06, KEY_EPG },
@@ -548,7 +548,7 @@ static struct dvb_usb_rc_key dvico_portable_rc_keys[] = {
{ 0xfc00, KEY_UNKNOWN }, /* HD */
};
-static struct dvb_usb_rc_key d680_dmb_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_d680_dmb_table[] = {
{ 0x0038, KEY_UNKNOWN }, /* TV/AV */
{ 0x080c, KEY_ZOOM },
{ 0x0800, KEY_0 },
@@ -1025,8 +1025,9 @@ static int cxusb_dualdig4_rev2_frontend_attach(struct dvb_usb_adapter *adap)
cxusb_bluebird_gpio_pulse(adap->dev, 0x02, 1);
- dib7000p_i2c_enumeration(&adap->dev->i2c_adap, 1, 18,
- &cxusb_dualdig4_rev2_config);
+ if (dib7000p_i2c_enumeration(&adap->dev->i2c_adap, 1, 18,
+ &cxusb_dualdig4_rev2_config) < 0)
+ return -ENODEV;
adap->fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x80,
&cxusb_dualdig4_rev2_config);
@@ -1449,8 +1450,8 @@ static struct dvb_usb_device_properties cxusb_bluebird_lgh064f_properties = {
.i2c_algo = &cxusb_i2c_algo,
.rc_interval = 100,
- .rc_key_map = dvico_portable_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dvico_portable_rc_keys),
+ .rc_key_map = ir_codes_dvico_portable_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dvico_portable_table),
.rc_query = cxusb_rc_query,
.generic_bulk_ctrl_endpoint = 0x01,
@@ -1500,8 +1501,8 @@ static struct dvb_usb_device_properties cxusb_bluebird_dee1601_properties = {
.i2c_algo = &cxusb_i2c_algo,
.rc_interval = 150,
- .rc_key_map = dvico_mce_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dvico_mce_rc_keys),
+ .rc_key_map = ir_codes_dvico_mce_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dvico_mce_table),
.rc_query = cxusb_rc_query,
.generic_bulk_ctrl_endpoint = 0x01,
@@ -1559,8 +1560,8 @@ static struct dvb_usb_device_properties cxusb_bluebird_lgz201_properties = {
.i2c_algo = &cxusb_i2c_algo,
.rc_interval = 100,
- .rc_key_map = dvico_portable_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dvico_portable_rc_keys),
+ .rc_key_map = ir_codes_dvico_portable_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dvico_portable_table),
.rc_query = cxusb_rc_query,
.generic_bulk_ctrl_endpoint = 0x01,
@@ -1609,8 +1610,8 @@ static struct dvb_usb_device_properties cxusb_bluebird_dtt7579_properties = {
.i2c_algo = &cxusb_i2c_algo,
.rc_interval = 100,
- .rc_key_map = dvico_portable_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dvico_portable_rc_keys),
+ .rc_key_map = ir_codes_dvico_portable_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dvico_portable_table),
.rc_query = cxusb_rc_query,
.generic_bulk_ctrl_endpoint = 0x01,
@@ -1658,8 +1659,8 @@ static struct dvb_usb_device_properties cxusb_bluebird_dualdig4_properties = {
.generic_bulk_ctrl_endpoint = 0x01,
.rc_interval = 100,
- .rc_key_map = dvico_mce_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dvico_mce_rc_keys),
+ .rc_key_map = ir_codes_dvico_mce_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dvico_mce_table),
.rc_query = cxusb_bluebird2_rc_query,
.num_device_descs = 1,
@@ -1706,8 +1707,8 @@ static struct dvb_usb_device_properties cxusb_bluebird_nano2_properties = {
.generic_bulk_ctrl_endpoint = 0x01,
.rc_interval = 100,
- .rc_key_map = dvico_portable_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dvico_portable_rc_keys),
+ .rc_key_map = ir_codes_dvico_portable_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dvico_portable_table),
.rc_query = cxusb_bluebird2_rc_query,
.num_device_descs = 1,
@@ -1756,8 +1757,8 @@ static struct dvb_usb_device_properties cxusb_bluebird_nano2_needsfirmware_prope
.generic_bulk_ctrl_endpoint = 0x01,
.rc_interval = 100,
- .rc_key_map = dvico_portable_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dvico_portable_rc_keys),
+ .rc_key_map = ir_codes_dvico_portable_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dvico_portable_table),
.rc_query = cxusb_rc_query,
.num_device_descs = 1,
@@ -1847,8 +1848,8 @@ struct dvb_usb_device_properties cxusb_bluebird_dualdig4_rev2_properties = {
.generic_bulk_ctrl_endpoint = 0x01,
.rc_interval = 100,
- .rc_key_map = dvico_mce_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dvico_mce_rc_keys),
+ .rc_key_map = ir_codes_dvico_mce_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dvico_mce_table),
.rc_query = cxusb_rc_query,
.num_device_descs = 1,
@@ -1895,8 +1896,8 @@ static struct dvb_usb_device_properties cxusb_d680_dmb_properties = {
.generic_bulk_ctrl_endpoint = 0x01,
.rc_interval = 100,
- .rc_key_map = d680_dmb_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(d680_dmb_rc_keys),
+ .rc_key_map = ir_codes_d680_dmb_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_d680_dmb_table),
.rc_query = cxusb_d680_dmb_rc_query,
.num_device_descs = 1,
@@ -1944,8 +1945,8 @@ static struct dvb_usb_device_properties cxusb_mygica_d689_properties = {
.generic_bulk_ctrl_endpoint = 0x01,
.rc_interval = 100,
- .rc_key_map = d680_dmb_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(d680_dmb_rc_keys),
+ .rc_key_map = ir_codes_d680_dmb_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_d680_dmb_table),
.rc_query = cxusb_d680_dmb_rc_query,
.num_device_descs = 1,
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
index 34eab05afc6c..800800a9649e 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -562,7 +562,7 @@ static int dib0700_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
return 0;
}
-static struct dvb_usb_rc_key dib0700_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_dib0700_table[] = {
/* Key codes for the tiny Pinnacle remote*/
{ 0x0700, KEY_MUTE },
{ 0x0701, KEY_MENU }, /* Pinnacle logo */
@@ -794,6 +794,43 @@ static struct dvb_usb_rc_key dib0700_rc_keys[] = {
{ 0x7a13, KEY_VOLUMEDOWN },
{ 0x7a40, KEY_POWER },
{ 0x7a41, KEY_MUTE },
+
+ /* Key codes for the Elgato EyeTV Diversity silver remote,
+ set dvb_usb_dib0700_ir_proto=0 */
+ { 0x4501, KEY_POWER },
+ { 0x4502, KEY_MUTE },
+ { 0x4503, KEY_1 },
+ { 0x4504, KEY_2 },
+ { 0x4505, KEY_3 },
+ { 0x4506, KEY_4 },
+ { 0x4507, KEY_5 },
+ { 0x4508, KEY_6 },
+ { 0x4509, KEY_7 },
+ { 0x450a, KEY_8 },
+ { 0x450b, KEY_9 },
+ { 0x450c, KEY_LAST },
+ { 0x450d, KEY_0 },
+ { 0x450e, KEY_ENTER },
+ { 0x450f, KEY_RED },
+ { 0x4510, KEY_CHANNELUP },
+ { 0x4511, KEY_GREEN },
+ { 0x4512, KEY_VOLUMEDOWN },
+ { 0x4513, KEY_OK },
+ { 0x4514, KEY_VOLUMEUP },
+ { 0x4515, KEY_YELLOW },
+ { 0x4516, KEY_CHANNELDOWN },
+ { 0x4517, KEY_BLUE },
+ { 0x4518, KEY_LEFT }, /* Skip backwards */
+ { 0x4519, KEY_PLAYPAUSE },
+ { 0x451a, KEY_RIGHT }, /* Skip forward */
+ { 0x451b, KEY_REWIND },
+ { 0x451c, KEY_L }, /* Live */
+ { 0x451d, KEY_FASTFORWARD },
+ { 0x451e, KEY_STOP }, /* 'Reveal' for Teletext */
+ { 0x451f, KEY_MENU }, /* KEY_TEXT for Teletext */
+ { 0x4540, KEY_RECORD }, /* Font 'Size' for Teletext */
+ { 0x4541, KEY_SCREEN }, /* Full screen toggle, 'Hold' for Teletext */
+ { 0x4542, KEY_SELECT }, /* Select video input, 'Select' for Teletext */
};
/* STK7700P: Hauppauge Nova-T Stick, AVerMedia Volar */
@@ -2049,6 +2086,7 @@ struct usb_device_id dib0700_usb_id_table[] = {
/* 65 */{ USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV73ESE) },
{ USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV282E) },
{ USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK8096GP) },
+ { USB_DEVICE(USB_VID_ELGATO, USB_PID_ELGATO_EYETV_DIVERSITY) },
{ 0 } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, dib0700_usb_id_table);
@@ -2131,8 +2169,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -2160,8 +2198,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -2214,8 +2252,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -2251,8 +2289,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -2321,8 +2359,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -2360,8 +2398,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -2393,7 +2431,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
}
},
- .num_device_descs = 6,
+ .num_device_descs = 7,
.devices = {
{ "DiBcom STK7070PD reference design",
{ &dib0700_usb_id_table[17], NULL },
@@ -2419,11 +2457,15 @@ struct dvb_usb_device_properties dib0700_devices[] = {
{ "Sony PlayTV",
{ &dib0700_usb_id_table[44], NULL },
{ NULL },
- }
+ },
+ { "Elgato EyeTV Diversity",
+ { &dib0700_usb_id_table[68], NULL },
+ { NULL },
+ },
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -2484,8 +2526,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
.num_adapters = 1,
@@ -2513,8 +2555,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
.num_adapters = 1,
@@ -2574,8 +2616,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
.num_adapters = 1,
@@ -2612,8 +2654,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -2656,8 +2698,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
.num_adapters = 1,
@@ -2687,8 +2729,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
},
};
diff --git a/drivers/media/dvb/dvb-usb/dibusb-common.c b/drivers/media/dvb/dvb-usb/dibusb-common.c
index 9143b5631e88..bc08bc0b723c 100644
--- a/drivers/media/dvb/dvb-usb/dibusb-common.c
+++ b/drivers/media/dvb/dvb-usb/dibusb-common.c
@@ -327,7 +327,7 @@ EXPORT_SYMBOL(dibusb_dib3000mc_tuner_attach);
/*
* common remote control stuff
*/
-struct dvb_usb_rc_key dibusb_rc_keys[] = {
+struct dvb_usb_rc_key ir_codes_dibusb_table[] = {
/* Key codes for the little Artec T1/Twinhan/HAMA/ remote. */
{ 0x0016, KEY_POWER },
{ 0x0010, KEY_MUTE },
@@ -456,7 +456,7 @@ struct dvb_usb_rc_key dibusb_rc_keys[] = {
{ 0x804e, KEY_ENTER },
{ 0x804f, KEY_VOLUMEDOWN },
};
-EXPORT_SYMBOL(dibusb_rc_keys);
+EXPORT_SYMBOL(ir_codes_dibusb_table);
int dibusb_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
{
diff --git a/drivers/media/dvb/dvb-usb/dibusb-mb.c b/drivers/media/dvb/dvb-usb/dibusb-mb.c
index 5c0126dc1ff9..eb2e6f050fbe 100644
--- a/drivers/media/dvb/dvb-usb/dibusb-mb.c
+++ b/drivers/media/dvb/dvb-usb/dibusb-mb.c
@@ -212,7 +212,7 @@ static struct dvb_usb_device_properties dibusb1_1_properties = {
.power_ctrl = dibusb_power_ctrl,
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dibusb_rc_keys,
+ .rc_key_map = ir_codes_dibusb_table,
.rc_key_map_size = 111, /* wow, that is ugly ... I want to load it to the driver dynamically */
.rc_query = dibusb_rc_query,
@@ -296,7 +296,7 @@ static struct dvb_usb_device_properties dibusb1_1_an2235_properties = {
.power_ctrl = dibusb_power_ctrl,
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dibusb_rc_keys,
+ .rc_key_map = ir_codes_dibusb_table,
.rc_key_map_size = 111, /* wow, that is ugly ... I want to load it to the driver dynamically */
.rc_query = dibusb_rc_query,
@@ -360,7 +360,7 @@ static struct dvb_usb_device_properties dibusb2_0b_properties = {
.power_ctrl = dibusb2_0_power_ctrl,
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dibusb_rc_keys,
+ .rc_key_map = ir_codes_dibusb_table,
.rc_key_map_size = 111, /* wow, that is ugly ... I want to load it to the driver dynamically */
.rc_query = dibusb_rc_query,
@@ -417,7 +417,7 @@ static struct dvb_usb_device_properties artec_t1_usb2_properties = {
.power_ctrl = dibusb2_0_power_ctrl,
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dibusb_rc_keys,
+ .rc_key_map = ir_codes_dibusb_table,
.rc_key_map_size = 111, /* wow, that is ugly ... I want to load it to the driver dynamically */
.rc_query = dibusb_rc_query,
diff --git a/drivers/media/dvb/dvb-usb/dibusb-mc.c b/drivers/media/dvb/dvb-usb/dibusb-mc.c
index a05b9f875663..588308eb6638 100644
--- a/drivers/media/dvb/dvb-usb/dibusb-mc.c
+++ b/drivers/media/dvb/dvb-usb/dibusb-mc.c
@@ -82,7 +82,7 @@ static struct dvb_usb_device_properties dibusb_mc_properties = {
.power_ctrl = dibusb2_0_power_ctrl,
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dibusb_rc_keys,
+ .rc_key_map = ir_codes_dibusb_table,
.rc_key_map_size = 111, /* FIXME */
.rc_query = dibusb_rc_query,
diff --git a/drivers/media/dvb/dvb-usb/dibusb.h b/drivers/media/dvb/dvb-usb/dibusb.h
index 8e847aa73ba1..3d50ac59088f 100644
--- a/drivers/media/dvb/dvb-usb/dibusb.h
+++ b/drivers/media/dvb/dvb-usb/dibusb.h
@@ -124,7 +124,7 @@ extern int dibusb2_0_power_ctrl(struct dvb_usb_device *, int);
#define DEFAULT_RC_INTERVAL 150
//#define DEFAULT_RC_INTERVAL 100000
-extern struct dvb_usb_rc_key dibusb_rc_keys[];
+extern struct dvb_usb_rc_key ir_codes_dibusb_table[];
extern int dibusb_rc_query(struct dvb_usb_device *, u32 *, int *);
extern int dibusb_read_eeprom_byte(struct dvb_usb_device *, u8, u8 *);
diff --git a/drivers/media/dvb/dvb-usb/digitv.c b/drivers/media/dvb/dvb-usb/digitv.c
index 955147d00756..e826077094fa 100644
--- a/drivers/media/dvb/dvb-usb/digitv.c
+++ b/drivers/media/dvb/dvb-usb/digitv.c
@@ -161,7 +161,7 @@ static int digitv_tuner_attach(struct dvb_usb_adapter *adap)
return 0;
}
-static struct dvb_usb_rc_key digitv_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_digitv_table[] = {
{ 0x5f55, KEY_0 },
{ 0x6f55, KEY_1 },
{ 0x9f55, KEY_2 },
@@ -311,8 +311,8 @@ static struct dvb_usb_device_properties digitv_properties = {
.identify_state = digitv_identify_state,
.rc_interval = 1000,
- .rc_key_map = digitv_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(digitv_rc_keys),
+ .rc_key_map = ir_codes_digitv_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_digitv_table),
.rc_query = digitv_rc_query,
.i2c_algo = &digitv_i2c_algo,
diff --git a/drivers/media/dvb/dvb-usb/dtt200u.c b/drivers/media/dvb/dvb-usb/dtt200u.c
index a1b12b01cbe4..f57e59044d4d 100644
--- a/drivers/media/dvb/dvb-usb/dtt200u.c
+++ b/drivers/media/dvb/dvb-usb/dtt200u.c
@@ -57,7 +57,7 @@ static int dtt200u_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid,
/* remote control */
/* key list for the tiny remote control (Yakumo, don't know about the others) */
-static struct dvb_usb_rc_key dtt200u_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_dtt200u_table[] = {
{ 0x8001, KEY_MUTE },
{ 0x8002, KEY_CHANNELDOWN },
{ 0x8003, KEY_VOLUMEDOWN },
@@ -162,8 +162,8 @@ static struct dvb_usb_device_properties dtt200u_properties = {
.power_ctrl = dtt200u_power_ctrl,
.rc_interval = 300,
- .rc_key_map = dtt200u_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dtt200u_rc_keys),
+ .rc_key_map = ir_codes_dtt200u_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dtt200u_table),
.rc_query = dtt200u_rc_query,
.generic_bulk_ctrl_endpoint = 0x01,
@@ -207,8 +207,8 @@ static struct dvb_usb_device_properties wt220u_properties = {
.power_ctrl = dtt200u_power_ctrl,
.rc_interval = 300,
- .rc_key_map = dtt200u_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dtt200u_rc_keys),
+ .rc_key_map = ir_codes_dtt200u_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dtt200u_table),
.rc_query = dtt200u_rc_query,
.generic_bulk_ctrl_endpoint = 0x01,
@@ -252,8 +252,8 @@ static struct dvb_usb_device_properties wt220u_fc_properties = {
.power_ctrl = dtt200u_power_ctrl,
.rc_interval = 300,
- .rc_key_map = dtt200u_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dtt200u_rc_keys),
+ .rc_key_map = ir_codes_dtt200u_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dtt200u_table),
.rc_query = dtt200u_rc_query,
.generic_bulk_ctrl_endpoint = 0x01,
@@ -297,8 +297,8 @@ static struct dvb_usb_device_properties wt220u_zl0353_properties = {
.power_ctrl = dtt200u_power_ctrl,
.rc_interval = 300,
- .rc_key_map = dtt200u_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dtt200u_rc_keys),
+ .rc_key_map = ir_codes_dtt200u_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dtt200u_table),
.rc_query = dtt200u_rc_query,
.generic_bulk_ctrl_endpoint = 0x01,
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
index ae8b57acfe05..085c4e457e0e 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
@@ -124,9 +124,11 @@
#define USB_PID_KWORLD_395U 0xe396
#define USB_PID_KWORLD_395U_2 0xe39b
#define USB_PID_KWORLD_395U_3 0xe395
+#define USB_PID_KWORLD_395U_4 0xe39a
#define USB_PID_KWORLD_MC810 0xc810
#define USB_PID_KWORLD_PC160_2T 0xc160
#define USB_PID_KWORLD_PC160_T 0xc161
+#define USB_PID_KWORLD_UB383_T 0xe383
#define USB_PID_KWORLD_VSTREAM_COLD 0x17de
#define USB_PID_KWORLD_VSTREAM_WARM 0x17df
#define USB_PID_TERRATEC_CINERGY_T_USB_XE 0x0055
@@ -288,6 +290,7 @@
#define USB_PID_MSI_DIGI_VOX_MINI_III 0x8807
#define USB_PID_SONY_PLAYTV 0x0003
#define USB_PID_MYGICA_D689 0xd811
+#define USB_PID_ELGATO_EYETV_DIVERSITY 0x0011
#define USB_PID_ELGATO_EYETV_DTT 0x0021
#define USB_PID_ELGATO_EYETV_DTT_Dlx 0x0020
#define USB_PID_DVB_T_USB_STICK_HIGH_SPEED_COLD 0x5000
@@ -296,6 +299,8 @@
#define USB_PID_TVWAY_PLUS 0x0002
#define USB_PID_SVEON_STV20 0xe39d
#define USB_PID_AZUREWAVE_AZ6027 0x3275
-#define USB_PID_TERRATEC_DVBS2CI 0x3275
-#define USB_PID_TECHNISAT_USB2_HDCI 0x0002
+#define USB_PID_TERRATEC_DVBS2CI_V1 0x10a4
+#define USB_PID_TERRATEC_DVBS2CI_V2 0x10ac
+#define USB_PID_TECHNISAT_USB2_HDCI_V1 0x0001
+#define USB_PID_TECHNISAT_USB2_HDCI_V2 0x0002
#endif
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-urb.c b/drivers/media/dvb/dvb-usb/dvb-usb-urb.c
index 6fe71c6745eb..bb46ba6a3573 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-urb.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-urb.c
@@ -42,6 +42,8 @@ int dvb_usb_generic_rw(struct dvb_usb_device *d, u8 *wbuf, u16 wlen, u8 *rbuf,
msleep(delay_ms);
ret = usb_bulk_msg(d->udev,usb_rcvbulkpipe(d->udev,
+ d->props.generic_bulk_ctrl_endpoint_response ?
+ d->props.generic_bulk_ctrl_endpoint_response :
d->props.generic_bulk_ctrl_endpoint),rbuf,rlen,&actlen,
2000);
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb.h b/drivers/media/dvb/dvb-usb/dvb-usb.h
index 0143aef19ecd..4a9f676087bf 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb.h
@@ -198,6 +198,12 @@ struct dvb_usb_adapter_properties {
* is non-zero, one can use dvb_usb_generic_rw and dvb_usb_generic_write-
* helper functions.
*
+ * @generic_bulk_ctrl_endpoint_response: some DVB USB devices use a separate
+ * endpoint for responses to control messages sent with bulk transfers via
+ * the generic_bulk_ctrl_endpoint. When this is non-zero, it is used
+ * instead of the generic_bulk_ctrl_endpoint when reading USB responses in
+ * the dvb_usb_generic_rw helper function.
+ *
* @num_device_descs: number of struct dvb_usb_device_description in @devices
* @devices: array of struct dvb_usb_device_description compatibles with these
* properties.
@@ -239,6 +245,7 @@ struct dvb_usb_device_properties {
struct i2c_algorithm *i2c_algo;
int generic_bulk_ctrl_endpoint;
+ int generic_bulk_ctrl_endpoint_response;
int num_device_descs;
struct dvb_usb_device_description devices[12];
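A minimal sketch (not taken from the patch; the struct and helper below are simplified stand-ins) of how the new response endpoint is meant to be chosen when reading a reply, mirroring the ternary added to dvb_usb_generic_rw above:

    struct bulk_ctrl_props_sketch {
        int generic_bulk_ctrl_endpoint;          /* written to, and read from by default */
        int generic_bulk_ctrl_endpoint_response; /* 0 means: reuse the control endpoint  */
    };

    /* Pick the endpoint used for reading the response to a bulk control message. */
    static int pick_response_endpoint(const struct bulk_ctrl_props_sketch *p)
    {
        return p->generic_bulk_ctrl_endpoint_response ?
               p->generic_bulk_ctrl_endpoint_response :
               p->generic_bulk_ctrl_endpoint;
    }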
diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
index accc65509b07..e8fb85380672 100644
--- a/drivers/media/dvb/dvb-usb/dw2102.c
+++ b/drivers/media/dvb/dvb-usb/dw2102.c
@@ -73,7 +73,7 @@
"Please see linux/Documentation/dvb/ for more details " \
"on firmware-problems."
-struct dvb_usb_rc_keys_table {
+struct ir_codes_dvb_usb_table_table {
struct dvb_usb_rc_key *rc_keys;
int rc_keys_size;
};
@@ -948,7 +948,7 @@ static int dw3101_tuner_attach(struct dvb_usb_adapter *adap)
return 0;
}
-static struct dvb_usb_rc_key dw210x_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_dw210x_table[] = {
{ 0xf80a, KEY_Q }, /*power*/
{ 0xf80c, KEY_M }, /*mute*/
{ 0xf811, KEY_1 },
@@ -982,7 +982,7 @@ static struct dvb_usb_rc_key dw210x_rc_keys[] = {
{ 0xf81b, KEY_B }, /*recall*/
};
-static struct dvb_usb_rc_key tevii_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_tevii_table[] = {
{ 0xf80a, KEY_POWER },
{ 0xf80c, KEY_MUTE },
{ 0xf811, KEY_1 },
@@ -1032,7 +1032,7 @@ static struct dvb_usb_rc_key tevii_rc_keys[] = {
{ 0xf858, KEY_SWITCHVIDEOMODE },
};
-static struct dvb_usb_rc_key tbs_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_tbs_table[] = {
{ 0xf884, KEY_POWER },
{ 0xf894, KEY_MUTE },
{ 0xf887, KEY_1 },
@@ -1067,10 +1067,10 @@ static struct dvb_usb_rc_key tbs_rc_keys[] = {
{ 0xf89b, KEY_MODE }
};
-static struct dvb_usb_rc_keys_table keys_tables[] = {
- { dw210x_rc_keys, ARRAY_SIZE(dw210x_rc_keys) },
- { tevii_rc_keys, ARRAY_SIZE(tevii_rc_keys) },
- { tbs_rc_keys, ARRAY_SIZE(tbs_rc_keys) },
+static struct ir_codes_dvb_usb_table_table keys_tables[] = {
+ { ir_codes_dw210x_table, ARRAY_SIZE(ir_codes_dw210x_table) },
+ { ir_codes_tevii_table, ARRAY_SIZE(ir_codes_tevii_table) },
+ { ir_codes_tbs_table, ARRAY_SIZE(ir_codes_tbs_table) },
};
static int dw2102_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
@@ -1185,14 +1185,14 @@ static int dw2102_load_firmware(struct usb_device *dev,
/* init registers */
switch (dev->descriptor.idProduct) {
case USB_PID_PROF_1100:
- s6x0_properties.rc_key_map = tbs_rc_keys;
+ s6x0_properties.rc_key_map = ir_codes_tbs_table;
s6x0_properties.rc_key_map_size =
- ARRAY_SIZE(tbs_rc_keys);
+ ARRAY_SIZE(ir_codes_tbs_table);
break;
case USB_PID_TEVII_S650:
- dw2104_properties.rc_key_map = tevii_rc_keys;
+ dw2104_properties.rc_key_map = ir_codes_tevii_table;
dw2104_properties.rc_key_map_size =
- ARRAY_SIZE(tevii_rc_keys);
+ ARRAY_SIZE(ir_codes_tevii_table);
case USB_PID_DW2104:
reset = 1;
dw210x_op_rw(dev, 0xc4, 0x0000, 0, &reset, 1,
@@ -1255,8 +1255,8 @@ static struct dvb_usb_device_properties dw2102_properties = {
.no_reconnect = 1,
.i2c_algo = &dw2102_serit_i2c_algo,
- .rc_key_map = dw210x_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dw210x_rc_keys),
+ .rc_key_map = ir_codes_dw210x_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dw210x_table),
.rc_interval = 150,
.rc_query = dw2102_rc_query,
@@ -1306,8 +1306,8 @@ static struct dvb_usb_device_properties dw2104_properties = {
.no_reconnect = 1,
.i2c_algo = &dw2104_i2c_algo,
- .rc_key_map = dw210x_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dw210x_rc_keys),
+ .rc_key_map = ir_codes_dw210x_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dw210x_table),
.rc_interval = 150,
.rc_query = dw2102_rc_query,
@@ -1353,8 +1353,8 @@ static struct dvb_usb_device_properties dw3101_properties = {
.no_reconnect = 1,
.i2c_algo = &dw3101_i2c_algo,
- .rc_key_map = dw210x_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dw210x_rc_keys),
+ .rc_key_map = ir_codes_dw210x_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dw210x_table),
.rc_interval = 150,
.rc_query = dw2102_rc_query,
@@ -1396,8 +1396,8 @@ static struct dvb_usb_device_properties s6x0_properties = {
.no_reconnect = 1,
.i2c_algo = &s6x0_i2c_algo,
- .rc_key_map = tevii_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(tevii_rc_keys),
+ .rc_key_map = ir_codes_tevii_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_tevii_table),
.rc_interval = 150,
.rc_query = dw2102_rc_query,
@@ -1459,8 +1459,8 @@ static int dw2102_probe(struct usb_interface *intf,
/* fill only different fields */
p7500->firmware = "dvb-usb-p7500.fw";
p7500->devices[0] = d7500;
- p7500->rc_key_map = tbs_rc_keys;
- p7500->rc_key_map_size = ARRAY_SIZE(tbs_rc_keys);
+ p7500->rc_key_map = ir_codes_tbs_table;
+ p7500->rc_key_map_size = ARRAY_SIZE(ir_codes_tbs_table);
p7500->adapter->frontend_attach = prof_7500_frontend_attach;
if (0 == dvb_usb_device_init(intf, &dw2102_properties,
diff --git a/drivers/media/dvb/dvb-usb/friio-fe.c b/drivers/media/dvb/dvb-usb/friio-fe.c
index d14bd227b502..93c21ddd0b77 100644
--- a/drivers/media/dvb/dvb-usb/friio-fe.c
+++ b/drivers/media/dvb/dvb-usb/friio-fe.c
@@ -300,7 +300,7 @@ static int jdvbt90502_set_frontend(struct dvb_frontend *fe,
struct dvb_frontend_parameters *p)
{
/**
- * NOTE: ignore all the paramters except frequency.
+ * NOTE: ignore all the parameters except frequency.
* others should be fixed to the proper value for ISDB-T,
* but don't check here.
*/
diff --git a/drivers/media/dvb/dvb-usb/gp8psk.c b/drivers/media/dvb/dvb-usb/gp8psk.c
index afb444db43ad..45106ac49674 100644
--- a/drivers/media/dvb/dvb-usb/gp8psk.c
+++ b/drivers/media/dvb/dvb-usb/gp8psk.c
@@ -105,6 +105,10 @@ static int gp8psk_load_bcm4500fw(struct dvb_usb_device *d)
ptr = fw->data;
buf = kmalloc(64, GFP_KERNEL | GFP_DMA);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto out_rel_fw;
+ }
while (ptr[0] != 0xff) {
u16 buflen = ptr[0] + 4;
diff --git a/drivers/media/dvb/dvb-usb/m920x.c b/drivers/media/dvb/dvb-usb/m920x.c
index 737ffa36ac9c..c211fef45fc3 100644
--- a/drivers/media/dvb/dvb-usb/m920x.c
+++ b/drivers/media/dvb/dvb-usb/m920x.c
@@ -589,7 +589,7 @@ static struct m920x_inits pinnacle310e_init[] = {
};
/* ir keymaps */
-static struct dvb_usb_rc_key megasky_rc_keys [] = {
+static struct dvb_usb_rc_key ir_codes_megasky_table [] = {
{ 0x0012, KEY_POWER },
{ 0x001e, KEY_CYCLEWINDOWS }, /* min/max */
{ 0x0002, KEY_CHANNELUP },
@@ -608,7 +608,7 @@ static struct dvb_usb_rc_key megasky_rc_keys [] = {
{ 0x000e, KEY_COFFEE }, /* "MTS" */
};
-static struct dvb_usb_rc_key tvwalkertwin_rc_keys [] = {
+static struct dvb_usb_rc_key ir_codes_tvwalkertwin_table [] = {
{ 0x0001, KEY_ZOOM }, /* Full Screen */
{ 0x0002, KEY_CAMERA }, /* snapshot */
{ 0x0003, KEY_MUTE },
@@ -628,7 +628,7 @@ static struct dvb_usb_rc_key tvwalkertwin_rc_keys [] = {
{ 0x001e, KEY_VOLUMEUP },
};
-static struct dvb_usb_rc_key pinnacle310e_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_pinnacle310e_table[] = {
{ 0x16, KEY_POWER },
{ 0x17, KEY_FAVORITES },
{ 0x0f, KEY_TEXT },
@@ -785,8 +785,8 @@ static struct dvb_usb_device_properties megasky_properties = {
.download_firmware = m920x_firmware_download,
.rc_interval = 100,
- .rc_key_map = megasky_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(megasky_rc_keys),
+ .rc_key_map = ir_codes_megasky_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_megasky_table),
.rc_query = m920x_rc_query,
.size_of_priv = sizeof(struct m920x_state),
@@ -886,8 +886,8 @@ static struct dvb_usb_device_properties tvwalkertwin_properties = {
.download_firmware = m920x_firmware_download,
.rc_interval = 100,
- .rc_key_map = tvwalkertwin_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(tvwalkertwin_rc_keys),
+ .rc_key_map = ir_codes_tvwalkertwin_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_tvwalkertwin_table),
.rc_query = m920x_rc_query,
.size_of_priv = sizeof(struct m920x_state),
@@ -993,8 +993,8 @@ static struct dvb_usb_device_properties pinnacle_pctv310e_properties = {
.download_firmware = NULL,
.rc_interval = 100,
- .rc_key_map = pinnacle310e_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(pinnacle310e_rc_keys),
+ .rc_key_map = ir_codes_pinnacle310e_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_pinnacle310e_table),
.rc_query = m920x_rc_query,
.size_of_priv = sizeof(struct m920x_state),
diff --git a/drivers/media/dvb/dvb-usb/nova-t-usb2.c b/drivers/media/dvb/dvb-usb/nova-t-usb2.c
index b41d66ef8325..d195a587cc65 100644
--- a/drivers/media/dvb/dvb-usb/nova-t-usb2.c
+++ b/drivers/media/dvb/dvb-usb/nova-t-usb2.c
@@ -21,7 +21,7 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
#define deb_ee(args...) dprintk(debug,0x02,args)
/* Hauppauge NOVA-T USB2 keys */
-static struct dvb_usb_rc_key haupp_rc_keys [] = {
+static struct dvb_usb_rc_key ir_codes_haupp_table [] = {
{ 0x1e00, KEY_0 },
{ 0x1e01, KEY_1 },
{ 0x1e02, KEY_2 },
@@ -91,14 +91,14 @@ static int nova_t_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
deb_rc("raw key code 0x%02x, 0x%02x, 0x%02x to c: %02x d: %02x toggle: %d\n",key[1],key[2],key[3],custom,data,toggle);
- for (i = 0; i < ARRAY_SIZE(haupp_rc_keys); i++) {
- if (rc5_data(&haupp_rc_keys[i]) == data &&
- rc5_custom(&haupp_rc_keys[i]) == custom) {
+ for (i = 0; i < ARRAY_SIZE(ir_codes_haupp_table); i++) {
+ if (rc5_data(&ir_codes_haupp_table[i]) == data &&
+ rc5_custom(&ir_codes_haupp_table[i]) == custom) {
- deb_rc("c: %x, d: %x\n", rc5_data(&haupp_rc_keys[i]),
- rc5_custom(&haupp_rc_keys[i]));
+ deb_rc("c: %x, d: %x\n", rc5_data(&ir_codes_haupp_table[i]),
+ rc5_custom(&ir_codes_haupp_table[i]));
- *event = haupp_rc_keys[i].event;
+ *event = ir_codes_haupp_table[i].event;
*state = REMOTE_KEY_PRESSED;
if (st->old_toggle == toggle) {
if (st->last_repeat_count++ < 2)
@@ -196,8 +196,8 @@ static struct dvb_usb_device_properties nova_t_properties = {
.read_mac_address = nova_t_read_mac_address,
.rc_interval = 100,
- .rc_key_map = haupp_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(haupp_rc_keys),
+ .rc_key_map = ir_codes_haupp_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_haupp_table),
.rc_query = nova_t_rc_query,
.i2c_algo = &dibusb_i2c_algo,
diff --git a/drivers/media/dvb/dvb-usb/opera1.c b/drivers/media/dvb/dvb-usb/opera1.c
index 830557696ae6..dfb81ff1d9a7 100644
--- a/drivers/media/dvb/dvb-usb/opera1.c
+++ b/drivers/media/dvb/dvb-usb/opera1.c
@@ -35,7 +35,7 @@
struct opera1_state {
u32 last_key_pressed;
};
-struct opera_rc_keys {
+struct ir_codes_opera_table {
u32 keycode;
u32 event;
};
@@ -331,7 +331,7 @@ static int opera1_pid_filter_control(struct dvb_usb_adapter *adap, int onoff)
return 0;
}
-static struct dvb_usb_rc_key opera1_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_opera1_table[] = {
{0x5fa0, KEY_1},
{0x51af, KEY_2},
{0x5da2, KEY_3},
@@ -404,12 +404,12 @@ static int opera1_rc_query(struct dvb_usb_device *dev, u32 * event, int *state)
send_key = (send_key & 0xffff) | 0x0100;
- for (i = 0; i < ARRAY_SIZE(opera1_rc_keys); i++) {
- if (rc5_scan(&opera1_rc_keys[i]) == (send_key & 0xffff)) {
+ for (i = 0; i < ARRAY_SIZE(ir_codes_opera1_table); i++) {
+ if (rc5_scan(&ir_codes_opera1_table[i]) == (send_key & 0xffff)) {
*state = REMOTE_KEY_PRESSED;
- *event = opera1_rc_keys[i].event;
+ *event = ir_codes_opera1_table[i].event;
opst->last_key_pressed =
- opera1_rc_keys[i].event;
+ ir_codes_opera1_table[i].event;
break;
}
opst->last_key_pressed = 0;
@@ -498,8 +498,8 @@ static struct dvb_usb_device_properties opera1_properties = {
.power_ctrl = opera1_power_ctrl,
.i2c_algo = &opera1_i2c_algo,
- .rc_key_map = opera1_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(opera1_rc_keys),
+ .rc_key_map = ir_codes_opera1_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_opera1_table),
.rc_interval = 200,
.rc_query = opera1_rc_query,
.read_mac_address = opera1_read_mac_address,
diff --git a/drivers/media/dvb/dvb-usb/vp702x.c b/drivers/media/dvb/dvb-usb/vp702x.c
index ef4e37d9c5ff..4d332451653b 100644
--- a/drivers/media/dvb/dvb-usb/vp702x.c
+++ b/drivers/media/dvb/dvb-usb/vp702x.c
@@ -174,7 +174,7 @@ static int vp702x_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
}
/* keys for the enclosed remote control */
-static struct dvb_usb_rc_key vp702x_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_vp702x_table[] = {
{ 0x0001, KEY_1 },
{ 0x0002, KEY_2 },
};
@@ -197,10 +197,10 @@ static int vp702x_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
return 0;
}
- for (i = 0; i < ARRAY_SIZE(vp702x_rc_keys); i++)
- if (rc5_custom(&vp702x_rc_keys[i]) == key[1]) {
+ for (i = 0; i < ARRAY_SIZE(ir_codes_vp702x_table); i++)
+ if (rc5_custom(&ir_codes_vp702x_table[i]) == key[1]) {
*state = REMOTE_KEY_PRESSED;
- *event = vp702x_rc_keys[i].event;
+ *event = ir_codes_vp702x_table[i].event;
break;
}
return 0;
@@ -283,8 +283,8 @@ static struct dvb_usb_device_properties vp702x_properties = {
},
.read_mac_address = vp702x_read_mac_addr,
- .rc_key_map = vp702x_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(vp702x_rc_keys),
+ .rc_key_map = ir_codes_vp702x_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_vp702x_table),
.rc_interval = 400,
.rc_query = vp702x_rc_query,
diff --git a/drivers/media/dvb/dvb-usb/vp7045.c b/drivers/media/dvb/dvb-usb/vp7045.c
index a59faa27912a..036893fa4480 100644
--- a/drivers/media/dvb/dvb-usb/vp7045.c
+++ b/drivers/media/dvb/dvb-usb/vp7045.c
@@ -99,7 +99,7 @@ static int vp7045_power_ctrl(struct dvb_usb_device *d, int onoff)
/* The keymapping struct. Somehow this should be loaded to the driver, but
* currently it is hardcoded. */
-static struct dvb_usb_rc_key vp7045_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_vp7045_table[] = {
{ 0x0016, KEY_POWER },
{ 0x0010, KEY_MUTE },
{ 0x0003, KEY_1 },
@@ -165,10 +165,10 @@ static int vp7045_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
return 0;
}
- for (i = 0; i < ARRAY_SIZE(vp7045_rc_keys); i++)
- if (rc5_data(&vp7045_rc_keys[i]) == key) {
+ for (i = 0; i < ARRAY_SIZE(ir_codes_vp7045_table); i++)
+ if (rc5_data(&ir_codes_vp7045_table[i]) == key) {
*state = REMOTE_KEY_PRESSED;
- *event = vp7045_rc_keys[i].event;
+ *event = ir_codes_vp7045_table[i].event;
break;
}
return 0;
@@ -260,8 +260,8 @@ static struct dvb_usb_device_properties vp7045_properties = {
.read_mac_address = vp7045_read_mac_addr,
.rc_interval = 400,
- .rc_key_map = vp7045_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(vp7045_rc_keys),
+ .rc_key_map = ir_codes_vp7045_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_vp7045_table),
.rc_query = vp7045_rc_query,
.num_device_descs = 2,
diff --git a/drivers/media/dvb/firewire/firedtv-avc.c b/drivers/media/dvb/firewire/firedtv-avc.c
index 1b31bebc27d6..28294af752db 100644
--- a/drivers/media/dvb/firewire/firedtv-avc.c
+++ b/drivers/media/dvb/firewire/firedtv-avc.c
@@ -1096,7 +1096,7 @@ int avc_ca_pmt(struct firedtv *fdtv, char *msg, int length)
c->operand[15] = msg[1]; /* Program number */
c->operand[16] = msg[2];
- c->operand[17] = 0x01; /* Version number=0 + current/next=1 */
+ c->operand[17] = msg[3]; /* Version number and current/next */
c->operand[18] = 0x00; /* Section number=0 */
c->operand[19] = 0x00; /* Last section number=0 */
c->operand[20] = 0x1f; /* PCR_PID=1FFF */
diff --git a/drivers/media/dvb/frontends/atbm8830_priv.h b/drivers/media/dvb/frontends/atbm8830_priv.h
index ce960f76092a..d460058d497e 100644
--- a/drivers/media/dvb/frontends/atbm8830_priv.h
+++ b/drivers/media/dvb/frontends/atbm8830_priv.h
@@ -18,7 +18,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-
+
#ifndef __ATBM8830_PRIV_H
#define __ATBM8830_PRIV_H
diff --git a/drivers/media/dvb/frontends/au8522_decoder.c b/drivers/media/dvb/frontends/au8522_decoder.c
index 24268ef2753d..68dba3a4b4da 100644
--- a/drivers/media/dvb/frontends/au8522_decoder.c
+++ b/drivers/media/dvb/frontends/au8522_decoder.c
@@ -664,6 +664,13 @@ static int au8522_reset(struct v4l2_subdev *sd, u32 val)
{
struct au8522_state *state = to_state(sd);
+ state->operational_mode = AU8522_ANALOG_MODE;
+
+ /* Clear out any state associated with the digital side of the
+ chip, so that when it gets powered back up it won't think
+ that it is already tuned */
+ state->current_frequency = 0;
+
au8522_writereg(state, 0xa4, 1 << 5);
return 0;
diff --git a/drivers/media/dvb/frontends/au8522_dig.c b/drivers/media/dvb/frontends/au8522_dig.c
index a1fed0fa8ed4..65f6a36dfb21 100644
--- a/drivers/media/dvb/frontends/au8522_dig.c
+++ b/drivers/media/dvb/frontends/au8522_dig.c
@@ -84,6 +84,14 @@ static int au8522_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
dprintk("%s(%d)\n", __func__, enable);
+ if (state->operational_mode == AU8522_ANALOG_MODE) {
+ /* We're being asked to manage the gate even though we're
+ not in digital mode. This can occur if we get switched
+ over to analog mode before the dvb_frontend kernel thread
+ has completely shut down */
+ return 0;
+ }
+
if (enable)
return au8522_writereg(state, 0x106, 1);
else
@@ -608,6 +616,13 @@ int au8522_init(struct dvb_frontend *fe)
struct au8522_state *state = fe->demodulator_priv;
dprintk("%s()\n", __func__);
+ state->operational_mode = AU8522_DIGITAL_MODE;
+
+ /* Clear out any state associated with the digital side of the
+ chip, so that when it gets powered back up it won't think
+ that it is already tuned */
+ state->current_frequency = 0;
+
au8522_writereg(state, 0xa4, 1 << 5);
au8522_i2c_gate_ctrl(fe, 1);
@@ -704,6 +719,15 @@ int au8522_sleep(struct dvb_frontend *fe)
struct au8522_state *state = fe->demodulator_priv;
dprintk("%s()\n", __func__);
+ /* Only power down if the digital side is currently using the chip */
+ if (state->operational_mode == AU8522_ANALOG_MODE) {
+ /* We're not in one of the expected power modes, which means
+ that the DVB thread is probably telling us to go to sleep
+ even though the analog frontend has already started using
+ the chip. So ignore the request */
+ return 0;
+ }
+
/* turn off led */
au8522_led_ctrl(state, 0);
@@ -932,6 +956,8 @@ struct dvb_frontend *au8522_attach(const struct au8522_config *config,
/* setup the state */
state->config = config;
state->i2c = i2c;
+ state->operational_mode = AU8522_DIGITAL_MODE;
+
/* create dvb_frontend */
memcpy(&state->frontend.ops, &au8522_ops,
sizeof(struct dvb_frontend_ops));
diff --git a/drivers/media/dvb/frontends/au8522_priv.h b/drivers/media/dvb/frontends/au8522_priv.h
index c74c4e72fe91..609cf04bc312 100644
--- a/drivers/media/dvb/frontends/au8522_priv.h
+++ b/drivers/media/dvb/frontends/au8522_priv.h
@@ -34,10 +34,15 @@
#include "au8522.h"
#include "tuner-i2c.h"
+#define AU8522_ANALOG_MODE 0
+#define AU8522_DIGITAL_MODE 1
+
struct au8522_state {
struct i2c_client *c;
struct i2c_adapter *i2c;
+ u8 operational_mode;
+
/* Used for sharing of the state between analog and digital mode */
struct tuner_i2c_props i2c_props;
struct list_head hybrid_tuner_instance_list;
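For illustration, a self-contained user-space sketch of the analog/digital hand-off the au8522 hunks above implement; the two mode constants match the header, while the types and function names are simplified stand-ins:

    #include <stdio.h>

    #define AU8522_ANALOG_MODE  0
    #define AU8522_DIGITAL_MODE 1

    struct au8522_sketch {
        int operational_mode;
        unsigned int current_frequency;
    };

    /* Gate requests arriving after a switch to analog mode are simply ignored. */
    static int gate_ctrl_sketch(struct au8522_sketch *s, int enable)
    {
        if (s->operational_mode == AU8522_ANALOG_MODE)
            return 0;
        printf("gate %s\n", enable ? "opened" : "closed");
        return 0;
    }

    /* Switching sides also forgets any previous tuning, so the chip is
     * retuned once it is powered back up. */
    static void enter_analog_sketch(struct au8522_sketch *s)
    {
        s->operational_mode = AU8522_ANALOG_MODE;
        s->current_frequency = 0;
    }

    int main(void)
    {
        struct au8522_sketch s = { AU8522_DIGITAL_MODE, 0 };
        enter_analog_sketch(&s);
        return gate_ctrl_sketch(&s, 1); /* no-op: the analog side owns the chip now */
    }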
diff --git a/drivers/media/dvb/frontends/dib3000mc.c b/drivers/media/dvb/frontends/dib3000mc.c
index 40a099810279..afad252abf41 100644
--- a/drivers/media/dvb/frontends/dib3000mc.c
+++ b/drivers/media/dvb/frontends/dib3000mc.c
@@ -814,42 +814,51 @@ EXPORT_SYMBOL(dib3000mc_set_config);
int dib3000mc_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 default_addr, struct dib3000mc_config cfg[])
{
- struct dib3000mc_state st = { .i2c_adap = i2c };
+ struct dib3000mc_state *dmcst;
int k;
u8 new_addr;
static u8 DIB3000MC_I2C_ADDRESS[] = {20,22,24,26};
+ dmcst = kzalloc(sizeof(struct dib3000mc_state), GFP_KERNEL);
+ if (dmcst == NULL)
+ return -ENODEV;
+
+ dmcst->i2c_adap = i2c;
+
for (k = no_of_demods-1; k >= 0; k--) {
- st.cfg = &cfg[k];
+ dmcst->cfg = &cfg[k];
/* designated i2c address */
new_addr = DIB3000MC_I2C_ADDRESS[k];
- st.i2c_addr = new_addr;
- if (dib3000mc_identify(&st) != 0) {
- st.i2c_addr = default_addr;
- if (dib3000mc_identify(&st) != 0) {
+ dmcst->i2c_addr = new_addr;
+ if (dib3000mc_identify(dmcst) != 0) {
+ dmcst->i2c_addr = default_addr;
+ if (dib3000mc_identify(dmcst) != 0) {
dprintk("-E- DiB3000P/MC #%d: not identified\n", k);
+ kfree(dmcst);
return -ENODEV;
}
}
- dib3000mc_set_output_mode(&st, OUTMODE_MPEG2_PAR_CONT_CLK);
+ dib3000mc_set_output_mode(dmcst, OUTMODE_MPEG2_PAR_CONT_CLK);
// set new i2c address and force divstr (Bit 1) to value 0 (Bit 0)
- dib3000mc_write_word(&st, 1024, (new_addr << 3) | 0x1);
- st.i2c_addr = new_addr;
+ dib3000mc_write_word(dmcst, 1024, (new_addr << 3) | 0x1);
+ dmcst->i2c_addr = new_addr;
}
for (k = 0; k < no_of_demods; k++) {
- st.cfg = &cfg[k];
- st.i2c_addr = DIB3000MC_I2C_ADDRESS[k];
+ dmcst->cfg = &cfg[k];
+ dmcst->i2c_addr = DIB3000MC_I2C_ADDRESS[k];
- dib3000mc_write_word(&st, 1024, st.i2c_addr << 3);
+ dib3000mc_write_word(dmcst, 1024, dmcst->i2c_addr << 3);
/* turn off data output */
- dib3000mc_set_output_mode(&st, OUTMODE_HIGH_Z);
+ dib3000mc_set_output_mode(dmcst, OUTMODE_HIGH_Z);
}
+
+ kfree(dmcst);
return 0;
}
EXPORT_SYMBOL(dib3000mc_i2c_enumeration);
diff --git a/drivers/media/dvb/frontends/dib7000p.c b/drivers/media/dvb/frontends/dib7000p.c
index 85468a45c344..2e28b973dfd3 100644
--- a/drivers/media/dvb/frontends/dib7000p.c
+++ b/drivers/media/dvb/frontends/dib7000p.c
@@ -1324,46 +1324,54 @@ EXPORT_SYMBOL(dib7000p_pid_filter);
int dib7000p_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 default_addr, struct dib7000p_config cfg[])
{
- struct dib7000p_state st = { .i2c_adap = i2c };
+ struct dib7000p_state *dpst;
int k = 0;
u8 new_addr = 0;
+ dpst = kzalloc(sizeof(struct dib7000p_state), GFP_KERNEL);
+ if (!dpst)
+ return -ENOMEM;
+
+ dpst->i2c_adap = i2c;
+
for (k = no_of_demods-1; k >= 0; k--) {
- st.cfg = cfg[k];
+ dpst->cfg = cfg[k];
/* designated i2c address */
new_addr = (0x40 + k) << 1;
- st.i2c_addr = new_addr;
- dib7000p_write_word(&st, 1287, 0x0003); /* sram lead in, rdy */
- if (dib7000p_identify(&st) != 0) {
- st.i2c_addr = default_addr;
- dib7000p_write_word(&st, 1287, 0x0003); /* sram lead in, rdy */
- if (dib7000p_identify(&st) != 0) {
+ dpst->i2c_addr = new_addr;
+ dib7000p_write_word(dpst, 1287, 0x0003); /* sram lead in, rdy */
+ if (dib7000p_identify(dpst) != 0) {
+ dpst->i2c_addr = default_addr;
+ dib7000p_write_word(dpst, 1287, 0x0003); /* sram lead in, rdy */
+ if (dib7000p_identify(dpst) != 0) {
dprintk("DiB7000P #%d: not identified\n", k);
+ kfree(dpst);
return -EIO;
}
}
/* start diversity to pull_down div_str - just for i2c-enumeration */
- dib7000p_set_output_mode(&st, OUTMODE_DIVERSITY);
+ dib7000p_set_output_mode(dpst, OUTMODE_DIVERSITY);
/* set new i2c address and force divstart */
- dib7000p_write_word(&st, 1285, (new_addr << 2) | 0x2);
+ dib7000p_write_word(dpst, 1285, (new_addr << 2) | 0x2);
dprintk("IC %d initialized (to i2c_address 0x%x)", k, new_addr);
}
for (k = 0; k < no_of_demods; k++) {
- st.cfg = cfg[k];
- st.i2c_addr = (0x40 + k) << 1;
+ dpst->cfg = cfg[k];
+ dpst->i2c_addr = (0x40 + k) << 1;
// unforce divstr
- dib7000p_write_word(&st, 1285, st.i2c_addr << 2);
+ dib7000p_write_word(dpst, 1285, dpst->i2c_addr << 2);
/* deactivate div - it was just for i2c-enumeration */
- dib7000p_set_output_mode(&st, OUTMODE_HIGH_Z);
+ dib7000p_set_output_mode(dpst, OUTMODE_HIGH_Z);
}
+ kfree(dpst);
return 0;
}
EXPORT_SYMBOL(dib7000p_i2c_enumeration);
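Both enumeration hunks above follow the same pattern: the demod state moves from the stack to the heap and every exit path releases it. A simplified stand-alone sketch of that pattern (the names and the identify step are invented for illustration):

    #include <stdlib.h>

    struct demod_state_sketch { int i2c_addr; };

    /* Hypothetical probe step; the real drivers read an ID register here. */
    static int identify_sketch(struct demod_state_sketch *st) { (void)st; return 0; }

    static int enumerate_sketch(int no_of_demods)
    {
        struct demod_state_sketch *st = calloc(1, sizeof(*st));
        if (!st)
            return -1; /* -ENOMEM / -ENODEV in the kernel versions */

        for (int k = 0; k < no_of_demods; k++) {
            st->i2c_addr = 0x40 + k;
            if (identify_sketch(st) != 0) {
                free(st); /* the error path must release the state too */
                return -1;
            }
        }

        free(st); /* and so must the success path */
        return 0;
    }

    int main(void) { return enumerate_sketch(2); }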
diff --git a/drivers/media/dvb/frontends/dib8000.h b/drivers/media/dvb/frontends/dib8000.h
index b1ee20799639..e0a9ded11df4 100644
--- a/drivers/media/dvb/frontends/dib8000.h
+++ b/drivers/media/dvb/frontends/dib8000.h
@@ -109,6 +109,7 @@ static inline void dib8000_pwm_agc_reset(struct dvb_frontend *fe)
static inline s32 dib8000_get_adc_power(struct dvb_frontend *fe, u8 mode)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return 0;
}
#endif
diff --git a/drivers/media/dvb/frontends/ds3000.c b/drivers/media/dvb/frontends/ds3000.c
index cff3535566fe..78001e8bcdb7 100644
--- a/drivers/media/dvb/frontends/ds3000.c
+++ b/drivers/media/dvb/frontends/ds3000.c
@@ -719,7 +719,7 @@ static int ds3000_read_snr(struct dvb_frontend *fe, u16 *snr)
(ds3000_readreg(state, 0x8d) << 4);
dvbs2_signal_reading = ds3000_readreg(state, 0x8e);
tmp = dvbs2_signal_reading * dvbs2_signal_reading >> 1;
- if (dvbs2_signal_reading == 0) {
+ if (tmp == 0) {
*snr = 0x0000;
return 0;
}
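The one-line ds3000 change above moves the zero check from the raw reading to the derived value, presumably so a zero never reaches the SNR math further down: for a reading of 1, squaring and shifting truncates to 0, which the old guard missed. A tiny sketch of the case that differs:

    #include <stdio.h>

    int main(void)
    {
        unsigned int reading = 1;                  /* smallest non-zero reading */
        unsigned int tmp = reading * reading >> 1; /* truncates to 0            */

        /* old guard (reading == 0) falls through here; the new guard
         * (tmp == 0) reports an SNR of 0 and returns early instead. */
        printf("reading=%u tmp=%u\n", reading, tmp);
        return 0;
    }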
diff --git a/drivers/media/dvb/frontends/stv0900_core.c b/drivers/media/dvb/frontends/stv0900_core.c
index 01f8f1f802fd..4f5e7d3a0e61 100644
--- a/drivers/media/dvb/frontends/stv0900_core.c
+++ b/drivers/media/dvb/frontends/stv0900_core.c
@@ -1583,7 +1583,7 @@ static enum dvbfe_search stv0900_search(struct dvb_frontend *fe,
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct stv0900_search_params p_search;
- struct stv0900_signal_info p_result;
+ struct stv0900_signal_info p_result = intp->result[demod];
enum fe_stv0900_error error = STV0900_NO_ERROR;
@@ -1842,6 +1842,19 @@ static void stv0900_release(struct dvb_frontend *fe)
kfree(state);
}
+static int stv0900_get_frontend(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *p)
+{
+ struct stv0900_state *state = fe->demodulator_priv;
+ struct stv0900_internal *intp = state->internal;
+ enum fe_stv0900_demod_num demod = state->demod;
+ struct stv0900_signal_info p_result = intp->result[demod];
+
+ p->frequency = p_result.locked ? p_result.frequency : 0;
+ p->u.qpsk.symbol_rate = p_result.locked ? p_result.symbol_rate : 0;
+ return 0;
+}
+
static struct dvb_frontend_ops stv0900_ops = {
.info = {
@@ -1862,6 +1875,7 @@ static struct dvb_frontend_ops stv0900_ops = {
},
.release = stv0900_release,
.init = stv0900_init,
+ .get_frontend = stv0900_get_frontend,
.get_frontend_algo = stv0900_frontend_algo,
.i2c_gate_ctrl = stv0900_i2c_gate_ctrl,
.diseqc_send_master_cmd = stv0900_send_master_cmd,
diff --git a/drivers/media/dvb/frontends/stv090x.c b/drivers/media/dvb/frontends/stv090x.c
index 96972804f4ad..425e7a43ae19 100644
--- a/drivers/media/dvb/frontends/stv090x.c
+++ b/drivers/media/dvb/frontends/stv090x.c
@@ -754,11 +754,19 @@ static int stv090x_write_reg(struct stv090x_state *state, unsigned int reg, u8 d
return stv090x_write_regs(state, reg, &data, 1);
}
-static int stv090x_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
+static int stv090x_i2c_gate_ctrl(struct stv090x_state *state, int enable)
{
- struct stv090x_state *state = fe->demodulator_priv;
u32 reg;
+ /*
+ * NOTE! The lock is used as a state machine to serialize gate access
+ * between the two tuners on the same demod. It is not a lock protecting
+ * a critical section, which might otherwise be confused with protecting
+ * I/O access to the demodulator gate.
+ * On any error the lock is released and the exit happens within the
+ * relevant operation itself.
+ */
if (enable)
mutex_lock(&state->internal->tuner_lock);
@@ -1778,7 +1786,7 @@ static u32 stv090x_srate_srch_coarse(struct stv090x_state *state)
freq -= cur_step * car_step;
/* Setup tuner */
- if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (state->config->tuner_set_frequency) {
@@ -1791,12 +1799,12 @@ static u32 stv090x_srate_srch_coarse(struct stv090x_state *state)
goto err_gateoff;
}
- if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
msleep(50);
- if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (state->config->tuner_get_status) {
@@ -1809,7 +1817,7 @@ static u32 stv090x_srate_srch_coarse(struct stv090x_state *state)
else
dprintk(FE_DEBUG, 1, "Tuner unlocked");
- if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
}
@@ -1822,7 +1830,7 @@ static u32 stv090x_srate_srch_coarse(struct stv090x_state *state)
return srate_coarse;
err_gateoff:
- stv090x_i2c_gate_ctrl(fe, 0);
+ stv090x_i2c_gate_ctrl(state, 0);
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
@@ -2167,7 +2175,7 @@ static int stv090x_get_coldlock(struct stv090x_state *state, s32 timeout_dmd)
freq -= cur_step * car_step;
/* Setup tuner */
- if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (state->config->tuner_set_frequency) {
@@ -2180,12 +2188,12 @@ static int stv090x_get_coldlock(struct stv090x_state *state, s32 timeout_dmd)
goto err_gateoff;
}
- if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
msleep(50);
- if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (state->config->tuner_get_status) {
@@ -2198,7 +2206,7 @@ static int stv090x_get_coldlock(struct stv090x_state *state, s32 timeout_dmd)
else
dprintk(FE_DEBUG, 1, "Tuner unlocked");
- if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1c);
@@ -2222,7 +2230,7 @@ static int stv090x_get_coldlock(struct stv090x_state *state, s32 timeout_dmd)
return lock;
err_gateoff:
- stv090x_i2c_gate_ctrl(fe, 0);
+ stv090x_i2c_gate_ctrl(state, 0);
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
@@ -2591,7 +2599,7 @@ static enum stv090x_signal_state stv090x_get_sig_params(struct stv090x_state *st
}
state->delsys = stv090x_get_std(state);
- if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (state->config->tuner_get_frequency) {
@@ -2599,7 +2607,7 @@ static enum stv090x_signal_state stv090x_get_sig_params(struct stv090x_state *st
goto err_gateoff;
}
- if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
offst_freq = stv090x_get_car_freq(state, state->internal->mclk) / 1000;
@@ -2619,7 +2627,7 @@ static enum stv090x_signal_state stv090x_get_sig_params(struct stv090x_state *st
if ((state->algo == STV090x_BLIND_SEARCH) || (state->srate < 10000000)) {
- if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (state->config->tuner_get_frequency) {
@@ -2627,7 +2635,7 @@ static enum stv090x_signal_state stv090x_get_sig_params(struct stv090x_state *st
goto err_gateoff;
}
- if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
if (abs(offst_freq) <= ((state->search_range / 2000) + 500))
@@ -2646,7 +2654,7 @@ static enum stv090x_signal_state stv090x_get_sig_params(struct stv090x_state *st
return STV090x_OUTOFRANGE;
err_gateoff:
- stv090x_i2c_gate_ctrl(fe, 0);
+ stv090x_i2c_gate_ctrl(state, 0);
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
@@ -3000,7 +3008,7 @@ static int stv090x_optimize_track(struct stv090x_state *state)
if (state->algo != STV090x_WARM_SEARCH) {
- if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (state->config->tuner_set_bandwidth) {
@@ -3008,7 +3016,7 @@ static int stv090x_optimize_track(struct stv090x_state *state)
goto err_gateoff;
}
- if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
}
@@ -3059,7 +3067,7 @@ static int stv090x_optimize_track(struct stv090x_state *state)
return 0;
err_gateoff:
- stv090x_i2c_gate_ctrl(fe, 0);
+ stv090x_i2c_gate_ctrl(state, 0);
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
@@ -3235,7 +3243,7 @@ static enum stv090x_signal_state stv090x_algo(struct stv090x_state *state)
}
/* Setup tuner */
- if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (state->config->tuner_set_bbgain) {
@@ -3256,17 +3264,17 @@ static enum stv090x_signal_state stv090x_algo(struct stv090x_state *state)
goto err_gateoff;
}
- if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
msleep(50);
if (state->config->tuner_get_status) {
- if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (state->config->tuner_get_status(fe, &reg) < 0)
goto err_gateoff;
- if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
if (reg)
@@ -3400,7 +3408,7 @@ static enum stv090x_signal_state stv090x_algo(struct stv090x_state *state)
return signal_state;
err_gateoff:
- stv090x_i2c_gate_ctrl(fe, 0);
+ stv090x_i2c_gate_ctrl(state, 0);
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
@@ -3839,6 +3847,17 @@ static int stv090x_sleep(struct dvb_frontend *fe)
struct stv090x_state *state = fe->demodulator_priv;
u32 reg;
+ if (stv090x_i2c_gate_ctrl(state, 1) < 0)
+ goto err;
+
+ if (state->config->tuner_sleep) {
+ if (state->config->tuner_sleep(fe) < 0)
+ goto err_gateoff;
+ }
+
+ if (stv090x_i2c_gate_ctrl(state, 0) < 0)
+ goto err;
+
dprintk(FE_DEBUG, 1, "Set %s to sleep",
state->device == STV0900 ? "STV0900" : "STV0903");
@@ -3853,6 +3872,9 @@ static int stv090x_sleep(struct dvb_frontend *fe)
goto err;
return 0;
+
+err_gateoff:
+ stv090x_i2c_gate_ctrl(state, 0);
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
@@ -4311,6 +4333,20 @@ static int stv090x_init(struct dvb_frontend *fe)
u32 reg;
if (state->internal->mclk == 0) {
+ /* call tuner init to configure the tuner's clock output
+ divider directly before setting up the master clock of
+ the stv090x. */
+ if (stv090x_i2c_gate_ctrl(state, 1) < 0)
+ goto err;
+
+ if (config->tuner_init) {
+ if (config->tuner_init(fe) < 0)
+ goto err_gateoff;
+ }
+
+ if (stv090x_i2c_gate_ctrl(state, 0) < 0)
+ goto err;
+
stv090x_set_mclk(state, 135000000, config->xtal); /* 135 Mhz */
msleep(5);
if (stv090x_write_reg(state, STV090x_SYNTCTRL,
@@ -4336,7 +4372,7 @@ static int stv090x_init(struct dvb_frontend *fe)
if (STV090x_WRITE_DEMOD(state, DEMOD, reg) < 0)
goto err;
- if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (config->tuner_set_mode) {
@@ -4349,7 +4385,7 @@ static int stv090x_init(struct dvb_frontend *fe)
goto err_gateoff;
}
- if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
if (stv090x_set_tspath(state) < 0)
@@ -4358,7 +4394,7 @@ static int stv090x_init(struct dvb_frontend *fe)
return 0;
err_gateoff:
- stv090x_i2c_gate_ctrl(fe, 0);
+ stv090x_i2c_gate_ctrl(state, 0);
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
@@ -4503,8 +4539,6 @@ static struct dvb_frontend_ops stv090x_ops = {
.sleep = stv090x_sleep,
.get_frontend_algo = stv090x_frontend_algo,
- .i2c_gate_ctrl = stv090x_i2c_gate_ctrl,
-
.diseqc_send_master_cmd = stv090x_send_diseqc_msg,
.diseqc_send_burst = stv090x_send_diseqc_burst,
.diseqc_recv_slave_reply = stv090x_recv_slave_reply,
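Throughout the stv090x hunks the gate helper now takes the state directly, and every tuner access is bracketed by open/close calls with an err_gateoff unwind. A condensed sketch of that bracketing (simplified types; the callback stands in for the tuner_set_*/tuner_get_* hooks):

    struct stv_sketch {
        int gate_open;
        int (*tuner_op)(struct stv_sketch *);
    };

    static int gate_ctrl_sketch(struct stv_sketch *s, int enable)
    {
        s->gate_open = enable; /* the driver also takes/drops a per-demod lock here */
        return 0;
    }

    static int tuner_step_sketch(struct stv_sketch *s)
    {
        if (gate_ctrl_sketch(s, 1) < 0)
            goto err;

        if (s->tuner_op && s->tuner_op(s) < 0)
            goto err_gateoff;      /* close the gate before reporting failure */

        if (gate_ctrl_sketch(s, 0) < 0)
            goto err;

        return 0;

    err_gateoff:
        gate_ctrl_sketch(s, 0);
    err:
        return -1;
    }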
diff --git a/drivers/media/dvb/frontends/stv090x.h b/drivers/media/dvb/frontends/stv090x.h
index 30f01a6902ac..dd1b93ae4e9d 100644
--- a/drivers/media/dvb/frontends/stv090x.h
+++ b/drivers/media/dvb/frontends/stv090x.h
@@ -87,6 +87,7 @@ struct stv090x_config {
bool diseqc_envelope_mode;
int (*tuner_init) (struct dvb_frontend *fe);
+ int (*tuner_sleep) (struct dvb_frontend *fe);
int (*tuner_set_mode) (struct dvb_frontend *fe, enum tuner_mode mode);
int (*tuner_set_frequency) (struct dvb_frontend *fe, u32 frequency);
int (*tuner_get_frequency) (struct dvb_frontend *fe, u32 *frequency);
diff --git a/drivers/media/dvb/frontends/stv6110x.c b/drivers/media/dvb/frontends/stv6110x.c
index dea4245f077c..2f9cd2441340 100644
--- a/drivers/media/dvb/frontends/stv6110x.c
+++ b/drivers/media/dvb/frontends/stv6110x.c
@@ -338,14 +338,12 @@ static struct dvb_tuner_ops stv6110x_ops = {
.frequency_max = 2150000,
.frequency_step = 0,
},
-
- .init = stv6110x_init,
- .sleep = stv6110x_sleep,
.release = stv6110x_release
};
static struct stv6110x_devctl stv6110x_ctl = {
.tuner_init = stv6110x_init,
+ .tuner_sleep = stv6110x_sleep,
.tuner_set_mode = stv6110x_set_mode,
.tuner_set_frequency = stv6110x_set_frequency,
.tuner_get_frequency = stv6110x_get_frequency,
@@ -363,7 +361,6 @@ struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
{
struct stv6110x_state *stv6110x;
u8 default_regs[] = {0x07, 0x11, 0xdc, 0x85, 0x17, 0x01, 0xe6, 0x1e};
- int ret;
stv6110x = kzalloc(sizeof (struct stv6110x_state), GFP_KERNEL);
if (stv6110x == NULL)
@@ -392,25 +389,6 @@ struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
break;
}
- if (fe->ops.i2c_gate_ctrl) {
- ret = fe->ops.i2c_gate_ctrl(fe, 1);
- if (ret < 0)
- goto error;
- }
-
- ret = stv6110x_write_regs(stv6110x, 0, stv6110x->regs,
- ARRAY_SIZE(stv6110x->regs));
- if (ret < 0) {
- dprintk(FE_ERROR, 1, "Initialization failed");
- goto error;
- }
-
- if (fe->ops.i2c_gate_ctrl) {
- ret = fe->ops.i2c_gate_ctrl(fe, 0);
- if (ret < 0)
- goto error;
- }
-
fe->tuner_priv = stv6110x;
fe->ops.tuner_ops = stv6110x_ops;
diff --git a/drivers/media/dvb/frontends/stv6110x.h b/drivers/media/dvb/frontends/stv6110x.h
index 2429ae6d7847..47516753929a 100644
--- a/drivers/media/dvb/frontends/stv6110x.h
+++ b/drivers/media/dvb/frontends/stv6110x.h
@@ -40,6 +40,7 @@ enum tuner_status {
struct stv6110x_devctl {
int (*tuner_init) (struct dvb_frontend *fe);
+ int (*tuner_sleep) (struct dvb_frontend *fe);
int (*tuner_set_mode) (struct dvb_frontend *fe, enum tuner_mode mode);
int (*tuner_set_frequency) (struct dvb_frontend *fe, u32 frequency);
int (*tuner_get_frequency) (struct dvb_frontend *fe, u32 *frequency);
diff --git a/drivers/media/dvb/mantis/mantis_input.c b/drivers/media/dvb/mantis/mantis_input.c
index 4675a3b53c7d..3d4e4663220c 100644
--- a/drivers/media/dvb/mantis/mantis_input.c
+++ b/drivers/media/dvb/mantis/mantis_input.c
@@ -32,6 +32,8 @@
#include "mantis_reg.h"
#include "mantis_uart.h"
+#define MODULE_NAME "mantis_core"
+
static struct ir_scancode mantis_ir_table[] = {
{ 0x29, KEY_POWER },
{ 0x28, KEY_FAVORITES },
@@ -126,7 +128,7 @@ int mantis_input_init(struct mantis_pci *mantis)
rc->id.version = 1;
rc->dev = mantis->pdev->dev;
- err = ir_input_register(rc, &ir_mantis, NULL);
+ err = __ir_input_register(rc, &ir_mantis, NULL, MODULE_NAME);
if (err) {
dprintk(MANTIS_ERROR, 1, "IR device registration failed, ret = %d", err);
input_free_device(rc);
diff --git a/drivers/media/dvb/mantis/mantis_vp1041.c b/drivers/media/dvb/mantis/mantis_vp1041.c
index 515346dd31d0..d1aa2bc0c155 100644
--- a/drivers/media/dvb/mantis/mantis_vp1041.c
+++ b/drivers/media/dvb/mantis/mantis_vp1041.c
@@ -136,12 +136,12 @@ static const struct stb0899_s1_reg vp1041_stb0899_s1_init_3[] = {
{ STB0899_RCOMPC , 0xc9 },
{ STB0899_AGC1CN , 0x01 },
{ STB0899_AGC1REF , 0x10 },
- { STB0899_RTC , 0x23 },
+ { STB0899_RTC , 0x23 },
{ STB0899_TMGCFG , 0x4e },
{ STB0899_AGC2REF , 0x34 },
{ STB0899_TLSR , 0x84 },
{ STB0899_CFD , 0xf7 },
- { STB0899_ACLC , 0x87 },
+ { STB0899_ACLC , 0x87 },
{ STB0899_BCLC , 0x94 },
{ STB0899_EQON , 0x41 },
{ STB0899_LDT , 0xf1 },
@@ -194,10 +194,10 @@ static const struct stb0899_s1_reg vp1041_stb0899_s1_init_3[] = {
{ STB0899_ECNT3M , 0x0a },
{ STB0899_ECNT3L , 0xad },
{ STB0899_FECAUTO1 , 0x06 },
- { STB0899_FECM , 0x01 },
+ { STB0899_FECM , 0x01 },
{ STB0899_VTH12 , 0xb0 },
{ STB0899_VTH23 , 0x7a },
- { STB0899_VTH34 , 0x58 },
+ { STB0899_VTH34 , 0x58 },
{ STB0899_VTH56 , 0x38 },
{ STB0899_VTH67 , 0x34 },
{ STB0899_VTH78 , 0x24 },
@@ -206,7 +206,7 @@ static const struct stb0899_s1_reg vp1041_stb0899_s1_init_3[] = {
{ STB0899_RSULC , 0xb1 }, /* DVB = 0xb1, DSS = 0xa1 */
{ STB0899_TSULC , 0x42 },
{ STB0899_RSLLC , 0x41 },
- { STB0899_TSLPL , 0x12 },
+ { STB0899_TSLPL , 0x12 },
{ STB0899_TSCFGH , 0x0c },
{ STB0899_TSCFGM , 0x00 },
{ STB0899_TSCFGL , 0x00 },
diff --git a/drivers/media/dvb/ngene/ngene-core.c b/drivers/media/dvb/ngene/ngene-core.c
index 645e8b8a7137..c8c997059c40 100644
--- a/drivers/media/dvb/ngene/ngene-core.c
+++ b/drivers/media/dvb/ngene/ngene-core.c
@@ -37,7 +37,6 @@
#include <linux/pci_ids.h>
#include <linux/smp_lock.h>
#include <linux/timer.h>
-#include <linux/version.h>
#include <linux/byteorder/generic.h>
#include <linux/firmware.h>
#include <linux/vmalloc.h>
@@ -819,7 +818,7 @@ static int ngene_i2c_master_xfer(struct i2c_adapter *adapter,
msg[0].buf, msg[0].len))
goto done;
if (num == 1 && (msg[0].flags & I2C_M_RD))
- if (!ngene_command_i2c_read(dev, msg[0].addr, 0, 0,
+ if (!ngene_command_i2c_read(dev, msg[0].addr, NULL, 0,
msg[0].buf, msg[0].len, 0))
goto done;
@@ -882,7 +881,7 @@ static void *tsin_exchange(void *priv, void *buf, u32 len, u32 clock, u32 flags)
if (chan->users > 0)
#endif
dvb_dmx_swfilter(&chan->demux, buf, len);
- return 0;
+ return NULL;
}
u8 fill_ts[188] = { 0x47, 0x1f, 0xff, 0x10 };
@@ -970,7 +969,7 @@ static void set_transfer(struct ngene_channel *chan, int state)
state);
if (!state) {
spin_lock_irq(&chan->state_lock);
- chan->pBufferExchange = 0;
+ chan->pBufferExchange = NULL;
dvb_ringbuffer_flush(&dev->tsout_rbuf);
spin_unlock_irq(&chan->state_lock);
}
@@ -1020,7 +1019,7 @@ static int my_dvb_dmx_ts_card_init(struct dvb_demux *dvbdemux, char *id,
dvbdemux->feednum = 256;
dvbdemux->start_feed = start_feed;
dvbdemux->stop_feed = stop_feed;
- dvbdemux->write_to_decoder = 0;
+ dvbdemux->write_to_decoder = NULL;
dvbdemux->dmx.capabilities = (DMX_TS_FILTERING |
DMX_SECTION_FILTERING |
DMX_MEMORY_BASED_FILTERING);
@@ -1094,8 +1093,8 @@ static void free_idlebuffer(struct ngene *dev,
return;
free_ringbuffer(dev, rb);
for (j = 0; j < tb->NumBuffers; j++, Cur = Cur->Next) {
- Cur->Buffer2 = 0;
- Cur->scList2 = 0;
+ Cur->Buffer2 = NULL;
+ Cur->scList2 = NULL;
Cur->ngeneBuffer.Address_of_first_entry_2 = 0;
Cur->ngeneBuffer.Number_of_entries_2 = 0;
}
@@ -1141,7 +1140,7 @@ static int create_ring_buffer(struct pci_dev *pci_dev,
u64 PARingBufferNext;
struct SBufferHeader *Cur, *Next;
- descr->Head = 0;
+ descr->Head = NULL;
descr->MemSize = 0;
descr->PAHead = 0;
descr->NumBuffers = 0;
@@ -1719,7 +1718,7 @@ static void release_channel(struct ngene_channel *chan)
if (chan->fe) {
dvb_unregister_frontend(chan->fe);
dvb_frontend_detach(chan->fe);
- chan->fe = 0;
+ chan->fe = NULL;
}
dvbdemux->dmx.close(&dvbdemux->dmx);
dvbdemux->dmx.remove_frontend(&dvbdemux->dmx,
@@ -1820,7 +1819,7 @@ static void __devexit ngene_remove(struct pci_dev *pdev)
release_channel(&dev->channel[i]);
ngene_stop(dev);
ngene_release_buffers(dev);
- pci_set_drvdata(pdev, 0);
+ pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
}
@@ -1868,7 +1867,7 @@ fail1:
ngene_release_buffers(dev);
fail0:
pci_disable_device(pci_dev);
- pci_set_drvdata(pci_dev, 0);
+ pci_set_drvdata(pci_dev, NULL);
return stat;
}
@@ -1920,7 +1919,7 @@ static struct ngene_info ngene_info_cineS2 = {
.fw_version = 15,
};
-static struct ngene_info ngene_info_satixs2 = {
+static struct ngene_info ngene_info_satixS2 = {
.type = NGENE_SIDEWINDER,
.name = "Mystique SaTiX-S2 Dual",
.io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN},
@@ -1933,6 +1932,32 @@ static struct ngene_info ngene_info_satixs2 = {
.fw_version = 15,
};
+static struct ngene_info ngene_info_satixS2v2 = {
+ .type = NGENE_SIDEWINDER,
+ .name = "Mystique SaTiX-S2 Dual (v2)",
+ .io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN},
+ .demod_attach = {demod_attach_stv0900, demod_attach_stv0900},
+ .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110},
+ .fe_config = {&fe_cineS2, &fe_cineS2},
+ .tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1},
+ .lnb = {0x0a, 0x08},
+ .tsf = {3, 3},
+ .fw_version = 15,
+};
+
+static struct ngene_info ngene_info_cineS2v5 = {
+ .type = NGENE_SIDEWINDER,
+ .name = "Linux4Media cineS2 DVB-S2 Twin Tuner (v5)",
+ .io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN},
+ .demod_attach = {demod_attach_stv0900, demod_attach_stv0900},
+ .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110},
+ .fe_config = {&fe_cineS2, &fe_cineS2},
+ .tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1},
+ .lnb = {0x0a, 0x08},
+ .tsf = {3, 3},
+ .fw_version = 15,
+};
+
/****************************************************************************/
@@ -1951,7 +1976,9 @@ static struct ngene_info ngene_info_satixs2 = {
static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
- NGENE_ID(0x18c3, 0xdb01, ngene_info_satixs2),
+ NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
+ NGENE_ID(0x18c3, 0xdb02, ngene_info_satixS2v2),
+ NGENE_ID(0x18c3, 0xdd00, ngene_info_cineS2v5),
{0}
};
MODULE_DEVICE_TABLE(pci, ngene_id_tbl);
diff --git a/drivers/media/dvb/pt1/pt1.c b/drivers/media/dvb/pt1/pt1.c
index 6aded234aa61..69ad94934ec2 100644
--- a/drivers/media/dvb/pt1/pt1.c
+++ b/drivers/media/dvb/pt1/pt1.c
@@ -1,5 +1,5 @@
/*
- * driver for Earthsoft PT1
+ * driver for Earthsoft PT1/PT2
*
* Copyright (C) 2009 HIRANO Takahito <hiranotaka@zng.info>
*
@@ -77,6 +77,10 @@ struct pt1 {
struct pt1_adapter *adaps[PT1_NR_ADAPS];
struct pt1_table *tables;
struct task_struct *kthread;
+
+ struct mutex lock;
+ int power;
+ int reset;
};
struct pt1_adapter {
@@ -95,6 +99,11 @@ struct pt1_adapter {
struct dvb_frontend *fe;
int (*orig_set_voltage)(struct dvb_frontend *fe,
fe_sec_voltage_t voltage);
+ int (*orig_sleep)(struct dvb_frontend *fe);
+ int (*orig_init)(struct dvb_frontend *fe);
+
+ fe_sec_voltage_t voltage;
+ int sleep;
};
#define pt1_printk(level, pt1, format, arg...) \
@@ -219,8 +228,10 @@ static int pt1_do_enable_ram(struct pt1 *pt1)
static int pt1_enable_ram(struct pt1 *pt1)
{
int i, ret;
+ int phase;
schedule_timeout_uninterruptible((HZ + 999) / 1000);
- for (i = 0; i < 10; i++) {
+ phase = pt1->pdev->device == 0x211a ? 128 : 166;
+ for (i = 0; i < phase; i++) {
ret = pt1_do_enable_ram(pt1);
if (ret < 0)
return ret;
@@ -485,33 +496,47 @@ static int pt1_stop_feed(struct dvb_demux_feed *feed)
}
static void
-pt1_set_power(struct pt1 *pt1, int power, int lnb, int reset)
+pt1_update_power(struct pt1 *pt1)
{
- pt1_write_reg(pt1, 1, power | lnb << 1 | !reset << 3);
+ int bits;
+ int i;
+ struct pt1_adapter *adap;
+ static const int sleep_bits[] = {
+ 1 << 4,
+ 1 << 6 | 1 << 7,
+ 1 << 5,
+ 1 << 6 | 1 << 8,
+ };
+
+ bits = pt1->power | !pt1->reset << 3;
+ mutex_lock(&pt1->lock);
+ for (i = 0; i < PT1_NR_ADAPS; i++) {
+ adap = pt1->adaps[i];
+ switch (adap->voltage) {
+ case SEC_VOLTAGE_13: /* actually 11V */
+ bits |= 1 << 1;
+ break;
+ case SEC_VOLTAGE_18: /* actually 15V */
+ bits |= 1 << 1 | 1 << 2;
+ break;
+ default:
+ break;
+ }
+
+ /* XXX: The bits should be changed depending on adap->sleep. */
+ bits |= sleep_bits[i];
+ }
+ pt1_write_reg(pt1, 1, bits);
+ mutex_unlock(&pt1->lock);
}
static int pt1_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
{
struct pt1_adapter *adap;
- int lnb;
adap = container_of(fe->dvb, struct pt1_adapter, adap);
-
- switch (voltage) {
- case SEC_VOLTAGE_13: /* actually 11V */
- lnb = 2;
- break;
- case SEC_VOLTAGE_18: /* actually 15V */
- lnb = 3;
- break;
- case SEC_VOLTAGE_OFF:
- lnb = 0;
- break;
- default:
- return -EINVAL;
- }
-
- pt1_set_power(adap->pt1, 1, lnb, 0);
+ adap->voltage = voltage;
+ pt1_update_power(adap->pt1);
if (adap->orig_set_voltage)
return adap->orig_set_voltage(fe, voltage);
@@ -519,9 +544,37 @@ static int pt1_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
return 0;
}
+static int pt1_sleep(struct dvb_frontend *fe)
+{
+ struct pt1_adapter *adap;
+
+ adap = container_of(fe->dvb, struct pt1_adapter, adap);
+ adap->sleep = 1;
+ pt1_update_power(adap->pt1);
+
+ if (adap->orig_sleep)
+ return adap->orig_sleep(fe);
+ else
+ return 0;
+}
+
+static int pt1_wakeup(struct dvb_frontend *fe)
+{
+ struct pt1_adapter *adap;
+
+ adap = container_of(fe->dvb, struct pt1_adapter, adap);
+ adap->sleep = 0;
+ pt1_update_power(adap->pt1);
+ schedule_timeout_uninterruptible((HZ + 999) / 1000);
+
+ if (adap->orig_init)
+ return adap->orig_init(fe);
+ else
+ return 0;
+}
+
static void pt1_free_adapter(struct pt1_adapter *adap)
{
- dvb_unregister_frontend(adap->fe);
dvb_net_release(&adap->net);
adap->demux.dmx.close(&adap->demux.dmx);
dvb_dmxdev_release(&adap->dmxdev);
@@ -534,7 +587,7 @@ static void pt1_free_adapter(struct pt1_adapter *adap)
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
static struct pt1_adapter *
-pt1_alloc_adapter(struct pt1 *pt1, struct dvb_frontend *fe)
+pt1_alloc_adapter(struct pt1 *pt1)
{
struct pt1_adapter *adap;
void *buf;
@@ -551,8 +604,8 @@ pt1_alloc_adapter(struct pt1 *pt1, struct dvb_frontend *fe)
adap->pt1 = pt1;
- adap->orig_set_voltage = fe->ops.set_voltage;
- fe->ops.set_voltage = pt1_set_voltage;
+ adap->voltage = SEC_VOLTAGE_OFF;
+ adap->sleep = 1;
buf = (u8 *)__get_free_page(GFP_KERNEL);
if (!buf) {
@@ -593,17 +646,8 @@ pt1_alloc_adapter(struct pt1 *pt1, struct dvb_frontend *fe)
dvb_net_init(dvb_adap, &adap->net, &demux->dmx);
- ret = dvb_register_frontend(dvb_adap, fe);
- if (ret < 0)
- goto err_net_release;
- adap->fe = fe;
-
return adap;
-err_net_release:
- dvb_net_release(&adap->net);
- adap->demux.dmx.close(&adap->demux.dmx);
- dvb_dmxdev_release(&adap->dmxdev);
err_dmx_release:
dvb_dmx_release(demux);
err_unregister_adapter:
@@ -623,6 +667,62 @@ static void pt1_cleanup_adapters(struct pt1 *pt1)
pt1_free_adapter(pt1->adaps[i]);
}
+static int pt1_init_adapters(struct pt1 *pt1)
+{
+ int i;
+ struct pt1_adapter *adap;
+ int ret;
+
+ for (i = 0; i < PT1_NR_ADAPS; i++) {
+ adap = pt1_alloc_adapter(pt1);
+ if (IS_ERR(adap)) {
+ ret = PTR_ERR(adap);
+ goto err;
+ }
+
+ adap->index = i;
+ pt1->adaps[i] = adap;
+ }
+ return 0;
+
+err:
+ while (i--)
+ pt1_free_adapter(pt1->adaps[i]);
+
+ return ret;
+}
+
+static void pt1_cleanup_frontend(struct pt1_adapter *adap)
+{
+ dvb_unregister_frontend(adap->fe);
+}
+
+static int pt1_init_frontend(struct pt1_adapter *adap, struct dvb_frontend *fe)
+{
+ int ret;
+
+ adap->orig_set_voltage = fe->ops.set_voltage;
+ adap->orig_sleep = fe->ops.sleep;
+ adap->orig_init = fe->ops.init;
+ fe->ops.set_voltage = pt1_set_voltage;
+ fe->ops.sleep = pt1_sleep;
+ fe->ops.init = pt1_wakeup;
+
+ ret = dvb_register_frontend(&adap->adap, fe);
+ if (ret < 0)
+ return ret;
+
+ adap->fe = fe;
+ return 0;
+}
+
+static void pt1_cleanup_frontends(struct pt1 *pt1)
+{
+ int i;
+ for (i = 0; i < PT1_NR_ADAPS; i++)
+ pt1_cleanup_frontend(pt1->adaps[i]);
+}
+
struct pt1_config {
struct va1j5jf8007s_config va1j5jf8007s_config;
struct va1j5jf8007t_config va1j5jf8007t_config;
@@ -630,29 +730,63 @@ struct pt1_config {
static const struct pt1_config pt1_configs[2] = {
{
- { .demod_address = 0x1b },
- { .demod_address = 0x1a },
+ {
+ .demod_address = 0x1b,
+ .frequency = VA1J5JF8007S_20MHZ,
+ },
+ {
+ .demod_address = 0x1a,
+ .frequency = VA1J5JF8007T_20MHZ,
+ },
}, {
- { .demod_address = 0x19 },
- { .demod_address = 0x18 },
+ {
+ .demod_address = 0x19,
+ .frequency = VA1J5JF8007S_20MHZ,
+ },
+ {
+ .demod_address = 0x18,
+ .frequency = VA1J5JF8007T_20MHZ,
+ },
},
};
-static int pt1_init_adapters(struct pt1 *pt1)
+static const struct pt1_config pt2_configs[2] = {
+ {
+ {
+ .demod_address = 0x1b,
+ .frequency = VA1J5JF8007S_25MHZ,
+ },
+ {
+ .demod_address = 0x1a,
+ .frequency = VA1J5JF8007T_25MHZ,
+ },
+ }, {
+ {
+ .demod_address = 0x19,
+ .frequency = VA1J5JF8007S_25MHZ,
+ },
+ {
+ .demod_address = 0x18,
+ .frequency = VA1J5JF8007T_25MHZ,
+ },
+ },
+};
+
+static int pt1_init_frontends(struct pt1 *pt1)
{
int i, j;
struct i2c_adapter *i2c_adap;
- const struct pt1_config *config;
+ const struct pt1_config *configs, *config;
struct dvb_frontend *fe[4];
- struct pt1_adapter *adap;
int ret;
i = 0;
j = 0;
i2c_adap = &pt1->i2c_adap;
+ configs = pt1->pdev->device == 0x211a ? pt1_configs : pt2_configs;
do {
- config = &pt1_configs[i / 2];
+ config = &configs[i / 2];
fe[i] = va1j5jf8007s_attach(&config->va1j5jf8007s_config,
i2c_adap);
@@ -681,11 +815,9 @@ static int pt1_init_adapters(struct pt1 *pt1)
} while (i < 4);
do {
- adap = pt1_alloc_adapter(pt1, fe[j]);
- if (IS_ERR(adap))
+ ret = pt1_init_frontend(pt1->adaps[j], fe[j]);
+ if (ret < 0)
goto err;
- adap->index = j;
- pt1->adaps[j] = adap;
} while (++j < 4);
return 0;
@@ -695,7 +827,7 @@ err:
fe[i]->ops.release(fe[i]);
while (j--)
- pt1_free_adapter(pt1->adaps[j]);
+ dvb_unregister_frontend(fe[j]);
return ret;
}
@@ -890,9 +1022,12 @@ static void __devexit pt1_remove(struct pci_dev *pdev)
kthread_stop(pt1->kthread);
pt1_cleanup_tables(pt1);
- pt1_cleanup_adapters(pt1);
+ pt1_cleanup_frontends(pt1);
pt1_disable_ram(pt1);
- pt1_set_power(pt1, 0, 0, 1);
+ pt1->power = 0;
+ pt1->reset = 1;
+ pt1_update_power(pt1);
+ pt1_cleanup_adapters(pt1);
i2c_del_adapter(&pt1->i2c_adap);
pci_set_drvdata(pdev, NULL);
kfree(pt1);
@@ -936,10 +1071,21 @@ pt1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pci_iounmap;
}
+ mutex_init(&pt1->lock);
pt1->pdev = pdev;
pt1->regs = regs;
pci_set_drvdata(pdev, pt1);
+ ret = pt1_init_adapters(pt1);
+ if (ret < 0)
+ goto err_kfree;
+
+ mutex_init(&pt1->lock);
+
+ pt1->power = 0;
+ pt1->reset = 1;
+ pt1_update_power(pt1);
+
i2c_adap = &pt1->i2c_adap;
i2c_adap->class = I2C_CLASS_TV_DIGITAL;
i2c_adap->algo = &pt1_i2c_algo;
@@ -948,9 +1094,7 @@ pt1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i2c_set_adapdata(i2c_adap, pt1);
ret = i2c_add_adapter(i2c_adap);
if (ret < 0)
- goto err_kfree;
-
- pt1_set_power(pt1, 0, 0, 1);
+ goto err_pt1_cleanup_adapters;
pt1_i2c_init(pt1);
pt1_i2c_wait(pt1);
@@ -979,19 +1123,21 @@ pt1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pt1_init_streams(pt1);
- pt1_set_power(pt1, 1, 0, 1);
+ pt1->power = 1;
+ pt1_update_power(pt1);
schedule_timeout_uninterruptible((HZ + 49) / 50);
- pt1_set_power(pt1, 1, 0, 0);
+ pt1->reset = 0;
+ pt1_update_power(pt1);
schedule_timeout_uninterruptible((HZ + 999) / 1000);
- ret = pt1_init_adapters(pt1);
+ ret = pt1_init_frontends(pt1);
if (ret < 0)
goto err_pt1_disable_ram;
ret = pt1_init_tables(pt1);
if (ret < 0)
- goto err_pt1_cleanup_adapters;
+ goto err_pt1_cleanup_frontends;
kthread = kthread_run(pt1_thread, pt1, "pt1");
if (IS_ERR(kthread)) {
@@ -1004,11 +1150,15 @@ pt1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err_pt1_cleanup_tables:
pt1_cleanup_tables(pt1);
-err_pt1_cleanup_adapters:
- pt1_cleanup_adapters(pt1);
+err_pt1_cleanup_frontends:
+ pt1_cleanup_frontends(pt1);
err_pt1_disable_ram:
pt1_disable_ram(pt1);
- pt1_set_power(pt1, 0, 0, 1);
+ pt1->power = 0;
+ pt1->reset = 1;
+ pt1_update_power(pt1);
+err_pt1_cleanup_adapters:
+ pt1_cleanup_adapters(pt1);
err_i2c_del_adapter:
i2c_del_adapter(i2c_adap);
err_kfree:
@@ -1027,6 +1177,7 @@ err:
static struct pci_device_id pt1_id_table[] = {
{ PCI_DEVICE(0x10ee, 0x211a) },
+ { PCI_DEVICE(0x10ee, 0x222a) },
{ },
};
MODULE_DEVICE_TABLE(pci, pt1_id_table);
@@ -1054,5 +1205,5 @@ module_init(pt1_init);
module_exit(pt1_cleanup);
MODULE_AUTHOR("Takahito HIRANO <hiranotaka@zng.info>");
-MODULE_DESCRIPTION("Earthsoft PT1 Driver");
+MODULE_DESCRIPTION("Earthsoft PT1/PT2 Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/pt1/va1j5jf8007s.c b/drivers/media/dvb/pt1/va1j5jf8007s.c
index fc6594996e79..451641c0c1d2 100644
--- a/drivers/media/dvb/pt1/va1j5jf8007s.c
+++ b/drivers/media/dvb/pt1/va1j5jf8007s.c
@@ -1,5 +1,5 @@
/*
- * ISDB-S driver for VA1J5JF8007
+ * ISDB-S driver for VA1J5JF8007/VA1J5JF8011
*
* Copyright (C) 2009 HIRANO Takahito <hiranotaka@zng.info>
*
@@ -580,7 +580,7 @@ static void va1j5jf8007s_release(struct dvb_frontend *fe)
static struct dvb_frontend_ops va1j5jf8007s_ops = {
.info = {
- .name = "VA1J5JF8007 ISDB-S",
+ .name = "VA1J5JF8007/VA1J5JF8011 ISDB-S",
.type = FE_QPSK,
.frequency_min = 950000,
.frequency_max = 2150000,
@@ -628,28 +628,50 @@ static int va1j5jf8007s_prepare_1(struct va1j5jf8007s_state *state)
return 0;
}
-static const u8 va1j5jf8007s_prepare_bufs[][2] = {
+static const u8 va1j5jf8007s_20mhz_prepare_bufs[][2] = {
{0x04, 0x02}, {0x0d, 0x55}, {0x11, 0x40}, {0x13, 0x80}, {0x17, 0x01},
{0x1c, 0x0a}, {0x1d, 0xaa}, {0x1e, 0x20}, {0x1f, 0x88}, {0x51, 0xb0},
{0x52, 0x89}, {0x53, 0xb3}, {0x5a, 0x2d}, {0x5b, 0xd3}, {0x85, 0x69},
{0x87, 0x04}, {0x8e, 0x02}, {0xa3, 0xf7}, {0xa5, 0xc0},
};
+static const u8 va1j5jf8007s_25mhz_prepare_bufs[][2] = {
+ {0x04, 0x02}, {0x11, 0x40}, {0x13, 0x80}, {0x17, 0x01}, {0x1c, 0x0a},
+ {0x1d, 0xaa}, {0x1e, 0x20}, {0x1f, 0x88}, {0x51, 0xb0}, {0x52, 0x89},
+ {0x53, 0xb3}, {0x5a, 0x2d}, {0x5b, 0xd3}, {0x85, 0x69}, {0x87, 0x04},
+ {0x8e, 0x26}, {0xa3, 0xf7}, {0xa5, 0xc0},
+};
+
static int va1j5jf8007s_prepare_2(struct va1j5jf8007s_state *state)
{
+ const u8 (*bufs)[2];
+ int size;
u8 addr;
u8 buf[2];
struct i2c_msg msg;
int i;
+ switch (state->config->frequency) {
+ case VA1J5JF8007S_20MHZ:
+ bufs = va1j5jf8007s_20mhz_prepare_bufs;
+ size = ARRAY_SIZE(va1j5jf8007s_20mhz_prepare_bufs);
+ break;
+ case VA1J5JF8007S_25MHZ:
+ bufs = va1j5jf8007s_25mhz_prepare_bufs;
+ size = ARRAY_SIZE(va1j5jf8007s_25mhz_prepare_bufs);
+ break;
+ default:
+ return -EINVAL;
+ }
+
addr = state->config->demod_address;
msg.addr = addr;
msg.flags = 0;
msg.len = 2;
msg.buf = buf;
- for (i = 0; i < ARRAY_SIZE(va1j5jf8007s_prepare_bufs); i++) {
- memcpy(buf, va1j5jf8007s_prepare_bufs[i], sizeof(buf));
+ for (i = 0; i < size; i++) {
+ memcpy(buf, bufs[i], sizeof(buf));
if (i2c_transfer(state->adap, &msg, 1) != 1)
return -EREMOTEIO;
}
diff --git a/drivers/media/dvb/pt1/va1j5jf8007s.h b/drivers/media/dvb/pt1/va1j5jf8007s.h
index aa228a816353..b7d6f05a0e02 100644
--- a/drivers/media/dvb/pt1/va1j5jf8007s.h
+++ b/drivers/media/dvb/pt1/va1j5jf8007s.h
@@ -1,5 +1,5 @@
/*
- * ISDB-S driver for VA1J5JF8007
+ * ISDB-S driver for VA1J5JF8007/VA1J5JF8011
*
* Copyright (C) 2009 HIRANO Takahito <hiranotaka@zng.info>
*
@@ -24,8 +24,14 @@
#ifndef VA1J5JF8007S_H
#define VA1J5JF8007S_H
+enum va1j5jf8007s_frequency {
+ VA1J5JF8007S_20MHZ,
+ VA1J5JF8007S_25MHZ,
+};
+
struct va1j5jf8007s_config {
u8 demod_address;
+ enum va1j5jf8007s_frequency frequency;
};
struct i2c_adapter;
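Usage note: with the new enum, a board driver has to name the demodulator's reference clock when it fills the config. A hedged sketch of an attach on a 25 MHz (PT2-style) board; the demod address and the i2c_adap pointer are illustrative assumptions, and the call itself mirrors pt1_init_frontends() above:

	static const struct va1j5jf8007s_config demo_config = {
		.demod_address	= 0x1b,			/* assumed slave address */
		.frequency	= VA1J5JF8007S_25MHZ,	/* PT2-style 25 MHz reference */
	};
	struct dvb_frontend *fe;

	fe = va1j5jf8007s_attach(&demo_config, i2c_adap);
	if (!fe)
		return -ENODEV;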
diff --git a/drivers/media/dvb/pt1/va1j5jf8007t.c b/drivers/media/dvb/pt1/va1j5jf8007t.c
index 3db4f3e34e8f..0f085c3e571b 100644
--- a/drivers/media/dvb/pt1/va1j5jf8007t.c
+++ b/drivers/media/dvb/pt1/va1j5jf8007t.c
@@ -1,5 +1,5 @@
/*
- * ISDB-T driver for VA1J5JF8007
+ * ISDB-T driver for VA1J5JF8007/VA1J5JF8011
*
* Copyright (C) 2009 HIRANO Takahito <hiranotaka@zng.info>
*
@@ -429,7 +429,7 @@ static void va1j5jf8007t_release(struct dvb_frontend *fe)
static struct dvb_frontend_ops va1j5jf8007t_ops = {
.info = {
- .name = "VA1J5JF8007 ISDB-T",
+ .name = "VA1J5JF8007/VA1J5JF8011 ISDB-T",
.type = FE_OFDM,
.frequency_min = 90000000,
.frequency_max = 770000000,
@@ -448,29 +448,50 @@ static struct dvb_frontend_ops va1j5jf8007t_ops = {
.release = va1j5jf8007t_release,
};
-static const u8 va1j5jf8007t_prepare_bufs[][2] = {
+static const u8 va1j5jf8007t_20mhz_prepare_bufs[][2] = {
{0x03, 0x90}, {0x14, 0x8f}, {0x1c, 0x2a}, {0x1d, 0xa8}, {0x1e, 0xa2},
{0x22, 0x83}, {0x31, 0x0d}, {0x32, 0xe0}, {0x39, 0xd3}, {0x3a, 0x00},
{0x5c, 0x40}, {0x5f, 0x80}, {0x75, 0x02}, {0x76, 0x4e}, {0x77, 0x03},
{0xef, 0x01}
};
+static const u8 va1j5jf8007t_25mhz_prepare_bufs[][2] = {
+ {0x03, 0x90}, {0x1c, 0x2a}, {0x1d, 0xa8}, {0x1e, 0xa2}, {0x22, 0x83},
+ {0x3a, 0x00}, {0x5c, 0x40}, {0x5f, 0x80}, {0x75, 0x0a}, {0x76, 0x4c},
+ {0x77, 0x03}, {0xef, 0x01}
+};
+
int va1j5jf8007t_prepare(struct dvb_frontend *fe)
{
struct va1j5jf8007t_state *state;
+ const u8 (*bufs)[2];
+ int size;
u8 buf[2];
struct i2c_msg msg;
int i;
state = fe->demodulator_priv;
+ switch (state->config->frequency) {
+ case VA1J5JF8007T_20MHZ:
+ bufs = va1j5jf8007t_20mhz_prepare_bufs;
+ size = ARRAY_SIZE(va1j5jf8007t_20mhz_prepare_bufs);
+ break;
+ case VA1J5JF8007T_25MHZ:
+ bufs = va1j5jf8007t_25mhz_prepare_bufs;
+ size = ARRAY_SIZE(va1j5jf8007t_25mhz_prepare_bufs);
+ break;
+ default:
+ return -EINVAL;
+ }
+
msg.addr = state->config->demod_address;
msg.flags = 0;
msg.len = sizeof(buf);
msg.buf = buf;
- for (i = 0; i < ARRAY_SIZE(va1j5jf8007t_prepare_bufs); i++) {
- memcpy(buf, va1j5jf8007t_prepare_bufs[i], sizeof(buf));
+ for (i = 0; i < size; i++) {
+ memcpy(buf, bufs[i], sizeof(buf));
if (i2c_transfer(state->adap, &msg, 1) != 1)
return -EREMOTEIO;
}
diff --git a/drivers/media/dvb/pt1/va1j5jf8007t.h b/drivers/media/dvb/pt1/va1j5jf8007t.h
index ed49906f7769..2903be519ef5 100644
--- a/drivers/media/dvb/pt1/va1j5jf8007t.h
+++ b/drivers/media/dvb/pt1/va1j5jf8007t.h
@@ -1,5 +1,5 @@
/*
- * ISDB-T driver for VA1J5JF8007
+ * ISDB-T driver for VA1J5JF8007/VA1J5JF8011
*
* Copyright (C) 2009 HIRANO Takahito <hiranotaka@zng.info>
*
@@ -24,8 +24,14 @@
#ifndef VA1J5JF8007T_H
#define VA1J5JF8007T_H
+enum va1j5jf8007t_frequency {
+ VA1J5JF8007T_20MHZ,
+ VA1J5JF8007T_25MHZ,
+};
+
struct va1j5jf8007t_config {
u8 demod_address;
+ enum va1j5jf8007t_frequency frequency;
};
struct i2c_adapter;
diff --git a/drivers/media/dvb/ttpci/budget-ci.c b/drivers/media/dvb/ttpci/budget-ci.c
index 49c2a817a06f..461714396331 100644
--- a/drivers/media/dvb/ttpci/budget-ci.c
+++ b/drivers/media/dvb/ttpci/budget-ci.c
@@ -35,7 +35,7 @@
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/spinlock.h>
-#include <media/ir-common.h>
+#include <media/ir-core.h>
#include "budget.h"
@@ -54,6 +54,8 @@
#include "tda1002x.h"
#include "tda827x.h"
+#define MODULE_NAME "budget_ci"
+
/*
* Regarding DEBIADDR_IR:
* Some CI modules hang if random addresses are read.
@@ -80,12 +82,6 @@
#define SLOTSTATUS_READY 8
#define SLOTSTATUS_OCCUPIED (SLOTSTATUS_PRESENT|SLOTSTATUS_RESET|SLOTSTATUS_READY)
-/*
- * Milliseconds during which a key is regarded as pressed.
- * If an identical command arrives within this time, the timer will start over.
- */
-#define IR_KEYPRESS_TIMEOUT 250
-
/* RC5 device wildcard */
#define IR_DEVICE_ANY 255
@@ -102,12 +98,9 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
struct budget_ci_ir {
struct input_dev *dev;
struct tasklet_struct msp430_irq_tasklet;
- struct timer_list timer_keyup;
char name[72]; /* 40 + 32 for (struct saa7146_dev).name */
char phys[32];
- struct ir_input_state state;
int rc5_device;
- u32 last_raw;
u32 ir_key;
bool have_command;
};
@@ -122,18 +115,11 @@ struct budget_ci {
u8 tuner_pll_address; /* used for philips_tdm1316l configs */
};
-static void msp430_ir_keyup(unsigned long data)
-{
- struct budget_ci_ir *ir = (struct budget_ci_ir *) data;
- ir_input_nokey(ir->dev, &ir->state);
-}
-
static void msp430_ir_interrupt(unsigned long data)
{
struct budget_ci *budget_ci = (struct budget_ci *) data;
struct input_dev *dev = budget_ci->ir.dev;
u32 command = ttpci_budget_debiread(&budget_ci->budget, DEBINOSWAP, DEBIADDR_IR, 2, 1, 0) >> 8;
- u32 raw;
/*
* The msp430 chip can generate two different bytes, command and device
@@ -169,20 +155,12 @@ static void msp430_ir_interrupt(unsigned long data)
return;
budget_ci->ir.have_command = false;
+ /* FIXME: We should generate complete scancodes with device info */
if (budget_ci->ir.rc5_device != IR_DEVICE_ANY &&
budget_ci->ir.rc5_device != (command & 0x1f))
return;
- /* Is this a repeated key sequence? (same device, command, toggle) */
- raw = budget_ci->ir.ir_key | (command << 8);
- if (budget_ci->ir.last_raw != raw || !timer_pending(&budget_ci->ir.timer_keyup)) {
- ir_input_nokey(dev, &budget_ci->ir.state);
- ir_input_keydown(dev, &budget_ci->ir.state,
- budget_ci->ir.ir_key);
- budget_ci->ir.last_raw = raw;
- }
-
- mod_timer(&budget_ci->ir.timer_keyup, jiffies + msecs_to_jiffies(IR_KEYPRESS_TIMEOUT));
+ ir_keydown(dev, budget_ci->ir.ir_key, (command & 0x20) ? 1 : 0);
}
static int msp430_ir_init(struct budget_ci *budget_ci)
@@ -190,7 +168,7 @@ static int msp430_ir_init(struct budget_ci *budget_ci)
struct saa7146_dev *saa = budget_ci->budget.dev;
struct input_dev *input_dev = budget_ci->ir.dev;
int error;
- struct ir_scancode_table *ir_codes;
+ char *ir_codes = NULL;
budget_ci->ir.dev = input_dev = input_allocate_device();
@@ -230,7 +208,7 @@ static int msp430_ir_init(struct budget_ci *budget_ci)
case 0x1011:
case 0x1012:
/* The hauppauge keymap is a superset of these remotes */
- ir_codes = &ir_codes_hauppauge_new_table;
+ ir_codes = RC_MAP_HAUPPAUGE_NEW;
if (rc5_device < 0)
budget_ci->ir.rc5_device = 0x1f;
@@ -239,22 +217,15 @@ static int msp430_ir_init(struct budget_ci *budget_ci)
case 0x1017:
case 0x101a:
/* for the Technotrend 1500 bundled remote */
- ir_codes = &ir_codes_tt_1500_table;
+ ir_codes = RC_MAP_TT_1500;
break;
default:
/* unknown remote */
- ir_codes = &ir_codes_budget_ci_old_table;
+ ir_codes = RC_MAP_BUDGET_CI_OLD;
break;
}
- ir_input_init(input_dev, &budget_ci->ir.state, IR_TYPE_RC5);
-
- /* initialise the key-up timeout handler */
- init_timer(&budget_ci->ir.timer_keyup);
- budget_ci->ir.timer_keyup.function = msp430_ir_keyup;
- budget_ci->ir.timer_keyup.data = (unsigned long) &budget_ci->ir;
- budget_ci->ir.last_raw = 0xffff; /* An impossible value */
- error = ir_input_register(input_dev, ir_codes, NULL);
+ error = ir_input_register(input_dev, ir_codes, NULL, MODULE_NAME);
if (error) {
printk(KERN_ERR "budget_ci: could not init driver for IR device (code %d)\n", error);
return error;
@@ -282,9 +253,6 @@ static void msp430_ir_deinit(struct budget_ci *budget_ci)
saa7146_setgpio(saa, 3, SAA7146_GPIO_INPUT);
tasklet_kill(&budget_ci->ir.msp430_irq_tasklet);
- del_timer_sync(&dev->timer);
- ir_input_nokey(dev, &budget_ci->ir.state);
-
ir_input_unregister(dev);
}
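The conversion above drops the driver-private key-up timer and ir_input_state because ir-core now times out key releases itself; the interrupt handler only reports the scancode and the RC5 toggle bit. A minimal sketch of that reporting pattern, mirroring the hunk above (the scancode variable is illustrative):

	/* bit 5 of the device byte is treated as the RC5 toggle bit */
	u8 toggle = (command & 0x20) ? 1 : 0;

	ir_keydown(input_dev, scancode, toggle);
	/* no ir_input_nokey()/del_timer_sync() needed; ir-core emits the key-up */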
diff --git a/drivers/media/dvb/ttpci/budget.c b/drivers/media/dvb/ttpci/budget.c
index 1500210c06cf..9a033bac0ca2 100644
--- a/drivers/media/dvb/ttpci/budget.c
+++ b/drivers/media/dvb/ttpci/budget.c
@@ -442,6 +442,7 @@ static struct stv090x_config tt1600_stv090x_config = {
.repeater_level = STV090x_RPTLEVEL_16,
.tuner_init = NULL,
+ .tuner_sleep = NULL,
.tuner_set_mode = NULL,
.tuner_set_frequency = NULL,
.tuner_get_frequency = NULL,
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 9644cf760aaa..ad9e6f9c22e9 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -45,6 +45,10 @@ config VIDEO_TUNER
tristate
depends on MEDIA_TUNER
+config V4L2_MEM2MEM_DEV
+ tristate
+ depends on VIDEOBUF_GEN
+
#
# Multimedia Video device configuration
#
@@ -480,6 +484,12 @@ config VIDEO_ADV7343
To compile this driver as a module, choose M here: the
module will be called adv7343.
+config VIDEO_AK881X
+ tristate "AK8813/AK8814 video encoders"
+ depends on I2C
+ help
+ Video output driver for AKM AK8813 and AK8814 TV encoders
+
comment "Video improvement chips"
config VIDEO_UPD64031A
@@ -520,6 +530,13 @@ config DISPLAY_DAVINCI_DM646X_EVM
To compile this driver as a module, choose M here: the
module will be called vpif_display.
+config VIDEO_SH_VOU
+ tristate "SuperH VOU video output driver"
+ depends on VIDEO_DEV && ARCH_SHMOBILE
+ select VIDEOBUF_DMA_CONTIG
+ help
+ Support for the Video Output Unit (VOU) on SuperH SoCs.
+
config CAPTURE_DAVINCI_DM646X_EVM
tristate "DM646x EVM Video Capture"
depends on VIDEO_DEV && MACH_DAVINCI_DM6467_EVM
@@ -542,7 +559,8 @@ config VIDEO_DAVINCI_VPIF
config VIDEO_VIVI
tristate "Virtual Video Driver"
- depends on VIDEO_DEV && VIDEO_V4L2 && !SPARC32 && !SPARC64
+ depends on VIDEO_DEV && VIDEO_V4L2 && !SPARC32 && !SPARC64 && FONTS
+ select FONT_8x16
select VIDEOBUF_VMALLOC
default n
---help---
@@ -613,6 +631,8 @@ config VIDEO_ISIF
To compile this driver as a module, choose M here: the
module will be called vpfe.
+source "drivers/media/video/omap/Kconfig"
+
source "drivers/media/video/bt8xx/Kconfig"
config VIDEO_PMS
@@ -647,7 +667,7 @@ config VIDEO_CQCAM
config VIDEO_W9966
tristate "W9966CF Webcam (FlyCam Supra and others) Video For Linux"
- depends on PARPORT_1284 && PARPORT && VIDEO_V4L1
+ depends on PARPORT_1284 && PARPORT && VIDEO_V4L2
help
Video4linux driver for Winbond's w9966 based Webcams.
Currently tested with the LifeView FlyCam Supra.
@@ -740,7 +760,7 @@ source "drivers/media/video/zoran/Kconfig"
config VIDEO_MEYE
tristate "Sony Vaio Picturebook Motion Eye Video For Linux"
- depends on PCI && SONY_LAPTOP && VIDEO_V4L1
+ depends on PCI && SONY_LAPTOP && VIDEO_V4L2
---help---
This is the video4linux driver for the Motion Eye camera found
in the Vaio Picturebook laptops. Please read the material in
@@ -807,7 +827,7 @@ source "drivers/media/video/saa7164/Kconfig"
config VIDEO_M32R_AR
tristate "AR devices"
- depends on M32R && VIDEO_V4L1
+ depends on M32R && VIDEO_V4L2
---help---
This is a video4linux driver for the Renesas AR (Artificial Retina)
camera module.
@@ -1107,3 +1127,27 @@ config USB_S2255
endif # V4L_USB_DRIVERS
endif # VIDEO_CAPTURE_DRIVERS
+
+menuconfig V4L_MEM2MEM_DRIVERS
+ bool "Memory-to-memory multimedia devices"
+ depends on VIDEO_V4L2
+ default n
+ ---help---
+ Say Y here to enable selecting drivers for V4L devices that
+ use system memory for both source and destination buffers, as opposed
+ to capture and output drivers, which use memory buffers for just
+ one of those.
+
+if V4L_MEM2MEM_DRIVERS
+
+config VIDEO_MEM2MEM_TESTDEV
+ tristate "Virtual test device for mem2mem framework"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ select VIDEOBUF_VMALLOC
+ select V4L2_MEM2MEM_DEV
+ default n
+ ---help---
+ This is a virtual test device for the memory-to-memory driver
+ framework.
+
+endif # V4L_MEM2MEM_DRIVERS
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index c51c386559f2..cc93859d3164 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -10,7 +10,8 @@ stkwebcam-objs := stk-webcam.o stk-sensor.o
omap2cam-objs := omap24xxcam.o omap24xxcam-dma.o
-videodev-objs := v4l2-dev.o v4l2-ioctl.o v4l2-device.o
+videodev-objs := v4l2-dev.o v4l2-ioctl.o v4l2-device.o v4l2-fh.o \
+ v4l2-event.o
# V4L2 core modules
@@ -117,6 +118,8 @@ obj-$(CONFIG_VIDEOBUF_VMALLOC) += videobuf-vmalloc.o
obj-$(CONFIG_VIDEOBUF_DVB) += videobuf-dvb.o
obj-$(CONFIG_VIDEO_BTCX) += btcx-risc.o
+obj-$(CONFIG_V4L2_MEM2MEM_DEV) += v4l2-mem2mem.o
+
obj-$(CONFIG_VIDEO_M32R_AR_M64278) += arv.o
obj-$(CONFIG_VIDEO_CX2341X) += cx2341x.o
@@ -149,8 +152,11 @@ obj-$(CONFIG_VIDEO_IVTV) += ivtv/
obj-$(CONFIG_VIDEO_CX18) += cx18/
obj-$(CONFIG_VIDEO_VIVI) += vivi.o
+obj-$(CONFIG_VIDEO_MEM2MEM_TESTDEV) += mem2mem_testdev.o
obj-$(CONFIG_VIDEO_CX23885) += cx23885/
+obj-$(CONFIG_VIDEO_AK881X) += ak881x.o
+
obj-$(CONFIG_VIDEO_OMAP2) += omap2cam.o
obj-$(CONFIG_SOC_CAMERA) += soc_camera.o soc_mediabus.o
obj-$(CONFIG_SOC_CAMERA_PLATFORM) += soc_camera_platform.o
@@ -160,6 +166,10 @@ obj-$(CONFIG_VIDEO_MX3) += mx3_camera.o
obj-$(CONFIG_VIDEO_PXA27x) += pxa_camera.o
obj-$(CONFIG_VIDEO_SH_MOBILE_CEU) += sh_mobile_ceu_camera.o
+obj-$(CONFIG_ARCH_DAVINCI) += davinci/
+
+obj-$(CONFIG_VIDEO_SH_VOU) += sh_vou.o
+
obj-$(CONFIG_VIDEO_AU0828) += au0828/
obj-$(CONFIG_USB_VIDEO_CLASS) += uvc/
@@ -169,6 +179,8 @@ obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2c.o
obj-$(CONFIG_ARCH_DAVINCI) += davinci/
+obj-$(CONFIG_ARCH_OMAP) += omap/
+
EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
EXTRA_CFLAGS += -Idrivers/media/common/tuners
diff --git a/drivers/media/video/ak881x.c b/drivers/media/video/ak881x.c
new file mode 100644
index 000000000000..35390d4717b9
--- /dev/null
+++ b/drivers/media/video/ak881x.c
@@ -0,0 +1,368 @@
+/*
+ * Driver for AK8813 / AK8814 TV-encoders from Asahi Kasei Microsystems Co., Ltd. (AKM)
+ *
+ * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+
+#include <media/ak881x.h>
+#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+
+#define AK881X_INTERFACE_MODE 0
+#define AK881X_VIDEO_PROCESS1 1
+#define AK881X_VIDEO_PROCESS2 2
+#define AK881X_VIDEO_PROCESS3 3
+#define AK881X_DAC_MODE 5
+#define AK881X_STATUS 0x24
+#define AK881X_DEVICE_ID 0x25
+#define AK881X_DEVICE_REVISION 0x26
+
+struct ak881x {
+ struct v4l2_subdev subdev;
+ struct ak881x_pdata *pdata;
+ unsigned int lines;
+ int id; /* V4L2_IDENT_AK881X code from v4l2-chip-ident.h, set from the DEVICE_ID register */
+ char revision; /* DEVICE_REVISION content */
+};
+
+static int reg_read(struct i2c_client *client, const u8 reg)
+{
+ return i2c_smbus_read_byte_data(client, reg);
+}
+
+static int reg_write(struct i2c_client *client, const u8 reg,
+ const u8 data)
+{
+ return i2c_smbus_write_byte_data(client, reg, data);
+}
+
+static int reg_set(struct i2c_client *client, const u8 reg,
+ const u8 data, u8 mask)
+{
+ int ret = reg_read(client, reg);
+ if (ret < 0)
+ return ret;
+ return reg_write(client, reg, (ret & ~mask) | (data & mask));
+}
+
+static struct ak881x *to_ak881x(const struct i2c_client *client)
+{
+ return container_of(i2c_get_clientdata(client), struct ak881x, subdev);
+}
+
+static int ak881x_g_chip_ident(struct v4l2_subdev *sd,
+ struct v4l2_dbg_chip_ident *id)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ak881x *ak881x = to_ak881x(client);
+
+ if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR)
+ return -EINVAL;
+
+ if (id->match.addr != client->addr)
+ return -ENODEV;
+
+ id->ident = ak881x->id;
+ id->revision = ak881x->revision;
+
+ return 0;
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int ak881x_g_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0x26)
+ return -EINVAL;
+
+ if (reg->match.addr != client->addr)
+ return -ENODEV;
+
+ reg->val = reg_read(client, reg->reg);
+
+ if (reg->val > 0xffff)
+ return -EIO;
+
+ return 0;
+}
+
+static int ak881x_s_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0x26)
+ return -EINVAL;
+
+ if (reg->match.addr != client->addr)
+ return -ENODEV;
+
+ if (reg_write(client, reg->reg, reg->val) < 0)
+ return -EIO;
+
+ return 0;
+}
+#endif
+
+static int ak881x_try_g_mbus_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ak881x *ak881x = to_ak881x(client);
+
+ v4l_bound_align_image(&mf->width, 0, 720, 2,
+ &mf->height, 0, ak881x->lines, 1, 0);
+ mf->field = V4L2_FIELD_INTERLACED;
+ mf->code = V4L2_MBUS_FMT_YUYV8_2X8_LE;
+ mf->colorspace = V4L2_COLORSPACE_SMPTE170M;
+
+ return 0;
+}
+
+static int ak881x_s_mbus_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ if (mf->field != V4L2_FIELD_INTERLACED ||
+ mf->code != V4L2_MBUS_FMT_YUYV8_2X8_LE)
+ return -EINVAL;
+
+ return ak881x_try_g_mbus_fmt(sd, mf);
+}
+
+static int ak881x_enum_mbus_fmt(struct v4l2_subdev *sd, int index,
+ enum v4l2_mbus_pixelcode *code)
+{
+ if (index)
+ return -EINVAL;
+
+ *code = V4L2_MBUS_FMT_YUYV8_2X8_LE;
+ return 0;
+}
+
+static int ak881x_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ak881x *ak881x = to_ak881x(client);
+
+ a->bounds.left = 0;
+ a->bounds.top = 0;
+ a->bounds.width = 720;
+ a->bounds.height = ak881x->lines;
+ a->defrect = a->bounds;
+ a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ a->pixelaspect.numerator = 1;
+ a->pixelaspect.denominator = 1;
+
+ return 0;
+}
+
+static int ak881x_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ak881x *ak881x = to_ak881x(client);
+ u8 vp1;
+
+ if (std == V4L2_STD_NTSC_443) {
+ vp1 = 3;
+ ak881x->lines = 480;
+ } else if (std == V4L2_STD_PAL_M) {
+ vp1 = 5;
+ ak881x->lines = 480;
+ } else if (std == V4L2_STD_PAL_60) {
+ vp1 = 7;
+ ak881x->lines = 480;
+ } else if (std && !(std & ~V4L2_STD_PAL)) {
+ vp1 = 0xf;
+ ak881x->lines = 576;
+ } else if (std && !(std & ~V4L2_STD_NTSC)) {
+ vp1 = 0;
+ ak881x->lines = 480;
+ } else {
+ /* No SECAM or PAL_N/Nc supported */
+ return -EINVAL;
+ }
+
+ reg_set(client, AK881X_VIDEO_PROCESS1, vp1, 0xf);
+
+ return 0;
+}
+
+static int ak881x_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ak881x *ak881x = to_ak881x(client);
+
+ if (enable) {
+ u8 dac;
+ /* For colour-bar testing set bit 6 of AK881X_VIDEO_PROCESS1 */
+ /* Default: composite output */
+ if (ak881x->pdata->flags & AK881X_COMPONENT)
+ dac = 3;
+ else
+ dac = 4;
+ /* Turn on the DAC(s) */
+ reg_write(client, AK881X_DAC_MODE, dac);
+ dev_dbg(&client->dev, "chip status 0x%x\n",
+ reg_read(client, AK881X_STATUS));
+ } else {
+ /* ...and clear bit 6 of AK881X_VIDEO_PROCESS1 here */
+ reg_write(client, AK881X_DAC_MODE, 0);
+ dev_dbg(&client->dev, "chip status 0x%x\n",
+ reg_read(client, AK881X_STATUS));
+ }
+
+ return 0;
+}
+
+static struct v4l2_subdev_core_ops ak881x_subdev_core_ops = {
+ .g_chip_ident = ak881x_g_chip_ident,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .g_register = ak881x_g_register,
+ .s_register = ak881x_s_register,
+#endif
+};
+
+static struct v4l2_subdev_video_ops ak881x_subdev_video_ops = {
+ .s_mbus_fmt = ak881x_s_mbus_fmt,
+ .g_mbus_fmt = ak881x_try_g_mbus_fmt,
+ .try_mbus_fmt = ak881x_try_g_mbus_fmt,
+ .cropcap = ak881x_cropcap,
+ .enum_mbus_fmt = ak881x_enum_mbus_fmt,
+ .s_std_output = ak881x_s_std_output,
+ .s_stream = ak881x_s_stream,
+};
+
+static struct v4l2_subdev_ops ak881x_subdev_ops = {
+ .core = &ak881x_subdev_core_ops,
+ .video = &ak881x_subdev_video_ops,
+};
+
+static int ak881x_probe(struct i2c_client *client,
+ const struct i2c_device_id *did)
+{
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct ak881x *ak881x;
+ u8 ifmode, data;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_warn(&adapter->dev,
+ "I2C-Adapter doesn't support I2C_FUNC_SMBUS_WORD\n");
+ return -EIO;
+ }
+
+ ak881x = kzalloc(sizeof(struct ak881x), GFP_KERNEL);
+ if (!ak881x)
+ return -ENOMEM;
+
+ v4l2_i2c_subdev_init(&ak881x->subdev, client, &ak881x_subdev_ops);
+
+ data = reg_read(client, AK881X_DEVICE_ID);
+
+ switch (data) {
+ case 0x13:
+ ak881x->id = V4L2_IDENT_AK8813;
+ break;
+ case 0x14:
+ ak881x->id = V4L2_IDENT_AK8814;
+ break;
+ default:
+ dev_err(&client->dev,
+ "No ak881x chip detected, register read %x\n", data);
+ kfree(ak881x);
+ return -ENODEV;
+ }
+
+ ak881x->revision = reg_read(client, AK881X_DEVICE_REVISION);
+ ak881x->pdata = client->dev.platform_data;
+
+ if (ak881x->pdata) {
+ if (ak881x->pdata->flags & AK881X_FIELD)
+ ifmode = 4;
+ else
+ ifmode = 0;
+
+ switch (ak881x->pdata->flags & AK881X_IF_MODE_MASK) {
+ case AK881X_IF_MODE_BT656:
+ ifmode |= 1;
+ break;
+ case AK881X_IF_MODE_MASTER:
+ ifmode |= 2;
+ break;
+ case AK881X_IF_MODE_SLAVE:
+ default:
+ break;
+ }
+
+ dev_dbg(&client->dev, "IF mode %x\n", ifmode);
+
+ /*
+ * "Line Blanking No." seems to be the same as the number of
+ * "black" lines on, e.g., SuperH VOU, whose default value of 20
+ * "incidentally" matches ak881x' default
+ */
+ reg_write(client, AK881X_INTERFACE_MODE, ifmode | (20 << 3));
+ }
+
+ /* Hardware default: NTSC-M */
+ ak881x->lines = 480;
+
+ dev_info(&client->dev, "Detected an ak881x chip ID %x, revision %x\n",
+ data, ak881x->revision);
+
+ return 0;
+}
+
+static int ak881x_remove(struct i2c_client *client)
+{
+ struct ak881x *ak881x = to_ak881x(client);
+
+ v4l2_device_unregister_subdev(&ak881x->subdev);
+ kfree(ak881x);
+
+ return 0;
+}
+
+static const struct i2c_device_id ak881x_id[] = {
+ { "ak8813", 0 },
+ { "ak8814", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ak881x_id);
+
+static struct i2c_driver ak881x_i2c_driver = {
+ .driver = {
+ .name = "ak881x",
+ },
+ .probe = ak881x_probe,
+ .remove = ak881x_remove,
+ .id_table = ak881x_id,
+};
+
+static int __init ak881x_module_init(void)
+{
+ return i2c_add_driver(&ak881x_i2c_driver);
+}
+
+static void __exit ak881x_module_exit(void)
+{
+ i2c_del_driver(&ak881x_i2c_driver);
+}
+
+module_init(ak881x_module_init);
+module_exit(ak881x_module_exit);
+
+MODULE_DESCRIPTION("TV-output driver for ak8813/ak8814");
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/arv.c b/drivers/media/video/arv.c
index a356d6bd3131..31e7a123d19a 100644
--- a/drivers/media/video/arv.c
+++ b/drivers/media/video/arv.c
@@ -27,8 +27,10 @@
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/sched.h>
-#include <linux/videodev.h>
+#include <linux/version.h>
+#include <linux/videodev2.h>
#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <linux/mutex.h>
@@ -39,7 +41,7 @@
#include <asm/byteorder.h>
#if 0
-#define DEBUG(n, args...) printk(args)
+#define DEBUG(n, args...) printk(KERN_INFO args)
#define CHECK_LOST 1
#else
#define DEBUG(n, args...)
@@ -52,10 +54,10 @@
*/
#define USE_INT 0 /* Don't modify */
-#define VERSION "0.03"
+#define VERSION "0.04"
#define ar_inl(addr) inl((unsigned long)(addr))
-#define ar_outl(val, addr) outl((unsigned long)(val),(unsigned long)(addr))
+#define ar_outl(val, addr) outl((unsigned long)(val), (unsigned long)(addr))
extern struct cpuinfo_m32r boot_cpu_data;
@@ -79,7 +81,7 @@ extern struct cpuinfo_m32r boot_cpu_data;
/* bits & bytes per pixel */
#define AR_BITS_PER_PIXEL 16
-#define AR_BYTES_PER_PIXEL (AR_BITS_PER_PIXEL/8)
+#define AR_BYTES_PER_PIXEL (AR_BITS_PER_PIXEL / 8)
/* line buffer size */
#define AR_LINE_BYTES_VGA (AR_WIDTH_VGA * AR_BYTES_PER_PIXEL)
@@ -104,8 +106,9 @@ extern struct cpuinfo_m32r boot_cpu_data;
#define AR_MODE_INTERLACE 0
#define AR_MODE_NORMAL 1
-struct ar_device {
- struct video_device *vdev;
+struct ar {
+ struct v4l2_device v4l2_dev;
+ struct video_device vdev;
unsigned int start_capture; /* duaring capture in INT. mode. */
#if USE_INT
unsigned char *line_buff; /* DMA line buffer */
@@ -116,12 +119,13 @@ struct ar_device {
int width, height;
int frame_bytes, line_bytes;
wait_queue_head_t wait;
- unsigned long in_use;
struct mutex lock;
};
+static struct ar ardev;
+
static int video_nr = -1; /* video device number (first free) */
-static unsigned char yuv[MAX_AR_FRAME_BYTES];
+static unsigned char yuv[MAX_AR_FRAME_BYTES];
/* module parameters */
/* default frequency */
@@ -133,9 +137,7 @@ module_param(freq, int, 0);
module_param(vga, int, 0);
module_param(vga_interlace, int, 0);
-static int ar_initialize(struct video_device *dev);
-
-static inline void wait_for_vsync(void)
+static void wait_for_vsync(void)
{
while (ar_inl(ARVCR0) & ARVCR0_VDS) /* wait for VSYNC */
cpu_relax();
@@ -143,7 +145,7 @@ static inline void wait_for_vsync(void)
cpu_relax();
}
-static inline void wait_acknowledge(void)
+static void wait_acknowledge(void)
{
int i;
@@ -156,7 +158,7 @@ static inline void wait_acknowledge(void)
/*******************************************************************
* I2C functions
*******************************************************************/
-void iic(int n, unsigned long addr, unsigned long data1, unsigned long data2,
+static void iic(int n, unsigned long addr, unsigned long data1, unsigned long data2,
unsigned long data3)
{
int i;
@@ -200,7 +202,7 @@ void iic(int n, unsigned long addr, unsigned long data1, unsigned long data2,
}
-void init_iic(void)
+static void init_iic(void)
{
DEBUG(1, "init_iic:\n");
@@ -214,13 +216,12 @@ void init_iic(void)
/* I2C CLK */
/* 50MH-100k */
- if (freq == 75) {
+ if (freq == 75)
ar_outl(369, PLDI2CFREQ); /* BCLK = 75MHz */
- } else if (freq == 50) {
+ else if (freq == 50)
ar_outl(244, PLDI2CFREQ); /* BCLK = 50MHz */
- } else {
+ else
ar_outl(244, PLDI2CFREQ); /* default: BCLK = 50MHz */
- }
ar_outl(0x1, PLDI2CCR); /* I2CCR Enable */
}
@@ -245,7 +246,7 @@ static inline void clear_dma_status(void)
ar_outl(0x8000, M32R_DMAEDET_PORTL); /* clear status */
}
-static inline void wait_for_vertical_sync(int exp_line)
+static void wait_for_vertical_sync(struct ar *ar, int exp_line)
{
#if CHECK_LOST
int tmout = 10000; /* FIXME */
@@ -260,7 +261,7 @@ static inline void wait_for_vertical_sync(int exp_line)
break;
}
if (tmout < 0)
- printk("arv: lost %d -> %d\n", exp_line, l);
+ v4l2_err(&ar->v4l2_dev, "lost %d -> %d\n", exp_line, l);
#else
while (ar_inl(ARVHCOUNT) != exp_line)
cpu_relax();
@@ -269,15 +270,14 @@ static inline void wait_for_vertical_sync(int exp_line)
static ssize_t ar_read(struct file *file, char *buf, size_t count, loff_t *ppos)
{
- struct video_device *v = video_devdata(file);
- struct ar_device *ar = video_get_drvdata(v);
+ struct ar *ar = video_drvdata(file);
long ret = ar->frame_bytes; /* return read bytes */
unsigned long arvcr1 = 0;
unsigned long flags;
unsigned char *p;
int h, w;
unsigned char *py, *pu, *pv;
-#if ! USE_INT
+#if !USE_INT
int l;
#endif
@@ -305,7 +305,7 @@ static ssize_t ar_read(struct file *file, char *buf, size_t count, loff_t *ppos)
ar_outl(ar->line_bytes, M32R_DMA0RBCUT_PORTL); /* reload count (bytes) */
/*
- * Okey , kicks AR LSI to invoke an interrupt
+ * Okay, kick AR LSI to invoke an interrupt
*/
ar->start_capture = 0;
ar_outl(arvcr1 | ARVCR1_HIEN, ARVCR1);
@@ -313,7 +313,7 @@ static ssize_t ar_read(struct file *file, char *buf, size_t count, loff_t *ppos)
/* .... AR interrupts .... */
interruptible_sleep_on(&ar->wait);
if (signal_pending(current)) {
- printk("arv: interrupted while get frame data.\n");
+ printk(KERN_ERR "arv: interrupted while get frame data.\n");
ret = -EINTR;
goto out_up;
}
@@ -334,7 +334,7 @@ static ssize_t ar_read(struct file *file, char *buf, size_t count, loff_t *ppos)
cpu_relax();
if (ar->mode == AR_MODE_INTERLACE && ar->size == AR_SIZE_VGA) {
for (h = 0; h < ar->height; h++) {
- wait_for_vertical_sync(h);
+ wait_for_vertical_sync(ar, h);
if (h < (AR_HEIGHT_VGA/2))
l = h << 1;
else
@@ -349,7 +349,7 @@ static ssize_t ar_read(struct file *file, char *buf, size_t count, loff_t *ppos)
}
} else {
for (h = 0; h < ar->height; h++) {
- wait_for_vertical_sync(h);
+ wait_for_vertical_sync(ar, h);
ar_outl(virt_to_phys(ar->frame[h]), M32R_DMA0CDA_PORTL);
enable_dma();
while (!(ar_inl(M32R_DMAEDET_PORTL) & 0x8000))
@@ -386,7 +386,7 @@ static ssize_t ar_read(struct file *file, char *buf, size_t count, loff_t *ppos)
}
}
if (copy_to_user(buf, yuv, ar->frame_bytes)) {
- printk("arv: failed while copy_to_user yuv.\n");
+ v4l2_err(&ar->v4l2_dev, "failed while copy_to_user yuv.\n");
ret = -EFAULT;
goto out_up;
}
@@ -396,153 +396,127 @@ out_up:
return ret;
}
-static long ar_do_ioctl(struct file *file, unsigned int cmd, void *arg)
+static int ar_querycap(struct file *file, void *priv,
+ struct v4l2_capability *vcap)
{
- struct video_device *dev = video_devdata(file);
- struct ar_device *ar = video_get_drvdata(dev);
-
- DEBUG(1, "ar_ioctl()\n");
- switch(cmd) {
- case VIDIOCGCAP:
- {
- struct video_capability *b = arg;
- DEBUG(1, "VIDIOCGCAP:\n");
- strcpy(b->name, ar->vdev->name);
- b->type = VID_TYPE_CAPTURE;
- b->channels = 0;
- b->audios = 0;
- b->maxwidth = MAX_AR_WIDTH;
- b->maxheight = MAX_AR_HEIGHT;
- b->minwidth = MIN_AR_WIDTH;
- b->minheight = MIN_AR_HEIGHT;
- return 0;
- }
- case VIDIOCGCHAN:
- DEBUG(1, "VIDIOCGCHAN:\n");
- return 0;
- case VIDIOCSCHAN:
- DEBUG(1, "VIDIOCSCHAN:\n");
- return 0;
- case VIDIOCGTUNER:
- DEBUG(1, "VIDIOCGTUNER:\n");
- return 0;
- case VIDIOCSTUNER:
- DEBUG(1, "VIDIOCSTUNER:\n");
- return 0;
- case VIDIOCGPICT:
- DEBUG(1, "VIDIOCGPICT:\n");
- return 0;
- case VIDIOCSPICT:
- DEBUG(1, "VIDIOCSPICT:\n");
- return 0;
- case VIDIOCCAPTURE:
- DEBUG(1, "VIDIOCCAPTURE:\n");
+ struct ar *ar = video_drvdata(file);
+
+ strlcpy(vcap->driver, ar->vdev.name, sizeof(vcap->driver));
+ strlcpy(vcap->card, "Colour AR VGA", sizeof(vcap->card));
+ strlcpy(vcap->bus_info, "Platform", sizeof(vcap->bus_info));
+ vcap->version = KERNEL_VERSION(0, 0, 4);
+ vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
+ return 0;
+}
+
+static int ar_enum_input(struct file *file, void *fh, struct v4l2_input *vin)
+{
+ if (vin->index > 0)
return -EINVAL;
- case VIDIOCGWIN:
- {
- struct video_window *w = arg;
- DEBUG(1, "VIDIOCGWIN:\n");
- memset(w, 0, sizeof(*w));
- w->width = ar->width;
- w->height = ar->height;
- return 0;
+ strlcpy(vin->name, "Camera", sizeof(vin->name));
+ vin->type = V4L2_INPUT_TYPE_CAMERA;
+ vin->audioset = 0;
+ vin->tuner = 0;
+ vin->std = V4L2_STD_ALL;
+ vin->status = 0;
+ return 0;
+}
+
+static int ar_g_input(struct file *file, void *fh, unsigned int *inp)
+{
+ *inp = 0;
+ return 0;
+}
+
+static int ar_s_input(struct file *file, void *fh, unsigned int inp)
+{
+ return inp ? -EINVAL : 0;
+}
+
+static int ar_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+ struct ar *ar = video_drvdata(file);
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+
+ pix->width = ar->width;
+ pix->height = ar->height;
+ pix->pixelformat = V4L2_PIX_FMT_YUV422P;
+ pix->field = (ar->mode == AR_MODE_NORMAL) ? V4L2_FIELD_NONE : V4L2_FIELD_INTERLACED;
+ pix->bytesperline = ar->width;
+ pix->sizeimage = 2 * ar->width * ar->height;
+ /* Just a guess */
+ pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
+ return 0;
+}
+
+static int ar_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+ struct ar *ar = video_drvdata(file);
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+
+ if (pix->height <= AR_HEIGHT_QVGA || pix->width <= AR_WIDTH_QVGA) {
+ pix->height = AR_HEIGHT_QVGA;
+ pix->width = AR_WIDTH_QVGA;
+ pix->field = V4L2_FIELD_INTERLACED;
+ } else {
+ pix->height = AR_HEIGHT_VGA;
+ pix->width = AR_WIDTH_VGA;
+ pix->field = vga_interlace ? V4L2_FIELD_INTERLACED : V4L2_FIELD_NONE;
}
- case VIDIOCSWIN:
- {
- struct video_window *w = arg;
- DEBUG(1, "VIDIOCSWIN:\n");
- if ((w->width != AR_WIDTH_VGA || w->height != AR_HEIGHT_VGA) &&
- (w->width != AR_WIDTH_QVGA || w->height != AR_HEIGHT_QVGA))
- return -EINVAL;
-
- mutex_lock(&ar->lock);
- ar->width = w->width;
- ar->height = w->height;
- if (ar->width == AR_WIDTH_VGA) {
- ar->size = AR_SIZE_VGA;
- ar->frame_bytes = AR_FRAME_BYTES_VGA;
- ar->line_bytes = AR_LINE_BYTES_VGA;
- if (vga_interlace)
- ar->mode = AR_MODE_INTERLACE;
- else
- ar->mode = AR_MODE_NORMAL;
- } else {
- ar->size = AR_SIZE_QVGA;
- ar->frame_bytes = AR_FRAME_BYTES_QVGA;
- ar->line_bytes = AR_LINE_BYTES_QVGA;
+ pix->pixelformat = V4L2_PIX_FMT_YUV422P;
+ pix->bytesperline = ar->width;
+ pix->sizeimage = 2 * ar->width * ar->height;
+ /* Just a guess */
+ pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
+ return 0;
+}
+
+static int ar_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+ struct ar *ar = video_drvdata(file);
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+ int ret = ar_try_fmt_vid_cap(file, fh, fmt);
+
+ if (ret)
+ return ret;
+ mutex_lock(&ar->lock);
+ ar->width = pix->width;
+ ar->height = pix->height;
+ if (ar->width == AR_WIDTH_VGA) {
+ ar->size = AR_SIZE_VGA;
+ ar->frame_bytes = AR_FRAME_BYTES_VGA;
+ ar->line_bytes = AR_LINE_BYTES_VGA;
+ if (vga_interlace)
ar->mode = AR_MODE_INTERLACE;
- }
- mutex_unlock(&ar->lock);
- return 0;
- }
- case VIDIOCGFBUF:
- DEBUG(1, "VIDIOCGFBUF:\n");
- return -EINVAL;
- case VIDIOCSFBUF:
- DEBUG(1, "VIDIOCSFBUF:\n");
- return -EINVAL;
- case VIDIOCKEY:
- DEBUG(1, "VIDIOCKEY:\n");
- return 0;
- case VIDIOCGFREQ:
- DEBUG(1, "VIDIOCGFREQ:\n");
- return -EINVAL;
- case VIDIOCSFREQ:
- DEBUG(1, "VIDIOCSFREQ:\n");
- return -EINVAL;
- case VIDIOCGAUDIO:
- DEBUG(1, "VIDIOCGAUDIO:\n");
- return -EINVAL;
- case VIDIOCSAUDIO:
- DEBUG(1, "VIDIOCSAUDIO:\n");
- return -EINVAL;
- case VIDIOCSYNC:
- DEBUG(1, "VIDIOCSYNC:\n");
- return -EINVAL;
- case VIDIOCMCAPTURE:
- DEBUG(1, "VIDIOCMCAPTURE:\n");
- return -EINVAL;
- case VIDIOCGMBUF:
- DEBUG(1, "VIDIOCGMBUF:\n");
- return -EINVAL;
- case VIDIOCGUNIT:
- DEBUG(1, "VIDIOCGUNIT:\n");
- return -EINVAL;
- case VIDIOCGCAPTURE:
- DEBUG(1, "VIDIOCGCAPTURE:\n");
- return -EINVAL;
- case VIDIOCSCAPTURE:
- DEBUG(1, "VIDIOCSCAPTURE:\n");
- return -EINVAL;
- case VIDIOCSPLAYMODE:
- DEBUG(1, "VIDIOCSPLAYMODE:\n");
- return -EINVAL;
- case VIDIOCSWRITEMODE:
- DEBUG(1, "VIDIOCSWRITEMODE:\n");
- return -EINVAL;
- case VIDIOCGPLAYINFO:
- DEBUG(1, "VIDIOCGPLAYINFO:\n");
- return -EINVAL;
- case VIDIOCSMICROCODE:
- DEBUG(1, "VIDIOCSMICROCODE:\n");
- return -EINVAL;
- case VIDIOCGVBIFMT:
- DEBUG(1, "VIDIOCGVBIFMT:\n");
- return -EINVAL;
- case VIDIOCSVBIFMT:
- DEBUG(1, "VIDIOCSVBIFMT:\n");
- return -EINVAL;
- default:
- DEBUG(1, "Unknown ioctl(0x%08x)\n", cmd);
- return -ENOIOCTLCMD;
+ else
+ ar->mode = AR_MODE_NORMAL;
+ } else {
+ ar->size = AR_SIZE_QVGA;
+ ar->frame_bytes = AR_FRAME_BYTES_QVGA;
+ ar->line_bytes = AR_LINE_BYTES_QVGA;
+ ar->mode = AR_MODE_INTERLACE;
}
+ /* Ok we figured out what to use from our wide choice */
+ mutex_unlock(&ar->lock);
return 0;
}
-static long ar_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
+static int ar_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdesc *fmt)
{
- return video_usercopy(file, cmd, arg, ar_do_ioctl);
+ static struct v4l2_fmtdesc formats[] = {
+ { 0, 0, 0,
+ "YUV 4:2:2 Planar", V4L2_PIX_FMT_YUV422P,
+ { 0, 0, 0, 0 }
+ },
+ };
+ enum v4l2_buf_type type = fmt->type;
+
+ if (fmt->index > 0)
+ return -EINVAL;
+
+ *fmt = formats[fmt->index];
+ fmt->type = type;
+ return 0;
}
#if USE_INT
@@ -551,7 +525,7 @@ static long ar_ioctl(struct file *file, unsigned int cmd,
*/
static void ar_interrupt(int irq, void *dev)
{
- struct ar_device *ar = dev;
+ struct ar *ar = dev;
unsigned int line_count;
unsigned int line_number;
unsigned int arvcr1;
@@ -559,11 +533,11 @@ static void ar_interrupt(int irq, void *dev)
line_count = ar_inl(ARVHCOUNT); /* line number */
if (ar->mode == AR_MODE_INTERLACE && ar->size == AR_SIZE_VGA) {
/* operations for interlace mode */
- if ( line_count < (AR_HEIGHT_VGA/2) ) /* even line */
+ if (line_count < (AR_HEIGHT_VGA / 2)) /* even line */
line_number = (line_count << 1);
else /* odd line */
line_number =
- (((line_count - (AR_HEIGHT_VGA/2)) << 1) + 1);
+ (((line_count - (AR_HEIGHT_VGA / 2)) << 1) + 1);
} else {
line_number = line_count;
}
@@ -623,11 +597,10 @@ static void ar_interrupt(int irq, void *dev)
* 0 is returned in success.
*
*/
-static int ar_initialize(struct video_device *dev)
+static int ar_initialize(struct ar *ar)
{
- struct ar_device *ar = video_get_drvdata(dev);
unsigned long cr = 0;
- int i,found=0;
+ int i, found = 0;
DEBUG(1, "ar_initialize:\n");
@@ -666,129 +639,119 @@ static int ar_initialize(struct video_device *dev)
if (found == 0)
return -ENODEV;
- printk("arv: Initializing ");
-
- iic(2,0x78,0x11,0x01,0x00); /* start */
- iic(3,0x78,0x12,0x00,0x06);
- iic(3,0x78,0x12,0x12,0x30);
- iic(3,0x78,0x12,0x15,0x58);
- iic(3,0x78,0x12,0x17,0x30);
- printk(".");
- iic(3,0x78,0x12,0x1a,0x97);
- iic(3,0x78,0x12,0x1b,0xff);
- iic(3,0x78,0x12,0x1c,0xff);
- iic(3,0x78,0x12,0x26,0x10);
- iic(3,0x78,0x12,0x27,0x00);
- printk(".");
- iic(2,0x78,0x34,0x02,0x00);
- iic(2,0x78,0x7a,0x10,0x00);
- iic(2,0x78,0x80,0x39,0x00);
- iic(2,0x78,0x81,0xe6,0x00);
- iic(2,0x78,0x8d,0x00,0x00);
- printk(".");
- iic(2,0x78,0x8e,0x0c,0x00);
- iic(2,0x78,0x8f,0x00,0x00);
+ v4l2_info(&ar->v4l2_dev, "Initializing ");
+
+ iic(2, 0x78, 0x11, 0x01, 0x00); /* start */
+ iic(3, 0x78, 0x12, 0x00, 0x06);
+ iic(3, 0x78, 0x12, 0x12, 0x30);
+ iic(3, 0x78, 0x12, 0x15, 0x58);
+ iic(3, 0x78, 0x12, 0x17, 0x30);
+ printk(KERN_CONT ".");
+ iic(3, 0x78, 0x12, 0x1a, 0x97);
+ iic(3, 0x78, 0x12, 0x1b, 0xff);
+ iic(3, 0x78, 0x12, 0x1c, 0xff);
+ iic(3, 0x78, 0x12, 0x26, 0x10);
+ iic(3, 0x78, 0x12, 0x27, 0x00);
+ printk(KERN_CONT ".");
+ iic(2, 0x78, 0x34, 0x02, 0x00);
+ iic(2, 0x78, 0x7a, 0x10, 0x00);
+ iic(2, 0x78, 0x80, 0x39, 0x00);
+ iic(2, 0x78, 0x81, 0xe6, 0x00);
+ iic(2, 0x78, 0x8d, 0x00, 0x00);
+ printk(KERN_CONT ".");
+ iic(2, 0x78, 0x8e, 0x0c, 0x00);
+ iic(2, 0x78, 0x8f, 0x00, 0x00);
#if 0
- iic(2,0x78,0x90,0x00,0x00); /* AWB on=1 off=0 */
+ iic(2, 0x78, 0x90, 0x00, 0x00); /* AWB on=1 off=0 */
#endif
- iic(2,0x78,0x93,0x01,0x00);
- iic(2,0x78,0x94,0xcd,0x00);
- iic(2,0x78,0x95,0x00,0x00);
- printk(".");
- iic(2,0x78,0x96,0xa0,0x00);
- iic(2,0x78,0x97,0x00,0x00);
- iic(2,0x78,0x98,0x60,0x00);
- iic(2,0x78,0x99,0x01,0x00);
- iic(2,0x78,0x9a,0x19,0x00);
- printk(".");
- iic(2,0x78,0x9b,0x02,0x00);
- iic(2,0x78,0x9c,0xe8,0x00);
- iic(2,0x78,0x9d,0x02,0x00);
- iic(2,0x78,0x9e,0x2e,0x00);
- iic(2,0x78,0xb8,0x78,0x00);
- iic(2,0x78,0xba,0x05,0x00);
+ iic(2, 0x78, 0x93, 0x01, 0x00);
+ iic(2, 0x78, 0x94, 0xcd, 0x00);
+ iic(2, 0x78, 0x95, 0x00, 0x00);
+ printk(KERN_CONT ".");
+ iic(2, 0x78, 0x96, 0xa0, 0x00);
+ iic(2, 0x78, 0x97, 0x00, 0x00);
+ iic(2, 0x78, 0x98, 0x60, 0x00);
+ iic(2, 0x78, 0x99, 0x01, 0x00);
+ iic(2, 0x78, 0x9a, 0x19, 0x00);
+ printk(KERN_CONT ".");
+ iic(2, 0x78, 0x9b, 0x02, 0x00);
+ iic(2, 0x78, 0x9c, 0xe8, 0x00);
+ iic(2, 0x78, 0x9d, 0x02, 0x00);
+ iic(2, 0x78, 0x9e, 0x2e, 0x00);
+ iic(2, 0x78, 0xb8, 0x78, 0x00);
+ iic(2, 0x78, 0xba, 0x05, 0x00);
#if 0
- iic(2,0x78,0x83,0x8c,0x00); /* brightness */
+ iic(2, 0x78, 0x83, 0x8c, 0x00); /* brightness */
#endif
- printk(".");
+ printk(KERN_CONT ".");
/* color correction */
- iic(3,0x78,0x49,0x00,0x95); /* a */
- iic(3,0x78,0x49,0x01,0x96); /* b */
- iic(3,0x78,0x49,0x03,0x85); /* c */
- iic(3,0x78,0x49,0x04,0x97); /* d */
- iic(3,0x78,0x49,0x02,0x7e); /* e(Lo) */
- iic(3,0x78,0x49,0x05,0xa4); /* f(Lo) */
- iic(3,0x78,0x49,0x06,0x04); /* e(Hi) */
- iic(3,0x78,0x49,0x07,0x04); /* e(Hi) */
- iic(2,0x78,0x48,0x01,0x00); /* on=1 off=0 */
-
- printk(".");
- iic(2,0x78,0x11,0x00,0x00); /* end */
- printk(" done\n");
+ iic(3, 0x78, 0x49, 0x00, 0x95); /* a */
+ iic(3, 0x78, 0x49, 0x01, 0x96); /* b */
+ iic(3, 0x78, 0x49, 0x03, 0x85); /* c */
+ iic(3, 0x78, 0x49, 0x04, 0x97); /* d */
+ iic(3, 0x78, 0x49, 0x02, 0x7e); /* e(Lo) */
+ iic(3, 0x78, 0x49, 0x05, 0xa4); /* f(Lo) */
+ iic(3, 0x78, 0x49, 0x06, 0x04); /* e(Hi) */
+ iic(3, 0x78, 0x49, 0x07, 0x04); /* e(Hi) */
+ iic(2, 0x78, 0x48, 0x01, 0x00); /* on=1 off=0 */
+
+ printk(KERN_CONT ".");
+ iic(2, 0x78, 0x11, 0x00, 0x00); /* end */
+ printk(KERN_CONT " done\n");
return 0;
}
-void ar_release(struct video_device *vfd)
-{
- struct ar_device *ar = video_get_drvdata(vfd);
- mutex_lock(&ar->lock);
- video_device_release(vfd);
-}
-
/****************************************************************************
*
* Video4Linux Module functions
*
****************************************************************************/
-static struct ar_device ardev;
-
-static int ar_exclusive_open(struct file *file)
-{
- return test_and_set_bit(0, &ardev.in_use) ? -EBUSY : 0;
-}
-
-static int ar_exclusive_release(struct file *file)
-{
- clear_bit(0, &ardev.in_use);
- return 0;
-}
static const struct v4l2_file_operations ar_fops = {
.owner = THIS_MODULE,
- .open = ar_exclusive_open,
- .release = ar_exclusive_release,
.read = ar_read,
- .ioctl = ar_ioctl,
+ .ioctl = video_ioctl2,
};
-static struct video_device ar_template = {
- .name = "Colour AR VGA",
- .fops = &ar_fops,
- .release = ar_release,
+static const struct v4l2_ioctl_ops ar_ioctl_ops = {
+ .vidioc_querycap = ar_querycap,
+ .vidioc_g_input = ar_g_input,
+ .vidioc_s_input = ar_s_input,
+ .vidioc_enum_input = ar_enum_input,
+ .vidioc_enum_fmt_vid_cap = ar_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = ar_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = ar_s_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = ar_try_fmt_vid_cap,
};
#define ALIGN4(x) ((((int)(x)) & 0x3) == 0)
static int __init ar_init(void)
{
- struct ar_device *ar;
+ struct ar *ar;
+ struct v4l2_device *v4l2_dev;
int ret;
int i;
- DEBUG(1, "ar_init:\n");
- ret = -EIO;
- printk(KERN_INFO "arv: Colour AR VGA driver %s\n", VERSION);
-
ar = &ardev;
- memset(ar, 0, sizeof(struct ar_device));
+ v4l2_dev = &ar->v4l2_dev;
+ strlcpy(v4l2_dev->name, "arv", sizeof(v4l2_dev->name));
+ v4l2_info(v4l2_dev, "Colour AR VGA driver %s\n", VERSION);
+
+ ret = v4l2_device_register(NULL, v4l2_dev);
+ if (ret < 0) {
+ v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
+ return ret;
+ }
+ ret = -EIO;
#if USE_INT
/* allocate a DMA buffer for 1 line. */
ar->line_buff = kmalloc(MAX_AR_LINE_BYTES, GFP_KERNEL | GFP_DMA);
- if (ar->line_buff == NULL || ! ALIGN4(ar->line_buff)) {
- printk("arv: buffer allocation failed for DMA.\n");
+ if (ar->line_buff == NULL || !ALIGN4(ar->line_buff)) {
+ v4l2_err(v4l2_dev, "buffer allocation failed for DMA.\n");
ret = -ENOMEM;
goto out_end;
}
@@ -796,20 +759,19 @@ static int __init ar_init(void)
/* allocate buffers for a frame */
for (i = 0; i < MAX_AR_HEIGHT; i++) {
ar->frame[i] = kmalloc(MAX_AR_LINE_BYTES, GFP_KERNEL);
- if (ar->frame[i] == NULL || ! ALIGN4(ar->frame[i])) {
- printk("arv: buffer allocation failed for frame.\n");
+ if (ar->frame[i] == NULL || !ALIGN4(ar->frame[i])) {
+ v4l2_err(v4l2_dev, "buffer allocation failed for frame.\n");
ret = -ENOMEM;
goto out_line_buff;
}
}
- ar->vdev = video_device_alloc();
- if (!ar->vdev) {
- printk(KERN_ERR "arv: video_device_alloc() failed\n");
- return -ENOMEM;
- }
- memcpy(ar->vdev, &ar_template, sizeof(ar_template));
- video_set_drvdata(ar->vdev, ar);
+ strlcpy(ar->vdev.name, "Colour AR VGA", sizeof(ar->vdev.name));
+ ar->vdev.v4l2_dev = v4l2_dev;
+ ar->vdev.fops = &ar_fops;
+ ar->vdev.ioctl_ops = &ar_ioctl_ops;
+ ar->vdev.release = video_device_release_empty;
+ video_set_drvdata(&ar->vdev, ar);
if (vga) {
ar->width = AR_WIDTH_VGA;
@@ -834,14 +796,14 @@ static int __init ar_init(void)
#if USE_INT
if (request_irq(M32R_IRQ_INT3, ar_interrupt, 0, "arv", ar)) {
- printk("arv: request_irq(%d) failed.\n", M32R_IRQ_INT3);
+ v4l2_err("request_irq(%d) failed.\n", M32R_IRQ_INT3);
ret = -EIO;
goto out_irq;
}
#endif
- if (ar_initialize(ar->vdev) != 0) {
- printk("arv: M64278 not found.\n");
+ if (ar_initialize(ar) != 0) {
+ v4l2_err(v4l2_dev, "M64278 not found.\n");
ret = -ENODEV;
goto out_dev;
}
@@ -852,15 +814,15 @@ static int __init ar_init(void)
* device is named "video[0-64]".
* video_register_device() initializes h/w using ar_initialize().
*/
- if (video_register_device(ar->vdev, VFL_TYPE_GRABBER, video_nr) != 0) {
+ if (video_register_device(&ar->vdev, VFL_TYPE_GRABBER, video_nr) != 0) {
/* return -1, -ENFILE(full) or others */
- printk("arv: register video (Colour AR) failed.\n");
+ v4l2_err(v4l2_dev, "register video (Colour AR) failed.\n");
ret = -ENODEV;
goto out_dev;
}
- printk("%s: Found M64278 VGA (IRQ %d, Freq %dMHz).\n",
- video_device_node_name(ar->vdev), M32R_IRQ_INT3, freq);
+ v4l2_info(v4l2_dev, "%s: Found M64278 VGA (IRQ %d, Freq %dMHz).\n",
+ video_device_node_name(&ar->vdev), M32R_IRQ_INT3, freq);
return 0;
@@ -879,6 +841,7 @@ out_line_buff:
out_end:
#endif
+ v4l2_device_unregister(&ar->v4l2_dev);
return ret;
}
@@ -886,7 +849,7 @@ out_end:
static int __init ar_init_module(void)
{
freq = (boot_cpu_data.bus_clock / 1000000);
- printk("arv: Bus clock %d\n", freq);
+ printk(KERN_INFO "arv: Bus clock %d\n", freq);
if (freq != 50 && freq != 75)
freq = DEFAULT_FREQ;
return ar_init();
@@ -894,11 +857,11 @@ static int __init ar_init_module(void)
static void __exit ar_cleanup_module(void)
{
- struct ar_device *ar;
+ struct ar *ar;
int i;
ar = &ardev;
- video_unregister_device(ar->vdev);
+ video_unregister_device(&ar->vdev);
#if USE_INT
free_irq(M32R_IRQ_INT3, ar);
#endif
@@ -907,6 +870,7 @@ static void __exit ar_cleanup_module(void)
#if USE_INT
kfree(ar->line_buff);
#endif
+ v4l2_device_unregister(&ar->v4l2_dev);
}
module_init(ar_init_module);
diff --git a/drivers/media/video/au0828/au0828-video.c b/drivers/media/video/au0828/au0828-video.c
index 8c140c01c5e6..66150216f976 100644
--- a/drivers/media/video/au0828/au0828-video.c
+++ b/drivers/media/video/au0828/au0828-video.c
@@ -1105,7 +1105,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
tmp = input->index;
- if (tmp > AU0828_MAX_INPUT)
+ if (tmp >= AU0828_MAX_INPUT)
return -EINVAL;
if (AUVI_INPUT(tmp).type == 0)
return -EINVAL;
diff --git a/drivers/media/video/bt8xx/bttv-cards.c b/drivers/media/video/bt8xx/bttv-cards.c
index 716870ae85d5..7af56cde0c79 100644
--- a/drivers/media/video/bt8xx/bttv-cards.c
+++ b/drivers/media/video/bt8xx/bttv-cards.c
@@ -241,6 +241,10 @@ static struct CARD {
{ 0xa1550101, BTTV_BOARD_IVC200, "IVC-200G" },
{ 0xa1550102, BTTV_BOARD_IVC200, "IVC-200G" },
{ 0xa1550103, BTTV_BOARD_IVC200, "IVC-200G" },
+ { 0xa1550800, BTTV_BOARD_IVC200, "IVC-200" },
+ { 0xa1550801, BTTV_BOARD_IVC200, "IVC-200" },
+ { 0xa1550802, BTTV_BOARD_IVC200, "IVC-200" },
+ { 0xa1550803, BTTV_BOARD_IVC200, "IVC-200" },
{ 0xa182ff00, BTTV_BOARD_IVC120, "IVC-120G" },
{ 0xa182ff01, BTTV_BOARD_IVC120, "IVC-120G" },
{ 0xa182ff02, BTTV_BOARD_IVC120, "IVC-120G" },
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index f4860f03dfc3..350e7af75884 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -1806,8 +1806,8 @@ buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size)
*size = fh->fmt->depth*fh->width*fh->height >> 3;
if (0 == *count)
*count = gbuffers;
- while (*size * *count > gbuffers * gbufsize)
- (*count)--;
+ if (*size * *count > gbuffers * gbufsize)
+ *count = (gbuffers * gbufsize) / *size;
return 0;
}
diff --git a/drivers/media/video/bt8xx/bttv-input.c b/drivers/media/video/bt8xx/bttv-input.c
index aa153a986ade..f68717a4bdec 100644
--- a/drivers/media/video/bt8xx/bttv-input.c
+++ b/drivers/media/video/bt8xx/bttv-input.c
@@ -49,6 +49,8 @@ module_param(ir_rc5_key_timeout, int, 0644);
#define DEVNAME "bttv-input"
+#define MODULE_NAME "bttv"
+
/* ---------------------------------------------------------------------- */
static void ir_handle_key(struct bttv *btv)
@@ -246,7 +248,7 @@ static void bttv_ir_stop(struct bttv *btv)
int bttv_input_init(struct bttv *btv)
{
struct card_ir *ir;
- struct ir_scancode_table *ir_codes = NULL;
+ char *ir_codes = NULL;
struct input_dev *input_dev;
u64 ir_type = IR_TYPE_OTHER;
int err = -ENOMEM;
@@ -264,7 +266,7 @@ int bttv_input_init(struct bttv *btv)
case BTTV_BOARD_AVERMEDIA:
case BTTV_BOARD_AVPHONE98:
case BTTV_BOARD_AVERMEDIA98:
- ir_codes = &ir_codes_avermedia_table;
+ ir_codes = RC_MAP_AVERMEDIA;
ir->mask_keycode = 0xf88000;
ir->mask_keydown = 0x010000;
ir->polling = 50; // ms
@@ -272,14 +274,14 @@ int bttv_input_init(struct bttv *btv)
case BTTV_BOARD_AVDVBT_761:
case BTTV_BOARD_AVDVBT_771:
- ir_codes = &ir_codes_avermedia_dvbt_table;
+ ir_codes = RC_MAP_AVERMEDIA_DVBT;
ir->mask_keycode = 0x0f00c0;
ir->mask_keydown = 0x000020;
ir->polling = 50; // ms
break;
case BTTV_BOARD_PXELVWPLTVPAK:
- ir_codes = &ir_codes_pixelview_table;
+ ir_codes = RC_MAP_PIXELVIEW;
ir->mask_keycode = 0x003e00;
ir->mask_keyup = 0x010000;
ir->polling = 50; // ms
@@ -287,24 +289,24 @@ int bttv_input_init(struct bttv *btv)
case BTTV_BOARD_PV_M4900:
case BTTV_BOARD_PV_BT878P_9B:
case BTTV_BOARD_PV_BT878P_PLUS:
- ir_codes = &ir_codes_pixelview_table;
+ ir_codes = RC_MAP_PIXELVIEW;
ir->mask_keycode = 0x001f00;
ir->mask_keyup = 0x008000;
ir->polling = 50; // ms
break;
case BTTV_BOARD_WINFAST2000:
- ir_codes = &ir_codes_winfast_table;
+ ir_codes = RC_MAP_WINFAST;
ir->mask_keycode = 0x1f8;
break;
case BTTV_BOARD_MAGICTVIEW061:
case BTTV_BOARD_MAGICTVIEW063:
- ir_codes = &ir_codes_winfast_table;
+ ir_codes = RC_MAP_WINFAST;
ir->mask_keycode = 0x0008e000;
ir->mask_keydown = 0x00200000;
break;
case BTTV_BOARD_APAC_VIEWCOMP:
- ir_codes = &ir_codes_apac_viewcomp_table;
+ ir_codes = RC_MAP_APAC_VIEWCOMP;
ir->mask_keycode = 0x001f00;
ir->mask_keyup = 0x008000;
ir->polling = 50; // ms
@@ -312,30 +314,30 @@ int bttv_input_init(struct bttv *btv)
case BTTV_BOARD_ASKEY_CPH03X:
case BTTV_BOARD_CONCEPTRONIC_CTVFMI2:
case BTTV_BOARD_CONTVFMI:
- ir_codes = &ir_codes_pixelview_table;
+ ir_codes = RC_MAP_PIXELVIEW;
ir->mask_keycode = 0x001F00;
ir->mask_keyup = 0x006000;
ir->polling = 50; // ms
break;
case BTTV_BOARD_NEBULA_DIGITV:
- ir_codes = &ir_codes_nebula_table;
+ ir_codes = RC_MAP_NEBULA;
btv->custom_irq = bttv_rc5_irq;
ir->rc5_gpio = 1;
break;
case BTTV_BOARD_MACHTV_MAGICTV:
- ir_codes = &ir_codes_apac_viewcomp_table;
+ ir_codes = RC_MAP_APAC_VIEWCOMP;
ir->mask_keycode = 0x001F00;
ir->mask_keyup = 0x004000;
ir->polling = 50; /* ms */
break;
case BTTV_BOARD_KOZUMI_KTV_01C:
- ir_codes = &ir_codes_pctv_sedna_table;
+ ir_codes = RC_MAP_PCTV_SEDNA;
ir->mask_keycode = 0x001f00;
ir->mask_keyup = 0x006000;
ir->polling = 50; /* ms */
break;
case BTTV_BOARD_ENLTV_FM_2:
- ir_codes = &ir_codes_encore_enltv2_table;
+ ir_codes = RC_MAP_ENCORE_ENLTV2;
ir->mask_keycode = 0x00fd00;
ir->mask_keyup = 0x000080;
ir->polling = 1; /* ms */
@@ -390,7 +392,7 @@ int bttv_input_init(struct bttv *btv)
bttv_ir_start(btv, ir);
/* all done */
- err = ir_input_register(btv->remote->dev, ir_codes, NULL);
+ err = ir_input_register(btv->remote->dev, ir_codes, NULL, MODULE_NAME);
if (err)
goto err_out_stop;
diff --git a/drivers/media/video/bw-qcam.c b/drivers/media/video/bw-qcam.c
index 9e39bc5f7b00..3c9e754d73a0 100644
--- a/drivers/media/video/bw-qcam.c
+++ b/drivers/media/video/bw-qcam.c
@@ -80,8 +80,8 @@ OTHER DEALINGS IN THE SOFTWARE.
#include "bw-qcam.h"
-static unsigned int maxpoll=250; /* Maximum busy-loop count for qcam I/O */
-static unsigned int yieldlines=4; /* Yield after this many during capture */
+static unsigned int maxpoll = 250; /* Maximum busy-loop count for qcam I/O */
+static unsigned int yieldlines = 4; /* Yield after this many during capture */
static int video_nr = -1;
static unsigned int force_init; /* Whether to probe aggressively */
@@ -156,7 +156,7 @@ static int qc_calibrate(struct qcam_device *q)
mdelay(1);
schedule();
count++;
- } while (value == 0xff && count<2048);
+ } while (value == 0xff && count < 2048);
q->whitebal = value;
return value;
@@ -170,16 +170,15 @@ static struct qcam_device *qcam_init(struct parport *port)
struct qcam_device *q;
q = kmalloc(sizeof(struct qcam_device), GFP_KERNEL);
- if(q==NULL)
+ if (q == NULL)
return NULL;
q->pport = port;
q->pdev = parport_register_device(port, "bw-qcam", NULL, NULL,
- NULL, 0, NULL);
- if (q->pdev == NULL)
- {
+ NULL, 0, NULL);
+ if (q->pdev == NULL) {
printk(KERN_ERR "bw-qcam: couldn't register for %s.\n",
- port->name);
+ port->name);
kfree(q);
return NULL;
}
@@ -247,12 +246,10 @@ static int qc_readparam(struct qcam_device *q)
static int qc_waithand(struct qcam_device *q, int val)
{
int status;
- int runs=0;
+ int runs = 0;
- if (val)
- {
- while (!((status = read_lpstatus(q)) & 8))
- {
+ if (val) {
+ while (!((status = read_lpstatus(q)) & 8)) {
/* 1000 is enough spins on the I/O for all normal
cases, at that point we start to poll slowly
until the camera wakes up. However, we are
@@ -260,18 +257,13 @@ static int qc_waithand(struct qcam_device *q, int val)
setting it lower is much better for interactive
response. */
- if(runs++>maxpoll)
- {
+ if (runs++ > maxpoll)
msleep_interruptible(5);
- }
- if(runs>(maxpoll+1000)) /* 5 seconds */
+ if (runs > (maxpoll + 1000)) /* 5 seconds */
return -1;
}
- }
- else
- {
- while (((status = read_lpstatus(q)) & 8))
- {
+ } else {
+ while (((status = read_lpstatus(q)) & 8)) {
/* 1000 is enough spins on the I/O for all normal
cases, at that point we start to poll slowly
until the camera wakes up. However, we are
@@ -279,11 +271,9 @@ static int qc_waithand(struct qcam_device *q, int val)
setting it lower is much better for interactive
response. */
- if(runs++>maxpoll)
- {
+ if (runs++ > maxpoll)
msleep_interruptible(5);
- }
- if(runs++>(maxpoll+1000)) /* 5 seconds */
+ if (runs++ > (maxpoll + 1000)) /* 5 seconds */
return -1;
}
}
@@ -299,10 +289,9 @@ static int qc_waithand(struct qcam_device *q, int val)
static unsigned int qc_waithand2(struct qcam_device *q, int val)
{
unsigned int status;
- int runs=0;
+ int runs = 0;
- do
- {
+ do {
status = read_lpdata(q);
/* 1000 is enough spins on the I/O for all normal
cases, at that point we start to poll slowly
@@ -311,14 +300,11 @@ static unsigned int qc_waithand2(struct qcam_device *q, int val)
setting it lower is much better for interactive
response. */
- if(runs++>maxpoll)
- {
+ if (runs++ > maxpoll)
msleep_interruptible(5);
- }
- if(runs++>(maxpoll+1000)) /* 5 seconds */
+ if (runs++ > (maxpoll + 1000)) /* 5 seconds */
return 0;
- }
- while ((status & 1) != val);
+ } while ((status & 1) != val);
return status;
}
@@ -342,8 +328,7 @@ static int qc_detect(struct qcam_device *q)
lastreg = reg = read_lpstatus(q) & 0xf0;
- for (i = 0; i < 500; i++)
- {
+ for (i = 0; i < 500; i++) {
reg = read_lpstatus(q) & 0xf0;
if (reg != lastreg)
count++;
@@ -357,7 +342,7 @@ static int qc_detect(struct qcam_device *q)
won't be flashing these bits. Possibly unloading the module
in the middle of a grab? Or some timeout condition?
I've seen this parameter as low as 19 on my 450Mhz box - mpc */
- printk("Debugging: QCam detection counter <30-200 counts as detected>: %d\n", count);
+ printk(KERN_DEBUG "Debugging: QCam detection counter <30-200 counts as detected>: %d\n", count);
return 1;
#endif
@@ -367,7 +352,7 @@ static int qc_detect(struct qcam_device *q)
return 1; /* found */
} else {
printk(KERN_ERR "No Quickcam found on port %s\n",
- q->pport->name);
+ q->pport->name);
printk(KERN_DEBUG "Quickcam detection counter: %u\n", count);
return 0; /* not found */
}
@@ -381,26 +366,24 @@ static int qc_detect(struct qcam_device *q)
static void qc_reset(struct qcam_device *q)
{
- switch (q->port_mode & QC_FORCE_MASK)
- {
- case QC_FORCE_UNIDIR:
- q->port_mode = (q->port_mode & ~QC_MODE_MASK) | QC_UNIDIR;
- break;
+ switch (q->port_mode & QC_FORCE_MASK) {
+ case QC_FORCE_UNIDIR:
+ q->port_mode = (q->port_mode & ~QC_MODE_MASK) | QC_UNIDIR;
+ break;
- case QC_FORCE_BIDIR:
- q->port_mode = (q->port_mode & ~QC_MODE_MASK) | QC_BIDIR;
- break;
+ case QC_FORCE_BIDIR:
+ q->port_mode = (q->port_mode & ~QC_MODE_MASK) | QC_BIDIR;
+ break;
- case QC_ANY:
- write_lpcontrol(q, 0x20);
- write_lpdata(q, 0x75);
+ case QC_ANY:
+ write_lpcontrol(q, 0x20);
+ write_lpdata(q, 0x75);
- if (read_lpdata(q) != 0x75) {
- q->port_mode = (q->port_mode & ~QC_MODE_MASK) | QC_BIDIR;
- } else {
- q->port_mode = (q->port_mode & ~QC_MODE_MASK) | QC_UNIDIR;
- }
- break;
+ if (read_lpdata(q) != 0x75)
+ q->port_mode = (q->port_mode & ~QC_MODE_MASK) | QC_BIDIR;
+ else
+ q->port_mode = (q->port_mode & ~QC_MODE_MASK) | QC_UNIDIR;
+ break;
}
write_lpcontrol(q, 0xb);
@@ -423,36 +406,33 @@ static int qc_setscanmode(struct qcam_device *q)
{
int old_mode = q->mode;
- switch (q->transfer_scale)
- {
- case 1:
- q->mode = 0;
- break;
- case 2:
- q->mode = 4;
- break;
- case 4:
- q->mode = 8;
- break;
+ switch (q->transfer_scale) {
+ case 1:
+ q->mode = 0;
+ break;
+ case 2:
+ q->mode = 4;
+ break;
+ case 4:
+ q->mode = 8;
+ break;
}
- switch (q->bpp)
- {
- case 4:
- break;
- case 6:
- q->mode += 2;
- break;
+ switch (q->bpp) {
+ case 4:
+ break;
+ case 6:
+ q->mode += 2;
+ break;
}
- switch (q->port_mode & QC_MODE_MASK)
- {
- case QC_BIDIR:
- q->mode += 1;
- break;
- case QC_NOTSET:
- case QC_UNIDIR:
- break;
+ switch (q->port_mode & QC_MODE_MASK) {
+ case QC_BIDIR:
+ q->mode += 1;
+ break;
+ case QC_NOTSET:
+ case QC_UNIDIR:
+ break;
}
if (q->mode != old_mode)
@@ -493,7 +473,7 @@ static void qc_set(struct qcam_device *q)
} else {
val = q->width * q->bpp;
val2 = (((q->port_mode & QC_MODE_MASK) == QC_BIDIR) ? 24 : 8) *
- q->transfer_scale;
+ q->transfer_scale;
}
val = DIV_ROUND_UP(val, val2);
qc_command(q, 0x13);
@@ -521,85 +501,80 @@ static void qc_set(struct qcam_device *q)
static inline int qc_readbytes(struct qcam_device *q, char buffer[])
{
- int ret=1;
+ int ret = 1;
unsigned int hi, lo;
unsigned int hi2, lo2;
static int state;
- if (buffer == NULL)
- {
+ if (buffer == NULL) {
state = 0;
return 0;
}
- switch (q->port_mode & QC_MODE_MASK)
- {
- case QC_BIDIR: /* Bi-directional Port */
- write_lpcontrol(q, 0x26);
- lo = (qc_waithand2(q, 1) >> 1);
- hi = (read_lpstatus(q) >> 3) & 0x1f;
- write_lpcontrol(q, 0x2e);
- lo2 = (qc_waithand2(q, 0) >> 1);
- hi2 = (read_lpstatus(q) >> 3) & 0x1f;
- switch (q->bpp)
- {
- case 4:
- buffer[0] = lo & 0xf;
- buffer[1] = ((lo & 0x70) >> 4) | ((hi & 1) << 3);
- buffer[2] = (hi & 0x1e) >> 1;
- buffer[3] = lo2 & 0xf;
- buffer[4] = ((lo2 & 0x70) >> 4) | ((hi2 & 1) << 3);
- buffer[5] = (hi2 & 0x1e) >> 1;
- ret = 6;
- break;
- case 6:
- buffer[0] = lo & 0x3f;
- buffer[1] = ((lo & 0x40) >> 6) | (hi << 1);
- buffer[2] = lo2 & 0x3f;
- buffer[3] = ((lo2 & 0x40) >> 6) | (hi2 << 1);
- ret = 4;
- break;
- }
+ switch (q->port_mode & QC_MODE_MASK) {
+ case QC_BIDIR: /* Bi-directional Port */
+ write_lpcontrol(q, 0x26);
+ lo = (qc_waithand2(q, 1) >> 1);
+ hi = (read_lpstatus(q) >> 3) & 0x1f;
+ write_lpcontrol(q, 0x2e);
+ lo2 = (qc_waithand2(q, 0) >> 1);
+ hi2 = (read_lpstatus(q) >> 3) & 0x1f;
+ switch (q->bpp) {
+ case 4:
+ buffer[0] = lo & 0xf;
+ buffer[1] = ((lo & 0x70) >> 4) | ((hi & 1) << 3);
+ buffer[2] = (hi & 0x1e) >> 1;
+ buffer[3] = lo2 & 0xf;
+ buffer[4] = ((lo2 & 0x70) >> 4) | ((hi2 & 1) << 3);
+ buffer[5] = (hi2 & 0x1e) >> 1;
+ ret = 6;
+ break;
+ case 6:
+ buffer[0] = lo & 0x3f;
+ buffer[1] = ((lo & 0x40) >> 6) | (hi << 1);
+ buffer[2] = lo2 & 0x3f;
+ buffer[3] = ((lo2 & 0x40) >> 6) | (hi2 << 1);
+ ret = 4;
break;
+ }
+ break;
+
+ case QC_UNIDIR: /* Unidirectional Port */
+ write_lpcontrol(q, 6);
+ lo = (qc_waithand(q, 1) & 0xf0) >> 4;
+ write_lpcontrol(q, 0xe);
+ hi = (qc_waithand(q, 0) & 0xf0) >> 4;
- case QC_UNIDIR: /* Unidirectional Port */
- write_lpcontrol(q, 6);
- lo = (qc_waithand(q, 1) & 0xf0) >> 4;
- write_lpcontrol(q, 0xe);
- hi = (qc_waithand(q, 0) & 0xf0) >> 4;
-
- switch (q->bpp)
- {
- case 4:
- buffer[0] = lo;
- buffer[1] = hi;
- ret = 2;
- break;
- case 6:
- switch (state)
- {
- case 0:
- buffer[0] = (lo << 2) | ((hi & 0xc) >> 2);
- q->saved_bits = (hi & 3) << 4;
- state = 1;
- ret = 1;
- break;
- case 1:
- buffer[0] = lo | q->saved_bits;
- q->saved_bits = hi << 2;
- state = 2;
- ret = 1;
- break;
- case 2:
- buffer[0] = ((lo & 0xc) >> 2) | q->saved_bits;
- buffer[1] = ((lo & 3) << 4) | hi;
- state = 0;
- ret = 2;
- break;
- }
- break;
+ switch (q->bpp) {
+ case 4:
+ buffer[0] = lo;
+ buffer[1] = hi;
+ ret = 2;
+ break;
+ case 6:
+ switch (state) {
+ case 0:
+ buffer[0] = (lo << 2) | ((hi & 0xc) >> 2);
+ q->saved_bits = (hi & 3) << 4;
+ state = 1;
+ ret = 1;
+ break;
+ case 1:
+ buffer[0] = lo | q->saved_bits;
+ q->saved_bits = hi << 2;
+ state = 2;
+ ret = 1;
+ break;
+ case 2:
+ buffer[0] = ((lo & 0xc) >> 2) | q->saved_bits;
+ buffer[1] = ((lo & 3) << 4) | hi;
+ state = 0;
+ ret = 2;
+ break;
}
break;
+ }
+ break;
}
return ret;
}
@@ -615,7 +590,7 @@ static inline int qc_readbytes(struct qcam_device *q, char buffer[])
* n=2^(bit depth)-1. Ask me for more details if you don't understand
* this. */
-static long qc_capture(struct qcam_device * q, char __user *buf, unsigned long len)
+static long qc_capture(struct qcam_device *q, char __user *buf, unsigned long len)
{
int i, j, k, yield;
int bytes;
@@ -623,9 +598,9 @@ static long qc_capture(struct qcam_device * q, char __user *buf, unsigned long l
int divisor;
int pixels_per_line;
int pixels_read = 0;
- int got=0;
+ int got = 0;
char buffer[6];
- int shift=8-q->bpp;
+ int shift = 8 - q->bpp;
char invert;
if (q->mode == -1)
@@ -634,13 +609,12 @@ static long qc_capture(struct qcam_device * q, char __user *buf, unsigned long l
qc_command(q, 0x7);
qc_command(q, q->mode);
- if ((q->port_mode & QC_MODE_MASK) == QC_BIDIR)
- {
+ if ((q->port_mode & QC_MODE_MASK) == QC_BIDIR) {
write_lpcontrol(q, 0x2e); /* turn port around */
write_lpcontrol(q, 0x26);
- (void) qc_waithand(q, 1);
+ qc_waithand(q, 1);
write_lpcontrol(q, 0x2e);
- (void) qc_waithand(q, 0);
+ qc_waithand(q, 0);
}
/* strange -- should be 15:63 below, but 4bpp is odd */
@@ -650,33 +624,28 @@ static long qc_capture(struct qcam_device * q, char __user *buf, unsigned long l
pixels_per_line = q->width / q->transfer_scale;
transperline = q->width * q->bpp;
divisor = (((q->port_mode & QC_MODE_MASK) == QC_BIDIR) ? 24 : 8) *
- q->transfer_scale;
+ q->transfer_scale;
transperline = DIV_ROUND_UP(transperline, divisor);
- for (i = 0, yield = yieldlines; i < linestotrans; i++)
- {
- for (pixels_read = j = 0; j < transperline; j++)
- {
+ for (i = 0, yield = yieldlines; i < linestotrans; i++) {
+ for (pixels_read = j = 0; j < transperline; j++) {
bytes = qc_readbytes(q, buffer);
- for (k = 0; k < bytes && (pixels_read + k) < pixels_per_line; k++)
- {
+ for (k = 0; k < bytes && (pixels_read + k) < pixels_per_line; k++) {
int o;
- if (buffer[k] == 0 && invert == 16)
- {
+ if (buffer[k] == 0 && invert == 16) {
/* 4bpp is odd (again) -- inverter is 16, not 15, but output
must be 0-15 -- bls */
buffer[k] = 16;
}
- o=i*pixels_per_line + pixels_read + k;
- if(o<len)
- {
+ o = i * pixels_per_line + pixels_read + k;
+ if (o < len) {
got++;
- put_user((invert - buffer[k])<<shift, buf+o);
+ put_user((invert - buffer[k]) << shift, buf + o);
}
}
pixels_read += bytes;
}
- (void) qc_readbytes(q, NULL); /* reset state machine */
+ qc_readbytes(q, NULL); /* reset state machine */
/* Grabbing an entire frame from the quickcam is a lengthy
process. We don't (usually) want to busy-block the
@@ -690,14 +659,13 @@ static long qc_capture(struct qcam_device * q, char __user *buf, unsigned long l
}
}
- if ((q->port_mode & QC_MODE_MASK) == QC_BIDIR)
- {
+ if ((q->port_mode & QC_MODE_MASK) == QC_BIDIR) {
write_lpcontrol(q, 2);
write_lpcontrol(q, 6);
udelay(3);
write_lpcontrol(q, 0xe);
}
- if(got<len)
+ if (got < len)
return got;
return len;
}
@@ -709,11 +677,10 @@ static long qc_capture(struct qcam_device * q, char __user *buf, unsigned long l
static long qcam_do_ioctl(struct file *file, unsigned int cmd, void *arg)
{
struct video_device *dev = video_devdata(file);
- struct qcam_device *qcam=(struct qcam_device *)dev;
+ struct qcam_device *qcam = (struct qcam_device *)dev;
- switch(cmd)
- {
- case VIDIOCGCAP:
+ switch (cmd) {
+ case VIDIOCGCAP:
{
struct video_capability *b = arg;
strcpy(b->name, "Quickcam");
@@ -726,73 +693,73 @@ static long qcam_do_ioctl(struct file *file, unsigned int cmd, void *arg)
b->minheight = 60;
return 0;
}
- case VIDIOCGCHAN:
+ case VIDIOCGCHAN:
{
struct video_channel *v = arg;
- if(v->channel!=0)
+ if (v->channel != 0)
return -EINVAL;
- v->flags=0;
- v->tuners=0;
+ v->flags = 0;
+ v->tuners = 0;
/* Good question.. its composite or SVHS so.. */
v->type = VIDEO_TYPE_CAMERA;
strcpy(v->name, "Camera");
return 0;
}
- case VIDIOCSCHAN:
+ case VIDIOCSCHAN:
{
struct video_channel *v = arg;
- if(v->channel!=0)
+ if (v->channel != 0)
return -EINVAL;
return 0;
}
- case VIDIOCGTUNER:
+ case VIDIOCGTUNER:
{
struct video_tuner *v = arg;
- if(v->tuner)
+ if (v->tuner)
return -EINVAL;
strcpy(v->name, "Format");
- v->rangelow=0;
- v->rangehigh=0;
- v->flags= 0;
+ v->rangelow = 0;
+ v->rangehigh = 0;
+ v->flags = 0;
v->mode = VIDEO_MODE_AUTO;
return 0;
}
- case VIDIOCSTUNER:
+ case VIDIOCSTUNER:
{
struct video_tuner *v = arg;
- if(v->tuner)
+ if (v->tuner)
return -EINVAL;
- if(v->mode!=VIDEO_MODE_AUTO)
+ if (v->mode != VIDEO_MODE_AUTO)
return -EINVAL;
return 0;
}
- case VIDIOCGPICT:
+ case VIDIOCGPICT:
{
struct video_picture *p = arg;
- p->colour=0x8000;
- p->hue=0x8000;
- p->brightness=qcam->brightness<<8;
- p->contrast=qcam->contrast<<8;
- p->whiteness=qcam->whitebal<<8;
- p->depth=qcam->bpp;
- p->palette=VIDEO_PALETTE_GREY;
+ p->colour = 0x8000;
+ p->hue = 0x8000;
+ p->brightness = qcam->brightness << 8;
+ p->contrast = qcam->contrast << 8;
+ p->whiteness = qcam->whitebal << 8;
+ p->depth = qcam->bpp;
+ p->palette = VIDEO_PALETTE_GREY;
return 0;
}
- case VIDIOCSPICT:
+ case VIDIOCSPICT:
{
struct video_picture *p = arg;
- if(p->palette!=VIDEO_PALETTE_GREY)
+ if (p->palette != VIDEO_PALETTE_GREY)
return -EINVAL;
- if(p->depth!=4 && p->depth!=6)
+ if (p->depth != 4 && p->depth != 6)
return -EINVAL;
/*
* Now load the camera.
*/
- qcam->brightness = p->brightness>>8;
- qcam->contrast = p->contrast>>8;
- qcam->whitebal = p->whiteness>>8;
+ qcam->brightness = p->brightness >> 8;
+ qcam->contrast = p->contrast >> 8;
+ qcam->whitebal = p->whiteness >> 8;
qcam->bpp = p->depth;
mutex_lock(&qcam->lock);
@@ -802,28 +769,25 @@ static long qcam_do_ioctl(struct file *file, unsigned int cmd, void *arg)
return 0;
}
- case VIDIOCSWIN:
+ case VIDIOCSWIN:
{
struct video_window *vw = arg;
- if(vw->flags)
+ if (vw->flags)
return -EINVAL;
- if(vw->clipcount)
+ if (vw->clipcount)
return -EINVAL;
- if(vw->height<60||vw->height>240)
+ if (vw->height < 60 || vw->height > 240)
return -EINVAL;
- if(vw->width<80||vw->width>320)
+ if (vw->width < 80 || vw->width > 320)
return -EINVAL;
qcam->width = 320;
qcam->height = 240;
qcam->transfer_scale = 4;
- if(vw->width>=160 && vw->height>=120)
- {
+ if (vw->width >= 160 && vw->height >= 120)
qcam->transfer_scale = 2;
- }
- if(vw->width>=320 && vw->height>=240)
- {
+ if (vw->width >= 320 && vw->height >= 240) {
qcam->width = 320;
qcam->height = 240;
qcam->transfer_scale = 1;
@@ -839,41 +803,42 @@ static long qcam_do_ioctl(struct file *file, unsigned int cmd, void *arg)
/* Ok we figured out what to use from our wide choice */
return 0;
}
- case VIDIOCGWIN:
+ case VIDIOCGWIN:
{
struct video_window *vw = arg;
+
memset(vw, 0, sizeof(*vw));
- vw->width=qcam->width/qcam->transfer_scale;
- vw->height=qcam->height/qcam->transfer_scale;
+ vw->width = qcam->width / qcam->transfer_scale;
+ vw->height = qcam->height / qcam->transfer_scale;
return 0;
}
- case VIDIOCKEY:
- return 0;
- case VIDIOCCAPTURE:
- case VIDIOCGFBUF:
- case VIDIOCSFBUF:
- case VIDIOCGFREQ:
- case VIDIOCSFREQ:
- case VIDIOCGAUDIO:
- case VIDIOCSAUDIO:
- return -EINVAL;
- default:
- return -ENOIOCTLCMD;
+ case VIDIOCKEY:
+ return 0;
+ case VIDIOCCAPTURE:
+ case VIDIOCGFBUF:
+ case VIDIOCSFBUF:
+ case VIDIOCGFREQ:
+ case VIDIOCSFREQ:
+ case VIDIOCGAUDIO:
+ case VIDIOCSAUDIO:
+ return -EINVAL;
+ default:
+ return -ENOIOCTLCMD;
}
return 0;
}
static long qcam_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
+ unsigned int cmd, unsigned long arg)
{
return video_usercopy(file, cmd, arg, qcam_do_ioctl);
}
static ssize_t qcam_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+ size_t count, loff_t *ppos)
{
struct video_device *v = video_devdata(file);
- struct qcam_device *qcam=(struct qcam_device *)v;
+ struct qcam_device *qcam = (struct qcam_device *)v;
int len;
parport_claim_or_block(qcam->pdev);
@@ -885,7 +850,7 @@ static ssize_t qcam_read(struct file *file, char __user *buf,
if (qcam->status & QC_PARAM_CHANGE)
qc_set(qcam);
- len=qc_capture(qcam, buf,count);
+ len = qc_capture(qcam, buf, count);
mutex_unlock(&qcam->lock);
@@ -917,8 +882,7 @@ static const struct v4l2_file_operations qcam_fops = {
.ioctl = qcam_ioctl,
.read = qcam_read,
};
-static struct video_device qcam_template=
-{
+static struct video_device qcam_template = {
.name = "Connectix Quickcam",
.fops = &qcam_fops,
.release = video_device_release_empty,
@@ -932,22 +896,20 @@ static int init_bwqcam(struct parport *port)
{
struct qcam_device *qcam;
- if (num_cams == MAX_CAMS)
- {
+ if (num_cams == MAX_CAMS) {
printk(KERN_ERR "Too many Quickcams (max %d)\n", MAX_CAMS);
return -ENOSPC;
}
- qcam=qcam_init(port);
- if(qcam==NULL)
+ qcam = qcam_init(port);
+ if (qcam == NULL)
return -ENODEV;
parport_claim_or_block(qcam->pdev);
qc_reset(qcam);
- if(qc_detect(qcam)==0)
- {
+ if (qc_detect(qcam) == 0) {
parport_release(qcam->pdev);
parport_unregister_device(qcam->pdev);
kfree(qcam);
@@ -1045,12 +1007,12 @@ static int __init init_bw_qcams(void)
#ifdef MODULE
/* Do some sanity checks on the module parameters. */
if (maxpoll > 5000) {
- printk("Connectix Quickcam max-poll was above 5000. Using 5000.\n");
+ printk(KERN_INFO "Connectix Quickcam max-poll was above 5000. Using 5000.\n");
maxpoll = 5000;
}
if (yieldlines < 1) {
- printk("Connectix Quickcam yieldlines was less than 1. Using 1.\n");
+ printk(KERN_INFO "Connectix Quickcam yieldlines was less than 1. Using 1.\n");
yieldlines = 1;
}
#endif
diff --git a/drivers/media/video/c-qcam.c b/drivers/media/video/c-qcam.c
index e2cbebab959b..8f1dd88b32a6 100644
--- a/drivers/media/video/c-qcam.c
+++ b/drivers/media/video/c-qcam.c
@@ -79,17 +79,17 @@ static inline void qcam_set_ack(struct qcam_device *qcam, unsigned int i)
{
/* note: the QC specs refer to the PCAck pin by voltage, not
software level. PC ports have builtin inverters. */
- parport_frob_control(qcam->pport, 8, i?8:0);
+ parport_frob_control(qcam->pport, 8, i ? 8 : 0);
}
static inline unsigned int qcam_ready1(struct qcam_device *qcam)
{
- return (parport_read_status(qcam->pport) & 0x8)?1:0;
+ return (parport_read_status(qcam->pport) & 0x8) ? 1 : 0;
}
static inline unsigned int qcam_ready2(struct qcam_device *qcam)
{
- return (parport_read_data(qcam->pport) & 0x1)?1:0;
+ return (parport_read_data(qcam->pport) & 0x1) ? 1 : 0;
}
static unsigned int qcam_await_ready1(struct qcam_device *qcam,
@@ -99,14 +99,13 @@ static unsigned int qcam_await_ready1(struct qcam_device *qcam,
unsigned int i;
for (oldjiffies = jiffies;
- time_before(jiffies, oldjiffies + msecs_to_jiffies(40)); )
+ time_before(jiffies, oldjiffies + msecs_to_jiffies(40));)
if (qcam_ready1(qcam) == value)
return 0;
/* If the camera didn't respond within 1/25 second, poll slowly
for a while. */
- for (i = 0; i < 50; i++)
- {
+ for (i = 0; i < 50; i++) {
if (qcam_ready1(qcam) == value)
return 0;
msleep_interruptible(100);
@@ -125,14 +124,13 @@ static unsigned int qcam_await_ready2(struct qcam_device *qcam, int value)
unsigned int i;
for (oldjiffies = jiffies;
- time_before(jiffies, oldjiffies + msecs_to_jiffies(40)); )
+ time_before(jiffies, oldjiffies + msecs_to_jiffies(40));)
if (qcam_ready2(qcam) == value)
return 0;
/* If the camera didn't respond within 1/25 second, poll slowly
for a while. */
- for (i = 0; i < 50; i++)
- {
+ for (i = 0; i < 50; i++) {
if (qcam_ready2(qcam) == value)
return 0;
msleep_interruptible(100);
@@ -149,22 +147,25 @@ static unsigned int qcam_await_ready2(struct qcam_device *qcam, int value)
static int qcam_read_data(struct qcam_device *qcam)
{
unsigned int idata;
+
qcam_set_ack(qcam, 0);
- if (qcam_await_ready1(qcam, 1)) return -1;
+ if (qcam_await_ready1(qcam, 1))
+ return -1;
idata = parport_read_status(qcam->pport) & 0xf0;
qcam_set_ack(qcam, 1);
- if (qcam_await_ready1(qcam, 0)) return -1;
- idata |= (parport_read_status(qcam->pport) >> 4);
+ if (qcam_await_ready1(qcam, 0))
+ return -1;
+ idata |= parport_read_status(qcam->pport) >> 4;
return idata;
}
static int qcam_write_data(struct qcam_device *qcam, unsigned int data)
{
unsigned int idata;
+
parport_write_data(qcam->pport, data);
idata = qcam_read_data(qcam);
- if (data != idata)
- {
+ if (data != idata) {
printk(KERN_WARNING "cqcam: sent %x but received %x\n", data,
idata);
return 1;
@@ -212,13 +213,12 @@ static int qc_detect(struct qcam_device *qcam)
/* look for a heartbeat */
ostat = stat = parport_read_status(qcam->pport);
- for (i=0; i<250; i++)
- {
+ for (i = 0; i < 250; i++) {
mdelay(1);
stat = parport_read_status(qcam->pport);
- if (ostat != stat)
- {
- if (++count >= 3) return 1;
+ if (ostat != stat) {
+ if (++count >= 3)
+ return 1;
ostat = stat;
}
}
@@ -232,13 +232,12 @@ static int qc_detect(struct qcam_device *qcam)
count = 0;
ostat = stat = parport_read_status(qcam->pport);
- for (i=0; i<250; i++)
- {
+ for (i = 0; i < 250; i++) {
mdelay(1);
stat = parport_read_status(qcam->pport);
- if (ostat != stat)
- {
- if (++count >= 3) return 1;
+ if (ostat != stat) {
+ if (++count >= 3)
+ return 1;
ostat = stat;
}
}
@@ -263,7 +262,7 @@ static void qc_setup(struct qcam_device *q)
{
qc_reset(q);
- /* Set the brightness. */
+ /* Set the brightness. */
qcam_set(q, 11, q->brightness);
/* Set the height and width. These refer to the actual
@@ -292,25 +291,25 @@ static unsigned int qcam_read_bytes(struct qcam_device *q, unsigned char *buf, u
unsigned int bytes = 0;
qcam_set_ack(q, 0);
- if (q->bidirectional)
- {
+ if (q->bidirectional) {
/* It's a bidirectional port */
- while (bytes < nbytes)
- {
+ while (bytes < nbytes) {
unsigned int lo1, hi1, lo2, hi2;
unsigned char r, g, b;
- if (qcam_await_ready2(q, 1)) return bytes;
+ if (qcam_await_ready2(q, 1))
+ return bytes;
lo1 = parport_read_data(q->pport) >> 1;
hi1 = ((parport_read_status(q->pport) >> 3) & 0x1f) ^ 0x10;
qcam_set_ack(q, 1);
- if (qcam_await_ready2(q, 0)) return bytes;
+ if (qcam_await_ready2(q, 0))
+ return bytes;
lo2 = parport_read_data(q->pport) >> 1;
hi2 = ((parport_read_status(q->pport) >> 3) & 0x1f) ^ 0x10;
qcam_set_ack(q, 0);
- r = (lo1 | ((hi1 & 1)<<7));
- g = ((hi1 & 0x1e)<<3) | ((hi2 & 0x1e)>>1);
- b = (lo2 | ((hi2 & 1)<<7));
+ r = lo1 | ((hi1 & 1) << 7);
+ g = ((hi1 & 0x1e) << 3) | ((hi2 & 0x1e) >> 1);
+ b = lo2 | ((hi2 & 1) << 7);
if (force_rgb) {
buf[bytes++] = r;
buf[bytes++] = g;
@@ -321,21 +320,20 @@ static unsigned int qcam_read_bytes(struct qcam_device *q, unsigned char *buf, u
buf[bytes++] = r;
}
}
- }
- else
- {
+ } else {
/* It's a unidirectional port */
int i = 0, n = bytes;
unsigned char rgb[3];
- while (bytes < nbytes)
- {
+ while (bytes < nbytes) {
unsigned int hi, lo;
- if (qcam_await_ready1(q, 1)) return bytes;
+ if (qcam_await_ready1(q, 1))
+ return bytes;
hi = (parport_read_status(q->pport) & 0xf0);
qcam_set_ack(q, 1);
- if (qcam_await_ready1(q, 0)) return bytes;
+ if (qcam_await_ready1(q, 0))
+ return bytes;
lo = (parport_read_status(q->pport) & 0xf0);
qcam_set_ack(q, 0);
/* flip some bits */
@@ -374,28 +372,26 @@ static long qc_capture(struct qcam_device *q, char __user *buf, unsigned long le
return -EFAULT;
/* Wait for camera to become ready */
- for (;;)
- {
+ for (;;) {
int i = qcam_get(q, 41);
+
if (i == -1) {
qc_setup(q);
return -EIO;
}
if ((i & 0x80) == 0)
break;
- else
- schedule();
+ schedule();
}
- if (qcam_set(q, 7, (q->mode | (is_bi_dir?1:0)) + 1))
+ if (qcam_set(q, 7, (q->mode | (is_bi_dir ? 1 : 0)) + 1))
return -EIO;
lines = q->height;
pixelsperline = q->width;
bitsperxfer = (is_bi_dir) ? 24 : 8;
- if (is_bi_dir)
- {
+ if (is_bi_dir) {
/* Turn the port around */
parport_data_reverse(q->pport);
mdelay(3);
@@ -413,16 +409,17 @@ static long qc_capture(struct qcam_device *q, char __user *buf, unsigned long le
wantlen = lines * pixelsperline * 24 / 8;
- while (wantlen)
- {
+ while (wantlen) {
size_t t, s;
- s = (wantlen > BUFSZ)?BUFSZ:wantlen;
+
+ s = (wantlen > BUFSZ) ? BUFSZ : wantlen;
t = qcam_read_bytes(q, tmpbuf, s);
- if (outptr < len)
- {
+ if (outptr < len) {
size_t sz = len - outptr;
- if (sz > t) sz = t;
- if (__copy_to_user(buf+outptr, tmpbuf, sz))
+
+ if (sz > t)
+ sz = t;
+ if (__copy_to_user(buf + outptr, tmpbuf, sz))
break;
outptr += sz;
}
@@ -434,33 +431,31 @@ static long qc_capture(struct qcam_device *q, char __user *buf, unsigned long le
len = outptr;
- if (wantlen)
- {
- printk("qcam: short read.\n");
+ if (wantlen) {
+ printk(KERN_ERR "qcam: short read.\n");
if (is_bi_dir)
parport_data_forward(q->pport);
qc_setup(q);
return len;
}
- if (is_bi_dir)
- {
+ if (is_bi_dir) {
int l;
+
do {
l = qcam_read_bytes(q, tmpbuf, 3);
cond_resched();
} while (l && (tmpbuf[0] == 0x7e || tmpbuf[1] == 0x7e || tmpbuf[2] == 0x7e));
if (force_rgb) {
if (tmpbuf[0] != 0xe || tmpbuf[1] != 0x0 || tmpbuf[2] != 0xf)
- printk("qcam: bad EOF\n");
+ printk(KERN_ERR "qcam: bad EOF\n");
} else {
if (tmpbuf[0] != 0xf || tmpbuf[1] != 0x0 || tmpbuf[2] != 0xe)
- printk("qcam: bad EOF\n");
+ printk(KERN_ERR "qcam: bad EOF\n");
}
qcam_set_ack(q, 0);
- if (qcam_await_ready1(q, 1))
- {
- printk("qcam: no ack after EOF\n");
+ if (qcam_await_ready1(q, 1)) {
+ printk(KERN_ERR "qcam: no ack after EOF\n");
parport_data_forward(q->pport);
qc_setup(q);
return len;
@@ -468,27 +463,25 @@ static long qc_capture(struct qcam_device *q, char __user *buf, unsigned long le
parport_data_forward(q->pport);
mdelay(3);
qcam_set_ack(q, 1);
- if (qcam_await_ready1(q, 0))
- {
- printk("qcam: no ack to port turnaround\n");
+ if (qcam_await_ready1(q, 0)) {
+ printk(KERN_ERR "qcam: no ack to port turnaround\n");
qc_setup(q);
return len;
}
- }
- else
- {
+ } else {
int l;
+
do {
l = qcam_read_bytes(q, tmpbuf, 1);
cond_resched();
} while (l && tmpbuf[0] == 0x7e);
- l = qcam_read_bytes(q, tmpbuf+1, 2);
+ l = qcam_read_bytes(q, tmpbuf + 1, 2);
if (force_rgb) {
if (tmpbuf[0] != 0xe || tmpbuf[1] != 0x0 || tmpbuf[2] != 0xf)
- printk("qcam: bad EOF\n");
+ printk(KERN_ERR "qcam: bad EOF\n");
} else {
if (tmpbuf[0] != 0xf || tmpbuf[1] != 0x0 || tmpbuf[2] != 0xe)
- printk("qcam: bad EOF\n");
+ printk(KERN_ERR "qcam: bad EOF\n");
}
}
@@ -503,164 +496,166 @@ static long qc_capture(struct qcam_device *q, char __user *buf, unsigned long le
static long qcam_do_ioctl(struct file *file, unsigned int cmd, void *arg)
{
struct video_device *dev = video_devdata(file);
- struct qcam_device *qcam=(struct qcam_device *)dev;
+ struct qcam_device *qcam = (struct qcam_device *)dev;
- switch(cmd)
+ switch (cmd) {
+ case VIDIOCGCAP:
{
- case VIDIOCGCAP:
- {
- struct video_capability *b = arg;
- strcpy(b->name, "Quickcam");
- b->type = VID_TYPE_CAPTURE|VID_TYPE_SCALES;
- b->channels = 1;
- b->audios = 0;
- b->maxwidth = 320;
- b->maxheight = 240;
- b->minwidth = 80;
- b->minheight = 60;
- return 0;
- }
- case VIDIOCGCHAN:
- {
- struct video_channel *v = arg;
- if(v->channel!=0)
- return -EINVAL;
- v->flags=0;
- v->tuners=0;
- /* Good question.. its composite or SVHS so.. */
- v->type = VIDEO_TYPE_CAMERA;
- strcpy(v->name, "Camera");
- return 0;
- }
- case VIDIOCSCHAN:
- {
- struct video_channel *v = arg;
- if(v->channel!=0)
- return -EINVAL;
- return 0;
- }
- case VIDIOCGTUNER:
- {
- struct video_tuner *v = arg;
- if(v->tuner)
- return -EINVAL;
- memset(v,0,sizeof(*v));
- strcpy(v->name, "Format");
- v->mode = VIDEO_MODE_AUTO;
- return 0;
- }
- case VIDIOCSTUNER:
- {
- struct video_tuner *v = arg;
- if(v->tuner)
- return -EINVAL;
- if(v->mode!=VIDEO_MODE_AUTO)
- return -EINVAL;
- return 0;
- }
- case VIDIOCGPICT:
- {
- struct video_picture *p = arg;
- p->colour=0x8000;
- p->hue=0x8000;
- p->brightness=qcam->brightness<<8;
- p->contrast=qcam->contrast<<8;
- p->whiteness=qcam->whitebal<<8;
- p->depth=24;
- p->palette=VIDEO_PALETTE_RGB24;
- return 0;
+ struct video_capability *b = arg;
+
+ strcpy(b->name, "Quickcam");
+ b->type = VID_TYPE_CAPTURE | VID_TYPE_SCALES;
+ b->channels = 1;
+ b->audios = 0;
+ b->maxwidth = 320;
+ b->maxheight = 240;
+ b->minwidth = 80;
+ b->minheight = 60;
+ return 0;
+ }
+ case VIDIOCGCHAN:
+ {
+ struct video_channel *v = arg;
+
+ if (v->channel != 0)
+ return -EINVAL;
+ v->flags = 0;
+ v->tuners = 0;
+ /* Good question.. its composite or SVHS so.. */
+ v->type = VIDEO_TYPE_CAMERA;
+ strcpy(v->name, "Camera");
+ return 0;
+ }
+ case VIDIOCSCHAN:
+ {
+ struct video_channel *v = arg;
+
+ if (v->channel != 0)
+ return -EINVAL;
+ return 0;
+ }
+ case VIDIOCGTUNER:
+ {
+ struct video_tuner *v = arg;
+
+ if (v->tuner)
+ return -EINVAL;
+ memset(v, 0, sizeof(*v));
+ strcpy(v->name, "Format");
+ v->mode = VIDEO_MODE_AUTO;
+ return 0;
+ }
+ case VIDIOCSTUNER:
+ {
+ struct video_tuner *v = arg;
+
+ if (v->tuner)
+ return -EINVAL;
+ if (v->mode != VIDEO_MODE_AUTO)
+ return -EINVAL;
+ return 0;
+ }
+ case VIDIOCGPICT:
+ {
+ struct video_picture *p = arg;
+
+ p->colour = 0x8000;
+ p->hue = 0x8000;
+ p->brightness = qcam->brightness << 8;
+ p->contrast = qcam->contrast << 8;
+ p->whiteness = qcam->whitebal << 8;
+ p->depth = 24;
+ p->palette = VIDEO_PALETTE_RGB24;
+ return 0;
+ }
+ case VIDIOCSPICT:
+ {
+ struct video_picture *p = arg;
+
+ /*
+ * Sanity check args
+ */
+ if (p->depth != 24 || p->palette != VIDEO_PALETTE_RGB24)
+ return -EINVAL;
+
+ /*
+ * Now load the camera.
+ */
+ qcam->brightness = p->brightness >> 8;
+ qcam->contrast = p->contrast >> 8;
+ qcam->whitebal = p->whiteness >> 8;
+
+ mutex_lock(&qcam->lock);
+ parport_claim_or_block(qcam->pdev);
+ qc_setup(qcam);
+ parport_release(qcam->pdev);
+ mutex_unlock(&qcam->lock);
+ return 0;
+ }
+ case VIDIOCSWIN:
+ {
+ struct video_window *vw = arg;
+
+ if (vw->flags)
+ return -EINVAL;
+ if (vw->clipcount)
+ return -EINVAL;
+ if (vw->height < 60 || vw->height > 240)
+ return -EINVAL;
+ if (vw->width < 80 || vw->width > 320)
+ return -EINVAL;
+
+ qcam->width = 80;
+ qcam->height = 60;
+ qcam->mode = QC_DECIMATION_4;
+
+ if (vw->width >= 160 && vw->height >= 120) {
+ qcam->width = 160;
+ qcam->height = 120;
+ qcam->mode = QC_DECIMATION_2;
}
- case VIDIOCSPICT:
- {
- struct video_picture *p = arg;
-
- /*
- * Sanity check args
- */
- if (p->depth != 24 || p->palette != VIDEO_PALETTE_RGB24)
- return -EINVAL;
-
- /*
- * Now load the camera.
- */
- qcam->brightness = p->brightness>>8;
- qcam->contrast = p->contrast>>8;
- qcam->whitebal = p->whiteness>>8;
-
- mutex_lock(&qcam->lock);
- parport_claim_or_block(qcam->pdev);
- qc_setup(qcam);
- parport_release(qcam->pdev);
- mutex_unlock(&qcam->lock);
- return 0;
+ if (vw->width >= 320 && vw->height >= 240) {
+ qcam->width = 320;
+ qcam->height = 240;
+ qcam->mode = QC_DECIMATION_1;
}
- case VIDIOCSWIN:
- {
- struct video_window *vw = arg;
-
- if(vw->flags)
- return -EINVAL;
- if(vw->clipcount)
- return -EINVAL;
- if(vw->height<60||vw->height>240)
- return -EINVAL;
- if(vw->width<80||vw->width>320)
- return -EINVAL;
-
- qcam->width = 80;
- qcam->height = 60;
- qcam->mode = QC_DECIMATION_4;
-
- if(vw->width>=160 && vw->height>=120)
- {
- qcam->width = 160;
- qcam->height = 120;
- qcam->mode = QC_DECIMATION_2;
- }
- if(vw->width>=320 && vw->height>=240)
- {
- qcam->width = 320;
- qcam->height = 240;
- qcam->mode = QC_DECIMATION_1;
- }
- qcam->mode |= QC_MILLIONS;
+ qcam->mode |= QC_MILLIONS;
#if 0
- if(vw->width>=640 && vw->height>=480)
- {
- qcam->width = 640;
- qcam->height = 480;
- qcam->mode = QC_BILLIONS | QC_DECIMATION_1;
- }
-#endif
- /* Ok we figured out what to use from our
- wide choice */
- mutex_lock(&qcam->lock);
- parport_claim_or_block(qcam->pdev);
- qc_setup(qcam);
- parport_release(qcam->pdev);
- mutex_unlock(&qcam->lock);
- return 0;
- }
- case VIDIOCGWIN:
- {
- struct video_window *vw = arg;
- memset(vw, 0, sizeof(*vw));
- vw->width=qcam->width;
- vw->height=qcam->height;
- return 0;
+ if (vw->width >= 640 && vw->height >= 480) {
+ qcam->width = 640;
+ qcam->height = 480;
+ qcam->mode = QC_BILLIONS | QC_DECIMATION_1;
}
- case VIDIOCKEY:
- return 0;
- case VIDIOCCAPTURE:
- case VIDIOCGFBUF:
- case VIDIOCSFBUF:
- case VIDIOCGFREQ:
- case VIDIOCSFREQ:
- case VIDIOCGAUDIO:
- case VIDIOCSAUDIO:
- return -EINVAL;
- default:
- return -ENOIOCTLCMD;
+#endif
+ /* Ok we figured out what to use from our
+ wide choice */
+ mutex_lock(&qcam->lock);
+ parport_claim_or_block(qcam->pdev);
+ qc_setup(qcam);
+ parport_release(qcam->pdev);
+ mutex_unlock(&qcam->lock);
+ return 0;
+ }
+ case VIDIOCGWIN:
+ {
+ struct video_window *vw = arg;
+ memset(vw, 0, sizeof(*vw));
+ vw->width = qcam->width;
+ vw->height = qcam->height;
+ return 0;
+ }
+ case VIDIOCKEY:
+ return 0;
+ case VIDIOCCAPTURE:
+ case VIDIOCGFBUF:
+ case VIDIOCSFBUF:
+ case VIDIOCGFREQ:
+ case VIDIOCSFREQ:
+ case VIDIOCGAUDIO:
+ case VIDIOCSAUDIO:
+ return -EINVAL;
+ default:
+ return -ENOIOCTLCMD;
}
return 0;
}
@@ -675,13 +670,13 @@ static ssize_t qcam_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct video_device *v = video_devdata(file);
- struct qcam_device *qcam=(struct qcam_device *)v;
+ struct qcam_device *qcam = (struct qcam_device *)v;
int len;
mutex_lock(&qcam->lock);
parport_claim_or_block(qcam->pdev);
/* Probably should have a semaphore against multiple users */
- len = qc_capture(qcam, buf,count);
+ len = qc_capture(qcam, buf, count);
parport_release(qcam->pdev);
mutex_unlock(&qcam->lock);
return len;
@@ -713,8 +708,7 @@ static const struct v4l2_file_operations qcam_fops = {
.read = qcam_read,
};
-static struct video_device qcam_template=
-{
+static struct video_device qcam_template = {
.name = "Colour QuickCam",
.fops = &qcam_fops,
.release = video_device_release_empty,
@@ -727,17 +721,16 @@ static struct qcam_device *qcam_init(struct parport *port)
struct qcam_device *q;
q = kmalloc(sizeof(struct qcam_device), GFP_KERNEL);
- if(q==NULL)
+ if (q == NULL)
return NULL;
q->pport = port;
q->pdev = parport_register_device(port, "c-qcam", NULL, NULL,
NULL, 0, NULL);
- q->bidirectional = (q->pport->modes & PARPORT_MODE_TRISTATE)?1:0;
+ q->bidirectional = (q->pport->modes & PARPORT_MODE_TRISTATE) ? 1 : 0;
- if (q->pdev == NULL)
- {
+ if (q->pdev == NULL) {
printk(KERN_ERR "c-qcam: couldn't register for %s.\n",
port->name);
kfree(q);
@@ -765,12 +758,11 @@ static int init_cqcam(struct parport *port)
{
struct qcam_device *qcam;
- if (parport[0] != -1)
- {
+ if (parport[0] != -1) {
/* The user gave specific instructions */
int i, found = 0;
- for (i = 0; i < MAX_CAMS && parport[i] != -1; i++)
- {
+
+ for (i = 0; i < MAX_CAMS && parport[i] != -1; i++) {
if (parport[0] == port->number)
found = 1;
}
@@ -782,15 +774,14 @@ static int init_cqcam(struct parport *port)
return -ENOSPC;
qcam = qcam_init(port);
- if (qcam==NULL)
+ if (qcam == NULL)
return -ENODEV;
parport_claim_or_block(qcam->pdev);
qc_reset(qcam);
- if (probe && qc_detect(qcam)==0)
- {
+ if (probe && qc_detect(qcam) == 0) {
parport_release(qcam->pdev);
parport_unregister_device(qcam->pdev);
kfree(qcam);
@@ -840,14 +831,14 @@ static struct parport_driver cqcam_driver = {
.detach = cq_detach,
};
-static int __init cqcam_init (void)
+static int __init cqcam_init(void)
{
printk(BANNER "\n");
return parport_register_driver(&cqcam_driver);
}
-static void __exit cqcam_cleanup (void)
+static void __exit cqcam_cleanup(void)
{
unsigned int i;
@@ -862,9 +853,9 @@ MODULE_DESCRIPTION(BANNER);
MODULE_LICENSE("GPL");
/* FIXME: parport=auto would never have worked, surely? --RR */
-MODULE_PARM_DESC(parport ,"parport=<auto|n[,n]...> for port detection method\n\
-probe=<0|1|2> for camera detection method\n\
-force_rgb=<0|1> for RGB data format (default BGR)");
+MODULE_PARM_DESC(parport, "parport=<auto|n[,n]...> for port detection method\n"
+ "probe=<0|1|2> for camera detection method\n"
+ "force_rgb=<0|1> for RGB data format (default BGR)");
module_param_array(parport, int, NULL, 0);
module_param(probe, int, 0);
module_param(force_rgb, bool, 0);
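
The MODULE_PARM_DESC() change above drops the backslash-newline continuations in favour of adjacent string literals. A small standalone example of why that works: the C compiler concatenates adjacent literals at translation time, so the description is one string either way. This is plain C, not kernel code.

	#include <stdio.h>

	static const char desc[] =
		"parport=<auto|n[,n]...> for port detection method\n"
		"probe=<0|1|2> for camera detection method\n"
		"force_rgb=<0|1> for RGB data format (default BGR)";

	int main(void)
	{
		puts(desc);	/* prints the same three-line description */
		return 0;
	}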
diff --git a/drivers/media/video/cx18/cx18-av-core.c b/drivers/media/video/cx18/cx18-av-core.c
index 4392c76af5df..0e5006b14279 100644
--- a/drivers/media/video/cx18/cx18-av-core.c
+++ b/drivers/media/video/cx18/cx18-av-core.c
@@ -579,6 +579,7 @@ static int set_input(struct cx18 *cx, enum cx18_av_video_input vid_input,
u8 afe_mux_cfg;
u8 adc2_cfg;
+ u8 input_mode;
u32 afe_cfg;
int i;
@@ -589,6 +590,30 @@ static int set_input(struct cx18 *cx, enum cx18_av_video_input vid_input,
vid_input <= CX18_AV_COMPOSITE8) {
afe_mux_cfg = 0xf0 + (vid_input - CX18_AV_COMPOSITE1);
ch[0] = CVBS;
+ input_mode = 0x0;
+ } else if (vid_input >= CX18_AV_COMPONENT_LUMA1) {
+ int luma = vid_input & 0xf000;
+ int r_chroma = vid_input & 0xf0000;
+ int b_chroma = vid_input & 0xf00000;
+
+ if ((vid_input & ~0xfff000) ||
+ luma < CX18_AV_COMPONENT_LUMA1 ||
+ luma > CX18_AV_COMPONENT_LUMA8 ||
+ r_chroma < CX18_AV_COMPONENT_R_CHROMA4 ||
+ r_chroma > CX18_AV_COMPONENT_R_CHROMA6 ||
+ b_chroma < CX18_AV_COMPONENT_B_CHROMA7 ||
+ b_chroma > CX18_AV_COMPONENT_B_CHROMA8) {
+ CX18_ERR_DEV(sd, "0x%06x is not a valid video input!\n",
+ vid_input);
+ return -EINVAL;
+ }
+ afe_mux_cfg = (luma - CX18_AV_COMPONENT_LUMA1) >> 12;
+ ch[0] = Y;
+ afe_mux_cfg |= (r_chroma - CX18_AV_COMPONENT_R_CHROMA4) >> 12;
+ ch[1] = Pr;
+ afe_mux_cfg |= (b_chroma - CX18_AV_COMPONENT_B_CHROMA7) >> 14;
+ ch[2] = Pb;
+ input_mode = 0x6;
} else {
int luma = vid_input & 0xf0;
int chroma = vid_input & 0xf00;
@@ -598,7 +623,7 @@ static int set_input(struct cx18 *cx, enum cx18_av_video_input vid_input,
luma > CX18_AV_SVIDEO_LUMA8 ||
chroma < CX18_AV_SVIDEO_CHROMA4 ||
chroma > CX18_AV_SVIDEO_CHROMA8) {
- CX18_ERR_DEV(sd, "0x%04x is not a valid video input!\n",
+ CX18_ERR_DEV(sd, "0x%06x is not a valid video input!\n",
vid_input);
return -EINVAL;
}
@@ -613,8 +638,8 @@ static int set_input(struct cx18 *cx, enum cx18_av_video_input vid_input,
afe_mux_cfg |= (chroma - CX18_AV_SVIDEO_CHROMA4) >> 4;
ch[1] = C;
}
+ input_mode = 0x2;
}
- /* TODO: LeadTek WinFast DVR3100 H & WinFast PVR2100 can do Y/Pb/Pr */
switch (aud_input) {
case CX18_AV_AUDIO_SERIAL1:
@@ -650,8 +675,8 @@ static int set_input(struct cx18 *cx, enum cx18_av_video_input vid_input,
/* Set up analog front end multiplexers */
cx18_av_write_expect(cx, 0x103, afe_mux_cfg, afe_mux_cfg, 0xf7);
- /* Set INPUT_MODE to Composite (0) or S-Video (1) */
- cx18_av_and_or(cx, 0x401, ~0x6, ch[0] == CVBS ? 0 : 0x02);
+ /* Set INPUT_MODE to Composite, S-Video, or Component */
+ cx18_av_and_or(cx, 0x401, ~0x6, input_mode);
/* Set CH_SEL_ADC2 to 1 if input comes from CH3 */
adc2_cfg = cx18_av_read(cx, 0x102);
@@ -998,9 +1023,9 @@ static int cx18_av_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
static int cx18_av_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
{
- struct cx18 *cx = v4l2_get_subdevdata(sd);
-
- return cx18_av_vbi_g_fmt(cx, fmt);
+ if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
+ return -EINVAL;
+ return cx18_av_g_sliced_fmt(sd, &fmt->fmt.sliced);
}
static int cx18_av_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
@@ -1073,12 +1098,6 @@ static int cx18_av_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
cx18_av_write(cx, 0x41e, 0x8 | filter);
break;
- case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
- return cx18_av_vbi_s_fmt(cx, fmt);
-
- case V4L2_BUF_TYPE_VBI_CAPTURE:
- return cx18_av_vbi_s_fmt(cx, fmt);
-
default:
return -EINVAL;
}
@@ -1378,17 +1397,24 @@ static const struct v4l2_subdev_audio_ops cx18_av_audio_ops = {
static const struct v4l2_subdev_video_ops cx18_av_video_ops = {
.s_routing = cx18_av_s_video_routing,
- .decode_vbi_line = cx18_av_decode_vbi_line,
.s_stream = cx18_av_s_stream,
.g_fmt = cx18_av_g_fmt,
.s_fmt = cx18_av_s_fmt,
};
+static const struct v4l2_subdev_vbi_ops cx18_av_vbi_ops = {
+ .decode_vbi_line = cx18_av_decode_vbi_line,
+ .g_sliced_fmt = cx18_av_g_sliced_fmt,
+ .s_sliced_fmt = cx18_av_s_sliced_fmt,
+ .s_raw_fmt = cx18_av_s_raw_fmt,
+};
+
static const struct v4l2_subdev_ops cx18_av_ops = {
.core = &cx18_av_general_ops,
.tuner = &cx18_av_tuner_ops,
.audio = &cx18_av_audio_ops,
.video = &cx18_av_video_ops,
+ .vbi = &cx18_av_vbi_ops,
};
int cx18_av_probe(struct cx18 *cx)
diff --git a/drivers/media/video/cx18/cx18-av-core.h b/drivers/media/video/cx18/cx18-av-core.h
index cafb7e99b9a0..c106967bdcc3 100644
--- a/drivers/media/video/cx18/cx18-av-core.h
+++ b/drivers/media/video/cx18/cx18-av-core.h
@@ -61,6 +61,25 @@ enum cx18_av_video_input {
CX18_AV_SVIDEO2 = 0x620,
CX18_AV_SVIDEO3 = 0x730,
CX18_AV_SVIDEO4 = 0x840,
+
+ /* Component Video inputs consist of one luma input (In1-In8) ORed
+ with a red chroma (In4-In6) and blue chroma input (In7-In8) */
+ CX18_AV_COMPONENT_LUMA1 = 0x1000,
+ CX18_AV_COMPONENT_LUMA2 = 0x2000,
+ CX18_AV_COMPONENT_LUMA3 = 0x3000,
+ CX18_AV_COMPONENT_LUMA4 = 0x4000,
+ CX18_AV_COMPONENT_LUMA5 = 0x5000,
+ CX18_AV_COMPONENT_LUMA6 = 0x6000,
+ CX18_AV_COMPONENT_LUMA7 = 0x7000,
+ CX18_AV_COMPONENT_LUMA8 = 0x8000,
+ CX18_AV_COMPONENT_R_CHROMA4 = 0x40000,
+ CX18_AV_COMPONENT_R_CHROMA5 = 0x50000,
+ CX18_AV_COMPONENT_R_CHROMA6 = 0x60000,
+ CX18_AV_COMPONENT_B_CHROMA7 = 0x700000,
+ CX18_AV_COMPONENT_B_CHROMA8 = 0x800000,
+
+ /* Component Video aliases for common combinations */
+ CX18_AV_COMPONENT1 = 0x861000,
};
enum cx18_av_audio_input {
@@ -359,7 +378,8 @@ void cx18_av_audio_set_path(struct cx18 *cx);
/* cx18_av-vbi.c */
int cx18_av_decode_vbi_line(struct v4l2_subdev *sd,
struct v4l2_decode_vbi_line *vbi);
-int cx18_av_vbi_g_fmt(struct cx18 *cx, struct v4l2_format *fmt);
-int cx18_av_vbi_s_fmt(struct cx18 *cx, struct v4l2_format *fmt);
+int cx18_av_s_raw_fmt(struct v4l2_subdev *sd, struct v4l2_vbi_format *fmt);
+int cx18_av_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *fmt);
+int cx18_av_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *fmt);
#endif
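
The new component-video enum values above are designed to be ORed together: a luma input in bits 12-15, a red chroma input in bits 16-19, and a blue chroma input in bits 20-23, which is also why set_input() decodes them with the 0xf000/0xf0000/0xf00000 masks and the error message widens to 0x%06x. A standalone check of the CX18_AV_COMPONENT1 alias, using the constants copied from the enum (not driver code):

	#include <assert.h>
	#include <stdio.h>

	#define CX18_AV_COMPONENT_LUMA1     0x1000
	#define CX18_AV_COMPONENT_R_CHROMA6 0x60000
	#define CX18_AV_COMPONENT_B_CHROMA8 0x800000
	#define CX18_AV_COMPONENT1          0x861000

	int main(void)
	{
		unsigned int vid_input = CX18_AV_COMPONENT_LUMA1 |
					 CX18_AV_COMPONENT_R_CHROMA6 |
					 CX18_AV_COMPONENT_B_CHROMA8;

		/* the common-combination alias is just the OR of the parts */
		assert(vid_input == CX18_AV_COMPONENT1);
		printf("luma=0x%x r_chroma=0x%x b_chroma=0x%x\n",
		       vid_input & 0xf000,
		       vid_input & 0xf0000,
		       vid_input & 0xf00000);
		return 0;
	}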
diff --git a/drivers/media/video/cx18/cx18-av-vbi.c b/drivers/media/video/cx18/cx18-av-vbi.c
index a51732bcca4b..baa36fbcd4d4 100644
--- a/drivers/media/video/cx18/cx18-av-vbi.c
+++ b/drivers/media/video/cx18/cx18-av-vbi.c
@@ -129,10 +129,10 @@ static int decode_vps(u8 *dst, u8 *p)
return err & 0xf0;
}
-int cx18_av_vbi_g_fmt(struct cx18 *cx, struct v4l2_format *fmt)
+int cx18_av_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *svbi)
{
+ struct cx18 *cx = v4l2_get_subdevdata(sd);
struct cx18_av_state *state = &cx->av_state;
- struct v4l2_sliced_vbi_format *svbi;
static const u16 lcr2vbi[] = {
0, V4L2_SLICED_TELETEXT_B, 0, /* 1 */
0, V4L2_SLICED_WSS_625, 0, /* 4 */
@@ -143,9 +143,6 @@ int cx18_av_vbi_g_fmt(struct cx18 *cx, struct v4l2_format *fmt)
int is_pal = !(state->std & V4L2_STD_525_60);
int i;
- if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
- return -EINVAL;
- svbi = &fmt->fmt.sliced;
memset(svbi, 0, sizeof(*svbi));
/* we're done if raw VBI is active */
if ((cx18_av_read(cx, 0x404) & 0x10) == 0)
@@ -173,30 +170,27 @@ int cx18_av_vbi_g_fmt(struct cx18 *cx, struct v4l2_format *fmt)
return 0;
}
-int cx18_av_vbi_s_fmt(struct cx18 *cx, struct v4l2_format *fmt)
+int cx18_av_s_raw_fmt(struct v4l2_subdev *sd, struct v4l2_vbi_format *fmt)
{
+ struct cx18 *cx = v4l2_get_subdevdata(sd);
struct cx18_av_state *state = &cx->av_state;
- struct v4l2_sliced_vbi_format *svbi;
- int is_pal = !(state->std & V4L2_STD_525_60);
- int i, x;
- u8 lcr[24];
- if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE &&
- fmt->type != V4L2_BUF_TYPE_VBI_CAPTURE)
- return -EINVAL;
- svbi = &fmt->fmt.sliced;
- if (fmt->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
- /* raw VBI */
- memset(svbi, 0, sizeof(*svbi));
+ /* Setup standard */
+ cx18_av_std_setup(cx);
- /* Setup standard */
- cx18_av_std_setup(cx);
+ /* VBI Offset */
+ cx18_av_write(cx, 0x47f, state->slicer_line_delay);
+ cx18_av_write(cx, 0x404, 0x2e);
+ return 0;
+}
- /* VBI Offset */
- cx18_av_write(cx, 0x47f, state->slicer_line_delay);
- cx18_av_write(cx, 0x404, 0x2e);
- return 0;
- }
+int cx18_av_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *svbi)
+{
+ struct cx18 *cx = v4l2_get_subdevdata(sd);
+ struct cx18_av_state *state = &cx->av_state;
+ int is_pal = !(state->std & V4L2_STD_525_60);
+ int i, x;
+ u8 lcr[24];
for (x = 0; x <= 23; x++)
lcr[x] = 0x00;
diff --git a/drivers/media/video/cx18/cx18-cards.c b/drivers/media/video/cx18/cx18-cards.c
index f808fb6fc1c1..74e122b5fc49 100644
--- a/drivers/media/video/cx18/cx18-cards.c
+++ b/drivers/media/video/cx18/cx18-cards.c
@@ -370,6 +370,7 @@ static const struct cx18_card cx18_card_leadtek_pvr2100 = {
{ CX18_CARD_INPUT_SVIDEO1, 1,
CX18_AV_SVIDEO_LUMA3 | CX18_AV_SVIDEO_CHROMA4 },
{ CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE7 },
+ { CX18_CARD_INPUT_COMPONENT1, 1, CX18_AV_COMPONENT1 },
},
.audio_inputs = {
{ CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 0 },
@@ -422,6 +423,7 @@ static const struct cx18_card cx18_card_leadtek_dvr3100h = {
{ CX18_CARD_INPUT_SVIDEO1, 1,
CX18_AV_SVIDEO_LUMA3 | CX18_AV_SVIDEO_CHROMA4 },
{ CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE7 },
+ { CX18_CARD_INPUT_COMPONENT1, 1, CX18_AV_COMPONENT1 },
},
.audio_inputs = {
{ CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 0 },
@@ -480,7 +482,7 @@ int cx18_get_input(struct cx18 *cx, u16 index, struct v4l2_input *input)
"S-Video 2",
"Composite 1",
"Composite 2",
- "Composite 3"
+ "Component 1"
};
memset(input, 0, sizeof(*input));
diff --git a/drivers/media/video/cx18/cx18-cards.h b/drivers/media/video/cx18/cx18-cards.h
index af3d71607dc9..796e517300ac 100644
--- a/drivers/media/video/cx18/cx18-cards.h
+++ b/drivers/media/video/cx18/cx18-cards.h
@@ -43,7 +43,7 @@
#define CX18_CARD_INPUT_SVIDEO2 3
#define CX18_CARD_INPUT_COMPOSITE1 4
#define CX18_CARD_INPUT_COMPOSITE2 5
-#define CX18_CARD_INPUT_COMPOSITE3 6
+#define CX18_CARD_INPUT_COMPONENT1 6
/* audio inputs */
#define CX18_CARD_INPUT_AUD_TUNER 1
@@ -62,7 +62,7 @@
struct cx18_card_video_input {
u8 video_type; /* video input type */
u8 audio_index; /* index in cx18_card_audio_input array */
- u16 video_input; /* hardware video input */
+ u32 video_input; /* hardware video input */
};
struct cx18_card_audio_input {
diff --git a/drivers/media/video/cx18/cx18-i2c.c b/drivers/media/video/cx18/cx18-i2c.c
index eecf29af916c..cfa1f289b0f5 100644
--- a/drivers/media/video/cx18/cx18-i2c.c
+++ b/drivers/media/video/cx18/cx18-i2c.c
@@ -109,7 +109,7 @@ static int cx18_i2c_new_ir(struct cx18 *cx, struct i2c_adapter *adap, u32 hw,
/* Our default information for ir-kbd-i2c.c to use */
switch (hw) {
case CX18_HW_Z8F0811_IR_RX_HAUP:
- init_data->ir_codes = &ir_codes_hauppauge_new_table;
+ init_data->ir_codes = RC_MAP_HAUPPAUGE_NEW;
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
init_data->type = IR_TYPE_RC5;
init_data->name = cx->card_name;
diff --git a/drivers/media/video/cx18/cx18-ioctl.c b/drivers/media/video/cx18/cx18-ioctl.c
index b81dd0ea8eb9..2e6addab3ed6 100644
--- a/drivers/media/video/cx18/cx18-ioctl.c
+++ b/drivers/media/video/cx18/cx18-ioctl.c
@@ -208,7 +208,7 @@ static int cx18_g_fmt_sliced_vbi_cap(struct file *file, void *fh,
* digitizer/slicer. Note, cx18_av_vbi() wipes the passed in
* fmt->fmt.sliced under valid calling conditions
*/
- if (v4l2_subdev_call(cx->sd_av, video, g_fmt, fmt))
+ if (v4l2_subdev_call(cx->sd_av, vbi, g_sliced_fmt, &fmt->fmt.sliced))
return -EINVAL;
/* Ensure V4L2 spec compliant output */
@@ -322,7 +322,7 @@ static int cx18_s_fmt_vbi_cap(struct file *file, void *fh,
* Note cx18_av_vbi_wipes out alot of the passed in fmt under valid
* calling conditions
*/
- ret = v4l2_subdev_call(cx->sd_av, video, s_fmt, fmt);
+ ret = v4l2_subdev_call(cx->sd_av, vbi, s_raw_fmt, &fmt->fmt.vbi);
if (ret)
return ret;
@@ -359,7 +359,7 @@ static int cx18_s_fmt_sliced_vbi_cap(struct file *file, void *fh,
* Note, cx18_av_vbi() wipes some "impossible" service lines in the
* passed in fmt->fmt.sliced under valid calling conditions
*/
- ret = v4l2_subdev_call(cx->sd_av, video, s_fmt, fmt);
+ ret = v4l2_subdev_call(cx->sd_av, vbi, s_sliced_fmt, &fmt->fmt.sliced);
if (ret)
return ret;
/* Store our current v4l2 sliced VBI settings */
diff --git a/drivers/media/video/cx18/cx18-streams.c b/drivers/media/video/cx18/cx18-streams.c
index 054450f65a60..f5c91261b2db 100644
--- a/drivers/media/video/cx18/cx18-streams.c
+++ b/drivers/media/video/cx18/cx18-streams.c
@@ -374,7 +374,10 @@ static void cx18_vbi_setup(struct cx18_stream *s)
}
/* setup VBI registers */
- v4l2_subdev_call(cx->sd_av, video, s_fmt, &cx->vbi.in);
+ if (raw)
+ v4l2_subdev_call(cx->sd_av, vbi, s_raw_fmt, &cx->vbi.in.fmt.vbi);
+ else
+ v4l2_subdev_call(cx->sd_av, vbi, s_sliced_fmt, &cx->vbi.in.fmt.sliced);
/*
* Send the CX18_CPU_SET_RAW_VBI_PARAM API command to setup Encoder Raw
diff --git a/drivers/media/video/cx18/cx18-vbi.c b/drivers/media/video/cx18/cx18-vbi.c
index 574c1c6974f8..582227522cf0 100644
--- a/drivers/media/video/cx18/cx18-vbi.c
+++ b/drivers/media/video/cx18/cx18-vbi.c
@@ -174,7 +174,7 @@ static u32 compress_sliced_buf(struct cx18 *cx, u8 *buf, u32 size,
p[3] != sliced_vbi_eav_rp[1]))
continue;
vbi.p = p + 4;
- v4l2_subdev_call(cx->sd_av, video, decode_vbi_line, &vbi);
+ v4l2_subdev_call(cx->sd_av, vbi, decode_vbi_line, &vbi);
if (vbi.type) {
cx->vbi.sliced_data[line].id = vbi.type;
cx->vbi.sliced_data[line].field = vbi.is_second_field;
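
The cx18 hunks above move decode_vbi_line and the VBI format handlers out of the generic video ops into a dedicated vbi ops table, so callers now write v4l2_subdev_call(cx->sd_av, vbi, g_sliced_fmt, ...) and name the ops group explicitly. A simplified, standalone sketch of that group-plus-function dispatch idea; the structs and macro below are stand-ins, not the real V4L2 definitions, and the real helper returns proper errno values rather than -1.

	#include <stdio.h>

	struct demo_vbi_ops {
		int (*g_sliced_fmt)(int *lines);
	};

	struct demo_subdev_ops {
		const struct demo_vbi_ops *vbi;
	};

	struct demo_subdev {
		const struct demo_subdev_ops *ops;
	};

	/* caller names the ops group and the callback, like v4l2_subdev_call() */
	#define demo_subdev_call(sd, group, fn, ...) \
		(((sd)->ops->group && (sd)->ops->group->fn) ? \
		 (sd)->ops->group->fn(__VA_ARGS__) : -1)

	static int demo_g_sliced_fmt(int *lines)
	{
		*lines = 24;
		return 0;
	}

	static const struct demo_vbi_ops demo_vbi = {
		.g_sliced_fmt = demo_g_sliced_fmt,
	};

	static const struct demo_subdev_ops demo_ops = {
		.vbi = &demo_vbi,
	};

	int main(void)
	{
		struct demo_subdev sd = { .ops = &demo_ops };
		int lines = 0;

		if (demo_subdev_call(&sd, vbi, g_sliced_fmt, &lines) == 0)
			printf("sliced VBI lines: %d\n", lines);
		return 0;
	}

Splitting the table this way lets a bridge driver ask specifically for sliced, raw, or line-decode handling instead of funnelling everything through a single g_fmt/s_fmt pair.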
diff --git a/drivers/media/video/cx231xx/cx231xx-audio.c b/drivers/media/video/cx231xx/cx231xx-audio.c
index 7793d60966db..7cae95a2245e 100644
--- a/drivers/media/video/cx231xx/cx231xx-audio.c
+++ b/drivers/media/video/cx231xx/cx231xx-audio.c
@@ -495,7 +495,7 @@ static int cx231xx_audio_init(struct cx231xx *dev)
pcm->info_flags = 0;
pcm->private_data = dev;
strcpy(pcm->name, "Conexant cx231xx Capture");
- strcpy(card->driver, "Conexant cx231xx Audio");
+ strcpy(card->driver, "Cx231xx-Audio");
strcpy(card->shortname, "Cx231xx Audio");
strcpy(card->longname, "Conexant cx231xx Audio");
diff --git a/drivers/media/video/cx231xx/cx231xx-core.c b/drivers/media/video/cx231xx/cx231xx-core.c
index b24eee115e7e..f5e1a2315fcc 100644
--- a/drivers/media/video/cx231xx/cx231xx-core.c
+++ b/drivers/media/video/cx231xx/cx231xx-core.c
@@ -96,10 +96,9 @@ int cx231xx_register_extension(struct cx231xx_ops *ops)
mutex_lock(&cx231xx_devlist_mutex);
mutex_lock(&cx231xx_extension_devlist_lock);
list_add_tail(&ops->next, &cx231xx_extension_devlist);
- list_for_each_entry(dev, &cx231xx_devlist, devlist) {
- if (dev)
- ops->init(dev);
- }
+ list_for_each_entry(dev, &cx231xx_devlist, devlist)
+ ops->init(dev);
+
printk(KERN_INFO DRIVER_NAME ": %s initialized\n", ops->name);
mutex_unlock(&cx231xx_extension_devlist_lock);
mutex_unlock(&cx231xx_devlist_mutex);
@@ -112,10 +111,8 @@ void cx231xx_unregister_extension(struct cx231xx_ops *ops)
struct cx231xx *dev = NULL;
mutex_lock(&cx231xx_devlist_mutex);
- list_for_each_entry(dev, &cx231xx_devlist, devlist) {
- if (dev)
- ops->fini(dev);
- }
+ list_for_each_entry(dev, &cx231xx_devlist, devlist)
+ ops->fini(dev);
mutex_lock(&cx231xx_extension_devlist_lock);
printk(KERN_INFO DRIVER_NAME ": %s removed\n", ops->name);
diff --git a/drivers/media/video/cx231xx/cx231xx-input.c b/drivers/media/video/cx231xx/cx231xx-input.c
index b473cd8367f5..fd099153b746 100644
--- a/drivers/media/video/cx231xx/cx231xx-input.c
+++ b/drivers/media/video/cx231xx/cx231xx-input.c
@@ -35,6 +35,8 @@ static unsigned int ir_debug;
module_param(ir_debug, int, 0644);
MODULE_PARM_DESC(ir_debug, "enable debug messages [IR]");
+#define MODULE_NAME "cx231xx"
+
#define i2cdprintk(fmt, arg...) \
if (ir_debug) { \
printk(KERN_DEBUG "%s/ir: " fmt, ir->name , ## arg); \
@@ -59,7 +61,6 @@ struct cx231xx_ir_poll_result {
struct cx231xx_IR {
struct cx231xx *dev;
struct input_dev *input;
- struct ir_input_state ir;
char name[32];
char phys[32];
@@ -67,9 +68,7 @@ struct cx231xx_IR {
int polling;
struct work_struct work;
struct timer_list timer;
- unsigned int last_toggle:1;
unsigned int last_readcount;
- unsigned int repeat_interval;
int (*get_key) (struct cx231xx_IR *, struct cx231xx_ir_poll_result *);
};
@@ -81,7 +80,6 @@ struct cx231xx_IR {
static void cx231xx_ir_handle_key(struct cx231xx_IR *ir)
{
int result;
- int do_sendkey = 0;
struct cx231xx_ir_poll_result poll_result;
/* read the registers containing the IR status */
@@ -95,44 +93,23 @@ static void cx231xx_ir_handle_key(struct cx231xx_IR *ir)
poll_result.toggle_bit, poll_result.read_count,
ir->last_readcount, poll_result.rc_data[0]);
- if (ir->dev->chip_id == CHIP_ID_EM2874) {
+ if (poll_result.read_count > 0 &&
+ poll_result.read_count != ir->last_readcount)
+ ir_keydown(ir->input,
+ poll_result.rc_data[0],
+ poll_result.toggle_bit);
+
+ if (ir->dev->chip_id == CHIP_ID_EM2874)
/* The em2874 clears the readcount field every time the
register is read. The em2860/2880 datasheet says that it
is supposed to clear the readcount, but it doesn't. So with
the em2874, we are looking for a non-zero read count as
opposed to a readcount that is incrementing */
ir->last_readcount = 0;
- }
-
- if (poll_result.read_count == 0) {
- /* The button has not been pressed since the last read */
- } else if (ir->last_toggle != poll_result.toggle_bit) {
- /* A button has been pressed */
- dprintk("button has been pressed\n");
- ir->last_toggle = poll_result.toggle_bit;
- ir->repeat_interval = 0;
- do_sendkey = 1;
- } else if (poll_result.toggle_bit == ir->last_toggle &&
- poll_result.read_count > 0 &&
- poll_result.read_count != ir->last_readcount) {
- /* The button is still being held down */
- dprintk("button being held down\n");
-
- /* Debouncer for first keypress */
- if (ir->repeat_interval++ > 9) {
- /* Start repeating after 1 second */
- do_sendkey = 1;
- }
- }
+ else
+ ir->last_readcount = poll_result.read_count;
- if (do_sendkey) {
- dprintk("sending keypress\n");
- ir_input_keydown(ir->input, &ir->ir, poll_result.rc_data[0]);
- ir_input_nokey(ir->input, &ir->ir);
}
-
- ir->last_readcount = poll_result.read_count;
- return;
}
static void ir_timer(unsigned long data)
@@ -198,10 +175,6 @@ int cx231xx_ir_init(struct cx231xx *dev)
usb_make_path(dev->udev, ir->phys, sizeof(ir->phys));
strlcat(ir->phys, "/input0", sizeof(ir->phys));
- err = ir_input_init(input_dev, &ir->ir, IR_TYPE_OTHER);
- if (err < 0)
- goto err_out_free;
-
input_dev->name = ir->name;
input_dev->phys = ir->phys;
input_dev->id.bustype = BUS_USB;
@@ -217,7 +190,8 @@ int cx231xx_ir_init(struct cx231xx *dev)
cx231xx_ir_start(ir);
/* all done */
- err = ir_input_register(ir->input, dev->board.ir_codes, NULL);
+ err = __ir_input_register(ir->input, dev->board.ir_codes,
+ NULL, MODULE_NAME);
if (err)
goto err_out_stop;
diff --git a/drivers/media/video/cx231xx/cx231xx-video.c b/drivers/media/video/cx231xx/cx231xx-video.c
index 16a73eab6726..2782709b263f 100644
--- a/drivers/media/video/cx231xx/cx231xx-video.c
+++ b/drivers/media/video/cx231xx/cx231xx-video.c
@@ -1669,7 +1669,7 @@ static int vidioc_g_fmt_sliced_vbi_cap(struct file *file, void *priv,
f->fmt.sliced.service_set = 0;
- call_all(dev, video, g_fmt, f);
+ call_all(dev, vbi, g_sliced_fmt, &f->fmt.sliced);
if (f->fmt.sliced.service_set == 0)
rc = -EINVAL;
@@ -1690,7 +1690,7 @@ static int vidioc_try_set_sliced_vbi_cap(struct file *file, void *priv,
return rc;
mutex_lock(&dev->lock);
- call_all(dev, video, g_fmt, f);
+ call_all(dev, vbi, g_sliced_fmt, &f->fmt.sliced);
mutex_unlock(&dev->lock);
if (f->fmt.sliced.service_set == 0)
@@ -1902,9 +1902,12 @@ static int radio_queryctrl(struct file *file, void *priv,
if (c->id < V4L2_CID_BASE || c->id >= V4L2_CID_LASTP1)
return -EINVAL;
if (c->id == V4L2_CID_AUDIO_MUTE) {
- for (i = 0; i < CX231XX_CTLS; i++)
+ for (i = 0; i < CX231XX_CTLS; i++) {
if (cx231xx_ctls[i].v.id == c->id)
break;
+ }
+ if (i == CX231XX_CTLS)
+ return -EINVAL;
*c = cx231xx_ctls[i].v;
} else
*c = no_ctl;
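
The radio_queryctrl() hunk above adds a bounds check after the linear search: if the loop runs off the end of cx231xx_ctls, the index equals CX231XX_CTLS and must not be used to read the array, so the function now returns -EINVAL instead. A standalone sketch of that search-with-sentinel-check pattern (illustrative values, not the driver's table):

	#include <stdio.h>

	#define NUM_CTLS 2

	static const unsigned int ctl_ids[NUM_CTLS] = { 0x980905, 0x980909 };

	static int find_ctl(unsigned int id)
	{
		int i;

		for (i = 0; i < NUM_CTLS; i++) {
			if (ctl_ids[i] == id)
				break;
		}
		if (i == NUM_CTLS)
			return -1;	/* not found: analogous to the -EINVAL return */
		return i;
	}

	int main(void)
	{
		printf("found at %d\n", find_ctl(0x980909));	/* 1 */
		printf("found at %d\n", find_ctl(0xdeadbeef));	/* -1 */
		return 0;
	}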
diff --git a/drivers/media/video/cx231xx/cx231xx.h b/drivers/media/video/cx231xx/cx231xx.h
index 17d4d1a800ce..38d417191a65 100644
--- a/drivers/media/video/cx231xx/cx231xx.h
+++ b/drivers/media/video/cx231xx/cx231xx.h
@@ -32,7 +32,7 @@
#include <media/videobuf-vmalloc.h>
#include <media/v4l2-device.h>
-#include <media/ir-kbd-i2c.h>
+#include <media/ir-core.h>
#if defined(CONFIG_VIDEO_CX231XX_DVB) || \
defined(CONFIG_VIDEO_CX231XX_DVB_MODULE)
#include <media/videobuf-dvb.h>
diff --git a/drivers/media/video/cx2341x.c b/drivers/media/video/cx2341x.c
index 4c8e95853fa3..022b480918cd 100644
--- a/drivers/media/video/cx2341x.c
+++ b/drivers/media/video/cx2341x.c
@@ -1000,20 +1000,6 @@ int cx2341x_update(void *priv, cx2341x_mbox_func func,
h, w);
if (err) return err;
}
-
- if (new->width != 720 || new->height != (new->is_50hz ? 576 : 480)) {
- /* Adjust temporal filter if necessary. The problem with the
- temporal filter is that it works well with full resolution
- capturing, but not when the capture window is scaled (the
- filter introduces a ghosting effect). So if the capture
- window is scaled, then force the filter to 0.
-
- For full resolution the filter really improves the video
- quality, especially if the original video quality is
- suboptimal. */
- temporal = 0;
- }
-
if (force || NEQ(stream_type)) {
err = cx2341x_api(priv, func, CX2341X_ENC_SET_STREAM_TYPE, 1,
mpeg_stream_type[new->stream_type]);
diff --git a/drivers/media/video/cx23885/cimax2.c b/drivers/media/video/cx23885/cimax2.c
index d4a9d2c5947c..c95e7bc14745 100644
--- a/drivers/media/video/cx23885/cimax2.c
+++ b/drivers/media/video/cx23885/cimax2.c
@@ -60,12 +60,18 @@ static unsigned int ci_dbg;
module_param(ci_dbg, int, 0644);
MODULE_PARM_DESC(ci_dbg, "Enable CI debugging");
+static unsigned int ci_irq_enable;
+module_param(ci_irq_enable, int, 0644);
+MODULE_PARM_DESC(ci_irq_enable, "Enable IRQ from CAM");
+
#define ci_dbg_print(args...) \
do { \
if (ci_dbg) \
printk(KERN_DEBUG args); \
} while (0)
+#define ci_irq_flags() (ci_irq_enable ? NETUP_IRQ_IRQAM : 0)
+
/* stores all private variables for communication with CI */
struct netup_ci_state {
struct dvb_ca_en50221 ca;
@@ -392,7 +398,7 @@ int netup_poll_ci_slot_status(struct dvb_ca_en50221 *en50221, int slot, int open
if (0 != slot)
return -EINVAL;
- netup_ci_set_irq(en50221, open ? (NETUP_IRQ_DETAM | NETUP_IRQ_IRQAM)
+ netup_ci_set_irq(en50221, open ? (NETUP_IRQ_DETAM | ci_irq_flags())
: NETUP_IRQ_DETAM);
return state->status;
@@ -429,7 +435,7 @@ int netup_ci_init(struct cx23885_tsport *port)
0x01, /* power on (use it like store place) */
0x00, /* RFU */
0x00, /* int status read only */
- NETUP_IRQ_IRQAM | NETUP_IRQ_DETAM, /* DETAM, IRQAM unmasked */
+ ci_irq_flags() | NETUP_IRQ_DETAM, /* DETAM, IRQAM unmasked */
0x05, /* EXTINT=active-high, INT=push-pull */
0x00, /* USCG1 */
0x04, /* ack active low */
@@ -470,7 +476,7 @@ int netup_ci_init(struct cx23885_tsport *port)
state->ca.poll_slot_status = netup_poll_ci_slot_status;
state->ca.data = state;
state->priv = port;
- state->current_irq_mode = NETUP_IRQ_IRQAM | NETUP_IRQ_DETAM;
+ state->current_irq_mode = ci_irq_flags() | NETUP_IRQ_DETAM;
ret = netup_write_i2c(state->i2c_adap, state->ci_i2c_addr,
0, &cimax_init[0], 34);
diff --git a/drivers/media/video/cx23885/cx23885-417.c b/drivers/media/video/cx23885/cx23885-417.c
index a8ddc227cf86..abd64e89f60f 100644
--- a/drivers/media/video/cx23885/cx23885-417.c
+++ b/drivers/media/video/cx23885/cx23885-417.c
@@ -1356,7 +1356,7 @@ static int vidioc_querycap(struct file *file, void *priv,
struct cx23885_dev *dev = fh->dev;
struct cx23885_tsport *tsport = &dev->ts1;
- strcpy(cap->driver, dev->name);
+ strlcpy(cap->driver, dev->name, sizeof(cap->driver));
strlcpy(cap->card, cx23885_boards[tsport->dev->board].name,
sizeof(cap->card));
sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
index 939079d7bbb9..9e1460828b2f 100644
--- a/drivers/media/video/cx23885/cx23885-dvb.c
+++ b/drivers/media/video/cx23885/cx23885-dvb.c
@@ -1006,6 +1006,22 @@ static int dvb_register(struct cx23885_tsport *port)
netup_ci_init(port);
break;
}
+ case CX23885_BOARD_TEVII_S470: {
+ u8 eeprom[256]; /* 24C02 i2c eeprom */
+
+ if (port->nr != 1)
+ break;
+
+ /* Read entire EEPROM */
+ dev->i2c_bus[0].i2c_client.addr = 0xa0 >> 1;
+ tveeprom_read(&dev->i2c_bus[0].i2c_client, eeprom, sizeof(eeprom));
+ printk(KERN_INFO "TeVii S470 MAC= "
+ "%02X:%02X:%02X:%02X:%02X:%02X\n",
+ eeprom[0xa0], eeprom[0xa1], eeprom[0xa2],
+ eeprom[0xa3], eeprom[0xa4], eeprom[0xa5]);
+ memcpy(port->frontends.adapter.proposed_mac, eeprom + 0xa0, 6);
+ break;
+ }
}
return ret;
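The TeVii S470 hunk above reads the board's 24C02 EEPROM and copies the MAC address from offset 0xa0 into the frontend's proposed_mac. A standalone sketch of that copy, assuming a buffer already filled from the EEPROM (the 0xa0 offset and the 6-byte length come from the hunk; the EEPROM contents below are made up):

#include <stdio.h>
#include <string.h>

#define MAC_OFFSET 0xa0

int main(void)
{
        unsigned char eeprom[256];      /* would be filled by an i2c read */
        unsigned char mac[6];

        memset(eeprom, 0, sizeof(eeprom));
        memcpy(&eeprom[MAC_OFFSET], "\x00\x18\xbd\x5a\x01\x02", 6); /* fake contents */

        memcpy(mac, eeprom + MAC_OFFSET, sizeof(mac));
        printf("MAC=%02X:%02X:%02X:%02X:%02X:%02X\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
}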
diff --git a/drivers/media/video/cx23885/cx23885-input.c b/drivers/media/video/cx23885/cx23885-input.c
index 8e9d990dbe93..8d306d8bb61c 100644
--- a/drivers/media/video/cx23885/cx23885-input.c
+++ b/drivers/media/video/cx23885/cx23885-input.c
@@ -51,6 +51,8 @@
#define RC5_EXTENDED_COMMAND_OFFSET 64
+#define MODULE_NAME "cx23885"
+
static inline unsigned int rc5_command(u32 rc5_baseband)
{
return RC5_INSTR(rc5_baseband) +
@@ -338,7 +340,7 @@ int cx23885_input_init(struct cx23885_dev *dev)
{
struct card_ir *ir;
struct input_dev *input_dev;
- struct ir_scancode_table *ir_codes = NULL;
+ char *ir_codes = NULL;
int ir_type, ir_addr, ir_start;
int ret;
@@ -353,7 +355,7 @@ int cx23885_input_init(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
/* Parameters for the grey Hauppauge remote for the HVR-1850 */
- ir_codes = &ir_codes_hauppauge_new_table;
+ ir_codes = RC_MAP_HAUPPAUGE_NEW;
ir_type = IR_TYPE_RC5;
ir_addr = 0x1e; /* RC-5 system bits emitted by the remote */
ir_start = RC5_START_BITS_NORMAL; /* A basic RC-5 remote */
@@ -398,7 +400,7 @@ int cx23885_input_init(struct cx23885_dev *dev)
dev->ir_input = ir;
cx23885_input_ir_start(dev);
- ret = ir_input_register(ir->dev, ir_codes, NULL);
+ ret = ir_input_register(ir->dev, ir_codes, NULL, MODULE_NAME);
if (ret)
goto err_out_stop;
diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c
index 2d3ac8b83dc3..543b854f6a62 100644
--- a/drivers/media/video/cx23885/cx23885-video.c
+++ b/drivers/media/video/cx23885/cx23885-video.c
@@ -514,8 +514,8 @@ static int buffer_setup(struct videobuf_queue *q, unsigned int *count,
*size = fh->fmt->depth*fh->width*fh->height >> 3;
if (0 == *count)
*count = 32;
- while (*size * *count > vid_limit * 1024 * 1024)
- (*count)--;
+ if (*size * *count > vid_limit * 1024 * 1024)
+ *count = (vid_limit * 1024 * 1024) / *size;
return 0;
}
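The buffer_setup change above (repeated for cx88-video.c later in the patch) replaces a decrement-until-it-fits loop with one division: the largest count that keeps size * count within the vid_limit budget is simply the integer quotient. A small sketch of the arithmetic, with made-up numbers:

#include <stdio.h>

int main(void)
{
        unsigned int vid_limit = 16;            /* MB, as in the drivers */
        unsigned int size = 720 * 576 * 2;      /* one YUYV PAL frame */
        unsigned int count = 32;                /* requested buffers */
        unsigned int budget = vid_limit * 1024 * 1024;

        if (size * count > budget)
                count = budget / size;          /* one step instead of a loop */

        printf("count clamped to %u (%u of %u bytes)\n",
               count, count * size, budget);
        return 0;
}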
diff --git a/drivers/media/video/cx25840/cx25840-core.c b/drivers/media/video/cx25840/cx25840-core.c
index f2461cd3de5a..8b6fb3544376 100644
--- a/drivers/media/video/cx25840/cx25840-core.c
+++ b/drivers/media/video/cx25840/cx25840-core.c
@@ -1018,7 +1018,7 @@ static int cx25840_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
{
switch (fmt->type) {
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
- return cx25840_vbi_g_fmt(sd, fmt);
+ return cx25840_g_sliced_fmt(sd, &fmt->fmt.sliced);
default:
return -EINVAL;
}
@@ -1079,12 +1079,6 @@ static int cx25840_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
cx25840_write(client, 0x41e, 0x8 | filter);
break;
- case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
- return cx25840_vbi_s_fmt(sd, fmt);
-
- case V4L2_BUF_TYPE_VBI_CAPTURE:
- return cx25840_vbi_s_fmt(sd, fmt);
-
default:
return -EINVAL;
}
@@ -1635,15 +1629,22 @@ static const struct v4l2_subdev_video_ops cx25840_video_ops = {
.s_routing = cx25840_s_video_routing,
.g_fmt = cx25840_g_fmt,
.s_fmt = cx25840_s_fmt,
- .decode_vbi_line = cx25840_decode_vbi_line,
.s_stream = cx25840_s_stream,
};
+static const struct v4l2_subdev_vbi_ops cx25840_vbi_ops = {
+ .decode_vbi_line = cx25840_decode_vbi_line,
+ .s_raw_fmt = cx25840_s_raw_fmt,
+ .s_sliced_fmt = cx25840_s_sliced_fmt,
+ .g_sliced_fmt = cx25840_g_sliced_fmt,
+};
+
static const struct v4l2_subdev_ops cx25840_ops = {
.core = &cx25840_core_ops,
.tuner = &cx25840_tuner_ops,
.audio = &cx25840_audio_ops,
.video = &cx25840_video_ops,
+ .vbi = &cx25840_vbi_ops,
};
/* ----------------------------------------------------------------------- */
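The cx25840 hunks split the VBI format handling out of the generic video g_fmt/s_fmt callbacks into a dedicated vbi ops group, so callers dispatch on the ops table instead of switching on the buffer type inside one function. A generic sketch of that ops-table split, with made-up names (this is not the v4l2_subdev API itself):

#include <stdio.h>

struct sliced_fmt { int service_set; };
struct raw_fmt    { int samples_per_line; };

/* Before: one callback switching on a "type" argument.
 * After: each format family gets its own ops group. */
struct dev_vbi_ops {
        int (*g_sliced_fmt)(struct sliced_fmt *f);
        int (*s_sliced_fmt)(struct sliced_fmt *f);
        int (*s_raw_fmt)(struct raw_fmt *f);
};

struct dev_ops {
        const struct dev_vbi_ops *vbi;  /* new group, alongside video/audio/... */
};

static int demo_g_sliced(struct sliced_fmt *f) { f->service_set = 0x41; return 0; }

static const struct dev_vbi_ops demo_vbi_ops = { .g_sliced_fmt = demo_g_sliced };
static const struct dev_ops demo_ops = { .vbi = &demo_vbi_ops };

int main(void)
{
        struct sliced_fmt f = { 0 };

        if (demo_ops.vbi && demo_ops.vbi->g_sliced_fmt)
                demo_ops.vbi->g_sliced_fmt(&f);
        printf("service_set=0x%x\n", f.service_set);
        return 0;
}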
diff --git a/drivers/media/video/cx25840/cx25840-core.h b/drivers/media/video/cx25840/cx25840-core.h
index 55345444417f..04393b971567 100644
--- a/drivers/media/video/cx25840/cx25840-core.h
+++ b/drivers/media/video/cx25840/cx25840-core.h
@@ -99,8 +99,9 @@ int cx25840_audio_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl);
/* ----------------------------------------------------------------------- */
/* cx25850-vbi.c */
-int cx25840_vbi_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt);
-int cx25840_vbi_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt);
+int cx25840_s_raw_fmt(struct v4l2_subdev *sd, struct v4l2_vbi_format *fmt);
+int cx25840_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *fmt);
+int cx25840_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *fmt);
int cx25840_decode_vbi_line(struct v4l2_subdev *sd, struct v4l2_decode_vbi_line *vbi);
#endif
diff --git a/drivers/media/video/cx25840/cx25840-vbi.c b/drivers/media/video/cx25840/cx25840-vbi.c
index 35f6592f6c47..64a4004f8a97 100644
--- a/drivers/media/video/cx25840/cx25840-vbi.c
+++ b/drivers/media/video/cx25840/cx25840-vbi.c
@@ -82,11 +82,10 @@ static int decode_vps(u8 * dst, u8 * p)
return err & 0xf0;
}
-int cx25840_vbi_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
+int cx25840_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *svbi)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct cx25840_state *state = to_state(sd);
- struct v4l2_sliced_vbi_format *svbi;
static const u16 lcr2vbi[] = {
0, V4L2_SLICED_TELETEXT_B, 0, /* 1 */
0, V4L2_SLICED_WSS_625, 0, /* 4 */
@@ -97,9 +96,6 @@ int cx25840_vbi_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
int is_pal = !(state->std & V4L2_STD_525_60);
int i;
- if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
- return -EINVAL;
- svbi = &fmt->fmt.sliced;
memset(svbi, 0, sizeof(*svbi));
/* we're done if raw VBI is active */
if ((cx25840_read(client, 0x404) & 0x10) == 0)
@@ -127,32 +123,30 @@ int cx25840_vbi_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
return 0;
}
-int cx25840_vbi_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
+int cx25840_s_raw_fmt(struct v4l2_subdev *sd, struct v4l2_vbi_format *fmt)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct cx25840_state *state = to_state(sd);
- struct v4l2_sliced_vbi_format *svbi;
int is_pal = !(state->std & V4L2_STD_525_60);
int vbi_offset = is_pal ? 1 : 0;
- int i, x;
- u8 lcr[24];
- if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE &&
- fmt->type != V4L2_BUF_TYPE_VBI_CAPTURE)
- return -EINVAL;
- svbi = &fmt->fmt.sliced;
- if (fmt->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
- /* raw VBI */
- memset(svbi, 0, sizeof(*svbi));
+ /* Setup standard */
+ cx25840_std_setup(client);
- /* Setup standard */
- cx25840_std_setup(client);
+ /* VBI Offset */
+ cx25840_write(client, 0x47f, vbi_offset);
+ cx25840_write(client, 0x404, 0x2e);
+ return 0;
+}
- /* VBI Offset */
- cx25840_write(client, 0x47f, vbi_offset);
- cx25840_write(client, 0x404, 0x2e);
- return 0;
- }
+int cx25840_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *svbi)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct cx25840_state *state = to_state(sd);
+ int is_pal = !(state->std & V4L2_STD_525_60);
+ int vbi_offset = is_pal ? 1 : 0;
+ int i, x;
+ u8 lcr[24];
for (x = 0; x <= 23; x++)
lcr[x] = 0x00;
diff --git a/drivers/media/video/cx88/cx88-core.c b/drivers/media/video/cx88/cx88-core.c
index b35411160f04..8b21457111b1 100644
--- a/drivers/media/video/cx88/cx88-core.c
+++ b/drivers/media/video/cx88/cx88-core.c
@@ -847,7 +847,8 @@ static int set_tvaudio(struct cx88_core *core)
{
v4l2_std_id norm = core->tvnorm;
- if (CX88_VMUX_TELEVISION != INPUT(core->input).type)
+ if (CX88_VMUX_TELEVISION != INPUT(core->input).type &&
+ CX88_VMUX_CABLE != INPUT(core->input).type)
return 0;
if (V4L2_STD_PAL_BG & norm) {
diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c
index 94ab862f0219..faa8e8163a4a 100644
--- a/drivers/media/video/cx88/cx88-dvb.c
+++ b/drivers/media/video/cx88/cx88-dvb.c
@@ -1231,7 +1231,7 @@ static int dvb_register(struct cx8802_dev *dev)
fe->ops.tuner_ops.set_config(fe, &ctl);
}
break;
- case CX88_BOARD_PINNACLE_HYBRID_PCTV:
+ case CX88_BOARD_PINNACLE_HYBRID_PCTV:
case CX88_BOARD_WINFAST_DTV1800H:
fe0->dvb.frontend = dvb_attach(zl10353_attach,
&cx88_pinnacle_hybrid_pctv,
diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c
index 6b6abf062c21..e185289e446c 100644
--- a/drivers/media/video/cx88/cx88-input.c
+++ b/drivers/media/video/cx88/cx88-input.c
@@ -32,12 +32,18 @@
#include "cx88.h"
#include <media/ir-common.h>
+#define MODULE_NAME "cx88xx"
+
/* ---------------------------------------------------------------------- */
struct cx88_IR {
struct cx88_core *core;
struct input_dev *input;
struct ir_input_state ir;
+ struct ir_dev_props props;
+
+ int users;
+
char name[32];
char phys[32];
@@ -159,8 +165,16 @@ static enum hrtimer_restart cx88_ir_work(struct hrtimer *timer)
return HRTIMER_RESTART;
}
-void cx88_ir_start(struct cx88_core *core, struct cx88_IR *ir)
+static int __cx88_ir_start(void *priv)
{
+ struct cx88_core *core = priv;
+ struct cx88_IR *ir;
+
+ if (!core || !core->ir)
+ return -EINVAL;
+
+ ir = core->ir;
+
if (ir->polling) {
hrtimer_init(&ir->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ir->timer.function = cx88_ir_work;
@@ -173,10 +187,18 @@ void cx88_ir_start(struct cx88_core *core, struct cx88_IR *ir)
cx_write(MO_DDS_IO, 0xa80a80); /* 4 kHz sample rate */
cx_write(MO_DDSCFG_IO, 0x5); /* enable */
}
+ return 0;
}
-void cx88_ir_stop(struct cx88_core *core, struct cx88_IR *ir)
+static void __cx88_ir_stop(void *priv)
{
+ struct cx88_core *core = priv;
+ struct cx88_IR *ir;
+
+ if (!core || !core->ir)
+ return;
+
+ ir = core->ir;
if (ir->sampling) {
cx_write(MO_DDSCFG_IO, 0x0);
core->pci_irqmask &= ~PCI_INT_IR_SMPINT;
@@ -186,15 +208,49 @@ void cx88_ir_stop(struct cx88_core *core, struct cx88_IR *ir)
hrtimer_cancel(&ir->timer);
}
+int cx88_ir_start(struct cx88_core *core)
+{
+ if (core->ir->users)
+ return __cx88_ir_start(core);
+
+ return 0;
+}
+
+void cx88_ir_stop(struct cx88_core *core)
+{
+ if (core->ir->users)
+ __cx88_ir_stop(core);
+}
+
+static int cx88_ir_open(void *priv)
+{
+ struct cx88_core *core = priv;
+
+ core->ir->users++;
+ return __cx88_ir_start(core);
+}
+
+static void cx88_ir_close(void *priv)
+{
+ struct cx88_core *core = priv;
+
+ core->ir->users--;
+ if (!core->ir->users)
+ __cx88_ir_stop(core);
+}
+
/* ---------------------------------------------------------------------- */
int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
{
struct cx88_IR *ir;
struct input_dev *input_dev;
- struct ir_scancode_table *ir_codes = NULL;
+ char *ir_codes = NULL;
u64 ir_type = IR_TYPE_OTHER;
int err = -ENOMEM;
+ u32 hardware_mask = 0; /* For devices with a hardware mask, when
+ * used with a full-code IR table
+ */
ir = kzalloc(sizeof(*ir), GFP_KERNEL);
input_dev = input_allocate_device();
@@ -208,15 +264,15 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
case CX88_BOARD_DNTV_LIVE_DVB_T:
case CX88_BOARD_KWORLD_DVB_T:
case CX88_BOARD_KWORLD_DVB_T_CX22702:
- ir_codes = &ir_codes_dntv_live_dvb_t_table;
+ ir_codes = RC_MAP_DNTV_LIVE_DVB_T;
ir->gpio_addr = MO_GP1_IO;
ir->mask_keycode = 0x1f;
ir->mask_keyup = 0x60;
ir->polling = 50; /* ms */
break;
case CX88_BOARD_TERRATEC_CINERGY_1400_DVB_T1:
- ir_codes = &ir_codes_cinergy_1400_table;
- ir_type = IR_TYPE_PD;
+ ir_codes = RC_MAP_CINERGY_1400;
+ ir_type = IR_TYPE_NEC;
ir->sampling = 0xeb04; /* address */
break;
case CX88_BOARD_HAUPPAUGE:
@@ -230,14 +286,14 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
case CX88_BOARD_PCHDTV_HD3000:
case CX88_BOARD_PCHDTV_HD5500:
case CX88_BOARD_HAUPPAUGE_IRONLY:
- ir_codes = &ir_codes_hauppauge_new_table;
+ ir_codes = RC_MAP_HAUPPAUGE_NEW;
ir_type = IR_TYPE_RC5;
ir->sampling = 1;
break;
case CX88_BOARD_WINFAST_DTV2000H:
case CX88_BOARD_WINFAST_DTV2000H_J:
case CX88_BOARD_WINFAST_DTV1800H:
- ir_codes = &ir_codes_winfast_table;
+ ir_codes = RC_MAP_WINFAST;
ir->gpio_addr = MO_GP0_IO;
ir->mask_keycode = 0x8f8;
ir->mask_keyup = 0x100;
@@ -246,14 +302,14 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
case CX88_BOARD_WINFAST2000XP_EXPERT:
case CX88_BOARD_WINFAST_DTV1000:
case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
- ir_codes = &ir_codes_winfast_table;
+ ir_codes = RC_MAP_WINFAST;
ir->gpio_addr = MO_GP0_IO;
ir->mask_keycode = 0x8f8;
ir->mask_keyup = 0x100;
ir->polling = 1; /* ms */
break;
case CX88_BOARD_IODATA_GVBCTV7E:
- ir_codes = &ir_codes_iodata_bctv7e_table;
+ ir_codes = RC_MAP_IODATA_BCTV7E;
ir->gpio_addr = MO_GP0_IO;
ir->mask_keycode = 0xfd;
ir->mask_keydown = 0x02;
@@ -261,36 +317,43 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
break;
case CX88_BOARD_PROLINK_PLAYTVPVR:
case CX88_BOARD_PIXELVIEW_PLAYTV_ULTRA_PRO:
- ir_codes = &ir_codes_pixelview_table;
+ /*
+ * This hardware seems to be paired with NEC extended
+ * address 0x866b, so, unfortunately, it won't work with
+ * remotes that use a different address. Still, other remotes
+ * from the same manufacturer do work, like the 002-T mini RC
+ * provided with newer PV hardware.
+ */
+ ir_codes = RC_MAP_PIXELVIEW_MK12;
ir->gpio_addr = MO_GP1_IO;
- ir->mask_keycode = 0x1f;
ir->mask_keyup = 0x80;
- ir->polling = 1; /* ms */
+ ir->polling = 10; /* ms */
+ hardware_mask = 0x3f; /* Hardware returns only 6 bits from command part */
break;
case CX88_BOARD_PROLINK_PV_8000GT:
case CX88_BOARD_PROLINK_PV_GLOBAL_XTREME:
- ir_codes = &ir_codes_pixelview_new_table;
+ ir_codes = RC_MAP_PIXELVIEW_NEW;
ir->gpio_addr = MO_GP1_IO;
ir->mask_keycode = 0x3f;
ir->mask_keyup = 0x80;
ir->polling = 1; /* ms */
break;
case CX88_BOARD_KWORLD_LTV883:
- ir_codes = &ir_codes_pixelview_table;
+ ir_codes = RC_MAP_PIXELVIEW;
ir->gpio_addr = MO_GP1_IO;
ir->mask_keycode = 0x1f;
ir->mask_keyup = 0x60;
ir->polling = 1; /* ms */
break;
case CX88_BOARD_ADSTECH_DVB_T_PCI:
- ir_codes = &ir_codes_adstech_dvb_t_pci_table;
+ ir_codes = RC_MAP_ADSTECH_DVB_T_PCI;
ir->gpio_addr = MO_GP1_IO;
ir->mask_keycode = 0xbf;
ir->mask_keyup = 0x40;
ir->polling = 50; /* ms */
break;
case CX88_BOARD_MSI_TVANYWHERE_MASTER:
- ir_codes = &ir_codes_msi_tvanywhere_table;
+ ir_codes = RC_MAP_MSI_TVANYWHERE;
ir->gpio_addr = MO_GP1_IO;
ir->mask_keycode = 0x1f;
ir->mask_keyup = 0x40;
@@ -298,7 +361,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
break;
case CX88_BOARD_AVERTV_303:
case CX88_BOARD_AVERTV_STUDIO_303:
- ir_codes = &ir_codes_avertv_303_table;
+ ir_codes = RC_MAP_AVERTV_303;
ir->gpio_addr = MO_GP2_IO;
ir->mask_keycode = 0xfb;
ir->mask_keydown = 0x02;
@@ -311,41 +374,41 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
case CX88_BOARD_PROF_7300:
case CX88_BOARD_PROF_7301:
case CX88_BOARD_PROF_6200:
- ir_codes = &ir_codes_tbs_nec_table;
- ir_type = IR_TYPE_PD;
+ ir_codes = RC_MAP_TBS_NEC;
+ ir_type = IR_TYPE_NEC;
ir->sampling = 0xff00; /* address */
break;
case CX88_BOARD_TEVII_S460:
case CX88_BOARD_TEVII_S420:
- ir_codes = &ir_codes_tevii_nec_table;
- ir_type = IR_TYPE_PD;
+ ir_codes = RC_MAP_TEVII_NEC;
+ ir_type = IR_TYPE_NEC;
ir->sampling = 0xff00; /* address */
break;
case CX88_BOARD_DNTV_LIVE_DVB_T_PRO:
- ir_codes = &ir_codes_dntv_live_dvbt_pro_table;
- ir_type = IR_TYPE_PD;
+ ir_codes = RC_MAP_DNTV_LIVE_DVBT_PRO;
+ ir_type = IR_TYPE_NEC;
ir->sampling = 0xff00; /* address */
break;
case CX88_BOARD_NORWOOD_MICRO:
- ir_codes = &ir_codes_norwood_table;
+ ir_codes = RC_MAP_NORWOOD;
ir->gpio_addr = MO_GP1_IO;
ir->mask_keycode = 0x0e;
ir->mask_keyup = 0x80;
ir->polling = 50; /* ms */
break;
case CX88_BOARD_NPGTECH_REALTV_TOP10FM:
- ir_codes = &ir_codes_npgtech_table;
+ ir_codes = RC_MAP_NPGTECH;
ir->gpio_addr = MO_GP0_IO;
ir->mask_keycode = 0xfa;
ir->polling = 50; /* ms */
break;
case CX88_BOARD_PINNACLE_PCTV_HD_800i:
- ir_codes = &ir_codes_pinnacle_pctv_hd_table;
+ ir_codes = RC_MAP_PINNACLE_PCTV_HD;
ir_type = IR_TYPE_RC5;
ir->sampling = 1;
break;
case CX88_BOARD_POWERCOLOR_REAL_ANGEL:
- ir_codes = &ir_codes_powercolor_real_angel_table;
+ ir_codes = RC_MAP_POWERCOLOR_REAL_ANGEL;
ir->gpio_addr = MO_GP2_IO;
ir->mask_keycode = 0x7e;
ir->polling = 100; /* ms */
@@ -357,6 +420,21 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
goto err_out_free;
}
+ /*
+ * Using mask_keycode used to be very convenient, for several
+ * reasons. Among others, the scancode tables used the scancode
+ * as the index, so the fewer bits were used, the smaller the
+ * stored table. After the input changes, it is better to use
+ * full scancodes, since that allows replacing the IR remote with
+ * another one. Unfortunately, there is still some hardware, like
+ * the Pixelview Ultra Pro, where only part of the scancode is sent
+ * via GPIO, so there is no way to get the full scancode. That is
+ * why hardware_mask was introduced here: it marks the hardware
+ * that has such limits.
+ */
+ if (hardware_mask && !ir->mask_keycode)
+ ir->mask_keycode = hardware_mask;
+
/* init input device */
snprintf(ir->name, sizeof(ir->name), "cx88 IR (%s)", core->board.name);
snprintf(ir->phys, sizeof(ir->phys), "pci-%s/ir0", pci_name(pci));
@@ -381,19 +459,20 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
ir->core = core;
core->ir = ir;
- cx88_ir_start(core, ir);
+ ir->props.priv = core;
+ ir->props.open = cx88_ir_open;
+ ir->props.close = cx88_ir_close;
+ ir->props.scanmask = hardware_mask;
/* all done */
- err = ir_input_register(ir->input, ir_codes, NULL);
+ err = ir_input_register(ir->input, ir_codes, &ir->props, MODULE_NAME);
if (err)
- goto err_out_stop;
+ goto err_out_free;
return 0;
- err_out_stop:
- cx88_ir_stop(core, ir);
- core->ir = NULL;
err_out_free:
+ core->ir = NULL;
kfree(ir);
return err;
}
@@ -406,7 +485,7 @@ int cx88_ir_fini(struct cx88_core *core)
if (NULL == ir)
return 0;
- cx88_ir_stop(core, ir);
+ cx88_ir_stop(core);
ir_input_unregister(ir->input);
kfree(ir);
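The cx88-input.c changes switch to full scancode keymaps while keeping a hardware mask (props.scanmask) for boards such as the PixelView Ultra Pro, whose GPIO pins expose only part of the code. A toy illustration of how that mask relates the full scancode to what the hardware can actually report, using the 0x3f mask and NEC extended address 0x866b mentioned in the hunks (the command byte itself is made up):

#include <stdio.h>

int main(void)
{
        unsigned int full_scancode = 0x866b1c;  /* address 0x866b, command 0x1c (example) */
        unsigned int hardware_mask = 0x3f;      /* GPIO only returns 6 command bits */

        /* The keymap stores full scancodes; lookups for this board must be
         * reduced to the bits the hardware can actually deliver. */
        printf("hardware sees 0x%02x of scancode 0x%06x\n",
               full_scancode & hardware_mask, full_scancode);
        return 0;
}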
diff --git a/drivers/media/video/cx88/cx88-mpeg.c b/drivers/media/video/cx88/cx88-mpeg.c
index 6aba7af9160a..499f8d512ad6 100644
--- a/drivers/media/video/cx88/cx88-mpeg.c
+++ b/drivers/media/video/cx88/cx88-mpeg.c
@@ -599,13 +599,22 @@ struct cx8802_driver * cx8802_get_driver(struct cx8802_dev *dev, enum cx88_board
static int cx8802_request_acquire(struct cx8802_driver *drv)
{
struct cx88_core *core = drv->core;
+ unsigned int i;
/* Fail a request for hardware if the device is busy. */
if (core->active_type_id != CX88_BOARD_NONE &&
core->active_type_id != drv->type_id)
return -EBUSY;
- core->input = CX88_VMUX_DVB;
+ core->input = 0;
+ for (i = 0;
+ i < (sizeof(core->board.input) / sizeof(struct cx88_input));
+ i++) {
+ if (core->board.input[i].type == CX88_VMUX_DVB) {
+ core->input = i;
+ break;
+ }
+ }
if (drv->advise_acquire)
{
diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
index 48c450f4a85a..0fab65c3ab39 100644
--- a/drivers/media/video/cx88/cx88-video.c
+++ b/drivers/media/video/cx88/cx88-video.c
@@ -426,12 +426,13 @@ int cx88_video_mux(struct cx88_core *core, unsigned int input)
if (core->board.audio_chip &&
core->board.audio_chip == V4L2_IDENT_WM8775) {
call_all(core, audio, s_routing,
- INPUT(input).audioroute, 0, 0);
+ INPUT(input).audioroute, 0, 0);
}
/* cx2388's C-ADC is connected to the tuner only.
When used with S-Video, that ADC is busy dealing with
chroma, so an external must be used for baseband audio */
- if (INPUT(input).type != CX88_VMUX_TELEVISION ) {
+ if (INPUT(input).type != CX88_VMUX_TELEVISION &&
+ INPUT(input).type != CX88_VMUX_CABLE) {
/* "I2S ADC mode" */
core->tvaudio = WW_I2SADC;
cx88_set_tvaudio(core);
@@ -561,8 +562,8 @@ buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size)
*size = fh->fmt->depth*fh->width*fh->height >> 3;
if (0 == *count)
*count = 32;
- while (*size * *count > vid_limit * 1024 * 1024)
- (*count)--;
+ if (*size * *count > vid_limit * 1024 * 1024)
+ *count = (vid_limit * 1024 * 1024) / *size;
return 0;
}
@@ -1537,9 +1538,12 @@ static int radio_queryctrl (struct file *file, void *priv,
c->id >= V4L2_CID_LASTP1)
return -EINVAL;
if (c->id == V4L2_CID_AUDIO_MUTE) {
- for (i = 0; i < CX8800_CTLS; i++)
+ for (i = 0; i < CX8800_CTLS; i++) {
if (cx8800_ctls[i].v.id == c->id)
break;
+ }
+ if (i == CX8800_CTLS)
+ return -EINVAL;
*c = cx8800_ctls[i].v;
} else
*c = no_ctl;
@@ -1977,7 +1981,7 @@ static void __devexit cx8800_finidev(struct pci_dev *pci_dev)
}
if (core->ir)
- cx88_ir_stop(core, core->ir);
+ cx88_ir_stop(core);
cx88_shutdown(core); /* FIXME */
pci_disable_device(pci_dev);
@@ -2015,7 +2019,7 @@ static int cx8800_suspend(struct pci_dev *pci_dev, pm_message_t state)
spin_unlock(&dev->slock);
if (core->ir)
- cx88_ir_stop(core, core->ir);
+ cx88_ir_stop(core);
/* FIXME -- shutdown device */
cx88_shutdown(core);
@@ -2056,7 +2060,7 @@ static int cx8800_resume(struct pci_dev *pci_dev)
/* FIXME: re-initialize hardware */
cx88_reset(core);
if (core->ir)
- cx88_ir_start(core, core->ir);
+ cx88_ir_start(core);
cx_set(MO_PCI_INTMSK, core->pci_irqmask);
diff --git a/drivers/media/video/cx88/cx88.h b/drivers/media/video/cx88/cx88.h
index 48b6c04fb497..bdb03d336536 100644
--- a/drivers/media/video/cx88/cx88.h
+++ b/drivers/media/video/cx88/cx88.h
@@ -41,7 +41,7 @@
#include <linux/version.h>
#include <linux/mutex.h>
-#define CX88_VERSION_CODE KERNEL_VERSION(0,0,7)
+#define CX88_VERSION_CODE KERNEL_VERSION(0, 0, 8)
#define UNSET (-1U)
@@ -290,7 +290,7 @@ struct cx88_subid {
#define RESOURCE_VIDEO 2
#define RESOURCE_VBI 4
-#define BUFFER_TIMEOUT msecs_to_jiffies(500) /* 0.5 seconds */
+#define BUFFER_TIMEOUT msecs_to_jiffies(2000)
/* buffer for one video frame */
struct cx88_buffer {
@@ -683,8 +683,8 @@ s32 cx88_dsp_detect_stereo_sap(struct cx88_core *core);
int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci);
int cx88_ir_fini(struct cx88_core *core);
void cx88_ir_irq(struct cx88_core *core);
-void cx88_ir_start(struct cx88_core *core, struct cx88_IR *ir);
-void cx88_ir_stop(struct cx88_core *core, struct cx88_IR *ir);
+int cx88_ir_start(struct cx88_core *core);
+void cx88_ir_stop(struct cx88_core *core);
/* ----------------------------------------------------------- */
/* cx88-mpeg.c */
diff --git a/drivers/media/video/davinci/dm644x_ccdc.c b/drivers/media/video/davinci/dm644x_ccdc.c
index b4cc96dc99ef..490aafb34e2f 100644
--- a/drivers/media/video/davinci/dm644x_ccdc.c
+++ b/drivers/media/video/davinci/dm644x_ccdc.c
@@ -101,6 +101,9 @@ static u32 ccdc_raw_bayer_pix_formats[] =
static u32 ccdc_raw_yuv_pix_formats[] =
{V4L2_PIX_FMT_UYVY, V4L2_PIX_FMT_YUYV};
+/* CCDC Save/Restore context */
+static u32 ccdc_ctx[CCDC_REG_END / sizeof(u32)];
+
/* register access routines */
static inline u32 regr(u32 offset)
{
@@ -400,7 +403,11 @@ void ccdc_config_ycbcr(void)
* configure the FID, VD, HD pin polarity,
* fld,hd pol positive, vd negative, 8-bit data
*/
- syn_mode |= CCDC_SYN_MODE_VD_POL_NEGATIVE | CCDC_SYN_MODE_8BITS;
+ syn_mode |= CCDC_SYN_MODE_VD_POL_NEGATIVE;
+ if (ccdc_cfg.if_type == VPFE_BT656_10BIT)
+ syn_mode |= CCDC_SYN_MODE_10BITS;
+ else
+ syn_mode |= CCDC_SYN_MODE_8BITS;
} else {
/* y/c external sync mode */
syn_mode |= (((params->fid_pol & CCDC_FID_POL_MASK) <<
@@ -419,8 +426,13 @@ void ccdc_config_ycbcr(void)
* configure the order of y cb cr in SDRAM, and disable latch
* internal register on vsync
*/
- regw((params->pix_order << CCDC_CCDCFG_Y8POS_SHIFT) |
- CCDC_LATCH_ON_VSYNC_DISABLE, CCDC_CCDCFG);
+ if (ccdc_cfg.if_type == VPFE_BT656_10BIT)
+ regw((params->pix_order << CCDC_CCDCFG_Y8POS_SHIFT) |
+ CCDC_LATCH_ON_VSYNC_DISABLE | CCDC_CCDCFG_BW656_10BIT,
+ CCDC_CCDCFG);
+ else
+ regw((params->pix_order << CCDC_CCDCFG_Y8POS_SHIFT) |
+ CCDC_LATCH_ON_VSYNC_DISABLE, CCDC_CCDCFG);
/*
* configure the horizontal line offset. This should be a
@@ -435,7 +447,6 @@ void ccdc_config_ycbcr(void)
ccdc_sbl_reset();
dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_config_ycbcr...\n");
- ccdc_readregs();
}
static void ccdc_config_black_clamp(struct ccdc_black_clamp *bclamp)
@@ -827,6 +838,7 @@ static int ccdc_set_hw_if_params(struct vpfe_hw_if_param *params)
case VPFE_BT656:
case VPFE_YCBCR_SYNC_16:
case VPFE_YCBCR_SYNC_8:
+ case VPFE_BT656_10BIT:
ccdc_cfg.ycbcr.vd_pol = params->vdpol;
ccdc_cfg.ycbcr.hd_pol = params->hdpol;
break;
@@ -837,6 +849,87 @@ static int ccdc_set_hw_if_params(struct vpfe_hw_if_param *params)
return 0;
}
+static void ccdc_save_context(void)
+{
+ ccdc_ctx[CCDC_PCR >> 2] = regr(CCDC_PCR);
+ ccdc_ctx[CCDC_SYN_MODE >> 2] = regr(CCDC_SYN_MODE);
+ ccdc_ctx[CCDC_HD_VD_WID >> 2] = regr(CCDC_HD_VD_WID);
+ ccdc_ctx[CCDC_PIX_LINES >> 2] = regr(CCDC_PIX_LINES);
+ ccdc_ctx[CCDC_HORZ_INFO >> 2] = regr(CCDC_HORZ_INFO);
+ ccdc_ctx[CCDC_VERT_START >> 2] = regr(CCDC_VERT_START);
+ ccdc_ctx[CCDC_VERT_LINES >> 2] = regr(CCDC_VERT_LINES);
+ ccdc_ctx[CCDC_CULLING >> 2] = regr(CCDC_CULLING);
+ ccdc_ctx[CCDC_HSIZE_OFF >> 2] = regr(CCDC_HSIZE_OFF);
+ ccdc_ctx[CCDC_SDOFST >> 2] = regr(CCDC_SDOFST);
+ ccdc_ctx[CCDC_SDR_ADDR >> 2] = regr(CCDC_SDR_ADDR);
+ ccdc_ctx[CCDC_CLAMP >> 2] = regr(CCDC_CLAMP);
+ ccdc_ctx[CCDC_DCSUB >> 2] = regr(CCDC_DCSUB);
+ ccdc_ctx[CCDC_COLPTN >> 2] = regr(CCDC_COLPTN);
+ ccdc_ctx[CCDC_BLKCMP >> 2] = regr(CCDC_BLKCMP);
+ ccdc_ctx[CCDC_FPC >> 2] = regr(CCDC_FPC);
+ ccdc_ctx[CCDC_FPC_ADDR >> 2] = regr(CCDC_FPC_ADDR);
+ ccdc_ctx[CCDC_VDINT >> 2] = regr(CCDC_VDINT);
+ ccdc_ctx[CCDC_ALAW >> 2] = regr(CCDC_ALAW);
+ ccdc_ctx[CCDC_REC656IF >> 2] = regr(CCDC_REC656IF);
+ ccdc_ctx[CCDC_CCDCFG >> 2] = regr(CCDC_CCDCFG);
+ ccdc_ctx[CCDC_FMTCFG >> 2] = regr(CCDC_FMTCFG);
+ ccdc_ctx[CCDC_FMT_HORZ >> 2] = regr(CCDC_FMT_HORZ);
+ ccdc_ctx[CCDC_FMT_VERT >> 2] = regr(CCDC_FMT_VERT);
+ ccdc_ctx[CCDC_FMT_ADDR0 >> 2] = regr(CCDC_FMT_ADDR0);
+ ccdc_ctx[CCDC_FMT_ADDR1 >> 2] = regr(CCDC_FMT_ADDR1);
+ ccdc_ctx[CCDC_FMT_ADDR2 >> 2] = regr(CCDC_FMT_ADDR2);
+ ccdc_ctx[CCDC_FMT_ADDR3 >> 2] = regr(CCDC_FMT_ADDR3);
+ ccdc_ctx[CCDC_FMT_ADDR4 >> 2] = regr(CCDC_FMT_ADDR4);
+ ccdc_ctx[CCDC_FMT_ADDR5 >> 2] = regr(CCDC_FMT_ADDR5);
+ ccdc_ctx[CCDC_FMT_ADDR6 >> 2] = regr(CCDC_FMT_ADDR6);
+ ccdc_ctx[CCDC_FMT_ADDR7 >> 2] = regr(CCDC_FMT_ADDR7);
+ ccdc_ctx[CCDC_PRGEVEN_0 >> 2] = regr(CCDC_PRGEVEN_0);
+ ccdc_ctx[CCDC_PRGEVEN_1 >> 2] = regr(CCDC_PRGEVEN_1);
+ ccdc_ctx[CCDC_PRGODD_0 >> 2] = regr(CCDC_PRGODD_0);
+ ccdc_ctx[CCDC_PRGODD_1 >> 2] = regr(CCDC_PRGODD_1);
+ ccdc_ctx[CCDC_VP_OUT >> 2] = regr(CCDC_VP_OUT);
+}
+
+static void ccdc_restore_context(void)
+{
+ regw(ccdc_ctx[CCDC_SYN_MODE >> 2], CCDC_SYN_MODE);
+ regw(ccdc_ctx[CCDC_HD_VD_WID >> 2], CCDC_HD_VD_WID);
+ regw(ccdc_ctx[CCDC_PIX_LINES >> 2], CCDC_PIX_LINES);
+ regw(ccdc_ctx[CCDC_HORZ_INFO >> 2], CCDC_HORZ_INFO);
+ regw(ccdc_ctx[CCDC_VERT_START >> 2], CCDC_VERT_START);
+ regw(ccdc_ctx[CCDC_VERT_LINES >> 2], CCDC_VERT_LINES);
+ regw(ccdc_ctx[CCDC_CULLING >> 2], CCDC_CULLING);
+ regw(ccdc_ctx[CCDC_HSIZE_OFF >> 2], CCDC_HSIZE_OFF);
+ regw(ccdc_ctx[CCDC_SDOFST >> 2], CCDC_SDOFST);
+ regw(ccdc_ctx[CCDC_SDR_ADDR >> 2], CCDC_SDR_ADDR);
+ regw(ccdc_ctx[CCDC_CLAMP >> 2], CCDC_CLAMP);
+ regw(ccdc_ctx[CCDC_DCSUB >> 2], CCDC_DCSUB);
+ regw(ccdc_ctx[CCDC_COLPTN >> 2], CCDC_COLPTN);
+ regw(ccdc_ctx[CCDC_BLKCMP >> 2], CCDC_BLKCMP);
+ regw(ccdc_ctx[CCDC_FPC >> 2], CCDC_FPC);
+ regw(ccdc_ctx[CCDC_FPC_ADDR >> 2], CCDC_FPC_ADDR);
+ regw(ccdc_ctx[CCDC_VDINT >> 2], CCDC_VDINT);
+ regw(ccdc_ctx[CCDC_ALAW >> 2], CCDC_ALAW);
+ regw(ccdc_ctx[CCDC_REC656IF >> 2], CCDC_REC656IF);
+ regw(ccdc_ctx[CCDC_CCDCFG >> 2], CCDC_CCDCFG);
+ regw(ccdc_ctx[CCDC_FMTCFG >> 2], CCDC_FMTCFG);
+ regw(ccdc_ctx[CCDC_FMT_HORZ >> 2], CCDC_FMT_HORZ);
+ regw(ccdc_ctx[CCDC_FMT_VERT >> 2], CCDC_FMT_VERT);
+ regw(ccdc_ctx[CCDC_FMT_ADDR0 >> 2], CCDC_FMT_ADDR0);
+ regw(ccdc_ctx[CCDC_FMT_ADDR1 >> 2], CCDC_FMT_ADDR1);
+ regw(ccdc_ctx[CCDC_FMT_ADDR2 >> 2], CCDC_FMT_ADDR2);
+ regw(ccdc_ctx[CCDC_FMT_ADDR3 >> 2], CCDC_FMT_ADDR3);
+ regw(ccdc_ctx[CCDC_FMT_ADDR4 >> 2], CCDC_FMT_ADDR4);
+ regw(ccdc_ctx[CCDC_FMT_ADDR5 >> 2], CCDC_FMT_ADDR5);
+ regw(ccdc_ctx[CCDC_FMT_ADDR6 >> 2], CCDC_FMT_ADDR6);
+ regw(ccdc_ctx[CCDC_FMT_ADDR7 >> 2], CCDC_FMT_ADDR7);
+ regw(ccdc_ctx[CCDC_PRGEVEN_0 >> 2], CCDC_PRGEVEN_0);
+ regw(ccdc_ctx[CCDC_PRGEVEN_1 >> 2], CCDC_PRGEVEN_1);
+ regw(ccdc_ctx[CCDC_PRGODD_0 >> 2], CCDC_PRGODD_0);
+ regw(ccdc_ctx[CCDC_PRGODD_1 >> 2], CCDC_PRGODD_1);
+ regw(ccdc_ctx[CCDC_VP_OUT >> 2], CCDC_VP_OUT);
+ regw(ccdc_ctx[CCDC_PCR >> 2], CCDC_PCR);
+}
static struct ccdc_hw_device ccdc_hw_dev = {
.name = "DM6446 CCDC",
.owner = THIS_MODULE,
@@ -945,10 +1038,40 @@ static int dm644x_ccdc_remove(struct platform_device *pdev)
return 0;
}
+static int dm644x_ccdc_suspend(struct device *dev)
+{
+ /* Save CCDC context */
+ ccdc_save_context();
+ /* Disable CCDC */
+ ccdc_enable(0);
+ /* Disable both master and slave clock */
+ clk_disable(ccdc_cfg.mclk);
+ clk_disable(ccdc_cfg.sclk);
+
+ return 0;
+}
+
+static int dm644x_ccdc_resume(struct device *dev)
+{
+ /* Enable both master and slave clock */
+ clk_enable(ccdc_cfg.mclk);
+ clk_enable(ccdc_cfg.sclk);
+ /* Restore CCDC context */
+ ccdc_restore_context();
+
+ return 0;
+}
+
+static const struct dev_pm_ops dm644x_ccdc_pm_ops = {
+ .suspend = dm644x_ccdc_suspend,
+ .resume = dm644x_ccdc_resume,
+};
+
static struct platform_driver dm644x_ccdc_driver = {
.driver = {
.name = "dm644x_ccdc",
.owner = THIS_MODULE,
+ .pm = &dm644x_ccdc_pm_ops,
},
.remove = __devexit_p(dm644x_ccdc_remove),
.probe = dm644x_ccdc_probe,
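ccdc_save_context()/ccdc_restore_context() above snapshot the CCDC register block into a flat u32 array indexed by register offset divided by four, sized by the new CCDC_REG_END marker, so suspend/resume can replay the programming. A minimal sketch of that save/restore pattern against a fake register file (regr/regw here are stand-ins, not the driver's MMIO accessors):

#include <stdio.h>

#define REG_A   0x00
#define REG_B   0x04
#define REG_C   0x08
#define REG_END 0x0c                            /* one past the last register */

static unsigned int fake_hw[REG_END / 4];       /* pretend MMIO space */
static unsigned int ctx[REG_END / 4];           /* saved context */

static unsigned int regr(unsigned int off)                    { return fake_hw[off / 4]; }
static void         regw(unsigned int val, unsigned int off)  { fake_hw[off / 4] = val; }

static void save_context(void)
{
        ctx[REG_A >> 2] = regr(REG_A);
        ctx[REG_B >> 2] = regr(REG_B);
        ctx[REG_C >> 2] = regr(REG_C);
}

static void restore_context(void)
{
        regw(ctx[REG_A >> 2], REG_A);
        regw(ctx[REG_B >> 2], REG_B);
        regw(ctx[REG_C >> 2], REG_C);
}

int main(void)
{
        regw(0x1234, REG_B);
        save_context();
        regw(0, REG_B);                         /* power loss wipes the block */
        restore_context();
        printf("REG_B=0x%x\n", regr(REG_B));
        return 0;
}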
diff --git a/drivers/media/video/davinci/dm644x_ccdc_regs.h b/drivers/media/video/davinci/dm644x_ccdc_regs.h
index 6e5d05324466..90370e414e2c 100644
--- a/drivers/media/video/davinci/dm644x_ccdc_regs.h
+++ b/drivers/media/video/davinci/dm644x_ccdc_regs.h
@@ -59,7 +59,7 @@
#define CCDC_PRGODD_0 0x8c
#define CCDC_PRGODD_1 0x90
#define CCDC_VP_OUT 0x94
-
+#define CCDC_REG_END 0x98
/***************************************************************
* Define for various register bit mask and shifts for CCDC
@@ -135,11 +135,19 @@
#define CCDC_SYN_MODE_INPMOD_SHIFT 12
#define CCDC_SYN_MODE_INPMOD_MASK 3
#define CCDC_SYN_MODE_8BITS (7 << 8)
+#define CCDC_SYN_MODE_10BITS (6 << 8)
+#define CCDC_SYN_MODE_11BITS (5 << 8)
+#define CCDC_SYN_MODE_12BITS (4 << 8)
+#define CCDC_SYN_MODE_13BITS (3 << 8)
+#define CCDC_SYN_MODE_14BITS (2 << 8)
+#define CCDC_SYN_MODE_15BITS (1 << 8)
+#define CCDC_SYN_MODE_16BITS (0 << 8)
#define CCDC_SYN_FLDMODE_MASK 1
#define CCDC_SYN_FLDMODE_SHIFT 7
#define CCDC_REC656IF_BT656_EN 3
#define CCDC_SYN_MODE_VD_POL_NEGATIVE (1 << 2)
#define CCDC_CCDCFG_Y8POS_SHIFT 11
+#define CCDC_CCDCFG_BW656_10BIT (1 << 5)
#define CCDC_SDOFST_FIELD_INTERLEAVED 0x249
#define CCDC_NO_CULLING 0xffff00ff
#endif
diff --git a/drivers/media/video/davinci/isif_regs.h b/drivers/media/video/davinci/isif_regs.h
index f7b8893a2957..aa69a463c122 100644
--- a/drivers/media/video/davinci/isif_regs.h
+++ b/drivers/media/video/davinci/isif_regs.h
@@ -158,7 +158,7 @@
/* gain - offset masks */
#define GAIN_INTEGER_SHIFT 9
-#define OFFSET_MASK 0xFFF
+#define OFFSET_MASK 0xFFF
#define GAIN_SDRAM_EN_SHIFT 12
#define GAIN_IPIPE_EN_SHIFT 13
#define GAIN_H3A_EN_SHIFT 14
diff --git a/drivers/media/video/davinci/vpfe_capture.c b/drivers/media/video/davinci/vpfe_capture.c
index 398dbe71cb82..f6648f6ba2fc 100644
--- a/drivers/media/video/davinci/vpfe_capture.c
+++ b/drivers/media/video/davinci/vpfe_capture.c
@@ -475,6 +475,11 @@ static int vpfe_initialize_device(struct vpfe_device *vpfe_dev)
ret = ccdc_dev->hw_ops.open(vpfe_dev->pdev);
if (!ret)
vpfe_dev->initialized = 1;
+
+ /* Clear all VPFE/CCDC interrupts */
+ if (vpfe_dev->cfg->clr_intr)
+ vpfe_dev->cfg->clr_intr(-1);
+
unlock:
mutex_unlock(&ccdc_lock);
return ret;
@@ -534,6 +539,16 @@ static void vpfe_schedule_next_buffer(struct vpfe_device *vpfe_dev)
list_del(&vpfe_dev->next_frm->queue);
vpfe_dev->next_frm->state = VIDEOBUF_ACTIVE;
addr = videobuf_to_dma_contig(vpfe_dev->next_frm);
+
+ ccdc_dev->hw_ops.setfbaddr(addr);
+}
+
+static void vpfe_schedule_bottom_field(struct vpfe_device *vpfe_dev)
+{
+ unsigned long addr;
+
+ addr = videobuf_to_dma_contig(vpfe_dev->cur_frm);
+ addr += vpfe_dev->field_off;
ccdc_dev->hw_ops.setfbaddr(addr);
}
@@ -554,7 +569,6 @@ static irqreturn_t vpfe_isr(int irq, void *dev_id)
{
struct vpfe_device *vpfe_dev = dev_id;
enum v4l2_field field;
- unsigned long addr;
int fid;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "\nStarting vpfe_isr...\n");
@@ -562,7 +576,7 @@ static irqreturn_t vpfe_isr(int irq, void *dev_id)
/* if streaming not started, don't do anything */
if (!vpfe_dev->started)
- return IRQ_HANDLED;
+ goto clear_intr;
/* only for 6446 this will be applicable */
if (NULL != ccdc_dev->hw_ops.reset)
@@ -574,7 +588,7 @@ static irqreturn_t vpfe_isr(int irq, void *dev_id)
"frame format is progressive...\n");
if (vpfe_dev->cur_frm != vpfe_dev->next_frm)
vpfe_process_buffer_complete(vpfe_dev);
- return IRQ_HANDLED;
+ goto clear_intr;
}
/* interlaced or TB capture check which field we are in hardware */
@@ -599,12 +613,9 @@ static irqreturn_t vpfe_isr(int irq, void *dev_id)
* the CCDC memory address
*/
if (field == V4L2_FIELD_SEQ_TB) {
- addr =
- videobuf_to_dma_contig(vpfe_dev->cur_frm);
- addr += vpfe_dev->field_off;
- ccdc_dev->hw_ops.setfbaddr(addr);
+ vpfe_schedule_bottom_field(vpfe_dev);
}
- return IRQ_HANDLED;
+ goto clear_intr;
}
/*
* if one field is just being captured configure
@@ -624,6 +635,10 @@ static irqreturn_t vpfe_isr(int irq, void *dev_id)
*/
vpfe_dev->field_id = fid;
}
+clear_intr:
+ if (vpfe_dev->cfg->clr_intr)
+ vpfe_dev->cfg->clr_intr(irq);
+
return IRQ_HANDLED;
}
@@ -635,8 +650,11 @@ static irqreturn_t vdint1_isr(int irq, void *dev_id)
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "\nInside vdint1_isr...\n");
/* if streaming not started, don't do anything */
- if (!vpfe_dev->started)
+ if (!vpfe_dev->started) {
+ if (vpfe_dev->cfg->clr_intr)
+ vpfe_dev->cfg->clr_intr(irq);
return IRQ_HANDLED;
+ }
spin_lock(&vpfe_dev->dma_queue_lock);
if ((vpfe_dev->fmt.fmt.pix.field == V4L2_FIELD_NONE) &&
@@ -644,6 +662,10 @@ static irqreturn_t vdint1_isr(int irq, void *dev_id)
vpfe_dev->cur_frm == vpfe_dev->next_frm)
vpfe_schedule_next_buffer(vpfe_dev);
spin_unlock(&vpfe_dev->dma_queue_lock);
+
+ if (vpfe_dev->cfg->clr_intr)
+ vpfe_dev->cfg->clr_intr(irq);
+
return IRQ_HANDLED;
}
@@ -1218,7 +1240,10 @@ static int vpfe_videobuf_setup(struct videobuf_queue *vq,
struct vpfe_device *vpfe_dev = fh->vpfe_dev;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_setup\n");
- *size = config_params.device_bufsize;
+ *size = vpfe_dev->fmt.fmt.pix.sizeimage;
+ if (vpfe_dev->memory == V4L2_MEMORY_MMAP &&
+ vpfe_dev->fmt.fmt.pix.sizeimage > config_params.device_bufsize)
+ *size = config_params.device_bufsize;
if (*count < config_params.min_numbuffers)
*count = config_params.min_numbuffers;
@@ -1233,6 +1258,8 @@ static int vpfe_videobuf_prepare(struct videobuf_queue *vq,
{
struct vpfe_fh *fh = vq->priv_data;
struct vpfe_device *vpfe_dev = fh->vpfe_dev;
+ unsigned long addr;
+ int ret;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_prepare\n");
@@ -1242,8 +1269,18 @@ static int vpfe_videobuf_prepare(struct videobuf_queue *vq,
vb->height = vpfe_dev->fmt.fmt.pix.height;
vb->size = vpfe_dev->fmt.fmt.pix.sizeimage;
vb->field = field;
+
+ ret = videobuf_iolock(vq, vb, NULL);
+ if (ret < 0)
+ return ret;
+
+ addr = videobuf_to_dma_contig(vb);
+ /* Make sure user addresses are aligned to 32 bytes */
+ if (!IS_ALIGNED(addr, 32))
+ return -EINVAL;
+
+ vb->state = VIDEOBUF_PREPARED;
}
- vb->state = VIDEOBUF_PREPARED;
return 0;
}
@@ -1311,13 +1348,6 @@ static int vpfe_reqbufs(struct file *file, void *priv,
return -EINVAL;
}
- if (V4L2_MEMORY_USERPTR == req_buf->memory) {
- /* we don't support user ptr IO */
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_reqbufs:"
- " USERPTR IO not supported\n");
- return -EINVAL;
- }
-
ret = mutex_lock_interruptible(&vpfe_dev->lock);
if (ret)
return ret;
@@ -1831,7 +1861,6 @@ static __init int vpfe_probe(struct platform_device *pdev)
goto probe_free_dev_mem;
}
- mutex_lock(&ccdc_lock);
/* Allocate memory for ccdc configuration */
ccdc_cfg = kmalloc(sizeof(struct ccdc_config), GFP_KERNEL);
if (NULL == ccdc_cfg) {
@@ -1840,6 +1869,8 @@ static __init int vpfe_probe(struct platform_device *pdev)
goto probe_free_lock;
}
+ mutex_lock(&ccdc_lock);
+
strncpy(ccdc_cfg->name, vpfe_cfg->ccdc, 32);
/* Get VINT0 irq resource */
res1 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -2015,18 +2046,14 @@ static int __devexit vpfe_remove(struct platform_device *pdev)
return 0;
}
-static int
-vpfe_suspend(struct device *dev)
+static int vpfe_suspend(struct device *dev)
{
- /* add suspend code here later */
- return -1;
+ return 0;
}
-static int
-vpfe_resume(struct device *dev)
+static int vpfe_resume(struct device *dev)
{
- /* add resume code here later */
- return -1;
+ return 0;
}
static const struct dev_pm_ops vpfe_dev_pm_ops = {
diff --git a/drivers/media/video/davinci/vpif_display.c b/drivers/media/video/davinci/vpif_display.c
index 13c3a1b97760..e5bab3d0d53f 100644
--- a/drivers/media/video/davinci/vpif_display.c
+++ b/drivers/media/video/davinci/vpif_display.c
@@ -384,7 +384,7 @@ static int vpif_get_std_info(struct channel_obj *ch)
int index;
std_info->stdid = vid_ch->stdid;
- if (!std_info)
+ if (!std_info->stdid)
return -1;
for (index = 0; index < ARRAY_SIZE(ch_params); index++) {
diff --git a/drivers/media/video/em28xx/em28xx-audio.c b/drivers/media/video/em28xx/em28xx-audio.c
index bd783387b37d..e182abf476c9 100644
--- a/drivers/media/video/em28xx/em28xx-audio.c
+++ b/drivers/media/video/em28xx/em28xx-audio.c
@@ -491,7 +491,7 @@ static int em28xx_audio_init(struct em28xx *dev)
strcpy(pcm->name, "Empia 28xx Capture");
snd_card_set_dev(card, &dev->udev->dev);
- strcpy(card->driver, "Empia Em28xx Audio");
+ strcpy(card->driver, "Em28xx-Audio");
strcpy(card->shortname, "Em28xx Audio");
strcpy(card->longname, "Empia Em28xx Audio");
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index b0fb08337710..3a4fd8514511 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -602,7 +602,7 @@ struct em28xx_board em28xx_boards[] = {
.name = "Gadmei UTV330+",
.tuner_type = TUNER_TNF_5335MF,
.tda9887_conf = TDA9887_PRESENT,
- .ir_codes = &ir_codes_gadmei_rm008z_table,
+ .ir_codes = RC_MAP_GADMEI_RM008Z,
.decoder = EM28XX_SAA711X,
.xclk = EM28XX_XCLK_FREQUENCY_12MHZ,
.input = { {
@@ -681,6 +681,20 @@ struct em28xx_board em28xx_boards[] = {
.amux = EM28XX_AMUX_LINE_IN,
} },
},
+ [EM2860_BOARD_TVP5150_REFERENCE_DESIGN] = {
+ .name = "EM2860/TVP5150 Reference Design",
+ .tuner_type = TUNER_ABSENT, /* Capture only device */
+ .decoder = EM28XX_TVP5150,
+ .input = { {
+ .type = EM28XX_VMUX_COMPOSITE1,
+ .vmux = TVP5150_COMPOSITE1,
+ .amux = EM28XX_AMUX_LINE_IN,
+ }, {
+ .type = EM28XX_VMUX_SVIDEO,
+ .vmux = TVP5150_SVIDEO,
+ .amux = EM28XX_AMUX_LINE_IN,
+ } },
+ },
[EM2861_BOARD_PLEXTOR_PX_TV100U] = {
.name = "Plextor ConvertX PX-TV100U",
.tuner_type = TUNER_TNF_5335MF,
@@ -777,7 +791,7 @@ struct em28xx_board em28xx_boards[] = {
.mts_firmware = 1,
.has_dvb = 1,
.dvb_gpio = hauppauge_wintv_hvr_900_digital,
- .ir_codes = &ir_codes_hauppauge_new_table,
+ .ir_codes = RC_MAP_HAUPPAUGE_NEW,
.decoder = EM28XX_TVP5150,
.input = { {
.type = EM28XX_VMUX_TELEVISION,
@@ -802,7 +816,7 @@ struct em28xx_board em28xx_boards[] = {
.tuner_type = TUNER_XC2028,
.tuner_gpio = default_tuner_gpio,
.mts_firmware = 1,
- .ir_codes = &ir_codes_hauppauge_new_table,
+ .ir_codes = RC_MAP_HAUPPAUGE_NEW,
.decoder = EM28XX_TVP5150,
.input = { {
.type = EM28XX_VMUX_TELEVISION,
@@ -828,7 +842,7 @@ struct em28xx_board em28xx_boards[] = {
.mts_firmware = 1,
.has_dvb = 1,
.dvb_gpio = hauppauge_wintv_hvr_900_digital,
- .ir_codes = &ir_codes_hauppauge_new_table,
+ .ir_codes = RC_MAP_HAUPPAUGE_NEW,
.decoder = EM28XX_TVP5150,
.input = { {
.type = EM28XX_VMUX_TELEVISION,
@@ -854,7 +868,7 @@ struct em28xx_board em28xx_boards[] = {
.mts_firmware = 1,
.has_dvb = 1,
.dvb_gpio = hauppauge_wintv_hvr_900_digital,
- .ir_codes = &ir_codes_rc5_hauppauge_new_table,
+ .ir_codes = RC_MAP_RC5_HAUPPAUGE_NEW,
.decoder = EM28XX_TVP5150,
.input = { {
.type = EM28XX_VMUX_TELEVISION,
@@ -880,7 +894,7 @@ struct em28xx_board em28xx_boards[] = {
.mts_firmware = 1,
.has_dvb = 1,
.dvb_gpio = hauppauge_wintv_hvr_900_digital,
- .ir_codes = &ir_codes_pinnacle_pctv_hd_table,
+ .ir_codes = RC_MAP_PINNACLE_PCTV_HD,
.decoder = EM28XX_TVP5150,
.input = { {
.type = EM28XX_VMUX_TELEVISION,
@@ -906,7 +920,7 @@ struct em28xx_board em28xx_boards[] = {
.mts_firmware = 1,
.has_dvb = 1,
.dvb_gpio = hauppauge_wintv_hvr_900_digital,
- .ir_codes = &ir_codes_ati_tv_wonder_hd_600_table,
+ .ir_codes = RC_MAP_ATI_TV_WONDER_HD_600,
.decoder = EM28XX_TVP5150,
.input = { {
.type = EM28XX_VMUX_TELEVISION,
@@ -932,7 +946,7 @@ struct em28xx_board em28xx_boards[] = {
.decoder = EM28XX_TVP5150,
.has_dvb = 1,
.dvb_gpio = default_digital,
- .ir_codes = &ir_codes_terratec_cinergy_xs_table,
+ .ir_codes = RC_MAP_TERRATEC_CINERGY_XS,
.xclk = EM28XX_XCLK_FREQUENCY_12MHZ, /* NEC IR */
.input = { {
.type = EM28XX_VMUX_TELEVISION,
@@ -1282,7 +1296,7 @@ struct em28xx_board em28xx_boards[] = {
.decoder = EM28XX_SAA711X,
.has_dvb = 1,
.dvb_gpio = em2882_kworld_315u_digital,
- .ir_codes = &ir_codes_kworld_315u_table,
+ .ir_codes = RC_MAP_KWORLD_315U,
.xclk = EM28XX_XCLK_FREQUENCY_12MHZ,
.i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE,
/* Analog mode - still not ready */
@@ -1404,10 +1418,14 @@ struct em28xx_board em28xx_boards[] = {
},
[EM2882_BOARD_KWORLD_VS_DVBT] = {
.name = "Kworld VS-DVB-T 323UR",
- .valid = EM28XX_BOARD_NOT_VALIDATED,
.tuner_type = TUNER_XC2028,
.tuner_gpio = default_tuner_gpio,
.decoder = EM28XX_TVP5150,
+ .mts_firmware = 1,
+ .has_dvb = 1,
+ .dvb_gpio = kworld_330u_digital,
+ .xclk = EM28XX_XCLK_FREQUENCY_12MHZ, /* NEC IR */
+ .ir_codes = RC_MAP_KWORLD_315U,
.input = { {
.type = EM28XX_VMUX_TELEVISION,
.vmux = TVP5150_COMPOSITE0,
@@ -1430,7 +1448,7 @@ struct em28xx_board em28xx_boards[] = {
.decoder = EM28XX_TVP5150,
.has_dvb = 1,
.dvb_gpio = hauppauge_wintv_hvr_900_digital,
- .ir_codes = &ir_codes_terratec_cinergy_xs_table,
+ .ir_codes = RC_MAP_TERRATEC_CINERGY_XS,
.xclk = EM28XX_XCLK_FREQUENCY_12MHZ,
.input = { {
.type = EM28XX_VMUX_TELEVISION,
@@ -1523,7 +1541,7 @@ struct em28xx_board em28xx_boards[] = {
.mts_firmware = 1,
.decoder = EM28XX_TVP5150,
.tuner_gpio = default_tuner_gpio,
- .ir_codes = &ir_codes_kaiomy_table,
+ .ir_codes = RC_MAP_KAIOMY,
.input = { {
.type = EM28XX_VMUX_TELEVISION,
.vmux = TVP5150_COMPOSITE0,
@@ -1623,7 +1641,7 @@ struct em28xx_board em28xx_boards[] = {
.mts_firmware = 1,
.has_dvb = 1,
.dvb_gpio = evga_indtube_digital,
- .ir_codes = &ir_codes_evga_indtube_table,
+ .ir_codes = RC_MAP_EVGA_INDTUBE,
.input = { {
.type = EM28XX_VMUX_TELEVISION,
.vmux = TVP5150_COMPOSITE0,
@@ -1672,6 +1690,8 @@ struct usb_device_id em28xx_id_table[] = {
.driver_info = EM2820_BOARD_UNKNOWN },
{ USB_DEVICE(0xeb1a, 0x2862),
.driver_info = EM2820_BOARD_UNKNOWN },
+ { USB_DEVICE(0xeb1a, 0x2863),
+ .driver_info = EM2820_BOARD_UNKNOWN },
{ USB_DEVICE(0xeb1a, 0x2870),
.driver_info = EM2820_BOARD_UNKNOWN },
{ USB_DEVICE(0xeb1a, 0x2881),
@@ -1792,6 +1812,7 @@ static struct em28xx_hash_table em28xx_i2c_hash[] = {
{0xb06a32c3, EM2800_BOARD_TERRATEC_CINERGY_200, TUNER_LG_PAL_NEW_TAPC},
{0xf51200e3, EM2800_BOARD_VGEAR_POCKETTV, TUNER_LG_PAL_NEW_TAPC},
{0x1ba50080, EM2860_BOARD_SAA711X_REFERENCE_DESIGN, TUNER_ABSENT},
+ {0x77800080, EM2860_BOARD_TVP5150_REFERENCE_DESIGN, TUNER_ABSENT},
{0xc51200e3, EM2820_BOARD_GADMEI_TVR200, TUNER_LG_PAL_NEW_TAPC},
{0x4ba50080, EM2861_BOARD_GADMEI_UTV330PLUS, TUNER_TNF_5335MF},
};
@@ -2138,6 +2159,7 @@ static void em28xx_setup_xc3028(struct em28xx *dev, struct xc2028_ctrl *ctl)
break;
case EM2883_BOARD_KWORLD_HYBRID_330U:
case EM2882_BOARD_DIKOM_DK300:
+ case EM2882_BOARD_KWORLD_VS_DVBT:
ctl->demod = XC3028_FE_CHINA;
ctl->fname = XC2028_DEFAULT_FIRMWARE;
break;
@@ -2313,21 +2335,21 @@ void em28xx_register_i2c_ir(struct em28xx *dev)
switch (dev->model) {
case EM2800_BOARD_TERRATEC_CINERGY_200:
case EM2820_BOARD_TERRATEC_CINERGY_250:
- dev->init_data.ir_codes = &ir_codes_em_terratec_table;
+ dev->init_data.ir_codes = RC_MAP_EM_TERRATEC;
dev->init_data.get_key = em28xx_get_key_terratec;
dev->init_data.name = "i2c IR (EM28XX Terratec)";
break;
case EM2820_BOARD_PINNACLE_USB_2:
- dev->init_data.ir_codes = &ir_codes_pinnacle_grey_table;
+ dev->init_data.ir_codes = RC_MAP_PINNACLE_GREY;
dev->init_data.get_key = em28xx_get_key_pinnacle_usb_grey;
dev->init_data.name = "i2c IR (EM28XX Pinnacle PCTV)";
break;
case EM2820_BOARD_HAUPPAUGE_WINTV_USB_2:
- dev->init_data.ir_codes = &ir_codes_rc5_hauppauge_new_table;
+ dev->init_data.ir_codes = RC_MAP_RC5_HAUPPAUGE_NEW;
dev->init_data.get_key = em28xx_get_key_em_haup;
dev->init_data.name = "i2c IR (EM2840 Hauppauge)";
case EM2820_BOARD_LEADTEK_WINFAST_USBII_DELUXE:
- dev->init_data.ir_codes = &ir_codes_winfast_usbii_deluxe_table;;
+ dev->init_data.ir_codes = RC_MAP_WINFAST_USBII_DELUXE;
dev->init_data.get_key = em28xx_get_key_winfast_usbii_deluxe;
dev->init_data.name = "i2c IR (EM2820 Winfast TV USBII Deluxe)";
break;
diff --git a/drivers/media/video/em28xx/em28xx-core.c b/drivers/media/video/em28xx/em28xx-core.c
index a41cc5566778..d3813ed789d9 100644
--- a/drivers/media/video/em28xx/em28xx-core.c
+++ b/drivers/media/video/em28xx/em28xx-core.c
@@ -782,11 +782,15 @@ int em28xx_resolution_set(struct em28xx *dev)
em28xx_accumulator_set(dev, 1, (width - 4) >> 2, 1, (height - 4) >> 2);
- /* If we don't set the start position to 4 in VBI mode, we end up
- with line 21 being YUYV encoded instead of being in 8-bit
- greyscale */
+ /* If we don't set the start position to 2 in VBI mode, we end up
+ with lines 20/21 being YUYV encoded instead of being in 8-bit
+ greyscale. The core of the issue is that line 21 (and line 23 for
+ PAL WSS) is inside the active video region, and as a result it
+ gets the pixel formatting associated with that area. So by cropping
+ it out, we end up with the same format as the rest of the VBI
+ region */
if (em28xx_vbi_supported(dev) == 1)
- em28xx_capture_area_set(dev, 0, 4, width >> 2, height >> 2);
+ em28xx_capture_area_set(dev, 0, 2, width >> 2, height >> 2);
else
em28xx_capture_area_set(dev, 0, 0, width >> 2, height >> 2);
@@ -1174,21 +1178,18 @@ void em28xx_add_into_devlist(struct em28xx *dev)
*/
static LIST_HEAD(em28xx_extension_devlist);
-static DEFINE_MUTEX(em28xx_extension_devlist_lock);
int em28xx_register_extension(struct em28xx_ops *ops)
{
struct em28xx *dev = NULL;
mutex_lock(&em28xx_devlist_mutex);
- mutex_lock(&em28xx_extension_devlist_lock);
list_add_tail(&ops->next, &em28xx_extension_devlist);
list_for_each_entry(dev, &em28xx_devlist, devlist) {
if (dev)
ops->init(dev);
}
printk(KERN_INFO "Em28xx: Initialized (%s) extension\n", ops->name);
- mutex_unlock(&em28xx_extension_devlist_lock);
mutex_unlock(&em28xx_devlist_mutex);
return 0;
}
@@ -1204,10 +1205,8 @@ void em28xx_unregister_extension(struct em28xx_ops *ops)
ops->fini(dev);
}
- mutex_lock(&em28xx_extension_devlist_lock);
printk(KERN_INFO "Em28xx: Removed (%s) extension\n", ops->name);
list_del(&ops->next);
- mutex_unlock(&em28xx_extension_devlist_lock);
mutex_unlock(&em28xx_devlist_mutex);
}
EXPORT_SYMBOL(em28xx_unregister_extension);
@@ -1216,26 +1215,26 @@ void em28xx_init_extension(struct em28xx *dev)
{
struct em28xx_ops *ops = NULL;
- mutex_lock(&em28xx_extension_devlist_lock);
+ mutex_lock(&em28xx_devlist_mutex);
if (!list_empty(&em28xx_extension_devlist)) {
list_for_each_entry(ops, &em28xx_extension_devlist, next) {
if (ops->init)
ops->init(dev);
}
}
- mutex_unlock(&em28xx_extension_devlist_lock);
+ mutex_unlock(&em28xx_devlist_mutex);
}
void em28xx_close_extension(struct em28xx *dev)
{
struct em28xx_ops *ops = NULL;
- mutex_lock(&em28xx_extension_devlist_lock);
+ mutex_lock(&em28xx_devlist_mutex);
if (!list_empty(&em28xx_extension_devlist)) {
list_for_each_entry(ops, &em28xx_extension_devlist, next) {
if (ops->fini)
ops->fini(dev);
}
}
- mutex_unlock(&em28xx_extension_devlist_lock);
+ mutex_unlock(&em28xx_devlist_mutex);
}
diff --git a/drivers/media/video/em28xx/em28xx-dvb.c b/drivers/media/video/em28xx/em28xx-dvb.c
index bcd3c371009b..cf1d8c3655fc 100644
--- a/drivers/media/video/em28xx/em28xx-dvb.c
+++ b/drivers/media/video/em28xx/em28xx-dvb.c
@@ -467,6 +467,7 @@ static int dvb_init(struct em28xx *dev)
}
dev->dvb = dvb;
+ mutex_lock(&dev->lock);
em28xx_set_mode(dev, EM28XX_DIGITAL_MODE);
/* init frontend */
switch (dev->model) {
@@ -506,6 +507,7 @@ static int dvb_init(struct em28xx *dev)
case EM2880_BOARD_TERRATEC_HYBRID_XS_FR:
case EM2881_BOARD_PINNACLE_HYBRID_PRO:
case EM2882_BOARD_DIKOM_DK300:
+ case EM2882_BOARD_KWORLD_VS_DVBT:
dvb->frontend = dvb_attach(zl10353_attach,
&em28xx_zl10353_xc3028_no_i2c_gate,
&dev->i2c_adap);
@@ -589,15 +591,16 @@ static int dvb_init(struct em28xx *dev)
if (result < 0)
goto out_free;
- em28xx_set_mode(dev, EM28XX_SUSPEND);
em28xx_info("Successfully loaded em28xx-dvb\n");
- return 0;
+ret:
+ em28xx_set_mode(dev, EM28XX_SUSPEND);
+ mutex_unlock(&dev->lock);
+ return result;
out_free:
- em28xx_set_mode(dev, EM28XX_SUSPEND);
kfree(dvb);
dev->dvb = NULL;
- return result;
+ goto ret;
}
static int dvb_fini(struct em28xx *dev)
diff --git a/drivers/media/video/em28xx/em28xx-input.c b/drivers/media/video/em28xx/em28xx-input.c
index 20a0001e8885..5c3fd9411b1f 100644
--- a/drivers/media/video/em28xx/em28xx-input.c
+++ b/drivers/media/video/em28xx/em28xx-input.c
@@ -39,6 +39,8 @@ static unsigned int ir_debug;
module_param(ir_debug, int, 0644);
MODULE_PARM_DESC(ir_debug, "enable debug messages [IR]");
+#define MODULE_NAME "em28xx"
+
#define i2cdprintk(fmt, arg...) \
if (ir_debug) { \
printk(KERN_DEBUG "%s/ir: " fmt, ir->name , ## arg); \
@@ -360,14 +362,20 @@ static void em28xx_ir_work(struct work_struct *work)
schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling));
}
-static void em28xx_ir_start(struct em28xx_IR *ir)
+static int em28xx_ir_start(void *priv)
{
+ struct em28xx_IR *ir = priv;
+
INIT_DELAYED_WORK(&ir->work, em28xx_ir_work);
schedule_delayed_work(&ir->work, 0);
+
+ return 0;
}
-static void em28xx_ir_stop(struct em28xx_IR *ir)
+static void em28xx_ir_stop(void *priv)
{
+ struct em28xx_IR *ir = priv;
+
cancel_delayed_work_sync(&ir->work);
}
@@ -380,7 +388,6 @@ int em28xx_ir_change_protocol(void *priv, u64 ir_type)
/* Adjust xclk based on IR table for RC5/NEC tables */
- dev->board.ir_codes->ir_type = IR_TYPE_OTHER;
if (ir_type == IR_TYPE_RC5) {
dev->board.xclk |= EM28XX_XCLK_IR_RC5_MODE;
ir->full_code = 1;
@@ -388,11 +395,9 @@ int em28xx_ir_change_protocol(void *priv, u64 ir_type)
dev->board.xclk &= ~EM28XX_XCLK_IR_RC5_MODE;
ir_config = EM2874_IR_NEC;
ir->full_code = 1;
- } else
+ } else if (ir_type != IR_TYPE_UNKNOWN)
rc = -EINVAL;
- dev->board.ir_codes->ir_type = ir_type;
-
em28xx_write_reg_bits(dev, EM28XX_R0F_XCLK, dev->board.xclk,
EM28XX_XCLK_IR_RC5_MODE);
@@ -443,6 +448,13 @@ int em28xx_ir_init(struct em28xx *dev)
ir->props.allowed_protos = IR_TYPE_RC5 | IR_TYPE_NEC;
ir->props.priv = ir;
ir->props.change_protocol = em28xx_ir_change_protocol;
+ ir->props.open = em28xx_ir_start;
+ ir->props.close = em28xx_ir_stop;
+
+ /* By default, keep protocol field untouched */
+ err = em28xx_ir_change_protocol(ir, IR_TYPE_UNKNOWN);
+ if (err)
+ goto err_out_free;
/* This is how often we ask the chip for IR information */
ir->polling = 100; /* ms */
@@ -455,7 +467,6 @@ int em28xx_ir_init(struct em28xx *dev)
strlcat(ir->phys, "/input0", sizeof(ir->phys));
/* Set IR protocol */
- em28xx_ir_change_protocol(ir, dev->board.ir_codes->ir_type);
err = ir_input_init(input_dev, &ir->ir, IR_TYPE_OTHER);
if (err < 0)
goto err_out_free;
@@ -470,17 +481,15 @@ int em28xx_ir_init(struct em28xx *dev)
input_dev->dev.parent = &dev->udev->dev;
- em28xx_ir_start(ir);
/* all done */
err = ir_input_register(ir->input, dev->board.ir_codes,
- &ir->props);
+ &ir->props, MODULE_NAME);
if (err)
goto err_out_stop;
return 0;
err_out_stop:
- em28xx_ir_stop(ir);
dev->ir = NULL;
err_out_free:
kfree(ir);
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c
index 0fe20110bfd6..8565fb3ff945 100644
--- a/drivers/media/video/em28xx/em28xx-video.c
+++ b/drivers/media/video/em28xx/em28xx-video.c
@@ -203,12 +203,6 @@ static void em28xx_copy_video(struct em28xx *dev,
if (dma_q->pos + len > buf->vb.size)
len = buf->vb.size - dma_q->pos;
- if (p[0] != 0x88 && p[0] != 0x22) {
- em28xx_isocdbg("frame is not complete\n");
- len += 4;
- } else
- p += 4;
-
startread = p;
remain = len;
@@ -309,14 +303,6 @@ static void em28xx_copy_vbi(struct em28xx *dev,
if (dma_q->pos + len > buf->vb.size)
len = buf->vb.size - dma_q->pos;
- if ((p[0] == 0x33 && p[1] == 0x95) ||
- (p[0] == 0x88 && p[1] == 0x88)) {
- /* Header field, advance past it */
- p += 4;
- } else {
- len += 4;
- }
-
startread = p;
startwrite = outp + dma_q->pos;
@@ -507,8 +493,15 @@ static inline int em28xx_isoc_copy(struct em28xx *dev, struct urb *urb)
dma_q->pos = 0;
}
- if (buf != NULL)
+ if (buf != NULL) {
+ if (p[0] != 0x88 && p[0] != 0x22) {
+ em28xx_isocdbg("frame is not complete\n");
+ len += 4;
+ } else {
+ p += 4;
+ }
em28xx_copy_video(dev, dma_q, buf, p, outp, len);
+ }
}
return rc;
}
@@ -555,8 +548,7 @@ static inline int em28xx_isoc_copy_vbi(struct em28xx *dev, struct urb *urb)
continue;
}
- len = urb->iso_frame_desc[i].actual_length - 4;
-
+ len = urb->iso_frame_desc[i].actual_length;
if (urb->iso_frame_desc[i].actual_length <= 0) {
/* em28xx_isocdbg("packet %d is empty",i); - spammy */
continue;
@@ -577,6 +569,17 @@ static inline int em28xx_isoc_copy_vbi(struct em28xx *dev, struct urb *urb)
dev->vbi_read = 0;
em28xx_isocdbg("VBI START HEADER!!!\n");
dev->cur_field = p[2];
+ p += 4;
+ len -= 4;
+ } else if (p[0] == 0x88 && p[1] == 0x88 &&
+ p[2] == 0x88 && p[3] == 0x88) {
+ /* continuation */
+ p += 4;
+ len -= 4;
+ } else if (p[0] == 0x22 && p[1] == 0x5a) {
+ /* start video */
+ p += 4;
+ len -= 4;
}
vbi_size = dev->vbi_width * dev->vbi_height;
@@ -631,9 +634,6 @@ static inline int em28xx_isoc_copy_vbi(struct em28xx *dev, struct urb *urb)
if (dev->capture_type == 1) {
dev->capture_type = 2;
- em28xx_isocdbg("Video frame %d, length=%i, %s\n", p[2],
- len, (p[2] & 1) ? "odd" : "even");
-
if (dev->progressive || !(dev->cur_field & 1)) {
if (buf != NULL)
buffer_filled(dev, dma_q, buf);
@@ -652,8 +652,25 @@ static inline int em28xx_isoc_copy_vbi(struct em28xx *dev, struct urb *urb)
dma_q->pos = 0;
}
- if (buf != NULL && dev->capture_type == 2)
- em28xx_copy_video(dev, dma_q, buf, p, outp, len);
+
+ if (buf != NULL && dev->capture_type == 2) {
+ if (len > 4 && p[0] == 0x88 && p[1] == 0x88 &&
+ p[2] == 0x88 && p[3] == 0x88) {
+ p += 4;
+ len -= 4;
+ }
+ if (len > 4 && p[0] == 0x22 && p[1] == 0x5a) {
+ em28xx_isocdbg("Video frame %d, len=%i, %s\n",
+ p[2], len, (p[2] & 1) ?
+ "odd" : "even");
+ p += 4;
+ len -= 4;
+ }
+
+ if (len > 0)
+ em28xx_copy_video(dev, dma_q, buf, p, outp,
+ len);
+ }
}
return rc;
}
@@ -1814,7 +1831,7 @@ static int vidioc_g_fmt_sliced_vbi_cap(struct file *file, void *priv,
mutex_lock(&dev->lock);
f->fmt.sliced.service_set = 0;
- v4l2_device_call_all(&dev->v4l2_dev, 0, video, g_fmt, f);
+ v4l2_device_call_all(&dev->v4l2_dev, 0, vbi, g_sliced_fmt, &f->fmt.sliced);
if (f->fmt.sliced.service_set == 0)
rc = -EINVAL;
@@ -1836,7 +1853,7 @@ static int vidioc_try_set_sliced_vbi_cap(struct file *file, void *priv,
return rc;
mutex_lock(&dev->lock);
- v4l2_device_call_all(&dev->v4l2_dev, 0, video, g_fmt, f);
+ v4l2_device_call_all(&dev->v4l2_dev, 0, vbi, g_sliced_fmt, &f->fmt.sliced);
mutex_unlock(&dev->lock);
if (f->fmt.sliced.service_set == 0)
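
The hunks above move the 4-byte packet-header checks out of em28xx_copy_video()/em28xx_copy_vbi() and into the isoc copy routines. A hedged sketch of how such a header could be classified and stripped before the payload is copied; the marker values (0x22 0x5a, 0x33 0x95, 0x88 repeated) are the ones used in the driver, but the helper itself is hypothetical.

/*
 * Illustrative sketch only, not part of the patch.
 */
#include <stddef.h>

enum pkt_kind { PKT_VIDEO_START, PKT_VBI_START, PKT_CONTINUATION, PKT_RAW };

static enum pkt_kind classify_and_strip(const unsigned char **p, size_t *len)
{
	const unsigned char *d = *p;

	if (*len < 4)
		return PKT_RAW;
	if (d[0] == 0x22 && d[1] == 0x5a) {
		*p += 4; *len -= 4;		/* video start header */
		return PKT_VIDEO_START;
	}
	if (d[0] == 0x33 && d[1] == 0x95) {
		*p += 4; *len -= 4;		/* VBI start header */
		return PKT_VBI_START;
	}
	if (d[0] == 0x88 && d[1] == 0x88 && d[2] == 0x88 && d[3] == 0x88) {
		*p += 4; *len -= 4;		/* continuation header */
		return PKT_CONTINUATION;
	}
	return PKT_RAW;				/* no header: copy whole packet */
}
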
diff --git a/drivers/media/video/em28xx/em28xx.h b/drivers/media/video/em28xx/em28xx.h
index ba6fe5daff84..b252d1b1b2a7 100644
--- a/drivers/media/video/em28xx/em28xx.h
+++ b/drivers/media/video/em28xx/em28xx.h
@@ -68,6 +68,7 @@
#define EM2820_BOARD_HERCULES_SMART_TV_USB2 26
#define EM2820_BOARD_PINNACLE_USB_2_FM1216ME 27
#define EM2820_BOARD_LEADTEK_WINFAST_USBII_DELUXE 28
+#define EM2860_BOARD_TVP5150_REFERENCE_DESIGN 29
#define EM2820_BOARD_VIDEOLOGY_20K14XUSB 30
#define EM2821_BOARD_USBGEAR_VD204 31
#define EM2821_BOARD_SUPERCOMP_USB_2 32
@@ -140,10 +141,10 @@
#define EM28XX_NUM_BUFS 5
/* number of packets for each buffer
- windows requests only 40 packets .. so we better do the same
+ windows requests only 64 packets .. so we better do the same
this is what I found out for all alternate numbers there!
*/
-#define EM28XX_NUM_PACKETS 40
+#define EM28XX_NUM_PACKETS 64
#define EM28XX_INTERLACED_DEFAULT 1
@@ -411,7 +412,7 @@ struct em28xx_board {
struct em28xx_input input[MAX_EM28XX_INPUT];
struct em28xx_input radio;
- struct ir_scancode_table *ir_codes;
+ char *ir_codes;
};
struct em28xx_eeprom {
diff --git a/drivers/media/video/et61x251/et61x251_core.c b/drivers/media/video/et61x251/et61x251_core.c
index e6c23d509862..a5cfc76b40b7 100644
--- a/drivers/media/video/et61x251/et61x251_core.c
+++ b/drivers/media/video/et61x251/et61x251_core.c
@@ -1713,7 +1713,7 @@ et61x251_vidioc_s_ctrl(struct et61x251_device* cam, void __user * arg)
if (copy_from_user(&ctrl, arg, sizeof(ctrl)))
return -EFAULT;
- for (i = 0; i < ARRAY_SIZE(s->qctrl); i++)
+ for (i = 0; i < ARRAY_SIZE(s->qctrl); i++) {
if (ctrl.id == s->qctrl[i].id) {
if (s->qctrl[i].flags & V4L2_CTRL_FLAG_DISABLED)
return -EINVAL;
@@ -1723,7 +1723,9 @@ et61x251_vidioc_s_ctrl(struct et61x251_device* cam, void __user * arg)
ctrl.value -= ctrl.value % s->qctrl[i].step;
break;
}
-
+ }
+ if (i == ARRAY_SIZE(s->qctrl))
+ return -EINVAL;
if ((err = s->set_ctrl(cam, &ctrl)))
return err;
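
The et61x251 hunk above adopts the "search the table, then test the loop index" pattern so an unknown control id is rejected instead of silently falling through to set_ctrl(). A small sketch of that pattern under hypothetical names (known_ids, lookup_ctrl), not taken from the driver.

/*
 * Illustrative sketch only, not part of the patch.
 */
#include <stddef.h>

#define N_CTRLS 4

static const int known_ids[N_CTRLS] = { 1, 2, 5, 9 };

static int lookup_ctrl(int ctrl_id)
{
	size_t i;

	for (i = 0; i < N_CTRLS; i++) {
		if (known_ids[i] == ctrl_id)
			break;		/* found: i indexes the entry */
	}
	if (i == N_CTRLS)
		return -1;		/* ran off the end: unknown control */
	return (int)i;
}
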
diff --git a/drivers/media/video/font.h b/drivers/media/video/font.h
deleted file mode 100644
index 8b1fecc37599..000000000000
--- a/drivers/media/video/font.h
+++ /dev/null
@@ -1,407 +0,0 @@
-static unsigned char rom8x16_bits[] = {
-/* Character 0 (0x30):
- ht=16, width=8
- +--------+
- | |
- | |
- | ***** |
- |** ** |
- |** ** |
- |** *** |
- |** **** |
- |**** ** |
- |*** ** |
- |** ** |
- |** ** |
- | ***** |
- | |
- | |
- | |
- | |
- +--------+ */
-0x00,
-0x00,
-0x7c,
-0xc6,
-0xc6,
-0xce,
-0xde,
-0xf6,
-0xe6,
-0xc6,
-0xc6,
-0x7c,
-0x00,
-0x00,
-0x00,
-0x00,
-
-/* Character 1 (0x31):
- ht=16, width=8
- +--------+
- | |
- | |
- | ** |
- | **** |
- | ** |
- | ** |
- | ** |
- | ** |
- | ** |
- | ** |
- | ** |
- | ****** |
- | |
- | |
- | |
- | |
- +--------+ */
-0x00,
-0x00,
-0x18,
-0x78,
-0x18,
-0x18,
-0x18,
-0x18,
-0x18,
-0x18,
-0x18,
-0x7e,
-0x00,
-0x00,
-0x00,
-0x00,
-
-/* Character 2 (0x32):
- ht=16, width=8
- +--------+
- | |
- | |
- | ***** |
- |** ** |
- |** ** |
- | ** |
- | ** |
- | ** |
- | ** |
- | ** |
- |** ** |
- |******* |
- | |
- | |
- | |
- | |
- +--------+ */
-0x00,
-0x00,
-0x7c,
-0xc6,
-0xc6,
-0x06,
-0x0c,
-0x18,
-0x30,
-0x60,
-0xc6,
-0xfe,
-0x00,
-0x00,
-0x00,
-0x00,
-
-/* Character 3 (0x33):
- ht=16, width=8
- +--------+
- | |
- | |
- | ***** |
- |** ** |
- | ** |
- | ** |
- | **** |
- | ** |
- | ** |
- | ** |
- |** ** |
- | ***** |
- | |
- | |
- | |
- | |
- +--------+ */
-0x00,
-0x00,
-0x7c,
-0xc6,
-0x06,
-0x06,
-0x3c,
-0x06,
-0x06,
-0x06,
-0xc6,
-0x7c,
-0x00,
-0x00,
-0x00,
-0x00,
-
-/* Character 4 (0x34):
- ht=16, width=8
- +--------+
- | |
- | |
- | ** |
- | *** |
- | **** |
- | ** ** |
- |** ** |
- |** ** |
- |******* |
- | ** |
- | ** |
- | **** |
- | |
- | |
- | |
- | |
- +--------+ */
-0x00,
-0x00,
-0x0c,
-0x1c,
-0x3c,
-0x6c,
-0xcc,
-0xcc,
-0xfe,
-0x0c,
-0x0c,
-0x1e,
-0x00,
-0x00,
-0x00,
-0x00,
-
-/* Character 5 (0x35):
- ht=16, width=8
- +--------+
- | |
- | |
- |******* |
- |** |
- |** |
- |** |
- |****** |
- | ** |
- | ** |
- | ** |
- |** ** |
- | ***** |
- | |
- | |
- | |
- | |
- +--------+ */
-0x00,
-0x00,
-0xfe,
-0xc0,
-0xc0,
-0xc0,
-0xfc,
-0x06,
-0x06,
-0x06,
-0xc6,
-0x7c,
-0x00,
-0x00,
-0x00,
-0x00,
-
-/* Character 6 (0x36):
- ht=16, width=8
- +--------+
- | |
- | |
- | ***** |
- |** ** |
- |** |
- |** |
- |****** |
- |** ** |
- |** ** |
- |** ** |
- |** ** |
- | ***** |
- | |
- | |
- | |
- | |
- +--------+ */
-0x00,
-0x00,
-0x7c,
-0xc6,
-0xc0,
-0xc0,
-0xfc,
-0xc6,
-0xc6,
-0xc6,
-0xc6,
-0x7c,
-0x00,
-0x00,
-0x00,
-0x00,
-
-/* Character 7 (0x37):
- ht=16, width=8
- +--------+
- | |
- | |
- |******* |
- |** ** |
- | ** |
- | ** |
- | ** |
- | ** |
- | ** |
- | ** |
- | ** |
- | ** |
- | |
- | |
- | |
- | |
- +--------+ */
-0x00,
-0x00,
-0xfe,
-0xc6,
-0x06,
-0x0c,
-0x18,
-0x30,
-0x30,
-0x30,
-0x30,
-0x30,
-0x00,
-0x00,
-0x00,
-0x00,
-
-/* Character 8 (0x38):
- ht=16, width=8
- +--------+
- | |
- | |
- | ***** |
- |** ** |
- |** ** |
- |** ** |
- | ***** |
- |** ** |
- |** ** |
- |** ** |
- |** ** |
- | ***** |
- | |
- | |
- | |
- | |
- +--------+ */
-0x00,
-0x00,
-0x7c,
-0xc6,
-0xc6,
-0xc6,
-0x7c,
-0xc6,
-0xc6,
-0xc6,
-0xc6,
-0x7c,
-0x00,
-0x00,
-0x00,
-0x00,
-
-/* Character 9 (0x39):
- ht=16, width=8
- +--------+
- | |
- | |
- | ***** |
- |** ** |
- |** ** |
- |** ** |
- |** ** |
- | ****** |
- | ** |
- | ** |
- |** ** |
- | ***** |
- | |
- | |
- | |
- | |
- +--------+ */
-0x00,
-0x00,
-0x7c,
-0xc6,
-0xc6,
-0xc6,
-0xc6,
-0x7e,
-0x06,
-0x06,
-0xc6,
-0x7c,
-0x00,
-0x00,
-0x00,
-0x00,
-/* Character : (0x3a):
- ht=16, width=8
- +--------+
- | |
- | |
- | |
- | |
- | |
- | ** |
- | ** |
- | |
- | |
- | ** |
- | ** |
- | |
- | |
- | |
- | |
- | |
- +--------+ */
-0x00,
-0x00,
-0x00,
-0x00,
-0x00,
-0x0c,
-0x0c,
-0x00,
-0x00,
-0x0c,
-0x0c,
-0x00,
-0x00,
-0x00,
-0x00,
-0x00,
-};
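
For reference, a small self-contained sketch of how a glyph from a table like the removed rom8x16_bits[] maps to pixels: each character is 16 consecutive bytes, one byte per row, with the most significant bit as the leftmost pixel. The helper names are hypothetical; the byte values are the digit '0' pattern from the table above.

/*
 * Illustrative sketch only, not part of the patch.
 */
#include <stdio.h>

static void draw_glyph(const unsigned char *glyph /* 16 bytes */)
{
	int row, col;

	for (row = 0; row < 16; row++) {
		for (col = 0; col < 8; col++)
			putchar(glyph[row] & (0x80 >> col) ? '*' : ' ');
		putchar('\n');
	}
}

int main(void)
{
	static const unsigned char zero[16] = {
		0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xce, 0xde, 0xf6,
		0xe6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
	};

	draw_glyph(zero);
	return 0;
}
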
diff --git a/drivers/media/video/gspca/Kconfig b/drivers/media/video/gspca/Kconfig
index e0060c1f0544..5d920e584de7 100644
--- a/drivers/media/video/gspca/Kconfig
+++ b/drivers/media/video/gspca/Kconfig
@@ -172,12 +172,6 @@ config USB_GSPCA_SN9C20X
To compile this driver as a module, choose M here: the
module will be called gspca_sn9c20x.
-config USB_GSPCA_SN9C20X_EVDEV
- bool "Enable evdev support"
- depends on USB_GSPCA_SN9C20X && INPUT
- ---help---
- Say Y here in order to enable evdev support for sn9c20x webcam button.
-
config USB_GSPCA_SONIXB
tristate "SONIX Bayer USB Camera Driver"
depends on VIDEO_V4L2 && USB_GSPCA
diff --git a/drivers/media/video/gspca/cpia1.c b/drivers/media/video/gspca/cpia1.c
index 82945ed5cbe5..58b696f455be 100644
--- a/drivers/media/video/gspca/cpia1.c
+++ b/drivers/media/video/gspca/cpia1.c
@@ -374,7 +374,7 @@ static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setcomptarget(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getcomptarget(struct gspca_dev *gspca_dev, __s32 *val);
-static struct ctrl sd_ctrls[] = {
+static const struct ctrl sd_ctrls[] = {
{
{
.id = V4L2_CID_BRIGHTNESS,
@@ -861,7 +861,7 @@ static int save_camera_state(struct gspca_dev *gspca_dev)
return do_command(gspca_dev, CPIA_COMMAND_GetExposure, 0, 0, 0, 0);
}
-int command_setformat(struct gspca_dev *gspca_dev)
+static int command_setformat(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
int ret;
@@ -878,7 +878,7 @@ int command_setformat(struct gspca_dev *gspca_dev)
sd->params.roi.rowStart, sd->params.roi.rowEnd);
}
-int command_setcolourparams(struct gspca_dev *gspca_dev)
+static int command_setcolourparams(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
return do_command(gspca_dev, CPIA_COMMAND_SetColourParams,
@@ -887,7 +887,7 @@ int command_setcolourparams(struct gspca_dev *gspca_dev)
sd->params.colourParams.saturation, 0);
}
-int command_setapcor(struct gspca_dev *gspca_dev)
+static int command_setapcor(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
return do_command(gspca_dev, CPIA_COMMAND_SetApcor,
@@ -897,7 +897,7 @@ int command_setapcor(struct gspca_dev *gspca_dev)
sd->params.apcor.gain8);
}
-int command_setvloffset(struct gspca_dev *gspca_dev)
+static int command_setvloffset(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
return do_command(gspca_dev, CPIA_COMMAND_SetVLOffset,
@@ -907,7 +907,7 @@ int command_setvloffset(struct gspca_dev *gspca_dev)
sd->params.vlOffset.gain8);
}
-int command_setexposure(struct gspca_dev *gspca_dev)
+static int command_setexposure(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
int ret;
@@ -943,7 +943,7 @@ int command_setexposure(struct gspca_dev *gspca_dev)
return ret;
}
-int command_setcolourbalance(struct gspca_dev *gspca_dev)
+static int command_setcolourbalance(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -973,7 +973,7 @@ int command_setcolourbalance(struct gspca_dev *gspca_dev)
return -EINVAL;
}
-int command_setcompressiontarget(struct gspca_dev *gspca_dev)
+static int command_setcompressiontarget(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -983,7 +983,7 @@ int command_setcompressiontarget(struct gspca_dev *gspca_dev)
sd->params.compressionTarget.targetQ, 0);
}
-int command_setyuvtresh(struct gspca_dev *gspca_dev)
+static int command_setyuvtresh(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -992,7 +992,7 @@ int command_setyuvtresh(struct gspca_dev *gspca_dev)
sd->params.yuvThreshold.uvThreshold, 0, 0);
}
-int command_setcompressionparams(struct gspca_dev *gspca_dev)
+static int command_setcompressionparams(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1009,7 +1009,7 @@ int command_setcompressionparams(struct gspca_dev *gspca_dev)
sd->params.compressionParams.decimationThreshMod);
}
-int command_setcompression(struct gspca_dev *gspca_dev)
+static int command_setcompression(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1018,7 +1018,7 @@ int command_setcompression(struct gspca_dev *gspca_dev)
sd->params.compression.decimation, 0, 0);
}
-int command_setsensorfps(struct gspca_dev *gspca_dev)
+static int command_setsensorfps(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1027,7 +1027,7 @@ int command_setsensorfps(struct gspca_dev *gspca_dev)
sd->params.sensorFps.baserate, 0, 0);
}
-int command_setflickerctrl(struct gspca_dev *gspca_dev)
+static int command_setflickerctrl(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1038,7 +1038,7 @@ int command_setflickerctrl(struct gspca_dev *gspca_dev)
0);
}
-int command_setecptiming(struct gspca_dev *gspca_dev)
+static int command_setecptiming(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1046,12 +1046,12 @@ int command_setecptiming(struct gspca_dev *gspca_dev)
sd->params.ecpTiming, 0, 0, 0);
}
-int command_pause(struct gspca_dev *gspca_dev)
+static int command_pause(struct gspca_dev *gspca_dev)
{
return do_command(gspca_dev, CPIA_COMMAND_EndStreamCap, 0, 0, 0, 0);
}
-int command_resume(struct gspca_dev *gspca_dev)
+static int command_resume(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1059,7 +1059,8 @@ int command_resume(struct gspca_dev *gspca_dev)
0, sd->params.streamStartLine, 0, 0);
}
-int command_setlights(struct gspca_dev *gspca_dev)
+#if 0 /* Currently unused */
+static int command_setlights(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
int ret, p1, p2;
@@ -1078,6 +1079,7 @@ int command_setlights(struct gspca_dev *gspca_dev)
return do_command(gspca_dev, CPIA_COMMAND_WriteMCPort, 2, 0,
p1 | p2 | 0xE0, 0);
}
+#endif
static int set_flicker(struct gspca_dev *gspca_dev, int on, int apply)
{
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index 222af479150b..efe615938783 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -1,7 +1,7 @@
/*
* Main USB camera driver
*
- * Copyright (C) 2008-2009 Jean-Francois Moine (http://moinejf.free.fr)
+ * Copyright (C) 2008-2010 Jean-François Moine <http://moinejf.free.fr>
*
* Camera button input handling by Márton Németh
* Copyright (C) 2009-2010 Márton Németh <nm127@freemail.hu>
@@ -35,12 +35,12 @@
#include <linux/io.h>
#include <asm/page.h>
#include <linux/uaccess.h>
-#include <linux/jiffies.h>
+#include <linux/ktime.h>
#include <media/v4l2-ioctl.h>
#include "gspca.h"
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
#include <linux/input.h>
#include <linux/usb/input.h>
#endif
@@ -51,7 +51,7 @@
#error "DEF_NURBS too big"
#endif
-MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>");
+MODULE_AUTHOR("Jean-François Moine <http://moinejf.free.fr>");
MODULE_DESCRIPTION("GSPCA USB Camera Driver");
MODULE_LICENSE("GPL");
@@ -115,7 +115,7 @@ static const struct vm_operations_struct gspca_vm_ops = {
/*
* Input and interrupt endpoint handling functions
*/
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
static void int_irq(struct urb *urb)
{
struct gspca_dev *gspca_dev = (struct gspca_dev *) urb->context;
@@ -199,7 +199,7 @@ static int alloc_and_submit_int_urb(struct gspca_dev *gspca_dev,
void *buffer = NULL;
int ret = -EINVAL;
- buffer_len = ep->wMaxPacketSize;
+ buffer_len = le16_to_cpu(ep->wMaxPacketSize);
interval = ep->bInterval;
PDEBUG(D_PROBE, "found int in endpoint: 0x%x, "
"buffer_len=%u, interval=%u",
@@ -213,7 +213,7 @@ static int alloc_and_submit_int_urb(struct gspca_dev *gspca_dev,
goto error;
}
- buffer = usb_buffer_alloc(dev, ep->wMaxPacketSize,
+ buffer = usb_buffer_alloc(dev, buffer_len,
GFP_KERNEL, &urb->transfer_dma);
if (!buffer) {
ret = -ENOMEM;
@@ -280,9 +280,18 @@ static void gspca_input_destroy_urb(struct gspca_dev *gspca_dev)
}
}
#else
-#define gspca_input_connect(gspca_dev) 0
-#define gspca_input_create_urb(gspca_dev)
-#define gspca_input_destroy_urb(gspca_dev)
+static inline void gspca_input_destroy_urb(struct gspca_dev *gspca_dev)
+{
+}
+
+static inline void gspca_input_create_urb(struct gspca_dev *gspca_dev)
+{
+}
+
+static inline int gspca_input_connect(struct gspca_dev *dev)
+{
+ return 0;
+}
#endif
/* get the current input frame buffer */
@@ -442,8 +451,7 @@ void gspca_frame_add(struct gspca_dev *gspca_dev,
* is not queued, discard the whole frame */
if (packet_type == FIRST_PACKET) {
frame->data_end = frame->data;
- jiffies_to_timeval(get_jiffies_64(),
- &frame->v4l2_buf.timestamp);
+ frame->v4l2_buf.timestamp = ktime_to_timeval(ktime_get());
frame->v4l2_buf.sequence = ++gspca_dev->sequence;
} else if (gspca_dev->last_packet_type == DISCARD_PACKET) {
if (packet_type == LAST_PACKET)
@@ -605,6 +613,37 @@ static void destroy_urbs(struct gspca_dev *gspca_dev)
}
}
+static int gspca_set_alt0(struct gspca_dev *gspca_dev)
+{
+ int ret;
+
+ if (gspca_dev->alt == 0)
+ return 0;
+ ret = usb_set_interface(gspca_dev->dev, gspca_dev->iface, 0);
+ if (ret < 0)
+ PDEBUG(D_ERR|D_STREAM, "set alt 0 err %d", ret);
+ return ret;
+}
+
+/* Note: both the queue and the usb locks should be held when calling this */
+static void gspca_stream_off(struct gspca_dev *gspca_dev)
+{
+ gspca_dev->streaming = 0;
+ if (gspca_dev->present) {
+ if (gspca_dev->sd_desc->stopN)
+ gspca_dev->sd_desc->stopN(gspca_dev);
+ destroy_urbs(gspca_dev);
+ gspca_input_destroy_urb(gspca_dev);
+ gspca_set_alt0(gspca_dev);
+ gspca_input_create_urb(gspca_dev);
+ }
+
+ /* always call stop0 to free the subdriver's resources */
+ if (gspca_dev->sd_desc->stop0)
+ gspca_dev->sd_desc->stop0(gspca_dev);
+ PDEBUG(D_STREAM, "stream off OK");
+}
+
/*
* look for an input transfer endpoint in an alternate setting
*/
@@ -830,8 +869,7 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev)
}
if (ret >= 0)
break;
- gspca_dev->streaming = 0;
- destroy_urbs(gspca_dev);
+ gspca_stream_off(gspca_dev);
if (ret != -ENOSPC) {
PDEBUG(D_ERR|D_STREAM,
"usb_submit_urb alt %d err %d",
@@ -861,37 +899,6 @@ out:
return ret;
}
-static int gspca_set_alt0(struct gspca_dev *gspca_dev)
-{
- int ret;
-
- if (gspca_dev->alt == 0)
- return 0;
- ret = usb_set_interface(gspca_dev->dev, gspca_dev->iface, 0);
- if (ret < 0)
- PDEBUG(D_ERR|D_STREAM, "set alt 0 err %d", ret);
- return ret;
-}
-
-/* Note: both the queue and the usb locks should be held when calling this */
-static void gspca_stream_off(struct gspca_dev *gspca_dev)
-{
- gspca_dev->streaming = 0;
- if (gspca_dev->present) {
- if (gspca_dev->sd_desc->stopN)
- gspca_dev->sd_desc->stopN(gspca_dev);
- destroy_urbs(gspca_dev);
- gspca_input_destroy_urb(gspca_dev);
- gspca_set_alt0(gspca_dev);
- gspca_input_create_urb(gspca_dev);
- }
-
- /* always call stop0 to free the subdriver's resources */
- if (gspca_dev->sd_desc->stop0)
- gspca_dev->sd_desc->stop0(gspca_dev);
- PDEBUG(D_STREAM, "stream off OK");
-}
-
static void gspca_set_default_mode(struct gspca_dev *gspca_dev)
{
int i;
@@ -1495,7 +1502,7 @@ static int vidioc_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *rb)
{
struct gspca_dev *gspca_dev = priv;
- int i, ret = 0;
+ int i, ret = 0, streaming;
switch (rb->memory) {
case GSPCA_MEMORY_READ: /* (internal call) */
@@ -1530,7 +1537,8 @@ static int vidioc_reqbufs(struct file *file, void *priv,
}
/* stop streaming */
- if (gspca_dev->streaming) {
+ streaming = gspca_dev->streaming;
+ if (streaming) {
mutex_lock(&gspca_dev->usb_lock);
gspca_dev->usb_err = 0;
gspca_stream_off(gspca_dev);
@@ -1549,6 +1557,8 @@ static int vidioc_reqbufs(struct file *file, void *priv,
if (ret == 0) {
rb->count = gspca_dev->nframes;
gspca_dev->capt_file = file;
+ if (streaming)
+ ret = gspca_init_transfer(gspca_dev);
}
out:
mutex_unlock(&gspca_dev->queue_lock);
@@ -1582,6 +1592,12 @@ static int vidioc_streamon(struct file *file, void *priv,
if (mutex_lock_interruptible(&gspca_dev->queue_lock))
return -ERESTARTSYS;
+ /* check the capture file */
+ if (gspca_dev->capt_file != file) {
+ ret = -EBUSY;
+ goto out;
+ }
+
if (gspca_dev->nframes == 0
|| !(gspca_dev->frame[0].v4l2_buf.flags & V4L2_BUF_FLAG_QUEUED)) {
ret = -EINVAL;
@@ -1619,6 +1635,12 @@ static int vidioc_streamoff(struct file *file, void *priv,
if (mutex_lock_interruptible(&gspca_dev->queue_lock))
return -ERESTARTSYS;
+ /* check the capture file */
+ if (gspca_dev->capt_file != file) {
+ ret = -EBUSY;
+ goto out;
+ }
+
/* stop streaming */
if (mutex_lock_interruptible(&gspca_dev->usb_lock)) {
ret = -ERESTARTSYS;
@@ -2124,7 +2146,7 @@ static ssize_t dev_read(struct file *file, char __user *data,
}
/* get a frame */
- jiffies_to_timeval(get_jiffies_64(), &timestamp);
+ timestamp = ktime_to_timeval(ktime_get());
timestamp.tv_sec--;
n = 2;
for (;;) {
@@ -2315,7 +2337,7 @@ int gspca_dev_probe(struct usb_interface *intf,
return 0;
out:
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
if (gspca_dev->input_dev)
input_unregister_device(gspca_dev->input_dev);
#endif
@@ -2334,7 +2356,7 @@ EXPORT_SYMBOL(gspca_dev_probe);
void gspca_disconnect(struct usb_interface *intf)
{
struct gspca_dev *gspca_dev = usb_get_intfdata(intf);
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
struct input_dev *input_dev;
#endif
@@ -2348,7 +2370,7 @@ void gspca_disconnect(struct usb_interface *intf)
wake_up_interruptible(&gspca_dev->wq);
}
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
gspca_input_destroy_urb(gspca_dev);
input_dev = gspca_dev->input_dev;
if (input_dev) {
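
The gspca.c change above replaces the empty #define stubs used when input support is compiled out with empty static inline functions, which keep type-checking their argument. A hedged sketch of that idiom with hypothetical names (demo_dev, demo_input_connect, DEMO_HAVE_INPUT), not part of the patch.

/*
 * Illustrative sketch only, not part of the patch.
 */
struct demo_dev { int unused; };

#ifdef DEMO_HAVE_INPUT
int demo_input_connect(struct demo_dev *dev);	/* real implementation elsewhere */
#else
static inline int demo_input_connect(struct demo_dev *dev)
{
	(void)dev;		/* still a type-checked parameter */
	return 0;		/* no input support: report success */
}
#endif
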
diff --git a/drivers/media/video/gspca/gspca.h b/drivers/media/video/gspca/gspca.h
index 8bb242fb79de..8b963dfae861 100644
--- a/drivers/media/video/gspca/gspca.h
+++ b/drivers/media/video/gspca/gspca.h
@@ -130,7 +130,7 @@ struct sd_desc {
cam_reg_op get_register;
#endif
cam_ident_op get_chip_ident;
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
cam_int_pkt_op int_pkt_scan;
/* other_input makes the gspca core create gspca_dev->input even when
int_pkt_scan is NULL, for cams with non interrupt driven buttons */
@@ -158,7 +158,7 @@ struct gspca_dev {
struct module *module; /* subdriver handling the device */
struct usb_device *dev;
struct file *capt_file; /* file doing video capture */
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
struct input_dev *input_dev;
char phys[64]; /* physical device path */
#endif
@@ -171,7 +171,7 @@ struct gspca_dev {
#define USB_BUF_SZ 64
__u8 *usb_buf; /* buffer for USB exchanges */
struct urb *urb[MAX_NURBS];
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
struct urb *int_urb;
#endif
diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c
index 957e05e2d08f..dc1e4efe30fb 100644
--- a/drivers/media/video/gspca/ov534.c
+++ b/drivers/media/video/gspca/ov534.c
@@ -10,8 +10,8 @@
* https://jim.sh/svn/jim/devl/playstation/ps3/eye/test/
*
* PS3 Eye camera enhanced by Richard Kaswy http://kaswy.free.fr
- * PS3 Eye camera, brightness, contrast, hue, AWB control added
- * by Max Thrun <bear24rw@gmail.com>
+ * PS3 Eye camera - brightness, contrast, awb, agc, aec controls
+ * added by Max Thrun <bear24rw@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -60,15 +60,13 @@ struct sd {
u8 contrast;
u8 gain;
u8 exposure;
- u8 redblc;
- u8 blueblc;
- u8 hue;
- u8 autogain;
+ u8 agc;
u8 awb;
+ u8 aec;
s8 sharpness;
u8 hflip;
u8 vflip;
-
+ u8 freqfltr;
};
/* V4L2 controls supported by the driver */
@@ -76,197 +74,183 @@ static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setredblc(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getredblc(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setblueblc(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getblueblc(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setagc(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getagc(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val);
static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_sethue(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_gethue(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setawb(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getawb(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setaec(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getaec(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setfreqfltr(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getfreqfltr(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_querymenu(struct gspca_dev *gspca_dev,
+ struct v4l2_querymenu *menu);
static const struct ctrl sd_ctrls[] = {
- { /* 0 */
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
-#define BRIGHTNESS_DEF 20
- .default_value = BRIGHTNESS_DEF,
+ { /* 0 */
+ {
+ .id = V4L2_CID_BRIGHTNESS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Brightness",
+ .minimum = 0,
+ .maximum = 255,
+ .step = 1,
+#define BRIGHTNESS_DEF 0
+ .default_value = BRIGHTNESS_DEF,
+ },
+ .set = sd_setbrightness,
+ .get = sd_getbrightness,
},
- .set = sd_setbrightness,
- .get = sd_getbrightness,
- },
- { /* 1 */
- {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
-#define CONTRAST_DEF 37
- .default_value = CONTRAST_DEF,
+ { /* 1 */
+ {
+ .id = V4L2_CID_CONTRAST,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Contrast",
+ .minimum = 0,
+ .maximum = 255,
+ .step = 1,
+#define CONTRAST_DEF 32
+ .default_value = CONTRAST_DEF,
+ },
+ .set = sd_setcontrast,
+ .get = sd_getcontrast,
},
- .set = sd_setcontrast,
- .get = sd_getcontrast,
- },
- { /* 2 */
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Main Gain",
- .minimum = 0,
- .maximum = 63,
- .step = 1,
+ { /* 2 */
+ {
+ .id = V4L2_CID_GAIN,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Main Gain",
+ .minimum = 0,
+ .maximum = 63,
+ .step = 1,
#define GAIN_DEF 20
- .default_value = GAIN_DEF,
+ .default_value = GAIN_DEF,
+ },
+ .set = sd_setgain,
+ .get = sd_getgain,
},
- .set = sd_setgain,
- .get = sd_getgain,
- },
- { /* 3 */
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Exposure",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
+ { /* 3 */
+ {
+ .id = V4L2_CID_EXPOSURE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Exposure",
+ .minimum = 0,
+ .maximum = 255,
+ .step = 1,
#define EXPO_DEF 120
- .default_value = EXPO_DEF,
- },
- .set = sd_setexposure,
- .get = sd_getexposure,
- },
- { /* 4 */
- {
- .id = V4L2_CID_RED_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Red Balance",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
-#define RED_BALANCE_DEF 128
- .default_value = RED_BALANCE_DEF,
+ .default_value = EXPO_DEF,
+ },
+ .set = sd_setexposure,
+ .get = sd_getexposure,
},
- .set = sd_setredblc,
- .get = sd_getredblc,
- },
- { /* 5 */
- {
- .id = V4L2_CID_BLUE_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Blue Balance",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
-#define BLUE_BALANCE_DEF 128
- .default_value = BLUE_BALANCE_DEF,
+ { /* 4 */
+ {
+ .id = V4L2_CID_AUTOGAIN,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Auto Gain",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+#define AGC_DEF 1
+ .default_value = AGC_DEF,
+ },
+ .set = sd_setagc,
+ .get = sd_getagc,
},
- .set = sd_setblueblc,
- .get = sd_getblueblc,
- },
- { /* 6 */
- {
- .id = V4L2_CID_HUE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Hue",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
-#define HUE_DEF 143
- .default_value = HUE_DEF,
+#define AWB_IDX 5
+ { /* 5 */
+ {
+ .id = V4L2_CID_AUTO_WHITE_BALANCE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Auto White Balance",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+#define AWB_DEF 1
+ .default_value = AWB_DEF,
+ },
+ .set = sd_setawb,
+ .get = sd_getawb,
},
- .set = sd_sethue,
- .get = sd_gethue,
- },
- { /* 7 */
- {
- .id = V4L2_CID_AUTOGAIN,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Autogain",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
-#define AUTOGAIN_DEF 0
- .default_value = AUTOGAIN_DEF,
+ { /* 6 */
+ {
+ .id = V4L2_CID_EXPOSURE_AUTO,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Auto Exposure",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+#define AEC_DEF 1
+ .default_value = AEC_DEF,
+ },
+ .set = sd_setaec,
+ .get = sd_getaec,
},
- .set = sd_setautogain,
- .get = sd_getautogain,
- },
-#define AWB_IDX 8
- { /* 8 */
- {
- .id = V4L2_CID_AUTO_WHITE_BALANCE,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Auto White Balance",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
-#define AWB_DEF 0
- .default_value = AWB_DEF,
- },
- .set = sd_setawb,
- .get = sd_getawb,
- },
- { /* 9 */
- {
- .id = V4L2_CID_SHARPNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Sharpness",
- .minimum = 0,
- .maximum = 63,
- .step = 1,
+ { /* 7 */
+ {
+ .id = V4L2_CID_SHARPNESS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Sharpness",
+ .minimum = 0,
+ .maximum = 63,
+ .step = 1,
#define SHARPNESS_DEF 0
- .default_value = SHARPNESS_DEF,
+ .default_value = SHARPNESS_DEF,
+ },
+ .set = sd_setsharpness,
+ .get = sd_getsharpness,
},
- .set = sd_setsharpness,
- .get = sd_getsharpness,
- },
- { /* 10 */
- {
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "HFlip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
+ { /* 8 */
+ {
+ .id = V4L2_CID_HFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "HFlip",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
#define HFLIP_DEF 0
- .default_value = HFLIP_DEF,
+ .default_value = HFLIP_DEF,
+ },
+ .set = sd_sethflip,
+ .get = sd_gethflip,
},
- .set = sd_sethflip,
- .get = sd_gethflip,
- },
- { /* 11 */
- {
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "VFlip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
+ { /* 9 */
+ {
+ .id = V4L2_CID_VFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "VFlip",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
#define VFLIP_DEF 0
- .default_value = VFLIP_DEF,
+ .default_value = VFLIP_DEF,
+ },
+ .set = sd_setvflip,
+ .get = sd_getvflip,
+ },
+ { /* 10 */
+ {
+ .id = V4L2_CID_POWER_LINE_FREQUENCY,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .name = "Light Frequency Filter",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+#define FREQFLTR_DEF 0
+ .default_value = FREQFLTR_DEF,
+ },
+ .set = sd_setfreqfltr,
+ .get = sd_getfreqfltr,
},
- .set = sd_setvflip,
- .get = sd_getvflip,
- },
};
static const struct v4l2_pix_format ov772x_mode[] = {
@@ -675,14 +659,14 @@ static void setbrightness(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- sccb_reg_write(gspca_dev, 0x9B, sd->brightness);
+ sccb_reg_write(gspca_dev, 0x9b, sd->brightness);
}
static void setcontrast(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- sccb_reg_write(gspca_dev, 0x9C, sd->contrast);
+ sccb_reg_write(gspca_dev, 0x9c, sd->contrast);
}
static void setgain(struct gspca_dev *gspca_dev)
@@ -690,6 +674,9 @@ static void setgain(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
u8 val;
+ if (sd->agc)
+ return;
+
val = sd->gain;
switch (val & 0x30) {
case 0x00:
@@ -717,55 +704,68 @@ static void setexposure(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
u8 val;
+ if (sd->aec)
+ return;
+
+ /* 'val' is one byte and represents half of the exposure value we are
+ * going to set into registers, a two bytes value:
+ *
+ * MSB: ((u16) val << 1) >> 8 == val >> 7
+ * LSB: ((u16) val << 1) & 0xff == val << 1
+ */
val = sd->exposure;
sccb_reg_write(gspca_dev, 0x08, val >> 7);
sccb_reg_write(gspca_dev, 0x10, val << 1);
}
-static void setredblc(struct gspca_dev *gspca_dev)
+static void setagc(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- sccb_reg_write(gspca_dev, 0x43, sd->redblc);
-}
-
-static void setblueblc(struct gspca_dev *gspca_dev)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sccb_reg_write(gspca_dev, 0x42, sd->blueblc);
-}
-
-static void sethue(struct gspca_dev *gspca_dev)
-{
- struct sd *sd = (struct sd *) gspca_dev;
+ if (sd->agc) {
+ sccb_reg_write(gspca_dev, 0x13,
+ sccb_reg_read(gspca_dev, 0x13) | 0x04);
+ sccb_reg_write(gspca_dev, 0x64,
+ sccb_reg_read(gspca_dev, 0x64) | 0x03);
+ } else {
+ sccb_reg_write(gspca_dev, 0x13,
+ sccb_reg_read(gspca_dev, 0x13) & ~0x04);
+ sccb_reg_write(gspca_dev, 0x64,
+ sccb_reg_read(gspca_dev, 0x64) & ~0x03);
- sccb_reg_write(gspca_dev, 0x01, sd->hue);
+ setgain(gspca_dev);
+ }
}
-static void setautogain(struct gspca_dev *gspca_dev)
+static void setawb(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- if (sd->autogain) {
- sccb_reg_write(gspca_dev, 0x13, 0xf7); /* AGC,AEC,AWB ON */
- sccb_reg_write(gspca_dev, 0x64,
- sccb_reg_read(gspca_dev, 0x64) | 0x03);
+ if (sd->awb) {
+ sccb_reg_write(gspca_dev, 0x13,
+ sccb_reg_read(gspca_dev, 0x13) | 0x02);
+ sccb_reg_write(gspca_dev, 0x63,
+ sccb_reg_read(gspca_dev, 0x63) | 0xc0);
} else {
- sccb_reg_write(gspca_dev, 0x13, 0xf0); /* AGC,AEC,AWB OFF */
- sccb_reg_write(gspca_dev, 0x64,
- sccb_reg_read(gspca_dev, 0x64) & 0xfc);
+ sccb_reg_write(gspca_dev, 0x13,
+ sccb_reg_read(gspca_dev, 0x13) & ~0x02);
+ sccb_reg_write(gspca_dev, 0x63,
+ sccb_reg_read(gspca_dev, 0x63) & ~0xc0);
}
}
-static void setawb(struct gspca_dev *gspca_dev)
+static void setaec(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- if (sd->awb)
- sccb_reg_write(gspca_dev, 0x63, 0xe0); /* AWB on */
- else
- sccb_reg_write(gspca_dev, 0x63, 0xaa); /* AWB off */
+ if (sd->aec)
+ sccb_reg_write(gspca_dev, 0x13,
+ sccb_reg_read(gspca_dev, 0x13) | 0x01);
+ else {
+ sccb_reg_write(gspca_dev, 0x13,
+ sccb_reg_read(gspca_dev, 0x13) & ~0x01);
+ setexposure(gspca_dev);
+ }
}
static void setsharpness(struct gspca_dev *gspca_dev)
@@ -774,8 +774,8 @@ static void setsharpness(struct gspca_dev *gspca_dev)
u8 val;
val = sd->sharpness;
- sccb_reg_write(gspca_dev, 0x91, val); /* vga noise */
- sccb_reg_write(gspca_dev, 0x8e, val); /* qvga noise */
+ sccb_reg_write(gspca_dev, 0x91, val); /* Auto de-noise threshold */
+ sccb_reg_write(gspca_dev, 0x8e, val); /* De-noise threshold */
}
static void sethflip(struct gspca_dev *gspca_dev)
@@ -787,7 +787,7 @@ static void sethflip(struct gspca_dev *gspca_dev)
sccb_reg_read(gspca_dev, 0x0c) | 0x40);
else
sccb_reg_write(gspca_dev, 0x0c,
- sccb_reg_read(gspca_dev, 0x0c) & 0xbf);
+ sccb_reg_read(gspca_dev, 0x0c) & ~0x40);
}
static void setvflip(struct gspca_dev *gspca_dev)
@@ -799,9 +799,20 @@ static void setvflip(struct gspca_dev *gspca_dev)
sccb_reg_read(gspca_dev, 0x0c) | 0x80);
else
sccb_reg_write(gspca_dev, 0x0c,
- sccb_reg_read(gspca_dev, 0x0c) & 0x7f);
+ sccb_reg_read(gspca_dev, 0x0c) & ~0x80);
}
+static void setfreqfltr(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ if (sd->freqfltr == 0)
+ sccb_reg_write(gspca_dev, 0x2b, 0x00);
+ else
+ sccb_reg_write(gspca_dev, 0x2b, 0x9e);
+}
+
+
/* this function is called at probe time */
static int sd_config(struct gspca_dev *gspca_dev,
const struct usb_device_id *id)
@@ -825,26 +836,17 @@ static int sd_config(struct gspca_dev *gspca_dev,
sd->contrast = CONTRAST_DEF;
sd->gain = GAIN_DEF;
sd->exposure = EXPO_DEF;
- sd->redblc = RED_BALANCE_DEF;
- sd->blueblc = BLUE_BALANCE_DEF;
- sd->hue = HUE_DEF;
-#if AUTOGAIN_DEF != 0
- sd->autogain = AUTOGAIN_DEF;
+#if AGC_DEF != 0
+ sd->agc = AGC_DEF;
#else
gspca_dev->ctrl_inac |= (1 << AWB_IDX);
#endif
-#if AWB_DEF != 0
- sd->awb = AWB_DEF
-#endif
-#if SHARPNESS_DEF != 0
+ sd->awb = AWB_DEF;
+ sd->aec = AEC_DEF;
sd->sharpness = SHARPNESS_DEF;
-#endif
-#if HFLIP_DEF != 0
sd->hflip = HFLIP_DEF;
-#endif
-#if VFLIP_DEF != 0
sd->vflip = VFLIP_DEF;
-#endif
+ sd->freqfltr = FREQFLTR_DEF;
return 0;
}
@@ -904,18 +906,17 @@ static int sd_start(struct gspca_dev *gspca_dev)
}
set_frame_rate(gspca_dev);
- setautogain(gspca_dev);
+ setagc(gspca_dev);
setawb(gspca_dev);
+ setaec(gspca_dev);
setgain(gspca_dev);
- setredblc(gspca_dev);
- setblueblc(gspca_dev);
- sethue(gspca_dev);
setexposure(gspca_dev);
setbrightness(gspca_dev);
setcontrast(gspca_dev);
setsharpness(gspca_dev);
setvflip(gspca_dev);
sethflip(gspca_dev);
+ setfreqfltr(gspca_dev);
ov534_set_led(gspca_dev, 1);
ov534_reg_write(gspca_dev, 0xe0, 0x00);
@@ -1092,65 +1093,11 @@ static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
return 0;
}
-static int sd_setredblc(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->redblc = val;
- if (gspca_dev->streaming)
- setredblc(gspca_dev);
- return 0;
-}
-
-static int sd_getredblc(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->redblc;
- return 0;
-}
-
-static int sd_setblueblc(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->blueblc = val;
- if (gspca_dev->streaming)
- setblueblc(gspca_dev);
- return 0;
-}
-
-static int sd_getblueblc(struct gspca_dev *gspca_dev, __s32 *val)
+static int sd_setagc(struct gspca_dev *gspca_dev, __s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
- *val = sd->blueblc;
- return 0;
-}
-
-static int sd_sethue(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->hue = val;
- if (gspca_dev->streaming)
- sethue(gspca_dev);
- return 0;
-}
-
-static int sd_gethue(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->hue;
- return 0;
-}
-
-static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->autogain = val;
+ sd->agc = val;
if (gspca_dev->streaming) {
@@ -1160,16 +1107,16 @@ static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val)
gspca_dev->ctrl_inac &= ~(1 << AWB_IDX);
else
gspca_dev->ctrl_inac |= (1 << AWB_IDX);
- setautogain(gspca_dev);
+ setagc(gspca_dev);
}
return 0;
}
-static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val)
+static int sd_getagc(struct gspca_dev *gspca_dev, __s32 *val)
{
struct sd *sd = (struct sd *) gspca_dev;
- *val = sd->autogain;
+ *val = sd->agc;
return 0;
}
@@ -1191,6 +1138,24 @@ static int sd_getawb(struct gspca_dev *gspca_dev, __s32 *val)
return 0;
}
+static int sd_setaec(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->aec = val;
+ if (gspca_dev->streaming)
+ setaec(gspca_dev);
+ return 0;
+}
+
+static int sd_getaec(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->aec;
+ return 0;
+}
+
static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1245,6 +1210,43 @@ static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val)
return 0;
}
+static int sd_setfreqfltr(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->freqfltr = val;
+ if (gspca_dev->streaming)
+ setfreqfltr(gspca_dev);
+ return 0;
+}
+
+static int sd_getfreqfltr(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->freqfltr;
+ return 0;
+}
+
+static int sd_querymenu(struct gspca_dev *gspca_dev,
+ struct v4l2_querymenu *menu)
+{
+ switch (menu->id) {
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ switch (menu->index) {
+ case 0: /* V4L2_CID_POWER_LINE_FREQUENCY_DISABLED */
+ strcpy((char *) menu->name, "Disabled");
+ return 0;
+ case 1: /* V4L2_CID_POWER_LINE_FREQUENCY_50HZ */
+ strcpy((char *) menu->name, "50 Hz");
+ return 0;
+ }
+ break;
+ }
+
+ return -EINVAL;
+}
+
/* get stream parameters (framerate) */
static int sd_get_streamparm(struct gspca_dev *gspca_dev,
struct v4l2_streamparm *parm)
@@ -1296,6 +1298,7 @@ static const struct sd_desc sd_desc = {
.start = sd_start,
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
+ .querymenu = sd_querymenu,
.get_streamparm = sd_get_streamparm,
.set_streamparm = sd_set_streamparm,
};
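
A worked sketch of the exposure register split that the setexposure() comment above describes: the control value is one byte holding half of the 16-bit exposure, so the registers receive (val << 1) split into its two bytes. The helper and main() below are hypothetical and only restate the arithmetic from the comment.

/*
 * Illustrative sketch only, not part of the patch.
 */
#include <stdint.h>
#include <assert.h>

static void split_exposure(uint8_t val, uint8_t *msb, uint8_t *lsb)
{
	uint16_t exposure = (uint16_t)val << 1;	/* full 16-bit value */

	*msb = exposure >> 8;		/* equals val >> 7 */
	*lsb = exposure & 0xff;		/* equals (uint8_t)(val << 1) */
}

int main(void)
{
	uint8_t msb, lsb;

	split_exposure(120, &msb, &lsb);	/* EXPO_DEF in the driver */
	assert(msb == (120 >> 7) && lsb == (uint8_t)(120 << 1));
	return 0;
}
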
diff --git a/drivers/media/video/gspca/pac207.c b/drivers/media/video/gspca/pac207.c
index 0c87c3490b1e..a40f8893310d 100644
--- a/drivers/media/video/gspca/pac207.c
+++ b/drivers/media/video/gspca/pac207.c
@@ -99,7 +99,7 @@ static const struct ctrl sd_ctrls[] = {
{
.id = V4L2_CID_EXPOSURE,
.type = V4L2_CTRL_TYPE_INTEGER,
- .name = "exposure",
+ .name = "Exposure",
.minimum = PAC207_EXPOSURE_MIN,
.maximum = PAC207_EXPOSURE_MAX,
.step = 1,
@@ -130,7 +130,7 @@ static const struct ctrl sd_ctrls[] = {
{
.id = V4L2_CID_GAIN,
.type = V4L2_CTRL_TYPE_INTEGER,
- .name = "gain",
+ .name = "Gain",
.minimum = PAC207_GAIN_MIN,
.maximum = PAC207_GAIN_MAX,
.step = 1,
diff --git a/drivers/media/video/gspca/sn9c2028.c b/drivers/media/video/gspca/sn9c2028.c
index dda5fd4aa69e..71d9447a7986 100644
--- a/drivers/media/video/gspca/sn9c2028.c
+++ b/drivers/media/video/gspca/sn9c2028.c
@@ -39,7 +39,7 @@ struct init_command {
};
/* V4L2 controls supported by the driver */
-static struct ctrl sd_ctrls[] = {
+static const struct ctrl sd_ctrls[] = {
};
/* How to change the resolution of any of the VGA cams is unknown */
diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c
index 3dee3e5844b6..0dededf14727 100644
--- a/drivers/media/video/gspca/sn9c20x.c
+++ b/drivers/media/video/gspca/sn9c20x.c
@@ -18,10 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV
-#include <linux/kthread.h>
-#include <linux/freezer.h>
-#include <linux/usb/input.h>
+#ifdef CONFIG_INPUT
#include <linux/input.h>
#include <linux/slab.h>
#endif
@@ -30,6 +27,7 @@
#include "jpeg.h"
#include <media/v4l2-chip-ident.h>
+#include <linux/dmi.h>
MODULE_AUTHOR("Brian Johnson <brijohn@gmail.com>, "
"microdia project <microdia@googlegroups.com>");
@@ -52,9 +50,15 @@ MODULE_LICENSE("GPL");
#define SENSOR_MT9V112 7
#define SENSOR_MT9M001 8
#define SENSOR_MT9M111 9
-#define SENSOR_HV7131R 10
+#define SENSOR_MT9M112 10
+#define SENSOR_HV7131R 11
#define SENSOR_MT9VPRB 20
+/* camera flags */
+#define HAS_BUTTON 0x1
+#define LED_REVERSE 0x2 /* some cameras unset gpio to turn on leds */
+#define FLIP_DETECT 0x4
+
/* specific webcam descriptor */
struct sd {
struct gspca_dev gspca_dev;
@@ -88,11 +92,7 @@ struct sd {
u8 *jpeg_hdr;
u8 quality;
-#ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV
- struct input_dev *input_dev;
- u8 input_gpio;
- struct task_struct *input_task;
-#endif
+ u8 flags;
};
struct i2c_reg_u8 {
@@ -130,6 +130,25 @@ static int sd_getexposure(struct gspca_dev *gspca_dev, s32 *val);
static int sd_setautoexposure(struct gspca_dev *gspca_dev, s32 val);
static int sd_getautoexposure(struct gspca_dev *gspca_dev, s32 *val);
+static const struct dmi_system_id flip_dmi_table[] = {
+ {
+ .ident = "MSI MS-1034",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MICRO-STAR INT'L CO.,LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MS-1034"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "0341")
+ }
+ },
+ {
+ .ident = "MSI MS-1632",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "MSI"),
+ DMI_MATCH(DMI_BOARD_NAME, "MS-1632")
+ }
+ },
+ {}
+};
+
static const struct ctrl sd_ctrls[] = {
{
#define BRIGHTNESS_IDX 0
@@ -713,6 +732,7 @@ static u16 i2c_ident[] = {
V4L2_IDENT_MT9V112,
V4L2_IDENT_MT9M001C12ST,
V4L2_IDENT_MT9M111,
+ V4L2_IDENT_MT9M112,
V4L2_IDENT_HV7131R,
};
@@ -735,7 +755,8 @@ static u16 bridge_init[][2] = {
{0x11be, 0xf0}, {0x11bf, 0x00}, {0x118c, 0x1f},
{0x118d, 0x1f}, {0x118e, 0x1f}, {0x118f, 0x1f},
{0x1180, 0x01}, {0x1181, 0x00}, {0x1182, 0x01},
- {0x1183, 0x00}, {0x1184, 0x50}, {0x1185, 0x80}
+ {0x1183, 0x00}, {0x1184, 0x50}, {0x1185, 0x80},
+ {0x1007, 0x00}
};
/* Gain = (bit[3:0] / 16 + 1) * (bit[4] + 1) * (bit[5] + 1) * (bit[6] + 1) */
@@ -914,40 +935,30 @@ static struct i2c_reg_u8 ov9650_init[] = {
};
static struct i2c_reg_u8 ov9655_init[] = {
- {0x12, 0x80}, {0x12, 0x01}, {0x0d, 0x00}, {0x0e, 0x61},
- {0x11, 0x80}, {0x13, 0xba}, {0x14, 0x2e}, {0x16, 0x24},
- {0x1e, 0x04}, {0x1e, 0x04}, {0x1e, 0x04}, {0x27, 0x08},
- {0x28, 0x08}, {0x29, 0x15}, {0x2c, 0x08}, {0x32, 0xbf},
- {0x34, 0x3d}, {0x35, 0x00}, {0x36, 0xf8}, {0x38, 0x12},
- {0x39, 0x57}, {0x3a, 0x00}, {0x3b, 0xcc}, {0x3c, 0x0c},
- {0x3d, 0x19}, {0x3e, 0x0c}, {0x3f, 0x01}, {0x41, 0x40},
- {0x42, 0x80}, {0x45, 0x46}, {0x46, 0x62}, {0x47, 0x2a},
- {0x48, 0x3c}, {0x4a, 0xf0}, {0x4b, 0xdc}, {0x4c, 0xdc},
- {0x4d, 0xdc}, {0x4e, 0xdc}, {0x69, 0x02}, {0x6c, 0x04},
- {0x6f, 0x9e}, {0x70, 0x05}, {0x71, 0x78}, {0x77, 0x02},
- {0x8a, 0x23}, {0x8c, 0x0d}, {0x90, 0x7e}, {0x91, 0x7c},
- {0x9f, 0x6e}, {0xa0, 0x6e}, {0xa5, 0x68}, {0xa6, 0x60},
- {0xa8, 0xc1}, {0xa9, 0xfa}, {0xaa, 0x92}, {0xab, 0x04},
- {0xac, 0x80}, {0xad, 0x80}, {0xae, 0x80}, {0xaf, 0x80},
- {0xb2, 0xf2}, {0xb3, 0x20}, {0xb5, 0x00}, {0xb6, 0xaf},
- {0xbb, 0xae}, {0xbc, 0x44}, {0xbd, 0x44}, {0xbe, 0x3b},
- {0xbf, 0x3a}, {0xc0, 0xe2}, {0xc1, 0xc8}, {0xc2, 0x01},
+ {0x12, 0x80}, {0x0e, 0x61}, {0x11, 0x80}, {0x13, 0xba},
+ {0x14, 0x2e}, {0x16, 0x24}, {0x1e, 0x04}, {0x27, 0x08},
+ {0x28, 0x08}, {0x29, 0x15}, {0x2c, 0x08}, {0x34, 0x3d},
+ {0x35, 0x00}, {0x38, 0x12}, {0x0f, 0x42}, {0x39, 0x57},
+ {0x3a, 0x00}, {0x3b, 0xcc}, {0x3c, 0x0c}, {0x3d, 0x19},
+ {0x3e, 0x0c}, {0x3f, 0x01}, {0x41, 0x40}, {0x42, 0x80},
+ {0x45, 0x46}, {0x46, 0x62}, {0x47, 0x2a}, {0x48, 0x3c},
+ {0x4a, 0xf0}, {0x4b, 0xdc}, {0x4c, 0xdc}, {0x4d, 0xdc},
+ {0x4e, 0xdc}, {0x6c, 0x04}, {0x6f, 0x9e}, {0x70, 0x05},
+ {0x71, 0x78}, {0x77, 0x02}, {0x8a, 0x23}, {0x90, 0x7e},
+ {0x91, 0x7c}, {0x9f, 0x6e}, {0xa0, 0x6e}, {0xa5, 0x68},
+ {0xa6, 0x60}, {0xa8, 0xc1}, {0xa9, 0xfa}, {0xaa, 0x92},
+ {0xab, 0x04}, {0xac, 0x80}, {0xad, 0x80}, {0xae, 0x80},
+ {0xaf, 0x80}, {0xb2, 0xf2}, {0xb3, 0x20}, {0xb5, 0x00},
+ {0xb6, 0xaf}, {0xbb, 0xae}, {0xbc, 0x44}, {0xbd, 0x44},
+ {0xbe, 0x3b}, {0xbf, 0x3a}, {0xc1, 0xc8}, {0xc2, 0x01},
{0xc4, 0x00}, {0xc6, 0x85}, {0xc7, 0x81}, {0xc9, 0xe0},
- {0xca, 0xe8}, {0xcc, 0xd8}, {0xcd, 0x93}, {0x12, 0x61},
+ {0xca, 0xe8}, {0xcc, 0xd8}, {0xcd, 0x93}, {0x2d, 0x00},
+ {0x2e, 0x00}, {0x01, 0x80}, {0x02, 0x80}, {0x12, 0x61},
{0x36, 0xfa}, {0x8c, 0x8d}, {0xc0, 0xaa}, {0x69, 0x0a},
- {0x03, 0x12}, {0x17, 0x14}, {0x18, 0x00}, {0x19, 0x01},
- {0x1a, 0x3d}, {0x32, 0xbf}, {0x11, 0x80}, {0x2a, 0x10},
- {0x2b, 0x0a}, {0x92, 0x00}, {0x93, 0x00}, {0x1e, 0x04},
- {0x1e, 0x04}, {0x10, 0x7c}, {0x04, 0x03}, {0xa1, 0x00},
- {0x2d, 0x00}, {0x2e, 0x00}, {0x00, 0x00}, {0x01, 0x80},
- {0x02, 0x80}, {0x12, 0x61}, {0x36, 0xfa}, {0x8c, 0x8d},
- {0xc0, 0xaa}, {0x69, 0x0a}, {0x03, 0x12}, {0x17, 0x14},
- {0x18, 0x00}, {0x19, 0x01}, {0x1a, 0x3d}, {0x32, 0xbf},
- {0x11, 0x80}, {0x2a, 0x10}, {0x2b, 0x0a}, {0x92, 0x00},
- {0x93, 0x00}, {0x04, 0x01}, {0x10, 0x1f}, {0xa1, 0x00},
- {0x00, 0x0a}, {0xa1, 0x00}, {0x10, 0x5d}, {0x04, 0x03},
- {0x00, 0x01}, {0xa1, 0x00}, {0x10, 0x7c}, {0x04, 0x03},
- {0x00, 0x03}, {0x00, 0x0a}, {0x00, 0x10}, {0x00, 0x13},
+ {0x03, 0x09}, {0x17, 0x16}, {0x18, 0x6e}, {0x19, 0x01},
+ {0x1a, 0x3e}, {0x32, 0x09}, {0x2a, 0x10}, {0x2b, 0x0a},
+ {0x92, 0x00}, {0x93, 0x00}, {0xa1, 0x00}, {0x10, 0x7c},
+ {0x04, 0x03}, {0x00, 0x13},
};
static struct i2c_reg_u16 mt9v112_init[] = {
@@ -1043,6 +1054,13 @@ static struct i2c_reg_u16 mt9m111_init[] = {
{0xf0, 0x0000},
};
+static struct i2c_reg_u16 mt9m112_init[] = {
+ {0xf0, 0x0000}, {0x0d, 0x0021}, {0x0d, 0x0008},
+ {0xf0, 0x0001}, {0x3a, 0x4300}, {0x9b, 0x4300},
+ {0x06, 0x708e}, {0xf0, 0x0002}, {0x2e, 0x0a1e},
+ {0xf0, 0x0000},
+};
+
static struct i2c_reg_u8 hv7131r_init[] = {
{0x02, 0x08}, {0x02, 0x00}, {0x01, 0x08},
{0x02, 0x00}, {0x20, 0x00}, {0x21, 0xd0},
@@ -1240,8 +1258,8 @@ static int ov9655_init_sensor(struct gspca_dev *gspca_dev)
}
/* disable hflip and vflip */
gspca_dev->ctrl_dis = (1 << HFLIP_IDX) | (1 << VFLIP_IDX);
- sd->hstart = 0;
- sd->vstart = 7;
+ sd->hstart = 1;
+ sd->vstart = 2;
return 0;
}
@@ -1369,6 +1387,23 @@ static int mt9v_init_sensor(struct gspca_dev *gspca_dev)
return -ENODEV;
}
+static int mt9m112_init_sensor(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ int i;
+ for (i = 0; i < ARRAY_SIZE(mt9m112_init); i++) {
+ if (i2c_w2(gspca_dev, mt9m112_init[i].reg,
+ mt9m112_init[i].val) < 0) {
+ err("MT9M112 sensor initialization failed");
+ return -ENODEV;
+ }
+ }
+ gspca_dev->ctrl_dis = (1 << EXPOSURE_IDX) | (1 << AUTOGAIN_IDX) | (1 << GAIN_IDX);
+ sd->hstart = 0;
+ sd->vstart = 2;
+ return 0;
+}
+
static int mt9m111_init_sensor(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1579,17 +1614,26 @@ static int set_redblue(struct gspca_dev *gspca_dev)
static int set_hvflip(struct gspca_dev *gspca_dev)
{
- u8 value, tslb;
+ u8 value, tslb, hflip, vflip;
u16 value2;
struct sd *sd = (struct sd *) gspca_dev;
+
+ if ((sd->flags & FLIP_DETECT) && dmi_check_system(flip_dmi_table)) {
+ hflip = !sd->hflip;
+ vflip = !sd->vflip;
+ } else {
+ hflip = sd->hflip;
+ vflip = sd->vflip;
+ }
+
switch (sd->sensor) {
case SENSOR_OV9650:
i2c_r1(gspca_dev, 0x1e, &value);
value &= ~0x30;
tslb = 0x01;
- if (sd->hflip)
+ if (hflip)
value |= 0x20;
- if (sd->vflip) {
+ if (vflip) {
value |= 0x10;
tslb = 0x49;
}
@@ -1600,28 +1644,29 @@ static int set_hvflip(struct gspca_dev *gspca_dev)
case SENSOR_MT9V011:
i2c_r2(gspca_dev, 0x20, &value2);
value2 &= ~0xc0a0;
- if (sd->hflip)
+ if (hflip)
value2 |= 0x8080;
- if (sd->vflip)
+ if (vflip)
value2 |= 0x4020;
i2c_w2(gspca_dev, 0x20, value2);
break;
+ case SENSOR_MT9M112:
case SENSOR_MT9M111:
case SENSOR_MT9V112:
i2c_r2(gspca_dev, 0x20, &value2);
value2 &= ~0x0003;
- if (sd->hflip)
+ if (hflip)
value2 |= 0x0002;
- if (sd->vflip)
+ if (vflip)
value2 |= 0x0001;
i2c_w2(gspca_dev, 0x20, value2);
break;
case SENSOR_HV7131R:
i2c_r1(gspca_dev, 0x01, &value);
value &= ~0x03;
- if (sd->vflip)
+ if (vflip)
value |= 0x01;
- if (sd->hflip)
+ if (hflip)
value |= 0x02;
i2c_w1(gspca_dev, 0x01, value);
break;
@@ -1655,9 +1700,9 @@ static int set_exposure(struct gspca_dev *gspca_dev)
case SENSOR_HV7131R:
exp[0] |= (4 << 4);
exp[2] = 0x25;
- exp[3] = ((sd->exposure * 0xffffff) / 0xffff) >> 16;
- exp[4] = ((sd->exposure * 0xffffff) / 0xffff) >> 8;
- exp[5] = ((sd->exposure * 0xffffff) / 0xffff) & 0xff;
+ exp[3] = (sd->exposure >> 5) & 0xff;
+ exp[4] = (sd->exposure << 3) & 0xff;
+ exp[5] = 0;
break;
default:
return 0;
@@ -1931,7 +1976,7 @@ static int sd_dbg_g_register(struct gspca_dev *gspca_dev,
if (reg->match.addr != sd->i2c_addr)
return -EINVAL;
if (sd->sensor >= SENSOR_MT9V011 &&
- sd->sensor <= SENSOR_MT9M111) {
+ sd->sensor <= SENSOR_MT9M112) {
if (i2c_r2(gspca_dev, reg->reg, (u16 *)&reg->val) < 0)
return -EINVAL;
} else {
@@ -1960,7 +2005,7 @@ static int sd_dbg_s_register(struct gspca_dev *gspca_dev,
if (reg->match.addr != sd->i2c_addr)
return -EINVAL;
if (sd->sensor >= SENSOR_MT9V011 &&
- sd->sensor <= SENSOR_MT9M111) {
+ sd->sensor <= SENSOR_MT9M112) {
if (i2c_w2(gspca_dev, reg->reg, reg->val) < 0)
return -EINVAL;
} else {
@@ -2005,8 +2050,10 @@ static int sd_config(struct gspca_dev *gspca_dev,
sd->sensor = (id->driver_info >> 8) & 0xff;
sd->i2c_addr = id->driver_info & 0xff;
+ sd->flags = (id->driver_info >> 16) & 0xff;
switch (sd->sensor) {
+ case SENSOR_MT9M112:
case SENSOR_MT9M111:
case SENSOR_OV9650:
case SENSOR_SOI968:
@@ -2039,11 +2086,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
sd->quality = 95;
-#ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV
- sd->input_gpio = (id->driver_info >> 16) & 0xff;
- if (sn9c20x_input_init(gspca_dev) < 0)
- return -ENODEV;
-#endif
return 0;
}
@@ -2063,6 +2105,11 @@ static int sd_init(struct gspca_dev *gspca_dev)
}
}
+ if (sd->flags & LED_REVERSE)
+ reg_w1(gspca_dev, 0x1006, 0x00);
+ else
+ reg_w1(gspca_dev, 0x1006, 0x20);
+
if (reg_w(gspca_dev, 0x10c0, i2c_init, 9) < 0) {
err("Device initialization failed");
return -ENODEV;
@@ -2103,6 +2150,11 @@ static int sd_init(struct gspca_dev *gspca_dev)
return -ENODEV;
info("MT9M111 sensor detected");
break;
+ case SENSOR_MT9M112:
+ if (mt9m112_init_sensor(gspca_dev) < 0)
+ return -ENODEV;
+ info("MT9M112 sensor detected");
+ break;
case SENSOR_MT9M001:
if (mt9m001_init_sensor(gspca_dev) < 0)
return -ENODEV;
@@ -2162,6 +2214,7 @@ static void configure_sensor_output(struct gspca_dev *gspca_dev, int mode)
i2c_w1(gspca_dev, 0x12, (value & 0x7) | 0x40);
}
break;
+ case SENSOR_MT9M112:
case SENSOR_MT9M111:
if (mode & MODE_SXGA) {
i2c_w2(gspca_dev, 0xf0, 0x0002);
@@ -2243,6 +2296,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
set_exposure(gspca_dev);
set_hvflip(gspca_dev);
+ reg_w1(gspca_dev, 0x1007, 0x20);
+
reg_r(gspca_dev, 0x1061, 1);
reg_w1(gspca_dev, 0x1061, gspca_dev->usb_buf[0] | 0x02);
return 0;
@@ -2250,6 +2305,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
static void sd_stopN(struct gspca_dev *gspca_dev)
{
+ reg_w1(gspca_dev, 0x1007, 0x00);
+
reg_r(gspca_dev, 0x1061, 1);
reg_w1(gspca_dev, 0x1061, gspca_dev->usb_buf[0] & ~0x02);
}
@@ -2343,6 +2400,24 @@ static void sd_dqcallback(struct gspca_dev *gspca_dev)
do_autoexposure(gspca_dev, avg_lum);
}
+#ifdef CONFIG_INPUT
+static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
+ u8 *data, /* interrupt packet */
+ int len) /* interrupt packet length */
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ int ret = -EINVAL;
+ if (sd->flags & HAS_BUTTON && len == 1) {
+ input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1);
+ input_sync(gspca_dev->input_dev);
+ input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0);
+ input_sync(gspca_dev->input_dev);
+ ret = 0;
+ }
+ return ret;
+}
+#endif
+
static void sd_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* isoc packet */
int len) /* iso packet length */
@@ -2409,6 +2484,9 @@ static const struct sd_desc sd_desc = {
.stopN = sd_stopN,
.stop0 = sd_stop0,
.pkt_scan = sd_pkt_scan,
+#ifdef CONFIG_INPUT
+ .int_pkt_scan = sd_int_pkt_scan,
+#endif
.dq_callback = sd_dqcallback,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.set_register = sd_dbg_s_register,
@@ -2417,8 +2495,8 @@ static const struct sd_desc sd_desc = {
.get_chip_ident = sd_chip_ident,
};
-#define SN9C20X(sensor, i2c_addr, button_mask) \
- .driver_info = (button_mask << 16) \
+#define SN9C20X(sensor, i2c_addr, flags) \
+ .driver_info = ((flags & 0xff) << 16) \
| (SENSOR_ ## sensor << 8) \
| (i2c_addr)
@@ -2426,8 +2504,10 @@ static const __devinitdata struct usb_device_id device_table[] = {
{USB_DEVICE(0x0c45, 0x6240), SN9C20X(MT9M001, 0x5d, 0)},
{USB_DEVICE(0x0c45, 0x6242), SN9C20X(MT9M111, 0x5d, 0)},
{USB_DEVICE(0x0c45, 0x6248), SN9C20X(OV9655, 0x30, 0)},
- {USB_DEVICE(0x0c45, 0x624e), SN9C20X(SOI968, 0x30, 0x10)},
- {USB_DEVICE(0x0c45, 0x624f), SN9C20X(OV9650, 0x30, 0)},
+ {USB_DEVICE(0x0c45, 0x624c), SN9C20X(MT9M112, 0x5d, 0)},
+ {USB_DEVICE(0x0c45, 0x624e), SN9C20X(SOI968, 0x30,
+ (HAS_BUTTON | LED_REVERSE))},
+ {USB_DEVICE(0x0c45, 0x624f), SN9C20X(OV9650, 0x30, FLIP_DETECT)},
{USB_DEVICE(0x0c45, 0x6251), SN9C20X(OV9650, 0x30, 0)},
{USB_DEVICE(0x0c45, 0x6253), SN9C20X(OV9650, 0x30, 0)},
{USB_DEVICE(0x0c45, 0x6260), SN9C20X(OV7670, 0x21, 0)},
@@ -2437,22 +2517,25 @@ static const __devinitdata struct usb_device_id device_table[] = {
{USB_DEVICE(0x0c45, 0x627f), SN9C20X(OV9650, 0x30, 0)},
{USB_DEVICE(0x0c45, 0x6280), SN9C20X(MT9M001, 0x5d, 0)},
{USB_DEVICE(0x0c45, 0x6282), SN9C20X(MT9M111, 0x5d, 0)},
- {USB_DEVICE(0x0c45, 0x6288), SN9C20X(OV9655, 0x30, 0)},
+ {USB_DEVICE(0x0c45, 0x6288), SN9C20X(OV9655, 0x30, HAS_BUTTON)},
+ {USB_DEVICE(0x0c45, 0x628c), SN9C20X(MT9M112, 0x5d, 0)},
{USB_DEVICE(0x0c45, 0x628e), SN9C20X(SOI968, 0x30, 0)},
{USB_DEVICE(0x0c45, 0x628f), SN9C20X(OV9650, 0x30, 0)},
{USB_DEVICE(0x0c45, 0x62a0), SN9C20X(OV7670, 0x21, 0)},
{USB_DEVICE(0x0c45, 0x62b0), SN9C20X(MT9VPRB, 0x00, 0)},
{USB_DEVICE(0x0c45, 0x62b3), SN9C20X(OV9655, 0x30, 0)},
- {USB_DEVICE(0x0c45, 0x62bb), SN9C20X(OV7660, 0x21, 0)},
+ {USB_DEVICE(0x0c45, 0x62bb), SN9C20X(OV7660, 0x21, HAS_BUTTON)},
{USB_DEVICE(0x0c45, 0x62bc), SN9C20X(HV7131R, 0x11, 0)},
{USB_DEVICE(0x045e, 0x00f4), SN9C20X(OV9650, 0x30, 0)},
{USB_DEVICE(0x145f, 0x013d), SN9C20X(OV7660, 0x21, 0)},
{USB_DEVICE(0x0458, 0x7029), SN9C20X(HV7131R, 0x11, 0)},
+ {USB_DEVICE(0x0458, 0x704a), SN9C20X(MT9M112, 0x5d, 0)},
+ {USB_DEVICE(0x0458, 0x704c), SN9C20X(MT9M112, 0x5d, 0)},
{USB_DEVICE(0xa168, 0x0610), SN9C20X(HV7131R, 0x11, 0)},
{USB_DEVICE(0xa168, 0x0611), SN9C20X(HV7131R, 0x11, 0)},
{USB_DEVICE(0xa168, 0x0613), SN9C20X(HV7131R, 0x11, 0)},
{USB_DEVICE(0xa168, 0x0618), SN9C20X(HV7131R, 0x11, 0)},
- {USB_DEVICE(0xa168, 0x0614), SN9C20X(MT9M111, 0x5d, 0)},
+ {USB_DEVICE(0xa168, 0x0614), SN9C20X(MT9M111, 0x5d, HAS_BUTTON)},
{USB_DEVICE(0xa168, 0x0615), SN9C20X(MT9M111, 0x5d, 0)},
{USB_DEVICE(0xa168, 0x0617), SN9C20X(MT9M111, 0x5d, 0)},
{}
@@ -2467,22 +2550,11 @@ static int sd_probe(struct usb_interface *intf,
THIS_MODULE);
}
-static void sd_disconnect(struct usb_interface *intf)
-{
-#ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV
- struct gspca_dev *gspca_dev = usb_get_intfdata(intf);
-
- sn9c20x_input_cleanup(gspca_dev);
-#endif
-
- gspca_disconnect(intf);
-}
-
static struct usb_driver sd_driver = {
.name = MODULE_NAME,
.id_table = device_table,
.probe = sd_probe,
- .disconnect = sd_disconnect,
+ .disconnect = gspca_disconnect,
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index 1d61b92f6bfc..bb923efb75bd 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -1,7 +1,7 @@
/*
* Sonix sn9c102p sn9c105 sn9c120 (jpeg) subdriver
*
- * Copyright (C) 2009 Jean-Francois Moine <http://moinejf.free.fr>
+ * Copyright (C) 2009-2010 Jean-François Moine <http://moinejf.free.fr>
* Copyright (C) 2005 Michel Xhaard mxhaard@magic.fr
*
* This program is free software; you can redistribute it and/or modify
@@ -28,7 +28,7 @@
#define V4L2_CID_INFRARED (V4L2_CID_PRIVATE_BASE + 0)
-MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>");
+MODULE_AUTHOR("Jean-François Moine <http://moinejf.free.fr>");
MODULE_DESCRIPTION("GSPCA/SONIX JPEG USB Camera Driver");
MODULE_LICENSE("GPL");
@@ -67,20 +67,25 @@ struct sd {
#define BRIDGE_SN9C110 2
#define BRIDGE_SN9C120 3
u8 sensor; /* Type of image sensor chip */
-#define SENSOR_ADCM1700 0
-#define SENSOR_HV7131R 1
-#define SENSOR_MI0360 2
-#define SENSOR_MO4000 3
-#define SENSOR_MT9V111 4
-#define SENSOR_OM6802 5
-#define SENSOR_OV7630 6
-#define SENSOR_OV7648 7
-#define SENSOR_OV7660 8
-#define SENSOR_PO1030 9
-#define SENSOR_SP80708 10
+enum {
+ SENSOR_ADCM1700,
+ SENSOR_GC0307,
+ SENSOR_HV7131R,
+ SENSOR_MI0360,
+ SENSOR_MO4000,
+ SENSOR_MT9V111,
+ SENSOR_OM6802,
+ SENSOR_OV7630,
+ SENSOR_OV7648,
+ SENSOR_OV7660,
+ SENSOR_PO1030,
+ SENSOR_PO2030N,
+ SENSOR_SOI768,
+ SENSOR_SP80708,
+} sensors;
u8 i2c_addr;
- u8 *jpeg_hdr;
+ u8 jpeg_hdr[JPEG_HDR_SZ];
};
/* V4L2 controls supported by the driver */
@@ -281,29 +286,60 @@ static const struct ctrl sd_ctrls[] = {
};
/* table of the disabled controls */
-static __u32 ctrl_dis[] = {
- (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX) |
- (1 << AUTOGAIN_IDX), /* SENSOR_ADCM1700 0 */
- (1 << INFRARED_IDX) | (1 << FREQ_IDX),
- /* SENSOR_HV7131R 1 */
- (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX),
- /* SENSOR_MI0360 2 */
- (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX),
- /* SENSOR_MO4000 3 */
- (1 << VFLIP_IDX) | (1 << FREQ_IDX),
- /* SENSOR_MT9V111 4 */
- (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX),
- /* SENSOR_OM6802 5 */
- (1 << INFRARED_IDX),
- /* SENSOR_OV7630 6 */
- (1 << INFRARED_IDX),
- /* SENSOR_OV7648 7 */
- (1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX) | (1 << VFLIP_IDX),
- /* SENSOR_OV7660 8 */
- (1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX) | (1 << VFLIP_IDX) |
- (1 << FREQ_IDX), /* SENSOR_PO1030 9 */
- (1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX) | (1 << VFLIP_IDX) |
- (1 << FREQ_IDX), /* SENSOR_SP80708 10 */
+static const __u32 ctrl_dis[] = {
+[SENSOR_ADCM1700] = (1 << AUTOGAIN_IDX) |
+ (1 << INFRARED_IDX) |
+ (1 << VFLIP_IDX) |
+ (1 << FREQ_IDX),
+
+[SENSOR_GC0307] = (1 << INFRARED_IDX) |
+ (1 << VFLIP_IDX) |
+ (1 << FREQ_IDX),
+
+[SENSOR_HV7131R] = (1 << INFRARED_IDX) |
+ (1 << FREQ_IDX),
+
+[SENSOR_MI0360] = (1 << INFRARED_IDX) |
+ (1 << VFLIP_IDX) |
+ (1 << FREQ_IDX),
+
+[SENSOR_MO4000] = (1 << INFRARED_IDX) |
+ (1 << VFLIP_IDX) |
+ (1 << FREQ_IDX),
+
+[SENSOR_MT9V111] = (1 << VFLIP_IDX) |
+ (1 << FREQ_IDX),
+
+[SENSOR_OM6802] = (1 << INFRARED_IDX) |
+ (1 << VFLIP_IDX) |
+ (1 << FREQ_IDX),
+
+[SENSOR_OV7630] = (1 << INFRARED_IDX),
+
+[SENSOR_OV7648] = (1 << INFRARED_IDX),
+
+[SENSOR_OV7660] = (1 << AUTOGAIN_IDX) |
+ (1 << INFRARED_IDX) |
+ (1 << VFLIP_IDX),
+
+[SENSOR_PO1030] = (1 << AUTOGAIN_IDX) |
+ (1 << INFRARED_IDX) |
+ (1 << VFLIP_IDX) |
+ (1 << FREQ_IDX),
+
+[SENSOR_PO2030N] = (1 << AUTOGAIN_IDX) |
+ (1 << INFRARED_IDX) |
+ (1 << VFLIP_IDX) |
+ (1 << FREQ_IDX),
+[SENSOR_SOI768] = (1 << AUTOGAIN_IDX) |
+ (1 << INFRARED_IDX) |
+ (1 << VFLIP_IDX) |
+ (1 << FREQ_IDX),
+
+[SENSOR_SP80708] = (1 << AUTOGAIN_IDX) |
+ (1 << INFRARED_IDX) |
+ (1 << VFLIP_IDX) |
+ (1 << FREQ_IDX),
};
static const struct v4l2_pix_format cif_mode[] = {
@@ -343,7 +379,17 @@ static const u8 sn_adcm1700[0x1c] = {
0x06, 0x00, 0x00, 0x00
};
-/*Data from sn9c102p+hv7131r */
+static const u8 sn_gc0307[0x1c] = {
+/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */
+ 0x00, 0x61, 0x62, 0x00, 0x1a, 0x00, 0x00, 0x00,
+/* reg8 reg9 rega regb regc regd rege regf */
+ 0x80, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */
+ 0x03, 0x00, 0x03, 0x01, 0x08, 0x28, 0x1e, 0x02,
+/* reg18 reg19 reg1a reg1b */
+ 0x06, 0x00, 0x00, 0x00
+};
+
static const u8 sn_hv7131[0x1c] = {
/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */
0x00, 0x03, 0x64, 0x00, 0x1a, 0x20, 0x20, 0x20,
@@ -401,7 +447,7 @@ static const u8 sn_om6802[0x1c] = {
static const u8 sn_ov7630[0x1c] = {
/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */
- 0x00, 0x21, 0x40, 0x00, 0x1a, 0x20, 0x1f, 0x20,
+ 0x00, 0x21, 0x40, 0x00, 0x1a, 0x00, 0x00, 0x00,
/* reg8 reg9 rega regb regc regd rege regf */
0x81, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */
@@ -443,6 +489,28 @@ static const u8 sn_po1030[0x1c] = {
0x07, 0x00, 0x00, 0x00
};
+static const u8 sn_po2030n[0x1c] = {
+/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */
+ 0x00, 0x63, 0x40, 0x00, 0x1a, 0x00, 0x00, 0x00,
+/* reg8 reg9 rega regb regc regd rege regf */
+ 0x81, 0x6e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */
+ 0x03, 0x00, 0x00, 0x01, 0x14, 0x28, 0x1e, 0x00,
+/* reg18 reg19 reg1a reg1b */
+ 0x07, 0x00, 0x00, 0x00
+};
+
+static const u8 sn_soi768[0x1c] = {
+/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */
+ 0x00, 0x21, 0x40, 0x00, 0x1a, 0x00, 0x00, 0x00,
+/* reg8 reg9 rega regb regc regd rege regf */
+ 0x81, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */
+ 0x03, 0x00, 0x00, 0x01, 0x08, 0x28, 0x1e, 0x00,
+/* reg18 reg19 reg1a reg1b */
+ 0x07, 0x00, 0x00, 0x00
+};
+
static const u8 sn_sp80708[0x1c] = {
/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */
0x00, 0x63, 0x60, 0x00, 0x1a, 0x20, 0x20, 0x20,
@@ -456,17 +524,20 @@ static const u8 sn_sp80708[0x1c] = {
/* sequence specific to the sensors - !! index = SENSOR_xxx */
static const u8 *sn_tb[] = {
- sn_adcm1700,
- sn_hv7131,
- sn_mi0360,
- sn_mo4000,
- sn_mt9v111,
- sn_om6802,
- sn_ov7630,
- sn_ov7648,
- sn_ov7660,
- sn_po1030,
- sn_sp80708
+[SENSOR_ADCM1700] = sn_adcm1700,
+[SENSOR_GC0307] = sn_gc0307,
+[SENSOR_HV7131R] = sn_hv7131,
+[SENSOR_MI0360] = sn_mi0360,
+[SENSOR_MO4000] = sn_mo4000,
+[SENSOR_MT9V111] = sn_mt9v111,
+[SENSOR_OM6802] = sn_om6802,
+[SENSOR_OV7630] = sn_ov7630,
+[SENSOR_OV7648] = sn_ov7648,
+[SENSOR_OV7660] = sn_ov7660,
+[SENSOR_PO1030] = sn_po1030,
+[SENSOR_PO2030N] = sn_po2030n,
+[SENSOR_SOI768] = sn_soi768,
+[SENSOR_SP80708] = sn_sp80708,
};
/* default gamma table */
@@ -484,8 +555,13 @@ static const u8 gamma_spec_1[17] = {
0x08, 0x3a, 0x52, 0x65, 0x75, 0x83, 0x91, 0x9d,
0xa9, 0xb4, 0xbe, 0xc8, 0xd2, 0xdb, 0xe4, 0xed, 0xf5
};
-/* gamma for sensor SP80708 */
+/* gamma for sensor GC0307 */
static const u8 gamma_spec_2[17] = {
+ 0x14, 0x37, 0x50, 0x6a, 0x7c, 0x8d, 0x9d, 0xab,
+ 0xb5, 0xbf, 0xc2, 0xcb, 0xd1, 0xd6, 0xdb, 0xe1, 0xeb
+};
+/* gamma for sensor SP80708 */
+static const u8 gamma_spec_3[17] = {
0x0a, 0x2d, 0x4e, 0x68, 0x7d, 0x8f, 0x9f, 0xab,
0xb7, 0xc2, 0xcc, 0xd3, 0xd8, 0xde, 0xe2, 0xe5, 0xe6
};
@@ -533,6 +609,58 @@ static const u8 adcm1700_sensor_param1[][8] = {
{0xb0, 0x51, 0x32, 0x00, 0xa2, 0x00, 0x00, 0x10},
{}
};
+static const u8 gc0307_sensor_init[][8] = {
+ {0xa0, 0x21, 0x43, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x44, 0xa2, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x01, 0x6a, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x02, 0x70, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x10, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x1d, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x11, 0x05, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x05, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x06, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x07, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x08, 0x02, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x09, 0x01, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x0a, 0xe8, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x0b, 0x02, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x0c, 0x80, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x0d, 0x22, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x0e, 0x02, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x0f, 0xb2, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x12, 0x70, 0x00, 0x00, 0x00, 0x10},
+ {0xdd, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*delay 10ms*/
+ {0xa0, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x15, 0xb8, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x16, 0x13, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x17, 0x52, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x18, 0x50, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x1e, 0x0d, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x1f, 0x32, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x61, 0x90, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x63, 0x70, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x65, 0x98, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x67, 0x90, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x03, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x04, 0x96, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x45, 0x27, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x47, 0x2c, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x43, 0x47, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x44, 0xd8, 0x00, 0x00, 0x00, 0x10},
+ {}
+};
+static const u8 gc0307_sensor_param1[][8] = {
+ {0xa0, 0x21, 0x68, 0x13, 0x00, 0x00, 0x00, 0x10},
+ {0xd0, 0x21, 0x61, 0x80, 0x00, 0x80, 0x00, 0x10},
+ {0xc0, 0x21, 0x65, 0x80, 0x00, 0x80, 0x00, 0x10},
+ {0xc0, 0x21, 0x63, 0xa0, 0x00, 0xa6, 0x00, 0x10},
+/*param3*/
+ {0xa0, 0x21, 0x01, 0x6e, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x02, 0x88, 0x00, 0x00, 0x00, 0x10},
+ {}
+};
+
static const u8 hv7131r_sensor_init[][8] = {
{0xc1, 0x11, 0x01, 0x08, 0x01, 0x00, 0x00, 0x10},
{0xb1, 0x11, 0x34, 0x17, 0x7f, 0x00, 0x00, 0x10},
@@ -767,7 +895,9 @@ static const u8 ov7630_sensor_init[][8] = {
{0xc1, 0x21, 0x7b, 0x00, 0x4c, 0xf7, 0x00, 0x10},
{0xd1, 0x21, 0x17, 0x1b, 0xbd, 0x05, 0xf6, 0x10},
{0xa1, 0x21, 0x1b, 0x04, 0x00, 0x00, 0x00, 0x10},
-/* */
+ {}
+};
+static const u8 ov7630_sensor_param1[][8] = {
{0xa1, 0x21, 0x12, 0x48, 0x00, 0x00, 0x00, 0x10},
{0xa1, 0x21, 0x12, 0x48, 0x00, 0x00, 0x00, 0x10},
/*fixme: + 0x12, 0x04*/
@@ -984,6 +1114,113 @@ static const u8 po1030_sensor_param1[][8] = {
{}
};
+static const u8 po2030n_sensor_init[][8] = {
+ {0xa1, 0x6e, 0x1e, 0x1a, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x6e, 0x1f, 0x99, 0x00, 0x00, 0x00, 0x10},
+ {0xdd, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 10ms */
+ {0xa1, 0x6e, 0x1e, 0x0a, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x6e, 0x1f, 0x19, 0x00, 0x00, 0x00, 0x10},
+ {0xdd, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 10ms */
+ {0xa1, 0x6e, 0x20, 0x44, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x6e, 0x04, 0x03, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x6e, 0x05, 0x70, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x6e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x6e, 0x07, 0x25, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x08, 0x00, 0xd0, 0x00, 0x08, 0x10},
+ {0xd1, 0x6e, 0x0c, 0x03, 0x50, 0x01, 0xe8, 0x10},
+ {0xd1, 0x6e, 0x1d, 0x20, 0x0a, 0x19, 0x44, 0x10},
+ {0xd1, 0x6e, 0x21, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x25, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x29, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x35, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x39, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x3d, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x41, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x45, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x49, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x4d, 0x00, 0x00, 0x00, 0xed, 0x10},
+ {0xd1, 0x6e, 0x51, 0x17, 0x4a, 0x2f, 0xc0, 0x10},
+ {0xd1, 0x6e, 0x55, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x59, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x5d, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x61, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x65, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x69, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x6d, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x71, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x75, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x79, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x7d, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x81, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x85, 0x00, 0x00, 0x00, 0x08, 0x10},
+ {0xd1, 0x6e, 0x89, 0x01, 0xe8, 0x00, 0x01, 0x10},
+ {0xa1, 0x6e, 0x8d, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x21, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x25, 0x00, 0x00, 0x00, 0x01, 0x10},
+ {0xd1, 0x6e, 0x29, 0xe6, 0x00, 0xbd, 0x03, 0x10},
+ {0xd1, 0x6e, 0x2d, 0x41, 0x38, 0x68, 0x40, 0x10},
+ {0xd1, 0x6e, 0x31, 0x2b, 0x00, 0x36, 0x00, 0x10},
+ {0xd1, 0x6e, 0x35, 0x30, 0x30, 0x08, 0x00, 0x10},
+ {0xd1, 0x6e, 0x39, 0x00, 0x00, 0x33, 0x06, 0x10},
+ {0xb1, 0x6e, 0x3d, 0x06, 0x02, 0x00, 0x00, 0x10},
+ {}
+};
+static const u8 po2030n_sensor_param1[][8] = {
+ {0xa1, 0x6e, 0x1a, 0x01, 0x00, 0x00, 0x00, 0x10},
+ {0xdd, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 8ms */
+ {0xa1, 0x6e, 0x1b, 0xf4, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x6e, 0x15, 0x04, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x16, 0x50, 0x40, 0x49, 0x40, 0x10},
+/*param2*/
+ {0xa1, 0x6e, 0x1d, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x6e, 0x04, 0x03, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x6e, 0x05, 0x6f, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x6e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x6e, 0x07, 0x25, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x6e, 0x15, 0x04, 0x00, 0x00, 0x00, 0x10},
+ {0xc1, 0x6e, 0x16, 0x52, 0x40, 0x48, 0x00, 0x10},
+/*after start*/
+ {0xa1, 0x6e, 0x15, 0x0f, 0x00, 0x00, 0x00, 0x10},
+ {0xdd, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 5ms */
+ {0xa1, 0x6e, 0x1a, 0x05, 0x00, 0x00, 0x00, 0x10},
+ {0xdd, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 5ms */
+ {0xa1, 0x6e, 0x1b, 0x53, 0x00, 0x00, 0x00, 0x10},
+ {}
+};
+
+static const u8 soi768_sensor_init[][8] = {
+ {0xa1, 0x21, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10}, /* reset */
+ {0xdd, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 96ms */
+ {0xa1, 0x21, 0x12, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x21, 0x13, 0x80, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x21, 0x0f, 0x03, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x21, 0x19, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {}
+};
+static const u8 soi768_sensor_param1[][8] = {
+ {0xa1, 0x21, 0x10, 0x10, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x21, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x21, 0x2e, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xb1, 0x21, 0x01, 0x7f, 0x7f, 0x00, 0x00, 0x10},
+/* */
+/* {0xa1, 0x21, 0x2e, 0x00, 0x00, 0x00, 0x00, 0x10}, */
+/* {0xa1, 0x21, 0x2d, 0x25, 0x00, 0x00, 0x00, 0x10}, */
+ {0xa1, 0x21, 0x2b, 0x00, 0x00, 0x00, 0x00, 0x10},
+/* {0xb1, 0x21, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x10}, */
+ {0xa1, 0x21, 0x02, 0x8d, 0x00, 0x00, 0x00, 0x10},
+/* the next sequence should be used for auto gain */
+ {0xa1, 0x21, 0x00, 0x07, 0x00, 0x00, 0x00, 0x10},
+ /* global gain ? : 07 - change with 0x15 at the end */
+ {0xa1, 0x21, 0x10, 0x3f, 0x00, 0x00, 0x00, 0x10}, /* ???? : 063f */
+ {0xa1, 0x21, 0x04, 0x06, 0x00, 0x00, 0x00, 0x10},
+ {0xb1, 0x21, 0x2d, 0x00, 0x02, 0x00, 0x00, 0x10},
+ /* exposure ? : 0200 - change with 0x1e at the end */
+ {}
+};
+
static const u8 sp80708_sensor_init[][8] = {
{0xa1, 0x18, 0x06, 0xf9, 0x00, 0x00, 0x00, 0x10},
{0xa1, 0x18, 0x09, 0x1f, 0x00, 0x00, 0x00, 0x10},
@@ -1069,18 +1306,21 @@ static const u8 sp80708_sensor_param1[][8] = {
{}
};
-static const u8 (*sensor_init[11])[8] = {
- adcm1700_sensor_init, /* ADCM1700 0 */
- hv7131r_sensor_init, /* HV7131R 1 */
- mi0360_sensor_init, /* MI0360 2 */
- mo4000_sensor_init, /* MO4000 3 */
- mt9v111_sensor_init, /* MT9V111 4 */
- om6802_sensor_init, /* OM6802 5 */
- ov7630_sensor_init, /* OV7630 6 */
- ov7648_sensor_init, /* OV7648 7 */
- ov7660_sensor_init, /* OV7660 8 */
- po1030_sensor_init, /* PO1030 9 */
- sp80708_sensor_init, /* SP80708 10 */
+static const u8 (*sensor_init[])[8] = {
+[SENSOR_ADCM1700] = adcm1700_sensor_init,
+[SENSOR_GC0307] = gc0307_sensor_init,
+[SENSOR_HV7131R] = hv7131r_sensor_init,
+[SENSOR_MI0360] = mi0360_sensor_init,
+[SENSOR_MO4000] = mo4000_sensor_init,
+[SENSOR_MT9V111] = mt9v111_sensor_init,
+[SENSOR_OM6802] = om6802_sensor_init,
+[SENSOR_OV7630] = ov7630_sensor_init,
+[SENSOR_OV7648] = ov7648_sensor_init,
+[SENSOR_OV7660] = ov7660_sensor_init,
+[SENSOR_PO1030] = po1030_sensor_init,
+[SENSOR_PO2030N] = po2030n_sensor_init,
+[SENSOR_SOI768] = soi768_sensor_init,
+[SENSOR_SP80708] = sp80708_sensor_init,
};
/* read <len> bytes to gspca_dev->usb_buf */
@@ -1146,10 +1386,11 @@ static void i2c_w1(struct gspca_dev *gspca_dev, u8 reg, u8 val)
{
struct sd *sd = (struct sd *) gspca_dev;
- PDEBUG(D_USBO, "i2c_w2 [%02x] = %02x", reg, val);
+ PDEBUG(D_USBO, "i2c_w1 [%02x] = %02x", reg, val);
switch (sd->sensor) {
case SENSOR_ADCM1700:
- case SENSOR_OM6802: /* i2c command = a0 (100 kHz) */
+ case SENSOR_OM6802:
+ case SENSOR_GC0307: /* i2c command = a0 (100 kHz) */
gspca_dev->usb_buf[0] = 0x80 | (2 << 4);
break;
default: /* i2c command = a1 (400 kHz) */
@@ -1177,6 +1418,8 @@ static void i2c_w1(struct gspca_dev *gspca_dev, u8 reg, u8 val)
static void i2c_w8(struct gspca_dev *gspca_dev,
const u8 *buffer)
{
+ PDEBUG(D_USBO, "i2c_w8 [%02x] = %02x ..",
+ buffer[2], buffer[3]);
memcpy(gspca_dev->usb_buf, buffer, 8);
usb_control_msg(gspca_dev->dev,
usb_sndctrlpipe(gspca_dev->dev, 0),
@@ -1196,7 +1439,8 @@ static void i2c_r(struct gspca_dev *gspca_dev, u8 reg, int len)
switch (sd->sensor) {
case SENSOR_ADCM1700:
- case SENSOR_OM6802: /* i2c command = 90 (100 kHz) */
+ case SENSOR_OM6802:
+ case SENSOR_GC0307: /* i2c command = 90 (100 kHz) */
mode[0] = 0x80 | 0x10;
break;
default: /* i2c command = 91 (400 kHz) */
@@ -1300,39 +1544,100 @@ static void mi0360_probe(struct gspca_dev *gspca_dev)
}
}
-static void ov7648_probe(struct gspca_dev *gspca_dev)
+static void ov7630_probe(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
+ u16 val;
/* check ov76xx */
reg_w1(gspca_dev, 0x17, 0x62);
reg_w1(gspca_dev, 0x01, 0x08);
sd->i2c_addr = 0x21;
i2c_r(gspca_dev, 0x0a, 2);
- if (gspca_dev->usb_buf[3] == 0x76) { /* ov76xx */
- PDEBUG(D_PROBE, "Sensor ov%02x%02x",
- gspca_dev->usb_buf[3], gspca_dev->usb_buf[4]);
+ val = (gspca_dev->usb_buf[3] << 8) | gspca_dev->usb_buf[4];
+ reg_w1(gspca_dev, 0x01, 0x29);
+ reg_w1(gspca_dev, 0x17, 0x42);
+ if (val == 0x7628) { /* soi768 */
+ sd->sensor = SENSOR_SOI768;
+/*fixme: only valid for 0c45:613e?*/
+ gspca_dev->cam.input_flags =
+ V4L2_IN_ST_VFLIP | V4L2_IN_ST_HFLIP;
+ PDEBUG(D_PROBE, "Sensor soi768");
return;
}
+ PDEBUG(D_PROBE, "Sensor ov%04x", val);
+}
- /* reset */
+static void ov7648_probe(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ u16 val;
+
+ /* check ov76xx */
+ reg_w1(gspca_dev, 0x17, 0x62);
+ reg_w1(gspca_dev, 0x01, 0x08);
+ sd->i2c_addr = 0x21;
+ i2c_r(gspca_dev, 0x0a, 2);
+ val = (gspca_dev->usb_buf[3] << 8) | gspca_dev->usb_buf[4];
reg_w1(gspca_dev, 0x01, 0x29);
reg_w1(gspca_dev, 0x17, 0x42);
+ if ((val & 0xff00) == 0x7600) { /* ov76xx */
+ PDEBUG(D_PROBE, "Sensor ov%04x", val);
+ return;
+ }
/* check po1030 */
reg_w1(gspca_dev, 0x17, 0x62);
reg_w1(gspca_dev, 0x01, 0x08);
sd->i2c_addr = 0x6e;
i2c_r(gspca_dev, 0x00, 2);
- if (gspca_dev->usb_buf[3] == 0x10 /* po1030 */
- && gspca_dev->usb_buf[4] == 0x30) {
+ val = (gspca_dev->usb_buf[3] << 8) | gspca_dev->usb_buf[4];
+ reg_w1(gspca_dev, 0x01, 0x29);
+ reg_w1(gspca_dev, 0x17, 0x42);
+ if (val == 0x1030) { /* po1030 */
PDEBUG(D_PROBE, "Sensor po1030");
sd->sensor = SENSOR_PO1030;
return;
}
- PDEBUG(D_PROBE, "Unknown sensor %02x%02x",
- gspca_dev->usb_buf[3], gspca_dev->usb_buf[4]);
+ PDEBUG(D_PROBE, "Unknown sensor %04x", val);
+}
+
+/* 0c45:6142 sensor may be po2030n, gc0305 or gc0307 */
+static void po2030n_probe(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ u16 val;
+
+ /* check gc0307 */
+ reg_w1(gspca_dev, 0x17, 0x62);
+ reg_w1(gspca_dev, 0x01, 0x08);
+ reg_w1(gspca_dev, 0x02, 0x22);
+ sd->i2c_addr = 0x21;
+ i2c_r(gspca_dev, 0x00, 1);
+ val = gspca_dev->usb_buf[4];
+ reg_w1(gspca_dev, 0x01, 0x29); /* reset */
+ reg_w1(gspca_dev, 0x17, 0x42);
+ if (val == 0x99) { /* gc0307 (?) */
+ PDEBUG(D_PROBE, "Sensor gc0307");
+ sd->sensor = SENSOR_GC0307;
+ return;
+ }
+
+ /* check po2030n */
+ reg_w1(gspca_dev, 0x17, 0x62);
+ reg_w1(gspca_dev, 0x01, 0x0a);
+ sd->i2c_addr = 0x6e;
+ i2c_r(gspca_dev, 0x00, 2);
+ val = (gspca_dev->usb_buf[3] << 8) | gspca_dev->usb_buf[4];
+ reg_w1(gspca_dev, 0x01, 0x29);
+ reg_w1(gspca_dev, 0x17, 0x42);
+ if (val == 0x2030) {
+ PDEBUG(D_PROBE, "Sensor po2030n");
+/* sd->sensor = SENSOR_PO2030N; */
+ } else {
+ PDEBUG(D_PROBE, "Unknown sensor ID %04x", val);
+ }
}
static void bridge_init(struct gspca_dev *gspca_dev,
@@ -1355,8 +1660,11 @@ static void bridge_init(struct gspca_dev *gspca_dev,
reg_w(gspca_dev, 0x08, &sn9c1xx[8], 2);
reg_w(gspca_dev, 0x17, &sn9c1xx[0x17], 5);
switch (sd->sensor) {
+ case SENSOR_GC0307:
case SENSOR_OV7660:
case SENSOR_PO1030:
+ case SENSOR_PO2030N:
+ case SENSOR_SOI768:
case SENSOR_SP80708:
reg9a = reg9a_spec;
break;
@@ -1377,6 +1685,14 @@ static void bridge_init(struct gspca_dev *gspca_dev,
reg_w1(gspca_dev, 0x01, 0x42);
reg_w1(gspca_dev, 0x01, 0x42);
break;
+ case SENSOR_GC0307:
+ msleep(50);
+ reg_w1(gspca_dev, 0x01, 0x61);
+ reg_w1(gspca_dev, 0x17, 0x22);
+ reg_w1(gspca_dev, 0x01, 0x60);
+ reg_w1(gspca_dev, 0x01, 0x40);
+ msleep(50);
+ break;
case SENSOR_MT9V111:
reg_w1(gspca_dev, 0x01, 0x61);
reg_w1(gspca_dev, 0x17, 0x61);
@@ -1414,11 +1730,18 @@ static void bridge_init(struct gspca_dev *gspca_dev,
reg_w1(gspca_dev, 0x01, 0x42);
break;
case SENSOR_PO1030:
+ case SENSOR_SOI768:
reg_w1(gspca_dev, 0x01, 0x61);
reg_w1(gspca_dev, 0x17, 0x20);
reg_w1(gspca_dev, 0x01, 0x60);
reg_w1(gspca_dev, 0x01, 0x40);
break;
+ case SENSOR_PO2030N:
+ reg_w1(gspca_dev, 0x01, 0x63);
+ reg_w1(gspca_dev, 0x17, 0x20);
+ reg_w1(gspca_dev, 0x01, 0x62);
+ reg_w1(gspca_dev, 0x01, 0x42);
+ break;
case SENSOR_OV7660:
/* fall thru */
case SENSOR_SP80708:
@@ -1523,9 +1846,15 @@ static int sd_init(struct gspca_dev *gspca_dev)
case SENSOR_MI0360:
mi0360_probe(gspca_dev);
break;
+ case SENSOR_OV7630:
+ ov7630_probe(gspca_dev);
+ break;
case SENSOR_OV7648:
ov7648_probe(gspca_dev);
break;
+ case SENSOR_PO2030N:
+ po2030n_probe(gspca_dev);
+ break;
}
regGpio[1] = 0x70;
reg_w(gspca_dev, 0x01, regGpio, 2);
@@ -1558,6 +1887,18 @@ static u32 setexposure(struct gspca_dev *gspca_dev,
struct sd *sd = (struct sd *) gspca_dev;
switch (sd->sensor) {
+ case SENSOR_GC0307: {
+ int a, b;
+
+ /* expo = 0..255 -> a = 19..43 */
+ a = 19 + expo * 25 / 256;
+ i2c_w1(gspca_dev, 0x68, a);
+ a -= 12;
+ b = a * a * 4; /* heuristic */
+ i2c_w1(gspca_dev, 0x03, b >> 8);
+ i2c_w1(gspca_dev, 0x04, b);
+ break;
+ }
case SENSOR_HV7131R: {
u8 Expodoit[] =
{ 0xc1, 0x11, 0x25, 0x00, 0x00, 0x00, 0x00, 0x16 };
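
A worked example of the new GC0307 exposure mapping above (stand-alone arithmetic sketch, assuming C integer division; not driver code):

	int expo = 128;				/* mid-range brightness value      */
	int a = 19 + expo * 25 / 256;		/* = 31 (0x1f), written to reg 0x68 */
	int b = (a - 12) * (a - 12) * 4;	/* = 19 * 19 * 4 = 1444 = 0x05a4    */
	/* reg 0x03 gets b >> 8 = 0x05, reg 0x04 gets b & 0xff = 0xa4 */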
@@ -1668,6 +2009,7 @@ static void setbrightness(struct gspca_dev *gspca_dev)
expo = sd->brightness >> 4;
sd->exposure = setexposure(gspca_dev, expo);
break;
+ case SENSOR_GC0307:
case SENSOR_MT9V111:
expo = sd->brightness >> 8;
sd->exposure = setexposure(gspca_dev, expo);
@@ -1703,7 +2045,7 @@ static void setcolors(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
int i, v;
u8 reg8a[12]; /* U & V gains */
- static s16 uv[6] = { /* same as reg84 in signed decimal */
+ static const s16 uv[6] = { /* same as reg84 in signed decimal */
-24, -38, 64, /* UR UG UB */
62, -51, -9 /* VR VG VB */
};
@@ -1744,9 +2086,12 @@ static void setgamma(struct gspca_dev *gspca_dev)
case SENSOR_MT9V111:
gamma_base = gamma_spec_1;
break;
- case SENSOR_SP80708:
+ case SENSOR_GC0307:
gamma_base = gamma_spec_2;
break;
+ case SENSOR_SP80708:
+ gamma_base = gamma_spec_3;
+ break;
default:
gamma_base = gamma_def;
break;
@@ -1937,14 +2282,17 @@ static int sd_start(struct gspca_dev *gspca_dev)
static const u8 CA[] = { 0x28, 0xd8, 0x14, 0xec };
static const u8 CA_adcm1700[] =
{ 0x14, 0xec, 0x0a, 0xf6 };
+ static const u8 CA_po2030n[] =
+ { 0x1e, 0xe2, 0x14, 0xec };
static const u8 CE[] = { 0x32, 0xdd, 0x2d, 0xdd }; /* MI0360 */
+ static const u8 CE_gc0307[] =
+ { 0x32, 0xce, 0x2d, 0xd3 };
static const u8 CE_ov76xx[] =
{ 0x32, 0xdd, 0x32, 0xdd };
+ static const u8 CE_po2030n[] =
+ { 0x14, 0xe7, 0x1e, 0xdd };
/* create the JPEG header */
- sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL);
- if (!sd->jpeg_hdr)
- return -ENOMEM;
jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
0x21); /* JPEG 422 */
jpeg_set_qual(sd->jpeg_hdr, sd->quality);
@@ -1996,6 +2344,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
}
reg_w1(gspca_dev, 0x18, sn9c1xx[0x18]);
switch (sd->sensor) {
+ case SENSOR_GC0307:
+ reg17 = 0xa2;
+ break;
case SENSOR_MT9V111:
reg17 = 0xe0;
break;
@@ -2007,9 +2358,11 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg17 = 0x20;
break;
case SENSOR_OV7660:
+ case SENSOR_SOI768:
reg17 = 0xa0;
break;
case SENSOR_PO1030:
+ case SENSOR_PO2030N:
reg17 = 0xa0;
break;
default:
@@ -2034,12 +2387,18 @@ static int sd_start(struct gspca_dev *gspca_dev)
case SENSOR_SP80708:
reg_w1(gspca_dev, 0x9a, 0x05);
break;
+ case SENSOR_GC0307:
case SENSOR_MT9V111:
reg_w1(gspca_dev, 0x9a, 0x07);
break;
+ case SENSOR_OV7630:
case SENSOR_OV7648:
reg_w1(gspca_dev, 0x9a, 0x0a);
break;
+ case SENSOR_PO2030N:
+ case SENSOR_SOI768:
+ reg_w1(gspca_dev, 0x9a, 0x06);
+ break;
default:
reg_w1(gspca_dev, 0x9a, 0x08);
break;
@@ -2064,6 +2423,11 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg1 = 0x46;
reg17 = 0xe2;
break;
+ case SENSOR_GC0307:
+ init = gc0307_sensor_param1;
+ reg17 = 0xa2;
+ reg1 = 0x44;
+ break;
case SENSOR_MO4000:
if (mode) {
/* reg1 = 0x46; * 320 clk 48Mhz 60fp/s */
@@ -2087,6 +2451,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg17 = 0x64; /* 640 MCKSIZE */
break;
case SENSOR_OV7630:
+ init = ov7630_sensor_param1;
reg17 = 0xe2;
reg1 = 0x44;
break;
@@ -2113,6 +2478,16 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg17 = 0xa2;
reg1 = 0x44;
break;
+ case SENSOR_PO2030N:
+ init = po2030n_sensor_param1;
+ reg1 = 0x46;
+ reg17 = 0xa2;
+ break;
+ case SENSOR_SOI768:
+ init = soi768_sensor_param1;
+ reg1 = 0x44;
+ reg17 = 0xa2;
+ break;
default:
/* case SENSOR_SP80708: */
init = sp80708_sensor_param1;
@@ -2132,17 +2507,33 @@ static int sd_start(struct gspca_dev *gspca_dev)
}
reg_w(gspca_dev, 0xc0, C0, 6);
- if (sd->sensor == SENSOR_ADCM1700)
+ switch (sd->sensor) {
+ case SENSOR_ADCM1700:
+ case SENSOR_GC0307:
+ case SENSOR_SOI768:
reg_w(gspca_dev, 0xca, CA_adcm1700, 4);
- else
+ break;
+ case SENSOR_PO2030N:
+ reg_w(gspca_dev, 0xca, CA_po2030n, 4);
+ break;
+ default:
reg_w(gspca_dev, 0xca, CA, 4);
+ break;
+ }
switch (sd->sensor) {
case SENSOR_ADCM1700:
case SENSOR_OV7630:
case SENSOR_OV7648:
case SENSOR_OV7660:
+ case SENSOR_SOI768:
reg_w(gspca_dev, 0xce, CE_ov76xx, 4);
break;
+ case SENSOR_GC0307:
+ reg_w(gspca_dev, 0xce, CE_gc0307, 4);
+ break;
+ case SENSOR_PO2030N:
+ reg_w(gspca_dev, 0xce, CE_po2030n, 4);
+ break;
default:
reg_w(gspca_dev, 0xce, CE, 4);
/* ?? {0x1e, 0xdd, 0x2d, 0xe7} */
@@ -2161,6 +2552,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
setvflip(sd);
setbrightness(gspca_dev);
setcontrast(gspca_dev);
+ setcolors(gspca_dev);
setautogain(gspca_dev);
setfreq(gspca_dev);
return 0;
@@ -2175,11 +2567,16 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
{ 0xb1, 0x5d, 0x07, 0x00, 0x00, 0x00, 0x00, 0x10 };
static const u8 stopov7648[] =
{ 0xa1, 0x21, 0x76, 0x20, 0x00, 0x00, 0x00, 0x10 };
+ static const u8 stopsoi768[] =
+ { 0xa1, 0x21, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10 };
u8 data;
const u8 *sn9c1xx;
data = 0x0b;
switch (sd->sensor) {
+ case SENSOR_GC0307:
+ data = 0x29;
+ break;
case SENSOR_HV7131R:
i2c_w8(gspca_dev, stophv7131);
data = 0x2b;
@@ -2196,6 +2593,10 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
case SENSOR_PO1030:
data = 0x29;
break;
+ case SENSOR_SOI768:
+ i2c_w8(gspca_dev, stopsoi768);
+ data = 0x29;
+ break;
}
sn9c1xx = sn_tb[sd->sensor];
reg_w1(gspca_dev, 0x01, sn9c1xx[1]);
@@ -2206,13 +2607,6 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
/* reg_w1(gspca_dev, 0xf1, 0x01); */
}
-static void sd_stop0(struct gspca_dev *gspca_dev)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- kfree(sd->jpeg_hdr);
-}
-
static void do_autogain(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -2233,6 +2627,14 @@ static void do_autogain(struct gspca_dev *gspca_dev)
if (delta < luma_mean - luma_delta ||
delta > luma_mean + luma_delta) {
switch (sd->sensor) {
+ case SENSOR_GC0307:
+ expotimes = sd->exposure;
+ expotimes += (luma_mean - delta) >> 6;
+ if (expotimes < 0)
+ expotimes = 0;
+ sd->exposure = setexposure(gspca_dev,
+ (unsigned int) expotimes);
+ break;
case SENSOR_HV7131R:
expotimes = sd->exposure >> 8;
expotimes += (luma_mean - delta) >> 4;
@@ -2584,7 +2986,6 @@ static const struct sd_desc sd_desc = {
.init = sd_init,
.start = sd_start,
.stopN = sd_stopN,
- .stop0 = sd_stop0,
.pkt_scan = sd_pkt_scan,
.dq_callback = do_autogain,
.get_jcomp = sd_get_jcomp,
@@ -2656,7 +3057,7 @@ static const __devinitdata struct usb_device_id device_table[] = {
#endif
{USB_DEVICE(0x0c45, 0x613c), BS(SN9C120, HV7131R)},
{USB_DEVICE(0x0c45, 0x613e), BS(SN9C120, OV7630)},
-/* {USB_DEVICE(0x0c45, 0x6142), BS(SN9C120, PO2030N)}, *sn9c120b*/
+ {USB_DEVICE(0x0c45, 0x6142), BS(SN9C120, PO2030N)}, /*sn9c120b*/
{USB_DEVICE(0x0c45, 0x6143), BS(SN9C120, SP80708)}, /*sn9c120b*/
{USB_DEVICE(0x0c45, 0x6148), BS(SN9C120, OM6802)}, /*sn9c120b*/
{USB_DEVICE(0x0c45, 0x614a), BS(SN9C120, ADCM1700)}, /*sn9c120b*/
diff --git a/drivers/media/video/gspca/spca561.c b/drivers/media/video/gspca/spca561.c
index b9c80e2103b9..7bb2355005dc 100644
--- a/drivers/media/video/gspca/spca561.c
+++ b/drivers/media/video/gspca/spca561.c
@@ -22,6 +22,7 @@
#define MODULE_NAME "spca561"
+#include <linux/input.h>
#include "gspca.h"
MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>");
@@ -249,9 +250,9 @@ static const __u16 Pb100_2map8300[][2] = {
};
static const __u16 spca561_161rev12A_data1[][2] = {
- {0x29, 0x8118}, /* white balance - was 21 */
- {0x08, 0x8114}, /* white balance - was 01 */
- {0x0e, 0x8112}, /* white balance - was 00 */
+ {0x29, 0x8118}, /* Control register (various enable bits) */
+ {0x08, 0x8114}, /* GPIO: Led off */
+ {0x0e, 0x8112}, /* 0x0e stream off 0x3e stream on */
{0x00, 0x8102}, /* white balance - new */
{0x92, 0x8804},
{0x04, 0x8802}, /* windows uses 08 */
@@ -263,15 +264,11 @@ static const __u16 spca561_161rev12A_data2[][2] = {
{0x07, 0x8601},
{0x07, 0x8602},
{0x04, 0x8501},
- {0x21, 0x8118},
{0x07, 0x8201}, /* windows uses 02 */
{0x08, 0x8200},
{0x01, 0x8200},
- {0x00, 0x8114},
- {0x01, 0x8114}, /* windows uses 00 */
-
{0x90, 0x8604},
{0x00, 0x8605},
{0xb0, 0x8603},
@@ -302,6 +299,9 @@ static const __u16 spca561_161rev12A_data2[][2] = {
{0xf0, 0x8505},
{0x32, 0x850a},
/* {0x99, 0x8700}, * - white balance - new (removed) */
+ /* HDG we used to do this in stop0, making the init state and the state
+ after a start / stop different, so do this here instead. */
+ {0x29, 0x8118},
{}
};
@@ -645,6 +645,9 @@ static int sd_start_12a(struct gspca_dev *gspca_dev)
setwhite(gspca_dev);
setgain(gspca_dev);
setexposure(gspca_dev);
+
+ /* Led ON (bit 3 -> 0) */
+ reg_w_val(gspca_dev->dev, 0x8114, 0x00);
return 0;
}
static int sd_start_72a(struct gspca_dev *gspca_dev)
@@ -691,26 +694,14 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
if (sd->chip_revision == Rev012A) {
reg_w_val(gspca_dev->dev, 0x8112, 0x0e);
+ /* Led Off (bit 3 -> 1) */
+ reg_w_val(gspca_dev->dev, 0x8114, 0x08);
} else {
reg_w_val(gspca_dev->dev, 0x8112, 0x20);
/* reg_w_val(gspca_dev->dev, 0x8102, 0x00); ?? */
}
}
-/* called on streamoff with alt 0 and on disconnect */
-static void sd_stop0(struct gspca_dev *gspca_dev)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- if (!gspca_dev->present)
- return;
- if (sd->chip_revision == Rev012A) {
- reg_w_val(gspca_dev->dev, 0x8118, 0x29);
- reg_w_val(gspca_dev->dev, 0x8114, 0x08);
- }
-/* reg_w_val(gspca_dev->dev, 0x8114, 0); */
-}
-
static void do_autogain(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -788,6 +779,23 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
switch (*data++) { /* sequence number */
case 0: /* start of frame */
gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
+
+ /* This should never happen */
+ if (len < 2) {
+ PDEBUG(D_ERR, "Short SOF packet, ignoring");
+ gspca_dev->last_packet_type = DISCARD_PACKET;
+ return;
+ }
+
+#ifdef CONFIG_INPUT
+ if (data[0] & 0x20) {
+ input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1);
+ input_sync(gspca_dev->input_dev);
+ input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0);
+ input_sync(gspca_dev->input_dev);
+ }
+#endif
+
if (data[1] & 0x10) {
/* compressed bayer */
gspca_frame_add(gspca_dev, FIRST_PACKET, data, len);
@@ -1028,8 +1036,10 @@ static const struct sd_desc sd_desc_12a = {
.init = sd_init_12a,
.start = sd_start_12a,
.stopN = sd_stopN,
- .stop0 = sd_stop0,
.pkt_scan = sd_pkt_scan,
+#ifdef CONFIG_INPUT
+ .other_input = 1,
+#endif
};
static const struct sd_desc sd_desc_72a = {
.name = MODULE_NAME,
@@ -1039,9 +1049,11 @@ static const struct sd_desc sd_desc_72a = {
.init = sd_init_72a,
.start = sd_start_72a,
.stopN = sd_stopN,
- .stop0 = sd_stop0,
.pkt_scan = sd_pkt_scan,
.dq_callback = do_autogain,
+#ifdef CONFIG_INPUT
+ .other_input = 1,
+#endif
};
static const struct sd_desc *sd_desc[2] = {
&sd_desc_12a,
diff --git a/drivers/media/video/gspca/t613.c b/drivers/media/video/gspca/t613.c
index 668a7536af90..63014372adbc 100644
--- a/drivers/media/video/gspca/t613.c
+++ b/drivers/media/video/gspca/t613.c
@@ -44,7 +44,10 @@ struct sd {
u8 gamma;
u8 sharpness;
u8 freq;
- u8 whitebalance;
+ u8 red_balance; /* split balance */
+ u8 blue_balance;
+ u8 global_gain; /* aka gain */
+ u8 whitebalance; /* set default r/g/b and activate */
u8 mirror;
u8 effect;
@@ -70,8 +73,17 @@ static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val);
+
+
static int sd_setwhitebalance(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getwhitebalance(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setblue_balance(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getblue_balance(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setred_balance(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getred_balance(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setglobal_gain(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getglobal_gain(struct gspca_dev *gspca_dev, __s32 *val);
+
static int sd_setflip(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getflip(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_seteffect(struct gspca_dev *gspca_dev, __s32 val);
@@ -79,6 +91,7 @@ static int sd_geteffect(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_querymenu(struct gspca_dev *gspca_dev,
struct v4l2_querymenu *menu);
+
static const struct ctrl sd_ctrls[] = {
{
{
@@ -139,7 +152,7 @@ static const struct ctrl sd_ctrls[] = {
},
{
{
- .id = V4L2_CID_GAIN, /* here, i activate only the lowlight,
+ .id = V4L2_CID_BACKLIGHT_COMPENSATION, /* Activates lowlight,
* some apps dont bring up the
* backligth_compensation control) */
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -183,7 +196,7 @@ static const struct ctrl sd_ctrls[] = {
{
{
- .id = V4L2_CID_WHITE_BALANCE_TEMPERATURE,
+ .id = V4L2_CID_AUTO_WHITE_BALANCE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "White Balance",
.minimum = 0,
@@ -223,6 +236,48 @@ static const struct ctrl sd_ctrls[] = {
.set = sd_seteffect,
.get = sd_geteffect
},
+ {
+ {
+ .id = V4L2_CID_BLUE_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Blue Balance",
+ .minimum = 0x10,
+ .maximum = 0x40,
+ .step = 1,
+#define BLUE_BALANCE_DEF 0x20
+ .default_value = BLUE_BALANCE_DEF,
+ },
+ .set = sd_setblue_balance,
+ .get = sd_getblue_balance,
+ },
+ {
+ {
+ .id = V4L2_CID_RED_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Red Balance",
+ .minimum = 0x10,
+ .maximum = 0x40,
+ .step = 1,
+#define RED_BALANCE_DEF 0x20
+ .default_value = RED_BALANCE_DEF,
+ },
+ .set = sd_setred_balance,
+ .get = sd_getred_balance,
+ },
+ {
+ {
+ .id = V4L2_CID_GAIN,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Gain",
+ .minimum = 0x10,
+ .maximum = 0x40,
+ .step = 1,
+#define global_gain_DEF 0x20
+ .default_value = global_gain_DEF,
+ },
+ .set = sd_setglobal_gain,
+ .get = sd_getglobal_gain,
+ },
};
static char *effects_control[] = {
@@ -523,6 +578,10 @@ static void reg_w_buf(struct gspca_dev *gspca_dev,
u8 *tmpbuf;
tmpbuf = kmalloc(len, GFP_KERNEL);
+ if (!tmpbuf) {
+ err("Out of memory");
+ return;
+ }
memcpy(tmpbuf, buffer, len);
usb_control_msg(gspca_dev->dev,
usb_sndctrlpipe(gspca_dev->dev, 0),
@@ -542,10 +601,15 @@ static void reg_w_ixbuf(struct gspca_dev *gspca_dev,
int i;
u8 *p, *tmpbuf;
- if (len * 2 <= USB_BUF_SZ)
+ if (len * 2 <= USB_BUF_SZ) {
p = tmpbuf = gspca_dev->usb_buf;
- else
+ } else {
p = tmpbuf = kmalloc(len * 2, GFP_KERNEL);
+ if (!tmpbuf) {
+ err("Out of memory");
+ return;
+ }
+ }
i = len;
while (--i >= 0) {
*p++ = reg++;
@@ -642,6 +706,10 @@ static int sd_config(struct gspca_dev *gspca_dev,
sd->whitebalance = WHITE_BALANCE_DEF;
sd->sharpness = SHARPNESS_DEF;
sd->effect = EFFECTS_DEF;
+ sd->red_balance = RED_BALANCE_DEF;
+ sd->blue_balance = BLUE_BALANCE_DEF;
+ sd->global_gain = global_gain_DEF;
+
return 0;
}
@@ -693,18 +761,40 @@ static void setgamma(struct gspca_dev *gspca_dev)
reg_w_ixbuf(gspca_dev, 0x90,
gamma_table[sd->gamma], sizeof gamma_table[0]);
}
+static void setglobalgain(struct gspca_dev *gspca_dev)
+{
-static void setwhitebalance(struct gspca_dev *gspca_dev)
+ struct sd *sd = (struct sd *) gspca_dev;
+ reg_w(gspca_dev, (sd->red_balance << 8) + 0x87);
+ reg_w(gspca_dev, (sd->blue_balance << 8) + 0x88);
+ reg_w(gspca_dev, (sd->global_gain << 8) + 0x89);
+}
+
+/* Generic fnc for r/b balance, exposure and whitebalance */
+static void setbalance(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- u8 white_balance[8] =
- {0x87, 0x20, 0x88, 0x20, 0x89, 0x20, 0x80, 0x38};
+ /* on whitebalance leave default values */
+ if (sd->whitebalance) {
+ reg_w(gspca_dev, 0x3c80);
+ } else {
+ reg_w(gspca_dev, 0x3880);
+ /* should we wait here.. */
+ /* update and reset 'global gain' with webcam parameters */
+ sd->red_balance = reg_r(gspca_dev, 0x0087);
+ sd->blue_balance = reg_r(gspca_dev, 0x0088);
+ sd->global_gain = reg_r(gspca_dev, 0x0089);
+ setglobalgain(gspca_dev);
+ }
+
+}
+
- if (sd->whitebalance)
- white_balance[7] = 0x3c;
- reg_w_buf(gspca_dev, white_balance, sizeof white_balance);
+static void setwhitebalance(struct gspca_dev *gspca_dev)
+{
+ setbalance(gspca_dev);
}
static void setsharpness(struct gspca_dev *gspca_dev)
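
The new red/blue balance and gain helpers above follow the t613 convention of packing the value into the high byte and the register index into the low byte of the word passed to reg_w(). A minimal sketch of that packing, with a hypothetical helper name used only for illustration:

	/* illustrative only: build the word written by setglobalgain() and
	 * the balance setters, e.g. value 0x20 to reg 0x87 -> 0x2087 */
	static u16 t613_pack(u8 reg_index, u8 value)
	{
		return (value << 8) | reg_index;
	}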
@@ -1018,6 +1108,66 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
+
+static int sd_setblue_balance(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->blue_balance = val;
+ if (gspca_dev->streaming)
+ reg_w(gspca_dev, (val << 8) + 0x88);
+ return 0;
+}
+
+static int sd_getblue_balance(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->blue_balance;
+ return 0;
+}
+
+static int sd_setred_balance(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->red_balance = val;
+ if (gspca_dev->streaming)
+ reg_w(gspca_dev, (val << 8) + 0x87);
+
+ return 0;
+}
+
+static int sd_getred_balance(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->red_balance;
+ return 0;
+}
+
+
+
+static int sd_setglobal_gain(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->global_gain = val;
+ if (gspca_dev->streaming)
+ setglobalgain(gspca_dev);
+
+ return 0;
+}
+
+static int sd_getglobal_gain(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->global_gain;
+ return 0;
+}
+
+
static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
diff --git a/drivers/media/video/gspca/vc032x.c b/drivers/media/video/gspca/vc032x.c
index 4989f9afb46e..732c3dfe46ff 100644
--- a/drivers/media/video/gspca/vc032x.c
+++ b/drivers/media/video/gspca/vc032x.c
@@ -1,9 +1,9 @@
/*
- * Z-star vc0321 library
- * Copyright (C) 2006 Koninski Artur takeshi87@o2.pl
- * Copyright (C) 2006 Michel Xhaard
+ * Z-star vc0321 library
*
- * V4L2 by Jean-Francois Moine <http://moinejf.free.fr>
+ * Copyright (C) 2009-2010 Jean-François Moine <http://moinejf.free.fr>
+ * Copyright (C) 2006 Koninski Artur takeshi87@o2.pl
+ * Copyright (C) 2006 Michel Xhaard
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -24,7 +24,7 @@
#include "gspca.h"
-MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>");
+MODULE_AUTHOR("Jean-François Moine <http://moinejf.free.fr>");
MODULE_DESCRIPTION("GSPCA/VC032X USB Camera Driver");
MODULE_LICENSE("GPL");
@@ -1971,268 +1971,489 @@ static const u8 ov7660_NoFliker[][4] = {
{}
};
-static const u8 ov7670_initVGA_JPG[][4] = {
+static const u8 ov7670_InitVGA[][4] = {
{0xb3, 0x01, 0x05, 0xcc},
- {0x00, 0x00, 0x30, 0xdd}, {0xb0, 0x03, 0x19, 0xcc},
+ {0x00, 0x00, 0x30, 0xdd},
+ {0xb0, 0x03, 0x19, 0xcc},
+ {0x00, 0x00, 0x10, 0xdd},
+ {0xb0, 0x04, 0x02, 0xcc},
{0x00, 0x00, 0x10, 0xdd},
- {0xb0, 0x04, 0x02, 0xcc}, {0x00, 0x00, 0x10, 0xdd},
- {0xb3, 0x00, 0x66, 0xcc}, {0xb3, 0x00, 0x67, 0xcc},
+ {0xb3, 0x00, 0x66, 0xcc},
+ {0xb3, 0x00, 0x67, 0xcc},
+ {0xb0, 0x16, 0x01, 0xcc},
{0xb3, 0x35, 0xa1, 0xcc}, /* i2c add: 21 */
{0xb3, 0x34, 0x01, 0xcc},
- {0xb3, 0x05, 0x01, 0xcc}, {0xb3, 0x06, 0x01, 0xcc},
- {0xb3, 0x08, 0x01, 0xcc}, {0xb3, 0x09, 0x0c, 0xcc},
- {0xb3, 0x02, 0x02, 0xcc}, {0xb3, 0x03, 0x1f, 0xcc},
- {0xb3, 0x14, 0x00, 0xcc}, {0xb3, 0x15, 0x00, 0xcc},
- {0xb3, 0x16, 0x02, 0xcc}, {0xb3, 0x17, 0x7f, 0xcc},
- {0xb3, 0x04, 0x05, 0xcc}, {0xb3, 0x20, 0x00, 0xcc},
- {0xb3, 0x21, 0x00, 0xcc}, {0xb3, 0x22, 0x01, 0xcc},
- {0xb3, 0x23, 0xe0, 0xcc}, {0xbc, 0x00, 0x41, 0xcc},
- {0xbc, 0x01, 0x01, 0xcc}, {0x00, 0x12, 0x80, 0xaa},
- {0x00, 0x00, 0x20, 0xdd}, {0x00, 0x12, 0x00, 0xaa},
- {0x00, 0x11, 0x40, 0xaa}, {0x00, 0x6b, 0x0a, 0xaa},
- {0x00, 0x3a, 0x04, 0xaa}, {0x00, 0x40, 0xc0, 0xaa},
- {0x00, 0x8c, 0x00, 0xaa}, {0x00, 0x7a, 0x29, 0xaa},
- {0x00, 0x7b, 0x0e, 0xaa}, {0x00, 0x7c, 0x1a, 0xaa},
- {0x00, 0x7d, 0x31, 0xaa}, {0x00, 0x7e, 0x53, 0xaa},
- {0x00, 0x7f, 0x60, 0xaa}, {0x00, 0x80, 0x6b, 0xaa},
- {0x00, 0x81, 0x73, 0xaa}, {0x00, 0x82, 0x7b, 0xaa},
- {0x00, 0x83, 0x82, 0xaa}, {0x00, 0x84, 0x89, 0xaa},
- {0x00, 0x85, 0x96, 0xaa}, {0x00, 0x86, 0xa1, 0xaa},
- {0x00, 0x87, 0xb7, 0xaa}, {0x00, 0x88, 0xcc, 0xaa},
- {0x00, 0x89, 0xe1, 0xaa}, {0x00, 0x13, 0xe0, 0xaa},
- {0x00, 0x00, 0x00, 0xaa}, {0x00, 0x10, 0x00, 0xaa},
- {0x00, 0x0d, 0x40, 0xaa}, {0x00, 0x14, 0x28, 0xaa},
- {0x00, 0xa5, 0x05, 0xaa}, {0x00, 0xab, 0x07, 0xaa},
- {0x00, 0x24, 0x95, 0xaa}, {0x00, 0x25, 0x33, 0xaa},
- {0x00, 0x26, 0xe3, 0xaa}, {0x00, 0x9f, 0x88, 0xaa},
- {0x00, 0xa0, 0x78, 0xaa}, {0x00, 0x55, 0x90, 0xaa},
- {0x00, 0xa1, 0x03, 0xaa}, {0x00, 0xa6, 0xe0, 0xaa},
- {0x00, 0xa7, 0xd8, 0xaa}, {0x00, 0xa8, 0xf0, 0xaa},
- {0x00, 0xa9, 0x90, 0xaa}, {0x00, 0xaa, 0x14, 0xaa},
- {0x00, 0x13, 0xe5, 0xaa}, {0x00, 0x0e, 0x61, 0xaa},
- {0x00, 0x0f, 0x4b, 0xaa}, {0x00, 0x16, 0x02, 0xaa},
+ {0xb3, 0x05, 0x01, 0xcc},
+ {0xb3, 0x06, 0x01, 0xcc},
+ {0xb3, 0x08, 0x01, 0xcc},
+ {0xb3, 0x09, 0x0c, 0xcc},
+ {0xb3, 0x02, 0x02, 0xcc},
+ {0xb3, 0x03, 0x1f, 0xcc},
+ {0xb3, 0x14, 0x00, 0xcc},
+ {0xb3, 0x15, 0x00, 0xcc},
+ {0xb3, 0x16, 0x02, 0xcc},
+ {0xb3, 0x17, 0x7f, 0xcc},
+ {0xb3, 0x04, 0x05, 0xcc},
+ {0xb3, 0x20, 0x00, 0xcc},
+ {0xb3, 0x21, 0x00, 0xcc},
+ {0xb3, 0x22, 0x01, 0xcc},
+ {0xb3, 0x23, 0xe0, 0xcc},
+ {0xbc, 0x00, 0x41, 0xcc},
+ {0xbc, 0x01, 0x01, 0xcc},
+ {0x00, 0x12, 0x80, 0xaa},
+ {0x00, 0x00, 0x20, 0xdd},
+ {0x00, 0x12, 0x00, 0xaa},
+ {0x00, 0x11, 0x40, 0xaa},
+ {0x00, 0x6b, 0x0a, 0xaa},
+ {0x00, 0x3a, 0x04, 0xaa},
+ {0x00, 0x40, 0xc0, 0xaa},
+ {0x00, 0x8c, 0x00, 0xaa},
+ {0x00, 0x7a, 0x29, 0xaa},
+ {0x00, 0x7b, 0x0e, 0xaa},
+ {0x00, 0x7c, 0x1a, 0xaa},
+ {0x00, 0x7d, 0x31, 0xaa},
+ {0x00, 0x7e, 0x53, 0xaa},
+ {0x00, 0x7f, 0x60, 0xaa},
+ {0x00, 0x80, 0x6b, 0xaa},
+ {0x00, 0x81, 0x73, 0xaa},
+ {0x00, 0x82, 0x7b, 0xaa},
+ {0x00, 0x83, 0x82, 0xaa},
+ {0x00, 0x84, 0x89, 0xaa},
+ {0x00, 0x85, 0x96, 0xaa},
+ {0x00, 0x86, 0xa1, 0xaa},
+ {0x00, 0x87, 0xb7, 0xaa},
+ {0x00, 0x88, 0xcc, 0xaa},
+ {0x00, 0x89, 0xe1, 0xaa},
+ {0x00, 0x13, 0xe0, 0xaa},
+ {0x00, 0x00, 0x00, 0xaa},
+ {0x00, 0x10, 0x00, 0xaa},
+ {0x00, 0x0d, 0x40, 0xaa},
+ {0x00, 0x14, 0x28, 0xaa},
+ {0x00, 0xa5, 0x05, 0xaa},
+ {0x00, 0xab, 0x07, 0xaa},
+ {0x00, 0x24, 0x95, 0xaa},
+ {0x00, 0x25, 0x33, 0xaa},
+ {0x00, 0x26, 0xe3, 0xaa},
+ {0x00, 0x9f, 0x88, 0xaa},
+ {0x00, 0xa0, 0x78, 0xaa},
+ {0x00, 0x55, 0x90, 0xaa},
+ {0x00, 0xa1, 0x03, 0xaa},
+ {0x00, 0xa6, 0xe0, 0xaa},
+ {0x00, 0xa7, 0xd8, 0xaa},
+ {0x00, 0xa8, 0xf0, 0xaa},
+ {0x00, 0xa9, 0x90, 0xaa},
+ {0x00, 0xaa, 0x14, 0xaa},
+ {0x00, 0x13, 0xe5, 0xaa},
+ {0x00, 0x0e, 0x61, 0xaa},
+ {0x00, 0x0f, 0x4b, 0xaa},
+ {0x00, 0x16, 0x02, 0xaa},
{0x00, 0x1e, 0x07, 0xaa}, /* MVFP */
{0x00, 0x21, 0x02, 0xaa},
- {0x00, 0x22, 0x91, 0xaa}, {0x00, 0x29, 0x07, 0xaa},
- {0x00, 0x33, 0x0b, 0xaa}, {0x00, 0x35, 0x0b, 0xaa},
- {0x00, 0x37, 0x1d, 0xaa}, {0x00, 0x38, 0x71, 0xaa},
- {0x00, 0x39, 0x2a, 0xaa}, {0x00, 0x3c, 0x78, 0xaa},
- {0x00, 0x4d, 0x40, 0xaa}, {0x00, 0x4e, 0x20, 0xaa},
- {0x00, 0x74, 0x19, 0xaa}, {0x00, 0x8d, 0x4f, 0xaa},
- {0x00, 0x8e, 0x00, 0xaa}, {0x00, 0x8f, 0x00, 0xaa},
- {0x00, 0x90, 0x00, 0xaa}, {0x00, 0x91, 0x00, 0xaa},
- {0x00, 0x96, 0x00, 0xaa}, {0x00, 0x9a, 0x80, 0xaa},
- {0x00, 0xb0, 0x84, 0xaa}, {0x00, 0xb1, 0x0c, 0xaa},
- {0x00, 0xb2, 0x0e, 0xaa}, {0x00, 0xb3, 0x82, 0xaa},
- {0x00, 0xb8, 0x0a, 0xaa}, {0x00, 0x43, 0x14, 0xaa},
- {0x00, 0x44, 0xf0, 0xaa}, {0x00, 0x45, 0x45, 0xaa},
- {0x00, 0x46, 0x63, 0xaa}, {0x00, 0x47, 0x2d, 0xaa},
- {0x00, 0x48, 0x46, 0xaa}, {0x00, 0x59, 0x88, 0xaa},
- {0x00, 0x5a, 0xa0, 0xaa}, {0x00, 0x5b, 0xc6, 0xaa},
- {0x00, 0x5c, 0x7d, 0xaa}, {0x00, 0x5d, 0x5f, 0xaa},
- {0x00, 0x5e, 0x19, 0xaa}, {0x00, 0x6c, 0x0a, 0xaa},
- {0x00, 0x6d, 0x55, 0xaa}, {0x00, 0x6e, 0x11, 0xaa},
- {0x00, 0x6f, 0x9e, 0xaa}, {0x00, 0x69, 0x00, 0xaa},
- {0x00, 0x6a, 0x40, 0xaa}, {0x00, 0x01, 0x40, 0xaa},
- {0x00, 0x02, 0x40, 0xaa}, {0x00, 0x13, 0xe7, 0xaa},
- {0x00, 0x5f, 0xf0, 0xaa}, {0x00, 0x60, 0xf0, 0xaa},
- {0x00, 0x61, 0xf0, 0xaa}, {0x00, 0x27, 0xa0, 0xaa},
- {0x00, 0x28, 0x80, 0xaa}, {0x00, 0x2c, 0x90, 0xaa},
- {0x00, 0x4f, 0x66, 0xaa}, {0x00, 0x50, 0x66, 0xaa},
- {0x00, 0x51, 0x00, 0xaa}, {0x00, 0x52, 0x22, 0xaa},
- {0x00, 0x53, 0x5e, 0xaa}, {0x00, 0x54, 0x80, 0xaa},
- {0x00, 0x58, 0x9e, 0xaa}, {0x00, 0x41, 0x08, 0xaa},
- {0x00, 0x3f, 0x00, 0xaa}, {0x00, 0x75, 0x85, 0xaa},
- {0x00, 0x76, 0xe1, 0xaa}, {0x00, 0x4c, 0x00, 0xaa},
- {0x00, 0x77, 0x0a, 0xaa}, {0x00, 0x3d, 0x88, 0xaa},
- {0x00, 0x4b, 0x09, 0xaa}, {0x00, 0xc9, 0x60, 0xaa},
- {0x00, 0x41, 0x38, 0xaa}, {0x00, 0x62, 0x30, 0xaa},
- {0x00, 0x63, 0x30, 0xaa}, {0x00, 0x64, 0x08, 0xaa},
- {0x00, 0x94, 0x07, 0xaa}, {0x00, 0x95, 0x0b, 0xaa},
- {0x00, 0x65, 0x00, 0xaa}, {0x00, 0x66, 0x05, 0xaa},
- {0x00, 0x56, 0x50, 0xaa}, {0x00, 0x34, 0x11, 0xaa},
- {0x00, 0xa4, 0x88, 0xaa}, {0x00, 0x96, 0x00, 0xaa},
- {0x00, 0x97, 0x30, 0xaa}, {0x00, 0x98, 0x20, 0xaa},
- {0x00, 0x99, 0x30, 0xaa}, {0x00, 0x9a, 0x84, 0xaa},
- {0x00, 0x9b, 0x29, 0xaa}, {0x00, 0x9c, 0x03, 0xaa},
- {0x00, 0x78, 0x04, 0xaa}, {0x00, 0x79, 0x01, 0xaa},
- {0x00, 0xc8, 0xf0, 0xaa}, {0x00, 0x79, 0x0f, 0xaa},
- {0x00, 0xc8, 0x00, 0xaa}, {0x00, 0x79, 0x10, 0xaa},
- {0x00, 0xc8, 0x7e, 0xaa}, {0x00, 0x79, 0x0a, 0xaa},
- {0x00, 0xc8, 0x80, 0xaa}, {0x00, 0x79, 0x0b, 0xaa},
- {0x00, 0xc8, 0x01, 0xaa}, {0x00, 0x79, 0x0c, 0xaa},
- {0x00, 0xc8, 0x0f, 0xaa}, {0x00, 0x79, 0x0d, 0xaa},
- {0x00, 0xc8, 0x20, 0xaa}, {0x00, 0x79, 0x09, 0xaa},
- {0x00, 0xc8, 0x80, 0xaa}, {0x00, 0x79, 0x02, 0xaa},
- {0x00, 0xc8, 0xc0, 0xaa}, {0x00, 0x79, 0x03, 0xaa},
- {0x00, 0xc8, 0x40, 0xaa}, {0x00, 0x79, 0x05, 0xaa},
- {0x00, 0xc8, 0x30, 0xaa}, {0x00, 0x79, 0x26, 0xaa},
- {0x00, 0x11, 0x40, 0xaa}, {0x00, 0x3a, 0x04, 0xaa},
- {0x00, 0x12, 0x00, 0xaa}, {0x00, 0x40, 0xc0, 0xaa},
- {0x00, 0x8c, 0x00, 0xaa}, {0x00, 0x17, 0x14, 0xaa},
- {0x00, 0x18, 0x02, 0xaa}, {0x00, 0x32, 0x92, 0xaa},
- {0x00, 0x19, 0x02, 0xaa}, {0x00, 0x1a, 0x7a, 0xaa},
- {0x00, 0x03, 0x0a, 0xaa}, {0x00, 0x0c, 0x00, 0xaa},
- {0x00, 0x3e, 0x00, 0xaa}, {0x00, 0x70, 0x3a, 0xaa},
- {0x00, 0x71, 0x35, 0xaa}, {0x00, 0x72, 0x11, 0xaa},
- {0x00, 0x73, 0xf0, 0xaa}, {0x00, 0xa2, 0x02, 0xaa},
- {0x00, 0xb1, 0x00, 0xaa}, {0x00, 0xb1, 0x0c, 0xaa},
+ {0x00, 0x22, 0x91, 0xaa},
+ {0x00, 0x29, 0x07, 0xaa},
+ {0x00, 0x33, 0x0b, 0xaa},
+ {0x00, 0x35, 0x0b, 0xaa},
+ {0x00, 0x37, 0x1d, 0xaa},
+ {0x00, 0x38, 0x71, 0xaa},
+ {0x00, 0x39, 0x2a, 0xaa},
+ {0x00, 0x3c, 0x78, 0xaa},
+ {0x00, 0x4d, 0x40, 0xaa},
+ {0x00, 0x4e, 0x20, 0xaa},
+ {0x00, 0x74, 0x19, 0xaa},
+ {0x00, 0x8d, 0x4f, 0xaa},
+ {0x00, 0x8e, 0x00, 0xaa},
+ {0x00, 0x8f, 0x00, 0xaa},
+ {0x00, 0x90, 0x00, 0xaa},
+ {0x00, 0x91, 0x00, 0xaa},
+ {0x00, 0x96, 0x00, 0xaa},
+ {0x00, 0x9a, 0x80, 0xaa},
+ {0x00, 0xb0, 0x84, 0xaa},
+ {0x00, 0xb1, 0x0c, 0xaa},
+ {0x00, 0xb2, 0x0e, 0xaa},
+ {0x00, 0xb3, 0x82, 0xaa},
+ {0x00, 0xb8, 0x0a, 0xaa},
+ {0x00, 0x43, 0x14, 0xaa},
+ {0x00, 0x44, 0xf0, 0xaa},
+ {0x00, 0x45, 0x45, 0xaa},
+ {0x00, 0x46, 0x63, 0xaa},
+ {0x00, 0x47, 0x2d, 0xaa},
+ {0x00, 0x48, 0x46, 0xaa},
+ {0x00, 0x59, 0x88, 0xaa},
+ {0x00, 0x5a, 0xa0, 0xaa},
+ {0x00, 0x5b, 0xc6, 0xaa},
+ {0x00, 0x5c, 0x7d, 0xaa},
+ {0x00, 0x5d, 0x5f, 0xaa},
+ {0x00, 0x5e, 0x19, 0xaa},
+ {0x00, 0x6c, 0x0a, 0xaa},
+ {0x00, 0x6d, 0x55, 0xaa},
+ {0x00, 0x6e, 0x11, 0xaa},
+ {0x00, 0x6f, 0x9e, 0xaa},
+ {0x00, 0x69, 0x00, 0xaa},
+ {0x00, 0x6a, 0x40, 0xaa},
+ {0x00, 0x01, 0x40, 0xaa},
+ {0x00, 0x02, 0x40, 0xaa},
+ {0x00, 0x13, 0xe7, 0xaa},
+ {0x00, 0x5f, 0xf0, 0xaa},
+ {0x00, 0x60, 0xf0, 0xaa},
+ {0x00, 0x61, 0xf0, 0xaa},
+ {0x00, 0x27, 0xa0, 0xaa},
+ {0x00, 0x28, 0x80, 0xaa},
+ {0x00, 0x2c, 0x90, 0xaa},
+ {0x00, 0x4f, 0x66, 0xaa},
+ {0x00, 0x50, 0x66, 0xaa},
+ {0x00, 0x51, 0x00, 0xaa},
+ {0x00, 0x52, 0x22, 0xaa},
+ {0x00, 0x53, 0x5e, 0xaa},
+ {0x00, 0x54, 0x80, 0xaa},
+ {0x00, 0x58, 0x9e, 0xaa},
+ {0x00, 0x41, 0x08, 0xaa},
+ {0x00, 0x3f, 0x00, 0xaa},
+ {0x00, 0x75, 0x85, 0xaa},
+ {0x00, 0x76, 0xe1, 0xaa},
+ {0x00, 0x4c, 0x00, 0xaa},
+ {0x00, 0x77, 0x0a, 0xaa},
+ {0x00, 0x3d, 0x88, 0xaa},
+ {0x00, 0x4b, 0x09, 0xaa},
+ {0x00, 0xc9, 0x60, 0xaa},
+ {0x00, 0x41, 0x38, 0xaa},
+ {0x00, 0x62, 0x30, 0xaa},
+ {0x00, 0x63, 0x30, 0xaa},
+ {0x00, 0x64, 0x08, 0xaa},
+ {0x00, 0x94, 0x07, 0xaa},
+ {0x00, 0x95, 0x0b, 0xaa},
+ {0x00, 0x65, 0x00, 0xaa},
+ {0x00, 0x66, 0x05, 0xaa},
+ {0x00, 0x56, 0x50, 0xaa},
+ {0x00, 0x34, 0x11, 0xaa},
+ {0x00, 0xa4, 0x88, 0xaa},
+ {0x00, 0x96, 0x00, 0xaa},
+ {0x00, 0x97, 0x30, 0xaa},
+ {0x00, 0x98, 0x20, 0xaa},
+ {0x00, 0x99, 0x30, 0xaa},
+ {0x00, 0x9a, 0x84, 0xaa},
+ {0x00, 0x9b, 0x29, 0xaa},
+ {0x00, 0x9c, 0x03, 0xaa},
+ {0x00, 0x78, 0x04, 0xaa},
+ {0x00, 0x79, 0x01, 0xaa},
+ {0x00, 0xc8, 0xf0, 0xaa},
+ {0x00, 0x79, 0x0f, 0xaa},
+ {0x00, 0xc8, 0x00, 0xaa},
+ {0x00, 0x79, 0x10, 0xaa},
+ {0x00, 0xc8, 0x7e, 0xaa},
+ {0x00, 0x79, 0x0a, 0xaa},
+ {0x00, 0xc8, 0x80, 0xaa},
+ {0x00, 0x79, 0x0b, 0xaa},
+ {0x00, 0xc8, 0x01, 0xaa},
+ {0x00, 0x79, 0x0c, 0xaa},
+ {0x00, 0xc8, 0x0f, 0xaa},
+ {0x00, 0x79, 0x0d, 0xaa},
+ {0x00, 0xc8, 0x20, 0xaa},
+ {0x00, 0x79, 0x09, 0xaa},
+ {0x00, 0xc8, 0x80, 0xaa},
+ {0x00, 0x79, 0x02, 0xaa},
+ {0x00, 0xc8, 0xc0, 0xaa},
+ {0x00, 0x79, 0x03, 0xaa},
+ {0x00, 0xc8, 0x40, 0xaa},
+ {0x00, 0x79, 0x05, 0xaa},
+ {0x00, 0xc8, 0x30, 0xaa},
+ {0x00, 0x79, 0x26, 0xaa},
+ {0x00, 0x11, 0x40, 0xaa},
+ {0x00, 0x3a, 0x04, 0xaa},
+ {0x00, 0x12, 0x00, 0xaa},
+ {0x00, 0x40, 0xc0, 0xaa},
+ {0x00, 0x8c, 0x00, 0xaa},
+ {0x00, 0x17, 0x14, 0xaa},
+ {0x00, 0x18, 0x02, 0xaa},
+ {0x00, 0x32, 0x92, 0xaa},
+ {0x00, 0x19, 0x02, 0xaa},
+ {0x00, 0x1a, 0x7a, 0xaa},
+ {0x00, 0x03, 0x0a, 0xaa},
+ {0x00, 0x0c, 0x00, 0xaa},
+ {0x00, 0x3e, 0x00, 0xaa},
+ {0x00, 0x70, 0x3a, 0xaa},
+ {0x00, 0x71, 0x35, 0xaa},
+ {0x00, 0x72, 0x11, 0xaa},
+ {0x00, 0x73, 0xf0, 0xaa},
+ {0x00, 0xa2, 0x02, 0xaa},
+ {0x00, 0xb1, 0x00, 0xaa},
+ {0x00, 0xb1, 0x0c, 0xaa},
{0x00, 0x1e, 0x37, 0xaa}, /* MVFP */
{0x00, 0xaa, 0x14, 0xaa},
- {0x00, 0x24, 0x80, 0xaa}, {0x00, 0x25, 0x74, 0xaa},
- {0x00, 0x26, 0xd3, 0xaa}, {0x00, 0x0d, 0x00, 0xaa},
- {0x00, 0x14, 0x18, 0xaa}, {0x00, 0x9d, 0x99, 0xaa},
- {0x00, 0x9e, 0x7f, 0xaa}, {0x00, 0x64, 0x08, 0xaa},
- {0x00, 0x94, 0x07, 0xaa}, {0x00, 0x95, 0x06, 0xaa},
- {0x00, 0x66, 0x05, 0xaa}, {0x00, 0x41, 0x08, 0xaa},
- {0x00, 0x3f, 0x00, 0xaa}, {0x00, 0x75, 0x07, 0xaa},
- {0x00, 0x76, 0xe1, 0xaa}, {0x00, 0x4c, 0x00, 0xaa},
- {0x00, 0x77, 0x00, 0xaa}, {0x00, 0x3d, 0xc2, 0xaa},
- {0x00, 0x4b, 0x09, 0xaa}, {0x00, 0xc9, 0x60, 0xaa},
- {0x00, 0x41, 0x38, 0xaa}, {0xb6, 0x00, 0x00, 0xcc},
- {0xb6, 0x03, 0x02, 0xcc}, {0xb6, 0x02, 0x80, 0xcc},
- {0xb6, 0x05, 0x01, 0xcc}, {0xb6, 0x04, 0xe0, 0xcc},
- {0xb6, 0x12, 0xf8, 0xcc}, {0xb6, 0x13, 0x13, 0xcc},
- {0xb6, 0x18, 0x02, 0xcc}, {0xb6, 0x17, 0x58, 0xcc},
- {0xb6, 0x16, 0x00, 0xcc}, {0xb6, 0x22, 0x12, 0xcc},
- {0xb6, 0x23, 0x0b, 0xcc}, {0xbf, 0xc0, 0x39, 0xcc},
- {0xbf, 0xc1, 0x04, 0xcc}, {0xbf, 0xcc, 0x00, 0xcc},
- {0xb3, 0x5c, 0x01, 0xcc}, {0xb3, 0x01, 0x45, 0xcc},
+ {0x00, 0x24, 0x80, 0xaa},
+ {0x00, 0x25, 0x74, 0xaa},
+ {0x00, 0x26, 0xd3, 0xaa},
+ {0x00, 0x0d, 0x00, 0xaa},
+ {0x00, 0x14, 0x18, 0xaa},
+ {0x00, 0x9d, 0x99, 0xaa},
+ {0x00, 0x9e, 0x7f, 0xaa},
+ {0x00, 0x64, 0x08, 0xaa},
+ {0x00, 0x94, 0x07, 0xaa},
+ {0x00, 0x95, 0x06, 0xaa},
+ {0x00, 0x66, 0x05, 0xaa},
+ {0x00, 0x41, 0x08, 0xaa},
+ {0x00, 0x3f, 0x00, 0xaa},
+ {0x00, 0x75, 0x07, 0xaa},
+ {0x00, 0x76, 0xe1, 0xaa},
+ {0x00, 0x4c, 0x00, 0xaa},
+ {0x00, 0x77, 0x00, 0xaa},
+ {0x00, 0x3d, 0xc2, 0xaa},
+ {0x00, 0x4b, 0x09, 0xaa},
+ {0x00, 0xc9, 0x60, 0xaa},
+ {0x00, 0x41, 0x38, 0xaa},
+ {0xbf, 0xc0, 0x26, 0xcc},
+ {0xbf, 0xc1, 0x02, 0xcc},
+ {0xbf, 0xcc, 0x04, 0xcc},
+ {0xb3, 0x5c, 0x01, 0xcc},
+ {0xb3, 0x01, 0x45, 0xcc},
{0x00, 0x77, 0x05, 0xaa},
{},
};
-static const u8 ov7670_initQVGA_JPG[][4] = {
- {0xb3, 0x01, 0x05, 0xcc}, {0x00, 0x00, 0x30, 0xdd},
- {0xb0, 0x03, 0x19, 0xcc}, {0x00, 0x00, 0x10, 0xdd},
- {0xb0, 0x04, 0x02, 0xcc}, {0x00, 0x00, 0x10, 0xdd},
- {0xb3, 0x00, 0x66, 0xcc}, {0xb3, 0x00, 0x67, 0xcc},
- {0xb3, 0x35, 0xa1, 0xcc}, {0xb3, 0x34, 0x01, 0xcc},
- {0xb3, 0x05, 0x01, 0xcc}, {0xb3, 0x06, 0x01, 0xcc},
- {0xb3, 0x08, 0x01, 0xcc}, {0xb3, 0x09, 0x0c, 0xcc},
- {0xb3, 0x02, 0x02, 0xcc}, {0xb3, 0x03, 0x1f, 0xcc},
- {0xb3, 0x14, 0x00, 0xcc}, {0xb3, 0x15, 0x00, 0xcc},
- {0xb3, 0x16, 0x02, 0xcc}, {0xb3, 0x17, 0x7f, 0xcc},
- {0xb3, 0x04, 0x05, 0xcc}, {0xb3, 0x20, 0x00, 0xcc},
- {0xb3, 0x21, 0x00, 0xcc}, {0xb3, 0x22, 0x01, 0xcc},
- {0xb3, 0x23, 0xe0, 0xcc}, {0xbc, 0x00, 0xd1, 0xcc},
- {0xbc, 0x01, 0x01, 0xcc}, {0x00, 0x12, 0x80, 0xaa},
- {0x00, 0x00, 0x20, 0xdd}, {0x00, 0x12, 0x00, 0xaa},
- {0x00, 0x11, 0x40, 0xaa}, {0x00, 0x6b, 0x0a, 0xaa},
- {0x00, 0x3a, 0x04, 0xaa}, {0x00, 0x40, 0xc0, 0xaa},
- {0x00, 0x8c, 0x00, 0xaa}, {0x00, 0x7a, 0x29, 0xaa},
- {0x00, 0x7b, 0x0e, 0xaa}, {0x00, 0x7c, 0x1a, 0xaa},
- {0x00, 0x7d, 0x31, 0xaa}, {0x00, 0x7e, 0x53, 0xaa},
- {0x00, 0x7f, 0x60, 0xaa}, {0x00, 0x80, 0x6b, 0xaa},
- {0x00, 0x81, 0x73, 0xaa}, {0x00, 0x82, 0x7b, 0xaa},
- {0x00, 0x83, 0x82, 0xaa}, {0x00, 0x84, 0x89, 0xaa},
- {0x00, 0x85, 0x96, 0xaa}, {0x00, 0x86, 0xa1, 0xaa},
- {0x00, 0x87, 0xb7, 0xaa}, {0x00, 0x88, 0xcc, 0xaa},
- {0x00, 0x89, 0xe1, 0xaa}, {0x00, 0x13, 0xe0, 0xaa},
- {0x00, 0x00, 0x00, 0xaa}, {0x00, 0x10, 0x00, 0xaa},
- {0x00, 0x0d, 0x40, 0xaa}, {0x00, 0x14, 0x28, 0xaa},
- {0x00, 0xa5, 0x05, 0xaa}, {0x00, 0xab, 0x07, 0xaa},
- {0x00, 0x24, 0x95, 0xaa}, {0x00, 0x25, 0x33, 0xaa},
- {0x00, 0x26, 0xe3, 0xaa}, {0x00, 0x9f, 0x88, 0xaa},
- {0x00, 0xa0, 0x78, 0xaa}, {0x00, 0x55, 0x90, 0xaa},
- {0x00, 0xa1, 0x03, 0xaa}, {0x00, 0xa6, 0xe0, 0xaa},
- {0x00, 0xa7, 0xd8, 0xaa}, {0x00, 0xa8, 0xf0, 0xaa},
- {0x00, 0xa9, 0x90, 0xaa}, {0x00, 0xaa, 0x14, 0xaa},
- {0x00, 0x13, 0xe5, 0xaa}, {0x00, 0x0e, 0x61, 0xaa},
- {0x00, 0x0f, 0x4b, 0xaa}, {0x00, 0x16, 0x02, 0xaa},
+static const u8 ov7670_InitQVGA[][4] = {
+ {0xb3, 0x01, 0x05, 0xcc},
+ {0x00, 0x00, 0x30, 0xdd},
+ {0xb0, 0x03, 0x19, 0xcc},
+ {0x00, 0x00, 0x10, 0xdd},
+ {0xb0, 0x04, 0x02, 0xcc},
+ {0x00, 0x00, 0x10, 0xdd},
+ {0xb3, 0x00, 0x66, 0xcc},
+ {0xb3, 0x00, 0x67, 0xcc},
+ {0xb0, 0x16, 0x01, 0xcc},
+ {0xb3, 0x35, 0xa1, 0xcc}, /* i2c add: 21 */
+ {0xb3, 0x34, 0x01, 0xcc},
+ {0xb3, 0x05, 0x01, 0xcc},
+ {0xb3, 0x06, 0x01, 0xcc},
+ {0xb3, 0x08, 0x01, 0xcc},
+ {0xb3, 0x09, 0x0c, 0xcc},
+ {0xb3, 0x02, 0x02, 0xcc},
+ {0xb3, 0x03, 0x1f, 0xcc},
+ {0xb3, 0x14, 0x00, 0xcc},
+ {0xb3, 0x15, 0x00, 0xcc},
+ {0xb3, 0x16, 0x02, 0xcc},
+ {0xb3, 0x17, 0x7f, 0xcc},
+ {0xb3, 0x04, 0x05, 0xcc},
+ {0xb3, 0x20, 0x00, 0xcc},
+ {0xb3, 0x21, 0x00, 0xcc},
+ {0xb3, 0x22, 0x01, 0xcc},
+ {0xb3, 0x23, 0xe0, 0xcc},
+ {0xbc, 0x00, 0xd1, 0xcc},
+ {0xbc, 0x01, 0x01, 0xcc},
+ {0x00, 0x12, 0x80, 0xaa},
+ {0x00, 0x00, 0x20, 0xdd},
+ {0x00, 0x12, 0x00, 0xaa},
+ {0x00, 0x11, 0x40, 0xaa},
+ {0x00, 0x6b, 0x0a, 0xaa},
+ {0x00, 0x3a, 0x04, 0xaa},
+ {0x00, 0x40, 0xc0, 0xaa},
+ {0x00, 0x8c, 0x00, 0xaa},
+ {0x00, 0x7a, 0x29, 0xaa},
+ {0x00, 0x7b, 0x0e, 0xaa},
+ {0x00, 0x7c, 0x1a, 0xaa},
+ {0x00, 0x7d, 0x31, 0xaa},
+ {0x00, 0x7e, 0x53, 0xaa},
+ {0x00, 0x7f, 0x60, 0xaa},
+ {0x00, 0x80, 0x6b, 0xaa},
+ {0x00, 0x81, 0x73, 0xaa},
+ {0x00, 0x82, 0x7b, 0xaa},
+ {0x00, 0x83, 0x82, 0xaa},
+ {0x00, 0x84, 0x89, 0xaa},
+ {0x00, 0x85, 0x96, 0xaa},
+ {0x00, 0x86, 0xa1, 0xaa},
+ {0x00, 0x87, 0xb7, 0xaa},
+ {0x00, 0x88, 0xcc, 0xaa},
+ {0x00, 0x89, 0xe1, 0xaa},
+ {0x00, 0x13, 0xe0, 0xaa},
+ {0x00, 0x00, 0x00, 0xaa},
+ {0x00, 0x10, 0x00, 0xaa},
+ {0x00, 0x0d, 0x40, 0xaa},
+ {0x00, 0x14, 0x28, 0xaa},
+ {0x00, 0xa5, 0x05, 0xaa},
+ {0x00, 0xab, 0x07, 0xaa},
+ {0x00, 0x24, 0x95, 0xaa},
+ {0x00, 0x25, 0x33, 0xaa},
+ {0x00, 0x26, 0xe3, 0xaa},
+ {0x00, 0x9f, 0x88, 0xaa},
+ {0x00, 0xa0, 0x78, 0xaa},
+ {0x00, 0x55, 0x90, 0xaa},
+ {0x00, 0xa1, 0x03, 0xaa},
+ {0x00, 0xa6, 0xe0, 0xaa},
+ {0x00, 0xa7, 0xd8, 0xaa},
+ {0x00, 0xa8, 0xf0, 0xaa},
+ {0x00, 0xa9, 0x90, 0xaa},
+ {0x00, 0xaa, 0x14, 0xaa},
+ {0x00, 0x13, 0xe5, 0xaa},
+ {0x00, 0x0e, 0x61, 0xaa},
+ {0x00, 0x0f, 0x4b, 0xaa},
+ {0x00, 0x16, 0x02, 0xaa},
{0x00, 0x1e, 0x07, 0xaa}, /* MVFP */
{0x00, 0x21, 0x02, 0xaa},
- {0x00, 0x22, 0x91, 0xaa}, {0x00, 0x29, 0x07, 0xaa},
- {0x00, 0x33, 0x0b, 0xaa}, {0x00, 0x35, 0x0b, 0xaa},
- {0x00, 0x37, 0x1d, 0xaa}, {0x00, 0x38, 0x71, 0xaa},
- {0x00, 0x39, 0x2a, 0xaa}, {0x00, 0x3c, 0x78, 0xaa},
- {0x00, 0x4d, 0x40, 0xaa}, {0x00, 0x4e, 0x20, 0xaa},
- {0x00, 0x74, 0x19, 0xaa}, {0x00, 0x8d, 0x4f, 0xaa},
- {0x00, 0x8e, 0x00, 0xaa}, {0x00, 0x8f, 0x00, 0xaa},
- {0x00, 0x90, 0x00, 0xaa}, {0x00, 0x91, 0x00, 0xaa},
- {0x00, 0x96, 0x00, 0xaa}, {0x00, 0x9a, 0x80, 0xaa},
- {0x00, 0xb0, 0x84, 0xaa}, {0x00, 0xb1, 0x0c, 0xaa},
- {0x00, 0xb2, 0x0e, 0xaa}, {0x00, 0xb3, 0x82, 0xaa},
- {0x00, 0xb8, 0x0a, 0xaa}, {0x00, 0x43, 0x14, 0xaa},
- {0x00, 0x44, 0xf0, 0xaa}, {0x00, 0x45, 0x45, 0xaa},
- {0x00, 0x46, 0x63, 0xaa}, {0x00, 0x47, 0x2d, 0xaa},
- {0x00, 0x48, 0x46, 0xaa}, {0x00, 0x59, 0x88, 0xaa},
- {0x00, 0x5a, 0xa0, 0xaa}, {0x00, 0x5b, 0xc6, 0xaa},
- {0x00, 0x5c, 0x7d, 0xaa}, {0x00, 0x5d, 0x5f, 0xaa},
- {0x00, 0x5e, 0x19, 0xaa}, {0x00, 0x6c, 0x0a, 0xaa},
- {0x00, 0x6d, 0x55, 0xaa}, {0x00, 0x6e, 0x11, 0xaa},
- {0x00, 0x6f, 0x9e, 0xaa}, {0x00, 0x69, 0x00, 0xaa},
- {0x00, 0x6a, 0x40, 0xaa}, {0x00, 0x01, 0x40, 0xaa},
- {0x00, 0x02, 0x40, 0xaa}, {0x00, 0x13, 0xe7, 0xaa},
- {0x00, 0x5f, 0xf0, 0xaa}, {0x00, 0x60, 0xf0, 0xaa},
- {0x00, 0x61, 0xf0, 0xaa}, {0x00, 0x27, 0xa0, 0xaa},
- {0x00, 0x28, 0x80, 0xaa}, {0x00, 0x2c, 0x90, 0xaa},
- {0x00, 0x4f, 0x66, 0xaa}, {0x00, 0x50, 0x66, 0xaa},
- {0x00, 0x51, 0x00, 0xaa}, {0x00, 0x52, 0x22, 0xaa},
- {0x00, 0x53, 0x5e, 0xaa}, {0x00, 0x54, 0x80, 0xaa},
- {0x00, 0x58, 0x9e, 0xaa}, {0x00, 0x41, 0x08, 0xaa},
- {0x00, 0x3f, 0x00, 0xaa}, {0x00, 0x75, 0x85, 0xaa},
- {0x00, 0x76, 0xe1, 0xaa}, {0x00, 0x4c, 0x00, 0xaa},
- {0x00, 0x77, 0x0a, 0xaa}, {0x00, 0x3d, 0x88, 0xaa},
- {0x00, 0x4b, 0x09, 0xaa}, {0x00, 0xc9, 0x60, 0xaa},
- {0x00, 0x41, 0x38, 0xaa}, {0x00, 0x62, 0x30, 0xaa},
- {0x00, 0x63, 0x30, 0xaa}, {0x00, 0x64, 0x08, 0xaa},
- {0x00, 0x94, 0x07, 0xaa}, {0x00, 0x95, 0x0b, 0xaa},
- {0x00, 0x65, 0x00, 0xaa}, {0x00, 0x66, 0x05, 0xaa},
- {0x00, 0x56, 0x50, 0xaa}, {0x00, 0x34, 0x11, 0xaa},
- {0x00, 0xa4, 0x88, 0xaa}, {0x00, 0x96, 0x00, 0xaa},
- {0x00, 0x97, 0x30, 0xaa}, {0x00, 0x98, 0x20, 0xaa},
- {0x00, 0x99, 0x30, 0xaa}, {0x00, 0x9a, 0x84, 0xaa},
- {0x00, 0x9b, 0x29, 0xaa}, {0x00, 0x9c, 0x03, 0xaa},
- {0x00, 0x78, 0x04, 0xaa}, {0x00, 0x79, 0x01, 0xaa},
- {0x00, 0xc8, 0xf0, 0xaa}, {0x00, 0x79, 0x0f, 0xaa},
- {0x00, 0xc8, 0x00, 0xaa}, {0x00, 0x79, 0x10, 0xaa},
- {0x00, 0xc8, 0x7e, 0xaa}, {0x00, 0x79, 0x0a, 0xaa},
- {0x00, 0xc8, 0x80, 0xaa}, {0x00, 0x79, 0x0b, 0xaa},
- {0x00, 0xc8, 0x01, 0xaa}, {0x00, 0x79, 0x0c, 0xaa},
- {0x00, 0xc8, 0x0f, 0xaa}, {0x00, 0x79, 0x0d, 0xaa},
- {0x00, 0xc8, 0x20, 0xaa}, {0x00, 0x79, 0x09, 0xaa},
- {0x00, 0xc8, 0x80, 0xaa}, {0x00, 0x79, 0x02, 0xaa},
- {0x00, 0xc8, 0xc0, 0xaa}, {0x00, 0x79, 0x03, 0xaa},
- {0x00, 0xc8, 0x40, 0xaa}, {0x00, 0x79, 0x05, 0xaa},
- {0x00, 0xc8, 0x30, 0xaa}, {0x00, 0x79, 0x26, 0xaa},
- {0x00, 0x11, 0x40, 0xaa}, {0x00, 0x3a, 0x04, 0xaa},
- {0x00, 0x12, 0x00, 0xaa}, {0x00, 0x40, 0xc0, 0xaa},
- {0x00, 0x8c, 0x00, 0xaa}, {0x00, 0x17, 0x14, 0xaa},
- {0x00, 0x18, 0x02, 0xaa}, {0x00, 0x32, 0x92, 0xaa},
- {0x00, 0x19, 0x02, 0xaa}, {0x00, 0x1a, 0x7a, 0xaa},
- {0x00, 0x03, 0x0a, 0xaa}, {0x00, 0x0c, 0x00, 0xaa},
- {0x00, 0x3e, 0x00, 0xaa}, {0x00, 0x70, 0x3a, 0xaa},
- {0x00, 0x71, 0x35, 0xaa}, {0x00, 0x72, 0x11, 0xaa},
- {0x00, 0x73, 0xf0, 0xaa}, {0x00, 0xa2, 0x02, 0xaa},
- {0x00, 0xb1, 0x00, 0xaa}, {0x00, 0xb1, 0x0c, 0xaa},
+ {0x00, 0x22, 0x91, 0xaa},
+ {0x00, 0x29, 0x07, 0xaa},
+ {0x00, 0x33, 0x0b, 0xaa},
+ {0x00, 0x35, 0x0b, 0xaa},
+ {0x00, 0x37, 0x1d, 0xaa},
+ {0x00, 0x38, 0x71, 0xaa},
+ {0x00, 0x39, 0x2a, 0xaa},
+ {0x00, 0x3c, 0x78, 0xaa},
+ {0x00, 0x4d, 0x40, 0xaa},
+ {0x00, 0x4e, 0x20, 0xaa},
+ {0x00, 0x74, 0x19, 0xaa},
+ {0x00, 0x8d, 0x4f, 0xaa},
+ {0x00, 0x8e, 0x00, 0xaa},
+ {0x00, 0x8f, 0x00, 0xaa},
+ {0x00, 0x90, 0x00, 0xaa},
+ {0x00, 0x91, 0x00, 0xaa},
+ {0x00, 0x96, 0x00, 0xaa},
+ {0x00, 0x9a, 0x80, 0xaa},
+ {0x00, 0xb0, 0x84, 0xaa},
+ {0x00, 0xb1, 0x0c, 0xaa},
+ {0x00, 0xb2, 0x0e, 0xaa},
+ {0x00, 0xb3, 0x82, 0xaa},
+ {0x00, 0xb8, 0x0a, 0xaa},
+ {0x00, 0x43, 0x14, 0xaa},
+ {0x00, 0x44, 0xf0, 0xaa},
+ {0x00, 0x45, 0x45, 0xaa},
+ {0x00, 0x46, 0x63, 0xaa},
+ {0x00, 0x47, 0x2d, 0xaa},
+ {0x00, 0x48, 0x46, 0xaa},
+ {0x00, 0x59, 0x88, 0xaa},
+ {0x00, 0x5a, 0xa0, 0xaa},
+ {0x00, 0x5b, 0xc6, 0xaa},
+ {0x00, 0x5c, 0x7d, 0xaa},
+ {0x00, 0x5d, 0x5f, 0xaa},
+ {0x00, 0x5e, 0x19, 0xaa},
+ {0x00, 0x6c, 0x0a, 0xaa},
+ {0x00, 0x6d, 0x55, 0xaa},
+ {0x00, 0x6e, 0x11, 0xaa},
+ {0x00, 0x6f, 0x9e, 0xaa},
+ {0x00, 0x69, 0x00, 0xaa},
+ {0x00, 0x6a, 0x40, 0xaa},
+ {0x00, 0x01, 0x40, 0xaa},
+ {0x00, 0x02, 0x40, 0xaa},
+ {0x00, 0x13, 0xe7, 0xaa},
+ {0x00, 0x5f, 0xf0, 0xaa},
+ {0x00, 0x60, 0xf0, 0xaa},
+ {0x00, 0x61, 0xf0, 0xaa},
+ {0x00, 0x27, 0xa0, 0xaa},
+ {0x00, 0x28, 0x80, 0xaa},
+ {0x00, 0x2c, 0x90, 0xaa},
+ {0x00, 0x4f, 0x66, 0xaa},
+ {0x00, 0x50, 0x66, 0xaa},
+ {0x00, 0x51, 0x00, 0xaa},
+ {0x00, 0x52, 0x22, 0xaa},
+ {0x00, 0x53, 0x5e, 0xaa},
+ {0x00, 0x54, 0x80, 0xaa},
+ {0x00, 0x58, 0x9e, 0xaa},
+ {0x00, 0x41, 0x08, 0xaa},
+ {0x00, 0x3f, 0x00, 0xaa},
+ {0x00, 0x75, 0x85, 0xaa},
+ {0x00, 0x76, 0xe1, 0xaa},
+ {0x00, 0x4c, 0x00, 0xaa},
+ {0x00, 0x77, 0x0a, 0xaa},
+ {0x00, 0x3d, 0x88, 0xaa},
+ {0x00, 0x4b, 0x09, 0xaa},
+ {0x00, 0xc9, 0x60, 0xaa},
+ {0x00, 0x41, 0x38, 0xaa},
+ {0x00, 0x62, 0x30, 0xaa},
+ {0x00, 0x63, 0x30, 0xaa},
+ {0x00, 0x64, 0x08, 0xaa},
+ {0x00, 0x94, 0x07, 0xaa},
+ {0x00, 0x95, 0x0b, 0xaa},
+ {0x00, 0x65, 0x00, 0xaa},
+ {0x00, 0x66, 0x05, 0xaa},
+ {0x00, 0x56, 0x50, 0xaa},
+ {0x00, 0x34, 0x11, 0xaa},
+ {0x00, 0xa4, 0x88, 0xaa},
+ {0x00, 0x96, 0x00, 0xaa},
+ {0x00, 0x97, 0x30, 0xaa},
+ {0x00, 0x98, 0x20, 0xaa},
+ {0x00, 0x99, 0x30, 0xaa},
+ {0x00, 0x9a, 0x84, 0xaa},
+ {0x00, 0x9b, 0x29, 0xaa},
+ {0x00, 0x9c, 0x03, 0xaa},
+ {0x00, 0x78, 0x04, 0xaa},
+ {0x00, 0x79, 0x01, 0xaa},
+ {0x00, 0xc8, 0xf0, 0xaa},
+ {0x00, 0x79, 0x0f, 0xaa},
+ {0x00, 0xc8, 0x00, 0xaa},
+ {0x00, 0x79, 0x10, 0xaa},
+ {0x00, 0xc8, 0x7e, 0xaa},
+ {0x00, 0x79, 0x0a, 0xaa},
+ {0x00, 0xc8, 0x80, 0xaa},
+ {0x00, 0x79, 0x0b, 0xaa},
+ {0x00, 0xc8, 0x01, 0xaa},
+ {0x00, 0x79, 0x0c, 0xaa},
+ {0x00, 0xc8, 0x0f, 0xaa},
+ {0x00, 0x79, 0x0d, 0xaa},
+ {0x00, 0xc8, 0x20, 0xaa},
+ {0x00, 0x79, 0x09, 0xaa},
+ {0x00, 0xc8, 0x80, 0xaa},
+ {0x00, 0x79, 0x02, 0xaa},
+ {0x00, 0xc8, 0xc0, 0xaa},
+ {0x00, 0x79, 0x03, 0xaa},
+ {0x00, 0xc8, 0x40, 0xaa},
+ {0x00, 0x79, 0x05, 0xaa},
+ {0x00, 0xc8, 0x30, 0xaa},
+ {0x00, 0x79, 0x26, 0xaa},
+ {0x00, 0x11, 0x40, 0xaa},
+ {0x00, 0x3a, 0x04, 0xaa},
+ {0x00, 0x12, 0x00, 0xaa},
+ {0x00, 0x40, 0xc0, 0xaa},
+ {0x00, 0x8c, 0x00, 0xaa},
+ {0x00, 0x17, 0x14, 0xaa},
+ {0x00, 0x18, 0x02, 0xaa},
+ {0x00, 0x32, 0x92, 0xaa},
+ {0x00, 0x19, 0x02, 0xaa},
+ {0x00, 0x1a, 0x7a, 0xaa},
+ {0x00, 0x03, 0x0a, 0xaa},
+ {0x00, 0x0c, 0x00, 0xaa},
+ {0x00, 0x3e, 0x00, 0xaa},
+ {0x00, 0x70, 0x3a, 0xaa},
+ {0x00, 0x71, 0x35, 0xaa},
+ {0x00, 0x72, 0x11, 0xaa},
+ {0x00, 0x73, 0xf0, 0xaa},
+ {0x00, 0xa2, 0x02, 0xaa},
+ {0x00, 0xb1, 0x00, 0xaa},
+ {0x00, 0xb1, 0x0c, 0xaa},
{0x00, 0x1e, 0x37, 0xaa}, /* MVFP */
{0x00, 0xaa, 0x14, 0xaa},
- {0x00, 0x24, 0x80, 0xaa}, {0x00, 0x25, 0x74, 0xaa},
- {0x00, 0x26, 0xd3, 0xaa}, {0x00, 0x0d, 0x00, 0xaa},
- {0x00, 0x14, 0x18, 0xaa}, {0x00, 0x9d, 0x99, 0xaa},
- {0x00, 0x9e, 0x7f, 0xaa}, {0x00, 0x64, 0x08, 0xaa},
- {0x00, 0x94, 0x07, 0xaa}, {0x00, 0x95, 0x06, 0xaa},
- {0x00, 0x66, 0x05, 0xaa}, {0x00, 0x41, 0x08, 0xaa},
- {0x00, 0x3f, 0x00, 0xaa}, {0x00, 0x75, 0x07, 0xaa},
- {0x00, 0x76, 0xe1, 0xaa}, {0x00, 0x4c, 0x00, 0xaa},
- {0x00, 0x77, 0x00, 0xaa}, {0x00, 0x3d, 0xc2, 0xaa},
- {0x00, 0x4b, 0x09, 0xaa}, {0x00, 0xc9, 0x60, 0xaa},
- {0x00, 0x41, 0x38, 0xaa}, {0xb6, 0x00, 0x00, 0xcc},
- {0xb6, 0x03, 0x01, 0xcc}, {0xb6, 0x02, 0x40, 0xcc},
- {0xb6, 0x05, 0x00, 0xcc}, {0xb6, 0x04, 0xf0, 0xcc},
- {0xb6, 0x12, 0xf8, 0xcc}, {0xb6, 0x13, 0x21, 0xcc},
- {0xb6, 0x18, 0x00, 0xcc}, {0xb6, 0x17, 0x96, 0xcc},
- {0xb6, 0x16, 0x00, 0xcc}, {0xb6, 0x22, 0x12, 0xcc},
- {0xb6, 0x23, 0x0b, 0xcc}, {0xbf, 0xc0, 0x39, 0xcc},
- {0xbf, 0xc1, 0x04, 0xcc}, {0xbf, 0xcc, 0x00, 0xcc},
- {0xbc, 0x02, 0x18, 0xcc}, {0xbc, 0x03, 0x50, 0xcc},
- {0xbc, 0x04, 0x18, 0xcc}, {0xbc, 0x05, 0x00, 0xcc},
- {0xbc, 0x06, 0x00, 0xcc}, {0xbc, 0x08, 0x30, 0xcc},
- {0xbc, 0x09, 0x40, 0xcc}, {0xbc, 0x0a, 0x10, 0xcc},
- {0xbc, 0x0b, 0x00, 0xcc}, {0xbc, 0x0c, 0x00, 0xcc},
- {0xb3, 0x5c, 0x01, 0xcc}, {0xb3, 0x01, 0x45, 0xcc},
- {0x00, 0x77, 0x05, 0xaa },
+ {0x00, 0x24, 0x80, 0xaa},
+ {0x00, 0x25, 0x74, 0xaa},
+ {0x00, 0x26, 0xd3, 0xaa},
+ {0x00, 0x0d, 0x00, 0xaa},
+ {0x00, 0x14, 0x18, 0xaa},
+ {0x00, 0x9d, 0x99, 0xaa},
+ {0x00, 0x9e, 0x7f, 0xaa},
+ {0x00, 0x64, 0x08, 0xaa},
+ {0x00, 0x94, 0x07, 0xaa},
+ {0x00, 0x95, 0x06, 0xaa},
+ {0x00, 0x66, 0x05, 0xaa},
+ {0x00, 0x41, 0x08, 0xaa},
+ {0x00, 0x3f, 0x00, 0xaa},
+ {0x00, 0x75, 0x07, 0xaa},
+ {0x00, 0x76, 0xe1, 0xaa},
+ {0x00, 0x4c, 0x00, 0xaa},
+ {0x00, 0x77, 0x00, 0xaa},
+ {0x00, 0x3d, 0xc2, 0xaa},
+ {0x00, 0x4b, 0x09, 0xaa},
+ {0x00, 0xc9, 0x60, 0xaa},
+ {0x00, 0x41, 0x38, 0xaa},
+ {0xbc, 0x02, 0x18, 0xcc},
+ {0xbc, 0x03, 0x50, 0xcc},
+ {0xbc, 0x04, 0x18, 0xcc},
+ {0xbc, 0x05, 0x00, 0xcc},
+ {0xbc, 0x06, 0x00, 0xcc},
+ {0xbc, 0x08, 0x30, 0xcc},
+ {0xbc, 0x09, 0x40, 0xcc},
+ {0xbc, 0x0a, 0x10, 0xcc},
+ {0xbc, 0x0b, 0x00, 0xcc},
+ {0xbc, 0x0c, 0x00, 0xcc},
+ {0xbf, 0xc0, 0x26, 0xcc},
+ {0xbf, 0xc1, 0x02, 0xcc},
+ {0xbf, 0xcc, 0x04, 0xcc},
+ {0xb3, 0x5c, 0x01, 0xcc},
+ {0xb3, 0x01, 0x45, 0xcc},
+ {0x00, 0x77, 0x05, 0xaa},
{},
};
@@ -3117,6 +3338,10 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam->cam_mode = bi_mode;
cam->nmodes = ARRAY_SIZE(bi_mode);
break;
+ case SENSOR_OV7670:
+ cam->cam_mode = bi_mode;
+ cam->nmodes = ARRAY_SIZE(bi_mode) - 1;
+ break;
default:
cam->cam_mode = vc0323_mode;
cam->nmodes = ARRAY_SIZE(vc0323_mode) - 1;
@@ -3329,14 +3554,6 @@ static int sd_start(struct gspca_dev *gspca_dev)
else
init = ov7660_initVGA_data; /* 640x480 */
break;
- case SENSOR_OV7670:
- /*GammaT = ov7660_gamma; */
- /*MatrixT = ov7660_matrix; */
- if (mode)
- init = ov7670_initQVGA_JPG; /* 320x240 */
- else
- init = ov7670_initVGA_JPG; /* 640x480 */
- break;
case SENSOR_MI0360:
GammaT = mi1320_gamma;
MatrixT = mi0360_matrix;
@@ -3373,6 +3590,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
MatrixT = mi1320_matrix;
init = mi1320_soc_init[mode];
break;
+ case SENSOR_OV7670:
+ init = mode == 1 ? ov7670_InitVGA : ov7670_InitQVGA;
+ break;
case SENSOR_PO3130NC:
GammaT = po3130_gamma;
MatrixT = po3130_matrix;
@@ -3426,7 +3646,13 @@ static int sd_start(struct gspca_dev *gspca_dev)
sethvflip(gspca_dev);
setlightfreq(gspca_dev);
}
- if (sd->sensor == SENSOR_POxxxx) {
+ switch (sd->sensor) {
+ case SENSOR_OV7670:
+ reg_w(gspca_dev->dev, 0x87, 0xffff, 0xffff);
+ reg_w(gspca_dev->dev, 0x88, 0xff00, 0xf0f1);
+ reg_w(gspca_dev->dev, 0xa0, 0x0000, 0xbfff);
+ break;
+ case SENSOR_POxxxx:
setcolors(gspca_dev);
setbrightness(gspca_dev);
setcontrast(gspca_dev);
@@ -3435,6 +3661,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
msleep(80);
reg_w(gspca_dev->dev, 0x89, 0xffff, 0xfdff);
usb_exchange(gspca_dev, poxxxx_init_end_2);
+ break;
}
return 0;
}
diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c
index 7d7814c43f92..d02aa5c8472a 100644
--- a/drivers/media/video/gspca/zc3xx.c
+++ b/drivers/media/video/gspca/zc3xx.c
@@ -40,7 +40,6 @@ static int force_sensor = -1;
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
- u8 brightness;
u8 contrast;
u8 gamma;
u8 autogain;
@@ -80,8 +79,6 @@ struct sd {
};
/* V4L2 controls supported by the driver */
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val);
@@ -94,21 +91,6 @@ static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val);
static const struct ctrl sd_ctrls[] = {
-#define BRIGHTNESS_IDX 0
- {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
-#define BRIGHTNESS_DEF 128
- .default_value = BRIGHTNESS_DEF,
- },
- .set = sd_setbrightness,
- .get = sd_getbrightness,
- },
{
{
.id = V4L2_CID_CONTRAST,
@@ -150,7 +132,7 @@ static const struct ctrl sd_ctrls[] = {
.set = sd_setautogain,
.get = sd_getautogain,
},
-#define LIGHTFREQ_IDX 4
+#define LIGHTFREQ_IDX 3
{
{
.id = V4L2_CID_POWER_LINE_FREQUENCY,
@@ -6004,33 +5986,6 @@ static void setmatrix(struct gspca_dev *gspca_dev)
reg_w(gspca_dev->dev, matrix[i], 0x010a + i);
}
-static void setbrightness(struct gspca_dev *gspca_dev)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- u8 brightness;
-
- switch (sd->sensor) {
- case SENSOR_GC0305:
- case SENSOR_OV7620:
- case SENSOR_PAS202B:
- case SENSOR_PO2030:
- return;
- }
-/*fixme: is it really write to 011d and 018d for all other sensors? */
- brightness = sd->brightness;
- reg_w(gspca_dev->dev, brightness, 0x011d);
- switch (sd->sensor) {
- case SENSOR_ADCM2700:
- case SENSOR_HV7131B:
- return;
- }
- if (brightness < 0x70)
- brightness += 0x10;
- else
- brightness = 0x80;
- reg_w(gspca_dev->dev, brightness, 0x018d);
-}
-
static void setsharpness(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -6059,8 +6014,8 @@ static void setcontrast(struct gspca_dev *gspca_dev)
int g, i, k, adj, gp;
u8 gr[16];
static const u8 delta_tb[16] = /* delta for contrast */
- {0x15, 0x0d, 0x0a, 0x09, 0x08, 0x08, 0x08, 0x08,
- 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08};
+ {0x2c, 0x1a, 0x12, 0x0c, 0x0a, 0x06, 0x06, 0x06,
+ 0x04, 0x06, 0x04, 0x04, 0x03, 0x03, 0x02, 0x02};
static const u8 gamma_tb[6][16] = {
{0x00, 0x00, 0x03, 0x0d, 0x1b, 0x2e, 0x45, 0x5f,
0x79, 0x93, 0xab, 0xc1, 0xd4, 0xe5, 0xf3, 0xff},
@@ -6082,11 +6037,11 @@ static void setcontrast(struct gspca_dev *gspca_dev)
adj = 0;
gp = 0;
for (i = 0; i < 16; i++) {
- g = Tgamma[i] - delta_tb[i] * k / 128 - adj / 2;
+ g = Tgamma[i] - delta_tb[i] * k / 256 - adj / 2;
if (g > 0xff)
g = 0xff;
- else if (g <= 0)
- g = 1;
+ else if (g < 0)
+ g = 0;
reg_w(dev, g, 0x0120 + i); /* gamma */
if (k > 0)
adj--;
@@ -6203,13 +6158,13 @@ static int setlightfreq(struct gspca_dev *gspca_dev)
pas106b_50HZ, pas106b_50HZ,
pas106b_60HZ, pas106b_60HZ},
/* SENSOR_PAS202B 13 */
- {pas202b_NoFlikerScale, pas202b_NoFliker,
- pas202b_50HZScale, pas202b_50HZ,
- pas202b_60HZScale, pas202b_60HZ},
+ {pas202b_NoFliker, pas202b_NoFlikerScale,
+ pas202b_50HZ, pas202b_50HZScale,
+ pas202b_60HZ, pas202b_60HZScale},
/* SENSOR_PB0330 14 */
- {pb0330_NoFlikerScale, pb0330_NoFliker,
- pb0330_50HZScale, pb0330_50HZ,
- pb0330_60HZScale, pb0330_60HZ},
+ {pb0330_NoFliker, pb0330_NoFlikerScale,
+ pb0330_50HZ, pb0330_50HZScale,
+ pb0330_60HZ, pb0330_60HZScale},
/* SENSOR_PO2030 15 */
{po2030_NoFliker, po2030_NoFliker,
po2030_50HZ, po2030_50HZ,
@@ -6789,7 +6744,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam->nmodes = ARRAY_SIZE(broken_vga_mode);
break;
}
- sd->brightness = BRIGHTNESS_DEF;
sd->contrast = CONTRAST_DEF;
sd->gamma = gamma[sd->sensor];
sd->autogain = AUTOGAIN_DEF;
@@ -6797,12 +6751,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
sd->quality = QUALITY_DEF;
switch (sd->sensor) {
- case SENSOR_GC0305:
- case SENSOR_OV7620:
- case SENSOR_PAS202B:
- case SENSOR_PO2030:
- gspca_dev->ctrl_dis = (1 << BRIGHTNESS_IDX);
- break;
case SENSOR_HV7131B:
case SENSOR_HV7131C:
case SENSOR_OV7630C:
@@ -6893,7 +6841,6 @@ static int sd_start(struct gspca_dev *gspca_dev)
}
setmatrix(gspca_dev);
- setbrightness(gspca_dev);
switch (sd->sensor) {
case SENSOR_ADCM2700:
case SENSOR_OV7620:
@@ -7015,24 +6962,6 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->brightness = val;
- if (gspca_dev->streaming)
- setbrightness(gspca_dev);
- return 0;
-}
-
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->brightness;
- return 0;
-}
-
static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
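[Note, not part of the patch: the setcontrast() hunk in zc3xx.c above halves the weight of the contrast delta (k/256 instead of k/128, paired with a roughly doubled delta_tb) and lets the clamped result reach 0 instead of 1. A minimal sketch of just the changed per-entry computation follows; "k" stands for the signed contrast term and "adj" is taken as an opaque input, since the rest of the loop (adj/gp bookkeeping) is unchanged and not shown here.]

/* Sketch of the changed per-entry gamma computation only. */
static unsigned char gamma_entry(unsigned char tgamma, unsigned char delta,
				 int k, int adj)
{
	int g = tgamma - delta * k / 256 - adj / 2;	/* was "* k / 128" */

	if (g > 0xff)
		g = 0xff;
	else if (g < 0)
		g = 0;					/* was clamped to 1 */
	return g;
}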
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index da18d698e7f2..29d439742653 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -61,9 +61,9 @@ module_param(hauppauge, int, 0644); /* Choose Hauppauge remote */
MODULE_PARM_DESC(hauppauge, "Specify Hauppauge remote: 0=black, 1=grey (defaults to 0)");
-#define DEVNAME "ir-kbd-i2c"
+#define MODULE_NAME "ir-kbd-i2c"
#define dprintk(level, fmt, arg...) if (debug >= level) \
- printk(KERN_DEBUG DEVNAME ": " fmt , ## arg)
+ printk(KERN_DEBUG MODULE_NAME ": " fmt , ## arg)
/* ----------------------------------------------------------------------- */
@@ -297,7 +297,7 @@ static void ir_work(struct work_struct *work)
static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
- struct ir_scancode_table *ir_codes = NULL;
+ char *ir_codes = NULL;
const char *name = NULL;
u64 ir_type = 0;
struct IR_i2c *ir;
@@ -322,13 +322,13 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
name = "Pixelview";
ir->get_key = get_key_pixelview;
ir_type = IR_TYPE_OTHER;
- ir_codes = &ir_codes_empty_table;
+ ir_codes = RC_MAP_EMPTY;
break;
case 0x4b:
name = "PV951";
ir->get_key = get_key_pv951;
ir_type = IR_TYPE_OTHER;
- ir_codes = &ir_codes_pv951_table;
+ ir_codes = RC_MAP_PV951;
break;
case 0x18:
case 0x1f:
@@ -337,22 +337,22 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
ir->get_key = get_key_haup;
ir_type = IR_TYPE_RC5;
if (hauppauge == 1) {
- ir_codes = &ir_codes_hauppauge_new_table;
+ ir_codes = RC_MAP_HAUPPAUGE_NEW;
} else {
- ir_codes = &ir_codes_rc5_tv_table;
+ ir_codes = RC_MAP_RC5_TV;
}
break;
case 0x30:
name = "KNC One";
ir->get_key = get_key_knc1;
ir_type = IR_TYPE_OTHER;
- ir_codes = &ir_codes_empty_table;
+ ir_codes = RC_MAP_EMPTY;
break;
case 0x6b:
name = "FusionHDTV";
ir->get_key = get_key_fusionhdtv;
ir_type = IR_TYPE_RC5;
- ir_codes = &ir_codes_fusionhdtv_mce_table;
+ ir_codes = RC_MAP_FUSIONHDTV_MCE;
break;
case 0x0b:
case 0x47:
@@ -365,9 +365,9 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
ir_type = IR_TYPE_RC5;
ir->get_key = get_key_haup_xvr;
if (hauppauge == 1) {
- ir_codes = &ir_codes_hauppauge_new_table;
+ ir_codes = RC_MAP_HAUPPAUGE_NEW;
} else {
- ir_codes = &ir_codes_rc5_tv_table;
+ ir_codes = RC_MAP_RC5_TV;
}
} else {
/* Handled by saa7134-input */
@@ -379,7 +379,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
name = "AVerMedia Cardbus remote";
ir->get_key = get_key_avermedia_cardbus;
ir_type = IR_TYPE_OTHER;
- ir_codes = &ir_codes_avermedia_cardbus_table;
+ ir_codes = RC_MAP_AVERMEDIA_CARDBUS;
break;
}
@@ -447,11 +447,11 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
input_dev->name = ir->name;
input_dev->phys = ir->phys;
- err = ir_input_register(ir->input, ir->ir_codes, NULL);
+ err = ir_input_register(ir->input, ir->ir_codes, NULL, MODULE_NAME);
if (err)
goto err_out_free;
- printk(DEVNAME ": %s detected at %s [%s]\n",
+ printk(MODULE_NAME ": %s detected at %s [%s]\n",
ir->input->name, ir->input->phys, adap->name);
/* start polling via eventd */
diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
index 9a250548be4d..1b79475ca134 100644
--- a/drivers/media/video/ivtv/ivtv-driver.c
+++ b/drivers/media/video/ivtv/ivtv-driver.c
@@ -1293,7 +1293,6 @@ int ivtv_init_on_first_open(struct ivtv *itv)
ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_stream, 1);
ivtv_init_mpeg_decoder(itv);
}
- ivtv_s_std(NULL, &fh, &itv->tuner_std);
/* On a cx23416 this seems to be able to enable DMA to the chip? */
if (!itv->has_cx23415)
@@ -1310,6 +1309,10 @@ int ivtv_init_on_first_open(struct ivtv *itv)
}
else
ivtv_clear_irq_mask(itv, IVTV_IRQ_MASK_INIT);
+
+ /* For cards with video out, this call needs interrupts enabled */
+ ivtv_s_std(NULL, &fh, &itv->tuner_std);
+
return 0;
}
diff --git a/drivers/media/video/ivtv/ivtv-driver.h b/drivers/media/video/ivtv/ivtv-driver.h
index 5028e31c564a..5b45fd2b2645 100644
--- a/drivers/media/video/ivtv/ivtv-driver.h
+++ b/drivers/media/video/ivtv/ivtv-driver.h
@@ -63,6 +63,7 @@
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-fh.h>
#include <media/tuner.h>
#include <media/cx2341x.h>
#include <media/ir-kbd-i2c.h>
@@ -116,6 +117,9 @@
#define IVTV_REG_VPU (0x9058)
#define IVTV_REG_APU (0xA064)
+/* Other registers */
+#define IVTV_REG_DEC_LINE_FIELD (0x28C0)
+
/* debugging */
extern int ivtv_debug;
@@ -372,6 +376,7 @@ struct ivtv_stream {
};
struct ivtv_open_id {
+ struct v4l2_fh fh;
u32 open_id; /* unique ID for this file descriptor */
int type; /* stream type */
int yuv_frames; /* 1: started OUT_UDMA_YUV output mode */
@@ -379,6 +384,11 @@ struct ivtv_open_id {
struct ivtv *itv;
};
+static inline struct ivtv_open_id *fh2id(struct v4l2_fh *fh)
+{
+ return container_of(fh, struct ivtv_open_id, fh);
+}
+
struct yuv_frame_info
{
u32 update;
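[Note, not part of the patch: the new fh2id() helper recovers the driver's per-open structure from the struct v4l2_fh pointer that now sits in filp->private_data, using the usual container_of() idiom with the fh member embedded in ivtv_open_id. A self-contained sketch of the same idiom, with hypothetical stand-in types (my_open_id, v4l2_fh_stub), is shown below; the kernel's container_of additionally type-checks the member.]

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct v4l2_fh_stub { int dummy; };		/* placeholder for struct v4l2_fh */

struct my_open_id {
	struct v4l2_fh_stub fh;			/* embedded, not a pointer */
	int type;
};

static struct my_open_id *fh2id(struct v4l2_fh_stub *fh)
{
	/* subtract the offset of 'fh' to get back to the enclosing struct */
	return container_of(fh, struct my_open_id, fh);
}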
diff --git a/drivers/media/video/ivtv/ivtv-fileops.c b/drivers/media/video/ivtv/ivtv-fileops.c
index babcabd73c08..bee9605bd273 100644
--- a/drivers/media/video/ivtv/ivtv-fileops.c
+++ b/drivers/media/video/ivtv/ivtv-fileops.c
@@ -32,6 +32,7 @@
#include "ivtv-yuv.h"
#include "ivtv-ioctl.h"
#include "ivtv-cards.h"
+#include <media/v4l2-event.h>
#include <media/saa7115.h>
/* This function tries to claim the stream for a specific file descriptor.
@@ -506,7 +507,7 @@ int ivtv_start_capture(struct ivtv_open_id *id)
ssize_t ivtv_v4l2_read(struct file * filp, char __user *buf, size_t count, loff_t * pos)
{
- struct ivtv_open_id *id = filp->private_data;
+ struct ivtv_open_id *id = fh2id(filp->private_data);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
int rc;
@@ -541,7 +542,7 @@ int ivtv_start_decoding(struct ivtv_open_id *id, int speed)
ssize_t ivtv_v4l2_write(struct file *filp, const char __user *user_buf, size_t count, loff_t *pos)
{
- struct ivtv_open_id *id = filp->private_data;
+ struct ivtv_open_id *id = fh2id(filp->private_data);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
struct yuv_playback_info *yi = &itv->yuv_info;
@@ -711,19 +712,31 @@ retry:
unsigned int ivtv_v4l2_dec_poll(struct file *filp, poll_table *wait)
{
- struct ivtv_open_id *id = filp->private_data;
+ struct ivtv_open_id *id = fh2id(filp->private_data);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
int res = 0;
/* add stream's waitq to the poll list */
IVTV_DEBUG_HI_FILE("Decoder poll\n");
- poll_wait(filp, &s->waitq, wait);
- set_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags);
- if (test_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags) ||
- test_bit(IVTV_F_I_EV_DEC_STOPPED, &itv->i_flags))
- res = POLLPRI;
+ /* If there are subscribed events, then only use the new event
+ API instead of the old video.h based API. */
+ if (!list_empty(&id->fh.events->subscribed)) {
+ poll_wait(filp, &id->fh.events->wait, wait);
+ /* Turn off the old-style vsync events */
+ clear_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags);
+ if (v4l2_event_pending(&id->fh))
+ res = POLLPRI;
+ } else {
+ /* This is the old-style API which is here only for backwards
+ compatibility. */
+ poll_wait(filp, &s->waitq, wait);
+ set_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags);
+ if (test_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags) ||
+ test_bit(IVTV_F_I_EV_DEC_STOPPED, &itv->i_flags))
+ res = POLLPRI;
+ }
/* Allow write if buffers are available for writing */
if (s->q_free.buffers)
@@ -733,7 +746,7 @@ unsigned int ivtv_v4l2_dec_poll(struct file *filp, poll_table *wait)
unsigned int ivtv_v4l2_enc_poll(struct file *filp, poll_table * wait)
{
- struct ivtv_open_id *id = filp->private_data;
+ struct ivtv_open_id *id = fh2id(filp->private_data);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
int eof = test_bit(IVTV_F_S_STREAMOFF, &s->s_flags);
@@ -833,13 +846,16 @@ static void ivtv_stop_decoding(struct ivtv_open_id *id, int flags, u64 pts)
int ivtv_v4l2_close(struct file *filp)
{
- struct ivtv_open_id *id = filp->private_data;
+ struct v4l2_fh *fh = filp->private_data;
+ struct ivtv_open_id *id = fh2id(fh);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
IVTV_DEBUG_FILE("close %s\n", s->name);
v4l2_prio_close(&itv->prio, &id->prio);
+ v4l2_fh_del(fh);
+ v4l2_fh_exit(fh);
/* Easy case first: this stream was never claimed by us */
if (s->id != id->open_id) {
@@ -895,6 +911,7 @@ static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
{
struct ivtv *itv = s->itv;
struct ivtv_open_id *item;
+ int res = 0;
IVTV_DEBUG_FILE("open %s\n", s->name);
@@ -915,17 +932,27 @@ static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
}
/* Allocate memory */
- item = kmalloc(sizeof(struct ivtv_open_id), GFP_KERNEL);
+ item = kzalloc(sizeof(struct ivtv_open_id), GFP_KERNEL);
if (NULL == item) {
IVTV_DEBUG_WARN("nomem on v4l2 open\n");
return -ENOMEM;
}
+ v4l2_fh_init(&item->fh, s->vdev);
+ if (s->type == IVTV_DEC_STREAM_TYPE_YUV ||
+ s->type == IVTV_DEC_STREAM_TYPE_MPG) {
+ res = v4l2_event_alloc(&item->fh, 60);
+ }
+ if (res < 0) {
+ v4l2_fh_exit(&item->fh);
+ kfree(item);
+ return res;
+ }
item->itv = itv;
item->type = s->type;
v4l2_prio_open(&itv->prio, &item->prio);
item->open_id = itv->open_id++;
- filp->private_data = item;
+ filp->private_data = &item->fh;
if (item->type == IVTV_ENC_STREAM_TYPE_RAD) {
/* Try to claim this stream */
@@ -940,6 +967,7 @@ static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
/* switching to radio while capture is
in progress is not polite */
ivtv_release_stream(s);
+ v4l2_fh_exit(&item->fh);
kfree(item);
return -EBUSY;
}
@@ -970,6 +998,7 @@ static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
1080 * ((itv->yuv_info.v4l2_src_h + 31) & ~31);
itv->yuv_info.stream_size = 0;
}
+ v4l2_fh_add(&item->fh);
return 0;
}
diff --git a/drivers/media/video/ivtv/ivtv-i2c.c b/drivers/media/video/ivtv/ivtv-i2c.c
index 2ee03c2a1b58..a5b92d109c6c 100644
--- a/drivers/media/video/ivtv/ivtv-i2c.c
+++ b/drivers/media/video/ivtv/ivtv-i2c.c
@@ -193,7 +193,7 @@ static int ivtv_i2c_new_ir(struct ivtv *itv, u32 hw, const char *type, u8 addr)
/* Our default information for ir-kbd-i2c.c to use */
switch (hw) {
case IVTV_HW_I2C_IR_RX_AVER:
- init_data->ir_codes = &ir_codes_avermedia_cardbus_table;
+ init_data->ir_codes = RC_MAP_AVERMEDIA_CARDBUS;
init_data->internal_get_key_func =
IR_KBD_GET_KEY_AVERMEDIA_CARDBUS;
init_data->type = IR_TYPE_OTHER;
@@ -202,14 +202,14 @@ static int ivtv_i2c_new_ir(struct ivtv *itv, u32 hw, const char *type, u8 addr)
case IVTV_HW_I2C_IR_RX_HAUP_EXT:
case IVTV_HW_I2C_IR_RX_HAUP_INT:
/* Default to old black remote */
- init_data->ir_codes = &ir_codes_rc5_tv_table;
+ init_data->ir_codes = RC_MAP_RC5_TV;
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP;
init_data->type = IR_TYPE_RC5;
init_data->name = itv->card_name;
break;
case IVTV_HW_Z8F0811_IR_RX_HAUP:
/* Default to grey remote */
- init_data->ir_codes = &ir_codes_hauppauge_new_table;
+ init_data->ir_codes = RC_MAP_HAUPPAUGE_NEW;
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
init_data->type = IR_TYPE_RC5;
init_data->name = itv->card_name;
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index 99f3c39a118b..a17c659e6327 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -35,6 +35,7 @@
#include <media/saa7127.h>
#include <media/tveeprom.h>
#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-event.h>
#include <linux/dvb/audio.h>
#include <linux/i2c-id.h>
@@ -391,7 +392,7 @@ static int ivtv_g_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_fo
return 0;
}
- v4l2_subdev_call(itv->sd_video, video, g_fmt, fmt);
+ v4l2_subdev_call(itv->sd_video, vbi, g_sliced_fmt, vbifmt);
vbifmt->service_set = ivtv_get_service_set(vbifmt);
return 0;
}
@@ -597,7 +598,7 @@ static int ivtv_s_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *f
return -EBUSY;
itv->vbi.sliced_in->service_set = 0;
itv->vbi.in.type = V4L2_BUF_TYPE_VBI_CAPTURE;
- v4l2_subdev_call(itv->sd_video, video, s_fmt, fmt);
+ v4l2_subdev_call(itv->sd_video, vbi, s_raw_fmt, &fmt->fmt.vbi);
return ivtv_g_fmt_vbi_cap(file, fh, fmt);
}
@@ -615,7 +616,7 @@ static int ivtv_s_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_fo
if (ivtv_raw_vbi(itv) && atomic_read(&itv->capturing) > 0)
return -EBUSY;
itv->vbi.in.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
- v4l2_subdev_call(itv->sd_video, video, s_fmt, fmt);
+ v4l2_subdev_call(itv->sd_video, vbi, s_sliced_fmt, vbifmt);
memcpy(itv->vbi.sliced_in, vbifmt, sizeof(*itv->vbi.sliced_in));
return 0;
}
@@ -1087,8 +1088,10 @@ static int ivtv_g_std(struct file *file, void *fh, v4l2_std_id *std)
int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std)
{
+ DEFINE_WAIT(wait);
struct ivtv *itv = ((struct ivtv_open_id *)fh)->itv;
struct yuv_playback_info *yi = &itv->yuv_info;
+ int f;
if ((*std & V4L2_STD_ALL) == 0)
return -EINVAL;
@@ -1128,6 +1131,25 @@ int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std)
itv->is_out_60hz = itv->is_60hz;
itv->is_out_50hz = itv->is_50hz;
ivtv_call_all(itv, video, s_std_output, itv->std_out);
+
+ /*
+ * The next firmware call is time sensitive. Time it to
+ * avoid risk of a hard lock, by trying to ensure the call
+ * happens within the first 100 lines of the top field.
+ * Make 4 attempts to sync to the decoder before giving up.
+ */
+ for (f = 0; f < 4; f++) {
+ prepare_to_wait(&itv->vsync_waitq, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if ((read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16) < 100)
+ break;
+ schedule_timeout(msecs_to_jiffies(25));
+ }
+ finish_wait(&itv->vsync_waitq, &wait);
+
+ if (f == 4)
+ IVTV_WARN("Mode change failed to sync to decoder\n");
+
ivtv_vapi(itv, CX2341X_DEC_SET_STANDARD, 1, itv->is_out_50hz);
itv->main_rect.left = itv->main_rect.top = 0;
itv->main_rect.width = 720;
@@ -1431,6 +1453,18 @@ static int ivtv_overlay(struct file *file, void *fh, unsigned int on)
return 0;
}
+static int ivtv_subscribe_event(struct v4l2_fh *fh, struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_VSYNC:
+ case V4L2_EVENT_EOS:
+ break;
+ default:
+ return -EINVAL;
+ }
+ return v4l2_event_subscribe(fh, sub);
+}
+
static int ivtv_log_status(struct file *file, void *fh)
{
struct ivtv *itv = ((struct ivtv_open_id *)fh)->itv;
@@ -1539,10 +1573,11 @@ static int ivtv_log_status(struct file *file, void *fh)
static int ivtv_decoder_ioctls(struct file *filp, unsigned int cmd, void *arg)
{
- struct ivtv_open_id *id = (struct ivtv_open_id *)filp->private_data;
+ struct ivtv_open_id *id = fh2id(filp->private_data);
struct ivtv *itv = id->itv;
int nonblocking = filp->f_flags & O_NONBLOCK;
struct ivtv_stream *s = &itv->streams[id->type];
+ unsigned long iarg = (unsigned long)arg;
switch (cmd) {
case IVTV_IOC_DMA_FRAME: {
@@ -1724,6 +1759,33 @@ static int ivtv_decoder_ioctls(struct file *filp, unsigned int cmd, void *arg)
break;
}
+ case VIDEO_SELECT_SOURCE:
+ IVTV_DEBUG_IOCTL("VIDEO_SELECT_SOURCE\n");
+ if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
+ return -EINVAL;
+ return ivtv_passthrough_mode(itv, iarg == VIDEO_SOURCE_DEMUX);
+
+ case AUDIO_SET_MUTE:
+ IVTV_DEBUG_IOCTL("AUDIO_SET_MUTE\n");
+ itv->speed_mute_audio = iarg;
+ return 0;
+
+ case AUDIO_CHANNEL_SELECT:
+ IVTV_DEBUG_IOCTL("AUDIO_CHANNEL_SELECT\n");
+ if (iarg > AUDIO_STEREO_SWAPPED)
+ return -EINVAL;
+ itv->audio_stereo_mode = iarg;
+ ivtv_vapi(itv, CX2341X_DEC_SET_AUDIO_MODE, 2, itv->audio_bilingual_mode, itv->audio_stereo_mode);
+ return 0;
+
+ case AUDIO_BILINGUAL_CHANNEL_SELECT:
+ IVTV_DEBUG_IOCTL("AUDIO_BILINGUAL_CHANNEL_SELECT\n");
+ if (iarg > AUDIO_STEREO_SWAPPED)
+ return -EINVAL;
+ itv->audio_bilingual_mode = iarg;
+ ivtv_vapi(itv, CX2341X_DEC_SET_AUDIO_MODE, 2, itv->audio_bilingual_mode, itv->audio_stereo_mode);
+ return 0;
+
default:
return -EINVAL;
}
@@ -1755,6 +1817,10 @@ static long ivtv_default(struct file *file, void *fh, int cmd, void *arg)
case VIDEO_CONTINUE:
case VIDEO_COMMAND:
case VIDEO_TRY_COMMAND:
+ case VIDEO_SELECT_SOURCE:
+ case AUDIO_SET_MUTE:
+ case AUDIO_CHANNEL_SELECT:
+ case AUDIO_BILINGUAL_CHANNEL_SELECT:
return ivtv_decoder_ioctls(file, cmd, (void *)arg);
default:
@@ -1767,42 +1833,9 @@ static long ivtv_serialized_ioctl(struct ivtv *itv, struct file *filp,
unsigned int cmd, unsigned long arg)
{
struct video_device *vfd = video_devdata(filp);
- struct ivtv_open_id *id = (struct ivtv_open_id *)filp->private_data;
+ struct ivtv_open_id *id = fh2id(filp->private_data);
long ret;
- /* Filter dvb ioctls that cannot be handled by the v4l ioctl framework */
- switch (cmd) {
- case VIDEO_SELECT_SOURCE:
- IVTV_DEBUG_IOCTL("VIDEO_SELECT_SOURCE\n");
- if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
- return -EINVAL;
- return ivtv_passthrough_mode(itv, arg == VIDEO_SOURCE_DEMUX);
-
- case AUDIO_SET_MUTE:
- IVTV_DEBUG_IOCTL("AUDIO_SET_MUTE\n");
- itv->speed_mute_audio = arg;
- return 0;
-
- case AUDIO_CHANNEL_SELECT:
- IVTV_DEBUG_IOCTL("AUDIO_CHANNEL_SELECT\n");
- if (arg > AUDIO_STEREO_SWAPPED)
- return -EINVAL;
- itv->audio_stereo_mode = arg;
- ivtv_vapi(itv, CX2341X_DEC_SET_AUDIO_MODE, 2, itv->audio_bilingual_mode, itv->audio_stereo_mode);
- return 0;
-
- case AUDIO_BILINGUAL_CHANNEL_SELECT:
- IVTV_DEBUG_IOCTL("AUDIO_BILINGUAL_CHANNEL_SELECT\n");
- if (arg > AUDIO_STEREO_SWAPPED)
- return -EINVAL;
- itv->audio_bilingual_mode = arg;
- ivtv_vapi(itv, CX2341X_DEC_SET_AUDIO_MODE, 2, itv->audio_bilingual_mode, itv->audio_stereo_mode);
- return 0;
-
- default:
- break;
- }
-
/* check priority */
switch (cmd) {
case VIDIOC_S_CTRL:
@@ -1832,10 +1865,13 @@ static long ivtv_serialized_ioctl(struct ivtv *itv, struct file *filp,
long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
- struct ivtv_open_id *id = (struct ivtv_open_id *)filp->private_data;
+ struct ivtv_open_id *id = fh2id(filp->private_data);
struct ivtv *itv = id->itv;
long res;
+ /* DQEVENT can block, so this should not run with the serialize lock */
+ if (cmd == VIDIOC_DQEVENT)
+ return ivtv_serialized_ioctl(itv, filp, cmd, arg);
mutex_lock(&itv->serialize_lock);
res = ivtv_serialized_ioctl(itv, filp, cmd, arg);
mutex_unlock(&itv->serialize_lock);
@@ -1906,6 +1942,8 @@ static const struct v4l2_ioctl_ops ivtv_ioctl_ops = {
.vidioc_g_ext_ctrls = ivtv_g_ext_ctrls,
.vidioc_s_ext_ctrls = ivtv_s_ext_ctrls,
.vidioc_try_ext_ctrls = ivtv_try_ext_ctrls,
+ .vidioc_subscribe_event = ivtv_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
void ivtv_set_funcs(struct video_device *vdev)
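[Note, not part of the patch: with vidioc_subscribe_event/vidioc_unsubscribe_event wired up, a decoder application can receive VSYNC and EOS notifications through the V4L2 event API rather than the old POLLPRI-only flags. A minimal userspace sketch follows; the /dev/video16 decoder node path is an assumption and error handling is abbreviated.]

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_event_subscription sub;
	struct v4l2_event ev;
	struct pollfd pfd;
	int fd = open("/dev/video16", O_RDWR);	/* ivtv decoder node (assumption) */

	if (fd < 0)
		return 1;

	memset(&sub, 0, sizeof(sub));
	sub.type = V4L2_EVENT_VSYNC;		/* V4L2_EVENT_EOS also accepted */
	if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0)
		return 1;

	pfd.fd = fd;
	pfd.events = POLLPRI;			/* events are signalled via POLLPRI */
	if (poll(&pfd, 1, 5000) > 0 && (pfd.revents & POLLPRI)) {
		if (ioctl(fd, VIDIOC_DQEVENT, &ev) == 0 &&
		    ev.type == V4L2_EVENT_VSYNC)
			printf("vsync, field %d\n", ev.u.vsync.field);
	}
	return 0;
}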
diff --git a/drivers/media/video/ivtv/ivtv-irq.c b/drivers/media/video/ivtv/ivtv-irq.c
index 12d36ca91d53..fea1ec33b0df 100644
--- a/drivers/media/video/ivtv/ivtv-irq.c
+++ b/drivers/media/video/ivtv/ivtv-irq.c
@@ -25,6 +25,7 @@
#include "ivtv-mailbox.h"
#include "ivtv-vbi.h"
#include "ivtv-yuv.h"
+#include <media/v4l2-event.h>
#define DMA_MAGIC_COOKIE 0x000001fe
@@ -752,7 +753,7 @@ static void ivtv_irq_vsync(struct ivtv *itv)
* to determine the line being displayed and ensure we handle
* one vsync per frame.
*/
- unsigned int frame = read_reg(0x28c0) & 1;
+ unsigned int frame = read_reg(IVTV_REG_DEC_LINE_FIELD) & 1;
struct yuv_playback_info *yi = &itv->yuv_info;
int last_dma_frame = atomic_read(&yi->next_dma_frame);
struct yuv_frame_info *f = &yi->new_frame_info[last_dma_frame];
@@ -778,6 +779,14 @@ static void ivtv_irq_vsync(struct ivtv *itv)
}
}
if (frame != (itv->last_vsync_field & 1)) {
+ static const struct v4l2_event evtop = {
+ .type = V4L2_EVENT_VSYNC,
+ .u.vsync.field = V4L2_FIELD_TOP,
+ };
+ static const struct v4l2_event evbottom = {
+ .type = V4L2_EVENT_VSYNC,
+ .u.vsync.field = V4L2_FIELD_BOTTOM,
+ };
struct ivtv_stream *s = ivtv_get_output_stream(itv);
itv->last_vsync_field += 1;
@@ -791,10 +800,12 @@ static void ivtv_irq_vsync(struct ivtv *itv)
if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) {
set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags);
wake_up(&itv->event_waitq);
+ if (s)
+ wake_up(&s->waitq);
}
+ if (s && s->vdev)
+ v4l2_event_queue(s->vdev, frame ? &evtop : &evbottom);
wake_up(&itv->vsync_waitq);
- if (s)
- wake_up(&s->waitq);
/* Send VBI to saa7127 */
if (frame && (itv->output_mode == OUT_PASSTHROUGH ||
@@ -852,9 +863,11 @@ irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
*/
if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
/* vsync is enabled, see if we're in a new field */
- if ((itv->last_vsync_field & 1) != (read_reg(0x28c0) & 1)) {
+ if ((itv->last_vsync_field & 1) !=
+ (read_reg(IVTV_REG_DEC_LINE_FIELD) & 1)) {
/* New field, looks like we missed it */
- IVTV_DEBUG_YUV("VSync interrupt missed %d\n",read_reg(0x28c0)>>16);
+ IVTV_DEBUG_YUV("VSync interrupt missed %d\n",
+ read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16);
vsync_force = 1;
}
}
diff --git a/drivers/media/video/ivtv/ivtv-streams.c b/drivers/media/video/ivtv/ivtv-streams.c
index 1f9387f6ca24..de4288cc1889 100644
--- a/drivers/media/video/ivtv/ivtv-streams.c
+++ b/drivers/media/video/ivtv/ivtv-streams.c
@@ -42,6 +42,7 @@
#include "ivtv-yuv.h"
#include "ivtv-cards.h"
#include "ivtv-streams.h"
+#include <media/v4l2-event.h>
static const struct v4l2_file_operations ivtv_v4l2_enc_fops = {
.owner = THIS_MODULE,
@@ -343,7 +344,10 @@ static void ivtv_vbi_setup(struct ivtv *itv)
ivtv_vapi(itv, CX2341X_ENC_SET_VBI_LINE, 5, 0xffff , 0, 0, 0, 0);
/* setup VBI registers */
- v4l2_subdev_call(itv->sd_video, video, s_fmt, &itv->vbi.in);
+ if (raw)
+ v4l2_subdev_call(itv->sd_video, vbi, s_raw_fmt, &itv->vbi.in.fmt.vbi);
+ else
+ v4l2_subdev_call(itv->sd_video, vbi, s_sliced_fmt, &itv->vbi.in.fmt.sliced);
/* determine number of lines and total number of VBI bytes.
A raw line takes 1443 bytes: 2 * 720 + 4 byte frame header - 1
@@ -581,10 +585,9 @@ int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s)
v4l2_subdev_call(itv->sd_audio, audio, s_stream, 1);
/* Avoid unpredictable PCI bus hang - disable video clocks */
v4l2_subdev_call(itv->sd_video, video, s_stream, 0);
- ivtv_msleep_timeout(150, 1);
+ ivtv_msleep_timeout(300, 1);
ivtv_vapi(itv, CX2341X_ENC_INITIALIZE_INPUT, 0);
v4l2_subdev_call(itv->sd_video, video, s_stream, 1);
- ivtv_msleep_timeout(150, 1);
}
/* begin_capture */
@@ -830,6 +833,10 @@ int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end)
ivtv_set_irq_mask(itv, IVTV_IRQ_ENC_VIM_RST);
}
+ /* Raw-passthrough is implied on start. Make sure it's stopped so
+ the encoder will re-initialize when next started */
+ ivtv_vapi(itv, CX2341X_ENC_STOP_CAPTURE, 3, 1, 2, 7);
+
wake_up(&s->waitq);
return 0;
@@ -837,6 +844,9 @@ int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end)
int ivtv_stop_v4l2_decode_stream(struct ivtv_stream *s, int flags, u64 pts)
{
+ static const struct v4l2_event ev = {
+ .type = V4L2_EVENT_EOS,
+ };
struct ivtv *itv = s->itv;
if (s->vdev == NULL)
@@ -888,6 +898,7 @@ int ivtv_stop_v4l2_decode_stream(struct ivtv_stream *s, int flags, u64 pts)
set_bit(IVTV_F_I_EV_DEC_STOPPED, &itv->i_flags);
wake_up(&itv->event_waitq);
+ v4l2_event_queue(s->vdev, &ev);
/* wake up wait queues */
wake_up(&s->waitq);
diff --git a/drivers/media/video/ivtv/ivtv-vbi.c b/drivers/media/video/ivtv/ivtv-vbi.c
index f420d31b937d..e1c347e5ebd8 100644
--- a/drivers/media/video/ivtv/ivtv-vbi.c
+++ b/drivers/media/video/ivtv/ivtv-vbi.c
@@ -38,7 +38,7 @@ static void ivtv_set_vps(struct ivtv *itv, int enabled)
data.data[9] = itv->vbi.vps_payload.data[2];
data.data[10] = itv->vbi.vps_payload.data[3];
data.data[11] = itv->vbi.vps_payload.data[4];
- ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_vbi_data, &data);
+ ivtv_call_hw(itv, IVTV_HW_SAA7127, vbi, s_vbi_data, &data);
}
static void ivtv_set_cc(struct ivtv *itv, int mode, const struct vbi_cc *cc)
@@ -52,12 +52,12 @@ static void ivtv_set_cc(struct ivtv *itv, int mode, const struct vbi_cc *cc)
data.line = (mode & 1) ? 21 : 0;
data.data[0] = cc->odd[0];
data.data[1] = cc->odd[1];
- ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_vbi_data, &data);
+ ivtv_call_hw(itv, IVTV_HW_SAA7127, vbi, s_vbi_data, &data);
data.field = 1;
data.line = (mode & 2) ? 21 : 0;
data.data[0] = cc->even[0];
data.data[1] = cc->even[1];
- ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_vbi_data, &data);
+ ivtv_call_hw(itv, IVTV_HW_SAA7127, vbi, s_vbi_data, &data);
}
static void ivtv_set_wss(struct ivtv *itv, int enabled, int mode)
@@ -80,7 +80,7 @@ static void ivtv_set_wss(struct ivtv *itv, int enabled, int mode)
data.line = enabled ? 23 : 0;
data.data[0] = mode & 0xff;
data.data[1] = (mode >> 8) & 0xff;
- ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_vbi_data, &data);
+ ivtv_call_hw(itv, IVTV_HW_SAA7127, vbi, s_vbi_data, &data);
}
static int odd_parity(u8 c)
@@ -134,7 +134,7 @@ void ivtv_write_vbi(struct ivtv *itv, const struct v4l2_sliced_vbi_data *sliced,
}
}
}
- if (found_cc && vi->cc_payload_idx < sizeof(vi->cc_payload)) {
+ if (found_cc && vi->cc_payload_idx < ARRAY_SIZE(vi->cc_payload)) {
vi->cc_payload[vi->cc_payload_idx++] = cc;
set_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags);
}
@@ -316,7 +316,7 @@ static u32 compress_sliced_buf(struct ivtv *itv, u32 line, u8 *buf, u32 size, u8
continue;
}
vbi.p = p + 4;
- v4l2_subdev_call(itv->sd_video, video, decode_vbi_line, &vbi);
+ v4l2_subdev_call(itv->sd_video, vbi, decode_vbi_line, &vbi);
if (vbi.type && !(lines & (1 << vbi.line))) {
lines |= 1 << vbi.line;
itv->vbi.sliced_data[line].id = vbi.type;
@@ -440,7 +440,7 @@ void ivtv_vbi_work_handler(struct ivtv *itv)
data.id = V4L2_SLICED_WSS_625;
data.field = 0;
- if (v4l2_subdev_call(itv->sd_video, video, g_vbi_data, &data) == 0) {
+ if (v4l2_subdev_call(itv->sd_video, vbi, g_vbi_data, &data) == 0) {
ivtv_set_wss(itv, 1, data.data[0] & 0xf);
vi->wss_missing_cnt = 0;
} else if (vi->wss_missing_cnt == 4) {
@@ -454,13 +454,13 @@ void ivtv_vbi_work_handler(struct ivtv *itv)
data.id = V4L2_SLICED_CAPTION_525;
data.field = 0;
- if (v4l2_subdev_call(itv->sd_video, video, g_vbi_data, &data) == 0) {
+ if (v4l2_subdev_call(itv->sd_video, vbi, g_vbi_data, &data) == 0) {
mode |= 1;
cc.odd[0] = data.data[0];
cc.odd[1] = data.data[1];
}
data.field = 1;
- if (v4l2_subdev_call(itv->sd_video, video, g_vbi_data, &data) == 0) {
+ if (v4l2_subdev_call(itv->sd_video, vbi, g_vbi_data, &data) == 0) {
mode |= 2;
cc.even[0] = data.data[0];
cc.even[1] = data.data[1];
diff --git a/drivers/media/video/ivtv/ivtvfb.c b/drivers/media/video/ivtv/ivtvfb.c
index de2ff1c6ac34..49e1a283ed36 100644
--- a/drivers/media/video/ivtv/ivtvfb.c
+++ b/drivers/media/video/ivtv/ivtvfb.c
@@ -460,7 +460,7 @@ static int ivtvfb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long ar
vblank.flags = FB_VBLANK_HAVE_COUNT |FB_VBLANK_HAVE_VCOUNT |
FB_VBLANK_HAVE_VSYNC;
- trace = read_reg(0x028c0) >> 16;
+ trace = read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16;
if (itv->is_50hz && trace > 312)
trace -= 312;
else if (itv->is_60hz && trace > 262)
diff --git a/drivers/media/video/mem2mem_testdev.c b/drivers/media/video/mem2mem_testdev.c
new file mode 100644
index 000000000000..baf211b9f6ce
--- /dev/null
+++ b/drivers/media/video/mem2mem_testdev.c
@@ -0,0 +1,1049 @@
+/*
+ * A virtual v4l2-mem2mem example device.
+ *
+ * This is a virtual device driver for testing mem-to-mem videobuf framework.
+ * It simulates a device that uses memory buffers for both source and
+ * destination, processes the data and issues an "irq" (simulated by a timer).
+ * The device is capable of multi-instance, multi-buffer-per-transaction
+ * operation (via the mem2mem framework).
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <p.osciak@samsung.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version
+ */
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/version.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+
+#include <linux/platform_device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf-vmalloc.h>
+
+#define MEM2MEM_TEST_MODULE_NAME "mem2mem-testdev"
+
+MODULE_DESCRIPTION("Virtual device for mem2mem framework testing");
+MODULE_AUTHOR("Pawel Osciak, <p.osciak@samsung.com>");
+MODULE_LICENSE("GPL");
+
+
+#define MIN_W 32
+#define MIN_H 32
+#define MAX_W 640
+#define MAX_H 480
+#define DIM_ALIGN_MASK 0x08 /* 8-alignment for dimensions */
+
+/* Flags that indicate a format can be used for capture/output */
+#define MEM2MEM_CAPTURE (1 << 0)
+#define MEM2MEM_OUTPUT (1 << 1)
+
+#define MEM2MEM_NAME "m2m-testdev"
+
+/* Per queue */
+#define MEM2MEM_DEF_NUM_BUFS VIDEO_MAX_FRAME
+/* In bytes, per queue */
+#define MEM2MEM_VID_MEM_LIMIT (16 * 1024 * 1024)
+
+/* Default transaction time in msec */
+#define MEM2MEM_DEF_TRANSTIME 1000
+/* Default number of buffers per transaction */
+#define MEM2MEM_DEF_TRANSLEN 1
+#define MEM2MEM_COLOR_STEP (0xff >> 4)
+#define MEM2MEM_NUM_TILES 8
+
+#define dprintk(dev, fmt, arg...) \
+ v4l2_dbg(1, 1, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg)
+
+
+void m2mtest_dev_release(struct device *dev)
+{}
+
+static struct platform_device m2mtest_pdev = {
+ .name = MEM2MEM_NAME,
+ .dev.release = m2mtest_dev_release,
+};
+
+struct m2mtest_fmt {
+ char *name;
+ u32 fourcc;
+ int depth;
+ /* Types the format can be used for */
+ u32 types;
+};
+
+static struct m2mtest_fmt formats[] = {
+ {
+ .name = "RGB565 (BE)",
+ .fourcc = V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */
+ .depth = 16,
+ /* Both capture and output format */
+ .types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT,
+ },
+ {
+ .name = "4:2:2, packed, YUYV",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .depth = 16,
+ /* Output-only format */
+ .types = MEM2MEM_OUTPUT,
+ },
+};
+
+/* Per-queue, driver-specific private data */
+struct m2mtest_q_data {
+ unsigned int width;
+ unsigned int height;
+ unsigned int sizeimage;
+ struct m2mtest_fmt *fmt;
+};
+
+enum {
+ V4L2_M2M_SRC = 0,
+ V4L2_M2M_DST = 1,
+};
+
+/* Source and destination queue data */
+static struct m2mtest_q_data q_data[2];
+
+static struct m2mtest_q_data *get_q_data(enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return &q_data[V4L2_M2M_SRC];
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return &q_data[V4L2_M2M_DST];
+ default:
+ BUG();
+ }
+ return NULL;
+}
+
+#define V4L2_CID_TRANS_TIME_MSEC V4L2_CID_PRIVATE_BASE
+#define V4L2_CID_TRANS_NUM_BUFS (V4L2_CID_PRIVATE_BASE + 1)
+
+static struct v4l2_queryctrl m2mtest_ctrls[] = {
+ {
+ .id = V4L2_CID_TRANS_TIME_MSEC,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Transaction time (msec)",
+ .minimum = 1,
+ .maximum = 10000,
+ .step = 100,
+ .default_value = 1000,
+ .flags = 0,
+ }, {
+ .id = V4L2_CID_TRANS_NUM_BUFS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Buffers per transaction",
+ .minimum = 1,
+ .maximum = MEM2MEM_DEF_NUM_BUFS,
+ .step = 1,
+ .default_value = 1,
+ .flags = 0,
+ },
+};
+
+#define NUM_FORMATS ARRAY_SIZE(formats)
+
+static struct m2mtest_fmt *find_format(struct v4l2_format *f)
+{
+ struct m2mtest_fmt *fmt;
+ unsigned int k;
+
+ for (k = 0; k < NUM_FORMATS; k++) {
+ fmt = &formats[k];
+ if (fmt->fourcc == f->fmt.pix.pixelformat)
+ break;
+ }
+
+ if (k == NUM_FORMATS)
+ return NULL;
+
+ return &formats[k];
+}
+
+struct m2mtest_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device *vfd;
+
+ atomic_t num_inst;
+ struct mutex dev_mutex;
+ spinlock_t irqlock;
+
+ struct timer_list timer;
+
+ struct v4l2_m2m_dev *m2m_dev;
+};
+
+struct m2mtest_ctx {
+ struct m2mtest_dev *dev;
+
+ /* Processed buffers in this transaction */
+ u8 num_processed;
+
+ /* Transaction length (i.e. how many buffers per transaction) */
+ u32 translen;
+ /* Transaction time (i.e. simulated processing time) in milliseconds */
+ u32 transtime;
+
+ /* Abort requested by m2m */
+ int aborting;
+
+ struct v4l2_m2m_ctx *m2m_ctx;
+};
+
+struct m2mtest_buffer {
+ /* vb must be first! */
+ struct videobuf_buffer vb;
+};
+
+static struct v4l2_queryctrl *get_ctrl(int id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(m2mtest_ctrls); ++i) {
+ if (id == m2mtest_ctrls[i].id)
+ return &m2mtest_ctrls[i];
+ }
+
+ return NULL;
+}
+
+static int device_process(struct m2mtest_ctx *ctx,
+ struct m2mtest_buffer *in_buf,
+ struct m2mtest_buffer *out_buf)
+{
+ struct m2mtest_dev *dev = ctx->dev;
+ u8 *p_in, *p_out;
+ int x, y, t, w;
+ int tile_w, bytes_left;
+ struct videobuf_queue *src_q;
+ struct videobuf_queue *dst_q;
+
+ src_q = v4l2_m2m_get_src_vq(ctx->m2m_ctx);
+ dst_q = v4l2_m2m_get_dst_vq(ctx->m2m_ctx);
+ p_in = videobuf_queue_to_vaddr(src_q, &in_buf->vb);
+ p_out = videobuf_queue_to_vaddr(dst_q, &out_buf->vb);
+ if (!p_in || !p_out) {
+ v4l2_err(&dev->v4l2_dev,
+ "Acquiring kernel pointers to buffers failed\n");
+ return -EFAULT;
+ }
+
+ if (in_buf->vb.size < out_buf->vb.size) {
+ v4l2_err(&dev->v4l2_dev, "Output buffer is too small\n");
+ return -EINVAL;
+ }
+
+ tile_w = (in_buf->vb.width * (q_data[V4L2_M2M_DST].fmt->depth >> 3))
+ / MEM2MEM_NUM_TILES;
+ bytes_left = in_buf->vb.bytesperline - tile_w * MEM2MEM_NUM_TILES;
+ w = 0;
+
+ for (y = 0; y < in_buf->vb.height; ++y) {
+ for (t = 0; t < MEM2MEM_NUM_TILES; ++t) {
+ if (w & 0x1) {
+ for (x = 0; x < tile_w; ++x)
+ *p_out++ = *p_in++ + MEM2MEM_COLOR_STEP;
+ } else {
+ for (x = 0; x < tile_w; ++x)
+ *p_out++ = *p_in++ - MEM2MEM_COLOR_STEP;
+ }
+ ++w;
+ }
+ p_in += bytes_left;
+ p_out += bytes_left;
+ }
+
+ return 0;
+}
+
+static void schedule_irq(struct m2mtest_dev *dev, int msec_timeout)
+{
+ dprintk(dev, "Scheduling a simulated irq\n");
+ mod_timer(&dev->timer, jiffies + msecs_to_jiffies(msec_timeout));
+}
+
+/*
+ * mem2mem callbacks
+ */
+
+/**
+ * job_ready() - check whether an instance is ready to be scheduled to run
+ */
+static int job_ready(void *priv)
+{
+ struct m2mtest_ctx *ctx = priv;
+
+ if (v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) < ctx->translen
+ || v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) < ctx->translen) {
+ dprintk(ctx->dev, "Not enough buffers available\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+static void job_abort(void *priv)
+{
+ struct m2mtest_ctx *ctx = priv;
+
+ /* Will cancel the transaction in the next interrupt handler */
+ ctx->aborting = 1;
+}
+
+/* device_run() - prepares and starts the device
+ *
+ * This simulates all the immediate preparations required before starting
+ * a device. This will be called by the framework when it decides to schedule
+ * a particular instance.
+ */
+static void device_run(void *priv)
+{
+ struct m2mtest_ctx *ctx = priv;
+ struct m2mtest_dev *dev = ctx->dev;
+ struct m2mtest_buffer *src_buf, *dst_buf;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+
+ device_process(ctx, src_buf, dst_buf);
+
+ /* Run a timer, which simulates a hardware irq */
+ schedule_irq(dev, ctx->transtime);
+}
+
+
+static void device_isr(unsigned long priv)
+{
+ struct m2mtest_dev *m2mtest_dev = (struct m2mtest_dev *)priv;
+ struct m2mtest_ctx *curr_ctx;
+ struct m2mtest_buffer *src_buf, *dst_buf;
+ unsigned long flags;
+
+ curr_ctx = v4l2_m2m_get_curr_priv(m2mtest_dev->m2m_dev);
+
+ if (NULL == curr_ctx) {
+ printk(KERN_ERR
+ "Instance released before the end of transaction\n");
+ return;
+ }
+
+ src_buf = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx);
+ curr_ctx->num_processed++;
+
+ if (curr_ctx->num_processed == curr_ctx->translen
+ || curr_ctx->aborting) {
+ dprintk(curr_ctx->dev, "Finishing transaction\n");
+ curr_ctx->num_processed = 0;
+ spin_lock_irqsave(&m2mtest_dev->irqlock, flags);
+ src_buf->vb.state = dst_buf->vb.state = VIDEOBUF_DONE;
+ wake_up(&src_buf->vb.done);
+ wake_up(&dst_buf->vb.done);
+ spin_unlock_irqrestore(&m2mtest_dev->irqlock, flags);
+ v4l2_m2m_job_finish(m2mtest_dev->m2m_dev, curr_ctx->m2m_ctx);
+ } else {
+ spin_lock_irqsave(&m2mtest_dev->irqlock, flags);
+ src_buf->vb.state = dst_buf->vb.state = VIDEOBUF_DONE;
+ wake_up(&src_buf->vb.done);
+ wake_up(&dst_buf->vb.done);
+ spin_unlock_irqrestore(&m2mtest_dev->irqlock, flags);
+ device_run(curr_ctx);
+ }
+}
+
+
+/*
+ * video ioctls
+ */
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strncpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver) - 1);
+ strncpy(cap->card, MEM2MEM_NAME, sizeof(cap->card) - 1);
+ cap->bus_info[0] = 0;
+ cap->version = KERNEL_VERSION(0, 1, 0);
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT
+ | V4L2_CAP_STREAMING;
+
+ return 0;
+}
+
+static int enum_fmt(struct v4l2_fmtdesc *f, u32 type)
+{
+ int i, num;
+ struct m2mtest_fmt *fmt;
+
+ num = 0;
+
+ for (i = 0; i < NUM_FORMATS; ++i) {
+ if (formats[i].types & type) {
+ /* index-th format of type type found ? */
+ if (num == f->index)
+ break;
+ /* Correct type but haven't reached our index yet,
+ * just increment per-type index */
+ ++num;
+ }
+ }
+
+ if (i < NUM_FORMATS) {
+ /* Format found */
+ fmt = &formats[i];
+ strncpy(f->description, fmt->name, sizeof(f->description) - 1);
+ f->pixelformat = fmt->fourcc;
+ return 0;
+ }
+
+ /* Format not found */
+ return -EINVAL;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return enum_fmt(f, MEM2MEM_CAPTURE);
+}
+
+static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return enum_fmt(f, MEM2MEM_OUTPUT);
+}
+
+static int vidioc_g_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f)
+{
+ struct videobuf_queue *vq;
+ struct m2mtest_q_data *q_data;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(f->type);
+
+ f->fmt.pix.width = q_data->width;
+ f->fmt.pix.height = q_data->height;
+ f->fmt.pix.field = vq->field;
+ f->fmt.pix.pixelformat = q_data->fmt->fourcc;
+ f->fmt.pix.bytesperline = (q_data->width * q_data->fmt->depth) >> 3;
+ f->fmt.pix.sizeimage = q_data->sizeimage;
+
+ return 0;
+}
+
+static int vidioc_g_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return vidioc_g_fmt(priv, f);
+}
+
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return vidioc_g_fmt(priv, f);
+}
+
+static int vidioc_try_fmt(struct v4l2_format *f, struct m2mtest_fmt *fmt)
+{
+ enum v4l2_field field;
+
+ field = f->fmt.pix.field;
+
+ if (field == V4L2_FIELD_ANY)
+ field = V4L2_FIELD_NONE;
+ else if (V4L2_FIELD_NONE != field)
+ return -EINVAL;
+
+ /* V4L2 specification suggests the driver corrects the format struct
+ * if any of the dimensions is unsupported */
+ f->fmt.pix.field = field;
+
+ if (f->fmt.pix.height < MIN_H)
+ f->fmt.pix.height = MIN_H;
+ else if (f->fmt.pix.height > MAX_H)
+ f->fmt.pix.height = MAX_H;
+
+ if (f->fmt.pix.width < MIN_W)
+ f->fmt.pix.width = MIN_W;
+ else if (f->fmt.pix.width > MAX_W)
+ f->fmt.pix.width = MAX_W;
+
+ f->fmt.pix.width &= ~DIM_ALIGN_MASK;
+ f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3;
+ f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
+
+ return 0;
+}
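+
+/*
+ * Worked example (illustrative only; the limits and alignment mask are
+ * hypothetical here): with MIN/MAX bounds of 32..640 x 32..480 and an
+ * 8-pixel DIM_ALIGN_MASK, a 1000x200 request for a 16-bit format would be
+ * clamped to 640x200, bytesperline would become 640 * 16 / 8 = 1280 and
+ * sizeimage 1280 * 200 = 256000 bytes.
+ */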
+
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct m2mtest_fmt *fmt;
+ struct m2mtest_ctx *ctx = priv;
+
+ fmt = find_format(f);
+ if (!fmt || !(fmt->types & MEM2MEM_CAPTURE)) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "Fourcc format (0x%08x) invalid.\n",
+ f->fmt.pix.pixelformat);
+ return -EINVAL;
+ }
+
+ return vidioc_try_fmt(f, fmt);
+}
+
+static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct m2mtest_fmt *fmt;
+ struct m2mtest_ctx *ctx = priv;
+
+ fmt = find_format(f);
+ if (!fmt || !(fmt->types & MEM2MEM_OUTPUT)) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "Fourcc format (0x%08x) invalid.\n",
+ f->fmt.pix.pixelformat);
+ return -EINVAL;
+ }
+
+ return vidioc_try_fmt(f, fmt);
+}
+
+static int vidioc_s_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f)
+{
+ struct m2mtest_q_data *q_data;
+ struct videobuf_queue *vq;
+ int ret = 0;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(f->type);
+ if (!q_data)
+ return -EINVAL;
+
+ mutex_lock(&vq->vb_lock);
+
+ if (videobuf_queue_is_busy(vq)) {
+ v4l2_err(&ctx->dev->v4l2_dev, "%s queue busy\n", __func__);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ q_data->fmt = find_format(f);
+ q_data->width = f->fmt.pix.width;
+ q_data->height = f->fmt.pix.height;
+ q_data->sizeimage = q_data->width * q_data->height
+ * q_data->fmt->depth >> 3;
+ vq->field = f->fmt.pix.field;
+
+ dprintk(ctx->dev,
+ "Setting format for type %d, wxh: %dx%d, fmt: %d\n",
+ f->type, q_data->width, q_data->height, q_data->fmt->fourcc);
+
+out:
+ mutex_unlock(&vq->vb_lock);
+ return ret;
+}
+
+static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ int ret;
+
+ ret = vidioc_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
+
+ return vidioc_s_fmt(priv, f);
+}
+
+static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ int ret;
+
+ ret = vidioc_try_fmt_vid_out(file, priv, f);
+ if (ret)
+ return ret;
+
+ return vidioc_s_fmt(priv, f);
+}
+
+static int vidioc_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct m2mtest_ctx *ctx = priv;
+
+ return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
+}
+
+static int vidioc_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct m2mtest_ctx *ctx = priv;
+
+ return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct m2mtest_ctx *ctx = priv;
+
+ return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct m2mtest_ctx *ctx = priv;
+
+ return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct m2mtest_ctx *ctx = priv;
+
+ return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
+}
+
+static int vidioc_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct m2mtest_ctx *ctx = priv;
+
+ return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
+}
+
+static int vidioc_queryctrl(struct file *file, void *priv,
+ struct v4l2_queryctrl *qc)
+{
+ struct v4l2_queryctrl *c;
+
+ c = get_ctrl(qc->id);
+ if (!c)
+ return -EINVAL;
+
+ *qc = *c;
+ return 0;
+}
+
+static int vidioc_g_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct m2mtest_ctx *ctx = priv;
+
+ switch (ctrl->id) {
+ case V4L2_CID_TRANS_TIME_MSEC:
+ ctrl->value = ctx->transtime;
+ break;
+
+ case V4L2_CID_TRANS_NUM_BUFS:
+ ctrl->value = ctx->translen;
+ break;
+
+ default:
+ v4l2_err(&ctx->dev->v4l2_dev, "Invalid control\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int check_ctrl_val(struct m2mtest_ctx *ctx, struct v4l2_control *ctrl)
+{
+ struct v4l2_queryctrl *c;
+
+ c = get_ctrl(ctrl->id);
+ if (!c)
+ return -EINVAL;
+
+ if (ctrl->value < c->minimum || ctrl->value > c->maximum) {
+ v4l2_err(&ctx->dev->v4l2_dev, "Value out of range\n");
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static int vidioc_s_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct m2mtest_ctx *ctx = priv;
+ int ret = 0;
+
+ ret = check_ctrl_val(ctx, ctrl);
+ if (ret != 0)
+ return ret;
+
+ switch (ctrl->id) {
+ case V4L2_CID_TRANS_TIME_MSEC:
+ ctx->transtime = ctrl->value;
+ break;
+
+ case V4L2_CID_TRANS_NUM_BUFS:
+ ctx->translen = ctrl->value;
+ break;
+
+ default:
+ v4l2_err(&ctx->dev->v4l2_dev, "Invalid control\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+static const struct v4l2_ioctl_ops m2mtest_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out = vidioc_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out,
+
+ .vidioc_reqbufs = vidioc_reqbufs,
+ .vidioc_querybuf = vidioc_querybuf,
+
+ .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_dqbuf = vidioc_dqbuf,
+
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
+
+ .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_s_ctrl = vidioc_s_ctrl,
+};
+
+
+/*
+ * Queue operations
+ */
+
+static void m2mtest_buf_release(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ struct m2mtest_ctx *ctx = vq->priv_data;
+
+ dprintk(ctx->dev, "type: %d, index: %d, state: %d\n",
+ vq->type, vb->i, vb->state);
+
+ videobuf_vmalloc_free(vb);
+ vb->state = VIDEOBUF_NEEDS_INIT;
+}
+
+static int m2mtest_buf_setup(struct videobuf_queue *vq, unsigned int *count,
+ unsigned int *size)
+{
+ struct m2mtest_ctx *ctx = vq->priv_data;
+ struct m2mtest_q_data *q_data;
+
+ q_data = get_q_data(vq->type);
+
+ *size = q_data->width * q_data->height * q_data->fmt->depth >> 3;
+ dprintk(ctx->dev, "size:%d, w/h %d/%d, depth: %d\n",
+ *size, q_data->width, q_data->height, q_data->fmt->depth);
+
+ if (0 == *count)
+ *count = MEM2MEM_DEF_NUM_BUFS;
+
+ while (*size * *count > MEM2MEM_VID_MEM_LIMIT)
+ (*count)--;
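+ /*
+ * Illustrative example (not part of the original patch): if *size were
+ * 614400 bytes and MEM2MEM_VID_MEM_LIMIT were, hypothetically, 4 MiB,
+ * a request for 8 buffers would be trimmed to 6, since 7 * 614400 still
+ * exceeds 4194304 bytes while 6 * 614400 = 3686400 does not.
+ */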
+
+ v4l2_info(&ctx->dev->v4l2_dev,
+ "%d buffers of size %d set up.\n", *count, *size);
+
+ return 0;
+}
+
+static int m2mtest_buf_prepare(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb,
+ enum v4l2_field field)
+{
+ struct m2mtest_ctx *ctx = vq->priv_data;
+ struct m2mtest_q_data *q_data;
+ int ret;
+
+ dprintk(ctx->dev, "type: %d, index: %d, state: %d\n",
+ vq->type, vb->i, vb->state);
+
+ q_data = get_q_data(vq->type);
+
+ if (vb->baddr) {
+ /* User-provided buffer */
+ if (vb->bsize < q_data->sizeimage) {
+ /* Buffer too small to fit a frame */
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "User-provided buffer too small\n");
+ return -EINVAL;
+ }
+ } else if (vb->state != VIDEOBUF_NEEDS_INIT
+ && vb->bsize < q_data->sizeimage) {
+ /* We provide the buffer, but it's already been initialized
+ * and is too small */
+ return -EINVAL;
+ }
+
+ vb->width = q_data->width;
+ vb->height = q_data->height;
+ vb->bytesperline = (q_data->width * q_data->fmt->depth) >> 3;
+ vb->size = q_data->sizeimage;
+ vb->field = field;
+
+ if (VIDEOBUF_NEEDS_INIT == vb->state) {
+ ret = videobuf_iolock(vq, vb, NULL);
+ if (ret) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "Iolock failed\n");
+ goto fail;
+ }
+ }
+
+ vb->state = VIDEOBUF_PREPARED;
+
+ return 0;
+fail:
+ m2mtest_buf_release(vq, vb);
+ return ret;
+}
+
+static void m2mtest_buf_queue(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ struct m2mtest_ctx *ctx = vq->priv_data;
+
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vq, vb);
+}
+
+static struct videobuf_queue_ops m2mtest_qops = {
+ .buf_setup = m2mtest_buf_setup,
+ .buf_prepare = m2mtest_buf_prepare,
+ .buf_queue = m2mtest_buf_queue,
+ .buf_release = m2mtest_buf_release,
+};
+
+static void queue_init(void *priv, struct videobuf_queue *vq,
+ enum v4l2_buf_type type)
+{
+ struct m2mtest_ctx *ctx = priv;
+
+ videobuf_queue_vmalloc_init(vq, &m2mtest_qops, ctx->dev->v4l2_dev.dev,
+ &ctx->dev->irqlock, type, V4L2_FIELD_NONE,
+ sizeof(struct m2mtest_buffer), priv);
+}
+
+
+/*
+ * File operations
+ */
+static int m2mtest_open(struct file *file)
+{
+ struct m2mtest_dev *dev = video_drvdata(file);
+ struct m2mtest_ctx *ctx = NULL;
+
+ ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ file->private_data = ctx;
+ ctx->dev = dev;
+ ctx->translen = MEM2MEM_DEF_TRANSLEN;
+ ctx->transtime = MEM2MEM_DEF_TRANSTIME;
+ ctx->num_processed = 0;
+
+ ctx->m2m_ctx = v4l2_m2m_ctx_init(ctx, dev->m2m_dev, queue_init);
+ if (IS_ERR(ctx->m2m_ctx)) {
+ kfree(ctx);
+ return PTR_ERR(ctx->m2m_ctx);
+ }
+
+ atomic_inc(&dev->num_inst);
+
+ dprintk(dev, "Created instance %p, m2m_ctx: %p\n", ctx, ctx->m2m_ctx);
+
+ return 0;
+}
+
+static int m2mtest_release(struct file *file)
+{
+ struct m2mtest_dev *dev = video_drvdata(file);
+ struct m2mtest_ctx *ctx = file->private_data;
+
+ dprintk(dev, "Releasing instance %p\n", ctx);
+
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+ kfree(ctx);
+
+ atomic_dec(&dev->num_inst);
+
+ return 0;
+}
+
+static unsigned int m2mtest_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct m2mtest_ctx *ctx = (struct m2mtest_ctx *)file->private_data;
+
+ return v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
+}
+
+static int m2mtest_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct m2mtest_ctx *ctx = (struct m2mtest_ctx *)file->private_data;
+
+ return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
+}
+
+static const struct v4l2_file_operations m2mtest_fops = {
+ .owner = THIS_MODULE,
+ .open = m2mtest_open,
+ .release = m2mtest_release,
+ .poll = m2mtest_poll,
+ .ioctl = video_ioctl2,
+ .mmap = m2mtest_mmap,
+};
+
+static struct video_device m2mtest_videodev = {
+ .name = MEM2MEM_NAME,
+ .fops = &m2mtest_fops,
+ .ioctl_ops = &m2mtest_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release,
+};
+
+static struct v4l2_m2m_ops m2m_ops = {
+ .device_run = device_run,
+ .job_ready = job_ready,
+ .job_abort = job_abort,
+};
+
+static int m2mtest_probe(struct platform_device *pdev)
+{
+ struct m2mtest_dev *dev;
+ struct video_device *vfd;
+ int ret;
+
+ dev = kzalloc(sizeof *dev, GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->irqlock);
+
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret)
+ goto free_dev;
+
+ atomic_set(&dev->num_inst, 0);
+ mutex_init(&dev->dev_mutex);
+
+ vfd = video_device_alloc();
+ if (!vfd) {
+ v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
+ ret = -ENOMEM;
+ goto unreg_dev;
+ }
+
+ *vfd = m2mtest_videodev;
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+ goto rel_vdev;
+ }
+
+ video_set_drvdata(vfd, dev);
+ snprintf(vfd->name, sizeof(vfd->name), "%s", m2mtest_videodev.name);
+ dev->vfd = vfd;
+ v4l2_info(&dev->v4l2_dev, MEM2MEM_TEST_MODULE_NAME
+ "Device registered as /dev/video%d\n", vfd->num);
+
+ setup_timer(&dev->timer, device_isr, (long)dev);
+ platform_set_drvdata(pdev, dev);
+
+ dev->m2m_dev = v4l2_m2m_init(&m2m_ops);
+ if (IS_ERR(dev->m2m_dev)) {
+ v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(dev->m2m_dev);
+ goto err_m2m;
+ }
+
+ return 0;
+
+err_m2m:
+ video_unregister_device(dev->vfd);
+rel_vdev:
+ video_device_release(vfd);
+unreg_dev:
+ v4l2_device_unregister(&dev->v4l2_dev);
+free_dev:
+ kfree(dev);
+
+ return ret;
+}
+
+static int m2mtest_remove(struct platform_device *pdev)
+{
+ struct m2mtest_dev *dev =
+ (struct m2mtest_dev *)platform_get_drvdata(pdev);
+
+ v4l2_info(&dev->v4l2_dev, "Removing " MEM2MEM_TEST_MODULE_NAME);
+ v4l2_m2m_release(dev->m2m_dev);
+ del_timer_sync(&dev->timer);
+ video_unregister_device(dev->vfd);
+ v4l2_device_unregister(&dev->v4l2_dev);
+ kfree(dev);
+
+ return 0;
+}
+
+static struct platform_driver m2mtest_pdrv = {
+ .probe = m2mtest_probe,
+ .remove = m2mtest_remove,
+ .driver = {
+ .name = MEM2MEM_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static void __exit m2mtest_exit(void)
+{
+ platform_driver_unregister(&m2mtest_pdrv);
+ platform_device_unregister(&m2mtest_pdev);
+}
+
+static int __init m2mtest_init(void)
+{
+ int ret;
+
+ ret = platform_device_register(&m2mtest_pdev);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&m2mtest_pdrv);
+ if (ret)
+ platform_device_unregister(&m2mtest_pdev);
+
+ return ret;
+}
+
+module_init(m2mtest_init);
+module_exit(m2mtest_exit);
+
diff --git a/drivers/media/video/meye.c b/drivers/media/video/meye.c
index 4404e5ef818f..bb9fdb782b85 100644
--- a/drivers/media/video/meye.c
+++ b/drivers/media/video/meye.c
@@ -30,9 +30,10 @@
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>
-#include <linux/videodev.h>
+#include <linux/videodev2.h>
#include <linux/gfp.h>
#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -1168,22 +1169,22 @@ static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *c)
case V4L2_CID_BRIGHTNESS:
sony_pic_camera_command(
SONY_PIC_COMMAND_SETCAMERABRIGHTNESS, c->value);
- meye.picture.brightness = c->value << 10;
+ meye.brightness = c->value << 10;
break;
case V4L2_CID_HUE:
sony_pic_camera_command(
SONY_PIC_COMMAND_SETCAMERAHUE, c->value);
- meye.picture.hue = c->value << 10;
+ meye.hue = c->value << 10;
break;
case V4L2_CID_CONTRAST:
sony_pic_camera_command(
SONY_PIC_COMMAND_SETCAMERACONTRAST, c->value);
- meye.picture.contrast = c->value << 10;
+ meye.contrast = c->value << 10;
break;
case V4L2_CID_SATURATION:
sony_pic_camera_command(
SONY_PIC_COMMAND_SETCAMERACOLOR, c->value);
- meye.picture.colour = c->value << 10;
+ meye.colour = c->value << 10;
break;
case V4L2_CID_AGC:
sony_pic_camera_command(
@@ -1221,16 +1222,16 @@ static int vidioc_g_ctrl(struct file *file, void *fh, struct v4l2_control *c)
mutex_lock(&meye.lock);
switch (c->id) {
case V4L2_CID_BRIGHTNESS:
- c->value = meye.picture.brightness >> 10;
+ c->value = meye.brightness >> 10;
break;
case V4L2_CID_HUE:
- c->value = meye.picture.hue >> 10;
+ c->value = meye.hue >> 10;
break;
case V4L2_CID_CONTRAST:
- c->value = meye.picture.contrast >> 10;
+ c->value = meye.contrast >> 10;
break;
case V4L2_CID_SATURATION:
- c->value = meye.picture.colour >> 10;
+ c->value = meye.colour >> 10;
break;
case V4L2_CID_AGC:
c->value = meye.params.agc;
@@ -1729,6 +1730,7 @@ static int meye_resume(struct pci_dev *pdev)
static int __devinit meye_probe(struct pci_dev *pcidev,
const struct pci_device_id *ent)
{
+ struct v4l2_device *v4l2_dev = &meye.v4l2_dev;
int ret = -EBUSY;
unsigned long mchip_adr;
@@ -1737,70 +1739,75 @@ static int __devinit meye_probe(struct pci_dev *pcidev,
goto outnotdev;
}
+ ret = v4l2_device_register(&pcidev->dev, v4l2_dev);
+ if (ret < 0) {
+ v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
+ return ret;
+ }
ret = -ENOMEM;
meye.mchip_dev = pcidev;
- meye.video_dev = video_device_alloc();
- if (!meye.video_dev) {
- printk(KERN_ERR "meye: video_device_alloc() failed!\n");
+ meye.vdev = video_device_alloc();
+ if (!meye.vdev) {
+ v4l2_err(v4l2_dev, "video_device_alloc() failed!\n");
goto outnotdev;
}
meye.grab_temp = vmalloc(MCHIP_NB_PAGES_MJPEG * PAGE_SIZE);
if (!meye.grab_temp) {
- printk(KERN_ERR "meye: grab buffer allocation failed\n");
+ v4l2_err(v4l2_dev, "grab buffer allocation failed\n");
goto outvmalloc;
}
spin_lock_init(&meye.grabq_lock);
if (kfifo_alloc(&meye.grabq, sizeof(int) * MEYE_MAX_BUFNBRS,
GFP_KERNEL)) {
- printk(KERN_ERR "meye: fifo allocation failed\n");
+ v4l2_err(v4l2_dev, "fifo allocation failed\n");
goto outkfifoalloc1;
}
spin_lock_init(&meye.doneq_lock);
if (kfifo_alloc(&meye.doneq, sizeof(int) * MEYE_MAX_BUFNBRS,
GFP_KERNEL)) {
- printk(KERN_ERR "meye: fifo allocation failed\n");
+ v4l2_err(v4l2_dev, "fifo allocation failed\n");
goto outkfifoalloc2;
}
- memcpy(meye.video_dev, &meye_template, sizeof(meye_template));
- meye.video_dev->parent = &meye.mchip_dev->dev;
+ memcpy(meye.vdev, &meye_template, sizeof(meye_template));
+ meye.vdev->v4l2_dev = &meye.v4l2_dev;
ret = -EIO;
if ((ret = sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERA, 1))) {
- printk(KERN_ERR "meye: unable to power on the camera\n");
- printk(KERN_ERR "meye: did you enable the camera in "
+ v4l2_err(v4l2_dev, "meye: unable to power on the camera\n");
+ v4l2_err(v4l2_dev, "meye: did you enable the camera in "
"sonypi using the module options ?\n");
goto outsonypienable;
}
if ((ret = pci_enable_device(meye.mchip_dev))) {
- printk(KERN_ERR "meye: pci_enable_device failed\n");
+ v4l2_err(v4l2_dev, "meye: pci_enable_device failed\n");
goto outenabledev;
}
mchip_adr = pci_resource_start(meye.mchip_dev,0);
if (!mchip_adr) {
- printk(KERN_ERR "meye: mchip has no device base address\n");
+ v4l2_err(v4l2_dev, "meye: mchip has no device base address\n");
goto outregions;
}
if (!request_mem_region(pci_resource_start(meye.mchip_dev, 0),
pci_resource_len(meye.mchip_dev, 0),
"meye")) {
- printk(KERN_ERR "meye: request_mem_region failed\n");
+ v4l2_err(v4l2_dev, "meye: request_mem_region failed\n");
goto outregions;
}
meye.mchip_mmregs = ioremap(mchip_adr, MCHIP_MM_REGS);
if (!meye.mchip_mmregs) {
- printk(KERN_ERR "meye: ioremap failed\n");
+ v4l2_err(v4l2_dev, "meye: ioremap failed\n");
goto outremap;
}
meye.mchip_irq = pcidev->irq;
if (request_irq(meye.mchip_irq, meye_irq,
IRQF_DISABLED | IRQF_SHARED, "meye", meye_irq)) {
- printk(KERN_ERR "meye: request_irq failed\n");
+ v4l2_err(v4l2_dev, "request_irq failed\n");
goto outreqirq;
}
@@ -1824,21 +1831,18 @@ static int __devinit meye_probe(struct pci_dev *pcidev,
msleep(1);
mchip_set(MCHIP_MM_INTA, MCHIP_MM_INTA_HIC_1_MASK);
- if (video_register_device(meye.video_dev, VFL_TYPE_GRABBER,
+ if (video_register_device(meye.vdev, VFL_TYPE_GRABBER,
video_nr) < 0) {
- printk(KERN_ERR "meye: video_register_device failed\n");
+ v4l2_err(v4l2_dev, "video_register_device failed\n");
goto outvideoreg;
}
mutex_init(&meye.lock);
init_waitqueue_head(&meye.proc_list);
- meye.picture.depth = 16;
- meye.picture.palette = VIDEO_PALETTE_YUV422;
- meye.picture.brightness = 32 << 10;
- meye.picture.hue = 32 << 10;
- meye.picture.colour = 32 << 10;
- meye.picture.contrast = 32 << 10;
- meye.picture.whiteness = 0;
+ meye.brightness = 32 << 10;
+ meye.hue = 32 << 10;
+ meye.colour = 32 << 10;
+ meye.contrast = 32 << 10;
meye.params.subsample = 0;
meye.params.quality = 8;
meye.params.sharpness = 32;
@@ -1854,9 +1858,9 @@ static int __devinit meye_probe(struct pci_dev *pcidev,
sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERAPICTURE, 0);
sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERAAGC, 48);
- printk(KERN_INFO "meye: Motion Eye Camera Driver v%s.\n",
+ v4l2_info(v4l2_dev, "Motion Eye Camera Driver v%s.\n",
MEYE_DRIVER_VERSION);
- printk(KERN_INFO "meye: mchip KL5A72002 rev. %d, base %lx, irq %d\n",
+ v4l2_info(v4l2_dev, "mchip KL5A72002 rev. %d, base %lx, irq %d\n",
meye.mchip_dev->revision, mchip_adr, meye.mchip_irq);
return 0;
@@ -1879,14 +1883,14 @@ outkfifoalloc2:
outkfifoalloc1:
vfree(meye.grab_temp);
outvmalloc:
- video_device_release(meye.video_dev);
+ video_device_release(meye.vdev);
outnotdev:
return ret;
}
static void __devexit meye_remove(struct pci_dev *pcidev)
{
- video_unregister_device(meye.video_dev);
+ video_unregister_device(meye.vdev);
mchip_hic_stop();
diff --git a/drivers/media/video/meye.h b/drivers/media/video/meye.h
index 1321ad5d6597..4bdeb03f1644 100644
--- a/drivers/media/video/meye.h
+++ b/drivers/media/video/meye.h
@@ -31,7 +31,7 @@
#define _MEYE_PRIV_H_
#define MEYE_DRIVER_MAJORVERSION 1
-#define MEYE_DRIVER_MINORVERSION 13
+#define MEYE_DRIVER_MINORVERSION 14
#define MEYE_DRIVER_VERSION __stringify(MEYE_DRIVER_MAJORVERSION) "." \
__stringify(MEYE_DRIVER_MINORVERSION)
@@ -289,6 +289,7 @@ struct meye_grab_buffer {
/* Motion Eye device structure */
struct meye {
+ struct v4l2_device v4l2_dev; /* Main v4l2_device struct */
struct pci_dev *mchip_dev; /* pci device */
u8 mchip_irq; /* irq */
u8 mchip_mode; /* actual mchip mode: HIC_MODE... */
@@ -308,8 +309,11 @@ struct meye {
struct kfifo doneq; /* queue for grabbed buffers */
spinlock_t doneq_lock; /* lock protecting the queue */
wait_queue_head_t proc_list; /* wait queue */
- struct video_device *video_dev; /* video device parameters */
- struct video_picture picture; /* video picture parameters */
+ struct video_device *vdev; /* video device parameters */
+ u16 brightness;
+ u16 hue;
+ u16 contrast;
+ u16 colour;
struct meye_params params; /* additional parameters */
unsigned long in_use; /* set to 1 if the device is in use */
#ifdef CONFIG_PM
diff --git a/drivers/media/video/mt9t031.c b/drivers/media/video/mt9t031.c
index a9061bff79b2..78b4e091d2d5 100644
--- a/drivers/media/video/mt9t031.c
+++ b/drivers/media/video/mt9t031.c
@@ -8,14 +8,16 @@
* published by the Free Software Foundation.
*/
-#include <linux/videodev2.h>
-#include <linux/slab.h>
+#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/log2.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
-#include <media/v4l2-subdev.h>
-#include <media/v4l2-chip-ident.h>
#include <media/soc_camera.h>
+#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-subdev.h>
/*
* mt9t031 i2c address 0x5d
@@ -681,12 +683,66 @@ static int mt9t031_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
}
/*
+ * Power Management:
+ * This function does nothing for now but must be present for pm to work
+ */
+static int mt9t031_runtime_suspend(struct device *dev)
+{
+ return 0;
+}
+
+/*
+ * Power Management:
+ * COLUMN_ADDRESS_MODE and ROW_ADDRESS_MODE are not rewritten if unchanged;
+ * they are, however, changed at reset if the platform hook is present,
+ * thus we rewrite them with the values stored by the driver
+ */
+static int mt9t031_runtime_resume(struct device *dev)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct soc_camera_device *icd = container_of(vdev->parent,
+ struct soc_camera_device, dev);
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct i2c_client *client = sd->priv;
+ struct mt9t031 *mt9t031 = to_mt9t031(client);
+
+ int ret;
+ u16 xbin, ybin;
+
+ xbin = min(mt9t031->xskip, (u16)3);
+ ybin = min(mt9t031->yskip, (u16)3);
+
+ ret = reg_write(client, MT9T031_COLUMN_ADDRESS_MODE,
+ ((xbin - 1) << 4) | (mt9t031->xskip - 1));
+ if (ret < 0)
+ return ret;
+
+ ret = reg_write(client, MT9T031_ROW_ADDRESS_MODE,
+ ((ybin - 1) << 4) | (mt9t031->yskip - 1));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static struct dev_pm_ops mt9t031_dev_pm_ops = {
+ .runtime_suspend = mt9t031_runtime_suspend,
+ .runtime_resume = mt9t031_runtime_resume,
+};
+
+static struct device_type mt9t031_dev_type = {
+ .name = "MT9T031",
+ .pm = &mt9t031_dev_pm_ops,
+};
+
+/*
* Interface active, can use i2c. If it fails, it can indeed mean, that
* this wasn't our capture interface, so, we wait for the right one
*/
static int mt9t031_video_probe(struct i2c_client *client)
{
struct mt9t031 *mt9t031 = to_mt9t031(client);
+ struct video_device *vdev = soc_camera_i2c_to_vdev(client);
s32 data;
int ret;
@@ -712,6 +768,8 @@ static int mt9t031_video_probe(struct i2c_client *client)
ret = mt9t031_idle(client);
if (ret < 0)
dev_err(&client->dev, "Failed to initialise the camera\n");
+ else
+ vdev->dev.type = &mt9t031_dev_type;
/* mt9t031_idle() has reset the chip to default. */
mt9t031->exposure = 255;
diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c
index 1a34d2993e94..e5bae4c9393b 100644
--- a/drivers/media/video/mt9v022.c
+++ b/drivers/media/video/mt9v022.c
@@ -325,7 +325,7 @@ static int mt9v022_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
if (ret < 0)
return ret;
- dev_dbg(&client->dev, "Frame %ux%u pixel\n", rect.width, rect.height);
+ dev_dbg(&client->dev, "Frame %dx%d pixel\n", rect.width, rect.height);
mt9v022->rect = rect;
diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c
index 34a66019190e..5c17f9ec3d7c 100644
--- a/drivers/media/video/mx1_camera.c
+++ b/drivers/media/video/mx1_camera.c
@@ -139,8 +139,8 @@ static int mx1_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
if (!*count)
*count = 32;
- while (*size * *count > MAX_VIDEO_MEM * 1024 * 1024)
- (*count)--;
+ if (*size * *count > MAX_VIDEO_MEM * 1024 * 1024)
+ *count = (MAX_VIDEO_MEM * 1024 * 1024) / *size;
dev_dbg(icd->dev.parent, "count=%d, size=%d\n", *count, *size);
diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c
index bd297f567dc7..d477e3058002 100644
--- a/drivers/media/video/mx3_camera.c
+++ b/drivers/media/video/mx3_camera.c
@@ -796,7 +796,7 @@ static int acquire_dma_channel(struct mx3_camera_dev *mx3_cam)
* FIXME: learn to use stride != width, then we can keep stride properly aligned
* and support arbitrary (even) widths.
*/
-static inline void stride_align(__s32 *width)
+static inline void stride_align(__u32 *width)
{
if (((*width + 7) & ~7) < 4096)
*width = (*width + 7) & ~7;
@@ -844,7 +844,7 @@ static int mx3_camera_set_crop(struct soc_camera_device *icd,
* So far only direct camera-to-memory is supported
*/
if (channel_change_requested(icd, rect)) {
- int ret = acquire_dma_channel(mx3_cam);
+ ret = acquire_dma_channel(mx3_cam);
if (ret < 0)
return ret;
}
diff --git a/drivers/media/video/omap/Kconfig b/drivers/media/video/omap/Kconfig
new file mode 100644
index 000000000000..97c53949ca89
--- /dev/null
+++ b/drivers/media/video/omap/Kconfig
@@ -0,0 +1,11 @@
+config VIDEO_OMAP2_VOUT
+ tristate "OMAP2/OMAP3 V4L2-Display driver"
+ depends on ARCH_OMAP24XX || ARCH_OMAP34XX
+ select VIDEOBUF_GEN
+ select VIDEOBUF_DMA_SG
+ select OMAP2_DSS
+ select OMAP2_VRAM
+ select OMAP2_VRFB
+ default n
+ ---help---
+ V4L2 Display driver support for OMAP2/3 based boards.
diff --git a/drivers/media/video/omap/Makefile b/drivers/media/video/omap/Makefile
new file mode 100644
index 000000000000..b8bab00ad010
--- /dev/null
+++ b/drivers/media/video/omap/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the omap video device drivers.
+#
+
+# OMAP2/3 Display driver
+omap-vout-mod-objs := omap_vout.o omap_voutlib.o
+obj-$(CONFIG_VIDEO_OMAP2_VOUT) += omap-vout-mod.o
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
new file mode 100644
index 000000000000..4c0ab499228b
--- /dev/null
+++ b/drivers/media/video/omap/omap_vout.c
@@ -0,0 +1,2643 @@
+/*
+ * omap_vout.c
+ *
+ * Copyright (C) 2005-2010 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ * Leveraged code from the OMAP2 camera driver
+ * Video-for-Linux (Version 2) camera capture driver for
+ * the OMAP24xx camera controller.
+ *
+ * Author: Andy Lowe (source@mvista.com)
+ *
+ * Copyright (C) 2004 MontaVista Software, Inc.
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * History:
+ * 20-APR-2006 Khasim Modified VRFB based rotation; the image data is
+ * always read from the 0 degree view and written to
+ * the virtual space of the desired rotation angle
+ * 4-DEC-2006 Jian Changed to support better memory management
+ *
+ * 17-Nov-2008 Hardik Changed driver to use video_ioctl2
+ *
+ * 23-Feb-2010 Vaibhav H Modified to use new DSS2 interface
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/irq.h>
+#include <linux/videodev2.h>
+
+#include <media/videobuf-dma-sg.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+
+#include <plat/dma.h>
+#include <plat/vram.h>
+#include <plat/vrfb.h>
+#include <plat/display.h>
+
+#include "omap_voutlib.h"
+#include "omap_voutdef.h"
+
+MODULE_AUTHOR("Texas Instruments");
+MODULE_DESCRIPTION("OMAP Video for Linux Video out driver");
+MODULE_LICENSE("GPL");
+
+
+/* Driver Configuration macros */
+#define VOUT_NAME "omap_vout"
+
+enum omap_vout_channels {
+ OMAP_VIDEO1,
+ OMAP_VIDEO2,
+};
+
+enum dma_channel_state {
+ DMA_CHAN_NOT_ALLOTED,
+ DMA_CHAN_ALLOTED,
+};
+
+#define QQVGA_WIDTH 160
+#define QQVGA_HEIGHT 120
+
+/* Max Resolution supported by the driver */
+#define VID_MAX_WIDTH 1280 /* Largest width */
+#define VID_MAX_HEIGHT 720 /* Largest height */
+
+/* Minimum requirement is 2x2 for DSS */
+#define VID_MIN_WIDTH 2
+#define VID_MIN_HEIGHT 2
+
+/* 2048 x 2048 is max res supported by OMAP display controller */
+#define MAX_PIXELS_PER_LINE 2048
+
+#define VRFB_TX_TIMEOUT 1000
+#define VRFB_NUM_BUFS 4
+
+/* Max buffer size to be allocated during init */
+#define OMAP_VOUT_MAX_BUF_SIZE (VID_MAX_WIDTH*VID_MAX_HEIGHT*4)
+
+static struct videobuf_queue_ops video_vbq_ops;
+/* Variables configurable through module params*/
+static u32 video1_numbuffers = 3;
+static u32 video2_numbuffers = 3;
+static u32 video1_bufsize = OMAP_VOUT_MAX_BUF_SIZE;
+static u32 video2_bufsize = OMAP_VOUT_MAX_BUF_SIZE;
+static u32 vid1_static_vrfb_alloc;
+static u32 vid2_static_vrfb_alloc;
+static int debug;
+
+/* Module parameters */
+module_param(video1_numbuffers, uint, S_IRUGO);
+MODULE_PARM_DESC(video1_numbuffers,
+ "Number of buffers to be allocated at init time for Video1 device.");
+
+module_param(video2_numbuffers, uint, S_IRUGO);
+MODULE_PARM_DESC(video2_numbuffers,
+ "Number of buffers to be allocated at init time for Video2 device.");
+
+module_param(video1_bufsize, uint, S_IRUGO);
+MODULE_PARM_DESC(video1_bufsize,
+ "Size of the buffer to be allocated for video1 device");
+
+module_param(video2_bufsize, uint, S_IRUGO);
+MODULE_PARM_DESC(video2_bufsize,
+ "Size of the buffer to be allocated for video2 device");
+
+module_param(vid1_static_vrfb_alloc, bool, S_IRUGO);
+MODULE_PARM_DESC(vid1_static_vrfb_alloc,
+ "Static allocation of the VRFB buffer for video1 device");
+
+module_param(vid2_static_vrfb_alloc, bool, S_IRUGO);
+MODULE_PARM_DESC(vid2_static_vrfb_alloc,
+ "Static allocation of the VRFB buffer for video2 device");
+
+module_param(debug, bool, S_IRUGO);
+MODULE_PARM_DESC(debug, "Debug level (0-1)");
+
+/* list of image formats supported by OMAP2 video pipelines */
+const static struct v4l2_fmtdesc omap_formats[] = {
+ {
+ /* Note: V4L2 defines RGB565 as:
+ *
+ * Byte 0 Byte 1
+ * g2 g1 g0 r4 r3 r2 r1 r0 b4 b3 b2 b1 b0 g5 g4 g3
+ *
+ * We interpret RGB565 as:
+ *
+ * Byte 0 Byte 1
+ * g2 g1 g0 b4 b3 b2 b1 b0 r4 r3 r2 r1 r0 g5 g4 g3
+ */
+ .description = "RGB565, le",
+ .pixelformat = V4L2_PIX_FMT_RGB565,
+ },
+ {
+ /* Note: V4L2 defines RGB32 as RGB-8-8-8-8; we use
+ * this for RGB24 unpack mode, the last 8 bits are ignored
+ */
+ .description = "RGB32, le",
+ .pixelformat = V4L2_PIX_FMT_RGB32,
+ },
+ {
+ /* Note: V4L2 defines RGB24 as RGB-8-8-8; we use
+ * this for RGB24 packed mode
+ */
+ .description = "RGB24, le",
+ .pixelformat = V4L2_PIX_FMT_RGB24,
+ },
+ {
+ .description = "YUYV (YUV 4:2:2), packed",
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ },
+ {
+ .description = "UYVY, packed",
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ },
+};
+
+#define NUM_OUTPUT_FORMATS (ARRAY_SIZE(omap_formats))
+
+/*
+ * Allocate buffers
+ */
+static unsigned long omap_vout_alloc_buffer(u32 buf_size, u32 *phys_addr)
+{
+ u32 order, size;
+ unsigned long virt_addr, addr;
+
+ size = PAGE_ALIGN(buf_size);
+ order = get_order(size);
+ virt_addr = __get_free_pages(GFP_KERNEL | GFP_DMA, order);
+ addr = virt_addr;
+
+ if (virt_addr) {
+ while (size > 0) {
+ SetPageReserved(virt_to_page(addr));
+ addr += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ }
+ *phys_addr = (u32) virt_to_phys((void *) virt_addr);
+ return virt_addr;
+}
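+
+/*
+ * Illustrative example (not part of the original patch): a 1 MiB request
+ * is already page aligned, get_order() yields 8 (256 pages), and every
+ * page in the allocation is marked reserved so it can later be mapped
+ * into user space; omap_vout_free_buffer() undoes exactly this.
+ */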
+
+/*
+ * Free buffers
+ */
+static void omap_vout_free_buffer(unsigned long virtaddr, u32 buf_size)
+{
+ u32 order, size;
+ unsigned long addr = virtaddr;
+
+ size = PAGE_ALIGN(buf_size);
+ order = get_order(size);
+
+ while (size > 0) {
+ ClearPageReserved(virt_to_page(addr));
+ addr += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ free_pages((unsigned long) virtaddr, order);
+}
+
+/*
+ * Function for allocating video buffers
+ */
+static int omap_vout_allocate_vrfb_buffers(struct omap_vout_device *vout,
+ unsigned int *count, int startindex)
+{
+ int i, j;
+
+ for (i = 0; i < *count; i++) {
+ if (!vout->smsshado_virt_addr[i]) {
+ vout->smsshado_virt_addr[i] =
+ omap_vout_alloc_buffer(vout->smsshado_size,
+ &vout->smsshado_phy_addr[i]);
+ }
+ if (!vout->smsshado_virt_addr[i] && startindex != -1) {
+ if (V4L2_MEMORY_MMAP == vout->memory && i >= startindex)
+ break;
+ }
+ if (!vout->smsshado_virt_addr[i]) {
+ for (j = 0; j < i; j++) {
+ omap_vout_free_buffer(
+ vout->smsshado_virt_addr[j],
+ vout->smsshado_size);
+ vout->smsshado_virt_addr[j] = 0;
+ vout->smsshado_phy_addr[j] = 0;
+ }
+ *count = 0;
+ return -ENOMEM;
+ }
+ memset((void *) vout->smsshado_virt_addr[i], 0,
+ vout->smsshado_size);
+ }
+ return 0;
+}
+
+/*
+ * Try format
+ */
+static int omap_vout_try_format(struct v4l2_pix_format *pix)
+{
+ int ifmt, bpp = 0;
+
+ pix->height = clamp(pix->height, (u32)VID_MIN_HEIGHT,
+ (u32)VID_MAX_HEIGHT);
+ pix->width = clamp(pix->width, (u32)VID_MIN_WIDTH, (u32)VID_MAX_WIDTH);
+
+ for (ifmt = 0; ifmt < NUM_OUTPUT_FORMATS; ifmt++) {
+ if (pix->pixelformat == omap_formats[ifmt].pixelformat)
+ break;
+ }
+
+ if (ifmt == NUM_OUTPUT_FORMATS)
+ ifmt = 0;
+
+ pix->pixelformat = omap_formats[ifmt].pixelformat;
+ pix->field = V4L2_FIELD_ANY;
+ pix->priv = 0;
+
+ switch (pix->pixelformat) {
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ default:
+ pix->colorspace = V4L2_COLORSPACE_JPEG;
+ bpp = YUYV_BPP;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_RGB565X:
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ bpp = RGB565_BPP;
+ break;
+ case V4L2_PIX_FMT_RGB24:
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ bpp = RGB24_BPP;
+ break;
+ case V4L2_PIX_FMT_RGB32:
+ case V4L2_PIX_FMT_BGR32:
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ bpp = RGB32_BPP;
+ break;
+ }
+ pix->bytesperline = pix->width * bpp;
+ pix->sizeimage = pix->bytesperline * pix->height;
+
+ return bpp;
+}
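+
+/*
+ * Worked example (illustrative only, assuming YUYV_BPP is 2 bytes per
+ * pixel): a 640x480 YUYV request stays within the 2..1280 x 2..720
+ * limits, so width and height are unchanged, bytesperline becomes
+ * 640 * 2 = 1280 and sizeimage becomes 1280 * 480 = 614400 bytes.
+ */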
+
+/*
+ * omap_vout_uservirt_to_phys: convert a user space virtual address to a
+ * physical address.
+ */
+static u32 omap_vout_uservirt_to_phys(u32 virtp)
+{
+ unsigned long physp = 0;
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+
+ vma = find_vma(mm, virtp);
+ /* For kernel direct-mapped memory, take the easy way */
+ if (virtp >= PAGE_OFFSET) {
+ physp = virt_to_phys((void *) virtp);
+ } else if (vma && (vma->vm_flags & VM_IO) && vma->vm_pgoff) {
+ /* this will catch kernel-allocated, mmapped-to-usermode
+ addresses */
+ physp = (vma->vm_pgoff << PAGE_SHIFT) + (virtp - vma->vm_start);
+ } else {
+ /* otherwise, use get_user_pages() for general userland pages */
+ int res, nr_pages = 1;
+ struct page *pages;
+ down_read(&current->mm->mmap_sem);
+
+ res = get_user_pages(current, current->mm, virtp, nr_pages, 1,
+ 0, &pages, NULL);
+ up_read(&current->mm->mmap_sem);
+
+ if (res == nr_pages) {
+ physp = __pa(page_address(&pages[0]) +
+ (virtp & ~PAGE_MASK));
+ } else {
+ printk(KERN_WARNING VOUT_NAME
+ "get_user_pages failed\n");
+ return 0;
+ }
+ }
+
+ return physp;
+}
+
+/*
+ * Wakes up the application once the DMA transfer to VRFB space is completed.
+ */
+static void omap_vout_vrfb_dma_tx_callback(int lch, u16 ch_status, void *data)
+{
+ struct vid_vrfb_dma *t = (struct vid_vrfb_dma *) data;
+
+ t->tx_status = 1;
+ wake_up_interruptible(&t->wait);
+}
+
+/*
+ * Release the VRFB context once the module exits
+ */
+static void omap_vout_release_vrfb(struct omap_vout_device *vout)
+{
+ int i;
+
+ for (i = 0; i < VRFB_NUM_BUFS; i++)
+ omap_vrfb_release_ctx(&vout->vrfb_context[i]);
+
+ if (vout->vrfb_dma_tx.req_status == DMA_CHAN_ALLOTED) {
+ vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
+ omap_free_dma(vout->vrfb_dma_tx.dma_ch);
+ }
+}
+
+/*
+ * Return true if rotation is 90 or 270
+ */
+static inline int rotate_90_or_270(const struct omap_vout_device *vout)
+{
+ return (vout->rotation == dss_rotation_90_degree ||
+ vout->rotation == dss_rotation_270_degree);
+}
+
+/*
+ * Return true if rotation is enabled
+ */
+static inline int rotation_enabled(const struct omap_vout_device *vout)
+{
+ return vout->rotation || vout->mirror;
+}
+
+/*
+ * Reverse the rotation degree if mirroring is enabled
+ */
+static inline int calc_rotation(const struct omap_vout_device *vout)
+{
+ if (!vout->mirror)
+ return vout->rotation;
+
+ switch (vout->rotation) {
+ case dss_rotation_90_degree:
+ return dss_rotation_270_degree;
+ case dss_rotation_270_degree:
+ return dss_rotation_90_degree;
+ case dss_rotation_180_degree:
+ return dss_rotation_0_degree;
+ default:
+ return dss_rotation_180_degree;
+ }
+}
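+
+/*
+ * Example (illustrative only): without mirroring the configured rotation
+ * is returned as-is; with mirroring 90 maps to 270, 270 to 90, 180 to 0,
+ * and 0 (handled by the default case) to 180.
+ */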
+
+/*
+ * Free the V4L2 buffers
+ */
+static void omap_vout_free_buffers(struct omap_vout_device *vout)
+{
+ int i, numbuffers;
+
+ /* Work out how many buffers were allocated and their size, then free them */
+ numbuffers = (vout->vid) ? video2_numbuffers : video1_numbuffers;
+ vout->buffer_size = (vout->vid) ? video2_bufsize : video1_bufsize;
+
+ for (i = 0; i < numbuffers; i++) {
+ omap_vout_free_buffer(vout->buf_virt_addr[i],
+ vout->buffer_size);
+ vout->buf_phy_addr[i] = 0;
+ vout->buf_virt_addr[i] = 0;
+ }
+}
+
+/*
+ * Free VRFB buffers
+ */
+static void omap_vout_free_vrfb_buffers(struct omap_vout_device *vout)
+{
+ int j;
+
+ for (j = 0; j < VRFB_NUM_BUFS; j++) {
+ omap_vout_free_buffer(vout->smsshado_virt_addr[j],
+ vout->smsshado_size);
+ vout->smsshado_virt_addr[j] = 0;
+ vout->smsshado_phy_addr[j] = 0;
+ }
+}
+
+/*
+ * Allocate the buffers for the VRFB space. Data is copied from V4L2
+ * buffers to the VRFB buffers using the DMA engine.
+ */
+static int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout,
+ unsigned int *count, unsigned int startindex)
+{
+ int i;
+ bool yuv_mode;
+
+ /* Allocate the VRFB buffers only if the buffers are not
+ * allocated during init time.
+ */
+ if ((rotation_enabled(vout)) && !vout->vrfb_static_allocation)
+ if (omap_vout_allocate_vrfb_buffers(vout, count, startindex))
+ return -ENOMEM;
+
+ if (vout->dss_mode == OMAP_DSS_COLOR_YUV2 ||
+ vout->dss_mode == OMAP_DSS_COLOR_UYVY)
+ yuv_mode = true;
+ else
+ yuv_mode = false;
+
+ for (i = 0; i < *count; i++)
+ omap_vrfb_setup(&vout->vrfb_context[i],
+ vout->smsshado_phy_addr[i], vout->pix.width,
+ vout->pix.height, vout->bpp, yuv_mode);
+
+ return 0;
+}
+
+/*
+ * Convert V4L2 rotation to DSS rotation
+ * V4L2 understands 0, 90, 180, 270.
+ * Convert to 0, 1, 2 and 3, respectively, for DSS
+ */
+static int v4l2_rot_to_dss_rot(int v4l2_rotation,
+ enum dss_rotation *rotation, bool mirror)
+{
+ int ret = 0;
+
+ switch (v4l2_rotation) {
+ case 90:
+ *rotation = dss_rotation_90_degree;
+ break;
+ case 180:
+ *rotation = dss_rotation_180_degree;
+ break;
+ case 270:
+ *rotation = dss_rotation_270_degree;
+ break;
+ case 0:
+ *rotation = dss_rotation_0_degree;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
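+
+/*
+ * Example (illustrative only): v4l2_rot_to_dss_rot(90, &rot, false)
+ * stores dss_rotation_90_degree in rot and returns 0, whereas an
+ * unsupported angle such as 45 leaves rot untouched and returns -EINVAL.
+ */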
+
+/*
+ * Calculate the buffer offsets from which the streaming should
+ * start. This offset calculation is mainly required because of
+ * VRFB's 32-pixel alignment when rotation is used.
+ */
+static int omap_vout_calculate_offset(struct omap_vout_device *vout)
+{
+ struct omap_overlay *ovl;
+ enum dss_rotation rotation;
+ struct omapvideo_info *ovid;
+ bool mirroring = vout->mirror;
+ struct omap_dss_device *cur_display;
+ struct v4l2_rect *crop = &vout->crop;
+ struct v4l2_pix_format *pix = &vout->pix;
+ int *cropped_offset = &vout->cropped_offset;
+ int vr_ps = 1, ps = 2, temp_ps = 2;
+ int offset = 0, ctop = 0, cleft = 0, line_length = 0;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+ /* get the display device attached to the overlay */
+ if (!ovl->manager || !ovl->manager->device)
+ return -1;
+
+ cur_display = ovl->manager->device;
+ rotation = calc_rotation(vout);
+
+ if (V4L2_PIX_FMT_YUYV == pix->pixelformat ||
+ V4L2_PIX_FMT_UYVY == pix->pixelformat) {
+ if (rotation_enabled(vout)) {
+ /*
+ * ps - Actual pixel size for YUYV/UYVY for
+ * VRFB/Mirroring is 4 bytes
+ * vr_ps - Virtual pixel size for YUYV/UYVY is
+ * 2 bytes
+ */
+ ps = 4;
+ vr_ps = 2;
+ } else {
+ ps = 2; /* otherwise the pixel size is 2 bytes */
+ }
+ } else if (V4L2_PIX_FMT_RGB32 == pix->pixelformat) {
+ ps = 4;
+ } else if (V4L2_PIX_FMT_RGB24 == pix->pixelformat) {
+ ps = 3;
+ }
+ vout->ps = ps;
+ vout->vr_ps = vr_ps;
+
+ if (rotation_enabled(vout)) {
+ line_length = MAX_PIXELS_PER_LINE;
+ ctop = (pix->height - crop->height) - crop->top;
+ cleft = (pix->width - crop->width) - crop->left;
+ } else {
+ line_length = pix->width;
+ }
+ vout->line_length = line_length;
+ switch (rotation) {
+ case dss_rotation_90_degree:
+ offset = vout->vrfb_context[0].yoffset *
+ vout->vrfb_context[0].bytespp;
+ temp_ps = ps / vr_ps;
+ if (mirroring == 0) {
+ *cropped_offset = offset + line_length *
+ temp_ps * cleft + crop->top * temp_ps;
+ } else {
+ *cropped_offset = offset + line_length * temp_ps *
+ cleft + crop->top * temp_ps + (line_length *
+ ((crop->width / (vr_ps)) - 1) * ps);
+ }
+ break;
+ case dss_rotation_180_degree:
+ offset = ((MAX_PIXELS_PER_LINE * vout->vrfb_context[0].yoffset *
+ vout->vrfb_context[0].bytespp) +
+ (vout->vrfb_context[0].xoffset *
+ vout->vrfb_context[0].bytespp));
+ if (mirroring == 0) {
+ *cropped_offset = offset + (line_length * ps * ctop) +
+ (cleft / vr_ps) * ps;
+
+ } else {
+ *cropped_offset = offset + (line_length * ps * ctop) +
+ (cleft / vr_ps) * ps + (line_length *
+ (crop->height - 1) * ps);
+ }
+ break;
+ case dss_rotation_270_degree:
+ offset = MAX_PIXELS_PER_LINE * vout->vrfb_context[0].xoffset *
+ vout->vrfb_context[0].bytespp;
+ temp_ps = ps / vr_ps;
+ if (mirroring == 0) {
+ *cropped_offset = offset + line_length *
+ temp_ps * crop->left + ctop * ps;
+ } else {
+ *cropped_offset = offset + line_length *
+ temp_ps * crop->left + ctop * ps +
+ (line_length * ((crop->width / vr_ps) - 1) *
+ ps);
+ }
+ break;
+ case dss_rotation_0_degree:
+ if (mirroring == 0) {
+ *cropped_offset = (line_length * ps) *
+ crop->top + (crop->left / vr_ps) * ps;
+ } else {
+ *cropped_offset = (line_length * ps) *
+ crop->top + (crop->left / vr_ps) * ps +
+ (line_length * (crop->height - 1) * ps);
+ }
+ break;
+ default:
+ *cropped_offset = (line_length * ps * crop->top) /
+ vr_ps + (crop->left * ps) / vr_ps +
+ ((crop->width / vr_ps) - 1) * ps;
+ break;
+ }
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "%s Offset:%x\n",
+ __func__, *cropped_offset);
+ return 0;
+}
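+
+/*
+ * Worked example (illustrative only): with rotation and mirroring
+ * disabled and an RGB565 frame (ps = 2, vr_ps = 1), pix.width = 640 and
+ * a crop of left = 8, top = 10 give line_length = 640 and
+ * cropped_offset = (640 * 2) * 10 + (8 / 1) * 2 = 12816 bytes.
+ */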
+
+/*
+ * Convert V4L2 pixel format to DSS pixel format
+ */
+static int video_mode_to_dss_mode(struct omap_vout_device *vout)
+{
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct v4l2_pix_format *pix = &vout->pix;
+ enum omap_color_mode mode;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ switch (pix->pixelformat) {
+ case 0:
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ mode = OMAP_DSS_COLOR_YUV2;
+ break;
+ case V4L2_PIX_FMT_UYVY:
+ mode = OMAP_DSS_COLOR_UYVY;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ mode = OMAP_DSS_COLOR_RGB16;
+ break;
+ case V4L2_PIX_FMT_RGB24:
+ mode = OMAP_DSS_COLOR_RGB24P;
+ break;
+ case V4L2_PIX_FMT_RGB32:
+ mode = (ovl->id == OMAP_DSS_VIDEO1) ?
+ OMAP_DSS_COLOR_RGB24U : OMAP_DSS_COLOR_ARGB32;
+ break;
+ case V4L2_PIX_FMT_BGR32:
+ mode = OMAP_DSS_COLOR_RGBX32;
+ break;
+ default:
+ mode = -EINVAL;
+ }
+ return mode;
+}
+
+/*
+ * Setup the overlay
+ */
+int omapvid_setup_overlay(struct omap_vout_device *vout,
+ struct omap_overlay *ovl, int posx, int posy, int outw,
+ int outh, u32 addr)
+{
+ int ret = 0;
+ struct omap_overlay_info info;
+ int cropheight, cropwidth, pixheight, pixwidth;
+
+ if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0 &&
+ (outw != vout->pix.width || outh != vout->pix.height)) {
+ ret = -EINVAL;
+ goto setup_ovl_err;
+ }
+
+ vout->dss_mode = video_mode_to_dss_mode(vout);
+ if (vout->dss_mode == -EINVAL) {
+ ret = -EINVAL;
+ goto setup_ovl_err;
+ }
+
+ /* Setup the input plane parameters according to
+ * rotation value selected.
+ */
+ if (rotate_90_or_270(vout)) {
+ cropheight = vout->crop.width;
+ cropwidth = vout->crop.height;
+ pixheight = vout->pix.width;
+ pixwidth = vout->pix.height;
+ } else {
+ cropheight = vout->crop.height;
+ cropwidth = vout->crop.width;
+ pixheight = vout->pix.height;
+ pixwidth = vout->pix.width;
+ }
+
+ ovl->get_overlay_info(ovl, &info);
+ info.paddr = addr;
+ info.vaddr = NULL;
+ info.width = cropwidth;
+ info.height = cropheight;
+ info.color_mode = vout->dss_mode;
+ info.mirror = vout->mirror;
+ info.pos_x = posx;
+ info.pos_y = posy;
+ info.out_width = outw;
+ info.out_height = outh;
+ info.global_alpha = vout->win.global_alpha;
+ if (!rotation_enabled(vout)) {
+ info.rotation = 0;
+ info.rotation_type = OMAP_DSS_ROT_DMA;
+ info.screen_width = pixwidth;
+ } else {
+ info.rotation = vout->rotation;
+ info.rotation_type = OMAP_DSS_ROT_VRFB;
+ info.screen_width = 2048;
+ }
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+ "%s enable=%d addr=%x width=%d\n height=%d color_mode=%d\n"
+ "rotation=%d mirror=%d posx=%d posy=%d out_width = %d \n"
+ "out_height=%d rotation_type=%d screen_width=%d\n",
+ __func__, info.enabled, info.paddr, info.width, info.height,
+ info.color_mode, info.rotation, info.mirror, info.pos_x,
+ info.pos_y, info.out_width, info.out_height, info.rotation_type,
+ info.screen_width);
+
+ ret = ovl->set_overlay_info(ovl, &info);
+ if (ret)
+ goto setup_ovl_err;
+
+ return 0;
+
+setup_ovl_err:
+ v4l2_warn(&vout->vid_dev->v4l2_dev, "setup_overlay failed\n");
+ return ret;
+}
+
+/*
+ * Initialize the overlay structure
+ */
+int omapvid_init(struct omap_vout_device *vout, u32 addr)
+{
+ int ret = 0, i;
+ struct v4l2_window *win;
+ struct omap_overlay *ovl;
+ int posx, posy, outw, outh, temp;
+ struct omap_video_timings *timing;
+ struct omapvideo_info *ovid = &vout->vid_info;
+
+ win = &vout->win;
+ for (i = 0; i < ovid->num_overlays; i++) {
+ ovl = ovid->overlays[i];
+ if (!ovl->manager || !ovl->manager->device)
+ return -EINVAL;
+
+ timing = &ovl->manager->device->panel.timings;
+
+ outw = win->w.width;
+ outh = win->w.height;
+ switch (vout->rotation) {
+ case dss_rotation_90_degree:
+ /* Swap the height and width for 90
+ * and 270 degree rotation
+ */
+ temp = outw;
+ outw = outh;
+ outh = temp;
+ posy = (timing->y_res - win->w.width) - win->w.left;
+ posx = win->w.top;
+ break;
+
+ case dss_rotation_180_degree:
+ posx = (timing->x_res - win->w.width) - win->w.left;
+ posy = (timing->y_res - win->w.height) - win->w.top;
+ break;
+
+ case dss_rotation_270_degree:
+ temp = outw;
+ outw = outh;
+ outh = temp;
+ posy = win->w.left;
+ posx = (timing->x_res - win->w.height) - win->w.top;
+ break;
+
+ default:
+ posx = win->w.left;
+ posy = win->w.top;
+ break;
+ }
+
+ ret = omapvid_setup_overlay(vout, ovl, posx, posy,
+ outw, outh, addr);
+ if (ret)
+ goto omapvid_init_err;
+ }
+ return 0;
+
+omapvid_init_err:
+ v4l2_warn(&vout->vid_dev->v4l2_dev, "apply_changes failed\n");
+ return ret;
+}
+
+/*
+ * Apply the changes set the go bit of DSS
+ */
+int omapvid_apply_changes(struct omap_vout_device *vout)
+{
+ int i;
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid = &vout->vid_info;
+
+ for (i = 0; i < ovid->num_overlays; i++) {
+ ovl = ovid->overlays[i];
+ if (!ovl->manager || !ovl->manager->device)
+ return -EINVAL;
+ ovl->manager->apply(ovl->manager);
+ }
+
+ return 0;
+}
+
+void omap_vout_isr(void *arg, unsigned int irqstatus)
+{
+ int ret;
+ u32 addr, fid;
+ struct omap_overlay *ovl;
+ struct timeval timevalue;
+ struct omapvideo_info *ovid;
+ struct omap_dss_device *cur_display;
+ struct omap_vout_device *vout = (struct omap_vout_device *)arg;
+
+ if (!vout->streaming)
+ return;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+ /* get the display device attached to the overlay */
+ if (!ovl->manager || !ovl->manager->device)
+ return;
+
+ cur_display = ovl->manager->device;
+
+ spin_lock(&vout->vbq_lock);
+ do_gettimeofday(&timevalue);
+ if (cur_display->type == OMAP_DISPLAY_TYPE_DPI) {
+ if (!(irqstatus & DISPC_IRQ_VSYNC))
+ goto vout_isr_err;
+
+ if (!vout->first_int && (vout->cur_frm != vout->next_frm)) {
+ vout->cur_frm->ts = timevalue;
+ vout->cur_frm->state = VIDEOBUF_DONE;
+ wake_up_interruptible(&vout->cur_frm->done);
+ vout->cur_frm = vout->next_frm;
+ }
+ vout->first_int = 0;
+ if (list_empty(&vout->dma_queue))
+ goto vout_isr_err;
+
+ vout->next_frm = list_entry(vout->dma_queue.next,
+ struct videobuf_buffer, queue);
+ list_del(&vout->next_frm->queue);
+
+ vout->next_frm->state = VIDEOBUF_ACTIVE;
+
+ addr = (unsigned long) vout->queued_buf_addr[vout->next_frm->i]
+ + vout->cropped_offset;
+
+ /* First save the configuration in the overlay structure */
+ ret = omapvid_init(vout, addr);
+ if (ret)
+ printk(KERN_ERR VOUT_NAME
+ "failed to set overlay info\n");
+ /* Enable the pipeline and set the Go bit */
+ ret = omapvid_apply_changes(vout);
+ if (ret)
+ printk(KERN_ERR VOUT_NAME "failed to change mode\n");
+ } else {
+
+ if (vout->first_int) {
+ vout->first_int = 0;
+ goto vout_isr_err;
+ }
+ if (irqstatus & DISPC_IRQ_EVSYNC_ODD)
+ fid = 1;
+ else if (irqstatus & DISPC_IRQ_EVSYNC_EVEN)
+ fid = 0;
+ else
+ goto vout_isr_err;
+
+ vout->field_id ^= 1;
+ if (fid != vout->field_id) {
+ if (0 == fid)
+ vout->field_id = fid;
+
+ goto vout_isr_err;
+ }
+ if (0 == fid) {
+ if (vout->cur_frm == vout->next_frm)
+ goto vout_isr_err;
+
+ vout->cur_frm->ts = timevalue;
+ vout->cur_frm->state = VIDEOBUF_DONE;
+ wake_up_interruptible(&vout->cur_frm->done);
+ vout->cur_frm = vout->next_frm;
+ } else if (1 == fid) {
+ if (list_empty(&vout->dma_queue) ||
+ (vout->cur_frm != vout->next_frm))
+ goto vout_isr_err;
+
+ vout->next_frm = list_entry(vout->dma_queue.next,
+ struct videobuf_buffer, queue);
+ list_del(&vout->next_frm->queue);
+
+ vout->next_frm->state = VIDEOBUF_ACTIVE;
+ addr = (unsigned long)
+ vout->queued_buf_addr[vout->next_frm->i] +
+ vout->cropped_offset;
+ /* First save the configuration in the overlay structure */
+ ret = omapvid_init(vout, addr);
+ if (ret)
+ printk(KERN_ERR VOUT_NAME
+ "failed to set overlay info\n");
+ /* Enable the pipeline and set the Go bit */
+ ret = omapvid_apply_changes(vout);
+ if (ret)
+ printk(KERN_ERR VOUT_NAME
+ "failed to change mode\n");
+ }
+
+ }
+
+vout_isr_err:
+ spin_unlock(&vout->vbq_lock);
+}
+
+
+/* Video buffer call backs */
+
+/*
+ * Buffer setup function, called by the videobuf layer when the REQBUF ioctl
+ * is called. It is used to set up buffers and return the size and count of
+ * buffers allocated. After this call, the videobuf layer will set up the
+ * buffer queue depending on the size and count of buffers
+ */
+static int omap_vout_buffer_setup(struct videobuf_queue *q, unsigned int *count,
+ unsigned int *size)
+{
+ int startindex = 0, i, j;
+ u32 phy_addr = 0, virt_addr = 0;
+ struct omap_vout_device *vout = q->priv_data;
+
+ if (!vout)
+ return -EINVAL;
+
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != q->type)
+ return -EINVAL;
+
+ startindex = (vout->vid == OMAP_VIDEO1) ?
+ video1_numbuffers : video2_numbuffers;
+ if (V4L2_MEMORY_MMAP == vout->memory && *count < startindex)
+ *count = startindex;
+
+ if ((rotation_enabled(vout)) && *count > VRFB_NUM_BUFS)
+ *count = VRFB_NUM_BUFS;
+
+ /* If rotation is enabled, allocate memory for VRFB space also */
+ if (rotation_enabled(vout))
+ if (omap_vout_vrfb_buffer_setup(vout, count, startindex))
+ return -ENOMEM;
+
+ if (V4L2_MEMORY_MMAP != vout->memory)
+ return 0;
+
+ /* Now allocate the V4L2 buffers */
+ *size = PAGE_ALIGN(vout->pix.width * vout->pix.height * vout->bpp);
+ startindex = (vout->vid == OMAP_VIDEO1) ?
+ video1_numbuffers : video2_numbuffers;
+
+ for (i = startindex; i < *count; i++) {
+ vout->buffer_size = *size;
+
+ virt_addr = omap_vout_alloc_buffer(vout->buffer_size,
+ &phy_addr);
+ if (!virt_addr) {
+ if (!rotation_enabled(vout))
+ break;
+ /* Free the VRFB buffers if no space for V4L2 buffers */
+ for (j = i; j < *count; j++) {
+ omap_vout_free_buffer(
+ vout->smsshado_virt_addr[j],
+ vout->smsshado_size);
+ vout->smsshado_virt_addr[j] = 0;
+ vout->smsshado_phy_addr[j] = 0;
+ }
+ }
+ vout->buf_virt_addr[i] = virt_addr;
+ vout->buf_phy_addr[i] = phy_addr;
+ }
+ *count = vout->buffer_allocated = i;
+
+ return 0;
+}
+
+/*
+ * Free any V4L2 buffers allocated beyond the default number
+ * of buffers, and free all the VRFB buffers
+ */
+static void omap_vout_free_allbuffers(struct omap_vout_device *vout)
+{
+ int num_buffers = 0, i;
+
+ num_buffers = (vout->vid == OMAP_VIDEO1) ?
+ video1_numbuffers : video2_numbuffers;
+
+ for (i = num_buffers; i < vout->buffer_allocated; i++) {
+ if (vout->buf_virt_addr[i])
+ omap_vout_free_buffer(vout->buf_virt_addr[i],
+ vout->buffer_size);
+
+ vout->buf_virt_addr[i] = 0;
+ vout->buf_phy_addr[i] = 0;
+ }
+ /* Free the VRFB buffers only if they are allocated
+ * during reqbufs. Don't free them if they were allocated at init time
+ */
+ if (!vout->vrfb_static_allocation) {
+ for (i = 0; i < VRFB_NUM_BUFS; i++) {
+ if (vout->smsshado_virt_addr[i]) {
+ omap_vout_free_buffer(
+ vout->smsshado_virt_addr[i],
+ vout->smsshado_size);
+ vout->smsshado_virt_addr[i] = 0;
+ vout->smsshado_phy_addr[i] = 0;
+ }
+ }
+ }
+ vout->buffer_allocated = num_buffers;
+}
+
+/*
+ * This function will be called when VIDIOC_QBUF ioctl is called.
+ * It prepares buffers before they are handed out for display. This function
+ * converts the user space virtual address into a physical address if the
+ * userptr memory exchange mechanism is used. If rotation is enabled, it
+ * copies the entire buffer into VRFB memory space before giving it to the DSS.
+ */
+static int omap_vout_buffer_prepare(struct videobuf_queue *q,
+ struct videobuf_buffer *vb,
+ enum v4l2_field field)
+{
+ struct vid_vrfb_dma *tx;
+ enum dss_rotation rotation;
+ struct videobuf_dmabuf *dmabuf = NULL;
+ struct omap_vout_device *vout = q->priv_data;
+ u32 dest_frame_index = 0, src_element_index = 0;
+ u32 dest_element_index = 0, src_frame_index = 0;
+ u32 elem_count = 0, frame_count = 0, pixsize = 2;
+
+ if (VIDEOBUF_NEEDS_INIT == vb->state) {
+ vb->width = vout->pix.width;
+ vb->height = vout->pix.height;
+ vb->size = vb->width * vb->height * vout->bpp;
+ vb->field = field;
+ }
+ vb->state = VIDEOBUF_PREPARED;
+ /* if user pointer memory mechanism is used, get the physical
+ * address of the buffer
+ */
+ if (V4L2_MEMORY_USERPTR == vb->memory) {
+ if (0 == vb->baddr)
+ return -EINVAL;
+ /* Virtual address */
+ /* priv points to struct videobuf_pci_sg_memory. But we want a
+ * pointer to videobuf_dmabuf, which is a member of
+ * videobuf_pci_sg_memory */
+ dmabuf = videobuf_to_dma(q->bufs[vb->i]);
+ dmabuf->vmalloc = (void *) vb->baddr;
+
+ /* Physical address */
+ dmabuf->bus_addr =
+ (dma_addr_t) omap_vout_uservirt_to_phys(vb->baddr);
+ }
+
+ if (!rotation_enabled(vout)) {
+ dmabuf = videobuf_to_dma(q->bufs[vb->i]);
+ vout->queued_buf_addr[vb->i] = (u8 *) dmabuf->bus_addr;
+ return 0;
+ }
+ dmabuf = videobuf_to_dma(q->bufs[vb->i]);
+ /* If rotation is enabled, copy input buffer into VRFB
+ * memory space using DMA. We are copying input buffer
+ * into VRFB memory space of desired angle and DSS will
+ * read image VRFB memory for 0 degree angle
+ */
+ pixsize = vout->bpp * vout->vrfb_bpp;
+ /*
+ * DMA transfer in double index mode
+ */
+
+ /* Frame index */
+ dest_frame_index = ((MAX_PIXELS_PER_LINE * pixsize) -
+ (vout->pix.width * vout->bpp)) + 1;
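+ /*
+ * Illustrative example (assumes bpp = 2 and vrfb_bpp = 2 for YUYV, so
+ * pixsize = 4): with a 640-pixel wide frame the VRFB line is
+ * 2048 * 4 = 8192 bytes while the payload occupies 640 * 2 = 1280 bytes,
+ * so the destination address jumps (8192 - 1280) + 1 = 6913 at the end
+ * of each row in double index mode.
+ */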
+
+ /* Source and destination parameters */
+ src_element_index = 0;
+ src_frame_index = 0;
+ dest_element_index = 1;
+ /* Number of elements per frame */
+ elem_count = vout->pix.width * vout->bpp;
+ frame_count = vout->pix.height;
+ tx = &vout->vrfb_dma_tx;
+ tx->tx_status = 0;
+ omap_set_dma_transfer_params(tx->dma_ch, OMAP_DMA_DATA_TYPE_S32,
+ (elem_count / 4), frame_count, OMAP_DMA_SYNC_ELEMENT,
+ tx->dev_id, 0x0);
+ /* src_port required only for OMAP1 */
+ omap_set_dma_src_params(tx->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
+ dmabuf->bus_addr, src_element_index, src_frame_index);
+ /* Set DMA source burst mode for VRFB */
+ omap_set_dma_src_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16);
+ rotation = calc_rotation(vout);
+
+ /* dest_port required only for OMAP1 */
+ omap_set_dma_dest_params(tx->dma_ch, 0, OMAP_DMA_AMODE_DOUBLE_IDX,
+ vout->vrfb_context[vb->i].paddr[0], dest_element_index,
+ dest_frame_index);
+ /* Set DMA destination burst mode for VRFB */
+ omap_set_dma_dest_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16);
+ omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE, 0x20, 0);
+
+ omap_start_dma(tx->dma_ch);
+ interruptible_sleep_on_timeout(&tx->wait, VRFB_TX_TIMEOUT);
+
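+ /* tx_status is expected to be set by the VRFB DMA completion callback;
+ * if it is still zero here, the transfer timed out.
+ */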
+ if (tx->tx_status == 0) {
+ omap_stop_dma(tx->dma_ch);
+ return -EINVAL;
+ }
+ /* Store the buffer's physical address into an array. Addresses
+ * from this array will be used to configure the DSS */
+ vout->queued_buf_addr[vb->i] = (u8 *)
+ vout->vrfb_context[vb->i].paddr[rotation];
+ return 0;
+}
+
+/*
+ * The buffer queue function is called from the videobuf layer when the _QBUF
+ * ioctl is called. It is used to enqueue a buffer that is ready to be
+ * displayed.
+ */
+static void omap_vout_buffer_queue(struct videobuf_queue *q,
+ struct videobuf_buffer *vb)
+{
+ struct omap_vout_device *vout = q->priv_data;
+
+ /* The driver also maintains a queue, so enqueue the buffer in the driver
+ * queue */
+ list_add_tail(&vb->queue, &vout->dma_queue);
+
+ vb->state = VIDEOBUF_QUEUED;
+}
+
+/*
+ * The buffer release function is called from the videobuf layer to release
+ * buffers that have already been allocated
+ */
+ */
+static void omap_vout_buffer_release(struct videobuf_queue *q,
+ struct videobuf_buffer *vb)
+{
+ struct omap_vout_device *vout = q->priv_data;
+
+ vb->state = VIDEOBUF_NEEDS_INIT;
+
+ if (V4L2_MEMORY_MMAP != vout->memory)
+ return;
+}
+
+/*
+ * File operations
+ */
+static void omap_vout_vm_open(struct vm_area_struct *vma)
+{
+ struct omap_vout_device *vout = vma->vm_private_data;
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+ "vm_open [vma=%08lx-%08lx]\n", vma->vm_start, vma->vm_end);
+ vout->mmap_count++;
+}
+
+static void omap_vout_vm_close(struct vm_area_struct *vma)
+{
+ struct omap_vout_device *vout = vma->vm_private_data;
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+ "vm_close [vma=%08lx-%08lx]\n", vma->vm_start, vma->vm_end);
+ vout->mmap_count--;
+}
+
+static struct vm_operations_struct omap_vout_vm_ops = {
+ .open = omap_vout_vm_open,
+ .close = omap_vout_vm_close,
+};
+
+static int omap_vout_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ int i;
+ void *pos;
+ unsigned long start = vma->vm_start;
+ unsigned long size = (vma->vm_end - vma->vm_start);
+ struct videobuf_dmabuf *dmabuf = NULL;
+ struct omap_vout_device *vout = file->private_data;
+ struct videobuf_queue *q = &vout->vbq;
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+ " %s pgoff=0x%lx, start=0x%lx, end=0x%lx\n", __func__,
+ vma->vm_pgoff, vma->vm_start, vma->vm_end);
+
+ /* look for the buffer to map */
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ if (NULL == q->bufs[i])
+ continue;
+ if (V4L2_MEMORY_MMAP != q->bufs[i]->memory)
+ continue;
+ if (q->bufs[i]->boff == (vma->vm_pgoff << PAGE_SHIFT))
+ break;
+ }
+
+ if (VIDEO_MAX_FRAME == i) {
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+ "offset invalid [offset=0x%lx]\n",
+ (vma->vm_pgoff << PAGE_SHIFT));
+ return -EINVAL;
+ }
+ q->bufs[i]->baddr = vma->vm_start;
+
+ vma->vm_flags |= VM_RESERVED;
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ vma->vm_ops = &omap_vout_vm_ops;
+ vma->vm_private_data = (void *) vout;
+ dmabuf = videobuf_to_dma(q->bufs[i]);
+ pos = dmabuf->vmalloc;
+ vma->vm_pgoff = virt_to_phys((void *)pos) >> PAGE_SHIFT;
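+ /* Remap the buffer into the user's address space one page at a time */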
+ while (size > 0) {
+ unsigned long pfn;
+ pfn = virt_to_phys((void *) pos) >> PAGE_SHIFT;
+ if (remap_pfn_range(vma, start, pfn, PAGE_SIZE, PAGE_SHARED))
+ return -EAGAIN;
+ start += PAGE_SIZE;
+ pos += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ vout->mmap_count++;
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Exiting %s\n", __func__);
+
+ return 0;
+}
+
+static int omap_vout_release(struct file *file)
+{
+ unsigned int ret, i;
+ struct videobuf_queue *q;
+ struct omapvideo_info *ovid;
+ struct omap_vout_device *vout = file->private_data;
+
+ if (!vout)
+ return 0;
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
+ ovid = &vout->vid_info;
+
+ q = &vout->vbq;
+ /* Disable all the overlay managers connected with this interface */
+ for (i = 0; i < ovid->num_overlays; i++) {
+ struct omap_overlay *ovl = ovid->overlays[i];
+ if (ovl->manager && ovl->manager->device) {
+ struct omap_overlay_info info;
+ ovl->get_overlay_info(ovl, &info);
+ info.enabled = 0;
+ ovl->set_overlay_info(ovl, &info);
+ }
+ }
+ /* Turn off the pipeline */
+ ret = omapvid_apply_changes(vout);
+ if (ret)
+ v4l2_warn(&vout->vid_dev->v4l2_dev,
+ "Unable to apply changes\n");
+
+ /* Free all buffers */
+ omap_vout_free_allbuffers(vout);
+ videobuf_mmap_free(q);
+
+ /* Even if applying changes fails we should continue
+ freeing the allocated memory */
+ if (vout->streaming) {
+ u32 mask = 0;
+
+ mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN |
+ DISPC_IRQ_EVSYNC_ODD;
+ omap_dispc_unregister_isr(omap_vout_isr, vout, mask);
+ vout->streaming = 0;
+
+ videobuf_streamoff(q);
+ videobuf_queue_cancel(q);
+ }
+
+ if (vout->mmap_count != 0)
+ vout->mmap_count = 0;
+
+ vout->opened -= 1;
+ file->private_data = NULL;
+
+ if (vout->buffer_allocated)
+ videobuf_mmap_free(q);
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Exiting %s\n", __func__);
+ return ret;
+}
+
+static int omap_vout_open(struct file *file)
+{
+ struct videobuf_queue *q;
+ struct omap_vout_device *vout = NULL;
+
+ vout = video_drvdata(file);
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
+
+ if (vout == NULL)
+ return -ENODEV;
+
+ /* for now, we only support single open */
+ if (vout->opened)
+ return -EBUSY;
+
+ vout->opened += 1;
+
+ file->private_data = vout;
+ vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+
+ q = &vout->vbq;
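+ /* Plug the driver's buffer handling callbacks into the videobuf
+ * queue operations before initializing the queue.
+ */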
+ video_vbq_ops.buf_setup = omap_vout_buffer_setup;
+ video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
+ video_vbq_ops.buf_release = omap_vout_buffer_release;
+ video_vbq_ops.buf_queue = omap_vout_buffer_queue;
+ spin_lock_init(&vout->vbq_lock);
+
+ videobuf_queue_sg_init(q, &video_vbq_ops, NULL, &vout->vbq_lock,
+ vout->type, V4L2_FIELD_NONE,
+ sizeof(struct videobuf_buffer), vout);
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Exiting %s\n", __func__);
+ return 0;
+}
+
+/*
+ * V4L2 ioctls
+ */
+static int vidioc_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct omap_vout_device *vout = fh;
+
+ strlcpy(cap->driver, VOUT_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, vout->vfd->name, sizeof(cap->card));
+ cap->bus_info[0] = '\0';
+ cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT;
+
+ return 0;
+}
+
+static int vidioc_enum_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_fmtdesc *fmt)
+{
+ int index = fmt->index;
+ enum v4l2_buf_type type = fmt->type;
+
+ fmt->index = index;
+ fmt->type = type;
+ if (index >= NUM_OUTPUT_FORMATS)
+ return -EINVAL;
+
+ fmt->flags = omap_formats[index].flags;
+ strlcpy(fmt->description, omap_formats[index].description,
+ sizeof(fmt->description));
+ fmt->pixelformat = omap_formats[index].pixelformat;
+
+ return 0;
+}
+
+static int vidioc_g_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct omap_vout_device *vout = fh;
+
+ f->fmt.pix = vout->pix;
+ return 0;
+
+}
+
+static int vidioc_try_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct omap_video_timings *timing;
+ struct omap_vout_device *vout = fh;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ if (!ovl->manager || !ovl->manager->device)
+ return -EINVAL;
+ /* get the display device attached to the overlay */
+ timing = &ovl->manager->device->panel.timings;
+
+ vout->fbuf.fmt.height = timing->y_res;
+ vout->fbuf.fmt.width = timing->x_res;
+
+ omap_vout_try_format(&f->fmt.pix);
+ return 0;
+}
+
+static int vidioc_s_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ int ret, bpp;
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct omap_video_timings *timing;
+ struct omap_vout_device *vout = fh;
+
+ if (vout->streaming)
+ return -EBUSY;
+
+ mutex_lock(&vout->lock);
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ /* get the display device attached to the overlay */
+ if (!ovl->manager || !ovl->manager->device) {
+ ret = -EINVAL;
+ goto s_fmt_vid_out_exit;
+ }
+ timing = &ovl->manager->device->panel.timings;
+
+ /* We don't support RGB24-packed mode if VRFB rotation
+ * is enabled */
+ if ((rotation_enabled(vout)) &&
+ f->fmt.pix.pixelformat == V4L2_PIX_FMT_RGB24) {
+ ret = -EINVAL;
+ goto s_fmt_vid_out_exit;
+ }
+
+ /* get the framebuffer parameters */
+
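+ /* For 90/270 degree rotation the output is displayed rotated, so the
+ * framebuffer width and height are swapped before validating the format.
+ */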
+ if (rotate_90_or_270(vout)) {
+ vout->fbuf.fmt.height = timing->x_res;
+ vout->fbuf.fmt.width = timing->y_res;
+ } else {
+ vout->fbuf.fmt.height = timing->y_res;
+ vout->fbuf.fmt.width = timing->x_res;
+ }
+
+ /* changing to a smaller size is OK */
+
+ bpp = omap_vout_try_format(&f->fmt.pix);
+ f->fmt.pix.sizeimage = f->fmt.pix.width * f->fmt.pix.height * bpp;
+
+ /* try & set the new output format */
+ vout->bpp = bpp;
+ vout->pix = f->fmt.pix;
+ vout->vrfb_bpp = 1;
+
+ /* If YUYV then VRFB bpp is 2, for others it is 1 */
+ if (V4L2_PIX_FMT_YUYV == vout->pix.pixelformat ||
+ V4L2_PIX_FMT_UYVY == vout->pix.pixelformat)
+ vout->vrfb_bpp = 2;
+
+ /* set default crop and win */
+ omap_vout_new_format(&vout->pix, &vout->fbuf, &vout->crop, &vout->win);
+
+ /* Save the changes in the overlay structure */
+ ret = omapvid_init(vout, 0);
+ if (ret) {
+ v4l2_err(&vout->vid_dev->v4l2_dev, "failed to change mode\n");
+ goto s_fmt_vid_out_exit;
+ }
+
+ ret = 0;
+
+s_fmt_vid_out_exit:
+ mutex_unlock(&vout->lock);
+ return ret;
+}
+
+static int vidioc_try_fmt_vid_overlay(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ int ret = 0;
+ struct omap_vout_device *vout = fh;
+ struct v4l2_window *win = &f->fmt.win;
+
+ ret = omap_vout_try_window(&vout->fbuf, win);
+
+ if (!ret) {
+ if (vout->vid == OMAP_VIDEO1)
+ win->global_alpha = 255;
+ else
+ win->global_alpha = f->fmt.win.global_alpha;
+ }
+
+ return ret;
+}
+
+static int vidioc_s_fmt_vid_overlay(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ int ret = 0;
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct omap_vout_device *vout = fh;
+ struct v4l2_window *win = &f->fmt.win;
+
+ mutex_lock(&vout->lock);
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ ret = omap_vout_new_window(&vout->crop, &vout->win, &vout->fbuf, win);
+ if (!ret) {
+ /* Video1 plane does not support global alpha */
+ if (ovl->id == OMAP_DSS_VIDEO1)
+ vout->win.global_alpha = 255;
+ else
+ vout->win.global_alpha = f->fmt.win.global_alpha;
+
+ vout->win.chromakey = f->fmt.win.chromakey;
+ }
+ mutex_unlock(&vout->lock);
+ return ret;
+}
+
+static int vidioc_enum_fmt_vid_overlay(struct file *file, void *fh,
+ struct v4l2_fmtdesc *fmt)
+{
+ int index = fmt->index;
+ enum v4l2_buf_type type = fmt->type;
+
+ fmt->index = index;
+ fmt->type = type;
+ if (index >= NUM_OUTPUT_FORMATS)
+ return -EINVAL;
+
+ fmt->flags = omap_formats[index].flags;
+ strlcpy(fmt->description, omap_formats[index].description,
+ sizeof(fmt->description));
+ fmt->pixelformat = omap_formats[index].pixelformat;
+ return 0;
+}
+
+static int vidioc_g_fmt_vid_overlay(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ u32 key_value = 0;
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct omap_vout_device *vout = fh;
+ struct omap_overlay_manager_info info;
+ struct v4l2_window *win = &f->fmt.win;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ win->w = vout->win.w;
+ win->field = vout->win.field;
+ win->global_alpha = vout->win.global_alpha;
+
+ if (ovl->manager && ovl->manager->get_manager_info) {
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ key_value = info.trans_key;
+ }
+ win->chromakey = key_value;
+ return 0;
+}
+
+static int vidioc_cropcap(struct file *file, void *fh,
+ struct v4l2_cropcap *cropcap)
+{
+ struct omap_vout_device *vout = fh;
+ struct v4l2_pix_format *pix = &vout->pix;
+
+ if (cropcap->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ /* Width and height are always even */
+ cropcap->bounds.width = pix->width & ~1;
+ cropcap->bounds.height = pix->height & ~1;
+
+ omap_vout_default_crop(&vout->pix, &vout->fbuf, &cropcap->defrect);
+ cropcap->pixelaspect.numerator = 1;
+ cropcap->pixelaspect.denominator = 1;
+ return 0;
+}
+
+static int vidioc_g_crop(struct file *file, void *fh, struct v4l2_crop *crop)
+{
+ struct omap_vout_device *vout = fh;
+
+ if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ crop->c = vout->crop;
+ return 0;
+}
+
+static int vidioc_s_crop(struct file *file, void *fh, struct v4l2_crop *crop)
+{
+ int ret = -EINVAL;
+ struct omap_vout_device *vout = fh;
+ struct omapvideo_info *ovid;
+ struct omap_overlay *ovl;
+ struct omap_video_timings *timing;
+
+ if (vout->streaming)
+ return -EBUSY;
+
+ mutex_lock(&vout->lock);
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ if (!ovl->manager || !ovl->manager->device) {
+ ret = -EINVAL;
+ goto s_crop_err;
+ }
+ /* get the display device attached to the overlay */
+ timing = &ovl->manager->device->panel.timings;
+
+ if (rotate_90_or_270(vout)) {
+ vout->fbuf.fmt.height = timing->x_res;
+ vout->fbuf.fmt.width = timing->y_res;
+ } else {
+ vout->fbuf.fmt.height = timing->y_res;
+ vout->fbuf.fmt.width = timing->x_res;
+ }
+
+ if (crop->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ ret = omap_vout_new_crop(&vout->pix, &vout->crop, &vout->win,
+ &vout->fbuf, &crop->c);
+
+s_crop_err:
+ mutex_unlock(&vout->lock);
+ return ret;
+}
+
+static int vidioc_queryctrl(struct file *file, void *fh,
+ struct v4l2_queryctrl *ctrl)
+{
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ROTATE:
+ ret = v4l2_ctrl_query_fill(ctrl, 0, 270, 90, 0);
+ break;
+ case V4L2_CID_BG_COLOR:
+ ret = v4l2_ctrl_query_fill(ctrl, 0, 0xFFFFFF, 1, 0);
+ break;
+ case V4L2_CID_VFLIP:
+ ret = v4l2_ctrl_query_fill(ctrl, 0, 1, 1, 0);
+ break;
+ default:
+ ctrl->name[0] = '\0';
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static int vidioc_g_ctrl(struct file *file, void *fh, struct v4l2_control *ctrl)
+{
+ int ret = 0;
+ struct omap_vout_device *vout = fh;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ROTATE:
+ ctrl->value = vout->control[0].value;
+ break;
+ case V4L2_CID_BG_COLOR:
+ {
+ struct omap_overlay_manager_info info;
+ struct omap_overlay *ovl;
+
+ ovl = vout->vid_info.overlays[0];
+ if (!ovl->manager || !ovl->manager->get_manager_info) {
+ ret = -EINVAL;
+ break;
+ }
+
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ ctrl->value = info.default_color;
+ break;
+ }
+ case V4L2_CID_VFLIP:
+ ctrl->value = vout->control[2].value;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *a)
+{
+ int ret = 0;
+ struct omap_vout_device *vout = fh;
+
+ switch (a->id) {
+ case V4L2_CID_ROTATE:
+ {
+ int rotation = a->value;
+
+ mutex_lock(&vout->lock);
+
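+ /* Rotation is done through VRFB, which does not support the
+ * RGB24-packed format.
+ */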
+ if (rotation && vout->pix.pixelformat == V4L2_PIX_FMT_RGB24) {
+ mutex_unlock(&vout->lock);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (v4l2_rot_to_dss_rot(rotation, &vout->rotation,
+ vout->mirror)) {
+ mutex_unlock(&vout->lock);
+ ret = -EINVAL;
+ break;
+ }
+
+ vout->control[0].value = rotation;
+ mutex_unlock(&vout->lock);
+ break;
+ }
+ case V4L2_CID_BG_COLOR:
+ {
+ struct omap_overlay *ovl;
+ unsigned int color = a->value;
+ struct omap_overlay_manager_info info;
+
+ ovl = vout->vid_info.overlays[0];
+
+ mutex_lock(&vout->lock);
+ if (!ovl->manager || !ovl->manager->get_manager_info) {
+ mutex_unlock(&vout->lock);
+ ret = -EINVAL;
+ break;
+ }
+
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ info.default_color = color;
+ if (ovl->manager->set_manager_info(ovl->manager, &info)) {
+ mutex_unlock(&vout->lock);
+ ret = -EINVAL;
+ break;
+ }
+
+ vout->control[1].value = color;
+ mutex_unlock(&vout->lock);
+ break;
+ }
+ case V4L2_CID_VFLIP:
+ {
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ unsigned int mirror = a->value;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ mutex_lock(&vout->lock);
+
+ if (mirror && vout->pix.pixelformat == V4L2_PIX_FMT_RGB24) {
+ mutex_unlock(&vout->lock);
+ ret = -EINVAL;
+ break;
+ }
+ vout->mirror = mirror;
+ vout->control[2].value = mirror;
+ mutex_unlock(&vout->lock);
+ break;
+ }
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static int vidioc_reqbufs(struct file *file, void *fh,
+ struct v4l2_requestbuffers *req)
+{
+ int ret = 0;
+ unsigned int i, num_buffers = 0;
+ struct omap_vout_device *vout = fh;
+ struct videobuf_queue *q = &vout->vbq;
+ struct videobuf_dmabuf *dmabuf = NULL;
+
+ if ((req->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) || (req->count < 0))
+ return -EINVAL;
+ /* if memory is not mmap or userptr
+ return error */
+ if ((V4L2_MEMORY_MMAP != req->memory) &&
+ (V4L2_MEMORY_USERPTR != req->memory))
+ return -EINVAL;
+
+ mutex_lock(&vout->lock);
+ /* Cannot be requested when streaming is on */
+ if (vout->streaming) {
+ ret = -EBUSY;
+ goto reqbuf_err;
+ }
+
+ /* If buffers are already allocated free them */
+ if (q->bufs[0] && (V4L2_MEMORY_MMAP == q->bufs[0]->memory)) {
+ if (vout->mmap_count) {
+ ret = -EBUSY;
+ goto reqbuf_err;
+ }
+ num_buffers = (vout->vid == OMAP_VIDEO1) ?
+ video1_numbuffers : video2_numbuffers;
+ for (i = num_buffers; i < vout->buffer_allocated; i++) {
+ dmabuf = videobuf_to_dma(q->bufs[i]);
+ omap_vout_free_buffer((u32)dmabuf->vmalloc,
+ vout->buffer_size);
+ vout->buf_virt_addr[i] = 0;
+ vout->buf_phy_addr[i] = 0;
+ }
+ vout->buffer_allocated = num_buffers;
+ videobuf_mmap_free(q);
+ } else if (q->bufs[0] && (V4L2_MEMORY_USERPTR == q->bufs[0]->memory)) {
+ if (vout->buffer_allocated) {
+ videobuf_mmap_free(q);
+ for (i = 0; i < vout->buffer_allocated; i++) {
+ kfree(q->bufs[i]);
+ q->bufs[i] = NULL;
+ }
+ vout->buffer_allocated = 0;
+ }
+ }
+
+ /* Store the memory type in the data structure */
+ vout->memory = req->memory;
+
+ INIT_LIST_HEAD(&vout->dma_queue);
+
+ /* call videobuf_reqbufs api */
+ ret = videobuf_reqbufs(q, req);
+ if (ret < 0)
+ goto reqbuf_err;
+
+ vout->buffer_allocated = req->count;
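+ /* Point each videobuf DMA descriptor at the driver's own buffer
+ * bookkeeping (buf_virt_addr/buf_phy_addr) so videobuf and the driver
+ * agree on the buffer addresses.
+ */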
+ for (i = 0; i < req->count; i++) {
+ dmabuf = videobuf_to_dma(q->bufs[i]);
+ dmabuf->vmalloc = (void *) vout->buf_virt_addr[i];
+ dmabuf->bus_addr = (dma_addr_t) vout->buf_phy_addr[i];
+ dmabuf->sglen = 1;
+ }
+reqbuf_err:
+ mutex_unlock(&vout->lock);
+ return ret;
+}
+
+static int vidioc_querybuf(struct file *file, void *fh,
+ struct v4l2_buffer *b)
+{
+ struct omap_vout_device *vout = fh;
+
+ return videobuf_querybuf(&vout->vbq, b);
+}
+
+static int vidioc_qbuf(struct file *file, void *fh,
+ struct v4l2_buffer *buffer)
+{
+ struct omap_vout_device *vout = fh;
+ struct videobuf_queue *q = &vout->vbq;
+
+ if ((V4L2_BUF_TYPE_VIDEO_OUTPUT != buffer->type) ||
+ (buffer->index >= vout->buffer_allocated) ||
+ (q->bufs[buffer->index]->memory != buffer->memory)) {
+ return -EINVAL;
+ }
+ if (V4L2_MEMORY_USERPTR == buffer->memory) {
+ if ((buffer->length < vout->pix.sizeimage) ||
+ (0 == buffer->m.userptr)) {
+ return -EINVAL;
+ }
+ }
+
+ if ((rotation_enabled(vout)) &&
+ vout->vrfb_dma_tx.req_status == DMA_CHAN_NOT_ALLOTED) {
+ v4l2_warn(&vout->vid_dev->v4l2_dev,
+ "DMA Channel not allocated for Rotation\n");
+ return -EINVAL;
+ }
+
+ return videobuf_qbuf(q, buffer);
+}
+
+static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
+{
+ struct omap_vout_device *vout = fh;
+ struct videobuf_queue *q = &vout->vbq;
+
+ if (!vout->streaming)
+ return -EINVAL;
+
+ if (file->f_flags & O_NONBLOCK)
+ /* Call videobuf_dqbuf for non blocking mode */
+ return videobuf_dqbuf(q, (struct v4l2_buffer *)b, 1);
+ else
+ /* Call videobuf_dqbuf for blocking mode */
+ return videobuf_dqbuf(q, (struct v4l2_buffer *)b, 0);
+}
+
+static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
+{
+ int ret = 0, j;
+ u32 addr = 0, mask = 0;
+ struct omap_vout_device *vout = fh;
+ struct videobuf_queue *q = &vout->vbq;
+ struct omapvideo_info *ovid = &vout->vid_info;
+
+ mutex_lock(&vout->lock);
+
+ if (vout->streaming) {
+ ret = -EBUSY;
+ goto streamon_err;
+ }
+
+ ret = videobuf_streamon(q);
+ if (ret)
+ goto streamon_err;
+
+ if (list_empty(&vout->dma_queue)) {
+ ret = -EIO;
+ goto streamon_err1;
+ }
+
+ /* Get the next frame from the buffer queue */
+ vout->next_frm = vout->cur_frm = list_entry(vout->dma_queue.next,
+ struct videobuf_buffer, queue);
+ /* Remove buffer from the buffer queue */
+ list_del(&vout->cur_frm->queue);
+ /* Mark state of the current frame to active */
+ vout->cur_frm->state = VIDEOBUF_ACTIVE;
+ /* Initialize field_id */
+ vout->field_id = 0;
+
+ /* set flag here. Next QBUF will start DMA */
+ vout->streaming = 1;
+
+ vout->first_int = 1;
+
+ if (omap_vout_calculate_offset(vout)) {
+ ret = -EINVAL;
+ goto streamon_err1;
+ }
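+ /* Physical address of the first queued frame, adjusted by the
+ * cropping offset calculated above */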
+ addr = (unsigned long) vout->queued_buf_addr[vout->cur_frm->i]
+ + vout->cropped_offset;
+
+ mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD;
+
+ omap_dispc_register_isr(omap_vout_isr, vout, mask);
+
+ for (j = 0; j < ovid->num_overlays; j++) {
+ struct omap_overlay *ovl = ovid->overlays[j];
+
+ if (ovl->manager && ovl->manager->device) {
+ struct omap_overlay_info info;
+ ovl->get_overlay_info(ovl, &info);
+ info.enabled = 1;
+ info.paddr = addr;
+ if (ovl->set_overlay_info(ovl, &info)) {
+ ret = -EINVAL;
+ goto streamon_err1;
+ }
+ }
+ }
+
+ /* First save the configuration in the overlay structure */
+ ret = omapvid_init(vout, addr);
+ if (ret)
+ v4l2_err(&vout->vid_dev->v4l2_dev,
+ "failed to set overlay info\n");
+ /* Enable the pipeline and set the Go bit */
+ ret = omapvid_apply_changes(vout);
+ if (ret)
+ v4l2_err(&vout->vid_dev->v4l2_dev, "failed to change mode\n");
+
+ ret = 0;
+
+streamon_err1:
+ if (ret)
+ ret = videobuf_streamoff(q);
+streamon_err:
+ mutex_unlock(&vout->lock);
+ return ret;
+}
+
+static int vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
+{
+ u32 mask = 0;
+ int ret = 0, j;
+ struct omap_vout_device *vout = fh;
+ struct omapvideo_info *ovid = &vout->vid_info;
+
+ if (!vout->streaming)
+ return -EINVAL;
+
+ vout->streaming = 0;
+ mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD;
+
+ omap_dispc_unregister_isr(omap_vout_isr, vout, mask);
+
+ for (j = 0; j < ovid->num_overlays; j++) {
+ struct omap_overlay *ovl = ovid->overlays[j];
+
+ if (ovl->manager && ovl->manager->device) {
+ struct omap_overlay_info info;
+
+ ovl->get_overlay_info(ovl, &info);
+ info.enabled = 0;
+ ret = ovl->set_overlay_info(ovl, &info);
+ if (ret)
+ v4l2_err(&vout->vid_dev->v4l2_dev,
+ "failed to update overlay info in streamoff\n");
+ }
+ }
+
+ /* Turn off the pipeline */
+ ret = omapvid_apply_changes(vout);
+ if (ret)
+ v4l2_err(&vout->vid_dev->v4l2_dev, "failed to change mode in"
+ " streamoff\n");
+
+ INIT_LIST_HEAD(&vout->dma_queue);
+ ret = videobuf_streamoff(&vout->vbq);
+
+ return ret;
+}
+
+static int vidioc_s_fbuf(struct file *file, void *fh,
+ struct v4l2_framebuffer *a)
+{
+ int enable = 0;
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct omap_vout_device *vout = fh;
+ struct omap_overlay_manager_info info;
+ enum omap_dss_trans_key_type key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ /* OMAP DSS doesn't support Source and Destination color
+ key together */
+ if ((a->flags & V4L2_FBUF_FLAG_SRC_CHROMAKEY) &&
+ (a->flags & V4L2_FBUF_FLAG_CHROMAKEY))
+ return -EINVAL;
+ /* OMAP DSS doesn't support the destination color key
+ and alpha blending together */
+ if ((a->flags & V4L2_FBUF_FLAG_CHROMAKEY) &&
+ (a->flags & V4L2_FBUF_FLAG_LOCAL_ALPHA))
+ return -EINVAL;
+
+ if ((a->flags & V4L2_FBUF_FLAG_SRC_CHROMAKEY)) {
+ vout->fbuf.flags |= V4L2_FBUF_FLAG_SRC_CHROMAKEY;
+ key_type = OMAP_DSS_COLOR_KEY_VID_SRC;
+ } else
+ vout->fbuf.flags &= ~V4L2_FBUF_FLAG_SRC_CHROMAKEY;
+
+ if ((a->flags & V4L2_FBUF_FLAG_CHROMAKEY)) {
+ vout->fbuf.flags |= V4L2_FBUF_FLAG_CHROMAKEY;
+ key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
+ } else
+ vout->fbuf.flags &= ~V4L2_FBUF_FLAG_CHROMAKEY;
+
+ if (a->flags & (V4L2_FBUF_FLAG_CHROMAKEY |
+ V4L2_FBUF_FLAG_SRC_CHROMAKEY))
+ enable = 1;
+ else
+ enable = 0;
+ if (ovl->manager && ovl->manager->get_manager_info &&
+ ovl->manager->set_manager_info) {
+
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ info.trans_enabled = enable;
+ info.trans_key_type = key_type;
+ info.trans_key = vout->win.chromakey;
+
+ if (ovl->manager->set_manager_info(ovl->manager, &info))
+ return -EINVAL;
+ }
+ if (a->flags & V4L2_FBUF_FLAG_LOCAL_ALPHA) {
+ vout->fbuf.flags |= V4L2_FBUF_FLAG_LOCAL_ALPHA;
+ enable = 1;
+ } else {
+ vout->fbuf.flags &= ~V4L2_FBUF_FLAG_LOCAL_ALPHA;
+ enable = 0;
+ }
+ if (ovl->manager && ovl->manager->get_manager_info &&
+ ovl->manager->set_manager_info) {
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ info.alpha_enabled = enable;
+ if (ovl->manager->set_manager_info(ovl->manager, &info))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vidioc_g_fbuf(struct file *file, void *fh,
+ struct v4l2_framebuffer *a)
+{
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct omap_vout_device *vout = fh;
+ struct omap_overlay_manager_info info;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ a->flags = 0x0;
+ a->capability = V4L2_FBUF_CAP_LOCAL_ALPHA | V4L2_FBUF_CAP_CHROMAKEY
+ | V4L2_FBUF_CAP_SRC_CHROMAKEY;
+
+ if (ovl->manager && ovl->manager->get_manager_info) {
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ if (info.trans_key_type == OMAP_DSS_COLOR_KEY_VID_SRC)
+ a->flags |= V4L2_FBUF_FLAG_SRC_CHROMAKEY;
+ if (info.trans_key_type == OMAP_DSS_COLOR_KEY_GFX_DST)
+ a->flags |= V4L2_FBUF_FLAG_CHROMAKEY;
+ }
+ if (ovl->manager && ovl->manager->get_manager_info) {
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ if (info.alpha_enabled)
+ a->flags |= V4L2_FBUF_FLAG_LOCAL_ALPHA;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops vout_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out = vidioc_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out,
+ .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_s_fbuf = vidioc_s_fbuf,
+ .vidioc_g_fbuf = vidioc_g_fbuf,
+ .vidioc_s_ctrl = vidioc_s_ctrl,
+ .vidioc_try_fmt_vid_overlay = vidioc_try_fmt_vid_overlay,
+ .vidioc_s_fmt_vid_overlay = vidioc_s_fmt_vid_overlay,
+ .vidioc_enum_fmt_vid_overlay = vidioc_enum_fmt_vid_overlay,
+ .vidioc_g_fmt_vid_overlay = vidioc_g_fmt_vid_overlay,
+ .vidioc_cropcap = vidioc_cropcap,
+ .vidioc_g_crop = vidioc_g_crop,
+ .vidioc_s_crop = vidioc_s_crop,
+ .vidioc_reqbufs = vidioc_reqbufs,
+ .vidioc_querybuf = vidioc_querybuf,
+ .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
+};
+
+static const struct v4l2_file_operations omap_vout_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = omap_vout_mmap,
+ .open = omap_vout_open,
+ .release = omap_vout_release,
+};
+
+/* Init functions used during driver initialization */
+/* Initial setup of video_data */
+static int __init omap_vout_setup_video_data(struct omap_vout_device *vout)
+{
+ struct video_device *vfd;
+ struct v4l2_pix_format *pix;
+ struct v4l2_control *control;
+ struct omap_dss_device *display =
+ vout->vid_info.overlays[0]->manager->device;
+
+ /* set the default pix */
+ pix = &vout->pix;
+
+ /* Set the default picture size to QQVGA */
+ pix->width = QQVGA_WIDTH;
+ pix->height = QQVGA_HEIGHT;
+
+ /* Default pixel format is RGB 5-6-5 */
+ pix->pixelformat = V4L2_PIX_FMT_RGB565;
+ pix->field = V4L2_FIELD_ANY;
+ pix->bytesperline = pix->width * 2;
+ pix->sizeimage = pix->bytesperline * pix->height;
+ pix->priv = 0;
+ pix->colorspace = V4L2_COLORSPACE_JPEG;
+
+ vout->bpp = RGB565_BPP;
+ vout->fbuf.fmt.width = display->panel.timings.x_res;
+ vout->fbuf.fmt.height = display->panel.timings.y_res;
+
+ /* Set the data structures for the overlay parameters*/
+ vout->win.global_alpha = 255;
+ vout->fbuf.flags = 0;
+ vout->fbuf.capability = V4L2_FBUF_CAP_LOCAL_ALPHA |
+ V4L2_FBUF_CAP_SRC_CHROMAKEY | V4L2_FBUF_CAP_CHROMAKEY;
+ vout->win.chromakey = 0;
+
+ omap_vout_new_format(pix, &vout->fbuf, &vout->crop, &vout->win);
+
+ /* Initialize the control variables for
+ rotation, flipping and background color. */
+ control = vout->control;
+ control[0].id = V4L2_CID_ROTATE;
+ control[0].value = 0;
+ vout->rotation = 0;
+ vout->mirror = 0;
+ vout->control[2].id = V4L2_CID_VFLIP;
+ vout->control[2].value = 0;
+ vout->vrfb_bpp = 2;
+
+ control[1].id = V4L2_CID_BG_COLOR;
+ control[1].value = 0;
+
+ /* initialize the video_device struct */
+ vfd = vout->vfd = video_device_alloc();
+
+ if (!vfd) {
+ printk(KERN_ERR VOUT_NAME ": could not allocate"
+ " video device struct\n");
+ return -ENOMEM;
+ }
+ vfd->release = video_device_release;
+ vfd->ioctl_ops = &vout_ioctl_ops;
+
+ strlcpy(vfd->name, VOUT_NAME, sizeof(vfd->name));
+
+ /* need to register for a VID_HARDWARE_* ID in videodev.h */
+ vfd->fops = &omap_vout_fops;
+ vfd->v4l2_dev = &vout->vid_dev->v4l2_dev;
+ mutex_init(&vout->lock);
+
+ vfd->minor = -1;
+ return 0;
+
+}
+
+/* Setup video buffers */
+static int __init omap_vout_setup_video_bufs(struct platform_device *pdev,
+ int vid_num)
+{
+ u32 numbuffers;
+ int ret = 0, i, j;
+ int image_width, image_height;
+ struct video_device *vfd;
+ struct omap_vout_device *vout;
+ int static_vrfb_allocation = 0, vrfb_num_bufs = VRFB_NUM_BUFS;
+ struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
+ struct omap2video_device *vid_dev =
+ container_of(v4l2_dev, struct omap2video_device, v4l2_dev);
+
+ vout = vid_dev->vouts[vid_num];
+ vfd = vout->vfd;
+
+ numbuffers = (vid_num == 0) ? video1_numbuffers : video2_numbuffers;
+ vout->buffer_size = (vid_num == 0) ? video1_bufsize : video2_bufsize;
+ dev_info(&pdev->dev, "Buffer Size = %d\n", vout->buffer_size);
+
+ for (i = 0; i < numbuffers; i++) {
+ vout->buf_virt_addr[i] =
+ omap_vout_alloc_buffer(vout->buffer_size,
+ (u32 *) &vout->buf_phy_addr[i]);
+ if (!vout->buf_virt_addr[i]) {
+ numbuffers = i;
+ ret = -ENOMEM;
+ goto free_buffers;
+ }
+ }
+
+ for (i = 0; i < VRFB_NUM_BUFS; i++) {
+ if (omap_vrfb_request_ctx(&vout->vrfb_context[i])) {
+ dev_info(&pdev->dev, ": VRFB allocation failed\n");
+ for (j = 0; j < i; j++)
+ omap_vrfb_release_ctx(&vout->vrfb_context[j]);
+ ret = -ENOMEM;
+ goto free_buffers;
+ }
+ }
+ vout->cropped_offset = 0;
+
+ /* Calculate VRFB memory size */
+ /* allocate for worst case size */
+ image_width = VID_MAX_WIDTH / TILE_SIZE;
+ if (VID_MAX_WIDTH % TILE_SIZE)
+ image_width++;
+
+ image_width = image_width * TILE_SIZE;
+ image_height = VID_MAX_HEIGHT / TILE_SIZE;
+
+ if (VID_MAX_HEIGHT % TILE_SIZE)
+ image_height++;
+
+ image_height = image_height * TILE_SIZE;
+ vout->smsshado_size = PAGE_ALIGN(image_width * image_height * 2 * 2);
+
+ /*
+ * Request and Initialize DMA, for DMA based VRFB transfer
+ */
+ vout->vrfb_dma_tx.dev_id = OMAP_DMA_NO_DEVICE;
+ vout->vrfb_dma_tx.dma_ch = -1;
+ vout->vrfb_dma_tx.req_status = DMA_CHAN_ALLOTED;
+ ret = omap_request_dma(vout->vrfb_dma_tx.dev_id, "VRFB DMA TX",
+ omap_vout_vrfb_dma_tx_callback,
+ (void *) &vout->vrfb_dma_tx, &vout->vrfb_dma_tx.dma_ch);
+ if (ret < 0) {
+ vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
+ dev_info(&pdev->dev, ": failed to allocate DMA Channel for"
+ " video%d\n", vfd->minor);
+ }
+ init_waitqueue_head(&vout->vrfb_dma_tx.wait);
+
+ /* Allocate VRFB buffers if selected through bootargs */
+ static_vrfb_allocation = (vid_num == 0) ?
+ vid1_static_vrfb_alloc : vid2_static_vrfb_alloc;
+
+ /* Static allocation of the VRFB buffers is done through
+ command line arguments */
+ if (static_vrfb_allocation) {
+ if (omap_vout_allocate_vrfb_buffers(vout, &vrfb_num_bufs, -1)) {
+ ret = -ENOMEM;
+ goto release_vrfb_ctx;
+ }
+ vout->vrfb_static_allocation = 1;
+ }
+ return 0;
+
+release_vrfb_ctx:
+ for (j = 0; j < VRFB_NUM_BUFS; j++)
+ omap_vrfb_release_ctx(&vout->vrfb_context[j]);
+
+free_buffers:
+ for (i = 0; i < numbuffers; i++) {
+ omap_vout_free_buffer(vout->buf_virt_addr[i],
+ vout->buffer_size);
+ vout->buf_virt_addr[i] = 0;
+ vout->buf_phy_addr[i] = 0;
+ }
+ return ret;
+
+}
+
+/* Create video out devices */
+static int __init omap_vout_create_video_devices(struct platform_device *pdev)
+{
+ int ret = 0, k;
+ struct omap_vout_device *vout;
+ struct video_device *vfd = NULL;
+ struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
+ struct omap2video_device *vid_dev = container_of(v4l2_dev,
+ struct omap2video_device, v4l2_dev);
+
+ for (k = 0; k < pdev->num_resources; k++) {
+
+ vout = kmalloc(sizeof(struct omap_vout_device), GFP_KERNEL);
+ if (!vout) {
+ dev_err(&pdev->dev, ": could not allocate memory\n");
+ return -ENOMEM;
+ }
+ memset(vout, 0, sizeof(struct omap_vout_device));
+
+ vout->vid = k;
+ vid_dev->vouts[k] = vout;
+ vout->vid_dev = vid_dev;
+ /* Select video2 if only 1 overlay is controlled by V4L2 */
+ if (pdev->num_resources == 1)
+ vout->vid_info.overlays[0] = vid_dev->overlays[k + 2];
+ else
+ /* Else select video1 and video2 one by one. */
+ vout->vid_info.overlays[0] = vid_dev->overlays[k + 1];
+ vout->vid_info.num_overlays = 1;
+ vout->vid_info.id = k + 1;
+
+ /* Setup the default configuration for the video devices
+ */
+ if (omap_vout_setup_video_data(vout) != 0) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* Allocate default number of buffers for the video streaming
+ * and reserve the VRFB space for rotation
+ */
+ if (omap_vout_setup_video_bufs(pdev, k) != 0) {
+ ret = -ENOMEM;
+ goto error1;
+ }
+
+ /* Register the Video device with V4L2
+ */
+ vfd = vout->vfd;
+ if (video_register_device(vfd, VFL_TYPE_GRABBER, k + 1) < 0) {
+ dev_err(&pdev->dev, ": Could not register "
+ "Video for Linux device\n");
+ vfd->minor = -1;
+ ret = -ENODEV;
+ goto error2;
+ }
+ video_set_drvdata(vfd, vout);
+
+ /* Configure the overlay structure */
+ ret = omapvid_init(vid_dev->vouts[k], 0);
+ if (!ret)
+ goto success;
+
+error2:
+ omap_vout_release_vrfb(vout);
+ omap_vout_free_buffers(vout);
+error1:
+ video_device_release(vfd);
+error:
+ kfree(vout);
+ return ret;
+
+success:
+ dev_info(&pdev->dev, ": registered and initialized"
+ " video device %d\n", vfd->minor);
+ if (k == (pdev->num_resources - 1))
+ return 0;
+ }
+
+ return -ENODEV;
+}
+/* Driver functions */
+static void omap_vout_cleanup_device(struct omap_vout_device *vout)
+{
+ struct video_device *vfd;
+
+ if (!vout)
+ return;
+
+ vfd = vout->vfd;
+ if (vfd) {
+ if (!video_is_registered(vfd)) {
+ /*
+ * The device was never registered, so release the
+ * video_device struct directly.
+ */
+ video_device_release(vfd);
+ } else {
+ /*
+ * The unregister function will release the video_device
+ * struct as well as unregistering it.
+ */
+ video_unregister_device(vfd);
+ }
+ }
+
+ omap_vout_release_vrfb(vout);
+ omap_vout_free_buffers(vout);
+ /* Free the VRFB buffer if allocated
+ * init time
+ */
+ if (vout->vrfb_static_allocation)
+ omap_vout_free_vrfb_buffers(vout);
+
+ kfree(vout);
+}
+
+static int omap_vout_remove(struct platform_device *pdev)
+{
+ int k;
+ struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
+ struct omap2video_device *vid_dev = container_of(v4l2_dev, struct
+ omap2video_device, v4l2_dev);
+
+ v4l2_device_unregister(v4l2_dev);
+ for (k = 0; k < pdev->num_resources; k++)
+ omap_vout_cleanup_device(vid_dev->vouts[k]);
+
+ for (k = 0; k < vid_dev->num_displays; k++) {
+ if (vid_dev->displays[k]->state != OMAP_DSS_DISPLAY_DISABLED)
+ vid_dev->displays[k]->disable(vid_dev->displays[k]);
+
+ omap_dss_put_device(vid_dev->displays[k]);
+ }
+ kfree(vid_dev);
+ return 0;
+}
+
+static int __init omap_vout_probe(struct platform_device *pdev)
+{
+ int ret = 0, i;
+ struct omap_overlay *ovl;
+ struct omap_dss_device *dssdev = NULL;
+ struct omap_dss_device *def_display;
+ struct omap2video_device *vid_dev = NULL;
+
+ if (pdev->num_resources == 0) {
+ dev_err(&pdev->dev, "probed for an unknown device\n");
+ return -ENODEV;
+ }
+
+ vid_dev = kzalloc(sizeof(struct omap2video_device), GFP_KERNEL);
+ if (vid_dev == NULL)
+ return -ENOMEM;
+
+ vid_dev->num_displays = 0;
+ for_each_dss_dev(dssdev) {
+ omap_dss_get_device(dssdev);
+ vid_dev->displays[vid_dev->num_displays++] = dssdev;
+ }
+
+ if (vid_dev->num_displays == 0) {
+ dev_err(&pdev->dev, "no displays\n");
+ ret = -EINVAL;
+ goto probe_err0;
+ }
+
+ vid_dev->num_overlays = omap_dss_get_num_overlays();
+ for (i = 0; i < vid_dev->num_overlays; i++)
+ vid_dev->overlays[i] = omap_dss_get_overlay(i);
+
+ vid_dev->num_managers = omap_dss_get_num_overlay_managers();
+ for (i = 0; i < vid_dev->num_managers; i++)
+ vid_dev->managers[i] = omap_dss_get_overlay_manager(i);
+
+ /* Get the Video1 and Video2 overlays.
+ * Set up the display attached to those overlays
+ */
+ for (i = 1; i < vid_dev->num_overlays; i++) {
+ ovl = omap_dss_get_overlay(i);
+ if (ovl->manager && ovl->manager->device) {
+ def_display = ovl->manager->device;
+ } else {
+ dev_warn(&pdev->dev, "cannot find display\n");
+ def_display = NULL;
+ }
+ if (def_display) {
+ ret = def_display->enable(def_display);
+ if (ret) {
+ /* Here we are not considering this an error
+ * as the display may already be enabled by
+ * the frame buffer driver
+ */
+ dev_warn(&pdev->dev,
+ "'%s' Display already enabled\n",
+ def_display->name);
+ }
+ /* set the update mode */
+ if (def_display->caps &
+ OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
+#ifdef CONFIG_FB_OMAP2_FORCE_AUTO_UPDATE
+ if (def_display->enable_te)
+ def_display->enable_te(def_display, 1);
+ if (def_display->set_update_mode)
+ def_display->set_update_mode(def_display,
+ OMAP_DSS_UPDATE_AUTO);
+#else /* MANUAL_UPDATE */
+ if (def_display->enable_te)
+ def_display->enable_te(def_display, 0);
+ if (def_display->set_update_mode)
+ def_display->set_update_mode(def_display,
+ OMAP_DSS_UPDATE_MANUAL);
+#endif
+ } else {
+ if (def_display->set_update_mode)
+ def_display->set_update_mode(def_display,
+ OMAP_DSS_UPDATE_AUTO);
+ }
+ }
+ }
+
+ if (v4l2_device_register(&pdev->dev, &vid_dev->v4l2_dev) < 0) {
+ dev_err(&pdev->dev, "v4l2_device_register failed\n");
+ ret = -ENODEV;
+ goto probe_err1;
+ }
+
+ ret = omap_vout_create_video_devices(pdev);
+ if (ret)
+ goto probe_err2;
+
+ for (i = 0; i < vid_dev->num_displays; i++) {
+ struct omap_dss_device *display = vid_dev->displays[i];
+
+ if (display->update)
+ display->update(display, 0, 0,
+ display->panel.timings.x_res,
+ display->panel.timings.y_res);
+ }
+ return 0;
+
+probe_err2:
+ v4l2_device_unregister(&vid_dev->v4l2_dev);
+probe_err1:
+ for (i = 1; i < vid_dev->num_overlays; i++) {
+ def_display = NULL;
+ ovl = omap_dss_get_overlay(i);
+ if (ovl->manager && ovl->manager->device)
+ def_display = ovl->manager->device;
+
+ if (def_display)
+ def_display->disable(def_display);
+ }
+probe_err0:
+ kfree(vid_dev);
+ return ret;
+}
+
+static struct platform_driver omap_vout_driver = {
+ .driver = {
+ .name = VOUT_NAME,
+ },
+ .probe = omap_vout_probe,
+ .remove = omap_vout_remove,
+};
+
+static int __init omap_vout_init(void)
+{
+ if (platform_driver_register(&omap_vout_driver) != 0) {
+ printk(KERN_ERR VOUT_NAME ":Could not register Video driver\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void omap_vout_cleanup(void)
+{
+ platform_driver_unregister(&omap_vout_driver);
+}
+
+late_initcall(omap_vout_init);
+module_exit(omap_vout_cleanup);
diff --git a/drivers/media/video/omap/omap_voutdef.h b/drivers/media/video/omap/omap_voutdef.h
new file mode 100644
index 000000000000..ea3a047f8bca
--- /dev/null
+++ b/drivers/media/video/omap/omap_voutdef.h
@@ -0,0 +1,147 @@
+/*
+ * omap_voutdef.h
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef OMAP_VOUTDEF_H
+#define OMAP_VOUTDEF_H
+
+#include <plat/display.h>
+
+#define YUYV_BPP 2
+#define RGB565_BPP 2
+#define RGB24_BPP 3
+#define RGB32_BPP 4
+#define TILE_SIZE 32
+#define YUYV_VRFB_BPP 2
+#define RGB_VRFB_BPP 1
+#define MAX_CID 3
+#define MAC_VRFB_CTXS 4
+#define MAX_VOUT_DEV 2
+#define MAX_OVLS 3
+#define MAX_DISPLAYS 3
+#define MAX_MANAGERS 3
+
+/* Enum for Rotation
+ * DSS understands rotation in 0, 1, 2, 3 context
+ * while V4L2 driver understands it as 0, 90, 180, 270
+ */
+enum dss_rotation {
+ dss_rotation_0_degree = 0,
+ dss_rotation_90_degree = 1,
+ dss_rotation_180_degree = 2,
+ dss_rotation_270_degree = 3,
+};
+/*
+ * This structure is used to store the DMA transfer parameters
+ * for VRFB hidden buffer
+ */
+struct vid_vrfb_dma {
+ int dev_id;
+ int dma_ch;
+ int req_status;
+ int tx_status;
+ wait_queue_head_t wait;
+};
+
+struct omapvideo_info {
+ int id;
+ int num_overlays;
+ struct omap_overlay *overlays[MAX_OVLS];
+};
+
+struct omap2video_device {
+ struct mutex mtx;
+
+ int state;
+
+ struct v4l2_device v4l2_dev;
+ struct omap_vout_device *vouts[MAX_VOUT_DEV];
+
+ int num_displays;
+ struct omap_dss_device *displays[MAX_DISPLAYS];
+ int num_overlays;
+ struct omap_overlay *overlays[MAX_OVLS];
+ int num_managers;
+ struct omap_overlay_manager *managers[MAX_MANAGERS];
+};
+
+/* per-device data structure */
+struct omap_vout_device {
+
+ struct omapvideo_info vid_info;
+ struct video_device *vfd;
+ struct omap2video_device *vid_dev;
+ int vid;
+ int opened;
+
+ /* we don't allow changing the image fmt/size once buffers have
+ * been allocated
+ */
+ int buffer_allocated;
+ /* allow reuse of a previously allocated buffer which is big enough */
+ int buffer_size;
+ /* keep buffer info across opens */
+ unsigned long buf_virt_addr[VIDEO_MAX_FRAME];
+ unsigned long buf_phy_addr[VIDEO_MAX_FRAME];
+ enum omap_color_mode dss_mode;
+
+ /* we don't allow requesting new buffers while old buffers are
+ * still mmapped
+ */
+ int mmap_count;
+
+ spinlock_t vbq_lock; /* spinlock for videobuf queues */
+ unsigned long field_count; /* field counter for videobuf_buffer */
+
+ /* true means streaming is in progress. */
+ bool streaming;
+
+ struct v4l2_pix_format pix;
+ struct v4l2_rect crop;
+ struct v4l2_window win;
+ struct v4l2_framebuffer fbuf;
+
+ /* Lock to protect the shared data structures in ioctl */
+ struct mutex lock;
+
+ /* V4L2 control structure for different control id */
+ struct v4l2_control control[MAX_CID];
+ enum dss_rotation rotation;
+ bool mirror;
+ int flicker_filter;
+ /* V4L2 control structure for different control id */
+
+ int bpp; /* bytes per pixel */
+ int vrfb_bpp; /* bytes per pixel with respect to VRFB */
+
+ struct vid_vrfb_dma vrfb_dma_tx;
+ unsigned int smsshado_phy_addr[MAC_VRFB_CTXS];
+ unsigned int smsshado_virt_addr[MAC_VRFB_CTXS];
+ struct vrfb vrfb_context[MAC_VRFB_CTXS];
+ bool vrfb_static_allocation;
+ unsigned int smsshado_size;
+ unsigned char pos;
+
+ int ps, vr_ps, line_length, first_int, field_id;
+ enum v4l2_memory memory;
+ struct videobuf_buffer *cur_frm, *next_frm;
+ struct list_head dma_queue;
+ u8 *queued_buf_addr[VIDEO_MAX_FRAME];
+ u32 cropped_offset;
+ s32 tv_field1_offset;
+ void *isr_handle;
+
+ /* Buffer queue variables */
+ struct omap_vout_device *vout;
+ enum v4l2_buf_type type;
+ struct videobuf_queue vbq;
+ int io_allowed;
+
+};
+#endif /* ifndef OMAP_VOUTDEF_H */
diff --git a/drivers/media/video/omap/omap_voutlib.c b/drivers/media/video/omap/omap_voutlib.c
new file mode 100644
index 000000000000..b941c761eef9
--- /dev/null
+++ b/drivers/media/video/omap/omap_voutlib.c
@@ -0,0 +1,293 @@
+/*
+ * omap_voutlib.c
+ *
+ * Copyright (C) 2005-2010 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ * Based on the OMAP2 camera driver
+ * Video-for-Linux (Version 2) camera capture driver for
+ * the OMAP24xx camera controller.
+ *
+ * Author: Andy Lowe (source@mvista.com)
+ *
+ * Copyright (C) 2004 MontaVista Software, Inc.
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+#include <plat/cpu.h>
+
+MODULE_AUTHOR("Texas Instruments");
+MODULE_DESCRIPTION("OMAP Video library");
+MODULE_LICENSE("GPL");
+
+/* Return the default overlay cropping rectangle in crop given the image
+ * size in pix and the video display size in fbuf. The default
+ * cropping rectangle is the largest rectangle no larger than the capture size
+ * that will fit on the display. The default cropping rectangle is centered in
+ * the image. All dimensions and offsets are rounded down to even numbers.
+ */
+void omap_vout_default_crop(struct v4l2_pix_format *pix,
+ struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop)
+{
+ crop->width = (pix->width < fbuf->fmt.width) ?
+ pix->width : fbuf->fmt.width;
+ crop->height = (pix->height < fbuf->fmt.height) ?
+ pix->height : fbuf->fmt.height;
+ crop->width &= ~1;
+ crop->height &= ~1;
+ crop->left = ((pix->width - crop->width) >> 1) & ~1;
+ crop->top = ((pix->height - crop->height) >> 1) & ~1;
+}
+EXPORT_SYMBOL_GPL(omap_vout_default_crop);
+
+/* Given a new render window in new_win, adjust the window to the
+ * nearest supported configuration. The adjusted window parameters are
+ * returned in new_win.
+ * Returns zero if successful, or -EINVAL if the requested window is
+ * impossible and cannot reasonably be adjusted.
+ */
+int omap_vout_try_window(struct v4l2_framebuffer *fbuf,
+ struct v4l2_window *new_win)
+{
+ struct v4l2_rect try_win;
+
+ /* make a working copy of the new_win rectangle */
+ try_win = new_win->w;
+
+ /* adjust the preview window so it fits on the display by clipping any
+ * offscreen areas
+ */
+ if (try_win.left < 0) {
+ try_win.width += try_win.left;
+ try_win.left = 0;
+ }
+ if (try_win.top < 0) {
+ try_win.height += try_win.top;
+ try_win.top = 0;
+ }
+ try_win.width = (try_win.width < fbuf->fmt.width) ?
+ try_win.width : fbuf->fmt.width;
+ try_win.height = (try_win.height < fbuf->fmt.height) ?
+ try_win.height : fbuf->fmt.height;
+ if (try_win.left + try_win.width > fbuf->fmt.width)
+ try_win.width = fbuf->fmt.width - try_win.left;
+ if (try_win.top + try_win.height > fbuf->fmt.height)
+ try_win.height = fbuf->fmt.height - try_win.top;
+ try_win.width &= ~1;
+ try_win.height &= ~1;
+
+ if (try_win.width <= 0 || try_win.height <= 0)
+ return -EINVAL;
+
+ /* We now have a valid preview window, so go with it */
+ new_win->w = try_win;
+ new_win->field = V4L2_FIELD_ANY;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(omap_vout_try_window);
+
+/* Given a new render window in new_win, adjust the window to the
+ * nearest supported configuration. The image cropping window in crop
+ * will also be adjusted if necessary. Preference is given to keeping the
+ * window as close to the requested configuration as possible. If
+ * successful, new_win, vout->win, and crop are updated.
+ * Returns zero if successful, or -EINVAL if the requested preview window is
+ * impossible and cannot reasonably be adjusted.
+ */
+int omap_vout_new_window(struct v4l2_rect *crop,
+ struct v4l2_window *win, struct v4l2_framebuffer *fbuf,
+ struct v4l2_window *new_win)
+{
+ int err;
+
+ err = omap_vout_try_window(fbuf, new_win);
+ if (err)
+ return err;
+
+ /* update our preview window */
+ win->w = new_win->w;
+ win->field = new_win->field;
+ win->chromakey = new_win->chromakey;
+
+ /* Adjust the cropping window to allow for resizing limitation */
+ if (cpu_is_omap24xx()) {
+ /* For 24xx limit is 8x to 1/2x scaling. */
+ if ((crop->height/win->w.height) >= 2)
+ crop->height = win->w.height * 2;
+
+ if ((crop->width/win->w.width) >= 2)
+ crop->width = win->w.width * 2;
+
+ if (crop->width > 768) {
+ /* The OMAP2420 vertical resizing line buffer is 768
+ * pixels wide. If the cropped image is wider than
+ * 768 pixels then it cannot be vertically resized.
+ */
+ if (crop->height != win->w.height)
+ crop->width = 768;
+ }
+ } else if (cpu_is_omap34xx()) {
+ /* For 34xx limit is 8x to 1/4x scaling. */
+ if ((crop->height/win->w.height) >= 4)
+ crop->height = win->w.height * 4;
+
+ if ((crop->width/win->w.width) >= 4)
+ crop->width = win->w.width * 4;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(omap_vout_new_window);
+
+/* Given a new cropping rectangle in new_crop, adjust the cropping rectangle to
+ * the nearest supported configuration. The image render window in win will
+ * also be adjusted if necessary. The preview window is adjusted such that the
+ * horizontal and vertical rescaling ratios stay constant. If the render
+ * window would fall outside the display boundaries, the cropping rectangle
+ * will also be adjusted to maintain the rescaling ratios. If successful, crop
+ * and win are updated.
+ * Returns zero if successful, or -EINVAL if the requested cropping rectangle is
+ * impossible and cannot reasonably be adjusted.
+ */
+int omap_vout_new_crop(struct v4l2_pix_format *pix,
+ struct v4l2_rect *crop, struct v4l2_window *win,
+ struct v4l2_framebuffer *fbuf, const struct v4l2_rect *new_crop)
+{
+ struct v4l2_rect try_crop;
+ unsigned long vresize, hresize;
+
+ /* make a working copy of the new_crop rectangle */
+ try_crop = *new_crop;
+
+ /* adjust the cropping rectangle so it fits in the image */
+ if (try_crop.left < 0) {
+ try_crop.width += try_crop.left;
+ try_crop.left = 0;
+ }
+ if (try_crop.top < 0) {
+ try_crop.height += try_crop.top;
+ try_crop.top = 0;
+ }
+ try_crop.width = (try_crop.width < pix->width) ?
+ try_crop.width : pix->width;
+ try_crop.height = (try_crop.height < pix->height) ?
+ try_crop.height : pix->height;
+ if (try_crop.left + try_crop.width > pix->width)
+ try_crop.width = pix->width - try_crop.left;
+ if (try_crop.top + try_crop.height > pix->height)
+ try_crop.height = pix->height - try_crop.top;
+
+ try_crop.width &= ~1;
+ try_crop.height &= ~1;
+
+ if (try_crop.width <= 0 || try_crop.height <= 0)
+ return -EINVAL;
+
+ if (cpu_is_omap24xx()) {
+ if (crop->height != win->w.height) {
+ /* If we're resizing vertically, we can't support a
+ * crop width wider than 768 pixels.
+ */
+ if (try_crop.width > 768)
+ try_crop.width = 768;
+ }
+ }
+ /* vertical resizing */
+ vresize = (1024 * crop->height) / win->w.height;
+ if (cpu_is_omap24xx() && (vresize > 2048))
+ vresize = 2048;
+ else if (cpu_is_omap34xx() && (vresize > 4096))
+ vresize = 4096;
+
+ win->w.height = ((1024 * try_crop.height) / vresize) & ~1;
+ if (win->w.height == 0)
+ win->w.height = 2;
+ if (win->w.height + win->w.top > fbuf->fmt.height) {
+ /* We made the preview window extend below the bottom of the
+ * display, so clip it to the display boundary and resize the
+ * cropping height to maintain the vertical resizing ratio.
+ */
+ win->w.height = (fbuf->fmt.height - win->w.top) & ~1;
+ if (try_crop.height == 0)
+ try_crop.height = 2;
+ }
+ /* horizontal resizing */
+ hresize = (1024 * crop->width) / win->w.width;
+ if (cpu_is_omap24xx() && (hresize > 2048))
+ hresize = 2048;
+ else if (cpu_is_omap34xx() && (hresize > 4096))
+ hresize = 4096;
+
+ win->w.width = ((1024 * try_crop.width) / hresize) & ~1;
+ if (win->w.width == 0)
+ win->w.width = 2;
+ if (win->w.width + win->w.left > fbuf->fmt.width) {
+ /* We made the preview window extend past the right side of the
+ * display, so clip it to the display boundary and resize the
+ * cropping width to maintain the horizontal resizing ratio.
+ */
+ win->w.width = (fbuf->fmt.width - win->w.left) & ~1;
+ if (try_crop.width == 0)
+ try_crop.width = 2;
+ }
+ if (cpu_is_omap24xx()) {
+ if ((try_crop.height/win->w.height) >= 2)
+ try_crop.height = win->w.height * 2;
+
+ if ((try_crop.width/win->w.width) >= 2)
+ try_crop.width = win->w.width * 2;
+
+ if (try_crop.width > 768) {
+ /* The OMAP2420 vertical resizing line buffer is
+ * 768 pixels wide. If the cropped image is wider
+ * than 768 pixels then it cannot be vertically resized.
+ */
+ if (try_crop.height != win->w.height)
+ try_crop.width = 768;
+ }
+ } else if (cpu_is_omap34xx()) {
+ if ((try_crop.height/win->w.height) >= 4)
+ try_crop.height = win->w.height * 4;
+
+ if ((try_crop.width/win->w.width) >= 4)
+ try_crop.width = win->w.width * 4;
+ }
+ /* update our cropping rectangle and we're done */
+ *crop = try_crop;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(omap_vout_new_crop);
+
+/* Given a new format in pix and fbuf, crop and win
+ * structures are initialized to default values. crop
+ * is initialized to the largest window size that will fit on the display. The
+ * crop window is centered in the image. win is initialized to
+ * the same size as crop and is centered on the display.
+ * All sizes and offsets are constrained to be even numbers.
+ */
+void omap_vout_new_format(struct v4l2_pix_format *pix,
+ struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop,
+ struct v4l2_window *win)
+{
+ /* crop defines the preview source window in the image capture
+ * buffer
+ */
+ omap_vout_default_crop(pix, fbuf, crop);
+
+ /* win defines the preview target window on the display */
+ win->w.width = crop->width;
+ win->w.height = crop->height;
+ win->w.left = ((fbuf->fmt.width - win->w.width) >> 1) & ~1;
+ win->w.top = ((fbuf->fmt.height - win->w.height) >> 1) & ~1;
+}
+EXPORT_SYMBOL_GPL(omap_vout_new_format);
+
diff --git a/drivers/media/video/omap/omap_voutlib.h b/drivers/media/video/omap/omap_voutlib.h
new file mode 100644
index 000000000000..a60b16e8bfc3
--- /dev/null
+++ b/drivers/media/video/omap/omap_voutlib.h
@@ -0,0 +1,34 @@
+/*
+ * omap_voutlib.h
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#ifndef OMAP_VOUTLIB_H
+#define OMAP_VOUTLIB_H
+
+extern void omap_vout_default_crop(struct v4l2_pix_format *pix,
+ struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop);
+
+extern int omap_vout_new_crop(struct v4l2_pix_format *pix,
+ struct v4l2_rect *crop, struct v4l2_window *win,
+ struct v4l2_framebuffer *fbuf,
+ const struct v4l2_rect *new_crop);
+
+extern int omap_vout_try_window(struct v4l2_framebuffer *fbuf,
+ struct v4l2_window *new_win);
+
+extern int omap_vout_new_window(struct v4l2_rect *crop,
+ struct v4l2_window *win, struct v4l2_framebuffer *fbuf,
+ struct v4l2_window *new_win);
+
+extern void omap_vout_new_format(struct v4l2_pix_format *pix,
+ struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop,
+ struct v4l2_window *win);
+#endif /* #ifndef OMAP_VOUTLIB_H */
+
diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
index ce76d952e161..f85b2ed8a2d8 100644
--- a/drivers/media/video/omap24xxcam.c
+++ b/drivers/media/video/omap24xxcam.c
@@ -452,8 +452,8 @@ static int omap24xxcam_vbq_setup(struct videobuf_queue *vbq, unsigned int *cnt,
*size = fh->pix.sizeimage;
/* accessing fh->cam->capture_mem is ok, it's constant */
- while (*size * *cnt > fh->cam->capture_mem)
- (*cnt)--;
+ if (*size * *cnt > fh->cam->capture_mem)
+ *cnt = fh->cam->capture_mem / *size;
return 0;
}
diff --git a/drivers/media/video/ov511.c b/drivers/media/video/ov511.c
index e0bce8dc74bf..6085d5582555 100644
--- a/drivers/media/video/ov511.c
+++ b/drivers/media/video/ov511.c
@@ -57,8 +57,8 @@
#define DRIVER_VERSION "v1.64 for Linux 2.5"
#define EMAIL "mark@alpha.dyndns.org"
#define DRIVER_AUTHOR "Mark McClelland <mark@alpha.dyndns.org> & Bret Wallach \
- & Orion Sky Lawlor <olawlor@acm.org> & Kevin Moore & Charl P. Botha \
- <cpbotha@ieee.org> & Claudio Matsuoka <claudio@conectiva.com>"
+& Orion Sky Lawlor <olawlor@acm.org> & Kevin Moore & Charl P. Botha \
+<cpbotha@ieee.org> & Claudio Matsuoka <claudio@conectiva.com>"
#define DRIVER_DESC "ov511 USB Camera Driver"
#define OV511_I2C_RETRIES 3
@@ -5916,11 +5916,6 @@ ov51x_disconnect(struct usb_interface *intf)
mutex_lock(&ov->lock);
usb_set_intfdata (intf, NULL);
- if (!ov) {
- mutex_unlock(&ov->lock);
- return;
- }
-
/* Free device number */
ov511_devused &= ~(1 << ov->nr);
diff --git a/drivers/media/video/ov7670.c b/drivers/media/video/ov7670.c
index aaa50f9b8e78..91c886ab15c6 100644
--- a/drivers/media/video/ov7670.c
+++ b/drivers/media/video/ov7670.c
@@ -198,6 +198,7 @@ struct ov7670_info {
struct ov7670_format_struct *fmt; /* Current format */
unsigned char sat; /* Saturation value */
int hue; /* Hue value */
+ u8 clkrc; /* Clock divider value */
};
static inline struct ov7670_info *to_state(struct v4l2_subdev *sd)
@@ -351,7 +352,7 @@ static struct regval_list ov7670_default_regs[] = {
static struct regval_list ov7670_fmt_yuv422[] = {
{ REG_COM7, 0x0 }, /* Selects YUV mode */
{ REG_RGB444, 0 }, /* No RGB444 please */
- { REG_COM1, 0 },
+ { REG_COM1, 0 }, /* CCIR601 */
{ REG_COM15, COM15_R00FF },
{ REG_COM9, 0x18 }, /* 4x gain ceiling; 0x8 is reserved bit */
{ 0x4f, 0x80 }, /* "matrix coefficient 1" */
@@ -367,7 +368,7 @@ static struct regval_list ov7670_fmt_yuv422[] = {
static struct regval_list ov7670_fmt_rgb565[] = {
{ REG_COM7, COM7_RGB }, /* Selects RGB mode */
{ REG_RGB444, 0 }, /* No RGB444 please */
- { REG_COM1, 0x0 },
+ { REG_COM1, 0x0 }, /* CCIR601 */
{ REG_COM15, COM15_RGB565 },
{ REG_COM9, 0x38 }, /* 16x gain ceiling; 0x8 is reserved bit */
{ 0x4f, 0xb3 }, /* "matrix coefficient 1" */
@@ -383,7 +384,7 @@ static struct regval_list ov7670_fmt_rgb565[] = {
static struct regval_list ov7670_fmt_rgb444[] = {
{ REG_COM7, COM7_RGB }, /* Selects RGB mode */
{ REG_RGB444, R444_ENABLE }, /* Enable xxxxrrrr ggggbbbb */
- { REG_COM1, 0x40 }, /* Magic reserved bit */
+ { REG_COM1, 0x0 }, /* CCIR601 */
{ REG_COM15, COM15_R01FE|COM15_RGB565 }, /* Data range needed? */
{ REG_COM9, 0x38 }, /* 16x gain ceiling; 0x8 is reserved bit */
{ 0x4f, 0xb3 }, /* "matrix coefficient 1" */
@@ -408,8 +409,13 @@ static struct regval_list ov7670_fmt_raw[] = {
/*
* Low-level register I/O.
+ *
+ * Note that there are two versions of these. On the XO 1, the
+ * i2c controller only does SMBUS, so that's what we use. The
+ * ov7670 is not really an SMBUS device, though, so the communication
+ * is not always entirely reliable.
*/
-
+#ifdef CONFIG_OLPC_XO_1
static int ov7670_read(struct v4l2_subdev *sd, unsigned char reg,
unsigned char *value)
{
@@ -432,9 +438,67 @@ static int ov7670_write(struct v4l2_subdev *sd, unsigned char reg,
int ret = i2c_smbus_write_byte_data(client, reg, value);
if (reg == REG_COM7 && (value & COM7_RESET))
- msleep(2); /* Wait for reset to run */
+ msleep(5); /* Wait for reset to run */
+ return ret;
+}
+
+#else /* ! CONFIG_OLPC_XO_1 */
+/*
+ * On most platforms, we'd rather do straight i2c I/O.
+ */
+static int ov7670_read(struct v4l2_subdev *sd, unsigned char reg,
+ unsigned char *value)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u8 data = reg;
+ struct i2c_msg msg;
+ int ret;
+
+ /*
+ * Send out the register address...
+ */
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = 1;
+ msg.buf = &data;
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret < 0) {
+ printk(KERN_ERR "Error %d on register write\n", ret);
+ return ret;
+ }
+ /*
+ * ...then read back the result.
+ */
+ msg.flags = I2C_M_RD;
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret >= 0) {
+ *value = data;
+ ret = 0;
+ }
+ return ret;
+}
+
+
+static int ov7670_write(struct v4l2_subdev *sd, unsigned char reg,
+ unsigned char value)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct i2c_msg msg;
+ unsigned char data[2] = { reg, value };
+ int ret;
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = 2;
+ msg.buf = data;
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret > 0)
+ ret = 0;
+ if (reg == REG_COM7 && (value & COM7_RESET))
+ msleep(5); /* Wait for reset to run */
return ret;
}
+#endif /* CONFIG_OLPC_XO_1 */
/*
@@ -744,22 +808,12 @@ static int ov7670_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
struct ov7670_format_struct *ovfmt;
struct ov7670_win_size *wsize;
struct ov7670_info *info = to_state(sd);
- unsigned char com7, clkrc = 0;
+ unsigned char com7;
ret = ov7670_try_fmt_internal(sd, fmt, &ovfmt, &wsize);
if (ret)
return ret;
/*
- * HACK: if we're running rgb565 we need to grab then rewrite
- * CLKRC. If we're *not*, however, then rewriting clkrc hoses
- * the colors.
- */
- if (fmt->fmt.pix.pixelformat == V4L2_PIX_FMT_RGB565) {
- ret = ov7670_read(sd, REG_CLKRC, &clkrc);
- if (ret)
- return ret;
- }
- /*
* COM7 is a pain in the ass, it doesn't like to be read then
* quickly written afterward. But we have everything we need
* to set it absolutely here, as long as the format-specific
@@ -779,8 +833,18 @@ static int ov7670_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
ret = ov7670_write_array(sd, wsize->regs);
info->fmt = ovfmt;
- if (fmt->fmt.pix.pixelformat == V4L2_PIX_FMT_RGB565 && ret == 0)
- ret = ov7670_write(sd, REG_CLKRC, clkrc);
+ /*
+ * If we're running RGB565, we must rewrite clkrc after setting
+ * the other parameters or the image looks poor. If we're *not*
+ * doing RGB565, we must not rewrite clkrc or the image looks
+ * *really* poor.
+ *
+ * (Update) Now that we retain clkrc state, we should be able
+ * to write it unconditionally, and that will make the frame
+ * rate persistent too.
+ */
+ if (ret == 0)
+ ret = ov7670_write(sd, REG_CLKRC, info->clkrc);
return ret;
}
@@ -791,20 +855,17 @@ static int ov7670_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
static int ov7670_g_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *parms)
{
struct v4l2_captureparm *cp = &parms->parm.capture;
- unsigned char clkrc;
- int ret;
+ struct ov7670_info *info = to_state(sd);
if (parms->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- ret = ov7670_read(sd, REG_CLKRC, &clkrc);
- if (ret < 0)
- return ret;
+
memset(cp, 0, sizeof(struct v4l2_captureparm));
cp->capability = V4L2_CAP_TIMEPERFRAME;
cp->timeperframe.numerator = 1;
cp->timeperframe.denominator = OV7670_FRAME_RATE;
- if ((clkrc & CLK_EXT) == 0 && (clkrc & CLK_SCALE) > 1)
- cp->timeperframe.denominator /= (clkrc & CLK_SCALE);
+ if ((info->clkrc & CLK_EXT) == 0 && (info->clkrc & CLK_SCALE) > 1)
+ cp->timeperframe.denominator /= (info->clkrc & CLK_SCALE);
return 0;
}
@@ -812,19 +873,14 @@ static int ov7670_s_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *parms)
{
struct v4l2_captureparm *cp = &parms->parm.capture;
struct v4l2_fract *tpf = &cp->timeperframe;
- unsigned char clkrc;
- int ret, div;
+ struct ov7670_info *info = to_state(sd);
+ int div;
if (parms->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
if (cp->extendedmode != 0)
return -EINVAL;
- /*
- * CLKRC has a reserved bit, so let's preserve it.
- */
- ret = ov7670_read(sd, REG_CLKRC, &clkrc);
- if (ret < 0)
- return ret;
+
if (tpf->numerator == 0 || tpf->denominator == 0)
div = 1; /* Reset to full rate */
else
@@ -833,10 +889,10 @@ static int ov7670_s_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *parms)
div = 1;
else if (div > CLK_SCALE)
div = CLK_SCALE;
- clkrc = (clkrc & 0x80) | div;
+ info->clkrc = (info->clkrc & 0x80) | div;
tpf->numerator = 1;
tpf->denominator = OV7670_FRAME_RATE/div;
- return ov7670_write(sd, REG_CLKRC, clkrc);
+ return ov7670_write(sd, REG_CLKRC, info->clkrc);
}
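
With clkrc cached in ov7670_info, s_parm derives the divider purely from the requested frame interval. A worked sketch of that mapping, assuming OV7670_FRAME_RATE is 30 and CLK_SCALE masks the 6-bit divider field as in the driver:

#include <stdio.h>

#define OV7670_FRAME_RATE 30    /* assumption: nominal full rate in fps */
#define CLK_SCALE 0x3f          /* assumption: divider field mask */

/* Map a requested time-per-frame (num/den seconds) to the CLKRC divider. */
static int timeperframe_to_div(unsigned int num, unsigned int den)
{
        int div;

        if (num == 0 || den == 0)
                return 1;                       /* reset to full rate */
        div = (num * OV7670_FRAME_RATE) / den;
        if (div == 0)
                div = 1;
        else if (div > CLK_SCALE)
                div = CLK_SCALE;
        return div;
}

int main(void)
{
        /* 1/30 s -> div 1 (30 fps); 1/15 s -> div 2; 1/5 s -> div 6 */
        printf("%d %d %d\n", timeperframe_to_div(1, 30),
               timeperframe_to_div(1, 15), timeperframe_to_div(1, 5));
        return 0;
}
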
@@ -1115,6 +1171,140 @@ static int ov7670_s_vflip(struct v4l2_subdev *sd, int value)
return ret;
}
+/*
+ * GAIN is split between REG_GAIN and REG_VREF[7:6]. If one believes
+ * the data sheet, the VREF parts should be the most significant, but
+ * experience shows otherwise. There seems to be little value in
+ * messing with the VREF bits, so we leave them alone.
+ */
+static int ov7670_g_gain(struct v4l2_subdev *sd, __s32 *value)
+{
+ int ret;
+ unsigned char gain;
+
+ ret = ov7670_read(sd, REG_GAIN, &gain);
+ *value = gain;
+ return ret;
+}
+
+static int ov7670_s_gain(struct v4l2_subdev *sd, int value)
+{
+ int ret;
+ unsigned char com8;
+
+ ret = ov7670_write(sd, REG_GAIN, value & 0xff);
+ /* Have to turn off AGC as well */
+ if (ret == 0) {
+ ret = ov7670_read(sd, REG_COM8, &com8);
+ ret = ov7670_write(sd, REG_COM8, com8 & ~COM8_AGC);
+ }
+ return ret;
+}
+
+/*
+ * Tweak autogain.
+ */
+static int ov7670_g_autogain(struct v4l2_subdev *sd, __s32 *value)
+{
+ int ret;
+ unsigned char com8;
+
+ ret = ov7670_read(sd, REG_COM8, &com8);
+ *value = (com8 & COM8_AGC) != 0;
+ return ret;
+}
+
+static int ov7670_s_autogain(struct v4l2_subdev *sd, int value)
+{
+ int ret;
+ unsigned char com8;
+
+ ret = ov7670_read(sd, REG_COM8, &com8);
+ if (ret == 0) {
+ if (value)
+ com8 |= COM8_AGC;
+ else
+ com8 &= ~COM8_AGC;
+ ret = ov7670_write(sd, REG_COM8, com8);
+ }
+ return ret;
+}
+
+/*
+ * Exposure is spread all over the place: top 6 bits in AECHH, middle
+ * 8 in AECH, and two stashed in COM1 just for the hell of it.
+ */
+static int ov7670_g_exp(struct v4l2_subdev *sd, __s32 *value)
+{
+ int ret;
+ unsigned char com1, aech, aechh;
+
+ ret = ov7670_read(sd, REG_COM1, &com1) +
+ ov7670_read(sd, REG_AECH, &aech) +
+ ov7670_read(sd, REG_AECHH, &aechh);
+ *value = ((aechh & 0x3f) << 10) | (aech << 2) | (com1 & 0x03);
+ return ret;
+}
+
+static int ov7670_s_exp(struct v4l2_subdev *sd, int value)
+{
+ int ret;
+ unsigned char com1, com8, aech, aechh;
+
+ ret = ov7670_read(sd, REG_COM1, &com1) +
+ ov7670_read(sd, REG_COM8, &com8);
+ ov7670_read(sd, REG_AECHH, &aechh);
+ if (ret)
+ return ret;
+
+ com1 = (com1 & 0xfc) | (value & 0x03);
+ aech = (value >> 2) & 0xff;
+ aechh = (aechh & 0xc0) | ((value >> 10) & 0x3f);
+ ret = ov7670_write(sd, REG_COM1, com1) +
+ ov7670_write(sd, REG_AECH, aech) +
+ ov7670_write(sd, REG_AECHH, aechh);
+ /* Have to turn off AEC as well */
+ if (ret == 0)
+ ret = ov7670_write(sd, REG_COM8, com8 & ~COM8_AEC);
+ return ret;
+}
+
+/*
+ * Tweak autoexposure.
+ */
+static int ov7670_g_autoexp(struct v4l2_subdev *sd, __s32 *value)
+{
+ int ret;
+ unsigned char com8;
+ enum v4l2_exposure_auto_type *atype = (enum v4l2_exposure_auto_type *) value;
+
+ ret = ov7670_read(sd, REG_COM8, &com8);
+ if (com8 & COM8_AEC)
+ *atype = V4L2_EXPOSURE_AUTO;
+ else
+ *atype = V4L2_EXPOSURE_MANUAL;
+ return ret;
+}
+
+static int ov7670_s_autoexp(struct v4l2_subdev *sd,
+ enum v4l2_exposure_auto_type value)
+{
+ int ret;
+ unsigned char com8;
+
+ ret = ov7670_read(sd, REG_COM8, &com8);
+ if (ret == 0) {
+ if (value == V4L2_EXPOSURE_AUTO)
+ com8 |= COM8_AEC;
+ else
+ com8 &= ~COM8_AEC;
+ ret = ov7670_write(sd, REG_COM8, com8);
+ }
+ return ret;
+}
+
+
+
static int ov7670_queryctrl(struct v4l2_subdev *sd,
struct v4l2_queryctrl *qc)
{
@@ -1131,6 +1321,14 @@ static int ov7670_queryctrl(struct v4l2_subdev *sd,
return v4l2_ctrl_query_fill(qc, 0, 256, 1, 128);
case V4L2_CID_HUE:
return v4l2_ctrl_query_fill(qc, -180, 180, 5, 0);
+ case V4L2_CID_GAIN:
+ return v4l2_ctrl_query_fill(qc, 0, 255, 1, 128);
+ case V4L2_CID_AUTOGAIN:
+ return v4l2_ctrl_query_fill(qc, 0, 1, 1, 1);
+ case V4L2_CID_EXPOSURE:
+ return v4l2_ctrl_query_fill(qc, 0, 65535, 1, 500);
+ case V4L2_CID_EXPOSURE_AUTO:
+ return v4l2_ctrl_query_fill(qc, 0, 1, 1, 0);
}
return -EINVAL;
}
@@ -1150,6 +1348,14 @@ static int ov7670_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
return ov7670_g_vflip(sd, &ctrl->value);
case V4L2_CID_HFLIP:
return ov7670_g_hflip(sd, &ctrl->value);
+ case V4L2_CID_GAIN:
+ return ov7670_g_gain(sd, &ctrl->value);
+ case V4L2_CID_AUTOGAIN:
+ return ov7670_g_autogain(sd, &ctrl->value);
+ case V4L2_CID_EXPOSURE:
+ return ov7670_g_exp(sd, &ctrl->value);
+ case V4L2_CID_EXPOSURE_AUTO:
+ return ov7670_g_autoexp(sd, &ctrl->value);
}
return -EINVAL;
}
@@ -1169,6 +1375,15 @@ static int ov7670_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
return ov7670_s_vflip(sd, ctrl->value);
case V4L2_CID_HFLIP:
return ov7670_s_hflip(sd, ctrl->value);
+ case V4L2_CID_GAIN:
+ return ov7670_s_gain(sd, ctrl->value);
+ case V4L2_CID_AUTOGAIN:
+ return ov7670_s_autogain(sd, ctrl->value);
+ case V4L2_CID_EXPOSURE:
+ return ov7670_s_exp(sd, ctrl->value);
+ case V4L2_CID_EXPOSURE_AUTO:
+ return ov7670_s_autoexp(sd,
+ (enum v4l2_exposure_auto_type) ctrl->value);
}
return -EINVAL;
}
@@ -1268,6 +1483,7 @@ static int ov7670_probe(struct i2c_client *client,
info->fmt = &ov7670_formats[0];
info->sat = 128; /* Review this */
+ info->clkrc = 1; /* 30fps */
return 0;
}
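
The exposure helpers added to this driver spread a 16-bit exposure value over three registers: AECHH[5:0] carries bits 15..10, AECH carries bits 9..2, and COM1[1:0] carries bits 1..0. A self-contained sketch of that packing and unpacking (hypothetical helper names):

#include <stdio.h>

/* Pack a 16-bit exposure into the COM1/AECH/AECHH register layout. */
static void exp_pack(unsigned int value, unsigned char *com1,
                     unsigned char *aech, unsigned char *aechh)
{
        *com1  = (*com1  & 0xfc) | (value & 0x03);              /* bits 1..0   */
        *aech  = (value >> 2) & 0xff;                           /* bits 9..2   */
        *aechh = (*aechh & 0xc0) | ((value >> 10) & 0x3f);      /* bits 15..10 */
}

/* Recombine the three registers into one exposure value. */
static unsigned int exp_unpack(unsigned char com1, unsigned char aech,
                               unsigned char aechh)
{
        return ((aechh & 0x3f) << 10) | (aech << 2) | (com1 & 0x03);
}

int main(void)
{
        unsigned char com1 = 0, aech = 0, aechh = 0;

        exp_pack(500, &com1, &aech, &aechh);            /* the driver's default */
        printf("%u\n", exp_unpack(com1, aech, aechh));  /* prints 500 */
        return 0;
}
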
diff --git a/drivers/media/video/ov9640.c b/drivers/media/video/ov9640.c
index 47bf60ceb7a2..36599a65f548 100644
--- a/drivers/media/video/ov9640.c
+++ b/drivers/media/video/ov9640.c
@@ -59,9 +59,9 @@ static const struct ov9640_reg ov9640_regs_dflt[] = {
* COM12 |= OV9640_COM12_YUV_AVG
*
* for RGB, alter the following registers:
- * COM7 |= OV9640_COM7_RGB
- * COM13 |= OV9640_COM13_RGB_AVG
- * COM15 |= proper RGB color encoding mode
+ * COM7 |= OV9640_COM7_RGB
+ * COM13 |= OV9640_COM13_RGB_AVG
+ * COM15 |= proper RGB color encoding mode
*/
static const struct ov9640_reg ov9640_regs_qqcif[] = {
{ OV9640_CLKRC, OV9640_CLKRC_DPLL_EN | OV9640_CLKRC_DIV(0x0f) },
diff --git a/drivers/media/video/pms.c b/drivers/media/video/pms.c
index 0598bbd3f368..7129b50757db 100644
--- a/drivers/media/video/pms.c
+++ b/drivers/media/video/pms.c
@@ -61,7 +61,6 @@ struct pms {
int depth;
int input;
s32 brightness, saturation, hue, contrast;
- unsigned long in_use;
struct mutex lock;
int i2c_count;
struct i2c_info i2cinfo[64];
@@ -931,25 +930,8 @@ static ssize_t pms_read(struct file *file, char __user *buf,
return len;
}
-static int pms_exclusive_open(struct file *file)
-{
- struct pms *dev = video_drvdata(file);
-
- return test_and_set_bit(0, &dev->in_use) ? -EBUSY : 0;
-}
-
-static int pms_exclusive_release(struct file *file)
-{
- struct pms *dev = video_drvdata(file);
-
- clear_bit(0, &dev->in_use);
- return 0;
-}
-
static const struct v4l2_file_operations pms_fops = {
.owner = THIS_MODULE,
- .open = pms_exclusive_open,
- .release = pms_exclusive_release,
.ioctl = video_ioctl2,
.read = pms_read,
};
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
index 712b300f723f..301ef197d038 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
@@ -2028,7 +2028,7 @@ static void pvr2_hdw_cx25840_vbi_hack(struct pvr2_hdw *hdw)
memset(&fmt, 0, sizeof(fmt));
fmt.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
v4l2_device_call_all(&hdw->v4l2_dev, hdw->decoder_client_id,
- video, s_fmt, &fmt);
+ vbi, s_sliced_fmt, &fmt.fmt.sliced);
}
diff --git a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
index bf1e0fe9f4d2..fe4159d478b3 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
@@ -49,7 +49,7 @@ struct pvr2_v4l2_dev {
struct pvr2_v4l2_fh {
struct pvr2_channel channel;
- struct pvr2_v4l2_dev *dev_info;
+ struct pvr2_v4l2_dev *pdi;
enum v4l2_priority prio;
struct pvr2_ioread *rhp;
struct file *file;
@@ -162,7 +162,7 @@ static long pvr2_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
{
struct pvr2_v4l2_fh *fh = file->private_data;
struct pvr2_v4l2 *vp = fh->vhead;
- struct pvr2_v4l2_dev *dev_info = fh->dev_info;
+ struct pvr2_v4l2_dev *pdi = fh->pdi;
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
long ret = -EINVAL;
@@ -564,14 +564,14 @@ static long pvr2_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_STREAMON:
{
- if (!fh->dev_info->stream) {
+ if (!fh->pdi->stream) {
/* No stream defined for this node. This means
that we're not currently allowed to stream from
this node. */
ret = -EPERM;
break;
}
- ret = pvr2_hdw_set_stream_type(hdw,dev_info->config);
+ ret = pvr2_hdw_set_stream_type(hdw,pdi->config);
if (ret < 0) return ret;
ret = pvr2_hdw_set_streaming(hdw,!0);
break;
@@ -579,7 +579,7 @@ static long pvr2_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_STREAMOFF:
{
- if (!fh->dev_info->stream) {
+ if (!fh->pdi->stream) {
/* No stream defined for this node. This means
that we're not currently allowed to stream from
this node. */
@@ -1032,7 +1032,7 @@ static int pvr2_v4l2_open(struct file *file)
}
init_waitqueue_head(&fhp->wait_data);
- fhp->dev_info = dip;
+ fhp->pdi = dip;
pvr2_trace(PVR2_TRACE_STRUCT,"Creating pvr_v4l2_fh id=%p",fhp);
pvr2_channel_init(&fhp->channel,vp->channel.mc_head);
@@ -1113,7 +1113,7 @@ static int pvr2_v4l2_iosetup(struct pvr2_v4l2_fh *fh)
struct pvr2_hdw *hdw;
if (fh->rhp) return 0;
- if (!fh->dev_info->stream) {
+ if (!fh->pdi->stream) {
/* No stream defined for this node. This means that we're
not currently allowed to stream from this node. */
return -EPERM;
@@ -1122,21 +1122,21 @@ static int pvr2_v4l2_iosetup(struct pvr2_v4l2_fh *fh)
/* First read() attempt. Try to claim the stream and start
it... */
if ((ret = pvr2_channel_claim_stream(&fh->channel,
- fh->dev_info->stream)) != 0) {
+ fh->pdi->stream)) != 0) {
/* Someone else must already have it */
return ret;
}
- fh->rhp = pvr2_channel_create_mpeg_stream(fh->dev_info->stream);
+ fh->rhp = pvr2_channel_create_mpeg_stream(fh->pdi->stream);
if (!fh->rhp) {
pvr2_channel_claim_stream(&fh->channel,NULL);
return -ENOMEM;
}
hdw = fh->channel.mc_head->hdw;
- sp = fh->dev_info->stream->stream;
+ sp = fh->pdi->stream->stream;
pvr2_stream_set_callback(sp,(pvr2_stream_callback)pvr2_v4l2_notify,fh);
- pvr2_hdw_set_stream_type(hdw,fh->dev_info->config);
+ pvr2_hdw_set_stream_type(hdw,fh->pdi->config);
if ((ret = pvr2_hdw_set_streaming(hdw,!0)) < 0) return ret;
return pvr2_ioread_set_enabled(fh->rhp,!0);
}
diff --git a/drivers/media/video/pwc/Kconfig b/drivers/media/video/pwc/Kconfig
index 340f954aba34..11980db22d31 100644
--- a/drivers/media/video/pwc/Kconfig
+++ b/drivers/media/video/pwc/Kconfig
@@ -39,7 +39,7 @@ config USB_PWC_DEBUG
config USB_PWC_INPUT_EVDEV
bool "USB Philips Cameras input events device support"
default y
- depends on USB_PWC=INPUT || INPUT=y
+ depends on USB_PWC && (USB_PWC=INPUT || INPUT=y)
---help---
This option makes USB Philips cameras register the snapshot button as
an input device to report button events.
diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c
index 04bf5c11308d..7fe70e718656 100644
--- a/drivers/media/video/pxa_camera.c
+++ b/drivers/media/video/pxa_camera.c
@@ -253,8 +253,8 @@ static int pxa_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
if (0 == *count)
*count = 32;
- while (*size * *count > vid_limit * 1024 * 1024)
- (*count)--;
+ if (*size * *count > vid_limit * 1024 * 1024)
+ *count = (vid_limit * 1024 * 1024) / *size;
return 0;
}
diff --git a/drivers/media/video/rj54n1cb0c.c b/drivers/media/video/rj54n1cb0c.c
index 9277194cd821..bbd9c11e2c5a 100644
--- a/drivers/media/video/rj54n1cb0c.c
+++ b/drivers/media/video/rj54n1cb0c.c
@@ -555,15 +555,15 @@ static int rj54n1_commit(struct i2c_client *client)
return ret;
}
-static int rj54n1_sensor_scale(struct v4l2_subdev *sd, u32 *in_w, u32 *in_h,
- u32 *out_w, u32 *out_h);
+static int rj54n1_sensor_scale(struct v4l2_subdev *sd, s32 *in_w, s32 *in_h,
+ s32 *out_w, s32 *out_h);
static int rj54n1_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
struct i2c_client *client = sd->priv;
struct rj54n1 *rj54n1 = to_rj54n1(client);
struct v4l2_rect *rect = &a->c;
- unsigned int dummy = 0, output_w, output_h,
+ int dummy = 0, output_w, output_h,
input_w = rect->width, input_h = rect->height;
int ret;
@@ -577,7 +577,7 @@ static int rj54n1_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
output_w = (input_w * 1024 + rj54n1->resize / 2) / rj54n1->resize;
output_h = (input_h * 1024 + rj54n1->resize / 2) / rj54n1->resize;
- dev_dbg(&client->dev, "Scaling for %ux%u : %u = %ux%u\n",
+ dev_dbg(&client->dev, "Scaling for %dx%d : %u = %dx%d\n",
input_w, input_h, rj54n1->resize, output_w, output_h);
ret = rj54n1_sensor_scale(sd, &input_w, &input_h, &output_w, &output_h);
@@ -638,8 +638,8 @@ static int rj54n1_g_fmt(struct v4l2_subdev *sd,
* the output one, updates the window sizes and returns an error or the resize
* coefficient on success. Note: we only use the "Fixed Scaling" on this camera.
*/
-static int rj54n1_sensor_scale(struct v4l2_subdev *sd, u32 *in_w, u32 *in_h,
- u32 *out_w, u32 *out_h)
+static int rj54n1_sensor_scale(struct v4l2_subdev *sd, s32 *in_w, s32 *in_h,
+ s32 *out_w, s32 *out_h)
{
struct i2c_client *client = sd->priv;
struct rj54n1 *rj54n1 = to_rj54n1(client);
@@ -749,7 +749,7 @@ static int rj54n1_sensor_scale(struct v4l2_subdev *sd, u32 *in_w, u32 *in_h,
* improve the image quality or stability for larger frames (see comment
* above), but I didn't check the framerate.
*/
- skip = min(resize / 1024, (unsigned)15);
+ skip = min(resize / 1024, 15U);
inc_sel = 1 << skip;
@@ -819,7 +819,7 @@ static int rj54n1_sensor_scale(struct v4l2_subdev *sd, u32 *in_w, u32 *in_h,
*out_w = output_w;
*out_h = output_h;
- dev_dbg(&client->dev, "Scaled for %ux%u : %u = %ux%u, skip %u\n",
+ dev_dbg(&client->dev, "Scaled for %dx%d : %u = %ux%u, skip %u\n",
*in_w, *in_h, resize, output_w, output_h, skip);
return resize;
@@ -1017,7 +1017,7 @@ static int rj54n1_s_fmt(struct v4l2_subdev *sd,
struct i2c_client *client = sd->priv;
struct rj54n1 *rj54n1 = to_rj54n1(client);
const struct rj54n1_datafmt *fmt;
- unsigned int output_w, output_h, max_w, max_h,
+ int output_w, output_h, max_w, max_h,
input_w = rj54n1->rect.width, input_h = rj54n1->rect.height;
int ret;
diff --git a/drivers/media/video/s2255drv.c b/drivers/media/video/s2255drv.c
index 3de914deb8ee..ac9c40cf5835 100644
--- a/drivers/media/video/s2255drv.c
+++ b/drivers/media/video/s2255drv.c
@@ -1,7 +1,7 @@
/*
* s2255drv.c - a driver for the Sensoray 2255 USB video capture device
*
- * Copyright (C) 2007-2008 by Sensoray Company Inc.
+ * Copyright (C) 2007-2010 by Sensoray Company Inc.
* Dean Anderson
*
* Some video buffer code based on vivi driver:
@@ -52,14 +52,19 @@
#include <linux/smp_lock.h>
#include <media/videobuf-vmalloc.h>
#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <linux/vmalloc.h>
#include <linux/usb.h>
+#define S2255_MAJOR_VERSION 1
+#define S2255_MINOR_VERSION 20
+#define S2255_RELEASE 0
+#define S2255_VERSION KERNEL_VERSION(S2255_MAJOR_VERSION, \
+ S2255_MINOR_VERSION, \
+ S2255_RELEASE)
#define FIRMWARE_FILE_NAME "f2255usb.bin"
-
-
/* default JPEG quality */
#define S2255_DEF_JPEG_QUAL 50
/* vendor request in */
@@ -76,14 +81,14 @@
#define S2255_LOAD_TIMEOUT (5000 + S2255_DSP_BOOTTIME)
#define S2255_DEF_BUFS 16
#define S2255_SETMODE_TIMEOUT 500
-#define MAX_CHANNELS 4
-#define S2255_MARKER_FRAME 0x2255DA4AL
-#define S2255_MARKER_RESPONSE 0x2255ACACL
-#define S2255_RESPONSE_SETMODE 0x01
-#define S2255_RESPONSE_FW 0x10
+#define S2255_VIDSTATUS_TIMEOUT 350
+#define S2255_MARKER_FRAME cpu_to_le32(0x2255DA4AL)
+#define S2255_MARKER_RESPONSE cpu_to_le32(0x2255ACACL)
+#define S2255_RESPONSE_SETMODE cpu_to_le32(0x01)
+#define S2255_RESPONSE_FW cpu_to_le32(0x10)
+#define S2255_RESPONSE_STATUS cpu_to_le32(0x20)
#define S2255_USB_XFER_SIZE (16 * 1024)
#define MAX_CHANNELS 4
-#define MAX_PIPE_BUFFERS 1
#define SYS_FRAMES 4
/* maximum size is PAL full size plus room for the marker header(s) */
#define SYS_FRAMES_MAXSIZE (720*288*2*2 + 4096)
@@ -118,9 +123,10 @@
#define COLOR_YUVPK 2 /* YUV packed */
#define COLOR_Y8 4 /* monochrome */
#define COLOR_JPG 5 /* JPEG */
-#define MASK_COLOR 0xff
-#define MASK_JPG_QUALITY 0xff00
+#define MASK_COLOR 0x000000ff
+#define MASK_JPG_QUALITY 0x0000ff00
+#define MASK_INPUT_TYPE 0x000f0000
/* frame decimation. Not implemented by V4L yet(experimental in V4L) */
#define FDEC_1 1 /* capture every frame. default */
#define FDEC_2 2 /* capture every 2nd frame */
@@ -139,12 +145,12 @@
#define DEF_HUE 0
/* usb config commands */
-#define IN_DATA_TOKEN 0x2255c0de
-#define CMD_2255 0xc2255000
-#define CMD_SET_MODE (CMD_2255 | 0x10)
-#define CMD_START (CMD_2255 | 0x20)
-#define CMD_STOP (CMD_2255 | 0x30)
-#define CMD_STATUS (CMD_2255 | 0x40)
+#define IN_DATA_TOKEN cpu_to_le32(0x2255c0de)
+#define CMD_2255 cpu_to_le32(0xc2255000)
+#define CMD_SET_MODE cpu_to_le32((CMD_2255 | 0x10))
+#define CMD_START cpu_to_le32((CMD_2255 | 0x20))
+#define CMD_STOP cpu_to_le32((CMD_2255 | 0x30))
+#define CMD_STATUS cpu_to_le32((CMD_2255 | 0x40))
struct s2255_mode {
u32 format; /* input video format (NTSC, PAL) */
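
The command tokens above are now built with cpu_to_le32() so the 512-byte configuration block goes over USB in little-endian order on any host. A user-space sketch of the same pattern with htole32(), reusing the IN_DATA_TOKEN and CMD_SET_MODE values from the hunk above (the surrounding buffer layout is illustrative only):

#include <endian.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define IN_DATA_TOKEN   htole32(0x2255c0deUL)
#define CMD_SET_MODE    htole32(0xc2255010UL)   /* CMD_2255 | 0x10 */

int main(void)
{
        uint32_t buf[128];                      /* 512-byte command block */

        memset(buf, 0, sizeof(buf));
        buf[0] = IN_DATA_TOKEN;                 /* already little-endian */
        buf[1] = htole32(3);                    /* channel, converted at use */
        buf[2] = CMD_SET_MODE;
        /* prints the same four wire bytes on any host: de c0 55 22 */
        printf("%02x %02x %02x %02x\n",
               ((uint8_t *)buf)[0], ((uint8_t *)buf)[1],
               ((uint8_t *)buf)[2], ((uint8_t *)buf)[3]);
        return 0;
}
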
@@ -194,7 +200,6 @@ struct s2255_dmaqueue {
#define S2255_FW_SUCCESS 2
#define S2255_FW_FAILED 3
#define S2255_FW_DISCONNECTING 4
-
#define S2255_FW_MARKER cpu_to_le32(0x22552f2f)
/* 2255 read states */
#define S2255_READ_IDLE 0
@@ -223,8 +228,10 @@ struct s2255_pipeinfo {
struct s2255_fmt; /*forward declaration */
struct s2255_dev {
+ struct video_device vdev[MAX_CHANNELS];
+ struct v4l2_device v4l2_dev;
+ atomic_t channels; /* number of channels registered */
int frames;
- int users[MAX_CHANNELS];
struct mutex lock;
struct mutex open_lock;
int resources[MAX_CHANNELS];
@@ -233,11 +240,10 @@ struct s2255_dev {
u8 read_endpoint;
struct s2255_dmaqueue vidq[MAX_CHANNELS];
- struct video_device *vdev[MAX_CHANNELS];
struct timer_list timer;
struct s2255_fw *fw_data;
- struct s2255_pipeinfo pipes[MAX_PIPE_BUFFERS];
- struct s2255_bufferi buffer[MAX_CHANNELS];
+ struct s2255_pipeinfo pipe;
+ struct s2255_bufferi buffer[MAX_CHANNELS];
struct s2255_mode mode[MAX_CHANNELS];
/* jpeg compression */
struct v4l2_jpegcompression jc[MAX_CHANNELS];
@@ -261,11 +267,21 @@ struct s2255_dev {
int chn_configured[MAX_CHANNELS];
wait_queue_head_t wait_setmode[MAX_CHANNELS];
int setmode_ready[MAX_CHANNELS];
+ /* video status items */
+ int vidstatus[MAX_CHANNELS];
+ wait_queue_head_t wait_vidstatus[MAX_CHANNELS];
+ int vidstatus_ready[MAX_CHANNELS];
int chn_ready;
- struct kref kref;
spinlock_t slock;
+ /* dsp firmware version (f2255usb.bin) */
+ int dsp_fw_ver;
+ u16 pid; /* product id */
};
-#define to_s2255_dev(d) container_of(d, struct s2255_dev, kref)
+
+static inline struct s2255_dev *to_s2255_dev(struct v4l2_device *v4l2_dev)
+{
+ return container_of(v4l2_dev, struct s2255_dev, v4l2_dev);
+}
struct s2255_fmt {
char *name;
@@ -296,17 +312,43 @@ struct s2255_fh {
/* current cypress EEPROM firmware version */
#define S2255_CUR_USB_FWVER ((3 << 8) | 6)
-#define S2255_MAJOR_VERSION 1
-#define S2255_MINOR_VERSION 14
-#define S2255_RELEASE 0
-#define S2255_VERSION KERNEL_VERSION(S2255_MAJOR_VERSION, \
- S2255_MINOR_VERSION, \
- S2255_RELEASE)
-
-/* vendor ids */
-#define USB_S2255_VENDOR_ID 0x1943
-#define USB_S2255_PRODUCT_ID 0x2255
+/* current DSP FW version */
+#define S2255_CUR_DSP_FWVER 8
+/* Need DSP version 5+ for video status feature */
+#define S2255_MIN_DSP_STATUS 5
+#define S2255_MIN_DSP_COLORFILTER 8
#define S2255_NORMS (V4L2_STD_PAL | V4L2_STD_NTSC)
+
+/* private V4L2 controls */
+
+/*
+ * The following chart displays how COLORFILTER should be set
+ * =========================================================
+ * = fourcc = COLORFILTER =
+ * = ===============================
+ * = = 0 = 1 =
+ * =========================================================
+ * = V4L2_PIX_FMT_GREY(Y8) = monochrome from = monochrome=
+ * = = s-video or = composite =
+ * = = B/W camera = input =
+ * =========================================================
+ * = other = color, svideo = color, =
+ * = = = composite =
+ * =========================================================
+ *
+ * Notes:
+ * channels 0-3 on 2255 are composite
+ * channels 0-1 on 2257 are composite, 2-3 are s-video
+ * If COLORFILTER is 0 with a composite color camera connected,
+ * the output will appear monochrome but hatching
+ * will occur.
+ * COLORFILTER is different from "color killer" and "color effects"
+ * for reasons above.
+ */
+#define S2255_V4L2_YC_ON 1
+#define S2255_V4L2_YC_OFF 0
+#define V4L2_CID_PRIVATE_COLORFILTER (V4L2_CID_PRIVATE_BASE + 0)
+
/* frame prefix size (sent once every frame) */
#define PREFIX_SIZE 512
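
The chart above documents a driver-private menu control rooted at V4L2_CID_PRIVATE_BASE. From user space it is exercised through the standard control ioctls; a reduced sketch (device path assumed, error handling trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

#define V4L2_CID_PRIVATE_COLORFILTER (V4L2_CID_PRIVATE_BASE + 0)

int main(void)
{
        struct v4l2_queryctrl qc;
        struct v4l2_control ctrl;
        int fd = open("/dev/video0", O_RDWR);   /* assumption: first s2255 node */

        if (fd < 0)
                return 1;
        memset(&qc, 0, sizeof(qc));
        qc.id = V4L2_CID_PRIVATE_COLORFILTER;
        if (ioctl(fd, VIDIOC_QUERYCTRL, &qc) == 0) {
                printf("%s: %d..%d default %d\n", (char *)qc.name,
                       qc.minimum, qc.maximum, qc.default_value);
                ctrl.id = V4L2_CID_PRIVATE_COLORFILTER;
                ctrl.value = 1;                 /* S2255_V4L2_YC_ON */
                ioctl(fd, VIDIOC_S_CTRL, &ctrl);
        }
        return 0;
}
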
@@ -325,9 +367,8 @@ static void s2255_fillbuff(struct s2255_dev *dev, struct s2255_buffer *buf,
static int s2255_set_mode(struct s2255_dev *dev, unsigned long chn,
struct s2255_mode *mode);
static int s2255_board_shutdown(struct s2255_dev *dev);
-static void s2255_exit_v4l(struct s2255_dev *dev);
static void s2255_fwload_start(struct s2255_dev *dev, int reset);
-static void s2255_destroy(struct kref *kref);
+static void s2255_destroy(struct s2255_dev *dev);
static long s2255_vendor_req(struct s2255_dev *dev, unsigned char req,
u16 index, u16 value, void *buf,
s32 buf_len, int bOut);
@@ -347,7 +388,6 @@ static long s2255_vendor_req(struct s2255_dev *dev, unsigned char req,
static struct usb_driver s2255_driver;
-
/* Declare static vars that will be used as parameters */
static unsigned int vid_limit = 16; /* Video memory limit, in Mb */
@@ -362,58 +402,16 @@ module_param(video_nr, int, 0644);
MODULE_PARM_DESC(video_nr, "start video minor(-1 default autodetect)");
/* USB device table */
+#define USB_SENSORAY_VID 0x1943
static struct usb_device_id s2255_table[] = {
- {USB_DEVICE(USB_S2255_VENDOR_ID, USB_S2255_PRODUCT_ID)},
+ {USB_DEVICE(USB_SENSORAY_VID, 0x2255)},
+ {USB_DEVICE(USB_SENSORAY_VID, 0x2257)}, /*same family as 2255*/
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, s2255_table);
-
#define BUFFER_TIMEOUT msecs_to_jiffies(400)
-/* supported controls */
-static struct v4l2_queryctrl s2255_qctrl[] = {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = -127,
- .maximum = 128,
- .step = 1,
- .default_value = 0,
- .flags = 0,
- }, {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 255,
- .step = 0x1,
- .default_value = DEF_CONTRAST,
- .flags = 0,
- }, {
- .id = V4L2_CID_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Saturation",
- .minimum = 0,
- .maximum = 255,
- .step = 0x1,
- .default_value = DEF_SATURATION,
- .flags = 0,
- }, {
- .id = V4L2_CID_HUE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Hue",
- .minimum = 0,
- .maximum = 255,
- .step = 0x1,
- .default_value = DEF_HUE,
- .flags = 0,
- }
-};
-
-static int qctl_regs[ARRAY_SIZE(s2255_qctrl)];
-
/* image formats. */
static const struct s2255_fmt formats[] = {
{
@@ -505,7 +503,7 @@ static void s2255_reset_dsppower(struct s2255_dev *dev)
static void s2255_timer(unsigned long user_data)
{
struct s2255_fw *data = (struct s2255_fw *)user_data;
- dprintk(100, "s2255 timer\n");
+ dprintk(100, "%s\n", __func__);
if (usb_submit_urb(data->fw_urb, GFP_ATOMIC) < 0) {
printk(KERN_ERR "s2255: can't submit urb\n");
atomic_set(&data->fw_state, S2255_FW_FAILED);
@@ -527,7 +525,7 @@ static void s2255_fwchunk_complete(struct urb *urb)
struct s2255_fw *data = urb->context;
struct usb_device *udev = urb->dev;
int len;
- dprintk(100, "udev %p urb %p", udev, urb);
+ dprintk(100, "%s: udev %p urb %p", __func__, udev, urb);
if (urb->status) {
dev_err(&udev->dev, "URB failed with status %d\n", urb->status);
atomic_set(&data->fw_state, S2255_FW_FAILED);
@@ -573,8 +571,8 @@ static void s2255_fwchunk_complete(struct urb *urb)
data->fw_loaded += len;
} else {
atomic_set(&data->fw_state, S2255_FW_LOADED_DSPWAIT);
+ dprintk(100, "%s: firmware upload complete\n", __func__);
}
- dprintk(100, "2255 complete done\n");
return;
}
@@ -585,9 +583,7 @@ static int s2255_got_frame(struct s2255_dev *dev, int chn, int jpgsize)
struct s2255_buffer *buf;
unsigned long flags = 0;
int rc = 0;
- dprintk(2, "wakeup: %p channel: %d\n", &dma_q, chn);
spin_lock_irqsave(&dev->slock, flags);
-
if (list_empty(&dma_q->active)) {
dprintk(1, "No active queue to serve\n");
rc = -1;
@@ -595,23 +591,19 @@ static int s2255_got_frame(struct s2255_dev *dev, int chn, int jpgsize)
}
buf = list_entry(dma_q->active.next,
struct s2255_buffer, vb.queue);
-
list_del(&buf->vb.queue);
do_gettimeofday(&buf->vb.ts);
- dprintk(100, "[%p/%d] wakeup\n", buf, buf->vb.i);
s2255_fillbuff(dev, buf, dma_q->channel, jpgsize);
wake_up(&buf->vb.done);
- dprintk(2, "wakeup [buf/i] [%p/%d]\n", buf, buf->vb.i);
+ dprintk(2, "%s: [buf/i] [%p/%d]\n", __func__, buf, buf->vb.i);
unlock:
spin_unlock_irqrestore(&dev->slock, flags);
return 0;
}
-
static const struct s2255_fmt *format_by_fourcc(int fourcc)
{
unsigned int i;
-
for (i = 0; i < ARRAY_SIZE(formats); i++) {
if (-1 == formats[i].fourcc)
continue;
@@ -621,9 +613,6 @@ static const struct s2255_fmt *format_by_fourcc(int fourcc)
return NULL;
}
-
-
-
/* video buffer vmalloc implementation based partly on VIVI driver which is
* Copyright (c) 2006 by
* Mauro Carvalho Chehab <mchehab--a.t--infradead.org>
@@ -703,8 +692,8 @@ static int buffer_setup(struct videobuf_queue *vq, unsigned int *count,
if (0 == *count)
*count = S2255_DEF_BUFS;
- while (*size * (*count) > vid_limit * 1024 * 1024)
- (*count)--;
+ if (*size * *count > vid_limit * 1024 * 1024)
+ *count = (vid_limit * 1024 * 1024) / *size;
return 0;
}
@@ -727,10 +716,10 @@ static int buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
if (fh->fmt == NULL)
return -EINVAL;
- if ((fh->width < norm_minw(fh->dev->vdev[fh->channel])) ||
- (fh->width > norm_maxw(fh->dev->vdev[fh->channel])) ||
- (fh->height < norm_minh(fh->dev->vdev[fh->channel])) ||
- (fh->height > norm_maxh(fh->dev->vdev[fh->channel]))) {
+ if ((fh->width < norm_minw(&fh->dev->vdev[fh->channel])) ||
+ (fh->width > norm_maxw(&fh->dev->vdev[fh->channel])) ||
+ (fh->height < norm_minh(&fh->dev->vdev[fh->channel])) ||
+ (fh->height > norm_maxh(&fh->dev->vdev[fh->channel]))) {
dprintk(4, "invalid buffer prepare\n");
return -EINVAL;
}
@@ -747,7 +736,6 @@ static int buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
buf->vb.height = fh->height;
buf->vb.field = field;
-
if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
rc = videobuf_iolock(vq, &buf->vb, NULL);
if (rc < 0)
@@ -767,9 +755,7 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
struct s2255_fh *fh = vq->priv_data;
struct s2255_dev *dev = fh->dev;
struct s2255_dmaqueue *vidq = &dev->vidq[fh->channel];
-
dprintk(1, "%s\n", __func__);
-
buf->vb.state = VIDEOBUF_QUEUED;
list_add_tail(&buf->vb.queue, &vidq->active);
}
@@ -828,6 +814,27 @@ static void res_free(struct s2255_dev *dev, struct s2255_fh *fh)
dprintk(1, "res: put\n");
}
+static int vidioc_querymenu(struct file *file, void *priv,
+ struct v4l2_querymenu *qmenu)
+{
+ static const char *colorfilter[] = {
+ "Off",
+ "On",
+ NULL
+ };
+ if (qmenu->id == V4L2_CID_PRIVATE_COLORFILTER) {
+ int i;
+ const char **menu_items = colorfilter;
+ for (i = 0; i < qmenu->index && menu_items[i]; i++)
+ ; /* do nothing (from v4l2-common.c) */
+ if (menu_items[i] == NULL || menu_items[i][0] == '\0')
+ return -EINVAL;
+ strlcpy(qmenu->name, menu_items[qmenu->index],
+ sizeof(qmenu->name));
+ return 0;
+ }
+ return v4l2_ctrl_query_menu(qmenu, NULL, NULL);
+}
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
@@ -883,7 +890,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
int is_ntsc;
is_ntsc =
- (dev->vdev[fh->channel]->current_norm & V4L2_STD_NTSC) ? 1 : 0;
+ (dev->vdev[fh->channel].current_norm & V4L2_STD_NTSC) ? 1 : 0;
fmt = format_by_fourcc(f->fmt.pix.pixelformat);
@@ -894,10 +901,8 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
if (field == V4L2_FIELD_ANY)
b_any_field = 1;
- dprintk(4, "try format %d \n", is_ntsc);
- /* supports 3 sizes. see s2255drv.h */
- dprintk(50, "width test %d, height %d\n",
- f->fmt.pix.width, f->fmt.pix.height);
+ dprintk(50, "%s NTSC: %d suggested width: %d, height: %d\n",
+ __func__, is_ntsc, f->fmt.pix.width, f->fmt.pix.height);
if (is_ntsc) {
/* NTSC */
if (f->fmt.pix.height >= NUM_LINES_1CIFS_NTSC * 2) {
@@ -952,29 +957,24 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
}
}
if (f->fmt.pix.width >= LINE_SZ_4CIFS_PAL) {
- dprintk(50, "pal 704\n");
f->fmt.pix.width = LINE_SZ_4CIFS_PAL;
field = V4L2_FIELD_SEQ_TB;
} else if (f->fmt.pix.width >= LINE_SZ_2CIFS_PAL) {
- dprintk(50, "pal 352A\n");
f->fmt.pix.width = LINE_SZ_2CIFS_PAL;
field = V4L2_FIELD_TOP;
} else if (f->fmt.pix.width >= LINE_SZ_1CIFS_PAL) {
- dprintk(50, "pal 352B\n");
f->fmt.pix.width = LINE_SZ_1CIFS_PAL;
field = V4L2_FIELD_TOP;
} else {
- dprintk(50, "pal 352C\n");
f->fmt.pix.width = LINE_SZ_1CIFS_PAL;
field = V4L2_FIELD_TOP;
}
}
-
- dprintk(50, "width %d height %d field %d \n", f->fmt.pix.width,
- f->fmt.pix.height, f->fmt.pix.field);
f->fmt.pix.field = field;
f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3;
f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
+ dprintk(50, "%s: set width %d height %d field %d\n", __func__,
+ f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.field);
return 0;
}
@@ -1006,7 +1006,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
}
if (res_locked(fh->dev, fh)) {
- dprintk(1, "can't change format after started\n");
+ dprintk(1, "%s: channel busy\n", __func__);
ret = -EBUSY;
goto out_s_fmt;
}
@@ -1016,17 +1016,14 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
fh->height = f->fmt.pix.height;
fh->vb_vidq.field = f->fmt.pix.field;
fh->type = f->type;
- norm = norm_minw(fh->dev->vdev[fh->channel]);
- if (fh->width > norm_minw(fh->dev->vdev[fh->channel])) {
- if (fh->height > norm_minh(fh->dev->vdev[fh->channel])) {
+ norm = norm_minw(&fh->dev->vdev[fh->channel]);
+ if (fh->width > norm_minw(&fh->dev->vdev[fh->channel])) {
+ if (fh->height > norm_minh(&fh->dev->vdev[fh->channel])) {
if (fh->dev->cap_parm[fh->channel].capturemode &
- V4L2_MODE_HIGHQUALITY) {
+ V4L2_MODE_HIGHQUALITY)
fh->mode.scale = SCALE_4CIFSI;
- dprintk(2, "scale 4CIFSI\n");
- } else {
+ else
fh->mode.scale = SCALE_4CIFS;
- dprintk(2, "scale 4CIFS\n");
- }
} else
fh->mode.scale = SCALE_2CIFS;
@@ -1037,19 +1034,23 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
/* color mode */
switch (fh->fmt->fourcc) {
case V4L2_PIX_FMT_GREY:
- fh->mode.color = COLOR_Y8;
+ fh->mode.color &= ~MASK_COLOR;
+ fh->mode.color |= COLOR_Y8;
break;
case V4L2_PIX_FMT_JPEG:
- fh->mode.color = COLOR_JPG |
- (fh->dev->jc[fh->channel].quality << 8);
+ fh->mode.color &= ~MASK_COLOR;
+ fh->mode.color |= COLOR_JPG;
+ fh->mode.color |= (fh->dev->jc[fh->channel].quality << 8);
break;
case V4L2_PIX_FMT_YUV422P:
- fh->mode.color = COLOR_YUVPL;
+ fh->mode.color &= ~MASK_COLOR;
+ fh->mode.color |= COLOR_YUVPL;
break;
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_UYVY:
default:
- fh->mode.color = COLOR_YUVPK;
+ fh->mode.color &= ~MASK_COLOR;
+ fh->mode.color |= COLOR_YUVPK;
break;
}
ret = 0;
@@ -1178,19 +1179,13 @@ static u32 get_transfer_size(struct s2255_mode *mode)
return usbInSize;
}
-static void dump_verify_mode(struct s2255_dev *sdev, struct s2255_mode *mode)
+static void s2255_print_cfg(struct s2255_dev *sdev, struct s2255_mode *mode)
{
struct device *dev = &sdev->udev->dev;
dev_info(dev, "------------------------------------------------\n");
- dev_info(dev, "verify mode\n");
- dev_info(dev, "format: %d\n", mode->format);
- dev_info(dev, "scale: %d\n", mode->scale);
- dev_info(dev, "fdec: %d\n", mode->fdec);
- dev_info(dev, "color: %d\n", mode->color);
+ dev_info(dev, "format: %d\nscale %d\n", mode->format, mode->scale);
+ dev_info(dev, "fdec: %d\ncolor %d\n", mode->fdec, mode->color);
dev_info(dev, "bright: 0x%x\n", mode->bright);
- dev_info(dev, "restart: 0x%x\n", mode->restart);
- dev_info(dev, "usb_block: 0x%x\n", mode->usb_block);
- dev_info(dev, "single: 0x%x\n", mode->single);
dev_info(dev, "------------------------------------------------\n");
}
@@ -1206,44 +1201,38 @@ static int s2255_set_mode(struct s2255_dev *dev, unsigned long chn,
struct s2255_mode *mode)
{
int res;
- u32 *buffer;
+ __le32 *buffer;
unsigned long chn_rev;
-
mutex_lock(&dev->lock);
chn_rev = G_chnmap[chn];
- dprintk(3, "mode scale [%ld] %p %d\n", chn, mode, mode->scale);
- dprintk(3, "mode scale [%ld] %p %d\n", chn, &dev->mode[chn],
- dev->mode[chn].scale);
- dprintk(2, "mode contrast %x\n", mode->contrast);
-
+ dprintk(3, "%s channel %lu\n", __func__, chn);
/* if JPEG, set the quality */
- if ((mode->color & MASK_COLOR) == COLOR_JPG)
- mode->color = (dev->jc[chn].quality << 8) | COLOR_JPG;
-
+ if ((mode->color & MASK_COLOR) == COLOR_JPG) {
+ mode->color &= ~MASK_COLOR;
+ mode->color |= COLOR_JPG;
+ mode->color &= ~MASK_JPG_QUALITY;
+ mode->color |= (dev->jc[chn].quality << 8);
+ }
/* save the mode */
dev->mode[chn] = *mode;
dev->req_image_size[chn] = get_transfer_size(mode);
- dprintk(1, "transfer size %ld\n", dev->req_image_size[chn]);
-
+ dprintk(1, "%s: reqsize %ld\n", __func__, dev->req_image_size[chn]);
buffer = kzalloc(512, GFP_KERNEL);
if (buffer == NULL) {
dev_err(&dev->udev->dev, "out of mem\n");
mutex_unlock(&dev->lock);
return -ENOMEM;
}
-
/* set the mode */
buffer[0] = IN_DATA_TOKEN;
- buffer[1] = (u32) chn_rev;
+ buffer[1] = (__le32) cpu_to_le32(chn_rev);
buffer[2] = CMD_SET_MODE;
memcpy(&buffer[3], &dev->mode[chn], sizeof(struct s2255_mode));
dev->setmode_ready[chn] = 0;
res = s2255_write_config(dev->udev, (unsigned char *)buffer, 512);
if (debug)
- dump_verify_mode(dev, mode);
+ s2255_print_cfg(dev, mode);
kfree(buffer);
- dprintk(1, "set mode done chn %lu, %d\n", chn, res);
-
/* wait at least 3 frames before continuing */
if (mode->restart) {
wait_event_timeout(dev->wait_setmode[chn],
@@ -1254,10 +1243,46 @@ static int s2255_set_mode(struct s2255_dev *dev, unsigned long chn,
res = -EFAULT;
}
}
-
/* clear the restart flag */
dev->mode[chn].restart = 0;
mutex_unlock(&dev->lock);
+ dprintk(1, "%s chn %lu, result: %d\n", __func__, chn, res);
+ return res;
+}
+
+static int s2255_cmd_status(struct s2255_dev *dev, unsigned long chn,
+ u32 *pstatus)
+{
+ int res;
+ __le32 *buffer;
+ u32 chn_rev;
+ mutex_lock(&dev->lock);
+ chn_rev = G_chnmap[chn];
+ dprintk(4, "%s chan %lu\n", __func__, chn);
+ buffer = kzalloc(512, GFP_KERNEL);
+ if (buffer == NULL) {
+ dev_err(&dev->udev->dev, "out of mem\n");
+ mutex_unlock(&dev->lock);
+ return -ENOMEM;
+ }
+ /* form the get vid status command */
+ buffer[0] = IN_DATA_TOKEN;
+ buffer[1] = (__le32) cpu_to_le32(chn_rev);
+ buffer[2] = CMD_STATUS;
+ *pstatus = 0;
+ dev->vidstatus_ready[chn] = 0;
+ res = s2255_write_config(dev->udev, (unsigned char *)buffer, 512);
+ kfree(buffer);
+ wait_event_timeout(dev->wait_vidstatus[chn],
+ (dev->vidstatus_ready[chn] != 0),
+ msecs_to_jiffies(S2255_VIDSTATUS_TIMEOUT));
+ if (dev->vidstatus_ready[chn] != 1) {
+ printk(KERN_DEBUG "s2255: no vidstatus response\n");
+ res = -EFAULT;
+ }
+ *pstatus = dev->vidstatus[chn];
+ dprintk(4, "%s, vid status %d\n", __func__, *pstatus);
+ mutex_unlock(&dev->lock);
return res;
}
@@ -1291,7 +1316,7 @@ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
new_mode = &fh->mode;
old_mode = &fh->dev->mode[chn];
- if (new_mode->color != old_mode->color)
+ if ((new_mode->color & MASK_COLOR) != (old_mode->color & MASK_COLOR))
new_mode->restart = 1;
else if (new_mode->scale != old_mode->scale)
new_mode->restart = 1;
@@ -1302,7 +1327,6 @@ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
new_mode->restart = 0;
*old_mode = *new_mode;
dev->cur_fmt[chn] = fh->fmt;
- dprintk(1, "%s[%d]\n", __func__, chn);
dev->last_frame[chn] = -1;
dev->bad_payload[chn] = 0;
dev->cur_frame[chn] = 0;
@@ -1325,7 +1349,6 @@ static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
struct s2255_fh *fh = priv;
struct s2255_dev *dev = fh->dev;
-
dprintk(4, "%s\n, channel: %d", __func__, fh->channel);
if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
printk(KERN_ERR "invalid fh type0\n");
@@ -1347,27 +1370,32 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *i)
struct s2255_mode *mode;
struct videobuf_queue *q = &fh->vb_vidq;
int ret = 0;
-
mutex_lock(&q->vb_lock);
if (videobuf_queue_is_busy(q)) {
dprintk(1, "queue busy\n");
ret = -EBUSY;
goto out_s_std;
}
-
if (res_locked(fh->dev, fh)) {
dprintk(1, "can't change standard after started\n");
ret = -EBUSY;
goto out_s_std;
}
mode = &fh->mode;
-
if (*i & V4L2_STD_NTSC) {
- dprintk(4, "vidioc_s_std NTSC\n");
- mode->format = FORMAT_NTSC;
+ dprintk(4, "%s NTSC\n", __func__);
+ /* if changing format, reset frame decimation/intervals */
+ if (mode->format != FORMAT_NTSC) {
+ mode->format = FORMAT_NTSC;
+ mode->fdec = FDEC_1;
+ }
} else if (*i & V4L2_STD_PAL) {
- dprintk(4, "vidioc_s_std PAL\n");
+ dprintk(4, "%s PAL\n", __func__);
mode->format = FORMAT_PAL;
+ if (mode->format != FORMAT_PAL) {
+ mode->format = FORMAT_PAL;
+ mode->fdec = FDEC_1;
+ }
} else {
ret = -EINVAL;
}
@@ -1386,12 +1414,32 @@ out_s_std:
static int vidioc_enum_input(struct file *file, void *priv,
struct v4l2_input *inp)
{
+ struct s2255_fh *fh = priv;
+ struct s2255_dev *dev = fh->dev;
+ u32 status = 0;
if (inp->index != 0)
return -EINVAL;
-
inp->type = V4L2_INPUT_TYPE_CAMERA;
inp->std = S2255_NORMS;
- strlcpy(inp->name, "Camera", sizeof(inp->name));
+ inp->status = 0;
+ if (dev->dsp_fw_ver >= S2255_MIN_DSP_STATUS) {
+ int rc;
+ rc = s2255_cmd_status(dev, fh->channel, &status);
+ dprintk(4, "s2255_cmd_status rc: %d status %x\n", rc, status);
+ if (rc == 0)
+ inp->status = (status & 0x01) ? 0
+ : V4L2_IN_ST_NO_SIGNAL;
+ }
+ switch (dev->pid) {
+ case 0x2255:
+ default:
+ strlcpy(inp->name, "Composite", sizeof(inp->name));
+ break;
+ case 0x2257:
+ strlcpy(inp->name, (fh->channel < 2) ? "Composite" : "S-Video",
+ sizeof(inp->name));
+ break;
+ }
return 0;
}
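
With the status command wired into vidioc_enum_input, applications can detect a missing camera per channel through VIDIOC_ENUMINPUT. A short user-space check (device node assumed):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
        struct v4l2_input inp;
        int fd = open("/dev/video0", O_RDWR);   /* assumption: one node per channel */

        if (fd < 0)
                return 1;
        memset(&inp, 0, sizeof(inp));
        inp.index = 0;                          /* the only input per node */
        if (ioctl(fd, VIDIOC_ENUMINPUT, &inp) == 0)
                printf("%s: %s\n", (char *)inp.name,
                       (inp.status & V4L2_IN_ST_NO_SIGNAL) ?
                       "no signal" : "signal present");
        return 0;
}
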
@@ -1411,74 +1459,113 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
static int vidioc_queryctrl(struct file *file, void *priv,
struct v4l2_queryctrl *qc)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(s2255_qctrl); i++)
- if (qc->id && qc->id == s2255_qctrl[i].id) {
- memcpy(qc, &(s2255_qctrl[i]), sizeof(*qc));
- return 0;
- }
-
- dprintk(4, "query_ctrl -EINVAL %d\n", qc->id);
- return -EINVAL;
+ struct s2255_fh *fh = priv;
+ struct s2255_dev *dev = fh->dev;
+ switch (qc->id) {
+ case V4L2_CID_BRIGHTNESS:
+ v4l2_ctrl_query_fill(qc, -127, 127, 1, DEF_BRIGHT);
+ break;
+ case V4L2_CID_CONTRAST:
+ v4l2_ctrl_query_fill(qc, 0, 255, 1, DEF_CONTRAST);
+ break;
+ case V4L2_CID_SATURATION:
+ v4l2_ctrl_query_fill(qc, 0, 255, 1, DEF_SATURATION);
+ break;
+ case V4L2_CID_HUE:
+ v4l2_ctrl_query_fill(qc, 0, 255, 1, DEF_HUE);
+ break;
+ case V4L2_CID_PRIVATE_COLORFILTER:
+ if (dev->dsp_fw_ver < S2255_MIN_DSP_COLORFILTER)
+ return -EINVAL;
+ if ((dev->pid == 0x2257) && (fh->channel > 1))
+ return -EINVAL;
+ strlcpy(qc->name, "Color Filter", sizeof(qc->name));
+ qc->type = V4L2_CTRL_TYPE_MENU;
+ qc->minimum = 0;
+ qc->maximum = 1;
+ qc->step = 1;
+ qc->default_value = 1;
+ qc->flags = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ dprintk(4, "%s, id %d\n", __func__, qc->id);
+ return 0;
}
static int vidioc_g_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(s2255_qctrl); i++)
- if (ctrl->id == s2255_qctrl[i].id) {
- ctrl->value = qctl_regs[i];
- return 0;
- }
- dprintk(4, "g_ctrl -EINVAL\n");
-
- return -EINVAL;
+ struct s2255_fh *fh = priv;
+ struct s2255_dev *dev = fh->dev;
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ ctrl->value = fh->mode.bright;
+ break;
+ case V4L2_CID_CONTRAST:
+ ctrl->value = fh->mode.contrast;
+ break;
+ case V4L2_CID_SATURATION:
+ ctrl->value = fh->mode.saturation;
+ break;
+ case V4L2_CID_HUE:
+ ctrl->value = fh->mode.hue;
+ break;
+ case V4L2_CID_PRIVATE_COLORFILTER:
+ if (dev->dsp_fw_ver < S2255_MIN_DSP_COLORFILTER)
+ return -EINVAL;
+ if ((dev->pid == 0x2257) && (fh->channel > 1))
+ return -EINVAL;
+ ctrl->value = !((fh->mode.color & MASK_INPUT_TYPE) >> 16);
+ break;
+ default:
+ return -EINVAL;
+ }
+ dprintk(4, "%s, id %d val %d\n", __func__, ctrl->id, ctrl->value);
+ return 0;
}
static int vidioc_s_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
- int i;
struct s2255_fh *fh = priv;
struct s2255_dev *dev = fh->dev;
struct s2255_mode *mode;
mode = &fh->mode;
- dprintk(4, "vidioc_s_ctrl\n");
- for (i = 0; i < ARRAY_SIZE(s2255_qctrl); i++) {
- if (ctrl->id == s2255_qctrl[i].id) {
- if (ctrl->value < s2255_qctrl[i].minimum ||
- ctrl->value > s2255_qctrl[i].maximum)
- return -ERANGE;
-
- qctl_regs[i] = ctrl->value;
- /* update the mode to the corresponding value */
- switch (ctrl->id) {
- case V4L2_CID_BRIGHTNESS:
- mode->bright = ctrl->value;
- break;
- case V4L2_CID_CONTRAST:
- mode->contrast = ctrl->value;
- break;
- case V4L2_CID_HUE:
- mode->hue = ctrl->value;
- break;
- case V4L2_CID_SATURATION:
- mode->saturation = ctrl->value;
- break;
- }
- mode->restart = 0;
- /* set mode here. Note: stream does not need restarted.
- some V4L programs restart stream unnecessarily
- after a s_crtl.
- */
- s2255_set_mode(dev, fh->channel, mode);
- return 0;
- }
+ dprintk(4, "%s\n", __func__);
+ /* update the mode to the corresponding value */
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ mode->bright = ctrl->value;
+ break;
+ case V4L2_CID_CONTRAST:
+ mode->contrast = ctrl->value;
+ break;
+ case V4L2_CID_HUE:
+ mode->hue = ctrl->value;
+ break;
+ case V4L2_CID_SATURATION:
+ mode->saturation = ctrl->value;
+ break;
+ case V4L2_CID_PRIVATE_COLORFILTER:
+ if (dev->dsp_fw_ver < S2255_MIN_DSP_COLORFILTER)
+ return -EINVAL;
+ if ((dev->pid == 0x2257) && (fh->channel > 1))
+ return -EINVAL;
+ mode->color &= ~MASK_INPUT_TYPE;
+ mode->color |= ((ctrl->value ? 0 : 1) << 16);
+ break;
+ default:
+ return -EINVAL;
}
- return -EINVAL;
+ mode->restart = 0;
+ /* set mode here. Note: the stream does not need to be restarted;
+ some V4L programs restart the stream unnecessarily
+ after an s_ctrl.
+ */
+ s2255_set_mode(dev, fh->channel, mode);
+ return 0;
}
static int vidioc_g_jpegcomp(struct file *file, void *priv,
@@ -1487,7 +1574,7 @@ static int vidioc_g_jpegcomp(struct file *file, void *priv,
struct s2255_fh *fh = priv;
struct s2255_dev *dev = fh->dev;
*jc = dev->jc[fh->channel];
- dprintk(2, "getting jpegcompression, quality %d\n", jc->quality);
+ dprintk(2, "%s: quality %d\n", __func__, jc->quality);
return 0;
}
@@ -1499,7 +1586,7 @@ static int vidioc_s_jpegcomp(struct file *file, void *priv,
if (jc->quality < 0 || jc->quality > 100)
return -EINVAL;
dev->jc[fh->channel].quality = jc->quality;
- dprintk(2, "setting jpeg quality %d\n", jc->quality);
+ dprintk(2, "%s: quality %d\n", __func__, jc->quality);
return 0;
}
@@ -1508,10 +1595,34 @@ static int vidioc_g_parm(struct file *file, void *priv,
{
struct s2255_fh *fh = priv;
struct s2255_dev *dev = fh->dev;
+ __u32 def_num, def_dem;
if (sp->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
+ memset(sp, 0, sizeof(struct v4l2_streamparm));
+ sp->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
sp->parm.capture.capturemode = dev->cap_parm[fh->channel].capturemode;
- dprintk(2, "getting parm %d\n", sp->parm.capture.capturemode);
+ def_num = (fh->mode.format == FORMAT_NTSC) ? 1001 : 1000;
+ def_dem = (fh->mode.format == FORMAT_NTSC) ? 30000 : 25000;
+ sp->parm.capture.timeperframe.denominator = def_dem;
+ switch (fh->mode.fdec) {
+ default:
+ case FDEC_1:
+ sp->parm.capture.timeperframe.numerator = def_num;
+ break;
+ case FDEC_2:
+ sp->parm.capture.timeperframe.numerator = def_num * 2;
+ break;
+ case FDEC_3:
+ sp->parm.capture.timeperframe.numerator = def_num * 3;
+ break;
+ case FDEC_5:
+ sp->parm.capture.timeperframe.numerator = def_num * 5;
+ break;
+ }
+ dprintk(4, "%s capture mode, %d timeperframe %d/%d\n", __func__,
+ sp->parm.capture.capturemode,
+ sp->parm.capture.timeperframe.numerator,
+ sp->parm.capture.timeperframe.denominator);
return 0;
}
@@ -1520,15 +1631,79 @@ static int vidioc_s_parm(struct file *file, void *priv,
{
struct s2255_fh *fh = priv;
struct s2255_dev *dev = fh->dev;
-
+ int fdec = FDEC_1;
+ __u32 def_num, def_dem;
if (sp->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
+ /* high quality capture mode requires a stream restart */
+ if (dev->cap_parm[fh->channel].capturemode
+ != sp->parm.capture.capturemode && res_locked(fh->dev, fh))
+ return -EBUSY;
+ def_num = (fh->mode.format == FORMAT_NTSC) ? 1001 : 1000;
+ def_dem = (fh->mode.format == FORMAT_NTSC) ? 30000 : 25000;
+ if (def_dem != sp->parm.capture.timeperframe.denominator)
+ sp->parm.capture.timeperframe.numerator = def_num;
+ else if (sp->parm.capture.timeperframe.numerator <= def_num)
+ sp->parm.capture.timeperframe.numerator = def_num;
+ else if (sp->parm.capture.timeperframe.numerator <= (def_num * 2)) {
+ sp->parm.capture.timeperframe.numerator = def_num * 2;
+ fdec = FDEC_2;
+ } else if (sp->parm.capture.timeperframe.numerator <= (def_num * 3)) {
+ sp->parm.capture.timeperframe.numerator = def_num * 3;
+ fdec = FDEC_3;
+ } else {
+ sp->parm.capture.timeperframe.numerator = def_num * 5;
+ fdec = FDEC_5;
+ }
+ fh->mode.fdec = fdec;
+ sp->parm.capture.timeperframe.denominator = def_dem;
+ s2255_set_mode(dev, fh->channel, &fh->mode);
+ dprintk(4, "%s capture mode, %d timeperframe %d/%d, fdec %d\n",
+ __func__,
+ sp->parm.capture.capturemode,
+ sp->parm.capture.timeperframe.numerator,
+ sp->parm.capture.timeperframe.denominator, fdec);
+ return 0;
+}
- dev->cap_parm[fh->channel].capturemode = sp->parm.capture.capturemode;
- dprintk(2, "setting param capture mode %d\n",
- sp->parm.capture.capturemode);
+static int vidioc_enum_frameintervals(struct file *file, void *priv,
+ struct v4l2_frmivalenum *fe)
+{
+ int is_ntsc = 0;
+#define NUM_FRAME_ENUMS 4
+ int frm_dec[NUM_FRAME_ENUMS] = {1, 2, 3, 5};
+ if (fe->index < 0 || fe->index >= NUM_FRAME_ENUMS)
+ return -EINVAL;
+ switch (fe->width) {
+ case 640:
+ if (fe->height != 240 && fe->height != 480)
+ return -EINVAL;
+ is_ntsc = 1;
+ break;
+ case 320:
+ if (fe->height != 240)
+ return -EINVAL;
+ is_ntsc = 1;
+ break;
+ case 704:
+ if (fe->height != 288 && fe->height != 576)
+ return -EINVAL;
+ break;
+ case 352:
+ if (fe->height != 288)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ fe->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ fe->discrete.denominator = is_ntsc ? 30000 : 25000;
+ fe->discrete.numerator = (is_ntsc ? 1001 : 1000) * frm_dec[fe->index];
+ dprintk(4, "%s discrete %d/%d\n", __func__, fe->discrete.numerator,
+ fe->discrete.denominator);
return 0;
}
+
static int s2255_open(struct file *file)
{
struct video_device *vdev = video_devdata(file);
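
vidioc_s_parm and vidioc_enum_frameintervals above snap a requested frame interval onto the supported decimations (every 1st, 2nd, 3rd or 5th frame). Worked NTSC numbers, where the base interval is 1001/30000 s, in a sketch that follows the same rounding rules:

#include <stdio.h>

/* NTSC base interval is 1001/30000 s; PAL would be 1000/25000 s. */
static int interval_to_fdec(unsigned int num, unsigned int den,
                            unsigned int def_num, unsigned int def_dem)
{
        if (den != def_dem || num <= def_num)
                return 1;                       /* full rate */
        if (num <= def_num * 2)
                return 2;
        if (num <= def_num * 3)
                return 3;
        return 5;
}

int main(void)
{
        /* 1001/30000 -> every frame, 2002/30000 -> every 2nd, 5005/30000 -> every 5th */
        printf("%d %d %d\n",
               interval_to_fdec(1001, 30000, 1001, 30000),
               interval_to_fdec(2002, 30000, 1001, 30000),
               interval_to_fdec(5005, 30000, 1001, 30000));
        return 0;
}
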
@@ -1538,31 +1713,25 @@ static int s2255_open(struct file *file)
int i = 0;
int cur_channel = -1;
int state;
-
dprintk(1, "s2255: open called (dev=%s)\n",
video_device_node_name(vdev));
- lock_kernel();
-
- for (i = 0; i < MAX_CHANNELS; i++) {
- if (dev->vdev[i] == vdev) {
+ for (i = 0; i < MAX_CHANNELS; i++)
+ if (&dev->vdev[i] == vdev) {
cur_channel = i;
break;
}
- }
-
- if (atomic_read(&dev->fw_data->fw_state) == S2255_FW_DISCONNECTING) {
- unlock_kernel();
- printk(KERN_INFO "disconnecting\n");
- return -ENODEV;
- }
- kref_get(&dev->kref);
+ /*
+ * open lock necessary to prevent multiple instances
+ * of v4l-conf (or other programs) from simultaneously
+ * reloading firmware.
+ */
mutex_lock(&dev->open_lock);
-
- dev->users[cur_channel]++;
- dprintk(4, "s2255: open_handles %d\n", dev->users[cur_channel]);
-
- switch (atomic_read(&dev->fw_data->fw_state)) {
+ state = atomic_read(&dev->fw_data->fw_state);
+ switch (state) {
+ case S2255_FW_DISCONNECTING:
+ mutex_unlock(&dev->open_lock);
+ return -ENODEV;
case S2255_FW_FAILED:
s2255_dev_err(&dev->udev->dev,
"firmware load failed. retrying.\n");
@@ -1573,6 +1742,8 @@ static int s2255_open(struct file *file)
(atomic_read(&dev->fw_data->fw_state)
== S2255_FW_DISCONNECTING)),
msecs_to_jiffies(S2255_LOAD_TIMEOUT));
+ /* state may have changed, re-read */
+ state = atomic_read(&dev->fw_data->fw_state);
break;
case S2255_FW_NOTLOADED:
case S2255_FW_LOADED_DSPWAIT:
@@ -1584,53 +1755,50 @@ static int s2255_open(struct file *file)
== S2255_FW_SUCCESS) ||
(atomic_read(&dev->fw_data->fw_state)
== S2255_FW_DISCONNECTING)),
- msecs_to_jiffies(S2255_LOAD_TIMEOUT));
+ msecs_to_jiffies(S2255_LOAD_TIMEOUT));
+ /* state may have changed, re-read */
+ state = atomic_read(&dev->fw_data->fw_state);
break;
case S2255_FW_SUCCESS:
default:
break;
}
- state = atomic_read(&dev->fw_data->fw_state);
- if (state != S2255_FW_SUCCESS) {
- int rc;
- switch (state) {
- case S2255_FW_FAILED:
- printk(KERN_INFO "2255 FW load failed. %d\n", state);
- rc = -ENODEV;
- break;
- case S2255_FW_DISCONNECTING:
- printk(KERN_INFO "%s: disconnecting\n", __func__);
- rc = -ENODEV;
- break;
- case S2255_FW_LOADED_DSPWAIT:
- case S2255_FW_NOTLOADED:
- printk(KERN_INFO "%s: firmware not loaded yet"
- "please try again later\n",
- __func__);
- rc = -EAGAIN;
- break;
- default:
- printk(KERN_INFO "%s: unknown state\n", __func__);
- rc = -EFAULT;
- break;
- }
- dev->users[cur_channel]--;
+ /* state may have changed in above switch statement */
+ switch (state) {
+ case S2255_FW_SUCCESS:
+ break;
+ case S2255_FW_FAILED:
+ printk(KERN_INFO "2255 firmware load failed.\n");
+ mutex_unlock(&dev->open_lock);
+ return -ENODEV;
+ case S2255_FW_DISCONNECTING:
+ printk(KERN_INFO "%s: disconnecting\n", __func__);
+ mutex_unlock(&dev->open_lock);
+ return -ENODEV;
+ case S2255_FW_LOADED_DSPWAIT:
+ case S2255_FW_NOTLOADED:
+ printk(KERN_INFO "%s: firmware not loaded yet"
+ "please try again later\n",
+ __func__);
+ /*
+ * Timeout on firmware load means device unusable.
+ * Set firmware failure state.
+ * On next s2255_open the firmware will be reloaded.
+ */
+ atomic_set(&dev->fw_data->fw_state,
+ S2255_FW_FAILED);
mutex_unlock(&dev->open_lock);
- kref_put(&dev->kref, s2255_destroy);
- unlock_kernel();
- return rc;
+ return -EAGAIN;
+ default:
+ printk(KERN_INFO "%s: unknown state\n", __func__);
+ mutex_unlock(&dev->open_lock);
+ return -EFAULT;
}
-
+ mutex_unlock(&dev->open_lock);
/* allocate + initialize per filehandle data */
fh = kzalloc(sizeof(*fh), GFP_KERNEL);
- if (NULL == fh) {
- dev->users[cur_channel]--;
- mutex_unlock(&dev->open_lock);
- kref_put(&dev->kref, s2255_destroy);
- unlock_kernel();
+ if (NULL == fh)
return -ENOMEM;
- }
-
file->private_data = fh;
fh->dev = dev;
fh->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
@@ -1640,35 +1808,23 @@ static int s2255_open(struct file *file)
fh->width = LINE_SZ_4CIFS_NTSC;
fh->height = NUM_LINES_4CIFS_NTSC * 2;
fh->channel = cur_channel;
-
/* configure channel to default state */
if (!dev->chn_configured[cur_channel]) {
s2255_set_mode(dev, cur_channel, &fh->mode);
dev->chn_configured[cur_channel] = 1;
}
-
-
- /* Put all controls at a sane state */
- for (i = 0; i < ARRAY_SIZE(s2255_qctrl); i++)
- qctl_regs[i] = s2255_qctrl[i].default_value;
-
- dprintk(1, "s2255drv: open dev=%s type=%s users=%d\n",
- video_device_node_name(vdev), v4l2_type_names[type],
- dev->users[cur_channel]);
- dprintk(2, "s2255drv: open: fh=0x%08lx, dev=0x%08lx, vidq=0x%08lx\n",
+ dprintk(1, "%s: dev=%s type=%s\n", __func__,
+ video_device_node_name(vdev), v4l2_type_names[type]);
+ dprintk(2, "%s: fh=0x%08lx, dev=0x%08lx, vidq=0x%08lx\n", __func__,
(unsigned long)fh, (unsigned long)dev,
(unsigned long)&dev->vidq[cur_channel]);
- dprintk(4, "s2255drv: open: list_empty active=%d\n",
+ dprintk(4, "%s: list_empty active=%d\n", __func__,
list_empty(&dev->vidq[cur_channel].active));
-
videobuf_queue_vmalloc_init(&fh->vb_vidq, &s2255_video_qops,
NULL, &dev->slock,
fh->type,
V4L2_FIELD_INTERLACED,
sizeof(struct s2255_buffer), fh);
-
- mutex_unlock(&dev->open_lock);
- unlock_kernel();
return 0;
}
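The reworked open path above no longer takes the BKL or per-channel user counts; it waits for the firmware loader under open_lock and re-reads the atomic state after each wait before deciding how to fail. A minimal, self-contained sketch of that wait-and-recheck pattern, using simplified hypothetical names (fw_state, load_wq, FW_*) rather than the driver's own:

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/atomic.h>

enum { FW_NOTLOADED, FW_SUCCESS, FW_FAILED };

static atomic_t fw_state = ATOMIC_INIT(FW_NOTLOADED);
static DECLARE_WAIT_QUEUE_HEAD(load_wq);

/* Wait up to 'ms' for the loader to reach a terminal state. */
static int wait_for_firmware(unsigned long ms)
{
    wait_event_timeout(load_wq,
                       atomic_read(&fw_state) != FW_NOTLOADED,
                       msecs_to_jiffies(ms));
    /* the state may have changed while sleeping: re-read it */
    switch (atomic_read(&fw_state)) {
    case FW_SUCCESS:
        return 0;
    case FW_FAILED:
        return -ENODEV;
    default:
        return -EAGAIN;    /* still loading, caller may retry */
    }
}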
@@ -1679,39 +1835,19 @@ static unsigned int s2255_poll(struct file *file,
struct s2255_fh *fh = file->private_data;
int rc;
dprintk(100, "%s\n", __func__);
-
if (V4L2_BUF_TYPE_VIDEO_CAPTURE != fh->type)
return POLLERR;
-
rc = videobuf_poll_stream(file, &fh->vb_vidq, wait);
return rc;
}
-static void s2255_destroy(struct kref *kref)
+static void s2255_destroy(struct s2255_dev *dev)
{
- struct s2255_dev *dev = to_s2255_dev(kref);
- int i;
- if (!dev) {
- printk(KERN_ERR "s2255drv: kref problem\n");
- return;
- }
- atomic_set(&dev->fw_data->fw_state, S2255_FW_DISCONNECTING);
- wake_up(&dev->fw_data->wait_fw);
- for (i = 0; i < MAX_CHANNELS; i++) {
- dev->setmode_ready[i] = 1;
- wake_up(&dev->wait_setmode[i]);
- }
- mutex_lock(&dev->open_lock);
- /* reset the DSP so firmware can be reload next time */
- s2255_reset_dsppower(dev);
- s2255_exit_v4l(dev);
/* board shutdown stops the read pipe if it is running */
s2255_board_shutdown(dev);
/* make sure firmware still not trying to load */
del_timer(&dev->timer); /* only started in .probe and .open */
-
if (dev->fw_data->fw_urb) {
- dprintk(2, "kill fw_urb\n");
usb_kill_urb(dev->fw_data->fw_urb);
usb_free_urb(dev->fw_data->fw_urb);
dev->fw_data->fw_urb = NULL;
@@ -1720,24 +1856,22 @@ static void s2255_destroy(struct kref *kref)
release_firmware(dev->fw_data->fw);
kfree(dev->fw_data->pfw_data);
kfree(dev->fw_data);
+ /* reset the DSP so firmware can be reloaded next time */
+ s2255_reset_dsppower(dev);
+ mutex_destroy(&dev->open_lock);
+ mutex_destroy(&dev->lock);
usb_put_dev(dev->udev);
dprintk(1, "%s", __func__);
-
- mutex_unlock(&dev->open_lock);
kfree(dev);
}
-static int s2255_close(struct file *file)
+static int s2255_release(struct file *file)
{
struct s2255_fh *fh = file->private_data;
struct s2255_dev *dev = fh->dev;
struct video_device *vdev = video_devdata(file);
-
if (!dev)
return -ENODEV;
-
- mutex_lock(&dev->open_lock);
-
/* turn off stream */
if (res_check(fh)) {
if (dev->b_acquire[fh->channel])
@@ -1745,15 +1879,8 @@ static int s2255_close(struct file *file)
videobuf_streamoff(&fh->vb_vidq);
res_free(dev, fh);
}
-
videobuf_mmap_free(&fh->vb_vidq);
- dev->users[fh->channel]--;
-
- mutex_unlock(&dev->open_lock);
-
- kref_put(&dev->kref, s2255_destroy);
- dprintk(1, "s2255: close called (dev=%s, users=%d)\n",
- video_device_node_name(vdev), dev->users[fh->channel]);
+ dprintk(1, "%s (dev=%s)\n", __func__, video_device_node_name(vdev));
kfree(fh);
return 0;
}
@@ -1765,27 +1892,25 @@ static int s2255_mmap_v4l(struct file *file, struct vm_area_struct *vma)
if (!fh)
return -ENODEV;
- dprintk(4, "mmap called, vma=0x%08lx\n", (unsigned long)vma);
-
+ dprintk(4, "%s, vma=0x%08lx\n", __func__, (unsigned long)vma);
ret = videobuf_mmap_mapper(&fh->vb_vidq, vma);
-
- dprintk(4, "vma start=0x%08lx, size=%ld, ret=%d\n",
+ dprintk(4, "%s vma start=0x%08lx, size=%ld, ret=%d\n", __func__,
(unsigned long)vma->vm_start,
(unsigned long)vma->vm_end - (unsigned long)vma->vm_start, ret);
-
return ret;
}
static const struct v4l2_file_operations s2255_fops_v4l = {
.owner = THIS_MODULE,
.open = s2255_open,
- .release = s2255_close,
+ .release = s2255_release,
.poll = s2255_poll,
.ioctl = video_ioctl2, /* V4L2 ioctl handler */
.mmap = s2255_mmap_v4l,
};
static const struct v4l2_ioctl_ops s2255_ioctl_ops = {
+ .vidioc_querymenu = vidioc_querymenu,
.vidioc_querycap = vidioc_querycap,
.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
.vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
@@ -1811,13 +1936,23 @@ static const struct v4l2_ioctl_ops s2255_ioctl_ops = {
.vidioc_g_jpegcomp = vidioc_g_jpegcomp,
.vidioc_s_parm = vidioc_s_parm,
.vidioc_g_parm = vidioc_g_parm,
+ .vidioc_enum_frameintervals = vidioc_enum_frameintervals,
};
+static void s2255_video_device_release(struct video_device *vdev)
+{
+ struct s2255_dev *dev = video_get_drvdata(vdev);
+ dprintk(4, "%s, chnls: %d \n", __func__, atomic_read(&dev->channels));
+ if (atomic_dec_and_test(&dev->channels))
+ s2255_destroy(dev);
+ return;
+}
+
static struct video_device template = {
.name = "s2255v",
.fops = &s2255_fops_v4l,
.ioctl_ops = &s2255_ioctl_ops,
- .release = video_device_release,
+ .release = s2255_video_device_release,
.tvnorms = S2255_NORMS,
.current_norm = V4L2_STD_NTSC_M,
};
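With the kref gone, device lifetime is now tied to an atomic count of registered video nodes: each node registered in probe takes one count, and the V4L2 release callback of the last node to go away frees the device. A sketch of that idea with hypothetical names (my_dev, my_destroy):

#include <linux/atomic.h>
#include <linux/slab.h>

struct my_dev {
    atomic_t channels;    /* one count per registered video node */
};

static void my_destroy(struct my_dev *dev)
{
    kfree(dev);           /* last user gone, safe to free */
}

/* called by the V4L2 core when a video node's refcount drops to zero */
static void my_node_release(struct my_dev *dev)
{
    if (atomic_dec_and_test(&dev->channels))
        my_destroy(dev);
}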
@@ -1827,7 +1962,9 @@ static int s2255_probe_v4l(struct s2255_dev *dev)
int ret;
int i;
int cur_nr = video_nr;
-
+ ret = v4l2_device_register(&dev->interface->dev, &dev->v4l2_dev);
+ if (ret)
+ return ret;
/* initialize all video 4 linux */
/* register 4 video devices */
for (i = 0; i < MAX_CHANNELS; i++) {
@@ -1835,45 +1972,39 @@ static int s2255_probe_v4l(struct s2255_dev *dev)
dev->vidq[i].dev = dev;
dev->vidq[i].channel = i;
/* register 4 video devices */
- dev->vdev[i] = video_device_alloc();
- memcpy(dev->vdev[i], &template, sizeof(struct video_device));
- dev->vdev[i]->parent = &dev->interface->dev;
- video_set_drvdata(dev->vdev[i], dev);
+ memcpy(&dev->vdev[i], &template, sizeof(struct video_device));
+ dev->vdev[i].v4l2_dev = &dev->v4l2_dev;
+ video_set_drvdata(&dev->vdev[i], dev);
if (video_nr == -1)
- ret = video_register_device(dev->vdev[i],
+ ret = video_register_device(&dev->vdev[i],
VFL_TYPE_GRABBER,
video_nr);
else
- ret = video_register_device(dev->vdev[i],
+ ret = video_register_device(&dev->vdev[i],
VFL_TYPE_GRABBER,
cur_nr + i);
- video_set_drvdata(dev->vdev[i], dev);
-
- if (ret != 0) {
+ if (ret) {
dev_err(&dev->udev->dev,
"failed to register video device!\n");
- return ret;
+ break;
}
+ atomic_inc(&dev->channels);
+ v4l2_info(&dev->v4l2_dev, "V4L2 device registered as %s\n",
+ video_device_node_name(&dev->vdev[i]));
+
}
+
printk(KERN_INFO "Sensoray 2255 V4L driver Revision: %d.%d\n",
S2255_MAJOR_VERSION,
S2255_MINOR_VERSION);
- return ret;
-}
-
-static void s2255_exit_v4l(struct s2255_dev *dev)
-{
-
- int i;
- for (i = 0; i < MAX_CHANNELS; i++) {
- if (video_is_registered(dev->vdev[i])) {
- video_unregister_device(dev->vdev[i]);
- printk(KERN_INFO "s2255 unregistered\n");
- } else {
- video_device_release(dev->vdev[i]);
- printk(KERN_INFO "s2255 released\n");
- }
+ /* if no channels registered, return error and probe will fail */
+ if (atomic_read(&dev->channels) == 0) {
+ v4l2_device_unregister(&dev->v4l2_dev);
+ return ret;
}
+ if (atomic_read(&dev->channels) != MAX_CHANNELS)
+ printk(KERN_WARNING "s2255: Not all channels available.\n");
+ return 0;
}
/* this function moves the usb stream read pipe data
@@ -1907,14 +2038,14 @@ static int save_frame(struct s2255_dev *dev, struct s2255_pipeinfo *pipe_info)
if (frm->ulState == S2255_READ_IDLE) {
int jj;
unsigned int cc;
- s32 *pdword;
+ __le32 *pdword; /* data from DSP is little endian */
int payload;
/* search for marker codes */
pdata = (unsigned char *)pipe_info->transfer_buffer;
+ pdword = (__le32 *)pdata;
for (jj = 0; jj < (pipe_info->cur_transfer_size - 12); jj++) {
- switch (*(s32 *) pdata) {
+ switch (*pdword) {
case S2255_MARKER_FRAME:
- pdword = (s32 *)pdata;
dprintk(4, "found frame marker at offset:"
" %d [%x %x]\n", jj, pdata[0],
pdata[1]);
@@ -1938,7 +2069,6 @@ static int save_frame(struct s2255_dev *dev, struct s2255_pipeinfo *pipe_info)
dev->jpg_size[dev->cc] = pdword[4];
break;
case S2255_MARKER_RESPONSE:
- pdword = (s32 *)pdata;
pdata += DEF_USB_BLOCK;
jj += DEF_USB_BLOCK;
if (pdword[1] >= MAX_CHANNELS)
@@ -1955,7 +2085,6 @@ static int save_frame(struct s2255_dev *dev, struct s2255_pipeinfo *pipe_info)
dprintk(5, "setmode ready %d\n", cc);
break;
case S2255_RESPONSE_FW:
-
dev->chn_ready |= (1 << cc);
if ((dev->chn_ready & 0x0f) != 0x0f)
break;
@@ -1965,6 +2094,13 @@ static int save_frame(struct s2255_dev *dev, struct s2255_pipeinfo *pipe_info)
S2255_FW_SUCCESS);
wake_up(&dev->fw_data->wait_fw);
break;
+ case S2255_RESPONSE_STATUS:
+ dev->vidstatus[cc] = pdword[3];
+ dev->vidstatus_ready[cc] = 1;
+ wake_up(&dev->wait_vidstatus[cc]);
+ dprintk(5, "got vidstatus %x chan %d\n",
+ pdword[3], cc);
+ break;
default:
printk(KERN_INFO "s2255 unknown resp\n");
}
@@ -2165,28 +2301,22 @@ static int s2255_release_sys_buffers(struct s2255_dev *dev,
static int s2255_board_init(struct s2255_dev *dev)
{
- int j;
struct s2255_mode mode_def = DEF_MODEI_NTSC_CONT;
int fw_ver;
+ int j;
+ struct s2255_pipeinfo *pipe = &dev->pipe;
dprintk(4, "board init: %p", dev);
-
- for (j = 0; j < MAX_PIPE_BUFFERS; j++) {
- struct s2255_pipeinfo *pipe = &dev->pipes[j];
-
- memset(pipe, 0, sizeof(*pipe));
- pipe->dev = dev;
- pipe->cur_transfer_size = S2255_USB_XFER_SIZE;
- pipe->max_transfer_size = S2255_USB_XFER_SIZE;
-
- pipe->transfer_buffer = kzalloc(pipe->max_transfer_size,
- GFP_KERNEL);
- if (pipe->transfer_buffer == NULL) {
- dprintk(1, "out of memory!\n");
- return -ENOMEM;
- }
-
+ memset(pipe, 0, sizeof(*pipe));
+ pipe->dev = dev;
+ pipe->cur_transfer_size = S2255_USB_XFER_SIZE;
+ pipe->max_transfer_size = S2255_USB_XFER_SIZE;
+
+ pipe->transfer_buffer = kzalloc(pipe->max_transfer_size,
+ GFP_KERNEL);
+ if (pipe->transfer_buffer == NULL) {
+ dprintk(1, "out of memory!\n");
+ return -ENOMEM;
}
-
/* query the firmware */
fw_ver = s2255_get_fx2fw(dev);
@@ -2203,6 +2333,8 @@ static int s2255_board_init(struct s2255_dev *dev)
for (j = 0; j < MAX_CHANNELS; j++) {
dev->b_acquire[j] = 0;
dev->mode[j] = mode_def;
+ if (dev->pid == 0x2257 && j > 1)
+ dev->mode[j].color |= (1 << 16);
dev->jc[j].quality = S2255_DEF_JPEG_QUAL;
dev->cur_fmt[j] = &formats[0];
dev->mode[j].restart = 1;
@@ -2213,16 +2345,14 @@ static int s2255_board_init(struct s2255_dev *dev)
}
/* start read pipe */
s2255_start_readpipe(dev);
-
- dprintk(1, "S2255: board initialized\n");
+ dprintk(1, "%s: success\n", __func__);
return 0;
}
static int s2255_board_shutdown(struct s2255_dev *dev)
{
u32 i;
-
- dprintk(1, "S2255: board shutdown: %p", dev);
+ dprintk(1, "%s: dev: %p", __func__, dev);
for (i = 0; i < MAX_CHANNELS; i++) {
if (dev->b_acquire[i])
@@ -2233,12 +2363,8 @@ static int s2255_board_shutdown(struct s2255_dev *dev)
for (i = 0; i < MAX_CHANNELS; i++)
s2255_release_sys_buffers(dev, i);
-
- /* release transfer buffers */
- for (i = 0; i < MAX_PIPE_BUFFERS; i++) {
- struct s2255_pipeinfo *pipe = &dev->pipes[i];
- kfree(pipe->transfer_buffer);
- }
+ /* release transfer buffer */
+ kfree(dev->pipe.transfer_buffer);
return 0;
}
@@ -2248,9 +2374,8 @@ static void read_pipe_completion(struct urb *purb)
struct s2255_dev *dev;
int status;
int pipe;
-
pipe_info = purb->context;
- dprintk(100, "read pipe completion %p, status %d\n", purb,
+ dprintk(100, "%s: urb:%p, status %d\n", __func__, purb,
purb->status);
if (pipe_info == NULL) {
dev_err(&purb->dev->dev, "no context!\n");
@@ -2265,13 +2390,13 @@ static void read_pipe_completion(struct urb *purb)
status = purb->status;
/* if shutting down, do not resubmit, exit immediately */
if (status == -ESHUTDOWN) {
- dprintk(2, "read_pipe_completion: err shutdown\n");
+ dprintk(2, "%s: err shutdown\n", __func__);
pipe_info->err_count++;
return;
}
if (pipe_info->state == 0) {
- dprintk(2, "exiting USB pipe");
+ dprintk(2, "%s: exiting USB pipe", __func__);
return;
}
@@ -2279,7 +2404,7 @@ static void read_pipe_completion(struct urb *purb)
s2255_read_video_callback(dev, pipe_info);
else {
pipe_info->err_count++;
- dprintk(1, "s2255drv: failed URB %d\n", status);
+ dprintk(1, "%s: failed URB %d\n", __func__, status);
}
pipe = usb_rcvbulkpipe(dev->udev, dev->read_endpoint);
@@ -2295,7 +2420,7 @@ static void read_pipe_completion(struct urb *purb)
dev_err(&dev->udev->dev, "error submitting urb\n");
}
} else {
- dprintk(2, "read pipe complete state 0\n");
+ dprintk(2, "%s :complete state 0\n", __func__);
}
return;
}
@@ -2304,35 +2429,28 @@ static int s2255_start_readpipe(struct s2255_dev *dev)
{
int pipe;
int retval;
- int i;
- struct s2255_pipeinfo *pipe_info = dev->pipes;
+ struct s2255_pipeinfo *pipe_info = &dev->pipe;
pipe = usb_rcvbulkpipe(dev->udev, dev->read_endpoint);
- dprintk(2, "start pipe IN %d\n", dev->read_endpoint);
-
- for (i = 0; i < MAX_PIPE_BUFFERS; i++) {
- pipe_info->state = 1;
- pipe_info->err_count = 0;
- pipe_info->stream_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!pipe_info->stream_urb) {
- dev_err(&dev->udev->dev,
- "ReadStream: Unable to alloc URB\n");
- return -ENOMEM;
- }
- /* transfer buffer allocated in board_init */
- usb_fill_bulk_urb(pipe_info->stream_urb, dev->udev,
- pipe,
- pipe_info->transfer_buffer,
- pipe_info->cur_transfer_size,
- read_pipe_completion, pipe_info);
-
- dprintk(4, "submitting URB %p\n", pipe_info->stream_urb);
- retval = usb_submit_urb(pipe_info->stream_urb, GFP_KERNEL);
- if (retval) {
- printk(KERN_ERR "s2255: start read pipe failed\n");
- return retval;
- }
+ dprintk(2, "%s: IN %d\n", __func__, dev->read_endpoint);
+ pipe_info->state = 1;
+ pipe_info->err_count = 0;
+ pipe_info->stream_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!pipe_info->stream_urb) {
+ dev_err(&dev->udev->dev,
+ "ReadStream: Unable to alloc URB\n");
+ return -ENOMEM;
+ }
+ /* transfer buffer allocated in board_init */
+ usb_fill_bulk_urb(pipe_info->stream_urb, dev->udev,
+ pipe,
+ pipe_info->transfer_buffer,
+ pipe_info->cur_transfer_size,
+ read_pipe_completion, pipe_info);
+ retval = usb_submit_urb(pipe_info->stream_urb, GFP_KERNEL);
+ if (retval) {
+ printk(KERN_ERR "s2255: start read pipe failed\n");
+ return retval;
}
-
return 0;
}
@@ -2347,10 +2465,7 @@ static int s2255_start_acquire(struct s2255_dev *dev, unsigned long chn)
dprintk(2, "start acquire failed, bad channel %lu\n", chn);
return -1;
}
-
chn_rev = G_chnmap[chn];
- dprintk(1, "S2255: start acquire %lu \n", chn);
-
buffer = kzalloc(512, GFP_KERNEL);
if (buffer == NULL) {
dev_err(&dev->udev->dev, "out of mem\n");
@@ -2366,9 +2481,9 @@ static int s2255_start_acquire(struct s2255_dev *dev, unsigned long chn)
}
/* send the start command */
- *(u32 *) buffer = IN_DATA_TOKEN;
- *((u32 *) buffer + 1) = (u32) chn_rev;
- *((u32 *) buffer + 2) = (u32) CMD_START;
+ *(__le32 *) buffer = IN_DATA_TOKEN;
+ *((__le32 *) buffer + 1) = (__le32) cpu_to_le32(chn_rev);
+ *((__le32 *) buffer + 2) = CMD_START;
res = s2255_write_config(dev->udev, (unsigned char *)buffer, 512);
if (res != 0)
dev_err(&dev->udev->dev, "CMD_START error\n");
@@ -2383,65 +2498,44 @@ static int s2255_stop_acquire(struct s2255_dev *dev, unsigned long chn)
unsigned char *buffer;
int res;
unsigned long chn_rev;
-
if (chn >= MAX_CHANNELS) {
dprintk(2, "stop acquire failed, bad channel %lu\n", chn);
return -1;
}
chn_rev = G_chnmap[chn];
-
buffer = kzalloc(512, GFP_KERNEL);
if (buffer == NULL) {
dev_err(&dev->udev->dev, "out of mem\n");
return -ENOMEM;
}
-
/* send the stop command */
- dprintk(4, "stop acquire %lu\n", chn);
- *(u32 *) buffer = IN_DATA_TOKEN;
- *((u32 *) buffer + 1) = (u32) chn_rev;
- *((u32 *) buffer + 2) = CMD_STOP;
+ *(__le32 *) buffer = IN_DATA_TOKEN;
+ *((__le32 *) buffer + 1) = (__le32) cpu_to_le32(chn_rev);
+ *((__le32 *) buffer + 2) = CMD_STOP;
res = s2255_write_config(dev->udev, (unsigned char *)buffer, 512);
-
if (res != 0)
dev_err(&dev->udev->dev, "CMD_STOP error\n");
-
- dprintk(4, "stop acquire: releasing states \n");
-
kfree(buffer);
dev->b_acquire[chn] = 0;
-
+ dprintk(4, "%s: chn %lu, res %d\n", __func__, chn, res);
return res;
}
static void s2255_stop_readpipe(struct s2255_dev *dev)
{
- int j;
-
+ struct s2255_pipeinfo *pipe = &dev->pipe;
if (dev == NULL) {
s2255_dev_err(&dev->udev->dev, "invalid device\n");
return;
}
- dprintk(4, "stop read pipe\n");
- for (j = 0; j < MAX_PIPE_BUFFERS; j++) {
- struct s2255_pipeinfo *pipe_info = &dev->pipes[j];
- if (pipe_info) {
- if (pipe_info->state == 0)
- continue;
- pipe_info->state = 0;
- }
- }
-
- for (j = 0; j < MAX_PIPE_BUFFERS; j++) {
- struct s2255_pipeinfo *pipe_info = &dev->pipes[j];
- if (pipe_info->stream_urb) {
- /* cancel urb */
- usb_kill_urb(pipe_info->stream_urb);
- usb_free_urb(pipe_info->stream_urb);
- pipe_info->stream_urb = NULL;
- }
+ pipe->state = 0;
+ if (pipe->stream_urb) {
+ /* cancel urb */
+ usb_kill_urb(pipe->stream_urb);
+ usb_free_urb(pipe->stream_urb);
+ pipe->stream_urb = NULL;
}
- dprintk(2, "s2255 stop read pipe: %d\n", j);
+ dprintk(4, "%s", __func__);
return;
}
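Stopping the read pipe now deals with the single URB directly: mark the pipe inactive, synchronously cancel the URB, free it, and clear the pointer. The same teardown sequence written as a hypothetical helper:

#include <linux/usb.h>

static void my_kill_stream_urb(struct urb **urbp)
{
    if (*urbp) {
        usb_kill_urb(*urbp);   /* cancel and wait for the completion handler */
        usb_free_urb(*urbp);   /* drop our reference */
        *urbp = NULL;          /* guard against a double free */
    }
}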
@@ -2473,32 +2567,28 @@ static int s2255_probe(struct usb_interface *interface,
int retval = -ENOMEM;
__le32 *pdata;
int fw_size;
-
- dprintk(2, "s2255: probe\n");
-
+ dprintk(2, "%s\n", __func__);
/* allocate memory for our device state and initialize it to zero */
dev = kzalloc(sizeof(struct s2255_dev), GFP_KERNEL);
if (dev == NULL) {
s2255_dev_err(&interface->dev, "out of memory\n");
- goto error;
+ return -ENOMEM;
}
-
+ atomic_set(&dev->channels, 0);
+ dev->pid = id->idProduct;
dev->fw_data = kzalloc(sizeof(struct s2255_fw), GFP_KERNEL);
if (!dev->fw_data)
- goto error;
-
+ goto errorFWDATA1;
mutex_init(&dev->lock);
mutex_init(&dev->open_lock);
-
/* grab usb_device and save it */
dev->udev = usb_get_dev(interface_to_usbdev(interface));
if (dev->udev == NULL) {
dev_err(&interface->dev, "null usb device\n");
retval = -ENODEV;
- goto error;
+ goto errorUDEV;
}
- kref_init(&dev->kref);
- dprintk(1, "dev: %p, kref: %p udev %p interface %p\n", dev, &dev->kref,
+ dprintk(1, "dev: %p, udev %p interface %p\n", dev,
dev->udev, interface);
dev->interface = interface;
/* set up the endpoint information */
@@ -2514,39 +2604,33 @@ static int s2255_probe(struct usb_interface *interface,
if (!dev->read_endpoint) {
dev_err(&interface->dev, "Could not find bulk-in endpoint\n");
- goto error;
+ goto errorEP;
}
-
- /* set intfdata */
- usb_set_intfdata(interface, dev);
-
- dprintk(100, "after intfdata %p\n", dev);
-
init_timer(&dev->timer);
dev->timer.function = s2255_timer;
dev->timer.data = (unsigned long)dev->fw_data;
-
init_waitqueue_head(&dev->fw_data->wait_fw);
- for (i = 0; i < MAX_CHANNELS; i++)
+ for (i = 0; i < MAX_CHANNELS; i++) {
init_waitqueue_head(&dev->wait_setmode[i]);
-
+ init_waitqueue_head(&dev->wait_vidstatus[i]);
+ }
dev->fw_data->fw_urb = usb_alloc_urb(0, GFP_KERNEL);
-
if (!dev->fw_data->fw_urb) {
dev_err(&interface->dev, "out of memory!\n");
- goto error;
+ goto errorFWURB;
}
+
dev->fw_data->pfw_data = kzalloc(CHUNK_SIZE, GFP_KERNEL);
if (!dev->fw_data->pfw_data) {
dev_err(&interface->dev, "out of memory!\n");
- goto error;
+ goto errorFWDATA2;
}
/* load the first chunk */
if (request_firmware(&dev->fw_data->fw,
FIRMWARE_FILE_NAME, &dev->udev->dev)) {
printk(KERN_ERR "sensoray 2255 failed to get firmware\n");
- goto error;
+ goto errorREQFW;
}
/* check the firmware is valid */
fw_size = dev->fw_data->fw->size;
@@ -2555,59 +2639,80 @@ static int s2255_probe(struct usb_interface *interface,
if (*pdata != S2255_FW_MARKER) {
printk(KERN_INFO "Firmware invalid.\n");
retval = -ENODEV;
- goto error;
+ goto errorFWMARKER;
} else {
/* make sure firmware is the latest */
__le32 *pRel;
pRel = (__le32 *) &dev->fw_data->fw->data[fw_size - 4];
printk(KERN_INFO "s2255 dsp fw version %x\n", *pRel);
+ dev->dsp_fw_ver = *pRel;
+ if (*pRel < S2255_CUR_DSP_FWVER)
+ printk(KERN_INFO "s2255: f2255usb.bin out of date.\n");
+ if (dev->pid == 0x2257 && *pRel < S2255_MIN_DSP_COLORFILTER)
+ printk(KERN_WARNING "s2255: 2257 requires firmware %d"
+ "or above.\n", S2255_MIN_DSP_COLORFILTER);
}
- /* loads v4l specific */
- s2255_probe_v4l(dev);
usb_reset_device(dev->udev);
/* load 2255 board specific */
retval = s2255_board_init(dev);
if (retval)
- goto error;
-
- dprintk(4, "before probe done %p\n", dev);
+ goto errorBOARDINIT;
spin_lock_init(&dev->slock);
-
s2255_fwload_start(dev, 0);
+ /* loads v4l specific */
+ retval = s2255_probe_v4l(dev);
+ if (retval)
+ goto errorBOARDINIT;
dev_info(&interface->dev, "Sensoray 2255 detected\n");
return 0;
-error:
+errorBOARDINIT:
+ s2255_board_shutdown(dev);
+errorFWMARKER:
+ release_firmware(dev->fw_data->fw);
+errorREQFW:
+ kfree(dev->fw_data->pfw_data);
+errorFWDATA2:
+ usb_free_urb(dev->fw_data->fw_urb);
+errorFWURB:
+ del_timer(&dev->timer);
+errorEP:
+ usb_put_dev(dev->udev);
+errorUDEV:
+ kfree(dev->fw_data);
+ mutex_destroy(&dev->open_lock);
+ mutex_destroy(&dev->lock);
+errorFWDATA1:
+ kfree(dev);
+ printk(KERN_WARNING "Sensoray 2255 driver load failed: 0x%x\n", retval);
return retval;
}
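The probe error handling above is converted to the usual kernel goto-unwind ladder: every failure jumps to a label that releases only what had already been set up, in reverse order of allocation. A compressed, self-contained sketch of the shape (hypothetical setup/teardown steps):

static int alloc_a(void) { return 0; }   /* hypothetical setup steps */
static int alloc_b(void) { return 0; }
static int alloc_c(void) { return 0; }
static void free_a(void) { }
static void free_b(void) { }

static int my_probe(void)
{
    int ret;

    ret = alloc_a();           /* step 1 */
    if (ret)
        return ret;
    ret = alloc_b();           /* step 2 */
    if (ret)
        goto err_a;
    ret = alloc_c();           /* step 3 */
    if (ret)
        goto err_b;
    return 0;

err_b:
    free_b();                  /* undo step 2 */
err_a:
    free_a();                  /* undo step 1 */
    return ret;
}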
/* disconnect routine. when board is removed physically or with rmmod */
static void s2255_disconnect(struct usb_interface *interface)
{
- struct s2255_dev *dev = NULL;
+ struct s2255_dev *dev = to_s2255_dev(usb_get_intfdata(interface));
int i;
- dprintk(1, "s2255: disconnect interface %p\n", interface);
- dev = usb_get_intfdata(interface);
-
- /*
- * wake up any of the timers to allow open_lock to be
- * acquired sooner
- */
+ int channels = atomic_read(&dev->channels);
+ v4l2_device_unregister(&dev->v4l2_dev);
+ /* see comments in the uvc_driver.c USB disconnect function */
+ atomic_inc(&dev->channels);
+ /* unregister each video device. */
+ for (i = 0; i < channels; i++) {
+ if (video_is_registered(&dev->vdev[i]))
+ video_unregister_device(&dev->vdev[i]);
+ }
+ /* wake up any of our timers */
atomic_set(&dev->fw_data->fw_state, S2255_FW_DISCONNECTING);
wake_up(&dev->fw_data->wait_fw);
for (i = 0; i < MAX_CHANNELS; i++) {
dev->setmode_ready[i] = 1;
wake_up(&dev->wait_setmode[i]);
+ dev->vidstatus_ready[i] = 1;
+ wake_up(&dev->wait_vidstatus[i]);
}
-
- mutex_lock(&dev->open_lock);
- usb_set_intfdata(interface, NULL);
- mutex_unlock(&dev->open_lock);
-
- if (dev) {
- kref_put(&dev->kref, s2255_destroy);
- dprintk(1, "s2255drv: disconnect\n");
- dev_info(&interface->dev, "s2255usb now disconnected\n");
- }
+ if (atomic_dec_and_test(&dev->channels))
+ s2255_destroy(dev);
+ dev_info(&interface->dev, "%s\n", __func__);
}
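Disconnect follows the uvc driver's trick referenced in the comment above: take one extra count before unregistering the nodes, wake every waiter, then drop that count, so the final teardown runs exactly once whether or not files are still open. Continuing the earlier refcount sketch (my_dev and my_destroy are the same hypothetical names):

static void my_disconnect(struct my_dev *dev)
{
    atomic_inc(&dev->channels);   /* keep the device alive across unregister */
    /* unregister the video nodes here; open file handles keep their counts */
    if (atomic_dec_and_test(&dev->channels))
        my_destroy(dev);          /* nothing was open: free immediately */
    /* otherwise the last release callback frees it later */
}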
static struct usb_driver s2255_driver = {
@@ -2620,15 +2725,12 @@ static struct usb_driver s2255_driver = {
static int __init usb_s2255_init(void)
{
int result;
-
/* register this driver with the USB subsystem */
result = usb_register(&s2255_driver);
-
if (result)
pr_err(KBUILD_MODNAME
- ": usb_register failed. Error number %d\n", result);
-
- dprintk(2, "s2255_init: done\n");
+ ": usb_register failed. Error number %d\n", result);
+ dprintk(2, "%s\n", __func__);
return result;
}
diff --git a/drivers/media/video/saa7115.c b/drivers/media/video/saa7115.c
index c0a7f8a369f4..53b6fcde3800 100644
--- a/drivers/media/video/saa7115.c
+++ b/drivers/media/video/saa7115.c
@@ -74,6 +74,7 @@ struct saa711x_state {
int contrast;
int hue;
int sat;
+ int chroma_agc;
int width;
int height;
u32 ident;
@@ -592,7 +593,7 @@ static const unsigned char saa7115_init_misc[] = {
R_5D_DID, 0xbd,
R_5E_SDID, 0x35,
- R_02_INPUT_CNTL_1, 0x84, /* input tuner -> input 4, amplifier active */
+ R_02_INPUT_CNTL_1, 0xc4, /* input tuner -> input 4, amplifier active */
R_80_GLOBAL_CNTL_1, 0x20, /* enable task B */
R_88_POWER_SAVE_ADC_PORT_CNTL, 0xd0,
@@ -743,6 +744,7 @@ static int saa711x_s_clock_freq(struct v4l2_subdev *sd, u32 freq)
static int saa711x_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
struct saa711x_state *state = to_state(sd);
+ u8 val;
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
@@ -784,7 +786,21 @@ static int saa711x_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
state->hue = ctrl->value;
saa711x_write(sd, R_0D_CHROMA_HUE_CNTL, state->hue);
break;
-
+ case V4L2_CID_CHROMA_AGC:
+ val = saa711x_read(sd, R_0F_CHROMA_GAIN_CNTL);
+ state->chroma_agc = ctrl->value;
+ if (ctrl->value)
+ val &= 0x7f;
+ else
+ val |= 0x80;
+ saa711x_write(sd, R_0F_CHROMA_GAIN_CNTL, val);
+ break;
+ case V4L2_CID_CHROMA_GAIN:
+ /* Chroma gain cannot be set when AGC is enabled */
+ if (state->chroma_agc == 1)
+ return -EINVAL;
+ saa711x_write(sd, R_0F_CHROMA_GAIN_CNTL, ctrl->value | 0x80);
+ break;
default:
return -EINVAL;
}
@@ -809,6 +825,12 @@ static int saa711x_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
case V4L2_CID_HUE:
ctrl->value = state->hue;
break;
+ case V4L2_CID_CHROMA_AGC:
+ ctrl->value = state->chroma_agc;
+ break;
+ case V4L2_CID_CHROMA_GAIN:
+ ctrl->value = saa711x_read(sd, R_0F_CHROMA_GAIN_CNTL) & 0x7f;
+ break;
default:
return -EINVAL;
}
@@ -1069,7 +1091,7 @@ static void saa711x_set_lcr(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_forma
saa7115_cfg_vbi_off);
}
-static int saa711x_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
+static int saa711x_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *sliced)
{
static u16 lcr2vbi[] = {
0, V4L2_SLICED_TELETEXT_B, 0, /* 1 */
@@ -1078,11 +1100,8 @@ static int saa711x_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
V4L2_SLICED_VPS, 0, 0, 0, 0, /* 7 */
0, 0, 0, 0
};
- struct v4l2_sliced_vbi_format *sliced = &fmt->fmt.sliced;
int i;
- if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
- return -EINVAL;
memset(sliced, 0, sizeof(*sliced));
/* done if using raw VBI */
if (saa711x_read(sd, R_80_GLOBAL_CNTL_1) & 0x10)
@@ -1098,16 +1117,27 @@ static int saa711x_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
return 0;
}
+static int saa711x_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
+{
+ if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
+ return -EINVAL;
+ return saa711x_g_sliced_fmt(sd, &fmt->fmt.sliced);
+}
+
+static int saa711x_s_raw_fmt(struct v4l2_subdev *sd, struct v4l2_vbi_format *fmt)
+{
+ saa711x_set_lcr(sd, NULL);
+ return 0;
+}
+
+static int saa711x_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *fmt)
+{
+ saa711x_set_lcr(sd, fmt);
+ return 0;
+}
+
static int saa711x_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
{
- if (fmt->type == V4L2_BUF_TYPE_SLICED_VBI_CAPTURE) {
- saa711x_set_lcr(sd, &fmt->fmt.sliced);
- return 0;
- }
- if (fmt->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
- saa711x_set_lcr(sd, NULL);
- return 0;
- }
if (fmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
@@ -1209,6 +1239,10 @@ static int saa711x_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
return v4l2_ctrl_query_fill(qc, 0, 127, 1, 64);
case V4L2_CID_HUE:
return v4l2_ctrl_query_fill(qc, -128, 127, 1, 0);
+ case V4L2_CID_CHROMA_AGC:
+ return v4l2_ctrl_query_fill(qc, 0, 1, 1, 1);
+ case V4L2_CID_CHROMA_GAIN:
+ return v4l2_ctrl_query_fill(qc, 0, 127, 1, 48);
default:
return -EINVAL;
}
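In the saa7115 hunks above, bit 7 of the chroma gain register selects manual gain (1) versus AGC (0) and the low seven bits carry the manual gain value, which is why enabling AGC clears 0x80, setting a gain ORs it back in, and reading the gain masks with 0x7f. A sketch of that bit handling on a plain register byte, with the I2C accessors left out:

#include <linux/types.h>

#define CHROMA_MANUAL_GAIN  0x80   /* bit 7: 1 = manual gain, 0 = AGC */
#define CHROMA_GAIN_MASK    0x7f   /* bits 0..6: gain value */

static u8 chroma_set_agc(u8 reg, int agc_on)
{
    return agc_on ? (reg & CHROMA_GAIN_MASK)   /* let the AGC take over */
                  : (reg | CHROMA_MANUAL_GAIN);
}

static u8 chroma_set_gain(u8 gain)
{
    /* writing a manual gain implies switching AGC off */
    return CHROMA_MANUAL_GAIN | (gain & CHROMA_GAIN_MASK);
}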
@@ -1524,18 +1558,25 @@ static const struct v4l2_subdev_video_ops saa711x_video_ops = {
.s_crystal_freq = saa711x_s_crystal_freq,
.g_fmt = saa711x_g_fmt,
.s_fmt = saa711x_s_fmt,
- .g_vbi_data = saa711x_g_vbi_data,
- .decode_vbi_line = saa711x_decode_vbi_line,
.s_stream = saa711x_s_stream,
.querystd = saa711x_querystd,
.g_input_status = saa711x_g_input_status,
};
+static const struct v4l2_subdev_vbi_ops saa711x_vbi_ops = {
+ .g_vbi_data = saa711x_g_vbi_data,
+ .decode_vbi_line = saa711x_decode_vbi_line,
+ .g_sliced_fmt = saa711x_g_sliced_fmt,
+ .s_sliced_fmt = saa711x_s_sliced_fmt,
+ .s_raw_fmt = saa711x_s_raw_fmt,
+};
+
static const struct v4l2_subdev_ops saa711x_ops = {
.core = &saa711x_core_ops,
.tuner = &saa711x_tuner_ops,
.audio = &saa711x_audio_ops,
.video = &saa711x_video_ops,
+ .vbi = &saa711x_vbi_ops,
};
/* ----------------------------------------------------------------------- */
@@ -1593,6 +1634,7 @@ static int saa711x_probe(struct i2c_client *client,
state->contrast = 64;
state->hue = 0;
state->sat = 64;
+ state->chroma_agc = 1;
switch (chip_id) {
case '1':
state->ident = V4L2_IDENT_SAA7111;
diff --git a/drivers/media/video/saa7127.c b/drivers/media/video/saa7127.c
index 250ef84cf5ca..87986ad62f86 100644
--- a/drivers/media/video/saa7127.c
+++ b/drivers/media/video/saa7127.c
@@ -625,29 +625,33 @@ static int saa7127_s_stream(struct v4l2_subdev *sd, int enable)
return saa7127_set_video_enable(sd, enable);
}
-static int saa7127_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
+static int saa7127_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *fmt)
{
struct saa7127_state *state = to_state(sd);
- if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
- return -EINVAL;
-
- memset(&fmt->fmt.sliced, 0, sizeof(fmt->fmt.sliced));
+ memset(fmt, 0, sizeof(*fmt));
if (state->vps_enable)
- fmt->fmt.sliced.service_lines[0][16] = V4L2_SLICED_VPS;
+ fmt->service_lines[0][16] = V4L2_SLICED_VPS;
if (state->wss_enable)
- fmt->fmt.sliced.service_lines[0][23] = V4L2_SLICED_WSS_625;
+ fmt->service_lines[0][23] = V4L2_SLICED_WSS_625;
if (state->cc_enable) {
- fmt->fmt.sliced.service_lines[0][21] = V4L2_SLICED_CAPTION_525;
- fmt->fmt.sliced.service_lines[1][21] = V4L2_SLICED_CAPTION_525;
+ fmt->service_lines[0][21] = V4L2_SLICED_CAPTION_525;
+ fmt->service_lines[1][21] = V4L2_SLICED_CAPTION_525;
}
- fmt->fmt.sliced.service_set =
+ fmt->service_set =
(state->vps_enable ? V4L2_SLICED_VPS : 0) |
(state->wss_enable ? V4L2_SLICED_WSS_625 : 0) |
(state->cc_enable ? V4L2_SLICED_CAPTION_525 : 0);
return 0;
}
+static int saa7127_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
+{
+ if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
+ return -EINVAL;
+ return saa7127_g_sliced_fmt(sd, &fmt->fmt.sliced);
+}
+
static int saa7127_s_vbi_data(struct v4l2_subdev *sd, const struct v4l2_sliced_vbi_data *data)
{
switch (data->id) {
@@ -727,16 +731,21 @@ static const struct v4l2_subdev_core_ops saa7127_core_ops = {
};
static const struct v4l2_subdev_video_ops saa7127_video_ops = {
- .s_vbi_data = saa7127_s_vbi_data,
.g_fmt = saa7127_g_fmt,
.s_std_output = saa7127_s_std_output,
.s_routing = saa7127_s_routing,
.s_stream = saa7127_s_stream,
};
+static const struct v4l2_subdev_vbi_ops saa7127_vbi_ops = {
+ .s_vbi_data = saa7127_s_vbi_data,
+ .g_sliced_fmt = saa7127_g_sliced_fmt,
+};
+
static const struct v4l2_subdev_ops saa7127_ops = {
.core = &saa7127_core_ops,
.video = &saa7127_video_ops,
+ .vbi = &saa7127_vbi_ops,
};
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/video/saa7134/saa7134-alsa.c b/drivers/media/video/saa7134/saa7134-alsa.c
index d48c450ed77c..d3bd82ad010a 100644
--- a/drivers/media/video/saa7134/saa7134-alsa.c
+++ b/drivers/media/video/saa7134/saa7134-alsa.c
@@ -1011,8 +1011,6 @@ static int snd_card_saa7134_new_mixer(snd_card_saa7134_t * chip)
unsigned int idx;
int err, addr;
- if (snd_BUG_ON(!chip))
- return -EINVAL;
strcpy(card->mixername, "SAA7134 Mixer");
for (idx = 0; idx < ARRAY_SIZE(snd_saa7134_volume_controls); idx++) {
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index 297833fb3b4a..72700d4e3941 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -5355,6 +5355,79 @@ struct saa7134_board saa7134_boards[] = {
.amux = LINE2,
},
},
+ [SAA7134_BOARD_HAWELL_HW_404M7] = {
+ /* Hawell HW-404M7 & Hawell HW-808M7 */
+ /* Bogoslovskiy Viktor <bogovic@bk.ru> */
+ .name = "Hawell HW-404M7",
+ .audio_clock = 0x00200000,
+ .tuner_type = UNSET,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .gpiomask = 0x389c00,
+ .inputs = {{
+ .name = name_comp1,
+ .vmux = 3,
+ .amux = LINE1,
+ .gpio = 0x01fc00,
+ } },
+ },
+ [SAA7134_BOARD_BEHOLD_H7] = {
+ /* Beholder Intl. Ltd. Dmitry Belimov <d.belimov@gmail.com> */
+ .name = "Beholder BeholdTV H7",
+ .audio_clock = 0x00187de7,
+ .tuner_type = TUNER_XC5000,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .mpeg = SAA7134_MPEG_DVB,
+ .ts_type = SAA7134_MPEG_TS_PARALLEL,
+ .inputs = { {
+ .name = name_tv,
+ .vmux = 2,
+ .amux = TV,
+ .tv = 1,
+ }, {
+ .name = name_comp1,
+ .vmux = 0,
+ .amux = LINE1,
+ }, {
+ .name = name_svideo,
+ .vmux = 9,
+ .amux = LINE1,
+ } },
+ .radio = {
+ .name = name_radio,
+ .amux = TV,
+ },
+ },
+ [SAA7134_BOARD_BEHOLD_A7] = {
+ /* Beholder Intl. Ltd. Dmitry Belimov <d.belimov@gmail.com> */
+ .name = "Beholder BeholdTV A7",
+ .audio_clock = 0x00187de7,
+ .tuner_type = TUNER_XC5000,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .inputs = { {
+ .name = name_tv,
+ .vmux = 2,
+ .amux = TV,
+ .tv = 1,
+ }, {
+ .name = name_comp1,
+ .vmux = 0,
+ .amux = LINE1,
+ }, {
+ .name = name_svideo,
+ .vmux = 9,
+ .amux = LINE1,
+ } },
+ .radio = {
+ .name = name_radio,
+ .amux = TV,
+ },
+ },
};
@@ -6549,6 +6622,18 @@ struct pci_device_id saa7134_pci_tbl[] = {
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = SAA7134_BOARD_UNKNOWN,
+ }, {
+ .vendor = PCI_VENDOR_ID_PHILIPS,
+ .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
+ .subvendor = 0x5ace, /* Beholder Intl. Ltd. */
+ .subdevice = 0x7190,
+ .driver_data = SAA7134_BOARD_BEHOLD_H7,
+ }, {
+ .vendor = PCI_VENDOR_ID_PHILIPS,
+ .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
+ .subvendor = 0x5ace, /* Beholder Intl. Ltd. */
+ .subdevice = 0x7090,
+ .driver_data = SAA7134_BOARD_BEHOLD_A7,
},{
/* --- end of list --- */
}
@@ -6602,6 +6687,8 @@ static int saa7134_xc5000_callback(struct saa7134_dev *dev,
{
switch (dev->board) {
case SAA7134_BOARD_BEHOLD_X7:
+ case SAA7134_BOARD_BEHOLD_H7:
+ case SAA7134_BOARD_BEHOLD_A7:
if (command == XC5000_TUNER_RESET) {
/* Down and UP pheripherial RESET pin for reset all chips */
saa_writeb(SAA7134_SPECIAL_MODE, 0x00);
@@ -6973,6 +7060,8 @@ int saa7134_board_init1(struct saa7134_dev *dev)
case SAA7134_BOARD_BEHOLD_M6_EXTRA:
case SAA7134_BOARD_BEHOLD_H6:
case SAA7134_BOARD_BEHOLD_X7:
+ case SAA7134_BOARD_BEHOLD_H7:
+ case SAA7134_BOARD_BEHOLD_A7:
dev->has_remote = SAA7134_REMOTE_I2C;
break;
case SAA7134_BOARD_AVERMEDIA_A169_B:
@@ -7215,6 +7304,11 @@ int saa7134_board_init2(struct saa7134_dev *dev)
printk(KERN_INFO "%s: P7131 analog only, using "
"entry of %s\n",
dev->name, saa7134_boards[dev->board].name);
+
+ /* IR init has already happened for other cards, so
+ * we have to catch up. */
+ dev->has_remote = SAA7134_REMOTE_GPIO;
+ saa7134_input_init1(dev);
}
break;
case SAA7134_BOARD_HAUPPAUGE_HVR1150:
@@ -7344,6 +7438,23 @@ int saa7134_board_init2(struct saa7134_dev *dev)
}
break;
}
+ case SAA7134_BOARD_BEHOLD_H6:
+ {
+ u8 data[] = { 0x09, 0x9f, 0x86, 0x11};
+ struct i2c_msg msg = {.addr = 0x61, .flags = 0, .buf = data,
+ .len = sizeof(data)};
+
+ /* After hardware start the TUNER_PHILIPS_FMD1216MEX_MK3 tuner */
+ /* comes up with the IF disabled and DVB-T enabled. When saa7134 */
+ /* scans the I2C bus it does not detect the tda9887 IF chip and */
+ /* TV cannot be watched without a software reboot. To solve this, */
+ /* switch the tuner to analog TV mode manually. */
+ if (i2c_transfer(&dev->i2c_adap, &msg, 1) != 1)
+ printk(KERN_WARNING
+ "%s: Unable to enable IF of the tuner.\n",
+ dev->name);
+ break;
+ }
} /* switch() */
/* initialize tuner */
diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c
index a7ad7810fddc..90f231881297 100644
--- a/drivers/media/video/saa7134/saa7134-core.c
+++ b/drivers/media/video/saa7134/saa7134-core.c
@@ -471,7 +471,7 @@ static char *irqbits[] = {
"DONE_RA0", "DONE_RA1", "DONE_RA2", "DONE_RA3",
"AR", "PE", "PWR_ON", "RDCAP", "INTL", "FIDT", "MMC",
"TRIG_ERR", "CONF_ERR", "LOAD_ERR",
- "GPIO16?", "GPIO18", "GPIO22", "GPIO23"
+ "GPIO16", "GPIO18", "GPIO22", "GPIO23"
};
#define IRQBITS ARRAY_SIZE(irqbits)
@@ -601,12 +601,14 @@ static irqreturn_t saa7134_irq(int irq, void *dev_id)
/* disable gpio16 IRQ */
printk(KERN_WARNING "%s/irq: looping -- "
"clearing GPIO16 enable bit\n",dev->name);
- saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO16);
+ saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO16_P);
+ saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO16_N);
} else if (report & SAA7134_IRQ_REPORT_GPIO18) {
/* disable gpio18 IRQs */
printk(KERN_WARNING "%s/irq: looping -- "
"clearing GPIO18 enable bit\n",dev->name);
- saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO18);
+ saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO18_P);
+ saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO18_N);
} else {
/* disable all irqs */
printk(KERN_WARNING "%s/irq: looping -- "
@@ -698,11 +700,13 @@ static int saa7134_hw_enable2(struct saa7134_dev *dev)
if (dev->has_remote == SAA7134_REMOTE_GPIO && dev->remote) {
if (dev->remote->mask_keydown & 0x10000)
- irq2_mask |= SAA7134_IRQ2_INTE_GPIO16;
- else if (dev->remote->mask_keydown & 0x40000)
- irq2_mask |= SAA7134_IRQ2_INTE_GPIO18;
- else if (dev->remote->mask_keyup & 0x40000)
- irq2_mask |= SAA7134_IRQ2_INTE_GPIO18A;
+ irq2_mask |= SAA7134_IRQ2_INTE_GPIO16_N;
+ else { /* Allow enabling both IRQ edge triggers */
+ if (dev->remote->mask_keydown & 0x40000)
+ irq2_mask |= SAA7134_IRQ2_INTE_GPIO18_P;
+ if (dev->remote->mask_keyup & 0x40000)
+ irq2_mask |= SAA7134_IRQ2_INTE_GPIO18_N;
+ }
}
if (dev->has_remote == SAA7134_REMOTE_I2C) {
@@ -1227,7 +1231,7 @@ static int saa7134_resume(struct pci_dev *pci_dev)
if (card_has_mpeg(dev))
saa7134_ts_init_hw(dev);
if (dev->remote)
- saa7134_ir_start(dev, dev->remote);
+ saa7134_ir_start(dev);
saa7134_hw_enable1(dev);
msleep(100);
diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c
index 4ab4a987c9b9..31e82be1b7e7 100644
--- a/drivers/media/video/saa7134/saa7134-dvb.c
+++ b/drivers/media/video/saa7134/saa7134-dvb.c
@@ -1532,6 +1532,15 @@ static int dvb_init(struct saa7134_dev *dev)
&dev->i2c_adap, &behold_x7_tunerconfig);
}
break;
+ case SAA7134_BOARD_BEHOLD_H7:
+ fe0->dvb.frontend = dvb_attach(zl10353_attach,
+ &behold_x7_config,
+ &dev->i2c_adap);
+ if (fe0->dvb.frontend) {
+ dvb_attach(xc5000_attach, fe0->dvb.frontend,
+ &dev->i2c_adap, &behold_x7_tunerconfig);
+ }
+ break;
case SAA7134_BOARD_AVERMEDIA_A700_PRO:
case SAA7134_BOARD_AVERMEDIA_A700_HYBRID:
/* Zarlink ZL10313 */
diff --git a/drivers/media/video/saa7134/saa7134-input.c b/drivers/media/video/saa7134/saa7134-input.c
index 58a0cdc8414a..e5565e2fd426 100644
--- a/drivers/media/video/saa7134/saa7134-input.c
+++ b/drivers/media/video/saa7134/saa7134-input.c
@@ -28,6 +28,8 @@
#include "saa7134-reg.h"
#include "saa7134.h"
+#define MODULE_NAME "saa7134"
+
static unsigned int disable_ir;
module_param(disable_ir, int, 0444);
MODULE_PARM_DESC(disable_ir,"disable infrared remote support");
@@ -66,6 +68,7 @@ MODULE_PARM_DESC(disable_other_ir, "disable full codes of "
/* Helper functions for RC5 and NEC decoding at GPIO16 or GPIO18 */
static int saa7134_rc5_irq(struct saa7134_dev *dev);
static int saa7134_nec_irq(struct saa7134_dev *dev);
+static int saa7134_raw_decode_irq(struct saa7134_dev *dev);
static void nec_task(unsigned long data);
static void saa7134_nec_timer(unsigned long data);
@@ -397,14 +400,23 @@ static int get_key_pinnacle_color(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
void saa7134_input_irq(struct saa7134_dev *dev)
{
- struct card_ir *ir = dev->remote;
+ struct card_ir *ir;
+
+ if (!dev || !dev->remote)
+ return;
+
+ ir = dev->remote;
+ if (!ir->running)
+ return;
if (ir->nec_gpio) {
saa7134_nec_irq(dev);
- } else if (!ir->polling && !ir->rc5_gpio) {
+ } else if (!ir->polling && !ir->rc5_gpio && !ir->raw_decode) {
build_key(dev);
} else if (ir->rc5_gpio) {
saa7134_rc5_irq(dev);
+ } else if (ir->raw_decode) {
+ saa7134_raw_decode_irq(dev);
}
}
@@ -417,8 +429,32 @@ static void saa7134_input_timer(unsigned long data)
mod_timer(&ir->timer, jiffies + msecs_to_jiffies(ir->polling));
}
-void saa7134_ir_start(struct saa7134_dev *dev, struct card_ir *ir)
+void ir_raw_decode_timer_end(unsigned long data)
+{
+ struct saa7134_dev *dev = (struct saa7134_dev *)data;
+ struct card_ir *ir = dev->remote;
+
+ ir_raw_event_handle(dev->remote->dev);
+
+ ir->active = 0;
+}
+
+static int __saa7134_ir_start(void *priv)
{
+ struct saa7134_dev *dev = priv;
+ struct card_ir *ir;
+
+ if (!dev)
+ return -EINVAL;
+
+ ir = dev->remote;
+ if (!ir)
+ return -EINVAL;
+
+ if (ir->running)
+ return 0;
+
+ ir->running = 1;
if (ir->polling) {
setup_timer(&ir->timer, saa7134_input_timer,
(unsigned long)dev);
@@ -441,26 +477,125 @@ void saa7134_ir_start(struct saa7134_dev *dev, struct card_ir *ir)
setup_timer(&ir->timer_keyup, saa7134_nec_timer,
(unsigned long)dev);
tasklet_init(&ir->tlet, nec_task, (unsigned long)dev);
+ } else if (ir->raw_decode) {
+ /* set timer_end for code completion */
+ init_timer(&ir->timer_end);
+ ir->timer_end.function = ir_raw_decode_timer_end;
+ ir->timer_end.data = (unsigned long)dev;
+ ir->active = 0;
}
+
+ return 0;
}
-void saa7134_ir_stop(struct saa7134_dev *dev)
+static void __saa7134_ir_stop(void *priv)
{
+ struct saa7134_dev *dev = priv;
+ struct card_ir *ir;
+
+ if (!dev)
+ return;
+
+ ir = dev->remote;
+ if (!ir)
+ return;
+
+ if (!ir->running)
+ return;
if (dev->remote->polling)
del_timer_sync(&dev->remote->timer);
+ else if (ir->rc5_gpio)
+ del_timer_sync(&ir->timer_end);
+ else if (ir->nec_gpio)
+ tasklet_kill(&ir->tlet);
+ else if (ir->raw_decode) {
+ del_timer_sync(&ir->timer_end);
+ ir->active = 0;
+ }
+
+ ir->running = 0;
+
+ return;
+}
+
+int saa7134_ir_start(struct saa7134_dev *dev)
+{
+ if (dev->remote->users)
+ return __saa7134_ir_start(dev);
+
+ return 0;
+}
+
+void saa7134_ir_stop(struct saa7134_dev *dev)
+{
+ if (dev->remote->users)
+ __saa7134_ir_stop(dev);
+}
+
+static int saa7134_ir_open(void *priv)
+{
+ struct saa7134_dev *dev = priv;
+
+ dev->remote->users++;
+ return __saa7134_ir_start(dev);
+}
+
+static void saa7134_ir_close(void *priv)
+{
+ struct saa7134_dev *dev = priv;
+
+ dev->remote->users--;
+ if (!dev->remote->users)
+ __saa7134_ir_stop(dev);
+}
+
+
+int saa7134_ir_change_protocol(void *priv, u64 ir_type)
+{
+ struct saa7134_dev *dev = priv;
+ struct card_ir *ir = dev->remote;
+ u32 nec_gpio, rc5_gpio;
+
+ if (ir_type == IR_TYPE_RC5) {
+ dprintk("Changing protocol to RC5\n");
+ nec_gpio = 0;
+ rc5_gpio = 1;
+ } else if (ir_type == IR_TYPE_NEC) {
+ dprintk("Changing protocol to NEC\n");
+ nec_gpio = 1;
+ rc5_gpio = 0;
+ } else {
+ dprintk("IR protocol type %ud is not supported\n",
+ (unsigned)ir_type);
+ return -EINVAL;
+ }
+
+ if (ir->running) {
+ saa7134_ir_stop(dev);
+ ir->nec_gpio = nec_gpio;
+ ir->rc5_gpio = rc5_gpio;
+ saa7134_ir_start(dev);
+ } else {
+ ir->nec_gpio = nec_gpio;
+ ir->rc5_gpio = rc5_gpio;
+ }
+
+ return 0;
}
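saa7134_ir_change_protocol() above switches decoders by quiescing the IR engine, flipping the per-protocol flags, and restarting only if it had been running. The same stop/reconfigure/restart pattern as a minimal sketch with hypothetical names:

enum my_proto { MY_PROTO_RC5, MY_PROTO_NEC };

struct my_ir {
    int running;
    enum my_proto proto;
};

static void my_ir_stop(struct my_ir *ir)  { ir->running = 0; }
static void my_ir_start(struct my_ir *ir) { ir->running = 1; }

static int my_change_protocol(struct my_ir *ir, enum my_proto p)
{
    int was_running = ir->running;

    if (was_running)
        my_ir_stop(ir);      /* quiesce before touching decoder state */
    ir->proto = p;
    if (was_running)
        my_ir_start(ir);     /* resume with the new decoder */
    return 0;
}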
int saa7134_input_init1(struct saa7134_dev *dev)
{
struct card_ir *ir;
struct input_dev *input_dev;
- struct ir_scancode_table *ir_codes = NULL;
+ char *ir_codes = NULL;
u32 mask_keycode = 0;
u32 mask_keydown = 0;
u32 mask_keyup = 0;
int polling = 0;
int rc5_gpio = 0;
int nec_gpio = 0;
+ int raw_decode = 0;
+ int allow_protocol_change = 0;
u64 ir_type = IR_TYPE_OTHER;
int err;
@@ -476,27 +611,27 @@ int saa7134_input_init1(struct saa7134_dev *dev)
case SAA7134_BOARD_FLYTVPLATINUM_FM:
case SAA7134_BOARD_FLYTVPLATINUM_MINI2:
case SAA7134_BOARD_ROVERMEDIA_LINK_PRO_FM:
- ir_codes = &ir_codes_flyvideo_table;
+ ir_codes = RC_MAP_FLYVIDEO;
mask_keycode = 0xEC00000;
mask_keydown = 0x0040000;
break;
case SAA7134_BOARD_CINERGY400:
case SAA7134_BOARD_CINERGY600:
case SAA7134_BOARD_CINERGY600_MK3:
- ir_codes = &ir_codes_cinergy_table;
+ ir_codes = RC_MAP_CINERGY;
mask_keycode = 0x00003f;
mask_keyup = 0x040000;
break;
case SAA7134_BOARD_ECS_TVP3XP:
case SAA7134_BOARD_ECS_TVP3XP_4CB5:
- ir_codes = &ir_codes_eztv_table;
+ ir_codes = RC_MAP_EZTV;
mask_keycode = 0x00017c;
mask_keyup = 0x000002;
polling = 50; // ms
break;
case SAA7134_BOARD_KWORLD_XPERT:
case SAA7134_BOARD_AVACSSMARTTV:
- ir_codes = &ir_codes_pixelview_table;
+ ir_codes = RC_MAP_PIXELVIEW;
mask_keycode = 0x00001F;
mask_keyup = 0x000020;
polling = 50; // ms
@@ -513,7 +648,7 @@ int saa7134_input_init1(struct saa7134_dev *dev)
case SAA7134_BOARD_AVERMEDIA_GO_007_FM:
case SAA7134_BOARD_AVERMEDIA_M102:
case SAA7134_BOARD_AVERMEDIA_GO_007_FM_PLUS:
- ir_codes = &ir_codes_avermedia_table;
+ ir_codes = RC_MAP_AVERMEDIA;
mask_keycode = 0x0007C8;
mask_keydown = 0x000010;
polling = 50; // ms
@@ -522,14 +657,15 @@ int saa7134_input_init1(struct saa7134_dev *dev)
saa_setb(SAA7134_GPIO_GPSTATUS0, 0x4);
break;
case SAA7134_BOARD_AVERMEDIA_M135A:
- ir_codes = &ir_codes_avermedia_m135a_table;
- mask_keydown = 0x0040000;
- mask_keycode = 0x00013f;
- nec_gpio = 1;
+ ir_codes = RC_MAP_AVERMEDIA_M135A_RM_JX;
+ mask_keydown = 0x0040000; /* Enable GPIO18 line on both edges */
+ mask_keyup = 0x0040000;
+ mask_keycode = 0xffff;
+ raw_decode = 1;
break;
case SAA7134_BOARD_AVERMEDIA_777:
case SAA7134_BOARD_AVERMEDIA_A16AR:
- ir_codes = &ir_codes_avermedia_table;
+ ir_codes = RC_MAP_AVERMEDIA;
mask_keycode = 0x02F200;
mask_keydown = 0x000400;
polling = 50; // ms
@@ -538,7 +674,7 @@ int saa7134_input_init1(struct saa7134_dev *dev)
saa_setb(SAA7134_GPIO_GPSTATUS1, 0x1);
break;
case SAA7134_BOARD_AVERMEDIA_A16D:
- ir_codes = &ir_codes_avermedia_a16d_table;
+ ir_codes = RC_MAP_AVERMEDIA_A16D;
mask_keycode = 0x02F200;
mask_keydown = 0x000400;
polling = 50; /* ms */
@@ -547,14 +683,14 @@ int saa7134_input_init1(struct saa7134_dev *dev)
saa_setb(SAA7134_GPIO_GPSTATUS1, 0x1);
break;
case SAA7134_BOARD_KWORLD_TERMINATOR:
- ir_codes = &ir_codes_pixelview_table;
+ ir_codes = RC_MAP_PIXELVIEW;
mask_keycode = 0x00001f;
mask_keyup = 0x000060;
polling = 50; // ms
break;
case SAA7134_BOARD_MANLI_MTV001:
case SAA7134_BOARD_MANLI_MTV002:
- ir_codes = &ir_codes_manli_table;
+ ir_codes = RC_MAP_MANLI;
mask_keycode = 0x001f00;
mask_keyup = 0x004000;
polling = 50; /* ms */
@@ -574,25 +710,25 @@ int saa7134_input_init1(struct saa7134_dev *dev)
case SAA7134_BOARD_BEHOLD_507_9FM:
case SAA7134_BOARD_BEHOLD_507RDS_MK3:
case SAA7134_BOARD_BEHOLD_507RDS_MK5:
- ir_codes = &ir_codes_manli_table;
+ ir_codes = RC_MAP_MANLI;
mask_keycode = 0x003f00;
mask_keyup = 0x004000;
polling = 50; /* ms */
break;
case SAA7134_BOARD_BEHOLD_COLUMBUS_TVFM:
- ir_codes = &ir_codes_behold_columbus_table;
+ ir_codes = RC_MAP_BEHOLD_COLUMBUS;
mask_keycode = 0x003f00;
mask_keyup = 0x004000;
polling = 50; // ms
break;
case SAA7134_BOARD_SEDNA_PC_TV_CARDBUS:
- ir_codes = &ir_codes_pctv_sedna_table;
+ ir_codes = RC_MAP_PCTV_SEDNA;
mask_keycode = 0x001f00;
mask_keyup = 0x004000;
polling = 50; // ms
break;
case SAA7134_BOARD_GOTVIEW_7135:
- ir_codes = &ir_codes_gotview7135_table;
+ ir_codes = RC_MAP_GOTVIEW7135;
mask_keycode = 0x0003CC;
mask_keydown = 0x000010;
polling = 5; /* ms */
@@ -601,80 +737,80 @@ int saa7134_input_init1(struct saa7134_dev *dev)
case SAA7134_BOARD_VIDEOMATE_TV_PVR:
case SAA7134_BOARD_VIDEOMATE_GOLD_PLUS:
case SAA7134_BOARD_VIDEOMATE_TV_GOLD_PLUSII:
- ir_codes = &ir_codes_videomate_tv_pvr_table;
+ ir_codes = RC_MAP_VIDEOMATE_TV_PVR;
mask_keycode = 0x00003F;
mask_keyup = 0x400000;
polling = 50; // ms
break;
case SAA7134_BOARD_PROTEUS_2309:
- ir_codes = &ir_codes_proteus_2309_table;
+ ir_codes = RC_MAP_PROTEUS_2309;
mask_keycode = 0x00007F;
mask_keyup = 0x000080;
polling = 50; // ms
break;
case SAA7134_BOARD_VIDEOMATE_DVBT_300:
case SAA7134_BOARD_VIDEOMATE_DVBT_200:
- ir_codes = &ir_codes_videomate_tv_pvr_table;
+ ir_codes = RC_MAP_VIDEOMATE_TV_PVR;
mask_keycode = 0x003F00;
mask_keyup = 0x040000;
break;
case SAA7134_BOARD_FLYDVBS_LR300:
case SAA7134_BOARD_FLYDVBT_LR301:
case SAA7134_BOARD_FLYDVBTDUO:
- ir_codes = &ir_codes_flydvb_table;
+ ir_codes = RC_MAP_FLYDVB;
mask_keycode = 0x0001F00;
mask_keydown = 0x0040000;
break;
case SAA7134_BOARD_ASUSTeK_P7131_DUAL:
case SAA7134_BOARD_ASUSTeK_P7131_HYBRID_LNA:
case SAA7134_BOARD_ASUSTeK_P7131_ANALOG:
- ir_codes = &ir_codes_asus_pc39_table;
+ ir_codes = RC_MAP_ASUS_PC39;
mask_keydown = 0x0040000;
rc5_gpio = 1;
break;
case SAA7134_BOARD_ENCORE_ENLTV:
case SAA7134_BOARD_ENCORE_ENLTV_FM:
- ir_codes = &ir_codes_encore_enltv_table;
+ ir_codes = RC_MAP_ENCORE_ENLTV;
mask_keycode = 0x00007f;
mask_keyup = 0x040000;
polling = 50; // ms
break;
case SAA7134_BOARD_ENCORE_ENLTV_FM53:
- ir_codes = &ir_codes_encore_enltv_fm53_table;
+ ir_codes = RC_MAP_ENCORE_ENLTV_FM53;
mask_keydown = 0x0040000;
mask_keycode = 0x00007f;
nec_gpio = 1;
break;
case SAA7134_BOARD_10MOONSTVMASTER3:
- ir_codes = &ir_codes_encore_enltv_table;
+ ir_codes = RC_MAP_ENCORE_ENLTV;
mask_keycode = 0x5f80000;
mask_keyup = 0x8000000;
polling = 50; //ms
break;
case SAA7134_BOARD_GENIUS_TVGO_A11MCE:
- ir_codes = &ir_codes_genius_tvgo_a11mce_table;
+ ir_codes = RC_MAP_GENIUS_TVGO_A11MCE;
mask_keycode = 0xff;
mask_keydown = 0xf00000;
polling = 50; /* ms */
break;
case SAA7134_BOARD_REAL_ANGEL_220:
- ir_codes = &ir_codes_real_audio_220_32_keys_table;
+ ir_codes = RC_MAP_REAL_AUDIO_220_32_KEYS;
mask_keycode = 0x3f00;
mask_keyup = 0x4000;
polling = 50; /* ms */
break;
case SAA7134_BOARD_KWORLD_PLUS_TV_ANALOG:
- ir_codes = &ir_codes_kworld_plus_tv_analog_table;
+ ir_codes = RC_MAP_KWORLD_PLUS_TV_ANALOG;
mask_keycode = 0x7f;
polling = 40; /* ms */
break;
case SAA7134_BOARD_VIDEOMATE_S350:
- ir_codes = &ir_codes_videomate_s350_table;
+ ir_codes = RC_MAP_VIDEOMATE_S350;
mask_keycode = 0x003f00;
mask_keydown = 0x040000;
break;
case SAA7134_BOARD_LEADTEK_WINFAST_DTV1000S:
- ir_codes = &ir_codes_winfast_table;
+ ir_codes = RC_MAP_WINFAST;
mask_keycode = 0x5f00;
mask_keyup = 0x020000;
polling = 50; /* ms */
@@ -695,6 +831,9 @@ int saa7134_input_init1(struct saa7134_dev *dev)
}
ir->dev = input_dev;
+ dev->remote = ir;
+
+ ir->running = 0;
/* init hardware-specific stuff */
ir->mask_keycode = mask_keycode;
@@ -703,6 +842,7 @@ int saa7134_input_init1(struct saa7134_dev *dev)
ir->polling = polling;
ir->rc5_gpio = rc5_gpio;
ir->nec_gpio = nec_gpio;
+ ir->raw_decode = raw_decode;
/* init input device */
snprintf(ir->name, sizeof(ir->name), "saa7134 IR (%s)",
@@ -710,6 +850,19 @@ int saa7134_input_init1(struct saa7134_dev *dev)
snprintf(ir->phys, sizeof(ir->phys), "pci-%s/ir0",
pci_name(dev->pci));
+
+ ir->props.priv = dev;
+ ir->props.open = saa7134_ir_open;
+ ir->props.close = saa7134_ir_close;
+
+ if (raw_decode)
+ ir->props.driver_type = RC_DRIVER_IR_RAW;
+
+ if (!raw_decode && allow_protocol_change) {
+ ir->props.allowed_protos = IR_TYPE_RC5 | IR_TYPE_NEC;
+ ir->props.change_protocol = saa7134_ir_change_protocol;
+ }
+
err = ir_input_init(input_dev, &ir->ir, ir_type);
if (err < 0)
goto err_out_free;
@@ -727,12 +880,9 @@ int saa7134_input_init1(struct saa7134_dev *dev)
}
input_dev->dev.parent = &dev->pci->dev;
- dev->remote = ir;
- saa7134_ir_start(dev, ir);
-
- err = ir_input_register(ir->dev, ir_codes, NULL);
+ err = ir_input_register(ir->dev, ir_codes, &ir->props, MODULE_NAME);
if (err)
- goto err_out_stop;
+ goto err_out_free;
/* the remote isn't as bouncy as a keyboard */
ir->dev->rep[REP_DELAY] = repeat_delay;
@@ -740,10 +890,8 @@ int saa7134_input_init1(struct saa7134_dev *dev)
return 0;
- err_out_stop:
- saa7134_ir_stop(dev);
+err_out_free:
dev->remote = NULL;
- err_out_free:
kfree(ir);
return err;
}
@@ -787,24 +935,24 @@ void saa7134_probe_i2c_ir(struct saa7134_dev *dev)
dev->init_data.name = "Pinnacle PCTV";
if (pinnacle_remote == 0) {
dev->init_data.get_key = get_key_pinnacle_color;
- dev->init_data.ir_codes = &ir_codes_pinnacle_color_table;
+ dev->init_data.ir_codes = RC_MAP_PINNACLE_COLOR;
info.addr = 0x47;
} else {
dev->init_data.get_key = get_key_pinnacle_grey;
- dev->init_data.ir_codes = &ir_codes_pinnacle_grey_table;
+ dev->init_data.ir_codes = RC_MAP_PINNACLE_GREY;
info.addr = 0x47;
}
break;
case SAA7134_BOARD_UPMOST_PURPLE_TV:
dev->init_data.name = "Purple TV";
dev->init_data.get_key = get_key_purpletv;
- dev->init_data.ir_codes = &ir_codes_purpletv_table;
+ dev->init_data.ir_codes = RC_MAP_PURPLETV;
info.addr = 0x7a;
break;
case SAA7134_BOARD_MSI_TVATANYWHERE_PLUS:
dev->init_data.name = "MSI TV@nywhere Plus";
dev->init_data.get_key = get_key_msi_tvanywhere_plus;
- dev->init_data.ir_codes = &ir_codes_msi_tvanywhere_plus_table;
+ dev->init_data.ir_codes = RC_MAP_MSI_TVANYWHERE_PLUS;
info.addr = 0x30;
/* MSI TV@nywhere Plus controller doesn't seem to
respond to probes unless we read something from
@@ -818,7 +966,7 @@ void saa7134_probe_i2c_ir(struct saa7134_dev *dev)
case SAA7134_BOARD_HAUPPAUGE_HVR1110:
dev->init_data.name = "HVR 1110";
dev->init_data.get_key = get_key_hvr1110;
- dev->init_data.ir_codes = &ir_codes_hauppauge_new_table;
+ dev->init_data.ir_codes = RC_MAP_HAUPPAUGE_NEW;
info.addr = 0x71;
break;
case SAA7134_BOARD_BEHOLD_607FM_MK3:
@@ -834,9 +982,12 @@ void saa7134_probe_i2c_ir(struct saa7134_dev *dev)
case SAA7134_BOARD_BEHOLD_M6_EXTRA:
case SAA7134_BOARD_BEHOLD_H6:
case SAA7134_BOARD_BEHOLD_X7:
+ case SAA7134_BOARD_BEHOLD_H7:
+ case SAA7134_BOARD_BEHOLD_A7:
dev->init_data.name = "BeholdTV";
dev->init_data.get_key = get_key_beholdm6xx;
- dev->init_data.ir_codes = &ir_codes_behold_table;
+ dev->init_data.ir_codes = RC_MAP_BEHOLD;
+ dev->init_data.type = IR_TYPE_NEC;
info.addr = 0x2d;
break;
case SAA7134_BOARD_AVERMEDIA_CARDBUS_501:
@@ -846,7 +997,7 @@ void saa7134_probe_i2c_ir(struct saa7134_dev *dev)
case SAA7134_BOARD_FLYDVB_TRIO:
dev->init_data.name = "FlyDVB Trio";
dev->init_data.get_key = get_key_flydvb_trio;
- dev->init_data.ir_codes = &ir_codes_flydvb_table;
+ dev->init_data.ir_codes = RC_MAP_FLYDVB;
info.addr = 0x0b;
break;
default:
@@ -859,6 +1010,33 @@ void saa7134_probe_i2c_ir(struct saa7134_dev *dev)
i2c_new_device(&dev->i2c_adap, &info);
}
+static int saa7134_raw_decode_irq(struct saa7134_dev *dev)
+{
+ struct card_ir *ir = dev->remote;
+ unsigned long timeout;
+ int space;
+
+ /* Generate initial event */
+ saa_clearb(SAA7134_GPIO_GPMODE3, SAA7134_GPIO_GPRESCAN);
+ saa_setb(SAA7134_GPIO_GPMODE3, SAA7134_GPIO_GPRESCAN);
+ space = saa_readl(SAA7134_GPIO_GPSTATUS0 >> 2) & ir->mask_keydown;
+ ir_raw_event_store_edge(dev->remote->dev, space ? IR_SPACE : IR_PULSE);
+
+
+ /*
+ * Wait 15 ms from the start of the first IR event before processing
+ * the event. This time is enough for NEC protocol. May need adjustments
+ * to work with other protocols.
+ */
+ if (!ir->active) {
+ timeout = jiffies + msecs_to_jiffies(15);
+ mod_timer(&ir->timer_end, timeout);
+ ir->active = 1;
+ }
+
+ return 1;
+}
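The raw-decode interrupt above records each GPIO edge as a pulse/space event and arms a one-shot timer on the first edge of a burst; when the timer fires, the accumulated events are handed to the decoder in one batch. A sketch of that edge-capture / end-of-burst pattern (hypothetical names, event storage and decoding left as comments):

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_raw_ir {
    struct timer_list timer_end;
    int active;                 /* a burst is being collected */
};

static void my_burst_end(unsigned long data)
{
    struct my_raw_ir *ir = (struct my_raw_ir *)data;

    /* hand the collected edges to the protocol decoder here */
    ir->active = 0;
}

static void my_raw_ir_init(struct my_raw_ir *ir)
{
    setup_timer(&ir->timer_end, my_burst_end, (unsigned long)ir);
    ir->active = 0;
}

static void my_edge_irq(struct my_raw_ir *ir, int space)
{
    /* store the edge (pulse or space) for later decoding here */
    if (!ir->active) {
        /* first edge of the burst: decode roughly 15 ms from now */
        mod_timer(&ir->timer_end, jiffies + msecs_to_jiffies(15));
        ir->active = 1;
    }
}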
+
static int saa7134_rc5_irq(struct saa7134_dev *dev)
{
struct card_ir *ir = dev->remote;
@@ -901,7 +1079,6 @@ static int saa7134_rc5_irq(struct saa7134_dev *dev)
return 1;
}
-
/* On NEC protocol, One has 2.25 ms, and zero has 1.125 ms
The first pulse (start) has 9 + 4.5 ms
*/
@@ -1011,14 +1188,14 @@ static void nec_task(unsigned long data)
/* Keep repeating the last key */
mod_timer(&ir->timer_keyup, jiffies + msecs_to_jiffies(150));
- saa_setl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO18);
+ saa_setl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO18_P);
}
static int saa7134_nec_irq(struct saa7134_dev *dev)
{
struct card_ir *ir = dev->remote;
- saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO18);
+ saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO18_P);
tasklet_schedule(&ir->tlet);
return 1;
diff --git a/drivers/media/video/saa7134/saa7134-reg.h b/drivers/media/video/saa7134/saa7134-reg.h
index cf89d96d7295..e7e0af101fa7 100644
--- a/drivers/media/video/saa7134/saa7134-reg.h
+++ b/drivers/media/video/saa7134/saa7134-reg.h
@@ -112,17 +112,17 @@
#define SAA7134_IRQ1_INTE_RA0_0 (1 << 0)
#define SAA7134_IRQ2 (0x2c8 >> 2)
-#define SAA7134_IRQ2_INTE_GPIO23A (1 << 17)
-#define SAA7134_IRQ2_INTE_GPIO23 (1 << 16)
-#define SAA7134_IRQ2_INTE_GPIO22A (1 << 15)
-#define SAA7134_IRQ2_INTE_GPIO22 (1 << 14)
-#define SAA7134_IRQ2_INTE_GPIO18A (1 << 13)
-#define SAA7134_IRQ2_INTE_GPIO18 (1 << 12)
-#define SAA7134_IRQ2_INTE_GPIO16 (1 << 11) /* not certain */
-#define SAA7134_IRQ2_INTE_SC2 (1 << 10)
-#define SAA7134_IRQ2_INTE_SC1 (1 << 9)
-#define SAA7134_IRQ2_INTE_SC0 (1 << 8)
-#define SAA7134_IRQ2_INTE_DEC5 (1 << 7)
+#define SAA7134_IRQ2_INTE_GPIO23_N (1 << 17) /* negative edge */
+#define SAA7134_IRQ2_INTE_GPIO23_P (1 << 16) /* positive edge */
+#define SAA7134_IRQ2_INTE_GPIO22_N (1 << 15) /* negative edge */
+#define SAA7134_IRQ2_INTE_GPIO22_P (1 << 14) /* positive edge */
+#define SAA7134_IRQ2_INTE_GPIO18_N (1 << 13) /* negative edge */
+#define SAA7134_IRQ2_INTE_GPIO18_P (1 << 12) /* positive edge */
+#define SAA7134_IRQ2_INTE_GPIO16_N (1 << 11) /* negative edge */
+#define SAA7134_IRQ2_INTE_GPIO16_P (1 << 10) /* positive edge */
+#define SAA7134_IRQ2_INTE_SC2 (1 << 9)
+#define SAA7134_IRQ2_INTE_SC1 (1 << 8)
+#define SAA7134_IRQ2_INTE_SC0 (1 << 7)
#define SAA7134_IRQ2_INTE_DEC4 (1 << 6)
#define SAA7134_IRQ2_INTE_DEC3 (1 << 5)
#define SAA7134_IRQ2_INTE_DEC2 (1 << 4)
@@ -135,7 +135,7 @@
#define SAA7134_IRQ_REPORT_GPIO23 (1 << 17)
#define SAA7134_IRQ_REPORT_GPIO22 (1 << 16)
#define SAA7134_IRQ_REPORT_GPIO18 (1 << 15)
-#define SAA7134_IRQ_REPORT_GPIO16 (1 << 14) /* not certain */
+#define SAA7134_IRQ_REPORT_GPIO16 (1 << 14)
#define SAA7134_IRQ_REPORT_LOAD_ERR (1 << 13)
#define SAA7134_IRQ_REPORT_CONF_ERR (1 << 12)
#define SAA7134_IRQ_REPORT_TRIG_ERR (1 << 11)
diff --git a/drivers/media/video/saa7134/saa7134-video.c b/drivers/media/video/saa7134/saa7134-video.c
index 31138d3e51bb..7806fb17e742 100644
--- a/drivers/media/video/saa7134/saa7134-video.c
+++ b/drivers/media/video/saa7134/saa7134-video.c
@@ -1632,8 +1632,15 @@ static int saa7134_try_fmt_vid_cap(struct file *file, void *priv,
}
f->fmt.pix.field = field;
- v4l_bound_align_image(&f->fmt.pix.width, 48, maxw, 2,
- &f->fmt.pix.height, 32, maxh, 0, 0);
+ if (f->fmt.pix.width < 48)
+ f->fmt.pix.width = 48;
+ if (f->fmt.pix.height < 32)
+ f->fmt.pix.height = 32;
+ if (f->fmt.pix.width > maxw)
+ f->fmt.pix.width = maxw;
+ if (f->fmt.pix.height > maxh)
+ f->fmt.pix.height = maxh;
+ f->fmt.pix.width &= ~0x03;
f->fmt.pix.bytesperline =
(f->fmt.pix.width * fmt->depth) >> 3;
f->fmt.pix.sizeimage =
diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h
index bf130967ed17..3962534267be 100644
--- a/drivers/media/video/saa7134/saa7134.h
+++ b/drivers/media/video/saa7134/saa7134.h
@@ -20,7 +20,7 @@
*/
#include <linux/version.h>
-#define SAA7134_VERSION_CODE KERNEL_VERSION(0,2,15)
+#define SAA7134_VERSION_CODE KERNEL_VERSION(0, 2, 16)
#include <linux/pci.h>
#include <linux/i2c.h>
@@ -300,6 +300,9 @@ struct saa7134_format {
#define SAA7134_BOARD_ASUS_EUROPA_HYBRID 174
#define SAA7134_BOARD_LEADTEK_WINFAST_DTV1000S 175
#define SAA7134_BOARD_BEHOLD_505RDS_MK3 176
+#define SAA7134_BOARD_HAWELL_HW_404M7 177
+#define SAA7134_BOARD_BEHOLD_H7 178
+#define SAA7134_BOARD_BEHOLD_A7 179
#define SAA7134_MAXBOARDS 32
#define SAA7134_INPUT_MAX 8
@@ -809,7 +812,7 @@ int saa7134_input_init1(struct saa7134_dev *dev);
void saa7134_input_fini(struct saa7134_dev *dev);
void saa7134_input_irq(struct saa7134_dev *dev);
void saa7134_probe_i2c_ir(struct saa7134_dev *dev);
-void saa7134_ir_start(struct saa7134_dev *dev, struct card_ir *ir);
+int saa7134_ir_start(struct saa7134_dev *dev);
void saa7134_ir_stop(struct saa7134_dev *dev);
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index 1ad980f8e770..4ac3b482fbb4 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -115,9 +115,20 @@ struct sh_mobile_ceu_dev {
};
struct sh_mobile_ceu_cam {
- struct v4l2_rect ceu_rect;
- unsigned int cam_width;
- unsigned int cam_height;
+ /* CEU crop offsets within the scaled camera output */
+ unsigned int ceu_left;
+ unsigned int ceu_top;
+ /* Client output, as seen by the CEU */
+ unsigned int width;
+ unsigned int height;
+ /*
+ * User window from S_CROP / G_CROP, produced by client cropping and
+ * scaling, CEU scaling and CEU cropping, mapped back onto the client
+ * input window
+ */
+ struct v4l2_rect subrect;
+ /* Camera cropping rectangle */
+ struct v4l2_rect rect;
const struct soc_mbus_pixelfmt *extra_fmt;
enum v4l2_mbus_pixelcode code;
};
@@ -213,8 +224,8 @@ static int sh_mobile_ceu_videobuf_setup(struct videobuf_queue *vq,
*count = 2;
if (pcdev->video_limit) {
- while (PAGE_ALIGN(*size) * *count > pcdev->video_limit)
- (*count)--;
+ if (PAGE_ALIGN(*size) * *count > pcdev->video_limit)
+ *count = pcdev->video_limit / PAGE_ALIGN(*size);
}
dev_dbg(icd->dev.parent, "count=%d, size=%d\n", *count, *size);
@@ -565,38 +576,36 @@ static u16 calc_scale(unsigned int src, unsigned int *dst)
}
/* rect is guaranteed to not exceed the scaled camera rectangle */
-static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd,
- unsigned int out_width,
- unsigned int out_height)
+static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct sh_mobile_ceu_cam *cam = icd->host_priv;
- struct v4l2_rect *rect = &cam->ceu_rect;
struct sh_mobile_ceu_dev *pcdev = ici->priv;
unsigned int height, width, cdwdr_width, in_width, in_height;
unsigned int left_offset, top_offset;
u32 camor;
- dev_dbg(icd->dev.parent, "Crop %ux%u@%u:%u\n",
- rect->width, rect->height, rect->left, rect->top);
+ dev_geo(icd->dev.parent, "Crop %ux%u@%u:%u\n",
+ icd->user_width, icd->user_height, cam->ceu_left, cam->ceu_top);
- left_offset = rect->left;
- top_offset = rect->top;
+ left_offset = cam->ceu_left;
+ top_offset = cam->ceu_top;
+ /* CEU cropping (CFSZR) is applied _after_ the scaling filter (CFLCR) */
if (pcdev->image_mode) {
- in_width = rect->width;
+ in_width = cam->width;
if (!pcdev->is_16bit) {
in_width *= 2;
left_offset *= 2;
}
- width = out_width;
- cdwdr_width = out_width;
+ width = icd->user_width;
+ cdwdr_width = icd->user_width;
} else {
- int bytes_per_line = soc_mbus_bytes_per_line(out_width,
+ int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
icd->current_fmt->host_fmt);
unsigned int w_factor;
- width = out_width;
+ width = icd->user_width;
switch (icd->current_fmt->host_fmt->packing) {
case SOC_MBUS_PACKING_2X8_PADHI:
@@ -606,17 +615,17 @@ static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd,
w_factor = 1;
}
- in_width = rect->width * w_factor;
+ in_width = cam->width * w_factor;
left_offset = left_offset * w_factor;
if (bytes_per_line < 0)
- cdwdr_width = out_width;
+ cdwdr_width = icd->user_width;
else
cdwdr_width = bytes_per_line;
}
- height = out_height;
- in_height = rect->height;
+ height = icd->user_height;
+ in_height = cam->height;
if (V4L2_FIELD_NONE != pcdev->field) {
height /= 2;
in_height /= 2;
@@ -775,9 +784,10 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
}
ceu_write(pcdev, CAIFR, value);
- sh_mobile_ceu_set_rect(icd, icd->user_width, icd->user_height);
+ sh_mobile_ceu_set_rect(icd);
mdelay(1);
+ dev_geo(icd->dev.parent, "CFLCR 0x%x\n", pcdev->cflcr);
ceu_write(pcdev, CFLCR, pcdev->cflcr);
/*
@@ -866,6 +876,8 @@ static bool sh_mobile_ceu_packing_supported(const struct soc_mbus_pixelfmt *fmt)
fmt->packing == SOC_MBUS_PACKING_EXTEND16);
}
+static int client_g_rect(struct v4l2_subdev *sd, struct v4l2_rect *rect);
+
static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, int idx,
struct soc_camera_format_xlate *xlate)
{
@@ -894,10 +906,55 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, int idx,
return 0;
if (!icd->host_priv) {
+ struct v4l2_mbus_framefmt mf;
+ struct v4l2_rect rect;
+ struct device *dev = icd->dev.parent;
+ int shift = 0;
+
+ /* FIXME: subwindow is lost between close / open */
+
+ /* Cache current client geometry */
+ ret = client_g_rect(sd, &rect);
+ if (ret < 0)
+ return ret;
+
+ /* First time */
+ ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
+
+ while ((mf.width > 2560 || mf.height > 1920) && shift < 4) {
+ /* Try 2560x1920, 1280x960, 640x480, 320x240 */
+ mf.width = 2560 >> shift;
+ mf.height = 1920 >> shift;
+ ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
+ shift++;
+ }
+
+ if (shift == 4) {
+ dev_err(dev, "Failed to configure the client below %ux%u\n",
+ mf.width, mf.height);
+ return -EIO;
+ }
+
+ dev_geo(dev, "camera fmt %ux%u\n", mf.width, mf.height);
+
cam = kzalloc(sizeof(*cam), GFP_KERNEL);
if (!cam)
return -ENOMEM;
+ /* We are called with current camera crop, initialise subrect with it */
+ cam->rect = rect;
+ cam->subrect = rect;
+
+ cam->width = mf.width;
+ cam->height = mf.height;
+
icd->host_priv = cam;
} else {
cam = icd->host_priv;
@@ -979,16 +1036,12 @@ static unsigned int scale_down(unsigned int size, unsigned int scale)
return (size * 4096 + scale / 2) / scale;
}
-static unsigned int scale_up(unsigned int size, unsigned int scale)
-{
- return (size * scale + 2048) / 4096;
-}
-
static unsigned int calc_generic_scale(unsigned int input, unsigned int output)
{
return (input * 4096 + output / 2) / output;
}
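+
+/*
+ * The generic scale values here are Q12 fixed point: scale = input * 4096 /
+ * output. For example (illustrative numbers), a 640 pixel wide client window
+ * scaled to a 320 pixel user window gives scale = 8192, and
+ * scale_down(640, 8192) = 320 recovers the output width.
+ */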
+/* Get and store current client crop */
static int client_g_rect(struct v4l2_subdev *sd, struct v4l2_rect *rect)
{
struct v4l2_crop crop;
@@ -1007,25 +1060,51 @@ static int client_g_rect(struct v4l2_subdev *sd, struct v4l2_rect *rect)
cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
ret = v4l2_subdev_call(sd, video, cropcap, &cap);
- if (ret < 0)
- return ret;
-
- *rect = cap.defrect;
+ if (!ret)
+ *rect = cap.defrect;
return ret;
}
+/* Client crop has changed, update our sub-rectangle to remain within the area */
+static void update_subrect(struct sh_mobile_ceu_cam *cam)
+{
+ struct v4l2_rect *rect = &cam->rect, *subrect = &cam->subrect;
+
+ if (rect->width < subrect->width)
+ subrect->width = rect->width;
+
+ if (rect->height < subrect->height)
+ subrect->height = rect->height;
+
+ if (rect->left > subrect->left)
+ subrect->left = rect->left;
+ else if (rect->left + rect->width >
+ subrect->left + subrect->width)
+ subrect->left = rect->left + rect->width -
+ subrect->width;
+
+ if (rect->top > subrect->top)
+ subrect->top = rect->top;
+ else if (rect->top + rect->height >
+ subrect->top + subrect->height)
+ subrect->top = rect->top + rect->height -
+ subrect->height;
+}
+
/*
* The common for both scaling and cropping iterative approach is:
* 1. try if the client can produce exactly what was requested by the user
* 2. if (1) failed, try to double the client image until we get one big enough
* 3. if (2) failed, try to request the maximum image
*/
-static int client_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *crop,
+static int client_s_crop(struct soc_camera_device *icd, struct v4l2_crop *crop,
struct v4l2_crop *cam_crop)
{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct v4l2_rect *rect = &crop->c, *cam_rect = &cam_crop->c;
struct device *dev = sd->v4l2_dev->dev;
+ struct sh_mobile_ceu_cam *cam = icd->host_priv;
struct v4l2_cropcap cap;
int ret;
unsigned int width, height;
@@ -1041,13 +1120,14 @@ static int client_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *crop,
*/
if (!memcmp(rect, cam_rect, sizeof(*rect))) {
/* Even if camera S_CROP failed, but camera rectangle matches */
- dev_dbg(dev, "Camera S_CROP successful for %ux%u@%u:%u\n",
+ dev_dbg(dev, "Camera S_CROP successful for %dx%d@%d:%d\n",
rect->width, rect->height, rect->left, rect->top);
+ cam->rect = *cam_rect;
return 0;
}
/* Try to fix cropping, that camera hasn't managed to set */
- dev_geo(dev, "Fix camera S_CROP for %ux%u@%u:%u to %ux%u@%u:%u\n",
+ dev_geo(dev, "Fix camera S_CROP for %dx%d@%d:%d to %dx%d@%d:%d\n",
cam_rect->width, cam_rect->height,
cam_rect->left, cam_rect->top,
rect->width, rect->height, rect->left, rect->top);
@@ -1057,6 +1137,7 @@ static int client_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *crop,
if (ret < 0)
return ret;
+ /* Put user requested rectangle within sensor bounds */
soc_camera_limit_side(&rect->left, &rect->width, cap.bounds.left, 2,
cap.bounds.width);
soc_camera_limit_side(&rect->top, &rect->height, cap.bounds.top, 4,
@@ -1069,6 +1150,10 @@ static int client_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *crop,
width = max(cam_rect->width, 2);
height = max(cam_rect->height, 2);
+ /*
+ * Loop as long as sensor is not covering the requested rectangle and
+ * is still within its bounds
+ */
while (!ret && (is_smaller(cam_rect, rect) ||
is_inside(cam_rect, rect)) &&
(cap.bounds.width > width || cap.bounds.height > height)) {
@@ -1086,6 +1171,7 @@ static int client_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *crop,
* target left, set it to the middle point between the current
* left and minimum left. But that would add too much
* complexity: we would have to iterate each border separately.
+ * Instead we just drop to the left and top bounds.
*/
if (cam_rect->left > rect->left)
cam_rect->left = cap.bounds.left;
@@ -1103,7 +1189,7 @@ static int client_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *crop,
v4l2_subdev_call(sd, video, s_crop, cam_crop);
ret = client_g_rect(sd, cam_rect);
- dev_geo(dev, "Camera S_CROP %d for %ux%u@%u:%u\n", ret,
+ dev_geo(dev, "Camera S_CROP %d for %dx%d@%d:%d\n", ret,
cam_rect->width, cam_rect->height,
cam_rect->left, cam_rect->top);
}
@@ -1117,82 +1203,24 @@ static int client_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *crop,
*cam_rect = cap.bounds;
v4l2_subdev_call(sd, video, s_crop, cam_crop);
ret = client_g_rect(sd, cam_rect);
- dev_geo(dev, "Camera S_CROP %d for max %ux%u@%u:%u\n", ret,
+ dev_geo(dev, "Camera S_CROP %d for max %dx%d@%d:%d\n", ret,
cam_rect->width, cam_rect->height,
cam_rect->left, cam_rect->top);
}
- return ret;
-}
-
-static int get_camera_scales(struct v4l2_subdev *sd, struct v4l2_rect *rect,
- unsigned int *scale_h, unsigned int *scale_v)
-{
- struct v4l2_mbus_framefmt mf;
- int ret;
-
- ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
- if (ret < 0)
- return ret;
-
- *scale_h = calc_generic_scale(rect->width, mf.width);
- *scale_v = calc_generic_scale(rect->height, mf.height);
-
- return 0;
-}
-
-static int get_camera_subwin(struct soc_camera_device *icd,
- struct v4l2_rect *cam_subrect,
- unsigned int cam_hscale, unsigned int cam_vscale)
-{
- struct sh_mobile_ceu_cam *cam = icd->host_priv;
- struct v4l2_rect *ceu_rect = &cam->ceu_rect;
-
- if (!ceu_rect->width) {
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct device *dev = icd->dev.parent;
- struct v4l2_mbus_framefmt mf;
- int ret;
- /* First time */
-
- ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
- if (ret < 0)
- return ret;
-
- dev_geo(dev, "camera fmt %ux%u\n", mf.width, mf.height);
-
- if (mf.width > 2560) {
- ceu_rect->width = 2560;
- ceu_rect->left = (mf.width - 2560) / 2;
- } else {
- ceu_rect->width = mf.width;
- ceu_rect->left = 0;
- }
-
- if (mf.height > 1920) {
- ceu_rect->height = 1920;
- ceu_rect->top = (mf.height - 1920) / 2;
- } else {
- ceu_rect->height = mf.height;
- ceu_rect->top = 0;
- }
-
- dev_geo(dev, "initialised CEU rect %ux%u@%u:%u\n",
- ceu_rect->width, ceu_rect->height,
- ceu_rect->left, ceu_rect->top);
+ if (!ret) {
+ cam->rect = *cam_rect;
+ update_subrect(cam);
}
- cam_subrect->width = scale_up(ceu_rect->width, cam_hscale);
- cam_subrect->left = scale_up(ceu_rect->left, cam_hscale);
- cam_subrect->height = scale_up(ceu_rect->height, cam_vscale);
- cam_subrect->top = scale_up(ceu_rect->top, cam_vscale);
-
- return 0;
+ return ret;
}
+/* Iterative s_mbus_fmt, also updates cached client crop on success */
static int client_s_fmt(struct soc_camera_device *icd,
struct v4l2_mbus_framefmt *mf, bool ceu_can_scale)
{
+ struct sh_mobile_ceu_cam *cam = icd->host_priv;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct device *dev = icd->dev.parent;
unsigned int width = mf->width, height = mf->height, tmp_w, tmp_h;
@@ -1200,6 +1228,15 @@ static int client_s_fmt(struct soc_camera_device *icd,
struct v4l2_cropcap cap;
int ret;
+ ret = v4l2_subdev_call(sd, video, s_mbus_fmt, mf);
+ if (ret < 0)
+ return ret;
+
+ dev_geo(dev, "camera scaled to %ux%u\n", mf->width, mf->height);
+
+ if ((width == mf->width && height == mf->height) || !ceu_can_scale)
+ goto update_cache;
+
cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
ret = v4l2_subdev_call(sd, video, cropcap, &cap);
@@ -1209,15 +1246,6 @@ static int client_s_fmt(struct soc_camera_device *icd,
max_width = min(cap.bounds.width, 2560);
max_height = min(cap.bounds.height, 1920);
- ret = v4l2_subdev_call(sd, video, s_mbus_fmt, mf);
- if (ret < 0)
- return ret;
-
- dev_geo(dev, "camera scaled to %ux%u\n", mf->width, mf->height);
-
- if ((width == mf->width && height == mf->height) || !ceu_can_scale)
- return 0;
-
/* Camera set a format, but geometry is not precise, try to improve */
tmp_w = mf->width;
tmp_h = mf->height;
@@ -1239,26 +1267,37 @@ static int client_s_fmt(struct soc_camera_device *icd,
}
}
+update_cache:
+ /* Update cache */
+ ret = client_g_rect(sd, &cam->rect);
+ if (ret < 0)
+ return ret;
+
+ update_subrect(cam);
+
return 0;
}
/**
- * @rect - camera cropped rectangle
- * @sub_rect - CEU cropped rectangle, mapped back to camera input area
- * @ceu_rect - on output calculated CEU crop rectangle
+ * @width - on output: user width, mapped back to input
+ * @height - on output: user height, mapped back to input
+ * @mf - in- / output camera output window
*/
-static int client_scale(struct soc_camera_device *icd, struct v4l2_rect *rect,
- struct v4l2_rect *sub_rect, struct v4l2_rect *ceu_rect,
- struct v4l2_mbus_framefmt *mf, bool ceu_can_scale)
+static int client_scale(struct soc_camera_device *icd,
+ struct v4l2_mbus_framefmt *mf,
+ unsigned int *width, unsigned int *height,
+ bool ceu_can_scale)
{
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct sh_mobile_ceu_cam *cam = icd->host_priv;
struct device *dev = icd->dev.parent;
struct v4l2_mbus_framefmt mf_tmp = *mf;
unsigned int scale_h, scale_v;
int ret;
- /* 5. Apply iterative camera S_FMT for camera user window. */
+ /*
+ * 5. Apply iterative camera S_FMT for camera user window (also updates
+ * client crop cache and the imaginary sub-rectangle).
+ */
ret = client_s_fmt(icd, &mf_tmp, ceu_can_scale);
if (ret < 0)
return ret;
@@ -1270,60 +1309,22 @@ static int client_scale(struct soc_camera_device *icd, struct v4l2_rect *rect,
/* unneeded - it is already in "mf_tmp" */
- /* 7. Calculate new camera scales. */
- ret = get_camera_scales(sd, rect, &scale_h, &scale_v);
- if (ret < 0)
- return ret;
-
- dev_geo(dev, "7: camera scales %u:%u\n", scale_h, scale_v);
+ /* 7. Calculate new client scales. */
+ scale_h = calc_generic_scale(cam->rect.width, mf_tmp.width);
+ scale_v = calc_generic_scale(cam->rect.height, mf_tmp.height);
- cam->cam_width = mf_tmp.width;
- cam->cam_height = mf_tmp.height;
mf->width = mf_tmp.width;
mf->height = mf_tmp.height;
mf->colorspace = mf_tmp.colorspace;
/*
* 8. Calculate new CEU crop - apply camera scales to previously
- * calculated "effective" crop.
+ * updated "effective" crop.
*/
- ceu_rect->left = scale_down(sub_rect->left, scale_h);
- ceu_rect->width = scale_down(sub_rect->width, scale_h);
- ceu_rect->top = scale_down(sub_rect->top, scale_v);
- ceu_rect->height = scale_down(sub_rect->height, scale_v);
+ *width = scale_down(cam->subrect.width, scale_h);
+ *height = scale_down(cam->subrect.height, scale_v);
- dev_geo(dev, "8: new CEU rect %ux%u@%u:%u\n",
- ceu_rect->width, ceu_rect->height,
- ceu_rect->left, ceu_rect->top);
-
- return 0;
-}
-
-/* Get combined scales */
-static int get_scales(struct soc_camera_device *icd,
- unsigned int *scale_h, unsigned int *scale_v)
-{
- struct sh_mobile_ceu_cam *cam = icd->host_priv;
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct v4l2_crop cam_crop;
- unsigned int width_in, height_in;
- int ret;
-
- cam_crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
- ret = client_g_rect(sd, &cam_crop.c);
- if (ret < 0)
- return ret;
-
- ret = get_camera_scales(sd, &cam_crop.c, scale_h, scale_v);
- if (ret < 0)
- return ret;
-
- width_in = scale_up(cam->ceu_rect.width, *scale_h);
- height_in = scale_up(cam->ceu_rect.height, *scale_v);
-
- *scale_h = calc_generic_scale(width_in, icd->user_width);
- *scale_v = calc_generic_scale(height_in, icd->user_height);
+ dev_geo(dev, "8: new client sub-window %ux%u\n", *width, *height);
return 0;
}
@@ -1342,115 +1343,165 @@ static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd,
struct sh_mobile_ceu_dev *pcdev = ici->priv;
struct v4l2_crop cam_crop;
struct sh_mobile_ceu_cam *cam = icd->host_priv;
- struct v4l2_rect *cam_rect = &cam_crop.c, *ceu_rect = &cam->ceu_rect;
+ struct v4l2_rect *cam_rect = &cam_crop.c;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct device *dev = icd->dev.parent;
struct v4l2_mbus_framefmt mf;
- unsigned int scale_comb_h, scale_comb_v, scale_ceu_h, scale_ceu_v,
- out_width, out_height;
+ unsigned int scale_cam_h, scale_cam_v, scale_ceu_h, scale_ceu_v,
+ out_width, out_height, scale_h, scale_v;
+ int interm_width, interm_height;
u32 capsr, cflcr;
int ret;
- /* 1. Calculate current combined scales. */
- ret = get_scales(icd, &scale_comb_h, &scale_comb_v);
- if (ret < 0)
- return ret;
+ dev_geo(dev, "S_CROP(%ux%u@%u:%u)\n", rect->width, rect->height,
+ rect->left, rect->top);
- dev_geo(dev, "1: combined scales %u:%u\n", scale_comb_h, scale_comb_v);
+ /* During camera cropping its output window can change too, stop CEU */
+ capsr = capture_save_reset(pcdev);
+ dev_dbg(dev, "CAPSR 0x%x, CFLCR 0x%x\n", capsr, pcdev->cflcr);
- /* 2. Apply iterative camera S_CROP for new input window. */
- ret = client_s_crop(sd, a, &cam_crop);
+ /* 1. - 2. Apply iterative camera S_CROP for new input window. */
+ ret = client_s_crop(icd, a, &cam_crop);
if (ret < 0)
return ret;
- dev_geo(dev, "2: camera cropped to %ux%u@%u:%u\n",
+ dev_geo(dev, "1-2: camera cropped to %ux%u@%u:%u\n",
cam_rect->width, cam_rect->height,
cam_rect->left, cam_rect->top);
/* On success cam_crop contains current camera crop */
- /*
- * 3. If old combined scales applied to new crop produce an impossible
- * user window, adjust scales to produce nearest possible window.
- */
- out_width = scale_down(rect->width, scale_comb_h);
- out_height = scale_down(rect->height, scale_comb_v);
-
- if (out_width > 2560)
- out_width = 2560;
- else if (out_width < 2)
- out_width = 2;
-
- if (out_height > 1920)
- out_height = 1920;
- else if (out_height < 4)
- out_height = 4;
-
- dev_geo(dev, "3: Adjusted output %ux%u\n", out_width, out_height);
-
- /* 4. Use G_CROP to retrieve actual input window: already in cam_crop */
-
- /*
- * 5. Using actual input window and calculated combined scales calculate
- * camera target output window.
- */
- mf.width = scale_down(cam_rect->width, scale_comb_h);
- mf.height = scale_down(cam_rect->height, scale_comb_v);
-
- dev_geo(dev, "5: camera target %ux%u\n", mf.width, mf.height);
-
- /* 6. - 9. */
- mf.code = cam->code;
- mf.field = pcdev->field;
-
- capsr = capture_save_reset(pcdev);
- dev_dbg(dev, "CAPSR 0x%x, CFLCR 0x%x\n", capsr, pcdev->cflcr);
+ /* 3. Retrieve camera output window */
+ ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
- /* Make relative to camera rectangle */
- rect->left -= cam_rect->left;
- rect->top -= cam_rect->top;
+ if (mf.width > 2560 || mf.height > 1920)
+ return -EINVAL;
- ret = client_scale(icd, cam_rect, rect, ceu_rect, &mf,
- pcdev->image_mode &&
- V4L2_FIELD_NONE == pcdev->field);
+ /* Cache camera output window */
+ cam->width = mf.width;
+ cam->height = mf.height;
- dev_geo(dev, "6-9: %d\n", ret);
+ /* 4. Calculate camera scales */
+ scale_cam_h = calc_generic_scale(cam_rect->width, mf.width);
+ scale_cam_v = calc_generic_scale(cam_rect->height, mf.height);
- /* 10. Use CEU cropping to crop to the new window. */
- sh_mobile_ceu_set_rect(icd, out_width, out_height);
+ /* Calculate intermediate window */
+ interm_width = scale_down(rect->width, scale_cam_h);
+ interm_height = scale_down(rect->height, scale_cam_v);
- dev_geo(dev, "10: CEU cropped to %ux%u@%u:%u\n",
- ceu_rect->width, ceu_rect->height,
- ceu_rect->left, ceu_rect->top);
+ if (pcdev->image_mode) {
+ out_width = min(interm_width, icd->user_width);
+ out_height = min(interm_height, icd->user_height);
+ } else {
+ out_width = interm_width;
+ out_height = interm_height;
+ }
/*
- * 11. Calculate CEU scales from camera scales from results of (10) and
- * user window from (3)
+ * 5. Calculate CEU scales from camera scales from results of (5) and
+ * the user window
*/
- scale_ceu_h = calc_scale(ceu_rect->width, &out_width);
- scale_ceu_v = calc_scale(ceu_rect->height, &out_height);
+ scale_ceu_h = calc_scale(interm_width, &out_width);
+ scale_ceu_v = calc_scale(interm_height, &out_height);
- dev_geo(dev, "11: CEU scales %u:%u\n", scale_ceu_h, scale_ceu_v);
+ /* Calculate camera scales */
+ scale_h = calc_generic_scale(cam_rect->width, out_width);
+ scale_v = calc_generic_scale(cam_rect->height, out_height);
- /* 12. Apply CEU scales. */
+ dev_geo(dev, "5: CEU scales %u:%u\n", scale_ceu_h, scale_ceu_v);
+
+ /* Apply CEU scales. */
cflcr = scale_ceu_h | (scale_ceu_v << 16);
if (cflcr != pcdev->cflcr) {
pcdev->cflcr = cflcr;
ceu_write(pcdev, CFLCR, cflcr);
}
+ icd->user_width = out_width;
+ icd->user_height = out_height;
+ cam->ceu_left = scale_down(rect->left - cam_rect->left, scale_h) & ~1;
+ cam->ceu_top = scale_down(rect->top - cam_rect->top, scale_v) & ~1;
+
+ /* 6. Use CEU cropping to crop to the new window. */
+ sh_mobile_ceu_set_rect(icd);
+
+ cam->subrect = *rect;
+
+ dev_geo(dev, "6: CEU cropped to %ux%u@%u:%u\n",
+ icd->user_width, icd->user_height,
+ cam->ceu_left, cam->ceu_top);
+
/* Restore capture */
if (pcdev->active)
capsr |= 1;
capture_restore(pcdev, capsr);
- icd->user_width = out_width;
- icd->user_height = out_height;
-
/* Even if only camera cropping succeeded */
return ret;
}
+static int sh_mobile_ceu_get_crop(struct soc_camera_device *icd,
+ struct v4l2_crop *a)
+{
+ struct sh_mobile_ceu_cam *cam = icd->host_priv;
+
+ a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ a->c = cam->subrect;
+
+ return 0;
+}
+
+/*
+ * Calculate real client output window by applying new scales to the current
+ * client crop. New scales are calculated from the requested output format and
+ * CEU crop, mapped back onto the client input (subrect).
+ */
+static void calculate_client_output(struct soc_camera_device *icd,
+ struct v4l2_pix_format *pix, struct v4l2_mbus_framefmt *mf)
+{
+ struct sh_mobile_ceu_cam *cam = icd->host_priv;
+ struct device *dev = icd->dev.parent;
+ struct v4l2_rect *cam_subrect = &cam->subrect;
+ unsigned int scale_v, scale_h;
+
+ if (cam_subrect->width == cam->rect.width &&
+ cam_subrect->height == cam->rect.height) {
+ /* No sub-cropping */
+ mf->width = pix->width;
+ mf->height = pix->height;
+ return;
+ }
+
+ /* 1.-2. Current camera scales and subwin - cached. */
+
+ dev_geo(dev, "2: subwin %ux%u@%u:%u\n",
+ cam_subrect->width, cam_subrect->height,
+ cam_subrect->left, cam_subrect->top);
+
+ /*
+ * 3. Calculate new combined scales from input sub-window to requested
+ * user window.
+ */
+
+ /*
+ * TODO: the CEU cannot scale images larger than VGA to sizes smaller than
+ * SubQCIF (128x96) or larger than VGA
+ */
+ scale_h = calc_generic_scale(cam_subrect->width, pix->width);
+ scale_v = calc_generic_scale(cam_subrect->height, pix->height);
+
+ dev_geo(dev, "3: scales %u:%u\n", scale_h, scale_v);
+
+ /*
+ * 4. Calculate client output window by applying combined scales to real
+ * input window.
+ */
+ mf->width = scale_down(cam->rect.width, scale_h);
+ mf->height = scale_down(cam->rect.height, scale_v);
+}
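+
+/*
+ * Example (illustrative numbers): for a client crop of 640x480, a subrect of
+ * 320x240 and a requested 320x240 user window the combined scale is 4096
+ * (1:1), so the client is asked for its full 640x480 output and the CEU then
+ * crops the 320x240 sub-window out of it.
+ */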
+
/* Similar to set_crop multistage iterative algorithm */
static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
struct v4l2_format *f)
@@ -1460,18 +1511,17 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
struct sh_mobile_ceu_cam *cam = icd->host_priv;
struct v4l2_pix_format *pix = &f->fmt.pix;
struct v4l2_mbus_framefmt mf;
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct device *dev = icd->dev.parent;
__u32 pixfmt = pix->pixelformat;
const struct soc_camera_format_xlate *xlate;
- struct v4l2_crop cam_crop;
- struct v4l2_rect *cam_rect = &cam_crop.c, cam_subrect, ceu_rect;
- unsigned int scale_cam_h, scale_cam_v;
+ unsigned int ceu_sub_width, ceu_sub_height;
u16 scale_v, scale_h;
int ret;
bool image_mode;
enum v4l2_field field;
+ dev_geo(dev, "S_FMT(pix=0x%x, %ux%u)\n", pixfmt, pix->width, pix->height);
+
switch (pix->field) {
default:
pix->field = V4L2_FIELD_NONE;
@@ -1492,46 +1542,8 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
return -EINVAL;
}
- /* 1. Calculate current camera scales. */
- cam_crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
- ret = client_g_rect(sd, cam_rect);
- if (ret < 0)
- return ret;
-
- ret = get_camera_scales(sd, cam_rect, &scale_cam_h, &scale_cam_v);
- if (ret < 0)
- return ret;
-
- dev_geo(dev, "1: camera scales %u:%u\n", scale_cam_h, scale_cam_v);
-
- /*
- * 2. Calculate "effective" input crop (sensor subwindow) - CEU crop
- * scaled back at current camera scales onto input window.
- */
- ret = get_camera_subwin(icd, &cam_subrect, scale_cam_h, scale_cam_v);
- if (ret < 0)
- return ret;
-
- dev_geo(dev, "2: subwin %ux%u@%u:%u\n",
- cam_subrect.width, cam_subrect.height,
- cam_subrect.left, cam_subrect.top);
-
- /*
- * 3. Calculate new combined scales from "effective" input window to
- * requested user window.
- */
- scale_h = calc_generic_scale(cam_subrect.width, pix->width);
- scale_v = calc_generic_scale(cam_subrect.height, pix->height);
-
- dev_geo(dev, "3: scales %u:%u\n", scale_h, scale_v);
-
- /*
- * 4. Calculate camera output window by applying combined scales to real
- * input window.
- */
- mf.width = scale_down(cam_rect->width, scale_h);
- mf.height = scale_down(cam_rect->height, scale_v);
+ /* 1.-4. Calculate client output geometry */
+ calculate_client_output(icd, &f->fmt.pix, &mf);
mf.field = pix->field;
mf.colorspace = pix->colorspace;
mf.code = xlate->code;
@@ -1547,17 +1559,17 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
image_mode = false;
}
- dev_geo(dev, "4: camera output %ux%u\n", mf.width, mf.height);
+ dev_geo(dev, "4: request camera output %ux%u\n", mf.width, mf.height);
/* 5. - 9. */
- ret = client_scale(icd, cam_rect, &cam_subrect, &ceu_rect, &mf,
+ ret = client_scale(icd, &mf, &ceu_sub_width, &ceu_sub_height,
image_mode && V4L2_FIELD_NONE == field);
- dev_geo(dev, "5-9: client scale %d\n", ret);
+ dev_geo(dev, "5-9: client scale return %d\n", ret);
/* Done with the camera. Now see if we can improve the result */
- dev_dbg(dev, "Camera %d fmt %ux%u, requested %ux%u\n",
+ dev_geo(dev, "Camera %d fmt %ux%u, requested %ux%u\n",
ret, mf.width, mf.height, pix->width, pix->height);
if (ret < 0)
return ret;
@@ -1565,40 +1577,44 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
if (mf.code != xlate->code)
return -EINVAL;
+ /* 9. Prepare CEU crop */
+ cam->width = mf.width;
+ cam->height = mf.height;
+
/* 10. Use CEU scaling to scale to the requested user window. */
/* We cannot scale up */
- if (pix->width > mf.width)
- pix->width = mf.width;
- if (pix->width > ceu_rect.width)
- pix->width = ceu_rect.width;
+ if (pix->width > ceu_sub_width)
+ ceu_sub_width = pix->width;
- if (pix->height > mf.height)
- pix->height = mf.height;
- if (pix->height > ceu_rect.height)
- pix->height = ceu_rect.height;
+ if (pix->height > ceu_sub_height)
+ ceu_sub_height = pix->height;
pix->colorspace = mf.colorspace;
if (image_mode) {
/* Scale pix->{width x height} down to width x height */
- scale_h = calc_scale(ceu_rect.width, &pix->width);
- scale_v = calc_scale(ceu_rect.height, &pix->height);
-
- pcdev->cflcr = scale_h | (scale_v << 16);
+ scale_h = calc_scale(ceu_sub_width, &pix->width);
+ scale_v = calc_scale(ceu_sub_height, &pix->height);
} else {
- pix->width = ceu_rect.width;
- pix->height = ceu_rect.height;
- scale_h = scale_v = 0;
- pcdev->cflcr = 0;
+ pix->width = ceu_sub_width;
+ pix->height = ceu_sub_height;
+ scale_h = 0;
+ scale_v = 0;
}
+ pcdev->cflcr = scale_h | (scale_v << 16);
+
+ /*
+ * We have calculated CFLCR; the actual configuration will be performed
+ * in sh_mobile_ceu_set_bus_param()
+ */
+
dev_geo(dev, "10: W: %u : 0x%x = %u, H: %u : 0x%x = %u\n",
- ceu_rect.width, scale_h, pix->width,
- ceu_rect.height, scale_v, pix->height);
+ ceu_sub_width, scale_h, pix->width,
+ ceu_sub_height, scale_v, pix->height);
cam->code = xlate->code;
- cam->ceu_rect = ceu_rect;
icd->current_fmt = xlate;
pcdev->field = field;
@@ -1820,6 +1836,7 @@ static struct soc_camera_host_ops sh_mobile_ceu_host_ops = {
.remove = sh_mobile_ceu_remove_device,
.get_formats = sh_mobile_ceu_get_formats,
.put_formats = sh_mobile_ceu_put_formats,
+ .get_crop = sh_mobile_ceu_get_crop,
.set_crop = sh_mobile_ceu_set_crop,
.set_fmt = sh_mobile_ceu_set_fmt,
.try_fmt = sh_mobile_ceu_try_fmt,
diff --git a/drivers/media/video/sh_vou.c b/drivers/media/video/sh_vou.c
new file mode 100644
index 000000000000..f5b892a2a8ee
--- /dev/null
+++ b/drivers/media/video/sh_vou.c
@@ -0,0 +1,1476 @@
+/*
+ * SuperH Video Output Unit (VOU) driver
+ *
+ * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/version.h>
+#include <linux/videodev2.h>
+
+#include <media/sh_vou.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mediabus.h>
+#include <media/videobuf-dma-contig.h>
+
+/* Mirror addresses are not available for all registers */
+#define VOUER 0
+#define VOUCR 4
+#define VOUSTR 8
+#define VOUVCR 0xc
+#define VOUISR 0x10
+#define VOUBCR 0x14
+#define VOUDPR 0x18
+#define VOUDSR 0x1c
+#define VOUVPR 0x20
+#define VOUIR 0x24
+#define VOUSRR 0x28
+#define VOUMSR 0x2c
+#define VOUHIR 0x30
+#define VOUDFR 0x34
+#define VOUAD1R 0x38
+#define VOUAD2R 0x3c
+#define VOUAIR 0x40
+#define VOUSWR 0x44
+#define VOURCR 0x48
+#define VOURPR 0x50
+
+enum sh_vou_status {
+ SH_VOU_IDLE,
+ SH_VOU_INITIALISING,
+ SH_VOU_RUNNING,
+};
+
+#define VOU_MAX_IMAGE_WIDTH 720
+#define VOU_MAX_IMAGE_HEIGHT 480
+
+struct sh_vou_device {
+ struct v4l2_device v4l2_dev;
+ struct video_device *vdev;
+ atomic_t use_count;
+ struct sh_vou_pdata *pdata;
+ spinlock_t lock;
+ void __iomem *base;
+ /* State information */
+ struct v4l2_pix_format pix;
+ struct v4l2_rect rect;
+ struct list_head queue;
+ v4l2_std_id std;
+ int pix_idx;
+ struct videobuf_buffer *active;
+ enum sh_vou_status status;
+};
+
+struct sh_vou_file {
+ struct videobuf_queue vbq;
+};
+
+/* Register access routines for sides A, B and mirror addresses */
+static void sh_vou_reg_a_write(struct sh_vou_device *vou_dev, unsigned int reg,
+ u32 value)
+{
+ __raw_writel(value, vou_dev->base + reg);
+}
+
+static void sh_vou_reg_ab_write(struct sh_vou_device *vou_dev, unsigned int reg,
+ u32 value)
+{
+ __raw_writel(value, vou_dev->base + reg);
+ __raw_writel(value, vou_dev->base + reg + 0x1000);
+}
+
+static void sh_vou_reg_m_write(struct sh_vou_device *vou_dev, unsigned int reg,
+ u32 value)
+{
+ __raw_writel(value, vou_dev->base + reg + 0x2000);
+}
+
+static u32 sh_vou_reg_a_read(struct sh_vou_device *vou_dev, unsigned int reg)
+{
+ return __raw_readl(vou_dev->base + reg);
+}
+
+static void sh_vou_reg_a_set(struct sh_vou_device *vou_dev, unsigned int reg,
+ u32 value, u32 mask)
+{
+ u32 old = __raw_readl(vou_dev->base + reg);
+
+ value = (value & mask) | (old & ~mask);
+ __raw_writel(value, vou_dev->base + reg);
+}
+
+static void sh_vou_reg_b_set(struct sh_vou_device *vou_dev, unsigned int reg,
+ u32 value, u32 mask)
+{
+ sh_vou_reg_a_set(vou_dev, reg + 0x1000, value, mask);
+}
+
+static void sh_vou_reg_ab_set(struct sh_vou_device *vou_dev, unsigned int reg,
+ u32 value, u32 mask)
+{
+ sh_vou_reg_a_set(vou_dev, reg, value, mask);
+ sh_vou_reg_b_set(vou_dev, reg, value, mask);
+}
+
+struct sh_vou_fmt {
+ u32 pfmt;
+ char *desc;
+ unsigned char bpp;
+ unsigned char rgb;
+ unsigned char yf;
+ unsigned char pkf;
+};
+
+/* Further pixel formats can be added */
+static struct sh_vou_fmt vou_fmt[] = {
+ {
+ .pfmt = V4L2_PIX_FMT_NV12,
+ .bpp = 12,
+ .desc = "YVU420 planar",
+ .yf = 0,
+ .rgb = 0,
+ },
+ {
+ .pfmt = V4L2_PIX_FMT_NV16,
+ .bpp = 16,
+ .desc = "YVYU planar",
+ .yf = 1,
+ .rgb = 0,
+ },
+ {
+ .pfmt = V4L2_PIX_FMT_RGB24,
+ .bpp = 24,
+ .desc = "RGB24",
+ .pkf = 2,
+ .rgb = 1,
+ },
+ {
+ .pfmt = V4L2_PIX_FMT_RGB565,
+ .bpp = 16,
+ .desc = "RGB565",
+ .pkf = 3,
+ .rgb = 1,
+ },
+ {
+ .pfmt = V4L2_PIX_FMT_RGB565X,
+ .bpp = 16,
+ .desc = "RGB565 byteswapped",
+ .pkf = 3,
+ .rgb = 1,
+ },
+};
+
+static void sh_vou_schedule_next(struct sh_vou_device *vou_dev,
+ struct videobuf_buffer *vb)
+{
+ dma_addr_t addr1, addr2;
+
+ addr1 = videobuf_to_dma_contig(vb);
+ switch (vou_dev->pix.pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
+ addr2 = addr1 + vou_dev->pix.width * vou_dev->pix.height;
+ break;
+ default:
+ addr2 = 0;
+ }
+
+ sh_vou_reg_m_write(vou_dev, VOUAD1R, addr1);
+ sh_vou_reg_m_write(vou_dev, VOUAD2R, addr2);
+}
+
+static void sh_vou_stream_start(struct sh_vou_device *vou_dev,
+ struct videobuf_buffer *vb)
+{
+ unsigned int row_coeff;
+#ifdef __LITTLE_ENDIAN
+ u32 dataswap = 7;
+#else
+ u32 dataswap = 0;
+#endif
+
+ switch (vou_dev->pix.pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
+ row_coeff = 1;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ dataswap ^= 1;
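+ /* fall through: RGB565 only differs from RGB565X by the byte swap */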
+ case V4L2_PIX_FMT_RGB565X:
+ row_coeff = 2;
+ break;
+ case V4L2_PIX_FMT_RGB24:
+ row_coeff = 3;
+ break;
+ }
+
+ sh_vou_reg_a_write(vou_dev, VOUSWR, dataswap);
+ sh_vou_reg_ab_write(vou_dev, VOUAIR, vou_dev->pix.width * row_coeff);
+ sh_vou_schedule_next(vou_dev, vb);
+}
+
+static void free_buffer(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+{
+ BUG_ON(in_interrupt());
+
+ /* Wait until this buffer is no longer in STATE_QUEUED or STATE_ACTIVE */
+ videobuf_waiton(vb, 0, 0);
+ videobuf_dma_contig_free(vq, vb);
+ vb->state = VIDEOBUF_NEEDS_INIT;
+}
+
+/* Locking: caller holds vq->vb_lock mutex */
+static int sh_vou_buf_setup(struct videobuf_queue *vq, unsigned int *count,
+ unsigned int *size)
+{
+ struct video_device *vdev = vq->priv_data;
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+
+ *size = vou_fmt[vou_dev->pix_idx].bpp * vou_dev->pix.width *
+ vou_dev->pix.height / 8;
+
+ if (*count < 2)
+ *count = 2;
+
+ /* Taking into account maximum frame size, *count will stay >= 2 */
+ if (PAGE_ALIGN(*size) * *count > 4 * 1024 * 1024)
+ *count = 4 * 1024 * 1024 / PAGE_ALIGN(*size);
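+ /*
+ * e.g. NV12 at 720x480 needs 518400 bytes per buffer (508 KiB once
+ * page-aligned), so the 4 MiB cap still allows up to 8 buffers.
+ */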
+
+ dev_dbg(vq->dev, "%s(): count=%d, size=%d\n", __func__, *count, *size);
+
+ return 0;
+}
+
+/* Locking: caller holds vq->vb_lock mutex */
+static int sh_vou_buf_prepare(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb,
+ enum v4l2_field field)
+{
+ struct video_device *vdev = vq->priv_data;
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct v4l2_pix_format *pix = &vou_dev->pix;
+ int bytes_per_line = vou_fmt[vou_dev->pix_idx].bpp * pix->width / 8;
+ int ret;
+
+ dev_dbg(vq->dev, "%s()\n", __func__);
+
+ if (vb->width != pix->width ||
+ vb->height != pix->height ||
+ vb->field != pix->field) {
+ vb->width = pix->width;
+ vb->height = pix->height;
+ vb->field = field;
+ if (vb->state != VIDEOBUF_NEEDS_INIT)
+ free_buffer(vq, vb);
+ }
+
+ vb->size = vb->height * bytes_per_line;
+ if (vb->baddr && vb->bsize < vb->size) {
+ /* User buffer too small */
+ dev_warn(vq->dev, "User buffer too small: [%u] @ %lx\n",
+ vb->bsize, vb->baddr);
+ return -EINVAL;
+ }
+
+ if (vb->state == VIDEOBUF_NEEDS_INIT) {
+ ret = videobuf_iolock(vq, vb, NULL);
+ if (ret < 0) {
+ dev_warn(vq->dev, "IOLOCK buf-type %d: %d\n",
+ vb->memory, ret);
+ return ret;
+ }
+ vb->state = VIDEOBUF_PREPARED;
+ }
+
+ dev_dbg(vq->dev,
+ "%s(): fmt #%d, %u bytes per line, phys 0x%x, type %d, state %d\n",
+ __func__, vou_dev->pix_idx, bytes_per_line,
+ videobuf_to_dma_contig(vb), vb->memory, vb->state);
+
+ return 0;
+}
+
+/* Locking: caller holds vq->vb_lock mutex and vq->irqlock spinlock */
+static void sh_vou_buf_queue(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ struct video_device *vdev = vq->priv_data;
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+
+ dev_dbg(vq->dev, "%s()\n", __func__);
+
+ vb->state = VIDEOBUF_QUEUED;
+ list_add_tail(&vb->queue, &vou_dev->queue);
+
+ if (vou_dev->status == SH_VOU_RUNNING) {
+ return;
+ } else if (!vou_dev->active) {
+ vou_dev->active = vb;
+ /* Start from side A: we use mirror addresses, so, set B */
+ sh_vou_reg_a_write(vou_dev, VOURPR, 1);
+ dev_dbg(vq->dev, "%s: first buffer status 0x%x\n", __func__,
+ sh_vou_reg_a_read(vou_dev, VOUSTR));
+ sh_vou_schedule_next(vou_dev, vb);
+ /* Only activate VOU after the second buffer */
+ } else if (vou_dev->active->queue.next == &vb->queue) {
+ /* Second buffer - initialise register side B */
+ sh_vou_reg_a_write(vou_dev, VOURPR, 0);
+ sh_vou_stream_start(vou_dev, vb);
+
+ /* Register side switching with frame VSYNC */
+ sh_vou_reg_a_write(vou_dev, VOURCR, 5);
+ dev_dbg(vq->dev, "%s: second buffer status 0x%x\n", __func__,
+ sh_vou_reg_a_read(vou_dev, VOUSTR));
+
+ /* Enable End-of-Frame (VSYNC) interrupts */
+ sh_vou_reg_a_write(vou_dev, VOUIR, 0x10004);
+ /* Two buffers on the queue - activate the hardware */
+
+ vou_dev->status = SH_VOU_RUNNING;
+ sh_vou_reg_a_write(vou_dev, VOUER, 0x107);
+ }
+}
+
+static void sh_vou_buf_release(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ struct video_device *vdev = vq->priv_data;
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ unsigned long flags;
+
+ dev_dbg(vq->dev, "%s()\n", __func__);
+
+ spin_lock_irqsave(&vou_dev->lock, flags);
+
+ if (vou_dev->active == vb) {
+ /* disable output */
+ sh_vou_reg_a_set(vou_dev, VOUER, 0, 1);
+ /* ...but the current frame will complete */
+ sh_vou_reg_a_set(vou_dev, VOUIR, 0, 0x30000);
+ vou_dev->active = NULL;
+ }
+
+ if ((vb->state == VIDEOBUF_ACTIVE || vb->state == VIDEOBUF_QUEUED)) {
+ vb->state = VIDEOBUF_ERROR;
+ list_del(&vb->queue);
+ }
+
+ spin_unlock_irqrestore(&vou_dev->lock, flags);
+
+ free_buffer(vq, vb);
+}
+
+static struct videobuf_queue_ops sh_vou_video_qops = {
+ .buf_setup = sh_vou_buf_setup,
+ .buf_prepare = sh_vou_buf_prepare,
+ .buf_queue = sh_vou_buf_queue,
+ .buf_release = sh_vou_buf_release,
+};
+
+/* Video IOCTLs */
+static int sh_vou_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct sh_vou_file *vou_file = priv;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ strlcpy(cap->card, "SuperH VOU", sizeof(cap->card));
+ cap->version = KERNEL_VERSION(0, 1, 0);
+ cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+ return 0;
+}
+
+/* Enumerate the formats that the device can accept from the user */
+static int sh_vou_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *fmt)
+{
+ struct sh_vou_file *vou_file = priv;
+
+ if (fmt->index >= ARRAY_SIZE(vou_fmt))
+ return -EINVAL;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ strlcpy(fmt->description, vou_fmt[fmt->index].desc,
+ sizeof(fmt->description));
+ fmt->pixelformat = vou_fmt[fmt->index].pfmt;
+
+ return 0;
+}
+
+static int sh_vou_g_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
+
+ fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ fmt->fmt.pix = vou_dev->pix;
+
+ return 0;
+}
+
+static const unsigned char vou_scale_h_num[] = {1, 9, 2, 9, 4};
+static const unsigned char vou_scale_h_den[] = {1, 8, 1, 4, 1};
+static const unsigned char vou_scale_h_fld[] = {0, 2, 1, 3};
+static const unsigned char vou_scale_v_num[] = {1, 2, 4};
+static const unsigned char vou_scale_v_den[] = {1, 1, 1};
+static const unsigned char vou_scale_v_fld[] = {0, 1};
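+/*
+ * The supported horizontal scale factors are 1:1, 9:8, 2:1, 9:4 and 4:1, the
+ * vertical ones 1:1, 2:1 and 4:1; the *_fld tables hold the VOUVCR field
+ * encodings for all entries except the first (index 0 means no scaling, so
+ * no scale bits are set in sh_vou_configure_geometry()).
+ */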
+
+static void sh_vou_configure_geometry(struct sh_vou_device *vou_dev,
+ int pix_idx, int w_idx, int h_idx)
+{
+ struct sh_vou_fmt *fmt = vou_fmt + pix_idx;
+ unsigned int black_left, black_top, width_max, height_max,
+ frame_in_height, frame_out_height, frame_out_top;
+ struct v4l2_rect *rect = &vou_dev->rect;
+ struct v4l2_pix_format *pix = &vou_dev->pix;
+ u32 vouvcr = 0, dsr_h, dsr_v;
+
+ if (vou_dev->std & V4L2_STD_525_60) {
+ width_max = 858;
+ height_max = 262;
+ } else {
+ width_max = 864;
+ height_max = 312;
+ }
+
+ frame_in_height = pix->height / 2;
+ frame_out_height = rect->height / 2;
+ frame_out_top = rect->top / 2;
+
+ /*
+ * Cropping scheme: max useful image is 720x480, and the total video
+ * area is 858x525 (NTSC) or 864x625 (PAL). AK8813 / 8814 starts
+ * sampling data beginning with fixed 276th (NTSC) / 288th (PAL) clock,
+ * of which the first 33 / 25 clocks HSYNC must be held active. This
+ * has to be configured in CR[HW]. 1 pixel equals 2 clock periods.
+ * This gives CR[HW] = 16 / 12, VPR[HVP] = 138 / 144, which gives
+ * exactly 858 - 138 = 864 - 144 = 720! We call the out-of-display area,
+ * beyond DSR, specified on the left and top by the VPR register "black
+ * pixels" and out-of-image area (DPR) "background pixels." We fix VPR
+ * at 138 / 144 : 20, because that's the HSYNC timing, that our first
+ * client requires, and that's exactly what leaves us 720 pixels for the
+ * image; we leave VPR[VVP] at default 20 for now, because the client
+ * doesn't seem to have any special requirements for it. Otherwise we
+ * could also set it to max - 240 = 22 / 72. Thus VPR depends only on
+ * the selected standard, and DPR and DSR are selected according to
+ * cropping. Q: how does the client detect the first valid line? Does
+ * HSYNC stay inactive during invalid (black) lines?
+ */
+ black_left = width_max - VOU_MAX_IMAGE_WIDTH;
+ black_top = 20;
+
+ dsr_h = rect->width + rect->left;
+ dsr_v = frame_out_height + frame_out_top;
+
+ dev_dbg(vou_dev->v4l2_dev.dev,
+ "image %ux%u, black %u:%u, offset %u:%u, display %ux%u\n",
+ pix->width, frame_in_height, black_left, black_top,
+ rect->left, frame_out_top, dsr_h, dsr_v);
+
+ /* VOUISR height - half of a frame height in frame mode */
+ sh_vou_reg_ab_write(vou_dev, VOUISR, (pix->width << 16) | frame_in_height);
+ sh_vou_reg_ab_write(vou_dev, VOUVPR, (black_left << 16) | black_top);
+ sh_vou_reg_ab_write(vou_dev, VOUDPR, (rect->left << 16) | frame_out_top);
+ sh_vou_reg_ab_write(vou_dev, VOUDSR, (dsr_h << 16) | dsr_v);
+
+ /*
+ * if necessary, we could set VOUHIR to
+ * max(black_left + dsr_h, width_max) here
+ */
+
+ if (w_idx)
+ vouvcr |= (1 << 15) | (vou_scale_h_fld[w_idx - 1] << 4);
+ if (h_idx)
+ vouvcr |= (1 << 14) | vou_scale_v_fld[h_idx - 1];
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s: scaling 0x%x\n", fmt->desc, vouvcr);
+
+ /* To produce a colour bar for testing set bit 23 of VOUVCR */
+ sh_vou_reg_ab_write(vou_dev, VOUVCR, vouvcr);
+ sh_vou_reg_ab_write(vou_dev, VOUDFR,
+ fmt->pkf | (fmt->yf << 8) | (fmt->rgb << 16));
+}
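+
+/*
+ * Example (illustrative): a full 720x480 NTSC frame at offset 0:0 gives
+ * frame heights of 240 in and out, black offsets 138:20, DPR = 0:0 and
+ * DSR = 720x240.
+ */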
+
+struct sh_vou_geometry {
+ struct v4l2_rect output;
+ unsigned int in_width;
+ unsigned int in_height;
+ int scale_idx_h;
+ int scale_idx_v;
+};
+
+/*
+ * Find the input geometry that we can use to produce an output closest to the
+ * requested rectangle, using VOU scaling
+ */
+static void vou_adjust_input(struct sh_vou_geometry *geo, v4l2_std_id std)
+{
+ /* The compiler cannot know that best and idx will indeed be set */
+ unsigned int best_err = UINT_MAX, best = 0, width_max, height_max;
+ int i, idx = 0;
+
+ if (std & V4L2_STD_525_60) {
+ width_max = 858;
+ height_max = 262;
+ } else {
+ width_max = 864;
+ height_max = 312;
+ }
+
+ /* Image width must be a multiple of 4 */
+ v4l_bound_align_image(&geo->in_width, 0, VOU_MAX_IMAGE_WIDTH, 2,
+ &geo->in_height, 0, VOU_MAX_IMAGE_HEIGHT, 1, 0);
+
+ /* Select scales to come as close as possible to the output image */
+ for (i = ARRAY_SIZE(vou_scale_h_num) - 1; i >= 0; i--) {
+ unsigned int err;
+ unsigned int found = geo->output.width * vou_scale_h_den[i] /
+ vou_scale_h_num[i];
+
+ if (found > VOU_MAX_IMAGE_WIDTH)
+ /* scales increase */
+ break;
+
+ err = abs(found - geo->in_width);
+ if (err < best_err) {
+ best_err = err;
+ idx = i;
+ best = found;
+ }
+ if (!err)
+ break;
+ }
+
+ geo->in_width = best;
+ geo->scale_idx_h = idx;
+
+ best_err = UINT_MAX;
+
+ /* This loop can be replaced with one division */
+ for (i = ARRAY_SIZE(vou_scale_v_num) - 1; i >= 0; i--) {
+ unsigned int err;
+ unsigned int found = geo->output.height * vou_scale_v_den[i] /
+ vou_scale_v_num[i];
+
+ if (found > VOU_MAX_IMAGE_HEIGHT)
+ /* scales increase */
+ break;
+
+ err = abs(found - geo->in_height);
+ if (err < best_err) {
+ best_err = err;
+ idx = i;
+ best = found;
+ }
+ if (!err)
+ break;
+ }
+
+ geo->in_height = best;
+ geo->scale_idx_v = idx;
+}
+
+/*
+ * Find the output geometry that we can produce, using VOU scaling, closest to
+ * the requested rectangle
+ */
+static void vou_adjust_output(struct sh_vou_geometry *geo, v4l2_std_id std)
+{
+ unsigned int best_err = UINT_MAX, best, width_max, height_max;
+ int i, idx;
+
+ if (std & V4L2_STD_525_60) {
+ width_max = 858;
+ height_max = 262 * 2;
+ } else {
+ width_max = 864;
+ height_max = 312 * 2;
+ }
+
+ /* Select scales to come as close as possible to the output image */
+ for (i = 0; i < ARRAY_SIZE(vou_scale_h_num); i++) {
+ unsigned int err;
+ unsigned int found = geo->in_width * vou_scale_h_num[i] /
+ vou_scale_h_den[i];
+
+ if (found > VOU_MAX_IMAGE_WIDTH)
+ /* scales increase */
+ break;
+
+ err = abs(found - geo->output.width);
+ if (err < best_err) {
+ best_err = err;
+ idx = i;
+ best = found;
+ }
+ if (!err)
+ break;
+ }
+
+ geo->output.width = best;
+ geo->scale_idx_h = idx;
+ if (geo->output.left + best > width_max)
+ geo->output.left = width_max - best;
+
+ pr_debug("%s(): W %u * %u/%u = %u\n", __func__, geo->in_width,
+ vou_scale_h_num[idx], vou_scale_h_den[idx], best);
+
+ best_err = UINT_MAX;
+
+ /* This loop can be replaced with one division */
+ for (i = 0; i < ARRAY_SIZE(vou_scale_v_num); i++) {
+ unsigned int err;
+ unsigned int found = geo->in_height * vou_scale_v_num[i] /
+ vou_scale_v_den[i];
+
+ if (found > VOU_MAX_IMAGE_HEIGHT)
+ /* scales increase */
+ break;
+
+ err = abs(found - geo->output.height);
+ if (err < best_err) {
+ best_err = err;
+ idx = i;
+ best = found;
+ }
+ if (!err)
+ break;
+ }
+
+ geo->output.height = best;
+ geo->scale_idx_v = idx;
+ if (geo->output.top + best > height_max)
+ geo->output.top = height_max - best;
+
+ pr_debug("%s(): H %u * %u/%u = %u\n", __func__, geo->in_height,
+ vou_scale_v_num[idx], vou_scale_v_den[idx], best);
+}
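+
+/*
+ * Example (illustrative): for in_width = 640 and a requested output width of
+ * 720 the horizontal loop tries 640 * 1/1 = 640 (error 80) and 640 * 9/8 =
+ * 720 (error 0), so scale index 1 and an output width of 720 are selected.
+ */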
+
+static int sh_vou_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+ int pix_idx;
+ struct sh_vou_geometry geo;
+ struct v4l2_mbus_framefmt mbfmt = {
+ /* Revisit: is this the correct code? */
+ .code = V4L2_MBUS_FMT_YUYV8_2X8_LE,
+ .field = V4L2_FIELD_INTERLACED,
+ .colorspace = V4L2_COLORSPACE_SMPTE170M,
+ };
+ int ret;
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s(): %ux%u -> %ux%u\n", __func__,
+ vou_dev->rect.width, vou_dev->rect.height,
+ pix->width, pix->height);
+
+ if (pix->field == V4L2_FIELD_ANY)
+ pix->field = V4L2_FIELD_NONE;
+
+ if (fmt->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
+ pix->field != V4L2_FIELD_NONE)
+ return -EINVAL;
+
+ for (pix_idx = 0; pix_idx < ARRAY_SIZE(vou_fmt); pix_idx++)
+ if (vou_fmt[pix_idx].pfmt == pix->pixelformat)
+ break;
+
+ if (pix_idx == ARRAY_SIZE(vou_fmt))
+ return -EINVAL;
+
+ /* Image width must be a multiple of 4 */
+ v4l_bound_align_image(&pix->width, 0, VOU_MAX_IMAGE_WIDTH, 2,
+ &pix->height, 0, VOU_MAX_IMAGE_HEIGHT, 1, 0);
+
+ geo.in_width = pix->width;
+ geo.in_height = pix->height;
+ geo.output = vou_dev->rect;
+
+ vou_adjust_output(&geo, vou_dev->std);
+
+ mbfmt.width = geo.output.width;
+ mbfmt.height = geo.output.height;
+ ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video,
+ s_mbus_fmt, &mbfmt);
+ /* Must be implemented, so, don't check for -ENOIOCTLCMD */
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s(): %ux%u -> %ux%u\n", __func__,
+ geo.output.width, geo.output.height, mbfmt.width, mbfmt.height);
+
+ /* Sanity checks */
+ if ((unsigned)mbfmt.width > VOU_MAX_IMAGE_WIDTH ||
+ (unsigned)mbfmt.height > VOU_MAX_IMAGE_HEIGHT ||
+ mbfmt.code != V4L2_MBUS_FMT_YUYV8_2X8_LE)
+ return -EIO;
+
+ if (mbfmt.width != geo.output.width ||
+ mbfmt.height != geo.output.height) {
+ geo.output.width = mbfmt.width;
+ geo.output.height = mbfmt.height;
+
+ vou_adjust_input(&geo, vou_dev->std);
+ }
+
+ /* We tried to preserve output rectangle, but it could have changed */
+ vou_dev->rect = geo.output;
+ pix->width = geo.in_width;
+ pix->height = geo.in_height;
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s(): %ux%u\n", __func__,
+ pix->width, pix->height);
+
+ vou_dev->pix_idx = pix_idx;
+
+ vou_dev->pix = *pix;
+
+ sh_vou_configure_geometry(vou_dev, pix_idx,
+ geo.scale_idx_h, geo.scale_idx_v);
+
+ return 0;
+}
+
+static int sh_vou_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct sh_vou_file *vou_file = priv;
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+ int i;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ pix->field = V4L2_FIELD_NONE;
+
+ v4l_bound_align_image(&pix->width, 0, VOU_MAX_IMAGE_WIDTH, 1,
+ &pix->height, 0, VOU_MAX_IMAGE_HEIGHT, 1, 0);
+
+ for (i = 0; i < ARRAY_SIZE(vou_fmt); i++)
+ if (vou_fmt[i].pfmt == pix->pixelformat)
+ return 0;
+
+ pix->pixelformat = vou_fmt[0].pfmt;
+
+ return 0;
+}
+
+static int sh_vou_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *req)
+{
+ struct sh_vou_file *vou_file = priv;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ if (req->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ return videobuf_reqbufs(&vou_file->vbq, req);
+}
+
+static int sh_vou_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *b)
+{
+ struct sh_vou_file *vou_file = priv;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ return videobuf_querybuf(&vou_file->vbq, b);
+}
+
+static int sh_vou_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
+{
+ struct sh_vou_file *vou_file = priv;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ return videobuf_qbuf(&vou_file->vbq, b);
+}
+
+static int sh_vou_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
+{
+ struct sh_vou_file *vou_file = priv;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ return videobuf_dqbuf(&vou_file->vbq, b, file->f_flags & O_NONBLOCK);
+}
+
+static int sh_vou_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type buftype)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_file *vou_file = priv;
+ int ret;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0,
+ video, s_stream, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+
+ /* This calls our .buf_queue() (== sh_vou_buf_queue) */
+ return videobuf_streamon(&vou_file->vbq);
+}
+
+static int sh_vou_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type buftype)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_file *vou_file = priv;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ /*
+ * This calls buf_release from host driver's videobuf_queue_ops for all
+ * remaining buffers. When the last buffer is freed, stop streaming
+ */
+ videobuf_streamoff(&vou_file->vbq);
+ v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video, s_stream, 0);
+
+ return 0;
+}
+
+static u32 sh_vou_ntsc_mode(enum sh_vou_bus_fmt bus_fmt)
+{
+ switch (bus_fmt) {
+ default:
+ pr_warning("%s(): Invalid bus-format code %d, using default 8-bit\n",
+ __func__, bus_fmt);
+ case SH_VOU_BUS_8BIT:
+ return 1;
+ case SH_VOU_BUS_16BIT:
+ return 0;
+ case SH_VOU_BUS_BT656:
+ return 3;
+ }
+}
+
+static int sh_vou_s_std(struct file *file, void *priv, v4l2_std_id *std_id)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ int ret;
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s(): 0x%llx\n", __func__, *std_id);
+
+ if (*std_id & ~vdev->tvnorms)
+ return -EINVAL;
+
+ ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video,
+ s_std_output, *std_id);
+ /* Shall we continue, if the subdev doesn't support .s_std_output()? */
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+
+ if (*std_id & V4L2_STD_525_60)
+ sh_vou_reg_ab_set(vou_dev, VOUCR,
+ sh_vou_ntsc_mode(vou_dev->pdata->bus_fmt) << 29, 7 << 29);
+ else
+ sh_vou_reg_ab_set(vou_dev, VOUCR, 5 << 29, 7 << 29);
+
+ vou_dev->std = *std_id;
+
+ return 0;
+}
+
+static int sh_vou_g_std(struct file *file, void *priv, v4l2_std_id *std)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
+
+ *std = vou_dev->std;
+
+ return 0;
+}
+
+static int sh_vou_g_crop(struct file *file, void *fh, struct v4l2_crop *a)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
+
+ a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ a->c = vou_dev->rect;
+
+ return 0;
+}
+
+/* Assume a dull encoder, do all the work ourselves. */
+static int sh_vou_s_crop(struct file *file, void *fh, struct v4l2_crop *a)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct v4l2_rect *rect = &a->c;
+ struct v4l2_crop sd_crop = {.type = V4L2_BUF_TYPE_VIDEO_OUTPUT};
+ struct v4l2_pix_format *pix = &vou_dev->pix;
+ struct sh_vou_geometry geo;
+ struct v4l2_mbus_framefmt mbfmt = {
+ /* Revisit: is this the correct code? */
+ .code = V4L2_MBUS_FMT_YUYV8_2X8_LE,
+ .field = V4L2_FIELD_INTERLACED,
+ .colorspace = V4L2_COLORSPACE_SMPTE170M,
+ };
+ int ret;
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s(): %ux%u@%u:%u\n", __func__,
+ rect->width, rect->height, rect->left, rect->top);
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
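+ /* Bound the requested rectangle to the maximum VOU image size, width and height aligned to 2 */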
+ v4l_bound_align_image(&rect->width, 0, VOU_MAX_IMAGE_WIDTH, 1,
+ &rect->height, 0, VOU_MAX_IMAGE_HEIGHT, 1, 0);
+
+ if (rect->width + rect->left > VOU_MAX_IMAGE_WIDTH)
+ rect->left = VOU_MAX_IMAGE_WIDTH - rect->width;
+
+ if (rect->height + rect->top > VOU_MAX_IMAGE_HEIGHT)
+ rect->top = VOU_MAX_IMAGE_HEIGHT - rect->height;
+
+ geo.output = *rect;
+ geo.in_width = pix->width;
+ geo.in_height = pix->height;
+
+ /* Configure the encoder one-to-one, position at 0, ignore errors */
+ sd_crop.c.width = geo.output.width;
+ sd_crop.c.height = geo.output.height;
+ /*
+ * We first issue a S_CROP, so that the subsequent S_FMT delivers the
+ * final encoder configuration.
+ */
+ v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video,
+ s_crop, &sd_crop);
+ mbfmt.width = geo.output.width;
+ mbfmt.height = geo.output.height;
+ ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video,
+ s_mbus_fmt, &mbfmt);
+ /* Must be implemented, so don't check for -ENOIOCTLCMD */
+ if (ret < 0)
+ return ret;
+
+ /* Sanity checks */
+ if ((unsigned)mbfmt.width > VOU_MAX_IMAGE_WIDTH ||
+ (unsigned)mbfmt.height > VOU_MAX_IMAGE_HEIGHT ||
+ mbfmt.code != V4L2_MBUS_FMT_YUYV8_2X8_LE)
+ return -EIO;
+
+ geo.output.width = mbfmt.width;
+ geo.output.height = mbfmt.height;
+
+ /*
+ * No down-scaling. According to the API, current call has precedence:
+ * http://v4l2spec.bytesex.org/spec/x1904.htm#AEN1954 paragraph two.
+ */
+ vou_adjust_input(&geo, vou_dev->std);
+
+ /* We tried to preserve output rectangle, but it could have changed */
+ vou_dev->rect = geo.output;
+ pix->width = geo.in_width;
+ pix->height = geo.in_height;
+
+ sh_vou_configure_geometry(vou_dev, vou_dev->pix_idx,
+ geo.scale_idx_h, geo.scale_idx_v);
+
+ return 0;
+}
+
+/*
+ * Total field: NTSC 858 x 2 * 262/263, PAL 864 x 2 * 312/313, default rectangle
+ * is the initial register values, height takes the interlaced format into
+ * account. The actual image can only go up to 720 x 2 * 240, so VOUVPR can
+ * only meaningfully contain values <= 720 and <= 240 respectively, and
+ * not <= 864 and <= 312.
+ */
+static int sh_vou_cropcap(struct file *file, void *priv,
+ struct v4l2_cropcap *a)
+{
+ struct sh_vou_file *vou_file = priv;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ a->bounds.left = 0;
+ a->bounds.top = 0;
+ a->bounds.width = VOU_MAX_IMAGE_WIDTH;
+ a->bounds.height = VOU_MAX_IMAGE_HEIGHT;
+ /* Default = max, set VOUDPR = 0, which is not hardware default */
+ a->defrect.left = 0;
+ a->defrect.top = 0;
+ a->defrect.width = VOU_MAX_IMAGE_WIDTH;
+ a->defrect.height = VOU_MAX_IMAGE_HEIGHT;
+ a->pixelaspect.numerator = 1;
+ a->pixelaspect.denominator = 1;
+
+ return 0;
+}
+
+static irqreturn_t sh_vou_isr(int irq, void *dev_id)
+{
+ struct sh_vou_device *vou_dev = dev_id;
+ static unsigned long j;
+ struct videobuf_buffer *vb;
+ static int cnt;
+ static int side;
+ u32 irq_status = sh_vou_reg_a_read(vou_dev, VOUIR), masked;
+ u32 vou_status = sh_vou_reg_a_read(vou_dev, VOUSTR);
+
+ if (!(irq_status & 0x300)) {
+ if (printk_timed_ratelimit(&j, 500))
+ dev_warn(vou_dev->v4l2_dev.dev, "IRQ status 0x%x!\n",
+ irq_status);
+ return IRQ_NONE;
+ }
+
+ spin_lock(&vou_dev->lock);
+ if (!vou_dev->active || list_empty(&vou_dev->queue)) {
+ if (printk_timed_ratelimit(&j, 500))
+ dev_warn(vou_dev->v4l2_dev.dev,
+ "IRQ without active buffer: %x!\n", irq_status);
+ /* Just ack: buf_release will disable further interrupts */
+ sh_vou_reg_a_set(vou_dev, VOUIR, 0, 0x300);
+ spin_unlock(&vou_dev->lock);
+ return IRQ_HANDLED;
+ }
+
+ masked = ~(0x300 & irq_status) & irq_status & 0x30304;
+ dev_dbg(vou_dev->v4l2_dev.dev,
+ "IRQ status 0x%x -> 0x%x, VOU status 0x%x, cnt %d\n",
+ irq_status, masked, vou_status, cnt);
+
+ cnt++;
+ side = vou_status & 0x10000;
+
+ /* Clear only set interrupts */
+ sh_vou_reg_a_write(vou_dev, VOUIR, masked);
+
+ vb = vou_dev->active;
+ list_del(&vb->queue);
+
+ vb->state = VIDEOBUF_DONE;
+ do_gettimeofday(&vb->ts);
+ vb->field_count++;
+ wake_up(&vb->done);
+
+ if (list_empty(&vou_dev->queue)) {
+ /* Stop VOU */
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s: queue empty after %d\n",
+ __func__, cnt);
+ sh_vou_reg_a_set(vou_dev, VOUER, 0, 1);
+ vou_dev->active = NULL;
+ vou_dev->status = SH_VOU_INITIALISING;
+ /* Disable End-of-Frame (VSYNC) interrupts */
+ sh_vou_reg_a_set(vou_dev, VOUIR, 0, 0x30000);
+ spin_unlock(&vou_dev->lock);
+ return IRQ_HANDLED;
+ }
+
+ vou_dev->active = list_entry(vou_dev->queue.next,
+ struct videobuf_buffer, queue);
+
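+ /* If another buffer is queued behind the new active one, schedule it next */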
+ if (vou_dev->active->queue.next != &vou_dev->queue) {
+ struct videobuf_buffer *new = list_entry(vou_dev->active->queue.next,
+ struct videobuf_buffer, queue);
+ sh_vou_schedule_next(vou_dev, new);
+ }
+
+ spin_unlock(&vou_dev->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int sh_vou_hw_init(struct sh_vou_device *vou_dev)
+{
+ struct sh_vou_pdata *pdata = vou_dev->pdata;
+ u32 voucr = sh_vou_ntsc_mode(pdata->bus_fmt) << 29;
+ int i = 100;
+
+ /* Disable all IRQs */
+ sh_vou_reg_a_write(vou_dev, VOUIR, 0);
+
+ /* Reset VOU interfaces - registers unaffected */
+ sh_vou_reg_a_write(vou_dev, VOUSRR, 0x101);
+ while (--i && (sh_vou_reg_a_read(vou_dev, VOUSRR) & 0x101))
+ udelay(1);
+
+ if (!i)
+ return -ETIMEDOUT;
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "Reset took %dus\n", 100 - i);
+
+ if (pdata->flags & SH_VOU_PCLK_FALLING)
+ voucr |= 1 << 28;
+ if (pdata->flags & SH_VOU_HSYNC_LOW)
+ voucr |= 1 << 27;
+ if (pdata->flags & SH_VOU_VSYNC_LOW)
+ voucr |= 1 << 26;
+ sh_vou_reg_ab_set(vou_dev, VOUCR, voucr, 0xfc000000);
+
+ /* Manual register side switching at first */
+ sh_vou_reg_a_write(vou_dev, VOURCR, 4);
+ /* Default - fixed HSYNC length, can be made configurable if required */
+ sh_vou_reg_ab_write(vou_dev, VOUMSR, 0x800000);
+
+ return 0;
+}
+
+/* File operations */
+static int sh_vou_open(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_file *vou_file = kzalloc(sizeof(struct sh_vou_file),
+ GFP_KERNEL);
+
+ if (!vou_file)
+ return -ENOMEM;
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
+
+ file->private_data = vou_file;
+
+ if (atomic_inc_return(&vou_dev->use_count) == 1) {
+ int ret;
+ /* First open */
+ vou_dev->status = SH_VOU_INITIALISING;
+ pm_runtime_get_sync(vdev->v4l2_dev->dev);
+ ret = sh_vou_hw_init(vou_dev);
+ if (ret < 0) {
+ atomic_dec(&vou_dev->use_count);
+ pm_runtime_put(vdev->v4l2_dev->dev);
+ vou_dev->status = SH_VOU_IDLE;
+ return ret;
+ }
+ }
+
+ videobuf_queue_dma_contig_init(&vou_file->vbq, &sh_vou_video_qops,
+ vou_dev->v4l2_dev.dev, &vou_dev->lock,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT,
+ V4L2_FIELD_NONE,
+ sizeof(struct videobuf_buffer), vdev);
+
+ return 0;
+}
+
+static int sh_vou_release(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_file *vou_file = file->private_data;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ if (!atomic_dec_return(&vou_dev->use_count)) {
+ /* Last close */
+ vou_dev->status = SH_VOU_IDLE;
+ sh_vou_reg_a_set(vou_dev, VOUER, 0, 0x101);
+ pm_runtime_put(vdev->v4l2_dev->dev);
+ }
+
+ file->private_data = NULL;
+ kfree(vou_file);
+
+ return 0;
+}
+
+static int sh_vou_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct sh_vou_file *vou_file = file->private_data;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ return videobuf_mmap_mapper(&vou_file->vbq, vma);
+}
+
+static unsigned int sh_vou_poll(struct file *file, poll_table *wait)
+{
+ struct sh_vou_file *vou_file = file->private_data;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ return videobuf_poll_stream(file, &vou_file->vbq, wait);
+}
+
+static int sh_vou_g_chip_ident(struct file *file, void *fh,
+ struct v4l2_dbg_chip_ident *id)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+
+ return v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, core, g_chip_ident, id);
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int sh_vou_g_register(struct file *file, void *fh,
+ struct v4l2_dbg_register *reg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+
+ return v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, core, g_register, reg);
+}
+
+static int sh_vou_s_register(struct file *file, void *fh,
+ struct v4l2_dbg_register *reg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+
+ return v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, core, s_register, reg);
+}
+#endif
+
+/* sh_vou display ioctl operations */
+static const struct v4l2_ioctl_ops sh_vou_ioctl_ops = {
+ .vidioc_querycap = sh_vou_querycap,
+ .vidioc_enum_fmt_vid_out = sh_vou_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out = sh_vou_g_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = sh_vou_s_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = sh_vou_try_fmt_vid_out,
+ .vidioc_reqbufs = sh_vou_reqbufs,
+ .vidioc_querybuf = sh_vou_querybuf,
+ .vidioc_qbuf = sh_vou_qbuf,
+ .vidioc_dqbuf = sh_vou_dqbuf,
+ .vidioc_streamon = sh_vou_streamon,
+ .vidioc_streamoff = sh_vou_streamoff,
+ .vidioc_s_std = sh_vou_s_std,
+ .vidioc_g_std = sh_vou_g_std,
+ .vidioc_cropcap = sh_vou_cropcap,
+ .vidioc_g_crop = sh_vou_g_crop,
+ .vidioc_s_crop = sh_vou_s_crop,
+ .vidioc_g_chip_ident = sh_vou_g_chip_ident,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .vidioc_g_register = sh_vou_g_register,
+ .vidioc_s_register = sh_vou_s_register,
+#endif
+};
+
+static const struct v4l2_file_operations sh_vou_fops = {
+ .owner = THIS_MODULE,
+ .open = sh_vou_open,
+ .release = sh_vou_release,
+ .ioctl = video_ioctl2,
+ .mmap = sh_vou_mmap,
+ .poll = sh_vou_poll,
+};
+
+static const struct video_device sh_vou_video_template = {
+ .name = "sh_vou",
+ .fops = &sh_vou_fops,
+ .ioctl_ops = &sh_vou_ioctl_ops,
+ .tvnorms = V4L2_STD_525_60, /* PAL only supported in 8-bit non-bt656 mode */
+ .current_norm = V4L2_STD_NTSC_M,
+};
+
+static int __devinit sh_vou_probe(struct platform_device *pdev)
+{
+ struct sh_vou_pdata *vou_pdata = pdev->dev.platform_data;
+ struct v4l2_rect *rect;
+ struct v4l2_pix_format *pix;
+ struct i2c_adapter *i2c_adap;
+ struct video_device *vdev;
+ struct sh_vou_device *vou_dev;
+ struct resource *reg_res, *region;
+ struct v4l2_subdev *subdev;
+ int irq, ret;
+
+ reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+
+ if (!vou_pdata || !reg_res || irq <= 0) {
+ dev_err(&pdev->dev, "Insufficient VOU platform information.\n");
+ return -ENODEV;
+ }
+
+ vou_dev = kzalloc(sizeof(*vou_dev), GFP_KERNEL);
+ if (!vou_dev)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&vou_dev->queue);
+ spin_lock_init(&vou_dev->lock);
+ atomic_set(&vou_dev->use_count, 0);
+ vou_dev->pdata = vou_pdata;
+ vou_dev->status = SH_VOU_IDLE;
+
+ rect = &vou_dev->rect;
+ pix = &vou_dev->pix;
+
+ /* Fill in defaults */
+ vou_dev->std = sh_vou_video_template.current_norm;
+ rect->left = 0;
+ rect->top = 0;
+ rect->width = VOU_MAX_IMAGE_WIDTH;
+ rect->height = VOU_MAX_IMAGE_HEIGHT;
+ pix->width = VOU_MAX_IMAGE_WIDTH;
+ pix->height = VOU_MAX_IMAGE_HEIGHT;
+ pix->pixelformat = V4L2_PIX_FMT_YVYU;
+ pix->field = V4L2_FIELD_NONE;
+ pix->bytesperline = VOU_MAX_IMAGE_WIDTH * 2;
+ pix->sizeimage = VOU_MAX_IMAGE_WIDTH * 2 * VOU_MAX_IMAGE_HEIGHT;
+ pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
+
+ region = request_mem_region(reg_res->start, resource_size(reg_res),
+ pdev->name);
+ if (!region) {
+ dev_err(&pdev->dev, "VOU region already claimed\n");
+ ret = -EBUSY;
+ goto ereqmemreg;
+ }
+
+ vou_dev->base = ioremap(reg_res->start, resource_size(reg_res));
+ if (!vou_dev->base) {
+ ret = -ENOMEM;
+ goto emap;
+ }
+
+ ret = request_irq(irq, sh_vou_isr, 0, "vou", vou_dev);
+ if (ret < 0)
+ goto ereqirq;
+
+ ret = v4l2_device_register(&pdev->dev, &vou_dev->v4l2_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Error registering v4l2 device\n");
+ goto ev4l2devreg;
+ }
+
+ /* Allocate memory for video device */
+ vdev = video_device_alloc();
+ if (vdev == NULL) {
+ ret = -ENOMEM;
+ goto evdevalloc;
+ }
+
+ *vdev = sh_vou_video_template;
+ if (vou_pdata->bus_fmt == SH_VOU_BUS_8BIT)
+ vdev->tvnorms |= V4L2_STD_PAL;
+ vdev->v4l2_dev = &vou_dev->v4l2_dev;
+ vdev->release = video_device_release;
+
+ vou_dev->vdev = vdev;
+ video_set_drvdata(vdev, vou_dev);
+
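+ /* Enable runtime PM and power the device up before the register accesses in sh_vou_hw_init() */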
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_resume(&pdev->dev);
+
+ i2c_adap = i2c_get_adapter(vou_pdata->i2c_adap);
+ if (!i2c_adap) {
+ ret = -ENODEV;
+ goto ei2cgadap;
+ }
+
+ ret = sh_vou_hw_init(vou_dev);
+ if (ret < 0)
+ goto ereset;
+
+ subdev = v4l2_i2c_new_subdev_board(&vou_dev->v4l2_dev, i2c_adap,
+ vou_pdata->module_name, vou_pdata->board_info, NULL);
+ if (!subdev) {
+ ret = -ENOMEM;
+ goto ei2cnd;
+ }
+
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (ret < 0)
+ goto evregdev;
+
+ return 0;
+
+evregdev:
+ei2cnd:
+ereset:
+ i2c_put_adapter(i2c_adap);
+ei2cgadap:
+ video_device_release(vdev);
+ pm_runtime_disable(&pdev->dev);
+evdevalloc:
+ v4l2_device_unregister(&vou_dev->v4l2_dev);
+ev4l2devreg:
+ free_irq(irq, vou_dev);
+ereqirq:
+ iounmap(vou_dev->base);
+emap:
+ release_mem_region(reg_res->start, resource_size(reg_res));
+ereqmemreg:
+ kfree(vou_dev);
+ return ret;
+}
+
+static int __devexit sh_vou_remove(struct platform_device *pdev)
+{
+ int irq = platform_get_irq(pdev, 0);
+ struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
+ struct sh_vou_device *vou_dev = container_of(v4l2_dev,
+ struct sh_vou_device, v4l2_dev);
+ struct v4l2_subdev *sd = list_entry(v4l2_dev->subdevs.next,
+ struct v4l2_subdev, list);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct resource *reg_res;
+
+ if (irq > 0)
+ free_irq(irq, vou_dev);
+ pm_runtime_disable(&pdev->dev);
+ video_unregister_device(vou_dev->vdev);
+ i2c_put_adapter(client->adapter);
+ v4l2_device_unregister(&vou_dev->v4l2_dev);
+ iounmap(vou_dev->base);
+ reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (reg_res)
+ release_mem_region(reg_res->start, resource_size(reg_res));
+ kfree(vou_dev);
+ return 0;
+}
+
+static struct platform_driver __refdata sh_vou = {
+ .remove = __devexit_p(sh_vou_remove),
+ .driver = {
+ .name = "sh-vou",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init sh_vou_init(void)
+{
+ return platform_driver_probe(&sh_vou, sh_vou_probe);
+}
+
+static void __exit sh_vou_exit(void)
+{
+ platform_driver_unregister(&sh_vou);
+}
+
+module_init(sh_vou_init);
+module_exit(sh_vou_exit);
+
+MODULE_DESCRIPTION("SuperH VOU driver");
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:sh-vou");
diff --git a/drivers/media/video/sn9c102/sn9c102_core.c b/drivers/media/video/sn9c102/sn9c102_core.c
index cbf8087b286f..28e19daadec9 100644
--- a/drivers/media/video/sn9c102/sn9c102_core.c
+++ b/drivers/media/video/sn9c102/sn9c102_core.c
@@ -2295,7 +2295,7 @@ sn9c102_vidioc_s_ctrl(struct sn9c102_device* cam, void __user * arg)
if (copy_from_user(&ctrl, arg, sizeof(ctrl)))
return -EFAULT;
- for (i = 0; i < ARRAY_SIZE(s->qctrl); i++)
+ for (i = 0; i < ARRAY_SIZE(s->qctrl); i++) {
if (ctrl.id == s->qctrl[i].id) {
if (s->qctrl[i].flags & V4L2_CTRL_FLAG_DISABLED)
return -EINVAL;
@@ -2305,7 +2305,9 @@ sn9c102_vidioc_s_ctrl(struct sn9c102_device* cam, void __user * arg)
ctrl.value -= ctrl.value % s->qctrl[i].step;
break;
}
-
+ }
+ if (i == ARRAY_SIZE(s->qctrl))
+ return -EINVAL;
if ((err = s->set_ctrl(cam, &ctrl)))
return err;
diff --git a/drivers/media/video/sn9c102/sn9c102_devtable.h b/drivers/media/video/sn9c102/sn9c102_devtable.h
index cc40d6ba9f22..522ba3f4c285 100644
--- a/drivers/media/video/sn9c102/sn9c102_devtable.h
+++ b/drivers/media/video/sn9c102/sn9c102_devtable.h
@@ -40,12 +40,12 @@ struct sn9c102_device;
static const struct usb_device_id sn9c102_id_table[] = {
/* SN9C101 and SN9C102 */
-#if !defined CONFIG_USB_GSPCA && !defined CONFIG_USB_GSPCA_MODULE
+#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x6001, BRIDGE_SN9C102), },
{ SN9C102_USB_DEVICE(0x0c45, 0x6005, BRIDGE_SN9C102), },
#endif
{ SN9C102_USB_DEVICE(0x0c45, 0x6007, BRIDGE_SN9C102), },
-#if !defined CONFIG_USB_GSPCA && !defined CONFIG_USB_GSPCA_MODULE
+#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x6009, BRIDGE_SN9C102), },
{ SN9C102_USB_DEVICE(0x0c45, 0x600d, BRIDGE_SN9C102), },
/* { SN9C102_USB_DEVICE(0x0c45, 0x6011, BRIDGE_SN9C102), }, OV6650 */
@@ -53,13 +53,13 @@ static const struct usb_device_id sn9c102_id_table[] = {
{ SN9C102_USB_DEVICE(0x0c45, 0x6019, BRIDGE_SN9C102), },
{ SN9C102_USB_DEVICE(0x0c45, 0x6024, BRIDGE_SN9C102), },
{ SN9C102_USB_DEVICE(0x0c45, 0x6025, BRIDGE_SN9C102), },
-#if !defined CONFIG_USB_GSPCA && !defined CONFIG_USB_GSPCA_MODULE
+#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x6028, BRIDGE_SN9C102), },
{ SN9C102_USB_DEVICE(0x0c45, 0x6029, BRIDGE_SN9C102), },
#endif
{ SN9C102_USB_DEVICE(0x0c45, 0x602a, BRIDGE_SN9C102), },
{ SN9C102_USB_DEVICE(0x0c45, 0x602b, BRIDGE_SN9C102), },
-#if !defined CONFIG_USB_GSPCA && !defined CONFIG_USB_GSPCA_MODULE
+#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x602c, BRIDGE_SN9C102), },
/* { SN9C102_USB_DEVICE(0x0c45, 0x602d, BRIDGE_SN9C102), }, HV7131R */
#endif
@@ -74,7 +74,7 @@ static const struct usb_device_id sn9c102_id_table[] = {
{ SN9C102_USB_DEVICE(0x0c45, 0x608b, BRIDGE_SN9C103), },
{ SN9C102_USB_DEVICE(0x0c45, 0x608c, BRIDGE_SN9C103), },
/* { SN9C102_USB_DEVICE(0x0c45, 0x608e, BRIDGE_SN9C103), }, CISVF10 */
-#if !defined CONFIG_USB_GSPCA && !defined CONFIG_USB_GSPCA_MODULE
+#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x608f, BRIDGE_SN9C103), },
#endif
{ SN9C102_USB_DEVICE(0x0c45, 0x60a0, BRIDGE_SN9C103), },
@@ -86,7 +86,7 @@ static const struct usb_device_id sn9c102_id_table[] = {
{ SN9C102_USB_DEVICE(0x0c45, 0x60ac, BRIDGE_SN9C103), },
{ SN9C102_USB_DEVICE(0x0c45, 0x60ae, BRIDGE_SN9C103), },
{ SN9C102_USB_DEVICE(0x0c45, 0x60af, BRIDGE_SN9C103), },
-#if !defined CONFIG_USB_GSPCA && !defined CONFIG_USB_GSPCA_MODULE
+#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x60b0, BRIDGE_SN9C103), },
#endif
{ SN9C102_USB_DEVICE(0x0c45, 0x60b2, BRIDGE_SN9C103), },
@@ -97,7 +97,7 @@ static const struct usb_device_id sn9c102_id_table[] = {
{ SN9C102_USB_DEVICE(0x0c45, 0x60bc, BRIDGE_SN9C103), },
{ SN9C102_USB_DEVICE(0x0c45, 0x60be, BRIDGE_SN9C103), },
/* SN9C105 */
-#if !defined CONFIG_USB_GSPCA && !defined CONFIG_USB_GSPCA_MODULE
+#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
{ SN9C102_USB_DEVICE(0x045e, 0x00f5, BRIDGE_SN9C105), },
{ SN9C102_USB_DEVICE(0x045e, 0x00f7, BRIDGE_SN9C105), },
{ SN9C102_USB_DEVICE(0x0471, 0x0327, BRIDGE_SN9C105), },
@@ -121,11 +121,11 @@ static const struct usb_device_id sn9c102_id_table[] = {
{ SN9C102_USB_DEVICE(0x0c45, 0x610f, BRIDGE_SN9C120), },
{ SN9C102_USB_DEVICE(0x0c45, 0x6130, BRIDGE_SN9C120), },
/* { SN9C102_USB_DEVICE(0x0c45, 0x6138, BRIDGE_SN9C120), }, MO8000 */
-#if !defined CONFIG_USB_GSPCA && !defined CONFIG_USB_GSPCA_MODULE
+#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x613a, BRIDGE_SN9C120), },
#endif
{ SN9C102_USB_DEVICE(0x0c45, 0x613b, BRIDGE_SN9C120), },
-#if !defined CONFIG_USB_GSPCA && !defined CONFIG_USB_GSPCA_MODULE
+#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x613c, BRIDGE_SN9C120), },
{ SN9C102_USB_DEVICE(0x0c45, 0x613e, BRIDGE_SN9C120), },
#endif
diff --git a/drivers/media/video/sn9c102/sn9c102_hv7131d.c b/drivers/media/video/sn9c102/sn9c102_hv7131d.c
index db2434948939..2dce5c908c8e 100644
--- a/drivers/media/video/sn9c102/sn9c102_hv7131d.c
+++ b/drivers/media/video/sn9c102/sn9c102_hv7131d.c
@@ -255,7 +255,7 @@ int sn9c102_probe_hv7131d(struct sn9c102_device* cam)
if (err || r0 < 0 || r1 < 0)
return -EIO;
- if (r0 != 0x00 || r1 != 0x04)
+ if ((r0 != 0x00 && r0 != 0x01) || r1 != 0x04)
return -ENODEV;
sn9c102_attach_sensor(cam, &hv7131d);
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index a24174ddec46..2b36464c4bc4 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -24,6 +24,7 @@
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
@@ -388,6 +389,11 @@ static int soc_camera_open(struct file *file)
goto eiciadd;
}
+ pm_runtime_enable(&icd->vdev->dev);
+ ret = pm_runtime_resume(&icd->vdev->dev);
+ if (ret < 0 && ret != -ENOSYS)
+ goto eresume;
+
/*
* Try to configure with default parameters. Notice: this is the
* very first open, so, we cannot race against other calls,
@@ -409,10 +415,12 @@ static int soc_camera_open(struct file *file)
return 0;
/*
- * First five errors are entered with the .video_lock held
+ * First four errors are entered with the .video_lock held
* and use_count == 1
*/
esfmt:
+ pm_runtime_disable(&icd->vdev->dev);
+eresume:
ici->ops->remove(icd);
eiciadd:
if (icl->power)
@@ -437,7 +445,11 @@ static int soc_camera_close(struct file *file)
if (!icd->use_count) {
struct soc_camera_link *icl = to_soc_camera_link(icd);
+ pm_runtime_suspend(&icd->vdev->dev);
+ pm_runtime_disable(&icd->vdev->dev);
+
ici->ops->remove(icd);
+
if (icl->power)
icl->power(icd->pdev, 0);
}
@@ -741,8 +753,7 @@ static int soc_camera_g_crop(struct file *file, void *fh,
/*
* According to the V4L2 API, drivers shall not update the struct v4l2_crop
* argument with the actual geometry, instead, the user shall use G_CROP to
- * retrieve it. However, we expect camera host and client drivers to update
- * the argument, which we then use internally, but do not return to the user.
+ * retrieve it.
*/
static int soc_camera_s_crop(struct file *file, void *fh,
struct v4l2_crop *a)
@@ -1319,6 +1330,7 @@ static int video_dev_create(struct soc_camera_device *icd)
*/
static int soc_camera_video_start(struct soc_camera_device *icd)
{
+ struct device_type *type = icd->vdev->dev.type;
int ret;
if (!icd->dev.parent)
@@ -1335,6 +1347,9 @@ static int soc_camera_video_start(struct soc_camera_device *icd)
return ret;
}
+ /* Restore device type, possibly set by the subdevice driver */
+ icd->vdev->dev.type = type;
+
return 0;
}
diff --git a/drivers/media/video/tlg2300/pd-dvb.c b/drivers/media/video/tlg2300/pd-dvb.c
index ebd9cb5bec74..edd78f8b1baa 100644
--- a/drivers/media/video/tlg2300/pd-dvb.c
+++ b/drivers/media/video/tlg2300/pd-dvb.c
@@ -97,15 +97,17 @@ open_out:
return ret;
}
+#ifdef CONFIG_PM
static void poseidon_fe_release(struct dvb_frontend *fe)
{
struct poseidon *pd = fe->demodulator_priv;
-#ifdef CONFIG_PM
pd->pm_suspend = NULL;
pd->pm_resume = NULL;
-#endif
}
+#else
+#define poseidon_fe_release NULL
+#endif
static s32 poseidon_fe_sleep(struct dvb_frontend *fe)
{
diff --git a/drivers/media/video/tlg2300/pd-main.c b/drivers/media/video/tlg2300/pd-main.c
index 2cf0ebf9f28b..c267e0cfb54b 100644
--- a/drivers/media/video/tlg2300/pd-main.c
+++ b/drivers/media/video/tlg2300/pd-main.c
@@ -24,7 +24,6 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
@@ -55,8 +54,8 @@ int debug_mode;
module_param(debug_mode, int, 0644);
MODULE_PARM_DESC(debug_mode, "0 = disable, 1 = enable, 2 = verbose");
-const char *firmware_name = "tlg2300_firmware.bin";
-struct usb_driver poseidon_driver;
+static const char *firmware_name = "tlg2300_firmware.bin";
+static struct usb_driver poseidon_driver;
static LIST_HEAD(pd_device_list);
/*
@@ -501,7 +500,7 @@ static void poseidon_disconnect(struct usb_interface *interface)
kref_put(&pd->kref, poseidon_delete);
}
-struct usb_driver poseidon_driver = {
+static struct usb_driver poseidon_driver = {
.name = "poseidon",
.probe = poseidon_probe,
.disconnect = poseidon_disconnect,
diff --git a/drivers/media/video/tlg2300/pd-radio.c b/drivers/media/video/tlg2300/pd-radio.c
index 755766b15157..fae84c2a0c39 100644
--- a/drivers/media/video/tlg2300/pd-radio.c
+++ b/drivers/media/video/tlg2300/pd-radio.c
@@ -161,7 +161,8 @@ static const struct v4l2_file_operations poseidon_fm_fops = {
.ioctl = video_ioctl2,
};
-int tlg_fm_vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *vt)
+static int tlg_fm_vidioc_g_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *vt)
{
struct tuner_fm_sig_stat_s fm_stat = {};
int ret, status, count = 5;
@@ -203,7 +204,8 @@ int tlg_fm_vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *vt)
return 0;
}
-int fm_get_freq(struct file *file, void *priv, struct v4l2_frequency *argp)
+static int fm_get_freq(struct file *file, void *priv,
+ struct v4l2_frequency *argp)
{
struct poseidon *p = file->private_data;
@@ -246,7 +248,8 @@ error:
return ret;
}
-int fm_set_freq(struct file *file, void *priv, struct v4l2_frequency *argp)
+static int fm_set_freq(struct file *file, void *priv,
+ struct v4l2_frequency *argp)
{
struct poseidon *p = file->private_data;
@@ -258,13 +261,13 @@ int fm_set_freq(struct file *file, void *priv, struct v4l2_frequency *argp)
return set_frequency(p, argp->frequency);
}
-int tlg_fm_vidioc_g_ctrl(struct file *file, void *priv,
+static int tlg_fm_vidioc_g_ctrl(struct file *file, void *priv,
struct v4l2_control *arg)
{
return 0;
}
-int tlg_fm_vidioc_g_exts_ctrl(struct file *file, void *fh,
+static int tlg_fm_vidioc_g_exts_ctrl(struct file *file, void *fh,
struct v4l2_ext_controls *ctrls)
{
struct poseidon *p = file->private_data;
@@ -285,7 +288,7 @@ int tlg_fm_vidioc_g_exts_ctrl(struct file *file, void *fh,
return 0;
}
-int tlg_fm_vidioc_s_exts_ctrl(struct file *file, void *fh,
+static int tlg_fm_vidioc_s_exts_ctrl(struct file *file, void *fh,
struct v4l2_ext_controls *ctrls)
{
int i;
@@ -312,13 +315,13 @@ int tlg_fm_vidioc_s_exts_ctrl(struct file *file, void *fh,
return 0;
}
-int tlg_fm_vidioc_s_ctrl(struct file *file, void *priv,
+static int tlg_fm_vidioc_s_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
return 0;
}
-int tlg_fm_vidioc_queryctrl(struct file *file, void *priv,
+static int tlg_fm_vidioc_queryctrl(struct file *file, void *priv,
struct v4l2_queryctrl *ctrl)
{
if (!(ctrl->id & V4L2_CTRL_FLAG_NEXT_CTRL))
@@ -337,7 +340,7 @@ int tlg_fm_vidioc_queryctrl(struct file *file, void *priv,
return -EINVAL;
}
-int tlg_fm_vidioc_querymenu(struct file *file, void *fh,
+static int tlg_fm_vidioc_querymenu(struct file *file, void *fh,
struct v4l2_querymenu *qmenu)
{
return v4l2_ctrl_query_menu(qmenu, NULL, NULL);
diff --git a/drivers/media/video/tlg2300/pd-video.c b/drivers/media/video/tlg2300/pd-video.c
index cf8f18c007e6..c750fd115ec4 100644
--- a/drivers/media/video/tlg2300/pd-video.c
+++ b/drivers/media/video/tlg2300/pd-video.c
@@ -12,11 +12,13 @@
#include "pd-common.h"
#include "vendorcmds.h"
+#ifdef CONFIG_PM
static int pm_video_suspend(struct poseidon *pd);
static int pm_video_resume(struct poseidon *pd);
+#endif
static void iso_bubble_handler(struct work_struct *w);
-int usb_transfer_mode;
+static int usb_transfer_mode;
module_param(usb_transfer_mode, int, 0644);
MODULE_PARM_DESC(usb_transfer_mode, "0 = Bulk, 1 = Isochronous");
@@ -617,7 +619,7 @@ static int pd_buf_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
return 0;
}
-int fire_all_urb(struct video_data *video)
+static int fire_all_urb(struct video_data *video)
{
int i, ret;
@@ -877,7 +879,7 @@ out:
return ret;
}
-int vidioc_s_std(struct file *file, void *fh, v4l2_std_id *norm)
+static int vidioc_s_std(struct file *file, void *fh, v4l2_std_id *norm)
{
struct front_face *front = fh;
logs(front);
@@ -1020,7 +1022,7 @@ static int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *a)
return 0;
}
-int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *a)
+static int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *a)
{
a->index = 0;
a->capability = V4L2_AUDCAP_STEREO;
@@ -1029,7 +1031,7 @@ int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *a)
return 0;
}
-int vidioc_s_audio(struct file *file, void *fh, struct v4l2_audio *a)
+static int vidioc_s_audio(struct file *file, void *fh, struct v4l2_audio *a)
{
return (0 == a->index) ? 0 : -EINVAL;
}
@@ -1189,7 +1191,7 @@ static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
}
/* Just stop the URBs, do not free the URBs */
-int usb_transfer_stop(struct video_data *video)
+static int usb_transfer_stop(struct video_data *video)
{
if (video->is_streaming) {
int i;
@@ -1518,13 +1520,13 @@ static int pd_video_mmap(struct file *file, struct vm_area_struct *vma)
return videobuf_mmap_mapper(&front->q, vma);
}
-unsigned int pd_video_poll(struct file *file, poll_table *table)
+static unsigned int pd_video_poll(struct file *file, poll_table *table)
{
struct front_face *front = file->private_data;
return videobuf_poll_stream(file, &front->q, table);
}
-ssize_t pd_video_read(struct file *file, char __user *buffer,
+static ssize_t pd_video_read(struct file *file, char __user *buffer,
size_t count, loff_t *ppos)
{
struct front_face *front = file->private_data;
diff --git a/drivers/media/video/tvp514x.c b/drivers/media/video/tvp514x.c
index e4815a1806e3..e826114b7fb8 100644
--- a/drivers/media/video/tvp514x.c
+++ b/drivers/media/video/tvp514x.c
@@ -79,6 +79,8 @@ struct tvp514x_std_info {
};
static struct tvp514x_reg tvp514x_reg_list_default[0x40];
+
+static int tvp514x_s_stream(struct v4l2_subdev *sd, int enable);
/**
* struct tvp514x_decoder - TVP5146/47 decoder object
* @sd: Subdevice Slave handle
@@ -644,6 +646,17 @@ static int tvp514x_s_routing(struct v4l2_subdev *sd,
/* Index out of bound */
return -EINVAL;
+ /*
+ * For the sequence streamon -> streamoff -> s_input, the signal fails
+ * to lock, since streamoff puts the TVP514x into power-off state,
+ * which leads to a failure in the subsequent s_input.
+ *
+ * So power up the TVP514x device here, since it is important to lock
+ * the signal at this stage.
+ */
+ if (!decoder->streaming)
+ tvp514x_s_stream(sd, 1);
+
input_sel = input;
output_sel = output;
diff --git a/drivers/media/video/tvp5150.c b/drivers/media/video/tvp5150.c
index 908ffb68e926..47f0582d50a5 100644
--- a/drivers/media/video/tvp5150.c
+++ b/drivers/media/video/tvp5150.c
@@ -891,29 +891,26 @@ static int tvp5150_s_routing(struct v4l2_subdev *sd,
return 0;
}
-static int tvp5150_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
+static int tvp5150_s_raw_fmt(struct v4l2_subdev *sd, struct v4l2_vbi_format *fmt)
+{
+ /* This is for capturing 36 raw VBI lines.
+ If there's a way to cut off the beginning 2 VBI lines
+ with the tvp5150, then the VBI line count could be lowered
+ to 17 lines/field again, although I couldn't find a register
+ which could do that cropping */
+ if (fmt->sample_format == V4L2_PIX_FMT_GREY)
+ tvp5150_write(sd, TVP5150_LUMA_PROC_CTL_1, 0x70);
+ if (fmt->count[0] == 18 && fmt->count[1] == 18) {
+ tvp5150_write(sd, TVP5150_VERT_BLANKING_START, 0x00);
+ tvp5150_write(sd, TVP5150_VERT_BLANKING_STOP, 0x01);
+ }
+ return 0;
+}
+
+static int tvp5150_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *svbi)
{
- struct v4l2_sliced_vbi_format *svbi;
int i;
- /* raw vbi */
- if (fmt->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
- /* this is for capturing 36 raw vbi lines
- if there's a way to cut off the beginning 2 vbi lines
- with the tvp5150 then the vbi line count could be lowered
- to 17 lines/field again, although I couldn't find a register
- which could do that cropping */
- if (fmt->fmt.vbi.sample_format == V4L2_PIX_FMT_GREY)
- tvp5150_write(sd, TVP5150_LUMA_PROC_CTL_1, 0x70);
- if (fmt->fmt.vbi.count[0] == 18 && fmt->fmt.vbi.count[1] == 18) {
- tvp5150_write(sd, TVP5150_VERT_BLANKING_START, 0x00);
- tvp5150_write(sd, TVP5150_VERT_BLANKING_STOP, 0x01);
- }
- return 0;
- }
- if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
- return -EINVAL;
- svbi = &fmt->fmt.sliced;
if (svbi->service_set != 0) {
for (i = 0; i <= 23; i++) {
svbi->service_lines[1][i] = 0;
@@ -937,14 +934,21 @@ static int tvp5150_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
return 0;
}
-static int tvp5150_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
+static int tvp5150_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
{
- struct v4l2_sliced_vbi_format *svbi;
- int i, mask = 0;
+ switch (fmt->type) {
+ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
+ return tvp5150_s_sliced_fmt(sd, &fmt->fmt.sliced);
- if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
+ default:
return -EINVAL;
- svbi = &fmt->fmt.sliced;
+ }
+}
+
+static int tvp5150_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *svbi)
+{
+ int i, mask = 0;
+
memset(svbi, 0, sizeof(*svbi));
for (i = 0; i <= 23; i++) {
@@ -956,6 +960,12 @@ static int tvp5150_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
return 0;
}
+static int tvp5150_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
+{
+ if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
+ return -EINVAL;
+ return tvp5150_g_sliced_fmt(sd, &fmt->fmt.sliced);
+}
static int tvp5150_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *chip)
@@ -1046,13 +1056,20 @@ static const struct v4l2_subdev_video_ops tvp5150_video_ops = {
.s_routing = tvp5150_s_routing,
.g_fmt = tvp5150_g_fmt,
.s_fmt = tvp5150_s_fmt,
+};
+
+static const struct v4l2_subdev_vbi_ops tvp5150_vbi_ops = {
.g_sliced_vbi_cap = tvp5150_g_sliced_vbi_cap,
+ .g_sliced_fmt = tvp5150_g_sliced_fmt,
+ .s_sliced_fmt = tvp5150_s_sliced_fmt,
+ .s_raw_fmt = tvp5150_s_raw_fmt,
};
static const struct v4l2_subdev_ops tvp5150_ops = {
.core = &tvp5150_core_ops,
.tuner = &tvp5150_tuner_ops,
.video = &tvp5150_video_ops,
+ .vbi = &tvp5150_vbi_ops,
};
diff --git a/drivers/media/video/usbvideo/quickcam_messenger.c b/drivers/media/video/usbvideo/quickcam_messenger.c
index fab48ec6c0ea..fbd665fa1979 100644
--- a/drivers/media/video/usbvideo/quickcam_messenger.c
+++ b/drivers/media/video/usbvideo/quickcam_messenger.c
@@ -693,12 +693,13 @@ static int qcm_start_data(struct uvd *uvd)
static void qcm_stop_data(struct uvd *uvd)
{
- struct qcm *cam = (struct qcm *) uvd->user_data;
+ struct qcm *cam;
int i, j;
int ret;
if ((uvd == NULL) || (!uvd->streaming) || (uvd->dev == NULL))
return;
+ cam = (struct qcm *) uvd->user_data;
ret = qcm_camera_off(uvd);
if (ret)
diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c
index 7c17ec63c5da..d91f01668074 100644
--- a/drivers/media/video/usbvision/usbvision-video.c
+++ b/drivers/media/video/usbvision/usbvision-video.c
@@ -137,8 +137,6 @@ static int PowerOnAtOpen = 1;
static int video_nr = -1;
/* Sequential Number of Radio Device */
static int radio_nr = -1;
-/* Sequential Number of VBI Device */
-static int vbi_nr = -1;
/* Grab parameters for the device driver */
@@ -148,14 +146,12 @@ module_param(video_debug, int, 0444);
module_param(PowerOnAtOpen, int, 0444);
module_param(video_nr, int, 0444);
module_param(radio_nr, int, 0444);
-module_param(vbi_nr, int, 0444);
MODULE_PARM_DESC(isocMode, " Set the default format for ISOC endpoint. Default: 0x60 (Compression On)");
MODULE_PARM_DESC(video_debug, " Set the default Debug Mode of the device driver. Default: 0 (Off)");
MODULE_PARM_DESC(PowerOnAtOpen, " Set the default device to power on when device is opened. Default: 1 (On)");
MODULE_PARM_DESC(video_nr, "Set video device number (/dev/videoX). Default: -1 (autodetect)");
MODULE_PARM_DESC(radio_nr, "Set radio device number (/dev/radioX). Default: -1 (autodetect)");
-MODULE_PARM_DESC(vbi_nr, "Set vbi device number (/dev/vbiX). Default: -1 (autodetect)");
// Misc stuff
@@ -1244,36 +1240,6 @@ static int usbvision_radio_close(struct file *file)
return errCode;
}
-/*
- * Here comes the stuff for vbi on usbvision based devices
- *
- */
-static int usbvision_vbi_open(struct file *file)
-{
- /* TODO */
- return -ENODEV;
-}
-
-static int usbvision_vbi_close(struct file *file)
-{
- /* TODO */
- return -ENODEV;
-}
-
-static long usbvision_do_vbi_ioctl(struct file *file,
- unsigned int cmd, void *arg)
-{
- /* TODO */
- return -ENOIOCTLCMD;
-}
-
-static long usbvision_vbi_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- return video_usercopy(file, cmd, arg, usbvision_do_vbi_ioctl);
-}
-
-
//
// Video registration stuff
//
@@ -1367,21 +1333,6 @@ static struct video_device usbvision_radio_template = {
.current_norm = V4L2_STD_PAL
};
-// vbi template
-static const struct v4l2_file_operations usbvision_vbi_fops = {
- .owner = THIS_MODULE,
- .open = usbvision_vbi_open,
- .release = usbvision_vbi_close,
- .ioctl = usbvision_vbi_ioctl,
-};
-
-static struct video_device usbvision_vbi_template=
-{
- .fops = &usbvision_vbi_fops,
- .release = video_device_release,
- .name = "usbvision-vbi",
-};
-
static struct video_device *usbvision_vdev_init(struct usb_usbvision *usbvision,
struct video_device *vdev_template,
@@ -1410,18 +1361,6 @@ static struct video_device *usbvision_vdev_init(struct usb_usbvision *usbvision,
// unregister video4linux devices
static void usbvision_unregister_video(struct usb_usbvision *usbvision)
{
- // vbi Device:
- if (usbvision->vbi) {
- PDEBUG(DBG_PROBE, "unregister %s [v4l2]",
- video_device_node_name(usbvision->vbi));
- if (video_is_registered(usbvision->vbi)) {
- video_unregister_device(usbvision->vbi);
- } else {
- video_device_release(usbvision->vbi);
- }
- usbvision->vbi = NULL;
- }
-
// Radio Device:
if (usbvision->rdev) {
PDEBUG(DBG_PROBE, "unregister %s [v4l2]",
@@ -1482,22 +1421,6 @@ static int __devinit usbvision_register_video(struct usb_usbvision *usbvision)
printk(KERN_INFO "USBVision[%d]: registered USBVision Radio device %s [v4l2]\n",
usbvision->nr, video_device_node_name(usbvision->rdev));
}
- // vbi Device:
- if (usbvision_device_data[usbvision->DevModel].vbi) {
- usbvision->vbi = usbvision_vdev_init(usbvision,
- &usbvision_vbi_template,
- "USBVision VBI");
- if (usbvision->vbi == NULL) {
- goto err_exit;
- }
- if (video_register_device(usbvision->vbi,
- VFL_TYPE_VBI,
- vbi_nr)<0) {
- goto err_exit;
- }
- printk(KERN_INFO "USBVision[%d]: registered USBVision VBI device %s [v4l2] (Not Working Yet!)\n",
- usbvision->nr, video_device_node_name(usbvision->vbi));
- }
// all done
return 0;
diff --git a/drivers/media/video/usbvision/usbvision.h b/drivers/media/video/usbvision/usbvision.h
index f8d7458daf3e..5ab936e252fe 100644
--- a/drivers/media/video/usbvision/usbvision.h
+++ b/drivers/media/video/usbvision/usbvision.h
@@ -360,7 +360,6 @@ struct usb_usbvision {
struct v4l2_device v4l2_dev;
struct video_device *vdev; /* Video Device */
struct video_device *rdev; /* Radio Device */
- struct video_device *vbi; /* VBI Device */
/* i2c Declaration Section*/
struct i2c_adapter i2c_adap;
diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c
index 6d3850b37161..aa0720af07a0 100644
--- a/drivers/media/video/uvc/uvc_ctrl.c
+++ b/drivers/media/video/uvc/uvc_ctrl.c
@@ -217,8 +217,7 @@ static struct uvc_control_info uvc_ctrls[] = {
.selector = UVC_CT_EXPOSURE_TIME_RELATIVE_CONTROL,
.index = 4,
.size = 1,
- .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_CUR
- | UVC_CONTROL_RESTORE,
+ .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_RESTORE,
},
{
.entity = UVC_GUID_UVC_CAMERA,
@@ -233,8 +232,9 @@ static struct uvc_control_info uvc_ctrls[] = {
.selector = UVC_CT_FOCUS_RELATIVE_CONTROL,
.index = 6,
.size = 2,
- .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE
- | UVC_CONTROL_AUTO_UPDATE,
+ .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_MIN
+ | UVC_CONTROL_GET_MAX | UVC_CONTROL_GET_RES
+ | UVC_CONTROL_GET_DEF | UVC_CONTROL_AUTO_UPDATE,
},
{
.entity = UVC_GUID_UVC_CAMERA,
@@ -249,8 +249,7 @@ static struct uvc_control_info uvc_ctrls[] = {
.selector = UVC_CT_IRIS_RELATIVE_CONTROL,
.index = 8,
.size = 1,
- .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_CUR
- | UVC_CONTROL_AUTO_UPDATE,
+ .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_AUTO_UPDATE,
},
{
.entity = UVC_GUID_UVC_CAMERA,
@@ -265,8 +264,9 @@ static struct uvc_control_info uvc_ctrls[] = {
.selector = UVC_CT_ZOOM_RELATIVE_CONTROL,
.index = 10,
.size = 3,
- .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE
- | UVC_CONTROL_AUTO_UPDATE,
+ .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_MIN
+ | UVC_CONTROL_GET_MAX | UVC_CONTROL_GET_RES
+ | UVC_CONTROL_GET_DEF | UVC_CONTROL_AUTO_UPDATE,
},
{
.entity = UVC_GUID_UVC_CAMERA,
@@ -281,8 +281,9 @@ static struct uvc_control_info uvc_ctrls[] = {
.selector = UVC_CT_PANTILT_RELATIVE_CONTROL,
.index = 12,
.size = 4,
- .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE
- | UVC_CONTROL_AUTO_UPDATE,
+ .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_MIN
+ | UVC_CONTROL_GET_MAX | UVC_CONTROL_GET_RES
+ | UVC_CONTROL_GET_DEF | UVC_CONTROL_AUTO_UPDATE,
},
{
.entity = UVC_GUID_UVC_CAMERA,
@@ -297,8 +298,9 @@ static struct uvc_control_info uvc_ctrls[] = {
.selector = UVC_CT_ROLL_RELATIVE_CONTROL,
.index = 14,
.size = 2,
- .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE
- | UVC_CONTROL_AUTO_UPDATE,
+ .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_MIN
+ | UVC_CONTROL_GET_MAX | UVC_CONTROL_GET_RES
+ | UVC_CONTROL_GET_DEF | UVC_CONTROL_AUTO_UPDATE,
},
{
.entity = UVC_GUID_UVC_CAMERA,
@@ -562,6 +564,26 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.data_type = UVC_CTRL_DATA_TYPE_BOOLEAN,
},
{
+ .id = V4L2_CID_IRIS_ABSOLUTE,
+ .name = "Iris, Absolute",
+ .entity = UVC_GUID_UVC_CAMERA,
+ .selector = UVC_CT_IRIS_ABSOLUTE_CONTROL,
+ .size = 16,
+ .offset = 0,
+ .v4l2_type = V4L2_CTRL_TYPE_INTEGER,
+ .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED,
+ },
+ {
+ .id = V4L2_CID_IRIS_RELATIVE,
+ .name = "Iris, Relative",
+ .entity = UVC_GUID_UVC_CAMERA,
+ .selector = UVC_CT_IRIS_RELATIVE_CONTROL,
+ .size = 8,
+ .offset = 0,
+ .v4l2_type = V4L2_CTRL_TYPE_INTEGER,
+ .data_type = UVC_CTRL_DATA_TYPE_SIGNED,
+ },
+ {
.id = V4L2_CID_ZOOM_ABSOLUTE,
.name = "Zoom, Absolute",
.entity = UVC_GUID_UVC_CAMERA,
@@ -822,6 +844,8 @@ int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
strlcpy(v4l2_ctrl->name, mapping->name, sizeof v4l2_ctrl->name);
v4l2_ctrl->flags = 0;
+ if (!(ctrl->info->flags & UVC_CONTROL_GET_CUR))
+ v4l2_ctrl->flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
if (!(ctrl->info->flags & UVC_CONTROL_SET_CUR))
v4l2_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
@@ -1047,6 +1071,8 @@ int uvc_ctrl_set(struct uvc_video_chain *chain,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MAX));
step = mapping->get(mapping, UVC_GET_RES,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES));
+ if (step == 0)
+ step = 1;
xctrl->value = min + (xctrl->value - min + step/2) / step * step;
xctrl->value = clamp(xctrl->value, min, max);
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
index 86ff8c12ea58..838b56f097cf 100644
--- a/drivers/media/video/uvc/uvc_driver.c
+++ b/drivers/media/video/uvc/uvc_driver.c
@@ -91,11 +91,16 @@ static struct uvc_format_desc uvc_fmts[] = {
.fcc = V4L2_PIX_FMT_UYVY,
},
{
- .name = "Greyscale",
+ .name = "Greyscale (8-bit)",
.guid = UVC_GUID_FORMAT_Y800,
.fcc = V4L2_PIX_FMT_GREY,
},
{
+ .name = "Greyscale (16-bit)",
+ .guid = UVC_GUID_FORMAT_Y16,
+ .fcc = V4L2_PIX_FMT_Y16,
+ },
+ {
.name = "RGB Bayer",
.guid = UVC_GUID_FORMAT_BY8,
.fcc = V4L2_PIX_FMT_SBGGR8,
@@ -2105,6 +2110,15 @@ static struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = UVC_QUIRK_STREAM_NO_FID },
+ /* Syntek (Packard Bell EasyNote MX52) */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x174f,
+ .idProduct = 0x8a12,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_QUIRK_STREAM_NO_FID },
/* Syntek (Asus F9SG) */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
@@ -2169,6 +2183,15 @@ static struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = UVC_QUIRK_PROBE_MINMAX },
+ /* Arkmicro unbranded */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x18ec,
+ .idProduct = 0x3290,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_QUIRK_PROBE_DEF },
/* Bodelin ProScopeHR */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_DEV_HI
diff --git a/drivers/media/video/uvc/uvc_queue.c b/drivers/media/video/uvc/uvc_queue.c
index 4a925a31b0e0..133c78d113ac 100644
--- a/drivers/media/video/uvc/uvc_queue.c
+++ b/drivers/media/video/uvc/uvc_queue.c
@@ -388,8 +388,12 @@ unsigned int uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
poll_wait(file, &buf->wait, wait);
if (buf->state == UVC_BUF_STATE_DONE ||
- buf->state == UVC_BUF_STATE_ERROR)
- mask |= POLLIN | POLLRDNORM;
+ buf->state == UVC_BUF_STATE_ERROR) {
+ if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ mask |= POLLIN | POLLRDNORM;
+ else
+ mask |= POLLOUT | POLLWRNORM;
+ }
done:
mutex_unlock(&queue->mutex);
diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
index 2bba059259e6..d1f88406a5e7 100644
--- a/drivers/media/video/uvc/uvcvideo.h
+++ b/drivers/media/video/uvc/uvcvideo.h
@@ -131,11 +131,13 @@ struct uvc_xu_control {
#define UVC_GUID_FORMAT_Y800 \
{ 'Y', '8', '0', '0', 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Y16 \
+ { 'Y', '1', '6', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
#define UVC_GUID_FORMAT_BY8 \
{ 'B', 'Y', '8', ' ', 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-
/* ------------------------------------------------------------------------
* Driver specific constants.
*/
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index 36b5cb86fb57..035c414507a1 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -51,6 +51,9 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/i2c.h>
+#if defined(CONFIG_SPI)
+#include <linux/spi/spi.h>
+#endif
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/pgtable.h>
@@ -340,6 +343,13 @@ const char **v4l2_ctrl_get_menu(u32 id)
"None",
"Black & White",
"Sepia",
+ "Negative",
+ "Emboss",
+ "Sketch",
+ "Sky blue",
+ "Grass green",
+ "Skin whiten",
+ "Vivid",
NULL
};
static const char *tune_preemphasis[] = {
@@ -429,10 +439,13 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_SHARPNESS: return "Sharpness";
case V4L2_CID_BACKLIGHT_COMPENSATION: return "Backlight Compensation";
case V4L2_CID_CHROMA_AGC: return "Chroma AGC";
+ case V4L2_CID_CHROMA_GAIN: return "Chroma Gain";
case V4L2_CID_COLOR_KILLER: return "Color Killer";
case V4L2_CID_COLORFX: return "Color Effects";
+ case V4L2_CID_AUTOBRIGHTNESS: return "Brightness, Automatic";
+ case V4L2_CID_BAND_STOP_FILTER: return "Band-Stop Filter";
case V4L2_CID_ROTATE: return "Rotate";
- case V4L2_CID_BG_COLOR: return "Background color";
+ case V4L2_CID_BG_COLOR: return "Background Color";
/* MPEG controls */
case V4L2_CID_MPEG_CLASS: return "MPEG Encoder Controls";
@@ -483,6 +496,8 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_FOCUS_ABSOLUTE: return "Focus, Absolute";
case V4L2_CID_FOCUS_RELATIVE: return "Focus, Relative";
case V4L2_CID_FOCUS_AUTO: return "Focus, Automatic";
+ case V4L2_CID_IRIS_ABSOLUTE: return "Iris, Absolute";
+ case V4L2_CID_IRIS_RELATIVE: return "Iris, Relative";
case V4L2_CID_ZOOM_ABSOLUTE: return "Zoom, Absolute";
case V4L2_CID_ZOOM_RELATIVE: return "Zoom, Relative";
case V4L2_CID_ZOOM_CONTINUOUS: return "Zoom, Continuous";
@@ -620,6 +635,7 @@ int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 max, s32 ste
case V4L2_CID_BLUE_BALANCE:
case V4L2_CID_GAMMA:
case V4L2_CID_SHARPNESS:
+ case V4L2_CID_CHROMA_GAIN:
case V4L2_CID_RDS_TX_DEVIATION:
case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME:
case V4L2_CID_AUDIO_LIMITER_DEVIATION:
@@ -636,6 +652,7 @@ int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 max, s32 ste
case V4L2_CID_PAN_RELATIVE:
case V4L2_CID_TILT_RELATIVE:
case V4L2_CID_FOCUS_RELATIVE:
+ case V4L2_CID_IRIS_RELATIVE:
case V4L2_CID_ZOOM_RELATIVE:
qctrl->flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
break;
@@ -951,6 +968,66 @@ EXPORT_SYMBOL_GPL(v4l2_i2c_tuner_addrs);
#endif /* defined(CONFIG_I2C) */
+#if defined(CONFIG_SPI)
+
+/* Load a spi sub-device. */
+
+void v4l2_spi_subdev_init(struct v4l2_subdev *sd, struct spi_device *spi,
+ const struct v4l2_subdev_ops *ops)
+{
+ v4l2_subdev_init(sd, ops);
+ sd->flags |= V4L2_SUBDEV_FL_IS_SPI;
+ /* the owner is the same as the spi_device's driver owner */
+ sd->owner = spi->dev.driver->owner;
+ /* spi_device and v4l2_subdev point to one another */
+ v4l2_set_subdevdata(sd, spi);
+ spi_set_drvdata(spi, sd);
+ /* initialize name */
+ strlcpy(sd->name, spi->dev.driver->name, sizeof(sd->name));
+}
+EXPORT_SYMBOL_GPL(v4l2_spi_subdev_init);
+
+struct v4l2_subdev *v4l2_spi_new_subdev(struct v4l2_device *v4l2_dev,
+ struct spi_master *master, struct spi_board_info *info)
+{
+ struct v4l2_subdev *sd = NULL;
+ struct spi_device *spi = NULL;
+
+ BUG_ON(!v4l2_dev);
+
+ if (info->modalias)
+ request_module(info->modalias);
+
+ spi = spi_new_device(master, info);
+
+ if (spi == NULL || spi->dev.driver == NULL)
+ goto error;
+
+ if (!try_module_get(spi->dev.driver->owner))
+ goto error;
+
+ sd = spi_get_drvdata(spi);
+
+ /* Register with the v4l2_device which increases the module's
+ use count as well. */
+ if (v4l2_device_register_subdev(v4l2_dev, sd))
+ sd = NULL;
+
+ /* Decrease the module use count to match the first try_module_get. */
+ module_put(spi->dev.driver->owner);
+
+error:
+ /* If we have an spi_device but no subdev, then something went wrong and
+ we must unregister the spi_device. */
+ if (spi && sd == NULL)
+ spi_unregister_device(spi);
+
+ return sd;
+}
+EXPORT_SYMBOL_GPL(v4l2_spi_new_subdev);
+
+#endif /* defined(CONFIG_SPI) */
+
/* Clamp x to be between min and max, aligned to a multiple of 2^align. min
* and max don't have to be aligned, but there must be at least one valid
* value. E.g., min=17,max=31,align=4 is not allowed as there are no multiples
diff --git a/drivers/media/video/v4l2-compat-ioctl32.c b/drivers/media/video/v4l2-compat-ioctl32.c
index f77f84bfe714..9004a5fe7643 100644
--- a/drivers/media/video/v4l2-compat-ioctl32.c
+++ b/drivers/media/video/v4l2-compat-ioctl32.c
@@ -1086,6 +1086,9 @@ long v4l2_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
case VIDIOC_QUERY_DV_PRESET:
case VIDIOC_S_DV_TIMINGS:
case VIDIOC_G_DV_TIMINGS:
+ case VIDIOC_DQEVENT:
+ case VIDIOC_SUBSCRIBE_EVENT:
+ case VIDIOC_UNSUBSCRIBE_EVENT:
ret = do_video_ioctl(file, cmd, arg);
break;
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
index 709069916068..65a7b30524ca 100644
--- a/drivers/media/video/v4l2-dev.c
+++ b/drivers/media/video/v4l2-dev.c
@@ -421,6 +421,10 @@ static int __video_register_device(struct video_device *vdev, int type, int nr,
if (!vdev->release)
return -EINVAL;
+ /* v4l2_fh support */
+ spin_lock_init(&vdev->fh_lock);
+ INIT_LIST_HEAD(&vdev->fh_list);
+
/* Part 1: check device type */
switch (type) {
case VFL_TYPE_GRABBER:
diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
index 0d06e7cbd5b3..5a7dc4afe92a 100644
--- a/drivers/media/video/v4l2-device.c
+++ b/drivers/media/video/v4l2-device.c
@@ -21,6 +21,9 @@
#include <linux/types.h>
#include <linux/ioctl.h>
#include <linux/i2c.h>
+#if defined(CONFIG_SPI)
+#include <linux/spi/spi.h>
+#endif
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
@@ -97,6 +100,14 @@ void v4l2_device_unregister(struct v4l2_device *v4l2_dev)
i2c_unregister_device(client);
}
#endif
+#if defined(CONFIG_SPI)
+ if (sd->flags & V4L2_SUBDEV_FL_IS_SPI) {
+ struct spi_device *spi = v4l2_get_subdevdata(sd);
+
+ if (spi)
+ spi_unregister_device(spi);
+ }
+#endif
}
}
EXPORT_SYMBOL_GPL(v4l2_device_unregister);
diff --git a/drivers/media/video/v4l2-event.c b/drivers/media/video/v4l2-event.c
new file mode 100644
index 000000000000..de74ce07b5e2
--- /dev/null
+++ b/drivers/media/video/v4l2-event.c
@@ -0,0 +1,292 @@
+/*
+ * v4l2-event.c
+ *
+ * V4L2 events.
+ *
+ * Copyright (C) 2009--2010 Nokia Corporation.
+ *
+ * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <media/v4l2-dev.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+int v4l2_event_init(struct v4l2_fh *fh)
+{
+ fh->events = kzalloc(sizeof(*fh->events), GFP_KERNEL);
+ if (fh->events == NULL)
+ return -ENOMEM;
+
+ init_waitqueue_head(&fh->events->wait);
+
+ INIT_LIST_HEAD(&fh->events->free);
+ INIT_LIST_HEAD(&fh->events->available);
+ INIT_LIST_HEAD(&fh->events->subscribed);
+
+ fh->events->sequence = -1;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_event_init);
+
+int v4l2_event_alloc(struct v4l2_fh *fh, unsigned int n)
+{
+ struct v4l2_events *events = fh->events;
+ unsigned long flags;
+
+ if (!events) {
+ WARN_ON(1);
+ return -ENOMEM;
+ }
+
+ while (events->nallocated < n) {
+ struct v4l2_kevent *kev;
+
+ kev = kzalloc(sizeof(*kev), GFP_KERNEL);
+ if (kev == NULL)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+ list_add_tail(&kev->list, &events->free);
+ events->nallocated++;
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_event_alloc);
+
+#define list_kfree(list, type, member) \
+ while (!list_empty(list)) { \
+ type *hi; \
+ hi = list_first_entry(list, type, member); \
+ list_del(&hi->member); \
+ kfree(hi); \
+ }
+
+void v4l2_event_free(struct v4l2_fh *fh)
+{
+ struct v4l2_events *events = fh->events;
+
+ if (!events)
+ return;
+
+ list_kfree(&events->free, struct v4l2_kevent, list);
+ list_kfree(&events->available, struct v4l2_kevent, list);
+ list_kfree(&events->subscribed, struct v4l2_subscribed_event, list);
+
+ kfree(events);
+ fh->events = NULL;
+}
+EXPORT_SYMBOL_GPL(v4l2_event_free);
+
+static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
+{
+ struct v4l2_events *events = fh->events;
+ struct v4l2_kevent *kev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+
+ if (list_empty(&events->available)) {
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+ return -ENOENT;
+ }
+
+ WARN_ON(events->navailable == 0);
+
+ kev = list_first_entry(&events->available, struct v4l2_kevent, list);
+ list_move(&kev->list, &events->free);
+ events->navailable--;
+
+ kev->event.pending = events->navailable;
+ *event = kev->event;
+
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+
+ return 0;
+}
+
+int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
+ int nonblocking)
+{
+ struct v4l2_events *events = fh->events;
+ int ret;
+
+ if (nonblocking)
+ return __v4l2_event_dequeue(fh, event);
+
+ do {
+ ret = wait_event_interruptible(events->wait,
+ events->navailable != 0);
+ if (ret < 0)
+ return ret;
+
+ ret = __v4l2_event_dequeue(fh, event);
+ } while (ret == -ENOENT);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_event_dequeue);
+
+/* Caller must hold fh->vdev->fh_lock! */
+static struct v4l2_subscribed_event *v4l2_event_subscribed(
+ struct v4l2_fh *fh, u32 type)
+{
+ struct v4l2_events *events = fh->events;
+ struct v4l2_subscribed_event *sev;
+
+ assert_spin_locked(&fh->vdev->fh_lock);
+
+ list_for_each_entry(sev, &events->subscribed, list) {
+ if (sev->type == type)
+ return sev;
+ }
+
+ return NULL;
+}
+
+void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
+{
+ struct v4l2_fh *fh;
+ unsigned long flags;
+ struct timespec timestamp;
+
+ ktime_get_ts(&timestamp);
+
+ spin_lock_irqsave(&vdev->fh_lock, flags);
+
+ list_for_each_entry(fh, &vdev->fh_list, list) {
+ struct v4l2_events *events = fh->events;
+ struct v4l2_kevent *kev;
+
+ /* Are we subscribed? */
+ if (!v4l2_event_subscribed(fh, ev->type))
+ continue;
+
+ /* Increase event sequence number on fh. */
+ events->sequence++;
+
+ /* Do we have any free events? */
+ if (list_empty(&events->free))
+ continue;
+
+ /* Take one and fill it. */
+ kev = list_first_entry(&events->free, struct v4l2_kevent, list);
+ kev->event.type = ev->type;
+ kev->event.u = ev->u;
+ kev->event.timestamp = timestamp;
+ kev->event.sequence = events->sequence;
+ list_move_tail(&kev->list, &events->available);
+
+ events->navailable++;
+
+ wake_up_all(&events->wait);
+ }
+
+ spin_unlock_irqrestore(&vdev->fh_lock, flags);
+}
+EXPORT_SYMBOL_GPL(v4l2_event_queue);
+
+int v4l2_event_pending(struct v4l2_fh *fh)
+{
+ return fh->events->navailable;
+}
+EXPORT_SYMBOL_GPL(v4l2_event_pending);
+
+int v4l2_event_subscribe(struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ struct v4l2_events *events = fh->events;
+ struct v4l2_subscribed_event *sev;
+ unsigned long flags;
+
+ if (fh->events == NULL) {
+ WARN_ON(1);
+ return -ENOMEM;
+ }
+
+ sev = kmalloc(sizeof(*sev), GFP_KERNEL);
+ if (!sev)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+
+ if (v4l2_event_subscribed(fh, sub->type) == NULL) {
+ INIT_LIST_HEAD(&sev->list);
+ sev->type = sub->type;
+
+ list_add(&sev->list, &events->subscribed);
+ sev = NULL;
+ }
+
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+
+ kfree(sev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
+
+static void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
+{
+ struct v4l2_events *events = fh->events;
+ struct v4l2_subscribed_event *sev;
+ unsigned long flags;
+
+ do {
+ sev = NULL;
+
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+ if (!list_empty(&events->subscribed)) {
+ sev = list_first_entry(&events->subscribed,
+ struct v4l2_subscribed_event, list);
+ list_del(&sev->list);
+ }
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+ kfree(sev);
+ } while (sev);
+}
+
+int v4l2_event_unsubscribe(struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ struct v4l2_subscribed_event *sev;
+ unsigned long flags;
+
+ if (sub->type == V4L2_EVENT_ALL) {
+ v4l2_event_unsubscribe_all(fh);
+ return 0;
+ }
+
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+
+ sev = v4l2_event_subscribed(fh, sub->type);
+ if (sev != NULL)
+ list_del(&sev->list);
+
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+
+ kfree(sev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);
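
For orientation, a sketch of how a driver is expected to opt in to this interface: wire the new subscribe/unsubscribe ioctl ops to the helpers above, pre-allocate a few kevents per file handle (otherwise queued events are silently dropped when the free list is empty), and queue an event when the corresponding condition occurs. This is an editorial illustration, not part of the patch; all my_* names and MY_EVT_FRAME_DONE are made up, and the ioctl op prototype is assumed from the header changes elsewhere in this series.

#define MY_EVT_FRAME_DONE	(V4L2_EVENT_PRIVATE_START + 1)

/* wired into the driver's struct v4l2_ioctl_ops as .vidioc_subscribe_event */
static int my_subscribe_event(struct v4l2_fh *fh,
			      struct v4l2_event_subscription *sub)
{
	if (sub->type != MY_EVT_FRAME_DONE)
		return -EINVAL;
	/* kevents are expected to have been reserved at open() time with
	 * v4l2_event_alloc(fh, n) */
	return v4l2_event_subscribe(fh, sub);
}

/* called by the driver, e.g. from its interrupt handler */
static void my_signal_frame_done(struct video_device *vdev)
{
	struct v4l2_event ev;

	memset(&ev, 0, sizeof(ev));
	ev.type = MY_EVT_FRAME_DONE;
	v4l2_event_queue(vdev, &ev);	/* copied to every subscribed fh */
}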
diff --git a/drivers/media/video/v4l2-fh.c b/drivers/media/video/v4l2-fh.c
new file mode 100644
index 000000000000..d78f184f40c5
--- /dev/null
+++ b/drivers/media/video/v4l2-fh.c
@@ -0,0 +1,79 @@
+/*
+ * v4l2-fh.c
+ *
+ * V4L2 file handles.
+ *
+ * Copyright (C) 2009--2010 Nokia Corporation.
+ *
+ * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/bitops.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+
+int v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
+{
+ fh->vdev = vdev;
+ INIT_LIST_HEAD(&fh->list);
+ set_bit(V4L2_FL_USES_V4L2_FH, &fh->vdev->flags);
+
+ /*
+ * fh->events only needs to be initialized if the driver
+ * supports the VIDIOC_SUBSCRIBE_EVENT ioctl.
+ */
+ if (vdev->ioctl_ops && vdev->ioctl_ops->vidioc_subscribe_event)
+ return v4l2_event_init(fh);
+
+ fh->events = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_fh_init);
+
+void v4l2_fh_add(struct v4l2_fh *fh)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+ list_add(&fh->list, &fh->vdev->fh_list);
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+}
+EXPORT_SYMBOL_GPL(v4l2_fh_add);
+
+void v4l2_fh_del(struct v4l2_fh *fh)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+ list_del_init(&fh->list);
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+}
+EXPORT_SYMBOL_GPL(v4l2_fh_del);
+
+void v4l2_fh_exit(struct v4l2_fh *fh)
+{
+ if (fh->vdev == NULL)
+ return;
+
+ fh->vdev = NULL;
+
+ v4l2_event_free(fh);
+}
+EXPORT_SYMBOL_GPL(v4l2_fh_exit);
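
The matching file-handle lifecycle, again as an editorial sketch rather than part of the patch (struct my_fh, my_open() and my_release() are made-up names): a driver embeds struct v4l2_fh in its per-open state, initializes and adds it in open(), and removes and frees it in release().

struct my_fh {
	struct v4l2_fh fh;	/* must be reachable via file->private_data */
	/* driver-private per-open state follows */
};

static int my_open(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct my_fh *h = kzalloc(sizeof(*h), GFP_KERNEL);
	int ret;

	if (!h)
		return -ENOMEM;
	/* allocates h->fh.events only if vidioc_subscribe_event is set */
	ret = v4l2_fh_init(&h->fh, vdev);
	if (ret) {
		kfree(h);
		return ret;
	}
	v4l2_fh_add(&h->fh);		/* now visible to v4l2_event_queue() */
	file->private_data = &h->fh;
	return 0;
}

static int my_release(struct file *file)
{
	struct my_fh *h = container_of(file->private_data, struct my_fh, fh);

	v4l2_fh_del(&h->fh);
	v4l2_fh_exit(&h->fh);		/* frees the event state */
	kfree(h);
	return 0;
}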
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index 7d59c107f13b..0eeceae50329 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -26,6 +26,8 @@
#endif
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-chip-ident.h>
#define dbgarg(cmd, fmt, arg...) \
@@ -291,6 +293,9 @@ static const char *v4l2_ioctls[] = {
[_IOC_NR(VIDIOC_QUERY_DV_PRESET)] = "VIDIOC_QUERY_DV_PRESET",
[_IOC_NR(VIDIOC_S_DV_TIMINGS)] = "VIDIOC_S_DV_TIMINGS",
[_IOC_NR(VIDIOC_G_DV_TIMINGS)] = "VIDIOC_G_DV_TIMINGS",
+ [_IOC_NR(VIDIOC_DQEVENT)] = "VIDIOC_DQEVENT",
+ [_IOC_NR(VIDIOC_SUBSCRIBE_EVENT)] = "VIDIOC_SUBSCRIBE_EVENT",
+ [_IOC_NR(VIDIOC_UNSUBSCRIBE_EVENT)] = "VIDIOC_UNSUBSCRIBE_EVENT",
};
#define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls)
@@ -610,17 +615,33 @@ static long __video_do_ioctl(struct file *file,
void *fh = file->private_data;
long ret = -EINVAL;
+ if (ops == NULL) {
+ printk(KERN_WARNING "videodev: \"%s\" has no ioctl_ops.\n",
+ vfd->name);
+ return -EINVAL;
+ }
+
+#ifdef CONFIG_VIDEO_V4L1_COMPAT
+ /********************************************************
+ All other V4L1 calls are handled by v4l1_compat module.
+ Those calls will be translated into V4L2 calls, and
+ __video_do_ioctl will be called again, with one or more
+ V4L2 ioctls.
+ ********************************************************/
+ if (_IOC_TYPE(cmd) == 'v' && cmd != VIDIOCGMBUF &&
+ _IOC_NR(cmd) < BASE_VIDIOCPRIVATE) {
+ return v4l_compat_translate_ioctl(file, cmd, arg,
+ __video_do_ioctl);
+ }
+#endif
+
if ((vfd->debug & V4L2_DEBUG_IOCTL) &&
!(vfd->debug & V4L2_DEBUG_IOCTL_ARG)) {
v4l_print_ioctl(vfd->name, cmd);
printk(KERN_CONT "\n");
}
- if (ops == NULL) {
- printk(KERN_WARNING "videodev: \"%s\" has no ioctl_ops.\n",
- vfd->name);
- return -EINVAL;
- }
+ switch (cmd) {
#ifdef CONFIG_VIDEO_V4L1_COMPAT
/***********************************************************
@@ -630,31 +651,21 @@ static long __video_do_ioctl(struct file *file,
***********************************************************/
/* --- streaming capture ------------------------------------- */
- if (cmd == VIDIOCGMBUF) {
+ case VIDIOCGMBUF:
+ {
struct video_mbuf *p = arg;
if (!ops->vidiocgmbuf)
- return ret;
+ break;
ret = ops->vidiocgmbuf(file, fh, p);
if (!ret)
dbgarg(cmd, "size=%d, frames=%d, offsets=0x%08lx\n",
p->size, p->frames,
(unsigned long)p->offsets);
- return ret;
+ break;
}
-
- /********************************************************
- All other V4L1 calls are handled by v4l1_compat module.
- Those calls will be translated into V4L2 calls, and
- __video_do_ioctl will be called again, with one or more
- V4L2 ioctls.
- ********************************************************/
- if (_IOC_TYPE(cmd) == 'v' && _IOC_NR(cmd) < BASE_VIDIOCPRIVATE)
- return v4l_compat_translate_ioctl(file, cmd, arg,
- __video_do_ioctl);
#endif
- switch (cmd) {
/* --- capabilities ------------------------------------------ */
case VIDIOC_QUERYCAP:
{
@@ -1072,7 +1083,7 @@ static long __video_do_ioctl(struct file *file,
id &= ~curr_id;
}
if (i <= index)
- return -EINVAL;
+ break;
v4l2_video_std_construct(p, curr_id, descr);
@@ -1597,7 +1608,7 @@ static long __video_do_ioctl(struct file *file,
v4l2_std_id std = vfd->current_norm;
if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
+ break;
ret = 0;
if (ops->vidioc_g_std)
@@ -1942,7 +1953,55 @@ static long __video_do_ioctl(struct file *file,
}
break;
}
+ case VIDIOC_DQEVENT:
+ {
+ struct v4l2_event *ev = arg;
+
+ if (!ops->vidioc_subscribe_event)
+ break;
+ ret = v4l2_event_dequeue(fh, ev, file->f_flags & O_NONBLOCK);
+ if (ret < 0) {
+ dbgarg(cmd, "no pending events?");
+ break;
+ }
+ dbgarg(cmd,
+ "pending=%d, type=0x%8.8x, sequence=%d, "
+ "timestamp=%lu.%9.9lu ",
+ ev->pending, ev->type, ev->sequence,
+ ev->timestamp.tv_sec, ev->timestamp.tv_nsec);
+ break;
+ }
+ case VIDIOC_SUBSCRIBE_EVENT:
+ {
+ struct v4l2_event_subscription *sub = arg;
+
+ if (!ops->vidioc_subscribe_event)
+ break;
+
+ ret = ops->vidioc_subscribe_event(fh, sub);
+ if (ret < 0) {
+ dbgarg(cmd, "failed, ret=%ld", ret);
+ break;
+ }
+ dbgarg(cmd, "type=0x%8.8x", sub->type);
+ break;
+ }
+ case VIDIOC_UNSUBSCRIBE_EVENT:
+ {
+ struct v4l2_event_subscription *sub = arg;
+
+ if (!ops->vidioc_unsubscribe_event)
+ break;
+
+ ret = ops->vidioc_unsubscribe_event(fh, sub);
+ if (ret < 0) {
+ dbgarg(cmd, "failed, ret=%ld", ret);
+ break;
+ }
+ dbgarg(cmd, "type=0x%8.8x", sub->type);
+ break;
+ }
default:
{
if (!ops->vidioc_default)
@@ -2006,7 +2065,7 @@ long video_ioctl2(struct file *file,
{
char sbuf[128];
void *mbuf = NULL;
- void *parg = NULL;
+ void *parg = (void *)arg;
long err = -EINVAL;
int is_ext_ctrl;
size_t ctrls_size = 0;
diff --git a/drivers/media/video/v4l2-mem2mem.c b/drivers/media/video/v4l2-mem2mem.c
new file mode 100644
index 000000000000..f45f9405ea39
--- /dev/null
+++ b/drivers/media/video/v4l2-mem2mem.c
@@ -0,0 +1,633 @@
+/*
+ * Memory-to-memory device framework for Video for Linux 2 and videobuf.
+ *
+ * Helper functions for devices that use videobuf buffers for both their
+ * source and destination.
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <p.osciak@samsung.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <media/videobuf-core.h>
+#include <media/v4l2-mem2mem.h>
+
+MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
+MODULE_AUTHOR("Pawel Osciak, <p.osciak@samsung.com>");
+MODULE_LICENSE("GPL");
+
+static bool debug;
+module_param(debug, bool, 0644);
+
+#define dprintk(fmt, arg...) \
+ do { \
+ if (debug) \
+ printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
+ } while (0)
+
+
+/* Instance is already queued on the job_queue */
+#define TRANS_QUEUED (1 << 0)
+/* Instance is currently running in hardware */
+#define TRANS_RUNNING (1 << 1)
+
+
+/* Offset base for buffers on the destination queue - used to distinguish
+ * between source and destination buffers when mmapping - they receive the same
+ * offsets but for different queues */
+#define DST_QUEUE_OFF_BASE (1 << 30)
+
+
+/**
+ * struct v4l2_m2m_dev - per-device context
+ * @curr_ctx: currently running instance
+ * @job_queue: instances queued to run
+ * @job_spinlock: protects job_queue
+ * @m2m_ops: driver callbacks
+ */
+struct v4l2_m2m_dev {
+ struct v4l2_m2m_ctx *curr_ctx;
+
+ struct list_head job_queue;
+ spinlock_t job_spinlock;
+
+ struct v4l2_m2m_ops *m2m_ops;
+};
+
+static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
+ enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return &m2m_ctx->cap_q_ctx;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return &m2m_ctx->out_q_ctx;
+ default:
+ printk(KERN_ERR "Invalid buffer type\n");
+ return NULL;
+ }
+}
+
+/**
+ * v4l2_m2m_get_vq() - return videobuf_queue for the given type
+ */
+struct videobuf_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
+ enum v4l2_buf_type type)
+{
+ struct v4l2_m2m_queue_ctx *q_ctx;
+
+ q_ctx = get_queue_ctx(m2m_ctx, type);
+ if (!q_ctx)
+ return NULL;
+
+ return &q_ctx->q;
+}
+EXPORT_SYMBOL(v4l2_m2m_get_vq);
+
+/**
+ * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
+ */
+void *v4l2_m2m_next_buf(struct v4l2_m2m_ctx *m2m_ctx, enum v4l2_buf_type type)
+{
+ struct v4l2_m2m_queue_ctx *q_ctx;
+ struct videobuf_buffer *vb = NULL;
+ unsigned long flags;
+
+ q_ctx = get_queue_ctx(m2m_ctx, type);
+ if (!q_ctx)
+ return NULL;
+
+ spin_lock_irqsave(q_ctx->q.irqlock, flags);
+
+ if (list_empty(&q_ctx->rdy_queue))
+ goto end;
+
+ vb = list_entry(q_ctx->rdy_queue.next, struct videobuf_buffer, queue);
+ vb->state = VIDEOBUF_ACTIVE;
+
+end:
+ spin_unlock_irqrestore(q_ctx->q.irqlock, flags);
+ return vb;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);
+
+/**
+ * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
+ * return it
+ */
+void *v4l2_m2m_buf_remove(struct v4l2_m2m_ctx *m2m_ctx, enum v4l2_buf_type type)
+{
+ struct v4l2_m2m_queue_ctx *q_ctx;
+ struct videobuf_buffer *vb = NULL;
+ unsigned long flags;
+
+ q_ctx = get_queue_ctx(m2m_ctx, type);
+ if (!q_ctx)
+ return NULL;
+
+ spin_lock_irqsave(q_ctx->q.irqlock, flags);
+ if (!list_empty(&q_ctx->rdy_queue)) {
+ vb = list_entry(q_ctx->rdy_queue.next, struct videobuf_buffer,
+ queue);
+ list_del(&vb->queue);
+ q_ctx->num_rdy--;
+ }
+ spin_unlock_irqrestore(q_ctx->q.irqlock, flags);
+
+ return vb;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);
+
+/*
+ * Scheduling handlers
+ */
+
+/**
+ * v4l2_m2m_get_curr_priv() - return driver private data for the currently
+ * running instance or NULL if no instance is running
+ */
+void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
+{
+ unsigned long flags;
+ void *ret = NULL;
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ if (m2m_dev->curr_ctx)
+ ret = m2m_dev->curr_ctx->priv;
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);
+
+/**
+ * v4l2_m2m_try_run() - select next job to perform and run it if possible
+ *
+ * Get next transaction (if present) from the waiting jobs list and run it.
+ */
+static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ if (NULL != m2m_dev->curr_ctx) {
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ dprintk("Another instance is running, won't run now\n");
+ return;
+ }
+
+ if (list_empty(&m2m_dev->job_queue)) {
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ dprintk("No job pending\n");
+ return;
+ }
+
+ m2m_dev->curr_ctx = list_entry(m2m_dev->job_queue.next,
+ struct v4l2_m2m_ctx, queue);
+ m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
+}
+
+/**
+ * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
+ * the pending job queue and add it if so.
+ * @m2m_ctx: m2m context assigned to the instance to be checked
+ *
+ * There are three basic requirements an instance has to meet to be able to run:
+ * 1) at least one source buffer has to be queued,
+ * 2) at least one destination buffer has to be queued,
+ * 3) streaming has to be on.
+ *
+ * There may also be additional, custom requirements. In such a case the
+ * driver should supply a custom callback (job_ready in v4l2_m2m_ops) that
+ * returns 1 if the instance is ready.
+ * An example of such a requirement is an instance that needs more than one
+ * src/dst buffer per transaction.
+ */
+static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ struct v4l2_m2m_dev *m2m_dev;
+ unsigned long flags_job, flags;
+
+ m2m_dev = m2m_ctx->m2m_dev;
+ dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);
+
+ if (!m2m_ctx->out_q_ctx.q.streaming
+ || !m2m_ctx->cap_q_ctx.q.streaming) {
+ dprintk("Streaming needs to be on for both queues\n");
+ return;
+ }
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
+ if (m2m_ctx->job_flags & TRANS_QUEUED) {
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
+ dprintk("On job queue already\n");
+ return;
+ }
+
+ spin_lock_irqsave(m2m_ctx->out_q_ctx.q.irqlock, flags);
+ if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) {
+ spin_unlock_irqrestore(m2m_ctx->out_q_ctx.q.irqlock, flags);
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
+ dprintk("No input buffers available\n");
+ return;
+ }
+ if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) {
+ spin_unlock_irqrestore(m2m_ctx->out_q_ctx.q.irqlock, flags);
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
+ dprintk("No output buffers available\n");
+ return;
+ }
+ spin_unlock_irqrestore(m2m_ctx->out_q_ctx.q.irqlock, flags);
+
+ if (m2m_dev->m2m_ops->job_ready
+ && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
+ dprintk("Driver not ready\n");
+ return;
+ }
+
+ list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
+ m2m_ctx->job_flags |= TRANS_QUEUED;
+
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
+
+ v4l2_m2m_try_run(m2m_dev);
+}
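/*
 * Editor's sketch (not part of this patch): a driver whose transaction needs
 * two source buffers can supply the optional job_ready callback described
 * above.  struct my_ctx and its m2m_ctx member are assumed driver fields;
 * out_q_ctx.num_rdy is maintained by v4l2_m2m_buf_queue() further down.
 */
static int my_job_ready(void *priv)
{
	struct my_ctx *ctx = priv;

	return ctx->m2m_ctx->out_q_ctx.num_rdy >= 2;
}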
+
+/**
+ * v4l2_m2m_job_finish() - inform the framework that a job has been finished
+ * and have it clean up
+ *
+ * Called by a driver to yield back the device after it has finished with it.
+ * Should be called as soon as possible after reaching a state which allows
+ * other instances to take control of the device.
+ *
+ * This function has to be called only after the device_run() callback has
+ * been invoked on the driver. To prevent recursion, it must not be called
+ * directly from the device_run() callback itself.
+ */
+void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ dprintk("Called by an instance not currently running\n");
+ return;
+ }
+
+ list_del(&m2m_dev->curr_ctx->queue);
+ m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
+ m2m_dev->curr_ctx = NULL;
+
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ /* This instance might have more buffers ready, but since we do not
+ * allow more than one job on the job_queue per instance, each has
+ * to be scheduled separately after the previous one finishes. */
+ v4l2_m2m_try_schedule(m2m_ctx);
+ v4l2_m2m_try_run(m2m_dev);
+}
+EXPORT_SYMBOL(v4l2_m2m_job_finish);
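/*
 * Editor's sketch (not part of this patch): the usual completion path.  From
 * the interrupt handler (or a workqueue), remove the processed buffers, mark
 * them done, wake their waiters and yield the device.  my_dev and my_ctx are
 * assumed driver names; error handling is omitted.
 */
static void my_transaction_done(struct my_dev *dev)
{
	struct my_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
	struct videobuf_buffer *src, *dst;

	src = v4l2_m2m_buf_remove(ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
	dst = v4l2_m2m_buf_remove(ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);

	src->state = VIDEOBUF_DONE;
	dst->state = VIDEOBUF_DONE;
	wake_up(&src->done);
	wake_up(&dst->done);

	v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
}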
+
+/**
+ * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
+ */
+int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct videobuf_queue *vq;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
+ return videobuf_reqbufs(vq, reqbufs);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);
+
+/**
+ * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
+ *
+ * See v4l2_m2m_mmap() documentation for details.
+ */
+int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_buffer *buf)
+{
+ struct videobuf_queue *vq;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
+ ret = videobuf_querybuf(vq, buf);
+
+ if (buf->memory == V4L2_MEMORY_MMAP
+ && vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ buf->m.offset += DST_QUEUE_OFF_BASE;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
+
+/**
+ * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
+ * the type
+ */
+int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_buffer *buf)
+{
+ struct videobuf_queue *vq;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
+ ret = videobuf_qbuf(vq, buf);
+ if (!ret)
+ v4l2_m2m_try_schedule(m2m_ctx);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);
+
+/**
+ * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
+ * the type
+ */
+int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_buffer *buf)
+{
+ struct videobuf_queue *vq;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
+ return videobuf_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
+
+/**
+ * v4l2_m2m_streamon() - turn on streaming for a video queue
+ */
+int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ enum v4l2_buf_type type)
+{
+ struct videobuf_queue *vq;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, type);
+ ret = videobuf_streamon(vq);
+ if (!ret)
+ v4l2_m2m_try_schedule(m2m_ctx);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);
+
+/**
+ * v4l2_m2m_streamoff() - turn off streaming for a video queue
+ */
+int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ enum v4l2_buf_type type)
+{
+ struct videobuf_queue *vq;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, type);
+ return videobuf_streamoff(vq);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
+
+/**
+ * v4l2_m2m_poll() - poll replacement, for destination buffers only
+ *
+ * Call from the driver's poll() function. Will poll both queues. If a buffer
+ * is available to dequeue (with dqbuf) from the source queue, this indicates
+ * that a non-blocking write can be performed; a dequeueable buffer on the
+ * destination queue indicates that a non-blocking read can be performed.
+ */
+unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct poll_table_struct *wait)
+{
+ struct videobuf_queue *src_q, *dst_q;
+ struct videobuf_buffer *src_vb = NULL, *dst_vb = NULL;
+ unsigned int rc = 0;
+
+ src_q = v4l2_m2m_get_src_vq(m2m_ctx);
+ dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);
+
+ mutex_lock(&src_q->vb_lock);
+ mutex_lock(&dst_q->vb_lock);
+
+ if (src_q->streaming && !list_empty(&src_q->stream))
+ src_vb = list_first_entry(&src_q->stream,
+ struct videobuf_buffer, stream);
+ if (dst_q->streaming && !list_empty(&dst_q->stream))
+ dst_vb = list_first_entry(&dst_q->stream,
+ struct videobuf_buffer, stream);
+
+ if (!src_vb && !dst_vb) {
+ rc = POLLERR;
+ goto end;
+ }
+
+ if (src_vb) {
+ poll_wait(file, &src_vb->done, wait);
+ if (src_vb->state == VIDEOBUF_DONE
+ || src_vb->state == VIDEOBUF_ERROR)
+ rc |= POLLOUT | POLLWRNORM;
+ }
+ if (dst_vb) {
+ poll_wait(file, &dst_vb->done, wait);
+ if (dst_vb->state == VIDEOBUF_DONE
+ || dst_vb->state == VIDEOBUF_ERROR)
+ rc |= POLLIN | POLLRDNORM;
+ }
+
+end:
+ mutex_unlock(&dst_q->vb_lock);
+ mutex_unlock(&src_q->vb_lock);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_poll);
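/*
 * Editor's sketch (not part of this patch): most drivers can forward their
 * file_operations poll() straight to the helper.  my_file_to_ctx() is an
 * assumed accessor for the per-open context that holds the m2m_ctx pointer.
 */
static unsigned int my_poll(struct file *file, struct poll_table_struct *wait)
{
	struct my_ctx *ctx = my_file_to_ctx(file);

	return v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
}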
+
+/**
+ * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
+ *
+ * Call from the driver's mmap() function. Handles mmap() for both queues
+ * seamlessly for videobuf, which receives normal per-queue offsets and
+ * proper videobuf queue pointers. The differentiation is made outside
+ * videobuf by adding a predefined offset (DST_QUEUE_OFF_BASE) to buffers
+ * from one of the queues and subtracting it again before handing the offset
+ * back to videobuf. Only drivers (and thus applications) see the modified
+ * offsets.
+ */
+int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct vm_area_struct *vma)
+{
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ struct videobuf_queue *vq;
+
+ if (offset < DST_QUEUE_OFF_BASE) {
+ vq = v4l2_m2m_get_src_vq(m2m_ctx);
+ } else {
+ vq = v4l2_m2m_get_dst_vq(m2m_ctx);
+ vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
+ }
+
+ return videobuf_mmap_mapper(vq, vma);
+}
+EXPORT_SYMBOL(v4l2_m2m_mmap);
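/*
 * Editor's sketch (not part of this patch): the matching mmap() wrapper.  The
 * DST_QUEUE_OFF_BASE bias is applied in QUERYBUF and stripped again here, so
 * neither videobuf nor the application needs to know which queue a buffer
 * belongs to.  my_file_to_ctx() is the same assumed accessor as above.
 */
static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_ctx *ctx = my_file_to_ctx(file);

	return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
}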
+
+/**
+ * v4l2_m2m_init() - initialize per-driver m2m data
+ *
+ * Usually called from driver's probe() function.
+ */
+struct v4l2_m2m_dev *v4l2_m2m_init(struct v4l2_m2m_ops *m2m_ops)
+{
+ struct v4l2_m2m_dev *m2m_dev;
+
+ if (!m2m_ops)
+ return ERR_PTR(-EINVAL);
+
+ BUG_ON(!m2m_ops->device_run);
+ BUG_ON(!m2m_ops->job_abort);
+
+ m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
+ if (!m2m_dev)
+ return ERR_PTR(-ENOMEM);
+
+ m2m_dev->curr_ctx = NULL;
+ m2m_dev->m2m_ops = m2m_ops;
+ INIT_LIST_HEAD(&m2m_dev->job_queue);
+ spin_lock_init(&m2m_dev->job_spinlock);
+
+ return m2m_dev;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_init);
+
+/**
+ * v4l2_m2m_release() - cleans up and frees an m2m_dev structure
+ *
+ * Usually called from driver's remove() function.
+ */
+void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
+{
+ kfree(m2m_dev);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_release);
+
+/**
+ * v4l2_m2m_ctx_init() - allocate and initialize an m2m context
+ * @priv: driver's instance private data
+ * @m2m_dev: a previously initialized m2m_dev struct
+ * @vq_init: a callback for queue type-specific initialization, used to
+ * initialize the videobuf_queues
+ *
+ * Usually called from driver's open() function.
+ */
+struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(void *priv, struct v4l2_m2m_dev *m2m_dev,
+ void (*vq_init)(void *priv, struct videobuf_queue *,
+ enum v4l2_buf_type))
+{
+ struct v4l2_m2m_ctx *m2m_ctx;
+ struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
+
+ if (!vq_init)
+ return ERR_PTR(-EINVAL);
+
+ m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
+ if (!m2m_ctx)
+ return ERR_PTR(-ENOMEM);
+
+ m2m_ctx->priv = priv;
+ m2m_ctx->m2m_dev = m2m_dev;
+
+ out_q_ctx = get_queue_ctx(m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ cap_q_ctx = get_queue_ctx(m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
+ INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
+
+ INIT_LIST_HEAD(&m2m_ctx->queue);
+
+ vq_init(priv, &out_q_ctx->q, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ vq_init(priv, &cap_q_ctx->q, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ out_q_ctx->q.priv_data = cap_q_ctx->q.priv_data = priv;
+
+ return m2m_ctx;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
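/*
 * Editor's sketch (not part of this patch): a typical open() creating the
 * per-file-handle context.  queue_init() is the driver-supplied vq_init
 * callback described above (it usually calls one of the videobuf queue init
 * helpers per queue type); my_dev, my_ctx and the video_drvdata() usage are
 * assumptions about the driver, not requirements of the framework.
 */
static void queue_init(void *priv, struct videobuf_queue *vq,
		       enum v4l2_buf_type type);	/* driver-specific */

static int my_m2m_open(struct file *file)
{
	struct my_dev *dev = video_drvdata(file);
	struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;

	ctx->dev = dev;
	ctx->m2m_ctx = v4l2_m2m_ctx_init(ctx, dev->m2m_dev, queue_init);
	if (IS_ERR(ctx->m2m_ctx)) {
		int ret = PTR_ERR(ctx->m2m_ctx);

		kfree(ctx);
		return ret;
	}

	file->private_data = ctx;
	return 0;
}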
+
+/**
+ * v4l2_m2m_ctx_release() - release m2m context
+ *
+ * Usually called from driver's release() function.
+ */
+void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ struct v4l2_m2m_dev *m2m_dev;
+ struct videobuf_buffer *vb;
+ unsigned long flags;
+
+ m2m_dev = m2m_ctx->m2m_dev;
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ if (m2m_ctx->job_flags & TRANS_RUNNING) {
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
+ dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx);
+ vb = v4l2_m2m_next_dst_buf(m2m_ctx);
+ BUG_ON(NULL == vb);
+ wait_event(vb->done, vb->state != VIDEOBUF_ACTIVE
+ && vb->state != VIDEOBUF_QUEUED);
+ } else if (m2m_ctx->job_flags & TRANS_QUEUED) {
+ list_del(&m2m_ctx->queue);
+ m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ dprintk("m2m_ctx: %p had been on queue and was removed\n",
+ m2m_ctx);
+ } else {
+ /* Do nothing, was not on queue/running */
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ }
+
+ videobuf_stop(&m2m_ctx->cap_q_ctx.q);
+ videobuf_stop(&m2m_ctx->out_q_ctx.q);
+
+ videobuf_mmap_free(&m2m_ctx->cap_q_ctx.q);
+ videobuf_mmap_free(&m2m_ctx->out_q_ctx.q);
+
+ kfree(m2m_ctx);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);
+
+/**
+ * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
+ *
+ * Call from buf_queue(), videobuf_queue_ops callback.
+ *
+ * Locking: Caller holds q->irqlock (taken by videobuf before calling buf_queue
+ * callback in the driver).
+ */
+void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ struct v4l2_m2m_queue_ctx *q_ctx;
+
+ q_ctx = get_queue_ctx(m2m_ctx, vq->type);
+ if (!q_ctx)
+ return;
+
+ list_add_tail(&vb->queue, &q_ctx->rdy_queue);
+ q_ctx->num_rdy++;
+
+ vb->state = VIDEOBUF_QUEUED;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
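/*
 * Editor's sketch (not part of this patch): the driver's videobuf buf_queue
 * callback usually just hands the buffer to the helper; q->priv_data is the
 * driver context installed by v4l2_m2m_ctx_init() above.
 */
static void my_buf_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
{
	struct my_ctx *ctx = q->priv_data;

	v4l2_m2m_buf_queue(ctx->m2m_ctx, q, vb);
}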
+
diff --git a/drivers/media/video/videobuf-core.c b/drivers/media/video/videobuf-core.c
index bb0a1c8de414..7d3378437ded 100644
--- a/drivers/media/video/videobuf-core.c
+++ b/drivers/media/video/videobuf-core.c
@@ -24,10 +24,15 @@
#include <media/videobuf-core.h>
#define MAGIC_BUFFER 0x20070728
-#define MAGIC_CHECK(is, should) do { \
- if (unlikely((is) != (should))) { \
- printk(KERN_ERR "magic mismatch: %x (expected %x)\n", is, should); \
- BUG(); } } while (0)
+#define MAGIC_CHECK(is, should) \
+ do { \
+ if (unlikely((is) != (should))) { \
+ printk(KERN_ERR \
+ "magic mismatch: %x (expected %x)\n", \
+ is, should); \
+ BUG(); \
+ } \
+ } while (0)
static int debug;
module_param(debug, int, 0644);
@@ -36,16 +41,18 @@ MODULE_DESCRIPTION("helper module to manage video4linux buffers");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
MODULE_LICENSE("GPL");
-#define dprintk(level, fmt, arg...) do { \
- if (debug >= level) \
- printk(KERN_DEBUG "vbuf: " fmt , ## arg); } while (0)
+#define dprintk(level, fmt, arg...) \
+ do { \
+ if (debug >= level) \
+ printk(KERN_DEBUG "vbuf: " fmt, ## arg); \
+ } while (0)
/* --------------------------------------------------------------------- */
#define CALL(q, f, arg...) \
((q->int_ops->f) ? q->int_ops->f(arg) : 0)
-void *videobuf_alloc(struct videobuf_queue *q)
+struct videobuf_buffer *videobuf_alloc(struct videobuf_queue *q)
{
struct videobuf_buffer *vb;
@@ -57,14 +64,14 @@ void *videobuf_alloc(struct videobuf_queue *q)
}
vb = q->int_ops->alloc(q->msize);
-
if (NULL != vb) {
init_waitqueue_head(&vb->done);
- vb->magic = MAGIC_BUFFER;
+ vb->magic = MAGIC_BUFFER;
}
return vb;
}
+EXPORT_SYMBOL_GPL(videobuf_alloc);
#define WAITON_CONDITION (vb->state != VIDEOBUF_ACTIVE &&\
vb->state != VIDEOBUF_QUEUED)
@@ -86,6 +93,7 @@ int videobuf_waiton(struct videobuf_buffer *vb, int non_blocking, int intr)
return 0;
}
+EXPORT_SYMBOL_GPL(videobuf_waiton);
int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
struct v4l2_framebuffer *fbuf)
@@ -95,16 +103,16 @@ int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
return CALL(q, iolock, q, vb, fbuf);
}
+EXPORT_SYMBOL_GPL(videobuf_iolock);
-void *videobuf_queue_to_vmalloc (struct videobuf_queue *q,
- struct videobuf_buffer *buf)
+void *videobuf_queue_to_vaddr(struct videobuf_queue *q,
+ struct videobuf_buffer *buf)
{
- if (q->int_ops->vmalloc)
- return q->int_ops->vmalloc(buf);
- else
- return NULL;
+ if (q->int_ops->vaddr)
+ return q->int_ops->vaddr(buf);
+ return NULL;
}
-EXPORT_SYMBOL_GPL(videobuf_queue_to_vmalloc);
+EXPORT_SYMBOL_GPL(videobuf_queue_to_vaddr);
/* --------------------------------------------------------------------- */
@@ -146,6 +154,7 @@ void videobuf_queue_core_init(struct videobuf_queue *q,
init_waitqueue_head(&q->wait);
INIT_LIST_HEAD(&q->stream);
}
+EXPORT_SYMBOL_GPL(videobuf_queue_core_init);
/* Locking: Only used in bttv; unsafe, find a way to remove it */
int videobuf_queue_is_busy(struct videobuf_queue *q)
@@ -184,6 +193,7 @@ int videobuf_queue_is_busy(struct videobuf_queue *q)
}
return 0;
}
+EXPORT_SYMBOL_GPL(videobuf_queue_is_busy);
/* Locking: Caller holds q->vb_lock */
void videobuf_queue_cancel(struct videobuf_queue *q)
@@ -216,6 +226,7 @@ void videobuf_queue_cancel(struct videobuf_queue *q)
}
INIT_LIST_HEAD(&q->stream);
}
+EXPORT_SYMBOL_GPL(videobuf_queue_cancel);
/* --------------------------------------------------------------------- */
@@ -237,6 +248,7 @@ enum v4l2_field videobuf_next_field(struct videobuf_queue *q)
}
return field;
}
+EXPORT_SYMBOL_GPL(videobuf_next_field);
/* Locking: Caller holds q->vb_lock */
static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
@@ -273,8 +285,10 @@ static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
case VIDEOBUF_ACTIVE:
b->flags |= V4L2_BUF_FLAG_QUEUED;
break;
- case VIDEOBUF_DONE:
case VIDEOBUF_ERROR:
+ b->flags |= V4L2_BUF_FLAG_ERROR;
+ /* fall through */
+ case VIDEOBUF_DONE:
b->flags |= V4L2_BUF_FLAG_DONE;
break;
case VIDEOBUF_NEEDS_INIT:
@@ -298,20 +312,15 @@ static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
static int __videobuf_mmap_free(struct videobuf_queue *q)
{
int i;
- int rc;
if (!q)
return 0;
MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
-
- rc = CALL(q, mmap_free, q);
-
- q->is_mmapped = 0;
-
- if (rc < 0)
- return rc;
+ for (i = 0; i < VIDEO_MAX_FRAME; i++)
+ if (q->bufs[i] && q->bufs[i]->map)
+ return -EBUSY;
for (i = 0; i < VIDEO_MAX_FRAME; i++) {
if (NULL == q->bufs[i])
@@ -321,7 +330,7 @@ static int __videobuf_mmap_free(struct videobuf_queue *q)
q->bufs[i] = NULL;
}
- return rc;
+ return 0;
}
int videobuf_mmap_free(struct videobuf_queue *q)
@@ -332,6 +341,7 @@ int videobuf_mmap_free(struct videobuf_queue *q)
mutex_unlock(&q->vb_lock);
return ret;
}
+EXPORT_SYMBOL_GPL(videobuf_mmap_free);
/* Locking: Caller holds q->vb_lock */
int __videobuf_mmap_setup(struct videobuf_queue *q,
@@ -351,7 +361,7 @@ int __videobuf_mmap_setup(struct videobuf_queue *q,
for (i = 0; i < bcount; i++) {
q->bufs[i] = videobuf_alloc(q);
- if (q->bufs[i] == NULL)
+ if (NULL == q->bufs[i])
break;
q->bufs[i]->i = i;
@@ -372,11 +382,11 @@ int __videobuf_mmap_setup(struct videobuf_queue *q,
if (!i)
return -ENOMEM;
- dprintk(1, "mmap setup: %d buffers, %d bytes each\n",
- i, bsize);
+ dprintk(1, "mmap setup: %d buffers, %d bytes each\n", i, bsize);
return i;
}
+EXPORT_SYMBOL_GPL(__videobuf_mmap_setup);
int videobuf_mmap_setup(struct videobuf_queue *q,
unsigned int bcount, unsigned int bsize,
@@ -388,6 +398,7 @@ int videobuf_mmap_setup(struct videobuf_queue *q,
mutex_unlock(&q->vb_lock);
return ret;
}
+EXPORT_SYMBOL_GPL(videobuf_mmap_setup);
int videobuf_reqbufs(struct videobuf_queue *q,
struct v4l2_requestbuffers *req)
@@ -432,7 +443,7 @@ int videobuf_reqbufs(struct videobuf_queue *q,
q->ops->buf_setup(q, &count, &size);
dprintk(1, "reqbufs: bufs=%d, size=0x%x [%u pages total]\n",
count, size,
- (unsigned int)((count*PAGE_ALIGN(size))>>PAGE_SHIFT) );
+ (unsigned int)((count * PAGE_ALIGN(size)) >> PAGE_SHIFT));
retval = __videobuf_mmap_setup(q, count, size, req->memory);
if (retval < 0) {
@@ -447,6 +458,7 @@ int videobuf_reqbufs(struct videobuf_queue *q,
mutex_unlock(&q->vb_lock);
return retval;
}
+EXPORT_SYMBOL_GPL(videobuf_reqbufs);
int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b)
{
@@ -473,9 +485,9 @@ done:
mutex_unlock(&q->vb_lock);
return ret;
}
+EXPORT_SYMBOL_GPL(videobuf_querybuf);
-int videobuf_qbuf(struct videobuf_queue *q,
- struct v4l2_buffer *b)
+int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
{
struct videobuf_buffer *buf;
enum v4l2_field field;
@@ -534,6 +546,13 @@ int videobuf_qbuf(struct videobuf_queue *q,
"but buffer addr is zero!\n");
goto done;
}
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT
+ || q->type == V4L2_BUF_TYPE_VBI_OUTPUT
+ || q->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) {
+ buf->size = b->bytesused;
+ buf->field = b->field;
+ buf->ts = b->timestamp;
+ }
break;
case V4L2_MEMORY_USERPTR:
if (b->length < buf->bsize) {
@@ -567,11 +586,11 @@ int videobuf_qbuf(struct videobuf_queue *q,
q->ops->buf_queue(q, buf);
spin_unlock_irqrestore(q->irqlock, flags);
}
- dprintk(1, "qbuf: succeded\n");
+ dprintk(1, "qbuf: succeeded\n");
retval = 0;
wake_up_interruptible_sync(&q->wait);
- done:
+done:
mutex_unlock(&q->vb_lock);
if (b->memory == V4L2_MEMORY_MMAP)
@@ -579,7 +598,7 @@ int videobuf_qbuf(struct videobuf_queue *q,
return retval;
}
-
+EXPORT_SYMBOL_GPL(videobuf_qbuf);
/* Locking: Caller holds q->vb_lock */
static int stream_next_buffer_check_queue(struct videobuf_queue *q, int noblock)
@@ -624,7 +643,6 @@ done:
return retval;
}
-
/* Locking: Caller holds q->vb_lock */
static int stream_next_buffer(struct videobuf_queue *q,
struct videobuf_buffer **vb, int nonblocking)
@@ -647,13 +665,14 @@ done:
}
int videobuf_dqbuf(struct videobuf_queue *q,
- struct v4l2_buffer *b, int nonblocking)
+ struct v4l2_buffer *b, int nonblocking)
{
struct videobuf_buffer *buf = NULL;
int retval;
MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
+ memset(b, 0, sizeof(*b));
mutex_lock(&q->vb_lock);
retval = stream_next_buffer(q, &buf, nonblocking);
@@ -665,28 +684,25 @@ int videobuf_dqbuf(struct videobuf_queue *q,
switch (buf->state) {
case VIDEOBUF_ERROR:
dprintk(1, "dqbuf: state is error\n");
- retval = -EIO;
- CALL(q, sync, q, buf);
- buf->state = VIDEOBUF_IDLE;
break;
case VIDEOBUF_DONE:
dprintk(1, "dqbuf: state is done\n");
- CALL(q, sync, q, buf);
- buf->state = VIDEOBUF_IDLE;
break;
default:
dprintk(1, "dqbuf: state invalid\n");
retval = -EINVAL;
goto done;
}
- list_del(&buf->stream);
- memset(b, 0, sizeof(*b));
+ CALL(q, sync, q, buf);
videobuf_status(q, b, buf, q->type);
-
- done:
+ list_del(&buf->stream);
+ buf->state = VIDEOBUF_IDLE;
+ b->flags &= ~V4L2_BUF_FLAG_DONE;
+done:
mutex_unlock(&q->vb_lock);
return retval;
}
+EXPORT_SYMBOL_GPL(videobuf_dqbuf);
int videobuf_streamon(struct videobuf_queue *q)
{
@@ -709,10 +725,11 @@ int videobuf_streamon(struct videobuf_queue *q)
spin_unlock_irqrestore(q->irqlock, flags);
wake_up_interruptible_sync(&q->wait);
- done:
+done:
mutex_unlock(&q->vb_lock);
return retval;
}
+EXPORT_SYMBOL_GPL(videobuf_streamon);
/* Locking: Caller holds q->vb_lock */
static int __videobuf_streamoff(struct videobuf_queue *q)
@@ -735,6 +752,7 @@ int videobuf_streamoff(struct videobuf_queue *q)
return retval;
}
+EXPORT_SYMBOL_GPL(videobuf_streamoff);
/* Locking: Caller holds q->vb_lock */
static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q,
@@ -774,7 +792,7 @@ static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q,
retval = q->read_buf->size;
}
- done:
+done:
/* cleanup */
q->ops->buf_release(q, q->read_buf);
kfree(q->read_buf);
@@ -782,6 +800,49 @@ static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q,
return retval;
}
+static int __videobuf_copy_to_user(struct videobuf_queue *q,
+ struct videobuf_buffer *buf,
+ char __user *data, size_t count,
+ int nonblocking)
+{
+ void *vaddr = CALL(q, vaddr, buf);
+
+ /* copy to userspace */
+ if (count > buf->size - q->read_off)
+ count = buf->size - q->read_off;
+
+ if (copy_to_user(data, vaddr + q->read_off, count))
+ return -EFAULT;
+
+ return count;
+}
+
+static int __videobuf_copy_stream(struct videobuf_queue *q,
+ struct videobuf_buffer *buf,
+ char __user *data, size_t count, size_t pos,
+ int vbihack, int nonblocking)
+{
+ unsigned int *fc = CALL(q, vaddr, buf);
+
+ if (vbihack) {
+ /* dirty, undocumented hack -- pass the frame counter
+ * within the last four bytes of each vbi data block.
+ * We need that one to maintain backward compatibility
+ * to all vbi decoding software out there ... */
+ fc += (buf->size >> 2) - 1;
+ *fc = buf->field_count >> 1;
+ dprintk(1, "vbihack: %d\n", *fc);
+ }
+
+ /* copy stuff using the common method */
+ count = __videobuf_copy_to_user(q, buf, data, count, nonblocking);
+
+ if ((count == -EFAULT) && (pos == 0))
+ return -EFAULT;
+
+ return count;
+}
+
ssize_t videobuf_read_one(struct videobuf_queue *q,
char __user *data, size_t count, loff_t *ppos,
int nonblocking)
@@ -850,7 +911,7 @@ ssize_t videobuf_read_one(struct videobuf_queue *q,
}
/* Copy to userspace */
- retval = CALL(q, video_copy_to_user, q, data, count, nonblocking);
+ retval = __videobuf_copy_to_user(q, q->read_buf, data, count, nonblocking);
if (retval < 0)
goto done;
@@ -862,10 +923,11 @@ ssize_t videobuf_read_one(struct videobuf_queue *q,
q->read_buf = NULL;
}
- done:
+done:
mutex_unlock(&q->vb_lock);
return retval;
}
+EXPORT_SYMBOL_GPL(videobuf_read_one);
/* Locking: Caller holds q->vb_lock */
static int __videobuf_read_start(struct videobuf_queue *q)
@@ -917,7 +979,6 @@ static void __videobuf_read_stop(struct videobuf_queue *q)
q->bufs[i] = NULL;
}
q->read_buf = NULL;
-
}
int videobuf_read_start(struct videobuf_queue *q)
@@ -930,6 +991,7 @@ int videobuf_read_start(struct videobuf_queue *q)
return rc;
}
+EXPORT_SYMBOL_GPL(videobuf_read_start);
void videobuf_read_stop(struct videobuf_queue *q)
{
@@ -937,6 +999,7 @@ void videobuf_read_stop(struct videobuf_queue *q)
__videobuf_read_stop(q);
mutex_unlock(&q->vb_lock);
}
+EXPORT_SYMBOL_GPL(videobuf_read_stop);
void videobuf_stop(struct videobuf_queue *q)
{
@@ -950,7 +1013,7 @@ void videobuf_stop(struct videobuf_queue *q)
mutex_unlock(&q->vb_lock);
}
-
+EXPORT_SYMBOL_GPL(videobuf_stop);
ssize_t videobuf_read_stream(struct videobuf_queue *q,
char __user *data, size_t count, loff_t *ppos,
@@ -990,7 +1053,7 @@ ssize_t videobuf_read_stream(struct videobuf_queue *q,
}
if (q->read_buf->state == VIDEOBUF_DONE) {
- rc = CALL(q, copy_stream, q, data + retval, count,
+ rc = __videobuf_copy_stream(q, q->read_buf, data + retval, count,
retval, vbihack, nonblocking);
if (rc < 0) {
retval = rc;
@@ -1019,10 +1082,11 @@ ssize_t videobuf_read_stream(struct videobuf_queue *q,
break;
}
- done:
+done:
mutex_unlock(&q->vb_lock);
return retval;
}
+EXPORT_SYMBOL_GPL(videobuf_read_stream);
unsigned int videobuf_poll_stream(struct file *file,
struct videobuf_queue *q,
@@ -1056,27 +1120,51 @@ unsigned int videobuf_poll_stream(struct file *file,
if (0 == rc) {
poll_wait(file, &buf->done, wait);
if (buf->state == VIDEOBUF_DONE ||
- buf->state == VIDEOBUF_ERROR)
- rc = POLLIN|POLLRDNORM;
+ buf->state == VIDEOBUF_ERROR) {
+ switch (q->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ case V4L2_BUF_TYPE_VBI_OUTPUT:
+ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
+ rc = POLLOUT | POLLWRNORM;
+ break;
+ default:
+ rc = POLLIN | POLLRDNORM;
+ break;
+ }
+ }
}
mutex_unlock(&q->vb_lock);
return rc;
}
+EXPORT_SYMBOL_GPL(videobuf_poll_stream);
-int videobuf_mmap_mapper(struct videobuf_queue *q,
- struct vm_area_struct *vma)
+int videobuf_mmap_mapper(struct videobuf_queue *q, struct vm_area_struct *vma)
{
- int retval;
+ int rc = -EINVAL;
+ int i;
MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
+ if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED)) {
+ dprintk(1, "mmap appl bug: PROT_WRITE and MAP_SHARED are required\n");
+ return -EINVAL;
+ }
+
mutex_lock(&q->vb_lock);
- retval = CALL(q, mmap_mapper, q, vma);
- q->is_mmapped = 1;
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ struct videobuf_buffer *buf = q->bufs[i];
+
+ if (buf && buf->memory == V4L2_MEMORY_MMAP &&
+ buf->boff == (vma->vm_pgoff << PAGE_SHIFT)) {
+ rc = CALL(q, mmap_mapper, q, buf, vma);
+ break;
+ }
+ }
mutex_unlock(&q->vb_lock);
- return retval;
+ return rc;
}
+EXPORT_SYMBOL_GPL(videobuf_mmap_mapper);
#ifdef CONFIG_VIDEO_V4L1_COMPAT
int videobuf_cgmbuf(struct videobuf_queue *q,
@@ -1107,33 +1195,3 @@ int videobuf_cgmbuf(struct videobuf_queue *q,
EXPORT_SYMBOL_GPL(videobuf_cgmbuf);
#endif
-/* --------------------------------------------------------------------- */
-
-EXPORT_SYMBOL_GPL(videobuf_waiton);
-EXPORT_SYMBOL_GPL(videobuf_iolock);
-
-EXPORT_SYMBOL_GPL(videobuf_alloc);
-
-EXPORT_SYMBOL_GPL(videobuf_queue_core_init);
-EXPORT_SYMBOL_GPL(videobuf_queue_cancel);
-EXPORT_SYMBOL_GPL(videobuf_queue_is_busy);
-
-EXPORT_SYMBOL_GPL(videobuf_next_field);
-EXPORT_SYMBOL_GPL(videobuf_reqbufs);
-EXPORT_SYMBOL_GPL(videobuf_querybuf);
-EXPORT_SYMBOL_GPL(videobuf_qbuf);
-EXPORT_SYMBOL_GPL(videobuf_dqbuf);
-EXPORT_SYMBOL_GPL(videobuf_streamon);
-EXPORT_SYMBOL_GPL(videobuf_streamoff);
-
-EXPORT_SYMBOL_GPL(videobuf_read_start);
-EXPORT_SYMBOL_GPL(videobuf_read_stop);
-EXPORT_SYMBOL_GPL(videobuf_stop);
-EXPORT_SYMBOL_GPL(videobuf_read_stream);
-EXPORT_SYMBOL_GPL(videobuf_read_one);
-EXPORT_SYMBOL_GPL(videobuf_poll_stream);
-
-EXPORT_SYMBOL_GPL(__videobuf_mmap_setup);
-EXPORT_SYMBOL_GPL(videobuf_mmap_setup);
-EXPORT_SYMBOL_GPL(videobuf_mmap_free);
-EXPORT_SYMBOL_GPL(videobuf_mmap_mapper);
diff --git a/drivers/media/video/videobuf-dma-contig.c b/drivers/media/video/videobuf-dma-contig.c
index dce4f3aa4af1..74730c624cfc 100644
--- a/drivers/media/video/videobuf-dma-contig.c
+++ b/drivers/media/video/videobuf-dma-contig.c
@@ -55,14 +55,14 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
struct videobuf_queue *q = map->q;
int i;
- dev_dbg(map->q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
+ dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
map, map->count, vma->vm_start, vma->vm_end);
map->count--;
if (0 == map->count) {
struct videobuf_dma_contig_memory *mem;
- dev_dbg(map->q->dev, "munmap %p q=%p\n", map, q);
+ dev_dbg(q->dev, "munmap %p q=%p\n", map, q);
mutex_lock(&q->vb_lock);
/* We need first to cancel streams, before unmapping */
@@ -89,7 +89,7 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
/* vfree is not atomic - can't be
called with IRQ's disabled
*/
- dev_dbg(map->q->dev, "buf[%d] freeing %p\n",
+ dev_dbg(q->dev, "buf[%d] freeing %p\n",
i, mem->vaddr);
dma_free_coherent(q->dev, mem->size,
@@ -190,7 +190,7 @@ static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
return ret;
}
-static void *__videobuf_alloc(size_t size)
+static struct videobuf_buffer *__videobuf_alloc(size_t size)
{
struct videobuf_dma_contig_memory *mem;
struct videobuf_buffer *vb;
@@ -204,7 +204,7 @@ static void *__videobuf_alloc(size_t size)
return vb;
}
-static void *__videobuf_to_vmalloc(struct videobuf_buffer *buf)
+static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
{
struct videobuf_dma_contig_memory *mem = buf->priv;
@@ -263,65 +263,34 @@ static int __videobuf_iolock(struct videobuf_queue *q,
return 0;
}
-static int __videobuf_mmap_free(struct videobuf_queue *q)
-{
- unsigned int i;
-
- dev_dbg(q->dev, "%s\n", __func__);
- for (i = 0; i < VIDEO_MAX_FRAME; i++) {
- if (q->bufs[i] && q->bufs[i]->map)
- return -EBUSY;
- }
-
- return 0;
-}
-
static int __videobuf_mmap_mapper(struct videobuf_queue *q,
+ struct videobuf_buffer *buf,
struct vm_area_struct *vma)
{
struct videobuf_dma_contig_memory *mem;
struct videobuf_mapping *map;
- unsigned int first;
int retval;
- unsigned long size, offset = vma->vm_pgoff << PAGE_SHIFT;
+ unsigned long size;
dev_dbg(q->dev, "%s\n", __func__);
- if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED))
- return -EINVAL;
-
- /* look for first buffer to map */
- for (first = 0; first < VIDEO_MAX_FRAME; first++) {
- if (!q->bufs[first])
- continue;
-
- if (V4L2_MEMORY_MMAP != q->bufs[first]->memory)
- continue;
- if (q->bufs[first]->boff == offset)
- break;
- }
- if (VIDEO_MAX_FRAME == first) {
- dev_dbg(q->dev, "invalid user space offset [offset=0x%lx]\n",
- offset);
- return -EINVAL;
- }
/* create mapping + update buffer list */
map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
if (!map)
return -ENOMEM;
- q->bufs[first]->map = map;
+ buf->map = map;
map->start = vma->vm_start;
map->end = vma->vm_end;
map->q = q;
- q->bufs[first]->baddr = vma->vm_start;
+ buf->baddr = vma->vm_start;
- mem = q->bufs[first]->priv;
+ mem = buf->priv;
BUG_ON(!mem);
MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
- mem->size = PAGE_ALIGN(q->bufs[first]->bsize);
+ mem->size = PAGE_ALIGN(buf->bsize);
mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
&mem->dma_handle, GFP_KERNEL);
if (!mem->vaddr) {
@@ -354,8 +323,8 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
map, q, vma->vm_start, vma->vm_end,
- (long int) q->bufs[first]->bsize,
- vma->vm_pgoff, first);
+ (long int)buf->bsize,
+ vma->vm_pgoff, buf->i);
videobuf_vm_open(vma);
@@ -366,69 +335,13 @@ error:
return -ENOMEM;
}
-static int __videobuf_copy_to_user(struct videobuf_queue *q,
- char __user *data, size_t count,
- int nonblocking)
-{
- struct videobuf_dma_contig_memory *mem = q->read_buf->priv;
- void *vaddr;
-
- BUG_ON(!mem);
- MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
- BUG_ON(!mem->vaddr);
-
- /* copy to userspace */
- if (count > q->read_buf->size - q->read_off)
- count = q->read_buf->size - q->read_off;
-
- vaddr = mem->vaddr;
-
- if (copy_to_user(data, vaddr + q->read_off, count))
- return -EFAULT;
-
- return count;
-}
-
-static int __videobuf_copy_stream(struct videobuf_queue *q,
- char __user *data, size_t count, size_t pos,
- int vbihack, int nonblocking)
-{
- unsigned int *fc;
- struct videobuf_dma_contig_memory *mem = q->read_buf->priv;
-
- BUG_ON(!mem);
- MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
-
- if (vbihack) {
- /* dirty, undocumented hack -- pass the frame counter
- * within the last four bytes of each vbi data block.
- * We need that one to maintain backward compatibility
- * to all vbi decoding software out there ... */
- fc = (unsigned int *)mem->vaddr;
- fc += (q->read_buf->size >> 2) - 1;
- *fc = q->read_buf->field_count >> 1;
- dev_dbg(q->dev, "vbihack: %d\n", *fc);
- }
-
- /* copy stuff using the common method */
- count = __videobuf_copy_to_user(q, data, count, nonblocking);
-
- if ((count == -EFAULT) && (pos == 0))
- return -EFAULT;
-
- return count;
-}
-
static struct videobuf_qtype_ops qops = {
.magic = MAGIC_QTYPE_OPS,
.alloc = __videobuf_alloc,
.iolock = __videobuf_iolock,
- .mmap_free = __videobuf_mmap_free,
.mmap_mapper = __videobuf_mmap_mapper,
- .video_copy_to_user = __videobuf_copy_to_user,
- .copy_stream = __videobuf_copy_stream,
- .vmalloc = __videobuf_to_vmalloc,
+ .vaddr = __videobuf_to_vaddr,
};
void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
index fcd045e7a1c1..8359e6badd36 100644
--- a/drivers/media/video/videobuf-dma-sg.c
+++ b/drivers/media/video/videobuf-dma-sg.c
@@ -37,8 +37,12 @@
#define MAGIC_DMABUF 0x19721112
#define MAGIC_SG_MEM 0x17890714
-#define MAGIC_CHECK(is,should) if (unlikely((is) != (should))) \
- { printk(KERN_ERR "magic mismatch: %x (expected %x)\n",is,should); BUG(); }
+#define MAGIC_CHECK(is, should) \
+ if (unlikely((is) != (should))) { \
+ printk(KERN_ERR "magic mismatch: %x (expected %x)\n", \
+ is, should); \
+ BUG(); \
+ }
static int debug;
module_param(debug, int, 0644);
@@ -47,13 +51,13 @@ MODULE_DESCRIPTION("helper module to manage video4linux dma sg buffers");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
MODULE_LICENSE("GPL");
-#define dprintk(level, fmt, arg...) if (debug >= level) \
- printk(KERN_DEBUG "vbuf-sg: " fmt , ## arg)
+#define dprintk(level, fmt, arg...) \
+ if (debug >= level) \
+ printk(KERN_DEBUG "vbuf-sg: " fmt , ## arg)
/* --------------------------------------------------------------------- */
-struct scatterlist*
-videobuf_vmalloc_to_sg(unsigned char *virt, int nr_pages)
+struct scatterlist *videobuf_vmalloc_to_sg(unsigned char *virt, int nr_pages)
{
struct scatterlist *sglist;
struct page *pg;
@@ -73,13 +77,14 @@ videobuf_vmalloc_to_sg(unsigned char *virt, int nr_pages)
}
return sglist;
- err:
+err:
vfree(sglist);
return NULL;
}
+EXPORT_SYMBOL_GPL(videobuf_vmalloc_to_sg);
-struct scatterlist*
-videobuf_pages_to_sg(struct page **pages, int nr_pages, int offset)
+struct scatterlist *videobuf_pages_to_sg(struct page **pages, int nr_pages,
+ int offset)
{
struct scatterlist *sglist;
int i;
@@ -104,20 +109,20 @@ videobuf_pages_to_sg(struct page **pages, int nr_pages, int offset)
}
return sglist;
- nopage:
- dprintk(2,"sgl: oops - no page\n");
+nopage:
+ dprintk(2, "sgl: oops - no page\n");
vfree(sglist);
return NULL;
- highmem:
- dprintk(2,"sgl: oops - highmem page\n");
+highmem:
+ dprintk(2, "sgl: oops - highmem page\n");
vfree(sglist);
return NULL;
}
/* --------------------------------------------------------------------- */
-struct videobuf_dmabuf *videobuf_to_dma (struct videobuf_buffer *buf)
+struct videobuf_dmabuf *videobuf_to_dma(struct videobuf_buffer *buf)
{
struct videobuf_dma_sg_memory *mem = buf->priv;
BUG_ON(!mem);
@@ -126,17 +131,19 @@ struct videobuf_dmabuf *videobuf_to_dma (struct videobuf_buffer *buf)
return &mem->dma;
}
+EXPORT_SYMBOL_GPL(videobuf_to_dma);
void videobuf_dma_init(struct videobuf_dmabuf *dma)
{
- memset(dma,0,sizeof(*dma));
+ memset(dma, 0, sizeof(*dma));
dma->magic = MAGIC_DMABUF;
}
+EXPORT_SYMBOL_GPL(videobuf_dma_init);
static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
int direction, unsigned long data, unsigned long size)
{
- unsigned long first,last;
+ unsigned long first, last;
int err, rw = 0;
dma->direction = direction;
@@ -155,21 +162,21 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
last = ((data+size-1) & PAGE_MASK) >> PAGE_SHIFT;
dma->offset = data & ~PAGE_MASK;
dma->nr_pages = last-first+1;
- dma->pages = kmalloc(dma->nr_pages * sizeof(struct page*),
- GFP_KERNEL);
+ dma->pages = kmalloc(dma->nr_pages * sizeof(struct page *), GFP_KERNEL);
if (NULL == dma->pages)
return -ENOMEM;
- dprintk(1,"init user [0x%lx+0x%lx => %d pages]\n",
- data,size,dma->nr_pages);
- err = get_user_pages(current,current->mm,
+ dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
+ data, size, dma->nr_pages);
+
+ err = get_user_pages(current, current->mm,
data & PAGE_MASK, dma->nr_pages,
rw == READ, 1, /* force */
dma->pages, NULL);
if (err != dma->nr_pages) {
dma->nr_pages = (err >= 0) ? err : 0;
- dprintk(1,"get_user_pages: err=%d [%d]\n",err,dma->nr_pages);
+ dprintk(1, "get_user_pages: err=%d [%d]\n", err, dma->nr_pages);
return err < 0 ? err : -EINVAL;
}
return 0;
@@ -179,48 +186,58 @@ int videobuf_dma_init_user(struct videobuf_dmabuf *dma, int direction,
unsigned long data, unsigned long size)
{
int ret;
+
down_read(&current->mm->mmap_sem);
ret = videobuf_dma_init_user_locked(dma, direction, data, size);
up_read(&current->mm->mmap_sem);
return ret;
}
+EXPORT_SYMBOL_GPL(videobuf_dma_init_user);
int videobuf_dma_init_kernel(struct videobuf_dmabuf *dma, int direction,
int nr_pages)
{
- dprintk(1,"init kernel [%d pages]\n",nr_pages);
+ dprintk(1, "init kernel [%d pages]\n", nr_pages);
+
dma->direction = direction;
dma->vmalloc = vmalloc_32(nr_pages << PAGE_SHIFT);
if (NULL == dma->vmalloc) {
- dprintk(1,"vmalloc_32(%d pages) failed\n",nr_pages);
+ dprintk(1, "vmalloc_32(%d pages) failed\n", nr_pages);
return -ENOMEM;
}
- dprintk(1,"vmalloc is at addr 0x%08lx, size=%d\n",
+
+ dprintk(1, "vmalloc is at addr 0x%08lx, size=%d\n",
(unsigned long)dma->vmalloc,
nr_pages << PAGE_SHIFT);
- memset(dma->vmalloc,0,nr_pages << PAGE_SHIFT);
+
+ memset(dma->vmalloc, 0, nr_pages << PAGE_SHIFT);
dma->nr_pages = nr_pages;
+
return 0;
}
+EXPORT_SYMBOL_GPL(videobuf_dma_init_kernel);
int videobuf_dma_init_overlay(struct videobuf_dmabuf *dma, int direction,
dma_addr_t addr, int nr_pages)
{
- dprintk(1,"init overlay [%d pages @ bus 0x%lx]\n",
- nr_pages,(unsigned long)addr);
+ dprintk(1, "init overlay [%d pages @ bus 0x%lx]\n",
+ nr_pages, (unsigned long)addr);
dma->direction = direction;
+
if (0 == addr)
return -EINVAL;
dma->bus_addr = addr;
dma->nr_pages = nr_pages;
+
return 0;
}
+EXPORT_SYMBOL_GPL(videobuf_dma_init_overlay);
-int videobuf_dma_map(struct videobuf_queue* q, struct videobuf_dmabuf *dma)
+int videobuf_dma_map(struct videobuf_queue *q, struct videobuf_dmabuf *dma)
{
- MAGIC_CHECK(dma->magic,MAGIC_DMABUF);
+ MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
BUG_ON(0 == dma->nr_pages);
if (dma->pages) {
@@ -228,20 +245,21 @@ int videobuf_dma_map(struct videobuf_queue* q, struct videobuf_dmabuf *dma)
dma->offset);
}
if (dma->vmalloc) {
- dma->sglist = videobuf_vmalloc_to_sg
- (dma->vmalloc,dma->nr_pages);
+ dma->sglist = videobuf_vmalloc_to_sg(dma->vmalloc,
+ dma->nr_pages);
}
if (dma->bus_addr) {
dma->sglist = vmalloc(sizeof(*dma->sglist));
if (NULL != dma->sglist) {
- dma->sglen = 1;
- sg_dma_address(&dma->sglist[0]) = dma->bus_addr & PAGE_MASK;
- dma->sglist[0].offset = dma->bus_addr & ~PAGE_MASK;
- sg_dma_len(&dma->sglist[0]) = dma->nr_pages * PAGE_SIZE;
+ dma->sglen = 1;
+ sg_dma_address(&dma->sglist[0]) = dma->bus_addr
+ & PAGE_MASK;
+ dma->sglist[0].offset = dma->bus_addr & ~PAGE_MASK;
+ sg_dma_len(&dma->sglist[0]) = dma->nr_pages * PAGE_SIZE;
}
}
if (NULL == dma->sglist) {
- dprintk(1,"scatterlist is NULL\n");
+ dprintk(1, "scatterlist is NULL\n");
return -ENOMEM;
}
if (!dma->bus_addr) {
@@ -249,47 +267,43 @@ int videobuf_dma_map(struct videobuf_queue* q, struct videobuf_dmabuf *dma)
dma->nr_pages, dma->direction);
if (0 == dma->sglen) {
printk(KERN_WARNING
- "%s: videobuf_map_sg failed\n",__func__);
+ "%s: videobuf_map_sg failed\n", __func__);
vfree(dma->sglist);
dma->sglist = NULL;
dma->sglen = 0;
return -ENOMEM;
}
}
- return 0;
-}
-
-int videobuf_dma_sync(struct videobuf_queue *q, struct videobuf_dmabuf *dma)
-{
- MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
- BUG_ON(!dma->sglen);
- dma_sync_sg_for_cpu(q->dev, dma->sglist, dma->nr_pages, dma->direction);
return 0;
}
+EXPORT_SYMBOL_GPL(videobuf_dma_map);
-int videobuf_dma_unmap(struct videobuf_queue* q,struct videobuf_dmabuf *dma)
+int videobuf_dma_unmap(struct videobuf_queue *q, struct videobuf_dmabuf *dma)
{
MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
+
if (!dma->sglen)
return 0;
- dma_unmap_sg(q->dev, dma->sglist, dma->nr_pages, dma->direction);
+ dma_unmap_sg(q->dev, dma->sglist, dma->sglen, dma->direction);
vfree(dma->sglist);
dma->sglist = NULL;
dma->sglen = 0;
+
return 0;
}
+EXPORT_SYMBOL_GPL(videobuf_dma_unmap);
int videobuf_dma_free(struct videobuf_dmabuf *dma)
{
- MAGIC_CHECK(dma->magic,MAGIC_DMABUF);
+ int i;
+ MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
BUG_ON(dma->sglen);
if (dma->pages) {
- int i;
- for (i=0; i < dma->nr_pages; i++)
+ for (i = 0; i < dma->nr_pages; i++)
page_cache_release(dma->pages[i]);
kfree(dma->pages);
dma->pages = NULL;
@@ -298,12 +312,13 @@ int videobuf_dma_free(struct videobuf_dmabuf *dma)
vfree(dma->vmalloc);
dma->vmalloc = NULL;
- if (dma->bus_addr) {
+ if (dma->bus_addr)
dma->bus_addr = 0;
- }
dma->direction = DMA_NONE;
+
return 0;
}
+EXPORT_SYMBOL_GPL(videobuf_dma_free);
/* --------------------------------------------------------------------- */
@@ -315,6 +330,7 @@ int videobuf_sg_dma_map(struct device *dev, struct videobuf_dmabuf *dma)
return videobuf_dma_map(&q, dma);
}
+EXPORT_SYMBOL_GPL(videobuf_sg_dma_map);
int videobuf_sg_dma_unmap(struct device *dev, struct videobuf_dmabuf *dma)
{
@@ -324,49 +340,48 @@ int videobuf_sg_dma_unmap(struct device *dev, struct videobuf_dmabuf *dma)
return videobuf_dma_unmap(&q, dma);
}
+EXPORT_SYMBOL_GPL(videobuf_sg_dma_unmap);
/* --------------------------------------------------------------------- */
-static void
-videobuf_vm_open(struct vm_area_struct *vma)
+static void videobuf_vm_open(struct vm_area_struct *vma)
{
struct videobuf_mapping *map = vma->vm_private_data;
- dprintk(2,"vm_open %p [count=%d,vma=%08lx-%08lx]\n",map,
- map->count,vma->vm_start,vma->vm_end);
+ dprintk(2, "vm_open %p [count=%d,vma=%08lx-%08lx]\n", map,
+ map->count, vma->vm_start, vma->vm_end);
+
map->count++;
}
-static void
-videobuf_vm_close(struct vm_area_struct *vma)
+static void videobuf_vm_close(struct vm_area_struct *vma)
{
struct videobuf_mapping *map = vma->vm_private_data;
struct videobuf_queue *q = map->q;
struct videobuf_dma_sg_memory *mem;
int i;
- dprintk(2,"vm_close %p [count=%d,vma=%08lx-%08lx]\n",map,
- map->count,vma->vm_start,vma->vm_end);
+ dprintk(2, "vm_close %p [count=%d,vma=%08lx-%08lx]\n", map,
+ map->count, vma->vm_start, vma->vm_end);
map->count--;
if (0 == map->count) {
- dprintk(1,"munmap %p q=%p\n",map,q);
+ dprintk(1, "munmap %p q=%p\n", map, q);
mutex_lock(&q->vb_lock);
for (i = 0; i < VIDEO_MAX_FRAME; i++) {
if (NULL == q->bufs[i])
continue;
- mem=q->bufs[i]->priv;
-
+ mem = q->bufs[i]->priv;
if (!mem)
continue;
- MAGIC_CHECK(mem->magic,MAGIC_SG_MEM);
+ MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
if (q->bufs[i]->map != map)
continue;
q->bufs[i]->map = NULL;
q->bufs[i]->baddr = 0;
- q->ops->buf_release(q,q->bufs[i]);
+ q->ops->buf_release(q, q->bufs[i]);
}
mutex_unlock(&q->vb_lock);
kfree(map);
@@ -380,26 +395,27 @@ videobuf_vm_close(struct vm_area_struct *vma)
* now ...). Bounce buffers don't work very well for the data rates
* video capture has.
*/
-static int
-videobuf_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int videobuf_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct page *page;
- dprintk(3,"fault: fault @ %08lx [vma %08lx-%08lx]\n",
- (unsigned long)vmf->virtual_address,vma->vm_start,vma->vm_end);
+ dprintk(3, "fault: fault @ %08lx [vma %08lx-%08lx]\n",
+ (unsigned long)vmf->virtual_address,
+ vma->vm_start, vma->vm_end);
+
page = alloc_page(GFP_USER | __GFP_DMA32);
if (!page)
return VM_FAULT_OOM;
clear_user_highpage(page, (unsigned long)vmf->virtual_address);
vmf->page = page;
+
return 0;
}
-static const struct vm_operations_struct videobuf_vm_ops =
-{
- .open = videobuf_vm_open,
- .close = videobuf_vm_close,
- .fault = videobuf_vm_fault,
+static const struct vm_operations_struct videobuf_vm_ops = {
+ .open = videobuf_vm_open,
+ .close = videobuf_vm_close,
+ .fault = videobuf_vm_fault,
};
/* ---------------------------------------------------------------------
@@ -412,28 +428,28 @@ static const struct vm_operations_struct videobuf_vm_ops =
struct videobuf_dma_sg_memory
*/
-static void *__videobuf_alloc(size_t size)
+static struct videobuf_buffer *__videobuf_alloc(size_t size)
{
struct videobuf_dma_sg_memory *mem;
struct videobuf_buffer *vb;
- vb = kzalloc(size+sizeof(*mem),GFP_KERNEL);
+ vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
if (!vb)
return vb;
- mem = vb->priv = ((char *)vb)+size;
- mem->magic=MAGIC_SG_MEM;
+ mem = vb->priv = ((char *)vb) + size;
+ mem->magic = MAGIC_SG_MEM;
videobuf_dma_init(&mem->dma);
- dprintk(1,"%s: allocated at %p(%ld+%ld) & %p(%ld)\n",
- __func__,vb,(long)sizeof(*vb),(long)size-sizeof(*vb),
- mem,(long)sizeof(*mem));
+ dprintk(1, "%s: allocated at %p(%ld+%ld) & %p(%ld)\n",
+ __func__, vb, (long)sizeof(*vb), (long)size - sizeof(*vb),
+ mem, (long)sizeof(*mem));
return vb;
}
-static void *__videobuf_to_vmalloc (struct videobuf_buffer *buf)
+static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
{
struct videobuf_dma_sg_memory *mem = buf->priv;
BUG_ON(!mem);
@@ -443,11 +459,11 @@ static void *__videobuf_to_vmalloc (struct videobuf_buffer *buf)
return mem->dma.vmalloc;
}
-static int __videobuf_iolock (struct videobuf_queue* q,
- struct videobuf_buffer *vb,
- struct v4l2_framebuffer *fbuf)
+static int __videobuf_iolock(struct videobuf_queue *q,
+ struct videobuf_buffer *vb,
+ struct v4l2_framebuffer *fbuf)
{
- int err,pages;
+ int err, pages;
dma_addr_t bus;
struct videobuf_dma_sg_memory *mem = vb->priv;
BUG_ON(!mem);
@@ -460,16 +476,16 @@ static int __videobuf_iolock (struct videobuf_queue* q,
if (0 == vb->baddr) {
/* no userspace addr -- kernel bounce buffer */
pages = PAGE_ALIGN(vb->size) >> PAGE_SHIFT;
- err = videobuf_dma_init_kernel( &mem->dma,
- DMA_FROM_DEVICE,
- pages );
+ err = videobuf_dma_init_kernel(&mem->dma,
+ DMA_FROM_DEVICE,
+ pages);
if (0 != err)
return err;
} else if (vb->memory == V4L2_MEMORY_USERPTR) {
/* dma directly to userspace */
- err = videobuf_dma_init_user( &mem->dma,
- DMA_FROM_DEVICE,
- vb->baddr,vb->bsize );
+ err = videobuf_dma_init_user(&mem->dma,
+ DMA_FROM_DEVICE,
+ vb->baddr, vb->bsize);
if (0 != err)
return err;
} else {
@@ -515,43 +531,27 @@ static int __videobuf_sync(struct videobuf_queue *q,
struct videobuf_buffer *buf)
{
struct videobuf_dma_sg_memory *mem = buf->priv;
- BUG_ON(!mem);
- MAGIC_CHECK(mem->magic,MAGIC_SG_MEM);
+ BUG_ON(!mem || !mem->dma.sglen);
- return videobuf_dma_sync(q,&mem->dma);
-}
-
-static int __videobuf_mmap_free(struct videobuf_queue *q)
-{
- int i;
+ MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
+ MAGIC_CHECK(mem->dma.magic, MAGIC_DMABUF);
- for (i = 0; i < VIDEO_MAX_FRAME; i++) {
- if (q->bufs[i]) {
- if (q->bufs[i]->map)
- return -EBUSY;
- }
- }
+ dma_sync_sg_for_cpu(q->dev, mem->dma.sglist,
+ mem->dma.sglen, mem->dma.direction);
return 0;
}
static int __videobuf_mmap_mapper(struct videobuf_queue *q,
- struct vm_area_struct *vma)
+ struct videobuf_buffer *buf,
+ struct vm_area_struct *vma)
{
- struct videobuf_dma_sg_memory *mem;
+ struct videobuf_dma_sg_memory *mem = buf->priv;
struct videobuf_mapping *map;
- unsigned int first,last,size,i;
+ unsigned int first, last, size = 0, i;
int retval;
retval = -EINVAL;
- if (!(vma->vm_flags & VM_WRITE)) {
- dprintk(1,"mmap app bug: PROT_WRITE please\n");
- goto done;
- }
- if (!(vma->vm_flags & VM_SHARED)) {
- dprintk(1,"mmap app bug: MAP_SHARED please\n");
- goto done;
- }
/* This function maintains backwards compatibility with V4L1 and will
* map more than one buffer if the vma length is equal to the combined
@@ -561,48 +561,52 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
* TODO: Allow drivers to specify if they support this mode
*/
+ BUG_ON(!mem);
+ MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
+
/* look for first buffer to map */
for (first = 0; first < VIDEO_MAX_FRAME; first++) {
- if (NULL == q->bufs[first])
- continue;
- mem=q->bufs[first]->priv;
- BUG_ON(!mem);
- MAGIC_CHECK(mem->magic,MAGIC_SG_MEM);
-
- if (V4L2_MEMORY_MMAP != q->bufs[first]->memory)
- continue;
- if (q->bufs[first]->boff == (vma->vm_pgoff << PAGE_SHIFT))
+ if (buf == q->bufs[first]) {
+ size = PAGE_ALIGN(q->bufs[first]->bsize);
break;
+ }
}
- if (VIDEO_MAX_FRAME == first) {
- dprintk(1,"mmap app bug: offset invalid [offset=0x%lx]\n",
- (vma->vm_pgoff << PAGE_SHIFT));
+
+ /* paranoia, should never happen since buf is always valid. */
+ if (!size) {
+ dprintk(1, "mmap app bug: offset invalid [offset=0x%lx]\n",
+ (vma->vm_pgoff << PAGE_SHIFT));
goto done;
}
- /* look for last buffer to map */
- for (size = 0, last = first; last < VIDEO_MAX_FRAME; last++) {
- if (NULL == q->bufs[last])
- continue;
- if (V4L2_MEMORY_MMAP != q->bufs[last]->memory)
- continue;
- if (q->bufs[last]->map) {
- retval = -EBUSY;
+ last = first;
+#ifdef CONFIG_VIDEO_V4L1_COMPAT
+ if (size != (vma->vm_end - vma->vm_start)) {
+ /* look for last buffer to map */
+ for (last = first + 1; last < VIDEO_MAX_FRAME; last++) {
+ if (NULL == q->bufs[last])
+ continue;
+ if (V4L2_MEMORY_MMAP != q->bufs[last]->memory)
+ continue;
+ if (q->bufs[last]->map) {
+ retval = -EBUSY;
+ goto done;
+ }
+ size += PAGE_ALIGN(q->bufs[last]->bsize);
+ if (size == (vma->vm_end - vma->vm_start))
+ break;
+ }
+ if (VIDEO_MAX_FRAME == last) {
+ dprintk(1, "mmap app bug: size invalid [size=0x%lx]\n",
+ (vma->vm_end - vma->vm_start));
goto done;
}
- size += PAGE_ALIGN(q->bufs[last]->bsize);
- if (size == (vma->vm_end - vma->vm_start))
- break;
- }
- if (VIDEO_MAX_FRAME == last) {
- dprintk(1,"mmap app bug: size invalid [size=0x%lx]\n",
- (vma->vm_end - vma->vm_start));
- goto done;
}
+#endif
/* create mapping + update buffer list */
retval = -ENOMEM;
- map = kmalloc(sizeof(struct videobuf_mapping),GFP_KERNEL);
+ map = kmalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
if (NULL == map)
goto done;
@@ -623,72 +627,22 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
vma->vm_flags &= ~VM_IO; /* using shared anonymous pages */
vma->vm_private_data = map;
- dprintk(1,"mmap %p: q=%p %08lx-%08lx pgoff %08lx bufs %d-%d\n",
- map,q,vma->vm_start,vma->vm_end,vma->vm_pgoff,first,last);
+ dprintk(1, "mmap %p: q=%p %08lx-%08lx pgoff %08lx bufs %d-%d\n",
+ map, q, vma->vm_start, vma->vm_end, vma->vm_pgoff, first, last);
retval = 0;
- done:
+done:
return retval;
}
-static int __videobuf_copy_to_user ( struct videobuf_queue *q,
- char __user *data, size_t count,
- int nonblocking )
-{
- struct videobuf_dma_sg_memory *mem = q->read_buf->priv;
- BUG_ON(!mem);
- MAGIC_CHECK(mem->magic,MAGIC_SG_MEM);
-
- /* copy to userspace */
- if (count > q->read_buf->size - q->read_off)
- count = q->read_buf->size - q->read_off;
-
- if (copy_to_user(data, mem->dma.vmalloc+q->read_off, count))
- return -EFAULT;
-
- return count;
-}
-
-static int __videobuf_copy_stream ( struct videobuf_queue *q,
- char __user *data, size_t count, size_t pos,
- int vbihack, int nonblocking )
-{
- unsigned int *fc;
- struct videobuf_dma_sg_memory *mem = q->read_buf->priv;
- BUG_ON(!mem);
- MAGIC_CHECK(mem->magic,MAGIC_SG_MEM);
-
- if (vbihack) {
- /* dirty, undocumented hack -- pass the frame counter
- * within the last four bytes of each vbi data block.
- * We need that one to maintain backward compatibility
- * to all vbi decoding software out there ... */
- fc = (unsigned int*)mem->dma.vmalloc;
- fc += (q->read_buf->size>>2) -1;
- *fc = q->read_buf->field_count >> 1;
- dprintk(1,"vbihack: %d\n",*fc);
- }
-
- /* copy stuff using the common method */
- count = __videobuf_copy_to_user (q,data,count,nonblocking);
-
- if ( (count==-EFAULT) && (0 == pos) )
- return -EFAULT;
-
- return count;
-}
-
static struct videobuf_qtype_ops sg_ops = {
.magic = MAGIC_QTYPE_OPS,
.alloc = __videobuf_alloc,
.iolock = __videobuf_iolock,
.sync = __videobuf_sync,
- .mmap_free = __videobuf_mmap_free,
.mmap_mapper = __videobuf_mmap_mapper,
- .video_copy_to_user = __videobuf_copy_to_user,
- .copy_stream = __videobuf_copy_stream,
- .vmalloc = __videobuf_to_vmalloc,
+ .vaddr = __videobuf_to_vaddr,
};
void *videobuf_sg_alloc(size_t size)
@@ -702,8 +656,9 @@ void *videobuf_sg_alloc(size_t size)
return videobuf_alloc(&q);
}
+EXPORT_SYMBOL_GPL(videobuf_sg_alloc);
-void videobuf_queue_sg_init(struct videobuf_queue* q,
+void videobuf_queue_sg_init(struct videobuf_queue *q,
const struct videobuf_queue_ops *ops,
struct device *dev,
spinlock_t *irqlock,
@@ -715,29 +670,5 @@ void videobuf_queue_sg_init(struct videobuf_queue* q,
videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
priv, &sg_ops);
}
-
-/* --------------------------------------------------------------------- */
-
-EXPORT_SYMBOL_GPL(videobuf_vmalloc_to_sg);
-
-EXPORT_SYMBOL_GPL(videobuf_to_dma);
-EXPORT_SYMBOL_GPL(videobuf_dma_init);
-EXPORT_SYMBOL_GPL(videobuf_dma_init_user);
-EXPORT_SYMBOL_GPL(videobuf_dma_init_kernel);
-EXPORT_SYMBOL_GPL(videobuf_dma_init_overlay);
-EXPORT_SYMBOL_GPL(videobuf_dma_map);
-EXPORT_SYMBOL_GPL(videobuf_dma_sync);
-EXPORT_SYMBOL_GPL(videobuf_dma_unmap);
-EXPORT_SYMBOL_GPL(videobuf_dma_free);
-
-EXPORT_SYMBOL_GPL(videobuf_sg_dma_map);
-EXPORT_SYMBOL_GPL(videobuf_sg_dma_unmap);
-EXPORT_SYMBOL_GPL(videobuf_sg_alloc);
-
EXPORT_SYMBOL_GPL(videobuf_queue_sg_init);
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/video/videobuf-dvb.c b/drivers/media/video/videobuf-dvb.c
index 0afb62e63d99..3f76398968b8 100644
--- a/drivers/media/video/videobuf-dvb.c
+++ b/drivers/media/video/videobuf-dvb.c
@@ -67,7 +67,7 @@ static int videobuf_dvb_thread(void *data)
try_to_freeze();
/* feed buffer data to demux */
- outp = videobuf_queue_to_vmalloc (&dvb->dvbq, buf);
+ outp = videobuf_queue_to_vaddr(&dvb->dvbq, buf);
if (buf->state == VIDEOBUF_DONE)
dvb_dmx_swfilter(&dvb->demux, outp,
diff --git a/drivers/media/video/videobuf-vmalloc.c b/drivers/media/video/videobuf-vmalloc.c
index 136e09383c06..583728f4c221 100644
--- a/drivers/media/video/videobuf-vmalloc.c
+++ b/drivers/media/video/videobuf-vmalloc.c
@@ -30,8 +30,12 @@
#define MAGIC_DMABUF 0x17760309
#define MAGIC_VMAL_MEM 0x18221223
-#define MAGIC_CHECK(is,should) if (unlikely((is) != (should))) \
- { printk(KERN_ERR "magic mismatch: %x (expected %x)\n",is,should); BUG(); }
+#define MAGIC_CHECK(is, should) \
+ if (unlikely((is) != (should))) { \
+ printk(KERN_ERR "magic mismatch: %x (expected %x)\n", \
+ is, should); \
+ BUG(); \
+ }
static int debug;
module_param(debug, int, 0644);
@@ -40,19 +44,19 @@ MODULE_DESCRIPTION("helper module to manage video4linux vmalloc buffers");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
MODULE_LICENSE("GPL");
-#define dprintk(level, fmt, arg...) if (debug >= level) \
- printk(KERN_DEBUG "vbuf-vmalloc: " fmt , ## arg)
+#define dprintk(level, fmt, arg...) \
+ if (debug >= level) \
+ printk(KERN_DEBUG "vbuf-vmalloc: " fmt , ## arg)
/***************************************************************************/
-static void
-videobuf_vm_open(struct vm_area_struct *vma)
+static void videobuf_vm_open(struct vm_area_struct *vma)
{
struct videobuf_mapping *map = vma->vm_private_data;
- dprintk(2,"vm_open %p [count=%u,vma=%08lx-%08lx]\n",map,
- map->count,vma->vm_start,vma->vm_end);
+ dprintk(2, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", map,
+ map->count, vma->vm_start, vma->vm_end);
map->count++;
}
@@ -63,7 +67,7 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
struct videobuf_queue *q = map->q;
int i;
- dprintk(2,"vm_close %p [count=%u,vma=%08lx-%08lx]\n", map,
+ dprintk(2, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", map,
map->count, vma->vm_start, vma->vm_end);
map->count--;
@@ -116,8 +120,7 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
return;
}
-static const struct vm_operations_struct videobuf_vm_ops =
-{
+static const struct vm_operations_struct videobuf_vm_ops = {
.open = videobuf_vm_open,
.close = videobuf_vm_close,
};
@@ -132,28 +135,28 @@ static const struct vm_operations_struct videobuf_vm_ops =
struct videobuf_dma_sg_memory
*/
-static void *__videobuf_alloc(size_t size)
+static struct videobuf_buffer *__videobuf_alloc(size_t size)
{
struct videobuf_vmalloc_memory *mem;
struct videobuf_buffer *vb;
- vb = kzalloc(size+sizeof(*mem),GFP_KERNEL);
+ vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
if (!vb)
return vb;
- mem = vb->priv = ((char *)vb)+size;
- mem->magic=MAGIC_VMAL_MEM;
+ mem = vb->priv = ((char *)vb) + size;
+ mem->magic = MAGIC_VMAL_MEM;
- dprintk(1,"%s: allocated at %p(%ld+%ld) & %p(%ld)\n",
- __func__,vb,(long)sizeof(*vb),(long)size-sizeof(*vb),
- mem,(long)sizeof(*mem));
+ dprintk(1, "%s: allocated at %p(%ld+%ld) & %p(%ld)\n",
+ __func__, vb, (long)sizeof(*vb), (long)size - sizeof(*vb),
+ mem, (long)sizeof(*mem));
return vb;
}
-static int __videobuf_iolock (struct videobuf_queue* q,
- struct videobuf_buffer *vb,
- struct v4l2_framebuffer *fbuf)
+static int __videobuf_iolock(struct videobuf_queue *q,
+ struct videobuf_buffer *vb,
+ struct v4l2_framebuffer *fbuf)
{
struct videobuf_vmalloc_memory *mem = vb->priv;
int pages;
@@ -177,15 +180,13 @@ static int __videobuf_iolock (struct videobuf_queue* q,
dprintk(1, "%s memory method USERPTR\n", __func__);
-#if 1
if (vb->baddr) {
printk(KERN_ERR "USERPTR is currently not supported\n");
return -EINVAL;
}
-#endif
/* The only USERPTR currently supported is the one needed for
- read() method.
+ * read() method.
*/
mem->vmalloc = vmalloc_user(pages);
@@ -210,7 +211,7 @@ static int __videobuf_iolock (struct videobuf_queue* q,
/* Try to remap memory */
rc = remap_vmalloc_range(mem->vma, (void *)vb->baddr, 0);
if (rc < 0) {
- printk(KERN_ERR "mmap: remap failed with error %d. ", rc);
+ printk(KERN_ERR "mmap: remap failed with error %d", rc);
return -ENOMEM;
}
#endif
@@ -228,69 +229,29 @@ static int __videobuf_iolock (struct videobuf_queue* q,
return 0;
}
-static int __videobuf_sync(struct videobuf_queue *q,
- struct videobuf_buffer *buf)
-{
- return 0;
-}
-
-static int __videobuf_mmap_free(struct videobuf_queue *q)
-{
- unsigned int i;
-
- dprintk(1, "%s\n", __func__);
- for (i = 0; i < VIDEO_MAX_FRAME; i++) {
- if (q->bufs[i]) {
- if (q->bufs[i]->map)
- return -EBUSY;
- }
- }
-
- return 0;
-}
-
static int __videobuf_mmap_mapper(struct videobuf_queue *q,
- struct vm_area_struct *vma)
+ struct videobuf_buffer *buf,
+ struct vm_area_struct *vma)
{
struct videobuf_vmalloc_memory *mem;
struct videobuf_mapping *map;
- unsigned int first;
int retval, pages;
- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
dprintk(1, "%s\n", __func__);
- if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED))
- return -EINVAL;
-
- /* look for first buffer to map */
- for (first = 0; first < VIDEO_MAX_FRAME; first++) {
- if (NULL == q->bufs[first])
- continue;
-
- if (V4L2_MEMORY_MMAP != q->bufs[first]->memory)
- continue;
- if (q->bufs[first]->boff == offset)
- break;
- }
- if (VIDEO_MAX_FRAME == first) {
- dprintk(1,"mmap app bug: offset invalid [offset=0x%lx]\n",
- (vma->vm_pgoff << PAGE_SHIFT));
- return -EINVAL;
- }
/* create mapping + update buffer list */
map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
if (NULL == map)
return -ENOMEM;
- q->bufs[first]->map = map;
+ buf->map = map;
map->start = vma->vm_start;
map->end = vma->vm_end;
map->q = q;
- q->bufs[first]->baddr = vma->vm_start;
+ buf->baddr = vma->vm_start;
- mem = q->bufs[first]->priv;
+ mem = buf->priv;
BUG_ON(!mem);
MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
@@ -300,8 +261,7 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
printk(KERN_ERR "vmalloc (%d pages) failed\n", pages);
goto error;
}
- dprintk(1, "vmalloc is at addr %p (%d pages)\n",
- mem->vmalloc, pages);
+ dprintk(1, "vmalloc is at addr %p (%d pages)\n", mem->vmalloc, pages);
/* Try to remap memory */
retval = remap_vmalloc_range(vma, mem->vmalloc, 0);
@@ -315,10 +275,10 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
vma->vm_private_data = map;
- dprintk(1,"mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
+ dprintk(1, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
map, q, vma->vm_start, vma->vm_end,
- (long int) q->bufs[first]->bsize,
- vma->vm_pgoff, first);
+ (long int)buf->bsize,
+ vma->vm_pgoff, buf->i);
videobuf_vm_open(vma);
@@ -330,69 +290,16 @@ error:
return -ENOMEM;
}
-static int __videobuf_copy_to_user ( struct videobuf_queue *q,
- char __user *data, size_t count,
- int nonblocking )
-{
- struct videobuf_vmalloc_memory *mem=q->read_buf->priv;
- BUG_ON (!mem);
- MAGIC_CHECK(mem->magic,MAGIC_VMAL_MEM);
-
- BUG_ON (!mem->vmalloc);
-
- /* copy to userspace */
- if (count > q->read_buf->size - q->read_off)
- count = q->read_buf->size - q->read_off;
-
- if (copy_to_user(data, mem->vmalloc+q->read_off, count))
- return -EFAULT;
-
- return count;
-}
-
-static int __videobuf_copy_stream ( struct videobuf_queue *q,
- char __user *data, size_t count, size_t pos,
- int vbihack, int nonblocking )
-{
- unsigned int *fc;
- struct videobuf_vmalloc_memory *mem=q->read_buf->priv;
- BUG_ON (!mem);
- MAGIC_CHECK(mem->magic,MAGIC_VMAL_MEM);
-
- if (vbihack) {
- /* dirty, undocumented hack -- pass the frame counter
- * within the last four bytes of each vbi data block.
- * We need that one to maintain backward compatibility
- * to all vbi decoding software out there ... */
- fc = (unsigned int*)mem->vmalloc;
- fc += (q->read_buf->size>>2) -1;
- *fc = q->read_buf->field_count >> 1;
- dprintk(1,"vbihack: %d\n",*fc);
- }
-
- /* copy stuff using the common method */
- count = __videobuf_copy_to_user (q,data,count,nonblocking);
-
- if ( (count==-EFAULT) && (0 == pos) )
- return -EFAULT;
-
- return count;
-}
-
static struct videobuf_qtype_ops qops = {
.magic = MAGIC_QTYPE_OPS,
.alloc = __videobuf_alloc,
.iolock = __videobuf_iolock,
- .sync = __videobuf_sync,
- .mmap_free = __videobuf_mmap_free,
.mmap_mapper = __videobuf_mmap_mapper,
- .video_copy_to_user = __videobuf_copy_to_user,
- .copy_stream = __videobuf_copy_stream,
- .vmalloc = videobuf_to_vmalloc,
+ .vaddr = videobuf_to_vmalloc,
};
-void videobuf_queue_vmalloc_init(struct videobuf_queue* q,
+void videobuf_queue_vmalloc_init(struct videobuf_queue *q,
const struct videobuf_queue_ops *ops,
struct device *dev,
spinlock_t *irqlock,
@@ -404,20 +311,19 @@ void videobuf_queue_vmalloc_init(struct videobuf_queue* q,
videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
priv, &qops);
}
-
EXPORT_SYMBOL_GPL(videobuf_queue_vmalloc_init);
-void *videobuf_to_vmalloc (struct videobuf_buffer *buf)
+void *videobuf_to_vmalloc(struct videobuf_buffer *buf)
{
- struct videobuf_vmalloc_memory *mem=buf->priv;
- BUG_ON (!mem);
- MAGIC_CHECK(mem->magic,MAGIC_VMAL_MEM);
+ struct videobuf_vmalloc_memory *mem = buf->priv;
+ BUG_ON(!mem);
+ MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
return mem->vmalloc;
}
EXPORT_SYMBOL_GPL(videobuf_to_vmalloc);
-void videobuf_vmalloc_free (struct videobuf_buffer *buf)
+void videobuf_vmalloc_free(struct videobuf_buffer *buf)
{
struct videobuf_vmalloc_memory *mem = buf->priv;
@@ -442,8 +348,3 @@ void videobuf_vmalloc_free (struct videobuf_buffer *buf)
}
EXPORT_SYMBOL_GPL(videobuf_vmalloc_free);
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index cdbe70385c12..d5e30b85c559 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -13,29 +13,22 @@
* License, or (at your option) any later version
*/
#include <linux/module.h>
-#include <linux/delay.h>
#include <linux/errno.h>
-#include <linux/fs.h>
#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/sched.h>
-#include <linux/pci.h>
-#include <linux/random.h>
+#include <linux/font.h>
#include <linux/version.h>
#include <linux/mutex.h>
#include <linux/videodev2.h>
-#include <linux/dma-mapping.h>
-#include <linux/interrupt.h>
#include <linux/kthread.h>
-#include <linux/highmem.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
#include <linux/freezer.h>
+#endif
#include <media/videobuf-vmalloc.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
-#include "font.h"
+#include <media/v4l2-common.h>
#define VIVI_MODULE_NAME "vivi"
@@ -44,8 +37,11 @@
#define WAKE_DENOMINATOR 1001
#define BUFFER_TIMEOUT msecs_to_jiffies(500) /* 0.5 seconds */
+#define MAX_WIDTH 1920
+#define MAX_HEIGHT 1200
+
#define VIVI_MAJOR_VERSION 0
-#define VIVI_MINOR_VERSION 6
+#define VIVI_MINOR_VERSION 7
#define VIVI_RELEASE 0
#define VIVI_VERSION \
KERNEL_VERSION(VIVI_MAJOR_VERSION, VIVI_MINOR_VERSION, VIVI_RELEASE)
@@ -70,56 +66,8 @@ static unsigned int vid_limit = 16;
module_param(vid_limit, uint, 0644);
MODULE_PARM_DESC(vid_limit, "capture memory limit in megabytes");
-
-/* supported controls */
-static struct v4l2_queryctrl vivi_qctrl[] = {
- {
- .id = V4L2_CID_AUDIO_VOLUME,
- .name = "Volume",
- .minimum = 0,
- .maximum = 65535,
- .step = 65535/100,
- .default_value = 65535,
- .flags = V4L2_CTRL_FLAG_SLIDER,
- .type = V4L2_CTRL_TYPE_INTEGER,
- }, {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 127,
- .flags = V4L2_CTRL_FLAG_SLIDER,
- }, {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 255,
- .step = 0x1,
- .default_value = 0x10,
- .flags = V4L2_CTRL_FLAG_SLIDER,
- }, {
- .id = V4L2_CID_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Saturation",
- .minimum = 0,
- .maximum = 255,
- .step = 0x1,
- .default_value = 127,
- .flags = V4L2_CTRL_FLAG_SLIDER,
- }, {
- .id = V4L2_CID_HUE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Hue",
- .minimum = -128,
- .maximum = 127,
- .step = 0x1,
- .default_value = 0,
- .flags = V4L2_CTRL_FLAG_SLIDER,
- }
-};
+/* Global font descriptor */
+static const u8 *font8x16;
#define dprintk(dev, level, fmt, arg...) \
v4l2_dbg(level, debug, &dev->v4l2_dev, fmt, ## arg)
@@ -214,41 +162,38 @@ struct vivi_dev {
struct list_head vivi_devlist;
struct v4l2_device v4l2_dev;
+ /* controls */
+ int brightness;
+ int contrast;
+ int saturation;
+ int hue;
+ int volume;
+
spinlock_t slock;
struct mutex mutex;
- int users;
-
/* various device info */
struct video_device *vfd;
struct vivi_dmaqueue vidq;
/* Several counters */
- int h, m, s, ms;
+ unsigned ms;
unsigned long jiffies;
- char timestr[13];
int mv_count; /* Controls bars movement */
/* Input Number */
int input;
- /* Control 'registers' */
- int qctl_regs[ARRAY_SIZE(vivi_qctrl)];
-};
-
-struct vivi_fh {
- struct vivi_dev *dev;
-
/* video capture */
struct vivi_fmt *fmt;
unsigned int width, height;
struct videobuf_queue vb_vidq;
- enum v4l2_buf_type type;
- unsigned char bars[8][3];
- int input; /* Input Number on bars */
+ unsigned long generating;
+ u8 bars[9][3];
+ u8 line[MAX_WIDTH * 4];
};
/* ------------------------------------------------------------------
@@ -259,19 +204,20 @@ struct vivi_fh {
enum colors {
WHITE,
- AMBAR,
+ AMBER,
CYAN,
GREEN,
MAGENTA,
RED,
BLUE,
BLACK,
+ TEXT_BLACK,
};
- /* R G B */
+/* R G B */
#define COLOR_WHITE {204, 204, 204}
-#define COLOR_AMBAR {208, 208, 0}
-#define COLOR_CIAN { 0, 206, 206}
+#define COLOR_AMBER {208, 208, 0}
+#define COLOR_CYAN { 0, 206, 206}
#define COLOR_GREEN { 0, 239, 0}
#define COLOR_MAGENTA {239, 0, 239}
#define COLOR_RED {205, 0, 0}
@@ -279,56 +225,24 @@ enum colors {
#define COLOR_BLACK { 0, 0, 0}
struct bar_std {
- u8 bar[8][3];
+ u8 bar[9][3];
};
/* Maximum number of bars are 10 - otherwise, the input print code
should be modified */
static struct bar_std bars[] = {
{ /* Standard ITU-R color bar sequence */
- {
- COLOR_WHITE,
- COLOR_AMBAR,
- COLOR_CIAN,
- COLOR_GREEN,
- COLOR_MAGENTA,
- COLOR_RED,
- COLOR_BLUE,
- COLOR_BLACK,
- }
+ { COLOR_WHITE, COLOR_AMBER, COLOR_CYAN, COLOR_GREEN,
+ COLOR_MAGENTA, COLOR_RED, COLOR_BLUE, COLOR_BLACK, COLOR_BLACK }
}, {
- {
- COLOR_WHITE,
- COLOR_AMBAR,
- COLOR_BLACK,
- COLOR_WHITE,
- COLOR_AMBAR,
- COLOR_BLACK,
- COLOR_WHITE,
- COLOR_AMBAR,
- }
+ { COLOR_WHITE, COLOR_AMBER, COLOR_BLACK, COLOR_WHITE,
+ COLOR_AMBER, COLOR_BLACK, COLOR_WHITE, COLOR_AMBER, COLOR_BLACK }
}, {
- {
- COLOR_WHITE,
- COLOR_CIAN,
- COLOR_BLACK,
- COLOR_WHITE,
- COLOR_CIAN,
- COLOR_BLACK,
- COLOR_WHITE,
- COLOR_CIAN,
- }
+ { COLOR_WHITE, COLOR_CYAN, COLOR_BLACK, COLOR_WHITE,
+ COLOR_CYAN, COLOR_BLACK, COLOR_WHITE, COLOR_CYAN, COLOR_BLACK }
}, {
- {
- COLOR_WHITE,
- COLOR_GREEN,
- COLOR_BLACK,
- COLOR_WHITE,
- COLOR_GREEN,
- COLOR_BLACK,
- COLOR_WHITE,
- COLOR_GREEN,
- }
+ { COLOR_WHITE, COLOR_GREEN, COLOR_BLACK, COLOR_WHITE,
+ COLOR_GREEN, COLOR_BLACK, COLOR_WHITE, COLOR_GREEN, COLOR_BLACK }
},
};
@@ -344,21 +258,18 @@ static struct bar_std bars[] = {
(((-9714 * r - 19070 * g + 28784 * b + 32768) >> 16) + 128)
/* precalculate color bar values to speed up rendering */
-static void precalculate_bars(struct vivi_fh *fh)
+static void precalculate_bars(struct vivi_dev *dev)
{
- struct vivi_dev *dev = fh->dev;
- unsigned char r, g, b;
+ u8 r, g, b;
int k, is_yuv;
- fh->input = dev->input;
-
- for (k = 0; k < 8; k++) {
- r = bars[fh->input].bar[k][0];
- g = bars[fh->input].bar[k][1];
- b = bars[fh->input].bar[k][2];
+ for (k = 0; k < 9; k++) {
+ r = bars[dev->input].bar[k][0];
+ g = bars[dev->input].bar[k][1];
+ b = bars[dev->input].bar[k][2];
is_yuv = 0;
- switch (fh->fmt->fourcc) {
+ switch (dev->fmt->fourcc) {
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_UYVY:
is_yuv = 1;
@@ -378,16 +289,15 @@ static void precalculate_bars(struct vivi_fh *fh)
}
if (is_yuv) {
- fh->bars[k][0] = TO_Y(r, g, b); /* Luma */
- fh->bars[k][1] = TO_U(r, g, b); /* Cb */
- fh->bars[k][2] = TO_V(r, g, b); /* Cr */
+ dev->bars[k][0] = TO_Y(r, g, b); /* Luma */
+ dev->bars[k][1] = TO_U(r, g, b); /* Cb */
+ dev->bars[k][2] = TO_V(r, g, b); /* Cr */
} else {
- fh->bars[k][0] = r;
- fh->bars[k][1] = g;
- fh->bars[k][2] = b;
+ dev->bars[k][0] = r;
+ dev->bars[k][1] = g;
+ dev->bars[k][2] = b;
}
}
-
}
#define TSTAMP_MIN_Y 24
@@ -395,20 +305,20 @@ static void precalculate_bars(struct vivi_fh *fh)
#define TSTAMP_INPUT_X 10
#define TSTAMP_MIN_X (54 + TSTAMP_INPUT_X)
-static void gen_twopix(struct vivi_fh *fh, unsigned char *buf, int colorpos)
+static void gen_twopix(struct vivi_dev *dev, u8 *buf, int colorpos)
{
- unsigned char r_y, g_u, b_v;
- unsigned char *p;
+ u8 r_y, g_u, b_v;
int color;
+ u8 *p;
- r_y = fh->bars[colorpos][0]; /* R or precalculated Y */
- g_u = fh->bars[colorpos][1]; /* G or precalculated U */
- b_v = fh->bars[colorpos][2]; /* B or precalculated V */
+ r_y = dev->bars[colorpos][0]; /* R or precalculated Y */
+ g_u = dev->bars[colorpos][1]; /* G or precalculated U */
+ b_v = dev->bars[colorpos][2]; /* B or precalculated V */
for (color = 0; color < 4; color++) {
p = buf + color;
- switch (fh->fmt->fourcc) {
+ switch (dev->fmt->fourcc) {
case V4L2_PIX_FMT_YUYV:
switch (color) {
case 0:
@@ -489,123 +399,88 @@ static void gen_twopix(struct vivi_fh *fh, unsigned char *buf, int colorpos)
}
}
-static void gen_line(struct vivi_fh *fh, char *basep, int inipos, int wmax,
- int hmax, int line, int count, char *timestr)
+static void precalculate_line(struct vivi_dev *dev)
{
- int w, i, j;
- int pos = inipos;
- char *s;
- u8 chr;
-
- /* We will just duplicate the second pixel at the packet */
- wmax /= 2;
+ int w;
- /* Generate a standard color bar pattern */
- for (w = 0; w < wmax; w++) {
- int colorpos = ((w + count) * 8/(wmax + 1)) % 8;
+ for (w = 0; w < dev->width * 2; w += 2) {
+ int colorpos = (w / (dev->width / 8) % 8);
- gen_twopix(fh, basep + pos, colorpos);
- pos += 4; /* only 16 bpp supported for now */
+ gen_twopix(dev, dev->line + w * 2, colorpos);
}
+}
- /* Prints input entry number */
-
- /* Checks if it is possible to input number */
- if (TSTAMP_MAX_Y >= hmax)
- goto end;
-
- if (TSTAMP_INPUT_X + strlen(timestr) >= wmax)
- goto end;
-
- if (line >= TSTAMP_MIN_Y && line <= TSTAMP_MAX_Y) {
- chr = rom8x16_bits[fh->input * 16 + line - TSTAMP_MIN_Y];
- pos = TSTAMP_INPUT_X;
- for (i = 0; i < 7; i++) {
- /* Draw white font on black background */
- if (chr & 1 << (7 - i))
- gen_twopix(fh, basep + pos, WHITE);
- else
- gen_twopix(fh, basep + pos, BLACK);
- pos += 2;
- }
- }
+static void gen_text(struct vivi_dev *dev, char *basep,
+ int y, int x, char *text)
+{
+ int line;
- /* Checks if it is possible to show timestamp */
- if (TSTAMP_MIN_X + strlen(timestr) >= wmax)
- goto end;
+ /* Checks if it is possible to show string */
+ if (y + 16 >= dev->height || x + strlen(text) * 8 >= dev->width)
+ return;
/* Print stream time */
- if (line >= TSTAMP_MIN_Y && line <= TSTAMP_MAX_Y) {
- j = TSTAMP_MIN_X;
- for (s = timestr; *s; s++) {
- chr = rom8x16_bits[(*s-0x30)*16+line-TSTAMP_MIN_Y];
- for (i = 0; i < 7; i++) {
- pos = inipos + j * 2;
+ for (line = y; line < y + 16; line++) {
+ int j = 0;
+ char *pos = basep + line * dev->width * 2 + x * 2;
+ char *s;
+
+ for (s = text; *s; s++) {
+ u8 chr = font8x16[*s * 16 + line - y];
+ int i;
+
+ for (i = 0; i < 7; i++, j++) {
/* Draw white font on black background */
- if (chr & 1 << (7 - i))
- gen_twopix(fh, basep + pos, WHITE);
+ if (chr & (1 << (7 - i)))
+ gen_twopix(dev, pos + j * 2, WHITE);
else
- gen_twopix(fh, basep + pos, BLACK);
- j++;
+ gen_twopix(dev, pos + j * 2, TEXT_BLACK);
}
}
}
-
-end:
- return;
}
-static void vivi_fillbuff(struct vivi_fh *fh, struct vivi_buffer *buf)
+static void vivi_fillbuff(struct vivi_dev *dev, struct vivi_buffer *buf)
{
- struct vivi_dev *dev = fh->dev;
- int h , pos = 0;
- int hmax = buf->vb.height;
- int wmax = buf->vb.width;
+ int hmax = buf->vb.height;
+ int wmax = buf->vb.width;
struct timeval ts;
- char *tmpbuf;
void *vbuf = videobuf_to_vmalloc(&buf->vb);
+ unsigned ms;
+ char str[100];
+ int h, line = 1;
if (!vbuf)
return;
- tmpbuf = kmalloc(wmax * 2, GFP_ATOMIC);
- if (!tmpbuf)
- return;
-
- for (h = 0; h < hmax; h++) {
- gen_line(fh, tmpbuf, 0, wmax, hmax, h, dev->mv_count,
- dev->timestr);
- memcpy(vbuf + pos, tmpbuf, wmax * 2);
- pos += wmax*2;
- }
-
- dev->mv_count++;
-
- kfree(tmpbuf);
+ for (h = 0; h < hmax; h++)
+ memcpy(vbuf + h * wmax * 2, dev->line + (dev->mv_count % wmax) * 2, wmax * 2);
/* Updates stream time */
- dev->ms += jiffies_to_msecs(jiffies-dev->jiffies);
+ dev->ms += jiffies_to_msecs(jiffies - dev->jiffies);
dev->jiffies = jiffies;
- if (dev->ms >= 1000) {
- dev->ms -= 1000;
- dev->s++;
- if (dev->s >= 60) {
- dev->s -= 60;
- dev->m++;
- if (dev->m > 60) {
- dev->m -= 60;
- dev->h++;
- if (dev->h > 24)
- dev->h -= 24;
- }
- }
- }
- sprintf(dev->timestr, "%02d:%02d:%02d:%03d",
- dev->h, dev->m, dev->s, dev->ms);
-
- dprintk(dev, 2, "vivifill at %s: Buffer 0x%08lx size= %d\n",
- dev->timestr, (unsigned long)tmpbuf, pos);
+ ms = dev->ms;
+ snprintf(str, sizeof(str), " %02d:%02d:%02d:%03d ",
+ (ms / (60 * 60 * 1000)) % 24,
+ (ms / (60 * 1000)) % 60,
+ (ms / 1000) % 60,
+ ms % 1000);
+ gen_text(dev, vbuf, line++ * 16, 16, str);
+ snprintf(str, sizeof(str), " %dx%d, input %d ",
+ dev->width, dev->height, dev->input);
+ gen_text(dev, vbuf, line++ * 16, 16, str);
+
+ snprintf(str, sizeof(str), " brightness %3d, contrast %3d, saturation %3d, hue %d ",
+ dev->brightness,
+ dev->contrast,
+ dev->saturation,
+ dev->hue);
+ gen_text(dev, vbuf, line++ * 16, 16, str);
+ snprintf(str, sizeof(str), " volume %3d ", dev->volume);
+ gen_text(dev, vbuf, line++ * 16, 16, str);
+
+ dev->mv_count += 2;
/* Advice that buffer was filled */
buf->vb.field_count++;
@@ -614,12 +489,10 @@ static void vivi_fillbuff(struct vivi_fh *fh, struct vivi_buffer *buf)
buf->vb.state = VIDEOBUF_DONE;
}
-static void vivi_thread_tick(struct vivi_fh *fh)
+static void vivi_thread_tick(struct vivi_dev *dev)
{
- struct vivi_buffer *buf;
- struct vivi_dev *dev = fh->dev;
struct vivi_dmaqueue *dma_q = &dev->vidq;
-
+ struct vivi_buffer *buf;
unsigned long flags = 0;
dprintk(dev, 1, "Thread tick\n");
@@ -642,22 +515,20 @@ static void vivi_thread_tick(struct vivi_fh *fh)
do_gettimeofday(&buf->vb.ts);
/* Fill buffer */
- vivi_fillbuff(fh, buf);
+ vivi_fillbuff(dev, buf);
dprintk(dev, 1, "filled buffer %p\n", buf);
wake_up(&buf->vb.done);
dprintk(dev, 2, "[%p/%d] wakeup\n", buf, buf->vb. i);
unlock:
spin_unlock_irqrestore(&dev->slock, flags);
- return;
}
#define frames_to_ms(frames) \
((frames * WAKE_NUMERATOR * 1000) / WAKE_DENOMINATOR)
-static void vivi_sleep(struct vivi_fh *fh)
+static void vivi_sleep(struct vivi_dev *dev)
{
- struct vivi_dev *dev = fh->dev;
struct vivi_dmaqueue *dma_q = &dev->vidq;
int timeout;
DECLARE_WAITQUEUE(wait, current);
@@ -672,7 +543,7 @@ static void vivi_sleep(struct vivi_fh *fh)
/* Calculate time to wake up */
timeout = msecs_to_jiffies(frames_to_ms(1));
- vivi_thread_tick(fh);
+ vivi_thread_tick(dev);
schedule_timeout_interruptible(timeout);
@@ -683,15 +554,14 @@ stop_task:
static int vivi_thread(void *data)
{
- struct vivi_fh *fh = data;
- struct vivi_dev *dev = fh->dev;
+ struct vivi_dev *dev = data;
dprintk(dev, 1, "thread started\n");
set_freezable();
for (;;) {
- vivi_sleep(fh);
+ vivi_sleep(dev);
if (kthread_should_stop())
break;
@@ -700,39 +570,61 @@ static int vivi_thread(void *data)
return 0;
}
-static int vivi_start_thread(struct vivi_fh *fh)
+static void vivi_start_generating(struct file *file)
{
- struct vivi_dev *dev = fh->dev;
+ struct vivi_dev *dev = video_drvdata(file);
struct vivi_dmaqueue *dma_q = &dev->vidq;
- dma_q->frame = 0;
- dma_q->ini_jiffies = jiffies;
-
dprintk(dev, 1, "%s\n", __func__);
- dma_q->kthread = kthread_run(vivi_thread, fh, "vivi");
+ if (test_and_set_bit(0, &dev->generating))
+ return;
+ file->private_data = dev;
+
+ /* Resets frame counters */
+ dev->ms = 0;
+ dev->mv_count = 0;
+ dev->jiffies = jiffies;
+
+ dma_q->frame = 0;
+ dma_q->ini_jiffies = jiffies;
+ dma_q->kthread = kthread_run(vivi_thread, dev, dev->v4l2_dev.name);
if (IS_ERR(dma_q->kthread)) {
v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
- return PTR_ERR(dma_q->kthread);
+ clear_bit(0, &dev->generating);
+ return;
}
/* Wakes thread */
wake_up_interruptible(&dma_q->wq);
dprintk(dev, 1, "returning from %s\n", __func__);
- return 0;
}
-static void vivi_stop_thread(struct vivi_dmaqueue *dma_q)
+static void vivi_stop_generating(struct file *file)
{
- struct vivi_dev *dev = container_of(dma_q, struct vivi_dev, vidq);
+ struct vivi_dev *dev = video_drvdata(file);
+ struct vivi_dmaqueue *dma_q = &dev->vidq;
dprintk(dev, 1, "%s\n", __func__);
+
+ if (!file->private_data)
+ return;
+ if (!test_and_clear_bit(0, &dev->generating))
+ return;
+
/* shutdown control thread */
if (dma_q->kthread) {
kthread_stop(dma_q->kthread);
dma_q->kthread = NULL;
}
+ videobuf_stop(&dev->vb_vidq);
+ videobuf_mmap_free(&dev->vb_vidq);
+}
+
+static int vivi_is_generating(struct vivi_dev *dev)
+{
+ return test_bit(0, &dev->generating);
}
/* ------------------------------------------------------------------
@@ -741,10 +633,9 @@ static void vivi_stop_thread(struct vivi_dmaqueue *dma_q)
static int
buffer_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size)
{
- struct vivi_fh *fh = vq->priv_data;
- struct vivi_dev *dev = fh->dev;
+ struct vivi_dev *dev = vq->priv_data;
- *size = fh->width*fh->height*2;
+ *size = dev->width * dev->height * 2;
if (0 == *count)
*count = 32;
@@ -760,49 +651,43 @@ buffer_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size)
static void free_buffer(struct videobuf_queue *vq, struct vivi_buffer *buf)
{
- struct vivi_fh *fh = vq->priv_data;
- struct vivi_dev *dev = fh->dev;
+ struct vivi_dev *dev = vq->priv_data;
dprintk(dev, 1, "%s, state: %i\n", __func__, buf->vb.state);
- if (in_interrupt())
- BUG();
-
videobuf_vmalloc_free(&buf->vb);
dprintk(dev, 1, "free_buffer: freed\n");
buf->vb.state = VIDEOBUF_NEEDS_INIT;
}
-#define norm_maxw() 1024
-#define norm_maxh() 768
static int
buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
enum v4l2_field field)
{
- struct vivi_fh *fh = vq->priv_data;
- struct vivi_dev *dev = fh->dev;
+ struct vivi_dev *dev = vq->priv_data;
struct vivi_buffer *buf = container_of(vb, struct vivi_buffer, vb);
int rc;
dprintk(dev, 1, "%s, field=%d\n", __func__, field);
- BUG_ON(NULL == fh->fmt);
+ BUG_ON(NULL == dev->fmt);
- if (fh->width < 48 || fh->width > norm_maxw() ||
- fh->height < 32 || fh->height > norm_maxh())
+ if (dev->width < 48 || dev->width > MAX_WIDTH ||
+ dev->height < 32 || dev->height > MAX_HEIGHT)
return -EINVAL;
- buf->vb.size = fh->width*fh->height*2;
- if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
+ buf->vb.size = dev->width * dev->height * 2;
+ if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
return -EINVAL;
/* These properties only change when queue is idle, see s_fmt */
- buf->fmt = fh->fmt;
- buf->vb.width = fh->width;
- buf->vb.height = fh->height;
+ buf->fmt = dev->fmt;
+ buf->vb.width = dev->width;
+ buf->vb.height = dev->height;
buf->vb.field = field;
- precalculate_bars(fh);
+ precalculate_bars(dev);
+ precalculate_line(dev);
if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
rc = videobuf_iolock(vq, &buf->vb, NULL);
@@ -811,7 +696,6 @@ buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
}
buf->vb.state = VIDEOBUF_PREPARED;
-
return 0;
fail:
@@ -822,9 +706,8 @@ fail:
static void
buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
{
- struct vivi_buffer *buf = container_of(vb, struct vivi_buffer, vb);
- struct vivi_fh *fh = vq->priv_data;
- struct vivi_dev *dev = fh->dev;
+ struct vivi_dev *dev = vq->priv_data;
+ struct vivi_buffer *buf = container_of(vb, struct vivi_buffer, vb);
struct vivi_dmaqueue *vidq = &dev->vidq;
dprintk(dev, 1, "%s\n", __func__);
@@ -836,9 +719,8 @@ buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
static void buffer_release(struct videobuf_queue *vq,
struct videobuf_buffer *vb)
{
- struct vivi_buffer *buf = container_of(vb, struct vivi_buffer, vb);
- struct vivi_fh *fh = vq->priv_data;
- struct vivi_dev *dev = (struct vivi_dev *)fh->dev;
+ struct vivi_dev *dev = vq->priv_data;
+ struct vivi_buffer *buf = container_of(vb, struct vivi_buffer, vb);
dprintk(dev, 1, "%s\n", __func__);
@@ -858,16 +740,14 @@ static struct videobuf_queue_ops vivi_video_qops = {
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct vivi_fh *fh = priv;
- struct vivi_dev *dev = fh->dev;
+ struct vivi_dev *dev = video_drvdata(file);
strcpy(cap->driver, "vivi");
strcpy(cap->card, "vivi");
strlcpy(cap->bus_info, dev->v4l2_dev.name, sizeof(cap->bus_info));
cap->version = VIVI_VERSION;
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_STREAMING |
- V4L2_CAP_READWRITE;
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING | \
+ V4L2_CAP_READWRITE;
return 0;
}
@@ -889,28 +769,25 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct vivi_fh *fh = priv;
+ struct vivi_dev *dev = video_drvdata(file);
- f->fmt.pix.width = fh->width;
- f->fmt.pix.height = fh->height;
- f->fmt.pix.field = fh->vb_vidq.field;
- f->fmt.pix.pixelformat = fh->fmt->fourcc;
+ f->fmt.pix.width = dev->width;
+ f->fmt.pix.height = dev->height;
+ f->fmt.pix.field = dev->vb_vidq.field;
+ f->fmt.pix.pixelformat = dev->fmt->fourcc;
f->fmt.pix.bytesperline =
- (f->fmt.pix.width * fh->fmt->depth) >> 3;
+ (f->fmt.pix.width * dev->fmt->depth) >> 3;
f->fmt.pix.sizeimage =
f->fmt.pix.height * f->fmt.pix.bytesperline;
-
- return (0);
+ return 0;
}
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct vivi_fh *fh = priv;
- struct vivi_dev *dev = fh->dev;
+ struct vivi_dev *dev = video_drvdata(file);
struct vivi_fmt *fmt;
enum v4l2_field field;
- unsigned int maxw, maxh;
fmt = get_format(f);
if (!fmt) {
@@ -928,113 +805,109 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
return -EINVAL;
}
- maxw = norm_maxw();
- maxh = norm_maxh();
-
f->fmt.pix.field = field;
- v4l_bound_align_image(&f->fmt.pix.width, 48, maxw, 2,
- &f->fmt.pix.height, 32, maxh, 0, 0);
+ v4l_bound_align_image(&f->fmt.pix.width, 48, MAX_WIDTH, 2,
+ &f->fmt.pix.height, 32, MAX_HEIGHT, 0, 0);
f->fmt.pix.bytesperline =
(f->fmt.pix.width * fmt->depth) >> 3;
f->fmt.pix.sizeimage =
f->fmt.pix.height * f->fmt.pix.bytesperline;
-
return 0;
}
-/*FIXME: This seems to be generic enough to be at videodev2 */
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct vivi_fh *fh = priv;
- struct videobuf_queue *q = &fh->vb_vidq;
+ struct vivi_dev *dev = video_drvdata(file);
+ struct videobuf_queue *q = &dev->vb_vidq;
- int ret = vidioc_try_fmt_vid_cap(file, fh, f);
+ int ret = vidioc_try_fmt_vid_cap(file, priv, f);
if (ret < 0)
return ret;
mutex_lock(&q->vb_lock);
- if (videobuf_queue_is_busy(&fh->vb_vidq)) {
- dprintk(fh->dev, 1, "%s queue busy\n", __func__);
+ if (vivi_is_generating(dev)) {
+ dprintk(dev, 1, "%s device busy\n", __func__);
ret = -EBUSY;
goto out;
}
- fh->fmt = get_format(f);
- fh->width = f->fmt.pix.width;
- fh->height = f->fmt.pix.height;
- fh->vb_vidq.field = f->fmt.pix.field;
- fh->type = f->type;
-
+ dev->fmt = get_format(f);
+ dev->width = f->fmt.pix.width;
+ dev->height = f->fmt.pix.height;
+ dev->vb_vidq.field = f->fmt.pix.field;
ret = 0;
out:
mutex_unlock(&q->vb_lock);
-
return ret;
}
static int vidioc_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *p)
{
- struct vivi_fh *fh = priv;
+ struct vivi_dev *dev = video_drvdata(file);
- return (videobuf_reqbufs(&fh->vb_vidq, p));
+ return videobuf_reqbufs(&dev->vb_vidq, p);
}
static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
- struct vivi_fh *fh = priv;
+ struct vivi_dev *dev = video_drvdata(file);
- return (videobuf_querybuf(&fh->vb_vidq, p));
+ return videobuf_querybuf(&dev->vb_vidq, p);
}
static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
- struct vivi_fh *fh = priv;
+ struct vivi_dev *dev = video_drvdata(file);
- return (videobuf_qbuf(&fh->vb_vidq, p));
+ return videobuf_qbuf(&dev->vb_vidq, p);
}
static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
- struct vivi_fh *fh = priv;
+ struct vivi_dev *dev = video_drvdata(file);
- return (videobuf_dqbuf(&fh->vb_vidq, p,
- file->f_flags & O_NONBLOCK));
+ return videobuf_dqbuf(&dev->vb_vidq, p,
+ file->f_flags & O_NONBLOCK);
}
#ifdef CONFIG_VIDEO_V4L1_COMPAT
static int vidiocgmbuf(struct file *file, void *priv, struct video_mbuf *mbuf)
{
- struct vivi_fh *fh = priv;
+ struct vivi_dev *dev = video_drvdata(file);
- return videobuf_cgmbuf(&fh->vb_vidq, mbuf, 8);
+ return videobuf_cgmbuf(&dev->vb_vidq, mbuf, 8);
}
#endif
static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
- struct vivi_fh *fh = priv;
+ struct vivi_dev *dev = video_drvdata(file);
+ int ret;
- if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
- if (i != fh->type)
+ if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
+ ret = videobuf_streamon(&dev->vb_vidq);
+ if (ret)
+ return ret;
- return videobuf_streamon(&fh->vb_vidq);
+ vivi_start_generating(file);
+ return 0;
}
static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
- struct vivi_fh *fh = priv;
+ struct vivi_dev *dev = video_drvdata(file);
+ int ret;
- if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
- if (i != fh->type)
+ if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
-
- return videobuf_streamoff(&fh->vb_vidq);
+ ret = videobuf_streamoff(&dev->vb_vidq);
+ if (!ret)
+ vivi_stop_generating(file);
+ return ret;
}
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *i)
@@ -1052,80 +925,104 @@ static int vidioc_enum_input(struct file *file, void *priv,
inp->type = V4L2_INPUT_TYPE_CAMERA;
inp->std = V4L2_STD_525_60;
sprintf(inp->name, "Camera %u", inp->index);
-
- return (0);
+ return 0;
}
static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
{
- struct vivi_fh *fh = priv;
- struct vivi_dev *dev = fh->dev;
+ struct vivi_dev *dev = video_drvdata(file);
*i = dev->input;
-
- return (0);
+ return 0;
}
+
static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
{
- struct vivi_fh *fh = priv;
- struct vivi_dev *dev = fh->dev;
+ struct vivi_dev *dev = video_drvdata(file);
if (i >= NUM_INPUTS)
return -EINVAL;
dev->input = i;
- precalculate_bars(fh);
-
- return (0);
+ precalculate_bars(dev);
+ precalculate_line(dev);
+ return 0;
}
- /* --- controls ---------------------------------------------- */
+/* --- controls ---------------------------------------------- */
static int vidioc_queryctrl(struct file *file, void *priv,
struct v4l2_queryctrl *qc)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(vivi_qctrl); i++)
- if (qc->id && qc->id == vivi_qctrl[i].id) {
- memcpy(qc, &(vivi_qctrl[i]),
- sizeof(*qc));
- return (0);
- }
-
+ switch (qc->id) {
+ case V4L2_CID_AUDIO_VOLUME:
+ return v4l2_ctrl_query_fill(qc, 0, 255, 1, 200);
+ case V4L2_CID_BRIGHTNESS:
+ return v4l2_ctrl_query_fill(qc, 0, 255, 1, 127);
+ case V4L2_CID_CONTRAST:
+ return v4l2_ctrl_query_fill(qc, 0, 255, 1, 16);
+ case V4L2_CID_SATURATION:
+ return v4l2_ctrl_query_fill(qc, 0, 255, 1, 127);
+ case V4L2_CID_HUE:
+ return v4l2_ctrl_query_fill(qc, -128, 127, 1, 0);
+ }
return -EINVAL;
}
static int vidioc_g_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
- struct vivi_fh *fh = priv;
- struct vivi_dev *dev = fh->dev;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(vivi_qctrl); i++)
- if (ctrl->id == vivi_qctrl[i].id) {
- ctrl->value = dev->qctl_regs[i];
- return 0;
- }
+ struct vivi_dev *dev = video_drvdata(file);
+ switch (ctrl->id) {
+ case V4L2_CID_AUDIO_VOLUME:
+ ctrl->value = dev->volume;
+ return 0;
+ case V4L2_CID_BRIGHTNESS:
+ ctrl->value = dev->brightness;
+ return 0;
+ case V4L2_CID_CONTRAST:
+ ctrl->value = dev->contrast;
+ return 0;
+ case V4L2_CID_SATURATION:
+ ctrl->value = dev->saturation;
+ return 0;
+ case V4L2_CID_HUE:
+ ctrl->value = dev->hue;
+ return 0;
+ }
return -EINVAL;
}
+
static int vidioc_s_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
- struct vivi_fh *fh = priv;
- struct vivi_dev *dev = fh->dev;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(vivi_qctrl); i++)
- if (ctrl->id == vivi_qctrl[i].id) {
- if (ctrl->value < vivi_qctrl[i].minimum ||
- ctrl->value > vivi_qctrl[i].maximum) {
- return -ERANGE;
- }
- dev->qctl_regs[i] = ctrl->value;
- return 0;
- }
+ struct vivi_dev *dev = video_drvdata(file);
+ struct v4l2_queryctrl qc;
+ int err;
+
+ qc.id = ctrl->id;
+ err = vidioc_queryctrl(file, priv, &qc);
+ if (err < 0)
+ return err;
+ if (ctrl->value < qc.minimum || ctrl->value > qc.maximum)
+ return -ERANGE;
+ switch (ctrl->id) {
+ case V4L2_CID_AUDIO_VOLUME:
+ dev->volume = ctrl->value;
+ return 0;
+ case V4L2_CID_BRIGHTNESS:
+ dev->brightness = ctrl->value;
+ return 0;
+ case V4L2_CID_CONTRAST:
+ dev->contrast = ctrl->value;
+ return 0;
+ case V4L2_CID_SATURATION:
+ dev->saturation = ctrl->value;
+ return 0;
+ case V4L2_CID_HUE:
+ dev->hue = ctrl->value;
+ return 0;
+ }
return -EINVAL;
}
@@ -1133,134 +1030,58 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
File operations for the device
------------------------------------------------------------------*/
-static int vivi_open(struct file *file)
-{
- struct vivi_dev *dev = video_drvdata(file);
- struct vivi_fh *fh = NULL;
- int retval = 0;
-
- mutex_lock(&dev->mutex);
- dev->users++;
-
- if (dev->users > 1) {
- dev->users--;
- mutex_unlock(&dev->mutex);
- return -EBUSY;
- }
-
- dprintk(dev, 1, "open %s type=%s users=%d\n",
- video_device_node_name(dev->vfd),
- v4l2_type_names[V4L2_BUF_TYPE_VIDEO_CAPTURE], dev->users);
-
- /* allocate + initialize per filehandle data */
- fh = kzalloc(sizeof(*fh), GFP_KERNEL);
- if (NULL == fh) {
- dev->users--;
- retval = -ENOMEM;
- }
- mutex_unlock(&dev->mutex);
-
- if (retval)
- return retval;
-
- file->private_data = fh;
- fh->dev = dev;
-
- fh->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- fh->fmt = &formats[0];
- fh->width = 640;
- fh->height = 480;
-
- /* Resets frame counters */
- dev->h = 0;
- dev->m = 0;
- dev->s = 0;
- dev->ms = 0;
- dev->mv_count = 0;
- dev->jiffies = jiffies;
- sprintf(dev->timestr, "%02d:%02d:%02d:%03d",
- dev->h, dev->m, dev->s, dev->ms);
-
- videobuf_queue_vmalloc_init(&fh->vb_vidq, &vivi_video_qops,
- NULL, &dev->slock, fh->type, V4L2_FIELD_INTERLACED,
- sizeof(struct vivi_buffer), fh);
-
- vivi_start_thread(fh);
-
- return 0;
-}
-
static ssize_t
vivi_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
{
- struct vivi_fh *fh = file->private_data;
+ struct vivi_dev *dev = video_drvdata(file);
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- return videobuf_read_stream(&fh->vb_vidq, data, count, ppos, 0,
+ vivi_start_generating(file);
+ return videobuf_read_stream(&dev->vb_vidq, data, count, ppos, 0,
file->f_flags & O_NONBLOCK);
- }
- return 0;
}
static unsigned int
vivi_poll(struct file *file, struct poll_table_struct *wait)
{
- struct vivi_fh *fh = file->private_data;
- struct vivi_dev *dev = fh->dev;
- struct videobuf_queue *q = &fh->vb_vidq;
+ struct vivi_dev *dev = video_drvdata(file);
+ struct videobuf_queue *q = &dev->vb_vidq;
dprintk(dev, 1, "%s\n", __func__);
- if (V4L2_BUF_TYPE_VIDEO_CAPTURE != fh->type)
- return POLLERR;
-
+ vivi_start_generating(file);
return videobuf_poll_stream(file, q, wait);
}
static int vivi_close(struct file *file)
{
- struct vivi_fh *fh = file->private_data;
- struct vivi_dev *dev = fh->dev;
- struct vivi_dmaqueue *vidq = &dev->vidq;
struct video_device *vdev = video_devdata(file);
+ struct vivi_dev *dev = video_drvdata(file);
- vivi_stop_thread(vidq);
- videobuf_stop(&fh->vb_vidq);
- videobuf_mmap_free(&fh->vb_vidq);
-
- kfree(fh);
-
- mutex_lock(&dev->mutex);
- dev->users--;
- mutex_unlock(&dev->mutex);
-
- dprintk(dev, 1, "close called (dev=%s, users=%d)\n",
- video_device_node_name(vdev), dev->users);
+ vivi_stop_generating(file);
+ dprintk(dev, 1, "close called (dev=%s)\n",
+ video_device_node_name(vdev));
return 0;
}
static int vivi_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct vivi_fh *fh = file->private_data;
- struct vivi_dev *dev = fh->dev;
+ struct vivi_dev *dev = video_drvdata(file);
int ret;
dprintk(dev, 1, "mmap called, vma=0x%08lx\n", (unsigned long)vma);
- ret = videobuf_mmap_mapper(&fh->vb_vidq, vma);
+ ret = videobuf_mmap_mapper(&dev->vb_vidq, vma);
dprintk(dev, 1, "vma start=0x%08lx, size=%ld, ret=%d\n",
(unsigned long)vma->vm_start,
- (unsigned long)vma->vm_end-(unsigned long)vma->vm_start,
+ (unsigned long)vma->vm_end - (unsigned long)vma->vm_start,
ret);
-
return ret;
}
static const struct v4l2_file_operations vivi_fops = {
.owner = THIS_MODULE,
- .open = vivi_open,
.release = vivi_close,
.read = vivi_read,
.poll = vivi_poll,
@@ -1282,11 +1103,11 @@ static const struct v4l2_ioctl_ops vivi_ioctl_ops = {
.vidioc_enum_input = vidioc_enum_input,
.vidioc_g_input = vidioc_g_input,
.vidioc_s_input = vidioc_s_input,
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
.vidioc_queryctrl = vidioc_queryctrl,
.vidioc_g_ctrl = vidioc_g_ctrl,
.vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_streamon = vidioc_streamon,
- .vidioc_streamoff = vidioc_streamoff,
#ifdef CONFIG_VIDEO_V4L1_COMPAT
.vidiocgmbuf = vidiocgmbuf,
#endif
@@ -1330,7 +1151,7 @@ static int __init vivi_create_instance(int inst)
{
struct vivi_dev *dev;
struct video_device *vfd;
- int ret, i;
+ int ret;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
@@ -1342,6 +1163,20 @@ static int __init vivi_create_instance(int inst)
if (ret)
goto free_dev;
+ dev->fmt = &formats[0];
+ dev->width = 640;
+ dev->height = 480;
+ dev->volume = 200;
+ dev->brightness = 127;
+ dev->contrast = 16;
+ dev->saturation = 127;
+ dev->hue = 0;
+
+ videobuf_queue_vmalloc_init(&dev->vb_vidq, &vivi_video_qops,
+ NULL, &dev->slock, V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ V4L2_FIELD_INTERLACED,
+ sizeof(struct vivi_buffer), dev);
+
/* init video dma queues */
INIT_LIST_HEAD(&dev->vidq.active);
init_waitqueue_head(&dev->vidq.wq);
@@ -1357,6 +1192,7 @@ static int __init vivi_create_instance(int inst)
*vfd = vivi_template;
vfd->debug = debug;
+ vfd->v4l2_dev = &dev->v4l2_dev;
ret = video_register_device(vfd, VFL_TYPE_GRABBER, video_nr);
if (ret < 0)
@@ -1364,10 +1200,6 @@ static int __init vivi_create_instance(int inst)
video_set_drvdata(vfd, dev);
- /* Set all controls to their default value. */
- for (i = 0; i < ARRAY_SIZE(vivi_qctrl); i++)
- dev->qctl_regs[i] = vivi_qctrl[i].default_value;
-
/* Now that everything is fine, let's add it to device list */
list_add_tail(&dev->vivi_devlist, &vivi_devlist);
@@ -1396,8 +1228,15 @@ free_dev:
*/
static int __init vivi_init(void)
{
+ const struct font_desc *font = find_font("VGA8x16");
int ret = 0, i;
+ if (font == NULL) {
+ printk(KERN_ERR "vivi: could not find font\n");
+ return -ENODEV;
+ }
+ font8x16 = font->data;
+
if (n_devs <= 0)
n_devs = 1;
@@ -1412,7 +1251,7 @@ static int __init vivi_init(void)
}
if (ret < 0) {
- printk(KERN_INFO "Error %d while loading vivi driver\n", ret);
+ printk(KERN_ERR "vivi: error %d while loading driver\n", ret);
return ret;
}
diff --git a/drivers/media/video/w9966.c b/drivers/media/video/w9966.c
index bf9bf650a317..635420d8d84a 100644
--- a/drivers/media/video/w9966.c
+++ b/drivers/media/video/w9966.c
@@ -1,7 +1,7 @@
/*
Winbond w9966cf Webcam parport driver.
- Version 0.32
+ Version 0.33
Copyright (C) 2001 Jakob Kemi <jakob.kemi@post.utfors.se>
@@ -57,10 +57,12 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
-#include <linux/videodev.h>
+#include <linux/version.h>
+#include <linux/videodev2.h>
#include <linux/slab.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-device.h>
#include <linux/parport.h>
/*#define DEBUG*/ /* Undef me for production */
@@ -76,12 +78,12 @@
*/
#define W9966_DRIVERNAME "W9966CF Webcam"
-#define W9966_MAXCAMS 4 // Maximum number of cameras
-#define W9966_RBUFFER 2048 // Read buffer (must be an even number)
-#define W9966_SRAMSIZE 131072 // 128kb
-#define W9966_SRAMID 0x02 // check w9966cf.pdf
+#define W9966_MAXCAMS 4 /* Maximum number of cameras */
+#define W9966_RBUFFER 2048 /* Read buffer (must be an even number) */
+#define W9966_SRAMSIZE 131072 /* 128kb */
+#define W9966_SRAMID 0x02 /* check w9966cf.pdf */
-// Empirically determined window limits
+/* Empirically determined window limits */
#define W9966_WND_MIN_X 16
#define W9966_WND_MIN_Y 14
#define W9966_WND_MAX_X 705
@@ -89,7 +91,7 @@
#define W9966_WND_MAX_W (W9966_WND_MAX_X - W9966_WND_MIN_X)
#define W9966_WND_MAX_H (W9966_WND_MAX_Y - W9966_WND_MIN_Y)
-// Keep track of our current state
+/* Keep track of our current state */
#define W9966_STATE_PDEV 0x01
#define W9966_STATE_CLAIMED 0x02
#define W9966_STATE_VDEV 0x04
@@ -101,12 +103,13 @@
#define W9966_I2C_W_DATA 0x02
#define W9966_I2C_W_CLOCK 0x01
-struct w9966_dev {
+struct w9966 {
+ struct v4l2_device v4l2_dev;
unsigned char dev_state;
unsigned char i2c_state;
unsigned short ppmode;
- struct parport* pport;
- struct pardevice* pdev;
+ struct parport *pport;
+ struct pardevice *pdev;
struct video_device vdev;
unsigned short width;
unsigned short height;
@@ -114,7 +117,7 @@ struct w9966_dev {
signed char contrast;
signed char color;
signed char hue;
- unsigned long in_use;
+ struct mutex lock;
};
/*
@@ -127,15 +130,15 @@ MODULE_LICENSE("GPL");
#ifdef MODULE
-static const char* pardev[] = {[0 ... W9966_MAXCAMS] = ""};
+static const char *pardev[] = {[0 ... W9966_MAXCAMS] = ""};
#else
-static const char* pardev[] = {[0 ... W9966_MAXCAMS] = "aggressive"};
+static const char *pardev[] = {[0 ... W9966_MAXCAMS] = "aggressive"};
#endif
module_param_array(pardev, charp, NULL, 0);
-MODULE_PARM_DESC(pardev, "pardev: where to search for\n\
-\teach camera. 'aggressive' means brute-force search.\n\
-\tEg: >pardev=parport3,aggressive,parport2,parport1< would assign\n\
-\tcam 1 to parport3 and search every parport for cam 2 etc...");
+MODULE_PARM_DESC(pardev, "pardev: where to search for\n"
+ "\teach camera. 'aggressive' means brute-force search.\n"
+ "\tEg: >pardev=parport3,aggressive,parport2,parport1< would assign\n"
+ "\tcam 1 to parport3 and search every parport for cam 2 etc...");
static int parmode;
module_param(parmode, int, 0);
@@ -144,117 +147,49 @@ MODULE_PARM_DESC(parmode, "parmode: transfer mode (0=auto, 1=ecp, 2=epp");
static int video_nr = -1;
module_param(video_nr, int, 0);
-/*
- * Private data
- */
-
-static struct w9966_dev w9966_cams[W9966_MAXCAMS];
-
-/*
- * Private function declares
- */
-
-static inline void w9966_setState(struct w9966_dev* cam, int mask, int val);
-static inline int w9966_getState(struct w9966_dev* cam, int mask, int val);
-static inline void w9966_pdev_claim(struct w9966_dev *vdev);
-static inline void w9966_pdev_release(struct w9966_dev *vdev);
-
-static int w9966_rReg(struct w9966_dev* cam, int reg);
-static int w9966_wReg(struct w9966_dev* cam, int reg, int data);
-#if 0
-static int w9966_rReg_i2c(struct w9966_dev* cam, int reg);
-#endif
-static int w9966_wReg_i2c(struct w9966_dev* cam, int reg, int data);
-static int w9966_findlen(int near, int size, int maxlen);
-static int w9966_calcscale(int size, int min, int max, int* beg, int* end, unsigned char* factor);
-static int w9966_setup(struct w9966_dev* cam, int x1, int y1, int x2, int y2, int w, int h);
-
-static int w9966_init(struct w9966_dev* cam, struct parport* port);
-static void w9966_term(struct w9966_dev* cam);
-
-static inline void w9966_i2c_setsda(struct w9966_dev* cam, int state);
-static inline int w9966_i2c_setscl(struct w9966_dev* cam, int state);
-static inline int w9966_i2c_getsda(struct w9966_dev* cam);
-static inline int w9966_i2c_getscl(struct w9966_dev* cam);
-static int w9966_i2c_wbyte(struct w9966_dev* cam, int data);
-#if 0
-static int w9966_i2c_rbyte(struct w9966_dev* cam);
-#endif
-
-static long w9966_v4l_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg);
-static ssize_t w9966_v4l_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos);
-
-static int w9966_exclusive_open(struct file *file)
-{
- struct w9966_dev *cam = video_drvdata(file);
-
- return test_and_set_bit(0, &cam->in_use) ? -EBUSY : 0;
-}
-
-static int w9966_exclusive_release(struct file *file)
-{
- struct w9966_dev *cam = video_drvdata(file);
-
- clear_bit(0, &cam->in_use);
- return 0;
-}
-
-static const struct v4l2_file_operations w9966_fops = {
- .owner = THIS_MODULE,
- .open = w9966_exclusive_open,
- .release = w9966_exclusive_release,
- .ioctl = w9966_v4l_ioctl,
- .read = w9966_v4l_read,
-};
-static struct video_device w9966_template = {
- .name = W9966_DRIVERNAME,
- .fops = &w9966_fops,
- .release = video_device_release_empty,
-};
+static struct w9966 w9966_cams[W9966_MAXCAMS];
/*
* Private function defines
*/
-// Set camera phase flags, so we know what to uninit when terminating
-static inline void w9966_setState(struct w9966_dev* cam, int mask, int val)
+/* Set camera phase flags, so we know what to uninit when terminating */
+static inline void w9966_set_state(struct w9966 *cam, int mask, int val)
{
cam->dev_state = (cam->dev_state & ~mask) ^ val;
}
-// Get camera phase flags
-static inline int w9966_getState(struct w9966_dev* cam, int mask, int val)
+/* Get camera phase flags */
+static inline int w9966_get_state(struct w9966 *cam, int mask, int val)
{
return ((cam->dev_state & mask) == val);
}
-// Claim parport for ourself
-static inline void w9966_pdev_claim(struct w9966_dev* cam)
+/* Claim parport for ourselves */
+static void w9966_pdev_claim(struct w9966 *cam)
{
- if (w9966_getState(cam, W9966_STATE_CLAIMED, W9966_STATE_CLAIMED))
+ if (w9966_get_state(cam, W9966_STATE_CLAIMED, W9966_STATE_CLAIMED))
return;
parport_claim_or_block(cam->pdev);
- w9966_setState(cam, W9966_STATE_CLAIMED, W9966_STATE_CLAIMED);
+ w9966_set_state(cam, W9966_STATE_CLAIMED, W9966_STATE_CLAIMED);
}
-// Release parport for others to use
-static inline void w9966_pdev_release(struct w9966_dev* cam)
+/* Release parport for others to use */
+static void w9966_pdev_release(struct w9966 *cam)
{
- if (w9966_getState(cam, W9966_STATE_CLAIMED, 0))
+ if (w9966_get_state(cam, W9966_STATE_CLAIMED, 0))
return;
parport_release(cam->pdev);
- w9966_setState(cam, W9966_STATE_CLAIMED, 0);
+ w9966_set_state(cam, W9966_STATE_CLAIMED, 0);
}
-// Read register from W9966 interface-chip
-// Expects a claimed pdev
-// -1 on error, else register data (byte)
-static int w9966_rReg(struct w9966_dev* cam, int reg)
+/* Read register from W9966 interface-chip
+ Expects a claimed pdev
+ -1 on error, else register data (byte) */
+static int w9966_read_reg(struct w9966 *cam, int reg)
{
- // ECP, read, regtransfer, REG, REG, REG, REG, REG
+ /* ECP, read, regtransfer, REG, REG, REG, REG, REG */
const unsigned char addr = 0x80 | (reg & 0x1f);
unsigned char val;
@@ -270,12 +205,12 @@ static int w9966_rReg(struct w9966_dev* cam, int reg)
return val;
}
-// Write register to W9966 interface-chip
-// Expects a claimed pdev
-// -1 on error
-static int w9966_wReg(struct w9966_dev* cam, int reg, int data)
+/* Write register to W9966 interface-chip
+ Expects a claimed pdev
+ -1 on error */
+static int w9966_write_reg(struct w9966 *cam, int reg, int data)
{
- // ECP, write, regtransfer, REG, REG, REG, REG, REG
+ /* ECP, write, regtransfer, REG, REG, REG, REG, REG */
const unsigned char addr = 0xc0 | (reg & 0x1f);
const unsigned char val = data;
@@ -291,119 +226,186 @@ static int w9966_wReg(struct w9966_dev* cam, int reg, int data)
return 0;
}
-// Initialize camera device. Setup all internal flags, set a
-// default video mode, setup ccd-chip, register v4l device etc..
-// Also used for 'probing' of hardware.
-// -1 on error
-static int w9966_init(struct w9966_dev* cam, struct parport* port)
+/*
+ * Ugly and primitive i2c protocol functions
+ */
+
+/* Sets the data line on the i2c bus.
+ Expects a claimed pdev. */
+static void w9966_i2c_setsda(struct w9966 *cam, int state)
{
- if (cam->dev_state != 0)
- return -1;
+ if (state)
+ cam->i2c_state |= W9966_I2C_W_DATA;
+ else
+ cam->i2c_state &= ~W9966_I2C_W_DATA;
- cam->pport = port;
- cam->brightness = 128;
- cam->contrast = 64;
- cam->color = 64;
- cam->hue = 0;
+ w9966_write_reg(cam, 0x18, cam->i2c_state);
+ udelay(5);
+}
-// Select requested transfer mode
- switch(parmode)
- {
- default: // Auto-detect (priority: hw-ecp, hw-epp, sw-ecp)
- case 0:
- if (port->modes & PARPORT_MODE_ECP)
- cam->ppmode = IEEE1284_MODE_ECP;
- else if (port->modes & PARPORT_MODE_EPP)
- cam->ppmode = IEEE1284_MODE_EPP;
- else
- cam->ppmode = IEEE1284_MODE_ECP;
- break;
- case 1: // hw- or sw-ecp
- cam->ppmode = IEEE1284_MODE_ECP;
- break;
- case 2: // hw- or sw-epp
- cam->ppmode = IEEE1284_MODE_EPP;
- break;
+/* Get peripheral clock line
+ Expects a claimed pdev. */
+static int w9966_i2c_getscl(struct w9966 *cam)
+{
+ const unsigned char state = w9966_read_reg(cam, 0x18);
+ return ((state & W9966_I2C_R_CLOCK) > 0);
+}
+
+/* Sets the clock line on the i2c bus.
+ Expects a claimed pdev. -1 on error */
+static int w9966_i2c_setscl(struct w9966 *cam, int state)
+{
+ unsigned long timeout;
+
+ if (state)
+ cam->i2c_state |= W9966_I2C_W_CLOCK;
+ else
+ cam->i2c_state &= ~W9966_I2C_W_CLOCK;
+
+ w9966_write_reg(cam, 0x18, cam->i2c_state);
+ udelay(5);
+
+	/* when we go high, we also expect the peripheral to ack. */
+ if (state) {
+ timeout = jiffies + 100;
+ while (!w9966_i2c_getscl(cam)) {
+ if (time_after(jiffies, timeout))
+ return -1;
+ }
}
+ return 0;
+}
-// Tell the parport driver that we exists
- cam->pdev = parport_register_device(port, "w9966", NULL, NULL, NULL, 0, NULL);
- if (cam->pdev == NULL) {
- DPRINTF("parport_register_device() failed\n");
- return -1;
+#if 0
+/* Get peripheral data line
+ Expects a claimed pdev. */
+static int w9966_i2c_getsda(struct w9966 *cam)
+{
+ const unsigned char state = w9966_read_reg(cam, 0x18);
+ return ((state & W9966_I2C_R_DATA) > 0);
+}
+#endif
+
+/* Write a byte with ack to the i2c bus.
+ Expects a claimed pdev. -1 on error */
+static int w9966_i2c_wbyte(struct w9966 *cam, int data)
+{
+ int i;
+
+ for (i = 7; i >= 0; i--) {
+ w9966_i2c_setsda(cam, (data >> i) & 0x01);
+
+ if (w9966_i2c_setscl(cam, 1) == -1)
+ return -1;
+ w9966_i2c_setscl(cam, 0);
}
- w9966_setState(cam, W9966_STATE_PDEV, W9966_STATE_PDEV);
- w9966_pdev_claim(cam);
+ w9966_i2c_setsda(cam, 1);
-// Setup a default capture mode
- if (w9966_setup(cam, 0, 0, 1023, 1023, 200, 160) != 0) {
- DPRINTF("w9966_setup() failed.\n");
+ if (w9966_i2c_setscl(cam, 1) == -1)
return -1;
+ w9966_i2c_setscl(cam, 0);
+
+ return 0;
+}
+
+/* Read a data byte with ack from the i2c-bus
+ Expects a claimed pdev. -1 on error */
+#if 0
+static int w9966_i2c_rbyte(struct w9966 *cam)
+{
+ unsigned char data = 0x00;
+ int i;
+
+ w9966_i2c_setsda(cam, 1);
+
+ for (i = 0; i < 8; i++) {
+ if (w9966_i2c_setscl(cam, 1) == -1)
+ return -1;
+ data = data << 1;
+ if (w9966_i2c_getsda(cam))
+ data |= 0x01;
+
+ w9966_i2c_setscl(cam, 0);
}
+ return data;
+}
+#endif
- w9966_pdev_release(cam);
+/* Read a register from the i2c device.
+ Expects claimed pdev. -1 on error */
+#if 0
+static int w9966_read_reg_i2c(struct w9966 *cam, int reg)
+{
+ int data;
-// Fill in the video_device struct and register us to v4l
- memcpy(&cam->vdev, &w9966_template, sizeof(struct video_device));
- video_set_drvdata(&cam->vdev, cam);
+ w9966_i2c_setsda(cam, 0);
+ w9966_i2c_setscl(cam, 0);
- if (video_register_device(&cam->vdev, VFL_TYPE_GRABBER, video_nr) < 0)
+ if (w9966_i2c_wbyte(cam, W9966_I2C_W_ID) == -1 ||
+ w9966_i2c_wbyte(cam, reg) == -1)
+ return -1;
+
+ w9966_i2c_setsda(cam, 1);
+ if (w9966_i2c_setscl(cam, 1) == -1)
return -1;
+ w9966_i2c_setsda(cam, 0);
+ w9966_i2c_setscl(cam, 0);
- w9966_setState(cam, W9966_STATE_VDEV, W9966_STATE_VDEV);
+ if (w9966_i2c_wbyte(cam, W9966_I2C_R_ID) == -1)
+ return -1;
+ data = w9966_i2c_rbyte(cam);
+ if (data == -1)
+ return -1;
- // All ok
- printk(
- "w9966cf: Found and initialized a webcam on %s.\n",
- cam->pport->name
- );
- return 0;
-}
+ w9966_i2c_setsda(cam, 0);
+ if (w9966_i2c_setscl(cam, 1) == -1)
+ return -1;
+ w9966_i2c_setsda(cam, 1);
-// Terminate everything gracefully
-static void w9966_term(struct w9966_dev* cam)
+ return data;
+}
+#endif
+
+/* Write a register to the i2c device.
+ Expects claimed pdev. -1 on error */
+static int w9966_write_reg_i2c(struct w9966 *cam, int reg, int data)
{
-// Unregister from v4l
- if (w9966_getState(cam, W9966_STATE_VDEV, W9966_STATE_VDEV)) {
- video_unregister_device(&cam->vdev);
- w9966_setState(cam, W9966_STATE_VDEV, 0);
- }
+ w9966_i2c_setsda(cam, 0);
+ w9966_i2c_setscl(cam, 0);
-// Terminate from IEEE1284 mode and release pdev block
- if (w9966_getState(cam, W9966_STATE_PDEV, W9966_STATE_PDEV)) {
- w9966_pdev_claim(cam);
- parport_negotiate(cam->pport, IEEE1284_MODE_COMPAT);
- w9966_pdev_release(cam);
- }
+ if (w9966_i2c_wbyte(cam, W9966_I2C_W_ID) == -1 ||
+ w9966_i2c_wbyte(cam, reg) == -1 ||
+ w9966_i2c_wbyte(cam, data) == -1)
+ return -1;
-// Unregister from parport
- if (w9966_getState(cam, W9966_STATE_PDEV, W9966_STATE_PDEV)) {
- parport_unregister_device(cam->pdev);
- w9966_setState(cam, W9966_STATE_PDEV, 0);
- }
-}
+ w9966_i2c_setsda(cam, 0);
+ if (w9966_i2c_setscl(cam, 1) == -1)
+ return -1;
+
+ w9966_i2c_setsda(cam, 1);
+ return 0;
+}
-// Find a good length for capture window (used both for W and H)
-// A bit ugly but pretty functional. The capture length
-// have to match the downscale
+/* Find a good length for capture window (used both for W and H)
+ A bit ugly but pretty functional. The capture length
+   has to match the downscale */
static int w9966_findlen(int near, int size, int maxlen)
{
int bestlen = size;
int besterr = abs(near - bestlen);
int len;
- for(len = size+1;len < maxlen;len++)
- {
+ for (len = size + 1; len < maxlen; len++) {
int err;
- if ( ((64*size) %len) != 0)
+ if (((64 * size) % len) != 0)
continue;
err = abs(near - len);
- // Only continue as long as we keep getting better values
+ /* Only continue as long as we keep getting better values */
if (err > besterr)
break;
@@ -414,32 +416,32 @@ static int w9966_findlen(int near, int size, int maxlen)
return bestlen;
}
-// Modify capture window (if necessary)
-// and calculate downscaling
-// Return -1 on error
-static int w9966_calcscale(int size, int min, int max, int* beg, int* end, unsigned char* factor)
+/* Modify capture window (if necessary)
+ and calculate downscaling
+ Return -1 on error */
+static int w9966_calcscale(int size, int min, int max, int *beg, int *end, unsigned char *factor)
{
int maxlen = max - min;
int len = *end - *beg + 1;
int newlen = w9966_findlen(len, size, maxlen);
int err = newlen - len;
- // Check for bad format
+ /* Check for bad format */
if (newlen > maxlen || newlen < size)
return -1;
- // Set factor (6 bit fixed)
- *factor = (64*size) / newlen;
+ /* Set factor (6 bit fixed) */
+ *factor = (64 * size) / newlen;
if (*factor == 64)
- *factor = 0x00; // downscale is disabled
+ *factor = 0x00; /* downscale is disabled */
else
- *factor |= 0x80; // set downscale-enable bit
+ *factor |= 0x80; /* set downscale-enable bit */
- // Modify old beginning and end
+ /* Modify old beginning and end */
*beg -= err / 2;
*end += err - (err / 2);
- // Move window if outside borders
+ /* Move window if outside borders */
if (*beg < min) {
*end += min - *beg;
*beg += min - *beg;
@@ -452,10 +454,10 @@ static int w9966_calcscale(int size, int min, int max, int* beg, int* end, unsig
return 0;
}
-// Setup the cameras capture window etc.
-// Expects a claimed pdev
-// return -1 on error
-static int w9966_setup(struct w9966_dev* cam, int x1, int y1, int x2, int y2, int w, int h)
+/* Set up the camera's capture window etc.
+ Expects a claimed pdev
+ return -1 on error */
+static int w9966_setup(struct w9966 *cam, int x1, int y1, int x2, int y2, int w, int h)
{
unsigned int i;
unsigned int enh_s, enh_e;
@@ -469,443 +471,314 @@ static int w9966_setup(struct w9966_dev* cam, int x1, int y1, int x2, int y2, in
};
- if (w*h*2 > W9966_SRAMSIZE)
- {
+ if (w * h * 2 > W9966_SRAMSIZE) {
DPRINTF("capture window exceeds SRAM size!.\n");
- w = 200; h = 160; // Pick default values
+ w = 200; h = 160; /* Pick default values */
}
w &= ~0x1;
- if (w < 2) w = 2;
- if (h < 1) h = 1;
- if (w > W9966_WND_MAX_W) w = W9966_WND_MAX_W;
- if (h > W9966_WND_MAX_H) h = W9966_WND_MAX_H;
+ if (w < 2)
+ w = 2;
+ if (h < 1)
+ h = 1;
+ if (w > W9966_WND_MAX_W)
+ w = W9966_WND_MAX_W;
+ if (h > W9966_WND_MAX_H)
+ h = W9966_WND_MAX_H;
cam->width = w;
cam->height = h;
enh_s = 0;
- enh_e = w*h*2;
-
-// Modify capture window if necessary and calculate downscaling
- if (
- w9966_calcscale(w, W9966_WND_MIN_X, W9966_WND_MAX_X, &x1, &x2, &scale_x) != 0 ||
- w9966_calcscale(h, W9966_WND_MIN_Y, W9966_WND_MAX_Y, &y1, &y2, &scale_y) != 0
- ) return -1;
-
- DPRINTF(
- "%dx%d, x: %d<->%d, y: %d<->%d, sx: %d/64, sy: %d/64.\n",
- w, h, x1, x2, y1, y2, scale_x&~0x80, scale_y&~0x80
- );
-
-// Setup registers
- regs[0x00] = 0x00; // Set normal operation
- regs[0x01] = 0x18; // Capture mode
- regs[0x02] = scale_y; // V-scaling
- regs[0x03] = scale_x; // H-scaling
-
- // Capture window
- regs[0x04] = (x1 & 0x0ff); // X-start (8 low bits)
- regs[0x05] = (x1 & 0x300)>>8; // X-start (2 high bits)
- regs[0x06] = (y1 & 0x0ff); // Y-start (8 low bits)
- regs[0x07] = (y1 & 0x300)>>8; // Y-start (2 high bits)
- regs[0x08] = (x2 & 0x0ff); // X-end (8 low bits)
- regs[0x09] = (x2 & 0x300)>>8; // X-end (2 high bits)
- regs[0x0a] = (y2 & 0x0ff); // Y-end (8 low bits)
-
- regs[0x0c] = W9966_SRAMID; // SRAM-banks (1x 128kb)
-
- // Enhancement layer
- regs[0x0d] = (enh_s& 0x000ff); // Enh. start (0-7)
- regs[0x0e] = (enh_s& 0x0ff00)>>8; // Enh. start (8-15)
- regs[0x0f] = (enh_s& 0x70000)>>16; // Enh. start (16-17/18??)
- regs[0x10] = (enh_e& 0x000ff); // Enh. end (0-7)
- regs[0x11] = (enh_e& 0x0ff00)>>8; // Enh. end (8-15)
- regs[0x12] = (enh_e& 0x70000)>>16; // Enh. end (16-17/18??)
-
- // Misc
- regs[0x13] = 0x40; // VEE control (raw 4:2:2)
- regs[0x17] = 0x00; // ???
- regs[0x18] = cam->i2c_state = 0x00; // Serial bus
- regs[0x19] = 0xff; // I/O port direction control
- regs[0x1a] = 0xff; // I/O port data register
- regs[0x1b] = 0x10; // ???
-
- // SAA7111 chip settings
+ enh_e = w * h * 2;
+
+ /* Modify capture window if necessary and calculate downscaling */
+ if (w9966_calcscale(w, W9966_WND_MIN_X, W9966_WND_MAX_X, &x1, &x2, &scale_x) != 0 ||
+ w9966_calcscale(h, W9966_WND_MIN_Y, W9966_WND_MAX_Y, &y1, &y2, &scale_y) != 0)
+ return -1;
+
+ DPRINTF("%dx%d, x: %d<->%d, y: %d<->%d, sx: %d/64, sy: %d/64.\n",
+ w, h, x1, x2, y1, y2, scale_x & ~0x80, scale_y & ~0x80);
+
+ /* Setup registers */
+ regs[0x00] = 0x00; /* Set normal operation */
+ regs[0x01] = 0x18; /* Capture mode */
+ regs[0x02] = scale_y; /* V-scaling */
+ regs[0x03] = scale_x; /* H-scaling */
+
+ /* Capture window */
+ regs[0x04] = (x1 & 0x0ff); /* X-start (8 low bits) */
+ regs[0x05] = (x1 & 0x300)>>8; /* X-start (2 high bits) */
+ regs[0x06] = (y1 & 0x0ff); /* Y-start (8 low bits) */
+ regs[0x07] = (y1 & 0x300)>>8; /* Y-start (2 high bits) */
+ regs[0x08] = (x2 & 0x0ff); /* X-end (8 low bits) */
+ regs[0x09] = (x2 & 0x300)>>8; /* X-end (2 high bits) */
+ regs[0x0a] = (y2 & 0x0ff); /* Y-end (8 low bits) */
+
+ regs[0x0c] = W9966_SRAMID; /* SRAM-banks (1x 128kb) */
+
+ /* Enhancement layer */
+ regs[0x0d] = (enh_s & 0x000ff); /* Enh. start (0-7) */
+ regs[0x0e] = (enh_s & 0x0ff00) >> 8; /* Enh. start (8-15) */
+ regs[0x0f] = (enh_s & 0x70000) >> 16; /* Enh. start (16-17/18??) */
+ regs[0x10] = (enh_e & 0x000ff); /* Enh. end (0-7) */
+ regs[0x11] = (enh_e & 0x0ff00) >> 8; /* Enh. end (8-15) */
+ regs[0x12] = (enh_e & 0x70000) >> 16; /* Enh. end (16-17/18??) */
+
+ /* Misc */
+ regs[0x13] = 0x40; /* VEE control (raw 4:2:2) */
+ regs[0x17] = 0x00; /* ??? */
+ regs[0x18] = cam->i2c_state = 0x00; /* Serial bus */
+ regs[0x19] = 0xff; /* I/O port direction control */
+ regs[0x1a] = 0xff; /* I/O port data register */
+ regs[0x1b] = 0x10; /* ??? */
+
+ /* SAA7111 chip settings */
saa7111_regs[0x0a] = cam->brightness;
saa7111_regs[0x0b] = cam->contrast;
saa7111_regs[0x0c] = cam->color;
saa7111_regs[0x0d] = cam->hue;
-// Reset (ECP-fifo & serial-bus)
- if (w9966_wReg(cam, 0x00, 0x03) == -1)
+ /* Reset (ECP-fifo & serial-bus) */
+ if (w9966_write_reg(cam, 0x00, 0x03) == -1)
return -1;
-// Write regs to w9966cf chip
+ /* Write regs to w9966cf chip */
for (i = 0; i < 0x1c; i++)
- if (w9966_wReg(cam, i, regs[i]) == -1)
+ if (w9966_write_reg(cam, i, regs[i]) == -1)
return -1;
-// Write regs to saa7111 chip
+ /* Write regs to saa7111 chip */
for (i = 0; i < 0x20; i++)
- if (w9966_wReg_i2c(cam, i, saa7111_regs[i]) == -1)
+ if (w9966_write_reg_i2c(cam, i, saa7111_regs[i]) == -1)
return -1;
return 0;
}
/*
- * Ugly and primitive i2c protocol functions
+ * Video4linux interfacing
*/
-// Sets the data line on the i2c bus.
-// Expects a claimed pdev.
-static inline void w9966_i2c_setsda(struct w9966_dev* cam, int state)
+static int cam_querycap(struct file *file, void *priv,
+ struct v4l2_capability *vcap)
{
- if (state)
- cam->i2c_state |= W9966_I2C_W_DATA;
- else
- cam->i2c_state &= ~W9966_I2C_W_DATA;
+ struct w9966 *cam = video_drvdata(file);
- w9966_wReg(cam, 0x18, cam->i2c_state);
- udelay(5);
+ strlcpy(vcap->driver, cam->v4l2_dev.name, sizeof(vcap->driver));
+ strlcpy(vcap->card, W9966_DRIVERNAME, sizeof(vcap->card));
+ strlcpy(vcap->bus_info, "parport", sizeof(vcap->bus_info));
+ vcap->version = KERNEL_VERSION(0, 33, 0);
+ vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
+ return 0;
}
-// Get peripheral clock line
-// Expects a claimed pdev.
-static inline int w9966_i2c_getscl(struct w9966_dev* cam)
+static int cam_enum_input(struct file *file, void *fh, struct v4l2_input *vin)
{
- const unsigned char state = w9966_rReg(cam, 0x18);
- return ((state & W9966_I2C_R_CLOCK) > 0);
+ if (vin->index > 0)
+ return -EINVAL;
+ strlcpy(vin->name, "Camera", sizeof(vin->name));
+ vin->type = V4L2_INPUT_TYPE_CAMERA;
+ vin->audioset = 0;
+ vin->tuner = 0;
+ vin->std = 0;
+ vin->status = 0;
+ return 0;
}
-// Sets the clock line on the i2c bus.
-// Expects a claimed pdev. -1 on error
-static inline int w9966_i2c_setscl(struct w9966_dev* cam, int state)
+static int cam_g_input(struct file *file, void *fh, unsigned int *inp)
{
- unsigned long timeout;
-
- if (state)
- cam->i2c_state |= W9966_I2C_W_CLOCK;
- else
- cam->i2c_state &= ~W9966_I2C_W_CLOCK;
-
- w9966_wReg(cam, 0x18, cam->i2c_state);
- udelay(5);
-
- // we go to high, we also expect the peripheral to ack.
- if (state) {
- timeout = jiffies + 100;
- while (!w9966_i2c_getscl(cam)) {
- if (time_after(jiffies, timeout))
- return -1;
- }
- }
+ *inp = 0;
return 0;
}
-// Get peripheral data line
-// Expects a claimed pdev.
-static inline int w9966_i2c_getsda(struct w9966_dev* cam)
+static int cam_s_input(struct file *file, void *fh, unsigned int inp)
{
- const unsigned char state = w9966_rReg(cam, 0x18);
- return ((state & W9966_I2C_R_DATA) > 0);
+ return (inp > 0) ? -EINVAL : 0;
}
-// Write a byte with ack to the i2c bus.
-// Expects a claimed pdev. -1 on error
-static int w9966_i2c_wbyte(struct w9966_dev* cam, int data)
+static int cam_queryctrl(struct file *file, void *priv,
+ struct v4l2_queryctrl *qc)
{
- int i;
- for (i = 7; i >= 0; i--)
- {
- w9966_i2c_setsda(cam, (data >> i) & 0x01);
-
- if (w9966_i2c_setscl(cam, 1) == -1)
- return -1;
- w9966_i2c_setscl(cam, 0);
+ switch (qc->id) {
+ case V4L2_CID_BRIGHTNESS:
+ return v4l2_ctrl_query_fill(qc, 0, 255, 1, 128);
+ case V4L2_CID_CONTRAST:
+ return v4l2_ctrl_query_fill(qc, -64, 64, 1, 64);
+ case V4L2_CID_SATURATION:
+ return v4l2_ctrl_query_fill(qc, -64, 64, 1, 64);
+ case V4L2_CID_HUE:
+ return v4l2_ctrl_query_fill(qc, -128, 127, 1, 0);
}
-
- w9966_i2c_setsda(cam, 1);
-
- if (w9966_i2c_setscl(cam, 1) == -1)
- return -1;
- w9966_i2c_setscl(cam, 0);
-
- return 0;
+ return -EINVAL;
}
-// Read a data byte with ack from the i2c-bus
-// Expects a claimed pdev. -1 on error
-#if 0
-static int w9966_i2c_rbyte(struct w9966_dev* cam)
+static int cam_g_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctrl)
{
- unsigned char data = 0x00;
- int i;
-
- w9966_i2c_setsda(cam, 1);
+ struct w9966 *cam = video_drvdata(file);
+ int ret = 0;
- for (i = 0; i < 8; i++)
- {
- if (w9966_i2c_setscl(cam, 1) == -1)
- return -1;
- data = data << 1;
- if (w9966_i2c_getsda(cam))
- data |= 0x01;
-
- w9966_i2c_setscl(cam, 0);
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ ctrl->value = cam->brightness;
+ break;
+ case V4L2_CID_CONTRAST:
+ ctrl->value = cam->contrast;
+ break;
+ case V4L2_CID_SATURATION:
+ ctrl->value = cam->color;
+ break;
+ case V4L2_CID_HUE:
+ ctrl->value = cam->hue;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
}
- return data;
+ return ret;
}
-#endif
-// Read a register from the i2c device.
-// Expects claimed pdev. -1 on error
-#if 0
-static int w9966_rReg_i2c(struct w9966_dev* cam, int reg)
+static int cam_s_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctrl)
{
- int data;
-
- w9966_i2c_setsda(cam, 0);
- w9966_i2c_setscl(cam, 0);
-
- if (
- w9966_i2c_wbyte(cam, W9966_I2C_W_ID) == -1 ||
- w9966_i2c_wbyte(cam, reg) == -1
- )
- return -1;
-
- w9966_i2c_setsda(cam, 1);
- if (w9966_i2c_setscl(cam, 1) == -1)
- return -1;
- w9966_i2c_setsda(cam, 0);
- w9966_i2c_setscl(cam, 0);
+ struct w9966 *cam = video_drvdata(file);
+ int ret = 0;
- if (
- w9966_i2c_wbyte(cam, W9966_I2C_R_ID) == -1 ||
- (data = w9966_i2c_rbyte(cam)) == -1
- )
- return -1;
+ mutex_lock(&cam->lock);
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ cam->brightness = ctrl->value;
+ break;
+ case V4L2_CID_CONTRAST:
+ cam->contrast = ctrl->value;
+ break;
+ case V4L2_CID_SATURATION:
+ cam->color = ctrl->value;
+ break;
+ case V4L2_CID_HUE:
+ cam->hue = ctrl->value;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
- w9966_i2c_setsda(cam, 0);
+ if (ret == 0) {
+ w9966_pdev_claim(cam);
- if (w9966_i2c_setscl(cam, 1) == -1)
- return -1;
- w9966_i2c_setsda(cam, 1);
+ if (w9966_write_reg_i2c(cam, 0x0a, cam->brightness) == -1 ||
+ w9966_write_reg_i2c(cam, 0x0b, cam->contrast) == -1 ||
+ w9966_write_reg_i2c(cam, 0x0c, cam->color) == -1 ||
+ w9966_write_reg_i2c(cam, 0x0d, cam->hue) == -1) {
+ ret = -EIO;
+ }
- return data;
+ w9966_pdev_release(cam);
+ }
+ mutex_unlock(&cam->lock);
+ return ret;
}
-#endif
-// Write a register to the i2c device.
-// Expects claimed pdev. -1 on error
-static int w9966_wReg_i2c(struct w9966_dev* cam, int reg, int data)
+static int cam_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
- w9966_i2c_setsda(cam, 0);
- w9966_i2c_setscl(cam, 0);
-
- if (
- w9966_i2c_wbyte(cam, W9966_I2C_W_ID) == -1 ||
- w9966_i2c_wbyte(cam, reg) == -1 ||
- w9966_i2c_wbyte(cam, data) == -1
- )
- return -1;
-
- w9966_i2c_setsda(cam, 0);
- if (w9966_i2c_setscl(cam, 1) == -1)
- return -1;
-
- w9966_i2c_setsda(cam, 1);
-
+ struct w9966 *cam = video_drvdata(file);
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+
+ pix->width = cam->width;
+ pix->height = cam->height;
+ pix->pixelformat = V4L2_PIX_FMT_YUYV;
+ pix->field = V4L2_FIELD_NONE;
+ pix->bytesperline = 2 * cam->width;
+ pix->sizeimage = 2 * cam->width * cam->height;
+ /* Just a guess */
+ pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
return 0;
}
-/*
- * Video4linux interfacing
- */
-
-static long w9966_v4l_do_ioctl(struct file *file, unsigned int cmd, void *arg)
-{
- struct w9966_dev *cam = video_drvdata(file);
-
- switch(cmd)
- {
- case VIDIOCGCAP:
- {
- static struct video_capability vcap = {
- .name = W9966_DRIVERNAME,
- .type = VID_TYPE_CAPTURE | VID_TYPE_SCALES,
- .channels = 1,
- .maxwidth = W9966_WND_MAX_W,
- .maxheight = W9966_WND_MAX_H,
- .minwidth = 2,
- .minheight = 1,
- };
- struct video_capability *cap = arg;
- *cap = vcap;
- return 0;
- }
- case VIDIOCGCHAN:
- {
- struct video_channel *vch = arg;
- if(vch->channel != 0) // We only support one channel (#0)
- return -EINVAL;
- memset(vch,0,sizeof(*vch));
- strcpy(vch->name, "CCD-input");
- vch->type = VIDEO_TYPE_CAMERA;
- return 0;
- }
- case VIDIOCSCHAN:
- {
- struct video_channel *vch = arg;
- if(vch->channel != 0)
- return -EINVAL;
- return 0;
- }
- case VIDIOCGTUNER:
- {
- struct video_tuner *vtune = arg;
- if(vtune->tuner != 0)
- return -EINVAL;
- strcpy(vtune->name, "no tuner");
- vtune->rangelow = 0;
- vtune->rangehigh = 0;
- vtune->flags = VIDEO_TUNER_NORM;
- vtune->mode = VIDEO_MODE_AUTO;
- vtune->signal = 0xffff;
- return 0;
- }
- case VIDIOCSTUNER:
- {
- struct video_tuner *vtune = arg;
- if (vtune->tuner != 0)
- return -EINVAL;
- if (vtune->mode != VIDEO_MODE_AUTO)
- return -EINVAL;
- return 0;
- }
- case VIDIOCGPICT:
- {
- struct video_picture vpic = {
- cam->brightness << 8, // brightness
- (cam->hue + 128) << 8, // hue
- cam->color << 9, // color
- cam->contrast << 9, // contrast
- 0x8000, // whiteness
- 16, VIDEO_PALETTE_YUV422// bpp, palette format
- };
- struct video_picture *pic = arg;
- *pic = vpic;
- return 0;
- }
- case VIDIOCSPICT:
- {
- struct video_picture *vpic = arg;
- if (vpic->depth != 16 || (vpic->palette != VIDEO_PALETTE_YUV422 && vpic->palette != VIDEO_PALETTE_YUYV))
- return -EINVAL;
-
- cam->brightness = vpic->brightness >> 8;
- cam->hue = (vpic->hue >> 8) - 128;
- cam->color = vpic->colour >> 9;
- cam->contrast = vpic->contrast >> 9;
+static int cam_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+
+ if (pix->width < 2)
+ pix->width = 2;
+ if (pix->height < 1)
+ pix->height = 1;
+ if (pix->width > W9966_WND_MAX_W)
+ pix->width = W9966_WND_MAX_W;
+ if (pix->height > W9966_WND_MAX_H)
+ pix->height = W9966_WND_MAX_H;
+ pix->pixelformat = V4L2_PIX_FMT_YUYV;
+ pix->field = V4L2_FIELD_NONE;
+ pix->bytesperline = 2 * pix->width;
+ pix->sizeimage = 2 * pix->width * pix->height;
+ /* Just a guess */
+ pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
+ return 0;
+}
- w9966_pdev_claim(cam);
+static int cam_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+ struct w9966 *cam = video_drvdata(file);
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+ int ret = cam_try_fmt_vid_cap(file, fh, fmt);
- if (
- w9966_wReg_i2c(cam, 0x0a, cam->brightness) == -1 ||
- w9966_wReg_i2c(cam, 0x0b, cam->contrast) == -1 ||
- w9966_wReg_i2c(cam, 0x0c, cam->color) == -1 ||
- w9966_wReg_i2c(cam, 0x0d, cam->hue) == -1
- ) {
- w9966_pdev_release(cam);
- return -EIO;
- }
+ if (ret)
+ return ret;
- w9966_pdev_release(cam);
- return 0;
- }
- case VIDIOCSWIN:
- {
- int ret;
- struct video_window *vwin = arg;
-
- if (vwin->flags != 0)
- return -EINVAL;
- if (vwin->clipcount != 0)
- return -EINVAL;
- if (vwin->width < 2 || vwin->width > W9966_WND_MAX_W)
- return -EINVAL;
- if (vwin->height < 1 || vwin->height > W9966_WND_MAX_H)
- return -EINVAL;
-
- // Update camera regs
- w9966_pdev_claim(cam);
- ret = w9966_setup(cam, 0, 0, 1023, 1023, vwin->width, vwin->height);
- w9966_pdev_release(cam);
+ mutex_lock(&cam->lock);
+ /* Update camera regs */
+ w9966_pdev_claim(cam);
+ ret = w9966_setup(cam, 0, 0, 1023, 1023, pix->width, pix->height);
+ w9966_pdev_release(cam);
+ mutex_unlock(&cam->lock);
+ return ret;
+}
- if (ret != 0) {
- DPRINTF("VIDIOCSWIN: w9966_setup() failed.\n");
- return -EIO;
- }
+static int cam_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdesc *fmt)
+{
+ static struct v4l2_fmtdesc formats[] = {
+ { 0, 0, 0,
+ "YUV 4:2:2", V4L2_PIX_FMT_YUYV,
+ { 0, 0, 0, 0 }
+ },
+ };
+ enum v4l2_buf_type type = fmt->type;
- return 0;
- }
- case VIDIOCGWIN:
- {
- struct video_window *vwin = arg;
- memset(vwin, 0, sizeof(*vwin));
- vwin->width = cam->width;
- vwin->height = cam->height;
- return 0;
- }
- // Unimplemented
- case VIDIOCCAPTURE:
- case VIDIOCGFBUF:
- case VIDIOCSFBUF:
- case VIDIOCKEY:
- case VIDIOCGFREQ:
- case VIDIOCSFREQ:
- case VIDIOCGAUDIO:
- case VIDIOCSAUDIO:
+ if (fmt->index > 0)
return -EINVAL;
- default:
- return -ENOIOCTLCMD;
- }
- return 0;
-}
-static long w9966_v4l_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- return video_usercopy(file, cmd, arg, w9966_v4l_do_ioctl);
+ *fmt = formats[fmt->index];
+ fmt->type = type;
+ return 0;
}
-// Capture data
+/* Capture data */
static ssize_t w9966_v4l_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+ size_t count, loff_t *ppos)
{
- struct w9966_dev *cam = video_drvdata(file);
- unsigned char addr = 0xa0; // ECP, read, CCD-transfer, 00000
+ struct w9966 *cam = video_drvdata(file);
+ unsigned char addr = 0xa0; /* ECP, read, CCD-transfer, 00000 */
unsigned char __user *dest = (unsigned char __user *)buf;
unsigned long dleft = count;
unsigned char *tbuf;
- // Why would anyone want more than this??
+ /* Why would anyone want more than this?? */
if (count > cam->width * cam->height * 2)
return -EINVAL;
+ mutex_lock(&cam->lock);
w9966_pdev_claim(cam);
- w9966_wReg(cam, 0x00, 0x02); // Reset ECP-FIFO buffer
- w9966_wReg(cam, 0x00, 0x00); // Return to normal operation
- w9966_wReg(cam, 0x01, 0x98); // Enable capture
-
- // write special capture-addr and negotiate into data transfer
- if (
- (parport_negotiate(cam->pport, cam->ppmode|IEEE1284_ADDR) != 0 )||
- (parport_write(cam->pport, &addr, 1) != 1 )||
- (parport_negotiate(cam->pport, cam->ppmode|IEEE1284_DATA) != 0 )
- ) {
+ w9966_write_reg(cam, 0x00, 0x02); /* Reset ECP-FIFO buffer */
+ w9966_write_reg(cam, 0x00, 0x00); /* Return to normal operation */
+ w9966_write_reg(cam, 0x01, 0x98); /* Enable capture */
+
+ /* write special capture-addr and negotiate into data transfer */
+ if ((parport_negotiate(cam->pport, cam->ppmode|IEEE1284_ADDR) != 0) ||
+ (parport_write(cam->pport, &addr, 1) != 1) ||
+ (parport_negotiate(cam->pport, cam->ppmode|IEEE1284_DATA) != 0)) {
w9966_pdev_release(cam);
+ mutex_unlock(&cam->lock);
return -EFAULT;
}
@@ -915,8 +788,7 @@ static ssize_t w9966_v4l_read(struct file *file, char __user *buf,
goto out;
}
- while(dleft > 0)
- {
+ while (dleft > 0) {
unsigned long tsize = (dleft > W9966_RBUFFER) ? W9966_RBUFFER : dleft;
if (parport_read(cam->pport, tbuf, tsize) < tsize) {
@@ -931,43 +803,167 @@ static ssize_t w9966_v4l_read(struct file *file, char __user *buf,
dleft -= tsize;
}
- w9966_wReg(cam, 0x01, 0x18); // Disable capture
+ w9966_write_reg(cam, 0x01, 0x18); /* Disable capture */
out:
kfree(tbuf);
w9966_pdev_release(cam);
+ mutex_unlock(&cam->lock);
return count;
}
+static const struct v4l2_file_operations w9966_fops = {
+ .owner = THIS_MODULE,
+ .ioctl = video_ioctl2,
+ .read = w9966_v4l_read,
+};
+
+static const struct v4l2_ioctl_ops w9966_ioctl_ops = {
+ .vidioc_querycap = cam_querycap,
+ .vidioc_g_input = cam_g_input,
+ .vidioc_s_input = cam_s_input,
+ .vidioc_enum_input = cam_enum_input,
+ .vidioc_queryctrl = cam_queryctrl,
+ .vidioc_g_ctrl = cam_g_ctrl,
+ .vidioc_s_ctrl = cam_s_ctrl,
+ .vidioc_enum_fmt_vid_cap = cam_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = cam_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = cam_s_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = cam_try_fmt_vid_cap,
+};
+
-// Called once for every parport on init
+/* Initialize camera device. Set up all internal flags, set a
+   default video mode, set up the ccd-chip, register the v4l device, etc.
+ Also used for 'probing' of hardware.
+ -1 on error */
+static int w9966_init(struct w9966 *cam, struct parport *port)
+{
+ struct v4l2_device *v4l2_dev = &cam->v4l2_dev;
+
+ if (cam->dev_state != 0)
+ return -1;
+
+ strlcpy(v4l2_dev->name, "w9966", sizeof(v4l2_dev->name));
+
+ if (v4l2_device_register(NULL, v4l2_dev) < 0) {
+ v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
+ return -1;
+ }
+ cam->pport = port;
+ cam->brightness = 128;
+ cam->contrast = 64;
+ cam->color = 64;
+ cam->hue = 0;
+
+ /* Select requested transfer mode */
+ switch (parmode) {
+ default: /* Auto-detect (priority: hw-ecp, hw-epp, sw-ecp) */
+ case 0:
+ if (port->modes & PARPORT_MODE_ECP)
+ cam->ppmode = IEEE1284_MODE_ECP;
+ else if (port->modes & PARPORT_MODE_EPP)
+ cam->ppmode = IEEE1284_MODE_EPP;
+ else
+ cam->ppmode = IEEE1284_MODE_ECP;
+ break;
+ case 1: /* hw- or sw-ecp */
+ cam->ppmode = IEEE1284_MODE_ECP;
+ break;
+ case 2: /* hw- or sw-epp */
+ cam->ppmode = IEEE1284_MODE_EPP;
+ break;
+ }
+
+	/* Tell the parport driver that we exist */
+ cam->pdev = parport_register_device(port, "w9966", NULL, NULL, NULL, 0, NULL);
+ if (cam->pdev == NULL) {
+ DPRINTF("parport_register_device() failed\n");
+ return -1;
+ }
+ w9966_set_state(cam, W9966_STATE_PDEV, W9966_STATE_PDEV);
+
+ w9966_pdev_claim(cam);
+
+ /* Setup a default capture mode */
+ if (w9966_setup(cam, 0, 0, 1023, 1023, 200, 160) != 0) {
+ DPRINTF("w9966_setup() failed.\n");
+ return -1;
+ }
+
+ w9966_pdev_release(cam);
+
+ /* Fill in the video_device struct and register us to v4l */
+ strlcpy(cam->vdev.name, W9966_DRIVERNAME, sizeof(cam->vdev.name));
+ cam->vdev.v4l2_dev = v4l2_dev;
+ cam->vdev.fops = &w9966_fops;
+ cam->vdev.ioctl_ops = &w9966_ioctl_ops;
+ cam->vdev.release = video_device_release_empty;
+ video_set_drvdata(&cam->vdev, cam);
+
+ mutex_init(&cam->lock);
+
+ if (video_register_device(&cam->vdev, VFL_TYPE_GRABBER, video_nr) < 0)
+ return -1;
+
+ w9966_set_state(cam, W9966_STATE_VDEV, W9966_STATE_VDEV);
+
+ /* All ok */
+ v4l2_info(v4l2_dev, "Found and initialized a webcam on %s.\n",
+ cam->pport->name);
+ return 0;
+}
+
+
+/* Terminate everything gracefully */
+static void w9966_term(struct w9966 *cam)
+{
+ /* Unregister from v4l */
+ if (w9966_get_state(cam, W9966_STATE_VDEV, W9966_STATE_VDEV)) {
+ video_unregister_device(&cam->vdev);
+ w9966_set_state(cam, W9966_STATE_VDEV, 0);
+ }
+
+ /* Terminate from IEEE1284 mode and release pdev block */
+ if (w9966_get_state(cam, W9966_STATE_PDEV, W9966_STATE_PDEV)) {
+ w9966_pdev_claim(cam);
+ parport_negotiate(cam->pport, IEEE1284_MODE_COMPAT);
+ w9966_pdev_release(cam);
+ }
+
+ /* Unregister from parport */
+ if (w9966_get_state(cam, W9966_STATE_PDEV, W9966_STATE_PDEV)) {
+ parport_unregister_device(cam->pdev);
+ w9966_set_state(cam, W9966_STATE_PDEV, 0);
+ }
+}
+
+
+/* Called once for every parport on init */
static void w9966_attach(struct parport *port)
{
int i;
- for (i = 0; i < W9966_MAXCAMS; i++)
- {
- if (w9966_cams[i].dev_state != 0) // Cam is already assigned
+ for (i = 0; i < W9966_MAXCAMS; i++) {
+ if (w9966_cams[i].dev_state != 0) /* Cam is already assigned */
continue;
- if (
- strcmp(pardev[i], "aggressive") == 0 ||
- strcmp(pardev[i], port->name) == 0
- ) {
+ if (strcmp(pardev[i], "aggressive") == 0 || strcmp(pardev[i], port->name) == 0) {
if (w9966_init(&w9966_cams[i], port) != 0)
- w9966_term(&w9966_cams[i]);
- break; // return
+ w9966_term(&w9966_cams[i]);
+ break; /* return */
}
}
}
-// Called once for every parport on termination
+/* Called once for every parport on termination */
static void w9966_detach(struct parport *port)
{
int i;
+
for (i = 0; i < W9966_MAXCAMS; i++)
- if (w9966_cams[i].dev_state != 0 && w9966_cams[i].pport == port)
- w9966_term(&w9966_cams[i]);
+ if (w9966_cams[i].dev_state != 0 && w9966_cams[i].pport == port)
+ w9966_term(&w9966_cams[i]);
}
@@ -977,17 +973,18 @@ static struct parport_driver w9966_ppd = {
.detach = w9966_detach,
};
-// Module entry point
+/* Module entry point */
static int __init w9966_mod_init(void)
{
int i;
+
for (i = 0; i < W9966_MAXCAMS; i++)
w9966_cams[i].dev_state = 0;
return parport_register_driver(&w9966_ppd);
}
-// Module cleanup
+/* Module cleanup */
static void __exit w9966_mod_term(void)
{
parport_unregister_driver(&w9966_ppd);
diff --git a/drivers/media/video/zc0301/zc0301_core.c b/drivers/media/video/zc0301/zc0301_core.c
index e44e4b5f3e50..bb51cfb0c647 100644
--- a/drivers/media/video/zc0301/zc0301_core.c
+++ b/drivers/media/video/zc0301/zc0301_core.c
@@ -1153,7 +1153,7 @@ zc0301_vidioc_s_ctrl(struct zc0301_device* cam, void __user * arg)
if (copy_from_user(&ctrl, arg, sizeof(ctrl)))
return -EFAULT;
- for (i = 0; i < ARRAY_SIZE(s->qctrl); i++)
+ for (i = 0; i < ARRAY_SIZE(s->qctrl); i++) {
if (ctrl.id == s->qctrl[i].id) {
if (s->qctrl[i].flags & V4L2_CTRL_FLAG_DISABLED)
return -EINVAL;
@@ -1163,7 +1163,9 @@ zc0301_vidioc_s_ctrl(struct zc0301_device* cam, void __user * arg)
ctrl.value -= ctrl.value % s->qctrl[i].step;
break;
}
-
+ }
+ if (i == ARRAY_SIZE(s->qctrl))
+ return -EINVAL;
if ((err = s->set_ctrl(cam, &ctrl)))
return err;
diff --git a/drivers/media/video/zc0301/zc0301_sensor.h b/drivers/media/video/zc0301/zc0301_sensor.h
index 3a408de91b9c..0be783c203f7 100644
--- a/drivers/media/video/zc0301/zc0301_sensor.h
+++ b/drivers/media/video/zc0301/zc0301_sensor.h
@@ -58,7 +58,7 @@ zc0301_attach_sensor(struct zc0301_device* cam, struct zc0301_sensor* sensor);
.idProduct = (prod), \
.bInterfaceClass = (intclass)
-#if !defined CONFIG_USB_GSPCA && !defined CONFIG_USB_GSPCA_MODULE
+#if !defined CONFIG_USB_GSPCA_ZC3XX && !defined CONFIG_USB_GSPCA_ZC3XX_MODULE
#define ZC0301_ID_TABLE \
static const struct usb_device_id zc0301_id_table[] = { \
{ ZC0301_USB_DEVICE(0x046d, 0x08ae, 0xff), }, /* PAS202 */ \
diff --git a/drivers/media/video/zoran/zoran.h b/drivers/media/video/zoran/zoran.h
index cb1de7ea197a..8997add1248e 100644
--- a/drivers/media/video/zoran/zoran.h
+++ b/drivers/media/video/zoran/zoran.h
@@ -33,6 +33,10 @@
#include <media/v4l2-device.h>
+#define ZORAN_VIDMODE_PAL 0
+#define ZORAN_VIDMODE_NTSC 1
+#define ZORAN_VIDMODE_SECAM 2
+
struct zoran_requestbuffers {
unsigned long count; /* Number of buffers for MJPEG grabbing */
unsigned long size; /* Size PER BUFFER in bytes */
@@ -48,7 +52,7 @@ struct zoran_sync {
struct zoran_status {
int input; /* Input channel, has to be set prior to BUZIOC_G_STATUS */
int signal; /* Returned: 1 if valid video signal detected */
- int norm; /* Returned: VIDEO_MODE_PAL or VIDEO_MODE_NTSC */
+ int norm; /* Returned: ZORAN_VIDMODE_PAL or ZORAN_VIDMODE_NTSC */
int color; /* Returned: 1 if color signal detected */
};
@@ -62,7 +66,7 @@ struct zoran_params {
/* Main control parameters */
int input; /* Input channel: 0 = Composite, 1 = S-VHS */
- int norm; /* Norm: VIDEO_MODE_PAL or VIDEO_MODE_NTSC */
+ int norm; /* Norm: ZORAN_VIDMODE_PAL or ZORAN_VIDMODE_NTSC */
int decimation; /* decimation of captured video,
* enlargement of video played back.
* Valid values are 1, 2, 4 or 0.
@@ -131,13 +135,13 @@ struct zoran_params {
/*
Private IOCTL to set up for displaying MJPEG
*/
-#define BUZIOC_G_PARAMS _IOR ('v', BASE_VIDIOCPRIVATE+0, struct zoran_params)
-#define BUZIOC_S_PARAMS _IOWR('v', BASE_VIDIOCPRIVATE+1, struct zoran_params)
-#define BUZIOC_REQBUFS _IOWR('v', BASE_VIDIOCPRIVATE+2, struct zoran_requestbuffers)
-#define BUZIOC_QBUF_CAPT _IOW ('v', BASE_VIDIOCPRIVATE+3, int)
-#define BUZIOC_QBUF_PLAY _IOW ('v', BASE_VIDIOCPRIVATE+4, int)
-#define BUZIOC_SYNC _IOR ('v', BASE_VIDIOCPRIVATE+5, struct zoran_sync)
-#define BUZIOC_G_STATUS _IOWR('v', BASE_VIDIOCPRIVATE+6, struct zoran_status)
+#define BUZIOC_G_PARAMS _IOR ('v', BASE_VIDIOC_PRIVATE+0, struct zoran_params)
+#define BUZIOC_S_PARAMS _IOWR('v', BASE_VIDIOC_PRIVATE+1, struct zoran_params)
+#define BUZIOC_REQBUFS _IOWR('v', BASE_VIDIOC_PRIVATE+2, struct zoran_requestbuffers)
+#define BUZIOC_QBUF_CAPT _IOW ('v', BASE_VIDIOC_PRIVATE+3, int)
+#define BUZIOC_QBUF_PLAY _IOW ('v', BASE_VIDIOC_PRIVATE+4, int)
+#define BUZIOC_SYNC _IOR ('v', BASE_VIDIOC_PRIVATE+5, struct zoran_sync)
+#define BUZIOC_G_STATUS _IOWR('v', BASE_VIDIOC_PRIVATE+6, struct zoran_status)
#ifdef __KERNEL__
@@ -401,7 +405,7 @@ struct zoran {
spinlock_t spinlock; /* Spinlock */
/* Video for Linux parameters */
- int input; /* card's norm and input - norm=VIDEO_MODE_* */
+ int input; /* card's norm and input */
v4l2_std_id norm;
/* Current buffer params */
diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c
index ec41303544e5..6f89d0a096ea 100644
--- a/drivers/media/video/zoran/zoran_driver.c
+++ b/drivers/media/video/zoran/zoran_driver.c
@@ -60,7 +60,7 @@
#include <linux/spinlock.h>
-#include <linux/videodev.h>
+#include <linux/videodev2.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include "videocodec.h"
@@ -1549,11 +1549,11 @@ static long zoran_default(struct file *file, void *__fh, int cmd, void *arg)
mutex_lock(&zr->resource_lock);
if (zr->norm & V4L2_STD_NTSC)
- bparams->norm = VIDEO_MODE_NTSC;
- else if (zr->norm & V4L2_STD_PAL)
- bparams->norm = VIDEO_MODE_PAL;
+ bparams->norm = ZORAN_VIDMODE_NTSC;
+ else if (zr->norm & V4L2_STD_SECAM)
+ bparams->norm = ZORAN_VIDMODE_SECAM;
else
- bparams->norm = VIDEO_MODE_SECAM;
+ bparams->norm = ZORAN_VIDMODE_PAL;
bparams->input = zr->input;
@@ -1789,11 +1789,11 @@ gstat_unlock_and_return:
bstat->signal =
(status & V4L2_IN_ST_NO_SIGNAL) ? 0 : 1;
if (norm & V4L2_STD_NTSC)
- bstat->norm = VIDEO_MODE_NTSC;
+ bstat->norm = ZORAN_VIDMODE_NTSC;
else if (norm & V4L2_STD_SECAM)
- bstat->norm = VIDEO_MODE_SECAM;
+ bstat->norm = ZORAN_VIDMODE_SECAM;
else
- bstat->norm = VIDEO_MODE_PAL;
+ bstat->norm = ZORAN_VIDMODE_PAL;
bstat->color =
(status & V4L2_IN_ST_NO_COLOR) ? 0 : 1;
diff --git a/drivers/media/video/zr364xx.c b/drivers/media/video/zr364xx.c
index 3d4bac252902..a82b5bd18d26 100644
--- a/drivers/media/video/zr364xx.c
+++ b/drivers/media/video/zr364xx.c
@@ -376,8 +376,8 @@ static int buffer_setup(struct videobuf_queue *vq, unsigned int *count,
if (*count == 0)
*count = ZR364XX_DEF_BUFS;
- while (*size * (*count) > ZR364XX_DEF_BUFS * 1024 * 1024)
- (*count)--;
+ if (*size * *count > ZR364XX_DEF_BUFS * 1024 * 1024)
+ *count = (ZR364XX_DEF_BUFS * 1024 * 1024) / *size;
return 0;
}
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 5382b5a44aff..b112f34d264b 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -109,8 +109,7 @@ MODULE_PARM_DESC(mpt_debug_level, " debug level - refer to mptdebug.h \
int mpt_fwfault_debug;
EXPORT_SYMBOL(mpt_fwfault_debug);
-module_param_call(mpt_fwfault_debug, param_set_int, param_get_int,
- &mpt_fwfault_debug, 0600);
+module_param(mpt_fwfault_debug, int, 0600);
MODULE_PARM_DESC(mpt_fwfault_debug, "Enable detection of Firmware fault"
" and halt Firmware on fault - (default=0)");
@@ -5064,7 +5063,7 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
if (!timeleft) {
printk(KERN_DEBUG "%s: Issuing Reset from %s!!\n",
ioc->name, __func__);
- mpt_HardResetHandler(ioc, CAN_SLEEP);
+ mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
mpt_free_msg_frame(ioc, mf);
}
goto out;
@@ -6456,10 +6455,15 @@ out:
issue_hard_reset = 0;
printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
ioc->name, __func__);
- mpt_HardResetHandler(ioc, CAN_SLEEP);
+ if (retry_count == 0) {
+ if (mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP) != 0)
+ retry_count++;
+ } else
+ mpt_HardResetHandler(ioc, CAN_SLEEP);
+
mpt_free_msg_frame(ioc, mf);
/* attempt one retry for a timed out command */
- if (!retry_count) {
+ if (retry_count < 2) {
printk(MYIOC_s_INFO_FMT
"Attempting Retry Config request"
" type 0x%x, page 0x%x,"
@@ -6904,6 +6908,172 @@ mpt_halt_firmware(MPT_ADAPTER *ioc)
}
EXPORT_SYMBOL(mpt_halt_firmware);
+/**
+ * mpt_SoftResetHandler - Issues a less expensive reset
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @sleepFlag: Indicates if sleep or schedule must be called.
+ *
+ * Returns 0 for SUCCESS or -1 if FAILED.
+ *
+ * Message Unit Reset - instructs the IOC to reset the Reply Post and
+ * Free FIFOs. All the Message Frames on the Reply Free FIFO are discarded.
+ * All posted buffers are freed, and event notification is turned off.
+ * The IOC doesn't reply to any outstanding request. This will transfer
+ * the IOC to the READY state.
+ **/
+int
+mpt_SoftResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
+{
+ int rc;
+ int ii;
+ u8 cb_idx;
+ unsigned long flags;
+ u32 ioc_state;
+ unsigned long time_count;
+
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "SoftResetHandler Entered!\n",
+ ioc->name));
+
+ ioc_state = mpt_GetIocState(ioc, 0) & MPI_IOC_STATE_MASK;
+
+ if (mpt_fwfault_debug)
+ mpt_halt_firmware(ioc);
+
+ if (ioc_state == MPI_IOC_STATE_FAULT ||
+ ioc_state == MPI_IOC_STATE_RESET) {
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "skipping, either in FAULT or RESET state!\n", ioc->name));
+ return -1;
+ }
+
+ if (ioc->bus_type == FC) {
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "skipping, because the bus type is FC!\n", ioc->name));
+ return -1;
+ }
+
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ if (ioc->ioc_reset_in_progress) {
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+ return -1;
+ }
+ ioc->ioc_reset_in_progress = 1;
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+
+ rc = -1;
+
+ for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
+ if (MptResetHandlers[cb_idx])
+ mpt_signal_reset(cb_idx, ioc, MPT_IOC_SETUP_RESET);
+ }
+
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ if (ioc->taskmgmt_in_progress) {
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+ return -1;
+ }
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+ /* Disable reply interrupts (also blocks FreeQ) */
+ CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
+ ioc->active = 0;
+ time_count = jiffies;
+
+ rc = SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag);
+
+ for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
+ if (MptResetHandlers[cb_idx])
+ mpt_signal_reset(cb_idx, ioc, MPT_IOC_PRE_RESET);
+ }
+
+ if (rc)
+ goto out;
+
+ ioc_state = mpt_GetIocState(ioc, 0) & MPI_IOC_STATE_MASK;
+ if (ioc_state != MPI_IOC_STATE_READY)
+ goto out;
+
+ for (ii = 0; ii < 5; ii++) {
+ /* Get IOC facts! Allow 5 retries */
+ rc = GetIocFacts(ioc, sleepFlag,
+ MPT_HOSTEVENT_IOC_RECOVER);
+ if (rc == 0)
+ break;
+ if (sleepFlag == CAN_SLEEP)
+ msleep(100);
+ else
+ mdelay(100);
+ }
+ if (ii == 5)
+ goto out;
+
+ rc = PrimeIocFifos(ioc);
+ if (rc != 0)
+ goto out;
+
+ rc = SendIocInit(ioc, sleepFlag);
+ if (rc != 0)
+ goto out;
+
+ rc = SendEventNotification(ioc, 1, sleepFlag);
+ if (rc != 0)
+ goto out;
+
+ if (ioc->hard_resets < -1)
+ ioc->hard_resets++;
+
+ /*
+ * At this point, we know soft reset succeeded.
+ */
+
+ ioc->active = 1;
+ CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM);
+
+ out:
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ ioc->ioc_reset_in_progress = 0;
+ ioc->taskmgmt_quiesce_io = 0;
+ ioc->taskmgmt_in_progress = 0;
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+
+ if (ioc->active) { /* otherwise, hard reset coming */
+ for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
+ if (MptResetHandlers[cb_idx])
+ mpt_signal_reset(cb_idx, ioc,
+ MPT_IOC_POST_RESET);
+ }
+ }
+
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "SoftResetHandler: completed (%d seconds): %s\n",
+ ioc->name, jiffies_to_msecs(jiffies - time_count)/1000,
+ ((rc == 0) ? "SUCCESS" : "FAILED")));
+
+ return rc;
+}
+
+/**
+ * mpt_Soft_Hard_ResetHandler - Try less expensive reset
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @sleepFlag: Indicates if sleep or schedule must be called.
+ *
+ * Returns 0 for SUCCESS or -1 if FAILED.
+ * Try a soft reset first; only if that fails, go for the expensive
+ * HardReset.
+ **/
+int
+mpt_Soft_Hard_ResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
+{
+ int ret = -1;
+
+ ret = mpt_SoftResetHandler(ioc, sleepFlag);
+ if (ret == 0)
+ return ret;
+ ret = mpt_HardResetHandler(ioc, sleepFlag);
+ return ret;
+}
+EXPORT_SYMBOL(mpt_Soft_Hard_ResetHandler);
+
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* Reset Handling
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index 9718c8f2e959..b613eb3d4706 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -76,8 +76,8 @@
#define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR
#endif
-#define MPT_LINUX_VERSION_COMMON "3.04.14"
-#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.14"
+#define MPT_LINUX_VERSION_COMMON "3.04.15"
+#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.15"
#define WHAT_MAGIC_STRING "@" "(" "#" ")"
#define show_mptmod_ver(s,ver) \
@@ -940,6 +940,7 @@ extern int mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp);
extern u32 mpt_GetIocState(MPT_ADAPTER *ioc, int cooked);
extern void mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buf, int *size, int len, int showlan);
extern int mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag);
+extern int mpt_Soft_Hard_ResetHandler(MPT_ADAPTER *ioc, int sleepFlag);
extern int mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *cfg);
extern int mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size);
extern void mpt_free_fw_memory(MPT_ADAPTER *ioc);
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index caa8f568a41c..f06b29193b4e 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -128,7 +128,6 @@ static MptSge_t *kbuf_alloc_2_sgl(int bytes, u32 dir, int sge_offset, int *frags
struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc);
static void kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma,
struct buflist *buflist, MPT_ADAPTER *ioc);
-static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function);
/*
* Reset Handler cleanup function
@@ -275,45 +274,6 @@ mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
return 1;
}
-/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/* mptctl_timeout_expired
- *
- * Expecting an interrupt, however timed out.
- *
- */
-static void
-mptctl_timeout_expired(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
-{
- unsigned long flags;
-
- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": %s\n",
- ioc->name, __func__));
-
- if (mpt_fwfault_debug)
- mpt_halt_firmware(ioc);
-
- spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
- if (ioc->ioc_reset_in_progress) {
- spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
- CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status)
- mpt_free_msg_frame(ioc, mf);
- return;
- }
- spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
-
-
- if (!mptctl_bus_reset(ioc, mf->u.hdr.Function))
- return;
-
- /* Issue a reset for this device.
- * The IOC is not responding.
- */
- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling HardReset! \n",
- ioc->name));
- CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status)
- mpt_HardResetHandler(ioc, CAN_SLEEP);
- mpt_free_msg_frame(ioc, mf);
-}
static int
mptctl_taskmgmt_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
@@ -343,12 +303,8 @@ mptctl_taskmgmt_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
return 0;
}
-/* mptctl_bus_reset
- *
- * Bus reset code.
- *
- */
-static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
+static int
+mptctl_do_taskmgmt(MPT_ADAPTER *ioc, u8 tm_type, u8 bus_id, u8 target_id)
{
MPT_FRAME_HDR *mf;
SCSITaskMgmt_t *pScsiTm;
@@ -359,13 +315,6 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
unsigned long time_count;
u16 iocstatus;
- /* bus reset is only good for SCSI IO, RAID PASSTHRU */
- if (!(function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
- function == MPI_FUNCTION_SCSI_IO_REQUEST)) {
- dtmprintk(ioc, printk(MYIOC_s_WARN_FMT
- "TaskMgmt, not SCSI_IO!!\n", ioc->name));
- return -EPERM;
- }
mutex_lock(&ioc->taskmgmt_cmds.mutex);
if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
@@ -375,15 +324,14 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
retval = 0;
- /* Send request
- */
mf = mpt_get_msg_frame(mptctl_taskmgmt_id, ioc);
if (mf == NULL) {
- dtmprintk(ioc, printk(MYIOC_s_WARN_FMT
- "TaskMgmt, no msg frames!!\n", ioc->name));
+ dtmprintk(ioc,
+ printk(MYIOC_s_WARN_FMT "TaskMgmt, no msg frames!!\n",
+ ioc->name));
mpt_clear_taskmgmt_in_progress_flag(ioc);
retval = -ENOMEM;
- goto mptctl_bus_reset_done;
+ goto tm_done;
}
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n",
@@ -392,10 +340,13 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
pScsiTm = (SCSITaskMgmt_t *) mf;
memset(pScsiTm, 0, sizeof(SCSITaskMgmt_t));
pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
- pScsiTm->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS;
- pScsiTm->MsgFlags = MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION;
- pScsiTm->TargetID = 0;
- pScsiTm->Bus = 0;
+ pScsiTm->TaskType = tm_type;
+ if ((tm_type == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) &&
+ (ioc->bus_type == FC))
+ pScsiTm->MsgFlags =
+ MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION;
+ pScsiTm->TargetID = target_id;
+ pScsiTm->Bus = bus_id;
pScsiTm->ChainOffset = 0;
pScsiTm->Reserved = 0;
pScsiTm->Reserved1 = 0;
@@ -413,17 +364,16 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
timeout = 30;
break;
case SPI:
- default:
- timeout = 2;
+ default:
+ timeout = 10;
break;
}
- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
- "TaskMgmt type=%d timeout=%ld\n",
- ioc->name, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, timeout));
+ dtmprintk(ioc,
+ printk(MYIOC_s_DEBUG_FMT "TaskMgmt type=%d timeout=%ld\n",
+ ioc->name, tm_type, timeout));
INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)
- CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
time_count = jiffies;
if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) &&
(ioc->facts.MsgVersion >= MPI_VERSION_01_05))
@@ -432,17 +382,20 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
retval = mpt_send_handshake_request(mptctl_taskmgmt_id, ioc,
sizeof(SCSITaskMgmt_t), (u32 *)pScsiTm, CAN_SLEEP);
if (retval != 0) {
- dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ dfailprintk(ioc,
+ printk(MYIOC_s_ERR_FMT
"TaskMgmt send_handshake FAILED!"
" (ioc %p, mf %p, rc=%d) \n", ioc->name,
ioc, mf, retval));
+ mpt_free_msg_frame(ioc, mf);
mpt_clear_taskmgmt_in_progress_flag(ioc);
- goto mptctl_bus_reset_done;
+ goto tm_done;
}
}
/* Now wait for the command to complete */
ii = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done, timeout*HZ);
+
if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"TaskMgmt failed\n", ioc->name));
@@ -452,14 +405,14 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
retval = 0;
else
retval = -1; /* return failure */
- goto mptctl_bus_reset_done;
+ goto tm_done;
}
if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"TaskMgmt failed\n", ioc->name));
retval = -1; /* return failure */
- goto mptctl_bus_reset_done;
+ goto tm_done;
}
pScsiTmReply = (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply;
@@ -467,7 +420,7 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
"TaskMgmt fw_channel = %d, fw_id = %d, task_type=0x%02X, "
"iocstatus=0x%04X\n\tloginfo=0x%08X, response_code=0x%02X, "
"term_cmnds=%d\n", ioc->name, pScsiTmReply->Bus,
- pScsiTmReply->TargetID, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
+ pScsiTmReply->TargetID, tm_type,
le16_to_cpu(pScsiTmReply->IOCStatus),
le32_to_cpu(pScsiTmReply->IOCLogInfo),
pScsiTmReply->ResponseCode,
@@ -485,13 +438,71 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
retval = -1; /* return failure */
}
-
- mptctl_bus_reset_done:
+ tm_done:
mutex_unlock(&ioc->taskmgmt_cmds.mutex);
CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
return retval;
}
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/* mptctl_timeout_expired
+ *
+ * Expecting an interrupt, however timed out.
+ *
+ */
+static void
+mptctl_timeout_expired(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
+{
+ unsigned long flags;
+ int ret_val = -1;
+ SCSIIORequest_t *scsi_req = (SCSIIORequest_t *) mf;
+ u8 function = mf->u.hdr.Function;
+
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": %s\n",
+ ioc->name, __func__));
+
+ if (mpt_fwfault_debug)
+ mpt_halt_firmware(ioc);
+
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ if (ioc->ioc_reset_in_progress) {
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+ CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status)
+ mpt_free_msg_frame(ioc, mf);
+ return;
+ }
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+
+
+ CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status)
+
+ if (ioc->bus_type == SAS) {
+ if (function == MPI_FUNCTION_SCSI_IO_REQUEST)
+ ret_val = mptctl_do_taskmgmt(ioc,
+ MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
+ scsi_req->Bus, scsi_req->TargetID);
+ else if (function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)
+ ret_val = mptctl_do_taskmgmt(ioc,
+ MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
+ scsi_req->Bus, 0);
+ if (!ret_val)
+ return;
+ } else {
+ if ((function == MPI_FUNCTION_SCSI_IO_REQUEST) ||
+ (function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH))
+ ret_val = mptctl_do_taskmgmt(ioc,
+ MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
+ scsi_req->Bus, 0);
+ if (!ret_val)
+ return;
+ }
+
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling Reset! \n",
+ ioc->name));
+ mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
+ mpt_free_msg_frame(ioc, mf);
+}
+
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* mptctl_ioc_reset
@@ -1318,6 +1329,8 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
if (ioc->sh) {
shost_for_each_device(sdev, ioc->sh) {
vdevice = sdev->hostdata;
+ if (vdevice == NULL || vdevice->vtarget == NULL)
+ continue;
if (vdevice->vtarget->tflags &
MPT_TARGET_FLAGS_RAID_COMPONENT)
continue;
@@ -1439,6 +1452,8 @@ mptctl_gettargetinfo (unsigned long arg)
if (!maxWordsLeft)
continue;
vdevice = sdev->hostdata;
+ if (vdevice == NULL || vdevice->vtarget == NULL)
+ continue;
if (vdevice->vtarget->tflags &
MPT_TARGET_FLAGS_RAID_COMPONENT)
continue;
@@ -1967,6 +1982,9 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
struct scsi_target *starget = scsi_target(sdev);
VirtTarget *vtarget = starget->hostdata;
+ if (vtarget == NULL)
+ continue;
+
if ((pScsiReq->TargetID == vtarget->id) &&
(pScsiReq->Bus == vtarget->channel) &&
(vtarget->tflags & MPT_TARGET_FLAGS_Q_YES))
@@ -2991,6 +3009,14 @@ static int __init mptctl_init(void)
}
mptctl_taskmgmt_id = mpt_register(mptctl_taskmgmt_reply, MPTCTL_DRIVER);
+ if (!mptctl_taskmgmt_id || mptctl_taskmgmt_id >= MPT_MAX_PROTOCOL_DRIVERS) {
+ printk(KERN_ERR MYNAM ": ERROR: Failed to register with Fusion MPT base driver\n");
+ mpt_deregister(mptctl_id);
+ misc_deregister(&mptctl_miscdev);
+ err = -EBUSY;
+ goto out_fail;
+ }
+
mpt_reset_register(mptctl_id, mptctl_ioc_reset);
mpt_event_register(mptctl_id, mptctl_event_process);
@@ -3010,12 +3036,15 @@ static void mptctl_exit(void)
printk(KERN_INFO MYNAM ": Deregistered /dev/%s @ (major,minor=%d,%d)\n",
mptctl_miscdev.name, MISC_MAJOR, mptctl_miscdev.minor);
+ /* De-register event handler from base module */
+ mpt_event_deregister(mptctl_id);
+
/* De-register reset handler from base module */
mpt_reset_deregister(mptctl_id);
/* De-register callback handler from base module */
+ mpt_deregister(mptctl_taskmgmt_id);
mpt_deregister(mptctl_id);
- mpt_reset_deregister(mptctl_taskmgmt_id);
mpt_device_driver_deregister(MPTCTL_DRIVER);
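
The mptctl change above replaces the old unconditional bus reset in mptctl_timeout_expired() with the more general mptctl_do_taskmgmt(): on SAS a timed-out SCSI IO now gets a target reset (a RAID passthrough gets a bus reset), other bus types get a bus reset, and only when that task-management request fails does the driver escalate to mpt_Soft_Hard_ResetHandler(). The stand-alone, simplified sketch below only mirrors that escalation order; the types and helpers in it are stand-ins, not driver symbols.

#include <stdio.h>

enum bus_type { BUS_SAS, BUS_FC, BUS_SPI };
enum tm_type  { TM_TARGET_RESET, TM_BUS_RESET };

/* stand-in for mptctl_do_taskmgmt(); returns 0 when the firmware accepts
 * and completes the task-management request */
static int do_taskmgmt(enum tm_type type, int bus_id, int target_id)
{
	printf("task management: type=%d bus=%d target=%d\n",
	       type, bus_id, target_id);
	return -1;		/* pretend the request failed */
}

/* mirrors the decision order of the reworked mptctl_timeout_expired() */
static void timeout_expired(enum bus_type bus, int raid_passthrough,
			    int bus_id, int target_id)
{
	int rc = -1;

	if (bus == BUS_SAS && !raid_passthrough)
		rc = do_taskmgmt(TM_TARGET_RESET, bus_id, target_id);
	else
		rc = do_taskmgmt(TM_BUS_RESET, bus_id, 0);

	if (rc)			/* escalate only when task management failed */
		printf("falling back to soft/hard adapter reset\n");
}

int main(void)
{
	timeout_expired(BUS_SAS, 0, 0, 3);
	return 0;
}
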
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 33f7256055b1..b5f03ad81568 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -482,6 +482,7 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0)
if (vtarget) {
vtarget->id = pg0->CurrentTargetID;
vtarget->channel = pg0->CurrentBus;
+ vtarget->deleted = 0;
}
}
*((struct mptfc_rport_info **)rport->dd_data) = ri;
@@ -1092,6 +1093,8 @@ mptfc_setup_reset(struct work_struct *work)
container_of(work, MPT_ADAPTER, fc_setup_reset_work);
u64 pn;
struct mptfc_rport_info *ri;
+ struct scsi_target *starget;
+ VirtTarget *vtarget;
/* reset about to happen, delete (block) all rports */
list_for_each_entry(ri, &ioc->fc_rports, list) {
@@ -1099,6 +1102,12 @@ mptfc_setup_reset(struct work_struct *work)
ri->flags &= ~MPT_RPORT_INFO_FLAGS_REGISTERED;
fc_remote_port_delete(ri->rport); /* won't sleep */
ri->rport = NULL;
+ starget = ri->starget;
+ if (starget) {
+ vtarget = starget->hostdata;
+ if (vtarget)
+ vtarget->deleted = 1;
+ }
pn = (u64)ri->pg0.WWPN.High << 32 |
(u64)ri->pg0.WWPN.Low;
@@ -1119,6 +1128,8 @@ mptfc_rescan_devices(struct work_struct *work)
int ii;
u64 pn;
struct mptfc_rport_info *ri;
+ struct scsi_target *starget;
+ VirtTarget *vtarget;
/* start by tagging all ports as missing */
list_for_each_entry(ri, &ioc->fc_rports, list) {
@@ -1146,6 +1157,12 @@ mptfc_rescan_devices(struct work_struct *work)
MPT_RPORT_INFO_FLAGS_MISSING);
fc_remote_port_delete(ri->rport); /* won't sleep */
ri->rport = NULL;
+ starget = ri->starget;
+ if (starget) {
+ vtarget = starget->hostdata;
+ if (vtarget)
+ vtarget->deleted = 1;
+ }
pn = (u64)ri->pg0.WWPN.High << 32 |
(u64)ri->pg0.WWPN.Low;
@@ -1358,6 +1375,9 @@ mptfc_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
unsigned long flags;
int rc=1;
+ if (ioc->bus_type != FC)
+ return 0;
+
devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n",
ioc->name, event));
@@ -1396,7 +1416,7 @@ mptfc_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
unsigned long flags;
rc = mptscsih_ioc_reset(ioc,reset_phase);
- if (rc == 0)
+ if ((ioc->bus_type != FC) || (!rc))
return rc;
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 76687126b573..ac000e83db0e 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1894,7 +1894,7 @@ static struct scsi_host_template mptsas_driver_template = {
.module = THIS_MODULE,
.proc_name = "mptsas",
.proc_info = mptscsih_proc_info,
- .name = "MPT SPI Host",
+ .name = "MPT SAS Host",
.info = mptscsih_info,
.queuecommand = mptsas_qcmd,
.target_alloc = mptsas_target_alloc,
@@ -2038,11 +2038,13 @@ static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done,
10 * HZ);
- if (!timeleft) {
- /* On timeout reset the board */
+ if (!(ioc->sas_mgmt.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+ error = -ETIME;
mpt_free_msg_frame(ioc, mf);
- mpt_HardResetHandler(ioc, CAN_SLEEP);
- error = -ETIMEDOUT;
+ if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_DID_IOCRESET)
+ goto out_unlock;
+ if (!timeleft)
+ mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
goto out_unlock;
}
@@ -2223,11 +2225,14 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ);
- if (!timeleft) {
- printk(MYIOC_s_ERR_FMT "%s: smp timeout!\n", ioc->name, __func__);
- /* On timeout reset the board */
- mpt_HardResetHandler(ioc, CAN_SLEEP);
- ret = -ETIMEDOUT;
+ if (!(ioc->sas_mgmt.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+ ret = -ETIME;
+ mpt_free_msg_frame(ioc, mf);
+ mf = NULL;
+ if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_DID_IOCRESET)
+ goto unmap;
+ if (!timeleft)
+ mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
goto unmap;
}
mf = NULL;
@@ -2518,6 +2523,12 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
error = mpt_config(ioc, &cfg);
+
+ if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) {
+ error = -ENODEV;
+ goto out_free_consistent;
+ }
+
if (error)
goto out_free_consistent;
@@ -2594,14 +2605,14 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info,
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
error = mpt_config(ioc, &cfg);
- if (error)
- goto out_free_consistent;
-
- if (!buffer->NumPhys) {
+ if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) {
error = -ENODEV;
goto out_free_consistent;
}
+ if (error)
+ goto out_free_consistent;
+
/* save config data */
port_info->num_phys = (buffer->NumPhys) ? buffer->NumPhys : 1;
port_info->phy_info = kcalloc(port_info->num_phys,
@@ -2677,7 +2688,7 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) {
error = -ENODEV;
- goto out;
+ goto out_free_consistent;
}
if (error)
@@ -2833,7 +2844,7 @@ mptsas_exp_repmanufacture_info(MPT_ADAPTER *ioc,
if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_DID_IOCRESET)
goto out_free;
if (!timeleft)
- mpt_HardResetHandler(ioc, CAN_SLEEP);
+ mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
goto out_free;
}
@@ -4098,6 +4109,7 @@ mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id)
cfg.pageAddr = (channel << 8) + id;
cfg.cfghdr.hdr = &hdr;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
if (mpt_config(ioc, &cfg) != 0)
goto out;
@@ -4717,7 +4729,7 @@ mptsas_broadcast_primative_work(struct fw_event_work *fw_event)
if (issue_reset) {
printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
ioc->name, __func__);
- mpt_HardResetHandler(ioc, CAN_SLEEP);
+ mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
}
mptsas_free_fw_event(ioc, fw_event);
}
@@ -4779,6 +4791,9 @@ mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply)
struct fw_event_work *fw_event;
unsigned long delay;
+ if (ioc->bus_type != SAS)
+ return 0;
+
/* events turned off due to host reset or driver unloading */
if (ioc->fw_events_off)
return 0;
@@ -5073,6 +5088,12 @@ static void __devexit mptsas_remove(struct pci_dev *pdev)
struct mptsas_portinfo *p, *n;
int i;
+ if (!ioc->sh) {
+ printk(MYIOC_s_INFO_FMT "IOC is in Target mode\n", ioc->name);
+ mpt_detach(pdev);
+ return;
+ }
+
mptsas_shutdown(pdev);
mptsas_del_device_components(ioc);
diff --git a/drivers/message/fusion/mptsas.h b/drivers/message/fusion/mptsas.h
index 953c2bfcf6aa..7b249edbda78 100644
--- a/drivers/message/fusion/mptsas.h
+++ b/drivers/message/fusion/mptsas.h
@@ -110,7 +110,7 @@ struct fw_event_work {
MPT_ADAPTER *ioc;
u32 event;
u8 retries;
- u8 event_data[1];
+ u8 __attribute__((aligned(4))) event_data[1];
};
struct mptsas_discovery_event {
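
The one-line mptsas.h change above forces the trailing event_data[] member of struct fw_event_work to 4-byte alignment, presumably so the MPI event payload copied into it can be accessed through structures with 32-bit members without misaligned loads. A minimal stand-alone illustration of what the attribute does to the member offset (stand-in structs, not the driver's):

#include <stdio.h>
#include <stddef.h>

struct fw_event_plain {
	void *ioc;
	unsigned int event;
	unsigned char retries;
	unsigned char event_data[1];
};

struct fw_event_aligned {
	void *ioc;
	unsigned int event;
	unsigned char retries;
	unsigned char __attribute__((aligned(4))) event_data[1];
};

int main(void)
{
	/* without the attribute event_data lands right after 'retries';
	 * with it the compiler pads so the payload starts on a 4-byte boundary */
	printf("plain:   event_data at offset %zu\n",
	       offsetof(struct fw_event_plain, event_data));
	printf("aligned: event_data at offset %zu\n",
	       offsetof(struct fw_event_aligned, event_data));
	return 0;
}
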
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 6796597dcee0..7bd4c0fc23cc 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1149,11 +1149,6 @@ mptscsih_remove(struct pci_dev *pdev)
MPT_SCSI_HOST *hd;
int sz1;
- if(!host) {
- mpt_detach(pdev);
- return;
- }
-
scsi_remove_host(host);
if((hd = shost_priv(host)) == NULL)
@@ -1711,7 +1706,7 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun,
if (issue_hard_reset) {
printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
ioc->name, __func__);
- retval = mpt_HardResetHandler(ioc, CAN_SLEEP);
+ retval = mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
mpt_free_msg_frame(ioc, mf);
}
@@ -1728,6 +1723,7 @@ mptscsih_get_tm_timeout(MPT_ADAPTER *ioc)
case FC:
return 40;
case SAS:
+ return 30;
case SPI:
default:
return 10;
@@ -1777,7 +1773,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
ioc->name, SCpnt));
SCpnt->result = DID_NO_CONNECT << 16;
SCpnt->scsi_done(SCpnt);
- retval = 0;
+ retval = SUCCESS;
goto out;
}
@@ -1792,6 +1788,17 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
goto out;
}
+ /* Task aborts are not supported for volumes.
+ */
+ if (vdevice->vtarget->raidVolume) {
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "task abort: raid volume (sc=%p)\n",
+ ioc->name, SCpnt));
+ SCpnt->result = DID_RESET << 16;
+ retval = FAILED;
+ goto out;
+ }
+
/* Find this command
*/
if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(ioc, SCpnt)) < 0) {
@@ -1991,7 +1998,7 @@ mptscsih_host_reset(struct scsi_cmnd *SCpnt)
/* If our attempts to reset the host failed, then return a failed
* status. The host will be taken off line by the SCSI mid-layer.
*/
- retval = mpt_HardResetHandler(ioc, CAN_SLEEP);
+ retval = mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
if (retval < 0)
status = FAILED;
else
@@ -2344,6 +2351,8 @@ mptscsih_slave_destroy(struct scsi_device *sdev)
starget = scsi_target(sdev);
vtarget = starget->hostdata;
vdevice = sdev->hostdata;
+ if (!vdevice)
+ return;
mptscsih_search_running_cmds(hd, vdevice);
vtarget->num_luns--;
@@ -3040,7 +3049,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
if (!timeleft) {
printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
ioc->name, __func__);
- mpt_HardResetHandler(ioc, CAN_SLEEP);
+ mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
mpt_free_msg_frame(ioc, mf);
}
goto out;
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index e44365193fdf..1abaa5d01ae3 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -210,6 +210,10 @@ mptspi_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtTarget *target,
target->maxOffset = offset;
target->maxWidth = width;
+ spi_min_period(scsi_target(sdev)) = factor;
+ spi_max_offset(scsi_target(sdev)) = offset;
+ spi_max_width(scsi_target(sdev)) = width;
+
target->tflags |= MPT_TARGET_FLAGS_VALID_NEGO;
/* Disable unused features.
@@ -558,6 +562,7 @@ static int mptspi_read_spi_device_pg0(struct scsi_target *starget,
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
cfg.dir = 0;
cfg.pageAddr = starget->id;
+ cfg.timeout = 60;
if (mpt_config(ioc, &cfg)) {
starget_printk(KERN_ERR, starget, MYIOC_s_FMT "mpt_config failed\n", ioc->name);
@@ -1152,6 +1157,9 @@ mptspi_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
u8 event = le32_to_cpu(pEvReply->Event) & 0xFF;
struct _MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
+ if (ioc->bus_type != SPI)
+ return 0;
+
if (hd && event == MPI_EVENT_INTEGRATED_RAID) {
int reason
= (le32_to_cpu(pEvReply->Data[0]) & 0x00FF0000) >> 16;
@@ -1283,6 +1291,8 @@ mptspi_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
int rc;
rc = mptscsih_ioc_reset(ioc, reset_phase);
+ if ((ioc->bus_type != SPI) || (!rc))
+ return rc;
/* only try to do a renegotiation if we're properly set up
* if we get an ioc fault on bringup, ioc->sh will be NULL */
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index 11073fa3d9f4..d33693c13368 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -314,22 +314,22 @@ static int i2o_cfg_swul(unsigned long arg)
int ret = 0;
if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer)))
- goto return_fault;
+ return -EFAULT;
if (get_user(swlen, kxfer.swlen) < 0)
- goto return_fault;
+ return -EFAULT;
if (get_user(maxfrag, kxfer.maxfrag) < 0)
- goto return_fault;
+ return -EFAULT;
if (get_user(curfrag, kxfer.curfrag) < 0)
- goto return_fault;
+ return -EFAULT;
if (curfrag == maxfrag)
fragsize = swlen - (maxfrag - 1) * 8192;
if (!kxfer.buf)
- goto return_fault;
+ return -EFAULT;
c = i2o_find_iop(kxfer.iop);
if (!c)
@@ -373,12 +373,8 @@ static int i2o_cfg_swul(unsigned long arg)
i2o_dma_free(&c->pdev->dev, &buffer);
- return_ret:
return ret;
- return_fault:
- ret = -EFAULT;
- goto return_ret;
-};
+}
static int i2o_cfg_swdel(unsigned long arg)
{
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index 6a14d2b1ccf0..2c65a2c57294 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -173,33 +173,35 @@ static struct resource regulator_resources[] = {
PM8607_REG_RESOURCE(LDO9, LDO9),
PM8607_REG_RESOURCE(LDO10, LDO10),
PM8607_REG_RESOURCE(LDO12, LDO12),
+ PM8607_REG_RESOURCE(VIBRATOR_SET, VIBRATOR_SET),
PM8607_REG_RESOURCE(LDO14, LDO14),
};
-#define PM8607_REG_DEVS(_name, _id) \
+#define PM8607_REG_DEVS(_id) \
{ \
- .name = "88pm8607-" #_name, \
+ .name = "88pm860x-regulator", \
.num_resources = 1, \
.resources = &regulator_resources[PM8607_ID_##_id], \
.id = PM8607_ID_##_id, \
}
static struct mfd_cell regulator_devs[] = {
- PM8607_REG_DEVS(buck1, BUCK1),
- PM8607_REG_DEVS(buck2, BUCK2),
- PM8607_REG_DEVS(buck3, BUCK3),
- PM8607_REG_DEVS(ldo1, LDO1),
- PM8607_REG_DEVS(ldo2, LDO2),
- PM8607_REG_DEVS(ldo3, LDO3),
- PM8607_REG_DEVS(ldo4, LDO4),
- PM8607_REG_DEVS(ldo5, LDO5),
- PM8607_REG_DEVS(ldo6, LDO6),
- PM8607_REG_DEVS(ldo7, LDO7),
- PM8607_REG_DEVS(ldo8, LDO8),
- PM8607_REG_DEVS(ldo9, LDO9),
- PM8607_REG_DEVS(ldo10, LDO10),
- PM8607_REG_DEVS(ldo12, LDO12),
- PM8607_REG_DEVS(ldo14, LDO14),
+ PM8607_REG_DEVS(BUCK1),
+ PM8607_REG_DEVS(BUCK2),
+ PM8607_REG_DEVS(BUCK3),
+ PM8607_REG_DEVS(LDO1),
+ PM8607_REG_DEVS(LDO2),
+ PM8607_REG_DEVS(LDO3),
+ PM8607_REG_DEVS(LDO4),
+ PM8607_REG_DEVS(LDO5),
+ PM8607_REG_DEVS(LDO6),
+ PM8607_REG_DEVS(LDO7),
+ PM8607_REG_DEVS(LDO8),
+ PM8607_REG_DEVS(LDO9),
+ PM8607_REG_DEVS(LDO10),
+ PM8607_REG_DEVS(LDO12),
+ PM8607_REG_DEVS(LDO13),
+ PM8607_REG_DEVS(LDO14),
};
struct pm860x_irq_data {
@@ -564,7 +566,7 @@ out:
return ret;
}
-static void __devexit device_irq_exit(struct pm860x_chip *chip)
+static void device_irq_exit(struct pm860x_chip *chip)
{
if (chip->core_irq)
free_irq(chip->core_irq, chip);
@@ -701,7 +703,7 @@ out:
return;
}
-int pm860x_device_init(struct pm860x_chip *chip,
+int __devinit pm860x_device_init(struct pm860x_chip *chip,
struct pm860x_platform_data *pdata)
{
chip->core_irq = 0;
@@ -729,7 +731,7 @@ int pm860x_device_init(struct pm860x_chip *chip,
return 0;
}
-void pm860x_device_exit(struct pm860x_chip *chip)
+void __devexit pm860x_device_exit(struct pm860x_chip *chip)
{
device_irq_exit(chip);
mfd_remove_devices(chip->dev);
diff --git a/drivers/mfd/88pm860x-i2c.c b/drivers/mfd/88pm860x-i2c.c
index 4a6e7186334e..b0bade1881d4 100644
--- a/drivers/mfd/88pm860x-i2c.c
+++ b/drivers/mfd/88pm860x-i2c.c
@@ -202,6 +202,7 @@ static int __devexit pm860x_remove(struct i2c_client *client)
i2c_unregister_device(chip->companion);
i2c_set_clientdata(chip->companion, NULL);
i2c_set_clientdata(chip->client, NULL);
+ i2c_set_clientdata(client, NULL);
kfree(chip);
return 0;
}
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 2a5a0b78f84e..fea62a95f13f 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -2,8 +2,14 @@
# Multifunction miscellaneous devices
#
-menu "Multifunction device drivers"
+menuconfig MFD_SUPPORT
+ bool "Multifunction device drivers"
depends on HAS_IOMEM
+ default y
+ help
+ Configure MFD device drivers.
+
+if MFD_SUPPORT
config MFD_CORE
tristate
@@ -53,6 +59,10 @@ config MFD_SH_MOBILE_SDHI
This driver supports the SDHI hardware block found in many
SuperH Mobile SoCs.
+config MFD_DAVINCI_VOICECODEC
+ tristate
+ select MFD_CORE
+
config MFD_DM355EVM_MSP
bool "DaVinci DM355 EVM microcontroller"
depends on I2C && MACH_DAVINCI_DM355_EVM
@@ -111,6 +121,18 @@ config TPS65010
This driver can also be built as a module. If so, the module
will be called tps65010.
+config TPS6507X
+ tristate "TPS6507x Power Management / Touch Screen chips"
+ select MFD_CORE
+ depends on I2C
+ help
+ If you say yes here you get support for the TPS6507x series of
+ Power Management / Touch Screen chips. These include voltage
+ regulators, lithium ion/polymer battery charging, touch screen
+ and other features that are often used in portable devices.
+ This driver can also be built as a module. If so, the module
+ will be called tps6507x.
+
config MENELAUS
bool "Texas Instruments TWL92330/Menelaus PM chip"
depends on I2C=y && ARCH_OMAP2
@@ -154,6 +176,17 @@ config TWL4030_CODEC
select MFD_CORE
default n
+config MFD_TC35892
+ bool "Support Toshiba TC35892"
+ depends on I2C=y && GENERIC_HARDIRQS
+ select MFD_CORE
+ help
+ Support for the Toshiba TC35892 I/O Expander.
+
+ This driver provides common support for accessing the device,
+ additional drivers must be enabled in order to use the
+ functionality of the device.
+
config MFD_TMIO
bool
default n
@@ -297,9 +330,9 @@ config MFD_WM8350_I2C
selected to enable support for the functionality of the chip.
config MFD_WM8994
- tristate "Support Wolfson Microelectronics WM8994"
+ bool "Support Wolfson Microelectronics WM8994"
select MFD_CORE
- depends on I2C
+ depends on I2C=y && GENERIC_HARDIRQS
help
The WM8994 is a highly integrated hi-fi CODEC designed for
smartphone applications. As well as audio functionality it
@@ -341,9 +374,19 @@ config PCF50633_GPIO
Say yes here if you want to include support GPIO for pins on
the PCF50633 chip.
+config ABX500_CORE
+ bool "ST-Ericsson ABX500 Mixed Signal Circuit register functions"
+ default y if ARCH_U300
+ help
+ Say yes here if you have the ABX500 Mixed Signal IC family
+ chips. This core driver exposes register access functions.
+ Functionality-specific drivers using these functions can
+ remain unchanged when the IC changes. Binding of the functions to
+ actual register access is done by the IC core driver.
+
config AB3100_CORE
bool "ST-Ericsson AB3100 Mixed Signal Circuit core functions"
- depends on I2C=y
+ depends on I2C=y && ABX500_CORE
default y if ARCH_U300
help
Select this to enable the AB3100 Mixed Signal IC core
@@ -380,6 +423,20 @@ config AB4500_CORE
read/write functions for the devices to get access to this chip.
This chip embeds various other multimedia functionalities as well.
+config AB3550_CORE
+ bool "ST-Ericsson AB3550 Mixed Signal Circuit core functions"
+ select MFD_CORE
+ depends on I2C=y && GENERIC_HARDIRQS && ABX500_CORE
+ help
+ Select this to enable the AB3550 Mixed Signal IC core
+ functionality. This connects to an AB3550 on the I2C bus
+ and exposes a number of symbols needed for dependent devices
+ to read and write registers and subscribe to events from
+ this multi-functional IC. This is needed to use other features
+ of the AB3550 such as battery-backed RTC, charging control,
+ LEDs, vibrator, system power and temperature, power management
+ and ALSA sound.
+
config MFD_TIMBERDALE
tristate "Support for the Timberdale FPGA"
select MFD_CORE
@@ -399,7 +456,26 @@ config LPC_SCH
LPC bridge function of the Intel SCH provides support for
System Management Bus and General Purpose I/O.
-endmenu
+config MFD_RDC321X
+ tristate "Support for RDC-R321x southbridge"
+ select MFD_CORE
+ depends on PCI
+ help
+ Say yes here if you want to have support for the RDC R-321x SoC
+ southbridge which provides access to GPIOs and Watchdog using the
+ southbridge PCI device configuration space.
+
+config MFD_JANZ_CMODIO
+ tristate "Support for Janz CMOD-IO PCI MODULbus Carrier Board"
+ select MFD_CORE
+ depends on PCI
+ help
+ This is the core driver for the Janz CMOD-IO PCI MODULbus
+ carrier board. This device is a PCI to MODULbus bridge which may
+ host many different types of MODULbus daughterboards, including
+ CAN and GPIO controllers.
+
+endif # MFD_SUPPORT
menu "Multimedia Capabilities Port drivers"
depends on ARCH_SA1100
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 22715add99a7..4c684c137d43 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -12,8 +12,10 @@ obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o
obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o
obj-$(CONFIG_HTC_I2CPLD) += htc-i2cpld.o
+obj-$(CONFIG_MFD_DAVINCI_VOICECODEC) += davinci_voicecodec.o
obj-$(CONFIG_MFD_DM355EVM_MSP) += dm355evm_msp.o
+obj-$(CONFIG_MFD_TC35892) += tc35892.o
obj-$(CONFIG_MFD_T7L66XB) += t7l66xb.o tmio_core.o
obj-$(CONFIG_MFD_TC6387XB) += tc6387xb.o tmio_core.o
obj-$(CONFIG_MFD_TC6393XB) += tc6393xb.o tmio_core.o
@@ -25,9 +27,10 @@ wm8350-objs := wm8350-core.o wm8350-regmap.o wm8350-gpio.o
wm8350-objs += wm8350-irq.o
obj-$(CONFIG_MFD_WM8350) += wm8350.o
obj-$(CONFIG_MFD_WM8350_I2C) += wm8350-i2c.o
-obj-$(CONFIG_MFD_WM8994) += wm8994-core.o
+obj-$(CONFIG_MFD_WM8994) += wm8994-core.o wm8994-irq.o
obj-$(CONFIG_TPS65010) += tps65010.o
+obj-$(CONFIG_TPS6507X) += tps6507x.o
obj-$(CONFIG_MENELAUS) += menelaus.o
obj-$(CONFIG_TWL4030_CORE) += twl-core.o twl4030-irq.o twl6030-irq.o
@@ -57,9 +60,13 @@ obj-$(CONFIG_MFD_MAX8925) += max8925.o
obj-$(CONFIG_MFD_PCF50633) += pcf50633-core.o
obj-$(CONFIG_PCF50633_ADC) += pcf50633-adc.o
obj-$(CONFIG_PCF50633_GPIO) += pcf50633-gpio.o
+obj-$(CONFIG_ABX500_CORE) += abx500-core.o
obj-$(CONFIG_AB3100_CORE) += ab3100-core.o
obj-$(CONFIG_AB3100_OTP) += ab3100-otp.o
obj-$(CONFIG_AB4500_CORE) += ab4500-core.o
+obj-$(CONFIG_AB3550_CORE) += ab3550-core.o
obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o
obj-$(CONFIG_PMIC_ADP5520) += adp5520.o
-obj-$(CONFIG_LPC_SCH) += lpc_sch.o
\ No newline at end of file
+obj-$(CONFIG_LPC_SCH) += lpc_sch.o
+obj-$(CONFIG_MFD_RDC321X) += rdc321x-southbridge.o
+obj-$(CONFIG_MFD_JANZ_CMODIO) += janz-cmodio.o
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
index e4ca5909e424..53ebfee548fa 100644
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -19,7 +19,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
-#include <linux/mfd/ab3100.h>
+#include <linux/mfd/abx500.h>
/* These are the only registers inside AB3100 used in this main file */
@@ -59,24 +59,15 @@
* The AB3100 is usually assigned address 0x48 (7-bit)
* The chip is defined in the platform i2c_board_data section.
*/
-
-u8 ab3100_get_chip_type(struct ab3100 *ab3100)
+static int ab3100_get_chip_id(struct device *dev)
{
- u8 chip = ABUNKNOWN;
-
- switch (ab3100->chip_id & 0xf0) {
- case 0xa0:
- chip = AB3000;
- break;
- case 0xc0:
- chip = AB3100;
- break;
- }
- return chip;
+ struct ab3100 *ab3100 = dev_get_drvdata(dev->parent);
+
+ return (int)ab3100->chip_id;
}
-EXPORT_SYMBOL(ab3100_get_chip_type);
-int ab3100_set_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 regval)
+static int ab3100_set_register_interruptible(struct ab3100 *ab3100,
+ u8 reg, u8 regval)
{
u8 regandval[2] = {reg, regval};
int err;
@@ -108,8 +99,14 @@ int ab3100_set_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 regval)
mutex_unlock(&ab3100->access_mutex);
return err;
}
-EXPORT_SYMBOL(ab3100_set_register_interruptible);
+static int set_register_interruptible(struct device *dev,
+ u8 bank, u8 reg, u8 value)
+{
+ struct ab3100 *ab3100 = dev_get_drvdata(dev->parent);
+
+ return ab3100_set_register_interruptible(ab3100, reg, value);
+}
/*
* The test registers exist at an I2C bus address up one
@@ -148,8 +145,8 @@ static int ab3100_set_test_register_interruptible(struct ab3100 *ab3100,
return err;
}
-
-int ab3100_get_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 *regval)
+static int ab3100_get_register_interruptible(struct ab3100 *ab3100,
+ u8 reg, u8 *regval)
{
int err;
@@ -203,10 +200,16 @@ int ab3100_get_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 *regval)
mutex_unlock(&ab3100->access_mutex);
return err;
}
-EXPORT_SYMBOL(ab3100_get_register_interruptible);
+static int get_register_interruptible(struct device *dev, u8 bank, u8 reg,
+ u8 *value)
+{
+ struct ab3100 *ab3100 = dev_get_drvdata(dev->parent);
+
+ return ab3100_get_register_interruptible(ab3100, reg, value);
+}
-int ab3100_get_register_page_interruptible(struct ab3100 *ab3100,
+static int ab3100_get_register_page_interruptible(struct ab3100 *ab3100,
u8 first_reg, u8 *regvals, u8 numregs)
{
int err;
@@ -260,10 +263,17 @@ int ab3100_get_register_page_interruptible(struct ab3100 *ab3100,
mutex_unlock(&ab3100->access_mutex);
return err;
}
-EXPORT_SYMBOL(ab3100_get_register_page_interruptible);
+static int get_register_page_interruptible(struct device *dev, u8 bank,
+ u8 first_reg, u8 *regvals, u8 numregs)
+{
+ struct ab3100 *ab3100 = dev_get_drvdata(dev->parent);
+
+ return ab3100_get_register_page_interruptible(ab3100,
+ first_reg, regvals, numregs);
+}
-int ab3100_mask_and_set_register_interruptible(struct ab3100 *ab3100,
+static int ab3100_mask_and_set_register_interruptible(struct ab3100 *ab3100,
u8 reg, u8 andmask, u8 ormask)
{
u8 regandval[2] = {reg, 0};
@@ -331,8 +341,15 @@ int ab3100_mask_and_set_register_interruptible(struct ab3100 *ab3100,
mutex_unlock(&ab3100->access_mutex);
return err;
}
-EXPORT_SYMBOL(ab3100_mask_and_set_register_interruptible);
+static int mask_and_set_register_interruptible(struct device *dev, u8 bank,
+ u8 reg, u8 bitmask, u8 bitvalues)
+{
+ struct ab3100 *ab3100 = dev_get_drvdata(dev->parent);
+
+ return ab3100_mask_and_set_register_interruptible(ab3100,
+ reg, bitmask, (bitmask & bitvalues));
+}
/*
* Register a simple callback for handling any AB3100 events.
@@ -357,15 +374,27 @@ int ab3100_event_unregister(struct ab3100 *ab3100,
EXPORT_SYMBOL(ab3100_event_unregister);
-int ab3100_event_registers_startup_state_get(struct ab3100 *ab3100,
- u32 *fatevent)
+static int ab3100_event_registers_startup_state_get(struct device *dev,
+ u8 *event)
{
+ struct ab3100 *ab3100 = dev_get_drvdata(dev->parent);
if (!ab3100->startup_events_read)
return -EAGAIN; /* Try again later */
- *fatevent = ab3100->startup_events;
+ memcpy(event, ab3100->startup_events, 3);
return 0;
}
-EXPORT_SYMBOL(ab3100_event_registers_startup_state_get);
+
+static struct abx500_ops ab3100_ops = {
+ .get_chip_id = ab3100_get_chip_id,
+ .set_register = set_register_interruptible,
+ .get_register = get_register_interruptible,
+ .get_register_page = get_register_page_interruptible,
+ .set_register_page = NULL,
+ .mask_and_set_register = mask_and_set_register_interruptible,
+ .event_registers_startup_state_get =
+ ab3100_event_registers_startup_state_get,
+ .startup_irq_enabled = NULL,
+};
/*
* This is a threaded interrupt handler so we can make some
@@ -390,7 +419,9 @@ static irqreturn_t ab3100_irq_handler(int irq, void *data)
event_regs[2];
if (!ab3100->startup_events_read) {
- ab3100->startup_events = fatevent;
+ ab3100->startup_events[0] = event_regs[0];
+ ab3100->startup_events[1] = event_regs[1];
+ ab3100->startup_events[2] = event_regs[2];
ab3100->startup_events_read = true;
}
/*
@@ -703,7 +734,8 @@ static int __init ab3100_setup(struct ab3100 *ab3100)
dev_warn(ab3100->dev,
"AB3100 P1E variant detected, "
"forcing chip to 32KHz\n");
- err = ab3100_set_test_register_interruptible(ab3100, 0x02, 0x08);
+ err = ab3100_set_test_register_interruptible(ab3100,
+ 0x02, 0x08);
}
exit_no_setup:
@@ -898,6 +930,10 @@ static int __init ab3100_probe(struct i2c_client *client,
if (err)
goto exit_no_irq;
+ err = abx500_register_ops(&client->dev, &ab3100_ops);
+ if (err)
+ goto exit_no_ops;
+
/* Set parent and a pointer back to the container in device data */
for (i = 0; i < ARRAY_SIZE(ab3100_platform_devs); i++) {
ab3100_platform_devs[i]->dev.parent =
@@ -915,11 +951,13 @@ static int __init ab3100_probe(struct i2c_client *client,
return 0;
+ exit_no_ops:
exit_no_irq:
exit_no_setup:
i2c_unregister_device(ab3100->testreg_client);
exit_no_testreg_client:
exit_no_detect:
+ i2c_set_clientdata(client, NULL);
kfree(ab3100);
return err;
}
@@ -941,6 +979,7 @@ static int __exit ab3100_remove(struct i2c_client *client)
* their notifiers so deactivate IRQ
*/
free_irq(client->irq, ab3100);
+ i2c_set_clientdata(client, NULL);
kfree(ab3100);
return 0;
}
diff --git a/drivers/mfd/ab3100-otp.c b/drivers/mfd/ab3100-otp.c
index 2d14655fdebd..63d2b727ddbb 100644
--- a/drivers/mfd/ab3100-otp.c
+++ b/drivers/mfd/ab3100-otp.c
@@ -12,7 +12,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/platform_device.h>
-#include <linux/mfd/ab3100.h>
+#include <linux/mfd/abx500.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
@@ -30,7 +30,6 @@
/**
* struct ab3100_otp
* @dev containing device
- * @ab3100 a pointer to the parent ab3100 device struct
* @locked whether the OTP is locked, after locking, no more bits
* can be changed but before locking it is still possible
* to change bits from 1->0.
@@ -49,7 +48,6 @@
*/
struct ab3100_otp {
struct device *dev;
- struct ab3100 *ab3100;
bool locked;
u32 freq;
bool paf;
@@ -63,19 +61,19 @@ struct ab3100_otp {
static int __init ab3100_otp_read(struct ab3100_otp *otp)
{
- struct ab3100 *ab = otp->ab3100;
u8 otpval[8];
u8 otpp;
int err;
- err = ab3100_get_register_interruptible(ab, AB3100_OTPP, &otpp);
+ err = abx500_get_register_interruptible(otp->dev, 0,
+ AB3100_OTPP, &otpp);
if (err) {
dev_err(otp->dev, "unable to read OTPP register\n");
return err;
}
- err = ab3100_get_register_page_interruptible(ab, AB3100_OTP0,
- otpval, 8);
+ err = abx500_get_register_page_interruptible(otp->dev, 0,
+ AB3100_OTP0, otpval, 8);
if (err) {
dev_err(otp->dev, "unable to read OTP register page\n");
return err;
@@ -197,7 +195,6 @@ static int __init ab3100_otp_probe(struct platform_device *pdev)
otp->dev = &pdev->dev;
/* Replace platform data coming in with a local struct */
- otp->ab3100 = platform_get_drvdata(pdev);
platform_set_drvdata(pdev, otp);
err = ab3100_otp_read(otp);
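
The ab3100 rework above stops exporting chip-specific register accessors and instead registers a struct abx500_ops with abx500_register_ops(); consumers such as the reworked ab3100-otp driver now go through abx500_get_register_interruptible() and friends, passing their own device so the core resolves the ops from the parent. Below is a hedged sketch (a probe fragment, not a complete driver) of how a hypothetical child platform driver could read one register through that abstraction; the bank and register values are made up for illustration.

#include <linux/platform_device.h>
#include <linux/mfd/abx500.h>

#define EXAMPLE_BANK	0	/* hypothetical bank number */
#define EXAMPLE_REG	0x20	/* hypothetical register address */

static int example_probe(struct platform_device *pdev)
{
	u8 value;
	int err;

	/* pdev is a child of the abx500 core device; the abstraction looks up
	 * the ops registered for the parent and routes the access through them */
	err = abx500_get_register_interruptible(&pdev->dev, EXAMPLE_BANK,
						EXAMPLE_REG, &value);
	if (err)
		return err;

	dev_info(&pdev->dev, "register 0x%02x reads 0x%02x\n",
		 EXAMPLE_REG, value);
	return 0;
}
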
diff --git a/drivers/mfd/ab3550-core.c b/drivers/mfd/ab3550-core.c
new file mode 100644
index 000000000000..1060f8e1c40a
--- /dev/null
+++ b/drivers/mfd/ab3550-core.c
@@ -0,0 +1,1401 @@
+/*
+ * Copyright (C) 2007-2010 ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ * Low-level core for exclusive access to the AB3550 IC on the I2C bus
+ * and some basic chip-configuration.
+ * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com>
+ * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com>
+ * Author: Mattias Wallin <mattias.wallin@stericsson.com>
+ * Author: Rickard Andersson <rickard.andersson@stericsson.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/random.h>
+#include <linux/workqueue.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/mfd/abx500.h>
+#include <linux/list.h>
+#include <linux/bitops.h>
+#include <linux/spinlock.h>
+#include <linux/mfd/core.h>
+
+#define AB3550_NAME_STRING "ab3550"
+#define AB3550_ID_FORMAT_STRING "AB3550 %s"
+#define AB3550_NUM_BANKS 2
+#define AB3550_NUM_EVENT_REG 5
+
+/* These are the only registers inside AB3550 used in this main file */
+
+/* Chip ID register */
+#define AB3550_CID_REG 0x20
+
+/* Interrupt event registers */
+#define AB3550_EVENT_BANK 0
+#define AB3550_EVENT_REG 0x22
+
+/* Read/write operation values. */
+#define AB3550_PERM_RD (0x01)
+#define AB3550_PERM_WR (0x02)
+
+/* Read/write permissions. */
+#define AB3550_PERM_RO (AB3550_PERM_RD)
+#define AB3550_PERM_RW (AB3550_PERM_RD | AB3550_PERM_WR)
+
+/**
+ * struct ab3550
+ * @access_mutex: lock out concurrent accesses to the AB registers
+ * @i2c_client: I2C client for this chip
+ * @chip_name: name of this chip variant
+ * @chip_id: 8 bit chip ID for this chip variant
+ * @mask_work: a worker for writing to mask registers
+ * @event_lock: a lock to protect the event_mask
+ * @event_mask: a local copy of the mask event registers
+ * @startup_events: a copy of the first reading of the event registers
+ * @startup_events_read: whether the first events have been read
+ */
+struct ab3550 {
+ struct mutex access_mutex;
+ struct i2c_client *i2c_client[AB3550_NUM_BANKS];
+ char chip_name[32];
+ u8 chip_id;
+ struct work_struct mask_work;
+ spinlock_t event_lock;
+ u8 event_mask[AB3550_NUM_EVENT_REG];
+ u8 startup_events[AB3550_NUM_EVENT_REG];
+ bool startup_events_read;
+#ifdef CONFIG_DEBUG_FS
+ unsigned int debug_bank;
+ unsigned int debug_address;
+#endif
+};
+
+/**
+ * struct ab3550_reg_range
+ * @first: the first address of the range
+ * @last: the last address of the range
+ * @perm: access permissions for the range
+ */
+struct ab3550_reg_range {
+ u8 first;
+ u8 last;
+ u8 perm;
+};
+
+/**
+ * struct ab3550_reg_ranges
+ * @count: the number of ranges in the list
+ * @range: the list of register ranges
+ */
+struct ab3550_reg_ranges {
+ u8 count;
+ const struct ab3550_reg_range *range;
+};
+
+/*
+ * Permissible register ranges for reading and writing per device and bank.
+ *
+ * The ranges must be listed in increasing address order, and no overlaps are
+ * allowed. It is assumed that write permission implies read permission
+ * (i.e. only RO and RW permissions should be used). Ranges with write
+ * permission must not be split up.
+ */
+
+#define NO_RANGE {.count = 0, .range = NULL,}
+
+static struct
+ab3550_reg_ranges ab3550_reg_ranges[AB3550_NUM_DEVICES][AB3550_NUM_BANKS] = {
+ [AB3550_DEVID_DAC] = {
+ NO_RANGE,
+ {
+ .count = 2,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0xb0,
+ .last = 0xba,
+ .perm = AB3550_PERM_RW,
+ },
+ {
+ .first = 0xbc,
+ .last = 0xc3,
+ .perm = AB3550_PERM_RW,
+ },
+ },
+ },
+ },
+ [AB3550_DEVID_LEDS] = {
+ NO_RANGE,
+ {
+ .count = 2,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x5a,
+ .last = 0x88,
+ .perm = AB3550_PERM_RW,
+ },
+ {
+ .first = 0x8a,
+ .last = 0xad,
+ .perm = AB3550_PERM_RW,
+ },
+ }
+ },
+ },
+ [AB3550_DEVID_POWER] = {
+ {
+ .count = 1,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x21,
+ .last = 0x21,
+ .perm = AB3550_PERM_RO,
+ },
+ }
+ },
+ NO_RANGE,
+ },
+ [AB3550_DEVID_REGULATORS] = {
+ {
+ .count = 1,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x69,
+ .last = 0xa3,
+ .perm = AB3550_PERM_RW,
+ },
+ }
+ },
+ {
+ .count = 1,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x14,
+ .last = 0x16,
+ .perm = AB3550_PERM_RW,
+ },
+ }
+ },
+ },
+ [AB3550_DEVID_SIM] = {
+ {
+ .count = 1,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x21,
+ .last = 0x21,
+ .perm = AB3550_PERM_RO,
+ },
+ }
+ },
+ {
+ .count = 1,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x14,
+ .last = 0x17,
+ .perm = AB3550_PERM_RW,
+ },
+ }
+
+ },
+ },
+ [AB3550_DEVID_UART] = {
+ NO_RANGE,
+ NO_RANGE,
+ },
+ [AB3550_DEVID_RTC] = {
+ {
+ .count = 1,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x0c,
+ .perm = AB3550_PERM_RW,
+ },
+ }
+ },
+ NO_RANGE,
+ },
+ [AB3550_DEVID_CHARGER] = {
+ {
+ .count = 2,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x10,
+ .last = 0x1a,
+ .perm = AB3550_PERM_RW,
+ },
+ {
+ .first = 0x21,
+ .last = 0x21,
+ .perm = AB3550_PERM_RO,
+ },
+ }
+ },
+ NO_RANGE,
+ },
+ [AB3550_DEVID_ADC] = {
+ NO_RANGE,
+ {
+ .count = 1,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x20,
+ .last = 0x56,
+ .perm = AB3550_PERM_RW,
+ },
+
+ }
+ },
+ },
+ [AB3550_DEVID_FUELGAUGE] = {
+ {
+ .count = 1,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x21,
+ .last = 0x21,
+ .perm = AB3550_PERM_RO,
+ },
+ }
+ },
+ {
+ .count = 1,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x0e,
+ .perm = AB3550_PERM_RW,
+ },
+ }
+ },
+ },
+ [AB3550_DEVID_VIBRATOR] = {
+ NO_RANGE,
+ {
+ .count = 1,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x10,
+ .last = 0x13,
+ .perm = AB3550_PERM_RW,
+ },
+
+ }
+ },
+ },
+ [AB3550_DEVID_CODEC] = {
+ {
+ .count = 2,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x31,
+ .last = 0x63,
+ .perm = AB3550_PERM_RW,
+ },
+ {
+ .first = 0x65,
+ .last = 0x68,
+ .perm = AB3550_PERM_RW,
+ },
+ }
+ },
+ NO_RANGE,
+ },
+};
+
+static struct mfd_cell ab3550_devs[AB3550_NUM_DEVICES] = {
+ [AB3550_DEVID_DAC] = {
+ .name = "ab3550-dac",
+ .id = AB3550_DEVID_DAC,
+ .num_resources = 0,
+ },
+ [AB3550_DEVID_LEDS] = {
+ .name = "ab3550-leds",
+ .id = AB3550_DEVID_LEDS,
+ },
+ [AB3550_DEVID_POWER] = {
+ .name = "ab3550-power",
+ .id = AB3550_DEVID_POWER,
+ },
+ [AB3550_DEVID_REGULATORS] = {
+ .name = "ab3550-regulators",
+ .id = AB3550_DEVID_REGULATORS,
+ },
+ [AB3550_DEVID_SIM] = {
+ .name = "ab3550-sim",
+ .id = AB3550_DEVID_SIM,
+ },
+ [AB3550_DEVID_UART] = {
+ .name = "ab3550-uart",
+ .id = AB3550_DEVID_UART,
+ },
+ [AB3550_DEVID_RTC] = {
+ .name = "ab3550-rtc",
+ .id = AB3550_DEVID_RTC,
+ },
+ [AB3550_DEVID_CHARGER] = {
+ .name = "ab3550-charger",
+ .id = AB3550_DEVID_CHARGER,
+ },
+ [AB3550_DEVID_ADC] = {
+ .name = "ab3550-adc",
+ .id = AB3550_DEVID_ADC,
+ .num_resources = 10,
+ .resources = (struct resource[]) {
+ {
+ .name = "TRIGGER-0",
+ .flags = IORESOURCE_IRQ,
+ .start = 16,
+ .end = 16,
+ },
+ {
+ .name = "TRIGGER-1",
+ .flags = IORESOURCE_IRQ,
+ .start = 17,
+ .end = 17,
+ },
+ {
+ .name = "TRIGGER-2",
+ .flags = IORESOURCE_IRQ,
+ .start = 18,
+ .end = 18,
+ },
+ {
+ .name = "TRIGGER-3",
+ .flags = IORESOURCE_IRQ,
+ .start = 19,
+ .end = 19,
+ },
+ {
+ .name = "TRIGGER-4",
+ .flags = IORESOURCE_IRQ,
+ .start = 20,
+ .end = 20,
+ },
+ {
+ .name = "TRIGGER-5",
+ .flags = IORESOURCE_IRQ,
+ .start = 21,
+ .end = 21,
+ },
+ {
+ .name = "TRIGGER-6",
+ .flags = IORESOURCE_IRQ,
+ .start = 22,
+ .end = 22,
+ },
+ {
+ .name = "TRIGGER-7",
+ .flags = IORESOURCE_IRQ,
+ .start = 23,
+ .end = 23,
+ },
+ {
+ .name = "TRIGGER-VBAT-TXON",
+ .flags = IORESOURCE_IRQ,
+ .start = 13,
+ .end = 13,
+ },
+ {
+ .name = "TRIGGER-VBAT",
+ .flags = IORESOURCE_IRQ,
+ .start = 12,
+ .end = 12,
+ },
+ },
+ },
+ [AB3550_DEVID_FUELGAUGE] = {
+ .name = "ab3550-fuelgauge",
+ .id = AB3550_DEVID_FUELGAUGE,
+ },
+ [AB3550_DEVID_VIBRATOR] = {
+ .name = "ab3550-vibrator",
+ .id = AB3550_DEVID_VIBRATOR,
+ },
+ [AB3550_DEVID_CODEC] = {
+ .name = "ab3550-codec",
+ .id = AB3550_DEVID_CODEC,
+ },
+};
+
+/*
+ * I2C transactions with error messages.
+ */
+static int ab3550_i2c_master_send(struct ab3550 *ab, u8 bank, u8 *data,
+ u8 count)
+{
+ int err;
+
+ err = i2c_master_send(ab->i2c_client[bank], data, count);
+ if (err < 0) {
+ dev_err(&ab->i2c_client[0]->dev, "send error: %d\n", err);
+ return err;
+ }
+ return 0;
+}
+
+static int ab3550_i2c_master_recv(struct ab3550 *ab, u8 bank, u8 *data,
+ u8 count)
+{
+ int err;
+
+ err = i2c_master_recv(ab->i2c_client[bank], data, count);
+ if (err < 0) {
+ dev_err(&ab->i2c_client[0]->dev, "receive error: %d\n", err);
+ return err;
+ }
+ return 0;
+}
+
+/*
+ * Functionality for getting/setting register values.
+ */
+static int get_register_interruptible(struct ab3550 *ab, u8 bank, u8 reg,
+ u8 *value)
+{
+ int err;
+
+ err = mutex_lock_interruptible(&ab->access_mutex);
+ if (err)
+ return err;
+
+ err = ab3550_i2c_master_send(ab, bank, &reg, 1);
+ if (!err)
+ err = ab3550_i2c_master_recv(ab, bank, value, 1);
+
+ mutex_unlock(&ab->access_mutex);
+ return err;
+}
+
+static int get_register_page_interruptible(struct ab3550 *ab, u8 bank,
+ u8 first_reg, u8 *regvals, u8 numregs)
+{
+ int err;
+
+ err = mutex_lock_interruptible(&ab->access_mutex);
+ if (err)
+ return err;
+
+ err = ab3550_i2c_master_send(ab, bank, &first_reg, 1);
+ if (!err)
+ err = ab3550_i2c_master_recv(ab, bank, regvals, numregs);
+
+ mutex_unlock(&ab->access_mutex);
+ return err;
+}
+
+static int mask_and_set_register_interruptible(struct ab3550 *ab, u8 bank,
+ u8 reg, u8 bitmask, u8 bitvalues)
+{
+ int err = 0;
+
+ if (likely(bitmask)) {
+ u8 reg_bits[2] = {reg, 0};
+
+ err = mutex_lock_interruptible(&ab->access_mutex);
+ if (err)
+ return err;
+
+ if (bitmask == 0xFF) /* No need to read in this case. */
+ reg_bits[1] = bitvalues;
+ else { /* Read and modify the register value. */
+ u8 bits;
+
+ err = ab3550_i2c_master_send(ab, bank, &reg, 1);
+ if (err)
+ goto unlock_and_return;
+ err = ab3550_i2c_master_recv(ab, bank, &bits, 1);
+ if (err)
+ goto unlock_and_return;
+ reg_bits[1] = ((~bitmask & bits) |
+ (bitmask & bitvalues));
+ }
+ /* Write the new value. */
+ err = ab3550_i2c_master_send(ab, bank, reg_bits, 2);
+unlock_and_return:
+ mutex_unlock(&ab->access_mutex);
+ }
+ return err;
+}
+
+/*
+ * Read/write permission checking functions.
+ */
+static bool page_write_allowed(const struct ab3550_reg_ranges *ranges,
+ u8 first_reg, u8 last_reg)
+{
+ u8 i;
+
+ if (last_reg < first_reg)
+ return false;
+
+ for (i = 0; i < ranges->count; i++) {
+ if (first_reg < ranges->range[i].first)
+ break;
+ if ((last_reg <= ranges->range[i].last) &&
+ (ranges->range[i].perm & AB3550_PERM_WR))
+ return true;
+ }
+ return false;
+}
+
+static bool reg_write_allowed(const struct ab3550_reg_ranges *ranges, u8 reg)
+{
+ return page_write_allowed(ranges, reg, reg);
+}
+
+static bool page_read_allowed(const struct ab3550_reg_ranges *ranges,
+ u8 first_reg, u8 last_reg)
+{
+ u8 i;
+
+ if (last_reg < first_reg)
+ return false;
+ /* Find the range (if it exists in the list) that includes first_reg. */
+ for (i = 0; i < ranges->count; i++) {
+ if (first_reg < ranges->range[i].first)
+ return false;
+ if (first_reg <= ranges->range[i].last)
+ break;
+ }
+ /* Make sure that the entire range up to and including last_reg is
+ * readable. This may span several of the ranges in the list.
+ */
+ while ((i < ranges->count) &&
+ (ranges->range[i].perm & AB3550_PERM_RD)) {
+ if (last_reg <= ranges->range[i].last)
+ return true;
+ if ((++i >= ranges->count) ||
+ (ranges->range[i].first !=
+ (ranges->range[i - 1].last + 1))) {
+ break;
+ }
+ }
+ return false;
+}
+
+static bool reg_read_allowed(const struct ab3550_reg_ranges *ranges, u8 reg)
+{
+ return page_read_allowed(ranges, reg, reg);
+}
+
+/*
+ * The exported register access functionality.
+ */
+int ab3550_get_chip_id(struct device *dev)
+{
+ struct ab3550 *ab = dev_get_drvdata(dev->parent);
+ return (int)ab->chip_id;
+}
+
+int ab3550_mask_and_set_register_interruptible(struct device *dev, u8 bank,
+ u8 reg, u8 bitmask, u8 bitvalues)
+{
+ struct ab3550 *ab;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ if ((AB3550_NUM_BANKS <= bank) ||
+ !reg_write_allowed(&ab3550_reg_ranges[pdev->id][bank], reg))
+ return -EINVAL;
+
+ ab = dev_get_drvdata(dev->parent);
+ return mask_and_set_register_interruptible(ab, bank, reg,
+ bitmask, bitvalues);
+}
+
+int ab3550_set_register_interruptible(struct device *dev, u8 bank, u8 reg,
+ u8 value)
+{
+ return ab3550_mask_and_set_register_interruptible(dev, bank, reg, 0xFF,
+ value);
+}
+
+int ab3550_get_register_interruptible(struct device *dev, u8 bank, u8 reg,
+ u8 *value)
+{
+ struct ab3550 *ab;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ if ((AB3550_NUM_BANKS <= bank) ||
+ !reg_read_allowed(&ab3550_reg_ranges[pdev->id][bank], reg))
+ return -EINVAL;
+
+ ab = dev_get_drvdata(dev->parent);
+ return get_register_interruptible(ab, bank, reg, value);
+}
+
+int ab3550_get_register_page_interruptible(struct device *dev, u8 bank,
+ u8 first_reg, u8 *regvals, u8 numregs)
+{
+ struct ab3550 *ab;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ if ((AB3550_NUM_BANKS <= bank) ||
+ !page_read_allowed(&ab3550_reg_ranges[pdev->id][bank],
+ first_reg, (first_reg + numregs - 1)))
+ return -EINVAL;
+
+ ab = dev_get_drvdata(dev->parent);
+ return get_register_page_interruptible(ab, bank, first_reg, regvals,
+ numregs);
+}
+
+int ab3550_event_registers_startup_state_get(struct device *dev, u8 *event)
+{
+ struct ab3550 *ab;
+
+ ab = dev_get_drvdata(dev->parent);
+ if (!ab->startup_events_read)
+ return -EAGAIN; /* Try again later */
+
+ memcpy(event, ab->startup_events, AB3550_NUM_EVENT_REG);
+ return 0;
+}
+
+int ab3550_startup_irq_enabled(struct device *dev, unsigned int irq)
+{
+ struct ab3550 *ab;
+ struct ab3550_platform_data *plf_data;
+ bool val;
+
+ ab = get_irq_chip_data(irq);
+ plf_data = ab->i2c_client[0]->dev.platform_data;
+ irq -= plf_data->irq.base;
+ val = ((ab->startup_events[irq / 8] & BIT(irq % 8)) != 0);
+
+ return val;
+}
+
+static struct abx500_ops ab3550_ops = {
+ .get_chip_id = ab3550_get_chip_id,
+ .get_register = ab3550_get_register_interruptible,
+ .set_register = ab3550_set_register_interruptible,
+ .get_register_page = ab3550_get_register_page_interruptible,
+ .set_register_page = NULL,
+ .mask_and_set_register = ab3550_mask_and_set_register_interruptible,
+ .event_registers_startup_state_get =
+ ab3550_event_registers_startup_state_get,
+ .startup_irq_enabled = ab3550_startup_irq_enabled,
+};
+
+static irqreturn_t ab3550_irq_handler(int irq, void *data)
+{
+ struct ab3550 *ab = data;
+ int err;
+ unsigned int i;
+ u8 e[AB3550_NUM_EVENT_REG];
+ u8 *events;
+ unsigned long flags;
+
+ events = (ab->startup_events_read ? e : ab->startup_events);
+
+ err = get_register_page_interruptible(ab, AB3550_EVENT_BANK,
+ AB3550_EVENT_REG, events, AB3550_NUM_EVENT_REG);
+ if (err)
+ goto err_event_rd;
+
+ if (!ab->startup_events_read) {
+ dev_info(&ab->i2c_client[0]->dev,
+ "startup events 0x%x,0x%x,0x%x,0x%x,0x%x\n",
+ ab->startup_events[0], ab->startup_events[1],
+ ab->startup_events[2], ab->startup_events[3],
+ ab->startup_events[4]);
+ ab->startup_events_read = true;
+ goto out;
+ }
+
+ /* The two highest bits in event[4] are not used. */
+ events[4] &= 0x3f;
+
+ spin_lock_irqsave(&ab->event_lock, flags);
+ for (i = 0; i < AB3550_NUM_EVENT_REG; i++)
+ events[i] &= ~ab->event_mask[i];
+ spin_unlock_irqrestore(&ab->event_lock, flags);
+
+ for (i = 0; i < AB3550_NUM_EVENT_REG; i++) {
+ u8 bit;
+ u8 event_reg;
+
+ dev_dbg(&ab->i2c_client[0]->dev, "IRQ Event[%d]: 0x%2x\n",
+ i, events[i]);
+
+ event_reg = events[i];
+ for (bit = 0; event_reg; bit++, event_reg /= 2) {
+ if (event_reg % 2) {
+ unsigned int irq;
+ struct ab3550_platform_data *plf_data;
+
+ plf_data = ab->i2c_client[0]->dev.platform_data;
+ irq = plf_data->irq.base + (i * 8) + bit;
+ handle_nested_irq(irq);
+ }
+ }
+ }
+out:
+ return IRQ_HANDLED;
+
+err_event_rd:
+ dev_dbg(&ab->i2c_client[0]->dev, "error reading event registers\n");
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static struct ab3550_reg_ranges debug_ranges[AB3550_NUM_BANKS] = {
+ {
+ .count = 6,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x0e,
+ },
+ {
+ .first = 0x10,
+ .last = 0x1a,
+ },
+ {
+ .first = 0x1e,
+ .last = 0x4f,
+ },
+ {
+ .first = 0x51,
+ .last = 0x63,
+ },
+ {
+ .first = 0x65,
+ .last = 0xa3,
+ },
+ {
+ .first = 0xa5,
+ .last = 0xa8,
+ },
+ }
+ },
+ {
+ .count = 8,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x0e,
+ },
+ {
+ .first = 0x10,
+ .last = 0x17,
+ },
+ {
+ .first = 0x1a,
+ .last = 0x1c,
+ },
+ {
+ .first = 0x20,
+ .last = 0x56,
+ },
+ {
+ .first = 0x5a,
+ .last = 0x88,
+ },
+ {
+ .first = 0x8a,
+ .last = 0xad,
+ },
+ {
+ .first = 0xb0,
+ .last = 0xba,
+ },
+ {
+ .first = 0xbc,
+ .last = 0xc3,
+ },
+ }
+ },
+};
+
+static int ab3550_registers_print(struct seq_file *s, void *p)
+{
+ struct ab3550 *ab = s->private;
+ int bank;
+
+ seq_printf(s, AB3550_NAME_STRING " register values:\n");
+
+ for (bank = 0; bank < AB3550_NUM_BANKS; bank++) {
+ unsigned int i;
+
+ seq_printf(s, " bank %d:\n", bank);
+ for (i = 0; i < debug_ranges[bank].count; i++) {
+ u8 reg;
+
+ for (reg = debug_ranges[bank].range[i].first;
+ reg <= debug_ranges[bank].range[i].last;
+ reg++) {
+ u8 value;
+
+ get_register_interruptible(ab, bank, reg,
+ &value);
+ seq_printf(s, " [%d/0x%02X]: 0x%02X\n", bank,
+ reg, value);
+ }
+ }
+ }
+ return 0;
+}
+
+static int ab3550_registers_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab3550_registers_print, inode->i_private);
+}
+
+static const struct file_operations ab3550_registers_fops = {
+ .open = ab3550_registers_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab3550_bank_print(struct seq_file *s, void *p)
+{
+ struct ab3550 *ab = s->private;
+
+ seq_printf(s, "%d\n", ab->debug_bank);
+ return 0;
+}
+
+static int ab3550_bank_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab3550_bank_print, inode->i_private);
+}
+
+static ssize_t ab3550_bank_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ab3550 *ab = ((struct seq_file *)(file->private_data))->private;
+ char buf[32];
+ int buf_size;
+ unsigned long user_bank;
+ int err;
+
+ /* Get userspace string and assure termination */
+ buf_size = min(count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ err = strict_strtoul(buf, 0, &user_bank);
+ if (err)
+ return -EINVAL;
+
+ if (user_bank >= AB3550_NUM_BANKS) {
+ dev_err(&ab->i2c_client[0]->dev,
+ "debugfs error input > number of banks\n");
+ return -EINVAL;
+ }
+
+ ab->debug_bank = user_bank;
+
+ return buf_size;
+}
+
+static int ab3550_address_print(struct seq_file *s, void *p)
+{
+ struct ab3550 *ab = s->private;
+
+ seq_printf(s, "0x%02X\n", ab->debug_address);
+ return 0;
+}
+
+static int ab3550_address_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab3550_address_print, inode->i_private);
+}
+
+static ssize_t ab3550_address_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ab3550 *ab = ((struct seq_file *)(file->private_data))->private;
+ char buf[32];
+ int buf_size;
+ unsigned long user_address;
+ int err;
+
+ /* Get userspace string and assure termination */
+ buf_size = min(count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ err = strict_strtoul(buf, 0, &user_address);
+ if (err)
+ return -EINVAL;
+ if (user_address > 0xff) {
+ dev_err(&ab->i2c_client[0]->dev,
+ "debugfs error input > 0xff\n");
+ return -EINVAL;
+ }
+ ab->debug_address = user_address;
+ return buf_size;
+}
+
+static int ab3550_val_print(struct seq_file *s, void *p)
+{
+ struct ab3550 *ab = s->private;
+ int err;
+ u8 regvalue;
+
+ err = get_register_interruptible(ab, (u8)ab->debug_bank,
+ (u8)ab->debug_address, &regvalue);
+ if (err)
+ return -EINVAL;
+ seq_printf(s, "0x%02X\n", regvalue);
+
+ return 0;
+}
+
+static int ab3550_val_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab3550_val_print, inode->i_private);
+}
+
+static ssize_t ab3550_val_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ab3550 *ab = ((struct seq_file *)(file->private_data))->private;
+ char buf[32];
+ int buf_size;
+ unsigned long user_val;
+ int err;
+ u8 regvalue;
+
+ /* Get userspace string and assure termination */
+ buf_size = min(count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ err = strict_strtoul(buf, 0, &user_val);
+ if (err)
+ return -EINVAL;
+ if (user_val > 0xff) {
+ dev_err(&ab->i2c_client[0]->dev,
+ "debugfs error input > 0xff\n");
+ return -EINVAL;
+ }
+ err = mask_and_set_register_interruptible(
+ ab, (u8)ab->debug_bank,
+ (u8)ab->debug_address, 0xFF, (u8)user_val);
+ if (err)
+ return -EINVAL;
+
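+ /* Read the value back so a failed write is reported to the caller. */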
+ err = get_register_interruptible(ab, (u8)ab->debug_bank,
+ (u8)ab->debug_address, &regvalue);
+ if (err)
+ return -EINVAL;
+
+ return buf_size;
+}
+
+static const struct file_operations ab3550_bank_fops = {
+ .open = ab3550_bank_open,
+ .write = ab3550_bank_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations ab3550_address_fops = {
+ .open = ab3550_address_open,
+ .write = ab3550_address_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations ab3550_val_fops = {
+ .open = ab3550_val_open,
+ .write = ab3550_val_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static struct dentry *ab3550_dir;
+static struct dentry *ab3550_reg_file;
+static struct dentry *ab3550_bank_file;
+static struct dentry *ab3550_address_file;
+static struct dentry *ab3550_val_file;
+
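+/*
+ * Debugfs interface: "all-registers" dumps every readable register,
+ * while "register-bank", "register-address" and "register-value" allow
+ * a single register to be selected, read and written.
+ */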
+static inline void ab3550_setup_debugfs(struct ab3550 *ab)
+{
+ ab->debug_bank = 0;
+ ab->debug_address = 0x00;
+
+ ab3550_dir = debugfs_create_dir(AB3550_NAME_STRING, NULL);
+ if (!ab3550_dir)
+ goto exit_no_debugfs;
+
+ ab3550_reg_file = debugfs_create_file("all-registers",
+ S_IRUGO, ab3550_dir, ab, &ab3550_registers_fops);
+ if (!ab3550_reg_file)
+ goto exit_destroy_dir;
+
+ ab3550_bank_file = debugfs_create_file("register-bank",
+ (S_IRUGO | S_IWUGO), ab3550_dir, ab, &ab3550_bank_fops);
+ if (!ab3550_bank_file)
+ goto exit_destroy_reg;
+
+ ab3550_address_file = debugfs_create_file("register-address",
+ (S_IRUGO | S_IWUGO), ab3550_dir, ab, &ab3550_address_fops);
+ if (!ab3550_address_file)
+ goto exit_destroy_bank;
+
+ ab3550_val_file = debugfs_create_file("register-value",
+ (S_IRUGO | S_IWUGO), ab3550_dir, ab, &ab3550_val_fops);
+ if (!ab3550_val_file)
+ goto exit_destroy_address;
+
+ return;
+
+exit_destroy_address:
+ debugfs_remove(ab3550_address_file);
+exit_destroy_bank:
+ debugfs_remove(ab3550_bank_file);
+exit_destroy_reg:
+ debugfs_remove(ab3550_reg_file);
+exit_destroy_dir:
+ debugfs_remove(ab3550_dir);
+exit_no_debugfs:
+ dev_err(&ab->i2c_client[0]->dev, "failed to create debugfs entries.\n");
+ return;
+}
+
+static inline void ab3550_remove_debugfs(void)
+{
+ debugfs_remove(ab3550_val_file);
+ debugfs_remove(ab3550_address_file);
+ debugfs_remove(ab3550_bank_file);
+ debugfs_remove(ab3550_reg_file);
+ debugfs_remove(ab3550_dir);
+}
+
+#else /* !CONFIG_DEBUG_FS */
+static inline void ab3550_setup_debugfs(struct ab3550 *ab)
+{
+}
+static inline void ab3550_remove_debugfs(void)
+{
+}
+#endif
+
+/*
+ * Basic set-up, data structure creation/destruction and I2C interface.
+ * This applies the platform-supplied default configuration to the AB3550
+ * chip so that it will work as expected.
+ */
+static int __init ab3550_setup(struct ab3550 *ab)
+{
+ int err = 0;
+ int i;
+ struct ab3550_platform_data *plf_data;
+ struct abx500_init_settings *settings;
+
+ plf_data = ab->i2c_client[0]->dev.platform_data;
+ settings = plf_data->init_settings;
+
+ for (i = 0; i < plf_data->init_settings_sz; i++) {
+ err = mask_and_set_register_interruptible(ab,
+ settings[i].bank,
+ settings[i].reg,
+ 0xFF, settings[i].setting);
+ if (err)
+ goto exit_no_setup;
+
+ /* If this is an event mask register, also update the cached event mask */
+ if ((settings[i].bank == 0) &&
+ (AB3550_IMR1 <= settings[i].reg) &&
+ (settings[i].reg <= AB3550_IMR5)) {
+ ab->event_mask[settings[i].reg - AB3550_IMR1] =
+ settings[i].setting;
+ }
+ }
+exit_no_setup:
+ return err;
+}
+
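+/*
+ * Updating the event mask registers requires I2C access, which may sleep,
+ * so the hardware update is deferred to a work queue while the cached mask
+ * is manipulated under the event_lock spinlock.
+ */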
+static void ab3550_mask_work(struct work_struct *work)
+{
+ struct ab3550 *ab = container_of(work, struct ab3550, mask_work);
+ int i;
+ unsigned long flags;
+ u8 mask[AB3550_NUM_EVENT_REG];
+
+ spin_lock_irqsave(&ab->event_lock, flags);
+ for (i = 0; i < AB3550_NUM_EVENT_REG; i++)
+ mask[i] = ab->event_mask[i];
+ spin_unlock_irqrestore(&ab->event_lock, flags);
+
+ for (i = 0; i < AB3550_NUM_EVENT_REG; i++) {
+ int err;
+
+ err = mask_and_set_register_interruptible(ab, 0,
+ (AB3550_IMR1 + i), ~0, mask[i]);
+ if (err)
+ dev_err(&ab->i2c_client[0]->dev,
+ "ab3550_mask_work failed 0x%x,0x%x\n",
+ (AB3550_IMR1 + i), mask[i]);
+ }
+}
+
+static void ab3550_mask(unsigned int irq)
+{
+ unsigned long flags;
+ struct ab3550 *ab;
+ struct ab3550_platform_data *plf_data;
+
+ ab = get_irq_chip_data(irq);
+ plf_data = ab->i2c_client[0]->dev.platform_data;
+ irq -= plf_data->irq.base;
+
+ spin_lock_irqsave(&ab->event_lock, flags);
+ ab->event_mask[irq / 8] |= BIT(irq % 8);
+ spin_unlock_irqrestore(&ab->event_lock, flags);
+
+ schedule_work(&ab->mask_work);
+}
+
+static void ab3550_unmask(unsigned int irq)
+{
+ unsigned long flags;
+ struct ab3550 *ab;
+ struct ab3550_platform_data *plf_data;
+
+ ab = get_irq_chip_data(irq);
+ plf_data = ab->i2c_client[0]->dev.platform_data;
+ irq -= plf_data->irq.base;
+
+ spin_lock_irqsave(&ab->event_lock, flags);
+ ab->event_mask[irq / 8] &= ~BIT(irq % 8);
+ spin_unlock_irqrestore(&ab->event_lock, flags);
+
+ schedule_work(&ab->mask_work);
+}
+
+static void noop(unsigned int irq)
+{
+}
+
+static struct irq_chip ab3550_irq_chip = {
+ .name = "ab3550-core", /* Keep the same name as the request */
+ .startup = NULL, /* defaults to enable */
+ .shutdown = NULL, /* defaults to disable */
+ .enable = NULL, /* defaults to unmask */
+ .disable = ab3550_mask, /* No default to mask in chip.c */
+ .ack = noop,
+ .mask = ab3550_mask,
+ .unmask = ab3550_unmask,
+ .end = NULL,
+};
+
+struct ab_family_id {
+ u8 id;
+ char *name;
+};
+
+static const struct ab_family_id ids[] __initdata = {
+ /* AB3550 */
+ {
+ .id = AB3550_P1A,
+ .name = "P1A"
+ },
+ /* Terminator */
+ {
+ .id = 0x00,
+ }
+};
+
+static int __init ab3550_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ab3550 *ab;
+ struct ab3550_platform_data *ab3550_plf_data =
+ client->dev.platform_data;
+ int err;
+ int i;
+ int num_i2c_clients = 0;
+
+ ab = kzalloc(sizeof(struct ab3550), GFP_KERNEL);
+ if (!ab) {
+ dev_err(&client->dev,
+ "could not allocate " AB3550_NAME_STRING " device\n");
+ return -ENOMEM;
+ }
+
+ /* Initialize data structure */
+ mutex_init(&ab->access_mutex);
+ spin_lock_init(&ab->event_lock);
+ ab->i2c_client[0] = client;
+
+ i2c_set_clientdata(client, ab);
+
+ /* Read chip ID register */
+ err = get_register_interruptible(ab, 0, AB3550_CID_REG, &ab->chip_id);
+ if (err) {
+ dev_err(&client->dev, "could not communicate with the analog "
+ "baseband chip\n");
+ goto exit_no_detect;
+ }
+
+ for (i = 0; ids[i].id != 0x0; i++) {
+ if (ids[i].id == ab->chip_id) {
+ snprintf(&ab->chip_name[0], sizeof(ab->chip_name) - 1,
+ AB3550_ID_FORMAT_STRING, ids[i].name);
+ break;
+ }
+ }
+
+ if (ids[i].id == 0x0) {
+ dev_err(&client->dev, "unknown analog baseband chip id: 0x%x\n",
+ ab->chip_id);
+ dev_err(&client->dev, "driver not started!\n");
+ goto exit_no_detect;
+ }
+
+ dev_info(&client->dev, "detected AB chip: %s\n", &ab->chip_name[0]);
+
+ /* Attach other dummy I2C clients. */
+ while (++num_i2c_clients < AB3550_NUM_BANKS) {
+ ab->i2c_client[num_i2c_clients] =
+ i2c_new_dummy(client->adapter,
+ (client->addr + num_i2c_clients));
+ if (!ab->i2c_client[num_i2c_clients]) {
+ err = -ENOMEM;
+ goto exit_no_dummy_client;
+ }
+ strlcpy(ab->i2c_client[num_i2c_clients]->name, id->name,
+ sizeof(ab->i2c_client[num_i2c_clients]->name));
+ }
+
+ err = ab3550_setup(ab);
+ if (err)
+ goto exit_no_setup;
+
+ INIT_WORK(&ab->mask_work, ab3550_mask_work);
+
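+ /* Set up one nested, simple-handled interrupt per AB3550 event bit. */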
+ for (i = 0; i < ab3550_plf_data->irq.count; i++) {
+ unsigned int irq;
+
+ irq = ab3550_plf_data->irq.base + i;
+ set_irq_chip_data(irq, ab);
+ set_irq_chip_and_handler(irq, &ab3550_irq_chip,
+ handle_simple_irq);
+ set_irq_nested_thread(irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ set_irq_noprobe(irq);
+#endif
+ }
+
+ err = request_threaded_irq(client->irq, NULL, ab3550_irq_handler,
+ IRQF_ONESHOT, "ab3550-core", ab);
+ /* This IRQ is genuinely unpredictable, so sample it for entropy */
+ rand_initialize_irq(client->irq);
+
+ if (err)
+ goto exit_no_irq;
+
+ err = abx500_register_ops(&client->dev, &ab3550_ops);
+ if (err)
+ goto exit_no_ops;
+
+ /* Set up and register the platform devices. */
+ for (i = 0; i < AB3550_NUM_DEVICES; i++) {
+ ab3550_devs[i].platform_data = ab3550_plf_data->dev_data[i];
+ ab3550_devs[i].data_size = ab3550_plf_data->dev_data_sz[i];
+ }
+
+ err = mfd_add_devices(&client->dev, 0, ab3550_devs,
+ ARRAY_SIZE(ab3550_devs), NULL,
+ ab3550_plf_data->irq.base);
+
+ ab3550_setup_debugfs(ab);
+
+ return 0;
+
+exit_no_ops:
+exit_no_irq:
+exit_no_setup:
+exit_no_dummy_client:
+ /* Unregister the dummy i2c clients. */
+ while (--num_i2c_clients)
+ i2c_unregister_device(ab->i2c_client[num_i2c_clients]);
+exit_no_detect:
+ kfree(ab);
+ return err;
+}
+
+static int __exit ab3550_remove(struct i2c_client *client)
+{
+ struct ab3550 *ab = i2c_get_clientdata(client);
+ int num_i2c_clients = AB3550_NUM_BANKS;
+
+ mfd_remove_devices(&client->dev);
+ ab3550_remove_debugfs();
+
+ while (--num_i2c_clients)
+ i2c_unregister_device(ab->i2c_client[num_i2c_clients]);
+
+ /*
+ * At this point, all subscribers should have unregistered
+ * their notifiers so deactivate IRQ
+ */
+ free_irq(client->irq, ab);
+ i2c_set_clientdata(client, NULL);
+ kfree(ab);
+ return 0;
+}
+
+static const struct i2c_device_id ab3550_id[] = {
+ {AB3550_NAME_STRING, 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ab3550_id);
+
+static struct i2c_driver ab3550_driver = {
+ .driver = {
+ .name = AB3550_NAME_STRING,
+ .owner = THIS_MODULE,
+ },
+ .id_table = ab3550_id,
+ .probe = ab3550_probe,
+ .remove = __exit_p(ab3550_remove),
+};
+
+static int __init ab3550_i2c_init(void)
+{
+ return i2c_add_driver(&ab3550_driver);
+}
+
+static void __exit ab3550_i2c_exit(void)
+{
+ i2c_del_driver(&ab3550_driver);
+}
+
+subsys_initcall(ab3550_i2c_init);
+module_exit(ab3550_i2c_exit);
+
+MODULE_AUTHOR("Mattias Wallin <mattias.wallin@stericsson.com>");
+MODULE_DESCRIPTION("AB3550 core driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
new file mode 100644
index 000000000000..3b3b97ec32a7
--- /dev/null
+++ b/drivers/mfd/abx500-core.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2007-2010 ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ * Register access functions for the ABX500 Mixed Signal IC family.
+ * Author: Mattias Wallin <mattias.wallin@stericsson.com>
+ */
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/mfd/abx500.h>
+
+static LIST_HEAD(abx500_list);
+
+struct abx500_device_entry {
+ struct list_head list;
+ struct abx500_ops ops;
+ struct device *dev;
+};
+
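+/* Find the ops registered for a given core device; *ops is set to NULL if none match. */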
+static void lookup_ops(struct device *dev, struct abx500_ops **ops)
+{
+ struct abx500_device_entry *dev_entry;
+
+ *ops = NULL;
+ list_for_each_entry(dev_entry, &abx500_list, list) {
+ if (dev_entry->dev == dev) {
+ *ops = &dev_entry->ops;
+ return;
+ }
+ }
+}
+
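+/* Called by the core chip driver (e.g. ab3550) at probe time to register its register-access ops. */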
+int abx500_register_ops(struct device *dev, struct abx500_ops *ops)
+{
+ struct abx500_device_entry *dev_entry;
+
+ dev_entry = kzalloc(sizeof(struct abx500_device_entry), GFP_KERNEL);
+ if (!dev_entry) {
+ dev_err(dev, "register_ops kzalloc failed\n");
+ return -ENOMEM;
+ }
+ dev_entry->dev = dev;
+ memcpy(&dev_entry->ops, ops, sizeof(struct abx500_ops));
+
+ list_add_tail(&dev_entry->list, &abx500_list);
+ return 0;
+}
+EXPORT_SYMBOL(abx500_register_ops);
+
+void abx500_remove_ops(struct device *dev)
+{
+ struct abx500_device_entry *dev_entry, *tmp;
+
+ list_for_each_entry_safe(dev_entry, tmp, &abx500_list, list)
+ {
+ if (dev_entry->dev == dev) {
+ list_del(&dev_entry->list);
+ kfree(dev_entry);
+ }
+ }
+}
+EXPORT_SYMBOL(abx500_remove_ops);
+
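+/*
+ * The accessors below look up the ops registered by the device's parent and
+ * dispatch to the matching callback, returning -ENOTSUPP if none is set.
+ */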
+int abx500_set_register_interruptible(struct device *dev, u8 bank, u8 reg,
+ u8 value)
+{
+ struct abx500_ops *ops;
+
+ lookup_ops(dev->parent, &ops);
+ if ((ops != NULL) && (ops->set_register != NULL))
+ return ops->set_register(dev, bank, reg, value);
+ else
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL(abx500_set_register_interruptible);
+
+int abx500_get_register_interruptible(struct device *dev, u8 bank, u8 reg,
+ u8 *value)
+{
+ struct abx500_ops *ops;
+
+ lookup_ops(dev->parent, &ops);
+ if ((ops != NULL) && (ops->get_register != NULL))
+ return ops->get_register(dev, bank, reg, value);
+ else
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL(abx500_get_register_interruptible);
+
+int abx500_get_register_page_interruptible(struct device *dev, u8 bank,
+ u8 first_reg, u8 *regvals, u8 numregs)
+{
+ struct abx500_ops *ops;
+
+ lookup_ops(dev->parent, &ops);
+ if ((ops != NULL) && (ops->get_register_page != NULL))
+ return ops->get_register_page(dev, bank,
+ first_reg, regvals, numregs);
+ else
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL(abx500_get_register_page_interruptible);
+
+int abx500_mask_and_set_register_interruptible(struct device *dev, u8 bank,
+ u8 reg, u8 bitmask, u8 bitvalues)
+{
+ struct abx500_ops *ops;
+
+ lookup_ops(dev->parent, &ops);
+ if ((ops != NULL) && (ops->mask_and_set_register != NULL))
+ return ops->mask_and_set_register(dev, bank,
+ reg, bitmask, bitvalues);
+ else
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL(abx500_mask_and_set_register_interruptible);
+
+int abx500_get_chip_id(struct device *dev)
+{
+ struct abx500_ops *ops;
+
+ lookup_ops(dev->parent, &ops);
+ if ((ops != NULL) && (ops->get_chip_id != NULL))
+ return ops->get_chip_id(dev);
+ else
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL(abx500_get_chip_id);
+
+int abx500_event_registers_startup_state_get(struct device *dev, u8 *event)
+{
+ struct abx500_ops *ops;
+
+ lookup_ops(dev->parent, &ops);
+ if ((ops != NULL) && (ops->event_registers_startup_state_get != NULL))
+ return ops->event_registers_startup_state_get(dev, event);
+ else
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL(abx500_event_registers_startup_state_get);
+
+int abx500_startup_irq_enabled(struct device *dev, unsigned int irq)
+{
+ struct abx500_ops *ops;
+
+ lookup_ops(dev->parent, &ops);
+ if ((ops != NULL) && (ops->startup_irq_enabled != NULL))
+ return ops->startup_irq_enabled(dev, irq);
+ else
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL(abx500_startup_irq_enabled);
+
+MODULE_AUTHOR("Mattias Wallin <mattias.wallin@stericsson.com>");
+MODULE_DESCRIPTION("ABX500 core driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/da903x.c b/drivers/mfd/da903x.c
index 67181b147ab3..3ad915d0589c 100644
--- a/drivers/mfd/da903x.c
+++ b/drivers/mfd/da903x.c
@@ -544,6 +544,7 @@ static int __devexit da903x_remove(struct i2c_client *client)
struct da903x_chip *chip = i2c_get_clientdata(client);
da903x_remove_subdevs(chip);
+ i2c_set_clientdata(client, NULL);
kfree(chip);
return 0;
}
diff --git a/drivers/mfd/davinci_voicecodec.c b/drivers/mfd/davinci_voicecodec.c
new file mode 100644
index 000000000000..3e75f02e4778
--- /dev/null
+++ b/drivers/mfd/davinci_voicecodec.c
@@ -0,0 +1,190 @@
+/*
+ * DaVinci Voice Codec Core Interface for TI platforms
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc
+ *
+ * Author: Miguel Aguilar <miguel.aguilar@ridgerun.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+
+#include <sound/pcm.h>
+
+#include <linux/mfd/davinci_voicecodec.h>
+
+u32 davinci_vc_read(struct davinci_vc *davinci_vc, int reg)
+{
+ return __raw_readl(davinci_vc->base + reg);
+}
+
+void davinci_vc_write(struct davinci_vc *davinci_vc,
+ int reg, u32 val)
+{
+ __raw_writel(val, davinci_vc->base + reg);
+}
+
+static int __init davinci_vc_probe(struct platform_device *pdev)
+{
+ struct davinci_vc *davinci_vc;
+ struct resource *res, *mem;
+ struct mfd_cell *cell = NULL;
+ int ret;
+
+ davinci_vc = kzalloc(sizeof(struct davinci_vc), GFP_KERNEL);
+ if (!davinci_vc) {
+ dev_dbg(&pdev->dev,
+ "could not allocate memory for private data\n");
+ return -ENOMEM;
+ }
+
+ davinci_vc->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(davinci_vc->clk)) {
+ dev_dbg(&pdev->dev,
+ "could not get the clock for voice codec\n");
+ ret = -ENODEV;
+ goto fail1;
+ }
+ clk_enable(davinci_vc->clk);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no mem resource\n");
+ ret = -ENODEV;
+ goto fail2;
+ }
+
+ davinci_vc->pbase = res->start;
+ davinci_vc->base_size = resource_size(res);
+
+ mem = request_mem_region(davinci_vc->pbase, davinci_vc->base_size,
+ pdev->name);
+ if (!mem) {
+ dev_err(&pdev->dev, "VCIF region already claimed\n");
+ ret = -EBUSY;
+ goto fail2;
+ }
+
+ davinci_vc->base = ioremap(davinci_vc->pbase, davinci_vc->base_size);
+ if (!davinci_vc->base) {
+ dev_err(&pdev->dev, "can't ioremap mem resource.\n");
+ ret = -ENOMEM;
+ goto fail3;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no DMA resource\n");
+ ret = -ENXIO;
+ goto fail4;
+ }
+
+ davinci_vc->davinci_vcif.dma_tx_channel = res->start;
+ davinci_vc->davinci_vcif.dma_tx_addr =
+ (dma_addr_t)(io_v2p(davinci_vc->base) + DAVINCI_VC_WFIFO);
+
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (!res) {
+ dev_err(&pdev->dev, "no DMA resource\n");
+ ret = -ENXIO;
+ goto fail4;
+ }
+
+ davinci_vc->davinci_vcif.dma_rx_channel = res->start;
+ davinci_vc->davinci_vcif.dma_rx_addr =
+ (dma_addr_t)(io_v2p(davinci_vc->base) + DAVINCI_VC_RFIFO);
+
+ davinci_vc->dev = &pdev->dev;
+ davinci_vc->pdev = pdev;
+
+ /* Voice codec interface client */
+ cell = &davinci_vc->cells[DAVINCI_VC_VCIF_CELL];
+ cell->name = "davinci_vcif";
+ cell->driver_data = davinci_vc;
+
+ /* Voice codec CQ93VC client */
+ cell = &davinci_vc->cells[DAVINCI_VC_CQ93VC_CELL];
+ cell->name = "cq93vc";
+ cell->driver_data = davinci_vc;
+
+ ret = mfd_add_devices(&pdev->dev, pdev->id, davinci_vc->cells,
+ DAVINCI_VC_CELLS, NULL, 0);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "fail to register client devices\n");
+ goto fail4;
+ }
+
+ return 0;
+
+fail4:
+ iounmap(davinci_vc->base);
+fail3:
+ release_mem_region(davinci_vc->pbase, davinci_vc->base_size);
+fail2:
+ clk_disable(davinci_vc->clk);
+ clk_put(davinci_vc->clk);
+ davinci_vc->clk = NULL;
+fail1:
+ kfree(davinci_vc);
+
+ return ret;
+}
+
+static int __devexit davinci_vc_remove(struct platform_device *pdev)
+{
+ struct davinci_vc *davinci_vc = platform_get_drvdata(pdev);
+
+ mfd_remove_devices(&pdev->dev);
+
+ iounmap(davinci_vc->base);
+ release_mem_region(davinci_vc->pbase, davinci_vc->base_size);
+
+ clk_disable(davinci_vc->clk);
+ clk_put(davinci_vc->clk);
+ davinci_vc->clk = NULL;
+
+ kfree(davinci_vc);
+
+ return 0;
+}
+
+static struct platform_driver davinci_vc_driver = {
+ .driver = {
+ .name = "davinci_voicecodec",
+ .owner = THIS_MODULE,
+ },
+ .remove = __devexit_p(davinci_vc_remove),
+};
+
+static int __init davinci_vc_init(void)
+{
+ return platform_driver_probe(&davinci_vc_driver, davinci_vc_probe);
+}
+module_init(davinci_vc_init);
+
+static void __exit davinci_vc_exit(void)
+{
+ platform_driver_unregister(&davinci_vc_driver);
+}
+module_exit(davinci_vc_exit);
+
+MODULE_AUTHOR("Miguel Aguilar");
+MODULE_DESCRIPTION("Texas Instruments DaVinci Voice Codec Core Interface");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
new file mode 100644
index 000000000000..9ed630799acc
--- /dev/null
+++ b/drivers/mfd/janz-cmodio.c
@@ -0,0 +1,304 @@
+/*
+ * Janz CMOD-IO MODULbus Carrier Board PCI Driver
+ *
+ * Copyright (c) 2010 Ira W. Snyder <iws@ovro.caltech.edu>
+ *
+ * Lots of inspiration and code was copied from drivers/mfd/sm501.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
+
+#include <linux/mfd/janz.h>
+
+#define DRV_NAME "janz-cmodio"
+
+/* Size of each MODULbus module in PCI BAR4 */
+#define CMODIO_MODULBUS_SIZE 0x200
+
+/* Maximum number of MODULbus modules on a CMOD-IO carrier board */
+#define CMODIO_MAX_MODULES 4
+
+/* Module Parameters */
+static unsigned int num_modules = CMODIO_MAX_MODULES;
+static unsigned char *modules[CMODIO_MAX_MODULES] = {
+ "empty", "empty", "empty", "empty",
+};
+
+module_param_array(modules, charp, &num_modules, S_IRUGO);
+MODULE_PARM_DESC(modules, "MODULbus modules attached to the carrier board");
+
+/* Unique Device Id */
+static unsigned int cmodio_id;
+
+struct cmodio_device {
+ /* Parent PCI device */
+ struct pci_dev *pdev;
+
+ /* PLX control registers */
+ struct janz_cmodio_onboard_regs __iomem *ctrl;
+
+ /* hex switch position */
+ u8 hex;
+
+ /* mfd-core API */
+ struct mfd_cell cells[CMODIO_MAX_MODULES];
+ struct resource resources[3 * CMODIO_MAX_MODULES];
+ struct janz_platform_data pdata[CMODIO_MAX_MODULES];
+};
+
+/*
+ * Subdevices using the mfd-core API
+ */
+
+static int __devinit cmodio_setup_subdevice(struct cmodio_device *priv,
+ char *name, unsigned int devno,
+ unsigned int modno)
+{
+ struct janz_platform_data *pdata;
+ struct mfd_cell *cell;
+ struct resource *res;
+ struct pci_dev *pci;
+
+ pci = priv->pdev;
+ cell = &priv->cells[devno];
+ res = &priv->resources[devno * 3];
+ pdata = &priv->pdata[devno];
+
+ cell->name = name;
+ cell->resources = res;
+ cell->num_resources = 3;
+
+ /* Setup the subdevice ID -- must be unique */
+ cell->id = cmodio_id++;
+
+ /* Add platform data */
+ pdata->modno = modno;
+ cell->platform_data = pdata;
+ cell->data_size = sizeof(*pdata);
+
+ /* MODULbus registers -- PCI BAR3 is big-endian MODULbus access */
+ res->flags = IORESOURCE_MEM;
+ res->parent = &pci->resource[3];
+ res->start = pci->resource[3].start + (CMODIO_MODULBUS_SIZE * modno);
+ res->end = res->start + CMODIO_MODULBUS_SIZE - 1;
+ res++;
+
+ /* PLX Control Registers -- PCI BAR4 is interrupt and other registers */
+ res->flags = IORESOURCE_MEM;
+ res->parent = &pci->resource[4];
+ res->start = pci->resource[4].start;
+ res->end = pci->resource[4].end;
+ res++;
+
+ /*
+ * IRQ
+ *
+ * The start and end fields are used as an offset to the irq_base
+ * parameter passed into the mfd_add_devices() function call. All
+ * devices share the same IRQ.
+ */
+ res->flags = IORESOURCE_IRQ;
+ res->parent = NULL;
+ res->start = 0;
+ res->end = 0;
+ res++;
+
+ return 0;
+}
+
+/* Probe each submodule using kernel parameters */
+static int __devinit cmodio_probe_submodules(struct cmodio_device *priv)
+{
+ struct pci_dev *pdev = priv->pdev;
+ unsigned int num_probed = 0;
+ char *name;
+ int i;
+
+ for (i = 0; i < num_modules; i++) {
+ name = modules[i];
+ if (!strcmp(name, "") || !strcmp(name, "empty"))
+ continue;
+
+ dev_dbg(&priv->pdev->dev, "MODULbus %d: name %s\n", i, name);
+ cmodio_setup_subdevice(priv, name, num_probed, i);
+ num_probed++;
+ }
+
+ /* print an error message if no modules were probed */
+ if (num_probed == 0) {
+ dev_err(&priv->pdev->dev, "no MODULbus modules specified, "
+ "please set the ``modules'' kernel "
+ "parameter according to your "
+ "hardware configuration\n");
+ return -ENODEV;
+ }
+
+ return mfd_add_devices(&pdev->dev, 0, priv->cells,
+ num_probed, NULL, pdev->irq);
+}
+
+/*
+ * SYSFS Attributes
+ */
+
+static ssize_t mbus_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cmodio_device *priv = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", priv->hex);
+}
+
+static DEVICE_ATTR(modulbus_number, S_IRUGO, mbus_show, NULL);
+
+static struct attribute *cmodio_sysfs_attrs[] = {
+ &dev_attr_modulbus_number.attr,
+ NULL,
+};
+
+static const struct attribute_group cmodio_sysfs_attr_group = {
+ .attrs = cmodio_sysfs_attrs,
+};
+
+/*
+ * PCI Driver
+ */
+
+static int __devinit cmodio_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ struct cmodio_device *priv;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(&dev->dev, "unable to allocate private data\n");
+ ret = -ENOMEM;
+ goto out_return;
+ }
+
+ pci_set_drvdata(dev, priv);
+ priv->pdev = dev;
+
+ /* Hardware Initialization */
+ ret = pci_enable_device(dev);
+ if (ret) {
+ dev_err(&dev->dev, "unable to enable device\n");
+ goto out_free_priv;
+ }
+
+ pci_set_master(dev);
+ ret = pci_request_regions(dev, DRV_NAME);
+ if (ret) {
+ dev_err(&dev->dev, "unable to request regions\n");
+ goto out_pci_disable_device;
+ }
+
+ /* Onboard configuration registers */
+ priv->ctrl = pci_ioremap_bar(dev, 4);
+ if (!priv->ctrl) {
+ dev_err(&dev->dev, "unable to remap onboard regs\n");
+ ret = -ENOMEM;
+ goto out_pci_release_regions;
+ }
+
+ /* Read the hex switch on the carrier board */
+ priv->hex = ioread8(&priv->ctrl->int_enable);
+
+ /* Add the MODULbus number (hex switch value) to the device's sysfs */
+ ret = sysfs_create_group(&dev->dev.kobj, &cmodio_sysfs_attr_group);
+ if (ret) {
+ dev_err(&dev->dev, "unable to create sysfs attributes\n");
+ goto out_unmap_ctrl;
+ }
+
+ /*
+ * Disable all interrupt lines, each submodule will enable its
+ * own interrupt line if needed
+ */
+ iowrite8(0xf, &priv->ctrl->int_disable);
+
+ /* Register drivers for all submodules */
+ ret = cmodio_probe_submodules(priv);
+ if (ret) {
+ dev_err(&dev->dev, "unable to probe submodules\n");
+ goto out_sysfs_remove_group;
+ }
+
+ return 0;
+
+out_sysfs_remove_group:
+ sysfs_remove_group(&dev->dev.kobj, &cmodio_sysfs_attr_group);
+out_unmap_ctrl:
+ iounmap(priv->ctrl);
+out_pci_release_regions:
+ pci_release_regions(dev);
+out_pci_disable_device:
+ pci_disable_device(dev);
+out_free_priv:
+ kfree(priv);
+out_return:
+ return ret;
+}
+
+static void __devexit cmodio_pci_remove(struct pci_dev *dev)
+{
+ struct cmodio_device *priv = pci_get_drvdata(dev);
+
+ mfd_remove_devices(&dev->dev);
+ sysfs_remove_group(&dev->dev.kobj, &cmodio_sysfs_attr_group);
+ iounmap(priv->ctrl);
+ pci_release_regions(dev);
+ pci_disable_device(dev);
+ kfree(priv);
+}
+
+#define PCI_VENDOR_ID_JANZ 0x13c3
+
+/* The list of devices that this module will support */
+static DEFINE_PCI_DEVICE_TABLE(cmodio_pci_ids) = {
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_JANZ, 0x0101 },
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_JANZ, 0x0100 },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, cmodio_pci_ids);
+
+static struct pci_driver cmodio_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = cmodio_pci_ids,
+ .probe = cmodio_pci_probe,
+ .remove = __devexit_p(cmodio_pci_remove),
+};
+
+/*
+ * Module Init / Exit
+ */
+
+static int __init cmodio_init(void)
+{
+ return pci_register_driver(&cmodio_pci_driver);
+}
+
+static void __exit cmodio_exit(void)
+{
+ pci_unregister_driver(&cmodio_pci_driver);
+}
+
+MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
+MODULE_DESCRIPTION("Janz CMOD-IO PCI MODULbus Carrier Board Driver");
+MODULE_LICENSE("GPL");
+
+module_init(cmodio_init);
+module_exit(cmodio_exit);
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c
index 85d63c04749b..f621bcea3d02 100644
--- a/drivers/mfd/max8925-core.c
+++ b/drivers/mfd/max8925-core.c
@@ -508,7 +508,7 @@ static int max8925_irq_init(struct max8925_chip *chip, int irq,
max8925_reg_read(chip->i2c, MAX8925_ON_OFF_IRQ2);
max8925_reg_read(chip->rtc, MAX8925_RTC_IRQ);
max8925_reg_read(chip->adc, MAX8925_TSC_IRQ);
- /* mask all interrupts */
+ /* mask all interrupts except for TSC */
max8925_reg_write(chip->rtc, MAX8925_ALARM0_CNTL, 0);
max8925_reg_write(chip->rtc, MAX8925_ALARM1_CNTL, 0);
max8925_reg_write(chip->i2c, MAX8925_CHG_IRQ1_MASK, 0xff);
@@ -516,7 +516,6 @@ static int max8925_irq_init(struct max8925_chip *chip, int irq,
max8925_reg_write(chip->i2c, MAX8925_ON_OFF_IRQ1_MASK, 0xff);
max8925_reg_write(chip->i2c, MAX8925_ON_OFF_IRQ2_MASK, 0xff);
max8925_reg_write(chip->rtc, MAX8925_RTC_IRQ_MASK, 0xff);
- max8925_reg_write(chip->adc, MAX8925_TSC_IRQ_MASK, 0xff);
mutex_init(&chip->irq_lock);
chip->core_irq = irq;
@@ -547,7 +546,11 @@ static int max8925_irq_init(struct max8925_chip *chip, int irq,
dev_err(chip->dev, "Failed to request core IRQ: %d\n", ret);
chip->core_irq = 0;
}
+
tsc_irq:
+ /* mask TSC interrupt */
+ max8925_reg_write(chip->adc, MAX8925_TSC_IRQ_MASK, 0x0f);
+
if (!pdata->tsc_irq) {
dev_warn(chip->dev, "No interrupt support on TSC IRQ\n");
return 0;
diff --git a/drivers/mfd/menelaus.c b/drivers/mfd/menelaus.c
index a94b131a18ef..721948be12c7 100644
--- a/drivers/mfd/menelaus.c
+++ b/drivers/mfd/menelaus.c
@@ -1228,6 +1228,7 @@ fail2:
free_irq(client->irq, menelaus);
flush_scheduled_work();
fail1:
+ i2c_set_clientdata(client, NULL);
kfree(menelaus);
return err;
}
@@ -1237,8 +1238,8 @@ static int __exit menelaus_remove(struct i2c_client *client)
struct menelaus_chip *menelaus = i2c_get_clientdata(client);
free_irq(client->irq, menelaus);
- kfree(menelaus);
i2c_set_clientdata(client, NULL);
+ kfree(menelaus);
the_menelaus = NULL;
return 0;
}
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 8ffbb7a85a7e..7dd76bceaae8 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -48,7 +48,7 @@ static int mfd_add_device(struct device *parent, int id,
res[r].flags = cell->resources[r].flags;
/* Find out base to use */
- if (cell->resources[r].flags & IORESOURCE_MEM) {
+ if ((cell->resources[r].flags & IORESOURCE_MEM) && mem_base) {
res[r].parent = mem_base;
res[r].start = mem_base->start +
cell->resources[r].start;
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 63a614d696c1..d66f3893e87a 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -218,7 +218,7 @@ static struct attribute_group pcf_attr_group = {
int pcf50633_register_irq(struct pcf50633 *pcf, int irq,
void (*handler) (int, void *), void *data)
{
- if (irq < 0 || irq > PCF50633_NUM_IRQ || !handler)
+ if (irq < 0 || irq >= PCF50633_NUM_IRQ || !handler)
return -EINVAL;
if (WARN_ON(pcf->irq_handler[irq].handler))
@@ -235,7 +235,7 @@ EXPORT_SYMBOL_GPL(pcf50633_register_irq);
int pcf50633_free_irq(struct pcf50633 *pcf, int irq)
{
- if (irq < 0 || irq > PCF50633_NUM_IRQ)
+ if (irq < 0 || irq >= PCF50633_NUM_IRQ)
return -EINVAL;
mutex_lock(&pcf->lock);
@@ -676,6 +676,7 @@ static int __devexit pcf50633_remove(struct i2c_client *client)
for (i = 0; i < PCF50633_NUM_REGULATORS; i++)
platform_device_unregister(pcf->regulator_pdev[i]);
+ i2c_set_clientdata(client, NULL);
kfree(pcf);
return 0;
diff --git a/drivers/mfd/rdc321x-southbridge.c b/drivers/mfd/rdc321x-southbridge.c
new file mode 100644
index 000000000000..256dd560bd48
--- /dev/null
+++ b/drivers/mfd/rdc321x-southbridge.c
@@ -0,0 +1,123 @@
+/*
+ * RDC321x MFD southbridge driver
+ *
+ * Copyright (C) 2007-2010 Florian Fainelli <florian@openwrt.org>
+ * Copyright (C) 2010 Bernhard Loos <bernhardloos@googlemail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/rdc321x.h>
+
+static struct rdc321x_wdt_pdata rdc321x_wdt_pdata;
+
+static struct resource rdc321x_wdt_resource[] = {
+ {
+ .name = "wdt-reg",
+ .start = RDC321X_WDT_CTRL,
+ .end = RDC321X_WDT_CTRL + 0x3,
+ .flags = IORESOURCE_MEM,
+ }
+};
+
+static struct rdc321x_gpio_pdata rdc321x_gpio_pdata = {
+ .max_gpios = RDC321X_MAX_GPIO,
+};
+
+static struct resource rdc321x_gpio_resources[] = {
+ {
+ .name = "gpio-reg1",
+ .start = RDC321X_GPIO_CTRL_REG1,
+ .end = RDC321X_GPIO_CTRL_REG1 + 0x7,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .name = "gpio-reg2",
+ .start = RDC321X_GPIO_CTRL_REG2,
+ .end = RDC321X_GPIO_CTRL_REG2 + 0x7,
+ .flags = IORESOURCE_MEM,
+ }
+};
+
+static struct mfd_cell rdc321x_sb_cells[] = {
+ {
+ .name = "rdc321x-wdt",
+ .resources = rdc321x_wdt_resource,
+ .num_resources = ARRAY_SIZE(rdc321x_wdt_resource),
+ .driver_data = &rdc321x_wdt_pdata,
+ }, {
+ .name = "rdc321x-gpio",
+ .resources = rdc321x_gpio_resources,
+ .num_resources = ARRAY_SIZE(rdc321x_gpio_resources),
+ .driver_data = &rdc321x_gpio_pdata,
+ },
+};
+
+static int __devinit rdc321x_sb_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable device\n");
+ return err;
+ }
+
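+ /* Hand the southbridge PCI device to the GPIO and watchdog cells through their platform data. */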
+ rdc321x_gpio_pdata.sb_pdev = pdev;
+ rdc321x_wdt_pdata.sb_pdev = pdev;
+
+ return mfd_add_devices(&pdev->dev, -1,
+ rdc321x_sb_cells, ARRAY_SIZE(rdc321x_sb_cells), NULL, 0);
+}
+
+static void __devexit rdc321x_sb_remove(struct pci_dev *pdev)
+{
+ mfd_remove_devices(&pdev->dev);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(rdc321x_sb_table) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_RDC, PCI_DEVICE_ID_RDC_R6030) },
+ {}
+};
+
+static struct pci_driver rdc321x_sb_driver = {
+ .name = "RDC321x Southbridge",
+ .id_table = rdc321x_sb_table,
+ .probe = rdc321x_sb_probe,
+ .remove = __devexit_p(rdc321x_sb_remove),
+};
+
+static int __init rdc321x_sb_init(void)
+{
+ return pci_register_driver(&rdc321x_sb_driver);
+}
+
+static void __exit rdc321x_sb_exit(void)
+{
+ pci_unregister_driver(&rdc321x_sb_driver);
+}
+
+module_init(rdc321x_sb_init);
+module_exit(rdc321x_sb_exit);
+
+MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("RDC R-321x MFD southbridge driver");
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index da6383a934ac..5041d33adf0b 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -318,6 +318,9 @@ static int t7l66xb_probe(struct platform_device *dev)
struct resource *iomem, *rscr;
int ret;
+ if (pdata == NULL)
+ return -EINVAL;
+
iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!iomem)
return -EINVAL;
diff --git a/drivers/mfd/tc35892.c b/drivers/mfd/tc35892.c
new file mode 100644
index 000000000000..715f095dd7a6
--- /dev/null
+++ b/drivers/mfd/tc35892.c
@@ -0,0 +1,347 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License, version 2
+ * Author: Hanumath Prasad <hanumath.prasad@stericsson.com> for ST-Ericsson
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tc35892.h>
+
+/**
+ * tc35892_reg_read() - read a single TC35892 register
+ * @tc35892: Device to read from
+ * @reg: Register to read
+ */
+int tc35892_reg_read(struct tc35892 *tc35892, u8 reg)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(tc35892->i2c, reg);
+ if (ret < 0)
+ dev_err(tc35892->dev, "failed to read reg %#x: %d\n",
+ reg, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tc35892_reg_read);
+
+/**
+ * tc35892_reg_write() - write a single TC35892 register
+ * @tc35892: Device to write to
+ * @reg: Register to write
+ * @data: Value to write
+ */
+int tc35892_reg_write(struct tc35892 *tc35892, u8 reg, u8 data)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(tc35892->i2c, reg, data);
+ if (ret < 0)
+ dev_err(tc35892->dev, "failed to write reg %#x: %d\n",
+ reg, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tc35892_reg_write);
+
+/**
+ * tc35892_block_read() - read multiple TC35892 registers
+ * @tc35892: Device to read from
+ * @reg: First register
+ * @length: Number of registers
+ * @values: Buffer to write to
+ */
+int tc35892_block_read(struct tc35892 *tc35892, u8 reg, u8 length, u8 *values)
+{
+ int ret;
+
+ ret = i2c_smbus_read_i2c_block_data(tc35892->i2c, reg, length, values);
+ if (ret < 0)
+ dev_err(tc35892->dev, "failed to read regs %#x: %d\n",
+ reg, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tc35892_block_read);
+
+/**
+ * tc35892_block_write() - write multiple TC35892 registers
+ * @tc35892: Device to write to
+ * @reg: First register
+ * @length: Number of registers
+ * @values: Values to write
+ */
+int tc35892_block_write(struct tc35892 *tc35892, u8 reg, u8 length,
+ const u8 *values)
+{
+ int ret;
+
+ ret = i2c_smbus_write_i2c_block_data(tc35892->i2c, reg, length,
+ values);
+ if (ret < 0)
+ dev_err(tc35892->dev, "failed to write regs %#x: %d\n",
+ reg, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tc35892_block_write);
+
+/**
+ * tc35892_set_bits() - set the value of a bitfield in a TC35892 register
+ * @tc35892: Device to write to
+ * @reg: Register to write
+ * @mask: Mask of bits to set
+ * @val: Value to set
+ */
+int tc35892_set_bits(struct tc35892 *tc35892, u8 reg, u8 mask, u8 val)
+{
+ int ret;
+
+ mutex_lock(&tc35892->lock);
+
+ ret = tc35892_reg_read(tc35892, reg);
+ if (ret < 0)
+ goto out;
+
+ ret &= ~mask;
+ ret |= val;
+
+ ret = tc35892_reg_write(tc35892, reg, ret);
+
+out:
+ mutex_unlock(&tc35892->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tc35892_set_bits);
+
+static struct resource gpio_resources[] = {
+ {
+ .start = TC35892_INT_GPIIRQ,
+ .end = TC35892_INT_GPIIRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mfd_cell tc35892_devs[] = {
+ {
+ .name = "tc35892-gpio",
+ .num_resources = ARRAY_SIZE(gpio_resources),
+ .resources = &gpio_resources[0],
+ },
+};
+
+static irqreturn_t tc35892_irq(int irq, void *data)
+{
+ struct tc35892 *tc35892 = data;
+ int status;
+
+ status = tc35892_reg_read(tc35892, TC35892_IRQST);
+ if (status < 0)
+ return IRQ_NONE;
+
+ while (status) {
+ int bit = __ffs(status);
+
+ handle_nested_irq(tc35892->irq_base + bit);
+ status &= ~(1 << bit);
+ }
+
+ /*
+ * A dummy read or write (to any register) appears to be necessary to
+ * have the last interrupt clear (for example, GPIO IC write) take
+ * effect.
+ */
+ tc35892_reg_read(tc35892, TC35892_IRQST);
+
+ return IRQ_HANDLED;
+}
+
+static void tc35892_irq_dummy(unsigned int irq)
+{
+ /* No mask/unmask at this level */
+}
+
+static struct irq_chip tc35892_irq_chip = {
+ .name = "tc35892",
+ .mask = tc35892_irq_dummy,
+ .unmask = tc35892_irq_dummy,
+};
+
+static int tc35892_irq_init(struct tc35892 *tc35892)
+{
+ int base = tc35892->irq_base;
+ int irq;
+
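+ /* The internal interrupts are demultiplexed by the threaded handler, so set them up as nested IRQs. */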
+ for (irq = base; irq < base + TC35892_NR_INTERNAL_IRQS; irq++) {
+ set_irq_chip_data(irq, tc35892);
+ set_irq_chip_and_handler(irq, &tc35892_irq_chip,
+ handle_edge_irq);
+ set_irq_nested_thread(irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ set_irq_noprobe(irq);
+#endif
+ }
+
+ return 0;
+}
+
+static void tc35892_irq_remove(struct tc35892 *tc35892)
+{
+ int base = tc35892->irq_base;
+ int irq;
+
+ for (irq = base; irq < base + TC35892_NR_INTERNAL_IRQS; irq++) {
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, 0);
+#endif
+ set_irq_chip_and_handler(irq, NULL, NULL);
+ set_irq_chip_data(irq, NULL);
+ }
+}
+
+static int tc35892_chip_init(struct tc35892 *tc35892)
+{
+ int manf, ver, ret;
+
+ manf = tc35892_reg_read(tc35892, TC35892_MANFCODE);
+ if (manf < 0)
+ return manf;
+
+ ver = tc35892_reg_read(tc35892, TC35892_VERSION);
+ if (ver < 0)
+ return ver;
+
+ if (manf != TC35892_MANFCODE_MAGIC) {
+ dev_err(tc35892->dev, "unknown manufacturer: %#x\n", manf);
+ return -EINVAL;
+ }
+
+ dev_info(tc35892->dev, "manufacturer: %#x, version: %#x\n", manf, ver);
+
+ /* Put everything except the IRQ module into reset */
+ ret = tc35892_reg_write(tc35892, TC35892_RSTCTRL,
+ TC35892_RSTCTRL_TIMRST
+ | TC35892_RSTCTRL_ROTRST
+ | TC35892_RSTCTRL_KBDRST
+ | TC35892_RSTCTRL_GPIRST);
+ if (ret < 0)
+ return ret;
+
+ /* Clear the reset interrupt. */
+ return tc35892_reg_write(tc35892, TC35892_RSTINTCLR, 0x1);
+}
+
+static int __devinit tc35892_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct tc35892_platform_data *pdata = i2c->dev.platform_data;
+ struct tc35892 *tc35892;
+ int ret;
+
+ if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_BYTE_DATA
+ | I2C_FUNC_SMBUS_I2C_BLOCK))
+ return -EIO;
+
+ tc35892 = kzalloc(sizeof(struct tc35892), GFP_KERNEL);
+ if (!tc35892)
+ return -ENOMEM;
+
+ mutex_init(&tc35892->lock);
+
+ tc35892->dev = &i2c->dev;
+ tc35892->i2c = i2c;
+ tc35892->pdata = pdata;
+ tc35892->irq_base = pdata->irq_base;
+ tc35892->num_gpio = id->driver_data;
+
+ i2c_set_clientdata(i2c, tc35892);
+
+ ret = tc35892_chip_init(tc35892);
+ if (ret)
+ goto out_free;
+
+ ret = tc35892_irq_init(tc35892);
+ if (ret)
+ goto out_free;
+
+ ret = request_threaded_irq(tc35892->i2c->irq, NULL, tc35892_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "tc35892", tc35892);
+ if (ret) {
+ dev_err(tc35892->dev, "failed to request IRQ: %d\n", ret);
+ goto out_removeirq;
+ }
+
+ ret = mfd_add_devices(tc35892->dev, -1, tc35892_devs,
+ ARRAY_SIZE(tc35892_devs), NULL,
+ tc35892->irq_base);
+ if (ret) {
+ dev_err(tc35892->dev, "failed to add children\n");
+ goto out_freeirq;
+ }
+
+ return 0;
+
+out_freeirq:
+ free_irq(tc35892->i2c->irq, tc35892);
+out_removeirq:
+ tc35892_irq_remove(tc35892);
+out_free:
+ i2c_set_clientdata(i2c, NULL);
+ kfree(tc35892);
+ return ret;
+}
+
+static int __devexit tc35892_remove(struct i2c_client *client)
+{
+ struct tc35892 *tc35892 = i2c_get_clientdata(client);
+
+ mfd_remove_devices(tc35892->dev);
+
+ free_irq(tc35892->i2c->irq, tc35892);
+ tc35892_irq_remove(tc35892);
+
+ i2c_set_clientdata(client, NULL);
+ kfree(tc35892);
+
+ return 0;
+}
+
+static const struct i2c_device_id tc35892_id[] = {
+ { "tc35892", 24 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tc35892_id);
+
+static struct i2c_driver tc35892_driver = {
+ .driver.name = "tc35892",
+ .driver.owner = THIS_MODULE,
+ .probe = tc35892_probe,
+ .remove = __devexit_p(tc35892_remove),
+ .id_table = tc35892_id,
+};
+
+static int __init tc35892_init(void)
+{
+ return i2c_add_driver(&tc35892_driver);
+}
+subsys_initcall(tc35892_init);
+
+static void __exit tc35892_exit(void)
+{
+ i2c_del_driver(&tc35892_driver);
+}
+module_exit(tc35892_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TC35892 MFD core driver");
+MODULE_AUTHOR("Hanumath Prasad, Rabin Vincent");
diff --git a/drivers/mfd/timberdale.c b/drivers/mfd/timberdale.c
index 7f478ec4184b..2d4691acd033 100644
--- a/drivers/mfd/timberdale.c
+++ b/drivers/mfd/timberdale.c
@@ -31,6 +31,7 @@
#include <linux/i2c.h>
#include <linux/i2c-ocores.h>
+#include <linux/i2c-xiic.h>
#include <linux/i2c/tsc2007.h>
#include <linux/spi/spi.h>
@@ -40,6 +41,8 @@
#include <media/timb_radio.h>
+#include <linux/timb_dma.h>
+
#include "timberdale.h"
#define DRIVER_NAME "timberdale"
@@ -69,6 +72,12 @@ static struct i2c_board_info timberdale_i2c_board_info[] = {
},
};
+static __devinitdata struct xiic_i2c_platform_data
+timberdale_xiic_platform_data = {
+ .devices = timberdale_i2c_board_info,
+ .num_devices = ARRAY_SIZE(timberdale_i2c_board_info)
+};
+
static __devinitdata struct ocores_i2c_platform_data
timberdale_ocores_platform_data = {
.regstep = 4,
@@ -77,6 +86,19 @@ timberdale_ocores_platform_data = {
.num_devices = ARRAY_SIZE(timberdale_i2c_board_info)
};
+const static __devinitconst struct resource timberdale_xiic_resources[] = {
+ {
+ .start = XIICOFFSET,
+ .end = XIICEND,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = IRQ_TIMBERDALE_I2C,
+ .end = IRQ_TIMBERDALE_I2C,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
const static __devinitconst struct resource timberdale_ocores_resources[] = {
{
.start = OCORESOFFSET,
@@ -250,6 +272,65 @@ static __devinitdata struct timb_radio_platform_data
}
};
+static __devinitdata struct timb_dma_platform_data timb_dma_platform_data = {
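+ /* Channel order matches the DMA_* channel indices defined in timberdale.h. */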
+ .nr_channels = 10,
+ .channels = {
+ {
+ /* UART RX */
+ .rx = true,
+ .descriptors = 2,
+ .descriptor_elements = 1
+ },
+ {
+ /* UART TX */
+ .rx = false,
+ .descriptors = 2,
+ .descriptor_elements = 1
+ },
+ {
+ /* MLB RX */
+ .rx = true,
+ .descriptors = 2,
+ .descriptor_elements = 1
+ },
+ {
+ /* MLB TX */
+ .rx = false,
+ .descriptors = 2,
+ .descriptor_elements = 1
+ },
+ {
+ /* Video RX */
+ .rx = true,
+ .bytes_per_line = 1440,
+ .descriptors = 2,
+ .descriptor_elements = 16
+ },
+ {
+ /* Video framedrop */
+ },
+ {
+ /* SDHCI RX */
+ .rx = true,
+ },
+ {
+ /* SDHCI TX */
+ },
+ {
+ /* ETH RX */
+ .rx = true,
+ .descriptors = 2,
+ .descriptor_elements = 1
+ },
+ {
+ /* ETH TX */
+ .rx = false,
+ .descriptors = 2,
+ .descriptor_elements = 1
+ },
+ }
+};
+
const static __devinitconst struct resource timberdale_dma_resources[] = {
{
.start = DMAOFFSET,
@@ -265,11 +346,25 @@ const static __devinitconst struct resource timberdale_dma_resources[] = {
static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg0[] = {
{
+ .name = "timb-dma",
+ .num_resources = ARRAY_SIZE(timberdale_dma_resources),
+ .resources = timberdale_dma_resources,
+ .platform_data = &timb_dma_platform_data,
+ .data_size = sizeof(timb_dma_platform_data),
+ },
+ {
.name = "timb-uart",
.num_resources = ARRAY_SIZE(timberdale_uart_resources),
.resources = timberdale_uart_resources,
},
{
+ .name = "xiic-i2c",
+ .num_resources = ARRAY_SIZE(timberdale_xiic_resources),
+ .resources = timberdale_xiic_resources,
+ .platform_data = &timberdale_xiic_platform_data,
+ .data_size = sizeof(timberdale_xiic_platform_data),
+ },
+ {
.name = "timb-gpio",
.num_resources = ARRAY_SIZE(timberdale_gpio_resources),
.resources = timberdale_gpio_resources,
@@ -295,14 +390,16 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg0[] = {
.num_resources = ARRAY_SIZE(timberdale_eth_resources),
.resources = timberdale_eth_resources,
},
+};
+
+static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = {
{
.name = "timb-dma",
.num_resources = ARRAY_SIZE(timberdale_dma_resources),
.resources = timberdale_dma_resources,
+ .platform_data = &timb_dma_platform_data,
+ .data_size = sizeof(timb_dma_platform_data),
},
-};
-
-static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = {
{
.name = "timb-uart",
.num_resources = ARRAY_SIZE(timberdale_uart_resources),
@@ -314,6 +411,13 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = {
.resources = timberdale_uartlite_resources,
},
{
+ .name = "xiic-i2c",
+ .num_resources = ARRAY_SIZE(timberdale_xiic_resources),
+ .resources = timberdale_xiic_resources,
+ .platform_data = &timberdale_xiic_platform_data,
+ .data_size = sizeof(timberdale_xiic_platform_data),
+ },
+ {
.name = "timb-gpio",
.num_resources = ARRAY_SIZE(timberdale_gpio_resources),
.resources = timberdale_gpio_resources,
@@ -344,20 +448,29 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = {
.num_resources = ARRAY_SIZE(timberdale_eth_resources),
.resources = timberdale_eth_resources,
},
+};
+
+static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg2[] = {
{
.name = "timb-dma",
.num_resources = ARRAY_SIZE(timberdale_dma_resources),
.resources = timberdale_dma_resources,
+ .platform_data = &timb_dma_platform_data,
+ .data_size = sizeof(timb_dma_platform_data),
},
-};
-
-static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg2[] = {
{
.name = "timb-uart",
.num_resources = ARRAY_SIZE(timberdale_uart_resources),
.resources = timberdale_uart_resources,
},
{
+ .name = "xiic-i2c",
+ .num_resources = ARRAY_SIZE(timberdale_xiic_resources),
+ .resources = timberdale_xiic_resources,
+ .platform_data = &timberdale_xiic_platform_data,
+ .data_size = sizeof(timberdale_xiic_platform_data),
+ },
+ {
.name = "timb-gpio",
.num_resources = ARRAY_SIZE(timberdale_gpio_resources),
.resources = timberdale_gpio_resources,
@@ -378,14 +491,16 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg2[] = {
.platform_data = &timberdale_xspi_platform_data,
.data_size = sizeof(timberdale_xspi_platform_data),
},
+};
+
+static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg3[] = {
{
.name = "timb-dma",
.num_resources = ARRAY_SIZE(timberdale_dma_resources),
.resources = timberdale_dma_resources,
+ .platform_data = &timb_dma_platform_data,
+ .data_size = sizeof(timb_dma_platform_data),
},
-};
-
-static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg3[] = {
{
.name = "timb-uart",
.num_resources = ARRAY_SIZE(timberdale_uart_resources),
@@ -424,11 +539,6 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg3[] = {
.num_resources = ARRAY_SIZE(timberdale_eth_resources),
.resources = timberdale_eth_resources,
},
- {
- .name = "timb-dma",
- .num_resources = ARRAY_SIZE(timberdale_dma_resources),
- .resources = timberdale_dma_resources,
- },
};
static const __devinitconst struct resource timberdale_sdhc_resources[] = {
diff --git a/drivers/mfd/timberdale.h b/drivers/mfd/timberdale.h
index 8d27ffabc25d..c11bf6ebfe00 100644
--- a/drivers/mfd/timberdale.h
+++ b/drivers/mfd/timberdale.h
@@ -23,7 +23,7 @@
#ifndef MFD_TIMBERDALE_H
#define MFD_TIMBERDALE_H
-#define DRV_VERSION "0.1"
+#define DRV_VERSION "0.2"
/* This driver only support versions >= 3.8 and < 4.0 */
#define TIMB_SUPPORTED_MAJOR 3
@@ -66,7 +66,7 @@
#define CHIPCTLOFFSET 0x800
#define CHIPCTLEND 0x8ff
-#define CHIPCTLSIZE (CHIPCTLEND - CHIPCTLOFFSET)
+#define CHIPCTLSIZE (CHIPCTLEND - CHIPCTLOFFSET + 1)
#define INTCOFFSET 0xc00
#define INTCEND 0xfff
@@ -127,4 +127,16 @@
#define GPIO_PIN_BT_RST 15
#define GPIO_NR_PINS 16
+/* DMA Channels */
+#define DMA_UART_RX 0
+#define DMA_UART_TX 1
+#define DMA_MLB_RX 2
+#define DMA_MLB_TX 3
+#define DMA_VIDEO_RX 4
+#define DMA_VIDEO_DROP 5
+#define DMA_SDHCI_RX 6
+#define DMA_SDHCI_TX 7
+#define DMA_ETH_RX 8
+#define DMA_ETH_TX 9
+
#endif
diff --git a/drivers/mfd/tps65010.c b/drivers/mfd/tps65010.c
index e5955306c2fa..9b22a77f70f5 100644
--- a/drivers/mfd/tps65010.c
+++ b/drivers/mfd/tps65010.c
@@ -530,8 +530,8 @@ static int __exit tps65010_remove(struct i2c_client *client)
cancel_delayed_work(&tps->work);
flush_scheduled_work();
debugfs_remove(tps->file);
- kfree(tps);
i2c_set_clientdata(client, NULL);
+ kfree(tps);
the_tps = NULL;
return 0;
}
diff --git a/drivers/mfd/tps6507x.c b/drivers/mfd/tps6507x.c
new file mode 100644
index 000000000000..d859dffed39f
--- /dev/null
+++ b/drivers/mfd/tps6507x.c
@@ -0,0 +1,159 @@
+/*
+ * tps6507x.c -- TPS6507x chip family multi-function driver
+ *
+ * Copyright (c) 2010 RidgeRun (todd.fischer@ridgerun.com)
+ *
+ * Author: Todd Fischer
+ * todd.fischer@ridgerun.com
+ *
+ * Credits:
+ *
+ * Using code from wm831x-*.c, wm8400-core, Wolfson Microelectronics PLC.
+ *
+ * For licencing details see kernel-base/COPYING
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tps6507x.h>
+
+static struct mfd_cell tps6507x_devs[] = {
+ {
+ .name = "tps6507x-pmic",
+ },
+ {
+ .name = "tps6507x-ts",
+ },
+};
+
+
+static int tps6507x_i2c_read_device(struct tps6507x_dev *tps6507x, char reg,
+ int bytes, void *dest)
+{
+ struct i2c_client *i2c = tps6507x->i2c_client;
+ struct i2c_msg xfer[2];
+ int ret;
+
+ /* Write register */
+ xfer[0].addr = i2c->addr;
+ xfer[0].flags = 0;
+ xfer[0].len = 1;
+ xfer[0].buf = &reg;
+
+ /* Read data */
+ xfer[1].addr = i2c->addr;
+ xfer[1].flags = I2C_M_RD;
+ xfer[1].len = bytes;
+ xfer[1].buf = dest;
+
+ ret = i2c_transfer(i2c->adapter, xfer, 2);
+ if (ret == 2)
+ ret = 0;
+ else if (ret >= 0)
+ ret = -EIO;
+
+ return ret;
+}
+
+static int tps6507x_i2c_write_device(struct tps6507x_dev *tps6507x, char reg,
+ int bytes, void *src)
+{
+ struct i2c_client *i2c = tps6507x->i2c_client;
+ /* we add 1 byte for device register */
+ u8 msg[TPS6507X_MAX_REGISTER + 1];
+ int ret;
+
+ if (bytes > (TPS6507X_MAX_REGISTER + 1))
+ return -EINVAL;
+
+ msg[0] = reg;
+ memcpy(&msg[1], src, bytes);
+
+ ret = i2c_master_send(i2c, msg, bytes + 1);
+ if (ret < 0)
+ return ret;
+ if (ret != bytes + 1)
+ return -EIO;
+ return 0;
+}
+
+static int tps6507x_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct tps6507x_dev *tps6507x;
+ int ret = 0;
+
+ tps6507x = kzalloc(sizeof(struct tps6507x_dev), GFP_KERNEL);
+ if (tps6507x == NULL)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, tps6507x);
+ tps6507x->dev = &i2c->dev;
+ tps6507x->i2c_client = i2c;
+ tps6507x->read_dev = tps6507x_i2c_read_device;
+ tps6507x->write_dev = tps6507x_i2c_write_device;
+
+ ret = mfd_add_devices(tps6507x->dev, -1,
+ tps6507x_devs, ARRAY_SIZE(tps6507x_devs),
+ NULL, 0);
+
+ if (ret < 0)
+ goto err;
+
+ return ret;
+
+err:
+ mfd_remove_devices(tps6507x->dev);
+ kfree(tps6507x);
+ return ret;
+}
+
+static int tps6507x_i2c_remove(struct i2c_client *i2c)
+{
+ struct tps6507x_dev *tps6507x = i2c_get_clientdata(i2c);
+
+ mfd_remove_devices(tps6507x->dev);
+ kfree(tps6507x);
+
+ return 0;
+}
+
+static const struct i2c_device_id tps6507x_i2c_id[] = {
+ { "tps6507x", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tps6507x_i2c_id);
+
+
+static struct i2c_driver tps6507x_i2c_driver = {
+ .driver = {
+ .name = "tps6507x",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps6507x_i2c_probe,
+ .remove = tps6507x_i2c_remove,
+ .id_table = tps6507x_i2c_id,
+};
+
+static int __init tps6507x_i2c_init(void)
+{
+ return i2c_add_driver(&tps6507x_i2c_driver);
+}
+/* init early so consumer devices can complete system boot */
+subsys_initcall(tps6507x_i2c_init);
+
+static void __exit tps6507x_i2c_exit(void)
+{
+ i2c_del_driver(&tps6507x_i2c_driver);
+}
+module_exit(tps6507x_i2c_exit);
+
+MODULE_DESCRIPTION("TPS6507x chip family multi-function driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 562cd4935e17..720e099e506d 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -109,7 +109,7 @@
#endif
#if defined(CONFIG_TWL4030_CODEC) || defined(CONFIG_TWL4030_CODEC_MODULE) ||\
- defined(CONFIG_SND_SOC_TWL6030) || defined(CONFIG_SND_SOC_TWL6030_MODULE)
+ defined(CONFIG_SND_SOC_TWL6040) || defined(CONFIG_SND_SOC_TWL6040_MODULE)
#define twl_has_codec() true
#else
#define twl_has_codec() false
@@ -708,7 +708,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
/* Phoenix*/
if (twl_has_codec() && pdata->codec && twl_class_is_6030()) {
sub_chip_id = twl_map[TWL_MODULE_AUDIO_VOICE].sid;
- child = add_child(sub_chip_id, "twl6030_codec",
+ child = add_child(sub_chip_id, "twl6040_codec",
pdata->codec, sizeof(*pdata->codec),
false, 0, 0);
if (IS_ERR(child))
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index a3d5728b6449..1a968f34d679 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -322,7 +322,11 @@ EXPORT_SYMBOL_GPL(wm831x_set_bits);
*/
int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input)
{
- int ret, src;
+ int ret, src, irq_masked, timeout;
+
+ /* Are we using the interrupt? */
+ irq_masked = wm831x_reg_read(wm831x, WM831X_INTERRUPT_STATUS_1_MASK);
+ irq_masked &= WM831X_AUXADC_DATA_EINT;
mutex_lock(&wm831x->auxadc_lock);
@@ -342,6 +346,9 @@ int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input)
goto out;
}
+ /* Clear any notification from a very late arriving interrupt */
+ try_wait_for_completion(&wm831x->auxadc_done);
+
ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL,
WM831X_AUX_CVT_ENA, WM831X_AUX_CVT_ENA);
if (ret < 0) {
@@ -349,19 +356,46 @@ int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input)
goto disable;
}
- /* Ignore the result to allow us to soldier on without IRQ hookup */
- wait_for_completion_timeout(&wm831x->auxadc_done, msecs_to_jiffies(5));
-
- ret = wm831x_reg_read(wm831x, WM831X_AUXADC_CONTROL);
- if (ret < 0) {
- dev_err(wm831x->dev, "AUXADC status read failed: %d\n", ret);
- goto disable;
- }
-
- if (ret & WM831X_AUX_CVT_ENA) {
- dev_err(wm831x->dev, "Timed out reading AUXADC\n");
- ret = -EBUSY;
- goto disable;
+ if (irq_masked) {
+ /* If we're not using interrupts then poll the
+ * interrupt status register */
+ timeout = 5;
+ while (timeout) {
+ msleep(1);
+
+ ret = wm831x_reg_read(wm831x,
+ WM831X_INTERRUPT_STATUS_1);
+ if (ret < 0) {
+ dev_err(wm831x->dev,
+ "ISR 1 read failed: %d\n", ret);
+ goto disable;
+ }
+
+ /* Did it complete? */
+ if (ret & WM831X_AUXADC_DATA_EINT) {
+ wm831x_reg_write(wm831x,
+ WM831X_INTERRUPT_STATUS_1,
+ WM831X_AUXADC_DATA_EINT);
+ break;
+			}
+
+			timeout--;
+		}
+
+		if (!timeout) {
+			dev_err(wm831x->dev,
+				"AUXADC conversion timeout\n");
+			ret = -EBUSY;
+			goto disable;
+		}
+ } else {
+ /* If we are using interrupts then wait for the
+ * interrupt to complete. Use an extremely long
+ * timeout to handle situations with heavy load where
+ * the notification of the interrupt may be delayed by
+ * threaded IRQ handling. */
+ if (!wait_for_completion_timeout(&wm831x->auxadc_done,
+ msecs_to_jiffies(500))) {
+ dev_err(wm831x->dev, "Timed out waiting for AUXADC\n");
+ ret = -EBUSY;
+ goto disable;
+ }
}
ret = wm831x_reg_read(wm831x, WM831X_AUXADC_DATA);
@@ -1460,6 +1494,7 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
case WM8310:
parent = WM8310;
wm831x->num_gpio = 16;
+ wm831x->charger_irq_wake = 1;
if (rev > 0) {
wm831x->has_gpio_ena = 1;
wm831x->has_cs_sts = 1;
@@ -1471,6 +1506,7 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
case WM8311:
parent = WM8311;
wm831x->num_gpio = 16;
+ wm831x->charger_irq_wake = 1;
if (rev > 0) {
wm831x->has_gpio_ena = 1;
wm831x->has_cs_sts = 1;
@@ -1482,6 +1518,7 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
case WM8312:
parent = WM8312;
wm831x->num_gpio = 16;
+ wm831x->charger_irq_wake = 1;
if (rev > 0) {
wm831x->has_gpio_ena = 1;
wm831x->has_cs_sts = 1;
@@ -1620,6 +1657,42 @@ static void wm831x_device_exit(struct wm831x *wm831x)
kfree(wm831x);
}
+static int wm831x_device_suspend(struct wm831x *wm831x)
+{
+ int reg, mask;
+
+	/* If the charger IRQs are a wake source then make sure we ack
+	 * them even if they're not actively being used (eg, no power
+	 * driver or no IRQ line wired up), otherwise suspend won't
+	 * last very long.
+	 */
+ if (wm831x->charger_irq_wake) {
+ reg = wm831x_reg_read(wm831x, WM831X_INTERRUPT_STATUS_2_MASK);
+
+ mask = WM831X_CHG_BATT_HOT_EINT |
+ WM831X_CHG_BATT_COLD_EINT |
+ WM831X_CHG_BATT_FAIL_EINT |
+ WM831X_CHG_OV_EINT | WM831X_CHG_END_EINT |
+ WM831X_CHG_TO_EINT | WM831X_CHG_MODE_EINT |
+ WM831X_CHG_START_EINT;
+
+ /* If any of the interrupts are masked read the statuses */
+ if (reg & mask)
+ reg = wm831x_reg_read(wm831x,
+ WM831X_INTERRUPT_STATUS_2);
+
+ if (reg & mask) {
+ dev_info(wm831x->dev,
+ "Acknowledging masked charger IRQs: %x\n",
+ reg & mask);
+ wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_2,
+ reg & mask);
+ }
+ }
+
+ return 0;
+}
+
static int wm831x_i2c_read_device(struct wm831x *wm831x, unsigned short reg,
int bytes, void *dest)
{
@@ -1694,6 +1767,13 @@ static int wm831x_i2c_remove(struct i2c_client *i2c)
return 0;
}
+static int wm831x_i2c_suspend(struct i2c_client *i2c, pm_message_t mesg)
+{
+ struct wm831x *wm831x = i2c_get_clientdata(i2c);
+
+ return wm831x_device_suspend(wm831x);
+}
+
static const struct i2c_device_id wm831x_i2c_id[] = {
{ "wm8310", WM8310 },
{ "wm8311", WM8311 },
@@ -1711,6 +1791,7 @@ static struct i2c_driver wm831x_i2c_driver = {
},
.probe = wm831x_i2c_probe,
.remove = wm831x_i2c_remove,
+ .suspend = wm831x_i2c_suspend,
.id_table = wm831x_i2c_id,
};
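The AUXADC rework above stays invisible to callers: MFD children keep using the same helper and the core decides whether to poll the status register or sleep on the completion. A minimal consumer sketch, assuming the WM831X_AUX_BATT channel from the wm831x auxadc header:

#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/auxadc.h>

/* Illustrative only: perform one raw battery-voltage conversion */
static int example_read_batt(struct wm831x *wm831x)
{
	int val = wm831x_auxadc_read(wm831x, WM831X_AUX_BATT);

	if (val < 0)
		dev_err(wm831x->dev, "AUXADC read failed: %d\n", val);

	return val;
}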
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index 301327697117..7dabe4dbd373 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -21,6 +21,7 @@
#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/pdata.h>
+#include <linux/mfd/wm831x/gpio.h>
#include <linux/mfd/wm831x/irq.h>
#include <linux/delay.h>
@@ -38,8 +39,6 @@ struct wm831x_irq_data {
int primary;
int reg;
int mask;
- irq_handler_t handler;
- void *handler_data;
};
static struct wm831x_irq_data wm831x_irqs[] = {
@@ -388,12 +387,41 @@ static void wm831x_irq_mask(unsigned int irq)
wm831x->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
}
+static int wm831x_irq_set_type(unsigned int irq, unsigned int type)
+{
+ struct wm831x *wm831x = get_irq_chip_data(irq);
+ int val;
+
+ irq = irq - wm831x->irq_base;
+
+ if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11)
+ return -EINVAL;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_BOTH:
+ val = WM831X_GPN_INT_MODE;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ val = WM831X_GPN_POL;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return wm831x_set_bits(wm831x, WM831X_GPIO1_CONTROL + irq,
+ WM831X_GPN_INT_MODE | WM831X_GPN_POL, val);
+}
+
static struct irq_chip wm831x_irq_chip = {
.name = "wm831x",
.bus_lock = wm831x_irq_lock,
.bus_sync_unlock = wm831x_irq_sync_unlock,
.mask = wm831x_irq_mask,
.unmask = wm831x_irq_unmask,
+ .set_type = wm831x_irq_set_type,
};
/* The processing of the primary interrupt occurs in a thread so that
@@ -462,6 +490,14 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
mutex_init(&wm831x->irq_lock);
+ /* Mask the individual interrupt sources */
+ for (i = 0; i < ARRAY_SIZE(wm831x->irq_masks_cur); i++) {
+ wm831x->irq_masks_cur[i] = 0xffff;
+ wm831x->irq_masks_cache[i] = 0xffff;
+ wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_1_MASK + i,
+ 0xffff);
+ }
+
if (!irq) {
dev_warn(wm831x->dev,
"No interrupt specified - functionality limited\n");
@@ -477,14 +513,6 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
wm831x->irq = irq;
wm831x->irq_base = pdata->irq_base;
- /* Mask the individual interrupt sources */
- for (i = 0; i < ARRAY_SIZE(wm831x->irq_masks_cur); i++) {
- wm831x->irq_masks_cur[i] = 0xffff;
- wm831x->irq_masks_cache[i] = 0xffff;
- wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_1_MASK + i,
- 0xffff);
- }
-
/* Register them with genirq */
for (cur_irq = wm831x->irq_base;
cur_irq < ARRAY_SIZE(wm831x_irqs) + wm831x->irq_base;
diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c
index e400a3bed063..b5807484b4c9 100644
--- a/drivers/mfd/wm8350-core.c
+++ b/drivers/mfd/wm8350-core.c
@@ -363,6 +363,10 @@ int wm8350_read_auxadc(struct wm8350 *wm8350, int channel, int scale, int vref)
reg |= 1 << channel | WM8350_AUXADC_POLL;
wm8350_reg_write(wm8350, WM8350_DIGITISER_CONTROL_1, reg);
+ /* If a late IRQ left the completion signalled then consume
+ * the completion. */
+ try_wait_for_completion(&wm8350->auxadc_done);
+
/* We ignore the result of the completion and just check for a
* conversion result, allowing us to soldier on if the IRQ
* infrastructure is not set up for the chip. */
diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
index 65830f57c093..7795af4b1fe1 100644
--- a/drivers/mfd/wm8350-i2c.c
+++ b/drivers/mfd/wm8350-i2c.c
@@ -64,10 +64,8 @@ static int wm8350_i2c_probe(struct i2c_client *i2c,
int ret = 0;
wm8350 = kzalloc(sizeof(struct wm8350), GFP_KERNEL);
- if (wm8350 == NULL) {
- kfree(i2c);
+ if (wm8350 == NULL)
return -ENOMEM;
- }
i2c_set_clientdata(i2c, wm8350);
wm8350->dev = &i2c->dev;
@@ -82,6 +80,7 @@ static int wm8350_i2c_probe(struct i2c_client *i2c,
return ret;
err:
+ i2c_set_clientdata(i2c, NULL);
kfree(wm8350);
return ret;
}
@@ -91,6 +90,7 @@ static int wm8350_i2c_remove(struct i2c_client *i2c)
struct wm8350 *wm8350 = i2c_get_clientdata(i2c);
wm8350_device_exit(wm8350);
+ i2c_set_clientdata(i2c, NULL);
kfree(wm8350);
return 0;
diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c
index 865ce013a821..e08aafa663dc 100644
--- a/drivers/mfd/wm8400-core.c
+++ b/drivers/mfd/wm8400-core.c
@@ -118,7 +118,7 @@ static int wm8400_read(struct wm8400 *wm8400, u8 reg, int num_regs, u16 *dest)
{
int i, ret = 0;
- BUG_ON(reg + num_regs - 1 > ARRAY_SIZE(wm8400->reg_cache));
+ BUG_ON(reg + num_regs > ARRAY_SIZE(wm8400->reg_cache));
/* If there are any volatile reads then read back the entire block */
for (i = reg; i < reg + num_regs; i++)
@@ -144,7 +144,7 @@ static int wm8400_write(struct wm8400 *wm8400, u8 reg, int num_regs,
{
int ret, i;
- BUG_ON(reg + num_regs - 1 > ARRAY_SIZE(wm8400->reg_cache));
+ BUG_ON(reg + num_regs > ARRAY_SIZE(wm8400->reg_cache));
for (i = 0; i < num_regs; i++) {
BUG_ON(!reg_data[reg + i].writable);
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index cc524df10aa1..ec71c9368906 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -173,9 +173,34 @@ static struct mfd_cell wm8994_regulator_devs[] = {
{ .name = "wm8994-ldo", .id = 2 },
};
+static struct resource wm8994_codec_resources[] = {
+ {
+ .start = WM8994_IRQ_TEMP_SHUT,
+ .end = WM8994_IRQ_TEMP_WARN,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource wm8994_gpio_resources[] = {
+ {
+ .start = WM8994_IRQ_GPIO(1),
+ .end = WM8994_IRQ_GPIO(11),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
static struct mfd_cell wm8994_devs[] = {
- { .name = "wm8994-codec" },
- { .name = "wm8994-gpio" },
+ {
+ .name = "wm8994-codec",
+ .num_resources = ARRAY_SIZE(wm8994_codec_resources),
+ .resources = wm8994_codec_resources,
+ },
+
+ {
+ .name = "wm8994-gpio",
+ .num_resources = ARRAY_SIZE(wm8994_gpio_resources),
+ .resources = wm8994_gpio_resources,
+ },
};
/*
@@ -236,6 +261,11 @@ static int wm8994_device_resume(struct device *dev)
return ret;
}
+ ret = wm8994_write(wm8994, WM8994_INTERRUPT_STATUS_1_MASK,
+ WM8994_NUM_IRQ_REGS * 2, &wm8994->irq_masks_cur);
+ if (ret < 0)
+ dev_err(dev, "Failed to restore interrupt masks: %d\n", ret);
+
ret = wm8994_write(wm8994, WM8994_LDO_1, WM8994_NUM_LDO_REGS * 2,
&wm8994->ldo_regs);
if (ret < 0)
@@ -348,6 +378,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, unsigned long id, int irq)
if (pdata) {
+ wm8994->irq_base = pdata->irq_base;
wm8994->gpio_base = pdata->gpio_base;
/* GPIO configuration is only applied if it's non-zero */
@@ -375,16 +406,20 @@ static int wm8994_device_init(struct wm8994 *wm8994, unsigned long id, int irq)
WM8994_LDO1_DISCH, 0);
}
+ wm8994_irq_init(wm8994);
+
ret = mfd_add_devices(wm8994->dev, -1,
wm8994_devs, ARRAY_SIZE(wm8994_devs),
NULL, 0);
if (ret != 0) {
dev_err(wm8994->dev, "Failed to add children: %d\n", ret);
- goto err_enable;
+ goto err_irq;
}
return 0;
+err_irq:
+ wm8994_irq_exit(wm8994);
err_enable:
regulator_bulk_disable(ARRAY_SIZE(wm8994_main_supplies),
wm8994->supplies);
@@ -401,6 +436,7 @@ err:
static void wm8994_device_exit(struct wm8994 *wm8994)
{
mfd_remove_devices(wm8994->dev);
+ wm8994_irq_exit(wm8994);
regulator_bulk_disable(ARRAY_SIZE(wm8994_main_supplies),
wm8994->supplies);
regulator_bulk_free(ARRAY_SIZE(wm8994_main_supplies), wm8994->supplies);
@@ -469,6 +505,7 @@ static int wm8994_i2c_probe(struct i2c_client *i2c,
wm8994->control_data = i2c;
wm8994->read_dev = wm8994_i2c_read_device;
wm8994->write_dev = wm8994_i2c_write_device;
+ wm8994->irq = i2c->irq;
return wm8994_device_init(wm8994, id->driver_data, i2c->irq);
}
diff --git a/drivers/mfd/wm8994-irq.c b/drivers/mfd/wm8994-irq.c
new file mode 100644
index 000000000000..8400eb1ee5db
--- /dev/null
+++ b/drivers/mfd/wm8994-irq.c
@@ -0,0 +1,310 @@
+/*
+ * wm8994-irq.c -- Interrupt controller support for Wolfson WM8994
+ *
+ * Copyright 2010 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/irq.h>
+#include <linux/mfd/core.h>
+#include <linux/interrupt.h>
+
+#include <linux/mfd/wm8994/core.h>
+#include <linux/mfd/wm8994/registers.h>
+
+#include <linux/delay.h>
+
+struct wm8994_irq_data {
+ int reg;
+ int mask;
+};
+
+static struct wm8994_irq_data wm8994_irqs[] = {
+ [WM8994_IRQ_TEMP_SHUT] = {
+ .reg = 2,
+ .mask = WM8994_TEMP_SHUT_EINT,
+ },
+ [WM8994_IRQ_MIC1_DET] = {
+ .reg = 2,
+ .mask = WM8994_MIC1_DET_EINT,
+ },
+ [WM8994_IRQ_MIC1_SHRT] = {
+ .reg = 2,
+ .mask = WM8994_MIC1_SHRT_EINT,
+ },
+ [WM8994_IRQ_MIC2_DET] = {
+ .reg = 2,
+ .mask = WM8994_MIC2_DET_EINT,
+ },
+ [WM8994_IRQ_MIC2_SHRT] = {
+ .reg = 2,
+ .mask = WM8994_MIC2_SHRT_EINT,
+ },
+ [WM8994_IRQ_FLL1_LOCK] = {
+ .reg = 2,
+ .mask = WM8994_FLL1_LOCK_EINT,
+ },
+ [WM8994_IRQ_FLL2_LOCK] = {
+ .reg = 2,
+ .mask = WM8994_FLL2_LOCK_EINT,
+ },
+ [WM8994_IRQ_SRC1_LOCK] = {
+ .reg = 2,
+ .mask = WM8994_SRC1_LOCK_EINT,
+ },
+ [WM8994_IRQ_SRC2_LOCK] = {
+ .reg = 2,
+ .mask = WM8994_SRC2_LOCK_EINT,
+ },
+ [WM8994_IRQ_AIF1DRC1_SIG_DET] = {
+ .reg = 2,
+		.mask = WM8994_AIF1DRC1_SIG_DET_EINT,
+ },
+ [WM8994_IRQ_AIF1DRC2_SIG_DET] = {
+ .reg = 2,
+ .mask = WM8994_AIF1DRC2_SIG_DET_EINT,
+ },
+ [WM8994_IRQ_AIF2DRC_SIG_DET] = {
+ .reg = 2,
+ .mask = WM8994_AIF2DRC_SIG_DET_EINT,
+ },
+ [WM8994_IRQ_FIFOS_ERR] = {
+ .reg = 2,
+ .mask = WM8994_FIFOS_ERR_EINT,
+ },
+ [WM8994_IRQ_WSEQ_DONE] = {
+ .reg = 2,
+ .mask = WM8994_WSEQ_DONE_EINT,
+ },
+ [WM8994_IRQ_DCS_DONE] = {
+ .reg = 2,
+ .mask = WM8994_DCS_DONE_EINT,
+ },
+ [WM8994_IRQ_TEMP_WARN] = {
+ .reg = 2,
+ .mask = WM8994_TEMP_WARN_EINT,
+ },
+ [WM8994_IRQ_GPIO(1)] = {
+ .reg = 1,
+ .mask = WM8994_GP1_EINT,
+ },
+ [WM8994_IRQ_GPIO(2)] = {
+ .reg = 1,
+ .mask = WM8994_GP2_EINT,
+ },
+ [WM8994_IRQ_GPIO(3)] = {
+ .reg = 1,
+ .mask = WM8994_GP3_EINT,
+ },
+ [WM8994_IRQ_GPIO(4)] = {
+ .reg = 1,
+ .mask = WM8994_GP4_EINT,
+ },
+ [WM8994_IRQ_GPIO(5)] = {
+ .reg = 1,
+ .mask = WM8994_GP5_EINT,
+ },
+ [WM8994_IRQ_GPIO(6)] = {
+ .reg = 1,
+ .mask = WM8994_GP6_EINT,
+ },
+ [WM8994_IRQ_GPIO(7)] = {
+ .reg = 1,
+ .mask = WM8994_GP7_EINT,
+ },
+ [WM8994_IRQ_GPIO(8)] = {
+ .reg = 1,
+ .mask = WM8994_GP8_EINT,
+ },
+ [WM8994_IRQ_GPIO(9)] = {
+ .reg = 1,
+		.mask = WM8994_GP9_EINT,
+ },
+ [WM8994_IRQ_GPIO(10)] = {
+ .reg = 1,
+ .mask = WM8994_GP10_EINT,
+ },
+ [WM8994_IRQ_GPIO(11)] = {
+ .reg = 1,
+ .mask = WM8994_GP11_EINT,
+ },
+};
+
+static inline int irq_data_to_status_reg(struct wm8994_irq_data *irq_data)
+{
+ return WM8994_INTERRUPT_STATUS_1 - 1 + irq_data->reg;
+}
+
+static inline int irq_data_to_mask_reg(struct wm8994_irq_data *irq_data)
+{
+ return WM8994_INTERRUPT_STATUS_1_MASK - 1 + irq_data->reg;
+}
+
+static inline struct wm8994_irq_data *irq_to_wm8994_irq(struct wm8994 *wm8994,
+ int irq)
+{
+ return &wm8994_irqs[irq - wm8994->irq_base];
+}
+
+static void wm8994_irq_lock(unsigned int irq)
+{
+ struct wm8994 *wm8994 = get_irq_chip_data(irq);
+
+ mutex_lock(&wm8994->irq_lock);
+}
+
+static void wm8994_irq_sync_unlock(unsigned int irq)
+{
+ struct wm8994 *wm8994 = get_irq_chip_data(irq);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wm8994->irq_masks_cur); i++) {
+ /* If there's been a change in the mask write it back
+ * to the hardware. */
+ if (wm8994->irq_masks_cur[i] != wm8994->irq_masks_cache[i]) {
+ wm8994->irq_masks_cache[i] = wm8994->irq_masks_cur[i];
+ wm8994_reg_write(wm8994,
+ WM8994_INTERRUPT_STATUS_1_MASK + i,
+ wm8994->irq_masks_cur[i]);
+ }
+ }
+
+ mutex_unlock(&wm8994->irq_lock);
+}
+
+static void wm8994_irq_unmask(unsigned int irq)
+{
+ struct wm8994 *wm8994 = get_irq_chip_data(irq);
+ struct wm8994_irq_data *irq_data = irq_to_wm8994_irq(wm8994, irq);
+
+ wm8994->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
+}
+
+static void wm8994_irq_mask(unsigned int irq)
+{
+ struct wm8994 *wm8994 = get_irq_chip_data(irq);
+ struct wm8994_irq_data *irq_data = irq_to_wm8994_irq(wm8994, irq);
+
+ wm8994->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
+}
+
+static struct irq_chip wm8994_irq_chip = {
+ .name = "wm8994",
+ .bus_lock = wm8994_irq_lock,
+ .bus_sync_unlock = wm8994_irq_sync_unlock,
+ .mask = wm8994_irq_mask,
+ .unmask = wm8994_irq_unmask,
+};
+
+/* The processing of the primary interrupt occurs in a thread so that
+ * we can interact with the device over I2C or SPI. */
+static irqreturn_t wm8994_irq_thread(int irq, void *data)
+{
+ struct wm8994 *wm8994 = data;
+ unsigned int i;
+ u16 status[WM8994_NUM_IRQ_REGS];
+ int ret;
+
+ ret = wm8994_bulk_read(wm8994, WM8994_INTERRUPT_STATUS_1,
+ WM8994_NUM_IRQ_REGS, status);
+ if (ret < 0) {
+ dev_err(wm8994->dev, "Failed to read interrupt status: %d\n",
+ ret);
+ return IRQ_NONE;
+ }
+
+ /* Apply masking */
+ for (i = 0; i < WM8994_NUM_IRQ_REGS; i++)
+ status[i] &= ~wm8994->irq_masks_cur[i];
+
+ /* Report */
+ for (i = 0; i < ARRAY_SIZE(wm8994_irqs); i++) {
+ if (status[wm8994_irqs[i].reg - 1] & wm8994_irqs[i].mask)
+ handle_nested_irq(wm8994->irq_base + i);
+ }
+
+ /* Ack any unmasked IRQs */
+ for (i = 0; i < ARRAY_SIZE(status); i++) {
+ if (status[i])
+ wm8994_reg_write(wm8994, WM8994_INTERRUPT_STATUS_1 + i,
+ status[i]);
+ }
+
+ return IRQ_HANDLED;
+}
+
+int wm8994_irq_init(struct wm8994 *wm8994)
+{
+ int i, cur_irq, ret;
+
+ mutex_init(&wm8994->irq_lock);
+
+ /* Mask the individual interrupt sources */
+ for (i = 0; i < ARRAY_SIZE(wm8994->irq_masks_cur); i++) {
+ wm8994->irq_masks_cur[i] = 0xffff;
+ wm8994->irq_masks_cache[i] = 0xffff;
+ wm8994_reg_write(wm8994, WM8994_INTERRUPT_STATUS_1_MASK + i,
+ 0xffff);
+ }
+
+ if (!wm8994->irq) {
+ dev_warn(wm8994->dev,
+ "No interrupt specified, no interrupts\n");
+ wm8994->irq_base = 0;
+ return 0;
+ }
+
+ if (!wm8994->irq_base) {
+ dev_err(wm8994->dev,
+ "No interrupt base specified, no interrupts\n");
+ return 0;
+ }
+
+ /* Register them with genirq */
+ for (cur_irq = wm8994->irq_base;
+ cur_irq < ARRAY_SIZE(wm8994_irqs) + wm8994->irq_base;
+ cur_irq++) {
+ set_irq_chip_data(cur_irq, wm8994);
+ set_irq_chip_and_handler(cur_irq, &wm8994_irq_chip,
+ handle_edge_irq);
+ set_irq_nested_thread(cur_irq, 1);
+
+ /* ARM needs us to explicitly flag the IRQ as valid
+ * and will set them noprobe when we do so. */
+#ifdef CONFIG_ARM
+ set_irq_flags(cur_irq, IRQF_VALID);
+#else
+ set_irq_noprobe(cur_irq);
+#endif
+ }
+
+ ret = request_threaded_irq(wm8994->irq, NULL, wm8994_irq_thread,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "wm8994", wm8994);
+ if (ret != 0) {
+ dev_err(wm8994->dev, "Failed to request IRQ %d: %d\n",
+ wm8994->irq, ret);
+ return ret;
+ }
+
+ /* Enable top level interrupt if it was masked */
+ wm8994_reg_write(wm8994, WM8994_INTERRUPT_CONTROL, 0);
+
+ return 0;
+}
+
+void wm8994_irq_exit(struct wm8994 *wm8994)
+{
+ if (wm8994->irq)
+ free_irq(wm8994->irq, wm8994);
+}
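Since the chip's interrupts are demultiplexed from a threaded handler via handle_nested_irq(), consumers have to hook them as threaded interrupts as well. A sketch of how a child driver might claim the MIC1 detect line, assuming the board supplied pdata->irq_base so that wm8994->irq_base is valid:

#include <linux/interrupt.h>
#include <linux/mfd/wm8994/core.h>

static irqreturn_t example_mic1_det(int irq, void *data)
{
	struct wm8994 *wm8994 = data;

	dev_info(wm8994->dev, "MIC1 detect\n");

	return IRQ_HANDLED;
}

/* Illustrative only, e.g. called from a child cell's probe() */
static int example_request_mic1(struct wm8994 *wm8994)
{
	return request_threaded_irq(wm8994->irq_base + WM8994_IRQ_MIC1_DET,
				    NULL, example_mic1_det, IRQF_ONESHOT,
				    "MIC1 detect", wm8994);
}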
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 7b6f7eefdf8d..f12dc3e54402 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -3,7 +3,6 @@
#
obj-$(CONFIG_IBM_ASM) += ibmasm/
-obj-$(CONFIG_HDPU_FEATURES) += hdpuftrs/
obj-$(CONFIG_AD525X_DPOT) += ad525x_dpot.o
obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o
obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index db7d0f21b65d..0c11e411d8ec 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -54,7 +54,7 @@
struct at24_data {
struct at24_platform_data chip;
struct memory_accessor macc;
- bool use_smbus;
+ int use_smbus;
/*
* Lock protects against activities from other Linux tasks,
@@ -184,11 +184,19 @@ static ssize_t at24_eeprom_read(struct at24_data *at24, char *buf,
if (count > io_limit)
count = io_limit;
- if (at24->use_smbus) {
+ switch (at24->use_smbus) {
+ case I2C_SMBUS_I2C_BLOCK_DATA:
/* Smaller eeproms can work given some SMBus extension calls */
if (count > I2C_SMBUS_BLOCK_MAX)
count = I2C_SMBUS_BLOCK_MAX;
- } else {
+ break;
+ case I2C_SMBUS_WORD_DATA:
+ count = 2;
+ break;
+ case I2C_SMBUS_BYTE_DATA:
+ count = 1;
+ break;
+ default:
/*
* When we have a better choice than SMBus calls, use a
* combined I2C message. Write address; then read up to
@@ -219,10 +227,27 @@ static ssize_t at24_eeprom_read(struct at24_data *at24, char *buf,
timeout = jiffies + msecs_to_jiffies(write_timeout);
do {
read_time = jiffies;
- if (at24->use_smbus) {
+ switch (at24->use_smbus) {
+ case I2C_SMBUS_I2C_BLOCK_DATA:
status = i2c_smbus_read_i2c_block_data(client, offset,
count, buf);
- } else {
+ break;
+ case I2C_SMBUS_WORD_DATA:
+ status = i2c_smbus_read_word_data(client, offset);
+ if (status >= 0) {
+ buf[0] = status & 0xff;
+ buf[1] = status >> 8;
+ status = count;
+ }
+ break;
+ case I2C_SMBUS_BYTE_DATA:
+ status = i2c_smbus_read_byte_data(client, offset);
+ if (status >= 0) {
+ buf[0] = status;
+ status = count;
+ }
+ break;
+ default:
status = i2c_transfer(client->adapter, msg, 2);
if (status == 2)
status = count;
@@ -434,7 +459,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct at24_platform_data chip;
bool writable;
- bool use_smbus = false;
+ int use_smbus = 0;
struct at24_data *at24;
int err;
unsigned i, num_addresses;
@@ -475,12 +500,19 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
err = -EPFNOSUPPORT;
goto err_out;
}
- if (!i2c_check_functionality(client->adapter,
+ if (i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
+ use_smbus = I2C_SMBUS_I2C_BLOCK_DATA;
+ } else if (i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_WORD_DATA)) {
+ use_smbus = I2C_SMBUS_WORD_DATA;
+ } else if (i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_BYTE_DATA)) {
+ use_smbus = I2C_SMBUS_BYTE_DATA;
+ } else {
err = -EPFNOSUPPORT;
goto err_out;
}
- use_smbus = true;
}
if (chip.flags & AT24_FLAG_TAKE8ADDR)
@@ -566,11 +598,16 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
dev_info(&client->dev, "%zu byte %s EEPROM %s\n",
at24->bin.size, client->name,
writable ? "(writable)" : "(read-only)");
+ if (use_smbus == I2C_SMBUS_WORD_DATA ||
+ use_smbus == I2C_SMBUS_BYTE_DATA) {
+ dev_notice(&client->dev, "Falling back to %s reads, "
+ "performance will suffer\n", use_smbus ==
+ I2C_SMBUS_WORD_DATA ? "word" : "byte");
+ }
dev_dbg(&client->dev,
- "page_size %d, num_addresses %d, write_max %d%s\n",
+ "page_size %d, num_addresses %d, write_max %d, use_smbus %d\n",
chip.page_size, num_addresses,
- at24->write_max,
- use_smbus ? ", use_smbus" : "");
+ at24->write_max, use_smbus);
/* export data to kernel code */
if (chip.setup)
diff --git a/drivers/misc/hdpuftrs/Makefile b/drivers/misc/hdpuftrs/Makefile
deleted file mode 100644
index ac74ae679230..000000000000
--- a/drivers/misc/hdpuftrs/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_HDPU_FEATURES) := hdpu_cpustate.o hdpu_nexus.o
diff --git a/drivers/misc/hdpuftrs/hdpu_cpustate.c b/drivers/misc/hdpuftrs/hdpu_cpustate.c
deleted file mode 100644
index 176fe4e09d3f..000000000000
--- a/drivers/misc/hdpuftrs/hdpu_cpustate.c
+++ /dev/null
@@ -1,256 +0,0 @@
-/*
- * Sky CPU State Driver
- *
- * Copyright (C) 2002 Brian Waite
- *
- * This driver allows use of the CPU state bits
- * It exports the /dev/sky_cpustate and also
- * /proc/sky_cpustate pseudo-file for status information.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-#include <linux/smp_lock.h>
-#include <linux/miscdevice.h>
-#include <linux/proc_fs.h>
-#include <linux/hdpu_features.h>
-#include <linux/platform_device.h>
-#include <asm/uaccess.h>
-#include <linux/seq_file.h>
-#include <asm/io.h>
-
-#define SKY_CPUSTATE_VERSION "1.1"
-
-static int hdpu_cpustate_probe(struct platform_device *pdev);
-static int hdpu_cpustate_remove(struct platform_device *pdev);
-
-static unsigned char cpustate_get_state(void);
-static int cpustate_proc_open(struct inode *inode, struct file *file);
-static int cpustate_proc_read(struct seq_file *seq, void *offset);
-
-static struct cpustate_t cpustate;
-
-static const struct file_operations proc_cpustate = {
- .open = cpustate_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int cpustate_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, cpustate_proc_read, NULL);
-}
-
-static int cpustate_proc_read(struct seq_file *seq, void *offset)
-{
- seq_printf(seq, "CPU State: %04x\n", cpustate_get_state());
- return 0;
-}
-
-static int cpustate_get_ref(int excl)
-{
-
- int retval = -EBUSY;
-
- spin_lock(&cpustate.lock);
-
- if (cpustate.excl)
- goto out_busy;
-
- if (excl) {
- if (cpustate.open_count)
- goto out_busy;
- cpustate.excl = 1;
- }
-
- cpustate.open_count++;
- retval = 0;
-
- out_busy:
- spin_unlock(&cpustate.lock);
- return retval;
-}
-
-static int cpustate_free_ref(void)
-{
-
- spin_lock(&cpustate.lock);
-
- cpustate.excl = 0;
- cpustate.open_count--;
-
- spin_unlock(&cpustate.lock);
- return 0;
-}
-
-static unsigned char cpustate_get_state(void)
-{
-
- return cpustate.cached_val;
-}
-
-static void cpustate_set_state(unsigned char new_state)
-{
- unsigned int state = (new_state << 21);
-
-#ifdef DEBUG_CPUSTATE
- printk("CPUSTATE -> 0x%x\n", new_state);
-#endif
- spin_lock(&cpustate.lock);
- cpustate.cached_val = new_state;
- writel((0xff << 21), cpustate.clr_addr);
- writel(state, cpustate.set_addr);
- spin_unlock(&cpustate.lock);
-}
-
-/*
- * Now all the various file operations that we export.
- */
-
-static ssize_t cpustate_read(struct file *file, char *buf,
- size_t count, loff_t * ppos)
-{
- unsigned char data;
-
- if (count < 0)
- return -EFAULT;
- if (count == 0)
- return 0;
-
- data = cpustate_get_state();
- if (copy_to_user(buf, &data, sizeof(unsigned char)))
- return -EFAULT;
- return sizeof(unsigned char);
-}
-
-static ssize_t cpustate_write(struct file *file, const char *buf,
- size_t count, loff_t * ppos)
-{
- unsigned char data;
-
- if (count < 0)
- return -EFAULT;
-
- if (count == 0)
- return 0;
-
- if (copy_from_user((unsigned char *)&data, buf, sizeof(unsigned char)))
- return -EFAULT;
-
- cpustate_set_state(data);
- return sizeof(unsigned char);
-}
-
-static int cpustate_open(struct inode *inode, struct file *file)
-{
- int ret;
-
- lock_kernel();
- ret = cpustate_get_ref((file->f_flags & O_EXCL));
- unlock_kernel();
-
- return ret;
-}
-
-static int cpustate_release(struct inode *inode, struct file *file)
-{
- return cpustate_free_ref();
-}
-
-static struct platform_driver hdpu_cpustate_driver = {
- .probe = hdpu_cpustate_probe,
- .remove = hdpu_cpustate_remove,
- .driver = {
- .name = HDPU_CPUSTATE_NAME,
- .owner = THIS_MODULE,
- },
-};
-
-/*
- * The various file operations we support.
- */
-static const struct file_operations cpustate_fops = {
- .owner = THIS_MODULE,
- .open = cpustate_open,
- .release = cpustate_release,
- .read = cpustate_read,
- .write = cpustate_write,
- .llseek = no_llseek,
-};
-
-static struct miscdevice cpustate_dev = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "sky_cpustate",
- .fops = &cpustate_fops,
-};
-
-static int hdpu_cpustate_probe(struct platform_device *pdev)
-{
- struct resource *res;
- struct proc_dir_entry *proc_de;
- int ret;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- printk(KERN_ERR "sky_cpustate: "
- "Invalid memory resource.\n");
- return -EINVAL;
- }
- cpustate.set_addr = (unsigned long *)res->start;
- cpustate.clr_addr = (unsigned long *)res->end - 1;
-
- ret = misc_register(&cpustate_dev);
- if (ret) {
- printk(KERN_WARNING "sky_cpustate: "
- "Unable to register misc device.\n");
- cpustate.set_addr = NULL;
- cpustate.clr_addr = NULL;
- return ret;
- }
-
- proc_de = proc_create("sky_cpustate", 0666, NULL, &proc_cpustate);
- if (!proc_de) {
- printk(KERN_WARNING "sky_cpustate: "
- "Unable to create proc entry\n");
- }
-
- printk(KERN_INFO "Sky CPU State Driver v" SKY_CPUSTATE_VERSION "\n");
- return 0;
-}
-
-static int hdpu_cpustate_remove(struct platform_device *pdev)
-{
- cpustate.set_addr = NULL;
- cpustate.clr_addr = NULL;
-
- remove_proc_entry("sky_cpustate", NULL);
- misc_deregister(&cpustate_dev);
-
- return 0;
-}
-
-static int __init cpustate_init(void)
-{
- return platform_driver_register(&hdpu_cpustate_driver);
-}
-
-static void __exit cpustate_exit(void)
-{
- platform_driver_unregister(&hdpu_cpustate_driver);
-}
-
-module_init(cpustate_init);
-module_exit(cpustate_exit);
-
-MODULE_AUTHOR("Brian Waite");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" HDPU_CPUSTATE_NAME);
diff --git a/drivers/misc/hdpuftrs/hdpu_nexus.c b/drivers/misc/hdpuftrs/hdpu_nexus.c
deleted file mode 100644
index ce39fa54949b..000000000000
--- a/drivers/misc/hdpuftrs/hdpu_nexus.c
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Sky Nexus Register Driver
- *
- * Copyright (C) 2002 Brian Waite
- *
- * This driver allows reading the Nexus register
- * It exports the /proc/sky_chassis_id and also
- * /proc/sky_slot_id pseudo-file for status information.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/proc_fs.h>
-#include <linux/hdpu_features.h>
-#include <linux/platform_device.h>
-#include <linux/seq_file.h>
-#include <asm/io.h>
-
-static int hdpu_nexus_probe(struct platform_device *pdev);
-static int hdpu_nexus_remove(struct platform_device *pdev);
-static int hdpu_slot_id_open(struct inode *inode, struct file *file);
-static int hdpu_slot_id_read(struct seq_file *seq, void *offset);
-static int hdpu_chassis_id_open(struct inode *inode, struct file *file);
-static int hdpu_chassis_id_read(struct seq_file *seq, void *offset);
-
-static struct proc_dir_entry *hdpu_slot_id;
-static struct proc_dir_entry *hdpu_chassis_id;
-static int slot_id = -1;
-static int chassis_id = -1;
-
-static const struct file_operations proc_slot_id = {
- .open = hdpu_slot_id_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static const struct file_operations proc_chassis_id = {
- .open = hdpu_chassis_id_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static struct platform_driver hdpu_nexus_driver = {
- .probe = hdpu_nexus_probe,
- .remove = hdpu_nexus_remove,
- .driver = {
- .name = HDPU_NEXUS_NAME,
- .owner = THIS_MODULE,
- },
-};
-
-static int hdpu_slot_id_open(struct inode *inode, struct file *file)
-{
- return single_open(file, hdpu_slot_id_read, NULL);
-}
-
-static int hdpu_slot_id_read(struct seq_file *seq, void *offset)
-{
- seq_printf(seq, "%d\n", slot_id);
- return 0;
-}
-
-static int hdpu_chassis_id_open(struct inode *inode, struct file *file)
-{
- return single_open(file, hdpu_chassis_id_read, NULL);
-}
-
-static int hdpu_chassis_id_read(struct seq_file *seq, void *offset)
-{
- seq_printf(seq, "%d\n", chassis_id);
- return 0;
-}
-
-static int hdpu_nexus_probe(struct platform_device *pdev)
-{
- struct resource *res;
- int *nexus_id_addr;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- printk(KERN_ERR "sky_nexus: "
- "Invalid memory resource.\n");
- return -EINVAL;
- }
- nexus_id_addr = ioremap(res->start,
- (unsigned long)(res->end - res->start));
- if (nexus_id_addr) {
- slot_id = (*nexus_id_addr >> 8) & 0x1f;
- chassis_id = *nexus_id_addr & 0xff;
- iounmap(nexus_id_addr);
- } else {
- printk(KERN_ERR "sky_nexus: Could not map slot id\n");
- }
-
- hdpu_slot_id = proc_create("sky_slot_id", 0666, NULL, &proc_slot_id);
- if (!hdpu_slot_id) {
- printk(KERN_WARNING "sky_nexus: "
- "Unable to create proc dir entry: sky_slot_id\n");
- }
-
- hdpu_chassis_id = proc_create("sky_chassis_id", 0666, NULL,
- &proc_chassis_id);
- if (!hdpu_chassis_id)
- printk(KERN_WARNING "sky_nexus: "
- "Unable to create proc dir entry: sky_chassis_id\n");
-
- return 0;
-}
-
-static int hdpu_nexus_remove(struct platform_device *pdev)
-{
- slot_id = -1;
- chassis_id = -1;
-
- remove_proc_entry("sky_slot_id", NULL);
- remove_proc_entry("sky_chassis_id", NULL);
-
- hdpu_slot_id = 0;
- hdpu_chassis_id = 0;
-
- return 0;
-}
-
-static int __init nexus_init(void)
-{
- return platform_driver_register(&hdpu_nexus_driver);
-}
-
-static void __exit nexus_exit(void)
-{
- platform_driver_unregister(&hdpu_nexus_driver);
-}
-
-module_init(nexus_init);
-module_exit(nexus_exit);
-
-MODULE_AUTHOR("Brian Waite");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" HDPU_NEXUS_NAME);
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 31a991161f0a..dcadb5edb234 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -118,9 +118,9 @@ static int count = DEFAULT_COUNT;
module_param(recur_count, int, 0644);
MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test, "\
"default is 10");
-module_param(cpoint_name, charp, 0644);
+module_param(cpoint_name, charp, 0444);
MODULE_PARM_DESC(cpoint_name, " Crash Point, where kernel is to be crashed");
-module_param(cpoint_type, charp, 0644);
+module_param(cpoint_type, charp, 0444);
MODULE_PARM_DESC(cpoint_type, " Crash Point Type, action to be taken on "\
"hitting the crash point");
module_param(cpoint_count, int, 0644);
diff --git a/drivers/misc/vmware_balloon.c b/drivers/misc/vmware_balloon.c
index e7161c4e3798..db9cd0240c6f 100644
--- a/drivers/misc/vmware_balloon.c
+++ b/drivers/misc/vmware_balloon.c
@@ -41,7 +41,7 @@
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
-#include <asm/vmware.h>
+#include <asm/hypervisor.h>
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
@@ -767,7 +767,7 @@ static int __init vmballoon_init(void)
* Check if we are running on VMware's hypervisor and bail out
* if we are not.
*/
- if (!vmware_platform())
+ if (x86_hyper != &x86_hyper_vmware)
return -ENODEV;
vmballoon_wq = create_freezeable_workqueue("vmmemctl");
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 2dd4cfe7ca17..b9dee28ee7d0 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -296,6 +296,12 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
card->type = MMC_TYPE_SDIO;
/*
+ * Call the optional HC's init_card function to handle quirks.
+ */
+ if (host->ops->init_card)
+ host->ops->init_card(host, card);
+
+ /*
* For native busses: set card RCA and quit open drain mode.
*/
if (!powered_resume && !mmc_host_is_spi(host)) {
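The new hook lets a host controller driver adjust the freshly allocated card before SDIO initialisation continues, which is where quirks for soldered-down SDIO devices are typically applied. A minimal sketch of a host driver opting in; the names and the action taken are illustrative, not copied from any in-tree driver:

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>

static void example_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
	/* Purely illustrative: just note the card here.  Real users of
	 * the hook apply controller-specific quirks to the card. */
	dev_info(mmc_dev(mmc), "init_card: card type %u\n", card->type);
}

static const struct mmc_host_ops example_host_ops = {
	/* .request, .set_ios and friends omitted for brevity */
	.init_card	= example_init_card,
};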
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 88be37d9e9a5..52f50685ac0e 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -579,7 +579,7 @@ static void atmci_stop_dma(struct atmel_mci *host)
struct dma_chan *chan = host->data_chan;
if (chan) {
- chan->device->device_terminate_all(chan);
+ chan->device->device_control(chan, DMA_TERMINATE_ALL);
atmci_dma_cleanup(host);
} else {
/* Data transfer was stopped by the interrupt handler */
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 84c103a7ee13..ff115d920888 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -55,14 +55,16 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
host->cclk = host->mclk / (2 * (clk + 1));
}
if (host->hw_designer == AMBA_VENDOR_ST)
- clk |= MCI_FCEN; /* Bug fix in ST IP block */
+ clk |= MCI_ST_FCEN; /* Bug fix in ST IP block */
clk |= MCI_CLK_ENABLE;
/* This hasn't proven to be worthwhile */
/* clk |= MCI_CLK_PWRSAVE; */
}
if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
- clk |= MCI_WIDE_BUS;
+ clk |= MCI_4BIT_BUS;
+ if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
+ clk |= MCI_ST_8BIT_BUS;
writel(clk, host->base + MMCICLOCK);
}
@@ -629,7 +631,18 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
mmc->ops = &mmci_ops;
mmc->f_min = (host->mclk + 511) / 512;
- mmc->f_max = min(host->mclk, fmax);
+ /*
+ * If the platform data supplies a maximum operating
+ * frequency, this takes precedence. Else, we fall back
+ * to using the module parameter, which has a (low)
+ * default value in case it is not specified. Either
+ * value must not exceed the clock rate into the block,
+ * of course.
+ */
+ if (plat->f_max)
+ mmc->f_max = min(host->mclk, plat->f_max);
+ else
+ mmc->f_max = min(host->mclk, fmax);
dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);
#ifdef CONFIG_REGULATOR
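With the hunk above, a platform can cap the bus clock through its platform data rather than the fmax module parameter. A board-file sketch, assuming struct mmci_platform_data from <linux/amba/mmci.h> and the f_max member this series introduces; the frequency and OCR mask are placeholders:

#include <linux/amba/mmci.h>
#include <linux/mmc/host.h>

static struct mmci_platform_data example_mmci_plat = {
	.ocr_mask	= MMC_VDD_29_30,
	.f_max		= 16000000,	/* cap clocking of this block at 16 MHz */
};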
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 1ceb9a90f59b..d77062e5e3af 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -25,9 +25,11 @@
#define MCI_CLK_ENABLE (1 << 8)
#define MCI_CLK_PWRSAVE (1 << 9)
#define MCI_CLK_BYPASS (1 << 10)
-#define MCI_WIDE_BUS (1 << 11)
+#define MCI_4BIT_BUS (1 << 11)
+/* 8bit wide buses supported in ST Micro versions */
+#define MCI_ST_8BIT_BUS (1 << 12)
/* HW flow control on the ST Micro version */
-#define MCI_FCEN (1 << 13)
+#define MCI_ST_FCEN (1 << 13)
#define MMCIARGUMENT 0x008
#define MMCICOMMAND 0x00c
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index 04ae884383f6..61f1d27fed3f 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -3,6 +3,7 @@
*
* Copyright (C) 2007 Google Inc,
* Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
+ * Copyright (C) 2009, Code Aurora Forum. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -26,6 +27,7 @@
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/sdio.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/platform_device.h>
@@ -47,6 +49,8 @@
#define DRIVER_NAME "msm-sdcc"
+#define BUSCLK_PWRSAVE 1
+#define BUSCLK_TIMEOUT (HZ)
static unsigned int msmsdcc_fmin = 144000;
static unsigned int msmsdcc_fmax = 50000000;
static unsigned int msmsdcc_4bit = 1;
@@ -57,6 +61,67 @@ static unsigned int msmsdcc_sdioirq;
#define PIO_SPINMAX 30
#define CMD_SPINMAX 20
+
+static inline void
+msmsdcc_disable_clocks(struct msmsdcc_host *host, int deferr)
+{
+ WARN_ON(!host->clks_on);
+
+ BUG_ON(host->curr.mrq);
+
+ if (deferr) {
+ mod_timer(&host->busclk_timer, jiffies + BUSCLK_TIMEOUT);
+ } else {
+ del_timer_sync(&host->busclk_timer);
+ /* Need to check clks_on again in case the busclk
+ * timer fired
+ */
+ if (host->clks_on) {
+ clk_disable(host->clk);
+ clk_disable(host->pclk);
+ host->clks_on = 0;
+ }
+ }
+}
+
+static inline int
+msmsdcc_enable_clocks(struct msmsdcc_host *host)
+{
+ int rc;
+
+ del_timer_sync(&host->busclk_timer);
+
+ if (!host->clks_on) {
+ rc = clk_enable(host->pclk);
+ if (rc)
+ return rc;
+ rc = clk_enable(host->clk);
+ if (rc) {
+ clk_disable(host->pclk);
+ return rc;
+ }
+ udelay(1 + ((3 * USEC_PER_SEC) /
+ (host->clk_rate ? host->clk_rate : msmsdcc_fmin)));
+ host->clks_on = 1;
+ }
+ return 0;
+}
+
+static inline unsigned int
+msmsdcc_readl(struct msmsdcc_host *host, unsigned int reg)
+{
+ return readl(host->base + reg);
+}
+
+static inline void
+msmsdcc_writel(struct msmsdcc_host *host, u32 data, unsigned int reg)
+{
+ writel(data, host->base + reg);
+ /* 3 clk delay required! */
+ udelay(1 + ((3 * USEC_PER_SEC) /
+ (host->clk_rate ? host->clk_rate : msmsdcc_fmin)));
+}
+
static void
msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd,
u32 c);
@@ -64,8 +129,6 @@ msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd,
static void
msmsdcc_request_end(struct msmsdcc_host *host, struct mmc_request *mrq)
{
- writel(0, host->base + MMCICOMMAND);
-
BUG_ON(host->curr.data);
host->curr.mrq = NULL;
@@ -76,6 +139,9 @@ msmsdcc_request_end(struct msmsdcc_host *host, struct mmc_request *mrq)
if (mrq->cmd->error == -ETIMEDOUT)
mdelay(5);
+#if BUSCLK_PWRSAVE
+ msmsdcc_disable_clocks(host, 1);
+#endif
/*
* Need to drop the host lock here; mmc_request_done may call
* back into the driver...
@@ -88,7 +154,6 @@ msmsdcc_request_end(struct msmsdcc_host *host, struct mmc_request *mrq)
static void
msmsdcc_stop_data(struct msmsdcc_host *host)
{
- writel(0, host->base + MMCIDATACTRL);
host->curr.data = NULL;
host->curr.got_dataend = host->curr.got_datablkend = 0;
}
@@ -109,6 +174,31 @@ uint32_t msmsdcc_fifo_addr(struct msmsdcc_host *host)
return 0;
}
+static inline void
+msmsdcc_start_command_exec(struct msmsdcc_host *host, u32 arg, u32 c)
+{
+ msmsdcc_writel(host, arg, MMCIARGUMENT);
+ msmsdcc_writel(host, c, MMCICOMMAND);
+}
+
+static void
+msmsdcc_dma_exec_func(struct msm_dmov_cmd *cmd)
+{
+ struct msmsdcc_host *host = (struct msmsdcc_host *)cmd->data;
+
+ msmsdcc_writel(host, host->cmd_timeout, MMCIDATATIMER);
+ msmsdcc_writel(host, (unsigned int)host->curr.xfer_size,
+ MMCIDATALENGTH);
+ msmsdcc_writel(host, host->cmd_pio_irqmask, MMCIMASK1);
+ msmsdcc_writel(host, host->cmd_datactrl, MMCIDATACTRL);
+
+ if (host->cmd_cmd) {
+ msmsdcc_start_command_exec(host,
+ (u32) host->cmd_cmd->arg,
+ (u32) host->cmd_c);
+ }
+ host->dma.active = 1;
+}
+
static void
msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
unsigned int result,
@@ -121,8 +211,11 @@ msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
struct mmc_request *mrq;
spin_lock_irqsave(&host->lock, flags);
+ host->dma.active = 0;
+
mrq = host->curr.mrq;
BUG_ON(!mrq);
+ WARN_ON(!mrq->data);
if (!(result & DMOV_RSLT_VALID)) {
pr_err("msmsdcc: Invalid DataMover result\n");
@@ -146,7 +239,6 @@ msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
if (!mrq->data->error)
mrq->data->error = -EIO;
}
- host->dma.busy = 0;
dma_unmap_sg(mmc_dev(host->mmc), host->dma.sg, host->dma.num_ents,
host->dma.dir);
@@ -159,6 +251,7 @@ msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
}
host->dma.sg = NULL;
+ host->dma.busy = 0;
if ((host->curr.got_dataend && host->curr.got_datablkend)
|| mrq->data->error) {
@@ -172,12 +265,14 @@ msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
if (!mrq->data->error)
host->curr.data_xfered = host->curr.xfer_size;
if (!mrq->data->stop || mrq->cmd->error) {
- writel(0, host->base + MMCICOMMAND);
host->curr.mrq = NULL;
host->curr.cmd = NULL;
mrq->data->bytes_xfered = host->curr.data_xfered;
spin_unlock_irqrestore(&host->lock, flags);
+#if BUSCLK_PWRSAVE
+ msmsdcc_disable_clocks(host, 1);
+#endif
mmc_request_done(host->mmc, mrq);
return;
} else
@@ -218,6 +313,8 @@ static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
host->dma.sg = data->sg;
host->dma.num_ents = data->sg_len;
+ BUG_ON(host->dma.num_ents > NR_SG); /* Prevent memory corruption */
+
nc = host->dma.nc;
switch (host->pdev_id) {
@@ -246,22 +343,15 @@ static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
host->curr.user_pages = 0;
- n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
- host->dma.num_ents, host->dma.dir);
-
- if (n != host->dma.num_ents) {
- pr_err("%s: Unable to map in all sg elements\n",
- mmc_hostname(host->mmc));
- host->dma.sg = NULL;
- host->dma.num_ents = 0;
- return -ENOMEM;
- }
-
box = &nc->cmd[0];
for (i = 0; i < host->dma.num_ents; i++) {
box->cmd = CMD_MODE_BOX;
- if (i == (host->dma.num_ents - 1))
+ /* Initialize sg dma address */
+ sg->dma_address = page_to_dma(mmc_dev(host->mmc), sg_page(sg))
+ + sg->offset;
+
+ if (i == (host->dma.num_ents - 1))
box->cmd |= CMD_LC;
rows = (sg_dma_len(sg) % MCI_FIFOSIZE) ?
(sg_dma_len(sg) / MCI_FIFOSIZE) + 1 :
@@ -300,15 +390,70 @@ static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
host->dma.hdr.complete_func = msmsdcc_dma_complete_func;
+ n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
+ host->dma.num_ents, host->dma.dir);
+/* dsb inside dma_map_sg will write nc out to mem as well */
+
+ if (n != host->dma.num_ents) {
+ printk(KERN_ERR "%s: Unable to map in all sg elements\n",
+ mmc_hostname(host->mmc));
+ host->dma.sg = NULL;
+ host->dma.num_ents = 0;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int
+snoop_cccr_abort(struct mmc_command *cmd)
+{
+ if ((cmd->opcode == 52) &&
+ (cmd->arg & 0x80000000) &&
+ (((cmd->arg >> 9) & 0x1ffff) == SDIO_CCCR_ABORT))
+ return 1;
return 0;
}
static void
-msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data)
+msmsdcc_start_command_deferred(struct msmsdcc_host *host,
+ struct mmc_command *cmd, u32 *c)
+{
+ *c |= (cmd->opcode | MCI_CPSM_ENABLE);
+
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ if (cmd->flags & MMC_RSP_136)
+ *c |= MCI_CPSM_LONGRSP;
+ *c |= MCI_CPSM_RESPONSE;
+ }
+
+ if (/*interrupt*/0)
+ *c |= MCI_CPSM_INTERRUPT;
+
+ if ((((cmd->opcode == 17) || (cmd->opcode == 18)) ||
+ ((cmd->opcode == 24) || (cmd->opcode == 25))) ||
+ (cmd->opcode == 53))
+ *c |= MCI_CSPM_DATCMD;
+
+ if (cmd == cmd->mrq->stop)
+ *c |= MCI_CSPM_MCIABORT;
+
+ if (snoop_cccr_abort(cmd))
+ *c |= MCI_CSPM_MCIABORT;
+
+ if (host->curr.cmd != NULL) {
+ printk(KERN_ERR "%s: Overlapping command requests\n",
+ mmc_hostname(host->mmc));
+ }
+ host->curr.cmd = cmd;
+}
+
+static void
+msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data,
+ struct mmc_command *cmd, u32 c)
{
unsigned int datactrl, timeout;
unsigned long long clks;
- void __iomem *base = host->base;
unsigned int pio_irqmask = 0;
host->curr.data = data;
@@ -320,13 +465,6 @@ msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data)
memset(&host->pio, 0, sizeof(host->pio));
- clks = (unsigned long long)data->timeout_ns * host->clk_rate;
- do_div(clks, NSEC_PER_SEC);
- timeout = data->timeout_clks + (unsigned int)clks;
- writel(timeout, base + MMCIDATATIMER);
-
- writel(host->curr.xfer_size, base + MMCIDATALENGTH);
-
datactrl = MCI_DPSM_ENABLE | (data->blksz << 4);
if (!msmsdcc_config_dma(host, data))
@@ -347,47 +485,51 @@ msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data)
if (data->flags & MMC_DATA_READ)
datactrl |= MCI_DPSM_DIRECTION;
- writel(pio_irqmask, base + MMCIMASK1);
- writel(datactrl, base + MMCIDATACTRL);
+ clks = (unsigned long long)data->timeout_ns * host->clk_rate;
+ do_div(clks, NSEC_PER_SEC);
+	timeout = data->timeout_clks + (unsigned int)clks * 2;
if (datactrl & MCI_DPSM_DMAENABLE) {
+ /* Save parameters for the exec function */
+ host->cmd_timeout = timeout;
+ host->cmd_pio_irqmask = pio_irqmask;
+ host->cmd_datactrl = datactrl;
+ host->cmd_cmd = cmd;
+
+ host->dma.hdr.execute_func = msmsdcc_dma_exec_func;
+ host->dma.hdr.data = (void *)host;
host->dma.busy = 1;
+
+ if (cmd) {
+ msmsdcc_start_command_deferred(host, cmd, &c);
+ host->cmd_c = c;
+ }
msm_dmov_enqueue_cmd(host->dma.channel, &host->dma.hdr);
+ } else {
+ msmsdcc_writel(host, timeout, MMCIDATATIMER);
+
+ msmsdcc_writel(host, host->curr.xfer_size, MMCIDATALENGTH);
+
+ msmsdcc_writel(host, pio_irqmask, MMCIMASK1);
+ msmsdcc_writel(host, datactrl, MMCIDATACTRL);
+
+ if (cmd) {
+ /* Daisy-chain the command if requested */
+ msmsdcc_start_command(host, cmd, c);
+ }
}
}
static void
msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd, u32 c)
{
- void __iomem *base = host->base;
-
- if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
- writel(0, base + MMCICOMMAND);
- udelay(2 + ((5 * 1000000) / host->clk_rate));
- }
-
- c |= cmd->opcode | MCI_CPSM_ENABLE;
-
- if (cmd->flags & MMC_RSP_PRESENT) {
- if (cmd->flags & MMC_RSP_136)
- c |= MCI_CPSM_LONGRSP;
- c |= MCI_CPSM_RESPONSE;
- }
-
- if (cmd->opcode == 17 || cmd->opcode == 18 ||
- cmd->opcode == 24 || cmd->opcode == 25 ||
- cmd->opcode == 53)
- c |= MCI_CSPM_DATCMD;
-
if (cmd == cmd->mrq->stop)
c |= MCI_CSPM_MCIABORT;
- host->curr.cmd = cmd;
-
host->stats.cmds++;
- writel(cmd->arg, base + MMCIARGUMENT);
- writel(c, base + MMCICOMMAND);
+ msmsdcc_start_command_deferred(host, cmd, &c);
+ msmsdcc_start_command_exec(host, cmd->arg, c);
}
static void
@@ -421,13 +563,11 @@ msmsdcc_data_err(struct msmsdcc_host *host, struct mmc_data *data,
static int
msmsdcc_pio_read(struct msmsdcc_host *host, char *buffer, unsigned int remain)
{
- void __iomem *base = host->base;
uint32_t *ptr = (uint32_t *) buffer;
int count = 0;
- while (readl(base + MMCISTATUS) & MCI_RXDATAAVLBL) {
-
- *ptr = readl(base + MMCIFIFO + (count % MCI_FIFOSIZE));
+ while (msmsdcc_readl(host, MMCISTATUS) & MCI_RXDATAAVLBL) {
+ *ptr = msmsdcc_readl(host, MMCIFIFO + (count % MCI_FIFOSIZE));
ptr++;
count += sizeof(uint32_t);
@@ -459,7 +599,7 @@ msmsdcc_pio_write(struct msmsdcc_host *host, char *buffer,
if (remain == 0)
break;
- status = readl(base + MMCISTATUS);
+ status = msmsdcc_readl(host, MMCISTATUS);
} while (status & MCI_TXFIFOHALFEMPTY);
return ptr - buffer;
@@ -469,7 +609,7 @@ static int
msmsdcc_spin_on_status(struct msmsdcc_host *host, uint32_t mask, int maxspin)
{
while (maxspin) {
- if ((readl(host->base + MMCISTATUS) & mask))
+ if ((msmsdcc_readl(host, MMCISTATUS) & mask))
return 0;
udelay(1);
--maxspin;
@@ -477,14 +617,13 @@ msmsdcc_spin_on_status(struct msmsdcc_host *host, uint32_t mask, int maxspin)
return -ETIMEDOUT;
}
-static int
+static irqreturn_t
msmsdcc_pio_irq(int irq, void *dev_id)
{
struct msmsdcc_host *host = dev_id;
- void __iomem *base = host->base;
uint32_t status;
- status = readl(base + MMCISTATUS);
+ status = msmsdcc_readl(host, MMCISTATUS);
do {
unsigned long flags;
@@ -539,14 +678,14 @@ msmsdcc_pio_irq(int irq, void *dev_id)
host->pio.sg_off = 0;
}
- status = readl(base + MMCISTATUS);
+ status = msmsdcc_readl(host, MMCISTATUS);
} while (1);
if (status & MCI_RXACTIVE && host->curr.xfer_remain < MCI_FIFOSIZE)
- writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);
+ msmsdcc_writel(host, MCI_RXDATAAVLBLMASK, MMCIMASK1);
if (!host->curr.xfer_remain)
- writel(0, base + MMCIMASK1);
+ msmsdcc_writel(host, 0, MMCIMASK1);
return IRQ_HANDLED;
}
@@ -554,15 +693,13 @@ msmsdcc_pio_irq(int irq, void *dev_id)
static void msmsdcc_do_cmdirq(struct msmsdcc_host *host, uint32_t status)
{
struct mmc_command *cmd = host->curr.cmd;
- void __iomem *base = host->base;
host->curr.cmd = NULL;
- cmd->resp[0] = readl(base + MMCIRESPONSE0);
- cmd->resp[1] = readl(base + MMCIRESPONSE1);
- cmd->resp[2] = readl(base + MMCIRESPONSE2);
- cmd->resp[3] = readl(base + MMCIRESPONSE3);
+ cmd->resp[0] = msmsdcc_readl(host, MMCIRESPONSE0);
+ cmd->resp[1] = msmsdcc_readl(host, MMCIRESPONSE1);
+ cmd->resp[2] = msmsdcc_readl(host, MMCIRESPONSE2);
+ cmd->resp[3] = msmsdcc_readl(host, MMCIRESPONSE3);
- del_timer(&host->command_timer);
if (status & MCI_CMDTIMEOUT) {
cmd->error = -ETIMEDOUT;
} else if (status & MCI_CMDCRCFAIL &&
@@ -580,8 +717,10 @@ static void msmsdcc_do_cmdirq(struct msmsdcc_host *host, uint32_t status)
msmsdcc_request_end(host, cmd->mrq);
} else /* host->data == NULL */
msmsdcc_request_end(host, cmd->mrq);
- } else if (!(cmd->data->flags & MMC_DATA_READ))
- msmsdcc_start_data(host, cmd->data);
+ } else if (cmd->data)
+ if (!(cmd->data->flags & MMC_DATA_READ))
+ msmsdcc_start_data(host, cmd->data,
+ NULL, 0);
}
static void
@@ -590,6 +729,11 @@ msmsdcc_handle_irq_data(struct msmsdcc_host *host, u32 status,
{
struct mmc_data *data = host->curr.data;
+ if (status & (MCI_CMDSENT | MCI_CMDRESPEND | MCI_CMDCRCFAIL |
+ MCI_CMDTIMEOUT) && host->curr.cmd) {
+ msmsdcc_do_cmdirq(host, status);
+ }
+
if (!data)
return;
@@ -602,7 +746,8 @@ msmsdcc_handle_irq_data(struct msmsdcc_host *host, u32 status,
msm_dmov_stop_cmd(host->dma.channel,
&host->dma.hdr, 0);
else {
- msmsdcc_stop_data(host);
+ if (host->curr.data)
+ msmsdcc_stop_data(host);
if (!data->stop)
msmsdcc_request_end(host, data->mrq);
else
@@ -657,17 +802,18 @@ msmsdcc_irq(int irq, void *dev_id)
spin_lock(&host->lock);
do {
- status = readl(base + MMCISTATUS);
+ status = msmsdcc_readl(host, MMCISTATUS);
+ status &= (msmsdcc_readl(host, MMCIMASK0) |
+ MCI_DATABLOCKENDMASK);
+ msmsdcc_writel(host, status, MMCICLEAR);
- status &= (readl(base + MMCIMASK0) | MCI_DATABLOCKENDMASK);
- writel(status, base + MMCICLEAR);
+ if (status & MCI_SDIOINTR)
+ status &= ~MCI_SDIOINTR;
- msmsdcc_handle_irq_data(host, status, base);
+ if (!status)
+ break;
- if (status & (MCI_CMDSENT | MCI_CMDRESPEND | MCI_CMDCRCFAIL |
- MCI_CMDTIMEOUT) && host->curr.cmd) {
- msmsdcc_do_cmdirq(host, status);
- }
+ msmsdcc_handle_irq_data(host, status, base);
if (status & MCI_SDIOINTOPER) {
cardint = 1;
@@ -714,24 +860,27 @@ msmsdcc_request(struct mmc_host *mmc, struct mmc_request *mrq)
return;
}
+ msmsdcc_enable_clocks(host);
+
host->curr.mrq = mrq;
if (mrq->data && mrq->data->flags & MMC_DATA_READ)
- msmsdcc_start_data(host, mrq->data);
-
- msmsdcc_start_command(host, mrq->cmd, 0);
+ /* Queue/read data, daisy-chain command when data starts */
+ msmsdcc_start_data(host, mrq->data, mrq->cmd, 0);
+ else
+ msmsdcc_start_command(host, mrq->cmd, 0);
if (host->cmdpoll && !msmsdcc_spin_on_status(host,
MCI_CMDRESPEND|MCI_CMDCRCFAIL|MCI_CMDTIMEOUT,
CMD_SPINMAX)) {
- uint32_t status = readl(host->base + MMCISTATUS);
+ uint32_t status = msmsdcc_readl(host, MMCISTATUS);
msmsdcc_do_cmdirq(host, status);
- writel(MCI_CMDRESPEND | MCI_CMDCRCFAIL | MCI_CMDTIMEOUT,
- host->base + MMCICLEAR);
+ msmsdcc_writel(host,
+ MCI_CMDRESPEND | MCI_CMDCRCFAIL | MCI_CMDTIMEOUT,
+ MMCICLEAR);
host->stats.cmdpoll_hits++;
} else {
host->stats.cmdpoll_misses++;
- mod_timer(&host->command_timer, jiffies + HZ);
}
spin_unlock_irqrestore(&host->lock, flags);
}
@@ -742,14 +891,13 @@ msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct msmsdcc_host *host = mmc_priv(mmc);
u32 clk = 0, pwr = 0;
int rc;
+ unsigned long flags;
- if (ios->clock) {
+ spin_lock_irqsave(&host->lock, flags);
- if (!host->clks_on) {
- clk_enable(host->pclk);
- clk_enable(host->clk);
- host->clks_on = 1;
- }
+ msmsdcc_enable_clocks(host);
+
+ if (ios->clock) {
if (ios->clock != host->clk_rate) {
rc = clk_set_rate(host->clk, ios->clock);
if (rc < 0)
@@ -787,18 +935,16 @@ msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
pwr |= MCI_OD;
- writel(clk, host->base + MMCICLOCK);
+ msmsdcc_writel(host, clk, MMCICLOCK);
if (host->pwr != pwr) {
host->pwr = pwr;
- writel(pwr, host->base + MMCIPOWER);
- }
-
- if (!(clk & MCI_CLK_ENABLE) && host->clks_on) {
- clk_disable(host->clk);
- clk_disable(host->pclk);
- host->clks_on = 0;
+ msmsdcc_writel(host, pwr, MMCIPOWER);
}
+#if BUSCLK_PWRSAVE
+ msmsdcc_disable_clocks(host, 1);
+#endif
+ spin_unlock_irqrestore(&host->lock, flags);
}
static void msmsdcc_enable_sdio_irq(struct mmc_host *mmc, int enable)
@@ -809,13 +955,13 @@ static void msmsdcc_enable_sdio_irq(struct mmc_host *mmc, int enable)
spin_lock_irqsave(&host->lock, flags);
if (msmsdcc_sdioirq == 1) {
- status = readl(host->base + MMCIMASK0);
+ status = msmsdcc_readl(host, MMCIMASK0);
if (enable)
status |= MCI_SDIOINTOPERMASK;
else
status &= ~MCI_SDIOINTOPERMASK;
host->saved_irq0mask = status;
- writel(status, host->base + MMCIMASK0);
+ msmsdcc_writel(host, status, MMCIMASK0);
}
spin_unlock_irqrestore(&host->lock, flags);
}
@@ -875,42 +1021,13 @@ msmsdcc_status_notify_cb(int card_present, void *dev_id)
msmsdcc_check_status((unsigned long) host);
}
-/*
- * called when a command expires.
- * Dump some debugging, and then error
- * out the transaction.
- */
static void
-msmsdcc_command_expired(unsigned long _data)
+msmsdcc_busclk_expired(unsigned long _data)
{
struct msmsdcc_host *host = (struct msmsdcc_host *) _data;
- struct mmc_request *mrq;
- unsigned long flags;
-
- spin_lock_irqsave(&host->lock, flags);
- mrq = host->curr.mrq;
-
- if (!mrq) {
- pr_info("%s: Command expiry misfire\n",
- mmc_hostname(host->mmc));
- spin_unlock_irqrestore(&host->lock, flags);
- return;
- }
-
- pr_err("%s: Command timeout (%p %p %p %p)\n",
- mmc_hostname(host->mmc), mrq, mrq->cmd,
- mrq->data, host->dma.sg);
-
- mrq->cmd->error = -ETIMEDOUT;
- msmsdcc_stop_data(host);
- writel(0, host->base + MMCICOMMAND);
-
- host->curr.mrq = NULL;
- host->curr.cmd = NULL;
-
- spin_unlock_irqrestore(&host->lock, flags);
- mmc_request_done(host->mmc, mrq);
+ if (host->clks_on)
+ msmsdcc_disable_clocks(host, 0);
}
static int
@@ -1012,6 +1129,7 @@ msmsdcc_probe(struct platform_device *pdev)
host->pdev_id = pdev->id;
host->plat = plat;
host->mmc = mmc;
+ host->curr.cmd = NULL;
host->cmdpoll = 1;
@@ -1027,36 +1145,35 @@ msmsdcc_probe(struct platform_device *pdev)
host->dmares = dmares;
spin_lock_init(&host->lock);
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ if (plat->embedded_sdio)
+ mmc_set_embedded_sdio_data(mmc,
+ &plat->embedded_sdio->cis,
+ &plat->embedded_sdio->cccr,
+ plat->embedded_sdio->funcs,
+ plat->embedded_sdio->num_funcs);
+#endif
+
/*
* Setup DMA
*/
msmsdcc_init_dma(host);
- /*
- * Setup main peripheral bus clock
- */
+ /* Get our clocks */
host->pclk = clk_get(&pdev->dev, "sdc_pclk");
if (IS_ERR(host->pclk)) {
ret = PTR_ERR(host->pclk);
goto host_free;
}
- ret = clk_enable(host->pclk);
- if (ret)
- goto pclk_put;
-
- host->pclk_rate = clk_get_rate(host->pclk);
-
- /*
- * Setup SDC MMC clock
- */
host->clk = clk_get(&pdev->dev, "sdc_clk");
if (IS_ERR(host->clk)) {
ret = PTR_ERR(host->clk);
- goto pclk_disable;
+ goto pclk_put;
}
- ret = clk_enable(host->clk);
+ /* Enable clocks */
+ ret = msmsdcc_enable_clocks(host);
if (ret)
goto clk_put;
@@ -1066,10 +1183,9 @@ msmsdcc_probe(struct platform_device *pdev)
goto clk_disable;
}
+ host->pclk_rate = clk_get_rate(host->pclk);
host->clk_rate = clk_get_rate(host->clk);
- host->clks_on = 1;
-
/*
* Setup MMC host structure
*/
@@ -1092,10 +1208,10 @@ msmsdcc_probe(struct platform_device *pdev)
mmc->max_req_size = 33554432; /* MCI_DATA_LENGTH is 25 bits */
mmc->max_seg_size = mmc->max_req_size;
- writel(0, host->base + MMCIMASK0);
- writel(0x5e007ff, host->base + MMCICLEAR); /* Add: 1 << 25 */
+ msmsdcc_writel(host, 0, MMCIMASK0);
+ msmsdcc_writel(host, 0x5e007ff, MMCICLEAR);
- writel(MCI_IRQENABLE, host->base + MMCIMASK0);
+ msmsdcc_writel(host, MCI_IRQENABLE, MMCIMASK0);
host->saved_irq0mask = MCI_IRQENABLE;
/*
@@ -1137,13 +1253,9 @@ msmsdcc_probe(struct platform_device *pdev)
host->eject = !host->oldstat;
}
- /*
- * Setup a command timer. We currently need this due to
- * some 'strange' timeout / error handling situations.
- */
- init_timer(&host->command_timer);
- host->command_timer.data = (unsigned long) host;
- host->command_timer.function = msmsdcc_command_expired;
+ init_timer(&host->busclk_timer);
+ host->busclk_timer.data = (unsigned long) host;
+ host->busclk_timer.function = msmsdcc_busclk_expired;
ret = request_irq(cmd_irqres->start, msmsdcc_irq, IRQF_SHARED,
DRIVER_NAME " (cmd)", host);
@@ -1181,6 +1293,9 @@ msmsdcc_probe(struct platform_device *pdev)
if (host->timer.function)
pr_info("%s: Polling status mode enabled\n", mmc_hostname(mmc));
+#if BUSCLK_PWRSAVE
+ msmsdcc_disable_clocks(host, 1);
+#endif
return 0;
cmd_irq_free:
free_irq(cmd_irqres->start, host);
@@ -1188,11 +1303,9 @@ msmsdcc_probe(struct platform_device *pdev)
if (host->stat_irq)
free_irq(host->stat_irq, host);
clk_disable:
- clk_disable(host->clk);
+ msmsdcc_disable_clocks(host, 0);
clk_put:
clk_put(host->clk);
- pclk_disable:
- clk_disable(host->pclk);
pclk_put:
clk_put(host->pclk);
host_free:
@@ -1215,15 +1328,10 @@ msmsdcc_suspend(struct platform_device *dev, pm_message_t state)
if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
rc = mmc_suspend_host(mmc, state);
- if (!rc) {
- writel(0, host->base + MMCIMASK0);
-
- if (host->clks_on) {
- clk_disable(host->clk);
- clk_disable(host->pclk);
- host->clks_on = 0;
- }
- }
+ if (!rc)
+ msmsdcc_writel(host, 0, MMCIMASK0);
+ if (host->clks_on)
+ msmsdcc_disable_clocks(host, 0);
}
return rc;
}
@@ -1232,27 +1340,21 @@ static int
msmsdcc_resume(struct platform_device *dev)
{
struct mmc_host *mmc = mmc_get_drvdata(dev);
- unsigned long flags;
if (mmc) {
struct msmsdcc_host *host = mmc_priv(mmc);
- spin_lock_irqsave(&host->lock, flags);
+ msmsdcc_enable_clocks(host);
- if (!host->clks_on) {
- clk_enable(host->pclk);
- clk_enable(host->clk);
- host->clks_on = 1;
- }
-
- writel(host->saved_irq0mask, host->base + MMCIMASK0);
-
- spin_unlock_irqrestore(&host->lock, flags);
+ msmsdcc_writel(host, host->saved_irq0mask, MMCIMASK0);
if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
mmc_resume_host(mmc);
if (host->stat_irq)
enable_irq(host->stat_irq);
+#if BUSCLK_PWRSAVE
+ msmsdcc_disable_clocks(host, 1);
+#endif
}
return 0;
}
diff --git a/drivers/mmc/host/msm_sdcc.h b/drivers/mmc/host/msm_sdcc.h
index 8c8448469811..da0039c9285e 100644
--- a/drivers/mmc/host/msm_sdcc.h
+++ b/drivers/mmc/host/msm_sdcc.h
@@ -171,6 +171,7 @@ struct msmsdcc_dma_data {
int channel;
struct msmsdcc_host *host;
int busy; /* Set if DM is busy */
+ int active;
};
struct msmsdcc_pio_data {
@@ -213,7 +214,7 @@ struct msmsdcc_host {
struct clk *clk; /* main MMC bus clock */
struct clk *pclk; /* SDCC peripheral bus clock */
unsigned int clks_on; /* set if clocks are enabled */
- struct timer_list command_timer;
+ struct timer_list busclk_timer;
unsigned int eject; /* eject state */
@@ -233,6 +234,18 @@ struct msmsdcc_host {
struct msmsdcc_pio_data pio;
int cmdpoll;
struct msmsdcc_stats stats;
+
+#ifdef CONFIG_MMC_MSM7X00A_RESUME_IN_WQ
+ struct work_struct resume_task;
+#endif
+
+ /* Command parameters */
+ unsigned int cmd_timeout;
+ unsigned int cmd_pio_irqmask;
+ unsigned int cmd_datactrl;
+ struct mmc_command *cmd_cmd;
+ u32 cmd_c;
+
};
#endif
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 2df90412abb5..2c53024ee439 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -119,6 +119,7 @@ struct mxcmci_host {
int detect_irq;
int dma;
int do_dma;
+ int use_sdio;
unsigned int power_mode;
struct imxmmc_platform_data *pdata;
@@ -138,6 +139,7 @@ struct mxcmci_host {
int clock;
struct work_struct datawork;
+ spinlock_t lock;
};
static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
@@ -151,6 +153,8 @@ static void mxcmci_softreset(struct mxcmci_host *host)
{
int i;
+ dev_dbg(mmc_dev(host->mmc), "mxcmci_softreset\n");
+
/* reset sequence */
writew(STR_STP_CLK_RESET, host->base + MMC_REG_STR_STP_CLK);
writew(STR_STP_CLK_RESET | STR_STP_CLK_START_CLK,
@@ -224,6 +228,9 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
unsigned int cmdat)
{
+ u32 int_cntr;
+ unsigned long flags;
+
WARN_ON(host->cmd != NULL);
host->cmd = cmd;
@@ -247,12 +254,16 @@ static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
return -EINVAL;
}
+ int_cntr = INT_END_CMD_RES_EN;
+
if (mxcmci_use_dma(host))
- writel(INT_READ_OP_EN | INT_WRITE_OP_DONE_EN |
- INT_END_CMD_RES_EN,
- host->base + MMC_REG_INT_CNTR);
- else
- writel(INT_END_CMD_RES_EN, host->base + MMC_REG_INT_CNTR);
+ int_cntr |= INT_READ_OP_EN | INT_WRITE_OP_DONE_EN;
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (host->use_sdio)
+ int_cntr |= INT_SDIO_IRQ_EN;
+ writel(int_cntr, host->base + MMC_REG_INT_CNTR);
+ spin_unlock_irqrestore(&host->lock, flags);
writew(cmd->opcode, host->base + MMC_REG_CMD);
writel(cmd->arg, host->base + MMC_REG_ARG);
@@ -264,7 +275,14 @@ static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
static void mxcmci_finish_request(struct mxcmci_host *host,
struct mmc_request *req)
{
- writel(0, host->base + MMC_REG_INT_CNTR);
+ u32 int_cntr = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (host->use_sdio)
+ int_cntr |= INT_SDIO_IRQ_EN;
+ writel(int_cntr, host->base + MMC_REG_INT_CNTR);
+ spin_unlock_irqrestore(&host->lock, flags);
host->req = NULL;
host->cmd = NULL;
@@ -290,16 +308,25 @@ static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat)
dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n",
stat);
if (stat & STATUS_CRC_READ_ERR) {
+ dev_err(mmc_dev(host->mmc), "%s: -EILSEQ\n", __func__);
data->error = -EILSEQ;
} else if (stat & STATUS_CRC_WRITE_ERR) {
u32 err_code = (stat >> 9) & 0x3;
- if (err_code == 2) /* No CRC response */
+ if (err_code == 2) { /* No CRC response */
+ dev_err(mmc_dev(host->mmc),
+ "%s: No CRC -ETIMEDOUT\n", __func__);
data->error = -ETIMEDOUT;
- else
+ } else {
+ dev_err(mmc_dev(host->mmc),
+ "%s: -EILSEQ\n", __func__);
data->error = -EILSEQ;
+ }
} else if (stat & STATUS_TIME_OUT_READ) {
+ dev_err(mmc_dev(host->mmc),
+ "%s: read -ETIMEDOUT\n", __func__);
data->error = -ETIMEDOUT;
} else {
+ dev_err(mmc_dev(host->mmc), "%s: -EIO\n", __func__);
data->error = -EIO;
}
} else {
@@ -433,8 +460,6 @@ static int mxcmci_transfer_data(struct mxcmci_host *host)
struct scatterlist *sg;
int stat, i;
- host->datasize = 0;
-
host->data = data;
host->datasize = 0;
@@ -523,15 +548,27 @@ static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat)
static irqreturn_t mxcmci_irq(int irq, void *devid)
{
struct mxcmci_host *host = devid;
+ unsigned long flags;
+ bool sdio_irq;
u32 stat;
stat = readl(host->base + MMC_REG_STATUS);
- writel(stat, host->base + MMC_REG_STATUS);
+ writel(stat & ~STATUS_SDIO_INT_ACTIVE, host->base + MMC_REG_STATUS);
dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat);
+ spin_lock_irqsave(&host->lock, flags);
+ sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ if (sdio_irq) {
+ writel(STATUS_SDIO_INT_ACTIVE, host->base + MMC_REG_STATUS);
+ mmc_signal_sdio_irq(host->mmc);
+ }
+
if (stat & STATUS_END_CMD_RESP)
mxcmci_cmd_done(host, stat);
+
#ifdef HAS_DMA
if (mxcmci_use_dma(host) &&
(stat & (STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE)))
@@ -668,11 +705,46 @@ static int mxcmci_get_ro(struct mmc_host *mmc)
return -ENOSYS;
}
+static void mxcmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct mxcmci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+ u32 int_cntr;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->use_sdio = enable;
+ int_cntr = readl(host->base + MMC_REG_INT_CNTR);
+
+ if (enable)
+ int_cntr |= INT_SDIO_IRQ_EN;
+ else
+ int_cntr &= ~INT_SDIO_IRQ_EN;
+
+ writel(int_cntr, host->base + MMC_REG_INT_CNTR);
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card)
+{
+ /*
+ * MX3 SoCs have a silicon bug which corrupts CRC calculation of
+ * multi-block transfers when the connected SDIO peripheral doesn't
+ * drive the BUSY line as required by the specs.
+ * One way to prevent this is to only allow 1-bit transfers.
+ */
+
+ if (cpu_is_mx3() && card->type == MMC_TYPE_SDIO)
+ host->caps &= ~MMC_CAP_4_BIT_DATA;
+ else
+ host->caps |= MMC_CAP_4_BIT_DATA;
+}
static const struct mmc_host_ops mxcmci_ops = {
- .request = mxcmci_request,
- .set_ios = mxcmci_set_ios,
- .get_ro = mxcmci_get_ro,
+ .request = mxcmci_request,
+ .set_ios = mxcmci_set_ios,
+ .get_ro = mxcmci_get_ro,
+ .enable_sdio_irq = mxcmci_enable_sdio_irq,
+ .init_card = mxcmci_init_card,
};
static int mxcmci_probe(struct platform_device *pdev)
@@ -700,7 +772,7 @@ static int mxcmci_probe(struct platform_device *pdev)
}
mmc->ops = &mxcmci_ops;
- mmc->caps = MMC_CAP_4_BIT_DATA;
+ mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
/* MMC core transfer sizes tunable parameters */
mmc->max_hw_segs = 64;
@@ -719,6 +791,7 @@ static int mxcmci_probe(struct platform_device *pdev)
host->mmc = mmc;
host->pdata = pdev->dev.platform_data;
+ spin_lock_init(&host->lock);
if (host->pdata && host->pdata->ocr_avail)
mmc->ocr_avail = host->pdata->ocr_avail;
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index ecf90f5c97c2..f8210bf2d241 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -304,6 +304,19 @@ config SSFDC
This enables read only access to SmartMedia formatted NAND
flash. You can mount it with FAT file system.
+
+config SM_FTL
+ tristate "SmartMedia/xD new translation layer"
+ depends on EXPERIMENTAL && BLOCK
+ select MTD_BLKDEVS
+ select MTD_NAND_ECC
+ help
+ This enables new and very EXPERIMENTAL support for SmartMedia/xD
+ FTL (Flash translation layer).
+ Write support is not yet well tested, so this code IS likely to
+ eat your card; please do not use it with valuable data.
+ Use the read-only driver (CONFIG_SSFDC) instead.
+
config MTD_OOPS
tristate "Log panic/oops to an MTD buffer"
depends on MTD
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 4521b1ecce45..760abc533395 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_NFTL) += nftl.o
obj-$(CONFIG_INFTL) += inftl.o
obj-$(CONFIG_RFD_FTL) += rfd_ftl.o
obj-$(CONFIG_SSFDC) += ssfdc.o
+obj-$(CONFIG_SM_FTL) += sm_ftl.o
obj-$(CONFIG_MTD_OOPS) += mtdoops.o
nftl-objs := nftlcore.o nftlmount.o
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 5fbf29e1e64f..62f3ea9de848 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -615,10 +615,8 @@ static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
return mtd;
setup_err:
- if(mtd) {
- kfree(mtd->eraseregions);
- kfree(mtd);
- }
+ kfree(mtd->eraseregions);
+ kfree(mtd);
kfree(cfi->cmdset_priv);
return NULL;
}
@@ -727,8 +725,7 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
/* those should be reset too since
they create memory references. */
init_waitqueue_head(&chip->wq);
- spin_lock_init(&chip->_spinlock);
- chip->mutex = &chip->_spinlock;
+ mutex_init(&chip->mutex);
chip++;
}
}
@@ -774,9 +771,9 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
break;
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Someone else might have been playing with it. */
return -EAGAIN;
}
@@ -823,9 +820,9 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
return -EIO;
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
So we can just loop here. */
}
@@ -852,10 +849,10 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
sleep:
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
return -EAGAIN;
}
}
@@ -901,20 +898,20 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
* it'll happily send us to sleep. In any case, when
* get_chip returns success we're clear to go ahead.
*/
- ret = spin_trylock(contender->mutex);
+ ret = mutex_trylock(&contender->mutex);
spin_unlock(&shared->lock);
if (!ret)
goto retry;
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
ret = chip_ready(map, contender, contender->start, mode);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (ret == -EAGAIN) {
- spin_unlock(contender->mutex);
+ mutex_unlock(&contender->mutex);
goto retry;
}
if (ret) {
- spin_unlock(contender->mutex);
+ mutex_unlock(&contender->mutex);
return ret;
}
spin_lock(&shared->lock);
@@ -923,10 +920,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
* in FL_SYNCING state. Put contender and retry. */
if (chip->state == FL_SYNCING) {
put_chip(map, contender, contender->start);
- spin_unlock(contender->mutex);
+ mutex_unlock(&contender->mutex);
goto retry;
}
- spin_unlock(contender->mutex);
+ mutex_unlock(&contender->mutex);
}
/* Check if we already have suspended erase
@@ -936,10 +933,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
spin_unlock(&shared->lock);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
goto retry;
}
@@ -969,12 +966,12 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
if (shared->writing && shared->writing != chip) {
/* give back ownership to who we loaned it from */
struct flchip *loaner = shared->writing;
- spin_lock(loaner->mutex);
+ mutex_lock(&loaner->mutex);
spin_unlock(&shared->lock);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
put_chip(map, loaner, loaner->start);
- spin_lock(chip->mutex);
- spin_unlock(loaner->mutex);
+ mutex_lock(&chip->mutex);
+ mutex_unlock(&loaner->mutex);
wake_up(&chip->wq);
return;
}
@@ -1144,7 +1141,7 @@ static int __xipram xip_wait_for_operation(
(void) map_read(map, adr);
xip_iprefetch();
local_irq_enable();
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
xip_iprefetch();
cond_resched();
@@ -1154,15 +1151,15 @@ static int __xipram xip_wait_for_operation(
* a suspended erase state. If so let's wait
* until it's done.
*/
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
while (chip->state != newstate) {
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
}
/* Disallow XIP again */
local_irq_disable();
@@ -1218,10 +1215,10 @@ static int inval_cache_and_wait_for_operation(
int chip_state = chip->state;
unsigned int timeo, sleep_time, reset_timeo;
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
if (inval_len)
INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
timeo = chip_op_time_max;
if (!timeo)
@@ -1241,7 +1238,7 @@ static int inval_cache_and_wait_for_operation(
}
/* OK Still waiting. Drop the lock, wait a while and retry. */
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
if (sleep_time >= 1000000/HZ) {
/*
* Half of the normal delay still remaining
@@ -1256,17 +1253,17 @@ static int inval_cache_and_wait_for_operation(
cond_resched();
timeo--;
}
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
while (chip->state != chip_state) {
/* Someone's suspended the operation: sleep */
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
}
if (chip->erase_suspended && chip_state == FL_ERASING) {
/* Erase suspend occured while sleep: reset timeout */
@@ -1302,7 +1299,7 @@ static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t a
/* Ensure cmd read/writes are aligned. */
cmd_addr = adr & ~(map_bankwidth(map)-1);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, cmd_addr, FL_POINT);
@@ -1313,7 +1310,7 @@ static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t a
chip->state = FL_POINT;
chip->ref_point_counter++;
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1398,7 +1395,7 @@ static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
else
thislen = len;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_POINT) {
chip->ref_point_counter--;
if(chip->ref_point_counter == 0)
@@ -1407,7 +1404,7 @@ static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */
put_chip(map, chip, chip->start);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
len -= thislen;
ofs = 0;
@@ -1426,10 +1423,10 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
/* Ensure cmd read/writes are aligned. */
cmd_addr = adr & ~(map_bankwidth(map)-1);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, cmd_addr, FL_READY);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1443,7 +1440,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
put_chip(map, chip, cmd_addr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return 0;
}
@@ -1506,10 +1503,10 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
return -EINVAL;
}
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, mode);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1555,7 +1552,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
xip_enable(map, chip, adr);
out: put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1664,10 +1661,10 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
/* Let's determine this according to the interleave only once */
write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, cmd_adr, FL_WRITING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1798,7 +1795,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
xip_enable(map, chip, cmd_adr);
out: put_chip(map, chip, cmd_adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1877,10 +1874,10 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
adr += chip->start;
retry:
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_ERASING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1936,7 +1933,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
} else if (chipstatus & 0x20 && retries--) {
printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
goto retry;
} else {
printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
@@ -1948,7 +1945,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
xip_enable(map, chip, adr);
out: put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1981,7 +1978,7 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
for (i=0; !ret && i<cfi->numchips; i++) {
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, chip->start, FL_SYNCING);
if (!ret) {
@@ -1992,7 +1989,7 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
* with the chip now anyway.
*/
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
/* Unlock the chips again */
@@ -2000,14 +1997,14 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_SYNCING) {
chip->state = chip->oldstate;
chip->oldstate = FL_READY;
wake_up(&chip->wq);
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
}
@@ -2053,10 +2050,10 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
adr += chip->start;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_LOCKING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -2090,7 +2087,7 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
xip_enable(map, chip, adr);
out: put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -2155,10 +2152,10 @@ do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
struct cfi_private *cfi = map->fldrv_priv;
int ret;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -2177,7 +2174,7 @@ do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
put_chip(map, chip, chip->start);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return 0;
}
@@ -2452,7 +2449,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
for (i=0; !ret && i<cfi->numchips; i++) {
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
switch (chip->state) {
case FL_READY:
@@ -2484,7 +2481,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
case FL_PM_SUSPENDED:
break;
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
/* Unlock the chips again */
@@ -2493,7 +2490,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_PM_SUSPENDED) {
/* No need to force it into a known state here,
@@ -2503,7 +2500,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
chip->oldstate = FL_READY;
wake_up(&chip->wq);
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
}
@@ -2544,7 +2541,7 @@ static void cfi_intelext_resume(struct mtd_info *mtd)
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Go to known state. Chip may have been power cycled */
if (chip->state == FL_PM_SUSPENDED) {
@@ -2553,7 +2550,7 @@ static void cfi_intelext_resume(struct mtd_info *mtd)
wake_up(&chip->wq);
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
if ((mtd->flags & MTD_POWERUP_LOCK)
@@ -2573,14 +2570,14 @@ static int cfi_intelext_reset(struct mtd_info *mtd)
/* force the completion of any ongoing operation
and switch to array mode so any bootloader in
flash is accessible for soft reboot. */
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
if (!ret) {
map_write(map, CMD(0xff), chip->start);
chip->state = FL_SHUTDOWN;
put_chip(map, chip, chip->start);
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
return 0;
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index f3600e8d5382..c16b8cecc3a8 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -32,6 +32,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/reboot.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
@@ -43,10 +44,6 @@
#define MAX_WORD_RETRIES 3
-#define MANUFACTURER_AMD 0x0001
-#define MANUFACTURER_ATMEL 0x001F
-#define MANUFACTURER_MACRONIX 0x00C2
-#define MANUFACTURER_SST 0x00BF
#define SST49LF004B 0x0060
#define SST49LF040B 0x0050
#define SST49LF008A 0x005a
@@ -60,6 +57,7 @@ static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
+static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static void cfi_amdstd_destroy(struct mtd_info *);
@@ -168,7 +166,7 @@ static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
* This reduces the risk of false detection due to
* the 8-bit device ID.
*/
- (cfi->mfr == MANUFACTURER_MACRONIX)) {
+ (cfi->mfr == CFI_MFR_MACRONIX)) {
DEBUG(MTD_DEBUG_LEVEL1,
"%s: Macronix MX29LV400C with bottom boot block"
" detected\n", map->name);
@@ -286,7 +284,7 @@ static struct cfi_fixup cfi_fixup_table[] = {
{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef AMD_BOOTLOC_BUG
{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
- { MANUFACTURER_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL },
+ { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL },
#endif
{ CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
{ CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
@@ -304,9 +302,9 @@ static struct cfi_fixup cfi_fixup_table[] = {
{ 0, 0, NULL, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
- { MANUFACTURER_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
- { MANUFACTURER_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
- { MANUFACTURER_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
+ { CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
+ { CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
+ { CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
{ 0, 0, NULL, NULL }
};
@@ -355,6 +353,8 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
mtd->name = map->name;
mtd->writesize = 1;
+ mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;
+
if (cfi->cfi_mode==CFI_MODE_CFI){
unsigned char bootloc;
/*
@@ -491,13 +491,12 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
#endif
__module_get(THIS_MODULE);
+ register_reboot_notifier(&mtd->reboot_notifier);
return mtd;
setup_err:
- if(mtd) {
- kfree(mtd->eraseregions);
- kfree(mtd);
- }
+ kfree(mtd->eraseregions);
+ kfree(mtd);
kfree(cfi->cmdset_priv);
kfree(cfi->cfiq);
return NULL;
@@ -571,9 +570,9 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
return -EIO;
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Someone else might have been playing with it. */
goto retry;
}
@@ -617,9 +616,9 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
return -EIO;
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
So we can just loop here. */
}
@@ -634,6 +633,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
chip->state = FL_READY;
return 0;
+ case FL_SHUTDOWN:
+ /* The machine is rebooting */
+ return -EIO;
+
case FL_POINT:
/* Only if there's no operation suspended... */
if (mode == FL_READY && chip->oldstate == FL_READY)
@@ -643,10 +646,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
sleep:
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
goto resettime;
}
}
@@ -778,7 +781,7 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
(void) map_read(map, adr);
xip_iprefetch();
local_irq_enable();
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
xip_iprefetch();
cond_resched();
@@ -788,15 +791,15 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
* a suspended erase state. If so let's wait
* until it's done.
*/
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
while (chip->state != FL_XIP_WHILE_ERASING) {
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
}
/* Disallow XIP again */
local_irq_disable();
@@ -858,17 +861,17 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
#define UDELAY(map, chip, adr, usec) \
do { \
- spin_unlock(chip->mutex); \
+ mutex_unlock(&chip->mutex); \
cfi_udelay(usec); \
- spin_lock(chip->mutex); \
+ mutex_lock(&chip->mutex); \
} while (0)
#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
do { \
- spin_unlock(chip->mutex); \
+ mutex_unlock(&chip->mutex); \
INVALIDATE_CACHED_RANGE(map, adr, len); \
cfi_udelay(usec); \
- spin_lock(chip->mutex); \
+ mutex_lock(&chip->mutex); \
} while (0)
#endif
@@ -884,10 +887,10 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
/* Ensure cmd read/writes are aligned. */
cmd_addr = adr & ~(map_bankwidth(map)-1);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, cmd_addr, FL_READY);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -900,7 +903,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
put_chip(map, chip, cmd_addr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return 0;
}
@@ -954,7 +957,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
struct cfi_private *cfi = map->fldrv_priv;
retry:
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state != FL_READY){
#if 0
@@ -963,7 +966,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
@@ -992,7 +995,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
wake_up(&chip->wq);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return 0;
}
@@ -1061,10 +1064,10 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
adr += chip->start;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_WRITING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1107,11 +1110,11 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + (HZ / 2); /* FIXME */
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
continue;
}
@@ -1143,7 +1146,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
op_done:
chip->state = FL_READY;
put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1175,7 +1178,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
map_word tmp_buf;
retry:
- spin_lock(cfi->chips[chipnum].mutex);
+ mutex_lock(&cfi->chips[chipnum].mutex);
if (cfi->chips[chipnum].state != FL_READY) {
#if 0
@@ -1184,7 +1187,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&cfi->chips[chipnum].wq, &wait);
- spin_unlock(cfi->chips[chipnum].mutex);
+ mutex_unlock(&cfi->chips[chipnum].mutex);
schedule();
remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
@@ -1198,7 +1201,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
/* Load 'tmp_buf' with old contents of flash */
tmp_buf = map_read(map, bus_ofs+chipstart);
- spin_unlock(cfi->chips[chipnum].mutex);
+ mutex_unlock(&cfi->chips[chipnum].mutex);
/* Number of bytes to copy from buffer */
n = min_t(int, len, map_bankwidth(map)-i);
@@ -1253,7 +1256,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
map_word tmp_buf;
retry1:
- spin_lock(cfi->chips[chipnum].mutex);
+ mutex_lock(&cfi->chips[chipnum].mutex);
if (cfi->chips[chipnum].state != FL_READY) {
#if 0
@@ -1262,7 +1265,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&cfi->chips[chipnum].wq, &wait);
- spin_unlock(cfi->chips[chipnum].mutex);
+ mutex_unlock(&cfi->chips[chipnum].mutex);
schedule();
remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
@@ -1275,7 +1278,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
tmp_buf = map_read(map, ofs + chipstart);
- spin_unlock(cfi->chips[chipnum].mutex);
+ mutex_unlock(&cfi->chips[chipnum].mutex);
tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
@@ -1310,10 +1313,10 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
adr += chip->start;
cmd_adr = adr;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_WRITING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1368,11 +1371,11 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + (HZ / 2); /* FIXME */
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
continue;
}
@@ -1400,7 +1403,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
op_done:
chip->state = FL_READY;
put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1500,10 +1503,10 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
adr = cfi->addr_unlock1;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_WRITING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1536,10 +1539,10 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
/* Someone's suspended the erase. Sleep */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
continue;
}
if (chip->erase_suspended) {
@@ -1573,7 +1576,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
chip->state = FL_READY;
xip_enable(map, chip, adr);
put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1588,10 +1591,10 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
adr += chip->start;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_ERASING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1624,10 +1627,10 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
/* Someone's suspended the erase. Sleep */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
continue;
}
if (chip->erase_suspended) {
@@ -1663,7 +1666,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
chip->state = FL_READY;
put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1715,7 +1718,7 @@ static int do_atmel_lock(struct map_info *map, struct flchip *chip,
struct cfi_private *cfi = map->fldrv_priv;
int ret;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
if (ret)
goto out_unlock;
@@ -1741,7 +1744,7 @@ static int do_atmel_lock(struct map_info *map, struct flchip *chip,
ret = 0;
out_unlock:
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1751,7 +1754,7 @@ static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
struct cfi_private *cfi = map->fldrv_priv;
int ret;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
if (ret)
goto out_unlock;
@@ -1769,7 +1772,7 @@ static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
ret = 0;
out_unlock:
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1797,7 +1800,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
chip = &cfi->chips[i];
retry:
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
switch(chip->state) {
case FL_READY:
@@ -1811,7 +1814,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
* with the chip now anyway.
*/
case FL_SYNCING:
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
break;
default:
@@ -1819,7 +1822,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
@@ -1834,13 +1837,13 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_SYNCING) {
chip->state = chip->oldstate;
wake_up(&chip->wq);
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
}
@@ -1856,7 +1859,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
for (i=0; !ret && i<cfi->numchips; i++) {
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
switch(chip->state) {
case FL_READY:
@@ -1876,7 +1879,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
ret = -EAGAIN;
break;
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
/* Unlock the chips again */
@@ -1885,13 +1888,13 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_PM_SUSPENDED) {
chip->state = chip->oldstate;
wake_up(&chip->wq);
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
}
@@ -1910,7 +1913,7 @@ static void cfi_amdstd_resume(struct mtd_info *mtd)
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_PM_SUSPENDED) {
chip->state = FL_READY;
@@ -1920,15 +1923,62 @@ static void cfi_amdstd_resume(struct mtd_info *mtd)
else
printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
}
+
+/*
+ * Ensure that the flash device is put back into read array mode before
+ * unloading the driver or rebooting. On some systems, rebooting while
+ * the flash is in query/program/erase mode will prevent the CPU from
+ * fetching the bootloader code, requiring a hard reset or power cycle.
+ */
+static int cfi_amdstd_reset(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int i, ret;
+ struct flchip *chip;
+
+ for (i = 0; i < cfi->numchips; i++) {
+
+ chip = &cfi->chips[i];
+
+ mutex_lock(&chip->mutex);
+
+ ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
+ if (!ret) {
+ map_write(map, CMD(0xF0), chip->start);
+ chip->state = FL_SHUTDOWN;
+ put_chip(map, chip, chip->start);
+ }
+
+ mutex_unlock(&chip->mutex);
+ }
+
+ return 0;
+}
+
+
+static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
+ void *v)
+{
+ struct mtd_info *mtd;
+
+ mtd = container_of(nb, struct mtd_info, reboot_notifier);
+ cfi_amdstd_reset(mtd);
+ return NOTIFY_DONE;
+}
+
+
static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
+ cfi_amdstd_reset(mtd);
+ unregister_reboot_notifier(&mtd->reboot_notifier);
kfree(cfi->cmdset_priv);
kfree(cfi->cfiq);
kfree(cfi);
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 0667a671525d..e54e8c169d76 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -265,7 +265,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
timeo = jiffies + HZ;
retry:
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Check that the chip's ready to talk to us.
* If it's in FL_ERASING state, suspend it and make it talk now.
@@ -296,15 +296,15 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
/* make sure we're in 'read status' mode */
map_write(map, CMD(0x70), cmd_addr);
chip->state = FL_ERASING;
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
printk(KERN_ERR "Chip not ready after erase "
"suspended: status = 0x%lx\n", status.x[0]);
return -EIO;
}
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
}
suspended = 1;
@@ -335,13 +335,13 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
/* Urgh. Chip not yet ready to talk to us. */
if (time_after(jiffies, timeo)) {
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]);
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
goto retry;
@@ -351,7 +351,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
someone changes the status */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + HZ;
@@ -376,7 +376,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
}
wake_up(&chip->wq);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
return 0;
}
@@ -445,7 +445,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
#ifdef DEBUG_CFI_FEATURES
printk("%s: chip->state[%d]\n", __func__, chip->state);
#endif
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Check that the chip's ready to talk to us.
* Later, we can actually think about interrupting it
@@ -470,14 +470,14 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
break;
/* Urgh. Chip not yet ready to talk to us. */
if (time_after(jiffies, timeo)) {
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
status.x[0], map_read(map, cmd_adr).x[0]);
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
goto retry;
@@ -486,7 +486,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
someone changes the status */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + HZ;
@@ -503,16 +503,16 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
if (map_word_andequal(map, status, status_OK, status_OK))
break;
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
if (++z > 100) {
/* Argh. Not ready for write to buffer */
DISABLE_VPP(map);
map_write(map, CMD(0x70), cmd_adr);
chip->state = FL_STATUS;
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]);
return -EIO;
}
@@ -532,9 +532,9 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
map_write(map, CMD(0xd0), cmd_adr);
chip->state = FL_WRITING;
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(chip->buffer_write_time);
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
timeo = jiffies + (HZ/2);
z = 0;
@@ -543,11 +543,11 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
/* Someone's suspended the write. Sleep */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + (HZ / 2); /* FIXME */
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
continue;
}
@@ -563,16 +563,16 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
map_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
DISABLE_VPP(map);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
z++;
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
}
if (!z) {
chip->buffer_write_time--;
@@ -596,11 +596,11 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
/* put back into read status register mode */
map_write(map, CMD(0x70), adr);
wake_up(&chip->wq);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO;
}
wake_up(&chip->wq);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
return 0;
}
@@ -749,7 +749,7 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
timeo = jiffies + HZ;
retry:
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Check that the chip's ready to talk to us. */
switch (chip->state) {
@@ -766,13 +766,13 @@ retry:
/* Urgh. Chip not yet ready to talk to us. */
if (time_after(jiffies, timeo)) {
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
goto retry;
@@ -781,7 +781,7 @@ retry:
someone changes the status */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + HZ;
@@ -797,9 +797,9 @@ retry:
map_write(map, CMD(0xD0), adr);
chip->state = FL_ERASING;
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
msleep(1000);
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
/* FIXME. Use a timer to check this, and return immediately. */
/* Once the state machine's known to be working I'll do that */
@@ -810,11 +810,11 @@ retry:
/* Someone's suspended the erase. Sleep */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + (HZ*20); /* FIXME */
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
continue;
}
@@ -828,14 +828,14 @@ retry:
chip->state = FL_STATUS;
printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
DISABLE_VPP(map);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
}
DISABLE_VPP(map);
@@ -878,7 +878,7 @@ retry:
printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
timeo = jiffies + HZ;
chip->state = FL_STATUS;
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
goto retry;
}
printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
@@ -887,7 +887,7 @@ retry:
}
wake_up(&chip->wq);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -995,7 +995,7 @@ static void cfi_staa_sync (struct mtd_info *mtd)
chip = &cfi->chips[i];
retry:
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
switch(chip->state) {
case FL_READY:
@@ -1009,7 +1009,7 @@ static void cfi_staa_sync (struct mtd_info *mtd)
* with the chip now anyway.
*/
case FL_SYNCING:
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
break;
default:
@@ -1017,7 +1017,7 @@ static void cfi_staa_sync (struct mtd_info *mtd)
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
@@ -1030,13 +1030,13 @@ static void cfi_staa_sync (struct mtd_info *mtd)
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_SYNCING) {
chip->state = chip->oldstate;
wake_up(&chip->wq);
}
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
}
@@ -1054,7 +1054,7 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un
timeo = jiffies + HZ;
retry:
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Check that the chip's ready to talk to us. */
switch (chip->state) {
@@ -1071,13 +1071,13 @@ retry:
/* Urgh. Chip not yet ready to talk to us. */
if (time_after(jiffies, timeo)) {
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
goto retry;
@@ -1086,7 +1086,7 @@ retry:
someone changes the status */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + HZ;
@@ -1098,9 +1098,9 @@ retry:
map_write(map, CMD(0x01), adr);
chip->state = FL_LOCKING;
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
msleep(1000);
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
/* FIXME. Use a timer to check this, and return immediately. */
/* Once the state machine's known to be working I'll do that */
@@ -1118,21 +1118,21 @@ retry:
chip->state = FL_STATUS;
printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
DISABLE_VPP(map);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
}
/* Done and happy. */
chip->state = FL_STATUS;
DISABLE_VPP(map);
wake_up(&chip->wq);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
return 0;
}
static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
@@ -1203,7 +1203,7 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip,
timeo = jiffies + HZ;
retry:
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Check that the chip's ready to talk to us. */
switch (chip->state) {
@@ -1220,13 +1220,13 @@ retry:
/* Urgh. Chip not yet ready to talk to us. */
if (time_after(jiffies, timeo)) {
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
goto retry;
@@ -1235,7 +1235,7 @@ retry:
someone changes the status */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + HZ;
@@ -1247,9 +1247,9 @@ retry:
map_write(map, CMD(0xD0), adr);
chip->state = FL_UNLOCKING;
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
msleep(1000);
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
/* FIXME. Use a timer to check this, and return immediately. */
/* Once the state machine's known to be working I'll do that */
@@ -1267,21 +1267,21 @@ retry:
chip->state = FL_STATUS;
printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
DISABLE_VPP(map);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
return -EIO;
}
/* Latency issues. Drop the unlock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
}
/* Done and happy. */
chip->state = FL_STATUS;
DISABLE_VPP(map);
wake_up(&chip->wq);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
return 0;
}
static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
@@ -1334,7 +1334,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
for (i=0; !ret && i<cfi->numchips; i++) {
chip = &cfi->chips[i];
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
switch(chip->state) {
case FL_READY:
@@ -1354,7 +1354,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
ret = -EAGAIN;
break;
}
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
/* Unlock the chips again */
@@ -1363,7 +1363,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_PM_SUSPENDED) {
/* No need to force it into a known state here,
@@ -1372,7 +1372,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
chip->state = chip->oldstate;
wake_up(&chip->wq);
}
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
}
@@ -1390,7 +1390,7 @@ static void cfi_staa_resume(struct mtd_info *mtd)
chip = &cfi->chips[i];
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Go to known state. Chip may have been power cycled */
if (chip->state == FL_PM_SUSPENDED) {
@@ -1399,7 +1399,7 @@ static void cfi_staa_resume(struct mtd_info *mtd)
wake_up(&chip->wq);
}
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
}
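The spin_lock_bh -> mutex_lock conversion above is what makes the driver's wait loops legal: the chip state machine drops the lock, sleeps via schedule()/msleep(), and then retakes the lock, which must not be done under a spinlock held with bottom halves disabled. A minimal, illustrative sketch of that idiom (the demo_chip/demo_chip_wait names are hypothetical and merely mirror struct flchip) looks like this:
/*
 * Illustrative sketch only -- not part of the patch.  It shows the
 * drop-lock/sleep/retake-lock idiom used throughout cfi_cmdset_0020.c,
 * which is only safe because chip->mutex is now a sleeping mutex rather
 * than a spinlock taken with _bh disabled.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/wait.h>
struct demo_chip {			/* hypothetical, mirrors struct flchip */
	struct mutex mutex;
	wait_queue_head_t wq;
	int state;
};
static void demo_chip_wait(struct demo_chip *chip, int ready_state)
{
	DECLARE_WAITQUEUE(wait, current);
	mutex_lock(&chip->mutex);
	while (chip->state != ready_state) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);	/* drop the lock ... */
		schedule();			/* ... because this sleeps */
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);	/* retake it and re-check */
	}
	mutex_unlock(&chip->mutex);
}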
diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h
index 57e0e4e921f9..d18064977192 100644
--- a/drivers/mtd/chips/fwh_lock.h
+++ b/drivers/mtd/chips/fwh_lock.h
@@ -58,10 +58,10 @@ static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip,
* to flash memory - that means that we don't have to check status
* and timeout.
*/
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_LOCKING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -72,7 +72,7 @@ static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip,
/* Done and happy. */
chip->state = chip->oldstate;
put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return 0;
}
diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c
index e2dc96441e05..fcc1bc02c8a2 100644
--- a/drivers/mtd/chips/gen_probe.c
+++ b/drivers/mtd/chips/gen_probe.c
@@ -155,8 +155,7 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
pchip->start = (i << cfi.chipshift);
pchip->state = FL_READY;
init_waitqueue_head(&pchip->wq);
- spin_lock_init(&pchip->_spinlock);
- pchip->mutex = &pchip->_spinlock;
+ mutex_init(&pchip->mutex);
}
}
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index ab5c9b92ac82..f3226b1d38fc 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -1,5 +1,5 @@
#
-# linux/drivers/devices/Makefile
+# linux/drivers/mtd/devices/Makefile
#
obj-$(CONFIG_MTD_DOC2000) += doc2000.o
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index ce6424008ed9..93651865ddbe 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -276,12 +276,10 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
/* Setup the MTD structure */
/* make the name contain the block device in */
- name = kmalloc(sizeof("block2mtd: ") + strlen(devname) + 1,
- GFP_KERNEL);
+ name = kasprintf(GFP_KERNEL, "block2mtd: %s", devname);
if (!name)
goto devinit_err;
- sprintf(name, "block2mtd: %s", devname);
dev->mtd.name = name;
dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c
index d2fd550f7e09..fc8ea0a57ac2 100644
--- a/drivers/mtd/devices/pmc551.c
+++ b/drivers/mtd/devices/pmc551.c
@@ -668,7 +668,7 @@ static int __init init_pmc551(void)
{
struct pci_dev *PCI_Device = NULL;
struct mypriv *priv;
- int count, found = 0;
+ int found = 0;
struct mtd_info *mtd;
u32 length = 0;
@@ -695,7 +695,7 @@ static int __init init_pmc551(void)
/*
* PCU-bus chipset probe.
*/
- for (count = 0; count < MAX_MTD_DEVICES; count++) {
+ for (;;) {
if ((PCI_Device = pci_get_device(PCI_VENDOR_ID_V3_SEMI,
PCI_DEVICE_ID_V3_SEMI_V370PDC,
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index fe17054ee2fe..bcf040beb835 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -411,17 +411,6 @@ static int __init sst25l_probe(struct spi_device *spi)
flash->mtd.erasesize, flash->mtd.erasesize / 1024,
flash->mtd.numeraseregions);
- if (flash->mtd.numeraseregions)
- for (i = 0; i < flash->mtd.numeraseregions; i++)
- DEBUG(MTD_DEBUG_LEVEL2,
- "mtd.eraseregions[%d] = { .offset = 0x%llx, "
- ".erasesize = 0x%.8x (%uKiB), "
- ".numblocks = %d }\n",
- i, (long long)flash->mtd.eraseregions[i].offset,
- flash->mtd.eraseregions[i].erasesize,
- flash->mtd.eraseregions[i].erasesize / 1024,
- flash->mtd.eraseregions[i].numblocks);
-
if (mtd_has_partitions()) {
struct mtd_partition *parts = NULL;
int nr_parts = 0;
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index e56d6b42f020..62da9eb7032b 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -1082,7 +1082,6 @@ static void ftl_remove_dev(struct mtd_blktrans_dev *dev)
{
del_mtd_blktrans_dev(dev);
ftl_freepart((partition_t *)dev);
- kfree(dev);
}
static struct mtd_blktrans_ops ftl_tr = {
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index 8aca5523a337..015a7fe1b6ee 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -139,7 +139,6 @@ static void inftl_remove_dev(struct mtd_blktrans_dev *dev)
kfree(inftl->PUtable);
kfree(inftl->VUtable);
- kfree(inftl);
}
/*
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
index 32e82aef3e53..8f988d7d3c5c 100644
--- a/drivers/mtd/inftlmount.c
+++ b/drivers/mtd/inftlmount.c
@@ -100,9 +100,10 @@ static int find_boot_record(struct INFTLrecord *inftl)
}
/* To be safer with BIOS, also use erase mark as discriminant */
- if ((ret = inftl_read_oob(mtd, block * inftl->EraseSize +
- SECTORSIZE + 8, 8, &retlen,
- (char *)&h1) < 0)) {
+ ret = inftl_read_oob(mtd,
+ block * inftl->EraseSize + SECTORSIZE + 8,
+ 8, &retlen, (char *)&h1);
+ if (ret < 0) {
printk(KERN_WARNING "INFTL: ANAND header found at "
"0x%x in mtd%d, but OOB data read failed "
"(err %d)\n", block * inftl->EraseSize,
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index a73ee12aad81..fece5be58715 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -107,8 +107,7 @@ struct mtd_info *lpddr_cmdset(struct map_info *map)
/* those should be reset too since
they create memory references. */
init_waitqueue_head(&chip->wq);
- spin_lock_init(&chip->_spinlock);
- chip->mutex = &chip->_spinlock;
+ mutex_init(&chip->mutex);
chip++;
}
}
@@ -144,7 +143,7 @@ static int wait_for_ready(struct map_info *map, struct flchip *chip,
}
/* OK Still waiting. Drop the lock, wait a while and retry. */
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
if (sleep_time >= 1000000/HZ) {
/*
* Half of the normal delay still remaining
@@ -159,17 +158,17 @@ static int wait_for_ready(struct map_info *map, struct flchip *chip,
cond_resched();
timeo--;
}
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
while (chip->state != chip_state) {
/* Someone's suspended the operation: sleep */
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
}
if (chip->erase_suspended || chip->write_suspended) {
/* Suspend has occured while sleep: reset timeout */
@@ -230,20 +229,20 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode)
* it'll happily send us to sleep. In any case, when
* get_chip returns success we're clear to go ahead.
*/
- ret = spin_trylock(contender->mutex);
+ ret = mutex_trylock(&contender->mutex);
spin_unlock(&shared->lock);
if (!ret)
goto retry;
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
ret = chip_ready(map, contender, mode);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (ret == -EAGAIN) {
- spin_unlock(contender->mutex);
+ mutex_unlock(&contender->mutex);
goto retry;
}
if (ret) {
- spin_unlock(contender->mutex);
+ mutex_unlock(&contender->mutex);
return ret;
}
spin_lock(&shared->lock);
@@ -252,10 +251,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode)
* state. Put contender and retry. */
if (chip->state == FL_SYNCING) {
put_chip(map, contender);
- spin_unlock(contender->mutex);
+ mutex_unlock(&contender->mutex);
goto retry;
}
- spin_unlock(contender->mutex);
+ mutex_unlock(&contender->mutex);
}
/* Check if we have suspended erase on this chip.
@@ -265,10 +264,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode)
spin_unlock(&shared->lock);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
goto retry;
}
@@ -337,10 +336,10 @@ static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
sleep:
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
return -EAGAIN;
}
}
@@ -356,12 +355,12 @@ static void put_chip(struct map_info *map, struct flchip *chip)
if (shared->writing && shared->writing != chip) {
/* give back the ownership */
struct flchip *loaner = shared->writing;
- spin_lock(loaner->mutex);
+ mutex_lock(&loaner->mutex);
spin_unlock(&shared->lock);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
put_chip(map, loaner);
- spin_lock(chip->mutex);
- spin_unlock(loaner->mutex);
+ mutex_lock(&chip->mutex);
+ mutex_unlock(&loaner->mutex);
wake_up(&chip->wq);
return;
}
@@ -414,10 +413,10 @@ int do_write_buffer(struct map_info *map, struct flchip *chip,
wbufsize = 1 << lpddr->qinfo->BufSizeShift;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, FL_WRITING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
/* Figure out the number of words to write */
@@ -478,7 +477,7 @@ int do_write_buffer(struct map_info *map, struct flchip *chip,
}
out: put_chip(map, chip);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -490,10 +489,10 @@ int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
struct flchip *chip = &lpddr->chips[chipnum];
int ret;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, FL_ERASING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL);
@@ -505,7 +504,7 @@ int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
goto out;
}
out: put_chip(map, chip);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -518,10 +517,10 @@ static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
struct flchip *chip = &lpddr->chips[chipnum];
int ret = 0;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, FL_READY);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -529,7 +528,7 @@ static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
*retlen = len;
put_chip(map, chip);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -569,9 +568,9 @@ static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
else
thislen = len;
/* get the chip */
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, FL_POINT);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
if (ret)
break;
@@ -611,7 +610,7 @@ static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
else
thislen = len;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_POINT) {
chip->ref_point_counter--;
if (chip->ref_point_counter == 0)
@@ -621,7 +620,7 @@ static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
"pointed region\n", map->name);
put_chip(map, chip);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
len -= thislen;
ofs = 0;
@@ -727,10 +726,10 @@ int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
int chipnum = adr >> lpddr->chipshift;
struct flchip *chip = &lpddr->chips[chipnum];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, FL_LOCKING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -750,7 +749,7 @@ int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
goto out;
}
out: put_chip(map, chip);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -771,10 +770,10 @@ int word_program(struct map_info *map, loff_t adr, uint32_t curval)
int chipnum = adr >> lpddr->chipshift;
struct flchip *chip = &lpddr->chips[chipnum];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, FL_WRITING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -788,7 +787,7 @@ int word_program(struct map_info *map, loff_t adr, uint32_t curval)
}
out: put_chip(map, chip);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index c0fd99b0c525..85dd18193cf2 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -70,7 +70,7 @@ static void switch_back(struct async_state *state)
local_irq_restore(state->irq_flags);
}
-static map_word bfin_read(struct map_info *map, unsigned long ofs)
+static map_word bfin_flash_read(struct map_info *map, unsigned long ofs)
{
struct async_state *state = (struct async_state *)map->map_priv_1;
uint16_t word;
@@ -86,7 +86,7 @@ static map_word bfin_read(struct map_info *map, unsigned long ofs)
return test;
}
-static void bfin_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
+static void bfin_flash_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
{
struct async_state *state = (struct async_state *)map->map_priv_1;
@@ -97,7 +97,7 @@ static void bfin_copy_from(struct map_info *map, void *to, unsigned long from, s
switch_back(state);
}
-static void bfin_write(struct map_info *map, map_word d1, unsigned long ofs)
+static void bfin_flash_write(struct map_info *map, map_word d1, unsigned long ofs)
{
struct async_state *state = (struct async_state *)map->map_priv_1;
uint16_t d;
@@ -112,7 +112,7 @@ static void bfin_write(struct map_info *map, map_word d1, unsigned long ofs)
switch_back(state);
}
-static void bfin_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+static void bfin_flash_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
{
struct async_state *state = (struct async_state *)map->map_priv_1;
@@ -141,10 +141,10 @@ static int __devinit bfin_flash_probe(struct platform_device *pdev)
return -ENOMEM;
state->map.name = DRIVER_NAME;
- state->map.read = bfin_read;
- state->map.copy_from = bfin_copy_from;
- state->map.write = bfin_write;
- state->map.copy_to = bfin_copy_to;
+ state->map.read = bfin_flash_read;
+ state->map.copy_from = bfin_flash_copy_from;
+ state->map.write = bfin_flash_write;
+ state->map.copy_to = bfin_flash_copy_to;
state->map.bankwidth = pdata->width;
state->map.size = memory->end - memory->start + 1;
state->map.virt = (void __iomem *)memory->start;
diff --git a/drivers/mtd/maps/ceiva.c b/drivers/mtd/maps/ceiva.c
index d41f34766e53..c09f4f57093e 100644
--- a/drivers/mtd/maps/ceiva.c
+++ b/drivers/mtd/maps/ceiva.c
@@ -253,7 +253,7 @@ static void __exit clps_destroy_mtd(struct clps_info *clps, struct mtd_info *mtd
static int __init clps_setup_flash(void)
{
- int nr;
+ int nr = 0;
#ifdef CONFIG_ARCH_CEIVA
if (machine_is_ceiva()) {
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 7b0515297411..7513d90fee6f 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -107,8 +107,8 @@ static void ixp4xx_copy_from(struct map_info *map, void *to,
return;
if (from & 1) {
- *dest++ = BYTE1(flash_read16(src));
- src++;
+ *dest++ = BYTE1(flash_read16(src-1));
+ src++;
--len;
}
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index 689d6a79ffc0..3e339dec2891 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -52,7 +52,6 @@ static const int debug = 0;
struct pcmciamtd_dev {
struct pcmcia_device *p_dev;
- dev_node_t node; /* device node */
caddr_t win_base; /* ioremapped address of PCMCIA window */
unsigned int win_size; /* size of window */
unsigned int offset; /* offset into card the window currently points at */
@@ -647,9 +646,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
pcmciamtd_release(link);
return -ENODEV;
}
- snprintf(dev->node.dev_name, sizeof(dev->node.dev_name), "mtd%d", mtd->index);
info("mtd%d: %s", mtd->index, mtd->name);
- link->dev_node = &dev->node;
return 0;
failed:
@@ -692,8 +689,8 @@ static void pcmciamtd_detach(struct pcmcia_device *link)
if(dev->mtd_info) {
del_mtd_device(dev->mtd_info);
+ info("mtd%d: Removing", dev->mtd_info->index);
map_destroy(dev->mtd_info);
- info("mtd%d: Removed", dev->mtd_info->index);
}
pcmciamtd_release(link);
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index d9603f7f9652..426461a5f0d4 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -264,8 +264,11 @@ static int __init physmap_init(void)
err = platform_driver_register(&physmap_flash_driver);
#ifdef CONFIG_MTD_PHYSMAP_COMPAT
- if (err == 0)
- platform_device_register(&physmap_flash);
+ if (err == 0) {
+ err = platform_device_register(&physmap_flash);
+ if (err)
+ platform_driver_unregister(&physmap_flash_driver);
+ }
#endif
return err;
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 101ee6ead05c..bbdd21941905 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -218,7 +218,7 @@ static int __devinit of_flash_probe(struct of_device *dev,
dev_set_drvdata(&dev->dev, info);
- mtd_list = kzalloc(sizeof(struct mtd_info) * count, GFP_KERNEL);
+ mtd_list = kzalloc(sizeof(*mtd_list) * count, GFP_KERNEL);
if (!mtd_list)
goto err_flash_remove;
diff --git a/drivers/mtd/maps/pismo.c b/drivers/mtd/maps/pismo.c
index 60c068db452d..eb476b7f8d11 100644
--- a/drivers/mtd/maps/pismo.c
+++ b/drivers/mtd/maps/pismo.c
@@ -234,6 +234,7 @@ static int __devexit pismo_remove(struct i2c_client *client)
/* FIXME: set_vpp needs saner arguments */
pismo_setvpp_remove_fix(pismo);
+ i2c_set_clientdata(client, NULL);
kfree(pismo);
return 0;
@@ -272,7 +273,7 @@ static int __devinit pismo_probe(struct i2c_client *client,
ret = pismo_eeprom_read(client, &eeprom, 0, sizeof(eeprom));
if (ret < 0) {
dev_err(&client->dev, "error reading EEPROM: %d\n", ret);
- return ret;
+ goto exit_free;
}
dev_info(&client->dev, "%.15s board found\n", eeprom.board);
@@ -283,6 +284,11 @@ static int __devinit pismo_probe(struct i2c_client *client,
pdata->cs_addrs[i]);
return 0;
+
+ exit_free:
+ i2c_set_clientdata(client, NULL);
+ kfree(pismo);
+ return ret;
}
static const struct i2c_device_id pismo_id[] = {
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index c82e09bbc5fd..03e19c1965cc 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -14,7 +14,6 @@
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
-#include <linux/freezer.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
@@ -25,12 +24,42 @@
#include "mtdcore.h"
static LIST_HEAD(blktrans_majors);
+static DEFINE_MUTEX(blktrans_ref_mutex);
+
+void blktrans_dev_release(struct kref *kref)
+{
+ struct mtd_blktrans_dev *dev =
+ container_of(kref, struct mtd_blktrans_dev, ref);
+
+ dev->disk->private_data = NULL;
+ blk_cleanup_queue(dev->rq);
+ put_disk(dev->disk);
+ list_del(&dev->list);
+ kfree(dev);
+}
+
+static struct mtd_blktrans_dev *blktrans_dev_get(struct gendisk *disk)
+{
+ struct mtd_blktrans_dev *dev;
+
+ mutex_lock(&blktrans_ref_mutex);
+ dev = disk->private_data;
+
+ if (!dev)
+ goto unlock;
+ kref_get(&dev->ref);
+unlock:
+ mutex_unlock(&blktrans_ref_mutex);
+ return dev;
+}
+
+void blktrans_dev_put(struct mtd_blktrans_dev *dev)
+{
+ mutex_lock(&blktrans_ref_mutex);
+ kref_put(&dev->ref, blktrans_dev_release);
+ mutex_unlock(&blktrans_ref_mutex);
+}
-struct mtd_blkcore_priv {
- struct task_struct *thread;
- struct request_queue *rq;
- spinlock_t queue_lock;
-};
static int do_blktrans_request(struct mtd_blktrans_ops *tr,
struct mtd_blktrans_dev *dev,
@@ -61,7 +90,6 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
return -EIO;
rq_flush_dcache_pages(req);
return 0;
-
case WRITE:
if (!tr->writesect)
return -EIO;
@@ -71,7 +99,6 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
if (tr->writesect(dev, block, buf))
return -EIO;
return 0;
-
default:
printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
return -EIO;
@@ -80,14 +107,13 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
static int mtd_blktrans_thread(void *arg)
{
- struct mtd_blktrans_ops *tr = arg;
- struct request_queue *rq = tr->blkcore_priv->rq;
+ struct mtd_blktrans_dev *dev = arg;
+ struct request_queue *rq = dev->rq;
struct request *req = NULL;
spin_lock_irq(rq->queue_lock);
while (!kthread_should_stop()) {
- struct mtd_blktrans_dev *dev;
int res;
if (!req && !(req = blk_fetch_request(rq))) {
@@ -98,13 +124,10 @@ static int mtd_blktrans_thread(void *arg)
continue;
}
- dev = req->rq_disk->private_data;
- tr = dev->tr;
-
spin_unlock_irq(rq->queue_lock);
mutex_lock(&dev->lock);
- res = do_blktrans_request(tr, dev, req);
+ res = do_blktrans_request(dev->tr, dev, req);
mutex_unlock(&dev->lock);
spin_lock_irq(rq->queue_lock);
@@ -123,81 +146,112 @@ static int mtd_blktrans_thread(void *arg)
static void mtd_blktrans_request(struct request_queue *rq)
{
- struct mtd_blktrans_ops *tr = rq->queuedata;
- wake_up_process(tr->blkcore_priv->thread);
-}
+ struct mtd_blktrans_dev *dev;
+ struct request *req = NULL;
+
+ dev = rq->queuedata;
+ if (!dev)
+ while ((req = blk_fetch_request(rq)) != NULL)
+ __blk_end_request_all(req, -ENODEV);
+ else
+ wake_up_process(dev->thread);
+}
static int blktrans_open(struct block_device *bdev, fmode_t mode)
{
- struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
- struct mtd_blktrans_ops *tr = dev->tr;
- int ret = -ENODEV;
-
- if (!get_mtd_device(NULL, dev->mtd->index))
- goto out;
-
- if (!try_module_get(tr->owner))
- goto out_tr;
-
- /* FIXME: Locking. A hot pluggable device can go away
- (del_mtd_device can be called for it) without its module
- being unloaded. */
- dev->mtd->usecount++;
-
- ret = 0;
- if (tr->open && (ret = tr->open(dev))) {
- dev->mtd->usecount--;
- put_mtd_device(dev->mtd);
- out_tr:
- module_put(tr->owner);
+ struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
+ int ret;
+
+ if (!dev)
+ return -ERESTARTSYS;
+
+ mutex_lock(&dev->lock);
+
+ if (!dev->mtd) {
+ ret = -ENXIO;
+ goto unlock;
}
- out:
+
+ ret = !dev->open++ && dev->tr->open ? dev->tr->open(dev) : 0;
+
+ /* Take another reference on the device so it won't go away till
+ last release */
+ if (!ret)
+ kref_get(&dev->ref);
+unlock:
+ mutex_unlock(&dev->lock);
+ blktrans_dev_put(dev);
return ret;
}
static int blktrans_release(struct gendisk *disk, fmode_t mode)
{
- struct mtd_blktrans_dev *dev = disk->private_data;
- struct mtd_blktrans_ops *tr = dev->tr;
- int ret = 0;
+ struct mtd_blktrans_dev *dev = blktrans_dev_get(disk);
+ int ret = -ENXIO;
- if (tr->release)
- ret = tr->release(dev);
+ if (!dev)
+ return ret;
- if (!ret) {
- dev->mtd->usecount--;
- put_mtd_device(dev->mtd);
- module_put(tr->owner);
- }
+ mutex_lock(&dev->lock);
+
+ /* Release one reference; we're sure it's not the last one here */
+ kref_put(&dev->ref, blktrans_dev_release);
+ if (!dev->mtd)
+ goto unlock;
+
+ ret = !--dev->open && dev->tr->release ? dev->tr->release(dev) : 0;
+unlock:
+ mutex_unlock(&dev->lock);
+ blktrans_dev_put(dev);
return ret;
}
static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
- struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
+ struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
+ int ret = -ENXIO;
+
+ if (!dev)
+ return ret;
+
+ mutex_lock(&dev->lock);
+
+ if (!dev->mtd)
+ goto unlock;
- if (dev->tr->getgeo)
- return dev->tr->getgeo(dev, geo);
- return -ENOTTY;
+ ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : 0;
+unlock:
+ mutex_unlock(&dev->lock);
+ blktrans_dev_put(dev);
+ return ret;
}
static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
- struct mtd_blktrans_ops *tr = dev->tr;
+ struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
+ int ret = -ENXIO;
+
+ if (!dev)
+ return ret;
+
+ mutex_lock(&dev->lock);
+
+ if (!dev->mtd)
+ goto unlock;
switch (cmd) {
case BLKFLSBUF:
- if (tr->flush)
- return tr->flush(dev);
- /* The core code did the work, we had nothing to do. */
- return 0;
+ ret = dev->tr->flush ? dev->tr->flush(dev) : 0;
+ break;
default:
- return -ENOTTY;
+ ret = -ENOTTY;
}
+unlock:
+ mutex_unlock(&dev->lock);
+ blktrans_dev_put(dev);
+ return ret;
}
static const struct block_device_operations mtd_blktrans_ops = {
@@ -214,12 +268,14 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
struct mtd_blktrans_dev *d;
int last_devnum = -1;
struct gendisk *gd;
+ int ret;
if (mutex_trylock(&mtd_table_mutex)) {
mutex_unlock(&mtd_table_mutex);
BUG();
}
+ mutex_lock(&blktrans_ref_mutex);
list_for_each_entry(d, &tr->devs, list) {
if (new->devnum == -1) {
/* Use first free number */
@@ -231,6 +287,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
}
} else if (d->devnum == new->devnum) {
/* Required number taken */
+ mutex_unlock(&blktrans_ref_mutex);
return -EBUSY;
} else if (d->devnum > new->devnum) {
/* Required number was free */
@@ -239,24 +296,38 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
}
last_devnum = d->devnum;
}
+
+ ret = -EBUSY;
if (new->devnum == -1)
new->devnum = last_devnum+1;
- if ((new->devnum << tr->part_bits) > 256) {
- return -EBUSY;
+ /* Check that the device and any partitions will get valid
+ * minor numbers and that the disk naming code below can cope
+ * with this number. */
+ if (new->devnum > (MINORMASK >> tr->part_bits) ||
+ (tr->part_bits && new->devnum >= 27 * 26)) {
+ mutex_unlock(&blktrans_ref_mutex);
+ goto error1;
}
list_add_tail(&new->list, &tr->devs);
added:
+ mutex_unlock(&blktrans_ref_mutex);
+
mutex_init(&new->lock);
+ kref_init(&new->ref);
if (!tr->writesect)
new->readonly = 1;
+ /* Create gendisk */
+ ret = -ENOMEM;
gd = alloc_disk(1 << tr->part_bits);
- if (!gd) {
- list_del(&new->list);
- return -ENOMEM;
- }
+
+ if (!gd)
+ goto error2;
+
+ new->disk = gd;
+ gd->private_data = new;
gd->major = tr->major;
gd->first_minor = (new->devnum) << tr->part_bits;
gd->fops = &mtd_blktrans_ops;
@@ -274,13 +345,35 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
snprintf(gd->disk_name, sizeof(gd->disk_name),
"%s%d", tr->name, new->devnum);
- /* 2.5 has capacity in units of 512 bytes while still
- having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */
set_capacity(gd, (new->size * tr->blksize) >> 9);
- gd->private_data = new;
- new->blkcore_priv = gd;
- gd->queue = tr->blkcore_priv->rq;
+ /* Create the request queue */
+ spin_lock_init(&new->queue_lock);
+ new->rq = blk_init_queue(mtd_blktrans_request, &new->queue_lock);
+
+ if (!new->rq)
+ goto error3;
+
+ new->rq->queuedata = new;
+ blk_queue_logical_block_size(new->rq, tr->blksize);
+
+ if (tr->discard)
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
+ new->rq);
+
+ gd->queue = new->rq;
+
+ __get_mtd_device(new->mtd);
+ __module_get(tr->owner);
+
+ /* Create processing thread */
+ /* TODO: workqueue ? */
+ new->thread = kthread_run(mtd_blktrans_thread, new,
+ "%s%d", tr->name, new->mtd->index);
+ if (IS_ERR(new->thread)) {
+ ret = PTR_ERR(new->thread);
+ goto error4;
+ }
gd->driverfs_dev = &new->mtd->dev;
if (new->readonly)
@@ -288,21 +381,65 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
add_disk(gd);
+ if (new->disk_attributes) {
+ ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
+ new->disk_attributes);
+ WARN_ON(ret);
+ }
return 0;
+error4:
+ module_put(tr->owner);
+ __put_mtd_device(new->mtd);
+ blk_cleanup_queue(new->rq);
+error3:
+ put_disk(new->disk);
+error2:
+ list_del(&new->list);
+error1:
+ kfree(new);
+ return ret;
}
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
+ unsigned long flags;
+
if (mutex_trylock(&mtd_table_mutex)) {
mutex_unlock(&mtd_table_mutex);
BUG();
}
- list_del(&old->list);
+ /* Stop new requests from arriving */
+ del_gendisk(old->disk);
+
+ if (old->disk_attributes)
+ sysfs_remove_group(&disk_to_dev(old->disk)->kobj,
+ old->disk_attributes);
+
+ /* Stop the thread */
+ kthread_stop(old->thread);
+
+ /* Kill current requests */
+ spin_lock_irqsave(&old->queue_lock, flags);
+ old->rq->queuedata = NULL;
+ blk_start_queue(old->rq);
+ spin_unlock_irqrestore(&old->queue_lock, flags);
+
+ /* Ask the trans driver to release the mtd device */
+ mutex_lock(&old->lock);
+ if (old->open && old->tr->release) {
+ old->tr->release(old);
+ old->open = 0;
+ }
+
+ __put_mtd_device(old->mtd);
+ module_put(old->tr->owner);
- del_gendisk(old->blkcore_priv);
- put_disk(old->blkcore_priv);
+ /* At that point, we don't touch the mtd anymore */
+ old->mtd = NULL;
+ mutex_unlock(&old->lock);
+ blktrans_dev_put(old);
return 0;
}
@@ -335,7 +472,8 @@ static struct mtd_notifier blktrans_notifier = {
int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
- int ret, i;
+ struct mtd_info *mtd;
+ int ret;
/* Register the notifier if/when the first device type is
registered, to prevent the link/init ordering from fucking
@@ -343,9 +481,6 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
if (!blktrans_notifier.list.next)
register_mtd_user(&blktrans_notifier);
- tr->blkcore_priv = kzalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL);
- if (!tr->blkcore_priv)
- return -ENOMEM;
mutex_lock(&mtd_table_mutex);
@@ -353,49 +488,20 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
if (ret) {
printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
tr->name, tr->major, ret);
- kfree(tr->blkcore_priv);
mutex_unlock(&mtd_table_mutex);
return ret;
}
- spin_lock_init(&tr->blkcore_priv->queue_lock);
-
- tr->blkcore_priv->rq = blk_init_queue(mtd_blktrans_request, &tr->blkcore_priv->queue_lock);
- if (!tr->blkcore_priv->rq) {
- unregister_blkdev(tr->major, tr->name);
- kfree(tr->blkcore_priv);
- mutex_unlock(&mtd_table_mutex);
- return -ENOMEM;
- }
-
- tr->blkcore_priv->rq->queuedata = tr;
- blk_queue_logical_block_size(tr->blkcore_priv->rq, tr->blksize);
- if (tr->discard)
- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
- tr->blkcore_priv->rq);
tr->blkshift = ffs(tr->blksize) - 1;
- tr->blkcore_priv->thread = kthread_run(mtd_blktrans_thread, tr,
- "%sd", tr->name);
- if (IS_ERR(tr->blkcore_priv->thread)) {
- ret = PTR_ERR(tr->blkcore_priv->thread);
- blk_cleanup_queue(tr->blkcore_priv->rq);
- unregister_blkdev(tr->major, tr->name);
- kfree(tr->blkcore_priv);
- mutex_unlock(&mtd_table_mutex);
- return ret;
- }
-
INIT_LIST_HEAD(&tr->devs);
list_add(&tr->list, &blktrans_majors);
- for (i=0; i<MAX_MTD_DEVICES; i++) {
- if (mtd_table[i] && mtd_table[i]->type != MTD_ABSENT)
- tr->add_mtd(tr, mtd_table[i]);
- }
+ mtd_for_each_device(mtd)
+ if (mtd->type != MTD_ABSENT)
+ tr->add_mtd(tr, mtd);
mutex_unlock(&mtd_table_mutex);
-
return 0;
}
@@ -405,22 +511,15 @@ int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
mutex_lock(&mtd_table_mutex);
- /* Clean up the kernel thread */
- kthread_stop(tr->blkcore_priv->thread);
-
/* Remove it from the list of active majors */
list_del(&tr->list);
list_for_each_entry_safe(dev, next, &tr->devs, list)
tr->remove_dev(dev);
- blk_cleanup_queue(tr->blkcore_priv->rq);
unregister_blkdev(tr->major, tr->name);
-
mutex_unlock(&mtd_table_mutex);
- kfree(tr->blkcore_priv);
-
BUG_ON(!list_empty(&tr->devs));
return 0;
}
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index 9f41b1a853c1..e6edbec609fd 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -19,15 +19,15 @@
#include <linux/mutex.h>
-static struct mtdblk_dev {
- struct mtd_info *mtd;
+struct mtdblk_dev {
+ struct mtd_blktrans_dev mbd;
int count;
struct mutex cache_mutex;
unsigned char *cache_data;
unsigned long cache_offset;
unsigned int cache_size;
enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
-} *mtdblks[MAX_MTD_DEVICES];
+};
static struct mutex mtdblks_lock;
@@ -98,7 +98,7 @@ static int erase_write (struct mtd_info *mtd, unsigned long pos,
static int write_cached_data (struct mtdblk_dev *mtdblk)
{
- struct mtd_info *mtd = mtdblk->mtd;
+ struct mtd_info *mtd = mtdblk->mbd.mtd;
int ret;
if (mtdblk->cache_state != STATE_DIRTY)
@@ -128,7 +128,7 @@ static int write_cached_data (struct mtdblk_dev *mtdblk)
static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
int len, const char *buf)
{
- struct mtd_info *mtd = mtdblk->mtd;
+ struct mtd_info *mtd = mtdblk->mbd.mtd;
unsigned int sect_size = mtdblk->cache_size;
size_t retlen;
int ret;
@@ -198,7 +198,7 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
int len, char *buf)
{
- struct mtd_info *mtd = mtdblk->mtd;
+ struct mtd_info *mtd = mtdblk->mbd.mtd;
unsigned int sect_size = mtdblk->cache_size;
size_t retlen;
int ret;
@@ -244,16 +244,16 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
unsigned long block, char *buf)
{
- struct mtdblk_dev *mtdblk = mtdblks[dev->devnum];
+ struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
return do_cached_read(mtdblk, block<<9, 512, buf);
}
static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
unsigned long block, char *buf)
{
- struct mtdblk_dev *mtdblk = mtdblks[dev->devnum];
+ struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
if (unlikely(!mtdblk->cache_data && mtdblk->cache_size)) {
- mtdblk->cache_data = vmalloc(mtdblk->mtd->erasesize);
+ mtdblk->cache_data = vmalloc(mtdblk->mbd.mtd->erasesize);
if (!mtdblk->cache_data)
return -EINTR;
/* -EINTR is not really correct, but it is the best match
@@ -266,37 +266,26 @@ static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
static int mtdblock_open(struct mtd_blktrans_dev *mbd)
{
- struct mtdblk_dev *mtdblk;
- struct mtd_info *mtd = mbd->mtd;
- int dev = mbd->devnum;
+ struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);
DEBUG(MTD_DEBUG_LEVEL1,"mtdblock_open\n");
mutex_lock(&mtdblks_lock);
- if (mtdblks[dev]) {
- mtdblks[dev]->count++;
+ if (mtdblk->count) {
+ mtdblk->count++;
mutex_unlock(&mtdblks_lock);
return 0;
}
/* OK, it's not open. Create cache info for it */
- mtdblk = kzalloc(sizeof(struct mtdblk_dev), GFP_KERNEL);
- if (!mtdblk) {
- mutex_unlock(&mtdblks_lock);
- return -ENOMEM;
- }
-
mtdblk->count = 1;
- mtdblk->mtd = mtd;
-
mutex_init(&mtdblk->cache_mutex);
mtdblk->cache_state = STATE_EMPTY;
- if ( !(mtdblk->mtd->flags & MTD_NO_ERASE) && mtdblk->mtd->erasesize) {
- mtdblk->cache_size = mtdblk->mtd->erasesize;
+ if (!(mbd->mtd->flags & MTD_NO_ERASE) && mbd->mtd->erasesize) {
+ mtdblk->cache_size = mbd->mtd->erasesize;
mtdblk->cache_data = NULL;
}
- mtdblks[dev] = mtdblk;
mutex_unlock(&mtdblks_lock);
DEBUG(MTD_DEBUG_LEVEL1, "ok\n");
@@ -306,8 +295,7 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd)
static int mtdblock_release(struct mtd_blktrans_dev *mbd)
{
- int dev = mbd->devnum;
- struct mtdblk_dev *mtdblk = mtdblks[dev];
+ struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);
DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n");
@@ -318,12 +306,10 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd)
mutex_unlock(&mtdblk->cache_mutex);
if (!--mtdblk->count) {
- /* It was the last usage. Free the device */
- mtdblks[dev] = NULL;
- if (mtdblk->mtd->sync)
- mtdblk->mtd->sync(mtdblk->mtd);
+ /* It was the last usage. Free the cache */
+ if (mbd->mtd->sync)
+ mbd->mtd->sync(mbd->mtd);
vfree(mtdblk->cache_data);
- kfree(mtdblk);
}
mutex_unlock(&mtdblks_lock);
@@ -335,40 +321,40 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd)
static int mtdblock_flush(struct mtd_blktrans_dev *dev)
{
- struct mtdblk_dev *mtdblk = mtdblks[dev->devnum];
+ struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
mutex_lock(&mtdblk->cache_mutex);
write_cached_data(mtdblk);
mutex_unlock(&mtdblk->cache_mutex);
- if (mtdblk->mtd->sync)
- mtdblk->mtd->sync(mtdblk->mtd);
+ if (dev->mtd->sync)
+ dev->mtd->sync(dev->mtd);
return 0;
}
static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
- struct mtd_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ struct mtdblk_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return;
- dev->mtd = mtd;
- dev->devnum = mtd->index;
+ dev->mbd.mtd = mtd;
+ dev->mbd.devnum = mtd->index;
- dev->size = mtd->size >> 9;
- dev->tr = tr;
+ dev->mbd.size = mtd->size >> 9;
+ dev->mbd.tr = tr;
if (!(mtd->flags & MTD_WRITEABLE))
- dev->readonly = 1;
+ dev->mbd.readonly = 1;
- add_mtd_blktrans_dev(dev);
+ if (add_mtd_blktrans_dev(&dev->mbd))
+ kfree(dev);
}
static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)
{
del_mtd_blktrans_dev(dev);
- kfree(dev);
}
static struct mtd_blktrans_ops mtdblock_tr = {
diff --git a/drivers/mtd/mtdblock_ro.c b/drivers/mtd/mtdblock_ro.c
index 852165f8b1c3..d0d3f79f9d03 100644
--- a/drivers/mtd/mtdblock_ro.c
+++ b/drivers/mtd/mtdblock_ro.c
@@ -43,13 +43,13 @@ static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
dev->tr = tr;
dev->readonly = 1;
- add_mtd_blktrans_dev(dev);
+ if (add_mtd_blktrans_dev(dev))
+ kfree(dev);
}
static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)
{
del_mtd_blktrans_dev(dev);
- kfree(dev);
}
static struct mtd_blktrans_ops mtdblock_tr = {
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 5b081cb84351..c355491d1326 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -67,9 +67,6 @@ static int mtd_open(struct inode *inode, struct file *file)
DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");
- if (devnum >= MAX_MTD_DEVICES)
- return -ENODEV;
-
/* You can't open the RO devices RW */
if ((file->f_mode & FMODE_WRITE) && (minor & 1))
return -EACCES;
@@ -373,7 +370,7 @@ static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
if (!mtd->write_oob)
ret = -EOPNOTSUPP;
else
- ret = access_ok(VERIFY_READ, ptr, length) ? 0 : EFAULT;
+ ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT;
if (ret)
return ret;
@@ -482,7 +479,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
{
uint32_t ur_idx;
struct mtd_erase_region_info *kr;
- struct region_info_user *ur = (struct region_info_user *) argp;
+ struct region_info_user __user *ur = argp;
if (get_user(ur_idx, &(ur->regionindex)))
return -EFAULT;
@@ -958,7 +955,8 @@ static int __init init_mtdchar(void)
{
int status;
- status = register_chrdev(MTD_CHAR_MAJOR, "mtd", &mtd_fops);
+ status = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
+ "mtd", &mtd_fops);
if (status < 0) {
printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
MTD_CHAR_MAJOR);
@@ -969,7 +967,7 @@ static int __init init_mtdchar(void)
static void __exit cleanup_mtdchar(void)
{
- unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
+ __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
}
module_init(init_mtdchar);
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index b177e750efc3..a1b8b70d2d0a 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -19,7 +19,9 @@
#include <linux/init.h>
#include <linux/mtd/compatmac.h>
#include <linux/proc_fs.h>
+#include <linux/idr.h>
#include <linux/backing-dev.h>
+#include <linux/gfp.h>
#include <linux/mtd/mtd.h>
@@ -63,13 +65,18 @@ static struct class mtd_class = {
.resume = mtd_cls_resume,
};
+static DEFINE_IDR(mtd_idr);
+
/* These are exported solely for the purpose of mtd_blkdevs.c. You
should not use them for _anything_ else */
DEFINE_MUTEX(mtd_table_mutex);
-struct mtd_info *mtd_table[MAX_MTD_DEVICES];
-
EXPORT_SYMBOL_GPL(mtd_table_mutex);
-EXPORT_SYMBOL_GPL(mtd_table);
+
+struct mtd_info *__mtd_next_device(int i)
+{
+ return idr_get_next(&mtd_idr, &i);
+}
+EXPORT_SYMBOL_GPL(__mtd_next_device);
static LIST_HEAD(mtd_notifiers);
@@ -265,13 +272,13 @@ static struct device_type mtd_devtype = {
* Add a device to the list of MTD devices present in the system, and
* notify each currently active MTD 'user' of its arrival. Returns
* zero on success or 1 on failure, which currently will only happen
- * if the number of present devices exceeds MAX_MTD_DEVICES (i.e. 16)
- * or there's a sysfs error.
+ * if there is insufficient memory or a sysfs error.
*/
int add_mtd_device(struct mtd_info *mtd)
{
- int i;
+ struct mtd_notifier *not;
+ int i, error;
if (!mtd->backing_dev_info) {
switch (mtd->type) {
@@ -290,70 +297,73 @@ int add_mtd_device(struct mtd_info *mtd)
BUG_ON(mtd->writesize == 0);
mutex_lock(&mtd_table_mutex);
- for (i=0; i < MAX_MTD_DEVICES; i++)
- if (!mtd_table[i]) {
- struct mtd_notifier *not;
-
- mtd_table[i] = mtd;
- mtd->index = i;
- mtd->usecount = 0;
-
- if (is_power_of_2(mtd->erasesize))
- mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
- else
- mtd->erasesize_shift = 0;
-
- if (is_power_of_2(mtd->writesize))
- mtd->writesize_shift = ffs(mtd->writesize) - 1;
- else
- mtd->writesize_shift = 0;
-
- mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
- mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
-
- /* Some chips always power up locked. Unlock them now */
- if ((mtd->flags & MTD_WRITEABLE)
- && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) {
- if (mtd->unlock(mtd, 0, mtd->size))
- printk(KERN_WARNING
- "%s: unlock failed, "
- "writes may not work\n",
- mtd->name);
- }
+ do {
+ if (!idr_pre_get(&mtd_idr, GFP_KERNEL))
+ goto fail_locked;
+ error = idr_get_new(&mtd_idr, mtd, &i);
+ } while (error == -EAGAIN);
- /* Caller should have set dev.parent to match the
- * physical device.
- */
- mtd->dev.type = &mtd_devtype;
- mtd->dev.class = &mtd_class;
- mtd->dev.devt = MTD_DEVT(i);
- dev_set_name(&mtd->dev, "mtd%d", i);
- dev_set_drvdata(&mtd->dev, mtd);
- if (device_register(&mtd->dev) != 0) {
- mtd_table[i] = NULL;
- break;
- }
+ if (error)
+ goto fail_locked;
- if (MTD_DEVT(i))
- device_create(&mtd_class, mtd->dev.parent,
- MTD_DEVT(i) + 1,
- NULL, "mtd%dro", i);
-
- DEBUG(0, "mtd: Giving out device %d to %s\n",i, mtd->name);
- /* No need to get a refcount on the module containing
- the notifier, since we hold the mtd_table_mutex */
- list_for_each_entry(not, &mtd_notifiers, list)
- not->add(mtd);
-
- mutex_unlock(&mtd_table_mutex);
- /* We _know_ we aren't being removed, because
- our caller is still holding us here. So none
- of this try_ nonsense, and no bitching about it
- either. :) */
- __module_get(THIS_MODULE);
- return 0;
- }
+ mtd->index = i;
+ mtd->usecount = 0;
+
+ if (is_power_of_2(mtd->erasesize))
+ mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
+ else
+ mtd->erasesize_shift = 0;
+
+ if (is_power_of_2(mtd->writesize))
+ mtd->writesize_shift = ffs(mtd->writesize) - 1;
+ else
+ mtd->writesize_shift = 0;
+
+ mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
+ mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
+
+ /* Some chips always power up locked. Unlock them now */
+ if ((mtd->flags & MTD_WRITEABLE)
+ && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) {
+ if (mtd->unlock(mtd, 0, mtd->size))
+ printk(KERN_WARNING
+ "%s: unlock failed, writes may not work\n",
+ mtd->name);
+ }
+
+ /* Caller should have set dev.parent to match the
+ * physical device.
+ */
+ mtd->dev.type = &mtd_devtype;
+ mtd->dev.class = &mtd_class;
+ mtd->dev.devt = MTD_DEVT(i);
+ dev_set_name(&mtd->dev, "mtd%d", i);
+ dev_set_drvdata(&mtd->dev, mtd);
+ if (device_register(&mtd->dev) != 0)
+ goto fail_added;
+
+ if (MTD_DEVT(i))
+ device_create(&mtd_class, mtd->dev.parent,
+ MTD_DEVT(i) + 1,
+ NULL, "mtd%dro", i);
+
+ DEBUG(0, "mtd: Giving out device %d to %s\n", i, mtd->name);
+ /* No need to get a refcount on the module containing
+ the notifier, since we hold the mtd_table_mutex */
+ list_for_each_entry(not, &mtd_notifiers, list)
+ not->add(mtd);
+
+ mutex_unlock(&mtd_table_mutex);
+ /* We _know_ we aren't being removed, because
+ our caller is still holding us here. So none
+ of this try_ nonsense, and no bitching about it
+ either. :) */
+ __module_get(THIS_MODULE);
+ return 0;
+fail_added:
+ idr_remove(&mtd_idr, i);
+fail_locked:
mutex_unlock(&mtd_table_mutex);
return 1;
}
@@ -371,31 +381,34 @@ int add_mtd_device(struct mtd_info *mtd)
int del_mtd_device (struct mtd_info *mtd)
{
int ret;
+ struct mtd_notifier *not;
mutex_lock(&mtd_table_mutex);
- if (mtd_table[mtd->index] != mtd) {
+ if (idr_find(&mtd_idr, mtd->index) != mtd) {
ret = -ENODEV;
- } else if (mtd->usecount) {
+ goto out_error;
+ }
+
+ /* No need to get a refcount on the module containing
+ the notifier, since we hold the mtd_table_mutex */
+ list_for_each_entry(not, &mtd_notifiers, list)
+ not->remove(mtd);
+
+ if (mtd->usecount) {
printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
mtd->index, mtd->name, mtd->usecount);
ret = -EBUSY;
} else {
- struct mtd_notifier *not;
-
device_unregister(&mtd->dev);
- /* No need to get a refcount on the module containing
- the notifier, since we hold the mtd_table_mutex */
- list_for_each_entry(not, &mtd_notifiers, list)
- not->remove(mtd);
-
- mtd_table[mtd->index] = NULL;
+ idr_remove(&mtd_idr, mtd->index);
module_put(THIS_MODULE);
ret = 0;
}
+out_error:
mutex_unlock(&mtd_table_mutex);
return ret;
}
@@ -411,7 +424,7 @@ int del_mtd_device (struct mtd_info *mtd)
void register_mtd_user (struct mtd_notifier *new)
{
- int i;
+ struct mtd_info *mtd;
mutex_lock(&mtd_table_mutex);
@@ -419,9 +432,8 @@ void register_mtd_user (struct mtd_notifier *new)
__module_get(THIS_MODULE);
- for (i=0; i< MAX_MTD_DEVICES; i++)
- if (mtd_table[i])
- new->add(mtd_table[i]);
+ mtd_for_each_device(mtd)
+ new->add(mtd);
mutex_unlock(&mtd_table_mutex);
}
@@ -438,15 +450,14 @@ void register_mtd_user (struct mtd_notifier *new)
int unregister_mtd_user (struct mtd_notifier *old)
{
- int i;
+ struct mtd_info *mtd;
mutex_lock(&mtd_table_mutex);
module_put(THIS_MODULE);
- for (i=0; i< MAX_MTD_DEVICES; i++)
- if (mtd_table[i])
- old->remove(mtd_table[i]);
+ mtd_for_each_device(mtd)
+ old->remove(mtd);
list_del(&old->list);
mutex_unlock(&mtd_table_mutex);
@@ -468,42 +479,56 @@ int unregister_mtd_user (struct mtd_notifier *old)
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
- struct mtd_info *ret = NULL;
- int i, err = -ENODEV;
+ struct mtd_info *ret = NULL, *other;
+ int err = -ENODEV;
mutex_lock(&mtd_table_mutex);
if (num == -1) {
- for (i=0; i< MAX_MTD_DEVICES; i++)
- if (mtd_table[i] == mtd)
- ret = mtd_table[i];
- } else if (num >= 0 && num < MAX_MTD_DEVICES) {
- ret = mtd_table[num];
+ mtd_for_each_device(other) {
+ if (other == mtd) {
+ ret = mtd;
+ break;
+ }
+ }
+ } else if (num >= 0) {
+ ret = idr_find(&mtd_idr, num);
if (mtd && mtd != ret)
ret = NULL;
}
- if (!ret)
- goto out_unlock;
-
- if (!try_module_get(ret->owner))
- goto out_unlock;
-
- if (ret->get_device) {
- err = ret->get_device(ret);
- if (err)
- goto out_put;
+ if (!ret) {
+ ret = ERR_PTR(err);
+ goto out;
}
- ret->usecount++;
+ err = __get_mtd_device(ret);
+ if (err)
+ ret = ERR_PTR(err);
+out:
mutex_unlock(&mtd_table_mutex);
return ret;
+}
-out_put:
- module_put(ret->owner);
-out_unlock:
- mutex_unlock(&mtd_table_mutex);
- return ERR_PTR(err);
+
+int __get_mtd_device(struct mtd_info *mtd)
+{
+ int err;
+
+ if (!try_module_get(mtd->owner))
+ return -ENODEV;
+
+ if (mtd->get_device) {
+
+ err = mtd->get_device(mtd);
+
+ if (err) {
+ module_put(mtd->owner);
+ return err;
+ }
+ }
+ mtd->usecount++;
+ return 0;
}
/**
@@ -517,14 +542,14 @@ out_unlock:
struct mtd_info *get_mtd_device_nm(const char *name)
{
- int i, err = -ENODEV;
- struct mtd_info *mtd = NULL;
+ int err = -ENODEV;
+ struct mtd_info *mtd = NULL, *other;
mutex_lock(&mtd_table_mutex);
- for (i = 0; i < MAX_MTD_DEVICES; i++) {
- if (mtd_table[i] && !strcmp(name, mtd_table[i]->name)) {
- mtd = mtd_table[i];
+ mtd_for_each_device(other) {
+ if (!strcmp(name, other->name)) {
+ mtd = other;
break;
}
}
@@ -554,14 +579,19 @@ out_unlock:
void put_mtd_device(struct mtd_info *mtd)
{
- int c;
-
mutex_lock(&mtd_table_mutex);
- c = --mtd->usecount;
+ __put_mtd_device(mtd);
+ mutex_unlock(&mtd_table_mutex);
+
+}
+
+void __put_mtd_device(struct mtd_info *mtd)
+{
+ --mtd->usecount;
+ BUG_ON(mtd->usecount < 0);
+
if (mtd->put_device)
mtd->put_device(mtd);
- mutex_unlock(&mtd_table_mutex);
- BUG_ON(c < 0);
module_put(mtd->owner);
}
@@ -599,7 +629,9 @@ EXPORT_SYMBOL_GPL(add_mtd_device);
EXPORT_SYMBOL_GPL(del_mtd_device);
EXPORT_SYMBOL_GPL(get_mtd_device);
EXPORT_SYMBOL_GPL(get_mtd_device_nm);
+EXPORT_SYMBOL_GPL(__get_mtd_device);
EXPORT_SYMBOL_GPL(put_mtd_device);
+EXPORT_SYMBOL_GPL(__put_mtd_device);
EXPORT_SYMBOL_GPL(register_mtd_user);
EXPORT_SYMBOL_GPL(unregister_mtd_user);
EXPORT_SYMBOL_GPL(default_mtd_writev);
@@ -611,14 +643,9 @@ EXPORT_SYMBOL_GPL(default_mtd_writev);
static struct proc_dir_entry *proc_mtd;
-static inline int mtd_proc_info (char *buf, int i)
+static inline int mtd_proc_info(char *buf, struct mtd_info *this)
{
- struct mtd_info *this = mtd_table[i];
-
- if (!this)
- return 0;
-
- return sprintf(buf, "mtd%d: %8.8llx %8.8x \"%s\"\n", i,
+ return sprintf(buf, "mtd%d: %8.8llx %8.8x \"%s\"\n", this->index,
(unsigned long long)this->size,
this->erasesize, this->name);
}
@@ -626,15 +653,15 @@ static inline int mtd_proc_info (char *buf, int i)
static int mtd_read_proc (char *page, char **start, off_t off, int count,
int *eof, void *data_unused)
{
- int len, l, i;
+ struct mtd_info *mtd;
+ int len, l;
off_t begin = 0;
mutex_lock(&mtd_table_mutex);
len = sprintf(page, "dev: size erasesize name\n");
- for (i=0; i< MAX_MTD_DEVICES; i++) {
-
- l = mtd_proc_info(page + len, i);
+ mtd_for_each_device(mtd) {
+ l = mtd_proc_info(page + len, mtd);
len += l;
if (len+begin > off+count)
goto done;
diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h
index a33251f4b872..6a64fdebc898 100644
--- a/drivers/mtd/mtdcore.h
+++ b/drivers/mtd/mtdcore.h
@@ -8,4 +8,9 @@
should not use them for _anything_ else */
extern struct mutex mtd_table_mutex;
-extern struct mtd_info *mtd_table[MAX_MTD_DEVICES];
+extern struct mtd_info *__mtd_next_device(int i);
+
+#define mtd_for_each_device(mtd) \
+ for ((mtd) = __mtd_next_device(0); \
+ (mtd) != NULL; \
+ (mtd) = __mtd_next_device(mtd->index + 1))
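The mtd_for_each_device() iterator replaces the old fixed-size mtd_table[] scans. A minimal sketch of a caller (demo_walk_mtd_devices is a hypothetical helper; the locking and the MTD_ABSENT check mirror register_mtd_blktrans() and the /proc code above):
/*
 * Illustrative sketch only -- mirrors how register_mtd_blktrans() and
 * the /proc code above walk the registered devices with the new
 * iterator, under mtd_table_mutex.
 */
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/mtd/mtd.h>
#include "mtdcore.h"
static void demo_walk_mtd_devices(void)
{
	struct mtd_info *mtd;
	mutex_lock(&mtd_table_mutex);
	mtd_for_each_device(mtd) {
		if (mtd->type == MTD_ABSENT)	/* skip placeholder entries */
			continue;
		printk(KERN_INFO "mtd%d: %s\n", mtd->index, mtd->name);
	}
	mutex_unlock(&mtd_table_mutex);
}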
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 92e12df0917f..328313c3dccb 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -429,11 +429,6 @@ static int __init mtdoops_init(void)
mtd_index = simple_strtoul(mtddev, &endp, 0);
if (*endp == '\0')
cxt->mtd_index = mtd_index;
- if (cxt->mtd_index > MAX_MTD_DEVICES) {
- printk(KERN_ERR "mtdoops: invalid mtd device number (%u) given\n",
- mtd_index);
- return -EINVAL;
- }
cxt->oops_buf = vmalloc(record_size);
if (!cxt->oops_buf) {
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
index 7c003191fca4..bd9a443ccf69 100644
--- a/drivers/mtd/mtdsuper.c
+++ b/drivers/mtd/mtdsuper.c
@@ -152,18 +152,12 @@ int get_sb_mtd(struct file_system_type *fs_type, int flags,
DEBUG(1, "MTDSB: mtd:%%s, name \"%s\"\n",
dev_name + 4);
- for (mtdnr = 0; mtdnr < MAX_MTD_DEVICES; mtdnr++) {
- mtd = get_mtd_device(NULL, mtdnr);
- if (!IS_ERR(mtd)) {
- if (!strcmp(mtd->name, dev_name + 4))
- return get_sb_mtd_aux(
- fs_type, flags,
- dev_name, data, mtd,
- fill_super, mnt);
-
- put_mtd_device(mtd);
- }
- }
+ mtd = get_mtd_device_nm(dev_name + 4);
+ if (!IS_ERR(mtd))
+ return get_sb_mtd_aux(
+ fs_type, flags,
+ dev_name, data, mtd,
+ fill_super, mnt);
printk(KERN_NOTICE "MTD:"
" MTD device with name \"%s\" not found.\n",
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 42e5ea49e975..8f402d46a362 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -2,11 +2,23 @@ menuconfig MTD_NAND
tristate "NAND Device Support"
depends on MTD
select MTD_NAND_IDS
+ select MTD_NAND_ECC
help
This enables support for accessing all type of NAND flash
devices. For further information see
<http://www.linux-mtd.infradead.org/doc/nand.html>.
+config MTD_NAND_ECC
+ tristate
+
+config MTD_NAND_ECC_SMC
+ bool "NAND ECC Smart Media byte order"
+ depends on MTD_NAND_ECC
+ default n
+ help
+ Software ECC according to the Smart Media Specification.
+ The original Linux implementation had byte 0 and 1 swapped.
+
if MTD_NAND
config MTD_NAND_VERIFY_WRITE
@@ -18,12 +30,9 @@ config MTD_NAND_VERIFY_WRITE
device thinks the write was successful, a bit could have been
flipped accidentally due to device wear or something else.
-config MTD_NAND_ECC_SMC
- bool "NAND ECC Smart Media byte order"
+config MTD_SM_COMMON
+ tristate
default n
- help
- Software ECC according to the Smart Media Specification.
- The original Linux implementation had byte 0 and 1 swapped.
config MTD_NAND_MUSEUM_IDS
bool "Enable chip ids for obsolete ancient NAND devices"
@@ -95,15 +104,21 @@ config MTD_NAND_OMAP_PREFETCH_DMA
or in DMA interrupt mode.
Say y for DMA mode or MPU mode will be used
-config MTD_NAND_TS7250
- tristate "NAND Flash device on TS-7250 board"
- depends on MACH_TS72XX
- help
- Support for NAND flash on Technologic Systems TS-7250 platform.
-
config MTD_NAND_IDS
tristate
+config MTD_NAND_RICOH
+ tristate "Ricoh xD card reader"
+ default n
+ depends on PCI
+ select MTD_SM_COMMON
+ help
+ Enable support for the Ricoh R5C852 xD card reader.
+ You also need to enable either the
+ 'NAND SSFDC (SmartMedia) read only translation layer' or the new,
+ experimental, read/write
+ 'SmartMedia/xD new translation layer'.
+
config MTD_NAND_AU1550
tristate "Au1550/1200 NAND support"
depends on SOC_AU1200 || SOC_AU1550
@@ -358,8 +373,6 @@ config MTD_NAND_ATMEL_ECC_NONE
If unsure, say N
- endchoice
-
endchoice
config MTD_NAND_PXA3xx
@@ -442,6 +455,13 @@ config MTD_NAND_FSL_UPM
Enables support for NAND Flash chips wired onto Freescale PowerPC
processor localbus with User-Programmable Machine support.
+config MTD_NAND_MPC5121_NFC
+ tristate "MPC5121 built-in NAND Flash Controller support"
+ depends on PPC_MPC512x
+ help
+ This enables the driver for the NAND flash controller on the
+ MPC5121 SoC.
+
config MTD_NAND_MXC
tristate "MXC NAND support"
depends on ARCH_MX2 || ARCH_MX25 || ARCH_MX3
@@ -481,11 +501,11 @@ config MTD_NAND_SOCRATES
help
Enables support for NAND Flash chips wired onto Socrates board.
-config MTD_NAND_W90P910
- tristate "Support for NAND on w90p910 evaluation board."
+config MTD_NAND_NUC900
+ tristate "Support for NAND on Nuvoton NUC9xx/w90p910 evaluation boards."
depends on ARCH_W90X900 && MTD_PARTITIONS
help
This enables the driver for the NAND Flash on evaluation board based
- on w90p910.
+ on w90p910 / NUC9xx.
endif # MTD_NAND
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 1407bd144015..04bccf9d7b53 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -2,8 +2,10 @@
# linux/drivers/nand/Makefile
#
-obj-$(CONFIG_MTD_NAND) += nand.o nand_ecc.o
+obj-$(CONFIG_MTD_NAND) += nand.o
+obj-$(CONFIG_MTD_NAND_ECC) += nand_ecc.o
obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o
+obj-$(CONFIG_MTD_SM_COMMON) += sm_common.o
obj-$(CONFIG_MTD_NAND_CAFE) += cafe_nand.o
obj-$(CONFIG_MTD_NAND_SPIA) += spia.o
@@ -19,7 +21,6 @@ obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o
obj-$(CONFIG_MTD_NAND_H1900) += h1910.o
obj-$(CONFIG_MTD_NAND_RTC_FROM4) += rtc_from4.o
obj-$(CONFIG_MTD_NAND_SHARPSL) += sharpsl.o
-obj-$(CONFIG_MTD_NAND_TS7250) += ts7250.o
obj-$(CONFIG_MTD_NAND_NANDSIM) += nandsim.o
obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o
obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o
@@ -39,8 +40,10 @@ obj-$(CONFIG_MTD_NAND_SH_FLCTL) += sh_flctl.o
obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o
obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o
obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o
-obj-$(CONFIG_MTD_NAND_W90P910) += w90p910_nand.o
+obj-$(CONFIG_MTD_NAND_NUC900) += nuc900_nand.o
obj-$(CONFIG_MTD_NAND_NOMADIK) += nomadik_nand.o
obj-$(CONFIG_MTD_NAND_BCM_UMI) += bcm_umi_nand.o nand_bcm_umi.o
+obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o
+obj-$(CONFIG_MTD_NAND_RICOH) += r852.o
nand-objs := nand_base.o nand_bbt.o
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c
index 2d6773281fd9..8691e0482ed2 100644
--- a/drivers/mtd/nand/alauda.c
+++ b/drivers/mtd/nand/alauda.c
@@ -49,7 +49,7 @@
#define TIMEOUT HZ
-static struct usb_device_id alauda_table [] = {
+static const struct usb_device_id alauda_table[] = {
{ USB_DEVICE(0x0584, 0x0008) }, /* Fujifilm DPC-R1 */
{ USB_DEVICE(0x07b4, 0x010a) }, /* Olympus MAUSB-10 */
{ }
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 524e6c9e0672..04d30887ca7f 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -474,7 +474,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
}
/* first scan to find the device and get the page size */
- if (nand_scan_ident(mtd, 1)) {
+ if (nand_scan_ident(mtd, 1, NULL)) {
res = -ENXIO;
goto err_scan_ident;
}
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 43d46e424040..3ffe05db4923 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -451,7 +451,7 @@ static int __init au1xxx_nand_init(void)
u32 nand_phys;
/* Allocate memory for MTD device structure and private data */
- au1550_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
+ au1550_mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
if (!au1550_mtd) {
printk("Unable to allocate NAND MTD dev structure.\n");
return -ENOMEM;
@@ -460,10 +460,6 @@ static int __init au1xxx_nand_init(void)
/* Get pointer to private data */
this = (struct nand_chip *)(&au1550_mtd[1]);
- /* Initialize structures */
- memset(au1550_mtd, 0, sizeof(struct mtd_info));
- memset(this, 0, sizeof(struct nand_chip));
-
/* Link the private data with the MTD structure */
au1550_mtd->priv = this;
au1550_mtd->owner = THIS_MODULE;
@@ -544,7 +540,7 @@ static int __init au1xxx_nand_init(void)
}
nand_phys = (mem_staddr << 4) & 0xFFFC0000;
- p_nand = (void __iomem *)ioremap(nand_phys, 0x1000);
+ p_nand = ioremap(nand_phys, 0x1000);
/* make controller and MTD agree */
if (NAND_CS == 0)
@@ -589,7 +585,7 @@ static int __init au1xxx_nand_init(void)
return 0;
outio:
- iounmap((void *)p_nand);
+ iounmap(p_nand);
outmem:
kfree(au1550_mtd);
@@ -610,7 +606,7 @@ static void __exit au1550_cleanup(void)
kfree(au1550_mtd);
/* Unmap */
- iounmap((void *)p_nand);
+ iounmap(p_nand);
}
module_exit(au1550_cleanup);
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c
index c997f98eeb3d..dfe262c726fb 100644
--- a/drivers/mtd/nand/bcm_umi_nand.c
+++ b/drivers/mtd/nand/bcm_umi_nand.c
@@ -13,7 +13,6 @@
*****************************************************************************/
/* ---- Include Files ---------------------------------------------------- */
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
@@ -447,7 +446,7 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
* layout we'll be using.
*/
- err = nand_scan_ident(board_mtd, 1);
+ err = nand_scan_ident(board_mtd, 1, NULL);
if (err) {
printk(KERN_ERR "nand_scan failed: %d\n", err);
iounmap(bcm_umi_io_base);
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 8506e7e606fd..2974995e194d 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -68,6 +68,27 @@
#define DRV_AUTHOR "Bryan Wu <bryan.wu@analog.com>"
#define DRV_DESC "BF5xx on-chip NAND FLash Controller Driver"
+/* NFC_STAT Masks */
+#define NBUSY 0x01 /* Not Busy */
+#define WB_FULL 0x02 /* Write Buffer Full */
+#define PG_WR_STAT 0x04 /* Page Write Pending */
+#define PG_RD_STAT 0x08 /* Page Read Pending */
+#define WB_EMPTY 0x10 /* Write Buffer Empty */
+
+/* NFC_IRQSTAT Masks */
+#define NBUSYIRQ 0x01 /* Not Busy IRQ */
+#define WB_OVF 0x02 /* Write Buffer Overflow */
+#define WB_EDGE 0x04 /* Write Buffer Edge Detect */
+#define RD_RDY 0x08 /* Read Data Ready */
+#define WR_DONE 0x10 /* Page Write Done */
+
+/* NFC_RST Masks */
+#define ECC_RST 0x01 /* ECC (and NFC counters) Reset */
+
+/* NFC_PGCTL Masks */
+#define PG_RD_START 0x01 /* Page Read Start */
+#define PG_WR_START 0x02 /* Page Write Start */
+
#ifdef CONFIG_MTD_NAND_BF5XX_HWECC
static int hardware_ecc = 1;
#else
@@ -487,7 +508,7 @@ static void bf5xx_nand_dma_rw(struct mtd_info *mtd,
* transferred to generate the correct ECC register
* values.
*/
- bfin_write_NFC_RST(0x1);
+ bfin_write_NFC_RST(ECC_RST);
SSYNC();
disable_dma(CH_NFC);
@@ -497,7 +518,7 @@ static void bf5xx_nand_dma_rw(struct mtd_info *mtd,
set_dma_config(CH_NFC, 0x0);
set_dma_start_addr(CH_NFC, (unsigned long) buf);
-/* The DMAs have different size on BF52x and BF54x */
+ /* The DMAs have different size on BF52x and BF54x */
#ifdef CONFIG_BF52x
set_dma_x_count(CH_NFC, (page_size >> 1));
set_dma_x_modify(CH_NFC, 2);
@@ -517,9 +538,9 @@ static void bf5xx_nand_dma_rw(struct mtd_info *mtd,
/* Start PAGE read/write operation */
if (is_read)
- bfin_write_NFC_PGCTL(0x1);
+ bfin_write_NFC_PGCTL(PG_RD_START);
else
- bfin_write_NFC_PGCTL(0x2);
+ bfin_write_NFC_PGCTL(PG_WR_START);
wait_for_completion(&info->dma_completion);
}
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index e5a9f9ccea60..db1dfc5a1b11 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -762,7 +762,7 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
cafe_readl(cafe, GLOBAL_CTRL), cafe_readl(cafe, GLOBAL_IRQ_MASK));
/* Scan to find existence of the device */
- if (nand_scan_ident(mtd, 2)) {
+ if (nand_scan_ident(mtd, 2, NULL)) {
err = -ENXIO;
goto out_irq;
}
@@ -849,7 +849,7 @@ static void __devexit cafe_nand_remove(struct pci_dev *pdev)
kfree(mtd);
}
-static struct pci_device_id cafe_nand_tbl[] = {
+static const struct pci_device_id cafe_nand_tbl[] = {
{ PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_88ALP01_NAND,
PCI_ANY_ID, PCI_ANY_ID },
{ }
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 76e2dc8e62f7..9c9d893affeb 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -567,8 +567,8 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
goto err_nomem;
}
- vaddr = ioremap(res1->start, res1->end - res1->start);
- base = ioremap(res2->start, res2->end - res2->start);
+ vaddr = ioremap(res1->start, resource_size(res1));
+ base = ioremap(res2->start, resource_size(res2));
if (!vaddr || !base) {
dev_err(&pdev->dev, "ioremap failed\n");
ret = -EINVAL;
@@ -691,7 +691,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
spin_unlock_irq(&davinci_nand_lock);
/* Scan to find existence of the device(s) */
- ret = nand_scan_ident(&info->mtd, pdata->mask_chipsel ? 2 : 1);
+ ret = nand_scan_ident(&info->mtd, pdata->mask_chipsel ? 2 : 1, NULL);
if (ret < 0) {
dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
goto err_scan;
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index ae30fb6eed97..3f38fb8e6666 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -874,7 +874,7 @@ static int __devinit fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl,
priv->ctrl = ctrl;
priv->dev = ctrl->dev;
- priv->vbase = ioremap(res.start, res.end - res.start + 1);
+ priv->vbase = ioremap(res.start, resource_size(&res));
if (!priv->vbase) {
dev_err(ctrl->dev, "failed to map chip region\n");
ret = -ENOMEM;
@@ -891,7 +891,7 @@ static int __devinit fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl,
if (ret)
goto err;
- ret = nand_scan_ident(&priv->mtd, 1);
+ ret = nand_scan_ident(&priv->mtd, 1, NULL);
if (ret)
goto err;
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index 4b96296af321..2d215ccb564d 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -49,7 +49,10 @@ struct fsl_upm_nand {
uint32_t wait_flags;
};
-#define to_fsl_upm_nand(mtd) container_of(mtd, struct fsl_upm_nand, mtd)
+static inline struct fsl_upm_nand *to_fsl_upm_nand(struct mtd_info *mtdinfo)
+{
+ return container_of(mtdinfo, struct fsl_upm_nand, mtd);
+}
static int fun_chip_ready(struct mtd_info *mtd)
{
@@ -303,7 +306,7 @@ static int __devinit fun_probe(struct of_device *ofdev,
FSL_UPM_WAIT_WRITE_BYTE;
fun->io_base = devm_ioremap_nocache(&ofdev->dev, io_res.start,
- io_res.end - io_res.start + 1);
+ resource_size(&io_res));
if (!fun->io_base) {
ret = -ENOMEM;
goto err2;
@@ -350,7 +353,7 @@ static int __devexit fun_remove(struct of_device *ofdev)
return 0;
}
-static struct of_device_id of_fun_match[] = {
+static const struct of_device_id of_fun_match[] = {
{ .compatible = "fsl,upm-nand" },
{},
};
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index 8f902e75aa85..0cde618bcc1e 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -181,11 +181,11 @@ static int __devexit gpio_nand_remove(struct platform_device *dev)
res = platform_get_resource(dev, IORESOURCE_MEM, 1);
iounmap(gpiomtd->io_sync);
if (res)
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
iounmap(gpiomtd->nand_chip.IO_ADDR_R);
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
@@ -208,14 +208,14 @@ static void __iomem *request_and_remap(struct resource *res, size_t size,
{
void __iomem *ptr;
- if (!request_mem_region(res->start, res->end - res->start + 1, name)) {
+ if (!request_mem_region(res->start, resource_size(res), name)) {
*err = -EBUSY;
return NULL;
}
ptr = ioremap(res->start, size);
if (!ptr) {
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
*err = -ENOMEM;
}
return ptr;
@@ -338,10 +338,10 @@ err_nwp:
err_nce:
iounmap(gpiomtd->io_sync);
if (res1)
- release_mem_region(res1->start, res1->end - res1->start + 1);
+ release_mem_region(res1->start, resource_size(res1));
err_sync:
iounmap(gpiomtd->nand_chip.IO_ADDR_R);
- release_mem_region(res0->start, res0->end - res0->start + 1);
+ release_mem_region(res0->start, resource_size(res0));
err_map:
kfree(gpiomtd);
return ret;
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
new file mode 100644
index 000000000000..3d0867d829cb
--- /dev/null
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -0,0 +1,917 @@
+/*
+ * Copyright 2004-2008 Freescale Semiconductor, Inc.
+ * Copyright 2009 Semihalf.
+ *
+ * Approved as OSADL project by a majority of OSADL members and funded
+ * by OSADL membership fees in 2009; for details see www.osadl.org.
+ *
+ * Based on original driver from Freescale Semiconductor
+ * written by John Rigby <jrigby@freescale.com> on the basis
+ * of drivers/mtd/nand/mxc_nand.c. Reworked and extended by
+ * Piotr Ziecik <kosmo@semihalf.com>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/gfp.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+
+#include <asm/mpc5121.h>
+
+/* Addresses for NFC MAIN RAM BUFFER areas */
+#define NFC_MAIN_AREA(n) ((n) * 0x200)
+
+/* Addresses for NFC SPARE BUFFER areas */
+#define NFC_SPARE_BUFFERS 8
+#define NFC_SPARE_LEN 0x40
+#define NFC_SPARE_AREA(n) (0x1000 + ((n) * NFC_SPARE_LEN))
+
+/* MPC5121 NFC registers */
+#define NFC_BUF_ADDR 0x1E04
+#define NFC_FLASH_ADDR 0x1E06
+#define NFC_FLASH_CMD 0x1E08
+#define NFC_CONFIG 0x1E0A
+#define NFC_ECC_STATUS1 0x1E0C
+#define NFC_ECC_STATUS2 0x1E0E
+#define NFC_SPAS 0x1E10
+#define NFC_WRPROT 0x1E12
+#define NFC_NF_WRPRST 0x1E18
+#define NFC_CONFIG1 0x1E1A
+#define NFC_CONFIG2 0x1E1C
+#define NFC_UNLOCKSTART_BLK0 0x1E20
+#define NFC_UNLOCKEND_BLK0 0x1E22
+#define NFC_UNLOCKSTART_BLK1 0x1E24
+#define NFC_UNLOCKEND_BLK1 0x1E26
+#define NFC_UNLOCKSTART_BLK2 0x1E28
+#define NFC_UNLOCKEND_BLK2 0x1E2A
+#define NFC_UNLOCKSTART_BLK3 0x1E2C
+#define NFC_UNLOCKEND_BLK3 0x1E2E
+
+/* Bit Definitions: NFC_BUF_ADDR */
+#define NFC_RBA_MASK (7 << 0)
+#define NFC_ACTIVE_CS_SHIFT 5
+#define NFC_ACTIVE_CS_MASK (3 << NFC_ACTIVE_CS_SHIFT)
+
+/* Bit Definitions: NFC_CONFIG */
+#define NFC_BLS_UNLOCKED (1 << 1)
+
+/* Bit Definitions: NFC_CONFIG1 */
+#define NFC_ECC_4BIT (1 << 0)
+#define NFC_FULL_PAGE_DMA (1 << 1)
+#define NFC_SPARE_ONLY (1 << 2)
+#define NFC_ECC_ENABLE (1 << 3)
+#define NFC_INT_MASK (1 << 4)
+#define NFC_BIG_ENDIAN (1 << 5)
+#define NFC_RESET (1 << 6)
+#define NFC_CE (1 << 7)
+#define NFC_ONE_CYCLE (1 << 8)
+#define NFC_PPB_32 (0 << 9)
+#define NFC_PPB_64 (1 << 9)
+#define NFC_PPB_128 (2 << 9)
+#define NFC_PPB_256 (3 << 9)
+#define NFC_PPB_MASK (3 << 9)
+#define NFC_FULL_PAGE_INT (1 << 11)
+
+/* Bit Definitions: NFC_CONFIG2 */
+#define NFC_COMMAND (1 << 0)
+#define NFC_ADDRESS (1 << 1)
+#define NFC_INPUT (1 << 2)
+#define NFC_OUTPUT (1 << 3)
+#define NFC_ID (1 << 4)
+#define NFC_STATUS (1 << 5)
+#define NFC_CMD_FAIL (1 << 15)
+#define NFC_INT (1 << 15)
+
+/* Bit Definitions: NFC_WRPROT */
+#define NFC_WPC_LOCK_TIGHT (1 << 0)
+#define NFC_WPC_LOCK (1 << 1)
+#define NFC_WPC_UNLOCK (1 << 2)
+
+#define DRV_NAME "mpc5121_nfc"
+
+/* Timeouts */
+#define NFC_RESET_TIMEOUT 1000 /* 1 ms */
+#define NFC_TIMEOUT (HZ / 10) /* 1/10 s */
+
+struct mpc5121_nfc_prv {
+ struct mtd_info mtd;
+ struct nand_chip chip;
+ int irq;
+ void __iomem *regs;
+ struct clk *clk;
+ wait_queue_head_t irq_waitq;
+ uint column;
+ int spareonly;
+ void __iomem *csreg;
+ struct device *dev;
+};
+
+static void mpc5121_nfc_done(struct mtd_info *mtd);
+
+#ifdef CONFIG_MTD_PARTITIONS
+static const char *mpc5121_nfc_pprobes[] = { "cmdlinepart", NULL };
+#endif
+
+/* Read NFC register */
+static inline u16 nfc_read(struct mtd_info *mtd, uint reg)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ return in_be16(prv->regs + reg);
+}
+
+/* Write NFC register */
+static inline void nfc_write(struct mtd_info *mtd, uint reg, u16 val)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ out_be16(prv->regs + reg, val);
+}
+
+/* Set bits in NFC register */
+static inline void nfc_set(struct mtd_info *mtd, uint reg, u16 bits)
+{
+ nfc_write(mtd, reg, nfc_read(mtd, reg) | bits);
+}
+
+/* Clear bits in NFC register */
+static inline void nfc_clear(struct mtd_info *mtd, uint reg, u16 bits)
+{
+ nfc_write(mtd, reg, nfc_read(mtd, reg) & ~bits);
+}
+
+/* Invoke address cycle */
+static inline void mpc5121_nfc_send_addr(struct mtd_info *mtd, u16 addr)
+{
+ nfc_write(mtd, NFC_FLASH_ADDR, addr);
+ nfc_write(mtd, NFC_CONFIG2, NFC_ADDRESS);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Invoke command cycle */
+static inline void mpc5121_nfc_send_cmd(struct mtd_info *mtd, u16 cmd)
+{
+ nfc_write(mtd, NFC_FLASH_CMD, cmd);
+ nfc_write(mtd, NFC_CONFIG2, NFC_COMMAND);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Send data from NFC buffers to NAND flash */
+static inline void mpc5121_nfc_send_prog_page(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_INPUT);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Receive data from NAND flash */
+static inline void mpc5121_nfc_send_read_page(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_OUTPUT);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Receive ID from NAND flash */
+static inline void mpc5121_nfc_send_read_id(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_ID);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Receive status from NAND flash */
+static inline void mpc5121_nfc_send_read_status(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_STATUS);
+ mpc5121_nfc_done(mtd);
+}
+
+/* NFC interrupt handler */
+static irqreturn_t mpc5121_nfc_irq(int irq, void *data)
+{
+ struct mtd_info *mtd = data;
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ nfc_set(mtd, NFC_CONFIG1, NFC_INT_MASK);
+ wake_up(&prv->irq_waitq);
+
+ return IRQ_HANDLED;
+}
+
+/* Wait for operation complete */
+static void mpc5121_nfc_done(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+ int rv;
+
+ if ((nfc_read(mtd, NFC_CONFIG2) & NFC_INT) == 0) {
+ nfc_clear(mtd, NFC_CONFIG1, NFC_INT_MASK);
+ rv = wait_event_timeout(prv->irq_waitq,
+ (nfc_read(mtd, NFC_CONFIG2) & NFC_INT), NFC_TIMEOUT);
+
+ if (!rv)
+ dev_warn(prv->dev,
+ "Timeout while waiting for interrupt.\n");
+ }
+
+ nfc_clear(mtd, NFC_CONFIG2, NFC_INT);
+}
+
+/* Do address cycle(s) */
+static void mpc5121_nfc_addr_cycle(struct mtd_info *mtd, int column, int page)
+{
+ struct nand_chip *chip = mtd->priv;
+ u32 pagemask = chip->pagemask;
+
+ if (column != -1) {
+ mpc5121_nfc_send_addr(mtd, column);
+ if (mtd->writesize > 512)
+ mpc5121_nfc_send_addr(mtd, column >> 8);
+ }
+
+ if (page != -1) {
+ do {
+ mpc5121_nfc_send_addr(mtd, page & 0xFF);
+ page >>= 8;
+ pagemask >>= 8;
+ } while (pagemask);
+ }
+}
+
+/* Control chip select signals */
+static void mpc5121_nfc_select_chip(struct mtd_info *mtd, int chip)
+{
+ if (chip < 0) {
+ nfc_clear(mtd, NFC_CONFIG1, NFC_CE);
+ return;
+ }
+
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_ACTIVE_CS_MASK);
+ nfc_set(mtd, NFC_BUF_ADDR, (chip << NFC_ACTIVE_CS_SHIFT) &
+ NFC_ACTIVE_CS_MASK);
+ nfc_set(mtd, NFC_CONFIG1, NFC_CE);
+}
+
+/* Init external chip select logic on ADS5121 board */
+static int ads5121_chipselect_init(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+ struct device_node *dn;
+
+ dn = of_find_compatible_node(NULL, NULL, "fsl,mpc5121ads-cpld");
+ if (dn) {
+ prv->csreg = of_iomap(dn, 0);
+ of_node_put(dn);
+ if (!prv->csreg)
+ return -ENOMEM;
+
+ /* CPLD Register 9 controls NAND /CE Lines */
+ prv->csreg += 9;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/* Control chips select signal on ADS5121 board */
+static void ads5121_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct mpc5121_nfc_prv *prv = nand->priv;
+ u8 v;
+
+ v = in_8(prv->csreg);
+ v |= 0x0F;
+
+ if (chip >= 0) {
+ mpc5121_nfc_select_chip(mtd, 0);
+ v &= ~(1 << chip);
+ } else
+ mpc5121_nfc_select_chip(mtd, -1);
+
+ out_8(prv->csreg, v);
+}
+
+/* Read NAND Ready/Busy signal */
+static int mpc5121_nfc_dev_ready(struct mtd_info *mtd)
+{
+ /*
+ * NFC handles ready/busy signal internally. Therefore, this function
+ * always returns status as ready.
+ */
+ return 1;
+}
+
+/* Write command to NAND flash */
+static void mpc5121_nfc_command(struct mtd_info *mtd, unsigned command,
+ int column, int page)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ prv->column = (column >= 0) ? column : 0;
+ prv->spareonly = 0;
+
+ switch (command) {
+ case NAND_CMD_PAGEPROG:
+ mpc5121_nfc_send_prog_page(mtd);
+ break;
+ /*
+ * NFC does not support sub-page reads and writes,
+ * so emulate them using full page transfers.
+ */
+ case NAND_CMD_READ0:
+ column = 0;
+ break;
+
+ case NAND_CMD_READ1:
+ prv->column += 256;
+ command = NAND_CMD_READ0;
+ column = 0;
+ break;
+
+ case NAND_CMD_READOOB:
+ prv->spareonly = 1;
+ command = NAND_CMD_READ0;
+ column = 0;
+ break;
+
+ case NAND_CMD_SEQIN:
+ mpc5121_nfc_command(mtd, NAND_CMD_READ0, column, page);
+ column = 0;
+ break;
+
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_READID:
+ case NAND_CMD_STATUS:
+ break;
+
+ default:
+ return;
+ }
+
+ mpc5121_nfc_send_cmd(mtd, command);
+ mpc5121_nfc_addr_cycle(mtd, column, page);
+
+ switch (command) {
+ case NAND_CMD_READ0:
+ if (mtd->writesize > 512)
+ mpc5121_nfc_send_cmd(mtd, NAND_CMD_READSTART);
+ mpc5121_nfc_send_read_page(mtd);
+ break;
+
+ case NAND_CMD_READID:
+ mpc5121_nfc_send_read_id(mtd);
+ break;
+
+ case NAND_CMD_STATUS:
+ mpc5121_nfc_send_read_status(mtd);
+ if (chip->options & NAND_BUSWIDTH_16)
+ prv->column = 1;
+ else
+ prv->column = 0;
+ break;
+ }
+}
+
+/* Copy data from/to NFC spare buffers. */
+static void mpc5121_nfc_copy_spare(struct mtd_info *mtd, uint offset,
+ u8 *buffer, uint size, int wr)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct mpc5121_nfc_prv *prv = nand->priv;
+ uint o, s, sbsize, blksize;
+
+ /*
+ * NAND spare area is available through NFC spare buffers.
+ * The NFC divides spare area into (page_size / 512) chunks.
+ * Each chunk is placed into separate spare memory area, using
+ * first (spare_size / num_of_chunks) bytes of the buffer.
+ *
+ * For NAND device in which the spare area is not divided fully
+ * by the number of chunks, number of used bytes in each spare
+ * buffer is rounded down to the nearest even number of bytes,
+ * and all remaining bytes are added to the last used spare area.
+ *
+ * For more information read section 26.6.10 of MPC5121e
+ * Microcontroller Reference Manual, Rev. 3.
+ */
+
+ /* Calculate number of valid bytes in each spare buffer */
+ sbsize = (mtd->oobsize / (mtd->writesize / 512)) & ~1;
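+ /*
+  * Worked example (illustrative values only): a 2048-byte page with a
+  * 64-byte spare area gives 2048 / 512 = 4 chunks, so each spare
+  * buffer holds (64 / 4) & ~1 = 16 valid bytes.
+  */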
+
+ while (size) {
+ /* Calculate spare buffer number */
+ s = offset / sbsize;
+ if (s > NFC_SPARE_BUFFERS - 1)
+ s = NFC_SPARE_BUFFERS - 1;
+
+ /*
+ * Calculate offset to requested data block in selected spare
+ * buffer and its size.
+ */
+ o = offset - (s * sbsize);
+ blksize = min(sbsize - o, size);
+
+ if (wr)
+ memcpy_toio(prv->regs + NFC_SPARE_AREA(s) + o,
+ buffer, blksize);
+ else
+ memcpy_fromio(buffer,
+ prv->regs + NFC_SPARE_AREA(s) + o, blksize);
+
+ buffer += blksize;
+ offset += blksize;
+ size -= blksize;
+ }
+}
+
+/* Copy data from/to NFC main and spare buffers */
+static void mpc5121_nfc_buf_copy(struct mtd_info *mtd, u_char *buf, int len,
+ int wr)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+ uint c = prv->column;
+ uint l;
+
+ /* Handle spare area access */
+ if (prv->spareonly || c >= mtd->writesize) {
+ /* Calculate offset from beginning of spare area */
+ if (c >= mtd->writesize)
+ c -= mtd->writesize;
+
+ prv->column += len;
+ mpc5121_nfc_copy_spare(mtd, c, buf, len, wr);
+ return;
+ }
+
+ /*
+ * Handle main area access - limit copy length to prevent
+ * crossing main/spare boundary.
+ */
+ l = min((uint)len, mtd->writesize - c);
+ prv->column += l;
+
+ if (wr)
+ memcpy_toio(prv->regs + NFC_MAIN_AREA(0) + c, buf, l);
+ else
+ memcpy_fromio(buf, prv->regs + NFC_MAIN_AREA(0) + c, l);
+
+ /* Handle crossing main/spare boundary */
+ if (l != len) {
+ buf += l;
+ len -= l;
+ mpc5121_nfc_buf_copy(mtd, buf, len, wr);
+ }
+}
+
+/* Read data from NFC buffers */
+static void mpc5121_nfc_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ mpc5121_nfc_buf_copy(mtd, buf, len, 0);
+}
+
+/* Write data to NFC buffers */
+static void mpc5121_nfc_write_buf(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ mpc5121_nfc_buf_copy(mtd, (u_char *)buf, len, 1);
+}
+
+/* Compare buffer with NAND flash */
+static int mpc5121_nfc_verify_buf(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ u_char tmp[256];
+ uint bsize;
+
+ while (len) {
+ bsize = min(len, 256);
+ mpc5121_nfc_read_buf(mtd, tmp, bsize);
+
+ if (memcmp(buf, tmp, bsize))
+ return 1;
+
+ buf += bsize;
+ len -= bsize;
+ }
+
+ return 0;
+}
+
+/* Read byte from NFC buffers */
+static u8 mpc5121_nfc_read_byte(struct mtd_info *mtd)
+{
+ u8 tmp;
+
+ mpc5121_nfc_read_buf(mtd, &tmp, sizeof(tmp));
+
+ return tmp;
+}
+
+/* Read word from NFC buffers */
+static u16 mpc5121_nfc_read_word(struct mtd_info *mtd)
+{
+ u16 tmp;
+
+ mpc5121_nfc_read_buf(mtd, (u_char *)&tmp, sizeof(tmp));
+
+ return tmp;
+}
+
+/*
+ * Read NFC configuration from Reset Config Word
+ *
+ * The NFC is configured during reset on the basis of information stored
+ * in the Reset Config Word. There is no other way to set the NAND page
+ * size, spare size and bus width.
+ */
+static int mpc5121_nfc_read_hw_config(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+ struct mpc512x_reset_module *rm;
+ struct device_node *rmnode;
+ uint rcw_pagesize = 0;
+ uint rcw_sparesize = 0;
+ uint rcw_width;
+ uint rcwh;
+ uint romloc, ps;
+
+ rmnode = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-reset");
+ if (!rmnode) {
+ dev_err(prv->dev, "Missing 'fsl,mpc5121-reset' "
+ "node in device tree!\n");
+ return -ENODEV;
+ }
+
+ rm = of_iomap(rmnode, 0);
+ if (!rm) {
+ dev_err(prv->dev, "Error mapping reset module node!\n");
+ return -EBUSY;
+ }
+
+ rcwh = in_be32(&rm->rcwhr);
+
+ /* Bit 6: NFC bus width */
+ rcw_width = ((rcwh >> 6) & 0x1) ? 2 : 1;
+
+ /* Bit 7: NFC Page/Spare size */
+ ps = (rcwh >> 7) & 0x1;
+
+ /* Bits [22:21]: ROM Location */
+ romloc = (rcwh >> 21) & 0x3;
+
+ /* Decode RCW bits */
+ switch ((ps << 2) | romloc) {
+ case 0x00:
+ case 0x01:
+ rcw_pagesize = 512;
+ rcw_sparesize = 16;
+ break;
+ case 0x02:
+ case 0x03:
+ rcw_pagesize = 4096;
+ rcw_sparesize = 128;
+ break;
+ case 0x04:
+ case 0x05:
+ rcw_pagesize = 2048;
+ rcw_sparesize = 64;
+ break;
+ case 0x06:
+ case 0x07:
+ rcw_pagesize = 4096;
+ rcw_sparesize = 218;
+ break;
+ }
+
+ mtd->writesize = rcw_pagesize;
+ mtd->oobsize = rcw_sparesize;
+ if (rcw_width == 2)
+ chip->options |= NAND_BUSWIDTH_16;
+
+ dev_notice(prv->dev, "Configured for "
+ "%u-bit NAND, page size %u "
+ "with %u spare.\n",
+ rcw_width * 8, rcw_pagesize,
+ rcw_sparesize);
+ iounmap(rm);
+ of_node_put(rmnode);
+ return 0;
+}
+
+/* Free driver resources */
+static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ if (prv->clk) {
+ clk_disable(prv->clk);
+ clk_put(prv->clk);
+ }
+
+ if (prv->csreg)
+ iounmap(prv->csreg);
+}
+
+static int __devinit mpc5121_nfc_probe(struct of_device *op,
+ const struct of_device_id *match)
+{
+ struct device_node *rootnode, *dn = op->node;
+ struct device *dev = &op->dev;
+ struct mpc5121_nfc_prv *prv;
+ struct resource res;
+ struct mtd_info *mtd;
+#ifdef CONFIG_MTD_PARTITIONS
+ struct mtd_partition *parts;
+#endif
+ struct nand_chip *chip;
+ unsigned long regs_paddr, regs_size;
+ const uint *chips_no;
+ int resettime = 0;
+ int retval = 0;
+ int rev, len;
+
+ /*
+ * Check SoC revision. This driver supports only NFC
+ * in MPC5121 revision 2 and MPC5123 revision 3.
+ */
+ rev = (mfspr(SPRN_SVR) >> 4) & 0xF;
+ if ((rev != 2) && (rev != 3)) {
+ dev_err(dev, "SoC revision %u is not supported!\n", rev);
+ return -ENXIO;
+ }
+
+ prv = devm_kzalloc(dev, sizeof(*prv), GFP_KERNEL);
+ if (!prv) {
+ dev_err(dev, "Memory exhausted!\n");
+ return -ENOMEM;
+ }
+
+ mtd = &prv->mtd;
+ chip = &prv->chip;
+
+ mtd->priv = chip;
+ chip->priv = prv;
+ prv->dev = dev;
+
+ /* Read NFC configuration from Reset Config Word */
+ retval = mpc5121_nfc_read_hw_config(mtd);
+ if (retval) {
+ dev_err(dev, "Unable to read NFC config!\n");
+ return retval;
+ }
+
+ prv->irq = irq_of_parse_and_map(dn, 0);
+ if (prv->irq == NO_IRQ) {
+ dev_err(dev, "Error mapping IRQ!\n");
+ return -EINVAL;
+ }
+
+ retval = of_address_to_resource(dn, 0, &res);
+ if (retval) {
+ dev_err(dev, "Error parsing memory region!\n");
+ return retval;
+ }
+
+ chips_no = of_get_property(dn, "chips", &len);
+ if (!chips_no || len != sizeof(*chips_no)) {
+ dev_err(dev, "Invalid/missing 'chips' property!\n");
+ return -EINVAL;
+ }
+
+ regs_paddr = res.start;
+ regs_size = res.end - res.start + 1;
+
+ if (!devm_request_mem_region(dev, regs_paddr, regs_size, DRV_NAME)) {
+ dev_err(dev, "Error requesting memory region!\n");
+ return -EBUSY;
+ }
+
+ prv->regs = devm_ioremap(dev, regs_paddr, regs_size);
+ if (!prv->regs) {
+ dev_err(dev, "Error mapping memory region!\n");
+ return -ENOMEM;
+ }
+
+ mtd->name = "MPC5121 NAND";
+ chip->dev_ready = mpc5121_nfc_dev_ready;
+ chip->cmdfunc = mpc5121_nfc_command;
+ chip->read_byte = mpc5121_nfc_read_byte;
+ chip->read_word = mpc5121_nfc_read_word;
+ chip->read_buf = mpc5121_nfc_read_buf;
+ chip->write_buf = mpc5121_nfc_write_buf;
+ chip->verify_buf = mpc5121_nfc_verify_buf;
+ chip->select_chip = mpc5121_nfc_select_chip;
+ chip->options = NAND_NO_AUTOINCR | NAND_USE_FLASH_BBT;
+ chip->ecc.mode = NAND_ECC_SOFT;
+
+ /* Support external chip-select logic on ADS5121 board */
+ rootnode = of_find_node_by_path("/");
+ if (of_device_is_compatible(rootnode, "fsl,mpc5121ads")) {
+ retval = ads5121_chipselect_init(mtd);
+ if (retval) {
+ dev_err(dev, "Chipselect init error!\n");
+ of_node_put(rootnode);
+ return retval;
+ }
+
+ chip->select_chip = ads5121_select_chip;
+ }
+ of_node_put(rootnode);
+
+ /* Enable NFC clock */
+ prv->clk = clk_get(dev, "nfc_clk");
+ if (!prv->clk) {
+ dev_err(dev, "Unable to acquire NFC clock!\n");
+ retval = -ENODEV;
+ goto error;
+ }
+
+ clk_enable(prv->clk);
+
+ /* Reset NAND Flash controller */
+ nfc_set(mtd, NFC_CONFIG1, NFC_RESET);
+ while (nfc_read(mtd, NFC_CONFIG1) & NFC_RESET) {
+ if (resettime++ >= NFC_RESET_TIMEOUT) {
+ dev_err(dev, "Timeout while resetting NFC!\n");
+ retval = -EINVAL;
+ goto error;
+ }
+
+ udelay(1);
+ }
+
+ /* Enable write to NFC memory */
+ nfc_write(mtd, NFC_CONFIG, NFC_BLS_UNLOCKED);
+
+ /* Enable write to all NAND pages */
+ nfc_write(mtd, NFC_UNLOCKSTART_BLK0, 0x0000);
+ nfc_write(mtd, NFC_UNLOCKEND_BLK0, 0xFFFF);
+ nfc_write(mtd, NFC_WRPROT, NFC_WPC_UNLOCK);
+
+ /*
+ * Setup NFC:
+ * - Big Endian transfers,
+ * - Interrupt after full page read/write.
+ */
+ nfc_write(mtd, NFC_CONFIG1, NFC_BIG_ENDIAN | NFC_INT_MASK |
+ NFC_FULL_PAGE_INT);
+
+ /* Set spare area size */
+ nfc_write(mtd, NFC_SPAS, mtd->oobsize >> 1);
+
+ init_waitqueue_head(&prv->irq_waitq);
+ retval = devm_request_irq(dev, prv->irq, &mpc5121_nfc_irq, 0, DRV_NAME,
+ mtd);
+ if (retval) {
+ dev_err(dev, "Error requesting IRQ!\n");
+ goto error;
+ }
+
+ /* Detect NAND chips */
+ if (nand_scan(mtd, *chips_no)) {
+ dev_err(dev, "NAND Flash not found !\n");
+ devm_free_irq(dev, prv->irq, mtd);
+ retval = -ENXIO;
+ goto error;
+ }
+
+ /* Set erase block size */
+ switch (mtd->erasesize / mtd->writesize) {
+ case 32:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_32);
+ break;
+
+ case 64:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_64);
+ break;
+
+ case 128:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_128);
+ break;
+
+ case 256:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_256);
+ break;
+
+ default:
+ dev_err(dev, "Unsupported NAND flash!\n");
+ devm_free_irq(dev, prv->irq, mtd);
+ retval = -ENXIO;
+ goto error;
+ }
+
+ dev_set_drvdata(dev, mtd);
+
+ /* Register device in MTD */
+#ifdef CONFIG_MTD_PARTITIONS
+ retval = parse_mtd_partitions(mtd, mpc5121_nfc_pprobes, &parts, 0);
+#ifdef CONFIG_MTD_OF_PARTS
+ if (retval == 0)
+ retval = of_mtd_parse_partitions(dev, dn, &parts);
+#endif
+ if (retval < 0) {
+ dev_err(dev, "Error parsing MTD partitions!\n");
+ devm_free_irq(dev, prv->irq, mtd);
+ retval = -EINVAL;
+ goto error;
+ }
+
+ if (retval > 0)
+ retval = add_mtd_partitions(mtd, parts, retval);
+ else
+#endif
+ retval = add_mtd_device(mtd);
+
+ if (retval) {
+ dev_err(dev, "Error adding MTD device!\n");
+ devm_free_irq(dev, prv->irq, mtd);
+ goto error;
+ }
+
+ return 0;
+error:
+ mpc5121_nfc_free(dev, mtd);
+ return retval;
+}
+
+static int __devexit mpc5121_nfc_remove(struct of_device *op)
+{
+ struct device *dev = &op->dev;
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ nand_release(mtd);
+ devm_free_irq(dev, prv->irq, mtd);
+ mpc5121_nfc_free(dev, mtd);
+
+ return 0;
+}
+
+static struct of_device_id mpc5121_nfc_match[] __devinitdata = {
+ { .compatible = "fsl,mpc5121-nfc", },
+ {},
+};
+
+static struct of_platform_driver mpc5121_nfc_driver = {
+ .match_table = mpc5121_nfc_match,
+ .probe = mpc5121_nfc_probe,
+ .remove = __devexit_p(mpc5121_nfc_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init mpc5121_nfc_init(void)
+{
+ return of_register_platform_driver(&mpc5121_nfc_driver);
+}
+
+module_init(mpc5121_nfc_init);
+
+static void __exit mpc5121_nfc_cleanup(void)
+{
+ of_unregister_platform_driver(&mpc5121_nfc_driver);
+}
+
+module_exit(mpc5121_nfc_cleanup);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("MPC5121 NAND MTD driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index b2900d8406d3..35da3dc4bd17 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -38,7 +38,7 @@
#define DRIVER_NAME "mxc_nand"
#define nfc_is_v21() (cpu_is_mx25() || cpu_is_mx35())
-#define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27())
+#define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27() || cpu_is_mx21())
/* Addresses for NFC registers */
#define NFC_BUF_SIZE 0xE00
@@ -168,11 +168,7 @@ static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
{
struct mxc_nand_host *host = dev_id;
- uint16_t tmp;
-
- tmp = readw(host->regs + NFC_CONFIG1);
- tmp |= NFC_INT_MSK; /* Disable interrupt */
- writew(tmp, host->regs + NFC_CONFIG1);
+ disable_irq_nosync(irq);
wake_up(&host->irq_waitq);
@@ -184,15 +180,13 @@ static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
*/
static void wait_op_done(struct mxc_nand_host *host, int useirq)
{
- uint32_t tmp;
- int max_retries = 2000;
+ uint16_t tmp;
+ int max_retries = 8000;
if (useirq) {
if ((readw(host->regs + NFC_CONFIG2) & NFC_INT) == 0) {
- tmp = readw(host->regs + NFC_CONFIG1);
- tmp &= ~NFC_INT_MSK; /* Enable interrupt */
- writew(tmp, host->regs + NFC_CONFIG1);
+ enable_irq(host->irq);
wait_event(host->irq_waitq,
readw(host->regs + NFC_CONFIG2) & NFC_INT);
@@ -226,8 +220,23 @@ static void send_cmd(struct mxc_nand_host *host, uint16_t cmd, int useirq)
writew(cmd, host->regs + NFC_FLASH_CMD);
writew(NFC_CMD, host->regs + NFC_CONFIG2);
- /* Wait for operation to complete */
- wait_op_done(host, useirq);
+ if (cpu_is_mx21() && (cmd == NAND_CMD_RESET)) {
+ int max_retries = 100;
+ /* Reset completion is indicated by NFC_CONFIG2 */
+ /* being set to 0 */
+ while (max_retries-- > 0) {
+ if (readw(host->regs + NFC_CONFIG2) == 0) {
+ break;
+ }
+ udelay(1);
+ }
+ if (max_retries < 0)
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: RESET failed\n",
+ __func__);
+ } else {
+ /* Wait for operation to complete */
+ wait_op_done(host, useirq);
+ }
}
/* This function sends an address (or partial address) to the
@@ -542,6 +551,41 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
}
}
+static void preset(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+ uint16_t tmp;
+
+ /* enable interrupt, disable spare enable */
+ tmp = readw(host->regs + NFC_CONFIG1);
+ tmp &= ~NFC_INT_MSK;
+ tmp &= ~NFC_SP_EN;
+ if (nand_chip->ecc.mode == NAND_ECC_HW) {
+ tmp |= NFC_ECC_EN;
+ } else {
+ tmp &= ~NFC_ECC_EN;
+ }
+ writew(tmp, host->regs + NFC_CONFIG1);
+ /* preset operation */
+
+ /* Unlock the internal RAM Buffer */
+ writew(0x2, host->regs + NFC_CONFIG);
+
+ /* Blocks to be unlocked */
+ if (nfc_is_v21()) {
+ writew(0x0, host->regs + NFC_V21_UNLOCKSTART_BLKADDR);
+ writew(0xffff, host->regs + NFC_V21_UNLOCKEND_BLKADDR);
+ } else if (nfc_is_v1()) {
+ writew(0x0, host->regs + NFC_V1_UNLOCKSTART_BLKADDR);
+ writew(0x4000, host->regs + NFC_V1_UNLOCKEND_BLKADDR);
+ } else
+ BUG();
+
+ /* Unlock Block Command for given address range */
+ writew(0x4, host->regs + NFC_WRPROT);
+}
+
/* Used by the upper layer to write command to NAND Flash for
* different operations to be carried out on NAND Flash */
static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
@@ -559,6 +603,10 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
/* Command pre-processing step */
switch (command) {
+ case NAND_CMD_RESET:
+ send_cmd(host, command, false);
+ preset(mtd);
+ break;
case NAND_CMD_STATUS:
host->buf_start = 0;
@@ -638,6 +686,7 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
case NAND_CMD_ERASE1:
case NAND_CMD_ERASE2:
+ case NAND_CMD_RESET:
send_cmd(host, command, false);
mxc_do_addr_cycle(mtd, column, page_addr);
@@ -679,7 +728,6 @@ static int __init mxcnd_probe(struct platform_device *pdev)
struct mxc_nand_platform_data *pdata = pdev->dev.platform_data;
struct mxc_nand_host *host;
struct resource *res;
- uint16_t tmp;
int err = 0, nr_parts = 0;
struct nand_ecclayout *oob_smallpage, *oob_largepage;
@@ -743,51 +791,17 @@ static int __init mxcnd_probe(struct platform_device *pdev)
host->spare_len = 64;
oob_smallpage = &nandv2_hw_eccoob_smallpage;
oob_largepage = &nandv2_hw_eccoob_largepage;
+ this->ecc.bytes = 9;
} else if (nfc_is_v1()) {
host->regs = host->base;
host->spare0 = host->base + 0x800;
host->spare_len = 16;
oob_smallpage = &nandv1_hw_eccoob_smallpage;
oob_largepage = &nandv1_hw_eccoob_largepage;
- } else
- BUG();
-
- /* disable interrupt and spare enable */
- tmp = readw(host->regs + NFC_CONFIG1);
- tmp |= NFC_INT_MSK;
- tmp &= ~NFC_SP_EN;
- writew(tmp, host->regs + NFC_CONFIG1);
-
- init_waitqueue_head(&host->irq_waitq);
-
- host->irq = platform_get_irq(pdev, 0);
-
- err = request_irq(host->irq, mxc_nfc_irq, 0, DRIVER_NAME, host);
- if (err)
- goto eirq;
-
- /* Reset NAND */
- this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
-
- /* preset operation */
- /* Unlock the internal RAM Buffer */
- writew(0x2, host->regs + NFC_CONFIG);
-
- /* Blocks to be unlocked */
- if (nfc_is_v21()) {
- writew(0x0, host->regs + NFC_V21_UNLOCKSTART_BLKADDR);
- writew(0xffff, host->regs + NFC_V21_UNLOCKEND_BLKADDR);
- this->ecc.bytes = 9;
- } else if (nfc_is_v1()) {
- writew(0x0, host->regs + NFC_V1_UNLOCKSTART_BLKADDR);
- writew(0x4000, host->regs + NFC_V1_UNLOCKEND_BLKADDR);
this->ecc.bytes = 3;
} else
BUG();
- /* Unlock Block Command for given address range */
- writew(0x4, host->regs + NFC_WRPROT);
-
this->ecc.size = 512;
this->ecc.layout = oob_smallpage;
@@ -796,14 +810,8 @@ static int __init mxcnd_probe(struct platform_device *pdev)
this->ecc.hwctl = mxc_nand_enable_hwecc;
this->ecc.correct = mxc_nand_correct_data;
this->ecc.mode = NAND_ECC_HW;
- tmp = readw(host->regs + NFC_CONFIG1);
- tmp |= NFC_ECC_EN;
- writew(tmp, host->regs + NFC_CONFIG1);
} else {
this->ecc.mode = NAND_ECC_SOFT;
- tmp = readw(host->regs + NFC_CONFIG1);
- tmp &= ~NFC_ECC_EN;
- writew(tmp, host->regs + NFC_CONFIG1);
}
/* NAND bus width determines access funtions used by upper layer */
@@ -817,8 +825,16 @@ static int __init mxcnd_probe(struct platform_device *pdev)
this->options |= NAND_USE_FLASH_BBT;
}
+ init_waitqueue_head(&host->irq_waitq);
+
+ host->irq = platform_get_irq(pdev, 0);
+
+ err = request_irq(host->irq, mxc_nfc_irq, IRQF_DISABLED, DRIVER_NAME, host);
+ if (err)
+ goto eirq;
+
/* first scan to find the device and get the page size */
- if (nand_scan_ident(mtd, 1)) {
+ if (nand_scan_ident(mtd, 1, NULL)) {
err = -ENXIO;
goto escan;
}
@@ -886,11 +902,14 @@ static int mxcnd_suspend(struct platform_device *pdev, pm_message_t state)
int ret = 0;
DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND suspend\n");
- if (mtd) {
- ret = mtd->suspend(mtd);
- /* Disable the NFC clock */
- clk_disable(host->clk);
- }
+
+ ret = mtd->suspend(mtd);
+
+ /*
+ * nand_suspend locks the device for exclusive access, so
+ * the clock must already be off.
+ */
+ BUG_ON(!ret && host->clk_act);
return ret;
}
@@ -904,11 +923,7 @@ static int mxcnd_resume(struct platform_device *pdev)
DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND resume\n");
- if (mtd) {
- /* Enable the NFC clock */
- clk_enable(host->clk);
- mtd->resume(mtd);
- }
+ mtd->resume(mtd);
return ret;
}
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 8f2958fe2148..b9dc65c7253c 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -108,6 +108,35 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
*/
DEFINE_LED_TRIGGER(nand_led_trigger);
+static int check_offs_len(struct mtd_info *mtd,
+ loff_t ofs, uint64_t len)
+{
+ struct nand_chip *chip = mtd->priv;
+ int ret = 0;
+
+ /* Start address must align on block boundary */
+ if (ofs & ((1 << chip->phys_erase_shift) - 1)) {
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Unaligned address\n", __func__);
+ ret = -EINVAL;
+ }
+
+ /* Length must align on block boundary */
+ if (len & ((1 << chip->phys_erase_shift) - 1)) {
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Length not block aligned\n",
+ __func__);
+ ret = -EINVAL;
+ }
+
+ /* Do not allow past end of device */
+ if (ofs + len > mtd->size) {
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Past end of device\n",
+ __func__);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
/**
* nand_release_device - [GENERIC] release chip
* @mtd: MTD device structure
@@ -335,14 +364,18 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
bad = cpu_to_le16(chip->read_word(mtd));
if (chip->badblockpos & 0x1)
bad >>= 8;
- if ((bad & 0xFF) != 0xff)
- res = 1;
+ else
+ bad &= 0xFF;
} else {
chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos, page);
- if (chip->read_byte(mtd) != 0xff)
- res = 1;
+ bad = chip->read_byte(mtd);
}
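+ /*
+  * A block is treated as bad when fewer than badblockbits bits are
+  * still set in the marker byte; with the default of 8 the marker
+  * must read back as 0xFF for the block to be considered good.
+  */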
+ if (likely(chip->badblockbits == 8))
+ res = bad != 0xFF;
+ else
+ res = hweight8(bad) < chip->badblockbits;
+
if (getchip)
nand_release_device(mtd);
@@ -401,6 +434,11 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
static int nand_check_wp(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
+
+ /* broken xD cards report WP despite being writable */
+ if (chip->options & NAND_BROKEN_XD)
+ return 0;
+
/* Check the WP bit */
chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1;
@@ -744,9 +782,6 @@ nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state)
chip->state = FL_PM_SUSPENDED;
spin_unlock(lock);
return 0;
- } else {
- spin_unlock(lock);
- return -EAGAIN;
}
}
set_current_state(TASK_UNINTERRUPTIBLE);
@@ -835,6 +870,168 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
}
/**
+ * __nand_unlock - [REPLACEABLE] unlocks the specified locked blocks
+ *
+ * @param mtd - mtd info
+ * @param ofs - offset to start unlock from
+ * @param len - length to unlock
+ * @param invert - when = 0, unlock the range of blocks within the lower and
+ * upper boundary address
+ * when = 1, unlock the range of blocks outside the boundaries
+ * of the lower and upper boundary address
+ *
+ * @return - unlock status
+ */
+static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
+ uint64_t len, int invert)
+{
+ int ret = 0;
+ int status, page;
+ struct nand_chip *chip = mtd->priv;
+
+ /* Submit address of first page to unlock */
+ page = ofs >> chip->page_shift;
+ chip->cmdfunc(mtd, NAND_CMD_UNLOCK1, -1, page & chip->pagemask);
+
+ /* Submit address of last page to unlock */
+ page = (ofs + len) >> chip->page_shift;
+ chip->cmdfunc(mtd, NAND_CMD_UNLOCK2, -1,
+ (page | invert) & chip->pagemask);
+
+ /* Call wait ready function */
+ status = chip->waitfunc(mtd, chip);
+ udelay(1000);
+ /* See if device thinks it succeeded */
+ if (status & 0x01) {
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Error status = 0x%08x\n",
+ __func__, status);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+/**
+ * nand_unlock - [REPLACEABLE] unlocks the specified locked blocks
+ *
+ * @param mtd - mtd info
+ * @param ofs - offset to start unlock from
+ * @param len - length to unlock
+ *
+ * @return - unlock status
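+ *
+ * Example (illustrative): nand_unlock(mtd, 0, mtd->size) unlocks the
+ * whole device; when ofs + len equals the device size the length is
+ * trimmed internally so the last block address stays in range.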
+ */
+int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ int ret = 0;
+ int chipnr;
+ struct nand_chip *chip = mtd->priv;
+
+ DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n",
+ __func__, (unsigned long long)ofs, len);
+
+ if (check_offs_len(mtd, ofs, len))
+ ret = -EINVAL;
+
+ /* Align to last block address if size addresses end of the device */
+ if (ofs + len == mtd->size)
+ len -= mtd->erasesize;
+
+ nand_get_device(chip, mtd, FL_UNLOCKING);
+
+ /* Shift to get chip number */
+ chipnr = ofs >> chip->chip_shift;
+
+ chip->select_chip(mtd, chipnr);
+
+ /* Check, if it is write protected */
+ if (nand_check_wp(mtd)) {
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n",
+ __func__);
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = __nand_unlock(mtd, ofs, len, 0);
+
+out:
+ /* de-select the NAND device */
+ chip->select_chip(mtd, -1);
+
+ nand_release_device(mtd);
+
+ return ret;
+}
+
+/**
+ * nand_lock - [REPLACEABLE] locks all blocks present in the device
+ *
+ * @param mtd - mtd info
+ * @param ofs - offset to start lock from
+ * @param len - length to lock
+ *
+ * @return - lock status
+ *
+ * This feature is not supported in many NAND parts. 'Micron' NAND parts
+ * do have this feature, but it allows only locking all blocks, not a
+ * specified range of blocks.
+ *
+ * Implementing 'lock' feature by making use of 'unlock', for now.
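+ *
+ * Example (illustrative): nand_lock(mtd, ofs, len) first locks the whole
+ * device and then unlocks the blocks outside the requested range, so
+ * only the range starting at ofs for len bytes stays locked.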
+ */
+int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ int ret = 0;
+ int chipnr, status, page;
+ struct nand_chip *chip = mtd->priv;
+
+ DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n",
+ __func__, (unsigned long long)ofs, len);
+
+ if (check_offs_len(mtd, ofs, len))
+ ret = -EINVAL;
+
+ nand_get_device(chip, mtd, FL_LOCKING);
+
+ /* Shift to get chip number */
+ chipnr = ofs >> chip->chip_shift;
+
+ chip->select_chip(mtd, chipnr);
+
+ /* Check, if it is write protected */
+ if (nand_check_wp(mtd)) {
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n",
+ __func__);
+ status = MTD_ERASE_FAILED;
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Submit address of first page to lock */
+ page = ofs >> chip->page_shift;
+ chip->cmdfunc(mtd, NAND_CMD_LOCK, -1, page & chip->pagemask);
+
+ /* Call wait ready function */
+ status = chip->waitfunc(mtd, chip);
+ udelay(1000);
+ /* See if device thinks it succeeded */
+ if (status & 0x01) {
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Error status = 0x%08x\n",
+ __func__, status);
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = __nand_unlock(mtd, ofs, len, 0x1);
+
+out:
+ /* de-select the NAND device */
+ chip->select_chip(mtd, -1);
+
+ nand_release_device(mtd);
+
+ return ret;
+}
+
+/**
* nand_read_page_raw - [Intern] read raw page data without ecc
* @mtd: mtd info structure
* @chip: nand chip info structure
@@ -1232,6 +1429,9 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
int ret = 0;
uint32_t readlen = ops->len;
uint32_t oobreadlen = ops->ooblen;
+ uint32_t max_oobsize = ops->mode == MTD_OOB_AUTO ?
+ mtd->oobavail : mtd->oobsize;
+
uint8_t *bufpoi, *oob, *buf;
stats = mtd->ecc_stats;
@@ -1282,18 +1482,14 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
buf += bytes;
if (unlikely(oob)) {
- /* Raw mode does data:oob:data:oob */
- if (ops->mode != MTD_OOB_RAW) {
- int toread = min(oobreadlen,
- chip->ecc.layout->oobavail);
- if (toread) {
- oob = nand_transfer_oob(chip,
- oob, ops, toread);
- oobreadlen -= toread;
- }
- } else
- buf = nand_transfer_oob(chip,
- buf, ops, mtd->oobsize);
+
+ int toread = min(oobreadlen, max_oobsize);
+
+ if (toread) {
+ oob = nand_transfer_oob(chip,
+ oob, ops, toread);
+ oobreadlen -= toread;
+ }
}
if (!(chip->options & NAND_NO_READRDY)) {
@@ -1880,11 +2076,9 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
* @oob: oob data buffer
* @ops: oob ops structure
*/
-static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob,
- struct mtd_oob_ops *ops)
+static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
+ struct mtd_oob_ops *ops)
{
- size_t len = ops->ooblen;
-
switch(ops->mode) {
case MTD_OOB_PLACE:
@@ -1939,6 +2133,11 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
int chipnr, realpage, page, blockmask, column;
struct nand_chip *chip = mtd->priv;
uint32_t writelen = ops->len;
+
+ uint32_t oobwritelen = ops->ooblen;
+ uint32_t oobmaxlen = ops->mode == MTD_OOB_AUTO ?
+ mtd->oobavail : mtd->oobsize;
+
uint8_t *oob = ops->oobbuf;
uint8_t *buf = ops->datbuf;
int ret, subpage;
@@ -1980,6 +2179,10 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
if (likely(!oob))
memset(chip->oob_poi, 0xff, mtd->oobsize);
+ /* Don't allow multipage oob writes with offset */
+ if (ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen))
+ return -EINVAL;
+
while(1) {
int bytes = mtd->writesize;
int cached = writelen > bytes && page != blockmask;
@@ -1995,8 +2198,11 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
wbuf = chip->buffers->databuf;
}
- if (unlikely(oob))
- oob = nand_fill_oob(chip, oob, ops);
+ if (unlikely(oob)) {
+ size_t len = min(oobwritelen, oobmaxlen);
+ oob = nand_fill_oob(chip, oob, len, ops);
+ oobwritelen -= len;
+ }
ret = chip->write_page(mtd, chip, wbuf, page, cached,
(ops->mode == MTD_OOB_RAW));
@@ -2170,7 +2376,7 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
chip->pagebuf = -1;
memset(chip->oob_poi, 0xff, mtd->oobsize);
- nand_fill_oob(chip, ops->oobbuf, ops);
+ nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);
status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
memset(chip->oob_poi, 0xff, mtd->oobsize);
@@ -2293,25 +2499,8 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
__func__, (unsigned long long)instr->addr,
(unsigned long long)instr->len);
- /* Start address must align on block boundary */
- if (instr->addr & ((1 << chip->phys_erase_shift) - 1)) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Unaligned address\n", __func__);
+ if (check_offs_len(mtd, instr->addr, instr->len))
return -EINVAL;
- }
-
- /* Length must align on block boundary */
- if (instr->len & ((1 << chip->phys_erase_shift) - 1)) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Length not block aligned\n",
- __func__);
- return -EINVAL;
- }
-
- /* Do not allow erase past end of device */
- if ((instr->len + instr->addr) > mtd->size) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Erase past end of device\n",
- __func__);
- return -EINVAL;
- }
instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
@@ -2582,10 +2771,10 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
*/
static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
struct nand_chip *chip,
- int busw, int *maf_id)
+ int busw, int *maf_id,
+ struct nand_flash_dev *type)
{
- struct nand_flash_dev *type = NULL;
- int i, dev_id, maf_idx;
+ int dev_id, maf_idx;
int tmp_id, tmp_manf;
/* Select the device */
@@ -2624,15 +2813,14 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
return ERR_PTR(-ENODEV);
}
- /* Lookup the flash id */
- for (i = 0; nand_flash_ids[i].name != NULL; i++) {
- if (dev_id == nand_flash_ids[i].id) {
- type = &nand_flash_ids[i];
- break;
- }
- }
-
if (!type)
+ type = nand_flash_ids;
+
+ for (; type->name != NULL; type++)
+ if (dev_id == type->id)
+ break;
+
+ if (!type->name)
return ERR_PTR(-ENODEV);
if (!mtd->name)
@@ -2704,6 +2892,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
/* Set the bad block position */
chip->badblockpos = mtd->writesize > 512 ?
NAND_LARGE_BADBLOCK_POS : NAND_SMALL_BADBLOCK_POS;
+ chip->badblockbits = 8;
/* Get chip options, preserve non chip based options */
chip->options &= ~NAND_CHIPOPTIONS_MSK;
@@ -2741,13 +2930,15 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
* nand_scan_ident - [NAND Interface] Scan for the NAND device
* @mtd: MTD device structure
* @maxchips: Number of chips to scan for
+ * @table: Alternative NAND ID table
*
* This is the first phase of the normal nand_scan() function. It
* reads the flash ID and sets up MTD fields accordingly.
*
* The mtd->owner field must be set to the module of the caller.
*/
-int nand_scan_ident(struct mtd_info *mtd, int maxchips)
+int nand_scan_ident(struct mtd_info *mtd, int maxchips,
+ struct nand_flash_dev *table)
{
int i, busw, nand_maf_id;
struct nand_chip *chip = mtd->priv;
@@ -2759,7 +2950,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips)
nand_set_defaults(chip, busw);
/* Read the flash type */
- type = nand_get_flash_type(mtd, chip, busw, &nand_maf_id);
+ type = nand_get_flash_type(mtd, chip, busw, &nand_maf_id, table);
if (IS_ERR(type)) {
if (!(chip->options & NAND_SCAN_SILENT_NODEV))
@@ -2989,7 +3180,8 @@ int nand_scan_tail(struct mtd_info *mtd)
/* Fill in remaining MTD driver data */
mtd->type = MTD_NANDFLASH;
- mtd->flags = MTD_CAP_NANDFLASH;
+ mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
+ MTD_CAP_NANDFLASH;
mtd->erase = nand_erase;
mtd->point = NULL;
mtd->unpoint = NULL;
@@ -3050,7 +3242,7 @@ int nand_scan(struct mtd_info *mtd, int maxchips)
BUG();
}
- ret = nand_scan_ident(mtd, maxchips);
+ ret = nand_scan_ident(mtd, maxchips, NULL);
if (!ret)
ret = nand_scan_tail(mtd);
return ret;
@@ -3077,6 +3269,8 @@ void nand_release(struct mtd_info *mtd)
kfree(chip->buffers);
}
+EXPORT_SYMBOL_GPL(nand_lock);
+EXPORT_SYMBOL_GPL(nand_unlock);
EXPORT_SYMBOL_GPL(nand_scan);
EXPORT_SYMBOL_GPL(nand_scan_ident);
EXPORT_SYMBOL_GPL(nand_scan_tail);
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 55c23e5cd210..387c45c366fe 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -237,15 +237,33 @@ static int scan_read_raw(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
size_t len)
{
struct mtd_oob_ops ops;
+ int res;
ops.mode = MTD_OOB_RAW;
ops.ooboffs = 0;
ops.ooblen = mtd->oobsize;
- ops.oobbuf = buf;
- ops.datbuf = buf;
- ops.len = len;
- return mtd->read_oob(mtd, offs, &ops);
+
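+ /*
+  * Read one page of data followed by its OOB per iteration; the
+  * caller's buffer is filled as data then OOB for each page in turn.
+  */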
+ while (len > 0) {
+ if (len <= mtd->writesize) {
+ ops.oobbuf = buf + len;
+ ops.datbuf = buf;
+ ops.len = len;
+ return mtd->read_oob(mtd, offs, &ops);
+ } else {
+ ops.oobbuf = buf + mtd->writesize;
+ ops.datbuf = buf;
+ ops.len = mtd->writesize;
+ res = mtd->read_oob(mtd, offs, &ops);
+
+ if (res)
+ return res;
+ }
+
+ buf += mtd->oobsize + mtd->writesize;
+ len -= mtd->writesize;
+ }
+ return 0;
}
/*
diff --git a/drivers/mtd/nand/nand_bcm_umi.h b/drivers/mtd/nand/nand_bcm_umi.h
index 7cec2cd97854..198b304d6f72 100644
--- a/drivers/mtd/nand/nand_bcm_umi.h
+++ b/drivers/mtd/nand/nand_bcm_umi.h
@@ -167,18 +167,27 @@ static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
int numToRead = 16; /* There are 16 bytes per sector in the OOB */
/* ECC is already paused when this function is called */
+ if (pageSize != NAND_DATA_ACCESS_SIZE) {
+ /* skip BI */
+#if defined(__KERNEL__) && !defined(STANDALONE)
+ *oobp++ = REG_NAND_DATA8;
+#else
+ REG_NAND_DATA8;
+#endif
+ numToRead--;
+ }
- if (pageSize == NAND_DATA_ACCESS_SIZE) {
- while (numToRead > numEccBytes) {
- /* skip free oob region */
+ while (numToRead > numEccBytes) {
+ /* skip free oob region */
#if defined(__KERNEL__) && !defined(STANDALONE)
- *oobp++ = REG_NAND_DATA8;
+ *oobp++ = REG_NAND_DATA8;
#else
- REG_NAND_DATA8;
+ REG_NAND_DATA8;
#endif
- numToRead--;
- }
+ numToRead--;
+ }
+ if (pageSize == NAND_DATA_ACCESS_SIZE) {
/* read ECC bytes before BI */
nand_bcm_umi_bch_resume_read_ecc_calc();
@@ -190,6 +199,7 @@ static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
#else
eccCalc[eccPos++] = REG_NAND_DATA8;
#endif
+ numToRead--;
}
nand_bcm_umi_bch_pause_read_ecc_calc();
@@ -204,49 +214,18 @@ static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
numToRead--;
}
- /* read ECC bytes */
- nand_bcm_umi_bch_resume_read_ecc_calc();
- while (numToRead) {
-#if defined(__KERNEL__) && !defined(STANDALONE)
- *oobp = REG_NAND_DATA8;
- eccCalc[eccPos++] = *oobp;
- oobp++;
-#else
- eccCalc[eccPos++] = REG_NAND_DATA8;
-#endif
- numToRead--;
- }
- } else {
- /* skip BI */
+ }
+ /* read ECC bytes */
+ nand_bcm_umi_bch_resume_read_ecc_calc();
+ while (numToRead) {
#if defined(__KERNEL__) && !defined(STANDALONE)
- *oobp++ = REG_NAND_DATA8;
+ *oobp = REG_NAND_DATA8;
+ eccCalc[eccPos++] = *oobp;
+ oobp++;
#else
- REG_NAND_DATA8;
+ eccCalc[eccPos++] = REG_NAND_DATA8;
#endif
numToRead--;
-
- while (numToRead > numEccBytes) {
- /* skip free oob region */
-#if defined(__KERNEL__) && !defined(STANDALONE)
- *oobp++ = REG_NAND_DATA8;
-#else
- REG_NAND_DATA8;
-#endif
- numToRead--;
- }
-
- /* read ECC bytes */
- nand_bcm_umi_bch_resume_read_ecc_calc();
- while (numToRead) {
-#if defined(__KERNEL__) && !defined(STANDALONE)
- *oobp = REG_NAND_DATA8;
- eccCalc[eccPos++] = *oobp;
- oobp++;
-#else
- eccCalc[eccPos++] = REG_NAND_DATA8;
-#endif
- numToRead--;
- }
}
}
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 7281000fef2d..261337efe0ee 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -80,6 +80,9 @@
#ifndef CONFIG_NANDSIM_DBG
#define CONFIG_NANDSIM_DBG 0
#endif
+#ifndef CONFIG_NANDSIM_MAX_PARTS
+#define CONFIG_NANDSIM_MAX_PARTS 32
+#endif
static uint first_id_byte = CONFIG_NANDSIM_FIRST_ID_BYTE;
static uint second_id_byte = CONFIG_NANDSIM_SECOND_ID_BYTE;
@@ -94,7 +97,7 @@ static uint bus_width = CONFIG_NANDSIM_BUS_WIDTH;
static uint do_delays = CONFIG_NANDSIM_DO_DELAYS;
static uint log = CONFIG_NANDSIM_LOG;
static uint dbg = CONFIG_NANDSIM_DBG;
-static unsigned long parts[MAX_MTD_DEVICES];
+static unsigned long parts[CONFIG_NANDSIM_MAX_PARTS];
static unsigned int parts_num;
static char *badblocks = NULL;
static char *weakblocks = NULL;
@@ -135,8 +138,8 @@ MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read I
MODULE_PARM_DESC(access_delay, "Initial page access delay (microseconds)");
MODULE_PARM_DESC(programm_delay, "Page programm delay (microseconds");
MODULE_PARM_DESC(erase_delay, "Sector erase delay (milliseconds)");
-MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanodeconds)");
-MODULE_PARM_DESC(input_cycle, "Word input (to flash) time (nanodeconds)");
+MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanoseconds)");
+MODULE_PARM_DESC(input_cycle, "Word input (to flash) time (nanoseconds)");
MODULE_PARM_DESC(bus_width, "Chip's bus width (8- or 16-bit)");
MODULE_PARM_DESC(do_delays, "Simulate NAND delays using busy-waits if not zero");
MODULE_PARM_DESC(log, "Perform logging if not zero");
@@ -288,7 +291,7 @@ union ns_mem {
* The structure which describes all the internal simulator data.
*/
struct nandsim {
- struct mtd_partition partitions[MAX_MTD_DEVICES];
+ struct mtd_partition partitions[CONFIG_NANDSIM_MAX_PARTS];
unsigned int nbparts;
uint busw; /* flash chip bus width (8 or 16) */
@@ -312,7 +315,7 @@ struct nandsim {
union ns_mem buf;
/* NAND flash "geometry" */
- struct nandsin_geometry {
+ struct {
uint64_t totsz; /* total flash size, bytes */
uint32_t secsz; /* flash sector (erase block) size, bytes */
uint pgsz; /* NAND flash page size, bytes */
@@ -331,7 +334,7 @@ struct nandsim {
} geom;
/* NAND flash internal registers */
- struct nandsim_regs {
+ struct {
unsigned command; /* the command register */
u_char status; /* the status register */
uint row; /* the page number */
@@ -342,7 +345,7 @@ struct nandsim {
} regs;
/* NAND flash lines state */
- struct ns_lines_status {
+ struct {
int ce; /* chip Enable */
int cle; /* command Latch Enable */
int ale; /* address Latch Enable */
diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c
index 1f6f741af5da..8c0b69375224 100644
--- a/drivers/mtd/nand/nomadik_nand.c
+++ b/drivers/mtd/nand/nomadik_nand.c
@@ -105,21 +105,21 @@ static int nomadik_nand_probe(struct platform_device *pdev)
ret = -EIO;
goto err_unmap;
}
- host->addr_va = ioremap(res->start, res->end - res->start + 1);
+ host->addr_va = ioremap(res->start, resource_size(res));
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
if (!res) {
ret = -EIO;
goto err_unmap;
}
- host->data_va = ioremap(res->start, res->end - res->start + 1);
+ host->data_va = ioremap(res->start, resource_size(res));
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd");
if (!res) {
ret = -EIO;
goto err_unmap;
}
- host->cmd_va = ioremap(res->start, res->end - res->start + 1);
+ host->cmd_va = ioremap(res->start, resource_size(res));
if (!host->addr_va || !host->data_va || !host->cmd_va) {
ret = -ENOMEM;
diff --git a/drivers/mtd/nand/w90p910_nand.c b/drivers/mtd/nand/nuc900_nand.c
index 7680e731348a..6eddf7361ed7 100644
--- a/drivers/mtd/nand/w90p910_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2009 Nuvoton technology corporation.
+ * Copyright © 2009 Nuvoton technology corporation.
*
* Wan ZongShun <mcuos.com@gmail.com>
*
@@ -55,7 +55,7 @@
#define write_addr_reg(dev, val) \
__raw_writel((val), (dev)->reg + REG_SMADDR)
-struct w90p910_nand {
+struct nuc900_nand {
struct mtd_info mtd;
struct nand_chip chip;
void __iomem *reg;
@@ -76,49 +76,49 @@ static const struct mtd_partition partitions[] = {
}
};
-static unsigned char w90p910_nand_read_byte(struct mtd_info *mtd)
+static unsigned char nuc900_nand_read_byte(struct mtd_info *mtd)
{
unsigned char ret;
- struct w90p910_nand *nand;
+ struct nuc900_nand *nand;
- nand = container_of(mtd, struct w90p910_nand, mtd);
+ nand = container_of(mtd, struct nuc900_nand, mtd);
ret = (unsigned char)read_data_reg(nand);
return ret;
}
-static void w90p910_nand_read_buf(struct mtd_info *mtd,
- unsigned char *buf, int len)
+static void nuc900_nand_read_buf(struct mtd_info *mtd,
+ unsigned char *buf, int len)
{
int i;
- struct w90p910_nand *nand;
+ struct nuc900_nand *nand;
- nand = container_of(mtd, struct w90p910_nand, mtd);
+ nand = container_of(mtd, struct nuc900_nand, mtd);
for (i = 0; i < len; i++)
buf[i] = (unsigned char)read_data_reg(nand);
}
-static void w90p910_nand_write_buf(struct mtd_info *mtd,
- const unsigned char *buf, int len)
+static void nuc900_nand_write_buf(struct mtd_info *mtd,
+ const unsigned char *buf, int len)
{
int i;
- struct w90p910_nand *nand;
+ struct nuc900_nand *nand;
- nand = container_of(mtd, struct w90p910_nand, mtd);
+ nand = container_of(mtd, struct nuc900_nand, mtd);
for (i = 0; i < len; i++)
write_data_reg(nand, buf[i]);
}
-static int w90p910_verify_buf(struct mtd_info *mtd,
- const unsigned char *buf, int len)
+static int nuc900_verify_buf(struct mtd_info *mtd,
+ const unsigned char *buf, int len)
{
int i;
- struct w90p910_nand *nand;
+ struct nuc900_nand *nand;
- nand = container_of(mtd, struct w90p910_nand, mtd);
+ nand = container_of(mtd, struct nuc900_nand, mtd);
for (i = 0; i < len; i++) {
if (buf[i] != (unsigned char)read_data_reg(nand))
@@ -128,7 +128,7 @@ static int w90p910_verify_buf(struct mtd_info *mtd,
return 0;
}
-static int w90p910_check_rb(struct w90p910_nand *nand)
+static int nuc900_check_rb(struct nuc900_nand *nand)
{
unsigned int val;
spin_lock(&nand->lock);
@@ -139,24 +139,24 @@ static int w90p910_check_rb(struct w90p910_nand *nand)
return val;
}
-static int w90p910_nand_devready(struct mtd_info *mtd)
+static int nuc900_nand_devready(struct mtd_info *mtd)
{
- struct w90p910_nand *nand;
+ struct nuc900_nand *nand;
int ready;
- nand = container_of(mtd, struct w90p910_nand, mtd);
+ nand = container_of(mtd, struct nuc900_nand, mtd);
- ready = (w90p910_check_rb(nand)) ? 1 : 0;
+ ready = (nuc900_check_rb(nand)) ? 1 : 0;
return ready;
}
-static void w90p910_nand_command_lp(struct mtd_info *mtd,
- unsigned int command, int column, int page_addr)
+static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command,
+ int column, int page_addr)
{
register struct nand_chip *chip = mtd->priv;
- struct w90p910_nand *nand;
+ struct nuc900_nand *nand;
- nand = container_of(mtd, struct w90p910_nand, mtd);
+ nand = container_of(mtd, struct nuc900_nand, mtd);
if (command == NAND_CMD_READOOB) {
column += mtd->writesize;
@@ -212,7 +212,7 @@ static void w90p910_nand_command_lp(struct mtd_info *mtd,
write_cmd_reg(nand, NAND_CMD_STATUS);
write_cmd_reg(nand, command);
- while (!w90p910_check_rb(nand))
+ while (!nuc900_check_rb(nand))
;
return;
@@ -241,7 +241,7 @@ static void w90p910_nand_command_lp(struct mtd_info *mtd,
}
-static void w90p910_nand_enable(struct w90p910_nand *nand)
+static void nuc900_nand_enable(struct nuc900_nand *nand)
{
unsigned int val;
spin_lock(&nand->lock);
@@ -262,37 +262,37 @@ static void w90p910_nand_enable(struct w90p910_nand *nand)
spin_unlock(&nand->lock);
}
-static int __devinit w90p910_nand_probe(struct platform_device *pdev)
+static int __devinit nuc900_nand_probe(struct platform_device *pdev)
{
- struct w90p910_nand *w90p910_nand;
+ struct nuc900_nand *nuc900_nand;
struct nand_chip *chip;
int retval;
struct resource *res;
retval = 0;
- w90p910_nand = kzalloc(sizeof(struct w90p910_nand), GFP_KERNEL);
- if (!w90p910_nand)
+ nuc900_nand = kzalloc(sizeof(struct nuc900_nand), GFP_KERNEL);
+ if (!nuc900_nand)
return -ENOMEM;
- chip = &(w90p910_nand->chip);
+ chip = &(nuc900_nand->chip);
- w90p910_nand->mtd.priv = chip;
- w90p910_nand->mtd.owner = THIS_MODULE;
- spin_lock_init(&w90p910_nand->lock);
+ nuc900_nand->mtd.priv = chip;
+ nuc900_nand->mtd.owner = THIS_MODULE;
+ spin_lock_init(&nuc900_nand->lock);
- w90p910_nand->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(w90p910_nand->clk)) {
+ nuc900_nand->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(nuc900_nand->clk)) {
retval = -ENOENT;
goto fail1;
}
- clk_enable(w90p910_nand->clk);
-
- chip->cmdfunc = w90p910_nand_command_lp;
- chip->dev_ready = w90p910_nand_devready;
- chip->read_byte = w90p910_nand_read_byte;
- chip->write_buf = w90p910_nand_write_buf;
- chip->read_buf = w90p910_nand_read_buf;
- chip->verify_buf = w90p910_verify_buf;
+ clk_enable(nuc900_nand->clk);
+
+ chip->cmdfunc = nuc900_nand_command_lp;
+ chip->dev_ready = nuc900_nand_devready;
+ chip->read_byte = nuc900_nand_read_byte;
+ chip->write_buf = nuc900_nand_write_buf;
+ chip->read_buf = nuc900_nand_read_buf;
+ chip->verify_buf = nuc900_verify_buf;
chip->chip_delay = 50;
chip->options = 0;
chip->ecc.mode = NAND_ECC_SOFT;
@@ -308,75 +308,75 @@ static int __devinit w90p910_nand_probe(struct platform_device *pdev)
goto fail1;
}
- w90p910_nand->reg = ioremap(res->start, resource_size(res));
- if (!w90p910_nand->reg) {
+ nuc900_nand->reg = ioremap(res->start, resource_size(res));
+ if (!nuc900_nand->reg) {
retval = -ENOMEM;
goto fail2;
}
- w90p910_nand_enable(w90p910_nand);
+ nuc900_nand_enable(nuc900_nand);
- if (nand_scan(&(w90p910_nand->mtd), 1)) {
+ if (nand_scan(&(nuc900_nand->mtd), 1)) {
retval = -ENXIO;
goto fail3;
}
- add_mtd_partitions(&(w90p910_nand->mtd), partitions,
+ add_mtd_partitions(&(nuc900_nand->mtd), partitions,
ARRAY_SIZE(partitions));
- platform_set_drvdata(pdev, w90p910_nand);
+ platform_set_drvdata(pdev, nuc900_nand);
return retval;
-fail3: iounmap(w90p910_nand->reg);
+fail3: iounmap(nuc900_nand->reg);
fail2: release_mem_region(res->start, resource_size(res));
-fail1: kfree(w90p910_nand);
+fail1: kfree(nuc900_nand);
return retval;
}
-static int __devexit w90p910_nand_remove(struct platform_device *pdev)
+static int __devexit nuc900_nand_remove(struct platform_device *pdev)
{
- struct w90p910_nand *w90p910_nand = platform_get_drvdata(pdev);
+ struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev);
struct resource *res;
- iounmap(w90p910_nand->reg);
+ iounmap(nuc900_nand->reg);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
- clk_disable(w90p910_nand->clk);
- clk_put(w90p910_nand->clk);
+ clk_disable(nuc900_nand->clk);
+ clk_put(nuc900_nand->clk);
- kfree(w90p910_nand);
+ kfree(nuc900_nand);
platform_set_drvdata(pdev, NULL);
return 0;
}
-static struct platform_driver w90p910_nand_driver = {
- .probe = w90p910_nand_probe,
- .remove = __devexit_p(w90p910_nand_remove),
+static struct platform_driver nuc900_nand_driver = {
+ .probe = nuc900_nand_probe,
+ .remove = __devexit_p(nuc900_nand_remove),
.driver = {
- .name = "w90p910-fmi",
+ .name = "nuc900-fmi",
.owner = THIS_MODULE,
},
};
-static int __init w90p910_nand_init(void)
+static int __init nuc900_nand_init(void)
{
- return platform_driver_register(&w90p910_nand_driver);
+ return platform_driver_register(&nuc900_nand_driver);
}
-static void __exit w90p910_nand_exit(void)
+static void __exit nuc900_nand_exit(void)
{
- platform_driver_unregister(&w90p910_nand_driver);
+ platform_driver_unregister(&nuc900_nand_driver);
}
-module_init(w90p910_nand_init);
-module_exit(w90p910_nand_exit);
+module_init(nuc900_nand_init);
+module_exit(nuc900_nand_exit);
MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
-MODULE_DESCRIPTION("w90p910 nand driver!");
+MODULE_DESCRIPTION("w90p910/NUC9xx nand driver!");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:w90p910-fmi");
+MODULE_ALIAS("platform:nuc900-fmi");
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 7545568fce47..ee87325c7712 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -292,11 +292,14 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
u32 *p = (u32 *)buf;
/* take care of subpage reads */
- for (; len % 4 != 0; ) {
- *buf++ = __raw_readb(info->nand.IO_ADDR_R);
- len--;
+ if (len % 4) {
+ if (info->nand.options & NAND_BUSWIDTH_16)
+ omap_read_buf16(mtd, buf, len % 4);
+ else
+ omap_read_buf8(mtd, buf, len % 4);
+ p = (u32 *) (buf + len % 4);
+ len -= len % 4;
}
- p = (u32 *) buf;
/* configure and start prefetch transfer */
ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x0);
@@ -502,7 +505,7 @@ static void omap_write_buf_dma_pref(struct mtd_info *mtd,
omap_write_buf_pref(mtd, buf, len);
else
/* start transfer in DMA mode */
- omap_nand_dma_transfer(mtd, buf, len, 0x1);
+ omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
}
/**
@@ -1028,7 +1031,8 @@ out_free_info:
static int omap_nand_remove(struct platform_device *pdev)
{
struct mtd_info *mtd = platform_get_drvdata(pdev);
- struct omap_nand_info *info = mtd->priv;
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
platform_set_drvdata(pdev, NULL);
if (use_dma)
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index d60fc5719fef..f4444fe960a1 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -80,6 +80,7 @@ static int __init orion_nand_probe(struct platform_device *pdev)
struct mtd_info *mtd;
struct nand_chip *nc;
struct orion_nand_data *board;
+ struct resource *res;
void __iomem *io_base;
int ret = 0;
#ifdef CONFIG_MTD_PARTITIONS
@@ -95,8 +96,13 @@ static int __init orion_nand_probe(struct platform_device *pdev)
}
mtd = (struct mtd_info *)(nc + 1);
- io_base = ioremap(pdev->resource[0].start,
- pdev->resource[0].end - pdev->resource[0].start + 1);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENODEV;
+ goto no_res;
+ }
+
+ io_base = ioremap(res->start, resource_size(res));
if (!io_base) {
printk(KERN_ERR "orion_nand: ioremap failed\n");
ret = -EIO;
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index a8b9376cf324..090a05c12cbe 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -209,7 +209,7 @@ static int __devexit pasemi_nand_remove(struct of_device *ofdev)
return 0;
}
-static struct of_device_id pasemi_nand_match[] =
+static const struct of_device_id pasemi_nand_match[] =
{
{
.compatible = "pasemi,localbus-nand",
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 5d55152162cf..e02fa4f0e3c9 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -1320,6 +1320,17 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
goto fail_free_irq;
}
+ if (mtd_has_cmdlinepart()) {
+ static const char *probes[] = { "cmdlinepart", NULL };
+ struct mtd_partition *parts;
+ int nr_parts;
+
+ nr_parts = parse_mtd_partitions(mtd, probes, &parts, 0);
+
+ if (nr_parts)
+ return add_mtd_partitions(mtd, parts, nr_parts);
+ }
+
return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);
fail_free_irq:
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
new file mode 100644
index 000000000000..dd5158cc2256
--- /dev/null
+++ b/drivers/mtd/nand/r852.c
@@ -0,0 +1,1140 @@
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * driver for Ricoh xD readers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <asm/byteorder.h>
+#include <linux/sched.h>
+#include "sm_common.h"
+#include "r852.h"
+
+
+static int r852_enable_dma = 1;
+module_param(r852_enable_dma, bool, S_IRUGO);
+MODULE_PARM_DESC(r852_enable_dma, "Enable usage of the DMA (default)");
+
+static int debug;
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug level (0-2)");
+
+/* read register */
+static inline uint8_t r852_read_reg(struct r852_device *dev, int address)
+{
+ uint8_t reg = readb(dev->mmio + address);
+ return reg;
+}
+
+/* write register */
+static inline void r852_write_reg(struct r852_device *dev,
+ int address, uint8_t value)
+{
+ writeb(value, dev->mmio + address);
+ mmiowb();
+}
+
+
+/* read dword sized register */
+static inline uint32_t r852_read_reg_dword(struct r852_device *dev, int address)
+{
+ uint32_t reg = le32_to_cpu(readl(dev->mmio + address));
+ return reg;
+}
+
+/* write dword sized register */
+static inline void r852_write_reg_dword(struct r852_device *dev,
+ int address, uint32_t value)
+{
+ writel(cpu_to_le32(value), dev->mmio + address);
+ mmiowb();
+}
+
+/* returns pointer to our private structure */
+static inline struct r852_device *r852_get_dev(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = (struct nand_chip *)mtd->priv;
+ return (struct r852_device *)chip->priv;
+}
+
+
+/* check if controller supports dma */
+static void r852_dma_test(struct r852_device *dev)
+{
+ dev->dma_usable = (r852_read_reg(dev, R852_DMA_CAP) &
+ (R852_DMA1 | R852_DMA2)) == (R852_DMA1 | R852_DMA2);
+
+ if (!dev->dma_usable)
+ message("Non dma capable device detected, dma disabled");
+
+ if (!r852_enable_dma) {
+ message("disabling dma on user request");
+ dev->dma_usable = 0;
+ }
+}
+
+/*
+ * Enable dma. Enables either the first or the second stage of the DMA.
+ * Expects dev->dma_dir and dev->dma_state to be set
+ */
+static void r852_dma_enable(struct r852_device *dev)
+{
+ uint8_t dma_reg, dma_irq_reg;
+
+ /* Set up dma settings */
+ dma_reg = r852_read_reg_dword(dev, R852_DMA_SETTINGS);
+ dma_reg &= ~(R852_DMA_READ | R852_DMA_INTERNAL | R852_DMA_MEMORY);
+
+ if (dev->dma_dir)
+ dma_reg |= R852_DMA_READ;
+
+ if (dev->dma_state == DMA_INTERNAL) {
+ dma_reg |= R852_DMA_INTERNAL;
+ /* Precaution to make sure HW doesn't write */
+ /* to random kernel memory */
+ r852_write_reg_dword(dev, R852_DMA_ADDR,
+ cpu_to_le32(dev->phys_bounce_buffer));
+ } else {
+ dma_reg |= R852_DMA_MEMORY;
+ r852_write_reg_dword(dev, R852_DMA_ADDR,
+ cpu_to_le32(dev->phys_dma_addr));
+ }
+
+ /* Precaution: make sure write reached the device */
+ r852_read_reg_dword(dev, R852_DMA_ADDR);
+
+ r852_write_reg_dword(dev, R852_DMA_SETTINGS, dma_reg);
+
+ /* Set dma irq */
+ dma_irq_reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE,
+ dma_irq_reg |
+ R852_DMA_IRQ_INTERNAL |
+ R852_DMA_IRQ_ERROR |
+ R852_DMA_IRQ_MEMORY);
+}
+
+/*
+ * Disable dma, called from the interrupt handler, which specifies
+ * success of the operation via 'error' argument
+ */
+static void r852_dma_done(struct r852_device *dev, int error)
+{
+ WARN_ON(dev->dma_stage == 0);
+
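+	/* ack all pending DMA interrupts */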
+ r852_write_reg_dword(dev, R852_DMA_IRQ_STA,
+ r852_read_reg_dword(dev, R852_DMA_IRQ_STA));
+
+ r852_write_reg_dword(dev, R852_DMA_SETTINGS, 0);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, 0);
+
+ /* Precaution to make sure HW doesn't write to random kernel memory */
+ r852_write_reg_dword(dev, R852_DMA_ADDR,
+ cpu_to_le32(dev->phys_bounce_buffer));
+ r852_read_reg_dword(dev, R852_DMA_ADDR);
+
+ dev->dma_error = error;
+ dev->dma_stage = 0;
+
+ if (dev->phys_dma_addr && dev->phys_dma_addr != dev->phys_bounce_buffer)
+ pci_unmap_single(dev->pci_dev, dev->phys_dma_addr, R852_DMA_LEN,
+ dev->dma_dir ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
+ complete(&dev->dma_done);
+}
+
+/*
+ * Wait till dma is done; this covers both phases of it
+ */
+static int r852_dma_wait(struct r852_device *dev)
+{
+ long timeout = wait_for_completion_timeout(&dev->dma_done,
+ msecs_to_jiffies(1000));
+ if (!timeout) {
+ dbg("timeout waiting for DMA interrupt");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/*
+ * Read/Write one sector (512 bytes) using dma. Only whole sectors can be
+ * transferred this way.
+ */
+static void r852_do_dma(struct r852_device *dev, uint8_t *buf, int do_read)
+{
+ int bounce = 0;
+ unsigned long flags;
+ int error;
+
+ dev->dma_error = 0;
+
+ /* Set dma direction */
+ dev->dma_dir = do_read;
+ dev->dma_stage = 1;
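+	/* dma_stage tracks the two phases of the transfer; it is advanced by
+	   the interrupt handler and reset to 0 in r852_dma_done */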
+
+ dbg_verbose("doing dma %s ", do_read ? "read" : "write");
+
+	/* Set initial dma state: for reads, first fill the on-board buffer
+	   from the device; for writes, first fill the buffer from memory */
+ dev->dma_state = do_read ? DMA_INTERNAL : DMA_MEMORY;
+
+	/* if the incoming buffer is not 512 byte aligned, use the bounce buffer */
+ if ((unsigned long)buf & (R852_DMA_LEN-1))
+ bounce = 1;
+
+ if (!bounce) {
+ dev->phys_dma_addr = pci_map_single(dev->pci_dev, (void *)buf,
+ R852_DMA_LEN,
+ (do_read ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE));
+
+ if (pci_dma_mapping_error(dev->pci_dev, dev->phys_dma_addr))
+ bounce = 1;
+ }
+
+ if (bounce) {
+ dbg_verbose("dma: using bounce buffer");
+ dev->phys_dma_addr = dev->phys_bounce_buffer;
+ if (!do_read)
+ memcpy(dev->bounce_buffer, buf, R852_DMA_LEN);
+ }
+
+ /* Enable DMA */
+ spin_lock_irqsave(&dev->irqlock, flags);
+ r852_dma_enable(dev);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+
+ /* Wait till complete */
+ error = r852_dma_wait(dev);
+
+ if (error) {
+ r852_dma_done(dev, error);
+ return;
+ }
+
+ if (do_read && bounce)
+ memcpy((void *)buf, dev->bounce_buffer, R852_DMA_LEN);
+}
+
+/*
+ * Program data lines of the nand chip to send data to it
+ */
+void r852_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+ uint32_t reg;
+
+ /* Don't allow any access to hardware if we suspect card removal */
+ if (dev->card_unstable)
+ return;
+
+ /* Special case for whole sector read */
+ if (len == R852_DMA_LEN && dev->dma_usable) {
+ r852_do_dma(dev, (uint8_t *)buf, 0);
+ return;
+ }
+
+	/* write DWORD chunks - faster */
+	while (len >= 4) {
+ reg = buf[0] | buf[1] << 8 | buf[2] << 16 | buf[3] << 24;
+ r852_write_reg_dword(dev, R852_DATALINE, reg);
+ buf += 4;
+ len -= 4;
+
+ }
+
+ /* write rest */
+ while (len)
+ r852_write_reg(dev, R852_DATALINE, *buf++);
+}
+
+/*
+ * Read data lines of the nand chip to retrieve data
+ */
+void r852_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+ uint32_t reg;
+
+ if (dev->card_unstable) {
+		/* since we can't signal an error here, at least return
+		   a predictable buffer */
+ memset(buf, 0, len);
+ return;
+ }
+
+ /* special case for whole sector read */
+ if (len == R852_DMA_LEN && dev->dma_usable) {
+ r852_do_dma(dev, buf, 1);
+ return;
+ }
+
+ /* read in dword sized chunks */
+ while (len >= 4) {
+
+ reg = r852_read_reg_dword(dev, R852_DATALINE);
+ *buf++ = reg & 0xFF;
+ *buf++ = (reg >> 8) & 0xFF;
+ *buf++ = (reg >> 16) & 0xFF;
+ *buf++ = (reg >> 24) & 0xFF;
+ len -= 4;
+ }
+
+	/* read the rest byte by byte */
+ while (len--)
+ *buf++ = r852_read_reg(dev, R852_DATALINE);
+}
+
+/*
+ * Read one byte from nand chip
+ */
+static uint8_t r852_read_byte(struct mtd_info *mtd)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+
+ /* Same problem as in r852_read_buf.... */
+ if (dev->card_unstable)
+ return 0;
+
+ return r852_read_reg(dev, R852_DATALINE);
+}
+
+
+/*
+ * Read the buffer back to verify it
+ */
+int r852_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+
+ /* We can't be sure about anything here... */
+ if (dev->card_unstable)
+ return -1;
+
+ /* This will never happen, unless you wired up a nand chip
+ with > 512 bytes page size to the reader */
+ if (len > SM_SECTOR_SIZE)
+ return 0;
+
+ r852_read_buf(mtd, dev->tmp_buffer, len);
+ return memcmp(buf, dev->tmp_buffer, len);
+}
+
+/*
+ * Control several chip lines & send commands
+ */
+void r852_cmdctl(struct mtd_info *mtd, int dat, unsigned int ctrl)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+
+ if (dev->card_unstable)
+ return;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+
+ dev->ctlreg &= ~(R852_CTL_DATA | R852_CTL_COMMAND |
+ R852_CTL_ON | R852_CTL_CARDENABLE);
+
+ if (ctrl & NAND_ALE)
+ dev->ctlreg |= R852_CTL_DATA;
+
+ if (ctrl & NAND_CLE)
+ dev->ctlreg |= R852_CTL_COMMAND;
+
+ if (ctrl & NAND_NCE)
+ dev->ctlreg |= (R852_CTL_CARDENABLE | R852_CTL_ON);
+ else
+ dev->ctlreg &= ~R852_CTL_WRITE;
+
+	/* when a write is started, enable write access */
+ if (dat == NAND_CMD_ERASE1)
+ dev->ctlreg |= R852_CTL_WRITE;
+
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ }
+
+ /* HACK: NAND_CMD_SEQIN is called without NAND_CTRL_CHANGE, but we need
+ to set write mode */
+ if (dat == NAND_CMD_SEQIN && (dev->ctlreg & R852_CTL_COMMAND)) {
+ dev->ctlreg |= R852_CTL_WRITE;
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ }
+
+ if (dat != NAND_CMD_NONE)
+ r852_write_reg(dev, R852_DATALINE, dat);
+}
+
+/*
+ * Wait till the card is ready.
+ * Based on nand_wait, but also reports DMA errors
+ */
+int r852_wait(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ struct r852_device *dev = (struct r852_device *)chip->priv;
+
+ unsigned long timeout;
+ int status;
+
+ timeout = jiffies + (chip->state == FL_ERASING ?
+ msecs_to_jiffies(400) : msecs_to_jiffies(20));
+
+ while (time_before(jiffies, timeout))
+ if (chip->dev_ready(mtd))
+ break;
+
+ chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ status = (int)chip->read_byte(mtd);
+
+	/* Unfortunately, there is no way to report a detailed error status... */
+ if (dev->dma_error) {
+ status |= NAND_STATUS_FAIL;
+ dev->dma_error = 0;
+ }
+ return status;
+}
+
+/*
+ * Check if card is ready
+ */
+
+int r852_ready(struct mtd_info *mtd)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+ return !(r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_BUSY);
+}
+
+
+/*
+ * Set ECC engine mode
+*/
+
+void r852_ecc_hwctl(struct mtd_info *mtd, int mode)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+
+ if (dev->card_unstable)
+ return;
+
+ switch (mode) {
+ case NAND_ECC_READ:
+ case NAND_ECC_WRITE:
+ /* enable ecc generation/check*/
+ dev->ctlreg |= R852_CTL_ECC_ENABLE;
+
+ /* flush ecc buffer */
+ r852_write_reg(dev, R852_CTL,
+ dev->ctlreg | R852_CTL_ECC_ACCESS);
+
+ r852_read_reg_dword(dev, R852_DATALINE);
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ return;
+
+ case NAND_ECC_READSYN:
+ /* disable ecc generation */
+ dev->ctlreg &= ~R852_CTL_ECC_ENABLE;
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ }
+}
+
+/*
+ * Calculate ECC, only used for writes
+ */
+
+int r852_ecc_calculate(struct mtd_info *mtd, const uint8_t *dat,
+ uint8_t *ecc_code)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+ struct sm_oob *oob = (struct sm_oob *)ecc_code;
+ uint32_t ecc1, ecc2;
+
+ if (dev->card_unstable)
+ return 0;
+
+ dev->ctlreg &= ~R852_CTL_ECC_ENABLE;
+ r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);
+
+ ecc1 = r852_read_reg_dword(dev, R852_DATALINE);
+ ecc2 = r852_read_reg_dword(dev, R852_DATALINE);
+
+ oob->ecc1[0] = (ecc1) & 0xFF;
+ oob->ecc1[1] = (ecc1 >> 8) & 0xFF;
+ oob->ecc1[2] = (ecc1 >> 16) & 0xFF;
+
+ oob->ecc2[0] = (ecc2) & 0xFF;
+ oob->ecc2[1] = (ecc2 >> 8) & 0xFF;
+ oob->ecc2[2] = (ecc2 >> 16) & 0xFF;
+
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ return 0;
+}
+
+/*
+ * Correct the data using ECC, hw did almost everything for us
+ */
+
+int r852_ecc_correct(struct mtd_info *mtd, uint8_t *dat,
+ uint8_t *read_ecc, uint8_t *calc_ecc)
+{
+ uint16_t ecc_reg;
+ uint8_t ecc_status, err_byte;
+ int i, error = 0;
+
+ struct r852_device *dev = r852_get_dev(mtd);
+
+ if (dev->card_unstable)
+ return 0;
+
+ r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);
+ ecc_reg = r852_read_reg_dword(dev, R852_DATALINE);
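+	/* the dword holds one 16 bit syndrome per 256 byte half of the sector:
+	   low byte = error byte offset, high byte = bit position + status flags */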
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+
+ for (i = 0 ; i <= 1 ; i++) {
+
+ ecc_status = (ecc_reg >> 8) & 0xFF;
+
+ /* ecc uncorrectable error */
+ if (ecc_status & R852_ECC_FAIL) {
+ dbg("ecc: unrecoverable error, in half %d", i);
+ error = -1;
+ goto exit;
+ }
+
+ /* correctable error */
+ if (ecc_status & R852_ECC_CORRECTABLE) {
+
+ err_byte = ecc_reg & 0xFF;
+ dbg("ecc: recoverable error, "
+ "in half %d, byte %d, bit %d", i,
+ err_byte, ecc_status & R852_ECC_ERR_BIT_MSK);
+
+ dat[err_byte] ^=
+ 1 << (ecc_status & R852_ECC_ERR_BIT_MSK);
+ error++;
+ }
+
+ dat += 256;
+ ecc_reg >>= 16;
+ }
+exit:
+ return error;
+}
+
+/*
+ * This is copy of nand_read_oob_std
+ * nand_read_oob_syndrome assumes we can send column address - we can't
+ */
+static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page, int sndcmd)
+{
+ if (sndcmd) {
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+ sndcmd = 0;
+ }
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ return sndcmd;
+}
+
+/*
+ * Start the nand engine
+ */
+
+void r852_engine_enable(struct r852_device *dev)
+{
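+	/* the order of the two writes below appears to depend on an
+	   undocumented status bit (R852_HW_UNKNOWN) */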
+ if (r852_read_reg_dword(dev, R852_HW) & R852_HW_UNKNOWN) {
+ r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
+ r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED);
+ } else {
+ r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED);
+ r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
+ }
+ msleep(300);
+ r852_write_reg(dev, R852_CTL, 0);
+}
+
+
+/*
+ * Stop the nand engine
+ */
+
+void r852_engine_disable(struct r852_device *dev)
+{
+ r852_write_reg_dword(dev, R852_HW, 0);
+ r852_write_reg(dev, R852_CTL, R852_CTL_RESET);
+}
+
+/*
+ * Test if card is present
+ */
+
+void r852_card_update_present(struct r852_device *dev)
+{
+ unsigned long flags;
+ uint8_t reg;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ reg = r852_read_reg(dev, R852_CARD_STA);
+ dev->card_detected = !!(reg & R852_CARD_STA_PRESENT);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+}
+
+/*
+ * Update card detection IRQ state according to current card state
+ * which is read in r852_card_update_present
+ */
+void r852_update_card_detect(struct r852_device *dev)
+{
+ int card_detect_reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
+ dev->card_unstable = 0;
+
+ card_detect_reg &= ~(R852_CARD_IRQ_REMOVE | R852_CARD_IRQ_INSERT);
+ card_detect_reg |= R852_CARD_IRQ_GENABLE;
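+	/* arm the interrupt for the opposite transition of the current state:
+	   watch for removal if a card is present, for insertion otherwise */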
+
+ card_detect_reg |= dev->card_detected ?
+ R852_CARD_IRQ_REMOVE : R852_CARD_IRQ_INSERT;
+
+ r852_write_reg(dev, R852_CARD_IRQ_ENABLE, card_detect_reg);
+}
+
+ssize_t r852_media_type_show(struct device *sys_dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = container_of(sys_dev, struct mtd_info, dev);
+ struct r852_device *dev = r852_get_dev(mtd);
+ char *data = dev->sm ? "smartmedia" : "xd";
+
+ strcpy(buf, data);
+ return strlen(data);
+}
+
+DEVICE_ATTR(media_type, S_IRUGO, r852_media_type_show, NULL);
+
+
+/* Detect properties of card in slot */
+void r852_update_media_status(struct r852_device *dev)
+{
+ uint8_t reg;
+ unsigned long flags;
+ int readonly;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ if (!dev->card_detected) {
+ message("card removed");
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ return ;
+ }
+
+ readonly = r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_RO;
+ reg = r852_read_reg(dev, R852_DMA_CAP);
+ dev->sm = (reg & (R852_DMA1 | R852_DMA2)) && (reg & R852_SMBIT);
+
+ message("detected %s %s card in slot",
+ dev->sm ? "SmartMedia" : "xD",
+ readonly ? "readonly" : "writeable");
+
+ dev->readonly = readonly;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+}
+
+/*
+ * Register the nand device
+ * Called when the card is detected
+ */
+int r852_register_nand_device(struct r852_device *dev)
+{
+ dev->mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
+
+ if (!dev->mtd)
+ goto error1;
+
+ WARN_ON(dev->card_registred);
+
+ dev->mtd->owner = THIS_MODULE;
+ dev->mtd->priv = dev->chip;
+ dev->mtd->dev.parent = &dev->pci_dev->dev;
+
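+	/* a write protected card is registered as a read only NAND device */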
+ if (dev->readonly)
+ dev->chip->options |= NAND_ROM;
+
+ r852_engine_enable(dev);
+
+ if (sm_register_device(dev->mtd))
+ goto error2;
+
+ if (device_create_file(&dev->mtd->dev, &dev_attr_media_type))
+ message("can't create media type sysfs attribute");
+
+ dev->card_registred = 1;
+ return 0;
+error2:
+ kfree(dev->mtd);
+error1:
+ /* Force card redetect */
+ dev->card_detected = 0;
+ return -1;
+}
+
+/*
+ * Unregister the card
+ */
+
+void r852_unregister_nand_device(struct r852_device *dev)
+{
+ if (!dev->card_registred)
+ return;
+
+ device_remove_file(&dev->mtd->dev, &dev_attr_media_type);
+ nand_release(dev->mtd);
+ r852_engine_disable(dev);
+ dev->card_registred = 0;
+ kfree(dev->mtd);
+ dev->mtd = NULL;
+}
+
+/* Card state updater */
+void r852_card_detect_work(struct work_struct *work)
+{
+ struct r852_device *dev =
+ container_of(work, struct r852_device, card_detect_work.work);
+
+ r852_card_update_present(dev);
+ dev->card_unstable = 0;
+
+ /* False alarm */
+ if (dev->card_detected == dev->card_registred)
+ goto exit;
+
+ /* Read media properties */
+ r852_update_media_status(dev);
+
+ /* Register the card */
+ if (dev->card_detected)
+ r852_register_nand_device(dev);
+ else
+ r852_unregister_nand_device(dev);
+exit:
+ /* Update detection logic */
+ r852_update_card_detect(dev);
+}
+
+/* Ack + disable IRQ generation */
+static void r852_disable_irqs(struct r852_device *dev)
+{
+ uint8_t reg;
+ reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
+ r852_write_reg(dev, R852_CARD_IRQ_ENABLE, reg & ~R852_CARD_IRQ_MASK);
+
+ reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE,
+ reg & ~R852_DMA_IRQ_MASK);
+
+ r852_write_reg(dev, R852_CARD_IRQ_STA, R852_CARD_IRQ_MASK);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_STA, R852_DMA_IRQ_MASK);
+}
+
+/* Interrupt handler */
+static irqreturn_t r852_irq(int irq, void *data)
+{
+ struct r852_device *dev = (struct r852_device *)data;
+
+ uint8_t card_status, dma_status;
+ unsigned long flags;
+ irqreturn_t ret = IRQ_NONE;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+
+	/* We can receive a shared interrupt while pci is suspended;
+	   in that case reads will return 0xFFFFFFFF.... */
+ if (dev->insuspend)
+ goto out;
+
+ /* handle card detection interrupts first */
+ card_status = r852_read_reg(dev, R852_CARD_IRQ_STA);
+ r852_write_reg(dev, R852_CARD_IRQ_STA, card_status);
+
+ if (card_status & (R852_CARD_IRQ_INSERT|R852_CARD_IRQ_REMOVE)) {
+
+ ret = IRQ_HANDLED;
+ dev->card_detected = !!(card_status & R852_CARD_IRQ_INSERT);
+
+		/* we shouldn't receive any interrupts while we wait for the
+		   card to settle */
+ WARN_ON(dev->card_unstable);
+
+ /* disable irqs while card is unstable */
+		/* this will timeout DMA if active, but better than garbage */
+ r852_disable_irqs(dev);
+
+ if (dev->card_unstable)
+ goto out;
+
+		/* let the card state settle a bit, then do the work */
+ dev->card_unstable = 1;
+ queue_delayed_work(dev->card_workqueue,
+ &dev->card_detect_work, msecs_to_jiffies(100));
+ goto out;
+ }
+
+
+ /* Handle dma interrupts */
+ dma_status = r852_read_reg_dword(dev, R852_DMA_IRQ_STA);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_STA, dma_status);
+
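+	/* the DMA transfer is done in two stages, each raising its own
+	   interrupt; advance dma_stage and either start the second half
+	   or report completion */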
+ if (dma_status & R852_DMA_IRQ_MASK) {
+
+ ret = IRQ_HANDLED;
+
+ if (dma_status & R852_DMA_IRQ_ERROR) {
+			dbg("received dma error IRQ");
+ r852_dma_done(dev, -EIO);
+ goto out;
+ }
+
+		/* received DMA interrupt out of nowhere? */
+ WARN_ON_ONCE(dev->dma_stage == 0);
+
+ if (dev->dma_stage == 0)
+ goto out;
+
+ /* done device access */
+ if (dev->dma_state == DMA_INTERNAL &&
+ (dma_status & R852_DMA_IRQ_INTERNAL)) {
+
+ dev->dma_state = DMA_MEMORY;
+ dev->dma_stage++;
+ }
+
+ /* done memory DMA */
+ if (dev->dma_state == DMA_MEMORY &&
+ (dma_status & R852_DMA_IRQ_MEMORY)) {
+ dev->dma_state = DMA_INTERNAL;
+ dev->dma_stage++;
+ }
+
+ /* Enable 2nd half of dma dance */
+ if (dev->dma_stage == 2)
+ r852_dma_enable(dev);
+
+ /* Operation done */
+ if (dev->dma_stage == 3)
+ r852_dma_done(dev, 0);
+ goto out;
+ }
+
+ /* Handle unknown interrupts */
+ if (dma_status)
+ dbg("bad dma IRQ status = %x", dma_status);
+
+ if (card_status & ~R852_CARD_STA_CD)
+ dbg("strange card status = %x", card_status);
+
+out:
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ return ret;
+}
+
+int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
+{
+ int error;
+ struct nand_chip *chip;
+ struct r852_device *dev;
+
+ /* pci initialization */
+ error = pci_enable_device(pci_dev);
+
+ if (error)
+ goto error1;
+
+ pci_set_master(pci_dev);
+
+ error = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
+ if (error)
+ goto error2;
+
+ error = pci_request_regions(pci_dev, DRV_NAME);
+
+ if (error)
+ goto error3;
+
+ error = -ENOMEM;
+
+ /* init nand chip, but register it only on card insert */
+ chip = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);
+
+ if (!chip)
+ goto error4;
+
+ /* commands */
+ chip->cmd_ctrl = r852_cmdctl;
+ chip->waitfunc = r852_wait;
+ chip->dev_ready = r852_ready;
+
+ /* I/O */
+ chip->read_byte = r852_read_byte;
+ chip->read_buf = r852_read_buf;
+ chip->write_buf = r852_write_buf;
+ chip->verify_buf = r852_verify_buf;
+
+ /* ecc */
+ chip->ecc.mode = NAND_ECC_HW_SYNDROME;
+ chip->ecc.size = R852_DMA_LEN;
+ chip->ecc.bytes = SM_OOB_SIZE;
+ chip->ecc.hwctl = r852_ecc_hwctl;
+ chip->ecc.calculate = r852_ecc_calculate;
+ chip->ecc.correct = r852_ecc_correct;
+
+ /* TODO: hack */
+ chip->ecc.read_oob = r852_read_oob;
+
+ /* init our device structure */
+ dev = kzalloc(sizeof(struct r852_device), GFP_KERNEL);
+
+ if (!dev)
+ goto error5;
+
+ chip->priv = dev;
+ dev->chip = chip;
+ dev->pci_dev = pci_dev;
+ pci_set_drvdata(pci_dev, dev);
+
+ dev->bounce_buffer = pci_alloc_consistent(pci_dev, R852_DMA_LEN,
+ &dev->phys_bounce_buffer);
+
+ if (!dev->bounce_buffer)
+ goto error6;
+
+
+ error = -ENODEV;
+ dev->mmio = pci_ioremap_bar(pci_dev, 0);
+
+ if (!dev->mmio)
+ goto error7;
+
+ error = -ENOMEM;
+ dev->tmp_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);
+
+ if (!dev->tmp_buffer)
+ goto error8;
+
+ init_completion(&dev->dma_done);
+
+ dev->card_workqueue = create_freezeable_workqueue(DRV_NAME);
+
+ if (!dev->card_workqueue)
+ goto error9;
+
+ INIT_DELAYED_WORK(&dev->card_detect_work, r852_card_detect_work);
+
+	/* shutdown everything - precaution */
+ r852_engine_disable(dev);
+ r852_disable_irqs(dev);
+
+ r852_dma_test(dev);
+
+ /*register irq handler*/
+ error = -ENODEV;
+ if (request_irq(pci_dev->irq, &r852_irq, IRQF_SHARED,
+ DRV_NAME, dev))
+ goto error10;
+
+ dev->irq = pci_dev->irq;
+ spin_lock_init(&dev->irqlock);
+
+ /* kick initial present test */
+ dev->card_detected = 0;
+ r852_card_update_present(dev);
+ queue_delayed_work(dev->card_workqueue,
+ &dev->card_detect_work, 0);
+
+
+	printk(KERN_NOTICE DRV_NAME ": driver loaded successfully\n");
+ return 0;
+
+error10:
+ destroy_workqueue(dev->card_workqueue);
+error9:
+ kfree(dev->tmp_buffer);
+error8:
+ pci_iounmap(pci_dev, dev->mmio);
+error7:
+ pci_free_consistent(pci_dev, R852_DMA_LEN,
+ dev->bounce_buffer, dev->phys_bounce_buffer);
+error6:
+ kfree(dev);
+error5:
+ kfree(chip);
+error4:
+ pci_release_regions(pci_dev);
+error3:
+error2:
+ pci_disable_device(pci_dev);
+error1:
+ return error;
+}
+
+void r852_remove(struct pci_dev *pci_dev)
+{
+ struct r852_device *dev = pci_get_drvdata(pci_dev);
+
+ /* Stop detect workqueue -
+ we are going to unregister the device anyway*/
+ cancel_delayed_work_sync(&dev->card_detect_work);
+ destroy_workqueue(dev->card_workqueue);
+
+ /* Unregister the device, this might make more IO */
+ r852_unregister_nand_device(dev);
+
+ /* Stop interrupts */
+ r852_disable_irqs(dev);
+ synchronize_irq(dev->irq);
+ free_irq(dev->irq, dev);
+
+ /* Cleanup */
+ kfree(dev->tmp_buffer);
+ pci_iounmap(pci_dev, dev->mmio);
+ pci_free_consistent(pci_dev, R852_DMA_LEN,
+ dev->bounce_buffer, dev->phys_bounce_buffer);
+
+ kfree(dev->chip);
+ kfree(dev);
+
+ /* Shutdown the PCI device */
+ pci_release_regions(pci_dev);
+ pci_disable_device(pci_dev);
+}
+
+void r852_shutdown(struct pci_dev *pci_dev)
+{
+ struct r852_device *dev = pci_get_drvdata(pci_dev);
+
+ cancel_delayed_work_sync(&dev->card_detect_work);
+ r852_disable_irqs(dev);
+ synchronize_irq(dev->irq);
+ pci_disable_device(pci_dev);
+}
+
+#ifdef CONFIG_PM
+int r852_suspend(struct device *device)
+{
+ struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
+ unsigned long flags;
+
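+	/* refuse to suspend while the card is still selected */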
+ if (dev->ctlreg & R852_CTL_CARDENABLE)
+ return -EBUSY;
+
+ /* First make sure the detect work is gone */
+ cancel_delayed_work_sync(&dev->card_detect_work);
+
+ /* Turn off the interrupts and stop the device */
+ r852_disable_irqs(dev);
+ r852_engine_disable(dev);
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ dev->insuspend = 1;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+
+ /* At that point, even if interrupt handler is running, it will quit */
+	/* So wait for this to happen explicitly */
+ synchronize_irq(dev->irq);
+
+	/* If the card was pulled out just during suspend, which is very
+	   unlikely, we will remove it on resume; it is too late now
+	   anyway... */
+ dev->card_unstable = 0;
+
+ pci_save_state(to_pci_dev(device));
+ return pci_prepare_to_sleep(to_pci_dev(device));
+}
+
+int r852_resume(struct device *device)
+{
+ struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
+ unsigned long flags;
+
+ /* Turn on the hardware */
+ pci_back_from_sleep(to_pci_dev(device));
+ pci_restore_state(to_pci_dev(device));
+
+ r852_disable_irqs(dev);
+ r852_card_update_present(dev);
+ r852_engine_disable(dev);
+
+
+	/* Now it's safe for IRQ to run */
+ spin_lock_irqsave(&dev->irqlock, flags);
+ dev->insuspend = 0;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+
+
+ /* If card status changed, just do the work */
+ if (dev->card_detected != dev->card_registred) {
+ dbg("card was %s during low power state",
+ dev->card_detected ? "added" : "removed");
+
+ queue_delayed_work(dev->card_workqueue,
+ &dev->card_detect_work, 1000);
+ return 0;
+ }
+
+ /* Otherwise, initialize the card */
+ if (dev->card_registred) {
+ r852_engine_enable(dev);
+ dev->chip->select_chip(dev->mtd, 0);
+ dev->chip->cmdfunc(dev->mtd, NAND_CMD_RESET, -1, -1);
+ dev->chip->select_chip(dev->mtd, -1);
+ }
+
+ /* Program card detection IRQ */
+ r852_update_card_detect(dev);
+ return 0;
+}
+#else
+#define r852_suspend NULL
+#define r852_resume NULL
+#endif
+
+static const struct pci_device_id r852_pci_id_tbl[] = {
+
+ { PCI_VDEVICE(RICOH, 0x0852), },
+ { },
+};
+
+MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl);
+
+SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume);
+
+
+static struct pci_driver r852_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = r852_pci_id_tbl,
+ .probe = r852_probe,
+ .remove = r852_remove,
+ .shutdown = r852_shutdown,
+ .driver.pm = &r852_pm_ops,
+};
+
+static __init int r852_module_init(void)
+{
+ return pci_register_driver(&r852_pci_driver);
+}
+
+static void __exit r852_module_exit(void)
+{
+ pci_unregister_driver(&r852_pci_driver);
+}
+
+module_init(r852_module_init);
+module_exit(r852_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
+MODULE_DESCRIPTION("Ricoh 85xx xD/smartmedia card reader driver");
diff --git a/drivers/mtd/nand/r852.h b/drivers/mtd/nand/r852.h
new file mode 100644
index 000000000000..8096cc280c73
--- /dev/null
+++ b/drivers/mtd/nand/r852.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * driver for Ricoh xD readers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/pci.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/mtd/nand.h>
+#include <linux/spinlock.h>
+
+
+/* nand interface + ecc
+   byte write/read does one cycle on the nand data lines.
+   dword write/read does 4 cycles.
+   If R852_CTL_ECC_ACCESS is set in R852_CTL, a dword read returns the
+   results of ecc correction, if a DMA read was done before;
+   if a write was done, two dword reads return the generated ecc checksums.
+*/
+#define R852_DATALINE 0x00
+
+/* control register */
+#define R852_CTL 0x04
+#define R852_CTL_COMMAND 0x01 /* send command (#CLE)*/
+#define R852_CTL_DATA 0x02 /* read/write data (#ALE)*/
+#define R852_CTL_ON 0x04 /* only seems to control the hd led, */
+ /* but has to be set on start...*/
+#define R852_CTL_RESET 0x08 /* unknown, set only on start once*/
+#define R852_CTL_CARDENABLE 0x10 /* probably (#CE) - always set*/
+#define R852_CTL_ECC_ENABLE 0x20 /* enable ecc engine */
+#define R852_CTL_ECC_ACCESS 0x40 /* read/write ecc via reg #0*/
+#define R852_CTL_WRITE 0x80 /* set when performing writes (#WP) */
+
+/* card detection status */
+#define R852_CARD_STA 0x05
+
+#define R852_CARD_STA_CD 0x01 /* state of #CD line, same as 0x04 */
+#define R852_CARD_STA_RO 0x02 /* card is readonly */
+#define R852_CARD_STA_PRESENT 0x04 /* card is present (#CD) */
+#define R852_CARD_STA_ABSENT 0x08 /* card is absent */
+#define R852_CARD_STA_BUSY 0x80 /* card is busy - (#R/B) */
+
+/* card detection irq status & enable*/
+#define R852_CARD_IRQ_STA 0x06 /* IRQ status */
+#define R852_CARD_IRQ_ENABLE 0x07 /* IRQ enable */
+
+#define R852_CARD_IRQ_CD 0x01 /* fire when #CD lights, same as 0x04*/
+#define R852_CARD_IRQ_REMOVE 0x04 /* detect card removal */
+#define R852_CARD_IRQ_INSERT 0x08 /* detect card insert */
+#define R852_CARD_IRQ_UNK1 0x10 /* unknown */
+#define R852_CARD_IRQ_GENABLE 0x80 /* general enable */
+#define R852_CARD_IRQ_MASK 0x1D
+
+
+
+/* hardware enable */
+#define R852_HW 0x08
+#define R852_HW_ENABLED 0x01 /* hw enabled */
+#define R852_HW_UNKNOWN 0x80
+
+
+/* dma capabilities */
+#define R852_DMA_CAP 0x09
+#define R852_SMBIT 0x20 /* if set with bit #6 or bit #7, then */
+ /* hw is smartmedia */
+#define R852_DMA1 0x40 /* if set w/bit #7, dma is supported */
+#define R852_DMA2 0x80 /* if set w/bit #6, dma is supported */
+
+
+/* physical DMA address - 32 bit value*/
+#define R852_DMA_ADDR 0x0C
+
+
+/* dma settings */
+#define R852_DMA_SETTINGS 0x10
+#define R852_DMA_MEMORY 0x01 /* (memory <-> internal hw buffer) */
+#define R852_DMA_READ 0x02 /* 0 = write, 1 = read */
+#define R852_DMA_INTERNAL 0x04 /* (internal hw buffer <-> card) */
+
+/* dma IRQ status */
+#define R852_DMA_IRQ_STA 0x14
+
+/* dma IRQ enable */
+#define R852_DMA_IRQ_ENABLE 0x18
+
+#define R852_DMA_IRQ_MEMORY 0x01 /* (memory <-> internal hw buffer) */
+#define R852_DMA_IRQ_ERROR 0x02 /* error did happen */
+#define R852_DMA_IRQ_INTERNAL 0x04 /* (internal hw buffer <-> card) */
+#define R852_DMA_IRQ_MASK 0x07 /* mask of all IRQ bits */
+
+
+/* ECC syndrome format - a read from reg #0 returns two copies of this, one for
+   each half of the page.
+   The first byte is the error byte location, and the second the bit location + flags */
+#define R852_ECC_ERR_BIT_MSK 0x07 /* error bit location */
+#define R852_ECC_CORRECT 0x10 /* no errors - (guessed) */
+#define R852_ECC_CORRECTABLE 0x20 /* correctable error exist */
+#define R852_ECC_FAIL 0x40 /* non correctable error detected */
+
+#define R852_DMA_LEN 512
+
+#define DMA_INTERNAL 0
+#define DMA_MEMORY 1
+
+struct r852_device {
+ void __iomem *mmio; /* mmio */
+ struct mtd_info *mtd; /* mtd backpointer */
+ struct nand_chip *chip; /* nand chip backpointer */
+ struct pci_dev *pci_dev; /* pci backpointer */
+
+ /* dma area */
+ dma_addr_t phys_dma_addr; /* bus address of buffer*/
+ struct completion dma_done; /* data transfer done */
+
+ dma_addr_t phys_bounce_buffer; /* bus address of bounce buffer */
+ uint8_t *bounce_buffer; /* virtual address of bounce buffer */
+
+ int dma_dir; /* 1 = read, 0 = write */
+ int dma_stage; /* 0 - idle, 1 - first step,
+ 2 - second step */
+
+ int dma_state; /* 0 = internal, 1 = memory */
+ int dma_error; /* dma errors */
+ int dma_usable; /* is it possible to use dma */
+
+ /* card status area */
+ struct delayed_work card_detect_work;
+ struct workqueue_struct *card_workqueue;
+ int card_registred; /* card registered with mtd */
+ int card_detected; /* card detected in slot */
+	int card_unstable;		/* whether a card is inserted
+					   is not known yet */
+ int readonly; /* card is readonly */
+ int sm; /* Is card smartmedia */
+
+ /* interrupt handling */
+ spinlock_t irqlock; /* IRQ protecting lock */
+ int irq; /* irq num */
+ int insuspend; /* device is suspended */
+
+ /* misc */
+ void *tmp_buffer; /* temporary buffer */
+ uint8_t ctlreg; /* cached contents of control reg */
+};
+
+#define DRV_NAME "r852"
+
+
+#define dbg(format, ...) \
+ if (debug) \
+ printk(KERN_DEBUG DRV_NAME ": " format "\n", ## __VA_ARGS__)
+
+#define dbg_verbose(format, ...) \
+ if (debug > 1) \
+ printk(KERN_DEBUG DRV_NAME ": " format "\n", ## __VA_ARGS__)
+
+
+#define message(format, ...) \
+ printk(KERN_INFO DRV_NAME ": " format "\n", ## __VA_ARGS__)
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index fa6e9c7fe511..dc02dcd0c08f 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -957,7 +957,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
/* currently we assume we have the one resource */
res = pdev->resource;
- size = res->end - res->start + 1;
+ size = resource_size(res);
info->area = request_mem_region(res->start, size, pdev->name);
@@ -1013,7 +1013,8 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
s3c2410_nand_init_chip(info, nmtd, sets);
nmtd->scan_res = nand_scan_ident(&nmtd->mtd,
- (sets) ? sets->nr_chips : 1);
+ (sets) ? sets->nr_chips : 1,
+ NULL);
if (nmtd->scan_res == 0) {
s3c2410_nand_update_chip(info, nmtd);
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 34752fce0793..546c2f0eb2e8 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -855,7 +855,7 @@ static int __devinit flctl_probe(struct platform_device *pdev)
nand->read_word = flctl_read_word;
}
- ret = nand_scan_ident(flctl_mtd, 1);
+ ret = nand_scan_ident(flctl_mtd, 1, NULL);
if (ret)
goto err;
diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c
new file mode 100644
index 000000000000..aae0b9acd7ae
--- /dev/null
+++ b/drivers/mtd/nand/sm_common.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * Common routines & support for xD format
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/mtd/nand.h>
+#include "sm_common.h"
+
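+/* ECC placement for 512 byte page SmartMedia/xD cards; the positions
+   match the physical OOB format described by struct sm_oob */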
+static struct nand_ecclayout nand_oob_sm = {
+ .eccbytes = 6,
+ .eccpos = {8, 9, 10, 13, 14, 15},
+ .oobfree = {
+ {.offset = 0 , .length = 4}, /* reserved */
+ {.offset = 6 , .length = 2}, /* LBA1 */
+ {.offset = 11, .length = 2} /* LBA2 */
+ }
+};
+
+/* NOTE: This layout is not compatible with SmartMedia, */
+/* because the 256 byte devices have a page dependent oob layout */
+/* However it does preserve the bad block markers */
+/* If you use smftl, it will bypass this and work correctly */
+/* If you do not, then you break SmartMedia compliance anyway */
+
+static struct nand_ecclayout nand_oob_sm_small = {
+ .eccbytes = 3,
+ .eccpos = {0, 1, 2},
+ .oobfree = {
+ {.offset = 3 , .length = 2}, /* reserved */
+ {.offset = 6 , .length = 2}, /* LBA1 */
+ }
+};
+
+
+static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct mtd_oob_ops ops;
+ struct sm_oob oob;
+ int ret, error = 0;
+
+ memset(&oob, -1, SM_OOB_SIZE);
+ oob.block_status = 0x0F;
+
+ /* As long as this function is called on erase block boundaries
+ it will work correctly for 256 byte nand */
+ ops.mode = MTD_OOB_PLACE;
+ ops.ooboffs = 0;
+ ops.ooblen = mtd->oobsize;
+ ops.oobbuf = (void *)&oob;
+ ops.datbuf = NULL;
+
+
+ ret = mtd->write_oob(mtd, ofs, &ops);
+ if (ret < 0 || ops.oobretlen != SM_OOB_SIZE) {
+ printk(KERN_NOTICE
+ "sm_common: can't mark sector at %i as bad\n",
+ (int)ofs);
+ error = -EIO;
+ } else
+ mtd->ecc_stats.badblocks++;
+
+ return error;
+}
+
+
+static struct nand_flash_dev nand_smartmedia_flash_ids[] = {
+
+ /* SmartMedia */
+ {"SmartMedia 1MiB 5V", 0x6e, 256, 1, 0x1000, 0},
+ {"SmartMedia 1MiB 3,3V", 0xe8, 256, 1, 0x1000, 0},
+ {"SmartMedia 1MiB 3,3V", 0xec, 256, 1, 0x1000, 0},
+ {"SmartMedia 2MiB 3,3V", 0xea, 256, 2, 0x1000, 0},
+ {"SmartMedia 2MiB 5V", 0x64, 256, 2, 0x1000, 0},
+ {"SmartMedia 2MiB 3,3V ROM", 0x5d, 512, 2, 0x2000, NAND_ROM},
+ {"SmartMedia 4MiB 3,3V", 0xe3, 512, 4, 0x2000, 0},
+ {"SmartMedia 4MiB 3,3/5V", 0xe5, 512, 4, 0x2000, 0},
+ {"SmartMedia 4MiB 5V", 0x6b, 512, 4, 0x2000, 0},
+ {"SmartMedia 4MiB 3,3V ROM", 0xd5, 512, 4, 0x2000, NAND_ROM},
+ {"SmartMedia 8MiB 3,3V", 0xe6, 512, 8, 0x2000, 0},
+ {"SmartMedia 8MiB 3,3V ROM", 0xd6, 512, 8, 0x2000, NAND_ROM},
+
+#define XD_TYPEM (NAND_NO_AUTOINCR | NAND_BROKEN_XD)
+ /* xD / SmartMedia */
+ {"SmartMedia/xD 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0},
+ {"SmartMedia 16MiB 3,3V ROM", 0x57, 512, 16, 0x4000, NAND_ROM},
+ {"SmartMedia/xD 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0},
+ {"SmartMedia 32MiB 3,3V ROM", 0x58, 512, 32, 0x4000, NAND_ROM},
+ {"SmartMedia/xD 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0},
+ {"SmartMedia 64MiB 3,3V ROM", 0xd9, 512, 64, 0x4000, NAND_ROM},
+ {"SmartMedia/xD 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0},
+ {"SmartMedia 128MiB 3,3V ROM", 0xda, 512, 128, 0x4000, NAND_ROM},
+ {"SmartMedia/xD 256MiB 3,3V", 0x71, 512, 256, 0x4000, XD_TYPEM},
+ {"SmartMedia 256MiB 3,3V ROM", 0x5b, 512, 256, 0x4000, NAND_ROM},
+
+ /* xD only */
+ {"xD 512MiB 3,3V", 0xDC, 512, 512, 0x4000, XD_TYPEM},
+ {"xD 1GiB 3,3V", 0xD3, 512, 1024, 0x4000, XD_TYPEM},
+ {"xD 2GiB 3,3V", 0xD5, 512, 2048, 0x4000, XD_TYPEM},
+ {NULL,}
+};
+
+int sm_register_device(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = (struct nand_chip *)mtd->priv;
+ int ret;
+
+ chip->options |= NAND_SKIP_BBTSCAN;
+
+ /* Scan for card properties */
+ ret = nand_scan_ident(mtd, 1, nand_smartmedia_flash_ids);
+
+ if (ret)
+ return ret;
+
+	/* Bad block marker position */
+ chip->badblockpos = 0x05;
+ chip->badblockbits = 7;
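+	/* a block is treated as bad unless at least 7 of the 8 bits in the
+	   bad block marker byte are set */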
+ chip->block_markbad = sm_block_markbad;
+
+ /* ECC layout */
+ if (mtd->writesize == SM_SECTOR_SIZE)
+ chip->ecc.layout = &nand_oob_sm;
+ else if (mtd->writesize == SM_SMALL_PAGE)
+ chip->ecc.layout = &nand_oob_sm_small;
+ else
+ return -ENODEV;
+
+ ret = nand_scan_tail(mtd);
+
+ if (ret)
+ return ret;
+
+ return add_mtd_device(mtd);
+}
+EXPORT_SYMBOL_GPL(sm_register_device);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
+MODULE_DESCRIPTION("Common SmartMedia/xD functions");
diff --git a/drivers/mtd/nand/sm_common.h b/drivers/mtd/nand/sm_common.h
new file mode 100644
index 000000000000..18284f5fae64
--- /dev/null
+++ b/drivers/mtd/nand/sm_common.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * Common routines & support for SmartMedia/xD format
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/bitops.h>
+#include <linux/mtd/mtd.h>
+
+/* Full oob structure as written on the flash */
+struct sm_oob {
+ uint32_t reserved;
+ uint8_t data_status;
+ uint8_t block_status;
+ uint8_t lba_copy1[2];
+ uint8_t ecc2[3];
+ uint8_t lba_copy2[2];
+ uint8_t ecc1[3];
+} __attribute__((packed));
+
+
+/* one sector is always 512 bytes, but it can consist of two nand pages */
+#define SM_SECTOR_SIZE 512
+
+/* oob area is also 16 bytes, but might be from two pages */
+#define SM_OOB_SIZE 16
+
+/* This is the maximum zone size; all devices that have more than one zone
+   have this size */
+#define SM_MAX_ZONE_SIZE 1024
+
+/* support for small page nand */
+#define SM_SMALL_PAGE 256
+#define SM_SMALL_OOB_SIZE 8
+
+
+extern int sm_register_device(struct mtd_info *mtd);
+
+
+static inline int sm_sector_valid(struct sm_oob *oob)
+{
+ return hweight16(oob->data_status) >= 5;
+}
+
+static inline int sm_block_valid(struct sm_oob *oob)
+{
+ return hweight16(oob->block_status) >= 7;
+}
+
+static inline int sm_block_erased(struct sm_oob *oob)
+{
+ static const uint32_t erased_pattern[4] = {
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
+
+ /* First test for erased block */
+ if (!memcmp(oob, erased_pattern, sizeof(*oob)))
+ return 1;
+ return 0;
+}
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index a4519a7bd683..b37cbde6e7db 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -220,7 +220,7 @@ static int __devinit socrates_nand_probe(struct of_device *ofdev,
dev_set_drvdata(&ofdev->dev, host);
/* first scan to find the device and get the page size */
- if (nand_scan_ident(mtd, 1)) {
+ if (nand_scan_ident(mtd, 1, NULL)) {
res = -ENXIO;
goto out;
}
@@ -290,7 +290,7 @@ static int __devexit socrates_nand_remove(struct of_device *ofdev)
return 0;
}
-static struct of_device_id socrates_nand_match[] =
+static const struct of_device_id socrates_nand_match[] =
{
{
.compatible = "abb,socrates-nand",
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index fa28f01ae009..3041d1f7ae3f 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -319,7 +319,7 @@ static int tmio_nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio)
{
- struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
+ struct mfd_cell *cell = dev_get_platdata(&dev->dev);
int ret;
if (cell->enable) {
@@ -363,7 +363,7 @@ static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio)
static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
{
- struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
+ struct mfd_cell *cell = dev_get_platdata(&dev->dev);
tmio_iowrite8(FCR_MODE_POWER_OFF, tmio->fcr + FCR_MODE);
if (cell->disable)
@@ -372,7 +372,7 @@ static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
static int tmio_probe(struct platform_device *dev)
{
- struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
+ struct mfd_cell *cell = dev_get_platdata(&dev->dev);
struct tmio_nand_data *data = cell->driver_data;
struct resource *fcr = platform_get_resource(dev,
IORESOURCE_MEM, 0);
@@ -405,14 +405,14 @@ static int tmio_probe(struct platform_device *dev)
mtd->priv = nand_chip;
mtd->name = "tmio-nand";
- tmio->ccr = ioremap(ccr->start, ccr->end - ccr->start + 1);
+ tmio->ccr = ioremap(ccr->start, resource_size(ccr));
if (!tmio->ccr) {
retval = -EIO;
goto err_iomap_ccr;
}
tmio->fcr_base = fcr->start & 0xfffff;
- tmio->fcr = ioremap(fcr->start, fcr->end - fcr->start + 1);
+ tmio->fcr = ioremap(fcr->start, resource_size(fcr));
if (!tmio->fcr) {
retval = -EIO;
goto err_iomap_fcr;
@@ -516,7 +516,7 @@ static int tmio_remove(struct platform_device *dev)
#ifdef CONFIG_PM
static int tmio_suspend(struct platform_device *dev, pm_message_t state)
{
- struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
+ struct mfd_cell *cell = dev_get_platdata(&dev->dev);
if (cell->suspend)
cell->suspend(dev);
@@ -527,7 +527,7 @@ static int tmio_suspend(struct platform_device *dev, pm_message_t state)
static int tmio_resume(struct platform_device *dev)
{
- struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
+ struct mfd_cell *cell = dev_get_platdata(&dev->dev);
/* FIXME - is this required or merely another attack of the broken
* SHARP platform? Looks suspicious.
diff --git a/drivers/mtd/nand/ts7250.c b/drivers/mtd/nand/ts7250.c
deleted file mode 100644
index 0f5562aeedc1..000000000000
--- a/drivers/mtd/nand/ts7250.c
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * drivers/mtd/nand/ts7250.c
- *
- * Copyright (C) 2004 Technologic Systems (support@embeddedARM.com)
- *
- * Derived from drivers/mtd/nand/edb7312.c
- * Copyright (C) 2004 Marius Gröger (mag@sysgo.de)
- *
- * Derived from drivers/mtd/nand/autcpu12.c
- * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Overview:
- * This is a device driver for the NAND flash device found on the
- * TS-7250 board which utilizes a Samsung 32 Mbyte part.
- */
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/partitions.h>
-#include <linux/io.h>
-
-#include <mach/hardware.h>
-#include <mach/ts72xx.h>
-
-#include <asm/sizes.h>
-#include <asm/mach-types.h>
-
-/*
- * MTD structure for TS7250 board
- */
-static struct mtd_info *ts7250_mtd = NULL;
-
-#ifdef CONFIG_MTD_PARTITIONS
-static const char *part_probes[] = { "cmdlinepart", NULL };
-
-#define NUM_PARTITIONS 3
-
-/*
- * Define static partitions for flash device
- */
-static struct mtd_partition partition_info32[] = {
- {
- .name = "TS-BOOTROM",
- .offset = 0x00000000,
- .size = 0x00004000,
- }, {
- .name = "Linux",
- .offset = 0x00004000,
- .size = 0x01d00000,
- }, {
- .name = "RedBoot",
- .offset = 0x01d04000,
- .size = 0x002fc000,
- },
-};
-
-/*
- * Define static partitions for flash device
- */
-static struct mtd_partition partition_info128[] = {
- {
- .name = "TS-BOOTROM",
- .offset = 0x00000000,
- .size = 0x00004000,
- }, {
- .name = "Linux",
- .offset = 0x00004000,
- .size = 0x07d00000,
- }, {
- .name = "RedBoot",
- .offset = 0x07d04000,
- .size = 0x002fc000,
- },
-};
-#endif
-
-
-/*
- * hardware specific access to control-lines
- *
- * ctrl:
- * NAND_NCE: bit 0 -> bit 2
- * NAND_CLE: bit 1 -> bit 1
- * NAND_ALE: bit 2 -> bit 0
- */
-static void ts7250_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
-{
- struct nand_chip *chip = mtd->priv;
-
- if (ctrl & NAND_CTRL_CHANGE) {
- unsigned long addr = TS72XX_NAND_CONTROL_VIRT_BASE;
- unsigned char bits;
-
- bits = (ctrl & NAND_NCE) << 2;
- bits |= ctrl & NAND_CLE;
- bits |= (ctrl & NAND_ALE) >> 2;
-
- __raw_writeb((__raw_readb(addr) & ~0x7) | bits, addr);
- }
-
- if (cmd != NAND_CMD_NONE)
- writeb(cmd, chip->IO_ADDR_W);
-}
-
-/*
- * read device ready pin
- */
-static int ts7250_device_ready(struct mtd_info *mtd)
-{
- return __raw_readb(TS72XX_NAND_BUSY_VIRT_BASE) & 0x20;
-}
-
-/*
- * Main initialization routine
- */
-static int __init ts7250_init(void)
-{
- struct nand_chip *this;
- const char *part_type = 0;
- int mtd_parts_nb = 0;
- struct mtd_partition *mtd_parts = 0;
-
- if (!machine_is_ts72xx() || board_is_ts7200())
- return -ENXIO;
-
- /* Allocate memory for MTD device structure and private data */
- ts7250_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
- if (!ts7250_mtd) {
- printk("Unable to allocate TS7250 NAND MTD device structure.\n");
- return -ENOMEM;
- }
-
- /* Get pointer to private data */
- this = (struct nand_chip *)(&ts7250_mtd[1]);
-
- /* Initialize structures */
- memset(ts7250_mtd, 0, sizeof(struct mtd_info));
- memset(this, 0, sizeof(struct nand_chip));
-
- /* Link the private data with the MTD structure */
- ts7250_mtd->priv = this;
- ts7250_mtd->owner = THIS_MODULE;
-
- /* insert callbacks */
- this->IO_ADDR_R = (void *)TS72XX_NAND_DATA_VIRT_BASE;
- this->IO_ADDR_W = (void *)TS72XX_NAND_DATA_VIRT_BASE;
- this->cmd_ctrl = ts7250_hwcontrol;
- this->dev_ready = ts7250_device_ready;
- this->chip_delay = 15;
- this->ecc.mode = NAND_ECC_SOFT;
-
- printk("Searching for NAND flash...\n");
- /* Scan to find existence of the device */
- if (nand_scan(ts7250_mtd, 1)) {
- kfree(ts7250_mtd);
- return -ENXIO;
- }
-#ifdef CONFIG_MTD_PARTITIONS
- ts7250_mtd->name = "ts7250-nand";
- mtd_parts_nb = parse_mtd_partitions(ts7250_mtd, part_probes, &mtd_parts, 0);
- if (mtd_parts_nb > 0)
- part_type = "command line";
- else
- mtd_parts_nb = 0;
-#endif
- if (mtd_parts_nb == 0) {
- mtd_parts = partition_info32;
- if (ts7250_mtd->size >= (128 * 0x100000))
- mtd_parts = partition_info128;
- mtd_parts_nb = NUM_PARTITIONS;
- part_type = "static";
- }
-
- /* Register the partitions */
- printk(KERN_NOTICE "Using %s partition definition\n", part_type);
- add_mtd_partitions(ts7250_mtd, mtd_parts, mtd_parts_nb);
-
- /* Return happy */
- return 0;
-}
-
-module_init(ts7250_init);
-
-/*
- * Clean up routine
- */
-static void __exit ts7250_cleanup(void)
-{
- /* Unregister the device */
- del_mtd_device(ts7250_mtd);
-
- /* Free the MTD device structure */
- kfree(ts7250_mtd);
-}
-
-module_exit(ts7250_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jesse Off <joff@embeddedARM.com>");
-MODULE_DESCRIPTION("MTD map driver for Technologic Systems TS-7250 board");
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index 863513c3b69a..054a41c0ef4a 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -274,7 +274,7 @@ static int txx9ndfmc_nand_scan(struct mtd_info *mtd)
struct nand_chip *chip = mtd->priv;
int ret;
- ret = nand_scan_ident(mtd, 1);
+ ret = nand_scan_ident(mtd, 1, NULL);
if (!ret) {
if (mtd->writesize >= 512) {
chip->ecc.size = mtd->writesize;
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index 1002e1882996..a4578bf903aa 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -126,7 +126,6 @@ static void nftl_remove_dev(struct mtd_blktrans_dev *dev)
del_mtd_blktrans_dev(dev);
kfree(nftl->ReplUnitTable);
kfree(nftl->EUNtable);
- kfree(nftl);
}
/*
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index fd406348fdfd..9f322f1a7f22 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -309,7 +309,7 @@ static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
goto out_copy;
/* panic_write() may be in an interrupt context */
- if (in_interrupt())
+ if (in_interrupt() || oops_in_progress)
goto out_copy;
if (buf >= high_memory) {
@@ -386,7 +386,7 @@ static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
goto out_copy;
/* panic_write() may be in an interrupt context */
- if (in_interrupt())
+ if (in_interrupt() || oops_in_progress)
goto out_copy;
if (buf >= high_memory) {
@@ -403,7 +403,7 @@ static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
dma_dst = c->phys_base + bram_offset;
- if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
+ if (dma_mapping_error(&c->pdev->dev, dma_src)) {
dev_err(&c->pdev->dev,
"Couldn't DMA map a %d byte buffer\n",
count);
@@ -426,7 +426,7 @@ static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
if (*done)
break;
- dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_TO_DEVICE);
+ dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);
if (!*done) {
dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
@@ -521,7 +521,7 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count,
DMA_TO_DEVICE);
dma_dst = c->phys_base + bram_offset;
- if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
+ if (dma_mapping_error(&c->pdev->dev, dma_src)) {
dev_err(&c->pdev->dev,
"Couldn't DMA map a %d byte buffer\n",
count);
@@ -539,7 +539,7 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
omap_start_dma(c->dma_channel);
wait_for_completion(&c->dma_done);
- dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_TO_DEVICE);
+ dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);
return 0;
}
diff --git a/drivers/mtd/rfd_ftl.c b/drivers/mtd/rfd_ftl.c
index d2aa9c46530f..63b83c0d9a13 100644
--- a/drivers/mtd/rfd_ftl.c
+++ b/drivers/mtd/rfd_ftl.c
@@ -817,7 +817,6 @@ static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev)
vfree(part->sector_map);
kfree(part->header_cache);
kfree(part->blocks);
- kfree(part);
}
static struct mtd_blktrans_ops rfd_ftl_tr = {
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
new file mode 100644
index 000000000000..67822cf6c025
--- /dev/null
+++ b/drivers/mtd/sm_ftl.c
@@ -0,0 +1,1284 @@
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * SmartMedia/xD translation layer
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/hdreg.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <linux/sysfs.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/mtd/nand_ecc.h>
+#include "nand/sm_common.h"
+#include "sm_ftl.h"
+
+
+
+struct workqueue_struct *cache_flush_workqueue;
+
+static int cache_timeout = 1000;
+module_param(cache_timeout, int, S_IRUGO);
+MODULE_PARM_DESC(cache_timeout,
+ "Timeout (in ms) for cache flush (1000 ms default");
+
+static int debug;
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug level (0-2)");
+
+
+/* ------------------- sysfs attributes ---------------------------------- */
+struct sm_sysfs_attribute {
+ struct device_attribute dev_attr;
+ char *data;
+ int len;
+};
+
+ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct sm_sysfs_attribute *sm_attr =
+ container_of(attr, struct sm_sysfs_attribute, dev_attr);
+
+ strncpy(buf, sm_attr->data, sm_attr->len);
+ return sm_attr->len;
+}
+
+
+#define NUM_ATTRIBUTES 1
+#define SM_CIS_VENDOR_OFFSET 0x59
+struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
+{
+ struct attribute_group *attr_group;
+ struct attribute **attributes;
+ struct sm_sysfs_attribute *vendor_attribute;
+
+ int vendor_len = strnlen(ftl->cis_buffer + SM_CIS_VENDOR_OFFSET,
+ SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET);
+
+ char *vendor = kmalloc(vendor_len + 1, GFP_KERNEL); /* +1 for NUL below */
+ memcpy(vendor, ftl->cis_buffer + SM_CIS_VENDOR_OFFSET, vendor_len);
+ vendor[vendor_len] = 0;
+
+ /* Initialize sysfs attributes */
+ vendor_attribute =
+ kzalloc(sizeof(struct sm_sysfs_attribute), GFP_KERNEL);
+
+ sysfs_attr_init(&vendor_attribute->dev_attr.attr);
+
+ vendor_attribute->data = vendor;
+ vendor_attribute->len = vendor_len;
+ vendor_attribute->dev_attr.attr.name = "vendor";
+ vendor_attribute->dev_attr.attr.mode = S_IRUGO;
+ vendor_attribute->dev_attr.show = sm_attr_show;
+
+
+ /* Create array of pointers to the attributes */
+ attributes = kzalloc(sizeof(struct attribute *) * (NUM_ATTRIBUTES + 1),
+ GFP_KERNEL);
+ attributes[0] = &vendor_attribute->dev_attr.attr;
+
+ /* Finally create the attribute group */
+ attr_group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
+ attr_group->attrs = attributes;
+ return attr_group;
+}
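+
+/*
+ * The group built above is handed to the blktrans layer as disk_attributes,
+ * so the CIS vendor string should become readable from userspace under the
+ * resulting block device's sysfs directory (presumably something like
+ * /sys/block/smblka/vendor -- the exact name depends on the blktrans layer).
+ */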
+
+void sm_delete_sysfs_attributes(struct sm_ftl *ftl)
+{
+ struct attribute **attributes = ftl->disk_attributes->attrs;
+ int i;
+
+ for (i = 0; attributes[i] ; i++) {
+
+ struct device_attribute *dev_attr = container_of(attributes[i],
+ struct device_attribute, attr);
+
+ struct sm_sysfs_attribute *sm_attr =
+ container_of(dev_attr,
+ struct sm_sysfs_attribute, dev_attr);
+
+ kfree(sm_attr->data);
+ kfree(sm_attr);
+ }
+
+ kfree(ftl->disk_attributes->attrs);
+ kfree(ftl->disk_attributes);
+}
+
+
+/* ----------------------- oob helpers -------------------------------------- */
+
+static int sm_get_lba(uint8_t *lba)
+{
+ /* check fixed bits */
+ if ((lba[0] & 0xF8) != 0x10)
+ return -2;
+
+ /* check parity - endianness doesn't matter */
+ if (hweight16(*(uint16_t *)lba) & 1)
+ return -2;
+
+ return (lba[1] >> 1) | ((lba[0] & 0x07) << 7);
+}
+
+
+/*
+ * Read LBA associated with the block
+ * returns -1, if block is erased
+ * returns -2 if error happens
+ */
+static int sm_read_lba(struct sm_oob *oob)
+{
+ static const uint32_t erased_pattern[4] = {
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
+
+ uint16_t lba_test;
+ int lba;
+
+ /* First test for erased block */
+ if (!memcmp(oob, erased_pattern, SM_OOB_SIZE))
+ return -1;
+
+ /* Now check if the two copies of the LBA differ too much */
+ lba_test = *(uint16_t *)oob->lba_copy1 ^ *(uint16_t*)oob->lba_copy2;
+ if (lba_test && !is_power_of_2(lba_test))
+ return -2;
+
+ /* And read it */
+ lba = sm_get_lba(oob->lba_copy1);
+
+ if (lba == -2)
+ lba = sm_get_lba(oob->lba_copy2);
+
+ return lba;
+}
+
+static void sm_write_lba(struct sm_oob *oob, uint16_t lba)
+{
+ uint8_t tmp[2];
+
+ WARN_ON(lba >= 1000);
+
+ tmp[0] = 0x10 | ((lba >> 7) & 0x07);
+ tmp[1] = (lba << 1) & 0xFF;
+
+ if (hweight16(*(uint16_t *)tmp) & 0x01)
+ tmp[1] |= 1;
+
+ oob->lba_copy1[0] = oob->lba_copy2[0] = tmp[0];
+ oob->lba_copy1[1] = oob->lba_copy2[1] = tmp[1];
+}
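+
+/*
+ * Worked example of the encoding above (illustrative, derived from the code):
+ * LBA 5 -> tmp[0] = 0x10, tmp[1] = 0x0A; the pair has 3 bits set (odd), so
+ * the parity bit is added and the stored bytes are 0x10 0x0B. sm_get_lba()
+ * then sees an even popcount, passes the parity check and recovers
+ * (0x0B >> 1) | ((0x10 & 0x07) << 7) = 5.
+ */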
+
+
+/* Make offset from parts */
+static loff_t sm_mkoffset(struct sm_ftl *ftl, int zone, int block, int boffset)
+{
+ WARN_ON(boffset & (SM_SECTOR_SIZE - 1));
+ WARN_ON(zone < 0 || zone >= ftl->zone_count);
+ WARN_ON(block >= ftl->zone_size);
+ WARN_ON(boffset >= ftl->block_size);
+
+ if (block == -1)
+ return -1;
+
+ return (zone * SM_MAX_ZONE_SIZE + block) * ftl->block_size + boffset;
+}
+
+/* Breaks offset into parts */
+static void sm_break_offset(struct sm_ftl *ftl, loff_t offset,
+ int *zone, int *block, int *boffset)
+{
+ *boffset = do_div(offset, ftl->block_size);
+ *block = do_div(offset, ftl->max_lba);
+ *zone = offset >= ftl->zone_count ? -1 : offset;
+}
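+
+/*
+ * Note the asymmetry between the two helpers above: sm_mkoffset() builds a
+ * physical flash offset from a physical block number (zones are spaced
+ * SM_MAX_ZONE_SIZE blocks apart), while sm_break_offset() splits a logical
+ * byte offset into (zone, LBA, offset-in-block) using max_lba. As a sketch,
+ * assuming the common >= 16 MiB geometry (block_size = 32 * 512 = 16 KiB,
+ * max_lba = 1000): logical byte offset 1048576 -> boffset = 0, LBA = 64,
+ * zone = 0, since 1048576 / 16384 = 64.
+ */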
+
+/* ---------------------- low level IO ------------------------------------- */
+
+static int sm_correct_sector(uint8_t *buffer, struct sm_oob *oob)
+{
+ uint8_t ecc[3];
+
+ __nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc);
+ if (__nand_correct_data(buffer, ecc, oob->ecc1, SM_SMALL_PAGE) < 0)
+ return -EIO;
+
+ buffer += SM_SMALL_PAGE;
+
+ __nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc);
+ if (__nand_correct_data(buffer, ecc, oob->ecc2, SM_SMALL_PAGE) < 0)
+ return -EIO;
+ return 0;
+}
+
+/* Reads a sector + oob*/
+static int sm_read_sector(struct sm_ftl *ftl,
+ int zone, int block, int boffset,
+ uint8_t *buffer, struct sm_oob *oob)
+{
+ struct mtd_info *mtd = ftl->trans->mtd;
+ struct mtd_oob_ops ops;
+ struct sm_oob tmp_oob;
+ int ret = -EIO;
+ int try = 0;
+
+ /* The FTL table can contain -1 (unmapped) entries; these read back as 0xFF */
+ if (block == -1) {
+ memset(buffer, 0xFF, SM_SECTOR_SIZE);
+ return 0;
+ }
+
+ /* User might not need the oob, but we do for data verification */
+ if (!oob)
+ oob = &tmp_oob;
+
+ ops.mode = ftl->smallpagenand ? MTD_OOB_RAW : MTD_OOB_PLACE;
+ ops.ooboffs = 0;
+ ops.ooblen = SM_OOB_SIZE;
+ ops.oobbuf = (void *)oob;
+ ops.len = SM_SECTOR_SIZE;
+ ops.datbuf = buffer;
+
+again:
+ if (try++) {
+ /* Avoid infinite recursion on CIS reads, sm_recheck_media
+ won't help anyway */
+ if (zone == 0 && block == ftl->cis_block && boffset ==
+ ftl->cis_boffset)
+ return ret;
+
+ /* Test if media is stable */
+ if (try == 3 || sm_recheck_media(ftl))
+ return ret;
+ }
+
+ /* Unfortunately, the oob read will _always_ succeed,
+ even after card removal */
+ ret = mtd->read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
+
+ /* Test for unknown errors */
+ if (ret != 0 && ret != -EUCLEAN && ret != -EBADMSG) {
+ dbg("read of block %d at zone %d, failed due to error (%d)",
+ block, zone, ret);
+ goto again;
+ }
+
+ /* Do a basic test on the oob, to guard against returned garbage */
+ if (oob->reserved != 0xFFFFFFFF && !is_power_of_2(~oob->reserved))
+ goto again;
+
+ /* This should never happen, unless there is a bug in the mtd driver */
+ WARN_ON(ops.oobretlen != SM_OOB_SIZE);
+ WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);
+
+ if (!buffer)
+ return 0;
+
+ /* Test if sector marked as bad */
+ if (!sm_sector_valid(oob)) {
+ dbg("read of block %d at zone %d, failed because it is marked"
+ " as bad" , block, zone);
+ goto again;
+ }
+
+ /* Test ECC*/
+ if (ret == -EBADMSG ||
+ (ftl->smallpagenand && sm_correct_sector(buffer, oob))) {
+
+ dbg("read of block %d at zone %d, failed due to ECC error",
+ block, zone);
+ goto again;
+ }
+
+ return 0;
+}
+
+/* Writes a sector to media */
+static int sm_write_sector(struct sm_ftl *ftl,
+ int zone, int block, int boffset,
+ uint8_t *buffer, struct sm_oob *oob)
+{
+ struct mtd_oob_ops ops;
+ struct mtd_info *mtd = ftl->trans->mtd;
+ int ret;
+
+ BUG_ON(ftl->readonly);
+
+ if (zone == 0 && (block == ftl->cis_block || block == 0)) {
+ dbg("attempted to write the CIS!");
+ return -EIO;
+ }
+
+ if (ftl->unstable)
+ return -EIO;
+
+ ops.mode = ftl->smallpagenand ? MTD_OOB_RAW : MTD_OOB_PLACE;
+ ops.len = SM_SECTOR_SIZE;
+ ops.datbuf = buffer;
+ ops.ooboffs = 0;
+ ops.ooblen = SM_OOB_SIZE;
+ ops.oobbuf = (void *)oob;
+
+ ret = mtd->write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
+
+ /* Now we assume that hardware will catch write bitflip errors */
+ /* If you are paranoid, use CONFIG_MTD_NAND_VERIFY_WRITE */
+
+ if (ret) {
+ dbg("write to block %d at zone %d, failed with error %d",
+ block, zone, ret);
+
+ sm_recheck_media(ftl);
+ return ret;
+ }
+
+ /* This should never happen, unless there is a bug in the driver */
+ WARN_ON(ops.oobretlen != SM_OOB_SIZE);
+ WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);
+
+ return 0;
+}
+
+/* ------------------------ block IO ------------------------------------- */
+
+/* Write a block using data and lba, and invalid sector bitmap */
+static int sm_write_block(struct sm_ftl *ftl, uint8_t *buf,
+ int zone, int block, int lba,
+ unsigned long invalid_bitmap)
+{
+ struct sm_oob oob;
+ int boffset;
+ int retry = 0;
+
+ /* Initialize the oob with requested values */
+ memset(&oob, 0xFF, SM_OOB_SIZE);
+ sm_write_lba(&oob, lba);
+restart:
+ if (ftl->unstable)
+ return -EIO;
+
+ for (boffset = 0; boffset < ftl->block_size;
+ boffset += SM_SECTOR_SIZE) {
+
+ oob.data_status = 0xFF;
+
+ if (test_bit(boffset / SM_SECTOR_SIZE, &invalid_bitmap)) {
+
+ sm_printk("sector %d of block at LBA %d of zone %d"
+ " coudn't be read, marking it as invalid",
+ boffset / SM_SECTOR_SIZE, lba, zone);
+
+ oob.data_status = 0;
+ }
+
+ if (ftl->smallpagenand) {
+ __nand_calculate_ecc(buf + boffset,
+ SM_SMALL_PAGE, oob.ecc1);
+
+ __nand_calculate_ecc(buf + boffset + SM_SMALL_PAGE,
+ SM_SMALL_PAGE, oob.ecc2);
+ }
+ if (!sm_write_sector(ftl, zone, block, boffset,
+ buf + boffset, &oob))
+ continue;
+
+ if (!retry) {
+
+ /* If the write fails, try to erase the block */
+ /* This is safe, because we never write to blocks
+ that contain valuable data.
+ This is intended to repair blocks that are marked
+ as erased but aren't fully erased */
+
+ if (sm_erase_block(ftl, zone, block, 0))
+ return -EIO;
+
+ retry = 1;
+ goto restart;
+ } else {
+ sm_mark_block_bad(ftl, zone, block);
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+
+/* Mark the whole block as bad. */
+static void sm_mark_block_bad(struct sm_ftl *ftl, int zone, int block)
+{
+ struct sm_oob oob;
+ int boffset;
+
+ memset(&oob, 0xFF, SM_OOB_SIZE);
+ oob.block_status = 0xF0;
+
+ if (ftl->unstable)
+ return;
+
+ if (sm_recheck_media(ftl))
+ return;
+
+ sm_printk("marking block %d of zone %d as bad", block, zone);
+
+ /* We aren't checking the return value, because we don't care */
+ /* This also fails on fake xD cards, but I guess these won't expose
+ any bad blocks until they fail completely */
+ for (boffset = 0; boffset < ftl->block_size; boffset += SM_SECTOR_SIZE)
+ sm_write_sector(ftl, zone, block, boffset, NULL, &oob);
+}
+
+/*
+ * Erase a block within a zone
+ * If erase succeeds, it updates the free block fifo, otherwise marks the block as bad
+ */
+static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
+ int put_free)
+{
+ struct ftl_zone *zone = &ftl->zones[zone_num];
+ struct mtd_info *mtd = ftl->trans->mtd;
+ struct erase_info erase;
+
+ erase.mtd = mtd;
+ erase.callback = sm_erase_callback;
+ erase.addr = sm_mkoffset(ftl, zone_num, block, 0);
+ erase.len = ftl->block_size;
+ erase.priv = (u_long)ftl;
+
+ if (ftl->unstable)
+ return -EIO;
+
+ BUG_ON(ftl->readonly);
+
+ if (zone_num == 0 && (block == ftl->cis_block || block == 0)) {
+ sm_printk("attempted to erase the CIS!");
+ return -EIO;
+ }
+
+ if (mtd->erase(mtd, &erase)) {
+ sm_printk("erase of block %d in zone %d failed",
+ block, zone_num);
+ goto error;
+ }
+
+ if (erase.state == MTD_ERASE_PENDING)
+ wait_for_completion(&ftl->erase_completion);
+
+ if (erase.state != MTD_ERASE_DONE) {
+ sm_printk("erase of block %d in zone %d failed after wait",
+ block, zone_num);
+ goto error;
+ }
+
+ if (put_free)
+ kfifo_in(&zone->free_sectors,
+ (const unsigned char *)&block, sizeof(block));
+
+ return 0;
+error:
+ sm_mark_block_bad(ftl, zone_num, block);
+ return -EIO;
+}
+
+static void sm_erase_callback(struct erase_info *self)
+{
+ struct sm_ftl *ftl = (struct sm_ftl *)self->priv;
+ complete(&ftl->erase_completion);
+}
+
+/* Thoroughly test that the block is valid. */
+static int sm_check_block(struct sm_ftl *ftl, int zone, int block)
+{
+ int boffset;
+ struct sm_oob oob;
+ int lbas[] = { -3, 0, 0, 0 };
+ int i = 0;
+ int test_lba;
+
+
+ /* First just check that the block doesn't look fishy */
+ /* Only blocks that are fully valid, or are sliced into two parts, are
+ accepted */
+ for (boffset = 0; boffset < ftl->block_size;
+ boffset += SM_SECTOR_SIZE) {
+
+ /* This shouldn't happen anyway */
+ if (sm_read_sector(ftl, zone, block, boffset, NULL, &oob))
+ return -2;
+
+ test_lba = sm_read_lba(&oob);
+
+ if (lbas[i] != test_lba)
+ lbas[++i] = test_lba;
+
+ /* If we found three different LBAs, something is fishy */
+ if (i == 3)
+ return -EIO;
+ }
+
+ /* If the block is sliced (usually partially erased), erase it */
+ if (i == 2) {
+ sm_erase_block(ftl, zone, block, 1);
+ return 1;
+ }
+
+ return 0;
+}
+
+/* ----------------- media scanning --------------------------------- */
+static const struct chs_entry chs_table[] = {
+ { 1, 125, 4, 4 },
+ { 2, 125, 4, 8 },
+ { 4, 250, 4, 8 },
+ { 8, 250, 4, 16 },
+ { 16, 500, 4, 16 },
+ { 32, 500, 8, 16 },
+ { 64, 500, 8, 32 },
+ { 128, 500, 16, 32 },
+ { 256, 1000, 16, 32 },
+ { 512, 1015, 32, 63 },
+ { 1024, 985, 33, 63 },
+ { 2048, 985, 33, 63 },
+ { 0 },
+};
+
+
+static const uint8_t cis_signature[] = {
+ 0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02, 0xDF, 0x01, 0x20
+};
+/* Find out media parameters.
+ * This ideally has to be based on nand id, but for now device size is enough */
+int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd)
+{
+ int i;
+ int size_in_megs = mtd->size / (1024 * 1024);
+
+ ftl->readonly = mtd->type == MTD_ROM;
+
+ /* Manual settings for very old devices */
+ ftl->zone_count = 1;
+ ftl->smallpagenand = 0;
+
+ switch (size_in_megs) {
+ case 1:
+ /* 1 MiB flash/rom SmartMedia card (256 byte pages)*/
+ ftl->zone_size = 256;
+ ftl->max_lba = 250;
+ ftl->block_size = 8 * SM_SECTOR_SIZE;
+ ftl->smallpagenand = 1;
+
+ break;
+ case 2:
+ /* 2 MiB flash SmartMedia (256 byte pages)*/
+ if (mtd->writesize == SM_SMALL_PAGE) {
+ ftl->zone_size = 512;
+ ftl->max_lba = 500;
+ ftl->block_size = 8 * SM_SECTOR_SIZE;
+ ftl->smallpagenand = 1;
+ /* 2 MiB rom SmartMedia */
+ } else {
+
+ if (!ftl->readonly)
+ return -ENODEV;
+
+ ftl->zone_size = 256;
+ ftl->max_lba = 250;
+ ftl->block_size = 16 * SM_SECTOR_SIZE;
+ }
+ break;
+ case 4:
+ /* 4 MiB flash/rom SmartMedia device */
+ ftl->zone_size = 512;
+ ftl->max_lba = 500;
+ ftl->block_size = 16 * SM_SECTOR_SIZE;
+ break;
+ case 8:
+ /* 8 MiB flash/rom SmartMedia device */
+ ftl->zone_size = 1024;
+ ftl->max_lba = 1000;
+ ftl->block_size = 16 * SM_SECTOR_SIZE;
+ }
+
+ /* Minimum xD size is 16 MiB. Also, all xD cards have standard zone
+ sizes. SmartMedia cards exist up to 128 MiB and have the same layout */
+ if (size_in_megs >= 16) {
+ ftl->zone_count = size_in_megs / 16;
+ ftl->zone_size = 1024;
+ ftl->max_lba = 1000;
+ ftl->block_size = 32 * SM_SECTOR_SIZE;
+ }
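+
+ /*
+ * Illustration of the numbers above (not from any datasheet): a
+ * 128 MiB xD card gets zone_count = 8, each zone holding 1024
+ * physical blocks of 32 * 512 bytes with 1000 of them exported as
+ * LBAs, i.e. 8 * 1000 * 16 KiB = roughly 125 MiB of user capacity;
+ * the remaining blocks in each zone absorb wear and bad blocks.
+ */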
+
+ /* Test for proper write, erase and oob sizes */
+ if (mtd->erasesize > ftl->block_size)
+ return -ENODEV;
+
+ if (mtd->writesize > SM_SECTOR_SIZE)
+ return -ENODEV;
+
+ if (ftl->smallpagenand && mtd->oobsize < SM_SMALL_OOB_SIZE)
+ return -ENODEV;
+
+ if (!ftl->smallpagenand && mtd->oobsize < SM_OOB_SIZE)
+ return -ENODEV;
+
+ /* We use these functions for IO */
+ if (!mtd->read_oob || !mtd->write_oob)
+ return -ENODEV;
+
+ /* Find geometry information */
+ for (i = 0 ; i < ARRAY_SIZE(chs_table) ; i++) {
+ if (chs_table[i].size == size_in_megs) {
+ ftl->cylinders = chs_table[i].cyl;
+ ftl->heads = chs_table[i].head;
+ ftl->sectors = chs_table[i].sec;
+ return 0;
+ }
+ }
+
+ sm_printk("media has unknown size : %dMiB", size_in_megs);
+ ftl->cylinders = 985;
+ ftl->heads = 33;
+ ftl->sectors = 63;
+ return 0;
+}
+
+/* Validate the CIS */
+static int sm_read_cis(struct sm_ftl *ftl)
+{
+ struct sm_oob oob;
+
+ if (sm_read_sector(ftl,
+ 0, ftl->cis_block, ftl->cis_boffset, ftl->cis_buffer, &oob))
+ return -EIO;
+
+ if (!sm_sector_valid(&oob) || !sm_block_valid(&oob))
+ return -EIO;
+
+ if (!memcmp(ftl->cis_buffer + ftl->cis_page_offset,
+ cis_signature, sizeof(cis_signature))) {
+ return 0;
+ }
+
+ return -EIO;
+}
+
+/* Scan the media for the CIS */
+static int sm_find_cis(struct sm_ftl *ftl)
+{
+ struct sm_oob oob;
+ int block, boffset;
+ int block_found = 0;
+ int cis_found = 0;
+
+ /* Search for first valid block */
+ for (block = 0 ; block < ftl->zone_size - ftl->max_lba ; block++) {
+
+ if (sm_read_sector(ftl, 0, block, 0, NULL, &oob))
+ continue;
+
+ if (!sm_block_valid(&oob))
+ continue;
+ block_found = 1;
+ break;
+ }
+
+ if (!block_found)
+ return -EIO;
+
+ /* Search for first valid sector in this block */
+ for (boffset = 0 ; boffset < ftl->block_size;
+ boffset += SM_SECTOR_SIZE) {
+
+ if (sm_read_sector(ftl, 0, block, boffset, NULL, &oob))
+ continue;
+
+ if (!sm_sector_valid(&oob))
+ continue;
+ break;
+ }
+
+ if (boffset == ftl->block_size)
+ return -EIO;
+
+ ftl->cis_block = block;
+ ftl->cis_boffset = boffset;
+ ftl->cis_page_offset = 0;
+
+ cis_found = !sm_read_cis(ftl);
+
+ if (!cis_found) {
+ ftl->cis_page_offset = SM_SMALL_PAGE;
+ cis_found = !sm_read_cis(ftl);
+ }
+
+ if (cis_found) {
+ dbg("CIS block found at offset %x",
+ block * ftl->block_size +
+ boffset + ftl->cis_page_offset);
+ return 0;
+ }
+ return -EIO;
+}
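+
+/*
+ * In other words: the CIS lives in the first valid sector of the first
+ * valid block of zone 0, and the signature may sit either at the start of
+ * that 512-byte sector or 256 bytes into it (cis_page_offset), which is
+ * what the second sm_read_cis() attempt above probes for.
+ */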
+
+/* Basic test to determine if the underlying mtd device is functional */
+static int sm_recheck_media(struct sm_ftl *ftl)
+{
+ if (sm_read_cis(ftl)) {
+
+ if (!ftl->unstable) {
+ sm_printk("media unstable, not allowing writes");
+ ftl->unstable = 1;
+ }
+ return -EIO;
+ }
+ return 0;
+}
+
+/* Initialize a FTL zone */
+static int sm_init_zone(struct sm_ftl *ftl, int zone_num)
+{
+ struct ftl_zone *zone = &ftl->zones[zone_num];
+ struct sm_oob oob;
+ uint16_t block;
+ int lba;
+ int i = 0;
+ int len;
+
+ dbg("initializing zone %d", zone_num);
+
+ /* Allocate memory for FTL table */
+ zone->lba_to_phys_table = kmalloc(ftl->max_lba * 2, GFP_KERNEL);
+
+ if (!zone->lba_to_phys_table)
+ return -ENOMEM;
+ memset(zone->lba_to_phys_table, -1, ftl->max_lba * 2);
+
+
+ /* Allocate memory for free sectors FIFO */
+ if (kfifo_alloc(&zone->free_sectors, ftl->zone_size * 2, GFP_KERNEL)) {
+ kfree(zone->lba_to_phys_table);
+ return -ENOMEM;
+ }
+
+ /* Now scan the zone */
+ for (block = 0 ; block < ftl->zone_size ; block++) {
+
+ /* Skip blocks up to and including the CIS */
+ if (zone_num == 0 && block <= ftl->cis_block)
+ continue;
+
+ /* Read the oob of first sector */
+ if (sm_read_sector(ftl, zone_num, block, 0, NULL, &oob))
+ return -EIO;
+
+ /* Test to see if the block is erased. It is enough to test the
+ first sector, because erase happens in one shot */
+ if (sm_block_erased(&oob)) {
+ kfifo_in(&zone->free_sectors,
+ (unsigned char *)&block, 2);
+ continue;
+ }
+
+ /* If the block is marked as bad, skip it */
+ /* This assumes we can trust the first sector */
+ /* However, the way the block valid status is defined ensures a
+ very low probability of failure here */
+ if (!sm_block_valid(&oob)) {
+ dbg("PH %04d <-> <marked bad>", block);
+ continue;
+ }
+
+
+ lba = sm_read_lba(&oob);
+
+ /* An invalid LBA means that the block is damaged. */
+ /* We can try to erase it, or mark it as bad, but
+ let's leave that to a recovery application */
+ if (lba == -2 || lba >= ftl->max_lba) {
+ dbg("PH %04d <-> LBA %04d(bad)", block, lba);
+ continue;
+ }
+
+
+ /* If there is no collision,
+ just put the block in the FTL table */
+ if (zone->lba_to_phys_table[lba] < 0) {
+ dbg_verbose("PH %04d <-> LBA %04d", block, lba);
+ zone->lba_to_phys_table[lba] = block;
+ continue;
+ }
+
+ sm_printk("collision"
+ " of LBA %d between blocks %d and %d in zone %d",
+ lba, zone->lba_to_phys_table[lba], block, zone_num);
+
+ /* Test that this block is valid*/
+ if (sm_check_block(ftl, zone_num, block))
+ continue;
+
+ /* Test now the old block */
+ if (sm_check_block(ftl, zone_num,
+ zone->lba_to_phys_table[lba])) {
+ zone->lba_to_phys_table[lba] = block;
+ continue;
+ }
+
+ /* If both blocks are valid and share the same LBA, it means that
+ they hold different versions of the same data. It is not
+ known which is more recent, thus just erase one of them
+ */
+ sm_printk("both blocks are valid, erasing the later");
+ sm_erase_block(ftl, zone_num, block, 1);
+ }
+
+ dbg("zone initialized");
+ zone->initialized = 1;
+
+ /* No free sectors means that the zone is heavily damaged; writes won't
+ work, but it can still be (partially) read */
+ if (!kfifo_len(&zone->free_sectors)) {
+ sm_printk("no free blocks in zone %d", zone_num);
+ return 0;
+ }
+
+ /* Randomize first block we write to */
+ get_random_bytes(&i, 2);
+ i %= (kfifo_len(&zone->free_sectors) / 2);
+
+ while (i--) {
+ len = kfifo_out(&zone->free_sectors,
+ (unsigned char *)&block, 2);
+ WARN_ON(len != 2);
+ kfifo_in(&zone->free_sectors, (const unsigned char *)&block, 2);
+ }
+ return 0;
+}
+
+/* Get and automatically initialize an FTL mapping for one zone */
+struct ftl_zone *sm_get_zone(struct sm_ftl *ftl, int zone_num)
+{
+ struct ftl_zone *zone;
+ int error;
+
+ BUG_ON(zone_num >= ftl->zone_count);
+ zone = &ftl->zones[zone_num];
+
+ if (!zone->initialized) {
+ error = sm_init_zone(ftl, zone_num);
+
+ if (error)
+ return ERR_PTR(error);
+ }
+ return zone;
+}
+
+
+/* ----------------- cache handling ------------------------------------------*/
+
+/* Initialize the one block cache */
+void sm_cache_init(struct sm_ftl *ftl)
+{
+ ftl->cache_data_invalid_bitmap = 0xFFFFFFFF;
+ ftl->cache_clean = 1;
+ ftl->cache_zone = -1;
+ ftl->cache_block = -1;
+ /*memset(ftl->cache_data, 0xAA, ftl->block_size);*/
+}
+
+/* Put sector in one block cache */
+void sm_cache_put(struct sm_ftl *ftl, char *buffer, int boffset)
+{
+ memcpy(ftl->cache_data + boffset, buffer, SM_SECTOR_SIZE);
+ clear_bit(boffset / SM_SECTOR_SIZE, &ftl->cache_data_invalid_bitmap);
+ ftl->cache_clean = 0;
+}
+
+/* Read a sector from the cache */
+int sm_cache_get(struct sm_ftl *ftl, char *buffer, int boffset)
+{
+ if (test_bit(boffset / SM_SECTOR_SIZE,
+ &ftl->cache_data_invalid_bitmap))
+ return -1;
+
+ memcpy(buffer, ftl->cache_data + boffset, SM_SECTOR_SIZE);
+ return 0;
+}
+
+/* Write the cache to hardware */
+int sm_cache_flush(struct sm_ftl *ftl)
+{
+ struct ftl_zone *zone;
+
+ int sector_num;
+ uint16_t write_sector;
+ int zone_num = ftl->cache_zone;
+ int block_num;
+
+ if (ftl->cache_clean)
+ return 0;
+
+ if (ftl->unstable)
+ return -EIO;
+
+ BUG_ON(zone_num < 0);
+ zone = &ftl->zones[zone_num];
+ block_num = zone->lba_to_phys_table[ftl->cache_block];
+
+
+ /* Try to read all unread areas of the cache block*/
+ for_each_set_bit(sector_num, &ftl->cache_data_invalid_bitmap,
+ ftl->block_size / SM_SECTOR_SIZE) {
+
+ if (!sm_read_sector(ftl,
+ zone_num, block_num, sector_num * SM_SECTOR_SIZE,
+ ftl->cache_data + sector_num * SM_SECTOR_SIZE, NULL))
+ clear_bit(sector_num,
+ &ftl->cache_data_invalid_bitmap);
+ }
+restart:
+
+ if (ftl->unstable)
+ return -EIO;
+
+ /* If there are no spare blocks, we could still continue by
+ erasing/writing the current block, but for such worn-out media
+ it isn't worth the trouble and the danger */
+ if (kfifo_out(&zone->free_sectors,
+ (unsigned char *)&write_sector, 2) != 2) {
+ dbg("no free sectors for write!");
+ return -EIO;
+ }
+
+
+ if (sm_write_block(ftl, ftl->cache_data, zone_num, write_sector,
+ ftl->cache_block, ftl->cache_data_invalid_bitmap))
+ goto restart;
+
+ /* Update the FTL table */
+ zone->lba_to_phys_table[ftl->cache_block] = write_sector;
+
+ /* Write successful, so erase and free the old block */
+ if (block_num > 0)
+ sm_erase_block(ftl, zone_num, block_num, 1);
+
+ sm_cache_init(ftl);
+ return 0;
+}
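+
+/*
+ * The flush above is effectively a copy-on-write block update: any sectors
+ * the caller never wrote are first read back from the block's current
+ * physical location, then the whole block is written to a fresh physical
+ * block taken from the zone's free-sector FIFO, the LBA to physical table
+ * is repointed, and only then is the old block erased and recycled.
+ */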
+
+
+/* flush timer, runs a second after last write */
+static void sm_cache_flush_timer(unsigned long data)
+{
+ struct sm_ftl *ftl = (struct sm_ftl *)data;
+ queue_work(cache_flush_workqueue, &ftl->flush_work);
+}
+
+/* cache flush work, kicked by timer */
+static void sm_cache_flush_work(struct work_struct *work)
+{
+ struct sm_ftl *ftl = container_of(work, struct sm_ftl, flush_work);
+ mutex_lock(&ftl->mutex);
+ sm_cache_flush(ftl);
+ mutex_unlock(&ftl->mutex);
+ return;
+}
+
+/* ---------------- outside interface -------------------------------------- */
+
+/* outside interface: read a sector */
+static int sm_read(struct mtd_blktrans_dev *dev,
+ unsigned long sect_no, char *buf)
+{
+ struct sm_ftl *ftl = dev->priv;
+ struct ftl_zone *zone;
+ int error = 0, in_cache = 0;
+ int zone_num, block, boffset;
+
+ sm_break_offset(ftl, sect_no << 9, &zone_num, &block, &boffset);
+ mutex_lock(&ftl->mutex);
+
+
+ zone = sm_get_zone(ftl, zone_num);
+ if (IS_ERR(zone)) {
+ error = PTR_ERR(zone);
+ goto unlock;
+ }
+
+ /* Have to look at cache first */
+ if (ftl->cache_zone == zone_num && ftl->cache_block == block) {
+ in_cache = 1;
+ if (!sm_cache_get(ftl, buf, boffset))
+ goto unlock;
+ }
+
+ /* Translate the block and return if it doesn't exist in the table */
+ block = zone->lba_to_phys_table[block];
+
+ if (block == -1) {
+ memset(buf, 0xFF, SM_SECTOR_SIZE);
+ goto unlock;
+ }
+
+ if (sm_read_sector(ftl, zone_num, block, boffset, buf, NULL)) {
+ error = -EIO;
+ goto unlock;
+ }
+
+ if (in_cache)
+ sm_cache_put(ftl, buf, boffset);
+unlock:
+ mutex_unlock(&ftl->mutex);
+ return error;
+}
+
+/* outside interface: write a sector */
+static int sm_write(struct mtd_blktrans_dev *dev,
+ unsigned long sec_no, char *buf)
+{
+ struct sm_ftl *ftl = dev->priv;
+ struct ftl_zone *zone;
+ int error = 0, zone_num, block, boffset; /* error stays 0 on a pure cache hit */
+
+ BUG_ON(ftl->readonly);
+ sm_break_offset(ftl, sec_no << 9, &zone_num, &block, &boffset);
+
+ /* No need for the flush timer to run now */
+ del_timer(&ftl->timer);
+ mutex_lock(&ftl->mutex);
+
+ zone = sm_get_zone(ftl, zone_num);
+ if (IS_ERR(zone)) {
+ error = PTR_ERR(zone);
+ goto unlock;
+ }
+
+ /* If entry is not in cache, flush it */
+ if (ftl->cache_block != block || ftl->cache_zone != zone_num) {
+
+ error = sm_cache_flush(ftl);
+ if (error)
+ goto unlock;
+
+ ftl->cache_block = block;
+ ftl->cache_zone = zone_num;
+ }
+
+ sm_cache_put(ftl, buf, boffset);
+unlock:
+ mod_timer(&ftl->timer, jiffies + msecs_to_jiffies(cache_timeout));
+ mutex_unlock(&ftl->mutex);
+ return error;
+}
+
+/* outside interface: flush everything */
+static int sm_flush(struct mtd_blktrans_dev *dev)
+{
+ struct sm_ftl *ftl = dev->priv;
+ int retval;
+
+ mutex_lock(&ftl->mutex);
+ retval = sm_cache_flush(ftl);
+ mutex_unlock(&ftl->mutex);
+ return retval;
+}
+
+/* outside interface: device is released */
+static int sm_release(struct mtd_blktrans_dev *dev)
+{
+ struct sm_ftl *ftl = dev->priv;
+
+ mutex_lock(&ftl->mutex);
+ del_timer_sync(&ftl->timer);
+ cancel_work_sync(&ftl->flush_work);
+ sm_cache_flush(ftl);
+ mutex_unlock(&ftl->mutex);
+ return 0;
+}
+
+/* outside interface: get geometry */
+static int sm_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
+{
+ struct sm_ftl *ftl = dev->priv;
+ geo->heads = ftl->heads;
+ geo->sectors = ftl->sectors;
+ geo->cylinders = ftl->cylinders;
+ return 0;
+}
+
+/* external interface: main initialization function */
+static void sm_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
+{
+ struct mtd_blktrans_dev *trans;
+ struct sm_ftl *ftl;
+
+ /* Allocate & initialize our private structure */
+ ftl = kzalloc(sizeof(struct sm_ftl), GFP_KERNEL);
+ if (!ftl)
+ goto error1;
+
+
+ mutex_init(&ftl->mutex);
+ setup_timer(&ftl->timer, sm_cache_flush_timer, (unsigned long)ftl);
+ INIT_WORK(&ftl->flush_work, sm_cache_flush_work);
+ init_completion(&ftl->erase_completion);
+
+ /* Read media information */
+ if (sm_get_media_info(ftl, mtd)) {
+ dbg("found unsupported mtd device, aborting");
+ goto error2;
+ }
+
+
+ /* Allocate temporary CIS buffer for read retry support */
+ ftl->cis_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);
+ if (!ftl->cis_buffer)
+ goto error2;
+
+ /* Allocate zone array, it will be initialized on demand */
+ ftl->zones = kzalloc(sizeof(struct ftl_zone) * ftl->zone_count,
+ GFP_KERNEL);
+ if (!ftl->zones)
+ goto error3;
+
+ /* Allocate the cache*/
+ ftl->cache_data = kzalloc(ftl->block_size, GFP_KERNEL);
+
+ if (!ftl->cache_data)
+ goto error4;
+
+ sm_cache_init(ftl);
+
+
+ /* Allocate upper layer structure and initialize it */
+ trans = kzalloc(sizeof(struct mtd_blktrans_dev), GFP_KERNEL);
+ if (!trans)
+ goto error5;
+
+ ftl->trans = trans;
+ trans->priv = ftl;
+
+ trans->tr = tr;
+ trans->mtd = mtd;
+ trans->devnum = -1;
+ trans->size = (ftl->block_size * ftl->max_lba * ftl->zone_count) >> 9;
+ trans->readonly = ftl->readonly;
+
+ if (sm_find_cis(ftl)) {
+ dbg("CIS not found on mtd device, aborting");
+ goto error6;
+ }
+
+ ftl->disk_attributes = sm_create_sysfs_attributes(ftl);
+ trans->disk_attributes = ftl->disk_attributes;
+
+ sm_printk("Found %d MiB xD/SmartMedia FTL on mtd%d",
+ (int)(mtd->size / (1024 * 1024)), mtd->index);
+
+ dbg("FTL layout:");
+ dbg("%d zone(s), each consists of %d blocks (+%d spares)",
+ ftl->zone_count, ftl->max_lba,
+ ftl->zone_size - ftl->max_lba);
+ dbg("each block consists of %d bytes",
+ ftl->block_size);
+
+
+ /* Register device*/
+ if (add_mtd_blktrans_dev(trans)) {
+ dbg("error in mtdblktrans layer");
+ goto error6;
+ }
+ return;
+error6:
+ kfree(trans);
+error5:
+ kfree(ftl->cache_data);
+error4:
+ kfree(ftl->zones);
+error3:
+ kfree(ftl->cis_buffer);
+error2:
+ kfree(ftl);
+error1:
+ return;
+}
+
+/* main interface: device {surprise,} removal */
+static void sm_remove_dev(struct mtd_blktrans_dev *dev)
+{
+ struct sm_ftl *ftl = dev->priv;
+ int i;
+
+ del_mtd_blktrans_dev(dev);
+ ftl->trans = NULL;
+
+ for (i = 0 ; i < ftl->zone_count; i++) {
+
+ if (!ftl->zones[i].initialized)
+ continue;
+
+ kfree(ftl->zones[i].lba_to_phys_table);
+ kfifo_free(&ftl->zones[i].free_sectors);
+ }
+
+ sm_delete_sysfs_attributes(ftl);
+ kfree(ftl->cis_buffer);
+ kfree(ftl->zones);
+ kfree(ftl->cache_data);
+ kfree(ftl);
+}
+
+static struct mtd_blktrans_ops sm_ftl_ops = {
+ .name = "smblk",
+ .major = -1,
+ .part_bits = SM_FTL_PARTN_BITS,
+ .blksize = SM_SECTOR_SIZE,
+ .getgeo = sm_getgeo,
+
+ .add_mtd = sm_add_mtd,
+ .remove_dev = sm_remove_dev,
+
+ .readsect = sm_read,
+ .writesect = sm_write,
+
+ .flush = sm_flush,
+ .release = sm_release,
+
+ .owner = THIS_MODULE,
+};
+
+static __init int sm_module_init(void)
+{
+ int error = 0;
+ cache_flush_workqueue = create_freezeable_workqueue("smflush");
+
+ if (!cache_flush_workqueue)
+ return -ENOMEM;
+
+ error = register_mtd_blktrans(&sm_ftl_ops);
+ if (error)
+ destroy_workqueue(cache_flush_workqueue);
+ return error;
+
+}
+
+static void __exit sm_module_exit(void)
+{
+ destroy_workqueue(cache_flush_workqueue);
+ deregister_mtd_blktrans(&sm_ftl_ops);
+}
+
+module_init(sm_module_init);
+module_exit(sm_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
+MODULE_DESCRIPTION("Smartmedia/xD mtd translation layer");
diff --git a/drivers/mtd/sm_ftl.h b/drivers/mtd/sm_ftl.h
new file mode 100644
index 000000000000..e30e48e7f63d
--- /dev/null
+++ b/drivers/mtd/sm_ftl.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * SmartMedia/xD translation layer
+ *
+ * Based loosely on ssfdc.c which is
+ * © 2005 Eptar srl
+ * Author: Claudio Lanconelli <lanconelli.claudio@eptar.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/mtd/blktrans.h>
+#include <linux/kfifo.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <linux/mtd/mtd.h>
+
+
+
+struct ftl_zone {
+ int initialized;
+ int16_t *lba_to_phys_table; /* LBA to physical table */
+ struct kfifo free_sectors; /* queue of free sectors */
+};
+
+struct sm_ftl {
+ struct mtd_blktrans_dev *trans;
+
+ struct mutex mutex; /* protects the structure */
+ struct ftl_zone *zones; /* FTL tables for each zone */
+
+ /* Media information */
+ int block_size; /* block size in bytes */
+ int zone_size; /* zone size in blocks */
+ int zone_count; /* number of zones */
+ int max_lba; /* maximum lba in a zone */
+ int smallpagenand; /* 256 bytes/page nand */
+ int readonly; /* is FS readonly */
+ int unstable;
+ int cis_block; /* CIS block location */
+ int cis_boffset; /* CIS offset in the block */
+ int cis_page_offset; /* CIS offset in the page */
+ void *cis_buffer; /* tmp buffer for cis reads */
+
+ /* Cache */
+ int cache_block; /* block number of cached block */
+ int cache_zone; /* zone of cached block */
+ unsigned char *cache_data; /* cached block data */
+ long unsigned int cache_data_invalid_bitmap;
+ int cache_clean;
+ struct work_struct flush_work;
+ struct timer_list timer;
+
+ /* Async erase stuff */
+ struct completion erase_completion;
+
+ /* Geometry stuff */
+ int heads;
+ int sectors;
+ int cylinders;
+
+ struct attribute_group *disk_attributes;
+};
+
+struct chs_entry {
+ unsigned long size;
+ unsigned short cyl;
+ unsigned char head;
+ unsigned char sec;
+};
+
+
+#define SM_FTL_PARTN_BITS 3
+
+#define sm_printk(format, ...) \
+ printk(KERN_WARNING "sm_ftl" ": " format "\n", ## __VA_ARGS__)
+
+#define dbg(format, ...) \
+ if (debug) \
+ printk(KERN_DEBUG "sm_ftl" ": " format "\n", ## __VA_ARGS__)
+
+#define dbg_verbose(format, ...) \
+ if (debug > 1) \
+ printk(KERN_DEBUG "sm_ftl" ": " format "\n", ## __VA_ARGS__)
+
+
+static void sm_erase_callback(struct erase_info *self);
+static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
+ int put_free);
+static void sm_mark_block_bad(struct sm_ftl *ftl, int zone_num, int block);
+
+static int sm_recheck_media(struct sm_ftl *ftl);
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c
index 3f67e00d98e0..81c4ecdc11f5 100644
--- a/drivers/mtd/ssfdc.c
+++ b/drivers/mtd/ssfdc.c
@@ -375,7 +375,6 @@ static void ssfdcr_remove_dev(struct mtd_blktrans_dev *dev)
del_mtd_blktrans_dev(dev);
kfree(ssfdc->logic_block_map);
- kfree(ssfdc);
}
static int ssfdcr_readsect(struct mtd_blktrans_dev *dev,
diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig
index 0a8c7ea764ae..f702a163d8df 100644
--- a/drivers/mtd/ubi/Kconfig
+++ b/drivers/mtd/ubi/Kconfig
@@ -27,7 +27,7 @@ config MTD_UBI_WL_THRESHOLD
The default value should be OK for SLC NAND flashes, NOR flashes and
other flashes which have eraseblock life-cycle 100000 or more.
However, in case of MLC NAND flashes which typically have eraseblock
- life-cycle less then 10000, the threshold should be lessened (e.g.,
+ life-cycle less than 10000, the threshold should be lessened (e.g.,
to 128 or 256, although it does not have to be power of 2).
config MTD_UBI_BEB_RESERVE
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 55c726dde942..13b05cb33b08 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -42,7 +42,6 @@
#include <linux/miscdevice.h>
#include <linux/log2.h>
#include <linux/kthread.h>
-#include <linux/reboot.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "ubi.h"
@@ -50,6 +49,12 @@
/* Maximum length of the 'mtd=' parameter */
#define MTD_PARAM_LEN_MAX 64
+#ifdef CONFIG_MTD_UBI_MODULE
+#define ubi_is_module() 1
+#else
+#define ubi_is_module() 0
+#endif
+
/**
* struct mtd_dev_param - MTD device parameter description data structure.
* @name: MTD character device node path, MTD device name, or MTD device number
@@ -832,34 +837,6 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
}
/**
- * ubi_reboot_notifier - halt UBI transactions immediately prior to a reboot.
- * @n: reboot notifier object
- * @state: SYS_RESTART, SYS_HALT, or SYS_POWER_OFF
- * @cmd: pointer to command string for RESTART2
- *
- * This function stops the UBI background thread so that the flash device
- * remains quiescent when Linux restarts the system. Any queued work will be
- * discarded, but this function will block until do_work() finishes if an
- * operation is already in progress.
- *
- * This function solves a real-life problem observed on NOR flashes when an
- * PEB erase operation starts, then the system is rebooted before the erase is
- * finishes, and the boot loader gets confused and dies. So we prefer to finish
- * the ongoing operation before rebooting.
- */
-static int ubi_reboot_notifier(struct notifier_block *n, unsigned long state,
- void *cmd)
-{
- struct ubi_device *ubi;
-
- ubi = container_of(n, struct ubi_device, reboot_notifier);
- if (ubi->bgt_thread)
- kthread_stop(ubi->bgt_thread);
- ubi_sync(ubi->ubi_num);
- return NOTIFY_DONE;
-}
-
-/**
* ubi_attach_mtd_dev - attach an MTD device.
* @mtd: MTD device description object
* @ubi_num: number to assign to the new UBI device
@@ -1016,11 +993,6 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
wake_up_process(ubi->bgt_thread);
spin_unlock(&ubi->wl_lock);
- /* Flash device priority is 0 - UBI needs to shut down first */
- ubi->reboot_notifier.priority = 1;
- ubi->reboot_notifier.notifier_call = ubi_reboot_notifier;
- register_reboot_notifier(&ubi->reboot_notifier);
-
ubi_devices[ubi_num] = ubi;
ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
return ubi_num;
@@ -1091,7 +1063,6 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
* Before freeing anything, we have to stop the background thread to
* prevent it from doing anything on this device while we are freeing.
*/
- unregister_reboot_notifier(&ubi->reboot_notifier);
if (ubi->bgt_thread)
kthread_stop(ubi->bgt_thread);
@@ -1241,9 +1212,24 @@ static int __init ubi_init(void)
p->vid_hdr_offs);
mutex_unlock(&ubi_devices_mutex);
if (err < 0) {
- put_mtd_device(mtd);
ubi_err("cannot attach mtd%d", mtd->index);
- goto out_detach;
+ put_mtd_device(mtd);
+
+ /*
+ * Originally UBI stopped initializing on any error.
+ * However, later on it was found out that this
+ * behavior is not very good when UBI is compiled into
+ * the kernel and the MTD devices to attach are passed
+ * through the command line. Indeed, UBI failure
+ * stopped whole boot sequence.
+ *
+ * To fix this, we changed the behavior for the
+ * non-module case, but preserved the old behavior for
+ * the module case, just for compatibility. This is a
+ * little inconsistent, though.
+ */
+ if (ubi_is_module())
+ goto out_detach;
}
}
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 533b1a4b9af1..4b979e34b159 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -64,9 +64,9 @@
* device, e.g., make @ubi->min_io_size = 512 in the example above?
*
* A: because when writing a sub-page, MTD still writes a full 2K page but the
- * bytes which are no relevant to the sub-page are 0xFF. So, basically, writing
- * 4x512 sub-pages is 4 times slower then writing one 2KiB NAND page. Thus, we
- * prefer to use sub-pages only for EV and VID headers.
+ * bytes which are not relevant to the sub-page are 0xFF. So, basically,
+ * writing 4x512 sub-pages is 4 times slower than writing one 2KiB NAND page.
+ * Thus, we prefer to use sub-pages only for EC and VID headers.
*
* As it was noted above, the VID header may start at a non-aligned offset.
* For example, in case of a 2KiB page NAND flash with a 512 bytes sub-page,
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 17f287decc36..69fa4ef03c53 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -488,7 +488,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_write);
*
* This function changes the contents of a logical eraseblock atomically. @buf
* has to contain new logical eraseblock data, and @len - the length of the
- * data, which has to be aligned. The length may be shorter then the logical
+ * data, which has to be aligned. The length may be shorter than the logical
* eraseblock size, ant the logical eraseblock may be appended to more times
* later on. This function guarantees that in case of an unclean reboot the old
* contents is preserved. Returns zero in case of success and a negative error
@@ -571,7 +571,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_erase);
*
* This function un-maps logical eraseblock @lnum and schedules the
* corresponding physical eraseblock for erasure, so that it will eventually be
- * physically erased in background. This operation is much faster then the
+ * physically erased in background. This operation is much faster than the
* erase operation.
*
* Unlike erase, the un-map operation does not guarantee that the logical
@@ -590,7 +590,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_erase);
*
* The main and obvious use-case of this function is when the contents of a
* logical eraseblock has to be re-written. Then it is much more efficient to
- * first un-map it, then write new data, rather then first erase it, then write
+ * first un-map it, then write new data, rather than first erase it, then write
* new data. Note, once new data has been written to the logical eraseblock,
* UBI guarantees that the old contents has gone forever. In other words, if an
* unclean reboot happens after the logical eraseblock has been un-mapped and
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index dc5f688699da..aed19f33b8f3 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -231,7 +231,7 @@ static struct ubi_scan_volume *add_volume(struct ubi_scan_info *si, int vol_id,
* case of success this function returns a positive value, in case of failure, a
* negative error code is returned. The success return codes use the following
* bits:
- * o bit 0 is cleared: the first PEB (described by @seb) is newer then the
+ * o bit 0 is cleared: the first PEB (described by @seb) is newer than the
* second PEB (described by @pnum and @vid_hdr);
* o bit 0 is set: the second PEB is newer;
* o bit 1 is cleared: no bit-flips were detected in the newer LEB;
@@ -452,7 +452,7 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
if (cmp_res & 1) {
/*
- * This logical eraseblock is newer then the one
+ * This logical eraseblock is newer than the one
* found earlier.
*/
err = validate_vid_hdr(vid_hdr, sv, pnum);
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 5176d4886518..a637f0283add 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -350,7 +350,6 @@ struct ubi_wl_entry;
* @bgt_thread: background thread description object
* @thread_enabled: if the background thread is enabled
* @bgt_name: background thread name
- * @reboot_notifier: notifier to terminate background thread before rebooting
*
* @flash_size: underlying MTD device size (in bytes)
* @peb_count: count of physical eraseblocks on the MTD device
@@ -436,7 +435,6 @@ struct ubi_device {
struct task_struct *bgt_thread;
int thread_enabled;
char bgt_name[sizeof(UBI_BGT_NAME_PATTERN)+2];
- struct notifier_block reboot_notifier;
/* I/O sub-system's stuff */
long long flash_size;
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index cd90ff3b76b1..14c10bed94ee 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -414,7 +414,7 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
* 0 contains more recent information.
*
* So the plan is to first check LEB 0. Then
- * a. if LEB 0 is OK, it must be containing the most resent data; then
+ * a. if LEB 0 is OK, it must be containing the most recent data; then
* we compare it with LEB 1, and if they are different, we copy LEB
* 0 to LEB 1;
* b. if LEB 0 is corrupted, but LEB 1 has to be OK, and we copy LEB 1
@@ -848,7 +848,7 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
goto out_free;
/*
- * Get sure that the scanning information is consistent to the
+ * Make sure that the scanning information is consistent with the
* information stored in the volume table.
*/
err = check_scanning_info(ubi, si);
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index f64ddabd4ac8..ee7b1d8fbb92 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -350,7 +350,7 @@ static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
* @max: highest possible erase counter
*
* This function looks for a wear leveling entry with erase counter closest to
- * @max and less then @max.
+ * @max and less than @max.
*/
static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
{
diff --git a/drivers/net/3c501.c b/drivers/net/3c501.c
index 3ea42ff17657..4fed2a88243b 100644
--- a/drivers/net/3c501.c
+++ b/drivers/net/3c501.c
@@ -480,7 +480,6 @@ static netdev_tx_t el_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* fire ... Trigger xmit. */
outb(AX_XMIT, AX_CMD);
lp->loading = 0;
- dev->trans_start = jiffies;
if (el_debug > 2)
pr_debug(" queued xmit.\n");
dev_kfree_skb(skb);
diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c
index 66e0323c1839..b74a0eadbd6c 100644
--- a/drivers/net/3c503.c
+++ b/drivers/net/3c503.c
@@ -380,6 +380,12 @@ out:
return retval;
}
+static irqreturn_t el2_probe_interrupt(int irq, void *seen)
+{
+ *(bool *)seen = true;
+ return IRQ_HANDLED;
+}
+
static int
el2_open(struct net_device *dev)
{
@@ -391,23 +397,35 @@ el2_open(struct net_device *dev)
outb(EGACFR_NORM, E33G_GACFR); /* Enable RAM and interrupts. */
do {
- retval = request_irq(*irqp, NULL, 0, "bogus", dev);
- if (retval >= 0) {
+ bool seen;
+
+ retval = request_irq(*irqp, el2_probe_interrupt, 0,
+ dev->name, &seen);
+ if (retval == -EBUSY)
+ continue;
+ if (retval < 0)
+ goto err_disable;
+
/* Twinkle the interrupt, and check if it's seen. */
- unsigned long cookie = probe_irq_on();
+ seen = false;
+ smp_wmb();
outb_p(0x04 << ((*irqp == 9) ? 2 : *irqp), E33G_IDCFR);
outb_p(0x00, E33G_IDCFR);
- if (*irqp == probe_irq_off(cookie) && /* It's a good IRQ line! */
- ((retval = request_irq(dev->irq = *irqp,
- eip_interrupt, 0,
- dev->name, dev)) == 0))
- break;
- } else {
- if (retval != -EBUSY)
- return retval;
- }
+ msleep(1);
+ free_irq(*irqp, el2_probe_interrupt);
+ if (!seen)
+ continue;
+
+ retval = request_irq(dev->irq = *irqp, eip_interrupt, 0,
+ dev->name, dev);
+ if (retval == -EBUSY)
+ continue;
+ if (retval < 0)
+ goto err_disable;
} while (*++irqp);
+
if (*irqp == 0) {
+ err_disable:
outb(EGACFR_IRQOFF, E33G_GACFR); /* disable interrupts. */
return -EAGAIN;
}
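
The el2_open() rework above replaces the old probe_irq_on()/probe_irq_off() autoprobe with a throw-away handler that simply records whether the line fired. A hedged sketch of that technique in isolation, with hypothetical probe_handler() and trigger_board_irq() names:

	#include <linux/interrupt.h>
	#include <linux/delay.h>
	#include <linux/errno.h>

	static irqreturn_t probe_handler(int irq, void *seen)
	{
		*(bool *)seen = true;		/* the line fired */
		return IRQ_HANDLED;
	}

	/* Returns 0 if @irq is usable, the request_irq() error if the line
	 * could not be claimed, or -ENODEV if the interrupt never arrived. */
	static int probe_one_irq(unsigned int irq, void (*trigger_board_irq)(void))
	{
		bool seen = false;
		int err;

		err = request_irq(irq, probe_handler, 0, "irq-probe", &seen);
		if (err)
			return err;

		trigger_board_irq();		/* ask the hardware to raise the IRQ */
		msleep(1);			/* let the handler run */
		free_irq(irq, &seen);

		return seen ? 0 : -ENODEV;
	}
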
diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c
index 29b8d1d63bde..88d766ee0e1b 100644
--- a/drivers/net/3c505.c
+++ b/drivers/net/3c505.c
@@ -1055,7 +1055,7 @@ static void elp_timeout(struct net_device *dev)
(stat & ACRF) ? "interrupt" : "command");
if (elp_debug >= 1)
pr_debug("%s: status %#02x\n", dev->name, stat);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
dev->stats.tx_dropped++;
netif_wake_queue(dev);
}
@@ -1093,11 +1093,6 @@ static netdev_tx_t elp_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (elp_debug >= 3)
pr_debug("%s: packet of length %d sent\n", dev->name, (int) skb->len);
- /*
- * start the transmit timeout
- */
- dev->trans_start = jiffies;
-
prime_rx(dev);
spin_unlock_irqrestore(&adapter->lock, flags);
netif_start_queue(dev);
@@ -1216,7 +1211,7 @@ static int elp_close(struct net_device *dev)
static void elp_set_mc_list(struct net_device *dev)
{
elp_device *adapter = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int i;
unsigned long flags;
@@ -1231,8 +1226,9 @@ static void elp_set_mc_list(struct net_device *dev)
adapter->tx_pcb.command = CMD_LOAD_MULTICAST_LIST;
adapter->tx_pcb.length = 6 * netdev_mc_count(dev);
i = 0;
- netdev_for_each_mc_addr(dmi, dev)
- memcpy(adapter->tx_pcb.data.multicast[i++], dmi->dmi_addr, 6);
+ netdev_for_each_mc_addr(ha, dev)
+ memcpy(adapter->tx_pcb.data.multicast[i++],
+ ha->addr, 6);
adapter->got[CMD_LOAD_MULTICAST_LIST] = 0;
if (!send_pcb(dev, &adapter->tx_pcb))
pr_err("%s: couldn't send set_multicast command\n", dev->name);
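
This is the first of many conversions in this series from struct dev_mc_list / dmi_addr to struct netdev_hw_addr / addr; netdev_for_each_mc_addr() keeps the same shape, only the element type and field name change. A minimal sketch of the new-style walk feeding a two-word hash filter (build_mc_filter() and FILTER_WORDS are hypothetical):

	#include <linux/netdevice.h>
	#include <linux/etherdevice.h>
	#include <linux/string.h>

	#define FILTER_WORDS 2			/* hypothetical 2 x 32-bit hash filter */

	static void build_mc_filter(struct net_device *dev, u32 *mc_filter)
	{
		struct netdev_hw_addr *ha;	/* was: struct dev_mc_list *dmi */

		memset(mc_filter, 0, FILTER_WORDS * sizeof(u32));
		netdev_for_each_mc_addr(ha, dev) {
			/* ha->addr replaces dmi->dmi_addr */
			int bit = ether_crc(ETH_ALEN, ha->addr) >> 26;

			mc_filter[bit >> 5] |= 1 << (bit & 31);
		}
	}
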
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c
index b32b7a1710b7..c4e272fbc2cc 100644
--- a/drivers/net/3c507.c
+++ b/drivers/net/3c507.c
@@ -449,7 +449,6 @@ static int __init el16_probe1(struct net_device *dev, int ioaddr)
pr_debug("%s", version);
lp = netdev_priv(dev);
- memset(lp, 0, sizeof(*lp));
spin_lock_init(&lp->lock);
lp->base = ioremap(dev->mem_start, RX_BUF_END);
if (!lp->base) {
@@ -505,7 +504,7 @@ static void el16_tx_timeout (struct net_device *dev)
outb (0, ioaddr + SIGNAL_CA); /* Issue channel-attn. */
lp->last_restart = dev->stats.tx_packets;
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue (dev);
}
@@ -529,7 +528,6 @@ static netdev_tx_t el16_send_packet (struct sk_buff *skb,
hardware_send_packet (dev, buf, skb->len, length - skb->len);
- dev->trans_start = jiffies;
/* Enable the 82586 interrupt input. */
outb (0x84, ioaddr + MISC_CTRL);
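
The memset() of the private area dropped above (and again in 3c523 and ks8695 below) relies on alloc_etherdev() handing back zeroed private storage, so the explicit clear is redundant. A small hedged sketch with a hypothetical foo_priv:

	#include <linux/netdevice.h>
	#include <linux/etherdevice.h>
	#include <linux/spinlock.h>

	struct foo_priv {
		spinlock_t lock;
		void __iomem *base;
	};

	static struct net_device *foo_alloc(void)
	{
		struct net_device *dev = alloc_etherdev(sizeof(struct foo_priv));
		struct foo_priv *lp;

		if (!dev)
			return NULL;

		lp = netdev_priv(dev);		/* already zeroed by alloc_etherdev() */
		spin_lock_init(&lp->lock);	/* so no memset(lp, 0, ...) needed */
		return dev;
	}
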
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index ab9bb3c52002..54deaa91e7c6 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -807,7 +807,7 @@ el3_tx_timeout (struct net_device *dev)
dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS),
inw(ioaddr + TX_FREE));
dev->stats.tx_errors++;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
/* Issue TX_RESET and TX_START commands. */
outw(TxReset, ioaddr + EL3_CMD);
outw(TxEnable, ioaddr + EL3_CMD);
@@ -868,7 +868,6 @@ el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* ... and the packet rounded to a doubleword. */
outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
- dev->trans_start = jiffies;
if (inw(ioaddr + TX_FREE) > 1536)
netif_start_queue(dev);
else
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index 2e17837be546..569e269f282e 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -992,7 +992,7 @@ static void corkscrew_timeout(struct net_device *dev)
if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress))
break;
outw(TxEnable, ioaddr + EL3_CMD);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
dev->stats.tx_errors++;
dev->stats.tx_dropped++;
netif_wake_queue(dev);
@@ -1055,7 +1055,6 @@ static netdev_tx_t corkscrew_start_xmit(struct sk_buff *skb,
prev_entry->status &= ~0x80000000;
netif_wake_queue(dev);
}
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
/* Put out the doubleword header... */
@@ -1091,7 +1090,6 @@ static netdev_tx_t corkscrew_start_xmit(struct sk_buff *skb,
outw(SetTxThreshold + (1536 >> 2), ioaddr + EL3_CMD);
#endif /* bus master */
- dev->trans_start = jiffies;
/* Clear the Tx status stack. */
{
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index 1719079cc498..a7b0e5e43a52 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -503,7 +503,6 @@ static int __init do_elmc_probe(struct net_device *dev)
break;
}
- memset(pr, 0, sizeof(struct priv));
pr->slot = slot;
pr_info("%s: 3Com 3c523 Rev 0x%x at %#lx\n", dev->name, (int) revision,
@@ -624,7 +623,7 @@ static int init586(struct net_device *dev)
volatile struct iasetup_cmd_struct *ias_cmd;
volatile struct tdr_cmd_struct *tdr_cmd;
volatile struct mcsetup_cmd_struct *mc_cmd;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int num_addrs = netdev_mc_count(dev);
ptr = (void *) ((char *) p->scb + sizeof(struct scb_struct));
@@ -787,8 +786,9 @@ static int init586(struct net_device *dev)
mc_cmd->cmd_link = 0xffff;
mc_cmd->mc_cnt = num_addrs * 6;
i = 0;
- netdev_for_each_mc_addr(dmi, dev)
- memcpy((char *) mc_cmd->mc_list[i++], dmi->dmi_addr, 6);
+ netdev_for_each_mc_addr(ha, dev)
+ memcpy((char *) mc_cmd->mc_list[i++],
+ ha->addr, 6);
p->scb->cbl_offset = make16(mc_cmd);
p->scb->cmd = CUC_START;
elmc_id_attn586();
@@ -1152,7 +1152,6 @@ static netdev_tx_t elmc_send_packet(struct sk_buff *skb, struct net_device *dev)
p->scb->cmd = CUC_START;
p->xmit_cmds[0]->cmd_status = 0;
elmc_attn586();
- dev->trans_start = jiffies;
if (!i) {
dev_kfree_skb(skb);
}
@@ -1176,7 +1175,6 @@ static netdev_tx_t elmc_send_packet(struct sk_buff *skb, struct net_device *dev)
p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0;
p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0]));
- dev->trans_start = jiffies;
p->nop_point = next_nop;
dev_kfree_skb(skb);
#endif
@@ -1190,7 +1188,6 @@ static netdev_tx_t elmc_send_packet(struct sk_buff *skb, struct net_device *dev)
= make16((p->nop_cmds[next_nop]));
p->nop_cmds[next_nop]->cmd_status = 0;
p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count]));
- dev->trans_start = jiffies;
p->xmit_count = next_nop;
if (p->xmit_count != p->xmit_last)
netif_wake_queue(dev);
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
index 5c07b147ec99..38395dfa4963 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/3c527.c
@@ -1533,7 +1533,7 @@ static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
{
unsigned char block[62];
unsigned char *bp;
- struct dev_mc_list *dmc;
+ struct netdev_hw_addr *ha;
if(retry==0)
lp->mc_list_valid = 0;
@@ -1543,8 +1543,8 @@ static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
block[0]=netdev_mc_count(dev);
bp=block+2;
- netdev_for_each_mc_addr(dmc, dev) {
- memcpy(bp, dmc->dmi_addr, 6);
+ netdev_for_each_mc_addr(ha, dev) {
+ memcpy(bp, ha->addr, 6);
bp+=6;
}
if(mc32_command_nowait(dev, 2, block,
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 5f92fdbe66e2..dab2afac6ddc 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -1917,7 +1917,7 @@ static void vortex_tx_timeout(struct net_device *dev)
/* Issue Tx Enable */
iowrite16(TxEnable, ioaddr + EL3_CMD);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
/* Switch to register set 7 for normal use. */
EL3WINDOW(7);
@@ -2063,7 +2063,6 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
- dev->trans_start = jiffies;
/* Clear the Tx status stack. */
{
@@ -2129,8 +2128,8 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
int i;
vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
- skb->len-skb->data_len, PCI_DMA_TODEVICE));
- vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len-skb->data_len);
+ skb_headlen(skb), PCI_DMA_TODEVICE));
+ vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb));
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -2174,7 +2173,6 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
iowrite16(DownUnstall, ioaddr + EL3_CMD);
spin_unlock_irqrestore(&vp->lock, flags);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
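
The boomerang_start_xmit change above swaps the open-coded skb->len - skb->data_len for skb_headlen(), which names exactly the linear (non-paged) part of the skb; atl1e and atl1 get the same treatment later in this series. A hedged sketch of mapping the head and the page fragments separately (map_for_tx() is hypothetical, and a real driver would record each address for later unmap and unwind on error):

	#include <linux/skbuff.h>
	#include <linux/dma-mapping.h>
	#include <linux/errno.h>

	static int map_for_tx(struct device *dma_dev, struct sk_buff *skb)
	{
		dma_addr_t busaddr;
		int i;

		/* skb_headlen(skb) == skb->len - skb->data_len: the linear head */
		busaddr = dma_map_single(dma_dev, skb->data, skb_headlen(skb),
					 DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, busaddr))
			return -ENOMEM;

		/* the paged fragments carry the remaining skb->data_len bytes */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			busaddr = dma_map_page(dma_dev, frag->page,
					       frag->page_offset, frag->size,
					       DMA_TO_DEVICE);
			if (dma_mapping_error(dma_dev, busaddr))
				return -ENOMEM;
		}
		return 0;
	}
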
diff --git a/drivers/net/7990.c b/drivers/net/7990.c
index 500e135723bd..561d3d582813 100644
--- a/drivers/net/7990.c
+++ b/drivers/net/7990.c
@@ -262,7 +262,7 @@ static int lance_reset (struct net_device *dev)
load_csrs (lp);
lance_init_ring (dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
status = init_restart_lance (lp);
#ifdef DEBUG_DRIVER
printk ("Lance restart=%d\n", status);
@@ -526,7 +526,7 @@ void lance_tx_timeout(struct net_device *dev)
{
printk("lance_tx_timeout\n");
lance_reset(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue (dev);
}
EXPORT_SYMBOL_GPL(lance_tx_timeout);
@@ -574,7 +574,6 @@ int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
outs++;
/* Kick the lance: transmit now */
WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD);
- dev->trans_start = jiffies;
dev_kfree_skb (skb);
spin_lock_irqsave (&lp->devlock, flags);
@@ -594,7 +593,7 @@ static void lance_load_multicast (struct net_device *dev)
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_init_block *ib = lp->init_block;
volatile u16 *mcast_table = (u16 *)&ib->filter;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
u32 crc;
@@ -609,8 +608,8 @@ static void lance_load_multicast (struct net_device *dev)
ib->filter [1] = 0;
/* Add addresses */
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
/* multicast address? */
if (!(*addrs & 1))
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index a09e6ce3eaa0..cd63b97f3c68 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -882,7 +882,6 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
spin_unlock_irqrestore(&cp->lock, intr_flags);
cpw8(TxPoll, NormalTxPoll);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
@@ -910,11 +909,11 @@ static void __cp_set_rx_mode (struct net_device *dev)
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- netdev_for_each_mc_addr(mclist, dev) {
- int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
rx_mode |= AcceptMulticast;
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index f0d23de32967..4ba72933f0da 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -1716,8 +1716,6 @@ static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb,
RTL_W32_F (TxStatus0 + (entry * sizeof (u32)),
tp->tx_flag | max(len, (unsigned int)ETH_ZLEN));
- dev->trans_start = jiffies;
-
tp->cur_tx++;
if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx)
@@ -2503,11 +2501,11 @@ static void __set_rx_mode (struct net_device *dev)
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- netdev_for_each_mc_addr(mclist, dev) {
- int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
rx_mode |= AcceptMulticast;
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index 56e68db48861..dd8dc15556cb 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -1050,7 +1050,7 @@ static void i596_tx_timeout (struct net_device *dev)
lp->last_restart = dev->stats.tx_packets;
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue (dev);
}
@@ -1060,7 +1060,6 @@ static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct tx_cmd *tx_cmd;
struct i596_tbd *tbd;
short length = skb->len;
- dev->trans_start = jiffies;
DEB(DEB_STARTTX,printk(KERN_DEBUG "%s: i596_start_xmit(%x,%p) called\n",
dev->name, skb->len, skb->data));
@@ -1542,7 +1541,7 @@ static void set_multicast_list(struct net_device *dev)
}
if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
unsigned char *cp;
struct mc_cmd *cmd;
@@ -1552,10 +1551,10 @@ static void set_multicast_list(struct net_device *dev)
cmd->cmd.command = CmdMulticastList;
cmd->mc_cnt = cnt * ETH_ALEN;
cp = cmd->mc_addrs;
- netdev_for_each_mc_addr(dmi, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (!cnt--)
break;
- memcpy(cp, dmi->dmi_addr, ETH_ALEN);
+ memcpy(cp, ha->addr, ETH_ALEN);
if (i596_debug > 1)
DEB(DEB_MULTI,printk(KERN_INFO "%s: Adding address %pM\n",
dev->name, cp));
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 7b832c727f87..ab75ccb2fb37 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -483,7 +483,7 @@ config XTENSA_XT2000_SONIC
This is the driver for the onboard card of the Xtensa XT2000 board.
config MIPS_AU1X00_ENET
- bool "MIPS AU1000 Ethernet support"
+ tristate "MIPS AU1000 Ethernet support"
depends on SOC_AU1X00
select PHYLIB
select CRC32
@@ -1453,20 +1453,6 @@ config FORCEDETH
To compile this driver as a module, choose M here. The module
will be called forcedeth.
-config FORCEDETH_NAPI
- bool "Use Rx Polling (NAPI) (EXPERIMENTAL)"
- depends on FORCEDETH && EXPERIMENTAL
- help
- NAPI is a new driver API designed to reduce CPU and interrupt load
- when the driver is receiving lots of packets from the card. It is
- still somewhat experimental and thus not yet enabled by default.
-
- If your estimated Rx load is 10kpps or more, or if the card will be
- deployed on potentially unfriendly networks (e.g. in a firewall),
- then say Y here.
-
- If in doubt, say N.
-
config CS89x0
tristate "CS89x0 support"
depends on NET_ETHERNET && (ISA || EISA || MACH_IXDP2351 \
@@ -1916,6 +1902,7 @@ config FEC
bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
depends on M523x || M527x || M5272 || M528x || M520x || M532x || \
MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5
+ select PHYLIB
help
Say Y here if you want to use the built-in 10/100 Fast ethernet
controller on some Motorola ColdFire and Freescale i.MX processors.
@@ -2434,8 +2421,8 @@ config MV643XX_ETH
config XILINX_LL_TEMAC
tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
+ depends on PPC || MICROBLAZE
select PHYLIB
- depends on PPC_DCR_NATIVE
help
This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
core used in Xilinx Spartan and Virtex FPGAs
@@ -2618,11 +2605,11 @@ config EHEA
will be called ehea.
config ENIC
- tristate "Cisco 10G Ethernet NIC support"
+ tristate "Cisco VIC Ethernet NIC Support"
depends on PCI && INET
select INET_LRO
help
- This enables the support for the Cisco 10G Ethernet card.
+ This enables the support for the Cisco VIC Ethernet card.
config IXGBE
tristate "Intel(R) 10GbE PCI Express adapters support"
@@ -2862,6 +2849,8 @@ source "drivers/ieee802154/Kconfig"
source "drivers/s390/net/Kconfig"
+source "drivers/net/caif/Kconfig"
+
config XEN_NETDEV_FRONTEND
tristate "Xen network device frontend driver"
depends on XEN
@@ -3180,17 +3169,12 @@ config PPPOATM
config PPPOL2TP
tristate "PPP over L2TP (EXPERIMENTAL)"
- depends on EXPERIMENTAL && PPP && INET
+ depends on EXPERIMENTAL && L2TP && PPP
help
Support for PPP-over-L2TP socket family. L2TP is a protocol
used by ISPs and enterprises to tunnel PPP traffic over UDP
tunnels. L2TP is replacing PPTP for VPN uses.
- This kernel component handles only L2TP data packets: a
- userland daemon handles L2TP the control protocol (tunnel
- and session setup). One such daemon is OpenL2TP
- (http://openl2tp.sourceforge.net/).
-
config SLIP
tristate "SLIP (serial line) support"
---help---
@@ -3277,15 +3261,14 @@ config NET_FC
"SCSI generic support".
config NETCONSOLE
- tristate "Network console logging support (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ tristate "Network console logging support"
---help---
If you want to log kernel messages over the network, enable this.
See <file:Documentation/networking/netconsole.txt> for details.
config NETCONSOLE_DYNAMIC
- bool "Dynamic reconfiguration of logging targets (EXPERIMENTAL)"
- depends on NETCONSOLE && SYSFS && EXPERIMENTAL
+ bool "Dynamic reconfiguration of logging targets"
+ depends on NETCONSOLE && SYSFS
select CONFIGFS_FS
help
This option enables the ability to dynamically reconfigure target
@@ -3319,4 +3302,18 @@ config VMXNET3
To compile this driver as a module, choose M here: the
module will be called vmxnet3.
+config VBUS_ENET
+ tristate "VBUS Ethernet Driver"
+ default n
+ depends on VBUS_PROXY
+ help
+ A virtualized 802.x network device based on the VBUS
+ "virtual-ethernet" interface. It can be used with any
+ hypervisor/kernel that supports the vbus+venet protocol.
+
+config VBUS_ENET_DEBUG
+ bool "Enable Debugging"
+ depends on VBUS_ENET
+ default n
+
endif # NETDEVICES
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 12b280afdd51..8beb1e27191f 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -161,7 +161,7 @@ obj-$(CONFIG_PPP_DEFLATE) += ppp_deflate.o
obj-$(CONFIG_PPP_BSDCOMP) += bsd_comp.o
obj-$(CONFIG_PPP_MPPE) += ppp_mppe.o
obj-$(CONFIG_PPPOE) += pppox.o pppoe.o
-obj-$(CONFIG_PPPOL2TP) += pppox.o pppol2tp.o
+obj-$(CONFIG_PPPOL2TP) += pppox.o
obj-$(CONFIG_SLIP) += slip.o
obj-$(CONFIG_SLHC) += slhc.o
@@ -289,8 +289,10 @@ obj-$(CONFIG_FS_ENET) += fs_enet/
obj-$(CONFIG_NETXEN_NIC) += netxen/
obj-$(CONFIG_NIU) += niu.o
obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
+obj-$(CONFIG_VBUS_ENET) += vbus-enet.o
obj-$(CONFIG_SFC) += sfc/
obj-$(CONFIG_WIMAX) += wimax/
+obj-$(CONFIG_CAIF) += caif/
obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/
diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c
index ed5e9742be2c..ecaa28c6f505 100644
--- a/drivers/net/a2065.c
+++ b/drivers/net/a2065.c
@@ -525,7 +525,7 @@ static inline int lance_reset (struct net_device *dev)
load_csrs (lp);
lance_init_ring (dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_start_queue(dev);
status = init_restart_lance (lp);
@@ -588,7 +588,6 @@ static netdev_tx_t lance_start_xmit (struct sk_buff *skb,
/* Kick the lance: transmit now */
ll->rdp = LE_C0_INEA | LE_C0_TDMD;
- dev->trans_start = jiffies;
dev_kfree_skb (skb);
local_irq_restore(flags);
@@ -602,7 +601,7 @@ static void lance_load_multicast (struct net_device *dev)
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_init_block *ib = lp->init_block;
volatile u16 *mcast_table = (u16 *)&ib->filter;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
u32 crc;
@@ -617,8 +616,8 @@ static void lance_load_multicast (struct net_device *dev)
ib->filter [1] = 0;
/* Add addresses */
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
/* multicast address? */
if (!(*addrs & 1))
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index 97a3dfd94dfa..1328eb9b841d 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -661,7 +661,7 @@ static void __devexit acenic_remove_one(struct pci_dev *pdev)
dma_addr_t mapping;
ringp = &ap->skb->rx_std_skbuff[i];
- mapping = pci_unmap_addr(ringp, mapping);
+ mapping = dma_unmap_addr(ringp, mapping);
pci_unmap_page(ap->pdev, mapping,
ACE_STD_BUFSIZE,
PCI_DMA_FROMDEVICE);
@@ -681,7 +681,7 @@ static void __devexit acenic_remove_one(struct pci_dev *pdev)
dma_addr_t mapping;
ringp = &ap->skb->rx_mini_skbuff[i];
- mapping = pci_unmap_addr(ringp,mapping);
+ mapping = dma_unmap_addr(ringp,mapping);
pci_unmap_page(ap->pdev, mapping,
ACE_MINI_BUFSIZE,
PCI_DMA_FROMDEVICE);
@@ -700,7 +700,7 @@ static void __devexit acenic_remove_one(struct pci_dev *pdev)
dma_addr_t mapping;
ringp = &ap->skb->rx_jumbo_skbuff[i];
- mapping = pci_unmap_addr(ringp, mapping);
+ mapping = dma_unmap_addr(ringp, mapping);
pci_unmap_page(ap->pdev, mapping,
ACE_JUMBO_BUFSIZE,
PCI_DMA_FROMDEVICE);
@@ -1683,7 +1683,7 @@ static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs)
ACE_STD_BUFSIZE,
PCI_DMA_FROMDEVICE);
ap->skb->rx_std_skbuff[idx].skb = skb;
- pci_unmap_addr_set(&ap->skb->rx_std_skbuff[idx],
+ dma_unmap_addr_set(&ap->skb->rx_std_skbuff[idx],
mapping, mapping);
rd = &ap->rx_std_ring[idx];
@@ -1744,7 +1744,7 @@ static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs)
ACE_MINI_BUFSIZE,
PCI_DMA_FROMDEVICE);
ap->skb->rx_mini_skbuff[idx].skb = skb;
- pci_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx],
+ dma_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx],
mapping, mapping);
rd = &ap->rx_mini_ring[idx];
@@ -1800,7 +1800,7 @@ static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs)
ACE_JUMBO_BUFSIZE,
PCI_DMA_FROMDEVICE);
ap->skb->rx_jumbo_skbuff[idx].skb = skb;
- pci_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx],
+ dma_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx],
mapping, mapping);
rd = &ap->rx_jumbo_ring[idx];
@@ -2013,7 +2013,7 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
skb = rip->skb;
rip->skb = NULL;
pci_unmap_page(ap->pdev,
- pci_unmap_addr(rip, mapping),
+ dma_unmap_addr(rip, mapping),
mapsize,
PCI_DMA_FROMDEVICE);
skb_put(skb, retdesc->size);
@@ -2078,18 +2078,16 @@ static inline void ace_tx_int(struct net_device *dev,
do {
struct sk_buff *skb;
- dma_addr_t mapping;
struct tx_ring_info *info;
info = ap->skb->tx_skbuff + idx;
skb = info->skb;
- mapping = pci_unmap_addr(info, mapping);
- if (mapping) {
- pci_unmap_page(ap->pdev, mapping,
- pci_unmap_len(info, maplen),
+ if (dma_unmap_len(info, maplen)) {
+ pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping),
+ dma_unmap_len(info, maplen),
PCI_DMA_TODEVICE);
- pci_unmap_addr_set(info, mapping, 0);
+ dma_unmap_len_set(info, maplen, 0);
}
if (skb) {
@@ -2377,14 +2375,12 @@ static int ace_close(struct net_device *dev)
for (i = 0; i < ACE_TX_RING_ENTRIES(ap); i++) {
struct sk_buff *skb;
- dma_addr_t mapping;
struct tx_ring_info *info;
info = ap->skb->tx_skbuff + i;
skb = info->skb;
- mapping = pci_unmap_addr(info, mapping);
- if (mapping) {
+ if (dma_unmap_len(info, maplen)) {
if (ACE_IS_TIGON_I(ap)) {
/* NB: TIGON_1 is special, tx_ring is in io space */
struct tx_desc __iomem *tx;
@@ -2395,10 +2391,10 @@ static int ace_close(struct net_device *dev)
} else
memset(ap->tx_ring + i, 0,
sizeof(struct tx_desc));
- pci_unmap_page(ap->pdev, mapping,
- pci_unmap_len(info, maplen),
+ pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping),
+ dma_unmap_len(info, maplen),
PCI_DMA_TODEVICE);
- pci_unmap_addr_set(info, mapping, 0);
+ dma_unmap_len_set(info, maplen, 0);
}
if (skb) {
dev_kfree_skb(skb);
@@ -2433,8 +2429,8 @@ ace_map_tx_skb(struct ace_private *ap, struct sk_buff *skb,
info = ap->skb->tx_skbuff + idx;
info->skb = tail;
- pci_unmap_addr_set(info, mapping, mapping);
- pci_unmap_len_set(info, maplen, skb->len);
+ dma_unmap_addr_set(info, mapping, mapping);
+ dma_unmap_len_set(info, maplen, skb->len);
return mapping;
}
@@ -2553,8 +2549,8 @@ restart:
} else {
info->skb = NULL;
}
- pci_unmap_addr_set(info, mapping, mapping);
- pci_unmap_len_set(info, maplen, frag->size);
+ dma_unmap_addr_set(info, mapping, mapping);
+ dma_unmap_len_set(info, maplen, frag->size);
ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
}
}
diff --git a/drivers/net/acenic.h b/drivers/net/acenic.h
index 17079b927ffa..0681da7e8753 100644
--- a/drivers/net/acenic.h
+++ b/drivers/net/acenic.h
@@ -589,7 +589,7 @@ struct ace_info {
struct ring_info {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
};
@@ -600,8 +600,8 @@ struct ring_info {
*/
struct tx_ring_info {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(mapping)
- DECLARE_PCI_UNMAP_LEN(maplen)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+ DEFINE_DMA_UNMAP_LEN(maplen);
};
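
The acenic hunks swap the PCI-specific unmap bookkeeping for the generic DMA helpers: DECLARE_PCI_UNMAP_ADDR/LEN become DEFINE_DMA_UNMAP_ADDR/LEN, and pci_unmap_addr{,_set}/pci_unmap_len{,_set} become dma_unmap_addr{,_set}/dma_unmap_len{,_set}, with the stored length doubling as the "is this mapped?" flag. A minimal hedged sketch of the converted pattern (tx_info, remember_mapping() and forget_mapping() are hypothetical):

	#include <linux/dma-mapping.h>
	#include <linux/skbuff.h>
	#include <linux/pci.h>

	struct tx_info {
		struct sk_buff *skb;
		DEFINE_DMA_UNMAP_ADDR(mapping);	/* compiles away when unmap info is unused */
		DEFINE_DMA_UNMAP_LEN(maplen);
	};

	static void remember_mapping(struct tx_info *info, dma_addr_t mapping,
				     unsigned int len)
	{
		dma_unmap_addr_set(info, mapping, mapping);
		dma_unmap_len_set(info, maplen, len);
	}

	static void forget_mapping(struct pci_dev *pdev, struct tx_info *info)
	{
		if (dma_unmap_len(info, maplen)) {
			pci_unmap_page(pdev, dma_unmap_addr(info, mapping),
				       dma_unmap_len(info, maplen),
				       PCI_DMA_TODEVICE);
			dma_unmap_len_set(info, maplen, 0);
		}
	}
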
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index 8d58f0a8f42f..585c25f4b60c 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -1339,8 +1339,6 @@ static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
writel( VAL1 | TDMD0, lp->mmio + CMD0);
writel( VAL2 | RDMD0,lp->mmio + CMD0);
- dev->trans_start = jiffies;
-
if(amd8111e_tx_queue_avail(lp) < 0){
netif_stop_queue(dev);
}
@@ -1376,7 +1374,7 @@ list to the device.
*/
static void amd8111e_set_multicast_list(struct net_device *dev)
{
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
struct amd8111e_priv *lp = netdev_priv(dev);
u32 mc_filter[2] ;
int bit_num;
@@ -1407,8 +1405,8 @@ static void amd8111e_set_multicast_list(struct net_device *dev)
/* load all the multicast addresses in the logic filter */
lp->options |= OPTION_MULTICAST_ENABLE;
mc_filter[1] = mc_filter[0] = 0;
- netdev_for_each_mc_addr(mc_ptr, dev) {
- bit_num = (ether_crc_le(ETH_ALEN, mc_ptr->dmi_addr) >> 26) & 0x3f;
+ netdev_for_each_mc_addr(ha, dev) {
+ bit_num = (ether_crc_le(ETH_ALEN, ha->addr) >> 26) & 0x3f;
mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
}
amd8111e_writeq(*(u64*)mc_filter,lp->mmio+ LADRF);
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index 6f8d6206b5c4..14e1d952226e 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -866,7 +866,7 @@ static void cops_timeout(struct net_device *dev)
}
printk(KERN_WARNING "%s: Transmit timed out.\n", dev->name);
cops_jumpstart(dev); /* Restart the card. */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -919,9 +919,8 @@ static netdev_tx_t cops_send_packet(struct sk_buff *skb,
/* Done sending packet, update counters and cleanup. */
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
- dev->trans_start = jiffies;
dev_kfree_skb (skb);
- return NETDEV_TX_OK;
+ return NETDEV_TX_OK;
}
/*
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index d8f029303754..a746ba272f04 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -654,7 +654,6 @@ netdev_tx_t arcnet_send_packet(struct sk_buff *skb,
}
}
retval = NETDEV_TX_OK;
- dev->trans_start = jiffies;
lp->next_tx = txbuf;
} else {
retval = NETDEV_TX_BUSY;
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c
index fa1a2354f5f9..705373a5308d 100644
--- a/drivers/net/ariadne.c
+++ b/drivers/net/ariadne.c
@@ -676,8 +676,6 @@ static netdev_tx_t ariadne_start_xmit(struct sk_buff *skb,
lance->RAP = CSR0; /* PCnet-ISA Controller Status */
lance->RDP = INEA|TDMD;
- dev->trans_start = jiffies;
-
if (lowb(priv->tx_ring[(entry+1) % TX_RING_SIZE]->TMD1) != 0) {
netif_stop_queue(dev);
priv->tx_full = 1;
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index f1f58c5e27bf..8c496fb1ac9e 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -383,12 +383,12 @@ static void am79c961_setmulticastlist (struct net_device *dev)
} else if (dev->flags & IFF_ALLMULTI) {
memset(multi_hash, 0xff, sizeof(multi_hash));
} else {
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
memset(multi_hash, 0x00, sizeof(multi_hash));
- netdev_for_each_mc_addr(dmi, dev)
- am79c961_mc_hash(dmi->dmi_addr, multi_hash);
+ netdev_for_each_mc_addr(ha, dev)
+ am79c961_mc_hash(ha->addr, multi_hash);
}
spin_lock_irqsave(&priv->chip_lock, flags);
@@ -469,7 +469,6 @@ am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev)
spin_lock_irqsave(&priv->chip_lock, flags);
write_rreg (dev->base_addr, CSR0, CSR0_TDMD|CSR0_IENA);
- dev->trans_start = jiffies;
spin_unlock_irqrestore(&priv->chip_lock, flags);
/*
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c
index aed5b5479b50..e07b314ed8fd 100644
--- a/drivers/net/arm/at91_ether.c
+++ b/drivers/net/arm/at91_ether.c
@@ -557,14 +557,14 @@ static int hash_get_index(__u8 *addr)
*/
static void at91ether_sethashtable(struct net_device *dev)
{
- struct dev_mc_list *curr;
+ struct netdev_hw_addr *ha;
unsigned long mc_filter[2];
unsigned int bitnr;
mc_filter[0] = mc_filter[1] = 0;
- netdev_for_each_mc_addr(curr, dev) {
- bitnr = hash_get_index(curr->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ bitnr = hash_get_index(ha->addr);
mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
}
@@ -824,7 +824,6 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Set length of the packet in the Transmit Control register */
at91_emac_write(AT91_EMAC_TCR, skb->len);
- dev->trans_start = jiffies;
} else {
printk(KERN_ERR "at91_ether.c: at91ether_start_xmit() called, but device is busy!\n");
return NETDEV_TX_BUSY; /* if we return anything but zero, dev.c:1055 calls kfree_skb(skb)
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index cd17d09f385c..4a5ec9470aa1 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -374,8 +374,6 @@ static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
skb->len, DMA_TO_DEVICE);
dev_kfree_skb(skb);
- dev->trans_start = jiffies;
-
spin_lock_irq(&ep->tx_pending_lock);
ep->tx_pending++;
if (ep->tx_pending == TX_QUEUE_ENTRIES)
diff --git a/drivers/net/arm/ether1.c b/drivers/net/arm/ether1.c
index e47c0d962857..b17ab5153f51 100644
--- a/drivers/net/arm/ether1.c
+++ b/drivers/net/arm/ether1.c
@@ -736,7 +736,6 @@ ether1_sendpacket (struct sk_buff *skb, struct net_device *dev)
local_irq_restore(flags);
/* handle transmit */
- dev->trans_start = jiffies;
/* check to see if we have room for a full sized ether frame */
tmp = priv(dev)->tx_head;
diff --git a/drivers/net/arm/ether3.c b/drivers/net/arm/ether3.c
index d9de9bce2395..1361b7367c28 100644
--- a/drivers/net/arm/ether3.c
+++ b/drivers/net/arm/ether3.c
@@ -529,7 +529,6 @@ ether3_sendpacket(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY; /* unable to queue */
}
- dev->trans_start = jiffies;
ptr = 0x600 * priv(dev)->tx_head;
priv(dev)->tx_head = next_ptr;
next_ptr *= 0x600;
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index 6be8b098b8b4..24df0325090c 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -708,7 +708,6 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
/* NPE firmware pads short frames with zeros internally */
wmb();
queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);
- dev->trans_start = jiffies;
if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
#if DEBUG_TX
@@ -736,7 +735,7 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
static void eth_set_mcast_list(struct net_device *dev)
{
struct port *port = netdev_priv(dev);
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
u8 diffs[ETH_ALEN], *addr;
int i;
@@ -749,11 +748,11 @@ static void eth_set_mcast_list(struct net_device *dev)
memset(diffs, 0, ETH_ALEN);
addr = NULL;
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (!addr)
- addr = mclist->dmi_addr; /* first MAC address */
+ addr = ha->addr; /* first MAC address */
for (i = 0; i < ETH_ALEN; i++)
- diffs[i] |= addr[i] ^ mclist->dmi_addr[i];
+ diffs[i] |= addr[i] ^ ha->addr[i];
}
for (i = 0; i < ETH_ALEN; i++) {
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index 84f8a8f73802..54c6d849cf25 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -332,16 +332,16 @@ ks8695_init_partial_multicast(struct ks8695_priv *ksp,
{
u32 low, high;
int i;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
i = 0;
- netdev_for_each_mc_addr(dmi, ndev) {
+ netdev_for_each_mc_addr(ha, ndev) {
/* Ran out of space in chip? */
BUG_ON(i == KS8695_NR_ADDRESSES);
- low = (dmi->dmi_addr[2] << 24) | (dmi->dmi_addr[3] << 16) |
- (dmi->dmi_addr[4] << 8) | (dmi->dmi_addr[5]);
- high = (dmi->dmi_addr[0] << 8) | (dmi->dmi_addr[1]);
+ low = (ha->addr[2] << 24) | (ha->addr[3] << 16) |
+ (ha->addr[4] << 8) | (ha->addr[5]);
+ high = (ha->addr[0] << 8) | (ha->addr[1]);
ks8695_writereg(ksp, KS8695_AAL_(i), low);
ks8695_writereg(ksp, KS8695_AAH_(i), AAH_E | high);
@@ -1302,8 +1302,6 @@ ks8695_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (++ksp->tx_ring_used == MAX_TX_DESC)
netif_stop_queue(ndev);
- ndev->trans_start = jiffies;
-
/* Kick the TX DMA in case it decided to go IDLE */
ks8695_writereg(ksp, KS8695_DTSC, 0);
@@ -1472,7 +1470,6 @@ ks8695_probe(struct platform_device *pdev)
/* Configure our private structure a little */
ksp = netdev_priv(ndev);
- memset(ksp, 0, sizeof(struct ks8695_priv));
ksp->dev = &pdev->dev;
ksp->ndev = ndev;
diff --git a/drivers/net/arm/w90p910_ether.c b/drivers/net/arm/w90p910_ether.c
index f7c9ca1dfb17..2e852463382b 100644
--- a/drivers/net/arm/w90p910_ether.c
+++ b/drivers/net/arm/w90p910_ether.c
@@ -483,7 +483,7 @@ static void w90p910_reset_mac(struct net_device *dev)
w90p910_init_desc(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
ether->cur_tx = 0x0;
ether->finish_tx = 0x0;
ether->cur_rx = 0x0;
@@ -497,7 +497,7 @@ static void w90p910_reset_mac(struct net_device *dev)
w90p910_trigger_tx(dev);
w90p910_trigger_rx(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
if (netif_queue_stopped(dev))
netif_wake_queue(dev);
@@ -634,8 +634,6 @@ static int w90p910_send_frame(struct net_device *dev,
txbd = &ether->tdesc->desclist[ether->cur_tx];
- dev->trans_start = jiffies;
-
if (txbd->mode & TX_OWEN_DMA)
netif_stop_queue(dev);
@@ -744,7 +742,6 @@ static void netdev_rx(struct net_device *dev)
return;
}
- skb->dev = dev;
skb_reserve(skb, 2);
skb_put(skb, length);
skb_copy_to_linear_data(skb, data, length);
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index 10a20fb9ae65..861f07a775fb 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -583,7 +583,7 @@ static void net_tx_timeout (struct net_device *dev)
outb (0x00, ioaddr + TX_START);
outb (0x03, ioaddr + COL16CNTL);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
lp->tx_started = 0;
lp->tx_queue_ready = 1;
@@ -636,7 +636,6 @@ static netdev_tx_t net_send_packet (struct sk_buff *skb,
outb (0x80 | lp->tx_queue, ioaddr + TX_START);
lp->tx_queue = 0;
lp->tx_queue_len = 0;
- dev->trans_start = jiffies;
lp->tx_started = 1;
netif_start_queue (dev);
} else if (lp->tx_queue_len < 4096 - 1502)
@@ -847,12 +846,12 @@ set_rx_mode(struct net_device *dev)
memset(mc_filter, 0x00, sizeof(mc_filter));
outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
unsigned int bit =
- ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 26;
+ ether_crc_le(ETH_ALEN, ha->addr) >> 26;
mc_filter[bit >> 3] |= (1 << bit);
}
outb(0x02, ioaddr + RX_MODE); /* Use normal mode. */
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index a8686bfec7a1..b57d7dee389a 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -767,8 +767,8 @@ static void lance_tx_timeout (struct net_device *dev)
/* lance_restart, essentially */
lance_init_ring(dev);
REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT;
- dev->trans_start = jiffies;
- netif_wake_queue (dev);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue(dev);
}
/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */
@@ -836,7 +836,6 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
/* Trigger an immediate send poll. */
DREG = CSR0_INEA | CSR0_TDMD;
- dev->trans_start = jiffies;
if ((MEM->tx_head[(entry+1) & TX_RING_MOD_MASK].flag & TMD1_OWN) ==
TMD1_OWN_HOST)
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 50dc531a02d8..3d7051135c3a 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -354,7 +354,7 @@ static void atl1c_set_multi(struct net_device *netdev)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
struct atl1c_hw *hw = &adapter->hw;
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u32 mac_ctrl_data;
u32 hash_value;
@@ -377,8 +377,8 @@ static void atl1c_set_multi(struct net_device *netdev)
AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
/* comoute mc addresses' hash value ,and put it into hash table */
- netdev_for_each_mc_addr(mc_ptr, netdev) {
- hash_value = atl1c_hash_mc_addr(hw, mc_ptr->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev) {
+ hash_value = atl1c_hash_mc_addr(hw, ha->addr);
atl1c_hash_set(hw, hash_value);
}
}
@@ -1817,7 +1817,6 @@ rrs_checked:
atl1c_clean_rfd(rfd_ring, rrs, rfd_num);
skb_put(skb, length - ETH_FCS_LEN);
skb->protocol = eth_type_trans(skb, netdev);
- skb->dev = netdev;
atl1c_rx_checksum(adapter, skb, rrs);
if (unlikely(adapter->vlgrp) && rrs->word3 & RRS_VLAN_INS) {
u16 vlan;
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index 73302ae468aa..7dd33776de00 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -284,7 +284,7 @@ static void atl1e_set_multi(struct net_device *netdev)
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
struct atl1e_hw *hw = &adapter->hw;
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u32 mac_ctrl_data = 0;
u32 hash_value;
@@ -307,8 +307,8 @@ static void atl1e_set_multi(struct net_device *netdev)
AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
/* comoute mc addresses' hash value ,and put it into hash table */
- netdev_for_each_mc_addr(mc_ptr, netdev) {
- hash_value = atl1e_hash_mc_addr(hw, mc_ptr->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev) {
+ hash_value = atl1e_hash_mc_addr(hw, ha->addr);
atl1e_hash_set(hw, hash_value);
}
}
@@ -1428,7 +1428,6 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
"Memory squeeze, deferring packet\n");
goto skip_pkt;
}
- skb->dev = netdev;
memcpy(skb->data, (u8 *)(prrs + 1), packet_size);
skb_put(skb, packet_size);
skb->protocol = eth_type_trans(skb, netdev);
@@ -1680,7 +1679,7 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
{
struct atl1e_tpd_desc *use_tpd = NULL;
struct atl1e_tx_buffer *tx_buffer = NULL;
- u16 buf_len = skb->len - skb->data_len;
+ u16 buf_len = skb_headlen(skb);
u16 map_len = 0;
u16 mapped_len = 0;
u16 hdr_len = 0;
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 0ebd8208f606..33448a09b47f 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -2347,7 +2347,7 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
{
struct atl1_adapter *adapter = netdev_priv(netdev);
struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
- int len = skb->len;
+ int len;
int tso;
int count = 1;
int ret_val;
@@ -2359,7 +2359,7 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
unsigned int f;
unsigned int proto_hdr_len;
- len -= skb->data_len;
+ len = skb_headlen(skb);
if (unlikely(skb->len <= 0)) {
dev_kfree_skb_any(skb);
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index 54662f24f9bb..8da87383fb39 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -136,7 +136,7 @@ static void atl2_set_multi(struct net_device *netdev)
{
struct atl2_adapter *adapter = netdev_priv(netdev);
struct atl2_hw *hw = &adapter->hw;
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u32 rctl;
u32 hash_value;
@@ -158,8 +158,8 @@ static void atl2_set_multi(struct net_device *netdev)
ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
/* comoute mc addresses' hash value ,and put it into hash table */
- netdev_for_each_mc_addr(mc_ptr, netdev) {
- hash_value = atl2_hash_mc_addr(hw, mc_ptr->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev) {
+ hash_value = atl2_hash_mc_addr(hw, ha->addr);
atl2_hash_set(hw, hash_value);
}
}
@@ -422,7 +422,6 @@ static void atl2_intr_rx(struct atl2_adapter *adapter)
netdev->stats.rx_dropped++;
break;
}
- skb->dev = netdev;
memcpy(skb->data, rxd->packet, rx_size);
skb_put(skb, rx_size);
skb->protocol = eth_type_trans(skb, netdev);
@@ -893,7 +892,6 @@ static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb,
(adapter->txd_write_ptr >> 2));
mmiowb();
- netdev->trans_start = jiffies;
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
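
The skb->dev = netdev assignments removed in the atl1c/atl1e/atl2 receive paths (and in w90p910 earlier) are redundant because eth_type_trans() already sets skb->dev to the device it is handed. A minimal sketch of the rx-completion tail (foo_rx_deliver() is hypothetical):

	#include <linux/netdevice.h>
	#include <linux/etherdevice.h>
	#include <linux/skbuff.h>

	static void foo_rx_deliver(struct net_device *netdev, struct sk_buff *skb,
				   unsigned int len)
	{
		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, netdev);	/* sets skb->dev too */
		netif_rx(skb);
	}
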
diff --git a/drivers/net/atlx/atlx.c b/drivers/net/atlx/atlx.c
index 72f3306352e2..f979ea2d6d3c 100644
--- a/drivers/net/atlx/atlx.c
+++ b/drivers/net/atlx/atlx.c
@@ -123,7 +123,7 @@ static void atlx_set_multi(struct net_device *netdev)
{
struct atlx_adapter *adapter = netdev_priv(netdev);
struct atlx_hw *hw = &adapter->hw;
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u32 rctl;
u32 hash_value;
@@ -144,8 +144,8 @@ static void atlx_set_multi(struct net_device *netdev)
iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
/* compute mc addresses' hash value ,and put it into hash table */
- netdev_for_each_mc_addr(mc_ptr, netdev) {
- hash_value = atlx_hash_mc_addr(hw, mc_ptr->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev) {
+ hash_value = atlx_hash_mc_addr(hw, ha->addr);
atlx_hash_set(hw, hash_value);
}
}
diff --git a/drivers/net/atp.c b/drivers/net/atp.c
index 55039d44dc47..75ff0c59e9c7 100644
--- a/drivers/net/atp.c
+++ b/drivers/net/atp.c
@@ -547,7 +547,7 @@ static void tx_timeout(struct net_device *dev)
dev->stats.tx_errors++;
/* Try to restart the adapter. */
hardware_init(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
dev->stats.tx_errors++;
}
@@ -586,7 +586,6 @@ static netdev_tx_t atp_send_packet(struct sk_buff *skb,
write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
write_reg_high(ioaddr, IMR, ISRh_RxErr);
- dev->trans_start = jiffies;
dev_kfree_skb (skb);
return NETDEV_TX_OK;
}
@@ -882,11 +881,11 @@ static void set_rx_mode_8012(struct net_device *dev)
memset(mc_filter, 0xff, sizeof(mc_filter));
new_mode = CMR2h_Normal;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
- int filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
+ netdev_for_each_mc_addr(ha, dev) {
+ int filterbit = ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;
mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
}
new_mode = CMR2h_Normal;
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 4da191b87b0d..ece6128bef14 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -75,14 +75,19 @@ static int au1000_debug = 5;
static int au1000_debug = 3;
#endif
+#define AU1000_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK)
+
#define DRV_NAME "au1000_eth"
-#define DRV_VERSION "1.6"
+#define DRV_VERSION "1.7"
#define DRV_AUTHOR "Pete Popov <ppopov@embeddedalley.com>"
#define DRV_DESC "Au1xxx on-chip Ethernet driver"
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);
MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
/*
* Theory of operation
@@ -148,7 +153,7 @@ struct au1000_private *au_macs[NUM_ETH_INTERFACES];
* specific irq-map
*/
-static void enable_mac(struct net_device *dev, int force_reset)
+static void au1000_enable_mac(struct net_device *dev, int force_reset)
{
unsigned long flags;
struct au1000_private *aup = netdev_priv(dev);
@@ -182,8 +187,7 @@ static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg)
while (*mii_control_reg & MAC_MII_BUSY) {
mdelay(1);
if (--timedout == 0) {
- printk(KERN_ERR "%s: read_MII busy timeout!!\n",
- dev->name);
+ netdev_err(dev, "read_MII busy timeout!!\n");
return -1;
}
}
@@ -197,8 +201,7 @@ static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg)
while (*mii_control_reg & MAC_MII_BUSY) {
mdelay(1);
if (--timedout == 0) {
- printk(KERN_ERR "%s: mdio_read busy timeout!!\n",
- dev->name);
+ netdev_err(dev, "mdio_read busy timeout!!\n");
return -1;
}
}
@@ -217,8 +220,7 @@ static void au1000_mdio_write(struct net_device *dev, int phy_addr,
while (*mii_control_reg & MAC_MII_BUSY) {
mdelay(1);
if (--timedout == 0) {
- printk(KERN_ERR "%s: mdio_write busy timeout!!\n",
- dev->name);
+ netdev_err(dev, "mdio_write busy timeout!!\n");
return;
}
}
@@ -236,7 +238,7 @@ static int au1000_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
* _NOT_ hold (e.g. when PHY is accessed through other MAC's MII bus) */
struct net_device *const dev = bus->priv;
- enable_mac(dev, 0); /* make sure the MAC associated with this
+ au1000_enable_mac(dev, 0); /* make sure the MAC associated with this
* mii_bus is enabled */
return au1000_mdio_read(dev, phy_addr, regnum);
}
@@ -246,7 +248,7 @@ static int au1000_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
{
struct net_device *const dev = bus->priv;
- enable_mac(dev, 0); /* make sure the MAC associated with this
+ au1000_enable_mac(dev, 0); /* make sure the MAC associated with this
* mii_bus is enabled */
au1000_mdio_write(dev, phy_addr, regnum, value);
return 0;
@@ -256,28 +258,26 @@ static int au1000_mdiobus_reset(struct mii_bus *bus)
{
struct net_device *const dev = bus->priv;
- enable_mac(dev, 0); /* make sure the MAC associated with this
+ au1000_enable_mac(dev, 0); /* make sure the MAC associated with this
* mii_bus is enabled */
return 0;
}
-static void hard_stop(struct net_device *dev)
+static void au1000_hard_stop(struct net_device *dev)
{
struct au1000_private *aup = netdev_priv(dev);
- if (au1000_debug > 4)
- printk(KERN_INFO "%s: hard stop\n", dev->name);
+ netif_dbg(aup, drv, dev, "hard stop\n");
aup->mac->control &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
au_sync_delay(10);
}
-static void enable_rx_tx(struct net_device *dev)
+static void au1000_enable_rx_tx(struct net_device *dev)
{
struct au1000_private *aup = netdev_priv(dev);
- if (au1000_debug > 4)
- printk(KERN_INFO "%s: enable_rx_tx\n", dev->name);
+ netif_dbg(aup, hw, dev, "enable_rx_tx\n");
aup->mac->control |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
au_sync_delay(10);
@@ -297,16 +297,15 @@ au1000_adjust_link(struct net_device *dev)
spin_lock_irqsave(&aup->lock, flags);
if (phydev->link && (aup->old_speed != phydev->speed)) {
- // speed changed
+ /* speed changed */
- switch(phydev->speed) {
+ switch (phydev->speed) {
case SPEED_10:
case SPEED_100:
break;
default:
- printk(KERN_WARNING
- "%s: Speed (%d) is not 10/100 ???\n",
- dev->name, phydev->speed);
+ netdev_warn(dev, "Speed (%d) is not 10/100 ???\n",
+ phydev->speed);
break;
}
@@ -316,10 +315,10 @@ au1000_adjust_link(struct net_device *dev)
}
if (phydev->link && (aup->old_duplex != phydev->duplex)) {
- // duplex mode changed
+ /* duplex mode changed */
/* switching duplex mode requires to disable rx and tx! */
- hard_stop(dev);
+ au1000_hard_stop(dev);
if (DUPLEX_FULL == phydev->duplex)
aup->mac->control = ((aup->mac->control
@@ -331,14 +330,14 @@ au1000_adjust_link(struct net_device *dev)
| MAC_DISABLE_RX_OWN);
au_sync_delay(1);
- enable_rx_tx(dev);
+ au1000_enable_rx_tx(dev);
aup->old_duplex = phydev->duplex;
status_change = 1;
}
- if(phydev->link != aup->old_link) {
- // link state changed
+ if (phydev->link != aup->old_link) {
+ /* link state changed */
if (!phydev->link) {
/* link went down */
@@ -354,15 +353,15 @@ au1000_adjust_link(struct net_device *dev)
if (status_change) {
if (phydev->link)
- printk(KERN_INFO "%s: link up (%d/%s)\n",
- dev->name, phydev->speed,
+ netdev_info(dev, "link up (%d/%s)\n",
+ phydev->speed,
DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
else
- printk(KERN_INFO "%s: link down\n", dev->name);
+ netdev_info(dev, "link down\n");
}
}
-static int mii_probe (struct net_device *dev)
+static int au1000_mii_probe (struct net_device *dev)
{
struct au1000_private *const aup = netdev_priv(dev);
struct phy_device *phydev = NULL;
@@ -373,8 +372,7 @@ static int mii_probe (struct net_device *dev)
if (aup->phy_addr)
phydev = aup->mii_bus->phy_map[aup->phy_addr];
else
- printk (KERN_INFO DRV_NAME ":%s: using PHY-less setup\n",
- dev->name);
+ netdev_info(dev, "using PHY-less setup\n");
return 0;
} else {
int phy_addr;
@@ -391,7 +389,7 @@ static int mii_probe (struct net_device *dev)
/* try harder to find a PHY */
if (!phydev && (aup->mac_id == 1)) {
/* no PHY found, maybe we have a dual PHY? */
- printk (KERN_INFO DRV_NAME ": no PHY found on MAC1, "
+ dev_info(&dev->dev, ": no PHY found on MAC1, "
"let's see if it's attached to MAC0...\n");
/* find the first (lowest address) non-attached PHY on
@@ -417,7 +415,7 @@ static int mii_probe (struct net_device *dev)
}
if (!phydev) {
- printk (KERN_ERR DRV_NAME ":%s: no PHY found\n", dev->name);
+ netdev_err(dev, "no PHY found\n");
return -1;
}
@@ -428,7 +426,7 @@ static int mii_probe (struct net_device *dev)
0, PHY_INTERFACE_MODE_MII);
if (IS_ERR(phydev)) {
- printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+ netdev_err(dev, "Could not attach to PHY\n");
return PTR_ERR(phydev);
}
@@ -449,8 +447,8 @@ static int mii_probe (struct net_device *dev)
aup->old_duplex = -1;
aup->phy_dev = phydev;
- printk(KERN_INFO "%s: attached PHY driver [%s] "
- "(mii_bus:phy_addr=%s, irq=%d)\n", dev->name,
+ netdev_info(dev, "attached PHY driver [%s] "
+ "(mii_bus:phy_addr=%s, irq=%d)\n",
phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
return 0;
@@ -462,7 +460,7 @@ static int mii_probe (struct net_device *dev)
* has the virtual and dma address of a buffer suitable for
* both, receive and transmit operations.
*/
-static db_dest_t *GetFreeDB(struct au1000_private *aup)
+static db_dest_t *au1000_GetFreeDB(struct au1000_private *aup)
{
db_dest_t *pDB;
pDB = aup->pDBfree;
@@ -473,7 +471,7 @@ static db_dest_t *GetFreeDB(struct au1000_private *aup)
return pDB;
}
-void ReleaseDB(struct au1000_private *aup, db_dest_t *pDB)
+void au1000_ReleaseDB(struct au1000_private *aup, db_dest_t *pDB)
{
db_dest_t *pDBfree = aup->pDBfree;
if (pDBfree)
@@ -481,12 +479,12 @@ void ReleaseDB(struct au1000_private *aup, db_dest_t *pDB)
aup->pDBfree = pDB;
}
-static void reset_mac_unlocked(struct net_device *dev)
+static void au1000_reset_mac_unlocked(struct net_device *dev)
{
struct au1000_private *const aup = netdev_priv(dev);
int i;
- hard_stop(dev);
+ au1000_hard_stop(dev);
*aup->enable = MAC_EN_CLOCK_ENABLE;
au_sync_delay(2);
@@ -507,18 +505,17 @@ static void reset_mac_unlocked(struct net_device *dev)
}
-static void reset_mac(struct net_device *dev)
+static void au1000_reset_mac(struct net_device *dev)
{
struct au1000_private *const aup = netdev_priv(dev);
unsigned long flags;
- if (au1000_debug > 4)
- printk(KERN_INFO "%s: reset mac, aup %x\n",
- dev->name, (unsigned)aup);
+ netif_dbg(aup, hw, dev, "reset mac, aup %x\n",
+ (unsigned)aup);
spin_lock_irqsave(&aup->lock, flags);
- reset_mac_unlocked (dev);
+ au1000_reset_mac_unlocked (dev);
spin_unlock_irqrestore(&aup->lock, flags);
}
@@ -529,7 +526,7 @@ static void reset_mac(struct net_device *dev)
* these are not descriptors sitting in memory.
*/
static void
-setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base)
+au1000_setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base)
{
int i;
@@ -582,11 +579,25 @@ au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
info->regdump_len = 0;
}
+static void au1000_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ aup->msg_enable = value;
+}
+
+static u32 au1000_get_msglevel(struct net_device *dev)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ return aup->msg_enable;
+}
+
static const struct ethtool_ops au1000_ethtool_ops = {
.get_settings = au1000_get_settings,
.set_settings = au1000_set_settings,
.get_drvinfo = au1000_get_drvinfo,
.get_link = ethtool_op_get_link,
+ .get_msglevel = au1000_get_msglevel,
+ .set_msglevel = au1000_set_msglevel,
};
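
The au1000 conversion replaces the private au1000_debug level tests with the standard msg_enable bitmask: netif_dbg()/netdev_err() consult aup->msg_enable, and the new get/set_msglevel ethtool hooks expose it at run time. A hedged sketch of the usual wiring (FOO_DEF_MSG_ENABLE, foo_priv and the debug module parameter are hypothetical):

	#include <linux/netdevice.h>
	#include <linux/module.h>

	#define FOO_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

	static int debug = -1;			/* -1 means: use the default mask */
	module_param(debug, int, 0);

	struct foo_priv {
		u32 msg_enable;			/* consulted by netif_dbg() and friends */
	};

	static void foo_init_msg(struct net_device *dev)
	{
		struct foo_priv *priv = netdev_priv(dev);

		priv->msg_enable = netif_msg_init(debug, FOO_DEF_MSG_ENABLE);
		netif_dbg(priv, probe, dev, "message level %#x\n", priv->msg_enable);
	}
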
@@ -606,11 +617,10 @@ static int au1000_init(struct net_device *dev)
int i;
u32 control;
- if (au1000_debug > 4)
- printk("%s: au1000_init\n", dev->name);
+ netif_dbg(aup, hw, dev, "au1000_init\n");
/* bring the device out of reset */
- enable_mac(dev, 1);
+ au1000_enable_mac(dev, 1);
spin_lock_irqsave(&aup->lock, flags);
@@ -649,7 +659,7 @@ static int au1000_init(struct net_device *dev)
return 0;
}
-static inline void update_rx_stats(struct net_device *dev, u32 status)
+static inline void au1000_update_rx_stats(struct net_device *dev, u32 status)
{
struct net_device_stats *ps = &dev->stats;
@@ -667,8 +677,7 @@ static inline void update_rx_stats(struct net_device *dev, u32 status)
ps->rx_crc_errors++;
if (status & RX_COLL)
ps->collisions++;
- }
- else
+ } else
ps->rx_bytes += status & RX_FRAME_LEN_MASK;
}
@@ -685,15 +694,14 @@ static int au1000_rx(struct net_device *dev)
db_dest_t *pDB;
u32 frmlen;
- if (au1000_debug > 5)
- printk("%s: au1000_rx head %d\n", dev->name, aup->rx_head);
+ netif_dbg(aup, rx_status, dev, "au1000_rx head %d\n", aup->rx_head);
prxd = aup->rx_dma_ring[aup->rx_head];
buff_stat = prxd->buff_stat;
while (buff_stat & RX_T_DONE) {
status = prxd->status;
pDB = aup->rx_db_inuse[aup->rx_head];
- update_rx_stats(dev, status);
+ au1000_update_rx_stats(dev, status);
if (!(status & RX_ERROR)) {
/* good frame */
@@ -701,9 +709,7 @@ static int au1000_rx(struct net_device *dev)
frmlen -= 4; /* Remove FCS */
skb = dev_alloc_skb(frmlen + 2);
if (skb == NULL) {
- printk(KERN_ERR
- "%s: Memory squeeze, dropping packet.\n",
- dev->name);
+ netdev_err(dev, "Memory squeeze, dropping packet.\n");
dev->stats.rx_dropped++;
continue;
}
@@ -713,8 +719,7 @@ static int au1000_rx(struct net_device *dev)
skb_put(skb, frmlen);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb); /* pass the packet to upper layers */
- }
- else {
+ } else {
if (au1000_debug > 4) {
if (status & RX_MISSED_FRAME)
printk("rx miss\n");
@@ -747,7 +752,7 @@ static int au1000_rx(struct net_device *dev)
return 0;
}
-static void update_tx_stats(struct net_device *dev, u32 status)
+static void au1000_update_tx_stats(struct net_device *dev, u32 status)
{
struct au1000_private *aup = netdev_priv(dev);
struct net_device_stats *ps = &dev->stats;
@@ -760,8 +765,7 @@ static void update_tx_stats(struct net_device *dev, u32 status)
ps->tx_errors++;
ps->tx_aborted_errors++;
}
- }
- else {
+ } else {
ps->tx_errors++;
ps->tx_aborted_errors++;
if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER))
@@ -783,7 +787,7 @@ static void au1000_tx_ack(struct net_device *dev)
ptxd = aup->tx_dma_ring[aup->tx_tail];
while (ptxd->buff_stat & TX_T_DONE) {
- update_tx_stats(dev, ptxd->status);
+ au1000_update_tx_stats(dev, ptxd->status);
ptxd->buff_stat &= ~TX_T_DONE;
ptxd->len = 0;
au_sync();
@@ -817,18 +821,18 @@ static int au1000_open(struct net_device *dev)
int retval;
struct au1000_private *aup = netdev_priv(dev);
- if (au1000_debug > 4)
- printk("%s: open: dev=%p\n", dev->name, dev);
+ netif_dbg(aup, drv, dev, "open: dev=%p\n", dev);
- if ((retval = request_irq(dev->irq, au1000_interrupt, 0,
- dev->name, dev))) {
- printk(KERN_ERR "%s: unable to get IRQ %d\n",
- dev->name, dev->irq);
+ retval = request_irq(dev->irq, au1000_interrupt, 0,
+ dev->name, dev);
+ if (retval) {
+ netdev_err(dev, "unable to get IRQ %d\n", dev->irq);
return retval;
}
- if ((retval = au1000_init(dev))) {
- printk(KERN_ERR "%s: error in au1000_init\n", dev->name);
+ retval = au1000_init(dev);
+ if (retval) {
+ netdev_err(dev, "error in au1000_init\n");
free_irq(dev->irq, dev);
return retval;
}
@@ -841,8 +845,7 @@ static int au1000_open(struct net_device *dev)
netif_start_queue(dev);
- if (au1000_debug > 4)
- printk("%s: open: Initialization done.\n", dev->name);
+ netif_dbg(aup, drv, dev, "open: Initialization done.\n");
return 0;
}
@@ -852,15 +855,14 @@ static int au1000_close(struct net_device *dev)
unsigned long flags;
struct au1000_private *const aup = netdev_priv(dev);
- if (au1000_debug > 4)
- printk("%s: close: dev=%p\n", dev->name, dev);
+ netif_dbg(aup, drv, dev, "close: dev=%p\n", dev);
if (aup->phy_dev)
phy_stop(aup->phy_dev);
spin_lock_irqsave(&aup->lock, flags);
- reset_mac_unlocked (dev);
+ au1000_reset_mac_unlocked (dev);
/* stop the device */
netif_stop_queue(dev);
@@ -884,9 +886,8 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
db_dest_t *pDB;
int i;
- if (au1000_debug > 5)
- printk("%s: tx: aup %x len=%d, data=%p, head %d\n",
- dev->name, (unsigned)aup, skb->len,
+ netif_dbg(aup, tx_queued, dev, "tx: aup %x len=%d, data=%p, head %d\n",
+ (unsigned)aup, skb->len,
skb->data, aup->tx_head);
ptxd = aup->tx_dma_ring[aup->tx_head];
@@ -896,9 +897,8 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
aup->tx_full = 1;
return NETDEV_TX_BUSY;
- }
- else if (buff_stat & TX_T_DONE) {
- update_tx_stats(dev, ptxd->status);
+ } else if (buff_stat & TX_T_DONE) {
+ au1000_update_tx_stats(dev, ptxd->status);
ptxd->len = 0;
}
@@ -910,12 +910,11 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
pDB = aup->tx_db_inuse[aup->tx_head];
skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len);
if (skb->len < ETH_ZLEN) {
- for (i=skb->len; i<ETH_ZLEN; i++) {
+ for (i = skb->len; i < ETH_ZLEN; i++) {
((char *)pDB->vaddr)[i] = 0;
}
ptxd->len = ETH_ZLEN;
- }
- else
+ } else
ptxd->len = skb->len;
ps->tx_packets++;
@@ -925,7 +924,6 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
au_sync();
dev_kfree_skb(skb);
aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
@@ -935,10 +933,10 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
*/
static void au1000_tx_timeout(struct net_device *dev)
{
- printk(KERN_ERR "%s: au1000_tx_timeout: dev=%p\n", dev->name, dev);
- reset_mac(dev);
+ netdev_err(dev, "au1000_tx_timeout: dev=%p\n", dev);
+ au1000_reset_mac(dev);
au1000_init(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -946,8 +944,7 @@ static void au1000_multicast_list(struct net_device *dev)
{
struct au1000_private *aup = netdev_priv(dev);
- if (au1000_debug > 4)
- printk("%s: au1000_multicast_list: flags=%x\n", dev->name, dev->flags);
+ netif_dbg(aup, drv, dev, "au1000_multicast_list: flags=%x\n", dev->flags);
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
aup->mac->control |= MAC_PROMISCUOUS;
@@ -955,14 +952,14 @@ static void au1000_multicast_list(struct net_device *dev)
netdev_mc_count(dev) > MULTICAST_FILTER_LIMIT) {
aup->mac->control |= MAC_PASS_ALL_MULTI;
aup->mac->control &= ~MAC_PROMISCUOUS;
- printk(KERN_INFO "%s: Pass all multicast\n", dev->name);
+ netdev_info(dev, "Pass all multicast\n");
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
u32 mc_filter[2]; /* Multicast hash filter */
mc_filter[1] = mc_filter[0] = 0;
- netdev_for_each_mc_addr(mclist, dev)
- set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr)>>26,
+ netdev_for_each_mc_addr(ha, dev)
+ set_bit(ether_crc(ETH_ALEN, ha->addr)>>26,
(long *)mc_filter);
aup->mac->multi_hash_high = mc_filter[1];
aup->mac->multi_hash_low = mc_filter[0];
@@ -975,9 +972,11 @@ static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct au1000_private *aup = netdev_priv(dev);
- if (!netif_running(dev)) return -EINVAL;
+ if (!netif_running(dev))
+ return -EINVAL;
- if (!aup->phy_dev) return -EINVAL; // PHY not controllable
+ if (!aup->phy_dev)
+ return -EINVAL; /* PHY not controllable */
return phy_mii_ioctl(aup->phy_dev, if_mii(rq), cmd);
}
@@ -996,7 +995,7 @@ static const struct net_device_ops au1000_netdev_ops = {
static int __devinit au1000_probe(struct platform_device *pdev)
{
- static unsigned version_printed = 0;
+ static unsigned version_printed;
struct au1000_private *aup = NULL;
struct au1000_eth_platform_data *pd;
struct net_device *dev = NULL;
@@ -1007,40 +1006,40 @@ static int __devinit au1000_probe(struct platform_device *pdev)
base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!base) {
- printk(KERN_ERR DRV_NAME ": failed to retrieve base register\n");
+ dev_err(&pdev->dev, "failed to retrieve base register\n");
err = -ENODEV;
goto out;
}
macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!macen) {
- printk(KERN_ERR DRV_NAME ": failed to retrieve MAC Enable register\n");
+ dev_err(&pdev->dev, "failed to retrieve MAC Enable register\n");
err = -ENODEV;
goto out;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- printk(KERN_ERR DRV_NAME ": failed to retrieve IRQ\n");
+ dev_err(&pdev->dev, "failed to retrieve IRQ\n");
err = -ENODEV;
goto out;
}
if (!request_mem_region(base->start, resource_size(base), pdev->name)) {
- printk(KERN_ERR DRV_NAME ": failed to request memory region for base registers\n");
+ dev_err(&pdev->dev, "failed to request memory region for base registers\n");
err = -ENXIO;
goto out;
}
if (!request_mem_region(macen->start, resource_size(macen), pdev->name)) {
- printk(KERN_ERR DRV_NAME ": failed to request memory region for MAC enable register\n");
+ dev_err(&pdev->dev, "failed to request memory region for MAC enable register\n");
err = -ENXIO;
goto err_request;
}
dev = alloc_etherdev(sizeof(struct au1000_private));
if (!dev) {
- printk(KERN_ERR "%s: alloc_etherdev failed\n", DRV_NAME);
+ dev_err(&pdev->dev, "alloc_etherdev failed\n");
err = -ENOMEM;
goto err_alloc;
}
@@ -1050,6 +1049,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
aup = netdev_priv(dev);
spin_lock_init(&aup->lock);
+ aup->msg_enable = (au1000_debug < 4 ? AU1000_DEF_MSG_ENABLE : au1000_debug);
/* Allocate the data buffers */
/* Snooping works fine with eth on all au1xxx */
@@ -1057,7 +1057,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
(NUM_TX_BUFFS + NUM_RX_BUFFS),
&aup->dma_addr, 0);
if (!aup->vaddr) {
- printk(KERN_ERR DRV_NAME ": failed to allocate data buffers\n");
+ dev_err(&pdev->dev, "failed to allocate data buffers\n");
err = -ENOMEM;
goto err_vaddr;
}
@@ -1065,7 +1065,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
/* aup->mac is the base address of the MAC's registers */
aup->mac = (volatile mac_reg_t *)ioremap_nocache(base->start, resource_size(base));
if (!aup->mac) {
- printk(KERN_ERR DRV_NAME ": failed to ioremap MAC registers\n");
+ dev_err(&pdev->dev, "failed to ioremap MAC registers\n");
err = -ENXIO;
goto err_remap1;
}
@@ -1073,7 +1073,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
/* Setup some variables for quick register address access */
aup->enable = (volatile u32 *)ioremap_nocache(macen->start, resource_size(macen));
if (!aup->enable) {
- printk(KERN_ERR DRV_NAME ": failed to ioremap MAC enable register\n");
+ dev_err(&pdev->dev, "failed to ioremap MAC enable register\n");
err = -ENXIO;
goto err_remap2;
}
@@ -1083,14 +1083,13 @@ static int __devinit au1000_probe(struct platform_device *pdev)
if (prom_get_ethernet_addr(ethaddr) == 0)
memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr));
else {
- printk(KERN_INFO "%s: No MAC address found\n",
- dev->name);
+ netdev_info(dev, "No MAC address found\n");
/* Use the hard coded MAC addresses */
}
- setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
+ au1000_setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
} else if (pdev->id == 1)
- setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
+ au1000_setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
/*
* Assign to the Ethernet ports two consecutive MAC addresses
@@ -1104,7 +1103,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
pd = pdev->dev.platform_data;
if (!pd) {
- printk(KERN_INFO DRV_NAME ": no platform_data passed, PHY search on MAC0\n");
+ dev_info(&pdev->dev, "no platform_data passed, PHY search on MAC0\n");
aup->phy1_search_mac0 = 1;
} else {
aup->phy_static_config = pd->phy_static_config;
@@ -1116,7 +1115,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
}
if (aup->phy_busid && aup->phy_busid > 0) {
- printk(KERN_ERR DRV_NAME ": MAC0-associated PHY attached 2nd MACs MII"
+		dev_err(&pdev->dev, "MAC0-associated PHY attached 2nd MACs MII "
"bus not supported yet\n");
err = -ENODEV;
goto err_mdiobus_alloc;
@@ -1124,7 +1123,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
aup->mii_bus = mdiobus_alloc();
if (aup->mii_bus == NULL) {
- printk(KERN_ERR DRV_NAME ": failed to allocate mdiobus structure\n");
+ dev_err(&pdev->dev, "failed to allocate mdiobus structure\n");
err = -ENOMEM;
goto err_mdiobus_alloc;
}
@@ -1139,7 +1138,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
if (aup->mii_bus->irq == NULL)
goto err_out;
- for(i = 0; i < PHY_MAX_ADDR; ++i)
+ for (i = 0; i < PHY_MAX_ADDR; ++i)
aup->mii_bus->irq[i] = PHY_POLL;
/* if known, set corresponding PHY IRQs */
if (aup->phy_static_config)
@@ -1148,11 +1147,11 @@ static int __devinit au1000_probe(struct platform_device *pdev)
err = mdiobus_register(aup->mii_bus);
if (err) {
- printk(KERN_ERR DRV_NAME " failed to register MDIO bus\n");
+ dev_err(&pdev->dev, "failed to register MDIO bus\n");
goto err_mdiobus_reg;
}
- if (mii_probe(dev) != 0)
+ if (au1000_mii_probe(dev) != 0)
goto err_out;
pDBfree = NULL;
@@ -1168,7 +1167,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
aup->pDBfree = pDBfree;
for (i = 0; i < NUM_RX_DMA; i++) {
- pDB = GetFreeDB(aup);
+ pDB = au1000_GetFreeDB(aup);
if (!pDB) {
goto err_out;
}
@@ -1176,7 +1175,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
aup->rx_db_inuse[i] = pDB;
}
for (i = 0; i < NUM_TX_DMA; i++) {
- pDB = GetFreeDB(aup);
+ pDB = au1000_GetFreeDB(aup);
if (!pDB) {
goto err_out;
}
@@ -1195,17 +1194,16 @@ static int __devinit au1000_probe(struct platform_device *pdev)
* The boot code uses the ethernet controller, so reset it to start
* fresh. au1000_init() expects that the device is in reset state.
*/
- reset_mac(dev);
+ au1000_reset_mac(dev);
err = register_netdev(dev);
if (err) {
- printk(KERN_ERR DRV_NAME "%s: Cannot register net device, aborting.\n",
- dev->name);
+ netdev_err(dev, "Cannot register net device, aborting.\n");
goto err_out;
}
- printk("%s: Au1xx0 Ethernet found at 0x%lx, irq %d\n",
- dev->name, (unsigned long)base->start, irq);
+ netdev_info(dev, "Au1xx0 Ethernet found at 0x%lx, irq %d\n",
+ (unsigned long)base->start, irq);
if (version_printed++ == 0)
printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
@@ -1217,15 +1215,15 @@ err_out:
/* here we should have a valid dev plus aup-> register addresses
* so we can reset the mac properly.*/
- reset_mac(dev);
+ au1000_reset_mac(dev);
for (i = 0; i < NUM_RX_DMA; i++) {
if (aup->rx_db_inuse[i])
- ReleaseDB(aup, aup->rx_db_inuse[i]);
+ au1000_ReleaseDB(aup, aup->rx_db_inuse[i]);
}
for (i = 0; i < NUM_TX_DMA; i++) {
if (aup->tx_db_inuse[i])
- ReleaseDB(aup, aup->tx_db_inuse[i]);
+ au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
}
err_mdiobus_reg:
mdiobus_free(aup->mii_bus);
@@ -1261,11 +1259,11 @@ static int __devexit au1000_remove(struct platform_device *pdev)
for (i = 0; i < NUM_RX_DMA; i++)
if (aup->rx_db_inuse[i])
- ReleaseDB(aup, aup->rx_db_inuse[i]);
+ au1000_ReleaseDB(aup, aup->rx_db_inuse[i]);
for (i = 0; i < NUM_TX_DMA; i++)
if (aup->tx_db_inuse[i])
- ReleaseDB(aup, aup->tx_db_inuse[i]);
+ au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
dma_free_noncoherent(NULL, MAX_BUF_SIZE *
(NUM_TX_BUFFS + NUM_RX_BUFFS),
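A minimal sketch of the message-level logging pattern adopted above for au1000_eth: netif_dbg() only prints when the corresponding NETIF_MSG_* bit is set in the driver's msg_enable mask, and netdev_err() prefixes messages with the interface name the way the old "%s: " printk()s did. The foo_* names below are hypothetical, not part of the patch:

#include <linux/netdevice.h>

struct foo_private {
	u32 msg_enable;			/* NETIF_MSG_* bitmask */
};

static void foo_trace_open(struct net_device *dev, int err)
{
	struct foo_private *priv = netdev_priv(dev);

	/* emitted only when NETIF_MSG_DRV is set in priv->msg_enable */
	netif_dbg(priv, drv, dev, "open: dev=%p\n", dev);

	if (err)
		/* netdev_err() prints "<ifname>: ..." like the old printk did */
		netdev_err(dev, "open failed with error %d\n", err);
}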
diff --git a/drivers/net/au1000_eth.h b/drivers/net/au1000_eth.h
index f9d29a29b8fd..d06ec008fbf1 100644
--- a/drivers/net/au1000_eth.h
+++ b/drivers/net/au1000_eth.h
@@ -35,7 +35,7 @@
#define NUM_TX_BUFFS 4
#define MAX_BUF_SIZE 2048
-#define ETH_TX_TIMEOUT HZ/4
+#define ETH_TX_TIMEOUT (HZ/4)
#define MAC_MIN_PKT_SIZE 64
#define MULTICAST_FILTER_LIMIT 64
@@ -125,4 +125,6 @@ struct au1000_private {
dma_addr_t dma_addr; /* dma address of rx/tx buffers */
spinlock_t lock; /* Serialise access to device */
+
+ u32 msg_enable;
};
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 69d9f3d368ae..293f9c16e786 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -1014,8 +1014,6 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (TX_BUFFS_AVAIL(bp) < 1)
netif_stop_queue(dev);
- dev->trans_start = jiffies;
-
out_unlock:
spin_unlock_irqrestore(&bp->lock, flags);
@@ -1681,15 +1679,15 @@ static struct net_device_stats *b44_get_stats(struct net_device *dev)
static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
{
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
int i, num_ents;
num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
i = 0;
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (i == num_ents)
break;
- __b44_cam_write(bp, mclist->dmi_addr, i++ + 1);
+ __b44_cam_write(bp, ha->addr, i++ + 1);
}
return i+1;
}
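The b44 hunk above follows the tree-wide switch from struct dev_mc_list/dmi_addr to struct netdev_hw_addr/ha->addr when walking the multicast list. A small sketch of the new iteration, mirroring the hash computation used by au1000 and bfin_mac in this series; foo_build_mc_hash() is a hypothetical name:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>

static void foo_build_mc_hash(struct net_device *dev, u32 mc_filter[2])
{
	struct netdev_hw_addr *ha;

	mc_filter[0] = mc_filter[1] = 0;
	netdev_for_each_mc_addr(ha, dev)
		set_bit(ether_crc(ETH_ALEN, ha->addr) >> 26,
			(unsigned long *)mc_filter);
}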
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index 17460aba3bae..faf5add894d7 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -341,11 +341,9 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
}
skb_put(skb, len);
- skb->dev = dev;
skb->protocol = eth_type_trans(skb, dev);
priv->stats.rx_packets++;
priv->stats.rx_bytes += len;
- dev->last_rx = jiffies;
netif_receive_skb(skb);
} while (--budget > 0);
@@ -567,7 +565,6 @@ static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
priv->stats.tx_bytes += skb->len;
priv->stats.tx_packets++;
- dev->trans_start = jiffies;
ret = NETDEV_TX_OK;
out_unlock:
@@ -605,7 +602,7 @@ static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
static void bcm_enet_set_multicast_list(struct net_device *dev)
{
struct bcm_enet_priv *priv;
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
u32 val;
int i;
@@ -633,14 +630,14 @@ static void bcm_enet_set_multicast_list(struct net_device *dev)
}
i = 0;
- netdev_for_each_mc_addr(mc_list, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
u8 *dmi_addr;
u32 tmp;
if (i == 3)
break;
/* update perfect match registers */
- dmi_addr = mc_list->dmi_addr;
+ dmi_addr = ha->addr;
tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
(dmi_addr[4] << 8) | dmi_addr[5];
enet_writel(priv, tmp, ENET_PML_REG(i + 1));
@@ -960,7 +957,9 @@ static int bcm_enet_open(struct net_device *dev)
/* all set, enable mac and interrupts, start dma engine and
* kick rx dma channel */
wmb();
- enet_writel(priv, ENET_CTL_ENABLE_MASK, ENET_CTL_REG);
+ val = enet_readl(priv, ENET_CTL_REG);
+ val |= ENET_CTL_ENABLE_MASK;
+ enet_writel(priv, val, ENET_CTL_REG);
enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
ENETDMA_CHANCFG_REG(priv->rx_chan));
@@ -1647,7 +1646,6 @@ static int __devinit bcm_enet_probe(struct platform_device *pdev)
if (!dev)
return -ENOMEM;
priv = netdev_priv(dev);
- memset(priv, 0, sizeof(*priv));
ret = compute_hw_mtu(priv, dev->mtu);
if (ret)
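bcm63xx_enet (like several drivers in this series) stops writing dev->trans_start from the hot xmit path; the core updates the per-queue transmit timestamp after ndo_start_xmit(), so only the tx-timeout handlers keep the assignment, where it explicitly prevents an immediate re-timeout. A hedged sketch of the resulting xmit shape, with a hypothetical foo_start_xmit() and assuming the hardware already holds its own copy of the data:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ...map the skb and post it to the hardware ring here... */
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	dev_kfree_skb(skb);	/* safe only once the HW holds its own copy */
	/* no dev->trans_start = jiffies; the stack records this per tx queue */
	return NETDEV_TX_OK;
}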
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 56387b191c96..373c1a563474 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -84,6 +84,8 @@ static inline char *nic_name(struct pci_dev *pdev)
#define FW_VER_LEN 32
+#define BE_MAX_VF 32
+
struct be_dma_mem {
void *va;
dma_addr_t dma;
@@ -207,7 +209,7 @@ struct be_tx_obj {
/* Struct to remember the pages posted for rx frags */
struct be_rx_page_info {
struct page *page;
- dma_addr_t bus;
+ DEFINE_DMA_UNMAP_ADDR(bus);
u16 page_offset;
bool last_page_user;
};
@@ -281,8 +283,15 @@ struct be_adapter {
u8 port_type;
u8 transceiver;
u8 generation; /* BladeEngine ASIC generation */
+
+ bool sriov_enabled;
+ u32 vf_if_handle[BE_MAX_VF];
+ u32 vf_pmac_id[BE_MAX_VF];
+ u8 base_eq_id;
};
+#define be_physfn(adapter) (!adapter->pdev->is_virtfn)
+
/* BladeEngine Generation numbers */
#define BE_GEN2 2
#define BE_GEN3 3
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index d0ef4ac987cd..e79bf8b9af3b 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -843,7 +843,8 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
* Uses mbox
*/
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
- u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
+ u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id,
+ u32 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_if_create *req;
@@ -860,6 +861,7 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));
+ req->hdr.domain = domain;
req->capability_flags = cpu_to_le32(cap_flags);
req->enable_flags = cpu_to_le32(en_flags);
req->pmac_invalid = pmac_invalid;
@@ -1111,6 +1113,10 @@ int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
OPCODE_ETH_PROMISCUOUS, sizeof(*req));
+ /* In FW versions X.102.149/X.101.487 and later,
+ * the port setting associated only with the
+ * issuing pci function will take effect
+ */
if (port_num)
req->port1_promiscuous = en;
else
@@ -1157,13 +1163,13 @@ int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
req->interface_id = if_id;
if (netdev) {
int i;
- struct dev_mc_list *mc;
+ struct netdev_hw_addr *ha;
req->num_mac = cpu_to_le16(netdev_mc_count(netdev));
i = 0;
- netdev_for_each_mc_addr(mc, netdev)
- memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(req->mac[i].byte, ha->addr, ETH_ALEN);
} else {
req->promiscuous = 1;
}
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index cce61f9a3714..763dc199e337 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -878,7 +878,7 @@ extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id);
extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
u32 en_flags, u8 *mac, bool pmac_invalid,
- u32 *if_handle, u32 *pmac_id);
+ u32 *if_handle, u32 *pmac_id, u32 domain);
extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle);
extern int be_cmd_eq_create(struct be_adapter *adapter,
struct be_queue_info *eq, int eq_delay);
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 51e1065e7897..d488d52d710a 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -496,7 +496,7 @@ be_test_ddr_dma(struct be_adapter *adapter)
ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size,
&ddrdma_cmd.dma);
if (!ddrdma_cmd.va) {
- dev_err(&adapter->pdev->dev, "Memory allocation failure \n");
+ dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
return -ENOMEM;
}
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index 2d4a4b827637..063026de4957 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -99,6 +99,9 @@
/* Number of entries posted */
#define DB_MCCQ_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */
+/********** SRIOV VF PCICFG OFFSET ********/
+#define SRIOV_VF_PCICFG_OFFSET (4096)
+
/* Flashrom related descriptors */
#define IMAGE_TYPE_FIRMWARE 160
#define IMAGE_TYPE_BOOTCODE 224
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index ec6ace802256..fa10f13242c3 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -26,8 +26,11 @@ MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");
static unsigned int rx_frag_size = 2048;
+static unsigned int num_vfs;
module_param(rx_frag_size, uint, S_IRUGO);
+module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
+MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
@@ -138,12 +141,19 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
+ /* MAC addr configuration will be done in hardware for VFs
+ * by their corresponding PFs. Just copy to netdev addr here
+ */
+ if (!be_physfn(adapter))
+ goto netdev_addr;
+
status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
if (status)
return status;
status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
adapter->if_handle, &adapter->pmac_id);
+netdev_addr:
if (!status)
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
@@ -386,26 +396,48 @@ static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
+static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
+ bool unmap_single)
+{
+ dma_addr_t dma;
+
+ be_dws_le_to_cpu(wrb, sizeof(*wrb));
+
+ dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
+ if (wrb->frag_len) {
+ if (unmap_single)
+ pci_unmap_single(pdev, dma, wrb->frag_len,
+ PCI_DMA_TODEVICE);
+ else
+ pci_unmap_page(pdev, dma, wrb->frag_len,
+ PCI_DMA_TODEVICE);
+ }
+}
static int make_tx_wrbs(struct be_adapter *adapter,
struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
- u64 busaddr;
- u32 i, copied = 0;
+ dma_addr_t busaddr;
+ int i, copied = 0;
struct pci_dev *pdev = adapter->pdev;
struct sk_buff *first_skb = skb;
struct be_queue_info *txq = &adapter->tx_obj.q;
struct be_eth_wrb *wrb;
struct be_eth_hdr_wrb *hdr;
+ bool map_single = false;
+ u16 map_head;
hdr = queue_head_node(txq);
- atomic_add(wrb_cnt, &txq->used);
queue_head_inc(txq);
+ map_head = txq->head;
if (skb->len > skb->data_len) {
- int len = skb->len - skb->data_len;
+ int len = skb_headlen(skb);
busaddr = pci_map_single(pdev, skb->data, len,
PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(pdev, busaddr))
+ goto dma_err;
+ map_single = true;
wrb = queue_head_node(txq);
wrb_fill(wrb, busaddr, len);
be_dws_cpu_to_le(wrb, sizeof(*wrb));
@@ -419,6 +451,8 @@ static int make_tx_wrbs(struct be_adapter *adapter,
busaddr = pci_map_page(pdev, frag->page,
frag->page_offset,
frag->size, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(pdev, busaddr))
+ goto dma_err;
wrb = queue_head_node(txq);
wrb_fill(wrb, busaddr, frag->size);
be_dws_cpu_to_le(wrb, sizeof(*wrb));
@@ -438,6 +472,16 @@ static int make_tx_wrbs(struct be_adapter *adapter,
be_dws_cpu_to_le(hdr, sizeof(*hdr));
return copied;
+dma_err:
+ txq->head = map_head;
+ while (copied) {
+ wrb = queue_head_node(txq);
+ unmap_tx_frag(pdev, wrb, map_single);
+ map_single = false;
+ copied -= wrb->frag_len;
+ queue_head_inc(txq);
+ }
+ return 0;
}
static netdev_tx_t be_xmit(struct sk_buff *skb,
@@ -462,6 +506,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
* *BEFORE* ringing the tx doorbell, so that we serialze the
* tx compls of the current transmit which'll wake up the queue
*/
+ atomic_add(wrb_cnt, &txq->used);
if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
txq->len) {
netif_stop_queue(netdev);
@@ -541,6 +586,9 @@ static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ if (!be_physfn(adapter))
+ return;
+
adapter->vlan_tag[vid] = 1;
adapter->vlans_added++;
if (adapter->vlans_added <= (adapter->max_vlans + 1))
@@ -551,6 +599,9 @@ static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ if (!be_physfn(adapter))
+ return;
+
adapter->vlan_tag[vid] = 0;
vlan_group_set_device(adapter->vlan_grp, vid, NULL);
adapter->vlans_added--;
@@ -588,6 +639,28 @@ done:
return;
}
+static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ int status;
+
+ if (!adapter->sriov_enabled)
+ return -EPERM;
+
+ if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
+ return -EINVAL;
+
+ status = be_cmd_pmac_del(adapter, adapter->vf_if_handle[vf],
+ adapter->vf_pmac_id[vf]);
+
+ status = be_cmd_pmac_add(adapter, mac, adapter->vf_if_handle[vf],
+ &adapter->vf_pmac_id[vf]);
+ if (!status)
+ dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
+ mac, vf);
+ return status;
+}
+
static void be_rx_rate_update(struct be_adapter *adapter)
{
struct be_drvr_stats *stats = drvr_stats(adapter);
@@ -647,7 +720,7 @@ get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
BUG_ON(!rx_page_info->page);
if (rx_page_info->last_page_user) {
- pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus),
+ pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus),
adapter->big_page_size, PCI_DMA_FROMDEVICE);
rx_page_info->last_page_user = false;
}
@@ -791,7 +864,6 @@ static void be_rx_compl_process(struct be_adapter *adapter,
skb->truesize = skb->len + sizeof(struct sk_buff);
skb->protocol = eth_type_trans(skb, adapter->netdev);
- skb->dev = adapter->netdev;
vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
@@ -959,7 +1031,7 @@ static void be_post_rx_frags(struct be_adapter *adapter)
}
page_offset = page_info->page_offset;
page_info->page = pagep;
- pci_unmap_addr_set(page_info, bus, page_dmaaddr);
+ dma_unmap_addr_set(page_info, bus, page_dmaaddr);
frag_dmaaddr = page_dmaaddr + page_info->page_offset;
rxd = queue_head_node(rxq);
@@ -1012,35 +1084,26 @@ static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
struct be_eth_wrb *wrb;
struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
struct sk_buff *sent_skb;
- u64 busaddr;
- u16 cur_index, num_wrbs = 0;
+ u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
+ bool unmap_skb_hdr = true;
- cur_index = txq->tail;
- sent_skb = sent_skbs[cur_index];
+ sent_skb = sent_skbs[txq->tail];
BUG_ON(!sent_skb);
- sent_skbs[cur_index] = NULL;
- wrb = queue_tail_node(txq);
- be_dws_le_to_cpu(wrb, sizeof(*wrb));
- busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
- if (busaddr != 0) {
- pci_unmap_single(adapter->pdev, busaddr,
- wrb->frag_len, PCI_DMA_TODEVICE);
- }
- num_wrbs++;
+ sent_skbs[txq->tail] = NULL;
+
+ /* skip header wrb */
queue_tail_inc(txq);
- while (cur_index != last_index) {
+ do {
cur_index = txq->tail;
wrb = queue_tail_node(txq);
- be_dws_le_to_cpu(wrb, sizeof(*wrb));
- busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
- if (busaddr != 0) {
- pci_unmap_page(adapter->pdev, busaddr,
- wrb->frag_len, PCI_DMA_TODEVICE);
- }
+ unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
+ skb_headlen(sent_skb)));
+ unmap_skb_hdr = false;
+
num_wrbs++;
queue_tail_inc(txq);
- }
+ } while (cur_index != last_index);
atomic_sub(num_wrbs, &txq->used);
@@ -1255,6 +1318,8 @@ static int be_tx_queues_create(struct be_adapter *adapter)
/* Ask BE to create Tx Event queue */
if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
goto tx_eq_free;
+ adapter->base_eq_id = adapter->tx_eq.q.id;
+
/* Alloc TX eth compl queue */
cq = &adapter->tx_obj.cq;
if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
@@ -1382,7 +1447,7 @@ rx_eq_free:
/* There are 8 evt ids per func. Returns the evt id's bit number */
static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
{
- return eq_id % 8;
+ return eq_id - adapter->base_eq_id;
}
static irqreturn_t be_intx(int irq, void *dev)
@@ -1560,6 +1625,28 @@ static void be_msix_enable(struct be_adapter *adapter)
return;
}
+static void be_sriov_enable(struct be_adapter *adapter)
+{
+#ifdef CONFIG_PCI_IOV
+ int status;
+ if (be_physfn(adapter) && num_vfs) {
+ status = pci_enable_sriov(adapter->pdev, num_vfs);
+ adapter->sriov_enabled = status ? false : true;
+ }
+#endif
+ return;
+}
+
+static void be_sriov_disable(struct be_adapter *adapter)
+{
+#ifdef CONFIG_PCI_IOV
+ if (adapter->sriov_enabled) {
+ pci_disable_sriov(adapter->pdev);
+ adapter->sriov_enabled = false;
+ }
+#endif
+}
+
static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
{
return adapter->msix_entries[
@@ -1617,6 +1704,9 @@ static int be_irq_register(struct be_adapter *adapter)
status = be_msix_register(adapter);
if (status == 0)
goto done;
+ /* INTx is not supported for VF */
+ if (!be_physfn(adapter))
+ return status;
}
/* INTx */
@@ -1690,14 +1780,17 @@ static int be_open(struct net_device *netdev)
goto ret_sts;
be_link_status_update(adapter, link_up);
- status = be_vid_config(adapter);
+ if (be_physfn(adapter))
+ status = be_vid_config(adapter);
if (status)
goto ret_sts;
- status = be_cmd_set_flow_control(adapter,
- adapter->tx_fc, adapter->rx_fc);
- if (status)
- goto ret_sts;
+ if (be_physfn(adapter)) {
+ status = be_cmd_set_flow_control(adapter,
+ adapter->tx_fc, adapter->rx_fc);
+ if (status)
+ goto ret_sts;
+ }
schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
ret_sts:
@@ -1723,7 +1816,7 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
if (status) {
dev_err(&adapter->pdev->dev,
- "Could not enable Wake-on-lan \n");
+ "Could not enable Wake-on-lan\n");
pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
cmd.dma);
return status;
@@ -1745,22 +1838,48 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
static int be_setup(struct be_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- u32 cap_flags, en_flags;
+ u32 cap_flags, en_flags, vf = 0;
int status;
+ u8 mac[ETH_ALEN];
- cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_MCAST_PROMISCUOUS |
- BE_IF_FLAGS_PROMISCUOUS |
- BE_IF_FLAGS_PASS_L3L4_ERRORS;
- en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_PASS_L3L4_ERRORS;
+ cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
+
+ if (be_physfn(adapter)) {
+ cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
+ BE_IF_FLAGS_PROMISCUOUS |
+ BE_IF_FLAGS_PASS_L3L4_ERRORS;
+ en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
+ }
status = be_cmd_if_create(adapter, cap_flags, en_flags,
netdev->dev_addr, false/* pmac_invalid */,
- &adapter->if_handle, &adapter->pmac_id);
+ &adapter->if_handle, &adapter->pmac_id, 0);
if (status != 0)
goto do_none;
+ if (be_physfn(adapter)) {
+ while (vf < num_vfs) {
+ cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
+ | BE_IF_FLAGS_BROADCAST;
+ status = be_cmd_if_create(adapter, cap_flags, en_flags,
+ mac, true, &adapter->vf_if_handle[vf],
+ NULL, vf+1);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Interface Create failed for VF %d\n", vf);
+ goto if_destroy;
+ }
+ vf++;
+ } while (vf < num_vfs);
+ } else if (!be_physfn(adapter)) {
+ status = be_cmd_mac_addr_query(adapter, mac,
+ MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
+ if (!status) {
+ memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
+ memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+ }
+ }
+
status = be_tx_queues_create(adapter);
if (status != 0)
goto if_destroy;
@@ -1782,6 +1901,9 @@ rx_qs_destroy:
tx_qs_destroy:
be_tx_queues_destroy(adapter);
if_destroy:
+ for (vf = 0; vf < num_vfs; vf++)
+ if (adapter->vf_if_handle[vf])
+ be_cmd_if_destroy(adapter, adapter->vf_if_handle[vf]);
be_cmd_if_destroy(adapter, adapter->if_handle);
do_none:
return status;
@@ -2061,6 +2183,7 @@ static struct net_device_ops be_netdev_ops = {
.ndo_vlan_rx_register = be_vlan_register,
.ndo_vlan_rx_add_vid = be_vlan_add_vid,
.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
+ .ndo_set_vf_mac = be_set_vf_mac
};
static void be_netdev_init(struct net_device *netdev)
@@ -2102,37 +2225,48 @@ static void be_unmap_pci_bars(struct be_adapter *adapter)
iounmap(adapter->csr);
if (adapter->db)
iounmap(adapter->db);
- if (adapter->pcicfg)
+ if (adapter->pcicfg && be_physfn(adapter))
iounmap(adapter->pcicfg);
}
static int be_map_pci_bars(struct be_adapter *adapter)
{
u8 __iomem *addr;
- int pcicfg_reg;
-
- addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
- pci_resource_len(adapter->pdev, 2));
- if (addr == NULL)
- return -ENOMEM;
- adapter->csr = addr;
+ int pcicfg_reg, db_reg;
- addr = ioremap_nocache(pci_resource_start(adapter->pdev, 4),
- 128 * 1024);
- if (addr == NULL)
- goto pci_map_err;
- adapter->db = addr;
+ if (be_physfn(adapter)) {
+ addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
+ pci_resource_len(adapter->pdev, 2));
+ if (addr == NULL)
+ return -ENOMEM;
+ adapter->csr = addr;
+ }
- if (adapter->generation == BE_GEN2)
+ if (adapter->generation == BE_GEN2) {
pcicfg_reg = 1;
- else
+ db_reg = 4;
+ } else {
pcicfg_reg = 0;
-
- addr = ioremap_nocache(pci_resource_start(adapter->pdev, pcicfg_reg),
- pci_resource_len(adapter->pdev, pcicfg_reg));
+ if (be_physfn(adapter))
+ db_reg = 4;
+ else
+ db_reg = 0;
+ }
+ addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
+ pci_resource_len(adapter->pdev, db_reg));
if (addr == NULL)
goto pci_map_err;
- adapter->pcicfg = addr;
+ adapter->db = addr;
+
+ if (be_physfn(adapter)) {
+ addr = ioremap_nocache(
+ pci_resource_start(adapter->pdev, pcicfg_reg),
+ pci_resource_len(adapter->pdev, pcicfg_reg));
+ if (addr == NULL)
+ goto pci_map_err;
+ adapter->pcicfg = addr;
+ } else
+ adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
return 0;
pci_map_err:
@@ -2246,6 +2380,8 @@ static void __devexit be_remove(struct pci_dev *pdev)
be_ctrl_cleanup(adapter);
+ be_sriov_disable(adapter);
+
be_msix_disable(adapter);
pci_set_drvdata(pdev, NULL);
@@ -2270,16 +2406,20 @@ static int be_get_config(struct be_adapter *adapter)
return status;
memset(mac, 0, ETH_ALEN);
- status = be_cmd_mac_addr_query(adapter, mac,
+
+ if (be_physfn(adapter)) {
+ status = be_cmd_mac_addr_query(adapter, mac,
MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
- if (status)
- return status;
- if (!is_valid_ether_addr(mac))
- return -EADDRNOTAVAIL;
+ if (status)
+ return status;
+
+ if (!is_valid_ether_addr(mac))
+ return -EADDRNOTAVAIL;
- memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
- memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+ memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
+ memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+ }
if (adapter->cap & 0x400)
adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
@@ -2296,6 +2436,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
struct be_adapter *adapter;
struct net_device *netdev;
+
status = pci_enable_device(pdev);
if (status)
goto do_none;
@@ -2344,24 +2485,28 @@ static int __devinit be_probe(struct pci_dev *pdev,
}
}
+ be_sriov_enable(adapter);
+
status = be_ctrl_init(adapter);
if (status)
goto free_netdev;
/* sync up with fw's ready state */
- status = be_cmd_POST(adapter);
- if (status)
- goto ctrl_clean;
+ if (be_physfn(adapter)) {
+ status = be_cmd_POST(adapter);
+ if (status)
+ goto ctrl_clean;
+
+ status = be_cmd_reset_function(adapter);
+ if (status)
+ goto ctrl_clean;
+ }
/* tell fw we're ready to fire cmds */
status = be_cmd_fw_init(adapter);
if (status)
goto ctrl_clean;
- status = be_cmd_reset_function(adapter);
- if (status)
- goto ctrl_clean;
-
status = be_stats_init(adapter);
if (status)
goto ctrl_clean;
@@ -2391,6 +2536,7 @@ ctrl_clean:
be_ctrl_cleanup(adapter);
free_netdev:
be_msix_disable(adapter);
+ be_sriov_disable(adapter);
free_netdev(adapter->netdev);
pci_set_drvdata(pdev, NULL);
rel_reg:
@@ -2587,6 +2733,13 @@ static int __init be_init_module(void)
rx_frag_size = 2048;
}
+ if (num_vfs > 32) {
+ printk(KERN_WARNING DRV_NAME
+ " : Module param num_vfs must not be greater than 32."
+			" : Module param num_vfs must not be greater than 32. "
+ num_vfs = 32;
+ }
+
return pci_register_driver(&be_driver);
}
module_init(be_init_module);
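The be2net changes above add basic SR-IOV support: a num_vfs module parameter, pci_enable_sriov() on the physical function, and be_physfn() checks that skip PF-only configuration on virtual functions. A reduced sketch of the enable/disable pair; the foo_* wrappers are hypothetical, while the PCI calls and the is_virtfn flag are the real interfaces:

#include <linux/module.h>
#include <linux/pci.h>

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool foo_sriov_enable(struct pci_dev *pdev)
{
#ifdef CONFIG_PCI_IOV
	/* only a physical function may create VFs */
	if (!pdev->is_virtfn && num_vfs)
		return pci_enable_sriov(pdev, num_vfs) == 0;
#endif
	return false;
}

static void foo_sriov_disable(struct pci_dev *pdev, bool sriov_enabled)
{
#ifdef CONFIG_PCI_IOV
	if (sriov_enabled)
		pci_disable_sriov(pdev);
#endif
}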
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 587f93cf03f6..b0207f01eb6b 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -654,7 +654,6 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
out:
adjust_tx_list();
current_tx_ptr = current_tx_ptr->next;
- dev->trans_start = jiffies;
dev->stats.tx_packets++;
dev->stats.tx_bytes += (skb->len);
return NETDEV_TX_OK;
@@ -805,21 +804,21 @@ static void bfin_mac_timeout(struct net_device *dev)
bfin_mac_enable();
/* We can accept TX packets again */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
static void bfin_mac_multicast_hash(struct net_device *dev)
{
u32 emac_hashhi, emac_hashlo;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
u32 crc;
emac_hashhi = emac_hashlo = 0;
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
/* skip non-multicast addresses */
if (!(*addrs & 1))
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index 598b007f1991..44ceecf9d103 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -972,7 +972,7 @@ bmac_remove_multi(struct net_device *dev,
*/
static void bmac_set_multicast(struct net_device *dev)
{
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
struct bmac_data *bp = netdev_priv(dev);
int num_addrs = netdev_mc_count(dev);
unsigned short rx_cfg;
@@ -1001,8 +1001,8 @@ static void bmac_set_multicast(struct net_device *dev)
rx_cfg = bmac_rx_on(dev, 0, 0);
XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg));
} else {
- netdev_for_each_mc_addr(dmi, dev)
- bmac_addhash(bp, dmi->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev)
+ bmac_addhash(bp, ha->addr);
bmac_update_hash_table_mask(dev, bp);
rx_cfg = bmac_rx_on(dev, 1, 0);
XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg));
@@ -1016,7 +1016,7 @@ static void bmac_set_multicast(struct net_device *dev)
static void bmac_set_multicast(struct net_device *dev)
{
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
int i;
unsigned short rx_cfg;
@@ -1040,8 +1040,8 @@ static void bmac_set_multicast(struct net_device *dev)
for(i = 0; i < 4; i++) hash_table[i] = 0;
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
if(!(*addrs & 1))
continue;
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index ac90a3828f69..667f4196dc29 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -2672,7 +2672,7 @@ bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
}
rx_pg->page = page;
- pci_unmap_addr_set(rx_pg, mapping, mapping);
+ dma_unmap_addr_set(rx_pg, mapping, mapping);
rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
return 0;
@@ -2687,7 +2687,7 @@ bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
if (!page)
return;
- pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
+ pci_unmap_page(bp->pdev, dma_unmap_addr(rx_pg, mapping), PAGE_SIZE,
PCI_DMA_FROMDEVICE);
__free_page(page);
@@ -2719,7 +2719,8 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
}
rx_buf->skb = skb;
- pci_unmap_addr_set(rx_buf, mapping, mapping);
+ rx_buf->desc = (struct l2_fhdr *) skb->data;
+ dma_unmap_addr_set(rx_buf, mapping, mapping);
rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
@@ -2818,7 +2819,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
}
}
- pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+ pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
skb_headlen(skb), PCI_DMA_TODEVICE);
tx_buf->skb = NULL;
@@ -2828,7 +2829,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
sw_cons = NEXT_TX_BD(sw_cons);
pci_unmap_page(bp->pdev,
- pci_unmap_addr(
+ dma_unmap_addr(
&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
mapping),
skb_shinfo(skb)->frags[i].size,
@@ -2910,8 +2911,8 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
if (prod != cons) {
prod_rx_pg->page = cons_rx_pg->page;
cons_rx_pg->page = NULL;
- pci_unmap_addr_set(prod_rx_pg, mapping,
- pci_unmap_addr(cons_rx_pg, mapping));
+ dma_unmap_addr_set(prod_rx_pg, mapping,
+ dma_unmap_addr(cons_rx_pg, mapping));
prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
@@ -2935,18 +2936,19 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
prod_rx_buf = &rxr->rx_buf_ring[prod];
pci_dma_sync_single_for_device(bp->pdev,
- pci_unmap_addr(cons_rx_buf, mapping),
+ dma_unmap_addr(cons_rx_buf, mapping),
BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
rxr->rx_prod_bseq += bp->rx_buf_use_size;
prod_rx_buf->skb = skb;
+ prod_rx_buf->desc = (struct l2_fhdr *) skb->data;
if (cons == prod)
return;
- pci_unmap_addr_set(prod_rx_buf, mapping,
- pci_unmap_addr(cons_rx_buf, mapping));
+ dma_unmap_addr_set(prod_rx_buf, mapping,
+ dma_unmap_addr(cons_rx_buf, mapping));
cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
@@ -3019,7 +3021,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
/* Don't unmap yet. If we're unable to allocate a new
* page, we need to recycle the page and the DMA addr.
*/
- mapping_old = pci_unmap_addr(rx_pg, mapping);
+ mapping_old = dma_unmap_addr(rx_pg, mapping);
if (i == pages - 1)
frag_len -= 4;
@@ -3074,6 +3076,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
struct l2_fhdr *rx_hdr;
int rx_pkt = 0, pg_ring_used = 0;
+ struct pci_dev *pdev = bp->pdev;
hw_cons = bnx2_get_hw_rx_cons(bnapi);
sw_cons = rxr->rx_cons;
@@ -3086,7 +3089,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
while (sw_cons != hw_cons) {
unsigned int len, hdr_len;
u32 status;
- struct sw_bd *rx_buf;
+ struct sw_bd *rx_buf, *next_rx_buf;
struct sk_buff *skb;
dma_addr_t dma_addr;
u16 vtag = 0;
@@ -3097,16 +3100,23 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
skb = rx_buf->skb;
+ prefetchw(skb);
+ if (!get_dma_ops(&pdev->dev)->sync_single_for_cpu) {
+ next_rx_buf =
+ &rxr->rx_buf_ring[
+ RX_RING_IDX(NEXT_RX_BD(sw_cons))];
+ prefetch(next_rx_buf->desc);
+ }
rx_buf->skb = NULL;
- dma_addr = pci_unmap_addr(rx_buf, mapping);
+ dma_addr = dma_unmap_addr(rx_buf, mapping);
pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
PCI_DMA_FROMDEVICE);
- rx_hdr = (struct l2_fhdr *) skb->data;
+ rx_hdr = rx_buf->desc;
len = rx_hdr->l2_fhdr_pkt_len;
status = rx_hdr->l2_fhdr_status;
@@ -3207,10 +3217,10 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
#ifdef BCM_VLAN
if (hw_vlan)
- vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
+ vlan_gro_receive(&bnapi->napi, bp->vlgrp, vtag, skb);
else
#endif
- netif_receive_skb(skb);
+ napi_gro_receive(&bnapi->napi, skb);
rx_pkt++;
@@ -3548,7 +3558,6 @@ bnx2_set_rx_mode(struct net_device *dev)
}
else {
/* Accept one or more multicast(s). */
- struct dev_mc_list *mclist;
u32 mc_filter[NUM_MC_HASH_REGISTERS];
u32 regidx;
u32 bit;
@@ -3556,8 +3565,8 @@ bnx2_set_rx_mode(struct net_device *dev)
memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
- netdev_for_each_mc_addr(mclist, dev) {
- crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
bit = crc & 0xff;
regidx = (bit & 0xe0) >> 5;
bit &= 0x1f;
@@ -5318,7 +5327,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
}
pci_unmap_single(bp->pdev,
- pci_unmap_addr(tx_buf, mapping),
+ dma_unmap_addr(tx_buf, mapping),
skb_headlen(skb),
PCI_DMA_TODEVICE);
@@ -5329,7 +5338,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
for (k = 0; k < last; k++, j++) {
tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
pci_unmap_page(bp->pdev,
- pci_unmap_addr(tx_buf, mapping),
+ dma_unmap_addr(tx_buf, mapping),
skb_shinfo(skb)->frags[k].size,
PCI_DMA_TODEVICE);
}
@@ -5359,7 +5368,7 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
continue;
pci_unmap_single(bp->pdev,
- pci_unmap_addr(rx_buf, mapping),
+ dma_unmap_addr(rx_buf, mapping),
bp->rx_buf_use_size,
PCI_DMA_FROMDEVICE);
@@ -5765,11 +5774,11 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
rx_buf = &rxr->rx_buf_ring[rx_start_idx];
rx_skb = rx_buf->skb;
- rx_hdr = (struct l2_fhdr *) rx_skb->data;
+ rx_hdr = rx_buf->desc;
skb_reserve(rx_skb, BNX2_RX_OFFSET);
pci_dma_sync_single_for_cpu(bp->pdev,
- pci_unmap_addr(rx_buf, mapping),
+ dma_unmap_addr(rx_buf, mapping),
bp->rx_buf_size, PCI_DMA_FROMDEVICE);
if (rx_hdr->l2_fhdr_status &
@@ -6429,7 +6438,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_buf = &txr->tx_buf_ring[ring_prod];
tx_buf->skb = skb;
- pci_unmap_addr_set(tx_buf, mapping, mapping);
+ dma_unmap_addr_set(tx_buf, mapping, mapping);
txbd = &txr->tx_desc_ring[ring_prod];
@@ -6454,7 +6463,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
len, PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(bp->pdev, mapping))
goto dma_error;
- pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
+ dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
mapping);
txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
@@ -6491,7 +6500,7 @@ dma_error:
ring_prod = TX_RING_IDX(prod);
tx_buf = &txr->tx_buf_ring[ring_prod];
tx_buf->skb = NULL;
- pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+ pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
skb_headlen(skb), PCI_DMA_TODEVICE);
/* unmap remaining mapped pages */
@@ -6499,7 +6508,7 @@ dma_error:
prod = NEXT_TX_BD(prod);
ring_prod = TX_RING_IDX(prod);
tx_buf = &txr->tx_buf_ring[ring_prod];
- pci_unmap_page(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+ pci_unmap_page(bp->pdev, dma_unmap_addr(tx_buf, mapping),
skb_shinfo(skb)->frags[i].size,
PCI_DMA_TODEVICE);
}
@@ -8297,7 +8306,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
memcpy(dev->dev_addr, bp->mac_addr, 6);
memcpy(dev->perm_addr, bp->mac_addr, 6);
- dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO;
vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
if (CHIP_NUM(bp) == CHIP_NUM_5709) {
dev->features |= NETIF_F_IPV6_CSUM;
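bnx2 above advertises NETIF_F_GRO and hands received skbs to napi_gro_receive() (vlan_gro_receive() for tagged frames) instead of netif_receive_skb(), so the stack can coalesce TCP segments. A minimal sketch with hypothetical foo_* names:

#include <linux/netdevice.h>

static void foo_set_features(struct net_device *dev)
{
	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO;
}

static void foo_rx_deliver(struct napi_struct *napi, struct sk_buff *skb)
{
	/* replaces a plain netif_receive_skb() in the NAPI poll loop */
	napi_gro_receive(napi, skb);
}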
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index cd4b0e4637ab..dd35bd0b7e05 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6551,17 +6551,18 @@ struct l2_fhdr {
struct sw_bd {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ struct l2_fhdr *desc;
+ DEFINE_DMA_UNMAP_ADDR(mapping);
};
struct sw_pg {
struct page *page;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
};
struct sw_tx_bd {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
unsigned short is_gso;
unsigned short nr_frags;
};
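bnx2 and bnx2x convert their ring bookkeeping from the PCI-specific DECLARE_PCI_UNMAP_ADDR()/pci_unmap_addr*() macros to the generic DMA-API equivalents used in the hunks above. A sketch of the new helpers; the struct and function names are hypothetical:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct foo_rx_buf {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(mapping);	/* compiles to nothing when unmap state is not needed */
};

static void foo_store_mapping(struct foo_rx_buf *buf, dma_addr_t addr)
{
	dma_unmap_addr_set(buf, mapping, addr);
}

static dma_addr_t foo_load_mapping(const struct foo_rx_buf *buf)
{
	return dma_unmap_addr(buf, mapping);
}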
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index 3c48a7a68308..8bd23687c530 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -24,16 +24,25 @@
#define BCM_VLAN 1
#endif
+#define BNX2X_MULTI_QUEUE
+
+#define BNX2X_NEW_NAPI
+
+
+
#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
#include "cnic_if.h"
#endif
-#define BNX2X_MULTI_QUEUE
-
-#define BNX2X_NEW_NAPI
-
+#ifdef BCM_CNIC
+#define BNX2X_MIN_MSIX_VEC_CNT 3
+#define BNX2X_MSIX_VEC_FP_START 2
+#else
+#define BNX2X_MIN_MSIX_VEC_CNT 2
+#define BNX2X_MSIX_VEC_FP_START 1
+#endif
#include <linux/mdio.h>
#include "bnx2x_reg.h"
@@ -83,7 +92,12 @@ do { \
__func__, __LINE__, \
bp->dev ? (bp->dev->name) : "?", \
##__args); \
-} while (0)
+ } while (0)
+
+#define BNX2X_ERROR(__fmt, __args...) do { \
+ pr_err("[%s:%d]" __fmt, __func__, __LINE__, ##__args); \
+ } while (0)
+
/* before we have a dev->name use dev_info() */
#define BNX2X_DEV_INFO(__fmt, __args...) \
@@ -155,15 +169,21 @@ do { \
#define SHMEM2_RD(bp, field) REG_RD(bp, SHMEM2_ADDR(bp, field))
#define SHMEM2_WR(bp, field, val) REG_WR(bp, SHMEM2_ADDR(bp, field), val)
+#define MF_CFG_RD(bp, field) SHMEM_RD(bp, mf_cfg.field)
+#define MF_CFG_WR(bp, field, val) SHMEM_WR(bp, mf_cfg.field, val)
+
#define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg)
#define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val)
+#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
+ AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
+
/* fast path */
struct sw_rx_bd {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
};
struct sw_tx_bd {
@@ -176,7 +196,7 @@ struct sw_tx_bd {
struct sw_rx_page {
struct page *page;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
};
union db_prod {
@@ -261,7 +281,7 @@ struct bnx2x_eth_q_stats {
u32 hw_csum_err;
};
-#define BNX2X_NUM_Q_STATS 11
+#define BNX2X_NUM_Q_STATS 13
#define Q_STATS_OFFSET32(stat_name) \
(offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)
@@ -767,7 +787,7 @@ struct bnx2x_eth_stats {
u32 nig_timer_max;
};
-#define BNX2X_NUM_STATS 41
+#define BNX2X_NUM_STATS 43
#define STATS_OFFSET32(stat_name) \
(offsetof(struct bnx2x_eth_stats, stat_name) / 4)
@@ -818,6 +838,12 @@ struct attn_route {
u32 sig[4];
};
+typedef enum {
+ BNX2X_RECOVERY_DONE,
+ BNX2X_RECOVERY_INIT,
+ BNX2X_RECOVERY_WAIT,
+} bnx2x_recovery_state_t;
+
struct bnx2x {
/* Fields used in the tx and intr/napi performance paths
* are grouped together in the beginning of the structure
@@ -835,6 +861,9 @@ struct bnx2x {
struct pci_dev *pdev;
atomic_t intr_sem;
+
+ bnx2x_recovery_state_t recovery_state;
+ int is_leader;
#ifdef BCM_CNIC
struct msix_entry msix_table[MAX_CONTEXT+2];
#else
@@ -842,7 +871,6 @@ struct bnx2x {
#endif
#define INT_MODE_INTx 1
#define INT_MODE_MSI 2
-#define INT_MODE_MSIX 3
int tx_ring_size;
@@ -924,8 +952,7 @@ struct bnx2x {
int mrrs;
struct delayed_work sp_task;
- struct work_struct reset_task;
-
+ struct delayed_work reset_task;
struct timer_list timer;
int current_interval;
@@ -961,6 +988,8 @@ struct bnx2x {
u16 rx_quick_cons_trip;
u16 rx_ticks_int;
u16 rx_ticks;
+/* Maximal coalescing timeout in us */
+#define BNX2X_MAX_COALESCE_TOUT (0xf0*12)
u32 lin_cnt;
@@ -1075,6 +1104,7 @@ struct bnx2x {
#define INIT_CSEM_INT_TABLE_DATA(bp) (bp->csem_int_table_data)
#define INIT_CSEM_PRAM_DATA(bp) (bp->csem_pram_data)
+ char fw_ver[32];
const struct firmware *firmware;
};
@@ -1125,6 +1155,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define LOAD_DIAG 2
#define UNLOAD_NORMAL 0
#define UNLOAD_CLOSE 1
+#define UNLOAD_RECOVERY 2
/* DMAE command defines */
@@ -1152,7 +1183,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define DMAE_CMD_E1HVN_SHIFT DMAE_COMMAND_E1HVN_SHIFT
#define DMAE_LEN32_RD_MAX 0x80
-#define DMAE_LEN32_WR_MAX 0x400
+#define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000)
#define DMAE_COMP_VAL 0xe0d0d0ae
@@ -1294,8 +1325,12 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \
AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR)
+#define HW_PRTY_ASSERT_SET_3 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
-#define MULTI_FLAGS(bp) \
+#define RSS_FLAGS(bp) \
(TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \
TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \
TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \
@@ -1333,6 +1368,9 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define PXP2_REG_PXP2_INT_STS PXP2_REG_PXP2_INT_STS_0
#endif
+#define BNX2X_VPD_LEN 128
+#define VENDOR_ID_LEN 4
+
/* MISC_REG_RESET_REG - this is here for the hsi to work don't touch */
#endif /* bnx2x.h */
diff --git a/drivers/net/bnx2x_hsi.h b/drivers/net/bnx2x_hsi.h
index 760069345b11..fd1f29e0317d 100644
--- a/drivers/net/bnx2x_hsi.h
+++ b/drivers/net/bnx2x_hsi.h
@@ -683,7 +683,7 @@ struct drv_func_mb {
#define DRV_MSG_CODE_GET_MANUF_KEY 0x82000000
#define DRV_MSG_CODE_LOAD_L2B_PRAM 0x90000000
/*
- * The optic module verification commands requris bootcode
+ * The optic module verification commands require bootcode
* v5.0.6 or later
*/
#define DRV_MSG_CODE_VRFY_OPT_MDL 0xa0000000
diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x_link.c
index 32e79c359e89..ff70be898765 100644
--- a/drivers/net/bnx2x_link.c
+++ b/drivers/net/bnx2x_link.c
@@ -1594,7 +1594,7 @@ static u8 bnx2x_ext_phy_resolve_fc(struct link_params *params,
MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
pause_result |= (lp_pause &
MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
- DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x \n",
+ DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n",
pause_result);
bnx2x_pause_resolve(vars, pause_result);
if (vars->flow_ctrl == BNX2X_FLOW_CTRL_NONE &&
@@ -1616,7 +1616,7 @@ static u8 bnx2x_ext_phy_resolve_fc(struct link_params *params,
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7;
bnx2x_pause_resolve(vars, pause_result);
- DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x \n",
+ DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x\n",
pause_result);
}
}
@@ -1974,7 +1974,7 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
}
}
- DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %x line_speed %x \n",
+ DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %x line_speed %x\n",
gp_status, vars->phy_link_up, vars->line_speed);
DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x"
" autoneg 0x%x\n",
@@ -3852,7 +3852,7 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
SPEED_AUTO_NEG) &&
((params->speed_cap_mask &
PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))) {
- DP(NETIF_MSG_LINK, "Setting 1G clause37 \n");
+ DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
bnx2x_cl45_write(bp, params->port, ext_phy_type,
ext_phy_addr, MDIO_AN_DEVAD,
MDIO_AN_REG_ADV, 0x20);
@@ -4234,14 +4234,14 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
ext_phy_addr,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_10G_CTRL2, &tmp1);
- DP(NETIF_MSG_LINK, "1.7 = 0x%x \n", tmp1);
+ DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
} else if ((params->req_line_speed ==
SPEED_AUTO_NEG) &&
((params->speed_cap_mask &
PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))) {
- DP(NETIF_MSG_LINK, "Setting 1G clause37 \n");
+ DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
bnx2x_cl45_write(bp, params->port, ext_phy_type,
ext_phy_addr, MDIO_AN_DEVAD,
MDIO_PMA_REG_8727_MISC_CTRL, 0);
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 6c042a72d6cc..2bc35c794aec 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -57,8 +57,8 @@
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"
-#define DRV_MODULE_VERSION "1.52.1-7"
-#define DRV_MODULE_RELDATE "2010/02/28"
+#define DRV_MODULE_VERSION "1.52.53-1"
+#define DRV_MODULE_RELDATE "2010/18/04"
#define BNX2X_BC_VER 0x040200
#include <linux/firmware.h>
@@ -102,7 +102,8 @@ MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
static int int_mode;
module_param(int_mode, int, 0);
-MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
+MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
+ "(1 INT#x; 2 MSI)");
static int dropless_fc;
module_param(dropless_fc, int, 0);
@@ -352,13 +353,14 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
u32 addr, u32 len)
{
+ int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
int offset = 0;
- while (len > DMAE_LEN32_WR_MAX) {
+ while (len > dmae_wr_max) {
bnx2x_write_dmae(bp, phys_addr + offset,
- addr + offset, DMAE_LEN32_WR_MAX);
- offset += DMAE_LEN32_WR_MAX * 4;
- len -= DMAE_LEN32_WR_MAX;
+ addr + offset, dmae_wr_max);
+ offset += dmae_wr_max * 4;
+ len -= dmae_wr_max;
}
bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
@@ -508,26 +510,31 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
static void bnx2x_fw_dump(struct bnx2x *bp)
{
+ u32 addr;
u32 mark, offset;
__be32 data[9];
int word;
- mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
- mark = ((mark + 0x3) & ~0x3);
+ if (BP_NOMCP(bp)) {
+ BNX2X_ERR("NO MCP - can not dump\n");
+ return;
+ }
+
+ addr = bp->common.shmem_base - 0x0800 + 4;
+ mark = REG_RD(bp, addr);
+ mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
pr_err("begin fw dump (mark 0x%x)\n", mark);
pr_err("");
- for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
+ for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
for (word = 0; word < 8; word++)
- data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
- offset + 4*word));
+ data[word] = htonl(REG_RD(bp, offset + 4*word));
data[8] = 0x0;
pr_cont("%s", (char *)data);
}
- for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
+ for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
for (word = 0; word < 8; word++)
- data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
- offset + 4*word));
+ data[word] = htonl(REG_RD(bp, offset + 4*word));
data[8] = 0x0;
pr_cont("%s", (char *)data);
}
@@ -546,9 +553,9 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
/* Indices */
/* Common */
- BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
- " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
- " spq_prod_idx(%u)\n",
+ BNX2X_ERR("def_c_idx(0x%x) def_u_idx(0x%x) def_x_idx(0x%x)"
+ " def_t_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
+ " spq_prod_idx(0x%x)\n",
bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
@@ -556,14 +563,14 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
for_each_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
- BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
- " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
- " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
+ BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
+ " *rx_bd_cons_sb(0x%x) rx_comp_prod(0x%x)"
+ " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
i, fp->rx_bd_prod, fp->rx_bd_cons,
le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
- BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
- " fp_u_idx(%x) *sb_u_idx(%x)\n",
+ BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
+ " fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
fp->rx_sge_prod, fp->last_max_sge,
le16_to_cpu(fp->fp_u_idx),
fp->status_blk->u_status_block.status_block_index);
@@ -573,12 +580,13 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
for_each_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
- BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
- " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
+ BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
+ " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
+ " *tx_cons_sb(0x%x)\n",
i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
- BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
- " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
+ BNX2X_ERR(" fp_c_idx(0x%x) *sb_c_idx(0x%x)"
+ " tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
fp->status_blk->c_status_block.status_block_index,
fp->tx_db.data.prod);
}
@@ -764,6 +772,40 @@ static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
* General service functions
*/
+/* Return true if the lock was successfully acquired */
+static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
+{
+ u32 lock_status;
+ u32 resource_bit = (1 << resource);
+ int func = BP_FUNC(bp);
+ u32 hw_lock_control_reg;
+
+ DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
+
+ /* Validating that the resource is within range */
+ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
+ DP(NETIF_MSG_HW,
+ "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+ resource, HW_LOCK_MAX_RESOURCE_VALUE);
+ return -EINVAL;
+ }
+
+ if (func <= 5)
+ hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+ else
+ hw_lock_control_reg =
+ (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+
+ /* Try to acquire the lock */
+ REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
+ lock_status = REG_RD(bp, hw_lock_control_reg);
+ if (lock_status & resource_bit)
+ return true;
+
+ DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
+ return false;
+}
+
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
u8 storm, u16 index, u8 op, u8 update)
{
@@ -842,7 +884,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* unmap first bd */
DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
- pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
+ dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
@@ -872,8 +914,8 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
- pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
- BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
+ dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
+ BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
if (--nbd)
bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
}
@@ -1023,7 +1065,8 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
default:
BNX2X_ERR("unexpected MC reply (%d) "
- "fp->state is %x\n", command, fp->state);
+ "fp[%d] state is %x\n",
+ command, fp->index, fp->state);
break;
}
mb(); /* force bnx2x_wait_ramrod() to see the change */
@@ -1086,7 +1129,7 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
if (!page)
return;
- pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
+ dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
__free_pages(page, PAGES_PER_SGE_SHIFT);
@@ -1115,15 +1158,15 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
if (unlikely(page == NULL))
return -ENOMEM;
- mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
- PCI_DMA_FROMDEVICE);
+ mapping = dma_map_page(&bp->pdev->dev, page, 0,
+ SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
__free_pages(page, PAGES_PER_SGE_SHIFT);
return -ENOMEM;
}
sw_buf->page = page;
- pci_unmap_addr_set(sw_buf, mapping, mapping);
+ dma_unmap_addr_set(sw_buf, mapping, mapping);
sge->addr_hi = cpu_to_le32(U64_HI(mapping));
sge->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -1143,15 +1186,15 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
if (unlikely(skb == NULL))
return -ENOMEM;
- mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
- PCI_DMA_FROMDEVICE);
+ mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
+ DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
dev_kfree_skb(skb);
return -ENOMEM;
}
rx_buf->skb = skb;
- pci_unmap_addr_set(rx_buf, mapping, mapping);
+ dma_unmap_addr_set(rx_buf, mapping, mapping);
rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -1173,13 +1216,13 @@ static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
- pci_dma_sync_single_for_device(bp->pdev,
- pci_unmap_addr(cons_rx_buf, mapping),
- RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&bp->pdev->dev,
+ dma_unmap_addr(cons_rx_buf, mapping),
+ RX_COPY_THRESH, DMA_FROM_DEVICE);
prod_rx_buf->skb = cons_rx_buf->skb;
- pci_unmap_addr_set(prod_rx_buf, mapping,
- pci_unmap_addr(cons_rx_buf, mapping));
+ dma_unmap_addr_set(prod_rx_buf, mapping,
+ dma_unmap_addr(cons_rx_buf, mapping));
*prod_bd = *cons_bd;
}
@@ -1283,9 +1326,9 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
/* move empty skb from pool to prod and map it */
prod_rx_buf->skb = fp->tpa_pool[queue].skb;
- mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
- bp->rx_buf_size, PCI_DMA_FROMDEVICE);
- pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
+ mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
+ bp->rx_buf_size, DMA_FROM_DEVICE);
+ dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
/* move partial skb from cons to pool (don't unmap yet) */
fp->tpa_pool[queue] = *cons_rx_buf;
@@ -1302,7 +1345,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
#ifdef BNX2X_STOP_ON_ERROR
fp->tpa_queue_used |= (1 << queue);
-#ifdef __powerpc64__
+#ifdef _ASM_GENERIC_INT_L64_H
DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
@@ -1331,8 +1374,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
max(frag_size, (u32)len_on_bd));
#ifdef BNX2X_STOP_ON_ERROR
- if (pages >
- min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
+ if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
pages, cqe_idx);
BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
@@ -1361,8 +1403,9 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
}
/* Unmap the page as we are going to pass it to the stack */
- pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
- SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&bp->pdev->dev,
+ dma_unmap_addr(&old_rx_pg, mapping),
+ SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
/* Add one frag and update the appropriate fields in the skb */
skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
@@ -1389,8 +1432,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* Unmap skb in the pool anyway, as we are going to change
pool entry status to BNX2X_TPA_STOP even if new skb allocation
fails. */
- pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
- bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
+ bp->rx_buf_size, DMA_FROM_DEVICE);
if (likely(new_skb)) {
/* fix ip xsum and give it to the stack */
@@ -1441,12 +1484,12 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
#ifdef BCM_VLAN
if ((bp->vlgrp != NULL) && is_vlan_cqe &&
(!is_not_hwaccel_vlan_cqe))
- vlan_hwaccel_receive_skb(skb, bp->vlgrp,
- le16_to_cpu(cqe->fast_path_cqe.
- vlan_tag));
+ vlan_gro_receive(&fp->napi, bp->vlgrp,
+ le16_to_cpu(cqe->fast_path_cqe.
+ vlan_tag), skb);
else
#endif
- netif_receive_skb(skb);
+ napi_gro_receive(&fp->napi, skb);
} else {
DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
" - dropping packet!\n");
@@ -1539,7 +1582,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
struct sw_rx_bd *rx_buf = NULL;
struct sk_buff *skb;
union eth_rx_cqe *cqe;
- u8 cqe_fp_flags;
+ u8 cqe_fp_flags, cqe_fp_status_flags;
u16 len, pad;
comp_ring_cons = RCQ_BD(sw_comp_cons);
@@ -1555,6 +1598,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
cqe = &fp->rx_comp_ring[comp_ring_cons];
cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
+ cqe_fp_status_flags = cqe->fast_path_cqe.status_flags;
DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
" queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
@@ -1573,7 +1617,6 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
rx_buf = &fp->rx_buf_ring[bd_cons];
skb = rx_buf->skb;
prefetch(skb);
- prefetch((u8 *)skb + 256);
len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
pad = cqe->fast_path_cqe.placement_offset;
@@ -1620,11 +1663,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
}
}
- pci_dma_sync_single_for_device(bp->pdev,
- pci_unmap_addr(rx_buf, mapping),
- pad + RX_COPY_THRESH,
- PCI_DMA_FROMDEVICE);
- prefetch(skb);
+ dma_sync_single_for_device(&bp->pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
+ pad + RX_COPY_THRESH,
+ DMA_FROM_DEVICE);
prefetch(((char *)(skb)) + 128);
/* is this an error packet? */
@@ -1665,10 +1707,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
} else
if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
- pci_unmap_single(bp->pdev,
- pci_unmap_addr(rx_buf, mapping),
+ dma_unmap_single(&bp->pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
bp->rx_buf_size,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
skb_reserve(skb, pad);
skb_put(skb, len);
@@ -1684,6 +1726,12 @@ reuse_rx:
skb->protocol = eth_type_trans(skb, bp->dev);
+ if ((bp->dev->features & NETIF_F_RXHASH) &&
+ (cqe_fp_status_flags &
+ ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
+ skb->rxhash = le32_to_cpu(
+ cqe->fast_path_cqe.rss_hash_result);
+
skb->ip_summed = CHECKSUM_NONE;
if (bp->rx_csum) {
if (likely(BNX2X_RX_CSUM_OK(cqe)))
@@ -1699,11 +1747,11 @@ reuse_rx:
if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
PARSING_FLAGS_VLAN))
- vlan_hwaccel_receive_skb(skb, bp->vlgrp,
- le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
+ vlan_gro_receive(&fp->napi, bp->vlgrp,
+ le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
else
#endif
- netif_receive_skb(skb);
+ napi_gro_receive(&fp->napi, skb);
next_rx:
@@ -1831,8 +1879,8 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
return IRQ_HANDLED;
}
- if (status)
- DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
+ if (unlikely(status))
+ DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
status);
return IRQ_HANDLED;
@@ -1900,6 +1948,8 @@ static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
int func = BP_FUNC(bp);
u32 hw_lock_control_reg;
+ DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
+
/* Validating that the resource is within range */
if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
DP(NETIF_MSG_HW,
@@ -2254,11 +2304,14 @@ static void bnx2x__link_reset(struct bnx2x *bp)
static u8 bnx2x_link_test(struct bnx2x *bp)
{
- u8 rc;
+ u8 rc = 0;
- bnx2x_acquire_phy_lock(bp);
- rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
- bnx2x_release_phy_lock(bp);
+ if (!BP_NOMCP(bp)) {
+ bnx2x_acquire_phy_lock(bp);
+ rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
+ bnx2x_release_phy_lock(bp);
+ } else
+ BNX2X_ERR("Bootcode is missing - can not test link\n");
return rc;
}
@@ -2387,10 +2440,10 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
than zero */
m_fair_vn.vn_credit_delta =
- max((u32)(vn_min_rate * (T_FAIR_COEF /
- (8 * bp->vn_weight_sum))),
- (u32)(bp->cmng.fair_vars.fair_threshold * 2));
- DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
+ max_t(u32, (vn_min_rate * (T_FAIR_COEF /
+ (8 * bp->vn_weight_sum))),
+ (bp->cmng.fair_vars.fair_threshold * 2));
+ DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
m_fair_vn.vn_credit_delta);
}
@@ -2410,6 +2463,7 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
+ u32 prev_link_status = bp->link_vars.link_status;
/* Make sure that we are synced with the current statistics */
bnx2x_stats_handle(bp, STATS_EVENT_STOP);
@@ -2442,8 +2496,9 @@ static void bnx2x_link_attn(struct bnx2x *bp)
bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
}
- /* indicate link status */
- bnx2x_link_report(bp);
+ /* indicate link status only if link status actually changed */
+ if (prev_link_status != bp->link_vars.link_status)
+ bnx2x_link_report(bp);
if (IS_E1HMF(bp)) {
int port = BP_PORT(bp);
@@ -2560,7 +2615,6 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
return rc;
}
-static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
static void bnx2x_set_rx_mode(struct net_device *dev);
@@ -2696,12 +2750,6 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
{
struct eth_spe *spe;
- DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
- "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
- (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
- (void *)bp->spq_prod_bd - (void *)bp->spq), command,
- HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
-
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
return -EIO;
@@ -2720,8 +2768,8 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
/* CID needs port number to be encoded in it */
spe->hdr.conn_and_cmd_data =
- cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
- HW_CID(bp, cid)));
+ cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
+ HW_CID(bp, cid));
spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
if (common)
spe->hdr.type |=
@@ -2732,6 +2780,13 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
bp->spq_left--;
+ DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
+ "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
+ bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
+ (u32)(U64_LO(bp->spq_mapping) +
+ (void *)bp->spq_prod_bd - (void *)bp->spq), command,
+ HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
+
bnx2x_sp_prod_update(bp);
spin_unlock_bh(&bp->spq_lock);
return 0;
@@ -2740,12 +2795,11 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
- u32 i, j, val;
+ u32 j, val;
int rc = 0;
might_sleep();
- i = 100;
- for (j = 0; j < i*10; j++) {
+ for (j = 0; j < 1000; j++) {
val = (1UL << 31);
REG_WR(bp, GRCBASE_MCP + 0x9c, val);
val = REG_RD(bp, GRCBASE_MCP + 0x9c);
@@ -2765,9 +2819,7 @@ static int bnx2x_acquire_alr(struct bnx2x *bp)
/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
- u32 val = 0;
-
- REG_WR(bp, GRCBASE_MCP + 0x9c, val);
+ REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
}
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
@@ -2823,7 +2875,7 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
aeu_mask, asserted);
- aeu_mask &= ~(asserted & 0xff);
+ aeu_mask &= ~(asserted & 0x3ff);
DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
REG_WR(bp, aeu_addr, aeu_mask);
@@ -2910,8 +2962,9 @@ static inline void bnx2x_fan_failure(struct bnx2x *bp)
bp->link_params.ext_phy_config);
/* log the failure */
- netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
- "Please contact Dell Support for assistance.\n");
+ netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
+ " the driver to shutdown the card to prevent permanent"
+ " damage. Please contact OEM Support for assistance\n");
}
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
@@ -3104,10 +3157,311 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
}
}
-static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
+static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
+static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
+
+
+#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
+#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
+#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
+#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
+#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
+#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
+/*
+ * should be run under rtnl lock
+ */
+static inline void bnx2x_set_reset_done(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+ val &= ~(1 << RESET_DONE_FLAG_SHIFT);
+ REG_WR(bp, BNX2X_MISC_GEN_REG, val);
+ barrier();
+ mmiowb();
+}
+
+/*
+ * should be run under rtnl lock
+ */
+static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+ val |= (1 << 16);
+ REG_WR(bp, BNX2X_MISC_GEN_REG, val);
+ barrier();
+ mmiowb();
+}
+
+/*
+ * should be run under rtnl lock
+ */
+static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+ DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
+ return (val & RESET_DONE_FLAG_MASK) ? false : true;
+}
+
+/*
+ * should be run under rtnl lock
+ */
+static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
+{
+ u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+
+ DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
+
+ val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
+ REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
+ barrier();
+ mmiowb();
+}
+
+/*
+ * should be run under rtnl lock
+ */
+static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
+{
+ u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+
+ DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
+
+ val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
+ REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
+ barrier();
+ mmiowb();
+
+ return val1;
+}
+
+/*
+ * should be run under rtnl lock
+ */
+static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
+{
+ return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
+}
+
+static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+ REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
+}
+
+static inline void _print_next_block(int idx, const char *blk)
+{
+ if (idx)
+ pr_cont(", ");
+ pr_cont("%s", blk);
+}
+
+static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
+{
+ int i = 0;
+ u32 cur_bit = 0;
+ for (i = 0; sig; i++) {
+ cur_bit = ((u32)0x1 << i);
+ if (sig & cur_bit) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
+ _print_next_block(par_num++, "BRB");
+ break;
+ case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
+ _print_next_block(par_num++, "PARSER");
+ break;
+ case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
+ _print_next_block(par_num++, "TSDM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
+ _print_next_block(par_num++, "SEARCHER");
+ break;
+ case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
+ _print_next_block(par_num++, "TSEMI");
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return par_num;
+}
+
+static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
+{
+ int i = 0;
+ u32 cur_bit = 0;
+ for (i = 0; sig; i++) {
+ cur_bit = ((u32)0x1 << i);
+ if (sig & cur_bit) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
+ _print_next_block(par_num++, "PBCLIENT");
+ break;
+ case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
+ _print_next_block(par_num++, "QM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
+ _print_next_block(par_num++, "XSDM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
+ _print_next_block(par_num++, "XSEMI");
+ break;
+ case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
+ _print_next_block(par_num++, "DOORBELLQ");
+ break;
+ case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
+ _print_next_block(par_num++, "VAUX PCI CORE");
+ break;
+ case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
+ _print_next_block(par_num++, "DEBUG");
+ break;
+ case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
+ _print_next_block(par_num++, "USDM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
+ _print_next_block(par_num++, "USEMI");
+ break;
+ case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
+ _print_next_block(par_num++, "UPB");
+ break;
+ case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
+ _print_next_block(par_num++, "CSDM");
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return par_num;
+}
+
+static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
+{
+ int i = 0;
+ u32 cur_bit = 0;
+ for (i = 0; sig; i++) {
+ cur_bit = ((u32)0x1 << i);
+ if (sig & cur_bit) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
+ _print_next_block(par_num++, "CSEMI");
+ break;
+ case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
+ _print_next_block(par_num++, "PXP");
+ break;
+ case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
+ _print_next_block(par_num++,
+ "PXPPCICLOCKCLIENT");
+ break;
+ case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
+ _print_next_block(par_num++, "CFC");
+ break;
+ case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
+ _print_next_block(par_num++, "CDU");
+ break;
+ case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
+ _print_next_block(par_num++, "IGU");
+ break;
+ case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
+ _print_next_block(par_num++, "MISC");
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return par_num;
+}
+
+static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
+{
+ int i = 0;
+ u32 cur_bit = 0;
+ for (i = 0; sig; i++) {
+ cur_bit = ((u32)0x1 << i);
+ if (sig & cur_bit) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
+ _print_next_block(par_num++, "MCP ROM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
+ _print_next_block(par_num++, "MCP UMP RX");
+ break;
+ case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
+ _print_next_block(par_num++, "MCP UMP TX");
+ break;
+ case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
+ _print_next_block(par_num++, "MCP SCPAD");
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return par_num;
+}
+
+static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
+ u32 sig2, u32 sig3)
+{
+ if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
+ (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
+ int par_num = 0;
+ DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
+ "[0]:0x%08x [1]:0x%08x "
+ "[2]:0x%08x [3]:0x%08x\n",
+ sig0 & HW_PRTY_ASSERT_SET_0,
+ sig1 & HW_PRTY_ASSERT_SET_1,
+ sig2 & HW_PRTY_ASSERT_SET_2,
+ sig3 & HW_PRTY_ASSERT_SET_3);
+ printk(KERN_ERR"%s: Parity errors detected in blocks: ",
+ bp->dev->name);
+ par_num = bnx2x_print_blocks_with_parity0(
+ sig0 & HW_PRTY_ASSERT_SET_0, par_num);
+ par_num = bnx2x_print_blocks_with_parity1(
+ sig1 & HW_PRTY_ASSERT_SET_1, par_num);
+ par_num = bnx2x_print_blocks_with_parity2(
+ sig2 & HW_PRTY_ASSERT_SET_2, par_num);
+ par_num = bnx2x_print_blocks_with_parity3(
+ sig3 & HW_PRTY_ASSERT_SET_3, par_num);
+ printk("\n");
+ return true;
+ } else
+ return false;
+}
+
+static bool bnx2x_chk_parity_attn(struct bnx2x *bp)
{
struct attn_route attn;
- struct attn_route group_mask;
+ int port = BP_PORT(bp);
+
+ attn.sig[0] = REG_RD(bp,
+ MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
+ port*4);
+ attn.sig[1] = REG_RD(bp,
+ MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
+ port*4);
+ attn.sig[2] = REG_RD(bp,
+ MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
+ port*4);
+ attn.sig[3] = REG_RD(bp,
+ MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
+ port*4);
+
+ return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
+ attn.sig[3]);
+}
+
+static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
+{
+ struct attn_route attn, *group_mask;
int port = BP_PORT(bp);
int index;
u32 reg_addr;
@@ -3118,6 +3472,19 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
try to handle this event */
bnx2x_acquire_alr(bp);
+ if (bnx2x_chk_parity_attn(bp)) {
+ bp->recovery_state = BNX2X_RECOVERY_INIT;
+ bnx2x_set_reset_in_progress(bp);
+ schedule_delayed_work(&bp->reset_task, 0);
+ /* Disable HW interrupts */
+ bnx2x_int_disable(bp);
+ bnx2x_release_alr(bp);
+ /* In case of parity errors don't handle attentions so that
+ * other functions would "see" parity errors.
+ */
+ return;
+ }
+
attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
@@ -3127,28 +3494,20 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
if (deasserted & (1 << index)) {
- group_mask = bp->attn_group[index];
+ group_mask = &bp->attn_group[index];
DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
- index, group_mask.sig[0], group_mask.sig[1],
- group_mask.sig[2], group_mask.sig[3]);
+ index, group_mask->sig[0], group_mask->sig[1],
+ group_mask->sig[2], group_mask->sig[3]);
bnx2x_attn_int_deasserted3(bp,
- attn.sig[3] & group_mask.sig[3]);
+ attn.sig[3] & group_mask->sig[3]);
bnx2x_attn_int_deasserted1(bp,
- attn.sig[1] & group_mask.sig[1]);
+ attn.sig[1] & group_mask->sig[1]);
bnx2x_attn_int_deasserted2(bp,
- attn.sig[2] & group_mask.sig[2]);
+ attn.sig[2] & group_mask->sig[2]);
bnx2x_attn_int_deasserted0(bp,
- attn.sig[0] & group_mask.sig[0]);
-
- if ((attn.sig[0] & group_mask.sig[0] &
- HW_PRTY_ASSERT_SET_0) ||
- (attn.sig[1] & group_mask.sig[1] &
- HW_PRTY_ASSERT_SET_1) ||
- (attn.sig[2] & group_mask.sig[2] &
- HW_PRTY_ASSERT_SET_2))
- BNX2X_ERR("FATAL HW block parity attention\n");
+ attn.sig[0] & group_mask->sig[0]);
}
}
@@ -3172,7 +3531,7 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
aeu_mask, deasserted);
- aeu_mask |= (deasserted & 0xff);
+ aeu_mask |= (deasserted & 0x3ff);
DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
REG_WR(bp, reg_addr, aeu_mask);
@@ -3216,7 +3575,6 @@ static void bnx2x_sp_task(struct work_struct *work)
struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
u16 status;
-
/* Return here if interrupt is disabled */
if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
@@ -3227,11 +3585,23 @@ static void bnx2x_sp_task(struct work_struct *work)
/* if (status == 0) */
/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
- DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
+ DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
/* HW attentions */
- if (status & 0x1)
+ if (status & 0x1) {
bnx2x_attn_int(bp);
+ status &= ~0x1;
+ }
+
+ /* CStorm events: STAT_QUERY */
+ if (status & 0x2) {
+ DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
+ status &= ~0x2;
+ }
+
+ if (unlikely(status))
+ DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
+ status);
bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
IGU_INT_NOP, 1);
@@ -3243,7 +3613,6 @@ static void bnx2x_sp_task(struct work_struct *work)
IGU_INT_NOP, 1);
bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
IGU_INT_ENABLE, 1);
-
}
static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
@@ -3947,7 +4316,6 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
u32 lo;
u32 hi;
} diff;
- u32 nig_timer_max;
if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
bnx2x_bmac_stats_update(bp);
@@ -3978,10 +4346,14 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
pstats->host_port_stats_start = ++pstats->host_port_stats_end;
- nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
- if (nig_timer_max != estats->nig_timer_max) {
- estats->nig_timer_max = nig_timer_max;
- BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
+ if (!BP_NOMCP(bp)) {
+ u32 nig_timer_max =
+ SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
+ if (nig_timer_max != estats->nig_timer_max) {
+ estats->nig_timer_max = nig_timer_max;
+ BNX2X_ERR("NIG timer max (%u)\n",
+ estats->nig_timer_max);
+ }
}
return 0;
@@ -4025,21 +4397,21 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
bp->stats_counter) {
DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
- " xstorm counter (%d) != stats_counter (%d)\n",
+ " xstorm counter (0x%x) != stats_counter (0x%x)\n",
i, xclient->stats_counter, bp->stats_counter);
return -1;
}
if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
bp->stats_counter) {
DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
- " tstorm counter (%d) != stats_counter (%d)\n",
+ " tstorm counter (0x%x) != stats_counter (0x%x)\n",
i, tclient->stats_counter, bp->stats_counter);
return -2;
}
if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
bp->stats_counter) {
DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
- " ustorm counter (%d) != stats_counter (%d)\n",
+ " ustorm counter (0x%x) != stats_counter (0x%x)\n",
i, uclient->stats_counter, bp->stats_counter);
return -4;
}
@@ -4059,6 +4431,21 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
qstats->total_bytes_received_lo,
le32_to_cpu(tclient->rcv_unicast_bytes.lo));
+ SUB_64(qstats->total_bytes_received_hi,
+ le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
+ qstats->total_bytes_received_lo,
+ le32_to_cpu(uclient->bcast_no_buff_bytes.lo));
+
+ SUB_64(qstats->total_bytes_received_hi,
+ le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
+ qstats->total_bytes_received_lo,
+ le32_to_cpu(uclient->mcast_no_buff_bytes.lo));
+
+ SUB_64(qstats->total_bytes_received_hi,
+ le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
+ qstats->total_bytes_received_lo,
+ le32_to_cpu(uclient->ucast_no_buff_bytes.lo));
+
qstats->valid_bytes_received_hi =
qstats->total_bytes_received_hi;
qstats->valid_bytes_received_lo =
@@ -4307,47 +4694,43 @@ static void bnx2x_stats_update(struct bnx2x *bp)
bnx2x_drv_stats_update(bp);
if (netif_msg_timer(bp)) {
- struct bnx2x_fastpath *fp0_rx = bp->fp;
- struct bnx2x_fastpath *fp0_tx = bp->fp;
- struct tstorm_per_client_stats *old_tclient =
- &bp->fp->old_tclient;
- struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
struct bnx2x_eth_stats *estats = &bp->eth_stats;
- struct net_device_stats *nstats = &bp->dev->stats;
int i;
- netdev_printk(KERN_DEBUG, bp->dev, "\n");
- printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
- " tx pkt (%lx)\n",
- bnx2x_tx_avail(fp0_tx),
- le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
- printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
- " rx pkt (%lx)\n",
- (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
- fp0_rx->rx_comp_cons),
- le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
- printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
- "brb truncate %u\n",
- (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
- qstats->driver_xoff,
+ printk(KERN_DEBUG "%s: brb drops %u brb truncate %u\n",
+ bp->dev->name,
estats->brb_drop_lo, estats->brb_truncate_lo);
- printk(KERN_DEBUG "tstats: checksum_discard %u "
- "packets_too_big_discard %lu no_buff_discard %lu "
- "mac_discard %u mac_filter_discard %u "
- "xxovrflow_discard %u brb_truncate_discard %u "
- "ttl0_discard %u\n",
- le32_to_cpu(old_tclient->checksum_discard),
- bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
- bnx2x_hilo(&qstats->no_buff_discard_hi),
- estats->mac_discard, estats->mac_filter_discard,
- estats->xxoverflow_discard, estats->brb_truncate_discard,
- le32_to_cpu(old_tclient->ttl0_discard));
for_each_queue(bp, i) {
- printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
- bnx2x_fp(bp, i, tx_pkt),
- bnx2x_fp(bp, i, rx_pkt),
- bnx2x_fp(bp, i, rx_calls));
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+
+ printk(KERN_DEBUG "%s: rx usage(%4u) *rx_cons_sb(%u)"
+ " rx pkt(%lu) rx calls(%lu %lu)\n",
+ fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
+ fp->rx_comp_cons),
+ le16_to_cpu(*fp->rx_cons_sb),
+ bnx2x_hilo(&qstats->
+ total_unicast_packets_received_hi),
+ fp->rx_calls, fp->rx_pkt);
+ }
+
+ for_each_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+ struct netdev_queue *txq =
+ netdev_get_tx_queue(bp->dev, i);
+
+ printk(KERN_DEBUG "%s: tx avail(%4u) *tx_cons_sb(%u)"
+ " tx pkt(%lu) tx calls (%lu)"
+ " %s (Xoff events %u)\n",
+ fp->name, bnx2x_tx_avail(fp),
+ le16_to_cpu(*fp->tx_cons_sb),
+ bnx2x_hilo(&qstats->
+ total_unicast_packets_transmitted_hi),
+ fp->tx_pkt,
+ (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
+ qstats->driver_xoff);
}
}
@@ -4468,6 +4851,9 @@ static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
enum bnx2x_stats_state state = bp->stats_state;
+ if (unlikely(bp->panic))
+ return;
+
bnx2x_stats_stm[state][event].action(bp);
bp->stats_state = bnx2x_stats_stm[state][event].next_state;
@@ -4940,9 +5326,9 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
}
if (fp->tpa_state[i] == BNX2X_TPA_START)
- pci_unmap_single(bp->pdev,
- pci_unmap_addr(rx_buf, mapping),
- bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&bp->pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
+ bp->rx_buf_size, DMA_FROM_DEVICE);
dev_kfree_skb(skb);
rx_buf->skb = NULL;
@@ -4978,7 +5364,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
fp->disable_tpa = 1;
break;
}
- pci_unmap_addr_set((struct sw_rx_bd *)
+ dma_unmap_addr_set((struct sw_rx_bd *)
&bp->fp->tpa_pool[i],
mapping, 0);
fp->tpa_state[i] = BNX2X_TPA_STOP;
@@ -5072,8 +5458,8 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
fp->rx_bd_prod = ring_prod;
/* must not have more available CQEs than BDs */
- fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
- cqe_ring_prod);
+ fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
+ cqe_ring_prod);
fp->rx_pkt = fp->rx_calls = 0;
/* Warning!
@@ -5179,8 +5565,8 @@ static void bnx2x_init_context(struct bnx2x *bp)
context->ustorm_st_context.common.flags |=
USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
context->ustorm_st_context.common.sge_buff_size =
- (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
- (u32)0xffff);
+ (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
+ 0xffff);
context->ustorm_st_context.common.sge_page_base_hi =
U64_HI(fp->rx_sge_mapping);
context->ustorm_st_context.common.sge_page_base_lo =
@@ -5369,10 +5755,10 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
u32 offset;
u16 max_agg_size;
- if (is_multi(bp)) {
- tstorm_config.config_flags = MULTI_FLAGS(bp);
+ tstorm_config.config_flags = RSS_FLAGS(bp);
+
+ if (is_multi(bp))
tstorm_config.rss_result_mask = MULTI_MASK;
- }
/* Enable TPA if needed */
if (bp->flags & TPA_ENABLE_FLAG)
@@ -5477,10 +5863,8 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
}
/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
- max_agg_size =
- min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
- SGE_PAGE_SIZE * PAGES_PER_SGE),
- (u32)0xffff);
+ max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
+ SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
for_each_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
@@ -5566,7 +5950,7 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
}
- /* Store it to internal memory */
+ /* Store cmng structures to internal memory */
if (bp->port.pmf)
for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
REG_WR(bp, BAR_XSTRORM_INTMEM +
@@ -5658,8 +6042,8 @@ static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
static int bnx2x_gunzip_init(struct bnx2x *bp)
{
- bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
- &bp->gunzip_mapping);
+ bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
+ &bp->gunzip_mapping, GFP_KERNEL);
if (bp->gunzip_buf == NULL)
goto gunzip_nomem1;
@@ -5679,12 +6063,13 @@ gunzip_nomem3:
bp->strm = NULL;
gunzip_nomem2:
- pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
- bp->gunzip_mapping);
+ dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
+ bp->gunzip_mapping);
bp->gunzip_buf = NULL;
gunzip_nomem1:
- netdev_err(bp->dev, "Cannot allocate firmware buffer for un-compression\n");
+ netdev_err(bp->dev, "Cannot allocate firmware buffer for"
+ " un-compression\n");
return -ENOMEM;
}
@@ -5696,8 +6081,8 @@ static void bnx2x_gunzip_end(struct bnx2x *bp)
bp->strm = NULL;
if (bp->gunzip_buf) {
- pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
- bp->gunzip_mapping);
+ dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
+ bp->gunzip_mapping);
bp->gunzip_buf = NULL;
}
}
@@ -5735,8 +6120,9 @@ static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
if (bp->gunzip_outlen & 0x3)
- netdev_err(bp->dev, "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
- bp->gunzip_outlen);
+ netdev_err(bp->dev, "Firmware decompression error:"
+ " gunzip_outlen (%d) not aligned\n",
+ bp->gunzip_outlen);
bp->gunzip_outlen >>= 2;
zlib_inflateEnd(bp->strm);
@@ -5962,6 +6348,50 @@ static void enable_blocks_attention(struct bnx2x *bp)
REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
}
+static const struct {
+ u32 addr;
+ u32 mask;
+} bnx2x_parity_mask[] = {
+ {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
+ {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
+ {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
+ {HC_REG_HC_PRTY_MASK, 0xffffffff},
+ {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
+ {QM_REG_QM_PRTY_MASK, 0x0},
+ {DORQ_REG_DORQ_PRTY_MASK, 0x0},
+ {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
+ {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
+ {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
+ {CDU_REG_CDU_PRTY_MASK, 0x0},
+ {CFC_REG_CFC_PRTY_MASK, 0x0},
+ {DBG_REG_DBG_PRTY_MASK, 0x0},
+ {DMAE_REG_DMAE_PRTY_MASK, 0x0},
+ {BRB1_REG_BRB1_PRTY_MASK, 0x0},
+ {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
+ {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
+ {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
+ {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
+ {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
+ {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
+ {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
+ {USEM_REG_USEM_PRTY_MASK_0, 0x0},
+ {USEM_REG_USEM_PRTY_MASK_1, 0x0},
+ {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
+ {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
+ {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
+ {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
+};
+
+static void enable_blocks_parity(struct bnx2x *bp)
+{
+ int i, mask_arr_len =
+ sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));
+
+ for (i = 0; i < mask_arr_len; i++)
+ REG_WR(bp, bnx2x_parity_mask[i].addr,
+ bnx2x_parity_mask[i].mask);
+}
+
static void bnx2x_reset_common(struct bnx2x *bp)
{
@@ -5992,10 +6422,14 @@ static void bnx2x_init_pxp(struct bnx2x *bp)
static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
+ int is_required;
u32 val;
- u8 port;
- u8 is_required = 0;
+ int port;
+ if (BP_NOMCP(bp))
+ return;
+
+ is_required = 0;
val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
SHARED_HW_CFG_FAN_FAILURE_MASK;
@@ -6034,7 +6468,7 @@ static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
/* set to active low mode */
val = REG_RD(bp, MISC_REG_SPIO_INT);
val |= ((1 << MISC_REGISTERS_SPIO_5) <<
- MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
+ MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
REG_WR(bp, MISC_REG_SPIO_INT, val);
/* enable interrupt to signal the IGU */
@@ -6200,10 +6634,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
REG_WR(bp, SRC_REG_SOFT_RST, 1);
- for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
- REG_WR(bp, i, 0xc0cac01a);
- /* TODO: replace with something meaningful */
- }
+ for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
+ REG_WR(bp, i, random32());
bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
#ifdef BCM_CNIC
REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
@@ -6221,7 +6653,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
if (sizeof(union cdu_context) != 1024)
/* we currently assume that a context is 1024 bytes */
- pr_alert("please adjust the size of cdu_context(%ld)\n",
+ dev_alert(&bp->pdev->dev, "please adjust the size "
+ "of cdu_context(%ld)\n",
(long)sizeof(union cdu_context));
bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
@@ -6305,6 +6738,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
enable_blocks_attention(bp);
+ if (CHIP_PARITY_SUPPORTED(bp))
+ enable_blocks_parity(bp);
if (!BP_NOMCP(bp)) {
bnx2x_acquire_phy_lock(bp);
@@ -6323,7 +6758,7 @@ static int bnx2x_init_port(struct bnx2x *bp)
u32 low, high;
u32 val;
- DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
+ DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
@@ -6342,6 +6777,7 @@ static int bnx2x_init_port(struct bnx2x *bp)
REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif
+
bnx2x_init_block(bp, DQ_BLOCK, init_stage);
bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
@@ -6534,7 +6970,7 @@ static int bnx2x_init_func(struct bnx2x *bp)
u32 addr, val;
int i;
- DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
+ DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
/* set MSI reconfigure capability */
addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
@@ -6692,7 +7128,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
#define BNX2X_PCI_FREE(x, y, size) \
do { \
if (x) { \
- pci_free_consistent(bp->pdev, size, x, y); \
+ dma_free_coherent(&bp->pdev->dev, size, x, y); \
x = NULL; \
y = 0; \
} \
@@ -6773,7 +7209,7 @@ static int bnx2x_alloc_mem(struct bnx2x *bp)
#define BNX2X_PCI_ALLOC(x, y, size) \
do { \
- x = pci_alloc_consistent(bp->pdev, size, y); \
+ x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
if (x == NULL) \
goto alloc_mem_err; \
memset(x, 0, size); \
@@ -6906,9 +7342,9 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
if (skb == NULL)
continue;
- pci_unmap_single(bp->pdev,
- pci_unmap_addr(rx_buf, mapping),
- bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&bp->pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
+ bp->rx_buf_size, DMA_FROM_DEVICE);
rx_buf->skb = NULL;
dev_kfree_skb(skb);
@@ -6987,7 +7423,31 @@ static int bnx2x_enable_msix(struct bnx2x *bp)
rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
BNX2X_NUM_QUEUES(bp) + offset);
- if (rc) {
+
+ /*
+ * reconfigure number of tx/rx queues according to available
+ * MSI-X vectors
+ */
+ if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
+ /* vectors available for FP */
+ int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
+
+ DP(NETIF_MSG_IFUP,
+ "Trying to use less MSI-X vectors: %d\n", rc);
+
+ rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
+
+ if (rc) {
+ DP(NETIF_MSG_IFUP,
+ "MSI-X is not attainable rc %d\n", rc);
+ return rc;
+ }
+
+ bp->num_queues = min(bp->num_queues, fp_vec);
+
+ DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
+ bp->num_queues);
+ } else if (rc) {
DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
return rc;
}
@@ -7028,10 +7488,11 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
}
i = BNX2X_NUM_QUEUES(bp);
- netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
- bp->msix_table[0].vector,
- 0, bp->msix_table[offset].vector,
- i - 1, bp->msix_table[offset + i - 1].vector);
+ netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
+ " ... fp[%d] %d\n",
+ bp->msix_table[0].vector,
+ 0, bp->msix_table[offset].vector,
+ i - 1, bp->msix_table[offset + i - 1].vector);
return 0;
}
@@ -7409,8 +7870,6 @@ static int bnx2x_set_num_queues(struct bnx2x *bp)
bp->num_queues = 1;
DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
break;
-
- case INT_MODE_MSIX:
default:
/* Set number of queues according to bp->multi_mode value */
bnx2x_set_num_queues_msix(bp);
@@ -7656,6 +8115,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
if (bp->state == BNX2X_STATE_OPEN)
bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif
+ bnx2x_inc_load_cnt(bp);
return 0;
@@ -7843,33 +8303,12 @@ static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
}
}
-/* must be called with rtnl_lock */
-static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
+static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
{
int port = BP_PORT(bp);
u32 reset_code = 0;
int i, cnt, rc;
-#ifdef BCM_CNIC
- bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
-#endif
- bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
-
- /* Set "drop all" */
- bp->rx_mode = BNX2X_RX_MODE_NONE;
- bnx2x_set_storm_rx_mode(bp);
-
- /* Disable HW interrupts, NAPI and Tx */
- bnx2x_netif_stop(bp, 1);
-
- del_timer_sync(&bp->timer);
- SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
- (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
- bnx2x_stats_handle(bp, STATS_EVENT_STOP);
-
- /* Release IRQs */
- bnx2x_free_irq(bp, false);
-
/* Wait until tx fastpath tasks complete */
for_each_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
@@ -8010,6 +8449,69 @@ unload_error:
if (!BP_NOMCP(bp))
bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
+}
+
+static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
+{
+ u32 val;
+
+ DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
+
+ if (CHIP_IS_E1(bp)) {
+ int port = BP_PORT(bp);
+ u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+ MISC_REG_AEU_MASK_ATTN_FUNC_0;
+
+ val = REG_RD(bp, addr);
+ val &= ~(0x300);
+ REG_WR(bp, addr, val);
+ } else if (CHIP_IS_E1H(bp)) {
+ val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
+ val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
+ MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
+ REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
+ }
+}
+
+/* must be called with rtnl_lock */
+static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
+{
+ int i;
+
+ if (bp->state == BNX2X_STATE_CLOSED) {
+ /* Interface has been removed - nothing to recover */
+ bp->recovery_state = BNX2X_RECOVERY_DONE;
+ bp->is_leader = 0;
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
+ smp_wmb();
+
+ return -EINVAL;
+ }
+
+#ifdef BCM_CNIC
+ bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
+#endif
+ bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
+
+ /* Set "drop all" */
+ bp->rx_mode = BNX2X_RX_MODE_NONE;
+ bnx2x_set_storm_rx_mode(bp);
+
+ /* Disable HW interrupts, NAPI and Tx */
+ bnx2x_netif_stop(bp, 1);
+
+ del_timer_sync(&bp->timer);
+ SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
+ (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+
+ /* Release IRQs */
+ bnx2x_free_irq(bp, false);
+
+ /* Cleanup the chip if needed */
+ if (unload_mode != UNLOAD_RECOVERY)
+ bnx2x_chip_cleanup(bp, unload_mode);
+
bp->port.pmf = 0;
/* Free SKBs, SGEs, TPA pool and driver internals */
@@ -8024,17 +8526,448 @@ unload_error:
netif_carrier_off(bp->dev);
+ /* The last driver must disable a "close the gate" if there is no
+ * parity attention or "process kill" pending.
+ */
+ if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
+ bnx2x_reset_is_done(bp))
+ bnx2x_disable_close_the_gate(bp);
+
+ /* Reset MCP mailbox sequence if there is an ongoing recovery */
+ if (unload_mode == UNLOAD_RECOVERY)
+ bp->fw_seq = 0;
+
return 0;
}
+/* Close gates #2, #3 and #4: */
+static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
+{
+ u32 val, addr;
+
+ /* Gates #2 and #4a are closed/opened for "not E1" only */
+ if (!CHIP_IS_E1(bp)) {
+ /* #4 */
+ val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
+ REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
+ close ? (val | 0x1) : (val & (~(u32)1)));
+ /* #2 */
+ val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
+ REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
+ close ? (val | 0x1) : (val & (~(u32)1)));
+ }
+
+ /* #3 */
+ addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
+ val = REG_RD(bp, addr);
+ REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
+
+ DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
+ close ? "closing" : "opening");
+ mmiowb();
+}
+
+#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
+
+static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
+{
+ /* Do some magic... */
+ u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
+ *magic_val = val & SHARED_MF_CLP_MAGIC;
+ MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
+}
+
+/* Restore the value of the `magic' bit.
+ *
+ * @param bp Driver handle.
+ * @param magic_val Old value of the `magic' bit.
+ */
+static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
+{
+ /* Restore the `magic' bit value... */
+ /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
+ SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
+ (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
+ u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
+ MF_CFG_WR(bp, shared_mf_config.clp_mb,
+ (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
+}
+
+/* Prepares for MCP reset: takes care of CLP configurations.
+ *
+ * @param bp
+ * @param magic_val Old value of 'magic' bit.
+ */
+static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
+{
+ u32 shmem;
+ u32 validity_offset;
+
+ DP(NETIF_MSG_HW, "Starting\n");
+
+ /* Set `magic' bit in order to save MF config */
+ if (!CHIP_IS_E1(bp))
+ bnx2x_clp_reset_prep(bp, magic_val);
+
+ /* Get shmem offset */
+ shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+ validity_offset = offsetof(struct shmem_region, validity_map[0]);
+
+ /* Clear validity map flags */
+ if (shmem > 0)
+ REG_WR(bp, shmem + validity_offset, 0);
+}
+
+#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
+#define MCP_ONE_TIMEOUT 100 /* 100 ms */
+
+/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
+ * depending on the HW type.
+ *
+ * @param bp
+ */
+static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
+{
+ /* special handling for emulation and FPGA,
+ wait 10 times longer */
+ if (CHIP_REV_IS_SLOW(bp))
+ msleep(MCP_ONE_TIMEOUT*10);
+ else
+ msleep(MCP_ONE_TIMEOUT);
+}
+
+static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
+{
+ u32 shmem, cnt, validity_offset, val;
+ int rc = 0;
+
+ msleep(100);
+
+ /* Get shmem offset */
+ shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+ if (shmem == 0) {
+ BNX2X_ERR("Shmem 0 return failure\n");
+ rc = -ENOTTY;
+ goto exit_lbl;
+ }
+
+ validity_offset = offsetof(struct shmem_region, validity_map[0]);
+
+ /* Wait for MCP to come up */
+ for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
+ /* TBD: its best to check validity map of last port.
+ * currently checks on port 0.
+ */
+ val = REG_RD(bp, shmem + validity_offset);
+ DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
+ shmem + validity_offset, val);
+
+ /* check that shared memory is valid. */
+ if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
+ == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
+ break;
+
+ bnx2x_mcp_wait_one(bp);
+ }
+
+ DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
+
+ /* Check that shared memory is valid. This indicates that MCP is up. */
+ if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
+ (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
+ BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
+ rc = -ENOTTY;
+ goto exit_lbl;
+ }
+
+exit_lbl:
+ /* Restore the `magic' bit value */
+ if (!CHIP_IS_E1(bp))
+ bnx2x_clp_reset_done(bp, magic_val);
+
+ return rc;
+}
+
+static void bnx2x_pxp_prep(struct bnx2x *bp)
+{
+ if (!CHIP_IS_E1(bp)) {
+ REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
+ REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
+ REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
+ mmiowb();
+ }
+}
+
+/*
+ * Reset the whole chip except for:
+ * - PCIE core
+ * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
+ * one reset bit)
+ * - IGU
+ * - MISC (including AEU)
+ * - GRC
+ * - RBCN, RBCP
+ */
+static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
+{
+ u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
+
+ not_reset_mask1 =
+ MISC_REGISTERS_RESET_REG_1_RST_HC |
+ MISC_REGISTERS_RESET_REG_1_RST_PXPV |
+ MISC_REGISTERS_RESET_REG_1_RST_PXP;
+
+ not_reset_mask2 =
+ MISC_REGISTERS_RESET_REG_2_RST_MDIO |
+ MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
+ MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
+ MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
+ MISC_REGISTERS_RESET_REG_2_RST_RBCN |
+ MISC_REGISTERS_RESET_REG_2_RST_GRC |
+ MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
+ MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
+
+ reset_mask1 = 0xffffffff;
+
+ if (CHIP_IS_E1(bp))
+ reset_mask2 = 0xffff;
+ else
+ reset_mask2 = 0x1ffff;
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+ reset_mask1 & (~not_reset_mask1));
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ reset_mask2 & (~not_reset_mask2));
+
+ barrier();
+ mmiowb();
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
+ mmiowb();
+}
+
+static int bnx2x_process_kill(struct bnx2x *bp)
+{
+ int cnt = 1000;
+ u32 val = 0;
+ u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
+
+
+ /* Empty the Tetris buffer, wait for 1s */
+ do {
+ sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
+ blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
+ port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
+ port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
+ pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
+ if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
+ ((port_is_idle_0 & 0x1) == 0x1) &&
+ ((port_is_idle_1 & 0x1) == 0x1) &&
+ (pgl_exp_rom2 == 0xffffffff))
+ break;
+ msleep(1);
+ } while (cnt-- > 0);
+
+ if (cnt <= 0) {
+ DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
+ " are still"
+ " outstanding read requests after 1s!\n");
+ DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
+ " port_is_idle_0=0x%08x,"
+ " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
+ sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
+ pgl_exp_rom2);
+ return -EAGAIN;
+ }
+
+ barrier();
+
+ /* Close gates #2, #3 and #4 */
+ bnx2x_set_234_gates(bp, true);
+
+ /* TBD: Indicate that "process kill" is in progress to MCP */
+
+ /* Clear "unprepared" bit */
+ REG_WR(bp, MISC_REG_UNPREPARED, 0);
+ barrier();
+
+ /* Make sure all is written to the chip before the reset */
+ mmiowb();
+
+ /* Wait for 1ms to empty GLUE and PCI-E core queues,
+ * PSWHST, GRC and PSWRD Tetris buffer.
+ */
+ msleep(1);
+
+ /* Prepare to chip reset: */
+ /* MCP */
+ bnx2x_reset_mcp_prep(bp, &val);
+
+ /* PXP */
+ bnx2x_pxp_prep(bp);
+ barrier();
+
+ /* reset the chip */
+ bnx2x_process_kill_chip_reset(bp);
+ barrier();
+
+ /* Recover after reset: */
+ /* MCP */
+ if (bnx2x_reset_mcp_comp(bp, val))
+ return -EAGAIN;
+
+ /* PXP */
+ bnx2x_pxp_prep(bp);
+
+ /* Open the gates #2, #3 and #4 */
+ bnx2x_set_234_gates(bp, false);
+
+ /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
+ * reset state, re-enable attentions. */
+
+ return 0;
+}
+
+static int bnx2x_leader_reset(struct bnx2x *bp)
+{
+ int rc = 0;
+ /* Try to recover after the failure */
+ if (bnx2x_process_kill(bp)) {
+ printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
+ bp->dev->name);
+ rc = -EAGAIN;
+ goto exit_leader_reset;
+ }
+
+ /* Clear "reset is in progress" bit and update the driver state */
+ bnx2x_set_reset_done(bp);
+ bp->recovery_state = BNX2X_RECOVERY_DONE;
+
+exit_leader_reset:
+ bp->is_leader = 0;
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
+ smp_wmb();
+ return rc;
+}
+
+static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
+
+/* Assumption: runs under rtnl lock. This together with the fact
+ * that it's called only from bnx2x_reset_task() ensures that it
+ * will never be called when netif_running(bp->dev) is false.
+ */
+static void bnx2x_parity_recover(struct bnx2x *bp)
+{
+ DP(NETIF_MSG_HW, "Handling parity\n");
+ while (1) {
+ switch (bp->recovery_state) {
+ case BNX2X_RECOVERY_INIT:
+ DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
+ /* Try to get a LEADER_LOCK HW lock */
+ if (bnx2x_trylock_hw_lock(bp,
+ HW_LOCK_RESOURCE_RESERVED_08))
+ bp->is_leader = 1;
+
+ /* Stop the driver */
+ /* If interface has been removed - break */
+ if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
+ return;
+
+ bp->recovery_state = BNX2X_RECOVERY_WAIT;
+ /* Ensure "is_leader" and "recovery_state"
+ * updated values are seen by other CPUs
+ */
+ smp_wmb();
+ break;
+
+ case BNX2X_RECOVERY_WAIT:
+ DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
+ if (bp->is_leader) {
+ u32 load_counter = bnx2x_get_load_cnt(bp);
+ if (load_counter) {
+ /* Wait until all other functions get
+ * down.
+ */
+ schedule_delayed_work(&bp->reset_task,
+ HZ/10);
+ return;
+ } else {
+ /* If all other functions got down -
+ * try to bring the chip back to
+ * normal. In any case it's an exit
+ * point for a leader.
+ */
+ if (bnx2x_leader_reset(bp) ||
+ bnx2x_nic_load(bp, LOAD_NORMAL)) {
+ printk(KERN_ERR"%s: Recovery "
+ "has failed. Power cycle is "
+ "needed.\n", bp->dev->name);
+ /* Disconnect this device */
+ netif_device_detach(bp->dev);
+ /* Block ifup for all functions
+ * of this ASIC until
+ * "process kill" or power
+ * cycle.
+ */
+ bnx2x_set_reset_in_progress(bp);
+ /* Shut down the power */
+ bnx2x_set_power_state(bp,
+ PCI_D3hot);
+ return;
+ }
+
+ return;
+ }
+ } else { /* non-leader */
+ if (!bnx2x_reset_is_done(bp)) {
+ /* Try to get a LEADER_LOCK HW lock, since
+ * a former leader may have been
+ * unloaded by the user or may have
+ * released leadership for another
+ * reason.
+ */
+ if (bnx2x_trylock_hw_lock(bp,
+ HW_LOCK_RESOURCE_RESERVED_08)) {
+ /* I'm a leader now! Restart a
+ * switch case.
+ */
+ bp->is_leader = 1;
+ break;
+ }
+
+ schedule_delayed_work(&bp->reset_task,
+ HZ/10);
+ return;
+
+ } else { /* A leader has completed
+ * the "process kill". It's an exit
+ * point for a non-leader.
+ */
+ bnx2x_nic_load(bp, LOAD_NORMAL);
+ bp->recovery_state =
+ BNX2X_RECOVERY_DONE;
+ smp_wmb();
+ return;
+ }
+ }
+ default:
+ return;
+ }
+ }
+}
+
+/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
+ * scheduled on a general queue in order to prevent a deadlock.
+ */
static void bnx2x_reset_task(struct work_struct *work)
{
- struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
+ struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
#ifdef BNX2X_STOP_ON_ERROR
BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
" so reset not done to allow debug dump,\n"
- " you will need to reboot when done\n");
+ KERN_ERR " you will need to reboot when done\n");
return;
#endif
@@ -8043,8 +8976,12 @@ static void bnx2x_reset_task(struct work_struct *work)
if (!netif_running(bp->dev))
goto reset_task_exit;
- bnx2x_nic_unload(bp, UNLOAD_NORMAL);
- bnx2x_nic_load(bp, LOAD_NORMAL);
+ if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
+ bnx2x_parity_recover(bp);
+ else {
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ bnx2x_nic_load(bp, LOAD_NORMAL);
+ }
reset_task_exit:
rtnl_unlock();
@@ -8264,7 +9201,7 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
- BNX2X_ERR("BAD MCP validity signature\n");
+ BNX2X_ERROR("BAD MCP validity signature\n");
bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
@@ -8288,8 +9225,8 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
if (val < BNX2X_BC_VER) {
/* for now only warn
* later we might need to enforce this */
- BNX2X_ERR("This driver needs bc_ver %X but found %X,"
- " please upgrade BC\n", BNX2X_BC_VER, val);
+ BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
+ "please upgrade BC\n", BNX2X_BC_VER, val);
}
bp->link_params.feature_config_flags |=
(val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
@@ -8310,7 +9247,8 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
- pr_info("part number %X-%X-%X-%X\n", val, val2, val3, val4);
+ dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
+ val, val2, val3, val4);
}
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
@@ -8588,11 +9526,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
bp->port.advertising = (ADVERTISED_10baseT_Full |
ADVERTISED_TP);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
+ BNX2X_ERROR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ bp->port.link_config,
+ bp->link_params.speed_cap_mask);
return;
}
break;
@@ -8604,11 +9542,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
bp->port.advertising = (ADVERTISED_10baseT_Half |
ADVERTISED_TP);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
+ BNX2X_ERROR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ bp->port.link_config,
+ bp->link_params.speed_cap_mask);
return;
}
break;
@@ -8619,11 +9557,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
bp->port.advertising = (ADVERTISED_100baseT_Full |
ADVERTISED_TP);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
+ BNX2X_ERROR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ bp->port.link_config,
+ bp->link_params.speed_cap_mask);
return;
}
break;
@@ -8635,11 +9573,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
bp->port.advertising = (ADVERTISED_100baseT_Half |
ADVERTISED_TP);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
+ BNX2X_ERROR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ bp->port.link_config,
+ bp->link_params.speed_cap_mask);
return;
}
break;
@@ -8650,11 +9588,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
bp->port.advertising = (ADVERTISED_1000baseT_Full |
ADVERTISED_TP);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
+ BNX2X_ERROR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ bp->port.link_config,
+ bp->link_params.speed_cap_mask);
return;
}
break;
@@ -8665,11 +9603,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
bp->port.advertising = (ADVERTISED_2500baseX_Full |
ADVERTISED_TP);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
+ BNX2X_ERROR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ bp->port.link_config,
+ bp->link_params.speed_cap_mask);
return;
}
break;
@@ -8682,19 +9620,19 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
bp->port.advertising = (ADVERTISED_10000baseT_Full |
ADVERTISED_FIBRE);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
+ BNX2X_ERROR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ bp->port.link_config,
+ bp->link_params.speed_cap_mask);
return;
}
break;
default:
- BNX2X_ERR("NVRAM config error. "
- "BAD link speed link_config 0x%x\n",
- bp->port.link_config);
+ BNX2X_ERROR("NVRAM config error. "
+ "BAD link speed link_config 0x%x\n",
+ bp->port.link_config);
bp->link_params.req_line_speed = SPEED_AUTO_NEG;
bp->port.advertising = bp->port.supported;
break;
@@ -8823,7 +9761,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
bp->e1hov = 0;
bp->e1hmf = 0;
- if (CHIP_IS_E1H(bp)) {
+ if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
bp->mf_config =
SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
@@ -8844,14 +9782,14 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
"(0x%04x)\n",
func, bp->e1hov, bp->e1hov);
} else {
- BNX2X_ERR("!!! No valid E1HOV for func %d,"
- " aborting\n", func);
+ BNX2X_ERROR("No valid E1HOV for func %d,"
+ " aborting\n", func);
rc = -EPERM;
}
} else {
if (BP_E1HVN(bp)) {
- BNX2X_ERR("!!! VN %d in single function mode,"
- " aborting\n", BP_E1HVN(bp));
+ BNX2X_ERROR("VN %d in single function mode,"
+ " aborting\n", BP_E1HVN(bp));
rc = -EPERM;
}
}
@@ -8887,7 +9825,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
if (BP_NOMCP(bp)) {
/* only supposed to happen on emulation/FPGA */
- BNX2X_ERR("warning random MAC workaround active\n");
+ BNX2X_ERROR("warning: random MAC workaround active\n");
random_ether_addr(bp->dev->dev_addr);
memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}
@@ -8895,6 +9833,70 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
return rc;
}
+static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
+{
+ int cnt, i, block_end, rodi;
+ char vpd_data[BNX2X_VPD_LEN+1];
+ char str_id_reg[VENDOR_ID_LEN+1];
+ char str_id_cap[VENDOR_ID_LEN+1];
+ u8 len;
+
+ cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
+ memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
+
+ if (cnt < BNX2X_VPD_LEN)
+ goto out_not_found;
+
+ i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
+ PCI_VPD_LRDT_RO_DATA);
+ if (i < 0)
+ goto out_not_found;
+
+
+ block_end = i + PCI_VPD_LRDT_TAG_SIZE +
+ pci_vpd_lrdt_size(&vpd_data[i]);
+
+ i += PCI_VPD_LRDT_TAG_SIZE;
+
+ if (block_end > BNX2X_VPD_LEN)
+ goto out_not_found;
+
+ rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
+ PCI_VPD_RO_KEYWORD_MFR_ID);
+ if (rodi < 0)
+ goto out_not_found;
+
+ len = pci_vpd_info_field_size(&vpd_data[rodi]);
+
+ if (len != VENDOR_ID_LEN)
+ goto out_not_found;
+
+ rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
+
+ /* vendor specific info */
+ snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
+ snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
+ if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
+ !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
+
+ rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
+ PCI_VPD_RO_KEYWORD_VENDOR0);
+ if (rodi >= 0) {
+ len = pci_vpd_info_field_size(&vpd_data[rodi]);
+
+ rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
+
+ if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
+ memcpy(bp->fw_ver, &vpd_data[rodi], len);
+ bp->fw_ver[len] = ' ';
+ }
+ }
+ return;
+ }
+out_not_found:
+ return;
+}
+
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
int func = BP_FUNC(bp);
@@ -8912,29 +9914,34 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
#endif
INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
- INIT_WORK(&bp->reset_task, bnx2x_reset_task);
+ INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
rc = bnx2x_get_hwinfo(bp);
+ bnx2x_read_fwinfo(bp);
/* need to reset chip if undi was active */
if (!BP_NOMCP(bp))
bnx2x_undi_unload(bp);
if (CHIP_REV_IS_FPGA(bp))
- pr_err("FPGA detected\n");
+ dev_err(&bp->pdev->dev, "FPGA detected\n");
if (BP_NOMCP(bp) && (func == 0))
- pr_err("MCP disabled, must load devices in order!\n");
+ dev_err(&bp->pdev->dev, "MCP disabled, "
+ "must load devices in order!\n");
/* Set multi queue mode */
if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
- pr_err("Multi disabled since int_mode requested is not MSI-X\n");
+ dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
+ "requested is not MSI-X\n");
multi_mode = ETH_RSS_MODE_DISABLED;
}
bp->multi_mode = multi_mode;
+ bp->dev->features |= NETIF_F_GRO;
+
/* Set TPA flags */
if (disable_tpa) {
bp->flags &= ~TPA_ENABLE_FLAG;
@@ -9304,11 +10311,13 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
bnx2x_release_phy_lock(bp);
}
- snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
+ strncpy(info->fw_version, bp->fw_ver, 32);
+ snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
+ "bc %d.%d.%d%s%s",
(bp->common.bc_ver & 0xff0000) >> 16,
(bp->common.bc_ver & 0xff00) >> 8,
(bp->common.bc_ver & 0xff),
- ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
+ ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
strcpy(info->bus_info, pci_name(bp->pdev));
info->n_stats = BNX2X_NUM_STATS;
info->testinfo_len = BNX2X_NUM_TESTS;
@@ -9842,19 +10851,18 @@ static int bnx2x_get_coalesce(struct net_device *dev,
return 0;
}
-#define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
static int bnx2x_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *coal)
{
struct bnx2x *bp = netdev_priv(dev);
- bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
- if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
- bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
+ bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
+ if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
+ bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
- bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
- if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
- bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
+ bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
+ if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
+ bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
if (netif_running(dev))
bnx2x_update_coalesce(bp);
@@ -9885,6 +10893,11 @@ static int bnx2x_set_ringparam(struct net_device *dev,
struct bnx2x *bp = netdev_priv(dev);
int rc = 0;
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+ return -EAGAIN;
+ }
+
if ((ering->rx_pending > MAX_RX_AVAIL) ||
(ering->tx_pending > MAX_TX_AVAIL) ||
(ering->tx_pending <= MAX_SKB_FRAGS + 4))
@@ -9970,6 +10983,11 @@ static int bnx2x_set_flags(struct net_device *dev, u32 data)
int changed = 0;
int rc = 0;
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+ return -EAGAIN;
+ }
+
/* TPA requires Rx CSUM offloading */
if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
if (!disable_tpa) {
@@ -9986,6 +11004,11 @@ static int bnx2x_set_flags(struct net_device *dev, u32 data)
changed = 1;
}
+ if (data & ETH_FLAG_RXHASH)
+ dev->features |= NETIF_F_RXHASH;
+ else
+ dev->features &= ~NETIF_F_RXHASH;
+
if (changed && netif_running(dev)) {
bnx2x_nic_unload(bp, UNLOAD_NORMAL);
rc = bnx2x_nic_load(bp, LOAD_NORMAL);
@@ -10006,6 +11029,11 @@ static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
struct bnx2x *bp = netdev_priv(dev);
int rc = 0;
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+ return -EAGAIN;
+ }
+
bp->rx_csum = data;
/* Disable TPA, when Rx CSUM is disabled. Otherwise all
@@ -10050,9 +11078,9 @@ static int bnx2x_test_registers(struct bnx2x *bp)
u32 wr_val = 0;
int port = BP_PORT(bp);
static const struct {
- u32 offset0;
- u32 offset1;
- u32 mask;
+ u32 offset0;
+ u32 offset1;
+ u32 mask;
} reg_tbl[] = {
/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
{ DORQ_REG_DB_ADDR0, 4, 0xffffffff },
@@ -10119,15 +11147,19 @@ static int bnx2x_test_registers(struct bnx2x *bp)
save_val = REG_RD(bp, offset);
- REG_WR(bp, offset, wr_val);
+ REG_WR(bp, offset, (wr_val & mask));
val = REG_RD(bp, offset);
/* Restore the original register's value */
REG_WR(bp, offset, save_val);
- /* verify that value is as expected value */
- if ((val & mask) != (wr_val & mask))
+ /* verify value is as expected */
+ if ((val & mask) != (wr_val & mask)) {
+ DP(NETIF_MSG_PROBE,
+ "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
+ offset, val, wr_val, mask);
goto test_reg_exit;
+ }
}
}
@@ -10267,8 +11299,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
bd_prod = TX_BD(fp_tx->tx_bd_prod);
tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
- mapping = pci_map_single(bp->pdev, skb->data,
- skb_headlen(skb), PCI_DMA_TODEVICE);
+ mapping = dma_map_single(&bp->pdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
@@ -10344,6 +11376,9 @@ static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
int rc = 0, res;
+ if (BP_NOMCP(bp))
+ return rc;
+
if (!netif_running(bp->dev))
return BNX2X_LOOPBACK_FAILED;
@@ -10391,6 +11426,9 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
int i, rc;
u32 magic, crc;
+ if (BP_NOMCP(bp))
+ return 0;
+
rc = bnx2x_nvram_read(bp, 0, data, 4);
if (rc) {
DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
@@ -10468,6 +11506,12 @@ static void bnx2x_self_test(struct net_device *dev,
{
struct bnx2x *bp = netdev_priv(dev);
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+ etest->flags |= ETH_TEST_FL_FAILED;
+ return;
+ }
+
memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
if (!netif_running(dev))
@@ -10556,7 +11600,11 @@ static const struct {
/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
- 8, "[%d]: tx_packets" }
+ 8, "[%d]: tx_ucast_packets" },
+ { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
+ 8, "[%d]: tx_mcast_packets" },
+ { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
+ 8, "[%d]: tx_bcast_packets" }
};
static const struct {
@@ -10618,16 +11666,20 @@ static const struct {
{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8, STATS_FLAGS_PORT, "tx_error_bytes" },
{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
- 8, STATS_FLAGS_BOTH, "tx_packets" },
+ 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
+ { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
+ 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
+ { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
+ 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
8, STATS_FLAGS_PORT, "tx_mac_errors" },
{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
8, STATS_FLAGS_PORT, "tx_carrier_errors" },
- { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
+/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
8, STATS_FLAGS_PORT, "tx_single_collisions" },
{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
8, STATS_FLAGS_PORT, "tx_multi_collisions" },
-/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
+ { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
8, STATS_FLAGS_PORT, "tx_deferred" },
{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
8, STATS_FLAGS_PORT, "tx_excess_collisions" },
@@ -10643,11 +11695,11 @@ static const struct {
8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
- { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
+/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
-/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
+ { STATS_OFFSET32(etherstatspktsover1522octets_hi),
8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
{ STATS_OFFSET32(pause_frames_sent_hi),
8, STATS_FLAGS_PORT, "tx_pause_frames" }
@@ -10664,7 +11716,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
struct bnx2x *bp = netdev_priv(dev);
int i, num_stats;
- switch(stringset) {
+ switch (stringset) {
case ETH_SS_STATS:
if (is_multi(bp)) {
num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
@@ -10893,6 +11945,14 @@ static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
break;
case PCI_D3hot:
+ /* If there are other clients above don't
+ shut down the power */
+ if (atomic_read(&bp->pdev->enable_cnt) != 1)
+ return 0;
+ /* Don't shut down the power for emulation and FPGA */
+ if (CHIP_REV_IS_SLOW(bp))
+ return 0;
+
pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
pmcsr |= 3;
@@ -11182,6 +12242,8 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
int i;
u8 hlen = 0;
__le16 pkt_size = 0;
+ struct ethhdr *eth;
+ u8 mac_type = UNICAST_ADDRESS;
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
@@ -11205,6 +12267,16 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
+ eth = (struct ethhdr *)skb->data;
+
+ /* set flag according to packet type (UNICAST_ADDRESS is default)*/
+ if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
+ if (is_broadcast_ether_addr(eth->h_dest))
+ mac_type = BROADCAST_ADDRESS;
+ else
+ mac_type = MULTICAST_ADDRESS;
+ }
+
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* First, check if we need to linearize the skb (due to FW
restrictions). No need to check fragmentation if page size > 8K
@@ -11238,8 +12310,8 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
- tx_start_bd->general_data = (UNICAST_ADDRESS <<
- ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
+ tx_start_bd->general_data = (mac_type <<
+ ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
/* header nbd */
tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
@@ -11314,8 +12386,8 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
- mapping = pci_map_single(bp->pdev, skb->data,
- skb_headlen(skb), PCI_DMA_TODEVICE);
+ mapping = dma_map_single(&bp->pdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -11372,8 +12444,9 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (total_pkt_bd == NULL)
total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
- mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
- frag->size, PCI_DMA_TODEVICE);
+ mapping = dma_map_page(&bp->pdev->dev, frag->page,
+ frag->page_offset,
+ frag->size, DMA_TO_DEVICE);
tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -11452,6 +12525,40 @@ static int bnx2x_open(struct net_device *dev)
bnx2x_set_power_state(bp, PCI_D0);
+ if (!bnx2x_reset_is_done(bp)) {
+ do {
+ /* Reset MCP mailbox sequence if there is an
+ * ongoing recovery
+ */
+ bp->fw_seq = 0;
+
+ /* If it's the first function to load and "reset done"
+ * is still not cleared, a previous recovery most
+ * likely did not complete. We don't check the
+ * attention state here because it may have already
+ * been cleared by a "common" reset but we shall
+ * proceed with "process kill" anyway.
+ */
+ if ((bnx2x_get_load_cnt(bp) == 0) &&
+ bnx2x_trylock_hw_lock(bp,
+ HW_LOCK_RESOURCE_RESERVED_08) &&
+ (!bnx2x_leader_reset(bp))) {
+ DP(NETIF_MSG_HW, "Recovered in open\n");
+ break;
+ }
+
+ bnx2x_set_power_state(bp, PCI_D3hot);
+
+ printk(KERN_ERR"%s: Recovery flow hasn't been properly"
+ " completed yet. Try again later. If u still see this"
+ " message after a few retries then power cycle is"
+ " required.\n", bp->dev->name);
+
+ return -EAGAIN;
+ } while (0);
+ }
+
+ bp->recovery_state = BNX2X_RECOVERY_DONE;
+
return bnx2x_nic_load(bp, LOAD_OPEN);
}
@@ -11462,9 +12569,7 @@ static int bnx2x_close(struct net_device *dev)
/* Unload the driver, release IRQs */
bnx2x_nic_unload(bp, UNLOAD_CLOSE);
- if (atomic_read(&bp->pdev->enable_cnt) == 1)
- if (!CHIP_REV_IS_SLOW(bp))
- bnx2x_set_power_state(bp, PCI_D3hot);
+ bnx2x_set_power_state(bp, PCI_D3hot);
return 0;
}
@@ -11494,21 +12599,21 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
else { /* some multicasts */
if (CHIP_IS_E1(bp)) {
int i, old, offset;
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
struct mac_configuration_cmd *config =
bnx2x_sp(bp, mcast_config);
i = 0;
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
config->config_table[i].
cam_entry.msb_mac_addr =
- swab16(*(u16 *)&mclist->dmi_addr[0]);
+ swab16(*(u16 *)&ha->addr[0]);
config->config_table[i].
cam_entry.middle_mac_addr =
- swab16(*(u16 *)&mclist->dmi_addr[2]);
+ swab16(*(u16 *)&ha->addr[2]);
config->config_table[i].
cam_entry.lsb_mac_addr =
- swab16(*(u16 *)&mclist->dmi_addr[4]);
+ swab16(*(u16 *)&ha->addr[4]);
config->config_table[i].cam_entry.flags =
cpu_to_le16(port);
config->config_table[i].
@@ -11562,18 +12667,18 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
0);
} else { /* E1H */
/* Accept one or more multicasts */
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
u32 mc_filter[MC_HASH_SIZE];
u32 crc, bit, regidx;
int i;
memset(mc_filter, 0, 4 * MC_HASH_SIZE);
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
- mclist->dmi_addr);
+ ha->addr);
- crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
+ crc = crc32c_le(0, ha->addr, ETH_ALEN);
bit = (crc >> 24) & 0xff;
regidx = bit >> 5;
bit &= 0x1f;
@@ -11690,6 +12795,11 @@ static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
struct bnx2x *bp = netdev_priv(dev);
int rc = 0;
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+ return -EAGAIN;
+ }
+
if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
return -EINVAL;
@@ -11717,7 +12827,7 @@ static void bnx2x_tx_timeout(struct net_device *dev)
bnx2x_panic();
#endif
/* This allows the netif to be shutdown gracefully before resetting */
- schedule_work(&bp->reset_task);
+ schedule_delayed_work(&bp->reset_task, 0);
}
#ifdef BCM_VLAN
@@ -11789,18 +12899,21 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
rc = pci_enable_device(pdev);
if (rc) {
- pr_err("Cannot enable PCI device, aborting\n");
+ dev_err(&bp->pdev->dev,
+ "Cannot enable PCI device, aborting\n");
goto err_out;
}
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- pr_err("Cannot find PCI device base address, aborting\n");
+ dev_err(&bp->pdev->dev,
+ "Cannot find PCI device base address, aborting\n");
rc = -ENODEV;
goto err_out_disable;
}
if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
- pr_err("Cannot find second PCI device base address, aborting\n");
+ dev_err(&bp->pdev->dev, "Cannot find second PCI device"
+ " base address, aborting\n");
rc = -ENODEV;
goto err_out_disable;
}
@@ -11808,7 +12921,8 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
if (atomic_read(&pdev->enable_cnt) == 1) {
rc = pci_request_regions(pdev, DRV_MODULE_NAME);
if (rc) {
- pr_err("Cannot obtain PCI resources, aborting\n");
+ dev_err(&bp->pdev->dev,
+ "Cannot obtain PCI resources, aborting\n");
goto err_out_disable;
}
@@ -11818,28 +12932,32 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
if (bp->pm_cap == 0) {
- pr_err("Cannot find power management capability, aborting\n");
+ dev_err(&bp->pdev->dev,
+ "Cannot find power management capability, aborting\n");
rc = -EIO;
goto err_out_release;
}
bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
if (bp->pcie_cap == 0) {
- pr_err("Cannot find PCI Express capability, aborting\n");
+ dev_err(&bp->pdev->dev,
+ "Cannot find PCI Express capability, aborting\n");
rc = -EIO;
goto err_out_release;
}
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
bp->flags |= USING_DAC_FLAG;
- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
- pr_err("pci_set_consistent_dma_mask failed, aborting\n");
+ if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
+ dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
+ " failed, aborting\n");
rc = -EIO;
goto err_out_release;
}
- } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
- pr_err("System does not support DMA, aborting\n");
+ } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
+ dev_err(&bp->pdev->dev,
+ "System does not support DMA, aborting\n");
rc = -EIO;
goto err_out_release;
}
@@ -11852,7 +12970,8 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
bp->regview = pci_ioremap_bar(pdev, 0);
if (!bp->regview) {
- pr_err("Cannot map register space, aborting\n");
+ dev_err(&bp->pdev->dev,
+ "Cannot map register space, aborting\n");
rc = -ENOMEM;
goto err_out_release;
}
@@ -11861,7 +12980,8 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
min_t(u64, BNX2X_DB_SIZE,
pci_resource_len(pdev, 2)));
if (!bp->doorbells) {
- pr_err("Cannot map doorbell space, aborting\n");
+ dev_err(&bp->pdev->dev,
+ "Cannot map doorbell space, aborting\n");
rc = -ENOMEM;
goto err_out_unmap;
}
@@ -11876,6 +12996,9 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
+ /* Reset the load counter */
+ bnx2x_clear_load_cnt(bp);
+
dev->watchdog_timeo = TX_TIMEOUT;
dev->netdev_ops = &bnx2x_netdev_ops;
@@ -11963,7 +13086,8 @@ static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
offset = be32_to_cpu(sections[i].offset);
len = be32_to_cpu(sections[i].len);
if (offset + len > firmware->size) {
- pr_err("Section %d length is out of bounds\n", i);
+ dev_err(&bp->pdev->dev,
+ "Section %d length is out of bounds\n", i);
return -EINVAL;
}
}
@@ -11975,7 +13099,8 @@ static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
if (be16_to_cpu(ops_offsets[i]) > num_ops) {
- pr_err("Section offset %d is out of bounds\n", i);
+ dev_err(&bp->pdev->dev,
+ "Section offset %d is out of bounds\n", i);
return -EINVAL;
}
}
@@ -11987,7 +13112,8 @@ static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
(fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
(fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
(fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
- pr_err("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
+ dev_err(&bp->pdev->dev,
+ "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
fw_ver[0], fw_ver[1], fw_ver[2],
fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
BCM_5710_FW_MINOR_VERSION,
@@ -12022,8 +13148,8 @@ static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
for (i = 0, j = 0; i < n/8; i++, j += 2) {
tmp = be32_to_cpu(source[j]);
target[i].op = (tmp >> 24) & 0xff;
- target[i].offset = tmp & 0xffffff;
- target[i].raw_data = be32_to_cpu(source[j+1]);
+ target[i].offset = tmp & 0xffffff;
+ target[i].raw_data = be32_to_cpu(source[j + 1]);
}
}
@@ -12057,20 +13183,24 @@ static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
if (CHIP_IS_E1(bp))
fw_file_name = FW_FILE_NAME_E1;
- else
+ else if (CHIP_IS_E1H(bp))
fw_file_name = FW_FILE_NAME_E1H;
+ else {
+ dev_err(dev, "Unsupported chip revision\n");
+ return -EINVAL;
+ }
- pr_info("Loading %s\n", fw_file_name);
+ dev_info(dev, "Loading %s\n", fw_file_name);
rc = request_firmware(&bp->firmware, fw_file_name, dev);
if (rc) {
- pr_err("Can't load firmware file %s\n", fw_file_name);
+ dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
goto request_firmware_exit;
}
rc = bnx2x_check_firmware(bp);
if (rc) {
- pr_err("Corrupt firmware file %s\n", fw_file_name);
+ dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
goto request_firmware_exit;
}
@@ -12129,7 +13259,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
/* dev zeroed in init_etherdev */
dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
if (!dev) {
- pr_err("Cannot allocate net device\n");
+ dev_err(&pdev->dev, "Cannot allocate net device\n");
return -ENOMEM;
}
@@ -12151,7 +13281,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
/* Set init arrays */
rc = bnx2x_init_firmware(bp, &pdev->dev);
if (rc) {
- pr_err("Error loading firmware\n");
+ dev_err(&pdev->dev, "Error loading firmware\n");
goto init_one_exit;
}
@@ -12162,11 +13292,12 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
}
bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
- netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
- board_info[ent->driver_data].name,
- (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
- pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
- dev->base_addr, bp->pdev->irq, dev->dev_addr);
+ netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
+ " IRQ %d, ", board_info[ent->driver_data].name,
+ (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
+ pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
+ dev->base_addr, bp->pdev->irq);
+ pr_cont("node addr %pM\n", dev->dev_addr);
return 0;
@@ -12194,13 +13325,16 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
struct bnx2x *bp;
if (!dev) {
- pr_err("BAD net device from bnx2x_init_one\n");
+ dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
return;
}
bp = netdev_priv(dev);
unregister_netdev(dev);
+ /* Make sure RESET task is not scheduled before continuing */
+ cancel_delayed_work_sync(&bp->reset_task);
+
kfree(bp->init_ops_offsets);
kfree(bp->init_ops);
kfree(bp->init_data);
@@ -12227,7 +13361,7 @@ static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
struct bnx2x *bp;
if (!dev) {
- pr_err("BAD net device from bnx2x_init_one\n");
+ dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
return -ENODEV;
}
bp = netdev_priv(dev);
@@ -12259,11 +13393,16 @@ static int bnx2x_resume(struct pci_dev *pdev)
int rc;
if (!dev) {
- pr_err("BAD net device from bnx2x_init_one\n");
+ dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
return -ENODEV;
}
bp = netdev_priv(dev);
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+ return -EAGAIN;
+ }
+
rtnl_lock();
pci_restore_state(pdev);
@@ -12430,6 +13569,11 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct bnx2x *bp = netdev_priv(dev);
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+ return;
+ }
+
rtnl_lock();
bnx2x_eeh_recover(bp);
diff --git a/drivers/net/bnx2x_reg.h b/drivers/net/bnx2x_reg.h
index 944964e78c81..a1f3bf0cd630 100644
--- a/drivers/net/bnx2x_reg.h
+++ b/drivers/net/bnx2x_reg.h
@@ -766,6 +766,8 @@
#define MCP_REG_MCPR_NVM_SW_ARB 0x86420
#define MCP_REG_MCPR_NVM_WRITE 0x86408
#define MCP_REG_MCPR_SCRATCH 0xa0000
+#define MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK (0x1<<1)
+#define MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK (0x1<<0)
/* [R 32] read first 32 bit after inversion of function 0. mapped as
follows: [0] NIG attention for function0; [1] NIG attention for
function1; [2] GPIO1 mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp;
@@ -1249,6 +1251,8 @@
#define MISC_REG_E1HMF_MODE 0xa5f8
/* [RW 32] Debug only: spare RW register reset by core reset */
#define MISC_REG_GENERIC_CR_0 0xa460
+/* [RW 32] Debug only: spare RW register reset by por reset */
+#define MISC_REG_GENERIC_POR_1 0xa474
/* [RW 32] GPIO. [31-28] FLOAT port 0; [27-24] FLOAT port 0; When any of
these bits is written as a '1'; the corresponding SPIO bit will turn off
it's drivers and become an input. This is the reset state of all GPIO
@@ -1438,7 +1442,7 @@
(~misc_registers_sw_timer_cfg_4.sw_timer_cfg_4[1] ) is set */
#define MISC_REG_SW_TIMER_RELOAD_VAL_4 0xa2fc
/* [RW 32] the value of the counter for sw timers1-8. there are 8 addresses
- in this register. addres 0 - timer 1; address - timer 2�address 7 -
+ in this register. address 0 - timer 1; address 1 - timer 2, ... address 7 -
timer 8 */
#define MISC_REG_SW_TIMER_VAL 0xa5c0
/* [RW 1] Set by the MCP to remember if one or more of the drivers is/are
@@ -2407,10 +2411,16 @@
/* [R 8] debug only: A bit mask for all PSWHST arbiter clients. '1' means
this client is waiting for the arbiter. */
#define PXP_REG_HST_CLIENTS_WAITING_TO_ARB 0x103008
+/* [RW 1] When 1; doorbells are discarded and not passed to doorbell queue
+ block. Should be used for closing the gates. */
+#define PXP_REG_HST_DISCARD_DOORBELLS 0x1030a4
/* [R 1] debug only: '1' means this PSWHST is discarding doorbells. This bit
should update according to 'hst_discard_doorbells' register when the state
machine is idle */
#define PXP_REG_HST_DISCARD_DOORBELLS_STATUS 0x1030a0
+/* [RW 1] When 1; new internal writes arriving to the block are discarded.
+ Should be used for closing the gates. */
+#define PXP_REG_HST_DISCARD_INTERNAL_WRITES 0x1030a8
/* [R 6] debug only: A bit mask for all PSWHST internal write clients. '1'
means this PSWHST is discarding inputs from this client. Each bit should
update accoring to 'hst_discard_internal_writes' register when the state
@@ -4422,11 +4432,21 @@
#define MISC_REGISTERS_GPIO_PORT_SHIFT 4
#define MISC_REGISTERS_GPIO_SET_POS 8
#define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588
+#define MISC_REGISTERS_RESET_REG_1_RST_HC (0x1<<29)
#define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7)
+#define MISC_REGISTERS_RESET_REG_1_RST_PXP (0x1<<26)
+#define MISC_REGISTERS_RESET_REG_1_RST_PXPV (0x1<<27)
#define MISC_REGISTERS_RESET_REG_1_SET 0x584
#define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598
#define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0)
#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE (0x1<<14)
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE (0x1<<15)
+#define MISC_REGISTERS_RESET_REG_2_RST_GRC (0x1<<4)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B (0x1<<6)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE (0x1<<5)
+#define MISC_REGISTERS_RESET_REG_2_RST_MDIO (0x1<<13)
+#define MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE (0x1<<11)
+#define MISC_REGISTERS_RESET_REG_2_RST_RBCN (0x1<<9)
#define MISC_REGISTERS_RESET_REG_2_SET 0x594
#define MISC_REGISTERS_RESET_REG_3_CLEAR 0x5a8
#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ (0x1<<1)
@@ -4454,6 +4474,7 @@
#define HW_LOCK_RESOURCE_GPIO 1
#define HW_LOCK_RESOURCE_MDIO 0
#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3
+#define HW_LOCK_RESOURCE_RESERVED_08 8
#define HW_LOCK_RESOURCE_SPIO 2
#define HW_LOCK_RESOURCE_UNDI 5
#define PRS_FLAG_OVERETH_IPV4 1
@@ -4474,6 +4495,10 @@
#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 (1<<5)
#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 (1<<9)
#define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR (1<<12)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY (1<<28)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (1<<31)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY (1<<29)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY (1<<30)
#define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT (1<<15)
#define AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR (1<<14)
#define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR (1<<20)
diff --git a/drivers/net/bonding/bond_ipv6.c b/drivers/net/bonding/bond_ipv6.c
index 6dd64cf3cb76..969ffed86b9f 100644
--- a/drivers/net/bonding/bond_ipv6.c
+++ b/drivers/net/bonding/bond_ipv6.c
@@ -37,7 +37,6 @@
static void bond_glean_dev_ipv6(struct net_device *dev, struct in6_addr *addr)
{
struct inet6_dev *idev;
- struct inet6_ifaddr *ifa;
if (!dev)
return;
@@ -47,10 +46,12 @@ static void bond_glean_dev_ipv6(struct net_device *dev, struct in6_addr *addr)
return;
read_lock_bh(&idev->lock);
- ifa = idev->addr_list;
- if (ifa)
+ if (!list_empty(&idev->addr_list)) {
+ struct inet6_ifaddr *ifa
+ = list_first_entry(&idev->addr_list,
+ struct inet6_ifaddr, if_list);
ipv6_addr_copy(addr, &ifa->addr);
- else
+ } else
ipv6_addr_set(addr, 0, 0, 0, 0);
read_unlock_bh(&idev->lock);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 0075514bf32f..5e12462a9d5e 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -59,6 +59,7 @@
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
+#include <linux/netpoll.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
@@ -430,7 +431,18 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
}
skb->priority = 1;
- dev_queue_xmit(skb);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ if (unlikely(bond->dev->priv_flags & IFF_IN_NETPOLL)) {
+ struct netpoll *np = bond->dev->npinfo->netpoll;
+ slave_dev->npinfo = bond->dev->npinfo;
+ np->real_dev = np->dev = skb->dev;
+ slave_dev->priv_flags |= IFF_IN_NETPOLL;
+ netpoll_send_skb(np, skb);
+ slave_dev->priv_flags &= ~IFF_IN_NETPOLL;
+ np->dev = bond->dev;
+ } else
+#endif
+ dev_queue_xmit(skb);
return 0;
}
@@ -762,32 +774,6 @@ static int bond_check_dev_link(struct bonding *bond,
/*----------------------------- Multicast list ------------------------------*/
/*
- * Returns 0 if dmi1 and dmi2 are the same, non-0 otherwise
- */
-static inline int bond_is_dmi_same(const struct dev_mc_list *dmi1,
- const struct dev_mc_list *dmi2)
-{
- return memcmp(dmi1->dmi_addr, dmi2->dmi_addr, dmi1->dmi_addrlen) == 0 &&
- dmi1->dmi_addrlen == dmi2->dmi_addrlen;
-}
-
-/*
- * returns dmi entry if found, NULL otherwise
- */
-static struct dev_mc_list *bond_mc_list_find_dmi(struct dev_mc_list *dmi,
- struct dev_mc_list *mc_list)
-{
- struct dev_mc_list *idmi;
-
- for (idmi = mc_list; idmi; idmi = idmi->next) {
- if (bond_is_dmi_same(dmi, idmi))
- return idmi;
- }
-
- return NULL;
-}
-
-/*
* Push the promiscuity flag down to appropriate slaves
*/
static int bond_set_promiscuity(struct bonding *bond, int inc)
@@ -839,18 +825,18 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
* Add a Multicast address to slaves
* according to mode
*/
-static void bond_mc_add(struct bonding *bond, void *addr, int alen)
+static void bond_mc_add(struct bonding *bond, void *addr)
{
if (USES_PRIMARY(bond->params.mode)) {
/* write lock already acquired */
if (bond->curr_active_slave)
- dev_mc_add(bond->curr_active_slave->dev, addr, alen, 0);
+ dev_mc_add(bond->curr_active_slave->dev, addr);
} else {
struct slave *slave;
int i;
bond_for_each_slave(bond, slave, i)
- dev_mc_add(slave->dev, addr, alen, 0);
+ dev_mc_add(slave->dev, addr);
}
}
@@ -858,18 +844,17 @@ static void bond_mc_add(struct bonding *bond, void *addr, int alen)
* Remove a multicast address from slave
* according to mode
*/
-static void bond_mc_delete(struct bonding *bond, void *addr, int alen)
+static void bond_mc_del(struct bonding *bond, void *addr)
{
if (USES_PRIMARY(bond->params.mode)) {
/* write lock already acquired */
if (bond->curr_active_slave)
- dev_mc_delete(bond->curr_active_slave->dev, addr,
- alen, 0);
+ dev_mc_del(bond->curr_active_slave->dev, addr);
} else {
struct slave *slave;
int i;
bond_for_each_slave(bond, slave, i) {
- dev_mc_delete(slave->dev, addr, alen, 0);
+ dev_mc_del(slave->dev, addr);
}
}
}
@@ -896,66 +881,22 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
}
/*
- * Totally destroys the mc_list in bond
- */
-static void bond_mc_list_destroy(struct bonding *bond)
-{
- struct dev_mc_list *dmi;
-
- dmi = bond->mc_list;
- while (dmi) {
- bond->mc_list = dmi->next;
- kfree(dmi);
- dmi = bond->mc_list;
- }
-
- bond->mc_list = NULL;
-}
-
-/*
- * Copy all the Multicast addresses from src to the bonding device dst
- */
-static int bond_mc_list_copy(struct dev_mc_list *mc_list, struct bonding *bond,
- gfp_t gfp_flag)
-{
- struct dev_mc_list *dmi, *new_dmi;
-
- for (dmi = mc_list; dmi; dmi = dmi->next) {
- new_dmi = kmalloc(sizeof(struct dev_mc_list), gfp_flag);
-
- if (!new_dmi) {
- /* FIXME: Potential memory leak !!! */
- return -ENOMEM;
- }
-
- new_dmi->next = bond->mc_list;
- bond->mc_list = new_dmi;
- new_dmi->dmi_addrlen = dmi->dmi_addrlen;
- memcpy(new_dmi->dmi_addr, dmi->dmi_addr, dmi->dmi_addrlen);
- new_dmi->dmi_users = dmi->dmi_users;
- new_dmi->dmi_gusers = dmi->dmi_gusers;
- }
-
- return 0;
-}
-
-/*
* flush all members of flush->mc_list from device dev->mc_list
*/
static void bond_mc_list_flush(struct net_device *bond_dev,
struct net_device *slave_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
- for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next)
- dev_mc_delete(slave_dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
+ netdev_for_each_mc_addr(ha, bond_dev)
+ dev_mc_del(slave_dev, ha->addr);
if (bond->params.mode == BOND_MODE_8023AD) {
/* del lacpdu mc addr from mc list */
u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
- dev_mc_delete(slave_dev, lacpdu_multicast, ETH_ALEN, 0);
+ dev_mc_del(slave_dev, lacpdu_multicast);
}
}
@@ -969,7 +910,7 @@ static void bond_mc_list_flush(struct net_device *bond_dev,
static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
struct slave *old_active)
{
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
if (!USES_PRIMARY(bond->params.mode))
/* nothing to do - mc list is already up-to-date on
@@ -984,9 +925,8 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
if (bond->dev->flags & IFF_ALLMULTI)
dev_set_allmulti(old_active->dev, -1);
- for (dmi = bond->dev->mc_list; dmi; dmi = dmi->next)
- dev_mc_delete(old_active->dev, dmi->dmi_addr,
- dmi->dmi_addrlen, 0);
+ netdev_for_each_mc_addr(ha, bond->dev)
+ dev_mc_del(old_active->dev, ha->addr);
}
if (new_active) {
@@ -997,9 +937,8 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
if (bond->dev->flags & IFF_ALLMULTI)
dev_set_allmulti(new_active->dev, 1);
- for (dmi = bond->dev->mc_list; dmi; dmi = dmi->next)
- dev_mc_add(new_active->dev, dmi->dmi_addr,
- dmi->dmi_addrlen, 0);
+ netdev_for_each_mc_addr(ha, bond->dev)
+ dev_mc_add(new_active->dev, ha->addr);
bond_resend_igmp_join_requests(bond);
}
}
@@ -1329,6 +1268,61 @@ static void bond_detach_slave(struct bonding *bond, struct slave *slave)
bond->slave_cnt--;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * You must hold read lock on bond->lock before calling this.
+ */
+static bool slaves_support_netpoll(struct net_device *bond_dev)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct slave *slave;
+ int i = 0;
+ bool ret = true;
+
+ bond_for_each_slave(bond, slave, i) {
+ if ((slave->dev->priv_flags & IFF_DISABLE_NETPOLL) ||
+ !slave->dev->netdev_ops->ndo_poll_controller)
+ ret = false;
+ }
+ return i != 0 && ret;
+}
+
+static void bond_poll_controller(struct net_device *bond_dev)
+{
+ struct net_device *dev = bond_dev->npinfo->netpoll->real_dev;
+ if (dev != bond_dev)
+ netpoll_poll_dev(dev);
+}
+
+static void bond_netpoll_cleanup(struct net_device *bond_dev)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct slave *slave;
+ const struct net_device_ops *ops;
+ int i;
+
+ read_lock(&bond->lock);
+ bond_dev->npinfo = NULL;
+ bond_for_each_slave(bond, slave, i) {
+ if (slave->dev) {
+ ops = slave->dev->netdev_ops;
+ if (ops->ndo_netpoll_cleanup)
+ ops->ndo_netpoll_cleanup(slave->dev);
+ else
+ slave->dev->npinfo = NULL;
+ }
+ }
+ read_unlock(&bond->lock);
+}
+
+#else
+
+static void bond_netpoll_cleanup(struct net_device *bond_dev)
+{
+}
+
+#endif
+
/*---------------------------------- IOCTL ----------------------------------*/
static int bond_sethwaddr(struct net_device *bond_dev,
@@ -1411,7 +1405,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
struct bonding *bond = netdev_priv(bond_dev);
const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
struct slave *new_slave = NULL;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
struct sockaddr addr;
int link_reporting;
int old_features = bond_dev->features;
@@ -1485,14 +1479,27 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_dev->name,
bond_dev->type, slave_dev->type);
- netdev_bonding_change(bond_dev, NETDEV_BONDING_OLDTYPE);
+ res = netdev_bonding_change(bond_dev,
+ NETDEV_PRE_TYPE_CHANGE);
+ res = notifier_to_errno(res);
+ if (res) {
+ pr_err("%s: refused to change device type\n",
+ bond_dev->name);
+ res = -EBUSY;
+ goto err_undo_flags;
+ }
+
+ /* Flush unicast and multicast addresses */
+ dev_uc_flush(bond_dev);
+ dev_mc_flush(bond_dev);
if (slave_dev->type != ARPHRD_ETHER)
bond_setup_by_slave(bond_dev, slave_dev);
else
ether_setup(bond_dev);
- netdev_bonding_change(bond_dev, NETDEV_BONDING_NEWTYPE);
+ netdev_bonding_change(bond_dev,
+ NETDEV_POST_TYPE_CHANGE);
}
} else if (bond_dev->type != slave_dev->type) {
pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it.\n",
@@ -1593,9 +1600,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
netif_addr_lock_bh(bond_dev);
/* upload master's mc_list to new slave */
- for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next)
- dev_mc_add(slave_dev, dmi->dmi_addr,
- dmi->dmi_addrlen, 0);
+ netdev_for_each_mc_addr(ha, bond_dev)
+ dev_mc_add(slave_dev, ha->addr);
netif_addr_unlock_bh(bond_dev);
}
@@ -1603,7 +1609,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
/* add lacpdu mc addr to mc list */
u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
- dev_mc_add(slave_dev, lacpdu_multicast, ETH_ALEN, 0);
+ dev_mc_add(slave_dev, lacpdu_multicast);
}
bond_add_vlans_on_slave(bond, slave_dev);
@@ -1735,6 +1741,18 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_set_carrier(bond);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ if (slaves_support_netpoll(bond_dev)) {
+ bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
+ if (bond_dev->npinfo)
+ slave_dev->npinfo = bond_dev->npinfo;
+ } else if (!(bond_dev->priv_flags & IFF_DISABLE_NETPOLL)) {
+ bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
+ pr_info("New slave device %s does not support netpoll\n",
+ slave_dev->name);
+ pr_info("Disabling netpoll support for %s\n", bond_dev->name);
+ }
+#endif
read_unlock(&bond->lock);
res = bond_create_slave_symlinks(bond_dev, slave_dev);
@@ -1801,6 +1819,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
return -EINVAL;
}
+ netdev_bonding_change(bond_dev, NETDEV_BONDING_DESLAVE);
write_lock_bh(&bond->lock);
slave = bond_get_slave_by_dev(bond, slave_dev);
@@ -1929,6 +1948,17 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
netdev_set_master(slave_dev, NULL);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ read_lock_bh(&bond->lock);
+ if (slaves_support_netpoll(bond_dev))
+ bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
+ read_unlock_bh(&bond->lock);
+ if (slave_dev->netdev_ops->ndo_netpoll_cleanup)
+ slave_dev->netdev_ops->ndo_netpoll_cleanup(slave_dev);
+ else
+ slave_dev->npinfo = NULL;
+#endif
+
/* close slave before restoring its mac address */
dev_close(slave_dev);
@@ -3905,10 +3935,24 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
return res;
}
+static bool bond_addr_in_mc_list(unsigned char *addr,
+ struct netdev_hw_addr_list *list,
+ int addrlen)
+{
+ struct netdev_hw_addr *ha;
+
+ netdev_hw_addr_list_for_each(ha, list)
+ if (!memcmp(ha->addr, addr, addrlen))
+ return true;
+
+ return false;
+}
+
static void bond_set_multicast_list(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
+ bool found;
/*
* Do promisc before checking multicast_mode
@@ -3943,20 +3987,25 @@ static void bond_set_multicast_list(struct net_device *bond_dev)
bond->flags = bond_dev->flags;
/* looking for addresses to add to slaves' mc list */
- for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next) {
- if (!bond_mc_list_find_dmi(dmi, bond->mc_list))
- bond_mc_add(bond, dmi->dmi_addr, dmi->dmi_addrlen);
+ netdev_for_each_mc_addr(ha, bond_dev) {
+ found = bond_addr_in_mc_list(ha->addr, &bond->mc_list,
+ bond_dev->addr_len);
+ if (!found)
+ bond_mc_add(bond, ha->addr);
}
/* looking for addresses to delete from slaves' list */
- for (dmi = bond->mc_list; dmi; dmi = dmi->next) {
- if (!bond_mc_list_find_dmi(dmi, bond_dev->mc_list))
- bond_mc_delete(bond, dmi->dmi_addr, dmi->dmi_addrlen);
+ netdev_hw_addr_list_for_each(ha, &bond->mc_list) {
+ found = bond_addr_in_mc_list(ha->addr, &bond_dev->mc,
+ bond_dev->addr_len);
+ if (!found)
+ bond_mc_del(bond, ha->addr);
}
/* save master's multicast list */
- bond_mc_list_destroy(bond);
- bond_mc_list_copy(bond_dev->mc_list, bond, GFP_ATOMIC);
+ __hw_addr_flush(&bond->mc_list);
+ __hw_addr_add_multiple(&bond->mc_list, &bond_dev->mc,
+ bond_dev->addr_len, NETDEV_HW_ADDR_T_MULTICAST);
read_unlock(&bond->lock);
}
@@ -4448,6 +4497,10 @@ static const struct net_device_ops bond_netdev_ops = {
.ndo_vlan_rx_register = bond_vlan_rx_register,
.ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_netpoll_cleanup = bond_netpoll_cleanup,
+ .ndo_poll_controller = bond_poll_controller,
+#endif
};
static void bond_destructor(struct net_device *bond_dev)
@@ -4541,6 +4594,8 @@ static void bond_uninit(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
+ bond_netpoll_cleanup(bond_dev);
+
/* Release the bonded slaves */
bond_release_all(bond_dev);
@@ -4550,9 +4605,7 @@ static void bond_uninit(struct net_device *bond_dev)
bond_remove_proc_entry(bond);
- netif_addr_lock_bh(bond_dev);
- bond_mc_list_destroy(bond);
- netif_addr_unlock_bh(bond_dev);
+ __hw_addr_flush(&bond->mc_list);
}
/*------------------------- Module initialization ---------------------------*/
@@ -4683,13 +4736,13 @@ static int bond_check_params(struct bond_params *params)
}
if (num_grat_arp < 0 || num_grat_arp > 255) {
- pr_warning("Warning: num_grat_arp (%d) not in range 0-255 so it was reset to 1 \n",
+ pr_warning("Warning: num_grat_arp (%d) not in range 0-255 so it was reset to 1\n",
num_grat_arp);
num_grat_arp = 1;
}
if (num_unsol_na < 0 || num_unsol_na > 255) {
- pr_warning("Warning: num_unsol_na (%d) not in range 0-255 so it was reset to 1 \n",
+ pr_warning("Warning: num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
num_unsol_na);
num_unsol_na = 1;
}
@@ -4924,6 +4977,8 @@ static int bond_init(struct net_device *bond_dev)
list_add_tail(&bond->bond_list, &bn->dev_list);
bond_prepare_sysfs_group(bond);
+
+ __hw_addr_init(&bond->mc_list);
return 0;
}
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 257a7a4dfce9..2aa336720591 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -202,7 +202,7 @@ struct bonding {
char proc_file_name[IFNAMSIZ];
#endif /* CONFIG_PROC_FS */
struct list_head bond_list;
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr_list mc_list;
int (*xmit_hash_policy)(struct sk_buff *, int);
__be32 master_ip;
u16 flags;
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
new file mode 100644
index 000000000000..0b28e0107697
--- /dev/null
+++ b/drivers/net/caif/Kconfig
@@ -0,0 +1,17 @@
+#
+# CAIF physical drivers
+#
+
+if CAIF
+
+comment "CAIF transport drivers"
+
+config CAIF_TTY
+ tristate "CAIF TTY transport driver"
+ default n
+ ---help---
+ The CAIF TTY transport driver is a Line Discipline (ldisc)
+ identified as N_CAIF. When this ldisc is opened from user space
+ it will redirect the TTY's traffic into the CAIF stack.
+
+endif # CAIF
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
new file mode 100644
index 000000000000..52b6d1f826f8
--- /dev/null
+++ b/drivers/net/caif/Makefile
@@ -0,0 +1,12 @@
+ifeq ($(CONFIG_CAIF_DEBUG),1)
+CAIF_DBG_FLAGS := -DDEBUG
+endif
+
+KBUILD_EXTRA_SYMBOLS=net/caif/Module.symvers
+
+ccflags-y := $(CAIF_FLAGS) $(CAIF_DBG_FLAGS)
+clean-dirs:= .tmp_versions
+clean-files:= Module.symvers modules.order *.cmd *~ \
+
+# Serial interface
+obj-$(CONFIG_CAIF_TTY) += caif_serial.o
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
new file mode 100644
index 000000000000..09257ca8f563
--- /dev/null
+++ b/drivers/net/caif/caif_serial.c
@@ -0,0 +1,449 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland / sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/init.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/tty.h>
+#include <linux/file.h>
+#include <linux/if_arp.h>
+#include <net/caif/caif_device.h>
+#include <net/caif/cfcnfg.h>
+#include <linux/err.h>
+#include <linux/debugfs.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sjur Brendeland<sjur.brandeland@stericsson.com>");
+MODULE_DESCRIPTION("CAIF serial device TTY line discipline");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_LDISC(N_CAIF);
+
+#define SEND_QUEUE_LOW 10
+#define SEND_QUEUE_HIGH 100
+#define CAIF_SENDING 1 /* Bit 1 = 0x02 */
+#define CAIF_FLOW_OFF_SENT 4 /* Bit 4 = 0x10 */
+#define MAX_WRITE_CHUNK 4096
+#define ON 1
+#define OFF 0
+#define CAIF_MAX_MTU 4096
+
+/* This list is protected by the rtnl lock. */
+static LIST_HEAD(ser_list);
+
+static int ser_loop;
+module_param(ser_loop, bool, S_IRUGO);
+MODULE_PARM_DESC(ser_loop, "Run in simulated loopback mode.");
+
+static int ser_use_stx = 1;
+module_param(ser_use_stx, bool, S_IRUGO);
+MODULE_PARM_DESC(ser_use_stx, "STX enabled or not.");
+
+static int ser_use_fcs = 1;
+
+module_param(ser_use_fcs, bool, S_IRUGO);
+MODULE_PARM_DESC(ser_use_fcs, "FCS enabled or not.");
+
+static int ser_write_chunk = MAX_WRITE_CHUNK;
+module_param(ser_write_chunk, int, S_IRUGO);
+
+MODULE_PARM_DESC(ser_write_chunk, "Maximum size of data written to UART.");
+
+static struct dentry *debugfsdir;
+
+static int caif_net_open(struct net_device *dev);
+static int caif_net_close(struct net_device *dev);
+
+struct ser_device {
+ struct caif_dev_common common;
+ struct list_head node;
+ struct net_device *dev;
+ struct sk_buff_head head;
+ struct tty_struct *tty;
+ bool tx_started;
+ unsigned long state;
+ char *tty_name;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_tty_dir;
+ struct debugfs_blob_wrapper tx_blob;
+ struct debugfs_blob_wrapper rx_blob;
+ u8 rx_data[128];
+ u8 tx_data[128];
+ u8 tty_status;
+
+#endif
+};
+
+static void caifdev_setup(struct net_device *dev);
+static void ldisc_tx_wakeup(struct tty_struct *tty);
+#ifdef CONFIG_DEBUG_FS
+static inline void update_tty_status(struct ser_device *ser)
+{
+ ser->tty_status =
+ ser->tty->stopped << 5 |
+ ser->tty->hw_stopped << 4 |
+ ser->tty->flow_stopped << 3 |
+ ser->tty->packet << 2 |
+ ser->tty->low_latency << 1 |
+ ser->tty->warned;
+}
+static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
+{
+ ser->debugfs_tty_dir =
+ debugfs_create_dir(tty->name, debugfsdir);
+ if (!IS_ERR(ser->debugfs_tty_dir)) {
+ debugfs_create_blob("last_tx_msg", S_IRUSR,
+ ser->debugfs_tty_dir,
+ &ser->tx_blob);
+
+ debugfs_create_blob("last_rx_msg", S_IRUSR,
+ ser->debugfs_tty_dir,
+ &ser->rx_blob);
+
+ debugfs_create_x32("ser_state", S_IRUSR,
+ ser->debugfs_tty_dir,
+ (u32 *)&ser->state);
+
+ debugfs_create_x8("tty_status", S_IRUSR,
+ ser->debugfs_tty_dir,
+ &ser->tty_status);
+
+ }
+ ser->tx_blob.data = ser->tx_data;
+ ser->tx_blob.size = 0;
+ ser->rx_blob.data = ser->rx_data;
+ ser->rx_blob.size = 0;
+}
+
+static inline void debugfs_deinit(struct ser_device *ser)
+{
+ debugfs_remove_recursive(ser->debugfs_tty_dir);
+}
+
+static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
+{
+ if (size > sizeof(ser->rx_data))
+ size = sizeof(ser->rx_data);
+ memcpy(ser->rx_data, data, size);
+ ser->rx_blob.data = ser->rx_data;
+ ser->rx_blob.size = size;
+}
+
+static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
+{
+ if (size > sizeof(ser->tx_data))
+ size = sizeof(ser->tx_data);
+ memcpy(ser->tx_data, data, size);
+ ser->tx_blob.data = ser->tx_data;
+ ser->tx_blob.size = size;
+}
+#else
+static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
+{
+}
+
+static inline void debugfs_deinit(struct ser_device *ser)
+{
+}
+
+static inline void update_tty_status(struct ser_device *ser)
+{
+}
+
+static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
+{
+}
+
+static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
+{
+}
+
+#endif
+
+static void ldisc_receive(struct tty_struct *tty, const u8 *data,
+ char *flags, int count)
+{
+ struct sk_buff *skb = NULL;
+ struct ser_device *ser;
+ int ret;
+ u8 *p;
+ ser = tty->disc_data;
+
+ /*
+ * NOTE: flags may contain information about break or overrun.
+ * This is not yet handled.
+ */
+
+
+ /*
+ * Workaround for garbage at start of transmission,
+ * only enable if STX handling is not enabled.
+ */
+ if (!ser->common.use_stx && !ser->tx_started) {
+ dev_info(&ser->dev->dev,
+ "Bytes received before initial transmission -"
+ "bytes discarded.\n");
+ return;
+ }
+
+ BUG_ON(ser->dev == NULL);
+
+ /* Get a suitable caif packet and copy in data. */
+ skb = netdev_alloc_skb(ser->dev, count+1);
+ if (skb == NULL)
+ return;
+ p = skb_put(skb, count);
+ memcpy(p, data, count);
+
+ skb->protocol = htons(ETH_P_CAIF);
+ skb_reset_mac_header(skb);
+ skb->dev = ser->dev;
+ debugfs_rx(ser, data, count);
+ /* Push received packet up the stack. */
+ ret = netif_rx_ni(skb);
+ if (!ret) {
+ ser->dev->stats.rx_packets++;
+ ser->dev->stats.rx_bytes += count;
+ } else
+ ++ser->dev->stats.rx_dropped;
+ update_tty_status(ser);
+}
+
+static int handle_tx(struct ser_device *ser)
+{
+ struct tty_struct *tty;
+ struct sk_buff *skb;
+ int tty_wr, len, room;
+ tty = ser->tty;
+ ser->tx_started = true;
+
+ /* Enter critical section */
+ if (test_and_set_bit(CAIF_SENDING, &ser->state))
+ return 0;
+
+ /* skb_peek is safe because handle_tx is called after skb_queue_tail */
+ while ((skb = skb_peek(&ser->head)) != NULL) {
+
+ /* Make sure you don't write too much */
+ len = skb->len;
+ room = tty_write_room(tty);
+ if (!room)
+ break;
+ if (room > ser_write_chunk)
+ room = ser_write_chunk;
+ if (len > room)
+ len = room;
+
+ /* Write to tty or loopback */
+ if (!ser_loop) {
+ tty_wr = tty->ops->write(tty, skb->data, len);
+ update_tty_status(ser);
+ } else {
+ tty_wr = len;
+ ldisc_receive(tty, skb->data, NULL, len);
+ }
+ ser->dev->stats.tx_packets++;
+ ser->dev->stats.tx_bytes += tty_wr;
+
+ /* Error on TTY ?! */
+ if (tty_wr < 0)
+ goto error;
+ /* Reduce buffer written, and discard if empty */
+ skb_pull(skb, tty_wr);
+ if (skb->len == 0) {
+ struct sk_buff *tmp = skb_dequeue(&ser->head);
+ BUG_ON(tmp != skb);
+ if (in_interrupt())
+ dev_kfree_skb_irq(skb);
+ else
+ kfree_skb(skb);
+ }
+ }
+ /* Send flow on (resume) once the queue has drained below the low watermark */
+ if (ser->head.qlen <= SEND_QUEUE_LOW &&
+ test_and_clear_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
+ ser->common.flowctrl != NULL)
+ ser->common.flowctrl(ser->dev, ON);
+ clear_bit(CAIF_SENDING, &ser->state);
+ return 0;
+error:
+ clear_bit(CAIF_SENDING, &ser->state);
+ return tty_wr;
+}
+
+static int caif_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ser_device *ser;
+ BUG_ON(dev == NULL);
+ ser = netdev_priv(dev);
+
+ /* Send flow off once, on high water mark */
+ if (ser->head.qlen > SEND_QUEUE_HIGH &&
+ !test_and_set_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
+ ser->common.flowctrl != NULL)
+
+ ser->common.flowctrl(ser->dev, OFF);
+
+ skb_queue_tail(&ser->head, skb);
+ return handle_tx(ser);
+}
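
Taken together, caif_xmit() and handle_tx() implement a two-watermark flow-control scheme on the transmit queue. A condensed sketch of just that idiom, using the ser_device fields and constants defined earlier in this file, for reference only:

	/* Condensed restatement of the watermark idiom used by caif_xmit()/handle_tx(). */
	static void flow_watermark_sketch(struct ser_device *ser)
	{
		/* producer: assert flow-off exactly once when the queue passes the high mark */
		if (ser->head.qlen > SEND_QUEUE_HIGH &&
		    !test_and_set_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
		    ser->common.flowctrl != NULL)
			ser->common.flowctrl(ser->dev, OFF);

		/* consumer: release it once the queue drains to the low mark */
		if (ser->head.qlen <= SEND_QUEUE_LOW &&
		    test_and_clear_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
		    ser->common.flowctrl != NULL)
			ser->common.flowctrl(ser->dev, ON);
	}
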
+
+
+static void ldisc_tx_wakeup(struct tty_struct *tty)
+{
+ struct ser_device *ser;
+ ser = tty->disc_data;
+ BUG_ON(ser == NULL);
+ BUG_ON(ser->tty != tty);
+ handle_tx(ser);
+}
+
+
+static int ldisc_open(struct tty_struct *tty)
+{
+ struct ser_device *ser;
+ struct net_device *dev;
+ char name[64];
+ int result;
+
+ /* No write no play */
+ if (tty->ops->write == NULL)
+ return -EOPNOTSUPP;
+ if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_TTY_CONFIG))
+ return -EPERM;
+
+ snprintf(name, sizeof(name), "cf%s", tty->name);
+ dev = alloc_netdev(sizeof(*ser), name, caifdev_setup);
+ ser = netdev_priv(dev);
+ ser->tty = tty_kref_get(tty);
+ ser->dev = dev;
+ debugfs_init(ser, tty);
+ tty->receive_room = N_TTY_BUF_SIZE;
+ tty->disc_data = ser;
+ set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ rtnl_lock();
+ result = register_netdevice(dev);
+ if (result) {
+ rtnl_unlock();
+ free_netdev(dev);
+ return -ENODEV;
+ }
+
+ list_add(&ser->node, &ser_list);
+ rtnl_unlock();
+ netif_stop_queue(dev);
+ update_tty_status(ser);
+ return 0;
+}
+
+static void ldisc_close(struct tty_struct *tty)
+{
+ struct ser_device *ser = tty->disc_data;
+ /* Remove may be called inside or outside of rtnl_lock */
+ int islocked = rtnl_is_locked();
+ if (!islocked)
+ rtnl_lock();
+ /* device is freed automagically by net-sysfs */
+ dev_close(ser->dev);
+ unregister_netdevice(ser->dev);
+ list_del(&ser->node);
+ debugfs_deinit(ser);
+ tty_kref_put(ser->tty);
+ if (!islocked)
+ rtnl_unlock();
+}
+
+/* The line discipline structure. */
+static struct tty_ldisc_ops caif_ldisc = {
+ .owner = THIS_MODULE,
+ .magic = TTY_LDISC_MAGIC,
+ .name = "n_caif",
+ .open = ldisc_open,
+ .close = ldisc_close,
+ .receive_buf = ldisc_receive,
+ .write_wakeup = ldisc_tx_wakeup
+};
+
+static int register_ldisc(void)
+{
+ int result;
+ result = tty_register_ldisc(N_CAIF, &caif_ldisc);
+ if (result < 0) {
+ pr_err("cannot register CAIF ldisc=%d err=%d\n", N_CAIF,
+ result);
+ return result;
+ }
+ return result;
+}
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = caif_net_open,
+ .ndo_stop = caif_net_close,
+ .ndo_start_xmit = caif_xmit
+};
+
+static void caifdev_setup(struct net_device *dev)
+{
+ struct ser_device *serdev = netdev_priv(dev);
+ dev->features = 0;
+ dev->netdev_ops = &netdev_ops;
+ dev->type = ARPHRD_CAIF;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ dev->mtu = CAIF_MAX_MTU;
+ dev->hard_header_len = CAIF_NEEDED_HEADROOM;
+ dev->tx_queue_len = 0;
+ dev->destructor = free_netdev;
+ skb_queue_head_init(&serdev->head);
+ serdev->common.link_select = CAIF_LINK_LOW_LATENCY;
+ serdev->common.use_frag = true;
+ serdev->common.use_stx = ser_use_stx;
+ serdev->common.use_fcs = ser_use_fcs;
+ serdev->dev = dev;
+}
+
+
+static int caif_net_open(struct net_device *dev)
+{
+ struct ser_device *ser;
+ ser = netdev_priv(dev);
+ netif_wake_queue(dev);
+ return 0;
+}
+
+static int caif_net_close(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ return 0;
+}
+
+static int __init caif_ser_init(void)
+{
+ int ret;
+ ret = register_ldisc();
+ debugfsdir = debugfs_create_dir("caif_serial", NULL);
+ return ret;
+}
+
+static void __exit caif_ser_exit(void)
+{
+ struct ser_device *ser = NULL;
+ struct list_head *node;
+ struct list_head *_tmp;
+ list_for_each_safe(node, _tmp, &ser_list) {
+ ser = list_entry(node, struct ser_device, node);
+ dev_close(ser->dev);
+ unregister_netdevice(ser->dev);
+ list_del(node);
+ }
+ tty_unregister_ldisc(N_CAIF);
+ debugfs_remove_recursive(debugfsdir);
+}
+
+module_init(caif_ser_init);
+module_exit(caif_ser_exit);
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 05b751719bd5..2c5227c02fa0 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -63,6 +63,16 @@ config CAN_BFIN
To compile this driver as a module, choose M here: the
module will be called bfin_can.
+config CAN_JANZ_ICAN3
+ tristate "Janz VMOD-ICAN3 Intelligent CAN controller"
+ depends on CAN_DEV && MFD_JANZ_CMODIO
+ ---help---
+ Driver for Janz VMOD-ICAN3 Intelligent CAN controller module, which
+ connects to a MODULbus carrier board.
+
+ This driver can also be built as a module. If so, the module will be
+ called janz-ican3.ko.
+
source "drivers/net/can/mscan/Kconfig"
source "drivers/net/can/sja1000/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 7a702f28d01c..9047cd066fea 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -15,5 +15,6 @@ obj-$(CONFIG_CAN_AT91) += at91_can.o
obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
obj-$(CONFIG_CAN_BFIN) += bfin_can.o
+obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index a2f29a38798a..2d8bd86bc5e2 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -35,7 +35,6 @@
#include <linux/string.h>
#include <linux/types.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
@@ -376,7 +375,6 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
at91_write(priv, AT91_MCR(mb), reg_mcr);
stats->tx_bytes += cf->can_dlc;
- dev->trans_start = jiffies;
/* _NOTE_: substract AT91_MB_TX_FIRST offset from mb! */
can_put_echo_skb(skb, dev, mb - AT91_MB_TX_FIRST);
@@ -662,7 +660,6 @@ static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr)
at91_poll_err_frame(dev, cf, reg_sr);
netif_receive_skb(skb);
- dev->last_rx = jiffies;
dev->stats.rx_packets++;
dev->stats.rx_bytes += cf->can_dlc;
@@ -899,7 +896,6 @@ static void at91_irq_err(struct net_device *dev)
at91_irq_err_state(dev, cf, new_state);
netif_rx(skb);
- dev->last_rx = jiffies;
dev->stats.rx_packets++;
dev->stats.rx_bytes += cf->can_dlc;
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 03489864376d..b6e890d28366 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -18,7 +18,6 @@
#include <linux/skbuff.h>
#include <linux/platform_device.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
@@ -270,8 +269,6 @@ static int bfin_can_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* fill data length code */
bfin_write16(&reg->chl[TRANSMIT_CHL].dlc, dlc);
- dev->trans_start = jiffies;
-
can_put_echo_skb(skb, dev, 0);
/* set transmit request */
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
new file mode 100644
index 000000000000..6e533dcc36c0
--- /dev/null
+++ b/drivers/net/can/janz-ican3.c
@@ -0,0 +1,1830 @@
+/*
+ * Janz MODULbus VMOD-ICAN3 CAN Interface Driver
+ *
+ * Copyright (c) 2010 Ira W. Snyder <iws@ovro.caltech.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+
+#include <linux/netdevice.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+
+#include <linux/mfd/janz.h>
+
+/* the DPM has 64k of memory, organized into 256x 256 byte pages */
+#define DPM_NUM_PAGES 256
+#define DPM_PAGE_SIZE 256
+#define DPM_PAGE_ADDR(p) ((p) * DPM_PAGE_SIZE)
+
+/* JANZ ICAN3 "old-style" host interface queue page numbers */
+#define QUEUE_OLD_CONTROL 0
+#define QUEUE_OLD_RB0 1
+#define QUEUE_OLD_RB1 2
+#define QUEUE_OLD_WB0 3
+#define QUEUE_OLD_WB1 4
+
+/* Janz ICAN3 "old-style" host interface control registers */
+#define MSYNC_PEER 0x00 /* ICAN only */
+#define MSYNC_LOCL 0x01 /* host only */
+#define TARGET_RUNNING 0x02
+
+#define MSYNC_RB0 0x01
+#define MSYNC_RB1 0x02
+#define MSYNC_RBLW 0x04
+#define MSYNC_RB_MASK (MSYNC_RB0 | MSYNC_RB1)
+
+#define MSYNC_WB0 0x10
+#define MSYNC_WB1 0x20
+#define MSYNC_WBLW 0x40
+#define MSYNC_WB_MASK (MSYNC_WB0 | MSYNC_WB1)
+
+/* Janz ICAN3 "new-style" host interface queue page numbers */
+#define QUEUE_TOHOST 5
+#define QUEUE_FROMHOST_MID 6
+#define QUEUE_FROMHOST_HIGH 7
+#define QUEUE_FROMHOST_LOW 8
+
+/* The first free page in the DPM is #9 */
+#define DPM_FREE_START 9
+
+/* Janz ICAN3 "new-style" and "fast" host interface descriptor flags */
+#define DESC_VALID 0x80
+#define DESC_WRAP 0x40
+#define DESC_INTERRUPT 0x20
+#define DESC_IVALID 0x10
+#define DESC_LEN(len) (len)
+
+/* Janz ICAN3 Firmware Messages */
+#define MSG_CONNECTI 0x02
+#define MSG_DISCONNECT 0x03
+#define MSG_IDVERS 0x04
+#define MSG_MSGLOST 0x05
+#define MSG_NEWHOSTIF 0x08
+#define MSG_INQUIRY 0x0a
+#define MSG_SETAFILMASK 0x10
+#define MSG_INITFDPMQUEUE 0x11
+#define MSG_HWCONF 0x12
+#define MSG_FMSGLOST 0x15
+#define MSG_CEVTIND 0x37
+#define MSG_CBTRREQ 0x41
+#define MSG_COFFREQ 0x42
+#define MSG_CONREQ 0x43
+#define MSG_CCONFREQ 0x47
+
+/*
+ * Janz ICAN3 CAN Inquiry Message Types
+ *
+ * NOTE: there appears to be a firmware bug here. You must send
+ * NOTE: INQUIRY_STATUS and expect to receive an INQUIRY_EXTENDED
+ * NOTE: response. The controller never responds to a message with
+ * NOTE: the INQUIRY_EXTENDED subspec :(
+ */
+#define INQUIRY_STATUS 0x00
+#define INQUIRY_TERMINATION 0x01
+#define INQUIRY_EXTENDED 0x04
+
+/* Janz ICAN3 CAN Set Acceptance Filter Mask Message Types */
+#define SETAFILMASK_REJECT 0x00
+#define SETAFILMASK_FASTIF 0x02
+
+/* Janz ICAN3 CAN Hardware Configuration Message Types */
+#define HWCONF_TERMINATE_ON 0x01
+#define HWCONF_TERMINATE_OFF 0x00
+
+/* Janz ICAN3 CAN Event Indication Message Types */
+#define CEVTIND_EI 0x01
+#define CEVTIND_DOI 0x02
+#define CEVTIND_LOST 0x04
+#define CEVTIND_FULL 0x08
+#define CEVTIND_BEI 0x10
+
+#define CEVTIND_CHIP_SJA1000 0x02
+
+#define ICAN3_BUSERR_QUOTA_MAX 255
+
+/* Janz ICAN3 CAN Frame Conversion */
+#define ICAN3_ECHO 0x10
+#define ICAN3_EFF_RTR 0x40
+#define ICAN3_SFF_RTR 0x10
+#define ICAN3_EFF 0x80
+
+#define ICAN3_CAN_TYPE_MASK 0x0f
+#define ICAN3_CAN_TYPE_SFF 0x00
+#define ICAN3_CAN_TYPE_EFF 0x01
+
+#define ICAN3_CAN_DLC_MASK 0x0f
+
+/*
+ * SJA1000 Status and Error Register Definitions
+ *
+ * Copied from drivers/net/can/sja1000/sja1000.h
+ */
+
+/* status register content */
+#define SR_BS 0x80
+#define SR_ES 0x40
+#define SR_TS 0x20
+#define SR_RS 0x10
+#define SR_TCS 0x08
+#define SR_TBS 0x04
+#define SR_DOS 0x02
+#define SR_RBS 0x01
+
+#define SR_CRIT (SR_BS|SR_ES)
+
+/* ECC register */
+#define ECC_SEG 0x1F
+#define ECC_DIR 0x20
+#define ECC_ERR 6
+#define ECC_BIT 0x00
+#define ECC_FORM 0x40
+#define ECC_STUFF 0x80
+#define ECC_MASK 0xc0
+
+/* Number of buffers for use in the "new-style" host interface */
+#define ICAN3_NEW_BUFFERS 16
+
+/* Number of buffers for use in the "fast" host interface */
+#define ICAN3_TX_BUFFERS 512
+#define ICAN3_RX_BUFFERS 1024
+
+/* SJA1000 Clock Input */
+#define ICAN3_CAN_CLOCK 8000000
+
+/* Driver Name */
+#define DRV_NAME "janz-ican3"
+
+/* DPM Control Registers -- starts at offset 0x100 in the MODULbus registers */
+struct ican3_dpm_control {
+ /* window address register */
+ u8 window_address;
+ u8 unused1;
+
+ /*
+ * Read access: clear interrupt from microcontroller
+ * Write access: send interrupt to microcontroller
+ */
+ u8 interrupt;
+ u8 unused2;
+
+ /* write-only: reset all hardware on the module */
+ u8 hwreset;
+ u8 unused3;
+
+ /* write-only: generate an interrupt to the TPU */
+ u8 tpuinterrupt;
+};
+
+struct ican3_dev {
+
+ /* must be the first member */
+ struct can_priv can;
+
+ /* CAN network device */
+ struct net_device *ndev;
+ struct napi_struct napi;
+
+ /* Device for printing */
+ struct device *dev;
+
+ /* module number */
+ unsigned int num;
+
+ /* base address of registers and IRQ */
+ struct janz_cmodio_onboard_regs __iomem *ctrl;
+ struct ican3_dpm_control __iomem *dpmctrl;
+ void __iomem *dpm;
+ int irq;
+
+ /* CAN bus termination status */
+ struct completion termination_comp;
+ bool termination_enabled;
+
+ /* CAN bus error status registers */
+ struct completion buserror_comp;
+ struct can_berr_counter bec;
+
+ /* old and new style host interface */
+ unsigned int iftype;
+
+ /*
+ * Any function which changes the current DPM page must hold this
+ * lock while it is performing data accesses. This ensures that the
+ * function will not be preempted and end up reading data from a
+ * different DPM page than it expects.
+ */
+ spinlock_t lock;
+
+ /* new host interface */
+ unsigned int rx_int;
+ unsigned int rx_num;
+ unsigned int tx_num;
+
+ /* fast host interface */
+ unsigned int fastrx_start;
+ unsigned int fastrx_int;
+ unsigned int fastrx_num;
+ unsigned int fasttx_start;
+ unsigned int fasttx_num;
+
+ /* first free DPM page */
+ unsigned int free_page;
+};
+
+struct ican3_msg {
+ u8 control;
+ u8 spec;
+ __le16 len;
+ u8 data[252];
+};
+
+struct ican3_new_desc {
+ u8 control;
+ u8 pointer;
+};
+
+struct ican3_fast_desc {
+ u8 control;
+ u8 command;
+ u8 data[14];
+};
+
+/* write to the window basic address register */
+static inline void ican3_set_page(struct ican3_dev *mod, unsigned int page)
+{
+ BUG_ON(page >= DPM_NUM_PAGES);
+ iowrite8(page, &mod->dpmctrl->window_address);
+}
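
Because the window_address register selects which 256-byte DPM page is visible, every set-page/access pair must run under mod->lock, as the comment in struct ican3_dev notes. A minimal sketch of that pattern (the page and offset arguments are placeholders):

	/* Sketch of the locked window-select/access idiom used throughout this driver. */
	static u8 dpm_read_byte_sketch(struct ican3_dev *mod, unsigned int page,
				       unsigned int offset)
	{
		unsigned long flags;
		u8 val;

		spin_lock_irqsave(&mod->lock, flags);
		ican3_set_page(mod, page);		/* select the DPM window */
		val = ioread8(mod->dpm + offset);	/* offset must stay below DPM_PAGE_SIZE */
		spin_unlock_irqrestore(&mod->lock, flags);
		return val;
	}
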
+
+/*
+ * ICAN3 "old-style" host interface
+ */
+
+/*
+ * Receive a message from the ICAN3 "old-style" firmware interface
+ *
+ * LOCKING: must hold mod->lock
+ *
+ * returns 0 on success, -ENOMEM when no message exists
+ */
+static int ican3_old_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+ unsigned int mbox, mbox_page;
+ u8 locl, peer, xord;
+
+ /* get the MSYNC registers */
+ ican3_set_page(mod, QUEUE_OLD_CONTROL);
+ peer = ioread8(mod->dpm + MSYNC_PEER);
+ locl = ioread8(mod->dpm + MSYNC_LOCL);
+ xord = locl ^ peer;
+
+ if ((xord & MSYNC_RB_MASK) == 0x00) {
+ dev_dbg(mod->dev, "no mbox for reading\n");
+ return -ENOMEM;
+ }
+
+ /* find the first free mbox to read */
+ if ((xord & MSYNC_RB_MASK) == MSYNC_RB_MASK)
+ mbox = (xord & MSYNC_RBLW) ? MSYNC_RB0 : MSYNC_RB1;
+ else
+ mbox = (xord & MSYNC_RB0) ? MSYNC_RB0 : MSYNC_RB1;
+
+ /* copy the message */
+ mbox_page = (mbox == MSYNC_RB0) ? QUEUE_OLD_RB0 : QUEUE_OLD_RB1;
+ ican3_set_page(mod, mbox_page);
+ memcpy_fromio(msg, mod->dpm, sizeof(*msg));
+
+ /*
+ * notify the firmware that the read buffer is available
+ * for it to fill again
+ */
+ locl ^= mbox;
+
+ ican3_set_page(mod, QUEUE_OLD_CONTROL);
+ iowrite8(locl, mod->dpm + MSYNC_LOCL);
+ return 0;
+}
+
+/*
+ * Send a message through the "old-style" firmware interface
+ *
+ * LOCKING: must hold mod->lock
+ *
+ * returns 0 on success, -ENOMEM when no free space exists
+ */
+static int ican3_old_send_msg(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+ unsigned int mbox, mbox_page;
+ u8 locl, peer, xord;
+
+ /* get the MSYNC registers */
+ ican3_set_page(mod, QUEUE_OLD_CONTROL);
+ peer = ioread8(mod->dpm + MSYNC_PEER);
+ locl = ioread8(mod->dpm + MSYNC_LOCL);
+ xord = locl ^ peer;
+
+ if ((xord & MSYNC_WB_MASK) == MSYNC_WB_MASK) {
+ dev_err(mod->dev, "no mbox for writing\n");
+ return -ENOMEM;
+ }
+
+ /* calculate a free mbox to use */
+ mbox = (xord & MSYNC_WB0) ? MSYNC_WB1 : MSYNC_WB0;
+
+ /* copy the message to the DPM */
+ mbox_page = (mbox == MSYNC_WB0) ? QUEUE_OLD_WB0 : QUEUE_OLD_WB1;
+ ican3_set_page(mod, mbox_page);
+ memcpy_toio(mod->dpm, msg, sizeof(*msg));
+
+ locl ^= mbox;
+ if (mbox == MSYNC_WB1)
+ locl |= MSYNC_WBLW;
+
+ ican3_set_page(mod, QUEUE_OLD_CONTROL);
+ iowrite8(locl, mod->dpm + MSYNC_LOCL);
+ return 0;
+}
+
+/*
+ * ICAN3 "new-style" Host Interface Setup
+ */
+
+static void __devinit ican3_init_new_host_interface(struct ican3_dev *mod)
+{
+ struct ican3_new_desc desc;
+ unsigned long flags;
+ void __iomem *dst;
+ int i;
+
+ spin_lock_irqsave(&mod->lock, flags);
+
+ /* set up the internal data structures for RX */
+ mod->rx_num = 0;
+ mod->rx_int = 0;
+
+ /* tohost queue descriptors are in page 5 */
+ ican3_set_page(mod, QUEUE_TOHOST);
+ dst = mod->dpm;
+
+ /* initialize the tohost (rx) queue descriptors: pages 9-24 */
+ for (i = 0; i < ICAN3_NEW_BUFFERS; i++) {
+ desc.control = DESC_INTERRUPT | DESC_LEN(1); /* I L=1 */
+ desc.pointer = mod->free_page;
+
+ /* set wrap flag on last buffer */
+ if (i == ICAN3_NEW_BUFFERS - 1)
+ desc.control |= DESC_WRAP;
+
+ memcpy_toio(dst, &desc, sizeof(desc));
+ dst += sizeof(desc);
+ mod->free_page++;
+ }
+
+ /* fromhost (tx) mid queue descriptors are in page 6 */
+ ican3_set_page(mod, QUEUE_FROMHOST_MID);
+ dst = mod->dpm;
+
+ /* set up the internal data structures for TX */
+ mod->tx_num = 0;
+
+ /* initialize the fromhost mid queue descriptors: pages 25-40 */
+ for (i = 0; i < ICAN3_NEW_BUFFERS; i++) {
+ desc.control = DESC_VALID | DESC_LEN(1); /* V L=1 */
+ desc.pointer = mod->free_page;
+
+ /* set wrap flag on last buffer */
+ if (i == ICAN3_NEW_BUFFERS - 1)
+ desc.control |= DESC_WRAP;
+
+ memcpy_toio(dst, &desc, sizeof(desc));
+ dst += sizeof(desc);
+ mod->free_page++;
+ }
+
+ /* fromhost hi queue descriptors are in page 7 */
+ ican3_set_page(mod, QUEUE_FROMHOST_HIGH);
+ dst = mod->dpm;
+
+ /* initialize only a single buffer in the fromhost hi queue (unused) */
+ desc.control = DESC_VALID | DESC_WRAP | DESC_LEN(1); /* VW L=1 */
+ desc.pointer = mod->free_page;
+ memcpy_toio(dst, &desc, sizeof(desc));
+ mod->free_page++;
+
+ /* fromhost low queue descriptors are in page 8 */
+ ican3_set_page(mod, QUEUE_FROMHOST_LOW);
+ dst = mod->dpm;
+
+ /* initialize only a single buffer in the fromhost low queue (unused) */
+ desc.control = DESC_VALID | DESC_WRAP | DESC_LEN(1); /* VW L=1 */
+ desc.pointer = mod->free_page;
+ memcpy_toio(dst, &desc, sizeof(desc));
+ mod->free_page++;
+
+ spin_unlock_irqrestore(&mod->lock, flags);
+}
+
+/*
+ * ICAN3 Fast Host Interface Setup
+ */
+
+static void __devinit ican3_init_fast_host_interface(struct ican3_dev *mod)
+{
+ struct ican3_fast_desc desc;
+ unsigned long flags;
+ unsigned int addr;
+ void __iomem *dst;
+ int i;
+
+ spin_lock_irqsave(&mod->lock, flags);
+
+ /* save the start recv page */
+ mod->fastrx_start = mod->free_page;
+ mod->fastrx_num = 0;
+ mod->fastrx_int = 0;
+
+ /* build a single fast tohost queue descriptor */
+ memset(&desc, 0, sizeof(desc));
+ desc.control = 0x00;
+ desc.command = 1;
+
+ /* build the tohost queue descriptor ring in memory */
+ addr = 0;
+ for (i = 0; i < ICAN3_RX_BUFFERS; i++) {
+
+ /* set the wrap bit on the last buffer */
+ if (i == ICAN3_RX_BUFFERS - 1)
+ desc.control |= DESC_WRAP;
+
+ /* switch to the correct page */
+ ican3_set_page(mod, mod->free_page);
+
+ /* copy the descriptor to the DPM */
+ dst = mod->dpm + addr;
+ memcpy_toio(dst, &desc, sizeof(desc));
+ addr += sizeof(desc);
+
+ /* move to the next page if necessary */
+ if (addr >= DPM_PAGE_SIZE) {
+ addr = 0;
+ mod->free_page++;
+ }
+ }
+
+ /* make sure we page-align the next queue */
+ if (addr != 0)
+ mod->free_page++;
+
+ /* save the start xmit page */
+ mod->fasttx_start = mod->free_page;
+ mod->fasttx_num = 0;
+
+ /* build a single fast fromhost queue descriptor */
+ memset(&desc, 0, sizeof(desc));
+ desc.control = DESC_VALID;
+ desc.command = 1;
+
+ /* build the fromhost queue descriptor ring in memory */
+ addr = 0;
+ for (i = 0; i < ICAN3_TX_BUFFERS; i++) {
+
+ /* set the wrap bit on the last buffer */
+ if (i == ICAN3_TX_BUFFERS - 1)
+ desc.control |= DESC_WRAP;
+
+ /* switch to the correct page */
+ ican3_set_page(mod, mod->free_page);
+
+ /* copy the descriptor to the DPM */
+ dst = mod->dpm + addr;
+ memcpy_toio(dst, &desc, sizeof(desc));
+ addr += sizeof(desc);
+
+ /* move to the next page if necessary */
+ if (addr >= DPM_PAGE_SIZE) {
+ addr = 0;
+ mod->free_page++;
+ }
+ }
+
+ spin_unlock_irqrestore(&mod->lock, flags);
+}
+
+/*
+ * ICAN3 "new-style" Host Interface Message Helpers
+ */
+
+/*
+ * LOCKING: must hold mod->lock
+ */
+static int ican3_new_send_msg(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+ struct ican3_new_desc desc;
+ void __iomem *desc_addr = mod->dpm + (mod->tx_num * sizeof(desc));
+
+ /* switch to the fromhost mid queue, and read the buffer descriptor */
+ ican3_set_page(mod, QUEUE_FROMHOST_MID);
+ memcpy_fromio(&desc, desc_addr, sizeof(desc));
+
+ if (!(desc.control & DESC_VALID)) {
+ dev_dbg(mod->dev, "%s: no free buffers\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* switch to the data page, copy the data */
+ ican3_set_page(mod, desc.pointer);
+ memcpy_toio(mod->dpm, msg, sizeof(*msg));
+
+ /* switch back to the descriptor, set the valid bit, write it back */
+ ican3_set_page(mod, QUEUE_FROMHOST_MID);
+ desc.control ^= DESC_VALID;
+ memcpy_toio(desc_addr, &desc, sizeof(desc));
+
+ /* update the tx number */
+ mod->tx_num = (desc.control & DESC_WRAP) ? 0 : (mod->tx_num + 1);
+ return 0;
+}
+
+/*
+ * LOCKING: must hold mod->lock
+ */
+static int ican3_new_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+ struct ican3_new_desc desc;
+ void __iomem *desc_addr = mod->dpm + (mod->rx_num * sizeof(desc));
+
+ /* switch to the tohost queue, and read the buffer descriptor */
+ ican3_set_page(mod, QUEUE_TOHOST);
+ memcpy_fromio(&desc, desc_addr, sizeof(desc));
+
+ if (!(desc.control & DESC_VALID)) {
+ dev_dbg(mod->dev, "%s: no buffers to recv\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* switch to the data page, copy the data */
+ ican3_set_page(mod, desc.pointer);
+ memcpy_fromio(msg, mod->dpm, sizeof(*msg));
+
+ /* switch back to the descriptor, toggle the valid bit, write it back */
+ ican3_set_page(mod, QUEUE_TOHOST);
+ desc.control ^= DESC_VALID;
+ memcpy_toio(desc_addr, &desc, sizeof(desc));
+
+ /* update the rx number */
+ mod->rx_num = (desc.control & DESC_WRAP) ? 0 : (mod->rx_num + 1);
+ return 0;
+}
+
+/*
+ * Message Send / Recv Helpers
+ */
+
+static int ican3_send_msg(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&mod->lock, flags);
+
+ if (mod->iftype == 0)
+ ret = ican3_old_send_msg(mod, msg);
+ else
+ ret = ican3_new_send_msg(mod, msg);
+
+ spin_unlock_irqrestore(&mod->lock, flags);
+ return ret;
+}
+
+static int ican3_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&mod->lock, flags);
+
+ if (mod->iftype == 0)
+ ret = ican3_old_recv_msg(mod, msg);
+ else
+ ret = ican3_new_recv_msg(mod, msg);
+
+ spin_unlock_irqrestore(&mod->lock, flags);
+ return ret;
+}
+
+/*
+ * Quick Pre-constructed Messages
+ */
+
+static int __devinit ican3_msg_connect(struct ican3_dev *mod)
+{
+ struct ican3_msg msg;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_CONNECTI;
+ msg.len = cpu_to_le16(0);
+
+ return ican3_send_msg(mod, &msg);
+}
+
+static int __devexit ican3_msg_disconnect(struct ican3_dev *mod)
+{
+ struct ican3_msg msg;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_DISCONNECT;
+ msg.len = cpu_to_le16(0);
+
+ return ican3_send_msg(mod, &msg);
+}
+
+static int __devinit ican3_msg_newhostif(struct ican3_dev *mod)
+{
+ struct ican3_msg msg;
+ int ret;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_NEWHOSTIF;
+ msg.len = cpu_to_le16(0);
+
+ /* If we're not using the old interface, switching seems bogus */
+ WARN_ON(mod->iftype != 0);
+
+ ret = ican3_send_msg(mod, &msg);
+ if (ret)
+ return ret;
+
+ /* mark the module as using the new host interface */
+ mod->iftype = 1;
+ return 0;
+}
+
+static int __devinit ican3_msg_fasthostif(struct ican3_dev *mod)
+{
+ struct ican3_msg msg;
+ unsigned int addr;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_INITFDPMQUEUE;
+ msg.len = cpu_to_le16(8);
+
+ /* write the tohost queue start address */
+ addr = DPM_PAGE_ADDR(mod->fastrx_start);
+ msg.data[0] = addr & 0xff;
+ msg.data[1] = (addr >> 8) & 0xff;
+ msg.data[2] = (addr >> 16) & 0xff;
+ msg.data[3] = (addr >> 24) & 0xff;
+
+ /* write the fromhost queue start address */
+ addr = DPM_PAGE_ADDR(mod->fasttx_start);
+ msg.data[4] = addr & 0xff;
+ msg.data[5] = (addr >> 8) & 0xff;
+ msg.data[6] = (addr >> 16) & 0xff;
+ msg.data[7] = (addr >> 24) & 0xff;
+
+ /* If we're not using the new interface yet, we cannot do this */
+ WARN_ON(mod->iftype != 1);
+
+ return ican3_send_msg(mod, &msg);
+}
+
+/*
+ * Setup the CAN filter to either accept or reject all
+ * messages from the CAN bus.
+ */
+static int __devinit ican3_set_id_filter(struct ican3_dev *mod, bool accept)
+{
+ struct ican3_msg msg;
+ int ret;
+
+ /* Standard Frame Format */
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_SETAFILMASK;
+ msg.len = cpu_to_le16(5);
+ msg.data[0] = 0x00; /* IDLo LSB */
+ msg.data[1] = 0x00; /* IDLo MSB */
+ msg.data[2] = 0xff; /* IDHi LSB */
+ msg.data[3] = 0x07; /* IDHi MSB */
+
+ /* accept all frames for the fast host interface, or reject all frames */
+ msg.data[4] = accept ? SETAFILMASK_FASTIF : SETAFILMASK_REJECT;
+
+ ret = ican3_send_msg(mod, &msg);
+ if (ret)
+ return ret;
+
+ /* Extended Frame Format */
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_SETAFILMASK;
+ msg.len = cpu_to_le16(13);
+ msg.data[0] = 0; /* MUX = 0 */
+ msg.data[1] = 0x00; /* IDLo LSB */
+ msg.data[2] = 0x00;
+ msg.data[3] = 0x00;
+ msg.data[4] = 0x20; /* IDLo MSB */
+ msg.data[5] = 0xff; /* IDHi LSB */
+ msg.data[6] = 0xff;
+ msg.data[7] = 0xff;
+ msg.data[8] = 0x3f; /* IDHi MSB */
+
+ /* accept all frames for the fast host interface, or reject all frames */
+ msg.data[9] = accept ? SETAFILMASK_FASTIF : SETAFILMASK_REJECT;
+
+ return ican3_send_msg(mod, &msg);
+}
+
+/*
+ * Bring the CAN bus online or offline
+ */
+static int ican3_set_bus_state(struct ican3_dev *mod, bool on)
+{
+ struct ican3_msg msg;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = on ? MSG_CONREQ : MSG_COFFREQ;
+ msg.len = cpu_to_le16(0);
+
+ return ican3_send_msg(mod, &msg);
+}
+
+static int ican3_set_termination(struct ican3_dev *mod, bool on)
+{
+ struct ican3_msg msg;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_HWCONF;
+ msg.len = cpu_to_le16(2);
+ msg.data[0] = 0x00;
+ msg.data[1] = on ? HWCONF_TERMINATE_ON : HWCONF_TERMINATE_OFF;
+
+ return ican3_send_msg(mod, &msg);
+}
+
+static int ican3_send_inquiry(struct ican3_dev *mod, u8 subspec)
+{
+ struct ican3_msg msg;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_INQUIRY;
+ msg.len = cpu_to_le16(2);
+ msg.data[0] = subspec;
+ msg.data[1] = 0x00;
+
+ return ican3_send_msg(mod, &msg);
+}
+
+static int ican3_set_buserror(struct ican3_dev *mod, u8 quota)
+{
+ struct ican3_msg msg;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_CCONFREQ;
+ msg.len = cpu_to_le16(2);
+ msg.data[0] = 0x00;
+ msg.data[1] = quota;
+
+ return ican3_send_msg(mod, &msg);
+}
+
+/*
+ * ICAN3 to Linux CAN Frame Conversion
+ */
+
+static void ican3_to_can_frame(struct ican3_dev *mod,
+ struct ican3_fast_desc *desc,
+ struct can_frame *cf)
+{
+ if ((desc->command & ICAN3_CAN_TYPE_MASK) == ICAN3_CAN_TYPE_SFF) {
+ if (desc->data[1] & ICAN3_SFF_RTR)
+ cf->can_id |= CAN_RTR_FLAG;
+
+ cf->can_id |= desc->data[0] << 3;
+ cf->can_id |= (desc->data[1] & 0xe0) >> 5;
+ cf->can_dlc = desc->data[1] & ICAN3_CAN_DLC_MASK;
+ memcpy(cf->data, &desc->data[2], sizeof(cf->data));
+ } else {
+ cf->can_dlc = desc->data[0] & ICAN3_CAN_DLC_MASK;
+ if (desc->data[0] & ICAN3_EFF_RTR)
+ cf->can_id |= CAN_RTR_FLAG;
+
+ if (desc->data[0] & ICAN3_EFF) {
+ cf->can_id |= CAN_EFF_FLAG;
+ cf->can_id |= desc->data[2] << 21; /* 28-21 */
+ cf->can_id |= desc->data[3] << 13; /* 20-13 */
+ cf->can_id |= desc->data[4] << 5; /* 12-5 */
+ cf->can_id |= (desc->data[5] & 0xf8) >> 3;
+ } else {
+ cf->can_id |= desc->data[2] << 3; /* 10-3 */
+ cf->can_id |= desc->data[3] >> 5; /* 2-0 */
+ }
+
+ memcpy(cf->data, &desc->data[6], sizeof(cf->data));
+ }
+}
+
+static void can_frame_to_ican3(struct ican3_dev *mod,
+ struct can_frame *cf,
+ struct ican3_fast_desc *desc)
+{
+ /* clear out any stale data in the descriptor */
+ memset(desc->data, 0, sizeof(desc->data));
+
+ /* we always use the extended format, with the ECHO flag set */
+ desc->command = ICAN3_CAN_TYPE_EFF;
+ desc->data[0] |= cf->can_dlc;
+ desc->data[1] |= ICAN3_ECHO;
+
+ if (cf->can_id & CAN_RTR_FLAG)
+ desc->data[0] |= ICAN3_EFF_RTR;
+
+ /* pack the id into the correct places */
+ if (cf->can_id & CAN_EFF_FLAG) {
+ desc->data[0] |= ICAN3_EFF;
+ desc->data[2] = (cf->can_id & 0x1fe00000) >> 21; /* 28-21 */
+ desc->data[3] = (cf->can_id & 0x001fe000) >> 13; /* 20-13 */
+ desc->data[4] = (cf->can_id & 0x00001fe0) >> 5; /* 12-5 */
+ desc->data[5] = (cf->can_id & 0x0000001f) << 3; /* 4-0 */
+ } else {
+ desc->data[2] = (cf->can_id & 0x7F8) >> 3; /* bits 10-3 */
+ desc->data[3] = (cf->can_id & 0x007) << 5; /* bits 2-0 */
+ }
+
+ /* copy the data bits into the descriptor */
+ memcpy(&desc->data[6], cf->data, sizeof(cf->data));
+}
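
As a quick sanity check on the standard-frame packing above, here is the round trip for one hypothetical 11-bit identifier (0x123 is an arbitrary example value, not something this driver uses):

	/* Worked example of the SFF bit packing in can_frame_to_ican3()/ican3_to_can_frame(). */
	static u32 sff_packing_example(void)
	{
		u32 id = 0x123;
		u8 d2 = (id & 0x7F8) >> 3;		/* bits 10-3 -> 0x24 */
		u8 d3 = (id & 0x007) << 5;		/* bits 2-0  -> 0x60 */

		return (d2 << 3) | (d3 >> 5);		/* reassembles to 0x123 */
	}
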
+
+/*
+ * Interrupt Handling
+ */
+
+/*
+ * Handle an ID + Version message response from the firmware. We never generate
+ * this message in production code, but it is very useful when debugging to be
+ * able to display this message.
+ */
+static void ican3_handle_idvers(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+ dev_dbg(mod->dev, "IDVERS response: %s\n", msg->data);
+}
+
+static void ican3_handle_msglost(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+ struct net_device *dev = mod->ndev;
+ struct net_device_stats *stats = &dev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ /*
+ * Report that communication messages with the microcontroller firmware
+ * are being lost. These are never CAN frames, so we do not generate an
+ * error frame for userspace
+ */
+ if (msg->spec == MSG_MSGLOST) {
+ dev_err(mod->dev, "lost %d control messages\n", msg->data[0]);
+ return;
+ }
+
+ /*
+ * Oops, this indicates that we have lost messages in the fast queue,
+ * which are exclusively CAN messages. Our driver isn't reading CAN
+ * frames fast enough.
+ *
+ * We'll pretend that the SJA1000 told us that it ran out of buffer
+ * space, because there is not a better message for this.
+ */
+ skb = alloc_can_err_skb(dev, &cf);
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ stats->rx_errors++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+ }
+}
+
+/*
+ * Handle CAN Event Indication Messages from the firmware
+ *
+ * The ICAN3 firmware provides the values of some SJA1000 registers when it
+ * generates this message. The code below is largely copied from the
+ * drivers/net/can/sja1000/sja1000.c file, and adapted as necessary
+ */
+static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+ struct net_device *dev = mod->ndev;
+ struct net_device_stats *stats = &dev->stats;
+ enum can_state state = mod->can.state;
+ u8 status, isrc, rxerr, txerr;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ /* we can only handle the SJA1000 part */
+ if (msg->data[1] != CEVTIND_CHIP_SJA1000) {
+ dev_err(mod->dev, "unable to handle errors on non-SJA1000\n");
+ return -ENODEV;
+ }
+
+ /* check the message length for sanity */
+ if (le16_to_cpu(msg->len) < 6) {
+ dev_err(mod->dev, "error message too short\n");
+ return -EINVAL;
+ }
+
+ skb = alloc_can_err_skb(dev, &cf);
+ if (skb == NULL)
+ return -ENOMEM;
+
+ isrc = msg->data[0];
+ status = msg->data[3];
+ rxerr = msg->data[4];
+ txerr = msg->data[5];
+
+ /* data overrun interrupt */
+ if (isrc == CEVTIND_DOI || isrc == CEVTIND_LOST) {
+ dev_dbg(mod->dev, "data overrun interrupt\n");
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+ }
+
+ /* error warning + passive interrupt */
+ if (isrc == CEVTIND_EI) {
+ dev_dbg(mod->dev, "error warning + passive interrupt\n");
+ if (status & SR_BS) {
+ state = CAN_STATE_BUS_OFF;
+ cf->can_id |= CAN_ERR_BUSOFF;
+ can_bus_off(dev);
+ } else if (status & SR_ES) {
+ if (rxerr >= 128 || txerr >= 128)
+ state = CAN_STATE_ERROR_PASSIVE;
+ else
+ state = CAN_STATE_ERROR_WARNING;
+ } else {
+ state = CAN_STATE_ERROR_ACTIVE;
+ }
+ }
+
+ /* bus error interrupt */
+ if (isrc == CEVTIND_BEI) {
+ u8 ecc = msg->data[2];
+
+ dev_dbg(mod->dev, "bus error interrupt\n");
+ mod->can.can_stats.bus_error++;
+ stats->rx_errors++;
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+
+ switch (ecc & ECC_MASK) {
+ case ECC_BIT:
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ break;
+ case ECC_FORM:
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ break;
+ case ECC_STUFF:
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ break;
+ default:
+ cf->data[2] |= CAN_ERR_PROT_UNSPEC;
+ cf->data[3] = ecc & ECC_SEG;
+ break;
+ }
+
+ if ((ecc & ECC_DIR) == 0)
+ cf->data[2] |= CAN_ERR_PROT_TX;
+
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
+
+ if (state != mod->can.state && (state == CAN_STATE_ERROR_WARNING ||
+ state == CAN_STATE_ERROR_PASSIVE)) {
+ cf->can_id |= CAN_ERR_CRTL;
+ if (state == CAN_STATE_ERROR_WARNING) {
+ mod->can.can_stats.error_warning++;
+ cf->data[1] = (txerr > rxerr) ?
+ CAN_ERR_CRTL_TX_WARNING :
+ CAN_ERR_CRTL_RX_WARNING;
+ } else {
+ mod->can.can_stats.error_passive++;
+ cf->data[1] = (txerr > rxerr) ?
+ CAN_ERR_CRTL_TX_PASSIVE :
+ CAN_ERR_CRTL_RX_PASSIVE;
+ }
+
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
+
+ mod->can.state = state;
+ stats->rx_errors++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+ return 0;
+}
+
+static void ican3_handle_inquiry(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+ switch (msg->data[0]) {
+ case INQUIRY_STATUS:
+ case INQUIRY_EXTENDED:
+ mod->bec.rxerr = msg->data[5];
+ mod->bec.txerr = msg->data[6];
+ complete(&mod->buserror_comp);
+ break;
+ case INQUIRY_TERMINATION:
+ mod->termination_enabled = msg->data[6] & HWCONF_TERMINATE_ON;
+ complete(&mod->termination_comp);
+ break;
+ default:
+ dev_err(mod->dev, "recieved an unknown inquiry response\n");
+ break;
+ }
+}
+
+static void ican3_handle_unknown_message(struct ican3_dev *mod,
+ struct ican3_msg *msg)
+{
+ dev_warn(mod->dev, "recieved unknown message: spec 0x%.2x length %d\n",
+ msg->spec, le16_to_cpu(msg->len));
+}
+
+/*
+ * Handle a control message from the firmware
+ */
+static void ican3_handle_message(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+ dev_dbg(mod->dev, "%s: modno %d spec 0x%.2x len %d bytes\n", __func__,
+ mod->num, msg->spec, le16_to_cpu(msg->len));
+
+ switch (msg->spec) {
+ case MSG_IDVERS:
+ ican3_handle_idvers(mod, msg);
+ break;
+ case MSG_MSGLOST:
+ case MSG_FMSGLOST:
+ ican3_handle_msglost(mod, msg);
+ break;
+ case MSG_CEVTIND:
+ ican3_handle_cevtind(mod, msg);
+ break;
+ case MSG_INQUIRY:
+ ican3_handle_inquiry(mod, msg);
+ break;
+ default:
+ ican3_handle_unknown_message(mod, msg);
+ break;
+ }
+}
+
+/*
+ * Check that there is room in the TX ring to transmit another skb
+ *
+ * LOCKING: must hold mod->lock
+ */
+static bool ican3_txok(struct ican3_dev *mod)
+{
+ struct ican3_fast_desc __iomem *desc;
+ u8 control;
+
+ /* copy the control bits of the descriptor */
+ ican3_set_page(mod, mod->fasttx_start + (mod->fasttx_num / 16));
+ desc = mod->dpm + ((mod->fasttx_num % 16) * sizeof(*desc));
+ control = ioread8(&desc->control);
+
+ /* if the control bits are not valid, then we have no more space */
+ if (!(control & DESC_VALID))
+ return false;
+
+ return true;
+}
+
+/*
+ * Receive one CAN frame from the hardware
+ *
+ * This works like the core of a NAPI poll function: it pulls a single CAN
+ * frame off the fast receive ring and is called repeatedly from ican3_napi()
+ * below. Control messages and CAN frames are both drained from the same
+ * poller, which keeps the locking simple.
+ *
+ * CONTEXT: called from the NAPI poll routine
+ */
+static int ican3_recv_skb(struct ican3_dev *mod)
+{
+ struct net_device *ndev = mod->ndev;
+ struct net_device_stats *stats = &ndev->stats;
+ struct ican3_fast_desc desc;
+ void __iomem *desc_addr;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mod->lock, flags);
+
+ /* copy the whole descriptor */
+ ican3_set_page(mod, mod->fastrx_start + (mod->fastrx_num / 16));
+ desc_addr = mod->dpm + ((mod->fastrx_num % 16) * sizeof(desc));
+ memcpy_fromio(&desc, desc_addr, sizeof(desc));
+
+ spin_unlock_irqrestore(&mod->lock, flags);
+
+ /* check that we actually have a CAN frame */
+ if (!(desc.control & DESC_VALID))
+ return -ENOBUFS;
+
+ /* allocate an skb */
+ skb = alloc_can_skb(ndev, &cf);
+ if (unlikely(skb == NULL)) {
+ stats->rx_dropped++;
+ goto err_noalloc;
+ }
+
+ /* convert the ICAN3 frame into Linux CAN format */
+ ican3_to_can_frame(mod, &desc, cf);
+
+ /* receive the skb, update statistics */
+ netif_receive_skb(skb);
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+
+err_noalloc:
+ /* toggle the valid bit and return the descriptor to the ring */
+ desc.control ^= DESC_VALID;
+
+ spin_lock_irqsave(&mod->lock, flags);
+
+ ican3_set_page(mod, mod->fastrx_start + (mod->fastrx_num / 16));
+ memcpy_toio(desc_addr, &desc, 1);
+
+ /* update the next buffer pointer */
+ mod->fastrx_num = (desc.control & DESC_WRAP) ? 0
+ : (mod->fastrx_num + 1);
+
+ /* there are still more buffers to process */
+ spin_unlock_irqrestore(&mod->lock, flags);
+ return 0;
+}
+
+static int ican3_napi(struct napi_struct *napi, int budget)
+{
+ struct ican3_dev *mod = container_of(napi, struct ican3_dev, napi);
+ struct ican3_msg msg;
+ unsigned long flags;
+ int received = 0;
+ int ret;
+
+ /* process all communication messages */
+ while (true) {
+ ret = ican3_recv_msg(mod, &msg);
+ if (ret)
+ break;
+
+ ican3_handle_message(mod, &msg);
+ }
+
+ /* process all CAN frames from the fast interface */
+ while (received < budget) {
+ ret = ican3_recv_skb(mod);
+ if (ret)
+ break;
+
+ received++;
+ }
+
+ /* We have processed all packets that the adapter had; since that was
+ * less than our budget, stop polling */
+ if (received < budget)
+ napi_complete(napi);
+
+ spin_lock_irqsave(&mod->lock, flags);
+
+ /* Wake up the transmit queue if necessary */
+ if (netif_queue_stopped(mod->ndev) && ican3_txok(mod))
+ netif_wake_queue(mod->ndev);
+
+ spin_unlock_irqrestore(&mod->lock, flags);
+
+ /* re-enable interrupt generation */
+ iowrite8(1 << mod->num, &mod->ctrl->int_enable);
+ return received;
+}
+
+static irqreturn_t ican3_irq(int irq, void *dev_id)
+{
+ struct ican3_dev *mod = dev_id;
+ u8 stat;
+
+ /*
+ * The interrupt status register on this device reports interrupts
+ * as zeroes instead of using ones like most other devices
+ */
+ stat = ioread8(&mod->ctrl->int_disable) & (1 << mod->num);
+ if (stat == (1 << mod->num))
+ return IRQ_NONE;
+
+ /* clear the MODULbus interrupt from the microcontroller */
+ ioread8(&mod->dpmctrl->interrupt);
+
+ /* disable interrupt generation, schedule the NAPI poller */
+ iowrite8(1 << mod->num, &mod->ctrl->int_disable);
+ napi_schedule(&mod->napi);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Firmware reset, startup, and shutdown
+ */
+
+/*
+ * Reset an ICAN module to its power-on state
+ *
+ * CONTEXT: no network device registered
+ * LOCKING: work function disabled
+ */
+static int ican3_reset_module(struct ican3_dev *mod)
+{
+ u8 val = 1 << mod->num;
+ unsigned long start;
+ u8 runold, runnew;
+
+ /* disable interrupts so no more work is scheduled */
+ iowrite8(1 << mod->num, &mod->ctrl->int_disable);
+
+ /* flush any pending work */
+ flush_scheduled_work();
+
+ /* the first unallocated page in the DPM is #9 */
+ mod->free_page = DPM_FREE_START;
+
+ ican3_set_page(mod, QUEUE_OLD_CONTROL);
+ runold = ioread8(mod->dpm + TARGET_RUNNING);
+
+ /* reset the module */
+ iowrite8(val, &mod->ctrl->reset_assert);
+ iowrite8(val, &mod->ctrl->reset_deassert);
+
+ /* wait until the module has finished resetting and is running */
+ start = jiffies;
+ do {
+ ican3_set_page(mod, QUEUE_OLD_CONTROL);
+ runnew = ioread8(mod->dpm + TARGET_RUNNING);
+ if (runnew == (runold ^ 0xff))
+ return 0;
+
+ msleep(10);
+ } while (time_before(jiffies, start + HZ / 4));
+
+ dev_err(mod->dev, "failed to reset CAN module\n");
+ return -ETIMEDOUT;
+}
+
+static void __devexit ican3_shutdown_module(struct ican3_dev *mod)
+{
+ ican3_msg_disconnect(mod);
+ ican3_reset_module(mod);
+}
+
+/*
+ * Startup an ICAN module, bringing it into fast mode
+ */
+static int __devinit ican3_startup_module(struct ican3_dev *mod)
+{
+ int ret;
+
+ ret = ican3_reset_module(mod);
+ if (ret) {
+ dev_err(mod->dev, "unable to reset module\n");
+ return ret;
+ }
+
+ /* re-enable interrupts so we can send messages */
+ iowrite8(1 << mod->num, &mod->ctrl->int_enable);
+
+ ret = ican3_msg_connect(mod);
+ if (ret) {
+ dev_err(mod->dev, "unable to connect to module\n");
+ return ret;
+ }
+
+ ican3_init_new_host_interface(mod);
+ ret = ican3_msg_newhostif(mod);
+ if (ret) {
+ dev_err(mod->dev, "unable to switch to new-style interface\n");
+ return ret;
+ }
+
+ /* default to "termination on" */
+ ret = ican3_set_termination(mod, true);
+ if (ret) {
+ dev_err(mod->dev, "unable to enable termination\n");
+ return ret;
+ }
+
+ /* default to "bus errors enabled" */
+ ret = ican3_set_buserror(mod, ICAN3_BUSERR_QUOTA_MAX);
+ if (ret) {
+ dev_err(mod->dev, "unable to set bus-error\n");
+ return ret;
+ }
+
+ ican3_init_fast_host_interface(mod);
+ ret = ican3_msg_fasthostif(mod);
+ if (ret) {
+ dev_err(mod->dev, "unable to switch to fast host interface\n");
+ return ret;
+ }
+
+ ret = ican3_set_id_filter(mod, true);
+ if (ret) {
+ dev_err(mod->dev, "unable to set acceptance filter\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * CAN Network Device
+ */
+
+static int ican3_open(struct net_device *ndev)
+{
+ struct ican3_dev *mod = netdev_priv(ndev);
+ u8 quota;
+ int ret;
+
+ /* open the CAN layer */
+ ret = open_candev(ndev);
+ if (ret) {
+ dev_err(mod->dev, "unable to start CAN layer\n");
+ return ret;
+ }
+
+ /* set the bus error generation state appropriately */
+ if (mod->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ quota = ICAN3_BUSERR_QUOTA_MAX;
+ else
+ quota = 0;
+
+ ret = ican3_set_buserror(mod, quota);
+ if (ret) {
+ dev_err(mod->dev, "unable to set bus-error\n");
+ close_candev(ndev);
+ return ret;
+ }
+
+ /* bring the bus online */
+ ret = ican3_set_bus_state(mod, true);
+ if (ret) {
+ dev_err(mod->dev, "unable to set bus-on\n");
+ close_candev(ndev);
+ return ret;
+ }
+
+ /* start up the network device */
+ mod->can.state = CAN_STATE_ERROR_ACTIVE;
+ netif_start_queue(ndev);
+
+ return 0;
+}
+
+static int ican3_stop(struct net_device *ndev)
+{
+ struct ican3_dev *mod = netdev_priv(ndev);
+ int ret;
+
+ /* stop the network device xmit routine */
+ netif_stop_queue(ndev);
+ mod->can.state = CAN_STATE_STOPPED;
+
+ /* bring the bus offline, stop receiving packets */
+ ret = ican3_set_bus_state(mod, false);
+ if (ret) {
+ dev_err(mod->dev, "unable to set bus-off\n");
+ return ret;
+ }
+
+ /* close the CAN layer */
+ close_candev(ndev);
+ return 0;
+}
+
+static int ican3_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct ican3_dev *mod = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ struct ican3_fast_desc desc;
+ void __iomem *desc_addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mod->lock, flags);
+
+ /* check that we can actually transmit */
+ if (!ican3_txok(mod)) {
+ dev_err(mod->dev, "no free descriptors, stopping queue\n");
+ netif_stop_queue(ndev);
+ spin_unlock_irqrestore(&mod->lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+
+ /* copy the control bits of the descriptor */
+ ican3_set_page(mod, mod->fasttx_start + (mod->fasttx_num / 16));
+ desc_addr = mod->dpm + ((mod->fasttx_num % 16) * sizeof(desc));
+ memset(&desc, 0, sizeof(desc));
+ memcpy_fromio(&desc, desc_addr, 1);
+
+ /* convert the Linux CAN frame into ICAN3 format */
+ can_frame_to_ican3(mod, cf, &desc);
+
+ /*
+ * the programming manual says that you must set the IVALID bit, then
+ * interrupt, then set the valid bit. Quite weird, but it seems to be
+ * required for this to work
+ */
+ desc.control |= DESC_IVALID;
+ memcpy_toio(desc_addr, &desc, sizeof(desc));
+
+ /* generate a MODULbus interrupt to the microcontroller */
+ iowrite8(0x01, &mod->dpmctrl->interrupt);
+
+ desc.control ^= DESC_VALID;
+ memcpy_toio(desc_addr, &desc, sizeof(desc));
+
+ /* update the next buffer pointer */
+ mod->fasttx_num = (desc.control & DESC_WRAP) ? 0
+ : (mod->fasttx_num + 1);
+
+ /* update statistics */
+ stats->tx_packets++;
+ stats->tx_bytes += cf->can_dlc;
+ kfree_skb(skb);
+
+ /*
+ * This hardware doesn't have TX-done notifications, so we'll try and
+ * emulate it the best we can using ECHO skbs. Get the next TX
+ * descriptor, and see if we have room to send. If not, stop the queue.
+ * It will be woken when the ECHO skb for the current packet is recv'd.
+ */
+
+ /* copy the control bits of the descriptor */
+ if (!ican3_txok(mod))
+ netif_stop_queue(ndev);
+
+ spin_unlock_irqrestore(&mod->lock, flags);
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops ican3_netdev_ops = {
+ .ndo_open = ican3_open,
+ .ndo_stop = ican3_stop,
+ .ndo_start_xmit = ican3_xmit,
+};
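
Once register_candev() has run (see ican3_probe() below), the device is driven through the normal SocketCAN interfaces rather than a private API. A hedged user-space sketch that sends one frame; the interface name "can0" and the example ID/payload are assumptions, not part of this driver:

	#include <string.h>
	#include <unistd.h>
	#include <net/if.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <linux/can.h>
	#include <linux/can/raw.h>

	static int send_one_frame(void)
	{
		struct sockaddr_can addr = { .can_family = AF_CAN };
		struct can_frame frame = { .can_id = 0x123, .can_dlc = 2,
					   .data = { 0xde, 0xad } };
		struct ifreq ifr;
		int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);

		if (s < 0)
			return -1;
		strcpy(ifr.ifr_name, "can0");		/* assumed interface name */
		if (ioctl(s, SIOCGIFINDEX, &ifr) < 0 ||
		    bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
			close(s);
			return -1;
		}
		write(s, &frame, sizeof(frame));	/* reaches ican3_xmit() via the CAN core */
		close(s);
		return 0;
	}
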
+
+/*
+ * Low-level CAN Device
+ */
+
+/* This structure was stolen from drivers/net/can/sja1000/sja1000.c */
+static struct can_bittiming_const ican3_bittiming_const = {
+ .name = DRV_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 64,
+ .brp_inc = 1,
+};
+
+/*
+ * This routine was stolen from drivers/net/can/sja1000/sja1000.c
+ *
+ * The bittiming register command for the ICAN3 just sets the bit timing
+ * registers on the SJA1000 chip directly
+ */
+static int ican3_set_bittiming(struct net_device *ndev)
+{
+ struct ican3_dev *mod = netdev_priv(ndev);
+ struct can_bittiming *bt = &mod->can.bittiming;
+ struct ican3_msg msg;
+ u8 btr0, btr1;
+
+ btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6);
+ btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) |
+ (((bt->phase_seg2 - 1) & 0x7) << 4);
+ if (mod->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+ btr1 |= 0x80;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_CBTRREQ;
+ msg.len = cpu_to_le16(4);
+ msg.data[0] = 0x00;
+ msg.data[1] = 0x00;
+ msg.data[2] = btr0;
+ msg.data[3] = btr1;
+
+ return ican3_send_msg(mod, &msg);
+}
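
For reference, one worked instance of the BTR packing above, using the fixed 8 MHz ICAN3_CAN_CLOCK. The particular segment split is only an example; in practice the CAN core computes these fields from the requested bitrate:

	/* brp=1, sjw=1, prop_seg=6, phase_seg1=7, phase_seg2=2
	 * => 1 + 6 + 7 + 2 = 16 time quanta per bit
	 * => 8 MHz / (1 * 16) = 500 kbit/s
	 */
	static void btr_worked_example(void)
	{
		u8 btr0 = ((1 - 1) & 0x3f) | (((1 - 1) & 0x3) << 6);		/* 0x00 */
		u8 btr1 = ((6 + 7 - 1) & 0xf) | (((2 - 1) & 0x7) << 4);	/* 0x1c */

		pr_info("example BTR0=%#04x BTR1=%#04x\n", btr0, btr1);
	}
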
+
+static int ican3_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+ struct ican3_dev *mod = netdev_priv(ndev);
+ int ret;
+
+ if (mode != CAN_MODE_START)
+ return -ENOTSUPP;
+
+ /* bring the bus online */
+ ret = ican3_set_bus_state(mod, true);
+ if (ret) {
+ dev_err(mod->dev, "unable to set bus-on\n");
+ return ret;
+ }
+
+ /* start up the network device */
+ mod->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ if (netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
+
+ return 0;
+}
+
+static int ican3_get_berr_counter(const struct net_device *ndev,
+ struct can_berr_counter *bec)
+{
+ struct ican3_dev *mod = netdev_priv(ndev);
+ int ret;
+
+ ret = ican3_send_inquiry(mod, INQUIRY_STATUS);
+ if (ret)
+ return ret;
+
+ ret = wait_for_completion_timeout(&mod->buserror_comp, HZ);
+ if (ret <= 0) {
+ dev_info(mod->dev, "%s timed out\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ bec->rxerr = mod->bec.rxerr;
+ bec->txerr = mod->bec.txerr;
+ return 0;
+}
+
+/*
+ * Sysfs Attributes
+ */
+
+static ssize_t ican3_sysfs_show_term(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ican3_dev *mod = netdev_priv(to_net_dev(dev));
+ int ret;
+
+ ret = ican3_send_inquiry(mod, INQUIRY_TERMINATION);
+ if (ret)
+ return ret;
+
+ ret = wait_for_completion_timeout(&mod->termination_comp, HZ);
+ if (ret <= 0) {
+ dev_info(mod->dev, "%s timed out\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", mod->termination_enabled);
+}
+
+static ssize_t ican3_sysfs_set_term(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ican3_dev *mod = netdev_priv(to_net_dev(dev));
+ unsigned long enable;
+ int ret;
+
+ if (strict_strtoul(buf, 0, &enable))
+ return -EINVAL;
+
+ ret = ican3_set_termination(mod, enable);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR(termination, S_IWUGO | S_IRUGO, ican3_sysfs_show_term,
+ ican3_sysfs_set_term);
+
+static struct attribute *ican3_sysfs_attrs[] = {
+ &dev_attr_termination.attr,
+ NULL,
+};
+
+static struct attribute_group ican3_sysfs_attr_group = {
+ .attrs = ican3_sysfs_attrs,
+};
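
Because the attribute group is attached through ndev->sysfs_groups[0] in ican3_probe() below, the termination knob appears in the net device's sysfs directory. A hedged user-space sketch toggling it; the interface name "can0" is an assumption:

	#include <stdio.h>

	int main(void)
	{
		/* path assumes the device was named can0 by the CAN core/udev */
		FILE *f = fopen("/sys/class/net/can0/termination", "w");

		if (!f)
			return 1;
		fputs("1\n", f);	/* handled by ican3_sysfs_set_term() */
		return fclose(f) ? 1 : 0;
	}
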
+
+/*
+ * PCI Subsystem
+ */
+
+static int __devinit ican3_probe(struct platform_device *pdev)
+{
+ struct janz_platform_data *pdata;
+ struct net_device *ndev;
+ struct ican3_dev *mod;
+ struct resource *res;
+ struct device *dev;
+ int ret;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata)
+ return -ENXIO;
+
+ dev_dbg(&pdev->dev, "probe: module number %d\n", pdata->modno);
+
+ /* save the struct device for printing */
+ dev = &pdev->dev;
+
+ /* allocate the CAN device and private data */
+ ndev = alloc_candev(sizeof(*mod), 0);
+ if (!ndev) {
+ dev_err(dev, "unable to allocate CANdev\n");
+ ret = -ENOMEM;
+ goto out_return;
+ }
+
+ platform_set_drvdata(pdev, ndev);
+ mod = netdev_priv(ndev);
+ mod->ndev = ndev;
+ mod->dev = &pdev->dev;
+ mod->num = pdata->modno;
+ netif_napi_add(ndev, &mod->napi, ican3_napi, ICAN3_RX_BUFFERS);
+ spin_lock_init(&mod->lock);
+ init_completion(&mod->termination_comp);
+ init_completion(&mod->buserror_comp);
+
+ /* setup device-specific sysfs attributes */
+ ndev->sysfs_groups[0] = &ican3_sysfs_attr_group;
+
+ /* the first unallocated page in the DPM is 9 */
+ mod->free_page = DPM_FREE_START;
+
+ ndev->netdev_ops = &ican3_netdev_ops;
+ ndev->flags |= IFF_ECHO;
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ mod->can.clock.freq = ICAN3_CAN_CLOCK;
+ mod->can.bittiming_const = &ican3_bittiming_const;
+ mod->can.do_set_bittiming = ican3_set_bittiming;
+ mod->can.do_set_mode = ican3_set_mode;
+ mod->can.do_get_berr_counter = ican3_get_berr_counter;
+ mod->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES
+ | CAN_CTRLMODE_BERR_REPORTING;
+
+ /* find our IRQ number */
+ mod->irq = platform_get_irq(pdev, 0);
+ if (mod->irq < 0) {
+ dev_err(dev, "IRQ line not found\n");
+ ret = -ENODEV;
+ goto out_free_ndev;
+ }
+
+ ndev->irq = mod->irq;
+
+ /* get access to the MODULbus registers for this module */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "MODULbus registers not found\n");
+ ret = -ENODEV;
+ goto out_free_ndev;
+ }
+
+ mod->dpm = ioremap(res->start, resource_size(res));
+ if (!mod->dpm) {
+ dev_err(dev, "MODULbus registers not ioremap\n");
+ ret = -ENOMEM;
+ goto out_free_ndev;
+ }
+
+ mod->dpmctrl = mod->dpm + DPM_PAGE_SIZE;
+
+ /* get access to the control registers for this module */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(dev, "CONTROL registers not found\n");
+ ret = -ENODEV;
+ goto out_iounmap_dpm;
+ }
+
+ mod->ctrl = ioremap(res->start, resource_size(res));
+ if (!mod->ctrl) {
+ dev_err(dev, "CONTROL registers not ioremap\n");
+ ret = -ENOMEM;
+ goto out_iounmap_dpm;
+ }
+
+ /* disable our IRQ, then hookup the IRQ handler */
+ iowrite8(1 << mod->num, &mod->ctrl->int_disable);
+ ret = request_irq(mod->irq, ican3_irq, IRQF_SHARED, DRV_NAME, mod);
+ if (ret) {
+ dev_err(dev, "unable to request IRQ\n");
+ goto out_iounmap_ctrl;
+ }
+
+ /* reset and initialize the CAN controller into fast mode */
+ napi_enable(&mod->napi);
+ ret = ican3_startup_module(mod);
+ if (ret) {
+ dev_err(dev, "%s: unable to start CANdev\n", __func__);
+ goto out_free_irq;
+ }
+
+ /* register with the Linux CAN layer */
+ ret = register_candev(ndev);
+ if (ret) {
+ dev_err(dev, "%s: unable to register CANdev\n", __func__);
+ goto out_free_irq;
+ }
+
+ dev_info(dev, "module %d: registered CAN device\n", pdata->modno);
+ return 0;
+
+out_free_irq:
+ napi_disable(&mod->napi);
+ iowrite8(1 << mod->num, &mod->ctrl->int_disable);
+ free_irq(mod->irq, mod);
+out_iounmap_ctrl:
+ iounmap(mod->ctrl);
+out_iounmap_dpm:
+ iounmap(mod->dpm);
+out_free_ndev:
+ free_candev(ndev);
+out_return:
+ return ret;
+}
+
+static int __devexit ican3_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct ican3_dev *mod = netdev_priv(ndev);
+
+ /* unregister the netdevice, stop interrupts */
+ unregister_netdev(ndev);
+ napi_disable(&mod->napi);
+ iowrite8(1 << mod->num, &mod->ctrl->int_disable);
+ free_irq(mod->irq, mod);
+
+ /* put the module into reset */
+ ican3_shutdown_module(mod);
+
+ /* unmap all registers */
+ iounmap(mod->ctrl);
+ iounmap(mod->dpm);
+
+ free_candev(ndev);
+
+ return 0;
+}
+
+static struct platform_driver ican3_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = ican3_probe,
+ .remove = __devexit_p(ican3_remove),
+};
+
+static int __init ican3_init(void)
+{
+ return platform_driver_register(&ican3_driver);
+}
+
+static void __exit ican3_exit(void)
+{
+ platform_driver_unregister(&ican3_driver);
+}
+
+MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
+MODULE_DESCRIPTION("Janz MODULbus VMOD-ICAN3 Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:janz-ican3");
+
+module_init(ican3_init);
+module_exit(ican3_exit);
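
The store handler above parses its input with strict_strtoul(), which was the idiomatic helper at the time; on kernels where that helper has since been retired, kstrtoul() is the drop-in replacement. A minimal sketch of the same handler rewritten that way (illustrative only, not part of this patch):

static ssize_t ican3_sysfs_set_term(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf, size_t count)
{
        struct ican3_dev *mod = netdev_priv(to_net_dev(dev));
        unsigned long enable;
        int ret;

        /* kstrtoul() (linux/kernel.h) rejects trailing junk and overflow */
        ret = kstrtoul(buf, 0, &enable);
        if (ret)
                return ret;

        ret = ican3_set_termination(mod, enable);
        if (ret)
                return ret;

        return count;
}

Nothing else changes; the attribute wiring via ican3_sysfs_attrs stays identical.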
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index b39b108318b4..b11a0cb5ed81 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -58,7 +58,6 @@
*
*/
-#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/dev.h>
#include <linux/can/platform/mcp251x.h>
@@ -476,7 +475,6 @@ static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb,
netif_stop_queue(net);
priv->tx_skb = skb;
- net->trans_start = jiffies;
queue_work(priv->wq, &priv->tx_work);
return NETDEV_TX_OK;
@@ -923,12 +921,16 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi)
struct net_device *net;
struct mcp251x_priv *priv;
struct mcp251x_platform_data *pdata = spi->dev.platform_data;
+ int model = spi_get_device_id(spi)->driver_data;
int ret = -ENODEV;
if (!pdata)
/* Platform data is required for osc freq */
goto error_out;
+ if (model)
+ pdata->model = model;
+
/* Allocate can/net device */
net = alloc_candev(sizeof(struct mcp251x_priv), TX_ECHO_SKB_MAX);
if (!net) {
@@ -1118,6 +1120,15 @@ static int mcp251x_can_resume(struct spi_device *spi)
#define mcp251x_can_resume NULL
#endif
+static struct spi_device_id mcp251x_id_table[] = {
+ { "mcp251x", 0 /* Use pdata.model */ },
+ { "mcp2510", CAN_MCP251X_MCP2510 },
+ { "mcp2515", CAN_MCP251X_MCP2515 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(spi, mcp251x_id_table);
+
static struct spi_driver mcp251x_can_driver = {
.driver = {
.name = DEVICE_NAME,
@@ -1125,6 +1136,7 @@ static struct spi_driver mcp251x_can_driver = {
.owner = THIS_MODULE,
},
+ .id_table = mcp251x_id_table,
.probe = mcp251x_can_probe,
.remove = __devexit_p(mcp251x_can_remove),
.suspend = mcp251x_can_suspend,
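
The new id_table lets board code select the controller variant by SPI modalias instead of stuffing it into platform data. A hedged sketch of how a board file could describe an MCP2515 this way; bus number, chip select and clock rate are placeholders:

#include <linux/spi/spi.h>
#include <linux/can/platform/mcp251x.h>

static struct mcp251x_platform_data mcp251x_info = {
        .oscillator_frequency = 16000000,       /* still required for bit timing */
};

static struct spi_board_info board_spi_devices[] __initdata = {
        {
                .modalias      = "mcp2515",     /* matched against mcp251x_id_table */
                .platform_data = &mcp251x_info,
                .max_speed_hz  = 10000000,
                .bus_num       = 1,             /* placeholder */
                .chip_select   = 0,             /* placeholder */
        },
};

/* from board init code:
 * spi_register_board_info(board_spi_devices, ARRAY_SIZE(board_spi_devices));
 */

With the "mcp2515" modalias, pdata->model is filled in from the table at probe time.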
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 03e7c48465a2..225fd147774a 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -25,7 +25,6 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/of_platform.h>
#include <sysdev/fsl_soc.h>
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 6b7dd578d417..64c378cd0c34 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -28,7 +28,6 @@
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/list.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/io.h>
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index 9e277d64a318..ae3505afd682 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -53,7 +53,9 @@ config CAN_PLX_PCI
Driver supports now:
- Adlink PCI-7841/cPCI-7841 card (http://www.adlinktech.com/)
- Adlink PCI-7841/cPCI-7841 SE card
+ - esd CAN-PCI/CPCI/PCI104/200 (http://www.esd.eu/)
+ - esd CAN-PCI/PMC/266
+ - esd CAN-PCIe/2000
- Marathon CAN-bus-PCI card (http://www.marathon.ru/)
- TEWS TECHNOLOGIES TPMC810 card (http://www.tews.com/)
-
endif
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index 5f53da0bc40c..36f4f9780c30 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -24,7 +24,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/io.h>
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
index 441e776a7f59..ed004cebd31f 100644
--- a/drivers/net/can/sja1000/kvaser_pci.c
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -36,7 +36,6 @@
#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/pci.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/io.h>
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index 4aff4070db96..437b5c716a24 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -27,7 +27,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/io.h>
@@ -41,7 +40,10 @@ MODULE_DESCRIPTION("Socket-CAN driver for PLX90xx PCI-bridge cards with "
MODULE_SUPPORTED_DEVICE("Adlink PCI-7841/cPCI-7841, "
"Adlink PCI-7841/cPCI-7841 SE, "
"Marathon CAN-bus-PCI, "
- "TEWS TECHNOLOGIES TPMC810");
+ "TEWS TECHNOLOGIES TPMC810, "
+ "esd CAN-PCI/CPCI/PCI104/200, "
+ "esd CAN-PCI/PMC/266, "
+ "esd CAN-PCIe/2000")
MODULE_LICENSE("GPL v2");
#define PLX_PCI_MAX_CHAN 2
@@ -50,11 +52,14 @@ struct plx_pci_card {
int channels; /* detected channels count */
struct net_device *net_dev[PLX_PCI_MAX_CHAN];
void __iomem *conf_addr;
+
+ /* Pointer to device-dependent reset function */
+ void (*reset_func)(struct pci_dev *pdev);
};
#define PLX_PCI_CAN_CLOCK (16000000 / 2)
-/* PLX90xx registers */
+/* PLX9030/9050/9052 registers */
#define PLX_INTCSR 0x4c /* Interrupt Control/Status */
#define PLX_CNTRL 0x50 /* User I/O, Direct Slave Response,
* Serial EEPROM, and Initialization
@@ -66,6 +71,14 @@ struct plx_pci_card {
#define PLX_PCI_INT_EN (1 << 6) /* PCI Interrupt Enable */
#define PLX_PCI_RESET (1 << 30) /* PCI Adapter Software Reset */
+/* PLX9056 registers */
+#define PLX9056_INTCSR 0x68 /* Interrupt Control/Status */
+#define PLX9056_CNTRL 0x6c /* Control / Software Reset */
+
+#define PLX9056_LINTI (1 << 11)
+#define PLX9056_PCI_INT_EN (1 << 8)
+#define PLX9056_PCI_RCR (1 << 29) /* Read Configuration Registers */
+
/*
* The board configuration is probably following:
* RX1 is connected to ground.
@@ -101,6 +114,13 @@ struct plx_pci_card {
#define ADLINK_PCI_VENDOR_ID 0x144A
#define ADLINK_PCI_DEVICE_ID 0x7841
+#define ESD_PCI_SUB_SYS_ID_PCI200 0x0004
+#define ESD_PCI_SUB_SYS_ID_PCI266 0x0009
+#define ESD_PCI_SUB_SYS_ID_PMC266 0x000e
+#define ESD_PCI_SUB_SYS_ID_CPCI200 0x010b
+#define ESD_PCI_SUB_SYS_ID_PCIE2000 0x0200
+#define ESD_PCI_SUB_SYS_ID_PCI104200 0x0501
+
#define MARATHON_PCI_DEVICE_ID 0x2715
#define TEWS_PCI_VENDOR_ID 0x1498
@@ -108,6 +128,7 @@ struct plx_pci_card {
static void plx_pci_reset_common(struct pci_dev *pdev);
static void plx_pci_reset_marathon(struct pci_dev *pdev);
+static void plx9056_pci_reset_common(struct pci_dev *pdev);
struct plx_pci_channel_map {
u32 bar;
@@ -148,6 +169,30 @@ static struct plx_pci_card_info plx_pci_card_info_adlink_se __devinitdata = {
/* based on PLX9052 */
};
+static struct plx_pci_card_info plx_pci_card_info_esd200 __devinitdata = {
+ "esd CAN-PCI/CPCI/PCI104/200", 2,
+ PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+ {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} },
+ &plx_pci_reset_common
+ /* based on PLX9030/9050 */
+};
+
+static struct plx_pci_card_info plx_pci_card_info_esd266 __devinitdata = {
+ "esd CAN-PCI/PMC/266", 2,
+ PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+ {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} },
+ &plx9056_pci_reset_common
+ /* based on PLX9056 */
+};
+
+static struct plx_pci_card_info plx_pci_card_info_esd2000 __devinitdata = {
+ "esd CAN-PCIe/2000", 2,
+ PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+ {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} },
+ &plx9056_pci_reset_common
+ /* based on PEX8311 */
+};
+
static struct plx_pci_card_info plx_pci_card_info_marathon __devinitdata = {
"Marathon CAN-bus-PCI", 2,
PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
@@ -180,6 +225,48 @@ static DEFINE_PCI_DEVICE_TABLE(plx_pci_tbl) = {
(kernel_ulong_t)&plx_pci_card_info_adlink_se
},
{
+ /* esd CAN-PCI/200 */
+ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
+ PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI200,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_esd200
+ },
+ {
+ /* esd CAN-CPCI/200 */
+ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
+ PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_CPCI200,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_esd200
+ },
+ {
+ /* esd CAN-PCI104/200 */
+ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
+ PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI104200,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_esd200
+ },
+ {
+ /* esd CAN-PCI/266 */
+ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056,
+ PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI266,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_esd266
+ },
+ {
+ /* esd CAN-PMC/266 */
+ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056,
+ PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PMC266,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_esd266
+ },
+ {
+ /* esd CAN-PCIE/2000 */
+ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056,
+ PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCIE2000,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_esd2000
+ },
+ {
/* Marathon CAN-bus-PCI card */
PCI_VENDOR_ID_PLX, MARATHON_PCI_DEVICE_ID,
PCI_ANY_ID, PCI_ANY_ID,
@@ -242,7 +329,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv)
}
/*
- * PLX90xx software reset
+ * PLX9030/50/52 software reset
* Also LRESET# asserts and brings to reset device on the Local Bus (if wired).
* For most cards it's enough for reset the SJA1000 chips.
*/
@@ -259,6 +346,38 @@ static void plx_pci_reset_common(struct pci_dev *pdev)
iowrite32(cntrl, card->conf_addr + PLX_CNTRL);
};
+/*
+ * PLX9056 software reset
+ * Assert LRESET# and reset device(s) on the Local Bus (if wired).
+ */
+static void plx9056_pci_reset_common(struct pci_dev *pdev)
+{
+ struct plx_pci_card *card = pci_get_drvdata(pdev);
+ u32 cntrl;
+
+ /* issue a local bus reset */
+ cntrl = ioread32(card->conf_addr + PLX9056_CNTRL);
+ cntrl |= PLX_PCI_RESET;
+ iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL);
+ udelay(100);
+ cntrl ^= PLX_PCI_RESET;
+ iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL);
+
+ /* reload local configuration from EEPROM */
+ cntrl |= PLX9056_PCI_RCR;
+ iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL);
+
+ /*
+ * There is no safe way to poll for the end
+ * of reconfiguration process. Waiting for 10ms
+ * is safe.
+ */
+ mdelay(10);
+
+ cntrl ^= PLX9056_PCI_RCR;
+ iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL);
+};
+
/* Special reset function for Marathon card */
static void plx_pci_reset_marathon(struct pci_dev *pdev)
{
@@ -302,13 +421,16 @@ static void plx_pci_del_card(struct pci_dev *pdev)
free_sja1000dev(dev);
}
- plx_pci_reset_common(pdev);
+ card->reset_func(pdev);
/*
- * Disable interrupts from PCI-card (PLX90xx) and disable Local_1,
- * Local_2 interrupts
+ * Disable interrupts from PCI-card and disable local
+ * interrupts
*/
- iowrite32(0x0, card->conf_addr + PLX_INTCSR);
+ if (pdev->device != PCI_DEVICE_ID_PLX_9056)
+ iowrite32(0x0, card->conf_addr + PLX_INTCSR);
+ else
+ iowrite32(0x0, card->conf_addr + PLX9056_INTCSR);
if (card->conf_addr)
pci_iounmap(pdev, card->conf_addr);
@@ -367,6 +489,7 @@ static int __devinit plx_pci_add_card(struct pci_dev *pdev,
card->conf_addr = addr + ci->conf_map.offset;
ci->reset_func(pdev);
+ card->reset_func = ci->reset_func;
/* Detect available channels */
for (i = 0; i < ci->channel_count; i++) {
@@ -438,10 +561,17 @@ static int __devinit plx_pci_add_card(struct pci_dev *pdev,
* Enable interrupts from PCI-card (PLX90xx) and enable Local_1,
* Local_2 interrupts from the SJA1000 chips
*/
- val = ioread32(card->conf_addr + PLX_INTCSR);
- val |= PLX_LINT1_EN | PLX_LINT2_EN | PLX_PCI_INT_EN;
- iowrite32(val, card->conf_addr + PLX_INTCSR);
-
+ if (pdev->device != PCI_DEVICE_ID_PLX_9056) {
+ val = ioread32(card->conf_addr + PLX_INTCSR);
+ if (pdev->subsystem_vendor == PCI_VENDOR_ID_ESDGMBH)
+ val |= PLX_LINT1_EN | PLX_PCI_INT_EN;
+ else
+ val |= PLX_LINT1_EN | PLX_LINT2_EN | PLX_PCI_INT_EN;
+ iowrite32(val, card->conf_addr + PLX_INTCSR);
+ } else {
+ iowrite32(PLX9056_LINTI | PLX9056_PCI_INT_EN,
+ card->conf_addr + PLX9056_INTCSR);
+ }
return 0;
failure_cleanup:
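
Each esd entry above relies on the PLX bridge vendor/device pair plus esd's subsystem IDs to tell the boards apart; for orientation, one of the positional table initializers corresponds to the following named-field form (same values, purely illustrative):

/* one of the entries above, with the positional initializers named */
static const struct pci_device_id esd_can_pci266_entry = {
        .vendor      = PCI_VENDOR_ID_PLX,
        .device      = PCI_DEVICE_ID_PLX_9056,
        .subvendor   = PCI_VENDOR_ID_ESDGMBH,
        .subdevice   = ESD_PCI_SUB_SYS_ID_PCI266,
        .class       = 0,
        .class_mask  = 0,
        .driver_data = (kernel_ulong_t)&plx_pci_card_info_esd266,
};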
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 145b1a731a53..24b58619f7c1 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -60,7 +60,6 @@
#include <linux/skbuff.h>
#include <linux/delay.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
@@ -293,8 +292,6 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
for (i = 0; i < dlc; i++)
priv->write_reg(priv, dreg++, cf->data[i]);
- dev->trans_start = jiffies;
-
can_put_echo_skb(skb, dev, 0);
priv->write_reg(priv, REG_CMR, CMD_TR);
diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c
index a6a51f155962..496223e9e2fc 100644
--- a/drivers/net/can/sja1000/sja1000_isa.c
+++ b/drivers/net/can/sja1000/sja1000_isa.c
@@ -23,7 +23,6 @@
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/io.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/platform/sja1000.h>
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index 9dd076a626a5..34e79efbd2fc 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -38,7 +38,6 @@
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/of_platform.h>
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 628374c2a05f..b65cabb361ab 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -24,7 +24,6 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/platform/sja1000.h>
#include <linux/io.h>
@@ -37,16 +36,36 @@ MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the platform bus");
MODULE_LICENSE("GPL v2");
-static u8 sp_read_reg(const struct sja1000_priv *priv, int reg)
+static u8 sp_read_reg8(const struct sja1000_priv *priv, int reg)
{
return ioread8(priv->reg_base + reg);
}
-static void sp_write_reg(const struct sja1000_priv *priv, int reg, u8 val)
+static void sp_write_reg8(const struct sja1000_priv *priv, int reg, u8 val)
{
iowrite8(val, priv->reg_base + reg);
}
+static u8 sp_read_reg16(const struct sja1000_priv *priv, int reg)
+{
+ return ioread8(priv->reg_base + reg * 2);
+}
+
+static void sp_write_reg16(const struct sja1000_priv *priv, int reg, u8 val)
+{
+ iowrite8(val, priv->reg_base + reg * 2);
+}
+
+static u8 sp_read_reg32(const struct sja1000_priv *priv, int reg)
+{
+ return ioread8(priv->reg_base + reg * 4);
+}
+
+static void sp_write_reg32(const struct sja1000_priv *priv, int reg, u8 val)
+{
+ iowrite8(val, priv->reg_base + reg * 4);
+}
+
static int sp_probe(struct platform_device *pdev)
{
int err;
@@ -90,14 +109,28 @@ static int sp_probe(struct platform_device *pdev)
priv = netdev_priv(dev);
dev->irq = res_irq->start;
- priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
+ priv->irq_flags = res_irq->flags & (IRQF_TRIGGER_MASK | IRQF_SHARED);
priv->reg_base = addr;
- priv->read_reg = sp_read_reg;
- priv->write_reg = sp_write_reg;
priv->can.clock.freq = pdata->clock;
priv->ocr = pdata->ocr;
priv->cdr = pdata->cdr;
+ switch (res_mem->flags & IORESOURCE_MEM_TYPE_MASK) {
+ case IORESOURCE_MEM_32BIT:
+ priv->read_reg = sp_read_reg32;
+ priv->write_reg = sp_write_reg32;
+ break;
+ case IORESOURCE_MEM_16BIT:
+ priv->read_reg = sp_read_reg16;
+ priv->write_reg = sp_write_reg16;
+ break;
+ case IORESOURCE_MEM_8BIT:
+ default:
+ priv->read_reg = sp_read_reg8;
+ priv->write_reg = sp_write_reg8;
+ break;
+ }
+
dev_set_drvdata(&pdev->dev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
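
Which accessor pair gets installed is decided solely by the memory resource flags the board passes in. A hedged sketch of platform resources that would select the 16-bit accessors; the base address and IRQ number are placeholders:

static struct resource sja1000_16bit_resources[] = {
        {
                .start = 0x80000000,            /* placeholder base address */
                .end   = 0x80000000 + 0x100 - 1,
                .flags = IORESOURCE_MEM | IORESOURCE_MEM_16BIT,
        },
        {
                .start = 42,                    /* placeholder IRQ */
                .end   = 42,
                .flags = IORESOURCE_IRQ,
        },
};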
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 0c3d2ba0d178..4d07f1ee7168 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -47,7 +47,6 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/can/platform/ti_hecc.h>
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 9bd155e4111c..6d76236ea069 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -2889,7 +2889,6 @@ static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
return NETDEV_TX_BUSY;
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
@@ -2957,20 +2956,20 @@ static void cas_process_mc_list(struct cas *cp)
{
u16 hash_table[16];
u32 crc;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int i = 1;
memset(hash_table, 0, sizeof(hash_table));
- netdev_for_each_mc_addr(dmi, cp->dev) {
+ netdev_for_each_mc_addr(ha, cp->dev) {
if (i <= CAS_MC_EXACT_MATCH_SIZE) {
/* use the alternate mac address registers for the
* first 15 multicast addresses
*/
- writel((dmi->dmi_addr[4] << 8) | dmi->dmi_addr[5],
+ writel((ha->addr[4] << 8) | ha->addr[5],
cp->regs + REG_MAC_ADDRN(i*3 + 0));
- writel((dmi->dmi_addr[2] << 8) | dmi->dmi_addr[3],
+ writel((ha->addr[2] << 8) | ha->addr[3],
cp->regs + REG_MAC_ADDRN(i*3 + 1));
- writel((dmi->dmi_addr[0] << 8) | dmi->dmi_addr[1],
+ writel((ha->addr[0] << 8) | ha->addr[1],
cp->regs + REG_MAC_ADDRN(i*3 + 2));
i++;
}
@@ -2978,7 +2977,7 @@ static void cas_process_mc_list(struct cas *cp)
/* use hw hash table for the next series of
* multicast addresses
*/
- crc = ether_crc_le(ETH_ALEN, dmi->dmi_addr);
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
crc >>= 24;
hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
}
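
The cassini hunk above is the same mechanical conversion applied across this series: struct dev_mc_list and its dmi_addr member give way to struct netdev_hw_addr and ha->addr, with netdev_for_each_mc_addr() unchanged. The generic shape of the new-style loop, as a self-contained sketch:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>

static void example_build_mc_hash(struct net_device *dev, u16 *hash_table)
{
        struct netdev_hw_addr *ha;
        u32 crc;

        netdev_for_each_mc_addr(ha, dev) {
                crc = ether_crc_le(ETH_ALEN, ha->addr);   /* was dmi->dmi_addr */
                crc >>= 24;
                hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
        }
}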
diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c
index 9e631b9d3948..7dbb16d36fff 100644
--- a/drivers/net/chelsio/pm3393.c
+++ b/drivers/net/chelsio/pm3393.c
@@ -377,12 +377,13 @@ static int pm3393_set_rx_mode(struct cmac *cmac, struct t1_rx_mode *rm)
rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
} else if (t1_rx_mode_mc_cnt(rm)) {
/* Accept one or more multicast(s). */
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int bit;
u16 mc_filter[4] = { 0, };
- netdev_for_each_mc_addr(dmi, t1_get_netdev(rm)) {
- bit = (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 23) & 0x3f; /* bit[23:28] */
+ netdev_for_each_mc_addr(ha, t1_get_netdev(rm)) {
+ /* bit[23:28] */
+ bit = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x3f;
mc_filter[bit >> 4] |= 1 << (bit & 0xf);
}
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, mc_filter[0]);
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index df3a1410696e..f01cfdb995de 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -162,14 +162,14 @@ struct respQ_e {
*/
struct cmdQ_ce {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(dma_addr);
- DECLARE_PCI_UNMAP_LEN(dma_len);
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
+ DEFINE_DMA_UNMAP_LEN(dma_len);
};
struct freelQ_ce {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(dma_addr);
- DECLARE_PCI_UNMAP_LEN(dma_len);
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
+ DEFINE_DMA_UNMAP_LEN(dma_len);
};
/*
@@ -460,7 +460,7 @@ static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
again:
for (i = 0; i < MAX_NPORTS; i++) {
- s->port = ++s->port & (MAX_NPORTS - 1);
+ s->port = (s->port + 1) & (MAX_NPORTS - 1);
skbq = &s->p[s->port].skbq;
skb = skb_peek(skbq);
@@ -518,8 +518,8 @@ static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
while (q->credits--) {
struct freelQ_ce *ce = &q->centries[cidx];
- pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
- pci_unmap_len(ce, dma_len),
+ pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len),
PCI_DMA_FROMDEVICE);
dev_kfree_skb(ce->skb);
ce->skb = NULL;
@@ -633,9 +633,9 @@ static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
q->in_use -= n;
ce = &q->centries[cidx];
while (n--) {
- if (likely(pci_unmap_len(ce, dma_len))) {
- pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
- pci_unmap_len(ce, dma_len),
+ if (likely(dma_unmap_len(ce, dma_len))) {
+ pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len),
PCI_DMA_TODEVICE);
if (q->sop)
q->sop = 0;
@@ -851,8 +851,8 @@ static void refill_free_list(struct sge *sge, struct freelQ *q)
skb_reserve(skb, sge->rx_pkt_pad);
ce->skb = skb;
- pci_unmap_addr_set(ce, dma_addr, mapping);
- pci_unmap_len_set(ce, dma_len, dma_len);
+ dma_unmap_addr_set(ce, dma_addr, mapping);
+ dma_unmap_len_set(ce, dma_len, dma_len);
e->addr_lo = (u32)mapping;
e->addr_hi = (u64)mapping >> 32;
e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
@@ -1059,13 +1059,13 @@ static inline struct sk_buff *get_packet(struct pci_dev *pdev,
skb_reserve(skb, 2); /* align IP header */
skb_put(skb, len);
pci_dma_sync_single_for_cpu(pdev,
- pci_unmap_addr(ce, dma_addr),
- pci_unmap_len(ce, dma_len),
+ dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len),
PCI_DMA_FROMDEVICE);
skb_copy_from_linear_data(ce->skb, skb->data, len);
pci_dma_sync_single_for_device(pdev,
- pci_unmap_addr(ce, dma_addr),
- pci_unmap_len(ce, dma_len),
+ dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len),
PCI_DMA_FROMDEVICE);
recycle_fl_buf(fl, fl->cidx);
return skb;
@@ -1077,8 +1077,8 @@ use_orig_buf:
return NULL;
}
- pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
- pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+ pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
skb = ce->skb;
prefetch(skb->data);
@@ -1100,8 +1100,8 @@ static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
struct freelQ_ce *ce = &fl->centries[fl->cidx];
struct sk_buff *skb = ce->skb;
- pci_dma_sync_single_for_cpu(adapter->pdev, pci_unmap_addr(ce, dma_addr),
- pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+ pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
pr_err("%s: unexpected offload packet, cmd %u\n",
adapter->name, *skb->data);
recycle_fl_buf(fl, fl->cidx);
@@ -1123,7 +1123,7 @@ static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
unsigned int nfrags = skb_shinfo(skb)->nr_frags;
- unsigned int i, len = skb->len - skb->data_len;
+ unsigned int i, len = skb_headlen(skb);
while (len > SGE_TX_DESC_MAX_PLEN) {
count++;
len -= SGE_TX_DESC_MAX_PLEN;
@@ -1182,7 +1182,7 @@ static inline unsigned int write_large_page_tx_descs(unsigned int pidx,
write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN,
*gen, nfrags == 0 && *desc_len == 0);
ce1->skb = NULL;
- pci_unmap_len_set(ce1, dma_len, 0);
+ dma_unmap_len_set(ce1, dma_len, 0);
*desc_mapping += SGE_TX_DESC_MAX_PLEN;
if (*desc_len) {
ce1++;
@@ -1219,10 +1219,10 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
ce = &q->centries[pidx];
mapping = pci_map_single(adapter->pdev, skb->data,
- skb->len - skb->data_len, PCI_DMA_TODEVICE);
+ skb_headlen(skb), PCI_DMA_TODEVICE);
desc_mapping = mapping;
- desc_len = skb->len - skb->data_len;
+ desc_len = skb_headlen(skb);
flags = F_CMD_DATAVALID | F_CMD_SOP |
V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) |
@@ -1233,7 +1233,7 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
e->addr_hi = (u64)desc_mapping >> 32;
e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen);
ce->skb = NULL;
- pci_unmap_len_set(ce, dma_len, 0);
+ dma_unmap_len_set(ce, dma_len, 0);
if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN &&
desc_len > SGE_TX_DESC_MAX_PLEN) {
@@ -1257,8 +1257,8 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
}
ce->skb = NULL;
- pci_unmap_addr_set(ce, dma_addr, mapping);
- pci_unmap_len_set(ce, dma_len, skb->len - skb->data_len);
+ dma_unmap_addr_set(ce, dma_addr, mapping);
+ dma_unmap_len_set(ce, dma_len, skb_headlen(skb));
for (i = 0; nfrags--; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -1284,8 +1284,8 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
write_tx_desc(e1, desc_mapping, desc_len, gen,
nfrags == 0);
ce->skb = NULL;
- pci_unmap_addr_set(ce, dma_addr, mapping);
- pci_unmap_len_set(ce, dma_len, frag->size);
+ dma_unmap_addr_set(ce, dma_addr, mapping);
+ dma_unmap_len_set(ce, dma_len, frag->size);
}
ce->skb = skb;
wmb();
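
The chelsio conversion swaps the legacy pci_unmap_* bookkeeping macros for their generic DMA API counterparts; the DEFINE_* forms still expand to real struct members only on architectures that need the extra state. A minimal sketch of the pattern in its pure dma_* form (the driver above keeps calling pci_unmap_single(), which is equivalent):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct example_sw_desc {
        struct sk_buff *skb;
        DEFINE_DMA_UNMAP_ADDR(dma_addr);        /* was DECLARE_PCI_UNMAP_ADDR */
        DEFINE_DMA_UNMAP_LEN(dma_len);          /* was DECLARE_PCI_UNMAP_LEN  */
};

static void example_save_mapping(struct example_sw_desc *d,
                                 dma_addr_t mapping, unsigned int len)
{
        dma_unmap_addr_set(d, dma_addr, mapping);
        dma_unmap_len_set(d, dma_len, len);
}

static void example_unmap(struct device *dev, struct example_sw_desc *d)
{
        dma_unmap_single(dev, dma_unmap_addr(d, dma_addr),
                         dma_unmap_len(d, dma_len), DMA_TO_DEVICE);
}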
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 60777fd90b33..3c58db595285 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -328,7 +328,7 @@ static int cpmac_config(struct net_device *dev, struct ifmap *map)
static void cpmac_set_multicast_list(struct net_device *dev)
{
- struct dev_mc_list *iter;
+ struct netdev_hw_addr *ha;
u8 tmp;
u32 mbp, bit, hash[2] = { 0, };
struct cpmac_priv *priv = netdev_priv(dev);
@@ -348,19 +348,19 @@ static void cpmac_set_multicast_list(struct net_device *dev)
* cpmac uses some strange mac address hashing
* (not crc32)
*/
- netdev_for_each_mc_addr(iter, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
bit = 0;
- tmp = iter->dmi_addr[0];
+ tmp = ha->addr[0];
bit ^= (tmp >> 2) ^ (tmp << 4);
- tmp = iter->dmi_addr[1];
+ tmp = ha->addr[1];
bit ^= (tmp >> 4) ^ (tmp << 2);
- tmp = iter->dmi_addr[2];
+ tmp = ha->addr[2];
bit ^= (tmp >> 6) ^ tmp;
- tmp = iter->dmi_addr[3];
+ tmp = ha->addr[3];
bit ^= (tmp >> 2) ^ (tmp << 4);
- tmp = iter->dmi_addr[4];
+ tmp = ha->addr[4];
bit ^= (tmp >> 4) ^ (tmp << 2);
- tmp = iter->dmi_addr[5];
+ tmp = ha->addr[5];
bit ^= (tmp >> 6) ^ tmp;
bit &= 0x3f;
hash[bit / 32] |= 1 << (bit % 32);
@@ -579,7 +579,6 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
spin_lock(&priv->lock);
- dev->trans_start = jiffies;
spin_unlock(&priv->lock);
desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
desc->skb = skb;
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 61a33914e96f..7e00027b9f8e 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -1108,7 +1108,7 @@ e100_send_packet(struct sk_buff *skb, struct net_device *dev)
myNextTxDesc->skb = skb;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
e100_hardware_send_packet(np, buf, skb->len);
@@ -1595,16 +1595,16 @@ set_multicast_list(struct net_device *dev)
} else {
/* MC mode, receive normal and MC packets */
char hash_ix;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *baddr;
lo_bits = 0x00000000ul;
hi_bits = 0x00000000ul;
- netdev_for_each_mc_addr(dmi, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
/* Calculate the hash index for the GA registers */
hash_ix = 0;
- baddr = dmi->dmi_addr;
+ baddr = ha->addr;
hash_ix ^= (*baddr) & 0x3f;
hash_ix ^= ((*baddr) >> 6) & 0x03;
++baddr;
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index 4c38491b8efb..2281ebcb400b 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -1554,7 +1554,6 @@ static netdev_tx_t net_send_packet(struct sk_buff *skb,struct net_device *dev)
writewords(dev->base_addr, TX_FRAME_PORT,skb->data,(skb->len+1) >>1);
spin_unlock_irqrestore(&lp->lock, flags);
lp->stats.tx_bytes += skb->len;
- dev->trans_start = jiffies;
dev_kfree_skb (skb);
/*
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 07d7e7fab3f5..5962b911b5bd 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -118,7 +118,7 @@ struct rx_sw_desc { /* SW state per Rx descriptor */
struct sk_buff *skb;
struct fl_pg_chunk pg_chunk;
};
- DECLARE_PCI_UNMAP_ADDR(dma_addr);
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
};
struct rsp_desc { /* response queue descriptor */
@@ -208,7 +208,7 @@ static inline int need_skb_unmap(void)
* unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
*/
struct dummy {
- DECLARE_PCI_UNMAP_ADDR(addr);
+ DEFINE_DMA_UNMAP_ADDR(addr);
};
return sizeof(struct dummy) != 0;
@@ -363,7 +363,7 @@ static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
put_page(d->pg_chunk.page);
d->pg_chunk.page = NULL;
} else {
- pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
+ pci_unmap_single(pdev, dma_unmap_addr(d, dma_addr),
q->buf_size, PCI_DMA_FROMDEVICE);
kfree_skb(d->skb);
d->skb = NULL;
@@ -419,7 +419,7 @@ static inline int add_one_rx_buf(void *va, unsigned int len,
if (unlikely(pci_dma_mapping_error(pdev, mapping)))
return -ENOMEM;
- pci_unmap_addr_set(sd, dma_addr, mapping);
+ dma_unmap_addr_set(sd, dma_addr, mapping);
d->addr_lo = cpu_to_be32(mapping);
d->addr_hi = cpu_to_be32((u64) mapping >> 32);
@@ -515,7 +515,7 @@ nomem: q->alloc_failed++;
break;
}
mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
- pci_unmap_addr_set(sd, dma_addr, mapping);
+ dma_unmap_addr_set(sd, dma_addr, mapping);
add_one_rx_chunk(mapping, d, q->gen);
pci_dma_sync_single_for_device(adap->pdev, mapping,
@@ -791,11 +791,11 @@ static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
if (likely(skb != NULL)) {
__skb_put(skb, len);
pci_dma_sync_single_for_cpu(adap->pdev,
- pci_unmap_addr(sd, dma_addr), len,
+ dma_unmap_addr(sd, dma_addr), len,
PCI_DMA_FROMDEVICE);
memcpy(skb->data, sd->skb->data, len);
pci_dma_sync_single_for_device(adap->pdev,
- pci_unmap_addr(sd, dma_addr), len,
+ dma_unmap_addr(sd, dma_addr), len,
PCI_DMA_FROMDEVICE);
} else if (!drop_thres)
goto use_orig_buf;
@@ -810,7 +810,7 @@ recycle:
goto recycle;
use_orig_buf:
- pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
+ pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr),
fl->buf_size, PCI_DMA_FROMDEVICE);
skb = sd->skb;
skb_put(skb, len);
@@ -843,7 +843,7 @@ static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
struct sk_buff *newskb, *skb;
struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
- dma_addr_t dma_addr = pci_unmap_addr(sd, dma_addr);
+ dma_addr_t dma_addr = dma_unmap_addr(sd, dma_addr);
newskb = skb = q->pg_skb;
if (!skb && (len <= SGE_RX_COPY_THRES)) {
@@ -2097,7 +2097,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
fl->credits--;
pci_dma_sync_single_for_cpu(adap->pdev,
- pci_unmap_addr(sd, dma_addr),
+ dma_unmap_addr(sd, dma_addr),
fl->buf_size - SGE_PG_RSVD,
PCI_DMA_FROMDEVICE);
diff --git a/drivers/net/cxgb3/xgmac.c b/drivers/net/cxgb3/xgmac.c
index c142a2132e9f..3af19a550372 100644
--- a/drivers/net/cxgb3/xgmac.c
+++ b/drivers/net/cxgb3/xgmac.c
@@ -311,16 +311,16 @@ int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev)
if (dev->flags & IFF_ALLMULTI)
hash_lo = hash_hi = 0xffffffff;
else {
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int exact_addr_idx = mac->nucast;
hash_lo = hash_hi = 0;
- netdev_for_each_mc_addr(dmi, dev)
+ netdev_for_each_mc_addr(ha, dev)
if (exact_addr_idx < EXACT_ADDR_FILTERS)
set_addr_filter(mac, exact_addr_idx++,
- dmi->dmi_addr);
+ ha->addr);
else {
- int hash = hash_hw_addr(dmi->dmi_addr);
+ int hash = hash_hw_addr(ha->addr);
if (hash < 32)
hash_lo |= (1 << hash);
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
index 3d8ff4889b56..8856a75fcc94 100644
--- a/drivers/net/cxgb4/cxgb4.h
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -53,7 +53,7 @@
enum {
MAX_NPORTS = 4, /* max # of ports */
- SERNUM_LEN = 16, /* Serial # length */
+ SERNUM_LEN = 24, /* Serial # length */
EC_LEN = 16, /* E/C length */
ID_LEN = 16, /* ID length */
};
@@ -651,8 +651,6 @@ int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
struct link_config *lc);
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
int t4_seeprom_wp(struct adapter *adapter, bool enable);
-int t4_read_flash(struct adapter *adapter, unsigned int addr,
- unsigned int nwords, u32 *data, int byte_oriented);
int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
int t4_check_fw_version(struct adapter *adapter);
int t4_prep_adapter(struct adapter *adapter);
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index a7e30a23d322..1bad50041427 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -240,9 +240,9 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
u16 filt_idx[7];
const u8 *addr[7];
int ret, naddr = 0;
- const struct dev_addr_list *d;
const struct netdev_hw_addr *ha;
int uc_cnt = netdev_uc_count(dev);
+ int mc_cnt = netdev_mc_count(dev);
const struct port_info *pi = netdev_priv(dev);
/* first do the secondary unicast addresses */
@@ -260,9 +260,9 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
}
/* next set up the multicast addresses */
- netdev_for_each_mc_addr(d, dev) {
- addr[naddr++] = d->dmi_addr;
- if (naddr >= ARRAY_SIZE(addr) || d->next == NULL) {
+ netdev_for_each_mc_addr(ha, dev) {
+ addr[naddr++] = ha->addr;
+ if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free,
naddr, addr, filt_idx, &mhash, sleep);
if (ret < 0)
@@ -1711,6 +1711,18 @@ static int set_tso(struct net_device *dev, u32 value)
return 0;
}
+static int set_flags(struct net_device *dev, u32 flags)
+{
+ if (flags & ~ETH_FLAG_RXHASH)
+ return -EOPNOTSUPP;
+
+ if (flags & ETH_FLAG_RXHASH)
+ dev->features |= NETIF_F_RXHASH;
+ else
+ dev->features &= ~NETIF_F_RXHASH;
+ return 0;
+}
+
static struct ethtool_ops cxgb_ethtool_ops = {
.get_settings = get_settings,
.set_settings = set_settings,
@@ -1741,6 +1753,7 @@ static struct ethtool_ops cxgb_ethtool_ops = {
.get_wol = get_wol,
.set_wol = set_wol,
.set_tso = set_tso,
+ .set_flags = set_flags,
.flash_device = set_flash,
};
@@ -3203,7 +3216,7 @@ static int __devinit init_one(struct pci_dev *pdev,
netdev->features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
- netdev->features |= NETIF_F_GRO | highdma;
+ netdev->features |= NETIF_F_GRO | NETIF_F_RXHASH | highdma;
netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
netdev->vlan_features = netdev->features & VLAN_FEAT;
diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c
index 14adc58e71c3..d1f8f225e45a 100644
--- a/drivers/net/cxgb4/sge.c
+++ b/drivers/net/cxgb4/sge.c
@@ -1471,7 +1471,7 @@ EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
* Releases the pages of a packet gather list. We do not own the last
* page on the list and do not free it.
*/
-void t4_pktgl_free(const struct pkt_gl *gl)
+static void t4_pktgl_free(const struct pkt_gl *gl)
{
int n;
const skb_frag_t *p;
@@ -1524,6 +1524,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
skb->truesize += skb->data_len;
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_record_rx_queue(skb, rxq->rspq.idx);
+ if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
+ skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
if (unlikely(pkt->vlan_ex)) {
struct port_info *pi = netdev_priv(rxq->rspq.netdev);
@@ -1565,7 +1567,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT))
return handle_trace_pkt(q->adap, si);
- pkt = (void *)&rsp[1];
+ pkt = (const struct cpl_rx_pkt *)rsp;
csum_ok = pkt->csum_calc && !pkt->err_vec;
if ((pkt->l2info & htonl(RXF_TCP)) &&
(q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
@@ -1583,6 +1585,9 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
__skb_pull(skb, RX_PKT_PAD); /* remove ethernet header padding */
skb->protocol = eth_type_trans(skb, q->netdev);
skb_record_rx_queue(skb, q->idx);
+ if (skb->dev->features & NETIF_F_RXHASH)
+ skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
+
pi = netdev_priv(skb->dev);
rxq->stats.pkts++;
@@ -2047,7 +2052,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
adap->sge.ingr_map[iq->cntxt_id] = iq;
if (fl) {
- fl->cntxt_id = htons(c.fl0id);
+ fl->cntxt_id = ntohs(c.fl0id);
fl->avail = fl->pend_cred = 0;
fl->pidx = fl->cidx = 0;
fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
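
The htons -> ntohs change above is easy to miss: fl0id arrives from firmware as a big-endian value and is being converted to host order, so ntohs() is the correct direction; on little-endian machines the wrong call happens to yield the same bits, which is why only endianness annotations catch it. Schematically:

/* fl0id is delivered by firmware as __be16; convert wire -> CPU order */
static u16 example_fl_cntxt_id(__be16 fl0id)
{
        return ntohs(fl0id);    /* htons() would be CPU -> wire, the wrong way */
}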
diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c
index a814a3afe123..2923dd43523d 100644
--- a/drivers/net/cxgb4/t4_hw.c
+++ b/drivers/net/cxgb4/t4_hw.c
@@ -53,8 +53,8 @@
* at the time it indicated completion is stored there. Returns 0 if the
* operation completes and -EAGAIN otherwise.
*/
-int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
- int polarity, int attempts, int delay, u32 *valp)
+static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
+ int polarity, int attempts, int delay, u32 *valp)
{
while (1) {
u32 val = t4_read_reg(adapter, reg);
@@ -109,9 +109,9 @@ void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
* Reads registers that are accessed indirectly through an address/data
* register pair.
*/
-void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
- unsigned int data_reg, u32 *vals, unsigned int nregs,
- unsigned int start_idx)
+static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
+ unsigned int data_reg, u32 *vals,
+ unsigned int nregs, unsigned int start_idx)
{
while (nregs--) {
t4_write_reg(adap, addr_reg, start_idx);
@@ -120,6 +120,7 @@ void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
}
}
+#if 0
/**
* t4_write_indirect - write indirectly addressed registers
* @adap: the adapter
@@ -132,15 +133,16 @@ void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
* Writes a sequential block of registers that are accessed indirectly
* through an address/data register pair.
*/
-void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
- unsigned int data_reg, const u32 *vals,
- unsigned int nregs, unsigned int start_idx)
+static void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
+ unsigned int data_reg, const u32 *vals,
+ unsigned int nregs, unsigned int start_idx)
{
while (nregs--) {
t4_write_reg(adap, addr_reg, start_idx++);
t4_write_reg(adap, data_reg, *vals++);
}
}
+#endif
/*
* Get the reply to a mailbox command and store it in @rpl in big-endian order.
@@ -345,33 +347,21 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
return 0;
}
-#define VPD_ENTRY(name, len) \
- u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
-
/*
* Partial EEPROM Vital Product Data structure. Includes only the ID and
- * VPD-R sections.
+ * VPD-R header.
*/
-struct t4_vpd {
+struct t4_vpd_hdr {
u8 id_tag;
u8 id_len[2];
u8 id_data[ID_LEN];
u8 vpdr_tag;
u8 vpdr_len[2];
- VPD_ENTRY(pn, 16); /* part number */
- VPD_ENTRY(ec, EC_LEN); /* EC level */
- VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
- VPD_ENTRY(na, 12); /* MAC address base */
- VPD_ENTRY(port_type, 8); /* port types */
- VPD_ENTRY(gpio, 14); /* GPIO usage */
- VPD_ENTRY(cclk, 6); /* core clock */
- VPD_ENTRY(port_addr, 8); /* port MDIO addresses */
- VPD_ENTRY(rv, 1); /* csum */
- u32 pad; /* for multiple-of-4 sizing and alignment */
};
#define EEPROM_STAT_ADDR 0x7bfc
#define VPD_BASE 0
+#define VPD_LEN 512
/**
* t4_seeprom_wp - enable/disable EEPROM write protection
@@ -396,16 +386,36 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable)
*/
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
- int ret;
- struct t4_vpd vpd;
- u8 *q = (u8 *)&vpd, csum;
+ int i, ret;
+ int ec, sn, v2;
+ u8 vpd[VPD_LEN], csum;
+ unsigned int vpdr_len;
+ const struct t4_vpd_hdr *v;
- ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), &vpd);
+ ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
if (ret < 0)
return ret;
- for (csum = 0; q <= vpd.rv_data; q++)
- csum += *q;
+ v = (const struct t4_vpd_hdr *)vpd;
+ vpdr_len = pci_vpd_lrdt_size(&v->vpdr_tag);
+ if (vpdr_len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
+ dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
+ return -EINVAL;
+ }
+
+#define FIND_VPD_KW(var, name) do { \
+ var = pci_vpd_find_info_keyword(&v->id_tag, sizeof(struct t4_vpd_hdr), \
+ vpdr_len, name); \
+ if (var < 0) { \
+ dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
+ return -EINVAL; \
+ } \
+ var += PCI_VPD_INFO_FLD_HDR_SIZE; \
+} while (0)
+
+ FIND_VPD_KW(i, "RV");
+ for (csum = 0; i >= 0; i--)
+ csum += vpd[i];
if (csum) {
dev_err(adapter->pdev_dev,
@@ -413,12 +423,18 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
return -EINVAL;
}
- p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
- memcpy(p->id, vpd.id_data, sizeof(vpd.id_data));
+ FIND_VPD_KW(ec, "EC");
+ FIND_VPD_KW(sn, "SN");
+ FIND_VPD_KW(v2, "V2");
+#undef FIND_VPD_KW
+
+ p->cclk = simple_strtoul(vpd + v2, NULL, 10);
+ memcpy(p->id, v->id_data, ID_LEN);
strim(p->id);
- memcpy(p->ec, vpd.ec_data, sizeof(vpd.ec_data));
+ memcpy(p->ec, vpd + ec, EC_LEN);
strim(p->ec);
- memcpy(p->sn, vpd.sn_data, sizeof(vpd.sn_data));
+ i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
+ memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
strim(p->sn);
return 0;
}
@@ -537,8 +553,8 @@ static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
* (i.e., big-endian), otherwise as 32-bit words in the platform's
* natural endianess.
*/
-int t4_read_flash(struct adapter *adapter, unsigned int addr,
- unsigned int nwords, u32 *data, int byte_oriented)
+static int t4_read_flash(struct adapter *adapter, unsigned int addr,
+ unsigned int nwords, u32 *data, int byte_oriented)
{
int ret;
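
The rewritten get_vpd_params() stops assuming a fixed VPD layout and walks the VPD-R section with the generic PCI VPD helpers instead. A hedged, stand-alone sketch of pulling a single keyword ("SN") out of a raw VPD image with the same helpers; it reuses the t4_vpd_hdr from the hunk above, error checks are trimmed, and the 512-byte buffer mirrors VPD_LEN:

static int example_read_vpd_sn(struct pci_dev *pdev, char *out, int out_len)
{
        u8 vpd[512];
        const struct t4_vpd_hdr *v = (const struct t4_vpd_hdr *)vpd;
        int ret, i, kw_len;

        ret = pci_read_vpd(pdev, 0, sizeof(vpd), vpd);
        if (ret < 0)
                return ret;

        i = pci_vpd_find_info_keyword(vpd, sizeof(struct t4_vpd_hdr),
                                      pci_vpd_lrdt_size(&v->vpdr_tag), "SN");
        if (i < 0)
                return -ENOENT;

        kw_len = pci_vpd_info_field_size(vpd + i);
        memcpy(out, vpd + i + PCI_VPD_INFO_FLD_HDR_SIZE, min(kw_len, out_len));
        return kw_len;
}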
diff --git a/drivers/net/cxgb4/t4_msg.h b/drivers/net/cxgb4/t4_msg.h
index fdb117443144..7a981b81afaf 100644
--- a/drivers/net/cxgb4/t4_msg.h
+++ b/drivers/net/cxgb4/t4_msg.h
@@ -503,6 +503,7 @@ struct cpl_rx_data_ack {
};
struct cpl_rx_pkt {
+ struct rss_header rsshdr;
u8 opcode;
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 iff:4;
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 2b8edd2efbf6..08e82b1a0b33 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -952,13 +952,14 @@ static void emac_dev_mcast_set(struct net_device *ndev)
emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL);
}
if (!netdev_mc_empty(ndev)) {
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
+
mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
emac_add_mcast(priv, EMAC_ALL_MULTI_CLR, NULL);
/* program multicast address list into EMAC hardware */
- netdev_for_each_mc_addr(mc_ptr, ndev) {
+ netdev_for_each_mc_addr(ha, ndev) {
emac_add_mcast(priv, EMAC_MULTICAST_ADD,
- (u8 *) mc_ptr->dmi_addr);
+ (u8 *) ha->addr);
}
} else {
mbp_enable = (mbp_enable & ~EMAC_MBP_RXMCAST);
@@ -1467,7 +1468,6 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
tx_buf.length = skb->len;
tx_buf.buf_token = (void *)skb;
tx_buf.data_ptr = skb->data;
- ndev->trans_start = jiffies;
ret_code = emac_send(priv, &tx_packet, EMAC_DEF_TX_CH);
if (unlikely(ret_code != 0)) {
if (ret_code == EMAC_ERR_TX_OUT_OF_BD) {
diff --git a/drivers/net/de600.c b/drivers/net/de600.c
index 6b13f4fd2e96..23a65398d011 100644
--- a/drivers/net/de600.c
+++ b/drivers/net/de600.c
@@ -166,8 +166,8 @@ static int de600_start_xmit(struct sk_buff *skb, struct net_device *dev)
int i;
if (free_tx_pages <= 0) { /* Do timeouts, to avoid hangs. */
- tickssofar = jiffies - dev->trans_start;
- if (tickssofar < 5)
+ tickssofar = jiffies - dev_trans_start(dev);
+ if (tickssofar < HZ/20)
return NETDEV_TX_BUSY;
/* else */
printk(KERN_WARNING "%s: transmit timed out (%d), %s?\n", dev->name, tickssofar, "network cable problem");
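
Two things change in the de600 timeout test: dev_trans_start() is used because trans_start is now tracked per TX queue, and the threshold becomes HZ/20 (50 ms) rather than a raw count of 5 jiffies, which only meant 50 ms when HZ=100. A small sketch of the equivalent time-based check:

#include <linux/jiffies.h>
#include <linux/netdevice.h>

/* true if the last transmit started more than 50 ms ago, on any HZ */
static bool example_tx_stuck(struct net_device *dev)
{
        return time_after(jiffies, dev_trans_start(dev) + HZ / 20);
}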
diff --git a/drivers/net/de620.c b/drivers/net/de620.c
index a0a6830b5e6d..f3650fd096f4 100644
--- a/drivers/net/de620.c
+++ b/drivers/net/de620.c
@@ -535,7 +535,6 @@ static int de620_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
de620_write_block(dev, buffer, skb->len, len-skb->len);
- dev->trans_start = jiffies;
if(!(using_txbuf == (TXBF0 | TXBF1)))
netif_wake_queue(dev);
diff --git a/drivers/net/declance.c b/drivers/net/declance.c
index 8cf3cc6f20e2..74abe195212c 100644
--- a/drivers/net/declance.c
+++ b/drivers/net/declance.c
@@ -874,7 +874,7 @@ static inline int lance_reset(struct net_device *dev)
lance_init_ring(dev);
load_csrs(lp);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
status = init_restart_lance(lp);
return status;
}
@@ -930,7 +930,6 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&lp->lock, flags);
- dev->trans_start = jiffies;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
@@ -940,7 +939,7 @@ static void lance_load_multicast(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile u16 *ib = (volatile u16 *)dev->mem_start;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
u32 crc;
@@ -959,8 +958,8 @@ static void lance_load_multicast(struct net_device *dev)
*lib_ptr(ib, filter[3], lp->type) = 0;
/* Add addresses */
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
/* multicast address? */
if (!(*addrs & 1))
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c
index ed53a8d45f4e..e5667c55844e 100644
--- a/drivers/net/defxx.c
+++ b/drivers/net/defxx.c
@@ -2195,7 +2195,7 @@ static void dfx_ctl_set_multicast_list(struct net_device *dev)
{
DFX_board_t *bp = netdev_priv(dev);
int i; /* used as index in for loop */
- struct dev_mc_list *dmi; /* ptr to multicast addr entry */
+ struct netdev_hw_addr *ha;
/* Enable LLC frame promiscuous mode, if necessary */
@@ -2241,9 +2241,9 @@ static void dfx_ctl_set_multicast_list(struct net_device *dev)
/* Copy addresses to multicast address table, then update adapter CAM */
i = 0;
- netdev_for_each_mc_addr(dmi, dev)
+ netdev_for_each_mc_addr(ha, dev)
memcpy(&bp->mc_table[i++ * FDDI_K_ALEN],
- dmi->dmi_addr, FDDI_K_ALEN);
+ ha->addr, FDDI_K_ALEN);
if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
{
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 744c1928dfca..38d4d9eefbdd 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -921,7 +921,7 @@ static void depca_tx_timeout(struct net_device *dev)
STOP_DEPCA;
depca_init_ring(dev);
LoadCSRs(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
InitRestartDepca(dev);
}
@@ -954,7 +954,6 @@ static netdev_tx_t depca_start_xmit(struct sk_buff *skb,
outw(CSR0, DEPCA_ADDR);
outw(INEA | TDMD, DEPCA_DATA);
- dev->trans_start = jiffies;
dev_kfree_skb(skb);
}
if (TX_BUFFS_AVAIL)
@@ -1272,7 +1271,7 @@ static void set_multicast_list(struct net_device *dev)
static void SetMulticastFilter(struct net_device *dev)
{
struct depca_private *lp = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
int i, j, bit, byte;
u16 hashcode;
@@ -1287,8 +1286,8 @@ static void SetMulticastFilter(struct net_device *dev)
lp->init_block.mcast_table[i] = 0;
}
/* Add multicast addresses */
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
if ((*addrs & 0x01) == 1) { /* multicast address? */
crc = ether_crc(ETH_ALEN, addrs);
hashcode = (crc & 1); /* hashcode is 6 LSb of CRC ... */
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index b05bad829827..6579225dbd91 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -1132,14 +1132,14 @@ set_multicast (struct net_device *dev)
/* Receive broadcast and multicast frames */
rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
} else if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
/* Receive broadcast frames and multicast frames filtering
by Hashtable */
rx_mode =
ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast;
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
int bit, index = 0;
- int crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
+ int crc = ether_crc_le(ETH_ALEN, ha->addr);
/* The inverted high significant 6 bits of CRC are
used as an index to hashtable */
for (bit = 0; bit < 6; bit++)
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 7f9960f718e3..254b6f724c60 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -725,7 +725,7 @@ static void
dm9000_hash_table(struct net_device *dev)
{
board_info_t *db = netdev_priv(dev);
- struct dev_mc_list *mcptr;
+ struct netdev_hw_addr *ha;
int i, oft;
u32 hash_val;
u16 hash_table[4];
@@ -753,8 +753,8 @@ dm9000_hash_table(struct net_device *dev)
rcr |= RCR_ALL;
/* the multicast address in Hash Table : 64 bits */
- netdev_for_each_mc_addr(mcptr, dev) {
- hash_val = ether_crc_le(6, mcptr->dmi_addr) & 0x3f;
+ netdev_for_each_mc_addr(ha, dev) {
+ hash_val = ether_crc_le(6, ha->addr) & 0x3f;
hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
}
@@ -769,7 +769,7 @@ dm9000_hash_table(struct net_device *dev)
}
/*
- * Initilize dm9000 board
+ * Initialize dm9000 board
*/
static void
dm9000_init_dm9000(struct net_device *dev)
@@ -825,7 +825,7 @@ dm9000_init_dm9000(struct net_device *dev)
/* Init Driver variable */
db->tx_pkt_cnt = 0;
db->queue_pkt_len = 0;
- dev->trans_start = 0;
+ dev->trans_start = jiffies;
}
/* Our watchdog timed out. Called by the networking layer */
@@ -843,7 +843,7 @@ static void dm9000_timeout(struct net_device *dev)
dm9000_reset(db);
dm9000_init_dm9000(dev);
/* We can accept TX packets again */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
/* Restore previous register address */
diff --git a/drivers/net/dnet.c b/drivers/net/dnet.c
index 234685213f1a..8b0f50bbf3e5 100644
--- a/drivers/net/dnet.c
+++ b/drivers/net/dnet.c
@@ -594,8 +594,6 @@ static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&bp->lock, flags);
- dev->trans_start = jiffies;
-
return NETDEV_TX_OK;
}
@@ -918,7 +916,7 @@ static int __devinit dnet_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Dave DNET at 0x%p (0x%08x) irq %d %pM\n",
bp->regs, mem_base, dev->irq, dev->dev_addr);
- dev_info(&pdev->dev, "has %smdio, %sirq, %sgigabit, %sdma \n",
+ dev_info(&pdev->dev, "has %smdio, %sirq, %sgigabit, %sdma\n",
(bp->capabilities & DNET_HAS_MDIO) ? "" : "no ",
(bp->capabilities & DNET_HAS_IRQ) ? "" : "no ",
(bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ",
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 791080303db1..b194bad29ace 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -147,6 +147,8 @@
* - add clean lowlevel I/O emulation for cards with MII-lacking PHYs
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
@@ -175,7 +177,6 @@
#define DRV_VERSION "3.5.24-k2"DRV_EXT
#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation"
-#define PFX DRV_NAME ": "
#define E100_WATCHDOG_PERIOD (2 * HZ)
#define E100_NAPI_WEIGHT 16
@@ -201,10 +202,6 @@ module_param(use_io, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
-#define DPRINTK(nlevel, klevel, fmt, args...) \
- (void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
- printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
- __func__ , ## args))
#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
@@ -690,12 +687,13 @@ static int e100_self_test(struct nic *nic)
/* Check results of self-test */
if (nic->mem->selftest.result != 0) {
- DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n",
- nic->mem->selftest.result);
+ netif_err(nic, hw, nic->netdev,
+ "Self-test failed: result=0x%08X\n",
+ nic->mem->selftest.result);
return -ETIMEDOUT;
}
if (nic->mem->selftest.signature == 0) {
- DPRINTK(HW, ERR, "Self-test failed: timed out\n");
+ netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n");
return -ETIMEDOUT;
}
@@ -798,7 +796,7 @@ static int e100_eeprom_load(struct nic *nic)
/* The checksum, stored in the last word, is calculated such that
* the sum of words should be 0xBABA */
if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
- DPRINTK(PROBE, ERR, "EEPROM corrupted\n");
+ netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n");
if (!eeprom_bad_csum_allow)
return -EAGAIN;
}
@@ -954,8 +952,7 @@ static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
udelay(20);
}
if (unlikely(!i)) {
- printk("e100.mdio_ctrl(%s) won't go Ready\n",
- nic->netdev->name );
+ netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n");
spin_unlock_irqrestore(&nic->mdio_lock, flags);
return 0; /* No way to indicate timeout error */
}
@@ -967,9 +964,10 @@ static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
break;
}
spin_unlock_irqrestore(&nic->mdio_lock, flags);
- DPRINTK(HW, DEBUG,
- "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
- dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
+ dir == mdi_read ? "READ" : "WRITE",
+ addr, reg, data, data_out);
return (u16)data_out;
}
@@ -1029,17 +1027,19 @@ static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
return ADVERTISE_10HALF |
ADVERTISE_10FULL;
default:
- DPRINTK(HW, DEBUG,
- "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
- dir == mdi_read ? "READ" : "WRITE", addr, reg, data);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
+ dir == mdi_read ? "READ" : "WRITE",
+ addr, reg, data);
return 0xFFFF;
}
} else {
switch (reg) {
default:
- DPRINTK(HW, DEBUG,
- "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
- dir == mdi_read ? "READ" : "WRITE", addr, reg, data);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
+ dir == mdi_read ? "READ" : "WRITE",
+ addr, reg, data);
return 0xFFFF;
}
}
@@ -1156,12 +1156,15 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
}
}
- DPRINTK(HW, DEBUG, "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
- c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
- DPRINTK(HW, DEBUG, "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
- c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
- DPRINTK(HW, DEBUG, "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
- c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
}
/*************************************************************************
@@ -1254,16 +1257,18 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
err = request_firmware(&fw, fw_name, &nic->pdev->dev);
if (err) {
- DPRINTK(PROBE, ERR, "Failed to load firmware \"%s\": %d\n",
- fw_name, err);
+ netif_err(nic, probe, nic->netdev,
+ "Failed to load firmware \"%s\": %d\n",
+ fw_name, err);
return ERR_PTR(err);
}
/* Firmware should be precisely UCODE_SIZE (words) plus three bytes
indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
if (fw->size != UCODE_SIZE * 4 + 3) {
- DPRINTK(PROBE, ERR, "Firmware \"%s\" has wrong size %zu\n",
- fw_name, fw->size);
+ netif_err(nic, probe, nic->netdev,
+ "Firmware \"%s\" has wrong size %zu\n",
+ fw_name, fw->size);
release_firmware(fw);
return ERR_PTR(-EINVAL);
}
@@ -1275,9 +1280,9 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
min_size >= UCODE_SIZE) {
- DPRINTK(PROBE, ERR,
- "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
- fw_name, timer, bundle, min_size);
+ netif_err(nic, probe, nic->netdev,
+ "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
+ fw_name, timer, bundle, min_size);
release_firmware(fw);
return ERR_PTR(-EINVAL);
}
@@ -1329,7 +1334,8 @@ static inline int e100_load_ucode_wait(struct nic *nic)
return PTR_ERR(fw);
if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
- DPRINTK(PROBE,ERR, "ucode cmd failed with error %d\n", err);
+ netif_err(nic, probe, nic->netdev,
+ "ucode cmd failed with error %d\n", err);
/* must restart cuc */
nic->cuc_cmd = cuc_start;
@@ -1349,7 +1355,7 @@ static inline int e100_load_ucode_wait(struct nic *nic)
/* if the command failed, or is not OK, notify and return */
if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
- DPRINTK(PROBE,ERR, "ucode load failed\n");
+ netif_err(nic, probe, nic->netdev, "ucode load failed\n");
err = -EPERM;
}
@@ -1387,8 +1393,8 @@ static int e100_phy_check_without_mii(struct nic *nic)
* media is sensed automatically based on how the link partner
* is configured. This is, in essence, manual configuration.
*/
- DPRINTK(PROBE, INFO,
- "found MII-less i82503 or 80c24 or other PHY\n");
+ netif_info(nic, probe, nic->netdev,
+ "found MII-less i82503 or 80c24 or other PHY\n");
nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */
@@ -1435,18 +1441,20 @@ static int e100_phy_init(struct nic *nic)
return 0; /* simply return and hope for the best */
else {
/* for unknown cases log a fatal error */
- DPRINTK(HW, ERR,
- "Failed to locate any known PHY, aborting.\n");
+ netif_err(nic, hw, nic->netdev,
+ "Failed to locate any known PHY, aborting\n");
return -EAGAIN;
}
} else
- DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "phy_addr = %d\n", nic->mii.phy_id);
/* Get phy ID */
id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
nic->phy = (u32)id_hi << 16 | (u32)id_lo;
- DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "phy ID = 0x%08X\n", nic->phy);
/* Select the phy and isolate the rest */
for (addr = 0; addr < 32; addr++) {
@@ -1508,7 +1516,7 @@ static int e100_hw_init(struct nic *nic)
e100_hw_reset(nic);
- DPRINTK(HW, ERR, "e100_hw_init\n");
+ netif_err(nic, hw, nic->netdev, "e100_hw_init\n");
if (!in_interrupt() && (err = e100_self_test(nic)))
return err;
@@ -1538,16 +1546,16 @@ static int e100_hw_init(struct nic *nic)
static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
struct net_device *netdev = nic->netdev;
- struct dev_mc_list *list;
+ struct netdev_hw_addr *ha;
u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS);
cb->command = cpu_to_le16(cb_multi);
cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
i = 0;
- netdev_for_each_mc_addr(list, netdev) {
+ netdev_for_each_mc_addr(ha, netdev) {
if (i == count)
break;
- memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &list->dmi_addr,
+ memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
ETH_ALEN);
}
}
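
A minimal sketch of the replacement API used in the hunk above, assuming a kernel module context (copy_mc_addrs is an illustrative helper): struct netdev_hw_addr carries the hardware address in ha->addr, and netdev_for_each_mc_addr() walks the device's multicast list.

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>

static void copy_mc_addrs(struct net_device *netdev, u8 *dst, u16 max)
{
	struct netdev_hw_addr *ha;
	u16 i = 0;

	/* iterate the multicast list with the netdev_hw_addr API */
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == max)
			break;
		memcpy(&dst[i++ * ETH_ALEN], ha->addr, ETH_ALEN);
	}
}
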
@@ -1556,8 +1564,9 @@ static void e100_set_multicast_list(struct net_device *netdev)
{
struct nic *nic = netdev_priv(netdev);
- DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n",
- netdev_mc_count(netdev), netdev->flags);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "mc_count=%d, flags=0x%04X\n",
+ netdev_mc_count(netdev), netdev->flags);
if (netdev->flags & IFF_PROMISC)
nic->flags |= promiscuous;
@@ -1630,7 +1639,8 @@ static void e100_update_stats(struct nic *nic)
if (e100_exec_cmd(nic, cuc_dump_reset, 0))
- DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
+ netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
+ "exec cuc_dump_reset failed\n");
}
static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
@@ -1660,20 +1670,19 @@ static void e100_watchdog(unsigned long data)
struct nic *nic = (struct nic *)data;
struct ethtool_cmd cmd;
- DPRINTK(TIMER, DEBUG, "right now = %ld\n", jiffies);
+ netif_printk(nic, timer, KERN_DEBUG, nic->netdev,
+ "right now = %ld\n", jiffies);
/* mii library handles link maintenance tasks */
mii_ethtool_gset(&nic->mii, &cmd);
if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
- printk(KERN_INFO "e100: %s NIC Link is Up %s Mbps %s Duplex\n",
- nic->netdev->name,
- cmd.speed == SPEED_100 ? "100" : "10",
- cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
+ netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n",
+ cmd.speed == SPEED_100 ? 100 : 10,
+ cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
} else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
- printk(KERN_INFO "e100: %s NIC Link is Down\n",
- nic->netdev->name);
+ netdev_info(nic->netdev, "NIC Link is Down\n");
}
mii_check_link(&nic->mii);
@@ -1733,7 +1742,8 @@ static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
Issue a NOP command followed by a 1us delay before
issuing the Tx command. */
if (e100_exec_cmd(nic, cuc_nop, 0))
- DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
+ netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
+ "exec cuc_nop failed\n");
udelay(1);
}
@@ -1742,17 +1752,18 @@ static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
switch (err) {
case -ENOSPC:
/* We queued the skb, but now we're out of space. */
- DPRINTK(TX_ERR, DEBUG, "No space for CB\n");
+ netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
+ "No space for CB\n");
netif_stop_queue(netdev);
break;
case -ENOMEM:
/* This is a hard error - log it. */
- DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n");
+ netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
+ "Out of Tx resources, returning skb\n");
netif_stop_queue(netdev);
return NETDEV_TX_BUSY;
}
- netdev->trans_start = jiffies;
return NETDEV_TX_OK;
}
@@ -1768,9 +1779,10 @@ static int e100_tx_clean(struct nic *nic)
for (cb = nic->cb_to_clean;
cb->status & cpu_to_le16(cb_complete);
cb = nic->cb_to_clean = cb->next) {
- DPRINTK(TX_DONE, DEBUG, "cb[%d]->status = 0x%04X\n",
- (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
- cb->status);
+ netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev,
+ "cb[%d]->status = 0x%04X\n",
+ (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
+ cb->status);
if (likely(cb->skb != NULL)) {
dev->stats.tx_packets++;
@@ -1913,7 +1925,8 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
rfd_status = le16_to_cpu(rfd->status);
- DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status);
+ netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev,
+ "status=0x%04X\n", rfd_status);
/* If data isn't ready, nothing to indicate */
if (unlikely(!(rfd_status & cb_complete))) {
@@ -2124,7 +2137,8 @@ static irqreturn_t e100_intr(int irq, void *dev_id)
struct nic *nic = netdev_priv(netdev);
u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);
- DPRINTK(INTR, DEBUG, "stat_ack = 0x%02X\n", stat_ack);
+ netif_printk(nic, intr, KERN_DEBUG, nic->netdev,
+ "stat_ack = 0x%02X\n", stat_ack);
if (stat_ack == stat_ack_not_ours || /* Not our interrupt */
stat_ack == stat_ack_not_present) /* Hardware is ejected */
@@ -2264,8 +2278,8 @@ static void e100_tx_timeout_task(struct work_struct *work)
struct nic *nic = container_of(work, struct nic, tx_timeout_task);
struct net_device *netdev = nic->netdev;
- DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
- ioread8(&nic->csr->scb.status));
+ netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
+ "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status));
rtnl_lock();
if (netif_running(netdev)) {
@@ -2532,8 +2546,8 @@ static int e100_set_ringparam(struct net_device *netdev,
rfds->count = min(rfds->count, rfds->max);
cbs->count = max(ring->tx_pending, cbs->min);
cbs->count = min(cbs->count, cbs->max);
- DPRINTK(DRV, INFO, "Ring Param settings: rx: %d, tx %d\n",
- rfds->count, cbs->count);
+ netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n",
+ rfds->count, cbs->count);
if (netif_running(netdev))
e100_up(nic);
@@ -2710,7 +2724,7 @@ static int e100_open(struct net_device *netdev)
netif_carrier_off(netdev);
if ((err = e100_up(nic)))
- DPRINTK(IFUP, ERR, "Cannot open interface, aborting.\n");
+ netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n");
return err;
}
@@ -2744,7 +2758,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
if (!(netdev = alloc_etherdev(sizeof(struct nic)))) {
if (((1 << debug) - 1) & NETIF_MSG_PROBE)
- printk(KERN_ERR PFX "Etherdev alloc failed, abort.\n");
+ pr_err("Etherdev alloc failed, aborting\n");
return -ENOMEM;
}
@@ -2762,35 +2776,34 @@ static int __devinit e100_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, netdev);
if ((err = pci_enable_device(pdev))) {
- DPRINTK(PROBE, ERR, "Cannot enable PCI device, aborting.\n");
+ netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n");
goto err_out_free_dev;
}
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- DPRINTK(PROBE, ERR, "Cannot find proper PCI device "
- "base address, aborting.\n");
+ netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n");
err = -ENODEV;
goto err_out_disable_pdev;
}
if ((err = pci_request_regions(pdev, DRV_NAME))) {
- DPRINTK(PROBE, ERR, "Cannot obtain PCI resources, aborting.\n");
+ netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n");
goto err_out_disable_pdev;
}
if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
- DPRINTK(PROBE, ERR, "No usable DMA configuration, aborting.\n");
+ netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n");
goto err_out_free_res;
}
SET_NETDEV_DEV(netdev, &pdev->dev);
if (use_io)
- DPRINTK(PROBE, INFO, "using i/o access mode\n");
+ netif_info(nic, probe, nic->netdev, "using i/o access mode\n");
nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
if (!nic->csr) {
- DPRINTK(PROBE, ERR, "Cannot map device registers, aborting.\n");
+ netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n");
err = -ENOMEM;
goto err_out_free_res;
}
@@ -2824,7 +2837,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
if ((err = e100_alloc(nic))) {
- DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
+ netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n");
goto err_out_iounmap;
}
@@ -2837,13 +2850,11 @@ static int __devinit e100_probe(struct pci_dev *pdev,
memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
if (!is_valid_ether_addr(netdev->perm_addr)) {
if (!eeprom_bad_csum_allow) {
- DPRINTK(PROBE, ERR, "Invalid MAC address from "
- "EEPROM, aborting.\n");
+ netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
err = -EAGAIN;
goto err_out_free;
} else {
- DPRINTK(PROBE, ERR, "Invalid MAC address from EEPROM, "
- "you MUST configure one.\n");
+ netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n");
}
}
@@ -2859,7 +2870,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
strcpy(netdev->name, "eth%d");
if ((err = register_netdev(netdev))) {
- DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n");
+ netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n");
goto err_out_free;
}
nic->cbs_pool = pci_pool_create(netdev->name,
@@ -2867,9 +2878,10 @@ static int __devinit e100_probe(struct pci_dev *pdev,
nic->params.cbs.max * sizeof(struct cb),
sizeof(u32),
0);
- DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %pM\n",
- (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
- pdev->irq, netdev->dev_addr);
+ netif_info(nic, probe, nic->netdev,
+ "addr 0x%llx, irq %d, MAC addr %pM\n",
+ (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
+ pdev->irq, netdev->dev_addr);
return 0;
@@ -3027,7 +3039,7 @@ static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
struct nic *nic = netdev_priv(netdev);
if (pci_enable_device(pdev)) {
- printk(KERN_ERR "e100: Cannot re-enable PCI device after reset.\n");
+ pr_err("Cannot re-enable PCI device after reset\n");
return PCI_ERS_RESULT_DISCONNECT;
}
pci_set_master(pdev);
@@ -3086,8 +3098,8 @@ static struct pci_driver e100_driver = {
static int __init e100_init_module(void)
{
if (((1 << debug) - 1) & NETIF_MSG_DRV) {
- printk(KERN_INFO PFX "%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
- printk(KERN_INFO PFX "%s\n", DRV_COPYRIGHT);
+ pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
+ pr_info("%s\n", DRV_COPYRIGHT);
}
return pci_register_driver(&e100_driver);
}
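
The e100 changes above replace the driver-local DPRINTK/PFX macros with the common netif_*() and netdev_*() helpers. A minimal sketch, assuming a kernel module context (struct my_priv and report_link are illustrative only): netif_<level>(priv, <type>, dev, ...) prints only when the matching NETIF_MSG_<TYPE> bit is set in priv->msg_enable, while netdev_<level>(dev, ...) prints unconditionally with the interface name prefixed.

#include <linux/netdevice.h>

struct my_priv {
	struct net_device *netdev;
	u32 msg_enable;			/* NETIF_MSG_* bitmask */
};

static void report_link(struct my_priv *priv, bool up)
{
	/* emitted only if NETIF_MSG_LINK is set in priv->msg_enable */
	netif_info(priv, link, priv->netdev,
		   "link is %s\n", up ? "up" : "down");

	if (!up)
		/* unconditional, prefixed with the netdev name */
		netdev_warn(priv->netdev, "link lost\n");
}
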
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 2f29c2131851..40b62b406b08 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -81,23 +81,6 @@ struct e1000_adapter;
#include "e1000_hw.h"
-#ifdef DBG
-#define E1000_DBG(args...) printk(KERN_DEBUG "e1000: " args)
-#else
-#define E1000_DBG(args...)
-#endif
-
-#define E1000_ERR(args...) printk(KERN_ERR "e1000: " args)
-
-#define PFX "e1000: "
-
-#define DPRINTK(nlevel, klevel, fmt, args...) \
-do { \
- if (NETIF_MSG_##nlevel & adapter->msg_enable) \
- printk(KERN_##klevel PFX "%s: %s: " fmt, \
- adapter->netdev->name, __func__, ##args); \
-} while (0)
-
#define E1000_MAX_INTR 10
/* TX/RX descriptor defines */
@@ -335,6 +318,25 @@ enum e1000_state_t {
__E1000_DOWN
};
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
+#define e_dbg(format, arg...) \
+ netdev_dbg(e1000_get_hw_dev(hw), format, ## arg)
+#define e_err(format, arg...) \
+ netdev_err(adapter->netdev, format, ## arg)
+#define e_info(format, arg...) \
+ netdev_info(adapter->netdev, format, ## arg)
+#define e_warn(format, arg...) \
+ netdev_warn(adapter->netdev, format, ## arg)
+#define e_notice(format, arg...) \
+ netdev_notice(adapter->netdev, format, ## arg)
+#define e_dev_info(format, arg...) \
+ dev_info(&adapter->pdev->dev, format, ## arg)
+#define e_dev_warn(format, arg...) \
+ dev_warn(&adapter->pdev->dev, format, ## arg)
+
extern char e1000_driver_name[];
extern const char e1000_driver_version[];
@@ -352,5 +354,6 @@ extern bool e1000_has_link(struct e1000_adapter *adapter);
extern void e1000_power_up_phy(struct e1000_adapter *);
extern void e1000_set_ethtool_ops(struct net_device *netdev);
extern void e1000_check_options(struct e1000_adapter *adapter);
+extern char *e1000_get_hw_dev_name(struct e1000_hw *hw);
#endif /* _E1000_H_ */
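
The header above also adopts the pr_fmt convention, so bare pr_info()/pr_err() calls pick up the module name automatically, and it routes the e_* wrappers through netdev_*/dev_* so messages are tagged with the interface or PCI device. A minimal sketch of the pr_fmt part, assuming a standalone module (demo_init/demo_exit are illustrative only); pr_fmt must be defined before the printk headers are pulled in.

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/printk.h>

static int __init demo_init(void)
{
	pr_info("loaded\n");		/* logs "<modname>: loaded" */
	return 0;
}

static void __exit demo_exit(void)
{
	pr_info("unloaded\n");
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
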
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index c67e93117271..2a3b2dccd06d 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -346,7 +346,7 @@ static int e1000_set_tso(struct net_device *netdev, u32 data)
netdev->features &= ~NETIF_F_TSO6;
- DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled");
+ e_info("TSO is %s\n", data ? "Enabled" : "Disabled");
adapter->tso_force = true;
return 0;
}
@@ -714,9 +714,9 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg,
writel(write & test[i], address);
read = readl(address);
if (read != (write & test[i] & mask)) {
- DPRINTK(DRV, ERR, "pattern test reg %04X failed: "
- "got 0x%08X expected 0x%08X\n",
- reg, read, (write & test[i] & mask));
+ e_info("pattern test reg %04X failed: "
+ "got 0x%08X expected 0x%08X\n",
+ reg, read, (write & test[i] & mask));
*data = reg;
return true;
}
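
The hunk above sits inside the ethtool register self-test; for orientation, a minimal sketch of the pattern-test idea it implements (reg_pattern_check and the pattern values are illustrative, assuming a kernel context): write a handful of bit patterns, read each back, and compare through the mask of writable bits.

#include <linux/kernel.h>
#include <linux/io.h>

static bool reg_pattern_check(void __iomem *addr, u32 mask, u32 write)
{
	static const u32 patterns[] = {
		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(patterns); i++) {
		u32 val = write & patterns[i];

		writel(val, addr);
		if (readl(addr) != (val & mask))
			return false;	/* read-back mismatch */
	}
	return true;
}
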
@@ -734,9 +734,9 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, int reg,
writel(write & mask, address);
read = readl(address);
if ((read & mask) != (write & mask)) {
- DPRINTK(DRV, ERR, "set/check reg %04X test failed: "
- "got 0x%08X expected 0x%08X\n",
- reg, (read & mask), (write & mask));
+ e_err("set/check reg %04X test failed: "
+ "got 0x%08X expected 0x%08X\n",
+ reg, (read & mask), (write & mask));
*data = reg;
return true;
}
@@ -779,8 +779,8 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
ew32(STATUS, toggle);
after = er32(STATUS) & toggle;
if (value != after) {
- DPRINTK(DRV, ERR, "failed STATUS register test got: "
- "0x%08X expected: 0x%08X\n", after, value);
+ e_err("failed STATUS register test got: "
+ "0x%08X expected: 0x%08X\n", after, value);
*data = 1;
return 1;
}
@@ -894,8 +894,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
*data = 1;
return -1;
}
- DPRINTK(HW, INFO, "testing %s interrupt\n",
- (shared_int ? "shared" : "unshared"));
+ e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared"));
/* Disable all the interrupts */
ew32(IMC, 0xFFFFFFFF);
@@ -980,9 +979,10 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
if (txdr->desc && txdr->buffer_info) {
for (i = 0; i < txdr->count; i++) {
if (txdr->buffer_info[i].dma)
- pci_unmap_single(pdev, txdr->buffer_info[i].dma,
+ dma_unmap_single(&pdev->dev,
+ txdr->buffer_info[i].dma,
txdr->buffer_info[i].length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
if (txdr->buffer_info[i].skb)
dev_kfree_skb(txdr->buffer_info[i].skb);
}
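
The hunks in this area switch from the pci_*_single/pci_*_consistent wrappers to the generic DMA API, which takes a struct device and an explicit dma_data_direction. A minimal sketch of the map/unmap pattern, assuming a kernel context (map_tx_buffer/unmap_tx_buffer are illustrative helpers):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int map_tx_buffer(struct pci_dev *pdev, void *buf, size_t len,
			 dma_addr_t *dma)
{
	*dma = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, *dma))
		return -ENOMEM;		/* mapping failed */
	return 0;
}

static void unmap_tx_buffer(struct pci_dev *pdev, dma_addr_t dma, size_t len)
{
	dma_unmap_single(&pdev->dev, dma, len, DMA_TO_DEVICE);
}
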
@@ -991,20 +991,23 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
if (rxdr->desc && rxdr->buffer_info) {
for (i = 0; i < rxdr->count; i++) {
if (rxdr->buffer_info[i].dma)
- pci_unmap_single(pdev, rxdr->buffer_info[i].dma,
+ dma_unmap_single(&pdev->dev,
+ rxdr->buffer_info[i].dma,
rxdr->buffer_info[i].length,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
if (rxdr->buffer_info[i].skb)
dev_kfree_skb(rxdr->buffer_info[i].skb);
}
}
if (txdr->desc) {
- pci_free_consistent(pdev, txdr->size, txdr->desc, txdr->dma);
+ dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
+ txdr->dma);
txdr->desc = NULL;
}
if (rxdr->desc) {
- pci_free_consistent(pdev, rxdr->size, rxdr->desc, rxdr->dma);
+ dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
+ rxdr->dma);
rxdr->desc = NULL;
}
@@ -1039,7 +1042,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
txdr->size = ALIGN(txdr->size, 4096);
- txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+ txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+ GFP_KERNEL);
if (!txdr->desc) {
ret_val = 2;
goto err_nomem;
@@ -1070,8 +1074,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
txdr->buffer_info[i].skb = skb;
txdr->buffer_info[i].length = skb->len;
txdr->buffer_info[i].dma =
- pci_map_single(pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ dma_map_single(&pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma);
tx_desc->lower.data = cpu_to_le32(skb->len);
tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
@@ -1093,7 +1097,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
}
rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
- rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+ rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+ GFP_KERNEL);
if (!rxdr->desc) {
ret_val = 5;
goto err_nomem;
@@ -1126,8 +1131,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
rxdr->buffer_info[i].skb = skb;
rxdr->buffer_info[i].length = E1000_RXBUFFER_2048;
rxdr->buffer_info[i].dma =
- pci_map_single(pdev, skb->data, E1000_RXBUFFER_2048,
- PCI_DMA_FROMDEVICE);
+ dma_map_single(&pdev->dev, skb->data,
+ E1000_RXBUFFER_2048, DMA_FROM_DEVICE);
rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma);
memset(skb->data, 0x00, skb->len);
}
@@ -1444,10 +1449,10 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
for (i = 0; i < 64; i++) { /* send the packets */
e1000_create_lbtest_frame(txdr->buffer_info[i].skb,
1024);
- pci_dma_sync_single_for_device(pdev,
- txdr->buffer_info[k].dma,
- txdr->buffer_info[k].length,
- PCI_DMA_TODEVICE);
+ dma_sync_single_for_device(&pdev->dev,
+ txdr->buffer_info[k].dma,
+ txdr->buffer_info[k].length,
+ DMA_TO_DEVICE);
if (unlikely(++k == txdr->count)) k = 0;
}
ew32(TDT, k);
@@ -1455,10 +1460,10 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
time = jiffies; /* set the start time for the receive */
good_cnt = 0;
do { /* receive the sent packets */
- pci_dma_sync_single_for_cpu(pdev,
- rxdr->buffer_info[l].dma,
- rxdr->buffer_info[l].length,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&pdev->dev,
+ rxdr->buffer_info[l].dma,
+ rxdr->buffer_info[l].length,
+ DMA_FROM_DEVICE);
ret_val = e1000_check_lbtest_frame(
rxdr->buffer_info[l].skb,
@@ -1558,7 +1563,7 @@ static void e1000_diag_test(struct net_device *netdev,
u8 forced_speed_duplex = hw->forced_speed_duplex;
u8 autoneg = hw->autoneg;
- DPRINTK(HW, INFO, "offline testing starting\n");
+ e_info("offline testing starting\n");
/* Link test performed before hardware reset so autoneg doesn't
* interfere with test result */
@@ -1598,7 +1603,7 @@ static void e1000_diag_test(struct net_device *netdev,
if (if_running)
dev_open(netdev);
} else {
- DPRINTK(HW, INFO, "online testing starting\n");
+ e_info("online testing starting\n");
/* Online tests */
if (e1000_link_test(adapter, &data[4]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1691,7 +1696,7 @@ static void e1000_get_wol(struct net_device *netdev,
wol->supported &= ~WAKE_UCAST;
if (adapter->wol & E1000_WUFC_EX)
- DPRINTK(DRV, ERR, "Interface does not support "
+ e_err("Interface does not support "
"directed (unicast) frame wake-up packets\n");
break;
default:
@@ -1725,8 +1730,8 @@ static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
switch (hw->device_id) {
case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
if (wol->wolopts & WAKE_UCAST) {
- DPRINTK(DRV, ERR, "Interface does not support "
- "directed (unicast) frame wake-up packets\n");
+ e_err("Interface does not support "
+ "directed (unicast) frame wake-up packets\n");
return -EOPNOTSUPP;
}
break;
@@ -1803,7 +1808,7 @@ static int e1000_get_coalesce(struct net_device *netdev,
if (adapter->hw.mac_type < e1000_82545)
return -EOPNOTSUPP;
- if (adapter->itr_setting <= 3)
+ if (adapter->itr_setting <= 4)
ec->rx_coalesce_usecs = adapter->itr_setting;
else
ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting;
@@ -1821,12 +1826,14 @@ static int e1000_set_coalesce(struct net_device *netdev,
return -EOPNOTSUPP;
if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) ||
- ((ec->rx_coalesce_usecs > 3) &&
+ ((ec->rx_coalesce_usecs > 4) &&
(ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) ||
(ec->rx_coalesce_usecs == 2))
return -EINVAL;
- if (ec->rx_coalesce_usecs <= 3) {
+ if (ec->rx_coalesce_usecs == 4) {
+ adapter->itr = adapter->itr_setting = 4;
+ } else if (ec->rx_coalesce_usecs <= 3) {
adapter->itr = 20000;
adapter->itr_setting = ec->rx_coalesce_usecs;
} else {
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 8d7d87f12827..c7e242b69a18 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -30,7 +30,7 @@
* Shared functions for accessing and configuring the MAC
*/
-#include "e1000_hw.h"
+#include "e1000.h"
static s32 e1000_check_downshift(struct e1000_hw *hw);
static s32 e1000_check_polarity(struct e1000_hw *hw,
@@ -114,7 +114,7 @@ static DEFINE_SPINLOCK(e1000_eeprom_lock);
*/
static s32 e1000_set_phy_type(struct e1000_hw *hw)
{
- DEBUGFUNC("e1000_set_phy_type");
+ e_dbg("e1000_set_phy_type");
if (hw->mac_type == e1000_undefined)
return -E1000_ERR_PHY_TYPE;
@@ -152,7 +152,7 @@ static void e1000_phy_init_script(struct e1000_hw *hw)
u32 ret_val;
u16 phy_saved_data;
- DEBUGFUNC("e1000_phy_init_script");
+ e_dbg("e1000_phy_init_script");
if (hw->phy_init_script) {
msleep(20);
@@ -245,7 +245,7 @@ static void e1000_phy_init_script(struct e1000_hw *hw)
*/
s32 e1000_set_mac_type(struct e1000_hw *hw)
{
- DEBUGFUNC("e1000_set_mac_type");
+ e_dbg("e1000_set_mac_type");
switch (hw->device_id) {
case E1000_DEV_ID_82542:
@@ -354,7 +354,7 @@ void e1000_set_media_type(struct e1000_hw *hw)
{
u32 status;
- DEBUGFUNC("e1000_set_media_type");
+ e_dbg("e1000_set_media_type");
if (hw->mac_type != e1000_82543) {
/* tbi_compatibility is only valid on 82543 */
@@ -401,16 +401,16 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
u32 led_ctrl;
s32 ret_val;
- DEBUGFUNC("e1000_reset_hw");
+ e_dbg("e1000_reset_hw");
/* For 82542 (rev 2.0), disable MWI before issuing a device reset */
if (hw->mac_type == e1000_82542_rev2_0) {
- DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
+ e_dbg("Disabling MWI on 82542 rev 2.0\n");
e1000_pci_clear_mwi(hw);
}
/* Clear interrupt mask to stop board from generating interrupts */
- DEBUGOUT("Masking off all interrupts\n");
+ e_dbg("Masking off all interrupts\n");
ew32(IMC, 0xffffffff);
/* Disable the Transmit and Receive units. Then delay to allow
@@ -442,7 +442,7 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
* the current PCI configuration. The global reset bit is self-
* clearing, and should clear within a microsecond.
*/
- DEBUGOUT("Issuing a global reset to MAC\n");
+ e_dbg("Issuing a global reset to MAC\n");
switch (hw->mac_type) {
case e1000_82544:
@@ -516,7 +516,7 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
}
/* Clear interrupt mask to stop board from generating interrupts */
- DEBUGOUT("Masking off all interrupts\n");
+ e_dbg("Masking off all interrupts\n");
ew32(IMC, 0xffffffff);
/* Clear any pending interrupt events. */
@@ -549,12 +549,12 @@ s32 e1000_init_hw(struct e1000_hw *hw)
u32 mta_size;
u32 ctrl_ext;
- DEBUGFUNC("e1000_init_hw");
+ e_dbg("e1000_init_hw");
/* Initialize Identification LED */
ret_val = e1000_id_led_init(hw);
if (ret_val) {
- DEBUGOUT("Error Initializing Identification LED\n");
+ e_dbg("Error Initializing Identification LED\n");
return ret_val;
}
@@ -562,14 +562,14 @@ s32 e1000_init_hw(struct e1000_hw *hw)
e1000_set_media_type(hw);
/* Disabling VLAN filtering. */
- DEBUGOUT("Initializing the IEEE VLAN\n");
+ e_dbg("Initializing the IEEE VLAN\n");
if (hw->mac_type < e1000_82545_rev_3)
ew32(VET, 0);
e1000_clear_vfta(hw);
/* For 82542 (rev 2.0), disable MWI and put the receiver into reset */
if (hw->mac_type == e1000_82542_rev2_0) {
- DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
+ e_dbg("Disabling MWI on 82542 rev 2.0\n");
e1000_pci_clear_mwi(hw);
ew32(RCTL, E1000_RCTL_RST);
E1000_WRITE_FLUSH();
@@ -591,7 +591,7 @@ s32 e1000_init_hw(struct e1000_hw *hw)
}
/* Zero out the Multicast HASH table */
- DEBUGOUT("Zeroing the MTA\n");
+ e_dbg("Zeroing the MTA\n");
mta_size = E1000_MC_TBL_SIZE;
for (i = 0; i < mta_size; i++) {
E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
@@ -662,7 +662,7 @@ static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
u16 eeprom_data;
s32 ret_val;
- DEBUGFUNC("e1000_adjust_serdes_amplitude");
+ e_dbg("e1000_adjust_serdes_amplitude");
if (hw->media_type != e1000_media_type_internal_serdes)
return E1000_SUCCESS;
@@ -709,7 +709,7 @@ s32 e1000_setup_link(struct e1000_hw *hw)
s32 ret_val;
u16 eeprom_data;
- DEBUGFUNC("e1000_setup_link");
+ e_dbg("e1000_setup_link");
/* Read and store word 0x0F of the EEPROM. This word contains bits
* that determine the hardware's default PAUSE (flow control) mode,
@@ -723,7 +723,7 @@ s32 e1000_setup_link(struct e1000_hw *hw)
ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
1, &eeprom_data);
if (ret_val) {
- DEBUGOUT("EEPROM Read Error\n");
+ e_dbg("EEPROM Read Error\n");
return -E1000_ERR_EEPROM;
}
if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0)
@@ -747,7 +747,7 @@ s32 e1000_setup_link(struct e1000_hw *hw)
hw->original_fc = hw->fc;
- DEBUGOUT1("After fix-ups FlowControl is now = %x\n", hw->fc);
+ e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc);
/* Take the 4 bits from EEPROM word 0x0F that determine the initial
* polarity value for the SW controlled pins, and setup the
@@ -760,7 +760,7 @@ s32 e1000_setup_link(struct e1000_hw *hw)
ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
1, &eeprom_data);
if (ret_val) {
- DEBUGOUT("EEPROM Read Error\n");
+ e_dbg("EEPROM Read Error\n");
return -E1000_ERR_EEPROM;
}
ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) <<
@@ -777,8 +777,7 @@ s32 e1000_setup_link(struct e1000_hw *hw)
* control is disabled, because it does not hurt anything to
* initialize these registers.
*/
- DEBUGOUT
- ("Initializing the Flow Control address, type and timer regs\n");
+ e_dbg("Initializing the Flow Control address, type and timer regs\n");
ew32(FCT, FLOW_CONTROL_TYPE);
ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
@@ -827,7 +826,7 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
u32 signal = 0;
s32 ret_val;
- DEBUGFUNC("e1000_setup_fiber_serdes_link");
+ e_dbg("e1000_setup_fiber_serdes_link");
/* On adapters with a MAC newer than 82544, SWDP 1 will be
* set when the optics detect a signal. On older adapters, it will be
@@ -893,7 +892,7 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
break;
default:
- DEBUGOUT("Flow control param set incorrectly\n");
+ e_dbg("Flow control param set incorrectly\n");
return -E1000_ERR_CONFIG;
break;
}
@@ -904,7 +903,7 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
* link-up status bit will be set and the flow control enable bits (RFCE
* and TFCE) will be set according to their negotiated value.
*/
- DEBUGOUT("Auto-negotiation enabled\n");
+ e_dbg("Auto-negotiation enabled\n");
ew32(TXCW, txcw);
ew32(CTRL, ctrl);
@@ -921,7 +920,7 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
*/
if (hw->media_type == e1000_media_type_internal_serdes ||
(er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) {
- DEBUGOUT("Looking for Link\n");
+ e_dbg("Looking for Link\n");
for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) {
msleep(10);
status = er32(STATUS);
@@ -929,7 +928,7 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
break;
}
if (i == (LINK_UP_TIMEOUT / 10)) {
- DEBUGOUT("Never got a valid link from auto-neg!!!\n");
+ e_dbg("Never got a valid link from auto-neg!!!\n");
hw->autoneg_failed = 1;
/* AutoNeg failed to achieve a link, so we'll call
* e1000_check_for_link. This routine will force the link up if
@@ -938,16 +937,16 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
*/
ret_val = e1000_check_for_link(hw);
if (ret_val) {
- DEBUGOUT("Error while checking for link\n");
+ e_dbg("Error while checking for link\n");
return ret_val;
}
hw->autoneg_failed = 0;
} else {
hw->autoneg_failed = 0;
- DEBUGOUT("Valid Link Found\n");
+ e_dbg("Valid Link Found\n");
}
} else {
- DEBUGOUT("No Signal Detected\n");
+ e_dbg("No Signal Detected\n");
}
return E1000_SUCCESS;
}
@@ -964,7 +963,7 @@ static s32 e1000_copper_link_preconfig(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_copper_link_preconfig");
+ e_dbg("e1000_copper_link_preconfig");
ctrl = er32(CTRL);
/* With 82543, we need to force speed and duplex on the MAC equal to what
@@ -987,10 +986,10 @@ static s32 e1000_copper_link_preconfig(struct e1000_hw *hw)
/* Make sure we have a valid PHY */
ret_val = e1000_detect_gig_phy(hw);
if (ret_val) {
- DEBUGOUT("Error, did not detect valid phy.\n");
+ e_dbg("Error, did not detect valid phy.\n");
return ret_val;
}
- DEBUGOUT1("Phy ID = %x \n", hw->phy_id);
+ e_dbg("Phy ID = %x\n", hw->phy_id);
/* Set PHY to class A mode (if necessary) */
ret_val = e1000_set_phy_mode(hw);
@@ -1025,14 +1024,14 @@ static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_copper_link_igp_setup");
+ e_dbg("e1000_copper_link_igp_setup");
if (hw->phy_reset_disable)
return E1000_SUCCESS;
ret_val = e1000_phy_reset(hw);
if (ret_val) {
- DEBUGOUT("Error Resetting the PHY\n");
+ e_dbg("Error Resetting the PHY\n");
return ret_val;
}
@@ -1049,7 +1048,7 @@ static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw)
/* disable lplu d3 during driver init */
ret_val = e1000_set_d3_lplu_state(hw, false);
if (ret_val) {
- DEBUGOUT("Error Disabling LPLU D3\n");
+ e_dbg("Error Disabling LPLU D3\n");
return ret_val;
}
}
@@ -1166,7 +1165,7 @@ static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_copper_link_mgp_setup");
+ e_dbg("e1000_copper_link_mgp_setup");
if (hw->phy_reset_disable)
return E1000_SUCCESS;
@@ -1255,7 +1254,7 @@ static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw)
/* SW Reset the PHY so all changes take effect */
ret_val = e1000_phy_reset(hw);
if (ret_val) {
- DEBUGOUT("Error Resetting the PHY\n");
+ e_dbg("Error Resetting the PHY\n");
return ret_val;
}
@@ -1274,7 +1273,7 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_copper_link_autoneg");
+ e_dbg("e1000_copper_link_autoneg");
/* Perform some bounds checking on the hw->autoneg_advertised
* parameter. If this variable is zero, then set it to the default.
@@ -1287,13 +1286,13 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
if (hw->autoneg_advertised == 0)
hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
- DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
+ e_dbg("Reconfiguring auto-neg advertisement params\n");
ret_val = e1000_phy_setup_autoneg(hw);
if (ret_val) {
- DEBUGOUT("Error Setting up Auto-Negotiation\n");
+ e_dbg("Error Setting up Auto-Negotiation\n");
return ret_val;
}
- DEBUGOUT("Restarting Auto-Neg\n");
+ e_dbg("Restarting Auto-Neg\n");
/* Restart auto-negotiation by setting the Auto Neg Enable bit and
* the Auto Neg Restart bit in the PHY control register.
@@ -1313,7 +1312,7 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
if (hw->wait_autoneg_complete) {
ret_val = e1000_wait_autoneg(hw);
if (ret_val) {
- DEBUGOUT
+ e_dbg
("Error while waiting for autoneg to complete\n");
return ret_val;
}
@@ -1340,20 +1339,20 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
static s32 e1000_copper_link_postconfig(struct e1000_hw *hw)
{
s32 ret_val;
- DEBUGFUNC("e1000_copper_link_postconfig");
+ e_dbg("e1000_copper_link_postconfig");
if (hw->mac_type >= e1000_82544) {
e1000_config_collision_dist(hw);
} else {
ret_val = e1000_config_mac_to_phy(hw);
if (ret_val) {
- DEBUGOUT("Error configuring MAC to PHY settings\n");
+ e_dbg("Error configuring MAC to PHY settings\n");
return ret_val;
}
}
ret_val = e1000_config_fc_after_link_up(hw);
if (ret_val) {
- DEBUGOUT("Error Configuring Flow Control\n");
+ e_dbg("Error Configuring Flow Control\n");
return ret_val;
}
@@ -1361,7 +1360,7 @@ static s32 e1000_copper_link_postconfig(struct e1000_hw *hw)
if (hw->phy_type == e1000_phy_igp) {
ret_val = e1000_config_dsp_after_link_change(hw, true);
if (ret_val) {
- DEBUGOUT("Error Configuring DSP after link up\n");
+ e_dbg("Error Configuring DSP after link up\n");
return ret_val;
}
}
@@ -1381,7 +1380,7 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw)
u16 i;
u16 phy_data;
- DEBUGFUNC("e1000_setup_copper_link");
+ e_dbg("e1000_setup_copper_link");
/* Check if it is a valid PHY and set PHY mode if necessary. */
ret_val = e1000_copper_link_preconfig(hw);
@@ -1407,10 +1406,10 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw)
} else {
/* PHY will be set to 10H, 10F, 100H,or 100F
* depending on value from forced_speed_duplex. */
- DEBUGOUT("Forcing speed and duplex\n");
+ e_dbg("Forcing speed and duplex\n");
ret_val = e1000_phy_force_speed_duplex(hw);
if (ret_val) {
- DEBUGOUT("Error Forcing Speed and Duplex\n");
+ e_dbg("Error Forcing Speed and Duplex\n");
return ret_val;
}
}
@@ -1432,13 +1431,13 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- DEBUGOUT("Valid link established!!!\n");
+ e_dbg("Valid link established!!!\n");
return E1000_SUCCESS;
}
udelay(10);
}
- DEBUGOUT("Unable to establish link!!!\n");
+ e_dbg("Unable to establish link!!!\n");
return E1000_SUCCESS;
}
@@ -1454,7 +1453,7 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
u16 mii_autoneg_adv_reg;
u16 mii_1000t_ctrl_reg;
- DEBUGFUNC("e1000_phy_setup_autoneg");
+ e_dbg("e1000_phy_setup_autoneg");
/* Read the MII Auto-Neg Advertisement Register (Address 4). */
ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
@@ -1481,41 +1480,41 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
mii_autoneg_adv_reg &= ~REG4_SPEED_MASK;
mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK;
- DEBUGOUT1("autoneg_advertised %x\n", hw->autoneg_advertised);
+ e_dbg("autoneg_advertised %x\n", hw->autoneg_advertised);
/* Do we want to advertise 10 Mb Half Duplex? */
if (hw->autoneg_advertised & ADVERTISE_10_HALF) {
- DEBUGOUT("Advertise 10mb Half duplex\n");
+ e_dbg("Advertise 10mb Half duplex\n");
mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
}
/* Do we want to advertise 10 Mb Full Duplex? */
if (hw->autoneg_advertised & ADVERTISE_10_FULL) {
- DEBUGOUT("Advertise 10mb Full duplex\n");
+ e_dbg("Advertise 10mb Full duplex\n");
mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
}
/* Do we want to advertise 100 Mb Half Duplex? */
if (hw->autoneg_advertised & ADVERTISE_100_HALF) {
- DEBUGOUT("Advertise 100mb Half duplex\n");
+ e_dbg("Advertise 100mb Half duplex\n");
mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
}
/* Do we want to advertise 100 Mb Full Duplex? */
if (hw->autoneg_advertised & ADVERTISE_100_FULL) {
- DEBUGOUT("Advertise 100mb Full duplex\n");
+ e_dbg("Advertise 100mb Full duplex\n");
mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
}
/* We do not allow the Phy to advertise 1000 Mb Half Duplex */
if (hw->autoneg_advertised & ADVERTISE_1000_HALF) {
- DEBUGOUT
+ e_dbg
("Advertise 1000mb Half duplex requested, request denied!\n");
}
/* Do we want to advertise 1000 Mb Full Duplex? */
if (hw->autoneg_advertised & ADVERTISE_1000_FULL) {
- DEBUGOUT("Advertise 1000mb Full duplex\n");
+ e_dbg("Advertise 1000mb Full duplex\n");
mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
}
@@ -1568,7 +1567,7 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
break;
default:
- DEBUGOUT("Flow control param set incorrectly\n");
+ e_dbg("Flow control param set incorrectly\n");
return -E1000_ERR_CONFIG;
}
@@ -1576,7 +1575,7 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
+ e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
if (ret_val)
@@ -1600,12 +1599,12 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
u16 phy_data;
u16 i;
- DEBUGFUNC("e1000_phy_force_speed_duplex");
+ e_dbg("e1000_phy_force_speed_duplex");
/* Turn off Flow control if we are forcing speed and duplex. */
hw->fc = E1000_FC_NONE;
- DEBUGOUT1("hw->fc = %d\n", hw->fc);
+ e_dbg("hw->fc = %d\n", hw->fc);
/* Read the Device Control Register. */
ctrl = er32(CTRL);
@@ -1634,14 +1633,14 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
*/
ctrl |= E1000_CTRL_FD;
mii_ctrl_reg |= MII_CR_FULL_DUPLEX;
- DEBUGOUT("Full Duplex\n");
+ e_dbg("Full Duplex\n");
} else {
/* We want to force half duplex so we CLEAR the full duplex bits in
* the Device and MII Control Registers.
*/
ctrl &= ~E1000_CTRL_FD;
mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX;
- DEBUGOUT("Half Duplex\n");
+ e_dbg("Half Duplex\n");
}
/* Are we forcing 100Mbps??? */
@@ -1651,13 +1650,13 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
ctrl |= E1000_CTRL_SPD_100;
mii_ctrl_reg |= MII_CR_SPEED_100;
mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
- DEBUGOUT("Forcing 100mb ");
+ e_dbg("Forcing 100mb ");
} else {
/* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */
ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
mii_ctrl_reg |= MII_CR_SPEED_10;
mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
- DEBUGOUT("Forcing 10mb ");
+ e_dbg("Forcing 10mb ");
}
e1000_config_collision_dist(hw);
@@ -1680,7 +1679,7 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- DEBUGOUT1("M88E1000 PSCR: %x \n", phy_data);
+ e_dbg("M88E1000 PSCR: %x\n", phy_data);
/* Need to reset the PHY or these changes will be ignored */
mii_ctrl_reg |= MII_CR_RESET;
@@ -1720,7 +1719,7 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
*/
if (hw->wait_autoneg_complete) {
/* We will wait for autoneg to complete. */
- DEBUGOUT("Waiting for forced speed/duplex link.\n");
+ e_dbg("Waiting for forced speed/duplex link.\n");
mii_status_reg = 0;
/* We will wait for autoneg to complete or 4.5 seconds to expire. */
@@ -1746,7 +1745,7 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
/* We didn't get link. Reset the DSP and wait again for link. */
ret_val = e1000_phy_reset_dsp(hw);
if (ret_val) {
- DEBUGOUT("Error Resetting PHY DSP\n");
+ e_dbg("Error Resetting PHY DSP\n");
return ret_val;
}
}
@@ -1826,7 +1825,7 @@ void e1000_config_collision_dist(struct e1000_hw *hw)
{
u32 tctl, coll_dist;
- DEBUGFUNC("e1000_config_collision_dist");
+ e_dbg("e1000_config_collision_dist");
if (hw->mac_type < e1000_82543)
coll_dist = E1000_COLLISION_DISTANCE_82542;
@@ -1857,7 +1856,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_config_mac_to_phy");
+ e_dbg("e1000_config_mac_to_phy");
/* 82544 or newer MAC, Auto Speed Detection takes care of
* MAC speed/duplex configuration.*/
@@ -1913,7 +1912,7 @@ s32 e1000_force_mac_fc(struct e1000_hw *hw)
{
u32 ctrl;
- DEBUGFUNC("e1000_force_mac_fc");
+ e_dbg("e1000_force_mac_fc");
/* Get the current configuration of the Device Control Register */
ctrl = er32(CTRL);
@@ -1952,7 +1951,7 @@ s32 e1000_force_mac_fc(struct e1000_hw *hw)
ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
break;
default:
- DEBUGOUT("Flow control param set incorrectly\n");
+ e_dbg("Flow control param set incorrectly\n");
return -E1000_ERR_CONFIG;
}
@@ -1984,7 +1983,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
u16 speed;
u16 duplex;
- DEBUGFUNC("e1000_config_fc_after_link_up");
+ e_dbg("e1000_config_fc_after_link_up");
/* Check for the case where we have fiber media and auto-neg failed
* so we had to force link. In this case, we need to force the
@@ -1997,7 +1996,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
&& (!hw->autoneg))) {
ret_val = e1000_force_mac_fc(hw);
if (ret_val) {
- DEBUGOUT("Error forcing flow control settings\n");
+ e_dbg("Error forcing flow control settings\n");
return ret_val;
}
}
@@ -2079,10 +2078,10 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
*/
if (hw->original_fc == E1000_FC_FULL) {
hw->fc = E1000_FC_FULL;
- DEBUGOUT("Flow Control = FULL.\n");
+ e_dbg("Flow Control = FULL.\n");
} else {
hw->fc = E1000_FC_RX_PAUSE;
- DEBUGOUT
+ e_dbg
("Flow Control = RX PAUSE frames only.\n");
}
}
@@ -2100,7 +2099,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
(mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR))
{
hw->fc = E1000_FC_TX_PAUSE;
- DEBUGOUT
+ e_dbg
("Flow Control = TX PAUSE frames only.\n");
}
/* For transmitting PAUSE frames ONLY.
@@ -2117,7 +2116,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
(mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR))
{
hw->fc = E1000_FC_RX_PAUSE;
- DEBUGOUT
+ e_dbg
("Flow Control = RX PAUSE frames only.\n");
}
/* Per the IEEE spec, at this point flow control should be
@@ -2144,10 +2143,10 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
hw->original_fc == E1000_FC_TX_PAUSE) ||
hw->fc_strict_ieee) {
hw->fc = E1000_FC_NONE;
- DEBUGOUT("Flow Control = NONE.\n");
+ e_dbg("Flow Control = NONE.\n");
} else {
hw->fc = E1000_FC_RX_PAUSE;
- DEBUGOUT
+ e_dbg
("Flow Control = RX PAUSE frames only.\n");
}
@@ -2158,7 +2157,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
ret_val =
e1000_get_speed_and_duplex(hw, &speed, &duplex);
if (ret_val) {
- DEBUGOUT
+ e_dbg
("Error getting link speed and duplex\n");
return ret_val;
}
@@ -2171,12 +2170,12 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
*/
ret_val = e1000_force_mac_fc(hw);
if (ret_val) {
- DEBUGOUT
+ e_dbg
("Error forcing flow control settings\n");
return ret_val;
}
} else {
- DEBUGOUT
+ e_dbg
("Copper PHY and Auto Neg has not completed.\n");
}
}
@@ -2197,7 +2196,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
u32 status;
s32 ret_val = E1000_SUCCESS;
- DEBUGFUNC("e1000_check_for_serdes_link_generic");
+ e_dbg("e1000_check_for_serdes_link_generic");
ctrl = er32(CTRL);
status = er32(STATUS);
@@ -2216,7 +2215,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
hw->autoneg_failed = 1;
goto out;
}
- DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
+ e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n");
/* Disable auto-negotiation in the TXCW register */
ew32(TXCW, (hw->txcw & ~E1000_TXCW_ANE));
@@ -2229,7 +2228,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
/* Configure Flow Control after forcing link up. */
ret_val = e1000_config_fc_after_link_up(hw);
if (ret_val) {
- DEBUGOUT("Error configuring flow control\n");
+ e_dbg("Error configuring flow control\n");
goto out;
}
} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
@@ -2239,7 +2238,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
* and disable forced link in the Device Control register
* in an attempt to auto-negotiate with our link partner.
*/
- DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
+ e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n");
ew32(TXCW, hw->txcw);
ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
@@ -2256,11 +2255,11 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
if (rxcw & E1000_RXCW_SYNCH) {
if (!(rxcw & E1000_RXCW_IV)) {
hw->serdes_has_link = true;
- DEBUGOUT("SERDES: Link up - forced.\n");
+ e_dbg("SERDES: Link up - forced.\n");
}
} else {
hw->serdes_has_link = false;
- DEBUGOUT("SERDES: Link down - force failed.\n");
+ e_dbg("SERDES: Link down - force failed.\n");
}
}
@@ -2273,20 +2272,20 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
if (rxcw & E1000_RXCW_SYNCH) {
if (!(rxcw & E1000_RXCW_IV)) {
hw->serdes_has_link = true;
- DEBUGOUT("SERDES: Link up - autoneg "
+ e_dbg("SERDES: Link up - autoneg "
"completed successfully.\n");
} else {
hw->serdes_has_link = false;
- DEBUGOUT("SERDES: Link down - invalid"
+ e_dbg("SERDES: Link down - invalid "
"codewords detected in autoneg.\n");
}
} else {
hw->serdes_has_link = false;
- DEBUGOUT("SERDES: Link down - no sync.\n");
+ e_dbg("SERDES: Link down - no sync.\n");
}
} else {
hw->serdes_has_link = false;
- DEBUGOUT("SERDES: Link down - autoneg failed\n");
+ e_dbg("SERDES: Link down - autoneg failed\n");
}
}
@@ -2312,7 +2311,7 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_check_for_link");
+ e_dbg("e1000_check_for_link");
ctrl = er32(CTRL);
status = er32(STATUS);
@@ -2407,7 +2406,7 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
else {
ret_val = e1000_config_mac_to_phy(hw);
if (ret_val) {
- DEBUGOUT
+ e_dbg
("Error configuring MAC to PHY settings\n");
return ret_val;
}
@@ -2419,7 +2418,7 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
*/
ret_val = e1000_config_fc_after_link_up(hw);
if (ret_val) {
- DEBUGOUT("Error configuring flow control\n");
+ e_dbg("Error configuring flow control\n");
return ret_val;
}
@@ -2435,7 +2434,7 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
ret_val =
e1000_get_speed_and_duplex(hw, &speed, &duplex);
if (ret_val) {
- DEBUGOUT
+ e_dbg
("Error getting link speed and duplex\n");
return ret_val;
}
@@ -2487,30 +2486,30 @@ s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_get_speed_and_duplex");
+ e_dbg("e1000_get_speed_and_duplex");
if (hw->mac_type >= e1000_82543) {
status = er32(STATUS);
if (status & E1000_STATUS_SPEED_1000) {
*speed = SPEED_1000;
- DEBUGOUT("1000 Mbs, ");
+ e_dbg("1000 Mbs, ");
} else if (status & E1000_STATUS_SPEED_100) {
*speed = SPEED_100;
- DEBUGOUT("100 Mbs, ");
+ e_dbg("100 Mbs, ");
} else {
*speed = SPEED_10;
- DEBUGOUT("10 Mbs, ");
+ e_dbg("10 Mbs, ");
}
if (status & E1000_STATUS_FD) {
*duplex = FULL_DUPLEX;
- DEBUGOUT("Full Duplex\n");
+ e_dbg("Full Duplex\n");
} else {
*duplex = HALF_DUPLEX;
- DEBUGOUT(" Half Duplex\n");
+ e_dbg(" Half Duplex\n");
}
} else {
- DEBUGOUT("1000 Mbs, Full Duplex\n");
+ e_dbg("1000 Mbs, Full Duplex\n");
*speed = SPEED_1000;
*duplex = FULL_DUPLEX;
}
@@ -2554,8 +2553,8 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw)
u16 i;
u16 phy_data;
- DEBUGFUNC("e1000_wait_autoneg");
- DEBUGOUT("Waiting for Auto-Neg to complete.\n");
+ e_dbg("e1000_wait_autoneg");
+ e_dbg("Waiting for Auto-Neg to complete.\n");
/* We will wait for autoneg to complete or 4.5 seconds to expire. */
for (i = PHY_AUTO_NEG_TIME; i > 0; i--) {
@@ -2718,7 +2717,7 @@ s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data)
{
u32 ret_val;
- DEBUGFUNC("e1000_read_phy_reg");
+ e_dbg("e1000_read_phy_reg");
if ((hw->phy_type == e1000_phy_igp) &&
(reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
@@ -2741,10 +2740,10 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
u32 mdic = 0;
const u32 phy_addr = 1;
- DEBUGFUNC("e1000_read_phy_reg_ex");
+ e_dbg("e1000_read_phy_reg_ex");
if (reg_addr > MAX_PHY_REG_ADDRESS) {
- DEBUGOUT1("PHY Address %d is out of range\n", reg_addr);
+ e_dbg("PHY Address %d is out of range\n", reg_addr);
return -E1000_ERR_PARAM;
}
@@ -2767,11 +2766,11 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
break;
}
if (!(mdic & E1000_MDIC_READY)) {
- DEBUGOUT("MDI Read did not complete\n");
+ e_dbg("MDI Read did not complete\n");
return -E1000_ERR_PHY;
}
if (mdic & E1000_MDIC_ERROR) {
- DEBUGOUT("MDI Error\n");
+ e_dbg("MDI Error\n");
return -E1000_ERR_PHY;
}
*phy_data = (u16) mdic;
@@ -2820,7 +2819,7 @@ s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data)
{
u32 ret_val;
- DEBUGFUNC("e1000_write_phy_reg");
+ e_dbg("e1000_write_phy_reg");
if ((hw->phy_type == e1000_phy_igp) &&
(reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
@@ -2843,10 +2842,10 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
u32 mdic = 0;
const u32 phy_addr = 1;
- DEBUGFUNC("e1000_write_phy_reg_ex");
+ e_dbg("e1000_write_phy_reg_ex");
if (reg_addr > MAX_PHY_REG_ADDRESS) {
- DEBUGOUT1("PHY Address %d is out of range\n", reg_addr);
+ e_dbg("PHY Address %d is out of range\n", reg_addr);
return -E1000_ERR_PARAM;
}
@@ -2870,7 +2869,7 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
break;
}
if (!(mdic & E1000_MDIC_READY)) {
- DEBUGOUT("MDI Write did not complete\n");
+ e_dbg("MDI Write did not complete\n");
return -E1000_ERR_PHY;
}
} else {
@@ -2910,9 +2909,9 @@ s32 e1000_phy_hw_reset(struct e1000_hw *hw)
u32 led_ctrl;
s32 ret_val;
- DEBUGFUNC("e1000_phy_hw_reset");
+ e_dbg("e1000_phy_hw_reset");
- DEBUGOUT("Resetting Phy...\n");
+ e_dbg("Resetting Phy...\n");
if (hw->mac_type > e1000_82543) {
/* Read the device control register and assert the E1000_CTRL_PHY_RST
@@ -2973,7 +2972,7 @@ s32 e1000_phy_reset(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_phy_reset");
+ e_dbg("e1000_phy_reset");
switch (hw->phy_type) {
case e1000_phy_igp:
@@ -3013,7 +3012,7 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
u16 phy_id_high, phy_id_low;
bool match = false;
- DEBUGFUNC("e1000_detect_gig_phy");
+ e_dbg("e1000_detect_gig_phy");
if (hw->phy_id != 0)
return E1000_SUCCESS;
@@ -3057,16 +3056,16 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
match = true;
break;
default:
- DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type);
+ e_dbg("Invalid MAC type %d\n", hw->mac_type);
return -E1000_ERR_CONFIG;
}
phy_init_status = e1000_set_phy_type(hw);
if ((match) && (phy_init_status == E1000_SUCCESS)) {
- DEBUGOUT1("PHY ID 0x%X detected\n", hw->phy_id);
+ e_dbg("PHY ID 0x%X detected\n", hw->phy_id);
return E1000_SUCCESS;
}
- DEBUGOUT1("Invalid PHY ID 0x%X\n", hw->phy_id);
+ e_dbg("Invalid PHY ID 0x%X\n", hw->phy_id);
return -E1000_ERR_PHY;
}
@@ -3079,7 +3078,7 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
static s32 e1000_phy_reset_dsp(struct e1000_hw *hw)
{
s32 ret_val;
- DEBUGFUNC("e1000_phy_reset_dsp");
+ e_dbg("e1000_phy_reset_dsp");
do {
ret_val = e1000_write_phy_reg(hw, 29, 0x001d);
@@ -3111,7 +3110,7 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
u16 phy_data, min_length, max_length, average;
e1000_rev_polarity polarity;
- DEBUGFUNC("e1000_phy_igp_get_info");
+ e_dbg("e1000_phy_igp_get_info");
/* The downshift status is checked only once, after link is established,
* and it stored in the hw->speed_downgraded parameter. */
@@ -3189,7 +3188,7 @@ static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
u16 phy_data;
e1000_rev_polarity polarity;
- DEBUGFUNC("e1000_phy_m88_get_info");
+ e_dbg("e1000_phy_m88_get_info");
/* The downshift status is checked only once, after link is established,
* and it stored in the hw->speed_downgraded parameter. */
@@ -3261,7 +3260,7 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_phy_get_info");
+ e_dbg("e1000_phy_get_info");
phy_info->cable_length = e1000_cable_length_undefined;
phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined;
@@ -3273,7 +3272,7 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
phy_info->remote_rx = e1000_1000t_rx_status_undefined;
if (hw->media_type != e1000_media_type_copper) {
- DEBUGOUT("PHY info is only valid for copper media\n");
+ e_dbg("PHY info is only valid for copper media\n");
return -E1000_ERR_CONFIG;
}
@@ -3286,7 +3285,7 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
return ret_val;
if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) {
- DEBUGOUT("PHY info is only valid if link is up\n");
+ e_dbg("PHY info is only valid if link is up\n");
return -E1000_ERR_CONFIG;
}
@@ -3298,10 +3297,10 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
{
- DEBUGFUNC("e1000_validate_mdi_settings");
+ e_dbg("e1000_validate_mdi_settings");
if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) {
- DEBUGOUT("Invalid MDI setting detected\n");
+ e_dbg("Invalid MDI setting detected\n");
hw->mdix = 1;
return -E1000_ERR_CONFIG;
}
@@ -3322,7 +3321,7 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw)
s32 ret_val = E1000_SUCCESS;
u16 eeprom_size;
- DEBUGFUNC("e1000_init_eeprom_params");
+ e_dbg("e1000_init_eeprom_params");
switch (hw->mac_type) {
case e1000_82542_rev2_0:
@@ -3539,7 +3538,7 @@ static s32 e1000_acquire_eeprom(struct e1000_hw *hw)
struct e1000_eeprom_info *eeprom = &hw->eeprom;
u32 eecd, i = 0;
- DEBUGFUNC("e1000_acquire_eeprom");
+ e_dbg("e1000_acquire_eeprom");
eecd = er32(EECD);
@@ -3557,7 +3556,7 @@ static s32 e1000_acquire_eeprom(struct e1000_hw *hw)
if (!(eecd & E1000_EECD_GNT)) {
eecd &= ~E1000_EECD_REQ;
ew32(EECD, eecd);
- DEBUGOUT("Could not acquire EEPROM grant\n");
+ e_dbg("Could not acquire EEPROM grant\n");
return -E1000_ERR_EEPROM;
}
}
@@ -3639,7 +3638,7 @@ static void e1000_release_eeprom(struct e1000_hw *hw)
{
u32 eecd;
- DEBUGFUNC("e1000_release_eeprom");
+ e_dbg("e1000_release_eeprom");
eecd = er32(EECD);
@@ -3687,7 +3686,7 @@ static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw)
u16 retry_count = 0;
u8 spi_stat_reg;
- DEBUGFUNC("e1000_spi_eeprom_ready");
+ e_dbg("e1000_spi_eeprom_ready");
/* Read "Status Register" repeatedly until the LSB is cleared. The
* EEPROM will signal that the command has been completed by clearing
@@ -3712,7 +3711,7 @@ static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw)
* only 0-5mSec on 5V devices)
*/
if (retry_count >= EEPROM_MAX_RETRY_SPI) {
- DEBUGOUT("SPI EEPROM Status error\n");
+ e_dbg("SPI EEPROM Status error\n");
return -E1000_ERR_EEPROM;
}
@@ -3741,7 +3740,7 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
struct e1000_eeprom_info *eeprom = &hw->eeprom;
u32 i = 0;
- DEBUGFUNC("e1000_read_eeprom");
+ e_dbg("e1000_read_eeprom");
/* If eeprom is not yet detected, do so now */
if (eeprom->word_size == 0)
@@ -3752,9 +3751,8 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
*/
if ((offset >= eeprom->word_size)
|| (words > eeprom->word_size - offset) || (words == 0)) {
- DEBUGOUT2
- ("\"words\" parameter out of bounds. Words = %d, size = %d\n",
- offset, eeprom->word_size);
+ e_dbg("\"words\" parameter out of bounds. Words = %d,"
+ "size = %d\n", offset, eeprom->word_size);
return -E1000_ERR_EEPROM;
}
@@ -3832,11 +3830,11 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
u16 checksum = 0;
u16 i, eeprom_data;
- DEBUGFUNC("e1000_validate_eeprom_checksum");
+ e_dbg("e1000_validate_eeprom_checksum");
for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
- DEBUGOUT("EEPROM Read Error\n");
+ e_dbg("EEPROM Read Error\n");
return -E1000_ERR_EEPROM;
}
checksum += eeprom_data;
@@ -3845,7 +3843,7 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
if (checksum == (u16) EEPROM_SUM)
return E1000_SUCCESS;
else {
- DEBUGOUT("EEPROM Checksum Invalid\n");
+ e_dbg("EEPROM Checksum Invalid\n");
return -E1000_ERR_EEPROM;
}
}
@@ -3862,18 +3860,18 @@ s32 e1000_update_eeprom_checksum(struct e1000_hw *hw)
u16 checksum = 0;
u16 i, eeprom_data;
- DEBUGFUNC("e1000_update_eeprom_checksum");
+ e_dbg("e1000_update_eeprom_checksum");
for (i = 0; i < EEPROM_CHECKSUM_REG; i++) {
if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
- DEBUGOUT("EEPROM Read Error\n");
+ e_dbg("EEPROM Read Error\n");
return -E1000_ERR_EEPROM;
}
checksum += eeprom_data;
}
checksum = (u16) EEPROM_SUM - checksum;
if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
- DEBUGOUT("EEPROM Write Error\n");
+ e_dbg("EEPROM Write Error\n");
return -E1000_ERR_EEPROM;
}
return E1000_SUCCESS;
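
A note on the invariant behind the two checksum hunks above: the words up to and including EEPROM_CHECKSUM_REG must sum to EEPROM_SUM, so validation simply re-adds everything while the update path stores the complement of the partial sum. Below is a minimal user-space sketch of that relationship; the 0xBABA constant and the 64-word layout are assumptions taken from this driver family, and the sample words are made up.

    #include <stdint.h>
    #include <stdio.h>

    #define EEPROM_SUM          0xBABA
    #define EEPROM_CHECKSUM_REG 0x3F   /* last word of the checksummed region */

    int main(void)
    {
            uint16_t eeprom[EEPROM_CHECKSUM_REG + 1] = { 0x1234, 0x5678 };
            uint16_t sum = 0;
            int i;

            for (i = 0; i < EEPROM_CHECKSUM_REG; i++)
                    sum += eeprom[i];

            /* what e1000_update_eeprom_checksum() writes back ... */
            eeprom[EEPROM_CHECKSUM_REG] = (uint16_t)(EEPROM_SUM - sum);

            /* ... makes e1000_validate_eeprom_checksum()'s total come out right */
            sum += eeprom[EEPROM_CHECKSUM_REG];
            printf("total = 0x%04X (%s)\n", sum,
                   sum == (uint16_t)EEPROM_SUM ? "valid" : "invalid");
            return 0;
    }
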
@@ -3904,7 +3902,7 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
struct e1000_eeprom_info *eeprom = &hw->eeprom;
s32 status = 0;
- DEBUGFUNC("e1000_write_eeprom");
+ e_dbg("e1000_write_eeprom");
/* If eeprom is not yet detected, do so now */
if (eeprom->word_size == 0)
@@ -3915,7 +3913,7 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
*/
if ((offset >= eeprom->word_size)
|| (words > eeprom->word_size - offset) || (words == 0)) {
- DEBUGOUT("\"words\" parameter out of bounds\n");
+ e_dbg("\"words\" parameter out of bounds\n");
return -E1000_ERR_EEPROM;
}
@@ -3949,7 +3947,7 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
struct e1000_eeprom_info *eeprom = &hw->eeprom;
u16 widx = 0;
- DEBUGFUNC("e1000_write_eeprom_spi");
+ e_dbg("e1000_write_eeprom_spi");
while (widx < words) {
u8 write_opcode = EEPROM_WRITE_OPCODE_SPI;
@@ -4013,7 +4011,7 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
u16 words_written = 0;
u16 i = 0;
- DEBUGFUNC("e1000_write_eeprom_microwire");
+ e_dbg("e1000_write_eeprom_microwire");
/* Send the write enable command to the EEPROM (3-bit opcode plus
* 6/8-bit dummy address beginning with 11). It's less work to include
@@ -4056,7 +4054,7 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
udelay(50);
}
if (i == 200) {
- DEBUGOUT("EEPROM Write did not complete\n");
+ e_dbg("EEPROM Write did not complete\n");
return -E1000_ERR_EEPROM;
}
@@ -4092,12 +4090,12 @@ s32 e1000_read_mac_addr(struct e1000_hw *hw)
u16 offset;
u16 eeprom_data, i;
- DEBUGFUNC("e1000_read_mac_addr");
+ e_dbg("e1000_read_mac_addr");
for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) {
offset = i >> 1;
if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) {
- DEBUGOUT("EEPROM Read Error\n");
+ e_dbg("EEPROM Read Error\n");
return -E1000_ERR_EEPROM;
}
hw->perm_mac_addr[i] = (u8) (eeprom_data & 0x00FF);
@@ -4132,17 +4130,17 @@ static void e1000_init_rx_addrs(struct e1000_hw *hw)
u32 i;
u32 rar_num;
- DEBUGFUNC("e1000_init_rx_addrs");
+ e_dbg("e1000_init_rx_addrs");
/* Setup the receive address. */
- DEBUGOUT("Programming MAC Address into RAR[0]\n");
+ e_dbg("Programming MAC Address into RAR[0]\n");
e1000_rar_set(hw, hw->mac_addr, 0);
rar_num = E1000_RAR_ENTRIES;
/* Zero out the other 15 receive addresses. */
- DEBUGOUT("Clearing RAR[1-15]\n");
+ e_dbg("Clearing RAR[1-15]\n");
for (i = 1; i < rar_num; i++) {
E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
E1000_WRITE_FLUSH();
@@ -4290,7 +4288,7 @@ static s32 e1000_id_led_init(struct e1000_hw *hw)
u16 eeprom_data, i, temp;
const u16 led_mask = 0x0F;
- DEBUGFUNC("e1000_id_led_init");
+ e_dbg("e1000_id_led_init");
if (hw->mac_type < e1000_82540) {
/* Nothing to do */
@@ -4303,7 +4301,7 @@ static s32 e1000_id_led_init(struct e1000_hw *hw)
hw->ledctl_mode2 = hw->ledctl_default;
if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) {
- DEBUGOUT("EEPROM Read Error\n");
+ e_dbg("EEPROM Read Error\n");
return -E1000_ERR_EEPROM;
}
@@ -4363,7 +4361,7 @@ s32 e1000_setup_led(struct e1000_hw *hw)
u32 ledctl;
s32 ret_val = E1000_SUCCESS;
- DEBUGFUNC("e1000_setup_led");
+ e_dbg("e1000_setup_led");
switch (hw->mac_type) {
case e1000_82542_rev2_0:
@@ -4415,7 +4413,7 @@ s32 e1000_cleanup_led(struct e1000_hw *hw)
{
s32 ret_val = E1000_SUCCESS;
- DEBUGFUNC("e1000_cleanup_led");
+ e_dbg("e1000_cleanup_led");
switch (hw->mac_type) {
case e1000_82542_rev2_0:
@@ -4451,7 +4449,7 @@ s32 e1000_led_on(struct e1000_hw *hw)
{
u32 ctrl = er32(CTRL);
- DEBUGFUNC("e1000_led_on");
+ e_dbg("e1000_led_on");
switch (hw->mac_type) {
case e1000_82542_rev2_0:
@@ -4497,7 +4495,7 @@ s32 e1000_led_off(struct e1000_hw *hw)
{
u32 ctrl = er32(CTRL);
- DEBUGFUNC("e1000_led_off");
+ e_dbg("e1000_led_off");
switch (hw->mac_type) {
case e1000_82542_rev2_0:
@@ -4626,7 +4624,7 @@ static void e1000_clear_hw_cntrs(struct e1000_hw *hw)
*/
void e1000_reset_adaptive(struct e1000_hw *hw)
{
- DEBUGFUNC("e1000_reset_adaptive");
+ e_dbg("e1000_reset_adaptive");
if (hw->adaptive_ifs) {
if (!hw->ifs_params_forced) {
@@ -4639,7 +4637,7 @@ void e1000_reset_adaptive(struct e1000_hw *hw)
hw->in_ifs_mode = false;
ew32(AIT, 0);
} else {
- DEBUGOUT("Not in Adaptive IFS mode!\n");
+ e_dbg("Not in Adaptive IFS mode!\n");
}
}
@@ -4654,7 +4652,7 @@ void e1000_reset_adaptive(struct e1000_hw *hw)
*/
void e1000_update_adaptive(struct e1000_hw *hw)
{
- DEBUGFUNC("e1000_update_adaptive");
+ e_dbg("e1000_update_adaptive");
if (hw->adaptive_ifs) {
if ((hw->collision_delta *hw->ifs_ratio) > hw->tx_packet_delta) {
@@ -4679,7 +4677,7 @@ void e1000_update_adaptive(struct e1000_hw *hw)
}
}
} else {
- DEBUGOUT("Not in Adaptive IFS mode!\n");
+ e_dbg("Not in Adaptive IFS mode!\n");
}
}
@@ -4851,7 +4849,7 @@ static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
u16 i, phy_data;
u16 cable_length;
- DEBUGFUNC("e1000_get_cable_length");
+ e_dbg("e1000_get_cable_length");
*min_length = *max_length = 0;
@@ -4968,7 +4966,7 @@ static s32 e1000_check_polarity(struct e1000_hw *hw,
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_check_polarity");
+ e_dbg("e1000_check_polarity");
if (hw->phy_type == e1000_phy_m88) {
/* return the Polarity bit in the Status register. */
@@ -5034,7 +5032,7 @@ static s32 e1000_check_downshift(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_check_downshift");
+ e_dbg("e1000_check_downshift");
if (hw->phy_type == e1000_phy_igp) {
ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
@@ -5081,7 +5079,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
};
u16 min_length, max_length;
- DEBUGFUNC("e1000_config_dsp_after_link_change");
+ e_dbg("e1000_config_dsp_after_link_change");
if (hw->phy_type != e1000_phy_igp)
return E1000_SUCCESS;
@@ -5089,7 +5087,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
if (link_up) {
ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
if (ret_val) {
- DEBUGOUT("Error getting link speed and duplex\n");
+ e_dbg("Error getting link speed and duplex\n");
return ret_val;
}
@@ -5289,7 +5287,7 @@ static s32 e1000_set_phy_mode(struct e1000_hw *hw)
s32 ret_val;
u16 eeprom_data;
- DEBUGFUNC("e1000_set_phy_mode");
+ e_dbg("e1000_set_phy_mode");
if ((hw->mac_type == e1000_82545_rev_3) &&
(hw->media_type == e1000_media_type_copper)) {
@@ -5337,7 +5335,7 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
{
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_set_d3_lplu_state");
+ e_dbg("e1000_set_d3_lplu_state");
if (hw->phy_type != e1000_phy_igp)
return E1000_SUCCESS;
@@ -5440,7 +5438,7 @@ static s32 e1000_set_vco_speed(struct e1000_hw *hw)
u16 default_page = 0;
u16 phy_data;
- DEBUGFUNC("e1000_set_vco_speed");
+ e_dbg("e1000_set_vco_speed");
switch (hw->mac_type) {
case e1000_82545_rev_3:
@@ -5613,7 +5611,7 @@ static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw)
*/
static s32 e1000_get_auto_rd_done(struct e1000_hw *hw)
{
- DEBUGFUNC("e1000_get_auto_rd_done");
+ e_dbg("e1000_get_auto_rd_done");
msleep(5);
return E1000_SUCCESS;
}
@@ -5628,7 +5626,7 @@ static s32 e1000_get_auto_rd_done(struct e1000_hw *hw)
*/
static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
{
- DEBUGFUNC("e1000_get_phy_cfg_done");
+ e_dbg("e1000_get_phy_cfg_done");
mdelay(10);
return E1000_SUCCESS;
}
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 9acfddb0dafb..ecd9f6c6bcd5 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -35,6 +35,7 @@
#include "e1000_osdep.h"
+
/* Forward declarations of structures used by the shared code */
struct e1000_hw;
struct e1000_hw_stats;
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index b15ece26ed84..4dd2c23775cb 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -31,7 +31,7 @@
char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
-#define DRV_VERSION "7.3.21-k5-NAPI"
+#define DRV_VERSION "7.3.21-k6-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
@@ -214,6 +214,17 @@ module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/**
+ * e1000_get_hw_dev - return device
+ * used by hardware layer to print debugging information
+ *
+ **/
+struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
+{
+ struct e1000_adapter *adapter = hw->back;
+ return adapter->netdev;
+}
+
+/**
* e1000_init_module - Driver Registration Routine
*
* e1000_init_module is the first routine called when the driver is
@@ -223,18 +234,17 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static int __init e1000_init_module(void)
{
int ret;
- printk(KERN_INFO "%s - version %s\n",
- e1000_driver_string, e1000_driver_version);
+ pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
- printk(KERN_INFO "%s\n", e1000_copyright);
+ pr_info("%s\n", e1000_copyright);
ret = pci_register_driver(&e1000_driver);
if (copybreak != COPYBREAK_DEFAULT) {
if (copybreak == 0)
- printk(KERN_INFO "e1000: copybreak disabled\n");
+ pr_info("copybreak disabled\n");
else
- printk(KERN_INFO "e1000: copybreak enabled for "
- "packets <= %u bytes\n", copybreak);
+ pr_info("copybreak enabled for "
+ "packets <= %u bytes\n", copybreak);
}
return ret;
}
@@ -265,8 +275,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
netdev);
if (err) {
- DPRINTK(PROBE, ERR,
- "Unable to allocate interrupt Error: %d\n", err);
+ e_err("Unable to allocate interrupt Error: %d\n", err);
}
return err;
@@ -648,7 +657,7 @@ void e1000_reset(struct e1000_adapter *adapter)
ew32(WUC, 0);
if (e1000_init_hw(hw))
- DPRINTK(PROBE, ERR, "Hardware Error\n");
+ e_err("Hardware Error\n");
e1000_update_mng_vlan(adapter);
/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
@@ -689,8 +698,7 @@ static void e1000_dump_eeprom(struct e1000_adapter *adapter)
data = kmalloc(eeprom.len, GFP_KERNEL);
if (!data) {
- printk(KERN_ERR "Unable to allocate memory to dump EEPROM"
- " data\n");
+ pr_err("Unable to allocate memory to dump EEPROM data\n");
return;
}
@@ -702,30 +710,25 @@ static void e1000_dump_eeprom(struct e1000_adapter *adapter)
csum_new += data[i] + (data[i + 1] << 8);
csum_new = EEPROM_SUM - csum_new;
- printk(KERN_ERR "/*********************/\n");
- printk(KERN_ERR "Current EEPROM Checksum : 0x%04x\n", csum_old);
- printk(KERN_ERR "Calculated : 0x%04x\n", csum_new);
+ pr_err("/*********************/\n");
+ pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
+ pr_err("Calculated : 0x%04x\n", csum_new);
- printk(KERN_ERR "Offset Values\n");
- printk(KERN_ERR "======== ======\n");
+ pr_err("Offset Values\n");
+ pr_err("======== ======\n");
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
- printk(KERN_ERR "Include this output when contacting your support "
- "provider.\n");
- printk(KERN_ERR "This is not a software error! Something bad "
- "happened to your hardware or\n");
- printk(KERN_ERR "EEPROM image. Ignoring this "
- "problem could result in further problems,\n");
- printk(KERN_ERR "possibly loss of data, corruption or system hangs!\n");
- printk(KERN_ERR "The MAC Address will be reset to 00:00:00:00:00:00, "
- "which is invalid\n");
- printk(KERN_ERR "and requires you to set the proper MAC "
- "address manually before continuing\n");
- printk(KERN_ERR "to enable this network device.\n");
- printk(KERN_ERR "Please inspect the EEPROM dump and report the issue "
- "to your hardware vendor\n");
- printk(KERN_ERR "or Intel Customer Support.\n");
- printk(KERN_ERR "/*********************/\n");
+ pr_err("Include this output when contacting your support provider.\n");
+ pr_err("This is not a software error! Something bad happened to\n");
+ pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
+ pr_err("result in further problems, possibly loss of data,\n");
+ pr_err("corruption or system hangs!\n");
+ pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
+ pr_err("which is invalid and requires you to set the proper MAC\n");
+ pr_err("address manually before continuing to enable this network\n");
+ pr_err("device. Please inspect the EEPROM dump and report the\n");
+ pr_err("issue to your hardware vendor or Intel Customer Support.\n");
+ pr_err("/*********************/\n");
kfree(data);
}
@@ -823,16 +826,16 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
if (err)
return err;
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
- !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+ !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
if (err) {
- E1000_ERR("No usable DMA configuration, "
- "aborting\n");
+ pr_err("No usable DMA config, aborting\n");
goto err_dma;
}
}
@@ -922,7 +925,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
/* initialize eeprom parameters */
if (e1000_init_eeprom_params(hw)) {
- E1000_ERR("EEPROM initialization failed\n");
+ e_err("EEPROM initialization failed\n");
goto err_eeprom;
}
@@ -933,7 +936,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
/* make sure the EEPROM is good */
if (e1000_validate_eeprom_checksum(hw) < 0) {
- DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
+ e_err("The EEPROM Checksum Is Not Valid\n");
e1000_dump_eeprom(adapter);
/*
* set MAC address to all zeroes to invalidate and temporary
@@ -947,14 +950,14 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
} else {
/* copy the MAC address out of the EEPROM */
if (e1000_read_mac_addr(hw))
- DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
+ e_err("EEPROM Read Error\n");
}
/* don't block initialization here due to bad MAC address */
memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);
if (!is_valid_ether_addr(netdev->perm_addr))
- DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
+ e_err("Invalid MAC Address\n");
e1000_get_bus_info(hw);
@@ -1035,8 +1038,16 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
adapter->wol = adapter->eeprom_wol;
device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+ /* reset the hardware with the new settings */
+ e1000_reset(adapter);
+
+ strcpy(netdev->name, "eth%d");
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+
/* print bus type/speed/width info */
- DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
+ e_info("(PCI%s:%s:%s) ",
((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
((hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
(hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
@@ -1044,20 +1055,12 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
(hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
((hw->bus_width == e1000_bus_width_64) ? "64-bit" : "32-bit"));
- printk("%pM\n", netdev->dev_addr);
-
- /* reset the hardware with the new settings */
- e1000_reset(adapter);
-
- strcpy(netdev->name, "eth%d");
- err = register_netdev(netdev);
- if (err)
- goto err_register;
+ e_info("%pM\n", netdev->dev_addr);
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
- DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
+ e_info("Intel(R) PRO/1000 Network Connection\n");
cards_found++;
return 0;
@@ -1157,7 +1160,7 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
/* identify the MAC */
if (e1000_set_mac_type(hw)) {
- DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
+ e_err("Unknown MAC Type\n");
return -EIO;
}
@@ -1190,7 +1193,7 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
adapter->num_rx_queues = 1;
if (e1000_alloc_queues(adapter)) {
- DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
+ e_err("Unable to allocate memory for queues\n");
return -ENOMEM;
}
@@ -1384,8 +1387,7 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
size = sizeof(struct e1000_buffer) * txdr->count;
txdr->buffer_info = vmalloc(size);
if (!txdr->buffer_info) {
- DPRINTK(PROBE, ERR,
- "Unable to allocate memory for the transmit descriptor ring\n");
+ e_err("Unable to allocate memory for the Tx descriptor ring\n");
return -ENOMEM;
}
memset(txdr->buffer_info, 0, size);
@@ -1395,12 +1397,12 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
txdr->size = ALIGN(txdr->size, 4096);
- txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+ txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+ GFP_KERNEL);
if (!txdr->desc) {
setup_tx_desc_die:
vfree(txdr->buffer_info);
- DPRINTK(PROBE, ERR,
- "Unable to allocate memory for the transmit descriptor ring\n");
+ e_err("Unable to allocate memory for the Tx descriptor ring\n");
return -ENOMEM;
}
@@ -1408,29 +1410,32 @@ setup_tx_desc_die:
if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
void *olddesc = txdr->desc;
dma_addr_t olddma = txdr->dma;
- DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes "
- "at %p\n", txdr->size, txdr->desc);
+ e_err("txdr align check failed: %u bytes at %p\n",
+ txdr->size, txdr->desc);
/* Try again, without freeing the previous */
- txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+ txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
+ &txdr->dma, GFP_KERNEL);
/* Failed allocation, critical failure */
if (!txdr->desc) {
- pci_free_consistent(pdev, txdr->size, olddesc, olddma);
+ dma_free_coherent(&pdev->dev, txdr->size, olddesc,
+ olddma);
goto setup_tx_desc_die;
}
if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
/* give up */
- pci_free_consistent(pdev, txdr->size, txdr->desc,
- txdr->dma);
- pci_free_consistent(pdev, txdr->size, olddesc, olddma);
- DPRINTK(PROBE, ERR,
- "Unable to allocate aligned memory "
- "for the transmit descriptor ring\n");
+ dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
+ txdr->dma);
+ dma_free_coherent(&pdev->dev, txdr->size, olddesc,
+ olddma);
+ e_err("Unable to allocate aligned memory "
+ "for the transmit descriptor ring\n");
vfree(txdr->buffer_info);
return -ENOMEM;
} else {
/* Free old allocation, new allocation was successful */
- pci_free_consistent(pdev, txdr->size, olddesc, olddma);
+ dma_free_coherent(&pdev->dev, txdr->size, olddesc,
+ olddma);
}
}
memset(txdr->desc, 0, txdr->size);
@@ -1456,8 +1461,7 @@ int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++) {
err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
if (err) {
- DPRINTK(PROBE, ERR,
- "Allocation for Tx Queue %u failed\n", i);
+ e_err("Allocation for Tx Queue %u failed\n", i);
for (i-- ; i >= 0; i--)
e1000_free_tx_resources(adapter,
&adapter->tx_ring[i]);
@@ -1577,8 +1581,7 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
size = sizeof(struct e1000_buffer) * rxdr->count;
rxdr->buffer_info = vmalloc(size);
if (!rxdr->buffer_info) {
- DPRINTK(PROBE, ERR,
- "Unable to allocate memory for the receive descriptor ring\n");
+ e_err("Unable to allocate memory for the Rx descriptor ring\n");
return -ENOMEM;
}
memset(rxdr->buffer_info, 0, size);
@@ -1590,11 +1593,11 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
rxdr->size = rxdr->count * desc_len;
rxdr->size = ALIGN(rxdr->size, 4096);
- rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+ rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+ GFP_KERNEL);
if (!rxdr->desc) {
- DPRINTK(PROBE, ERR,
- "Unable to allocate memory for the receive descriptor ring\n");
+ e_err("Unable to allocate memory for the Rx descriptor ring\n");
setup_rx_desc_die:
vfree(rxdr->buffer_info);
return -ENOMEM;
@@ -1604,31 +1607,33 @@ setup_rx_desc_die:
if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
void *olddesc = rxdr->desc;
dma_addr_t olddma = rxdr->dma;
- DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes "
- "at %p\n", rxdr->size, rxdr->desc);
+ e_err("rxdr align check failed: %u bytes at %p\n",
+ rxdr->size, rxdr->desc);
/* Try again, without freeing the previous */
- rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+ rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
+ &rxdr->dma, GFP_KERNEL);
/* Failed allocation, critical failure */
if (!rxdr->desc) {
- pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
- DPRINTK(PROBE, ERR,
- "Unable to allocate memory "
- "for the receive descriptor ring\n");
+ dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
+ olddma);
+ e_err("Unable to allocate memory for the Rx descriptor "
+ "ring\n");
goto setup_rx_desc_die;
}
if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
/* give up */
- pci_free_consistent(pdev, rxdr->size, rxdr->desc,
- rxdr->dma);
- pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
- DPRINTK(PROBE, ERR,
- "Unable to allocate aligned memory "
- "for the receive descriptor ring\n");
+ dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
+ rxdr->dma);
+ dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
+ olddma);
+ e_err("Unable to allocate aligned memory for the Rx "
+ "descriptor ring\n");
goto setup_rx_desc_die;
} else {
/* Free old allocation, new allocation was successful */
- pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
+ dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
+ olddma);
}
}
memset(rxdr->desc, 0, rxdr->size);
@@ -1655,8 +1660,7 @@ int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++) {
err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
if (err) {
- DPRINTK(PROBE, ERR,
- "Allocation for Rx Queue %u failed\n", i);
+ e_err("Allocation for Rx Queue %u failed\n", i);
for (i-- ; i >= 0; i--)
e1000_free_rx_resources(adapter,
&adapter->rx_ring[i]);
@@ -1804,7 +1808,8 @@ static void e1000_free_tx_resources(struct e1000_adapter *adapter,
vfree(tx_ring->buffer_info);
tx_ring->buffer_info = NULL;
- pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+ dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
tx_ring->desc = NULL;
}
@@ -1829,12 +1834,12 @@ static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
{
if (buffer_info->dma) {
if (buffer_info->mapped_as_page)
- pci_unmap_page(adapter->pdev, buffer_info->dma,
- buffer_info->length, PCI_DMA_TODEVICE);
+ dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
else
- pci_unmap_single(adapter->pdev, buffer_info->dma,
+ dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
buffer_info->dma = 0;
}
if (buffer_info->skb) {
@@ -1912,7 +1917,8 @@ static void e1000_free_rx_resources(struct e1000_adapter *adapter,
vfree(rx_ring->buffer_info);
rx_ring->buffer_info = NULL;
- pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+ dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -1952,14 +1958,14 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
buffer_info = &rx_ring->buffer_info[i];
if (buffer_info->dma &&
adapter->clean_rx == e1000_clean_rx_irq) {
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
buffer_info->length,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
} else if (buffer_info->dma &&
adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
- pci_unmap_page(pdev, buffer_info->dma,
- buffer_info->length,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, buffer_info->dma,
+ buffer_info->length,
+ DMA_FROM_DEVICE);
}
buffer_info->dma = 0;
@@ -2098,7 +2104,6 @@ static void e1000_set_rx_mode(struct net_device *netdev)
struct e1000_hw *hw = &adapter->hw;
struct netdev_hw_addr *ha;
bool use_uc = false;
- struct dev_addr_list *mc_ptr;
u32 rctl;
u32 hash_value;
int i, rar_entries = E1000_RAR_ENTRIES;
@@ -2106,7 +2111,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
if (!mcarray) {
- DPRINTK(PROBE, ERR, "memory allocation failed\n");
+ e_err("memory allocation failed\n");
return;
}
@@ -2158,17 +2163,17 @@ static void e1000_set_rx_mode(struct net_device *netdev)
WARN_ON(i == rar_entries);
- netdev_for_each_mc_addr(mc_ptr, netdev) {
+ netdev_for_each_mc_addr(ha, netdev) {
if (i == rar_entries) {
/* load any remaining addresses into the hash table */
u32 hash_reg, hash_bit, mta;
- hash_value = e1000_hash_mc_addr(hw, mc_ptr->da_addr);
+ hash_value = e1000_hash_mc_addr(hw, ha->addr);
hash_reg = (hash_value >> 5) & 0x7F;
hash_bit = hash_value & 0x1F;
mta = (1 << hash_bit);
mcarray[hash_reg] |= mta;
} else {
- e1000_rar_set(hw, mc_ptr->da_addr, i++);
+ e1000_rar_set(hw, ha->addr, i++);
}
}
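
For context on the hunk above: once the spare receive-address registers are used up, every further multicast address is folded into the 128 x 32-bit multicast table array, with bits [11:5] of the hash selecting the register and bits [4:0] the bit within it. Here is a small stand-alone sketch of just that mapping; the hash function below is a fake stand-in, not e1000_hash_mc_addr(), and the sample address is arbitrary.

    #include <stdint.h>
    #include <stdio.h>

    /* Fake 12-bit hash; the real e1000_hash_mc_addr() derives it from
     * selected bits of the 48-bit multicast MAC address. */
    static uint32_t fake_mc_hash(const uint8_t *addr)
    {
            return (((uint32_t)addr[4] << 4) | (addr[5] >> 4)) & 0xFFF;
    }

    int main(void)
    {
            const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
            uint32_t mcarray[128] = { 0 };
            uint32_t hash = fake_mc_hash(mc);
            uint32_t hash_reg = (hash >> 5) & 0x7F;   /* which MTA register (0..127) */
            uint32_t hash_bit = hash & 0x1F;          /* which bit inside it (0..31) */

            mcarray[hash_reg] |= 1u << hash_bit;
            printf("hash 0x%03x -> MTA[%u] bit %u\n", hash, hash_reg, hash_bit);
            return 0;
    }
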
@@ -2302,16 +2307,16 @@ static void e1000_watchdog(unsigned long data)
&adapter->link_duplex);
ctrl = er32(CTRL);
- printk(KERN_INFO "e1000: %s NIC Link is Up %d Mbps %s, "
- "Flow Control: %s\n",
- netdev->name,
- adapter->link_speed,
- adapter->link_duplex == FULL_DUPLEX ?
- "Full Duplex" : "Half Duplex",
- ((ctrl & E1000_CTRL_TFCE) && (ctrl &
- E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
- E1000_CTRL_RFCE) ? "RX" : ((ctrl &
- E1000_CTRL_TFCE) ? "TX" : "None" )));
+ pr_info("%s NIC Link is Up %d Mbps %s, "
+ "Flow Control: %s\n",
+ netdev->name,
+ adapter->link_speed,
+ adapter->link_duplex == FULL_DUPLEX ?
+ "Full Duplex" : "Half Duplex",
+ ((ctrl & E1000_CTRL_TFCE) && (ctrl &
+ E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
+ E1000_CTRL_RFCE) ? "RX" : ((ctrl &
+ E1000_CTRL_TFCE) ? "TX" : "None")));
/* adjust timeout factor according to speed/duplex */
adapter->tx_timeout_factor = 1;
@@ -2341,8 +2346,8 @@ static void e1000_watchdog(unsigned long data)
if (netif_carrier_ok(netdev)) {
adapter->link_speed = 0;
adapter->link_duplex = 0;
- printk(KERN_INFO "e1000: %s NIC Link is Down\n",
- netdev->name);
+ pr_info("%s NIC Link is Down\n",
+ netdev->name);
netif_carrier_off(netdev);
if (!test_bit(__E1000_DOWN, &adapter->flags))
@@ -2381,6 +2386,22 @@ link_up:
}
}
+ /* Simple mode for Interrupt Throttle Rate (ITR) */
+ if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
+ /*
+ * Symmetric Tx/Rx gets a reduced ITR=2000;
+ * Total asymmetrical Tx or Rx gets ITR=8000;
+ * everyone else is between 2000-8000.
+ */
+ u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
+ u32 dif = (adapter->gotcl > adapter->gorcl ?
+ adapter->gotcl - adapter->gorcl :
+ adapter->gorcl - adapter->gotcl) / 10000;
+ u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
+
+ ew32(ITR, 1000000000 / (itr * 256));
+ }
+
/* Cause software interrupt to ensure rx ring is cleaned */
ew32(ICS, E1000_ICS_RXDMT0);
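
To make the new itr_setting == 4 path above concrete, the same arithmetic is repeated here as a hedged user-space sketch; only the formula is taken from the hunk, the traffic counters are made-up numbers.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Made-up per-interval octet counters: gotcl = Tx, gorcl = Rx. */
            uint32_t gotcl = 9000000, gorcl = 1000000;

            uint32_t goc = (gotcl + gorcl) / 10000;
            uint32_t dif = (gotcl > gorcl ? gotcl - gorcl : gorcl - gotcl) / 10000;
            uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;

            /* Symmetric traffic (dif == 0) lands on 2000 ints/s, fully one-sided
             * traffic approaches 8000; the ITR register holds an interval, hence
             * the 1000000000 / (itr * 256) conversion before ew32(ITR, ...). */
            printf("itr = %u ints/s, ITR register value = %u\n",
                   itr, 1000000000 / (itr * 256));
            return 0;
    }
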
@@ -2632,8 +2653,7 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter,
break;
default:
if (unlikely(net_ratelimit()))
- DPRINTK(DRV, WARNING,
- "checksum_partial proto=%x!\n", skb->protocol);
+ e_warn("checksum_partial proto=%x!\n", skb->protocol);
break;
}
@@ -2715,9 +2735,10 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
/* set time_stamp *before* dma to help avoid a possible race */
buffer_info->time_stamp = jiffies;
buffer_info->mapped_as_page = false;
- buffer_info->dma = pci_map_single(pdev, skb->data + offset,
- size, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ buffer_info->dma = dma_map_single(&pdev->dev,
+ skb->data + offset,
+ size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
buffer_info->next_to_watch = i;
@@ -2761,10 +2782,10 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
buffer_info->length = size;
buffer_info->time_stamp = jiffies;
buffer_info->mapped_as_page = true;
- buffer_info->dma = pci_map_page(pdev, frag->page,
+ buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
offset, size,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
buffer_info->next_to_watch = i;
@@ -2930,7 +2951,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
unsigned int tx_flags = 0;
- unsigned int len = skb->len - skb->data_len;
+ unsigned int len = skb_headlen(skb);
unsigned int nr_frags;
unsigned int mss;
int count = 0;
@@ -2976,12 +2997,11 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
/* fall through */
pull_size = min((unsigned int)4, skb->data_len);
if (!__pskb_pull_tail(skb, pull_size)) {
- DPRINTK(DRV, ERR,
- "__pskb_pull_tail failed.\n");
+ e_err("__pskb_pull_tail failed.\n");
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
- len = skb->len - skb->data_len;
+ len = skb_headlen(skb);
break;
default:
/* do nothing */
@@ -3125,7 +3145,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
(max_frame > MAX_JUMBO_FRAME_SIZE)) {
- DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
+ e_err("Invalid MTU setting\n");
return -EINVAL;
}
@@ -3133,7 +3153,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
switch (hw->mac_type) {
case e1000_undefined ... e1000_82542_rev2_1:
if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
- DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
+ e_err("Jumbo Frames not supported.\n");
return -EINVAL;
}
break;
@@ -3171,8 +3191,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
(max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
- printk(KERN_INFO "e1000: %s changing MTU from %d to %d\n",
- netdev->name, netdev->mtu, new_mtu);
+ pr_info("%s changing MTU from %d to %d\n",
+ netdev->name, netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
if (netif_running(netdev))
@@ -3485,17 +3505,17 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
!(er32(STATUS) & E1000_STATUS_TXOFF)) {
/* detected Tx unit hang */
- DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
- " Tx Queue <%lu>\n"
- " TDH <%x>\n"
- " TDT <%x>\n"
- " next_to_use <%x>\n"
- " next_to_clean <%x>\n"
- "buffer_info[next_to_clean]\n"
- " time_stamp <%lx>\n"
- " next_to_watch <%x>\n"
- " jiffies <%lx>\n"
- " next_to_watch.status <%x>\n",
+ e_err("Detected Tx Unit Hang\n"
+ " Tx Queue <%lu>\n"
+ " TDH <%x>\n"
+ " TDT <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "buffer_info[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " next_to_watch <%x>\n"
+ " jiffies <%lx>\n"
+ " next_to_watch.status <%x>\n",
(unsigned long)((tx_ring - adapter->tx_ring) /
sizeof(struct e1000_tx_ring)),
readl(hw->hw_addr + tx_ring->tdh),
@@ -3635,8 +3655,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
cleaned = true;
cleaned_count++;
- pci_unmap_page(pdev, buffer_info->dma, buffer_info->length,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_FROM_DEVICE);
buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->length);
@@ -3734,7 +3754,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
/* eth type trans needs skb->data to point to something */
if (!pskb_may_pull(skb, ETH_HLEN)) {
- DPRINTK(DRV, ERR, "pskb_may_pull failed.\n");
+ e_err("pskb_may_pull failed.\n");
dev_kfree_skb(skb);
goto next_desc;
}
@@ -3818,8 +3838,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
cleaned = true;
cleaned_count++;
- pci_unmap_single(pdev, buffer_info->dma, buffer_info->length,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_FROM_DEVICE);
buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->length);
@@ -3834,8 +3854,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
if (adapter->discarding) {
/* All receives must fit into a single buffer */
- E1000_DBG("%s: Receive packet consumed multiple"
- " buffers\n", netdev->name);
+ e_info("Receive packet consumed multiple buffers\n");
/* recycle */
buffer_info->skb = skb;
if (status & E1000_RXD_STAT_EOP)
@@ -3965,8 +3984,8 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
/* Fix for errata 23, can't cross 64kB boundary */
if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
struct sk_buff *oldskb = skb;
- DPRINTK(PROBE, ERR, "skb align check failed: %u bytes "
- "at %p\n", bufsz, skb->data);
+ e_err("skb align check failed: %u bytes at %p\n",
+ bufsz, skb->data);
/* Try again, without freeing the previous */
skb = netdev_alloc_skb_ip_align(netdev, bufsz);
/* Failed allocation, critical failure */
@@ -3999,11 +4018,11 @@ check_page:
}
if (!buffer_info->dma) {
- buffer_info->dma = pci_map_page(pdev,
+ buffer_info->dma = dma_map_page(&pdev->dev,
buffer_info->page, 0,
- buffer_info->length,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+ buffer_info->length,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
put_page(buffer_info->page);
dev_kfree_skb(skb);
buffer_info->page = NULL;
@@ -4074,8 +4093,8 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
/* Fix for errata 23, can't cross 64kB boundary */
if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
struct sk_buff *oldskb = skb;
- DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
- "at %p\n", bufsz, skb->data);
+ e_err("skb align check failed: %u bytes at %p\n",
+ bufsz, skb->data);
/* Try again, without freeing the previous */
skb = netdev_alloc_skb_ip_align(netdev, bufsz);
/* Failed allocation, critical failure */
@@ -4099,11 +4118,11 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
buffer_info->skb = skb;
buffer_info->length = adapter->rx_buffer_len;
map_skb:
- buffer_info->dma = pci_map_single(pdev,
+ buffer_info->dma = dma_map_single(&pdev->dev,
skb->data,
buffer_info->length,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
dev_kfree_skb(skb);
buffer_info->skb = NULL;
buffer_info->dma = 0;
@@ -4120,16 +4139,15 @@ map_skb:
if (!e1000_check_64k_bound(adapter,
(void *)(unsigned long)buffer_info->dma,
adapter->rx_buffer_len)) {
- DPRINTK(RX_ERR, ERR,
- "dma align check failed: %u bytes at %p\n",
- adapter->rx_buffer_len,
- (void *)(unsigned long)buffer_info->dma);
+ e_err("dma align check failed: %u bytes at %p\n",
+ adapter->rx_buffer_len,
+ (void *)(unsigned long)buffer_info->dma);
dev_kfree_skb(skb);
buffer_info->skb = NULL;
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
adapter->alloc_rx_buff_failed++;
@@ -4335,7 +4353,7 @@ void e1000_pci_set_mwi(struct e1000_hw *hw)
int ret_val = pci_set_mwi(adapter->pdev);
if (ret_val)
- DPRINTK(PROBE, ERR, "Error in setting MWI\n");
+ e_err("Error in setting MWI\n");
}
void e1000_pci_clear_mwi(struct e1000_hw *hw)
@@ -4466,7 +4484,7 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
/* Fiber NICs only allow 1000 Mbps Full duplex */
if ((hw->media_type == e1000_media_type_fiber) &&
spddplx != (SPEED_1000 + DUPLEX_FULL)) {
- DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
+ e_err("Unsupported Speed/Duplex configuration\n");
return -EINVAL;
}
@@ -4489,7 +4507,7 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
break;
case SPEED_1000 + DUPLEX_HALF: /* not supported */
default:
- DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
+ e_err("Unsupported Speed/Duplex configuration\n");
return -EINVAL;
}
return 0;
@@ -4612,7 +4630,7 @@ static int e1000_resume(struct pci_dev *pdev)
else
err = pci_enable_device_mem(pdev);
if (err) {
- printk(KERN_ERR "e1000: Cannot enable PCI device from suspend\n");
+ pr_err("Cannot enable PCI device from suspend\n");
return err;
}
pci_set_master(pdev);
@@ -4715,7 +4733,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
else
err = pci_enable_device_mem(pdev);
if (err) {
- printk(KERN_ERR "e1000: Cannot re-enable PCI device after reset.\n");
+ pr_err("Cannot re-enable PCI device after reset.\n");
return PCI_ERS_RESULT_DISCONNECT;
}
pci_set_master(pdev);
@@ -4746,7 +4764,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
if (netif_running(netdev)) {
if (e1000_up(adapter)) {
- printk("e1000: can't bring device back up after reset\n");
+ pr_info("can't bring device back up after reset\n");
return;
}
}
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index d9298522f5ae..edd1c75aa895 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -41,20 +41,6 @@
#include <linux/interrupt.h>
#include <linux/sched.h>
-#ifdef DBG
-#define DEBUGOUT(S) printk(KERN_DEBUG S "\n")
-#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S "\n", A)
-#else
-#define DEBUGOUT(S)
-#define DEBUGOUT1(S, A...)
-#endif
-
-#define DEBUGFUNC(F) DEBUGOUT(F "\n")
-#define DEBUGOUT2 DEBUGOUT1
-#define DEBUGOUT3 DEBUGOUT2
-#define DEBUGOUT7 DEBUGOUT3
-
-
#define er32(reg) \
(readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \
? E1000_##reg : E1000_82542_##reg)))
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index 38d2741ccae9..9fbb562dc964 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -226,17 +226,16 @@ static int __devinit e1000_validate_option(unsigned int *value,
case enable_option:
switch (*value) {
case OPTION_ENABLED:
- DPRINTK(PROBE, INFO, "%s Enabled\n", opt->name);
+ e_dev_info("%s Enabled\n", opt->name);
return 0;
case OPTION_DISABLED:
- DPRINTK(PROBE, INFO, "%s Disabled\n", opt->name);
+ e_dev_info("%s Disabled\n", opt->name);
return 0;
}
break;
case range_option:
if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
- DPRINTK(PROBE, INFO,
- "%s set to %i\n", opt->name, *value);
+ e_dev_info("%s set to %i\n", opt->name, *value);
return 0;
}
break;
@@ -248,7 +247,7 @@ static int __devinit e1000_validate_option(unsigned int *value,
ent = &opt->arg.l.p[i];
if (*value == ent->i) {
if (ent->str[0] != '\0')
- DPRINTK(PROBE, INFO, "%s\n", ent->str);
+ e_dev_info("%s\n", ent->str);
return 0;
}
}
@@ -258,7 +257,7 @@ static int __devinit e1000_validate_option(unsigned int *value,
BUG();
}
- DPRINTK(PROBE, INFO, "Invalid %s value specified (%i) %s\n",
+ e_dev_info("Invalid %s value specified (%i) %s\n",
opt->name, *value, opt->err);
*value = opt->def;
return -1;
@@ -283,9 +282,8 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
int bd = adapter->bd_number;
if (bd >= E1000_MAX_NIC) {
- DPRINTK(PROBE, NOTICE,
- "Warning: no configuration for board #%i\n", bd);
- DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
+ e_dev_warn("Warning: no configuration for board #%i "
+ "using defaults for all values\n", bd);
}
{ /* Transmit Descriptor Count */
@@ -472,27 +470,31 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
adapter->itr = InterruptThrottleRate[bd];
switch (adapter->itr) {
case 0:
- DPRINTK(PROBE, INFO, "%s turned off\n",
- opt.name);
+ e_dev_info("%s turned off\n", opt.name);
break;
case 1:
- DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
- opt.name);
+ e_dev_info("%s set to dynamic mode\n",
+ opt.name);
adapter->itr_setting = adapter->itr;
adapter->itr = 20000;
break;
case 3:
- DPRINTK(PROBE, INFO,
- "%s set to dynamic conservative mode\n",
- opt.name);
+ e_dev_info("%s set to dynamic conservative "
+ "mode\n", opt.name);
adapter->itr_setting = adapter->itr;
adapter->itr = 20000;
break;
+ case 4:
+ e_dev_info("%s set to simplified "
+ "(2000-8000) ints mode\n", opt.name);
+ adapter->itr_setting = adapter->itr;
+ break;
default:
e1000_validate_option(&adapter->itr, &opt,
adapter);
- /* save the setting, because the dynamic bits change itr */
- /* clear the lower two bits because they are
+ /* save the setting, because the dynamic bits
+ * change itr.
+ * clear the lower two bits because they are
* used as control */
adapter->itr_setting = adapter->itr & ~3;
break;
@@ -543,19 +545,18 @@ static void __devinit e1000_check_fiber_options(struct e1000_adapter *adapter)
{
int bd = adapter->bd_number;
if (num_Speed > bd) {
- DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, "
- "parameter ignored\n");
+ e_dev_info("Speed not valid for fiber adapters, parameter "
+ "ignored\n");
}
if (num_Duplex > bd) {
- DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, "
- "parameter ignored\n");
+ e_dev_info("Duplex not valid for fiber adapters, parameter "
+ "ignored\n");
}
if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
- DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is "
- "not valid for fiber adapters, "
- "parameter ignored\n");
+ e_dev_info("AutoNeg other than 1000/Full is not valid for fiber"
+ "adapters, parameter ignored\n");
}
}
@@ -619,9 +620,8 @@ static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
}
if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
- DPRINTK(PROBE, INFO,
- "AutoNeg specified along with Speed or Duplex, "
- "parameter ignored\n");
+ e_dev_info("AutoNeg specified along with Speed or Duplex, "
+ "parameter ignored\n");
adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
} else { /* Autoneg */
static const struct e1000_opt_list an_list[] =
@@ -680,79 +680,72 @@ static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
case 0:
adapter->hw.autoneg = adapter->fc_autoneg = 1;
if ((num_Speed > bd) && (speed != 0 || dplx != 0))
- DPRINTK(PROBE, INFO,
- "Speed and duplex autonegotiation enabled\n");
+ e_dev_info("Speed and duplex autonegotiation "
+ "enabled\n");
break;
case HALF_DUPLEX:
- DPRINTK(PROBE, INFO, "Half Duplex specified without Speed\n");
- DPRINTK(PROBE, INFO, "Using Autonegotiation at "
- "Half Duplex only\n");
+ e_dev_info("Half Duplex specified without Speed\n");
+ e_dev_info("Using Autonegotiation at Half Duplex only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
ADVERTISE_100_HALF;
break;
case FULL_DUPLEX:
- DPRINTK(PROBE, INFO, "Full Duplex specified without Speed\n");
- DPRINTK(PROBE, INFO, "Using Autonegotiation at "
- "Full Duplex only\n");
+ e_dev_info("Full Duplex specified without Speed\n");
+ e_dev_info("Using Autonegotiation at Full Duplex only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_FULL |
ADVERTISE_100_FULL |
ADVERTISE_1000_FULL;
break;
case SPEED_10:
- DPRINTK(PROBE, INFO, "10 Mbps Speed specified "
- "without Duplex\n");
- DPRINTK(PROBE, INFO, "Using Autonegotiation at 10 Mbps only\n");
+ e_dev_info("10 Mbps Speed specified without Duplex\n");
+ e_dev_info("Using Autonegotiation at 10 Mbps only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
ADVERTISE_10_FULL;
break;
case SPEED_10 + HALF_DUPLEX:
- DPRINTK(PROBE, INFO, "Forcing to 10 Mbps Half Duplex\n");
+ e_dev_info("Forcing to 10 Mbps Half Duplex\n");
adapter->hw.autoneg = adapter->fc_autoneg = 0;
adapter->hw.forced_speed_duplex = e1000_10_half;
adapter->hw.autoneg_advertised = 0;
break;
case SPEED_10 + FULL_DUPLEX:
- DPRINTK(PROBE, INFO, "Forcing to 10 Mbps Full Duplex\n");
+ e_dev_info("Forcing to 10 Mbps Full Duplex\n");
adapter->hw.autoneg = adapter->fc_autoneg = 0;
adapter->hw.forced_speed_duplex = e1000_10_full;
adapter->hw.autoneg_advertised = 0;
break;
case SPEED_100:
- DPRINTK(PROBE, INFO, "100 Mbps Speed specified "
- "without Duplex\n");
- DPRINTK(PROBE, INFO, "Using Autonegotiation at "
- "100 Mbps only\n");
+ e_dev_info("100 Mbps Speed specified without Duplex\n");
+ e_dev_info("Using Autonegotiation at 100 Mbps only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_100_HALF |
ADVERTISE_100_FULL;
break;
case SPEED_100 + HALF_DUPLEX:
- DPRINTK(PROBE, INFO, "Forcing to 100 Mbps Half Duplex\n");
+ e_dev_info("Forcing to 100 Mbps Half Duplex\n");
adapter->hw.autoneg = adapter->fc_autoneg = 0;
adapter->hw.forced_speed_duplex = e1000_100_half;
adapter->hw.autoneg_advertised = 0;
break;
case SPEED_100 + FULL_DUPLEX:
- DPRINTK(PROBE, INFO, "Forcing to 100 Mbps Full Duplex\n");
+ e_dev_info("Forcing to 100 Mbps Full Duplex\n");
adapter->hw.autoneg = adapter->fc_autoneg = 0;
adapter->hw.forced_speed_duplex = e1000_100_full;
adapter->hw.autoneg_advertised = 0;
break;
case SPEED_1000:
- DPRINTK(PROBE, INFO, "1000 Mbps Speed specified without "
- "Duplex\n");
+ e_dev_info("1000 Mbps Speed specified without Duplex\n");
goto full_duplex_only;
case SPEED_1000 + HALF_DUPLEX:
- DPRINTK(PROBE, INFO,
- "Half Duplex is not supported at 1000 Mbps\n");
+ e_dev_info("Half Duplex is not supported at 1000 Mbps\n");
/* fall through */
case SPEED_1000 + FULL_DUPLEX:
full_duplex_only:
- DPRINTK(PROBE, INFO,
- "Using Autonegotiation at 1000 Mbps Full Duplex only\n");
+ e_dev_info("Using Autonegotiation at 1000 Mbps Full Duplex "
+ "only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
break;
@@ -762,9 +755,8 @@ full_duplex_only:
/* Speed, AutoNeg and MDI/MDI-X must all play nice */
if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) {
- DPRINTK(PROBE, INFO,
- "Speed, AutoNeg and MDI-X specifications are "
- "incompatible. Setting MDI-X to a compatible value.\n");
+ e_dev_info("Speed, AutoNeg and MDI-X specs are incompatible. "
+ "Setting MDI-X to a compatible value.\n");
}
}
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 90155552ea09..1e73eddee24a 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -323,7 +323,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
}
/*
- * Initialze device specific counter of SMBI acquisition
+ * Initialize device specific counter of SMBI acquisition
* timeouts.
*/
hw->dev_spec.e82571.smb_counter = 0;
@@ -1845,7 +1845,7 @@ struct e1000_info e1000_82574_info = {
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
| FLAG_HAS_CTRLEXT_ON_LOAD,
- .pba = 20,
+ .pba = 36,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
.mac_ops = &e82571_mac_ops,
@@ -1862,7 +1862,7 @@ struct e1000_info e1000_82583_info = {
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
| FLAG_HAS_CTRLEXT_ON_LOAD,
- .pba = 20,
+ .pba = 36,
.max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
.get_variants = e1000_get_variants_82571,
.mac_ops = &e82571_mac_ops,
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index e301e26d6897..7f760aa9efe5 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -214,6 +214,8 @@
#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
+#define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */
+#define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */
#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index ee32b9b27a9f..c0b3db40bd73 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -43,25 +43,16 @@
struct e1000_info;
-#define e_printk(level, adapter, format, arg...) \
- printk(level "%s: %s: " format, pci_name(adapter->pdev), \
- adapter->netdev->name, ## arg)
-
-#ifdef DEBUG
#define e_dbg(format, arg...) \
- e_printk(KERN_DEBUG , hw->adapter, format, ## arg)
-#else
-#define e_dbg(format, arg...) do { (void)(hw); } while (0)
-#endif
-
+ netdev_dbg(hw->adapter->netdev, format, ## arg)
#define e_err(format, arg...) \
- e_printk(KERN_ERR, adapter, format, ## arg)
+ netdev_err(adapter->netdev, format, ## arg)
#define e_info(format, arg...) \
- e_printk(KERN_INFO, adapter, format, ## arg)
+ netdev_info(adapter->netdev, format, ## arg)
#define e_warn(format, arg...) \
- e_printk(KERN_WARNING, adapter, format, ## arg)
+ netdev_warn(adapter->netdev, format, ## arg)
#define e_notice(format, arg...) \
- e_printk(KERN_NOTICE, adapter, format, ## arg)
+ netdev_notice(adapter->netdev, format, ## arg)
/* Interrupt modes, as used by the IntMode parameter */
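
The net effect of the e1000.h hunk above is that the driver-local wrappers now defer to the generic netdev_* helpers, which prefix messages with the interface name themselves. A hypothetical call site, assuming the usual struct e1000_adapter pointer that these macros expect is in scope:

    /* Fragment meant for a driver file that already includes "e1000.h";
     * the function name and message text are illustrative only. */
    static void example_report(struct e1000_adapter *adapter)
    {
            e_err("EEPROM Read Error\n");  /* -> netdev_err(adapter->netdev, ...) */
            e_info("interface is up\n");   /* -> netdev_info(adapter->netdev, ...) */
    }
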
@@ -159,6 +150,9 @@ struct e1000_info;
#define HV_M_STATUS_SPEED_1000 0x0200
#define HV_M_STATUS_LINK_UP 0x0040
+/* Time to wait before putting the device into D3 if there's no link (in ms). */
+#define LINK_TIMEOUT 100
+
enum e1000_boards {
board_82571,
board_82572,
@@ -195,6 +189,8 @@ struct e1000_buffer {
unsigned long time_stamp;
u16 length;
u16 next_to_watch;
+ unsigned int segs;
+ unsigned int bytecount;
u16 mapped_as_page;
};
/* Rx */
@@ -370,6 +366,8 @@ struct e1000_adapter {
struct work_struct update_phy_task;
struct work_struct led_blink_task;
struct work_struct print_hang_task;
+
+ bool idle_check;
};
struct e1000_info {
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 983493f2330c..6ff376cfe139 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -412,7 +412,6 @@ static int e1000_set_tso(struct net_device *netdev, u32 data)
netdev->features &= ~NETIF_F_TSO6;
}
- e_info("TSO is %s\n", data ? "Enabled" : "Disabled");
adapter->flags |= FLAG_TSO_FORCE;
return 0;
}
@@ -1069,10 +1068,10 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
if (tx_ring->desc && tx_ring->buffer_info) {
for (i = 0; i < tx_ring->count; i++) {
if (tx_ring->buffer_info[i].dma)
- pci_unmap_single(pdev,
+ dma_unmap_single(&pdev->dev,
tx_ring->buffer_info[i].dma,
tx_ring->buffer_info[i].length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
if (tx_ring->buffer_info[i].skb)
dev_kfree_skb(tx_ring->buffer_info[i].skb);
}
@@ -1081,9 +1080,9 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
if (rx_ring->desc && rx_ring->buffer_info) {
for (i = 0; i < rx_ring->count; i++) {
if (rx_ring->buffer_info[i].dma)
- pci_unmap_single(pdev,
+ dma_unmap_single(&pdev->dev,
rx_ring->buffer_info[i].dma,
- 2048, PCI_DMA_FROMDEVICE);
+ 2048, DMA_FROM_DEVICE);
if (rx_ring->buffer_info[i].skb)
dev_kfree_skb(rx_ring->buffer_info[i].skb);
}
@@ -1163,9 +1162,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
tx_ring->buffer_info[i].skb = skb;
tx_ring->buffer_info[i].length = skb->len;
tx_ring->buffer_info[i].dma =
- pci_map_single(pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, tx_ring->buffer_info[i].dma)) {
+ dma_map_single(&pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev,
+ tx_ring->buffer_info[i].dma)) {
ret_val = 4;
goto err_nomem;
}
@@ -1226,9 +1226,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
skb_reserve(skb, NET_IP_ALIGN);
rx_ring->buffer_info[i].skb = skb;
rx_ring->buffer_info[i].dma =
- pci_map_single(pdev, skb->data, 2048,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, rx_ring->buffer_info[i].dma)) {
+ dma_map_single(&pdev->dev, skb->data, 2048,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev,
+ rx_ring->buffer_info[i].dma)) {
ret_val = 8;
goto err_nomem;
}
@@ -1556,10 +1557,10 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
for (i = 0; i < 64; i++) { /* send the packets */
e1000_create_lbtest_frame(tx_ring->buffer_info[k].skb,
1024);
- pci_dma_sync_single_for_device(pdev,
+ dma_sync_single_for_device(&pdev->dev,
tx_ring->buffer_info[k].dma,
tx_ring->buffer_info[k].length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
k++;
if (k == tx_ring->count)
k = 0;
@@ -1569,9 +1570,9 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
time = jiffies; /* set the start time for the receive */
good_cnt = 0;
do { /* receive the sent packets */
- pci_dma_sync_single_for_cpu(pdev,
+ dma_sync_single_for_cpu(&pdev->dev,
rx_ring->buffer_info[l].dma, 2048,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
ret_val = e1000_check_lbtest_frame(
rx_ring->buffer_info[l].skb, 1024);
@@ -1889,7 +1890,7 @@ static int e1000_get_coalesce(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- if (adapter->itr_setting <= 3)
+ if (adapter->itr_setting <= 4)
ec->rx_coalesce_usecs = adapter->itr_setting;
else
ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting;
@@ -1904,12 +1905,14 @@ static int e1000_set_coalesce(struct net_device *netdev,
struct e1000_hw *hw = &adapter->hw;
if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) ||
- ((ec->rx_coalesce_usecs > 3) &&
+ ((ec->rx_coalesce_usecs > 4) &&
(ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) ||
(ec->rx_coalesce_usecs == 2))
return -EINVAL;
- if (ec->rx_coalesce_usecs <= 3) {
+ if (ec->rx_coalesce_usecs == 4) {
+ adapter->itr = adapter->itr_setting = 4;
+ } else if (ec->rx_coalesce_usecs <= 3) {
adapter->itr = 20000;
adapter->itr_setting = ec->rx_coalesce_usecs;
} else {
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 8b5e157e9c87..b8c4dce01a04 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -83,6 +83,8 @@
#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */
+/* FW established a valid mode */
+#define E1000_ICH_FWSM_FW_VALID 0x00008000
#define E1000_ICH_MNG_IAMT_MODE 0x2
@@ -259,6 +261,7 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
+ u32 ctrl;
s32 ret_val = 0;
phy->addr = 1;
@@ -274,6 +277,33 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+ /*
+ * The MAC-PHY interconnect may still be in SMBus mode
+ * after Sx->S0. Toggle the LANPHYPC Value bit to force
+ * the interconnect to PCIe mode, but only if there is no
+ * firmware present; otherwise the firmware will have done it.
+ */
+ ctrl = er32(CTRL);
+ ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
+ ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
+ ew32(CTRL, ctrl);
+ udelay(10);
+ ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
+ ew32(CTRL, ctrl);
+ msleep(50);
+ }
+
+ /*
+ * Reset the PHY before any access to it. Doing so ensures that
+ * the PHY is in a known good state before we read/write PHY registers.
+ * The generic reset is sufficient here, because we haven't determined
+ * the PHY type yet.
+ */
+ ret_val = e1000e_phy_hw_reset_generic(hw);
+ if (ret_val)
+ goto out;
+
phy->id = e1000_phy_unknown;
ret_val = e1000e_get_phy_id(hw);
if (ret_val)
@@ -1622,7 +1652,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
/* Check if the flash descriptor is valid */
if (hsfsts.hsf_status.fldesvalid == 0) {
e_dbg("Flash descriptor invalid. "
- "SW Sequencing must be used.");
+ "SW Sequencing must be used.\n");
return -E1000_ERR_NVM;
}
@@ -1671,7 +1701,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
hsfsts.hsf_status.flcdone = 1;
ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
} else {
- e_dbg("Flash controller busy, cannot get access");
+ e_dbg("Flash controller busy, cannot get access\n");
}
}
@@ -1822,7 +1852,7 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
continue;
} else if (hsfsts.hsf_status.flcdone == 0) {
e_dbg("Timeout error - flash cycle "
- "did not complete.");
+ "did not complete.\n");
break;
}
}
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index a8b2c0de27c4..b0d2a60aa490 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -1262,24 +1262,21 @@ s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *dup
u32 status;
status = er32(STATUS);
- if (status & E1000_STATUS_SPEED_1000) {
+ if (status & E1000_STATUS_SPEED_1000)
*speed = SPEED_1000;
- e_dbg("1000 Mbs, ");
- } else if (status & E1000_STATUS_SPEED_100) {
+ else if (status & E1000_STATUS_SPEED_100)
*speed = SPEED_100;
- e_dbg("100 Mbs, ");
- } else {
+ else
*speed = SPEED_10;
- e_dbg("10 Mbs, ");
- }
- if (status & E1000_STATUS_FD) {
+ if (status & E1000_STATUS_FD)
*duplex = FULL_DUPLEX;
- e_dbg("Full Duplex\n");
- } else {
+ else
*duplex = HALF_DUPLEX;
- e_dbg("Half Duplex\n");
- }
+
+ e_dbg("%u Mbps, %s Duplex\n",
+ *speed == SPEED_1000 ? 1000 : *speed == SPEED_100 ? 100 : 10,
+ *duplex == FULL_DUPLEX ? "Full" : "Half");
return 0;
}
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index dbf81788bb40..dfddc60c6f66 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -26,6 +26,8 @@
*******************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
@@ -45,11 +47,12 @@
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/pm_qos_params.h>
+#include <linux/pm_runtime.h>
#include <linux/aer.h>
#include "e1000.h"
-#define DRV_VERSION "1.0.2-k2"
+#define DRV_VERSION "1.0.2-k4"
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
@@ -66,6 +69,361 @@ static const struct e1000_info *e1000_info_tbl[] = {
[board_pchlan] = &e1000_pch_info,
};
+struct e1000_reg_info {
+ u32 ofs;
+ char *name;
+};
+
+#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
+#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
+#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
+#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
+#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
+
+#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
+#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
+#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
+#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
+#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
+
+static const struct e1000_reg_info e1000_reg_info_tbl[] = {
+
+ /* General Registers */
+ {E1000_CTRL, "CTRL"},
+ {E1000_STATUS, "STATUS"},
+ {E1000_CTRL_EXT, "CTRL_EXT"},
+
+ /* Interrupt Registers */
+ {E1000_ICR, "ICR"},
+
+ /* RX Registers */
+ {E1000_RCTL, "RCTL"},
+ {E1000_RDLEN, "RDLEN"},
+ {E1000_RDH, "RDH"},
+ {E1000_RDT, "RDT"},
+ {E1000_RDTR, "RDTR"},
+ {E1000_RXDCTL(0), "RXDCTL"},
+ {E1000_ERT, "ERT"},
+ {E1000_RDBAL, "RDBAL"},
+ {E1000_RDBAH, "RDBAH"},
+ {E1000_RDFH, "RDFH"},
+ {E1000_RDFT, "RDFT"},
+ {E1000_RDFHS, "RDFHS"},
+ {E1000_RDFTS, "RDFTS"},
+ {E1000_RDFPC, "RDFPC"},
+
+ /* TX Registers */
+ {E1000_TCTL, "TCTL"},
+ {E1000_TDBAL, "TDBAL"},
+ {E1000_TDBAH, "TDBAH"},
+ {E1000_TDLEN, "TDLEN"},
+ {E1000_TDH, "TDH"},
+ {E1000_TDT, "TDT"},
+ {E1000_TIDV, "TIDV"},
+ {E1000_TXDCTL(0), "TXDCTL"},
+ {E1000_TADV, "TADV"},
+ {E1000_TARC(0), "TARC"},
+ {E1000_TDFH, "TDFH"},
+ {E1000_TDFT, "TDFT"},
+ {E1000_TDFHS, "TDFHS"},
+ {E1000_TDFTS, "TDFTS"},
+ {E1000_TDFPC, "TDFPC"},
+
+ /* List Terminator */
+ {}
+};
+
+/*
+ * e1000_regdump - register printout routine
+ */
+static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
+{
+ int n = 0;
+ char rname[16];
+ u32 regs[8];
+
+ switch (reginfo->ofs) {
+ case E1000_RXDCTL(0):
+ for (n = 0; n < 2; n++)
+ regs[n] = __er32(hw, E1000_RXDCTL(n));
+ break;
+ case E1000_TXDCTL(0):
+ for (n = 0; n < 2; n++)
+ regs[n] = __er32(hw, E1000_TXDCTL(n));
+ break;
+ case E1000_TARC(0):
+ for (n = 0; n < 2; n++)
+ regs[n] = __er32(hw, E1000_TARC(n));
+ break;
+ default:
+ printk(KERN_INFO "%-15s %08x\n",
+ reginfo->name, __er32(hw, reginfo->ofs));
+ return;
+ }
+
+ snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
+ printk(KERN_INFO "%-15s ", rname);
+ for (n = 0; n < 2; n++)
+ printk(KERN_CONT "%08x ", regs[n]);
+ printk(KERN_CONT "\n");
+}
+
+
+/*
+ * e1000e_dump - Print registers, tx-ring and rx-ring
+ */
+static void e1000e_dump(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_reg_info *reginfo;
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ struct e1000_tx_desc *tx_desc;
+ struct my_u0 { u64 a; u64 b; } *u0;
+ struct e1000_buffer *buffer_info;
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+ union e1000_rx_desc_packet_split *rx_desc_ps;
+ struct e1000_rx_desc *rx_desc;
+ struct my_u1 { u64 a; u64 b; u64 c; u64 d; } *u1;
+ u32 staterr;
+ int i = 0;
+
+ if (!netif_msg_hw(adapter))
+ return;
+
+ /* Print netdevice Info */
+ if (netdev) {
+ dev_info(&adapter->pdev->dev, "Net device Info\n");
+ printk(KERN_INFO "Device Name state "
+ "trans_start last_rx\n");
+ printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
+ netdev->name,
+ netdev->state,
+ netdev->trans_start,
+ netdev->last_rx);
+ }
+
+ /* Print Registers */
+ dev_info(&adapter->pdev->dev, "Register Dump\n");
+ printk(KERN_INFO " Register Name Value\n");
+ for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
+ reginfo->name; reginfo++) {
+ e1000_regdump(hw, reginfo);
+ }
+
+ /* Print TX Ring Summary */
+ if (!netdev || !netif_running(netdev))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
+ printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]"
+ " leng ntw timestamp\n");
+ buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
+ printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
+ 0, tx_ring->next_to_use, tx_ring->next_to_clean,
+ (u64)buffer_info->dma,
+ buffer_info->length,
+ buffer_info->next_to_watch,
+ (u64)buffer_info->time_stamp);
+
+ /* Print TX Rings */
+ if (!netif_msg_tx_done(adapter))
+ goto rx_ring_summary;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
+
+ /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
+ *
+ * Legacy Transmit Descriptor
+ * +--------------------------------------------------------------+
+ * 0 | Buffer Address [63:0] (Reserved on Write Back) |
+ * +--------------------------------------------------------------+
+ * 8 | Special | CSS | Status | CMD | CSO | Length |
+ * +--------------------------------------------------------------+
+ * 63 48 47 36 35 32 31 24 23 16 15 0
+ *
+ * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
+ * 63 48 47 40 39 32 31 16 15 8 7 0
+ * +----------------------------------------------------------------+
+ * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS |
+ * +----------------------------------------------------------------+
+ * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN |
+ * +----------------------------------------------------------------+
+ * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
+ *
+ * Extended Data Descriptor (DTYP=0x1)
+ * +----------------------------------------------------------------+
+ * 0 | Buffer Address [63:0] |
+ * +----------------------------------------------------------------+
+ * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN |
+ * +----------------------------------------------------------------+
+ * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
+ */
+ printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]"
+ " [bi->dma ] leng ntw timestamp bi->skb "
+ "<-- Legacy format\n");
+ printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
+ " [bi->dma ] leng ntw timestamp bi->skb "
+ "<-- Ext Context format\n");
+ printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]"
+ " [bi->dma ] leng ntw timestamp bi->skb "
+ "<-- Ext Data format\n");
+ for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+ tx_desc = E1000_TX_DESC(*tx_ring, i);
+ buffer_info = &tx_ring->buffer_info[i];
+ u0 = (struct my_u0 *)tx_desc;
+ printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX "
+ "%04X %3X %016llX %p",
+ (!(le64_to_cpu(u0->b) & (1<<29)) ? 'l' :
+ ((le64_to_cpu(u0->b) & (1<<20)) ? 'd' : 'c')), i,
+ le64_to_cpu(u0->a), le64_to_cpu(u0->b),
+ (u64)buffer_info->dma, buffer_info->length,
+ buffer_info->next_to_watch, (u64)buffer_info->time_stamp,
+ buffer_info->skb);
+ if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
+ printk(KERN_CONT " NTC/U\n");
+ else if (i == tx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == tx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+
+ if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
+ 16, 1, phys_to_virt(buffer_info->dma),
+ buffer_info->length, true);
+ }
+
+ /* Print RX Rings Summary */
+rx_ring_summary:
+ dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
+ printk(KERN_INFO "Queue [NTU] [NTC]\n");
+ printk(KERN_INFO " %5d %5X %5X\n", 0,
+ rx_ring->next_to_use, rx_ring->next_to_clean);
+
+ /* Print RX Rings */
+ if (!netif_msg_rx_status(adapter))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+ switch (adapter->rx_ps_pages) {
+ case 1:
+ case 2:
+ case 3:
+ /* [Extended] Packet Split Receive Descriptor Format
+ *
+ * +-----------------------------------------------------+
+ * 0 | Buffer Address 0 [63:0] |
+ * +-----------------------------------------------------+
+ * 8 | Buffer Address 1 [63:0] |
+ * +-----------------------------------------------------+
+ * 16 | Buffer Address 2 [63:0] |
+ * +-----------------------------------------------------+
+ * 24 | Buffer Address 3 [63:0] |
+ * +-----------------------------------------------------+
+ */
+ printk(KERN_INFO "R [desc] [buffer 0 63:0 ] "
+ "[buffer 1 63:0 ] "
+ "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] "
+ "[bi->skb] <-- Ext Pkt Split format\n");
+ /* [Extended] Receive Descriptor (Write-Back) Format
+ *
+ * 63 48 47 32 31 13 12 8 7 4 3 0
+ * +------------------------------------------------------+
+ * 0 | Packet | IP | Rsvd | MRQ | Rsvd | MRQ RSS |
+ * | Checksum | Ident | | Queue | | Type |
+ * +------------------------------------------------------+
+ * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+ * +------------------------------------------------------+
+ * 63 48 47 32 31 20 19 0
+ */
+ printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] "
+ "[vl l0 ee es] "
+ "[ l3 l2 l1 hs] [reserved ] ---------------- "
+ "[bi->skb] <-- Ext Rx Write-Back format\n");
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
+ u1 = (struct my_u1 *)rx_desc_ps;
+ staterr =
+ le32_to_cpu(rx_desc_ps->wb.middle.status_error);
+ if (staterr & E1000_RXD_STAT_DD) {
+ /* Descriptor Done */
+ printk(KERN_INFO "RWB[0x%03X] %016llX "
+ "%016llX %016llX %016llX "
+ "---------------- %p", i,
+ le64_to_cpu(u1->a),
+ le64_to_cpu(u1->b),
+ le64_to_cpu(u1->c),
+ le64_to_cpu(u1->d),
+ buffer_info->skb);
+ } else {
+ printk(KERN_INFO "R [0x%03X] %016llX "
+ "%016llX %016llX %016llX %016llX %p", i,
+ le64_to_cpu(u1->a),
+ le64_to_cpu(u1->b),
+ le64_to_cpu(u1->c),
+ le64_to_cpu(u1->d),
+ (u64)buffer_info->dma,
+ buffer_info->skb);
+
+ if (netif_msg_pktdata(adapter))
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS, 16, 1,
+ phys_to_virt(buffer_info->dma),
+ adapter->rx_ps_bsize0, true);
+ }
+
+ if (i == rx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == rx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+ }
+ break;
+ default:
+ case 0:
+ /* Legacy Receive Descriptor Format
+ *
+ * +-----------------------------------------------------+
+ * | Buffer Address [63:0] |
+ * +-----------------------------------------------------+
+ * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
+ * +-----------------------------------------------------+
+ * 63 48 47 40 39 32 31 16 15 0
+ */
+ printk(KERN_INFO "Rl[desc] [address 63:0 ] "
+ "[vl er S cks ln] [bi->dma ] [bi->skb] "
+ "<-- Legacy format\n");
+ for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
+ rx_desc = E1000_RX_DESC(*rx_ring, i);
+ buffer_info = &rx_ring->buffer_info[i];
+ u0 = (struct my_u0 *)rx_desc;
+ printk(KERN_INFO "Rl[0x%03X] %016llX %016llX "
+ "%016llX %p",
+ i, le64_to_cpu(u0->a), le64_to_cpu(u0->b),
+ (u64)buffer_info->dma, buffer_info->skb);
+ if (i == rx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == rx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+
+ if (netif_msg_pktdata(adapter))
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS,
+ 16, 1, phys_to_virt(buffer_info->dma),
+ adapter->rx_buffer_len, true);
+ }
+ }
+
+exit:
+ return;
+}
+
/**
* e1000_desc_unused - calculate if we have unused descriptors
**/
@@ -178,10 +536,10 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
buffer_info->skb = skb;
map_skb:
- buffer_info->dma = pci_map_single(pdev, skb->data,
+ buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
dev_err(&pdev->dev, "RX DMA map failed\n");
adapter->rx_dma_failed++;
break;
@@ -190,26 +548,23 @@ map_skb:
rx_desc = E1000_RX_DESC(*rx_ring, i);
rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+ if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(i, adapter->hw.hw_addr + rx_ring->tail);
+ }
i++;
if (i == rx_ring->count)
i = 0;
buffer_info = &rx_ring->buffer_info[i];
}
- if (rx_ring->next_to_use != i) {
- rx_ring->next_to_use = i;
- if (i-- == 0)
- i = (rx_ring->count - 1);
-
- /*
- * Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
- writel(i, adapter->hw.hw_addr + rx_ring->tail);
- }
+ rx_ring->next_to_use = i;
}
/**
@@ -247,11 +602,12 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
adapter->alloc_rx_buff_failed++;
goto no_buffers;
}
- ps_page->dma = pci_map_page(pdev,
- ps_page->page,
- 0, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, ps_page->dma)) {
+ ps_page->dma = dma_map_page(&pdev->dev,
+ ps_page->page,
+ 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev,
+ ps_page->dma)) {
dev_err(&adapter->pdev->dev,
"RX DMA page map failed\n");
adapter->rx_dma_failed++;
@@ -276,10 +632,10 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
}
buffer_info->skb = skb;
- buffer_info->dma = pci_map_single(pdev, skb->data,
+ buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
adapter->rx_ps_bsize0,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
dev_err(&pdev->dev, "RX DMA map failed\n");
adapter->rx_dma_failed++;
/* cleanup skb */
@@ -290,6 +646,17 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
+ if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
+ }
+
i++;
if (i == rx_ring->count)
i = 0;
@@ -297,26 +664,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
}
no_buffers:
- if (rx_ring->next_to_use != i) {
- rx_ring->next_to_use = i;
-
- if (!(i--))
- i = (rx_ring->count - 1);
-
- /*
- * Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
- /*
- * Hardware increments by 16 bytes, but packet split
- * descriptors are 32 bytes...so we increment tail
- * twice as much.
- */
- writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
- }
+ rx_ring->next_to_use = i;
}
/**
@@ -366,10 +714,10 @@ check_page:
}
if (!buffer_info->dma)
- buffer_info->dma = pci_map_page(pdev,
+ buffer_info->dma = dma_map_page(&pdev->dev,
buffer_info->page, 0,
PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
rx_desc = E1000_RX_DESC(*rx_ring, i);
rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
@@ -443,10 +791,10 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
cleaned = 1;
cleaned_count++;
- pci_unmap_single(pdev,
+ dma_unmap_single(&pdev->dev,
buffer_info->dma,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->length);
@@ -547,12 +895,11 @@ static void e1000_put_txbuf(struct e1000_adapter *adapter,
{
if (buffer_info->dma) {
if (buffer_info->mapped_as_page)
- pci_unmap_page(adapter->pdev, buffer_info->dma,
- buffer_info->length, PCI_DMA_TODEVICE);
+ dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
else
- pci_unmap_single(adapter->pdev, buffer_info->dma,
- buffer_info->length,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
buffer_info->dma = 0;
}
if (buffer_info->skb) {
@@ -643,14 +990,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
cleaned = (i == eop);
if (cleaned) {
- struct sk_buff *skb = buffer_info->skb;
- unsigned int segs, bytecount;
- segs = skb_shinfo(skb)->gso_segs ?: 1;
- /* multiply data chunks by size of headers */
- bytecount = ((segs - 1) * skb_headlen(skb)) +
- skb->len;
- total_tx_packets += segs;
- total_tx_bytes += bytecount;
+ total_tx_packets += buffer_info->segs;
+ total_tx_bytes += buffer_info->bytecount;
}
e1000_put_txbuf(adapter, buffer_info);
@@ -753,9 +1094,9 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
cleaned = 1;
cleaned_count++;
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_ps_bsize0,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
/* see !EOP comment in other rx routine */
@@ -811,13 +1152,13 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
* kmap_atomic, so we can't hold the mapping
* very long
*/
- pci_dma_sync_single_for_cpu(pdev, ps_page->dma,
- PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&pdev->dev, ps_page->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
memcpy(skb_tail_pointer(skb), vaddr, l1);
kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
- pci_dma_sync_single_for_device(pdev, ps_page->dma,
- PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&pdev->dev, ps_page->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
/* remove the CRC */
if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
@@ -834,8 +1175,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
break;
ps_page = &buffer_info->ps_pages[j];
- pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
ps_page->dma = 0;
skb_fill_page_desc(skb, j, ps_page->page, 0, length);
ps_page->page = NULL;
@@ -953,8 +1294,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
cleaned = true;
cleaned_count++;
- pci_unmap_page(pdev, buffer_info->dma, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->length);
@@ -1090,17 +1431,17 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
buffer_info = &rx_ring->buffer_info[i];
if (buffer_info->dma) {
if (adapter->clean_rx == e1000_clean_rx_irq)
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
- pci_unmap_page(pdev, buffer_info->dma,
+ dma_unmap_page(&pdev->dev, buffer_info->dma,
PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_ps_bsize0,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
}
@@ -1118,8 +1459,8 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
ps_page = &buffer_info->ps_pages[j];
if (!ps_page->page)
break;
- pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
ps_page->dma = 0;
put_page(ps_page->page);
ps_page->page = NULL;
@@ -2524,12 +2865,12 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
* excessive C-state transition latencies result in
* dropped transactions.
*/
- pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
- adapter->netdev->name, 55);
+ pm_qos_update_request(
+ adapter->netdev->pm_qos_req, 55);
} else {
- pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
- adapter->netdev->name,
- PM_QOS_DEFAULT_VALUE);
+ pm_qos_update_request(
+ adapter->netdev->pm_qos_req,
+ PM_QOS_DEFAULT_VALUE);
}
}
@@ -2565,7 +2906,7 @@ static void e1000_set_multi(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u8 *mta_list;
u32 rctl;
int i;
@@ -2597,9 +2938,8 @@ static void e1000_set_multi(struct net_device *netdev)
/* prepare a packed array of only addresses. */
i = 0;
- netdev_for_each_mc_addr(mc_ptr, netdev)
- memcpy(mta_list + (i++ * ETH_ALEN),
- mc_ptr->dmi_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
e1000_update_mc_addr_list(hw, mta_list, i);
kfree(mta_list);
@@ -2824,8 +3164,8 @@ int e1000e_up(struct e1000_adapter *adapter)
/* DMA latency requirement to workaround early-receive/jumbo issue */
if (adapter->flags & FLAG_HAS_ERT)
- pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY,
- adapter->netdev->name,
+ adapter->netdev->pm_qos_req =
+ pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
/* hardware has been reset, we need to reload some things */
@@ -2887,9 +3227,11 @@ void e1000e_down(struct e1000_adapter *adapter)
e1000_clean_tx_ring(adapter);
e1000_clean_rx_ring(adapter);
- if (adapter->flags & FLAG_HAS_ERT)
- pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY,
- adapter->netdev->name);
+ if (adapter->flags & FLAG_HAS_ERT) {
+ pm_qos_remove_request(
+ adapter->netdev->pm_qos_req);
+ adapter->netdev->pm_qos_req = NULL;
+ }
/*
* TODO: for power management, we could drop the link and
@@ -3083,12 +3425,15 @@ static int e1000_open(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
int err;
/* disallow open during test */
if (test_bit(__E1000_TESTING, &adapter->state))
return -EBUSY;
+ pm_runtime_get_sync(&pdev->dev);
+
netif_carrier_off(netdev);
/* allocate transmit descriptors */
@@ -3149,6 +3494,9 @@ static int e1000_open(struct net_device *netdev)
netif_start_queue(netdev);
+ adapter->idle_check = true;
+ pm_runtime_put(&pdev->dev);
+
/* fire a link status change interrupt to start the watchdog */
ew32(ICS, E1000_ICS_LSC);
@@ -3162,6 +3510,7 @@ err_setup_rx:
e1000e_free_tx_resources(adapter);
err_setup_tx:
e1000e_reset(adapter);
+ pm_runtime_put_sync(&pdev->dev);
return err;
}
@@ -3180,11 +3529,17 @@ err_setup_tx:
static int e1000_close(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
- e1000e_down(adapter);
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ if (!test_bit(__E1000_DOWN, &adapter->state)) {
+ e1000e_down(adapter);
+ e1000_free_irq(adapter);
+ }
e1000_power_down_phy(adapter);
- e1000_free_irq(adapter);
e1000e_free_tx_resources(adapter);
e1000e_free_rx_resources(adapter);
@@ -3206,6 +3561,8 @@ static int e1000_close(struct net_device *netdev)
if (adapter->flags & FLAG_HAS_AMT)
e1000_release_hw_control(adapter);
+ pm_runtime_put_sync(&pdev->dev);
+
return 0;
}
/**
@@ -3550,6 +3907,9 @@ static void e1000_watchdog_task(struct work_struct *work)
link = e1000e_has_link(adapter);
if ((netif_carrier_ok(netdev)) && link) {
+ /* Cancel scheduled suspend requests. */
+ pm_runtime_resume(netdev->dev.parent);
+
e1000e_enable_receives(adapter);
goto link_up;
}
@@ -3561,6 +3921,10 @@ static void e1000_watchdog_task(struct work_struct *work)
if (link) {
if (!netif_carrier_ok(netdev)) {
bool txb2b = 1;
+
+ /* Cancel scheduled suspend requests. */
+ pm_runtime_resume(netdev->dev.parent);
+
/* update snapshot of PHY registers on LSC */
e1000_phy_read_status(adapter);
mac->ops.get_link_up_info(&adapter->hw,
@@ -3670,6 +4034,9 @@ static void e1000_watchdog_task(struct work_struct *work)
if (adapter->flags & FLAG_RX_NEEDS_RESTART)
schedule_work(&adapter->reset_task);
+ else
+ pm_schedule_suspend(netdev->dev.parent,
+ LINK_TIMEOUT);
}
}
@@ -3705,6 +4072,22 @@ link_up:
}
}
+ /* Simple mode for Interrupt Throttle Rate (ITR) */
+ if (adapter->itr_setting == 4) {
+ /*
+ * Symmetric Tx/Rx gets a reduced ITR=2000;
+ * Total asymmetrical Tx or Rx gets ITR=8000;
+ * everyone else is between 2000-8000.
+ */
+ u32 goc = (adapter->gotc + adapter->gorc) / 10000;
+ u32 dif = (adapter->gotc > adapter->gorc ?
+ adapter->gotc - adapter->gorc :
+ adapter->gorc - adapter->gotc) / 10000;
+ u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
+
+ ew32(ITR, 1000000000 / (itr * 256));
+ }
+
/* Cause software interrupt to ensure Rx ring is cleaned */
if (adapter->msix_entries)
ew32(ICS, adapter->rx_ring->ims_val);
@@ -3879,7 +4262,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
struct e1000_buffer *buffer_info;
unsigned int len = skb_headlen(skb);
unsigned int offset = 0, size, count = 0, i;
- unsigned int f;
+ unsigned int f, bytecount, segs;
i = tx_ring->next_to_use;
@@ -3890,10 +4273,11 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
buffer_info->length = size;
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
- buffer_info->dma = pci_map_single(pdev, skb->data + offset,
- size, PCI_DMA_TODEVICE);
+ buffer_info->dma = dma_map_single(&pdev->dev,
+ skb->data + offset,
+ size, DMA_TO_DEVICE);
buffer_info->mapped_as_page = false;
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
len -= size;
@@ -3925,11 +4309,11 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
buffer_info->length = size;
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
- buffer_info->dma = pci_map_page(pdev, frag->page,
+ buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
offset, size,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
buffer_info->mapped_as_page = true;
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
len -= size;
@@ -3938,7 +4322,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
}
}
+ segs = skb_shinfo(skb)->gso_segs ?: 1;
+ /* multiply data chunks by size of headers */
+ bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
+
tx_ring->buffer_info[i].skb = skb;
+ tx_ring->buffer_info[i].segs = segs;
+ tx_ring->buffer_info[i].bytecount = bytecount;
tx_ring->buffer_info[first].next_to_watch = i;
return count;
@@ -4105,7 +4495,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
unsigned int max_per_txd = E1000_MAX_PER_TXD;
unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
unsigned int tx_flags = 0;
- unsigned int len = skb->len - skb->data_len;
+ unsigned int len = skb_headlen(skb);
unsigned int nr_frags;
unsigned int mss;
int count = 0;
@@ -4155,7 +4545,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
- len = skb->len - skb->data_len;
+ len = skb_headlen(skb);
}
}
@@ -4241,6 +4631,8 @@ static void e1000_reset_task(struct work_struct *work)
struct e1000_adapter *adapter;
adapter = container_of(work, struct e1000_adapter, reset_task);
+ e1000e_dump(adapter);
+ e_err("Reset adapter\n");
e1000e_reinit_locked(adapter);
}
@@ -4475,13 +4867,15 @@ out:
return retval;
}
-static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
+static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
+ bool runtime)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
u32 ctrl, ctrl_ext, rctl, status;
- u32 wufc = adapter->wol;
+ /* Runtime suspend should only enable wakeup for link changes */
+ u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
int retval = 0;
netif_device_detach(netdev);
@@ -4651,20 +5045,13 @@ void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
__e1000e_disable_aspm(pdev, state);
}
-#ifdef CONFIG_PM
-static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
+#ifdef CONFIG_PM_OPS
+static bool e1000e_pm_ready(struct e1000_adapter *adapter)
{
- int retval;
- bool wake;
-
- retval = __e1000_shutdown(pdev, &wake);
- if (!retval)
- e1000_complete_shutdown(pdev, true, wake);
-
- return retval;
+ return !!adapter->tx_ring->buffer_info;
}
-static int e1000_resume(struct pci_dev *pdev)
+static int __e1000_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -4677,18 +5064,6 @@ static int e1000_resume(struct pci_dev *pdev)
if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
- err = pci_enable_device_mem(pdev);
- if (err) {
- dev_err(&pdev->dev,
- "Cannot enable PCI device from suspend\n");
- return err;
- }
-
- pci_set_master(pdev);
-
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_enable_wake(pdev, PCI_D3cold, 0);
-
e1000e_set_interrupt_capability(adapter);
if (netif_running(netdev)) {
err = e1000_request_irq(adapter);
@@ -4746,13 +5121,88 @@ static int e1000_resume(struct pci_dev *pdev)
return 0;
}
-#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int e1000_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int retval;
+ bool wake;
+
+ retval = __e1000_shutdown(pdev, &wake, false);
+ if (!retval)
+ e1000_complete_shutdown(pdev, true, wake);
+
+ return retval;
+}
+
+static int e1000_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (e1000e_pm_ready(adapter))
+ adapter->idle_check = true;
+
+ return __e1000_resume(pdev);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM_RUNTIME
+static int e1000_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (e1000e_pm_ready(adapter)) {
+ bool wake;
+
+ __e1000_shutdown(pdev, &wake, true);
+ }
+
+ return 0;
+}
+
+static int e1000_idle(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (!e1000e_pm_ready(adapter))
+ return 0;
+
+ if (adapter->idle_check) {
+ adapter->idle_check = false;
+ if (!e1000e_has_link(adapter))
+ pm_schedule_suspend(dev, MSEC_PER_SEC);
+ }
+
+ return -EBUSY;
+}
+
+static int e1000_runtime_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (!e1000e_pm_ready(adapter))
+ return 0;
+
+ adapter->idle_check = !dev->power.runtime_auto;
+ return __e1000_resume(pdev);
+}
+#endif /* CONFIG_PM_RUNTIME */
+#endif /* CONFIG_PM_OPS */
static void e1000_shutdown(struct pci_dev *pdev)
{
bool wake = false;
- __e1000_shutdown(pdev, &wake);
+ __e1000_shutdown(pdev, &wake, false);
if (system_state == SYSTEM_POWER_OFF)
e1000_complete_shutdown(pdev, false, wake);
@@ -4826,8 +5276,8 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
result = PCI_ERS_RESULT_DISCONNECT;
} else {
pci_set_master(pdev);
+ pdev->state_saved = true;
pci_restore_state(pdev);
- pci_save_state(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
@@ -4968,16 +5418,16 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
return err;
pci_using_dac = 0;
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err)
pci_using_dac = 1;
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No usable DMA "
"configuration, aborting\n");
@@ -5008,6 +5458,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
SET_NETDEV_DEV(netdev, &pdev->dev);
+ netdev->irq = pdev->irq;
+
pci_set_drvdata(pdev, netdev);
adapter = netdev_priv(netdev);
hw = &adapter->hw;
@@ -5228,6 +5680,12 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
e1000_print_device_info(adapter);
+ if (pci_dev_run_wake(pdev)) {
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ }
+ pm_schedule_suspend(&pdev->dev, MSEC_PER_SEC);
+
return 0;
err_register:
@@ -5270,12 +5728,16 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
+ bool down = test_bit(__E1000_DOWN, &adapter->state);
+
+ pm_runtime_get_sync(&pdev->dev);
/*
* flush_scheduled work may reschedule our watchdog task, so
* explicitly disable watchdog tasks from being rescheduled
*/
- set_bit(__E1000_DOWN, &adapter->state);
+ if (!down)
+ set_bit(__E1000_DOWN, &adapter->state);
del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
@@ -5289,8 +5751,17 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
if (!(netdev->flags & IFF_UP))
e1000_power_down_phy(adapter);
+ /* Don't lie to e1000_close() down the road. */
+ if (!down)
+ clear_bit(__E1000_DOWN, &adapter->state);
unregister_netdev(netdev);
+ if (pci_dev_run_wake(pdev)) {
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ }
+ pm_runtime_put_noidle(&pdev->dev);
+
/*
* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
@@ -5390,16 +5861,22 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
+#ifdef CONFIG_PM_OPS
+static const struct dev_pm_ops e1000_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
+ SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
+ e1000_runtime_resume, e1000_idle)
+};
+#endif
+
/* PCI Device API Driver */
static struct pci_driver e1000_driver = {
.name = e1000e_driver_name,
.id_table = e1000_pci_tbl,
.probe = e1000_probe,
.remove = __devexit_p(e1000_remove),
-#ifdef CONFIG_PM
- /* Power Management Hooks */
- .suspend = e1000_suspend,
- .resume = e1000_resume,
+#ifdef CONFIG_PM_OPS
+ .driver.pm = &e1000_pm_ops,
#endif
.shutdown = e1000_shutdown,
.err_handler = &e1000_err_handler
@@ -5414,10 +5891,9 @@ static struct pci_driver e1000_driver = {
static int __init e1000_init_module(void)
{
int ret;
- printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n",
- e1000e_driver_name, e1000e_driver_version);
- printk(KERN_INFO "%s: Copyright (c) 1999 - 2009 Intel Corporation.\n",
- e1000e_driver_name);
+ pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
+ e1000e_driver_version);
+ pr_info("Copyright (c) 1999 - 2009 Intel Corporation.\n");
ret = pci_register_driver(&e1000_driver);
return ret;
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index 2e399778cae5..0f4077c3d538 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -248,7 +248,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
{ /* Transmit Interrupt Delay */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = range_option,
.name = "Transmit Interrupt Delay",
.err = "using default of "
@@ -267,7 +267,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* Transmit Absolute Interrupt Delay */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = range_option,
.name = "Transmit Absolute Interrupt Delay",
.err = "using default of "
@@ -305,7 +305,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* Receive Absolute Interrupt Delay */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = range_option,
.name = "Receive Absolute Interrupt Delay",
.err = "using default of "
@@ -324,7 +324,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* Interrupt Throttling Rate */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = range_option,
.name = "Interrupt Throttling Rate (ints/sec)",
.err = "using default of "
@@ -351,6 +351,11 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
adapter->itr_setting = adapter->itr;
adapter->itr = 20000;
break;
+ case 4:
+ e_info("%s set to simplified (2000-8000 ints) "
+ "mode\n", opt.name);
+ adapter->itr_setting = 4;
+ break;
default:
/*
* Save the setting, because the dynamic bits
@@ -399,7 +404,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* Smart Power Down */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = enable_option,
.name = "PHY Smart Power Down",
.err = "defaulting to Disabled",
@@ -415,7 +420,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* CRC Stripping */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = enable_option,
.name = "CRC Stripping",
.err = "defaulting to enabled",
@@ -432,7 +437,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* Kumeran Lock Loss Workaround */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = enable_option,
.name = "Kumeran Lock Loss Workaround",
.err = "defaulting to Enabled",
@@ -452,7 +457,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* Write-protect NVM */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = enable_option,
.name = "Write-protect NVM",
.err = "defaulting to Enabled",
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 27c7bdbfa003..8d97f168f018 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -645,7 +645,7 @@ static void __init printEEPROMInfo(struct net_device *dev)
if (GetBit(Word,ee_PortTPE)) printk(KERN_DEBUG "TPE ");
if (GetBit(Word,ee_PortBNC)) printk(KERN_DEBUG "BNC ");
if (GetBit(Word,ee_PortAUI)) printk(KERN_DEBUG "AUI ");
- printk(KERN_DEBUG "port(s) \n");
+ printk(KERN_DEBUG "port(s)\n");
Word = lp->word[6];
printk(KERN_DEBUG "Word6:\n");
@@ -765,7 +765,7 @@ static int __init eepro_probe1(struct net_device *dev, int autoprobe)
/* Grab the region so we can find another board if autoIRQ fails. */
if (!request_region(ioaddr, EEPRO_IO_EXTENT, DRV_NAME)) {
if (!autoprobe)
- printk(KERN_WARNING "EEPRO: io-port 0x%04x in use \n",
+ printk(KERN_WARNING "EEPRO: io-port 0x%04x in use\n",
ioaddr);
return -EBUSY;
}
@@ -1161,8 +1161,7 @@ static netdev_tx_t eepro_send_packet(struct sk_buff *skb,
/* we won't wake queue here because we're out of space */
dev->stats.tx_dropped++;
else {
- dev->stats.tx_bytes+=skb->len;
- dev->trans_start = jiffies;
+ dev->stats.tx_bytes+=skb->len;
netif_wake_queue(dev);
}
@@ -1286,7 +1285,7 @@ set_multicast_list(struct net_device *dev)
struct eepro_local *lp = netdev_priv(dev);
short ioaddr = dev->base_addr;
unsigned short mode;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int mc_count = netdev_mc_count(dev);
if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || mc_count > 63)
@@ -1331,8 +1330,8 @@ set_multicast_list(struct net_device *dev)
outw(0, ioaddr + IO_PORT);
outw(6 * (mc_count + 1), ioaddr + IO_PORT);
- netdev_for_each_mc_addr(dmi, dev) {
- eaddrs = (unsigned short *) dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ eaddrs = (unsigned short *) ha->addr;
outw(*eaddrs++, ioaddr + IO_PORT);
outw(*eaddrs++, ioaddr + IO_PORT);
outw(*eaddrs++, ioaddr + IO_PORT);
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c
index 1a7322b80ea7..43c9c9c5cf4c 100644
--- a/drivers/net/eexpress.c
+++ b/drivers/net/eexpress.c
@@ -543,7 +543,7 @@ static void unstick_cu(struct net_device *dev)
if (lp->started)
{
- if (time_after(jiffies, dev->trans_start + 50))
+ if (time_after(jiffies, dev_trans_start(dev) + HZ/2))
{
if (lp->tx_link==lp->last_tx_restart)
{
@@ -1018,7 +1018,7 @@ static void eexp_hw_tx_pio(struct net_device *dev, unsigned short *buf,
outw(lp->tx_head+0x16, ioaddr + DATAPORT);
outw(0, ioaddr + DATAPORT);
- outsw(ioaddr + DATAPORT, buf, (len+1)>>1);
+ outsw(ioaddr + DATAPORT, buf, (len+1)>>1);
outw(lp->tx_tail+0xc, ioaddr + WRITE_PTR);
outw(lp->tx_head, ioaddr + DATAPORT);
@@ -1575,7 +1575,7 @@ static void eexp_hw_init586(struct net_device *dev)
static void eexp_setup_filter(struct net_device *dev)
{
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
unsigned short ioaddr = dev->base_addr;
int count = netdev_mc_count(dev);
int i;
@@ -1588,8 +1588,8 @@ static void eexp_setup_filter(struct net_device *dev)
outw(CONF_NR_MULTICAST & ~31, ioaddr+SM_PTR);
outw(6*count, ioaddr+SHADOW(CONF_NR_MULTICAST));
i = 0;
- netdev_for_each_mc_addr(dmi, dev) {
- unsigned short *data = (unsigned short *) dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ unsigned short *data = (unsigned short *) ha->addr;
if (i == count)
break;
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index fa311a950996..0630980a2722 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
#include <asm/io.h>
#define DRV_NAME "ehea"
-#define DRV_VERSION "EHEA_0102"
+#define DRV_VERSION "EHEA_0103"
/* eHEA capability flags */
#define DLPAR_PORT_ADD_REM 1
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 809ccc9ff09c..33a41e29ec83 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -791,11 +791,17 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
cqe_counter++;
rmb();
if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
- ehea_error("Send Completion Error: Resetting port");
+ ehea_error("Bad send completion status=0x%04X",
+ cqe->status);
+
if (netif_msg_tx_err(pr->port))
ehea_dump(cqe, sizeof(*cqe), "Send CQE");
- ehea_schedule_port_reset(pr->port);
- break;
+
+ if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
+ ehea_error("Resetting port");
+ ehea_schedule_port_reset(pr->port);
+ break;
+ }
}
if (netif_msg_tx_done(pr->port))
@@ -901,6 +907,8 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
struct ehea_eqe *eqe;
struct ehea_qp *qp;
u32 qp_token;
+ u64 resource_type, aer, aerr;
+ int reset_port = 0;
eqe = ehea_poll_eq(port->qp_eq);
@@ -910,11 +918,24 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
eqe->entry, qp_token);
qp = port->port_res[qp_token].qp;
- ehea_error_data(port->adapter, qp->fw_handle);
+
+ resource_type = ehea_error_data(port->adapter, qp->fw_handle,
+ &aer, &aerr);
+
+ if (resource_type == EHEA_AER_RESTYPE_QP) {
+ if ((aer & EHEA_AER_RESET_MASK) ||
+ (aerr & EHEA_AERR_RESET_MASK))
+ reset_port = 1;
+ } else
+ reset_port = 1; /* Reset in case of CQ or EQ error */
+
eqe = ehea_poll_eq(port->qp_eq);
}
- ehea_schedule_port_reset(port);
+ if (reset_port) {
+ ehea_error("Resetting port");
+ ehea_schedule_port_reset(port);
+ }
return IRQ_HANDLED;
}
@@ -1618,7 +1639,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
{
struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
- int skb_data_size = skb->len - skb->data_len;
+ int skb_data_size = skb_headlen(skb);
int headersize;
/* Packet is TCP with TSO enabled */
@@ -1629,7 +1650,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
*/
headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
- skb_data_size = skb->len - skb->data_len;
+ skb_data_size = skb_headlen(skb);
if (skb_data_size >= headersize) {
/* copy immediate data */
@@ -1651,7 +1672,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
static void write_swqe2_nonTSO(struct sk_buff *skb,
struct ehea_swqe *swqe, u32 lkey)
{
- int skb_data_size = skb->len - skb->data_len;
+ int skb_data_size = skb_headlen(skb);
u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
@@ -1967,7 +1988,7 @@ static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
static void ehea_set_multicast_list(struct net_device *dev)
{
struct ehea_port *port = netdev_priv(dev);
- struct dev_mc_list *k_mcl_entry;
+ struct netdev_hw_addr *ha;
int ret;
if (dev->flags & IFF_PROMISC) {
@@ -1998,8 +2019,8 @@ static void ehea_set_multicast_list(struct net_device *dev)
goto out;
}
- netdev_for_each_mc_addr(k_mcl_entry, dev)
- ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev)
+ ehea_add_multicast_entry(port, ha->addr);
}
out:
@@ -2108,8 +2129,8 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
} else {
/* first copy data from the skb->data buffer ... */
skb_copy_from_linear_data(skb, imm_data,
- skb->len - skb->data_len);
- imm_data += skb->len - skb->data_len;
+ skb_headlen(skb));
+ imm_data += skb_headlen(skb);
/* ... then copy data from the fragments */
for (i = 0; i < nfrags; i++) {
@@ -2220,7 +2241,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
spin_unlock_irqrestore(&pr->netif_queue, flags);
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
spin_unlock(&pr->xmit_lock);
return NETDEV_TX_OK;
@@ -2868,7 +2889,6 @@ static void ehea_rereg_mrs(struct work_struct *work)
int ret, i;
struct ehea_adapter *adapter;
- mutex_lock(&dlpar_mem_lock);
ehea_info("LPAR memory changed - re-initializing driver");
list_for_each_entry(adapter, &adapter_list, list)
@@ -2938,7 +2958,6 @@ static void ehea_rereg_mrs(struct work_struct *work)
}
ehea_info("re-initializing driver complete");
out:
- mutex_unlock(&dlpar_mem_lock);
return;
}
@@ -3521,7 +3540,14 @@ void ehea_crash_handler(void)
static int ehea_mem_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
+ int ret = NOTIFY_BAD;
struct memory_notify *arg = data;
+
+ if (!mutex_trylock(&dlpar_mem_lock)) {
+ ehea_info("ehea_mem_notifier must not be called parallelized");
+ goto out;
+ }
+
switch (action) {
case MEM_CANCEL_OFFLINE:
ehea_info("memory offlining canceled");
@@ -3530,14 +3556,14 @@ static int ehea_mem_notifier(struct notifier_block *nb,
ehea_info("memory is going online");
set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
- return NOTIFY_BAD;
+ goto out_unlock;
ehea_rereg_mrs(NULL);
break;
case MEM_GOING_OFFLINE:
ehea_info("memory is going offline");
set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
- return NOTIFY_BAD;
+ goto out_unlock;
ehea_rereg_mrs(NULL);
break;
default:
@@ -3545,8 +3571,12 @@ static int ehea_mem_notifier(struct notifier_block *nb,
}
ehea_update_firmware_handles();
+ ret = NOTIFY_OK;
- return NOTIFY_OK;
+out_unlock:
+ mutex_unlock(&dlpar_mem_lock);
+out:
+ return ret;
}
static struct notifier_block ehea_mem_nb = {
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index a1b4c7e56367..89128b6373e3 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -229,14 +229,14 @@ u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
int ehea_destroy_cq(struct ehea_cq *cq)
{
- u64 hret;
+ u64 hret, aer, aerr;
if (!cq)
return 0;
hcp_epas_dtor(&cq->epas);
hret = ehea_destroy_cq_res(cq, NORMAL_FREE);
if (hret == H_R_STATE) {
- ehea_error_data(cq->adapter, cq->fw_handle);
+ ehea_error_data(cq->adapter, cq->fw_handle, &aer, &aerr);
hret = ehea_destroy_cq_res(cq, FORCE_FREE);
}
@@ -357,7 +357,7 @@ u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
int ehea_destroy_eq(struct ehea_eq *eq)
{
- u64 hret;
+ u64 hret, aer, aerr;
if (!eq)
return 0;
@@ -365,7 +365,7 @@ int ehea_destroy_eq(struct ehea_eq *eq)
hret = ehea_destroy_eq_res(eq, NORMAL_FREE);
if (hret == H_R_STATE) {
- ehea_error_data(eq->adapter, eq->fw_handle);
+ ehea_error_data(eq->adapter, eq->fw_handle, &aer, &aerr);
hret = ehea_destroy_eq_res(eq, FORCE_FREE);
}
@@ -540,7 +540,7 @@ u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
int ehea_destroy_qp(struct ehea_qp *qp)
{
- u64 hret;
+ u64 hret, aer, aerr;
if (!qp)
return 0;
@@ -548,7 +548,7 @@ int ehea_destroy_qp(struct ehea_qp *qp)
hret = ehea_destroy_qp_res(qp, NORMAL_FREE);
if (hret == H_R_STATE) {
- ehea_error_data(qp->adapter, qp->fw_handle);
+ ehea_error_data(qp->adapter, qp->fw_handle, &aer, &aerr);
hret = ehea_destroy_qp_res(qp, FORCE_FREE);
}
@@ -986,42 +986,45 @@ void print_error_data(u64 *data)
if (length > EHEA_PAGESIZE)
length = EHEA_PAGESIZE;
- if (type == 0x8) /* Queue Pair */
+ if (type == EHEA_AER_RESTYPE_QP)
ehea_error("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, "
"port=%llX", resource, data[6], data[12], data[22]);
-
- if (type == 0x4) /* Completion Queue */
+ else if (type == EHEA_AER_RESTYPE_CQ)
ehea_error("CQ (resource=%llX) state: AER=0x%llX", resource,
data[6]);
-
- if (type == 0x3) /* Event Queue */
+ else if (type == EHEA_AER_RESTYPE_EQ)
ehea_error("EQ (resource=%llX) state: AER=0x%llX", resource,
data[6]);
ehea_dump(data, length, "error data");
}
-void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle)
+u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
+ u64 *aer, u64 *aerr)
{
unsigned long ret;
u64 *rblock;
+ u64 type = 0;
rblock = (void *)get_zeroed_page(GFP_KERNEL);
if (!rblock) {
ehea_error("Cannot allocate rblock memory.");
- return;
+ goto out;
}
- ret = ehea_h_error_data(adapter->handle,
- res_handle,
- rblock);
+ ret = ehea_h_error_data(adapter->handle, res_handle, rblock);
- if (ret == H_R_STATE)
- ehea_error("No error data is available: %llX.", res_handle);
- else if (ret == H_SUCCESS)
+ if (ret == H_SUCCESS) {
+ type = EHEA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
+ *aer = rblock[6];
+ *aerr = rblock[12];
print_error_data(rblock);
- else
+ } else if (ret == H_R_STATE) {
+ ehea_error("No error data available: %llX.", res_handle);
+ } else
ehea_error("Error data could not be fetched: %llX", res_handle);
free_page((unsigned long)rblock);
+out:
+ return type;
}
diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h
index 0817c1e74a19..882c50c9c34f 100644
--- a/drivers/net/ehea/ehea_qmr.h
+++ b/drivers/net/ehea/ehea_qmr.h
@@ -154,6 +154,9 @@ struct ehea_rwqe {
#define EHEA_CQE_STAT_ERR_IP 0x2000
#define EHEA_CQE_STAT_ERR_CRC 0x1000
+/* Defines which bad send cqe statuses lead to a port reset */
+#define EHEA_CQE_STAT_RESET_MASK 0x0002
+
struct ehea_cqe {
u64 wr_id; /* work request ID from WQE */
u8 type;
@@ -187,6 +190,14 @@ struct ehea_cqe {
#define EHEA_EQE_SM_MECH_NUMBER EHEA_BMASK_IBM(48, 55)
#define EHEA_EQE_SM_PORT_NUMBER EHEA_BMASK_IBM(56, 63)
+#define EHEA_AER_RESTYPE_QP 0x8
+#define EHEA_AER_RESTYPE_CQ 0x4
+#define EHEA_AER_RESTYPE_EQ 0x3
+
+/* Defines which affiliated errors lead to a port reset */
+#define EHEA_AER_RESET_MASK 0xFFFFFFFFFEFFFFFFULL
+#define EHEA_AERR_RESET_MASK 0xFFFFFFFFFFFFFFFFULL
+
struct ehea_eqe {
u64 entry;
};
@@ -379,7 +390,8 @@ int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
int ehea_rem_mr(struct ehea_mr *mr);
-void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle);
+u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
+ u64 *aer, u64 *aerr);
int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages);
int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages);
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index ff27f728fd9d..112c5aa9af7f 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -1293,8 +1293,6 @@ static netdev_tx_t enc28j60_send_packet(struct sk_buff *skb,
*/
netif_stop_queue(dev);
- /* save the timestamp */
- priv->netdev->trans_start = jiffies;
/* Remember the skb for deferred processing */
priv->tx_skb = skb;
schedule_work(&priv->tx_work);
diff --git a/drivers/net/enic/cq_enet_desc.h b/drivers/net/enic/cq_enet_desc.h
index 03dce9ed612c..337d1943af46 100644
--- a/drivers/net/enic/cq_enet_desc.h
+++ b/drivers/net/enic/cq_enet_desc.h
@@ -101,14 +101,18 @@ static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok,
u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok)
{
- u16 completed_index_flags = le16_to_cpu(desc->completed_index_flags);
- u16 q_number_rss_type_flags =
- le16_to_cpu(desc->q_number_rss_type_flags);
- u16 bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
+ u16 completed_index_flags;
+ u16 q_number_rss_type_flags;
+ u16 bytes_written_flags;
cq_desc_dec((struct cq_desc *)desc, type,
color, q_number, completed_index);
+ completed_index_flags = le16_to_cpu(desc->completed_index_flags);
+ q_number_rss_type_flags =
+ le16_to_cpu(desc->q_number_rss_type_flags);
+ bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
+
*ingress_port = (completed_index_flags &
CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
*fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index ee01f5a6d0d4..5fa56f1e5590 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -33,8 +33,8 @@
#include "vnic_rss.h"
#define DRV_NAME "enic"
-#define DRV_DESCRIPTION "Cisco 10G Ethernet Driver"
-#define DRV_VERSION "1.1.0.241a"
+#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
+#define DRV_VERSION "1.3.1.1"
#define DRV_COPYRIGHT "Copyright 2008-2009 Cisco Systems, Inc"
#define PFX DRV_NAME ": "
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index cf098bb636b8..1232887c243d 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -822,14 +822,14 @@ static int enic_set_mac_addr(struct net_device *netdev, char *addr)
static void enic_set_multicast_list(struct net_device *netdev)
{
struct enic *enic = netdev_priv(netdev);
- struct dev_mc_list *list;
+ struct netdev_hw_addr *ha;
int directed = 1;
int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0;
unsigned int mc_count = netdev_mc_count(netdev);
int allmulti = (netdev->flags & IFF_ALLMULTI) ||
- mc_count > ENIC_MULTICAST_PERFECT_FILTERS;
+ mc_count > ENIC_MULTICAST_PERFECT_FILTERS;
unsigned int flags = netdev->flags | (allmulti ? IFF_ALLMULTI : 0);
u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
unsigned int i, j;
@@ -852,10 +852,10 @@ static void enic_set_multicast_list(struct net_device *netdev)
*/
i = 0;
- netdev_for_each_mc_addr(list, netdev) {
+ netdev_for_each_mc_addr(ha, netdev) {
if (i == mc_count)
break;
- memcpy(mc_addr[i++], list->dmi_addr, ETH_ALEN);
+ memcpy(mc_addr[i++], ha->addr, ETH_ALEN);
}
for (i = 0; i < enic->mc_count; i++) {
@@ -2058,8 +2058,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
netdev->watchdog_timeo = 2 * HZ;
netdev->ethtool_ops = &enic_ethtool_ops;
- netdev->features |= NETIF_F_HW_VLAN_TX |
- NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
+ netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
if (ENIC_SETTING(enic, TXCSUM))
netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
if (ENIC_SETTING(enic, TSO))
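For illustration: this file, like several drivers in this series, swaps the removed struct dev_mc_list / dmi_addr pair for struct netdev_hw_addr and its addr field, still walked with netdev_for_each_mc_addr(). A minimal, generic sketch of the new pattern (the example_* name is a placeholder, not driver code):

	#include <linux/netdevice.h>
	#include <linux/etherdevice.h>
	#include <linux/string.h>

	/* Copy up to 'max' multicast addresses out of the device's mc list. */
	static unsigned int example_copy_mc_addrs(struct net_device *netdev,
						  u8 (*mc_addr)[ETH_ALEN],
						  unsigned int max)
	{
		struct netdev_hw_addr *ha;
		unsigned int i = 0;

		netdev_for_each_mc_addr(ha, netdev) {
			if (i == max)
				break;
			memcpy(mc_addr[i++], ha->addr, ETH_ALEN);
		}
		return i;
	}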
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
index cf22de71014e..d43a9d43bbff 100644
--- a/drivers/net/enic/vnic_dev.c
+++ b/drivers/net/enic/vnic_dev.c
@@ -574,22 +574,18 @@ int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr)
return err;
}
-int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
+int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
+ void *notify_addr, dma_addr_t notify_pa, u16 intr)
{
u64 a0, a1;
int wait = 1000;
int r;
- if (!vdev->notify) {
- vdev->notify = pci_alloc_consistent(vdev->pdev,
- sizeof(struct vnic_devcmd_notify),
- &vdev->notify_pa);
- if (!vdev->notify)
- return -ENOMEM;
- memset(vdev->notify, 0, sizeof(struct vnic_devcmd_notify));
- }
+ memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify));
+ vdev->notify = notify_addr;
+ vdev->notify_pa = notify_pa;
- a0 = vdev->notify_pa;
+ a0 = (u64)notify_pa;
a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
a1 += sizeof(struct vnic_devcmd_notify);
@@ -598,7 +594,27 @@ int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
return r;
}
-void vnic_dev_notify_unset(struct vnic_dev *vdev)
+int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
+{
+ void *notify_addr;
+ dma_addr_t notify_pa;
+
+ if (vdev->notify || vdev->notify_pa) {
+ printk(KERN_ERR "notify block %p still allocated",
+ vdev->notify);
+ return -EINVAL;
+ }
+
+ notify_addr = pci_alloc_consistent(vdev->pdev,
+ sizeof(struct vnic_devcmd_notify),
+ &notify_pa);
+ if (!notify_addr)
+ return -ENOMEM;
+
+ return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
+}
+
+void vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
{
u64 a0, a1;
int wait = 1000;
@@ -608,9 +624,23 @@ void vnic_dev_notify_unset(struct vnic_dev *vdev)
a1 += sizeof(struct vnic_devcmd_notify);
vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
+ vdev->notify = NULL;
+ vdev->notify_pa = 0;
vdev->notify_sz = 0;
}
+void vnic_dev_notify_unset(struct vnic_dev *vdev)
+{
+ if (vdev->notify) {
+ pci_free_consistent(vdev->pdev,
+ sizeof(struct vnic_devcmd_notify),
+ vdev->notify,
+ vdev->notify_pa);
+ }
+
+ vnic_dev_notify_unsetcmd(vdev);
+}
+
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
u32 *words;
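For illustration: vnic_dev_notify_set()/unset() are split so the devcmd half (vnic_dev_notify_setcmd()/unsetcmd()) can be driven with a caller-owned notify buffer, while the original entry points keep allocating and freeing a coherent buffer internally. A hedged sketch of a caller that manages its own buffer (example_* names are assumptions, not driver code):

	/* Sketch only: caller-managed notify buffer with the new setcmd/unsetcmd. */
	static int example_notify_init(struct vnic_dev *vdev, struct pci_dev *pdev,
				       void **notify, dma_addr_t *notify_pa, u16 intr)
	{
		*notify = pci_alloc_consistent(pdev,
				sizeof(struct vnic_devcmd_notify), notify_pa);
		if (!*notify)
			return -ENOMEM;

		return vnic_dev_notify_setcmd(vdev, *notify, *notify_pa, intr);
	}

	static void example_notify_fini(struct vnic_dev *vdev, struct pci_dev *pdev,
					void *notify, dma_addr_t notify_pa)
	{
		vnic_dev_notify_unsetcmd(vdev);
		pci_free_consistent(pdev, sizeof(struct vnic_devcmd_notify),
				    notify, notify_pa);
	}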
diff --git a/drivers/net/enic/vnic_dev.h b/drivers/net/enic/vnic_dev.h
index fc5e3eb35a5e..f5be640b0b5c 100644
--- a/drivers/net/enic/vnic_dev.h
+++ b/drivers/net/enic/vnic_dev.h
@@ -107,7 +107,10 @@ void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr);
void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr);
int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr);
+int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
+ void *notify_addr, dma_addr_t notify_pa, u16 intr);
int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
+void vnic_dev_notify_unsetcmd(struct vnic_dev *vdev);
void vnic_dev_notify_unset(struct vnic_dev *vdev);
int vnic_dev_link_status(struct vnic_dev *vdev);
u32 vnic_dev_port_speed(struct vnic_dev *vdev);
diff --git a/drivers/net/enic/vnic_rq.c b/drivers/net/enic/vnic_rq.c
index e186efaf9da1..cc580cfec41d 100644
--- a/drivers/net/enic/vnic_rq.c
+++ b/drivers/net/enic/vnic_rq.c
@@ -168,10 +168,10 @@ int vnic_rq_disable(struct vnic_rq *rq)
iowrite32(0, &rq->ctrl->enable);
/* Wait for HW to ACK disable request */
- for (wait = 0; wait < 100; wait++) {
+ for (wait = 0; wait < 1000; wait++) {
if (!(ioread32(&rq->ctrl->running)))
return 0;
- udelay(1);
+ udelay(10);
}
printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index);
diff --git a/drivers/net/enic/vnic_wq.c b/drivers/net/enic/vnic_wq.c
index d5f984357f5c..1378afbdfe67 100644
--- a/drivers/net/enic/vnic_wq.c
+++ b/drivers/net/enic/vnic_wq.c
@@ -161,10 +161,10 @@ int vnic_wq_disable(struct vnic_wq *wq)
iowrite32(0, &wq->ctrl->enable);
/* Wait for HW to ACK disable request */
- for (wait = 0; wait < 100; wait++) {
+ for (wait = 0; wait < 1000; wait++) {
if (!(ioread32(&wq->ctrl->running)))
return 0;
- udelay(1);
+ udelay(10);
}
printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index);
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 7a567201e829..a48da2dc907f 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -908,7 +908,7 @@ static void epic_tx_timeout(struct net_device *dev)
outl(TxQueued, dev->base_addr + COMMAND);
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
ep->stats.tx_errors++;
if (!ep->tx_full)
netif_wake_queue(dev);
@@ -1006,7 +1006,6 @@ static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Trigger an immediate transmit demand. */
outl(TxQueued, dev->base_addr + COMMAND);
- dev->trans_start = jiffies;
if (debug > 4)
printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
"flag %2.2x Tx status %8.8x.\n",
@@ -1399,12 +1398,12 @@ static void set_rx_mode(struct net_device *dev)
outl(0x0004, ioaddr + RxCtrl);
return;
} else { /* Never executed, for now. */
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
unsigned int bit_nr =
- ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
+ ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;
mc_filter[bit_nr >> 3] |= (1 << bit_nr);
}
}
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c
index d4e24f08b3ba..874973f558e9 100644
--- a/drivers/net/eth16i.c
+++ b/drivers/net/eth16i.c
@@ -1027,7 +1027,7 @@ static void eth16i_timeout(struct net_device *dev)
inw(ioaddr + TX_STATUS_REG), (inb(ioaddr + TX_STATUS_REG) & TX_DONE) ?
"IRQ conflict" : "network cable problem");
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
/* Let's dump all registers */
if(eth16i_debug > 0) {
@@ -1047,7 +1047,7 @@ static void eth16i_timeout(struct net_device *dev)
}
dev->stats.tx_errors++;
eth16i_reset(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG);
netif_wake_queue(dev);
}
@@ -1109,7 +1109,6 @@ static netdev_tx_t eth16i_tx(struct sk_buff *skb, struct net_device *dev)
outb(TX_START | lp->tx_queue, ioaddr + TRANSMIT_START_REG);
lp->tx_queue = 0;
lp->tx_queue_len = 0;
- dev->trans_start = jiffies;
lp->tx_started = 1;
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index a8d92503226e..14cbde5cf68e 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -756,7 +756,7 @@ static void ethoc_set_multicast_list(struct net_device *dev)
{
struct ethoc *priv = netdev_priv(dev);
u32 mode = ethoc_read(priv, MODER);
- struct dev_mc_list *mc;
+ struct netdev_hw_addr *ha;
u32 hash[2] = { 0, 0 };
/* set loopback mode if requested */
@@ -784,8 +784,8 @@ static void ethoc_set_multicast_list(struct net_device *dev)
hash[0] = 0xffffffff;
hash[1] = 0xffffffff;
} else {
- netdev_for_each_mc_addr(mc, dev) {
- u32 crc = ether_crc(ETH_ALEN, mc->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ u32 crc = ether_crc(ETH_ALEN, ha->addr);
int bit = (crc >> 26) & 0x3f;
hash[bit >> 5] |= 1 << (bit & 0x1f);
}
@@ -851,7 +851,6 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
}
- dev->trans_start = jiffies;
spin_unlock_irq(&priv->lock);
out:
dev_kfree_skb(skb);
@@ -1040,7 +1039,6 @@ static int ethoc_probe(struct platform_device *pdev)
netdev->features |= 0;
/* setup NAPI */
- memset(&priv->napi, 0, sizeof(priv->napi));
netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
spin_lock_init(&priv->rx_lock);
diff --git a/drivers/net/ewrk3.c b/drivers/net/ewrk3.c
index 91e59f3a9d6d..99eb56be093f 100644
--- a/drivers/net/ewrk3.c
+++ b/drivers/net/ewrk3.c
@@ -757,7 +757,7 @@ static void ewrk3_timeout(struct net_device *dev)
*/
ENABLE_IRQs;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
}
@@ -862,7 +862,6 @@ static netdev_tx_t ewrk3_queue_pkt(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irq (&lp->hw_lock);
dev->stats.tx_bytes += skb->len;
- dev->trans_start = jiffies;
dev_kfree_skb (skb);
/* Check for free resources: stop Tx queue if there are none */
@@ -1169,7 +1168,7 @@ static void set_multicast_list(struct net_device *dev)
static void SetMulticastFilter(struct net_device *dev)
{
struct ewrk3_private *lp = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
u_long iobase = dev->base_addr;
int i;
char *addrs, bit, byte;
@@ -1213,8 +1212,8 @@ static void SetMulticastFilter(struct net_device *dev)
}
/* Update table */
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
if ((*addrs & 0x01) == 1) { /* multicast address? */
crc = ether_crc_le(ETH_ALEN, addrs);
hashcode = crc & ((1 << 9) - 1); /* hashcode is 9 LSb of CRC */
@@ -1776,8 +1775,7 @@ static int ewrk3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
break;
case EWRK3_SET_MCA: /* Set a multicast address */
if (capable(CAP_NET_ADMIN)) {
- if (ioc->len > 1024)
- {
+ if (ioc->len > HASH_TABLE_LEN) {
status = -EINVAL;
break;
}
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index d11ae5197f01..15f4f8d3d46d 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -1233,7 +1233,7 @@ static void fealnx_tx_timeout(struct net_device *dev)
spin_unlock_irqrestore(&np->lock, flags);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
np->stats.tx_errors++;
netif_wake_queue(dev); /* or .._start_.. ?? */
}
@@ -1374,7 +1374,6 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
++np->really_tx_count;
iowrite32(0, np->mem + TXPDR);
- dev->trans_start = jiffies;
spin_unlock_irqrestore(&np->lock, flags);
return NETDEV_TX_OK;
@@ -1791,12 +1790,12 @@ static void __set_rx_mode(struct net_device *dev)
memset(mc_filter, 0xff, sizeof(mc_filter));
rx_mode = CR_W_AB | CR_W_AM;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
unsigned int bit;
- bit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
+ bit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
mc_filter[bit >> 5] |= (1 << bit);
}
rx_mode = CR_W_AB | CR_W_AM;
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 9b4e8f797a7a..42d9ac9ba395 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -40,6 +40,7 @@
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
+#include <linux/phy.h>
#include <asm/cacheflush.h>
@@ -61,7 +62,6 @@
* Define the fixed address of the FEC hardware.
*/
#if defined(CONFIG_M5272)
-#define HAVE_mii_link_interrupt
static unsigned char fec_mac_default[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -86,23 +86,6 @@ static unsigned char fec_mac_default[] = {
#endif
#endif /* CONFIG_M5272 */
-/* Forward declarations of some structures to support different PHYs */
-
-typedef struct {
- uint mii_data;
- void (*funct)(uint mii_reg, struct net_device *dev);
-} phy_cmd_t;
-
-typedef struct {
- uint id;
- char *name;
-
- const phy_cmd_t *config;
- const phy_cmd_t *startup;
- const phy_cmd_t *ack_int;
- const phy_cmd_t *shutdown;
-} phy_info_t;
-
/* The number of Tx and Rx buffers. These are allocated from the page
* pool. The code may assume these are power of two, so it it best
* to keep them that size.
@@ -189,29 +172,21 @@ struct fec_enet_private {
uint tx_full;
/* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
spinlock_t hw_lock;
- /* hold while accessing the mii_list_t() elements */
- spinlock_t mii_lock;
-
- uint phy_id;
- uint phy_id_done;
- uint phy_status;
- uint phy_speed;
- phy_info_t const *phy;
- struct work_struct phy_task;
- uint sequence_done;
- uint mii_phy_task_queued;
+ struct platform_device *pdev;
- uint phy_addr;
+ int opened;
+ /* Phylib and MDIO interface */
+ struct mii_bus *mii_bus;
+ struct phy_device *phy_dev;
+ int mii_timeout;
+ uint phy_speed;
int index;
- int opened;
int link;
- int old_link;
int full_duplex;
};
-static void fec_enet_mii(struct net_device *dev);
static irqreturn_t fec_enet_interrupt(int irq, void * dev_id);
static void fec_enet_tx(struct net_device *dev);
static void fec_enet_rx(struct net_device *dev);
@@ -219,67 +194,20 @@ static int fec_enet_close(struct net_device *dev);
static void fec_restart(struct net_device *dev, int duplex);
static void fec_stop(struct net_device *dev);
+/* FEC MII MMFR bits definition */
+#define FEC_MMFR_ST (1 << 30)
+#define FEC_MMFR_OP_READ (2 << 28)
+#define FEC_MMFR_OP_WRITE (1 << 28)
+#define FEC_MMFR_PA(v) ((v & 0x1f) << 23)
+#define FEC_MMFR_RA(v) ((v & 0x1f) << 18)
+#define FEC_MMFR_TA (2 << 16)
+#define FEC_MMFR_DATA(v) (v & 0xffff)
-/* MII processing. We keep this as simple as possible. Requests are
- * placed on the list (if there is room). When the request is finished
- * by the MII, an optional function may be called.
- */
-typedef struct mii_list {
- uint mii_regval;
- void (*mii_func)(uint val, struct net_device *dev);
- struct mii_list *mii_next;
-} mii_list_t;
-
-#define NMII 20
-static mii_list_t mii_cmds[NMII];
-static mii_list_t *mii_free;
-static mii_list_t *mii_head;
-static mii_list_t *mii_tail;
-
-static int mii_queue(struct net_device *dev, int request,
- void (*func)(uint, struct net_device *));
-
-/* Make MII read/write commands for the FEC */
-#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
-#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | \
- (VAL & 0xffff))
-#define mk_mii_end 0
+#define FEC_MII_TIMEOUT 10000
/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)
-/* Register definitions for the PHY */
-
-#define MII_REG_CR 0 /* Control Register */
-#define MII_REG_SR 1 /* Status Register */
-#define MII_REG_PHYIR1 2 /* PHY Identification Register 1 */
-#define MII_REG_PHYIR2 3 /* PHY Identification Register 2 */
-#define MII_REG_ANAR 4 /* A-N Advertisement Register */
-#define MII_REG_ANLPAR 5 /* A-N Link Partner Ability Register */
-#define MII_REG_ANER 6 /* A-N Expansion Register */
-#define MII_REG_ANNPTR 7 /* A-N Next Page Transmit Register */
-#define MII_REG_ANLPRNPR 8 /* A-N Link Partner Received Next Page Reg. */
-
-/* values for phy_status */
-
-#define PHY_CONF_ANE 0x0001 /* 1 auto-negotiation enabled */
-#define PHY_CONF_LOOP 0x0002 /* 1 loopback mode enabled */
-#define PHY_CONF_SPMASK 0x00f0 /* mask for speed */
-#define PHY_CONF_10HDX 0x0010 /* 10 Mbit half duplex supported */
-#define PHY_CONF_10FDX 0x0020 /* 10 Mbit full duplex supported */
-#define PHY_CONF_100HDX 0x0040 /* 100 Mbit half duplex supported */
-#define PHY_CONF_100FDX 0x0080 /* 100 Mbit full duplex supported */
-
-#define PHY_STAT_LINK 0x0100 /* 1 up - 0 down */
-#define PHY_STAT_FAULT 0x0200 /* 1 remote fault */
-#define PHY_STAT_ANC 0x0400 /* 1 auto-negotiation complete */
-#define PHY_STAT_SPMASK 0xf000 /* mask for speed */
-#define PHY_STAT_10HDX 0x1000 /* 10 Mbit half duplex selected */
-#define PHY_STAT_10FDX 0x2000 /* 10 Mbit full duplex selected */
-#define PHY_STAT_100HDX 0x4000 /* 100 Mbit half duplex selected */
-#define PHY_STAT_100FDX 0x8000 /* 100 Mbit full duplex selected */
-
-
static int
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
@@ -347,8 +275,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
| BD_ENET_TX_LAST | BD_ENET_TX_TC);
bdp->cbd_sc = status;
- dev->trans_start = jiffies;
-
/* Trigger transmission start */
writel(0, fep->hwp + FEC_X_DES_ACTIVE);
@@ -406,12 +332,6 @@ fec_enet_interrupt(int irq, void * dev_id)
ret = IRQ_HANDLED;
fec_enet_tx(dev);
}
-
- if (int_events & FEC_ENET_MII) {
- ret = IRQ_HANDLED;
- fec_enet_mii(dev);
- }
-
} while (int_events);
return ret;
@@ -607,827 +527,311 @@ rx_processing_done:
spin_unlock(&fep->hw_lock);
}
-/* called from interrupt context */
-static void
-fec_enet_mii(struct net_device *dev)
-{
- struct fec_enet_private *fep;
- mii_list_t *mip;
-
- fep = netdev_priv(dev);
- spin_lock(&fep->mii_lock);
-
- if ((mip = mii_head) == NULL) {
- printk("MII and no head!\n");
- goto unlock;
- }
-
- if (mip->mii_func != NULL)
- (*(mip->mii_func))(readl(fep->hwp + FEC_MII_DATA), dev);
-
- mii_head = mip->mii_next;
- mip->mii_next = mii_free;
- mii_free = mip;
-
- if ((mip = mii_head) != NULL)
- writel(mip->mii_regval, fep->hwp + FEC_MII_DATA);
-
-unlock:
- spin_unlock(&fep->mii_lock);
-}
-
-static int
-mii_queue_unlocked(struct net_device *dev, int regval,
- void (*func)(uint, struct net_device *))
+/* ------------------------------------------------------------------------- */
+#ifdef CONFIG_M5272
+static void __inline__ fec_get_mac(struct net_device *dev)
{
- struct fec_enet_private *fep;
- mii_list_t *mip;
- int retval;
-
- /* Add PHY address to register command */
- fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(dev);
+ unsigned char *iap, tmpaddr[ETH_ALEN];
- regval |= fep->phy_addr << 23;
- retval = 0;
-
- if ((mip = mii_free) != NULL) {
- mii_free = mip->mii_next;
- mip->mii_regval = regval;
- mip->mii_func = func;
- mip->mii_next = NULL;
- if (mii_head) {
- mii_tail->mii_next = mip;
- mii_tail = mip;
- } else {
- mii_head = mii_tail = mip;
- writel(regval, fep->hwp + FEC_MII_DATA);
- }
+ if (FEC_FLASHMAC) {
+ /*
+ * Get MAC address from FLASH.
+ * If it is all 1's or 0's, use the default.
+ */
+ iap = (unsigned char *)FEC_FLASHMAC;
+ if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
+ (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
+ iap = fec_mac_default;
+ if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
+ (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
+ iap = fec_mac_default;
} else {
- retval = 1;
+ *((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW);
+ *((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
+ iap = &tmpaddr[0];
}
- return retval;
-}
-
-static int
-mii_queue(struct net_device *dev, int regval,
- void (*func)(uint, struct net_device *))
-{
- struct fec_enet_private *fep;
- unsigned long flags;
- int retval;
- fep = netdev_priv(dev);
- spin_lock_irqsave(&fep->mii_lock, flags);
- retval = mii_queue_unlocked(dev, regval, func);
- spin_unlock_irqrestore(&fep->mii_lock, flags);
- return retval;
-}
-
-static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c)
-{
- if(!c)
- return;
+ memcpy(dev->dev_addr, iap, ETH_ALEN);
- for (; c->mii_data != mk_mii_end; c++)
- mii_queue(dev, c->mii_data, c->funct);
+ /* Adjust MAC if using default MAC address */
+ if (iap == fec_mac_default)
+ dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
}
+#endif
-static void mii_parse_sr(uint mii_reg, struct net_device *dev)
-{
- struct fec_enet_private *fep = netdev_priv(dev);
- volatile uint *s = &(fep->phy_status);
- uint status;
-
- status = *s & ~(PHY_STAT_LINK | PHY_STAT_FAULT | PHY_STAT_ANC);
-
- if (mii_reg & 0x0004)
- status |= PHY_STAT_LINK;
- if (mii_reg & 0x0010)
- status |= PHY_STAT_FAULT;
- if (mii_reg & 0x0020)
- status |= PHY_STAT_ANC;
- *s = status;
-}
+/* ------------------------------------------------------------------------- */
-static void mii_parse_cr(uint mii_reg, struct net_device *dev)
+/*
+ * Phy section
+ */
+static void fec_enet_adjust_link(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
- volatile uint *s = &(fep->phy_status);
- uint status;
-
- status = *s & ~(PHY_CONF_ANE | PHY_CONF_LOOP);
-
- if (mii_reg & 0x1000)
- status |= PHY_CONF_ANE;
- if (mii_reg & 0x4000)
- status |= PHY_CONF_LOOP;
- *s = status;
-}
+ struct phy_device *phy_dev = fep->phy_dev;
+ unsigned long flags;
-static void mii_parse_anar(uint mii_reg, struct net_device *dev)
-{
- struct fec_enet_private *fep = netdev_priv(dev);
- volatile uint *s = &(fep->phy_status);
- uint status;
-
- status = *s & ~(PHY_CONF_SPMASK);
-
- if (mii_reg & 0x0020)
- status |= PHY_CONF_10HDX;
- if (mii_reg & 0x0040)
- status |= PHY_CONF_10FDX;
- if (mii_reg & 0x0080)
- status |= PHY_CONF_100HDX;
- if (mii_reg & 0x00100)
- status |= PHY_CONF_100FDX;
- *s = status;
-}
+ int status_change = 0;
-/* ------------------------------------------------------------------------- */
-/* The Level one LXT970 is used by many boards */
+ spin_lock_irqsave(&fep->hw_lock, flags);
-#define MII_LXT970_MIRROR 16 /* Mirror register */
-#define MII_LXT970_IER 17 /* Interrupt Enable Register */
-#define MII_LXT970_ISR 18 /* Interrupt Status Register */
-#define MII_LXT970_CONFIG 19 /* Configuration Register */
-#define MII_LXT970_CSR 20 /* Chip Status Register */
+ /* Don't leave the PHY in a halted state after an MII error */
+ if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
+ phy_dev->state = PHY_RESUMING;
+ goto spin_unlock;
+ }
-static void mii_parse_lxt970_csr(uint mii_reg, struct net_device *dev)
-{
- struct fec_enet_private *fep = netdev_priv(dev);
- volatile uint *s = &(fep->phy_status);
- uint status;
+ /* Duplex link change */
+ if (phy_dev->link) {
+ if (fep->full_duplex != phy_dev->duplex) {
+ fec_restart(dev, phy_dev->duplex);
+ status_change = 1;
+ }
+ }
- status = *s & ~(PHY_STAT_SPMASK);
- if (mii_reg & 0x0800) {
- if (mii_reg & 0x1000)
- status |= PHY_STAT_100FDX;
- else
- status |= PHY_STAT_100HDX;
- } else {
- if (mii_reg & 0x1000)
- status |= PHY_STAT_10FDX;
+ /* Link on or off change */
+ if (phy_dev->link != fep->link) {
+ fep->link = phy_dev->link;
+ if (phy_dev->link)
+ fec_restart(dev, phy_dev->duplex);
else
- status |= PHY_STAT_10HDX;
+ fec_stop(dev);
+ status_change = 1;
}
- *s = status;
-}
-static phy_cmd_t const phy_cmd_lxt970_config[] = {
- { mk_mii_read(MII_REG_CR), mii_parse_cr },
- { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_lxt970_startup[] = { /* enable interrupts */
- { mk_mii_write(MII_LXT970_IER, 0x0002), NULL },
- { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_lxt970_ack_int[] = {
- /* read SR and ISR to acknowledge */
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- { mk_mii_read(MII_LXT970_ISR), NULL },
-
- /* find out the current status */
- { mk_mii_read(MII_LXT970_CSR), mii_parse_lxt970_csr },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_lxt970_shutdown[] = { /* disable interrupts */
- { mk_mii_write(MII_LXT970_IER, 0x0000), NULL },
- { mk_mii_end, }
- };
-static phy_info_t const phy_info_lxt970 = {
- .id = 0x07810000,
- .name = "LXT970",
- .config = phy_cmd_lxt970_config,
- .startup = phy_cmd_lxt970_startup,
- .ack_int = phy_cmd_lxt970_ack_int,
- .shutdown = phy_cmd_lxt970_shutdown
-};
-
-/* ------------------------------------------------------------------------- */
-/* The Level one LXT971 is used on some of my custom boards */
-
-/* register definitions for the 971 */
+spin_unlock:
+ spin_unlock_irqrestore(&fep->hw_lock, flags);
-#define MII_LXT971_PCR 16 /* Port Control Register */
-#define MII_LXT971_SR2 17 /* Status Register 2 */
-#define MII_LXT971_IER 18 /* Interrupt Enable Register */
-#define MII_LXT971_ISR 19 /* Interrupt Status Register */
-#define MII_LXT971_LCR 20 /* LED Control Register */
-#define MII_LXT971_TCR 30 /* Transmit Control Register */
+ if (status_change)
+ phy_print_status(phy_dev);
+}
/*
- * I had some nice ideas of running the MDIO faster...
- * The 971 should support 8MHz and I tried it, but things acted really
- * weird, so 2.5 MHz ought to be enough for anyone...
+ * NOTE: an MII transaction takes around 25 us, so we poll for completion...
*/
-
-static void mii_parse_lxt971_sr2(uint mii_reg, struct net_device *dev)
+static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
- struct fec_enet_private *fep = netdev_priv(dev);
- volatile uint *s = &(fep->phy_status);
- uint status;
+ struct fec_enet_private *fep = bus->priv;
+ int timeout = FEC_MII_TIMEOUT;
- status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC);
+ fep->mii_timeout = 0;
- if (mii_reg & 0x0400) {
- fep->link = 1;
- status |= PHY_STAT_LINK;
- } else {
- fep->link = 0;
- }
- if (mii_reg & 0x0080)
- status |= PHY_STAT_ANC;
- if (mii_reg & 0x4000) {
- if (mii_reg & 0x0200)
- status |= PHY_STAT_100FDX;
- else
- status |= PHY_STAT_100HDX;
- } else {
- if (mii_reg & 0x0200)
- status |= PHY_STAT_10FDX;
- else
- status |= PHY_STAT_10HDX;
+ /* clear MII end of transfer bit */
+ writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
+
+ /* start a read op */
+ writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
+ FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
+
+ /* wait for end of transfer */
+ while (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_MII)) {
+ cpu_relax();
+ if (timeout-- < 0) {
+ fep->mii_timeout = 1;
+ printk(KERN_ERR "FEC: MDIO read timeout\n");
+ return -ETIMEDOUT;
+ }
}
- if (mii_reg & 0x0008)
- status |= PHY_STAT_FAULT;
- *s = status;
+ /* return value */
+ return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
}
-static phy_cmd_t const phy_cmd_lxt971_config[] = {
- /* limit to 10MBit because my prototype board
- * doesn't work with 100. */
- { mk_mii_read(MII_REG_CR), mii_parse_cr },
- { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
- { mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_lxt971_startup[] = { /* enable interrupts */
- { mk_mii_write(MII_LXT971_IER, 0x00f2), NULL },
- { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
- { mk_mii_write(MII_LXT971_LCR, 0xd422), NULL }, /* LED config */
- /* Somehow does the 971 tell me that the link is down
- * the first read after power-up.
- * read here to get a valid value in ack_int */
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_lxt971_ack_int[] = {
- /* acknowledge the int before reading status ! */
- { mk_mii_read(MII_LXT971_ISR), NULL },
- /* find out the current status */
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- { mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_lxt971_shutdown[] = { /* disable interrupts */
- { mk_mii_write(MII_LXT971_IER, 0x0000), NULL },
- { mk_mii_end, }
- };
-static phy_info_t const phy_info_lxt971 = {
- .id = 0x0001378e,
- .name = "LXT971",
- .config = phy_cmd_lxt971_config,
- .startup = phy_cmd_lxt971_startup,
- .ack_int = phy_cmd_lxt971_ack_int,
- .shutdown = phy_cmd_lxt971_shutdown
-};
-
-/* ------------------------------------------------------------------------- */
-/* The Quality Semiconductor QS6612 is used on the RPX CLLF */
-
-/* register definitions */
-
-#define MII_QS6612_MCR 17 /* Mode Control Register */
-#define MII_QS6612_FTR 27 /* Factory Test Register */
-#define MII_QS6612_MCO 28 /* Misc. Control Register */
-#define MII_QS6612_ISR 29 /* Interrupt Source Register */
-#define MII_QS6612_IMR 30 /* Interrupt Mask Register */
-#define MII_QS6612_PCR 31 /* 100BaseTx PHY Control Reg. */
-
-static void mii_parse_qs6612_pcr(uint mii_reg, struct net_device *dev)
+static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+ u16 value)
{
- struct fec_enet_private *fep = netdev_priv(dev);
- volatile uint *s = &(fep->phy_status);
- uint status;
-
- status = *s & ~(PHY_STAT_SPMASK);
+ struct fec_enet_private *fep = bus->priv;
+ int timeout = FEC_MII_TIMEOUT;
- switch((mii_reg >> 2) & 7) {
- case 1: status |= PHY_STAT_10HDX; break;
- case 2: status |= PHY_STAT_100HDX; break;
- case 5: status |= PHY_STAT_10FDX; break;
- case 6: status |= PHY_STAT_100FDX; break;
-}
-
- *s = status;
-}
+ fep->mii_timeout = 0;
-static phy_cmd_t const phy_cmd_qs6612_config[] = {
- /* The PHY powers up isolated on the RPX,
- * so send a command to allow operation.
- */
- { mk_mii_write(MII_QS6612_PCR, 0x0dc0), NULL },
-
- /* parse cr and anar to get some info */
- { mk_mii_read(MII_REG_CR), mii_parse_cr },
- { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_qs6612_startup[] = { /* enable interrupts */
- { mk_mii_write(MII_QS6612_IMR, 0x003a), NULL },
- { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_qs6612_ack_int[] = {
- /* we need to read ISR, SR and ANER to acknowledge */
- { mk_mii_read(MII_QS6612_ISR), NULL },
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- { mk_mii_read(MII_REG_ANER), NULL },
-
- /* read pcr to get info */
- { mk_mii_read(MII_QS6612_PCR), mii_parse_qs6612_pcr },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_qs6612_shutdown[] = { /* disable interrupts */
- { mk_mii_write(MII_QS6612_IMR, 0x0000), NULL },
- { mk_mii_end, }
- };
-static phy_info_t const phy_info_qs6612 = {
- .id = 0x00181440,
- .name = "QS6612",
- .config = phy_cmd_qs6612_config,
- .startup = phy_cmd_qs6612_startup,
- .ack_int = phy_cmd_qs6612_ack_int,
- .shutdown = phy_cmd_qs6612_shutdown
-};
-
-/* ------------------------------------------------------------------------- */
-/* AMD AM79C874 phy */
+ /* clear MII end of transfer bit */
+ writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
-/* register definitions for the 874 */
+ /* start a write op */
+ writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
+ FEC_MMFR_TA | FEC_MMFR_DATA(value),
+ fep->hwp + FEC_MII_DATA);
+
+ /* wait for end of transfer */
+ while (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_MII)) {
+ cpu_relax();
+ if (timeout-- < 0) {
+ fep->mii_timeout = 1;
+ printk(KERN_ERR "FEC: MDIO write timeout\n");
+ return -ETIMEDOUT;
+ }
+ }
-#define MII_AM79C874_MFR 16 /* Miscellaneous Feature Register */
-#define MII_AM79C874_ICSR 17 /* Interrupt/Status Register */
-#define MII_AM79C874_DR 18 /* Diagnostic Register */
-#define MII_AM79C874_PMLR 19 /* Power and Loopback Register */
-#define MII_AM79C874_MCR 21 /* ModeControl Register */
-#define MII_AM79C874_DC 23 /* Disconnect Counter */
-#define MII_AM79C874_REC 24 /* Recieve Error Counter */
+ return 0;
+}
-static void mii_parse_am79c874_dr(uint mii_reg, struct net_device *dev)
+static int fec_enet_mdio_reset(struct mii_bus *bus)
{
- struct fec_enet_private *fep = netdev_priv(dev);
- volatile uint *s = &(fep->phy_status);
- uint status;
-
- status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_ANC);
-
- if (mii_reg & 0x0080)
- status |= PHY_STAT_ANC;
- if (mii_reg & 0x0400)
- status |= ((mii_reg & 0x0800) ? PHY_STAT_100FDX : PHY_STAT_100HDX);
- else
- status |= ((mii_reg & 0x0800) ? PHY_STAT_10FDX : PHY_STAT_10HDX);
-
- *s = status;
+ return 0;
}
-static phy_cmd_t const phy_cmd_am79c874_config[] = {
- { mk_mii_read(MII_REG_CR), mii_parse_cr },
- { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
- { mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_am79c874_startup[] = { /* enable interrupts */
- { mk_mii_write(MII_AM79C874_ICSR, 0xff00), NULL },
- { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_am79c874_ack_int[] = {
- /* find out the current status */
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- { mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr },
- /* we only need to read ISR to acknowledge */
- { mk_mii_read(MII_AM79C874_ICSR), NULL },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_am79c874_shutdown[] = { /* disable interrupts */
- { mk_mii_write(MII_AM79C874_ICSR, 0x0000), NULL },
- { mk_mii_end, }
- };
-static phy_info_t const phy_info_am79c874 = {
- .id = 0x00022561,
- .name = "AM79C874",
- .config = phy_cmd_am79c874_config,
- .startup = phy_cmd_am79c874_startup,
- .ack_int = phy_cmd_am79c874_ack_int,
- .shutdown = phy_cmd_am79c874_shutdown
-};
-
-
-/* ------------------------------------------------------------------------- */
-/* Kendin KS8721BL phy */
-
-/* register definitions for the 8721 */
-
-#define MII_KS8721BL_RXERCR 21
-#define MII_KS8721BL_ICSR 27
-#define MII_KS8721BL_PHYCR 31
-
-static phy_cmd_t const phy_cmd_ks8721bl_config[] = {
- { mk_mii_read(MII_REG_CR), mii_parse_cr },
- { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_ks8721bl_startup[] = { /* enable interrupts */
- { mk_mii_write(MII_KS8721BL_ICSR, 0xff00), NULL },
- { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_ks8721bl_ack_int[] = {
- /* find out the current status */
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- /* we only need to read ISR to acknowledge */
- { mk_mii_read(MII_KS8721BL_ICSR), NULL },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_ks8721bl_shutdown[] = { /* disable interrupts */
- { mk_mii_write(MII_KS8721BL_ICSR, 0x0000), NULL },
- { mk_mii_end, }
- };
-static phy_info_t const phy_info_ks8721bl = {
- .id = 0x00022161,
- .name = "KS8721BL",
- .config = phy_cmd_ks8721bl_config,
- .startup = phy_cmd_ks8721bl_startup,
- .ack_int = phy_cmd_ks8721bl_ack_int,
- .shutdown = phy_cmd_ks8721bl_shutdown
-};
-
-/* ------------------------------------------------------------------------- */
-/* register definitions for the DP83848 */
-
-#define MII_DP8384X_PHYSTST 16 /* PHY Status Register */
-
-static void mii_parse_dp8384x_sr2(uint mii_reg, struct net_device *dev)
+static int fec_enet_mii_probe(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
- volatile uint *s = &(fep->phy_status);
-
- *s &= ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC);
-
- /* Link up */
- if (mii_reg & 0x0001) {
- fep->link = 1;
- *s |= PHY_STAT_LINK;
- } else
- fep->link = 0;
- /* Status of link */
- if (mii_reg & 0x0010) /* Autonegotioation complete */
- *s |= PHY_STAT_ANC;
- if (mii_reg & 0x0002) { /* 10MBps? */
- if (mii_reg & 0x0004) /* Full Duplex? */
- *s |= PHY_STAT_10FDX;
- else
- *s |= PHY_STAT_10HDX;
- } else { /* 100 Mbps? */
- if (mii_reg & 0x0004) /* Full Duplex? */
- *s |= PHY_STAT_100FDX;
- else
- *s |= PHY_STAT_100HDX;
- }
- if (mii_reg & 0x0008)
- *s |= PHY_STAT_FAULT;
-}
+ struct phy_device *phy_dev = NULL;
+ int phy_addr;
-static phy_info_t phy_info_dp83848= {
- 0x020005c9,
- "DP83848",
-
- (const phy_cmd_t []) { /* config */
- { mk_mii_read(MII_REG_CR), mii_parse_cr },
- { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
- { mk_mii_read(MII_DP8384X_PHYSTST), mii_parse_dp8384x_sr2 },
- { mk_mii_end, }
- },
- (const phy_cmd_t []) { /* startup - enable interrupts */
- { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- { mk_mii_end, }
- },
- (const phy_cmd_t []) { /* ack_int - never happens, no interrupt */
- { mk_mii_end, }
- },
- (const phy_cmd_t []) { /* shutdown */
- { mk_mii_end, }
- },
-};
+ /* find the first phy */
+ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
+ if (fep->mii_bus->phy_map[phy_addr]) {
+ phy_dev = fep->mii_bus->phy_map[phy_addr];
+ break;
+ }
+ }
-static phy_info_t phy_info_lan8700 = {
- 0x0007C0C,
- "LAN8700",
- (const phy_cmd_t []) { /* config */
- { mk_mii_read(MII_REG_CR), mii_parse_cr },
- { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
- { mk_mii_end, }
- },
- (const phy_cmd_t []) { /* startup */
- { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- { mk_mii_end, }
- },
- (const phy_cmd_t []) { /* act_int */
- { mk_mii_end, }
- },
- (const phy_cmd_t []) { /* shutdown */
- { mk_mii_end, }
- },
-};
-/* ------------------------------------------------------------------------- */
+ if (!phy_dev) {
+ printk(KERN_ERR "%s: no PHY found\n", dev->name);
+ return -ENODEV;
+ }
-static phy_info_t const * const phy_info[] = {
- &phy_info_lxt970,
- &phy_info_lxt971,
- &phy_info_qs6612,
- &phy_info_am79c874,
- &phy_info_ks8721bl,
- &phy_info_dp83848,
- &phy_info_lan8700,
- NULL
-};
+ /* attach the mac to the phy */
+ phy_dev = phy_connect(dev, dev_name(&phy_dev->dev),
+ &fec_enet_adjust_link, 0,
+ PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(phy_dev)) {
+ printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+ return PTR_ERR(phy_dev);
+ }
-/* ------------------------------------------------------------------------- */
-#ifdef HAVE_mii_link_interrupt
-static irqreturn_t
-mii_link_interrupt(int irq, void * dev_id);
+ /* mask with MAC supported features */
+ phy_dev->supported &= PHY_BASIC_FEATURES;
+ phy_dev->advertising = phy_dev->supported;
-/*
- * This is specific to the MII interrupt setup of the M5272EVB.
- */
-static void __inline__ fec_request_mii_intr(struct net_device *dev)
-{
- if (request_irq(66, mii_link_interrupt, IRQF_DISABLED, "fec(MII)", dev) != 0)
- printk("FEC: Could not allocate fec(MII) IRQ(66)!\n");
-}
+ fep->phy_dev = phy_dev;
+ fep->link = 0;
+ fep->full_duplex = 0;
-static void __inline__ fec_disable_phy_intr(struct net_device *dev)
-{
- free_irq(66, dev);
+ return 0;
}
-#endif
-#ifdef CONFIG_M5272
-static void __inline__ fec_get_mac(struct net_device *dev)
+static int fec_enet_mii_init(struct platform_device *pdev)
{
+ struct net_device *dev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(dev);
- unsigned char *iap, tmpaddr[ETH_ALEN];
+ int err = -ENXIO, i;
- if (FEC_FLASHMAC) {
- /*
- * Get MAC address from FLASH.
- * If it is all 1's or 0's, use the default.
- */
- iap = (unsigned char *)FEC_FLASHMAC;
- if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
- (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
- iap = fec_mac_default;
- if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
- (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
- iap = fec_mac_default;
- } else {
- *((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW);
- *((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
- iap = &tmpaddr[0];
- }
+ fep->mii_timeout = 0;
- memcpy(dev->dev_addr, iap, ETH_ALEN);
-
- /* Adjust MAC if using default MAC address */
- if (iap == fec_mac_default)
- dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
-}
-#endif
-
-/* ------------------------------------------------------------------------- */
-
-static void mii_display_status(struct net_device *dev)
-{
- struct fec_enet_private *fep = netdev_priv(dev);
- volatile uint *s = &(fep->phy_status);
+ /*
+ * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
+ */
+ fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk), 5000000) << 1;
+ writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
- if (!fep->link && !fep->old_link) {
- /* Link is still down - don't print anything */
- return;
+ fep->mii_bus = mdiobus_alloc();
+ if (fep->mii_bus == NULL) {
+ err = -ENOMEM;
+ goto err_out;
}
- printk("%s: status: ", dev->name);
-
- if (!fep->link) {
- printk("link down");
- } else {
- printk("link up");
-
- switch(*s & PHY_STAT_SPMASK) {
- case PHY_STAT_100FDX: printk(", 100MBit Full Duplex"); break;
- case PHY_STAT_100HDX: printk(", 100MBit Half Duplex"); break;
- case PHY_STAT_10FDX: printk(", 10MBit Full Duplex"); break;
- case PHY_STAT_10HDX: printk(", 10MBit Half Duplex"); break;
- default:
- printk(", Unknown speed/duplex");
- }
-
- if (*s & PHY_STAT_ANC)
- printk(", auto-negotiation complete");
+ fep->mii_bus->name = "fec_enet_mii_bus";
+ fep->mii_bus->read = fec_enet_mdio_read;
+ fep->mii_bus->write = fec_enet_mdio_write;
+ fep->mii_bus->reset = fec_enet_mdio_reset;
+ snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
+ fep->mii_bus->priv = fep;
+ fep->mii_bus->parent = &pdev->dev;
+
+ fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (!fep->mii_bus->irq) {
+ err = -ENOMEM;
+ goto err_out_free_mdiobus;
}
- if (*s & PHY_STAT_FAULT)
- printk(", remote fault");
-
- printk(".\n");
-}
-
-static void mii_display_config(struct work_struct *work)
-{
- struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task);
- struct net_device *dev = fep->netdev;
- uint status = fep->phy_status;
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ fep->mii_bus->irq[i] = PHY_POLL;
- /*
- ** When we get here, phy_task is already removed from
- ** the workqueue. It is thus safe to allow to reuse it.
- */
- fep->mii_phy_task_queued = 0;
- printk("%s: config: auto-negotiation ", dev->name);
-
- if (status & PHY_CONF_ANE)
- printk("on");
- else
- printk("off");
+ platform_set_drvdata(dev, fep->mii_bus);
- if (status & PHY_CONF_100FDX)
- printk(", 100FDX");
- if (status & PHY_CONF_100HDX)
- printk(", 100HDX");
- if (status & PHY_CONF_10FDX)
- printk(", 10FDX");
- if (status & PHY_CONF_10HDX)
- printk(", 10HDX");
- if (!(status & PHY_CONF_SPMASK))
- printk(", No speed/duplex selected?");
+ if (mdiobus_register(fep->mii_bus))
+ goto err_out_free_mdio_irq;
- if (status & PHY_CONF_LOOP)
- printk(", loopback enabled");
+ if (fec_enet_mii_probe(dev) != 0)
+ goto err_out_unregister_bus;
- printk(".\n");
+ return 0;
- fep->sequence_done = 1;
+err_out_unregister_bus:
+ mdiobus_unregister(fep->mii_bus);
+err_out_free_mdio_irq:
+ kfree(fep->mii_bus->irq);
+err_out_free_mdiobus:
+ mdiobus_free(fep->mii_bus);
+err_out:
+ return err;
}
-static void mii_relink(struct work_struct *work)
+static void fec_enet_mii_remove(struct fec_enet_private *fep)
{
- struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task);
- struct net_device *dev = fep->netdev;
- int duplex;
-
- /*
- ** When we get here, phy_task is already removed from
- ** the workqueue. It is thus safe to allow to reuse it.
- */
- fep->mii_phy_task_queued = 0;
- fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0;
- mii_display_status(dev);
- fep->old_link = fep->link;
-
- if (fep->link) {
- duplex = 0;
- if (fep->phy_status
- & (PHY_STAT_100FDX | PHY_STAT_10FDX))
- duplex = 1;
- fec_restart(dev, duplex);
- } else
- fec_stop(dev);
+ if (fep->phy_dev)
+ phy_disconnect(fep->phy_dev);
+ mdiobus_unregister(fep->mii_bus);
+ kfree(fep->mii_bus->irq);
+ mdiobus_free(fep->mii_bus);
}
-/* mii_queue_relink is called in interrupt context from mii_link_interrupt */
-static void mii_queue_relink(uint mii_reg, struct net_device *dev)
+static int fec_enet_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
{
struct fec_enet_private *fep = netdev_priv(dev);
+ struct phy_device *phydev = fep->phy_dev;
- /*
- * We cannot queue phy_task twice in the workqueue. It
- * would cause an endless loop in the workqueue.
- * Fortunately, if the last mii_relink entry has not yet been
- * executed now, it will do the job for the current interrupt,
- * which is just what we want.
- */
- if (fep->mii_phy_task_queued)
- return;
+ if (!phydev)
+ return -ENODEV;
- fep->mii_phy_task_queued = 1;
- INIT_WORK(&fep->phy_task, mii_relink);
- schedule_work(&fep->phy_task);
+ return phy_ethtool_gset(phydev, cmd);
}
-/* mii_queue_config is called in interrupt context from fec_enet_mii */
-static void mii_queue_config(uint mii_reg, struct net_device *dev)
+static int fec_enet_set_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
{
struct fec_enet_private *fep = netdev_priv(dev);
+ struct phy_device *phydev = fep->phy_dev;
- if (fep->mii_phy_task_queued)
- return;
+ if (!phydev)
+ return -ENODEV;
- fep->mii_phy_task_queued = 1;
- INIT_WORK(&fep->phy_task, mii_display_config);
- schedule_work(&fep->phy_task);
+ return phy_ethtool_sset(phydev, cmd);
}
-phy_cmd_t const phy_cmd_relink[] = {
- { mk_mii_read(MII_REG_CR), mii_queue_relink },
- { mk_mii_end, }
- };
-phy_cmd_t const phy_cmd_config[] = {
- { mk_mii_read(MII_REG_CR), mii_queue_config },
- { mk_mii_end, }
- };
-
-/* Read remainder of PHY ID. */
-static void
-mii_discover_phy3(uint mii_reg, struct net_device *dev)
+static void fec_enet_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
{
- struct fec_enet_private *fep;
- int i;
-
- fep = netdev_priv(dev);
- fep->phy_id |= (mii_reg & 0xffff);
- printk("fec: PHY @ 0x%x, ID 0x%08x", fep->phy_addr, fep->phy_id);
-
- for(i = 0; phy_info[i]; i++) {
- if(phy_info[i]->id == (fep->phy_id >> 4))
- break;
- }
-
- if (phy_info[i])
- printk(" -- %s\n", phy_info[i]->name);
- else
- printk(" -- unknown PHY!\n");
+ struct fec_enet_private *fep = netdev_priv(dev);
- fep->phy = phy_info[i];
- fep->phy_id_done = 1;
+ strcpy(info->driver, fep->pdev->dev.driver->name);
+ strcpy(info->version, "Revision: 1.0");
+ strcpy(info->bus_info, dev_name(&dev->dev));
}
-/* Scan all of the MII PHY addresses looking for someone to respond
- * with a valid ID. This usually happens quickly.
- */
-static void
-mii_discover_phy(uint mii_reg, struct net_device *dev)
-{
- struct fec_enet_private *fep;
- uint phytype;
-
- fep = netdev_priv(dev);
-
- if (fep->phy_addr < 32) {
- if ((phytype = (mii_reg & 0xffff)) != 0xffff && phytype != 0) {
-
- /* Got first part of ID, now get remainder */
- fep->phy_id = phytype << 16;
- mii_queue_unlocked(dev, mk_mii_read(MII_REG_PHYIR2),
- mii_discover_phy3);
- } else {
- fep->phy_addr++;
- mii_queue_unlocked(dev, mk_mii_read(MII_REG_PHYIR1),
- mii_discover_phy);
- }
- } else {
- printk("FEC: No PHY device found.\n");
- /* Disable external MII interface */
- writel(0, fep->hwp + FEC_MII_SPEED);
- fep->phy_speed = 0;
-#ifdef HAVE_mii_link_interrupt
- fec_disable_phy_intr(dev);
-#endif
- }
-}
+static struct ethtool_ops fec_enet_ethtool_ops = {
+ .get_settings = fec_enet_get_settings,
+ .set_settings = fec_enet_set_settings,
+ .get_drvinfo = fec_enet_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
-/* This interrupt occurs when the PHY detects a link change */
-#ifdef HAVE_mii_link_interrupt
-static irqreturn_t
-mii_link_interrupt(int irq, void * dev_id)
+static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct net_device *dev = dev_id;
struct fec_enet_private *fep = netdev_priv(dev);
+ struct phy_device *phydev = fep->phy_dev;
+
+ if (!netif_running(dev))
+ return -EINVAL;
- mii_do_cmd(dev, fep->phy->ack_int);
- mii_do_cmd(dev, phy_cmd_relink); /* restart and display status */
+ if (!phydev)
+ return -ENODEV;
- return IRQ_HANDLED;
+ return phy_mii_ioctl(phydev, if_mii(rq), cmd);
}
-#endif
static void fec_enet_free_buffers(struct net_device *dev)
{
@@ -1509,35 +913,8 @@ fec_enet_open(struct net_device *dev)
if (ret)
return ret;
- fep->sequence_done = 0;
- fep->link = 0;
-
- fec_restart(dev, 1);
-
- if (fep->phy) {
- mii_do_cmd(dev, fep->phy->ack_int);
- mii_do_cmd(dev, fep->phy->config);
- mii_do_cmd(dev, phy_cmd_config); /* display configuration */
-
- /* Poll until the PHY tells us its configuration
- * (not link state).
- * Request is initiated by mii_do_cmd above, but answer
- * comes by interrupt.
- * This should take about 25 usec per register at 2.5 MHz,
- * and we read approximately 5 registers.
- */
- while(!fep->sequence_done)
- schedule();
-
- mii_do_cmd(dev, fep->phy->startup);
- }
-
- /* Set the initial link state to true. A lot of hardware
- * based on this device does not implement a PHY interrupt,
- * so we are never notified of link change.
- */
- fep->link = 1;
-
+ /* schedule a link state check */
+ phy_start(fep->phy_dev);
netif_start_queue(dev);
fep->opened = 1;
return 0;
@@ -1550,6 +927,7 @@ fec_enet_close(struct net_device *dev)
/* Don't know what to do yet. */
fep->opened = 0;
+ phy_stop(fep->phy_dev);
netif_stop_queue(dev);
fec_stop(dev);
@@ -1574,7 +952,7 @@ fec_enet_close(struct net_device *dev)
static void set_multicast_list(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
unsigned int i, bit, data, crc, tmp;
unsigned char hash;
@@ -1604,16 +982,16 @@ static void set_multicast_list(struct net_device *dev)
writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
- netdev_for_each_mc_addr(dmi, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
/* Only support group multicast for now */
- if (!(dmi->dmi_addr[0] & 1))
+ if (!(ha->addr[0] & 1))
continue;
/* calculate crc32 value of mac address */
crc = 0xffffffff;
- for (i = 0; i < dmi->dmi_addrlen; i++) {
- data = dmi->dmi_addr[i];
+ for (i = 0; i < dev->addr_len; i++) {
+ data = ha->addr[i];
for (bit = 0; bit < 8; bit++, data >>= 1) {
crc = (crc >> 1) ^
(((crc ^ data) & 1) ? CRC32_POLY : 0);
@@ -1666,6 +1044,7 @@ static const struct net_device_ops fec_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = fec_timeout,
.ndo_set_mac_address = fec_set_mac_address,
+ .ndo_do_ioctl = fec_enet_ioctl,
};
/*
@@ -1689,7 +1068,6 @@ static int fec_enet_init(struct net_device *dev, int index)
}
spin_lock_init(&fep->hw_lock);
- spin_lock_init(&fep->mii_lock);
fep->index = index;
fep->hwp = (void __iomem *)dev->base_addr;
@@ -1716,20 +1094,10 @@ static int fec_enet_init(struct net_device *dev, int index)
fep->rx_bd_base = cbd_base;
fep->tx_bd_base = cbd_base + RX_RING_SIZE;
-#ifdef HAVE_mii_link_interrupt
- fec_request_mii_intr(dev);
-#endif
/* The FEC Ethernet specific entries in the device structure */
dev->watchdog_timeo = TX_TIMEOUT;
dev->netdev_ops = &fec_netdev_ops;
-
- for (i=0; i<NMII-1; i++)
- mii_cmds[i].mii_next = &mii_cmds[i+1];
- mii_free = mii_cmds;
-
- /* Set MII speed to 2.5 MHz */
- fep->phy_speed = ((((clk_get_rate(fep->clk) / 2 + 4999999)
- / 2500000) / 2) & 0x3F) << 1;
+ dev->ethtool_ops = &fec_enet_ethtool_ops;
/* Initialize the receive buffer descriptors. */
bdp = fep->rx_bd_base;
@@ -1760,13 +1128,6 @@ static int fec_enet_init(struct net_device *dev, int index)
fec_restart(dev, 0);
- /* Queue up command to detect the PHY and initialize the
- * remainder of the interface.
- */
- fep->phy_id_done = 0;
- fep->phy_addr = 0;
- mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), mii_discover_phy);
-
return 0;
}
@@ -1835,8 +1196,7 @@ fec_restart(struct net_device *dev, int duplex)
writel(0, fep->hwp + FEC_R_DES_ACTIVE);
/* Enable interrupts we wish to service */
- writel(FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII,
- fep->hwp + FEC_IMASK);
+ writel(FEC_ENET_TXF | FEC_ENET_RXF, fep->hwp + FEC_IMASK);
}
static void
@@ -1859,7 +1219,6 @@ fec_stop(struct net_device *dev)
/* Clear outstanding MII command interrupts. */
writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
- writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
}
@@ -1891,6 +1250,7 @@ fec_probe(struct platform_device *pdev)
memset(fep, 0, sizeof(*fep));
ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r));
+ fep->pdev = pdev;
if (!ndev->base_addr) {
ret = -ENOMEM;
@@ -1926,13 +1286,24 @@ fec_probe(struct platform_device *pdev)
if (ret)
goto failed_init;
+ ret = fec_enet_mii_init(pdev);
+ if (ret)
+ goto failed_mii_init;
+
ret = register_netdev(ndev);
if (ret)
goto failed_register;
+ printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] "
+ "(mii_bus:phy_addr=%s, irq=%d)\n", ndev->name,
+ fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
+ fep->phy_dev->irq);
+
return 0;
failed_register:
+ fec_enet_mii_remove(fep);
+failed_mii_init:
failed_init:
clk_disable(fep->clk);
clk_put(fep->clk);
@@ -1959,6 +1330,7 @@ fec_drv_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
fec_stop(ndev);
+ fec_enet_mii_remove(fep);
clk_disable(fep->clk);
clk_put(fep->clk);
iounmap((void __iomem *)ndev->base_addr);
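For context: the fec.c changes above replace the driver's private MII command queue and per-PHY tables with phylib -- an mii_bus is registered with fec_enet_mdio_read()/write(), the first PHY found is attached through phy_connect(), and fec_enet_adjust_link() restarts or stops the MAC on link changes. The MII clock setup deserves a worked example, assuming the usual FEC layout where MII_SPEED bits [6:1] hold the MDC divisor (clock value and register layout here are assumptions for illustration):

	clk       = 66 MHz                                   (assumed example)
	divisor   = DIV_ROUND_UP(66000000, 5000000) = 14
	phy_speed = divisor << 1 = 28                        (value written to FEC_MII_SPEED)
	MDC       = clk / (2 * divisor) ~= 2.36 MHz          (just under the 2.5 MHz target)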
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index 4a43e56b7394..221f440c10f4 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -327,7 +327,6 @@ static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
spin_lock_irqsave(&priv->lock, flags);
- dev->trans_start = jiffies;
bd = (struct bcom_fec_bd *)
bcom_prepare_next_buffer(priv->tx_dmatsk);
@@ -436,7 +435,6 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
DMA_FROM_DEVICE);
length = status & BCOM_FEC_RX_BD_LEN_MASK;
skb_put(rskb, length - 4); /* length without CRC32 */
- rskb->dev = dev;
rskb->protocol = eth_type_trans(rskb, dev);
netif_rx(rskb);
@@ -576,12 +574,12 @@ static void mpc52xx_fec_set_multicast_list(struct net_device *dev)
out_be32(&fec->gaddr2, 0xffffffff);
} else {
u32 crc;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
u32 gaddr1 = 0x00000000;
u32 gaddr2 = 0x00000000;
- netdev_for_each_mc_addr(dmi, dev) {
- crc = ether_crc_le(6, dmi->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(6, ha->addr) >> 26;
if (crc >= 32)
gaddr1 |= 1 << (crc-32);
else
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 5c98f7c22425..268ea4d566d7 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -1104,20 +1104,16 @@ static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
static void nv_napi_enable(struct net_device *dev)
{
-#ifdef CONFIG_FORCEDETH_NAPI
struct fe_priv *np = get_nvpriv(dev);
napi_enable(&np->napi);
-#endif
}
static void nv_napi_disable(struct net_device *dev)
{
-#ifdef CONFIG_FORCEDETH_NAPI
struct fe_priv *np = get_nvpriv(dev);
napi_disable(&np->napi);
-#endif
}
#define MII_READ (-1)
@@ -1810,7 +1806,6 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
}
/* If rx bufs are exhausted called after 50ms to attempt to refresh */
-#ifdef CONFIG_FORCEDETH_NAPI
static void nv_do_rx_refill(unsigned long data)
{
struct net_device *dev = (struct net_device *) data;
@@ -1819,41 +1814,6 @@ static void nv_do_rx_refill(unsigned long data)
/* Just reschedule NAPI rx processing */
napi_schedule(&np->napi);
}
-#else
-static void nv_do_rx_refill(unsigned long data)
-{
- struct net_device *dev = (struct net_device *) data;
- struct fe_priv *np = netdev_priv(dev);
- int retcode;
-
- if (!using_multi_irqs(dev)) {
- if (np->msi_flags & NV_MSI_X_ENABLED)
- disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
- else
- disable_irq(np->pci_dev->irq);
- } else {
- disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
- }
- if (!nv_optimized(np))
- retcode = nv_alloc_rx(dev);
- else
- retcode = nv_alloc_rx_optimized(dev);
- if (retcode) {
- spin_lock_irq(&np->lock);
- if (!np->in_shutdown)
- mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
- spin_unlock_irq(&np->lock);
- }
- if (!using_multi_irqs(dev)) {
- if (np->msi_flags & NV_MSI_X_ENABLED)
- enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
- else
- enable_irq(np->pci_dev->irq);
- } else {
- enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
- }
-}
-#endif
static void nv_init_rx(struct net_device *dev)
{
@@ -2148,7 +2108,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int i;
u32 offset = 0;
u32 bcnt;
- u32 size = skb->len-skb->data_len;
+ u32 size = skb_headlen(skb);
u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
u32 empty_slots;
struct ring_desc* put_tx;
@@ -2254,7 +2214,6 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
dprintk("\n");
}
- dev->trans_start = jiffies;
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
return NETDEV_TX_OK;
}
@@ -2269,7 +2228,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
unsigned int i;
u32 offset = 0;
u32 bcnt;
- u32 size = skb->len-skb->data_len;
+ u32 size = skb_headlen(skb);
u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
u32 empty_slots;
struct ring_desc_ex* put_tx;
@@ -2409,7 +2368,6 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
dprintk("\n");
}
- dev->trans_start = jiffies;
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
return NETDEV_TX_OK;
}
@@ -2816,11 +2774,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
skb->protocol = eth_type_trans(skb, dev);
dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
dev->name, len, skb->protocol);
-#ifdef CONFIG_FORCEDETH_NAPI
- netif_receive_skb(skb);
-#else
- netif_rx(skb);
-#endif
+ napi_gro_receive(&np->napi, skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
next_pkt:
@@ -2909,27 +2863,14 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
dev->name, len, skb->protocol);
if (likely(!np->vlangrp)) {
-#ifdef CONFIG_FORCEDETH_NAPI
- netif_receive_skb(skb);
-#else
- netif_rx(skb);
-#endif
+ napi_gro_receive(&np->napi, skb);
} else {
vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
-#ifdef CONFIG_FORCEDETH_NAPI
- vlan_hwaccel_receive_skb(skb, np->vlangrp,
- vlanflags & NV_RX3_VLAN_TAG_MASK);
-#else
- vlan_hwaccel_rx(skb, np->vlangrp,
- vlanflags & NV_RX3_VLAN_TAG_MASK);
-#endif
+ vlan_gro_receive(&np->napi, np->vlangrp,
+ vlanflags & NV_RX3_VLAN_TAG_MASK, skb);
} else {
-#ifdef CONFIG_FORCEDETH_NAPI
- netif_receive_skb(skb);
-#else
- netif_rx(skb);
-#endif
+ napi_gro_receive(&np->napi, skb);
}
}
@@ -3104,12 +3045,14 @@ static void nv_set_multicast(struct net_device *dev)
if (dev->flags & IFF_ALLMULTI) {
alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
} else {
- struct dev_mc_list *walk;
+ struct netdev_hw_addr *ha;
- netdev_for_each_mc_addr(walk, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
+ unsigned char *addr = ha->addr;
u32 a, b;
- a = le32_to_cpu(*(__le32 *) walk->dmi_addr);
- b = le16_to_cpu(*(__le16 *) (&walk->dmi_addr[4]));
+
+ a = le32_to_cpu(*(__le32 *) addr);
+ b = le16_to_cpu(*(__le16 *) (&addr[4]));
alwaysOn[0] &= a;
alwaysOff[0] &= ~a;
alwaysOn[1] &= b;
@@ -3494,10 +3437,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
struct net_device *dev = (struct net_device *) data;
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
-#ifndef CONFIG_FORCEDETH_NAPI
- int total_work = 0;
- int loop_count = 0;
-#endif
dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
@@ -3514,7 +3453,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
nv_msi_workaround(np);
-#ifdef CONFIG_FORCEDETH_NAPI
if (napi_schedule_prep(&np->napi)) {
/*
* Disable further irq's (msix not enabled with napi)
@@ -3523,65 +3461,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
__napi_schedule(&np->napi);
}
-#else
- do
- {
- int work = 0;
- if ((work = nv_rx_process(dev, RX_WORK_PER_LOOP))) {
- if (unlikely(nv_alloc_rx(dev))) {
- spin_lock(&np->lock);
- if (!np->in_shutdown)
- mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
- spin_unlock(&np->lock);
- }
- }
-
- spin_lock(&np->lock);
- work += nv_tx_done(dev, TX_WORK_PER_LOOP);
- spin_unlock(&np->lock);
-
- if (!work)
- break;
-
- total_work += work;
-
- loop_count++;
- }
- while (loop_count < max_interrupt_work);
-
- if (nv_change_interrupt_mode(dev, total_work)) {
- /* setup new irq mask */
- writel(np->irqmask, base + NvRegIrqMask);
- }
-
- if (unlikely(np->events & NVREG_IRQ_LINK)) {
- spin_lock(&np->lock);
- nv_link_irq(dev);
- spin_unlock(&np->lock);
- }
- if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
- spin_lock(&np->lock);
- nv_linkchange(dev);
- spin_unlock(&np->lock);
- np->link_timeout = jiffies + LINK_TIMEOUT;
- }
- if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
- spin_lock(&np->lock);
- /* disable interrupts on the nic */
- if (!(np->msi_flags & NV_MSI_X_ENABLED))
- writel(0, base + NvRegIrqMask);
- else
- writel(np->irqmask, base + NvRegIrqMask);
- pci_push(base);
-
- if (!np->in_shutdown) {
- np->nic_poll_irq = np->irqmask;
- np->recover_error = 1;
- mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
- }
- spin_unlock(&np->lock);
- }
-#endif
dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);
return IRQ_HANDLED;
@@ -3597,10 +3476,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
struct net_device *dev = (struct net_device *) data;
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
-#ifndef CONFIG_FORCEDETH_NAPI
- int total_work = 0;
- int loop_count = 0;
-#endif
dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);
@@ -3617,7 +3492,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
nv_msi_workaround(np);
-#ifdef CONFIG_FORCEDETH_NAPI
if (napi_schedule_prep(&np->napi)) {
/*
* Disable further irq's (msix not enabled with napi)
@@ -3625,66 +3499,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
writel(0, base + NvRegIrqMask);
__napi_schedule(&np->napi);
}
-#else
- do
- {
- int work = 0;
- if ((work = nv_rx_process_optimized(dev, RX_WORK_PER_LOOP))) {
- if (unlikely(nv_alloc_rx_optimized(dev))) {
- spin_lock(&np->lock);
- if (!np->in_shutdown)
- mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
- spin_unlock(&np->lock);
- }
- }
-
- spin_lock(&np->lock);
- work += nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
- spin_unlock(&np->lock);
-
- if (!work)
- break;
-
- total_work += work;
-
- loop_count++;
- }
- while (loop_count < max_interrupt_work);
-
- if (nv_change_interrupt_mode(dev, total_work)) {
- /* setup new irq mask */
- writel(np->irqmask, base + NvRegIrqMask);
- }
-
- if (unlikely(np->events & NVREG_IRQ_LINK)) {
- spin_lock(&np->lock);
- nv_link_irq(dev);
- spin_unlock(&np->lock);
- }
- if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
- spin_lock(&np->lock);
- nv_linkchange(dev);
- spin_unlock(&np->lock);
- np->link_timeout = jiffies + LINK_TIMEOUT;
- }
- if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
- spin_lock(&np->lock);
- /* disable interrupts on the nic */
- if (!(np->msi_flags & NV_MSI_X_ENABLED))
- writel(0, base + NvRegIrqMask);
- else
- writel(np->irqmask, base + NvRegIrqMask);
- pci_push(base);
-
- if (!np->in_shutdown) {
- np->nic_poll_irq = np->irqmask;
- np->recover_error = 1;
- mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
- }
- spin_unlock(&np->lock);
- }
-
-#endif
dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);
return IRQ_HANDLED;
@@ -3733,7 +3547,6 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
return IRQ_RETVAL(i);
}
-#ifdef CONFIG_FORCEDETH_NAPI
static int nv_napi_poll(struct napi_struct *napi, int budget)
{
struct fe_priv *np = container_of(napi, struct fe_priv, napi);
@@ -3741,23 +3554,27 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
u8 __iomem *base = get_hwbase(dev);
unsigned long flags;
int retcode;
- int tx_work, rx_work;
+ int rx_count, tx_work=0, rx_work=0;
- if (!nv_optimized(np)) {
- spin_lock_irqsave(&np->lock, flags);
- tx_work = nv_tx_done(dev, np->tx_ring_size);
- spin_unlock_irqrestore(&np->lock, flags);
+ do {
+ if (!nv_optimized(np)) {
+ spin_lock_irqsave(&np->lock, flags);
+ tx_work += nv_tx_done(dev, np->tx_ring_size);
+ spin_unlock_irqrestore(&np->lock, flags);
- rx_work = nv_rx_process(dev, budget);
- retcode = nv_alloc_rx(dev);
- } else {
- spin_lock_irqsave(&np->lock, flags);
- tx_work = nv_tx_done_optimized(dev, np->tx_ring_size);
- spin_unlock_irqrestore(&np->lock, flags);
+ rx_count = nv_rx_process(dev, budget - rx_work);
+ retcode = nv_alloc_rx(dev);
+ } else {
+ spin_lock_irqsave(&np->lock, flags);
+ tx_work += nv_tx_done_optimized(dev, np->tx_ring_size);
+ spin_unlock_irqrestore(&np->lock, flags);
- rx_work = nv_rx_process_optimized(dev, budget);
- retcode = nv_alloc_rx_optimized(dev);
- }
+ rx_count = nv_rx_process_optimized(dev,
+ budget - rx_work);
+ retcode = nv_alloc_rx_optimized(dev);
+ }
+ } while (retcode == 0 &&
+ rx_count > 0 && (rx_work += rx_count) < budget);
if (retcode) {
spin_lock_irqsave(&np->lock, flags);
@@ -3800,7 +3617,6 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
}
return rx_work;
}
-#endif
static irqreturn_t nv_nic_irq_rx(int foo, void *data)
{
@@ -5706,6 +5522,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
dev->features |= NETIF_F_TSO;
+ dev->features |= NETIF_F_GRO;
}
np->vlanctl_bits = 0;
@@ -5758,9 +5575,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
else
dev->netdev_ops = &nv_netdev_ops_optimized;
-#ifdef CONFIG_FORCEDETH_NAPI
netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
-#endif
SET_ETHTOOL_OPS(dev, &ops);
dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
@@ -5863,7 +5678,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
/* msix has had reported issues when modifying irqmask
as in the case of napi, therefore, disable for now
*/
-#ifndef CONFIG_FORCEDETH_NAPI
+#if 0
np->msi_flags |= NV_MSI_X_CAPABLE;
#endif
}
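With CONFIG_FORCEDETH_NAPI removed, forcedeth always runs the NAPI path and now hands received frames to GRO through napi_gro_receive()/vlan_gro_receive(). For reference, the general shape of such a poll routine is sketched below; struct my_priv, my_rx_next_skb() and my_enable_rx_irq() are placeholders, not forcedeth symbols:

static int my_napi_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *np = container_of(napi, struct my_priv, napi);
	int work = 0;

	/* Process at most 'budget' packets, handing each skb to GRO. */
	while (work < budget) {
		struct sk_buff *skb = my_rx_next_skb(np);

		if (!skb)
			break;
		napi_gro_receive(napi, skb);
		work++;
	}

	/* Only when the ring is drained may we leave polled mode and
	 * re-enable the receive interrupt. */
	if (work < budget) {
		napi_complete(napi);
		my_enable_rx_irq(np);
	}
	return work;
}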
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 0770e2f6da6b..0fb0fefcb787 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -674,8 +674,6 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb->data, skb->len, DMA_TO_DEVICE));
CBDW_DATLEN(bdp, skb->len);
- dev->trans_start = jiffies;
-
/*
* If this was the last BD in the ring, start at the beginning again.
*/
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
index 0a973e71876b..714da967fa19 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -231,12 +231,12 @@ static void set_multicast_finish(struct net_device *dev)
static void set_multicast_list(struct net_device *dev)
{
- struct dev_mc_list *pmc;
+ struct netdev_hw_addr *ha;
if ((dev->flags & IFF_PROMISC) == 0) {
set_multicast_start(dev);
- netdev_for_each_mc_addr(pmc, dev)
- set_multicast_one(dev, pmc->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev)
+ set_multicast_one(dev, ha->addr);
set_multicast_finish(dev);
} else
set_promiscuous_mode(dev);
diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c
index ec81f50d5919..7eff92ef01da 100644
--- a/drivers/net/fs_enet/mac-fec.c
+++ b/drivers/net/fs_enet/mac-fec.c
@@ -232,12 +232,12 @@ static void set_multicast_finish(struct net_device *dev)
static void set_multicast_list(struct net_device *dev)
{
- struct dev_mc_list *pmc;
+ struct netdev_hw_addr *ha;
if ((dev->flags & IFF_PROMISC) == 0) {
set_multicast_start(dev);
- netdev_for_each_mc_addr(pmc, dev)
- set_multicast_one(dev, pmc->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev)
+ set_multicast_one(dev, ha->addr);
set_multicast_finish(dev);
} else
set_promiscuous_mode(dev);
diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c
index 34d3da751eb4..7f0591e43cd9 100644
--- a/drivers/net/fs_enet/mac-scc.c
+++ b/drivers/net/fs_enet/mac-scc.c
@@ -223,12 +223,12 @@ static void set_multicast_finish(struct net_device *dev)
static void set_multicast_list(struct net_device *dev)
{
- struct dev_mc_list *pmc;
+ struct netdev_hw_addr *ha;
if ((dev->flags & IFF_PROMISC) == 0) {
set_multicast_start(dev);
- netdev_for_each_mc_addr(pmc, dev)
- set_multicast_one(dev, pmc->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev)
+ set_multicast_one(dev, ha->addr);
set_multicast_finish(dev);
} else
set_promiscuous_mode(dev);
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 4e97ca182997..ea7d5ddb7760 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -82,6 +82,7 @@
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
+#include <linux/net_tstamp.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -377,6 +378,13 @@ static void gfar_init_mac(struct net_device *ndev)
rctrl |= RCTRL_PADDING(priv->padding);
}
+ /* Insert receive time stamps into padding alignment bytes */
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
+ rctrl &= ~RCTRL_PAL_MASK;
+ rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE | RCTRL_PADDING(8);
+ priv->padding = 8;
+ }
+
/* keep vlan related bits if it's enabled */
if (priv->vlgrp) {
rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
@@ -501,7 +509,8 @@ void unlock_tx_qs(struct gfar_private *priv)
/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
- return priv->vlgrp || priv->rx_csum_enable;
+ return priv->vlgrp || priv->rx_csum_enable ||
+ (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
}
static void free_tx_pointers(struct gfar_private *priv)
@@ -738,7 +747,8 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
FSL_GIANFAR_DEV_HAS_CSUM |
FSL_GIANFAR_DEV_HAS_VLAN |
FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
- FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;
+ FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
+ FSL_GIANFAR_DEV_HAS_TIMER;
ctype = of_get_property(np, "phy-connection-type", NULL);
@@ -768,6 +778,48 @@ err_grp_init:
return err;
}
+static int gfar_hwtstamp_ioctl(struct net_device *netdev,
+ struct ifreq *ifr, int cmd)
+{
+ struct hwtstamp_config config;
+ struct gfar_private *priv = netdev_priv(netdev);
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ priv->hwts_tx_en = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
+ return -ERANGE;
+ priv->hwts_tx_en = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ priv->hwts_rx_en = 0;
+ break;
+ default:
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
+ return -ERANGE;
+ priv->hwts_rx_en = 1;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ }
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
@@ -776,6 +828,9 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (!netif_running(dev))
return -EINVAL;
+ if (cmd == SIOCSHWTSTAMP)
+ return gfar_hwtstamp_ioctl(dev, rq, cmd);
+
if (!priv->phydev)
return -ENODEV;
@@ -978,7 +1033,8 @@ static int gfar_probe(struct of_device *ofdev,
else
priv->padding = 0;
- if (dev->features & NETIF_F_IP_CSUM)
+ if (dev->features & NETIF_F_IP_CSUM ||
+ priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
dev->hard_header_len += GMAC_FCB_LEN;
/* Program the isrg regs only if number of grps > 1 */
@@ -1649,6 +1705,7 @@ static void free_skb_resources(struct gfar_private *priv)
sizeof(struct rxbd8) * priv->total_rx_ring_size,
priv->tx_queue[0]->tx_bd_base,
priv->tx_queue[0]->tx_bd_dma_base);
+ skb_queue_purge(&priv->rx_recycle);
}
void gfar_start(struct net_device *dev)
@@ -1682,7 +1739,7 @@ void gfar_start(struct net_device *dev)
gfar_write(&regs->imask, IMASK_DEFAULT);
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
}
void gfar_configure_coalescing(struct gfar_private *priv,
@@ -1922,23 +1979,29 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct netdev_queue *txq;
struct gfar __iomem *regs = NULL;
struct txfcb *fcb = NULL;
- struct txbd8 *txbdp, *txbdp_start, *base;
+ struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
u32 lstatus;
- int i, rq = 0;
+ int i, rq = 0, do_tstamp = 0;
u32 bufaddr;
unsigned long flags;
- unsigned int nr_frags, length;
-
+ unsigned int nr_frags, nr_txbds, length;
+ union skb_shared_tx *shtx;
rq = skb->queue_mapping;
tx_queue = priv->tx_queue[rq];
txq = netdev_get_tx_queue(dev, rq);
base = tx_queue->tx_bd_base;
regs = tx_queue->grp->regs;
+ shtx = skb_tx(skb);
+
+ /* check if time stamp should be generated */
+ if (unlikely(shtx->hardware && priv->hwts_tx_en))
+ do_tstamp = 1;
/* make space for additional header when fcb is needed */
if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
- (priv->vlgrp && vlan_tx_tag_present(skb))) &&
+ (priv->vlgrp && vlan_tx_tag_present(skb)) ||
+ unlikely(do_tstamp)) &&
(skb_headroom(skb) < GMAC_FCB_LEN)) {
struct sk_buff *skb_new;
@@ -1955,8 +2018,14 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* total number of fragments in the SKB */
nr_frags = skb_shinfo(skb)->nr_frags;
+ /* calculate the required number of TxBDs for this skb */
+ if (unlikely(do_tstamp))
+ nr_txbds = nr_frags + 2;
+ else
+ nr_txbds = nr_frags + 1;
+
/* check if there is space to queue this packet */
- if ((nr_frags+1) > tx_queue->num_txbdfree) {
+ if (nr_txbds > tx_queue->num_txbdfree) {
/* no space, stop the queue */
netif_tx_stop_queue(txq);
dev->stats.tx_fifo_errors++;
@@ -1968,9 +2037,19 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
txq->tx_packets ++;
txbdp = txbdp_start = tx_queue->cur_tx;
+ lstatus = txbdp->lstatus;
+
+ /* Time stamp insertion requires one additional TxBD */
+ if (unlikely(do_tstamp))
+ txbdp_tstamp = txbdp = next_txbd(txbdp, base,
+ tx_queue->tx_ring_size);
if (nr_frags == 0) {
- lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+ if (unlikely(do_tstamp))
+ txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
+ TXBD_INTERRUPT);
+ else
+ lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
} else {
/* Place the fragment addresses and lengths into the TxBDs */
for (i = 0; i < nr_frags; i++) {
@@ -2016,11 +2095,32 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
gfar_tx_vlan(skb, fcb);
}
- /* setup the TxBD length and buffer pointer for the first BD */
+ /* Setup tx hardware time stamping if requested */
+ if (unlikely(do_tstamp)) {
+ shtx->in_progress = 1;
+ if (fcb == NULL)
+ fcb = gfar_add_fcb(skb);
+ fcb->ptp = 1;
+ lstatus |= BD_LFLAG(TXBD_TOE);
+ }
+
txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
- lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
+ /*
+ * If time stamping is requested one additional TxBD must be set up. The
+ * first TxBD points to the FCB and must have a data length of
+ * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
+ * the full frame length.
+ */
+ if (unlikely(do_tstamp)) {
+ txbdp_tstamp->bufPtr = txbdp_start->bufPtr + GMAC_FCB_LEN;
+ txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
+ (skb_headlen(skb) - GMAC_FCB_LEN);
+ lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
+ } else {
+ lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
+ }
/*
* We can work in parallel with gfar_clean_tx_ring(), except
@@ -2060,9 +2160,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
/* reduce TxBD free count */
- tx_queue->num_txbdfree -= (nr_frags + 1);
-
- dev->trans_start = jiffies;
+ tx_queue->num_txbdfree -= (nr_txbds);
/* If the next BD still needs to be cleaned up, then the bds
are full. We need to tell the kernel to stop sending us stuff. */
@@ -2088,7 +2186,6 @@ static int gfar_close(struct net_device *dev)
disable_napi(priv);
- skb_queue_purge(&priv->rx_recycle);
cancel_work_sync(&priv->reset_task);
stop_gfar(dev);
@@ -2251,16 +2348,18 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
struct net_device *dev = tx_queue->dev;
struct gfar_private *priv = netdev_priv(dev);
struct gfar_priv_rx_q *rx_queue = NULL;
- struct txbd8 *bdp;
+ struct txbd8 *bdp, *next = NULL;
struct txbd8 *lbdp = NULL;
struct txbd8 *base = tx_queue->tx_bd_base;
struct sk_buff *skb;
int skb_dirtytx;
int tx_ring_size = tx_queue->tx_ring_size;
- int frags = 0;
+ int frags = 0, nr_txbds = 0;
int i;
int howmany = 0;
u32 lstatus;
+ size_t buflen;
+ union skb_shared_tx *shtx;
rx_queue = priv->rx_queue[tx_queue->qindex];
bdp = tx_queue->dirty_tx;
@@ -2270,7 +2369,18 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
unsigned long flags;
frags = skb_shinfo(skb)->nr_frags;
- lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
+
+ /*
+ * When time stamping, one additional TxBD must be freed.
+ * Also, we need to dma_unmap_single() the TxPAL.
+ */
+ shtx = skb_tx(skb);
+ if (unlikely(shtx->in_progress))
+ nr_txbds = frags + 2;
+ else
+ nr_txbds = frags + 1;
+
+ lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
lstatus = lbdp->lstatus;
@@ -2279,10 +2389,24 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
(lstatus & BD_LENGTH_MASK))
break;
- dma_unmap_single(&priv->ofdev->dev,
- bdp->bufPtr,
- bdp->length,
- DMA_TO_DEVICE);
+ if (unlikely(shtx->in_progress)) {
+ next = next_txbd(bdp, base, tx_ring_size);
+ buflen = next->length + GMAC_FCB_LEN;
+ } else
+ buflen = bdp->length;
+
+ dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
+ buflen, DMA_TO_DEVICE);
+
+ if (unlikely(shtx->in_progress)) {
+ struct skb_shared_hwtstamps shhwtstamps;
+ u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ns_to_ktime(*ns);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
+ bdp = next;
+ }
bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
bdp = next_txbd(bdp, base, tx_ring_size);
@@ -2314,7 +2438,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
howmany++;
spin_lock_irqsave(&tx_queue->txlock, flags);
- tx_queue->num_txbdfree += frags + 1;
+ tx_queue->num_txbdfree += nr_txbds;
spin_unlock_irqrestore(&tx_queue->txlock, flags);
}
@@ -2470,6 +2594,17 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
skb_pull(skb, amount_pull);
}
+ /* Get receive timestamp from the skb */
+ if (priv->hwts_rx_en) {
+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+ u64 *ns = (u64 *) skb->data;
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ shhwtstamps->hwtstamp = ns_to_ktime(*ns);
+ }
+
+ if (priv->padding)
+ skb_pull(skb, priv->padding);
+
if (priv->rx_csum_enable)
gfar_rx_checksum(skb, fcb);
@@ -2506,8 +2641,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
bdp = rx_queue->cur_rx;
base = rx_queue->rx_bd_base;
- amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) +
- priv->padding;
+ amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);
while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
struct sk_buff *newskb;
@@ -2794,7 +2928,7 @@ static void adjust_link(struct net_device *dev)
* whenever dev->flags is changed */
static void gfar_set_multi(struct net_device *dev)
{
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
@@ -2867,13 +3001,12 @@ static void gfar_set_multi(struct net_device *dev)
return;
/* Parse the list, and set the appropriate bits */
- netdev_for_each_mc_addr(mc_ptr, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (idx < em_num) {
- gfar_set_mac_for_addr(dev, idx,
- mc_ptr->dmi_addr);
+ gfar_set_mac_for_addr(dev, idx, ha->addr);
idx++;
} else
- gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
+ gfar_set_hash_for_addr(dev, ha->addr);
}
}
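gfar_hwtstamp_ioctl() above hooks gianfar into the standard SIOCSHWTSTAMP request defined in <linux/net_tstamp.h>. A rough user-space sketch of enabling hardware time stamping on an interface (error handling trimmed; the socket can be any ordinary socket, and "eth0" would only be an example name):

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_hwtstamp(int sock, const char *ifname)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;		/* stamp outgoing packets */
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;	/* stamp all received packets */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	/* The driver may downgrade the request; cfg is updated on return. */
	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}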
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 17d25e714236..ac4a92e08c09 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -262,6 +262,7 @@ extern const char gfar_driver_version[];
#define next_bd(bdp, base, ring_size) skip_bd(bdp, 1, base, ring_size)
+#define RCTRL_TS_ENABLE 0x01000000
#define RCTRL_PAL_MASK 0x001f0000
#define RCTRL_VLEX 0x00002000
#define RCTRL_FILREN 0x00001000
@@ -539,7 +540,7 @@ struct txbd8
struct txfcb {
u8 flags;
- u8 reserved;
+ u8 ptp; /* Flag to enable tx timestamping */
u8 l4os; /* Level 4 Header Offset */
u8 l3os; /* Level 3 Header Offset */
u16 phcs; /* Pseudo-header Checksum */
@@ -885,6 +886,7 @@ struct gfar {
#define FSL_GIANFAR_DEV_HAS_MAGIC_PACKET 0x00000100
#define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200
#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400
+#define FSL_GIANFAR_DEV_HAS_TIMER 0x00000800
#if (MAXGROUPS == 2)
#define DEFAULT_MAPPING 0xAA
@@ -1100,6 +1102,10 @@ struct gfar_private {
/* Network Statistics */
struct gfar_extra_stats extra_stats;
+
+ /* HW time stamping enabled flag */
+ int hwts_rx_en;
+ int hwts_tx_en;
};
extern unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
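The stamps generated by the gianfar code above reach applications through the usual SO_TIMESTAMPING machinery: hardware TX stamps come back on the socket's error queue via skb_tstamp_tx(), and RX stamps arrive as SCM_TIMESTAMPING control messages. A minimal opt-in from user space might look like this (assuming a libc that exposes SO_TIMESTAMPING; older setups pull it from <asm/socket.h>):

#include <sys/socket.h>
#include <linux/net_tstamp.h>

static int enable_so_timestamping(int sock)
{
	int flags = SOF_TIMESTAMPING_TX_HARDWARE |
		    SOF_TIMESTAMPING_RX_HARDWARE |
		    SOF_TIMESTAMPING_RAW_HARDWARE;

	/* Raw hardware stamps are then delivered in the third timespec of
	 * struct scm_timestamping on recvmsg(). */
	return setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
			  &flags, sizeof(flags));
}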
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index 3a90430de918..fd491e409488 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -895,7 +895,6 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
else
skb->ip_summed = CHECKSUM_NONE;
- skb->dev = dev;
skb->protocol = eth_type_trans(skb, dev);
dev->stats.rx_packets++;
netif_receive_skb(skb);
@@ -990,7 +989,7 @@ static u32 greth_hash_get_index(__u8 *addr)
static void greth_set_hash_filter(struct net_device *dev)
{
- struct dev_mc_list *curr;
+ struct netdev_hw_addr *ha;
struct greth_private *greth = netdev_priv(dev);
struct greth_regs *regs = (struct greth_regs *) greth->regs;
u32 mc_filter[2];
@@ -998,8 +997,8 @@ static void greth_set_hash_filter(struct net_device *dev)
mc_filter[0] = mc_filter[1] = 0;
- netdev_for_each_mc_addr(curr, dev) {
- bitnr = greth_hash_get_index(curr->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ bitnr = greth_hash_get_index(ha->addr);
mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
}
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 5d6f13879592..83f43bb835d6 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -1857,12 +1857,12 @@ static void set_rx_mode(struct net_device *dev)
/* Too many to match, or accept all multicasts. */
writew(0x000B, ioaddr + AddrMode);
} else if (!netdev_mc_empty(dev)) { /* Must use the CAM filter. */
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
int i = 0;
- netdev_for_each_mc_addr(mclist, dev) {
- writel(*(u32*)(mclist->dmi_addr), ioaddr + 0x100 + i*8);
- writel(0x20000 | (*(u16*)&mclist->dmi_addr[4]),
+ netdev_for_each_mc_addr(ha, dev) {
+ writel(*(u32 *)(ha->addr), ioaddr + 0x100 + i*8);
+ writel(0x20000 | (*(u16 *)&ha->addr[4]),
ioaddr + 0x104 + i*8);
i++;
}
diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c
index 0cab992b3d1a..3e25f10cabd6 100644
--- a/drivers/net/hamradio/baycom_ser_fdx.c
+++ b/drivers/net/hamradio/baycom_ser_fdx.c
@@ -429,7 +429,7 @@ static int ser12_open(struct net_device *dev)
return -EINVAL;
}
if (!request_region(dev->base_addr, SER12_EXTENT, "baycom_ser_fdx")) {
- printk(KERN_WARNING "BAYCOM_SER_FSX: I/O port 0x%04lx busy \n",
+ printk(KERN_WARNING "BAYCOM_SER_FSX: I/O port 0x%04lx busy\n",
dev->base_addr);
return -EACCES;
}
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index 4daad8cd56ea..68e5ac8832ad 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -1102,7 +1102,7 @@ static int hp100_open(struct net_device *dev)
return -EAGAIN;
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_start_queue(dev);
lp->lan_type = hp100_sense_lan(dev);
@@ -1510,7 +1510,7 @@ static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb,
printk("hp100: %s: start_xmit_bm: No TX PDL available.\n", dev->name);
#endif
/* not waited long enough since last tx? */
- if (time_before(jiffies, dev->trans_start + HZ))
+ if (time_before(jiffies, dev_trans_start(dev) + HZ))
goto drop;
if (hp100_check_lan(dev))
@@ -1547,7 +1547,6 @@ static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb,
}
}
- dev->trans_start = jiffies;
goto drop;
}
@@ -1585,7 +1584,6 @@ static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb,
/* Update statistics */
lp->stats.tx_packets++;
lp->stats.tx_bytes += skb->len;
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
@@ -1663,7 +1661,7 @@ static netdev_tx_t hp100_start_xmit(struct sk_buff *skb,
printk("hp100: %s: start_xmit: tx free mem = 0x%x\n", dev->name, i);
#endif
/* not waited long enough since last failed tx try? */
- if (time_before(jiffies, dev->trans_start + HZ)) {
+ if (time_before(jiffies, dev_trans_start(dev) + HZ)) {
#ifdef HP100_DEBUG
printk("hp100: %s: trans_start timing problem\n",
dev->name);
@@ -1701,7 +1699,6 @@ static netdev_tx_t hp100_start_xmit(struct sk_buff *skb,
mdelay(1);
}
}
- dev->trans_start = jiffies;
goto drop;
}
@@ -1745,7 +1742,6 @@ static netdev_tx_t hp100_start_xmit(struct sk_buff *skb,
lp->stats.tx_packets++;
lp->stats.tx_bytes += skb->len;
- dev->trans_start = jiffies;
hp100_ints_on();
spin_unlock_irqrestore(&lp->lock, flags);
@@ -2099,15 +2095,15 @@ static void hp100_set_multicast_list(struct net_device *dev)
} else {
int i, idx;
u_char *addrs;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
memset(&lp->hash_bytes, 0x00, 8);
#ifdef HP100_DEBUG
printk("hp100: %s: computing hash filter - mc_count = %i\n",
dev->name, netdev_mc_count(dev));
#endif
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
if ((*addrs & 0x01) == 0x01) { /* multicast address? */
#ifdef HP100_DEBUG
printk("hp100: %s: multicast = %pM, ",
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index dd873cc41c2b..2484e9e6c1ed 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -389,18 +389,19 @@ static void emac_hash_mc(struct emac_instance *dev)
const int regs = EMAC_XAHT_REGS(dev);
u32 *gaht_base = emac_gaht_base(dev);
u32 gaht_temp[regs];
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int i;
DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev));
memset(gaht_temp, 0, sizeof (gaht_temp));
- netdev_for_each_mc_addr(dmi, dev->ndev) {
+ netdev_for_each_mc_addr(ha, dev->ndev) {
int slot, reg, mask;
- DBG2(dev, "mc %pM" NL, dmi->dmi_addr);
+ DBG2(dev, "mc %pM" NL, ha->addr);
- slot = EMAC_XAHT_CRC_TO_SLOT(dev, ether_crc(ETH_ALEN, dmi->dmi_addr));
+ slot = EMAC_XAHT_CRC_TO_SLOT(dev,
+ ether_crc(ETH_ALEN, ha->addr));
reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);
@@ -1177,7 +1178,7 @@ static int emac_open(struct net_device *ndev)
netif_carrier_on(dev->ndev);
/* Required for Pause packet support in EMAC */
- dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1);
+ dev_mc_add_global(ndev, default_mcast_addr);
emac_configure(dev);
mal_poll_add(dev->mal, &dev->commac);
@@ -1700,7 +1701,6 @@ static int emac_poll_rx(void *param, int budget)
skb_put(skb, len);
push_packet:
- skb->dev = dev->ndev;
skb->protocol = eth_type_trans(skb, dev->ndev);
emac_rx_csum(dev, skb, ctrl);
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c
index 7d6cf3340c11..294ccfb427cf 100644
--- a/drivers/net/ibmlana.c
+++ b/drivers/net/ibmlana.c
@@ -384,7 +384,7 @@ static void InitBoard(struct net_device *dev)
int camcnt;
camentry_t cams[16];
u32 cammask;
- struct dev_mc_list *mcptr;
+ struct netdev_hw_addr *ha;
u16 rcrval;
/* reset the SONIC */
@@ -419,8 +419,8 @@ static void InitBoard(struct net_device *dev)
/* start putting the multicast addresses into the CAM list. Stop if
it is full. */
- netdev_for_each_mc_addr(mcptr, dev) {
- putcam(cams, &camcnt, mcptr->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ putcam(cams, &camcnt, ha->addr);
if (camcnt == 16)
break;
}
@@ -478,7 +478,7 @@ static void InitBoard(struct net_device *dev)
/* if still multicast addresses left or ALLMULTI is set, set the multicast
enable bit */
- if ((dev->flags & IFF_ALLMULTI) || (mcptr != NULL))
+ if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > camcnt)
rcrval |= RCREG_AMC;
/* promiscous mode ? */
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index cd508a8ee25b..092fb9d76693 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -957,7 +957,7 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
} else {
tx_packets++;
tx_bytes += skb->len;
- netdev->trans_start = jiffies;
+ netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
}
if (!used_bounce)
@@ -1073,7 +1073,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc);
}
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
/* clear the filter table & disable filtering */
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastEnableRecv |
@@ -1084,10 +1084,10 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc);
}
/* add the addresses to the filter table */
- netdev_for_each_mc_addr(mclist, netdev) {
+ netdev_for_each_mc_addr(ha, netdev) {
// add the multicast address to the filter table
unsigned long mcast_addr = 0;
- memcpy(((char *)&mcast_addr)+2, mclist->dmi_addr, 6);
+ memcpy(((char *)&mcast_addr)+2, ha->addr, 6);
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastAddFilter,
mcast_addr);
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index f4081c0a2d9c..ab9f675c5b8b 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -182,7 +182,6 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
}
- dev->trans_start = jiffies;
skb_queue_tail(&dp->rq, skb);
if (!dp->tasklet_pending) {
dp->tasklet_pending = 1;
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index 4a32bed77c71..3ef495537dc5 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -104,6 +104,12 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
case E1000_DEV_ID_82580_COPPER_DUAL:
mac->type = e1000_82580;
break;
+ case E1000_DEV_ID_I350_COPPER:
+ case E1000_DEV_ID_I350_FIBER:
+ case E1000_DEV_ID_I350_SERDES:
+ case E1000_DEV_ID_I350_SGMII:
+ mac->type = e1000_i350;
+ break;
default:
return -E1000_ERR_MAC_INIT;
break;
@@ -153,8 +159,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
if (mac->type == e1000_82580)
mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
+ if (mac->type == e1000_i350)
+ mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
/* reset */
- if (mac->type == e1000_82580)
+ if (mac->type >= e1000_82580)
mac->ops.reset_hw = igb_reset_hw_82580;
else
mac->ops.reset_hw = igb_reset_hw_82575;
@@ -225,7 +233,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
- } else if (hw->mac.type == e1000_82580) {
+ } else if (hw->mac.type >= e1000_82580) {
phy->ops.reset = igb_phy_hw_reset;
phy->ops.read_reg = igb_read_phy_reg_82580;
phy->ops.write_reg = igb_write_phy_reg_82580;
@@ -261,6 +269,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
break;
case I82580_I_PHY_ID:
+ case I350_I_PHY_ID:
phy->type = e1000_phy_82580;
phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580;
phy->ops.get_cable_length = igb_get_cable_length_82580;
@@ -1445,7 +1454,6 @@ void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
**/
static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
{
- u32 mdicnfg = 0;
s32 ret_val;
@@ -1453,15 +1461,6 @@ static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
if (ret_val)
goto out;
- /*
- * We config the phy address in MDICNFG register now. Same bits
- * as before. The values in MDIC can be written but will be
- * ignored. This allows us to call the old function after
- * configuring the PHY address in the new register
- */
- mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT);
- wr32(E1000_MDICNFG, mdicnfg);
-
ret_val = igb_read_phy_reg_mdic(hw, offset, data);
hw->phy.ops.release(hw);
@@ -1480,7 +1479,6 @@ out:
**/
static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
{
- u32 mdicnfg = 0;
s32 ret_val;
@@ -1488,15 +1486,6 @@ static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
if (ret_val)
goto out;
- /*
- * We config the phy address in MDICNFG register now. Same bits
- * as before. The values in MDIC can be written but will be
- * ignored. This allows us to call the old function after
- * configuring the PHY address in the new register
- */
- mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT);
- wr32(E1000_MDICNFG, mdicnfg);
-
ret_val = igb_write_phy_reg_mdic(hw, offset, data);
hw->phy.ops.release(hw);
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
index fbe1c99c193c..cbd1e1259e4d 100644
--- a/drivers/net/igb/e1000_82575.h
+++ b/drivers/net/igb/e1000_82575.h
@@ -38,9 +38,10 @@ extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
(ID_LED_DEF1_DEF2 << 4) | \
(ID_LED_OFF1_ON2))
-#define E1000_RAR_ENTRIES_82575 16
-#define E1000_RAR_ENTRIES_82576 24
-#define E1000_RAR_ENTRIES_82580 24
+#define E1000_RAR_ENTRIES_82575 16
+#define E1000_RAR_ENTRIES_82576 24
+#define E1000_RAR_ENTRIES_82580 24
+#define E1000_RAR_ENTRIES_I350 32
#define E1000_SW_SYNCH_MB 0x00000100
#define E1000_STAT_DEV_RST_SET 0x00100000
@@ -52,6 +53,7 @@ extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
#define E1000_SRRCTL_DROP_EN 0x80000000
+#define E1000_SRRCTL_TIMESTAMP 0x40000000
#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002
#define E1000_MRQC_ENABLE_VMDQ 0x00000003
@@ -108,6 +110,7 @@ union e1000_adv_rx_desc {
#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0
#define E1000_RXDADV_HDRBUFLEN_SHIFT 5
#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */
+#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
/* Transmit Descriptor - Advanced */
union e1000_adv_tx_desc {
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index fe6cf1b696c7..24d9be64342f 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -610,11 +610,7 @@
#define IGP_LED3_MODE 0x07000000
/* PCI/PCI-X/PCI-EX Config space */
-#define PCIE_LINK_STATUS 0x12
#define PCIE_DEVICE_CONTROL2 0x28
-
-#define PCIE_LINK_WIDTH_MASK 0x3F0
-#define PCIE_LINK_WIDTH_SHIFT 4
#define PCIE_DEVICE_CONTROL2_16ms 0x0005
#define PHY_REVISION_MASK 0xFFFFFFF0
@@ -629,6 +625,7 @@
#define M88E1111_I_PHY_ID 0x01410CC0
#define IGP03E1000_E_PHY_ID 0x02A80390
#define I82580_I_PHY_ID 0x015403A0
+#define I350_I_PHY_ID 0x015403B0
#define M88_VENDOR 0x0141
/* M88E1000 Specific Registers */
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index 82a533f5192a..cb8db78b1a05 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -31,6 +31,7 @@
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/netdevice.h>
#include "e1000_regs.h"
#include "e1000_defines.h"
@@ -53,6 +54,10 @@ struct e1000_hw;
#define E1000_DEV_ID_82580_SERDES 0x1510
#define E1000_DEV_ID_82580_SGMII 0x1511
#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516
+#define E1000_DEV_ID_I350_COPPER 0x1521
+#define E1000_DEV_ID_I350_FIBER 0x1522
+#define E1000_DEV_ID_I350_SERDES 0x1523
+#define E1000_DEV_ID_I350_SGMII 0x1524
#define E1000_REVISION_2 2
#define E1000_REVISION_4 4
@@ -72,6 +77,7 @@ enum e1000_mac_type {
e1000_82575,
e1000_82576,
e1000_82580,
+ e1000_i350,
e1000_num_macs /* List is 1-based, so subtract 1 for true count. */
};
@@ -502,14 +508,11 @@ struct e1000_hw {
u8 revision_id;
};
-#ifdef DEBUG
-extern char *igb_get_hw_dev_name(struct e1000_hw *hw);
+extern struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
#define hw_dbg(format, arg...) \
- printk(KERN_DEBUG "%s: " format, igb_get_hw_dev_name(hw), ##arg)
-#else
-#define hw_dbg(format, arg...)
-#endif
-#endif
+ netdev_dbg(igb_get_hw_dev(hw), format, ##arg)
+
/* These functions must be implemented by drivers */
s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+#endif /* _E1000_HW_H_ */
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index be8d010e4021..90c5e01e9235 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -53,17 +53,30 @@ s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
u16 pcie_link_status;
bus->type = e1000_bus_type_pci_express;
- bus->speed = e1000_bus_speed_2500;
ret_val = igb_read_pcie_cap_reg(hw,
- PCIE_LINK_STATUS,
- &pcie_link_status);
- if (ret_val)
+ PCI_EXP_LNKSTA,
+ &pcie_link_status);
+ if (ret_val) {
bus->width = e1000_bus_width_unknown;
- else
+ bus->speed = e1000_bus_speed_unknown;
+ } else {
+ switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) {
+ case PCI_EXP_LNKSTA_CLS_2_5GB:
+ bus->speed = e1000_bus_speed_2500;
+ break;
+ case PCI_EXP_LNKSTA_CLS_5_0GB:
+ bus->speed = e1000_bus_speed_5000;
+ break;
+ default:
+ bus->speed = e1000_bus_speed_unknown;
+ break;
+ }
+
bus->width = (enum e1000_bus_width)((pcie_link_status &
- PCIE_LINK_WIDTH_MASK) >>
- PCIE_LINK_WIDTH_SHIFT);
+ PCI_EXP_LNKSTA_NLW) >>
+ PCI_EXP_LNKSTA_NLW_SHIFT);
+ }
reg = rd32(E1000_STATUS);
bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
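In igb's bus-info query above, the driver-private PCIE_LINK_* constants give way to the generic PCI_EXP_LNKSTA* definitions from <linux/pci_regs.h>. Outside of igb's own capability-read helper, fetching and decoding that register generically looks roughly like this sketch (my_report_pcie_link() is an illustrative name):

#include <linux/pci.h>
#include <linux/pci_regs.h>

static void my_report_pcie_link(struct pci_dev *pdev)
{
	int pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	u16 lnksta;

	if (!pos)
		return;		/* not a PCI Express device */

	pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &lnksta);

	/* Speed code 1 = 2.5GT/s, 2 = 5.0GT/s; width is the negotiated
	 * number of lanes. */
	dev_info(&pdev->dev, "PCIe link: speed code %u, width x%u\n",
		 lnksta & PCI_EXP_LNKSTA_CLS,
		 (lnksta & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT);
}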
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 3b772b822a5d..6e63d9a7fc75 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -107,6 +107,7 @@ struct vf_data_storage {
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
/* Supported Rx Buffer Sizes */
+#define IGB_RXBUFFER_64 64 /* Used for packet split */
#define IGB_RXBUFFER_128 128 /* Used for packet split */
#define IGB_RXBUFFER_1024 1024
#define IGB_RXBUFFER_2048 2048
@@ -140,8 +141,10 @@ struct igb_buffer {
unsigned long time_stamp;
u16 length;
u16 next_to_watch;
- u16 mapped_as_page;
+ unsigned int bytecount;
u16 gso_segs;
+ union skb_shared_tx shtx;
+ u8 mapped_as_page;
};
/* RX */
struct {
@@ -185,7 +188,7 @@ struct igb_q_vector {
struct igb_ring {
struct igb_q_vector *q_vector; /* backlink to q_vector */
struct net_device *netdev; /* back pointer to net_device */
- struct pci_dev *pdev; /* pci device for dma mapping */
+ struct device *dev; /* device pointer for dma mapping */
dma_addr_t dma; /* phys address of the ring */
void *desc; /* descriptor ring memory */
unsigned int size; /* length of desc. ring in bytes */
@@ -323,6 +326,7 @@ struct igb_adapter {
#define IGB_82576_TSYNC_SHIFT 19
#define IGB_82580_TSYNC_SHIFT 24
+#define IGB_TS_HDR_LEN 16
enum e1000_state_t {
__IGB_TESTING,
__IGB_RESETTING,
@@ -336,7 +340,6 @@ enum igb_boards {
extern char igb_driver_name[];
extern char igb_driver_version[];
-extern char *igb_get_hw_dev_name(struct e1000_hw *hw);
extern int igb_up(struct igb_adapter *);
extern void igb_down(struct igb_adapter *);
extern void igb_reinit_locked(struct igb_adapter *);
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index 743038490104..f2ebf927e4bc 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -902,6 +902,49 @@ struct igb_reg_test {
#define TABLE64_TEST_LO 5
#define TABLE64_TEST_HI 6
+/* i350 reg test */
+static struct igb_reg_test reg_test_i350[] = {
+ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFF0000, 0xFFFF0000 },
+ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ /* RDH is read-only for i350, only test RDT. */
+ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
+ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
+ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RA, 0, 16, TABLE64_TEST_LO,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RA, 0, 16, TABLE64_TEST_HI,
+ 0xC3FFFFFF, 0xFFFFFFFF },
+ { E1000_RA2, 0, 16, TABLE64_TEST_LO,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RA2, 0, 16, TABLE64_TEST_HI,
+ 0xC3FFFFFF, 0xFFFFFFFF },
+ { E1000_MTA, 0, 128, TABLE32_TEST,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { 0, 0, 0, 0 }
+};
+
/* 82580 reg test */
static struct igb_reg_test reg_test_82580[] = {
{ E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
@@ -1077,6 +1120,10 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
u32 i, toggle;
switch (adapter->hw.mac.type) {
+ case e1000_i350:
+ test = reg_test_i350;
+ toggle = 0x7FEFF3FF;
+ break;
case e1000_82580:
test = reg_test_82580;
toggle = 0x7FEFF3FF;
@@ -1238,6 +1285,9 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
case e1000_82580:
ics_mask = 0x77DCFED5;
break;
+ case e1000_i350:
+ ics_mask = 0x77DCFED5;
+ break;
default:
ics_mask = 0x7FFFFFFF;
break;
@@ -1344,7 +1394,7 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
/* Setup Tx descriptor ring and Tx buffers */
tx_ring->count = IGB_DEFAULT_TXD;
- tx_ring->pdev = adapter->pdev;
+ tx_ring->dev = &adapter->pdev->dev;
tx_ring->netdev = adapter->netdev;
tx_ring->reg_idx = adapter->vfs_allocated_count;
@@ -1358,7 +1408,7 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
/* Setup Rx descriptor ring and Rx buffers */
rx_ring->count = IGB_DEFAULT_RXD;
- rx_ring->pdev = adapter->pdev;
+ rx_ring->dev = &adapter->pdev->dev;
rx_ring->netdev = adapter->netdev;
rx_ring->rx_buffer_len = IGB_RXBUFFER_2048;
rx_ring->reg_idx = adapter->vfs_allocated_count;
@@ -1554,10 +1604,10 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
buffer_info = &rx_ring->buffer_info[rx_ntc];
/* unmap rx buffer, will be remapped by alloc_rx_buffers */
- pci_unmap_single(rx_ring->pdev,
+ dma_unmap_single(rx_ring->dev,
buffer_info->dma,
rx_ring->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
/* verify contents of skb */
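The igb rings now carry a struct device * rather than a struct pci_dev *, so buffer mapping moves from the pci_map_*/PCI_DMA_* wrappers to the generic DMA API, as in the hunk above. The equivalent map/unmap pair in the new style (my_map_rx_buffer() and friends are illustrative names only):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int my_map_rx_buffer(struct device *dev, void *buf, size_t len,
			    dma_addr_t *dma)
{
	*dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma))
		return -ENOMEM;
	return 0;
}

static void my_unmap_rx_buffer(struct device *dev, dma_addr_t dma, size_t len)
{
	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
}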
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index c9baa2aa98cd..589cf4a6427a 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -62,6 +62,10 @@ static const struct e1000_info *igb_info_tbl[] = {
};
static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
@@ -197,6 +201,336 @@ MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
+struct igb_reg_info {
+ u32 ofs;
+ char *name;
+};
+
+static const struct igb_reg_info igb_reg_info_tbl[] = {
+
+ /* General Registers */
+ {E1000_CTRL, "CTRL"},
+ {E1000_STATUS, "STATUS"},
+ {E1000_CTRL_EXT, "CTRL_EXT"},
+
+ /* Interrupt Registers */
+ {E1000_ICR, "ICR"},
+
+ /* RX Registers */
+ {E1000_RCTL, "RCTL"},
+ {E1000_RDLEN(0), "RDLEN"},
+ {E1000_RDH(0), "RDH"},
+ {E1000_RDT(0), "RDT"},
+ {E1000_RXDCTL(0), "RXDCTL"},
+ {E1000_RDBAL(0), "RDBAL"},
+ {E1000_RDBAH(0), "RDBAH"},
+
+ /* TX Registers */
+ {E1000_TCTL, "TCTL"},
+ {E1000_TDBAL(0), "TDBAL"},
+ {E1000_TDBAH(0), "TDBAH"},
+ {E1000_TDLEN(0), "TDLEN"},
+ {E1000_TDH(0), "TDH"},
+ {E1000_TDT(0), "TDT"},
+ {E1000_TXDCTL(0), "TXDCTL"},
+ {E1000_TDFH, "TDFH"},
+ {E1000_TDFT, "TDFT"},
+ {E1000_TDFHS, "TDFHS"},
+ {E1000_TDFPC, "TDFPC"},
+
+ /* List Terminator */
+ {}
+};
+
+/*
+ * igb_regdump - register printout routine
+ */
+static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
+{
+ int n = 0;
+ char rname[16];
+ u32 regs[8];
+
+ switch (reginfo->ofs) {
+ case E1000_RDLEN(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_RDLEN(n));
+ break;
+ case E1000_RDH(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_RDH(n));
+ break;
+ case E1000_RDT(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_RDT(n));
+ break;
+ case E1000_RXDCTL(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_RXDCTL(n));
+ break;
+ case E1000_RDBAL(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_RDBAL(n));
+ break;
+ case E1000_RDBAH(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_RDBAH(n));
+ break;
+ case E1000_TDBAL(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_TDBAL(n));
+ break;
+ case E1000_TDBAH(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_TDBAH(n));
+ break;
+ case E1000_TDLEN(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_TDLEN(n));
+ break;
+ case E1000_TDH(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_TDH(n));
+ break;
+ case E1000_TDT(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_TDT(n));
+ break;
+ case E1000_TXDCTL(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_TXDCTL(n));
+ break;
+ default:
+ printk(KERN_INFO "%-15s %08x\n",
+ reginfo->name, rd32(reginfo->ofs));
+ return;
+ }
+
+ snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
+ printk(KERN_INFO "%-15s ", rname);
+ for (n = 0; n < 4; n++)
+ printk(KERN_CONT "%08x ", regs[n]);
+ printk(KERN_CONT "\n");
+}
+
+/*
+ * igb_dump - Print registers, tx-rings and rx-rings
+ */
+static void igb_dump(struct igb_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ struct igb_reg_info *reginfo;
+ int n = 0;
+ struct igb_ring *tx_ring;
+ union e1000_adv_tx_desc *tx_desc;
+ struct my_u0 { u64 a; u64 b; } *u0;
+ struct igb_buffer *buffer_info;
+ struct igb_ring *rx_ring;
+ union e1000_adv_rx_desc *rx_desc;
+ u32 staterr;
+ int i = 0;
+
+ if (!netif_msg_hw(adapter))
+ return;
+
+ /* Print netdevice Info */
+ if (netdev) {
+ dev_info(&adapter->pdev->dev, "Net device Info\n");
+ printk(KERN_INFO "Device Name state "
+ "trans_start last_rx\n");
+ printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
+ netdev->name,
+ netdev->state,
+ netdev->trans_start,
+ netdev->last_rx);
+ }
+
+ /* Print Registers */
+ dev_info(&adapter->pdev->dev, "Register Dump\n");
+ printk(KERN_INFO " Register Name Value\n");
+ for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
+ reginfo->name; reginfo++) {
+ igb_regdump(hw, reginfo);
+ }
+
+ /* Print TX Ring Summary */
+ if (!netdev || !netif_running(netdev))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
+ printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]"
+ " leng ntw timestamp\n");
+ for (n = 0; n < adapter->num_tx_queues; n++) {
+ tx_ring = adapter->tx_ring[n];
+ buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
+ printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
+ n, tx_ring->next_to_use, tx_ring->next_to_clean,
+ (u64)buffer_info->dma,
+ buffer_info->length,
+ buffer_info->next_to_watch,
+ (u64)buffer_info->time_stamp);
+ }
+
+ /* Print TX Rings */
+ if (!netif_msg_tx_done(adapter))
+ goto rx_ring_summary;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
+
+ /* Transmit Descriptor Formats
+ *
+ * Advanced Transmit Descriptor
+ * +--------------------------------------------------------------+
+ * 0 | Buffer Address [63:0] |
+ * +--------------------------------------------------------------+
+ * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN |
+ * +--------------------------------------------------------------+
+ * 63 46 45 40 39 38 36 35 32 31 24 15 0
+ */
+
+ for (n = 0; n < adapter->num_tx_queues; n++) {
+ tx_ring = adapter->tx_ring[n];
+ printk(KERN_INFO "------------------------------------\n");
+ printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
+ printk(KERN_INFO "------------------------------------\n");
+ printk(KERN_INFO "T [desc] [address 63:0 ] "
+ "[PlPOCIStDDM Ln] [bi->dma ] "
+ "leng ntw timestamp bi->skb\n");
+
+ for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+ tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
+ buffer_info = &tx_ring->buffer_info[i];
+ u0 = (struct my_u0 *)tx_desc;
+ printk(KERN_INFO "T [0x%03X] %016llX %016llX %016llX"
+ " %04X %3X %016llX %p", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ (u64)buffer_info->dma,
+ buffer_info->length,
+ buffer_info->next_to_watch,
+ (u64)buffer_info->time_stamp,
+ buffer_info->skb);
+ if (i == tx_ring->next_to_use &&
+ i == tx_ring->next_to_clean)
+ printk(KERN_CONT " NTC/U\n");
+ else if (i == tx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == tx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+
+ if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS,
+ 16, 1, phys_to_virt(buffer_info->dma),
+ buffer_info->length, true);
+ }
+ }
+
+ /* Print RX Rings Summary */
+rx_ring_summary:
+ dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
+ printk(KERN_INFO "Queue [NTU] [NTC]\n");
+ for (n = 0; n < adapter->num_rx_queues; n++) {
+ rx_ring = adapter->rx_ring[n];
+ printk(KERN_INFO " %5d %5X %5X\n", n,
+ rx_ring->next_to_use, rx_ring->next_to_clean);
+ }
+
+ /* Print RX Rings */
+ if (!netif_msg_rx_status(adapter))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+
+ /* Advanced Receive Descriptor (Read) Format
+ * 63 1 0
+ * +-----------------------------------------------------+
+ * 0 | Packet Buffer Address [63:1] |A0/NSE|
+ * +----------------------------------------------+------+
+ * 8 | Header Buffer Address [63:1] | DD |
+ * +-----------------------------------------------------+
+ *
+ *
+ * Advanced Receive Descriptor (Write-Back) Format
+ *
+ * 63 48 47 32 31 30 21 20 17 16 4 3 0
+ * +------------------------------------------------------+
+ * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS |
+ * | Checksum Ident | | | | Type | Type |
+ * +------------------------------------------------------+
+ * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+ * +------------------------------------------------------+
+ * 63 48 47 32 31 20 19 0
+ */
+
+ for (n = 0; n < adapter->num_rx_queues; n++) {
+ rx_ring = adapter->rx_ring[n];
+ printk(KERN_INFO "------------------------------------\n");
+ printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
+ printk(KERN_INFO "------------------------------------\n");
+ printk(KERN_INFO "R [desc] [ PktBuf A0] "
+ "[ HeadBuf DD] [bi->dma ] [bi->skb] "
+ "<-- Adv Rx Read format\n");
+ printk(KERN_INFO "RWB[desc] [PcsmIpSHl PtRs] "
+ "[vl er S cks ln] ---------------- [bi->skb] "
+ "<-- Adv Rx Write-Back format\n");
+
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
+ u0 = (struct my_u0 *)rx_desc;
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ if (staterr & E1000_RXD_STAT_DD) {
+ /* Descriptor Done */
+ printk(KERN_INFO "RWB[0x%03X] %016llX "
+ "%016llX ---------------- %p", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ buffer_info->skb);
+ } else {
+ printk(KERN_INFO "R [0x%03X] %016llX "
+ "%016llX %016llX %p", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ (u64)buffer_info->dma,
+ buffer_info->skb);
+
+ if (netif_msg_pktdata(adapter)) {
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS,
+ 16, 1,
+ phys_to_virt(buffer_info->dma),
+ rx_ring->rx_buffer_len, true);
+ if (rx_ring->rx_buffer_len
+ < IGB_RXBUFFER_1024)
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS,
+ 16, 1,
+ phys_to_virt(
+ buffer_info->page_dma +
+ buffer_info->page_offset),
+ PAGE_SIZE/2, true);
+ }
+ }
+
+ if (i == rx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == rx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+
+ }
+ }
+
+exit:
+ return;
+}
+
+
/**
* igb_read_clock - read raw cycle counter (to be used by time counter)
*/
@@ -223,43 +557,17 @@ static cycle_t igb_read_clock(const struct cyclecounter *tc)
return stamp;
}
-#ifdef DEBUG
/**
- * igb_get_hw_dev_name - return device name string
+ * igb_get_hw_dev - return device
* used by hardware layer to print debugging information
**/
-char *igb_get_hw_dev_name(struct e1000_hw *hw)
+struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
struct igb_adapter *adapter = hw->back;
- return adapter->netdev->name;
+ return adapter->netdev;
}
/**
- * igb_get_time_str - format current NIC and system time as string
- */
-static char *igb_get_time_str(struct igb_adapter *adapter,
- char buffer[160])
-{
- cycle_t hw = adapter->cycles.read(&adapter->cycles);
- struct timespec nic = ns_to_timespec(timecounter_read(&adapter->clock));
- struct timespec sys;
- struct timespec delta;
- getnstimeofday(&sys);
-
- delta = timespec_sub(nic, sys);
-
- sprintf(buffer,
- "HW %llu, NIC %ld.%09lus, SYS %ld.%09lus, NIC-SYS %lds + %09luns",
- hw,
- (long)nic.tv_sec, nic.tv_nsec,
- (long)sys.tv_sec, sys.tv_nsec,
- (long)delta.tv_sec, delta.tv_nsec);
-
- return buffer;
-}
-#endif
-
-/**
* igb_init_module - Driver Registration Routine
*
* igb_init_module is the first routine called when the driver is
@@ -328,6 +636,7 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
}
case e1000_82575:
case e1000_82580:
+ case e1000_i350:
default:
for (; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i]->reg_idx = rbase_offset + i;
@@ -371,7 +680,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
goto err;
ring->count = adapter->tx_ring_count;
ring->queue_index = i;
- ring->pdev = adapter->pdev;
+ ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev;
/* For 82575, context index must be unique per ring. */
if (adapter->hw.mac.type == e1000_82575)
@@ -385,7 +694,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
goto err;
ring->count = adapter->rx_ring_count;
ring->queue_index = i;
- ring->pdev = adapter->pdev;
+ ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev;
ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
@@ -471,6 +780,7 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
q_vector->eims_value = 1 << msix_vector;
break;
case e1000_82580:
+ case e1000_i350:
/* 82580 uses the same table-based approach as 82576, but with fewer
entries; as a result we carry over for queues greater than 4. */
if (rx_queue > IGB_N0_QUEUE) {
@@ -551,6 +861,7 @@ static void igb_configure_msix(struct igb_adapter *adapter)
case e1000_82576:
case e1000_82580:
+ case e1000_i350:
/* Turn on MSI-X capability first, or our settings
* won't stick. And it will take days to debug. */
wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
@@ -1253,6 +1564,7 @@ void igb_reset(struct igb_adapter *adapter)
* To take effect CTRL.RST is required.
*/
switch (mac->type) {
+ case e1000_i350:
case e1000_82580:
pba = rd32(E1000_RXPBS);
pba = igb_rxpbs_adjust_82580(pba);
@@ -1416,15 +1728,15 @@ static int __devinit igb_probe(struct pci_dev *pdev,
return err;
pci_using_dac = 0;
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err)
pci_using_dac = 1;
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No usable DMA "
"configuration, aborting\n");
@@ -1656,6 +1968,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
netdev->name,
((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
+ (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
"unknown"),
((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
(hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
@@ -1826,6 +2139,7 @@ static void igb_init_hw_timer(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
switch (hw->mac.type) {
+ case e1000_i350:
case e1000_82580:
memset(&adapter->cycles, 0, sizeof(adapter->cycles));
adapter->cycles.read = igb_read_clock;
@@ -2096,7 +2410,7 @@ static int igb_close(struct net_device *netdev)
**/
int igb_setup_tx_resources(struct igb_ring *tx_ring)
{
- struct pci_dev *pdev = tx_ring->pdev;
+ struct device *dev = tx_ring->dev;
int size;
size = sizeof(struct igb_buffer) * tx_ring->count;
@@ -2109,9 +2423,10 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = pci_alloc_consistent(pdev,
- tx_ring->size,
- &tx_ring->dma);
+ tx_ring->desc = dma_alloc_coherent(dev,
+ tx_ring->size,
+ &tx_ring->dma,
+ GFP_KERNEL);
if (!tx_ring->desc)
goto err;
@@ -2122,7 +2437,7 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
err:
vfree(tx_ring->buffer_info);
- dev_err(&pdev->dev,
+ dev_err(dev,
"Unable to allocate memory for the transmit descriptor ring\n");
return -ENOMEM;
}
@@ -2246,7 +2561,7 @@ static void igb_configure_tx(struct igb_adapter *adapter)
**/
int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
- struct pci_dev *pdev = rx_ring->pdev;
+ struct device *dev = rx_ring->dev;
int size, desc_len;
size = sizeof(struct igb_buffer) * rx_ring->count;
@@ -2261,8 +2576,10 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
rx_ring->size = rx_ring->count * desc_len;
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
- &rx_ring->dma);
+ rx_ring->desc = dma_alloc_coherent(dev,
+ rx_ring->size,
+ &rx_ring->dma,
+ GFP_KERNEL);
if (!rx_ring->desc)
goto err;
@@ -2275,8 +2592,8 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
err:
vfree(rx_ring->buffer_info);
rx_ring->buffer_info = NULL;
- dev_err(&pdev->dev, "Unable to allocate memory for "
- "the receive descriptor ring\n");
+ dev_err(dev, "Unable to allocate memory for the receive descriptor"
+ " ring\n");
return -ENOMEM;
}
@@ -2339,6 +2656,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
if (adapter->vfs_allocated_count) {
/* 82575 and 82576 supports 2 RSS queues for VMDq */
switch (hw->mac.type) {
+ case e1000_i350:
case e1000_82580:
num_rx_queues = 1;
shift = 0;
@@ -2590,6 +2908,8 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
E1000_SRRCTL_BSIZEPKT_SHIFT;
srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
}
+ if (hw->mac.type == e1000_82580)
+ srrctl |= E1000_SRRCTL_TIMESTAMP;
/* Only set Drop Enable if we are supporting multiple queues */
if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
srrctl |= E1000_SRRCTL_DROP_EN;
@@ -2649,8 +2969,8 @@ void igb_free_tx_resources(struct igb_ring *tx_ring)
if (!tx_ring->desc)
return;
- pci_free_consistent(tx_ring->pdev, tx_ring->size,
- tx_ring->desc, tx_ring->dma);
+ dma_free_coherent(tx_ring->dev, tx_ring->size,
+ tx_ring->desc, tx_ring->dma);
tx_ring->desc = NULL;
}
@@ -2674,15 +2994,15 @@ void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
{
if (buffer_info->dma) {
if (buffer_info->mapped_as_page)
- pci_unmap_page(tx_ring->pdev,
+ dma_unmap_page(tx_ring->dev,
buffer_info->dma,
buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
else
- pci_unmap_single(tx_ring->pdev,
+ dma_unmap_single(tx_ring->dev,
buffer_info->dma,
buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
buffer_info->dma = 0;
}
if (buffer_info->skb) {
@@ -2753,8 +3073,8 @@ void igb_free_rx_resources(struct igb_ring *rx_ring)
if (!rx_ring->desc)
return;
- pci_free_consistent(rx_ring->pdev, rx_ring->size,
- rx_ring->desc, rx_ring->dma);
+ dma_free_coherent(rx_ring->dev, rx_ring->size,
+ rx_ring->desc, rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -2790,10 +3110,10 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
for (i = 0; i < rx_ring->count; i++) {
buffer_info = &rx_ring->buffer_info[i];
if (buffer_info->dma) {
- pci_unmap_single(rx_ring->pdev,
+ dma_unmap_single(rx_ring->dev,
buffer_info->dma,
rx_ring->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
}
@@ -2802,10 +3122,10 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
buffer_info->skb = NULL;
}
if (buffer_info->page_dma) {
- pci_unmap_page(rx_ring->pdev,
+ dma_unmap_page(rx_ring->dev,
buffer_info->page_dma,
PAGE_SIZE / 2,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->page_dma = 0;
}
if (buffer_info->page) {
@@ -2876,7 +3196,7 @@ static int igb_write_mc_addr_list(struct net_device *netdev)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u8 *mta_list;
int i;
@@ -2893,8 +3213,8 @@ static int igb_write_mc_addr_list(struct net_device *netdev)
/* The shared function expects a packed array of only addresses. */
i = 0;
- netdev_for_each_mc_addr(mc_ptr, netdev)
- memcpy(mta_list + (i++ * ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
igb_update_mc_addr_list(hw, mta_list, i);
kfree(mta_list);
@@ -3493,7 +3813,7 @@ static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
struct sk_buff *skb, u32 tx_flags)
{
struct e1000_adv_tx_context_desc *context_desc;
- struct pci_dev *pdev = tx_ring->pdev;
+ struct device *dev = tx_ring->dev;
struct igb_buffer *buffer_info;
u32 info = 0, tu_cmd = 0;
unsigned int i;
@@ -3544,7 +3864,7 @@ static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
break;
default:
if (unlikely(net_ratelimit()))
- dev_warn(&pdev->dev,
+ dev_warn(dev,
"partial checksum but proto=%x!\n",
skb->protocol);
break;
@@ -3578,59 +3898,61 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
unsigned int first)
{
struct igb_buffer *buffer_info;
- struct pci_dev *pdev = tx_ring->pdev;
- unsigned int len = skb_headlen(skb);
+ struct device *dev = tx_ring->dev;
+ unsigned int hlen = skb_headlen(skb);
unsigned int count = 0, i;
unsigned int f;
+ u16 gso_segs = skb_shinfo(skb)->gso_segs ?: 1;
i = tx_ring->next_to_use;
buffer_info = &tx_ring->buffer_info[i];
- BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
- buffer_info->length = len;
+ BUG_ON(hlen >= IGB_MAX_DATA_PER_TXD);
+ buffer_info->length = hlen;
/* set time_stamp *before* dma to help avoid a possible race */
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
- buffer_info->dma = pci_map_single(pdev, skb->data, len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ buffer_info->dma = dma_map_single(dev, skb->data, hlen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, buffer_info->dma))
goto dma_error;
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
- struct skb_frag_struct *frag;
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[f];
+ unsigned int len = frag->size;
count++;
i++;
if (i == tx_ring->count)
i = 0;
- frag = &skb_shinfo(skb)->frags[f];
- len = frag->size;
-
buffer_info = &tx_ring->buffer_info[i];
BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
buffer_info->length = len;
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
buffer_info->mapped_as_page = true;
- buffer_info->dma = pci_map_page(pdev,
+ buffer_info->dma = dma_map_page(dev,
frag->page,
frag->page_offset,
len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, buffer_info->dma))
goto dma_error;
}
tx_ring->buffer_info[i].skb = skb;
- tx_ring->buffer_info[i].gso_segs = skb_shinfo(skb)->gso_segs ?: 1;
+ tx_ring->buffer_info[i].shtx = skb_shinfo(skb)->tx_flags;
+ /* account for the headers repeated in every gso segment beyond the first */
+ tx_ring->buffer_info[i].bytecount = ((gso_segs - 1) * hlen) + skb->len;
+ tx_ring->buffer_info[i].gso_segs = gso_segs;
tx_ring->buffer_info[first].next_to_watch = i;
return ++count;
dma_error:
- dev_err(&pdev->dev, "TX DMA map failed\n");
+ dev_err(dev, "TX DMA map failed\n");
/* clear timestamp and dma mappings for failed buffer_info mapping */
buffer_info->dma = 0;
@@ -3868,6 +4190,8 @@ static void igb_reset_task(struct work_struct *work)
struct igb_adapter *adapter;
adapter = container_of(work, struct igb_adapter, reset_task);
+ igb_dump(adapter);
+ netdev_err(adapter->netdev, "Reset adapter\n");
igb_reinit_locked(adapter);
}
@@ -3920,6 +4244,9 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
* i.e. RXBUFFER_2048 --> size-4096 slab
*/
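+ /* the 82580 can prepend a timestamp header to received packets; leave room for it */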
+ if (adapter->hw.mac.type == e1000_82580)
+ max_frame += IGB_TS_HDR_LEN;
+
if (max_frame <= IGB_RXBUFFER_1024)
rx_buffer_len = IGB_RXBUFFER_1024;
else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
@@ -3927,6 +4254,14 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
else
rx_buffer_len = IGB_RXBUFFER_128;
+ if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN + IGB_TS_HDR_LEN) ||
+ (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN))
+ rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN;
+
+ if ((adapter->hw.mac.type == e1000_82580) &&
+ (rx_buffer_len == IGB_RXBUFFER_128))
+ rx_buffer_len += IGB_RXBUFFER_64;
+
if (netif_running(netdev))
igb_down(adapter);
@@ -4955,22 +5290,21 @@ static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
/**
* igb_tx_hwtstamp - utility function which checks for TX time stamp
* @q_vector: pointer to q_vector containing needed info
- * @skb: packet that was just sent
+ * @buffer: pointer to igb_buffer structure
*
* If we were asked to do hardware stamping and such a time stamp is
* available, then it must have been for this skb here because we only
* allow only one such packet into the queue.
*/
-static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
+static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct igb_buffer *buffer_info)
{
struct igb_adapter *adapter = q_vector->adapter;
- union skb_shared_tx *shtx = skb_tx(skb);
struct e1000_hw *hw = &adapter->hw;
struct skb_shared_hwtstamps shhwtstamps;
u64 regval;
/* if skb does not support hw timestamp or TX stamp not valid exit */
- if (likely(!shtx->hardware) ||
+ if (likely(!buffer_info->shtx.hardware) ||
!(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
return;
@@ -4978,7 +5312,7 @@ static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
regval |= (u64)rd32(E1000_TXSTMPH) << 32;
igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
- skb_tstamp_tx(skb, &shhwtstamps);
+ skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
}
/**
@@ -4993,7 +5327,6 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
struct net_device *netdev = tx_ring->netdev;
struct e1000_hw *hw = &adapter->hw;
struct igb_buffer *buffer_info;
- struct sk_buff *skb;
union e1000_adv_tx_desc *tx_desc, *eop_desc;
unsigned int total_bytes = 0, total_packets = 0;
unsigned int i, eop, count = 0;
@@ -5009,19 +5342,12 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
buffer_info = &tx_ring->buffer_info[i];
cleaned = (i == eop);
- skb = buffer_info->skb;
- if (skb) {
- unsigned int segs, bytecount;
+ if (buffer_info->skb) {
+ total_bytes += buffer_info->bytecount;
/* gso_segs is currently only valid for tcp */
- segs = buffer_info->gso_segs;
- /* multiply data chunks by size of headers */
- bytecount = ((segs - 1) * skb_headlen(skb)) +
- skb->len;
- total_packets += segs;
- total_bytes += bytecount;
-
- igb_tx_hwtstamp(q_vector, skb);
+ total_packets += buffer_info->gso_segs;
+ igb_tx_hwtstamp(q_vector, buffer_info);
}
igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
@@ -5061,7 +5387,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
!(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
/* detected Tx unit hang */
- dev_err(&tx_ring->pdev->dev,
+ dev_err(tx_ring->dev,
"Detected Tx Unit Hang\n"
" Tx Queue <%d>\n"
" TDH <%x>\n"
@@ -5140,10 +5466,10 @@ static inline void igb_rx_checksum_adv(struct igb_ring *ring,
if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
skb->ip_summed = CHECKSUM_UNNECESSARY;
- dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
+ dev_dbg(ring->dev, "cksum success: bits %08X\n", status_err);
}
-static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
+static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
struct sk_buff *skb)
{
struct igb_adapter *adapter = q_vector->adapter;
@@ -5161,13 +5487,18 @@ static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
* If nothing went wrong, then it should have a skb_shared_tx that we
* can turn into a skb_shared_hwtstamps.
*/
- if (likely(!(staterr & E1000_RXDADV_STAT_TS)))
- return;
- if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
- return;
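+ /* on the 82580 the timestamp can arrive prepended to the packet data (TSIP) */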
+ if (staterr & E1000_RXDADV_STAT_TSIP) {
+ u32 *stamp = (u32 *)skb->data;
+ regval = le32_to_cpu(*(stamp + 2));
+ regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
+ skb_pull(skb, IGB_TS_HDR_LEN);
+ } else {
+ if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
+ return;
- regval = rd32(E1000_RXSTMPL);
- regval |= (u64)rd32(E1000_RXSTMPH) << 32;
+ regval = rd32(E1000_RXSTMPL);
+ regval |= (u64)rd32(E1000_RXSTMPH) << 32;
+ }
igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
}
@@ -5190,7 +5521,7 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
{
struct igb_ring *rx_ring = q_vector->rx_ring;
struct net_device *netdev = rx_ring->netdev;
- struct pci_dev *pdev = rx_ring->pdev;
+ struct device *dev = rx_ring->dev;
union e1000_adv_rx_desc *rx_desc , *next_rxd;
struct igb_buffer *buffer_info , *next_buffer;
struct sk_buff *skb;
@@ -5230,9 +5561,9 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
cleaned_count++;
if (buffer_info->dma) {
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(dev, buffer_info->dma,
rx_ring->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
skb_put(skb, length);
@@ -5242,11 +5573,11 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
}
if (length) {
- pci_unmap_page(pdev, buffer_info->page_dma,
- PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+ dma_unmap_page(dev, buffer_info->page_dma,
+ PAGE_SIZE / 2, DMA_FROM_DEVICE);
buffer_info->page_dma = 0;
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
buffer_info->page,
buffer_info->page_offset,
length);
@@ -5275,7 +5606,8 @@ send_up:
goto next_desc;
}
- igb_rx_hwtstamp(q_vector, staterr, skb);
+ if (staterr & (E1000_RXDADV_STAT_TSIP | E1000_RXDADV_STAT_TS))
+ igb_rx_hwtstamp(q_vector, staterr, skb);
total_bytes += skb->len;
total_packets++;
@@ -5350,12 +5682,12 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
buffer_info->page_offset ^= PAGE_SIZE / 2;
}
buffer_info->page_dma =
- pci_map_page(rx_ring->pdev, buffer_info->page,
+ dma_map_page(rx_ring->dev, buffer_info->page,
buffer_info->page_offset,
PAGE_SIZE / 2,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(rx_ring->pdev,
- buffer_info->page_dma)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev,
+ buffer_info->page_dma)) {
buffer_info->page_dma = 0;
rx_ring->rx_stats.alloc_failed++;
goto no_buffers;
@@ -5373,12 +5705,12 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
buffer_info->skb = skb;
}
if (!buffer_info->dma) {
- buffer_info->dma = pci_map_single(rx_ring->pdev,
+ buffer_info->dma = dma_map_single(rx_ring->dev,
skb->data,
bufsz,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(rx_ring->pdev,
- buffer_info->dma)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev,
+ buffer_info->dma)) {
buffer_info->dma = 0;
rx_ring->rx_stats.alloc_failed++;
goto no_buffers;
@@ -5555,6 +5887,16 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
return 0;
}
+ /*
+ * Per-packet timestamping only works if all packets are
+ * timestamped, so enable timestamping for all packets as
+ * long as one Rx filter has been configured.
+ */
+ if ((hw->mac.type == e1000_82580) && tsync_rx_ctl) {
+ tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
+ }
+
/* enable/disable TX */
regval = rd32(E1000_TSYNCTXCTL);
regval &= ~E1000_TSYNCTXCTL_ENABLED;
@@ -6131,19 +6473,25 @@ static void igb_vmm_control(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
u32 reg;
- /* replication is not supported for 82575 */
- if (hw->mac.type == e1000_82575)
+ switch (hw->mac.type) {
+ case e1000_82575:
+ default:
+ /* replication is not supported for 82575 */
return;
-
- /* enable replication vlan tag stripping */
- reg = rd32(E1000_RPLOLR);
- reg |= E1000_RPLOLR_STRVLAN;
- wr32(E1000_RPLOLR, reg);
-
- /* notify HW that the MAC is adding vlan tags */
- reg = rd32(E1000_DTXCTL);
- reg |= E1000_DTXCTL_VLAN_ADDED;
- wr32(E1000_DTXCTL, reg);
+ case e1000_82576:
+ /* notify HW that the MAC is adding vlan tags */
+ reg = rd32(E1000_DTXCTL);
+ reg |= E1000_DTXCTL_VLAN_ADDED;
+ wr32(E1000_DTXCTL, reg);
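+ /* fall through */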
+ case e1000_82580:
+ /* enable replication vlan tag stripping */
+ reg = rd32(E1000_RPLOLR);
+ reg |= E1000_RPLOLR_STRVLAN;
+ wr32(E1000_RPLOLR, reg);
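+ /* fall through */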
+ case e1000_i350:
+ /* none of the above registers are supported by i350 */
+ break;
+ }
if (adapter->vfs_allocated_count) {
igb_vmdq_set_loopback_pf(hw, true);
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 1b1edad1eb5e..5e2b2a8c56c6 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -48,6 +48,7 @@
#define DRV_VERSION "1.0.0-k0"
char igbvf_driver_name[] = "igbvf";
const char igbvf_driver_version[] = DRV_VERSION;
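+/* request handle returned by pm_qos_add_request(); released in igbvf_exit_module() */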
+struct pm_qos_request_list *igbvf_driver_pm_qos_req;
static const char igbvf_driver_string[] =
"Intel(R) Virtual Function Network Driver";
static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation.";
@@ -164,10 +165,10 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
buffer_info->page_offset ^= PAGE_SIZE / 2;
}
buffer_info->page_dma =
- pci_map_page(pdev, buffer_info->page,
+ dma_map_page(&pdev->dev, buffer_info->page,
buffer_info->page_offset,
PAGE_SIZE / 2,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
if (!buffer_info->skb) {
@@ -178,9 +179,9 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
}
buffer_info->skb = skb;
- buffer_info->dma = pci_map_single(pdev, skb->data,
+ buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
bufsz,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
/* Refresh the desc even if buffer_addrs didn't change because
* each write-back erases this info. */
@@ -268,28 +269,28 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
prefetch(skb->data - NET_IP_ALIGN);
buffer_info->skb = NULL;
if (!adapter->rx_ps_hdr_size) {
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
skb_put(skb, length);
goto send_up;
}
if (!skb_shinfo(skb)->nr_frags) {
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_ps_hdr_size,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
skb_put(skb, hlen);
}
if (length) {
- pci_unmap_page(pdev, buffer_info->page_dma,
+ dma_unmap_page(&pdev->dev, buffer_info->page_dma,
PAGE_SIZE / 2,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->page_dma = 0;
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
buffer_info->page,
buffer_info->page_offset,
length);
@@ -369,15 +370,15 @@ static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
{
if (buffer_info->dma) {
if (buffer_info->mapped_as_page)
- pci_unmap_page(adapter->pdev,
+ dma_unmap_page(&adapter->pdev->dev,
buffer_info->dma,
buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
else
- pci_unmap_single(adapter->pdev,
+ dma_unmap_single(&adapter->pdev->dev,
buffer_info->dma,
buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
buffer_info->dma = 0;
}
if (buffer_info->skb) {
@@ -438,8 +439,8 @@ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
- &tx_ring->dma);
+ tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc)
goto err;
@@ -480,8 +481,8 @@ int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
rx_ring->size = rx_ring->count * desc_len;
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
- &rx_ring->dma);
+ rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc)
goto err;
@@ -549,7 +550,8 @@ void igbvf_free_tx_resources(struct igbvf_ring *tx_ring)
vfree(tx_ring->buffer_info);
tx_ring->buffer_info = NULL;
- pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+ dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
tx_ring->desc = NULL;
}
@@ -574,13 +576,13 @@ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
buffer_info = &rx_ring->buffer_info[i];
if (buffer_info->dma) {
if (adapter->rx_ps_hdr_size){
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_ps_hdr_size,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
} else {
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
buffer_info->dma = 0;
}
@@ -592,9 +594,10 @@ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
if (buffer_info->page) {
if (buffer_info->page_dma)
- pci_unmap_page(pdev, buffer_info->page_dma,
+ dma_unmap_page(&pdev->dev,
+ buffer_info->page_dma,
PAGE_SIZE / 2,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
put_page(buffer_info->page);
buffer_info->page = NULL;
buffer_info->page_dma = 0;
@@ -1398,7 +1401,7 @@ static void igbvf_set_multi(struct net_device *netdev)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u8 *mta_list = NULL;
int i;
@@ -1413,8 +1416,8 @@ static void igbvf_set_multi(struct net_device *netdev)
/* prepare a packed array of only addresses. */
i = 0;
- netdev_for_each_mc_addr(mc_ptr, netdev)
- memcpy(mta_list + (i++ * ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0);
kfree(mta_list);
@@ -2104,9 +2107,9 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
buffer_info->mapped_as_page = false;
- buffer_info->dma = pci_map_single(pdev, skb->data, len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
@@ -2127,12 +2130,12 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
buffer_info->mapped_as_page = true;
- buffer_info->dma = pci_map_page(pdev,
+ buffer_info->dma = dma_map_page(&pdev->dev,
frag->page,
frag->page_offset,
len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
}
@@ -2644,16 +2647,16 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
return err;
pci_using_dac = 0;
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err)
pci_using_dac = 1;
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No usable DMA "
"configuration, aborting\n");
@@ -2899,7 +2902,7 @@ static int __init igbvf_init_module(void)
printk(KERN_INFO "%s\n", igbvf_copyright);
ret = pci_register_driver(&igbvf_driver);
- pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, igbvf_driver_name,
+ igbvf_driver_pm_qos_req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
return ret;
@@ -2915,7 +2918,8 @@ module_init(igbvf_init_module);
static void __exit igbvf_exit_module(void)
{
pci_unregister_driver(&igbvf_driver);
- pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, igbvf_driver_name);
+ pm_qos_remove_request(igbvf_driver_pm_qos_req);
+ igbvf_driver_pm_qos_req = NULL;
}
module_exit(igbvf_exit_module);
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index 8f6197d647c0..e3b5e9490601 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -1503,7 +1503,6 @@ static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
BARRIER();
- dev->trans_start = jiffies;
ip->tx_skbs[produce] = skb; /* Remember skb */
produce = (produce + 1) & 127;
ip->tx_pi = produce;
@@ -1665,7 +1664,7 @@ static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
static void ioc3_set_multicast_list(struct net_device *dev)
{
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
struct ioc3_private *ip = netdev_priv(dev);
struct ioc3 *ioc3 = ip->regs;
u64 ehar = 0;
@@ -1689,8 +1688,8 @@ static void ioc3_set_multicast_list(struct net_device *dev)
ip->ehar_h = 0xffffffff;
ip->ehar_l = 0xffffffff;
} else {
- netdev_for_each_mc_addr(dmi, dev) {
- char *addr = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ char *addr = ha->addr;
if (!(*addr & 1))
continue;
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index 639bf9fb0279..72e3d2da9e9f 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -570,7 +570,7 @@ static int ipg_config_autoneg(struct net_device *dev)
static void ipg_nic_set_multicast_list(struct net_device *dev)
{
void __iomem *ioaddr = ipg_ioaddr(dev);
- struct dev_mc_list *mc_list_ptr;
+ struct netdev_hw_addr *ha;
unsigned int hashindex;
u32 hashtable[2];
u8 receivemode;
@@ -609,9 +609,9 @@ static void ipg_nic_set_multicast_list(struct net_device *dev)
hashtable[1] = 0x00000000;
/* Cycle through all multicast addresses to filter. */
- netdev_for_each_mc_addr(mc_list_ptr, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
/* Calculate CRC result for each multicast address. */
- hashindex = crc32_le(0xffffffff, mc_list_ptr->dmi_addr,
+ hashindex = crc32_le(0xffffffff, ha->addr,
ETH_ALEN);
/* Use only the least significant 6 bits. */
@@ -1548,8 +1548,6 @@ static void ipg_reset_after_host_error(struct work_struct *work)
container_of(work, struct ipg_nic_private, task.work);
struct net_device *dev = sp->dev;
- IPG_DDEBUG_MSG("DMACtrl = %8.8x\n", ioread32(sp->ioaddr + IPG_DMACTRL));
-
/*
* Acknowledge HostError interrupt by resetting
* IPG DMA and HOST.
@@ -1826,9 +1824,6 @@ static int ipg_nic_stop(struct net_device *dev)
netif_stop_queue(dev);
- IPG_DDEBUG_MSG("RFDlistendCount = %i\n", sp->RFDlistendCount);
- IPG_DDEBUG_MSG("RFDListCheckedCount = %i\n", sp->rxdCheckedCount);
- IPG_DDEBUG_MSG("EmptyRFDListCount = %i\n", sp->EmptyRFDListCount);
IPG_DUMPTFDLIST(dev);
do {
diff --git a/drivers/net/ipg.h b/drivers/net/ipg.h
index dfc2541bb556..6ce027355fcf 100644
--- a/drivers/net/ipg.h
+++ b/drivers/net/ipg.h
@@ -29,7 +29,7 @@
/* GMII based PHY IDs */
#define NS 0x2000
#define MARVELL 0x0141
-#define ICPLUS_PHY 0x243
+#define ICPLUS_PHY 0x243
/* NIC Physical Layer Device MII register fields. */
#define MII_PHY_SELECTOR_IEEE8023 0x0001
@@ -96,31 +96,31 @@ enum ipg_regs {
};
/* Ethernet MIB statistic register offsets. */
-#define IPG_OCTETRCVOK 0xA8
+#define IPG_OCTETRCVOK 0xA8
#define IPG_MCSTOCTETRCVDOK 0xAC
#define IPG_BCSTOCTETRCVOK 0xB0
#define IPG_FRAMESRCVDOK 0xB4
#define IPG_MCSTFRAMESRCVDOK 0xB8
#define IPG_BCSTFRAMESRCVDOK 0xBE
#define IPG_MACCONTROLFRAMESRCVD 0xC6
-#define IPG_FRAMETOOLONGERRRORS 0xC8
-#define IPG_INRANGELENGTHERRORS 0xCA
-#define IPG_FRAMECHECKSEQERRORS 0xCC
-#define IPG_FRAMESLOSTRXERRORS 0xCE
-#define IPG_OCTETXMTOK 0xD0
+#define IPG_FRAMETOOLONGERRRORS 0xC8
+#define IPG_INRANGELENGTHERRORS 0xCA
+#define IPG_FRAMECHECKSEQERRORS 0xCC
+#define IPG_FRAMESLOSTRXERRORS 0xCE
+#define IPG_OCTETXMTOK 0xD0
#define IPG_MCSTOCTETXMTOK 0xD4
#define IPG_BCSTOCTETXMTOK 0xD8
#define IPG_FRAMESXMTDOK 0xDC
#define IPG_MCSTFRAMESXMTDOK 0xE0
-#define IPG_FRAMESWDEFERREDXMT 0xE4
+#define IPG_FRAMESWDEFERREDXMT 0xE4
#define IPG_LATECOLLISIONS 0xE8
#define IPG_MULTICOLFRAMES 0xEC
#define IPG_SINGLECOLFRAMES 0xF0
#define IPG_BCSTFRAMESXMTDOK 0xF6
-#define IPG_CARRIERSENSEERRORS 0xF8
+#define IPG_CARRIERSENSEERRORS 0xF8
#define IPG_MACCONTROLFRAMESXMTDOK 0xFA
-#define IPG_FRAMESABORTXSCOLLS 0xFC
-#define IPG_FRAMESWEXDEFERRAL 0xFE
+#define IPG_FRAMESABORTXSCOLLS 0xFC
+#define IPG_FRAMESWEXDEFERRAL 0xFE
/* RMON statistic register offsets. */
#define IPG_ETHERSTATSCOLLISIONS 0x100
@@ -134,8 +134,8 @@ enum ipg_regs {
#define IPG_ETHERSTATSPKTS1024TO1518OCTESTSTRANSMIT 0x120
#define IPG_ETHERSTATSCRCALIGNERRORS 0x124
#define IPG_ETHERSTATSUNDERSIZEPKTS 0x128
-#define IPG_ETHERSTATSFRAGMENTS 0x12C
-#define IPG_ETHERSTATSJABBERS 0x130
+#define IPG_ETHERSTATSFRAGMENTS 0x12C
+#define IPG_ETHERSTATSJABBERS 0x130
#define IPG_ETHERSTATSOCTETS 0x134
#define IPG_ETHERSTATSPKTS 0x138
#define IPG_ETHERSTATSPKTS64OCTESTS 0x13C
@@ -154,10 +154,10 @@ enum ipg_regs {
#define IPG_ETHERSTATSDROPEVENTS 0xCE
/* Serial EEPROM offsets */
-#define IPG_EEPROM_CONFIGPARAM 0x00
+#define IPG_EEPROM_CONFIGPARAM 0x00
#define IPG_EEPROM_ASICCTRL 0x01
#define IPG_EEPROM_SUBSYSTEMVENDORID 0x02
-#define IPG_EEPROM_SUBSYSTEMID 0x03
+#define IPG_EEPROM_SUBSYSTEMID 0x03
#define IPG_EEPROM_STATIONADDRESS0 0x10
#define IPG_EEPROM_STATIONADDRESS1 0x11
#define IPG_EEPROM_STATIONADDRESS2 0x12
@@ -168,16 +168,16 @@ enum ipg_regs {
/* IOBaseAddress */
#define IPG_PIB_RSVD_MASK 0xFFFFFE01
-#define IPG_PIB_IOBASEADDRESS 0xFFFFFF00
-#define IPG_PIB_IOBASEADDRIND 0x00000001
+#define IPG_PIB_IOBASEADDRESS 0xFFFFFF00
+#define IPG_PIB_IOBASEADDRIND 0x00000001
/* MemBaseAddress */
#define IPG_PMB_RSVD_MASK 0xFFFFFE07
-#define IPG_PMB_MEMBASEADDRIND 0x00000001
+#define IPG_PMB_MEMBASEADDRIND 0x00000001
#define IPG_PMB_MEMMAPTYPE 0x00000006
#define IPG_PMB_MEMMAPTYPE0 0x00000002
#define IPG_PMB_MEMMAPTYPE1 0x00000004
-#define IPG_PMB_MEMBASEADDRESS 0xFFFFFE00
+#define IPG_PMB_MEMBASEADDRESS 0xFFFFFE00
/* ConfigStatus */
#define IPG_CS_RSVD_MASK 0xFFB0
@@ -196,20 +196,20 @@ enum ipg_regs {
/* TFDList, TFC */
#define IPG_TFC_RSVD_MASK 0x0000FFFF9FFFFFFF
-#define IPG_TFC_FRAMEID 0x000000000000FFFF
+#define IPG_TFC_FRAMEID 0x000000000000FFFF
#define IPG_TFC_WORDALIGN 0x0000000000030000
#define IPG_TFC_WORDALIGNTODWORD 0x0000000000000000
-#define IPG_TFC_WORDALIGNTOWORD 0x0000000000020000
+#define IPG_TFC_WORDALIGNTOWORD 0x0000000000020000
#define IPG_TFC_WORDALIGNDISABLED 0x0000000000030000
#define IPG_TFC_TCPCHECKSUMENABLE 0x0000000000040000
#define IPG_TFC_UDPCHECKSUMENABLE 0x0000000000080000
#define IPG_TFC_IPCHECKSUMENABLE 0x0000000000100000
#define IPG_TFC_FCSAPPENDDISABLE 0x0000000000200000
#define IPG_TFC_TXINDICATE 0x0000000000400000
-#define IPG_TFC_TXDMAINDICATE 0x0000000000800000
+#define IPG_TFC_TXDMAINDICATE 0x0000000000800000
#define IPG_TFC_FRAGCOUNT 0x000000000F000000
-#define IPG_TFC_VLANTAGINSERT 0x0000000010000000
-#define IPG_TFC_TFDDONE 0x0000000080000000
+#define IPG_TFC_VLANTAGINSERT 0x0000000010000000
+#define IPG_TFC_TFDDONE 0x0000000080000000
#define IPG_TFC_VID 0x00000FFF00000000
#define IPG_TFC_CFI 0x0000100000000000
#define IPG_TFC_USERPRIORITY 0x0000E00000000000
@@ -217,35 +217,35 @@ enum ipg_regs {
/* TFDList, FragInfo */
#define IPG_TFI_RSVD_MASK 0xFFFF00FFFFFFFFFF
#define IPG_TFI_FRAGADDR 0x000000FFFFFFFFFF
-#define IPG_TFI_FRAGLEN 0xFFFF000000000000LL
+#define IPG_TFI_FRAGLEN 0xFFFF000000000000LL
/* RFD data structure masks. */
/* RFDList, RFS */
#define IPG_RFS_RSVD_MASK 0x0000FFFFFFFFFFFF
#define IPG_RFS_RXFRAMELEN 0x000000000000FFFF
-#define IPG_RFS_RXFIFOOVERRUN 0x0000000000010000
+#define IPG_RFS_RXFIFOOVERRUN 0x0000000000010000
#define IPG_RFS_RXRUNTFRAME 0x0000000000020000
#define IPG_RFS_RXALIGNMENTERROR 0x0000000000040000
#define IPG_RFS_RXFCSERROR 0x0000000000080000
#define IPG_RFS_RXOVERSIZEDFRAME 0x0000000000100000
-#define IPG_RFS_RXLENGTHERROR 0x0000000000200000
+#define IPG_RFS_RXLENGTHERROR 0x0000000000200000
#define IPG_RFS_VLANDETECTED 0x0000000000400000
#define IPG_RFS_TCPDETECTED 0x0000000000800000
#define IPG_RFS_TCPERROR 0x0000000001000000
#define IPG_RFS_UDPDETECTED 0x0000000002000000
#define IPG_RFS_UDPERROR 0x0000000004000000
#define IPG_RFS_IPDETECTED 0x0000000008000000
-#define IPG_RFS_IPERROR 0x0000000010000000
+#define IPG_RFS_IPERROR 0x0000000010000000
#define IPG_RFS_FRAMESTART 0x0000000020000000
#define IPG_RFS_FRAMEEND 0x0000000040000000
-#define IPG_RFS_RFDDONE 0x0000000080000000
+#define IPG_RFS_RFDDONE 0x0000000080000000
#define IPG_RFS_TCI 0x0000FFFF00000000
/* RFDList, FragInfo */
#define IPG_RFI_RSVD_MASK 0xFFFF00FFFFFFFFFF
#define IPG_RFI_FRAGADDR 0x000000FFFFFFFFFF
-#define IPG_RFI_FRAGLEN 0xFFFF000000000000LL
+#define IPG_RFI_FRAGLEN 0xFFFF000000000000LL
/* I/O Register masks. */
@@ -254,37 +254,37 @@ enum ipg_regs {
/* Statistics Mask */
#define IPG_SM_ALL 0x0FFFFFFF
-#define IPG_SM_OCTETRCVOK_FRAMESRCVDOK 0x00000001
-#define IPG_SM_MCSTOCTETRCVDOK_MCSTFRAMESRCVDOK 0x00000002
-#define IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK 0x00000004
+#define IPG_SM_OCTETRCVOK_FRAMESRCVDOK 0x00000001
+#define IPG_SM_MCSTOCTETRCVDOK_MCSTFRAMESRCVDOK 0x00000002
+#define IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK 0x00000004
#define IPG_SM_RXJUMBOFRAMES 0x00000008
#define IPG_SM_TCPCHECKSUMERRORS 0x00000010
-#define IPG_SM_IPCHECKSUMERRORS 0x00000020
+#define IPG_SM_IPCHECKSUMERRORS 0x00000020
#define IPG_SM_UDPCHECKSUMERRORS 0x00000040
#define IPG_SM_MACCONTROLFRAMESRCVD 0x00000080
#define IPG_SM_FRAMESTOOLONGERRORS 0x00000100
#define IPG_SM_INRANGELENGTHERRORS 0x00000200
#define IPG_SM_FRAMECHECKSEQERRORS 0x00000400
#define IPG_SM_FRAMESLOSTRXERRORS 0x00000800
-#define IPG_SM_OCTETXMTOK_FRAMESXMTOK 0x00001000
-#define IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK 0x00002000
-#define IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK 0x00004000
+#define IPG_SM_OCTETXMTOK_FRAMESXMTOK 0x00001000
+#define IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK 0x00002000
+#define IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK 0x00004000
#define IPG_SM_FRAMESWDEFERREDXMT 0x00008000
-#define IPG_SM_LATECOLLISIONS 0x00010000
-#define IPG_SM_MULTICOLFRAMES 0x00020000
-#define IPG_SM_SINGLECOLFRAMES 0x00040000
+#define IPG_SM_LATECOLLISIONS 0x00010000
+#define IPG_SM_MULTICOLFRAMES 0x00020000
+#define IPG_SM_SINGLECOLFRAMES 0x00040000
#define IPG_SM_TXJUMBOFRAMES 0x00080000
#define IPG_SM_CARRIERSENSEERRORS 0x00100000
#define IPG_SM_MACCONTROLFRAMESXMTD 0x00200000
#define IPG_SM_FRAMESABORTXSCOLLS 0x00400000
-#define IPG_SM_FRAMESWEXDEFERAL 0x00800000
+#define IPG_SM_FRAMESWEXDEFERAL 0x00800000
/* Countdown */
#define IPG_CD_RSVD_MASK 0x0700FFFF
#define IPG_CD_COUNT 0x0000FFFF
-#define IPG_CD_COUNTDOWNSPEED 0x01000000
+#define IPG_CD_COUNTDOWNSPEED 0x01000000
#define IPG_CD_COUNTDOWNMODE 0x02000000
-#define IPG_CD_COUNTINTENABLED 0x04000000
+#define IPG_CD_COUNTINTENABLED 0x04000000
/* TxDMABurstThresh */
#define IPG_TB_RSVD_MASK 0xFF
@@ -653,15 +653,28 @@ enum ipg_regs {
* Miscellaneous macros.
*/
-/* Marco for printing debug statements. */
+/* Macros for printing debug statements. */
#ifdef IPG_DEBUG
-# define IPG_DEBUG_MSG(args...)
-# define IPG_DDEBUG_MSG(args...) printk(KERN_DEBUG "IPG: " args)
+# define IPG_DEBUG_MSG(fmt, args...) \
+do { \
+ if (0) \
+ printk(KERN_DEBUG "IPG: " fmt, ##args); \
+} while (0)
+# define IPG_DDEBUG_MSG(fmt, args...) \
+ printk(KERN_DEBUG "IPG: " fmt, ##args)
# define IPG_DUMPRFDLIST(args) ipg_dump_rfdlist(args)
# define IPG_DUMPTFDLIST(args) ipg_dump_tfdlist(args)
#else
-# define IPG_DEBUG_MSG(args...)
-# define IPG_DDEBUG_MSG(args...)
+# define IPG_DEBUG_MSG(fmt, args...) \
+do { \
+ if (0) \
+ printk(KERN_DEBUG "IPG: " fmt, ##args); \
+} while (0)
+# define IPG_DDEBUG_MSG(fmt, args...) \
+do { \
+ if (0) \
+ printk(KERN_DEBUG "IPG: " fmt, ##args); \
+} while (0)
# define IPG_DUMPRFDLIST(args)
# define IPG_DUMPTFDLIST(args)
#endif
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index af10e97345ce..25bb2a015e18 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -397,5 +397,11 @@ config MCS_FIR
To compile it as a module, choose M here: the module will be called
mcs7780.
+config SH_IRDA
+ tristate "SuperH IrDA driver"
+ depends on IRDA && ARCH_SHMOBILE
+ help
+ Say Y here if you want to enable SuperH IrDA devices.
+
endmenu
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index e030d47e2793..dfc64537f62f 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_VIA_FIR) += via-ircc.o
obj-$(CONFIG_PXA_FICP) += pxaficp_ir.o
obj-$(CONFIG_MCS_FIR) += mcs7780.o
obj-$(CONFIG_AU1000_FIR) += au1k_ir.o
+obj-$(CONFIG_SH_IRDA) += sh_irda.o
# SIR drivers
obj-$(CONFIG_IRTTY_SIR) += irtty-sir.o sir-dev.o
obj-$(CONFIG_BFIN_SIR) += bfin_sir.o
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 28992c815cba..a3cb109006a5 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -753,18 +753,18 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
if(OldMessageCount > ((self->LineStatus+1) & 0x07))
{
self->rcvFramesOverflow = TRUE;
- IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ******** \n", __func__);
+ IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ********\n", __func__);
}
if (ali_ircc_dma_receive_complete(self))
{
- IRDA_DEBUG(1, "%s(), ******* receive complete ******** \n", __func__);
+ IRDA_DEBUG(1, "%s(), ******* receive complete ********\n", __func__);
self->ier = IER_EOM;
}
else
{
- IRDA_DEBUG(1, "%s(), ******* Not receive complete ******** \n", __func__);
+ IRDA_DEBUG(1, "%s(), ******* Not receive complete ********\n", __func__);
self->ier = IER_EOM | IER_TIMER;
}
@@ -777,7 +777,7 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
if(OldMessageCount > ((self->LineStatus+1) & 0x07))
{
self->rcvFramesOverflow = TRUE;
- IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ******* \n", __func__);
+ IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE *******\n", __func__);
}
/* Disable Timer */
switch_bank(iobase, BANK1);
@@ -942,7 +942,7 @@ static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self)
// benjamin 2000/11/10 06:32PM
if (self->io.speed > 115200)
{
- IRDA_DEBUG(2, "%s(), ali_ircc_change_speed from UART_LSR_TEMT \n", __func__ );
+ IRDA_DEBUG(2, "%s(), ali_ircc_change_speed from UART_LSR_TEMT\n", __func__ );
self->ier = IER_EOM;
// SetCOMInterrupts(self, TRUE);
@@ -970,7 +970,7 @@ static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud)
IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
- IRDA_DEBUG(2, "%s(), setting speed = %d \n", __func__ , baud);
+ IRDA_DEBUG(2, "%s(), setting speed = %d\n", __func__ , baud);
/* This function *must* be called with irq off and spin-lock.
* - Jean II */
@@ -1500,7 +1500,7 @@ static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb,
diff = self->now.tv_usec - self->stamp.tv_usec;
/* self->stamp is set from ali_ircc_dma_receive_complete() */
- IRDA_DEBUG(1, "%s(), ******* diff = %d ******* \n", __func__ , diff);
+ IRDA_DEBUG(1, "%s(), ******* diff = %d *******\n", __func__ , diff);
if (diff < 0)
diff += 1000000;
@@ -1641,7 +1641,7 @@ static void ali_ircc_dma_xmit(struct ali_ircc_cb *self)
tmp = inb(iobase+FIR_LCR_B);
tmp &= ~0x20; // Disable SIP
outb(((unsigned char)(tmp & 0x3f) | LCR_B_TX_MODE) & ~LCR_B_BW, iobase+FIR_LCR_B);
- IRDA_DEBUG(1, "%s(), ******* Change to TX mode: FIR_LCR_B = 0x%x ******* \n", __func__ , inb(iobase+FIR_LCR_B));
+ IRDA_DEBUG(1, "%s(), *** Change to TX mode: FIR_LCR_B = 0x%x ***\n", __func__ , inb(iobase+FIR_LCR_B));
outb(0, iobase+FIR_LSR);
@@ -1768,7 +1768,7 @@ static int ali_ircc_dma_receive(struct ali_ircc_cb *self)
//switch_bank(iobase, BANK0);
tmp = inb(iobase+FIR_LCR_B);
outb((unsigned char)(tmp &0x3f) | LCR_B_RX_MODE | LCR_B_BW , iobase + FIR_LCR_B); // 2000/12/1 05:16PM
- IRDA_DEBUG(1, "%s(), *** Change To RX mode: FIR_LCR_B = 0x%x *** \n", __func__ , inb(iobase+FIR_LCR_B));
+ IRDA_DEBUG(1, "%s(), *** Change To RX mode: FIR_LCR_B = 0x%x ***\n", __func__ , inb(iobase+FIR_LCR_B));
/* Set Rx Threshold */
switch_bank(iobase, BANK1);
@@ -1840,7 +1840,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
/* Check for errors */
if ((status & 0xd8) || self->rcvFramesOverflow || (len==0))
{
- IRDA_DEBUG(0,"%s(), ************* RX Errors ************ \n", __func__ );
+ IRDA_DEBUG(0,"%s(), ************* RX Errors ************\n", __func__ );
/* Skip frame */
self->netdev->stats.rx_errors++;
@@ -1850,29 +1850,29 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
if (status & LSR_FIFO_UR)
{
self->netdev->stats.rx_frame_errors++;
- IRDA_DEBUG(0,"%s(), ************* FIFO Errors ************ \n", __func__ );
+ IRDA_DEBUG(0,"%s(), ************* FIFO Errors ************\n", __func__ );
}
if (status & LSR_FRAME_ERROR)
{
self->netdev->stats.rx_frame_errors++;
- IRDA_DEBUG(0,"%s(), ************* FRAME Errors ************ \n", __func__ );
+ IRDA_DEBUG(0,"%s(), ************* FRAME Errors ************\n", __func__ );
}
if (status & LSR_CRC_ERROR)
{
self->netdev->stats.rx_crc_errors++;
- IRDA_DEBUG(0,"%s(), ************* CRC Errors ************ \n", __func__ );
+ IRDA_DEBUG(0,"%s(), ************* CRC Errors ************\n", __func__ );
}
if(self->rcvFramesOverflow)
{
self->netdev->stats.rx_frame_errors++;
- IRDA_DEBUG(0,"%s(), ************* Overran DMA buffer ************ \n", __func__ );
+ IRDA_DEBUG(0,"%s(), ************* Overran DMA buffer ************\n", __func__ );
}
if(len == 0)
{
self->netdev->stats.rx_frame_errors++;
- IRDA_DEBUG(0,"%s(), ********** Receive Frame Size = 0 ********* \n", __func__ );
+ IRDA_DEBUG(0,"%s(), ********** Receive Frame Size = 0 *********\n", __func__ );
}
}
else
@@ -1884,7 +1884,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
val = inb(iobase+FIR_BSR);
if ((val& BSR_FIFO_NOT_EMPTY)== 0x80)
{
- IRDA_DEBUG(0, "%s(), ************* BSR_FIFO_NOT_EMPTY ************ \n", __func__ );
+ IRDA_DEBUG(0, "%s(), ************* BSR_FIFO_NOT_EMPTY ************\n", __func__ );
/* Put this entry back in fifo */
st_fifo->head--;
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index b5cbd39d0685..a3d696a9456a 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -546,7 +546,6 @@ static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb(skb);
aup->tx_head = (aup->tx_head + 1) & (NUM_IR_DESC - 1);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index b7e6625ca75e..48bd5ec9f29b 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -1002,8 +1002,6 @@ toshoboe_hard_xmit (struct sk_buff *skb, struct net_device *dev)
toshoboe_checkstuck (self);
- dev->trans_start = jiffies;
-
/* Check if we need to change the speed */
/* But not now. Wait after transmission if mtt not required */
speed=irda_get_next_speed(skb);
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 2c9b3af16612..e4e71f32746b 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -839,7 +839,7 @@ static void irda_usb_receive(struct urb *urb)
/* Usually precursor to a hot-unplug on OHCI. */
default:
self->netdev->stats.rx_errors++;
- IRDA_DEBUG(0, "%s(), RX status %d, transfer_flags 0x%04X \n", __func__, urb->status, urb->transfer_flags);
+ IRDA_DEBUG(0, "%s(), RX status %d, transfer_flags 0x%04X\n", __func__, urb->status, urb->transfer_flags);
break;
}
/* If we received an error, we don't want to resubmit the
@@ -1124,11 +1124,12 @@ static int stir421x_patch_device(struct irda_usb_cb *self)
* The actual image starts after the "STMP" keyword
* so forward to the firmware header tag
*/
- for (i = 0; (fw->data[i] != STIR421X_PATCH_END_OF_HDR_TAG) &&
- (i < fw->size); i++) ;
+ for (i = 0; i < fw->size && fw->data[i] !=
+ STIR421X_PATCH_END_OF_HDR_TAG; i++)
+ ;
/* here we check for the out of buffer case */
- if ((STIR421X_PATCH_END_OF_HDR_TAG == fw->data[i]) &&
- (i < STIR421X_PATCH_CODE_OFFSET)) {
+ if (i < STIR421X_PATCH_CODE_OFFSET && i < fw->size &&
+ STIR421X_PATCH_END_OF_HDR_TAG == fw->data[i]) {
if (!memcmp(fw->data + i + 1, STIR421X_PATCH_STMP_TAG,
sizeof(STIR421X_PATCH_STMP_TAG) - 1)) {
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index 1a54f6bb68c5..c192c31e4c5c 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -556,7 +556,6 @@ static int pxa_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
}
dev_kfree_skb(skb);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index 1dcdce0631aa..da2705061a60 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -715,8 +715,6 @@ static int sa1100_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
Ser2HSCR0 = si->hscr0 | HSCR0_HSSP | HSCR0_TXE;
}
- dev->trans_start = jiffies;
-
return NETDEV_TX_OK;
}
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
new file mode 100644
index 000000000000..9a828b06a57e
--- /dev/null
+++ b/drivers/net/irda/sh_irda.c
@@ -0,0 +1,865 @@
+/*
+ * SuperH IrDA Driver
+ *
+ * Copyright (C) 2010 Renesas Solutions Corp.
+ * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * Based on sh_sir.c
+ * Copyright (C) 2009 Renesas Solutions Corp.
+ * Copyright 2006-2009 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * CAUTION
+ *
+ * This driver is very simple, so it does not yet support the following:
+ * - MIR/FIR support
+ * - DMA transfer support
+ * - FIFO mode support
+ */
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/irda_device.h>
+
+#define DRIVER_NAME "sh_irda"
+
+#if defined(CONFIG_ARCH_SH7367) || defined(CONFIG_ARCH_SH7377)
+#define __IRDARAM_LEN 0x13FF
+#else
+#define __IRDARAM_LEN 0x1039
+#endif
+
+#define IRTMR 0x1F00 /* Transfer mode */
+#define IRCFR 0x1F02 /* Configuration */
+#define IRCTR 0x1F04 /* IR control */
+#define IRTFLR 0x1F20 /* Transmit frame length */
+#define IRTCTR 0x1F22 /* Transmit control */
+#define IRRFLR 0x1F40 /* Receive frame length */
+#define IRRCTR 0x1F42 /* Receive control */
+#define SIRISR 0x1F60 /* SIR-UART mode interrupt source */
+#define SIRIMR 0x1F62 /* SIR-UART mode interrupt mask */
+#define SIRICR 0x1F64 /* SIR-UART mode interrupt clear */
+#define SIRBCR 0x1F68 /* SIR-UART mode baud rate count */
+#define MFIRISR 0x1F70 /* MIR/FIR mode interrupt source */
+#define MFIRIMR 0x1F72 /* MIR/FIR mode interrupt mask */
+#define MFIRICR 0x1F74 /* MIR/FIR mode interrupt clear */
+#define CRCCTR 0x1F80 /* CRC engine control */
+#define CRCIR 0x1F86 /* CRC engine input data */
+#define CRCCR 0x1F8A /* CRC engine calculation */
+#define CRCOR 0x1F8E /* CRC engine output data */
+#define FIFOCP 0x1FC0 /* FIFO current pointer */
+#define FIFOFP 0x1FC2 /* FIFO follow pointer */
+#define FIFORSMSK 0x1FC4 /* FIFO receive status mask */
+#define FIFORSOR 0x1FC6 /* FIFO receive status OR */
+#define FIFOSEL 0x1FC8 /* FIFO select */
+#define FIFORS 0x1FCA /* FIFO receive status */
+#define FIFORFL 0x1FCC /* FIFO receive frame length */
+#define FIFORAMCP 0x1FCE /* FIFO RAM current pointer */
+#define FIFORAMFP 0x1FD0 /* FIFO RAM follow pointer */
+#define BIFCTL 0x1FD2 /* BUS interface control */
+#define IRDARAM 0x0000 /* IrDA buffer RAM */
+#define IRDARAM_LEN __IRDARAM_LEN /* - 8/16/32 (read-only for 32) */
+
+/* IRTMR */
+#define TMD_MASK (0x3 << 14) /* Transfer Mode */
+#define TMD_SIR (0x0 << 14)
+#define TMD_MIR (0x3 << 14)
+#define TMD_FIR (0x2 << 14)
+
+#define FIFORIM (1 << 8) /* FIFO receive interrupt mask */
+#define MIM (1 << 4) /* MIR/FIR Interrupt Mask */
+#define SIM (1 << 0) /* SIR Interrupt Mask */
+#define xIM_MASK (FIFORIM | MIM | SIM)
+
+/* IRCFR */
+#define RTO_SHIFT 8 /* shift for Receive Timeout */
+#define RTO (0x3 << RTO_SHIFT)
+
+/* IRTCTR */
+#define ARMOD (1 << 15) /* Auto-Receive Mode */
+#define TE (1 << 0) /* Transmit Enable */
+
+/* IRRFLR */
+#define RFL_MASK (0x1FFF) /* mask for Receive Frame Length */
+
+/* IRRCTR */
+#define RE (1 << 0) /* Receive Enable */
+
+/*
+ * SIRISR, SIRIMR, SIRICR,
+ * MFIRISR, MFIRIMR, MFIRICR
+ */
+#define FRE (1 << 15) /* Frame Receive End */
+#define TROV (1 << 11) /* Transfer Area Overflow */
+#define xIR_9 (1 << 9)
+#define TOT xIR_9 /* for SIR Timeout */
+#define ABTD xIR_9 /* for MIR/FIR Abort Detection */
+#define xIR_8 (1 << 8)
+#define FER xIR_8 /* for SIR Framing Error */
+#define CRCER xIR_8 /* for MIR/FIR CRC error */
+#define FTE (1 << 7) /* Frame Transmit End */
+#define xIR_MASK (FRE | TROV | xIR_9 | xIR_8 | FTE)
+
+/* SIRBCR */
+#define BRC_MASK (0x3F) /* mask for Baud Rate Count */
+
+/* CRCCTR */
+#define CRC_RST (1 << 15) /* CRC Engine Reset */
+#define CRC_CT_MASK 0x0FFF /* mask for CRC Engine Input Data Count */
+
+/* CRCIR */
+#define CRC_IN_MASK 0x0FFF /* mask for CRC Engine Input Data */
+
+/************************************************************************
+
+
+ enum / structure
+
+
+************************************************************************/
+enum sh_irda_mode {
+ SH_IRDA_NONE = 0,
+ SH_IRDA_SIR,
+ SH_IRDA_MIR,
+ SH_IRDA_FIR,
+};
+
+struct sh_irda_self;
+struct sh_irda_xir_func {
+ int (*xir_fre) (struct sh_irda_self *self);
+ int (*xir_trov) (struct sh_irda_self *self);
+ int (*xir_9) (struct sh_irda_self *self);
+ int (*xir_8) (struct sh_irda_self *self);
+ int (*xir_fte) (struct sh_irda_self *self);
+};
+
+struct sh_irda_self {
+ void __iomem *membase;
+ unsigned int irq;
+ struct clk *clk;
+
+ struct net_device *ndev;
+
+ struct irlap_cb *irlap;
+ struct qos_info qos;
+
+ iobuff_t tx_buff;
+ iobuff_t rx_buff;
+
+ enum sh_irda_mode mode;
+ spinlock_t lock;
+
+ struct sh_irda_xir_func *xir_func;
+};
+
+/************************************************************************
+
+
+ common function
+
+
+************************************************************************/
+static void sh_irda_write(struct sh_irda_self *self, u32 offset, u16 data)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&self->lock, flags);
+ iowrite16(data, self->membase + offset);
+ spin_unlock_irqrestore(&self->lock, flags);
+}
+
+static u16 sh_irda_read(struct sh_irda_self *self, u32 offset)
+{
+ unsigned long flags;
+ u16 ret;
+
+ spin_lock_irqsave(&self->lock, flags);
+ ret = ioread16(self->membase + offset);
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ return ret;
+}
+
+static void sh_irda_update_bits(struct sh_irda_self *self, u32 offset,
+ u16 mask, u16 data)
+{
+ unsigned long flags;
+ u16 old, new;
+
+ spin_lock_irqsave(&self->lock, flags);
+ old = ioread16(self->membase + offset);
+ new = (old & ~mask) | data;
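+ /* write back the merged value only when it actually changes */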
+ if (old != new)
+ iowrite16(new, self->membase + offset);
+ spin_unlock_irqrestore(&self->lock, flags);
+}
+
+/************************************************************************
+
+
+ mode function
+
+
+************************************************************************/
+/*=====================================
+ *
+ * common
+ *
+ *=====================================*/
+static void sh_irda_rcv_ctrl(struct sh_irda_self *self, int enable)
+{
+ struct device *dev = &self->ndev->dev;
+
+ sh_irda_update_bits(self, IRRCTR, RE, enable ? RE : 0);
+ dev_dbg(dev, "recv %s\n", enable ? "enable" : "disable");
+}
+
+static int sh_irda_set_timeout(struct sh_irda_self *self, int interval)
+{
+ struct device *dev = &self->ndev->dev;
+
+ if (SH_IRDA_SIR != self->mode)
+ interval = 0;
+
+ if (interval < 0 || interval > 2) {
+ dev_err(dev, "unsupported timeout interval\n");
+ return -EINVAL;
+ }
+
+ sh_irda_update_bits(self, IRCFR, RTO, interval << RTO_SHIFT);
+ return 0;
+}
+
+static int sh_irda_set_baudrate(struct sh_irda_self *self, int baudrate)
+{
+ struct device *dev = &self->ndev->dev;
+ u16 val;
+
+ if (baudrate < 0)
+ return 0;
+
+ if (SH_IRDA_SIR != self->mode) {
+ dev_err(dev, "it is not SIR mode\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Baud rate (bits/s) =
+ * (48 MHz / 26) / ((baud rate counter value + 1) x 16)
+ */
+ val = (48000000 / 26 / 16 / baudrate) - 1;
+ dev_dbg(dev, "baudrate = %d, val = 0x%02x\n", baudrate, val);
+
+ sh_irda_update_bits(self, SIRBCR, BRC_MASK, val);
+
+ return 0;
+}
+
+static int xir_get_rcv_length(struct sh_irda_self *self)
+{
+ return RFL_MASK & sh_irda_read(self, IRRFLR);
+}
+
+/*=====================================
+ *
+ * NONE MODE
+ *
+ *=====================================*/
+static int xir_fre(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+ dev_err(dev, "none mode: frame recv\n");
+ return 0;
+}
+
+static int xir_trov(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+ dev_err(dev, "none mode: buffer ram over\n");
+ return 0;
+}
+
+static int xir_9(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+ dev_err(dev, "none mode: time over\n");
+ return 0;
+}
+
+static int xir_8(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+ dev_err(dev, "none mode: framing error\n");
+ return 0;
+}
+
+static int xir_fte(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+ dev_err(dev, "none mode: frame transmit end\n");
+ return 0;
+}
+
+static struct sh_irda_xir_func xir_func = {
+ .xir_fre = xir_fre,
+ .xir_trov = xir_trov,
+ .xir_9 = xir_9,
+ .xir_8 = xir_8,
+ .xir_fte = xir_fte,
+};
+
+/*=====================================
+ *
+ * MIR/FIR MODE
+ *
+ * MIR/FIR are not supported yet
+ *=====================================*/
+static struct sh_irda_xir_func mfir_func = {
+ .xir_fre = xir_fre,
+ .xir_trov = xir_trov,
+ .xir_9 = xir_9,
+ .xir_8 = xir_8,
+ .xir_fte = xir_fte,
+};
+
+/*=====================================
+ *
+ * SIR MODE
+ *
+ *=====================================*/
+static int sir_fre(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+ u16 data16;
+ u8 *data = (u8 *)&data16;
+ int len = xir_get_rcv_length(self);
+ int i, j;
+
+ if (len > IRDARAM_LEN)
+ len = IRDARAM_LEN;
+
+ dev_dbg(dev, "frame recv length = %d\n", len);
+
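+ /* IRDARAM is 16 bits wide: read a word on even offsets and feed each byte to the SIR unwrapper */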
+ for (i = 0; i < len; i++) {
+ j = i % 2;
+ if (!j)
+ data16 = sh_irda_read(self, IRDARAM + i);
+
+ async_unwrap_char(self->ndev, &self->ndev->stats,
+ &self->rx_buff, data[j]);
+ }
+ self->ndev->last_rx = jiffies;
+
+ sh_irda_rcv_ctrl(self, 1);
+
+ return 0;
+}
+
+static int sir_trov(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+
+ dev_err(dev, "buffer ram over\n");
+ sh_irda_rcv_ctrl(self, 1);
+ return 0;
+}
+
+static int sir_tot(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+
+ dev_err(dev, "time over\n");
+ sh_irda_set_baudrate(self, 9600);
+ sh_irda_rcv_ctrl(self, 1);
+ return 0;
+}
+
+static int sir_fer(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+
+ dev_err(dev, "framing error\n");
+ sh_irda_rcv_ctrl(self, 1);
+ return 0;
+}
+
+static int sir_fte(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+
+ dev_dbg(dev, "frame transmit end\n");
+ netif_wake_queue(self->ndev);
+
+ return 0;
+}
+
+static struct sh_irda_xir_func sir_func = {
+ .xir_fre = sir_fre,
+ .xir_trov = sir_trov,
+ .xir_9 = sir_tot,
+ .xir_8 = sir_fer,
+ .xir_fte = sir_fte,
+};
+
+static void sh_irda_set_mode(struct sh_irda_self *self, enum sh_irda_mode mode)
+{
+ struct device *dev = &self->ndev->dev;
+ struct sh_irda_xir_func *func;
+ const char *name;
+ u16 data;
+
+ switch (mode) {
+ case SH_IRDA_SIR:
+ name = "SIR";
+ data = TMD_SIR;
+ func = &sir_func;
+ break;
+ case SH_IRDA_MIR:
+ name = "MIR";
+ data = TMD_MIR;
+ func = &mfir_func;
+ break;
+ case SH_IRDA_FIR:
+ name = "FIR";
+ data = TMD_FIR;
+ func = &mfir_func;
+ break;
+ default:
+ name = "NONE";
+ data = 0;
+ func = &xir_func;
+ break;
+ }
+
+ self->mode = mode;
+ self->xir_func = func;
+ sh_irda_update_bits(self, IRTMR, TMD_MASK, data);
+
+ dev_dbg(dev, "switch to %s mode", name);
+}
+
+/************************************************************************
+
+
+ irq function
+
+
+************************************************************************/
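+/* mask and clear every IrDA interrupt source, then unmask only the sources for the current mode */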
+static void sh_irda_set_irq_mask(struct sh_irda_self *self)
+{
+ u16 tmr_hole;
+ u16 xir_reg;
+
+ /* set all mask */
+ sh_irda_update_bits(self, IRTMR, xIM_MASK, xIM_MASK);
+ sh_irda_update_bits(self, SIRIMR, xIR_MASK, xIR_MASK);
+ sh_irda_update_bits(self, MFIRIMR, xIR_MASK, xIR_MASK);
+
+ /* clear irq */
+ sh_irda_update_bits(self, SIRICR, xIR_MASK, xIR_MASK);
+ sh_irda_update_bits(self, MFIRICR, xIR_MASK, xIR_MASK);
+
+ switch (self->mode) {
+ case SH_IRDA_SIR:
+ tmr_hole = SIM;
+ xir_reg = SIRIMR;
+ break;
+ case SH_IRDA_MIR:
+ case SH_IRDA_FIR:
+ tmr_hole = MIM;
+ xir_reg = MFIRIMR;
+ break;
+ default:
+ tmr_hole = 0;
+ xir_reg = 0;
+ break;
+ }
+
+ /* open mask */
+ if (xir_reg) {
+ sh_irda_update_bits(self, IRTMR, tmr_hole, 0);
+ sh_irda_update_bits(self, xir_reg, xIR_MASK, 0);
+ }
+}
+
+static irqreturn_t sh_irda_irq(int irq, void *dev_id)
+{
+ struct sh_irda_self *self = dev_id;
+ struct sh_irda_xir_func *func = self->xir_func;
+ u16 isr = sh_irda_read(self, SIRISR);
+
+ /* clear irq */
+ sh_irda_write(self, SIRICR, isr);
+
+ if (isr & FRE)
+ func->xir_fre(self);
+ if (isr & TROV)
+ func->xir_trov(self);
+ if (isr & xIR_9)
+ func->xir_9(self);
+ if (isr & xIR_8)
+ func->xir_8(self);
+ if (isr & FTE)
+ func->xir_fte(self);
+
+ return IRQ_HANDLED;
+}
+
+/************************************************************************
+
+
+ CRC function
+
+
+************************************************************************/
+static void sh_irda_crc_reset(struct sh_irda_self *self)
+{
+ sh_irda_write(self, CRCCTR, CRC_RST);
+}
+
+static void sh_irda_crc_add(struct sh_irda_self *self, u16 data)
+{
+ sh_irda_write(self, CRCIR, data & CRC_IN_MASK);
+}
+
+static u16 sh_irda_crc_cnt(struct sh_irda_self *self)
+{
+ return CRC_CT_MASK & sh_irda_read(self, CRCCTR);
+}
+
+static u16 sh_irda_crc_out(struct sh_irda_self *self)
+{
+ return sh_irda_read(self, CRCOR);
+}
+
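+/* feed a known test pattern through the CRC engine and check the count and result registers */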
+static int sh_irda_crc_init(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+ int ret = -EIO;
+ u16 val;
+
+ sh_irda_crc_reset(self);
+
+ sh_irda_crc_add(self, 0xCC);
+ sh_irda_crc_add(self, 0xF5);
+ sh_irda_crc_add(self, 0xF1);
+ sh_irda_crc_add(self, 0xA7);
+
+ val = sh_irda_crc_cnt(self);
+ if (4 != val) {
+ dev_err(dev, "CRC count error %x\n", val);
+ goto crc_init_out;
+ }
+
+ val = sh_irda_crc_out(self);
+ if (0x51DF != val) {
+ dev_err(dev, "CRC result error%x\n", val);
+ goto crc_init_out;
+ }
+
+ ret = 0;
+
+crc_init_out:
+
+ sh_irda_crc_reset(self);
+ return ret;
+}
+
+/************************************************************************
+
+
+ iobuf function
+
+
+************************************************************************/
+static void sh_irda_remove_iobuf(struct sh_irda_self *self)
+{
+ kfree(self->rx_buff.head);
+
+ self->tx_buff.head = NULL;
+ self->tx_buff.data = NULL;
+ self->rx_buff.head = NULL;
+ self->rx_buff.data = NULL;
+}
+
+static int sh_irda_init_iobuf(struct sh_irda_self *self, int rxsize, int txsize)
+{
+ if (self->rx_buff.head ||
+ self->tx_buff.head) {
+ dev_err(&self->ndev->dev, "iobuff has already existed.");
+ return -EINVAL;
+ }
+
+ /* rx_buff */
+ self->rx_buff.head = kmalloc(rxsize, GFP_KERNEL);
+ if (!self->rx_buff.head)
+ return -ENOMEM;
+
+ self->rx_buff.truesize = rxsize;
+ self->rx_buff.in_frame = FALSE;
+ self->rx_buff.state = OUTSIDE_FRAME;
+ self->rx_buff.data = self->rx_buff.head;
+
+ /* tx_buff */
+ self->tx_buff.head = self->membase + IRDARAM;
+ self->tx_buff.truesize = IRDARAM_LEN;
+
+ return 0;
+}
+
+/************************************************************************
+
+
+ net_device_ops function
+
+
+************************************************************************/
+static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct sh_irda_self *self = netdev_priv(ndev);
+ struct device *dev = &self->ndev->dev;
+ int speed = irda_get_next_speed(skb);
+ int ret;
+
+ dev_dbg(dev, "hard xmit\n");
+
+ netif_stop_queue(ndev);
+ sh_irda_rcv_ctrl(self, 0);
+
+ ret = sh_irda_set_baudrate(self, speed);
+ if (ret < 0)
+ return ret;
+
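+ /* tx_buff.head points at IRDARAM, so async_wrap_skb() places the wrapped frame directly in transmit RAM */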
+ self->tx_buff.len = 0;
+ if (skb->len) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&self->lock, flags);
+ self->tx_buff.len = async_wrap_skb(skb,
+ self->tx_buff.head,
+ self->tx_buff.truesize);
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ if (self->tx_buff.len > self->tx_buff.truesize)
+ self->tx_buff.len = self->tx_buff.truesize;
+
+ sh_irda_write(self, IRTFLR, self->tx_buff.len);
+ sh_irda_write(self, IRTCTR, ARMOD | TE);
+ }
+
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
+{
+ /*
+ * FIXME
+ *
+ * This function is required by the IrDA framework,
+ * but there is nothing to do here yet.
+ */
+ return 0;
+}
+
+static struct net_device_stats *sh_irda_stats(struct net_device *ndev)
+{
+ struct sh_irda_self *self = netdev_priv(ndev);
+
+ return &self->ndev->stats;
+}
+
+static int sh_irda_open(struct net_device *ndev)
+{
+ struct sh_irda_self *self = netdev_priv(ndev);
+ int err;
+
+ clk_enable(self->clk);
+ err = sh_irda_crc_init(self);
+ if (err)
+ goto open_err;
+
+ sh_irda_set_mode(self, SH_IRDA_SIR);
+ sh_irda_set_timeout(self, 2);
+ sh_irda_set_baudrate(self, 9600);
+
+ self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME);
+ if (!self->irlap) {
+ err = -ENODEV;
+ goto open_err;
+ }
+
+ netif_start_queue(ndev);
+ sh_irda_rcv_ctrl(self, 1);
+ sh_irda_set_irq_mask(self);
+
+ dev_info(&ndev->dev, "opened\n");
+
+ return 0;
+
+open_err:
+ clk_disable(self->clk);
+
+ return err;
+}
+
+static int sh_irda_stop(struct net_device *ndev)
+{
+ struct sh_irda_self *self = netdev_priv(ndev);
+
+ /* Stop IrLAP */
+ if (self->irlap) {
+ irlap_close(self->irlap);
+ self->irlap = NULL;
+ }
+
+ netif_stop_queue(ndev);
+
+ dev_info(&ndev->dev, "stoped\n");
+
+ return 0;
+}
+
+static const struct net_device_ops sh_irda_ndo = {
+ .ndo_open = sh_irda_open,
+ .ndo_stop = sh_irda_stop,
+ .ndo_start_xmit = sh_irda_hard_xmit,
+ .ndo_do_ioctl = sh_irda_ioctl,
+ .ndo_get_stats = sh_irda_stats,
+};
+
+/************************************************************************
+
+
+ platform_driver function
+
+
+************************************************************************/
+static int __devinit sh_irda_probe(struct platform_device *pdev)
+{
+ struct net_device *ndev;
+ struct sh_irda_self *self;
+ struct resource *res;
+ char clk_name[8];
+ int irq;
+ int err = -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!res || irq < 0) {
+ dev_err(&pdev->dev, "Not enough platform resources.\n");
+ goto exit;
+ }
+
+ ndev = alloc_irdadev(sizeof(*self));
+ if (!ndev)
+ goto exit;
+
+ self = netdev_priv(ndev);
+ self->membase = ioremap_nocache(res->start, resource_size(res));
+ if (!self->membase) {
+ err = -ENXIO;
+ dev_err(&pdev->dev, "Unable to ioremap.\n");
+ goto err_mem_1;
+ }
+
+ err = sh_irda_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME);
+ if (err)
+ goto err_mem_2;
+
+ snprintf(clk_name, sizeof(clk_name), "irda%d", pdev->id);
+ self->clk = clk_get(&pdev->dev, clk_name);
+ if (IS_ERR(self->clk)) {
+ dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
+ err = -ENODEV;
+ goto err_mem_3;
+ }
+
+ irda_init_max_qos_capabilies(&self->qos);
+
+ ndev->netdev_ops = &sh_irda_ndo;
+ ndev->irq = irq;
+
+ self->ndev = ndev;
+ self->qos.baud_rate.bits &= IR_9600; /* FIXME */
+ self->qos.min_turn_time.bits = 1; /* 10 ms or more */
+ spin_lock_init(&self->lock);
+
+ irda_qos_bits_to_value(&self->qos);
+
+ err = register_netdev(ndev);
+ if (err)
+ goto err_mem_4;
+
+ platform_set_drvdata(pdev, ndev);
+
+ err = request_irq(irq, sh_irda_irq, IRQF_DISABLED, "sh_irda", self);
+ if (err) {
+ dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n");
+ unregister_netdev(ndev);
+ goto err_mem_4;
+ }
+
+ dev_info(&pdev->dev, "SuperH IrDA probed\n");
+
+ goto exit;
+
+err_mem_4:
+ clk_put(self->clk);
+err_mem_3:
+ sh_irda_remove_iobuf(self);
+err_mem_2:
+ iounmap(self->membase);
+err_mem_1:
+ free_netdev(ndev);
+exit:
+ return err;
+}
+
+static int __devexit sh_irda_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct sh_irda_self *self = netdev_priv(ndev);
+
+ if (!self)
+ return 0;
+
+ unregister_netdev(ndev);
+ clk_put(self->clk);
+ sh_irda_remove_iobuf(self);
+ iounmap(self->membase);
+ free_netdev(ndev);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver sh_irda_driver = {
+ .probe = sh_irda_probe,
+ .remove = __devexit_p(sh_irda_remove),
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static int __init sh_irda_init(void)
+{
+ return platform_driver_register(&sh_irda_driver);
+}
+
+static void __exit sh_irda_exit(void)
+{
+ platform_driver_unregister(&sh_irda_driver);
+}
+
+module_init(sh_irda_init);
+module_exit(sh_irda_exit);
+
+MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
+MODULE_DESCRIPTION("SuperH IrDA driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index 0745581c4b5e..5c5f99d50341 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -646,8 +646,10 @@ static int sh_sir_open(struct net_device *ndev)
sh_sir_set_baudrate(self, 9600);
self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME);
- if (!self->irlap)
+ if (!self->irlap) {
+ err = -ENODEV;
goto open_err;
+ }
/*
* Now enable the interrupt then start the queue
@@ -707,7 +709,6 @@ static int __devinit sh_sir_probe(struct platform_device *pdev)
struct sh_sir_self *self;
struct resource *res;
char clk_name[8];
- void __iomem *base;
unsigned int irq;
int err = -ENOMEM;
@@ -722,14 +723,14 @@ static int __devinit sh_sir_probe(struct platform_device *pdev)
if (!ndev)
goto exit;
- base = ioremap_nocache(res->start, resource_size(res));
- if (!base) {
+ self = netdev_priv(ndev);
+ self->membase = ioremap_nocache(res->start, resource_size(res));
+ if (!self->membase) {
err = -ENXIO;
dev_err(&pdev->dev, "Unable to ioremap.\n");
goto err_mem_1;
}
- self = netdev_priv(ndev);
err = sh_sir_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME);
if (err)
goto err_mem_2;
@@ -746,7 +747,6 @@ static int __devinit sh_sir_probe(struct platform_device *pdev)
ndev->netdev_ops = &sh_sir_ndo;
ndev->irq = irq;
- self->membase = base;
self->ndev = ndev;
self->qos.baud_rate.bits &= IR_9600; /* FIXME */
self->qos.min_turn_time.bits = 1; /* 10 ms or more */
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index de91cd14016b..1b051dab7b29 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -655,7 +655,6 @@ static netdev_tx_t sirdev_hard_xmit(struct sk_buff *skb,
if (likely(actual > 0)) {
dev->tx_skb = skb;
- ndev->trans_start = jiffies;
dev->tx_buff.data += actual;
dev->tx_buff.len -= actual;
}
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 6af84d88cd03..35e4e44040a2 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -868,7 +868,7 @@ static void smsc_ircc_timeout(struct net_device *dev)
spin_lock_irqsave(&self->lock, flags);
smsc_ircc_sir_start(self);
smsc_ircc_change_speed(self, self->io.speed);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
spin_unlock_irqrestore(&self->lock, flags);
}
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index 209d4bcfaced..c3d07382b7fa 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -1037,7 +1037,6 @@ static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb,
wmb();
outw(0, iobase+VLSI_PIO_PROMPT);
}
- ndev->trans_start = jiffies;
if (ring_put(r) == NULL) {
netif_stop_queue(ndev);
@@ -1742,7 +1741,7 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
vlsi_irda_dev_t *idev;
if (!ndev) {
- IRDA_ERROR("%s - %s: no netdevice \n",
+ IRDA_ERROR("%s - %s: no netdevice\n",
__func__, pci_name(pdev));
return 0;
}
@@ -1781,7 +1780,7 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
vlsi_irda_dev_t *idev;
if (!ndev) {
- IRDA_ERROR("%s - %s: no netdevice \n",
+ IRDA_ERROR("%s - %s: no netdevice\n",
__func__, pci_name(pdev));
return 0;
}
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index cb0cb758be64..1f9c3f08d1a3 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -515,7 +515,6 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
/* Check for empty frame */
if (!skb->len) {
w83977af_change_speed(self, speed);
- dev->trans_start = jiffies;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
} else
@@ -549,7 +548,6 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
switch_bank(iobase, SET0);
outb(ICR_ETXTHI, iobase+ICR);
}
- dev->trans_start = jiffies;
dev_kfree_skb(skb);
/* Restore set register */
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 773c59c89691..ba1de5973fb2 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -962,15 +962,15 @@ static void veth_set_multicast_list(struct net_device *dev)
(netdev_mc_count(dev) > VETH_MAX_MCAST)) {
port->promiscuous = 1;
} else {
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
port->promiscuous = 0;
/* Update table */
port->num_mcast = 0;
- netdev_for_each_mc_addr(dmi, dev) {
- u8 *addr = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ u8 *addr = ha->addr;
u64 xaddr = 0;
if (addr[0] & 0x01) {/* multicast address? */
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index 92d2e71d0c8b..521c0c732998 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -78,9 +78,13 @@ struct ixgb_adapter;
#define PFX "ixgb: "
#ifdef _DEBUG_DRIVER_
-#define IXGB_DBG(args...) printk(KERN_DEBUG PFX args)
+#define IXGB_DBG(fmt, args...) printk(KERN_DEBUG PFX fmt, ##args)
#else
-#define IXGB_DBG(args...)
+#define IXGB_DBG(fmt, args...) \
+do { \
+ if (0) \
+ printk(KERN_DEBUG PFX fmt, ##args); \
+} while (0)
#endif
/* TX/RX descriptor defines */
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c
index 89ffa7264a12..06303a36aaf7 100644
--- a/drivers/net/ixgb/ixgb_ee.c
+++ b/drivers/net/ixgb/ixgb_ee.c
@@ -26,6 +26,8 @@
*******************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "ixgb_hw.h"
#include "ixgb_ee.h"
/* Local prototypes */
@@ -467,11 +469,11 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
u16 checksum = 0;
struct ixgb_ee_map_type *ee_map;
- DEBUGFUNC("ixgb_get_eeprom_data");
+ ENTER();
ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
- DEBUGOUT("ixgb_ee: Reading eeprom data\n");
+ pr_debug("Reading eeprom data\n");
for (i = 0; i < IXGB_EEPROM_SIZE ; i++) {
u16 ee_data;
ee_data = ixgb_read_eeprom(hw, i);
@@ -480,7 +482,7 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
}
if (checksum != (u16) EEPROM_SUM) {
- DEBUGOUT("ixgb_ee: Checksum invalid.\n");
+ pr_debug("Checksum invalid\n");
/* clear the init_ctrl_reg_1 to signify that the cache is
* invalidated */
ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR);
@@ -489,7 +491,7 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK))
!= cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) {
- DEBUGOUT("ixgb_ee: Signature invalid.\n");
+ pr_debug("Signature invalid\n");
return(false);
}
@@ -555,13 +557,13 @@ ixgb_get_ee_mac_addr(struct ixgb_hw *hw,
int i;
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
- DEBUGFUNC("ixgb_get_ee_mac_addr");
+ ENTER();
if (ixgb_check_and_get_eeprom_data(hw) == true) {
for (i = 0; i < IXGB_ETH_LENGTH_OF_ADDRESS; i++) {
mac_addr[i] = ee_map->mac_addr[i];
- DEBUGOUT2("mac(%d) = %.2X\n", i, mac_addr[i]);
}
+ pr_debug("eeprom mac address = %pM\n", mac_addr);
}
}
diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c
index ff67a84e6802..cd247b8d2b73 100644
--- a/drivers/net/ixgb/ixgb_hw.c
+++ b/drivers/net/ixgb/ixgb_hw.c
@@ -30,9 +30,13 @@
* Shared functions for accessing and configuring the adapter
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "ixgb_hw.h"
#include "ixgb_ids.h"
+#include <linux/etherdevice.h>
+
/* Local function prototypes */
static u32 ixgb_hash_mc_addr(struct ixgb_hw *hw, u8 * mc_addr);
@@ -120,13 +124,13 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
u32 ctrl_reg;
u32 icr_reg;
- DEBUGFUNC("ixgb_adapter_stop");
+ ENTER();
/* If we are stopped or resetting exit gracefully and wait to be
* started again before accessing the hardware.
*/
if (hw->adapter_stopped) {
- DEBUGOUT("Exiting because the adapter is already stopped!!!\n");
+ pr_debug("Exiting because the adapter is already stopped!!!\n");
return false;
}
@@ -136,7 +140,7 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
hw->adapter_stopped = true;
/* Clear interrupt mask to stop board from generating interrupts */
- DEBUGOUT("Masking off all interrupts\n");
+ pr_debug("Masking off all interrupts\n");
IXGB_WRITE_REG(hw, IMC, 0xFFFFFFFF);
/* Disable the Transmit and Receive units. Then delay to allow
@@ -152,12 +156,12 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
* the current PCI configuration. The global reset bit is self-
* clearing, and should clear within a microsecond.
*/
- DEBUGOUT("Issuing a global reset to MAC\n");
+ pr_debug("Issuing a global reset to MAC\n");
ctrl_reg = ixgb_mac_reset(hw);
/* Clear interrupt mask to stop board from generating interrupts */
- DEBUGOUT("Masking off all interrupts\n");
+ pr_debug("Masking off all interrupts\n");
IXGB_WRITE_REG(hw, IMC, 0xffffffff);
/* Clear any pending interrupt events. */
@@ -183,7 +187,7 @@ ixgb_identify_xpak_vendor(struct ixgb_hw *hw)
u16 vendor_name[5];
ixgb_xpak_vendor xpak_vendor;
- DEBUGFUNC("ixgb_identify_xpak_vendor");
+ ENTER();
/* Read the first few bytes of the vendor string from the XPAK NVR
* registers. These are standard XENPAK/XPAK registers, so all XPAK
@@ -222,12 +226,12 @@ ixgb_identify_phy(struct ixgb_hw *hw)
ixgb_phy_type phy_type;
ixgb_xpak_vendor xpak_vendor;
- DEBUGFUNC("ixgb_identify_phy");
+ ENTER();
/* Infer the transceiver/phy type from the device id */
switch (hw->device_id) {
case IXGB_DEVICE_ID_82597EX:
- DEBUGOUT("Identified TXN17401 optics\n");
+ pr_debug("Identified TXN17401 optics\n");
phy_type = ixgb_phy_type_txn17401;
break;
@@ -237,30 +241,30 @@ ixgb_identify_phy(struct ixgb_hw *hw)
* type of optics. */
xpak_vendor = ixgb_identify_xpak_vendor(hw);
if (xpak_vendor == ixgb_xpak_vendor_intel) {
- DEBUGOUT("Identified TXN17201 optics\n");
+ pr_debug("Identified TXN17201 optics\n");
phy_type = ixgb_phy_type_txn17201;
} else {
- DEBUGOUT("Identified G6005 optics\n");
+ pr_debug("Identified G6005 optics\n");
phy_type = ixgb_phy_type_g6005;
}
break;
case IXGB_DEVICE_ID_82597EX_LR:
- DEBUGOUT("Identified G6104 optics\n");
+ pr_debug("Identified G6104 optics\n");
phy_type = ixgb_phy_type_g6104;
break;
case IXGB_DEVICE_ID_82597EX_CX4:
- DEBUGOUT("Identified CX4\n");
+ pr_debug("Identified CX4\n");
xpak_vendor = ixgb_identify_xpak_vendor(hw);
if (xpak_vendor == ixgb_xpak_vendor_intel) {
- DEBUGOUT("Identified TXN17201 optics\n");
+ pr_debug("Identified TXN17201 optics\n");
phy_type = ixgb_phy_type_txn17201;
} else {
- DEBUGOUT("Identified G6005 optics\n");
+ pr_debug("Identified G6005 optics\n");
phy_type = ixgb_phy_type_g6005;
}
break;
default:
- DEBUGOUT("Unknown physical layer module\n");
+ pr_debug("Unknown physical layer module\n");
phy_type = ixgb_phy_type_unknown;
break;
}
@@ -296,18 +300,18 @@ ixgb_init_hw(struct ixgb_hw *hw)
u32 ctrl_reg;
bool status;
- DEBUGFUNC("ixgb_init_hw");
+ ENTER();
/* Issue a global reset to the MAC. This will reset the chip's
* transmit, receive, DMA, and link units. It will not effect
* the current PCI configuration. The global reset bit is self-
* clearing, and should clear within a microsecond.
*/
- DEBUGOUT("Issuing a global reset to MAC\n");
+ pr_debug("Issuing a global reset to MAC\n");
ctrl_reg = ixgb_mac_reset(hw);
- DEBUGOUT("Issuing an EE reset to MAC\n");
+ pr_debug("Issuing an EE reset to MAC\n");
#ifdef HP_ZX1
/* Workaround for 82597EX reset errata */
IXGB_WRITE_REG_IO(hw, CTRL1, IXGB_CTRL1_EE_RST);
@@ -335,7 +339,7 @@ ixgb_init_hw(struct ixgb_hw *hw)
* If it is not valid, we fail hardware init.
*/
if (!mac_addr_valid(hw->curr_mac_addr)) {
- DEBUGOUT("MAC address invalid after ixgb_init_rx_addrs\n");
+ pr_debug("MAC address invalid after ixgb_init_rx_addrs\n");
return(false);
}
@@ -346,7 +350,7 @@ ixgb_init_hw(struct ixgb_hw *hw)
ixgb_get_bus_info(hw);
/* Zero out the Multicast HASH table */
- DEBUGOUT("Zeroing the MTA\n");
+ pr_debug("Zeroing the MTA\n");
for (i = 0; i < IXGB_MC_TBL_SIZE; i++)
IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
@@ -379,7 +383,7 @@ ixgb_init_rx_addrs(struct ixgb_hw *hw)
{
u32 i;
- DEBUGFUNC("ixgb_init_rx_addrs");
+ ENTER();
/*
* If the current mac address is valid, assume it is a software override
@@ -391,28 +395,19 @@ ixgb_init_rx_addrs(struct ixgb_hw *hw)
/* Get the MAC address from the eeprom for later reference */
ixgb_get_ee_mac_addr(hw, hw->curr_mac_addr);
- DEBUGOUT3(" Keeping Permanent MAC Addr =%.2X %.2X %.2X ",
- hw->curr_mac_addr[0],
- hw->curr_mac_addr[1], hw->curr_mac_addr[2]);
- DEBUGOUT3("%.2X %.2X %.2X\n",
- hw->curr_mac_addr[3],
- hw->curr_mac_addr[4], hw->curr_mac_addr[5]);
+ pr_debug("Keeping Permanent MAC Addr = %pM\n",
+ hw->curr_mac_addr);
} else {
/* Setup the receive address. */
- DEBUGOUT("Overriding MAC Address in RAR[0]\n");
- DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
- hw->curr_mac_addr[0],
- hw->curr_mac_addr[1], hw->curr_mac_addr[2]);
- DEBUGOUT3("%.2X %.2X %.2X\n",
- hw->curr_mac_addr[3],
- hw->curr_mac_addr[4], hw->curr_mac_addr[5]);
+ pr_debug("Overriding MAC Address in RAR[0]\n");
+ pr_debug("New MAC Addr = %pM\n", hw->curr_mac_addr);
ixgb_rar_set(hw, hw->curr_mac_addr, 0);
}
/* Zero out the other 15 receive addresses. */
- DEBUGOUT("Clearing RAR[1-15]\n");
+ pr_debug("Clearing RAR[1-15]\n");
for (i = 1; i < IXGB_RAR_ENTRIES; i++) {
/* Write high reg first to disable the AV bit first */
IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
@@ -444,64 +439,50 @@ ixgb_mc_addr_list_update(struct ixgb_hw *hw,
u32 hash_value;
u32 i;
u32 rar_used_count = 1; /* RAR[0] is used for our MAC address */
+ u8 *mca;
- DEBUGFUNC("ixgb_mc_addr_list_update");
+ ENTER();
/* Set the new number of MC addresses that we are being requested to use. */
hw->num_mc_addrs = mc_addr_count;
/* Clear RAR[1-15] */
- DEBUGOUT(" Clearing RAR[1-15]\n");
+ pr_debug("Clearing RAR[1-15]\n");
for (i = rar_used_count; i < IXGB_RAR_ENTRIES; i++) {
IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
}
/* Clear the MTA */
- DEBUGOUT(" Clearing MTA\n");
+ pr_debug("Clearing MTA\n");
for (i = 0; i < IXGB_MC_TBL_SIZE; i++)
IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
/* Add the new addresses */
+ mca = mc_addr_list;
for (i = 0; i < mc_addr_count; i++) {
- DEBUGOUT(" Adding the multicast addresses:\n");
- DEBUGOUT7(" MC Addr #%d =%.2X %.2X %.2X %.2X %.2X %.2X\n", i,
- mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)],
- mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
- 1],
- mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
- 2],
- mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
- 3],
- mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
- 4],
- mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
- 5]);
+ pr_debug("Adding the multicast addresses:\n");
+ pr_debug("MC Addr #%d = %pM\n", i, mca);
/* Place this multicast address in the RAR if there is room, *
* else put it in the MTA
*/
if (rar_used_count < IXGB_RAR_ENTRIES) {
- ixgb_rar_set(hw,
- mc_addr_list +
- (i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)),
- rar_used_count);
- DEBUGOUT1("Added a multicast address to RAR[%d]\n", i);
+ ixgb_rar_set(hw, mca, rar_used_count);
+ pr_debug("Added a multicast address to RAR[%d]\n", i);
rar_used_count++;
} else {
- hash_value = ixgb_hash_mc_addr(hw,
- mc_addr_list +
- (i *
- (IXGB_ETH_LENGTH_OF_ADDRESS
- + pad)));
+ hash_value = ixgb_hash_mc_addr(hw, mca);
- DEBUGOUT1(" Hash value = 0x%03X\n", hash_value);
+ pr_debug("Hash value = 0x%03X\n", hash_value);
ixgb_mta_set(hw, hash_value);
}
+
+ mca += IXGB_ETH_LENGTH_OF_ADDRESS + pad;
}
- DEBUGOUT("MC Update Complete\n");
+ pr_debug("MC Update Complete\n");
return;
}
@@ -520,7 +501,7 @@ ixgb_hash_mc_addr(struct ixgb_hw *hw,
{
u32 hash_value = 0;
- DEBUGFUNC("ixgb_hash_mc_addr");
+ ENTER();
/* The portion of the address that is used for the hash table is
* determined by the mc_filter_type setting.
@@ -547,7 +528,7 @@ ixgb_hash_mc_addr(struct ixgb_hw *hw,
break;
default:
/* Invalid mc_filter_type, what should we do? */
- DEBUGOUT("MC filter type param set incorrectly\n");
+ pr_debug("MC filter type param set incorrectly\n");
ASSERT(0);
break;
}
@@ -603,7 +584,7 @@ ixgb_rar_set(struct ixgb_hw *hw,
{
u32 rar_low, rar_high;
- DEBUGFUNC("ixgb_rar_set");
+ ENTER();
/* HW expects these in little endian so we reverse the byte order
* from network order (big endian) to little endian
@@ -666,7 +647,7 @@ ixgb_setup_fc(struct ixgb_hw *hw)
u32 pap_reg = 0; /* by default, assume no pause time */
bool status = true;
- DEBUGFUNC("ixgb_setup_fc");
+ ENTER();
/* Get the current control reg 0 settings */
ctrl_reg = IXGB_READ_REG(hw, CTRL0);
@@ -710,7 +691,7 @@ ixgb_setup_fc(struct ixgb_hw *hw)
break;
default:
/* We should never get here. The value should be 0-3. */
- DEBUGOUT("Flow control param set incorrectly\n");
+ pr_debug("Flow control param set incorrectly\n");
ASSERT(0);
break;
}
@@ -940,7 +921,7 @@ ixgb_check_for_link(struct ixgb_hw *hw)
u32 status_reg;
u32 xpcss_reg;
- DEBUGFUNC("ixgb_check_for_link");
+ ENTER();
xpcss_reg = IXGB_READ_REG(hw, XPCSS);
status_reg = IXGB_READ_REG(hw, STATUS);
@@ -950,7 +931,7 @@ ixgb_check_for_link(struct ixgb_hw *hw)
hw->link_up = true;
} else if (!(xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) &&
(status_reg & IXGB_STATUS_LU)) {
- DEBUGOUT("XPCSS Not Aligned while Status:LU is set.\n");
+ pr_debug("XPCSS Not Aligned while Status:LU is set\n");
hw->link_up = ixgb_link_reset(hw);
} else {
/*
@@ -981,8 +962,7 @@ bool ixgb_check_for_bad_link(struct ixgb_hw *hw)
newRFC = IXGB_READ_REG(hw, RFC);
if ((hw->lastLFC + 250 < newLFC)
|| (hw->lastRFC + 250 < newRFC)) {
- DEBUGOUT
- ("BAD LINK! too many LFC/RFC since last check\n");
+ pr_debug("BAD LINK! too many LFC/RFC since last check\n");
bad_link_returncode = true;
}
hw->lastLFC = newLFC;
@@ -1002,11 +982,11 @@ ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
{
volatile u32 temp_reg;
- DEBUGFUNC("ixgb_clear_hw_cntrs");
+ ENTER();
/* if we are stopped or resetting exit gracefully */
if (hw->adapter_stopped) {
- DEBUGOUT("Exiting because the adapter is stopped!!!\n");
+ pr_debug("Exiting because the adapter is stopped!!!\n");
return;
}
@@ -1156,26 +1136,21 @@ static bool
mac_addr_valid(u8 *mac_addr)
{
bool is_valid = true;
- DEBUGFUNC("mac_addr_valid");
+ ENTER();
/* Make sure it is not a multicast address */
- if (IS_MULTICAST(mac_addr)) {
- DEBUGOUT("MAC address is multicast\n");
+ if (is_multicast_ether_addr(mac_addr)) {
+ pr_debug("MAC address is multicast\n");
is_valid = false;
}
/* Not a broadcast address */
- else if (IS_BROADCAST(mac_addr)) {
- DEBUGOUT("MAC address is broadcast\n");
+ else if (is_broadcast_ether_addr(mac_addr)) {
+ pr_debug("MAC address is broadcast\n");
is_valid = false;
}
/* Reject the zero address */
- else if (mac_addr[0] == 0 &&
- mac_addr[1] == 0 &&
- mac_addr[2] == 0 &&
- mac_addr[3] == 0 &&
- mac_addr[4] == 0 &&
- mac_addr[5] == 0) {
- DEBUGOUT("MAC address is all zeros\n");
+ else if (is_zero_ether_addr(mac_addr)) {
+ pr_debug("MAC address is all zeros\n");
is_valid = false;
}
return (is_valid);
diff --git a/drivers/net/ixgb/ixgb_hw.h b/drivers/net/ixgb/ixgb_hw.h
index af6ca3aab5ad..873d32b89fba 100644
--- a/drivers/net/ixgb/ixgb_hw.h
+++ b/drivers/net/ixgb/ixgb_hw.h
@@ -636,18 +636,6 @@ struct ixgb_flash_buffer {
u8 filler3[0xAAAA];
};
-/*
- * This is a little-endian specific check.
- */
-#define IS_MULTICAST(Address) \
- (bool)(((u8 *)(Address))[0] & ((u8)0x01))
-
-/*
- * Check whether an address is broadcast.
- */
-#define IS_BROADCAST(Address) \
- ((((u8 *)(Address))[0] == ((u8)0xff)) && (((u8 *)(Address))[1] == ((u8)0xff)))
-
/* Flow control parameters */
struct ixgb_fc {
u32 high_water; /* Flow Control High-water */
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index c9fef65cb98b..d58ca6b578cc 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -26,6 +26,8 @@
*******************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "ixgb.h"
char ixgb_driver_name[] = "ixgb";
@@ -146,10 +148,8 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static int __init
ixgb_init_module(void)
{
- printk(KERN_INFO "%s - version %s\n",
- ixgb_driver_string, ixgb_driver_version);
-
- printk(KERN_INFO "%s\n", ixgb_copyright);
+ pr_info("%s - version %s\n", ixgb_driver_string, ixgb_driver_version);
+ pr_info("%s\n", ixgb_copyright);
return pci_register_driver(&ixgb_driver);
}
@@ -368,17 +368,22 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
- if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) &&
- !(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))) {
- pci_using_dac = 1;
+ pci_using_dac = 0;
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (!err) {
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (!err)
+ pci_using_dac = 1;
} else {
- if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) ||
- (err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))) {
- printk(KERN_ERR
- "ixgb: No usable DMA configuration, aborting\n");
- goto err_dma_mask;
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
+ if (err) {
+ pr_err("No usable DMA configuration, aborting\n");
+ goto err_dma_mask;
+ }
}
- pci_using_dac = 0;
}
err = pci_request_regions(pdev, ixgb_driver_name);
@@ -674,7 +679,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
txdr->size = ALIGN(txdr->size, 4096);
- txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+ txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+ GFP_KERNEL);
if (!txdr->desc) {
vfree(txdr->buffer_info);
netif_err(adapter, probe, adapter->netdev,
@@ -763,7 +769,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
rxdr->size = ALIGN(rxdr->size, 4096);
- rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+ rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+ GFP_KERNEL);
if (!rxdr->desc) {
vfree(rxdr->buffer_info);
@@ -884,8 +891,8 @@ ixgb_free_tx_resources(struct ixgb_adapter *adapter)
vfree(adapter->tx_ring.buffer_info);
adapter->tx_ring.buffer_info = NULL;
- pci_free_consistent(pdev, adapter->tx_ring.size,
- adapter->tx_ring.desc, adapter->tx_ring.dma);
+ dma_free_coherent(&pdev->dev, adapter->tx_ring.size,
+ adapter->tx_ring.desc, adapter->tx_ring.dma);
adapter->tx_ring.desc = NULL;
}
@@ -896,12 +903,11 @@ ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
{
if (buffer_info->dma) {
if (buffer_info->mapped_as_page)
- pci_unmap_page(adapter->pdev, buffer_info->dma,
- buffer_info->length, PCI_DMA_TODEVICE);
+ dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
else
- pci_unmap_single(adapter->pdev, buffer_info->dma,
- buffer_info->length,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
buffer_info->dma = 0;
}
@@ -967,7 +973,8 @@ ixgb_free_rx_resources(struct ixgb_adapter *adapter)
vfree(rx_ring->buffer_info);
rx_ring->buffer_info = NULL;
- pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+ dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -991,10 +998,10 @@ ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
for (i = 0; i < rx_ring->count; i++) {
buffer_info = &rx_ring->buffer_info[i];
if (buffer_info->dma) {
- pci_unmap_single(pdev,
+ dma_unmap_single(&pdev->dev,
buffer_info->dma,
buffer_info->length,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
buffer_info->length = 0;
}
@@ -1058,7 +1065,7 @@ ixgb_set_multi(struct net_device *netdev)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
struct ixgb_hw *hw = &adapter->hw;
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u32 rctl;
int i;
@@ -1089,9 +1096,9 @@ ixgb_set_multi(struct net_device *netdev)
IXGB_WRITE_REG(hw, RCTL, rctl);
i = 0;
- netdev_for_each_mc_addr(mc_ptr, netdev)
+ netdev_for_each_mc_addr(ha, netdev)
memcpy(&mta[i++ * IXGB_ETH_LENGTH_OF_ADDRESS],
- mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS);
+ ha->addr, IXGB_ETH_LENGTH_OF_ADDRESS);
ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0);
}
@@ -1118,15 +1125,14 @@ ixgb_watchdog(unsigned long data)
if (adapter->hw.link_up) {
if (!netif_carrier_ok(netdev)) {
- printk(KERN_INFO "ixgb: %s NIC Link is Up 10 Gbps "
- "Full Duplex, Flow Control: %s\n",
- netdev->name,
- (adapter->hw.fc.type == ixgb_fc_full) ?
- "RX/TX" :
- ((adapter->hw.fc.type == ixgb_fc_rx_pause) ?
- "RX" :
- ((adapter->hw.fc.type == ixgb_fc_tx_pause) ?
- "TX" : "None")));
+ netdev_info(netdev,
+ "NIC Link is Up 10 Gbps Full Duplex, Flow Control: %s\n",
+ (adapter->hw.fc.type == ixgb_fc_full) ?
+ "RX/TX" :
+ (adapter->hw.fc.type == ixgb_fc_rx_pause) ?
+ "RX" :
+ (adapter->hw.fc.type == ixgb_fc_tx_pause) ?
+ "TX" : "None");
adapter->link_speed = 10000;
adapter->link_duplex = FULL_DUPLEX;
netif_carrier_on(netdev);
@@ -1135,8 +1141,7 @@ ixgb_watchdog(unsigned long data)
if (netif_carrier_ok(netdev)) {
adapter->link_speed = 0;
adapter->link_duplex = 0;
- printk(KERN_INFO "ixgb: %s NIC Link is Down\n",
- netdev->name);
+ netdev_info(netdev, "NIC Link is Down\n");
netif_carrier_off(netdev);
}
}
@@ -1303,9 +1308,10 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
WARN_ON(buffer_info->dma != 0);
buffer_info->time_stamp = jiffies;
buffer_info->mapped_as_page = false;
- buffer_info->dma = pci_map_single(pdev, skb->data + offset,
- size, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ buffer_info->dma = dma_map_single(&pdev->dev,
+ skb->data + offset,
+ size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
buffer_info->next_to_watch = 0;
@@ -1344,10 +1350,9 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
buffer_info->time_stamp = jiffies;
buffer_info->mapped_as_page = true;
buffer_info->dma =
- pci_map_page(pdev, frag->page,
- offset, size,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ dma_map_page(&pdev->dev, frag->page,
+ offset, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
buffer_info->next_to_watch = 0;
@@ -1965,10 +1970,10 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
cleaned = true;
cleaned_count++;
- pci_unmap_single(pdev,
+ dma_unmap_single(&pdev->dev,
buffer_info->dma,
buffer_info->length,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->length);
@@ -2091,10 +2096,10 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
buffer_info->skb = skb;
buffer_info->length = adapter->rx_buffer_len;
map_skb:
- buffer_info->dma = pci_map_single(pdev,
+ buffer_info->dma = dma_map_single(&pdev->dev,
skb->data,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
rx_desc = IXGB_RX_DESC(*rx_ring, i);
rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
@@ -2322,7 +2327,7 @@ static void ixgb_io_resume(struct pci_dev *pdev)
if (netif_running(netdev)) {
if (ixgb_up(adapter)) {
- printk ("ixgb: can't bring device back up after reset\n");
+ pr_err("can't bring device back up after reset\n");
return;
}
}
diff --git a/drivers/net/ixgb/ixgb_osdep.h b/drivers/net/ixgb/ixgb_osdep.h
index 371a6be4d965..e361185920ef 100644
--- a/drivers/net/ixgb/ixgb_osdep.h
+++ b/drivers/net/ixgb/ixgb_osdep.h
@@ -41,20 +41,8 @@
#undef ASSERT
#define ASSERT(x) BUG_ON(!(x))
-#define MSGOUT(S, A, B) printk(KERN_DEBUG S "\n", A, B)
-
-#ifdef DBG
-#define DEBUGOUT(S) printk(KERN_DEBUG S "\n")
-#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S "\n", A)
-#else
-#define DEBUGOUT(S)
-#define DEBUGOUT1(S, A...)
-#endif
-
-#define DEBUGFUNC(F) DEBUGOUT(F)
-#define DEBUGOUT2 DEBUGOUT1
-#define DEBUGOUT3 DEBUGOUT2
-#define DEBUGOUT7 DEBUGOUT3
+
+#define ENTER() pr_debug("%s\n", __func__)
#define IXGB_WRITE_REG(a, reg, value) ( \
writel((value), ((a)->hw_addr + IXGB_##reg)))
diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
index af35e1ddadd6..88a08f056241 100644
--- a/drivers/net/ixgb/ixgb_param.c
+++ b/drivers/net/ixgb/ixgb_param.c
@@ -26,6 +26,8 @@
*******************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "ixgb.h"
/* This is the only thing that needs to be changed to adjust the
@@ -209,16 +211,16 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
case enable_option:
switch (*value) {
case OPTION_ENABLED:
- printk(KERN_INFO "%s Enabled\n", opt->name);
+ pr_info("%s Enabled\n", opt->name);
return 0;
case OPTION_DISABLED:
- printk(KERN_INFO "%s Disabled\n", opt->name);
+ pr_info("%s Disabled\n", opt->name);
return 0;
}
break;
case range_option:
if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
- printk(KERN_INFO "%s set to %i\n", opt->name, *value);
+ pr_info("%s set to %i\n", opt->name, *value);
return 0;
}
break;
@@ -230,7 +232,7 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
ent = &opt->arg.l.p[i];
if (*value == ent->i) {
if (ent->str[0] != '\0')
- printk(KERN_INFO "%s\n", ent->str);
+ pr_info("%s\n", ent->str);
return 0;
}
}
@@ -240,8 +242,7 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
BUG();
}
- printk(KERN_INFO "Invalid %s specified (%i) %s\n",
- opt->name, *value, opt->err);
+ pr_info("Invalid %s specified (%i) %s\n", opt->name, *value, opt->err);
*value = opt->def;
return -1;
}
@@ -261,9 +262,8 @@ ixgb_check_options(struct ixgb_adapter *adapter)
{
int bd = adapter->bd_number;
if (bd >= IXGB_MAX_NIC) {
- printk(KERN_NOTICE
- "Warning: no configuration for board #%i\n", bd);
- printk(KERN_NOTICE "Using defaults for all values\n");
+ pr_notice("Warning: no configuration for board #%i\n", bd);
+ pr_notice("Using defaults for all values\n");
}
{ /* Transmit Descriptor Count */
@@ -363,8 +363,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
adapter->hw.fc.high_water = opt.def;
}
if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
- printk(KERN_INFO
- "Ignoring RxFCHighThresh when no RxFC\n");
+ pr_info("Ignoring RxFCHighThresh when no RxFC\n");
}
{ /* Receive Flow Control Low Threshold */
const struct ixgb_option opt = {
@@ -383,8 +382,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
adapter->hw.fc.low_water = opt.def;
}
if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
- printk(KERN_INFO
- "Ignoring RxFCLowThresh when no RxFC\n");
+ pr_info("Ignoring RxFCLowThresh when no RxFC\n");
}
{ /* Flow Control Pause Time Request*/
const struct ixgb_option opt = {
@@ -404,17 +402,14 @@ ixgb_check_options(struct ixgb_adapter *adapter)
adapter->hw.fc.pause_time = opt.def;
}
if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
- printk(KERN_INFO
- "Ignoring FCReqTimeout when no RxFC\n");
+ pr_info("Ignoring FCReqTimeout when no RxFC\n");
}
/* high low and spacing check for rx flow control thresholds */
if (adapter->hw.fc.type & ixgb_fc_tx_pause) {
/* high must be greater than low */
if (adapter->hw.fc.high_water < (adapter->hw.fc.low_water + 8)) {
/* set defaults */
- printk(KERN_INFO
- "RxFCHighThresh must be >= (RxFCLowThresh + 8), "
- "Using Defaults\n");
+ pr_info("RxFCHighThresh must be >= (RxFCLowThresh + 8), Using Defaults\n");
adapter->hw.fc.high_water = DEFAULT_FCRTH;
adapter->hw.fc.low_water = DEFAULT_FCRTL;
}
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 79c35ae3718c..d0ea3d6dea95 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -111,7 +111,10 @@ struct vf_data_storage {
u16 default_vf_vlan_id;
u16 vlans_enabled;
bool clear_to_send;
+ bool pf_set_mac;
int rar;
+ u16 pf_vlan; /* When set, guest VLAN config not allowed. */
+ u16 pf_qos;
};
/* wrapper around a pointer to a socket buffer,
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 12fc0e7ba2ca..38c384031c4c 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -642,6 +642,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
s32 i, j;
bool link_up = false;
u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ struct ixgbe_adapter *adapter = hw->back;
hw_dbg(hw, "ixgbe_setup_mac_link_smartspeed.\n");
@@ -726,6 +727,10 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
autoneg_wait_to_complete);
out:
+ if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
+ netif_info(adapter, hw, adapter->netdev, "Smartspeed has"
+ " downgraded the link speed from the maximum"
+ " advertised\n");
return status;
}
@@ -1303,7 +1308,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
}
if (i >= IXGBE_FDIRCMD_CMD_POLL) {
hw_dbg(hw ,"Flow Director previous command isn't complete, "
- "aborting table re-initialization. \n");
+ "aborting table re-initialization.\n");
return IXGBE_ERR_FDIR_REINIT_FAILED;
}
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index eb49020903c1..6eb5814ca7da 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1484,26 +1484,24 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
/**
* ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
* @hw: pointer to hardware structure
- * @mc_addr_list: the list of new multicast addresses
- * @mc_addr_count: number of addresses
- * @next: iterator function to walk the multicast address list
+ * @netdev: pointer to net device structure
*
* The given list replaces any existing list. Clears the MC addrs from receive
* address registers and the multicast table. Uses unused receive address
* registers for the first multicast addresses, and hashes the rest into the
* multicast table.
**/
-s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
- u32 mc_addr_count, ixgbe_mc_addr_itr next)
+s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
+ struct net_device *netdev)
{
+ struct netdev_hw_addr *ha;
u32 i;
- u32 vmdq;
/*
* Set the new number of MC addresses that we are being requested to
* use.
*/
- hw->addr_ctrl.num_mc_addrs = mc_addr_count;
+ hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
hw->addr_ctrl.mta_in_use = 0;
/* Clear the MTA */
@@ -1512,9 +1510,9 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
/* Add the new addresses */
- for (i = 0; i < mc_addr_count; i++) {
+ netdev_for_each_mc_addr(ha, netdev) {
hw_dbg(hw, " Adding the multicast addresses:\n");
- ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
+ ixgbe_set_mta(hw, ha->addr);
}
/* Enable mta */
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 13606d4809c9..264eef575cd6 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -56,9 +56,8 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
u32 enable_addr);
s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
-s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
- u32 mc_addr_count,
- ixgbe_mc_addr_itr func);
+s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
+ struct net_device *netdev);
s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
struct net_device *netdev);
s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 8f461d5cee77..dc7fd5b70bc3 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -365,7 +365,7 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
else
fc.disable_fc_autoneg = false;
- if (pause->rx_pause && pause->tx_pause)
+ if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
fc.requested_mode = ixgbe_fc_full;
else if (pause->rx_pause && !pause->tx_pause)
fc.requested_mode = ixgbe_fc_rx_pause;
@@ -1458,8 +1458,8 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
struct ixgbe_tx_buffer *buf =
&(tx_ring->tx_buffer_info[i]);
if (buf->dma)
- pci_unmap_single(pdev, buf->dma, buf->length,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&pdev->dev, buf->dma,
+ buf->length, DMA_TO_DEVICE);
if (buf->skb)
dev_kfree_skb(buf->skb);
}
@@ -1470,22 +1470,22 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
struct ixgbe_rx_buffer *buf =
&(rx_ring->rx_buffer_info[i]);
if (buf->dma)
- pci_unmap_single(pdev, buf->dma,
+ dma_unmap_single(&pdev->dev, buf->dma,
IXGBE_RXBUFFER_2048,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
if (buf->skb)
dev_kfree_skb(buf->skb);
}
}
if (tx_ring->desc) {
- pci_free_consistent(pdev, tx_ring->size, tx_ring->desc,
- tx_ring->dma);
+ dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
tx_ring->desc = NULL;
}
if (rx_ring->desc) {
- pci_free_consistent(pdev, rx_ring->size, rx_ring->desc,
- rx_ring->dma);
+ dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -1520,8 +1520,9 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- if (!(tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
- &tx_ring->dma))) {
+ tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
+ if (!(tx_ring->desc)) {
ret_val = 2;
goto err_nomem;
}
@@ -1563,8 +1564,8 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
tx_ring->tx_buffer_info[i].skb = skb;
tx_ring->tx_buffer_info[i].length = skb->len;
tx_ring->tx_buffer_info[i].dma =
- pci_map_single(pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ dma_map_single(&pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
desc->read.buffer_addr =
cpu_to_le64(tx_ring->tx_buffer_info[i].dma);
desc->read.cmd_type_len = cpu_to_le32(skb->len);
@@ -1593,8 +1594,9 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
- if (!(rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
- &rx_ring->dma))) {
+ rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+ if (!(rx_ring->desc)) {
ret_val = 5;
goto err_nomem;
}
@@ -1661,8 +1663,8 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
skb_reserve(skb, NET_IP_ALIGN);
rx_ring->rx_buffer_info[i].skb = skb;
rx_ring->rx_buffer_info[i].dma =
- pci_map_single(pdev, skb->data, IXGBE_RXBUFFER_2048,
- PCI_DMA_FROMDEVICE);
+ dma_map_single(&pdev->dev, skb->data,
+ IXGBE_RXBUFFER_2048, DMA_FROM_DEVICE);
rx_desc->read.pkt_addr =
cpu_to_le64(rx_ring->rx_buffer_info[i].dma);
memset(skb->data, 0x00, skb->len);
@@ -1775,10 +1777,10 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
ixgbe_create_lbtest_frame(
tx_ring->tx_buffer_info[k].skb,
1024);
- pci_dma_sync_single_for_device(pdev,
+ dma_sync_single_for_device(&pdev->dev,
tx_ring->tx_buffer_info[k].dma,
tx_ring->tx_buffer_info[k].length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
if (unlikely(++k == tx_ring->count))
k = 0;
}
@@ -1789,10 +1791,10 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
good_cnt = 0;
do {
/* receive the sent packets */
- pci_dma_sync_single_for_cpu(pdev,
+ dma_sync_single_for_cpu(&pdev->dev,
rx_ring->rx_buffer_info[l].dma,
IXGBE_RXBUFFER_2048,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
ret_val = ixgbe_check_lbtest_frame(
rx_ring->rx_buffer_info[l].skb, 1024);
if (!ret_val)
@@ -2079,12 +2081,32 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
return 0;
}
+/*
+ * this function must be called before setting the new value of
+ * rx_itr_setting
+ */
+static bool ixgbe_reenable_rsc(struct ixgbe_adapter *adapter,
+ struct ethtool_coalesce *ec)
+{
+ /* check the old value and enable RSC if necessary */
+ if ((adapter->rx_itr_setting == 0) &&
+ (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) {
+ adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
+ adapter->netdev->features |= NETIF_F_LRO;
+ DPRINTK(PROBE, INFO, "rx-usecs set to %d, re-enabling RSC\n",
+ ec->rx_coalesce_usecs);
+ return true;
+ }
+ return false;
+}
+
static int ixgbe_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_q_vector *q_vector;
int i;
+ bool need_reset = false;
/* don't accept tx specific changes if we've got mixed RxTx vectors */
if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count
@@ -2095,11 +2117,20 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq;
if (ec->rx_coalesce_usecs > 1) {
+ u32 max_int;
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
+ max_int = IXGBE_MAX_RSC_INT_RATE;
+ else
+ max_int = IXGBE_MAX_INT_RATE;
+
/* check the limits */
- if ((1000000/ec->rx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
+ if ((1000000/ec->rx_coalesce_usecs > max_int) ||
(1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE))
return -EINVAL;
+ /* check the old value and enable RSC if necessary */
+ need_reset = ixgbe_reenable_rsc(adapter, ec);
+
/* store the value in ints/second */
adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs;
@@ -2108,6 +2139,9 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
/* clear the lower bit as its used for dynamic state */
adapter->rx_itr_setting &= ~1;
} else if (ec->rx_coalesce_usecs == 1) {
+ /* check the old value and enable RSC if necessary */
+ need_reset = ixgbe_reenable_rsc(adapter, ec);
+
/* 1 means dynamic mode */
adapter->rx_eitr_param = 20000;
adapter->rx_itr_setting = 1;
@@ -2116,14 +2150,30 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
* any other value means disable eitr, which is best
* served by setting the interrupt rate very high
*/
- if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
- adapter->rx_eitr_param = IXGBE_MAX_RSC_INT_RATE;
- else
- adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
+ adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
adapter->rx_itr_setting = 0;
+
+ /*
+ * if hardware RSC is enabled, disable it when
+ * setting low latency mode, to avoid errata, assuming
+ * that when the user set low latency mode they want
+ * it at the cost of anything else
+ */
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
+ adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
+ netdev->features &= ~NETIF_F_LRO;
+ DPRINTK(PROBE, INFO,
+ "rx-usecs set to 0, disabling RSC\n");
+
+ need_reset = true;
+ }
}
if (ec->tx_coalesce_usecs > 1) {
+ /*
+ * don't have to worry about max_int as above because
+ * tx vectors don't do hardware RSC (an rx function)
+ */
/* check the limits */
if ((1000000/ec->tx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
(1000000/ec->tx_coalesce_usecs < IXGBE_MIN_INT_RATE))
@@ -2167,6 +2217,18 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
ixgbe_write_eitr(q_vector);
}
+ /*
+ * do reset here at the end to make sure EITR==0 case is handled
+ * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
+ * also locks in RSC enable/disable which requires reset
+ */
+ if (need_reset) {
+ if (netif_running(netdev))
+ ixgbe_reinit_locked(adapter);
+ else
+ ixgbe_reset(adapter);
+ }
+
return 0;
}
@@ -2178,10 +2240,26 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
ethtool_op_set_flags(netdev, data);
/* if state changes we need to update adapter->flags and reset */
- if ((!!(data & ETH_FLAG_LRO)) !=
- (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) {
- adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
- need_reset = true;
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) {
+ /*
+ * cast both to bool and verify if they are set the same
+ * but only enable RSC if itr is non-zero, as
+ * itr=0 and RSC are mutually exclusive
+ */
+ if (((!!(data & ETH_FLAG_LRO)) !=
+ (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) &&
+ adapter->rx_itr_setting) {
+ adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ need_reset = true;
+ break;
+ default:
+ break;
+ }
+ } else if (!adapter->rx_itr_setting) {
+ netdev->features &= ~ETH_FLAG_LRO;
+ }
}
/*
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 6c00ee493a3b..d1a1868df817 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -175,6 +175,345 @@ static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
}
+struct ixgbe_reg_info {
+ u32 ofs;
+ char *name;
+};
+
+static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {
+
+ /* General Registers */
+ {IXGBE_CTRL, "CTRL"},
+ {IXGBE_STATUS, "STATUS"},
+ {IXGBE_CTRL_EXT, "CTRL_EXT"},
+
+ /* Interrupt Registers */
+ {IXGBE_EICR, "EICR"},
+
+ /* RX Registers */
+ {IXGBE_SRRCTL(0), "SRRCTL"},
+ {IXGBE_DCA_RXCTRL(0), "DRXCTL"},
+ {IXGBE_RDLEN(0), "RDLEN"},
+ {IXGBE_RDH(0), "RDH"},
+ {IXGBE_RDT(0), "RDT"},
+ {IXGBE_RXDCTL(0), "RXDCTL"},
+ {IXGBE_RDBAL(0), "RDBAL"},
+ {IXGBE_RDBAH(0), "RDBAH"},
+
+ /* TX Registers */
+ {IXGBE_TDBAL(0), "TDBAL"},
+ {IXGBE_TDBAH(0), "TDBAH"},
+ {IXGBE_TDLEN(0), "TDLEN"},
+ {IXGBE_TDH(0), "TDH"},
+ {IXGBE_TDT(0), "TDT"},
+ {IXGBE_TXDCTL(0), "TXDCTL"},
+
+ /* List Terminator */
+ {}
+};
+
+
+/*
+ * ixgbe_regdump - register printout routine
+ */
+static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
+{
+ int i = 0, j = 0;
+ char rname[16];
+ u32 regs[64];
+
+ switch (reginfo->ofs) {
+ case IXGBE_SRRCTL(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
+ break;
+ case IXGBE_DCA_RXCTRL(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+ break;
+ case IXGBE_RDLEN(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
+ break;
+ case IXGBE_RDH(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
+ break;
+ case IXGBE_RDT(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
+ break;
+ case IXGBE_RXDCTL(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+ break;
+ case IXGBE_RDBAL(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
+ break;
+ case IXGBE_RDBAH(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
+ break;
+ case IXGBE_TDBAL(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
+ break;
+ case IXGBE_TDBAH(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
+ break;
+ case IXGBE_TDLEN(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
+ break;
+ case IXGBE_TDH(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
+ break;
+ case IXGBE_TDT(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
+ break;
+ case IXGBE_TXDCTL(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
+ break;
+ default:
+ printk(KERN_INFO "%-15s %08x\n", reginfo->name,
+ IXGBE_READ_REG(hw, reginfo->ofs));
+ return;
+ }
+
+ for (i = 0; i < 8; i++) {
+ snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
+ printk(KERN_ERR "%-15s ", rname);
+ for (j = 0; j < 8; j++)
+ printk(KERN_CONT "%08x ", regs[i*8+j]);
+ printk(KERN_CONT "\n");
+ }
+
+}
+
+/*
+ * ixgbe_dump - Print registers, tx-rings and rx-rings
+ */
+static void ixgbe_dump(struct ixgbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_reg_info *reginfo;
+ int n = 0;
+ struct ixgbe_ring *tx_ring;
+ struct ixgbe_tx_buffer *tx_buffer_info;
+ union ixgbe_adv_tx_desc *tx_desc;
+ struct my_u0 { u64 a; u64 b; } *u0;
+ struct ixgbe_ring *rx_ring;
+ union ixgbe_adv_rx_desc *rx_desc;
+ struct ixgbe_rx_buffer *rx_buffer_info;
+ u32 staterr;
+ int i = 0;
+
+ if (!netif_msg_hw(adapter))
+ return;
+
+ /* Print netdevice Info */
+ if (netdev) {
+ dev_info(&adapter->pdev->dev, "Net device Info\n");
+ printk(KERN_INFO "Device Name state "
+ "trans_start last_rx\n");
+ printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
+ netdev->name,
+ netdev->state,
+ netdev->trans_start,
+ netdev->last_rx);
+ }
+
+ /* Print Registers */
+ dev_info(&adapter->pdev->dev, "Register Dump\n");
+ printk(KERN_INFO " Register Name Value\n");
+ for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
+ reginfo->name; reginfo++) {
+ ixgbe_regdump(hw, reginfo);
+ }
+
+ /* Print TX Ring Summary */
+ if (!netdev || !netif_running(netdev))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
+ printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ] "
+ "leng ntw timestamp\n");
+ for (n = 0; n < adapter->num_tx_queues; n++) {
+ tx_ring = adapter->tx_ring[n];
+ tx_buffer_info =
+ &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
+ printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
+ n, tx_ring->next_to_use, tx_ring->next_to_clean,
+ (u64)tx_buffer_info->dma,
+ tx_buffer_info->length,
+ tx_buffer_info->next_to_watch,
+ (u64)tx_buffer_info->time_stamp);
+ }
+
+ /* Print TX Rings */
+ if (!netif_msg_tx_done(adapter))
+ goto rx_ring_summary;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
+
+ /* Transmit Descriptor Formats
+ *
+ * Advanced Transmit Descriptor
+ * +--------------------------------------------------------------+
+ * 0 | Buffer Address [63:0] |
+ * +--------------------------------------------------------------+
+ * 8 | PAYLEN | PORTS | IDX | STA | DCMD |DTYP | RSV | DTALEN |
+ * +--------------------------------------------------------------+
+ * 63 46 45 40 39 36 35 32 31 24 23 20 19 0
+ */
+
+ for (n = 0; n < adapter->num_tx_queues; n++) {
+ tx_ring = adapter->tx_ring[n];
+ printk(KERN_INFO "------------------------------------\n");
+ printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
+ printk(KERN_INFO "------------------------------------\n");
+ printk(KERN_INFO "T [desc] [address 63:0 ] "
+ "[PlPOIdStDDt Ln] [bi->dma ] "
+ "leng ntw timestamp bi->skb\n");
+
+ for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+ tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ u0 = (struct my_u0 *)tx_desc;
+ printk(KERN_INFO "T [0x%03X] %016llX %016llX %016llX"
+ " %04X %3X %016llX %p", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ (u64)tx_buffer_info->dma,
+ tx_buffer_info->length,
+ tx_buffer_info->next_to_watch,
+ (u64)tx_buffer_info->time_stamp,
+ tx_buffer_info->skb);
+ if (i == tx_ring->next_to_use &&
+ i == tx_ring->next_to_clean)
+ printk(KERN_CONT " NTC/U\n");
+ else if (i == tx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == tx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+
+ if (netif_msg_pktdata(adapter) &&
+ tx_buffer_info->dma != 0)
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS, 16, 1,
+ phys_to_virt(tx_buffer_info->dma),
+ tx_buffer_info->length, true);
+ }
+ }
+
+ /* Print RX Rings Summary */
+rx_ring_summary:
+ dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
+ printk(KERN_INFO "Queue [NTU] [NTC]\n");
+ for (n = 0; n < adapter->num_rx_queues; n++) {
+ rx_ring = adapter->rx_ring[n];
+ printk(KERN_INFO "%5d %5X %5X\n", n,
+ rx_ring->next_to_use, rx_ring->next_to_clean);
+ }
+
+ /* Print RX Rings */
+ if (!netif_msg_rx_status(adapter))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+
+ /* Advanced Receive Descriptor (Read) Format
+ * 63 1 0
+ * +-----------------------------------------------------+
+ * 0 | Packet Buffer Address [63:1] |A0/NSE|
+ * +----------------------------------------------+------+
+ * 8 | Header Buffer Address [63:1] | DD |
+ * +-----------------------------------------------------+
+ *
+ *
+ * Advanced Receive Descriptor (Write-Back) Format
+ *
+ * 63 48 47 32 31 30 21 20 16 15 4 3 0
+ * +------------------------------------------------------+
+ * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS |
+ * | Checksum Ident | | | | Type | Type |
+ * +------------------------------------------------------+
+ * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+ * +------------------------------------------------------+
+ * 63 48 47 32 31 20 19 0
+ */
+ for (n = 0; n < adapter->num_rx_queues; n++) {
+ rx_ring = adapter->rx_ring[n];
+ printk(KERN_INFO "------------------------------------\n");
+ printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
+ printk(KERN_INFO "------------------------------------\n");
+ printk(KERN_INFO "R [desc] [ PktBuf A0] "
+ "[ HeadBuf DD] [bi->dma ] [bi->skb] "
+ "<-- Adv Rx Read format\n");
+ printk(KERN_INFO "RWB[desc] [PcsmIpSHl PtRs] "
+ "[vl er S cks ln] ---------------- [bi->skb] "
+ "<-- Adv Rx Write-Back format\n");
+
+ for (i = 0; i < rx_ring->count; i++) {
+ rx_buffer_info = &rx_ring->rx_buffer_info[i];
+ rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+ u0 = (struct my_u0 *)rx_desc;
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ if (staterr & IXGBE_RXD_STAT_DD) {
+ /* Descriptor Done */
+ printk(KERN_INFO "RWB[0x%03X] %016llX "
+ "%016llX ---------------- %p", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ rx_buffer_info->skb);
+ } else {
+ printk(KERN_INFO "R [0x%03X] %016llX "
+ "%016llX %016llX %p", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ (u64)rx_buffer_info->dma,
+ rx_buffer_info->skb);
+
+ if (netif_msg_pktdata(adapter)) {
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS, 16, 1,
+ phys_to_virt(rx_buffer_info->dma),
+ rx_ring->rx_buf_len, true);
+
+ if (rx_ring->rx_buf_len
+ < IXGBE_RXBUFFER_2048)
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS, 16, 1,
+ phys_to_virt(
+ rx_buffer_info->page_dma +
+ rx_buffer_info->page_offset
+ ),
+ PAGE_SIZE/2, true);
+ }
+ }
+
+ if (i == rx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == rx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+
+ }
+ }
+
+exit:
+ return;
+}
+
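Output from ixgbe_dump() is gated by the adapter's message level: netif_msg_hw() must be set for any output at all, and netif_msg_tx_done(), netif_msg_rx_status() and netif_msg_pktdata() progressively unlock the TX ring dump, the RX ring dump and the packet-payload hex dumps. These map to the standard NETIF_MSG_* bits, so they can be raised at runtime through ethtool's msglvl setting (for example "ethtool -s ethX msglvl 0x3c00", assuming the usual bit values for HW, TX_DONE, RX_STATUS and PKTDATA).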
static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
u32 ctrl_ext;
@@ -266,15 +605,15 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
{
if (tx_buffer_info->dma) {
if (tx_buffer_info->mapped_as_page)
- pci_unmap_page(adapter->pdev,
+ dma_unmap_page(&adapter->pdev->dev,
tx_buffer_info->dma,
tx_buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
else
- pci_unmap_single(adapter->pdev,
+ dma_unmap_single(&adapter->pdev->dev,
tx_buffer_info->dma,
tx_buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
tx_buffer_info->dma = 0;
}
if (tx_buffer_info->skb) {
@@ -721,10 +1060,10 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
bi->page_offset ^= (PAGE_SIZE / 2);
}
- bi->page_dma = pci_map_page(pdev, bi->page,
+ bi->page_dma = dma_map_page(&pdev->dev, bi->page,
bi->page_offset,
(PAGE_SIZE / 2),
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
if (!bi->skb) {
@@ -743,9 +1082,9 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
- skb->data));
bi->skb = skb;
- bi->dma = pci_map_single(pdev, skb->data,
+ bi->dma = dma_map_single(&pdev->dev, skb->data,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
/* Refresh the desc even if buffer_addrs didn't change because
* each write-back erases this info. */
@@ -886,16 +1225,17 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
*/
IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
else
- pci_unmap_single(pdev, rx_buffer_info->dma,
+ dma_unmap_single(&pdev->dev,
+ rx_buffer_info->dma,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
rx_buffer_info->dma = 0;
skb_put(skb, len);
}
if (upper_len) {
- pci_unmap_page(pdev, rx_buffer_info->page_dma,
- PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+ PAGE_SIZE / 2, DMA_FROM_DEVICE);
rx_buffer_info->page_dma = 0;
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
rx_buffer_info->page,
@@ -937,9 +1277,10 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count));
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
if (IXGBE_RSC_CB(skb)->dma) {
- pci_unmap_single(pdev, IXGBE_RSC_CB(skb)->dma,
+ dma_unmap_single(&pdev->dev,
+ IXGBE_RSC_CB(skb)->dma,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
IXGBE_RSC_CB(skb)->dma = 0;
}
if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
@@ -1190,6 +1531,15 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
itr_reg |= (itr_reg << 16);
} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
/*
+ * 82599 can support a value of zero, so allow it for the
+ * max interrupt rate, but there is an erratum where it
+ * cannot be zero with RSC
+ */
+ if (itr_reg == 8 &&
+ !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
+ itr_reg = 0;
+
+ /*
* set the WDIS bit to not clear the timer bits and cause an
* immediate assertion of the interrupt
*/
@@ -2372,7 +2722,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
- ixgbe_set_vmolr(hw, adapter->num_vfs);
+ ixgbe_set_vmolr(hw, adapter->num_vfs, true);
}
/* Program MRQC for the distribution of queues */
@@ -2482,12 +2832,74 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
}
+/**
+ * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
+ * @adapter: driver data
+ */
+static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ int i, j;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ vlnctrl &= ~(IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE);
+ vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+ break;
+ case ixgbe_mac_82599EB:
+ vlnctrl &= ~IXGBE_VLNCTRL_VFE;
+ vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ j = adapter->rx_ring[i]->reg_idx;
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
+ vlnctrl &= ~IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
+ * @adapter: driver data
+ */
+static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ int i, j;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
+ vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+ break;
+ case ixgbe_mac_82599EB:
+ vlnctrl |= IXGBE_VLNCTRL_VFE;
+ vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ j = adapter->rx_ring[i]->reg_idx;
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
+ vlnctrl |= IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
static void ixgbe_vlan_rx_register(struct net_device *netdev,
struct vlan_group *grp)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- u32 ctrl;
- int i, j;
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_disable(adapter);
@@ -2498,25 +2910,7 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
* still receive traffic from a DCB-enabled host even if we're
* not in DCB mode.
*/
- ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
-
- /* Disable CFI check */
- ctrl &= ~IXGBE_VLNCTRL_CFIEN;
-
- /* enable VLAN tag stripping */
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
- ctrl |= IXGBE_VLNCTRL_VME;
- } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- for (i = 0; i < adapter->num_rx_queues; i++) {
- u32 ctrl;
- j = adapter->rx_ring[i]->reg_idx;
- ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j));
- ctrl |= IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl);
- }
- }
-
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
+ ixgbe_vlan_filter_enable(adapter);
ixgbe_vlan_rx_add_vid(netdev, 0);
@@ -2538,21 +2932,6 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
}
}
-static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
-{
- struct dev_mc_list *mc_ptr;
- u8 *addr = *mc_addr_ptr;
- *vmdq = 0;
-
- mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
- if (mc_ptr->next)
- *mc_addr_ptr = mc_ptr->next->dmi_addr;
- else
- *mc_addr_ptr = NULL;
-
- return addr;
-}
-
/**
* ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
* @netdev: network interface device structure
@@ -2566,19 +2945,17 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- u32 fctrl, vlnctrl;
- u8 *addr_list = NULL;
- int addr_count = 0;
+ u32 fctrl;
/* Check for Promiscuous and All Multicast modes */
fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
- vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
if (netdev->flags & IFF_PROMISC) {
hw->addr_ctrl.user_set_promisc = 1;
fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
- vlnctrl &= ~IXGBE_VLNCTRL_VFE;
+ /* don't hardware filter vlans in promisc mode */
+ ixgbe_vlan_filter_disable(adapter);
} else {
if (netdev->flags & IFF_ALLMULTI) {
fctrl |= IXGBE_FCTRL_MPE;
@@ -2586,22 +2963,18 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
} else {
fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
}
- vlnctrl |= IXGBE_VLNCTRL_VFE;
+ ixgbe_vlan_filter_enable(adapter);
hw->addr_ctrl.user_set_promisc = 0;
}
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
/* reprogram secondary unicast list */
hw->mac.ops.update_uc_addr_list(hw, netdev);
/* reprogram multicast list */
- addr_count = netdev_mc_count(netdev);
- if (addr_count)
- addr_list = netdev->mc_list->dmi_addr;
- hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
- ixgbe_addr_list_itr);
+ hw->mac.ops.update_mc_addr_list(hw, netdev);
+
if (adapter->num_vfs)
ixgbe_restore_vf_multicasts(adapter);
}
@@ -2661,7 +3034,7 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 txdctl, vlnctrl;
+ u32 txdctl;
int i, j;
ixgbe_dcb_check_config(&adapter->dcb_cfg);
@@ -2679,22 +3052,8 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
}
/* Enable VLAN tag insert/strip */
- vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- if (hw->mac.type == ixgbe_mac_82598EB) {
- vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
- vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
- } else if (hw->mac.type == ixgbe_mac_82599EB) {
- vlnctrl |= IXGBE_VLNCTRL_VFE;
- vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
- for (i = 0; i < adapter->num_rx_queues; i++) {
- j = adapter->rx_ring[i]->reg_idx;
- vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
- vlnctrl |= IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
- }
- }
+ ixgbe_vlan_filter_enable(adapter);
+
hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
}
@@ -2927,8 +3286,13 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++) {
j = adapter->tx_ring[i]->reg_idx;
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
- /* enable WTHRESH=8 descriptors, to encourage burst writeback */
- txdctl |= (8 << 16);
+ if (adapter->rx_itr_setting == 0) {
+ /* cannot set wthresh when itr==0 */
+ txdctl &= ~0x007F0000;
+ } else {
+ /* enable WTHRESH=8 descriptors, to encourage burst writeback */
+ txdctl |= (8 << 16);
+ }
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
}
@@ -3131,9 +3495,9 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
rx_buffer_info = &rx_ring->rx_buffer_info[i];
if (rx_buffer_info->dma) {
- pci_unmap_single(pdev, rx_buffer_info->dma,
+ dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
rx_buffer_info->dma = 0;
}
if (rx_buffer_info->skb) {
@@ -3142,9 +3506,10 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
do {
struct sk_buff *this = skb;
if (IXGBE_RSC_CB(this)->dma) {
- pci_unmap_single(pdev, IXGBE_RSC_CB(this)->dma,
+ dma_unmap_single(&pdev->dev,
+ IXGBE_RSC_CB(this)->dma,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
IXGBE_RSC_CB(this)->dma = 0;
}
skb = skb->prev;
@@ -3154,8 +3519,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
if (!rx_buffer_info->page)
continue;
if (rx_buffer_info->page_dma) {
- pci_unmap_page(pdev, rx_buffer_info->page_dma,
- PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+ PAGE_SIZE / 2, DMA_FROM_DEVICE);
rx_buffer_info->page_dma = 0;
}
put_page(rx_buffer_info->page);
@@ -3268,22 +3633,23 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
- netif_tx_disable(netdev);
-
IXGBE_WRITE_FLUSH(hw);
msleep(10);
netif_tx_stop_all_queues(netdev);
- ixgbe_irq_disable(adapter);
-
- ixgbe_napi_disable_all(adapter);
-
clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
del_timer_sync(&adapter->sfp_timer);
del_timer_sync(&adapter->watchdog_timer);
cancel_work_sync(&adapter->watchdog_task);
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ ixgbe_irq_disable(adapter);
+
+ ixgbe_napi_disable_all(adapter);
+
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
cancel_work_sync(&adapter->fdir_reinit_task);
@@ -3301,8 +3667,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
~IXGBE_DMATXCTL_TE));
- netif_carrier_off(netdev);
-
/* clear n-tuple filters that are cached */
ethtool_ntuple_flush(netdev);
@@ -3379,6 +3743,8 @@ static void ixgbe_reset_task(struct work_struct *work)
adapter->tx_timeout_count++;
+ ixgbe_dump(adapter);
+ netdev_err(adapter->netdev, "Reset adapter\n");
ixgbe_reinit_locked(adapter);
}
@@ -3479,12 +3845,12 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
adapter->num_tx_queues = 1;
#ifdef CONFIG_IXGBE_DCB
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- DPRINTK(PROBE, INFO, "FCoE enabled with DCB \n");
+ DPRINTK(PROBE, INFO, "FCoE enabled with DCB\n");
ixgbe_set_dcb_queues(adapter);
}
#endif
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
- DPRINTK(PROBE, INFO, "FCoE enabled with RSS \n");
+ DPRINTK(PROBE, INFO, "FCoE enabled with RSS\n");
if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
ixgbe_set_fdir_queues(adapter);
@@ -4381,8 +4747,8 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
- &tx_ring->dma);
+ tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc)
goto err;
@@ -4452,7 +4818,8 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma);
+ rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc) {
DPRINTK(PROBE, ERR,
@@ -4513,7 +4880,8 @@ void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
vfree(tx_ring->tx_buffer_info);
tx_ring->tx_buffer_info = NULL;
- pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+ dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
tx_ring->desc = NULL;
}
@@ -4550,7 +4918,8 @@ void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
- pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+ dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -5100,7 +5469,7 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)
&(adapter->tx_ring[i]->reinit_state));
} else {
DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
- "ignored adding FDIR ATR filters \n");
+ "ignored adding FDIR ATR filters\n");
}
/* Done FDIR Re-initialization, enable transmits */
netif_tx_start_all_queues(adapter->netdev);
@@ -5420,10 +5789,10 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
tx_buffer_info->length = size;
tx_buffer_info->mapped_as_page = false;
- tx_buffer_info->dma = pci_map_single(pdev,
+ tx_buffer_info->dma = dma_map_single(&pdev->dev,
skb->data + offset,
- size, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+ size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
goto dma_error;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
@@ -5456,12 +5825,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
tx_buffer_info->length = size;
- tx_buffer_info->dma = pci_map_page(adapter->pdev,
+ tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
frag->page,
offset, size,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
tx_buffer_info->mapped_as_page = true;
- if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+ if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
goto dma_error;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
@@ -5942,6 +6311,10 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
.ndo_do_ioctl = ixgbe_ioctl,
+ .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
+ .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
+ .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw,
+ .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ixgbe_netpoll,
#endif
@@ -6039,13 +6412,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
if (err)
return err;
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
- !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+ !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No usable DMA "
"configuration, aborting\n");
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 1c1efd386956..d6d5b843d625 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -475,7 +475,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
msleep(edata);
break;
case IXGBE_DATA_NL:
- hw_dbg(hw, "DATA: \n");
+ hw_dbg(hw, "DATA:\n");
data_offset++;
hw->eeprom.ops.read(hw, data_offset++,
&phy_offset);
@@ -491,7 +491,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
break;
case IXGBE_CONTROL_NL:
data_offset++;
- hw_dbg(hw, "CONTROL: \n");
+ hw_dbg(hw, "CONTROL:\n");
if (edata == IXGBE_CONTROL_EOL_NL) {
hw_dbg(hw, "EOL\n");
end_data = true;
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index d4cd20f30199..f6cee94ec8e8 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -48,7 +48,11 @@ int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
int entries, u16 *hash_list, u32 vf)
{
struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
+ struct ixgbe_hw *hw = &adapter->hw;
int i;
+ u32 vector_bit;
+ u32 vector_reg;
+ u32 mta_reg;
/* only so many hash values supported */
entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
@@ -68,8 +72,13 @@ int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
vfinfo->vf_mc_hashes[i] = hash_list[i];
}
- /* Flush and reset the mta with the new values */
- ixgbe_set_rx_mode(adapter->netdev);
+ for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
+ vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
+ vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
+ mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
+ mta_reg |= (1 << vector_bit);
+ IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
+ }
return 0;
}
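
The loop above replaces the full ixgbe_set_rx_mode() rebuild with direct MTA programming: each multicast hash value selects one bit in the 128-entry, 32-bit-wide MTA array, the upper bits picking the register and the low five bits picking the bit within it. A small sketch of that mapping against a plain array follows; mta_set() is a hypothetical stand-in for the IXGBE_MTA read-modify-write done in the hunk above:

#include <stdint.h>

/* Set the MTA bit addressed by a 12-bit multicast hash value. */
static void mta_set(uint32_t mta[128], uint16_t hash)
{
	uint32_t vector_reg = (hash >> 5) & 0x7F;  /* which of the 128 words */
	uint32_t vector_bit = hash & 0x1F;         /* which bit in that word */

	mta[vector_reg] |= (uint32_t)1 << vector_bit;
}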
@@ -98,38 +107,51 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf)
{
- u32 ctrl;
-
- /* Check if global VLAN already set, if not set it */
- ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
- if (!(ctrl & IXGBE_VLNCTRL_VFE)) {
- /* enable VLAN tag insert/strip */
- ctrl |= IXGBE_VLNCTRL_VFE;
- ctrl &= ~IXGBE_VLNCTRL_CFIEN;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
- }
-
return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
}
-void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf)
+void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
{
u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
- vmolr |= (IXGBE_VMOLR_AUPE |
- IXGBE_VMOLR_ROMPE |
+ vmolr |= (IXGBE_VMOLR_ROMPE |
IXGBE_VMOLR_ROPE |
IXGBE_VMOLR_BAM);
+ if (aupe)
+ vmolr |= IXGBE_VMOLR_AUPE;
+ else
+ vmolr &= ~IXGBE_VMOLR_AUPE;
IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
}
+static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (vid)
+ IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf),
+ (vid | IXGBE_VMVIR_VLANA_DEFAULT));
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
+}
+
inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
/* reset offloads to defaults */
- ixgbe_set_vmolr(hw, vf);
-
+ if (adapter->vfinfo[vf].pf_vlan) {
+ ixgbe_set_vf_vlan(adapter, true,
+ adapter->vfinfo[vf].pf_vlan, vf);
+ ixgbe_set_vmvir(adapter,
+ (adapter->vfinfo[vf].pf_vlan |
+ (adapter->vfinfo[vf].pf_qos <<
+ VLAN_PRIO_SHIFT)), vf);
+ ixgbe_set_vmolr(hw, vf, false);
+ } else {
+ ixgbe_set_vmvir(adapter, 0, vf);
+ ixgbe_set_vmolr(hw, vf, true);
+ }
/* reset multicast table array for vf */
adapter->vfinfo[vf].num_vf_mc_hashes = 0;
@@ -263,10 +285,12 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
case IXGBE_VF_SET_MAC_ADDR:
{
u8 *new_mac = ((u8 *)(&msgbuf[1]));
- if (is_valid_ether_addr(new_mac))
+ if (is_valid_ether_addr(new_mac) &&
+ !adapter->vfinfo[vf].pf_set_mac)
ixgbe_set_vf_mac(adapter, vf, new_mac);
else
- retval = -1;
+ ixgbe_set_vf_mac(adapter,
+ vf, adapter->vfinfo[vf].vf_mac_addresses);
}
break;
case IXGBE_VF_SET_MULTICAST:
@@ -360,3 +384,76 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
}
}
+int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs))
+ return -EINVAL;
+ adapter->vfinfo[vf].pf_set_mac = true;
+ dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
+ dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
+ " change effective.");
+ if (test_bit(__IXGBE_DOWN, &adapter->state)) {
+ dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
+ " but the PF device is not up.\n");
+ dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
+ " attempting to use the VF device.\n");
+ }
+ return ixgbe_set_vf_mac(adapter, vf, mac);
+}
+
+int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
+{
+ int err = 0;
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
+ return -EINVAL;
+ if (vlan || qos) {
+ err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
+ if (err)
+ goto out;
+ ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
+ ixgbe_set_vmolr(&adapter->hw, vf, false);
+ adapter->vfinfo[vf].pf_vlan = vlan;
+ adapter->vfinfo[vf].pf_qos = qos;
+ dev_info(&adapter->pdev->dev,
+ "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
+ if (test_bit(__IXGBE_DOWN, &adapter->state)) {
+ dev_warn(&adapter->pdev->dev,
+ "The VF VLAN has been set,"
+ " but the PF device is not up.\n");
+ dev_warn(&adapter->pdev->dev,
+ "Bring the PF device up before"
+ " attempting to use the VF device.\n");
+ }
+ } else {
+ err = ixgbe_set_vf_vlan(adapter, false,
+ adapter->vfinfo[vf].pf_vlan, vf);
+ ixgbe_set_vmvir(adapter, vlan, vf);
+ ixgbe_set_vmolr(&adapter->hw, vf, true);
+ adapter->vfinfo[vf].pf_vlan = 0;
+ adapter->vfinfo[vf].pf_qos = 0;
+ }
+out:
+ return err;
+}
+
+int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
+{
+ return -EOPNOTSUPP;
+}
+
+int ixgbe_ndo_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivi)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ if (vf >= adapter->num_vfs)
+ return -EINVAL;
+ ivi->vf = vf;
+ memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
+ ivi->tx_rate = 0;
+ ivi->vlan = adapter->vfinfo[vf].pf_vlan;
+ ivi->qos = adapter->vfinfo[vf].pf_qos;
+ return 0;
+}
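
Once these hooks are wired into ixgbe_netdev_ops (see the ixgbe_main.c hunk earlier in this patch), the per-VF MAC and port VLAN can be managed from userspace over rtnetlink; with an iproute2 build that has VF support this is typically done with commands along the lines of "ip link set <pf> vf <n> mac <addr>" and "ip link set <pf> vf <n> vlan <id> qos <prio>", and the settings are read back via ixgbe_ndo_get_vf_config through "ip link show <pf>" (the exact iproute2 syntax is an assumption here, not part of the patch).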
diff --git a/drivers/net/ixgbe/ixgbe_sriov.h b/drivers/net/ixgbe/ixgbe_sriov.h
index 51d1106c45a1..184730ecdfb6 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ixgbe/ixgbe_sriov.h
@@ -32,7 +32,7 @@ int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
int entries, u16 *hash_list, u32 vf);
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf);
-void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf);
+void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe);
void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf);
void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf);
void ixgbe_msg_task(struct ixgbe_adapter *adapter);
@@ -42,6 +42,12 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
void ixgbe_dump_registers(struct ixgbe_adapter *adapter);
+int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
+int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
+ u8 qos);
+int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
+int ixgbe_ndo_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivi);
#endif /* _IXGBE_SRIOV_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 534affcc38ca..4277cbbb8126 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -219,6 +219,7 @@
#define IXGBE_MTQC 0x08120
#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */
#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */
+#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */
#define IXGBE_VT_CTL 0x051B0
#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4))
#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4))
@@ -1311,6 +1312,10 @@
#define IXGBE_VLVF_ENTRIES 64
#define IXGBE_VLVF_VLANID_MASK 0x00000FFF
+/* Per VF Port VLAN insertion rules */
+#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
+#define IXGBE_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
+
#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */
/* STATUS Bit Masks */
@@ -2419,8 +2424,7 @@ struct ixgbe_mac_operations {
s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
s32 (*init_rx_addrs)(struct ixgbe_hw *);
s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct net_device *);
- s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
- ixgbe_mc_addr_itr);
+ s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
s32 (*enable_mc)(struct ixgbe_hw *);
s32 (*disable_mc)(struct ixgbe_hw *);
s32 (*clear_vfta)(struct ixgbe_hw *);
diff --git a/drivers/net/ixgbevf/defines.h b/drivers/net/ixgbevf/defines.h
index c44fdb05447a..ca2c81f49a05 100644
--- a/drivers/net/ixgbevf/defines.h
+++ b/drivers/net/ixgbevf/defines.h
@@ -41,11 +41,13 @@ typedef u32 ixgbe_link_speed;
#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
-#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
-#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
-#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */
-#define IXGBE_LINKS_UP 0x40000000
-#define IXGBE_LINKS_SPEED 0x20000000
+#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
+#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
+#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */
+#define IXGBE_LINKS_UP 0x40000000
+#define IXGBE_LINKS_SPEED_82599 0x30000000
+#define IXGBE_LINKS_SPEED_10G_82599 0x30000000
+#define IXGBE_LINKS_SPEED_1G_82599 0x20000000
/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index 0cd6202dfacc..40f47b8fe417 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -139,15 +139,15 @@ static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
{
if (tx_buffer_info->dma) {
if (tx_buffer_info->mapped_as_page)
- pci_unmap_page(adapter->pdev,
+ dma_unmap_page(&adapter->pdev->dev,
tx_buffer_info->dma,
tx_buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
else
- pci_unmap_single(adapter->pdev,
+ dma_unmap_single(&adapter->pdev->dev,
tx_buffer_info->dma,
tx_buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
tx_buffer_info->dma = 0;
}
if (tx_buffer_info->skb) {
@@ -416,10 +416,10 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
bi->page_offset ^= (PAGE_SIZE / 2);
}
- bi->page_dma = pci_map_page(pdev, bi->page,
+ bi->page_dma = dma_map_page(&pdev->dev, bi->page,
bi->page_offset,
(PAGE_SIZE / 2),
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
skb = bi->skb;
@@ -442,9 +442,9 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
bi->skb = skb;
}
if (!bi->dma) {
- bi->dma = pci_map_single(pdev, skb->data,
+ bi->dma = dma_map_single(&pdev->dev, skb->data,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
/* Refresh the desc even if buffer_addrs didn't change because
* each write-back erases this info. */
@@ -536,16 +536,16 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
rx_buffer_info->skb = NULL;
if (rx_buffer_info->dma) {
- pci_unmap_single(pdev, rx_buffer_info->dma,
+ dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
rx_buffer_info->dma = 0;
skb_put(skb, len);
}
if (upper_len) {
- pci_unmap_page(pdev, rx_buffer_info->page_dma,
- PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+ PAGE_SIZE / 2, DMA_FROM_DEVICE);
rx_buffer_info->page_dma = 0;
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
rx_buffer_info->page,
@@ -604,14 +604,13 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
* packets not getting split correctly
*/
if (staterr & IXGBE_RXD_STAT_LB) {
- u32 header_fixup_len = skb->len - skb->data_len;
+ u32 header_fixup_len = skb_headlen(skb);
if (header_fixup_len < 14)
skb_push(skb, header_fixup_len);
}
skb->protocol = eth_type_trans(skb, adapter->netdev);
ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
- adapter->netdev->last_rx = jiffies;
next_desc:
rx_desc->wb.upper.status_error = 0;
@@ -962,12 +961,28 @@ static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);
+ if (!hw->mbx.ops.check_for_ack(hw)) {
+ /*
+ * checking for the ack clears the PFACK bit. Place
+ * it back in the v2p_mailbox cache so that anyone
+ * polling for an ack will not miss it. Also
+ * avoid the read below because the code to read
+ * the mailbox will also clear the ack bit. This was
+ * causing lost acks. Just cache the bit and exit
+ * the IRQ handler.
+ */
+ hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
+ goto out;
+ }
+
+ /* Not an ack interrupt, go ahead and read the message */
hw->mbx.ops.read(hw, &msg, 1);
if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
mod_timer(&adapter->watchdog_timer,
round_jiffies(jiffies + 1));
+out:
return IRQ_HANDLED;
}
@@ -1496,22 +1511,6 @@ static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
}
}
-static u8 *ixgbevf_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr,
- u32 *vmdq)
-{
- struct dev_mc_list *mc_ptr;
- u8 *addr = *mc_addr_ptr;
- *vmdq = 0;
-
- mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
- if (mc_ptr->next)
- *mc_addr_ptr = mc_ptr->next->dmi_addr;
- else
- *mc_addr_ptr = NULL;
-
- return addr;
-}
-
/**
* ixgbevf_set_rx_mode - Multicast set
* @netdev: network interface device structure
@@ -1524,16 +1523,10 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- u8 *addr_list = NULL;
- int addr_count = 0;
/* reprogram multicast list */
- addr_count = netdev_mc_count(netdev);
- if (addr_count)
- addr_list = netdev->mc_list->dmi_addr;
if (hw->mac.ops.update_mc_addr_list)
- hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
- ixgbevf_addr_list_itr);
+ hw->mac.ops.update_mc_addr_list(hw, netdev);
}
static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
@@ -1744,9 +1737,9 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
rx_buffer_info = &rx_ring->rx_buffer_info[i];
if (rx_buffer_info->dma) {
- pci_unmap_single(pdev, rx_buffer_info->dma,
+ dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
rx_buffer_info->dma = 0;
}
if (rx_buffer_info->skb) {
@@ -1760,8 +1753,8 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
}
if (!rx_buffer_info->page)
continue;
- pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+ PAGE_SIZE / 2, DMA_FROM_DEVICE);
rx_buffer_info->page_dma = 0;
put_page(rx_buffer_info->page);
rx_buffer_info->page = NULL;
@@ -2418,9 +2411,9 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
if (link_up) {
if (!netif_carrier_ok(netdev)) {
- hw_dbg(&adapter->hw, "NIC Link is Up %s, ",
- ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
- "10 Gbps\n" : "1 Gbps\n"));
+ hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n",
+ (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
+ 10 : 1);
netif_carrier_on(netdev);
netif_tx_wake_all_queues(netdev);
} else {
@@ -2468,7 +2461,8 @@ void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
vfree(tx_ring->tx_buffer_info);
tx_ring->tx_buffer_info = NULL;
- pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+ dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
tx_ring->desc = NULL;
}
@@ -2513,8 +2507,8 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
- &tx_ring->dma);
+ tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc)
goto err;
@@ -2584,8 +2578,8 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
- &rx_ring->dma);
+ rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc) {
hw_dbg(&adapter->hw,
@@ -2646,7 +2640,8 @@ void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
- pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+ dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -2958,10 +2953,10 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
tx_buffer_info->length = size;
tx_buffer_info->mapped_as_page = false;
- tx_buffer_info->dma = pci_map_single(adapter->pdev,
+ tx_buffer_info->dma = dma_map_single(&adapter->pdev->dev,
skb->data + offset,
- size, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+ size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
goto dma_error;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
@@ -2987,13 +2982,13 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
tx_buffer_info->length = size;
- tx_buffer_info->dma = pci_map_page(adapter->pdev,
+ tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
frag->page,
offset,
size,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
tx_buffer_info->mapped_as_page = true;
- if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+ if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
goto dma_error;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
@@ -3189,8 +3184,6 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first),
skb->len, hdr_len);
- netdev->trans_start = jiffies;
-
ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
return NETDEV_TX_OK;
@@ -3334,14 +3327,14 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
if (err)
return err;
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
- !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+ !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No usable DMA "
"configuration, aborting\n");
@@ -3482,7 +3475,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
hw_dbg(hw, "MAC: %d\n", hw->mac.type);
- hw_dbg(hw, "LRO is disabled \n");
+ hw_dbg(hw, "LRO is disabled\n");
hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
cards_found++;
diff --git a/drivers/net/ixgbevf/vf.c b/drivers/net/ixgbevf/vf.c
index 4b5dec0ec140..f6f929958ba0 100644
--- a/drivers/net/ixgbevf/vf.c
+++ b/drivers/net/ixgbevf/vf.c
@@ -252,22 +252,18 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
/**
* ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
* @hw: pointer to the HW structure
- * @mc_addr_list: array of multicast addresses to program
- * @mc_addr_count: number of multicast addresses to program
- * @next: caller supplied function to return next address in list
+ * @netdev: pointer to net device structure
*
* Updates the Multicast Table Array.
**/
-static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
- u32 mc_addr_count,
- ixgbe_mc_addr_itr next)
+static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
+ struct net_device *netdev)
{
+ struct netdev_hw_addr *ha;
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
u16 *vector_list = (u16 *)&msgbuf[1];
- u32 vector;
u32 cnt, i;
- u32 vmdq;
/* Each entry in the list uses 1 16 bit word. We have 30
* 16 bit words available in our HW msg buffer (minus 1 for the
@@ -278,13 +274,17 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
* addresses except for in large enterprise network environments.
*/
- cnt = (mc_addr_count > 30) ? 30 : mc_addr_count;
+ cnt = netdev_mc_count(netdev);
+ if (cnt > 30)
+ cnt = 30;
msgbuf[0] = IXGBE_VF_SET_MULTICAST;
msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
- for (i = 0; i < cnt; i++) {
- vector = ixgbevf_mta_vector(hw, next(hw, &mc_addr_list, &vmdq));
- vector_list[i] = vector;
+ i = 0;
+ netdev_for_each_mc_addr(ha, netdev) {
+ if (i == cnt)
+ break;
+ vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
}
mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
@@ -359,7 +359,8 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
else
*link_up = false;
- if (links_reg & IXGBE_LINKS_SPEED)
+ if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+ IXGBE_LINKS_SPEED_10G_82599)
*speed = IXGBE_LINK_SPEED_10GB_FULL;
else
*speed = IXGBE_LINK_SPEED_1GB_FULL;
diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h
index 1f31b052d4b4..94b750b8874f 100644
--- a/drivers/net/ixgbevf/vf.h
+++ b/drivers/net/ixgbevf/vf.h
@@ -32,6 +32,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
+#include <linux/netdevice.h>
#include "defines.h"
#include "regs.h"
@@ -62,8 +63,7 @@ struct ixgbe_mac_operations {
/* RAR, Multicast, VLAN */
s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32);
s32 (*init_rx_addrs)(struct ixgbe_hw *);
- s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
- ixgbe_mc_addr_itr);
+ s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
s32 (*enable_mc)(struct ixgbe_hw *);
s32 (*disable_mc)(struct ixgbe_hw *);
s32 (*clear_vfta)(struct ixgbe_hw *);
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index d5932ca3e27d..78ddd8b79e7e 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -64,8 +64,6 @@ static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev)
ixp2000_reg_write(RING_TX_PENDING,
TX_BUF_DESC_BASE + (entry * sizeof(struct ixpdev_tx_desc)));
- dev->trans_start = jiffies;
-
local_irq_save(flags);
ip->tx_queue_entries++;
if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN)
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index b705ad3a53a7..4e868eeac89e 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -2010,12 +2010,12 @@ jme_set_multi(struct net_device *netdev)
} else if (netdev->flags & IFF_ALLMULTI) {
jme->reg_rxmcs |= RXMCS_ALLMULFRAME;
} else if (netdev->flags & IFF_MULTICAST) {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
int bit_nr;
jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
- netdev_for_each_mc_addr(mclist, netdev) {
- bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F;
+ netdev_for_each_mc_addr(ha, netdev) {
+ bit_nr = ether_crc(ETH_ALEN, ha->addr) & 0x3F;
mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
}
diff --git a/drivers/net/korina.c b/drivers/net/korina.c
index 300c2249812d..26bf1b76b997 100644
--- a/drivers/net/korina.c
+++ b/drivers/net/korina.c
@@ -482,7 +482,7 @@ static void korina_multicast_list(struct net_device *dev)
{
struct korina_private *lp = netdev_priv(dev);
unsigned long flags;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
u32 recognise = ETH_ARC_AB; /* always accept broadcasts */
int i;
@@ -502,8 +502,8 @@ static void korina_multicast_list(struct net_device *dev)
for (i = 0; i < 4; i++)
hash_table[i] = 0;
- netdev_for_each_mc_addr(dmi, dev) {
- char *addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ char *addrs = ha->addr;
if (!(*addrs & 1))
continue;
@@ -1135,7 +1135,7 @@ static int korina_probe(struct platform_device *pdev)
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_regs");
dev->base_addr = r->start;
- lp->eth_regs = ioremap_nocache(r->start, r->end - r->start);
+ lp->eth_regs = ioremap_nocache(r->start, resource_size(r));
if (!lp->eth_regs) {
printk(KERN_ERR DRV_NAME ": cannot remap registers\n");
rc = -ENXIO;
@@ -1143,7 +1143,7 @@ static int korina_probe(struct platform_device *pdev)
}
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_rx");
- lp->rx_dma_regs = ioremap_nocache(r->start, r->end - r->start);
+ lp->rx_dma_regs = ioremap_nocache(r->start, resource_size(r));
if (!lp->rx_dma_regs) {
printk(KERN_ERR DRV_NAME ": cannot remap Rx DMA registers\n");
rc = -ENXIO;
@@ -1151,7 +1151,7 @@ static int korina_probe(struct platform_device *pdev)
}
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_tx");
- lp->tx_dma_regs = ioremap_nocache(r->start, r->end - r->start);
+ lp->tx_dma_regs = ioremap_nocache(r->start, resource_size(r));
if (!lp->tx_dma_regs) {
printk(KERN_ERR DRV_NAME ": cannot remap Tx DMA registers\n");
rc = -ENXIO;
diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c
index 5c45cb58d023..f852ab3ae9cf 100644
--- a/drivers/net/ks8842.c
+++ b/drivers/net/ks8842.c
@@ -1,5 +1,5 @@
/*
- * ks8842_main.c timberdale KS8842 ethernet driver
+ * ks8842.c timberdale KS8842 ethernet driver
* Copyright (c) 2009 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
@@ -20,12 +20,15 @@
* The Micrel KS8842 behind the timberdale FPGA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <linux/ks8842.h>
#define DRV_NAME "ks8842"
@@ -302,6 +305,20 @@ static void ks8842_read_mac_addr(struct ks8842_adapter *adapter, u8 *dest)
ks8842_write16(adapter, 39, mac, REG_MACAR3);
}
+static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, u8 *mac)
+{
+ unsigned long flags;
+ unsigned i;
+
+ spin_lock_irqsave(&adapter->lock, flags);
+ for (i = 0; i < ETH_ALEN; i++) {
+ ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i);
+ ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1],
+ REG_MACAR1 + i);
+ }
+ spin_unlock_irqrestore(&adapter->lock, flags);
+}
+
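Factoring the MAC write out into ks8842_write_mac_addr() lets the driver re-program the address after every ks8842_reset_hw(), which is why the helper is now also called from ks8842_open() and ks8842_tx_timeout() below as well as from ks8842_set_mac(); the intent appears to be that a chip reset would otherwise leave the hardware filter at its default address while netdev->dev_addr still holds the user-assigned one.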
static inline u16 ks8842_tx_fifo_space(struct ks8842_adapter *adapter)
{
return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff;
@@ -520,13 +537,14 @@ static int ks8842_open(struct net_device *netdev)
/* reset the HW */
ks8842_reset_hw(adapter);
+ ks8842_write_mac_addr(adapter, netdev->dev_addr);
+
ks8842_update_link_status(netdev, adapter);
err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME,
adapter);
if (err) {
- printk(KERN_ERR "Failed to request IRQ: %d: %d\n",
- adapter->irq, err);
+ pr_err("Failed to request IRQ: %d: %d\n", adapter->irq, err);
return err;
}
@@ -567,10 +585,8 @@ static netdev_tx_t ks8842_xmit_frame(struct sk_buff *skb,
static int ks8842_set_mac(struct net_device *netdev, void *p)
{
struct ks8842_adapter *adapter = netdev_priv(netdev);
- unsigned long flags;
struct sockaddr *addr = p;
char *mac = (u8 *)addr->sa_data;
- int i;
dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__);
@@ -579,13 +595,7 @@ static int ks8842_set_mac(struct net_device *netdev, void *p)
memcpy(netdev->dev_addr, mac, netdev->addr_len);
- spin_lock_irqsave(&adapter->lock, flags);
- for (i = 0; i < ETH_ALEN; i++) {
- ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i);
- ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1],
- REG_MACAR1 + i);
- }
- spin_unlock_irqrestore(&adapter->lock, flags);
+ ks8842_write_mac_addr(adapter, mac);
return 0;
}
@@ -604,6 +614,8 @@ static void ks8842_tx_timeout(struct net_device *netdev)
ks8842_reset_hw(adapter);
+ ks8842_write_mac_addr(adapter, netdev->dev_addr);
+
ks8842_update_link_status(netdev, adapter);
}
@@ -626,7 +638,9 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
struct resource *iomem;
struct net_device *netdev;
struct ks8842_adapter *adapter;
+ struct ks8842_platform_data *pdata = pdev->dev.platform_data;
u16 id;
+ unsigned i;
iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!request_mem_region(iomem->start, resource_size(iomem), DRV_NAME))
@@ -657,7 +671,25 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
netdev->netdev_ops = &ks8842_netdev_ops;
netdev->ethtool_ops = &ks8842_ethtool_ops;
- ks8842_read_mac_addr(adapter, netdev->dev_addr);
+ /* Check if a mac address was given */
+ i = netdev->addr_len;
+ if (pdata) {
+ for (i = 0; i < netdev->addr_len; i++)
+ if (pdata->macaddr[i] != 0)
+ break;
+
+ if (i < netdev->addr_len)
+ /* an address was passed, use it */
+ memcpy(netdev->dev_addr, pdata->macaddr,
+ netdev->addr_len);
+ }
+
+ if (i == netdev->addr_len) {
+ ks8842_read_mac_addr(adapter, netdev->dev_addr);
+
+ if (!is_valid_ether_addr(netdev->dev_addr))
+ random_ether_addr(netdev->dev_addr);
+ }
id = ks8842_read16(adapter, 32, REG_SW_ID_AND_ENABLE);
@@ -668,8 +700,7 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, netdev);
- printk(KERN_INFO DRV_NAME
- " Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
+ pr_info("Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
(id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
return 0;
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index 9e9f9b349766..b4fb07a6f13f 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -9,6 +9,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define DEBUG
#include <linux/module.h>
@@ -76,7 +78,9 @@ union ks8851_tx_hdr {
* @msg_enable: The message flags controlling driver output (see ethtool).
* @fid: Incrementing frame id tag.
* @rc_ier: Cached copy of KS_IER.
+ * @rc_ccr: Cached copy of KS_CCR.
* @rc_rxqcr: Cached copy of KS_RXQCR.
+ * @eeprom_size: Companion EEPROM size in bytes, 0 if no EEPROM
*
* The @lock ensures that the chip is protected when certain operations are
* in progress. When the read or write packet transfer is in progress, most
@@ -107,6 +111,8 @@ struct ks8851_net {
u16 rc_ier;
u16 rc_rxqcr;
+ u16 rc_ccr;
+ u16 eeprom_size;
struct mii_if_info mii;
struct ks8851_rxctrl rxctrl;
@@ -125,11 +131,6 @@ struct ks8851_net {
static int msg_enable;
-#define ks_info(_ks, _msg...) dev_info(&(_ks)->spidev->dev, _msg)
-#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->spidev->dev, _msg)
-#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->spidev->dev, _msg)
-#define ks_err(_ks, _msg...) dev_err(&(_ks)->spidev->dev, _msg)
-
/* shift for byte-enable data */
#define BYTE_EN(_x) ((_x) << 2)
@@ -167,7 +168,7 @@ static void ks8851_wrreg16(struct ks8851_net *ks, unsigned reg, unsigned val)
ret = spi_sync(ks->spidev, msg);
if (ret < 0)
- ks_err(ks, "spi_sync() failed\n");
+ netdev_err(ks->netdev, "spi_sync() failed\n");
}
/**
@@ -197,7 +198,7 @@ static void ks8851_wrreg8(struct ks8851_net *ks, unsigned reg, unsigned val)
ret = spi_sync(ks->spidev, msg);
if (ret < 0)
- ks_err(ks, "spi_sync() failed\n");
+ netdev_err(ks->netdev, "spi_sync() failed\n");
}
/**
@@ -263,7 +264,7 @@ static void ks8851_rdreg(struct ks8851_net *ks, unsigned op,
ret = spi_sync(ks->spidev, msg);
if (ret < 0)
- ks_err(ks, "read: spi_sync() failed\n");
+ netdev_err(ks->netdev, "read: spi_sync() failed\n");
else if (ks8851_rx_1msg(ks))
memcpy(rxb, trx + 2, rxl);
else
@@ -417,8 +418,8 @@ static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len)
u8 txb[1];
int ret;
- if (netif_msg_rx_status(ks))
- ks_dbg(ks, "%s: %d@%p\n", __func__, len, buff);
+ netif_dbg(ks, rx_status, ks->netdev,
+ "%s: %d@%p\n", __func__, len, buff);
/* set the operation we're issuing */
txb[0] = KS_SPIOP_RXFIFO;
@@ -434,7 +435,7 @@ static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len)
ret = spi_sync(ks->spidev, msg);
if (ret < 0)
- ks_err(ks, "%s: spi_sync() failed\n", __func__);
+ netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__);
}
/**
@@ -446,10 +447,11 @@ static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len)
*/
static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt)
{
- ks_dbg(ks, "pkt %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
- rxpkt[4], rxpkt[5], rxpkt[6], rxpkt[7],
- rxpkt[8], rxpkt[9], rxpkt[10], rxpkt[11],
- rxpkt[12], rxpkt[13], rxpkt[14], rxpkt[15]);
+ netdev_dbg(ks->netdev,
+ "pkt %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
+ rxpkt[4], rxpkt[5], rxpkt[6], rxpkt[7],
+ rxpkt[8], rxpkt[9], rxpkt[10], rxpkt[11],
+ rxpkt[12], rxpkt[13], rxpkt[14], rxpkt[15]);
}
/**
@@ -471,8 +473,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
rxfc = ks8851_rdreg8(ks, KS_RXFC);
- if (netif_msg_rx_status(ks))
- ks_dbg(ks, "%s: %d packets\n", __func__, rxfc);
+ netif_dbg(ks, rx_status, ks->netdev,
+ "%s: %d packets\n", __func__, rxfc);
/* Currently we're issuing a read per packet, but we could possibly
* improve the code by issuing a single read, getting the receive
@@ -489,9 +491,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
rxstat = rxh & 0xffff;
rxlen = rxh >> 16;
- if (netif_msg_rx_status(ks))
- ks_dbg(ks, "rx: stat 0x%04x, len 0x%04x\n",
- rxstat, rxlen);
+ netif_dbg(ks, rx_status, ks->netdev,
+ "rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen);
/* the length of the packet includes the 32bit CRC */
@@ -553,9 +554,8 @@ static void ks8851_irq_work(struct work_struct *work)
status = ks8851_rdreg16(ks, KS_ISR);
- if (netif_msg_intr(ks))
- dev_dbg(&ks->spidev->dev, "%s: status 0x%04x\n",
- __func__, status);
+ netif_dbg(ks, intr, ks->netdev,
+ "%s: status 0x%04x\n", __func__, status);
if (status & IRQ_LCI) {
/* should do something about checking link status */
@@ -582,8 +582,8 @@ static void ks8851_irq_work(struct work_struct *work)
* system */
ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR);
- if (netif_msg_intr(ks))
- ks_dbg(ks, "%s: txspace %d\n", __func__, ks->tx_space);
+ netif_dbg(ks, intr, ks->netdev,
+ "%s: txspace %d\n", __func__, ks->tx_space);
}
if (status & IRQ_RXI)
@@ -659,9 +659,8 @@ static void ks8851_wrpkt(struct ks8851_net *ks, struct sk_buff *txp, bool irq)
unsigned fid = 0;
int ret;
- if (netif_msg_tx_queued(ks))
- dev_dbg(&ks->spidev->dev, "%s: skb %p, %d@%p, irq %d\n",
- __func__, txp, txp->len, txp->data, irq);
+ netif_dbg(ks, tx_queued, ks->netdev, "%s: skb %p, %d@%p, irq %d\n",
+ __func__, txp, txp->len, txp->data, irq);
fid = ks->fid++;
fid &= TXFR_TXFID_MASK;
@@ -685,7 +684,7 @@ static void ks8851_wrpkt(struct ks8851_net *ks, struct sk_buff *txp, bool irq)
ret = spi_sync(ks->spidev, msg);
if (ret < 0)
- ks_err(ks, "%s: spi_sync() failed\n", __func__);
+ netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__);
}
/**
@@ -746,8 +745,7 @@ static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode)
{
unsigned pmecr;
- if (netif_msg_hw(ks))
- ks_dbg(ks, "setting power mode %d\n", pwrmode);
+ netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);
pmecr = ks8851_rdreg16(ks, KS_PMECR);
pmecr &= ~PMECR_PM_MASK;
@@ -771,8 +769,7 @@ static int ks8851_net_open(struct net_device *dev)
* else at the moment */
mutex_lock(&ks->lock);
- if (netif_msg_ifup(ks))
- ks_dbg(ks, "opening %s\n", dev->name);
+ netif_dbg(ks, ifup, ks->netdev, "opening\n");
/* bring chip out of any power saving mode it was in */
ks8851_set_powermode(ks, PMECR_PM_NORMAL);
@@ -828,8 +825,7 @@ static int ks8851_net_open(struct net_device *dev)
netif_start_queue(ks->netdev);
- if (netif_msg_ifup(ks))
- ks_dbg(ks, "network device %s up\n", dev->name);
+ netif_dbg(ks, ifup, ks->netdev, "network device up\n");
mutex_unlock(&ks->lock);
return 0;
@@ -847,8 +843,7 @@ static int ks8851_net_stop(struct net_device *dev)
{
struct ks8851_net *ks = netdev_priv(dev);
- if (netif_msg_ifdown(ks))
- ks_info(ks, "%s: shutting down\n", dev->name);
+ netif_info(ks, ifdown, dev, "shutting down\n");
netif_stop_queue(dev);
@@ -876,8 +871,8 @@ static int ks8851_net_stop(struct net_device *dev)
while (!skb_queue_empty(&ks->txq)) {
struct sk_buff *txb = skb_dequeue(&ks->txq);
- if (netif_msg_ifdown(ks))
- ks_dbg(ks, "%s: freeing txb %p\n", __func__, txb);
+ netif_dbg(ks, ifdown, ks->netdev,
+ "%s: freeing txb %p\n", __func__, txb);
dev_kfree_skb(txb);
}
@@ -906,9 +901,8 @@ static netdev_tx_t ks8851_start_xmit(struct sk_buff *skb,
unsigned needed = calc_txlen(skb->len);
netdev_tx_t ret = NETDEV_TX_OK;
- if (netif_msg_tx_queued(ks))
- ks_dbg(ks, "%s: skb %p, %d@%p\n", __func__,
- skb, skb->len, skb->data);
+ netif_dbg(ks, tx_queued, ks->netdev,
+ "%s: skb %p, %d@%p\n", __func__, skb, skb->len, skb->data);
spin_lock(&ks->statelock);
@@ -968,13 +962,13 @@ static void ks8851_set_rx_mode(struct net_device *dev)
rxctrl.rxcr1 = (RXCR1_RXME | RXCR1_RXAE |
RXCR1_RXPAFMA | RXCR1_RXMAFMA);
} else if (dev->flags & IFF_MULTICAST && !netdev_mc_empty(dev)) {
- struct dev_mc_list *mcptr;
+ struct netdev_hw_addr *ha;
u32 crc;
/* accept some multicast */
- netdev_for_each_mc_addr(mcptr, dev) {
- crc = ether_crc(ETH_ALEN, mcptr->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc(ETH_ALEN, ha->addr);
crc >>= (32 - 6); /* get top six bits */
rxctrl.mchash[crc >> 4] |= (1 << (crc & 0xf));
@@ -1040,6 +1034,234 @@ static const struct net_device_ops ks8851_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
+/* Companion EEPROM access */
+
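+/* The companion EEPROM is bit-banged through EEPCR, stepping through CONTROL -> ADDRESS -> DATA */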
+enum { /* EEPROM programming states */
+ EEPROM_CONTROL,
+ EEPROM_ADDRESS,
+ EEPROM_DATA,
+ EEPROM_COMPLETE
+};
+
+/**
+ * ks8851_eeprom_read - read a 16-bit word from the ks8851 companion EEPROM
+ * @dev: The network device the EEPROM is attached to.
+ * @addr: EEPROM word address to read
+ *
+ * eeprom_size: used to define the address length on the bus. Can be changed
+ * through debugfs.
+ *
+ * Programs a read on the EEPROM using the ks8851 EEPROM SW access feature.
+ * Warning: The READ feature is not supported on ks8851 revision 0.
+ *
+ * Rough programming model:
+ * - on period start: set clock high and read value on bus
+ * - on period / 2: set clock low and program value on bus
+ * - start on period / 2
+ */
+unsigned int ks8851_eeprom_read(struct net_device *dev, unsigned int addr)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ int eepcr;
+ int ctrl = EEPROM_OP_READ;
+ int state = EEPROM_CONTROL;
+ int bit_count = EEPROM_OP_LEN - 1;
+ unsigned int data = 0;
+ int dummy;
+ unsigned int addr_len;
+
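+ /* a 128 byte EEPROM uses 6 address bits per 16-bit word, other sizes use 8 */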
+ addr_len = (ks->eeprom_size == 128) ? 6 : 8;
+
+ /* start transaction: chip select high, authorize write */
+ mutex_lock(&ks->lock);
+ eepcr = EEPCR_EESA | EEPCR_EESRWA;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ eepcr |= EEPCR_EECS;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+ while (state != EEPROM_COMPLETE) {
+ /* falling clock period starts... */
+ /* set EED_IO pin for control and address */
+ eepcr &= ~EEPCR_EEDO;
+ switch (state) {
+ case EEPROM_CONTROL:
+ eepcr |= ((ctrl >> bit_count) & 1) << 2;
+ if (bit_count-- <= 0) {
+ bit_count = addr_len - 1;
+ state = EEPROM_ADDRESS;
+ }
+ break;
+ case EEPROM_ADDRESS:
+ eepcr |= ((addr >> bit_count) & 1) << 2;
+ bit_count--;
+ break;
+ case EEPROM_DATA:
+ /* Change to receive mode */
+ eepcr &= ~EEPCR_EESRWA;
+ break;
+ }
+
+ /* lower clock */
+ eepcr &= ~EEPCR_EESCK;
+
+ mutex_lock(&ks->lock);
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+ /* wait period / 2 */
+ udelay(EEPROM_SK_PERIOD / 2);
+
+ /* rising clock period starts... */
+
+ /* raise clock */
+ mutex_lock(&ks->lock);
+ eepcr |= EEPCR_EESCK;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+ /* Manage read */
+ switch (state) {
+ case EEPROM_ADDRESS:
+ if (bit_count < 0) {
+ bit_count = EEPROM_DATA_LEN - 1;
+ state = EEPROM_DATA;
+ }
+ break;
+ case EEPROM_DATA:
+ mutex_lock(&ks->lock);
+ dummy = ks8851_rdreg16(ks, KS_EEPCR);
+ mutex_unlock(&ks->lock);
+ data |= ((dummy >> EEPCR_EESB_OFFSET) & 1) << bit_count;
+ if (bit_count-- <= 0)
+ state = EEPROM_COMPLETE;
+ break;
+ }
+
+ /* wait period / 2 */
+ udelay(EEPROM_SK_PERIOD / 2);
+ }
+
+ /* close transaction */
+ mutex_lock(&ks->lock);
+ eepcr &= ~EEPCR_EECS;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ eepcr = 0;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+ return data;
+}
+
+/**
+ * ks8851_eeprom_write - write a 16-bit word to the ks8851 companion EEPROM
+ * @dev: The network device the EEPROM is attached to.
+ * @op: opcode (can be WRITE, EWEN, EWDS)
+ * @addr: EEPROM word address to write
+ * @data: data to write
+ *
+ * eeprom_size: used to define the address length on the bus. Can be changed
+ * through debugfs.
+ *
+ * Programs a write on the EEPROM using the ks8851 EEPROM SW access feature.
+ *
+ * Note that a write enable is required before writing data.
+ *
+ * Rough programming model:
+ * - on period start: set clock high
+ * - on period / 2: set clock low and program value on bus
+ * - start on period / 2
+ */
+void ks8851_eeprom_write(struct net_device *dev, unsigned int op,
+ unsigned int addr, unsigned int data)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ int eepcr;
+ int state = EEPROM_CONTROL;
+ int bit_count = EEPROM_OP_LEN - 1;
+ unsigned int addr_len;
+
+ addr_len = (ks->eeprom_size == 128) ? 6 : 8;
+
+ switch (op) {
+ case EEPROM_OP_EWEN:
+ addr = 0x30;
+ break;
+ case EEPROM_OP_EWDS:
+ addr = 0;
+ break;
+ }
+
+ /* start transaction: chip select high, authorize write */
+ mutex_lock(&ks->lock);
+ eepcr = EEPCR_EESA | EEPCR_EESRWA;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ eepcr |= EEPCR_EECS;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+ while (state != EEPROM_COMPLETE) {
+ /* falling clock period starts... */
+ /* set EED_IO pin for control and address */
+ eepcr &= ~EEPCR_EEDO;
+ switch (state) {
+ case EEPROM_CONTROL:
+ eepcr |= ((op >> bit_count) & 1) << 2;
+ if (bit_count-- <= 0) {
+ bit_count = addr_len - 1;
+ state = EEPROM_ADDRESS;
+ }
+ break;
+ case EEPROM_ADDRESS:
+ eepcr |= ((addr >> bit_count) & 1) << 2;
+ if (bit_count-- <= 0) {
+ if (op == EEPROM_OP_WRITE) {
+ bit_count = EEPROM_DATA_LEN - 1;
+ state = EEPROM_DATA;
+ } else {
+ state = EEPROM_COMPLETE;
+ }
+ }
+ break;
+ case EEPROM_DATA:
+ eepcr |= ((data >> bit_count) & 1) << 2;
+ if (bit_count-- <= 0)
+ state = EEPROM_COMPLETE;
+ break;
+ }
+
+ /* lower clock */
+ eepcr &= ~EEPCR_EESCK;
+
+ mutex_lock(&ks->lock);
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+ /* wait period / 2 */
+ udelay(EEPROM_SK_PERIOD / 2);
+
+ /* rising clock period starts... */
+
+ /* raise clock */
+ eepcr |= EEPCR_EESCK;
+ mutex_lock(&ks->lock);
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+ /* wait period / 2 */
+ udelay(EEPROM_SK_PERIOD / 2);
+ }
+
+ /* close transaction */
+ mutex_lock(&ks->lock);
+ eepcr &= ~EEPCR_EECS;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ eepcr = 0;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+}
+
/* ethtool support */
static void ks8851_get_drvinfo(struct net_device *dev,
@@ -1086,6 +1308,117 @@ static int ks8851_nway_reset(struct net_device *dev)
return mii_nway_restart(&ks->mii);
}
+static int ks8851_get_eeprom_len(struct net_device *dev)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ return ks->eeprom_size;
+}
+
+static int ks8851_get_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ u16 *eeprom_buff;
+ int first_word;
+ int last_word;
+ int ret_val = 0;
+ u16 i;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ if (eeprom->len > ks->eeprom_size)
+ return -EINVAL;
+
+ eeprom->magic = ks8851_rdreg16(ks, KS_CIDER);
+
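+ /* the device is addressed in 16-bit words, convert the byte offset/length to an inclusive word range */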
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+
+ eeprom_buff = kmalloc(sizeof(u16) *
+ (last_word - first_word + 1), GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ for (i = 0; i < last_word - first_word + 1; i++)
+ eeprom_buff[i] = ks8851_eeprom_read(dev, first_word + i);
+
+ /* Device's eeprom is little-endian, word addressable */
+ for (i = 0; i < last_word - first_word + 1; i++)
+ le16_to_cpus(&eeprom_buff[i]);
+
+ memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
+ kfree(eeprom_buff);
+
+ return ret_val;
+}
+
+static int ks8851_set_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ u16 *eeprom_buff;
+ void *ptr;
+ int max_len;
+ int first_word;
+ int last_word;
+ int ret_val = 0;
+ u16 i;
+
+ if (eeprom->len == 0)
+ return -EOPNOTSUPP;
+
+ if (eeprom->len > ks->eeprom_size)
+ return -EINVAL;
+
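+ /* the magic must match the chip ID that get_eeprom reported */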
+ if (eeprom->magic != ks8851_rdreg16(ks, KS_CIDER))
+ return -EFAULT;
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+ max_len = (last_word - first_word + 1) * 2;
+ eeprom_buff = kmalloc(max_len, GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ ptr = (void *)eeprom_buff;
+
+ if (eeprom->offset & 1) {
+ /* need read/modify/write of first changed EEPROM word */
+ /* only the second byte of the word is being modified */
+ eeprom_buff[0] = ks8851_eeprom_read(dev, first_word);
+ ptr++;
+ }
+ if ((eeprom->offset + eeprom->len) & 1)
+ /* need read/modify/write of last changed EEPROM word */
+ /* only the first byte of the word is being modified */
+ eeprom_buff[last_word - first_word] =
+ ks8851_eeprom_read(dev, last_word);
+
+
+ /* Device's eeprom is little-endian, word addressable */
+ le16_to_cpus(&eeprom_buff[0]);
+ le16_to_cpus(&eeprom_buff[last_word - first_word]);
+
+ memcpy(ptr, bytes, eeprom->len);
+
+ for (i = 0; i < last_word - first_word + 1; i++)
+ eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
+
+ ks8851_eeprom_write(dev, EEPROM_OP_EWEN, 0, 0);
+
+ for (i = 0; i < last_word - first_word + 1; i++) {
+ ks8851_eeprom_write(dev, EEPROM_OP_WRITE, first_word + i,
+ eeprom_buff[i]);
+ mdelay(EEPROM_WRITE_TIME);
+ }
+
+ ks8851_eeprom_write(dev, EEPROM_OP_EWDS, 0, 0);
+
+ kfree(eeprom_buff);
+ return ret_val;
+}
+
static const struct ethtool_ops ks8851_ethtool_ops = {
.get_drvinfo = ks8851_get_drvinfo,
.get_msglevel = ks8851_get_msglevel,
@@ -1094,6 +1427,9 @@ static const struct ethtool_ops ks8851_ethtool_ops = {
.set_settings = ks8851_set_settings,
.get_link = ks8851_get_link,
.nway_reset = ks8851_nway_reset,
+ .get_eeprom_len = ks8851_get_eeprom_len,
+ .get_eeprom = ks8851_get_eeprom,
+ .set_eeprom = ks8851_set_eeprom,
};
/* MII interface controls */
@@ -1187,17 +1523,17 @@ static int ks8851_read_selftest(struct ks8851_net *ks)
rd = ks8851_rdreg16(ks, KS_MBIR);
if ((rd & both_done) != both_done) {
- ks_warn(ks, "Memory selftest not finished\n");
+ netdev_warn(ks->netdev, "Memory selftest not finished\n");
return 0;
}
if (rd & MBIR_TXMBFA) {
- ks_err(ks, "TX memory selftest fail\n");
+ netdev_err(ks->netdev, "TX memory selftest fail\n");
ret |= 1;
}
if (rd & MBIR_RXMBFA) {
- ks_err(ks, "RX memory selftest fail\n");
+ netdev_err(ks->netdev, "RX memory selftest fail\n");
ret |= 2;
}
@@ -1279,6 +1615,14 @@ static int __devinit ks8851_probe(struct spi_device *spi)
goto err_id;
}
+ /* cache the contents of the CCR register for EEPROM, etc. */
+ ks->rc_ccr = ks8851_rdreg16(ks, KS_CCR);
+
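+ /* the CCR only reports that an EEPROM is attached; assume the 128 byte part */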
+ if (ks->rc_ccr & CCR_EEPROM)
+ ks->eeprom_size = 128;
+ else
+ ks->eeprom_size = 0;
+
ks8851_read_selftest(ks);
ks8851_init_mac(ks);
@@ -1295,9 +1639,9 @@ static int __devinit ks8851_probe(struct spi_device *spi)
goto err_netdev;
}
- dev_info(&spi->dev, "revision %d, MAC %pM, IRQ %d\n",
- CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)),
- ndev->dev_addr, ndev->irq);
+ netdev_info(ndev, "revision %d, MAC %pM, IRQ %d\n",
+ CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)),
+ ndev->dev_addr, ndev->irq);
return 0;
@@ -1316,7 +1660,7 @@ static int __devexit ks8851_remove(struct spi_device *spi)
struct ks8851_net *priv = dev_get_drvdata(&spi->dev);
if (netif_msg_drv(priv))
- dev_info(&spi->dev, "remove");
+ dev_info(&spi->dev, "remove\n");
unregister_netdev(priv->netdev);
free_irq(spi->irq, priv);
diff --git a/drivers/net/ks8851.h b/drivers/net/ks8851.h
index f52c312cc356..537fb06e5932 100644
--- a/drivers/net/ks8851.h
+++ b/drivers/net/ks8851.h
@@ -25,12 +25,24 @@
#define OBCR_ODS_16mA (1 << 6)
#define KS_EEPCR 0x22
+#define EEPCR_EESRWA (1 << 5)
#define EEPCR_EESA (1 << 4)
-#define EEPCR_EESB (1 << 3)
+#define EEPCR_EESB_OFFSET 3
+#define EEPCR_EESB (1 << EEPCR_EESB_OFFSET)
#define EEPCR_EEDO (1 << 2)
#define EEPCR_EESCK (1 << 1)
#define EEPCR_EECS (1 << 0)
+#define EEPROM_OP_LEN 3 /* opcode length in bits */
+#define EEPROM_OP_READ 0x06
+#define EEPROM_OP_EWEN 0x04
+#define EEPROM_OP_WRITE 0x05
+#define EEPROM_OP_EWDS 0x14
+
+#define EEPROM_DATA_LEN 16 /* 16 bits EEPROM */
+#define EEPROM_WRITE_TIME 4 /* write ack time in ms */
+#define EEPROM_SK_PERIOD 400 /* in us */
+
#define KS_MBIR 0x24
#define MBIR_TXMBF (1 << 12)
#define MBIR_TXMBFA (1 << 11)
diff --git a/drivers/net/ks8851_mll.c b/drivers/net/ks8851_mll.c
index 6354ab3a45a6..2e2c69b24062 100644
--- a/drivers/net/ks8851_mll.c
+++ b/drivers/net/ks8851_mll.c
@@ -21,6 +21,8 @@
* KS8851 16bit MLL chip from Micrel Inc.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
@@ -361,7 +363,6 @@ static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
#define MAX_MCAST_LST 32
#define HW_MCAST_SIZE 8
-#define MAC_ADDR_LEN 6
/**
* union ks_tx_hdr - tx header data
@@ -449,7 +450,7 @@ struct ks_net {
u16 promiscuous;
u16 all_mcast;
u16 mcast_lst_size;
- u8 mcast_lst[MAX_MCAST_LST][MAC_ADDR_LEN];
+ u8 mcast_lst[MAX_MCAST_LST][ETH_ALEN];
u8 mcast_bits[HW_MCAST_SIZE];
u8 mac_addr[6];
u8 fid;
@@ -459,11 +460,6 @@ struct ks_net {
static int msg_enable;
-#define ks_info(_ks, _msg...) dev_info(&(_ks)->pdev->dev, _msg)
-#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->pdev->dev, _msg)
-#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->pdev->dev, _msg)
-#define ks_err(_ks, _msg...) dev_err(&(_ks)->pdev->dev, _msg)
-
#define BE3 0x8000 /* Byte Enable 3 */
#define BE2 0x4000 /* Byte Enable 2 */
#define BE1 0x2000 /* Byte Enable 1 */
@@ -625,8 +621,7 @@ static void ks_set_powermode(struct ks_net *ks, unsigned pwrmode)
{
unsigned pmecr;
- if (netif_msg_hw(ks))
- ks_dbg(ks, "setting power mode %d\n", pwrmode);
+ netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);
ks_rdreg16(ks, KS_GRR);
pmecr = ks_rdreg16(ks, KS_PMECR);
@@ -806,11 +801,10 @@ static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
/* read data block including CRC 4 bytes */
ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len);
skb_put(skb, frame_hdr->len);
- skb->dev = netdev;
skb->protocol = eth_type_trans(skb, netdev);
netif_rx(skb);
} else {
- printk(KERN_ERR "%s: err:skb alloc\n", __func__);
+ pr_err("%s: err:skb alloc\n", __func__);
ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
if (skb)
dev_kfree_skb_irq(skb);
@@ -837,9 +831,8 @@ static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks)
netif_carrier_off(netdev);
link_up_status = false;
}
- if (netif_msg_link(ks))
- ks_dbg(ks, "%s: %s\n",
- __func__, link_up_status ? "UP" : "DOWN");
+ netif_dbg(ks, link, ks->netdev,
+ "%s: %s\n", __func__, link_up_status ? "UP" : "DOWN");
}
/**
@@ -909,15 +902,13 @@ static int ks_net_open(struct net_device *netdev)
* else at the moment.
*/
- if (netif_msg_ifup(ks))
- ks_dbg(ks, "%s - entry\n", __func__);
+ netif_dbg(ks, ifup, ks->netdev, "%s - entry\n", __func__);
/* reset the HW */
err = request_irq(ks->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, netdev);
if (err) {
- printk(KERN_ERR "Failed to request IRQ: %d: %d\n",
- ks->irq, err);
+ pr_err("Failed to request IRQ: %d: %d\n", ks->irq, err);
return err;
}
@@ -930,8 +921,7 @@ static int ks_net_open(struct net_device *netdev)
ks_enable_qmu(ks);
netif_start_queue(ks->netdev);
- if (netif_msg_ifup(ks))
- ks_dbg(ks, "network device %s up\n", netdev->name);
+ netif_dbg(ks, ifup, ks->netdev, "network device up\n");
return 0;
}
@@ -948,8 +938,7 @@ static int ks_net_stop(struct net_device *netdev)
{
struct ks_net *ks = netdev_priv(netdev);
- if (netif_msg_ifdown(ks))
- ks_info(ks, "%s: shutting down\n", netdev->name);
+ netif_info(ks, ifdown, netdev, "shutting down\n");
netif_stop_queue(netdev);
@@ -1181,7 +1170,7 @@ static void ks_set_mcast(struct ks_net *ks, u16 mcast)
static void ks_set_rx_mode(struct net_device *netdev)
{
struct ks_net *ks = netdev_priv(netdev);
- struct dev_mc_list *ptr;
+ struct netdev_hw_addr *ha;
/* Turn on/off promiscuous mode. */
if ((netdev->flags & IFF_PROMISC) == IFF_PROMISC)
@@ -1198,13 +1187,12 @@ static void ks_set_rx_mode(struct net_device *netdev)
if (netdev_mc_count(netdev) <= MAX_MCAST_LST) {
int i = 0;
- netdev_for_each_mc_addr(ptr, netdev) {
- if (!(*ptr->dmi_addr & 1))
+ netdev_for_each_mc_addr(ha, netdev) {
+ if (!(*ha->addr & 1))
continue;
if (i >= MAX_MCAST_LST)
break;
- memcpy(ks->mcast_lst[i++], ptr->dmi_addr,
- MAC_ADDR_LEN);
+ memcpy(ks->mcast_lst[i++], ha->addr, ETH_ALEN);
}
ks->mcast_lst_size = (u8)i;
ks_set_grpaddr(ks);
@@ -1430,21 +1418,21 @@ static int ks_read_selftest(struct ks_net *ks)
rd = ks_rdreg16(ks, KS_MBIR);
if ((rd & both_done) != both_done) {
- ks_warn(ks, "Memory selftest not finished\n");
+ netdev_warn(ks->netdev, "Memory selftest not finished\n");
return 0;
}
if (rd & MBIR_TXMBFA) {
- ks_err(ks, "TX memory selftest fails\n");
+ netdev_err(ks->netdev, "TX memory selftest fails\n");
ret |= 1;
}
if (rd & MBIR_RXMBFA) {
- ks_err(ks, "RX memory selftest fails\n");
+ netdev_err(ks->netdev, "RX memory selftest fails\n");
ret |= 2;
}
- ks_info(ks, "the selftest passes\n");
+ netdev_info(ks->netdev, "the selftest passes\n");
return ret;
}
@@ -1515,7 +1503,7 @@ static int ks_hw_init(struct ks_net *ks)
ks->frame_head_info = (struct type_frame_head *) \
kmalloc(MHEADER_SIZE, GFP_KERNEL);
if (!ks->frame_head_info) {
- printk(KERN_ERR "Error: Fail to allocate frame memory\n");
+ pr_err("Error: Fail to allocate frame memory\n");
return false;
}
@@ -1581,7 +1569,7 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
ks->mii.mdio_read = ks_phy_read;
ks->mii.mdio_write = ks_phy_write;
- ks_info(ks, "message enable is %d\n", msg_enable);
+ netdev_info(netdev, "message enable is %d\n", msg_enable);
/* set the default message enable */
ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV |
NETIF_MSG_PROBE |
@@ -1590,13 +1578,13 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
/* simple check for a valid chip being connected to the bus */
if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
- ks_err(ks, "failed to read device ID\n");
+ netdev_err(netdev, "failed to read device ID\n");
err = -ENODEV;
goto err_register;
}
if (ks_read_selftest(ks)) {
- ks_err(ks, "failed to read device ID\n");
+ netdev_err(netdev, "failed to read device ID\n");
err = -ENODEV;
goto err_register;
}
@@ -1627,9 +1615,8 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
id = ks_rdreg16(ks, KS_CIDER);
- printk(KERN_INFO DRV_NAME
- " Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
- (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
+ netdev_info(netdev, "Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
+ (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
return 0;
err_register:
diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c
index 0606a1f359fb..cc0bc8a26085 100644
--- a/drivers/net/ksz884x.c
+++ b/drivers/net/ksz884x.c
@@ -14,10 +14,11 @@
* GNU General Public License for more details.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
@@ -1484,11 +1485,6 @@ struct dev_priv {
int promiscuous;
};
-#define ks_info(_ks, _msg...) dev_info(&(_ks)->pdev->dev, _msg)
-#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->pdev->dev, _msg)
-#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->pdev->dev, _msg)
-#define ks_err(_ks, _msg...) dev_err(&(_ks)->pdev->dev, _msg)
-
#define DRV_NAME "KSZ884X PCI"
#define DEVICE_NAME "KSZ884x PCI"
#define DRV_VERSION "1.0.0"
@@ -3835,7 +3831,7 @@ static void ksz_check_desc_num(struct ksz_desc_info *info)
alloc >>= 1;
}
if (alloc != 1 || shift < MIN_DESC_SHIFT) {
- printk(KERN_ALERT "Hardware descriptor numbers not right!\n");
+ pr_alert("Hardware descriptor numbers not right!\n");
while (alloc) {
shift++;
alloc >>= 1;
@@ -4546,8 +4542,7 @@ static int ksz_alloc_mem(struct dev_info *adapter)
(((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) /
DESC_ALIGNMENT) * DESC_ALIGNMENT);
if (hw->rx_desc_info.size != sizeof(struct ksz_hw_desc))
- printk(KERN_ALERT
- "Hardware descriptor size not right!\n");
+ pr_alert("Hardware descriptor size not right!\n");
ksz_check_desc_num(&hw->rx_desc_info);
ksz_check_desc_num(&hw->tx_desc_info);
@@ -4689,7 +4684,7 @@ static void send_packet(struct sk_buff *skb, struct net_device *dev)
int frag;
skb_frag_t *this_frag;
- dma_buf->len = skb->len - skb->data_len;
+ dma_buf->len = skb_headlen(skb);
dma_buf->dma = pci_map_single(
hw_priv->pdev, skb->data, dma_buf->len,
@@ -5049,8 +5044,6 @@ static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw,
dma_buf->skb->data, packet_len);
} while (0);
- skb->dev = dev;
-
skb->protocol = eth_type_trans(skb, dev);
if (hw->rx_cfg & (DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP))
@@ -5061,8 +5054,6 @@ static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw,
priv->stats.rx_bytes += packet_len;
/* Notify upper layer for received packet. */
- dev->last_rx = jiffies;
-
rx_status = netif_rx(skb);
return 0;
@@ -5320,10 +5311,10 @@ static irqreturn_t netdev_intr(int irq, void *dev_id)
u32 data;
hw->intr_mask &= ~KS884X_INT_TX_STOPPED;
- printk(KERN_INFO "Tx stopped\n");
+ pr_info("Tx stopped\n");
data = readl(hw->io + KS_DMA_TX_CTRL);
if (!(data & DMA_TX_ENABLE))
- printk(KERN_INFO "Tx disabled\n");
+ pr_info("Tx disabled\n");
break;
}
} while (0);
@@ -5496,6 +5487,18 @@ static int prepare_hardware(struct net_device *dev)
return 0;
}
+static void set_media_state(struct net_device *dev, int media_state)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+
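+ /* callers pass media_connected here; carrier tracks whether the port has reached that state */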
+ if (media_state == priv->media_state)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+ netif_info(priv, link, dev, "link %s\n",
+ media_state == priv->media_state ? "on" : "off");
+}
+
/**
* netdev_open - open network device
* @dev: Network device.
@@ -5585,15 +5588,7 @@ static int netdev_open(struct net_device *dev)
priv->media_state = port->linked->state;
- if (media_connected == priv->media_state)
- netif_carrier_on(dev);
- else
- netif_carrier_off(dev);
- if (netif_msg_link(priv))
- printk(KERN_INFO "%s link %s\n", dev->name,
- (media_connected == priv->media_state ?
- "on" : "off"));
-
+ set_media_state(dev, media_connected);
netif_start_queue(dev);
return 0;
@@ -5767,7 +5762,7 @@ static void netdev_set_rx_mode(struct net_device *dev)
struct dev_priv *priv = netdev_priv(dev);
struct dev_info *hw_priv = priv->adapter;
struct ksz_hw *hw = &hw_priv->hw;
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
int multicast = (dev->flags & IFF_ALLMULTI);
dev_set_promiscuous(dev, priv, hw, (dev->flags & IFF_PROMISC));
@@ -5784,7 +5779,7 @@ static void netdev_set_rx_mode(struct net_device *dev)
int i = 0;
/* List too big to support so turn on all multicast mode. */
- if (dev->mc_count > MAX_MULTICAST_LIST) {
+ if (netdev_mc_count(dev) > MAX_MULTICAST_LIST) {
if (MAX_MULTICAST_LIST != hw->multi_list_size) {
hw->multi_list_size = MAX_MULTICAST_LIST;
++hw->all_multi;
@@ -5793,13 +5788,12 @@ static void netdev_set_rx_mode(struct net_device *dev)
return;
}
- netdev_for_each_mc_addr(mc_ptr, dev) {
- if (!(*mc_ptr->dmi_addr & 1))
+ netdev_for_each_mc_addr(ha, dev) {
+ if (!(*ha->addr & 1))
continue;
if (i >= MAX_MULTICAST_LIST)
break;
- memcpy(hw->multi_list[i++], mc_ptr->dmi_addr,
- MAC_ADDR_LEN);
+ memcpy(hw->multi_list[i++], ha->addr, MAC_ADDR_LEN);
}
hw->multi_list_size = (u8) i;
hw_set_grp_addr(hw);
@@ -6683,16 +6677,8 @@ static void update_link(struct net_device *dev, struct dev_priv *priv,
{
if (priv->media_state != port->linked->state) {
priv->media_state = port->linked->state;
- if (netif_running(dev)) {
- if (media_connected == priv->media_state)
- netif_carrier_on(dev);
- else
- netif_carrier_off(dev);
- if (netif_msg_link(priv))
- printk(KERN_INFO "%s link %s\n", dev->name,
- (media_connected == priv->media_state ?
- "on" : "off"));
- }
+ if (netif_running(dev))
+ set_media_state(dev, media_connected);
}
}
@@ -6986,7 +6972,7 @@ static int __init pcidev_init(struct pci_dev *pdev,
int pi;
int port_count;
int result;
- char banner[80];
+ char banner[sizeof(version)];
struct ksz_switch *sw = NULL;
result = pci_enable_device(pdev);
@@ -7010,10 +6996,9 @@ static int __init pcidev_init(struct pci_dev *pdev,
result = -ENOMEM;
- info = kmalloc(sizeof(struct platform_info), GFP_KERNEL);
+ info = kzalloc(sizeof(struct platform_info), GFP_KERNEL);
if (!info)
goto pcidev_init_dev_err;
- memset(info, 0, sizeof(struct platform_info));
hw_priv = &info->dev_info;
hw_priv->pdev = pdev;
@@ -7027,15 +7012,15 @@ static int __init pcidev_init(struct pci_dev *pdev,
cnt = hw_init(hw);
if (!cnt) {
if (msg_enable & NETIF_MSG_PROBE)
- printk(KERN_ALERT "chip not detected\n");
+ pr_alert("chip not detected\n");
result = -ENODEV;
goto pcidev_init_alloc_err;
}
- sprintf(banner, "%s\n", version);
- banner[13] = cnt + '0';
- ks_info(hw_priv, "%s", banner);
- ks_dbg(hw_priv, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq);
+ snprintf(banner, sizeof(banner), "%s", version);
+ banner[13] = cnt + '0'; /* Replace x in "Micrel KSZ884x" */
+ dev_info(&hw_priv->pdev->dev, "%s\n", banner);
+ dev_dbg(&hw_priv->pdev->dev, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq);
/* Assume device is KSZ8841. */
hw->dev_count = 1;
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index 7b9447646f8a..21f8adaa87c1 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -945,7 +945,7 @@ static void lance_tx_timeout (struct net_device *dev)
#endif
lance_restart (dev, 0x0043, 1);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue (dev);
}
@@ -1011,8 +1011,6 @@ static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
outw(0x0000, ioaddr+LANCE_ADDR);
outw(0x0048, ioaddr+LANCE_DATA);
- dev->trans_start = jiffies;
-
if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE)
netif_stop_queue(dev);
diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c
index 973390b82ec2..de856d8abc90 100644
--- a/drivers/net/lib82596.c
+++ b/drivers/net/lib82596.c
@@ -963,7 +963,7 @@ static void i596_tx_timeout (struct net_device *dev)
lp->last_restart = dev->stats.tx_packets;
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue (dev);
}
@@ -974,7 +974,6 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct tx_cmd *tx_cmd;
struct i596_tbd *tbd;
short length = skb->len;
- dev->trans_start = jiffies;
DEB(DEB_STARTTX, printk(KERN_DEBUG
"%s: i596_start_xmit(%x,%p) called\n",
@@ -1388,7 +1387,7 @@ static void set_multicast_list(struct net_device *dev)
}
if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
unsigned char *cp;
struct mc_cmd *cmd;
@@ -1396,10 +1395,10 @@ static void set_multicast_list(struct net_device *dev)
cmd->cmd.command = SWAP16(CmdMulticastList);
cmd->mc_cnt = SWAP16(netdev_mc_count(dev) * 6);
cp = cmd->mc_addrs;
- netdev_for_each_mc_addr(dmi, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (!cnt--)
break;
- memcpy(cp, dmi->dmi_addr, 6);
+ memcpy(cp, ha->addr, 6);
if (i596_debug > 1)
DEB(DEB_MULTI,
printk(KERN_DEBUG
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c
index 56f66f485400..64d51d627d8d 100644
--- a/drivers/net/lib8390.c
+++ b/drivers/net/lib8390.c
@@ -257,7 +257,7 @@ static void __ei_tx_timeout(struct net_device *dev)
{
unsigned long e8390_base = dev->base_addr;
struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
- int txsr, isr, tickssofar = jiffies - dev->trans_start;
+ int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
unsigned long flags;
dev->stats.tx_errors++;
@@ -386,7 +386,6 @@ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
{
ei_local->txing = 1;
NS8390_trigger_send(dev, send_length, output_page);
- dev->trans_start = jiffies;
if (output_page == ei_local->tx_start_page)
{
ei_local->tx1 = -1;
@@ -445,14 +444,14 @@ static irqreturn_t __ei_interrupt(int irq, void *dev_id)
if (ei_local->irqlock)
{
-#if 1 /* This might just be an interrupt for a PCI device sharing this line */
- /* The "irqlock" check is only for testing. */
- printk(ei_local->irqlock
- ? "%s: Interrupted while interrupts are masked! isr=%#2x imr=%#2x.\n"
- : "%s: Reentering the interrupt handler! isr=%#2x imr=%#2x.\n",
+ /*
+ * This might just be an interrupt for a PCI device sharing
+ * this line
+ */
+ printk("%s: Interrupted while interrupts are masked!"
+ " isr=%#2x imr=%#2x.\n",
dev->name, ei_inb_p(e8390_base + EN0_ISR),
ei_inb_p(e8390_base + EN0_IMR));
-#endif
spin_unlock(&ei_local->page_lock);
return IRQ_NONE;
}
@@ -905,10 +904,10 @@ static struct net_device_stats *__ei_get_stats(struct net_device *dev)
static inline void make_mc_bits(u8 *bits, struct net_device *dev)
{
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
- netdev_for_each_mc_addr(dmi, dev) {
- u32 crc = ether_crc(ETH_ALEN, dmi->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ u32 crc = ether_crc(ETH_ALEN, ha->addr);
/*
* The 8390 uses the 6 most significant bits of the
* CRC to index the multicast table.
diff --git a/drivers/net/ll_temac.h b/drivers/net/ll_temac.h
index 1af66a1e6911..c03358434acb 100644
--- a/drivers/net/ll_temac.h
+++ b/drivers/net/ll_temac.h
@@ -5,8 +5,11 @@
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/spinlock.h>
+
+#ifdef CONFIG_PPC_DCR
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
+#endif
/* packet size info */
#define XTE_HDR_SIZE 14 /* size of Ethernet header */
@@ -290,9 +293,6 @@ This option defaults to enabled (set) */
#define TX_CONTROL_CALC_CSUM_MASK 1
-#define XTE_ALIGN 32
-#define BUFFER_ALIGN(adr) ((XTE_ALIGN - ((u32) adr)) % XTE_ALIGN)
-
#define MULTICAST_CAM_TABLE_NUM 4
/* TX/RX CURDESC_PTR points to first descriptor */
@@ -335,9 +335,15 @@ struct temac_local {
struct mii_bus *mii_bus; /* MII bus reference */
int mdio_irqs[PHY_MAX_ADDR]; /* IRQs table for MDIO bus */
- /* IO registers and IRQs */
+ /* IO registers, dma functions and IRQs */
void __iomem *regs;
+ void __iomem *sdma_regs;
+#ifdef CONFIG_PPC_DCR
dcr_host_t sdma_dcrs;
+#endif
+ u32 (*dma_in)(struct temac_local *, int);
+ void (*dma_out)(struct temac_local *, int, u32);
+
int tx_irq;
int rx_irq;
int emac_num;
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index ba617e3cf1bb..b59b24d667f0 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -20,9 +20,6 @@
* or rx, so this should be okay.
*
* TODO:
- * - Fix driver to work on more than just Virtex5. Right now the driver
- * assumes that the locallink DMA registers are accessed via DCR
- * instructions.
* - Factor out locallink DMA code into separate driver
* - Fix multicast assignment.
* - Fix support for hardware checksumming.
@@ -116,17 +113,86 @@ void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
}
+/**
+ * temac_dma_in32 - Memory mapped DMA read. This function expects a
+ * register input based on DCR word addresses, which are converted
+ * to memory mapped byte addresses.
+ */
static u32 temac_dma_in32(struct temac_local *lp, int reg)
{
- return dcr_read(lp->sdma_dcrs, reg);
+ return in_be32((u32 *)(lp->sdma_regs + (reg << 2)));
}
+/**
+ * temac_dma_out32 - Memory mapped DMA write. This function expects a
+ * register input based on DCR word addresses, which are converted
+ * to memory mapped byte addresses.
+ */
static void temac_dma_out32(struct temac_local *lp, int reg, u32 value)
{
+ out_be32((u32 *)(lp->sdma_regs + (reg << 2)), value);
+}
+
+/* DMA register access functions can be DCR based or memory mapped.
+ * The PowerPC 440 is DCR based, the PowerPC 405 and MicroBlaze are both
+ * memory mapped.
+ */
+#ifdef CONFIG_PPC_DCR
+
+/**
+ * temac_dma_dcr_in - DCR based DMA read
+ */
+static u32 temac_dma_dcr_in(struct temac_local *lp, int reg)
+{
+ return dcr_read(lp->sdma_dcrs, reg);
+}
+
+/**
+ * temac_dma_dcr_out - DCR based DMA write
+ */
+static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
+{
dcr_write(lp->sdma_dcrs, reg, value);
}
/**
+ * temac_dcr_setup - If the DMA is DCR based, then setup the address and
+ * I/O functions
+ */
+static int temac_dcr_setup(struct temac_local *lp, struct of_device *op,
+ struct device_node *np)
+{
+ unsigned int dcrs;
+
+ /* setup the dcr address mapping if it's in the device tree */
+
+ dcrs = dcr_resource_start(np, 0);
+ if (dcrs != 0) {
+ lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
+ lp->dma_in = temac_dma_dcr_in;
+ lp->dma_out = temac_dma_dcr_out;
+ dev_dbg(&op->dev, "DCR base: %x\n", dcrs);
+ return 0;
+ }
+ /* no DCR in the device tree, indicate a failure */
+ return -1;
+}
+
+#else
+
+/*
+ * temac_dcr_setup - This is a stub for when DCR is not supported,
+ * such as with MicroBlaze
+ */
+static int temac_dcr_setup(struct temac_local *lp, struct of_device *op,
+ struct device_node *np)
+{
+ return -1;
+}
+
+#endif
+
+/**
* temac_dma_bd_init - Setup buffer descriptor rings
*/
static int temac_dma_bd_init(struct net_device *ndev)
@@ -156,14 +222,14 @@ static int temac_dma_bd_init(struct net_device *ndev)
lp->rx_bd_v[i].next = lp->rx_bd_p +
sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM);
- skb = alloc_skb(XTE_MAX_JUMBO_FRAME_SIZE
- + XTE_ALIGN, GFP_ATOMIC);
+ skb = netdev_alloc_skb_ip_align(ndev,
+ XTE_MAX_JUMBO_FRAME_SIZE);
+
if (skb == 0) {
dev_err(&ndev->dev, "alloc_skb error %d\n", i);
return -1;
}
lp->rx_skb[i] = skb;
- skb_reserve(skb, BUFFER_ALIGN(skb->data));
/* returns physical address of skb->data */
lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
skb->data,
@@ -173,23 +239,23 @@ static int temac_dma_bd_init(struct net_device *ndev)
lp->rx_bd_v[i].app0 = STS_CTRL_APP0_IRQONEND;
}
- temac_dma_out32(lp, TX_CHNL_CTRL, 0x10220400 |
+ lp->dma_out(lp, TX_CHNL_CTRL, 0x10220400 |
CHNL_CTRL_IRQ_EN |
CHNL_CTRL_IRQ_DLY_EN |
CHNL_CTRL_IRQ_COAL_EN);
/* 0x10220483 */
/* 0x00100483 */
- temac_dma_out32(lp, RX_CHNL_CTRL, 0xff010000 |
+ lp->dma_out(lp, RX_CHNL_CTRL, 0xff010000 |
CHNL_CTRL_IRQ_EN |
CHNL_CTRL_IRQ_DLY_EN |
CHNL_CTRL_IRQ_COAL_EN |
CHNL_CTRL_IRQ_IOE);
/* 0xff010283 */
- temac_dma_out32(lp, RX_CURDESC_PTR, lp->rx_bd_p);
- temac_dma_out32(lp, RX_TAILDESC_PTR,
+ lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p);
+ lp->dma_out(lp, RX_TAILDESC_PTR,
lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
- temac_dma_out32(lp, TX_CURDESC_PTR, lp->tx_bd_p);
+ lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
return 0;
}
@@ -251,20 +317,20 @@ static void temac_set_multicast_list(struct net_device *ndev)
temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
} else if (!netdev_mc_empty(ndev)) {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
i = 0;
- netdev_for_each_mc_addr(mclist, ndev) {
+ netdev_for_each_mc_addr(ha, ndev) {
if (i >= MULTICAST_CAM_TABLE_NUM)
break;
- multi_addr_msw = ((mclist->dmi_addr[3] << 24) |
- (mclist->dmi_addr[2] << 16) |
- (mclist->dmi_addr[1] << 8) |
- (mclist->dmi_addr[0]));
+ multi_addr_msw = ((ha->addr[3] << 24) |
+ (ha->addr[2] << 16) |
+ (ha->addr[1] << 8) |
+ (ha->addr[0]));
temac_indirect_out32(lp, XTE_MAW0_OFFSET,
multi_addr_msw);
- multi_addr_lsw = ((mclist->dmi_addr[5] << 8) |
- (mclist->dmi_addr[4]) | (i << 16));
+ multi_addr_lsw = ((ha->addr[5] << 8) |
+ (ha->addr[4]) | (i << 16));
temac_indirect_out32(lp, XTE_MAW1_OFFSET,
multi_addr_lsw);
i++;
@@ -427,9 +493,9 @@ static void temac_device_reset(struct net_device *ndev)
temac_indirect_out32(lp, XTE_RXC1_OFFSET, val & ~XTE_RXC1_RXEN_MASK);
/* Reset Local Link (DMA) */
- temac_dma_out32(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
+ lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
timeout = 1000;
- while (temac_dma_in32(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
+ while (lp->dma_in(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
udelay(1);
if (--timeout == 0) {
dev_err(&ndev->dev,
@@ -437,7 +503,7 @@ static void temac_device_reset(struct net_device *ndev)
break;
}
}
- temac_dma_out32(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);
+ lp->dma_out(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);
temac_dma_bd_init(ndev);
@@ -461,7 +527,7 @@ static void temac_device_reset(struct net_device *ndev)
dev_err(&ndev->dev, "Error setting TEMAC options\n");
/* Init Driver variable */
- ndev->trans_start = 0;
+ ndev->trans_start = jiffies; /* prevent tx timeout */
}
void temac_adjust_link(struct net_device *ndev)
@@ -598,7 +664,7 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
lp->tx_bd_tail = 0;
/* Kick off the transfer */
- temac_dma_out32(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
+ lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
return NETDEV_TX_OK;
}
@@ -612,7 +678,6 @@ static void ll_temac_recv(struct net_device *ndev)
struct cdmac_bd *cur_p;
dma_addr_t tail_p;
int length;
- unsigned long skb_vaddr;
unsigned long flags;
spin_lock_irqsave(&lp->rx_lock, flags);
@@ -626,8 +691,7 @@ static void ll_temac_recv(struct net_device *ndev)
skb = lp->rx_skb[lp->rx_bd_ci];
length = cur_p->app4 & 0x3FFF;
- skb_vaddr = virt_to_bus(skb->data);
- dma_unmap_single(ndev->dev.parent, skb_vaddr, length,
+ dma_unmap_single(ndev->dev.parent, cur_p->phys, length,
DMA_FROM_DEVICE);
skb_put(skb, length);
@@ -640,16 +704,15 @@ static void ll_temac_recv(struct net_device *ndev)
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += length;
- new_skb = alloc_skb(XTE_MAX_JUMBO_FRAME_SIZE + XTE_ALIGN,
- GFP_ATOMIC);
+ new_skb = netdev_alloc_skb_ip_align(ndev,
+ XTE_MAX_JUMBO_FRAME_SIZE);
+
if (new_skb == 0) {
dev_err(&ndev->dev, "no memory for new sk_buff\n");
spin_unlock_irqrestore(&lp->rx_lock, flags);
return;
}
- skb_reserve(new_skb, BUFFER_ALIGN(new_skb->data));
-
cur_p->app0 = STS_CTRL_APP0_IRQONEND;
cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
XTE_MAX_JUMBO_FRAME_SIZE,
@@ -664,7 +727,7 @@ static void ll_temac_recv(struct net_device *ndev)
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
bdstat = cur_p->app0;
}
- temac_dma_out32(lp, RX_TAILDESC_PTR, tail_p);
+ lp->dma_out(lp, RX_TAILDESC_PTR, tail_p);
spin_unlock_irqrestore(&lp->rx_lock, flags);
}
@@ -675,8 +738,8 @@ static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
struct temac_local *lp = netdev_priv(ndev);
unsigned int status;
- status = temac_dma_in32(lp, TX_IRQ_REG);
- temac_dma_out32(lp, TX_IRQ_REG, status);
+ status = lp->dma_in(lp, TX_IRQ_REG);
+ lp->dma_out(lp, TX_IRQ_REG, status);
if (status & (IRQ_COAL | IRQ_DLY))
temac_start_xmit_done(lp->ndev);
@@ -693,8 +756,8 @@ static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev)
unsigned int status;
/* Read and clear the status registers */
- status = temac_dma_in32(lp, RX_IRQ_REG);
- temac_dma_out32(lp, RX_IRQ_REG, status);
+ status = lp->dma_in(lp, RX_IRQ_REG);
+ lp->dma_out(lp, RX_IRQ_REG, status);
if (status & (IRQ_COAL | IRQ_DLY))
ll_temac_recv(lp->ndev);
@@ -795,7 +858,7 @@ static ssize_t temac_show_llink_regs(struct device *dev,
int i, len = 0;
for (i = 0; i < 0x11; i++)
- len += sprintf(buf + len, "%.8x%s", temac_dma_in32(lp, i),
+ len += sprintf(buf + len, "%.8x%s", lp->dma_in(lp, i),
(i % 8) == 7 ? "\n" : " ");
len += sprintf(buf + len, "\n");
@@ -821,7 +884,6 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
struct net_device *ndev;
const void *addr;
int size, rc = 0;
- unsigned int dcrs;
/* Init network device structure */
ndev = alloc_etherdev(sizeof(*lp));
@@ -871,13 +933,20 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
goto nodev;
}
- dcrs = dcr_resource_start(np, 0);
- if (dcrs == 0) {
- dev_err(&op->dev, "could not get DMA register address\n");
- goto nodev;
+ /* Setup the DMA register accesses, could be DCR or memory mapped */
+ if (temac_dcr_setup(lp, op, np)) {
+
+ /* no DCR in the device tree, try non-DCR */
+ lp->sdma_regs = of_iomap(np, 0);
+ if (lp->sdma_regs) {
+ lp->dma_in = temac_dma_in32;
+ lp->dma_out = temac_dma_out32;
+ dev_dbg(&op->dev, "MEM base: %p\n", lp->sdma_regs);
+ } else {
+ dev_err(&op->dev, "unable to map DMA registers\n");
+ goto nodev;
+ }
}
- lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
- dev_dbg(&op->dev, "DCR base: %x\n", dcrs);
lp->rx_irq = irq_of_parse_and_map(np, 0);
lp->tx_irq = irq_of_parse_and_map(np, 1);
diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c
index 3e3cc04defd0..3df046a58b1d 100644
--- a/drivers/net/lp486e.c
+++ b/drivers/net/lp486e.c
@@ -875,8 +875,6 @@ static netdev_tx_t i596_start_xmit (struct sk_buff *skb, struct net_device *dev)
length = ETH_ZLEN;
}
- dev->trans_start = jiffies;
-
tx_cmd = kmalloc((sizeof (struct tx_cmd) + sizeof (struct i596_tbd)), GFP_ATOMIC);
if (tx_cmd == NULL) {
printk(KERN_WARNING "%s: i596_xmit Memory squeeze, dropping packet.\n", dev->name);
@@ -1256,7 +1254,7 @@ static void set_multicast_list(struct net_device *dev) {
dev->name, netdev_mc_count(dev));
if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *cp;
cmd = kmalloc(sizeof(struct i596_cmd) + 2 +
netdev_mc_count(dev) * 6, GFP_ATOMIC);
@@ -1267,8 +1265,8 @@ static void set_multicast_list(struct net_device *dev) {
cmd->command = CmdMulticastList;
*((unsigned short *) (cmd + 1)) = netdev_mc_count(dev) * 6;
cp = ((char *)(cmd + 1))+2;
- netdev_for_each_mc_addr(dmi, dev) {
- memcpy(cp, dmi->dmi_addr, 6);
+ netdev_for_each_mc_addr(ha, dev) {
+ memcpy(cp, ha->addr, 6);
cp += 6;
}
if (i596_debug & LOG_SRCDST)
diff --git a/drivers/net/mac89x0.c b/drivers/net/mac89x0.c
index c0876e915eed..69fa4ef64dd2 100644
--- a/drivers/net/mac89x0.c
+++ b/drivers/net/mac89x0.c
@@ -408,7 +408,6 @@ net_send_packet(struct sk_buff *skb, struct net_device *dev)
skb->len+1);
local_irq_restore(flags);
- dev->trans_start = jiffies;
dev_kfree_skb (skb);
return NETDEV_TX_OK;
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index c8a18a6203c8..40797fbdca9f 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -666,8 +666,6 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&bp->lock, flags);
- dev->trans_start = jiffies;
-
return NETDEV_TX_OK;
}
@@ -793,6 +791,7 @@ static void macb_init_hw(struct macb *bp)
config = macb_readl(bp, NCFGR) & MACB_BF(CLK, -1L);
config |= MACB_BIT(PAE); /* PAuse Enable */
config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
+ config |= MACB_BIT(BIG); /* Receive oversized frames */
if (bp->dev->flags & IFF_PROMISC)
config |= MACB_BIT(CAF); /* Copy All Frames */
if (!(bp->dev->flags & IFF_BROADCAST))
@@ -882,15 +881,15 @@ static int hash_get_index(__u8 *addr)
*/
static void macb_sethashtable(struct net_device *dev)
{
- struct dev_mc_list *curr;
+ struct netdev_hw_addr *ha;
unsigned long mc_filter[2];
unsigned int bitnr;
struct macb *bp = netdev_priv(dev);
mc_filter[0] = mc_filter[1] = 0;
- netdev_for_each_mc_addr(curr, dev) {
- bitnr = hash_get_index(curr->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ bitnr = hash_get_index(ha->addr);
mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
}
diff --git a/drivers/net/mace.c b/drivers/net/mace.c
index 962c41d0c8df..b6855a6476f8 100644
--- a/drivers/net/mace.c
+++ b/drivers/net/mace.c
@@ -599,7 +599,7 @@ static void mace_set_multicast(struct net_device *dev)
mp->maccc |= PROM;
} else {
unsigned char multicast_filter[8];
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
if (dev->flags & IFF_ALLMULTI) {
for (i = 0; i < 8; i++)
@@ -607,8 +607,8 @@ static void mace_set_multicast(struct net_device *dev)
} else {
for (i = 0; i < 8; i++)
multicast_filter[i] = 0;
- netdev_for_each_mc_addr(dmi, dev) {
- crc = ether_crc_le(6, dmi->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(6, ha->addr);
i = crc >> 26; /* bit number in multicast_filter */
multicast_filter[i >> 3] |= 1 << (i & 7);
}
diff --git a/drivers/net/macmace.c b/drivers/net/macmace.c
index 52e9a51c4c4f..c685a4656878 100644
--- a/drivers/net/macmace.c
+++ b/drivers/net/macmace.c
@@ -488,7 +488,6 @@ static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb(skb);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
@@ -509,7 +508,7 @@ static void mace_set_multicast(struct net_device *dev)
mb->maccc |= PROM;
} else {
unsigned char multicast_filter[8];
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
if (dev->flags & IFF_ALLMULTI) {
for (i = 0; i < 8; i++) {
@@ -518,8 +517,8 @@ static void mace_set_multicast(struct net_device *dev)
} else {
for (i = 0; i < 8; i++)
multicast_filter[i] = 0;
- netdev_for_each_mc_addr(dmi, dev) {
- crc = ether_crc_le(6, dmi->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(6, ha->addr);
/* bit number in multicast_filter */
i = crc >> 26;
multicast_filter[i >> 3] |= 1 << (i & 7);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 40faa368b07a..9a939d828b47 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -282,7 +282,7 @@ static int macvlan_open(struct net_device *dev)
if (macvlan_addr_busy(vlan->port, dev->dev_addr))
goto out;
- err = dev_unicast_add(lowerdev, dev->dev_addr);
+ err = dev_uc_add(lowerdev, dev->dev_addr);
if (err < 0)
goto out;
if (dev->flags & IFF_ALLMULTI) {
@@ -294,7 +294,7 @@ static int macvlan_open(struct net_device *dev)
return 0;
del_unicast:
- dev_unicast_delete(lowerdev, dev->dev_addr);
+ dev_uc_del(lowerdev, dev->dev_addr);
out:
return err;
}
@@ -308,7 +308,7 @@ static int macvlan_stop(struct net_device *dev)
if (dev->flags & IFF_ALLMULTI)
dev_set_allmulti(lowerdev, -1);
- dev_unicast_delete(lowerdev, dev->dev_addr);
+ dev_uc_del(lowerdev, dev->dev_addr);
macvlan_hash_del(vlan);
return 0;
@@ -332,11 +332,11 @@ static int macvlan_set_mac_address(struct net_device *dev, void *p)
if (macvlan_addr_busy(vlan->port, addr->sa_data))
return -EBUSY;
- err = dev_unicast_add(lowerdev, addr->sa_data);
+ err = dev_uc_add(lowerdev, addr->sa_data);
if (err)
return err;
- dev_unicast_delete(lowerdev, dev->dev_addr);
+ dev_uc_del(lowerdev, dev->dev_addr);
macvlan_hash_change_addr(vlan, addr->sa_data);
}
@@ -748,6 +748,9 @@ static int macvlan_device_event(struct notifier_block *unused,
list_for_each_entry_safe(vlan, next, &port->vlans, list)
vlan->dev->rtnl_link_ops->dellink(vlan->dev, NULL);
break;
+ case NETDEV_PRE_TYPE_CHANGE:
+ /* Forbid underlaying device to change its type. */
+ return NOTIFY_BAD;
}
return NOTIFY_DONE;
}
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index abba3cc81f12..a8a94e2f6ddc 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -37,6 +37,8 @@
struct macvtap_queue {
struct sock sk;
struct socket sock;
+ struct socket_wq wq;
+ int vnet_hdr_sz;
struct macvlan_dev *vlan;
struct file *file;
unsigned int flags;
@@ -181,7 +183,7 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
return -ENOLINK;
skb_queue_tail(&q->sk.sk_receive_queue, skb);
- wake_up_interruptible_poll(q->sk.sk_sleep, POLLIN | POLLRDNORM | POLLRDBAND);
+ wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
return 0;
}
@@ -242,12 +244,15 @@ static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
static void macvtap_sock_write_space(struct sock *sk)
{
+ wait_queue_head_t *wqueue;
+
if (!sock_writeable(sk) ||
!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
return;
- if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible_poll(sk->sk_sleep, POLLOUT | POLLWRNORM | POLLWRBAND);
+ wqueue = sk_sleep(sk);
+ if (wqueue && waitqueue_active(wqueue))
+ wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
}
static int macvtap_open(struct inode *inode, struct file *file)
@@ -272,7 +277,8 @@ static int macvtap_open(struct inode *inode, struct file *file)
if (!q)
goto out;
- init_waitqueue_head(&q->sock.wait);
+ q->sock.wq = &q->wq;
+ init_waitqueue_head(&q->wq.wait);
q->sock.type = SOCK_RAW;
q->sock.state = SS_CONNECTED;
q->sock.file = file;
@@ -280,6 +286,7 @@ static int macvtap_open(struct inode *inode, struct file *file)
sock_init_data(&q->sock, &q->sk);
q->sk.sk_write_space = macvtap_sock_write_space;
q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
+ q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
err = macvtap_set_queue(dev, file, q);
if (err)
@@ -308,7 +315,7 @@ static unsigned int macvtap_poll(struct file *file, poll_table * wait)
goto out;
mask = 0;
- poll_wait(file, &q->sock.wait, wait);
+ poll_wait(file, &q->wq.wait, wait);
if (!skb_queue_empty(&q->sk.sk_receive_queue))
mask |= POLLIN | POLLRDNORM;
@@ -440,14 +447,14 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q,
int vnet_hdr_len = 0;
if (q->flags & IFF_VNET_HDR) {
- vnet_hdr_len = sizeof(vnet_hdr);
+ vnet_hdr_len = q->vnet_hdr_sz;
err = -EINVAL;
if ((len -= vnet_hdr_len) < 0)
goto err;
err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0,
- vnet_hdr_len);
+ sizeof(vnet_hdr));
if (err < 0)
goto err;
if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
@@ -529,7 +536,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
if (q->flags & IFF_VNET_HDR) {
struct virtio_net_hdr vnet_hdr;
- vnet_hdr_len = sizeof (vnet_hdr);
+ vnet_hdr_len = q->vnet_hdr_sz;
if ((len -= vnet_hdr_len) < 0)
return -EINVAL;
@@ -537,7 +544,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
if (ret)
return ret;
- if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, vnet_hdr_len))
+ if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
return -EFAULT;
}
@@ -562,7 +569,7 @@ static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
struct sk_buff *skb;
ssize_t ret = 0;
- add_wait_queue(q->sk.sk_sleep, &wait);
+ add_wait_queue(sk_sleep(&q->sk), &wait);
while (len) {
current->state = TASK_INTERRUPTIBLE;
@@ -587,7 +594,7 @@ static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
}
current->state = TASK_RUNNING;
- remove_wait_queue(q->sk.sk_sleep, &wait);
+ remove_wait_queue(sk_sleep(&q->sk), &wait);
return ret;
}
@@ -622,6 +629,8 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
struct ifreq __user *ifr = argp;
unsigned int __user *up = argp;
unsigned int u;
+ int __user *sp = argp;
+ int s;
int ret;
switch (cmd) {
@@ -667,6 +676,21 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
q->sk.sk_sndbuf = u;
return 0;
+ case TUNGETVNETHDRSZ:
+ s = q->vnet_hdr_sz;
+ if (put_user(s, sp))
+ return -EFAULT;
+ return 0;
+
+ case TUNSETVNETHDRSZ:
+ if (get_user(s, sp))
+ return -EFAULT;
+ if (s < (int)sizeof(struct virtio_net_hdr))
+ return -EINVAL;
+
+ q->vnet_hdr_sz = s;
+ return 0;
+
case TUNSETOFFLOAD:
/* let the user check for future flags */
if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index 9f72cb45f4af..16a35944c2da 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -746,7 +746,7 @@ static void meth_tx_timeout(struct net_device *dev)
/* Enable interrupt */
spin_unlock_irqrestore(&priv->meth_lock, flags);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
return;
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 73c3d20c6453..96180c0ec206 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -161,39 +161,29 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
static void mlx4_en_clear_list(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- struct dev_mc_list *plist = priv->mc_list;
- struct dev_mc_list *next;
- while (plist) {
- next = plist->next;
- kfree(plist);
- plist = next;
- }
- priv->mc_list = NULL;
+ kfree(priv->mc_addrs);
+ priv->mc_addrs_cnt = 0;
}
static void mlx4_en_cache_mclist(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- struct dev_mc_list *mclist;
- struct dev_mc_list *tmp;
- struct dev_mc_list *plist = NULL;
-
- for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
- tmp = kmalloc(sizeof(struct dev_mc_list), GFP_ATOMIC);
- if (!tmp) {
- en_err(priv, "failed to allocate multicast list\n");
- mlx4_en_clear_list(dev);
- return;
- }
- memcpy(tmp, mclist, sizeof(struct dev_mc_list));
- tmp->next = NULL;
- if (plist)
- plist->next = tmp;
- else
- priv->mc_list = tmp;
- plist = tmp;
+ struct netdev_hw_addr *ha;
+ char *mc_addrs;
+ int mc_addrs_cnt = netdev_mc_count(dev);
+ int i;
+
+ mc_addrs = kmalloc(mc_addrs_cnt * ETH_ALEN, GFP_ATOMIC);
+ if (!mc_addrs) {
+ en_err(priv, "failed to allocate multicast list\n");
+ return;
}
+ i = 0;
+ netdev_for_each_mc_addr(ha, dev)
+ memcpy(mc_addrs + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
+ priv->mc_addrs = mc_addrs;
+ priv->mc_addrs_cnt = mc_addrs_cnt;
}
@@ -213,7 +203,6 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
mcast_task);
struct mlx4_en_dev *mdev = priv->mdev;
struct net_device *dev = priv->dev;
- struct dev_mc_list *mclist;
u64 mcast_addr = 0;
int err;
@@ -289,6 +278,8 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
if (err)
en_err(priv, "Failed disabling multicast filter\n");
} else {
+ int i;
+
err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
0, MLX4_MCAST_DISABLE);
if (err)
@@ -303,8 +294,9 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
netif_tx_lock_bh(dev);
mlx4_en_cache_mclist(dev);
netif_tx_unlock_bh(dev);
- for (mclist = priv->mc_list; mclist; mclist = mclist->next) {
- mcast_addr = mlx4_en_mac_to_u64(mclist->dmi_addr);
+ for (i = 0; i < priv->mc_addrs_cnt; i++) {
+ mcast_addr =
+ mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN);
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
mcast_addr, 0, MLX4_MCAST_CONFIG);
}
@@ -512,7 +504,7 @@ static void mlx4_en_do_get_stats(struct work_struct *work)
err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
if (err)
- en_dbg(HW, priv, "Could not update stats \n");
+ en_dbg(HW, priv, "Could not update stats\n");
mutex_lock(&mdev->state_lock);
if (mdev->device_up) {
@@ -985,7 +977,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->flags = prof->flags;
priv->tx_ring_num = prof->tx_ring_num;
priv->rx_ring_num = prof->rx_ring_num;
- priv->mc_list = NULL;
priv->mac_index = -1;
priv->msg_enable = MLX4_EN_MSG_LEVEL;
spin_lock_init(&priv->stats_lock);
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index 82c3ebc584e3..b55e46c8b682 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -492,7 +492,8 @@ struct mlx4_en_priv {
struct mlx4_en_perf_stats pstats;
struct mlx4_en_pkt_stats pkstats;
struct mlx4_en_port_stats port_stats;
- struct dev_mc_list *mc_list;
+ char *mc_addrs;
+ int mc_addrs_cnt;
struct mlx4_en_stat_out_mbox hw_stats;
};
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 8613a52ddf17..1f724e53c728 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -882,7 +882,6 @@ static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
txq->tx_bytes += skb->len;
txq->tx_packets++;
- dev->trans_start = jiffies;
entries_left = txq->tx_ring_size - txq->tx_desc_count;
if (entries_left < MAX_SKB_FRAGS + 1)
@@ -1770,7 +1769,7 @@ static void mv643xx_eth_program_multicast_filter(struct net_device *dev)
struct mv643xx_eth_private *mp = netdev_priv(dev);
u32 *mc_spec;
u32 *mc_other;
- struct dev_addr_list *addr;
+ struct netdev_hw_addr *ha;
int i;
if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
@@ -1795,8 +1794,8 @@ oom:
memset(mc_spec, 0, 0x100);
memset(mc_other, 0, 0x100);
- netdev_for_each_mc_addr(addr, dev) {
- u8 *a = addr->da_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ u8 *a = ha->addr;
u32 *table;
int entry;
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index ecde0876a785..5ae8a93fe4f5 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -110,15 +110,15 @@ MODULE_LICENSE("Dual BSD/GPL");
struct myri10ge_rx_buffer_state {
struct page *page;
int page_offset;
- DECLARE_PCI_UNMAP_ADDR(bus)
- DECLARE_PCI_UNMAP_LEN(len)
+ DEFINE_DMA_UNMAP_ADDR(bus);
+ DEFINE_DMA_UNMAP_LEN(len);
};
struct myri10ge_tx_buffer_state {
struct sk_buff *skb;
int last;
- DECLARE_PCI_UNMAP_ADDR(bus)
- DECLARE_PCI_UNMAP_LEN(len)
+ DEFINE_DMA_UNMAP_ADDR(bus);
+ DEFINE_DMA_UNMAP_LEN(len);
};
struct myri10ge_cmd {
@@ -239,6 +239,7 @@ struct myri10ge_priv {
int watchdog_resets;
int watchdog_pause;
int pause;
+ bool fw_name_allocated;
char *fw_name;
char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE];
char *product_code_string;
@@ -271,6 +272,7 @@ MODULE_FIRMWARE("myri10ge_eth_z8e.dat");
MODULE_FIRMWARE("myri10ge_rss_ethp_z8e.dat");
MODULE_FIRMWARE("myri10ge_rss_eth_z8e.dat");
+/* Careful: must be accessed under kparam_block_sysfs_write */
static char *myri10ge_fw_name = NULL;
module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name");
@@ -376,6 +378,14 @@ static inline void put_be32(__be32 val, __be32 __iomem * p)
static struct net_device_stats *myri10ge_get_stats(struct net_device *dev);
+static void set_fw_name(struct myri10ge_priv *mgp, char *name, bool allocated)
+{
+ if (mgp->fw_name_allocated)
+ kfree(mgp->fw_name);
+ mgp->fw_name = name;
+ mgp->fw_name_allocated = allocated;
+}
+
static int
myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd,
struct myri10ge_cmd *data, int atomic)
@@ -747,7 +757,7 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp, int adopt)
dev_warn(&mgp->pdev->dev, "via hotplug\n");
}
- mgp->fw_name = "adopted";
+ set_fw_name(mgp, "adopted", false);
mgp->tx_boundary = 2048;
myri10ge_dummy_rdma(mgp, 1);
status = myri10ge_get_firmware_capabilities(mgp);
@@ -1234,7 +1244,7 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
rx->info[idx].page_offset = rx->page_offset;
/* note that this is the address of the start of the
* page */
- pci_unmap_addr_set(&rx->info[idx], bus, rx->bus);
+ dma_unmap_addr_set(&rx->info[idx], bus, rx->bus);
rx->shadow[idx].addr_low =
htonl(MYRI10GE_LOWPART_TO_U32(rx->bus) + rx->page_offset);
rx->shadow[idx].addr_high =
@@ -1266,7 +1276,7 @@ myri10ge_unmap_rx_page(struct pci_dev *pdev,
/* unmap the recvd page if we're the only or last user of it */
if (bytes >= MYRI10GE_ALLOC_SIZE / 2 ||
(info->page_offset + 2 * bytes) > MYRI10GE_ALLOC_SIZE) {
- pci_unmap_page(pdev, (pci_unmap_addr(info, bus)
+ pci_unmap_page(pdev, (dma_unmap_addr(info, bus)
& ~(MYRI10GE_ALLOC_SIZE - 1)),
MYRI10GE_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
}
@@ -1373,21 +1383,21 @@ myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index)
tx->info[idx].last = 0;
}
tx->done++;
- len = pci_unmap_len(&tx->info[idx], len);
- pci_unmap_len_set(&tx->info[idx], len, 0);
+ len = dma_unmap_len(&tx->info[idx], len);
+ dma_unmap_len_set(&tx->info[idx], len, 0);
if (skb) {
ss->stats.tx_bytes += skb->len;
ss->stats.tx_packets++;
dev_kfree_skb_irq(skb);
if (len)
pci_unmap_single(pdev,
- pci_unmap_addr(&tx->info[idx],
+ dma_unmap_addr(&tx->info[idx],
bus), len,
PCI_DMA_TODEVICE);
} else {
if (len)
pci_unmap_page(pdev,
- pci_unmap_addr(&tx->info[idx],
+ dma_unmap_addr(&tx->info[idx],
bus), len,
PCI_DMA_TODEVICE);
}
@@ -2094,20 +2104,20 @@ static void myri10ge_free_rings(struct myri10ge_slice_state *ss)
/* Mark as free */
tx->info[idx].skb = NULL;
tx->done++;
- len = pci_unmap_len(&tx->info[idx], len);
- pci_unmap_len_set(&tx->info[idx], len, 0);
+ len = dma_unmap_len(&tx->info[idx], len);
+ dma_unmap_len_set(&tx->info[idx], len, 0);
if (skb) {
ss->stats.tx_dropped++;
dev_kfree_skb_any(skb);
if (len)
pci_unmap_single(mgp->pdev,
- pci_unmap_addr(&tx->info[idx],
+ dma_unmap_addr(&tx->info[idx],
bus), len,
PCI_DMA_TODEVICE);
} else {
if (len)
pci_unmap_page(mgp->pdev,
- pci_unmap_addr(&tx->info[idx],
+ dma_unmap_addr(&tx->info[idx],
bus), len,
PCI_DMA_TODEVICE);
}
@@ -2757,12 +2767,12 @@ again:
}
/* map the skb for DMA */
- len = skb->len - skb->data_len;
+ len = skb_headlen(skb);
idx = tx->req & tx->mask;
tx->info[idx].skb = skb;
bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE);
- pci_unmap_addr_set(&tx->info[idx], bus, bus);
- pci_unmap_len_set(&tx->info[idx], len, len);
+ dma_unmap_addr_set(&tx->info[idx], bus, bus);
+ dma_unmap_len_set(&tx->info[idx], len, len);
frag_cnt = skb_shinfo(skb)->nr_frags;
frag_idx = 0;
@@ -2865,8 +2875,8 @@ again:
len = frag->size;
bus = pci_map_page(mgp->pdev, frag->page, frag->page_offset,
len, PCI_DMA_TODEVICE);
- pci_unmap_addr_set(&tx->info[idx], bus, bus);
- pci_unmap_len_set(&tx->info[idx], len, len);
+ dma_unmap_addr_set(&tx->info[idx], bus, bus);
+ dma_unmap_len_set(&tx->info[idx], len, len);
}
(req - rdma_count)->rdma_count = rdma_count;
@@ -2903,19 +2913,19 @@ abort_linearize:
idx = tx->req & tx->mask;
tx->info[idx].skb = NULL;
do {
- len = pci_unmap_len(&tx->info[idx], len);
+ len = dma_unmap_len(&tx->info[idx], len);
if (len) {
if (tx->info[idx].skb != NULL)
pci_unmap_single(mgp->pdev,
- pci_unmap_addr(&tx->info[idx],
+ dma_unmap_addr(&tx->info[idx],
bus), len,
PCI_DMA_TODEVICE);
else
pci_unmap_page(mgp->pdev,
- pci_unmap_addr(&tx->info[idx],
+ dma_unmap_addr(&tx->info[idx],
bus), len,
PCI_DMA_TODEVICE);
- pci_unmap_len_set(&tx->info[idx], len, 0);
+ dma_unmap_len_set(&tx->info[idx], len, 0);
tx->info[idx].skb = NULL;
}
idx = (idx + 1) & tx->mask;
@@ -3002,7 +3012,7 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
{
struct myri10ge_priv *mgp = netdev_priv(dev);
struct myri10ge_cmd cmd;
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
__be32 data[2] = { 0, 0 };
int err;
@@ -3039,8 +3049,8 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
}
/* Walk the multicast list, and add each address */
- netdev_for_each_mc_addr(mc_list, dev) {
- memcpy(data, &mc_list->dmi_addr, 6);
+ netdev_for_each_mc_addr(ha, dev) {
+ memcpy(data, &ha->addr, 6);
cmd.data0 = ntohl(data[0]);
cmd.data1 = ntohl(data[1]);
err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP,
@@ -3048,7 +3058,7 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
if (err != 0) {
netdev_err(dev, "Failed MXGEFW_JOIN_MULTICAST_GROUP, error status:%d %pM\n",
- err, mc_list->dmi_addr);
+ err, ha->addr);
goto abort;
}
}
@@ -3229,7 +3239,7 @@ static void myri10ge_firmware_probe(struct myri10ge_priv *mgp)
* load the optimized firmware (which assumes aligned PCIe
* completions) in order to see if it works on this host.
*/
- mgp->fw_name = myri10ge_fw_aligned;
+ set_fw_name(mgp, myri10ge_fw_aligned, false);
status = myri10ge_load_firmware(mgp, 1);
if (status != 0) {
goto abort;
@@ -3257,7 +3267,7 @@ static void myri10ge_firmware_probe(struct myri10ge_priv *mgp)
abort:
/* fall back to using the unaligned firmware */
mgp->tx_boundary = 2048;
- mgp->fw_name = myri10ge_fw_unaligned;
+ set_fw_name(mgp, myri10ge_fw_unaligned, false);
}
@@ -3280,7 +3290,7 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
dev_info(&mgp->pdev->dev, "PCIE x%d Link\n",
link_width);
mgp->tx_boundary = 4096;
- mgp->fw_name = myri10ge_fw_aligned;
+ set_fw_name(mgp, myri10ge_fw_aligned, false);
} else {
myri10ge_firmware_probe(mgp);
}
@@ -3289,22 +3299,29 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
dev_info(&mgp->pdev->dev,
"Assuming aligned completions (forced)\n");
mgp->tx_boundary = 4096;
- mgp->fw_name = myri10ge_fw_aligned;
+ set_fw_name(mgp, myri10ge_fw_aligned, false);
} else {
dev_info(&mgp->pdev->dev,
"Assuming unaligned completions (forced)\n");
mgp->tx_boundary = 2048;
- mgp->fw_name = myri10ge_fw_unaligned;
+ set_fw_name(mgp, myri10ge_fw_unaligned, false);
}
}
+
+ kparam_block_sysfs_write(myri10ge_fw_name);
if (myri10ge_fw_name != NULL) {
- overridden = 1;
- mgp->fw_name = myri10ge_fw_name;
+ char *fw_name = kstrdup(myri10ge_fw_name, GFP_KERNEL);
+ if (fw_name) {
+ overridden = 1;
+ set_fw_name(mgp, fw_name, true);
+ }
}
+ kparam_unblock_sysfs_write(myri10ge_fw_name);
+
if (mgp->board_number < MYRI10GE_MAX_BOARDS &&
myri10ge_fw_names[mgp->board_number] != NULL &&
strlen(myri10ge_fw_names[mgp->board_number])) {
- mgp->fw_name = myri10ge_fw_names[mgp->board_number];
+ set_fw_name(mgp, myri10ge_fw_names[mgp->board_number], false);
overridden = 1;
}
if (overridden)
@@ -3656,6 +3673,7 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
struct myri10ge_cmd cmd;
struct pci_dev *pdev = mgp->pdev;
char *old_fw;
+ bool old_allocated;
int i, status, ncpus, msix_cap;
mgp->num_slices = 1;
@@ -3668,17 +3686,23 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
/* try to load the slice aware rss firmware */
old_fw = mgp->fw_name;
+ old_allocated = mgp->fw_name_allocated;
+ /* don't free old_fw if we override it. */
+ mgp->fw_name_allocated = false;
+
if (myri10ge_fw_name != NULL) {
dev_info(&mgp->pdev->dev, "overriding rss firmware to %s\n",
myri10ge_fw_name);
- mgp->fw_name = myri10ge_fw_name;
+ set_fw_name(mgp, myri10ge_fw_name, false);
} else if (old_fw == myri10ge_fw_aligned)
- mgp->fw_name = myri10ge_fw_rss_aligned;
+ set_fw_name(mgp, myri10ge_fw_rss_aligned, false);
else
- mgp->fw_name = myri10ge_fw_rss_unaligned;
+ set_fw_name(mgp, myri10ge_fw_rss_unaligned, false);
status = myri10ge_load_firmware(mgp, 0);
if (status != 0) {
dev_info(&pdev->dev, "Rss firmware not found\n");
+ if (old_allocated)
+ kfree(old_fw);
return;
}
@@ -3743,6 +3767,8 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
mgp->num_slices);
if (status == 0) {
pci_disable_msix(pdev);
+ if (old_allocated)
+ kfree(old_fw);
return;
}
if (status > 0)
@@ -3759,7 +3785,7 @@ disable_msix:
abort_with_fw:
mgp->num_slices = 1;
- mgp->fw_name = old_fw;
+ set_fw_name(mgp, old_fw, old_allocated);
myri10ge_load_firmware(mgp, 0);
}
@@ -3989,6 +4015,7 @@ abort_with_enabled:
pci_disable_device(pdev);
abort_with_netdev:
+ set_fw_name(mgp, NULL, false);
free_netdev(netdev);
return status;
}
@@ -4033,6 +4060,7 @@ static void myri10ge_remove(struct pci_dev *pdev)
dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
mgp->cmd, mgp->cmd_bus);
+ set_fw_name(mgp, NULL, false);
free_netdev(netdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index e52038783245..2a17b503feaa 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -1905,7 +1905,7 @@ static void ns_tx_timeout(struct net_device *dev)
spin_unlock_irq(&np->lock);
enable_irq(dev->irq);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
np->stats.tx_errors++;
netif_wake_queue(dev);
}
@@ -2119,8 +2119,6 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
}
spin_unlock_irqrestore(&np->lock, flags);
- dev->trans_start = jiffies;
-
if (netif_msg_tx_queued(np)) {
printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
dev->name, np->cur_tx, entry);
@@ -2493,12 +2491,12 @@ static void __set_rx_mode(struct net_device *dev)
rx_mode = RxFilterEnable | AcceptBroadcast
| AcceptAllMulticast | AcceptMyPhys;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
int i;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
- int b = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 23) & 0x1ff;
+ netdev_for_each_mc_addr(ha, dev) {
+ int b = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x1ff;
mc_filter[b/8] |= (1 << (b & 0x07));
}
rx_mode = RxFilterEnable | AcceptBroadcast
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index a361dea35574..ca142c47b2e4 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -665,7 +665,8 @@ static int netconsole_netdev_event(struct notifier_block *this,
struct netconsole_target *nt;
struct net_device *dev = ptr;
- if (!(event == NETDEV_CHANGENAME || event == NETDEV_UNREGISTER))
+ if (!(event == NETDEV_CHANGENAME || event == NETDEV_UNREGISTER ||
+ event == NETDEV_BONDING_DESLAVE || event == NETDEV_GOING_DOWN))
goto done;
spin_lock_irqsave(&target_list_lock, flags);
@@ -677,19 +678,21 @@ static int netconsole_netdev_event(struct notifier_block *this,
strlcpy(nt->np.dev_name, dev->name, IFNAMSIZ);
break;
case NETDEV_UNREGISTER:
- if (!nt->enabled)
- break;
netpoll_cleanup(&nt->np);
+ /* Fall through */
+ case NETDEV_GOING_DOWN:
+ case NETDEV_BONDING_DESLAVE:
nt->enabled = 0;
- printk(KERN_INFO "netconsole: network logging stopped"
- ", interface %s unregistered\n",
- dev->name);
break;
}
}
netconsole_target_put(nt);
}
spin_unlock_irqrestore(&target_list_lock, flags);
+ if (event == NETDEV_UNREGISTER || event == NETDEV_BONDING_DESLAVE)
+ printk(KERN_INFO "netconsole: network logging stopped, "
+ "interface %s %s\n", dev->name,
+ event == NETDEV_UNREGISTER ? "unregistered" : "released slaves");
done:
return NOTIFY_DONE;
diff --git a/drivers/net/netx-eth.c b/drivers/net/netx-eth.c
index 64770298c4f7..2e4b42175f3f 100644
--- a/drivers/net/netx-eth.c
+++ b/drivers/net/netx-eth.c
@@ -126,7 +126,6 @@ netx_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
FIFO_PTR_FRAMENO(1) |
FIFO_PTR_FRAMELEN(len));
- ndev->trans_start = jiffies;
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += skb->len;
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 0f703838e21a..174ac8ef82fa 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -420,7 +420,6 @@ struct status_desc {
} __attribute__ ((aligned(16)));
/* UNIFIED ROMIMAGE *************************/
-#define NX_UNI_FW_MIN_SIZE 0xc8000
#define NX_UNI_DIR_SECT_PRODUCT_TBL 0x0
#define NX_UNI_DIR_SECT_BOOTLD 0x6
#define NX_UNI_DIR_SECT_FW 0x7
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index f8499e56cbee..aecba787f7c8 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -703,6 +703,11 @@ netxen_nic_get_ethtool_stats(struct net_device *dev,
}
}
+static u32 netxen_nic_get_tx_csum(struct net_device *dev)
+{
+ return dev->features & NETIF_F_IP_CSUM;
+}
+
static u32 netxen_nic_get_rx_csum(struct net_device *dev)
{
struct netxen_adapter *adapter = netdev_priv(dev);
@@ -909,6 +914,7 @@ const struct ethtool_ops netxen_nic_ethtool_ops = {
.set_ringparam = netxen_nic_set_ringparam,
.get_pauseparam = netxen_nic_get_pauseparam,
.set_pauseparam = netxen_nic_set_pauseparam,
+ .get_tx_csum = netxen_nic_get_tx_csum,
.set_tx_csum = ethtool_op_set_tx_csum,
.set_sg = ethtool_op_set_sg,
.get_tso = netxen_nic_get_tso,
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index b1cf46a0c48c..5e5fe2fd6397 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -538,7 +538,7 @@ netxen_nic_set_mcast_addr(struct netxen_adapter *adapter,
void netxen_p2_nic_set_multi(struct net_device *netdev)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u8 null_addr[6];
int i;
@@ -572,8 +572,8 @@ void netxen_p2_nic_set_multi(struct net_device *netdev)
netxen_nic_enable_mcast_filter(adapter);
i = 0;
- netdev_for_each_mc_addr(mc_ptr, netdev)
- netxen_nic_set_mcast_addr(adapter, i++, mc_ptr->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev)
+ netxen_nic_set_mcast_addr(adapter, i++, ha->addr);
/* Clear out remaining addresses */
while (i < adapter->max_mc_count)
@@ -681,7 +681,7 @@ static int nx_p3_nic_add_mac(struct netxen_adapter *adapter,
void netxen_p3_nic_set_multi(struct net_device *netdev)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
u32 mode = VPORT_MISS_MODE_DROP;
LIST_HEAD(del_list);
@@ -708,8 +708,8 @@ void netxen_p3_nic_set_multi(struct net_device *netdev)
}
if (!netdev_mc_empty(netdev)) {
- netdev_for_each_mc_addr(mc_ptr, netdev)
- nx_p3_nic_add_mac(adapter, mc_ptr->dmi_addr, &del_list);
+ netdev_for_each_mc_addr(ha, netdev)
+ nx_p3_nic_add_mac(adapter, ha->addr, &del_list);
}
send_fw_cmd:
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 02876f59cbb2..388feaf60ee7 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -614,22 +614,123 @@ static struct uni_table_desc *nx_get_table_desc(const u8 *unirom, int section)
return NULL;
}
+#define QLCNIC_FILEHEADER_SIZE (14 * 4)
+
static int
-nx_set_product_offs(struct netxen_adapter *adapter)
-{
- struct uni_table_desc *ptab_descr;
+netxen_nic_validate_header(struct netxen_adapter *adapter)
+{
const u8 *unirom = adapter->fw->data;
- uint32_t i;
+ struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
+ u32 fw_file_size = adapter->fw->size;
+ u32 tab_size;
__le32 entries;
+ __le32 entry_size;
+
+ if (fw_file_size < QLCNIC_FILEHEADER_SIZE)
+ return -EINVAL;
+
+ entries = cpu_to_le32(directory->num_entries);
+ entry_size = cpu_to_le32(directory->entry_size);
+ tab_size = cpu_to_le32(directory->findex) + (entries * entry_size);
+
+ if (fw_file_size < tab_size)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+netxen_nic_validate_bootld(struct netxen_adapter *adapter)
+{
+ struct uni_table_desc *tab_desc;
+ struct uni_data_desc *descr;
+ const u8 *unirom = adapter->fw->data;
+ __le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
+ NX_UNI_BOOTLD_IDX_OFF));
+ u32 offs;
+ u32 tab_size;
+ u32 data_size;
+
+ tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_BOOTLD);
+
+ if (!tab_desc)
+ return -EINVAL;
+
+ tab_size = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
+
+ if (adapter->fw->size < tab_size)
+ return -EINVAL;
+
+ offs = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * (idx));
+ descr = (struct uni_data_desc *)&unirom[offs];
+
+ data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
+
+ if (adapter->fw->size < data_size)
+ return -EINVAL;
+ return 0;
+}
+
+static int
+netxen_nic_validate_fw(struct netxen_adapter *adapter)
+{
+ struct uni_table_desc *tab_desc;
+ struct uni_data_desc *descr;
+ const u8 *unirom = adapter->fw->data;
+ __le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
+ NX_UNI_FIRMWARE_IDX_OFF));
+ u32 offs;
+ u32 tab_size;
+ u32 data_size;
+
+ tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_FW);
+
+ if (!tab_desc)
+ return -EINVAL;
+
+ tab_size = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
+
+ if (adapter->fw->size < tab_size)
+ return -EINVAL;
+
+ offs = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * (idx));
+ descr = (struct uni_data_desc *)&unirom[offs];
+ data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
+
+ if (adapter->fw->size < data_size)
+ return -EINVAL;
+
+ return 0;
+}
+
+
+static int
+netxen_nic_validate_product_offs(struct netxen_adapter *adapter)
+{
+ struct uni_table_desc *ptab_descr;
+ const u8 *unirom = adapter->fw->data;
int mn_present = (NX_IS_REVISION_P2(adapter->ahw.revision_id)) ?
1 : netxen_p3_has_mn(adapter);
+ __le32 entries;
+ __le32 entry_size;
+ u32 tab_size;
+ u32 i;
ptab_descr = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_PRODUCT_TBL);
if (ptab_descr == NULL)
- return -1;
+ return -EINVAL;
entries = cpu_to_le32(ptab_descr->num_entries);
+ entry_size = cpu_to_le32(ptab_descr->entry_size);
+ tab_size = cpu_to_le32(ptab_descr->findex) + (entries * entry_size);
+
+ if (adapter->fw->size < tab_size)
+ return -EINVAL;
nomn:
for (i = 0; i < entries; i++) {
@@ -658,9 +759,38 @@ nomn:
goto nomn;
}
- return -1;
+ return -EINVAL;
}
+static int
+netxen_nic_validate_unified_romimage(struct netxen_adapter *adapter)
+{
+ if (netxen_nic_validate_header(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: header validation failed\n");
+ return -EINVAL;
+ }
+
+ if (netxen_nic_validate_product_offs(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: product validation failed\n");
+ return -EINVAL;
+ }
+
+ if (netxen_nic_validate_bootld(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: bootld validation failed\n");
+ return -EINVAL;
+ }
+
+ if (netxen_nic_validate_fw(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: firmware validation failed\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
static struct uni_data_desc *nx_get_data_desc(struct netxen_adapter *adapter,
u32 section, u32 idx_offset)
@@ -890,6 +1020,16 @@ netxen_load_firmware(struct netxen_adapter *adapter)
flashaddr += 8;
}
+
+ size = (__force u32)nx_get_fw_size(adapter) % 8;
+ if (size) {
+ data = cpu_to_le64(ptr64[i]);
+
+ if (adapter->pci_mem_write(adapter,
+ flashaddr, data))
+ return -EIO;
+ }
+
} else {
u64 data;
u32 hi, lo;
@@ -934,27 +1074,23 @@ static int
netxen_validate_firmware(struct netxen_adapter *adapter)
{
__le32 val;
- u32 ver, min_ver, bios, min_size;
+ u32 ver, min_ver, bios;
struct pci_dev *pdev = adapter->pdev;
const struct firmware *fw = adapter->fw;
u8 fw_type = adapter->fw_type;
if (fw_type == NX_UNIFIED_ROMIMAGE) {
- if (nx_set_product_offs(adapter))
+ if (netxen_nic_validate_unified_romimage(adapter))
return -EINVAL;
-
- min_size = NX_UNI_FW_MIN_SIZE;
} else {
val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_MAGIC_OFFSET]);
if ((__force u32)val != NETXEN_BDINFO_MAGIC)
return -EINVAL;
- min_size = NX_FW_MIN_SIZE;
+ if (fw->size < NX_FW_MIN_SIZE)
+ return -EINVAL;
}
- if (fw->size < min_size)
- return -EINVAL;
-
val = nx_get_fw_version(adapter);
if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index ce838f7c8b0f..b665b420a4f2 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -782,15 +782,22 @@ netxen_check_options(struct netxen_adapter *adapter)
if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
adapter->msix_supported = !!use_msi_x;
adapter->rss_supported = !!use_msi_x;
- } else if (adapter->fw_version >= NETXEN_VERSION_CODE(3, 4, 336)) {
- switch (adapter->ahw.board_type) {
- case NETXEN_BRDTYPE_P2_SB31_10G:
- case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
- adapter->msix_supported = !!use_msi_x;
- adapter->rss_supported = !!use_msi_x;
- break;
- default:
- break;
+ } else {
+ u32 flashed_ver = 0;
+ netxen_rom_fast_read(adapter,
+ NX_FW_VERSION_OFFSET, (int *)&flashed_ver);
+ flashed_ver = NETXEN_DECODE_VERSION(flashed_ver);
+
+ if (flashed_ver >= NETXEN_VERSION_CODE(3, 4, 336)) {
+ switch (adapter->ahw.board_type) {
+ case NETXEN_BRDTYPE_P2_SB31_10G:
+ case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
+ adapter->msix_supported = !!use_msi_x;
+ adapter->rss_supported = !!use_msi_x;
+ break;
+ default:
+ break;
+ }
}
}
@@ -2304,6 +2311,7 @@ netxen_fwinit_work(struct work_struct *work)
}
break;
+ case NX_DEV_NEED_RESET:
case NX_DEV_INITALIZING:
if (++adapter->fw_wait_cnt < FW_POLL_THRESH) {
netxen_schedule_work(adapter,
@@ -2347,6 +2355,9 @@ netxen_detach_work(struct work_struct *work)
ref_cnt = nx_decr_dev_ref_cnt(adapter);
+ if (ref_cnt == -EIO)
+ goto err_ret;
+
delay = (ref_cnt == 0) ? 0 : (2 * FW_POLL_DELAY);
adapter->fw_wait_cnt = 0;
diff --git a/drivers/net/ni5010.c b/drivers/net/ni5010.c
index 3892330f244a..f80b50159114 100644
--- a/drivers/net/ni5010.c
+++ b/drivers/net/ni5010.c
@@ -444,7 +444,7 @@ static void ni5010_timeout(struct net_device *dev)
/* Try to restart the adaptor. */
/* FIXME: Give it a real kick here */
chipset_init(dev, 1);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -460,7 +460,6 @@ static int ni5010_send_packet(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
hardware_send_packet(dev, (unsigned char *)skb->data, skb->len, length-skb->len);
- dev->trans_start = jiffies;
dev_kfree_skb (skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index f7a8f707361e..9bddb5fa7a96 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -595,7 +595,7 @@ static int init586(struct net_device *dev)
struct iasetup_cmd_struct __iomem *ias_cmd;
struct tdr_cmd_struct __iomem *tdr_cmd;
struct mcsetup_cmd_struct __iomem *mc_cmd;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int num_addrs = netdev_mc_count(dev);
ptr = p->scb + 1;
@@ -724,8 +724,8 @@ static int init586(struct net_device *dev)
writew(num_addrs * 6, &mc_cmd->mc_cnt);
i = 0;
- netdev_for_each_mc_addr(dmi, dev)
- memcpy_toio(mc_cmd->mc_list[i++], dmi->dmi_addr, 6);
+ netdev_for_each_mc_addr(ha, dev)
+ memcpy_toio(mc_cmd->mc_list[i++], ha->addr, 6);
writew(make16(mc_cmd), &p->scb->cbl_offset);
writeb(CUC_START, &p->scb->cmd_cuc);
@@ -1147,7 +1147,7 @@ static void ni52_timeout(struct net_device *dev)
writeb(CUC_START, &p->scb->cmd_cuc);
ni_attn586();
wait_for_scb_cmd(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
return 0;
}
#endif
@@ -1165,7 +1165,7 @@ static void ni52_timeout(struct net_device *dev)
ni52_close(dev);
ni52_open(dev);
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
}
/******************************************************
@@ -1218,7 +1218,6 @@ static netdev_tx_t ni52_send_packet(struct sk_buff *skb,
writeb(CUC_START, &p->scb->cmd_cuc);
}
ni_attn586();
- dev->trans_start = jiffies;
if (!i)
dev_kfree_skb(skb);
wait_for_scb_cmd(dev);
@@ -1240,7 +1239,6 @@ static netdev_tx_t ni52_send_packet(struct sk_buff *skb,
writew(0, &p->nop_cmds[next_nop]->cmd_status);
writew(make16(p->xmit_cmds[0]), &p->nop_cmds[p->nop_point]->cmd_link);
- dev->trans_start = jiffies;
p->nop_point = next_nop;
dev_kfree_skb(skb);
# endif
@@ -1256,7 +1254,6 @@ static netdev_tx_t ni52_send_packet(struct sk_buff *skb,
writew(0, &p->nop_cmds[next_nop]->cmd_status);
writew(make16(p->xmit_cmds[p->xmit_count]),
&p->nop_cmds[p->xmit_count]->cmd_link);
- dev->trans_start = jiffies;
p->xmit_count = next_nop;
{
unsigned long flags;
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c
index 9225c76cac40..da228a0dd6cd 100644
--- a/drivers/net/ni65.c
+++ b/drivers/net/ni65.c
@@ -784,7 +784,7 @@ static void ni65_stop_start(struct net_device *dev,struct priv *p)
if(!p->lock)
if (p->tmdnum || !p->xmit_queued)
netif_wake_queue(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
}
else
writedatareg(CSR0_STRT | csr0);
@@ -1150,7 +1150,7 @@ static void ni65_timeout(struct net_device *dev)
printk("%02x ",p->tmdhead[i].u.s.status);
printk("\n");
ni65_lance_reinit(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -1213,7 +1213,6 @@ static netdev_tx_t ni65_send_packet(struct sk_buff *skb,
netif_wake_queue(dev);
p->lock = 0;
- dev->trans_start = jiffies;
spin_unlock_irqrestore(&p->ring_lock, flags);
}
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index d5cd16bfc907..30abb4e436f1 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -36,8 +36,8 @@
#include "niu.h"
#define DRV_MODULE_NAME "niu"
-#define DRV_MODULE_VERSION "1.0"
-#define DRV_MODULE_RELDATE "Nov 14, 2008"
+#define DRV_MODULE_VERSION "1.1"
+#define DRV_MODULE_RELDATE "Apr 22, 2010"
static char version[] __devinitdata =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -3444,6 +3444,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
struct rx_ring_info *rp)
{
unsigned int index = rp->rcr_index;
+ struct rx_pkt_hdr1 *rh;
struct sk_buff *skb;
int len, num_rcr;
@@ -3477,9 +3478,6 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
if (num_rcr == 1) {
int ptype;
- off += 2;
- append_size -= 2;
-
ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT);
if ((ptype == RCR_PKT_TYPE_TCP ||
ptype == RCR_PKT_TYPE_UDP) &&
@@ -3488,8 +3486,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb->ip_summed = CHECKSUM_NONE;
- }
- if (!(val & RCR_ENTRY_MULTI))
+ } else if (!(val & RCR_ENTRY_MULTI))
append_size = len - skb->len;
niu_rx_skb_append(skb, page, off, append_size);
@@ -3510,8 +3507,17 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
}
rp->rcr_index = index;
- skb_reserve(skb, NET_IP_ALIGN);
- __pskb_pull_tail(skb, min(len, VLAN_ETH_HLEN));
+ len += sizeof(*rh);
+ len = min_t(int, len, sizeof(*rh) + VLAN_ETH_HLEN);
+ __pskb_pull_tail(skb, len);
+
+ rh = (struct rx_pkt_hdr1 *) skb->data;
+ if (np->dev->features & NETIF_F_RXHASH)
+ skb->rxhash = ((u32)rh->hashval2_0 << 24 |
+ (u32)rh->hashval2_1 << 16 |
+ (u32)rh->hashval1_1 << 8 |
+ (u32)rh->hashval1_2 << 0);
+ skb_pull(skb, sizeof(*rh));
rp->rx_packets++;
rp->rx_bytes += skb->len;
@@ -4946,7 +4952,9 @@ static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
RX_DMA_CTL_STAT_RCRTO |
RX_DMA_CTL_STAT_RBR_EMPTY));
nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32);
- nw64(RXDMA_CFIG2(channel), (rp->mbox_dma & 0x00000000ffffffc0));
+ nw64(RXDMA_CFIG2(channel),
+ ((rp->mbox_dma & RXDMA_CFIG2_MBADDR_L) |
+ RXDMA_CFIG2_FULL_HDR));
nw64(RBR_CFIG_A(channel),
((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) |
(rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR)));
@@ -6314,7 +6322,6 @@ static void niu_set_rx_mode(struct net_device *dev)
{
struct niu *np = netdev_priv(dev);
int i, alt_cnt, err;
- struct dev_addr_list *addr;
struct netdev_hw_addr *ha;
unsigned long flags;
u16 hash[16] = { 0, };
@@ -6366,8 +6373,8 @@ static void niu_set_rx_mode(struct net_device *dev)
for (i = 0; i < 16; i++)
hash[i] = 0xffff;
} else if (!netdev_mc_empty(dev)) {
- netdev_for_each_mc_addr(addr, dev) {
- u32 crc = ether_crc_le(ETH_ALEN, addr->da_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ u32 crc = ether_crc_le(ETH_ALEN, ha->addr);
crc >>= 24;
hash[crc >> 4] |= (1 << (15 - (crc & 0xf)));
@@ -7911,6 +7918,18 @@ static int niu_phys_id(struct net_device *dev, u32 data)
return 0;
}
+static int niu_set_flags(struct net_device *dev, u32 data)
+{
+ if (data & (ETH_FLAG_LRO | ETH_FLAG_NTUPLE))
+ return -EOPNOTSUPP;
+
+ if (data & ETH_FLAG_RXHASH)
+ dev->features |= NETIF_F_RXHASH;
+ else
+ dev->features &= ~NETIF_F_RXHASH;
+ return 0;
+}
+
static const struct ethtool_ops niu_ethtool_ops = {
.get_drvinfo = niu_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -7927,6 +7946,8 @@ static const struct ethtool_ops niu_ethtool_ops = {
.phys_id = niu_phys_id,
.get_rxnfc = niu_get_nfc,
.set_rxnfc = niu_set_nfc,
+ .set_flags = niu_set_flags,
+ .get_flags = ethtool_op_get_flags,
};
static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
@@ -9755,6 +9776,12 @@ static void __devinit niu_device_announce(struct niu *np)
}
}
+static void __devinit niu_set_basic_features(struct net_device *dev)
+{
+ dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM |
+ NETIF_F_GRO | NETIF_F_RXHASH);
+}
+
static int __devinit niu_pci_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -9839,7 +9866,7 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
}
}
- dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM);
+ niu_set_basic_features(dev);
np->regs = pci_ioremap_bar(pdev, 0);
if (!np->regs) {
@@ -10081,7 +10108,7 @@ static int __devinit niu_of_probe(struct of_device *op,
goto err_out_free_dev;
}
- dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM);
+ niu_set_basic_features(dev);
np->regs = of_ioremap(&op->resource[1], 0,
resource_size(&op->resource[1]),
diff --git a/drivers/net/niu.h b/drivers/net/niu.h
index 3bd0b5933d59..d6715465f35d 100644
--- a/drivers/net/niu.h
+++ b/drivers/net/niu.h
@@ -2706,7 +2706,7 @@ struct rx_pkt_hdr0 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 inputport:2,
maccheck:1,
- class:4;
+ class:5;
u8 vlan:1,
llcsnap:1,
noport:1,
@@ -2715,7 +2715,7 @@ struct rx_pkt_hdr0 {
tres:2,
tzfvld:1;
#elif defined(__BIG_ENDIAN_BITFIELD)
- u8 class:4,
+ u8 class:5,
maccheck:1,
inputport:2;
u8 tzfvld:1,
@@ -2775,6 +2775,9 @@ struct rx_pkt_hdr1 {
/* Bits 7:0 of hash value, H1. */
u8 hashval1_2;
+ u8 hwrsvd5;
+ u8 hwrsvd6;
+
u8 usrdata_0; /* Bits 39:32 of user data. */
u8 usrdata_1; /* Bits 31:24 of user data. */
u8 usrdata_2; /* Bits 23:16 of user data. */
diff --git a/drivers/net/octeon/octeon_mgmt.c b/drivers/net/octeon/octeon_mgmt.c
index 8aadc8e2ddd7..43bf26fb5133 100644
--- a/drivers/net/octeon/octeon_mgmt.c
+++ b/drivers/net/octeon/octeon_mgmt.c
@@ -189,12 +189,19 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
while (mix_orcnt.s.orcnt) {
+ spin_lock_irqsave(&p->tx_list.lock, flags);
+
+ mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
+
+ if (mix_orcnt.s.orcnt == 0) {
+ spin_unlock_irqrestore(&p->tx_list.lock, flags);
+ break;
+ }
+
dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
DMA_BIDIRECTIONAL);
- spin_lock_irqsave(&p->tx_list.lock, flags);
-
re.d64 = p->tx_ring[p->tx_next_clean];
p->tx_next_clean =
(p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
@@ -317,7 +324,6 @@ good:
skb->protocol = eth_type_trans(skb, netdev);
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += skb->len;
- netdev->last_rx = jiffies;
netif_receive_skb(skb);
rc = 0;
} else if (re.s.code == RING_ENTRY_CODE_MORE) {
@@ -374,7 +380,6 @@ done:
mix_ircnt.s.ircnt = 1;
cvmx_write_csr(CVMX_MIXX_IRCNT(port), mix_ircnt.u64);
return rc;
-
}
static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
@@ -384,7 +389,6 @@ static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
union cvmx_mixx_ircnt mix_ircnt;
int rc;
-
mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
while (work_done < budget && mix_ircnt.s.ircnt) {
@@ -475,13 +479,12 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */
struct octeon_mgmt_cam_state cam_state;
- struct dev_addr_list *list;
- struct list_head *pos;
+ struct netdev_hw_addr *ha;
int available_cam_entries;
memset(&cam_state, 0, sizeof(cam_state));
- if ((netdev->flags & IFF_PROMISC) || netdev->dev_addrs.count > 7) {
+ if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) {
cam_mode = 0;
available_cam_entries = 8;
} else {
@@ -489,13 +492,13 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
* One CAM entry for the primary address, leaves seven
* for the secondary addresses.
*/
- available_cam_entries = 7 - netdev->dev_addrs.count;
+ available_cam_entries = 7 - netdev->uc.count;
}
if (netdev->flags & IFF_MULTICAST) {
if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
netdev_mc_count(netdev) > available_cam_entries)
- multicast_mode = 2; /* 1 - Accept all multicast. */
+ multicast_mode = 2; /* 2 - Accept all multicast. */
else
multicast_mode = 0; /* 0 - Use CAM. */
}
@@ -503,19 +506,14 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
if (cam_mode == 1) {
/* Add primary address. */
octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
- list_for_each(pos, &netdev->dev_addrs.list) {
- struct netdev_hw_addr *hw_addr;
- hw_addr = list_entry(pos, struct netdev_hw_addr, list);
- octeon_mgmt_cam_state_add(&cam_state, hw_addr->addr);
- list = list->next;
- }
+ netdev_for_each_uc_addr(ha, netdev)
+ octeon_mgmt_cam_state_add(&cam_state, ha->addr);
}
if (multicast_mode == 0) {
- netdev_for_each_mc_addr(list, netdev)
- octeon_mgmt_cam_state_add(&cam_state, list->da_addr);
+ netdev_for_each_mc_addr(ha, netdev)
+ octeon_mgmt_cam_state_add(&cam_state, ha->addr);
}
-
spin_lock_irqsave(&p->lock, flags);
/* Disable packet I/O. */
@@ -524,7 +522,6 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
agl_gmx_prtx.s.en = 0;
cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
-
adr_ctl.u64 = 0;
adr_ctl.s.cam_mode = cam_mode;
adr_ctl.s.mcst = multicast_mode;
@@ -597,8 +594,7 @@ static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
mixx_isr.u64 = cvmx_read_csr(CVMX_MIXX_ISR(port));
/* Clear any pending interrupts */
- cvmx_write_csr(CVMX_MIXX_ISR(port),
- cvmx_read_csr(CVMX_MIXX_ISR(port)));
+ cvmx_write_csr(CVMX_MIXX_ISR(port), mixx_isr.u64);
cvmx_read_csr(CVMX_MIXX_ISR(port));
if (mixx_isr.s.irthresh) {
@@ -832,9 +828,9 @@ static int octeon_mgmt_open(struct net_device *netdev)
mix_irhwm.s.irhwm = 0;
cvmx_write_csr(CVMX_MIXX_IRHWM(port), mix_irhwm.u64);
- /* Interrupt when we have 5 or more packets to clean. */
+ /* Interrupt when we have 1 or more packets to clean. */
mix_orhwm.u64 = 0;
- mix_orhwm.s.orhwm = 5;
+ mix_orhwm.s.orhwm = 1;
cvmx_write_csr(CVMX_MIXX_ORHWM(port), mix_orhwm.u64);
/* Enable receive and transmit interrupts */
@@ -928,7 +924,6 @@ static int octeon_mgmt_stop(struct net_device *netdev)
octeon_mgmt_reset_hw(p);
-
free_irq(p->irq, netdev);
/* dma_unmap is a nop on Octeon, so just free everything. */
@@ -945,7 +940,6 @@ static int octeon_mgmt_stop(struct net_device *netdev)
DMA_BIDIRECTIONAL);
kfree(p->tx_ring);
-
return 0;
}
@@ -955,6 +949,7 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
int port = p->port;
union mgmt_port_ring_entry re;
unsigned long flags;
+ int rv = NETDEV_TX_BUSY;
re.d64 = 0;
re.s.len = skb->len;
@@ -964,15 +959,18 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
spin_lock_irqsave(&p->tx_list.lock, flags);
+ if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) {
+ spin_unlock_irqrestore(&p->tx_list.lock, flags);
+ netif_stop_queue(netdev);
+ spin_lock_irqsave(&p->tx_list.lock, flags);
+ }
+
if (unlikely(p->tx_current_fill >=
ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
spin_unlock_irqrestore(&p->tx_list.lock, flags);
-
dma_unmap_single(p->dev, re.s.addr, re.s.len,
DMA_TO_DEVICE);
-
- netif_stop_queue(netdev);
- return NETDEV_TX_BUSY;
+ goto out;
}
__skb_queue_tail(&p->tx_list, skb);
@@ -994,10 +992,10 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Ring the bell. */
cvmx_write_csr(CVMX_MIXX_ORING2(port), 1);
- netdev->trans_start = jiffies;
- octeon_mgmt_clean_tx_buffers(p);
+ rv = NETDEV_TX_OK;
+out:
octeon_mgmt_update_tx_stats(netdev);
- return NETDEV_TX_OK;
+ return rv;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1107,7 +1105,6 @@ static int __init octeon_mgmt_probe(struct platform_device *pdev)
netdev->netdev_ops = &octeon_mgmt_ops;
netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;
-
/* The mgmt ports get the first N MACs. */
for (i = 0; i < 6; i++)
netdev->dev_addr[i] = octeon_bootinfo->mac_addr_base[i];
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index 36785853a149..56f3fc45dbaa 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -1354,7 +1354,6 @@ static int netdrv_start_xmit(struct sk_buff *skb, struct net_device *dev)
NETDRV_W32(TxStatus0 + (entry * sizeof(u32)),
tp->tx_flag | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
- dev->trans_start = jiffies;
atomic_inc(&tp->cur_tx);
if ((atomic_read(&tp->cur_tx) - atomic_read(&tp->dirty_tx)) >= NUM_TX_DESC)
netif_stop_queue(dev);
@@ -1813,12 +1812,12 @@ static void netdrv_set_rx_mode(struct net_device *dev)
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- netdev_for_each_mc_addr(mclist, dev) {
- int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
}
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index 757f87bb1db3..1ca9c07d8a5e 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -93,7 +93,6 @@ earlier 3Com products.
#include <pcmcia/cisreg.h>
#include <pcmcia/ciscode.h>
#include <pcmcia/ds.h>
-#include <pcmcia/mem_op.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -200,7 +199,6 @@ enum Window4 { /* Window 4: Xcvr/media bits. */
struct el3_private {
struct pcmcia_device *p_dev;
- dev_node_t node;
u16 advertising, partner; /* NWay media advertisement */
unsigned char phys; /* MII device address */
unsigned int autoselect:1, default_media:3; /* Read from the EEPROM/Wn3_Config. */
@@ -283,8 +281,6 @@ static int tc574_probe(struct pcmcia_device *link)
spin_lock_init(&lp->window_lock);
link->io.NumPorts1 = 32;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = &el3_interrupt;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.ConfigIndex = 1;
@@ -311,8 +307,7 @@ static void tc574_detach(struct pcmcia_device *link)
dev_dbg(&link->dev, "3c574_detach()\n");
- if (link->dev_node)
- unregister_netdev(dev);
+ unregister_netdev(dev);
tc574_release(link);
@@ -353,7 +348,7 @@ static int tc574_config(struct pcmcia_device *link)
if (i != 0)
goto failed;
- ret = pcmcia_request_irq(link, &link->irq);
+ ret = pcmcia_request_irq(link, el3_interrupt);
if (ret)
goto failed;
@@ -361,7 +356,7 @@ static int tc574_config(struct pcmcia_device *link)
if (ret)
goto failed;
- dev->irq = link->irq.AssignedIRQ;
+ dev->irq = link->irq;
dev->base_addr = link->io.BasePort1;
ioaddr = dev->base_addr;
@@ -446,17 +441,13 @@ static int tc574_config(struct pcmcia_device *link)
}
}
- link->dev_node = &lp->node;
SET_NETDEV_DEV(dev, &link->dev);
if (register_netdev(dev) != 0) {
printk(KERN_NOTICE "3c574_cs: register_netdev() failed\n");
- link->dev_node = NULL;
goto failed;
}
- strcpy(lp->node.dev_name, dev->name);
-
printk(KERN_INFO "%s: %s at io %#3lx, irq %d, "
"hw_addr %pM.\n",
dev->name, cardname, dev->base_addr, dev->irq,
@@ -739,7 +730,7 @@ static void el3_tx_timeout(struct net_device *dev)
printk(KERN_NOTICE "%s: Transmit timed out!\n", dev->name);
dump_status(dev);
dev->stats.tx_errors++;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
/* Issue TX_RESET and TX_START commands. */
tc574_wait_for_completion(dev, TxReset);
outw(TxEnable, ioaddr + EL3_CMD);
@@ -790,8 +781,6 @@ static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
/* ... and the packet rounded to a doubleword. */
outsl(ioaddr + TX_FIFO, skb->data, (skb->len+3)>>2);
- dev->trans_start = jiffies;
-
/* TxFree appears only in Window 1, not offset 0x1c. */
if (inw(ioaddr + TxFree) <= 1536) {
netif_stop_queue(dev);
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index 091e0b00043e..ce63c3773b4c 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -1,20 +1,20 @@
/*======================================================================
A PCMCIA ethernet driver for the 3com 3c589 card.
-
+
Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net
3c589_cs.c 1.162 2001/10/13 00:08:50
The network driver code is based on Donald Becker's 3c589 code:
-
+
Written 1994 by Donald Becker.
Copyright 1993 United States Government as represented by the
Director, National Security Agency. This software may be used and
distributed according to the terms of the GNU General Public License,
incorporated herein by reference.
Donald Becker may be reached at becker@scyld.com
-
+
Updated for 2.5.x by Alan Cox <alan@lxorguk.ukuu.org.uk>
======================================================================*/
@@ -69,31 +69,54 @@
/* The top five bits written to EL3_CMD are a command, the lower
11 bits are the parameter, if applicable. */
enum c509cmd {
- TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11,
- RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11, RxDiscard = 8<<11,
- TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11,
- FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11,
- SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11,
- SetTxThreshold = 18<<11, SetTxStart = 19<<11, StatsEnable = 21<<11,
- StatsDisable = 22<<11, StopCoax = 23<<11,
+ TotalReset = 0<<11,
+ SelectWindow = 1<<11,
+ StartCoax = 2<<11,
+ RxDisable = 3<<11,
+ RxEnable = 4<<11,
+ RxReset = 5<<11,
+ RxDiscard = 8<<11,
+ TxEnable = 9<<11,
+ TxDisable = 10<<11,
+ TxReset = 11<<11,
+ FakeIntr = 12<<11,
+ AckIntr = 13<<11,
+ SetIntrEnb = 14<<11,
+ SetStatusEnb = 15<<11,
+ SetRxFilter = 16<<11,
+ SetRxThreshold = 17<<11,
+ SetTxThreshold = 18<<11,
+ SetTxStart = 19<<11,
+ StatsEnable = 21<<11,
+ StatsDisable = 22<<11,
+ StopCoax = 23<<11
};
enum c509status {
- IntLatch = 0x0001, AdapterFailure = 0x0002, TxComplete = 0x0004,
- TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
- IntReq = 0x0040, StatsFull = 0x0080, CmdBusy = 0x1000
+ IntLatch = 0x0001,
+ AdapterFailure = 0x0002,
+ TxComplete = 0x0004,
+ TxAvailable = 0x0008,
+ RxComplete = 0x0010,
+ RxEarly = 0x0020,
+ IntReq = 0x0040,
+ StatsFull = 0x0080,
+ CmdBusy = 0x1000
};
/* The SetRxFilter command accepts the following classes: */
enum RxFilter {
- RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8
+ RxStation = 1,
+ RxMulticast = 2,
+ RxBroadcast = 4,
+ RxProm = 8
};
/* Register window 1 offsets, the window used in normal operation. */
#define TX_FIFO 0x00
#define RX_FIFO 0x00
-#define RX_STATUS 0x08
-#define TX_STATUS 0x0B
+#define RX_STATUS 0x08
+#define TX_STATUS 0x0B
#define TX_FREE 0x0C /* Remaining free bytes in Tx buffer. */
#define WN0_IRQ 0x08 /* Window 0: Set IRQ line in bits 12-15. */
@@ -106,13 +129,12 @@ enum RxFilter {
struct el3_private {
struct pcmcia_device *p_dev;
- dev_node_t node;
- /* For transceiver monitoring */
- struct timer_list media;
- u16 media_status;
- u16 fast_poll;
- unsigned long last_irq;
- spinlock_t lock;
+ /* For transceiver monitoring */
+ struct timer_list media;
+ u16 media_status;
+ u16 fast_poll;
+ unsigned long last_irq;
+ spinlock_t lock;
};
static const char *if_names[] = { "auto", "10baseT", "10base2", "AUI" };
@@ -164,15 +186,15 @@ static void tc589_detach(struct pcmcia_device *p_dev);
======================================================================*/
static const struct net_device_ops el3_netdev_ops = {
- .ndo_open = el3_open,
- .ndo_stop = el3_close,
+ .ndo_open = el3_open,
+ .ndo_stop = el3_close,
.ndo_start_xmit = el3_start_xmit,
- .ndo_tx_timeout = el3_tx_timeout,
+ .ndo_tx_timeout = el3_tx_timeout,
.ndo_set_config = el3_config,
.ndo_get_stats = el3_get_stats,
.ndo_set_multicast_list = set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
@@ -194,8 +216,7 @@ static int tc589_probe(struct pcmcia_device *link)
spin_lock_init(&lp->lock);
link->io.NumPorts1 = 16;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = &el3_interrupt;
+
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.ConfigIndex = 1;
@@ -223,8 +244,7 @@ static void tc589_detach(struct pcmcia_device *link)
dev_dbg(&link->dev, "3c589_detach\n");
- if (link->dev_node)
- unregister_netdev(dev);
+ unregister_netdev(dev);
tc589_release(link);
@@ -236,20 +256,19 @@ static void tc589_detach(struct pcmcia_device *link)
tc589_config() is scheduled to run after a CARD_INSERTION event
is received, to configure the PCMCIA socket, and to make the
ethernet device available to the system.
-
+
======================================================================*/
static int tc589_config(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
- struct el3_private *lp = netdev_priv(dev);
__be16 *phys_addr;
int ret, i, j, multi = 0, fifo;
unsigned int ioaddr;
char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
u8 *buf;
size_t len;
-
+
dev_dbg(&link->dev, "3c589_config\n");
phys_addr = (__be16 *)dev->dev_addr;
@@ -271,15 +290,15 @@ static int tc589_config(struct pcmcia_device *link)
if (i != 0)
goto failed;
- ret = pcmcia_request_irq(link, &link->irq);
+ ret = pcmcia_request_irq(link, el3_interrupt);
if (ret)
goto failed;
ret = pcmcia_request_configuration(link, &link->conf);
if (ret)
goto failed;
-
- dev->irq = link->irq.AssignedIRQ;
+
+ dev->irq = link->irq;
dev->base_addr = link->io.BasePort1;
ioaddr = dev->base_addr;
EL3WINDOW(0);
@@ -312,25 +331,20 @@ static int tc589_config(struct pcmcia_device *link)
dev->if_port = if_port;
else
printk(KERN_ERR "3c589_cs: invalid if_port requested\n");
-
- link->dev_node = &lp->node;
+
SET_NETDEV_DEV(dev, &link->dev);
if (register_netdev(dev) != 0) {
printk(KERN_ERR "3c589_cs: register_netdev() failed\n");
- link->dev_node = NULL;
goto failed;
}
- strcpy(lp->node.dev_name, dev->name);
-
- printk(KERN_INFO "%s: 3Com 3c%s, io %#3lx, irq %d, "
- "hw_addr %pM\n",
- dev->name, (multi ? "562" : "589"), dev->base_addr, dev->irq,
- dev->dev_addr);
- printk(KERN_INFO " %dK FIFO split %s Rx:Tx, %s xcvr\n",
- (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3],
- if_names[dev->if_port]);
+ netdev_info(dev, "3Com 3c%s, io %#3lx, irq %d, hw_addr %pM\n",
+ (multi ? "562" : "589"), dev->base_addr, dev->irq,
+ dev->dev_addr);
+ netdev_info(dev, " %dK FIFO split %s Rx:Tx, %s xcvr\n",
+ (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3],
+ if_names[dev->if_port]);
return 0;
failed:
@@ -343,7 +357,7 @@ failed:
After a card is removed, tc589_release() will unregister the net
device, and release the PCMCIA configuration. If the device is
still open, this will be postponed until it is closed.
-
+
======================================================================*/
static void tc589_release(struct pcmcia_device *link)
@@ -365,7 +379,7 @@ static int tc589_resume(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
- if (link->open) {
+ if (link->open) {
tc589_reset(dev);
netif_device_attach(dev);
}
@@ -385,8 +399,7 @@ static void tc589_wait_for_completion(struct net_device *dev, int cmd)
while (--i > 0)
if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) break;
if (i == 0)
- printk(KERN_WARNING "%s: command 0x%04x did not complete!\n",
- dev->name, cmd);
+ netdev_warn(dev, "command 0x%04x did not complete!\n", cmd);
}
/*
@@ -412,7 +425,7 @@ static void tc589_set_xcvr(struct net_device *dev, int if_port)
{
struct el3_private *lp = netdev_priv(dev);
unsigned int ioaddr = dev->base_addr;
-
+
EL3WINDOW(0);
switch (if_port) {
case 0: case 1: outw(0, ioaddr + 6); break;
@@ -435,14 +448,13 @@ static void dump_status(struct net_device *dev)
{
unsigned int ioaddr = dev->base_addr;
EL3WINDOW(1);
- printk(KERN_INFO " irq status %04x, rx status %04x, tx status "
- "%02x tx free %04x\n", inw(ioaddr+EL3_STATUS),
- inw(ioaddr+RX_STATUS), inb(ioaddr+TX_STATUS),
- inw(ioaddr+TX_FREE));
+ netdev_info(dev, " irq status %04x, rx status %04x, tx status %02x tx free %04x\n",
+ inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS),
+ inb(ioaddr+TX_STATUS), inw(ioaddr+TX_FREE));
EL3WINDOW(4);
- printk(KERN_INFO " diagnostics: fifo %04x net %04x ethernet %04x"
- " media %04x\n", inw(ioaddr+0x04), inw(ioaddr+0x06),
- inw(ioaddr+0x08), inw(ioaddr+0x0a));
+ netdev_info(dev, " diagnostics: fifo %04x net %04x ethernet %04x media %04x\n",
+ inw(ioaddr+0x04), inw(ioaddr+0x06), inw(ioaddr+0x08),
+ inw(ioaddr+0x0a));
EL3WINDOW(1);
}
@@ -451,18 +463,18 @@ static void tc589_reset(struct net_device *dev)
{
unsigned int ioaddr = dev->base_addr;
int i;
-
+
EL3WINDOW(0);
- outw(0x0001, ioaddr + 4); /* Activate board. */
+ outw(0x0001, ioaddr + 4); /* Activate board. */
outw(0x3f00, ioaddr + 8); /* Set the IRQ line. */
-
+
/* Set the station address in window 2. */
EL3WINDOW(2);
for (i = 0; i < 6; i++)
outb(dev->dev_addr[i], ioaddr + i);
tc589_set_xcvr(dev, dev->if_port);
-
+
/* Switch to the stats window, and clear all stats by reading. */
outw(StatsDisable, ioaddr + EL3_CMD);
EL3WINDOW(6);
@@ -470,7 +482,7 @@ static void tc589_reset(struct net_device *dev)
inb(ioaddr+i);
inw(ioaddr + 10);
inw(ioaddr + 12);
-
+
/* Switch to register set 1 for normal use. */
EL3WINDOW(1);
@@ -504,8 +516,7 @@ static int el3_config(struct net_device *dev, struct ifmap *map)
if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
if (map->port <= 3) {
dev->if_port = map->port;
- printk(KERN_INFO "%s: switched to %s port\n",
- dev->name, if_names[dev->if_port]);
+ netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
tc589_set_xcvr(dev, dev->if_port);
} else
return -EINVAL;
@@ -517,13 +528,13 @@ static int el3_open(struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
struct pcmcia_device *link = lp->p_dev;
-
+
if (!pcmcia_dev_present(link))
return -ENODEV;
link->open++;
netif_start_queue(dev);
-
+
tc589_reset(dev);
init_timer(&lp->media);
lp->media.function = &media_check;
@@ -533,18 +544,18 @@ static int el3_open(struct net_device *dev)
dev_dbg(&link->dev, "%s: opened, status %4.4x.\n",
dev->name, inw(dev->base_addr + EL3_STATUS));
-
+
return 0;
}
static void el3_tx_timeout(struct net_device *dev)
{
unsigned int ioaddr = dev->base_addr;
-
- printk(KERN_WARNING "%s: Transmit timed out!\n", dev->name);
+
+ netdev_warn(dev, "Transmit timed out!\n");
dump_status(dev);
dev->stats.tx_errors++;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
/* Issue TX_RESET and TX_START commands. */
tc589_wait_for_completion(dev, TxReset);
outw(TxEnable, ioaddr + EL3_CMD);
@@ -555,19 +566,18 @@ static void pop_tx_status(struct net_device *dev)
{
unsigned int ioaddr = dev->base_addr;
int i;
-
+
/* Clear the Tx status stack. */
for (i = 32; i > 0; i--) {
u_char tx_status = inb(ioaddr + TX_STATUS);
if (!(tx_status & 0x84)) break;
/* reset transmitter on jabber error or underrun */
if (tx_status & 0x30)
- tc589_wait_for_completion(dev, TxReset);
+ tc589_wait_for_completion(dev, TxReset);
if (tx_status & 0x38) {
- pr_debug("%s: transmit error: status 0x%02x\n",
- dev->name, tx_status);
- outw(TxEnable, ioaddr + EL3_CMD);
- dev->stats.tx_aborted_errors++;
+ netdev_dbg(dev, "transmit error: status 0x%02x\n", tx_status);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ dev->stats.tx_aborted_errors++;
}
outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
}
@@ -580,11 +590,10 @@ static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
struct el3_private *priv = netdev_priv(dev);
unsigned long flags;
- pr_debug("%s: el3_start_xmit(length = %ld) called, "
- "status %4.4x.\n", dev->name, (long)skb->len,
- inw(ioaddr + EL3_STATUS));
+ netdev_dbg(dev, "el3_start_xmit(length = %ld) called, status %4.4x.\n",
+ (long)skb->len, inw(ioaddr + EL3_STATUS));
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&priv->lock, flags);
dev->stats.tx_bytes += skb->len;
@@ -594,7 +603,6 @@ static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
/* ... and the packet rounded to a doubleword. */
outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
- dev->trans_start = jiffies;
if (inw(ioaddr + TX_FREE) <= 1536) {
netif_stop_queue(dev);
/* Interrupt us when the FIFO has room for max-sized packet. */
@@ -602,9 +610,9 @@ static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
}
pop_tx_status(dev);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->lock, flags);
dev_kfree_skb(skb);
-
+
return NETDEV_TX_OK;
}
@@ -616,37 +624,32 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
unsigned int ioaddr;
__u16 status;
int i = 0, handled = 1;
-
+
if (!netif_device_present(dev))
return IRQ_NONE;
ioaddr = dev->base_addr;
- pr_debug("%s: interrupt, status %4.4x.\n",
- dev->name, inw(ioaddr + EL3_STATUS));
+ netdev_dbg(dev, "interrupt, status %4.4x.\n", inw(ioaddr + EL3_STATUS));
- spin_lock(&lp->lock);
+ spin_lock(&lp->lock);
while ((status = inw(ioaddr + EL3_STATUS)) &
(IntLatch | RxComplete | StatsFull)) {
if ((status & 0xe000) != 0x2000) {
- pr_debug("%s: interrupt from dead card\n", dev->name);
- handled = 0;
- break;
+ netdev_dbg(dev, "interrupt from dead card\n");
+ handled = 0;
+ break;
}
-
if (status & RxComplete)
- el3_rx(dev);
-
+ el3_rx(dev);
if (status & TxAvailable) {
- pr_debug(" TX room bit was handled.\n");
- /* There's room in the FIFO for a full-sized packet. */
- outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
- netif_wake_queue(dev);
+ netdev_dbg(dev, " TX room bit was handled.\n");
+ /* There's room in the FIFO for a full-sized packet. */
+ outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
+ netif_wake_queue(dev);
}
-
if (status & TxComplete)
- pop_tx_status(dev);
-
+ pop_tx_status(dev);
if (status & (AdapterFailure | RxEarly | StatsFull)) {
/* Handle all uncommon interrupts. */
if (status & StatsFull) /* Empty statistics. */
@@ -660,8 +663,8 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
EL3WINDOW(4);
fifo_diag = inw(ioaddr + 4);
EL3WINDOW(1);
- printk(KERN_WARNING "%s: adapter failure, FIFO diagnostic"
- " register %04x.\n", dev->name, fifo_diag);
+ netdev_warn(dev, "adapter failure, FIFO diagnostic register %04x.\n",
+ fifo_diag);
if (fifo_diag & 0x0400) {
/* Tx overrun */
tc589_wait_for_completion(dev, TxReset);
@@ -676,22 +679,20 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
}
}
-
if (++i > 10) {
- printk(KERN_ERR "%s: infinite loop in interrupt, "
- "status %4.4x.\n", dev->name, status);
- /* Clear all interrupts */
- outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
- break;
+ netdev_err(dev, "infinite loop in interrupt, status %4.4x.\n",
+ status);
+ /* Clear all interrupts */
+ outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
+ break;
}
/* Acknowledge the IRQ. */
outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
}
-
lp->last_irq = jiffies;
- spin_unlock(&lp->lock);
- pr_debug("%s: exiting interrupt, status %4.4x.\n",
- dev->name, inw(ioaddr + EL3_STATUS));
+ spin_unlock(&lp->lock);
+ netdev_dbg(dev, "exiting interrupt, status %4.4x.\n",
+ inw(ioaddr + EL3_STATUS));
return IRQ_RETVAL(handled);
}
@@ -710,7 +711,7 @@ static void media_check(unsigned long arg)
if ((inw(ioaddr + EL3_STATUS) & IntLatch) &&
(inb(ioaddr + EL3_TIMER) == 0xff)) {
if (!lp->fast_poll)
- printk(KERN_WARNING "%s: interrupt(s) dropped!\n", dev->name);
+ netdev_warn(dev, "interrupt(s) dropped!\n");
local_irq_save(flags);
el3_interrupt(dev->irq, dev);
@@ -727,7 +728,7 @@ static void media_check(unsigned long arg)
/* lp->lock guards the EL3 window. Window should always be 1 except
when the lock is held */
- spin_lock_irqsave(&lp->lock, flags);
+ spin_lock_irqsave(&lp->lock, flags);
EL3WINDOW(4);
media = inw(ioaddr+WN4_MEDIA) & 0xc810;
@@ -747,32 +748,30 @@ static void media_check(unsigned long arg)
if (media != lp->media_status) {
if ((media & lp->media_status & 0x8000) &&
((lp->media_status ^ media) & 0x0800))
- printk(KERN_INFO "%s: %s link beat\n", dev->name,
- (lp->media_status & 0x0800 ? "lost" : "found"));
+ netdev_info(dev, "%s link beat\n",
+ (lp->media_status & 0x0800 ? "lost" : "found"));
else if ((media & lp->media_status & 0x4000) &&
((lp->media_status ^ media) & 0x0010))
- printk(KERN_INFO "%s: coax cable %s\n", dev->name,
- (lp->media_status & 0x0010 ? "ok" : "problem"));
+ netdev_info(dev, "coax cable %s\n",
+ (lp->media_status & 0x0010 ? "ok" : "problem"));
if (dev->if_port == 0) {
if (media & 0x8000) {
if (media & 0x0800)
- printk(KERN_INFO "%s: flipped to 10baseT\n",
- dev->name);
+ netdev_info(dev, "flipped to 10baseT\n");
else
- tc589_set_xcvr(dev, 2);
+ tc589_set_xcvr(dev, 2);
} else if (media & 0x4000) {
if (media & 0x0010)
tc589_set_xcvr(dev, 1);
else
- printk(KERN_INFO "%s: flipped to 10base2\n",
- dev->name);
+ netdev_info(dev, "flipped to 10base2\n");
}
}
lp->media_status = media;
}
-
+
EL3WINDOW(1);
- spin_unlock_irqrestore(&lp->lock, flags);
+ spin_unlock_irqrestore(&lp->lock, flags);
reschedule:
lp->media.expires = jiffies + HZ;
@@ -786,7 +785,7 @@ static struct net_device_stats *el3_get_stats(struct net_device *dev)
struct pcmcia_device *link = lp->p_dev;
if (pcmcia_dev_present(link)) {
- spin_lock_irqsave(&lp->lock, flags);
+ spin_lock_irqsave(&lp->lock, flags);
update_stats(dev);
spin_unlock_irqrestore(&lp->lock, flags);
}
@@ -798,21 +797,21 @@ static struct net_device_stats *el3_get_stats(struct net_device *dev)
single-threaded if the device is active. This is expected to be a rare
operation, and it's simpler for the rest of the driver to assume that
window 1 is always valid rather than use a special window-state variable.
-
+
Caller must hold the lock for this
*/
static void update_stats(struct net_device *dev)
{
unsigned int ioaddr = dev->base_addr;
- pr_debug("%s: updating the statistics.\n", dev->name);
+ netdev_dbg(dev, "updating the statistics.\n");
/* Turn off statistics updates while reading. */
outw(StatsDisable, ioaddr + EL3_CMD);
/* Switch to the stats window, and read everything. */
EL3WINDOW(6);
- dev->stats.tx_carrier_errors += inb(ioaddr + 0);
+ dev->stats.tx_carrier_errors += inb(ioaddr + 0);
dev->stats.tx_heartbeat_errors += inb(ioaddr + 1);
- /* Multiple collisions. */ inb(ioaddr + 2);
+ /* Multiple collisions. */ inb(ioaddr + 2);
dev->stats.collisions += inb(ioaddr + 3);
dev->stats.tx_window_errors += inb(ioaddr + 4);
dev->stats.rx_fifo_errors += inb(ioaddr + 5);
@@ -821,7 +820,7 @@ static void update_stats(struct net_device *dev)
/* Tx deferrals */ inb(ioaddr + 8);
/* Rx octets */ inw(ioaddr + 10);
/* Tx octets */ inw(ioaddr + 12);
-
+
/* Back to window 1, and turn statistics back on. */
EL3WINDOW(1);
outw(StatsEnable, ioaddr + EL3_CMD);
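
A short sketch of the calling convention the comment above spells out
("Caller must hold the lock"), mirroring el3_get_stats() in this driver:

	spin_lock_irqsave(&lp->lock, flags);
	update_stats(dev);	/* reads window 6, then restores window 1 */
	spin_unlock_irqrestore(&lp->lock, flags);
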
@@ -832,9 +831,9 @@ static int el3_rx(struct net_device *dev)
unsigned int ioaddr = dev->base_addr;
int worklimit = 32;
short rx_status;
-
- pr_debug("%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n",
- dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS));
+
+ netdev_dbg(dev, "in rx_packet(), status %4.4x, rx_status %4.4x.\n",
+ inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS));
while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) &&
worklimit > 0) {
worklimit--;
@@ -852,11 +851,11 @@ static int el3_rx(struct net_device *dev)
} else {
short pkt_len = rx_status & 0x7ff;
struct sk_buff *skb;
-
+
skb = dev_alloc_skb(pkt_len+5);
-
- pr_debug(" Receiving packet size %d status %4.4x.\n",
- pkt_len, rx_status);
+
+ netdev_dbg(dev, " Receiving packet size %d status %4.4x.\n",
+ pkt_len, rx_status);
if (skb != NULL) {
skb_reserve(skb, 2);
insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len),
@@ -866,8 +865,8 @@ static int el3_rx(struct net_device *dev)
dev->stats.rx_packets++;
dev->stats.rx_bytes += pkt_len;
} else {
- pr_debug("%s: couldn't allocate a sk_buff of"
- " size %d.\n", dev->name, pkt_len);
+ netdev_dbg(dev, "couldn't allocate a sk_buff of size %d.\n",
+ pkt_len);
dev->stats.rx_dropped++;
}
}
@@ -875,7 +874,7 @@ static int el3_rx(struct net_device *dev)
tc589_wait_for_completion(dev, RxDiscard);
}
if (worklimit == 0)
- printk(KERN_WARNING "%s: too much work in el3_rx!\n", dev->name);
+ netdev_warn(dev, "too much work in el3_rx!\n");
return 0;
}
@@ -906,17 +905,17 @@ static int el3_close(struct net_device *dev)
struct el3_private *lp = netdev_priv(dev);
struct pcmcia_device *link = lp->p_dev;
unsigned int ioaddr = dev->base_addr;
-
+
dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name);
if (pcmcia_dev_present(link)) {
/* Turn off statistics ASAP. We update dev->stats below. */
outw(StatsDisable, ioaddr + EL3_CMD);
-
+
/* Disable the receiver and transmitter. */
outw(RxDisable, ioaddr + EL3_CMD);
outw(TxDisable, ioaddr + EL3_CMD);
-
+
if (dev->if_port == 2)
/* Turn off thinnet power. Green! */
outw(StopCoax, ioaddr + EL3_CMD);
@@ -925,12 +924,12 @@ static int el3_close(struct net_device *dev)
EL3WINDOW(4);
outw(0, ioaddr + WN4_MEDIA);
}
-
+
/* Switching back to window 0 disables the IRQ. */
EL3WINDOW(0);
/* But we explicitly zero the IRQ line select anyway. */
outw(0x0f00, ioaddr + WN0_IRQ);
-
+
/* Check if the card still exists */
if ((inw(ioaddr+EL3_STATUS) & 0xe000) == 0x2000)
update_stats(dev);
@@ -939,7 +938,7 @@ static int el3_close(struct net_device *dev)
link->open--;
netif_stop_queue(dev);
del_timer_sync(&lp->media);
-
+
return 0;
}
@@ -961,7 +960,7 @@ static struct pcmcia_driver tc589_driver = {
},
.probe = tc589_probe,
.remove = tc589_detach,
- .id_table = tc589_ids,
+ .id_table = tc589_ids,
.suspend = tc589_suspend,
.resume = tc589_resume,
};
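
With dev_node_t gone, the registration path in these PCMCIA drivers reduces to
the pattern below (sketch only; error handling as in tc589_config() above):

	SET_NETDEV_DEV(dev, &link->dev);
	if (register_netdev(dev) != 0)
		goto failed;
	/* no link->dev_node bookkeeping and no strcpy() of the name anymore */
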
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 9f3d593f14ed..e4a15c4482dc 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -113,7 +113,6 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id);
typedef struct axnet_dev_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
caddr_t base;
struct timer_list watchdog;
int stale, fast_poll;
@@ -168,7 +167,6 @@ static int axnet_probe(struct pcmcia_device *link)
info = PRIV(dev);
info->p_dev = link;
link->priv = dev;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -195,8 +193,7 @@ static void axnet_detach(struct pcmcia_device *link)
dev_dbg(&link->dev, "axnet_detach(0x%p)\n", link);
- if (link->dev_node)
- unregister_netdev(dev);
+ unregister_netdev(dev);
axnet_release(link);
@@ -265,12 +262,9 @@ static int try_io_port(struct pcmcia_device *link)
int j, ret;
if (link->io.NumPorts1 == 32) {
link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- if (link->io.NumPorts2 > 0) {
- /* for master/slave multifunction cards */
+ /* for master/slave multifunction cards */
+ if (link->io.NumPorts2 > 0)
link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
- link->irq.Attributes =
- IRQ_TYPE_DYNAMIC_SHARING;
- }
} else {
/* This should be two 16-port windows */
link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
@@ -336,8 +330,7 @@ static int axnet_config(struct pcmcia_device *link)
if (ret != 0)
goto failed;
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
+ if (!link->irq)
goto failed;
if (link->io.NumPorts2 == 8) {
@@ -349,7 +342,7 @@ static int axnet_config(struct pcmcia_device *link)
if (ret)
goto failed;
- dev->irq = link->irq.AssignedIRQ;
+ dev->irq = link->irq;
dev->base_addr = link->io.BasePort1;
if (!get_prom(link)) {
@@ -397,17 +390,13 @@ static int axnet_config(struct pcmcia_device *link)
}
info->phy_id = (i < 32) ? i : -1;
- link->dev_node = &info->node;
SET_NETDEV_DEV(dev, &link->dev);
if (register_netdev(dev) != 0) {
printk(KERN_NOTICE "axnet_cs: register_netdev() failed\n");
- link->dev_node = NULL;
goto failed;
}
- strcpy(info->node.dev_name, dev->name);
-
printk(KERN_INFO "%s: Asix AX88%d90: io %#3lx, irq %d, "
"hw_addr %pM\n",
dev->name, ((info->flags & IS_AX88790) ? 7 : 1),
@@ -1005,7 +994,7 @@ static void axnet_tx_timeout(struct net_device *dev)
{
long e8390_base = dev->base_addr;
struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
- int txsr, isr, tickssofar = jiffies - dev->trans_start;
+ int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
unsigned long flags;
dev->stats.tx_errors++;
@@ -1622,11 +1611,11 @@ static struct net_device_stats *get_stats(struct net_device *dev)
static inline void make_mc_bits(u8 *bits, struct net_device *dev)
{
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
u32 crc;
- netdev_for_each_mc_addr(dmi, dev) {
- crc = ether_crc(ETH_ALEN, dmi->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc(ETH_ALEN, ha->addr);
/*
* The 8390 uses the 6 most significant bits of the
* CRC to index the multicast table.
diff --git a/drivers/net/pcmcia/com20020_cs.c b/drivers/net/pcmcia/com20020_cs.c
index 21d9c9d815d1..5643f94541bc 100644
--- a/drivers/net/pcmcia/com20020_cs.c
+++ b/drivers/net/pcmcia/com20020_cs.c
@@ -122,7 +122,6 @@ static void com20020_detach(struct pcmcia_device *p_dev);
typedef struct com20020_dev_t {
struct net_device *dev;
- dev_node_t node;
} com20020_dev_t;
/*======================================================================
@@ -163,7 +162,6 @@ static int com20020_probe(struct pcmcia_device *p_dev)
p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
p_dev->io.NumPorts1 = 16;
p_dev->io.IOAddrLines = 16;
- p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
p_dev->conf.Attributes = CONF_ENABLE_IRQ;
p_dev->conf.IntType = INT_MEMORY_AND_IO;
@@ -196,18 +194,16 @@ static void com20020_detach(struct pcmcia_device *link)
dev_dbg(&link->dev, "com20020_detach\n");
- if (link->dev_node) {
- dev_dbg(&link->dev, "unregister...\n");
+ dev_dbg(&link->dev, "unregister...\n");
- unregister_netdev(dev);
+ unregister_netdev(dev);
- /*
- * this is necessary because we register our IRQ separately
- * from card services.
- */
- if (dev->irq)
+ /*
+ * this is necessary because we register our IRQ separately
+ * from card services.
+ */
+ if (dev->irq)
free_irq(dev->irq, dev);
- }
com20020_release(link);
@@ -275,15 +271,14 @@ static int com20020_config(struct pcmcia_device *link)
dev_dbg(&link->dev, "got ioaddr %Xh\n", ioaddr);
dev_dbg(&link->dev, "request IRQ %d\n",
- link->irq.AssignedIRQ);
- i = pcmcia_request_irq(link, &link->irq);
- if (i != 0)
+ link->irq);
+ if (!link->irq)
{
dev_dbg(&link->dev, "requestIRQ failed totally!\n");
goto failed;
}
- dev->irq = link->irq.AssignedIRQ;
+ dev->irq = link->irq;
ret = pcmcia_request_configuration(link, &link->conf);
if (ret)
@@ -299,7 +294,6 @@ static int com20020_config(struct pcmcia_device *link)
lp->card_name = "PCMCIA COM20020";
lp->card_flags = ARC_CAN_10MBIT; /* pretend all of them can 10Mbit */
- link->dev_node = &info->node;
SET_NETDEV_DEV(dev, &link->dev);
i = com20020_found(dev, 0); /* calls register_netdev */
@@ -307,12 +301,9 @@ static int com20020_config(struct pcmcia_device *link)
if (i != 0) {
dev_printk(KERN_NOTICE, &link->dev,
"com20020_cs: com20020_found() failed\n");
- link->dev_node = NULL;
goto failed;
}
- strcpy(info->node.dev_name, dev->name);
-
dev_dbg(&link->dev,KERN_INFO "%s: port %#3lx, irq %d\n",
dev->name, dev->base_addr, dev->irq);
return 0;
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index b9dc80b9d04a..ce32d8ed4962 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -110,7 +110,6 @@ typedef enum { MBH10302, MBH10304, TDK, CONTEC, LA501, UNGERMANN,
*/
typedef struct local_info_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
long open_time;
uint tx_started:1;
uint tx_queue;
@@ -254,10 +253,6 @@ static int fmvj18x_probe(struct pcmcia_device *link)
link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
link->io.IOAddrLines = 5;
- /* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = fjn_interrupt;
-
/* General socket configuration */
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -278,8 +273,7 @@ static void fmvj18x_detach(struct pcmcia_device *link)
dev_dbg(&link->dev, "fmvj18x_detach\n");
- if (link->dev_node)
- unregister_netdev(dev);
+ unregister_netdev(dev);
fmvj18x_release(link);
@@ -425,8 +419,6 @@ static int fmvj18x_config(struct pcmcia_device *link)
}
if (link->io.NumPorts2 != 0) {
- link->irq.Attributes =
- IRQ_TYPE_DYNAMIC_SHARING;
ret = mfc_try_io_port(link);
if (ret != 0) goto failed;
} else if (cardtype == UNGERMANN) {
@@ -437,14 +429,14 @@ static int fmvj18x_config(struct pcmcia_device *link)
if (ret)
goto failed;
}
- ret = pcmcia_request_irq(link, &link->irq);
+ ret = pcmcia_request_irq(link, fjn_interrupt);
if (ret)
goto failed;
ret = pcmcia_request_configuration(link, &link->conf);
if (ret)
goto failed;
- dev->irq = link->irq.AssignedIRQ;
+ dev->irq = link->irq;
dev->base_addr = link->io.BasePort1;
if (link->io.BasePort2 != 0) {
@@ -529,17 +521,13 @@ static int fmvj18x_config(struct pcmcia_device *link)
}
lp->cardtype = cardtype;
- link->dev_node = &lp->node;
SET_NETDEV_DEV(dev, &link->dev);
if (register_netdev(dev) != 0) {
printk(KERN_NOTICE "fmvj18x_cs: register_netdev() failed\n");
- link->dev_node = NULL;
goto failed;
}
- strcpy(lp->node.dev_name, dev->name);
-
/* print current configuration */
printk(KERN_INFO "%s: %s, sram %s, port %#3lx, irq %d, "
"hw_addr %pM\n",
@@ -890,7 +878,6 @@ static netdev_tx_t fjn_start_xmit(struct sk_buff *skb,
lp->sent = lp->tx_queue ;
lp->tx_queue = 0;
lp->tx_queue_len = 0;
- dev->trans_start = jiffies;
lp->tx_started = 1;
netif_start_queue(dev);
} else {
@@ -1196,11 +1183,11 @@ static void set_rx_mode(struct net_device *dev)
memset(mc_filter, 0x00, sizeof(mc_filter));
outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
- unsigned int bit = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, dev) {
+ unsigned int bit = ether_crc_le(ETH_ALEN, ha->addr) >> 26;
mc_filter[bit >> 3] |= (1 << (bit & 7));
}
outb(2, ioaddr + RX_MODE); /* Use normal mode. */
diff --git a/drivers/net/pcmcia/ibmtr_cs.c b/drivers/net/pcmcia/ibmtr_cs.c
index 37f4a6fdc3ef..2e42d80f8cae 100644
--- a/drivers/net/pcmcia/ibmtr_cs.c
+++ b/drivers/net/pcmcia/ibmtr_cs.c
@@ -104,7 +104,6 @@ static void ibmtr_detach(struct pcmcia_device *p_dev);
typedef struct ibmtr_dev_t {
struct pcmcia_device *p_dev;
struct net_device *dev;
- dev_node_t node;
window_handle_t sram_win_handle;
struct tok_info *ti;
} ibmtr_dev_t;
@@ -156,8 +155,6 @@ static int __devinit ibmtr_attach(struct pcmcia_device *link)
link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
link->io.NumPorts1 = 4;
link->io.IOAddrLines = 16;
- link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
- link->irq.Handler = ibmtr_interrupt;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.Present = PRESENT_OPTION;
@@ -192,8 +189,7 @@ static void ibmtr_detach(struct pcmcia_device *link)
*/
ti->sram_phys |= 1;
- if (link->dev_node)
- unregister_netdev(dev);
+ unregister_netdev(dev);
del_timer_sync(&(ti->tr_timer));
@@ -238,11 +234,11 @@ static int __devinit ibmtr_config(struct pcmcia_device *link)
}
dev->base_addr = link->io.BasePort1;
- ret = pcmcia_request_irq(link, &link->irq);
+ ret = pcmcia_request_exclusive_irq(link, ibmtr_interrupt);
if (ret)
goto failed;
- dev->irq = link->irq.AssignedIRQ;
- ti->irq = link->irq.AssignedIRQ;
+ dev->irq = link->irq;
+ ti->irq = link->irq;
ti->global_int_enable=GLOBAL_INT_ENABLE+((dev->irq==9) ? 2 : dev->irq);
/* Allocate the MMIO memory window */
@@ -291,18 +287,14 @@ static int __devinit ibmtr_config(struct pcmcia_device *link)
Adapters Technical Reference" SC30-3585 for this info. */
ibmtr_hw_setup(dev, mmiobase);
- link->dev_node = &info->node;
SET_NETDEV_DEV(dev, &link->dev);
i = ibmtr_probe_card(dev);
if (i != 0) {
printk(KERN_NOTICE "ibmtr_cs: register_netdev() failed\n");
- link->dev_node = NULL;
goto failed;
}
- strcpy(info->node.dev_name, dev->name);
-
printk(KERN_INFO
"%s: port %#3lx, irq %d, mmio %#5lx, sram %#5lx, hwaddr=%pM\n",
dev->name, dev->base_addr, dev->irq,
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index c717b143f11a..b371cad850b3 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -363,7 +363,6 @@ typedef struct _mace_statistics {
typedef struct _mace_private {
struct pcmcia_device *p_dev;
- dev_node_t node;
struct net_device_stats linux_stats; /* Linux statistics counters */
mace_statistics mace_stats; /* MACE chip statistics counters */
@@ -463,8 +462,6 @@ static int nmclan_probe(struct pcmcia_device *link)
link->io.NumPorts1 = 32;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
link->io.IOAddrLines = 5;
- link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
- link->irq.Handler = mace_interrupt;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.ConfigIndex = 1;
@@ -493,8 +490,7 @@ static void nmclan_detach(struct pcmcia_device *link)
dev_dbg(&link->dev, "nmclan_detach\n");
- if (link->dev_node)
- unregister_netdev(dev);
+ unregister_netdev(dev);
nmclan_release(link);
@@ -652,14 +648,14 @@ static int nmclan_config(struct pcmcia_device *link)
ret = pcmcia_request_io(link, &link->io);
if (ret)
goto failed;
- ret = pcmcia_request_irq(link, &link->irq);
+ ret = pcmcia_request_exclusive_irq(link, mace_interrupt);
if (ret)
goto failed;
ret = pcmcia_request_configuration(link, &link->conf);
if (ret)
goto failed;
- dev->irq = link->irq.AssignedIRQ;
+ dev->irq = link->irq;
dev->base_addr = link->io.BasePort1;
ioaddr = dev->base_addr;
@@ -698,18 +694,14 @@ static int nmclan_config(struct pcmcia_device *link)
else
printk(KERN_NOTICE "nmclan_cs: invalid if_port requested\n");
- link->dev_node = &lp->node;
SET_NETDEV_DEV(dev, &link->dev);
i = register_netdev(dev);
if (i != 0) {
printk(KERN_NOTICE "nmclan_cs: register_netdev() failed\n");
- link->dev_node = NULL;
goto failed;
}
- strcpy(lp->node.dev_name, dev->name);
-
printk(KERN_INFO "%s: nmclan: port %#3lx, irq %d, %s port,"
" hw_addr %pM\n",
dev->name, dev->base_addr, dev->irq, if_names[dev->if_port],
@@ -903,7 +895,7 @@ static void mace_tx_timeout(struct net_device *dev)
#else /* #if RESET_ON_TIMEOUT */
printk("NOT resetting card\n");
#endif /* #if RESET_ON_TIMEOUT */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -945,8 +937,6 @@ static netdev_tx_t mace_start_xmit(struct sk_buff *skb,
outb(skb->data[skb->len-1], ioaddr + AM2150_XMT);
}
- dev->trans_start = jiffies;
-
#if MULTI_TX
if (lp->tx_free_frames > 0)
netif_start_queue(dev);
@@ -1475,7 +1465,7 @@ static void set_multicast_list(struct net_device *dev)
{
mace_private *lp = netdev_priv(dev);
int adr[ETHER_ADDR_LEN] = {0}; /* Ethernet address */
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
#ifdef PCMCIA_DEBUG
{
@@ -1495,8 +1485,8 @@ static void set_multicast_list(struct net_device *dev)
if (num_addrs > 0) {
/* Calculate multicast logical address filter */
memset(lp->multicast_ladrf, 0, MACE_LADRF_LEN);
- netdev_for_each_mc_addr(dmi, dev) {
- memcpy(adr, dmi->dmi_addr, ETHER_ADDR_LEN);
+ netdev_for_each_mc_addr(ha, dev) {
+ memcpy(adr, ha->addr, ETHER_ADDR_LEN);
BuildLAF(lp->multicast_ladrf, adr);
}
}
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 4c0368de1815..6f77a768ba88 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -208,7 +208,6 @@ static hw_info_t dl10022_info = { 0, 0, 0, 0, IS_DL10022|HAS_MII };
typedef struct pcnet_dev_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
u_int flags;
void __iomem *base;
struct timer_list watchdog;
@@ -264,7 +263,6 @@ static int pcnet_probe(struct pcmcia_device *link)
info->p_dev = link;
link->priv = dev;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -288,8 +286,7 @@ static void pcnet_detach(struct pcmcia_device *link)
dev_dbg(&link->dev, "pcnet_detach\n");
- if (link->dev_node)
- unregister_netdev(dev);
+ unregister_netdev(dev);
pcnet_release(link);
@@ -488,8 +485,6 @@ static int try_io_port(struct pcmcia_device *link)
if (link->io.NumPorts2 > 0) {
/* for master/slave multifunction cards */
link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
- link->irq.Attributes =
- IRQ_TYPE_DYNAMIC_SHARING;
}
} else {
/* This should be two 16-port windows */
@@ -559,8 +554,7 @@ static int pcnet_config(struct pcmcia_device *link)
if (ret)
goto failed;
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
+ if (!link->irq)
goto failed;
if (link->io.NumPorts2 == 8) {
@@ -574,7 +568,7 @@ static int pcnet_config(struct pcmcia_device *link)
ret = pcmcia_request_configuration(link, &link->conf);
if (ret)
goto failed;
- dev->irq = link->irq.AssignedIRQ;
+ dev->irq = link->irq;
dev->base_addr = link->io.BasePort1;
if (info->flags & HAS_MISC_REG) {
if ((if_port == 1) || (if_port == 2))
@@ -643,17 +637,13 @@ static int pcnet_config(struct pcmcia_device *link)
if (info->flags & (IS_DL10019|IS_DL10022))
mii_phy_probe(dev);
- link->dev_node = &info->node;
SET_NETDEV_DEV(dev, &link->dev);
if (register_netdev(dev) != 0) {
printk(KERN_NOTICE "pcnet_cs: register_netdev() failed\n");
- link->dev_node = NULL;
goto failed;
}
- strcpy(info->node.dev_name, dev->name);
-
if (info->flags & (IS_DL10019|IS_DL10022)) {
u_char id = inb(dev->base_addr + 0x1a);
printk(KERN_INFO "%s: NE2000 (DL100%d rev %02x): ",
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index ccc553782a0d..81be737c8783 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -103,7 +103,6 @@ struct smc_private {
u_short manfid;
u_short cardid;
- dev_node_t node;
struct sk_buff *saved_skb;
int packets_waiting;
void __iomem *base;
@@ -323,14 +322,11 @@ static int smc91c92_probe(struct pcmcia_device *link)
return -ENOMEM;
smc = netdev_priv(dev);
smc->p_dev = link;
- link->priv = dev;
spin_lock_init(&smc->lock);
link->io.NumPorts1 = 16;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
link->io.IOAddrLines = 4;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = &smc_interrupt;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -363,8 +359,7 @@ static void smc91c92_detach(struct pcmcia_device *link)
dev_dbg(&link->dev, "smc91c92_detach\n");
- if (link->dev_node)
- unregister_netdev(dev);
+ unregister_netdev(dev);
smc91c92_release(link);
@@ -453,7 +448,6 @@ static int mhz_mfc_config(struct pcmcia_device *link)
link->conf.Attributes |= CONF_ENABLE_SPKR;
link->conf.Status = CCSR_AUDIO_ENA;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
link->io.IOAddrLines = 16;
link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
link->io.NumPorts2 = 8;
@@ -652,7 +646,6 @@ static int osi_config(struct pcmcia_device *link)
link->conf.Attributes |= CONF_ENABLE_SPKR;
link->conf.Status = CCSR_AUDIO_ENA;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
link->io.NumPorts1 = 64;
link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
link->io.NumPorts2 = 8;
@@ -877,7 +870,7 @@ static int smc91c92_config(struct pcmcia_device *link)
if (i)
goto config_failed;
- i = pcmcia_request_irq(link, &link->irq);
+ i = pcmcia_request_irq(link, smc_interrupt);
if (i)
goto config_failed;
i = pcmcia_request_configuration(link, &link->conf);
@@ -887,7 +880,7 @@ static int smc91c92_config(struct pcmcia_device *link)
if (smc->manfid == MANFID_MOTOROLA)
mot_config(link);
- dev->irq = link->irq.AssignedIRQ;
+ dev->irq = link->irq;
if ((if_port >= 0) && (if_port <= 2))
dev->if_port = if_port;
@@ -960,17 +953,13 @@ static int smc91c92_config(struct pcmcia_device *link)
SMC_SELECT_BANK(0);
}
- link->dev_node = &smc->node;
SET_NETDEV_DEV(dev, &link->dev);
if (register_netdev(dev) != 0) {
printk(KERN_ERR "smc91c92_cs: register_netdev() failed\n");
- link->dev_node = NULL;
goto config_undo;
}
- strcpy(smc->node.dev_name, dev->name);
-
printk(KERN_INFO "%s: smc91c%s rev %d: io %#3lx, irq %d, "
"hw_addr %pM\n",
dev->name, name, (rev & 0x0f), dev->base_addr, dev->irq,
@@ -1254,7 +1243,7 @@ static void smc_tx_timeout(struct net_device *dev)
dev->name, inw(ioaddr)&0xff, inw(ioaddr + 2));
dev->stats.tx_errors++;
smc_reset(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
smc->saved_skb = NULL;
netif_wake_queue(dev);
}
@@ -1621,10 +1610,10 @@ static void set_rx_mode(struct net_device *dev)
rx_cfg_setting = RxStripCRC | RxEnable | RxAllMulti;
else {
if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *mc_addr;
+ struct netdev_hw_addr *ha;
- netdev_for_each_mc_addr(mc_addr, dev) {
- u_int position = ether_crc(6, mc_addr->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ u_int position = ether_crc(6, ha->addr);
multicast_table[position >> 29] |= 1 << ((position >> 26) & 7);
}
}
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index 4d1802e457be..b6c3644888cd 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -297,31 +297,9 @@ static void xirc2ps_detach(struct pcmcia_device *p_dev);
static irqreturn_t xirc2ps_interrupt(int irq, void *dev_id);
-/****************
- * A linked list of "instances" of the device. Each actual
- * PCMCIA card corresponds to one device instance, and is described
- * by one struct pcmcia_device structure (defined in ds.h).
- *
- * You may not want to use a linked list for this -- for example, the
- * memory card driver uses an array of struct pcmcia_device pointers, where minor
- * device numbers are used to derive the corresponding array index.
- */
-
-/****************
- * A driver needs to provide a dev_node_t structure for each device
- * on a card. In some cases, there is only one device per card (for
- * example, ethernet cards, modems). In other cases, there may be
- * many actual or logical devices (SCSI adapters, memory cards with
- * multiple partitions). The dev_node_t structures need to be kept
- * in a linked list starting at the 'dev' field of a struct pcmcia_device
- * structure. We allocate them in the card's private data structure,
- * because they generally can't be allocated dynamically.
- */
-
typedef struct local_info_t {
struct net_device *dev;
struct pcmcia_device *p_dev;
- dev_node_t node;
int card_type;
int probe_port;
@@ -555,7 +533,6 @@ xirc2ps_probe(struct pcmcia_device *link)
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.ConfigIndex = 1;
- link->irq.Handler = xirc2ps_interrupt;
/* Fill in card specific entries */
dev->netdev_ops = &netdev_ops;
@@ -580,8 +557,7 @@ xirc2ps_detach(struct pcmcia_device *link)
dev_dbg(&link->dev, "detach\n");
- if (link->dev_node)
- unregister_netdev(dev);
+ unregister_netdev(dev);
xirc2ps_release(link);
@@ -841,7 +817,6 @@ xirc2ps_config(struct pcmcia_device * link)
link->conf.Attributes |= CONF_ENABLE_SPKR;
link->conf.Status |= CCSR_AUDIO_ENA;
}
- link->irq.Attributes |= IRQ_TYPE_DYNAMIC_SHARING;
link->io.NumPorts2 = 8;
link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
if (local->dingo) {
@@ -866,7 +841,6 @@ xirc2ps_config(struct pcmcia_device * link)
}
printk(KNOT_XIRC "no ports available\n");
} else {
- link->irq.Attributes |= IRQ_TYPE_DYNAMIC_SHARING;
link->io.NumPorts1 = 16;
for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) {
link->io.BasePort1 = ioaddr;
@@ -885,7 +859,7 @@ xirc2ps_config(struct pcmcia_device * link)
* Now allocate an interrupt line. Note that this does not
* actually assign a handler to the interrupt.
*/
- if ((err=pcmcia_request_irq(link, &link->irq)))
+ if ((err=pcmcia_request_irq(link, xirc2ps_interrupt)))
goto config_error;
/****************
@@ -982,23 +956,19 @@ xirc2ps_config(struct pcmcia_device * link)
printk(KNOT_XIRC "invalid if_port requested\n");
/* we can now register the device with the net subsystem */
- dev->irq = link->irq.AssignedIRQ;
+ dev->irq = link->irq;
dev->base_addr = link->io.BasePort1;
if (local->dingo)
do_reset(dev, 1); /* a kludge to make the cem56 work */
- link->dev_node = &local->node;
SET_NETDEV_DEV(dev, &link->dev);
if ((err=register_netdev(dev))) {
printk(KNOT_XIRC "register_netdev() failed\n");
- link->dev_node = NULL;
goto config_error;
}
- strcpy(local->node.dev_name, dev->name);
-
/* give some infos about the hardware */
printk(KERN_INFO "%s: %s: port %#3lx, irq %d, hwaddr %pM\n",
dev->name, local->manf_str,(u_long)dev->base_addr, (int)dev->irq,
@@ -1295,7 +1265,7 @@ xirc2ps_tx_timeout_task(struct work_struct *work)
struct net_device *dev = local->dev;
/* reset the card */
do_reset(dev,1);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -1358,7 +1328,6 @@ do_start_xmit(struct sk_buff *skb, struct net_device *dev)
PutByte(XIRCREG_CR, TransmitPacket|EnableIntr);
dev_kfree_skb (skb);
- dev->trans_start = jiffies;
dev->stats.tx_bytes += pktlen;
netif_start_queue(dev);
return NETDEV_TX_OK;
@@ -1398,7 +1367,7 @@ static void set_addresses(struct net_device *dev)
{
unsigned int ioaddr = dev->base_addr;
local_info_t *lp = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
struct set_address_info sa_info;
int i;
@@ -1413,10 +1382,10 @@ static void set_addresses(struct net_device *dev)
set_address(&sa_info, dev->dev_addr);
i = 0;
- netdev_for_each_mc_addr(dmi, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (i++ == 9)
break;
- set_address(&sa_info, dmi->dmi_addr);
+ set_address(&sa_info, ha->addr);
}
while (i++ < 9)
set_address(&sa_info, dev->dev_addr);
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 084d78dd1637..566fd89da861 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -448,7 +448,7 @@ static void pcnet32_netif_stop(struct net_device *dev)
{
struct pcnet32_private *lp = netdev_priv(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
napi_disable(&lp->napi);
netif_tx_disable(dev);
}
@@ -2398,7 +2398,7 @@ static void pcnet32_tx_timeout(struct net_device *dev)
}
pcnet32_restart(dev, CSR0_NORMAL);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
spin_unlock_irqrestore(&lp->lock, flags);
@@ -2449,8 +2449,6 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
/* Trigger an immediate send poll. */
lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
- dev->trans_start = jiffies;
-
if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
lp->tx_full = 1;
netif_stop_queue(dev);
@@ -2590,7 +2588,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
struct pcnet32_private *lp = netdev_priv(dev);
volatile struct pcnet32_init_block *ib = lp->init_block;
volatile __le16 *mcast_table = (__le16 *)ib->filter;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
unsigned long ioaddr = dev->base_addr;
char *addrs;
int i;
@@ -2611,8 +2609,8 @@ static void pcnet32_load_multicast(struct net_device *dev)
ib->filter[1] = 0;
/* Add addresses */
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
/* multicast address? */
if (!(*addrs & 1))
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index 4fed95e8350e..c12815679837 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -130,3 +130,11 @@ static void __exit bcm63xx_phy_exit(void)
module_init(bcm63xx_phy_init);
module_exit(bcm63xx_phy_exit);
+
+static struct mdio_device_id bcm63xx_tbl[] = {
+ { 0x00406000, 0xfffffc00 },
+ { 0x002bdc00, 0xfffffc00 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, bcm63xx_tbl);
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index f482fc4f8cf1..cecdbbd549ec 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -908,3 +908,19 @@ static void __exit broadcom_exit(void)
module_init(broadcom_init);
module_exit(broadcom_exit);
+
+static struct mdio_device_id broadcom_tbl[] = {
+ { 0x00206070, 0xfffffff0 },
+ { 0x002060e0, 0xfffffff0 },
+ { 0x002060c0, 0xfffffff0 },
+ { 0x002060b0, 0xfffffff0 },
+ { 0x0143bca0, 0xfffffff0 },
+ { 0x0143bcb0, 0xfffffff0 },
+ { PHY_ID_BCM50610, 0xfffffff0 },
+ { PHY_ID_BCM50610M, 0xfffffff0 },
+ { PHY_ID_BCM57780, 0xfffffff0 },
+ { PHY_ID_BCMAC131, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, broadcom_tbl);
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index 92282b31d94b..1a325d63756b 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -158,3 +158,11 @@ static void __exit cicada_exit(void)
module_init(cicada_init);
module_exit(cicada_exit);
+
+static struct mdio_device_id cicada_tbl[] = {
+ { 0x000fc410, 0x000ffff0 },
+ { 0x000fc440, 0x000fffc0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, cicada_tbl);
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index c722e95853ff..29c17617a2ec 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -218,3 +218,12 @@ static void __exit davicom_exit(void)
module_init(davicom_init);
module_exit(davicom_exit);
+
+static struct mdio_device_id davicom_tbl[] = {
+ { 0x0181b880, 0x0ffffff0 },
+ { 0x0181b8a0, 0x0ffffff0 },
+ { 0x00181b80, 0x0ffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, davicom_tbl);
diff --git a/drivers/net/phy/et1011c.c b/drivers/net/phy/et1011c.c
index 7712ebeba9bf..13995f52d6af 100644
--- a/drivers/net/phy/et1011c.c
+++ b/drivers/net/phy/et1011c.c
@@ -110,3 +110,10 @@ static void __exit et1011c_exit(void)
module_init(et1011c_init);
module_exit(et1011c_exit);
+
+static struct mdio_device_id et1011c_tbl[] = {
+ { 0x0282f014, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, et1011c_tbl);
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 904208b95d4b..439adafeacb1 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -131,3 +131,10 @@ static void __exit ip175c_exit(void)
module_init(ip175c_init);
module_exit(ip175c_exit);
+
+static struct mdio_device_id icplus_tbl[] = {
+ { 0x02430d80, 0x0ffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, icplus_tbl);
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index 057ecaacde6b..8ee929b796d8 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -173,3 +173,11 @@ static void __exit lxt_exit(void)
module_init(lxt_init);
module_exit(lxt_exit);
+
+static struct mdio_device_id lxt_tbl[] = {
+ { 0x78100000, 0xfffffff0 },
+ { 0x001378e0, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, lxt_tbl);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 64c7fbe0a8e7..78b74e83ce5d 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -648,3 +648,16 @@ static void __exit marvell_exit(void)
module_init(marvell_init);
module_exit(marvell_exit);
+
+static struct mdio_device_id marvell_tbl[] = {
+ { 0x01410c60, 0xfffffff0 },
+ { 0x01410c90, 0xfffffff0 },
+ { 0x01410cc0, 0xfffffff0 },
+ { 0x01410e10, 0xfffffff0 },
+ { 0x01410cb0, 0xfffffff0 },
+ { 0x01410cd0, 0xfffffff0 },
+ { 0x01410e30, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, marvell_tbl);
diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
index 19e70d7e27ab..65391891d8c4 100644
--- a/drivers/net/phy/mdio-bitbang.c
+++ b/drivers/net/phy/mdio-bitbang.c
@@ -22,8 +22,13 @@
#include <linux/types.h>
#include <linux/delay.h>
-#define MDIO_READ 1
-#define MDIO_WRITE 0
+#define MDIO_READ 2
+#define MDIO_WRITE 1
+
+#define MDIO_C45 (1<<15)
+#define MDIO_C45_ADDR (MDIO_C45 | 0)
+#define MDIO_C45_READ (MDIO_C45 | 3)
+#define MDIO_C45_WRITE (MDIO_C45 | 1)
#define MDIO_SETUP_TIME 10
#define MDIO_HOLD_TIME 10
@@ -89,7 +94,7 @@ static u16 mdiobb_get_num(struct mdiobb_ctrl *ctrl, int bits)
/* Utility to send the preamble, address, and
* register (common to read and write).
*/
-static void mdiobb_cmd(struct mdiobb_ctrl *ctrl, int read, u8 phy, u8 reg)
+static void mdiobb_cmd(struct mdiobb_ctrl *ctrl, int op, u8 phy, u8 reg)
{
const struct mdiobb_ops *ops = ctrl->ops;
int i;
@@ -108,23 +113,56 @@ static void mdiobb_cmd(struct mdiobb_ctrl *ctrl, int read, u8 phy, u8 reg)
for (i = 0; i < 32; i++)
mdiobb_send_bit(ctrl, 1);
- /* send the start bit (01) and the read opcode (10) or write (10) */
+ /* send the start bit (01) and the read opcode (10) or write (01).
+ Clause 45 operation uses 00 for the start and 11, 01 for
+ read/write */
mdiobb_send_bit(ctrl, 0);
- mdiobb_send_bit(ctrl, 1);
- mdiobb_send_bit(ctrl, read);
- mdiobb_send_bit(ctrl, !read);
+ if (op & MDIO_C45)
+ mdiobb_send_bit(ctrl, 0);
+ else
+ mdiobb_send_bit(ctrl, 1);
+ mdiobb_send_bit(ctrl, (op >> 1) & 1);
+ mdiobb_send_bit(ctrl, (op >> 0) & 1);
mdiobb_send_num(ctrl, phy, 5);
mdiobb_send_num(ctrl, reg, 5);
}
+/* In clause 45 mode all commands are prefixed by MDIO_ADDR to specify the
+ lower 16 bits of the 21-bit address. This transfer is done identically to
+ an MDIO_WRITE except for a different code. To enable clause 45 mode, OR
+ MII_ADDR_C45 into the address. Theoretically clause 45 and normal devices
+ can exist on the same bus. Normal devices should ignore the MDIO_ADDR
+ phase. */
+static int mdiobb_cmd_addr(struct mdiobb_ctrl *ctrl, int phy, u32 addr)
+{
+ unsigned int dev_addr = (addr >> 16) & 0x1F;
+ unsigned int reg = addr & 0xFFFF;
+ mdiobb_cmd(ctrl, MDIO_C45_ADDR, phy, dev_addr);
+
+ /* send the turnaround (10) */
+ mdiobb_send_bit(ctrl, 1);
+ mdiobb_send_bit(ctrl, 0);
+
+ mdiobb_send_num(ctrl, reg, 16);
+
+ ctrl->ops->set_mdio_dir(ctrl, 0);
+ mdiobb_get_bit(ctrl);
+
+ return dev_addr;
+}
static int mdiobb_read(struct mii_bus *bus, int phy, int reg)
{
struct mdiobb_ctrl *ctrl = bus->priv;
int ret, i;
- mdiobb_cmd(ctrl, MDIO_READ, phy, reg);
+ if (reg & MII_ADDR_C45) {
+ reg = mdiobb_cmd_addr(ctrl, phy, reg);
+ mdiobb_cmd(ctrl, MDIO_C45_READ, phy, reg);
+ } else
+ mdiobb_cmd(ctrl, MDIO_READ, phy, reg);
+
ctrl->ops->set_mdio_dir(ctrl, 0);
/* check the turnaround bit: the PHY should be driving it to zero */
@@ -147,7 +185,11 @@ static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
struct mdiobb_ctrl *ctrl = bus->priv;
- mdiobb_cmd(ctrl, MDIO_WRITE, phy, reg);
+ if (reg & MII_ADDR_C45) {
+ reg = mdiobb_cmd_addr(ctrl, phy, reg);
+ mdiobb_cmd(ctrl, MDIO_C45_WRITE, phy, reg);
+ } else
+ mdiobb_cmd(ctrl, MDIO_WRITE, phy, reg);
/* send the turnaround (10) */
mdiobb_send_bit(ctrl, 1);
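
A hedged sketch of how a caller addresses a clause 45 register through this
path: MII_ADDR_C45 flags the access and the device address sits in bits 16-20
of the register number, matching the decode in mdiobb_cmd_addr() above
(variable names here are illustrative only):

	u32 regnum = MII_ADDR_C45 | (devad << 16) | (reg & 0xffff);
	int val = mdiobus_read(bus, phy_addr, regnum);	/* regnum is u32 now */
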
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index e17b70291bbc..6a6b8199a0d6 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -208,7 +208,7 @@ EXPORT_SYMBOL(mdiobus_scan);
* because the bus read/write functions may wait for an interrupt
* to conclude the operation.
*/
-int mdiobus_read(struct mii_bus *bus, int addr, u16 regnum)
+int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
{
int retval;
@@ -233,7 +233,7 @@ EXPORT_SYMBOL(mdiobus_read);
* because the bus read/write functions may wait for an interrupt
* to conclude the operation.
*/
-int mdiobus_write(struct mii_bus *bus, int addr, u16 regnum, u16 val)
+int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
{
int err;
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 0cd80e4d71d9..0692f750c404 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -32,6 +32,7 @@ static int kszphy_config_init(struct phy_device *phydev)
static struct phy_driver ks8001_driver = {
.phy_id = PHY_ID_KS8001,
+ .name = "Micrel KS8001",
.phy_id_mask = 0x00fffff0,
.features = PHY_BASIC_FEATURES,
.flags = PHY_POLL,
@@ -102,3 +103,12 @@ module_exit(ksphy_exit);
MODULE_DESCRIPTION("Micrel PHY driver");
MODULE_AUTHOR("David J. Choi");
MODULE_LICENSE("GPL");
+
+static struct mdio_device_id micrel_tbl[] = {
+ { PHY_ID_KSZ9021, 0x000fff10 },
+ { PHY_ID_VSC8201, 0x00fffff0 },
+ { PHY_ID_KS8001, 0x00fffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, micrel_tbl);
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 6c636eb72089..729ab29ba28c 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -153,3 +153,10 @@ MODULE_LICENSE("GPL");
module_init(ns_init);
module_exit(ns_exit);
+
+static struct mdio_device_id ns_tbl[] = {
+ { DP83865_PHY_ID, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, ns_tbl);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index db1794546c56..1a99bb244106 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -149,6 +149,7 @@ EXPORT_SYMBOL(phy_scan_fixups);
struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
{
struct phy_device *dev;
+
/* We allocate the device, and initialize the
* default values */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -179,6 +180,17 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
mutex_init(&dev->lock);
INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine);
+ /* Request the appropriate module unconditionally; don't
+ bother trying to do so only if it isn't already loaded,
+ because that gets complicated. A hotplug event would have
+ done an unconditional modprobe anyway.
+ We don't do normal hotplug because it won't work for MDIO
+ -- because it relies on the device staying around for long
+ enough for the driver to get loaded. With MDIO, the NIC
+ driver will get bored and give up as soon as it finds that
+ there's no driver _already_ loaded. */
+ request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, MDIO_ID_ARGS(phy_id));
+
return dev;
}
EXPORT_SYMBOL(phy_device_create);
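
A minimal sketch (example ID values only) of the per-driver side that pairs
with the request_module() call above: each PHY driver publishes an
mdio_device_id table so the modalias generated from a probed phy_id can pull
in the right module, as the tables added throughout this patch do:

	static struct mdio_device_id example_phy_tbl[] = {
		{ 0x01234560, 0xfffffff0 },	/* phy_id, phy_id_mask (made up) */
		{ }
	};
	MODULE_DEVICE_TABLE(mdio, example_phy_tbl);
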
diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c
index f6e190f73c32..6736b23f1b28 100644
--- a/drivers/net/phy/qsemi.c
+++ b/drivers/net/phy/qsemi.c
@@ -137,3 +137,10 @@ static void __exit qs6612_exit(void)
module_init(qs6612_init);
module_exit(qs6612_exit);
+
+static struct mdio_device_id qs6612_tbl[] = {
+ { 0x00181440, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, qs6612_tbl);
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index a052a6744a51..f567c0e1aaa1 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -78,3 +78,10 @@ static void __exit realtek_exit(void)
module_init(realtek_init);
module_exit(realtek_exit);
+
+static struct mdio_device_id realtek_tbl[] = {
+ { 0x001cc912, 0x001fffff },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, realtek_tbl);
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index ed2644a57500..78fa988256fc 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -253,3 +253,14 @@ MODULE_LICENSE("GPL");
module_init(smsc_init);
module_exit(smsc_exit);
+
+static struct mdio_device_id smsc_tbl[] = {
+ { 0x0007c0a0, 0xfffffff0 },
+ { 0x0007c0b0, 0xfffffff0 },
+ { 0x0007c0c0, 0xfffffff0 },
+ { 0x0007c0d0, 0xfffffff0 },
+ { 0x0007c0f0, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, smsc_tbl);
diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c
index 6bdb0d53aaf9..72290099e5e1 100644
--- a/drivers/net/phy/ste10Xp.c
+++ b/drivers/net/phy/ste10Xp.c
@@ -132,6 +132,14 @@ static void __exit ste10Xp_exit(void)
module_init(ste10Xp_init);
module_exit(ste10Xp_exit);
+static struct mdio_device_id ste10Xp_tbl[] = {
+ { STE101P_PHY_ID, 0xfffffff0 },
+ { STE100P_PHY_ID, 0xffffffff },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, ste10Xp_tbl);
+
MODULE_DESCRIPTION("STMicroelectronics STe10Xp PHY driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index dd3b2447e85a..45cce50a2799 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -191,3 +191,11 @@ static void __exit vsc82xx_exit(void)
module_init(vsc82xx_init);
module_exit(vsc82xx_exit);
+
+static struct mdio_device_id vitesse_tbl[] = {
+ { PHY_ID_VSC8244, 0x000fffc0 },
+ { PHY_ID_VSC8221, 0x000ffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, vitesse_tbl);
diff --git a/drivers/net/plip.c b/drivers/net/plip.c
index 9a2103a69e17..f4e1f9a38b87 100644
--- a/drivers/net/plip.c
+++ b/drivers/net/plip.c
@@ -979,7 +979,6 @@ plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
printk(KERN_DEBUG "%s: send request\n", dev->name);
spin_lock_irq(&nl->lock);
- dev->trans_start = jiffies;
snd->skb = skb;
snd->length.h = skb->len;
snd->state = PLIP_PK_TRIGGER;
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 8518a2e58e53..5441688daba7 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -2174,6 +2174,24 @@ int ppp_unit_number(struct ppp_channel *chan)
}
/*
+ * Return the PPP device interface name of a channel.
+ */
+char *ppp_dev_name(struct ppp_channel *chan)
+{
+ struct channel *pch = chan->ppp;
+ char *name = NULL;
+
+ if (pch) {
+ read_lock_bh(&pch->upl);
+ if (pch->ppp && pch->ppp->dev)
+ name = pch->ppp->dev->name;
+ read_unlock_bh(&pch->upl);
+ }
+ return name;
+}
+
+
+/*
* Disconnect a channel from the generic layer.
* This must be called in process context.
*/
@@ -2901,6 +2919,7 @@ EXPORT_SYMBOL(ppp_register_channel);
EXPORT_SYMBOL(ppp_unregister_channel);
EXPORT_SYMBOL(ppp_channel_index);
EXPORT_SYMBOL(ppp_unit_number);
+EXPORT_SYMBOL(ppp_dev_name);
EXPORT_SYMBOL(ppp_input);
EXPORT_SYMBOL(ppp_input_error);
EXPORT_SYMBOL(ppp_output_wakeup);
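
A small usage sketch for the new ppp_dev_name() export (the caller shown is
hypothetical; the helper returns NULL while the channel is not attached to a
PPP unit, so the result must be checked):

	char *name = ppp_dev_name(chan);
	if (name)
		pr_debug("transmitting on %s\n", name);
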
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index cdd11ba100ea..99f031a08a01 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -258,7 +258,7 @@ static inline struct pppox_sock *get_item_by_addr(struct net *net,
dev = dev_get_by_name_rcu(net, sp->sa_addr.pppoe.dev);
if (dev) {
ifindex = dev->ifindex;
- pn = net_generic(net, pppoe_net_id);
+ pn = pppoe_pernet(net);
pppox_sock = get_item(pn, sp->sa_addr.pppoe.sid,
sp->sa_addr.pppoe.remote, ifindex);
}
@@ -290,12 +290,6 @@ static void pppoe_flush_dev(struct net_device *dev)
struct pppoe_net *pn;
int i;
- BUG_ON(dev == NULL);
-
- pn = pppoe_pernet(dev_net(dev));
- if (!pn) /* already freed */
- return;
-
write_lock_bh(&pn->hash_lock);
for (i = 0; i < PPPOE_HASH_SIZE; i++) {
struct pppox_sock *po = pn->hash_table[i];
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
deleted file mode 100644
index 449a9825200d..000000000000
--- a/drivers/net/pppol2tp.c
+++ /dev/null
@@ -1,2680 +0,0 @@
-/*****************************************************************************
- * Linux PPP over L2TP (PPPoX/PPPoL2TP) Sockets
- *
- * PPPoX --- Generic PPP encapsulation socket family
- * PPPoL2TP --- PPP over L2TP (RFC 2661)
- *
- * Version: 1.0.0
- *
- * Authors: Martijn van Oosterhout <kleptog@svana.org>
- * James Chapman (jchapman@katalix.com)
- * Contributors:
- * Michal Ostrowski <mostrows@speakeasy.net>
- * Arnaldo Carvalho de Melo <acme@xconectiva.com.br>
- * David S. Miller (davem@redhat.com)
- *
- * License:
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- */
-
-/* This driver handles only L2TP data frames; control frames are handled by a
- * userspace application.
- *
- * To send data in an L2TP session, userspace opens a PPPoL2TP socket and
- * attaches it to a bound UDP socket with local tunnel_id / session_id and
- * peer tunnel_id / session_id set. Data can then be sent or received using
- * regular socket sendmsg() / recvmsg() calls. Kernel parameters of the socket
- * can be read or modified using ioctl() or [gs]etsockopt() calls.
- *
- * When a PPPoL2TP socket is connected with local and peer session_id values
- * zero, the socket is treated as a special tunnel management socket.
- *
- * Here's example userspace code to create a socket for sending/receiving data
- * over an L2TP session:-
- *
- * struct sockaddr_pppol2tp sax;
- * int fd;
- * int session_fd;
- *
- * fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);
- *
- * sax.sa_family = AF_PPPOX;
- * sax.sa_protocol = PX_PROTO_OL2TP;
- * sax.pppol2tp.fd = tunnel_fd; // bound UDP socket
- * sax.pppol2tp.addr.sin_addr.s_addr = addr->sin_addr.s_addr;
- * sax.pppol2tp.addr.sin_port = addr->sin_port;
- * sax.pppol2tp.addr.sin_family = AF_INET;
- * sax.pppol2tp.s_tunnel = tunnel_id;
- * sax.pppol2tp.s_session = session_id;
- * sax.pppol2tp.d_tunnel = peer_tunnel_id;
- * sax.pppol2tp.d_session = peer_session_id;
- *
- * session_fd = connect(fd, (struct sockaddr *)&sax, sizeof(sax));
- *
- * A pppd plugin that allows PPP traffic to be carried over L2TP using
- * this driver is available from the OpenL2TP project at
- * http://openl2tp.sourceforge.net.
- */
-
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/list.h>
-#include <asm/uaccess.h>
-
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-#include <linux/kthread.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/jiffies.h>
-
-#include <linux/netdevice.h>
-#include <linux/net.h>
-#include <linux/inetdevice.h>
-#include <linux/skbuff.h>
-#include <linux/init.h>
-#include <linux/ip.h>
-#include <linux/udp.h>
-#include <linux/if_pppox.h>
-#include <linux/if_pppol2tp.h>
-#include <net/sock.h>
-#include <linux/ppp_channel.h>
-#include <linux/ppp_defs.h>
-#include <linux/if_ppp.h>
-#include <linux/file.h>
-#include <linux/hash.h>
-#include <linux/sort.h>
-#include <linux/proc_fs.h>
-#include <linux/nsproxy.h>
-#include <net/net_namespace.h>
-#include <net/netns/generic.h>
-#include <net/dst.h>
-#include <net/ip.h>
-#include <net/udp.h>
-#include <net/xfrm.h>
-
-#include <asm/byteorder.h>
-#include <asm/atomic.h>
-
-
-#define PPPOL2TP_DRV_VERSION "V1.0"
-
-/* L2TP header constants */
-#define L2TP_HDRFLAG_T 0x8000
-#define L2TP_HDRFLAG_L 0x4000
-#define L2TP_HDRFLAG_S 0x0800
-#define L2TP_HDRFLAG_O 0x0200
-#define L2TP_HDRFLAG_P 0x0100
-
-#define L2TP_HDR_VER_MASK 0x000F
-#define L2TP_HDR_VER 0x0002
-
-/* Space for UDP, L2TP and PPP headers */
-#define PPPOL2TP_HEADER_OVERHEAD 40
-
-/* Just some random numbers */
-#define L2TP_TUNNEL_MAGIC 0x42114DDA
-#define L2TP_SESSION_MAGIC 0x0C04EB7D
-
-#define PPPOL2TP_HASH_BITS 4
-#define PPPOL2TP_HASH_SIZE (1 << PPPOL2TP_HASH_BITS)
-
-/* Default trace flags */
-#define PPPOL2TP_DEFAULT_DEBUG_FLAGS 0
-
-#define PRINTK(_mask, _type, _lvl, _fmt, args...) \
- do { \
- if ((_mask) & (_type)) \
- printk(_lvl "PPPOL2TP: " _fmt, ##args); \
- } while(0)
-
-/* Number of bytes to build transmit L2TP headers.
- * Unfortunately the size is different depending on whether sequence numbers
- * are enabled.
- */
-#define PPPOL2TP_L2TP_HDR_SIZE_SEQ 10
-#define PPPOL2TP_L2TP_HDR_SIZE_NOSEQ 6
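
As a worked breakdown of the two constants above (assuming the data-frame layout produced by pppol2tp_build_l2tp_header() later in this file): flags/version take 2 bytes, the tunnel and session IDs 2 bytes each, and the optional Ns/Nr fields 2 bytes each when the S bit is set. The following is a sketch only, not driver code.

#include <assert.h>

static int l2tp_data_hdr_len(int send_seq)
{
	int len = 2 /* flags/ver */ + 2 /* tunnel id */ + 2 /* session id */;

	if (send_seq)
		len += 2 /* Ns */ + 2 /* Nr */;
	return len;
}

int main(void)
{
	assert(l2tp_data_hdr_len(0) == 6);	/* PPPOL2TP_L2TP_HDR_SIZE_NOSEQ */
	assert(l2tp_data_hdr_len(1) == 10);	/* PPPOL2TP_L2TP_HDR_SIZE_SEQ */
	return 0;
}
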
-
-struct pppol2tp_tunnel;
-
-/* Describes a session. It is the sk_user_data field in the PPPoL2TP
- * socket. Contains the information needed to identify incoming packets and
- * to transmit outgoing ones.
- */
-struct pppol2tp_session
-{
- int magic; /* should be
- * L2TP_SESSION_MAGIC */
- int owner; /* pid that opened the socket */
-
- struct sock *sock; /* Pointer to the session
- * PPPoX socket */
- struct sock *tunnel_sock; /* Pointer to the tunnel UDP
- * socket */
-
- struct pppol2tp_addr tunnel_addr; /* Description of tunnel */
-
- struct pppol2tp_tunnel *tunnel; /* back pointer to tunnel
- * context */
-
- char name[20]; /* "sess xxxxx/yyyyy", where
- * x=tunnel_id, y=session_id */
- int mtu;
- int mru;
- int flags; /* accessed by PPPIOCGFLAGS.
- * Unused. */
- unsigned recv_seq:1; /* expect receive packets with
- * sequence numbers? */
- unsigned send_seq:1; /* send packets with sequence
- * numbers? */
- unsigned lns_mode:1; /* behave as LNS? LAC enables
- * sequence numbers under
- * control of LNS. */
- int debug; /* bitmask of debug message
- * categories */
- int reorder_timeout; /* configured reorder timeout
- * (in jiffies) */
- u16 nr; /* session NR state (receive) */
-	u16			ns;		/* session Ns state (send) */
- struct sk_buff_head reorder_q; /* receive reorder queue */
- struct pppol2tp_ioc_stats stats;
- struct hlist_node hlist; /* Hash list node */
-};
-
-/* The sk_user_data field of the tunnel's UDP socket. It contains info to track
- * all the associated sessions so incoming packets can be sorted out
- */
-struct pppol2tp_tunnel
-{
- int magic; /* Should be L2TP_TUNNEL_MAGIC */
- rwlock_t hlist_lock; /* protect session_hlist */
- struct hlist_head session_hlist[PPPOL2TP_HASH_SIZE];
- /* hashed list of sessions,
- * hashed by id */
- int debug; /* bitmask of debug message
- * categories */
- char name[12]; /* "tunl xxxxx" */
- struct pppol2tp_ioc_stats stats;
-
- void (*old_sk_destruct)(struct sock *);
-
- struct sock *sock; /* Parent socket */
- struct list_head list; /* Keep a list of all open
- * prepared sockets */
- struct net *pppol2tp_net; /* the net we belong to */
-
- atomic_t ref_count;
-};
-
-/* Private data stored for received packets in the skb.
- */
-struct pppol2tp_skb_cb {
- u16 ns;
- u16 nr;
- u16 has_seq;
- u16 length;
- unsigned long expires;
-};
-
-#define PPPOL2TP_SKB_CB(skb) ((struct pppol2tp_skb_cb *) &skb->cb[sizeof(struct inet_skb_parm)])
-
-static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb);
-static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel);
-
-static atomic_t pppol2tp_tunnel_count;
-static atomic_t pppol2tp_session_count;
-static struct ppp_channel_ops pppol2tp_chan_ops = { pppol2tp_xmit, NULL };
-static const struct proto_ops pppol2tp_ops;
-
-/* per-net private data for this module */
-static int pppol2tp_net_id __read_mostly;
-struct pppol2tp_net {
- struct list_head pppol2tp_tunnel_list;
- rwlock_t pppol2tp_tunnel_list_lock;
-};
-
-static inline struct pppol2tp_net *pppol2tp_pernet(struct net *net)
-{
- BUG_ON(!net);
-
- return net_generic(net, pppol2tp_net_id);
-}
-
-/* Helpers to obtain tunnel/session contexts from sockets.
- */
-static inline struct pppol2tp_session *pppol2tp_sock_to_session(struct sock *sk)
-{
- struct pppol2tp_session *session;
-
- if (sk == NULL)
- return NULL;
-
- sock_hold(sk);
- session = (struct pppol2tp_session *)(sk->sk_user_data);
- if (session == NULL) {
- sock_put(sk);
- goto out;
- }
-
- BUG_ON(session->magic != L2TP_SESSION_MAGIC);
-out:
- return session;
-}
-
-static inline struct pppol2tp_tunnel *pppol2tp_sock_to_tunnel(struct sock *sk)
-{
- struct pppol2tp_tunnel *tunnel;
-
- if (sk == NULL)
- return NULL;
-
- sock_hold(sk);
- tunnel = (struct pppol2tp_tunnel *)(sk->sk_user_data);
- if (tunnel == NULL) {
- sock_put(sk);
- goto out;
- }
-
- BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
-out:
- return tunnel;
-}
-
-/* Tunnel reference counts. Incremented per session that is added to
- * the tunnel.
- */
-static inline void pppol2tp_tunnel_inc_refcount(struct pppol2tp_tunnel *tunnel)
-{
- atomic_inc(&tunnel->ref_count);
-}
-
-static inline void pppol2tp_tunnel_dec_refcount(struct pppol2tp_tunnel *tunnel)
-{
- if (atomic_dec_and_test(&tunnel->ref_count))
- pppol2tp_tunnel_free(tunnel);
-}
-
-/* Session hash list.
- * The session_id SHOULD be random according to RFC2661, but several
- * L2TP implementations (Cisco and Microsoft) use incrementing
- * session_ids. So we do a real hash on the session_id, rather than a
- * simple bitmask.
- */
-static inline struct hlist_head *
-pppol2tp_session_id_hash(struct pppol2tp_tunnel *tunnel, u16 session_id)
-{
- unsigned long hash_val = (unsigned long) session_id;
- return &tunnel->session_hlist[hash_long(hash_val, PPPOL2TP_HASH_BITS)];
-}
-
-/* Lookup a session by id
- */
-static struct pppol2tp_session *
-pppol2tp_session_find(struct pppol2tp_tunnel *tunnel, u16 session_id)
-{
- struct hlist_head *session_list =
- pppol2tp_session_id_hash(tunnel, session_id);
- struct pppol2tp_session *session;
- struct hlist_node *walk;
-
- read_lock_bh(&tunnel->hlist_lock);
- hlist_for_each_entry(session, walk, session_list, hlist) {
- if (session->tunnel_addr.s_session == session_id) {
- read_unlock_bh(&tunnel->hlist_lock);
- return session;
- }
- }
- read_unlock_bh(&tunnel->hlist_lock);
-
- return NULL;
-}
-
-/* Lookup a tunnel by id
- */
-static struct pppol2tp_tunnel *pppol2tp_tunnel_find(struct net *net, u16 tunnel_id)
-{
- struct pppol2tp_tunnel *tunnel;
- struct pppol2tp_net *pn = pppol2tp_pernet(net);
-
- read_lock_bh(&pn->pppol2tp_tunnel_list_lock);
- list_for_each_entry(tunnel, &pn->pppol2tp_tunnel_list, list) {
- if (tunnel->stats.tunnel_id == tunnel_id) {
- read_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
- return tunnel;
- }
- }
- read_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
-
- return NULL;
-}
-
-/*****************************************************************************
- * Receive data handling
- *****************************************************************************/
-
-/* Queue a skb in order. We come here only if the skb has an L2TP sequence
- * number.
- */
-static void pppol2tp_recv_queue_skb(struct pppol2tp_session *session, struct sk_buff *skb)
-{
- struct sk_buff *skbp;
- struct sk_buff *tmp;
- u16 ns = PPPOL2TP_SKB_CB(skb)->ns;
-
- spin_lock_bh(&session->reorder_q.lock);
- skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
- if (PPPOL2TP_SKB_CB(skbp)->ns > ns) {
- __skb_queue_before(&session->reorder_q, skbp, skb);
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
- "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
- session->name, ns, PPPOL2TP_SKB_CB(skbp)->ns,
- skb_queue_len(&session->reorder_q));
- session->stats.rx_oos_packets++;
- goto out;
- }
- }
-
- __skb_queue_tail(&session->reorder_q, skb);
-
-out:
- spin_unlock_bh(&session->reorder_q.lock);
-}
-
-/* Dequeue a single skb.
- */
-static void pppol2tp_recv_dequeue_skb(struct pppol2tp_session *session, struct sk_buff *skb)
-{
- struct pppol2tp_tunnel *tunnel = session->tunnel;
- int length = PPPOL2TP_SKB_CB(skb)->length;
- struct sock *session_sock = NULL;
-
- /* We're about to requeue the skb, so return resources
- * to its current owner (a socket receive buffer).
- */
- skb_orphan(skb);
-
- tunnel->stats.rx_packets++;
- tunnel->stats.rx_bytes += length;
- session->stats.rx_packets++;
- session->stats.rx_bytes += length;
-
- if (PPPOL2TP_SKB_CB(skb)->has_seq) {
- /* Bump our Nr */
- session->nr++;
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
- "%s: updated nr to %hu\n", session->name, session->nr);
- }
-
- /* If the socket is bound, send it in to PPP's input queue. Otherwise
- * queue it on the session socket.
- */
- session_sock = session->sock;
- if (session_sock->sk_state & PPPOX_BOUND) {
- struct pppox_sock *po;
- PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
- "%s: recv %d byte data frame, passing to ppp\n",
- session->name, length);
-
- /* We need to forget all info related to the L2TP packet
- * gathered in the skb as we are going to reuse the same
- * skb for the inner packet.
- * Namely we need to:
- * - reset xfrm (IPSec) information as it applies to
- * the outer L2TP packet and not to the inner one
- * - release the dst to force a route lookup on the inner
- * IP packet since skb->dst currently points to the dst
- * of the UDP tunnel
- * - reset netfilter information as it doesn't apply
- * to the inner packet either
- */
- secpath_reset(skb);
- skb_dst_drop(skb);
- nf_reset(skb);
-
- po = pppox_sk(session_sock);
- ppp_input(&po->chan, skb);
- } else {
- PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
- "%s: socket not bound\n", session->name);
-
- /* Not bound. Nothing we can do, so discard. */
- session->stats.rx_errors++;
- kfree_skb(skb);
- }
-
- sock_put(session->sock);
-}
-
-/* Dequeue skbs from the session's reorder_q, subject to packet order.
- * Skbs that have been in the queue for too long are simply discarded.
- */
-static void pppol2tp_recv_dequeue(struct pppol2tp_session *session)
-{
- struct sk_buff *skb;
- struct sk_buff *tmp;
-
- /* If the pkt at the head of the queue has the nr that we
- * expect to send up next, dequeue it and any other
- * in-sequence packets behind it.
- */
- spin_lock_bh(&session->reorder_q.lock);
- skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
- if (time_after(jiffies, PPPOL2TP_SKB_CB(skb)->expires)) {
- session->stats.rx_seq_discards++;
- session->stats.rx_errors++;
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
- "%s: oos pkt %hu len %d discarded (too old), "
- "waiting for %hu, reorder_q_len=%d\n",
- session->name, PPPOL2TP_SKB_CB(skb)->ns,
- PPPOL2TP_SKB_CB(skb)->length, session->nr,
- skb_queue_len(&session->reorder_q));
- __skb_unlink(skb, &session->reorder_q);
- kfree_skb(skb);
- sock_put(session->sock);
- continue;
- }
-
- if (PPPOL2TP_SKB_CB(skb)->has_seq) {
- if (PPPOL2TP_SKB_CB(skb)->ns != session->nr) {
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
- "%s: holding oos pkt %hu len %d, "
- "waiting for %hu, reorder_q_len=%d\n",
- session->name, PPPOL2TP_SKB_CB(skb)->ns,
- PPPOL2TP_SKB_CB(skb)->length, session->nr,
- skb_queue_len(&session->reorder_q));
- goto out;
- }
- }
- __skb_unlink(skb, &session->reorder_q);
-
- /* Process the skb. We release the queue lock while we
- * do so to let other contexts process the queue.
- */
- spin_unlock_bh(&session->reorder_q.lock);
- pppol2tp_recv_dequeue_skb(session, skb);
- spin_lock_bh(&session->reorder_q.lock);
- }
-
-out:
- spin_unlock_bh(&session->reorder_q.lock);
-}
-
-static inline int pppol2tp_verify_udp_checksum(struct sock *sk,
- struct sk_buff *skb)
-{
- struct udphdr *uh = udp_hdr(skb);
- u16 ulen = ntohs(uh->len);
- struct inet_sock *inet;
- __wsum psum;
-
- if (sk->sk_no_check || skb_csum_unnecessary(skb) || !uh->check)
- return 0;
-
- inet = inet_sk(sk);
- psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr, ulen,
- IPPROTO_UDP, 0);
-
- if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
- !csum_fold(csum_add(psum, skb->csum)))
- return 0;
-
- skb->csum = psum;
-
- return __skb_checksum_complete(skb);
-}
-
-/* Internal receive frame. Do the real work of receiving an L2TP data frame
- * here. The skb is not on a list when we get here.
- * Returns 0 if the packet was a data packet and was successfully passed on.
- * Returns 1 if the packet was not a good data packet and could not be
- * forwarded. All such packets are passed up to userspace to deal with.
- */
-static int pppol2tp_recv_core(struct sock *sock, struct sk_buff *skb)
-{
- struct pppol2tp_session *session = NULL;
- struct pppol2tp_tunnel *tunnel;
- unsigned char *ptr, *optr;
- u16 hdrflags;
- u16 tunnel_id, session_id;
- int length;
- int offset;
-
- tunnel = pppol2tp_sock_to_tunnel(sock);
- if (tunnel == NULL)
- goto no_tunnel;
-
- if (tunnel->sock && pppol2tp_verify_udp_checksum(tunnel->sock, skb))
- goto discard_bad_csum;
-
- /* UDP always verifies the packet length. */
- __skb_pull(skb, sizeof(struct udphdr));
-
- /* Short packet? */
- if (!pskb_may_pull(skb, 12)) {
- PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
- "%s: recv short packet (len=%d)\n", tunnel->name, skb->len);
- goto error;
- }
-
- /* Point to L2TP header */
- optr = ptr = skb->data;
-
- /* Get L2TP header flags */
- hdrflags = ntohs(*(__be16*)ptr);
-
- /* Trace packet contents, if enabled */
- if (tunnel->debug & PPPOL2TP_MSG_DATA) {
- length = min(16u, skb->len);
- if (!pskb_may_pull(skb, length))
- goto error;
-
- printk(KERN_DEBUG "%s: recv: ", tunnel->name);
-
- offset = 0;
- do {
- printk(" %02X", ptr[offset]);
- } while (++offset < length);
-
- printk("\n");
- }
-
- /* Get length of L2TP packet */
- length = skb->len;
-
- /* If type is control packet, it is handled by userspace. */
- if (hdrflags & L2TP_HDRFLAG_T) {
- PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
- "%s: recv control packet, len=%d\n", tunnel->name, length);
- goto error;
- }
-
- /* Skip flags */
- ptr += 2;
-
- /* If length is present, skip it */
- if (hdrflags & L2TP_HDRFLAG_L)
- ptr += 2;
-
- /* Extract tunnel and session ID */
- tunnel_id = ntohs(*(__be16 *) ptr);
- ptr += 2;
- session_id = ntohs(*(__be16 *) ptr);
- ptr += 2;
-
- /* Find the session context */
- session = pppol2tp_session_find(tunnel, session_id);
- if (!session) {
- /* Not found? Pass to userspace to deal with */
- PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
- "%s: no socket found (%hu/%hu). Passing up.\n",
- tunnel->name, tunnel_id, session_id);
- goto error;
- }
- sock_hold(session->sock);
-
- /* The ref count on the socket was increased by the above call since
- * we now hold a pointer to the session. Take care to do sock_put()
- * when exiting this function from now on...
- */
-
- /* Handle the optional sequence numbers. If we are the LAC,
- * enable/disable sequence numbers under the control of the LNS. If
- * no sequence numbers present but we were expecting them, discard
- * frame.
- */
- if (hdrflags & L2TP_HDRFLAG_S) {
- u16 ns, nr;
- ns = ntohs(*(__be16 *) ptr);
- ptr += 2;
- nr = ntohs(*(__be16 *) ptr);
- ptr += 2;
-
-		/* Received a packet with sequence numbers. If we're the LAC,
-		 * check whether we are sending sequence numbers and, if not,
-		 * start doing so, since the LNS has requested it.
- */
- if ((!session->lns_mode) && (!session->send_seq)) {
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_INFO,
- "%s: requested to enable seq numbers by LNS\n",
- session->name);
- session->send_seq = -1;
- }
-
- /* Store L2TP info in the skb */
- PPPOL2TP_SKB_CB(skb)->ns = ns;
- PPPOL2TP_SKB_CB(skb)->nr = nr;
- PPPOL2TP_SKB_CB(skb)->has_seq = 1;
-
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
- "%s: recv data ns=%hu, nr=%hu, session nr=%hu\n",
- session->name, ns, nr, session->nr);
- } else {
- /* No sequence numbers.
- * If user has configured mandatory sequence numbers, discard.
- */
- if (session->recv_seq) {
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_WARNING,
- "%s: recv data has no seq numbers when required. "
- "Discarding\n", session->name);
- session->stats.rx_seq_discards++;
- goto discard;
- }
-
- /* If we're the LAC and we're sending sequence numbers, the
- * LNS has requested that we no longer send sequence numbers.
- * If we're the LNS and we're sending sequence numbers, the
- * LAC is broken. Discard the frame.
- */
- if ((!session->lns_mode) && (session->send_seq)) {
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_INFO,
- "%s: requested to disable seq numbers by LNS\n",
- session->name);
- session->send_seq = 0;
- } else if (session->send_seq) {
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_WARNING,
- "%s: recv data has no seq numbers when required. "
- "Discarding\n", session->name);
- session->stats.rx_seq_discards++;
- goto discard;
- }
-
- /* Store L2TP info in the skb */
- PPPOL2TP_SKB_CB(skb)->has_seq = 0;
- }
-
- /* If offset bit set, skip it. */
- if (hdrflags & L2TP_HDRFLAG_O) {
- offset = ntohs(*(__be16 *)ptr);
- ptr += 2 + offset;
- }
-
- offset = ptr - optr;
- if (!pskb_may_pull(skb, offset))
- goto discard;
-
- __skb_pull(skb, offset);
-
- /* Skip PPP header, if present. In testing, Microsoft L2TP clients
- * don't send the PPP header (PPP header compression enabled), but
- * other clients can include the header. So we cope with both cases
- * here. The PPP header is always FF03 when using L2TP.
- *
- * Note that skb->data[] isn't dereferenced from a u16 ptr here since
- * the field may be unaligned.
- */
- if (!pskb_may_pull(skb, 2))
- goto discard;
-
- if ((skb->data[0] == 0xff) && (skb->data[1] == 0x03))
- skb_pull(skb, 2);
-
- /* Prepare skb for adding to the session's reorder_q. Hold
- * packets for max reorder_timeout or 1 second if not
- * reordering.
- */
- PPPOL2TP_SKB_CB(skb)->length = length;
- PPPOL2TP_SKB_CB(skb)->expires = jiffies +
- (session->reorder_timeout ? session->reorder_timeout : HZ);
-
- /* Add packet to the session's receive queue. Reordering is done here, if
-	 * enabled. Saved L2TP protocol info is stored in skb->cb[].
- */
- if (PPPOL2TP_SKB_CB(skb)->has_seq) {
- if (session->reorder_timeout != 0) {
- /* Packet reordering enabled. Add skb to session's
- * reorder queue, in order of ns.
- */
- pppol2tp_recv_queue_skb(session, skb);
- } else {
- /* Packet reordering disabled. Discard out-of-sequence
- * packets
- */
- if (PPPOL2TP_SKB_CB(skb)->ns != session->nr) {
- session->stats.rx_seq_discards++;
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
- "%s: oos pkt %hu len %d discarded, "
- "waiting for %hu, reorder_q_len=%d\n",
- session->name, PPPOL2TP_SKB_CB(skb)->ns,
- PPPOL2TP_SKB_CB(skb)->length, session->nr,
- skb_queue_len(&session->reorder_q));
- goto discard;
- }
- skb_queue_tail(&session->reorder_q, skb);
- }
- } else {
- /* No sequence numbers. Add the skb to the tail of the
- * reorder queue. This ensures that it will be
- * delivered after all previous sequenced skbs.
- */
- skb_queue_tail(&session->reorder_q, skb);
- }
-
- /* Try to dequeue as many skbs from reorder_q as we can. */
- pppol2tp_recv_dequeue(session);
- sock_put(sock);
-
- return 0;
-
-discard:
- session->stats.rx_errors++;
- kfree_skb(skb);
- sock_put(session->sock);
- sock_put(sock);
-
- return 0;
-
-discard_bad_csum:
- LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name);
- UDP_INC_STATS_USER(&init_net, UDP_MIB_INERRORS, 0);
- tunnel->stats.rx_errors++;
- kfree_skb(skb);
- sock_put(sock);
-
- return 0;
-
-error:
- /* Put UDP header back */
- __skb_push(skb, sizeof(struct udphdr));
- sock_put(sock);
-
-no_tunnel:
- return 1;
-}
-
-/* UDP encapsulation receive handler. See net/ipv4/udp.c.
- * Return codes:
- * 0 : success.
- * <0: error
- * >0: skb should be passed up to userspace as UDP.
- */
-static int pppol2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
-{
- struct pppol2tp_tunnel *tunnel;
-
- tunnel = pppol2tp_sock_to_tunnel(sk);
- if (tunnel == NULL)
- goto pass_up;
-
- PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
- "%s: received %d bytes\n", tunnel->name, skb->len);
-
- if (pppol2tp_recv_core(sk, skb))
- goto pass_up_put;
-
- sock_put(sk);
- return 0;
-
-pass_up_put:
- sock_put(sk);
-pass_up:
- return 1;
-}
-
-/* Receive message. This is the recvmsg for the PPPoL2TP socket.
- */
-static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len,
- int flags)
-{
- int err;
- struct sk_buff *skb;
- struct sock *sk = sock->sk;
-
- err = -EIO;
- if (sk->sk_state & PPPOX_BOUND)
- goto end;
-
- msg->msg_namelen = 0;
-
- err = 0;
- skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
- flags & MSG_DONTWAIT, &err);
- if (!skb)
- goto end;
-
- if (len > skb->len)
- len = skb->len;
- else if (len < skb->len)
- msg->msg_flags |= MSG_TRUNC;
-
- err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len);
- if (likely(err == 0))
- err = len;
-
- kfree_skb(skb);
-end:
- return err;
-}
-
-/************************************************************************
- * Transmit handling
- ***********************************************************************/
-
-/* Tell how big L2TP headers are for a particular session. This
- * depends on whether sequence numbers are being used.
- */
-static inline int pppol2tp_l2tp_header_len(struct pppol2tp_session *session)
-{
- if (session->send_seq)
- return PPPOL2TP_L2TP_HDR_SIZE_SEQ;
-
- return PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
-}
-
-/* Build an L2TP header for the session into the buffer provided.
- */
-static void pppol2tp_build_l2tp_header(struct pppol2tp_session *session,
- void *buf)
-{
- __be16 *bufp = buf;
- u16 flags = L2TP_HDR_VER;
-
- if (session->send_seq)
- flags |= L2TP_HDRFLAG_S;
-
- /* Setup L2TP header.
- * FIXME: Can this ever be unaligned? Is direct dereferencing of
- * 16-bit header fields safe here for all architectures?
- */
- *bufp++ = htons(flags);
- *bufp++ = htons(session->tunnel_addr.d_tunnel);
- *bufp++ = htons(session->tunnel_addr.d_session);
- if (session->send_seq) {
- *bufp++ = htons(session->ns);
- *bufp++ = 0;
- session->ns++;
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
- "%s: updated ns to %hu\n", session->name, session->ns);
- }
-}
-
-/* This is the sendmsg for the PPPoL2TP pppol2tp_session socket. We come here
- * when a user application does a sendmsg() on the session socket. L2TP and
- * PPP headers must be inserted into the user's data.
- */
-static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
- size_t total_len)
-{
- static const unsigned char ppph[2] = { 0xff, 0x03 };
- struct sock *sk = sock->sk;
- struct inet_sock *inet;
- __wsum csum;
- struct sk_buff *skb;
- int error;
- int hdr_len;
- struct pppol2tp_session *session;
- struct pppol2tp_tunnel *tunnel;
- struct udphdr *uh;
- unsigned int len;
- struct sock *sk_tun;
- u16 udp_len;
-
- error = -ENOTCONN;
- if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
- goto error;
-
- /* Get session and tunnel contexts */
- error = -EBADF;
- session = pppol2tp_sock_to_session(sk);
- if (session == NULL)
- goto error;
-
- sk_tun = session->tunnel_sock;
- tunnel = pppol2tp_sock_to_tunnel(sk_tun);
- if (tunnel == NULL)
- goto error_put_sess;
-
- /* What header length is configured for this session? */
- hdr_len = pppol2tp_l2tp_header_len(session);
-
- /* Allocate a socket buffer */
- error = -ENOMEM;
- skb = sock_wmalloc(sk, NET_SKB_PAD + sizeof(struct iphdr) +
- sizeof(struct udphdr) + hdr_len +
- sizeof(ppph) + total_len,
- 0, GFP_KERNEL);
- if (!skb)
- goto error_put_sess_tun;
-
- /* Reserve space for headers. */
- skb_reserve(skb, NET_SKB_PAD);
- skb_reset_network_header(skb);
- skb_reserve(skb, sizeof(struct iphdr));
- skb_reset_transport_header(skb);
-
- /* Build UDP header */
- inet = inet_sk(sk_tun);
- udp_len = hdr_len + sizeof(ppph) + total_len;
- uh = (struct udphdr *) skb->data;
- uh->source = inet->inet_sport;
- uh->dest = inet->inet_dport;
- uh->len = htons(udp_len);
- uh->check = 0;
- skb_put(skb, sizeof(struct udphdr));
-
- /* Build L2TP header */
- pppol2tp_build_l2tp_header(session, skb->data);
- skb_put(skb, hdr_len);
-
- /* Add PPP header */
- skb->data[0] = ppph[0];
- skb->data[1] = ppph[1];
- skb_put(skb, 2);
-
- /* Copy user data into skb */
- error = memcpy_fromiovec(skb->data, m->msg_iov, total_len);
- if (error < 0) {
- kfree_skb(skb);
- goto error_put_sess_tun;
- }
- skb_put(skb, total_len);
-
- /* Calculate UDP checksum if configured to do so */
- if (sk_tun->sk_no_check == UDP_CSUM_NOXMIT)
- skb->ip_summed = CHECKSUM_NONE;
- else if (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) {
- skb->ip_summed = CHECKSUM_COMPLETE;
- csum = skb_checksum(skb, 0, udp_len, 0);
- uh->check = csum_tcpudp_magic(inet->inet_saddr,
- inet->inet_daddr,
- udp_len, IPPROTO_UDP, csum);
- if (uh->check == 0)
- uh->check = CSUM_MANGLED_0;
- } else {
- skb->ip_summed = CHECKSUM_PARTIAL;
- skb->csum_start = skb_transport_header(skb) - skb->head;
- skb->csum_offset = offsetof(struct udphdr, check);
- uh->check = ~csum_tcpudp_magic(inet->inet_saddr,
- inet->inet_daddr,
- udp_len, IPPROTO_UDP, 0);
- }
-
- /* Debug */
- if (session->send_seq)
- PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
- "%s: send %Zd bytes, ns=%hu\n", session->name,
- total_len, session->ns - 1);
- else
- PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
- "%s: send %Zd bytes\n", session->name, total_len);
-
- if (session->debug & PPPOL2TP_MSG_DATA) {
- int i;
- unsigned char *datap = skb->data;
-
- printk(KERN_DEBUG "%s: xmit:", session->name);
- for (i = 0; i < total_len; i++) {
- printk(" %02X", *datap++);
- if (i == 15) {
- printk(" ...");
- break;
- }
- }
- printk("\n");
- }
-
- /* Queue the packet to IP for output */
- len = skb->len;
- error = ip_queue_xmit(skb, 1);
-
- /* Update stats */
- if (error >= 0) {
- tunnel->stats.tx_packets++;
- tunnel->stats.tx_bytes += len;
- session->stats.tx_packets++;
- session->stats.tx_bytes += len;
- } else {
- tunnel->stats.tx_errors++;
- session->stats.tx_errors++;
- }
-
- return error;
-
-error_put_sess_tun:
- sock_put(session->tunnel_sock);
-error_put_sess:
- sock_put(sk);
-error:
- return error;
-}
-
-/* Automatically called when the skb is freed.
- */
-static void pppol2tp_sock_wfree(struct sk_buff *skb)
-{
- sock_put(skb->sk);
-}
-
-/* For data skbs that we transmit, we associate with the tunnel socket
- * but don't do accounting.
- */
-static inline void pppol2tp_skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
-{
- sock_hold(sk);
- skb->sk = sk;
- skb->destructor = pppol2tp_sock_wfree;
-}
-
-/* Transmit function called by generic PPP driver. Sends PPP frame
- * over PPPoL2TP socket.
- *
- * This is almost the same as pppol2tp_sendmsg(), but rather than
- * being called with a msghdr from userspace, it is called with a skb
- * from the kernel.
- *
- * The supplied skb from ppp doesn't have enough headroom for the
- * insertion of L2TP, UDP and IP headers so we need to allocate more
- * headroom in the skb. This will create a cloned skb. But we must be
- * careful in the error case because the caller will expect to free
- * the skb it supplied, not our cloned skb. So we take care to always
- * leave the original skb unfreed if we return an error.
- */
-static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
-{
- static const u8 ppph[2] = { 0xff, 0x03 };
- struct sock *sk = (struct sock *) chan->private;
- struct sock *sk_tun;
- int hdr_len;
- u16 udp_len;
- struct pppol2tp_session *session;
- struct pppol2tp_tunnel *tunnel;
- int rc;
- int headroom;
- int data_len = skb->len;
- struct inet_sock *inet;
- __wsum csum;
- struct udphdr *uh;
- unsigned int len;
- int old_headroom;
- int new_headroom;
-
- if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
- goto abort;
-
- /* Get session and tunnel contexts from the socket */
- session = pppol2tp_sock_to_session(sk);
- if (session == NULL)
- goto abort;
-
- sk_tun = session->tunnel_sock;
- if (sk_tun == NULL)
- goto abort_put_sess;
- tunnel = pppol2tp_sock_to_tunnel(sk_tun);
- if (tunnel == NULL)
- goto abort_put_sess;
-
- /* What header length is configured for this session? */
- hdr_len = pppol2tp_l2tp_header_len(session);
-
- /* Check that there's enough headroom in the skb to insert IP,
- * UDP and L2TP and PPP headers. If not enough, expand it to
- * make room. Adjust truesize.
- */
- headroom = NET_SKB_PAD + sizeof(struct iphdr) +
- sizeof(struct udphdr) + hdr_len + sizeof(ppph);
- old_headroom = skb_headroom(skb);
- if (skb_cow_head(skb, headroom))
- goto abort_put_sess_tun;
-
- new_headroom = skb_headroom(skb);
- skb_orphan(skb);
- skb->truesize += new_headroom - old_headroom;
-
- /* Setup PPP header */
- __skb_push(skb, sizeof(ppph));
- skb->data[0] = ppph[0];
- skb->data[1] = ppph[1];
-
- /* Setup L2TP header */
- pppol2tp_build_l2tp_header(session, __skb_push(skb, hdr_len));
-
- udp_len = sizeof(struct udphdr) + hdr_len + sizeof(ppph) + data_len;
-
- /* Setup UDP header */
- inet = inet_sk(sk_tun);
- __skb_push(skb, sizeof(*uh));
- skb_reset_transport_header(skb);
- uh = udp_hdr(skb);
- uh->source = inet->inet_sport;
- uh->dest = inet->inet_dport;
- uh->len = htons(udp_len);
- uh->check = 0;
-
- /* Debug */
- if (session->send_seq)
- PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
- "%s: send %d bytes, ns=%hu\n", session->name,
- data_len, session->ns - 1);
- else
- PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
- "%s: send %d bytes\n", session->name, data_len);
-
- if (session->debug & PPPOL2TP_MSG_DATA) {
- int i;
- unsigned char *datap = skb->data;
-
- printk(KERN_DEBUG "%s: xmit:", session->name);
- for (i = 0; i < data_len; i++) {
- printk(" %02X", *datap++);
- if (i == 31) {
- printk(" ...");
- break;
- }
- }
- printk("\n");
- }
-
- memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
- IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
- IPSKB_REROUTED);
- nf_reset(skb);
-
- /* Get routing info from the tunnel socket */
- skb_dst_drop(skb);
- skb_dst_set(skb, dst_clone(__sk_dst_get(sk_tun)));
- pppol2tp_skb_set_owner_w(skb, sk_tun);
-
- /* Calculate UDP checksum if configured to do so */
- if (sk_tun->sk_no_check == UDP_CSUM_NOXMIT)
- skb->ip_summed = CHECKSUM_NONE;
- else if ((skb_dst(skb) && skb_dst(skb)->dev) &&
- (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM))) {
- skb->ip_summed = CHECKSUM_COMPLETE;
- csum = skb_checksum(skb, 0, udp_len, 0);
- uh->check = csum_tcpudp_magic(inet->inet_saddr,
- inet->inet_daddr,
- udp_len, IPPROTO_UDP, csum);
- if (uh->check == 0)
- uh->check = CSUM_MANGLED_0;
- } else {
- skb->ip_summed = CHECKSUM_PARTIAL;
- skb->csum_start = skb_transport_header(skb) - skb->head;
- skb->csum_offset = offsetof(struct udphdr, check);
- uh->check = ~csum_tcpudp_magic(inet->inet_saddr,
- inet->inet_daddr,
- udp_len, IPPROTO_UDP, 0);
- }
-
- /* Queue the packet to IP for output */
- len = skb->len;
- rc = ip_queue_xmit(skb, 1);
-
- /* Update stats */
- if (rc >= 0) {
- tunnel->stats.tx_packets++;
- tunnel->stats.tx_bytes += len;
- session->stats.tx_packets++;
- session->stats.tx_bytes += len;
- } else {
- tunnel->stats.tx_errors++;
- session->stats.tx_errors++;
- }
-
- sock_put(sk_tun);
- sock_put(sk);
- return 1;
-
-abort_put_sess_tun:
- sock_put(sk_tun);
-abort_put_sess:
- sock_put(sk);
-abort:
- /* Free the original skb */
- kfree_skb(skb);
- return 1;
-}
-
-/*****************************************************************************
- * Session (and tunnel control) socket create/destroy.
- *****************************************************************************/
-
-/* When the tunnel UDP socket is closed, all the attached sockets need to go
- * too.
- */
-static void pppol2tp_tunnel_closeall(struct pppol2tp_tunnel *tunnel)
-{
- int hash;
- struct hlist_node *walk;
- struct hlist_node *tmp;
- struct pppol2tp_session *session;
- struct sock *sk;
-
- BUG_ON(tunnel == NULL);
-
- PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: closing all sessions...\n", tunnel->name);
-
- write_lock_bh(&tunnel->hlist_lock);
- for (hash = 0; hash < PPPOL2TP_HASH_SIZE; hash++) {
-again:
- hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
- struct sk_buff *skb;
-
- session = hlist_entry(walk, struct pppol2tp_session, hlist);
-
- sk = session->sock;
-
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: closing session\n", session->name);
-
- hlist_del_init(&session->hlist);
-
- /* Since we should hold the sock lock while
- * doing any unbinding, we need to release the
- * lock we're holding before taking that lock.
- * Hold a reference to the sock so it doesn't
- * disappear as we're jumping between locks.
- */
- sock_hold(sk);
- write_unlock_bh(&tunnel->hlist_lock);
- lock_sock(sk);
-
- if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
- pppox_unbind_sock(sk);
- sk->sk_state = PPPOX_DEAD;
- sk->sk_state_change(sk);
- }
-
- /* Purge any queued data */
- skb_queue_purge(&sk->sk_receive_queue);
- skb_queue_purge(&sk->sk_write_queue);
- while ((skb = skb_dequeue(&session->reorder_q))) {
- kfree_skb(skb);
- sock_put(sk);
- }
-
- release_sock(sk);
- sock_put(sk);
-
- /* Now restart from the beginning of this hash
- * chain. We always remove a session from the
- * list so we are guaranteed to make forward
- * progress.
- */
- write_lock_bh(&tunnel->hlist_lock);
- goto again;
- }
- }
- write_unlock_bh(&tunnel->hlist_lock);
-}
-
-/* Really kill the tunnel.
- * Come here only when all sessions have been cleared from the tunnel.
- */
-static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel)
-{
- struct pppol2tp_net *pn = pppol2tp_pernet(tunnel->pppol2tp_net);
-
- /* Remove from socket list */
- write_lock_bh(&pn->pppol2tp_tunnel_list_lock);
- list_del_init(&tunnel->list);
- write_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
-
- atomic_dec(&pppol2tp_tunnel_count);
- kfree(tunnel);
-}
-
-/* Tunnel UDP socket destruct hook.
- * The tunnel context is deleted only when all session sockets have been
- * closed.
- */
-static void pppol2tp_tunnel_destruct(struct sock *sk)
-{
- struct pppol2tp_tunnel *tunnel;
-
- tunnel = sk->sk_user_data;
- if (tunnel == NULL)
- goto end;
-
- PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: closing...\n", tunnel->name);
-
- /* Close all sessions */
- pppol2tp_tunnel_closeall(tunnel);
-
- /* No longer an encapsulation socket. See net/ipv4/udp.c */
- (udp_sk(sk))->encap_type = 0;
- (udp_sk(sk))->encap_rcv = NULL;
-
- /* Remove hooks into tunnel socket */
- tunnel->sock = NULL;
- sk->sk_destruct = tunnel->old_sk_destruct;
- sk->sk_user_data = NULL;
-
-	/* Call original (UDP) socket destructor */
- if (sk->sk_destruct != NULL)
- (*sk->sk_destruct)(sk);
-
- pppol2tp_tunnel_dec_refcount(tunnel);
-
-end:
- return;
-}
-
-/* Really kill the session socket. (Called from sock_put() if
- * refcnt == 0.)
- */
-static void pppol2tp_session_destruct(struct sock *sk)
-{
- struct pppol2tp_session *session = NULL;
-
- if (sk->sk_user_data != NULL) {
- struct pppol2tp_tunnel *tunnel;
-
- session = sk->sk_user_data;
- if (session == NULL)
- goto out;
-
- BUG_ON(session->magic != L2TP_SESSION_MAGIC);
-
- /* Don't use pppol2tp_sock_to_tunnel() here to
- * get the tunnel context because the tunnel
- * socket might have already been closed (its
- * sk->sk_user_data will be NULL) so use the
- * session's private tunnel ptr instead.
- */
- tunnel = session->tunnel;
- if (tunnel != NULL) {
- BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
-
- /* If session_id is zero, this is a null
- * session context, which was created for a
- * socket that is being used only to manage
- * tunnels.
- */
- if (session->tunnel_addr.s_session != 0) {
- /* Delete the session socket from the
- * hash
- */
- write_lock_bh(&tunnel->hlist_lock);
- hlist_del_init(&session->hlist);
- write_unlock_bh(&tunnel->hlist_lock);
-
- atomic_dec(&pppol2tp_session_count);
- }
-
- /* This will delete the tunnel context if this
- * is the last session on the tunnel.
- */
- session->tunnel = NULL;
- session->tunnel_sock = NULL;
- pppol2tp_tunnel_dec_refcount(tunnel);
- }
- }
-
- kfree(session);
-out:
- return;
-}
-
-/* Called when the PPPoX socket (session) is closed.
- */
-static int pppol2tp_release(struct socket *sock)
-{
- struct sock *sk = sock->sk;
- struct pppol2tp_session *session;
- int error;
-
- if (!sk)
- return 0;
-
- error = -EBADF;
- lock_sock(sk);
- if (sock_flag(sk, SOCK_DEAD) != 0)
- goto error;
-
- pppox_unbind_sock(sk);
-
- /* Signal the death of the socket. */
- sk->sk_state = PPPOX_DEAD;
- sock_orphan(sk);
- sock->sk = NULL;
-
- session = pppol2tp_sock_to_session(sk);
-
- /* Purge any queued data */
- skb_queue_purge(&sk->sk_receive_queue);
- skb_queue_purge(&sk->sk_write_queue);
- if (session != NULL) {
- struct sk_buff *skb;
- while ((skb = skb_dequeue(&session->reorder_q))) {
- kfree_skb(skb);
- sock_put(sk);
- }
- sock_put(sk);
- }
-
- release_sock(sk);
-
- /* This will delete the session context via
- * pppol2tp_session_destruct() if the socket's refcnt drops to
- * zero.
- */
- sock_put(sk);
-
- return 0;
-
-error:
- release_sock(sk);
- return error;
-}
-
-/* Internal function to prepare a tunnel (UDP) socket to have PPPoX
- * sockets attached to it.
- */
-static struct sock *pppol2tp_prepare_tunnel_socket(struct net *net,
- int fd, u16 tunnel_id, int *error)
-{
- int err;
- struct socket *sock = NULL;
- struct sock *sk;
- struct pppol2tp_tunnel *tunnel;
- struct pppol2tp_net *pn;
- struct sock *ret = NULL;
-
- /* Get the tunnel UDP socket from the fd, which was opened by
- * the userspace L2TP daemon.
- */
- err = -EBADF;
- sock = sockfd_lookup(fd, &err);
- if (!sock) {
- PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR,
- "tunl %hu: sockfd_lookup(fd=%d) returned %d\n",
- tunnel_id, fd, err);
- goto err;
- }
-
- sk = sock->sk;
-
- /* Quick sanity checks */
- err = -EPROTONOSUPPORT;
- if (sk->sk_protocol != IPPROTO_UDP) {
- PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR,
- "tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
- tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
- goto err;
- }
- err = -EAFNOSUPPORT;
- if (sock->ops->family != AF_INET) {
- PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR,
- "tunl %hu: fd %d wrong family, got %d, expected %d\n",
- tunnel_id, fd, sock->ops->family, AF_INET);
- goto err;
- }
-
- err = -ENOTCONN;
-
- /* Check if this socket has already been prepped */
- tunnel = (struct pppol2tp_tunnel *)sk->sk_user_data;
- if (tunnel != NULL) {
- /* User-data field already set */
- err = -EBUSY;
- BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
-
- /* This socket has already been prepped */
- ret = tunnel->sock;
- goto out;
- }
-
- /* This socket is available and needs prepping. Create a new tunnel
- * context and init it.
- */
- sk->sk_user_data = tunnel = kzalloc(sizeof(struct pppol2tp_tunnel), GFP_KERNEL);
- if (sk->sk_user_data == NULL) {
- err = -ENOMEM;
- goto err;
- }
-
- tunnel->magic = L2TP_TUNNEL_MAGIC;
- sprintf(&tunnel->name[0], "tunl %hu", tunnel_id);
-
- tunnel->stats.tunnel_id = tunnel_id;
- tunnel->debug = PPPOL2TP_DEFAULT_DEBUG_FLAGS;
-
- /* Hook on the tunnel socket destructor so that we can cleanup
- * if the tunnel socket goes away.
- */
- tunnel->old_sk_destruct = sk->sk_destruct;
- sk->sk_destruct = pppol2tp_tunnel_destruct;
-
- tunnel->sock = sk;
- sk->sk_allocation = GFP_ATOMIC;
-
- /* Misc init */
- rwlock_init(&tunnel->hlist_lock);
-
- /* The net we belong to */
- tunnel->pppol2tp_net = net;
- pn = pppol2tp_pernet(net);
-
- /* Add tunnel to our list */
- INIT_LIST_HEAD(&tunnel->list);
- write_lock_bh(&pn->pppol2tp_tunnel_list_lock);
- list_add(&tunnel->list, &pn->pppol2tp_tunnel_list);
- write_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
- atomic_inc(&pppol2tp_tunnel_count);
-
- /* Bump the reference count. The tunnel context is deleted
- * only when this drops to zero.
- */
- pppol2tp_tunnel_inc_refcount(tunnel);
-
- /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
- (udp_sk(sk))->encap_type = UDP_ENCAP_L2TPINUDP;
- (udp_sk(sk))->encap_rcv = pppol2tp_udp_encap_recv;
-
- ret = tunnel->sock;
-
- *error = 0;
-out:
- if (sock)
- sockfd_put(sock);
-
- return ret;
-
-err:
- *error = err;
- goto out;
-}
-
-static struct proto pppol2tp_sk_proto = {
- .name = "PPPOL2TP",
- .owner = THIS_MODULE,
- .obj_size = sizeof(struct pppox_sock),
-};
-
-/* socket() handler. Initialize a new struct sock.
- */
-static int pppol2tp_create(struct net *net, struct socket *sock)
-{
- int error = -ENOMEM;
- struct sock *sk;
-
- sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto);
- if (!sk)
- goto out;
-
- sock_init_data(sock, sk);
-
- sock->state = SS_UNCONNECTED;
- sock->ops = &pppol2tp_ops;
-
- sk->sk_backlog_rcv = pppol2tp_recv_core;
- sk->sk_protocol = PX_PROTO_OL2TP;
- sk->sk_family = PF_PPPOX;
- sk->sk_state = PPPOX_NONE;
- sk->sk_type = SOCK_STREAM;
- sk->sk_destruct = pppol2tp_session_destruct;
-
- error = 0;
-
-out:
- return error;
-}
-
-/* connect() handler. Attach a PPPoX socket to a tunnel UDP socket
- */
-static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
- int sockaddr_len, int flags)
-{
- struct sock *sk = sock->sk;
- struct sockaddr_pppol2tp *sp = (struct sockaddr_pppol2tp *) uservaddr;
- struct pppox_sock *po = pppox_sk(sk);
- struct sock *tunnel_sock = NULL;
- struct pppol2tp_session *session = NULL;
- struct pppol2tp_tunnel *tunnel;
- struct dst_entry *dst;
- int error = 0;
-
- lock_sock(sk);
-
- error = -EINVAL;
- if (sp->sa_protocol != PX_PROTO_OL2TP)
- goto end;
-
- /* Check for already bound sockets */
- error = -EBUSY;
- if (sk->sk_state & PPPOX_CONNECTED)
- goto end;
-
-	/* We don't support rebinding anyway */
- error = -EALREADY;
- if (sk->sk_user_data)
- goto end; /* socket is already attached */
-
- /* Don't bind if s_tunnel is 0 */
- error = -EINVAL;
- if (sp->pppol2tp.s_tunnel == 0)
- goto end;
-
- /* Special case: prepare tunnel socket if s_session and
- * d_session is 0. Otherwise look up tunnel using supplied
- * tunnel id.
- */
- if ((sp->pppol2tp.s_session == 0) && (sp->pppol2tp.d_session == 0)) {
- tunnel_sock = pppol2tp_prepare_tunnel_socket(sock_net(sk),
- sp->pppol2tp.fd,
- sp->pppol2tp.s_tunnel,
- &error);
- if (tunnel_sock == NULL)
- goto end;
-
- sock_hold(tunnel_sock);
- tunnel = tunnel_sock->sk_user_data;
- } else {
- tunnel = pppol2tp_tunnel_find(sock_net(sk), sp->pppol2tp.s_tunnel);
-
- /* Error if we can't find the tunnel */
- error = -ENOENT;
- if (tunnel == NULL)
- goto end;
-
- tunnel_sock = tunnel->sock;
- }
-
- /* Check that this session doesn't already exist */
- error = -EEXIST;
- session = pppol2tp_session_find(tunnel, sp->pppol2tp.s_session);
- if (session != NULL)
- goto end;
-
- /* Allocate and initialize a new session context. */
- session = kzalloc(sizeof(struct pppol2tp_session), GFP_KERNEL);
- if (session == NULL) {
- error = -ENOMEM;
- goto end;
- }
-
- skb_queue_head_init(&session->reorder_q);
-
- session->magic = L2TP_SESSION_MAGIC;
- session->owner = current->pid;
- session->sock = sk;
- session->tunnel = tunnel;
- session->tunnel_sock = tunnel_sock;
- session->tunnel_addr = sp->pppol2tp;
- sprintf(&session->name[0], "sess %hu/%hu",
- session->tunnel_addr.s_tunnel,
- session->tunnel_addr.s_session);
-
- session->stats.tunnel_id = session->tunnel_addr.s_tunnel;
- session->stats.session_id = session->tunnel_addr.s_session;
-
- INIT_HLIST_NODE(&session->hlist);
-
- /* Inherit debug options from tunnel */
- session->debug = tunnel->debug;
-
- /* Default MTU must allow space for UDP/L2TP/PPP
- * headers.
- */
- session->mtu = session->mru = 1500 - PPPOL2TP_HEADER_OVERHEAD;
-
- /* If PMTU discovery was enabled, use the MTU that was discovered */
- dst = sk_dst_get(sk);
- if (dst != NULL) {
- u32 pmtu = dst_mtu(__sk_dst_get(sk));
- if (pmtu != 0)
- session->mtu = session->mru = pmtu -
- PPPOL2TP_HEADER_OVERHEAD;
- dst_release(dst);
- }
-
- /* Special case: if source & dest session_id == 0x0000, this socket is
- * being created to manage the tunnel. Don't add the session to the
- * session hash list, just set up the internal context for use by
- * ioctl() and sockopt() handlers.
- */
- if ((session->tunnel_addr.s_session == 0) &&
- (session->tunnel_addr.d_session == 0)) {
- error = 0;
- sk->sk_user_data = session;
- goto out_no_ppp;
- }
-
- /* Get tunnel context from the tunnel socket */
- tunnel = pppol2tp_sock_to_tunnel(tunnel_sock);
- if (tunnel == NULL) {
- error = -EBADF;
- goto end;
- }
-
- /* Right now, because we don't have a way to push the incoming skb's
- * straight through the UDP layer, the only header we need to worry
- * about is the L2TP header. This size is different depending on
- * whether sequence numbers are enabled for the data channel.
- */
- po->chan.hdrlen = PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
-
- po->chan.private = sk;
- po->chan.ops = &pppol2tp_chan_ops;
- po->chan.mtu = session->mtu;
-
- error = ppp_register_net_channel(sock_net(sk), &po->chan);
- if (error)
- goto end_put_tun;
-
- /* This is how we get the session context from the socket. */
- sk->sk_user_data = session;
-
- /* Add session to the tunnel's hash list */
- write_lock_bh(&tunnel->hlist_lock);
- hlist_add_head(&session->hlist,
- pppol2tp_session_id_hash(tunnel,
- session->tunnel_addr.s_session));
- write_unlock_bh(&tunnel->hlist_lock);
-
- atomic_inc(&pppol2tp_session_count);
-
-out_no_ppp:
- pppol2tp_tunnel_inc_refcount(tunnel);
- sk->sk_state = PPPOX_CONNECTED;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: created\n", session->name);
-
-end_put_tun:
- sock_put(tunnel_sock);
-end:
- release_sock(sk);
-
- if (error != 0) {
- if (session)
- PRINTK(session->debug,
- PPPOL2TP_MSG_CONTROL, KERN_WARNING,
- "%s: connect failed: %d\n",
- session->name, error);
- else
- PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_WARNING,
- "connect failed: %d\n", error);
- }
-
- return error;
-}
-
-/* getname() support.
- */
-static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
- int *usockaddr_len, int peer)
-{
- int len = sizeof(struct sockaddr_pppol2tp);
- struct sockaddr_pppol2tp sp;
- int error = 0;
- struct pppol2tp_session *session;
-
- error = -ENOTCONN;
- if (sock->sk->sk_state != PPPOX_CONNECTED)
- goto end;
-
- session = pppol2tp_sock_to_session(sock->sk);
- if (session == NULL) {
- error = -EBADF;
- goto end;
- }
-
- sp.sa_family = AF_PPPOX;
- sp.sa_protocol = PX_PROTO_OL2TP;
- memcpy(&sp.pppol2tp, &session->tunnel_addr,
- sizeof(struct pppol2tp_addr));
-
- memcpy(uaddr, &sp, len);
-
- *usockaddr_len = len;
-
- error = 0;
- sock_put(sock->sk);
-
-end:
- return error;
-}
-
-/****************************************************************************
- * ioctl() handlers.
- *
- * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP
- * sockets. However, in order to control kernel tunnel features, we allow
- * userspace to create a special "tunnel" PPPoX socket which is used for
- * control only. Tunnel PPPoX sockets have session_id == 0 and simply allow
- * the user application to issue L2TP setsockopt(), getsockopt() and ioctl()
- * calls.
- ****************************************************************************/
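
As an illustration of the control-only tunnel socket described above, here is a minimal userspace sketch of the stats ioctl. It assumes tunnel_mgmt_fd is a PPPoL2TP socket connected with s_session == d_session == 0, that PPPIOCGL2TPSTATS and struct pppol2tp_ioc_stats are exposed to userspace by the headers this driver already uses (their exact location and field types are an assumption here), and that print_session_stats() is an illustrative name.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/if_ppp.h>		/* PPPIOCGL2TPSTATS (location may vary) */
#include <linux/if_pppol2tp.h>		/* struct pppol2tp_ioc_stats */

/* Illustrative helper: fetch stats through a tunnel-management socket.
 * With session_id == 0 the tunnel-level counters are returned; with a
 * non-zero session_id the request is redirected to that session, as the
 * tunnel ioctl handler below does.
 */
static int print_session_stats(int tunnel_mgmt_fd, uint16_t session_id)
{
	struct pppol2tp_ioc_stats stats;

	memset(&stats, 0, sizeof(stats));
	stats.session_id = session_id;

	if (ioctl(tunnel_mgmt_fd, PPPIOCGL2TPSTATS, &stats) < 0)
		return -1;

	printf("rx %llu pkts, tx %llu pkts\n",
	       (unsigned long long)stats.rx_packets,
	       (unsigned long long)stats.tx_packets);
	return 0;
}
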
-
-/* Session ioctl helper.
- */
-static int pppol2tp_session_ioctl(struct pppol2tp_session *session,
- unsigned int cmd, unsigned long arg)
-{
- struct ifreq ifr;
- int err = 0;
- struct sock *sk = session->sock;
- int val = (int) arg;
-
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
- "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n",
- session->name, cmd, arg);
-
- sock_hold(sk);
-
- switch (cmd) {
- case SIOCGIFMTU:
- err = -ENXIO;
- if (!(sk->sk_state & PPPOX_CONNECTED))
- break;
-
- err = -EFAULT;
- if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
- break;
- ifr.ifr_mtu = session->mtu;
- if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq)))
- break;
-
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get mtu=%d\n", session->name, session->mtu);
- err = 0;
- break;
-
- case SIOCSIFMTU:
- err = -ENXIO;
- if (!(sk->sk_state & PPPOX_CONNECTED))
- break;
-
- err = -EFAULT;
- if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
- break;
-
- session->mtu = ifr.ifr_mtu;
-
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set mtu=%d\n", session->name, session->mtu);
- err = 0;
- break;
-
- case PPPIOCGMRU:
- err = -ENXIO;
- if (!(sk->sk_state & PPPOX_CONNECTED))
- break;
-
- err = -EFAULT;
- if (put_user(session->mru, (int __user *) arg))
- break;
-
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get mru=%d\n", session->name, session->mru);
- err = 0;
- break;
-
- case PPPIOCSMRU:
- err = -ENXIO;
- if (!(sk->sk_state & PPPOX_CONNECTED))
- break;
-
- err = -EFAULT;
- if (get_user(val,(int __user *) arg))
- break;
-
- session->mru = val;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set mru=%d\n", session->name, session->mru);
- err = 0;
- break;
-
- case PPPIOCGFLAGS:
- err = -EFAULT;
- if (put_user(session->flags, (int __user *) arg))
- break;
-
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get flags=%d\n", session->name, session->flags);
- err = 0;
- break;
-
- case PPPIOCSFLAGS:
- err = -EFAULT;
- if (get_user(val, (int __user *) arg))
- break;
- session->flags = val;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set flags=%d\n", session->name, session->flags);
- err = 0;
- break;
-
- case PPPIOCGL2TPSTATS:
- err = -ENXIO;
- if (!(sk->sk_state & PPPOX_CONNECTED))
- break;
-
- if (copy_to_user((void __user *) arg, &session->stats,
- sizeof(session->stats)))
- break;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get L2TP stats\n", session->name);
- err = 0;
- break;
-
- default:
- err = -ENOSYS;
- break;
- }
-
- sock_put(sk);
-
- return err;
-}
-
-/* Tunnel ioctl helper.
- *
- * Note the special handling for PPPIOCGL2TPSTATS below. If the ioctl data
- * specifies a session_id, the session ioctl handler is called. This allows an
- * application to retrieve session stats via a tunnel socket.
- */
-static int pppol2tp_tunnel_ioctl(struct pppol2tp_tunnel *tunnel,
- unsigned int cmd, unsigned long arg)
-{
- int err = 0;
- struct sock *sk = tunnel->sock;
- struct pppol2tp_ioc_stats stats_req;
-
- PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
- "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n", tunnel->name,
- cmd, arg);
-
- sock_hold(sk);
-
- switch (cmd) {
- case PPPIOCGL2TPSTATS:
- err = -ENXIO;
- if (!(sk->sk_state & PPPOX_CONNECTED))
- break;
-
- if (copy_from_user(&stats_req, (void __user *) arg,
- sizeof(stats_req))) {
- err = -EFAULT;
- break;
- }
- if (stats_req.session_id != 0) {
- /* resend to session ioctl handler */
- struct pppol2tp_session *session =
- pppol2tp_session_find(tunnel, stats_req.session_id);
- if (session != NULL)
- err = pppol2tp_session_ioctl(session, cmd, arg);
- else
- err = -EBADR;
- break;
- }
-#ifdef CONFIG_XFRM
- tunnel->stats.using_ipsec = (sk->sk_policy[0] || sk->sk_policy[1]) ? 1 : 0;
-#endif
- if (copy_to_user((void __user *) arg, &tunnel->stats,
- sizeof(tunnel->stats))) {
- err = -EFAULT;
- break;
- }
- PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get L2TP stats\n", tunnel->name);
- err = 0;
- break;
-
- default:
- err = -ENOSYS;
- break;
- }
-
- sock_put(sk);
-
- return err;
-}
-
-/* Main ioctl() handler.
- * Dispatch to tunnel or session helpers depending on the socket.
- */
-static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd,
- unsigned long arg)
-{
- struct sock *sk = sock->sk;
- struct pppol2tp_session *session;
- struct pppol2tp_tunnel *tunnel;
- int err;
-
- if (!sk)
- return 0;
-
- err = -EBADF;
- if (sock_flag(sk, SOCK_DEAD) != 0)
- goto end;
-
- err = -ENOTCONN;
- if ((sk->sk_user_data == NULL) ||
- (!(sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND))))
- goto end;
-
- /* Get session context from the socket */
- err = -EBADF;
- session = pppol2tp_sock_to_session(sk);
- if (session == NULL)
- goto end;
-
- /* Special case: if session's session_id is zero, treat ioctl as a
- * tunnel ioctl
- */
- if ((session->tunnel_addr.s_session == 0) &&
- (session->tunnel_addr.d_session == 0)) {
- err = -EBADF;
- tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
- if (tunnel == NULL)
- goto end_put_sess;
-
- err = pppol2tp_tunnel_ioctl(tunnel, cmd, arg);
- sock_put(session->tunnel_sock);
- goto end_put_sess;
- }
-
- err = pppol2tp_session_ioctl(session, cmd, arg);
-
-end_put_sess:
- sock_put(sk);
-end:
- return err;
-}
-
-/*****************************************************************************
- * setsockopt() / getsockopt() support.
- *
- * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP
- * sockets. In order to control kernel tunnel features, we allow userspace to
- * create a special "tunnel" PPPoX socket which is used for control only.
- * Tunnel PPPoX sockets have session_id == 0 and simply allow the user
- * application to issue L2TP setsockopt(), getsockopt() and ioctl() calls.
- *****************************************************************************/
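
For the same control path, a small sketch of the option handling follows; it assumes SOL_PPPOL2TP and the PPPOL2TP_SO_* constants are available to userspace from linux/if_pppol2tp.h, and enable_sequencing() is an illustrative name. Values are plain ints, and PPPOL2TP_SO_REORDERTO is given in milliseconds (the session helper converts it with msecs_to_jiffies()).

#include <sys/socket.h>
#include <linux/if_pppol2tp.h>

/* Illustrative helper: turn on sequence numbers and packet reordering on
 * a session socket.  On a LAC this is normally left under LNS control,
 * as the receive path in this driver shows.
 */
static int enable_sequencing(int session_fd, int reorder_timeout_ms)
{
	int on = 1;

	if (setsockopt(session_fd, SOL_PPPOL2TP, PPPOL2TP_SO_SENDSEQ,
		       &on, sizeof(on)) < 0)
		return -1;
	if (setsockopt(session_fd, SOL_PPPOL2TP, PPPOL2TP_SO_RECVSEQ,
		       &on, sizeof(on)) < 0)
		return -1;
	return setsockopt(session_fd, SOL_PPPOL2TP, PPPOL2TP_SO_REORDERTO,
			  &reorder_timeout_ms, sizeof(reorder_timeout_ms));
}
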
-
-/* Tunnel setsockopt() helper.
- */
-static int pppol2tp_tunnel_setsockopt(struct sock *sk,
- struct pppol2tp_tunnel *tunnel,
- int optname, int val)
-{
- int err = 0;
-
- switch (optname) {
- case PPPOL2TP_SO_DEBUG:
- tunnel->debug = val;
- PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set debug=%x\n", tunnel->name, tunnel->debug);
- break;
-
- default:
- err = -ENOPROTOOPT;
- break;
- }
-
- return err;
-}
-
-/* Session setsockopt helper.
- */
-static int pppol2tp_session_setsockopt(struct sock *sk,
- struct pppol2tp_session *session,
- int optname, int val)
-{
- int err = 0;
-
- switch (optname) {
- case PPPOL2TP_SO_RECVSEQ:
- if ((val != 0) && (val != 1)) {
- err = -EINVAL;
- break;
- }
- session->recv_seq = val ? -1 : 0;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set recv_seq=%d\n", session->name,
- session->recv_seq);
- break;
-
- case PPPOL2TP_SO_SENDSEQ:
- if ((val != 0) && (val != 1)) {
- err = -EINVAL;
- break;
- }
- session->send_seq = val ? -1 : 0;
- {
- struct sock *ssk = session->sock;
- struct pppox_sock *po = pppox_sk(ssk);
- po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ :
- PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
- }
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set send_seq=%d\n", session->name, session->send_seq);
- break;
-
- case PPPOL2TP_SO_LNSMODE:
- if ((val != 0) && (val != 1)) {
- err = -EINVAL;
- break;
- }
- session->lns_mode = val ? -1 : 0;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set lns_mode=%d\n", session->name,
- session->lns_mode);
- break;
-
- case PPPOL2TP_SO_DEBUG:
- session->debug = val;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set debug=%x\n", session->name, session->debug);
- break;
-
- case PPPOL2TP_SO_REORDERTO:
- session->reorder_timeout = msecs_to_jiffies(val);
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set reorder_timeout=%d\n", session->name,
- session->reorder_timeout);
- break;
-
- default:
- err = -ENOPROTOOPT;
- break;
- }
-
- return err;
-}
-
-/* Main setsockopt() entry point.
- * Does API checks, then calls either the tunnel or session setsockopt
- * handler, according to whether the PPPoL2TP socket is for a regular
- * session or the special tunnel type.
- */
-static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
- char __user *optval, unsigned int optlen)
-{
- struct sock *sk = sock->sk;
- struct pppol2tp_session *session = sk->sk_user_data;
- struct pppol2tp_tunnel *tunnel;
- int val;
- int err;
-
- if (level != SOL_PPPOL2TP)
- return udp_prot.setsockopt(sk, level, optname, optval, optlen);
-
- if (optlen < sizeof(int))
- return -EINVAL;
-
- if (get_user(val, (int __user *)optval))
- return -EFAULT;
-
- err = -ENOTCONN;
- if (sk->sk_user_data == NULL)
- goto end;
-
- /* Get session context from the socket */
- err = -EBADF;
- session = pppol2tp_sock_to_session(sk);
- if (session == NULL)
- goto end;
-
- /* Special case: if session_id == 0x0000, treat as operation on tunnel
- */
- if ((session->tunnel_addr.s_session == 0) &&
- (session->tunnel_addr.d_session == 0)) {
- err = -EBADF;
- tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
- if (tunnel == NULL)
- goto end_put_sess;
-
- err = pppol2tp_tunnel_setsockopt(sk, tunnel, optname, val);
- sock_put(session->tunnel_sock);
- } else
- err = pppol2tp_session_setsockopt(sk, session, optname, val);
-
- err = 0;
-
-end_put_sess:
- sock_put(sk);
-end:
- return err;
-}
-
-/* Tunnel getsockopt helper. Called with sock locked.
- */
-static int pppol2tp_tunnel_getsockopt(struct sock *sk,
- struct pppol2tp_tunnel *tunnel,
- int optname, int *val)
-{
- int err = 0;
-
- switch (optname) {
- case PPPOL2TP_SO_DEBUG:
- *val = tunnel->debug;
- PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get debug=%x\n", tunnel->name, tunnel->debug);
- break;
-
- default:
- err = -ENOPROTOOPT;
- break;
- }
-
- return err;
-}
-
-/* Session getsockopt helper. Called with sock locked.
- */
-static int pppol2tp_session_getsockopt(struct sock *sk,
- struct pppol2tp_session *session,
- int optname, int *val)
-{
- int err = 0;
-
- switch (optname) {
- case PPPOL2TP_SO_RECVSEQ:
- *val = session->recv_seq;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get recv_seq=%d\n", session->name, *val);
- break;
-
- case PPPOL2TP_SO_SENDSEQ:
- *val = session->send_seq;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get send_seq=%d\n", session->name, *val);
- break;
-
- case PPPOL2TP_SO_LNSMODE:
- *val = session->lns_mode;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get lns_mode=%d\n", session->name, *val);
- break;
-
- case PPPOL2TP_SO_DEBUG:
- *val = session->debug;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get debug=%d\n", session->name, *val);
- break;
-
- case PPPOL2TP_SO_REORDERTO:
- *val = (int) jiffies_to_msecs(session->reorder_timeout);
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get reorder_timeout=%d\n", session->name, *val);
- break;
-
- default:
- err = -ENOPROTOOPT;
- }
-
- return err;
-}
-
-/* Main getsockopt() entry point.
- * Does API checks, then calls either the tunnel or session getsockopt
- * handler, according to whether the PPPoX socket is a for a regular session
- * or the special tunnel type.
- */
-static int pppol2tp_getsockopt(struct socket *sock, int level,
- int optname, char __user *optval, int __user *optlen)
-{
- struct sock *sk = sock->sk;
- struct pppol2tp_session *session = sk->sk_user_data;
- struct pppol2tp_tunnel *tunnel;
- int val, len;
- int err;
-
- if (level != SOL_PPPOL2TP)
- return udp_prot.getsockopt(sk, level, optname, optval, optlen);
-
- if (get_user(len, (int __user *) optlen))
- return -EFAULT;
-
- len = min_t(unsigned int, len, sizeof(int));
-
- if (len < 0)
- return -EINVAL;
-
- err = -ENOTCONN;
- if (sk->sk_user_data == NULL)
- goto end;
-
- /* Get the session context */
- err = -EBADF;
- session = pppol2tp_sock_to_session(sk);
- if (session == NULL)
- goto end;
-
- /* Special case: if session_id == 0x0000, treat as operation on tunnel */
- if ((session->tunnel_addr.s_session == 0) &&
- (session->tunnel_addr.d_session == 0)) {
- err = -EBADF;
- tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
- if (tunnel == NULL)
- goto end_put_sess;
-
- err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val);
- sock_put(session->tunnel_sock);
- } else
- err = pppol2tp_session_getsockopt(sk, session, optname, &val);
-
- err = -EFAULT;
- if (put_user(len, (int __user *) optlen))
- goto end_put_sess;
-
- if (copy_to_user((void __user *) optval, &val, len))
- goto end_put_sess;
-
- err = 0;
-
-end_put_sess:
- sock_put(sk);
-end:
- return err;
-}
-
-/*****************************************************************************
- * /proc filesystem for debug
- *****************************************************************************/
-
-#ifdef CONFIG_PROC_FS
-
-#include <linux/seq_file.h>
-
-struct pppol2tp_seq_data {
- struct seq_net_private p;
- struct pppol2tp_tunnel *tunnel; /* current tunnel */
- struct pppol2tp_session *session; /* NULL means get first session in tunnel */
-};
-
-static struct pppol2tp_session *next_session(struct pppol2tp_tunnel *tunnel, struct pppol2tp_session *curr)
-{
- struct pppol2tp_session *session = NULL;
- struct hlist_node *walk;
- int found = 0;
- int next = 0;
- int i;
-
- read_lock_bh(&tunnel->hlist_lock);
- for (i = 0; i < PPPOL2TP_HASH_SIZE; i++) {
- hlist_for_each_entry(session, walk, &tunnel->session_hlist[i], hlist) {
- if (curr == NULL) {
- found = 1;
- goto out;
- }
- if (session == curr) {
- next = 1;
- continue;
- }
- if (next) {
- found = 1;
- goto out;
- }
- }
- }
-out:
- read_unlock_bh(&tunnel->hlist_lock);
- if (!found)
- session = NULL;
-
- return session;
-}
-
-static struct pppol2tp_tunnel *next_tunnel(struct pppol2tp_net *pn,
- struct pppol2tp_tunnel *curr)
-{
- struct pppol2tp_tunnel *tunnel = NULL;
-
- read_lock_bh(&pn->pppol2tp_tunnel_list_lock);
- if (list_is_last(&curr->list, &pn->pppol2tp_tunnel_list)) {
- goto out;
- }
- tunnel = list_entry(curr->list.next, struct pppol2tp_tunnel, list);
-out:
- read_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
-
- return tunnel;
-}
-
-static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs)
-{
- struct pppol2tp_seq_data *pd = SEQ_START_TOKEN;
- struct pppol2tp_net *pn;
- loff_t pos = *offs;
-
- if (!pos)
- goto out;
-
- BUG_ON(m->private == NULL);
- pd = m->private;
- pn = pppol2tp_pernet(seq_file_net(m));
-
- if (pd->tunnel == NULL) {
- if (!list_empty(&pn->pppol2tp_tunnel_list))
- pd->tunnel = list_entry(pn->pppol2tp_tunnel_list.next, struct pppol2tp_tunnel, list);
- } else {
- pd->session = next_session(pd->tunnel, pd->session);
- if (pd->session == NULL) {
- pd->tunnel = next_tunnel(pn, pd->tunnel);
- }
- }
-
- /* NULL tunnel and session indicates end of list */
- if ((pd->tunnel == NULL) && (pd->session == NULL))
- pd = NULL;
-
-out:
- return pd;
-}
-
-static void *pppol2tp_seq_next(struct seq_file *m, void *v, loff_t *pos)
-{
- (*pos)++;
- return NULL;
-}
-
-static void pppol2tp_seq_stop(struct seq_file *p, void *v)
-{
- /* nothing to do */
-}
-
-static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v)
-{
- struct pppol2tp_tunnel *tunnel = v;
-
- seq_printf(m, "\nTUNNEL '%s', %c %d\n",
- tunnel->name,
- (tunnel == tunnel->sock->sk_user_data) ? 'Y':'N',
- atomic_read(&tunnel->ref_count) - 1);
- seq_printf(m, " %08x %llu/%llu/%llu %llu/%llu/%llu\n",
- tunnel->debug,
- (unsigned long long)tunnel->stats.tx_packets,
- (unsigned long long)tunnel->stats.tx_bytes,
- (unsigned long long)tunnel->stats.tx_errors,
- (unsigned long long)tunnel->stats.rx_packets,
- (unsigned long long)tunnel->stats.rx_bytes,
- (unsigned long long)tunnel->stats.rx_errors);
-}
-
-static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
-{
- struct pppol2tp_session *session = v;
-
- seq_printf(m, " SESSION '%s' %08X/%d %04X/%04X -> "
- "%04X/%04X %d %c\n",
- session->name,
- ntohl(session->tunnel_addr.addr.sin_addr.s_addr),
- ntohs(session->tunnel_addr.addr.sin_port),
- session->tunnel_addr.s_tunnel,
- session->tunnel_addr.s_session,
- session->tunnel_addr.d_tunnel,
- session->tunnel_addr.d_session,
- session->sock->sk_state,
- (session == session->sock->sk_user_data) ?
- 'Y' : 'N');
- seq_printf(m, " %d/%d/%c/%c/%s %08x %u\n",
- session->mtu, session->mru,
- session->recv_seq ? 'R' : '-',
- session->send_seq ? 'S' : '-',
- session->lns_mode ? "LNS" : "LAC",
- session->debug,
- jiffies_to_msecs(session->reorder_timeout));
- seq_printf(m, " %hu/%hu %llu/%llu/%llu %llu/%llu/%llu\n",
- session->nr, session->ns,
- (unsigned long long)session->stats.tx_packets,
- (unsigned long long)session->stats.tx_bytes,
- (unsigned long long)session->stats.tx_errors,
- (unsigned long long)session->stats.rx_packets,
- (unsigned long long)session->stats.rx_bytes,
- (unsigned long long)session->stats.rx_errors);
-}
-
-static int pppol2tp_seq_show(struct seq_file *m, void *v)
-{
- struct pppol2tp_seq_data *pd = v;
-
- /* display header on line 1 */
- if (v == SEQ_START_TOKEN) {
- seq_puts(m, "PPPoL2TP driver info, " PPPOL2TP_DRV_VERSION "\n");
- seq_puts(m, "TUNNEL name, user-data-ok session-count\n");
- seq_puts(m, " debug tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
- seq_puts(m, " SESSION name, addr/port src-tid/sid "
- "dest-tid/sid state user-data-ok\n");
- seq_puts(m, " mtu/mru/rcvseq/sendseq/lns debug reorderto\n");
- seq_puts(m, " nr/ns tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
- goto out;
- }
-
- /* Show the tunnel or session context.
- */
- if (pd->session == NULL)
- pppol2tp_seq_tunnel_show(m, pd->tunnel);
- else
- pppol2tp_seq_session_show(m, pd->session);
-
-out:
- return 0;
-}
-
-static const struct seq_operations pppol2tp_seq_ops = {
- .start = pppol2tp_seq_start,
- .next = pppol2tp_seq_next,
- .stop = pppol2tp_seq_stop,
- .show = pppol2tp_seq_show,
-};
-
-/* Called when our /proc file is opened. We allocate data for use when
- * iterating our tunnel / session contexts and store it in the private
- * data of the seq_file.
- */
-static int pppol2tp_proc_open(struct inode *inode, struct file *file)
-{
- return seq_open_net(inode, file, &pppol2tp_seq_ops,
- sizeof(struct pppol2tp_seq_data));
-}
-
-static const struct file_operations pppol2tp_proc_fops = {
- .owner = THIS_MODULE,
- .open = pppol2tp_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release_net,
-};
-
-#endif /* CONFIG_PROC_FS */
-
-/*****************************************************************************
- * Init and cleanup
- *****************************************************************************/
-
-static const struct proto_ops pppol2tp_ops = {
- .family = AF_PPPOX,
- .owner = THIS_MODULE,
- .release = pppol2tp_release,
- .bind = sock_no_bind,
- .connect = pppol2tp_connect,
- .socketpair = sock_no_socketpair,
- .accept = sock_no_accept,
- .getname = pppol2tp_getname,
- .poll = datagram_poll,
- .listen = sock_no_listen,
- .shutdown = sock_no_shutdown,
- .setsockopt = pppol2tp_setsockopt,
- .getsockopt = pppol2tp_getsockopt,
- .sendmsg = pppol2tp_sendmsg,
- .recvmsg = pppol2tp_recvmsg,
- .mmap = sock_no_mmap,
- .ioctl = pppox_ioctl,
-};
-
-static struct pppox_proto pppol2tp_proto = {
- .create = pppol2tp_create,
- .ioctl = pppol2tp_ioctl
-};
-
-static __net_init int pppol2tp_init_net(struct net *net)
-{
- struct pppol2tp_net *pn = pppol2tp_pernet(net);
- struct proc_dir_entry *pde;
-
- INIT_LIST_HEAD(&pn->pppol2tp_tunnel_list);
- rwlock_init(&pn->pppol2tp_tunnel_list_lock);
-
- pde = proc_net_fops_create(net, "pppol2tp", S_IRUGO, &pppol2tp_proc_fops);
-#ifdef CONFIG_PROC_FS
- if (!pde)
- return -ENOMEM;
-#endif
-
- return 0;
-}
-
-static __net_exit void pppol2tp_exit_net(struct net *net)
-{
- proc_net_remove(net, "pppol2tp");
-}
-
-static struct pernet_operations pppol2tp_net_ops = {
- .init = pppol2tp_init_net,
- .exit = pppol2tp_exit_net,
- .id = &pppol2tp_net_id,
- .size = sizeof(struct pppol2tp_net),
-};
-
-static int __init pppol2tp_init(void)
-{
- int err;
-
- err = proto_register(&pppol2tp_sk_proto, 0);
- if (err)
- goto out;
- err = register_pppox_proto(PX_PROTO_OL2TP, &pppol2tp_proto);
- if (err)
- goto out_unregister_pppol2tp_proto;
-
- err = register_pernet_device(&pppol2tp_net_ops);
- if (err)
- goto out_unregister_pppox_proto;
-
- printk(KERN_INFO "PPPoL2TP kernel driver, %s\n",
- PPPOL2TP_DRV_VERSION);
-
-out:
- return err;
-out_unregister_pppox_proto:
- unregister_pppox_proto(PX_PROTO_OL2TP);
-out_unregister_pppol2tp_proto:
- proto_unregister(&pppol2tp_sk_proto);
- goto out;
-}
-
-static void __exit pppol2tp_exit(void)
-{
- unregister_pppox_proto(PX_PROTO_OL2TP);
- unregister_pernet_device(&pppol2tp_net_ops);
- proto_unregister(&pppol2tp_sk_proto);
-}
-
-module_init(pppol2tp_init);
-module_exit(pppol2tp_exit);
-
-MODULE_AUTHOR("Martijn van Oosterhout <kleptog@svana.org>, "
- "James Chapman <jchapman@katalix.com>");
-MODULE_DESCRIPTION("PPP over L2TP over UDP");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(PPPOL2TP_DRV_VERSION);
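The removed driver above dispatches SOL_PPPOL2TP socket options to either a tunnel or a session context. A minimal userspace sketch of driving those options, assuming the usual <linux/if_pppol2tp.h> definitions; the file descriptor, fallback SOL value and helper name are illustrative only, not part of the patch:

#include <sys/socket.h>
#include <linux/if_pppol2tp.h>          /* PPPOL2TP_SO_* option names */

#ifndef SOL_PPPOL2TP
#define SOL_PPPOL2TP 273                /* assumed value, from <linux/socket.h> */
#endif

/* Enable sequence numbers and debug tracing on a connected PPPoX/L2TP
 * session socket.  The special tunnel socket (session id 0) only
 * understands PPPOL2TP_SO_DEBUG, as the handlers above show. */
static int pppol2tp_example_setup(int fd, int debug_mask)
{
        int on = 1;

        if (setsockopt(fd, SOL_PPPOL2TP, PPPOL2TP_SO_SENDSEQ, &on, sizeof(on)) < 0)
                return -1;
        if (setsockopt(fd, SOL_PPPOL2TP, PPPOL2TP_SO_RECVSEQ, &on, sizeof(on)) < 0)
                return -1;
        return setsockopt(fd, SOL_PPPOL2TP, PPPOL2TP_SO_DEBUG,
                          &debug_mask, sizeof(debug_mask));
}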
diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c
index 5bf229bb34c2..87d6b8f36304 100644
--- a/drivers/net/ps3_gelic_net.c
+++ b/drivers/net/ps3_gelic_net.c
@@ -327,7 +327,7 @@ static int gelic_descr_prepare_rx(struct gelic_card *card,
unsigned int bufsize;
if (gelic_descr_get_status(descr) != GELIC_DESCR_DMA_NOT_IN_USE)
- dev_info(ctodev(card), "%s: ERROR status \n", __func__);
+ dev_info(ctodev(card), "%s: ERROR status\n", __func__);
/* we need to round up the buffer size to a multiple of 128 */
bufsize = ALIGN(GELIC_NET_MAX_MTU, GELIC_NET_RXBUF_ALIGN);
@@ -547,7 +547,7 @@ out:
void gelic_net_set_multi(struct net_device *netdev)
{
struct gelic_card *card = netdev_card(netdev);
- struct dev_mc_list *mc;
+ struct netdev_hw_addr *ha;
unsigned int i;
uint8_t *p;
u64 addr;
@@ -581,9 +581,9 @@ void gelic_net_set_multi(struct net_device *netdev)
}
/* set multicast addresses */
- netdev_for_each_mc_addr(mc, netdev) {
+ netdev_for_each_mc_addr(ha, netdev) {
addr = 0;
- p = mc->dmi_addr;
+ p = ha->addr;
for (i = 0; i < ETH_ALEN; i++) {
addr <<= 8;
addr |= *p++;
@@ -903,9 +903,6 @@ int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
gelic_descr_release_tx(card, descr->next);
card->tx_chain.tail = descr->next->next;
dev_info(ctodev(card), "%s: kick failure\n", __func__);
- } else {
- /* OK, DMA started/reserved */
- netdev->trans_start = jiffies;
}
spin_unlock_irqrestore(&card->tx_lock, flags);
@@ -1435,7 +1432,7 @@ static void gelic_net_tx_timeout_task(struct work_struct *work)
container_of(work, struct gelic_card, tx_timeout_task);
struct net_device *netdev = card->netdev[GELIC_PORT_ETHERNET_0];
- dev_info(ctodev(card), "%s:Timed out. Restarting... \n", __func__);
+ dev_info(ctodev(card), "%s:Timed out. Restarting...\n", __func__);
if (!(netdev->flags & IFF_UP))
goto out;
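The ps3_gelic_net hunks above convert the multicast walk from the removed struct dev_mc_list to the generic struct netdev_hw_addr list. A minimal sketch of the new iteration pattern; the function name is made up:

#include <linux/netdevice.h>

static void example_walk_mc_list(struct net_device *netdev)
{
        struct netdev_hw_addr *ha;

        netdev_for_each_mc_addr(ha, netdev) {
                /* ha->addr replaces the old dev_mc_list->dmi_addr field
                 * and is always ETH_ALEN bytes long. */
                pr_debug("mc addr %pM\n", ha->addr);
        }
}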
diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c
index f0be507e5324..084f21d89f28 100644
--- a/drivers/net/ps3_gelic_wireless.c
+++ b/drivers/net/ps3_gelic_wireless.c
@@ -96,7 +96,7 @@ static inline int precise_ie(void)
* post_eurus_cmd helpers
*/
struct eurus_cmd_arg_info {
- int pre_arg; /* command requres arg1, arg2 at POST COMMAND */
+ int pre_arg; /* command requires arg1, arg2 at POST COMMAND */
int post_arg; /* command requires arg1, arg2 at GET_RESULT */
};
@@ -528,7 +528,7 @@ static void gelic_wl_parse_ie(u8 *data, size_t len,
u8 item_len;
u8 item_id;
- pr_debug("%s: data=%p len=%ld \n", __func__,
+ pr_debug("%s: data=%p len=%ld\n", __func__,
data, len);
memset(ie_info, 0, sizeof(struct ie_info));
@@ -979,7 +979,7 @@ static int gelic_wl_set_essid(struct net_device *netdev,
pr_debug("%s: essid = '%s'\n", __func__, extra);
set_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat);
} else {
- pr_debug("%s: ESSID any \n", __func__);
+ pr_debug("%s: ESSID any\n", __func__);
clear_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat);
}
set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
@@ -987,7 +987,7 @@ static int gelic_wl_set_essid(struct net_device *netdev,
gelic_wl_try_associate(netdev); /* FIXME */
- pr_debug("%s: -> \n", __func__);
+ pr_debug("%s: ->\n", __func__);
return 0;
}
@@ -998,7 +998,7 @@ static int gelic_wl_get_essid(struct net_device *netdev,
struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
unsigned long irqflag;
- pr_debug("%s: <- \n", __func__);
+ pr_debug("%s: <-\n", __func__);
mutex_lock(&wl->assoc_stat_lock);
spin_lock_irqsave(&wl->lock, irqflag);
if (test_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat) ||
@@ -1011,7 +1011,7 @@ static int gelic_wl_get_essid(struct net_device *netdev,
mutex_unlock(&wl->assoc_stat_lock);
spin_unlock_irqrestore(&wl->lock, irqflag);
- pr_debug("%s: -> len=%d \n", __func__, data->essid.length);
+ pr_debug("%s: -> len=%d\n", __func__, data->essid.length);
return 0;
}
@@ -1028,7 +1028,7 @@ static int gelic_wl_set_encode(struct net_device *netdev,
int key_index, index_specified;
int ret = 0;
- pr_debug("%s: <- \n", __func__);
+ pr_debug("%s: <-\n", __func__);
flags = enc->flags & IW_ENCODE_FLAGS;
key_index = enc->flags & IW_ENCODE_INDEX;
@@ -1087,7 +1087,7 @@ static int gelic_wl_set_encode(struct net_device *netdev,
set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
done:
spin_unlock_irqrestore(&wl->lock, irqflag);
- pr_debug("%s: -> \n", __func__);
+ pr_debug("%s: ->\n", __func__);
return ret;
}
@@ -1101,7 +1101,7 @@ static int gelic_wl_get_encode(struct net_device *netdev,
unsigned int key_index, index_specified;
int ret = 0;
- pr_debug("%s: <- \n", __func__);
+ pr_debug("%s: <-\n", __func__);
key_index = enc->flags & IW_ENCODE_INDEX;
pr_debug("%s: flag=%#x point=%p len=%d extra=%p\n", __func__,
enc->flags, enc->pointer, enc->length, extra);
@@ -1215,7 +1215,7 @@ static int gelic_wl_set_encodeext(struct net_device *netdev,
int key_index;
int ret = 0;
- pr_debug("%s: <- \n", __func__);
+ pr_debug("%s: <-\n", __func__);
flags = enc->flags & IW_ENCODE_FLAGS;
alg = ext->alg;
key_index = enc->flags & IW_ENCODE_INDEX;
@@ -1288,7 +1288,7 @@ static int gelic_wl_set_encodeext(struct net_device *netdev,
}
done:
spin_unlock_irqrestore(&wl->lock, irqflag);
- pr_debug("%s: -> \n", __func__);
+ pr_debug("%s: ->\n", __func__);
return ret;
}
@@ -1304,7 +1304,7 @@ static int gelic_wl_get_encodeext(struct net_device *netdev,
int ret = 0;
int max_key_len;
- pr_debug("%s: <- \n", __func__);
+ pr_debug("%s: <-\n", __func__);
max_key_len = enc->length - sizeof(struct iw_encode_ext);
if (max_key_len < 0)
@@ -1359,7 +1359,7 @@ static int gelic_wl_get_encodeext(struct net_device *netdev,
}
out:
spin_unlock_irqrestore(&wl->lock, irqflag);
- pr_debug("%s: -> \n", __func__);
+ pr_debug("%s: ->\n", __func__);
return ret;
}
/* SIOC{S,G}IWMODE */
@@ -1370,7 +1370,7 @@ static int gelic_wl_set_mode(struct net_device *netdev,
__u32 mode = data->mode;
int ret;
- pr_debug("%s: <- \n", __func__);
+ pr_debug("%s: <-\n", __func__);
if (mode == IW_MODE_INFRA)
ret = 0;
else
@@ -1384,7 +1384,7 @@ static int gelic_wl_get_mode(struct net_device *netdev,
union iwreq_data *data, char *extra)
{
__u32 *mode = &data->mode;
- pr_debug("%s: <- \n", __func__);
+ pr_debug("%s: <-\n", __func__);
*mode = IW_MODE_INFRA;
pr_debug("%s: ->\n", __func__);
return 0;
@@ -2022,7 +2022,7 @@ static int gelic_wl_associate_bss(struct gelic_wl_info *wl,
if (!rc) {
/* timeouted. Maybe key or cyrpt mode is wrong */
- pr_info("%s: connect timeout \n", __func__);
+ pr_info("%s: connect timeout\n", __func__);
cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_DISASSOC,
NULL, 0);
kfree(cmd);
@@ -2063,7 +2063,7 @@ static void gelic_wl_connected_event(struct gelic_wl_info *wl,
}
if (desired_event == event) {
- pr_debug("%s: completed \n", __func__);
+ pr_debug("%s: completed\n", __func__);
complete(&wl->assoc_done);
netif_carrier_on(port_to_netdev(wl_port(wl)));
} else
@@ -2280,26 +2280,25 @@ void gelic_wl_interrupt(struct net_device *netdev, u64 status)
/*
* driver helpers
*/
-#define IW_IOCTL(n) [(n) - SIOCSIWCOMMIT]
static const iw_handler gelic_wl_wext_handler[] =
{
- IW_IOCTL(SIOCGIWNAME) = gelic_wl_get_name,
- IW_IOCTL(SIOCGIWRANGE) = gelic_wl_get_range,
- IW_IOCTL(SIOCSIWSCAN) = gelic_wl_set_scan,
- IW_IOCTL(SIOCGIWSCAN) = gelic_wl_get_scan,
- IW_IOCTL(SIOCSIWAUTH) = gelic_wl_set_auth,
- IW_IOCTL(SIOCGIWAUTH) = gelic_wl_get_auth,
- IW_IOCTL(SIOCSIWESSID) = gelic_wl_set_essid,
- IW_IOCTL(SIOCGIWESSID) = gelic_wl_get_essid,
- IW_IOCTL(SIOCSIWENCODE) = gelic_wl_set_encode,
- IW_IOCTL(SIOCGIWENCODE) = gelic_wl_get_encode,
- IW_IOCTL(SIOCSIWAP) = gelic_wl_set_ap,
- IW_IOCTL(SIOCGIWAP) = gelic_wl_get_ap,
- IW_IOCTL(SIOCSIWENCODEEXT) = gelic_wl_set_encodeext,
- IW_IOCTL(SIOCGIWENCODEEXT) = gelic_wl_get_encodeext,
- IW_IOCTL(SIOCSIWMODE) = gelic_wl_set_mode,
- IW_IOCTL(SIOCGIWMODE) = gelic_wl_get_mode,
- IW_IOCTL(SIOCGIWNICKN) = gelic_wl_get_nick,
+ IW_HANDLER(SIOCGIWNAME, gelic_wl_get_name),
+ IW_HANDLER(SIOCGIWRANGE, gelic_wl_get_range),
+ IW_HANDLER(SIOCSIWSCAN, gelic_wl_set_scan),
+ IW_HANDLER(SIOCGIWSCAN, gelic_wl_get_scan),
+ IW_HANDLER(SIOCSIWAUTH, gelic_wl_set_auth),
+ IW_HANDLER(SIOCGIWAUTH, gelic_wl_get_auth),
+ IW_HANDLER(SIOCSIWESSID, gelic_wl_set_essid),
+ IW_HANDLER(SIOCGIWESSID, gelic_wl_get_essid),
+ IW_HANDLER(SIOCSIWENCODE, gelic_wl_set_encode),
+ IW_HANDLER(SIOCGIWENCODE, gelic_wl_get_encode),
+ IW_HANDLER(SIOCSIWAP, gelic_wl_set_ap),
+ IW_HANDLER(SIOCGIWAP, gelic_wl_get_ap),
+ IW_HANDLER(SIOCSIWENCODEEXT, gelic_wl_set_encodeext),
+ IW_HANDLER(SIOCGIWENCODEEXT, gelic_wl_get_encodeext),
+ IW_HANDLER(SIOCSIWMODE, gelic_wl_set_mode),
+ IW_HANDLER(SIOCGIWMODE, gelic_wl_get_mode),
+ IW_HANDLER(SIOCGIWNICKN, gelic_wl_get_nick),
};
static const struct iw_handler_def gelic_wl_wext_handler_def = {
@@ -2318,7 +2317,7 @@ static struct net_device * __devinit gelic_wl_alloc(struct gelic_card *card)
pr_debug("%s:start\n", __func__);
netdev = alloc_etherdev(sizeof(struct gelic_port) +
sizeof(struct gelic_wl_info));
- pr_debug("%s: netdev =%p card=%p \np", __func__, netdev, card);
+ pr_debug("%s: netdev =%p card=%p\n", __func__, netdev, card);
if (!netdev)
return NULL;
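The handler-table change above drops the driver-local IW_IOCTL() indexing macro in favour of the common IW_HANDLER() helper, assumed here to come from <net/iw_handler.h>. A stripped-down sketch of such a table; the example_* handler is a placeholder with the standard iw_handler signature:

#include <net/iw_handler.h>

static int example_get_name(struct net_device *dev, struct iw_request_info *info,
                            union iwreq_data *wrqu, char *extra)
{
        strcpy(wrqu->name, "IEEE 802.11bg");
        return 0;
}

static const iw_handler example_wext_handler[] = {
        /* IW_HANDLER() places the function at the slot derived from the
         * ioctl number, replacing the hand-rolled [(n) - SIOCSIWCOMMIT]. */
        IW_HANDLER(SIOCGIWNAME, example_get_name),
};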
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 4ef0afbcbe1b..01a6ca303a17 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -343,8 +343,8 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
cpu_to_le32(LS_64BITS(map));
lrg_buf_cb->buf_phy_addr_high =
cpu_to_le32(MS_64BITS(map));
- pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
- pci_unmap_len_set(lrg_buf_cb, maplen,
+ dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+ dma_unmap_len_set(lrg_buf_cb, maplen,
qdev->lrg_buffer_len -
QL_HEADER_SPACE);
}
@@ -1924,8 +1924,8 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
cpu_to_le32(LS_64BITS(map));
lrg_buf_cb->buf_phy_addr_high =
cpu_to_le32(MS_64BITS(map));
- pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
- pci_unmap_len_set(lrg_buf_cb, maplen,
+ dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+ dma_unmap_len_set(lrg_buf_cb, maplen,
qdev->lrg_buffer_len -
QL_HEADER_SPACE);
--qdev->lrg_buf_skb_check;
@@ -2041,16 +2041,16 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
}
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(&tx_cb->map[0], mapaddr),
- pci_unmap_len(&tx_cb->map[0], maplen),
+ dma_unmap_addr(&tx_cb->map[0], mapaddr),
+ dma_unmap_len(&tx_cb->map[0], maplen),
PCI_DMA_TODEVICE);
tx_cb->seg_count--;
if (tx_cb->seg_count) {
for (i = 1; i < tx_cb->seg_count; i++) {
pci_unmap_page(qdev->pdev,
- pci_unmap_addr(&tx_cb->map[i],
+ dma_unmap_addr(&tx_cb->map[i],
mapaddr),
- pci_unmap_len(&tx_cb->map[i], maplen),
+ dma_unmap_len(&tx_cb->map[i], maplen),
PCI_DMA_TODEVICE);
}
}
@@ -2119,8 +2119,8 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
skb_put(skb, length);
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(lrg_buf_cb2, mapaddr),
- pci_unmap_len(lrg_buf_cb2, maplen),
+ dma_unmap_addr(lrg_buf_cb2, mapaddr),
+ dma_unmap_len(lrg_buf_cb2, maplen),
PCI_DMA_FROMDEVICE);
prefetch(skb->data);
skb->ip_summed = CHECKSUM_NONE;
@@ -2165,8 +2165,8 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
skb_put(skb2, length); /* Just the second buffer length here. */
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(lrg_buf_cb2, mapaddr),
- pci_unmap_len(lrg_buf_cb2, maplen),
+ dma_unmap_addr(lrg_buf_cb2, mapaddr),
+ dma_unmap_len(lrg_buf_cb2, maplen),
PCI_DMA_FROMDEVICE);
prefetch(skb2->data);
@@ -2258,7 +2258,7 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
"%x.\n",
ndev->name, net_rsp->opcode);
printk(KERN_ERR PFX
- "0x%08lx 0x%08lx 0x%08lx 0x%08lx \n",
+ "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
(unsigned long int)tmp[0],
(unsigned long int)tmp[1],
(unsigned long int)tmp[2],
@@ -2454,8 +2454,8 @@ static int ql_send_map(struct ql3_adapter *qdev,
oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
oal_entry->len = cpu_to_le32(len);
- pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
- pci_unmap_len_set(&tx_cb->map[seg], maplen, len);
+ dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
+ dma_unmap_len_set(&tx_cb->map[seg], maplen, len);
seg++;
if (seg_cnt == 1) {
@@ -2488,9 +2488,9 @@ static int ql_send_map(struct ql3_adapter *qdev,
oal_entry->len =
cpu_to_le32(sizeof(struct oal) |
OAL_CONT_ENTRY);
- pci_unmap_addr_set(&tx_cb->map[seg], mapaddr,
+ dma_unmap_addr_set(&tx_cb->map[seg], mapaddr,
map);
- pci_unmap_len_set(&tx_cb->map[seg], maplen,
+ dma_unmap_len_set(&tx_cb->map[seg], maplen,
sizeof(struct oal));
oal_entry = (struct oal_entry *)oal;
oal++;
@@ -2512,8 +2512,8 @@ static int ql_send_map(struct ql3_adapter *qdev,
oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
oal_entry->len = cpu_to_le32(frag->size);
- pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
- pci_unmap_len_set(&tx_cb->map[seg], maplen,
+ dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
+ dma_unmap_len_set(&tx_cb->map[seg], maplen,
frag->size);
}
/* Terminate the last segment. */
@@ -2539,22 +2539,22 @@ map_error:
(seg == 12 && seg_cnt > 13) || /* but necessary. */
(seg == 17 && seg_cnt > 18)) {
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(&tx_cb->map[seg], mapaddr),
- pci_unmap_len(&tx_cb->map[seg], maplen),
+ dma_unmap_addr(&tx_cb->map[seg], mapaddr),
+ dma_unmap_len(&tx_cb->map[seg], maplen),
PCI_DMA_TODEVICE);
oal++;
seg++;
}
pci_unmap_page(qdev->pdev,
- pci_unmap_addr(&tx_cb->map[seg], mapaddr),
- pci_unmap_len(&tx_cb->map[seg], maplen),
+ dma_unmap_addr(&tx_cb->map[seg], mapaddr),
+ dma_unmap_len(&tx_cb->map[seg], maplen),
PCI_DMA_TODEVICE);
}
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(&tx_cb->map[0], mapaddr),
- pci_unmap_addr(&tx_cb->map[0], maplen),
+ dma_unmap_addr(&tx_cb->map[0], mapaddr),
+ dma_unmap_addr(&tx_cb->map[0], maplen),
PCI_DMA_TODEVICE);
return NETDEV_TX_BUSY;
@@ -2841,8 +2841,8 @@ static void ql_free_large_buffers(struct ql3_adapter *qdev)
if (lrg_buf_cb->skb) {
dev_kfree_skb(lrg_buf_cb->skb);
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(lrg_buf_cb, mapaddr),
- pci_unmap_len(lrg_buf_cb, maplen),
+ dma_unmap_addr(lrg_buf_cb, mapaddr),
+ dma_unmap_len(lrg_buf_cb, maplen),
PCI_DMA_FROMDEVICE);
memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
} else {
@@ -2912,8 +2912,8 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
return -ENOMEM;
}
- pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
- pci_unmap_len_set(lrg_buf_cb, maplen,
+ dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+ dma_unmap_len_set(lrg_buf_cb, maplen,
qdev->lrg_buffer_len -
QL_HEADER_SPACE);
lrg_buf_cb->buf_phy_addr_low =
@@ -3793,13 +3793,13 @@ static void ql_reset_work(struct work_struct *work)
"%s: Freeing lost SKB.\n",
qdev->ndev->name);
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(&tx_cb->map[0], mapaddr),
- pci_unmap_len(&tx_cb->map[0], maplen),
+ dma_unmap_addr(&tx_cb->map[0], mapaddr),
+ dma_unmap_len(&tx_cb->map[0], maplen),
PCI_DMA_TODEVICE);
for(j=1;j<tx_cb->seg_count;j++) {
pci_unmap_page(qdev->pdev,
- pci_unmap_addr(&tx_cb->map[j],mapaddr),
- pci_unmap_len(&tx_cb->map[j],maplen),
+ dma_unmap_addr(&tx_cb->map[j],mapaddr),
+ dma_unmap_len(&tx_cb->map[j],maplen),
PCI_DMA_TODEVICE);
}
dev_kfree_skb(tx_cb->skb);
diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h
index 7113e71b15a1..3362a661248c 100644
--- a/drivers/net/qla3xxx.h
+++ b/drivers/net/qla3xxx.h
@@ -998,8 +998,8 @@ enum link_state_t {
struct ql_rcv_buf_cb {
struct ql_rcv_buf_cb *next;
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(mapaddr);
- DECLARE_PCI_UNMAP_LEN(maplen);
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ DEFINE_DMA_UNMAP_LEN(maplen);
__le32 buf_phy_addr_low;
__le32 buf_phy_addr_high;
int index;
@@ -1029,8 +1029,8 @@ struct oal {
};
struct map_list {
- DECLARE_PCI_UNMAP_ADDR(mapaddr);
- DECLARE_PCI_UNMAP_LEN(maplen);
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ DEFINE_DMA_UNMAP_LEN(maplen);
};
struct ql_tx_buf_cb {
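The qla3xxx conversion above replaces the PCI-specific unmap-state macros with their generic DMA-API equivalents from <linux/dma-mapping.h>. A minimal sketch of the pattern; the struct and function names are illustrative only:

#include <linux/dma-mapping.h>

struct example_buf_cb {
        struct sk_buff *skb;
        DEFINE_DMA_UNMAP_ADDR(mapaddr);
        DEFINE_DMA_UNMAP_LEN(maplen);
};

static void example_save_mapping(struct example_buf_cb *cb,
                                 dma_addr_t map, unsigned int len)
{
        /* Both helpers compile to no-ops unless the architecture actually
         * needs the address/length preserved for the later unmap call. */
        dma_unmap_addr_set(cb, mapaddr, map);
        dma_unmap_len_set(cb, maplen, len);
}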
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 0da94b208db1..2fba9cd5946f 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -51,8 +51,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 0
-#define _QLCNIC_LINUX_SUBVERSION 0
-#define QLCNIC_LINUX_VERSIONID "5.0.0"
+#define _QLCNIC_LINUX_SUBVERSION 2
+#define QLCNIC_LINUX_VERSIONID "5.0.2"
#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
#define _major(v) (((v) >> 24) & 0xff)
@@ -428,6 +428,10 @@ struct qlcnic_adapter_stats {
u64 xmit_on;
u64 xmit_off;
u64 skb_alloc_failure;
+ u64 null_skb;
+ u64 null_rxbuf;
+ u64 rx_dma_map_error;
+ u64 tx_dma_map_error;
};
/*
@@ -958,8 +962,10 @@ struct qlcnic_adapter {
u8 dev_state;
u8 diag_test;
u8 diag_cnt;
+ u8 reset_ack_timeo;
+ u8 dev_init_timeo;
u8 rsrd1;
- u16 rsrd2;
+ u16 msg_enable;
u8 mac_addr[ETH_ALEN];
@@ -994,6 +1000,11 @@ u32 qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off);
int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *, ulong off, u32 data);
int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data);
int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data);
+void qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *, u64, u64 *);
+void qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *, u64, u64);
+
+#define ADDR_IN_RANGE(addr, low, high) \
+ (((addr) < (high)) && ((addr) >= (low)))
#define QLCRD32(adapter, off) \
(qlcnic_hw_read_wx_2M(adapter, off))
@@ -1035,6 +1046,7 @@ int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter);
void qlcnic_request_firmware(struct qlcnic_adapter *adapter);
void qlcnic_release_firmware(struct qlcnic_adapter *adapter);
int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter);
+void qlcnic_setup_idc_param(struct qlcnic_adapter *adapter);
int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp);
int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
@@ -1128,4 +1140,11 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
extern const struct ethtool_ops qlcnic_ethtool_ops;
+#define QLCDB(adapter, lvl, _fmt, _args...) do { \
+ if (NETIF_MSG_##lvl & adapter->msg_enable) \
+ printk(KERN_INFO "%s: %s: " _fmt, \
+ dev_name(&adapter->pdev->dev), \
+ __func__, ##_args); \
+ } while (0)
+
#endif /* __QLCNIC_H_ */
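The new msg_enable field and QLCDB() macro above gate driver messages on NETIF_MSG_* category bits, which get/set_msglevel expose through ethtool further down. A small illustrative analogue of that gating; the struct and function names are made up:

#include <linux/netdevice.h>
#include <linux/pci.h>

struct example_adapter {
        struct pci_dev *pdev;
        u32 msg_enable;                 /* bitmap of NETIF_MSG_* categories */
};

static void example_note_reset(struct example_adapter *adapter)
{
        /* Same test QLCDB(adapter, DRV, ...) performs before printing. */
        if (NETIF_MSG_DRV & adapter->msg_enable)
                dev_info(&adapter->pdev->dev, "resetting adapter\n");
}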
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index f83e15fe3e1b..6cdc5ebb7411 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -69,6 +69,14 @@ static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)},
{"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure),
QLC_OFF(stats.skb_alloc_failure)},
+ {"null skb",
+ QLC_SIZEOF(stats.null_skb), QLC_OFF(stats.null_skb)},
+ {"null rxbuf",
+ QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)},
+ {"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error),
+ QLC_OFF(stats.rx_dma_map_error)},
+ {"tx dma map error", QLC_SIZEOF(stats.tx_dma_map_error),
+ QLC_OFF(stats.tx_dma_map_error)},
};
@@ -998,6 +1006,20 @@ static int qlcnic_set_flags(struct net_device *netdev, u32 data)
return 0;
}
+static u32 qlcnic_get_msglevel(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->msg_enable;
+}
+
+static void qlcnic_set_msglevel(struct net_device *netdev, u32 msglvl)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ adapter->msg_enable = msglvl;
+}
+
const struct ethtool_ops qlcnic_ethtool_ops = {
.get_settings = qlcnic_get_settings,
.set_settings = qlcnic_set_settings,
@@ -1029,4 +1051,6 @@ const struct ethtool_ops qlcnic_ethtool_ops = {
.get_flags = ethtool_op_get_flags,
.set_flags = qlcnic_set_flags,
.phys_id = qlcnic_blink_led,
+ .set_msglevel = qlcnic_set_msglevel,
+ .get_msglevel = qlcnic_get_msglevel,
};
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h
index 0469f84360a4..a984cd227582 100644
--- a/drivers/net/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/qlcnic/qlcnic_hdr.h
@@ -435,9 +435,10 @@ enum {
#define QLCNIC_PCI_MS_2M (0x80000)
#define QLCNIC_PCI_OCM0_2M (0x000c0000UL)
#define QLCNIC_PCI_CRBSPACE (0x06000000UL)
+#define QLCNIC_PCI_CAMQM (0x04800000UL)
+#define QLCNIC_PCI_CAMQM_END (0x04800800UL)
#define QLCNIC_PCI_2MB_SIZE (0x00200000UL)
#define QLCNIC_PCI_CAMQM_2M_BASE (0x000ff800UL)
-#define QLCNIC_PCI_CAMQM_2M_END (0x04800800UL)
#define QLCNIC_CRB_CAM QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_CAM)
@@ -448,7 +449,7 @@ enum {
#define QLCNIC_ADDR_OCM1 (0x0000000200400000ULL)
#define QLCNIC_ADDR_OCM1_MAX (0x00000002004fffffULL)
#define QLCNIC_ADDR_QDR_NET (0x0000000300000000ULL)
-#define QLCNIC_ADDR_QDR_NET_MAX_P3 (0x0000000303ffffffULL)
+#define QLCNIC_ADDR_QDR_NET_MAX (0x0000000307ffffffULL)
/*
* Register offsets for MN
@@ -693,15 +694,24 @@ enum {
#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144))
#define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148))
#define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c))
-#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x14c))
-
- /* Device State */
-#define QLCNIC_DEV_COLD 1
-#define QLCNIC_DEV_INITALIZING 2
-#define QLCNIC_DEV_READY 3
-#define QLCNIC_DEV_NEED_RESET 4
-#define QLCNIC_DEV_NEED_QUISCENT 5
-#define QLCNIC_DEV_FAILED 6
+#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x174))
+#define QLCNIC_ROM_DEV_INIT_TIMEOUT (0x3e885c)
+#define QLCNIC_ROM_DRV_RESET_TIMEOUT (0x3e8860)
+
+/* Device State */
+#define QLCNIC_DEV_COLD 0x1
+#define QLCNIC_DEV_INITIALIZING 0x2
+#define QLCNIC_DEV_READY 0x3
+#define QLCNIC_DEV_NEED_RESET 0x4
+#define QLCNIC_DEV_NEED_QUISCENT 0x5
+#define QLCNIC_DEV_FAILED 0x6
+#define QLCNIC_DEV_QUISCENT 0x7
+
+#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4)))
+#define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4)))
+#define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4)))
+#define QLC_DEV_SET_QSCNT_RDY(VAL, FN) ((VAL) |= (2 << (FN * 4)))
+#define QLC_DEV_CLR_RST_QSCNT(VAL, FN) ((VAL) &= ~(3 << (FN * 4)))
#define QLCNIC_RCODE_DRIVER_INFO 0x20000000
#define QLCNIC_RCODE_DRIVER_CAN_RELOAD 0x40000000
@@ -709,9 +719,8 @@ enum {
#define QLCNIC_FWERROR_PEGNUM(code) ((code) & 0xff)
#define QLCNIC_FWERROR_CODE(code) ((code >> 8) & 0xfffff)
-#define FW_POLL_DELAY (2 * HZ)
-#define FW_FAIL_THRESH 3
-#define FW_POLL_THRESH 10
+#define FW_POLL_DELAY (1 * HZ)
+#define FW_FAIL_THRESH 2
#define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
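The QLC_DEV_*() helpers added above carve the shared DRV_STATE CRB register into one nibble per PCI function, with bit 0 of a function's nibble acking a reset and bit 1 acking quiesce. A standalone userspace worked example of that encoding; the register values are illustrative:

#include <stdio.h>

#define QLC_DEV_SET_RST_RDY(VAL, FN)    ((VAL) |= (1 << (FN * 4)))
#define QLC_DEV_SET_QSCNT_RDY(VAL, FN)  ((VAL) |= (2 << (FN * 4)))
#define QLC_DEV_CLR_RST_QSCNT(VAL, FN)  ((VAL) &= ~(3 << (FN * 4)))

int main(void)
{
        unsigned int drv_state = 0;

        QLC_DEV_SET_RST_RDY(drv_state, 2);      /* function 2 acks reset   */
        QLC_DEV_SET_QSCNT_RDY(drv_state, 0);    /* function 0 acks quiesce */
        printf("drv_state = 0x%08x\n", drv_state);      /* 0x00000102 */

        QLC_DEV_CLR_RST_QSCNT(drv_state, 2);    /* drop function 2's acks  */
        printf("drv_state = 0x%08x\n", drv_state);      /* 0x00000002 */
        return 0;
}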
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
index e73ba455aa20..7a72b8d06bcb 100644
--- a/drivers/net/qlcnic/qlcnic_hw.c
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -54,21 +54,6 @@ static inline void writeq(u64 val, void __iomem *addr)
}
#endif
-#define ADDR_IN_RANGE(addr, low, high) \
- (((addr) < (high)) && ((addr) >= (low)))
-
-#define PCI_OFFSET_FIRST_RANGE(adapter, off) \
- ((adapter)->ahw.pci_base0 + (off))
-
-static void __iomem *pci_base_offset(struct qlcnic_adapter *adapter,
- unsigned long off)
-{
- if (ADDR_IN_RANGE(off, FIRST_PAGE_GROUP_START, FIRST_PAGE_GROUP_END))
- return PCI_OFFSET_FIRST_RANGE(adapter, off);
-
- return NULL;
-}
-
static const struct crb_128M_2M_block_map
crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
{{{0, 0, 0, 0} } }, /* 0: PCI */
@@ -310,8 +295,12 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)));
if (done == 1)
break;
- if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT)
+ if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to acquire sem=%d lock;reg_id=%d\n",
+ sem, id_reg);
return -EIO;
+ }
msleep(1);
}
@@ -427,7 +416,7 @@ static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, u8 *addr)
void qlcnic_set_multi(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
u32 mode = VPORT_MISS_MODE_DROP;
@@ -449,8 +438,8 @@ void qlcnic_set_multi(struct net_device *netdev)
}
if (!netdev_mc_empty(netdev)) {
- netdev_for_each_mc_addr(mc_ptr, netdev) {
- qlcnic_nic_add_mac(adapter, mc_ptr->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev) {
+ qlcnic_nic_add_mac(adapter, ha->addr);
}
}
@@ -878,13 +867,6 @@ qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter,
u64 addr, u32 *start)
{
u32 window;
- struct pci_dev *pdev = adapter->pdev;
-
- if ((addr & 0x00ff800) == 0xff800) {
- if (printk_ratelimit())
- dev_warn(&pdev->dev, "QM access not handled\n");
- return -EIO;
- }
window = OCM_WIN_P3P(addr);
@@ -901,8 +883,7 @@ static int
qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
u64 *data, int op)
{
- void __iomem *addr, *mem_ptr = NULL;
- resource_size_t mem_base;
+ void __iomem *addr;
int ret;
u32 start;
@@ -912,21 +893,8 @@ qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
if (ret != 0)
goto unlock;
- addr = pci_base_offset(adapter, start);
- if (addr)
- goto noremap;
+ addr = adapter->ahw.pci_base0 + start;
- mem_base = pci_resource_start(adapter->pdev, 0) + (start & PAGE_MASK);
-
- mem_ptr = ioremap(mem_base, PAGE_SIZE);
- if (mem_ptr == NULL) {
- ret = -EIO;
- goto unlock;
- }
-
- addr = mem_ptr + (start & (PAGE_SIZE - 1));
-
-noremap:
if (op == 0) /* read */
*data = readq(addr);
else /* write */
@@ -935,11 +903,31 @@ noremap:
unlock:
mutex_unlock(&adapter->ahw.mem_lock);
- if (mem_ptr)
- iounmap(mem_ptr);
return ret;
}
+void
+qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
+{
+ void __iomem *addr = adapter->ahw.pci_base0 +
+ QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
+
+ mutex_lock(&adapter->ahw.mem_lock);
+ *data = readq(addr);
+ mutex_unlock(&adapter->ahw.mem_lock);
+}
+
+void
+qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data)
+{
+ void __iomem *addr = adapter->ahw.pci_base0 +
+ QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
+
+ mutex_lock(&adapter->ahw.mem_lock);
+ writeq(data, addr);
+ mutex_unlock(&adapter->ahw.mem_lock);
+}
+
#define MAX_CTL_CHECK 1000
int
@@ -948,7 +936,6 @@ qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
{
int i, j, ret;
u32 temp, off8;
- u64 stride;
void __iomem *mem_crb;
/* Only 64-bit aligned access */
@@ -957,7 +944,7 @@ qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
/* P3 onward, test agent base for MIU and SIU is same */
if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
- QLCNIC_ADDR_QDR_NET_MAX_P3)) {
+ QLCNIC_ADDR_QDR_NET_MAX)) {
mem_crb = qlcnic_get_ioaddr(adapter,
QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
goto correct;
@@ -975,9 +962,7 @@ qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
return -EIO;
correct:
- stride = QLCNIC_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;
-
- off8 = off & ~(stride-1);
+ off8 = off & ~0xf;
mutex_lock(&adapter->ahw.mem_lock);
@@ -985,30 +970,28 @@ correct:
writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
i = 0;
- if (stride == 16) {
- writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
- writel((TA_CTL_START | TA_CTL_ENABLE),
- (mem_crb + TEST_AGT_CTRL));
-
- for (j = 0; j < MAX_CTL_CHECK; j++) {
- temp = readl(mem_crb + TEST_AGT_CTRL);
- if ((temp & TA_CTL_BUSY) == 0)
- break;
- }
+ writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
+ writel((TA_CTL_START | TA_CTL_ENABLE),
+ (mem_crb + TEST_AGT_CTRL));
- if (j >= MAX_CTL_CHECK) {
- ret = -EIO;
- goto done;
- }
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = readl(mem_crb + TEST_AGT_CTRL);
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
- i = (off & 0xf) ? 0 : 2;
- writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
- mem_crb + MIU_TEST_AGT_WRDATA(i));
- writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
- mem_crb + MIU_TEST_AGT_WRDATA(i+1));
- i = (off & 0xf) ? 2 : 0;
+ if (j >= MAX_CTL_CHECK) {
+ ret = -EIO;
+ goto done;
}
+ i = (off & 0xf) ? 0 : 2;
+ writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
+ mem_crb + MIU_TEST_AGT_WRDATA(i));
+ writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
+ mem_crb + MIU_TEST_AGT_WRDATA(i+1));
+ i = (off & 0xf) ? 2 : 0;
+
writel(data & 0xffffffff,
mem_crb + MIU_TEST_AGT_WRDATA(i));
writel((data >> 32) & 0xffffffff,
@@ -1044,7 +1027,7 @@ qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
{
int j, ret;
u32 temp, off8;
- u64 val, stride;
+ u64 val;
void __iomem *mem_crb;
/* Only 64-bit aligned access */
@@ -1053,7 +1036,7 @@ qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
/* P3 onward, test agent base for MIU and SIU is same */
if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
- QLCNIC_ADDR_QDR_NET_MAX_P3)) {
+ QLCNIC_ADDR_QDR_NET_MAX)) {
mem_crb = qlcnic_get_ioaddr(adapter,
QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
goto correct;
@@ -1073,9 +1056,7 @@ qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
return -EIO;
correct:
- stride = QLCNIC_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;
-
- off8 = off & ~(stride-1);
+ off8 = off & ~0xf;
mutex_lock(&adapter->ahw.mem_lock);
@@ -1097,7 +1078,7 @@ correct:
ret = -EIO;
} else {
off8 = MIU_TEST_AGT_RDDATA_LO;
- if ((stride == 16) && (off & 0xf))
+ if (off & 0xf)
off8 = MIU_TEST_AGT_RDDATA_UPPER_LO;
temp = readl(mem_crb + off8 + 4);
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 9d2c124048fa..1b621ca13e25 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -530,6 +530,22 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
return 0;
}
+void
+qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) {
+
+ int timeo;
+
+ if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo))
+ timeo = 30;
+
+ adapter->dev_init_timeo = timeo;
+
+ if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DRV_RESET_TIMEOUT, &timeo))
+ timeo = 10;
+
+ adapter->reset_ack_timeo = timeo;
+}
+
static int
qlcnic_has_mn(struct qlcnic_adapter *adapter)
{
@@ -612,7 +628,7 @@ qlcnic_validate_bootld(struct qlcnic_adapter *adapter)
return -EINVAL;
tab_size = cpu_to_le32(tab_desc->findex) +
- (cpu_to_le32(tab_desc->entry_size * (idx + 1)));
+ (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
if (adapter->fw->size < tab_size)
return -EINVAL;
@@ -621,7 +637,7 @@ qlcnic_validate_bootld(struct qlcnic_adapter *adapter)
(cpu_to_le32(tab_desc->entry_size) * (idx));
descr = (struct uni_data_desc *)&unirom[offs];
- data_size = descr->findex + cpu_to_le32(descr->size);
+ data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
if (adapter->fw->size < data_size)
return -EINVAL;
@@ -647,7 +663,7 @@ qlcnic_validate_fw(struct qlcnic_adapter *adapter)
return -EINVAL;
tab_size = cpu_to_le32(tab_desc->findex) +
- (cpu_to_le32(tab_desc->entry_size * (idx + 1)));
+ (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
if (adapter->fw->size < tab_size)
return -EINVAL;
@@ -655,7 +671,7 @@ qlcnic_validate_fw(struct qlcnic_adapter *adapter)
offs = cpu_to_le32(tab_desc->findex) +
(cpu_to_le32(tab_desc->entry_size) * (idx));
descr = (struct uni_data_desc *)&unirom[offs];
- data_size = descr->findex + cpu_to_le32(descr->size);
+ data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
if (adapter->fw->size < data_size)
return -EINVAL;
@@ -950,6 +966,16 @@ qlcnic_load_firmware(struct qlcnic_adapter *adapter)
flashaddr += 8;
}
+
+ size = (__force u32)qlcnic_get_fw_size(adapter) % 8;
+ if (size) {
+ data = cpu_to_le64(ptr64[i]);
+
+ if (qlcnic_pci_mem_write_2M(adapter,
+ flashaddr, data))
+ return -EIO;
+ }
+
} else {
u64 data;
u32 hi, lo;
@@ -1261,6 +1287,7 @@ qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
rds_ring->dma_size, PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(pdev, dma)) {
+ adapter->stats.rx_dma_map_error++;
dev_kfree_skb_any(skb);
buffer->skb = NULL;
return -ENOMEM;
@@ -1285,8 +1312,10 @@ static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
PCI_DMA_FROMDEVICE);
skb = buffer->skb;
- if (!skb)
+ if (!skb) {
+ adapter->stats.null_skb++;
goto no_skb;
+ }
if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) {
adapter->stats.csummed++;
@@ -1476,6 +1505,8 @@ qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
if (rxbuf)
list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
+ else
+ adapter->stats.null_rxbuf++;
skip:
for (; desc_cnt > 0; desc_cnt--) {
@@ -1523,9 +1554,10 @@ qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
int producer, count = 0;
struct list_head *head;
+ spin_lock(&rds_ring->lock);
+
producer = rds_ring->producer;
- spin_lock(&rds_ring->lock);
head = &rds_ring->free_list;
while (!list_empty(head)) {
@@ -1547,13 +1579,13 @@ qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
producer = get_next_index(producer, rds_ring->num_desc);
}
- spin_unlock(&rds_ring->lock);
if (count) {
rds_ring->producer = producer;
writel((producer-1) & (rds_ring->num_desc-1),
rds_ring->crb_rcv_producer);
}
+ spin_unlock(&rds_ring->lock);
}
static void
@@ -1565,10 +1597,11 @@ qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
int producer, count = 0;
struct list_head *head;
- producer = rds_ring->producer;
if (!spin_trylock(&rds_ring->lock))
return;
+ producer = rds_ring->producer;
+
head = &rds_ring->free_list;
while (!list_empty(head)) {
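Two of the qlcnic_init hunks above apply cpu_to_le32() to each raw on-flash field before doing arithmetic with it; multiplying first operates on byte-swapped data on big-endian hosts. A standalone worked example of the difference, using a local byte swap as a stand-in for the kernel helper (values are made up):

#include <stdint.h>
#include <stdio.h>

/* stand-in for the kernel's 32-bit endianness conversion on a big-endian host */
static uint32_t swap32(uint32_t x)
{
        return (x >> 24) | ((x >> 8) & 0xff00) |
               ((x & 0xff00) << 8) | (x << 24);
}

int main(void)
{
        uint32_t entry_size = 0x80000000;       /* 0x80, stored little-endian */
        int idx = 2;

        /* broken: multiply the raw field, then convert -> prints 0x80 */
        printf("broken: 0x%x\n", swap32(entry_size * (idx + 1)));
        /* fixed: convert the field, then multiply -> prints 0x180 */
        printf("fixed : 0x%x\n", swap32(entry_size) * (idx + 1));
        return 0;
}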
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 234dab1f9982..ee573fe52a8e 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -208,6 +208,9 @@ qlcnic_napi_enable(struct qlcnic_adapter *adapter)
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
napi_enable(&sds_ring->napi);
@@ -222,6 +225,9 @@ qlcnic_napi_disable(struct qlcnic_adapter *adapter)
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
qlcnic_disable_int(sds_ring);
@@ -650,7 +656,10 @@ qlcnic_start_firmware(struct qlcnic_adapter *adapter)
if (err)
return err;
- if (!qlcnic_can_start_firmware(adapter))
+ err = qlcnic_can_start_firmware(adapter);
+ if (err < 0)
+ return err;
+ else if (!err)
goto wait_init;
first_boot = QLCRD32(adapter, QLCNIC_CAM_RAM(0x1fc));
@@ -950,11 +959,11 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
adapter->max_sds_rings = max_sds_rings;
if (qlcnic_attach(adapter))
- return;
+ goto out;
if (netif_running(netdev))
__qlcnic_up(adapter, netdev);
-
+out:
netif_device_attach(netdev);
}
@@ -976,8 +985,10 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
adapter->diag_test = test;
ret = qlcnic_attach(adapter);
- if (ret)
+ if (ret) {
+ netif_device_attach(netdev);
return ret;
+ }
if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
@@ -1010,16 +1021,12 @@ qlcnic_reset_context(struct qlcnic_adapter *adapter)
if (netif_running(netdev)) {
err = qlcnic_attach(adapter);
if (!err)
- err = __qlcnic_up(adapter, netdev);
-
- if (err)
- goto done;
+ __qlcnic_up(adapter, netdev);
}
netif_device_attach(netdev);
}
-done:
clear_bit(__QLCNIC_RESETTING, &adapter->state);
return err;
}
@@ -1139,6 +1146,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_iounmap;
}
+ qlcnic_setup_idc_param(adapter);
err = qlcnic_start_firmware(adapter);
if (err)
@@ -1334,6 +1342,7 @@ err_out_detach:
qlcnic_detach(adapter);
err_out:
qlcnic_clr_all_drv_state(adapter);
+ netif_device_attach(netdev);
return err;
}
#endif
@@ -1570,6 +1579,11 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
int frag_count, no_of_desc;
u32 num_txd = tx_ring->num_desc;
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ netif_stop_queue(netdev);
+ return NETDEV_TX_BUSY;
+ }
+
frag_count = skb_shinfo(skb)->nr_frags + 1;
/* 4 fragments per cmd des */
@@ -1586,8 +1600,10 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
pdev = adapter->pdev;
- if (qlcnic_map_tx_skb(pdev, skb, pbuf))
+ if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
+ adapter->stats.tx_dma_map_error++;
goto drop_packet;
+ }
pbuf->skb = skb;
pbuf->frag_count = frag_count;
@@ -1739,6 +1755,7 @@ static void qlcnic_tx_timeout_task(struct work_struct *work)
request_reset:
adapter->need_fw_reset = 1;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ QLCDB(adapter, DRV, "Resetting adapter\n");
}
static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
@@ -1943,8 +1960,8 @@ static void qlcnic_poll_controller(struct net_device *netdev)
}
#endif
-static void
-qlcnic_set_drv_state(struct qlcnic_adapter *adapter, int state)
+static int
+qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
{
u32 val;
@@ -1952,18 +1969,20 @@ qlcnic_set_drv_state(struct qlcnic_adapter *adapter, int state)
state != QLCNIC_DEV_NEED_QUISCENT);
if (qlcnic_api_lock(adapter))
- return ;
+ return -EIO;
val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
if (state == QLCNIC_DEV_NEED_RESET)
- val |= ((u32)0x1 << (adapter->portnum * 4));
+ QLC_DEV_SET_RST_RDY(val, adapter->portnum);
else if (state == QLCNIC_DEV_NEED_QUISCENT)
- val |= ((u32)0x1 << ((adapter->portnum * 4) + 1));
+ QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
qlcnic_api_unlock(adapter);
+
+ return 0;
}
static int
@@ -1975,7 +1994,7 @@ qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
return -EBUSY;
val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
- val &= ~((u32)0x3 << (adapter->portnum * 4));
+ QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
qlcnic_api_unlock(adapter);
@@ -1992,14 +2011,14 @@ qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter)
goto err;
val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
- val &= ~((u32)0x1 << (adapter->portnum * 4));
+ QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
if (!(val & 0x11111111))
QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
- val &= ~((u32)0x3 << (adapter->portnum * 4));
+ QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
qlcnic_api_unlock(adapter);
@@ -2009,6 +2028,7 @@ err:
clear_bit(__QLCNIC_RESETTING, &adapter->state);
}
+/* Grab api lock, before checking state */
static int
qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
{
@@ -2028,26 +2048,27 @@ static int
qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
{
u32 val, prev_state;
- int cnt = 0;
- int portnum = adapter->portnum;
+ u8 dev_init_timeo = adapter->dev_init_timeo;
+ u8 portnum = adapter->portnum;
+
+ if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
+ return 1;
if (qlcnic_api_lock(adapter))
return -1;
val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
- if (!(val & ((int)0x1 << (portnum * 4)))) {
- val |= ((u32)0x1 << (portnum * 4));
+ if (!(val & (1 << (portnum * 4)))) {
+ QLC_DEV_SET_REF_CNT(val, portnum);
QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
- } else if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state)) {
- goto start_fw;
}
prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ QLCDB(adapter, HW, "Device state = %u\n", prev_state);
switch (prev_state) {
case QLCNIC_DEV_COLD:
-start_fw:
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITALIZING);
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
qlcnic_api_unlock(adapter);
return 1;
@@ -2057,35 +2078,43 @@ start_fw:
case QLCNIC_DEV_NEED_RESET:
val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
- val |= ((u32)0x1 << (portnum * 4));
+ QLC_DEV_SET_RST_RDY(val, portnum);
QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
break;
case QLCNIC_DEV_NEED_QUISCENT:
val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
- val |= ((u32)0x1 << ((portnum * 4) + 1));
+ QLC_DEV_SET_QSCNT_RDY(val, portnum);
QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
break;
case QLCNIC_DEV_FAILED:
qlcnic_api_unlock(adapter);
return -1;
+
+ case QLCNIC_DEV_INITIALIZING:
+ case QLCNIC_DEV_QUISCENT:
+ break;
}
qlcnic_api_unlock(adapter);
- msleep(1000);
- while ((QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) != QLCNIC_DEV_READY) &&
- ++cnt < 20)
+
+ do {
msleep(1000);
+ } while ((QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) != QLCNIC_DEV_READY)
+ && --dev_init_timeo);
- if (cnt >= 20)
+ if (!dev_init_timeo) {
+ dev_err(&adapter->pdev->dev,
+ "Waiting for device to initialize timeout\n");
return -1;
+ }
if (qlcnic_api_lock(adapter))
return -1;
val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
- val &= ~((u32)0x3 << (portnum * 4));
+ QLC_DEV_CLR_RST_QSCNT(val, portnum);
QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
qlcnic_api_unlock(adapter);
@@ -2098,44 +2127,60 @@ qlcnic_fwinit_work(struct work_struct *work)
{
struct qlcnic_adapter *adapter = container_of(work,
struct qlcnic_adapter, fw_work.work);
- int dev_state;
+ u32 dev_state = 0xf;
- if (++adapter->fw_wait_cnt > FW_POLL_THRESH)
+ if (qlcnic_api_lock(adapter))
goto err_ret;
- if (test_bit(__QLCNIC_START_FW, &adapter->state)) {
+ if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
+ dev_err(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n",
+ adapter->reset_ack_timeo);
+ goto skip_ack_check;
+ }
- if (qlcnic_check_drv_state(adapter)) {
- qlcnic_schedule_work(adapter,
- qlcnic_fwinit_work, FW_POLL_DELAY);
- return;
+ if (!qlcnic_check_drv_state(adapter)) {
+skip_ack_check:
+ dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ if (dev_state == QLCNIC_DEV_NEED_RESET) {
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_INITIALIZING);
+ set_bit(__QLCNIC_START_FW, &adapter->state);
+ QLCDB(adapter, DRV, "Restarting fw\n");
}
+ qlcnic_api_unlock(adapter);
+
if (!qlcnic_start_firmware(adapter)) {
qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
return;
}
-
goto err_ret;
}
+ qlcnic_api_unlock(adapter);
+
dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
+
switch (dev_state) {
- case QLCNIC_DEV_READY:
- if (!qlcnic_start_firmware(adapter)) {
- qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
- return;
- }
+ case QLCNIC_DEV_NEED_RESET:
+ qlcnic_schedule_work(adapter,
+ qlcnic_fwinit_work, FW_POLL_DELAY);
+ return;
case QLCNIC_DEV_FAILED:
break;
default:
- qlcnic_schedule_work(adapter,
- qlcnic_fwinit_work, 2 * FW_POLL_DELAY);
- return;
+ if (!qlcnic_start_firmware(adapter)) {
+ qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
+ return;
+ }
}
err_ret:
+ dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
+ "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
+ netif_device_attach(adapter->netdev);
qlcnic_clr_all_drv_state(adapter);
}
@@ -2163,7 +2208,8 @@ qlcnic_detach_work(struct work_struct *work)
if (adapter->temp == QLCNIC_TEMP_PANIC)
goto err_ret;
- qlcnic_set_drv_state(adapter, adapter->dev_state);
+ if (qlcnic_set_drv_state(adapter, adapter->dev_state))
+ goto err_ret;
adapter->fw_wait_cnt = 0;
@@ -2172,10 +2218,14 @@ qlcnic_detach_work(struct work_struct *work)
return;
err_ret:
+ dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n",
+ status, adapter->temp);
+ netif_device_attach(netdev);
qlcnic_clr_all_drv_state(adapter);
}
+/*Transit to RESET state from READY state only */
static void
qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
{
@@ -2186,9 +2236,9 @@ qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
- if (state != QLCNIC_DEV_INITALIZING && state != QLCNIC_DEV_NEED_RESET) {
+ if (state == QLCNIC_DEV_READY) {
QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
- set_bit(__QLCNIC_START_FW, &adapter->state);
+ QLCDB(adapter, DRV, "NEED_RESET state set\n");
}
qlcnic_api_unlock(adapter);
@@ -2233,9 +2283,8 @@ qlcnic_attach_work(struct work_struct *work)
qlcnic_config_indev_addr(netdev, NETDEV_UP);
}
- netif_device_attach(netdev);
-
done:
+ netif_device_attach(netdev);
adapter->fw_fail_cnt = 0;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
@@ -2285,8 +2334,11 @@ detach:
QLCNIC_DEV_NEED_RESET;
if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
- !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
+
qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
+ QLCDB(adapter, DRV, "fw recovery scheduled.\n");
+ }
return 1;
}
@@ -2387,14 +2439,21 @@ static int
qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
loff_t offset, size_t size)
{
+ size_t crb_size = 4;
+
if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
return -EIO;
- if ((size != 4) || (offset & 0x3))
- return -EINVAL;
+ if (offset < QLCNIC_PCI_CRBSPACE) {
+ if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
+ QLCNIC_PCI_CAMQM_END))
+ crb_size = 8;
+ else
+ return -EINVAL;
+ }
- if (offset < QLCNIC_PCI_CRBSPACE)
- return -EINVAL;
+ if ((size != crb_size) || (offset & (crb_size-1)))
+ return -EINVAL;
return 0;
}
@@ -2406,14 +2465,20 @@ qlcnic_sysfs_read_crb(struct kobject *kobj, struct bin_attribute *attr,
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
u32 data;
+ u64 qmdata;
int ret;
ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
if (ret != 0)
return ret;
- data = QLCRD32(adapter, offset);
- memcpy(buf, &data, size);
+ if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
+ qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
+ memcpy(buf, &qmdata, size);
+ } else {
+ data = QLCRD32(adapter, offset);
+ memcpy(buf, &data, size);
+ }
return size;
}
@@ -2424,14 +2489,20 @@ qlcnic_sysfs_write_crb(struct kobject *kobj, struct bin_attribute *attr,
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
u32 data;
+ u64 qmdata;
int ret;
ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
if (ret != 0)
return ret;
- memcpy(&data, buf, size);
- QLCWR32(adapter, offset, data);
+ if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
+ memcpy(&qmdata, buf, size);
+ qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
+ } else {
+ memcpy(&data, buf, size);
+ QLCWR32(adapter, offset, data);
+ }
return size;
}
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 8b742b639ceb..20624ba44a37 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -1344,8 +1344,8 @@ struct oal {
};
struct map_list {
- DECLARE_PCI_UNMAP_ADDR(mapaddr);
- DECLARE_PCI_UNMAP_LEN(maplen);
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ DEFINE_DMA_UNMAP_LEN(maplen);
};
struct tx_ring_desc {
@@ -1373,8 +1373,8 @@ struct bq_desc {
} p;
__le64 *addr;
u32 index;
- DECLARE_PCI_UNMAP_ADDR(mapaddr);
- DECLARE_PCI_UNMAP_LEN(maplen);
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ DEFINE_DMA_UNMAP_LEN(maplen);
};
#define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count))
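The qlge.h hunks above switch descriptor bookkeeping from the PCI-specific DECLARE_PCI_UNMAP_ADDR/LEN macros to the generic DEFINE_DMA_UNMAP_ADDR/LEN ones. A minimal sketch of how the generic accessors pair up at map and unmap time follows; the descriptor struct and the helper names are hypothetical and not part of the patch:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Hypothetical descriptor carrying DMA unmap state (illustrative only).
 * The fields compile away when CONFIG_NEED_DMA_MAP_STATE is not set. */
struct example_desc {
	DEFINE_DMA_UNMAP_ADDR(mapaddr);
	DEFINE_DMA_UNMAP_LEN(maplen);
};

/* Record the mapping so it can be undone later without reparsing the buffer. */
static int example_map(struct pci_dev *pdev, struct example_desc *d,
		       void *buf, size_t len)
{
	dma_addr_t map = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);

	if (pci_dma_mapping_error(pdev, map))
		return -ENOMEM;
	dma_unmap_addr_set(d, mapaddr, map);
	dma_unmap_len_set(d, maplen, len);
	return 0;
}

/* Undo the mapping using the stored address and length. */
static void example_unmap(struct pci_dev *pdev, struct example_desc *d)
{
	pci_unmap_single(pdev, dma_unmap_addr(d, mapaddr),
			 dma_unmap_len(d, maplen), PCI_DMA_TODEVICE);
}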
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index 362664628937..68a1c9b91e74 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -1340,7 +1340,7 @@ void ql_mpi_core_to_log(struct work_struct *work)
for (i = 0; i < count; i += 8) {
printk(KERN_ERR "%.08x: %.08x %.08x %.08x %.08x %.08x "
- "%.08x %.08x %.08x \n", i,
+ "%.08x %.08x %.08x\n", i,
tmp[i + 0],
tmp[i + 1],
tmp[i + 2],
@@ -2058,7 +2058,7 @@ void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "",
ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "",
ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : "");
- printk(KERN_ERR PFX "flags3 = %s %s %s \n",
+ printk(KERN_ERR PFX "flags3 = %s %s %s\n",
ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "",
ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "",
ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : "");
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index fd34f266c0a8..fa4b24c49f42 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -1057,7 +1057,7 @@ static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
pci_dma_sync_single_for_cpu(qdev->pdev,
- pci_unmap_addr(lbq_desc, mapaddr),
+ dma_unmap_addr(lbq_desc, mapaddr),
rx_ring->lbq_buf_size,
PCI_DMA_FROMDEVICE);
@@ -1170,8 +1170,8 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
map = lbq_desc->p.pg_chunk.map +
lbq_desc->p.pg_chunk.offset;
- pci_unmap_addr_set(lbq_desc, mapaddr, map);
- pci_unmap_len_set(lbq_desc, maplen,
+ dma_unmap_addr_set(lbq_desc, mapaddr, map);
+ dma_unmap_len_set(lbq_desc, maplen,
rx_ring->lbq_buf_size);
*lbq_desc->addr = cpu_to_le64(map);
@@ -1241,8 +1241,8 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
sbq_desc->p.skb = NULL;
return;
}
- pci_unmap_addr_set(sbq_desc, mapaddr, map);
- pci_unmap_len_set(sbq_desc, maplen,
+ dma_unmap_addr_set(sbq_desc, mapaddr, map);
+ dma_unmap_len_set(sbq_desc, maplen,
rx_ring->sbq_buf_size);
*sbq_desc->addr = cpu_to_le64(map);
}
@@ -1298,18 +1298,18 @@ static void ql_unmap_send(struct ql_adapter *qdev,
"unmapping OAL area.\n");
}
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(&tx_ring_desc->map[i],
+ dma_unmap_addr(&tx_ring_desc->map[i],
mapaddr),
- pci_unmap_len(&tx_ring_desc->map[i],
+ dma_unmap_len(&tx_ring_desc->map[i],
maplen),
PCI_DMA_TODEVICE);
} else {
netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
"unmapping frag %d.\n", i);
pci_unmap_page(qdev->pdev,
- pci_unmap_addr(&tx_ring_desc->map[i],
+ dma_unmap_addr(&tx_ring_desc->map[i],
mapaddr),
- pci_unmap_len(&tx_ring_desc->map[i],
+ dma_unmap_len(&tx_ring_desc->map[i],
maplen), PCI_DMA_TODEVICE);
}
}
@@ -1348,8 +1348,8 @@ static int ql_map_send(struct ql_adapter *qdev,
tbd->len = cpu_to_le32(len);
tbd->addr = cpu_to_le64(map);
- pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
- pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
+ dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
+ dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
map_idx++;
/*
@@ -1402,9 +1402,9 @@ static int ql_map_send(struct ql_adapter *qdev,
tbd->len =
cpu_to_le32((sizeof(struct tx_buf_desc) *
(frag_cnt - frag_idx)) | TX_DESC_C);
- pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
+ dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
map);
- pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
+ dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
sizeof(struct oal));
tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
map_idx++;
@@ -1425,8 +1425,8 @@ static int ql_map_send(struct ql_adapter *qdev,
tbd->addr = cpu_to_le64(map);
tbd->len = cpu_to_le32(frag->size);
- pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
- pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
+ dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
+ dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
frag->size);
}
@@ -1742,8 +1742,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
*/
sbq_desc = ql_get_curr_sbuf(rx_ring);
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(sbq_desc, mapaddr),
- pci_unmap_len(sbq_desc, maplen),
+ dma_unmap_addr(sbq_desc, mapaddr),
+ dma_unmap_len(sbq_desc, maplen),
PCI_DMA_FROMDEVICE);
skb = sbq_desc->p.skb;
ql_realign_skb(skb, hdr_len);
@@ -1774,18 +1774,18 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
*/
sbq_desc = ql_get_curr_sbuf(rx_ring);
pci_dma_sync_single_for_cpu(qdev->pdev,
- pci_unmap_addr
+ dma_unmap_addr
(sbq_desc, mapaddr),
- pci_unmap_len
+ dma_unmap_len
(sbq_desc, maplen),
PCI_DMA_FROMDEVICE);
memcpy(skb_put(skb, length),
sbq_desc->p.skb->data, length);
pci_dma_sync_single_for_device(qdev->pdev,
- pci_unmap_addr
+ dma_unmap_addr
(sbq_desc,
mapaddr),
- pci_unmap_len
+ dma_unmap_len
(sbq_desc,
maplen),
PCI_DMA_FROMDEVICE);
@@ -1798,9 +1798,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
ql_realign_skb(skb, length);
skb_put(skb, length);
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(sbq_desc,
+ dma_unmap_addr(sbq_desc,
mapaddr),
- pci_unmap_len(sbq_desc,
+ dma_unmap_len(sbq_desc,
maplen),
PCI_DMA_FROMDEVICE);
sbq_desc->p.skb = NULL;
@@ -1839,9 +1839,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
return NULL;
}
pci_unmap_page(qdev->pdev,
- pci_unmap_addr(lbq_desc,
+ dma_unmap_addr(lbq_desc,
mapaddr),
- pci_unmap_len(lbq_desc, maplen),
+ dma_unmap_len(lbq_desc, maplen),
PCI_DMA_FROMDEVICE);
skb_reserve(skb, NET_IP_ALIGN);
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
@@ -1874,8 +1874,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
int size, i = 0;
sbq_desc = ql_get_curr_sbuf(rx_ring);
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(sbq_desc, mapaddr),
- pci_unmap_len(sbq_desc, maplen),
+ dma_unmap_addr(sbq_desc, mapaddr),
+ dma_unmap_len(sbq_desc, maplen),
PCI_DMA_FROMDEVICE);
if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
/*
@@ -2737,8 +2737,8 @@ static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
}
if (sbq_desc->p.skb) {
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(sbq_desc, mapaddr),
- pci_unmap_len(sbq_desc, maplen),
+ dma_unmap_addr(sbq_desc, mapaddr),
+ dma_unmap_len(sbq_desc, maplen),
PCI_DMA_FROMDEVICE);
dev_kfree_skb(sbq_desc->p.skb);
sbq_desc->p.skb = NULL;
@@ -4207,7 +4207,7 @@ static struct net_device_stats *qlge_get_stats(struct net_device
static void qlge_set_multicast_list(struct net_device *ndev)
{
struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
int i, status;
status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
@@ -4271,8 +4271,8 @@ static void qlge_set_multicast_list(struct net_device *ndev)
if (status)
goto exit;
i = 0;
- netdev_for_each_mc_addr(mc_ptr, ndev) {
- if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
+ netdev_for_each_mc_addr(ha, ndev) {
+ if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
MAC_ADDR_TYPE_MULTI_MAC, i)) {
netif_err(qdev, hw, qdev->ndev,
"Failed to loadmulticast address.\n");
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 0298d8c1dcb6..3cc7befa3eb1 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -330,7 +330,7 @@ static int r6040_alloc_rxbufs(struct net_device *dev)
do {
skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
if (!skb) {
- printk(KERN_ERR DRV_NAME "%s: failed to alloc skb for rx\n", dev->name);
+ netdev_err(dev, "failed to alloc skb for rx\n");
rc = -ENOMEM;
goto err_exit;
}
@@ -410,9 +410,9 @@ static void r6040_tx_timeout(struct net_device *dev)
struct r6040_private *priv = netdev_priv(dev);
void __iomem *ioaddr = priv->base;
- printk(KERN_WARNING "%s: transmit timed out, int enable %4.4x "
+ netdev_warn(dev, "transmit timed out, int enable %4.4x "
"status %4.4x, PHY status %4.4x\n",
- dev->name, ioread16(ioaddr + MIER),
+ ioread16(ioaddr + MIER),
ioread16(ioaddr + MISR),
r6040_mdio_read(dev, priv->mii_if.phy_id, MII_BMSR));
@@ -897,7 +897,7 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
if (!lp->tx_free_desc) {
spin_unlock_irqrestore(&lp->lock, flags);
netif_stop_queue(dev);
- printk(KERN_ERR DRV_NAME ": no tx descriptor\n");
+ netdev_err(dev, ": no tx descriptor\n");
return NETDEV_TX_BUSY;
}
@@ -924,7 +924,6 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
if (!lp->tx_free_desc)
netif_stop_queue(dev);
- dev->trans_start = jiffies;
spin_unlock_irqrestore(&lp->lock, flags);
return NETDEV_TX_OK;
@@ -937,7 +936,7 @@ static void r6040_multicast_list(struct net_device *dev)
u16 *adrp;
u16 reg;
unsigned long flags;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int i;
/* MAC Address */
@@ -972,8 +971,8 @@ static void r6040_multicast_list(struct net_device *dev)
for (i = 0; i < 4; i++)
hash_table[i] = 0;
- netdev_for_each_mc_addr(dmi, dev) {
- char *addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ char *addrs = ha->addr;
if (!(*addrs & 1))
continue;
@@ -990,9 +989,9 @@ static void r6040_multicast_list(struct net_device *dev)
}
/* Multicast Address 1~4 case */
i = 0;
- netdev_for_each_mc_addr(dmi, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (i < MCAST_MAX) {
- adrp = (u16 *) dmi->dmi_addr;
+ adrp = (u16 *) ha->addr;
iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
@@ -1090,20 +1089,20 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
/* this should always be supported */
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- printk(KERN_ERR DRV_NAME ": 32-bit PCI DMA addresses"
+ dev_err(&pdev->dev, "32-bit PCI DMA addresses"
"not supported by the card\n");
goto err_out;
}
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- printk(KERN_ERR DRV_NAME ": 32-bit PCI DMA addresses"
+ dev_err(&pdev->dev, "32-bit PCI DMA addresses"
"not supported by the card\n");
goto err_out;
}
/* IO Size check */
if (pci_resource_len(pdev, bar) < io_size) {
- printk(KERN_ERR DRV_NAME ": Insufficient PCI resources, aborting\n");
+ dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
err = -EIO;
goto err_out;
}
@@ -1112,7 +1111,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
dev = alloc_etherdev(sizeof(struct r6040_private));
if (!dev) {
- printk(KERN_ERR DRV_NAME ": Failed to allocate etherdev\n");
+ dev_err(&pdev->dev, "Failed to allocate etherdev\n");
err = -ENOMEM;
goto err_out;
}
@@ -1122,14 +1121,13 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
err = pci_request_regions(pdev, DRV_NAME);
if (err) {
- printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n");
+ dev_err(&pdev->dev, "Failed to request PCI regions\n");
goto err_out_free_dev;
}
ioaddr = pci_iomap(pdev, bar, io_size);
if (!ioaddr) {
- printk(KERN_ERR DRV_NAME ": ioremap failed for device %s\n",
- pci_name(pdev));
+ dev_err(&pdev->dev, "ioremap failed for device\n");
err = -EIO;
goto err_out_free_res;
}
@@ -1156,7 +1154,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
/* Some bootloader/BIOSes do not initialize
* MAC address, warn about that */
if (!(adrp[0] || adrp[1] || adrp[2])) {
- printk(KERN_WARNING DRV_NAME ": MAC address not initialized, generating random\n");
+ netdev_warn(dev, "MAC address not initialized, generating random\n");
random_ether_addr(dev->dev_addr);
}
@@ -1184,7 +1182,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
/* Check the vendor ID on the PHY, if 0xffff assume none attached */
if (r6040_phy_read(ioaddr, lp->phy_addr, 2) == 0xffff) {
- printk(KERN_ERR DRV_NAME ": Failed to detect an attached PHY\n");
+ dev_err(&pdev->dev, "Failed to detect an attached PHY\n");
err = -ENODEV;
goto err_out_unmap;
}
@@ -1192,7 +1190,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
/* Register net device. After this dev->name assign */
err = register_netdev(dev);
if (err) {
- printk(KERN_ERR DRV_NAME ": Failed to register net device\n");
+ dev_err(&pdev->dev, "Failed to register net device\n");
goto err_out_unmap;
}
return 0;
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index dd8106ff35aa..217e709bda3e 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -23,6 +23,7 @@
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
#include <asm/system.h>
#include <asm/io.h>
@@ -509,6 +510,7 @@ struct rtl8169_private {
struct mii_if_info mii;
struct rtl8169_counters counters;
+ u32 saved_wolopts;
};
MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
@@ -748,53 +750,61 @@ static void rtl8169_check_link_status(struct net_device *dev,
spin_lock_irqsave(&tp->lock, flags);
if (tp->link_ok(ioaddr)) {
+ /* This is to cancel a scheduled suspend if there's one. */
+ pm_request_resume(&tp->pci_dev->dev);
netif_carrier_on(dev);
netif_info(tp, ifup, dev, "link up\n");
} else {
netif_carrier_off(dev);
netif_info(tp, ifdown, dev, "link down\n");
+ pm_schedule_suspend(&tp->pci_dev->dev, 100);
}
spin_unlock_irqrestore(&tp->lock, flags);
}
-static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
+
+static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
{
- struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
u8 options;
-
- wol->wolopts = 0;
-
-#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
- wol->supported = WAKE_ANY;
-
- spin_lock_irq(&tp->lock);
+ u32 wolopts = 0;
options = RTL_R8(Config1);
if (!(options & PMEnable))
- goto out_unlock;
+ return 0;
options = RTL_R8(Config3);
if (options & LinkUp)
- wol->wolopts |= WAKE_PHY;
+ wolopts |= WAKE_PHY;
if (options & MagicPacket)
- wol->wolopts |= WAKE_MAGIC;
+ wolopts |= WAKE_MAGIC;
options = RTL_R8(Config5);
if (options & UWF)
- wol->wolopts |= WAKE_UCAST;
+ wolopts |= WAKE_UCAST;
if (options & BWF)
- wol->wolopts |= WAKE_BCAST;
+ wolopts |= WAKE_BCAST;
if (options & MWF)
- wol->wolopts |= WAKE_MCAST;
+ wolopts |= WAKE_MCAST;
-out_unlock:
- spin_unlock_irq(&tp->lock);
+ return wolopts;
}
-static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct rtl8169_private *tp = netdev_priv(dev);
+
+ spin_lock_irq(&tp->lock);
+
+ wol->supported = WAKE_ANY;
+ wol->wolopts = __rtl8169_get_wol(tp);
+
+ spin_unlock_irq(&tp->lock);
+}
+
+static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
+{
void __iomem *ioaddr = tp->mmio_addr;
unsigned int i;
static const struct {
@@ -811,23 +821,29 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{ WAKE_ANY, Config5, LanWake }
};
- spin_lock_irq(&tp->lock);
-
RTL_W8(Cfg9346, Cfg9346_Unlock);
for (i = 0; i < ARRAY_SIZE(cfg); i++) {
u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
- if (wol->wolopts & cfg[i].opt)
+ if (wolopts & cfg[i].opt)
options |= cfg[i].mask;
RTL_W8(cfg[i].reg, options);
}
RTL_W8(Cfg9346, Cfg9346_Lock);
+}
+
+static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ spin_lock_irq(&tp->lock);
if (wol->wolopts)
tp->features |= RTL_FEATURE_WOL;
else
tp->features &= ~RTL_FEATURE_WOL;
+ __rtl8169_set_wol(tp, wol->wolopts);
device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
spin_unlock_irq(&tp->lock);
@@ -3192,6 +3208,12 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
+ if (pci_dev_run_wake(pdev)) {
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ }
+ pm_runtime_idle(&pdev->dev);
+
out:
return rc;
@@ -3213,10 +3235,18 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
+ pm_runtime_get_sync(&pdev->dev);
+
flush_scheduled_work();
unregister_netdev(dev);
+ if (pci_dev_run_wake(pdev)) {
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ }
+ pm_runtime_put_noidle(&pdev->dev);
+
/* restore original MAC address */
rtl_rar_set(tp, dev->perm_addr);
@@ -3243,6 +3273,7 @@ static int rtl8169_open(struct net_device *dev)
struct pci_dev *pdev = tp->pci_dev;
int retval = -ENOMEM;
+ pm_runtime_get_sync(&pdev->dev);
/*
* Note that we use a magic value here; it's weird, I know
@@ -3263,7 +3294,7 @@ static int rtl8169_open(struct net_device *dev)
tp->TxDescArray = pci_alloc_consistent(pdev, R8169_TX_RING_BYTES,
&tp->TxPhyAddr);
if (!tp->TxDescArray)
- goto out;
+ goto err_pm_runtime_put;
tp->RxDescArray = pci_alloc_consistent(pdev, R8169_RX_RING_BYTES,
&tp->RxPhyAddr);
@@ -3290,6 +3321,9 @@ static int rtl8169_open(struct net_device *dev)
rtl8169_request_timer(dev);
+ tp->saved_wolopts = 0;
+ pm_runtime_put_noidle(&pdev->dev);
+
rtl8169_check_link_status(dev, tp, tp->mmio_addr);
out:
return retval;
@@ -3299,9 +3333,13 @@ err_release_ring_2:
err_free_rx_1:
pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
tp->RxPhyAddr);
+ tp->RxDescArray = NULL;
err_free_tx_0:
pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
tp->TxPhyAddr);
+ tp->TxDescArray = NULL;
+err_pm_runtime_put:
+ pm_runtime_put_noidle(&pdev->dev);
goto out;
}
@@ -4720,6 +4758,8 @@ static int rtl8169_close(struct net_device *dev)
struct rtl8169_private *tp = netdev_priv(dev);
struct pci_dev *pdev = tp->pci_dev;
+ pm_runtime_get_sync(&pdev->dev);
+
/* update counters before going down */
rtl8169_update_counters(dev);
@@ -4734,6 +4774,8 @@ static int rtl8169_close(struct net_device *dev)
tp->TxDescArray = NULL;
tp->RxDescArray = NULL;
+ pm_runtime_put_sync(&pdev->dev);
+
return 0;
}
@@ -4759,12 +4801,12 @@ static void rtl_set_rx_mode(struct net_device *dev)
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- netdev_for_each_mc_addr(mclist, dev) {
- int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
rx_mode |= AcceptMulticast;
}
@@ -4832,21 +4874,74 @@ static int rtl8169_suspend(struct device *device)
return 0;
}
+static void __rtl8169_resume(struct net_device *dev)
+{
+ netif_device_attach(dev);
+ rtl8169_schedule_work(dev, rtl8169_reset_task);
+}
+
static int rtl8169_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
- if (!netif_running(dev))
- goto out;
+ if (netif_running(dev))
+ __rtl8169_resume(dev);
- netif_device_attach(dev);
+ return 0;
+}
+
+static int rtl8169_runtime_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ if (!tp->TxDescArray)
+ return 0;
+
+ spin_lock_irq(&tp->lock);
+ tp->saved_wolopts = __rtl8169_get_wol(tp);
+ __rtl8169_set_wol(tp, WAKE_ANY);
+ spin_unlock_irq(&tp->lock);
+
+ rtl8169_net_suspend(dev);
+
+ return 0;
+}
+
+static int rtl8169_runtime_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ if (!tp->TxDescArray)
+ return 0;
+
+ spin_lock_irq(&tp->lock);
+ __rtl8169_set_wol(tp, tp->saved_wolopts);
+ tp->saved_wolopts = 0;
+ spin_unlock_irq(&tp->lock);
+
+ __rtl8169_resume(dev);
- rtl8169_schedule_work(dev, rtl8169_reset_task);
-out:
return 0;
}
+static int rtl8169_runtime_idle(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ if (!tp->TxDescArray)
+ return 0;
+
+ rtl8169_check_link_status(dev, tp, tp->mmio_addr);
+ return -EBUSY;
+}
+
static const struct dev_pm_ops rtl8169_pm_ops = {
.suspend = rtl8169_suspend,
.resume = rtl8169_resume,
@@ -4854,6 +4949,9 @@ static const struct dev_pm_ops rtl8169_pm_ops = {
.thaw = rtl8169_resume,
.poweroff = rtl8169_suspend,
.restore = rtl8169_resume,
+ .runtime_suspend = rtl8169_runtime_suspend,
+ .runtime_resume = rtl8169_runtime_resume,
+ .runtime_idle = rtl8169_runtime_idle,
};
#define RTL8169_PM_OPS (&rtl8169_pm_ops)
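The r8169 hunks above add runtime PM: probe enables it when the device can generate wake events, the link watcher schedules a runtime suspend on link-down and requests resume on link-up, and the runtime callbacks save and restore the WoL configuration. A brief sketch of the reference pairing used around open/close, with illustrative function names that are not from the patch:

#include <linux/pm_runtime.h>
#include <linux/netdevice.h>

/* Keep the device resumed while rings and IRQs are set up. */
static int example_open(struct net_device *dev, struct device *d)
{
	int rc = 0;

	pm_runtime_get_sync(d);		/* device must be powered for setup */
	/* ... allocate descriptor rings, request IRQ ... */
	pm_runtime_put_noidle(d);	/* drop ref without forcing an idle check */
	return rc;
}

/* Power up for a clean teardown, then allow suspend immediately. */
static int example_close(struct net_device *dev, struct device *d)
{
	pm_runtime_get_sync(d);
	/* ... free IRQ and rings ... */
	pm_runtime_put_sync(d);
	return 0;
}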
diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c
index f2e335f0d1b7..e26e107f93e0 100644
--- a/drivers/net/rrunner.c
+++ b/drivers/net/rrunner.c
@@ -1467,7 +1467,6 @@ static netdev_tx_t rr_start_xmit(struct sk_buff *skb,
spin_unlock_irqrestore(&rrpriv->lock, flags);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 92ae8d3de39b..f155928bf14e 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -2400,7 +2400,7 @@ static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
return NULL;
}
pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
- skb->len - skb->data_len, PCI_DMA_TODEVICE);
+ skb_headlen(skb), PCI_DMA_TODEVICE);
frg_cnt = skb_shinfo(skb)->nr_frags;
if (frg_cnt) {
txds++;
@@ -4202,7 +4202,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
}
- frg_len = skb->len - skb->data_len;
+ frg_len = skb_headlen(skb);
if (offload_type == SKB_GSO_UDP) {
int ufo_size;
@@ -4965,7 +4965,7 @@ static struct net_device_stats *s2io_get_stats(struct net_device *dev)
static void s2io_set_multicast(struct net_device *dev)
{
int i, j, prev_cnt;
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
struct s2io_nic *sp = netdev_priv(dev);
struct XENA_dev_config __iomem *bar0 = sp->bar0;
u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
@@ -5094,12 +5094,12 @@ static void s2io_set_multicast(struct net_device *dev)
/* Create the new Rx filter list and update the same in H/W. */
i = 0;
- netdev_for_each_mc_addr(mclist, dev) {
- memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
+ netdev_for_each_mc_addr(ha, dev) {
+ memcpy(sp->usr_addrs[i].addr, ha->addr,
ETH_ALEN);
mac_addr = 0;
for (j = 0; j < ETH_ALEN; j++) {
- mac_addr |= mclist->dmi_addr[j];
+ mac_addr |= ha->addr[j];
mac_addr <<= 8;
}
mac_addr >>= 8;
diff --git a/drivers/net/s6gmac.c b/drivers/net/s6gmac.c
index 45f26344b368..a7ff8ea342b4 100644
--- a/drivers/net/s6gmac.c
+++ b/drivers/net/s6gmac.c
@@ -396,7 +396,6 @@ static void s6gmac_rx_interrupt(struct net_device *dev)
} else {
skb_put(skb, (pfx >> S6_GMAC_BURST_POSTRD_LEN)
& S6_GMAC_BURST_POSTRD_LEN_MASK);
- skb->dev = dev;
skb->protocol = eth_type_trans(skb, dev);
skb->ip_summed = CHECKSUM_UNNECESSARY;
netif_rx(skb);
@@ -853,8 +852,8 @@ static int s6gmac_tx(struct sk_buff *skb, struct net_device *dev)
{
struct s6gmac *pd = netdev_priv(dev);
unsigned long flags;
+
spin_lock_irqsave(&pd->lock, flags);
- dev->trans_start = jiffies;
writel(skb->len << S6_GMAC_BURST_PREWR_LEN |
0 << S6_GMAC_BURST_PREWR_CFE |
1 << S6_GMAC_BURST_PREWR_PPE |
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index 04efc0c1bda9..1f3acc3a5dfd 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -48,23 +48,6 @@
#include <asm/io.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
-/* This is only here until the firmware is ready. In that case,
- the firmware leaves the ethernet address in the register for us. */
-#ifdef CONFIG_SIBYTE_STANDALONE
-#define SBMAC_ETH0_HWADDR "40:00:00:00:01:00"
-#define SBMAC_ETH1_HWADDR "40:00:00:00:01:01"
-#define SBMAC_ETH2_HWADDR "40:00:00:00:01:02"
-#define SBMAC_ETH3_HWADDR "40:00:00:00:01:03"
-#endif
-
-
-/* These identify the driver base version and may not be removed. */
-#if 0
-static char version1[] __initdata =
-"sb1250-mac.c:1.00 1/11/2001 Written by Mitch Lichtenberg\n";
-#endif
-
-
/* Operational parameters that usually are not changed. */
#define CONFIG_SBMAC_COALESCE
@@ -349,7 +332,6 @@ static int sbmac_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
********************************************************************* */
static char sbmac_string[] = "sb1250-mac";
-static char sbmac_pretty[] = "SB1250 MAC";
static char sbmac_mdio_string[] = "sb1250-mac-mdio";
@@ -2086,8 +2068,6 @@ static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
- dev->trans_start = jiffies;
-
spin_unlock_irqrestore(&sc->sbm_lock, flags);
return NETDEV_TX_OK;
@@ -2112,7 +2092,7 @@ static void sbmac_setmulti(struct sbmac_softc *sc)
uint64_t reg;
void __iomem *port;
int idx;
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
struct net_device *dev = sc->sbm_dev;
/*
@@ -2161,10 +2141,10 @@ static void sbmac_setmulti(struct sbmac_softc *sc)
* XXX if the table overflows */
idx = 1; /* skip station address */
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (idx == MAC_ADDR_COUNT)
break;
- reg = sbmac_addr2reg(mclist->dmi_addr);
+ reg = sbmac_addr2reg(ha->addr);
port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t));
__raw_writeq(reg, port);
idx++;
@@ -2182,85 +2162,6 @@ static void sbmac_setmulti(struct sbmac_softc *sc)
}
}
-#if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) || defined(SBMAC_ETH3_HWADDR)
-/**********************************************************************
- * SBMAC_PARSE_XDIGIT(str)
- *
- * Parse a hex digit, returning its value
- *
- * Input parameters:
- * str - character
- *
- * Return value:
- * hex value, or -1 if invalid
- ********************************************************************* */
-
-static int sbmac_parse_xdigit(char str)
-{
- int digit;
-
- if ((str >= '0') && (str <= '9'))
- digit = str - '0';
- else if ((str >= 'a') && (str <= 'f'))
- digit = str - 'a' + 10;
- else if ((str >= 'A') && (str <= 'F'))
- digit = str - 'A' + 10;
- else
- return -1;
-
- return digit;
-}
-
-/**********************************************************************
- * SBMAC_PARSE_HWADDR(str,hwaddr)
- *
- * Convert a string in the form xx:xx:xx:xx:xx:xx into a 6-byte
- * Ethernet address.
- *
- * Input parameters:
- * str - string
- * hwaddr - pointer to hardware address
- *
- * Return value:
- * 0 if ok, else -1
- ********************************************************************* */
-
-static int sbmac_parse_hwaddr(char *str, unsigned char *hwaddr)
-{
- int digit1,digit2;
- int idx = 6;
-
- while (*str && (idx > 0)) {
- digit1 = sbmac_parse_xdigit(*str);
- if (digit1 < 0)
- return -1;
- str++;
- if (!*str)
- return -1;
-
- if ((*str == ':') || (*str == '-')) {
- digit2 = digit1;
- digit1 = 0;
- }
- else {
- digit2 = sbmac_parse_xdigit(*str);
- if (digit2 < 0)
- return -1;
- str++;
- }
-
- *hwaddr++ = (digit1 << 4) | digit2;
- idx--;
-
- if (*str == '-')
- str++;
- if (*str == ':')
- str++;
- }
- return 0;
-}
-#endif
-
static int sb1250_change_mtu(struct net_device *_dev, int new_mtu)
{
if (new_mtu > ENET_PACKET_SIZE)
@@ -2585,7 +2486,7 @@ static void sbmac_tx_timeout (struct net_device *dev)
spin_lock_irqsave(&sc->sbm_lock, flags);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
dev->stats.tx_errors++;
spin_unlock_irqrestore(&sc->sbm_lock, flags);
@@ -2662,7 +2563,6 @@ static int sbmac_close(struct net_device *dev)
static int sbmac_poll(struct napi_struct *napi, int budget)
{
struct sbmac_softc *sc = container_of(napi, struct sbmac_softc, napi);
- struct net_device *dev = sc->sbm_dev;
int work_done;
work_done = sbdma_rx_process(sc, &(sc->sbm_rxdma), budget, 1);
@@ -2766,162 +2666,6 @@ static int __exit sbmac_remove(struct platform_device *pldev)
return 0;
}
-
-static struct platform_device **sbmac_pldev;
-static int sbmac_max_units;
-
-#if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) || defined(SBMAC_ETH3_HWADDR)
-static void __init sbmac_setup_hwaddr(int idx, char *addr)
-{
- void __iomem *sbm_base;
- unsigned long start, end;
- uint8_t eaddr[6];
- uint64_t val;
-
- if (idx >= sbmac_max_units)
- return;
-
- start = A_MAC_CHANNEL_BASE(idx);
- end = A_MAC_CHANNEL_BASE(idx + 1) - 1;
-
- sbm_base = ioremap_nocache(start, end - start + 1);
- if (!sbm_base) {
- printk(KERN_ERR "%s: unable to map device registers\n",
- sbmac_string);
- return;
- }
-
- sbmac_parse_hwaddr(addr, eaddr);
- val = sbmac_addr2reg(eaddr);
- __raw_writeq(val, sbm_base + R_MAC_ETHERNET_ADDR);
- val = __raw_readq(sbm_base + R_MAC_ETHERNET_ADDR);
-
- iounmap(sbm_base);
-}
-#endif
-
-static int __init sbmac_platform_probe_one(int idx)
-{
- struct platform_device *pldev;
- struct {
- struct resource r;
- char name[strlen(sbmac_pretty) + 4];
- } *res;
- int err;
-
- res = kzalloc(sizeof(*res), GFP_KERNEL);
- if (!res) {
- printk(KERN_ERR "%s.%d: unable to allocate memory\n",
- sbmac_string, idx);
- err = -ENOMEM;
- goto out_err;
- }
-
- /*
- * This is the base address of the MAC.
- */
- snprintf(res->name, sizeof(res->name), "%s %d", sbmac_pretty, idx);
- res->r.name = res->name;
- res->r.flags = IORESOURCE_MEM;
- res->r.start = A_MAC_CHANNEL_BASE(idx);
- res->r.end = A_MAC_CHANNEL_BASE(idx + 1) - 1;
-
- pldev = platform_device_register_simple(sbmac_string, idx, &res->r, 1);
- if (IS_ERR(pldev)) {
- printk(KERN_ERR "%s.%d: unable to register platform device\n",
- sbmac_string, idx);
- err = PTR_ERR(pldev);
- goto out_kfree;
- }
-
- if (!pldev->dev.driver) {
- err = 0; /* No hardware at this address. */
- goto out_unregister;
- }
-
- sbmac_pldev[idx] = pldev;
- return 0;
-
-out_unregister:
- platform_device_unregister(pldev);
-
-out_kfree:
- kfree(res);
-
-out_err:
- return err;
-}
-
-static void __init sbmac_platform_probe(void)
-{
- int i;
-
- /* Set the number of available units based on the SOC type. */
- switch (soc_type) {
- case K_SYS_SOC_TYPE_BCM1250:
- case K_SYS_SOC_TYPE_BCM1250_ALT:
- sbmac_max_units = 3;
- break;
- case K_SYS_SOC_TYPE_BCM1120:
- case K_SYS_SOC_TYPE_BCM1125:
- case K_SYS_SOC_TYPE_BCM1125H:
- case K_SYS_SOC_TYPE_BCM1250_ALT2: /* Hybrid */
- sbmac_max_units = 2;
- break;
- case K_SYS_SOC_TYPE_BCM1x55:
- case K_SYS_SOC_TYPE_BCM1x80:
- sbmac_max_units = 4;
- break;
- default:
- return; /* none */
- }
-
- /*
- * For bringup when not using the firmware, we can pre-fill
- * the MAC addresses using the environment variables
- * specified in this file (or maybe from the config file?)
- */
-#ifdef SBMAC_ETH0_HWADDR
- sbmac_setup_hwaddr(0, SBMAC_ETH0_HWADDR);
-#endif
-#ifdef SBMAC_ETH1_HWADDR
- sbmac_setup_hwaddr(1, SBMAC_ETH1_HWADDR);
-#endif
-#ifdef SBMAC_ETH2_HWADDR
- sbmac_setup_hwaddr(2, SBMAC_ETH2_HWADDR);
-#endif
-#ifdef SBMAC_ETH3_HWADDR
- sbmac_setup_hwaddr(3, SBMAC_ETH3_HWADDR);
-#endif
-
- sbmac_pldev = kcalloc(sbmac_max_units, sizeof(*sbmac_pldev),
- GFP_KERNEL);
- if (!sbmac_pldev) {
- printk(KERN_ERR "%s: unable to allocate memory\n",
- sbmac_string);
- return;
- }
-
- /*
- * Walk through the Ethernet controllers and find
- * those who have their MAC addresses set.
- */
- for (i = 0; i < sbmac_max_units; i++)
- if (sbmac_platform_probe_one(i))
- break;
-}
-
-
-static void __exit sbmac_platform_cleanup(void)
-{
- int i;
-
- for (i = 0; i < sbmac_max_units; i++)
- platform_device_unregister(sbmac_pldev[i]);
- kfree(sbmac_pldev);
-}
-
-
static struct platform_driver sbmac_driver = {
.probe = sbmac_probe,
.remove = __exit_p(sbmac_remove),
@@ -2932,20 +2676,11 @@ static struct platform_driver sbmac_driver = {
static int __init sbmac_init_module(void)
{
- int err;
-
- err = platform_driver_register(&sbmac_driver);
- if (err)
- return err;
-
- sbmac_platform_probe();
-
- return err;
+ return platform_driver_register(&sbmac_driver);
}
static void __exit sbmac_cleanup_module(void)
{
- sbmac_platform_cleanup();
platform_driver_unregister(&sbmac_driver);
}
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index d87c4787fffa..8c4067af32b0 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -433,13 +433,13 @@ static void _sc92031_set_mar(struct net_device *dev)
(dev->flags & IFF_ALLMULTI))
mar0 = mar1 = 0xffffffff;
else if (dev->flags & IFF_MULTICAST) {
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
- netdev_for_each_mc_addr(mc_list, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
u32 crc;
unsigned bit = 0;
- crc = ~ether_crc(ETH_ALEN, mc_list->dmi_addr);
+ crc = ~ether_crc(ETH_ALEN, ha->addr);
crc >>= 24;
if (crc & 0x01) bit |= 0x02;
@@ -987,8 +987,6 @@ static netdev_tx_t sc92031_start_xmit(struct sk_buff *skb,
iowrite32(tx_status, port_base + TxStatus0 + entry * 4);
mmiowb();
- dev->trans_start = jiffies;
-
if (priv->tx_head - priv->tx_tail >= NUM_TX_DESC)
netif_stop_queue(dev);
diff --git a/drivers/net/seeq8005.c b/drivers/net/seeq8005.c
index 374832cca11f..11ab32e0060b 100644
--- a/drivers/net/seeq8005.c
+++ b/drivers/net/seeq8005.c
@@ -390,7 +390,7 @@ static void seeq8005_timeout(struct net_device *dev)
tx_done(dev) ? "IRQ conflict" : "network cable problem");
/* Try to restart the adaptor. */
seeq8005_init(dev, 1);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -411,7 +411,6 @@ static netdev_tx_t seeq8005_send_packet(struct sk_buff *skb,
netif_stop_queue(dev);
hardware_send_packet(dev, buf, length);
- dev->trans_start = jiffies;
dev->stats.tx_bytes += length;
dev_kfree_skb (skb);
/* You might need to clean up and record Tx statistics here. */
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 649a264d6a81..156460527231 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -225,17 +225,17 @@ static void efx_fini_channels(struct efx_nic *efx);
* never be concurrently called more than once on the same channel,
* though different channels may be being processed concurrently.
*/
-static int efx_process_channel(struct efx_channel *channel, int rx_quota)
+static int efx_process_channel(struct efx_channel *channel, int budget)
{
struct efx_nic *efx = channel->efx;
- int rx_packets;
+ int spent;
if (unlikely(efx->reset_pending != RESET_TYPE_NONE ||
!channel->enabled))
return 0;
- rx_packets = efx_nic_process_eventq(channel, rx_quota);
- if (rx_packets == 0)
+ spent = efx_nic_process_eventq(channel, budget);
+ if (spent == 0)
return 0;
/* Deliver last RX packet. */
@@ -249,7 +249,7 @@ static int efx_process_channel(struct efx_channel *channel, int rx_quota)
efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
- return rx_packets;
+ return spent;
}
/* Mark channel as finished processing
@@ -278,17 +278,17 @@ static int efx_poll(struct napi_struct *napi, int budget)
{
struct efx_channel *channel =
container_of(napi, struct efx_channel, napi_str);
- int rx_packets;
+ int spent;
EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
channel->channel, raw_smp_processor_id());
- rx_packets = efx_process_channel(channel, budget);
+ spent = efx_process_channel(channel, budget);
- if (rx_packets < budget) {
+ if (spent < budget) {
struct efx_nic *efx = channel->efx;
- if (channel->used_flags & EFX_USED_BY_RX &&
+ if (channel->channel < efx->n_rx_channels &&
efx->irq_rx_adaptive &&
unlikely(++channel->irq_count == 1000)) {
if (unlikely(channel->irq_mod_score <
@@ -318,7 +318,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
efx_channel_processed(channel);
}
- return rx_packets;
+ return spent;
}
/* Process the eventq of the specified channel immediately on this CPU
@@ -333,7 +333,6 @@ void efx_process_channel_now(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
- BUG_ON(!channel->used_flags);
BUG_ON(!channel->enabled);
/* Disable interrupts and wait for ISRs to complete */
@@ -446,12 +445,12 @@ static void efx_set_channel_names(struct efx_nic *efx)
efx_for_each_channel(channel, efx) {
number = channel->channel;
- if (efx->n_channels > efx->n_rx_queues) {
- if (channel->channel < efx->n_rx_queues) {
+ if (efx->n_channels > efx->n_rx_channels) {
+ if (channel->channel < efx->n_rx_channels) {
type = "-rx";
} else {
type = "-tx";
- number -= efx->n_rx_queues;
+ number -= efx->n_rx_channels;
}
}
snprintf(channel->name, sizeof(channel->name),
@@ -585,8 +584,6 @@ static void efx_remove_channel(struct efx_channel *channel)
efx_for_each_channel_tx_queue(tx_queue, channel)
efx_remove_tx_queue(tx_queue);
efx_remove_eventq(channel);
-
- channel->used_flags = 0;
}
void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
@@ -956,10 +953,9 @@ static void efx_fini_io(struct efx_nic *efx)
pci_disable_device(efx->pci_dev);
}
-/* Get number of RX queues wanted. Return number of online CPU
- * packages in the expectation that an IRQ balancer will spread
- * interrupts across them. */
-static int efx_wanted_rx_queues(void)
+/* Get number of channels wanted. Each channel will have its own IRQ,
+ * 1 RX queue and/or 2 TX queues. */
+static int efx_wanted_channels(void)
{
cpumask_var_t core_mask;
int count;
@@ -995,34 +991,39 @@ static void efx_probe_interrupts(struct efx_nic *efx)
if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
struct msix_entry xentries[EFX_MAX_CHANNELS];
- int wanted_ints;
- int rx_queues;
+ int n_channels;
- /* We want one RX queue and interrupt per CPU package
- * (or as specified by the rss_cpus module parameter).
- * We will need one channel per interrupt.
- */
- rx_queues = rss_cpus ? rss_cpus : efx_wanted_rx_queues();
- wanted_ints = rx_queues + (separate_tx_channels ? 1 : 0);
- wanted_ints = min(wanted_ints, max_channels);
+ n_channels = efx_wanted_channels();
+ if (separate_tx_channels)
+ n_channels *= 2;
+ n_channels = min(n_channels, max_channels);
- for (i = 0; i < wanted_ints; i++)
+ for (i = 0; i < n_channels; i++)
xentries[i].entry = i;
- rc = pci_enable_msix(efx->pci_dev, xentries, wanted_ints);
+ rc = pci_enable_msix(efx->pci_dev, xentries, n_channels);
if (rc > 0) {
EFX_ERR(efx, "WARNING: Insufficient MSI-X vectors"
- " available (%d < %d).\n", rc, wanted_ints);
+ " available (%d < %d).\n", rc, n_channels);
EFX_ERR(efx, "WARNING: Performance may be reduced.\n");
- EFX_BUG_ON_PARANOID(rc >= wanted_ints);
- wanted_ints = rc;
+ EFX_BUG_ON_PARANOID(rc >= n_channels);
+ n_channels = rc;
rc = pci_enable_msix(efx->pci_dev, xentries,
- wanted_ints);
+ n_channels);
}
if (rc == 0) {
- efx->n_rx_queues = min(rx_queues, wanted_ints);
- efx->n_channels = wanted_ints;
- for (i = 0; i < wanted_ints; i++)
+ efx->n_channels = n_channels;
+ if (separate_tx_channels) {
+ efx->n_tx_channels =
+ max(efx->n_channels / 2, 1U);
+ efx->n_rx_channels =
+ max(efx->n_channels -
+ efx->n_tx_channels, 1U);
+ } else {
+ efx->n_tx_channels = efx->n_channels;
+ efx->n_rx_channels = efx->n_channels;
+ }
+ for (i = 0; i < n_channels; i++)
efx->channel[i].irq = xentries[i].vector;
} else {
/* Fall back to single channel MSI */
@@ -1033,8 +1034,9 @@ static void efx_probe_interrupts(struct efx_nic *efx)
/* Try single interrupt MSI */
if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
- efx->n_rx_queues = 1;
efx->n_channels = 1;
+ efx->n_rx_channels = 1;
+ efx->n_tx_channels = 1;
rc = pci_enable_msi(efx->pci_dev);
if (rc == 0) {
efx->channel[0].irq = efx->pci_dev->irq;
@@ -1046,8 +1048,9 @@ static void efx_probe_interrupts(struct efx_nic *efx)
/* Assume legacy interrupts */
if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
- efx->n_rx_queues = 1;
efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
+ efx->n_rx_channels = 1;
+ efx->n_tx_channels = 1;
efx->legacy_irq = efx->pci_dev->irq;
}
}
@@ -1068,21 +1071,24 @@ static void efx_remove_interrupts(struct efx_nic *efx)
static void efx_set_channels(struct efx_nic *efx)
{
+ struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
+ unsigned tx_channel_offset =
+ separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
- efx_for_each_tx_queue(tx_queue, efx) {
- if (separate_tx_channels)
- tx_queue->channel = &efx->channel[efx->n_channels-1];
- else
- tx_queue->channel = &efx->channel[0];
- tx_queue->channel->used_flags |= EFX_USED_BY_TX;
+ efx_for_each_channel(channel, efx) {
+ if (channel->channel - tx_channel_offset < efx->n_tx_channels) {
+ channel->tx_queue = &efx->tx_queue[
+ (channel->channel - tx_channel_offset) *
+ EFX_TXQ_TYPES];
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ tx_queue->channel = channel;
+ }
}
- efx_for_each_rx_queue(rx_queue, efx) {
+ efx_for_each_rx_queue(rx_queue, efx)
rx_queue->channel = &efx->channel[rx_queue->queue];
- rx_queue->channel->used_flags |= EFX_USED_BY_RX;
- }
}
static int efx_probe_nic(struct efx_nic *efx)
@@ -1096,11 +1102,12 @@ static int efx_probe_nic(struct efx_nic *efx)
if (rc)
return rc;
- /* Determine the number of channels and RX queues by trying to hook
+ /* Determine the number of channels and queues by trying to hook
* in MSI-X interrupts. */
efx_probe_interrupts(efx);
efx_set_channels(efx);
+ efx->net_dev->real_num_tx_queues = efx->n_tx_channels;
/* Initialise the interrupt moderation settings */
efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true);
@@ -1187,11 +1194,12 @@ static void efx_start_all(struct efx_nic *efx)
/* Mark the port as enabled so port reconfigurations can start, then
* restart the transmit interface early so the watchdog timer stops */
efx_start_port(efx);
- if (efx_dev_registered(efx))
- efx_wake_queue(efx);
- efx_for_each_channel(channel, efx)
+ efx_for_each_channel(channel, efx) {
+ if (efx_dev_registered(efx))
+ efx_wake_queue(channel);
efx_start_channel(channel);
+ }
efx_nic_enable_interrupts(efx);
@@ -1282,7 +1290,9 @@ static void efx_stop_all(struct efx_nic *efx)
/* Stop the kernel transmit interface late, so the watchdog
* timer isn't ticking over the flush */
if (efx_dev_registered(efx)) {
- efx_stop_queue(efx);
+ struct efx_channel *channel;
+ efx_for_each_channel(channel, efx)
+ efx_stop_queue(channel);
netif_tx_lock_bh(efx->net_dev);
netif_tx_unlock_bh(efx->net_dev);
}
@@ -1537,9 +1547,8 @@ static void efx_watchdog(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
- EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d:"
- " resetting channels\n",
- atomic_read(&efx->netif_stop_count), efx->port_enabled);
+ EFX_ERR(efx, "TX stuck with port_enabled=%d: resetting channels\n",
+ efx->port_enabled);
efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}
@@ -1603,7 +1612,7 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
static void efx_set_multicast_list(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
union efx_multicast_hash *mc_hash = &efx->multicast_hash;
u32 crc;
int bit;
@@ -1615,8 +1624,8 @@ static void efx_set_multicast_list(struct net_device *net_dev)
memset(mc_hash, 0xff, sizeof(*mc_hash));
} else {
memset(mc_hash, 0x00, sizeof(*mc_hash));
- netdev_for_each_mc_addr(mc_list, net_dev) {
- crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr);
+ netdev_for_each_mc_addr(ha, net_dev) {
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
set_bit_le(bit, mc_hash->byte);
}
@@ -2014,22 +2023,22 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
efx->net_dev = net_dev;
efx->rx_checksum_enabled = true;
- spin_lock_init(&efx->netif_stop_lock);
spin_lock_init(&efx->stats_lock);
mutex_init(&efx->mac_lock);
efx->mac_op = type->default_mac_ops;
efx->phy_op = &efx_dummy_phy_operations;
efx->mdio.dev = net_dev;
INIT_WORK(&efx->mac_work, efx_mac_work);
- atomic_set(&efx->netif_stop_count, 1);
for (i = 0; i < EFX_MAX_CHANNELS; i++) {
channel = &efx->channel[i];
channel->efx = efx;
channel->channel = i;
channel->work_pending = false;
+ spin_lock_init(&channel->tx_stop_lock);
+ atomic_set(&channel->tx_stop_count, 1);
}
- for (i = 0; i < EFX_TX_QUEUE_COUNT; i++) {
+ for (i = 0; i < EFX_MAX_TX_QUEUES; i++) {
tx_queue = &efx->tx_queue[i];
tx_queue->efx = efx;
tx_queue->queue = i;
@@ -2201,7 +2210,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
int i, rc;
/* Allocate and initialise a struct net_device and struct efx_nic */
- net_dev = alloc_etherdev(sizeof(*efx));
+ net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES);
if (!net_dev)
return -ENOMEM;
net_dev->features |= (type->offload_features | NETIF_F_SG |
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index 7eff0a615cb3..ffd708c5304a 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -35,8 +35,8 @@ efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
extern netdev_tx_t
efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
-extern void efx_stop_queue(struct efx_nic *efx);
-extern void efx_wake_queue(struct efx_nic *efx);
+extern void efx_stop_queue(struct efx_channel *channel);
+extern void efx_wake_queue(struct efx_channel *channel);
#define EFX_TXQ_SIZE 1024
#define EFX_TXQ_MASK (EFX_TXQ_SIZE - 1)
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index d9f9c02a928e..22026bfbc4c1 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -304,7 +304,7 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
{
struct efx_tx_queue *tx_queue;
- efx_for_each_tx_queue(tx_queue, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, &efx->channel[0]) {
efx_fill_test(test_index++, strings, data,
&lb_tests->tx_sent[tx_queue->queue],
EFX_TX_QUEUE_NAME(tx_queue),
@@ -647,7 +647,7 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
efx_for_each_tx_queue(tx_queue, efx) {
channel = tx_queue->channel;
if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) {
- if (channel->used_flags != EFX_USED_BY_RX_TX)
+ if (channel->channel < efx->n_rx_channels)
coalesce->tx_coalesce_usecs_irq =
channel->irq_moderation;
else
@@ -690,7 +690,7 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
/* If the channel is shared only allow RX parameters to be set */
efx_for_each_tx_queue(tx_queue, efx) {
- if ((tx_queue->channel->used_flags == EFX_USED_BY_RX_TX) &&
+ if ((tx_queue->channel->channel < efx->n_rx_channels) &&
tx_usecs) {
EFX_ERR(efx, "Channel is shared. "
"Only RX coalescing may be set\n");
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 08278e7302b3..655b697b45b2 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -175,16 +175,19 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
- /* Check to see if we have a serious error condition */
- syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
- if (unlikely(syserr))
- return efx_nic_fatal_interrupt(efx);
-
/* Determine interrupting queues, clear interrupt status
* register and acknowledge the device interrupt.
*/
BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EFX_MAX_CHANNELS);
queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q);
+
+ /* Check to see if we have a serious error condition */
+ if (queues & (1U << efx->fatal_irq_level)) {
+ syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
+ if (unlikely(syserr))
+ return efx_nic_fatal_interrupt(efx);
+ }
+
EFX_ZERO_OWORD(*int_ker);
wmb(); /* Ensure the vector is cleared before interrupt ack */
falcon_irq_ack_a1(efx);
@@ -504,6 +507,9 @@ static void falcon_reset_macs(struct efx_nic *efx)
/* Ensure the correct MAC is selected before statistics
* are re-enabled by the caller */
efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);
+
+ /* This can run even when the GMAC is selected */
+ falcon_setup_xaui(efx);
}
void falcon_drain_tx_fifo(struct efx_nic *efx)
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index 8ccab2c67a20..c84a2ce2ccbb 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -26,7 +26,7 @@
*************************************************************************/
/* Configure the XAUI driver that is an output from Falcon */
-static void falcon_setup_xaui(struct efx_nic *efx)
+void falcon_setup_xaui(struct efx_nic *efx)
{
efx_oword_t sdctl, txdrv;
@@ -85,14 +85,14 @@ int falcon_reset_xaui(struct efx_nic *efx)
return -ETIMEDOUT;
}
-static void falcon_mask_status_intr(struct efx_nic *efx, bool enable)
+static void falcon_ack_status_intr(struct efx_nic *efx)
{
efx_oword_t reg;
if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
return;
- /* We expect xgmii faults if the wireside link is up */
+ /* We expect xgmii faults if the wireside link is down */
if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up)
return;
@@ -101,14 +101,7 @@ static void falcon_mask_status_intr(struct efx_nic *efx, bool enable)
if (efx->xmac_poll_required)
return;
- /* Flush the ISR */
- if (enable)
- efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
-
- EFX_POPULATE_OWORD_2(reg,
- FRF_AB_XM_MSK_RMTFLT, !enable,
- FRF_AB_XM_MSK_LCLFLT, !enable);
- efx_writeo(efx, &reg, FR_AB_XM_MGT_INT_MASK);
+ efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
}
static bool falcon_xgxs_link_ok(struct efx_nic *efx)
@@ -283,15 +276,13 @@ static bool falcon_xmac_check_fault(struct efx_nic *efx)
static int falcon_reconfigure_xmac(struct efx_nic *efx)
{
- falcon_mask_status_intr(efx, false);
-
falcon_reconfigure_xgxs_core(efx);
falcon_reconfigure_xmac_core(efx);
falcon_reconfigure_mac_wrapper(efx);
efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
- falcon_mask_status_intr(efx, true);
+ falcon_ack_status_intr(efx);
return 0;
}
@@ -362,9 +353,8 @@ void falcon_poll_xmac(struct efx_nic *efx)
!efx->xmac_poll_required)
return;
- falcon_mask_status_intr(efx, false);
efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
- falcon_mask_status_intr(efx, true);
+ falcon_ack_status_intr(efx);
}
struct efx_mac_operations falcon_xmac_operations = {
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index c48669c77414..93cc3c1b9450 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -613,7 +613,7 @@ int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build)
}
if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) {
- rc = -EMSGSIZE;
+ rc = -EIO;
goto fail;
}
@@ -647,8 +647,10 @@ int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
outbuf, sizeof(outbuf), &outlen);
if (rc)
goto fail;
- if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN)
+ if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
+ rc = -EIO;
goto fail;
+ }
if (was_attached != NULL)
*was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
@@ -676,7 +678,7 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
goto fail;
if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LEN) {
- rc = -EMSGSIZE;
+ rc = -EIO;
goto fail;
}
@@ -738,8 +740,10 @@ int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
outbuf, sizeof(outbuf), &outlen);
if (rc)
goto fail;
- if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN)
+ if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) {
+ rc = -EIO;
goto fail;
+ }
*nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
return 0;
@@ -765,8 +769,10 @@ int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
outbuf, sizeof(outbuf), &outlen);
if (rc)
goto fail;
- if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN)
+ if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) {
+ rc = -EIO;
goto fail;
+ }
*size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
*erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
@@ -926,20 +932,26 @@ int efx_mcdi_nvram_test_all(struct efx_nic *efx)
rc = efx_mcdi_nvram_types(efx, &nvram_types);
if (rc)
- return rc;
+ goto fail1;
type = 0;
while (nvram_types != 0) {
if (nvram_types & 1) {
rc = efx_mcdi_nvram_test(efx, type);
if (rc)
- return rc;
+ goto fail2;
}
type++;
nvram_types >>= 1;
}
return 0;
+
+fail2:
+ EFX_ERR(efx, "%s: failed type=%u\n", __func__, type);
+fail1:
+ EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
}
static int efx_mcdi_read_assertion(struct efx_nic *efx)
@@ -968,7 +980,7 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx)
if (rc)
return rc;
if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
- return -EINVAL;
+ return -EIO;
/* Print out any recorded assertion state */
flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
@@ -1086,7 +1098,7 @@ int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
goto fail;
if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
- rc = -EMSGSIZE;
+ rc = -EIO;
goto fail;
}
@@ -1121,7 +1133,7 @@ int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
goto fail;
if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
- rc = -EMSGSIZE;
+ rc = -EIO;
goto fail;
}
diff --git a/drivers/net/sfc/mcdi_mac.c b/drivers/net/sfc/mcdi_mac.c
index 06d24a1e412a..39182631ac92 100644
--- a/drivers/net/sfc/mcdi_mac.c
+++ b/drivers/net/sfc/mcdi_mac.c
@@ -80,7 +80,7 @@ int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
u8 inbuf[MC_CMD_MAC_STATS_IN_LEN];
int rc;
efx_dword_t *cmd_ptr;
- int period = 1000;
+ int period = enable ? 1000 : 0;
u32 addr_hi;
u32 addr_lo;
@@ -92,21 +92,14 @@ int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_LO, addr_lo);
MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_HI, addr_hi);
cmd_ptr = (efx_dword_t *)MCDI_PTR(inbuf, MAC_STATS_IN_CMD);
- if (enable)
- EFX_POPULATE_DWORD_6(*cmd_ptr,
- MC_CMD_MAC_STATS_CMD_DMA, 1,
- MC_CMD_MAC_STATS_CMD_CLEAR, clear,
- MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE, 1,
- MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE, 1,
- MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR, 0,
- MC_CMD_MAC_STATS_CMD_PERIOD_MS, period);
- else
- EFX_POPULATE_DWORD_5(*cmd_ptr,
- MC_CMD_MAC_STATS_CMD_DMA, 0,
- MC_CMD_MAC_STATS_CMD_CLEAR, clear,
- MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE, 1,
- MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE, 0,
- MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR, 0);
+ EFX_POPULATE_DWORD_7(*cmd_ptr,
+ MC_CMD_MAC_STATS_CMD_DMA, !!enable,
+ MC_CMD_MAC_STATS_CMD_CLEAR, clear,
+ MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE, 1,
+ MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE, !!enable,
+ MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR, 0,
+ MC_CMD_MAC_STATS_CMD_PERIODIC_NOEVENT, 1,
+ MC_CMD_MAC_STATS_CMD_PERIOD_MS, period);
MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
diff --git a/drivers/net/sfc/mcdi_pcol.h b/drivers/net/sfc/mcdi_pcol.h
index bd59302695b3..90359e644006 100644
--- a/drivers/net/sfc/mcdi_pcol.h
+++ b/drivers/net/sfc/mcdi_pcol.h
@@ -863,7 +863,7 @@
* bist output. The driver should only consume the BIST output
* after validating OUTLEN and PHY_CFG.PHY_TYPE.
*
- * If a driver can't succesfully parse the BIST output, it should
+ * If a driver can't successfully parse the BIST output, it should
* still respect the pass/Fail in OUT.RESULT
*
* Locks required: PHY_LOCK if doing a PHY BIST
@@ -872,7 +872,7 @@
#define MC_CMD_POLL_BIST 0x26
#define MC_CMD_POLL_BIST_IN_LEN 0
#define MC_CMD_POLL_BIST_OUT_LEN UNKNOWN
-#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 40
+#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36
#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
#define MC_CMD_POLL_BIST_RUNNING 1
@@ -882,15 +882,14 @@
/* Generic: */
#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
/* SFT9001-specific: */
-/* (offset 4 unused?) */
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 8
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 12
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 16
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 20
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 24
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 28
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 32
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 36
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32
#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 1
#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 2
#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 3
@@ -1054,9 +1053,13 @@
/* MC_CMD_PHY_STATS:
* Get generic PHY statistics
*
- * This call returns the statistics for a generic PHY, by direct DMA
- * into host memory, in a sparse array (indexed by the enumerate).
- * Each value is represented by a 32bit number.
+ * This call returns the statistics for a generic PHY in a sparse
+ * array (indexed by the enumerate). Each value is represented by
+ * a 32bit number.
+ *
+ * If the DMA_ADDR is 0, then no DMA is performed, and the statistics
+ * may be read directly out of shared memory. If DMA_ADDR != 0, then
+ * the statistics are DMAed to that (page-aligned) location.
*
* Locks required: None
* Returns: 0, ETIME
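A hypothetical request built only from the constants above, illustrating the DMA_ADDR == 0 (no DMA) case; MCDI_SET_DWORD() and efx_mcdi_rpc() are assumed to behave as in the driver hunks elsewhere in this patch:

static int example_phy_stats_no_dma(struct efx_nic *efx, u8 *stats_out)
{
	u8 inbuf[MC_CMD_PHY_STATS_IN_LEN];
	size_t outlen;

	/* DMA_ADDR == 0: no DMA; statistics come back via the MCDI response */
	MCDI_SET_DWORD(inbuf, PHY_STATS_IN_DMA_ADDR_LO, 0);
	MCDI_SET_DWORD(inbuf, PHY_STATS_IN_DMA_ADDR_HI, 0);
	return efx_mcdi_rpc(efx, MC_CMD_PHY_STATS, inbuf, sizeof(inbuf),
			    stats_out, MC_CMD_PHY_STATS_OUT_NO_DMA_LEN, &outlen);
}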
@@ -1066,7 +1069,8 @@
#define MC_CMD_PHY_STATS_IN_LEN 8
#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0
#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_OFST 4
-#define MC_CMD_PHY_STATS_OUT_LEN 0
+#define MC_CMD_PHY_STATS_OUT_DMA_LEN 0
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_LEN (MC_CMD_PHY_NSTATS * 4)
/* Unified MAC statistics enumeration */
#define MC_CMD_MAC_GENERATION_START 0
@@ -1158,11 +1162,13 @@
#define MC_CMD_MAC_STATS_CMD_CLEAR_WIDTH 1
#define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_LBN 2
#define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_WIDTH 1
-/* Fields only relevent when PERIODIC_CHANGE is set */
+/* Remaining PERIOD* fields only relevant when PERIODIC_CHANGE is set */
#define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_LBN 3
#define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_WIDTH 1
#define MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR_LBN 4
#define MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_CMD_PERIODIC_NOEVENT_LBN 5
+#define MC_CMD_MAC_STATS_CMD_PERIODIC_NOEVENT_WIDTH 1
#define MC_CMD_MAC_STATS_CMD_PERIOD_MS_LBN 16
#define MC_CMD_MAC_STATS_CMD_PERIOD_MS_WIDTH 16
#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
@@ -1729,6 +1735,39 @@
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4 /* output bits */
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8 /* dirs: 0=out, 1=in */
+/* MC_CMD_TEST_HACK: (debug (unsurprisingly))
+ * Change bits of network port state for test purposes in ways that would never be
+ * useful in normal operation and so need a special command to change. */
+#define MC_CMD_TEST_HACK 0x2f
+#define MC_CMD_TEST_HACK_IN_LEN 8
+#define MC_CMD_TEST_HACK_IN_TXPAD_OFST 0
+#define MC_CMD_TEST_HACK_IN_TXPAD_AUTO 0 /* Let the MC manage things */
+#define MC_CMD_TEST_HACK_IN_TXPAD_ON 1 /* Force on */
+#define MC_CMD_TEST_HACK_IN_TXPAD_OFF 2 /* Force off */
+#define MC_CMD_TEST_HACK_IN_IPG_OFST 4 /* Takes a value in bits */
+#define MC_CMD_TEST_HACK_IN_IPG_AUTO 0 /* The MC picks the value */
+#define MC_CMD_TEST_HACK_OUT_LEN 0
+
+/* MC_CMD_SENSOR_SET_LIMS: (debug) (mostly) adjust the sensor limits. This
+ * is a warranty-voiding operation.
+ *
+ * IN: sensor identifier (one of the enumeration starting with MC_CMD_SENSOR_CONTROLLER_TEMP),
+ * followed by 4 32-bit values: min(warning), max(warning), min(fatal), max(fatal). Which
+ * of these limits are meaningful and what their interpretation is is sensor-specific.
+ *
+ * OUT: nothing
+ *
+ * Returns: ENOENT if the sensor specified does not exist, EINVAL if the limits are
+ * out of range.
+ */
+#define MC_CMD_SENSOR_SET_LIMS 0x4e
+#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
+#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
+
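A hypothetical sketch of building the 20-byte request described above; the limit values are illustrative only, and MCDI_SET_DWORD()/efx_mcdi_rpc() are assumed to work as in the rest of this patch:

static int example_set_temp_limits(struct efx_nic *efx)
{
	u8 inbuf[MC_CMD_SENSOR_SET_LIMS_IN_LEN];

	MCDI_SET_DWORD(inbuf, SENSOR_SET_LIMS_IN_SENSOR,
		       MC_CMD_SENSOR_CONTROLLER_TEMP);
	MCDI_SET_DWORD(inbuf, SENSOR_SET_LIMS_IN_LOW0, 10);	/* min (warning) */
	MCDI_SET_DWORD(inbuf, SENSOR_SET_LIMS_IN_HI0, 85);	/* max (warning) */
	MCDI_SET_DWORD(inbuf, SENSOR_SET_LIMS_IN_LOW1, 0);	/* min (fatal) */
	MCDI_SET_DWORD(inbuf, SENSOR_SET_LIMS_IN_HI1, 100);	/* max (fatal) */
	return efx_mcdi_rpc(efx, MC_CMD_SENSOR_SET_LIMS, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);	/* ENOENT/EINVAL on bad input */
}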
/* Do NOT add new commands beyond 0x4f as part of 3.0 : 0x50 - 0x7f will be
* used for post-3.0 extensions. If you run out of space, look for gaps or
* commands that are unused in the existing range. */
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index 2f2354696663..6032c0e1f1f8 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -17,6 +17,8 @@
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "mdio_10g.h"
+#include "nic.h"
+#include "selftest.h"
struct efx_mcdi_phy_cfg {
u32 flags;
@@ -48,7 +50,7 @@ efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_cfg *cfg)
goto fail;
if (outlen < MC_CMD_GET_PHY_CFG_OUT_LEN) {
- rc = -EMSGSIZE;
+ rc = -EIO;
goto fail;
}
@@ -111,7 +113,7 @@ static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
goto fail;
if (outlen < MC_CMD_GET_LOOPBACK_MODES_OUT_LEN) {
- rc = -EMSGSIZE;
+ rc = -EIO;
goto fail;
}
@@ -587,13 +589,153 @@ static int efx_mcdi_phy_test_alive(struct efx_nic *efx)
return rc;
if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN)
- return -EMSGSIZE;
+ return -EIO;
if (MCDI_DWORD(outbuf, GET_PHY_STATE_STATE) != MC_CMD_PHY_STATE_OK)
return -EINVAL;
return 0;
}
+static const char *const mcdi_sft9001_cable_diag_names[] = {
+ "cable.pairA.length",
+ "cable.pairB.length",
+ "cable.pairC.length",
+ "cable.pairD.length",
+ "cable.pairA.status",
+ "cable.pairB.status",
+ "cable.pairC.status",
+ "cable.pairD.status",
+};
+
+static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode,
+ int *results)
+{
+ unsigned int retry, i, count = 0;
+ size_t outlen;
+ u32 status;
+ u8 *buf, *ptr;
+ int rc;
+
+ buf = kzalloc(0x100, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ BUILD_BUG_ON(MC_CMD_START_BIST_OUT_LEN != 0);
+ MCDI_SET_DWORD(buf, START_BIST_IN_TYPE, bist_mode);
+ rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST, buf, MC_CMD_START_BIST_IN_LEN,
+ NULL, 0, NULL);
+ if (rc)
+ goto out;
+
+ /* Wait up to 10s for BIST to finish */
+ for (retry = 0; retry < 100; ++retry) {
+ BUILD_BUG_ON(MC_CMD_POLL_BIST_IN_LEN != 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
+ buf, 0x100, &outlen);
+ if (rc)
+ goto out;
+
+ status = MCDI_DWORD(buf, POLL_BIST_OUT_RESULT);
+ if (status != MC_CMD_POLL_BIST_RUNNING)
+ goto finished;
+
+ msleep(100);
+ }
+
+ rc = -ETIMEDOUT;
+ goto out;
+
+finished:
+ results[count++] = (status == MC_CMD_POLL_BIST_PASSED) ? 1 : -1;
+
+ /* SFT9001 specific cable diagnostics output */
+ if (efx->phy_type == PHY_TYPE_SFT9001B &&
+ (bist_mode == MC_CMD_PHY_BIST_CABLE_SHORT ||
+ bist_mode == MC_CMD_PHY_BIST_CABLE_LONG)) {
+ ptr = MCDI_PTR(buf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A);
+ if (status == MC_CMD_POLL_BIST_PASSED &&
+ outlen >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN) {
+ for (i = 0; i < 8; i++) {
+ results[count + i] =
+ EFX_DWORD_FIELD(((efx_dword_t *)ptr)[i],
+ EFX_DWORD_0);
+ }
+ }
+ count += 8;
+ }
+ rc = count;
+
+out:
+ kfree(buf);
+
+ return rc;
+}
+
+static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
+ unsigned flags)
+{
+ struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+ u32 mode;
+ int rc;
+
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) {
+ rc = efx_mcdi_bist(efx, MC_CMD_PHY_BIST, results);
+ if (rc < 0)
+ return rc;
+
+ results += rc;
+ }
+
+ /* If we support both LONG and SHORT, run LONG for offline (link-breaking)
+ * tests and SHORT otherwise; if only one is supported, run that one. */
+ mode = 0;
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN)) {
+ if ((flags & ETH_TEST_FL_OFFLINE) &&
+ (phy_cfg->flags &
+ (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN)))
+ mode = MC_CMD_PHY_BIST_CABLE_LONG;
+ else
+ mode = MC_CMD_PHY_BIST_CABLE_SHORT;
+ } else if (phy_cfg->flags &
+ (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN))
+ mode = MC_CMD_PHY_BIST_CABLE_LONG;
+
+ if (mode != 0) {
+ rc = efx_mcdi_bist(efx, mode, results);
+ if (rc < 0)
+ return rc;
+ results += rc;
+ }
+
+ return 0;
+}
+
+const char *efx_mcdi_phy_test_name(struct efx_nic *efx, unsigned int index)
+{
+ struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) {
+ if (index == 0)
+ return "bist";
+ --index;
+ }
+
+ if (phy_cfg->flags & ((1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN) |
+ (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN))) {
+ if (index == 0)
+ return "cable";
+ --index;
+
+ if (efx->phy_type == PHY_TYPE_SFT9001B) {
+ if (index < ARRAY_SIZE(mcdi_sft9001_cable_diag_names))
+ return mcdi_sft9001_cable_diag_names[index];
+ index -= ARRAY_SIZE(mcdi_sft9001_cable_diag_names);
+ }
+ }
+
+ return NULL;
+}
+
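A small usage sketch (not from the patch): the self-test layer can size its result array by walking the name callback until it returns NULL, which matches how this function exhausts its index.

static unsigned int example_count_phy_tests(struct efx_nic *efx)
{
	unsigned int n = 0;

	while (efx_mcdi_phy_test_name(efx, n) != NULL)
		n++;	/* one result slot per name reported */
	return n;
}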
struct efx_phy_operations efx_mcdi_phy_ops = {
.probe = efx_mcdi_phy_probe,
.init = efx_port_dummy_op_int,
@@ -604,6 +746,6 @@ struct efx_phy_operations efx_mcdi_phy_ops = {
.get_settings = efx_mcdi_phy_get_settings,
.set_settings = efx_mcdi_phy_set_settings,
.test_alive = efx_mcdi_phy_test_alive,
- .run_tests = NULL,
- .test_name = NULL,
+ .run_tests = efx_mcdi_phy_run_tests,
+ .test_name = efx_mcdi_phy_test_name,
};
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index cb018e272097..2e6fd89f2a72 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -85,9 +85,13 @@ do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0)
#define EFX_MAX_CHANNELS 32
#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
-#define EFX_TX_QUEUE_OFFLOAD_CSUM 0
-#define EFX_TX_QUEUE_NO_CSUM 1
-#define EFX_TX_QUEUE_COUNT 2
+/* Checksum generation is a per-queue option in hardware, so each
+ * queue visible to the networking core is backed by two hardware TX
+ * queues. */
+#define EFX_MAX_CORE_TX_QUEUES EFX_MAX_CHANNELS
+#define EFX_TXQ_TYPE_OFFLOAD 1
+#define EFX_TXQ_TYPES 2
+#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CORE_TX_QUEUES)
/**
* struct efx_special_buffer - An Efx special buffer
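A minimal sketch (not part of the patch) of how a core queue index and a checksum-offload request map onto the two backing hardware queues, using only the constants defined in the hunk above:

static unsigned int example_hw_txq(unsigned int core_queue, bool offload_csum)
{
	unsigned int hw_queue = core_queue * EFX_TXQ_TYPES;

	if (offload_csum)
		hw_queue += EFX_TXQ_TYPE_OFFLOAD;	/* second queue of the pair */
	return hw_queue;
}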
@@ -187,7 +191,7 @@ struct efx_tx_buffer {
struct efx_tx_queue {
/* Members which don't change on the fast path */
struct efx_nic *efx ____cacheline_aligned_in_smp;
- int queue;
+ unsigned queue;
struct efx_channel *channel;
struct efx_nic *nic;
struct efx_tx_buffer *buffer;
@@ -306,11 +310,6 @@ struct efx_buffer {
};
-/* Flags for channel->used_flags */
-#define EFX_USED_BY_RX 1
-#define EFX_USED_BY_TX 2
-#define EFX_USED_BY_RX_TX (EFX_USED_BY_RX | EFX_USED_BY_TX)
-
enum efx_rx_alloc_method {
RX_ALLOC_METHOD_AUTO = 0,
RX_ALLOC_METHOD_SKB = 1,
@@ -327,7 +326,6 @@ enum efx_rx_alloc_method {
* @efx: Associated Efx NIC
* @channel: Channel instance number
* @name: Name for channel and IRQ
- * @used_flags: Channel is used by net driver
* @enabled: Channel enabled indicator
* @irq: IRQ number (MSI and MSI-X only)
* @irq_moderation: IRQ moderation value (in hardware ticks)
@@ -352,12 +350,14 @@ enum efx_rx_alloc_method {
* @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
* @n_rx_overlength: Count of RX_OVERLENGTH errors
* @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
+ * @tx_queue: Pointer to first TX queue, or %NULL if not used for TX
+ * @tx_stop_count: Core TX queue stop count
+ * @tx_stop_lock: Core TX queue stop lock
*/
struct efx_channel {
struct efx_nic *efx;
int channel;
char name[IFNAMSIZ + 6];
- int used_flags;
bool enabled;
int irq;
unsigned int irq_moderation;
@@ -389,6 +389,9 @@ struct efx_channel {
struct efx_rx_buffer *rx_pkt;
bool rx_pkt_csummed;
+ struct efx_tx_queue *tx_queue;
+ atomic_t tx_stop_count;
+ spinlock_t tx_stop_lock;
};
enum efx_led_mode {
@@ -661,8 +664,9 @@ union efx_multicast_hash {
* @rx_queue: RX DMA queues
* @channel: Channels
* @next_buffer_table: First available buffer table id
- * @n_rx_queues: Number of RX queues
* @n_channels: Number of channels in use
+ * @n_rx_channels: Number of channels used for RX (= number of RX queues)
+ * @n_tx_channels: Number of channels used for TX
* @rx_buffer_len: RX buffer length
* @rx_buffer_order: Order (log2) of number of pages for each RX buffer
* @int_error_count: Number of internal errors seen recently
@@ -672,6 +676,8 @@ union efx_multicast_hash {
* This register is written with the SMP processor ID whenever an
* interrupt is handled. It is used by efx_nic_test_interrupt()
* to verify that an interrupt has occurred.
+ * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
+ * @fatal_irq_level: IRQ level (bit number) used for serious errors
* @spi_flash: SPI flash device
* This field will be %NULL if no flash device is present (or for Siena).
* @spi_eeprom: SPI EEPROM device
@@ -691,8 +697,6 @@ union efx_multicast_hash {
* @port_initialized: Port initialized?
* @net_dev: Operating system network device. Consider holding the rtnl lock
* @rx_checksum_enabled: RX checksumming enabled
- * @netif_stop_count: Port stop count
- * @netif_stop_lock: Port stop lock
* @mac_stats: MAC statistics. These include all statistics the MACs
* can provide. Generic code converts these into a standard
* &struct net_device_stats.
@@ -740,13 +744,14 @@ struct efx_nic {
enum nic_state state;
enum reset_type reset_pending;
- struct efx_tx_queue tx_queue[EFX_TX_QUEUE_COUNT];
+ struct efx_tx_queue tx_queue[EFX_MAX_TX_QUEUES];
struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES];
struct efx_channel channel[EFX_MAX_CHANNELS];
unsigned next_buffer_table;
- int n_rx_queues;
- int n_channels;
+ unsigned n_channels;
+ unsigned n_rx_channels;
+ unsigned n_tx_channels;
unsigned int rx_buffer_len;
unsigned int rx_buffer_order;
@@ -755,7 +760,8 @@ struct efx_nic {
struct efx_buffer irq_status;
volatile signed int last_irq_cpu;
- unsigned long irq_zero_count;
+ unsigned irq_zero_count;
+ unsigned fatal_irq_level;
struct efx_spi_device *spi_flash;
struct efx_spi_device *spi_eeprom;
@@ -777,9 +783,6 @@ struct efx_nic {
struct net_device *net_dev;
bool rx_checksum_enabled;
- atomic_t netif_stop_count;
- spinlock_t netif_stop_lock;
-
struct efx_mac_stats mac_stats;
struct efx_buffer stats_buffer;
spinlock_t stats_lock;
@@ -924,40 +927,35 @@ struct efx_nic_type {
/* Iterate over all used channels */
#define efx_for_each_channel(_channel, _efx) \
- for (_channel = &_efx->channel[0]; \
- _channel < &_efx->channel[EFX_MAX_CHANNELS]; \
- _channel++) \
- if (!_channel->used_flags) \
- continue; \
- else
+ for (_channel = &((_efx)->channel[0]); \
+ _channel < &((_efx)->channel[(efx)->n_channels]); \
+ _channel++)
/* Iterate over all used TX queues */
#define efx_for_each_tx_queue(_tx_queue, _efx) \
- for (_tx_queue = &_efx->tx_queue[0]; \
- _tx_queue < &_efx->tx_queue[EFX_TX_QUEUE_COUNT]; \
+ for (_tx_queue = &((_efx)->tx_queue[0]); \
+ _tx_queue < &((_efx)->tx_queue[EFX_TXQ_TYPES * \
+ (_efx)->n_tx_channels]); \
_tx_queue++)
/* Iterate over all TX queues belonging to a channel */
#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \
- for (_tx_queue = &_channel->efx->tx_queue[0]; \
- _tx_queue < &_channel->efx->tx_queue[EFX_TX_QUEUE_COUNT]; \
- _tx_queue++) \
- if (_tx_queue->channel != _channel) \
- continue; \
- else
+ for (_tx_queue = (_channel)->tx_queue; \
+ _tx_queue && _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
+ _tx_queue++)
/* Iterate over all used RX queues */
#define efx_for_each_rx_queue(_rx_queue, _efx) \
- for (_rx_queue = &_efx->rx_queue[0]; \
- _rx_queue < &_efx->rx_queue[_efx->n_rx_queues]; \
+ for (_rx_queue = &((_efx)->rx_queue[0]); \
+ _rx_queue < &((_efx)->rx_queue[(_efx)->n_rx_channels]); \
_rx_queue++)
/* Iterate over all RX queues belonging to a channel */
#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \
- for (_rx_queue = &_channel->efx->rx_queue[_channel->channel]; \
+ for (_rx_queue = &((_channel)->efx->rx_queue[(_channel)->channel]); \
_rx_queue; \
_rx_queue = NULL) \
- if (_rx_queue->channel != _channel) \
+ if (_rx_queue->channel != (_channel)) \
continue; \
else
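Usage sketch for the rewritten iterators, assuming only the macros and fields shown above:

static void example_log_tx_queues(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	efx_for_each_channel(channel, efx)
		efx_for_each_channel_tx_queue(tx_queue, channel)
			EFX_LOG(efx, "channel %d owns TX queue %u\n",
				channel->channel, tx_queue->queue);
}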
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index b06f8e348307..5d3aaec58556 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -418,7 +418,7 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
FRF_BZ_TX_NON_IP_DROP_DIS, 1);
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
- int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM;
+ int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
!csum);
@@ -431,10 +431,10 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
efx_oword_t reg;
/* Only 128 bits in this register */
- BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128);
+ BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
- if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM)
+ if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
clear_bit_le(tx_queue->queue, (void *)&reg);
else
set_bit_le(tx_queue->queue, (void *)&reg);
@@ -654,22 +654,23 @@ void efx_generate_event(struct efx_channel *channel, efx_qword_t *event)
* The NIC batches TX completion events; the message we receive is of
* the form "complete all TX events up to this index".
*/
-static void
+static int
efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
unsigned int tx_ev_desc_ptr;
unsigned int tx_ev_q_label;
struct efx_tx_queue *tx_queue;
struct efx_nic *efx = channel->efx;
+ int tx_packets = 0;
if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
/* Transmit completion */
tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
tx_queue = &efx->tx_queue[tx_ev_q_label];
- channel->irq_mod_score +=
- (tx_ev_desc_ptr - tx_queue->read_count) &
- EFX_TXQ_MASK;
+ tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
+ EFX_TXQ_MASK);
+ channel->irq_mod_score += tx_packets;
efx_xmit_done(tx_queue, tx_ev_desc_ptr);
} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
/* Rewrite the FIFO write pointer */
@@ -689,6 +690,8 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
EFX_QWORD_FMT"\n", channel->channel,
EFX_QWORD_VAL(*event));
}
+
+ return tx_packets;
}
/* Detect errors included in the rx_evt_pkt_ok bit. */
@@ -947,16 +950,17 @@ efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
}
}
-int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota)
+int efx_nic_process_eventq(struct efx_channel *channel, int budget)
{
unsigned int read_ptr;
efx_qword_t event, *p_event;
int ev_code;
- int rx_packets = 0;
+ int tx_packets = 0;
+ int spent = 0;
read_ptr = channel->eventq_read_ptr;
- do {
+ for (;;) {
p_event = efx_event(channel, read_ptr);
event = *p_event;
@@ -970,15 +974,23 @@ int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota)
/* Clear this event by marking it all ones */
EFX_SET_QWORD(*p_event);
+ /* Increment read pointer */
+ read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
+
ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
switch (ev_code) {
case FSE_AZ_EV_CODE_RX_EV:
efx_handle_rx_event(channel, &event);
- ++rx_packets;
+ if (++spent == budget)
+ goto out;
break;
case FSE_AZ_EV_CODE_TX_EV:
- efx_handle_tx_event(channel, &event);
+ tx_packets += efx_handle_tx_event(channel, &event);
+ if (tx_packets >= EFX_TXQ_SIZE) {
+ spent = budget;
+ goto out;
+ }
break;
case FSE_AZ_EV_CODE_DRV_GEN_EV:
channel->eventq_magic = EFX_QWORD_FIELD(
@@ -1001,14 +1013,11 @@ int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota)
" (data " EFX_QWORD_FMT ")\n", channel->channel,
ev_code, EFX_QWORD_VAL(event));
}
+ }
- /* Increment read pointer */
- read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
-
- } while (rx_packets < rx_quota);
-
+out:
channel->eventq_read_ptr = read_ptr;
- return rx_packets;
+ return spent;
}
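A sketch of how a NAPI poll handler might consume the new budget-based return value; the channel->napi_str member and the exact completion sequence are assumptions, not taken from this patch:

static int example_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);	/* assumed member */
	int spent = efx_nic_process_eventq(channel, budget);

	if (spent < budget) {
		/* Event queue drained within budget: complete NAPI and re-arm */
		napi_complete(napi);
		efx_nic_eventq_read_ack(channel);
	}
	return spent;
}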
@@ -1123,7 +1132,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
ev_queue = EFX_QWORD_FIELD(*event,
FSF_AZ_DRIVER_EV_SUBDATA);
- if (ev_queue < EFX_TX_QUEUE_COUNT) {
+ if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) {
tx_queue = efx->tx_queue + ev_queue;
tx_queue->flushed = FLUSH_DONE;
}
@@ -1133,7 +1142,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
ev_failed = EFX_QWORD_FIELD(
*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
- if (ev_queue < efx->n_rx_queues) {
+ if (ev_queue < efx->n_rx_channels) {
rx_queue = efx->rx_queue + ev_queue;
rx_queue->flushed =
ev_failed ? FLUSH_FAILED : FLUSH_DONE;
@@ -1229,15 +1238,9 @@ static inline void efx_nic_interrupts(struct efx_nic *efx,
bool enabled, bool force)
{
efx_oword_t int_en_reg_ker;
- unsigned int level = 0;
-
- if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
- /* Set the level always even if we're generating a test
- * interrupt, because our legacy interrupt handler is safe */
- level = 0x1f;
EFX_POPULATE_OWORD_3(int_en_reg_ker,
- FRF_AZ_KER_INT_LEVE_SEL, level,
+ FRF_AZ_KER_INT_LEVE_SEL, efx->fatal_irq_level,
FRF_AZ_KER_INT_KER, force,
FRF_AZ_DRV_INT_EN_KER, enabled);
efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
@@ -1291,11 +1294,10 @@ irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
EFX_OWORD_VAL(fatal_intr),
error ? "disabling bus mastering" : "no recognised error");
- if (error == 0)
- goto out;
/* If this is a memory parity error dump which blocks are offending */
- mem_perr = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER);
+ mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
+ EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
if (mem_perr) {
efx_oword_t reg;
efx_reado(efx, &reg, FR_AZ_MEM_STAT);
@@ -1324,7 +1326,7 @@ irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
"NIC will be disabled\n");
efx_schedule_reset(efx, RESET_TYPE_DISABLE);
}
-out:
+
return IRQ_HANDLED;
}
@@ -1346,9 +1348,11 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
queues = EFX_EXTRACT_DWORD(reg, 0, 31);
/* Check to see if we have a serious error condition */
- syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
- if (unlikely(syserr))
- return efx_nic_fatal_interrupt(efx);
+ if (queues & (1U << efx->fatal_irq_level)) {
+ syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
+ if (unlikely(syserr))
+ return efx_nic_fatal_interrupt(efx);
+ }
if (queues != 0) {
if (EFX_WORKAROUND_15783(efx))
@@ -1362,33 +1366,28 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
}
result = IRQ_HANDLED;
- } else if (EFX_WORKAROUND_15783(efx) &&
- efx->irq_zero_count++ == 0) {
+ } else if (EFX_WORKAROUND_15783(efx)) {
efx_qword_t *event;
- /* Ensure we rearm all event queues */
+ /* We can't return IRQ_HANDLED more than once on seeing ISR=0
+ * because this might be a shared interrupt. */
+ if (efx->irq_zero_count++ == 0)
+ result = IRQ_HANDLED;
+
+ /* Ensure we schedule or rearm all event queues */
efx_for_each_channel(channel, efx) {
event = efx_event(channel, channel->eventq_read_ptr);
if (efx_event_present(event))
efx_schedule_channel(channel);
+ else
+ efx_nic_eventq_read_ack(channel);
}
-
- result = IRQ_HANDLED;
}
if (result == IRQ_HANDLED) {
efx->last_irq_cpu = raw_smp_processor_id();
EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
- } else if (EFX_WORKAROUND_15783(efx)) {
- /* We can't return IRQ_HANDLED more than once on seeing ISR0=0
- * because this might be a shared interrupt, but we do need to
- * check the channel every time and preemptively rearm it if
- * it's idle. */
- efx_for_each_channel(channel, efx) {
- if (!channel->work_pending)
- efx_nic_eventq_read_ack(channel);
- }
}
return result;
@@ -1413,9 +1412,11 @@ static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
/* Check to see if we have a serious error condition */
- syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
- if (unlikely(syserr))
- return efx_nic_fatal_interrupt(efx);
+ if (channel->channel == efx->fatal_irq_level) {
+ syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
+ if (unlikely(syserr))
+ return efx_nic_fatal_interrupt(efx);
+ }
/* Schedule processing of the channel */
efx_schedule_channel(channel);
@@ -1440,7 +1441,7 @@ static void efx_setup_rss_indir_table(struct efx_nic *efx)
offset < FR_BZ_RX_INDIRECTION_TBL + 0x800;
offset += 0x10) {
EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
- i % efx->n_rx_queues);
+ i % efx->n_rx_channels);
efx_writed(efx, &dword, offset);
i++;
}
@@ -1553,6 +1554,13 @@ void efx_nic_init_common(struct efx_nic *efx)
FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
+ if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
+ /* Use an interrupt level unused by event queues */
+ efx->fatal_irq_level = 0x1f;
+ else
+ /* Use a valid MSI-X vector */
+ efx->fatal_irq_level = 0;
+
/* Enable all the genuinely fatal interrupts. (They are still
* masked by the overall interrupt mask, controlled by
* falcon_interrupts()).
@@ -1563,6 +1571,8 @@ void efx_nic_init_common(struct efx_nic *efx)
FRF_AZ_ILL_ADR_INT_KER_EN, 1,
FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
+ EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
EFX_INVERT_OWORD(temp);
efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index 3166bafdfbef..bbc2c0c2f843 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -135,12 +135,14 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
* @fw_build: Firmware build number
* @mcdi: Management-Controller-to-Driver Interface
* @wol_filter_id: Wake-on-LAN packet filter id
+ * @ipv6_rss_key: Toeplitz hash key for IPv6 RSS
*/
struct siena_nic_data {
u64 fw_version;
u32 fw_build;
struct efx_mcdi_iface mcdi;
int wol_filter_id;
+ u8 ipv6_rss_key[40];
};
extern void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len);
@@ -203,6 +205,7 @@ extern void falcon_irq_ack_a1(struct efx_nic *efx);
extern int efx_nic_flush_queues(struct efx_nic *efx);
extern void falcon_start_nic_stats(struct efx_nic *efx);
extern void falcon_stop_nic_stats(struct efx_nic *efx);
+extern void falcon_setup_xaui(struct efx_nic *efx);
extern int falcon_reset_xaui(struct efx_nic *efx);
extern void efx_nic_init_common(struct efx_nic *efx);
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 0106b1d9aae2..371e86cc090f 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -616,10 +616,10 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
goto out;
}
- /* Test every TX queue */
- efx_for_each_tx_queue(tx_queue, efx) {
- state->offload_csum = (tx_queue->queue ==
- EFX_TX_QUEUE_OFFLOAD_CSUM);
+ /* Test both types of TX queue */
+ efx_for_each_channel_tx_queue(tx_queue, &efx->channel[0]) {
+ state->offload_csum = (tx_queue->queue &
+ EFX_TXQ_TYPE_OFFLOAD);
rc = efx_test_loopback(tx_queue,
&tests->loopback[mode]);
if (rc)
diff --git a/drivers/net/sfc/selftest.h b/drivers/net/sfc/selftest.h
index 643bef72b99d..aed495a4dad7 100644
--- a/drivers/net/sfc/selftest.h
+++ b/drivers/net/sfc/selftest.h
@@ -18,8 +18,8 @@
*/
struct efx_loopback_self_tests {
- int tx_sent[EFX_TX_QUEUE_COUNT];
- int tx_done[EFX_TX_QUEUE_COUNT];
+ int tx_sent[EFX_TXQ_TYPES];
+ int tx_done[EFX_TXQ_TYPES];
int rx_good;
int rx_bad;
};
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index e0c46f59d1f8..727b4228e081 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -13,6 +13,7 @@
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/random.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
@@ -274,6 +275,9 @@ static int siena_probe_nic(struct efx_nic *efx)
goto fail5;
}
+ get_random_bytes(&nic_data->ipv6_rss_key,
+ sizeof(nic_data->ipv6_rss_key));
+
return 0;
fail5:
@@ -293,6 +297,7 @@ fail1:
*/
static int siena_init_nic(struct efx_nic *efx)
{
+ struct siena_nic_data *nic_data = efx->nic_data;
efx_oword_t temp;
int rc;
@@ -319,6 +324,20 @@ static int siena_init_nic(struct efx_nic *efx)
EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1);
efx_writeo(efx, &temp, FR_AZ_RX_CFG);
+ /* Enable IPv6 RSS */
+ BUILD_BUG_ON(sizeof(nic_data->ipv6_rss_key) !=
+ 2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 ||
+ FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0);
+ memcpy(&temp, nic_data->ipv6_rss_key, sizeof(temp));
+ efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
+ memcpy(&temp, nic_data->ipv6_rss_key + sizeof(temp), sizeof(temp));
+ efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
+ EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1,
+ FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1);
+ memcpy(&temp, nic_data->ipv6_rss_key + 2 * sizeof(temp),
+ FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
+ efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
+
if (efx_nic_rx_xoff_thresh >= 0 || efx_nic_rx_xon_thresh >= 0)
/* No MCDI operation has been defined to set thresholds */
EFX_ERR(efx, "ignoring RX flow control thresholds\n");
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index be0e110a1f73..6bb12a87ef2d 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -30,32 +30,46 @@
*/
#define EFX_TXQ_THRESHOLD (EFX_TXQ_MASK / 2u)
-/* We want to be able to nest calls to netif_stop_queue(), since each
- * channel can have an individual stop on the queue.
- */
-void efx_stop_queue(struct efx_nic *efx)
+/* We need to be able to nest calls to netif_tx_stop_queue(), partly
+ * because of the 2 hardware queues associated with each core queue,
+ * but also so that we can inhibit TX for reasons other than a full
+ * hardware queue. */
+void efx_stop_queue(struct efx_channel *channel)
{
- spin_lock_bh(&efx->netif_stop_lock);
+ struct efx_nic *efx = channel->efx;
+
+ if (!channel->tx_queue)
+ return;
+
+ spin_lock_bh(&channel->tx_stop_lock);
EFX_TRACE(efx, "stop TX queue\n");
- atomic_inc(&efx->netif_stop_count);
- netif_stop_queue(efx->net_dev);
+ atomic_inc(&channel->tx_stop_count);
+ netif_tx_stop_queue(
+ netdev_get_tx_queue(
+ efx->net_dev,
+ channel->tx_queue->queue / EFX_TXQ_TYPES));
- spin_unlock_bh(&efx->netif_stop_lock);
+ spin_unlock_bh(&channel->tx_stop_lock);
}
-/* Wake netif's TX queue
- * We want to be able to nest calls to netif_stop_queue(), since each
- * channel can have an individual stop on the queue.
- */
-void efx_wake_queue(struct efx_nic *efx)
+/* Decrement core TX queue stop count and wake it if the count is 0 */
+void efx_wake_queue(struct efx_channel *channel)
{
+ struct efx_nic *efx = channel->efx;
+
+ if (!channel->tx_queue)
+ return;
+
local_bh_disable();
- if (atomic_dec_and_lock(&efx->netif_stop_count,
- &efx->netif_stop_lock)) {
+ if (atomic_dec_and_lock(&channel->tx_stop_count,
+ &channel->tx_stop_lock)) {
EFX_TRACE(efx, "waking TX queue\n");
- netif_wake_queue(efx->net_dev);
- spin_unlock(&efx->netif_stop_lock);
+ netif_tx_wake_queue(
+ netdev_get_tx_queue(
+ efx->net_dev,
+ channel->tx_queue->queue / EFX_TXQ_TYPES));
+ spin_unlock(&channel->tx_stop_lock);
}
local_bh_enable();
}
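A usage sketch (not part of the patch) showing how the per-channel stop count nests; the core queue only wakes once every stop has been balanced by a wake:

	efx_stop_queue(channel);	/* count 0 -> 1: core TX queue stopped */
	efx_stop_queue(channel);	/* count 1 -> 2: still stopped */
	efx_wake_queue(channel);	/* count 2 -> 1: remains stopped */
	efx_wake_queue(channel);	/* count 1 -> 0: queue woken */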
@@ -298,7 +312,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
rc = NETDEV_TX_BUSY;
if (tx_queue->stopped == 1)
- efx_stop_queue(efx);
+ efx_stop_queue(tx_queue->channel);
unwind:
/* Work backwards until we hit the original insert pointer value */
@@ -374,10 +388,9 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
if (unlikely(efx->port_inhibited))
return NETDEV_TX_BUSY;
+ tx_queue = &efx->tx_queue[EFX_TXQ_TYPES * skb_get_queue_mapping(skb)];
if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
- tx_queue = &efx->tx_queue[EFX_TX_QUEUE_OFFLOAD_CSUM];
- else
- tx_queue = &efx->tx_queue[EFX_TX_QUEUE_NO_CSUM];
+ tx_queue += EFX_TXQ_TYPE_OFFLOAD;
return efx_enqueue_skb(tx_queue, skb);
}
@@ -405,7 +418,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
netif_tx_lock(efx->net_dev);
if (tx_queue->stopped) {
tx_queue->stopped = 0;
- efx_wake_queue(efx);
+ efx_wake_queue(tx_queue->channel);
}
netif_tx_unlock(efx->net_dev);
}
@@ -488,7 +501,7 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
/* Release queue's stop on port, if any */
if (tx_queue->stopped) {
tx_queue->stopped = 0;
- efx_wake_queue(tx_queue->efx);
+ efx_wake_queue(tx_queue->channel);
}
}
@@ -1120,7 +1133,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
/* Stop the queue if it wasn't stopped before. */
if (tx_queue->stopped == 1)
- efx_stop_queue(efx);
+ efx_stop_queue(tx_queue->channel);
unwind:
/* Free the DMA mapping we were in the process of writing out */
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index acd9c734e483..518f7fc91473 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -37,7 +37,7 @@
/* Truncated IPv4 packets can confuse the TX packet parser */
#define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB
/* Legacy ISR read can return zero once */
-#define EFX_WORKAROUND_15783 EFX_WORKAROUND_SIENA
+#define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS
/* Legacy interrupt storm when interrupt fifo fills */
#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index c8fc896fc460..cc4bd8c65f8b 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -574,7 +574,7 @@ static inline int sgiseeq_reset(struct net_device *dev)
if (err)
return err;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
return 0;
@@ -638,8 +638,6 @@ static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!(hregs->tx_ctrl & HPC3_ETXCTRL_ACTIVE))
kick_tx(dev, sp, hregs);
- dev->trans_start = jiffies;
-
if (!TX_BUFFS_AVAIL(sp))
netif_stop_queue(dev);
spin_unlock_irqrestore(&sp->tx_lock, flags);
@@ -652,7 +650,7 @@ static void timeout(struct net_device *dev)
printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name);
sgiseeq_reset(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 6242b85d5d15..586ed0915a29 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -1148,8 +1148,6 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (!(ctrl_inl(ndev->base_addr + EDTRR) & EDTRR_TRNS))
ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);
- ndev->trans_start = jiffies;
-
return NETDEV_TX_OK;
}
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index b30ce752bbf3..a5d6a6bd0c1a 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -849,13 +849,13 @@ static void sis190_set_rx_mode(struct net_device *dev)
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
int bit_nr =
- ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
+ ether_crc(ETH_ALEN, ha->addr) & 0x3f;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
rx_mode |= AcceptMulticast;
}
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index cc0c731c4f09..11f7ebedcde5 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -1499,7 +1499,7 @@ static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex
}
if(netif_msg_link(sis_priv))
- printk(KERN_INFO "%s: Media Link On %s %s-duplex \n",
+ printk(KERN_INFO "%s: Media Link On %s %s-duplex\n",
net_dev->name,
*speed == HW_SPEED_100_MBPS ?
"100mbps" : "10mbps",
@@ -1523,7 +1523,7 @@ static void sis900_tx_timeout(struct net_device *net_dev)
int i;
if(netif_msg_tx_err(sis_priv))
- printk(KERN_INFO "%s: Transmit timeout, status %8.8x %8.8x \n",
+ printk(KERN_INFO "%s: Transmit timeout, status %8.8x %8.8x\n",
net_dev->name, inl(ioaddr + cr), inl(ioaddr + isr));
/* Disable interrupts by clearing the interrupt mask. */
@@ -1553,7 +1553,7 @@ static void sis900_tx_timeout(struct net_device *net_dev)
spin_unlock_irqrestore(&sis_priv->lock, flags);
- net_dev->trans_start = jiffies;
+ net_dev->trans_start = jiffies; /* prevent tx timeout */
/* load Transmit Descriptor Register */
outl(sis_priv->tx_ring_dma, ioaddr + txdp);
@@ -1623,8 +1623,6 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
spin_unlock_irqrestore(&sis_priv->lock, flags);
- net_dev->trans_start = jiffies;
-
if (netif_msg_tx_queued(sis_priv))
printk(KERN_DEBUG "%s: Queued Tx packet at %p size %d "
"to slot %d.\n",
@@ -2298,12 +2296,14 @@ static void set_rx_mode(struct net_device *net_dev)
/* Accept Broadcast packet, destination address matchs our
* MAC address, use Receive Filter to reject unwanted MCAST
* packets */
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
rx_mode = RFAAB;
- netdev_for_each_mc_addr(mclist, net_dev) {
- unsigned int bit_nr =
- sis900_mcast_bitnr(mclist->dmi_addr, sis_priv->chipset_rev);
+ netdev_for_each_mc_addr(ha, net_dev) {
+ unsigned int bit_nr;
+
+ bit_nr = sis900_mcast_bitnr(ha->addr,
+ sis_priv->chipset_rev);
mc_filter[bit_nr >> 4] |= (1 << (bit_nr & 0xf));
}
}
diff --git a/drivers/net/skfp/fplustm.c b/drivers/net/skfp/fplustm.c
index 6028bbb3b28a..9d8d1ac48176 100644
--- a/drivers/net/skfp/fplustm.c
+++ b/drivers/net/skfp/fplustm.c
@@ -1352,7 +1352,7 @@ void rtm_set_timer(struct s_smc *smc)
/*
* MIB timer and hardware timer have the same resolution of 80nS
*/
- DB_RMT("RMT: setting new fddiPATHT_Rmode, t = %d ns \n",
+ DB_RMT("RMT: setting new fddiPATHT_Rmode, t = %d ns\n",
(int) smc->mib.a[PATH0].fddiPATHT_Rmode,0) ;
outpd(ADDR(B2_RTM_INI),smc->mib.a[PATH0].fddiPATHT_Rmode) ;
}
diff --git a/drivers/net/skfp/pcmplc.c b/drivers/net/skfp/pcmplc.c
index e6b33ee05ede..ba45bc794d77 100644
--- a/drivers/net/skfp/pcmplc.c
+++ b/drivers/net/skfp/pcmplc.c
@@ -1277,7 +1277,7 @@ static void pc_rcode_actions(struct s_smc *smc, int bit, struct s_phy *phy)
mib = phy->mib ;
- DB_PCMN(1,"SIG rec %x %x: \n", bit,phy->r_val[bit] ) ;
+ DB_PCMN(1,"SIG rec %x %x:\n", bit,phy->r_val[bit] ) ;
bit++ ;
switch(bit) {
@@ -1580,7 +1580,7 @@ static void pc_tcode_actions(struct s_smc *smc, const int bit, struct s_phy *phy
mib->fddiPORTMacIndicated.T_val = phy->t_val[9] ;
break ;
}
- DB_PCMN(1,"SIG snd %x %x: \n", bit,phy->t_val[bit] ) ;
+ DB_PCMN(1,"SIG snd %x %x:\n", bit,phy->t_val[bit] ) ;
}
/*
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c
index d9016b75abc2..91adc38d5710 100644
--- a/drivers/net/skfp/skfddi.c
+++ b/drivers/net/skfp/skfddi.c
@@ -852,7 +852,7 @@ static void skfp_ctl_set_multicast_list(struct net_device *dev)
static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
{
struct s_smc *smc = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
/* Enable promiscuous mode, if necessary */
if (dev->flags & IFF_PROMISC) {
@@ -876,13 +876,13 @@ static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
/* use exact filtering */
// point to first multicast addr
- netdev_for_each_mc_addr(dmi, dev) {
- mac_add_multicast(smc,
- (struct fddi_addr *)dmi->dmi_addr,
- 1);
+ netdev_for_each_mc_addr(ha, dev) {
+ mac_add_multicast(smc,
+ (struct fddi_addr *)ha->addr,
+ 1);
pr_debug(KERN_INFO "ENABLE MC ADDRESS: %pMF\n",
- dmi->dmi_addr);
+ ha->addr);
}
} else { // more MC addresses than HW supports
@@ -1076,7 +1076,6 @@ static netdev_tx_t skfp_send_pkt(struct sk_buff *skb,
if (bp->QueueSkb == 0) {
netif_stop_queue(dev);
}
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
} // skfp_send_pkt
diff --git a/drivers/net/skfp/srf.c b/drivers/net/skfp/srf.c
index 6caf713b744c..40882b3faba6 100644
--- a/drivers/net/skfp/srf.c
+++ b/drivers/net/skfp/srf.c
@@ -414,7 +414,7 @@ static void smt_send_srf(struct s_smc *smc)
smt->smt_len = SMT_MAX_INFO_LEN - pcon.pc_len ;
mb->sm_len = smt->smt_len + sizeof(struct smt_header) ;
- DB_SMT("SRF: sending SRF at %x, len %d \n",smt,mb->sm_len) ;
+ DB_SMT("SRF: sending SRF at %x, len %d\n",smt,mb->sm_len) ;
DB_SMT("SRF: state SR%d Threshold %d\n",
smc->srf.sr_state,smc->srf.SRThreshold/TICKS_PER_SECOND) ;
#ifdef DEBUG
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 50eb70609f20..96eee8666877 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -2918,7 +2918,7 @@ static void genesis_set_multicast(struct net_device *dev)
struct skge_port *skge = netdev_priv(dev);
struct skge_hw *hw = skge->hw;
int port = skge->port;
- struct dev_mc_list *list;
+ struct netdev_hw_addr *ha;
u32 mode;
u8 filter[8];
@@ -2938,8 +2938,8 @@ static void genesis_set_multicast(struct net_device *dev)
skge->flow_status == FLOW_STAT_SYMMETRIC)
genesis_add_filter(filter, pause_mc_addr);
- netdev_for_each_mc_addr(list, dev)
- genesis_add_filter(filter, list->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev)
+ genesis_add_filter(filter, ha->addr);
}
xm_write32(hw, port, XM_MODE, mode);
@@ -2957,7 +2957,7 @@ static void yukon_set_multicast(struct net_device *dev)
struct skge_port *skge = netdev_priv(dev);
struct skge_hw *hw = skge->hw;
int port = skge->port;
- struct dev_mc_list *list;
+ struct netdev_hw_addr *ha;
int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND ||
skge->flow_status == FLOW_STAT_SYMMETRIC);
u16 reg;
@@ -2980,8 +2980,8 @@ static void yukon_set_multicast(struct net_device *dev)
if (rx_pause)
yukon_add_filter(filter, pause_mc_addr);
- netdev_for_each_mc_addr(list, dev)
- yukon_add_filter(filter, list->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev)
+ yukon_add_filter(filter, ha->addr);
}
@@ -3667,7 +3667,7 @@ static int skge_debug_show(struct seq_file *seq, void *v)
t->csum_offs, t->csum_write, t->csum_start);
}
- seq_printf(seq, "\nRx Ring: \n");
+ seq_printf(seq, "\nRx Ring:\n");
for (e = skge->rx_ring.to_clean; ; e = e->next) {
const struct skge_rx_desc *r = e->desc;
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 088c797eb73b..bf9c05be347b 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -70,18 +70,15 @@
VLAN:GSO + CKSUM + Data + skb_frags * DMA */
#define MAX_SKB_TX_LE (2 + (sizeof(dma_addr_t)/sizeof(u32))*(MAX_SKB_FRAGS+1))
#define TX_MIN_PENDING (MAX_SKB_TX_LE+1)
-#define TX_MAX_PENDING 4096
+#define TX_MAX_PENDING 1024
#define TX_DEF_PENDING 127
-#define STATUS_RING_SIZE 2048 /* 2 ports * (TX + 2*RX) */
-#define STATUS_LE_BYTES (STATUS_RING_SIZE*sizeof(struct sky2_status_le))
#define TX_WATCHDOG (5 * HZ)
#define NAPI_WEIGHT 64
#define PHY_RETRIES 1000
#define SKY2_EEPROM_MAGIC 0x9955aabb
-
#define RING_NEXT(x,s) (((x)+1) & ((s)-1))
static const u32 default_msg =
@@ -227,7 +224,7 @@ static void sky2_power_on(struct sky2_hw *hw)
/* disable Core Clock Division, */
sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
- if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
+ if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
/* enable bits are inverted */
sky2_write8(hw, B2_Y2_CLK_GATE,
Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
@@ -269,7 +266,7 @@ static void sky2_power_on(struct sky2_hw *hw)
static void sky2_power_aux(struct sky2_hw *hw)
{
- if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
+ if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
sky2_write8(hw, B2_Y2_CLK_GATE, 0);
else
/* enable bits are inverted */
@@ -652,7 +649,7 @@ static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
reg1 &= ~phy_power[port];
- if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
+ if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
reg1 |= coma_mode[port];
sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
@@ -824,7 +821,9 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
- if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0 && port == 1) {
+ if (hw->chip_id == CHIP_ID_YUKON_XL &&
+ hw->chip_rev == CHIP_REV_YU_XL_A0 &&
+ port == 1) {
/* WA DEV_472 -- looks like crossed wires on port 2 */
/* clear GMAC 1 Control reset */
sky2_write8(hw, SK_REG(0, GMAC_CTRL), GMC_RST_CLR);
@@ -878,6 +877,10 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
if (hw->dev[port]->mtu > ETH_DATA_LEN)
reg |= GM_SMOD_JUMBO_ENA;
+ if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
+ hw->chip_rev == CHIP_REV_YU_EC_U_B1)
+ reg |= GM_NEW_FLOW_CTRL;
+
gma_write16(hw, port, GM_SERIAL_MODE, reg);
/* virtual address for data */
@@ -1126,7 +1129,7 @@ static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
if (pci_dma_mapping_error(pdev, re->data_addr))
goto mapping_error;
- pci_unmap_len_set(re, data_size, size);
+ dma_unmap_len_set(re, data_size, size);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -1148,7 +1151,7 @@ map_page_error:
PCI_DMA_FROMDEVICE);
}
- pci_unmap_single(pdev, re->data_addr, pci_unmap_len(re, data_size),
+ pci_unmap_single(pdev, re->data_addr, dma_unmap_len(re, data_size),
PCI_DMA_FROMDEVICE);
mapping_error:
@@ -1163,7 +1166,7 @@ static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re)
struct sk_buff *skb = re->skb;
int i;
- pci_unmap_single(pdev, re->data_addr, pci_unmap_len(re, data_size),
+ pci_unmap_single(pdev, re->data_addr, dma_unmap_len(re, data_size),
PCI_DMA_FROMDEVICE);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
@@ -1190,6 +1193,39 @@ static void rx_set_checksum(struct sky2_port *sky2)
? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
}
+/* Enable/disable receive hash calculation (RSS) */
+static void rx_set_rss(struct net_device *dev)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ int i, nkeys = 4;
+
+ /* Supports IPv6 and other modes */
+ if (hw->flags & SKY2_HW_NEW_LE) {
+ nkeys = 10;
+ sky2_write32(hw, SK_REG(sky2->port, RSS_CFG), HASH_ALL);
+ }
+
+ /* Program RSS initial values */
+ if (dev->features & NETIF_F_RXHASH) {
+ u32 key[nkeys];
+
+ get_random_bytes(key, nkeys * sizeof(u32));
+ for (i = 0; i < nkeys; i++)
+ sky2_write32(hw, SK_REG(sky2->port, RSS_KEY + i * 4),
+ key[i]);
+
+ /* Need to turn on (undocumented) flag to make hashing work */
+ sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T),
+ RX_STFW_ENA);
+
+ sky2_write32(hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+ BMU_ENA_RX_RSS_HASH);
+ } else
+ sky2_write32(hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+ BMU_DIS_RX_RSS_HASH);
+}
+
/*
* The RX Stop command will not work for Yukon-2 if the BMU does not
* reach the end of packet and since we can't make sure that we have
@@ -1414,8 +1450,7 @@ static void sky2_rx_start(struct sky2_port *sky2)
/* These chips have no ram buffer?
* MAC Rx RAM Read is controlled by hardware */
if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
- (hw->chip_rev == CHIP_REV_YU_EC_U_A1 ||
- hw->chip_rev == CHIP_REV_YU_EC_U_B0))
+ hw->chip_rev > CHIP_REV_YU_EC_U_A0)
sky2_write32(hw, Q_ADDR(rxq, Q_TEST), F_M_RX_RAM_DIS);
sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);
@@ -1423,6 +1458,9 @@ static void sky2_rx_start(struct sky2_port *sky2)
if (!(hw->flags & SKY2_HW_NEW_LE))
rx_set_checksum(sky2);
+ if (!(hw->flags & SKY2_HW_RSS_BROKEN))
+ rx_set_rss(sky2->netdev);
+
/* submit Rx ring */
for (i = 0; i < sky2->rx_pending; i++) {
re = sky2->rx_ring + i;
@@ -1657,12 +1695,12 @@ static unsigned tx_le_req(const struct sk_buff *skb)
static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re)
{
if (re->flags & TX_MAP_SINGLE)
- pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr),
- pci_unmap_len(re, maplen),
+ pci_unmap_single(pdev, dma_unmap_addr(re, mapaddr),
+ dma_unmap_len(re, maplen),
PCI_DMA_TODEVICE);
else if (re->flags & TX_MAP_PAGE)
- pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr),
- pci_unmap_len(re, maplen),
+ pci_unmap_page(pdev, dma_unmap_addr(re, mapaddr),
+ dma_unmap_len(re, maplen),
PCI_DMA_TODEVICE);
re->flags = 0;
}
@@ -1773,8 +1811,8 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
re = sky2->tx_ring + slot;
re->flags = TX_MAP_SINGLE;
- pci_unmap_addr_set(re, mapaddr, mapping);
- pci_unmap_len_set(re, maplen, len);
+ dma_unmap_addr_set(re, mapaddr, mapping);
+ dma_unmap_len_set(re, maplen, len);
le = get_tx_le(sky2, &slot);
le->addr = cpu_to_le32(lower_32_bits(mapping));
@@ -1802,8 +1840,8 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
re = sky2->tx_ring + slot;
re->flags = TX_MAP_PAGE;
- pci_unmap_addr_set(re, mapaddr, mapping);
- pci_unmap_len_set(re, maplen, frag->size);
+ dma_unmap_addr_set(re, mapaddr, mapping);
+ dma_unmap_len_set(re, maplen, frag->size);
le = get_tx_le(sky2, &slot);
le->addr = cpu_to_le32(lower_32_bits(mapping));
@@ -2142,7 +2180,8 @@ static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
istatus, phystat);
if (istatus & PHY_M_IS_AN_COMPL) {
- if (sky2_autoneg_done(sky2, phystat) == 0)
+ if (sky2_autoneg_done(sky2, phystat) == 0 &&
+ !netif_carrier_ok(dev))
sky2_link_up(sky2);
goto out;
}
@@ -2531,6 +2570,14 @@ static void sky2_rx_checksum(struct sky2_port *sky2, u32 status)
}
}
+static void sky2_rx_hash(struct sky2_port *sky2, u32 status)
+{
+ struct sk_buff *skb;
+
+ skb = sky2->rx_ring[sky2->rx_next].skb;
+ skb->rxhash = le32_to_cpu(status);
+}
+
/* Process status response ring */
static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
{
@@ -2552,7 +2599,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
if (!(opcode & HW_OWNER))
break;
- hw->st_idx = RING_NEXT(hw->st_idx, STATUS_RING_SIZE);
+ hw->st_idx = RING_NEXT(hw->st_idx, hw->st_size);
port = le->css & CSS_LINK_BIT;
dev = hw->dev[port];
@@ -2603,6 +2650,10 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
sky2_rx_checksum(sky2, status);
break;
+ case OP_RSS_HASH:
+ sky2_rx_hash(sky2, status);
+ break;
+
case OP_TXINDEXLE:
/* TX index reports status for both ports */
sky2_tx_done(hw->dev[0], status & 0xfff);
@@ -2957,6 +3008,8 @@ static int __devinit sky2_init(struct sky2_hw *hw)
switch(hw->chip_id) {
case CHIP_ID_YUKON_XL:
hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY;
+ if (hw->chip_rev < CHIP_REV_YU_XL_A2)
+ hw->flags |= SKY2_HW_RSS_BROKEN;
break;
case CHIP_ID_YUKON_EC_U:
@@ -2982,10 +3035,11 @@ static int __devinit sky2_init(struct sky2_hw *hw)
dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n");
return -EOPNOTSUPP;
}
- hw->flags = SKY2_HW_GIGABIT;
+ hw->flags = SKY2_HW_GIGABIT | SKY2_HW_RSS_BROKEN;
break;
case CHIP_ID_YUKON_FE:
+ hw->flags = SKY2_HW_RSS_BROKEN;
break;
case CHIP_ID_YUKON_FE_P:
@@ -3192,7 +3246,7 @@ static void sky2_reset(struct sky2_hw *hw)
for (i = 0; i < hw->ports; i++)
sky2_gmac_reset(hw, i);
- memset(hw->st_le, 0, STATUS_LE_BYTES);
+ memset(hw->st_le, 0, hw->st_size * sizeof(struct sky2_status_le));
hw->st_idx = 0;
sky2_write32(hw, STAT_CTRL, SC_STAT_RST_SET);
@@ -3202,7 +3256,7 @@ static void sky2_reset(struct sky2_hw *hw)
sky2_write32(hw, STAT_LIST_ADDR_HI, (u64) hw->st_dma >> 32);
/* Set the list last index */
- sky2_write16(hw, STAT_LAST_IDX, STATUS_RING_SIZE - 1);
+ sky2_write16(hw, STAT_LAST_IDX, hw->st_size - 1);
sky2_write16(hw, STAT_TX_IDX_TH, 10);
sky2_write8(hw, STAT_FIFO_WM, 16);
@@ -3622,7 +3676,7 @@ static void sky2_set_multicast(struct net_device *dev)
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
unsigned port = sky2->port;
- struct dev_mc_list *list;
+ struct netdev_hw_addr *ha;
u16 reg;
u8 filter[8];
int rx_pause;
@@ -3646,8 +3700,8 @@ static void sky2_set_multicast(struct net_device *dev)
if (rx_pause)
sky2_add_filter(filter, pause_mc_addr);
- netdev_for_each_mc_addr(list, dev)
- sky2_add_filter(filter, list->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev)
+ sky2_add_filter(filter, ha->addr);
}
gma_write16(hw, port, GM_MC_ADDR_H1,
@@ -4109,6 +4163,25 @@ static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom
return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len);
}
+static int sky2_set_flags(struct net_device *dev, u32 data)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ if (data & ~ETH_FLAG_RXHASH)
+ return -EOPNOTSUPP;
+
+ if (data & ETH_FLAG_RXHASH) {
+ if (sky2->hw->flags & SKY2_HW_RSS_BROKEN)
+ return -EINVAL;
+
+ dev->features |= NETIF_F_RXHASH;
+ } else
+ dev->features &= ~NETIF_F_RXHASH;
+
+ rx_set_rss(dev);
+
+ return 0;
+}
static const struct ethtool_ops sky2_ethtool_ops = {
.get_settings = sky2_get_settings,
@@ -4140,6 +4213,7 @@ static const struct ethtool_ops sky2_ethtool_ops = {
.phys_id = sky2_phys_id,
.get_sset_count = sky2_get_sset_count,
.get_ethtool_stats = sky2_get_ethtool_stats,
+ .set_flags = sky2_set_flags,
};
#ifdef CONFIG_SKY2_DEBUG
@@ -4250,12 +4324,13 @@ static int sky2_debug_show(struct seq_file *seq, void *v)
napi_disable(&hw->napi);
last = sky2_read16(hw, STAT_PUT_IDX);
+ seq_printf(seq, "Status ring %u\n", hw->st_size);
if (hw->st_idx == last)
seq_puts(seq, "Status ring (empty)\n");
else {
seq_puts(seq, "Status ring\n");
- for (idx = hw->st_idx; idx != last && idx < STATUS_RING_SIZE;
- idx = RING_NEXT(idx, STATUS_RING_SIZE)) {
+ for (idx = hw->st_idx; idx != last && idx < hw->st_size;
+ idx = RING_NEXT(idx, hw->st_size)) {
const struct sky2_status_le *le = hw->st_le + idx;
seq_printf(seq, "[%d] %#x %d %#x\n",
idx, le->opcode, le->length, le->status);
@@ -4492,6 +4567,10 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
if (highmem)
dev->features |= NETIF_F_HIGHDMA;
+ /* Enable receive hashing unless hardware is known broken */
+ if (!(hw->flags & SKY2_HW_RSS_BROKEN))
+ dev->features |= NETIF_F_RXHASH;
+
#ifdef SKY2_VLAN_TAG_USED
/* The workaround for FE+ status conflicts with VLAN tag detection. */
if (!(sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
@@ -4683,15 +4762,17 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
goto err_out_free_hw;
}
- /* ring for status responses */
- hw->st_le = pci_alloc_consistent(pdev, STATUS_LE_BYTES, &hw->st_dma);
- if (!hw->st_le)
- goto err_out_iounmap;
-
err = sky2_init(hw);
if (err)
goto err_out_iounmap;
+ /* ring for status responses */
+ hw->st_size = hw->ports * roundup_pow_of_two(3*RX_MAX_PENDING + TX_MAX_PENDING);
+ hw->st_le = pci_alloc_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
+ &hw->st_dma);
+ if (!hw->st_le)
+ goto err_out_reset;
+
dev_info(&pdev->dev, "Yukon-2 %s chip revision %d\n",
sky2_name(hw->chip_id, buf1, sizeof(buf1)), hw->chip_rev);
@@ -4765,8 +4846,10 @@ err_out_unregister:
err_out_free_netdev:
free_netdev(dev);
err_out_free_pci:
+ pci_free_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
+ hw->st_le, hw->st_dma);
+err_out_reset:
sky2_write8(hw, B0_CTST, CS_RST_SET);
- pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
err_out_iounmap:
iounmap(hw->regs);
err_out_free_hw:
@@ -4804,7 +4887,8 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
free_irq(pdev->irq, hw);
if (hw->flags & SKY2_HW_USE_MSI)
pci_disable_msi(pdev);
- pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
+ pci_free_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
+ hw->st_le, hw->st_dma);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index a5e182dd9819..084eff21b67a 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -548,6 +548,14 @@ enum {
CHIP_ID_YUKON_UL_2 = 0xba, /* YUKON-2 Ultra 2 */
CHIP_ID_YUKON_OPT = 0xbc, /* YUKON-2 Optima */
};
+
+enum yukon_xl_rev {
+ CHIP_REV_YU_XL_A0 = 0,
+ CHIP_REV_YU_XL_A1 = 1,
+ CHIP_REV_YU_XL_A2 = 2,
+ CHIP_REV_YU_XL_A3 = 3,
+};
+
enum yukon_ec_rev {
CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */
CHIP_REV_YU_EC_A2 = 1, /* Chip Rev. for Yukon-EC A2 */
@@ -557,6 +565,7 @@ enum yukon_ec_u_rev {
CHIP_REV_YU_EC_U_A0 = 1,
CHIP_REV_YU_EC_U_A1 = 2,
CHIP_REV_YU_EC_U_B0 = 3,
+ CHIP_REV_YU_EC_U_B1 = 5,
};
enum yukon_fe_rev {
CHIP_REV_YU_FE_A1 = 1,
@@ -685,8 +694,21 @@ enum {
TXA_CTRL = 0x0210,/* 8 bit Tx Arbiter Control Register */
TXA_TEST = 0x0211,/* 8 bit Tx Arbiter Test Register */
TXA_STAT = 0x0212,/* 8 bit Tx Arbiter Status Register */
+
+ RSS_KEY = 0x0220, /* RSS Key setup */
+ RSS_CFG = 0x0248, /* RSS Configuration */
};
+enum {
+ HASH_TCP_IPV6_EX_CTRL = 1<<5,
+ HASH_IPV6_EX_CTRL = 1<<4,
+ HASH_TCP_IPV6_CTRL = 1<<3,
+ HASH_IPV6_CTRL = 1<<2,
+ HASH_TCP_IPV4_CTRL = 1<<1,
+ HASH_IPV4_CTRL = 1<<0,
+
+ HASH_ALL = 0x3f,
+};
enum {
B6_EXT_REG = 0x0300,/* External registers (GENESIS only) */
@@ -1775,10 +1797,13 @@ enum {
/* GM_SERIAL_MODE 16 bit r/w Serial Mode Register */
enum {
GM_SMOD_DATABL_MSK = 0x1f<<11, /* Bit 15..11: Data Blinder (r/o) */
- GM_SMOD_LIMIT_4 = 1<<10, /* Bit 10: 4 consecutive Tx trials */
- GM_SMOD_VLAN_ENA = 1<<9, /* Bit 9: Enable VLAN (Max. Frame Len) */
- GM_SMOD_JUMBO_ENA = 1<<8, /* Bit 8: Enable Jumbo (Max. Frame Len) */
- GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */
+ GM_SMOD_LIMIT_4 = 1<<10, /* 4 consecutive Tx trials */
+ GM_SMOD_VLAN_ENA = 1<<9, /* Enable VLAN (Max. Frame Len) */
+ GM_SMOD_JUMBO_ENA = 1<<8, /* Enable Jumbo (Max. Frame Len) */
+
+ GM_NEW_FLOW_CTRL = 1<<6, /* Enable New Flow-Control */
+
+ GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */
};
#define DATA_BLIND_VAL(x) (((x)<<11) & GM_SMOD_DATABL_MSK)
@@ -2157,14 +2182,14 @@ struct tx_ring_info {
unsigned long flags;
#define TX_MAP_SINGLE 0x0001
#define TX_MAP_PAGE 0x0002
- DECLARE_PCI_UNMAP_ADDR(mapaddr);
- DECLARE_PCI_UNMAP_LEN(maplen);
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ DEFINE_DMA_UNMAP_LEN(maplen);
};
struct rx_ring_info {
struct sk_buff *skb;
dma_addr_t data_addr;
- DECLARE_PCI_UNMAP_LEN(data_size);
+ DEFINE_DMA_UNMAP_LEN(data_size);
dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT];
};
@@ -2249,6 +2274,7 @@ struct sky2_hw {
#define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */
#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */
#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */
+#define SKY2_HW_RSS_BROKEN 0x00000100
u8 chip_id;
u8 chip_rev;
@@ -2256,6 +2282,7 @@ struct sky2_hw {
u8 ports;
struct sky2_status_le *st_le;
+ u32 st_size;
u32 st_idx;
dma_addr_t st_dma;
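The probe/remove hunks above replace the fixed STATUS_LE_BYTES allocation with a status ring sized from the port count and the worst-case pending descriptors, recorded in the new hw->st_size field. A condensed sketch of the allocate/free pairing (helper names are placeholders; error unwinding is simplified):

/* Sketch: size the status ring at probe time. RING_NEXT() wraps the
 * index by masking with (size - 1), so the per-port size is rounded up
 * to a power of two; ports is 1 or 2, keeping the product a power of two. */
static int sky2_alloc_st_ring_sketch(struct pci_dev *pdev, struct sky2_hw *hw)
{
        hw->st_size = hw->ports *
                roundup_pow_of_two(3 * RX_MAX_PENDING + TX_MAX_PENDING);
        hw->st_le = pci_alloc_consistent(pdev,
                        hw->st_size * sizeof(struct sky2_status_le),
                        &hw->st_dma);
        return hw->st_le ? 0 : -ENOMEM;
}

static void sky2_free_st_ring_sketch(struct pci_dev *pdev, struct sky2_hw *hw)
{
        /* free with exactly the size that was allocated */
        pci_free_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
                            hw->st_le, hw->st_dma);
}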
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index 89696156c059..d92772e992d2 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -458,7 +458,7 @@ static void sl_tx_timeout(struct net_device *dev)
* 14 Oct 1994 Dmitry Gorodchanin.
*/
#ifdef SL_CHECK_TRANSMIT
- if (time_before(jiffies, dev->trans_start + 20 * HZ)) {
+ if (time_before(jiffies, dev_trans_start(dev) + 20 * HZ)) {
/* 20 sec timeout not reached */
goto out;
}
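The slip.c change swaps the direct read of dev->trans_start for the dev_trans_start() accessor, which takes the per-queue transmit timestamps into account. A standalone sketch of the check (the helper name is a placeholder):

#include <linux/jiffies.h>
#include <linux/netdevice.h>

/* Sketch: true if the device transmitted within the last 20 seconds,
 * using the queue-aware accessor instead of the legacy field. */
static bool sl_recently_transmitted(struct net_device *dev)
{
        return time_before(jiffies, dev_trans_start(dev) + 20 * HZ);
}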
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index 635820d42b19..66831f378396 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -382,7 +382,7 @@ static inline void smc911x_rcv(struct net_device *dev)
DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n",
dev->name, __func__);
status = SMC_GET_RX_STS_FIFO(lp);
- DBG(SMC_DEBUG_RX, "%s: Rx pkt len %d status 0x%08x \n",
+ DBG(SMC_DEBUG_RX, "%s: Rx pkt len %d status 0x%08x\n",
dev->name, (status & 0x3fff0000) >> 16, status & 0xc000ffff);
pkt_len = (status & RX_STS_PKT_LEN_) >> 16;
if (status & RX_STS_ES_) {
@@ -1135,7 +1135,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
}
#else
if (status & INT_STS_TSFL_) {
- DBG(SMC_DEBUG_TX, "%s: TX status FIFO limit (%d) irq \n", dev->name, );
+ DBG(SMC_DEBUG_TX, "%s: TX status FIFO limit (%d) irq\n", dev->name, );
smc911x_tx(dev);
SMC_ACK_INT(lp, INT_STS_TSFL_);
}
@@ -1274,7 +1274,7 @@ static void smc911x_timeout(struct net_device *dev)
status = SMC_GET_INT(lp);
mask = SMC_GET_INT_EN(lp);
spin_unlock_irqrestore(&lp->lock, flags);
- DBG(SMC_DEBUG_MISC, "%s: INT 0x%02x MASK 0x%02x \n",
+ DBG(SMC_DEBUG_MISC, "%s: INT 0x%02x MASK 0x%02x\n",
dev->name, status, mask);
/* Dump the current TX FIFO contents and restart */
@@ -1289,7 +1289,7 @@ static void smc911x_timeout(struct net_device *dev)
schedule_work(&lp->phy_configure);
/* We can accept TX packets again */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -1340,7 +1340,7 @@ static void smc911x_set_multicast_list(struct net_device *dev)
* within that register.
*/
else if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *cur_addr;
+ struct netdev_hw_addr *ha;
/* Set the Hash perfect mode */
mcr |= MAC_CR_HPFILT_;
@@ -1348,19 +1348,16 @@ static void smc911x_set_multicast_list(struct net_device *dev)
/* start with a table of all zeros: reject all */
memset(multicast_table, 0, sizeof(multicast_table));
- netdev_for_each_mc_addr(cur_addr, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
u32 position;
- /* do we have a pointer here? */
- if (!cur_addr)
- break;
/* make sure this is a multicast address -
shouldn't this be a given if we have it here ? */
- if (!(*cur_addr->dmi_addr & 1))
- continue;
+ if (!(*ha->addr & 1))
+ continue;
/* upper 6 bits are used as hash index */
- position = ether_crc(ETH_ALEN, cur_addr->dmi_addr)>>26;
+ position = ether_crc(ETH_ALEN, ha->addr)>>26;
multicast_table[position>>5] |= 1 << (position&0x1f);
}
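The smc911x hunks above — and the smc9194, smc91x, smsc911x, smsc9420, sonic, spider_net and starfire hunks that follow — all make the same conversion: the multicast walk moves from struct dev_mc_list and dmi_addr to struct netdev_hw_addr and ha->addr, and the defensive NULL checks are dropped because netdev_for_each_mc_addr() only yields valid entries. A condensed sketch of the resulting pattern, using a single 64-bit hash table for brevity (the function name is a placeholder):

#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* Sketch: build a 64-entry multicast hash table with the new API.
 * The CRC and bit-index math follows the smc911x hunk above. */
static u64 build_mcast_hash_sketch(struct net_device *dev)
{
        struct netdev_hw_addr *ha;
        u64 table = 0;

        netdev_for_each_mc_addr(ha, dev) {
                u32 position;

                if (!(*ha->addr & 1))   /* skip non-multicast entries */
                        continue;

                /* upper 6 bits of the CRC pick one of 64 hash bits */
                position = ether_crc(ETH_ALEN, ha->addr) >> 26;
                table |= 1ULL << position;
        }
        return table;
}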
diff --git a/drivers/net/smc9194.c b/drivers/net/smc9194.c
index 3f2f7843aa4e..acb81a876ac6 100644
--- a/drivers/net/smc9194.c
+++ b/drivers/net/smc9194.c
@@ -416,7 +416,7 @@ static void smc_shutdown( int ioaddr )
/*
- . Function: smc_setmulticast( int ioaddr, int count, dev_mc_list * adds )
+ . Function: smc_setmulticast( int ioaddr, struct net_device *dev )
. Purpose:
. This sets the internal hardware table to filter out unwanted multicast
. packets before they take up memory.
@@ -437,26 +437,23 @@ static void smc_setmulticast(int ioaddr, struct net_device *dev)
{
int i;
unsigned char multicast_table[ 8 ];
- struct dev_mc_list *cur_addr;
+ struct netdev_hw_addr *ha;
/* table for flipping the order of 3 bits */
unsigned char invert3[] = { 0, 4, 2, 6, 1, 5, 3, 7 };
/* start with a table of all zeros: reject all */
memset( multicast_table, 0, sizeof( multicast_table ) );
- netdev_for_each_mc_addr(cur_addr, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
int position;
- /* do we have a pointer here? */
- if ( !cur_addr )
- break;
/* make sure this is a multicast address - shouldn't this
be a given if we have it here ? */
- if ( !( *cur_addr->dmi_addr & 1 ) )
+ if (!(*ha->addr & 1))
continue;
/* only use the low order bits */
- position = ether_crc_le(6, cur_addr->dmi_addr) & 0x3f;
+ position = ether_crc_le(6, ha->addr) & 0x3f;
/* do some messy swapping to put the bit in the right spot */
multicast_table[invert3[position&7]] |=
@@ -528,7 +525,7 @@ static netdev_tx_t smc_wait_to_send_packet(struct sk_buff *skb,
numPages = ((length & 0xfffe) + 6) / 256;
if (numPages > 7 ) {
- printk(CARDNAME": Far too big packet error. \n");
+ printk(CARDNAME": Far too big packet error.\n");
/* freeing the packet is a good thing here... but should
. any packets of this size get down here? */
dev_kfree_skb (skb);
@@ -570,9 +567,9 @@ static netdev_tx_t smc_wait_to_send_packet(struct sk_buff *skb,
if ( !time_out ) {
/* oh well, wait until the chip finds memory later */
SMC_ENABLE_INT( IM_ALLOC_INT );
- PRINTK2((CARDNAME": memory allocation deferred. \n"));
+ PRINTK2((CARDNAME": memory allocation deferred.\n"));
/* it's deferred, but I'll handle it later */
- return NETDEV_TX_OK;
+ return NETDEV_TX_OK;
}
/* or YES! I can send the packet now.. */
smc_hardware_send_packet(dev);
@@ -610,7 +607,7 @@ static void smc_hardware_send_packet( struct net_device * dev )
ioaddr = dev->base_addr;
if ( !skb ) {
- PRINTK((CARDNAME": In XMIT with no packet to send \n"));
+ PRINTK((CARDNAME": In XMIT with no packet to send\n"));
return;
}
length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
@@ -620,7 +617,7 @@ static void smc_hardware_send_packet( struct net_device * dev )
packet_no = inb( ioaddr + PNR_ARR + 1 );
if ( packet_no & 0x80 ) {
/* or isn't there? BAD CHIP! */
- printk(KERN_DEBUG CARDNAME": Memory allocation failed. \n");
+ printk(KERN_DEBUG CARDNAME": Memory allocation failed.\n");
dev_kfree_skb_any(skb);
lp->saved_skb = NULL;
netif_wake_queue(dev);
@@ -685,7 +682,7 @@ static void smc_hardware_send_packet( struct net_device * dev )
/* and let the chipset deal with it */
outw( MC_ENQUEUE , ioaddr + MMU_CMD );
- PRINTK2((CARDNAME": Sent packet of length %d \n",length));
+ PRINTK2((CARDNAME": Sent packet of length %d\n", length));
lp->saved_skb = NULL;
dev_kfree_skb_any (skb);
@@ -937,7 +934,7 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
if ( !chip_ids[ ( revision_register >> 4 ) & 0xF ] ) {
/* I don't recognize this chip, so... */
printk(CARDNAME ": IO %x: Unrecognized revision register:"
- " %x, Contact author. \n", ioaddr, revision_register );
+ " %x, Contact author.\n", ioaddr, revision_register);
retval = -ENODEV;
goto err_out;
@@ -1045,9 +1042,6 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
*/
printk("ADDR: %pM\n", dev->dev_addr);
- /* set the private data to zero by default */
- memset(netdev_priv(dev), 0, sizeof(struct smc_local));
-
/* Grab the IRQ */
retval = request_irq(dev->irq, smc_interrupt, 0, DRV_NAME, dev);
if (retval) {
@@ -1074,7 +1068,7 @@ static void print_packet( byte * buf, int length )
int remainder;
int lines;
- printk("Packet of length %d \n", length );
+ printk("Packet of length %d\n", length);
lines = length / 16;
remainder = length % 16;
@@ -1170,7 +1164,7 @@ static void smc_timeout(struct net_device *dev)
/* "kick" the adaptor */
smc_reset( dev->base_addr );
smc_enable( dev->base_addr );
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
/* clear anything saved */
((struct smc_local *)netdev_priv(dev))->saved_skb = NULL;
netif_wake_queue(dev);
@@ -1201,7 +1195,7 @@ static void smc_rcv(struct net_device *dev)
if ( packet_number & FP_RXEMPTY ) {
/* we got called , but nothing was on the FIFO */
- PRINTK((CARDNAME ": WARNING: smc_rcv with nothing on FIFO. \n"));
+ PRINTK((CARDNAME ": WARNING: smc_rcv with nothing on FIFO.\n"));
/* don't need to restore anything */
return;
}
@@ -1257,14 +1251,14 @@ static void smc_rcv(struct net_device *dev)
to send the DWORDs or the bytes first, or some
mixture. A mixture might improve already slow PIO
performance */
- PRINTK3((" Reading %d dwords (and %d bytes) \n",
+ PRINTK3((" Reading %d dwords (and %d bytes)\n",
packet_length >> 2, packet_length & 3 ));
insl(ioaddr + DATA_1 , data, packet_length >> 2 );
/* read the left over bytes */
insb( ioaddr + DATA_1, data + (packet_length & 0xFFFFFC),
packet_length & 0x3 );
#else
- PRINTK3((" Reading %d words and %d byte(s) \n",
+ PRINTK3((" Reading %d words and %d byte(s)\n",
(packet_length >> 1 ), packet_length & 1 ));
insw(ioaddr + DATA_1 , data, packet_length >> 1);
if ( packet_length & 1 ) {
@@ -1333,7 +1327,7 @@ static void smc_tx( struct net_device * dev )
outw( PTR_AUTOINC | PTR_READ, ioaddr + POINTER );
tx_status = inw( ioaddr + DATA_1 );
- PRINTK3((CARDNAME": TX DONE STATUS: %4x \n", tx_status ));
+ PRINTK3((CARDNAME": TX DONE STATUS: %4x\n", tx_status));
dev->stats.tx_errors++;
if ( tx_status & TS_LOSTCAR ) dev->stats.tx_carrier_errors++;
@@ -1347,7 +1341,7 @@ static void smc_tx( struct net_device * dev )
#endif
if ( tx_status & TS_SUCCESS ) {
- printk(CARDNAME": Successful packet caused interrupt \n");
+ printk(CARDNAME": Successful packet caused interrupt\n");
}
/* re-enable transmit */
SMC_SELECT_BANK( 0 );
@@ -1393,7 +1387,7 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id)
int handled = 0;
- PRINTK3((CARDNAME": SMC interrupt started \n"));
+ PRINTK3((CARDNAME": SMC interrupt started\n"));
saved_bank = inw( ioaddr + BANK_SELECT );
@@ -1408,7 +1402,7 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id)
/* set a timeout value, so I don't stay here forever */
timeout = 4;
- PRINTK2((KERN_WARNING CARDNAME ": MASK IS %x \n", mask ));
+ PRINTK2((KERN_WARNING CARDNAME ": MASK IS %x\n", mask));
do {
/* read the status flag, and mask it */
status = inb( ioaddr + INTERRUPT ) & mask;
@@ -1418,7 +1412,7 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id)
handled = 1;
PRINTK3((KERN_WARNING CARDNAME
- ": Handling interrupt status %x \n", status ));
+ ": Handling interrupt status %x\n", status));
if (status & IM_RCV_INT) {
/* Got a packet(s). */
@@ -1452,7 +1446,7 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id)
} else if (status & IM_ALLOC_INT ) {
PRINTK2((KERN_DEBUG CARDNAME
- ": Allocation interrupt \n"));
+ ": Allocation interrupt\n"));
/* clear this interrupt so it doesn't happen again */
mask &= ~IM_ALLOC_INT;
@@ -1470,9 +1464,9 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id)
dev->stats.rx_fifo_errors++;
outb( IM_RX_OVRN_INT, ioaddr + INTERRUPT );
} else if (status & IM_EPH_INT ) {
- PRINTK((CARDNAME ": UNSUPPORTED: EPH INTERRUPT \n"));
+ PRINTK((CARDNAME ": UNSUPPORTED: EPH INTERRUPT\n"));
} else if (status & IM_ERCV_INT ) {
- PRINTK((CARDNAME ": UNSUPPORTED: ERCV INTERRUPT \n"));
+ PRINTK((CARDNAME ": UNSUPPORTED: ERCV INTERRUPT\n"));
outb( IM_ERCV_INT, ioaddr + INTERRUPT );
}
} while ( timeout -- );
@@ -1482,7 +1476,7 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id)
SMC_SELECT_BANK( 2 );
outb( mask, ioaddr + INT_MASK );
- PRINTK3(( KERN_WARNING CARDNAME ": MASK is now %x \n", mask ));
+ PRINTK3((KERN_WARNING CARDNAME ": MASK is now %x\n", mask));
outw( saved_pointer, ioaddr + POINTER );
SMC_SELECT_BANK( saved_bank );
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 860339d51d58..10cf0cbc2185 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -1285,7 +1285,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
smc_phy_interrupt(dev);
} else if (status & IM_ERCV_INT) {
SMC_ACK_INT(lp, IM_ERCV_INT);
- PRINTK("%s: UNSUPPORTED: ERCV INTERRUPT \n", dev->name);
+ PRINTK("%s: UNSUPPORTED: ERCV INTERRUPT\n", dev->name);
}
} while (--timeout);
@@ -1360,7 +1360,7 @@ static void smc_timeout(struct net_device *dev)
schedule_work(&lp->phy_configure);
/* We can accept TX packets again */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -1412,7 +1412,7 @@ static void smc_set_multicast_list(struct net_device *dev)
* within that register.
*/
else if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *cur_addr;
+ struct netdev_hw_addr *ha;
/* table for flipping the order of 3 bits */
static const unsigned char invert3[] = {0, 4, 2, 6, 1, 5, 3, 7};
@@ -1420,16 +1420,16 @@ static void smc_set_multicast_list(struct net_device *dev)
/* start with a table of all zeros: reject all */
memset(multicast_table, 0, sizeof(multicast_table));
- netdev_for_each_mc_addr(cur_addr, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
int position;
/* make sure this is a multicast address -
shouldn't this be a given if we have it here ? */
- if (!(*cur_addr->dmi_addr & 1))
+ if (!(*ha->addr & 1))
continue;
/* only use the low order bits */
- position = crc32_le(~0, cur_addr->dmi_addr, 6) & 0x3f;
+ position = crc32_le(~0, ha->addr, 6) & 0x3f;
/* do some messy swapping to put the bit in the right spot */
multicast_table[invert3[position&7]] |=
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index cbf520d38eac..cc559741b0fa 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -736,7 +736,7 @@ static void smsc911x_phy_adjust_link(struct net_device *dev)
SMSC_TRACE(HW, "configuring for carrier OK");
if ((pdata->gpio_orig_setting & GPIO_CFG_LED1_EN_) &&
(!pdata->using_extphy)) {
- /* Restore orginal GPIO configuration */
+ /* Restore original GPIO configuration */
pdata->gpio_setting = pdata->gpio_orig_setting;
smsc911x_reg_write(pdata, GPIO_CFG,
pdata->gpio_setting);
@@ -750,7 +750,7 @@ static void smsc911x_phy_adjust_link(struct net_device *dev)
if ((pdata->gpio_setting & GPIO_CFG_LED1_EN_) &&
(!pdata->using_extphy)) {
/* Force 10/100 LED off, after saving
- * orginal GPIO configuration */
+ * original GPIO configuration */
pdata->gpio_orig_setting = pdata->gpio_setting;
pdata->gpio_setting &= ~GPIO_CFG_LED1_EN_;
@@ -1335,7 +1335,6 @@ static int smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
smsc911x_tx_writefifo(pdata, (unsigned int *)bufp, wrsz);
freespace -= (skb->len + 32);
dev_kfree_skb(skb);
- dev->trans_start = jiffies;
if (unlikely(smsc911x_tx_get_txstatcount(pdata) >= 30))
smsc911x_tx_update_txcounters(dev);
@@ -1382,13 +1381,13 @@ static void smsc911x_set_multicast_list(struct net_device *dev)
/* Enabling specific multicast addresses */
unsigned int hash_high = 0;
unsigned int hash_low = 0;
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
pdata->set_bits_mask = MAC_CR_HPFILT_;
pdata->clear_bits_mask = (MAC_CR_PRMS_ | MAC_CR_MCPAS_);
- netdev_for_each_mc_addr(mc_list, dev) {
- unsigned int bitnum = smsc911x_hash(mc_list->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ unsigned int bitnum = smsc911x_hash(ha->addr);
unsigned int mask = 0x01 << (bitnum & 0x1F);
if (bitnum & 0x20)
diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c
index aafaebf45748..6cdee6a15f9f 100644
--- a/drivers/net/smsc9420.c
+++ b/drivers/net/smsc9420.c
@@ -1034,8 +1034,6 @@ static netdev_tx_t smsc9420_hard_start_xmit(struct sk_buff *skb,
smsc9420_reg_write(pd, TX_POLL_DEMAND, 1);
smsc9420_pci_flush_write(pd);
- dev->trans_start = jiffies;
-
return NETDEV_TX_OK;
}
@@ -1064,12 +1062,12 @@ static void smsc9420_set_multicast_list(struct net_device *dev)
mac_cr |= MAC_CR_MCPAS_;
mac_cr &= (~MAC_CR_HPFILT_);
} else if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
u32 hash_lo = 0, hash_hi = 0;
smsc_dbg(HW, "Multicast filter enabled");
- netdev_for_each_mc_addr(mc_list, dev) {
- u32 bit_num = smsc9420_hash(mc_list->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ u32 bit_num = smsc9420_hash(ha->addr);
u32 mask = 1 << (bit_num & 0x1F);
if (bit_num & 0x20)
diff --git a/drivers/net/sonic.c b/drivers/net/sonic.c
index 287c251075e5..26e25d7f5829 100644
--- a/drivers/net/sonic.c
+++ b/drivers/net/sonic.c
@@ -174,7 +174,7 @@ static void sonic_tx_timeout(struct net_device *dev)
/* Try to restart the adaptor. */
sonic_init(dev);
lp->stats.tx_errors++;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -263,8 +263,6 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
- dev->trans_start = jiffies;
-
return NETDEV_TX_OK;
}
@@ -531,7 +529,7 @@ static void sonic_multicast_list(struct net_device *dev)
{
struct sonic_local *lp = netdev_priv(dev);
unsigned int rcr;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
unsigned char *addr;
int i;
@@ -550,8 +548,8 @@ static void sonic_multicast_list(struct net_device *dev)
netdev_mc_count(dev));
sonic_set_cam_enable(dev, 1); /* always enable our own address */
i = 1;
- netdev_for_each_mc_addr(dmi, dev) {
- addr = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addr = ha->addr;
sonic_cda_put(dev, i, SONIC_CD_CAP0, addr[1] << 8 | addr[0]);
sonic_cda_put(dev, i, SONIC_CD_CAP1, addr[3] << 8 | addr[2]);
sonic_cda_put(dev, i, SONIC_CD_CAP2, addr[5] << 8 | addr[4]);
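The sonic.c hunks show the other recurring cleanup in this series: the dev->trans_start = jiffies store is dropped from the transmit path, since the networking core records the transmit time on the queue after each successful ndo_start_xmit, while the store in the tx-timeout handler is kept (with the "prevent tx timeout" comment) so the watchdog does not fire again immediately after a reset. A shape-only sketch with placeholder names:

/* Sketch of the post-cleanup shape; hardware handling elided. */
static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        /* ... hand the frame to the hardware ... */
        /* no dev->trans_start = jiffies here: the core stamps the queue */
        return NETDEV_TX_OK;
}

static void foo_tx_timeout(struct net_device *dev)
{
        /* ... reset the adapter ... */
        dev->trans_start = jiffies;     /* prevent an immediate re-timeout */
        netif_wake_queue(dev);
}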
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index dd3cb0f2d21f..3dff280b438b 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -625,7 +625,7 @@ spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr)
static void
spider_net_set_multi(struct net_device *netdev)
{
- struct dev_mc_list *mc;
+ struct netdev_hw_addr *ha;
u8 hash;
int i;
u32 reg;
@@ -646,8 +646,8 @@ spider_net_set_multi(struct net_device *netdev)
hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */
set_bit(0xfd, bitmask);
- netdev_for_each_mc_addr(mc, netdev) {
- hash = spider_net_get_multicast_hash(netdev, mc->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev) {
+ hash = spider_net_get_multicast_hash(netdev, ha->addr);
set_bit(hash, bitmask);
}
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 6dfa69899019..e19b5a143886 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -1173,7 +1173,7 @@ static void tx_timeout(struct net_device *dev)
/* Trigger an immediate transmit demand. */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
np->stats.tx_errors++;
netif_wake_queue(dev);
}
@@ -1312,8 +1312,6 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
netif_stop_queue(dev);
- dev->trans_start = jiffies;
-
return NETDEV_TX_OK;
}
@@ -1766,7 +1764,7 @@ static void set_rx_mode(struct net_device *dev)
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->base;
u32 rx_mode = MinVLANPrio;
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
int i;
#ifdef VLAN_SUPPORT
@@ -1804,8 +1802,8 @@ static void set_rx_mode(struct net_device *dev)
/* Use the 16 element perfect filter, skip first two entries. */
void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
__be16 *eaddrs;
- netdev_for_each_mc_addr(mclist, dev) {
- eaddrs = (__be16 *)mclist->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ eaddrs = (__be16 *) ha->addr;
writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4;
writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8;
@@ -1825,10 +1823,10 @@ static void set_rx_mode(struct net_device *dev)
__le16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
/* The chip uses the upper 9 CRC bits
as index into the hash table */
- int bit_nr = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23;
+ int bit_nr = ether_crc_le(ETH_ALEN, ha->addr) >> 23;
__le32 *fptr = (__le32 *) &mc_filter[(bit_nr >> 4) & ~1];
*fptr |= cpu_to_le32(1 << (bit_nr & 31));
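The starfire hunks apply the same netdev_hw_addr conversion in two filter modes: each address is written into the 16-entry perfect filter as three big-endian 16-bit words in reversed word order, with the hash filter keyed on the upper 9 CRC bits as a fallback. A condensed sketch of the perfect-filter write (register stride as in the hunk above; the function name is a placeholder):

/* Sketch: load one MAC address into a perfect-filter slot. The hardware
 * takes the three 16-bit words of the address in reverse order, each
 * written big-endian, at 4-byte strides with an 8-byte gap to the next slot. */
static void __iomem *write_perfect_filter_sketch(void __iomem *filter_addr,
                                                 const u8 *addr)
{
        const __be16 *eaddrs = (const __be16 *)addr;

        writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4;
        writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
        writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8;
        return filter_addr;
}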
diff --git a/drivers/net/stmmac/Makefile b/drivers/net/stmmac/Makefile
index c776af15fe1a..9691733ddb8e 100644
--- a/drivers/net/stmmac/Makefile
+++ b/drivers/net/stmmac/Makefile
@@ -2,4 +2,4 @@ obj-$(CONFIG_STMMAC_ETH) += stmmac.o
stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
- dwmac100.o $(stmmac-y)
+ dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o $(stmmac-y)
diff --git a/drivers/net/stmmac/common.h b/drivers/net/stmmac/common.h
index 2a58172e986a..144f76fd3e39 100644
--- a/drivers/net/stmmac/common.h
+++ b/drivers/net/stmmac/common.h
@@ -22,8 +22,26 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#include "descs.h"
#include <linux/netdevice.h>
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define STMMAC_VLAN_TAG_USED
+#include <linux/if_vlan.h>
+#endif
+
+#include "descs.h"
+
+#undef CHIP_DEBUG_PRINT
+/* Turn-on extra printk debug for MAC core, dma and descriptors */
+/* #define CHIP_DEBUG_PRINT */
+
+#ifdef CHIP_DEBUG_PRINT
+#define CHIP_DBG(fmt, args...) printk(fmt, ## args)
+#else
+#define CHIP_DBG(fmt, args...) do { } while (0)
+#endif
+
+#undef FRAME_FILTER_DEBUG
+/* #define FRAME_FILTER_DEBUG */
struct stmmac_extra_stats {
/* Transmit errors */
@@ -231,3 +249,4 @@ extern void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
unsigned int high, unsigned int low);
extern void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr,
unsigned int high, unsigned int low);
+extern void dwmac_dma_flush_tx_fifo(unsigned long ioaddr);
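common.h now carries a single CHIP_DBG() macro, compiled out unless CHIP_DEBUG_PRINT is defined, which the dwmac1000 core/dma hunks below switch to in place of their private DBG() macros. A small usage sketch (the function and register names are placeholders):

/* Sketch: CHIP_DBG() expands to printk() only when CHIP_DEBUG_PRINT is
 * defined in common.h; otherwise it is an empty statement, so verbose
 * register traces cost nothing in production builds. */
static void example_dump_ctrl(unsigned long ioaddr)
{
        CHIP_DBG(KERN_DEBUG "DMA control: 0x%08x\n",
                 readl(ioaddr + DMA_CONTROL));
}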
diff --git a/drivers/net/stmmac/dwmac100.c b/drivers/net/stmmac/dwmac100.c
deleted file mode 100644
index 4cacca614fc1..000000000000
--- a/drivers/net/stmmac/dwmac100.c
+++ /dev/null
@@ -1,538 +0,0 @@
-/*******************************************************************************
- This is the driver for the MAC 10/100 on-chip Ethernet controller
- currently tested on all the ST boards based on STb7109 and stx7200 SoCs.
-
- DWC Ether MAC 10/100 Universal version 4.0 has been used for developing
- this code.
-
- Copyright (C) 2007-2009 STMicroelectronics Ltd
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
-*******************************************************************************/
-
-#include <linux/crc32.h>
-#include <linux/mii.h>
-#include <linux/phy.h>
-#include <linux/slab.h>
-
-#include "common.h"
-#include "dwmac100.h"
-#include "dwmac_dma.h"
-
-#undef DWMAC100_DEBUG
-/*#define DWMAC100_DEBUG*/
-#ifdef DWMAC100_DEBUG
-#define DBG(fmt, args...) printk(fmt, ## args)
-#else
-#define DBG(fmt, args...) do { } while (0)
-#endif
-
-static void dwmac100_core_init(unsigned long ioaddr)
-{
- u32 value = readl(ioaddr + MAC_CONTROL);
-
- writel((value | MAC_CORE_INIT), ioaddr + MAC_CONTROL);
-
-#ifdef STMMAC_VLAN_TAG_USED
- writel(ETH_P_8021Q, ioaddr + MAC_VLAN1);
-#endif
- return;
-}
-
-static void dwmac100_dump_mac_regs(unsigned long ioaddr)
-{
- pr_info("\t----------------------------------------------\n"
- "\t DWMAC 100 CSR (base addr = 0x%8x)\n"
- "\t----------------------------------------------\n",
- (unsigned int)ioaddr);
- pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
- readl(ioaddr + MAC_CONTROL));
- pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH,
- readl(ioaddr + MAC_ADDR_HIGH));
- pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW,
- readl(ioaddr + MAC_ADDR_LOW));
- pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n",
- MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH));
- pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n",
- MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW));
- pr_info("\tflow control (offset 0x%x): 0x%08x\n",
- MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL));
- pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1,
- readl(ioaddr + MAC_VLAN1));
- pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2,
- readl(ioaddr + MAC_VLAN2));
- pr_info("\n\tMAC management counter registers\n");
- pr_info("\t MMC crtl (offset 0x%x): 0x%08x\n",
- MMC_CONTROL, readl(ioaddr + MMC_CONTROL));
- pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n",
- MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR));
- pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n",
- MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR));
- pr_info("\t MMC High Interrupt Mask (offset 0x%x): 0x%08x\n",
- MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK));
- pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n",
- MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
- return;
-}
-
-static int dwmac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
- u32 dma_rx)
-{
- u32 value = readl(ioaddr + DMA_BUS_MODE);
- /* DMA SW reset */
- value |= DMA_BUS_MODE_SFT_RESET;
- writel(value, ioaddr + DMA_BUS_MODE);
- do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET));
-
- /* Enable Application Access by writing to DMA CSR0 */
- writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
- ioaddr + DMA_BUS_MODE);
-
- /* Mask interrupts by writing to CSR7 */
- writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
-
- /* The base address of the RX/TX descriptor lists must be written into
- * DMA CSR3 and CSR4, respectively. */
- writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
- writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
-
- return 0;
-}
-
-/* Store and Forward capability is not used at all..
- * The transmit threshold can be programmed by
- * setting the TTC bits in the DMA control register.*/
-static void dwmac100_dma_operation_mode(unsigned long ioaddr, int txmode,
- int rxmode)
-{
- u32 csr6 = readl(ioaddr + DMA_CONTROL);
-
- if (txmode <= 32)
- csr6 |= DMA_CONTROL_TTC_32;
- else if (txmode <= 64)
- csr6 |= DMA_CONTROL_TTC_64;
- else
- csr6 |= DMA_CONTROL_TTC_128;
-
- writel(csr6, ioaddr + DMA_CONTROL);
-
- return;
-}
-
-static void dwmac100_dump_dma_regs(unsigned long ioaddr)
-{
- int i;
-
- DBG(KERN_DEBUG "DWMAC 100 DMA CSR \n");
- for (i = 0; i < 9; i++)
- pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
- (DMA_BUS_MODE + i * 4),
- readl(ioaddr + DMA_BUS_MODE + i * 4));
- DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n",
- DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR));
- DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n",
- DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR));
- return;
-}
-
-/* DMA controller has two counters to track the number of
- * the receive missed frames. */
-static void dwmac100_dma_diagnostic_fr(void *data,
- struct stmmac_extra_stats *x,
- unsigned long ioaddr)
-{
- struct net_device_stats *stats = (struct net_device_stats *)data;
- u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR);
-
- if (unlikely(csr8)) {
- if (csr8 & DMA_MISSED_FRAME_OVE) {
- stats->rx_over_errors += 0x800;
- x->rx_overflow_cntr += 0x800;
- } else {
- unsigned int ove_cntr;
- ove_cntr = ((csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17);
- stats->rx_over_errors += ove_cntr;
- x->rx_overflow_cntr += ove_cntr;
- }
-
- if (csr8 & DMA_MISSED_FRAME_OVE_M) {
- stats->rx_missed_errors += 0xffff;
- x->rx_missed_cntr += 0xffff;
- } else {
- unsigned int miss_f = (csr8 & DMA_MISSED_FRAME_M_CNTR);
- stats->rx_missed_errors += miss_f;
- x->rx_missed_cntr += miss_f;
- }
- }
- return;
-}
-
-static int dwmac100_get_tx_frame_status(void *data,
- struct stmmac_extra_stats *x,
- struct dma_desc *p, unsigned long ioaddr)
-{
- int ret = 0;
- struct net_device_stats *stats = (struct net_device_stats *)data;
-
- if (unlikely(p->des01.tx.error_summary)) {
- if (unlikely(p->des01.tx.underflow_error)) {
- x->tx_underflow++;
- stats->tx_fifo_errors++;
- }
- if (unlikely(p->des01.tx.no_carrier)) {
- x->tx_carrier++;
- stats->tx_carrier_errors++;
- }
- if (unlikely(p->des01.tx.loss_carrier)) {
- x->tx_losscarrier++;
- stats->tx_carrier_errors++;
- }
- if (unlikely((p->des01.tx.excessive_deferral) ||
- (p->des01.tx.excessive_collisions) ||
- (p->des01.tx.late_collision)))
- stats->collisions += p->des01.tx.collision_count;
- ret = -1;
- }
- if (unlikely(p->des01.tx.heartbeat_fail)) {
- x->tx_heartbeat++;
- stats->tx_heartbeat_errors++;
- ret = -1;
- }
- if (unlikely(p->des01.tx.deferred))
- x->tx_deferred++;
-
- return ret;
-}
-
-static int dwmac100_get_tx_len(struct dma_desc *p)
-{
- return p->des01.tx.buffer1_size;
-}
-
-/* This function verifies if each incoming frame has some errors
- * and, if required, updates the multicast statistics.
- * In case of success, it returns csum_none becasue the device
- * is not able to compute the csum in HW. */
-static int dwmac100_get_rx_frame_status(void *data,
- struct stmmac_extra_stats *x,
- struct dma_desc *p)
-{
- int ret = csum_none;
- struct net_device_stats *stats = (struct net_device_stats *)data;
-
- if (unlikely(p->des01.rx.last_descriptor == 0)) {
- pr_warning("dwmac100 Error: Oversized Ethernet "
- "frame spanned multiple buffers\n");
- stats->rx_length_errors++;
- return discard_frame;
- }
-
- if (unlikely(p->des01.rx.error_summary)) {
- if (unlikely(p->des01.rx.descriptor_error))
- x->rx_desc++;
- if (unlikely(p->des01.rx.partial_frame_error))
- x->rx_partial++;
- if (unlikely(p->des01.rx.run_frame))
- x->rx_runt++;
- if (unlikely(p->des01.rx.frame_too_long))
- x->rx_toolong++;
- if (unlikely(p->des01.rx.collision)) {
- x->rx_collision++;
- stats->collisions++;
- }
- if (unlikely(p->des01.rx.crc_error)) {
- x->rx_crc++;
- stats->rx_crc_errors++;
- }
- ret = discard_frame;
- }
- if (unlikely(p->des01.rx.dribbling))
- ret = discard_frame;
-
- if (unlikely(p->des01.rx.length_error)) {
- x->rx_length++;
- ret = discard_frame;
- }
- if (unlikely(p->des01.rx.mii_error)) {
- x->rx_mii++;
- ret = discard_frame;
- }
- if (p->des01.rx.multicast_frame) {
- x->rx_multicast++;
- stats->multicast++;
- }
- return ret;
-}
-
-static void dwmac100_irq_status(unsigned long ioaddr)
-{
- return;
-}
-
-static void dwmac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
- unsigned int reg_n)
-{
- stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
-}
-
-static void dwmac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
- unsigned int reg_n)
-{
- stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
-}
-
-static void dwmac100_set_filter(struct net_device *dev)
-{
- unsigned long ioaddr = dev->base_addr;
- u32 value = readl(ioaddr + MAC_CONTROL);
-
- if (dev->flags & IFF_PROMISC) {
- value |= MAC_CONTROL_PR;
- value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF | MAC_CONTROL_HO |
- MAC_CONTROL_HP);
- } else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE)
- || (dev->flags & IFF_ALLMULTI)) {
- value |= MAC_CONTROL_PM;
- value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF | MAC_CONTROL_HO);
- writel(0xffffffff, ioaddr + MAC_HASH_HIGH);
- writel(0xffffffff, ioaddr + MAC_HASH_LOW);
- } else if (netdev_mc_empty(dev)) { /* no multicast */
- value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF |
- MAC_CONTROL_HO | MAC_CONTROL_HP);
- } else {
- u32 mc_filter[2];
- struct dev_mc_list *mclist;
-
- /* Perfect filter mode for physical address and Hash
- filter for multicast */
- value |= MAC_CONTROL_HP;
- value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR |
- MAC_CONTROL_IF | MAC_CONTROL_HO);
-
- memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
- /* The upper 6 bits of the calculated CRC are used to
- * index the contens of the hash table */
- int bit_nr =
- ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
- /* The most significant bit determines the register to
- * use (H/L) while the other 5 bits determine the bit
- * within the register. */
- mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
- }
- writel(mc_filter[0], ioaddr + MAC_HASH_LOW);
- writel(mc_filter[1], ioaddr + MAC_HASH_HIGH);
- }
-
- writel(value, ioaddr + MAC_CONTROL);
-
- DBG(KERN_INFO "%s: CTRL reg: 0x%08x Hash regs: "
- "HI 0x%08x, LO 0x%08x\n",
- __func__, readl(ioaddr + MAC_CONTROL),
- readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW));
- return;
-}
-
-static void dwmac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
- unsigned int fc, unsigned int pause_time)
-{
- unsigned int flow = MAC_FLOW_CTRL_ENABLE;
-
- if (duplex)
- flow |= (pause_time << MAC_FLOW_CTRL_PT_SHIFT);
- writel(flow, ioaddr + MAC_FLOW_CTRL);
-
- return;
-}
-
-/* No PMT module supported for this Ethernet Controller.
- * Tested on ST platforms only.
- */
-static void dwmac100_pmt(unsigned long ioaddr, unsigned long mode)
-{
- return;
-}
-
-static void dwmac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
- int disable_rx_ic)
-{
- int i;
- for (i = 0; i < ring_size; i++) {
- p->des01.rx.own = 1;
- p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
- if (i == ring_size - 1)
- p->des01.rx.end_ring = 1;
- if (disable_rx_ic)
- p->des01.rx.disable_ic = 1;
- p++;
- }
- return;
-}
-
-static void dwmac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
-{
- int i;
- for (i = 0; i < ring_size; i++) {
- p->des01.tx.own = 0;
- if (i == ring_size - 1)
- p->des01.tx.end_ring = 1;
- p++;
- }
- return;
-}
-
-static int dwmac100_get_tx_owner(struct dma_desc *p)
-{
- return p->des01.tx.own;
-}
-
-static int dwmac100_get_rx_owner(struct dma_desc *p)
-{
- return p->des01.rx.own;
-}
-
-static void dwmac100_set_tx_owner(struct dma_desc *p)
-{
- p->des01.tx.own = 1;
-}
-
-static void dwmac100_set_rx_owner(struct dma_desc *p)
-{
- p->des01.rx.own = 1;
-}
-
-static int dwmac100_get_tx_ls(struct dma_desc *p)
-{
- return p->des01.tx.last_segment;
-}
-
-static void dwmac100_release_tx_desc(struct dma_desc *p)
-{
- int ter = p->des01.tx.end_ring;
-
- /* clean field used within the xmit */
- p->des01.tx.first_segment = 0;
- p->des01.tx.last_segment = 0;
- p->des01.tx.buffer1_size = 0;
-
- /* clean status reported */
- p->des01.tx.error_summary = 0;
- p->des01.tx.underflow_error = 0;
- p->des01.tx.no_carrier = 0;
- p->des01.tx.loss_carrier = 0;
- p->des01.tx.excessive_deferral = 0;
- p->des01.tx.excessive_collisions = 0;
- p->des01.tx.late_collision = 0;
- p->des01.tx.heartbeat_fail = 0;
- p->des01.tx.deferred = 0;
-
- /* set termination field */
- p->des01.tx.end_ring = ter;
-
- return;
-}
-
-static void dwmac100_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
- int csum_flag)
-{
- p->des01.tx.first_segment = is_fs;
- p->des01.tx.buffer1_size = len;
-}
-
-static void dwmac100_clear_tx_ic(struct dma_desc *p)
-{
- p->des01.tx.interrupt = 0;
-}
-
-static void dwmac100_close_tx_desc(struct dma_desc *p)
-{
- p->des01.tx.last_segment = 1;
- p->des01.tx.interrupt = 1;
-}
-
-static int dwmac100_get_rx_frame_len(struct dma_desc *p)
-{
- return p->des01.rx.frame_length;
-}
-
-struct stmmac_ops dwmac100_ops = {
- .core_init = dwmac100_core_init,
- .dump_regs = dwmac100_dump_mac_regs,
- .host_irq_status = dwmac100_irq_status,
- .set_filter = dwmac100_set_filter,
- .flow_ctrl = dwmac100_flow_ctrl,
- .pmt = dwmac100_pmt,
- .set_umac_addr = dwmac100_set_umac_addr,
- .get_umac_addr = dwmac100_get_umac_addr,
-};
-
-struct stmmac_dma_ops dwmac100_dma_ops = {
- .init = dwmac100_dma_init,
- .dump_regs = dwmac100_dump_dma_regs,
- .dma_mode = dwmac100_dma_operation_mode,
- .dma_diagnostic_fr = dwmac100_dma_diagnostic_fr,
- .enable_dma_transmission = dwmac_enable_dma_transmission,
- .enable_dma_irq = dwmac_enable_dma_irq,
- .disable_dma_irq = dwmac_disable_dma_irq,
- .start_tx = dwmac_dma_start_tx,
- .stop_tx = dwmac_dma_stop_tx,
- .start_rx = dwmac_dma_start_rx,
- .stop_rx = dwmac_dma_stop_rx,
- .dma_interrupt = dwmac_dma_interrupt,
-};
-
-struct stmmac_desc_ops dwmac100_desc_ops = {
- .tx_status = dwmac100_get_tx_frame_status,
- .rx_status = dwmac100_get_rx_frame_status,
- .get_tx_len = dwmac100_get_tx_len,
- .init_rx_desc = dwmac100_init_rx_desc,
- .init_tx_desc = dwmac100_init_tx_desc,
- .get_tx_owner = dwmac100_get_tx_owner,
- .get_rx_owner = dwmac100_get_rx_owner,
- .release_tx_desc = dwmac100_release_tx_desc,
- .prepare_tx_desc = dwmac100_prepare_tx_desc,
- .clear_tx_ic = dwmac100_clear_tx_ic,
- .close_tx_desc = dwmac100_close_tx_desc,
- .get_tx_ls = dwmac100_get_tx_ls,
- .set_tx_owner = dwmac100_set_tx_owner,
- .set_rx_owner = dwmac100_set_rx_owner,
- .get_rx_frame_len = dwmac100_get_rx_frame_len,
-};
-
-struct mac_device_info *dwmac100_setup(unsigned long ioaddr)
-{
- struct mac_device_info *mac;
-
- mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
-
- pr_info("\tDWMAC100\n");
-
- mac->mac = &dwmac100_ops;
- mac->desc = &dwmac100_desc_ops;
- mac->dma = &dwmac100_dma_ops;
-
- mac->pmt = PMT_NOT_SUPPORTED;
- mac->link.port = MAC_CONTROL_PS;
- mac->link.duplex = MAC_CONTROL_F;
- mac->link.speed = 0;
- mac->mii.addr = MAC_MII_ADDR;
- mac->mii.data = MAC_MII_DATA;
-
- return mac;
-}
diff --git a/drivers/net/stmmac/dwmac100.h b/drivers/net/stmmac/dwmac100.h
index 0f8f110d004a..97956cbf1cb4 100644
--- a/drivers/net/stmmac/dwmac100.h
+++ b/drivers/net/stmmac/dwmac100.h
@@ -22,6 +22,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+#include <linux/phy.h>
+#include "common.h"
+
/*----------------------------------------------------------------------------
* MAC BLOCK defines
*---------------------------------------------------------------------------*/
@@ -114,3 +117,5 @@ enum ttc_control {
#define DMA_MISSED_FRAME_OVE_CNTR 0x0ffe0000 /* Overflow Frame Counter */
#define DMA_MISSED_FRAME_OVE_M 0x00010000 /* Missed Frame Overflow */
#define DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Counter */
+
+extern struct stmmac_dma_ops dwmac100_dma_ops;
diff --git a/drivers/net/stmmac/dwmac1000.h b/drivers/net/stmmac/dwmac1000.h
index 62dca0e384e7..d8d0f3553770 100644
--- a/drivers/net/stmmac/dwmac1000.h
+++ b/drivers/net/stmmac/dwmac1000.h
@@ -172,7 +172,6 @@ enum rfd {
deac_full_minus_4 = 0x00401800,
};
#define DMA_CONTROL_TSF 0x00200000 /* Transmit Store and Forward */
-#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
enum ttc_control {
DMA_CONTROL_TTC_64 = 0x00000000,
@@ -206,15 +205,4 @@ enum rtc_control {
#define GMAC_MMC_TX_INTR 0x108
#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
-#undef DWMAC1000_DEBUG
-/* #define DWMAC1000__DEBUG */
-#undef FRAME_FILTER_DEBUG
-/* #define FRAME_FILTER_DEBUG */
-#ifdef DWMAC1000__DEBUG
-#define DBG(fmt, args...) printk(fmt, ## args)
-#else
-#define DBG(fmt, args...) do { } while (0)
-#endif
-
extern struct stmmac_dma_ops dwmac1000_dma_ops;
-extern struct stmmac_desc_ops dwmac1000_desc_ops;
diff --git a/drivers/net/stmmac/dwmac1000_core.c b/drivers/net/stmmac/dwmac1000_core.c
index 5bd95ebfe498..0aa89ae9b8e9 100644
--- a/drivers/net/stmmac/dwmac1000_core.c
+++ b/drivers/net/stmmac/dwmac1000_core.c
@@ -83,8 +83,8 @@ static void dwmac1000_set_filter(struct net_device *dev)
unsigned long ioaddr = dev->base_addr;
unsigned int value = 0;
- DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
- __func__, netdev_mc_count(dev), netdev_uc_count(dev));
+ CHIP_DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
+ __func__, netdev_mc_count(dev), netdev_uc_count(dev));
if (dev->flags & IFF_PROMISC)
value = GMAC_FRAME_FILTER_PR;
@@ -95,17 +95,17 @@ static void dwmac1000_set_filter(struct net_device *dev)
writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
} else if (!netdev_mc_empty(dev)) {
u32 mc_filter[2];
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
/* Hash filter for multicast */
value = GMAC_FRAME_FILTER_HMC;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
/* The upper 6 bits of the calculated CRC are used to
index the contents of the hash table */
int bit_nr =
- bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26;
+ bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;
/* The most significant bit determines the register to
* use (H/L) while the other 5 bits determine the bit
* within the register. */
@@ -136,7 +136,7 @@ static void dwmac1000_set_filter(struct net_device *dev)
#endif
writel(value, ioaddr + GMAC_FRAME_FILTER);
- DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
+ CHIP_DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
"HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER),
readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
@@ -148,18 +148,18 @@ static void dwmac1000_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
{
unsigned int flow = 0;
- DBG(KERN_DEBUG "GMAC Flow-Control:\n");
+ CHIP_DBG(KERN_DEBUG "GMAC Flow-Control:\n");
if (fc & FLOW_RX) {
- DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
+ CHIP_DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
flow |= GMAC_FLOW_CTRL_RFE;
}
if (fc & FLOW_TX) {
- DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
+ CHIP_DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
flow |= GMAC_FLOW_CTRL_TFE;
}
if (duplex) {
- DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time);
+ CHIP_DBG(KERN_DEBUG "\tduplex mode: PAUSE %d\n", pause_time);
flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
}
@@ -172,10 +172,10 @@ static void dwmac1000_pmt(unsigned long ioaddr, unsigned long mode)
unsigned int pmt = 0;
if (mode == WAKE_MAGIC) {
- DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
+ CHIP_DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
pmt |= power_down | magic_pkt_en;
} else if (mode == WAKE_UCAST) {
- DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
+ CHIP_DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
pmt |= global_unicast;
}
@@ -190,16 +190,16 @@ static void dwmac1000_irq_status(unsigned long ioaddr)
/* Not used events (e.g. MMC interrupts) are not handled. */
if ((intr_status & mmc_tx_irq))
- DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
+ CHIP_DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
readl(ioaddr + GMAC_MMC_TX_INTR));
if (unlikely(intr_status & mmc_rx_irq))
- DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
+ CHIP_DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
readl(ioaddr + GMAC_MMC_RX_INTR));
if (unlikely(intr_status & mmc_rx_csum_offload_irq))
- DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
+ CHIP_DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
if (unlikely(intr_status & pmt_irq)) {
- DBG(KERN_DEBUG "GMAC: received Magic frame\n");
+ CHIP_DBG(KERN_DEBUG "GMAC: received Magic frame\n");
/* clear the PMT bits 5 and 6 by reading the PMT
* status register. */
readl(ioaddr + GMAC_PMT);
@@ -230,7 +230,6 @@ struct mac_device_info *dwmac1000_setup(unsigned long ioaddr)
mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
mac->mac = &dwmac1000_ops;
- mac->desc = &dwmac1000_desc_ops;
mac->dma = &dwmac1000_dma_ops;
mac->pmt = PMT_SUPPORTED;
diff --git a/drivers/net/stmmac/dwmac1000_dma.c b/drivers/net/stmmac/dwmac1000_dma.c
index 39d436a2da68..a547aa99e114 100644
--- a/drivers/net/stmmac/dwmac1000_dma.c
+++ b/drivers/net/stmmac/dwmac1000_dma.c
@@ -3,7 +3,7 @@
DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
developing this code.
- This contains the functions to handle the dma and descriptors.
+ This contains the functions to handle the dma.
Copyright (C) 2007-2009 STMicroelectronics Ltd
@@ -58,29 +58,20 @@ static int dwmac1000_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
return 0;
}
-/* Transmit FIFO flush operation */
-static void dwmac1000_flush_tx_fifo(unsigned long ioaddr)
-{
- u32 csr6 = readl(ioaddr + DMA_CONTROL);
- writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);
-
- do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
-}
-
static void dwmac1000_dma_operation_mode(unsigned long ioaddr, int txmode,
int rxmode)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
if (txmode == SF_DMA_MODE) {
- DBG(KERN_DEBUG "GMAC: enabling TX store and forward mode\n");
+ CHIP_DBG(KERN_DEBUG "GMAC: enable TX store and forward mode\n");
/* Transmit COE type 2 cannot be done in cut-through mode. */
csr6 |= DMA_CONTROL_TSF;
/* Operating on second frame increase the performance
* especially when transmit store-and-forward is used.*/
csr6 |= DMA_CONTROL_OSF;
} else {
- DBG(KERN_DEBUG "GMAC: disabling TX store and forward mode"
+ CHIP_DBG(KERN_DEBUG "GMAC: disabling TX store and forward mode"
" (threshold = %d)\n", txmode);
csr6 &= ~DMA_CONTROL_TSF;
csr6 &= DMA_CONTROL_TC_TX_MASK;
@@ -98,10 +89,10 @@ static void dwmac1000_dma_operation_mode(unsigned long ioaddr, int txmode,
}
if (rxmode == SF_DMA_MODE) {
- DBG(KERN_DEBUG "GMAC: enabling RX store and forward mode\n");
+ CHIP_DBG(KERN_DEBUG "GMAC: enable RX store and forward mode\n");
csr6 |= DMA_CONTROL_RSF;
} else {
- DBG(KERN_DEBUG "GMAC: disabling RX store and forward mode"
+ CHIP_DBG(KERN_DEBUG "GMAC: disabling RX store and forward mode"
" (threshold = %d)\n", rxmode);
csr6 &= ~DMA_CONTROL_RSF;
csr6 &= DMA_CONTROL_TC_RX_MASK;
@@ -141,305 +132,6 @@ static void dwmac1000_dump_dma_regs(unsigned long ioaddr)
return;
}
-static int dwmac1000_get_tx_frame_status(void *data,
- struct stmmac_extra_stats *x,
- struct dma_desc *p, unsigned long ioaddr)
-{
- int ret = 0;
- struct net_device_stats *stats = (struct net_device_stats *)data;
-
- if (unlikely(p->des01.etx.error_summary)) {
- DBG(KERN_ERR "GMAC TX error... 0x%08x\n", p->des01.etx);
- if (unlikely(p->des01.etx.jabber_timeout)) {
- DBG(KERN_ERR "\tjabber_timeout error\n");
- x->tx_jabber++;
- }
-
- if (unlikely(p->des01.etx.frame_flushed)) {
- DBG(KERN_ERR "\tframe_flushed error\n");
- x->tx_frame_flushed++;
- dwmac1000_flush_tx_fifo(ioaddr);
- }
-
- if (unlikely(p->des01.etx.loss_carrier)) {
- DBG(KERN_ERR "\tloss_carrier error\n");
- x->tx_losscarrier++;
- stats->tx_carrier_errors++;
- }
- if (unlikely(p->des01.etx.no_carrier)) {
- DBG(KERN_ERR "\tno_carrier error\n");
- x->tx_carrier++;
- stats->tx_carrier_errors++;
- }
- if (unlikely(p->des01.etx.late_collision)) {
- DBG(KERN_ERR "\tlate_collision error\n");
- stats->collisions += p->des01.etx.collision_count;
- }
- if (unlikely(p->des01.etx.excessive_collisions)) {
- DBG(KERN_ERR "\texcessive_collisions\n");
- stats->collisions += p->des01.etx.collision_count;
- }
- if (unlikely(p->des01.etx.excessive_deferral)) {
- DBG(KERN_INFO "\texcessive tx_deferral\n");
- x->tx_deferred++;
- }
-
- if (unlikely(p->des01.etx.underflow_error)) {
- DBG(KERN_ERR "\tunderflow error\n");
- dwmac1000_flush_tx_fifo(ioaddr);
- x->tx_underflow++;
- }
-
- if (unlikely(p->des01.etx.ip_header_error)) {
- DBG(KERN_ERR "\tTX IP header csum error\n");
- x->tx_ip_header_error++;
- }
-
- if (unlikely(p->des01.etx.payload_error)) {
- DBG(KERN_ERR "\tAddr/Payload csum error\n");
- x->tx_payload_error++;
- dwmac1000_flush_tx_fifo(ioaddr);
- }
-
- ret = -1;
- }
-
- if (unlikely(p->des01.etx.deferred)) {
- DBG(KERN_INFO "GMAC TX status: tx deferred\n");
- x->tx_deferred++;
- }
-#ifdef STMMAC_VLAN_TAG_USED
- if (p->des01.etx.vlan_frame) {
- DBG(KERN_INFO "GMAC TX status: VLAN frame\n");
- x->tx_vlan++;
- }
-#endif
-
- return ret;
-}
-
-static int dwmac1000_get_tx_len(struct dma_desc *p)
-{
- return p->des01.etx.buffer1_size;
-}
-
-static int dwmac1000_coe_rdes0(int ipc_err, int type, int payload_err)
-{
- int ret = good_frame;
- u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;
-
- /* bits 5 7 0 | Frame status
- * ----------------------------------------------------------
- * 0 0 0 | IEEE 802.3 Type frame (length < 1536 octects)
- * 1 0 0 | IPv4/6 No CSUM errorS.
- * 1 0 1 | IPv4/6 CSUM PAYLOAD error
- * 1 1 0 | IPv4/6 CSUM IP HR error
- * 1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errorS
- * 0 0 1 | IPv4/6 unsupported IP PAYLOAD
- * 0 1 1 | COE bypassed.. no IPv4/6 frame
- * 0 1 0 | Reserved.
- */
- if (status == 0x0) {
- DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n");
- ret = good_frame;
- } else if (status == 0x4) {
- DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errorS.\n");
- ret = good_frame;
- } else if (status == 0x5) {
- DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n");
- ret = csum_none;
- } else if (status == 0x6) {
- DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n");
- ret = csum_none;
- } else if (status == 0x7) {
- DBG(KERN_ERR
- "RX Des0 status: IPv4/6 Header and Payload Error.\n");
- ret = csum_none;
- } else if (status == 0x1) {
- DBG(KERN_ERR
- "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n");
- ret = discard_frame;
- } else if (status == 0x3) {
- DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n");
- ret = discard_frame;
- }
- return ret;
-}
-
-static int dwmac1000_get_rx_frame_status(void *data,
- struct stmmac_extra_stats *x, struct dma_desc *p)
-{
- int ret = good_frame;
- struct net_device_stats *stats = (struct net_device_stats *)data;
-
- if (unlikely(p->des01.erx.error_summary)) {
- DBG(KERN_ERR "GMAC RX Error Summary... 0x%08x\n", p->des01.erx);
- if (unlikely(p->des01.erx.descriptor_error)) {
- DBG(KERN_ERR "\tdescriptor error\n");
- x->rx_desc++;
- stats->rx_length_errors++;
- }
- if (unlikely(p->des01.erx.overflow_error)) {
- DBG(KERN_ERR "\toverflow error\n");
- x->rx_gmac_overflow++;
- }
-
- if (unlikely(p->des01.erx.ipc_csum_error))
- DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n");
-
- if (unlikely(p->des01.erx.late_collision)) {
- DBG(KERN_ERR "\tlate_collision error\n");
- stats->collisions++;
- stats->collisions++;
- }
- if (unlikely(p->des01.erx.receive_watchdog)) {
- DBG(KERN_ERR "\treceive_watchdog error\n");
- x->rx_watchdog++;
- }
- if (unlikely(p->des01.erx.error_gmii)) {
- DBG(KERN_ERR "\tReceive Error\n");
- x->rx_mii++;
- }
- if (unlikely(p->des01.erx.crc_error)) {
- DBG(KERN_ERR "\tCRC error\n");
- x->rx_crc++;
- stats->rx_crc_errors++;
- }
- ret = discard_frame;
- }
-
- /* After a payload csum error, the ES bit is set.
- * It doesn't match with the information reported into the databook.
- * At any rate, we need to understand if the CSUM hw computation is ok
- * and report this info to the upper layers. */
- ret = dwmac1000_coe_rdes0(p->des01.erx.ipc_csum_error,
- p->des01.erx.frame_type, p->des01.erx.payload_csum_error);
-
- if (unlikely(p->des01.erx.dribbling)) {
- DBG(KERN_ERR "GMAC RX: dribbling error\n");
- ret = discard_frame;
- }
- if (unlikely(p->des01.erx.sa_filter_fail)) {
- DBG(KERN_ERR "GMAC RX : Source Address filter fail\n");
- x->sa_rx_filter_fail++;
- ret = discard_frame;
- }
- if (unlikely(p->des01.erx.da_filter_fail)) {
- DBG(KERN_ERR "GMAC RX : Destination Address filter fail\n");
- x->da_rx_filter_fail++;
- ret = discard_frame;
- }
- if (unlikely(p->des01.erx.length_error)) {
- DBG(KERN_ERR "GMAC RX: length_error error\n");
- x->rx_length++;
- ret = discard_frame;
- }
-#ifdef STMMAC_VLAN_TAG_USED
- if (p->des01.erx.vlan_tag) {
- DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n");
- x->rx_vlan++;
- }
-#endif
- return ret;
-}
-
-static void dwmac1000_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
- int disable_rx_ic)
-{
- int i;
- for (i = 0; i < ring_size; i++) {
- p->des01.erx.own = 1;
- p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
- /* To support jumbo frames */
- p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
- if (i == ring_size - 1)
- p->des01.erx.end_ring = 1;
- if (disable_rx_ic)
- p->des01.erx.disable_ic = 1;
- p++;
- }
- return;
-}
-
-static void dwmac1000_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
-{
- int i;
-
- for (i = 0; i < ring_size; i++) {
- p->des01.etx.own = 0;
- if (i == ring_size - 1)
- p->des01.etx.end_ring = 1;
- p++;
- }
-
- return;
-}
-
-static int dwmac1000_get_tx_owner(struct dma_desc *p)
-{
- return p->des01.etx.own;
-}
-
-static int dwmac1000_get_rx_owner(struct dma_desc *p)
-{
- return p->des01.erx.own;
-}
-
-static void dwmac1000_set_tx_owner(struct dma_desc *p)
-{
- p->des01.etx.own = 1;
-}
-
-static void dwmac1000_set_rx_owner(struct dma_desc *p)
-{
- p->des01.erx.own = 1;
-}
-
-static int dwmac1000_get_tx_ls(struct dma_desc *p)
-{
- return p->des01.etx.last_segment;
-}
-
-static void dwmac1000_release_tx_desc(struct dma_desc *p)
-{
- int ter = p->des01.etx.end_ring;
-
- memset(p, 0, sizeof(struct dma_desc));
- p->des01.etx.end_ring = ter;
-
- return;
-}
-
-static void dwmac1000_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
- int csum_flag)
-{
- p->des01.etx.first_segment = is_fs;
- if (unlikely(len > BUF_SIZE_4KiB)) {
- p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
- p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB;
- } else {
- p->des01.etx.buffer1_size = len;
- }
- if (likely(csum_flag))
- p->des01.etx.checksum_insertion = cic_full;
-}
-
-static void dwmac1000_clear_tx_ic(struct dma_desc *p)
-{
- p->des01.etx.interrupt = 0;
-}
-
-static void dwmac1000_close_tx_desc(struct dma_desc *p)
-{
- p->des01.etx.last_segment = 1;
- p->des01.etx.interrupt = 1;
-}
-
-static int dwmac1000_get_rx_frame_len(struct dma_desc *p)
-{
- return p->des01.erx.frame_length;
-}
-
struct stmmac_dma_ops dwmac1000_dma_ops = {
.init = dwmac1000_dma_init,
.dump_regs = dwmac1000_dump_dma_regs,
@@ -454,21 +146,3 @@ struct stmmac_dma_ops dwmac1000_dma_ops = {
.stop_rx = dwmac_dma_stop_rx,
.dma_interrupt = dwmac_dma_interrupt,
};
-
-struct stmmac_desc_ops dwmac1000_desc_ops = {
- .tx_status = dwmac1000_get_tx_frame_status,
- .rx_status = dwmac1000_get_rx_frame_status,
- .get_tx_len = dwmac1000_get_tx_len,
- .init_rx_desc = dwmac1000_init_rx_desc,
- .init_tx_desc = dwmac1000_init_tx_desc,
- .get_tx_owner = dwmac1000_get_tx_owner,
- .get_rx_owner = dwmac1000_get_rx_owner,
- .release_tx_desc = dwmac1000_release_tx_desc,
- .prepare_tx_desc = dwmac1000_prepare_tx_desc,
- .clear_tx_ic = dwmac1000_clear_tx_ic,
- .close_tx_desc = dwmac1000_close_tx_desc,
- .get_tx_ls = dwmac1000_get_tx_ls,
- .set_tx_owner = dwmac1000_set_tx_owner,
- .set_rx_owner = dwmac1000_set_rx_owner,
- .get_rx_frame_len = dwmac1000_get_rx_frame_len,
-};
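With the descriptor helpers removed from dwmac1000_dma.c (and from the deleted dwmac100.c), the enhanced and normal descriptor implementations move to the new enh_desc.c and norm_desc.c objects added in the Makefile, and the driver core reaches them through the struct stmmac_desc_ops table in mac_device_info. A sketch of a call through that table (the priv->hw naming is an assumption):

/* Sketch: descriptor handling goes through the ops table, so the same
 * core code works with both the GMAC (enhanced) and MAC10/100 (normal)
 * descriptor layouts selected at setup time. */
static void stmmac_reclaim_one_sketch(struct stmmac_priv *priv,
                                      struct dma_desc *p)
{
        if (priv->hw->desc->get_tx_owner(p))
                return;         /* still owned by the DMA engine */

        priv->hw->desc->release_tx_desc(p);
}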
diff --git a/drivers/net/stmmac/dwmac100_core.c b/drivers/net/stmmac/dwmac100_core.c
new file mode 100644
index 000000000000..fab14a4cb14c
--- /dev/null
+++ b/drivers/net/stmmac/dwmac100_core.c
@@ -0,0 +1,201 @@
+/*******************************************************************************
+ This is the driver for the MAC 10/100 on-chip Ethernet controller
+ currently tested on all the ST boards based on STb7109 and stx7200 SoCs.
+
+ DWC Ether MAC 10/100 Universal version 4.0 has been used for developing
+ this code.
+
+ This only implements the mac core functions for this chip.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/crc32.h>
+#include "dwmac100.h"
+
+static void dwmac100_core_init(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + MAC_CONTROL);
+
+ writel((value | MAC_CORE_INIT), ioaddr + MAC_CONTROL);
+
+#ifdef STMMAC_VLAN_TAG_USED
+ writel(ETH_P_8021Q, ioaddr + MAC_VLAN1);
+#endif
+ return;
+}
+
+static void dwmac100_dump_mac_regs(unsigned long ioaddr)
+{
+ pr_info("\t----------------------------------------------\n"
+ "\t DWMAC 100 CSR (base addr = 0x%8x)\n"
+ "\t----------------------------------------------\n",
+ (unsigned int)ioaddr);
+ pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
+ readl(ioaddr + MAC_CONTROL));
+ pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH,
+ readl(ioaddr + MAC_ADDR_HIGH));
+ pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW,
+ readl(ioaddr + MAC_ADDR_LOW));
+ pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n",
+ MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH));
+ pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n",
+ MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW));
+ pr_info("\tflow control (offset 0x%x): 0x%08x\n",
+ MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL));
+ pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1,
+ readl(ioaddr + MAC_VLAN1));
+ pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2,
+ readl(ioaddr + MAC_VLAN2));
+ pr_info("\n\tMAC management counter registers\n");
+ pr_info("\t MMC crtl (offset 0x%x): 0x%08x\n",
+ MMC_CONTROL, readl(ioaddr + MMC_CONTROL));
+ pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n",
+ MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR));
+ pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n",
+ MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR));
+ pr_info("\t MMC High Interrupt Mask (offset 0x%x): 0x%08x\n",
+ MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK));
+ pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n",
+ MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
+ return;
+}
+
+static void dwmac100_irq_status(unsigned long ioaddr)
+{
+ return;
+}
+
+static void dwmac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
+ unsigned int reg_n)
+{
+ stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
+}
+
+static void dwmac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
+ unsigned int reg_n)
+{
+ stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
+}
+
+static void dwmac100_set_filter(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ u32 value = readl(ioaddr + MAC_CONTROL);
+
+ if (dev->flags & IFF_PROMISC) {
+ value |= MAC_CONTROL_PR;
+ value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF | MAC_CONTROL_HO |
+ MAC_CONTROL_HP);
+ } else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE)
+ || (dev->flags & IFF_ALLMULTI)) {
+ value |= MAC_CONTROL_PM;
+ value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF | MAC_CONTROL_HO);
+ writel(0xffffffff, ioaddr + MAC_HASH_HIGH);
+ writel(0xffffffff, ioaddr + MAC_HASH_LOW);
+ } else if (netdev_mc_empty(dev)) { /* no multicast */
+ value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF |
+ MAC_CONTROL_HO | MAC_CONTROL_HP);
+ } else {
+ u32 mc_filter[2];
+ struct netdev_hw_addr *ha;
+
+ /* Perfect filter mode for physical address and Hash
+ filter for multicast */
+ value |= MAC_CONTROL_HP;
+ value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR |
+ MAC_CONTROL_IF | MAC_CONTROL_HO);
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ netdev_for_each_mc_addr(ha, dev) {
+ /* The upper 6 bits of the calculated CRC are used to
+ * index the contents of the hash table */
+ int bit_nr =
+ ether_crc(ETH_ALEN, ha->addr) >> 26;
+ /* The most significant bit determines the register to
+ * use (H/L) while the other 5 bits determine the bit
+ * within the register. */
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ }
+ writel(mc_filter[0], ioaddr + MAC_HASH_LOW);
+ writel(mc_filter[1], ioaddr + MAC_HASH_HIGH);
+ }
+
+ writel(value, ioaddr + MAC_CONTROL);
+
+ CHIP_DBG(KERN_INFO "%s: CTRL reg: 0x%08x Hash regs: "
+ "HI 0x%08x, LO 0x%08x\n",
+ __func__, readl(ioaddr + MAC_CONTROL),
+ readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW));
+ return;
+}
+
+static void dwmac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
+ unsigned int fc, unsigned int pause_time)
+{
+ unsigned int flow = MAC_FLOW_CTRL_ENABLE;
+
+ if (duplex)
+ flow |= (pause_time << MAC_FLOW_CTRL_PT_SHIFT);
+ writel(flow, ioaddr + MAC_FLOW_CTRL);
+
+ return;
+}
+
+/* No PMT module supported for this Ethernet Controller.
+ * Tested on ST platforms only.
+ */
+static void dwmac100_pmt(unsigned long ioaddr, unsigned long mode)
+{
+ return;
+}
+
+struct stmmac_ops dwmac100_ops = {
+ .core_init = dwmac100_core_init,
+ .dump_regs = dwmac100_dump_mac_regs,
+ .host_irq_status = dwmac100_irq_status,
+ .set_filter = dwmac100_set_filter,
+ .flow_ctrl = dwmac100_flow_ctrl,
+ .pmt = dwmac100_pmt,
+ .set_umac_addr = dwmac100_set_umac_addr,
+ .get_umac_addr = dwmac100_get_umac_addr,
+};
+
+struct mac_device_info *dwmac100_setup(unsigned long ioaddr)
+{
+ struct mac_device_info *mac;
+
+ mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
+
+ pr_info("\tDWMAC100\n");
+
+ mac->mac = &dwmac100_ops;
+ mac->dma = &dwmac100_dma_ops;
+
+ mac->pmt = PMT_NOT_SUPPORTED;
+ mac->link.port = MAC_CONTROL_PS;
+ mac->link.duplex = MAC_CONTROL_F;
+ mac->link.speed = 0;
+ mac->mii.addr = MAC_MII_ADDR;
+ mac->mii.data = MAC_MII_DATA;
+
+ return mac;
+}
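For reference, a stand-alone sketch (not part of the patch) of the hash-filter indexing that dwmac100_set_filter() relies on. It re-implements the CRC step locally as ether_crc_sketch(), a hypothetical helper mirroring the kernel's big-endian bitwise CRC-32: the upper 6 bits of the CRC pick one of 64 hash positions, the top bit of those six selects the HIGH or LOW hash register, and the remaining 5 bits select the bit inside it.

#include <stdio.h>
#include <stdint.h>

/* Bitwise big-endian CRC-32 over 'len' bytes (polynomial 0x04c11db7, the
 * bits of each byte fed LSB first), mirroring the kernel's ether_crc(). */
static uint32_t ether_crc_sketch(int len, const uint8_t *data)
{
	uint32_t crc = 0xffffffff;
	int i, bit;

	for (i = 0; i < len; i++) {
		uint8_t octet = data[i];
		for (bit = 0; bit < 8; bit++, octet >>= 1)
			crc = (crc << 1) ^
			      (((crc >> 31) ^ (octet & 1)) ? 0x04c11db7 : 0);
	}
	return crc;
}

int main(void)
{
	/* Hypothetical multicast address, for illustration only. */
	const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	uint32_t bit_nr = ether_crc_sketch(6, mc) >> 26;  /* upper 6 CRC bits */

	printf("hash index %u -> MAC_HASH_%s, bit %u\n",
	       (unsigned)bit_nr,
	       (bit_nr >> 5) ? "HIGH" : "LOW",  /* bit 5 selects the register */
	       (unsigned)(bit_nr & 31));        /* bits 0-4 select the bit    */
	return 0;
}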
diff --git a/drivers/net/stmmac/dwmac100_dma.c b/drivers/net/stmmac/dwmac100_dma.c
new file mode 100644
index 000000000000..96d098d68ad6
--- /dev/null
+++ b/drivers/net/stmmac/dwmac100_dma.c
@@ -0,0 +1,138 @@
+/*******************************************************************************
+ This is the driver for the MAC 10/100 on-chip Ethernet controller
+ currently tested on all the ST boards based on STb7109 and stx7200 SoCs.
+
+ DWC Ether MAC 10/100 Universal version 4.0 has been used for developing
+ this code.
+
+ This contains the functions to handle the dma.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include "dwmac100.h"
+#include "dwmac_dma.h"
+
+static int dwmac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
+ u32 dma_rx)
+{
+ u32 value = readl(ioaddr + DMA_BUS_MODE);
+ /* DMA SW reset */
+ value |= DMA_BUS_MODE_SFT_RESET;
+ writel(value, ioaddr + DMA_BUS_MODE);
+ do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET));
+
+ /* Enable Application Access by writing to DMA CSR0 */
+ writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
+ ioaddr + DMA_BUS_MODE);
+
+ /* Mask interrupts by writing to CSR7 */
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+
+ /* The base addresses of the RX/TX descriptor lists must be written into
+ * DMA CSR3 and CSR4, respectively. */
+ writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
+ writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
+
+ return 0;
+}
+
+/* Store and Forward capability is not used at all.
+ * The transmit threshold can be programmed by
+ * setting the TTC bits in the DMA control register.*/
+static void dwmac100_dma_operation_mode(unsigned long ioaddr, int txmode,
+ int rxmode)
+{
+ u32 csr6 = readl(ioaddr + DMA_CONTROL);
+
+ if (txmode <= 32)
+ csr6 |= DMA_CONTROL_TTC_32;
+ else if (txmode <= 64)
+ csr6 |= DMA_CONTROL_TTC_64;
+ else
+ csr6 |= DMA_CONTROL_TTC_128;
+
+ writel(csr6, ioaddr + DMA_CONTROL);
+
+ return;
+}
+
+static void dwmac100_dump_dma_regs(unsigned long ioaddr)
+{
+ int i;
+
+ CHIP_DBG(KERN_DEBUG "DWMAC 100 DMA CSR\n");
+ for (i = 0; i < 9; i++)
+ pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
+ (DMA_BUS_MODE + i * 4),
+ readl(ioaddr + DMA_BUS_MODE + i * 4));
+ CHIP_DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n",
+ DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR));
+ CHIP_DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n",
+ DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR));
+ return;
+}
+
+/* DMA controller has two counters to track the number of
+ * the receive missed frames. */
+static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
+ unsigned long ioaddr)
+{
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+ u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR);
+
+ if (unlikely(csr8)) {
+ if (csr8 & DMA_MISSED_FRAME_OVE) {
+ stats->rx_over_errors += 0x800;
+ x->rx_overflow_cntr += 0x800;
+ } else {
+ unsigned int ove_cntr;
+ ove_cntr = ((csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17);
+ stats->rx_over_errors += ove_cntr;
+ x->rx_overflow_cntr += ove_cntr;
+ }
+
+ if (csr8 & DMA_MISSED_FRAME_OVE_M) {
+ stats->rx_missed_errors += 0xffff;
+ x->rx_missed_cntr += 0xffff;
+ } else {
+ unsigned int miss_f = (csr8 & DMA_MISSED_FRAME_M_CNTR);
+ stats->rx_missed_errors += miss_f;
+ x->rx_missed_cntr += miss_f;
+ }
+ }
+ return;
+}
+
+struct stmmac_dma_ops dwmac100_dma_ops = {
+ .init = dwmac100_dma_init,
+ .dump_regs = dwmac100_dump_dma_regs,
+ .dma_mode = dwmac100_dma_operation_mode,
+ .dma_diagnostic_fr = dwmac100_dma_diagnostic_fr,
+ .enable_dma_transmission = dwmac_enable_dma_transmission,
+ .enable_dma_irq = dwmac_enable_dma_irq,
+ .disable_dma_irq = dwmac_disable_dma_irq,
+ .start_tx = dwmac_dma_start_tx,
+ .stop_tx = dwmac_dma_stop_tx,
+ .start_rx = dwmac_dma_start_rx,
+ .stop_rx = dwmac_dma_stop_rx,
+ .dma_interrupt = dwmac_dma_interrupt,
+};
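As a rough illustration of the CSR8 decode performed by dwmac100_dma_diagnostic_fr() above: the missed-frame counter sits in the low 16 bits with an overflow flag just above it, and the FIFO-overflow counter sits in the bits selected by the >> 17 shift with its own overflow flag above that. The mask values below are assumptions inferred from those shifts and the 0x800/0xffff saturation constants; the authoritative definitions live in dwmac100.h.

#include <stdio.h>
#include <stdint.h>

/* Assumed field layout, inferred from the shifts/masks used above. */
#define SK_MISSED_CNTR 0x0000ffffu /* bits 15:0  missed-frame counter          */
#define SK_MISSED_OVF  0x00010000u /* bit  16    missed counter overflowed     */
#define SK_OVE_CNTR    0x0ffe0000u /* bits 27:17 FIFO overflow counter         */
#define SK_OVE_OVF     0x10000000u /* bit  28    overflow counter overflowed   */

int main(void)
{
	uint32_t csr8 = 0x00060007; /* hypothetical sample register value */
	unsigned int over, missed;

	over   = (csr8 & SK_OVE_OVF)    ? 0x800  : (csr8 & SK_OVE_CNTR) >> 17;
	missed = (csr8 & SK_MISSED_OVF) ? 0xffff : (csr8 & SK_MISSED_CNTR);

	printf("rx_over_errors += %u, rx_missed_errors += %u\n", over, missed);
	return 0;
}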
diff --git a/drivers/net/stmmac/dwmac_dma.h b/drivers/net/stmmac/dwmac_dma.h
index de848d9f6060..7b815a1b7b8c 100644
--- a/drivers/net/stmmac/dwmac_dma.h
+++ b/drivers/net/stmmac/dwmac_dma.h
@@ -95,6 +95,7 @@
#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */
#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
+#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
extern void dwmac_enable_dma_transmission(unsigned long ioaddr);
extern void dwmac_enable_dma_irq(unsigned long ioaddr);
diff --git a/drivers/net/stmmac/dwmac_lib.c b/drivers/net/stmmac/dwmac_lib.c
index d4adb1eaa447..0a504adb7eb3 100644
--- a/drivers/net/stmmac/dwmac_lib.c
+++ b/drivers/net/stmmac/dwmac_lib.c
@@ -227,6 +227,13 @@ int dwmac_dma_interrupt(unsigned long ioaddr,
return ret;
}
+void dwmac_dma_flush_tx_fifo(unsigned long ioaddr)
+{
+ u32 csr6 = readl(ioaddr + DMA_CONTROL);
+ writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);
+
+ do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
+}
void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
unsigned int high, unsigned int low)
diff --git a/drivers/net/stmmac/enh_desc.c b/drivers/net/stmmac/enh_desc.c
new file mode 100644
index 000000000000..eb5684a1f713
--- /dev/null
+++ b/drivers/net/stmmac/enh_desc.c
@@ -0,0 +1,342 @@
+/*******************************************************************************
+ This contains the functions to handle the enhanced descriptors.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include "common.h"
+
+static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p, unsigned long ioaddr)
+{
+ int ret = 0;
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+
+ if (unlikely(p->des01.etx.error_summary)) {
+ CHIP_DBG(KERN_ERR "GMAC TX error... 0x%08x\n", p->des01.etx);
+ if (unlikely(p->des01.etx.jabber_timeout)) {
+ CHIP_DBG(KERN_ERR "\tjabber_timeout error\n");
+ x->tx_jabber++;
+ }
+
+ if (unlikely(p->des01.etx.frame_flushed)) {
+ CHIP_DBG(KERN_ERR "\tframe_flushed error\n");
+ x->tx_frame_flushed++;
+ dwmac_dma_flush_tx_fifo(ioaddr);
+ }
+
+ if (unlikely(p->des01.etx.loss_carrier)) {
+ CHIP_DBG(KERN_ERR "\tloss_carrier error\n");
+ x->tx_losscarrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely(p->des01.etx.no_carrier)) {
+ CHIP_DBG(KERN_ERR "\tno_carrier error\n");
+ x->tx_carrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely(p->des01.etx.late_collision)) {
+ CHIP_DBG(KERN_ERR "\tlate_collision error\n");
+ stats->collisions += p->des01.etx.collision_count;
+ }
+ if (unlikely(p->des01.etx.excessive_collisions)) {
+ CHIP_DBG(KERN_ERR "\texcessive_collisions\n");
+ stats->collisions += p->des01.etx.collision_count;
+ }
+ if (unlikely(p->des01.etx.excessive_deferral)) {
+ CHIP_DBG(KERN_INFO "\texcessive tx_deferral\n");
+ x->tx_deferred++;
+ }
+
+ if (unlikely(p->des01.etx.underflow_error)) {
+ CHIP_DBG(KERN_ERR "\tunderflow error\n");
+ dwmac_dma_flush_tx_fifo(ioaddr);
+ x->tx_underflow++;
+ }
+
+ if (unlikely(p->des01.etx.ip_header_error)) {
+ CHIP_DBG(KERN_ERR "\tTX IP header csum error\n");
+ x->tx_ip_header_error++;
+ }
+
+ if (unlikely(p->des01.etx.payload_error)) {
+ CHIP_DBG(KERN_ERR "\tAddr/Payload csum error\n");
+ x->tx_payload_error++;
+ dwmac_dma_flush_tx_fifo(ioaddr);
+ }
+
+ ret = -1;
+ }
+
+ if (unlikely(p->des01.etx.deferred)) {
+ CHIP_DBG(KERN_INFO "GMAC TX status: tx deferred\n");
+ x->tx_deferred++;
+ }
+#ifdef STMMAC_VLAN_TAG_USED
+ if (p->des01.etx.vlan_frame) {
+ CHIP_DBG(KERN_INFO "GMAC TX status: VLAN frame\n");
+ x->tx_vlan++;
+ }
+#endif
+
+ return ret;
+}
+
+static int enh_desc_get_tx_len(struct dma_desc *p)
+{
+ return p->des01.etx.buffer1_size;
+}
+
+static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
+{
+ int ret = good_frame;
+ u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;
+
+ /* bits 5 7 0 | Frame status
+ * ----------------------------------------------------------
+ * 0 0 0 | IEEE 802.3 Type frame (length < 1536 octets)
+ * 1 0 0 | IPv4/6 No CSUM errorS.
+ * 1 0 1 | IPv4/6 CSUM PAYLOAD error
+ * 1 1 0 | IPv4/6 CSUM IP HR error
+ * 1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errorS
+ * 0 0 1 | IPv4/6 unsupported IP PAYLOAD
+ * 0 1 1 | COE bypassed.. no IPv4/6 frame
+ * 0 1 0 | Reserved.
+ */
+ if (status == 0x0) {
+ CHIP_DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n");
+ ret = good_frame;
+ } else if (status == 0x4) {
+ CHIP_DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errorS.\n");
+ ret = good_frame;
+ } else if (status == 0x5) {
+ CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n");
+ ret = csum_none;
+ } else if (status == 0x6) {
+ CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n");
+ ret = csum_none;
+ } else if (status == 0x7) {
+ CHIP_DBG(KERN_ERR
+ "RX Des0 status: IPv4/6 Header and Payload Error.\n");
+ ret = csum_none;
+ } else if (status == 0x1) {
+ CHIP_DBG(KERN_ERR
+ "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n");
+ ret = discard_frame;
+ } else if (status == 0x3) {
+ CHIP_DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n");
+ ret = discard_frame;
+ }
+ return ret;
+}
+
+static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p)
+{
+ int ret = good_frame;
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+
+ if (unlikely(p->des01.erx.error_summary)) {
+ CHIP_DBG(KERN_ERR "GMAC RX Error Summary 0x%08x\n",
+ p->des01.erx);
+ if (unlikely(p->des01.erx.descriptor_error)) {
+ CHIP_DBG(KERN_ERR "\tdescriptor error\n");
+ x->rx_desc++;
+ stats->rx_length_errors++;
+ }
+ if (unlikely(p->des01.erx.overflow_error)) {
+ CHIP_DBG(KERN_ERR "\toverflow error\n");
+ x->rx_gmac_overflow++;
+ }
+
+ if (unlikely(p->des01.erx.ipc_csum_error))
+ CHIP_DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n");
+
+ if (unlikely(p->des01.erx.late_collision)) {
+ CHIP_DBG(KERN_ERR "\tlate_collision error\n");
+ stats->collisions++;
+ stats->collisions++;
+ }
+ if (unlikely(p->des01.erx.receive_watchdog)) {
+ CHIP_DBG(KERN_ERR "\treceive_watchdog error\n");
+ x->rx_watchdog++;
+ }
+ if (unlikely(p->des01.erx.error_gmii)) {
+ CHIP_DBG(KERN_ERR "\tReceive Error\n");
+ x->rx_mii++;
+ }
+ if (unlikely(p->des01.erx.crc_error)) {
+ CHIP_DBG(KERN_ERR "\tCRC error\n");
+ x->rx_crc++;
+ stats->rx_crc_errors++;
+ }
+ ret = discard_frame;
+ }
+
+ /* After a payload csum error, the ES bit is set.
+ * It does not match the information reported in the databook.
+ * At any rate, we need to understand if the CSUM hw computation is ok
+ * and report this info to the upper layers. */
+ ret = enh_desc_coe_rdes0(p->des01.erx.ipc_csum_error,
+ p->des01.erx.frame_type, p->des01.erx.payload_csum_error);
+
+ if (unlikely(p->des01.erx.dribbling)) {
+ CHIP_DBG(KERN_ERR "GMAC RX: dribbling error\n");
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.erx.sa_filter_fail)) {
+ CHIP_DBG(KERN_ERR "GMAC RX : Source Address filter fail\n");
+ x->sa_rx_filter_fail++;
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.erx.da_filter_fail)) {
+ CHIP_DBG(KERN_ERR "GMAC RX : Dest Address filter fail\n");
+ x->da_rx_filter_fail++;
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.erx.length_error)) {
+ CHIP_DBG(KERN_ERR "GMAC RX: length_error error\n");
+ x->rx_length++;
+ ret = discard_frame;
+ }
+#ifdef STMMAC_VLAN_TAG_USED
+ if (p->des01.erx.vlan_tag) {
+ CHIP_DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n");
+ x->rx_vlan++;
+ }
+#endif
+ return ret;
+}
+
+static void enh_desc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
+ int disable_rx_ic)
+{
+ int i;
+ for (i = 0; i < ring_size; i++) {
+ p->des01.erx.own = 1;
+ p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
+ /* To support jumbo frames */
+ p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
+ if (i == ring_size - 1)
+ p->des01.erx.end_ring = 1;
+ if (disable_rx_ic)
+ p->des01.erx.disable_ic = 1;
+ p++;
+ }
+ return;
+}
+
+static void enh_desc_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+{
+ int i;
+
+ for (i = 0; i < ring_size; i++) {
+ p->des01.etx.own = 0;
+ if (i == ring_size - 1)
+ p->des01.etx.end_ring = 1;
+ p++;
+ }
+
+ return;
+}
+
+static int enh_desc_get_tx_owner(struct dma_desc *p)
+{
+ return p->des01.etx.own;
+}
+
+static int enh_desc_get_rx_owner(struct dma_desc *p)
+{
+ return p->des01.erx.own;
+}
+
+static void enh_desc_set_tx_owner(struct dma_desc *p)
+{
+ p->des01.etx.own = 1;
+}
+
+static void enh_desc_set_rx_owner(struct dma_desc *p)
+{
+ p->des01.erx.own = 1;
+}
+
+static int enh_desc_get_tx_ls(struct dma_desc *p)
+{
+ return p->des01.etx.last_segment;
+}
+
+static void enh_desc_release_tx_desc(struct dma_desc *p)
+{
+ int ter = p->des01.etx.end_ring;
+
+ memset(p, 0, sizeof(struct dma_desc));
+ p->des01.etx.end_ring = ter;
+
+ return;
+}
+
+static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+ int csum_flag)
+{
+ p->des01.etx.first_segment = is_fs;
+ if (unlikely(len > BUF_SIZE_4KiB)) {
+ p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
+ p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB;
+ } else {
+ p->des01.etx.buffer1_size = len;
+ }
+ if (likely(csum_flag))
+ p->des01.etx.checksum_insertion = cic_full;
+}
+
+static void enh_desc_clear_tx_ic(struct dma_desc *p)
+{
+ p->des01.etx.interrupt = 0;
+}
+
+static void enh_desc_close_tx_desc(struct dma_desc *p)
+{
+ p->des01.etx.last_segment = 1;
+ p->des01.etx.interrupt = 1;
+}
+
+static int enh_desc_get_rx_frame_len(struct dma_desc *p)
+{
+ return p->des01.erx.frame_length;
+}
+
+struct stmmac_desc_ops enh_desc_ops = {
+ .tx_status = enh_desc_get_tx_status,
+ .rx_status = enh_desc_get_rx_status,
+ .get_tx_len = enh_desc_get_tx_len,
+ .init_rx_desc = enh_desc_init_rx_desc,
+ .init_tx_desc = enh_desc_init_tx_desc,
+ .get_tx_owner = enh_desc_get_tx_owner,
+ .get_rx_owner = enh_desc_get_rx_owner,
+ .release_tx_desc = enh_desc_release_tx_desc,
+ .prepare_tx_desc = enh_desc_prepare_tx_desc,
+ .clear_tx_ic = enh_desc_clear_tx_ic,
+ .close_tx_desc = enh_desc_close_tx_desc,
+ .get_tx_ls = enh_desc_get_tx_ls,
+ .set_tx_owner = enh_desc_set_tx_owner,
+ .set_rx_owner = enh_desc_set_rx_owner,
+ .get_rx_frame_len = enh_desc_get_rx_frame_len,
+};
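The bit table inside enh_desc_coe_rdes0() maps (frame_type, ipc_csum_error, payload_csum_error) onto a frame status. A compact stand-alone sketch of the same mapping as a lookup table, assuming the good_frame/discard_frame/csum_none outcomes shown above (the enum and names here are mock stand-ins, not a drop-in replacement):

#include <stdio.h>

/* Mock of the rx status enum from common.h; ordering is an assumption. */
enum rx_status_sk { good_frame, discard_frame, csum_none };

static enum rx_status_sk coe_rdes0_sketch(int ipc_err, int type, int payload_err)
{
	/* index = type << 2 | ipc_err << 1 | payload_err, as in the table */
	static const enum rx_status_sk map[8] = {
		[0x0] = good_frame,    /* 802.3 frame, no checksum offload       */
		[0x1] = discard_frame, /* unsupported IP payload                 */
		[0x2] = good_frame,    /* reserved (left untouched in the code)  */
		[0x3] = discard_frame, /* COE bypassed, not an IPv4/6 frame      */
		[0x4] = good_frame,    /* IPv4/6, no checksum errors             */
		[0x5] = csum_none,     /* IPv4/6 payload checksum error          */
		[0x6] = csum_none,     /* IPv4/6 header checksum error           */
		[0x7] = csum_none,     /* header and payload checksum errors     */
	};

	return map[(type << 2 | ipc_err << 1 | payload_err) & 0x7];
}

int main(void)
{
	/* e.g. an IPv4 frame whose header checksum failed: type=1, ipc=1, payload=0 */
	printf("%d\n", coe_rdes0_sketch(1, 1, 0)); /* prints the csum_none value */
	return 0;
}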
diff --git a/drivers/net/stmmac/norm_desc.c b/drivers/net/stmmac/norm_desc.c
new file mode 100644
index 000000000000..ecfcc001a04a
--- /dev/null
+++ b/drivers/net/stmmac/norm_desc.c
@@ -0,0 +1,240 @@
+/*******************************************************************************
+ This contains the functions to handle the normal descriptors.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include "common.h"
+
+static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p, unsigned long ioaddr)
+{
+ int ret = 0;
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+
+ if (unlikely(p->des01.tx.error_summary)) {
+ if (unlikely(p->des01.tx.underflow_error)) {
+ x->tx_underflow++;
+ stats->tx_fifo_errors++;
+ }
+ if (unlikely(p->des01.tx.no_carrier)) {
+ x->tx_carrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely(p->des01.tx.loss_carrier)) {
+ x->tx_losscarrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely((p->des01.tx.excessive_deferral) ||
+ (p->des01.tx.excessive_collisions) ||
+ (p->des01.tx.late_collision)))
+ stats->collisions += p->des01.tx.collision_count;
+ ret = -1;
+ }
+ if (unlikely(p->des01.tx.heartbeat_fail)) {
+ x->tx_heartbeat++;
+ stats->tx_heartbeat_errors++;
+ ret = -1;
+ }
+ if (unlikely(p->des01.tx.deferred))
+ x->tx_deferred++;
+
+ return ret;
+}
+
+static int ndesc_get_tx_len(struct dma_desc *p)
+{
+ return p->des01.tx.buffer1_size;
+}
+
+/* This function verifies if each incoming frame has some errors
+ * and, if required, updates the multicast statistics.
+ * In case of success, it returns csum_none because the device
+ * is not able to compute the csum in HW. */
+static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p)
+{
+ int ret = csum_none;
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+
+ if (unlikely(p->des01.rx.last_descriptor == 0)) {
+ pr_warning("ndesc Error: Oversized Ethernet "
+ "frame spanned multiple buffers\n");
+ stats->rx_length_errors++;
+ return discard_frame;
+ }
+
+ if (unlikely(p->des01.rx.error_summary)) {
+ if (unlikely(p->des01.rx.descriptor_error))
+ x->rx_desc++;
+ if (unlikely(p->des01.rx.partial_frame_error))
+ x->rx_partial++;
+ if (unlikely(p->des01.rx.run_frame))
+ x->rx_runt++;
+ if (unlikely(p->des01.rx.frame_too_long))
+ x->rx_toolong++;
+ if (unlikely(p->des01.rx.collision)) {
+ x->rx_collision++;
+ stats->collisions++;
+ }
+ if (unlikely(p->des01.rx.crc_error)) {
+ x->rx_crc++;
+ stats->rx_crc_errors++;
+ }
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.rx.dribbling))
+ ret = discard_frame;
+
+ if (unlikely(p->des01.rx.length_error)) {
+ x->rx_length++;
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.rx.mii_error)) {
+ x->rx_mii++;
+ ret = discard_frame;
+ }
+ if (p->des01.rx.multicast_frame) {
+ x->rx_multicast++;
+ stats->multicast++;
+ }
+ return ret;
+}
+
+static void ndesc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
+ int disable_rx_ic)
+{
+ int i;
+ for (i = 0; i < ring_size; i++) {
+ p->des01.rx.own = 1;
+ p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
+ if (i == ring_size - 1)
+ p->des01.rx.end_ring = 1;
+ if (disable_rx_ic)
+ p->des01.rx.disable_ic = 1;
+ p++;
+ }
+ return;
+}
+
+static void ndesc_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+{
+ int i;
+ for (i = 0; i < ring_size; i++) {
+ p->des01.tx.own = 0;
+ if (i == ring_size - 1)
+ p->des01.tx.end_ring = 1;
+ p++;
+ }
+ return;
+}
+
+static int ndesc_get_tx_owner(struct dma_desc *p)
+{
+ return p->des01.tx.own;
+}
+
+static int ndesc_get_rx_owner(struct dma_desc *p)
+{
+ return p->des01.rx.own;
+}
+
+static void ndesc_set_tx_owner(struct dma_desc *p)
+{
+ p->des01.tx.own = 1;
+}
+
+static void ndesc_set_rx_owner(struct dma_desc *p)
+{
+ p->des01.rx.own = 1;
+}
+
+static int ndesc_get_tx_ls(struct dma_desc *p)
+{
+ return p->des01.tx.last_segment;
+}
+
+static void ndesc_release_tx_desc(struct dma_desc *p)
+{
+ int ter = p->des01.tx.end_ring;
+
+ /* clean field used within the xmit */
+ p->des01.tx.first_segment = 0;
+ p->des01.tx.last_segment = 0;
+ p->des01.tx.buffer1_size = 0;
+
+ /* clean status reported */
+ p->des01.tx.error_summary = 0;
+ p->des01.tx.underflow_error = 0;
+ p->des01.tx.no_carrier = 0;
+ p->des01.tx.loss_carrier = 0;
+ p->des01.tx.excessive_deferral = 0;
+ p->des01.tx.excessive_collisions = 0;
+ p->des01.tx.late_collision = 0;
+ p->des01.tx.heartbeat_fail = 0;
+ p->des01.tx.deferred = 0;
+
+ /* set termination field */
+ p->des01.tx.end_ring = ter;
+
+ return;
+}
+
+static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+ int csum_flag)
+{
+ p->des01.tx.first_segment = is_fs;
+ p->des01.tx.buffer1_size = len;
+}
+
+static void ndesc_clear_tx_ic(struct dma_desc *p)
+{
+ p->des01.tx.interrupt = 0;
+}
+
+static void ndesc_close_tx_desc(struct dma_desc *p)
+{
+ p->des01.tx.last_segment = 1;
+ p->des01.tx.interrupt = 1;
+}
+
+static int ndesc_get_rx_frame_len(struct dma_desc *p)
+{
+ return p->des01.rx.frame_length;
+}
+
+struct stmmac_desc_ops ndesc_ops = {
+ .tx_status = ndesc_get_tx_status,
+ .rx_status = ndesc_get_rx_status,
+ .get_tx_len = ndesc_get_tx_len,
+ .init_rx_desc = ndesc_init_rx_desc,
+ .init_tx_desc = ndesc_init_tx_desc,
+ .get_tx_owner = ndesc_get_tx_owner,
+ .get_rx_owner = ndesc_get_rx_owner,
+ .release_tx_desc = ndesc_release_tx_desc,
+ .prepare_tx_desc = ndesc_prepare_tx_desc,
+ .clear_tx_ic = ndesc_clear_tx_ic,
+ .close_tx_desc = ndesc_close_tx_desc,
+ .get_tx_ls = ndesc_get_tx_ls,
+ .set_tx_owner = ndesc_set_tx_owner,
+ .set_rx_owner = ndesc_set_rx_owner,
+ .get_rx_frame_len = ndesc_get_rx_frame_len,
+};
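Both descriptor flavours sit behind the same stmmac_desc_ops contract, so the core can drive a TX slot without knowing which layout is in use. Below is a minimal, self-contained sketch of that pattern with mock types (dma_desc_sk, desc_ops_sk and the sk_* callbacks are hypothetical stand-ins, not driver code): fill the descriptor, close it, hand ownership to the DMA last, and reclaim it only once the OWN bit has been cleared.

#include <stdio.h>

struct dma_desc_sk {
	unsigned own, first, last, intr, len;
};

struct desc_ops_sk {
	void (*prepare_tx_desc)(struct dma_desc_sk *p, int is_fs, int len, int csum);
	void (*close_tx_desc)(struct dma_desc_sk *p);
	void (*set_tx_owner)(struct dma_desc_sk *p);
	int  (*get_tx_owner)(struct dma_desc_sk *p);
	void (*release_tx_desc)(struct dma_desc_sk *p);
};

static void sk_prepare(struct dma_desc_sk *p, int is_fs, int len, int csum)
{
	p->first = is_fs;
	p->len = len;
	(void)csum; /* the mock layout has no checksum-insertion field */
}
static void sk_close(struct dma_desc_sk *p)   { p->last = 1; p->intr = 1; }
static void sk_set_own(struct dma_desc_sk *p) { p->own = 1; }
static int  sk_get_own(struct dma_desc_sk *p) { return p->own; }
static void sk_release(struct dma_desc_sk *p) { p->first = p->last = p->len = 0; }

static const struct desc_ops_sk ops = {
	.prepare_tx_desc = sk_prepare,
	.close_tx_desc   = sk_close,
	.set_tx_owner    = sk_set_own,
	.get_tx_owner    = sk_get_own,
	.release_tx_desc = sk_release,
};

int main(void)
{
	struct dma_desc_sk d = { 0 };

	/* Queue one single-segment frame: fill, close, then hand over
	 * ownership as the final step. */
	ops.prepare_tx_desc(&d, 1, 1500, 1);
	ops.close_tx_desc(&d);
	ops.set_tx_owner(&d);

	/* Reclaim only once the (simulated) DMA has cleared OWN. */
	d.own = 0;
	if (!ops.get_tx_owner(&d))
		ops.release_tx_desc(&d);

	printf("slot reclaimed, len=%u last=%u\n", d.len, d.last);
	return 0;
}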
diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h
index ba35e6943cf4..ebebc644b1b8 100644
--- a/drivers/net/stmmac/stmmac.h
+++ b/drivers/net/stmmac/stmmac.h
@@ -20,14 +20,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#define DRV_MODULE_VERSION "Jan_2010"
+#define DRV_MODULE_VERSION "Apr_2010"
#include <linux/stmmac.h>
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
-#define STMMAC_VLAN_TAG_USED
-#include <linux/if_vlan.h>
-#endif
-
#include "common.h"
#ifdef CONFIG_STMMAC_TIMER
#include "stmmac_timer.h"
@@ -93,6 +88,7 @@ struct stmmac_priv {
#ifdef STMMAC_VLAN_TAG_USED
struct vlan_group *vlgrp;
#endif
+ int enh_desc;
};
#ifdef CONFIG_STM_DRIVERS
@@ -120,3 +116,5 @@ static inline int stmmac_claim_resource(struct platform_device *pdev)
extern int stmmac_mdio_unregister(struct net_device *ndev);
extern int stmmac_mdio_register(struct net_device *ndev);
extern void stmmac_set_ethtool_ops(struct net_device *netdev);
+extern struct stmmac_desc_ops enh_desc_ops;
+extern struct stmmac_desc_ops ndesc_ops;
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 4111a85ec80e..7ac6ddea989e 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -837,7 +837,7 @@ static int stmmac_open(struct net_device *dev)
#ifdef CONFIG_STMMAC_TIMER
priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
if (unlikely(priv->tm == NULL)) {
- pr_err("%s: ERROR: timer memory alloc failed \n", __func__);
+ pr_err("%s: ERROR: timer memory alloc failed\n", __func__);
return -ENOMEM;
}
priv->tm->freq = tmrate;
@@ -1280,7 +1280,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
priv->dev->stats.rx_packets++;
priv->dev->stats.rx_bytes += frame_len;
- priv->dev->last_rx = jiffies;
}
entry = next_entry;
p = p_next; /* use prefetched values */
@@ -1587,6 +1586,12 @@ static int stmmac_mac_device_setup(struct net_device *dev)
else
device = dwmac100_setup(ioaddr);
+ if (priv->enh_desc) {
+ device->desc = &enh_desc_ops;
+ pr_info("\tEnhanced descriptor structure\n");
+ } else
+ device->desc = &ndesc_ops;
+
if (!device)
return -ENOMEM;
@@ -1727,6 +1732,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
priv->bus_id = plat_dat->bus_id;
priv->pbl = plat_dat->pbl; /* TLI */
priv->is_gmac = plat_dat->has_gmac; /* GMAC is on board */
+ priv->enh_desc = plat_dat->enh_desc;
platform_set_drvdata(pdev, ndev);
diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c
index 8b28c89a9a77..151312342243 100644
--- a/drivers/net/sun3_82586.c
+++ b/drivers/net/sun3_82586.c
@@ -412,7 +412,7 @@ static int init586(struct net_device *dev)
volatile struct iasetup_cmd_struct *ias_cmd;
volatile struct tdr_cmd_struct *tdr_cmd;
volatile struct mcsetup_cmd_struct *mc_cmd;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int num_addrs=netdev_mc_count(dev);
ptr = (void *) ((char *)p->scb + sizeof(struct scb_struct));
@@ -536,9 +536,9 @@ static int init586(struct net_device *dev)
mc_cmd->mc_cnt = swab16(num_addrs * 6);
i = 0;
- netdev_for_each_mc_addr(dmi, dev)
+ netdev_for_each_mc_addr(ha, dev)
memcpy((char *) mc_cmd->mc_list[i++],
- dmi->dmi_addr, ETH_ALEN);
+ ha->addr, ETH_ALEN);
p->scb->cbl_offset = make16(mc_cmd);
p->scb->cmd_cuc = CUC_START;
@@ -985,7 +985,7 @@ static void sun3_82586_timeout(struct net_device *dev)
p->scb->cmd_cuc = CUC_START;
sun3_attn586();
WAIT_4_SCB_CMD();
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
return 0;
}
#endif
@@ -998,7 +998,7 @@ static void sun3_82586_timeout(struct net_device *dev)
sun3_82586_close(dev);
sun3_82586_open(dev);
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
}
/******************************************************
@@ -1062,7 +1062,6 @@ static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
}
sun3_attn586();
- dev->trans_start = jiffies;
if(!i)
dev_kfree_skb(skb);
WAIT_4_SCB_CMD();
@@ -1082,7 +1081,6 @@ static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0;
p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0]));
- dev->trans_start = jiffies;
p->nop_point = next_nop;
dev_kfree_skb(skb);
# endif
@@ -1097,7 +1095,6 @@ static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
p->nop_cmds[next_nop]->cmd_status = 0;
p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count]));
- dev->trans_start = jiffies;
p->xmit_count = next_nop;
{
diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c
index 1694ca5bfb41..358c22f9acbe 100644
--- a/drivers/net/sun3lance.c
+++ b/drivers/net/sun3lance.c
@@ -523,8 +523,8 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
/* Transmitter timeout, serious problems. */
if (netif_queue_stopped(dev)) {
- int tickssofar = jiffies - dev->trans_start;
- if (tickssofar < 20)
+ int tickssofar = jiffies - dev_trans_start(dev);
+ if (tickssofar < HZ/5)
return NETDEV_TX_BUSY;
DPRINTK( 1, ( "%s: transmit timed out, status %04x, resetting.\n",
@@ -559,7 +559,6 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT;
netif_start_queue(dev);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
@@ -637,8 +636,7 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
AREG = CSR0;
DPRINTK( 2, ( "%s: lance_start_xmit() exiting, csr0 %4.4x.\n",
dev->name, DREG ));
- dev->trans_start = jiffies;
- dev_kfree_skb( skb );
+ dev_kfree_skb(skb);
lp->lock = 0;
if ((MEM->tx_head[(entry+1) & TX_RING_MOD_MASK].flag & TMD1_OWN) ==
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index ed7865a0b5b2..34446b6d9a3c 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -982,8 +982,6 @@ static int bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
sbus_writel(CREG_CTRL_TWAKEUP, bp->creg + CREG_CTRL);
- dev->trans_start = jiffies;
-
return NETDEV_TX_OK;
}
@@ -999,7 +997,7 @@ static void bigmac_set_multicast(struct net_device *dev)
{
struct bigmac *bp = netdev_priv(dev);
void __iomem *bregs = bp->bregs;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
int i;
u32 tmp, crc;
@@ -1028,8 +1026,8 @@ static void bigmac_set_multicast(struct net_device *dev)
for (i = 0; i < 4; i++)
hash_table[i] = 0;
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
if (!(*addrs & 1))
continue;
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 8249a394a4e1..16803251a999 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -972,7 +972,7 @@ static void tx_timeout(struct net_device *dev)
dev->if_port = 0;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
dev->stats.tx_errors++;
if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
netif_wake_queue(dev);
@@ -1084,7 +1084,6 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
} else {
netif_stop_queue (dev);
}
- dev->trans_start = jiffies;
if (netif_msg_tx_queued(np)) {
printk (KERN_DEBUG
"%s: Transmit frame #%d queued in slot %d.\n",
@@ -1522,13 +1521,13 @@ static void set_rx_mode(struct net_device *dev)
memset(mc_filter, 0xff, sizeof(mc_filter));
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
} else if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
int bit;
int index;
int crc;
memset (mc_filter, 0, sizeof (mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
- crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
if (crc & 0x80000000) index |= 1 << bit;
mc_filter[index/16] |= (1 << (index % 16));
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index e6880f1c4e8c..5bc786f73e4d 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -1136,7 +1136,7 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
writel(gp->tx_new, gp->regs + TXDMA_KICK);
spin_unlock_irqrestore(&gp->tx_lock, flags);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
return NETDEV_TX_OK;
}
@@ -1846,12 +1846,12 @@ static u32 gem_setup_multicast(struct gem *gp)
} else {
u16 hash_table[16];
u32 crc;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int i;
memset(hash_table, 0, sizeof(hash_table));
- netdev_for_each_mc_addr(dmi, gp->dev) {
- char *addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, gp->dev) {
+ char *addrs = ha->addr;
if (!(*addrs & 1))
continue;
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index b17dbb11bd67..377c0b51e559 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -1523,13 +1523,13 @@ static int happy_meal_init(struct happy_meal *hp)
hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
} else if ((hp->dev->flags & IFF_PROMISC) == 0) {
u16 hash_table[4];
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
u32 crc;
memset(hash_table, 0, sizeof(hash_table));
- netdev_for_each_mc_addr(dmi, hp->dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, hp->dev) {
+ addrs = ha->addr;
if (!(*addrs & 1))
continue;
@@ -2341,8 +2341,6 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
spin_unlock_irq(&hp->happy_lock);
- dev->trans_start = jiffies;
-
tx_add_log(hp, TXLOG_ACTION_TXMIT, 0);
return NETDEV_TX_OK;
}
@@ -2362,7 +2360,7 @@ static void happy_meal_set_multicast(struct net_device *dev)
{
struct happy_meal *hp = netdev_priv(dev);
void __iomem *bregs = hp->bigmacregs;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
u32 crc;
@@ -2380,8 +2378,8 @@ static void happy_meal_set_multicast(struct net_device *dev)
u16 hash_table[4];
memset(hash_table, 0, sizeof(hash_table));
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
if (!(*addrs & 1))
continue;
@@ -3004,7 +3002,6 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
dev->base_addr = (long) pdev;
hp = netdev_priv(dev);
- memset(hp, 0, sizeof(*hp));
hp->happy_dev = pdev;
hp->dma_dev = &pdev->dev;
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 0c21653ff9f9..f88a60fa25f8 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -1003,7 +1003,7 @@ static int lance_reset(struct net_device *dev)
}
lp->init_ring(dev);
load_csrs(lp);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
status = init_restart_lance(lp);
return status;
}
@@ -1160,7 +1160,6 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irq(&lp->lock);
- dev->trans_start = jiffies;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
@@ -1170,7 +1169,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
static void lance_load_multicast(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
u32 crc;
u32 val;
@@ -1195,8 +1194,8 @@ static void lance_load_multicast(struct net_device *dev)
return;
/* Add addresses */
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
/* multicast address? */
if (!(*addrs & 1))
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index be637dce944c..a7542d25c845 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -602,7 +602,6 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
qep->tx_new = NEXT_TX(entry);
/* Get it going. */
- dev->trans_start = jiffies;
sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);
dev->stats.tx_packets++;
@@ -627,7 +626,7 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
static void qe_set_multicast(struct net_device *dev)
{
struct sunqe *qep = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
u8 new_mconfig = qep->mconfig;
char *addrs;
int i;
@@ -651,8 +650,8 @@ static void qe_set_multicast(struct net_device *dev)
u8 *hbytes = (unsigned char *) &hash_table[0];
memset(hash_table, 0, sizeof(hash_table));
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
if (!(*addrs & 1))
continue;
diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c
index 6b1b7cea7f6b..d281a7b34701 100644
--- a/drivers/net/sunvnet.c
+++ b/drivers/net/sunvnet.c
@@ -717,7 +717,6 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb(skb);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
out_dropped_unlock:
@@ -763,12 +762,12 @@ static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr)
static void __update_mc_list(struct vnet *vp, struct net_device *dev)
{
- struct dev_addr_list *p;
+ struct netdev_hw_addr *ha;
- netdev_for_each_mc_addr(p, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
struct vnet_mcast_entry *m;
- m = __vnet_mc_find(vp, p->dmi_addr);
+ m = __vnet_mc_find(vp, ha->addr);
if (m) {
m->hit = 1;
continue;
@@ -778,7 +777,7 @@ static void __update_mc_list(struct vnet *vp, struct net_device *dev)
m = kzalloc(sizeof(*m), GFP_ATOMIC);
if (!m)
continue;
- memcpy(m->addr, p->dmi_addr, ETH_ALEN);
+ memcpy(m->addr, ha->addr, ETH_ALEN);
m->hit = 1;
m->next = vp->mcast_list;
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index 49bd84c0d583..be08b75dbc15 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -1357,8 +1357,6 @@ static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev)
}
lp->tfd_start = (lp->tfd_start + 1) % TX_FD_NUM;
- dev->trans_start = jiffies;
-
/* If we just used up the very last entry in the
* TX ring on this device, tell the queueing
* layer to send no more.
@@ -1954,16 +1952,16 @@ tc35815_set_multicast_list(struct net_device *dev)
/* Disable promiscuous mode, use normal mode. */
tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc, &tr->CAM_Ctl);
} else if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *cur_addr;
+ struct netdev_hw_addr *ha;
int i;
int ena_bits = CAM_Ena_Bit(CAM_ENTRY_SOURCE);
tc_writel(0, &tr->CAM_Ctl);
/* Walk the address list, and load the filter */
i = 0;
- netdev_for_each_mc_addr(cur_addr, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
/* entry 0,1 is reserved. */
- tc35815_set_cam_entry(dev, i + 2, cur_addr->dmi_addr);
+ tc35815_set_cam_entry(dev, i + 2, ha->addr);
ena_bits |= CAM_Ena_Bit(i + 2);
i++;
}
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index f5493092521a..20ab16192325 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -808,7 +808,7 @@ static void bdx_setmulti(struct net_device *ndev)
WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, ~0);
} else if (!netdev_mc_empty(ndev)) {
u8 hash;
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
u32 reg, val;
/* set IMF to deny all multicast frames */
@@ -825,10 +825,10 @@ static void bdx_setmulti(struct net_device *ndev)
* into RX_MAC_MCST regs. We skip this phase now and accept ALL
* multicast frames through IMF */
/* accept the rest of addresses through IMF */
- netdev_for_each_mc_addr(mclist, ndev) {
+ netdev_for_each_mc_addr(ha, ndev) {
hash = 0;
for (i = 0; i < ETH_ALEN; i++)
- hash ^= mclist->dmi_addr[i];
+ hash ^= ha->addr[i];
reg = regRX_MCST_HASH0 + ((hash >> 5) << 2);
val = READ_REG(priv, reg);
val |= (1 << (hash % 32));
@@ -1303,7 +1303,6 @@ static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
priv->net_stats.rx_bytes += len;
skb_put(skb, len);
- skb->dev = priv->ndev;
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->protocol = eth_type_trans(skb, priv->ndev);
@@ -1509,7 +1508,7 @@ bdx_tx_map_skb(struct bdx_priv *priv, struct sk_buff *skb,
int nr_frags = skb_shinfo(skb)->nr_frags;
int i;
- db->wptr->len = skb->len - skb->data_len;
+ db->wptr->len = skb_headlen(skb);
db->wptr->addr.dma = pci_map_single(priv->pdev, skb->data,
db->wptr->len, PCI_DMA_TODEVICE);
pbl->len = CPU_CHIP_SWAP32(db->wptr->len);
@@ -2034,7 +2033,6 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/************** priv ****************/
priv = nic->priv[port] = netdev_priv(ndev);
- memset(priv, 0, sizeof(struct bdx_priv));
priv->pBdxRegs = nic->regs + port * 0x8000;
priv->port = port;
priv->pdev = pdev;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index ecc41cffb470..573054ae7b58 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -67,8 +67,8 @@
#include "tg3.h"
#define DRV_MODULE_NAME "tg3"
-#define DRV_MODULE_VERSION "3.108"
-#define DRV_MODULE_RELDATE "February 17, 2010"
+#define DRV_MODULE_VERSION "3.110"
+#define DRV_MODULE_RELDATE "April 9, 2010"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@@ -101,7 +101,7 @@
#define TG3_DEF_RX_RING_PENDING 200
#define TG3_RX_JUMBO_RING_SIZE 256
#define TG3_DEF_RX_JUMBO_RING_PENDING 100
-#define TG3_RSS_INDIR_TBL_SIZE 128
+#define TG3_RSS_INDIR_TBL_SIZE 128
/* Do not place this n-ring entries value into the tp struct itself,
* we really want to expose these constants to GCC so that modulo et
@@ -126,6 +126,9 @@
TG3_TX_RING_SIZE)
#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
+#define TG3_RX_DMA_ALIGN 16
+#define TG3_RX_HEADROOM ALIGN(VLAN_HLEN, TG3_RX_DMA_ALIGN)
+
#define TG3_DMA_BYTE_ENAB 64
#define TG3_RX_STD_DMA_SZ 1536
@@ -142,6 +145,26 @@
#define TG3_RX_JMB_BUFF_RING_SIZE \
(sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)
+#define TG3_RSS_MIN_NUM_MSIX_VECS 2
+
+/* Due to a hardware bug, the 5701 can only DMA to memory addresses
+ * that are at least dword aligned when used in PCIX mode. The driver
+ * works around this bug by double copying the packet. This workaround
+ * is built into the normal double copy length check for efficiency.
+ *
+ * However, the double copy is only necessary on those architectures
+ * where unaligned memory accesses are inefficient. For those architectures
+ * where unaligned memory accesses incur little penalty, we can reintegrate
+ * the 5701 in the normal rx path. Doing so saves a device structure
+ * dereference by hardcoding the double copy threshold in place.
+ */
+#define TG3_RX_COPY_THRESHOLD 256
+#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
+#else
+ #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
+#endif
+
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
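The comment above argues that the copy threshold can collapse to a compile-time constant when unaligned accesses are cheap, and stay a per-device field otherwise. A small stand-alone sketch of that selection pattern (all SK_* names are local to the sketch; the real driver keys off NET_IP_ALIGN, CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS and tp->rx_copy_thresh):

#include <stdio.h>

#define SK_COPY_THRESHOLD 256

struct sk_dev { unsigned int rx_copy_thresh; };

#if defined(SK_EFFICIENT_UNALIGNED)            /* e.g. -DSK_EFFICIENT_UNALIGNED */
# define SK_COPY_THRESH(d) SK_COPY_THRESHOLD   /* constant, no pointer chase    */
#else
# define SK_COPY_THRESH(d) ((d)->rx_copy_thresh)
#endif

int main(void)
{
	struct sk_dev d = { .rx_copy_thresh = 320 }; /* hypothetical per-device value */
	unsigned int len = 300;

	if (len > SK_COPY_THRESH(&d))
		printf("hand the ring buffer straight to the stack\n");
	else
		printf("copy into a small skb and recycle the ring buffer\n");
	return 0;
}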
@@ -152,6 +175,8 @@
#define TG3_NUM_TEST 6
+#define TG3_FW_UPDATE_TIMEOUT_SEC 5
+
#define FIRMWARE_TG3 "tigon/tg3.bin"
#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
@@ -167,8 +192,6 @@ MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
-#define TG3_RSS_MIN_NUM_MSIX_VECS 2
-
static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
@@ -360,7 +383,7 @@ static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
static u32 tg3_read32(struct tg3 *tp, u32 off)
{
- return (readl(tp->regs + off));
+ return readl(tp->regs + off);
}
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
@@ -370,7 +393,7 @@ static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
- return (readl(tp->aperegs + off));
+ return readl(tp->aperegs + off);
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
@@ -488,7 +511,7 @@ static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
- return (readl(tp->regs + off + GRCMBOX_BASE));
+ return readl(tp->regs + off + GRCMBOX_BASE);
}
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
@@ -496,16 +519,16 @@ static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
writel(val, tp->regs + off + GRCMBOX_BASE);
}
-#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
+#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
-#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
-#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
-#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
+#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
+#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
+#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
-#define tw32(reg,val) tp->write32(tp, reg, val)
-#define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
-#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
-#define tr32(reg) tp->read32(tp, reg)
+#define tw32(reg, val) tp->write32(tp, reg, val)
+#define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
+#define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
+#define tr32(reg) tp->read32(tp, reg)
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
@@ -579,11 +602,11 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
return 0;
switch (locknum) {
- case TG3_APE_LOCK_GRC:
- case TG3_APE_LOCK_MEM:
- break;
- default:
- return -EINVAL;
+ case TG3_APE_LOCK_GRC:
+ case TG3_APE_LOCK_MEM:
+ break;
+ default:
+ return -EINVAL;
}
off = 4 * locknum;
@@ -617,11 +640,11 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
return;
switch (locknum) {
- case TG3_APE_LOCK_GRC:
- case TG3_APE_LOCK_MEM:
- break;
- default:
- return;
+ case TG3_APE_LOCK_GRC:
+ case TG3_APE_LOCK_MEM:
+ break;
+ default:
+ return;
}
off = 4 * locknum;
@@ -651,6 +674,7 @@ static void tg3_enable_ints(struct tg3 *tp)
tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
for (i = 0; i < tp->irq_cnt; i++) {
struct tg3_napi *tnapi = &tp->napi[i];
+
tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
@@ -1098,7 +1122,7 @@ static int tg3_mdio_init(struct tg3 *tp)
i = mdiobus_register(tp->mdio_bus);
if (i) {
- netdev_warn(tp->dev, "mdiobus_reg failed (0x%x)\n", i);
+ dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
mdiobus_free(tp->mdio_bus);
return i;
}
@@ -1106,7 +1130,7 @@ static int tg3_mdio_init(struct tg3 *tp)
phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
if (!phydev || !phydev->drv) {
- netdev_warn(tp->dev, "No PHY devices\n");
+ dev_warn(&tp->pdev->dev, "No PHY devices\n");
mdiobus_unregister(tp->mdio_bus);
mdiobus_free(tp->mdio_bus);
return -ENODEV;
@@ -1437,7 +1461,7 @@ static void tg3_adjust_link(struct net_device *dev)
phydev->speed != tp->link_config.active_speed ||
phydev->duplex != tp->link_config.active_duplex ||
oldflowctrl != tp->link_config.active_flowctrl)
- linkmesg = 1;
+ linkmesg = 1;
tp->link_config.active_speed = phydev->speed;
tp->link_config.active_duplex = phydev->duplex;
@@ -1464,7 +1488,7 @@ static int tg3_phy_init(struct tg3 *tp)
phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
phydev->dev_flags, phydev->interface);
if (IS_ERR(phydev)) {
- netdev_err(tp->dev, "Could not attach to PHY\n");
+ dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
return PTR_ERR(phydev);
}
@@ -1855,8 +1879,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
/* Set Extended packet length bit for jumbo frames */
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
- }
- else {
+ } else {
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
}
@@ -1974,8 +1997,7 @@ out:
tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
- }
- else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
+ } else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
@@ -2007,8 +2029,8 @@ out:
u32 phy_reg;
if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
- tg3_writephy(tp, MII_TG3_EXT_CTRL,
- phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
+ tg3_writephy(tp, MII_TG3_EXT_CTRL,
+ phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
}
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
@@ -3425,7 +3447,7 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp,
ap->rxconfig = rx_cfg_reg;
ret = ANEG_OK;
- switch(ap->state) {
+ switch (ap->state) {
case ANEG_STATE_UNKNOWN:
if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
ap->state = ANEG_STATE_AN_ENABLE;
@@ -3463,11 +3485,10 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp,
/* fallthru */
case ANEG_STATE_RESTART:
delta = ap->cur_time - ap->link_time;
- if (delta > ANEG_STATE_SETTLE_TIME) {
+ if (delta > ANEG_STATE_SETTLE_TIME)
ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
- } else {
+ else
ret = ANEG_TIMER_ENAB;
- }
break;
case ANEG_STATE_DISABLE_LINK_OK:
@@ -3491,9 +3512,8 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp,
break;
case ANEG_STATE_ABILITY_DETECT:
- if (ap->ability_match != 0 && ap->rxconfig != 0) {
+ if (ap->ability_match != 0 && ap->rxconfig != 0)
ap->state = ANEG_STATE_ACK_DETECT_INIT;
- }
break;
case ANEG_STATE_ACK_DETECT_INIT:
@@ -4171,9 +4191,9 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
current_duplex = DUPLEX_FULL;
else
current_duplex = DUPLEX_HALF;
- }
- else
+ } else {
current_link_up = 0;
+ }
}
}
@@ -4211,6 +4231,7 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp)
tp->serdes_counter--;
return;
}
+
if (!netif_carrier_ok(tp->dev) &&
(tp->link_config.autoneg == AUTONEG_ENABLE)) {
u32 bmcr;
@@ -4240,10 +4261,9 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp)
tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
}
}
- }
- else if (netif_carrier_ok(tp->dev) &&
- (tp->link_config.autoneg == AUTONEG_ENABLE) &&
- (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
+ } else if (netif_carrier_ok(tp->dev) &&
+ (tp->link_config.autoneg == AUTONEG_ENABLE) &&
+ (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
u32 phy2;
/* Select expansion interrupt status register */
@@ -4266,13 +4286,12 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
int err;
- if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
+ if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
err = tg3_setup_fiber_phy(tp, force_reset);
- } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
+ else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
err = tg3_setup_fiber_mii_phy(tp, force_reset);
- } else {
+ else
err = tg3_setup_copper_phy(tp, force_reset);
- }
if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
u32 val, scale;
@@ -4335,8 +4354,11 @@ static void tg3_tx_recover(struct tg3 *tp)
BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
tp->write32_tx_mbox == tg3_write_indirect_mbox);
- netdev_warn(tp->dev, "The system may be re-ordering memory-mapped I/O cycles to the network device, attempting to recover\n"
- "Please report the problem to the driver maintainer and include system chipset information.\n");
+ netdev_warn(tp->dev,
+ "The system may be re-ordering memory-mapped I/O "
+ "cycles to the network device, attempting to recover. "
+ "Please report the problem to the driver maintainer "
+ "and include system chipset information.\n");
spin_lock(&tp->lock);
tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
@@ -4378,7 +4400,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
}
pci_unmap_single(tp->pdev,
- pci_unmap_addr(ri, mapping),
+ dma_unmap_addr(ri, mapping),
skb_headlen(skb),
PCI_DMA_TODEVICE);
@@ -4392,7 +4414,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
tx_bug = 1;
pci_unmap_page(tp->pdev,
- pci_unmap_addr(ri, mapping),
+ dma_unmap_addr(ri, mapping),
skb_shinfo(skb)->frags[i].size,
PCI_DMA_TODEVICE);
sw_idx = NEXT_TX(sw_idx);
@@ -4430,7 +4452,7 @@ static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
if (!ri->skb)
return;
- pci_unmap_single(tp->pdev, pci_unmap_addr(ri, mapping),
+ pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
map_sz, PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(ri->skb);
ri->skb = NULL;
@@ -4496,7 +4518,7 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
}
map->skb = skb;
- pci_unmap_addr_set(map, mapping, mapping);
+ dma_unmap_addr_set(map, mapping, mapping);
desc->addr_hi = ((u64)mapping >> 32);
desc->addr_lo = ((u64)mapping & 0xffffffff);
@@ -4516,8 +4538,8 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
struct tg3 *tp = tnapi->tp;
struct tg3_rx_buffer_desc *src_desc, *dest_desc;
struct ring_info *src_map, *dest_map;
- int dest_idx;
struct tg3_rx_prodring_set *spr = &tp->prodring[0];
+ int dest_idx;
switch (opaque_key) {
case RXD_OPAQUE_RING_STD:
@@ -4541,8 +4563,8 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
}
dest_map->skb = src_map->skb;
- pci_unmap_addr_set(dest_map, mapping,
- pci_unmap_addr(src_map, mapping));
+ dma_unmap_addr_set(dest_map, mapping,
+ dma_unmap_addr(src_map, mapping));
dest_desc->addr_hi = src_desc->addr_hi;
dest_desc->addr_lo = src_desc->addr_lo;
@@ -4605,18 +4627,20 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
struct sk_buff *skb;
dma_addr_t dma_addr;
u32 opaque_key, desc_idx, *post_ptr;
+ bool hw_vlan __maybe_unused = false;
+ u16 vtag __maybe_unused = 0;
desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
if (opaque_key == RXD_OPAQUE_RING_STD) {
ri = &tp->prodring[0].rx_std_buffers[desc_idx];
- dma_addr = pci_unmap_addr(ri, mapping);
+ dma_addr = dma_unmap_addr(ri, mapping);
skb = ri->skb;
post_ptr = &std_prod_idx;
rx_std_posted++;
} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
ri = &tp->prodring[0].rx_jmb_buffers[desc_idx];
- dma_addr = pci_unmap_addr(ri, mapping);
+ dma_addr = dma_unmap_addr(ri, mapping);
skb = ri->skb;
post_ptr = &jmb_prod_idx;
} else
@@ -4638,12 +4662,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
ETH_FCS_LEN;
- if (len > RX_COPY_THRESHOLD &&
- tp->rx_offset == NET_IP_ALIGN) {
- /* rx_offset will likely not equal NET_IP_ALIGN
- * if this is a 5701 card running in PCI-X mode
- * [see tg3_get_invariants()]
- */
+ if (len > TG3_RX_COPY_THRESH(tp)) {
int skb_size;
skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
@@ -4668,12 +4687,12 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
tg3_recycle_rx(tnapi, tpr, opaque_key,
desc_idx, *post_ptr);
- copy_skb = netdev_alloc_skb(tp->dev,
- len + TG3_RAW_IP_ALIGN);
+ copy_skb = netdev_alloc_skb(tp->dev, len + VLAN_HLEN +
+ TG3_RAW_IP_ALIGN);
if (copy_skb == NULL)
goto drop_it_no_recycle;
- skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
+ skb_reserve(copy_skb, TG3_RAW_IP_ALIGN + VLAN_HLEN);
skb_put(copy_skb, len);
pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
skb_copy_from_linear_data(skb, copy_skb->data, len);
@@ -4699,12 +4718,29 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
goto next_pkt;
}
+ if (desc->type_flags & RXD_FLAG_VLAN &&
+ !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) {
+ vtag = desc->err_vlan & RXD_VLAN_MASK;
#if TG3_VLAN_TAG_USED
- if (tp->vlgrp != NULL &&
- desc->type_flags & RXD_FLAG_VLAN) {
- vlan_gro_receive(&tnapi->napi, tp->vlgrp,
- desc->err_vlan & RXD_VLAN_MASK, skb);
- } else
+ if (tp->vlgrp)
+ hw_vlan = true;
+ else
+#endif
+ {
+ struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
+ __skb_push(skb, VLAN_HLEN);
+
+ memmove(ve, skb->data + VLAN_HLEN,
+ ETH_ALEN * 2);
+ ve->h_vlan_proto = htons(ETH_P_8021Q);
+ ve->h_vlan_TCI = htons(vtag);
+ }
+ }
+
+#if TG3_VLAN_TAG_USED
+ if (hw_vlan)
+ vlan_gro_receive(&tnapi->napi, tp->vlgrp, vtag, skb);
+ else
#endif
napi_gro_receive(&tnapi->napi, skb);
@@ -4978,7 +5014,7 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget)
if (unlikely(work_done >= budget))
break;
- /* tp->last_tag is used in tg3_restart_ints() below
+ /* tp->last_tag is used in tg3_int_reenable() below
* to tell the hw how much work has been processed,
* so we must read it before checking for more work.
*/
@@ -4987,8 +5023,8 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget)
rmb();
/* check for RX/TX work to do */
- if (sblk->idx[0].tx_consumer == tnapi->tx_cons &&
- *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr) {
+ if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
+ *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
napi_complete(napi);
/* Reenable interrupts. */
tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
@@ -5260,7 +5296,8 @@ static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
err = tg3_init_hw(tp, reset_phy);
if (err) {
- netdev_err(tp->dev, "Failed to re-initialize device, aborting\n");
+ netdev_err(tp->dev,
+ "Failed to re-initialize device, aborting\n");
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
tg3_full_unlock(tp);
del_timer_sync(&tp->timer);
@@ -5437,12 +5474,12 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
len = skb_shinfo(skb)->frags[i-1].size;
pci_unmap_single(tp->pdev,
- pci_unmap_addr(&tnapi->tx_buffers[entry],
+ dma_unmap_addr(&tnapi->tx_buffers[entry],
mapping),
len, PCI_DMA_TODEVICE);
if (i == 0) {
tnapi->tx_buffers[entry].skb = new_skb;
- pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
+ dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
new_addr);
} else {
tnapi->tx_buffers[entry].skb = NULL;
@@ -5492,7 +5529,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
struct netdev_queue *txq;
unsigned int i, last;
-
txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
tnapi = &tp->napi[skb_get_queue_mapping(skb)];
if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
@@ -5508,7 +5544,8 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
netif_tx_stop_queue(txq);
/* This is a hard error, log it. */
- netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
+ netdev_err(dev,
+ "BUG! Tx Ring full when queue awake!\n");
}
return NETDEV_TX_BUSY;
}
@@ -5552,9 +5589,10 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
tcp_hdr(skb)->check = 0;
- }
- else if (skb->ip_summed == CHECKSUM_PARTIAL)
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
base_flags |= TXD_FLAG_TCPUDP_CSUM;
+ }
+
#if TG3_VLAN_TAG_USED
if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
base_flags |= (TXD_FLAG_VLAN |
@@ -5571,7 +5609,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
}
tnapi->tx_buffers[entry].skb = skb;
- pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
+ dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
!mss && skb->len > ETH_DATA_LEN)
@@ -5597,7 +5635,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
goto dma_error;
tnapi->tx_buffers[entry].skb = NULL;
- pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
+ dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
mapping);
tg3_set_txd(tnapi, entry, mapping, len,
@@ -5627,7 +5665,7 @@ dma_error:
entry = tnapi->tx_prod;
tnapi->tx_buffers[entry].skb = NULL;
pci_unmap_single(tp->pdev,
- pci_unmap_addr(&tnapi->tx_buffers[entry], mapping),
+ dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
skb_headlen(skb),
PCI_DMA_TODEVICE);
for (i = 0; i <= last; i++) {
@@ -5635,7 +5673,7 @@ dma_error:
entry = NEXT_TX(entry);
pci_unmap_page(tp->pdev,
- pci_unmap_addr(&tnapi->tx_buffers[entry],
+ dma_unmap_addr(&tnapi->tx_buffers[entry],
mapping),
frag->size, PCI_DMA_TODEVICE);
}
@@ -5695,7 +5733,6 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
struct netdev_queue *txq;
unsigned int i, last;
-
txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
tnapi = &tp->napi[skb_get_queue_mapping(skb)];
if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
@@ -5711,7 +5748,8 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
netif_tx_stop_queue(txq);
/* This is a hard error, log it. */
- netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
+ netdev_err(dev,
+ "BUG! Tx Ring full when queue awake!\n");
}
return NETDEV_TX_BUSY;
}
@@ -5737,7 +5775,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
hdr_len = ip_tcp_len + tcp_opt_len;
if (unlikely((ETH_HLEN + hdr_len) > 80) &&
(tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
- return (tg3_tso_bug(tp, skb));
+ return tg3_tso_bug(tp, skb);
base_flags |= (TXD_FLAG_CPU_PRE_DMA |
TXD_FLAG_CPU_POST_DMA);
@@ -5797,7 +5835,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
}
tnapi->tx_buffers[entry].skb = skb;
- pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
+ dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
would_hit_hwbug = 0;
@@ -5833,7 +5871,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
len, PCI_DMA_TODEVICE);
tnapi->tx_buffers[entry].skb = NULL;
- pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
+ dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
mapping);
if (pci_dma_mapping_error(tp->pdev, mapping))
goto dma_error;
@@ -5898,7 +5936,7 @@ dma_error:
entry = tnapi->tx_prod;
tnapi->tx_buffers[entry].skb = NULL;
pci_unmap_single(tp->pdev,
- pci_unmap_addr(&tnapi->tx_buffers[entry], mapping),
+ dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
skb_headlen(skb),
PCI_DMA_TODEVICE);
for (i = 0; i <= last; i++) {
@@ -5906,7 +5944,7 @@ dma_error:
entry = NEXT_TX(entry);
pci_unmap_page(tp->pdev,
- pci_unmap_addr(&tnapi->tx_buffers[entry],
+ dma_unmap_addr(&tnapi->tx_buffers[entry],
mapping),
frag->size, PCI_DMA_TODEVICE);
}
@@ -5924,9 +5962,9 @@ static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
ethtool_op_set_tso(dev, 0);
- }
- else
+ } else {
tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
+ }
} else {
if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
@@ -6007,7 +6045,7 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
}
}
-/* Initialize tx/rx rings for packet processing.
+/* Initialize rx rings for packet processing.
*
* The chip has been shut down and the driver detached from
* the networking, so no interrupts or new tx packets will
@@ -6058,8 +6096,10 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
/* Now allocate fresh SKBs for each rx ring. */
for (i = 0; i < tp->rx_pending; i++) {
if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
- netdev_warn(tp->dev, "Using a smaller RX standard ring, only %d out of %d buffers were allocated successfully\n",
- i, tp->rx_pending);
+ netdev_warn(tp->dev,
+ "Using a smaller RX standard ring. Only "
+ "%d out of %d buffers were allocated "
+ "successfully\n", i, tp->rx_pending);
if (i == 0)
goto initfail;
tp->rx_pending = i;
@@ -6088,8 +6128,10 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
for (i = 0; i < tp->rx_jumbo_pending; i++) {
if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
- netdev_warn(tp->dev, "Using a smaller RX jumbo ring, only %d out of %d buffers were allocated successfully\n",
- i, tp->rx_jumbo_pending);
+ netdev_warn(tp->dev,
+ "Using a smaller RX jumbo ring. Only %d "
+ "out of %d buffers were allocated "
+ "successfully\n", i, tp->rx_jumbo_pending);
if (i == 0)
goto initfail;
tp->rx_jumbo_pending = i;
@@ -6187,7 +6229,7 @@ static void tg3_free_rings(struct tg3 *tp)
}
pci_unmap_single(tp->pdev,
- pci_unmap_addr(txp, mapping),
+ dma_unmap_addr(txp, mapping),
skb_headlen(skb),
PCI_DMA_TODEVICE);
txp->skb = NULL;
@@ -6197,7 +6239,7 @@ static void tg3_free_rings(struct tg3 *tp)
for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
pci_unmap_page(tp->pdev,
- pci_unmap_addr(txp, mapping),
+ dma_unmap_addr(txp, mapping),
skb_shinfo(skb)->frags[k].size,
PCI_DMA_TODEVICE);
i++;
@@ -6433,8 +6475,9 @@ static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int
}
if (i == MAX_WAIT_CNT && !silent) {
- pr_err("tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
- ofs, enable_bit);
+ dev_err(&tp->pdev->dev,
+ "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
+ ofs, enable_bit);
return -ENODEV;
}
@@ -6480,8 +6523,9 @@ static int tg3_abort_hw(struct tg3 *tp, int silent)
break;
}
if (i >= MAX_WAIT_CNT) {
- netdev_err(tp->dev, "%s timed out, TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
- __func__, tr32(MAC_TX_MODE));
+ dev_err(&tp->pdev->dev,
+ "%s timed out, TX_MODE_ENABLE will not clear "
+ "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
err |= -ENODEV;
}
@@ -6551,35 +6595,35 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
return;
switch (kind) {
- case RESET_KIND_INIT:
- tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
- APE_HOST_SEG_SIG_MAGIC);
- tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
- APE_HOST_SEG_LEN_MAGIC);
- apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
- tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
- tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
- APE_HOST_DRIVER_ID_MAGIC);
- tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
- APE_HOST_BEHAV_NO_PHYLOCK);
-
- event = APE_EVENT_STATUS_STATE_START;
- break;
- case RESET_KIND_SHUTDOWN:
- /* With the interface we are currently using,
- * APE does not track driver state. Wiping
- * out the HOST SEGMENT SIGNATURE forces
- * the APE to assume OS absent status.
- */
- tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
+ case RESET_KIND_INIT:
+ tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
+ APE_HOST_SEG_SIG_MAGIC);
+ tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
+ APE_HOST_SEG_LEN_MAGIC);
+ apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
+ tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
+ tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
+ APE_HOST_DRIVER_ID_MAGIC);
+ tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
+ APE_HOST_BEHAV_NO_PHYLOCK);
+
+ event = APE_EVENT_STATUS_STATE_START;
+ break;
+ case RESET_KIND_SHUTDOWN:
+ /* With the interface we are currently using,
+ * APE does not track driver state. Wiping
+ * out the HOST SEGMENT SIGNATURE forces
+ * the APE to assume OS absent status.
+ */
+ tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
- event = APE_EVENT_STATUS_STATE_UNLOAD;
- break;
- case RESET_KIND_SUSPEND:
- event = APE_EVENT_STATUS_STATE_SUSPEND;
- break;
- default:
- return;
+ event = APE_EVENT_STATUS_STATE_UNLOAD;
+ break;
+ case RESET_KIND_SUSPEND:
+ event = APE_EVENT_STATUS_STATE_SUSPEND;
+ break;
+ default:
+ return;
}
event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
@@ -7156,7 +7200,8 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b
if (cpu_base == TX_CPU_BASE &&
(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
- netdev_err(tp->dev, "%s: Trying to load TX cpu firmware which is 5705\n",
+ netdev_err(tp->dev,
+ "%s: Trying to load TX cpu firmware which is 5705\n",
__func__);
return -EINVAL;
}
@@ -7236,7 +7281,8 @@ static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
udelay(1000);
}
if (i >= 5) {
- netdev_err(tp->dev, "tg3_load_firmware fails to set RX CPU PC, is %08x should be %08x\n",
+ netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
+ "should be %08x\n", __func__,
tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
return -ENODEV;
}
@@ -7300,7 +7346,8 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
udelay(1000);
}
if (i >= 5) {
- netdev_err(tp->dev, "%s fails to set CPU PC, is %08x should be %08x\n",
+ netdev_err(tp->dev,
+ "%s fails to set CPU PC, is %08x should be %08x\n",
__func__, tr32(cpu_base + CPU_PC), info.fw_base);
return -ENODEV;
}
@@ -7568,9 +7615,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
- if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
+ if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
tg3_abort_hw(tp, 1);
- }
if (reset_phy)
tg3_phy_reset(tp);
@@ -7631,6 +7677,25 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(GRC_MODE, grc_mode);
}
+ if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
+ u32 grc_mode = tr32(GRC_MODE);
+
+ /* Access the lower 1K of PL PCIE block registers. */
+ val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
+ tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
+
+ val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5);
+ tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
+ val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
+
+ tw32(GRC_MODE, grc_mode);
+
+ val = tr32(TG3_CPMU_LSPD_10MB_CLK);
+ val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
+ val |= CPMU_LSPD_10MB_MACCLK_6_25;
+ tw32(TG3_CPMU_LSPD_10MB_CLK, val);
+ }
+
/* This works around an issue with Athlon chipsets on
* B3 tigon3 silicon. This bit has no effect on any
* other revision. But do not set this on PCI Express
@@ -7679,6 +7744,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
val = tr32(TG3PCI_DMA_RW_CTRL) &
~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
+ if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
+ val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
@@ -7723,8 +7790,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
- }
- else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
+ } else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
int fw_len;
fw_len = tp->fw_len;
@@ -7839,9 +7905,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) |
- (RX_STD_MAX_SIZE << 2);
+ (TG3_RX_STD_DMA_SZ << 2);
else
- val = RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT;
+ val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
} else
val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT;
@@ -8476,8 +8542,8 @@ static void tg3_timer(unsigned long __opaque)
tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
FWCMD_NICDRV_ALIVE3);
tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
- /* 5 seconds timeout */
- tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
+ TG3_FW_UPDATE_TIMEOUT_SEC);
tg3_generate_fw_event(tp);
}
@@ -8625,8 +8691,9 @@ static int tg3_test_msi(struct tg3 *tp)
return err;
/* MSI test failed, go back to INTx mode */
- netdev_warn(tp->dev, "No interrupt was generated using MSI, switching to INTx mode\n"
- "Please report this failure to the PCI maintainer and include system chipset information\n");
+ netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
+ "to INTx mode. Please report this failure to the PCI "
+ "maintainer and include system chipset information\n");
free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
@@ -8739,7 +8806,8 @@ static void tg3_ints_init(struct tg3 *tp)
/* All MSI supporting chips should support tagged
* status. Assert that this is the case.
*/
- netdev_warn(tp->dev, "MSI without TAGGED? Not using MSI\n");
+ netdev_warn(tp->dev,
+ "MSI without TAGGED_STATUS? Not using MSI\n");
goto defcfg;
}
@@ -8914,236 +8982,6 @@ err_out1:
return err;
}
-#if 0
-/*static*/ void tg3_dump_state(struct tg3 *tp)
-{
- u32 val32, val32_2, val32_3, val32_4, val32_5;
- u16 val16;
- int i;
- struct tg3_hw_status *sblk = tp->napi[0]->hw_status;
-
- pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
- pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
- printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
- val16, val32);
-
- /* MAC block */
- printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
- tr32(MAC_MODE), tr32(MAC_STATUS));
- printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
- tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
- printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
- tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
- printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
- tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
-
- /* Send data initiator control block */
- printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
- tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
- printk(" SNDDATAI_STATSCTRL[%08x]\n",
- tr32(SNDDATAI_STATSCTRL));
-
- /* Send data completion control block */
- printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
-
- /* Send BD ring selector block */
- printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
- tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
-
- /* Send BD initiator control block */
- printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
- tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
-
- /* Send BD completion control block */
- printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
-
- /* Receive list placement control block */
- printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
- tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
- printk(" RCVLPC_STATSCTRL[%08x]\n",
- tr32(RCVLPC_STATSCTRL));
-
- /* Receive data and receive BD initiator control block */
- printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
- tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
-
- /* Receive data completion control block */
- printk("DEBUG: RCVDCC_MODE[%08x]\n",
- tr32(RCVDCC_MODE));
-
- /* Receive BD initiator control block */
- printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
- tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
-
- /* Receive BD completion control block */
- printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
- tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
-
- /* Receive list selector control block */
- printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
- tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
-
- /* Mbuf cluster free block */
- printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
- tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
-
- /* Host coalescing control block */
- printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
- tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
- printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
- tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
- tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
- printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
- tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
- tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
- printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
- tr32(HOSTCC_STATS_BLK_NIC_ADDR));
- printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
- tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
-
- /* Memory arbiter control block */
- printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
- tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
-
- /* Buffer manager control block */
- printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
- tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
- printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
- tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
- printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
- "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
- tr32(BUFMGR_DMA_DESC_POOL_ADDR),
- tr32(BUFMGR_DMA_DESC_POOL_SIZE));
-
- /* Read DMA control block */
- printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
- tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
-
- /* Write DMA control block */
- printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
- tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
-
- /* DMA completion block */
- printk("DEBUG: DMAC_MODE[%08x]\n",
- tr32(DMAC_MODE));
-
- /* GRC block */
- printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
- tr32(GRC_MODE), tr32(GRC_MISC_CFG));
- printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
- tr32(GRC_LOCAL_CTRL));
-
- /* TG3_BDINFOs */
- printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
- tr32(RCVDBDI_JUMBO_BD + 0x0),
- tr32(RCVDBDI_JUMBO_BD + 0x4),
- tr32(RCVDBDI_JUMBO_BD + 0x8),
- tr32(RCVDBDI_JUMBO_BD + 0xc));
- printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
- tr32(RCVDBDI_STD_BD + 0x0),
- tr32(RCVDBDI_STD_BD + 0x4),
- tr32(RCVDBDI_STD_BD + 0x8),
- tr32(RCVDBDI_STD_BD + 0xc));
- printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
- tr32(RCVDBDI_MINI_BD + 0x0),
- tr32(RCVDBDI_MINI_BD + 0x4),
- tr32(RCVDBDI_MINI_BD + 0x8),
- tr32(RCVDBDI_MINI_BD + 0xc));
-
- tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
- tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
- tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
- tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
- printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
- val32, val32_2, val32_3, val32_4);
-
- tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
- tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
- tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
- tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
- printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
- val32, val32_2, val32_3, val32_4);
-
- tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
- tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
- tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
- tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
- tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
- printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
- val32, val32_2, val32_3, val32_4, val32_5);
-
- /* SW status block */
- printk(KERN_DEBUG
- "Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
- sblk->status,
- sblk->status_tag,
- sblk->rx_jumbo_consumer,
- sblk->rx_consumer,
- sblk->rx_mini_consumer,
- sblk->idx[0].rx_producer,
- sblk->idx[0].tx_consumer);
-
- /* SW statistics block */
- printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
- ((u32 *)tp->hw_stats)[0],
- ((u32 *)tp->hw_stats)[1],
- ((u32 *)tp->hw_stats)[2],
- ((u32 *)tp->hw_stats)[3]);
-
- /* Mailboxes */
- printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
- tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
- tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
- tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
- tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
-
- /* NIC side send descriptors. */
- for (i = 0; i < 6; i++) {
- unsigned long txd;
-
- txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
- + (i * sizeof(struct tg3_tx_buffer_desc));
- printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
- i,
- readl(txd + 0x0), readl(txd + 0x4),
- readl(txd + 0x8), readl(txd + 0xc));
- }
-
- /* NIC side RX descriptors. */
- for (i = 0; i < 6; i++) {
- unsigned long rxd;
-
- rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
- + (i * sizeof(struct tg3_rx_buffer_desc));
- printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
- i,
- readl(rxd + 0x0), readl(rxd + 0x4),
- readl(rxd + 0x8), readl(rxd + 0xc));
- rxd += (4 * sizeof(u32));
- printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
- i,
- readl(rxd + 0x0), readl(rxd + 0x4),
- readl(rxd + 0x8), readl(rxd + 0xc));
- }
-
- for (i = 0; i < 6; i++) {
- unsigned long rxd;
-
- rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
- + (i * sizeof(struct tg3_rx_buffer_desc));
- printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
- i,
- readl(rxd + 0x0), readl(rxd + 0x4),
- readl(rxd + 0x8), readl(rxd + 0xc));
- rxd += (4 * sizeof(u32));
- printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
- i,
- readl(rxd + 0x0), readl(rxd + 0x4),
- readl(rxd + 0x8), readl(rxd + 0xc));
- }
-}
-#endif
-
static struct net_device_stats *tg3_get_stats(struct net_device *);
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
@@ -9162,9 +9000,6 @@ static int tg3_close(struct net_device *dev)
tg3_phy_stop(tp);
tg3_full_lock(tp, 1);
-#if 0
- tg3_dump_state(tp);
-#endif
tg3_disable_ints(tp);
@@ -9406,9 +9241,8 @@ static inline u32 calc_crc(unsigned char *buf, int len)
reg >>= 1;
- if (tmp) {
+ if (tmp)
reg ^= 0xedb88320;
- }
}
}
@@ -9452,20 +9286,20 @@ static void __tg3_set_rx_mode(struct net_device *dev)
rx_mode |= RX_MODE_PROMISC;
} else if (dev->flags & IFF_ALLMULTI) {
/* Accept all multicast. */
- tg3_set_multi (tp, 1);
+ tg3_set_multi(tp, 1);
} else if (netdev_mc_empty(dev)) {
/* Reject all multicast. */
- tg3_set_multi (tp, 0);
+ tg3_set_multi(tp, 0);
} else {
/* Accept one or more multicast(s). */
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
u32 mc_filter[4] = { 0, };
u32 regidx;
u32 bit;
u32 crc;
- netdev_for_each_mc_addr(mclist, dev) {
- crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = calc_crc(ha->addr, ETH_ALEN);
bit = ~crc & 0x7f;
regidx = (bit & 0x60) >> 5;
bit &= 0x1f;
@@ -9618,7 +9452,7 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
memcpy(data, ((char*)&val) + b_offset, b_count);
len -= b_count;
offset += b_count;
- eeprom->len += b_count;
+ eeprom->len += b_count;
}
/* read bytes upto the last 4 byte boundary */
@@ -10166,8 +10000,8 @@ static int tg3_set_rx_csum(struct net_device *dev, u32 data)
if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
if (data != 0)
return -EINVAL;
- return 0;
- }
+ return 0;
+ }
spin_lock_bh(&tp->lock);
if (data)
@@ -10186,8 +10020,8 @@ static int tg3_set_tx_csum(struct net_device *dev, u32 data)
if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
if (data != 0)
return -EINVAL;
- return 0;
- }
+ return 0;
+ }
if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
ethtool_op_set_tx_ipv6_csum(dev, data);
@@ -10197,7 +10031,7 @@ static int tg3_set_tx_csum(struct net_device *dev, u32 data)
return 0;
}
-static int tg3_get_sset_count (struct net_device *dev, int sset)
+static int tg3_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_TEST:
@@ -10209,7 +10043,7 @@ static int tg3_get_sset_count (struct net_device *dev, int sset)
}
}
-static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
+static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
switch (stringset) {
case ETH_SS_STATS:
@@ -10256,7 +10090,7 @@ static int tg3_phys_id(struct net_device *dev, u32 data)
return 0;
}
-static void tg3_get_ethtool_stats (struct net_device *dev,
+static void tg3_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *estats, u64 *tmp_stats)
{
struct tg3 *tp = netdev_priv(dev);
@@ -10362,8 +10196,7 @@ static int tg3_test_nvram(struct tg3 *tp)
for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
parity[k++] = buf8[i] & msk;
i++;
- }
- else if (i == 16) {
+ } else if (i == 16) {
int l;
u8 msk;
@@ -10461,7 +10294,7 @@ static int tg3_test_registers(struct tg3 *tp)
{ MAC_ADDR_0_HIGH, 0x0000,
0x00000000, 0x0000ffff },
{ MAC_ADDR_0_LOW, 0x0000,
- 0x00000000, 0xffffffff },
+ 0x00000000, 0xffffffff },
{ MAC_RX_MTU_SIZE, 0x0000,
0x00000000, 0x0000ffff },
{ MAC_TX_MODE, 0x0000,
@@ -10649,7 +10482,8 @@ static int tg3_test_registers(struct tg3 *tp)
out:
if (netif_msg_hw(tp))
- pr_err("Register test failed at offset %x\n", offset);
+ netdev_err(tp->dev,
+ "Register test failed at offset %x\n", offset);
tw32(offset, save_val);
return -EIO;
}
@@ -10825,9 +10659,9 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
MII_TG3_EXT_CTRL_LNK3_LED_MODE);
}
tw32(MAC_MODE, mac_mode);
- }
- else
+ } else {
return -EINVAL;
+ }
err = -EIO;
@@ -10909,7 +10743,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
rx_skb = tpr->rx_std_buffers[desc_idx].skb;
- map = pci_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
+ map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
for (i = 14; i < tx_len; i++) {
@@ -11083,7 +10917,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return phy_mii_ioctl(phydev, data, cmd);
}
- switch(cmd) {
+ switch (cmd) {
case SIOCGMIIPHY:
data->phy_id = tp->phy_addr;
@@ -11776,7 +11610,8 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
tp->tg3_flags |= TG3_FLAG_NVRAM;
if (tg3_nvram_lock(tp)) {
- netdev_warn(tp->dev, "Cannot get nvram lock, %s failed\n",
+ netdev_warn(tp->dev,
+ "Cannot get nvram lock, %s failed\n",
__func__);
return;
}
@@ -11895,7 +11730,7 @@ static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
if (ret)
break;
- page_off = offset & pagemask;
+ page_off = offset & pagemask;
size = pagesize;
if (len < size)
size = len;
@@ -11923,7 +11758,7 @@ static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
- if (tg3_nvram_exec_cmd(tp, nvram_cmd))
+ if (tg3_nvram_exec_cmd(tp, nvram_cmd))
break;
/* Issue another write enable to start the write. */
@@ -11977,7 +11812,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
memcpy(&data, buf + i, 4);
tw32(NVRAM_WRDATA, be32_to_cpu(data));
- page_off = offset % tp->nvram_pagesize;
+ page_off = offset % tp->nvram_pagesize;
phy_addr = tg3_nvram_phys_addr(tp, offset);
@@ -11985,7 +11820,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
- if ((page_off == 0) || (i == 0))
+ if (page_off == 0 || i == 0)
nvram_cmd |= NVRAM_CMD_FIRST;
if (page_off == (tp->nvram_pagesize - 4))
nvram_cmd |= NVRAM_CMD_LAST;
@@ -12028,8 +11863,7 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
- }
- else {
+ } else {
u32 grc_mode;
ret = tg3_nvram_lock(tp);
@@ -12049,8 +11883,7 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
ret = tg3_nvram_write_block_buffered(tp, offset, len,
buf);
- }
- else {
+ } else {
ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
buf);
}
@@ -12545,11 +12378,11 @@ skip_phy_reset:
return err;
}
-static void __devinit tg3_read_partno(struct tg3 *tp)
+static void __devinit tg3_read_vpd(struct tg3 *tp)
{
- unsigned char vpd_data[TG3_NVM_VPD_LEN]; /* in little-endian format */
+ u8 vpd_data[TG3_NVM_VPD_LEN];
unsigned int block_end, rosize, len;
- int i = 0;
+ int j, i = 0;
u32 magic;
if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
@@ -12598,6 +12431,32 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
if (block_end > TG3_NVM_VPD_LEN)
goto out_not_found;
+ j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
+ PCI_VPD_RO_KEYWORD_MFR_ID);
+ if (j > 0) {
+ len = pci_vpd_info_field_size(&vpd_data[j]);
+
+ j += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (j + len > block_end || len != 4 ||
+ memcmp(&vpd_data[j], "1028", 4))
+ goto partno;
+
+ j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
+ PCI_VPD_RO_KEYWORD_VENDOR0);
+ if (j < 0)
+ goto partno;
+
+ len = pci_vpd_info_field_size(&vpd_data[j]);
+
+ j += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (j + len > block_end)
+ goto partno;
+
+ memcpy(tp->fw_ver, &vpd_data[j], len);
+ strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
+ }
+
+partno:
i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
PCI_VPD_RO_KEYWORD_PARTNO);
if (i < 0)
@@ -12667,7 +12526,7 @@ static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
u32 val, offset, start, ver_offset;
- int i;
+ int i, dst_off;
bool newver = false;
if (tg3_nvram_read(tp, 0xc, &offset) ||
@@ -12687,8 +12546,11 @@ static void __devinit tg3_read_bc_ver(struct tg3 *tp)
newver = true;
}
+ dst_off = strlen(tp->fw_ver);
+
if (newver) {
- if (tg3_nvram_read(tp, offset + 8, &ver_offset))
+ if (TG3_VER_SIZE - dst_off < 16 ||
+ tg3_nvram_read(tp, offset + 8, &ver_offset))
return;
offset = offset + ver_offset - start;
@@ -12697,7 +12559,7 @@ static void __devinit tg3_read_bc_ver(struct tg3 *tp)
if (tg3_nvram_read_be32(tp, offset + i, &v))
return;
- memcpy(tp->fw_ver + i, &v, sizeof(v));
+ memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
}
} else {
u32 major, minor;
@@ -12708,7 +12570,8 @@ static void __devinit tg3_read_bc_ver(struct tg3 *tp)
major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
TG3_NVM_BCVER_MAJSFT;
minor = ver_offset & TG3_NVM_BCVER_MINMSK;
- snprintf(&tp->fw_ver[0], 32, "v%d.%02d", major, minor);
+ snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
+ "v%d.%02d", major, minor);
}
}
@@ -12732,9 +12595,7 @@ static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
u32 offset, major, minor, build;
- tp->fw_ver[0] = 's';
- tp->fw_ver[1] = 'b';
- tp->fw_ver[2] = '\0';
+ strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
return;
@@ -12771,11 +12632,14 @@ static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
if (minor > 99 || build > 26)
return;
- snprintf(&tp->fw_ver[2], 30, " v%d.%02d", major, minor);
+ offset = strlen(tp->fw_ver);
+ snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
+ " v%d.%02d", major, minor);
if (build > 0) {
- tp->fw_ver[8] = 'a' + build - 1;
- tp->fw_ver[9] = '\0';
+ offset = strlen(tp->fw_ver);
+ if (offset < TG3_VER_SIZE - 1)
+ tp->fw_ver[offset] = 'a' + build - 1;
}
}
@@ -12862,12 +12726,13 @@ static void __devinit tg3_read_dash_ver(struct tg3 *tp)
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
u32 val;
+ bool vpd_vers = false;
- if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
- tp->fw_ver[0] = 's';
- tp->fw_ver[1] = 'b';
- tp->fw_ver[2] = '\0';
+ if (tp->fw_ver[0] != 0)
+ vpd_vers = true;
+ if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
+ strcat(tp->fw_ver, "sb");
return;
}
@@ -12884,11 +12749,12 @@ static void __devinit tg3_read_fw_ver(struct tg3 *tp)
return;
if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
- (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
- return;
+ (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) || vpd_vers)
+ goto done;
tg3_read_mgmtfw_ver(tp);
+done:
tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
@@ -12898,9 +12764,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
{
static struct pci_device_id write_reorder_chipsets[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
- PCI_DEVICE_ID_AMD_FE_GATE_700C) },
+ PCI_DEVICE_ID_AMD_FE_GATE_700C) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
- PCI_DEVICE_ID_AMD_8131_BRIDGE) },
+ PCI_DEVICE_ID_AMD_8131_BRIDGE) },
{ PCI_DEVICE(PCI_VENDOR_ID_VIA,
PCI_DEVICE_ID_VIA_8385_0) },
{ },
@@ -13066,8 +12932,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
- }
- else {
+ } else {
struct pci_dev *bridge = NULL;
do {
@@ -13129,6 +12994,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
tp->dev->features |= NETIF_F_IPV6_CSUM;
+ tp->dev->features |= NETIF_F_GRO;
}
/* Determine TSO capabilities */
@@ -13189,8 +13055,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
- (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
- (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG))
+ (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
+ (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG))
tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
@@ -13224,7 +13090,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
if (!tp->pcix_cap) {
- pr_err("Cannot find PCI-X capability, aborting\n");
+ dev_err(&tp->pdev->dev,
+ "Cannot find PCI-X capability, aborting\n");
return -EIO;
}
@@ -13421,7 +13288,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
/* Force the chip into D0. */
err = tg3_set_power_state(tp, PCI_D0);
if (err) {
- pr_err("(%s) transition to D0 failed\n", pci_name(tp->pdev));
+ dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
return err;
}
@@ -13595,13 +13462,12 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
err = tg3_phy_probe(tp);
if (err) {
- pr_err("(%s) phy probe failed, err %d\n",
- pci_name(tp->pdev), err);
+ dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
/* ... but do not return immediately ... */
tg3_mdio_fini(tp);
}
- tg3_read_partno(tp);
+ tg3_read_vpd(tp);
tg3_read_fw_ver(tp);
if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
@@ -13639,10 +13505,15 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
else
tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
- tp->rx_offset = NET_IP_ALIGN;
+ tp->rx_offset = NET_IP_ALIGN + TG3_RX_HEADROOM;
+ tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
- (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
- tp->rx_offset = 0;
+ (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
+ tp->rx_offset -= NET_IP_ALIGN;
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ tp->rx_copy_thresh = ~(u16)0;
+#endif
+ }
tp->rx_std_max_post = TG3_RX_RING_SIZE;
@@ -13965,11 +13836,10 @@ static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dm
}
pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
- if (to_device) {
+ if (to_device)
tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
- } else {
+ else
tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
- }
ret = -ENODEV;
for (i = 0; i < 40; i++) {
@@ -14105,8 +13975,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
/* Send the buffer to the chip. */
ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
if (ret) {
- pr_err("tg3_test_dma() Write the buffer failed %d\n",
- ret);
+ dev_err(&tp->pdev->dev,
+ "%s: Buffer write failed. err = %d\n",
+ __func__, ret);
break;
}
@@ -14116,8 +13987,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
u32 val;
tg3_read_mem(tp, 0x2100 + (i*4), &val);
if (le32_to_cpu(val) != p[i]) {
- pr_err(" tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n",
- val, i);
+ dev_err(&tp->pdev->dev,
+ "%s: Buffer corrupted on device! "
+ "(%d != %d)\n", __func__, val, i);
/* ret = -ENODEV here? */
}
p[i] = 0;
@@ -14126,9 +13998,8 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
/* Now read it back. */
ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
if (ret) {
- pr_err("tg3_test_dma() Read the buffer failed %d\n",
- ret);
-
+ dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
+ "err = %d\n", __func__, ret);
break;
}
@@ -14144,8 +14015,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
break;
} else {
- pr_err("tg3_test_dma() buffer corrupted on read back! (%d != %d)\n",
- p[i], i);
+ dev_err(&tp->pdev->dev,
+ "%s: Buffer corrupted on read back! "
+ "(%d != %d)\n", __func__, p[i], i);
ret = -ENODEV;
goto out;
}
@@ -14172,10 +14044,10 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
if (pci_dev_present(dma_wait_state_chipsets)) {
tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
- }
- else
+ } else {
/* Safe to use the calculated DMA boundary. */
tp->dma_rwctrl = saved_dma_rwctrl;
+ }
tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
}
@@ -14437,13 +14309,13 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = pci_enable_device(pdev);
if (err) {
- pr_err("Cannot enable PCI device, aborting\n");
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
return err;
}
err = pci_request_regions(pdev, DRV_MODULE_NAME);
if (err) {
- pr_err("Cannot obtain PCI resources, aborting\n");
+ dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
goto err_out_disable_pdev;
}
@@ -14452,14 +14324,15 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
/* Find power-management capability. */
pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
if (pm_cap == 0) {
- pr_err("Cannot find PowerManagement capability, aborting\n");
+ dev_err(&pdev->dev,
+ "Cannot find Power Management capability, aborting\n");
err = -EIO;
goto err_out_free_res;
}
dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
if (!dev) {
- pr_err("Etherdev alloc failed, aborting\n");
+ dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
err = -ENOMEM;
goto err_out_free_res;
}
@@ -14509,7 +14382,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
tp->regs = pci_ioremap_bar(pdev, BAR_0);
if (!tp->regs) {
- netdev_err(dev, "Cannot map device registers, aborting\n");
+ dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
err = -ENOMEM;
goto err_out_free_dev;
}
@@ -14525,7 +14398,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = tg3_get_invariants(tp);
if (err) {
- netdev_err(dev, "Problem fetching invariants of chip, aborting\n");
+ dev_err(&pdev->dev,
+ "Problem fetching invariants of chip, aborting\n");
goto err_out_iounmap;
}
@@ -14560,7 +14434,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = pci_set_consistent_dma_mask(pdev,
persist_dma_mask);
if (err < 0) {
- netdev_err(dev, "Unable to obtain 64 bit DMA for consistent allocations\n");
+ dev_err(&pdev->dev, "Unable to obtain 64 bit "
+ "DMA for consistent allocations\n");
goto err_out_iounmap;
}
}
@@ -14568,7 +14443,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
if (err || dma_mask == DMA_BIT_MASK(32)) {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- netdev_err(dev, "No usable DMA configuration, aborting\n");
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
goto err_out_iounmap;
}
}
@@ -14617,14 +14493,16 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = tg3_get_device_address(tp);
if (err) {
- netdev_err(dev, "Could not obtain valid ethernet address, aborting\n");
+ dev_err(&pdev->dev,
+ "Could not obtain valid ethernet address, aborting\n");
goto err_out_iounmap;
}
if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
if (!tp->aperegs) {
- netdev_err(dev, "Cannot map APE registers, aborting\n");
+ dev_err(&pdev->dev,
+ "Cannot map APE registers, aborting\n");
err = -ENOMEM;
goto err_out_iounmap;
}
@@ -14648,7 +14526,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = tg3_test_dma(tp);
if (err) {
- netdev_err(dev, "DMA engine test failed, aborting\n");
+ dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
goto err_out_apeunmap;
}
@@ -14709,7 +14587,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = register_netdev(dev);
if (err) {
- netdev_err(dev, "Cannot register net device, aborting\n");
+ dev_err(&pdev->dev, "Cannot register net device, aborting\n");
goto err_out_apeunmap;
}
@@ -14722,11 +14600,12 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
struct phy_device *phydev;
phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
- netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
+ netdev_info(dev,
+ "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
phydev->drv->name, dev_name(&phydev->dev));
} else
- netdev_info(dev, "attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
- tg3_phy_string(tp),
+ netdev_info(dev, "attached PHY is %s (%s Ethernet) "
+ "(WireSpeed[%d])\n", tg3_phy_string(tp),
((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
"10/100/1000Base-T")),
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 574a1cc4d353..ce9c4918c318 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -23,11 +23,8 @@
#define TG3_BDINFO_NIC_ADDR 0xcUL /* 32-bit */
#define TG3_BDINFO_SIZE 0x10UL
-#define RX_COPY_THRESHOLD 256
-
#define TG3_RX_INTERNAL_RING_SZ_5906 32
-#define RX_STD_MAX_SIZE 1536
#define RX_STD_MAX_SIZE_5705 512
#define RX_JUMBO_MAX_SIZE 0xdeadbeef /* XXX */
@@ -183,6 +180,7 @@
#define METAL_REV_B2 0x02
#define TG3PCI_DMA_RW_CTRL 0x0000006c
#define DMA_RWCTRL_DIS_CACHE_ALIGNMENT 0x00000001
+#define DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK 0x00000380
#define DMA_RWCTRL_READ_BNDRY_MASK 0x00000700
#define DMA_RWCTRL_READ_BNDRY_DISAB 0x00000000
#define DMA_RWCTRL_READ_BNDRY_16 0x00000100
@@ -252,7 +250,7 @@
/* 0x94 --> 0x98 unused */
#define TG3PCI_STD_RING_PROD_IDX 0x00000098 /* 64-bit */
#define TG3PCI_RCV_RET_RING_CON_IDX 0x000000a0 /* 64-bit */
-/* 0xa0 --> 0xb8 unused */
+/* 0xa8 --> 0xb8 unused */
#define TG3PCI_DUAL_MAC_CTRL 0x000000b8
#define DUAL_MAC_CTRL_CH_MASK 0x00000003
#define DUAL_MAC_CTRL_ID 0x00000004
@@ -1854,6 +1852,8 @@
#define TG3_PCIE_TLDLPL_PORT 0x00007c00
#define TG3_PCIE_PL_LO_PHYCTL1 0x00000004
#define TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN 0x00001000
+#define TG3_PCIE_PL_LO_PHYCTL5 0x00000014
+#define TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ 0x80000000
/* OTP bit definitions */
#define TG3_OTP_AGCTGT_MASK 0x000000e0
@@ -2082,7 +2082,7 @@
#define MII_TG3_DSP_AADJ1CH0 0x001f
#define MII_TG3_DSP_AADJ1CH3 0x601f
#define MII_TG3_DSP_AADJ1CH3_ADCCKADJ 0x0002
-#define MII_TG3_DSP_EXP8 0x0708
+#define MII_TG3_DSP_EXP8 0x0f08
#define MII_TG3_DSP_EXP8_REJ2MHz 0x0001
#define MII_TG3_DSP_EXP8_AEDW 0x0200
#define MII_TG3_DSP_EXP75 0x0f75
@@ -2512,7 +2512,7 @@ struct tg3_hw_stats {
*/
struct ring_info {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
};
struct tg3_config_info {
@@ -2561,7 +2561,7 @@ struct tg3_bufmgr_config {
struct tg3_ethtool_stats {
/* Statistics maintained by Receive MAC. */
- u64 rx_octets;
+ u64 rx_octets;
u64 rx_fragments;
u64 rx_ucast_packets;
u64 rx_mcast_packets;
@@ -2751,9 +2751,11 @@ struct tg3 {
struct tg3_napi napi[TG3_IRQ_MAX_VECS];
void (*write32_rx_mbox) (struct tg3 *, u32,
u32);
+ u32 rx_copy_thresh;
u32 rx_pending;
u32 rx_jumbo_pending;
u32 rx_std_max_post;
+ u32 rx_offset;
u32 rx_pkt_map_sz;
#if TG3_VLAN_TAG_USED
struct vlan_group *vlgrp;
@@ -2773,7 +2775,6 @@ struct tg3 {
unsigned long last_event_jiffies;
};
- u32 rx_offset;
u32 tg3_flags;
#define TG3_FLAG_TAGGED_STATUS 0x00000001
#define TG3_FLAG_TXD_MBOX_HWBUG 0x00000002
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index 390540c101c7..ccee3eddc5f4 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -1034,7 +1034,7 @@ static void TLan_tx_timeout(struct net_device *dev)
TLan_ResetLists( dev );
TLan_ReadAndClearStats( dev, TLAN_IGNORE );
TLan_ResetAdapter( dev );
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue( dev );
}
@@ -1147,7 +1147,6 @@ static netdev_tx_t TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
CIRC_INC( priv->txTail, TLAN_NUM_TX_LISTS );
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
} /* TLan_StartTx */
@@ -1314,7 +1313,7 @@ static struct net_device_stats *TLan_GetStats( struct net_device *dev )
static void TLan_SetMulticastList( struct net_device *dev )
{
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
u32 hash1 = 0;
u32 hash2 = 0;
int i;
@@ -1336,12 +1335,12 @@ static void TLan_SetMulticastList( struct net_device *dev )
TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, 0xFFFFFFFF );
} else {
i = 0;
- netdev_for_each_mc_addr(dmi, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if ( i < 3 ) {
TLan_SetMac( dev, i + 1,
- (char *) &dmi->dmi_addr );
+ (char *) &ha->addr);
} else {
- offset = TLan_HashFunc( (u8 *) &dmi->dmi_addr );
+ offset = TLan_HashFunc((u8 *)&ha->addr);
if ( offset < 32 )
hash1 |= ( 1 << offset );
else
@@ -2464,7 +2463,7 @@ static void TLan_PhyPrint( struct net_device *dev )
printk( "TLAN: Device %s, Unmanaged PHY.\n", dev->name );
} else if ( phy <= TLAN_PHY_MAX_ADDR ) {
printk( "TLAN: Device %s, PHY 0x%02x.\n", dev->name, phy );
- printk( "TLAN: Off. +0 +1 +2 +3 \n" );
+ printk( "TLAN: Off. +0 +1 +2 +3\n" );
for ( i = 0; i < 0x20; i+= 4 ) {
printk( "TLAN: 0x%02x", i );
TLan_MiiReadReg( dev, phy, i, &data0 );
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index 7d7f3eef1ab3..10800f16a231 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -77,7 +77,7 @@ static char version[] __devinitdata =
#define FW_NAME "3com/3C359.bin"
MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ;
-MODULE_DESCRIPTION("3Com 3C359 Velocity XL Token Ring Adapter Driver \n") ;
+MODULE_DESCRIPTION("3Com 3C359 Velocity XL Token Ring Adapter Driver\n") ;
MODULE_FIRMWARE(FW_NAME);
/* Module parameters */
@@ -163,19 +163,19 @@ static void print_tx_state(struct net_device *dev)
u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
int i ;
- printk("tx_ring_head: %d, tx_ring_tail: %d, free_ent: %d \n",xl_priv->tx_ring_head,
+ printk("tx_ring_head: %d, tx_ring_tail: %d, free_ent: %d\n",xl_priv->tx_ring_head,
xl_priv->tx_ring_tail, xl_priv->free_ring_entries) ;
- printk("Ring , Address , FSH , DnNextPtr, Buffer, Buffer_Len \n");
+ printk("Ring , Address , FSH , DnNextPtr, Buffer, Buffer_Len\n");
for (i = 0; i < 16; i++) {
txd = &(xl_priv->xl_tx_ring[i]) ;
- printk("%d, %08lx, %08x, %08x, %08x, %08x \n", i, virt_to_bus(txd),
+ printk("%d, %08lx, %08x, %08x, %08x, %08x\n", i, virt_to_bus(txd),
txd->framestartheader, txd->dnnextptr, txd->buffer, txd->buffer_length ) ;
}
- printk("DNLISTPTR = %04x \n", readl(xl_mmio + MMIO_DNLISTPTR) );
+ printk("DNLISTPTR = %04x\n", readl(xl_mmio + MMIO_DNLISTPTR) );
- printk("DmaCtl = %04x \n", readl(xl_mmio + MMIO_DMA_CTRL) );
- printk("Queue status = %0x \n",netif_running(dev) ) ;
+ printk("DmaCtl = %04x\n", readl(xl_mmio + MMIO_DMA_CTRL) );
+ printk("Queue status = %0x\n",netif_running(dev) ) ;
}
static void print_rx_state(struct net_device *dev)
@@ -186,19 +186,19 @@ static void print_rx_state(struct net_device *dev)
u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
int i ;
- printk("rx_ring_tail: %d \n", xl_priv->rx_ring_tail) ;
- printk("Ring , Address , FrameState , UPNextPtr, FragAddr, Frag_Len \n");
+ printk("rx_ring_tail: %d\n", xl_priv->rx_ring_tail);
+ printk("Ring , Address , FrameState , UPNextPtr, FragAddr, Frag_Len\n");
for (i = 0; i < 16; i++) {
/* rxd = (struct xl_rx_desc *)xl_priv->rx_ring_dma_addr + (i * sizeof(struct xl_rx_desc)) ; */
rxd = &(xl_priv->xl_rx_ring[i]) ;
- printk("%d, %08lx, %08x, %08x, %08x, %08x \n", i, virt_to_bus(rxd),
+ printk("%d, %08lx, %08x, %08x, %08x, %08x\n", i, virt_to_bus(rxd),
rxd->framestatus, rxd->upnextptr, rxd->upfragaddr, rxd->upfraglen ) ;
}
- printk("UPLISTPTR = %04x \n", readl(xl_mmio + MMIO_UPLISTPTR) );
+ printk("UPLISTPTR = %04x\n", readl(xl_mmio + MMIO_UPLISTPTR));
- printk("DmaCtl = %04x \n", readl(xl_mmio + MMIO_DMA_CTRL) );
- printk("Queue status = %0x \n",netif_running(dev) ) ;
+ printk("DmaCtl = %04x\n", readl(xl_mmio + MMIO_DMA_CTRL));
+ printk("Queue status = %0x\n",netif_running(dev));
}
#endif
@@ -391,7 +391,7 @@ static int __devinit xl_init(struct net_device *dev)
struct xl_private *xl_priv = netdev_priv(dev);
int err;
- printk(KERN_INFO "%s \n", version);
+ printk(KERN_INFO "%s\n", version);
printk(KERN_INFO "%s: I/O at %hx, MMIO at %p, using irq %d\n",
xl_priv->xl_card_name, (unsigned int)dev->base_addr ,xl_priv->xl_mmio, dev->irq);
@@ -463,7 +463,7 @@ static int xl_hw_reset(struct net_device *dev)
writel( (IO_WORD_READ | PMBAR),xl_mmio + MMIO_MAC_ACCESS_CMD);
#if XL_DEBUG
- printk(KERN_INFO "Read from PMBAR = %04x \n", readw(xl_mmio + MMIO_MACDATA)) ;
+ printk(KERN_INFO "Read from PMBAR = %04x\n", readw(xl_mmio + MMIO_MACDATA));
#endif
if ( readw( (xl_mmio + MMIO_MACDATA)) & PMB_CPHOLD ) {
@@ -591,9 +591,9 @@ static int xl_hw_reset(struct net_device *dev)
#if XL_DEBUG
writel(IO_WORD_READ | SWITCHSETTINGS, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
if ( readw(xl_mmio + MMIO_MACDATA) & 2) {
- printk(KERN_INFO "Default ring speed 4 mbps \n") ;
+ printk(KERN_INFO "Default ring speed 4 mbps\n");
} else {
- printk(KERN_INFO "Default ring speed 16 mbps \n") ;
+ printk(KERN_INFO "Default ring speed 16 mbps\n");
}
printk(KERN_INFO "%s: xl_priv->srb = %04x\n",xl_priv->xl_card_name, xl_priv->srb);
#endif
@@ -651,7 +651,7 @@ static int xl_open(struct net_device *dev)
if (open_err != 0) { /* Something went wrong with the open command */
if (open_err & 0x07) { /* Wrong speed, retry at different speed */
- printk(KERN_WARNING "%s: Open Error, retrying at different ringspeed \n", dev->name) ;
+ printk(KERN_WARNING "%s: Open Error, retrying at different ringspeed\n", dev->name);
switchsettings = switchsettings ^ 2 ;
xl_ee_write(dev,0x08,switchsettings) ;
xl_hw_reset(dev) ;
@@ -703,7 +703,7 @@ static int xl_open(struct net_device *dev)
}
if (i==0) {
- printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled \n",dev->name) ;
+ printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled\n",dev->name);
free_irq(dev->irq,dev) ;
kfree(xl_priv->xl_tx_ring);
kfree(xl_priv->xl_rx_ring);
@@ -853,7 +853,7 @@ static int xl_open_hw(struct net_device *dev)
writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 12, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
xl_priv->arb = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
- printk(", ARB: %04x \n",xl_priv->arb ) ;
+ printk(", ARB: %04x\n",xl_priv->arb );
writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 14, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
vsoff = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
@@ -867,7 +867,7 @@ static int xl_open_hw(struct net_device *dev)
ver_str[i] = readb(xl_mmio + MMIO_MACDATA) ;
}
ver_str[i] = '\0' ;
- printk(KERN_INFO "%s: Microcode version String: %s \n",dev->name,ver_str);
+ printk(KERN_INFO "%s: Microcode version String: %s\n",dev->name,ver_str);
}
/*
@@ -991,7 +991,7 @@ static void xl_rx(struct net_device *dev)
skb = dev_alloc_skb(xl_priv->pkt_buf_sz) ;
if (skb==NULL) { /* Still need to fix the rx ring */
- printk(KERN_WARNING "%s: dev_alloc_skb failed in rx, single buffer \n",dev->name) ;
+ printk(KERN_WARNING "%s: dev_alloc_skb failed in rx, single buffer\n",dev->name);
adv_rx_ring(dev) ;
dev->stats.rx_dropped++ ;
writel(ACK_INTERRUPT | UPCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ;
@@ -1092,7 +1092,7 @@ static irqreturn_t xl_interrupt(int irq, void *dev_id)
*/
if (intstatus == 0x0001) {
writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
- printk(KERN_INFO "%s: 00001 int received \n",dev->name) ;
+ printk(KERN_INFO "%s: 00001 int received\n",dev->name);
} else {
if (intstatus & (HOSTERRINT | SRBRINT | ARBCINT | UPCOMPINT | DNCOMPINT | HARDERRINT | (1<<8) | TXUNDERRUN | ASBFINT)) {
@@ -1103,9 +1103,9 @@ static irqreturn_t xl_interrupt(int irq, void *dev_id)
*/
if (intstatus & HOSTERRINT) {
- printk(KERN_WARNING "%s: Host Error, performing global reset, intstatus = %04x \n",dev->name,intstatus) ;
+ printk(KERN_WARNING "%s: Host Error, performing global reset, intstatus = %04x\n",dev->name,intstatus);
writew( GLOBAL_RESET, xl_mmio + MMIO_COMMAND ) ;
- printk(KERN_WARNING "%s: Resetting hardware: \n", dev->name);
+ printk(KERN_WARNING "%s: Resetting hardware:\n", dev->name);
netif_stop_queue(dev) ;
xl_freemem(dev) ;
free_irq(dev->irq,dev);
@@ -1128,7 +1128,7 @@ static irqreturn_t xl_interrupt(int irq, void *dev_id)
Must put a timeout check here ! */
/* Empty Loop */
}
- printk(KERN_WARNING "%s: TX Underrun received \n",dev->name) ;
+ printk(KERN_WARNING "%s: TX Underrun received\n",dev->name);
writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
} /* TxUnderRun */
@@ -1157,13 +1157,13 @@ static irqreturn_t xl_interrupt(int irq, void *dev_id)
macstatus = readw(xl_mmio + MMIO_MACDATA) ;
printk(KERN_WARNING "%s: MacStatusError, details: ", dev->name);
if (macstatus & (1<<14))
- printk(KERN_WARNING "tchk error: Unrecoverable error \n") ;
+ printk(KERN_WARNING "tchk error: Unrecoverable error\n");
if (macstatus & (1<<3))
- printk(KERN_WARNING "eint error: Internal watchdog timer expired \n") ;
+ printk(KERN_WARNING "eint error: Internal watchdog timer expired\n");
if (macstatus & (1<<2))
- printk(KERN_WARNING "aint error: Host tried to perform invalid operation \n") ;
+ printk(KERN_WARNING "aint error: Host tried to perform invalid operation\n");
printk(KERN_WARNING "Instatus = %02x, macstatus = %02x\n",intstatus,macstatus) ;
- printk(KERN_WARNING "%s: Resetting hardware: \n", dev->name);
+ printk(KERN_WARNING "%s: Resetting hardware:\n", dev->name);
netif_stop_queue(dev) ;
xl_freemem(dev) ;
free_irq(dev->irq,dev);
@@ -1175,7 +1175,7 @@ static irqreturn_t xl_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
} else {
- printk(KERN_WARNING "%s: Received Unknown interrupt : %04x \n", dev->name, intstatus) ;
+ printk(KERN_WARNING "%s: Received Unknown interrupt : %04x\n", dev->name, intstatus);
writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
}
}
@@ -1350,11 +1350,11 @@ static int xl_close(struct net_device *dev)
writel(MEM_BYTE_READ | 0xd0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD);
if (readb(xl_mmio + MMIO_MACDATA) != CLOSE_NIC) {
- printk(KERN_INFO "%s: CLOSE_NIC did not get a CLOSE_NIC response \n",dev->name) ;
+ printk(KERN_INFO "%s: CLOSE_NIC did not get a CLOSE_NIC response\n",dev->name);
} else {
writel((MEM_BYTE_READ | 0xd0000 | xl_priv->srb) +2, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
if (readb(xl_mmio + MMIO_MACDATA)==0) {
- printk(KERN_INFO "%s: Adapter has been closed \n",dev->name) ;
+ printk(KERN_INFO "%s: Adapter has been closed\n",dev->name);
writew(ACK_INTERRUPT | SRBRACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
xl_freemem(dev) ;
@@ -1391,7 +1391,7 @@ static int xl_close(struct net_device *dev)
static void xl_set_rx_mode(struct net_device *dev)
{
struct xl_private *xl_priv = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
unsigned char dev_mc_address[4] ;
u16 options ;
@@ -1408,11 +1408,11 @@ static void xl_set_rx_mode(struct net_device *dev)
dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
- netdev_for_each_mc_addr(dmi, dev) {
- dev_mc_address[0] |= dmi->dmi_addr[2] ;
- dev_mc_address[1] |= dmi->dmi_addr[3] ;
- dev_mc_address[2] |= dmi->dmi_addr[4] ;
- dev_mc_address[3] |= dmi->dmi_addr[5] ;
+ netdev_for_each_mc_addr(ha, dev) {
+ dev_mc_address[0] |= ha->addr[2];
+ dev_mc_address[1] |= ha->addr[3];
+ dev_mc_address[2] |= ha->addr[4];
+ dev_mc_address[3] |= ha->addr[5];
}
if (memcmp(xl_priv->xl_functional_addr,dev_mc_address,4) != 0) { /* Options have changed, run the command */
@@ -1447,11 +1447,11 @@ static void xl_srb_bh(struct net_device *dev)
printk(KERN_INFO "%s: Command: %d - Invalid Command code\n",dev->name,srb_cmd) ;
break ;
case 4:
- printk(KERN_INFO "%s: Command: %d - Adapter is closed, must be open for this command \n",dev->name,srb_cmd) ;
+ printk(KERN_INFO "%s: Command: %d - Adapter is closed, must be open for this command\n",dev->name,srb_cmd);
break ;
case 6:
- printk(KERN_INFO "%s: Command: %d - Options Invalid for command \n",dev->name,srb_cmd) ;
+ printk(KERN_INFO "%s: Command: %d - Options Invalid for command\n",dev->name,srb_cmd);
break ;
case 0: /* Successful command execution */
@@ -1472,11 +1472,11 @@ static void xl_srb_bh(struct net_device *dev)
break ;
case SET_FUNC_ADDRESS:
if(xl_priv->xl_message_level)
- printk(KERN_INFO "%s: Functional Address Set \n",dev->name) ;
+ printk(KERN_INFO "%s: Functional Address Set\n",dev->name);
break ;
case CLOSE_NIC:
if(xl_priv->xl_message_level)
- printk(KERN_INFO "%s: Received CLOSE_NIC interrupt in interrupt handler \n",dev->name) ;
+ printk(KERN_INFO "%s: Received CLOSE_NIC interrupt in interrupt handler\n",dev->name);
break ;
case SET_MULTICAST_MODE:
if(xl_priv->xl_message_level)
@@ -1485,9 +1485,9 @@ static void xl_srb_bh(struct net_device *dev)
case SET_RECEIVE_MODE:
if(xl_priv->xl_message_level) {
if (xl_priv->xl_copy_all_options == 0x0004)
- printk(KERN_INFO "%s: Entering promiscuous mode \n", dev->name) ;
+ printk(KERN_INFO "%s: Entering promiscuous mode\n", dev->name);
else
- printk(KERN_INFO "%s: Entering normal receive mode \n",dev->name) ;
+ printk(KERN_INFO "%s: Entering normal receive mode\n",dev->name);
}
break ;
@@ -1557,20 +1557,20 @@ static void xl_arb_cmd(struct net_device *dev)
xl_freemem(dev) ;
free_irq(dev->irq,dev);
- printk(KERN_WARNING "%s: Adapter has been closed \n", dev->name) ;
+ printk(KERN_WARNING "%s: Adapter has been closed\n", dev->name);
} /* If serious error */
if (xl_priv->xl_message_level) {
if (lan_status_diff & LSC_SIG_LOSS)
- printk(KERN_WARNING "%s: No receive signal detected \n", dev->name) ;
+ printk(KERN_WARNING "%s: No receive signal detected\n", dev->name);
if (lan_status_diff & LSC_HARD_ERR)
- printk(KERN_INFO "%s: Beaconing \n",dev->name);
+ printk(KERN_INFO "%s: Beaconing\n",dev->name);
if (lan_status_diff & LSC_SOFT_ERR)
- printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame \n",dev->name);
+ printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
if (lan_status_diff & LSC_TRAN_BCN)
printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name);
if (lan_status_diff & LSC_SS)
- printk(KERN_INFO "%s: Single Station on the ring \n", dev->name);
+ printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
if (lan_status_diff & LSC_RING_REC)
printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name);
if (lan_status_diff & LSC_FDX_MODE)
@@ -1579,7 +1579,7 @@ static void xl_arb_cmd(struct net_device *dev)
if (lan_status_diff & LSC_CO) {
if (xl_priv->xl_message_level)
- printk(KERN_INFO "%s: Counter Overflow \n", dev->name);
+ printk(KERN_INFO "%s: Counter Overflow\n", dev->name);
/* Issue READ.LOG command */
xl_srb_cmd(dev, READ_LOG) ;
}
@@ -1595,7 +1595,7 @@ static void xl_arb_cmd(struct net_device *dev)
} /* Lan.change.status */
else if ( arb_cmd == RECEIVE_DATA) { /* Received.Data */
#if XL_DEBUG
- printk(KERN_INFO "Received.Data \n") ;
+ printk(KERN_INFO "Received.Data\n");
#endif
writel( ((MEM_WORD_READ | 0xD0000 | xl_priv->arb) + 6), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
xl_priv->mac_buffer = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
@@ -1630,7 +1630,7 @@ static void xl_arb_cmd(struct net_device *dev)
xl_asb_cmd(dev) ;
} else {
- printk(KERN_WARNING "%s: Received unknown arb (xl_priv) command: %02x \n",dev->name,arb_cmd) ;
+ printk(KERN_WARNING "%s: Received unknown arb (xl_priv) command: %02x\n",dev->name,arb_cmd);
}
/* Acknowledge the arb interrupt */
@@ -1687,13 +1687,13 @@ static void xl_asb_bh(struct net_device *dev)
ret_code = readb(xl_mmio + MMIO_MACDATA) ;
switch (ret_code) {
case 0x01:
- printk(KERN_INFO "%s: ASB Command, unrecognized command code \n",dev->name) ;
+ printk(KERN_INFO "%s: ASB Command, unrecognized command code\n",dev->name);
break ;
case 0x26:
- printk(KERN_INFO "%s: ASB Command, unexpected receive buffer \n", dev->name) ;
+ printk(KERN_INFO "%s: ASB Command, unexpected receive buffer\n", dev->name);
break ;
case 0x40:
- printk(KERN_INFO "%s: ASB Command, Invalid Station ID \n", dev->name) ;
+ printk(KERN_INFO "%s: ASB Command, Invalid Station ID\n", dev->name);
break ;
}
xl_priv->asb_queued = 0 ;
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index 1a0967246e2f..91e6c78271a3 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -986,7 +986,7 @@ static void open_sap(unsigned char type, struct net_device *dev)
static void tok_set_multicast_list(struct net_device *dev)
{
struct tok_info *ti = netdev_priv(dev);
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
unsigned char address[4];
int i;
@@ -995,11 +995,11 @@ static void tok_set_multicast_list(struct net_device *dev)
/*BMS ifconfig tr down or hot unplug a PCMCIA card ??hownowbrowncow*/
if (/*BMSHELPdev->start == 0 ||*/ ti->open_status != OPEN) return;
address[0] = address[1] = address[2] = address[3] = 0;
- netdev_for_each_mc_addr(mclist, dev) {
- address[0] |= mclist->dmi_addr[2];
- address[1] |= mclist->dmi_addr[3];
- address[2] |= mclist->dmi_addr[4];
- address[3] |= mclist->dmi_addr[5];
+ netdev_for_each_mc_addr(ha, dev) {
+ address[0] |= ha->addr[2];
+ address[1] |= ha->addr[3];
+ address[2] |= ha->addr[4];
+ address[3] |= ha->addr[5];
}
SET_PAGE(ti->srb_page);
for (i = 0; i < sizeof(struct srb_set_funct_addr); i++)
@@ -1041,7 +1041,6 @@ static netdev_tx_t tok_send_packet(struct sk_buff *skb,
writew(ti->exsap_station_id, ti->srb + STATION_ID_OFST);
writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
spin_unlock_irqrestore(&(ti->lock), flags);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index 7a5fbf5a9d71..5bd140704533 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -358,7 +358,7 @@ static int __devinit streamer_init_one(struct pci_dev *pdev,
pcr |= PCI_COMMAND_SERR;
pci_write_config_word (pdev, PCI_COMMAND, pcr);
- printk("%s \n", version);
+ printk("%s\n", version);
printk("%s: %s. I/O at %hx, MMIO at %p, using irq %d\n",dev->name,
streamer_priv->streamer_card_name,
(unsigned int) dev->base_addr,
@@ -651,7 +651,7 @@ static int streamer_open(struct net_device *dev)
#if STREAMER_DEBUG
writew(readw(streamer_mmio + LAPWWO),
streamer_mmio + LAPA);
- printk("srb open request: \n");
+ printk("srb open request:\n");
for (i = 0; i < 16; i++) {
printk("%x:", ntohs(readw(streamer_mmio + LAPDINC)));
}
@@ -701,7 +701,7 @@ static int streamer_open(struct net_device *dev)
if (srb_word != 0) {
if (srb_word == 0x07) {
if (!streamer_priv->streamer_ring_speed && open_finished) { /* Autosense , first time around */
- printk(KERN_WARNING "%s: Retrying at different ring speed \n",
+ printk(KERN_WARNING "%s: Retrying at different ring speed\n",
dev->name);
open_finished = 0;
} else {
@@ -717,7 +717,7 @@ static int streamer_open(struct net_device *dev)
((error_code & 0x0f) == 0x0d))
{
printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n", dev->name);
- printk(KERN_WARNING "%s: Please try again with a specified ring speed \n", dev->name);
+ printk(KERN_WARNING "%s: Please try again with a specified ring speed\n", dev->name);
free_irq(dev->irq, dev);
return -EIO;
}
@@ -923,7 +923,7 @@ static void streamer_rx(struct net_device *dev)
if (rx_desc->status & 0x7E830000) { /* errors */
if (streamer_priv->streamer_message_level) {
- printk(KERN_WARNING "%s: Rx Error %x \n",
+ printk(KERN_WARNING "%s: Rx Error %x\n",
dev->name, rx_desc->status);
}
} else { /* received without errors */
@@ -936,7 +936,7 @@ static void streamer_rx(struct net_device *dev)
if (skb == NULL)
{
- printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n", dev->name);
+ printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers.\n", dev->name);
dev->stats.rx_dropped++;
} else { /* we allocated an skb OK */
if (buffer_cnt == 1) {
@@ -1267,7 +1267,7 @@ static void streamer_set_rx_mode(struct net_device *dev)
netdev_priv(dev);
__u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
__u8 options = 0;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
unsigned char dev_mc_address[5];
writel(streamer_priv->srb, streamer_mmio + LAPA);
@@ -1303,11 +1303,11 @@ static void streamer_set_rx_mode(struct net_device *dev)
writel(streamer_priv->srb,streamer_mmio+LAPA);
dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
- netdev_for_each_mc_addr(dmi, dev) {
- dev_mc_address[0] |= dmi->dmi_addr[2] ;
- dev_mc_address[1] |= dmi->dmi_addr[3] ;
- dev_mc_address[2] |= dmi->dmi_addr[4] ;
- dev_mc_address[3] |= dmi->dmi_addr[5] ;
+ netdev_for_each_mc_addr(ha, dev) {
+ dev_mc_address[0] |= ha->addr[2];
+ dev_mc_address[1] |= ha->addr[3];
+ dev_mc_address[2] |= ha->addr[4];
+ dev_mc_address[3] |= ha->addr[5];
}
writew(htons(SRB_SET_FUNC_ADDRESS << 8),streamer_mmio+LAPDINC);
@@ -1364,7 +1364,7 @@ static void streamer_srb_bh(struct net_device *dev)
case 0x00:
break;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name);
+ printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
break;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
@@ -1392,13 +1392,13 @@ static void streamer_srb_bh(struct net_device *dev)
case 0x00:
break;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n", dev->name);
+ printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
break;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
break;
case 0x39: /* Must deal with this if individual multicast addresses used */
- printk(KERN_INFO "%s: Group address not found \n", dev->name);
+ printk(KERN_INFO "%s: Group address not found\n", dev->name);
break;
default:
break;
@@ -1414,10 +1414,10 @@ static void streamer_srb_bh(struct net_device *dev)
switch (srb_word) {
case 0x00:
if (streamer_priv->streamer_message_level)
- printk(KERN_INFO "%s: Functional Address Mask Set \n", dev->name);
+ printk(KERN_INFO "%s: Functional Address Mask Set\n", dev->name);
break;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n", dev->name);
+ printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
break;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
@@ -1448,7 +1448,7 @@ static void streamer_srb_bh(struct net_device *dev)
}
break;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n", dev->name);
+ printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
break;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
@@ -1467,7 +1467,7 @@ static void streamer_srb_bh(struct net_device *dev)
printk(KERN_INFO "%s: Read Source Routing Counters issued\n", dev->name);
break;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n", dev->name);
+ printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
break;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
@@ -1556,7 +1556,7 @@ static void streamer_arb_cmd(struct net_device *dev)
(streamer_mmio + LAPDINC)));
}
- printk("next %04x, fs %02x, len %04x \n", next,
+ printk("next %04x, fs %02x, len %04x\n", next,
status, len);
}
#endif
@@ -1593,7 +1593,7 @@ static void streamer_arb_cmd(struct net_device *dev)
mac_frame->protocol = tr_type_trans(mac_frame, dev);
#if STREAMER_NETWORK_MONITOR
- printk(KERN_WARNING "%s: Received MAC Frame, details: \n",
+ printk(KERN_WARNING "%s: Received MAC Frame, details:\n",
dev->name);
mac_hdr = tr_hdr(mac_frame);
printk(KERN_WARNING
@@ -1669,15 +1669,15 @@ drop_frame:
/* If serious error */
if (streamer_priv->streamer_message_level) {
if (lan_status_diff & LSC_SIG_LOSS)
- printk(KERN_WARNING "%s: No receive signal detected \n", dev->name);
+ printk(KERN_WARNING "%s: No receive signal detected\n", dev->name);
if (lan_status_diff & LSC_HARD_ERR)
- printk(KERN_INFO "%s: Beaconing \n", dev->name);
+ printk(KERN_INFO "%s: Beaconing\n", dev->name);
if (lan_status_diff & LSC_SOFT_ERR)
- printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame \n", dev->name);
+ printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n", dev->name);
if (lan_status_diff & LSC_TRAN_BCN)
printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n", dev->name);
if (lan_status_diff & LSC_SS)
- printk(KERN_INFO "%s: Single Station on the ring \n", dev->name);
+ printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
if (lan_status_diff & LSC_RING_REC)
printk(KERN_INFO "%s: Ring recovery ongoing\n", dev->name);
if (lan_status_diff & LSC_FDX_MODE)
@@ -1686,7 +1686,7 @@ drop_frame:
if (lan_status_diff & LSC_CO) {
if (streamer_priv->streamer_message_level)
- printk(KERN_INFO "%s: Counter Overflow \n", dev->name);
+ printk(KERN_INFO "%s: Counter Overflow\n", dev->name);
/* Issue READ.LOG command */
@@ -1716,7 +1716,7 @@ drop_frame:
streamer_priv->streamer_lan_status = lan_status;
} /* Lan.change.status */
else
- printk(KERN_WARNING "%s: Unknown arb command \n", dev->name);
+ printk(KERN_WARNING "%s: Unknown arb command\n", dev->name);
}
static void streamer_asb_bh(struct net_device *dev)
@@ -1747,10 +1747,10 @@ static void streamer_asb_bh(struct net_device *dev)
rc=ntohs(readw(streamer_mmio+LAPD)) >> 8;
switch (rc) {
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized command code \n", dev->name);
+ printk(KERN_WARNING "%s: Unrecognized command code\n", dev->name);
break;
case 0x26:
- printk(KERN_WARNING "%s: Unrecognized buffer address \n", dev->name);
+ printk(KERN_WARNING "%s: Unrecognized buffer address\n", dev->name);
break;
case 0xFF:
/* Valid response, everything should be ok again */
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index 3a25e0434ae2..3d2fbe60b46e 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -302,7 +302,7 @@ static int olympic_init(struct net_device *dev)
olympic_priv=netdev_priv(dev);
olympic_mmio=olympic_priv->olympic_mmio;
- printk("%s \n", version);
+ printk("%s\n", version);
printk("%s. I/O at %hx, MMIO at %p, LAP at %p, using irq %d\n", olympic_priv->olympic_card_name, (unsigned int) dev->base_addr,olympic_priv->olympic_mmio, olympic_priv->olympic_lap, dev->irq);
writel(readl(olympic_mmio+BCTL) | BCTL_SOFTRESET,olympic_mmio+BCTL);
@@ -468,7 +468,7 @@ static int olympic_open(struct net_device *dev)
#if OLYMPIC_DEBUG
printk("LAPWWO: %x, LAPA: %x\n",readw(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
printk("SISR Mask = %04x\n", readl(olympic_mmio+SISR_MASK));
- printk("Before the open command \n");
+ printk("Before the open command\n");
#endif
do {
memset_io(init_srb,0,SRB_COMMAND_SIZE);
@@ -520,7 +520,7 @@ static int olympic_open(struct net_device *dev)
break;
}
if (time_after(jiffies, t + 10*HZ)) {
- printk(KERN_WARNING "%s: SRB timed out. \n",dev->name) ;
+ printk(KERN_WARNING "%s: SRB timed out.\n",dev->name);
olympic_priv->srb_queued=0;
break ;
}
@@ -549,7 +549,7 @@ static int olympic_open(struct net_device *dev)
break;
case 0x07:
if (!olympic_priv->olympic_ring_speed && open_finished) { /* Autosense , first time around */
- printk(KERN_WARNING "%s: Retrying at different ring speed \n", dev->name);
+ printk(KERN_WARNING "%s: Retrying at different ring speed\n", dev->name);
open_finished = 0 ;
continue;
}
@@ -558,7 +558,7 @@ static int olympic_open(struct net_device *dev)
if (!olympic_priv->olympic_ring_speed && ((err & 0x0f) == 0x0d)) {
printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n",dev->name);
- printk(KERN_WARNING "%s: Please try again with a specified ring speed \n",dev->name);
+ printk(KERN_WARNING "%s: Please try again with a specified ring speed\n",dev->name);
} else {
printk(KERN_WARNING "%s: %s - %s\n", dev->name,
open_maj_error[(err & 0xf0) >> 4],
@@ -759,7 +759,7 @@ static void olympic_rx(struct net_device *dev)
olympic_priv->rx_status_last_received++ ;
olympic_priv->rx_status_last_received &= (OLYMPIC_RX_RING_SIZE -1);
#if OLYMPIC_DEBUG
- printk("rx status: %x rx len: %x \n", le32_to_cpu(rx_status->status_buffercnt), le32_to_cpu(rx_status->fragmentcnt_framelen));
+ printk("rx status: %x rx len: %x\n", le32_to_cpu(rx_status->status_buffercnt), le32_to_cpu(rx_status->fragmentcnt_framelen));
#endif
length = le32_to_cpu(rx_status->fragmentcnt_framelen) & 0xffff;
buffer_cnt = le32_to_cpu(rx_status->status_buffercnt) & 0xffff;
@@ -774,15 +774,15 @@ static void olympic_rx(struct net_device *dev)
if (l_status_buffercnt & 0x3B000000) {
if (olympic_priv->olympic_message_level) {
if (l_status_buffercnt & (1<<29)) /* Rx Frame Truncated */
- printk(KERN_WARNING "%s: Rx Frame Truncated \n",dev->name);
+ printk(KERN_WARNING "%s: Rx Frame Truncated\n",dev->name);
if (l_status_buffercnt & (1<<28)) /*Rx receive overrun */
- printk(KERN_WARNING "%s: Rx Frame Receive overrun \n",dev->name);
+ printk(KERN_WARNING "%s: Rx Frame Receive overrun\n",dev->name);
if (l_status_buffercnt & (1<<27)) /* No receive buffers */
- printk(KERN_WARNING "%s: No receive buffers \n",dev->name);
+ printk(KERN_WARNING "%s: No receive buffers\n",dev->name);
if (l_status_buffercnt & (1<<25)) /* Receive frame error detect */
- printk(KERN_WARNING "%s: Receive frame error detect \n",dev->name);
+ printk(KERN_WARNING "%s: Receive frame error detect\n",dev->name);
if (l_status_buffercnt & (1<<24)) /* Received Error Detect */
- printk(KERN_WARNING "%s: Received Error Detect \n",dev->name);
+ printk(KERN_WARNING "%s: Received Error Detect\n",dev->name);
}
olympic_priv->rx_ring_last_received += i ;
olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
@@ -796,7 +796,7 @@ static void olympic_rx(struct net_device *dev)
}
if (skb == NULL) {
- printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n",dev->name) ;
+ printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers.\n",dev->name) ;
dev->stats.rx_dropped++;
/* Update counters even though we don't transfer the frame */
olympic_priv->rx_ring_last_received += i ;
@@ -1101,7 +1101,7 @@ static int olympic_close(struct net_device *dev)
}
if (t == 0) {
- printk(KERN_WARNING "%s: SRB timed out. May not be fatal. \n",dev->name) ;
+ printk(KERN_WARNING "%s: SRB timed out. May not be fatal.\n",dev->name);
}
olympic_priv->srb_queued=0;
}
@@ -1139,7 +1139,7 @@ static void olympic_set_rx_mode(struct net_device *dev)
u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ;
u8 options = 0;
u8 __iomem *srb;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
unsigned char dev_mc_address[4] ;
writel(olympic_priv->srb,olympic_mmio+LAPA);
@@ -1177,11 +1177,11 @@ static void olympic_set_rx_mode(struct net_device *dev)
dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
- netdev_for_each_mc_addr(dmi, dev) {
- dev_mc_address[0] |= dmi->dmi_addr[2] ;
- dev_mc_address[1] |= dmi->dmi_addr[3] ;
- dev_mc_address[2] |= dmi->dmi_addr[4] ;
- dev_mc_address[3] |= dmi->dmi_addr[5] ;
+ netdev_for_each_mc_addr(ha, dev) {
+ dev_mc_address[0] |= ha->addr[2];
+ dev_mc_address[1] |= ha->addr[3];
+ dev_mc_address[2] |= ha->addr[4];
+ dev_mc_address[3] |= ha->addr[5];
}
writeb(SRB_SET_FUNC_ADDRESS,srb+0);
@@ -1239,7 +1239,7 @@ static void olympic_srb_bh(struct net_device *dev)
case 0x00:
break ;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
+ printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
break ;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name);
@@ -1266,13 +1266,13 @@ static void olympic_srb_bh(struct net_device *dev)
case 0x00:
break ;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
+ printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
break ;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
break ;
case 0x39: /* Must deal with this if individual multicast addresses used */
- printk(KERN_INFO "%s: Group address not found \n",dev->name);
+ printk(KERN_INFO "%s: Group address not found\n",dev->name);
break ;
default:
break ;
@@ -1287,10 +1287,10 @@ static void olympic_srb_bh(struct net_device *dev)
switch (readb(srb+2)) {
case 0x00:
if (olympic_priv->olympic_message_level)
- printk(KERN_INFO "%s: Functional Address Mask Set \n",dev->name) ;
+ printk(KERN_INFO "%s: Functional Address Mask Set\n",dev->name);
break ;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
+ printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
break ;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
@@ -1310,7 +1310,7 @@ static void olympic_srb_bh(struct net_device *dev)
printk(KERN_INFO "%s: Read Log issued\n",dev->name) ;
break ;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
+ printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
break ;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
@@ -1328,7 +1328,7 @@ static void olympic_srb_bh(struct net_device *dev)
printk(KERN_INFO "%s: Read Source Routing Counters issued\n",dev->name) ;
break ;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
+ printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
break ;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
@@ -1404,7 +1404,7 @@ static void olympic_arb_cmd(struct net_device *dev)
printk("Loc %d = %02x\n",i,readb(frame_data + i));
}
- printk("next %04x, fs %02x, len %04x \n",readw(buf_ptr+offsetof(struct mac_receive_buffer,next)), readb(buf_ptr+offsetof(struct mac_receive_buffer,frame_status)), readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
+ printk("next %04x, fs %02x, len %04x\n",readw(buf_ptr+offsetof(struct mac_receive_buffer,next)), readb(buf_ptr+offsetof(struct mac_receive_buffer,frame_status)), readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
}
#endif
mac_frame = dev_alloc_skb(frame_len) ;
@@ -1426,7 +1426,7 @@ static void olympic_arb_cmd(struct net_device *dev)
if (olympic_priv->olympic_network_monitor) {
struct trh_hdr *mac_hdr;
- printk(KERN_WARNING "%s: Received MAC Frame, details: \n",dev->name);
+ printk(KERN_WARNING "%s: Received MAC Frame, details:\n",dev->name);
mac_hdr = tr_hdr(mac_frame);
printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %pM\n",
dev->name, mac_hdr->daddr);
@@ -1489,20 +1489,20 @@ drop_frame:
writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);
netif_stop_queue(dev);
olympic_priv->srb = readw(olympic_priv->olympic_lap + LAPWWO) ;
- printk(KERN_WARNING "%s: Adapter has been closed \n", dev->name) ;
+ printk(KERN_WARNING "%s: Adapter has been closed\n", dev->name);
} /* If serious error */
if (olympic_priv->olympic_message_level) {
if (lan_status_diff & LSC_SIG_LOSS)
- printk(KERN_WARNING "%s: No receive signal detected \n", dev->name) ;
+ printk(KERN_WARNING "%s: No receive signal detected\n", dev->name);
if (lan_status_diff & LSC_HARD_ERR)
- printk(KERN_INFO "%s: Beaconing \n",dev->name);
+ printk(KERN_INFO "%s: Beaconing\n",dev->name);
if (lan_status_diff & LSC_SOFT_ERR)
- printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame \n",dev->name);
+ printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
if (lan_status_diff & LSC_TRAN_BCN)
printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name);
if (lan_status_diff & LSC_SS)
- printk(KERN_INFO "%s: Single Station on the ring \n", dev->name);
+ printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
if (lan_status_diff & LSC_RING_REC)
printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name);
if (lan_status_diff & LSC_FDX_MODE)
@@ -1512,7 +1512,7 @@ drop_frame:
if (lan_status_diff & LSC_CO) {
if (olympic_priv->olympic_message_level)
- printk(KERN_INFO "%s: Counter Overflow \n", dev->name);
+ printk(KERN_INFO "%s: Counter Overflow\n", dev->name);
/* Issue READ.LOG command */
@@ -1551,7 +1551,7 @@ drop_frame:
} /* Lan.change.status */
else
- printk(KERN_WARNING "%s: Unknown arb command \n", dev->name);
+ printk(KERN_WARNING "%s: Unknown arb command\n", dev->name);
}
static void olympic_asb_bh(struct net_device *dev)
@@ -1578,10 +1578,10 @@ static void olympic_asb_bh(struct net_device *dev)
if (olympic_priv->asb_queued == 2) {
switch (readb(asb_block+2)) {
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized command code \n", dev->name);
+ printk(KERN_WARNING "%s: Unrecognized command code\n", dev->name);
break ;
case 0x26:
- printk(KERN_WARNING "%s: Unrecognized buffer address \n", dev->name);
+ printk(KERN_WARNING "%s: Unrecognized buffer address\n", dev->name);
break ;
case 0xFF:
/* Valid response, everything should be ok again */
diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c
index e40560137c46..213b9affc22a 100644
--- a/drivers/net/tokenring/smctr.c
+++ b/drivers/net/tokenring/smctr.c
@@ -4562,7 +4562,7 @@ static void smctr_timeout(struct net_device *dev)
* fake transmission time and go on trying. Our own timeout
* routine is in sktr_timer_chk()
*/
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index 8b508c922410..8cb126a80070 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -592,7 +592,7 @@ static void tms380tr_timeout(struct net_device *dev)
* fake transmission time and go on trying. Our own timeout
* routine is in tms380tr_timer_chk()
*/
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -1211,17 +1211,17 @@ static void tms380tr_set_multicast_list(struct net_device *dev)
}
else
{
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
((char *)(&tp->ocpl.FunctAddr))[0] |=
- mclist->dmi_addr[2];
+ ha->addr[2];
((char *)(&tp->ocpl.FunctAddr))[1] |=
- mclist->dmi_addr[3];
+ ha->addr[3];
((char *)(&tp->ocpl.FunctAddr))[2] |=
- mclist->dmi_addr[4];
+ ha->addr[4];
((char *)(&tp->ocpl.FunctAddr))[3] |=
- mclist->dmi_addr[5];
+ ha->addr[5];
}
}
tms380tr_exec_cmd(dev, OC_SET_FUNCT_ADDR);
@@ -1390,7 +1390,7 @@ static int tms380tr_bringup_diags(struct net_device *dev)
Status &= STS_MASK;
if(tms380tr_debug > 3)
- printk(KERN_DEBUG " %04X \n", Status);
+ printk(KERN_DEBUG " %04X\n", Status);
/* BUD successfully completed */
if(Status == STS_INITIALIZE)
return (1);
@@ -1846,7 +1846,7 @@ static void tms380tr_chk_irq(struct net_device *dev)
break;
case DMA_WRITE_ABORT:
- printk(KERN_INFO "%s: DMA write operation aborted: \n",
+ printk(KERN_INFO "%s: DMA write operation aborted:\n",
dev->name);
switch (AdapterCheckBlock[1])
{
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index 5b1fbb3c3b51..a03730bd1da5 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/tsi108_eth.c
@@ -263,7 +263,7 @@ static inline void tsi108_write_tbi(struct tsi108_prv_data *data,
return;
udelay(10);
}
- printk(KERN_ERR "%s function time out \n", __func__);
+ printk(KERN_ERR "%s function time out\n", __func__);
}
static int mii_speed(struct mii_if_info *mii)
@@ -704,8 +704,8 @@ static int tsi108_send_packet(struct sk_buff * skb, struct net_device *dev)
if (i == 0) {
data->txring[tx].buf0 = dma_map_single(NULL, skb->data,
- skb->len - skb->data_len, DMA_TO_DEVICE);
- data->txring[tx].len = skb->len - skb->data_len;
+ skb_headlen(skb), DMA_TO_DEVICE);
+ data->txring[tx].len = skb_headlen(skb);
misc |= TSI108_TX_SOF;
} else {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
@@ -1056,7 +1056,7 @@ static void tsi108_stop_ethernet(struct net_device *dev)
return;
udelay(10);
}
- printk(KERN_ERR "%s function time out \n", __func__);
+ printk(KERN_ERR "%s function time out\n", __func__);
}
static void tsi108_reset_ether(struct tsi108_prv_data * data)
@@ -1186,15 +1186,15 @@ static void tsi108_set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
int i;
- struct dev_mc_list *mc;
+ struct netdev_hw_addr *ha;
rxcfg |= TSI108_EC_RXCFG_MFE | TSI108_EC_RXCFG_MC_HASH;
memset(data->mc_hash, 0, sizeof(data->mc_hash));
- netdev_for_each_mc_addr(mc, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
u32 hash, crc;
- crc = ether_crc(6, mc->dmi_addr);
+ crc = ether_crc(6, ha->addr);
hash = crc >> 23;
__set_bit(hash, &data->mc_hash[0]);
}
@@ -1233,7 +1233,7 @@ static void tsi108_init_phy(struct net_device *dev)
udelay(10);
}
if (i == 0)
- printk(KERN_ERR "%s function time out \n", __func__);
+ printk(KERN_ERR "%s function time out\n", __func__);
if (data->phy_type == TSI108_PHY_BCM54XX) {
tsi108_write_mii(data, 0x09, 0x0300);
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 19cafc2b418d..c0e70006374e 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -654,7 +654,6 @@ static netdev_tx_t de_start_xmit (struct sk_buff *skb,
/* Trigger an immediate transmit demand. */
dw32(TxPoll, NormalTxPoll);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
@@ -671,15 +670,15 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
{
struct de_private *de = netdev_priv(dev);
u16 hash_table[32];
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
int i;
u16 *eaddrs;
memset(hash_table, 0, sizeof(hash_table));
set_bit_le(255, hash_table); /* Broadcast entry */
/* This should work on big-endian machines as well. */
- netdev_for_each_mc_addr(mclist, dev) {
- int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
+ netdev_for_each_mc_addr(ha, dev) {
+ int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
set_bit_le(index, hash_table);
}
@@ -700,13 +699,13 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
struct de_private *de = netdev_priv(dev);
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
u16 *eaddrs;
/* We have <= 14 addresses so we can use the wonderful
16 address perfect filtering of the Tulip. */
- netdev_for_each_mc_addr(mclist, dev) {
- eaddrs = (u16 *)mclist->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ eaddrs = (u16 *) ha->addr;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index 09b57193a16a..9522baf8d997 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -1337,7 +1337,7 @@ de4x5_open(struct net_device *dev)
}
lp->interrupt = UNMASK_INTERRUPTS;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
START_DE4X5;
@@ -1507,7 +1507,6 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
outl(POLL_DEMAND, DE4X5_TPD);/* Start the TX */
lp->tx_new = (++lp->tx_new) % lp->txRingSize;
- dev->trans_start = jiffies;
if (TX_BUFFS_AVAIL) {
netif_start_queue(dev); /* Another pkt may be queued */
@@ -1937,7 +1936,7 @@ set_multicast_list(struct net_device *dev)
lp->tx_new = (++lp->tx_new) % lp->txRingSize;
outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
}
}
}
@@ -1951,7 +1950,7 @@ static void
SetMulticastFilter(struct net_device *dev)
{
struct de4x5_private *lp = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
u_long iobase = dev->base_addr;
int i, bit, byte;
u16 hashcode;
@@ -1966,8 +1965,8 @@ SetMulticastFilter(struct net_device *dev)
if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 14)) {
omr |= OMR_PM; /* Pass all multicasts */
} else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
if ((*addrs & 0x01) == 1) { /* multicast address? */
crc = ether_crc_le(ETH_ALEN, addrs);
hashcode = crc & HASH_BITS; /* hashcode is 9 LSb of CRC */
@@ -1983,8 +1982,8 @@ SetMulticastFilter(struct net_device *dev)
}
}
} else { /* Perfect filtering */
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
for (i=0; i<ETH_ALEN; i++) {
*(pa + (i&1)) = *addrs++;
if (i & 0x01) pa += 4;
@@ -5077,7 +5076,7 @@ mii_get_phy(struct net_device *dev)
lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */
lp->mii_cnt++;
lp->active++;
- printk("%s: Using generic MII device control. If the board doesn't operate, \nplease mail the following dump to the author:\n", dev->name);
+ printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name);
j = de4x5_debug;
de4x5_debug |= DEBUG_MII;
de4x5_dbg_mii(dev, k);
@@ -5337,7 +5336,7 @@ de4x5_dbg_open(struct net_device *dev)
}
}
printk("...0x%8.8x\n", le32_to_cpu(lp->tx_ring[i].buf));
- printk("Ring size: \nRX: %d\nTX: %d\n",
+ printk("Ring size:\nRX: %d\nTX: %d\n",
(short)lp->rxRingSize,
(short)lp->txRingSize);
}
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 9568156dea98..bdb25b8b1017 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -1180,11 +1180,11 @@ static void dmfe_timer(unsigned long data)
/* TX polling kick monitor */
if ( db->tx_packet_cnt &&
- time_after(jiffies, dev->trans_start + DMFE_TX_KICK) ) {
+ time_after(jiffies, dev_trans_start(dev) + DMFE_TX_KICK) ) {
outl(0x1, dev->base_addr + DCR1); /* Tx polling again */
/* TX Timeout */
- if ( time_after(jiffies, dev->trans_start + DMFE_TX_TIMEOUT) ) {
+ if (time_after(jiffies, dev_trans_start(dev) + DMFE_TX_TIMEOUT) ) {
db->reset_TXtimeout++;
db->wait_reset = 1;
dev_warn(&dev->dev, "Tx timeout - resetting\n");
@@ -1453,7 +1453,7 @@ static void update_cr6(u32 cr6_data, unsigned long ioaddr)
static void dm9132_id_table(struct DEVICE *dev)
{
- struct dev_mc_list *mcptr;
+ struct netdev_hw_addr *ha;
u16 * addrptr;
unsigned long ioaddr = dev->base_addr+0xc0; /* ID Table */
u32 hash_val;
@@ -1477,8 +1477,8 @@ static void dm9132_id_table(struct DEVICE *dev)
hash_table[3] = 0x8000;
/* the multicast address in Hash Table : 64 bits */
- netdev_for_each_mc_addr(mcptr, dev) {
- hash_val = cal_CRC((char *) mcptr->dmi_addr, 6, 0) & 0x3f;
+ netdev_for_each_mc_addr(ha, dev) {
+ hash_val = cal_CRC((char *) ha->addr, 6, 0) & 0x3f;
hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
}
@@ -1496,7 +1496,7 @@ static void dm9132_id_table(struct DEVICE *dev)
static void send_filter_frame(struct DEVICE *dev)
{
struct dmfe_board_info *db = netdev_priv(dev);
- struct dev_mc_list *mcptr;
+ struct netdev_hw_addr *ha;
struct tx_desc *txptr;
u16 * addrptr;
u32 * suptr;
@@ -1519,8 +1519,8 @@ static void send_filter_frame(struct DEVICE *dev)
*suptr++ = 0xffff;
/* fit the multicast address */
- netdev_for_each_mc_addr(mcptr, dev) {
- addrptr = (u16 *) mcptr->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrptr = (u16 *) ha->addr;
*suptr++ = addrptr[0];
*suptr++ = addrptr[1];
*suptr++ = addrptr[2];
diff --git a/drivers/net/tulip/pnic.c b/drivers/net/tulip/pnic.c
index 966efa1a27d7..a63e64b6863d 100644
--- a/drivers/net/tulip/pnic.c
+++ b/drivers/net/tulip/pnic.c
@@ -67,7 +67,7 @@ void pnic_lnk_change(struct net_device *dev, int csr5)
*/
if (tulip_media_cap[dev->if_port] & MediaIsMII)
return;
- if (! tp->nwayset || time_after(jiffies, dev->trans_start + 1*HZ)) {
+ if (! tp->nwayset || time_after(jiffies, dev_trans_start(dev) + 1*HZ)) {
tp->csr6 = 0x00420000 | (tp->csr6 & 0x0000fdff);
iowrite32(tp->csr6, ioaddr + CSR6);
iowrite32(0x30, ioaddr + CSR12);
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 3810db9dc2de..254643ed945e 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -605,7 +605,7 @@ static void tulip_tx_timeout(struct net_device *dev)
out_unlock:
spin_unlock_irqrestore (&tp->lock, flags);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue (dev);
}
@@ -707,8 +707,6 @@ tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&tp->lock, flags);
- dev->trans_start = jiffies;
-
return NETDEV_TX_OK;
}
@@ -991,15 +989,15 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
u16 hash_table[32];
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
int i;
u16 *eaddrs;
memset(hash_table, 0, sizeof(hash_table));
set_bit_le(255, hash_table); /* Broadcast entry */
/* This should work on big-endian machines as well. */
- netdev_for_each_mc_addr(mclist, dev) {
- int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
+ netdev_for_each_mc_addr(ha, dev) {
+ int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
set_bit_le(index, hash_table);
}
@@ -1019,13 +1017,13 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
u16 *eaddrs;
/* We have <= 14 addresses so we can use the wonderful
16 address perfect filtering of the Tulip. */
- netdev_for_each_mc_addr(mclist, dev) {
- eaddrs = (u16 *)mclist->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ eaddrs = (u16 *) ha->addr;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
@@ -1062,7 +1060,7 @@ static void set_rx_mode(struct net_device *dev)
} else if (tp->flags & MC_HASH_ONLY) {
/* Some work-alikes have only a 64-entry hash filter table. */
/* Should verify correctness on big-endian/__powerpc__ */
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
if (netdev_mc_count(dev) > 64) {
/* Arbitrary non-effective limit. */
tp->csr6 |= AcceptAllMulticast;
@@ -1070,18 +1068,21 @@ static void set_rx_mode(struct net_device *dev)
} else {
u32 mc_filter[2] = {0, 0}; /* Multicast hash filter */
int filterbit;
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (tp->flags & COMET_MAC_ADDR)
- filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
+ filterbit = ether_crc_le(ETH_ALEN,
+ ha->addr);
else
- filterbit = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ filterbit = ether_crc(ETH_ALEN,
+ ha->addr) >> 26;
filterbit &= 0x3f;
mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
if (tulip_debug > 2)
dev_info(&dev->dev,
"Added filter for %pM %08x bit %d\n",
- mclist->dmi_addr,
- ether_crc(ETH_ALEN, mclist->dmi_addr), filterbit);
+ ha->addr,
+ ether_crc(ETH_ALEN, ha->addr),
+ filterbit);
}
if (mc_filter[0] == tp->mc_filter[0] &&
mc_filter[1] == tp->mc_filter[1])
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index a589dd34891e..96de5829b940 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -1040,11 +1040,11 @@ static void uli526x_timer(unsigned long data)
/* TX polling kick monitor */
if ( db->tx_packet_cnt &&
- time_after(jiffies, dev->trans_start + ULI526X_TX_KICK) ) {
+ time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_KICK) ) {
outl(0x1, dev->base_addr + DCR1); // Tx polling again
// TX Timeout
- if ( time_after(jiffies, dev->trans_start + ULI526X_TX_TIMEOUT) ) {
+ if ( time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_TIMEOUT) ) {
db->reset_TXtimeout++;
db->wait_reset = 1;
printk( "%s: Tx timeout - resetting\n",
@@ -1393,7 +1393,7 @@ static void update_cr6(u32 cr6_data, unsigned long ioaddr)
static void send_filter_frame(struct net_device *dev, int mc_cnt)
{
struct uli526x_board_info *db = netdev_priv(dev);
- struct dev_mc_list *mcptr;
+ struct netdev_hw_addr *ha;
struct tx_desc *txptr;
u16 * addrptr;
u32 * suptr;
@@ -1416,8 +1416,8 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
*suptr++ = 0xffff << FLT_SHIFT;
/* fit the multicast address */
- netdev_for_each_mc_addr(mcptr, dev) {
- addrptr = (u16 *) mcptr->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrptr = (u16 *) ha->addr;
*suptr++ = addrptr[0] << FLT_SHIFT;
*suptr++ = addrptr[1] << FLT_SHIFT;
*suptr++ = addrptr[2] << FLT_SHIFT;
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 98dbf6cc1d68..60a87542f8f0 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -969,7 +969,7 @@ static void tx_timeout(struct net_device *dev)
enable_irq(dev->irq);
netif_wake_queue(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
np->stats.tx_errors++;
return;
}
@@ -1055,8 +1055,6 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
}
spin_unlock_irq(&np->lock);
- dev->trans_start = jiffies;
-
if (debug > 4) {
printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d\n",
dev->name, np->cur_tx, entry);
@@ -1366,13 +1364,15 @@ static u32 __set_rx_mode(struct net_device *dev)
memset(mc_filter, 0xff, sizeof(mc_filter));
rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
- int filterbit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
- filterbit &= 0x3f;
- mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
+ netdev_for_each_mc_addr(ha, dev) {
+ int filbit;
+
+ filbit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
+ filbit &= 0x3f;
+ mc_filter[filbit >> 5] |= 1 << (filbit & 31);
}
rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
}
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index acfeeb980562..a439e93be22d 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -350,9 +350,9 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
#ifdef DEBUG
print_binary(status);
- printk("tx status 0x%08x 0x%08x \n",
+ printk("tx status 0x%08x 0x%08x\n",
card->tx_buffer[0], card->tx_buffer[4]);
- printk("rx status 0x%08x 0x%08x \n",
+ printk("rx status 0x%08x 0x%08x\n",
card->rx_buffer[0], card->rx_buffer[4]);
#endif
/* Handle shared irq and hotplug */
@@ -462,7 +462,7 @@ static int xircom_open(struct net_device *dev)
struct xircom_private *xp = netdev_priv(dev);
int retval;
enter("xircom_open");
- pr_info("xircom cardbus adaptor found, registering as %s, using irq %i \n",
+ pr_info("xircom cardbus adaptor found, registering as %s, using irq %i\n",
dev->name, dev->irq);
retval = request_irq(dev->irq, xircom_interrupt, IRQF_SHARED, dev->name, dev);
if (retval) {
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 43265207d463..dbdfb1ff7ca0 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -109,6 +109,9 @@ struct tun_struct {
struct tap_filter txflt;
struct socket socket;
+ struct socket_wq wq;
+
+ int vnet_hdr_sz;
#ifdef TUN_DEBUG
int debug;
@@ -323,7 +326,7 @@ static void tun_net_uninit(struct net_device *dev)
/* Inform the methods they need to stop using the dev.
*/
if (tfile) {
- wake_up_all(&tun->socket.wait);
+ wake_up_all(&tun->wq.wait);
if (atomic_dec_and_test(&tfile->count))
__tun_detach(tun);
}
@@ -393,12 +396,11 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
/* Enqueue packet */
skb_queue_tail(&tun->socket.sk->sk_receive_queue, skb);
- dev->trans_start = jiffies;
/* Notify and wake up reader process */
if (tun->flags & TUN_FASYNC)
kill_fasync(&tun->fasync, SIGIO, POLL_IN);
- wake_up_interruptible_poll(&tun->socket.wait, POLLIN |
+ wake_up_interruptible_poll(&tun->wq.wait, POLLIN |
POLLRDNORM | POLLRDBAND);
return NETDEV_TX_OK;
@@ -498,7 +500,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name);
- poll_wait(file, &tun->socket.wait, wait);
+ poll_wait(file, &tun->wq.wait, wait);
if (!skb_queue_empty(&sk->sk_receive_queue))
mask |= POLLIN | POLLRDNORM;
@@ -563,7 +565,7 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun,
}
if (tun->flags & TUN_VNET_HDR) {
- if ((len -= sizeof(gso)) > count)
+ if ((len -= tun->vnet_hdr_sz) > count)
return -EINVAL;
if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
@@ -575,7 +577,7 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun,
if (gso.hdr_len > len)
return -EINVAL;
- offset += sizeof(gso);
+ offset += tun->vnet_hdr_sz;
}
if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
@@ -718,7 +720,7 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
if (tun->flags & TUN_VNET_HDR) {
struct virtio_net_hdr gso = { 0 }; /* no info leak */
- if ((len -= sizeof(gso)) < 0)
+ if ((len -= tun->vnet_hdr_sz) < 0)
return -EINVAL;
if (skb_is_gso(skb)) {
@@ -749,7 +751,7 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total,
sizeof(gso))))
return -EFAULT;
- total += sizeof(gso);
+ total += tun->vnet_hdr_sz;
}
len = min_t(int, skb->len, len);
@@ -773,7 +775,7 @@ static ssize_t tun_do_read(struct tun_struct *tun,
DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name);
- add_wait_queue(&tun->socket.wait, &wait);
+ add_wait_queue(&tun->wq.wait, &wait);
while (len) {
current->state = TASK_INTERRUPTIBLE;
@@ -804,7 +806,7 @@ static ssize_t tun_do_read(struct tun_struct *tun,
}
current->state = TASK_RUNNING;
- remove_wait_queue(&tun->socket.wait, &wait);
+ remove_wait_queue(&tun->wq.wait, &wait);
return ret;
}
@@ -861,6 +863,7 @@ static struct rtnl_link_ops tun_link_ops __read_mostly = {
static void tun_sock_write_space(struct sock *sk)
{
struct tun_struct *tun;
+ wait_queue_head_t *wqueue;
if (!sock_writeable(sk))
return;
@@ -868,8 +871,9 @@ static void tun_sock_write_space(struct sock *sk)
if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
return;
- if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT |
+ wqueue = sk_sleep(sk);
+ if (wqueue && waitqueue_active(wqueue))
+ wake_up_interruptible_sync_poll(wqueue, POLLOUT |
POLLWRNORM | POLLWRBAND);
tun = tun_sk(sk)->tun;
@@ -1033,13 +1037,15 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
tun->dev = dev;
tun->flags = flags;
tun->txflt.count = 0;
+ tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
err = -ENOMEM;
sk = sk_alloc(net, AF_UNSPEC, GFP_KERNEL, &tun_proto);
if (!sk)
goto err_free_dev;
- init_waitqueue_head(&tun->socket.wait);
+ tun->socket.wq = &tun->wq;
+ init_waitqueue_head(&tun->wq.wait);
tun->socket.ops = &tun_socket_ops;
sock_init_data(&tun->socket, sk);
sk->sk_write_space = tun_sock_write_space;
@@ -1174,6 +1180,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
struct sock_fprog fprog;
struct ifreq ifr;
int sndbuf;
+ int vnet_hdr_sz;
int ret;
if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89)
@@ -1319,6 +1326,25 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
tun->socket.sk->sk_sndbuf = sndbuf;
break;
+ case TUNGETVNETHDRSZ:
+ vnet_hdr_sz = tun->vnet_hdr_sz;
+ if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
+ ret = -EFAULT;
+ break;
+
+ case TUNSETVNETHDRSZ:
+ if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
+ ret = -EFAULT;
+ break;
+ }
+ if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
+ ret = -EINVAL;
+ break;
+ }
+
+ tun->vnet_hdr_sz = vnet_hdr_sz;
+ break;
+
case TUNATTACHFILTER:
/* Can be set only for TAPs */
ret = -EINVAL;
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 98d818daa77e..22bde49262c0 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -881,8 +881,6 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
wmb();
iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);
- dev->trans_start = jiffies;
-
/* If we don't have room to put the worst case packet on the
* queue, then we must stop the queue. We need 2 extra
* descriptors -- one to prevent ring wrap, and one for the
@@ -920,11 +918,11 @@ typhoon_set_rx_mode(struct net_device *dev)
/* Too many to match, or accept all multicasts. */
filter |= TYPHOON_RX_FILTER_ALL_MCAST;
} else if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
- int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit = ether_crc(ETH_ALEN, ha->addr) & 0x3f;
mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
}
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 1b0aef37e495..932602db54b3 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -1999,7 +1999,7 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
static void ucc_geth_set_multi(struct net_device *dev)
{
struct ucc_geth_private *ugeth;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
struct ucc_fast __iomem *uf_regs;
struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
@@ -2028,16 +2028,16 @@ static void ucc_geth_set_multi(struct net_device *dev)
out_be32(&p_82xx_addr_filt->gaddr_h, 0x0);
out_be32(&p_82xx_addr_filt->gaddr_l, 0x0);
- netdev_for_each_mc_addr(dmi, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
/* Only support group multicast for now.
*/
- if (!(dmi->dmi_addr[0] & 1))
+ if (!(ha->addr[0] & 1))
continue;
/* Ask CPM to run CRC and set bit in
* filter mask.
*/
- hw_add_addr_in_hash(ugeth, dmi->dmi_addr);
+ hw_add_addr_in_hash(ugeth, ha->addr);
}
}
}
@@ -3148,8 +3148,6 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* set bd status and length */
out_be32((u32 __iomem *)bd, bd_status);
- dev->trans_start = jiffies;
-
/* Move to next BD in the ring */
if (!(bd_status & T_W))
bd += sizeof(struct qe_bd);
@@ -3883,7 +3881,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
}
if (netif_msg_probe(&debug))
- printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d) \n",
+ printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d)\n",
ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs,
ug_info->uf_info.irq);
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 35f56fc82803..8e7d2374558b 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -558,16 +558,14 @@ static void asix_set_multicast(struct net_device *net)
* for our 8 byte filter buffer
* to avoid allocating memory that
* is tricky to free later */
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
u32 crc_bits;
memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);
/* Build the multicast hash filter. */
- netdev_for_each_mc_addr(mc_list, net) {
- crc_bits =
- ether_crc(ETH_ALEN,
- mc_list->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, net) {
+ crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
data->multi_filter[crc_bits >> 3] |=
1 << (crc_bits & 7);
}
@@ -794,16 +792,14 @@ static void ax88172_set_multicast(struct net_device *net)
* for our 8 byte filter buffer
* to avoid allocating memory that
* is tricky to free later */
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
u32 crc_bits;
memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);
/* Build the multicast hash filter. */
- netdev_for_each_mc_addr(mc_list, net) {
- crc_bits =
- ether_crc(ETH_ALEN,
- mc_list->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, net) {
+ crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
data->multi_filter[crc_bits >> 3] |=
1 << (crc_bits & 7);
}
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 602e123b2741..97687d335903 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -629,7 +629,7 @@ static void catc_multicast(unsigned char *addr, u8 *multicast)
static void catc_set_multicast_list(struct net_device *netdev)
{
struct catc *catc = netdev_priv(netdev);
- struct dev_mc_list *mc;
+ struct netdev_hw_addr *ha;
u8 broadcast[6];
u8 rx = RxEnable | RxPolarity | RxMultiCast;
@@ -647,8 +647,8 @@ static void catc_set_multicast_list(struct net_device *netdev)
if (netdev->flags & IFF_ALLMULTI) {
memset(catc->multicast, 0xff, 64);
} else {
- netdev_for_each_mc_addr(mc, netdev) {
- u32 crc = ether_crc_le(6, mc->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev) {
+ u32 crc = ether_crc_le(6, ha->addr);
if (!catc->is_f5u011) {
catc->multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7);
} else {
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 3547cf13d219..b3fe0de40469 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -64,6 +64,11 @@ static int is_wireless_rndis(struct usb_interface_descriptor *desc)
#endif
+static const u8 mbm_guid[16] = {
+ 0xa3, 0x17, 0xa8, 0x8b, 0x04, 0x5e, 0x4f, 0x01,
+ 0xa6, 0x07, 0xc0, 0xff, 0xcb, 0x7e, 0x39, 0x2a,
+};
+
/*
* probes control interface, claims data interface, collects the bulk
* endpoints, activates data interface (if needed), maybe sets MTU.
@@ -79,6 +84,8 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
int status;
int rndis;
struct usb_driver *driver = driver_of(intf);
+ struct usb_cdc_mdlm_desc *desc = NULL;
+ struct usb_cdc_mdlm_detail_desc *detail = NULL;
if (sizeof dev->data < sizeof *info)
return -EDOM;
@@ -229,6 +236,34 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
* side link address we were given.
*/
break;
+ case USB_CDC_MDLM_TYPE:
+ if (desc) {
+ dev_dbg(&intf->dev, "extra MDLM descriptor\n");
+ goto bad_desc;
+ }
+
+ desc = (void *)buf;
+
+ if (desc->bLength != sizeof(*desc))
+ goto bad_desc;
+
+ if (memcmp(&desc->bGUID, mbm_guid, 16))
+ goto bad_desc;
+ break;
+ case USB_CDC_MDLM_DETAIL_TYPE:
+ if (detail) {
+ dev_dbg(&intf->dev, "extra MDLM detail descriptor\n");
+ goto bad_desc;
+ }
+
+ detail = (void *)buf;
+
+ if (detail->bGuidDescriptorType == 0) {
+ if (detail->bLength < (sizeof(*detail) + 1))
+ goto bad_desc;
+ } else
+ goto bad_desc;
+ break;
}
next_desc:
len -= buf [0]; /* bLength */
@@ -543,80 +578,10 @@ static const struct usb_device_id products [] = {
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long) &cdc_info,
}, {
- /* Ericsson F3507g */
- USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1900, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Ericsson F3507g ver. 2 */
- USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1902, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Ericsson F3607gw */
- USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1904, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Ericsson F3607gw ver 2 */
- USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1905, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Ericsson F3607gw ver 3 */
- USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1906, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Ericsson F3307 */
- USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x190a, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Ericsson F3307 ver 2 */
- USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1909, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Ericsson C3607w */
- USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1049, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Ericsson C3607w ver 2 */
- USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x190b, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Toshiba F3507g */
- USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Toshiba F3607gw */
- USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130c, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Toshiba F3607gw ver 2 */
- USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x1311, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Dell F3507g */
- USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8147, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Dell F3607gw */
- USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8183, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Dell F3607gw ver 2 */
- USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8184, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
+ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MDLM,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&mbm_info,
+
},
{ }, // END
};
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 5dfed9297b22..47634b617107 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -387,10 +387,10 @@ static void dm9601_set_multicast(struct net_device *net)
netdev_mc_count(net) > DM_MAX_MCAST) {
rx_ctl |= 0x04;
} else if (!netdev_mc_empty(net)) {
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
- netdev_for_each_mc_addr(mc_list, net) {
- u32 crc = ether_crc(ETH_ALEN, mc_list->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, net) {
+ u32 crc = ether_crc(ETH_ALEN, ha->addr) >> 26;
hashes[crc >> 3] |= 1 << (crc & 0x7);
}
}
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index be0cc99e881a..a6227f892d1b 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -834,8 +834,6 @@ static netdev_tx_t hso_net_start_xmit(struct sk_buff *skb,
} else {
net->stats.tx_packets++;
net->stats.tx_bytes += skb->len;
- /* And tell the kernel when the last transmit started. */
- net->trans_start = jiffies;
}
dev_kfree_skb(skb);
/* we're done */
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index c4c334d9770f..46890dc625dc 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -856,7 +856,6 @@ skip:
{
kaweth->stats.tx_packets++;
kaweth->stats.tx_bytes += skb->len;
- net->trans_start = jiffies;
}
spin_unlock_irq(&kaweth->device_lock);
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 9f24e3f871e1..834d8cd3005d 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -453,12 +453,12 @@ static void mcs7830_data_set_multicast(struct net_device *net)
* for our 8 byte filter buffer
* to avoid allocating memory that
* is tricky to free later */
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
u32 crc_bits;
/* Build the multicast hash filter. */
- netdev_for_each_mc_addr(mc_list, net) {
- crc_bits = ether_crc(ETH_ALEN, mc_list->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, net) {
+ crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
data->multi_filter[crc_bits >> 3] |= 1 << (crc_bits & 7);
}
}
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 41838773b568..1cd17d274a12 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -808,7 +808,7 @@ static void write_bulk_callback(struct urb *urb)
break;
}
- net->trans_start = jiffies;
+ net->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(net);
}
@@ -909,7 +909,6 @@ static netdev_tx_t pegasus_start_xmit(struct sk_buff *skb,
} else {
pegasus->stats.tx_packets++;
pegasus->stats.tx_bytes += skb->len;
- net->trans_start = jiffies;
}
dev_kfree_skb(skb);
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 35b98b1b79e4..753ee6eb7edd 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -445,14 +445,14 @@ static void smsc75xx_set_multicast(struct net_device *netdev)
netif_dbg(dev, drv, dev->net, "receive all multicast enabled");
pdata->rfe_ctl |= RFE_CTL_AM | RFE_CTL_DPF;
} else if (!netdev_mc_empty(dev->net)) {
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
pdata->rfe_ctl |= RFE_CTL_MHF | RFE_CTL_DPF;
- netdev_for_each_mc_addr(mc_list, netdev) {
- u32 bitnum = smsc75xx_hash(mc_list->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev) {
+ u32 bitnum = smsc75xx_hash(ha->addr);
pdata->multicast_hash_table[bitnum / 32] |=
(1 << (bitnum % 32));
}
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 3135af63d378..12a3c88c5282 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -385,13 +385,13 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
pdata->mac_cr |= MAC_CR_MCPAS_;
pdata->mac_cr &= ~(MAC_CR_PRMS_ | MAC_CR_HPFILT_);
} else if (!netdev_mc_empty(dev->net)) {
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
pdata->mac_cr |= MAC_CR_HPFILT_;
pdata->mac_cr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_);
- netdev_for_each_mc_addr(mc_list, netdev) {
- u32 bitnum = smsc95xx_hash(mc_list->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev) {
+ u32 bitnum = smsc95xx_hash(ha->addr);
u32 mask = 0x01 << (bitnum & 0x1F);
if (bitnum & 0x20)
hash_hi |= mask;
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 7177abc78dc6..a95c73de5824 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1069,12 +1069,15 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
* NOTE: strictly conforming cdc-ether devices should expect
* the ZLP here, but ignore the one-byte packet.
*/
- if (!(info->flags & FLAG_SEND_ZLP) && (length % dev->maxpacket) == 0) {
- urb->transfer_buffer_length++;
- if (skb_tailroom(skb)) {
- skb->data[skb->len] = 0;
- __skb_put(skb, 1);
- }
+ if (length % dev->maxpacket == 0) {
+ if (!(info->flags & FLAG_SEND_ZLP)) {
+ urb->transfer_buffer_length++;
+ if (skb_tailroom(skb)) {
+ skb->data[skb->len] = 0;
+ __skb_put(skb, 1);
+ }
+ } else
+ urb->transfer_flags |= URB_ZERO_PACKET;
}
spin_lock_irqsave(&dev->txq.lock, flags);
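A minimal sketch (illustrative, not from the patch) of the framing rule the new branch implements: a bulk-OUT transfer whose length is an exact multiple of the endpoint's max packet size needs explicit termination, either by padding the buffer with one extra byte or by asking the host controller to emit a zero-length packet via URB_ZERO_PACKET. The helper name and the wants_zlp parameter are hypothetical stand-ins for the FLAG_SEND_ZLP test in usbnet_start_xmit().

#include <linux/usb.h>
#include <linux/skbuff.h>

/* Illustrative helper: terminate a maxpacket-aligned bulk-OUT transfer. */
static void example_terminate_transfer(struct urb *urb, struct sk_buff *skb,
				       unsigned int maxpacket, bool wants_zlp)
{
	if (skb->len % maxpacket != 0)
		return;		/* a short packet already ends the transfer */

	if (wants_zlp) {
		/* Let the host controller append a zero-length packet. */
		urb->transfer_flags |= URB_ZERO_PACKET;
	} else if (skb_tailroom(skb)) {
		/* Pad by one byte so the device never sees a ZLP. */
		skb->data[skb->len] = 0;
		__skb_put(skb, 1);
		urb->transfer_buffer_length++;
	}
}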
diff --git a/drivers/net/vbus-enet.c b/drivers/net/vbus-enet.c
new file mode 100644
index 000000000000..94b86d482cee
--- /dev/null
+++ b/drivers/net/vbus-enet.c
@@ -0,0 +1,1560 @@
+/*
+ * vbus_enet - A virtualized 802.x network device based on the VBUS interface
+ *
+ * Copyright (C) 2009 Novell, Gregory Haskins <ghaskins@novell.com>
+ *
+ * Derived from the SNULL example from the book "Linux Device Drivers" by
+ * Alessandro Rubini, Jonathan Corbet, and Greg Kroah-Hartman, published
+ * by O'Reilly & Associates.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+
+#include <linux/in.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/ioq.h>
+#include <linux/vbus_driver.h>
+
+#include <linux/in6.h>
+#include <asm/checksum.h>
+
+#include <linux/venet.h>
+
+MODULE_AUTHOR("Gregory Haskins");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("virtual-ethernet");
+MODULE_VERSION("1");
+
+static int rx_ringlen = 256;
+module_param(rx_ringlen, int, 0444);
+static int tx_ringlen = 256;
+module_param(tx_ringlen, int, 0444);
+static int sg_enabled = 1;
+module_param(sg_enabled, int, 0444);
+
+#define PDEBUG(_dev, fmt, args...) dev_dbg(&(_dev)->dev, fmt, ## args)
+
+#define SG_DESC_SIZE VSG_DESC_SIZE(MAX_SKB_FRAGS)
+
+struct vbus_enet_queue {
+ struct ioq *queue;
+ struct ioq_notifier notifier;
+ unsigned long count;
+};
+
+struct vbus_enet_priv {
+ spinlock_t lock;
+ struct net_device *dev;
+ struct vbus_device_proxy *vdev;
+ struct napi_struct napi;
+ struct vbus_enet_queue rxq;
+ struct {
+ struct vbus_enet_queue veq;
+ struct tasklet_struct task;
+ struct sk_buff_head outstanding;
+ } tx;
+ bool sg;
+ struct {
+ bool enabled;
+ char *pool;
+ } pmtd; /* pre-mapped transmit descriptors */
+ struct {
+ bool enabled;
+ bool linkstate;
+ bool txc;
+ unsigned long evsize;
+ struct vbus_enet_queue veq;
+ struct tasklet_struct task;
+ char *pool;
+ } evq;
+ struct {
+ bool available;
+ char *pool;
+ struct vbus_enet_queue pageq;
+ } l4ro;
+
+ struct sk_buff *(*import)(struct vbus_enet_priv *priv,
+ struct ioq_ring_desc *desc);
+};
+
+static void vbus_enet_tx_reap(struct vbus_enet_priv *priv);
+
+static struct vbus_enet_priv *
+napi_to_priv(struct napi_struct *napi)
+{
+ return container_of(napi, struct vbus_enet_priv, napi);
+}
+
+static int
+queue_init(struct vbus_enet_priv *priv,
+ struct vbus_enet_queue *q,
+ const char *name,
+ int qid,
+ size_t ringsize,
+ void (*func)(struct ioq_notifier *))
+{
+ struct vbus_device_proxy *dev = priv->vdev;
+ int ret;
+ char _name[64];
+
+ if (name)
+ snprintf(_name, sizeof(_name), "%s-%s", priv->dev->name, name);
+
+ ret = vbus_driver_ioq_alloc(dev, name ? _name : NULL, qid, 0,
+ ringsize, &q->queue);
+ if (ret < 0)
+ panic("ioq_alloc failed: %d\n", ret);
+
+ if (func) {
+ q->notifier.signal = func;
+ q->queue->notifier = &q->notifier;
+ }
+
+ q->count = ringsize;
+
+ return 0;
+}
+
+static int
+devcall(struct vbus_enet_priv *priv, u32 func, void *data, size_t len)
+{
+ struct vbus_device_proxy *dev = priv->vdev;
+
+ return dev->ops->call(dev, func, data, len, 0);
+}
+
+/*
+ * ---------------
+ * rx descriptors
+ * ---------------
+ */
+
+static void
+rxdesc_alloc(struct vbus_enet_priv *priv, struct ioq_ring_desc *desc, size_t len)
+{
+ struct net_device *dev = priv->dev;
+ struct sk_buff *skb;
+
+ len += ETH_HLEN;
+
+ skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
+ BUG_ON(!skb);
+
+ skb_reserve(skb, NET_IP_ALIGN); /* align IP on 16B boundary */
+
+ if (priv->l4ro.available) {
+ /*
+ * We will populate an SG descriptor initially with one
+ * IOV filled with an MTU SKB. If the packet needs to be
+ * larger than MTU, the host will grab pages out of the
+ * page-queue and populate additional IOVs
+ */
+ struct venet_sg *vsg = (struct venet_sg *)(unsigned long)desc->cookie;
+ struct venet_iov *iov = &vsg->iov[0];
+
+ memset(vsg, 0, SG_DESC_SIZE);
+
+ vsg->cookie = (u64)(unsigned long)skb;
+ vsg->count = 1;
+
+ iov->ptr = (u64)__pa(skb->data);
+ iov->len = len;
+ } else {
+ desc->cookie = (u64)(unsigned long)skb;
+ desc->ptr = cpu_to_le64(__pa(skb->data));
+ desc->len = cpu_to_le64(len); /* total length */
+ }
+
+ desc->valid = 1;
+}
+
+static void
+rx_pageq_refill(struct vbus_enet_priv *priv, gfp_t gfp_mask)
+{
+ struct ioq *ioq = priv->l4ro.pageq.queue;
+ struct ioq_iterator iter;
+ int ret, added = 0;
+
+ if (ioq_full(ioq, ioq_idxtype_inuse))
+ /* nothing to do if the pageq is already fully populated */
+ return;
+
+ ret = ioq_iter_init(ioq, &iter, ioq_idxtype_inuse, 0);
+ BUG_ON(ret < 0); /* will never fail unless seriously broken */
+
+ ret = ioq_iter_seek(&iter, ioq_seek_tail, 0, 0);
+ BUG_ON(ret < 0);
+
+ /*
+ * Now populate each descriptor with an empty page
+ */
+ while (!iter.desc->sown) {
+ struct page *page = NULL;
+
+ page = alloc_page(gfp_mask);
+
+ if (!page)
+ break;
+
+ added = 1;
+ iter.desc->cookie = (u64)(unsigned long)page;
+ iter.desc->ptr = cpu_to_le64(__pa(page_address(page)));
+ iter.desc->len = cpu_to_le64(PAGE_SIZE);
+
+ ret = ioq_iter_push(&iter, 0);
+ BUG_ON(ret < 0);
+ }
+
+ if (added)
+ ioq_signal(ioq, 0);
+}
+
+static void
+rx_setup(struct vbus_enet_priv *priv)
+{
+ struct ioq *ioq = priv->rxq.queue;
+ struct ioq_iterator iter;
+ int ret;
+ int i = 0;
+
+ /*
+ * We want to iterate on the "valid" index. By default the iterator
+ * will not "autoupdate" which means it will not hypercall the host
+ * with our changes. This is good, because we are really just
+ * initializing stuff here anyway. Note that you can always manually
+ * signal the host with ioq_signal() if the autoupdate feature is not
+ * used.
+ */
+ ret = ioq_iter_init(ioq, &iter, ioq_idxtype_valid, 0);
+ BUG_ON(ret < 0); /* will never fail unless seriously broken */
+
+ /*
+ * Seek to the tail of the valid index (which should be our first
+ * item, since the queue is brand-new)
+ */
+ ret = ioq_iter_seek(&iter, ioq_seek_tail, 0, 0);
+ BUG_ON(ret < 0);
+
+ /*
+ * Now populate each descriptor with an empty buffer and mark it valid
+ */
+ while (!iter.desc->valid) {
+ if (priv->l4ro.available) {
+ size_t offset = (i * SG_DESC_SIZE);
+ void *addr = &priv->l4ro.pool[offset];
+
+ iter.desc->ptr = cpu_to_le64(offset);
+ iter.desc->cookie = (u64)(unsigned long)addr;
+ iter.desc->len = cpu_to_le64(SG_DESC_SIZE);
+ }
+
+ rxdesc_alloc(priv, iter.desc, priv->dev->mtu);
+
+ /*
+ * This push operation will simultaneously advance the
+ * valid-head index and increment our position in the queue
+ * by one.
+ */
+ ret = ioq_iter_push(&iter, 0);
+ BUG_ON(ret < 0);
+
+ i++;
+ }
+
+ if (priv->l4ro.available)
+ rx_pageq_refill(priv, GFP_KERNEL);
+}
+
+static void
+rx_rxq_teardown(struct vbus_enet_priv *priv)
+{
+ struct ioq *ioq = priv->rxq.queue;
+ struct ioq_iterator iter;
+ int ret;
+
+ ret = ioq_iter_init(ioq, &iter, ioq_idxtype_valid, 0);
+ BUG_ON(ret < 0);
+
+ ret = ioq_iter_seek(&iter, ioq_seek_head, 0, 0);
+ BUG_ON(ret < 0);
+
+ /*
+ * free each valid descriptor
+ */
+ while (iter.desc->valid) {
+ struct sk_buff *skb;
+
+ if (priv->l4ro.available) {
+ struct venet_sg *vsg;
+ int i;
+
+ vsg = (struct venet_sg *)(unsigned long)iter.desc->cookie;
+
+ /* skip i=0, since that is the skb->data IOV */
+ for (i = 1; i < vsg->count; i++) {
+ struct venet_iov *iov = &vsg->iov[i];
+ struct page *page = (struct page *)(unsigned long)iov->ptr;
+
+ put_page(page);
+ }
+
+ skb = (struct sk_buff *)(unsigned long)vsg->cookie;
+ } else
+ skb = (struct sk_buff *)(unsigned long)iter.desc->cookie;
+
+ iter.desc->valid = 0;
+ wmb();
+
+ iter.desc->ptr = 0;
+ iter.desc->cookie = 0;
+
+ ret = ioq_iter_pop(&iter, 0);
+ BUG_ON(ret < 0);
+
+ dev_kfree_skb(skb);
+ }
+}
+
+static void
+rx_l4ro_teardown(struct vbus_enet_priv *priv)
+{
+ struct ioq *ioq = priv->l4ro.pageq.queue;
+ struct ioq_iterator iter;
+ int ret;
+
+ ret = ioq_iter_init(ioq, &iter, ioq_idxtype_inuse, 0);
+ BUG_ON(ret < 0);
+
+ ret = ioq_iter_seek(&iter, ioq_seek_head, 0, 0);
+ BUG_ON(ret < 0);
+
+ /*
+ * free each valid descriptor
+ */
+ while (iter.desc->sown) {
+ struct page *page = (struct page *)(unsigned long)iter.desc->cookie;
+
+ iter.desc->valid = 0;
+ wmb();
+
+ iter.desc->ptr = 0;
+ iter.desc->cookie = 0;
+
+ ret = ioq_iter_pop(&iter, 0);
+ BUG_ON(ret < 0);
+
+ put_page(page);
+ }
+
+ ioq_put(ioq);
+ kfree(priv->l4ro.pool);
+}
+
+static void
+rx_teardown(struct vbus_enet_priv *priv)
+{
+ rx_rxq_teardown(priv);
+
+ if (priv->l4ro.available)
+ rx_l4ro_teardown(priv);
+}
+
+static int
+tx_setup(struct vbus_enet_priv *priv)
+{
+ struct ioq *ioq = priv->tx.veq.queue;
+ struct ioq_iterator iter;
+ int i;
+ int ret;
+
+ if (!priv->sg)
+ /*
+ * There is nothing to do for a ring that is not using
+ * scatter-gather
+ */
+ return 0;
+
+ /* pre-allocate our descriptor pool if pmtd is enabled */
+ if (priv->pmtd.enabled) {
+ struct vbus_device_proxy *dev = priv->vdev;
+ size_t poollen = SG_DESC_SIZE * priv->tx.veq.count;
+ char *pool;
+ int shmid;
+
+ /* pmtdquery will return the shm-id to use for the pool */
+ ret = devcall(priv, VENET_FUNC_PMTDQUERY, NULL, 0);
+ BUG_ON(ret < 0);
+
+ shmid = ret;
+
+ pool = kzalloc(poollen, GFP_KERNEL | GFP_DMA);
+ if (!pool)
+ return -ENOMEM;
+
+ priv->pmtd.pool = pool;
+
+ ret = dev->ops->shm(dev, NULL, shmid, 0, pool, poollen,
+ NULL, NULL, 0);
+ BUG_ON(ret < 0);
+ }
+
+ ret = ioq_iter_init(ioq, &iter, ioq_idxtype_valid, 0);
+ BUG_ON(ret < 0);
+
+ ret = ioq_iter_seek(&iter, ioq_seek_set, 0, 0);
+ BUG_ON(ret < 0);
+
+ /*
+ * Now populate each descriptor with an empty SG descriptor
+ */
+ for (i = 0; i < priv->tx.veq.count; i++) {
+ struct venet_sg *vsg;
+
+ if (priv->pmtd.enabled) {
+ size_t offset = (i * SG_DESC_SIZE);
+
+ vsg = (struct venet_sg *)&priv->pmtd.pool[offset];
+ iter.desc->ptr = cpu_to_le64(offset);
+ } else {
+ vsg = kzalloc(SG_DESC_SIZE, GFP_KERNEL);
+ if (!vsg)
+ return -ENOMEM;
+
+ iter.desc->ptr = cpu_to_le64(__pa(vsg));
+ }
+
+ iter.desc->cookie = (u64)(unsigned long)vsg;
+ iter.desc->len = cpu_to_le64(SG_DESC_SIZE);
+
+ ret = ioq_iter_seek(&iter, ioq_seek_next, 0, 0);
+ BUG_ON(ret < 0);
+ }
+
+ return 0;
+}
+
+static void
+tx_teardown(struct vbus_enet_priv *priv)
+{
+ struct ioq *ioq = priv->tx.veq.queue;
+ struct ioq_iterator iter;
+ struct sk_buff *skb;
+ int ret;
+
+ /* forcefully free all outstanding transmissions */
+ while ((skb = __skb_dequeue(&priv->tx.outstanding)))
+ dev_kfree_skb(skb);
+
+ if (!priv->sg)
+ /*
+ * There is nothing else to do for a ring that is not using
+ * scatter-gather
+ */
+ return;
+
+ if (priv->pmtd.enabled) {
+ /*
+ * PMTD mode means we only need to free the pool
+ */
+ kfree(priv->pmtd.pool);
+ return;
+ }
+
+ ret = ioq_iter_init(ioq, &iter, ioq_idxtype_valid, 0);
+ BUG_ON(ret < 0);
+
+ /* seek to position 0 */
+ ret = ioq_iter_seek(&iter, ioq_seek_set, 0, 0);
+ BUG_ON(ret < 0);
+
+ /*
+ * free each valid descriptor
+ */
+ while (iter.desc->cookie) {
+ struct venet_sg *vsg = (struct venet_sg *)(unsigned long)iter.desc->cookie;
+
+ iter.desc->valid = 0;
+ wmb();
+
+ iter.desc->ptr = 0;
+ iter.desc->cookie = 0;
+
+ ret = ioq_iter_seek(&iter, ioq_seek_next, 0, 0);
+ BUG_ON(ret < 0);
+
+ kfree(vsg);
+ }
+}
+
+static void
+evq_teardown(struct vbus_enet_priv *priv)
+{
+ if (!priv->evq.enabled)
+ return;
+
+ ioq_put(priv->evq.veq.queue);
+ kfree(priv->evq.pool);
+}
+
+/*
+ * Open and close
+ */
+
+static int
+vbus_enet_open(struct net_device *dev)
+{
+ struct vbus_enet_priv *priv = netdev_priv(dev);
+ int ret;
+
+ ret = devcall(priv, VENET_FUNC_LINKUP, NULL, 0);
+ BUG_ON(ret < 0);
+
+ napi_enable(&priv->napi);
+
+ return 0;
+}
+
+static int
+vbus_enet_stop(struct net_device *dev)
+{
+ struct vbus_enet_priv *priv = netdev_priv(dev);
+ int ret;
+
+ napi_disable(&priv->napi);
+
+ ret = devcall(priv, VENET_FUNC_LINKDOWN, NULL, 0);
+ BUG_ON(ret < 0);
+
+ return 0;
+}
+
+/*
+ * Configuration changes (passed on by ifconfig)
+ */
+static int
+vbus_enet_config(struct net_device *dev, struct ifmap *map)
+{
+ if (dev->flags & IFF_UP) /* can't act on a running interface */
+ return -EBUSY;
+
+ /* Don't allow changing the I/O address */
+ if (map->base_addr != dev->base_addr) {
+ dev_warn(&dev->dev, "Can't change I/O address\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* ignore other fields */
+ return 0;
+}
+
+static void
+vbus_enet_schedule_rx(struct vbus_enet_priv *priv)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (napi_schedule_prep(&priv->napi)) {
+ /* Disable further interrupts */
+ ioq_notify_disable(priv->rxq.queue, 0);
+ __napi_schedule(&priv->napi);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static int
+vbus_enet_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct vbus_enet_priv *priv = netdev_priv(dev);
+ int ret;
+
+ dev->mtu = new_mtu;
+
+ /*
+ * FLUSHRX will cause the device to flush any outstanding
+ * RX buffers. They will appear to come in as 0 length
+ * packets which we can simply discard and replace with new_mtu
+ * buffers for the future.
+ */
+ ret = devcall(priv, VENET_FUNC_FLUSHRX, NULL, 0);
+ BUG_ON(ret < 0);
+
+ vbus_enet_schedule_rx(priv);
+
+ return 0;
+}
+
+static struct sk_buff *
+vbus_enet_l4ro_import(struct vbus_enet_priv *priv, struct ioq_ring_desc *desc)
+{
+ struct venet_sg *vsg = (struct venet_sg *)(unsigned long)desc->cookie;
+ struct sk_buff *skb = (struct sk_buff *)(unsigned long)vsg->cookie;
+ struct skb_shared_info *sinfo = skb_shinfo(skb);
+ int i;
+
+ rx_pageq_refill(priv, GFP_ATOMIC);
+
+ if (!vsg->len)
+ /*
+ * the device may send a zero-length packet when it is
+ * flushing references on the ring. We can just drop
+ * these on the floor
+ */
+ goto fail;
+
+ /* advance only by the linear portion in IOV[0] */
+ skb_put(skb, vsg->iov[0].len);
+
+ /* skip i=0, since that is the skb->data IOV */
+ for (i = 1; i < vsg->count; i++) {
+ struct venet_iov *iov = &vsg->iov[i];
+ struct page *page = (struct page *)(unsigned long)iov->ptr;
+ skb_frag_t *f = &sinfo->frags[i-1];
+
+ f->page = page;
+ f->page_offset = 0;
+ f->size = iov->len;
+
+ PDEBUG(priv->dev, "SG: Importing %d byte page[%i]\n",
+ f->size, i);
+
+ skb->data_len += f->size;
+ skb->len += f->size;
+ skb->truesize += f->size;
+ sinfo->nr_frags++;
+ }
+
+ if (vsg->flags & VENET_SG_FLAG_NEEDS_CSUM
+ && !skb_partial_csum_set(skb, vsg->csum.start,
+ vsg->csum.offset)) {
+ priv->dev->stats.rx_frame_errors++;
+ goto fail;
+ }
+
+ if (vsg->flags & VENET_SG_FLAG_GSO) {
+ PDEBUG(priv->dev, "L4RO packet detected\n");
+
+ switch (vsg->gso.type) {
+ case VENET_GSO_TYPE_TCPV4:
+ sinfo->gso_type = SKB_GSO_TCPV4;
+ break;
+ case VENET_GSO_TYPE_TCPV6:
+ sinfo->gso_type = SKB_GSO_TCPV6;
+ break;
+ case VENET_GSO_TYPE_UDP:
+ sinfo->gso_type = SKB_GSO_UDP;
+ break;
+ default:
+ PDEBUG(priv->dev, "Illegal L4RO type: %d\n",
+ vsg->gso.type);
+ priv->dev->stats.rx_frame_errors++;
+ goto fail;
+ }
+
+ if (vsg->flags & VENET_SG_FLAG_ECN)
+ sinfo->gso_type |= SKB_GSO_TCP_ECN;
+
+ sinfo->gso_size = vsg->gso.size;
+ if (sinfo->gso_size == 0) {
+ PDEBUG(priv->dev, "Illegal L4RO size: %d\n",
+ vsg->gso.size);
+ priv->dev->stats.rx_frame_errors++;
+ goto fail;
+ }
+
+ /*
+ * Header must be checked, and gso_segs
+ * computed.
+ */
+ sinfo->gso_type |= SKB_GSO_DODGY;
+ sinfo->gso_segs = 0;
+ }
+
+ return skb;
+
+fail:
+ dev_kfree_skb(skb);
+
+ return NULL;
+}
+
+static struct sk_buff *
+vbus_enet_flat_import(struct vbus_enet_priv *priv, struct ioq_ring_desc *desc)
+{
+ struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->cookie;
+
+ if (!desc->len) {
+ /*
+ * the device may send a zero-length packet when it is
+ * flushing references on the ring. We can just drop
+ * these on the floor
+ */
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+
+ skb_put(skb, le64_to_cpu(desc->len));
+
+ return skb;
+}
+
+/*
+ * The poll implementation.
+ */
+static int
+vbus_enet_poll(struct napi_struct *napi, int budget)
+{
+ struct vbus_enet_priv *priv = napi_to_priv(napi);
+ int npackets = 0;
+ struct ioq_iterator iter;
+ int ret;
+
+ PDEBUG(priv->dev, "polling...\n");
+
+ /* We want to iterate on the head of the in-use index */
+ ret = ioq_iter_init(priv->rxq.queue, &iter, ioq_idxtype_inuse,
+ IOQ_ITER_AUTOUPDATE);
+ BUG_ON(ret < 0);
+
+ ret = ioq_iter_seek(&iter, ioq_seek_head, 0, 0);
+ BUG_ON(ret < 0);
+
+ /*
+ * We stop if we have met the quota or there are no more packets.
+ * The EOM is indicated by finding a packet that is still owned by
+ * the south side
+ */
+ while ((npackets < budget) && (!iter.desc->sown)) {
+ struct sk_buff *skb;
+
+ skb = priv->import(priv, iter.desc);
+ if (skb) {
+ /* Maintain stats */
+ npackets++;
+ priv->dev->stats.rx_packets++;
+ priv->dev->stats.rx_bytes += skb->len;
+
+ /* Pass the buffer up to the stack */
+ skb->dev = priv->dev;
+ skb->protocol = eth_type_trans(skb, priv->dev);
+ netif_receive_skb(skb);
+
+ mb();
+ }
+
+ /* Grab a new buffer to put in the ring */
+ rxdesc_alloc(priv, iter.desc, priv->dev->mtu);
+
+ /* Advance the in-use tail */
+ ret = ioq_iter_pop(&iter, 0);
+ BUG_ON(ret < 0);
+ }
+
+ PDEBUG(priv->dev, "%d packets received\n", npackets);
+
+ /*
+ * If we processed all packets, we're done; tell the kernel and
+ * reenable ints
+ */
+ if (ioq_empty(priv->rxq.queue, ioq_idxtype_inuse)) {
+ napi_complete(napi);
+ ioq_notify_enable(priv->rxq.queue, 0);
+ ret = 0;
+ } else
+ /* We couldn't process everything. */
+ ret = 1;
+
+ return ret;
+}
+
+/*
+ * Transmit a packet (called by the kernel)
+ */
+static int
+vbus_enet_tx_start(struct sk_buff *skb, struct net_device *dev)
+{
+ struct vbus_enet_priv *priv = netdev_priv(dev);
+ struct ioq_iterator iter;
+ int ret;
+ unsigned long flags;
+
+ PDEBUG(priv->dev, "sending %d bytes\n", skb->len);
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (ioq_full(priv->tx.veq.queue, ioq_idxtype_valid)) {
+ /*
+ * We must flow-control the kernel by disabling the
+ * queue
+ */
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_stop_queue(dev);
+ dev_err(&priv->dev->dev, "tx on full queue bug\n");
+ return 1;
+ }
+
+ /*
+ * We want to iterate on the tail of both the "inuse" and "valid" index
+ * so we specify the "both" index
+ */
+ ret = ioq_iter_init(priv->tx.veq.queue, &iter, ioq_idxtype_both,
+ IOQ_ITER_AUTOUPDATE);
+ BUG_ON(ret < 0);
+
+ ret = ioq_iter_seek(&iter, ioq_seek_tail, 0, 0);
+ BUG_ON(ret < 0);
+ BUG_ON(iter.desc->sown);
+
+ if (priv->sg) {
+ struct venet_sg *vsg = (struct venet_sg *)(unsigned long)iter.desc->cookie;
+ struct scatterlist sgl[MAX_SKB_FRAGS+1];
+ struct scatterlist *sg;
+ int count, maxcount = ARRAY_SIZE(sgl);
+
+ sg_init_table(sgl, maxcount);
+
+ memset(vsg, 0, sizeof(*vsg));
+
+ vsg->cookie = (u64)(unsigned long)skb;
+ vsg->len = skb->len;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ vsg->flags |= VENET_SG_FLAG_NEEDS_CSUM;
+ vsg->csum.start = skb->csum_start - skb_headroom(skb);
+ vsg->csum.offset = skb->csum_offset;
+ }
+
+ if (skb_is_gso(skb)) {
+ struct skb_shared_info *sinfo = skb_shinfo(skb);
+
+ vsg->flags |= VENET_SG_FLAG_GSO;
+
+ vsg->gso.hdrlen = skb_headlen(skb);
+ vsg->gso.size = sinfo->gso_size;
+ if (sinfo->gso_type & SKB_GSO_TCPV4)
+ vsg->gso.type = VENET_GSO_TYPE_TCPV4;
+ else if (sinfo->gso_type & SKB_GSO_TCPV6)
+ vsg->gso.type = VENET_GSO_TYPE_TCPV6;
+ else if (sinfo->gso_type & SKB_GSO_UDP)
+ vsg->gso.type = VENET_GSO_TYPE_UDP;
+ else
+ panic("Virtual-Ethernet: unknown GSO type " \
+ "0x%x\n", sinfo->gso_type);
+
+ if (sinfo->gso_type & SKB_GSO_TCP_ECN)
+ vsg->flags |= VENET_SG_FLAG_ECN;
+ }
+
+ count = skb_to_sgvec(skb, sgl, 0, skb->len);
+
+ BUG_ON(count > maxcount);
+
+ for (sg = &sgl[0]; sg; sg = sg_next(sg)) {
+ struct venet_iov *iov = &vsg->iov[vsg->count++];
+
+ iov->len = sg->length;
+ iov->ptr = (u64)sg_phys(sg);
+ }
+
+ iter.desc->len = cpu_to_le64(VSG_DESC_SIZE(vsg->count));
+
+ } else {
+ /*
+ * non scatter-gather mode: simply put the skb right onto the
+ * ring.
+ */
+ iter.desc->cookie = (u64)(unsigned long)skb;
+ iter.desc->len = cpu_to_le64(skb->len);
+ iter.desc->ptr = cpu_to_le64(__pa(skb->data));
+ }
+
+ iter.desc->valid = 1;
+
+ priv->dev->stats.tx_packets++;
+ priv->dev->stats.tx_bytes += skb->len;
+
+ skb_queue_tail(&priv->tx.outstanding, skb);
+
+ /*
+ * This advances both indexes together implicitly, and then
+ * signals the south side to consume the packet
+ */
+ ret = ioq_iter_push(&iter, 0);
+ BUG_ON(ret < 0);
+
+ dev->trans_start = jiffies; /* save the timestamp */
+
+ if (ioq_full(priv->tx.veq.queue, ioq_idxtype_valid)) {
+ /*
+ * If the queue is congested, we must flow-control the kernel
+ */
+ PDEBUG(priv->dev, "backpressure tx queue\n");
+ netif_stop_queue(dev);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+/* assumes priv->lock held */
+static void
+vbus_enet_skb_complete(struct vbus_enet_priv *priv, struct sk_buff *skb)
+{
+ PDEBUG(priv->dev, "completed sending %d bytes\n",
+ skb->len);
+
+ skb_unlink(skb, &priv->tx.outstanding);
+ dev_kfree_skb(skb);
+}
+
+/*
+ * reclaim any outstanding completed tx packets
+ *
+ * assumes priv->lock held
+ */
+static struct sk_buff *
+vbus_enet_tx_reap_one(struct vbus_enet_priv *priv)
+{
+ struct sk_buff *skb = NULL;
+ struct ioq_iterator iter;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /*
+ * We want to iterate on the head of the valid index, but we
+ * do not want the iter_pop (below) to flip the ownership, so
+ * we set the NOFLIPOWNER option
+ */
+ ret = ioq_iter_init(priv->tx.veq.queue, &iter, ioq_idxtype_valid,
+ IOQ_ITER_NOFLIPOWNER);
+ BUG_ON(ret < 0);
+
+ ret = ioq_iter_seek(&iter, ioq_seek_head, 0, 0);
+ BUG_ON(ret < 0);
+
+ if (iter.desc->valid && !iter.desc->sown) {
+
+ if (priv->sg) {
+ struct venet_sg *vsg;
+
+ vsg = (struct venet_sg *)(unsigned long)iter.desc->cookie;
+ skb = (struct sk_buff *)(unsigned long)vsg->cookie;
+ } else
+ skb = (struct sk_buff *)(unsigned long)iter.desc->cookie;
+
+ /* Reset the descriptor */
+ iter.desc->valid = 0;
+
+ /* Advance the valid-index head */
+ ret = ioq_iter_pop(&iter, 0);
+ BUG_ON(ret < 0);
+ }
+
+ /*
+ * If we were previously stopped due to flow control, restart the
+ * processing
+ */
+ if (netif_queue_stopped(priv->dev)
+ && !ioq_full(priv->tx.veq.queue, ioq_idxtype_valid)) {
+ PDEBUG(priv->dev, "re-enabling tx queue\n");
+ netif_wake_queue(priv->dev);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return skb;
+}
+
+static void
+vbus_enet_tx_reap(struct vbus_enet_priv *priv)
+{
+ struct sk_buff *skb;
+
+ while ((skb = vbus_enet_tx_reap_one(priv))) {
+ if (!priv->evq.txc)
+ /*
+ * We are responsible for freeing the packet upon
+ * reap if TXC is not enabled
+ */
+ vbus_enet_skb_complete(priv, skb);
+ }
+}
+
+static void
+vbus_enet_timeout(struct net_device *dev)
+{
+ struct vbus_enet_priv *priv = netdev_priv(dev);
+
+ dev_dbg(&dev->dev, "Transmit timeout\n");
+
+ vbus_enet_tx_reap(priv);
+}
+
+static void
+rx_isr(struct ioq_notifier *notifier)
+{
+ struct vbus_enet_priv *priv;
+ struct net_device *dev;
+
+ priv = container_of(notifier, struct vbus_enet_priv, rxq.notifier);
+ dev = priv->dev;
+
+ if (!ioq_empty(priv->rxq.queue, ioq_idxtype_inuse))
+ vbus_enet_schedule_rx(priv);
+}
+
+static void
+deferred_tx_isr(unsigned long data)
+{
+ struct vbus_enet_priv *priv = (struct vbus_enet_priv *)data;
+
+ PDEBUG(priv->dev, "deferred_tx_isr\n");
+
+ vbus_enet_tx_reap(priv);
+
+ ioq_notify_enable(priv->tx.veq.queue, 0);
+}
+
+static void
+tx_isr(struct ioq_notifier *notifier)
+{
+ struct vbus_enet_priv *priv;
+
+ priv = container_of(notifier, struct vbus_enet_priv, tx.veq.notifier);
+
+ PDEBUG(priv->dev, "tx_isr\n");
+
+ ioq_notify_disable(priv->tx.veq.queue, 0);
+ tasklet_schedule(&priv->tx.task);
+}
+
+static void
+evq_linkstate_event(struct vbus_enet_priv *priv,
+ struct venet_event_header *header)
+{
+ struct venet_event_linkstate *event =
+ (struct venet_event_linkstate *)header;
+
+ switch (event->state) {
+ case 0:
+ netif_carrier_off(priv->dev);
+ break;
+ case 1:
+ netif_carrier_on(priv->dev);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+evq_txc_event(struct vbus_enet_priv *priv,
+ struct venet_event_header *header)
+{
+ struct venet_event_txc *event =
+ (struct venet_event_txc *)header;
+
+ vbus_enet_tx_reap(priv);
+
+ vbus_enet_skb_complete(priv, (struct sk_buff *)(unsigned long)event->cookie);
+}
+
+static void
+deferred_evq_isr(unsigned long data)
+{
+ struct vbus_enet_priv *priv = (struct vbus_enet_priv *)data;
+ int nevents = 0;
+ struct ioq_iterator iter;
+ int ret;
+
+ PDEBUG(priv->dev, "evq: polling...\n");
+
+ /* We want to iterate on the head of the in-use index */
+ ret = ioq_iter_init(priv->evq.veq.queue, &iter, ioq_idxtype_inuse,
+ IOQ_ITER_AUTOUPDATE);
+ BUG_ON(ret < 0);
+
+ ret = ioq_iter_seek(&iter, ioq_seek_head, 0, 0);
+ BUG_ON(ret < 0);
+
+ /*
+ * The EOM is indicated by finding a packet that is still owned by
+ * the south side
+ */
+ while (!iter.desc->sown) {
+ struct venet_event_header *header;
+
+ header = (struct venet_event_header *)(unsigned long)iter.desc->cookie;
+
+ switch (header->id) {
+ case VENET_EVENT_LINKSTATE:
+ evq_linkstate_event(priv, header);
+ break;
+ case VENET_EVENT_TXC:
+ evq_txc_event(priv, header);
+ break;
+ default:
+ panic("venet: unexpected event id:%d of size %d\n",
+ header->id, header->size);
+ break;
+ }
+
+ memset((void *)(unsigned long)iter.desc->cookie, 0, priv->evq.evsize);
+
+ /* Advance the in-use tail */
+ ret = ioq_iter_pop(&iter, 0);
+ BUG_ON(ret < 0);
+
+ nevents++;
+ }
+
+ PDEBUG(priv->dev, "%d events received\n", nevents);
+
+ ioq_notify_enable(priv->evq.veq.queue, 0);
+}
+
+static void
+evq_isr(struct ioq_notifier *notifier)
+{
+ struct vbus_enet_priv *priv;
+
+ priv = container_of(notifier, struct vbus_enet_priv, evq.veq.notifier);
+
+ PDEBUG(priv->dev, "evq_isr\n");
+
+ ioq_notify_disable(priv->evq.veq.queue, 0);
+ tasklet_schedule(&priv->evq.task);
+}
+
+static int
+vbus_enet_sg_negcap(struct vbus_enet_priv *priv)
+{
+ struct net_device *dev = priv->dev;
+ struct venet_capabilities caps;
+ int ret;
+
+ memset(&caps, 0, sizeof(caps));
+
+ if (sg_enabled) {
+ caps.gid = VENET_CAP_GROUP_SG;
+ caps.bits |= (VENET_CAP_SG|VENET_CAP_TSO4|VENET_CAP_TSO6
+ |VENET_CAP_ECN|VENET_CAP_PMTD);
+ /* note: exclude UFO for now due to stack bug */
+ }
+
+ ret = devcall(priv, VENET_FUNC_NEGCAP, &caps, sizeof(caps));
+ if (ret < 0)
+ return ret;
+
+ if (caps.bits & VENET_CAP_SG) {
+ priv->sg = true;
+
+ dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM|NETIF_F_FRAGLIST;
+
+ if (caps.bits & VENET_CAP_TSO4)
+ dev->features |= NETIF_F_TSO;
+ if (caps.bits & VENET_CAP_UFO)
+ dev->features |= NETIF_F_UFO;
+ if (caps.bits & VENET_CAP_TSO6)
+ dev->features |= NETIF_F_TSO6;
+ if (caps.bits & VENET_CAP_ECN)
+ dev->features |= NETIF_F_TSO_ECN;
+
+ if (caps.bits & VENET_CAP_PMTD)
+ priv->pmtd.enabled = true;
+ }
+
+ return 0;
+}
+
+static int
+vbus_enet_evq_negcap(struct vbus_enet_priv *priv, unsigned long count)
+{
+ struct venet_capabilities caps;
+ int ret;
+
+ memset(&caps, 0, sizeof(caps));
+
+ caps.gid = VENET_CAP_GROUP_EVENTQ;
+ caps.bits |= VENET_CAP_EVQ_LINKSTATE;
+ caps.bits |= VENET_CAP_EVQ_TXC;
+
+ ret = devcall(priv, VENET_FUNC_NEGCAP, &caps, sizeof(caps));
+ if (ret < 0)
+ return ret;
+
+ if (caps.bits) {
+ struct vbus_device_proxy *dev = priv->vdev;
+ struct venet_eventq_query query;
+ size_t poollen;
+ struct ioq_iterator iter;
+ char *pool;
+ int i;
+
+ priv->evq.enabled = true;
+
+ if (caps.bits & VENET_CAP_EVQ_LINKSTATE) {
+ /*
+ * We will assume there is no carrier until we get
+ * an event telling us otherwise
+ */
+ netif_carrier_off(priv->dev);
+ priv->evq.linkstate = true;
+ }
+
+ if (caps.bits & VENET_CAP_EVQ_TXC)
+ priv->evq.txc = true;
+
+ memset(&query, 0, sizeof(query));
+
+ ret = devcall(priv, VENET_FUNC_EVQQUERY, &query, sizeof(query));
+ if (ret < 0)
+ return ret;
+
+ priv->evq.evsize = query.evsize;
+ poollen = query.evsize * count;
+
+ pool = kzalloc(poollen, GFP_KERNEL | GFP_DMA);
+ if (!pool)
+ return -ENOMEM;
+
+ priv->evq.pool = pool;
+
+ ret = dev->ops->shm(dev, NULL, query.dpid, 0,
+ pool, poollen, NULL, NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ queue_init(priv, &priv->evq.veq, "evq",
+ query.qid, count, evq_isr);
+
+ ret = ioq_iter_init(priv->evq.veq.queue,
+ &iter, ioq_idxtype_valid, 0);
+ BUG_ON(ret < 0);
+
+ ret = ioq_iter_seek(&iter, ioq_seek_set, 0, 0);
+ BUG_ON(ret < 0);
+
+ /* Now populate each descriptor with an empty event */
+ for (i = 0; i < count; i++) {
+ size_t offset = (i * query.evsize);
+ void *addr = &priv->evq.pool[offset];
+
+ iter.desc->ptr = cpu_to_le64(offset);
+ iter.desc->cookie = (u64)(unsigned long)addr;
+ iter.desc->len = cpu_to_le64(query.evsize);
+
+ ret = ioq_iter_push(&iter, 0);
+ BUG_ON(ret < 0);
+ }
+
+ /* Finally, enable interrupts */
+ tasklet_init(&priv->evq.task, deferred_evq_isr,
+ (unsigned long)priv);
+ ioq_notify_enable(priv->evq.veq.queue, 0);
+ }
+
+ return 0;
+}
+
+static int
+vbus_enet_l4ro_negcap(struct vbus_enet_priv *priv, unsigned long count)
+{
+ struct venet_capabilities caps;
+ int ret;
+
+ memset(&caps, 0, sizeof(caps));
+
+ caps.gid = VENET_CAP_GROUP_L4RO;
+ caps.bits |= (VENET_CAP_SG|VENET_CAP_TSO4|VENET_CAP_TSO6
+ |VENET_CAP_ECN);
+
+ ret = devcall(priv, VENET_FUNC_NEGCAP, &caps, sizeof(caps));
+ if (ret < 0) {
+ printk(KERN_ERR "Error negotiating L4RO: %d\n", ret);
+ return ret;
+ }
+
+ if (caps.bits & VENET_CAP_SG) {
+ struct vbus_device_proxy *dev = priv->vdev;
+ size_t poollen = SG_DESC_SIZE * count;
+ struct venet_l4ro_query query;
+ char *pool;
+
+ memset(&query, 0, sizeof(query));
+
+ ret = devcall(priv, VENET_FUNC_L4ROQUERY, &query, sizeof(query));
+ if (ret < 0) {
+ printk(KERN_ERR "Error querying L4RO: %d\n", ret);
+ return ret;
+ }
+
+ pool = kzalloc(poollen, GFP_KERNEL | GFP_DMA);
+ if (!pool)
+ return -ENOMEM;
+
+ /*
+ * pre-mapped descriptor pool
+ */
+ ret = dev->ops->shm(dev, NULL, query.dpid, 0,
+ pool, poollen, NULL, NULL, 0);
+ if (ret < 0) {
+ printk(KERN_ERR "Error registering L4RO pool: %d\n",
+ ret);
+ kfree(pool);
+ return ret;
+ }
+
+ /*
+ * page-queue: contains a ring of arbitrary pages for
+ * consumption by the host when the SG::IOV count exceeds
+ * one MTU frame. All we need to do is keep it populated
+ * with free pages.
+ */
+ queue_init(priv, &priv->l4ro.pageq, "pageq", query.pqid,
+ count, NULL);
+
+ priv->l4ro.pool = pool;
+ priv->l4ro.available = true;
+ }
+
+ return 0;
+}
+
+static int
+vbus_enet_negcap(struct vbus_enet_priv *priv)
+{
+ int ret;
+
+ ret = vbus_enet_sg_negcap(priv);
+ if (ret < 0)
+ return ret;
+
+ ret = vbus_enet_evq_negcap(priv, tx_ringlen);
+ if (ret < 0)
+ return ret;
+
+ ret = vbus_enet_l4ro_negcap(priv, rx_ringlen);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int vbus_enet_set_tx_csum(struct net_device *dev, u32 data)
+{
+ struct vbus_enet_priv *priv = netdev_priv(dev);
+
+ if (data && !priv->sg)
+ return -ENOSYS;
+
+ return ethtool_op_set_tx_hw_csum(dev, data);
+}
+
+static struct ethtool_ops vbus_enet_ethtool_ops = {
+ .set_tx_csum = vbus_enet_set_tx_csum,
+ .set_sg = ethtool_op_set_sg,
+ .set_tso = ethtool_op_set_tso,
+ .get_link = ethtool_op_get_link,
+};
+
+static const struct net_device_ops vbus_enet_netdev_ops = {
+ .ndo_open = vbus_enet_open,
+ .ndo_stop = vbus_enet_stop,
+ .ndo_set_config = vbus_enet_config,
+ .ndo_start_xmit = vbus_enet_tx_start,
+ .ndo_change_mtu = vbus_enet_change_mtu,
+ .ndo_tx_timeout = vbus_enet_timeout,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+/*
+ * This is called whenever a new vbus_device_proxy is added to the vbus
+ * with the matching VENET_ID
+ */
+static int
+vbus_enet_probe(struct vbus_device_proxy *vdev)
+{
+ struct net_device *dev;
+ struct vbus_enet_priv *priv;
+ int ret;
+
+ printk(KERN_INFO "VENET: Found new device at %lld\n", vdev->id);
+
+ ret = vdev->ops->open(vdev, VENET_VERSION, 0);
+ if (ret < 0)
+ return ret;
+
+ dev = alloc_etherdev(sizeof(struct vbus_enet_priv));
+ if (!dev)
+ return -ENOMEM;
+
+ /*
+ * establish our device-name early so we can incorporate it into
+ * the signal-path names, etc
+ */
+ rtnl_lock();
+
+ ret = dev_alloc_name(dev, dev->name);
+ if (ret < 0)
+ goto out_free;
+
+ priv = netdev_priv(dev);
+
+ spin_lock_init(&priv->lock);
+ priv->dev = dev;
+ priv->vdev = vdev;
+
+ ret = vbus_enet_negcap(priv);
+ if (ret < 0) {
+ printk(KERN_INFO "VENET: Error negotiating capabilities for " \
+ "%lld\n",
+ priv->vdev->id);
+ goto out_free;
+ }
+
+ if (priv->l4ro.available)
+ priv->import = &vbus_enet_l4ro_import;
+ else
+ priv->import = &vbus_enet_flat_import;
+
+ skb_queue_head_init(&priv->tx.outstanding);
+
+ queue_init(priv, &priv->rxq, "rx", VENET_QUEUE_RX, rx_ringlen,
+ rx_isr);
+ queue_init(priv, &priv->tx.veq, "tx", VENET_QUEUE_TX, tx_ringlen,
+ tx_isr);
+
+ rx_setup(priv);
+ tx_setup(priv);
+
+ ioq_notify_enable(priv->rxq.queue, 0); /* enable rx interrupts */
+
+ if (!priv->evq.txc) {
+ /*
+ * If the TXC feature is present, we will receive our
+ * tx-complete notification via the event-channel. Therefore,
+ * we only enable txq interrupts if the TXC feature is not
+ * present.
+ */
+ tasklet_init(&priv->tx.task, deferred_tx_isr,
+ (unsigned long)priv);
+ ioq_notify_enable(priv->tx.veq.queue, 0);
+ }
+
+ dev->netdev_ops = &vbus_enet_netdev_ops;
+ dev->watchdog_timeo = 5 * HZ;
+ SET_ETHTOOL_OPS(dev, &vbus_enet_ethtool_ops);
+ SET_NETDEV_DEV(dev, &vdev->dev);
+
+ netif_napi_add(dev, &priv->napi, vbus_enet_poll, 128);
+
+ ret = devcall(priv, VENET_FUNC_MACQUERY, priv->dev->dev_addr, ETH_ALEN);
+ if (ret < 0) {
+ printk(KERN_INFO "VENET: Error obtaining MAC address for " \
+ "%lld\n",
+ priv->vdev->id);
+ goto out_free;
+ }
+
+ dev->features |= NETIF_F_HIGHDMA;
+
+ ret = register_netdevice(dev);
+ if (ret < 0) {
+ printk(KERN_INFO "VENET: error %i registering device \"%s\"\n",
+ ret, dev->name);
+ goto out_free;
+ }
+
+ rtnl_unlock();
+
+ vdev->priv = priv;
+
+ return 0;
+
+ out_free:
+ rtnl_unlock();
+
+ free_netdev(dev);
+
+ return ret;
+}
+
+static int
+vbus_enet_remove(struct vbus_device_proxy *vdev)
+{
+ struct vbus_enet_priv *priv = (struct vbus_enet_priv *)vdev->priv;
+ struct vbus_device_proxy *dev = priv->vdev;
+
+ unregister_netdev(priv->dev);
+ napi_disable(&priv->napi);
+
+ rx_teardown(priv);
+ ioq_put(priv->rxq.queue);
+
+ tx_teardown(priv);
+ ioq_put(priv->tx.veq.queue);
+
+ if (priv->evq.enabled)
+ evq_teardown(priv);
+
+ dev->ops->close(dev, 0);
+
+ free_netdev(priv->dev);
+
+ return 0;
+}
+
+/*
+ * Finally, the module stuff
+ */
+
+static struct vbus_driver_ops vbus_enet_driver_ops = {
+ .probe = vbus_enet_probe,
+ .remove = vbus_enet_remove,
+};
+
+static struct vbus_driver vbus_enet_driver = {
+ .type = VENET_TYPE,
+ .owner = THIS_MODULE,
+ .ops = &vbus_enet_driver_ops,
+};
+
+static __init int
+vbus_enet_init_module(void)
+{
+ printk(KERN_INFO "Virtual Ethernet: Copyright (C) 2009 Novell, Gregory Haskins\n");
+ printk(KERN_DEBUG "VENET: Using %d/%d queue depth\n",
+ rx_ringlen, tx_ringlen);
+ return vbus_driver_register(&vbus_enet_driver);
+}
+
+static __exit void
+vbus_enet_cleanup(void)
+{
+ vbus_driver_unregister(&vbus_enet_driver);
+}
+
+module_init(vbus_enet_init_module);
+module_exit(vbus_enet_cleanup);
+
+VBUS_DRIVER_AUTOPROBE(VENET_TYPE);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index f9f0730b53d5..5ec542dd5b50 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -187,7 +187,6 @@ tx_drop:
return NETDEV_TX_OK;
rx_drop:
- kfree_skb(skb);
rcv_stats->rx_dropped++;
return NETDEV_TX_OK;
}
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 388751aa66e0..4930f9dbc493 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -1209,7 +1209,7 @@ static void rhine_reset_task(struct work_struct *work)
spin_unlock_bh(&rp->lock);
enable_irq(rp->pdev->irq);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
dev->stats.tx_errors++;
netif_wake_queue(dev);
}
@@ -1294,8 +1294,6 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
netif_stop_queue(dev);
- dev->trans_start = jiffies;
-
spin_unlock_irqrestore(&rp->lock, flags);
if (debug > 4) {
@@ -1703,11 +1701,11 @@ static void rhine_set_rx_mode(struct net_device *dev)
iowrite32(0xffffffff, ioaddr + MulticastFilter1);
rx_mode = 0x0C;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
- int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
}
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index bc278d4ee89d..42dffd3e5795 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -719,30 +719,30 @@ static u32 mii_check_media_mode(struct mac_regs __iomem *regs)
u32 status = 0;
u16 ANAR;
- if (!MII_REG_BITS_IS_ON(BMSR_LNK, MII_REG_BMSR, regs))
+ if (!MII_REG_BITS_IS_ON(BMSR_LSTATUS, MII_BMSR, regs))
status |= VELOCITY_LINK_FAIL;
- if (MII_REG_BITS_IS_ON(G1000CR_1000FD, MII_REG_G1000CR, regs))
+ if (MII_REG_BITS_IS_ON(ADVERTISE_1000FULL, MII_CTRL1000, regs))
status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
- else if (MII_REG_BITS_IS_ON(G1000CR_1000, MII_REG_G1000CR, regs))
+ else if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF, MII_CTRL1000, regs))
status |= (VELOCITY_SPEED_1000);
else {
- velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
- if (ANAR & ANAR_TXFD)
+ velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
+ if (ANAR & ADVERTISE_100FULL)
status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
- else if (ANAR & ANAR_TX)
+ else if (ANAR & ADVERTISE_100HALF)
status |= VELOCITY_SPEED_100;
- else if (ANAR & ANAR_10FD)
+ else if (ANAR & ADVERTISE_10FULL)
status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
else
status |= (VELOCITY_SPEED_10);
}
- if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) {
- velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
- if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10))
- == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) {
- if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs))
+ if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
+ velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
+ if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
+ == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
+ if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
status |= VELOCITY_AUTONEG_ENABLE;
}
}
@@ -801,23 +801,23 @@ static void set_mii_flow_control(struct velocity_info *vptr)
/*Enable or Disable PAUSE in ANAR */
switch (vptr->options.flow_cntl) {
case FLOW_CNTL_TX:
- MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
- MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
+ MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
break;
case FLOW_CNTL_RX:
- MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
- MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
+ MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
break;
case FLOW_CNTL_TX_RX:
- MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
- MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
+ MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
break;
case FLOW_CNTL_DISABLE:
- MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
- MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
+ MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
break;
default:
break;
@@ -832,10 +832,10 @@ static void set_mii_flow_control(struct velocity_info *vptr)
*/
static void mii_set_auto_on(struct velocity_info *vptr)
{
- if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs))
- MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs);
+ if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs))
+ MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
else
- MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs);
+ MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs);
}
static u32 check_connection_type(struct mac_regs __iomem *regs)
@@ -860,11 +860,11 @@ static u32 check_connection_type(struct mac_regs __iomem *regs)
else
status |= VELOCITY_SPEED_100;
- if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) {
- velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
- if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10))
- == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) {
- if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs))
+ if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
+ velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
+ if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
+ == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
+ if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
status |= VELOCITY_AUTONEG_ENABLE;
}
}
@@ -905,7 +905,7 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
*/
if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
- MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs);
+ MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
/*
* If connection type is AUTO
@@ -915,9 +915,9 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
/* clear force MAC mode bit */
BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
/* set duplex mode of MAC according to duplex mode of MII */
- MII_REG_BITS_ON(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10, MII_REG_ANAR, vptr->mac_regs);
- MII_REG_BITS_ON(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
- MII_REG_BITS_ON(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs);
+ MII_REG_BITS_ON(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF, MII_ADVERTISE, vptr->mac_regs);
+ MII_REG_BITS_ON(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
+ MII_REG_BITS_ON(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs);
/* enable AUTO-NEGO mode */
mii_set_auto_on(vptr);
@@ -952,31 +952,31 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
}
- MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
+ MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10))
BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
else
BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
- /* MII_REG_BITS_OFF(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs); */
- velocity_mii_read(vptr->mac_regs, MII_REG_ANAR, &ANAR);
- ANAR &= (~(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10));
+ /* MII_REG_BITS_OFF(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); */
+ velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR);
+ ANAR &= (~(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF));
if (mii_status & VELOCITY_SPEED_100) {
if (mii_status & VELOCITY_DUPLEX_FULL)
- ANAR |= ANAR_TXFD;
+ ANAR |= ADVERTISE_100FULL;
else
- ANAR |= ANAR_TX;
+ ANAR |= ADVERTISE_100HALF;
} else {
if (mii_status & VELOCITY_DUPLEX_FULL)
- ANAR |= ANAR_10FD;
+ ANAR |= ADVERTISE_10FULL;
else
- ANAR |= ANAR_10;
+ ANAR |= ADVERTISE_10HALF;
}
- velocity_mii_write(vptr->mac_regs, MII_REG_ANAR, ANAR);
+ velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR);
/* enable AUTO-NEGO mode */
mii_set_auto_on(vptr);
- /* MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs); */
+ /* MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); */
}
/* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */
/* vptr->mii_status=check_connection_type(vptr->mac_regs); */
@@ -1126,7 +1126,7 @@ static void velocity_set_multi(struct net_device *dev)
struct mac_regs __iomem *regs = vptr->mac_regs;
u8 rx_mode;
int i;
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
writel(0xffffffff, &regs->MARCAM[0]);
@@ -1142,8 +1142,8 @@ static void velocity_set_multi(struct net_device *dev)
mac_get_cam_mask(regs, vptr->mCAMmask);
i = 0;
- netdev_for_each_mc_addr(mclist, dev) {
- mac_set_cam(regs, i + offset, mclist->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ mac_set_cam(regs, i + offset, ha->addr);
vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
i++;
}
@@ -1178,36 +1178,36 @@ static void mii_init(struct velocity_info *vptr, u32 mii_status)
/*
* Reset to hardware default
*/
- MII_REG_BITS_OFF((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_OFF((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
/*
* Turn on ECHODIS bit in NWay-forced full mode and turn it
* off it in NWay-forced half mode for NWay-forced v.s.
* legacy-forced issue.
*/
if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
- MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
+ MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
else
- MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
+ MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
/*
* Turn on Link/Activity LED enable bit for CIS8201
*/
- MII_REG_BITS_ON(PLED_LALBE, MII_REG_PLED, vptr->mac_regs);
+ MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
break;
case PHYID_VT3216_32BIT:
case PHYID_VT3216_64BIT:
/*
* Reset to hardware default
*/
- MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
/*
* Turn on ECHODIS bit in NWay-forced full mode and turn it
* off it in NWay-forced half mode for NWay-forced v.s.
* legacy-forced issue
*/
if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
- MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
+ MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
else
- MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
+ MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
break;
case PHYID_MARVELL_1000:
@@ -1219,15 +1219,15 @@ static void mii_init(struct velocity_info *vptr, u32 mii_status)
/*
* Reset to hardware default
*/
- MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
break;
default:
;
}
- velocity_mii_read(vptr->mac_regs, MII_REG_BMCR, &BMCR);
- if (BMCR & BMCR_ISO) {
- BMCR &= ~BMCR_ISO;
- velocity_mii_write(vptr->mac_regs, MII_REG_BMCR, BMCR);
+ velocity_mii_read(vptr->mac_regs, MII_BMCR, &BMCR);
+ if (BMCR & BMCR_ISOLATE) {
+ BMCR &= ~BMCR_ISOLATE;
+ velocity_mii_write(vptr->mac_regs, MII_BMCR, BMCR);
}
}
@@ -2606,7 +2606,6 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
td_ptr->td_buf[0].size |= TD_QUEUE;
mac_tx_queue_wake(vptr->mac_regs, qnum);
- dev->trans_start = jiffies;
spin_unlock_irqrestore(&vptr->lock, flags);
out:
return NETDEV_TX_OK;
@@ -2953,13 +2952,13 @@ static int velocity_set_wol(struct velocity_info *vptr)
if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
- MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs);
+ MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
- MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
+ MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
}
if (vptr->mii_status & VELOCITY_SPEED_1000)
- MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs);
+ MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index ef4a0f64ba16..c38191179fae 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -1240,86 +1240,16 @@ struct velocity_context {
u32 pattern[8];
};
-
-/*
- * MII registers.
- */
-
-
/*
* Registers in the MII (offset unit is WORD)
*/
-#define MII_REG_BMCR 0x00 // physical address
-#define MII_REG_BMSR 0x01 //
-#define MII_REG_PHYID1 0x02 // OUI
-#define MII_REG_PHYID2 0x03 // OUI + Module ID + REV ID
-#define MII_REG_ANAR 0x04 //
-#define MII_REG_ANLPAR 0x05 //
-#define MII_REG_G1000CR 0x09 //
-#define MII_REG_G1000SR 0x0A //
-#define MII_REG_MODCFG 0x10 //
-#define MII_REG_TCSR 0x16 //
-#define MII_REG_PLED 0x1B //
-// NS, MYSON only
-#define MII_REG_PCR 0x17 //
-// ESI only
-#define MII_REG_PCSR 0x17 //
-#define MII_REG_AUXCR 0x1C //
-
// Marvell 88E1000/88E1000S
#define MII_REG_PSCR 0x10 // PHY specific control register
//
-// Bits in the BMCR register
-//
-#define BMCR_RESET 0x8000 //
-#define BMCR_LBK 0x4000 //
-#define BMCR_SPEED100 0x2000 //
-#define BMCR_AUTO 0x1000 //
-#define BMCR_PD 0x0800 //
-#define BMCR_ISO 0x0400 //
-#define BMCR_REAUTO 0x0200 //
-#define BMCR_FDX 0x0100 //
-#define BMCR_SPEED1G 0x0040 //
-//
-// Bits in the BMSR register
-//
-#define BMSR_AUTOCM 0x0020 //
-#define BMSR_LNK 0x0004 //
-
-//
-// Bits in the ANAR register
-//
-#define ANAR_ASMDIR 0x0800 // Asymmetric PAUSE support
-#define ANAR_PAUSE 0x0400 // Symmetric PAUSE Support
-#define ANAR_T4 0x0200 //
-#define ANAR_TXFD 0x0100 //
-#define ANAR_TX 0x0080 //
-#define ANAR_10FD 0x0040 //
-#define ANAR_10 0x0020 //
-//
-// Bits in the ANLPAR register
-//
-#define ANLPAR_ASMDIR 0x0800 // Asymmetric PAUSE support
-#define ANLPAR_PAUSE 0x0400 // Symmetric PAUSE Support
-#define ANLPAR_T4 0x0200 //
-#define ANLPAR_TXFD 0x0100 //
-#define ANLPAR_TX 0x0080 //
-#define ANLPAR_10FD 0x0040 //
-#define ANLPAR_10 0x0020 //
-
-//
-// Bits in the G1000CR register
-//
-#define G1000CR_1000FD 0x0200 // PHY is 1000-T Full-duplex capable
-#define G1000CR_1000 0x0100 // PHY is 1000-T Half-duplex capable
-
-//
-// Bits in the G1000SR register
+// Bits in the Silicon revision register
//
-#define G1000SR_1000FD 0x0800 // LP PHY is 1000-T Full-duplex capable
-#define G1000SR_1000 0x0400 // LP PHY is 1000-T Half-duplex capable
#define TCSR_ECHODIS 0x2000 //
#define AUXCR_MDPPS 0x0004 //
@@ -1338,7 +1268,6 @@ struct velocity_context {
#define PHYID_REV_ID_MASK 0x0000000FUL
-#define PHYID_GET_PHY_REV_ID(i) ((i) & PHYID_REV_ID_MASK)
#define PHYID_GET_PHY_ID(i) ((i) & ~PHYID_REV_ID_MASK)
#define MII_REG_BITS_ON(x,i,p) do {\
@@ -1362,8 +1291,8 @@ struct velocity_context {
#define MII_GET_PHY_ID(p) ({\
u32 id;\
- velocity_mii_read((p),MII_REG_PHYID2,(u16 *) &id);\
- velocity_mii_read((p),MII_REG_PHYID1,((u16 *) &id)+1);\
+ velocity_mii_read((p),MII_PHYSID2,(u16 *) &id);\
+ velocity_mii_read((p),MII_PHYSID1,((u16 *) &id)+1);\
(id);})
/*
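The via-velocity hunks above drop the driver-private MII register and bit macros in favor of the generic names from <linux/mii.h> (MII_BMCR, MII_ADVERTISE, BMCR_ISOLATE, ADVERTISE_100FULL and friends); the bit values are unchanged, as the removed ANAR_* definitions show. The following stand-alone C sketch, with values copied from those removed definitions, is illustrative only and not part of the patch; it mirrors the advertisement-mask logic in velocity_set_media_mode() above.

#include <stdio.h>

/* Generic MII advertisement bits from <linux/mii.h>; the values match the
 * driver-private ANAR_* definitions removed above (ANAR_10 0x0020,
 * ANAR_10FD 0x0040, ANAR_TX 0x0080, ANAR_TXFD 0x0100). */
#define ADVERTISE_10HALF  0x0020
#define ADVERTISE_10FULL  0x0040
#define ADVERTISE_100HALF 0x0080
#define ADVERTISE_100FULL 0x0100

/* Illustrative sketch of the advertisement-mask logic: clear all 10/100
 * speed/duplex bits, then set exactly one according to the requested
 * link mode. */
static unsigned short build_anar(unsigned short anar, int speed100, int full_duplex)
{
	anar &= ~(ADVERTISE_100FULL | ADVERTISE_100HALF |
		  ADVERTISE_10FULL | ADVERTISE_10HALF);

	if (speed100)
		anar |= full_duplex ? ADVERTISE_100FULL : ADVERTISE_100HALF;
	else
		anar |= full_duplex ? ADVERTISE_10FULL : ADVERTISE_10HALF;

	return anar;
}

int main(void)
{
	printf("100/full -> 0x%04x\n", (unsigned int)build_anar(0, 1, 1));
	printf("10/half  -> 0x%04x\n", (unsigned int)build_anar(0, 0, 0));
	return 0;
}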
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b0577dd1a42d..890d45e90b25 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -40,8 +40,7 @@ module_param(gso, bool, 0444);
#define VIRTNET_SEND_COMMAND_SG_MAX 2
-struct virtnet_info
-{
+struct virtnet_info {
struct virtio_device *vdev;
struct virtqueue *rvq, *svq, *cvq;
struct net_device *dev;
@@ -62,6 +61,10 @@ struct virtnet_info
/* Chain pages by the private ptr. */
struct page *pages;
+
+ /* fragments + linear part + virtio header */
+ struct scatterlist rx_sg[MAX_SKB_FRAGS + 2];
+ struct scatterlist tx_sg[MAX_SKB_FRAGS + 2];
};
struct skb_vnet_hdr {
@@ -119,7 +122,7 @@ static void skb_xmit_done(struct virtqueue *svq)
struct virtnet_info *vi = svq->vdev->priv;
/* Suppress further interrupts. */
- svq->vq_ops->disable_cb(svq);
+ virtqueue_disable_cb(svq);
/* We were probably waiting for more output buffers. */
netif_wake_queue(vi->dev);
@@ -207,7 +210,7 @@ static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
return -EINVAL;
}
- page = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
+ page = virtqueue_get_buf(vi->rvq, &len);
if (!page) {
pr_debug("%s: rx error: %d buffers missing\n",
skb->dev->name, hdr->mhdr.num_buffers);
@@ -324,10 +327,8 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
{
struct sk_buff *skb;
struct skb_vnet_hdr *hdr;
- struct scatterlist sg[2];
int err;
- sg_init_table(sg, 2);
skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
if (unlikely(!skb))
return -ENOMEM;
@@ -335,11 +336,11 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
skb_put(skb, MAX_PACKET_LEN);
hdr = skb_vnet_hdr(skb);
- sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr);
+ sg_set_buf(vi->rx_sg, &hdr->hdr, sizeof hdr->hdr);
- skb_to_sgvec(skb, sg + 1, 0, skb->len);
+ skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len);
- err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 2, skb);
+ err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, 2, skb, gfp);
if (err < 0)
dev_kfree_skb(skb);
@@ -348,13 +349,11 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
{
- struct scatterlist sg[MAX_SKB_FRAGS + 2];
struct page *first, *list = NULL;
char *p;
int i, err, offset;
- sg_init_table(sg, MAX_SKB_FRAGS + 2);
- /* page in sg[MAX_SKB_FRAGS + 1] is list tail */
+ /* page in vi->rx_sg[MAX_SKB_FRAGS + 1] is list tail */
for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
first = get_a_page(vi, gfp);
if (!first) {
@@ -362,7 +361,7 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
give_pages(vi, list);
return -ENOMEM;
}
- sg_set_buf(&sg[i], page_address(first), PAGE_SIZE);
+ sg_set_buf(&vi->rx_sg[i], page_address(first), PAGE_SIZE);
/* chain new page in list head to match sg */
first->private = (unsigned long)list;
@@ -376,18 +375,18 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
}
p = page_address(first);
- /* sg[0], sg[1] share the same page */
- /* a separated sg[0] for virtio_net_hdr only during to QEMU bug*/
- sg_set_buf(&sg[0], p, sizeof(struct virtio_net_hdr));
+ /* vi->rx_sg[0], vi->rx_sg[1] share the same page */
+ /* a separated vi->rx_sg[0] for virtio_net_hdr only due to QEMU bug */
+ sg_set_buf(&vi->rx_sg[0], p, sizeof(struct virtio_net_hdr));
- /* sg[1] for data packet, from offset */
+ /* vi->rx_sg[1] for data packet, from offset */
offset = sizeof(struct padded_vnet_hdr);
- sg_set_buf(&sg[1], p + offset, PAGE_SIZE - offset);
+ sg_set_buf(&vi->rx_sg[1], p + offset, PAGE_SIZE - offset);
/* chain first in list head */
first->private = (unsigned long)list;
- err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, MAX_SKB_FRAGS + 2,
- first);
+ err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
+ first, gfp);
if (err < 0)
give_pages(vi, first);
@@ -397,16 +396,15 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
{
struct page *page;
- struct scatterlist sg;
int err;
page = get_a_page(vi, gfp);
if (!page)
return -ENOMEM;
- sg_init_one(&sg, page_address(page), PAGE_SIZE);
+ sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE);
- err = vi->rvq->vq_ops->add_buf(vi->rvq, &sg, 0, 1, page);
+ err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, 1, page, gfp);
if (err < 0)
give_pages(vi, page);
@@ -435,7 +433,7 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
} while (err > 0);
if (unlikely(vi->num > vi->max))
vi->max = vi->num;
- vi->rvq->vq_ops->kick(vi->rvq);
+ virtqueue_kick(vi->rvq);
return !oom;
}
@@ -444,7 +442,7 @@ static void skb_recv_done(struct virtqueue *rvq)
struct virtnet_info *vi = rvq->vdev->priv;
/* Schedule NAPI, Suppress further interrupts if successful. */
if (napi_schedule_prep(&vi->napi)) {
- rvq->vq_ops->disable_cb(rvq);
+ virtqueue_disable_cb(rvq);
__napi_schedule(&vi->napi);
}
}
@@ -473,7 +471,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
again:
while (received < budget &&
- (buf = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
+ (buf = virtqueue_get_buf(vi->rvq, &len)) != NULL) {
receive_buf(vi->dev, buf, len);
--vi->num;
received++;
@@ -487,9 +485,9 @@ again:
/* Out of packets? */
if (received < budget) {
napi_complete(napi);
- if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq)) &&
+ if (unlikely(!virtqueue_enable_cb(vi->rvq)) &&
napi_schedule_prep(napi)) {
- vi->rvq->vq_ops->disable_cb(vi->rvq);
+ virtqueue_disable_cb(vi->rvq);
__napi_schedule(napi);
goto again;
}
@@ -503,7 +501,7 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
struct sk_buff *skb;
unsigned int len, tot_sgs = 0;
- while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
+ while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
pr_debug("Sent skb %p\n", skb);
vi->dev->stats.tx_bytes += skb->len;
vi->dev->stats.tx_packets++;
@@ -515,12 +513,9 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
{
- struct scatterlist sg[2+MAX_SKB_FRAGS];
struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
- sg_init_table(sg, 2+MAX_SKB_FRAGS);
-
pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -554,12 +549,12 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
/* Encode metadata header at front. */
if (vi->mergeable_rx_bufs)
- sg_set_buf(sg, &hdr->mhdr, sizeof hdr->mhdr);
+ sg_set_buf(vi->tx_sg, &hdr->mhdr, sizeof hdr->mhdr);
else
- sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr);
+ sg_set_buf(vi->tx_sg, &hdr->hdr, sizeof hdr->hdr);
- hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
- return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb);
+ hdr->num_sg = skb_to_sgvec(skb, vi->tx_sg + 1, 0, skb->len) + 1;
+ return virtqueue_add_buf(vi->svq, vi->tx_sg, hdr->num_sg, 0, skb);
}
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -578,14 +573,14 @@ again:
if (unlikely(capacity < 0)) {
netif_stop_queue(dev);
dev_warn(&dev->dev, "Unexpected full queue\n");
- if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
- vi->svq->vq_ops->disable_cb(vi->svq);
+ if (unlikely(!virtqueue_enable_cb(vi->svq))) {
+ virtqueue_disable_cb(vi->svq);
netif_start_queue(dev);
goto again;
}
return NETDEV_TX_BUSY;
}
- vi->svq->vq_ops->kick(vi->svq);
+ virtqueue_kick(vi->svq);
/* Don't wait up for transmitted skbs to be freed. */
skb_orphan(skb);
@@ -595,12 +590,12 @@ again:
* before it gets out of hand. Naturally, this wastes entries. */
if (capacity < 2+MAX_SKB_FRAGS) {
netif_stop_queue(dev);
- if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
+ if (unlikely(!virtqueue_enable_cb(vi->svq))) {
/* More just got used, free them then recheck. */
capacity += free_old_xmit_skbs(vi);
if (capacity >= 2+MAX_SKB_FRAGS) {
netif_start_queue(dev);
- vi->svq->vq_ops->disable_cb(vi->svq);
+ virtqueue_disable_cb(vi->svq);
}
}
}
@@ -645,7 +640,7 @@ static int virtnet_open(struct net_device *dev)
* now. virtnet_poll wants to re-enable the queue, so we disable it here.
* We synchronize against interrupts via NAPI_STATE_SCHED */
if (napi_schedule_prep(&vi->napi)) {
- vi->rvq->vq_ops->disable_cb(vi->rvq);
+ virtqueue_disable_cb(vi->rvq);
__napi_schedule(&vi->napi);
}
return 0;
@@ -682,15 +677,15 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
sg_set_buf(&sg[out + in - 1], &status, sizeof(status));
- BUG_ON(vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi) < 0);
+ BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi) < 0);
- vi->cvq->vq_ops->kick(vi->cvq);
+ virtqueue_kick(vi->cvq);
/*
* Spin for a response, the kick causes an ioport write, trapping
* into the hypervisor, so the request should be handled immediately.
*/
- while (!vi->cvq->vq_ops->get_buf(vi->cvq, &tmp))
+ while (!virtqueue_get_buf(vi->cvq, &tmp))
cpu_relax();
return status == VIRTIO_NET_OK;
@@ -722,7 +717,6 @@ static void virtnet_set_rx_mode(struct net_device *dev)
struct scatterlist sg[2];
u8 promisc, allmulti;
struct virtio_net_ctrl_mac *mac_data;
- struct dev_addr_list *addr;
struct netdev_hw_addr *ha;
int uc_count;
int mc_count;
@@ -779,8 +773,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
mac_data->entries = mc_count;
i = 0;
- netdev_for_each_mc_addr(addr, dev)
- memcpy(&mac_data->macs[i++][0], addr->da_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, dev)
+ memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
sg_set_buf(&sg[1], mac_data,
sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
@@ -942,6 +936,8 @@ static int virtnet_probe(struct virtio_device *vdev)
vdev->priv = vi;
vi->pages = NULL;
INIT_DELAYED_WORK(&vi->refill, refill_work);
+ sg_init_table(vi->rx_sg, ARRAY_SIZE(vi->rx_sg));
+ sg_init_table(vi->tx_sg, ARRAY_SIZE(vi->tx_sg));
/* If we can receive ANY GSO packets, we must allocate large ones. */
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
@@ -1006,13 +1002,13 @@ static void free_unused_bufs(struct virtnet_info *vi)
{
void *buf;
while (1) {
- buf = vi->svq->vq_ops->detach_unused_buf(vi->svq);
+ buf = virtqueue_detach_unused_buf(vi->svq);
if (!buf)
break;
dev_kfree_skb(buf);
}
while (1) {
- buf = vi->rvq->vq_ops->detach_unused_buf(vi->rvq);
+ buf = virtqueue_detach_unused_buf(vi->rvq);
if (!buf)
break;
if (vi->mergeable_rx_bufs || vi->big_packets)
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index cff3485d9673..39462321f5fb 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -992,7 +992,6 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD,
tq->tx_ring.next2fill);
}
- netdev->trans_start = jiffies;
return NETDEV_TX_OK;
@@ -1174,7 +1173,6 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
netif_receive_skb(skb);
}
- adapter->netdev->last_rx = jiffies;
ctx->skb = NULL;
}
@@ -1675,11 +1673,11 @@ vmxnet3_copy_mc(struct net_device *netdev)
/* We may be called with BH disabled */
buf = kmalloc(sz, GFP_ATOMIC);
if (buf) {
- struct dev_mc_list *mc;
+ struct netdev_hw_addr *ha;
int i = 0;
- netdev_for_each_mc_addr(mc, netdev)
- memcpy(buf + i++ * ETH_ALEN, mc->dmi_addr,
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(buf + i++ * ETH_ALEN, ha->addr,
ETH_ALEN);
}
}
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index a21a25d218b6..a5fc8166c01d 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -357,8 +357,10 @@ __vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
switch (host_type) {
case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
- access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
- VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
+ if (func_id == 0) {
+ access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
+ VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
+ }
break;
case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
@@ -633,8 +635,10 @@ vxge_hw_device_initialize(
__vxge_hw_device_pci_e_init(hldev);
status = __vxge_hw_device_reg_addr_get(hldev);
- if (status != VXGE_HW_OK)
+ if (status != VXGE_HW_OK) {
+ vfree(hldev);
goto exit;
+ }
__vxge_hw_device_id_get(hldev);
__vxge_hw_device_host_info_get(hldev);
@@ -1218,14 +1222,13 @@ __vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
}
/*
- * __vxge_hw_ring_initial_replenish - Initial replenish of RxDs
+ * __vxge_hw_ring_replenish - Initial replenish of RxDs
* This function replenishes the RxDs from reserve array to work array
*/
enum vxge_hw_status
-vxge_hw_ring_replenish(struct __vxge_hw_ring *ring, u16 min_flag)
+vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
{
void *rxd;
- int i = 0;
struct __vxge_hw_channel *channel;
enum vxge_hw_status status = VXGE_HW_OK;
@@ -1246,11 +1249,6 @@ vxge_hw_ring_replenish(struct __vxge_hw_ring *ring, u16 min_flag)
}
vxge_hw_ring_rxd_post(ring, rxd);
- if (min_flag) {
- i++;
- if (i == VXGE_HW_RING_MIN_BUFF_ALLOCATION)
- break;
- }
}
status = VXGE_HW_OK;
exit:
@@ -1355,7 +1353,7 @@ __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
* Currently we don't have a case when the 1) is done without the 2).
*/
if (ring->rxd_init) {
- status = vxge_hw_ring_replenish(ring, 1);
+ status = vxge_hw_ring_replenish(ring);
if (status != VXGE_HW_OK) {
__vxge_hw_ring_delete(vp);
goto exit;
@@ -1417,7 +1415,7 @@ enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
goto exit;
if (ring->rxd_init) {
- status = vxge_hw_ring_replenish(ring, 1);
+ status = vxge_hw_ring_replenish(ring);
if (status != VXGE_HW_OK)
goto exit;
}
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index 13f5416307f8..4ae2625d4d8f 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -765,10 +765,18 @@ struct vxge_hw_device_hw_info {
#define VXGE_HW_SR_VH_VIRTUAL_FUNCTION 6
#define VXGE_HW_VH_NORMAL_FUNCTION 7
u64 function_mode;
-#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION 0
-#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION 1
+#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION 0
+#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION 1
#define VXGE_HW_FUNCTION_MODE_SRIOV 2
#define VXGE_HW_FUNCTION_MODE_MRIOV 3
+#define VXGE_HW_FUNCTION_MODE_MRIOV_8 4
+#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17 5
+#define VXGE_HW_FUNCTION_MODE_SRIOV_8 6
+#define VXGE_HW_FUNCTION_MODE_SRIOV_4 7
+#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2 8
+#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_4 9
+#define VXGE_HW_FUNCTION_MODE_MRIOV_4 10
+
u32 func_id;
u64 vpath_mask;
struct vxge_hw_device_version fw_version;
@@ -1915,20 +1923,32 @@ static inline void *vxge_os_dma_malloc(struct pci_dev *pdev,
gfp_t flags;
void *vaddr;
unsigned long misaligned = 0;
+ int realloc_flag = 0;
*p_dma_acch = *p_dmah = NULL;
if (in_interrupt())
flags = GFP_ATOMIC | GFP_DMA;
else
flags = GFP_KERNEL | GFP_DMA;
-
- size += VXGE_CACHE_LINE_SIZE;
-
+realloc:
vaddr = kmalloc((size), flags);
if (vaddr == NULL)
return vaddr;
- misaligned = (unsigned long)VXGE_ALIGN(*((u64 *)&vaddr),
+ misaligned = (unsigned long)VXGE_ALIGN((unsigned long)vaddr,
VXGE_CACHE_LINE_SIZE);
+ if (realloc_flag)
+ goto out;
+
+ if (misaligned) {
+ /* misaligned, free current one and try allocating
+ * size + VXGE_CACHE_LINE_SIZE memory
+ */
+ kfree((void *) vaddr);
+ size += VXGE_CACHE_LINE_SIZE;
+ realloc_flag = 1;
+ goto realloc;
+ }
+out:
*(unsigned long *)p_dma_acch = misaligned;
vaddr = (void *)((u8 *)vaddr + misaligned);
return vaddr;
@@ -2254,4 +2274,6 @@ enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
struct vxge_hw_rth_hash_types *hash_type,
u16 bucket_size);
+enum vxge_hw_status
+__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id);
#endif
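The vxge_os_dma_malloc() change above only over-allocates when the first allocation comes back misaligned: it frees the buffer, retries with VXGE_CACHE_LINE_SIZE extra bytes, and returns a pointer advanced by the recorded adjustment (stored via p_dma_acch) so the caller can later recover the original address. A minimal user-space analogue of that over-allocate-and-offset idea is sketched below; alloc_aligned() and CACHE_LINE are illustrative names, not part of the driver.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define CACHE_LINE 64	/* stand-in for VXGE_CACHE_LINE_SIZE */

/* Illustrative user-space analogue: allocate, and only if the buffer is
 * misaligned free it and retry with CACHE_LINE extra bytes, then return a
 * pointer bumped to the next cache-line boundary.  The bump is reported so
 * the caller can free the original pointer later. */
static void *alloc_aligned(size_t size, size_t *offset_out)
{
	void *vaddr = malloc(size);
	size_t misaligned;

	if (!vaddr)
		return NULL;

	misaligned = (uintptr_t)vaddr % CACHE_LINE;
	if (misaligned) {
		free(vaddr);
		vaddr = malloc(size + CACHE_LINE);
		if (!vaddr)
			return NULL;
		misaligned = (uintptr_t)vaddr % CACHE_LINE;
		if (misaligned)
			misaligned = CACHE_LINE - misaligned;
	}

	*offset_out = misaligned;
	return (uint8_t *)vaddr + misaligned;
}

int main(void)
{
	size_t off;
	void *p = alloc_aligned(1000, &off);

	if (!p)
		return 1;
	printf("aligned %p (offset %zu)\n", p, off);
	free((uint8_t *)p - off);
	return 0;
}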
diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c
index aaf374cfd322..cadef8549c06 100644
--- a/drivers/net/vxge/vxge-ethtool.c
+++ b/drivers/net/vxge/vxge-ethtool.c
@@ -109,7 +109,7 @@ static void vxge_ethtool_gregs(struct net_device *dev,
int index, offset;
enum vxge_hw_status status;
u64 reg;
- u8 *reg_space = (u8 *) space;
+ u64 *reg_space = (u64 *) space;
struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
pci_get_drvdata(vdev->pdev);
@@ -129,8 +129,7 @@ static void vxge_ethtool_gregs(struct net_device *dev,
__func__, __LINE__);
return;
}
-
- memcpy((reg_space + offset), &reg, 8);
+ *reg_space++ = reg;
}
}
}
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index ba6d0da78c30..2bab36421f71 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -445,7 +445,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
ring->ndev->name, __func__, __LINE__);
ring->pkts_processed = 0;
- vxge_hw_ring_replenish(ringh, 0);
+ vxge_hw_ring_replenish(ringh);
do {
prefetch((char *)dtr + L1_CACHE_BYTES);
@@ -1118,7 +1118,7 @@ vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
*/
static void vxge_set_multicast(struct net_device *dev)
{
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
struct vxgedev *vdev;
int i, mcast_cnt = 0;
struct __vxge_hw_device *hldev;
@@ -1218,8 +1218,8 @@ static void vxge_set_multicast(struct net_device *dev)
}
/* Add new ones */
- netdev_for_each_mc_addr(mclist, dev) {
- memcpy(mac_info.macaddr, mclist->dmi_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, dev) {
+ memcpy(mac_info.macaddr, ha->addr, ETH_ALEN);
for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
vpath_idx++) {
mac_info.vpath_no = vpath_idx;
@@ -1364,28 +1364,26 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
{
struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
- int msix_id, alarm_msix_id;
- int tim_msix_id[4] = {[0 ...3] = 0};
+ int msix_id = 0;
+ int tim_msix_id[4] = {0, 1, 0, 0};
+ int alarm_msix_id = VXGE_ALARM_MSIX_ID;
vxge_hw_vpath_intr_enable(vpath->handle);
if (vdev->config.intr_type == INTA)
vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle);
else {
- msix_id = vp_id * VXGE_HW_VPATH_MSIX_ACTIVE;
- alarm_msix_id =
- VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
-
- tim_msix_id[0] = msix_id;
- tim_msix_id[1] = msix_id + 1;
vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
alarm_msix_id);
+ msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1);
/* enable the alarm vector */
- vxge_hw_vpath_msix_unmask(vpath->handle, alarm_msix_id);
+ msix_id = (vpath->handle->vpath->hldev->first_vp_id *
+ VXGE_HW_VPATH_MSIX_ACTIVE) + alarm_msix_id;
+ vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
}
}
@@ -1406,12 +1404,13 @@ void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
if (vdev->config.intr_type == INTA)
vxge_hw_vpath_inta_mask_tx_rx(vpath->handle);
else {
- msix_id = vp_id * VXGE_HW_VPATH_MSIX_ACTIVE;
+ msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1);
/* disable the alarm vector */
- msix_id = VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
+ msix_id = (vpath->handle->vpath->hldev->first_vp_id *
+ VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
}
}
@@ -2224,19 +2223,18 @@ vxge_alarm_msix_handle(int irq, void *dev_id)
enum vxge_hw_status status;
struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id;
struct vxgedev *vdev = vpath->vdev;
- int alarm_msix_id =
- VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
+ int msix_id = (vpath->handle->vpath->vp_id *
+ VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
for (i = 0; i < vdev->no_of_vpath; i++) {
- vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle,
- alarm_msix_id);
+ vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
vdev->exec_mode);
if (status == VXGE_HW_OK) {
vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
- alarm_msix_id);
+ msix_id);
continue;
}
vxge_debug_intr(VXGE_ERR,
@@ -2249,18 +2247,17 @@ vxge_alarm_msix_handle(int irq, void *dev_id)
static int vxge_alloc_msix(struct vxgedev *vdev)
{
int j, i, ret = 0;
- int intr_cnt = 0;
- int alarm_msix_id = 0, msix_intr_vect = 0;
+ int msix_intr_vect = 0, temp;
vdev->intr_cnt = 0;
+start:
/* Tx/Rx MSIX Vectors count */
vdev->intr_cnt = vdev->no_of_vpath * 2;
/* Alarm MSIX Vectors count */
vdev->intr_cnt++;
- intr_cnt = (vdev->max_vpath_supported * 2) + 1;
- vdev->entries = kzalloc(intr_cnt * sizeof(struct msix_entry),
+ vdev->entries = kzalloc(vdev->intr_cnt * sizeof(struct msix_entry),
GFP_KERNEL);
if (!vdev->entries) {
vxge_debug_init(VXGE_ERR,
@@ -2269,8 +2266,9 @@ static int vxge_alloc_msix(struct vxgedev *vdev)
return -ENOMEM;
}
- vdev->vxge_entries = kzalloc(intr_cnt * sizeof(struct vxge_msix_entry),
- GFP_KERNEL);
+ vdev->vxge_entries =
+ kzalloc(vdev->intr_cnt * sizeof(struct vxge_msix_entry),
+ GFP_KERNEL);
if (!vdev->vxge_entries) {
vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
VXGE_DRIVER_NAME);
@@ -2278,9 +2276,7 @@ static int vxge_alloc_msix(struct vxgedev *vdev)
return -ENOMEM;
}
- /* Last vector in the list is used for alarm */
- alarm_msix_id = VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
- for (i = 0, j = 0; i < vdev->max_vpath_supported; i++) {
+ for (i = 0, j = 0; i < vdev->no_of_vpath; i++) {
msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
@@ -2298,47 +2294,31 @@ static int vxge_alloc_msix(struct vxgedev *vdev)
}
/* Initialize the alarm vector */
- vdev->entries[j].entry = alarm_msix_id;
- vdev->vxge_entries[j].entry = alarm_msix_id;
+ vdev->entries[j].entry = VXGE_ALARM_MSIX_ID;
+ vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
vdev->vxge_entries[j].in_use = 0;
- ret = pci_enable_msix(vdev->pdev, vdev->entries, intr_cnt);
- /* if driver request exceeeds available irq's, request with a small
- * number.
- */
- if (ret > 0) {
- vxge_debug_init(VXGE_ERR,
- "%s: MSI-X enable failed for %d vectors, available: %d",
- VXGE_DRIVER_NAME, intr_cnt, ret);
- vdev->max_vpath_supported = vdev->no_of_vpath;
- intr_cnt = (vdev->max_vpath_supported * 2) + 1;
-
- /* Reset the alarm vector setting */
- vdev->entries[j].entry = 0;
- vdev->vxge_entries[j].entry = 0;
-
- /* Initialize the alarm vector with new setting */
- vdev->entries[intr_cnt - 1].entry = alarm_msix_id;
- vdev->vxge_entries[intr_cnt - 1].entry = alarm_msix_id;
- vdev->vxge_entries[intr_cnt - 1].in_use = 0;
-
- ret = pci_enable_msix(vdev->pdev, vdev->entries, intr_cnt);
- if (!ret)
- vxge_debug_init(VXGE_ERR,
- "%s: MSI-X enabled for %d vectors",
- VXGE_DRIVER_NAME, intr_cnt);
- }
+ ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt);
- if (ret) {
+ if (ret > 0) {
vxge_debug_init(VXGE_ERR,
"%s: MSI-X enable failed for %d vectors, ret: %d",
- VXGE_DRIVER_NAME, intr_cnt, ret);
+ VXGE_DRIVER_NAME, vdev->intr_cnt, ret);
kfree(vdev->entries);
kfree(vdev->vxge_entries);
vdev->entries = NULL;
vdev->vxge_entries = NULL;
+
+ if ((max_config_vpath != VXGE_USE_DEFAULT) || (ret < 3))
+ return -ENODEV;
+ /* Try with fewer vectors by reducing the vpath count */
+ temp = (ret - 1)/2;
+ vxge_close_vpaths(vdev, temp);
+ vdev->no_of_vpath = temp;
+ goto start;
+ } else if (ret < 0)
return -ENODEV;
- }
+
return 0;
}
@@ -2346,43 +2326,26 @@ static int vxge_enable_msix(struct vxgedev *vdev)
{
int i, ret = 0;
- enum vxge_hw_status status;
/* 0 - Tx, 1 - Rx */
- int tim_msix_id[4];
- int alarm_msix_id = 0, msix_intr_vect = 0;
+ int tim_msix_id[4] = {0, 1, 0, 0};
+
vdev->intr_cnt = 0;
/* allocate msix vectors */
ret = vxge_alloc_msix(vdev);
if (!ret) {
- /* Last vector in the list is used for alarm */
- alarm_msix_id =
- VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
for (i = 0; i < vdev->no_of_vpath; i++) {
/* If the fifo or ring is not enabled,
the MSI-X vector for it should be set to 0.
Hence initializing this array to all 0s.
*/
- memset(tim_msix_id, 0, sizeof(tim_msix_id));
- msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
- tim_msix_id[0] = msix_intr_vect;
+ vdev->vpaths[i].ring.rx_vector_no =
+ (vdev->vpaths[i].device_id *
+ VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
- tim_msix_id[1] = msix_intr_vect + 1;
- vdev->vpaths[i].ring.rx_vector_no = tim_msix_id[1];
-
- status = vxge_hw_vpath_msix_set(
- vdev->vpaths[i].handle,
- tim_msix_id, alarm_msix_id);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "vxge_hw_vpath_msix_set "
- "failed with status : %x", status);
- kfree(vdev->entries);
- kfree(vdev->vxge_entries);
- pci_disable_msix(vdev->pdev);
- return -ENODEV;
- }
+ vxge_hw_vpath_msix_set(vdev->vpaths[i].handle,
+ tim_msix_id, VXGE_ALARM_MSIX_ID);
}
}
@@ -2393,7 +2356,7 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev)
{
int intr_cnt;
- for (intr_cnt = 0; intr_cnt < (vdev->max_vpath_supported * 2 + 1);
+ for (intr_cnt = 0; intr_cnt < (vdev->no_of_vpath * 2 + 1);
intr_cnt++) {
if (vdev->vxge_entries[intr_cnt].in_use) {
synchronize_irq(vdev->entries[intr_cnt].vector);
@@ -2458,9 +2421,10 @@ static int vxge_add_isr(struct vxgedev *vdev)
switch (msix_idx) {
case 0:
snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
- "%s:vxge fn: %d vpath: %d Tx MSI-X: %d",
- vdev->ndev->name, pci_fun, vp_idx,
- vdev->entries[intr_cnt].entry);
+ "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
+ vdev->ndev->name,
+ vdev->entries[intr_cnt].entry,
+ pci_fun, vp_idx);
ret = request_irq(
vdev->entries[intr_cnt].vector,
vxge_tx_msix_handle, 0,
@@ -2472,9 +2436,10 @@ static int vxge_add_isr(struct vxgedev *vdev)
break;
case 1:
snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
- "%s:vxge fn: %d vpath: %d Rx MSI-X: %d",
- vdev->ndev->name, pci_fun, vp_idx,
- vdev->entries[intr_cnt].entry);
+ "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
+ vdev->ndev->name,
+ vdev->entries[intr_cnt].entry,
+ pci_fun, vp_idx);
ret = request_irq(
vdev->entries[intr_cnt].vector,
vxge_rx_msix_napi_handle,
@@ -2502,9 +2467,11 @@ static int vxge_add_isr(struct vxgedev *vdev)
if (irq_req) {
/* We requested for this msix interrupt */
vdev->vxge_entries[intr_cnt].in_use = 1;
+ msix_idx += vdev->vpaths[vp_idx].device_id *
+ VXGE_HW_VPATH_MSIX_ACTIVE;
vxge_hw_vpath_msix_unmask(
vdev->vpaths[vp_idx].handle,
- intr_idx);
+ msix_idx);
intr_cnt++;
}
@@ -2514,16 +2481,17 @@ static int vxge_add_isr(struct vxgedev *vdev)
vp_idx++;
}
- intr_cnt = vdev->max_vpath_supported * 2;
+ intr_cnt = vdev->no_of_vpath * 2;
snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
- "%s:vxge Alarm fn: %d MSI-X: %d",
- vdev->ndev->name, pci_fun,
- vdev->entries[intr_cnt].entry);
+ "%s:vxge:MSI-X %d - Alarm - fn:%d",
+ vdev->ndev->name,
+ vdev->entries[intr_cnt].entry,
+ pci_fun);
/* For Alarm interrupts */
ret = request_irq(vdev->entries[intr_cnt].vector,
vxge_alarm_msix_handle, 0,
vdev->desc[intr_cnt],
- &vdev->vpaths[vp_idx]);
+ &vdev->vpaths[0]);
if (ret) {
vxge_debug_init(VXGE_ERR,
"%s: MSIX - %d Registration failed",
@@ -2536,16 +2504,19 @@ static int vxge_add_isr(struct vxgedev *vdev)
goto INTA_MODE;
}
+ msix_idx = (vdev->vpaths[0].handle->vpath->vp_id *
+ VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
- intr_idx - 2);
+ msix_idx);
vdev->vxge_entries[intr_cnt].in_use = 1;
- vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[vp_idx];
+ vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0];
}
INTA_MODE:
#endif
- snprintf(vdev->desc[0], VXGE_INTR_STRLEN, "%s:vxge", vdev->ndev->name);
if (vdev->config.intr_type == INTA) {
+ snprintf(vdev->desc[0], VXGE_INTR_STRLEN,
+ "%s:vxge:INTA", vdev->ndev->name);
vxge_hw_device_set_intr_type(vdev->devh,
VXGE_HW_INTR_MODE_IRQLINE);
vxge_hw_vpath_tti_ci_set(vdev->devh,
@@ -3995,6 +3966,36 @@ static void vxge_io_resume(struct pci_dev *pdev)
netif_device_attach(netdev);
}
+static inline u32 vxge_get_num_vfs(u64 function_mode)
+{
+ u32 num_functions = 0;
+
+ switch (function_mode) {
+ case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
+ case VXGE_HW_FUNCTION_MODE_SRIOV_8:
+ num_functions = 8;
+ break;
+ case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
+ num_functions = 1;
+ break;
+ case VXGE_HW_FUNCTION_MODE_SRIOV:
+ case VXGE_HW_FUNCTION_MODE_MRIOV:
+ case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17:
+ num_functions = 17;
+ break;
+ case VXGE_HW_FUNCTION_MODE_SRIOV_4:
+ num_functions = 4;
+ break;
+ case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2:
+ num_functions = 2;
+ break;
+ case VXGE_HW_FUNCTION_MODE_MRIOV_8:
+ num_functions = 8; /* TODO */
+ break;
+ }
+ return num_functions;
+}
+
/**
* vxge_probe
* @pdev : structure containing the PCI related information of the device.
@@ -4022,14 +4023,19 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
u8 *macaddr;
struct vxge_mac_addrs *entry;
static int bus = -1, device = -1;
+ u32 host_type;
u8 new_device = 0;
+ enum vxge_hw_status is_privileged;
+ u32 function_mode;
+ u32 num_vfs = 0;
vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
attr.pdev = pdev;
- if (bus != pdev->bus->number)
- new_device = 1;
- if (device != PCI_SLOT(pdev->devfn))
+ /* In SRIOV-17 mode, functions of the same adapter
+ * can be deployed on different buses */
+ if ((!pdev->is_virtfn) && ((bus != pdev->bus->number) ||
+ (device != PCI_SLOT(pdev->devfn))))
new_device = 1;
bus = pdev->bus->number;
@@ -4046,9 +4052,11 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
driver_config->total_dev_cnt);
driver_config->config_dev_cnt = 0;
driver_config->total_dev_cnt = 0;
- driver_config->g_no_cpus = 0;
}
-
+ /* Now make the CPU-based vpath count calculation
+ * applicable to individual functions as well.
+ */
+ driver_config->g_no_cpus = 0;
driver_config->vpath_per_dev = max_config_vpath;
driver_config->total_dev_cnt++;
@@ -4161,6 +4169,11 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
"%s:%d Vpath mask = %llx", __func__, __LINE__,
(unsigned long long)vpath_mask);
+ function_mode = ll_config.device_hw_info.function_mode;
+ host_type = ll_config.device_hw_info.host_type;
+ is_privileged = __vxge_hw_device_is_privilaged(host_type,
+ ll_config.device_hw_info.func_id);
+
/* Check how many vpaths are available */
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
if (!((vpath_mask) & vxge_mBIT(i)))
@@ -4168,14 +4181,18 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
max_vpath_supported++;
}
+ if (new_device)
+ num_vfs = vxge_get_num_vfs(function_mode) - 1;
+
/* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
- if ((VXGE_HW_FUNCTION_MODE_SRIOV ==
- ll_config.device_hw_info.function_mode) &&
- (max_config_dev > 1) && (pdev->is_physfn)) {
- ret = pci_enable_sriov(pdev, max_config_dev - 1);
- if (ret)
- vxge_debug_ll_config(VXGE_ERR,
- "Failed to enable SRIOV: %d \n", ret);
+ if (is_sriov(function_mode) && (max_config_dev > 1) &&
+ (ll_config.intr_type != INTA) &&
+ (is_privileged == VXGE_HW_OK)) {
+ ret = pci_enable_sriov(pdev, ((max_config_dev - 1) < num_vfs)
+ ? (max_config_dev - 1) : num_vfs);
+ if (ret)
+ vxge_debug_ll_config(VXGE_ERR,
+ "Failed in enabling SRIOV mode: %d\n", ret);
}
/*
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index 7c83ba4be9d7..60276b20fa5e 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -31,6 +31,7 @@
#define PCI_DEVICE_ID_TITAN_UNI 0x5833
#define VXGE_USE_DEFAULT 0xffffffff
#define VXGE_HW_VPATH_MSIX_ACTIVE 4
+#define VXGE_ALARM_MSIX_ID 2
#define VXGE_HW_RXSYNC_FREQ_CNT 4
#define VXGE_LL_WATCH_DOG_TIMEOUT (15 * HZ)
#define VXGE_LL_RX_COPY_THRESHOLD 256
@@ -89,6 +90,11 @@
#define VXGE_LL_MAX_FRAME_SIZE(dev) ((dev)->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE)
+#define is_sriov(function_mode) \
+ ((function_mode == VXGE_HW_FUNCTION_MODE_SRIOV) || \
+ (function_mode == VXGE_HW_FUNCTION_MODE_SRIOV_8) || \
+ (function_mode == VXGE_HW_FUNCTION_MODE_SRIOV_4))
+
enum vxge_reset_event {
/* reset events */
VXGE_LL_VPATH_RESET = 0,
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index 2c012f4ce465..f83e6aee3f6a 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -231,8 +231,7 @@ void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
{
__vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)),
- 0, 32),
+ (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
&channel->common_reg->set_msix_mask_vect[msix_id%4]);
return;
@@ -252,8 +251,7 @@ vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
{
__vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)),
- 0, 32),
+ (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
&channel->common_reg->clear_msix_mask_vect[msix_id%4]);
return;
@@ -878,7 +876,7 @@ void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
channel = &ring->channel;
- rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
+ rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
if (ring->stats->common_stats.usage_cnt > 0)
ring->stats->common_stats.usage_cnt--;
@@ -902,7 +900,7 @@ void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
channel = &ring->channel;
wmb();
- rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
+ rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
vxge_hw_channel_dtr_post(channel, rxdh);
@@ -966,6 +964,7 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
struct __vxge_hw_channel *channel;
struct vxge_hw_ring_rxd_1 *rxdp;
enum vxge_hw_status status = VXGE_HW_OK;
+ u64 control_0, own;
channel = &ring->channel;
@@ -977,8 +976,12 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
goto exit;
}
+ control_0 = rxdp->control_0;
+ own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
+ *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
+
/* check whether it is not the end */
- if (!(rxdp->control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER)) {
+ if (!own || ((*t_code == VXGE_HW_RING_T_CODE_FRM_DROP) && own)) {
vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
0);
@@ -986,8 +989,6 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
++ring->cmpl_cnt;
vxge_hw_channel_dtr_complete(channel);
- *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(rxdp->control_0);
-
vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
ring->stats->common_stats.usage_cnt++;
@@ -1035,12 +1036,13 @@ enum vxge_hw_status vxge_hw_ring_handle_tcode(
* such as unknown IPv6 header), Drop it !!!
*/
- if (t_code == 0 || t_code == 5) {
+ if (t_code == VXGE_HW_RING_T_CODE_OK ||
+ t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
status = VXGE_HW_OK;
goto exit;
}
- if (t_code > 0xF) {
+ if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
status = VXGE_HW_ERR_INVALID_TCODE;
goto exit;
}
@@ -2216,29 +2218,24 @@ exit:
* This API will associate a given MSIX vector numbers with the four TIM
* interrupts and alarm interrupt.
*/
-enum vxge_hw_status
+void
vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
int alarm_msix_id)
{
u64 val64;
struct __vxge_hw_virtualpath *vpath = vp->vpath;
struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
- u32 first_vp_id = vpath->hldev->first_vp_id;
+ u32 vp_id = vp->vpath->vp_id;
val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
- (first_vp_id * 4) + tim_msix_id[0]) |
+ (vp_id * 4) + tim_msix_id[0]) |
VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
- (first_vp_id * 4) + tim_msix_id[1]) |
- VXGE_HW_INTERRUPT_CFG0_GROUP2_MSIX_FOR_TXTI(
- (first_vp_id * 4) + tim_msix_id[2]);
-
- val64 |= VXGE_HW_INTERRUPT_CFG0_GROUP3_MSIX_FOR_TXTI(
- (first_vp_id * 4) + tim_msix_id[3]);
+ (vp_id * 4) + tim_msix_id[1]);
writeq(val64, &vp_reg->interrupt_cfg0);
writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
- (first_vp_id * 4) + alarm_msix_id),
+ (vpath->hldev->first_vp_id * 4) + alarm_msix_id),
&vp_reg->interrupt_cfg2);
if (vpath->hldev->config.intr_mode ==
@@ -2259,7 +2256,7 @@ vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
0, 32), &vp_reg->one_shot_vect3_en);
}
- return VXGE_HW_OK;
+ return;
}
/**
@@ -2279,8 +2276,7 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
struct __vxge_hw_device *hldev = vp->vpath->hldev;
__vxge_hw_pio_mem_write32_upper(
- (u32) vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
- (msix_id / 4)), 0, 32),
+ (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
&hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
return;
@@ -2305,14 +2301,12 @@ vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
if (hldev->config.intr_mode ==
VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
__vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
- (msix_id/4)), 0, 32),
+ (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
&hldev->common_reg->
clr_msix_one_shot_vec[msix_id%4]);
} else {
__vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
- (msix_id/4)), 0, 32),
+ (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
&hldev->common_reg->
clear_msix_mask_vect[msix_id%4]);
}
@@ -2337,8 +2331,7 @@ vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
struct __vxge_hw_device *hldev = vp->vpath->hldev;
__vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
- (msix_id/4)), 0, 32),
+ (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
&hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
return;
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index 861c853e3e84..c252f3d3f650 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -1866,6 +1866,51 @@ struct vxge_hw_ring_rxd_info {
u32 rth_hash_type;
u32 rth_value;
};
+/**
+ * enum vxge_hw_ring_tcode - Transfer codes returned by adapter
+ * @VXGE_HW_RING_T_CODE_OK: Transfer ok.
+ * @VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH: Layer 3 checksum presentation
+ * configuration mismatch.
+ * @VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH: Layer 4 checksum presentation
+ * configuration mismatch.
+ * @VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH: Layer 3 and Layer 4 checksum
+ * presentation configuration mismatch.
+ * @VXGE_HW_RING_T_CODE_L3_PKT_ERR: Layer 3 error: unparseable packet,
+ * such as unknown IPv6 header.
+ * @VXGE_HW_RING_T_CODE_L2_FRM_ERR: Layer 2 error: frame integrity
+ * error (such as FCS or ECC).
+ * @VXGE_HW_RING_T_CODE_BUF_SIZE_ERR: Buffer size error: the RxD
+ * buffer(s) were not appropriately sized and data loss occurred.
+ * @VXGE_HW_RING_T_CODE_INT_ECC_ERR: Internal ECC error: RxD corrupted.
+ * @VXGE_HW_RING_T_CODE_BENIGN_OVFLOW: Benign overflow: the contents of
+ * Segment1 exceeded the capacity of Buffer1 and the remainder
+ * was placed in Buffer2. Segment2 now starts in Buffer3.
+ * No data loss or errors occurred.
+ * @VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF: Buffer size 0: one of the RxD's
+ * assigned buffers has a size of 0 bytes.
+ * @VXGE_HW_RING_T_CODE_FRM_DROP: Frame dropped, either due to
+ * VPath Reset or because of a VPIN mismatch.
+ * @VXGE_HW_RING_T_CODE_UNUSED: Unused
+ * @VXGE_HW_RING_T_CODE_MULTI_ERR: Multiple errors: more than one
+ * transfer code condition occurred.
+ *
+ * Transfer codes returned by adapter.
+ */
+enum vxge_hw_ring_tcode {
+ VXGE_HW_RING_T_CODE_OK = 0x0,
+ VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH = 0x1,
+ VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH = 0x2,
+ VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH = 0x3,
+ VXGE_HW_RING_T_CODE_L3_PKT_ERR = 0x5,
+ VXGE_HW_RING_T_CODE_L2_FRM_ERR = 0x6,
+ VXGE_HW_RING_T_CODE_BUF_SIZE_ERR = 0x7,
+ VXGE_HW_RING_T_CODE_INT_ECC_ERR = 0x8,
+ VXGE_HW_RING_T_CODE_BENIGN_OVFLOW = 0x9,
+ VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF = 0xA,
+ VXGE_HW_RING_T_CODE_FRM_DROP = 0xC,
+ VXGE_HW_RING_T_CODE_UNUSED = 0xE,
+ VXGE_HW_RING_T_CODE_MULTI_ERR = 0xF
+};
/**
* enum enum vxge_hw_ring_hash_type - RTH hash types
@@ -1910,7 +1955,7 @@ vxge_hw_ring_rxd_post_post(
void *rxdh);
enum vxge_hw_status
-vxge_hw_ring_replenish(struct __vxge_hw_ring *ring_handle, u16 min_flag);
+vxge_hw_ring_replenish(struct __vxge_hw_ring *ring_handle);
void
vxge_hw_ring_rxd_post_post_wmb(
@@ -2042,7 +2087,6 @@ void vxge_hw_fifo_txdl_free(
#define VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET (VXGE_HW_BLOCK_SIZE-8)
#define VXGE_HW_RING_MEMBLOCK_IDX_OFFSET (VXGE_HW_BLOCK_SIZE-16)
-#define VXGE_HW_RING_MIN_BUFF_ALLOCATION 64
/*
* struct __vxge_hw_ring_rxd_priv - Receive descriptor HW-private data.
@@ -2332,7 +2376,7 @@ enum vxge_hw_status vxge_hw_vpath_alarm_process(
struct __vxge_hw_vpath_handle *vpath_handle,
u32 skip_alarms);
-enum vxge_hw_status
+void
vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vpath_handle,
int *tim_msix_id, int alarm_msix_id);
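The new vxge_hw_ring_tcode enum above replaces the bare 0/5/0xF literals in vxge_hw_ring_handle_tcode(). The short, self-contained C sketch below mirrors that classification (OK and the unparseable-L3 code are accepted, anything above the highest defined code is an invalid t_code, the rest are reportable errors); the names and the main() harness are illustrative, not driver code.

#include <stdio.h>

/* Transfer-code values as listed in the vxge_hw_ring_tcode enum above
 * (only the ones the classification needs are repeated here). */
enum tcode {
	T_CODE_OK         = 0x0,
	T_CODE_L3_PKT_ERR = 0x5,
	T_CODE_MULTI_ERR  = 0xF
};

/* Illustrative sketch (not the driver function): mirror the check done by
 * the patched vxge_hw_ring_handle_tcode(). */
static const char *classify_tcode(unsigned int t_code)
{
	if (t_code == T_CODE_OK || t_code == T_CODE_L3_PKT_ERR)
		return "ok";
	if (t_code > T_CODE_MULTI_ERR)
		return "invalid t_code";
	return "error";
}

int main(void)
{
	static const unsigned int samples[] = { 0x0, 0x5, 0x6, 0xC, 0x10 };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("t_code 0x%X -> %s\n", samples[i], classify_tcode(samples[i]));
	return 0;
}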
diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h
index 77c2a754b7b8..5da7ab1fd307 100644
--- a/drivers/net/vxge/vxge-version.h
+++ b/drivers/net/vxge/vxge-version.h
@@ -17,7 +17,7 @@
#define VXGE_VERSION_MAJOR "2"
#define VXGE_VERSION_MINOR "0"
-#define VXGE_VERSION_FIX "6"
-#define VXGE_VERSION_BUILD "18937"
+#define VXGE_VERSION_FIX "8"
+#define VXGE_VERSION_BUILD "20182"
#define VXGE_VERSION_FOR "k"
#endif
diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
index cd8cb95c5bd7..cf9e15fd8d91 100644
--- a/drivers/net/wan/cycx_x25.c
+++ b/drivers/net/wan/cycx_x25.c
@@ -634,11 +634,12 @@ static netdev_tx_t cycx_netdevice_hard_start_xmit(struct sk_buff *skb,
}
} else { /* chan->protocol == ETH_P_X25 */
switch (skb->data[0]) {
- case 0: break;
- case 1: /* Connect request */
+ case X25_IFACE_DATA:
+ break;
+ case X25_IFACE_CONNECT:
cycx_x25_chan_connect(dev);
goto free_packet;
- case 2: /* Disconnect request */
+ case X25_IFACE_DISCONNECT:
cycx_x25_chan_disconnect(dev);
goto free_packet;
default:
@@ -1406,7 +1407,8 @@ static void cycx_x25_set_chan_state(struct net_device *dev, u8 state)
reset_timer(dev);
if (chan->protocol == ETH_P_X25)
- cycx_x25_chan_send_event(dev, 1);
+ cycx_x25_chan_send_event(dev,
+ X25_IFACE_CONNECT);
break;
case WAN_CONNECTING:
@@ -1424,7 +1426,8 @@ static void cycx_x25_set_chan_state(struct net_device *dev, u8 state)
}
if (chan->protocol == ETH_P_X25)
- cycx_x25_chan_send_event(dev, 2);
+ cycx_x25_chan_send_event(dev,
+ X25_IFACE_DISCONNECT);
netif_wake_queue(dev);
break;
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index a4859f7a7cc0..d45b08d1dbc9 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -1175,8 +1175,6 @@ static netdev_tx_t dscc4_start_xmit(struct sk_buff *skb,
spin_unlock(&dpriv->lock);
#endif
- dev->trans_start = jiffies;
-
if (debug > 2)
dscc4_tx_print(dev, dpriv, "Xmit");
/* To be cleaned(unsigned int)/optimized. Later, ok ? */
diff --git a/drivers/net/wan/hd64570.c b/drivers/net/wan/hd64570.c
index 4dde2ea4a189..a3ea27ce04f2 100644
--- a/drivers/net/wan/hd64570.c
+++ b/drivers/net/wan/hd64570.c
@@ -658,7 +658,6 @@ static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev)
#endif
writew(len, &desc->len);
writeb(ST_TX_EOM, &desc->stat);
- dev->trans_start = jiffies;
port->txin = next_desc(port, port->txin, 1);
sca_outw(desc_offset(port, port->txin, 1),
diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c
index aad9ed45c254..ea476cbd38b5 100644
--- a/drivers/net/wan/hd64572.c
+++ b/drivers/net/wan/hd64572.c
@@ -585,7 +585,6 @@ static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev)
writew(len, &desc->len);
writeb(ST_TX_EOM, &desc->stat);
- dev->trans_start = jiffies;
port->txin = (port->txin + 1) % card->tx_ring_buffers;
sca_outl(desc_offset(port, port->txin, 1),
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index c7adbb79f7cc..70527e5a54a2 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -49,14 +49,14 @@ static void x25_connect_disconnect(struct net_device *dev, int reason, int code)
static void x25_connected(struct net_device *dev, int reason)
{
- x25_connect_disconnect(dev, reason, 1);
+ x25_connect_disconnect(dev, reason, X25_IFACE_CONNECT);
}
static void x25_disconnected(struct net_device *dev, int reason)
{
- x25_connect_disconnect(dev, reason, 2);
+ x25_connect_disconnect(dev, reason, X25_IFACE_DISCONNECT);
}
@@ -71,7 +71,7 @@ static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
return NET_RX_DROP;
ptr = skb->data;
- *ptr = 0;
+ *ptr = X25_IFACE_DATA;
skb->protocol = x25_type_trans(skb, dev);
return netif_rx(skb);
@@ -94,13 +94,13 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
/* X.25 to LAPB */
switch (skb->data[0]) {
- case 0: /* Data to be transmitted */
+ case X25_IFACE_DATA: /* Data to be transmitted */
skb_pull(skb, 1);
if ((result = lapb_data_request(dev, skb)) != LAPB_OK)
dev_kfree_skb(skb);
return NETDEV_TX_OK;
- case 1:
+ case X25_IFACE_CONNECT:
if ((result = lapb_connect_request(dev))!= LAPB_OK) {
if (result == LAPB_CONNECTED)
/* Send connect confirm. msg to level 3 */
@@ -112,7 +112,7 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
}
break;
- case 2:
+ case X25_IFACE_DISCONNECT:
if ((result = lapb_disconnect_request(dev)) != LAPB_OK) {
if (result == LAPB_NOTCONNECTED)
/* Send disconnect confirm. msg to level 3 */
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index 0c2cdde686a0..88e363033e23 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -891,7 +891,6 @@ static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
wmb();
queue_put_desc(queue_ids[port->id].tx, tx_desc_phys(port, n), desc);
- dev->trans_start = jiffies;
if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
#if DEBUG_TX
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 98e2f99903d7..4d4dc38c7290 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -139,7 +139,7 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb)
return NET_RX_DROP;
ptr = skb->data;
- *ptr = 0x00;
+ *ptr = X25_IFACE_DATA;
skb->protocol = x25_type_trans(skb, dev);
return netif_rx(skb);
@@ -161,14 +161,14 @@ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
goto drop;
switch (skb->data[0]) {
- case 0x00:
+ case X25_IFACE_DATA:
break;
- case 0x01:
+ case X25_IFACE_CONNECT:
if ((err = lapb_connect_request(dev)) != LAPB_OK)
printk(KERN_ERR "lapbeth: lapb_connect_request "
"error: %d\n", err);
goto drop;
- case 0x02:
+ case X25_IFACE_DISCONNECT:
if ((err = lapb_disconnect_request(dev)) != LAPB_OK)
printk(KERN_ERR "lapbeth: lapb_disconnect_request "
"err: %d\n", err);
@@ -225,7 +225,7 @@ static void lapbeth_connected(struct net_device *dev, int reason)
}
ptr = skb_put(skb, 1);
- *ptr = 0x01;
+ *ptr = X25_IFACE_CONNECT;
skb->protocol = x25_type_trans(skb, dev);
netif_rx(skb);
@@ -242,7 +242,7 @@ static void lapbeth_disconnected(struct net_device *dev, int reason)
}
ptr = skb_put(skb, 1);
- *ptr = 0x02;
+ *ptr = X25_IFACE_DISCONNECT;
skb->protocol = x25_type_trans(skb, dev);
netif_rx(skb);
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index b27850377121..e2c6f7f4f51c 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -1506,8 +1506,6 @@ static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
/* send now! */
LMC_CSR_WRITE (sc, csr_txpoll, 0);
- dev->trans_start = jiffies;
-
spin_unlock_irqrestore(&sc->lmc_lock, flags);
lmc_trace(dev, "lmc_start_xmit_out");
@@ -2103,7 +2101,7 @@ static void lmc_driver_timeout(struct net_device *dev)
printk("%s: Xmitter busy|\n", dev->name);
sc->extra_stats.tx_tbusy_calls++;
- if (jiffies - dev->trans_start < TX_TIMEOUT)
+ if (jiffies - dev_trans_start(dev) < TX_TIMEOUT)
goto bug_out;
/*
@@ -2135,7 +2133,7 @@ static void lmc_driver_timeout(struct net_device *dev)
sc->lmc_device->stats.tx_errors++;
sc->extra_stats.tx_ProcTimeout++; /* -baz */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
bug_out:
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index 3f744c643094..c6aa66e5b52f 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -396,7 +396,7 @@ static void tx1_dma_buf_check(pc300_t * card, int ch)
u16 next_bd = card->chan[ch].tx_next_bd;
u32 scabase = card->hw.scabase;
- printk ("\nnfree_tx_bd = %d \n", card->chan[ch].nfree_tx_bd);
+ printk ("\nnfree_tx_bd = %d\n", card->chan[ch].nfree_tx_bd);
printk("#CH%d: f_bd = %d(0x%08x), n_bd = %d(0x%08x)\n", ch,
first_bd, TX_BD_ADDR(ch, first_bd),
next_bd, TX_BD_ADDR(ch, next_bd));
@@ -1790,7 +1790,7 @@ static void cpc_tx_timeout(struct net_device *dev)
cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) &
~(CPLD_REG2_FALC_LED1 << (2 * ch)));
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
CPC_UNLOCK(card, flags);
netif_wake_queue(dev);
}
@@ -1849,7 +1849,6 @@ static int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev)
if (d->trace_on) {
cpc_trace(dev, skb, 'T');
}
- dev->trans_start = jiffies;
/* Start transmission */
CPC_LOCK(card, flags);
diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c
index 4917a94943bd..4293889e287e 100644
--- a/drivers/net/wan/pc300_tty.c
+++ b/drivers/net/wan/pc300_tty.c
@@ -366,7 +366,7 @@ static void cpc_tty_close(struct tty_struct *tty, struct file *flip)
int res;
if (!tty || !tty->driver_data ) {
- CPC_TTY_DBG("hdlx-tty: no TTY in close \n");
+ CPC_TTY_DBG("hdlx-tty: no TTY in close\n");
return;
}
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index 31c41af2246d..43ae6f440bfb 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -1352,7 +1352,7 @@ static int sdla_set_config(struct net_device *dev, struct ifmap *map)
return(-EINVAL);
if (!request_region(map->base_addr, SDLA_IO_EXTENTS, dev->name)){
- printk(KERN_WARNING "SDLA: io-port 0x%04lx in use \n", dev->base_addr);
+ printk(KERN_WARNING "SDLA: io-port 0x%04lx in use\n", dev->base_addr);
return(-EINVAL);
}
base = map->base_addr;
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index 541c700dceef..db73a7be199f 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -298,7 +298,6 @@ static netdev_tx_t wanxl_xmit(struct sk_buff *skb, struct net_device *dev)
desc->stat = PACKET_FULL;
writel(1 << (DOORBELL_TO_CARD_TX_0 + port->node),
port->card->plx + PLX_DOORBELL_TO_CARD);
- dev->trans_start = jiffies;
port->tx_out = (port->tx_out + 1) % TX_BUFFERS;
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 80d5c5834a0b..166e77dfffda 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -29,12 +29,12 @@
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
-#include <linux/x25.h>
#include <linux/lapb.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/slab.h>
+#include <net/x25device.h>
#include "x25_asy.h"
#include <net/x25device.h>
@@ -315,15 +315,15 @@ static netdev_tx_t x25_asy_xmit(struct sk_buff *skb,
}
switch (skb->data[0]) {
- case 0x00:
+ case X25_IFACE_DATA:
break;
- case 0x01: /* Connection request .. do nothing */
+ case X25_IFACE_CONNECT: /* Connection request .. do nothing */
err = lapb_connect_request(dev);
if (err != LAPB_OK)
printk(KERN_ERR "x25_asy: lapb_connect_request error - %d\n", err);
kfree_skb(skb);
return NETDEV_TX_OK;
- case 0x02: /* Disconnect request .. do nothing - hang up ?? */
+ case X25_IFACE_DISCONNECT: /* do nothing - hang up ?? */
err = lapb_disconnect_request(dev);
if (err != LAPB_OK)
printk(KERN_ERR "x25_asy: lapb_disconnect_request error - %d\n", err);
@@ -411,7 +411,7 @@ static void x25_asy_connected(struct net_device *dev, int reason)
}
ptr = skb_put(skb, 1);
- *ptr = 0x01;
+ *ptr = X25_IFACE_CONNECT;
skb->protocol = x25_type_trans(skb, sl->dev);
netif_rx(skb);
@@ -430,7 +430,7 @@ static void x25_asy_disconnected(struct net_device *dev, int reason)
}
ptr = skb_put(skb, 1);
- *ptr = 0x02;
+ *ptr = X25_IFACE_DISCONNECT;
skb->protocol = x25_type_trans(skb, sl->dev);
netif_rx(skb);
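Several WAN drivers above (cycx_x25, hdlc_x25, lapbether, x25_asy) replace the magic first-byte values 0x00/0x01/0x02 with the named X25_IFACE_DATA/CONNECT/DISCONNECT constants. The hedged sketch below shows the transmit-side dispatch pattern on that pseudo-header byte as a self-contained C program; the LAPB calls are stubbed out with printf(), and x25_iface_dispatch() is an illustrative name, not a kernel function.

#include <stdio.h>

/* First-byte pseudo-header values on the X.25 device interface; the numeric
 * values correspond to the 0x00/0x01/0x02 literals replaced above. */
#define X25_IFACE_DATA       0x00
#define X25_IFACE_CONNECT    0x01
#define X25_IFACE_DISCONNECT 0x02

/* Illustrative sketch of the transmit-side dispatch the drivers above do on
 * skb->data[0]: data frames pass through, connect/disconnect requests become
 * LAPB control actions (stubbed out here). */
static int x25_iface_dispatch(const unsigned char *frame)
{
	switch (frame[0]) {
	case X25_IFACE_DATA:
		printf("data frame: hand payload to LAPB\n");
		return 0;
	case X25_IFACE_CONNECT:
		printf("connect request: would call lapb_connect_request()\n");
		return 0;
	case X25_IFACE_DISCONNECT:
		printf("disconnect request: would call lapb_disconnect_request()\n");
		return 0;
	default:
		printf("unknown pseudo-header byte 0x%02x: drop\n", frame[0]);
		return -1;
	}
}

int main(void)
{
	static const unsigned char connect_req[] = { X25_IFACE_CONNECT };
	static const unsigned char data_frame[]  = { X25_IFACE_DATA, 0xde, 0xad };

	x25_iface_dispatch(connect_req);
	x25_iface_dispatch(data_frame);
	return 0;
}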
diff --git a/drivers/net/wimax/i2400m/tx.c b/drivers/net/wimax/i2400m/tx.c
index b0cb90624cf6..6db909ecf1c9 100644
--- a/drivers/net/wimax/i2400m/tx.c
+++ b/drivers/net/wimax/i2400m/tx.c
@@ -689,7 +689,7 @@ try_new:
pl_type, buf_len);
tx_msg->num_pls = le16_to_cpu(num_pls+1);
tx_msg->size += padded_len;
- d_printf(2, dev, "TX: appended %zu b (up to %u b) pl #%u \n",
+ d_printf(2, dev, "TX: appended %zu b (up to %u b) pl #%u\n",
padded_len, tx_msg->size, num_pls+1);
d_printf(2, dev,
"TX: appended hdr @%zu %zu b pl #%u @%zu %zu/%zu b\n",
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 588943660755..174e3442d519 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -5,6 +5,7 @@
menuconfig WLAN
bool "Wireless LAN"
depends on !S390
+ depends on NET
select WIRELESS
default y
---help---
@@ -38,6 +39,12 @@ config LIBERTAS_THINFIRM
---help---
A library for Marvell Libertas 8xxx devices using thinfirm.
+config LIBERTAS_THINFIRM_DEBUG
+ bool "Enable full debugging output in the Libertas thin firmware module."
+ depends on LIBERTAS_THINFIRM
+ ---help---
+ Debugging support.
+
config LIBERTAS_THINFIRM_USB
tristate "Marvell Libertas 8388 USB 802.11b/g cards with thin firmware"
depends on LIBERTAS_THINFIRM && USB
@@ -210,90 +217,7 @@ config USB_NET_RNDIS_WLAN
If you choose to build a module, it'll be called rndis_wlan.
-config RTL8180
- tristate "Realtek 8180/8185 PCI support"
- depends on MAC80211 && PCI && EXPERIMENTAL
- select EEPROM_93CX6
- ---help---
- This is a driver for RTL8180 and RTL8185 based cards.
- These are PCI based chips found in cards such as:
-
- (RTL8185 802.11g)
- A-Link WL54PC
-
- (RTL8180 802.11b)
- Belkin F5D6020 v3
- Belkin F5D6020 v3
- Dlink DWL-610
- Dlink DWL-510
- Netgear MA521
- Level-One WPC-0101
- Acer Aspire 1357 LMi
- VCTnet PC-11B1
- Ovislink AirLive WL-1120PCM
- Mentor WL-PCI
- Linksys WPC11 v4
- TrendNET TEW-288PI
- D-Link DWL-520 Rev D
- Repotec RP-WP7126
- TP-Link TL-WN250/251
- Zonet ZEW1000
- Longshine LCS-8031-R
- HomeLine HLW-PCC200
- GigaFast WF721-AEX
- Planet WL-3553
- Encore ENLWI-PCI1-NT
- TrendNET TEW-266PC
- Gigabyte GN-WLMR101
- Siemens-fujitsu Amilo D1840W
- Edimax EW-7126
- PheeNet WL-11PCIR
- Tonze PC-2100T
- Planet WL-8303
- Dlink DWL-650 v M1
- Edimax EW-7106
- Q-Tec 770WC
- Topcom Skyr@cer 4011b
- Roper FreeLan 802.11b (edition 2004)
- Wistron Neweb Corp CB-200B
- Pentagram HorNET
- QTec 775WC
- TwinMOS Booming B Series
- Micronet SP906BB
- Sweex LC700010
- Surecom EP-9428
- Safecom SWLCR-1100
-
- Thanks to Realtek for their support!
-
-config RTL8187
- tristate "Realtek 8187 and 8187B USB support"
- depends on MAC80211 && USB
- select EEPROM_93CX6
- ---help---
- This is a driver for RTL8187 and RTL8187B based cards.
- These are USB based chips found in devices such as:
-
- Netgear WG111v2
- Level 1 WNC-0301USB
- Micronet SP907GK V5
- Encore ENUWI-G2
- Trendnet TEW-424UB
- ASUS P5B Deluxe/P5K Premium motherboards
- Toshiba Satellite Pro series of laptops
- Asus Wireless Link
- Linksys WUSB54GC-EU v2
- (v1 = rt73usb; v3 is rt2070-based,
- use staging/rt3070 or try rt2800usb)
-
- Thanks to Realtek for their support!
-
-# If possible, automatically enable LEDs for RTL8187.
-
-config RTL8187_LEDS
- bool
- depends on RTL8187 && MAC80211_LEDS && (LEDS_CLASS = y || LEDS_CLASS = RTL8187)
- default y
+source "drivers/net/wireless/rtl818x/Kconfig"
config ADM8211
tristate "ADMtek ADM8211 support"
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index ab61d2b558d6..880ad9d170c2 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1318,21 +1318,19 @@ static void adm8211_bss_info_changed(struct ieee80211_hw *dev,
}
static u64 adm8211_prepare_multicast(struct ieee80211_hw *hw,
- int mc_count, struct dev_addr_list *mclist)
+ struct netdev_hw_addr_list *mc_list)
{
- unsigned int bit_nr, i;
+ unsigned int bit_nr;
u32 mc_filter[2];
+ struct netdev_hw_addr *ha;
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0; i < mc_count; i++) {
- if (!mclist)
- break;
- bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ netdev_hw_addr_list_for_each(ha, mc_list) {
+ bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
bit_nr &= 0x3F;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
- mclist = mclist->next;
}
return mc_filter[0] | ((u64)(mc_filter[1]) << 32);
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index dc5018a6d9ed..a441aad922c2 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -2876,7 +2876,7 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
ai->wep_capable = (cap_rid.softCap & cpu_to_le16(0x02)) ? 1 : 0;
ai->max_wep_idx = (cap_rid.softCap & cpu_to_le16(0x80)) ? 3 : 0;
- airo_print_info(dev->name, "Firmware version %x.%x.%02x",
+ airo_print_info(dev->name, "Firmware version %x.%x.%02d",
((le16_to_cpu(cap_rid.softVer) >> 8) & 0xF),
(le16_to_cpu(cap_rid.softVer) & 0xFF),
le16_to_cpu(cap_rid.softSubVer));
@@ -3193,19 +3193,26 @@ static void airo_print_status(const char *devname, u16 status)
{
u8 reason = status & 0xFF;
- switch (status) {
+ switch (status & 0xFF00) {
case STAT_NOBEACON:
- airo_print_dbg(devname, "link lost (missed beacons)");
- break;
- case STAT_MAXRETRIES:
- case STAT_MAXARL:
- airo_print_dbg(devname, "link lost (max retries)");
- break;
- case STAT_FORCELOSS:
- airo_print_dbg(devname, "link lost (local choice)");
- break;
- case STAT_TSFSYNC:
- airo_print_dbg(devname, "link lost (TSF sync lost)");
+ switch (status) {
+ case STAT_NOBEACON:
+ airo_print_dbg(devname, "link lost (missed beacons)");
+ break;
+ case STAT_MAXRETRIES:
+ case STAT_MAXARL:
+ airo_print_dbg(devname, "link lost (max retries)");
+ break;
+ case STAT_FORCELOSS:
+ airo_print_dbg(devname, "link lost (local choice)");
+ break;
+ case STAT_TSFSYNC:
+ airo_print_dbg(devname, "link lost (TSF sync lost)");
+ break;
+ default:
+ airo_print_dbg(devname, "unknown status %x\n", status);
+ break;
+ }
break;
case STAT_DEAUTH:
airo_print_dbg(devname, "deauthenticated (reason: %d)", reason);
@@ -3221,7 +3228,11 @@ static void airo_print_status(const char *devname, u16 status)
airo_print_dbg(devname, "authentication failed (reason: %d)",
reason);
break;
+ case STAT_ASSOC:
+ case STAT_REASSOC:
+ break;
default:
+ airo_print_dbg(devname, "unknown status %x\n", status);
break;
}
}
diff --git a/drivers/net/wireless/airo_cs.c b/drivers/net/wireless/airo_cs.c
index f6036fb42319..33bdc6a84e81 100644
--- a/drivers/net/wireless/airo_cs.c
+++ b/drivers/net/wireless/airo_cs.c
@@ -75,42 +75,7 @@ static void airo_release(struct pcmcia_device *link);
static void airo_detach(struct pcmcia_device *p_dev);
-/*
- You'll also need to prototype all the functions that will actually
- be used to talk to your device. See 'pcmem_cs' for a good example
- of a fully self-sufficient driver; the other drivers rely more or
- less on other parts of the kernel.
-*/
-
-/*
- A linked list of "instances" of the aironet device. Each actual
- PCMCIA card corresponds to one device instance, and is described
- by one struct pcmcia_device structure (defined in ds.h).
-
- You may not want to use a linked list for this -- for example, the
- memory card driver uses an array of struct pcmcia_device pointers,
- where minor device numbers are used to derive the corresponding
- array index.
-*/
-
-/*
- A driver needs to provide a dev_node_t structure for each device
- on a card. In some cases, there is only one device per card (for
- example, ethernet cards, modems). In other cases, there may be
- many actual or logical devices (SCSI adapters, memory cards with
- multiple partitions). The dev_node_t structures need to be kept
- in a linked list starting at the 'dev' field of a struct pcmcia_device
- structure. We allocate them in the card's private data structure,
- because they generally shouldn't be allocated dynamically.
-
- In this case, we also provide a flag to indicate if a device is
- "stopped" due to a power management event, or card ejection. The
- device IO routines can use a flag like this to throttle IO to a
- card that is not ready to accept it.
-*/
-
typedef struct local_info_t {
- dev_node_t node;
struct net_device *eth_dev;
} local_info_t;
@@ -132,10 +97,6 @@ static int airo_probe(struct pcmcia_device *p_dev)
dev_dbg(&p_dev->dev, "airo_attach()\n");
- /* Interrupt setup */
- p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- p_dev->irq.Handler = NULL;
-
/*
General socket configuration defaults can go here. In this
client, we assume very little, and rely on the CIS for almost
@@ -212,9 +173,7 @@ static int airo_cs_config_check(struct pcmcia_device *p_dev,
else if (dflt->vpp1.present & (1<<CISTPL_POWER_VNOM))
p_dev->conf.Vpp = dflt->vpp1.param[CISTPL_POWER_VNOM]/10000;
- /* Do we need to allocate an interrupt? */
- if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
- p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
+ p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
@@ -300,16 +259,8 @@ static int airo_config(struct pcmcia_device *link)
if (ret)
goto failed;
- /*
- Allocate an interrupt line. Note that this does not assign a
- handler to the interrupt, unless the 'Handler' member of the
- irq structure is initialized.
- */
- if (link->conf.Attributes & CONF_ENABLE_IRQ) {
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
- goto failed;
- }
+ if (!link->irq)
+ goto failed;
/*
This actually configures the PCMCIA socket -- setting up
@@ -320,26 +271,17 @@ static int airo_config(struct pcmcia_device *link)
if (ret)
goto failed;
((local_info_t *)link->priv)->eth_dev =
- init_airo_card(link->irq.AssignedIRQ,
+ init_airo_card(link->irq,
link->io.BasePort1, 1, &link->dev);
if (!((local_info_t *)link->priv)->eth_dev)
goto failed;
- /*
- At this point, the dev_node_t structure(s) need to be
- initialized and arranged in a linked list at link->dev_node.
- */
- strcpy(dev->node.dev_name, ((local_info_t *)link->priv)->eth_dev->name);
- dev->node.major = dev->node.minor = 0;
- link->dev_node = &dev->node;
-
/* Finally, report what we've done */
- printk(KERN_INFO "%s: index 0x%02x: ",
- dev->node.dev_name, link->conf.ConfigIndex);
+ dev_info(&link->dev, "index 0x%02x: ",
+ link->conf.ConfigIndex);
if (link->conf.Vpp)
printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10);
- if (link->conf.Attributes & CONF_ENABLE_IRQ)
- printk(", irq %d", link->irq.AssignedIRQ);
+ printk(", irq %d", link->irq);
if (link->io.NumPorts1)
printk(", io 0x%04x-0x%04x", link->io.BasePort1,
link->io.BasePort1+link->io.NumPorts1-1);
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 0fb419936dff..7a626d4e100f 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -1889,6 +1889,7 @@ static void at76_dwork_hw_scan(struct work_struct *work)
}
static int at76_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
struct cfg80211_scan_request *req)
{
struct at76_priv *priv = hw->priv;
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index 4e7a7fd695c8..0a75be027afa 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -3,7 +3,7 @@ menuconfig ATH_COMMON
depends on CFG80211
---help---
This will enable the support for the Atheros wireless drivers.
- ath5k, ath9k and ar9170 drivers share some common code, this option
+ ath5k, ath9k, ath9k_htc and ar9170 drivers share some common code, this option
enables the common ath.ko module which shares common helpers.
For more information and documentation on this module you can visit:
diff --git a/drivers/net/wireless/ath/ar9170/ar9170.h b/drivers/net/wireless/ath/ar9170/ar9170.h
index dc662b76a1c8..4f845f80c098 100644
--- a/drivers/net/wireless/ath/ar9170/ar9170.h
+++ b/drivers/net/wireless/ath/ar9170/ar9170.h
@@ -109,41 +109,6 @@ struct ar9170_rxstream_mpdu_merge {
bool has_plcp;
};
-#define AR9170_NUM_TID 16
-#define WME_BA_BMP_SIZE 64
-#define AR9170_NUM_MAX_AGG_LEN (2 * WME_BA_BMP_SIZE)
-
-#define WME_AC_BE 2
-#define WME_AC_BK 3
-#define WME_AC_VI 1
-#define WME_AC_VO 0
-
-#define TID_TO_WME_AC(_tid) \
- ((((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \
- (((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \
- (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
- WME_AC_VO)
-
-#define BAW_WITHIN(_start, _bawsz, _seqno) \
- ((((_seqno) - (_start)) & 0xfff) < (_bawsz))
-
-enum ar9170_tid_state {
- AR9170_TID_STATE_INVALID,
- AR9170_TID_STATE_SHUTDOWN,
- AR9170_TID_STATE_PROGRESS,
- AR9170_TID_STATE_COMPLETE,
-};
-
-struct ar9170_sta_tid {
- struct list_head list;
- struct sk_buff_head queue;
- u8 addr[ETH_ALEN];
- u16 ssn;
- u16 tid;
- enum ar9170_tid_state state;
- bool active;
-};
-
struct ar9170_tx_queue_stats {
unsigned int len;
unsigned int limit;
@@ -152,14 +117,11 @@ struct ar9170_tx_queue_stats {
#define AR9170_QUEUE_TIMEOUT 64
#define AR9170_TX_TIMEOUT 8
-#define AR9170_BA_TIMEOUT 4
#define AR9170_JANITOR_DELAY 128
#define AR9170_TX_INVALID_RATE 0xffffffff
-#define AR9170_NUM_TX_STATUS 128
-#define AR9170_NUM_TX_AGG_MAX 30
-#define AR9170_NUM_TX_LIMIT_HARD AR9170_TXQ_DEPTH
-#define AR9170_NUM_TX_LIMIT_SOFT (AR9170_TXQ_DEPTH - 10)
+#define AR9170_NUM_TX_LIMIT_HARD AR9170_TXQ_DEPTH
+#define AR9170_NUM_TX_LIMIT_SOFT (AR9170_TXQ_DEPTH - 10)
struct ar9170 {
struct ieee80211_hw *hw;
@@ -234,11 +196,6 @@ struct ar9170 {
struct sk_buff_head tx_pending[__AR9170_NUM_TXQ];
struct sk_buff_head tx_status[__AR9170_NUM_TXQ];
struct delayed_work tx_janitor;
- /* tx ampdu */
- struct sk_buff_head tx_status_ampdu;
- spinlock_t tx_ampdu_list_lock;
- struct list_head tx_ampdu_list;
- atomic_t tx_ampdu_pending;
/* rxstream mpdu merge */
struct ar9170_rxstream_mpdu_merge rx_mpdu;
@@ -250,11 +207,6 @@ struct ar9170 {
u8 global_ampdu_factor;
};
-struct ar9170_sta_info {
- struct ar9170_sta_tid agg[AR9170_NUM_TID];
- unsigned int ampdu_max_len;
-};
-
struct ar9170_tx_info {
unsigned long timeout;
};
diff --git a/drivers/net/wireless/ath/ar9170/cmd.h b/drivers/net/wireless/ath/ar9170/cmd.h
index 826c45e6b274..ec8134b4b949 100644
--- a/drivers/net/wireless/ath/ar9170/cmd.h
+++ b/drivers/net/wireless/ath/ar9170/cmd.h
@@ -79,7 +79,7 @@ __regwrite_out : \
if (__nreg) { \
if (IS_ACCEPTING_CMD(__ar)) \
__err = ar->exec_cmd(__ar, AR9170_CMD_WREG, \
- 8 * __nreg, \
+ 8 * __nreg, \
(u8 *) &__ar->cmdbuf[1], \
0, NULL); \
__nreg = 0; \
diff --git a/drivers/net/wireless/ath/ar9170/eeprom.h b/drivers/net/wireless/ath/ar9170/eeprom.h
index d2c8cc83f1dd..6c4663883423 100644
--- a/drivers/net/wireless/ath/ar9170/eeprom.h
+++ b/drivers/net/wireless/ath/ar9170/eeprom.h
@@ -127,8 +127,8 @@ struct ar9170_eeprom {
__le16 checksum;
__le16 version;
u8 operating_flags;
-#define AR9170_OPFLAG_5GHZ 1
-#define AR9170_OPFLAG_2GHZ 2
+#define AR9170_OPFLAG_5GHZ 1
+#define AR9170_OPFLAG_2GHZ 2
u8 misc;
__le16 reg_domain[2];
u8 mac_address[6];
diff --git a/drivers/net/wireless/ath/ar9170/hw.h b/drivers/net/wireless/ath/ar9170/hw.h
index 0a1d4c28e68a..06f1f3c951a4 100644
--- a/drivers/net/wireless/ath/ar9170/hw.h
+++ b/drivers/net/wireless/ath/ar9170/hw.h
@@ -425,5 +425,6 @@ enum ar9170_txq {
#define AR9170_TXQ_DEPTH 32
#define AR9170_TX_MAX_PENDING 128
+#define AR9170_RX_STREAM_MAX_SIZE 65535
#endif /* __AR9170_HW_H */
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index c53692980990..2abc87578994 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -50,10 +50,6 @@ static int modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
-static int modparam_ht;
-module_param_named(ht, modparam_ht, bool, S_IRUGO);
-MODULE_PARM_DESC(ht, "enable MPDU aggregation.");
-
#define RATE(_bitrate, _hw_rate, _txpidx, _flags) { \
.bitrate = (_bitrate), \
.flags = (_flags), \
@@ -182,7 +178,6 @@ static struct ieee80211_supported_band ar9170_band_5GHz = {
};
static void ar9170_tx(struct ar9170 *ar);
-static bool ar9170_tx_ampdu(struct ar9170 *ar);
static inline u16 ar9170_get_seq_h(struct ieee80211_hdr *hdr)
{
@@ -195,21 +190,7 @@ static inline u16 ar9170_get_seq(struct sk_buff *skb)
return ar9170_get_seq_h((void *) txc->frame_data);
}
-static inline u16 ar9170_get_tid_h(struct ieee80211_hdr *hdr)
-{
- return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK;
-}
-
-static inline u16 ar9170_get_tid(struct sk_buff *skb)
-{
- struct ar9170_tx_control *txc = (void *) skb->data;
- return ar9170_get_tid_h((struct ieee80211_hdr *) txc->frame_data);
-}
-
-#define GET_NEXT_SEQ(seq) ((seq + 1) & 0x0fff)
-#define GET_NEXT_SEQ_FROM_SKB(skb) (GET_NEXT_SEQ(ar9170_get_seq(skb)))
-
-#if (defined AR9170_QUEUE_DEBUG) || (defined AR9170_TXAGG_DEBUG)
+#ifdef AR9170_QUEUE_DEBUG
static void ar9170_print_txheader(struct ar9170 *ar, struct sk_buff *skb)
{
struct ar9170_tx_control *txc = (void *) skb->data;
@@ -236,7 +217,7 @@ static void __ar9170_dump_txqueue(struct ar9170 *ar,
wiphy_name(ar->hw->wiphy), skb_queue_len(queue));
skb_queue_walk(queue, skb) {
- printk(KERN_DEBUG "index:%d => \n", i++);
+ printk(KERN_DEBUG "index:%d =>\n", i++);
ar9170_print_txheader(ar, skb);
}
if (i != skb_queue_len(queue))
@@ -244,7 +225,7 @@ static void __ar9170_dump_txqueue(struct ar9170 *ar,
"mismatch %d != %d\n", skb_queue_len(queue), i);
printk(KERN_DEBUG "---[ end ]---\n");
}
-#endif /* AR9170_QUEUE_DEBUG || AR9170_TXAGG_DEBUG */
+#endif /* AR9170_QUEUE_DEBUG */
#ifdef AR9170_QUEUE_DEBUG
static void ar9170_dump_txqueue(struct ar9170 *ar,
@@ -275,20 +256,6 @@ static void __ar9170_dump_txstats(struct ar9170 *ar)
}
#endif /* AR9170_QUEUE_STOP_DEBUG */
-#ifdef AR9170_TXAGG_DEBUG
-static void ar9170_dump_tx_status_ampdu(struct ar9170 *ar)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ar->tx_status_ampdu.lock, flags);
- printk(KERN_DEBUG "%s: A-MPDU tx_status queue => \n",
- wiphy_name(ar->hw->wiphy));
- __ar9170_dump_txqueue(ar, &ar->tx_status_ampdu);
- spin_unlock_irqrestore(&ar->tx_status_ampdu.lock, flags);
-}
-
-#endif /* AR9170_TXAGG_DEBUG */
-
/* caller must guarantee exclusive access for _bin_ queue. */
static void ar9170_recycle_expired(struct ar9170 *ar,
struct sk_buff_head *queue,
@@ -308,7 +275,7 @@ static void ar9170_recycle_expired(struct ar9170 *ar,
if (time_is_before_jiffies(arinfo->timeout)) {
#ifdef AR9170_QUEUE_DEBUG
printk(KERN_DEBUG "%s: [%ld > %ld] frame expired => "
- "recycle \n", wiphy_name(ar->hw->wiphy),
+ "recycle\n", wiphy_name(ar->hw->wiphy),
jiffies, arinfo->timeout);
ar9170_print_txheader(ar, skb);
#endif /* AR9170_QUEUE_DEBUG */
@@ -360,70 +327,6 @@ static void ar9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
ieee80211_tx_status_irqsafe(ar->hw, skb);
}
-static void ar9170_tx_fake_ampdu_status(struct ar9170 *ar)
-{
- struct sk_buff_head success;
- struct sk_buff *skb;
- unsigned int i;
- unsigned long queue_bitmap = 0;
-
- skb_queue_head_init(&success);
-
- while (skb_queue_len(&ar->tx_status_ampdu) > AR9170_NUM_TX_STATUS)
- __skb_queue_tail(&success, skb_dequeue(&ar->tx_status_ampdu));
-
- ar9170_recycle_expired(ar, &ar->tx_status_ampdu, &success);
-
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: collected %d A-MPDU frames.\n",
- wiphy_name(ar->hw->wiphy), skb_queue_len(&success));
- __ar9170_dump_txqueue(ar, &success);
-#endif /* AR9170_TXAGG_DEBUG */
-
- while ((skb = __skb_dequeue(&success))) {
- struct ieee80211_tx_info *txinfo;
-
- queue_bitmap |= BIT(skb_get_queue_mapping(skb));
-
- txinfo = IEEE80211_SKB_CB(skb);
- ieee80211_tx_info_clear_status(txinfo);
-
- txinfo->flags |= IEEE80211_TX_STAT_ACK;
- txinfo->status.rates[0].count = 1;
-
- skb_pull(skb, sizeof(struct ar9170_tx_control));
- ieee80211_tx_status_irqsafe(ar->hw, skb);
- }
-
- for_each_set_bit(i, &queue_bitmap, BITS_PER_BYTE) {
-#ifdef AR9170_QUEUE_STOP_DEBUG
- printk(KERN_DEBUG "%s: wake queue %d\n",
- wiphy_name(ar->hw->wiphy), i);
- __ar9170_dump_txstats(ar);
-#endif /* AR9170_QUEUE_STOP_DEBUG */
- ieee80211_wake_queue(ar->hw, i);
- }
-
- if (queue_bitmap)
- ar9170_tx(ar);
-}
-
-static void ar9170_tx_ampdu_callback(struct ar9170 *ar, struct sk_buff *skb)
-{
- struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
- struct ar9170_tx_info *arinfo = (void *) txinfo->rate_driver_data;
-
- arinfo->timeout = jiffies +
- msecs_to_jiffies(AR9170_BA_TIMEOUT);
-
- skb_queue_tail(&ar->tx_status_ampdu, skb);
- ar9170_tx_fake_ampdu_status(ar);
-
- if (atomic_dec_and_test(&ar->tx_ampdu_pending) &&
- !list_empty(&ar->tx_ampdu_list))
- ar9170_tx_ampdu(ar);
-}
-
void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -447,14 +350,10 @@ void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
ar9170_tx_status(ar, skb, AR9170_TX_STATUS_FAILED);
} else {
- if (info->flags & IEEE80211_TX_CTL_AMPDU) {
- ar9170_tx_ampdu_callback(ar, skb);
- } else {
- arinfo->timeout = jiffies +
- msecs_to_jiffies(AR9170_TX_TIMEOUT);
+ arinfo->timeout = jiffies +
+ msecs_to_jiffies(AR9170_TX_TIMEOUT);
- skb_queue_tail(&ar->tx_status[queue], skb);
- }
+ skb_queue_tail(&ar->tx_status[queue], skb);
}
if (!ar->tx_stats[queue].len &&
@@ -524,38 +423,6 @@ static struct sk_buff *ar9170_get_queued_skb(struct ar9170 *ar,
return NULL;
}
-static void ar9170_handle_block_ack(struct ar9170 *ar, u16 count, u16 r)
-{
- struct sk_buff *skb;
- struct ieee80211_tx_info *txinfo;
-
- while (count) {
- skb = ar9170_get_queued_skb(ar, NULL, &ar->tx_status_ampdu, r);
- if (!skb)
- break;
-
- txinfo = IEEE80211_SKB_CB(skb);
- ieee80211_tx_info_clear_status(txinfo);
-
- /* FIXME: maybe more ? */
- txinfo->status.rates[0].count = 1;
-
- skb_pull(skb, sizeof(struct ar9170_tx_control));
- ieee80211_tx_status_irqsafe(ar->hw, skb);
- count--;
- }
-
-#ifdef AR9170_TXAGG_DEBUG
- if (count) {
- printk(KERN_DEBUG "%s: got %d more failed mpdus, but no more "
- "suitable frames left in tx_status queue.\n",
- wiphy_name(ar->hw->wiphy), count);
-
- ar9170_dump_tx_status_ampdu(ar);
- }
-#endif /* AR9170_TXAGG_DEBUG */
-}
-
/*
* This worker tries to keep and maintain the tx_status queues,
* so we can guarantee that incoming tx_status reports are
@@ -592,8 +459,6 @@ static void ar9170_tx_janitor(struct work_struct *work)
resched = true;
}
- ar9170_tx_fake_ampdu_status(ar);
-
if (!resched)
return;
@@ -673,10 +538,6 @@ void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
case 0xc5:
/* BlockACK events */
- ar9170_handle_block_ack(ar,
- le16_to_cpu(cmd->ba_fail_cnt.failed),
- le16_to_cpu(cmd->ba_fail_cnt.rate));
- ar9170_tx_fake_ampdu_status(ar);
break;
case 0xc6:
@@ -689,7 +550,8 @@ void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
/* firmware debug */
case 0xca:
- printk(KERN_DEBUG "ar9170 FW: %.*s\n", len - 4, (char *)buf + 4);
+ printk(KERN_DEBUG "ar9170 FW: %.*s\n", len - 4,
+ (char *)buf + 4);
break;
case 0xcb:
len -= 4;
@@ -926,7 +788,6 @@ static void ar9170_rx_phy_status(struct ar9170 *ar,
/* TODO: we could do something with phy_errors */
status->signal = ar->noise[0] + phy->rssi_combined;
- status->noise = ar->noise[0];
}
static struct sk_buff *ar9170_rx_copy_data(u8 *buf, int len)
@@ -1247,7 +1108,6 @@ static int ar9170_op_start(struct ieee80211_hw *hw)
ar->global_ampdu_density = 6;
ar->global_ampdu_factor = 3;
- atomic_set(&ar->tx_ampdu_pending, 0);
ar->bad_hw_nagger = jiffies;
err = ar->open(ar);
@@ -1310,40 +1170,10 @@ static void ar9170_op_stop(struct ieee80211_hw *hw)
skb_queue_purge(&ar->tx_pending[i]);
skb_queue_purge(&ar->tx_status[i]);
}
- skb_queue_purge(&ar->tx_status_ampdu);
mutex_unlock(&ar->mutex);
}
-static void ar9170_tx_indicate_immba(struct ar9170 *ar, struct sk_buff *skb)
-{
- struct ar9170_tx_control *txc = (void *) skb->data;
-
- txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_IMM_AMPDU);
-}
-
-static void ar9170_tx_copy_phy(struct ar9170 *ar, struct sk_buff *dst,
- struct sk_buff *src)
-{
- struct ar9170_tx_control *dst_txc, *src_txc;
- struct ieee80211_tx_info *dst_info, *src_info;
- struct ar9170_tx_info *dst_arinfo, *src_arinfo;
-
- src_txc = (void *) src->data;
- src_info = IEEE80211_SKB_CB(src);
- src_arinfo = (void *) src_info->rate_driver_data;
-
- dst_txc = (void *) dst->data;
- dst_info = IEEE80211_SKB_CB(dst);
- dst_arinfo = (void *) dst_info->rate_driver_data;
-
- dst_txc->phy_control = src_txc->phy_control;
-
- /* same MCS for the whole aggregate */
- memcpy(dst_info->driver_rates, src_info->driver_rates,
- sizeof(dst_info->driver_rates));
-}
-
static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
@@ -1420,14 +1250,7 @@ static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
txc->phy_control |=
cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT);
- if (info->flags & IEEE80211_TX_CTL_AMPDU) {
- if (unlikely(!info->control.sta))
- goto err_out;
-
- txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
- } else {
- txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
- }
+ txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
}
return 0;
@@ -1537,158 +1360,6 @@ static void ar9170_tx_prepare_phy(struct ar9170 *ar, struct sk_buff *skb)
txc->phy_control |= cpu_to_le32(chains << AR9170_TX_PHY_TXCHAIN_SHIFT);
}
-static bool ar9170_tx_ampdu(struct ar9170 *ar)
-{
- struct sk_buff_head agg;
- struct ar9170_sta_tid *tid_info = NULL, *tmp;
- struct sk_buff *skb, *first = NULL;
- unsigned long flags, f2;
- unsigned int i = 0;
- u16 seq, queue, tmpssn;
- bool run = false;
-
- skb_queue_head_init(&agg);
-
- spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
- if (list_empty(&ar->tx_ampdu_list)) {
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: aggregation list is empty.\n",
- wiphy_name(ar->hw->wiphy));
-#endif /* AR9170_TXAGG_DEBUG */
- goto out_unlock;
- }
-
- list_for_each_entry_safe(tid_info, tmp, &ar->tx_ampdu_list, list) {
- if (tid_info->state != AR9170_TID_STATE_COMPLETE) {
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: dangling aggregation entry!\n",
- wiphy_name(ar->hw->wiphy));
-#endif /* AR9170_TXAGG_DEBUG */
- continue;
- }
-
- if (++i > 64) {
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: enough frames aggregated.\n",
- wiphy_name(ar->hw->wiphy));
-#endif /* AR9170_TXAGG_DEBUG */
- break;
- }
-
- queue = TID_TO_WME_AC(tid_info->tid);
-
- if (skb_queue_len(&ar->tx_pending[queue]) >=
- AR9170_NUM_TX_AGG_MAX) {
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: queue %d full.\n",
- wiphy_name(ar->hw->wiphy), queue);
-#endif /* AR9170_TXAGG_DEBUG */
- continue;
- }
-
- list_del_init(&tid_info->list);
-
- spin_lock_irqsave(&tid_info->queue.lock, f2);
- tmpssn = seq = tid_info->ssn;
- first = skb_peek(&tid_info->queue);
-
- if (likely(first))
- tmpssn = ar9170_get_seq(first);
-
- if (unlikely(tmpssn != seq)) {
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: ssn mismatch [%d != %d]\n.",
- wiphy_name(ar->hw->wiphy), seq, tmpssn);
-#endif /* AR9170_TXAGG_DEBUG */
- tid_info->ssn = tmpssn;
- }
-
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: generate A-MPDU for tid:%d ssn:%d with "
- "%d queued frames.\n", wiphy_name(ar->hw->wiphy),
- tid_info->tid, tid_info->ssn,
- skb_queue_len(&tid_info->queue));
- __ar9170_dump_txqueue(ar, &tid_info->queue);
-#endif /* AR9170_TXAGG_DEBUG */
-
- while ((skb = skb_peek(&tid_info->queue))) {
- if (unlikely(ar9170_get_seq(skb) != seq))
- break;
-
- __skb_unlink(skb, &tid_info->queue);
- tid_info->ssn = seq = GET_NEXT_SEQ(seq);
-
- if (unlikely(skb_get_queue_mapping(skb) != queue)) {
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: tid:%d(q:%d) queue:%d "
- "!match.\n", wiphy_name(ar->hw->wiphy),
- tid_info->tid,
- TID_TO_WME_AC(tid_info->tid),
- skb_get_queue_mapping(skb));
-#endif /* AR9170_TXAGG_DEBUG */
- dev_kfree_skb_any(skb);
- continue;
- }
-
- if (unlikely(first == skb)) {
- ar9170_tx_prepare_phy(ar, skb);
- __skb_queue_tail(&agg, skb);
- first = skb;
- } else {
- ar9170_tx_copy_phy(ar, skb, first);
- __skb_queue_tail(&agg, skb);
- }
-
- if (unlikely(skb_queue_len(&agg) ==
- AR9170_NUM_TX_AGG_MAX))
- break;
- }
-
- if (skb_queue_empty(&tid_info->queue))
- tid_info->active = false;
- else
- list_add_tail(&tid_info->list,
- &ar->tx_ampdu_list);
-
- spin_unlock_irqrestore(&tid_info->queue.lock, f2);
-
- if (unlikely(skb_queue_empty(&agg))) {
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: queued empty list!\n",
- wiphy_name(ar->hw->wiphy));
-#endif /* AR9170_TXAGG_DEBUG */
- continue;
- }
-
- /*
- * tell the FW/HW that this is the last frame,
- * that way it will wait for the immediate block ack.
- */
- ar9170_tx_indicate_immba(ar, skb_peek_tail(&agg));
-
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: generated A-MPDU looks like this:\n",
- wiphy_name(ar->hw->wiphy));
- __ar9170_dump_txqueue(ar, &agg);
-#endif /* AR9170_TXAGG_DEBUG */
-
- spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
-
- spin_lock_irqsave(&ar->tx_pending[queue].lock, flags);
- skb_queue_splice_tail_init(&agg, &ar->tx_pending[queue]);
- spin_unlock_irqrestore(&ar->tx_pending[queue].lock, flags);
- run = true;
-
- spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
- }
-
-out_unlock:
- spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
- __skb_queue_purge(&agg);
-
- return run;
-}
-
static void ar9170_tx(struct ar9170 *ar)
{
struct sk_buff *skb;
@@ -1728,7 +1399,7 @@ static void ar9170_tx(struct ar9170 *ar)
printk(KERN_DEBUG "%s: queue %d full\n",
wiphy_name(ar->hw->wiphy), i);
- printk(KERN_DEBUG "%s: stuck frames: ===> \n",
+ printk(KERN_DEBUG "%s: stuck frames: ===>\n",
wiphy_name(ar->hw->wiphy));
ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
ar9170_dump_txqueue(ar, &ar->tx_status[i]);
@@ -1763,9 +1434,6 @@ static void ar9170_tx(struct ar9170 *ar)
arinfo->timeout = jiffies +
msecs_to_jiffies(AR9170_TX_TIMEOUT);
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
- atomic_inc(&ar->tx_ampdu_pending);
-
#ifdef AR9170_QUEUE_DEBUG
printk(KERN_DEBUG "%s: send frame q:%d =>\n",
wiphy_name(ar->hw->wiphy), i);
@@ -1774,9 +1442,6 @@ static void ar9170_tx(struct ar9170 *ar)
err = ar->tx(ar, skb);
if (unlikely(err)) {
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
- atomic_dec(&ar->tx_ampdu_pending);
-
frames_failed++;
dev_kfree_skb_any(skb);
} else {
@@ -1823,94 +1488,11 @@ static void ar9170_tx(struct ar9170 *ar)
msecs_to_jiffies(AR9170_JANITOR_DELAY));
}
-static bool ar9170_tx_ampdu_queue(struct ar9170 *ar, struct sk_buff *skb)
-{
- struct ieee80211_tx_info *txinfo;
- struct ar9170_sta_info *sta_info;
- struct ar9170_sta_tid *agg;
- struct sk_buff *iter;
- unsigned long flags, f2;
- unsigned int max;
- u16 tid, seq, qseq;
- bool run = false, queue = false;
-
- tid = ar9170_get_tid(skb);
- seq = ar9170_get_seq(skb);
- txinfo = IEEE80211_SKB_CB(skb);
- sta_info = (void *) txinfo->control.sta->drv_priv;
- agg = &sta_info->agg[tid];
- max = sta_info->ampdu_max_len;
-
- spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
-
- if (unlikely(agg->state != AR9170_TID_STATE_COMPLETE)) {
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: BlockACK session not fully initialized "
- "for ESS:%pM tid:%d state:%d.\n",
- wiphy_name(ar->hw->wiphy), agg->addr, agg->tid,
- agg->state);
-#endif /* AR9170_TXAGG_DEBUG */
- goto err_unlock;
- }
-
- if (!agg->active) {
- agg->active = true;
- agg->ssn = seq;
- queue = true;
- }
-
- /* check if seq is within the BA window */
- if (unlikely(!BAW_WITHIN(agg->ssn, max, seq))) {
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: frame with tid:%d seq:%d does not "
- "fit into BA window (%d - %d)\n",
- wiphy_name(ar->hw->wiphy), tid, seq, agg->ssn,
- (agg->ssn + max) & 0xfff);
-#endif /* AR9170_TXAGG_DEBUG */
- goto err_unlock;
- }
-
- spin_lock_irqsave(&agg->queue.lock, f2);
-
- skb_queue_reverse_walk(&agg->queue, iter) {
- qseq = ar9170_get_seq(iter);
-
- if (GET_NEXT_SEQ(qseq) == seq) {
- __skb_queue_after(&agg->queue, iter, skb);
- goto queued;
- }
- }
-
- __skb_queue_head(&agg->queue, skb);
-
-queued:
- spin_unlock_irqrestore(&agg->queue.lock, f2);
-
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: new aggregate %p queued.\n",
- wiphy_name(ar->hw->wiphy), skb);
- __ar9170_dump_txqueue(ar, &agg->queue);
-#endif /* AR9170_TXAGG_DEBUG */
-
- if (skb_queue_len(&agg->queue) >= AR9170_NUM_TX_AGG_MAX)
- run = true;
-
- if (queue)
- list_add_tail(&agg->list, &ar->tx_ampdu_list);
-
- spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
- return run;
-
-err_unlock:
- spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
- dev_kfree_skb_irq(skb);
- return false;
-}
-
int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct ar9170 *ar = hw->priv;
struct ieee80211_tx_info *info;
+ unsigned int queue;
if (unlikely(!IS_STARTED(ar)))
goto err_free;
@@ -1918,18 +1500,10 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
if (unlikely(ar9170_tx_prepare(ar, skb)))
goto err_free;
+ queue = skb_get_queue_mapping(skb);
info = IEEE80211_SKB_CB(skb);
- if (info->flags & IEEE80211_TX_CTL_AMPDU) {
- bool run = ar9170_tx_ampdu_queue(ar, skb);
-
- if (run || !atomic_read(&ar->tx_ampdu_pending))
- ar9170_tx_ampdu(ar);
- } else {
- unsigned int queue = skb_get_queue_mapping(skb);
-
- ar9170_tx_prepare_phy(ar, skb);
- skb_queue_tail(&ar->tx_pending[queue], skb);
- }
+ ar9170_tx_prepare_phy(ar, skb);
+ skb_queue_tail(&ar->tx_pending[queue], skb);
ar9170_tx(ar);
return NETDEV_TX_OK;
@@ -2046,21 +1620,17 @@ out:
return err;
}
-static u64 ar9170_op_prepare_multicast(struct ieee80211_hw *hw, int mc_count,
- struct dev_addr_list *mclist)
+static u64 ar9170_op_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list)
{
u64 mchash;
- int i;
+ struct netdev_hw_addr *ha;
/* always get broadcast frames */
mchash = 1ULL << (0xff >> 2);
- for (i = 0; i < mc_count; i++) {
- if (WARN_ON(!mclist))
- break;
- mchash |= 1ULL << (mclist->dmi_addr[5] >> 2);
- mclist = mclist->next;
- }
+ netdev_hw_addr_list_for_each(ha, mc_list)
+ mchash |= 1ULL << (ha->addr[5] >> 2);
return mchash;
}
@@ -2330,57 +1900,6 @@ out:
return err;
}
-static int ar9170_sta_add(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct ar9170 *ar = hw->priv;
- struct ar9170_sta_info *sta_info = (void *) sta->drv_priv;
- unsigned int i;
-
- memset(sta_info, 0, sizeof(*sta_info));
-
- if (!sta->ht_cap.ht_supported)
- return 0;
-
- if (sta->ht_cap.ampdu_density > ar->global_ampdu_density)
- ar->global_ampdu_density = sta->ht_cap.ampdu_density;
-
- if (sta->ht_cap.ampdu_factor < ar->global_ampdu_factor)
- ar->global_ampdu_factor = sta->ht_cap.ampdu_factor;
-
- for (i = 0; i < AR9170_NUM_TID; i++) {
- sta_info->agg[i].state = AR9170_TID_STATE_SHUTDOWN;
- sta_info->agg[i].active = false;
- sta_info->agg[i].ssn = 0;
- sta_info->agg[i].tid = i;
- INIT_LIST_HEAD(&sta_info->agg[i].list);
- skb_queue_head_init(&sta_info->agg[i].queue);
- }
-
- sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
-
- return 0;
-}
-
-static int ar9170_sta_remove(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct ar9170_sta_info *sta_info = (void *) sta->drv_priv;
- unsigned int i;
-
- if (!sta->ht_cap.ht_supported)
- return 0;
-
- for (i = 0; i < AR9170_NUM_TID; i++) {
- sta_info->agg[i].state = AR9170_TID_STATE_INVALID;
- skb_queue_purge(&sta_info->agg[i].queue);
- }
-
- return 0;
-}
-
static int ar9170_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
@@ -2423,55 +1942,7 @@ static int ar9170_ampdu_action(struct ieee80211_hw *hw,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
- struct ar9170 *ar = hw->priv;
- struct ar9170_sta_info *sta_info = (void *) sta->drv_priv;
- struct ar9170_sta_tid *tid_info = &sta_info->agg[tid];
- unsigned long flags;
-
- if (!modparam_ht)
- return -EOPNOTSUPP;
-
switch (action) {
- case IEEE80211_AMPDU_TX_START:
- spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
- if (tid_info->state != AR9170_TID_STATE_SHUTDOWN ||
- !list_empty(&tid_info->list)) {
- spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_INFO "%s: A-MPDU [ESS:[%pM] tid:[%d]] "
- "is in a very bad state!\n",
- wiphy_name(hw->wiphy), sta->addr, tid);
-#endif /* AR9170_TXAGG_DEBUG */
- return -EBUSY;
- }
-
- *ssn = tid_info->ssn;
- tid_info->state = AR9170_TID_STATE_PROGRESS;
- tid_info->active = false;
- spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
-
- case IEEE80211_AMPDU_TX_STOP:
- spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
- tid_info->state = AR9170_TID_STATE_SHUTDOWN;
- list_del_init(&tid_info->list);
- tid_info->active = false;
- skb_queue_purge(&tid_info->queue);
- spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
- ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
-
- case IEEE80211_AMPDU_TX_OPERATIONAL:
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_INFO "%s: A-MPDU for %pM [tid:%d] Operational.\n",
- wiphy_name(hw->wiphy), sta->addr, tid);
-#endif /* AR9170_TXAGG_DEBUG */
- spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
- sta_info->agg[tid].state = AR9170_TID_STATE_COMPLETE;
- spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
- break;
-
case IEEE80211_AMPDU_RX_START:
case IEEE80211_AMPDU_RX_STOP:
/* Handled by firmware */
@@ -2497,8 +1968,6 @@ static const struct ieee80211_ops ar9170_ops = {
.bss_info_changed = ar9170_op_bss_info_changed,
.get_tsf = ar9170_op_get_tsf,
.set_key = ar9170_set_key,
- .sta_add = ar9170_sta_add,
- .sta_remove = ar9170_sta_remove,
.get_stats = ar9170_get_stats,
.ampdu_action = ar9170_ampdu_action,
};
@@ -2516,7 +1985,7 @@ void *ar9170_alloc(size_t priv_size)
* tends to split the streams into separate rx descriptors.
*/
- skb = __dev_alloc_skb(AR9170_MAX_RX_BUFFER_SIZE, GFP_KERNEL);
+ skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
if (!skb)
goto err_nomem;
@@ -2531,8 +2000,6 @@ void *ar9170_alloc(size_t priv_size)
mutex_init(&ar->mutex);
spin_lock_init(&ar->cmdlock);
spin_lock_init(&ar->tx_stats_lock);
- spin_lock_init(&ar->tx_ampdu_list_lock);
- skb_queue_head_init(&ar->tx_status_ampdu);
for (i = 0; i < __AR9170_NUM_TXQ; i++) {
skb_queue_head_init(&ar->tx_status[i]);
skb_queue_head_init(&ar->tx_pending[i]);
@@ -2540,7 +2007,6 @@ void *ar9170_alloc(size_t priv_size)
ar9170_rx_reset_rx_mpdu(ar);
INIT_WORK(&ar->beacon_work, ar9170_new_beacon);
INIT_DELAYED_WORK(&ar->tx_janitor, ar9170_tx_janitor);
- INIT_LIST_HEAD(&ar->tx_ampdu_list);
/* all hw supports 2.4 GHz, so set channel to 1 by default */
ar->channel = &ar9170_2ghz_chantable[0];
@@ -2551,19 +2017,10 @@ void *ar9170_alloc(size_t priv_size)
BIT(NL80211_IFTYPE_ADHOC);
ar->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_NOISE_DBM;
-
- if (modparam_ht) {
- ar->hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
- } else {
- ar9170_band_2GHz.ht_cap.ht_supported = false;
- ar9170_band_5GHz.ht_cap.ht_supported = false;
- }
+ IEEE80211_HW_SIGNAL_DBM;
ar->hw->queues = __AR9170_NUM_TXQ;
ar->hw->extra_tx_headroom = 8;
- ar->hw->sta_data_size = sizeof(struct ar9170_sta_info);
ar->hw->max_rates = 1;
ar->hw->max_rate_tries = 3;
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
index 99a6da464bd3..8663660ea4c6 100644
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ b/drivers/net/wireless/ath/ar9170/usb.c
@@ -42,6 +42,7 @@
#include <linux/usb.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
+#include <linux/device.h>
#include <net/mac80211.h>
#include "ar9170.h"
#include "cmd.h"
@@ -67,18 +68,28 @@ static struct usb_device_id ar9170_usb_ids[] = {
{ USB_DEVICE(0x0cf3, 0x1001) },
/* TP-Link TL-WN821N v2 */
{ USB_DEVICE(0x0cf3, 0x1002) },
+ /* 3Com Dual Band 802.11n USB Adapter */
+ { USB_DEVICE(0x0cf3, 0x1010) },
+ /* H3C Dual Band 802.11n USB Adapter */
+ { USB_DEVICE(0x0cf3, 0x1011) },
/* Cace Airpcap NX */
{ USB_DEVICE(0xcace, 0x0300) },
/* D-Link DWA 160 A1 */
{ USB_DEVICE(0x07d1, 0x3c10) },
/* D-Link DWA 160 A2 */
{ USB_DEVICE(0x07d1, 0x3a09) },
+ /* Netgear WNA1000 */
+ { USB_DEVICE(0x0846, 0x9040) },
/* Netgear WNDA3100 */
{ USB_DEVICE(0x0846, 0x9010) },
/* Netgear WN111 v2 */
{ USB_DEVICE(0x0846, 0x9001) },
/* Zydas ZD1221 */
{ USB_DEVICE(0x0ace, 0x1221) },
+ /* Proxim ORiNOCO 802.11n USB */
+ { USB_DEVICE(0x1435, 0x0804) },
+ /* WNC Generic 11n USB Dongle */
+ { USB_DEVICE(0x1435, 0x0326) },
/* ZyXEL NWD271N */
{ USB_DEVICE(0x0586, 0x3417) },
/* Z-Com UB81 BG */
@@ -727,12 +738,16 @@ static void ar9170_usb_firmware_failed(struct ar9170_usb *aru)
{
struct device *parent = aru->udev->dev.parent;
+ complete(&aru->firmware_loading_complete);
+
/* unbind anything failed */
if (parent)
- down(&parent->sem);
+ device_lock(parent);
device_release_driver(&aru->udev->dev);
if (parent)
- up(&parent->sem);
+ device_unlock(parent);
+
+ usb_put_dev(aru->udev);
}
static void ar9170_usb_firmware_finish(const struct firmware *fw, void *context)
@@ -761,6 +776,8 @@ static void ar9170_usb_firmware_finish(const struct firmware *fw, void *context)
if (err)
goto err_unrx;
+ complete(&aru->firmware_loading_complete);
+ usb_put_dev(aru->udev);
return;
err_unrx:
@@ -858,6 +875,7 @@ static int ar9170_usb_probe(struct usb_interface *intf,
init_usb_anchor(&aru->tx_pending);
init_usb_anchor(&aru->tx_submitted);
init_completion(&aru->cmd_wait);
+ init_completion(&aru->firmware_loading_complete);
spin_lock_init(&aru->tx_urb_lock);
aru->tx_pending_urbs = 0;
@@ -877,6 +895,7 @@ static int ar9170_usb_probe(struct usb_interface *intf,
if (err)
goto err_freehw;
+ usb_get_dev(aru->udev);
return request_firmware_nowait(THIS_MODULE, 1, "ar9170.fw",
&aru->udev->dev, GFP_KERNEL, aru,
ar9170_usb_firmware_step2);
@@ -896,6 +915,9 @@ static void ar9170_usb_disconnect(struct usb_interface *intf)
return;
aru->common.state = AR9170_IDLE;
+
+ wait_for_completion(&aru->firmware_loading_complete);
+
ar9170_unregister(&aru->common);
ar9170_usb_cancel_urbs(aru);
diff --git a/drivers/net/wireless/ath/ar9170/usb.h b/drivers/net/wireless/ath/ar9170/usb.h
index a2ce3b169ceb..919b06046eb3 100644
--- a/drivers/net/wireless/ath/ar9170/usb.h
+++ b/drivers/net/wireless/ath/ar9170/usb.h
@@ -71,6 +71,7 @@ struct ar9170_usb {
unsigned int tx_pending_urbs;
struct completion cmd_wait;
+ struct completion firmware_loading_complete;
int readlen;
u8 *readbuf;
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 71fc960814f0..d32f2828b098 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -48,6 +48,12 @@ enum ath_device_state {
ATH_HW_INITIALIZED,
};
+enum ath_bus_type {
+ ATH_PCI,
+ ATH_AHB,
+ ATH_USB,
+};
+
struct reg_dmn_pair_mapping {
u16 regDmnEnum;
u16 reg_5ghz_ctl;
@@ -65,17 +71,30 @@ struct ath_regulatory {
struct reg_dmn_pair_mapping *regpair;
};
+/**
+ * struct ath_ops - Register read/write operations
+ *
+ * @read: Register read
+ * @write: Register write
+ * @enable_write_buffer: Enable multiple register writes
+ * @disable_write_buffer: Disable multiple register writes
+ * @write_flush: Flush buffered register writes
+ */
struct ath_ops {
unsigned int (*read)(void *, u32 reg_offset);
- void (*write)(void *, u32 val, u32 reg_offset);
+ void (*write)(void *, u32 val, u32 reg_offset);
+ void (*enable_write_buffer)(void *);
+ void (*disable_write_buffer)(void *);
+ void (*write_flush) (void *);
};
struct ath_common;
struct ath_bus_ops {
- void (*read_cachesize)(struct ath_common *common, int *csz);
- bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data);
- void (*bt_coex_prep)(struct ath_common *common);
+ enum ath_bus_type ath_bus_type;
+ void (*read_cachesize)(struct ath_common *common, int *csz);
+ bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data);
+ void (*bt_coex_prep)(struct ath_common *common);
};
struct ath_common {
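The new enable_write_buffer/disable_write_buffer/write_flush hooks added to struct ath_ops above let a bus back-end collect several register writes and push them out in one go, which matters on buses such as USB where every access is a full transaction. A minimal sketch of how a caller might bracket a burst of writes with these hooks follows; it is only an illustration, and the register offsets, values and the example_burst_write() helper are placeholders rather than actual ath code.

#define EXAMPLE_REG_A	0x8000	/* placeholder offsets, not real registers */
#define EXAMPLE_REG_B	0x8004

static void example_burst_write(const struct ath_ops *ops, void *priv)
{
	if (ops->enable_write_buffer)
		ops->enable_write_buffer(priv);		/* start batching writes */

	ops->write(priv, 0x1, EXAMPLE_REG_A);		/* queued, not yet on the bus */
	ops->write(priv, 0x2, EXAMPLE_REG_B);

	if (ops->write_flush)
		ops->write_flush(priv);			/* push the whole batch out */
	if (ops->disable_write_buffer)
		ops->disable_write_buffer(priv);	/* back to direct writes */
}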
diff --git a/drivers/net/wireless/ath/ath5k/Makefile b/drivers/net/wireless/ath/ath5k/Makefile
index 090dc6d268a3..cc09595b781a 100644
--- a/drivers/net/wireless/ath/ath5k/Makefile
+++ b/drivers/net/wireless/ath/ath5k/Makefile
@@ -12,5 +12,6 @@ ath5k-y += attach.o
ath5k-y += base.o
ath5k-y += led.o
ath5k-y += rfkill.o
+ath5k-y += ani.o
ath5k-$(CONFIG_ATH5K_DEBUG) += debug.o
obj-$(CONFIG_ATH5K) += ath5k.o
diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c
new file mode 100644
index 000000000000..f2311ab35504
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/ani.c
@@ -0,0 +1,744 @@
+/*
+ * Copyright (C) 2010 Bruno Randolf <br1@einfach.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ath5k.h"
+#include "base.h"
+#include "reg.h"
+#include "debug.h"
+#include "ani.h"
+
+/**
+ * DOC: Basic ANI Operation
+ *
+ * Adaptive Noise Immunity (ANI) controls five noise immunity parameters
+ * depending on the amount of interference in the environment, increasing
+ * or reducing sensitivity as necessary.
+ *
+ * The parameters are:
+ * - "noise immunity"
+ * - "spur immunity"
+ * - "firstep level"
+ * - "OFDM weak signal detection"
+ * - "CCK weak signal detection"
+ *
+ * Basically we look at the amount of OFDM and CCK timing errors we get and then
+ * raise or lower immunity accordingly by setting one or more of these
+ * parameters.
+ * Newer chipsets have PHY error counters in hardware which will generate a MIB
+ * interrupt when they overflow. Older hardware has to enable PHY error frames
+ * by setting an RX flag and then count every single PHY error. When a specified
+ * threshold of errors has been reached we will raise immunity.
+ * Also we regularly check the amount of errors and lower or raise immunity as
+ * necessary.
+ */
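To make the algorithm sketched above concrete, here is a minimal, self-contained illustration of the control loop: count OFDM/CCK PHY errors per amount of listen time, raise immunity (lower sensitivity) when a high trigger rate is exceeded, and lower it again once the error rate stays below a low trigger. The trigger rates (500/200/200/100 errors per second) and the single 'sensitivity' knob are placeholders for illustration only; the real driver adjusts the five parameters listed above through the ath5k_ani_set_*() helpers that follow.

static int sensitivity = 5;			/* placeholder: higher = more sensitive */

static void ani_control_sketch(unsigned int ofdm_errs, unsigned int cck_errs,
			       unsigned int listen_ms)
{
	/* scale per-second trigger rates to the observed listen time */
	unsigned int ofdm_high = listen_ms * 500 / 1000;
	unsigned int cck_high  = listen_ms * 200 / 1000;
	unsigned int ofdm_low  = listen_ms * 200 / 1000;
	unsigned int cck_low   = listen_ms * 100 / 1000;

	if (ofdm_errs > ofdm_high || cck_errs > cck_high) {
		if (sensitivity > 0)
			sensitivity--;		/* raise immunity */
	} else if (ofdm_errs < ofdm_low && cck_errs < cck_low) {
		if (sensitivity < 10)
			sensitivity++;		/* lower immunity again */
	}
}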
+
+
+/*** ANI parameter control ***/
+
+/**
+ * ath5k_ani_set_noise_immunity_level() - Set noise immunity level
+ *
+ * @level: level between 0 and @ATH5K_ANI_MAX_NOISE_IMM_LVL
+ */
+void
+ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level)
+{
+ /* TODO:
+ * ANI documents suggest the following five levels to use, but the HAL
+ * and ath9k only use the last two levels, making this
+ * essentially an on/off option. There *may* be a reason for this (???),
+ * so I stick with the HAL version for now...
+ */
+#if 0
+ const s8 hi[] = { -18, -18, -16, -14, -12 };
+ const s8 lo[] = { -52, -56, -60, -64, -70 };
+ const s8 sz[] = { -34, -41, -48, -55, -62 };
+ const s8 fr[] = { -70, -72, -75, -78, -80 };
+#else
+ const s8 sz[] = { -55, -62 };
+ const s8 lo[] = { -64, -70 };
+ const s8 hi[] = { -14, -12 };
+ const s8 fr[] = { -78, -80 };
+#endif
+ if (level < 0 || level >= ARRAY_SIZE(sz)) {
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "level out of range %d", level);
+ return;
+ }
+
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_DESIRED_SIZE,
+ AR5K_PHY_DESIRED_SIZE_TOT, sz[level]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_AGCCOARSE,
+ AR5K_PHY_AGCCOARSE_LO, lo[level]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_AGCCOARSE,
+ AR5K_PHY_AGCCOARSE_HI, hi[level]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SIG,
+ AR5K_PHY_SIG_FIRPWR, fr[level]);
+
+ ah->ah_sc->ani_state.noise_imm_level = level;
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "new level %d", level);
+}
+
+
+/**
+ * ath5k_ani_set_spur_immunity_level() - Set spur immunity level
+ *
+ * @level: level between 0 and @max_spur_level (the maximum level is dependent
+ * on the chip revision).
+ */
+void
+ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
+{
+ const int val[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
+
+ if (level < 0 || level >= ARRAY_SIZE(val) ||
+ level > ah->ah_sc->ani_state.max_spur_level) {
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "level out of range %d", level);
+ return;
+ }
+
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_OFDM_SELFCORR,
+ AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1, val[level]);
+
+ ah->ah_sc->ani_state.spur_level = level;
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "new level %d", level);
+}
+
+
+/**
+ * ath5k_ani_set_firstep_level() - Set "firstep" level
+ *
+ * @level: level between 0 and @ATH5K_ANI_MAX_FIRSTEP_LVL
+ */
+void
+ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level)
+{
+ const int val[] = { 0, 4, 8 };
+
+ if (level < 0 || level >= ARRAY_SIZE(val)) {
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "level out of range %d", level);
+ return;
+ }
+
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SIG,
+ AR5K_PHY_SIG_FIRSTEP, val[level]);
+
+ ah->ah_sc->ani_state.firstep_level = level;
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "new level %d", level);
+}
+
+
+/**
+ * ath5k_ani_set_ofdm_weak_signal_detection() - Control OFDM weak signal
+ * detection
+ *
+ * @on: turn on or off
+ */
+void
+ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on)
+{
+ const int m1l[] = { 127, 50 };
+ const int m2l[] = { 127, 40 };
+ const int m1[] = { 127, 0x4d };
+ const int m2[] = { 127, 0x40 };
+ const int m2cnt[] = { 31, 16 };
+ const int m2lcnt[] = { 63, 48 };
+
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
+ AR5K_PHY_WEAK_OFDM_LOW_THR_M1, m1l[on]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
+ AR5K_PHY_WEAK_OFDM_LOW_THR_M2, m2l[on]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_HIGH_THR,
+ AR5K_PHY_WEAK_OFDM_HIGH_THR_M1, m1[on]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_HIGH_THR,
+ AR5K_PHY_WEAK_OFDM_HIGH_THR_M2, m2[on]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_HIGH_THR,
+ AR5K_PHY_WEAK_OFDM_HIGH_THR_M2_COUNT, m2cnt[on]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
+ AR5K_PHY_WEAK_OFDM_LOW_THR_M2_COUNT, m2lcnt[on]);
+
+ if (on)
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
+ AR5K_PHY_WEAK_OFDM_LOW_THR_SELFCOR_EN);
+ else
+ AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
+ AR5K_PHY_WEAK_OFDM_LOW_THR_SELFCOR_EN);
+
+ ah->ah_sc->ani_state.ofdm_weak_sig = on;
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "turned %s",
+ on ? "on" : "off");
+}
+
+
+/**
+ * ath5k_ani_set_cck_weak_signal_detection() - control CCK weak signal detection
+ *
+ * @on: turn on or off
+ */
+void
+ath5k_ani_set_cck_weak_signal_detection(struct ath5k_hw *ah, bool on)
+{
+ const int val[] = { 8, 6 };
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_CCK_CROSSCORR,
+ AR5K_PHY_CCK_CROSSCORR_WEAK_SIG_THR, val[on]);
+ ah->ah_sc->ani_state.cck_weak_sig = on;
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "turned %s",
+ on ? "on" : "off");
+}
+
+
+/*** ANI algorithm ***/
+
+/**
+ * ath5k_ani_raise_immunity() - Increase noise immunity
+ *
+ * @ofdm_trigger: If this is true we are called because of too many OFDM errors;
+ * the algorithm will then tune more parameters.
+ *
+ * Try to raise noise immunity (=decrease sensitivity) in several steps
+ * depending on the average RSSI of the beacons we received.
+ */
+static void
+ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
+ bool ofdm_trigger)
+{
+ int rssi = ah->ah_beacon_rssi_avg.avg;
+
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "raise immunity (%s)",
+ ofdm_trigger ? "OFDM" : "CCK");
+
+ /* first: raise noise immunity */
+ if (as->noise_imm_level < ATH5K_ANI_MAX_NOISE_IMM_LVL) {
+ ath5k_ani_set_noise_immunity_level(ah, as->noise_imm_level + 1);
+ return;
+ }
+
+ /* only OFDM: raise spur immunity level */
+ if (ofdm_trigger &&
+ as->spur_level < ah->ah_sc->ani_state.max_spur_level) {
+ ath5k_ani_set_spur_immunity_level(ah, as->spur_level + 1);
+ return;
+ }
+
+ /* AP mode */
+ if (ah->ah_sc->opmode == NL80211_IFTYPE_AP) {
+ if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL)
+ ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
+ return;
+ }
+
+ /* STA and IBSS mode */
+
+ /* TODO: for IBSS mode it would be better to keep a beacon RSSI average
+ * per each neighbour node and use the minimum of these, to make sure we
+ * don't shut out a remote node by raising immunity too high. */
+
+ if (rssi > ATH5K_ANI_RSSI_THR_HIGH) {
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "beacon RSSI high");
+ /* only OFDM: beacon RSSI is high, we can disable OFDM weak
+ * signal detection */
+ if (ofdm_trigger && as->ofdm_weak_sig == true) {
+ ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
+ ath5k_ani_set_spur_immunity_level(ah, 0);
+ return;
+ }
+ /* as a last resort or CCK: raise firstep level */
+ if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL) {
+ ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
+ return;
+ }
+ } else if (rssi > ATH5K_ANI_RSSI_THR_LOW) {
+ /* beacon RSSI in mid range, we need OFDM weak signal detect,
+ * but can raise firstep level */
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "beacon RSSI mid");
+ if (ofdm_trigger && as->ofdm_weak_sig == false)
+ ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
+ if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL)
+ ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
+ return;
+ } else if (ah->ah_current_channel->band == IEEE80211_BAND_2GHZ) {
+ /* beacon RSSI is low. In B/G mode turn off OFDM weak signal
+ * detect and zero firstep level to maximize CCK sensitivity */
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "beacon RSSI low, 2GHz");
+ if (ofdm_trigger && as->ofdm_weak_sig == true)
+ ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
+ if (as->firstep_level > 0)
+ ath5k_ani_set_firstep_level(ah, 0);
+ return;
+ }
+
+ /* TODO: why not?:
+ if (as->cck_weak_sig == true) {
+ ath5k_ani_set_cck_weak_signal_detection(ah, false);
+ }
+ */
+}
+
+
+/**
+ * ath5k_ani_lower_immunity() - Decrease noise immunity
+ *
+ * Try to lower noise immunity (=increase sensitivity) in several steps
+ * depending on the average RSSI of the beacons we received.
+ */
+static void
+ath5k_ani_lower_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as)
+{
+ int rssi = ah->ah_beacon_rssi_avg.avg;
+
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "lower immunity");
+
+ if (ah->ah_sc->opmode == NL80211_IFTYPE_AP) {
+ /* AP mode */
+ if (as->firstep_level > 0) {
+ ath5k_ani_set_firstep_level(ah, as->firstep_level - 1);
+ return;
+ }
+ } else {
+ /* STA and IBSS mode (see TODO above) */
+ if (rssi > ATH5K_ANI_RSSI_THR_HIGH) {
+ /* beacon signal is high, leave OFDM weak signal
+ * detection off or it may oscillate
+ * TODO: who said it's off??? */
+ } else if (rssi > ATH5K_ANI_RSSI_THR_LOW) {
+ /* beacon RSSI is mid-range: turn on OFDM weak signal
+ * detection and next, lower firstep level */
+ if (as->ofdm_weak_sig == false) {
+ ath5k_ani_set_ofdm_weak_signal_detection(ah,
+ true);
+ return;
+ }
+ if (as->firstep_level > 0) {
+ ath5k_ani_set_firstep_level(ah,
+ as->firstep_level - 1);
+ return;
+ }
+ } else {
+ /* beacon signal is low: only reduce firstep level */
+ if (as->firstep_level > 0) {
+ ath5k_ani_set_firstep_level(ah,
+ as->firstep_level - 1);
+ return;
+ }
+ }
+ }
+
+ /* all modes */
+ if (as->spur_level > 0) {
+ ath5k_ani_set_spur_immunity_level(ah, as->spur_level - 1);
+ return;
+ }
+
+ /* finally, reduce noise immunity */
+ if (as->noise_imm_level > 0) {
+ ath5k_ani_set_noise_immunity_level(ah, as->noise_imm_level - 1);
+ return;
+ }
+}
+
+
+/**
+ * ath5k_hw_ani_get_listen_time() - Calculate time spent listening
+ *
+ * Return an approximation of the time spent "listening" in milliseconds (ms)
+ * since the last call of this function by deducting the cycles spent
+ * transmitting and receiving from the total cycle count.
+ * Save profile count values for debugging/statistics and because we might want
+ * to use them later.
+ *
+ * We assume no one else clears these registers!
+ */
+static int
+ath5k_hw_ani_get_listen_time(struct ath5k_hw *ah, struct ath5k_ani_state *as)
+{
+ int listen;
+
+ /* freeze */
+ ath5k_hw_reg_write(ah, AR5K_MIBC_FMC, AR5K_MIBC);
+ /* read */
+ as->pfc_cycles = ath5k_hw_reg_read(ah, AR5K_PROFCNT_CYCLE);
+ as->pfc_busy = ath5k_hw_reg_read(ah, AR5K_PROFCNT_RXCLR);
+ as->pfc_tx = ath5k_hw_reg_read(ah, AR5K_PROFCNT_TX);
+ as->pfc_rx = ath5k_hw_reg_read(ah, AR5K_PROFCNT_RX);
+ /* clear */
+ ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_TX);
+ ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RX);
+ ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RXCLR);
+ ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_CYCLE);
+ /* un-freeze */
+ ath5k_hw_reg_write(ah, 0, AR5K_MIBC);
+
+ /* TODO: where does 44000 come from? (11g clock rate?) */
+ listen = (as->pfc_cycles - as->pfc_rx - as->pfc_tx) / 44000;
+
+ if (as->pfc_cycles == 0 || listen < 0)
+ return 0;
+ return listen;
+}
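For a feel of the arithmetic above: the division by 44000 treats the profile counters as ticks of (presumably) a 44 MHz clock, so cycles / 44000 yields milliseconds; the in-code TODO notes that the origin of the constant is not documented. With purely illustrative counter values:

	listen = (pfc_cycles - pfc_rx - pfc_tx) / 44000
	       = (44,000,000 - 4,400,000 - 2,200,000) / 44000
	       = 37,400,000 / 44000
	       = 850 ms of listen time since the last call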
+
+
+/**
+ * ath5k_ani_save_and_clear_phy_errors() - Clear and save PHY error counters
+ *
+ * Clear the PHY error counters as soon as possible, since this might be called
+ * from a MIB interrupt and we want to make sure we don't get interrupted again.
+ * Add the count of CCK and OFDM errors to our internal state, so it can be used
+ * by the algorithm later.
+ *
+ * Will be called from interrupt and tasklet context.
+ * Returns 0 if both counters are zero.
+ */
+static int
+ath5k_ani_save_and_clear_phy_errors(struct ath5k_hw *ah,
+ struct ath5k_ani_state *as)
+{
+ unsigned int ofdm_err, cck_err;
+
+ if (!ah->ah_capabilities.cap_has_phyerr_counters)
+ return 0;
+
+ ofdm_err = ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1);
+ cck_err = ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2);
+
+ /* reset counters first, we might be in a hurry (interrupt) */
+ ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_OFDM_TRIG_HIGH,
+ AR5K_PHYERR_CNT1);
+ ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_CCK_TRIG_HIGH,
+ AR5K_PHYERR_CNT2);
+
+ ofdm_err = ATH5K_ANI_OFDM_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX - ofdm_err);
+ cck_err = ATH5K_ANI_CCK_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX - cck_err);
+
+	/* sometimes both can be zero, especially when there is a superfluous
+	 * second interrupt. detect that here and return 0. */
+ if (ofdm_err <= 0 && cck_err <= 0)
+ return 0;
+
+ /* avoid negative values should one of the registers overflow */
+ if (ofdm_err > 0) {
+ as->ofdm_errors += ofdm_err;
+ as->sum_ofdm_errors += ofdm_err;
+ }
+ if (cck_err > 0) {
+ as->cck_errors += cck_err;
+ as->sum_cck_errors += cck_err;
+ }
+ return 1;
+}
+
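+/*
+ * A note on the arithmetic above: the counters are preloaded with
+ * ATH5K_PHYERR_CNT_MAX - TRIG_HIGH, so a raw register value "reg" means that
+ * reg - (ATH5K_PHYERR_CNT_MAX - TRIG_HIGH) errors have occurred since the
+ * preload. That is exactly what TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX - reg)
+ * computes; if the counter has reached ATH5K_PHYERR_CNT_MAX (the point where
+ * the MIB interrupt fires) the result is TRIG_HIGH.
+ */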
+
+/**
+ * ath5k_ani_period_restart() - Restart ANI period
+ *
+ * Just reset counters, so they are clear for the next "ani period".
+ */
+static void
+ath5k_ani_period_restart(struct ath5k_hw *ah, struct ath5k_ani_state *as)
+{
+ /* keep last values for debugging */
+ as->last_ofdm_errors = as->ofdm_errors;
+ as->last_cck_errors = as->cck_errors;
+ as->last_listen = as->listen_time;
+
+ as->ofdm_errors = 0;
+ as->cck_errors = 0;
+ as->listen_time = 0;
+}
+
+
+/**
+ * ath5k_ani_calibration() - The main ANI calibration function
+ *
+ * We count OFDM and CCK errors relative to the time when we did not send or
+ * receive ("listen" time) and raise or lower immunity accordingly.
+ * This is called regularly (every second) from the calibration timer, but also
+ * when an error threshold has been reached.
+ *
+ * In order to synchronize access from different contexts, this should be
+ * called only indirectly by scheduling the ANI tasklet!
+ */
+void
+ath5k_ani_calibration(struct ath5k_hw *ah)
+{
+ struct ath5k_ani_state *as = &ah->ah_sc->ani_state;
+ int listen, ofdm_high, ofdm_low, cck_high, cck_low;
+
+ if (as->ani_mode != ATH5K_ANI_MODE_AUTO)
+ return;
+
+ /* get listen time since last call and add it to the counter because we
+ * might not have restarted the "ani period" last time */
+ listen = ath5k_hw_ani_get_listen_time(ah, as);
+ as->listen_time += listen;
+
+ ath5k_ani_save_and_clear_phy_errors(ah, as);
+
+ ofdm_high = as->listen_time * ATH5K_ANI_OFDM_TRIG_HIGH / 1000;
+ cck_high = as->listen_time * ATH5K_ANI_CCK_TRIG_HIGH / 1000;
+ ofdm_low = as->listen_time * ATH5K_ANI_OFDM_TRIG_LOW / 1000;
+ cck_low = as->listen_time * ATH5K_ANI_CCK_TRIG_LOW / 1000;
+
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "listen %d (now %d)", as->listen_time, listen);
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "check high ofdm %d/%d cck %d/%d",
+ as->ofdm_errors, ofdm_high, as->cck_errors, cck_high);
+
+ if (as->ofdm_errors > ofdm_high || as->cck_errors > cck_high) {
+ /* too many PHY errors - we have to raise immunity */
+		bool ofdm_flag = as->ofdm_errors > ofdm_high;
+ ath5k_ani_raise_immunity(ah, as, ofdm_flag);
+ ath5k_ani_period_restart(ah, as);
+
+ } else if (as->listen_time > 5 * ATH5K_ANI_LISTEN_PERIOD) {
+		/* If more than 5 (TODO: why 5?) periods have passed and we got
+		 * relatively few errors we can try to lower immunity */
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "check low ofdm %d/%d cck %d/%d",
+ as->ofdm_errors, ofdm_low, as->cck_errors, cck_low);
+
+ if (as->ofdm_errors <= ofdm_low && as->cck_errors <= cck_low)
+ ath5k_ani_lower_immunity(ah, as);
+
+ ath5k_ani_period_restart(ah, as);
+ }
+}
+
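+/*
+ * Example for the threshold scaling above: with ATH5K_ANI_OFDM_TRIG_HIGH set
+ * to 500 and a listen time of 800 ms, ofdm_high = 800 * 500 / 1000 = 400,
+ * i.e. immunity is raised once more than 400 OFDM errors were counted within
+ * those 800 ms of listen time.
+ */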
+
+/*** INTERRUPT HANDLER ***/
+
+/**
+ * ath5k_ani_mib_intr() - Interrupt handler for ANI MIB counters
+ *
+ * Just read & reset the registers quickly, so they don't generate more
+ * interrupts, save the counters and schedule the tasklet to decide whether
+ * to raise immunity or not.
+ *
+ * We just need to handle PHY error counters, ath5k_hw_update_mib_counters()
+ * should take care of all "normal" MIB interrupts.
+ */
+void
+ath5k_ani_mib_intr(struct ath5k_hw *ah)
+{
+ struct ath5k_ani_state *as = &ah->ah_sc->ani_state;
+
+ /* nothing to do here if HW does not have PHY error counters - they
+ * can't be the reason for the MIB interrupt then */
+ if (!ah->ah_capabilities.cap_has_phyerr_counters)
+ return;
+
+	/* not in use but clear anyway */
+ ath5k_hw_reg_write(ah, 0, AR5K_OFDM_FIL_CNT);
+ ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
+
+ if (ah->ah_sc->ani_state.ani_mode != ATH5K_ANI_MODE_AUTO)
+ return;
+
+	/* if one of the error counters triggered, we can get a superfluous
+	 * second interrupt, even though we have already reset the registers.
+	 * the function detects that, so we can return early */
+ if (ath5k_ani_save_and_clear_phy_errors(ah, as) == 0)
+ return;
+
+ if (as->ofdm_errors > ATH5K_ANI_OFDM_TRIG_HIGH ||
+ as->cck_errors > ATH5K_ANI_CCK_TRIG_HIGH)
+ tasklet_schedule(&ah->ah_sc->ani_tasklet);
+}
+
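+/*
+ * Note that the check above compares the accumulated error counts against the
+ * absolute trigger values; the listen-time-relative comparison is done later
+ * in ath5k_ani_calibration(), which runs from the scheduled tasklet.
+ */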
+
+/**
+ * ath5k_ani_phy_error_report() - Used by older HW to report PHY errors
+ *
+ * This is used by hardware without PHY error counters to report PHY errors
+ * on a frame-by-frame basis, instead of via a MIB interrupt.
+ */
+void
+ath5k_ani_phy_error_report(struct ath5k_hw *ah,
+ enum ath5k_phy_error_code phyerr)
+{
+ struct ath5k_ani_state *as = &ah->ah_sc->ani_state;
+
+ if (phyerr == AR5K_RX_PHY_ERROR_OFDM_TIMING) {
+ as->ofdm_errors++;
+ if (as->ofdm_errors > ATH5K_ANI_OFDM_TRIG_HIGH)
+ tasklet_schedule(&ah->ah_sc->ani_tasklet);
+ } else if (phyerr == AR5K_RX_PHY_ERROR_CCK_TIMING) {
+ as->cck_errors++;
+ if (as->cck_errors > ATH5K_ANI_CCK_TRIG_HIGH)
+ tasklet_schedule(&ah->ah_sc->ani_tasklet);
+ }
+}
+
+
+/*** INIT ***/
+
+/**
+ * ath5k_enable_phy_err_counters() - Enable PHY error counters
+ *
+ * Enable PHY error counters for OFDM and CCK timing errors.
+ */
+static void
+ath5k_enable_phy_err_counters(struct ath5k_hw *ah)
+{
+ ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_OFDM_TRIG_HIGH,
+ AR5K_PHYERR_CNT1);
+ ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_CCK_TRIG_HIGH,
+ AR5K_PHYERR_CNT2);
+ ath5k_hw_reg_write(ah, AR5K_PHY_ERR_FIL_OFDM, AR5K_PHYERR_CNT1_MASK);
+ ath5k_hw_reg_write(ah, AR5K_PHY_ERR_FIL_CCK, AR5K_PHYERR_CNT2_MASK);
+
+ /* not in use */
+ ath5k_hw_reg_write(ah, 0, AR5K_OFDM_FIL_CNT);
+ ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
+}
+
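+/*
+ * The preload value ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_*_TRIG_HIGH used above
+ * makes a counter reach its maximum, and thus raise a MIB interrupt, after
+ * roughly TRIG_HIGH errors of the respective type, so the ANI code is
+ * notified as soon as the "high" trigger is crossed.
+ */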
+
+/**
+ * ath5k_disable_phy_err_counters() - Disable PHY error counters
+ *
+ * Disable PHY error counters for OFDM and CCK timing errors.
+ */
+static void
+ath5k_disable_phy_err_counters(struct ath5k_hw *ah)
+{
+ ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT1);
+ ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT2);
+ ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT1_MASK);
+ ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT2_MASK);
+
+ /* not in use */
+ ath5k_hw_reg_write(ah, 0, AR5K_OFDM_FIL_CNT);
+ ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
+}
+
+
+/**
+ * ath5k_ani_init() - Initialize ANI
+ * @mode: Which mode to use (auto, manual high, manual low, off)
+ *
+ * Initialize ANI according to mode.
+ */
+void
+ath5k_ani_init(struct ath5k_hw *ah, enum ath5k_ani_mode mode)
+{
+ /* ANI is only possible on 5212 and newer */
+ if (ah->ah_version < AR5K_AR5212)
+ return;
+
+ /* clear old state information */
+ memset(&ah->ah_sc->ani_state, 0, sizeof(ah->ah_sc->ani_state));
+
+ /* older hardware has more spur levels than newer */
+ if (ah->ah_mac_srev < AR5K_SREV_AR2414)
+ ah->ah_sc->ani_state.max_spur_level = 7;
+ else
+ ah->ah_sc->ani_state.max_spur_level = 2;
+
+ /* initial values for our ani parameters */
+ if (mode == ATH5K_ANI_MODE_OFF) {
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "ANI off\n");
+ } else if (mode == ATH5K_ANI_MODE_MANUAL_LOW) {
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "ANI manual low -> high sensitivity\n");
+ ath5k_ani_set_noise_immunity_level(ah, 0);
+ ath5k_ani_set_spur_immunity_level(ah, 0);
+ ath5k_ani_set_firstep_level(ah, 0);
+ ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
+ ath5k_ani_set_cck_weak_signal_detection(ah, true);
+ } else if (mode == ATH5K_ANI_MODE_MANUAL_HIGH) {
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "ANI manual high -> low sensitivity\n");
+ ath5k_ani_set_noise_immunity_level(ah,
+ ATH5K_ANI_MAX_NOISE_IMM_LVL);
+ ath5k_ani_set_spur_immunity_level(ah,
+ ah->ah_sc->ani_state.max_spur_level);
+ ath5k_ani_set_firstep_level(ah, ATH5K_ANI_MAX_FIRSTEP_LVL);
+ ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
+ ath5k_ani_set_cck_weak_signal_detection(ah, false);
+ } else if (mode == ATH5K_ANI_MODE_AUTO) {
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "ANI auto\n");
+ ath5k_ani_set_noise_immunity_level(ah, 0);
+ ath5k_ani_set_spur_immunity_level(ah, 0);
+ ath5k_ani_set_firstep_level(ah, 0);
+ ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
+ ath5k_ani_set_cck_weak_signal_detection(ah, false);
+ }
+
+ /* newer hardware has PHY error counter registers which we can use to
+	 * get OFDM and CCK error counts. Older hardware has to set the rx filter and
+ * report every single PHY error by calling ath5k_ani_phy_error_report()
+ */
+ if (mode == ATH5K_ANI_MODE_AUTO) {
+ if (ah->ah_capabilities.cap_has_phyerr_counters)
+ ath5k_enable_phy_err_counters(ah);
+ else
+ ath5k_hw_set_rx_filter(ah, ath5k_hw_get_rx_filter(ah) |
+ AR5K_RX_FILTER_PHYERR);
+ } else {
+ if (ah->ah_capabilities.cap_has_phyerr_counters)
+ ath5k_disable_phy_err_counters(ah);
+ else
+ ath5k_hw_set_rx_filter(ah, ath5k_hw_get_rx_filter(ah) &
+ ~AR5K_RX_FILTER_PHYERR);
+ }
+
+ ah->ah_sc->ani_state.ani_mode = mode;
+}
+
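+/*
+ * As an illustration (hypothetical caller): the mode can be changed at
+ * runtime simply by calling ath5k_ani_init() again, e.g. from a debug
+ * interface:
+ *
+ *	ath5k_ani_init(ah, ATH5K_ANI_MODE_MANUAL_HIGH);
+ *	ath5k_ani_init(ah, ATH5K_ANI_MODE_AUTO);
+ *
+ * ath5k_reset() re-runs ath5k_ani_init() with the mode kept in ani_state, so
+ * the selected mode persists across resets and channel changes.
+ */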
+
+/*** DEBUG ***/
+
+#ifdef CONFIG_ATH5K_DEBUG
+
+void
+ath5k_ani_print_counters(struct ath5k_hw *ah)
+{
+	/* reading these registers also clears them */
+ printk(KERN_NOTICE "ACK fail\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_ACK_FAIL));
+ printk(KERN_NOTICE "RTS fail\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_RTS_FAIL));
+ printk(KERN_NOTICE "RTS success\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_RTS_OK));
+ printk(KERN_NOTICE "FCS error\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_FCS_FAIL));
+
+ /* no clear */
+ printk(KERN_NOTICE "tx\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_PROFCNT_TX));
+ printk(KERN_NOTICE "rx\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_PROFCNT_RX));
+ printk(KERN_NOTICE "busy\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_PROFCNT_RXCLR));
+ printk(KERN_NOTICE "cycles\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_PROFCNT_CYCLE));
+
+ printk(KERN_NOTICE "AR5K_PHYERR_CNT1\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1));
+ printk(KERN_NOTICE "AR5K_PHYERR_CNT2\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2));
+ printk(KERN_NOTICE "AR5K_OFDM_FIL_CNT\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_OFDM_FIL_CNT));
+ printk(KERN_NOTICE "AR5K_CCK_FIL_CNT\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_CCK_FIL_CNT));
+}
+
+#endif
diff --git a/drivers/net/wireless/ath/ath5k/ani.h b/drivers/net/wireless/ath/ath5k/ani.h
new file mode 100644
index 000000000000..55cf26d8522c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/ani.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2010 Bruno Randolf <br1@einfach.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef ANI_H
+#define ANI_H
+
+/* these thresholds are relative to the ATH5K_ANI_LISTEN_PERIOD */
+#define ATH5K_ANI_LISTEN_PERIOD 100
+#define ATH5K_ANI_OFDM_TRIG_HIGH 500
+#define ATH5K_ANI_OFDM_TRIG_LOW 200
+#define ATH5K_ANI_CCK_TRIG_HIGH 200
+#define ATH5K_ANI_CCK_TRIG_LOW 100
+
+/* average beacon RSSI thresholds */
+#define ATH5K_ANI_RSSI_THR_HIGH 40
+#define ATH5K_ANI_RSSI_THR_LOW 7
+
+/* maximum available levels */
+#define ATH5K_ANI_MAX_FIRSTEP_LVL 2
+#define ATH5K_ANI_MAX_NOISE_IMM_LVL 1
+
+
+/**
+ * enum ath5k_ani_mode - mode for ANI / noise sensitivity
+ *
+ * @ATH5K_ANI_MODE_OFF: Turn ANI off. This can be useful to just stop the ANI
+ * algorithm after it has been on auto mode.
+ * @ATH5K_ANI_MODE_MANUAL_LOW: Manually set all immunity parameters to low,
+ * maximizing sensitivity. ANI will not run.
+ * @ATH5K_ANI_MODE_MANUAL_HIGH: Manually set all immunity parameters to high,
+ * minimizing sensitivity. ANI will not run.
+ * @ATH5K_ANI_MODE_AUTO: Automatically control immunity parameters based on the
+ * amount of OFDM and CCK frame errors (default).
+ */
+enum ath5k_ani_mode {
+ ATH5K_ANI_MODE_OFF = 0,
+ ATH5K_ANI_MODE_MANUAL_LOW = 1,
+ ATH5K_ANI_MODE_MANUAL_HIGH = 2,
+ ATH5K_ANI_MODE_AUTO = 3
+};
+
+
+/**
+ * struct ath5k_ani_state - ANI state and associated counters
+ *
+ * @max_spur_level: the maximum spur level is chip dependent
+ */
+struct ath5k_ani_state {
+ enum ath5k_ani_mode ani_mode;
+
+ /* state */
+ int noise_imm_level;
+ int spur_level;
+ int firstep_level;
+ bool ofdm_weak_sig;
+ bool cck_weak_sig;
+
+ int max_spur_level;
+
+ /* used by the algorithm */
+ unsigned int listen_time;
+ unsigned int ofdm_errors;
+ unsigned int cck_errors;
+
+ /* debug/statistics only: numbers from last ANI calibration */
+ unsigned int pfc_tx;
+ unsigned int pfc_rx;
+ unsigned int pfc_busy;
+ unsigned int pfc_cycles;
+ unsigned int last_listen;
+ unsigned int last_ofdm_errors;
+ unsigned int last_cck_errors;
+ unsigned int sum_ofdm_errors;
+ unsigned int sum_cck_errors;
+};
+
+void ath5k_ani_init(struct ath5k_hw *ah, enum ath5k_ani_mode mode);
+void ath5k_ani_mib_intr(struct ath5k_hw *ah);
+void ath5k_ani_calibration(struct ath5k_hw *ah);
+void ath5k_ani_phy_error_report(struct ath5k_hw *ah,
+ enum ath5k_phy_error_code phyerr);
+
+/* for manual control */
+void ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level);
+void ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level);
+void ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level);
+void ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on);
+void ath5k_ani_set_cck_weak_signal_detection(struct ath5k_hw *ah, bool on);
+
+void ath5k_ani_print_counters(struct ath5k_hw *ah);
+
+#endif /* ANI_H */
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index ac67f02e26d8..2785946f659a 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -202,7 +202,8 @@
#define AR5K_TUNE_MAX_TXPOWER 63
#define AR5K_TUNE_DEFAULT_TXPOWER 25
#define AR5K_TUNE_TPC_TXPOWER false
-#define AR5K_TUNE_HWTXTRIES 4
+#define ATH5K_TUNE_CALIBRATION_INTERVAL_FULL 10000 /* 10 sec */
+#define ATH5K_TUNE_CALIBRATION_INTERVAL_ANI 1000 /* 1 sec */
#define AR5K_INIT_CARR_SENSE_EN 1
@@ -614,28 +615,6 @@ struct ath5k_rx_status {
#define AR5K_BEACON_ENA 0x00800000 /*enable beacon xmit*/
#define AR5K_BEACON_RESET_TSF 0x01000000 /*force a TSF reset*/
-#if 0
-/**
- * struct ath5k_beacon_state - Per-station beacon timer state.
- * @bs_interval: in TU's, can also include the above flags
- * @bs_cfp_max_duration: if non-zero hw is setup to coexist with a
- * Point Coordination Function capable AP
- */
-struct ath5k_beacon_state {
- u32 bs_next_beacon;
- u32 bs_next_dtim;
- u32 bs_interval;
- u8 bs_dtim_period;
- u8 bs_cfp_period;
- u16 bs_cfp_max_duration;
- u16 bs_cfp_du_remain;
- u16 bs_tim_offset;
- u16 bs_sleep_duration;
- u16 bs_bmiss_threshold;
- u32 bs_cfp_next;
-};
-#endif
-
/*
* TSF to TU conversion:
@@ -822,9 +801,9 @@ struct ath5k_athchan_2ghz {
* @AR5K_INT_TXURN: received when we should increase the TX trigger threshold
* We currently do increments on interrupt by
* (AR5K_TUNE_MAX_TX_FIFO_THRES - current_trigger_level) / 2
- * @AR5K_INT_MIB: Indicates the Management Information Base counters should be
- * checked. We should do this with ath5k_hw_update_mib_counters() but
- * it seems we should also then do some noise immunity work.
+ * @AR5K_INT_MIB: Indicates that either the Management Information Base counters or
+ * one of the PHY error counters reached the maximum value and should be
+ * read and cleared.
* @AR5K_INT_RXPHY: RX PHY Error
* @AR5K_INT_RXKCM: RX Key cache miss
* @AR5K_INT_SWBA: SoftWare Beacon Alert - indicates its time to send a
@@ -912,10 +891,11 @@ enum ath5k_int {
AR5K_INT_NOCARD = 0xffffffff
};
-/* Software interrupts used for calibration */
-enum ath5k_software_interrupt {
- AR5K_SWI_FULL_CALIBRATION = 0x01,
- AR5K_SWI_SHORT_CALIBRATION = 0x02,
+/* mask which calibration is active at the moment */
+enum ath5k_calibration_mask {
+ AR5K_CALIBRATION_FULL = 0x01,
+ AR5K_CALIBRATION_SHORT = 0x02,
+ AR5K_CALIBRATION_ANI = 0x04,
};
/*
@@ -1004,6 +984,8 @@ struct ath5k_capabilities {
struct {
u8 q_tx_num;
} cap_queues;
+
+ bool cap_has_phyerr_counters;
};
/* size of noise floor history (keep it a power of two) */
@@ -1014,6 +996,15 @@ struct ath5k_nfcal_hist
s16 nfval[ATH5K_NF_CAL_HIST_MAX]; /* last few noise floors */
};
+/**
+ * struct ath5k_avg_val - Helper structure for average calculation
+ * @avg: contains the actual average value
+ * @avg_weight: is used internally during calculation to prevent rounding errors
+ */
+struct ath5k_avg_val {
+ int avg;
+ int avg_weight;
+};
/***************************************\
HARDWARE ABSTRACTION LAYER STRUCTURE
@@ -1028,7 +1019,6 @@ struct ath5k_nfcal_hist
/* TODO: Clean up and merge with ath5k_softc */
struct ath5k_hw {
- u32 ah_magic;
struct ath_common common;
struct ath5k_softc *ah_sc;
@@ -1036,7 +1026,6 @@ struct ath5k_hw {
enum ath5k_int ah_imr;
- enum nl80211_iftype ah_op_mode;
struct ieee80211_channel *ah_current_channel;
bool ah_turbo;
bool ah_calibration;
@@ -1049,7 +1038,6 @@ struct ath5k_hw {
u32 ah_phy;
u32 ah_mac_srev;
u16 ah_mac_version;
- u16 ah_mac_revision;
u16 ah_phy_revision;
u16 ah_radio_5ghz_revision;
u16 ah_radio_2ghz_revision;
@@ -1071,8 +1059,6 @@ struct ath5k_hw {
u8 ah_def_ant;
bool ah_software_retry;
- int ah_gpio_npins;
-
struct ath5k_capabilities ah_capabilities;
struct ath5k_txq_info ah_txq[AR5K_NUM_TX_QUEUES];
@@ -1123,17 +1109,18 @@ struct ath5k_hw {
struct ath5k_nfcal_hist ah_nfcal_hist;
+ /* average beacon RSSI in our BSS (used by ANI) */
+ struct ath5k_avg_val ah_beacon_rssi_avg;
+
/* noise floor from last periodic calibration */
s32 ah_noise_floor;
/* Calibration timestamp */
- unsigned long ah_cal_tstamp;
-
- /* Calibration interval (secs) */
- u8 ah_cal_intval;
+ unsigned long ah_cal_next_full;
+ unsigned long ah_cal_next_ani;
- /* Software interrupt mask */
- u8 ah_swi_mask;
+ /* Calibration mask */
+ u8 ah_cal_mask;
/*
* Function pointers
@@ -1141,9 +1128,9 @@ struct ath5k_hw {
int (*ah_setup_rx_desc)(struct ath5k_hw *ah, struct ath5k_desc *desc,
u32 size, unsigned int flags);
int (*ah_setup_tx_desc)(struct ath5k_hw *, struct ath5k_desc *,
- unsigned int, unsigned int, enum ath5k_pkt_type, unsigned int,
+ unsigned int, unsigned int, int, enum ath5k_pkt_type,
unsigned int, unsigned int, unsigned int, unsigned int,
- unsigned int, unsigned int, unsigned int);
+ unsigned int, unsigned int, unsigned int, unsigned int);
int (*ah_setup_mrr_tx_desc)(struct ath5k_hw *, struct ath5k_desc *,
unsigned int, unsigned int, unsigned int, unsigned int,
unsigned int, unsigned int);
@@ -1158,158 +1145,145 @@ struct ath5k_hw {
*/
/* Attach/Detach Functions */
-extern int ath5k_hw_attach(struct ath5k_softc *sc);
-extern void ath5k_hw_detach(struct ath5k_hw *ah);
+int ath5k_hw_attach(struct ath5k_softc *sc);
+void ath5k_hw_detach(struct ath5k_hw *ah);
/* LED functions */
-extern int ath5k_init_leds(struct ath5k_softc *sc);
-extern void ath5k_led_enable(struct ath5k_softc *sc);
-extern void ath5k_led_off(struct ath5k_softc *sc);
-extern void ath5k_unregister_leds(struct ath5k_softc *sc);
+int ath5k_init_leds(struct ath5k_softc *sc);
+void ath5k_led_enable(struct ath5k_softc *sc);
+void ath5k_led_off(struct ath5k_softc *sc);
+void ath5k_unregister_leds(struct ath5k_softc *sc);
/* Reset Functions */
-extern int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial);
-extern int ath5k_hw_on_hold(struct ath5k_hw *ah);
-extern int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode, struct ieee80211_channel *channel, bool change_channel);
+int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial);
+int ath5k_hw_on_hold(struct ath5k_hw *ah);
+int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
+ struct ieee80211_channel *channel, bool change_channel);
+int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
+ bool is_set);
/* Power management functions */
-extern int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode, bool set_chip, u16 sleep_duration);
/* DMA Related Functions */
-extern void ath5k_hw_start_rx_dma(struct ath5k_hw *ah);
-extern int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah);
-extern u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah);
-extern void ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr);
-extern int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue);
-extern int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue);
-extern u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue);
-extern int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue,
+void ath5k_hw_start_rx_dma(struct ath5k_hw *ah);
+int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah);
+u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah);
+void ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr);
+int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue);
+int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue);
+u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue);
+int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue,
u32 phys_addr);
-extern int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase);
+int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase);
/* Interrupt handling */
-extern bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah);
-extern int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask);
-extern enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum
-ath5k_int new_mask);
-extern void ath5k_hw_update_mib_counters(struct ath5k_hw *ah, struct ieee80211_low_level_stats *stats);
+bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah);
+int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask);
+enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask);
+void ath5k_hw_update_mib_counters(struct ath5k_hw *ah);
/* EEPROM access functions */
-extern int ath5k_eeprom_init(struct ath5k_hw *ah);
-extern void ath5k_eeprom_detach(struct ath5k_hw *ah);
-extern int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac);
-extern bool ath5k_eeprom_is_hb63(struct ath5k_hw *ah);
+int ath5k_eeprom_init(struct ath5k_hw *ah);
+void ath5k_eeprom_detach(struct ath5k_hw *ah);
+int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac);
/* Protocol Control Unit Functions */
-extern int ath5k_hw_set_opmode(struct ath5k_hw *ah);
-extern void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class);
+extern int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype opmode);
+void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class);
/* BSSID Functions */
-extern int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac);
-extern void ath5k_hw_set_associd(struct ath5k_hw *ah);
-extern void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask);
+int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac);
+void ath5k_hw_set_associd(struct ath5k_hw *ah);
+void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask);
/* Receive start/stop functions */
-extern void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah);
-extern void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah);
+void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah);
+void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah);
/* RX Filter functions */
-extern void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1);
-extern int ath5k_hw_set_mcast_filter_idx(struct ath5k_hw *ah, u32 index);
-extern int ath5k_hw_clear_mcast_filter_idx(struct ath5k_hw *ah, u32 index);
-extern u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah);
-extern void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter);
+void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1);
+u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah);
+void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter);
/* Beacon control functions */
-extern u32 ath5k_hw_get_tsf32(struct ath5k_hw *ah);
-extern u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah);
-extern void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64);
-extern void ath5k_hw_reset_tsf(struct ath5k_hw *ah);
-extern void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval);
-#if 0
-extern int ath5k_hw_set_beacon_timers(struct ath5k_hw *ah, const struct ath5k_beacon_state *state);
-extern void ath5k_hw_reset_beacon(struct ath5k_hw *ah);
-extern int ath5k_hw_beaconq_finish(struct ath5k_hw *ah, unsigned long phys_addr);
-#endif
+u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah);
+void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64);
+void ath5k_hw_reset_tsf(struct ath5k_hw *ah);
+void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval);
/* ACK bit rate */
void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high);
-/* ACK/CTS Timeouts */
-extern int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout);
-extern unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah);
-extern int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout);
-extern unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah);
/* Clock rate related functions */
unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec);
unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock);
unsigned int ath5k_hw_get_clockrate(struct ath5k_hw *ah);
/* Key table (WEP) functions */
-extern int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry);
-extern int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry);
-extern int ath5k_hw_set_key(struct ath5k_hw *ah, u16 entry, const struct ieee80211_key_conf *key, const u8 *mac);
-extern int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac);
+int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry);
+int ath5k_hw_set_key(struct ath5k_hw *ah, u16 entry,
+ const struct ieee80211_key_conf *key, const u8 *mac);
+int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac);
/* Queue Control Unit, DFS Control Unit Functions */
-extern int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue, struct ath5k_txq_info *queue_info);
-extern int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
- const struct ath5k_txq_info *queue_info);
-extern int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah,
- enum ath5k_tx_queue queue_type,
- struct ath5k_txq_info *queue_info);
-extern u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue);
-extern void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue);
-extern int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue);
-extern unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah);
-extern int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time);
+int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
+ struct ath5k_txq_info *queue_info);
+int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
+ const struct ath5k_txq_info *queue_info);
+int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah,
+ enum ath5k_tx_queue queue_type,
+ struct ath5k_txq_info *queue_info);
+u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue);
+void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue);
+int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue);
+int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time);
/* Hardware Descriptor Functions */
-extern int ath5k_hw_init_desc_functions(struct ath5k_hw *ah);
+int ath5k_hw_init_desc_functions(struct ath5k_hw *ah);
/* GPIO Functions */
-extern void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state);
-extern int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio);
-extern int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio);
-extern u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio);
-extern int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val);
-extern void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio, u32 interrupt_level);
+void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state);
+int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio);
+int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio);
+u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio);
+int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val);
+void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
+ u32 interrupt_level);
/* rfkill Functions */
-extern void ath5k_rfkill_hw_start(struct ath5k_hw *ah);
-extern void ath5k_rfkill_hw_stop(struct ath5k_hw *ah);
+void ath5k_rfkill_hw_start(struct ath5k_hw *ah);
+void ath5k_rfkill_hw_stop(struct ath5k_hw *ah);
/* Misc functions */
int ath5k_hw_set_capabilities(struct ath5k_hw *ah);
-extern int ath5k_hw_get_capability(struct ath5k_hw *ah, enum ath5k_capability_type cap_type, u32 capability, u32 *result);
-extern int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid, u16 assoc_id);
-extern int ath5k_hw_disable_pspoll(struct ath5k_hw *ah);
+int ath5k_hw_get_capability(struct ath5k_hw *ah,
+ enum ath5k_capability_type cap_type, u32 capability,
+ u32 *result);
+int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid, u16 assoc_id);
+int ath5k_hw_disable_pspoll(struct ath5k_hw *ah);
/* Initial register settings functions */
-extern int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel);
+int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel);
/* Initialize RF */
-extern int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
- struct ieee80211_channel *channel,
- unsigned int mode);
-extern int ath5k_hw_rfgain_init(struct ath5k_hw *ah, unsigned int freq);
-extern enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah);
-extern int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah);
+int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel,
+ unsigned int mode);
+int ath5k_hw_rfgain_init(struct ath5k_hw *ah, unsigned int freq);
+enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah);
+int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah);
/* PHY/RF channel functions */
-extern bool ath5k_channel_ok(struct ath5k_hw *ah, u16 freq, unsigned int flags);
-extern int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel);
+bool ath5k_channel_ok(struct ath5k_hw *ah, u16 freq, unsigned int flags);
+int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel);
/* PHY calibration */
void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah);
-extern int ath5k_hw_phy_calibrate(struct ath5k_hw *ah, struct ieee80211_channel *channel);
-extern int ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq);
-extern s16 ath5k_hw_get_noise_floor(struct ath5k_hw *ah);
-extern void ath5k_hw_calibration_poll(struct ath5k_hw *ah);
+int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel);
/* Spur mitigation */
bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
- struct ieee80211_channel *channel);
+ struct ieee80211_channel *channel);
void ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
- struct ieee80211_channel *channel);
+ struct ieee80211_channel *channel);
/* Misc PHY functions */
-extern u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan);
-extern int ath5k_hw_phy_disable(struct ath5k_hw *ah);
+u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan);
+int ath5k_hw_phy_disable(struct ath5k_hw *ah);
/* Antenna control */
-extern void ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode);
-extern void ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant);
-extern unsigned int ath5k_hw_get_def_antenna(struct ath5k_hw *ah);
+void ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode);
/* TX power setup */
-extern int ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, u8 ee_mode, u8 txpower);
-extern int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower);
+int ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
+ u8 ee_mode, u8 txpower);
+int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower);
/*
* Functions used internaly
@@ -1335,29 +1309,6 @@ static inline void ath5k_hw_reg_write(struct ath5k_hw *ah, u32 val, u16 reg)
iowrite32(val, ah->ah_iobase + reg);
}
-#if defined(_ATH5K_RESET) || defined(_ATH5K_PHY)
-/*
- * Check if a register write has been completed
- */
-static int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag,
- u32 val, bool is_set)
-{
- int i;
- u32 data;
-
- for (i = AR5K_TUNE_REGISTER_TIMEOUT; i > 0; i--) {
- data = ath5k_hw_reg_read(ah, reg);
- if (is_set && (data & flag))
- break;
- else if ((data & flag) == val)
- break;
- udelay(15);
- }
-
- return (i <= 0) ? -EAGAIN : 0;
-}
-#endif
-
static inline u32 ath5k_hw_bitswap(u32 val, unsigned int bits)
{
u32 retval = 0, bit, i;
@@ -1370,9 +1321,27 @@ static inline u32 ath5k_hw_bitswap(u32 val, unsigned int bits)
return retval;
}
-static inline int ath5k_pad_size(int hdrlen)
+#define AVG_SAMPLES 8
+#define AVG_FACTOR 1000
+
+/**
+ * ath5k_moving_average - Exponentially weighted moving average
+ * @avg: average structure
+ * @val: current value
+ *
+ * This implementation makes use of a struct ath5k_avg_val to prevent rounding
+ * errors.
+ */
+static inline struct ath5k_avg_val
+ath5k_moving_average(const struct ath5k_avg_val avg, const int val)
{
- return (hdrlen < 24) ? 0 : hdrlen & 3;
+ struct ath5k_avg_val new;
+ new.avg_weight = avg.avg_weight ?
+ (((avg.avg_weight * ((AVG_SAMPLES) - 1)) +
+ (val * (AVG_FACTOR))) / (AVG_SAMPLES)) :
+ (val * (AVG_FACTOR));
+ new.avg = new.avg_weight / (AVG_FACTOR);
+ return new;
}
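+
+/*
+ * Worked example with AVG_SAMPLES = 8 and AVG_FACTOR = 1000: a first sample
+ * of 40 sets avg_weight to 40000 (avg = 40); a following sample of 48 gives
+ * avg_weight = (40000 * 7 + 48000) / 8 = 41000, so avg becomes 41. The
+ * average thus moves gradually towards new values instead of jumping.
+ */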
#endif
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index dc0786cc2639..e0c244b02f05 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -114,7 +114,6 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
/*
* HW information
*/
- ah->ah_op_mode = NL80211_IFTYPE_STATION;
ah->ah_radar.r_enabled = AR5K_TUNE_RADAR_ALERT;
ah->ah_turbo = false;
ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
@@ -124,6 +123,9 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
ah->ah_cw_min = AR5K_TUNE_CWMIN;
ah->ah_limit_tx_retries = AR5K_INIT_TX_RETRY;
ah->ah_software_retry = false;
+ ah->ah_ant_mode = AR5K_ANTMODE_DEFAULT;
+ ah->ah_noise_floor = -95; /* until first NF calibration is run */
+ sc->ani_state.ani_mode = ATH5K_ANI_MODE_AUTO;
/*
* Find the mac version
@@ -149,7 +151,6 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
/* Get MAC, PHY and RADIO revisions */
ah->ah_mac_srev = srev;
ah->ah_mac_version = AR5K_REG_MS(srev, AR5K_SREV_VER);
- ah->ah_mac_revision = AR5K_REG_MS(srev, AR5K_SREV_REV);
ah->ah_phy_revision = ath5k_hw_reg_read(ah, AR5K_PHY_CHIP_ID) &
0xffffffff;
ah->ah_radio_5ghz_revision = ath5k_hw_radio_revision(ah,
@@ -328,7 +329,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
/* Set BSSID to bcast address: ff:ff:ff:ff:ff:ff for now */
memcpy(common->curbssid, ath_bcast_mac, ETH_ALEN);
ath5k_hw_set_associd(ah);
- ath5k_hw_set_opmode(ah);
+ ath5k_hw_set_opmode(ah, sc->opmode);
ath5k_hw_rfgain_opt_init(ah);
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 3abbe7513ab5..5f04cf38a5bc 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -59,8 +59,8 @@
#include "base.h"
#include "reg.h"
#include "debug.h"
+#include "ani.h"
-static u8 ath5k_calinterval = 10; /* Calibrate PHY every 10 secs (TODO: Fixme) */
static int modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
@@ -199,7 +199,7 @@ static void __devexit ath5k_pci_remove(struct pci_dev *pdev);
static int ath5k_pci_suspend(struct device *dev);
static int ath5k_pci_resume(struct device *dev);
-SIMPLE_DEV_PM_OPS(ath5k_pm_ops, ath5k_pci_suspend, ath5k_pci_resume);
+static SIMPLE_DEV_PM_OPS(ath5k_pm_ops, ath5k_pci_suspend, ath5k_pci_resume);
#define ATH5K_PM_OPS (&ath5k_pm_ops)
#else
#define ATH5K_PM_OPS NULL
@@ -231,7 +231,7 @@ static void ath5k_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
static int ath5k_config(struct ieee80211_hw *hw, u32 changed);
static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw,
- int mc_count, struct dev_addr_list *mc_list);
+ struct netdev_hw_addr_list *mc_list);
static void ath5k_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *new_flags,
@@ -242,6 +242,8 @@ static int ath5k_set_key(struct ieee80211_hw *hw,
struct ieee80211_key_conf *key);
static int ath5k_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats);
+static int ath5k_get_survey(struct ieee80211_hw *hw,
+ int idx, struct survey_info *survey);
static u64 ath5k_get_tsf(struct ieee80211_hw *hw);
static void ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf);
static void ath5k_reset_tsf(struct ieee80211_hw *hw);
@@ -267,6 +269,7 @@ static const struct ieee80211_ops ath5k_hw_ops = {
.configure_filter = ath5k_configure_filter,
.set_key = ath5k_set_key,
.get_stats = ath5k_get_stats,
+ .get_survey = ath5k_get_survey,
.conf_tx = NULL,
.get_tsf = ath5k_get_tsf,
.set_tsf = ath5k_set_tsf,
@@ -308,7 +311,7 @@ static int ath5k_rxbuf_setup(struct ath5k_softc *sc,
struct ath5k_buf *bf);
static int ath5k_txbuf_setup(struct ath5k_softc *sc,
struct ath5k_buf *bf,
- struct ath5k_txq *txq);
+ struct ath5k_txq *txq, int padsize);
static inline void ath5k_txbuf_free(struct ath5k_softc *sc,
struct ath5k_buf *bf)
{
@@ -365,6 +368,7 @@ static void ath5k_beacon_send(struct ath5k_softc *sc);
static void ath5k_beacon_config(struct ath5k_softc *sc);
static void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);
static void ath5k_tasklet_beacon(unsigned long data);
+static void ath5k_tasklet_ani(unsigned long data);
static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
{
@@ -544,8 +548,7 @@ ath5k_pci_probe(struct pci_dev *pdev,
SET_IEEE80211_DEV(hw, &pdev->dev);
hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_NOISE_DBM;
+ IEEE80211_HW_SIGNAL_DBM;
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_AP) |
@@ -830,6 +833,7 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
tasklet_init(&sc->restq, ath5k_tasklet_reset, (unsigned long)sc);
tasklet_init(&sc->calib, ath5k_tasklet_calibrate, (unsigned long)sc);
tasklet_init(&sc->beacontq, ath5k_tasklet_beacon, (unsigned long)sc);
+ tasklet_init(&sc->ani_tasklet, ath5k_tasklet_ani, (unsigned long)sc);
ret = ath5k_eeprom_read_mac(ah, mac);
if (ret) {
@@ -1138,8 +1142,6 @@ ath5k_mode_setup(struct ath5k_softc *sc)
struct ath5k_hw *ah = sc->ah;
u32 rfilt;
- ah->ah_op_mode = sc->opmode;
-
/* configure rx filter */
rfilt = sc->filter_flags;
ath5k_hw_set_rx_filter(ah, rfilt);
@@ -1148,8 +1150,9 @@ ath5k_mode_setup(struct ath5k_softc *sc)
ath5k_hw_set_bssid_mask(ah, sc->bssidmask);
/* configure operational mode */
- ath5k_hw_set_opmode(ah);
+ ath5k_hw_set_opmode(ah, sc->opmode);
+ ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "mode setup opmode %d\n", sc->opmode);
ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
}
@@ -1272,7 +1275,7 @@ static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
static int
ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
- struct ath5k_txq *txq)
+ struct ath5k_txq *txq, int padsize)
{
struct ath5k_hw *ah = sc->ah;
struct ath5k_desc *ds = bf->desc;
@@ -1324,7 +1327,7 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
sc->vif, pktlen, info));
}
ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
- ieee80211_get_hdrlen_from_skb(skb),
+ ieee80211_get_hdrlen_from_skb(skb), padsize,
get_hw_packet_type(skb),
(sc->power_level * 2),
hw_rate,
@@ -1636,7 +1639,6 @@ ath5k_txq_cleanup(struct ath5k_softc *sc)
sc->txqs[i].link);
}
}
- ieee80211_wake_queues(sc->hw); /* XXX move to callers */
for (i = 0; i < ARRAY_SIZE(sc->txqs); i++)
if (sc->txqs[i].setup)
@@ -1807,6 +1809,86 @@ ath5k_check_ibss_tsf(struct ath5k_softc *sc, struct sk_buff *skb,
}
static void
+ath5k_update_beacon_rssi(struct ath5k_softc *sc, struct sk_buff *skb, int rssi)
+{
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
+ struct ath5k_hw *ah = sc->ah;
+ struct ath_common *common = ath5k_hw_common(ah);
+
+ /* only beacons from our BSSID */
+ if (!ieee80211_is_beacon(mgmt->frame_control) ||
+ memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) != 0)
+ return;
+
+ ah->ah_beacon_rssi_avg = ath5k_moving_average(ah->ah_beacon_rssi_avg,
+ rssi);
+
+ /* in IBSS mode we should keep RSSI statistics per neighbour */
+ /* le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS */
+}
+
+/*
+ * Compute padding position. skb must contain an IEEE 802.11 frame
+ */
+static int ath5k_common_padpos(struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ __le16 frame_control = hdr->frame_control;
+ int padpos = 24;
+
+ if (ieee80211_has_a4(frame_control)) {
+ padpos += ETH_ALEN;
+ }
+ if (ieee80211_is_data_qos(frame_control)) {
+ padpos += IEEE80211_QOS_CTL_LEN;
+ }
+
+ return padpos;
+}
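+
+/*
+ * For example, a QoS data frame has a 26 byte header (24 bytes plus 2 bytes
+ * of QoS control), so padpos is 26 and padpos & 3 = 2: two padding bytes are
+ * needed after the header to align the payload to a 4 byte boundary.
+ */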
+
+/*
+ * This function expects an 802.11 frame and returns the number of
+ * bytes added, or -1 if we don't have enough headroom.
+ */
+
+static int ath5k_add_padding(struct sk_buff *skb)
+{
+ int padpos = ath5k_common_padpos(skb);
+ int padsize = padpos & 3;
+
+	if (padsize && skb->len > padpos) {
+
+ if (skb_headroom(skb) < padsize)
+ return -1;
+
+ skb_push(skb, padsize);
+		memmove(skb->data, skb->data + padsize, padpos);
+ return padsize;
+ }
+
+ return 0;
+}
+
+/*
+ * This function expects an 802.11 frame and returns the number of
+ * bytes removed.
+ */
+
+static int ath5k_remove_padding(struct sk_buff *skb)
+{
+ int padpos = ath5k_common_padpos(skb);
+ int padsize = padpos & 3;
+
+	if (padsize && skb->len >= padpos + padsize) {
+ memmove(skb->data + padsize, skb->data, padpos);
+ skb_pull(skb, padsize);
+ return padsize;
+ }
+
+ return 0;
+}
+
+static void
ath5k_tasklet_rx(unsigned long data)
{
struct ieee80211_rx_status *rxs;
@@ -1819,8 +1901,6 @@ ath5k_tasklet_rx(unsigned long data)
struct ath5k_buf *bf;
struct ath5k_desc *ds;
int ret;
- int hdrlen;
- int padsize;
int rx_flag;
spin_lock(&sc->rxbuflock);
@@ -1845,18 +1925,24 @@ ath5k_tasklet_rx(unsigned long data)
break;
else if (unlikely(ret)) {
ATH5K_ERR(sc, "error in processing rx descriptor\n");
+ sc->stats.rxerr_proc++;
spin_unlock(&sc->rxbuflock);
return;
}
- if (unlikely(rs.rs_more)) {
- ATH5K_WARN(sc, "unsupported jumbo\n");
- goto next;
- }
+ sc->stats.rx_all_count++;
if (unlikely(rs.rs_status)) {
- if (rs.rs_status & AR5K_RXERR_PHY)
+ if (rs.rs_status & AR5K_RXERR_CRC)
+ sc->stats.rxerr_crc++;
+ if (rs.rs_status & AR5K_RXERR_FIFO)
+ sc->stats.rxerr_fifo++;
+ if (rs.rs_status & AR5K_RXERR_PHY) {
+ sc->stats.rxerr_phy++;
+ if (rs.rs_phyerr > 0 && rs.rs_phyerr < 32)
+ sc->stats.rxerr_phy_code[rs.rs_phyerr]++;
goto next;
+ }
if (rs.rs_status & AR5K_RXERR_DECRYPT) {
/*
* Decrypt error. If the error occurred
@@ -1868,12 +1954,14 @@ ath5k_tasklet_rx(unsigned long data)
*
* XXX do key cache faulting
*/
+ sc->stats.rxerr_decrypt++;
if (rs.rs_keyix == AR5K_RXKEYIX_INVALID &&
!(rs.rs_status & AR5K_RXERR_CRC))
goto accept;
}
if (rs.rs_status & AR5K_RXERR_MIC) {
rx_flag |= RX_FLAG_MMIC_ERROR;
+ sc->stats.rxerr_mic++;
goto accept;
}
@@ -1883,6 +1971,12 @@ ath5k_tasklet_rx(unsigned long data)
sc->opmode != NL80211_IFTYPE_MONITOR)
goto next;
}
+
+ if (unlikely(rs.rs_more)) {
+ sc->stats.rxerr_jumbo++;
+			goto next;
+		}
accept:
next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr);
@@ -1905,12 +1999,8 @@ accept:
* bytes and we can optimize this a bit. In addition, we must
* not try to remove padding from short control frames that do
* not have payload. */
- hdrlen = ieee80211_get_hdrlen_from_skb(skb);
- padsize = ath5k_pad_size(hdrlen);
- if (padsize) {
- memmove(skb->data + padsize, skb->data, hdrlen);
- skb_pull(skb, padsize);
- }
+ ath5k_remove_padding(skb);
+
rxs = IEEE80211_SKB_RXCB(skb);
/*
@@ -1939,10 +2029,15 @@ accept:
rxs->freq = sc->curchan->center_freq;
rxs->band = sc->curband->band;
- rxs->noise = sc->ah->ah_noise_floor;
- rxs->signal = rxs->noise + rs.rs_rssi;
+ rxs->signal = sc->ah->ah_noise_floor + rs.rs_rssi;
rxs->antenna = rs.rs_antenna;
+
+ if (rs.rs_antenna > 0 && rs.rs_antenna < 5)
+ sc->stats.antenna_rx[rs.rs_antenna]++;
+ else
+ sc->stats.antenna_rx[0]++; /* invalid */
+
rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate);
rxs->flag |= ath5k_rx_decrypted(sc, ds, skb, &rs);
@@ -1952,6 +2047,8 @@ accept:
ath5k_debug_dump_skb(sc, skb, "RX ", 0);
+ ath5k_update_beacon_rssi(sc, skb, rs.rs_rssi);
+
/* check beacons in IBSS mode */
if (sc->opmode == NL80211_IFTYPE_ADHOC)
ath5k_check_ibss_tsf(sc, skb, rxs);
@@ -1988,6 +2085,17 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
list_for_each_entry_safe(bf, bf0, &txq->q, list) {
ds = bf->desc;
+ /*
+ * It's possible that the hardware can say the buffer is
+ * completed when it hasn't yet loaded the ds_link from
+ * host memory and moved on. If there are more TX
+ * descriptors in the queue, wait for TXDP to change
+ * before processing this one.
+ */
+ if (ath5k_hw_get_txdp(sc->ah, txq->qnum) == bf->daddr &&
+ !list_is_last(&bf->list, &txq->q))
+ break;
+
ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts);
if (unlikely(ret == -EINPROGRESS))
break;
@@ -1997,6 +2105,7 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
break;
}
+ sc->stats.tx_all_count++;
skb = bf->skb;
info = IEEE80211_SKB_CB(skb);
bf->skb = NULL;
@@ -2022,14 +2131,31 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
info->status.rates[ts.ts_final_idx].count++;
if (unlikely(ts.ts_status)) {
- sc->ll_stats.dot11ACKFailureCount++;
- if (ts.ts_status & AR5K_TXERR_FILT)
+ sc->stats.ack_fail++;
+ if (ts.ts_status & AR5K_TXERR_FILT) {
info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+ sc->stats.txerr_filt++;
+ }
+ if (ts.ts_status & AR5K_TXERR_XRETRY)
+ sc->stats.txerr_retry++;
+ if (ts.ts_status & AR5K_TXERR_FIFO)
+ sc->stats.txerr_fifo++;
} else {
info->flags |= IEEE80211_TX_STAT_ACK;
info->status.ack_signal = ts.ts_rssi;
}
+ /*
+ * Remove MAC header padding before giving the frame
+ * back to mac80211.
+ */
+ ath5k_remove_padding(skb);
+
+ if (ts.ts_antenna > 0 && ts.ts_antenna < 5)
+ sc->stats.antenna_tx[ts.ts_antenna]++;
+ else
+ sc->stats.antenna_tx[0]++; /* invalid */
+
ieee80211_tx_status(sc->hw, skb);
spin_lock(&sc->txbuflock);
@@ -2073,6 +2199,7 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
int ret = 0;
u8 antenna;
u32 flags;
+ const int padsize = 0;
bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
@@ -2120,7 +2247,7 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
* from tx power (value is in dB units already) */
ds->ds_data = bf->skbaddr;
ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
- ieee80211_get_hdrlen_from_skb(skb),
+ ieee80211_get_hdrlen_from_skb(skb), padsize,
AR5K_PKT_TYPE_BEACON, (sc->power_level * 2),
ieee80211_get_tx_rate(sc->hw, info)->hw_value,
1, AR5K_TXKEYIX_INVALID,
@@ -2407,9 +2534,6 @@ ath5k_init(struct ath5k_softc *sc)
*/
ath5k_stop_locked(sc);
- /* Set PHY calibration interval */
- ah->ah_cal_intval = ath5k_calinterval;
-
/*
* The basic interface to setting the hardware in a good
* state is ``reset''. On return the hardware is known to
@@ -2421,7 +2545,8 @@ ath5k_init(struct ath5k_softc *sc)
sc->curband = &sc->sbands[sc->curchan->band];
sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
- AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_SWI;
+ AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;
+
ret = ath5k_reset(sc, NULL);
if (ret)
goto done;
@@ -2435,8 +2560,7 @@ ath5k_init(struct ath5k_softc *sc)
for (i = 0; i < AR5K_KEYTABLE_SIZE; i++)
ath5k_hw_reset_key(ah, i);
- /* Set ack to be sent at low bit-rates */
- ath5k_hw_set_ack_bitrate_high(ah, false);
+ ath5k_hw_set_ack_bitrate_high(ah, true);
ret = 0;
done:
mmiowb();
@@ -2533,12 +2657,33 @@ ath5k_stop_hw(struct ath5k_softc *sc)
tasklet_kill(&sc->restq);
tasklet_kill(&sc->calib);
tasklet_kill(&sc->beacontq);
+ tasklet_kill(&sc->ani_tasklet);
ath5k_rfkill_hw_stop(sc->ah);
return ret;
}
+static void
+ath5k_intr_calibration_poll(struct ath5k_hw *ah)
+{
+ if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) &&
+ !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL)) {
+ /* run ANI only when full calibration is not active */
+ ah->ah_cal_next_ani = jiffies +
+ msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
+ tasklet_schedule(&ah->ah_sc->ani_tasklet);
+
+ } else if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {
+ ah->ah_cal_next_full = jiffies +
+ msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
+ tasklet_schedule(&ah->ah_sc->calib);
+ }
+ /* we could use SWI to generate enough interrupts to meet our
+ * calibration interval requirements, if necessary:
+ * AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI); */
+}
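+
+/*
+ * With the default intervals (ANI every 1000 ms, full calibration every
+ * 10000 ms) the ANI tasklet normally runs once per second and the full
+ * calibration every ten seconds; ANI is simply skipped while a full
+ * calibration is marked active in ah_cal_mask.
+ */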
+
static irqreturn_t
ath5k_intr(int irq, void *dev_id)
{
@@ -2562,7 +2707,20 @@ ath5k_intr(int irq, void *dev_id)
*/
tasklet_schedule(&sc->restq);
} else if (unlikely(status & AR5K_INT_RXORN)) {
- tasklet_schedule(&sc->restq);
+ /*
+ * Receive buffers are full. Either the bus is busy or
+ * the CPU is not fast enough to process all received
+ * frames.
+ * Older chipsets need a reset to come out of this
+ * condition, but we treat it as RX for newer chips.
+ * We don't know exactly which versions need a reset -
+ * this guess is copied from the HAL.
+ */
+ sc->stats.rxorn_intr++;
+ if (ah->ah_mac_srev < AR5K_SREV_AR5212)
+ tasklet_schedule(&sc->restq);
+ else
+ tasklet_schedule(&sc->rxtq);
} else {
if (status & AR5K_INT_SWBA) {
tasklet_hi_schedule(&sc->beacontq);
@@ -2587,15 +2745,10 @@ ath5k_intr(int irq, void *dev_id)
if (status & AR5K_INT_BMISS) {
/* TODO */
}
- if (status & AR5K_INT_SWI) {
- tasklet_schedule(&sc->calib);
- }
if (status & AR5K_INT_MIB) {
- /*
- * These stats are also used for ANI i think
- * so how about updating them more often ?
- */
- ath5k_hw_update_mib_counters(ah, &sc->ll_stats);
+ sc->stats.mib_intr++;
+ ath5k_hw_update_mib_counters(ah);
+ ath5k_ani_mib_intr(ah);
}
if (status & AR5K_INT_GPIO)
tasklet_schedule(&sc->rf_kill.toggleq);
@@ -2606,7 +2759,7 @@ ath5k_intr(int irq, void *dev_id)
if (unlikely(!counter))
ATH5K_WARN(sc, "too many interrupts, giving up for now\n");
- ath5k_hw_calibration_poll(ah);
+ ath5k_intr_calibration_poll(ah);
return IRQ_HANDLED;
}
@@ -2630,8 +2783,7 @@ ath5k_tasklet_calibrate(unsigned long data)
struct ath5k_hw *ah = sc->ah;
/* Only full calibration for now */
- if (ah->ah_swi_mask != AR5K_SWI_FULL_CALIBRATION)
- return;
+ ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;
/* Stop queues so that calibration
* doesn't interfere with tx */
@@ -2647,18 +2799,29 @@ ath5k_tasklet_calibrate(unsigned long data)
* to load new gain values.
*/
ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "calibration, resetting\n");
- ath5k_reset_wake(sc);
+ ath5k_reset(sc, sc->curchan);
}
if (ath5k_hw_phy_calibrate(ah, sc->curchan))
ATH5K_ERR(sc, "calibration of channel %u failed\n",
ieee80211_frequency_to_channel(
sc->curchan->center_freq));
- ah->ah_swi_mask = 0;
-
/* Wake queues */
ieee80211_wake_queues(sc->hw);
+ ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
+}
+
+
+static void
+ath5k_tasklet_ani(unsigned long data)
+{
+ struct ath5k_softc *sc = (void *)data;
+ struct ath5k_hw *ah = sc->ah;
+
+ ah->ah_cal_mask |= AR5K_CALIBRATION_ANI;
+ ath5k_ani_calibration(ah);
+ ah->ah_cal_mask &= ~AR5K_CALIBRATION_ANI;
}
@@ -2680,7 +2843,6 @@ static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ath5k_softc *sc = hw->priv;
struct ath5k_buf *bf;
unsigned long flags;
- int hdrlen;
int padsize;
ath5k_debug_dump_skb(sc, skb, "TX ", 1);
@@ -2692,17 +2854,11 @@ static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
* the hardware expects the header padded to 4 byte boundaries
* if this is not the case we add the padding after the header
*/
- hdrlen = ieee80211_get_hdrlen_from_skb(skb);
- padsize = ath5k_pad_size(hdrlen);
- if (padsize) {
-
- if (skb_headroom(skb) < padsize) {
- ATH5K_ERR(sc, "tx hdrlen not %%4: %d not enough"
- " headroom to pad %d\n", hdrlen, padsize);
- goto drop_packet;
- }
- skb_push(skb, padsize);
- memmove(skb->data, skb->data+padsize, hdrlen);
+ padsize = ath5k_add_padding(skb);
+ if (padsize < 0) {
+ ATH5K_ERR(sc, "tx hdrlen not %%4: not enough"
+ " headroom to pad");
+ goto drop_packet;
}
spin_lock_irqsave(&sc->txbuflock, flags);
@@ -2721,7 +2877,7 @@ static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
bf->skb = skb;
- if (ath5k_txbuf_setup(sc, bf, txq)) {
+ if (ath5k_txbuf_setup(sc, bf, txq, padsize)) {
bf->skb = NULL;
spin_lock_irqsave(&sc->txbuflock, flags);
list_add_tail(&bf->list, &sc->txbuf);
@@ -2768,6 +2924,8 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan)
goto err;
}
+ ath5k_ani_init(ah, ah->ah_sc->ani_state.ani_mode);
+
/*
* Change channels and update the h/w rate map if we're switching;
* e.g. 11a to 11b/g.
@@ -2836,6 +2994,8 @@ static int ath5k_add_interface(struct ieee80211_hw *hw,
goto end;
}
+ ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "add interface mode %d\n", sc->opmode);
+
ath5k_hw_set_lladdr(sc->ah, vif->addr);
ath5k_mode_setup(sc);
@@ -2906,7 +3066,7 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
* then we must allow the user to set how many tx antennas we
* have available
*/
- ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT);
+ ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);
unlock:
mutex_unlock(&sc->lock);
@@ -2914,22 +3074,20 @@ unlock:
}
static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw,
- int mc_count, struct dev_addr_list *mclist)
+ struct netdev_hw_addr_list *mc_list)
{
u32 mfilt[2], val;
- int i;
u8 pos;
+ struct netdev_hw_addr *ha;
mfilt[0] = 0;
mfilt[1] = 1;
- for (i = 0; i < mc_count; i++) {
- if (!mclist)
- break;
+ netdev_hw_addr_list_for_each(ha, mc_list) {
/* calculate XOR of eight 6-bit values */
- val = get_unaligned_le32(mclist->dmi_addr + 0);
+ val = get_unaligned_le32(ha->addr + 0);
pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
- val = get_unaligned_le32(mclist->dmi_addr + 3);
+ val = get_unaligned_le32(ha->addr + 3);
pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
pos &= 0x3f;
mfilt[pos / 32] |= (1 << (pos % 32));
@@ -2937,8 +3095,7 @@ static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw,
* but not sure, needs testing, if we do use this we'd
* neet to inform below to not reset the mcast */
/* ath5k_hw_set_mcast_filterindex(ah,
- * mclist->dmi_addr[5]); */
- mclist = mclist->next;
+ * ha->addr[5]); */
}
return ((u64)(mfilt[1]) << 32) | mfilt[0];
@@ -3124,12 +3281,30 @@ ath5k_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
struct ath5k_softc *sc = hw->priv;
- struct ath5k_hw *ah = sc->ah;
/* Force update */
- ath5k_hw_update_mib_counters(ah, &sc->ll_stats);
+ ath5k_hw_update_mib_counters(sc->ah);
+
+ stats->dot11ACKFailureCount = sc->stats.ack_fail;
+ stats->dot11RTSFailureCount = sc->stats.rts_fail;
+ stats->dot11RTSSuccessCount = sc->stats.rts_ok;
+ stats->dot11FCSErrorCount = sc->stats.fcs_error;
+
+ return 0;
+}
+
+static int ath5k_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct ath5k_softc *sc = hw->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+
+ if (idx != 0)
+ return -ENOENT;
- memcpy(stats, &sc->ll_stats, sizeof(sc->ll_stats));
+ survey->channel = conf->channel;
+ survey->filled = SURVEY_INFO_NOISE_DBM;
+ survey->noise = sc->ah->ah_noise_floor;
return 0;
}
diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h
index 7e1a88a5abdb..56221bc7c8cd 100644
--- a/drivers/net/wireless/ath/ath5k/base.h
+++ b/drivers/net/wireless/ath/ath5k/base.h
@@ -50,6 +50,7 @@
#include "ath5k.h"
#include "debug.h"
+#include "ani.h"
#include "../regd.h"
#include "../ath.h"
@@ -105,6 +106,38 @@ struct ath5k_rfkill {
struct tasklet_struct toggleq;
};
+/* statistics */
+struct ath5k_statistics {
+ /* antenna use */
+ unsigned int antenna_rx[5]; /* frames count per antenna RX */
+ unsigned int antenna_tx[5]; /* frames count per antenna TX */
+
+ /* frame errors */
+ unsigned int rx_all_count; /* all RX frames, including errors */
+ unsigned int tx_all_count; /* all TX frames, including errors */
+ unsigned int rxerr_crc;
+ unsigned int rxerr_phy;
+ unsigned int rxerr_phy_code[32];
+ unsigned int rxerr_fifo;
+ unsigned int rxerr_decrypt;
+ unsigned int rxerr_mic;
+ unsigned int rxerr_proc;
+ unsigned int rxerr_jumbo;
+ unsigned int txerr_retry;
+ unsigned int txerr_fifo;
+ unsigned int txerr_filt;
+
+ /* MIB counters */
+ unsigned int ack_fail;
+ unsigned int rts_fail;
+ unsigned int rts_ok;
+ unsigned int fcs_error;
+ unsigned int beacons;
+
+ unsigned int mib_intr;
+ unsigned int rxorn_intr;
+};
+
#if CHAN_DEBUG
#define ATH_CHAN_MAX (26+26+26+200+200)
#else
@@ -117,7 +150,6 @@ struct ath5k_softc {
struct pci_dev *pdev; /* for dma mapping */
void __iomem *iobase; /* address of the device */
struct mutex lock; /* dev-level lock */
- struct ieee80211_low_level_stats ll_stats;
struct ieee80211_hw *hw; /* IEEE 802.11 common */
struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
struct ieee80211_channel channels[ATH_CHAN_MAX];
@@ -191,6 +223,11 @@ struct ath5k_softc {
int power_level; /* Requested tx power in dbm */
bool assoc; /* associate state */
bool enable_beacon; /* true if beacons are on */
+
+ struct ath5k_statistics stats;
+
+ struct ath5k_ani_state ani_state;
+ struct tasklet_struct ani_tasklet; /* ANI calibration */
};
#define ath5k_hw_hasbssidmask(_ah) \
diff --git a/drivers/net/wireless/ath/ath5k/caps.c b/drivers/net/wireless/ath/ath5k/caps.c
index 367a6c7d3cc7..74f007126f41 100644
--- a/drivers/net/wireless/ath/ath5k/caps.c
+++ b/drivers/net/wireless/ath/ath5k/caps.c
@@ -102,9 +102,6 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
}
}
- /* GPIO */
- ah->ah_gpio_npins = AR5K_NUM_GPIO;
-
/* Set number of supported TX queues */
if (ah->ah_version == AR5K_AR5210)
ah->ah_capabilities.cap_queues.q_tx_num =
@@ -112,6 +109,12 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
else
ah->ah_capabilities.cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;
+ /* newer hardware has PHY error counters */
+ if (ah->ah_mac_srev >= AR5K_SREV_AR5213A)
+ ah->ah_capabilities.cap_has_phyerr_counters = true;
+ else
+ ah->ah_capabilities.cap_has_phyerr_counters = false;
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index 747508c15d34..6fb5c5ffa5b1 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -69,6 +69,7 @@ module_param_named(debug, ath5k_debug, uint, 0);
#include <linux/seq_file.h>
#include "reg.h"
+#include "ani.h"
static struct dentry *ath5k_global_debugfs;
@@ -307,6 +308,7 @@ static const struct {
{ ATH5K_DEBUG_DUMP_TX, "dumptx", "print transmit skb content" },
{ ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" },
{ ATH5K_DEBUG_TRACE, "trace", "trace function calls" },
+ { ATH5K_DEBUG_ANI, "ani", "adaptive noise immunity" },
{ ATH5K_DEBUG_ANY, "all", "show all debug levels" },
};
@@ -364,6 +366,369 @@ static const struct file_operations fops_debug = {
};
+/* debugfs: antenna */
+
+static ssize_t read_file_antenna(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_softc *sc = file->private_data;
+ char buf[700];
+ unsigned int len = 0;
+ unsigned int i;
+ unsigned int v;
+
+ len += snprintf(buf+len, sizeof(buf)-len, "antenna mode\t%d\n",
+ sc->ah->ah_ant_mode);
+ len += snprintf(buf+len, sizeof(buf)-len, "default antenna\t%d\n",
+ sc->ah->ah_def_ant);
+ len += snprintf(buf+len, sizeof(buf)-len, "tx antenna\t%d\n",
+ sc->ah->ah_tx_ant);
+
+ len += snprintf(buf+len, sizeof(buf)-len, "\nANTENNA\t\tRX\tTX\n");
+ for (i = 1; i < ARRAY_SIZE(sc->stats.antenna_rx); i++) {
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "[antenna %d]\t%d\t%d\n",
+ i, sc->stats.antenna_rx[i], sc->stats.antenna_tx[i]);
+ }
+ len += snprintf(buf+len, sizeof(buf)-len, "[invalid]\t%d\t%d\n",
+ sc->stats.antenna_rx[0], sc->stats.antenna_tx[0]);
+
+ v = ath5k_hw_reg_read(sc->ah, AR5K_DEFAULT_ANTENNA);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "\nAR5K_DEFAULT_ANTENNA\t0x%08x\n", v);
+
+ v = ath5k_hw_reg_read(sc->ah, AR5K_STA_ID1);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "AR5K_STA_ID1_DEFAULT_ANTENNA\t%d\n",
+ (v & AR5K_STA_ID1_DEFAULT_ANTENNA) != 0);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "AR5K_STA_ID1_DESC_ANTENNA\t%d\n",
+ (v & AR5K_STA_ID1_DESC_ANTENNA) != 0);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "AR5K_STA_ID1_RTS_DEF_ANTENNA\t%d\n",
+ (v & AR5K_STA_ID1_RTS_DEF_ANTENNA) != 0);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "AR5K_STA_ID1_SELFGEN_DEF_ANT\t%d\n",
+ (v & AR5K_STA_ID1_SELFGEN_DEF_ANT) != 0);
+
+ v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_AGCCTL);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "\nAR5K_PHY_AGCCTL_OFDM_DIV_DIS\t%d\n",
+ (v & AR5K_PHY_AGCCTL_OFDM_DIV_DIS) != 0);
+
+ v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_RESTART);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "AR5K_PHY_RESTART_DIV_GC\t\t%x\n",
+ (v & AR5K_PHY_RESTART_DIV_GC) >> AR5K_PHY_RESTART_DIV_GC_S);
+
+ v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_FAST_ANT_DIV);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "AR5K_PHY_FAST_ANT_DIV_EN\t%d\n",
+ (v & AR5K_PHY_FAST_ANT_DIV_EN) != 0);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_antenna(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_softc *sc = file->private_data;
+ unsigned int i;
+ char buf[20];
+
+ if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
+ return -EFAULT;
+
+ if (strncmp(buf, "diversity", 9) == 0) {
+ ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_DEFAULT);
+ printk(KERN_INFO "ath5k debug: enable diversity\n");
+ } else if (strncmp(buf, "fixed-a", 7) == 0) {
+ ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_FIXED_A);
+ printk(KERN_INFO "ath5k debugfs: fixed antenna A\n");
+ } else if (strncmp(buf, "fixed-b", 7) == 0) {
+ ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_FIXED_B);
+ printk(KERN_INFO "ath5k debug: fixed antenna B\n");
+ } else if (strncmp(buf, "clear", 5) == 0) {
+ for (i = 0; i < ARRAY_SIZE(sc->stats.antenna_rx); i++) {
+ sc->stats.antenna_rx[i] = 0;
+ sc->stats.antenna_tx[i] = 0;
+ }
+ printk(KERN_INFO "ath5k debug: cleared antenna stats\n");
+ }
+ return count;
+}
+
+static const struct file_operations fops_antenna = {
+ .read = read_file_antenna,
+ .write = write_file_antenna,
+ .open = ath5k_debugfs_open,
+ .owner = THIS_MODULE,
+};
+
+
+/* debugfs: frameerrors */
+
+static ssize_t read_file_frameerrors(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_softc *sc = file->private_data;
+ struct ath5k_statistics *st = &sc->stats;
+ char buf[700];
+ unsigned int len = 0;
+ int i;
+
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "RX\n---------------------\n");
+ len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%d\t(%d%%)\n",
+ st->rxerr_crc,
+ st->rx_all_count > 0 ?
+ st->rxerr_crc*100/st->rx_all_count : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "PHY\t%d\t(%d%%)\n",
+ st->rxerr_phy,
+ st->rx_all_count > 0 ?
+ st->rxerr_phy*100/st->rx_all_count : 0);
+ for (i = 0; i < 32; i++) {
+ if (st->rxerr_phy_code[i])
+ len += snprintf(buf+len, sizeof(buf)-len,
+ " phy_err[%d]\t%d\n",
+ i, st->rxerr_phy_code[i]);
+ }
+
+ len += snprintf(buf+len, sizeof(buf)-len, "FIFO\t%d\t(%d%%)\n",
+ st->rxerr_fifo,
+ st->rx_all_count > 0 ?
+ st->rxerr_fifo*100/st->rx_all_count : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "decrypt\t%d\t(%d%%)\n",
+ st->rxerr_decrypt,
+ st->rx_all_count > 0 ?
+ st->rxerr_decrypt*100/st->rx_all_count : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "MIC\t%d\t(%d%%)\n",
+ st->rxerr_mic,
+ st->rx_all_count > 0 ?
+ st->rxerr_mic*100/st->rx_all_count : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "process\t%d\t(%d%%)\n",
+ st->rxerr_proc,
+ st->rx_all_count > 0 ?
+ st->rxerr_proc*100/st->rx_all_count : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "jumbo\t%d\t(%d%%)\n",
+ st->rxerr_jumbo,
+ st->rx_all_count > 0 ?
+ st->rxerr_jumbo*100/st->rx_all_count : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "[RX all\t%d]\n",
+ st->rx_all_count);
+
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "\nTX\n---------------------\n");
+ len += snprintf(buf+len, sizeof(buf)-len, "retry\t%d\t(%d%%)\n",
+ st->txerr_retry,
+ st->tx_all_count > 0 ?
+ st->txerr_retry*100/st->tx_all_count : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "FIFO\t%d\t(%d%%)\n",
+ st->txerr_fifo,
+ st->tx_all_count > 0 ?
+ st->txerr_fifo*100/st->tx_all_count : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "filter\t%d\t(%d%%)\n",
+ st->txerr_filt,
+ st->tx_all_count > 0 ?
+ st->txerr_filt*100/st->tx_all_count : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "[TX all\t%d]\n",
+ st->tx_all_count);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_frameerrors(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_softc *sc = file->private_data;
+ struct ath5k_statistics *st = &sc->stats;
+ char buf[20];
+
+ if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
+ return -EFAULT;
+
+ if (strncmp(buf, "clear", 5) == 0) {
+ st->rxerr_crc = 0;
+ st->rxerr_phy = 0;
+ st->rxerr_fifo = 0;
+ st->rxerr_decrypt = 0;
+ st->rxerr_mic = 0;
+ st->rxerr_proc = 0;
+ st->rxerr_jumbo = 0;
+ st->rx_all_count = 0;
+ st->txerr_retry = 0;
+ st->txerr_fifo = 0;
+ st->txerr_filt = 0;
+ st->tx_all_count = 0;
+ printk(KERN_INFO "ath5k debug: cleared frameerrors stats\n");
+ }
+ return count;
+}
+
+static const struct file_operations fops_frameerrors = {
+ .read = read_file_frameerrors,
+ .write = write_file_frameerrors,
+ .open = ath5k_debugfs_open,
+ .owner = THIS_MODULE,
+};
+
+
+/* debugfs: ani */
+
+static ssize_t read_file_ani(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_softc *sc = file->private_data;
+ struct ath5k_statistics *st = &sc->stats;
+ struct ath5k_ani_state *as = &sc->ani_state;
+
+ char buf[700];
+ unsigned int len = 0;
+
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "HW has PHY error counters:\t%s\n",
+ sc->ah->ah_capabilities.cap_has_phyerr_counters ?
+ "yes" : "no");
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "HW max spur immunity level:\t%d\n",
+ as->max_spur_level);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "\nANI state\n--------------------------------------------\n");
+ len += snprintf(buf+len, sizeof(buf)-len, "operating mode:\t\t\t");
+ switch (as->ani_mode) {
+ case ATH5K_ANI_MODE_OFF:
+ len += snprintf(buf+len, sizeof(buf)-len, "OFF\n");
+ break;
+ case ATH5K_ANI_MODE_MANUAL_LOW:
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "MANUAL LOW\n");
+ break;
+ case ATH5K_ANI_MODE_MANUAL_HIGH:
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "MANUAL HIGH\n");
+ break;
+ case ATH5K_ANI_MODE_AUTO:
+ len += snprintf(buf+len, sizeof(buf)-len, "AUTO\n");
+ break;
+ default:
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "??? (not good)\n");
+ break;
+ }
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "noise immunity level:\t\t%d\n",
+ as->noise_imm_level);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "spur immunity level:\t\t%d\n",
+ as->spur_level);
+ len += snprintf(buf+len, sizeof(buf)-len, "firstep level:\t\t\t%d\n",
+ as->firstep_level);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "OFDM weak signal detection:\t%s\n",
+ as->ofdm_weak_sig ? "on" : "off");
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "CCK weak signal detection:\t%s\n",
+ as->cck_weak_sig ? "on" : "off");
+
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "\nMIB INTERRUPTS:\t\t%u\n",
+ st->mib_intr);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "beacon RSSI average:\t%d\n",
+ sc->ah->ah_beacon_rssi_avg.avg);
+ len += snprintf(buf+len, sizeof(buf)-len, "profcnt tx\t\t%u\t(%d%%)\n",
+ as->pfc_tx,
+ as->pfc_cycles > 0 ?
+ as->pfc_tx*100/as->pfc_cycles : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "profcnt rx\t\t%u\t(%d%%)\n",
+ as->pfc_rx,
+ as->pfc_cycles > 0 ?
+ as->pfc_rx*100/as->pfc_cycles : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "profcnt busy\t\t%u\t(%d%%)\n",
+ as->pfc_busy,
+ as->pfc_cycles > 0 ?
+ as->pfc_busy*100/as->pfc_cycles : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "profcnt cycles\t\t%u\n",
+ as->pfc_cycles);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "listen time\t\t%d\tlast: %d\n",
+ as->listen_time, as->last_listen);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "OFDM errors\t\t%u\tlast: %u\tsum: %u\n",
+ as->ofdm_errors, as->last_ofdm_errors,
+ as->sum_ofdm_errors);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "CCK errors\t\t%u\tlast: %u\tsum: %u\n",
+ as->cck_errors, as->last_cck_errors,
+ as->sum_cck_errors);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "AR5K_PHYERR_CNT1\t%x\t(=%d)\n",
+ ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT1),
+ ATH5K_ANI_OFDM_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX -
+ ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT1)));
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "AR5K_PHYERR_CNT2\t%x\t(=%d)\n",
+ ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT2),
+ ATH5K_ANI_CCK_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX -
+ ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT2)));
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_ani(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_softc *sc = file->private_data;
+ char buf[20];
+
+ if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
+ return -EFAULT;
+
+ if (strncmp(buf, "sens-low", 8) == 0) {
+ ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_MANUAL_HIGH);
+ } else if (strncmp(buf, "sens-high", 9) == 0) {
+ ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_MANUAL_LOW);
+ } else if (strncmp(buf, "ani-off", 7) == 0) {
+ ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_OFF);
+ } else if (strncmp(buf, "ani-on", 6) == 0) {
+ ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_AUTO);
+ } else if (strncmp(buf, "noise-low", 9) == 0) {
+ ath5k_ani_set_noise_immunity_level(sc->ah, 0);
+ } else if (strncmp(buf, "noise-high", 10) == 0) {
+ ath5k_ani_set_noise_immunity_level(sc->ah,
+ ATH5K_ANI_MAX_NOISE_IMM_LVL);
+ } else if (strncmp(buf, "spur-low", 8) == 0) {
+ ath5k_ani_set_spur_immunity_level(sc->ah, 0);
+ } else if (strncmp(buf, "spur-high", 9) == 0) {
+ ath5k_ani_set_spur_immunity_level(sc->ah,
+ sc->ani_state.max_spur_level);
+ } else if (strncmp(buf, "fir-low", 7) == 0) {
+ ath5k_ani_set_firstep_level(sc->ah, 0);
+ } else if (strncmp(buf, "fir-high", 8) == 0) {
+ ath5k_ani_set_firstep_level(sc->ah, ATH5K_ANI_MAX_FIRSTEP_LVL);
+ } else if (strncmp(buf, "ofdm-off", 8) == 0) {
+ ath5k_ani_set_ofdm_weak_signal_detection(sc->ah, false);
+ } else if (strncmp(buf, "ofdm-on", 7) == 0) {
+ ath5k_ani_set_ofdm_weak_signal_detection(sc->ah, true);
+ } else if (strncmp(buf, "cck-off", 7) == 0) {
+ ath5k_ani_set_cck_weak_signal_detection(sc->ah, false);
+ } else if (strncmp(buf, "cck-on", 6) == 0) {
+ ath5k_ani_set_cck_weak_signal_detection(sc->ah, true);
+ }
+ return count;
+}
+
+static const struct file_operations fops_ani = {
+ .read = read_file_ani,
+ .write = write_file_ani,
+ .open = ath5k_debugfs_open,
+ .owner = THIS_MODULE,
+};
+
+
/* init */
void
@@ -393,6 +758,20 @@ ath5k_debug_init_device(struct ath5k_softc *sc)
sc->debug.debugfs_reset = debugfs_create_file("reset", S_IWUSR,
sc->debug.debugfs_phydir, sc, &fops_reset);
+
+ sc->debug.debugfs_antenna = debugfs_create_file("antenna",
+ S_IWUSR | S_IRUSR,
+ sc->debug.debugfs_phydir, sc, &fops_antenna);
+
+ sc->debug.debugfs_frameerrors = debugfs_create_file("frameerrors",
+ S_IWUSR | S_IRUSR,
+ sc->debug.debugfs_phydir, sc,
+ &fops_frameerrors);
+
+ sc->debug.debugfs_ani = debugfs_create_file("ani",
+ S_IWUSR | S_IRUSR,
+ sc->debug.debugfs_phydir, sc,
+ &fops_ani);
}
void
@@ -408,6 +787,9 @@ ath5k_debug_finish_device(struct ath5k_softc *sc)
debugfs_remove(sc->debug.debugfs_registers);
debugfs_remove(sc->debug.debugfs_beacon);
debugfs_remove(sc->debug.debugfs_reset);
+ debugfs_remove(sc->debug.debugfs_antenna);
+ debugfs_remove(sc->debug.debugfs_frameerrors);
+ debugfs_remove(sc->debug.debugfs_ani);
debugfs_remove(sc->debug.debugfs_phydir);
}
diff --git a/drivers/net/wireless/ath/ath5k/debug.h b/drivers/net/wireless/ath/ath5k/debug.h
index 66f69f04e55e..ddd5b3a99e8d 100644
--- a/drivers/net/wireless/ath/ath5k/debug.h
+++ b/drivers/net/wireless/ath/ath5k/debug.h
@@ -74,6 +74,9 @@ struct ath5k_dbg_info {
struct dentry *debugfs_registers;
struct dentry *debugfs_beacon;
struct dentry *debugfs_reset;
+ struct dentry *debugfs_antenna;
+ struct dentry *debugfs_frameerrors;
+ struct dentry *debugfs_ani;
};
/**
@@ -113,6 +116,7 @@ enum ath5k_debug_level {
ATH5K_DEBUG_DUMP_TX = 0x00000200,
ATH5K_DEBUG_DUMPBANDS = 0x00000400,
ATH5K_DEBUG_TRACE = 0x00001000,
+ ATH5K_DEBUG_ANI = 0x00002000,
ATH5K_DEBUG_ANY = 0xffffffff
};
diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c
index dc30a2b70a6b..7d7b646ab65a 100644
--- a/drivers/net/wireless/ath/ath5k/desc.c
+++ b/drivers/net/wireless/ath/ath5k/desc.c
@@ -35,7 +35,8 @@
*/
static int
ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
- unsigned int pkt_len, unsigned int hdr_len, enum ath5k_pkt_type type,
+ unsigned int pkt_len, unsigned int hdr_len, int padsize,
+ enum ath5k_pkt_type type,
unsigned int tx_power, unsigned int tx_rate0, unsigned int tx_tries0,
unsigned int key_index, unsigned int antenna_mode, unsigned int flags,
unsigned int rtscts_rate, unsigned int rtscts_duration)
@@ -71,7 +72,7 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
/* Verify and set frame length */
/* remove padding we might have added before */
- frame_len = pkt_len - ath5k_pad_size(hdr_len) + FCS_LEN;
+ frame_len = pkt_len - padsize + FCS_LEN;
if (frame_len & ~AR5K_2W_TX_DESC_CTL0_FRAME_LEN)
return -EINVAL;
@@ -100,7 +101,7 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
AR5K_REG_SM(hdr_len, AR5K_2W_TX_DESC_CTL0_HEADER_LEN);
}
- /*Diferences between 5210-5211*/
+ /*Differences between 5210-5211*/
if (ah->ah_version == AR5K_AR5210) {
switch (type) {
case AR5K_PKT_TYPE_BEACON:
@@ -165,6 +166,7 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
*/
static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
struct ath5k_desc *desc, unsigned int pkt_len, unsigned int hdr_len,
+ int padsize,
enum ath5k_pkt_type type, unsigned int tx_power, unsigned int tx_rate0,
unsigned int tx_tries0, unsigned int key_index,
unsigned int antenna_mode, unsigned int flags,
@@ -206,7 +208,7 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
/* Verify and set frame length */
/* remove padding we might have added before */
- frame_len = pkt_len - ath5k_pad_size(hdr_len) + FCS_LEN;
+ frame_len = pkt_len - padsize + FCS_LEN;
if (frame_len & ~AR5K_4W_TX_DESC_CTL0_FRAME_LEN)
return -EINVAL;
@@ -229,7 +231,7 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
AR5K_REG_SM(antenna_mode, AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT);
tx_ctl->tx_control_1 |= AR5K_REG_SM(type,
AR5K_4W_TX_DESC_CTL1_FRAME_TYPE);
- tx_ctl->tx_control_2 = AR5K_REG_SM(tx_tries0 + AR5K_TUNE_HWTXTRIES,
+ tx_ctl->tx_control_2 = AR5K_REG_SM(tx_tries0,
AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0);
tx_ctl->tx_control_3 = tx_rate0 & AR5K_4W_TX_DESC_CTL3_XMIT_RATE0;
@@ -643,6 +645,7 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
rs->rs_status |= AR5K_RXERR_PHY;
rs->rs_phyerr |= AR5K_REG_MS(rx_err->rx_error_1,
AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE);
+ ath5k_ani_phy_error_report(ah, rs->rs_phyerr);
}
if (rx_status->rx_status_1 &
@@ -668,12 +671,6 @@ int ath5k_hw_init_desc_functions(struct ath5k_hw *ah)
ah->ah_version != AR5K_AR5212)
return -ENOTSUPP;
- /* XXX: What is this magic value and where is it used ? */
- if (ah->ah_version == AR5K_AR5212)
- ah->ah_magic = AR5K_EEPROM_MAGIC_5212;
- else if (ah->ah_version == AR5K_AR5211)
- ah->ah_magic = AR5K_EEPROM_MAGIC_5211;
-
if (ah->ah_version == AR5K_AR5212) {
ah->ah_setup_rx_desc = ath5k_hw_setup_rx_desc;
ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc;
diff --git a/drivers/net/wireless/ath/ath5k/desc.h b/drivers/net/wireless/ath/ath5k/desc.h
index 56158c804e3e..64538fbe4167 100644
--- a/drivers/net/wireless/ath/ath5k/desc.h
+++ b/drivers/net/wireless/ath/ath5k/desc.h
@@ -112,15 +112,32 @@ struct ath5k_hw_rx_error {
#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE 0x0000ff00
#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE_S 8
-/* PHY Error codes */
-#define AR5K_DESC_RX_PHY_ERROR_NONE 0x00
-#define AR5K_DESC_RX_PHY_ERROR_TIMING 0x20
-#define AR5K_DESC_RX_PHY_ERROR_PARITY 0x40
-#define AR5K_DESC_RX_PHY_ERROR_RATE 0x60
-#define AR5K_DESC_RX_PHY_ERROR_LENGTH 0x80
-#define AR5K_DESC_RX_PHY_ERROR_64QAM 0xa0
-#define AR5K_DESC_RX_PHY_ERROR_SERVICE 0xc0
-#define AR5K_DESC_RX_PHY_ERROR_TRANSMITOVR 0xe0
+/**
+ * enum ath5k_phy_error_code - PHY Error codes
+ */
+enum ath5k_phy_error_code {
+ AR5K_RX_PHY_ERROR_UNDERRUN = 0, /* Transmit underrun */
+ AR5K_RX_PHY_ERROR_TIMING = 1, /* Timing error */
+ AR5K_RX_PHY_ERROR_PARITY = 2, /* Illegal parity */
+ AR5K_RX_PHY_ERROR_RATE = 3, /* Illegal rate */
+ AR5K_RX_PHY_ERROR_LENGTH = 4, /* Illegal length */
+ AR5K_RX_PHY_ERROR_RADAR = 5, /* Radar detect */
+ AR5K_RX_PHY_ERROR_SERVICE = 6, /* Illegal service */
+ AR5K_RX_PHY_ERROR_TOR = 7, /* Transmit override receive */
+ /* these are specific to the 5212 */
+ AR5K_RX_PHY_ERROR_OFDM_TIMING = 17,
+ AR5K_RX_PHY_ERROR_OFDM_SIGNAL_PARITY = 18,
+ AR5K_RX_PHY_ERROR_OFDM_RATE_ILLEGAL = 19,
+ AR5K_RX_PHY_ERROR_OFDM_LENGTH_ILLEGAL = 20,
+ AR5K_RX_PHY_ERROR_OFDM_POWER_DROP = 21,
+ AR5K_RX_PHY_ERROR_OFDM_SERVICE = 22,
+ AR5K_RX_PHY_ERROR_OFDM_RESTART = 23,
+ AR5K_RX_PHY_ERROR_CCK_TIMING = 25,
+ AR5K_RX_PHY_ERROR_CCK_HEADER_CRC = 26,
+ AR5K_RX_PHY_ERROR_CCK_RATE_ILLEGAL = 27,
+ AR5K_RX_PHY_ERROR_CCK_SERVICE = 30,
+ AR5K_RX_PHY_ERROR_CCK_RESTART = 31,
+};
/*
* 5210/5211 hardware 2-word TX control descriptor
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index 67665cdc7afe..ed0263672d6d 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -331,7 +331,8 @@ static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
ee->ee_x_gain[mode] = (val >> 1) & 0xf;
ee->ee_xpd[mode] = val & 0x1;
- if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0)
+ if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0 &&
+ mode != AR5K_EEPROM_MODE_11B)
ee->ee_fixed_bias[mode] = (val >> 13) & 0x1;
if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_3_3) {
@@ -341,6 +342,7 @@ static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
if (mode == AR5K_EEPROM_MODE_11A)
ee->ee_xr_power[mode] = val & 0x3f;
else {
+ /* b_DB_11[bg] and b_OB_11[bg] */
ee->ee_ob[mode][0] = val & 0x7;
ee->ee_db[mode][0] = (val >> 3) & 0x7;
}
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.h b/drivers/net/wireless/ath/ath5k/eeprom.h
index 473a483bb9c3..c4a6d5f26af4 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.h
+++ b/drivers/net/wireless/ath/ath5k/eeprom.h
@@ -24,9 +24,6 @@
* SERDES infos are present */
#define AR5K_EEPROM_MAGIC 0x003d /* EEPROM Magic number */
#define AR5K_EEPROM_MAGIC_VALUE 0x5aa5 /* Default - found on EEPROM */
-#define AR5K_EEPROM_MAGIC_5212 0x0000145c /* 5212 */
-#define AR5K_EEPROM_MAGIC_5211 0x0000145b /* 5211 */
-#define AR5K_EEPROM_MAGIC_5210 0x0000145a /* 5210 */
#define AR5K_EEPROM_IS_HB63 0x000b /* Talon detect */
@@ -78,9 +75,9 @@
#define AR5K_EEPROM_HDR_11A(_v) (((_v) >> AR5K_EEPROM_MODE_11A) & 0x1)
#define AR5K_EEPROM_HDR_11B(_v) (((_v) >> AR5K_EEPROM_MODE_11B) & 0x1)
#define AR5K_EEPROM_HDR_11G(_v) (((_v) >> AR5K_EEPROM_MODE_11G) & 0x1)
-#define AR5K_EEPROM_HDR_T_2GHZ_DIS(_v) (((_v) >> 3) & 0x1) /* Disable turbo for 2Ghz (?) */
-#define AR5K_EEPROM_HDR_T_5GHZ_DBM(_v) (((_v) >> 4) & 0x7f) /* Max turbo power for a/XR mode (eeprom_init) */
-#define AR5K_EEPROM_HDR_DEVICE(_v) (((_v) >> 11) & 0x7)
+#define AR5K_EEPROM_HDR_T_2GHZ_DIS(_v) (((_v) >> 3) & 0x1) /* Disable turbo for 2Ghz */
+#define AR5K_EEPROM_HDR_T_5GHZ_DBM(_v) (((_v) >> 4) & 0x7f) /* Max turbo power for < 2W power consumption */
+#define AR5K_EEPROM_HDR_DEVICE(_v) (((_v) >> 11) & 0x7) /* Device type (1 Cardbus, 2 PCI, 3 MiniPCI, 4 AP) */
#define AR5K_EEPROM_HDR_RFKILL(_v) (((_v) >> 14) & 0x1) /* Device has RFKill support */
#define AR5K_EEPROM_HDR_T_5GHZ_DIS(_v) (((_v) >> 15) & 0x1) /* Disable turbo for 5Ghz */
@@ -101,7 +98,7 @@
#define AR5K_EEPROM_MISC1 AR5K_EEPROM_INFO(5)
#define AR5K_EEPROM_TARGET_PWRSTART(_v) ((_v) & 0xfff)
-#define AR5K_EEPROM_HAS32KHZCRYSTAL(_v) (((_v) >> 14) & 0x1)
+#define AR5K_EEPROM_HAS32KHZCRYSTAL(_v) (((_v) >> 14) & 0x1) /* has 32KHz crystal for sleep mode */
#define AR5K_EEPROM_HAS32KHZCRYSTAL_OLD(_v) (((_v) >> 15) & 0x1)
#define AR5K_EEPROM_MISC2 AR5K_EEPROM_INFO(6)
@@ -114,26 +111,27 @@
#define AR5K_EEPROM_MISC4 AR5K_EEPROM_INFO(8)
#define AR5K_EEPROM_CAL_DATA_START(_v) (((_v) >> 4) & 0xfff)
-#define AR5K_EEPROM_MASK_R0(_v) (((_v) >> 2) & 0x3)
-#define AR5K_EEPROM_MASK_R1(_v) ((_v) & 0x3)
+#define AR5K_EEPROM_MASK_R0(_v) (((_v) >> 2) & 0x3) /* modes supported by radio 0 (bit 1: G, bit 2: A) */
+#define AR5K_EEPROM_MASK_R1(_v) ((_v) & 0x3) /* modes supported by radio 1 (bit 1: G, bit 2: A) */
#define AR5K_EEPROM_MISC5 AR5K_EEPROM_INFO(9)
-#define AR5K_EEPROM_COMP_DIS(_v) ((_v) & 0x1)
-#define AR5K_EEPROM_AES_DIS(_v) (((_v) >> 1) & 0x1)
-#define AR5K_EEPROM_FF_DIS(_v) (((_v) >> 2) & 0x1)
-#define AR5K_EEPROM_BURST_DIS(_v) (((_v) >> 3) & 0x1)
-#define AR5K_EEPROM_MAX_QCU(_v) (((_v) >> 4) & 0xf)
-#define AR5K_EEPROM_HEAVY_CLIP_EN(_v) (((_v) >> 8) & 0x1)
-#define AR5K_EEPROM_KEY_CACHE_SIZE(_v) (((_v) >> 12) & 0xf)
+#define AR5K_EEPROM_COMP_DIS(_v) ((_v) & 0x1) /* disable compression */
+#define AR5K_EEPROM_AES_DIS(_v) (((_v) >> 1) & 0x1) /* disable AES */
+#define AR5K_EEPROM_FF_DIS(_v) (((_v) >> 2) & 0x1) /* disable fast frames */
+#define AR5K_EEPROM_BURST_DIS(_v) (((_v) >> 3) & 0x1) /* disable bursting */
+#define AR5K_EEPROM_MAX_QCU(_v) (((_v) >> 4) & 0xf) /* max number of QCUs. defaults to 10 */
+#define AR5K_EEPROM_HEAVY_CLIP_EN(_v) (((_v) >> 8) & 0x1) /* enable heavy clipping */
+#define AR5K_EEPROM_KEY_CACHE_SIZE(_v) (((_v) >> 12) & 0xf) /* key cache size. defaults to 128 */
#define AR5K_EEPROM_MISC6 AR5K_EEPROM_INFO(10)
-#define AR5K_EEPROM_TX_CHAIN_DIS ((_v) & 0x8)
-#define AR5K_EEPROM_RX_CHAIN_DIS (((_v) >> 3) & 0x8)
-#define AR5K_EEPROM_FCC_MID_EN (((_v) >> 6) & 0x1)
-#define AR5K_EEPROM_JAP_U1EVEN_EN (((_v) >> 7) & 0x1)
-#define AR5K_EEPROM_JAP_U2_EN (((_v) >> 8) & 0x1)
-#define AR5K_EEPROM_JAP_U1ODD_EN (((_v) >> 9) & 0x1)
-#define AR5K_EEPROM_JAP_11A_NEW_EN (((_v) >> 10) & 0x1)
+#define AR5K_EEPROM_TX_CHAIN_DIS ((_v) & 0x7) /* MIMO chains disabled for TX bitmask */
+#define AR5K_EEPROM_RX_CHAIN_DIS (((_v) >> 3) & 0x7) /* MIMO chains disabled for RX bitmask */
+#define AR5K_EEPROM_FCC_MID_EN (((_v) >> 6) & 0x1) /* 5.47-5.7GHz supported */
+#define AR5K_EEPROM_JAP_U1EVEN_EN (((_v) >> 7) & 0x1) /* Japan UNII1 band (5.15-5.25GHz) on even channels (5180, 5200, 5220, 5240) supported */
+#define AR5K_EEPROM_JAP_U2_EN (((_v) >> 8) & 0x1) /* Japan UNII2 band (5.25-5.35GHz) supported */
+#define AR5K_EEPROM_JAP_MID_EN (((_v) >> 9) & 0x1) /* Japan band from 5.47-5.7GHz supported */
+#define AR5K_EEPROM_JAP_U1ODD_EN (((_v) >> 10) & 0x1) /* Japan UNII1 band (5.15-5.25GHz) on odd channels (5170, 5190, 5210, 5230) supported */
+#define AR5K_EEPROM_JAP_11A_NEW_EN (((_v) >> 11) & 0x1) /* Japan A mode enabled (using even channels) */
/* calibration settings */
#define AR5K_EEPROM_MODES_11A(_v) AR5K_EEPROM_OFF(_v, 0x00c5, 0x00d4)
@@ -389,7 +387,49 @@ struct ath5k_edge_power {
bool flag;
};
-/* EEPROM calibration data */
+/**
+ * struct ath5k_eeprom_info - EEPROM calibration data
+ *
+ * @ee_regdomain: ath/regd.c takes care of COUNTRY_ERD and WORLDWIDE_ROAMING
+ * flags
+ * @ee_ant_gain: Antenna gain in 0.5dB steps signed [5211 only?]
+ * @ee_cck_ofdm_gain_delta: difference in gainF to output the same power for
+ * OFDM and CCK packets
+ * @ee_cck_ofdm_power_delta: power difference between OFDM (6Mbps) and CCK
+ * (11Mbps) rate in G mode. 0.1dB steps
+ * @ee_scaled_cck_delta: for Japan Channel 14: 0.1dB resolution
+ *
+ * @ee_i_cal: Initial I coefficient to correct I/Q mismatch in the receive path
+ * @ee_q_cal: Initial Q coefficient to correct I/Q mismatch in the receive path
+ * @ee_fixed_bias: use ee_ob and ee_db settings or use automatic control
+ * @ee_switch_settling: RX/TX Switch settling time
+ * @ee_atn_tx_rx: Difference in attenuation between TX and RX in 1dB steps
+ * @ee_ant_control: Antenna Control Settings
+ * @ee_ob: Bias current for Output stage of PA
+ * B/G mode: Index [0] is used for AR2112/5112, otherwise [1]
+ * A mode: [0] 5.15-5.25 [1] 5.25-5.50 [2] 5.50-5.70 [3] 5.70-5.85 GHz
+ * @ee_db: Bias current for Output stage of PA. see @ee_ob
+ * @ee_tx_end2xlna_enable: Time difference from when BB finishes sending a frame
+ * to when the external LNA is activated
+ * @ee_tx_end2xpa_disable: Time difference from when BB finishes sending a frame
+ * to when the external PA switch is deactivated
+ * @ee_tx_frm2xpa_enable: Time difference from when MAC sends frame to when
+ * external PA switch is activated
+ * @ee_thr_62: Clear Channel Assessment (CCA) sensitivity
+ * (IEEE802.11a section 17.3.10.5 )
+ * @ee_xlna_gain: Total gain of the LNA (information only)
+ * @ee_xpd: Use external (1) or internal power detector
+ * @ee_x_gain: Gain for external power detector output (differences in EEMAP
+ * versions!)
+ * @ee_i_gain: Initial gain value after reset
+ * @ee_margin_tx_rx: Margin in dB when final attenuation stage should be used
+ *
+ * @ee_false_detect: Backoff in Sensitivity (dB) on channels with spur signals
+ * @ee_noise_floor_thr: Noise floor threshold in 1dB steps
+ * @ee_adc_desired_size: Desired amplitude for ADC, used by AGC; in 0.5 dB steps
+ * @ee_pga_desired_size: Desired output of PGA (for BB gain) in 0.5 dB steps
+ * @ee_pd_gain_overlap: PD ADC curves need to overlap in 0.5dB steps (ee_map>=2)
+ */
struct ath5k_eeprom_info {
/* Header information */
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index aefe84f9c04b..5212e275f1c7 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -39,16 +39,16 @@
* ath5k_hw_set_opmode - Set PCU operating mode
*
* @ah: The &struct ath5k_hw
+ * @op_mode: &enum nl80211_iftype operating mode
*
* Initialize PCU for the various operating modes (AP/STA etc)
- *
- * NOTE: ah->ah_op_mode must be set before calling this.
*/
-int ath5k_hw_set_opmode(struct ath5k_hw *ah)
+int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
{
struct ath_common *common = ath5k_hw_common(ah);
u32 pcu_reg, beacon_reg, low_id, high_id;
+ ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_MODE, "mode %d\n", op_mode);
/* Preserve rest settings */
pcu_reg = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 0xffff0000;
@@ -61,7 +61,7 @@ int ath5k_hw_set_opmode(struct ath5k_hw *ah)
ATH5K_TRACE(ah->ah_sc);
- switch (ah->ah_op_mode) {
+ switch (op_mode) {
case NL80211_IFTYPE_ADHOC:
pcu_reg |= AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_KEYSRCH_MODE;
beacon_reg |= AR5K_BCR_ADHOC;
@@ -113,39 +113,26 @@ int ath5k_hw_set_opmode(struct ath5k_hw *ah)
}
/**
- * ath5k_hw_update - Update mib counters (mac layer statistics)
+ * ath5k_hw_update_mib_counters - Update MIB counters (mac layer statistics)
*
* @ah: The &struct ath5k_hw
- * @stats: The &struct ieee80211_low_level_stats we use to track
- * statistics on the driver
*
- * Reads MIB counters from PCU and updates sw statistics. Must be
- * called after a MIB interrupt.
+ * Reads MIB counters from PCU and updates sw statistics. It is called after a
+ * MIB interrupt, because one of these counters might have reached its maximum
+ * and triggered the MIB interrupt, to let us read and clear the counter.
+ *
+ * This is called in interrupt context!
*/
-void ath5k_hw_update_mib_counters(struct ath5k_hw *ah,
- struct ieee80211_low_level_stats *stats)
+void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
{
- ATH5K_TRACE(ah->ah_sc);
+ struct ath5k_statistics *stats = &ah->ah_sc->stats;
/* Read-And-Clear */
- stats->dot11ACKFailureCount += ath5k_hw_reg_read(ah, AR5K_ACK_FAIL);
- stats->dot11RTSFailureCount += ath5k_hw_reg_read(ah, AR5K_RTS_FAIL);
- stats->dot11RTSSuccessCount += ath5k_hw_reg_read(ah, AR5K_RTS_OK);
- stats->dot11FCSErrorCount += ath5k_hw_reg_read(ah, AR5K_FCS_FAIL);
-
- /* XXX: Should we use this to track beacon count ?
- * -we read it anyway to clear the register */
- ath5k_hw_reg_read(ah, AR5K_BEACON_CNT);
-
- /* Reset profile count registers on 5212*/
- if (ah->ah_version == AR5K_AR5212) {
- ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_TX);
- ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RX);
- ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RXCLR);
- ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_CYCLE);
- }
-
- /* TODO: Handle ANI stats */
+ stats->ack_fail += ath5k_hw_reg_read(ah, AR5K_ACK_FAIL);
+ stats->rts_fail += ath5k_hw_reg_read(ah, AR5K_RTS_FAIL);
+ stats->rts_ok += ath5k_hw_reg_read(ah, AR5K_RTS_OK);
+ stats->fcs_error += ath5k_hw_reg_read(ah, AR5K_FCS_FAIL);
+ stats->beacons += ath5k_hw_reg_read(ah, AR5K_BEACON_CNT);
}
/**
@@ -167,9 +154,9 @@ void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high)
else {
u32 val = AR5K_STA_ID1_BASE_RATE_11B | AR5K_STA_ID1_ACKCTS_6MB;
if (high)
- AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, val);
- else
AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, val);
+ else
+ AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, val);
}
}
@@ -179,25 +166,12 @@ void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high)
\******************/
/**
- * ath5k_hw_het_ack_timeout - Get ACK timeout from PCU in usec
- *
- * @ah: The &struct ath5k_hw
- */
-unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah)
-{
- ATH5K_TRACE(ah->ah_sc);
-
- return ath5k_hw_clocktoh(ah, AR5K_REG_MS(ath5k_hw_reg_read(ah,
- AR5K_TIME_OUT), AR5K_TIME_OUT_ACK));
-}
-
-/**
* ath5k_hw_set_ack_timeout - Set ACK timeout on PCU
*
* @ah: The &struct ath5k_hw
* @timeout: Timeout in usec
*/
-int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
+static int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
ATH5K_TRACE(ah->ah_sc);
if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK))
@@ -211,24 +185,12 @@ int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
}
/**
- * ath5k_hw_get_cts_timeout - Get CTS timeout from PCU in usec
- *
- * @ah: The &struct ath5k_hw
- */
-unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah)
-{
- ATH5K_TRACE(ah->ah_sc);
- return ath5k_hw_clocktoh(ah, AR5K_REG_MS(ath5k_hw_reg_read(ah,
- AR5K_TIME_OUT), AR5K_TIME_OUT_CTS));
-}
-
-/**
* ath5k_hw_set_cts_timeout - Set CTS timeout on PCU
*
* @ah: The &struct ath5k_hw
* @timeout: Timeout in usec
*/
-int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
+static int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
ATH5K_TRACE(ah->ah_sc);
if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS))
@@ -290,7 +252,7 @@ unsigned int ath5k_hw_get_clockrate(struct ath5k_hw *ah)
*
* @ah: The &struct ath5k_hw
*/
-unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
+static unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
{
struct ieee80211_channel *channel = ah->ah_current_channel;
@@ -308,7 +270,7 @@ unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
*
* @ah: The &struct ath5k_hw
*/
-unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
+static unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
{
struct ieee80211_channel *channel = ah->ah_current_channel;
@@ -417,7 +379,6 @@ void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
* (ACK etc).
*
* NOTE: RX DMA should be already enabled using ath5k_hw_start_rx_dma
- * TODO: Init ANI here
*/
void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
{
@@ -451,42 +412,6 @@ void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1);
}
-/*
- * Set multicast filter by index
- */
-int ath5k_hw_set_mcast_filter_idx(struct ath5k_hw *ah, u32 index)
-{
-
- ATH5K_TRACE(ah->ah_sc);
- if (index >= 64)
- return -EINVAL;
- else if (index >= 32)
- AR5K_REG_ENABLE_BITS(ah, AR5K_MCAST_FILTER1,
- (1 << (index - 32)));
- else
- AR5K_REG_ENABLE_BITS(ah, AR5K_MCAST_FILTER0, (1 << index));
-
- return 0;
-}
-
-/*
- * Clear Multicast filter by index
- */
-int ath5k_hw_clear_mcast_filter_idx(struct ath5k_hw *ah, u32 index)
-{
-
- ATH5K_TRACE(ah->ah_sc);
- if (index >= 64)
- return -EINVAL;
- else if (index >= 32)
- AR5K_REG_DISABLE_BITS(ah, AR5K_MCAST_FILTER1,
- (1 << (index - 32)));
- else
- AR5K_REG_DISABLE_BITS(ah, AR5K_MCAST_FILTER0, (1 << index));
-
- return 0;
-}
-
/**
* ath5k_hw_get_rx_filter - Get current rx filter
*
@@ -571,18 +496,7 @@ void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
* Beacon control *
\****************/
-/**
- * ath5k_hw_get_tsf32 - Get a 32bit TSF
- *
- * @ah: The &struct ath5k_hw
- *
- * Returns lower 32 bits of current TSF
- */
-u32 ath5k_hw_get_tsf32(struct ath5k_hw *ah)
-{
- ATH5K_TRACE(ah->ah_sc);
- return ath5k_hw_reg_read(ah, AR5K_TSF_L32);
-}
+#define ATH5K_MAX_TSF_READ 10
/**
* ath5k_hw_get_tsf64 - Get the full 64bit TSF
@@ -593,10 +507,35 @@ u32 ath5k_hw_get_tsf32(struct ath5k_hw *ah)
*/
u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah)
{
- u64 tsf = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
+ u32 tsf_lower, tsf_upper1, tsf_upper2;
+ int i;
+
+ /*
+ * While reading TSF upper and then lower part, the clock is still
+ * counting (or jumping in case of IBSS merge) so we might get
+ * inconsistent values. To avoid this, we read the upper part again
+ * and check it has not been changed. We make the hypothesis that a
+ * maximum of 3 changes can happen in a row (we use 10 as a safe
+ * value).
+ *
+ * Impact on performance is pretty small, since in most cases, only
+ * 3 register reads are needed.
+ */
+
+ tsf_upper1 = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
+ for (i = 0; i < ATH5K_MAX_TSF_READ; i++) {
+ tsf_lower = ath5k_hw_reg_read(ah, AR5K_TSF_L32);
+ tsf_upper2 = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
+ if (tsf_upper2 == tsf_upper1)
+ break;
+ tsf_upper1 = tsf_upper2;
+ }
+
+ WARN_ON(i == ATH5K_MAX_TSF_READ);
+
ATH5K_TRACE(ah->ah_sc);
- return ath5k_hw_reg_read(ah, AR5K_TSF_L32) | (tsf << 32);
+ return (((u64)tsf_upper1 << 32) | tsf_lower);
}
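The comment in ath5k_hw_get_tsf64() above describes a general technique for reading a 64-bit counter exposed as two unlatched 32-bit registers: read the upper half, then the lower half, then re-read the upper half and retry if it changed (a change means the lower half wrapped in between). A self-contained sketch of the pattern, with read_reg() as a hypothetical register accessor:

	#include <stdint.h>

	#define MAX_SPLIT_READS	10	/* same safety margin as above */

	/* which: 0 = lower 32 bits, 1 = upper 32 bits */
	static uint64_t read_split_counter64(uint32_t (*read_reg)(int which))
	{
		uint32_t hi, lo, hi2;
		int i;

		hi = read_reg(1);
		for (i = 0; i < MAX_SPLIT_READS; i++) {
			lo = read_reg(0);
			hi2 = read_reg(1);
			if (hi2 == hi)	/* no carry between the two reads */
				break;
			hi = hi2;
		}
		return ((uint64_t)hi << 32) | lo;
	}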
/**
@@ -651,7 +590,7 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
/*
* Set the additional timers by mode
*/
- switch (ah->ah_op_mode) {
+ switch (ah->ah_sc->opmode) {
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_STATION:
/* In STA mode timer1 is used as next wakeup
@@ -688,8 +627,8 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
* Set the beacon register and enable all timers.
*/
/* When in AP or Mesh Point mode zero timer0 to start TSF */
- if (ah->ah_op_mode == NL80211_IFTYPE_AP ||
- ah->ah_op_mode == NL80211_IFTYPE_MESH_POINT)
+ if (ah->ah_sc->opmode == NL80211_IFTYPE_AP ||
+ ah->ah_sc->opmode == NL80211_IFTYPE_MESH_POINT)
ath5k_hw_reg_write(ah, 0, AR5K_TIMER0);
ath5k_hw_reg_write(ah, next_beacon, AR5K_TIMER0);
@@ -722,203 +661,6 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
}
-#if 0
-/*
- * Set beacon timers
- */
-int ath5k_hw_set_beacon_timers(struct ath5k_hw *ah,
- const struct ath5k_beacon_state *state)
-{
- u32 cfp_period, next_cfp, dtim, interval, next_beacon;
-
- /*
- * TODO: should be changed through *state
- * review struct ath5k_beacon_state struct
- *
- * XXX: These are used for cfp period bellow, are they
- * ok ? Is it O.K. for tsf here to be 0 or should we use
- * get_tsf ?
- */
- u32 dtim_count = 0; /* XXX */
- u32 cfp_count = 0; /* XXX */
- u32 tsf = 0; /* XXX */
-
- ATH5K_TRACE(ah->ah_sc);
- /* Return on an invalid beacon state */
- if (state->bs_interval < 1)
- return -EINVAL;
-
- interval = state->bs_interval;
- dtim = state->bs_dtim_period;
-
- /*
- * PCF support?
- */
- if (state->bs_cfp_period > 0) {
- /*
- * Enable PCF mode and set the CFP
- * (Contention Free Period) and timer registers
- */
- cfp_period = state->bs_cfp_period * state->bs_dtim_period *
- state->bs_interval;
- next_cfp = (cfp_count * state->bs_dtim_period + dtim_count) *
- state->bs_interval;
-
- AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1,
- AR5K_STA_ID1_DEFAULT_ANTENNA |
- AR5K_STA_ID1_PCF);
- ath5k_hw_reg_write(ah, cfp_period, AR5K_CFP_PERIOD);
- ath5k_hw_reg_write(ah, state->bs_cfp_max_duration,
- AR5K_CFP_DUR);
- ath5k_hw_reg_write(ah, (tsf + (next_cfp == 0 ? cfp_period :
- next_cfp)) << 3, AR5K_TIMER2);
- } else {
- /* Disable PCF mode */
- AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
- AR5K_STA_ID1_DEFAULT_ANTENNA |
- AR5K_STA_ID1_PCF);
- }
-
- /*
- * Enable the beacon timer register
- */
- ath5k_hw_reg_write(ah, state->bs_next_beacon, AR5K_TIMER0);
-
- /*
- * Start the beacon timers
- */
- ath5k_hw_reg_write(ah, (ath5k_hw_reg_read(ah, AR5K_BEACON) &
- ~(AR5K_BEACON_PERIOD | AR5K_BEACON_TIM)) |
- AR5K_REG_SM(state->bs_tim_offset ? state->bs_tim_offset + 4 : 0,
- AR5K_BEACON_TIM) | AR5K_REG_SM(state->bs_interval,
- AR5K_BEACON_PERIOD), AR5K_BEACON);
-
- /*
- * Write new beacon miss threshold, if it appears to be valid
- * XXX: Figure out right values for min <= bs_bmiss_threshold <= max
- * and return if its not in range. We can test this by reading value and
- * setting value to a largest value and seeing which values register.
- */
-
- AR5K_REG_WRITE_BITS(ah, AR5K_RSSI_THR, AR5K_RSSI_THR_BMISS,
- state->bs_bmiss_threshold);
-
- /*
- * Set sleep control register
- * XXX: Didn't find this in 5210 code but since this register
- * exists also in ar5k's 5210 headers i leave it as common code.
- */
- AR5K_REG_WRITE_BITS(ah, AR5K_SLEEP_CTL, AR5K_SLEEP_CTL_SLDUR,
- (state->bs_sleep_duration - 3) << 3);
-
- /*
- * Set enhanced sleep registers on 5212
- */
- if (ah->ah_version == AR5K_AR5212) {
- if (state->bs_sleep_duration > state->bs_interval &&
- roundup(state->bs_sleep_duration, interval) ==
- state->bs_sleep_duration)
- interval = state->bs_sleep_duration;
-
- if (state->bs_sleep_duration > dtim && (dtim == 0 ||
- roundup(state->bs_sleep_duration, dtim) ==
- state->bs_sleep_duration))
- dtim = state->bs_sleep_duration;
-
- if (interval > dtim)
- return -EINVAL;
-
- next_beacon = interval == dtim ? state->bs_next_dtim :
- state->bs_next_beacon;
-
- ath5k_hw_reg_write(ah,
- AR5K_REG_SM((state->bs_next_dtim - 3) << 3,
- AR5K_SLEEP0_NEXT_DTIM) |
- AR5K_REG_SM(10, AR5K_SLEEP0_CABTO) |
- AR5K_SLEEP0_ENH_SLEEP_EN |
- AR5K_SLEEP0_ASSUME_DTIM, AR5K_SLEEP0);
-
- ath5k_hw_reg_write(ah, AR5K_REG_SM((next_beacon - 3) << 3,
- AR5K_SLEEP1_NEXT_TIM) |
- AR5K_REG_SM(10, AR5K_SLEEP1_BEACON_TO), AR5K_SLEEP1);
-
- ath5k_hw_reg_write(ah,
- AR5K_REG_SM(interval, AR5K_SLEEP2_TIM_PER) |
- AR5K_REG_SM(dtim, AR5K_SLEEP2_DTIM_PER), AR5K_SLEEP2);
- }
-
- return 0;
-}
-
-/*
- * Reset beacon timers
- */
-void ath5k_hw_reset_beacon(struct ath5k_hw *ah)
-{
- ATH5K_TRACE(ah->ah_sc);
- /*
- * Disable beacon timer
- */
- ath5k_hw_reg_write(ah, 0, AR5K_TIMER0);
-
- /*
- * Disable some beacon register values
- */
- AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
- AR5K_STA_ID1_DEFAULT_ANTENNA | AR5K_STA_ID1_PCF);
- ath5k_hw_reg_write(ah, AR5K_BEACON_PERIOD, AR5K_BEACON);
-}
-
-/*
- * Wait for beacon queue to finish
- */
-int ath5k_hw_beaconq_finish(struct ath5k_hw *ah, unsigned long phys_addr)
-{
- unsigned int i;
- int ret;
-
- ATH5K_TRACE(ah->ah_sc);
-
- /* 5210 doesn't have QCU*/
- if (ah->ah_version == AR5K_AR5210) {
- /*
- * Wait for beaconn queue to finish by checking
- * Control Register and Beacon Status Register.
- */
- for (i = AR5K_TUNE_BEACON_INTERVAL / 2; i > 0; i--) {
- if (!(ath5k_hw_reg_read(ah, AR5K_BSR) & AR5K_BSR_TXQ1F)
- ||
- !(ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_BSR_TXQ1F))
- break;
- udelay(10);
- }
-
- /* Timeout... */
- if (i <= 0) {
- /*
- * Re-schedule the beacon queue
- */
- ath5k_hw_reg_write(ah, phys_addr, AR5K_NOQCU_TXDP1);
- ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE,
- AR5K_BCR);
-
- return -EIO;
- }
- ret = 0;
- } else {
- /*5211/5212*/
- ret = ath5k_hw_register_timeout(ah,
- AR5K_QUEUE_STATUS(AR5K_TX_QUEUE_ID_BEACON),
- AR5K_QCU_STS_FRMPENDCNT, 0, false);
-
- if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, AR5K_TX_QUEUE_ID_BEACON))
- return -EIO;
- }
-
- return ret;
-}
-#endif
-
/*********************\
* Key table functions *
@@ -971,19 +713,6 @@ int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry)
return 0;
}
-/*
- * Check if a table entry is valid
- */
-int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry)
-{
- ATH5K_TRACE(ah->ah_sc);
- AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);
-
- /* Check the validation flag at the end of the entry */
- return ath5k_hw_reg_read(ah, AR5K_KEYTABLE_MAC1(entry)) &
- AR5K_KEYTABLE_VALID;
-}
-
static
int ath5k_keycache_type(const struct ieee80211_key_conf *key)
{
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 68e2bccd90d3..3ce9afba1d88 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -20,8 +20,6 @@
*
*/
-#define _ATH5K_PHY
-
#include <linux/delay.h>
#include <linux/slab.h>
@@ -982,7 +980,7 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
return -EINVAL;
data0 = ath5k_hw_bitswap((data0 << 2) & 0xff, 8);
- } else if ((c - (c % 5)) != 2 || c > 5435) {
+ } else if ((c % 5) != 2 || c > 5435) {
if (!(c % 20) && c >= 5120) {
data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8);
data2 = ath5k_hw_bitswap(3, 2);
@@ -995,7 +993,7 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
} else
return -EINVAL;
} else {
- data0 = ath5k_hw_bitswap((10 * (c - 2) - 4800) / 25 + 1, 8);
+ data0 = ath5k_hw_bitswap((10 * (c - 2 - 4800)) / 25 + 1, 8);
data2 = ath5k_hw_bitswap(0, 2);
}
@@ -1023,7 +1021,7 @@ static int ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
data0 = ath5k_hw_bitswap((c - 2272), 8);
data2 = 0;
/* ? 5GHz ? */
- } else if ((c - (c % 5)) != 2 || c > 5435) {
+ } else if ((c % 5) != 2 || c > 5435) {
if (!(c % 20) && c < 5120)
data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8);
else if (!(c % 10))
@@ -1034,7 +1032,7 @@ static int ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
return -EINVAL;
data2 = ath5k_hw_bitswap(1, 2);
} else {
- data0 = ath5k_hw_bitswap((10 * (c - 2) - 4800) / 25 + 1, 8);
+ data0 = ath5k_hw_bitswap((10 * (c - 2 - 4800)) / 25 + 1, 8);
data2 = ath5k_hw_bitswap(0, 2);
}
@@ -1105,28 +1103,6 @@ int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel)
PHY calibration
\*****************/
-void
-ath5k_hw_calibration_poll(struct ath5k_hw *ah)
-{
- /* Calibration interval in jiffies */
- unsigned long cal_intval;
-
- cal_intval = msecs_to_jiffies(ah->ah_cal_intval * 1000);
-
- /* Initialize timestamp if needed */
- if (!ah->ah_cal_tstamp)
- ah->ah_cal_tstamp = jiffies;
-
- /* For now we always do full calibration
- * Mark software interrupt mask and fire software
- * interrupt (bit gets auto-cleared) */
- if (time_is_before_eq_jiffies(ah->ah_cal_tstamp + cal_intval)) {
- ah->ah_cal_tstamp = jiffies;
- ah->ah_swi_mask = AR5K_SWI_FULL_CALIBRATION;
- AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI);
- }
-}
-
static int sign_extend(int val, const int nbits)
{
int order = BIT(nbits-1);
@@ -1191,7 +1167,7 @@ static s16 ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
* The median of the values in the history is then loaded into the
* hardware for its own use for RSSI and CCA measurements.
*/
-void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
+static void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
u32 val;
@@ -1400,7 +1376,11 @@ static int ath5k_hw_rf511x_calibrate(struct ath5k_hw *ah,
}
i_coffd = ((i_pwr >> 1) + (q_pwr >> 1)) >> 7;
- q_coffd = q_pwr >> 7;
+
+ if (ah->ah_version == AR5K_AR5211)
+ q_coffd = q_pwr >> 6;
+ else
+ q_coffd = q_pwr >> 7;
/* protect against divide by 0 and loss of sign bits */
if (i_coffd == 0 || q_coffd < 2)
@@ -1409,7 +1389,10 @@ static int ath5k_hw_rf511x_calibrate(struct ath5k_hw *ah,
i_coff = (-iq_corr) / i_coffd;
i_coff = clamp(i_coff, -32, 31); /* signed 6 bit */
- q_coff = (i_pwr / q_coffd) - 128;
+ if (ah->ah_version == AR5K_AR5211)
+ q_coff = (i_pwr / q_coffd) - 64;
+ else
+ q_coff = (i_pwr / q_coffd) - 128;
q_coff = clamp(q_coff, -16, 15); /* signed 5 bit */
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
@@ -1769,7 +1752,7 @@ u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan)
* Antenna control *
\*****************/
-void /*TODO:Boundary check*/
+static void /*TODO:Boundary check*/
ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant)
{
ATH5K_TRACE(ah->ah_sc);
@@ -1778,16 +1761,6 @@ ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant)
ath5k_hw_reg_write(ah, ant & 0x7, AR5K_DEFAULT_ANTENNA);
}
-unsigned int ath5k_hw_get_def_antenna(struct ath5k_hw *ah)
-{
- ATH5K_TRACE(ah->ah_sc);
-
- if (ah->ah_version != AR5K_AR5210)
- return ath5k_hw_reg_read(ah, AR5K_DEFAULT_ANTENNA) & 0x7;
-
- return false; /*XXX: What do we return for 5210 ?*/
-}
-
/*
* Enable/disable fast rx antenna diversity
*/
@@ -1931,6 +1904,7 @@ ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
ah->ah_tx_ant = tx_ant;
ah->ah_ant_mode = ant_mode;
+ ah->ah_def_ant = def_ant;
sta_id1 |= use_def_for_tx ? AR5K_STA_ID1_DEFAULT_ANTENNA : 0;
sta_id1 |= update_def_on_tx ? AR5K_STA_ID1_DESC_ANTENNA : 0;
@@ -2441,19 +2415,6 @@ ath5k_combine_linear_pcdac_curves(struct ath5k_hw *ah, s16* table_min,
pcdac_tmp = pcdac_high_pwr;
edge_flag = 0x40;
-#if 0
- /* If both min and max power limits are in lower
- * power curve's range, only use the low power curve.
- * TODO: min/max levels are related to target
- * power values requested from driver/user
- * XXX: Is this really needed ? */
- if (min_pwr < table_max[1] &&
- max_pwr < table_max[1]) {
- edge_flag = 0;
- pcdac_tmp = pcdac_low_pwr;
- max_pwr_idx = (table_max[1] - table_min[1])/2;
- }
-#endif
} else {
pcdac_low_pwr = ah->ah_txpower.tmpL[1]; /* Zeroed */
pcdac_high_pwr = ah->ah_txpower.tmpL[0];
@@ -2600,7 +2561,7 @@ ath5k_combine_pwr_to_pdadc_curves(struct ath5k_hw *ah,
max_idx = (pdadc_n < table_size) ? pdadc_n : table_size;
/* Fill pdadc_out table */
- while (pdadc_0 < max_idx)
+ while (pdadc_0 < max_idx && pdadc_i < 128)
pdadc_out[pdadc_i++] = pdadc_tmp[pdadc_0++];
/* Need to extrapolate above this pdgain? */
@@ -3144,5 +3105,3 @@ int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower)
return ath5k_hw_txpower(ah, channel, ee_mode, txpower);
}
-
-#undef _ATH5K_PHY
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index 9122a8556f45..f5831da33f7b 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -517,23 +517,6 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
}
/*
- * Get slot time from DCU
- */
-unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah)
-{
- unsigned int slot_time_clock;
-
- ATH5K_TRACE(ah->ah_sc);
-
- if (ah->ah_version == AR5K_AR5210)
- slot_time_clock = ath5k_hw_reg_read(ah, AR5K_SLOT_TIME);
- else
- slot_time_clock = ath5k_hw_reg_read(ah, AR5K_DCU_GBL_IFS_SLOT);
-
- return ath5k_hw_clocktoh(ah, slot_time_clock & 0xffff);
-}
-
-/*
* Set slot time on DCU
*/
int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time)
diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
index 1464f89b249c..55b4ac6d236f 100644
--- a/drivers/net/wireless/ath/ath5k/reg.h
+++ b/drivers/net/wireless/ath/ath5k/reg.h
@@ -212,10 +212,10 @@
* MIB control register
*/
#define AR5K_MIBC 0x0040 /* Register Address */
-#define AR5K_MIBC_COW 0x00000001 /* Warn test indicator */
+#define AR5K_MIBC_COW 0x00000001 /* Counter Overflow Warning */
#define AR5K_MIBC_FMC 0x00000002 /* Freeze MIB Counters */
-#define AR5K_MIBC_CMC 0x00000004 /* Clean MIB Counters */
-#define AR5K_MIBC_MCS 0x00000008 /* MIB counter strobe */
+#define AR5K_MIBC_CMC 0x00000004 /* Clear MIB Counters */
+#define AR5K_MIBC_MCS 0x00000008 /* MIB counter strobe, increment all */
/*
* Timeout prescale register
@@ -1139,8 +1139,8 @@
#define AR5K_STA_ID1_DEFAULT_ANTENNA 0x00200000 /* Use default antenna */
#define AR5K_STA_ID1_DESC_ANTENNA 0x00400000 /* Update antenna from descriptor */
#define AR5K_STA_ID1_RTS_DEF_ANTENNA 0x00800000 /* Use default antenna for RTS */
-#define AR5K_STA_ID1_ACKCTS_6MB 0x01000000 /* Use 6Mbit/s for ACK/CTS */
-#define AR5K_STA_ID1_BASE_RATE_11B 0x02000000 /* Use 11b base rate for ACK/CTS [5211+] */
+#define AR5K_STA_ID1_ACKCTS_6MB 0x01000000 /* Rate to use for ACK/CTS. 0: highest mandatory rate <= RX rate; 1: 1Mbps in B mode */
+#define AR5K_STA_ID1_BASE_RATE_11B 0x02000000 /* 802.11b base rate. 0: 1, 2, 5.5 and 11Mbps; 1: 1 and 2Mbps. [5211+] */
#define AR5K_STA_ID1_SELFGEN_DEF_ANT 0x04000000 /* Use def. antenna for self generated frames */
#define AR5K_STA_ID1_CRYPT_MIC_EN 0x08000000 /* Enable MIC */
#define AR5K_STA_ID1_KEYSRCH_MODE 0x10000000 /* Look up key when key id != 0 */
@@ -1516,7 +1516,14 @@
AR5K_NAV_5210 : AR5K_NAV_5211)
/*
- * RTS success register
+ * MIB counters:
+ *
+ * Max value is 0xc000; if this is reached we get a MIB interrupt.
+ * They can be controlled via AR5K_MIBC and are cleared on read.
+ */
+
+/*
+ * RTS success (MIB counter)
*/
#define AR5K_RTS_OK_5210 0x8090
#define AR5K_RTS_OK_5211 0x8088
@@ -1524,7 +1531,7 @@
AR5K_RTS_OK_5210 : AR5K_RTS_OK_5211)
/*
- * RTS failure register
+ * RTS failure (MIB counter)
*/
#define AR5K_RTS_FAIL_5210 0x8094
#define AR5K_RTS_FAIL_5211 0x808c
@@ -1532,7 +1539,7 @@
AR5K_RTS_FAIL_5210 : AR5K_RTS_FAIL_5211)
/*
- * ACK failure register
+ * ACK failure (MIB counter)
*/
#define AR5K_ACK_FAIL_5210 0x8098
#define AR5K_ACK_FAIL_5211 0x8090
@@ -1540,7 +1547,7 @@
AR5K_ACK_FAIL_5210 : AR5K_ACK_FAIL_5211)
/*
- * FCS failure register
+ * FCS failure (MIB counter)
*/
#define AR5K_FCS_FAIL_5210 0x809c
#define AR5K_FCS_FAIL_5211 0x8094
@@ -1667,11 +1674,17 @@
/*
* Profile count registers
+ *
+ * These registers can be cleared and frozen with AR5K_MIBC, but they do not
+ * generate a MIB interrupt.
+ * Instead of overflowing, they shift by one bit to the right. All registers
+ * shift together, i.e. when one reaches the max, all shift at the same time by
+ * one bit to the right. This way we should always get consistent values.
*/
#define AR5K_PROFCNT_TX 0x80ec /* Tx count */
#define AR5K_PROFCNT_RX 0x80f0 /* Rx count */
-#define AR5K_PROFCNT_RXCLR 0x80f4 /* Clear Rx count */
-#define AR5K_PROFCNT_CYCLE 0x80f8 /* Cycle count (?) */
+#define AR5K_PROFCNT_RXCLR 0x80f4 /* Busy count */
+#define AR5K_PROFCNT_CYCLE 0x80f8 /* Cycle counter */
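Because the four profile counters shift right together instead of wrapping, the ratios between them stay meaningful at any time, which is how the ani debugfs file added earlier in this patch derives its percentages. A hedged sketch of a channel-busy estimate built on the registers defined here, mirroring the pfc_* arithmetic in read_file_ani() (including the divide-by-zero guard):

	/* Hedged sketch; not part of this patch. */
	static unsigned int ath5k_channel_busy_percent(struct ath5k_hw *ah)
	{
		u32 cycles = ath5k_hw_reg_read(ah, AR5K_PROFCNT_CYCLE);
		u32 busy = ath5k_hw_reg_read(ah, AR5K_PROFCNT_RXCLR);

		return cycles > 0 ? busy * 100 / cycles : 0;
	}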
/*
* Quiet period control registers
@@ -1758,7 +1771,7 @@
#define AR5K_CCK_FIL_CNT 0x8128
/*
- * PHY Error Counters (?)
+ * PHY Error Counters (same masks as AR5K_PHY_ERR_FIL)
*/
#define AR5K_PHYERR_CNT1 0x812c
#define AR5K_PHYERR_CNT1_MASK 0x8130
@@ -1766,6 +1779,9 @@
#define AR5K_PHYERR_CNT2 0x8134
#define AR5K_PHYERR_CNT2_MASK 0x8138
+/* if the PHY Error Counters reach this maximum, we get MIB interrupts */
+#define ATH5K_PHYERR_CNT_MAX 0x00c00000
+
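ATH5K_PHYERR_CNT_MAX documents the ceiling at which these counters raise a MIB interrupt. The read_file_ani() debugfs code earlier in this patch prints AR5K_PHYERR_CNT1/2 as TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX - reg), which suggests the ANI code seeds each counter with ATH5K_PHYERR_CNT_MAX minus its trigger threshold so that the MIB interrupt fires exactly when the threshold is reached. Under that assumption, the number of errors seen so far can be recovered from the raw register value:

	/* Hedged sketch of the conversion used in read_file_ani();
	 * 'trigger' would be e.g. ATH5K_ANI_OFDM_TRIG_HIGH. */
	static inline unsigned int phyerr_cnt_to_errors(u32 reg_val, u32 trigger)
	{
		return trigger - (ATH5K_PHYERR_CNT_MAX - reg_val);
	}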
/*
* TSF Threshold register (?)
*/
@@ -1974,7 +1990,7 @@
#define AR5K_PHY_SETTLING 0x9844 /* Register Address */
#define AR5K_PHY_SETTLING_AGC 0x0000007f /* AGC settling time */
#define AR5K_PHY_SETTLING_AGC_S 0
-#define AR5K_PHY_SETTLING_SWITCH 0x00003f80 /* Switch settlig time */
+#define AR5K_PHY_SETTLING_SWITCH 0x00003f80 /* Switch settling time */
#define AR5K_PHY_SETTLING_SWITCH_S 7
/*
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index cbf28e379843..44bbbf2a6edd 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -19,8 +19,6 @@
*
*/
-#define _ATH5K_RESET
-
/*****************************\
Reset functions and helpers
\*****************************/
@@ -34,6 +32,27 @@
#include "base.h"
#include "debug.h"
+/*
+ * Check if a register write has been completed
+ */
+int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
+ bool is_set)
+{
+ int i;
+ u32 data;
+
+ for (i = AR5K_TUNE_REGISTER_TIMEOUT; i > 0; i--) {
+ data = ath5k_hw_reg_read(ah, reg);
+ if (is_set && (data & flag))
+ break;
+ else if ((data & flag) == val)
+ break;
+ udelay(15);
+ }
+
+ return (i <= 0) ? -EAGAIN : 0;
+}
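+
+/*
+ * Illustrative use (hypothetical call site): poll until a bit clears and
+ * give up with -EAGAIN after AR5K_TUNE_REGISTER_TIMEOUT iterations, e.g.
+ *
+ *	if (ath5k_hw_register_timeout(ah, AR5K_RESET_CTL,
+ *				      AR5K_RESET_CTL_PCU, 0, false))
+ *		return -EAGAIN;
+ */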
+
/**
* ath5k_hw_write_ofdm_timings - set OFDM timings on AR5212
*
@@ -221,8 +240,8 @@ static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
/*
* Sleep control
*/
-int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
- bool set_chip, u16 sleep_duration)
+static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
+ bool set_chip, u16 sleep_duration)
{
unsigned int i;
u32 staid, data;
@@ -1017,11 +1036,6 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
if (ret)
return ret;
- /*
- * Initialize operating mode
- */
- ah->ah_op_mode = op_mode;
-
/* PHY access enable */
if (ah->ah_mac_srev >= AR5K_SREV_AR5211)
ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0));
@@ -1192,7 +1206,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
ath5k_hw_set_associd(ah);
/* Set PCU config */
- ath5k_hw_set_opmode(ah);
+ ath5k_hw_set_opmode(ah, op_mode);
/* Clear any pending interrupts
* PISR/SISR Not available on 5210 */
@@ -1378,7 +1392,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
* external 32KHz crystal when sleeping if one
* exists */
if (ah->ah_version == AR5K_AR5212 &&
- ah->ah_op_mode != NL80211_IFTYPE_AP)
+ op_mode != NL80211_IFTYPE_AP)
ath5k_hw_set_sleep_clock(ah, true);
/*
@@ -1388,5 +1402,3 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
ath5k_hw_reset_tsf(ah);
return 0;
}
-
-#undef _ATH5K_RESET
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 5774cea23a3b..35f23bdc442f 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -32,3 +32,24 @@ config ATH9K_DEBUGFS
Also required for changing debug message flags at run time.
+config ATH9K_HTC
+ tristate "Atheros HTC based wireless cards support"
+ depends on USB && MAC80211
+ select ATH9K_HW
+ select MAC80211_LEDS
+ select LEDS_CLASS
+ select NEW_LEDS
+ select ATH9K_COMMON
+ ---help---
+ Support for Atheros HTC based cards.
+ Chipsets supported: AR9271
+
+ For more information: http://wireless.kernel.org/en/users/Drivers/ath9k_htc
+
+ The built module will be ath9k_htc.
+
+config ATH9K_HTC_DEBUGFS
+ bool "Atheros ath9k_htc debugging"
+ depends on ATH9K_HTC && DEBUG_FS
+ ---help---
+	  Say Y if you need access to ath9k_htc's statistics.
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 6b50d5eb9ec3..dd112be218ab 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -13,18 +13,38 @@ ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
obj-$(CONFIG_ATH9K) += ath9k.o
-ath9k_hw-y:= hw.o \
+ath9k_hw-y:= \
+ ar9002_hw.o \
+ ar9003_hw.o \
+ hw.o \
+ ar9003_phy.o \
+ ar9002_phy.o \
+ ar5008_phy.o \
+ ar9002_calib.o \
+ ar9003_calib.o \
+ calib.o \
eeprom.o \
eeprom_def.o \
eeprom_4k.o \
eeprom_9287.o \
- calib.o \
ani.o \
- phy.o \
btcoex.o \
mac.o \
+ ar9002_mac.o \
+ ar9003_mac.o \
+ ar9003_eeprom.o
obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
obj-$(CONFIG_ATH9K_COMMON) += ath9k_common.o
ath9k_common-y:= common.o
+
+ath9k_htc-y += htc_hst.o \
+ hif_usb.o \
+ wmi.o \
+ htc_drv_txrx.o \
+ htc_drv_main.o \
+ htc_drv_beacon.o \
+ htc_drv_init.o
+
+obj-$(CONFIG_ATH9K_HTC) += ath9k_htc.o
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index ca4994f13151..85fdd26039c8 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -47,6 +47,7 @@ static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
}
static struct ath_bus_ops ath_ahb_bus_ops = {
+ .ath_bus_type = ATH_AHB,
.read_cachesize = ath_ahb_read_cachesize,
.eeprom_read = ath_ahb_eeprom_read,
};
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index 2a0cd64c2bfb..ba8b20f01594 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -15,6 +15,7 @@
*/
#include "hw.h"
+#include "hw-ops.h"
static int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah,
struct ath9k_channel *chan)
@@ -37,190 +38,6 @@ static int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah,
return 0;
}
-static bool ath9k_hw_ani_control(struct ath_hw *ah,
- enum ath9k_ani_cmd cmd, int param)
-{
- struct ar5416AniState *aniState = ah->curani;
- struct ath_common *common = ath9k_hw_common(ah);
-
- switch (cmd & ah->ani_function) {
- case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{
- u32 level = param;
-
- if (level >= ARRAY_SIZE(ah->totalSizeDesired)) {
- ath_print(common, ATH_DBG_ANI,
- "level out of range (%u > %u)\n",
- level,
- (unsigned)ARRAY_SIZE(ah->totalSizeDesired));
- return false;
- }
-
- REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
- AR_PHY_DESIRED_SZ_TOT_DES,
- ah->totalSizeDesired[level]);
- REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
- AR_PHY_AGC_CTL1_COARSE_LOW,
- ah->coarse_low[level]);
- REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
- AR_PHY_AGC_CTL1_COARSE_HIGH,
- ah->coarse_high[level]);
- REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
- AR_PHY_FIND_SIG_FIRPWR,
- ah->firpwr[level]);
-
- if (level > aniState->noiseImmunityLevel)
- ah->stats.ast_ani_niup++;
- else if (level < aniState->noiseImmunityLevel)
- ah->stats.ast_ani_nidown++;
- aniState->noiseImmunityLevel = level;
- break;
- }
- case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
- const int m1ThreshLow[] = { 127, 50 };
- const int m2ThreshLow[] = { 127, 40 };
- const int m1Thresh[] = { 127, 0x4d };
- const int m2Thresh[] = { 127, 0x40 };
- const int m2CountThr[] = { 31, 16 };
- const int m2CountThrLow[] = { 63, 48 };
- u32 on = param ? 1 : 0;
-
- REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
- AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
- m1ThreshLow[on]);
- REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
- AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
- m2ThreshLow[on]);
- REG_RMW_FIELD(ah, AR_PHY_SFCORR,
- AR_PHY_SFCORR_M1_THRESH,
- m1Thresh[on]);
- REG_RMW_FIELD(ah, AR_PHY_SFCORR,
- AR_PHY_SFCORR_M2_THRESH,
- m2Thresh[on]);
- REG_RMW_FIELD(ah, AR_PHY_SFCORR,
- AR_PHY_SFCORR_M2COUNT_THR,
- m2CountThr[on]);
- REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
- AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
- m2CountThrLow[on]);
-
- REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
- AR_PHY_SFCORR_EXT_M1_THRESH_LOW,
- m1ThreshLow[on]);
- REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
- AR_PHY_SFCORR_EXT_M2_THRESH_LOW,
- m2ThreshLow[on]);
- REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
- AR_PHY_SFCORR_EXT_M1_THRESH,
- m1Thresh[on]);
- REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
- AR_PHY_SFCORR_EXT_M2_THRESH,
- m2Thresh[on]);
-
- if (on)
- REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
- AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
- else
- REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
- AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
-
- if (!on != aniState->ofdmWeakSigDetectOff) {
- if (on)
- ah->stats.ast_ani_ofdmon++;
- else
- ah->stats.ast_ani_ofdmoff++;
- aniState->ofdmWeakSigDetectOff = !on;
- }
- break;
- }
- case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{
- const int weakSigThrCck[] = { 8, 6 };
- u32 high = param ? 1 : 0;
-
- REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT,
- AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK,
- weakSigThrCck[high]);
- if (high != aniState->cckWeakSigThreshold) {
- if (high)
- ah->stats.ast_ani_cckhigh++;
- else
- ah->stats.ast_ani_ccklow++;
- aniState->cckWeakSigThreshold = high;
- }
- break;
- }
- case ATH9K_ANI_FIRSTEP_LEVEL:{
- const int firstep[] = { 0, 4, 8 };
- u32 level = param;
-
- if (level >= ARRAY_SIZE(firstep)) {
- ath_print(common, ATH_DBG_ANI,
- "level out of range (%u > %u)\n",
- level,
- (unsigned) ARRAY_SIZE(firstep));
- return false;
- }
- REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
- AR_PHY_FIND_SIG_FIRSTEP,
- firstep[level]);
- if (level > aniState->firstepLevel)
- ah->stats.ast_ani_stepup++;
- else if (level < aniState->firstepLevel)
- ah->stats.ast_ani_stepdown++;
- aniState->firstepLevel = level;
- break;
- }
- case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
- const int cycpwrThr1[] =
- { 2, 4, 6, 8, 10, 12, 14, 16 };
- u32 level = param;
-
- if (level >= ARRAY_SIZE(cycpwrThr1)) {
- ath_print(common, ATH_DBG_ANI,
- "level out of range (%u > %u)\n",
- level,
- (unsigned) ARRAY_SIZE(cycpwrThr1));
- return false;
- }
- REG_RMW_FIELD(ah, AR_PHY_TIMING5,
- AR_PHY_TIMING5_CYCPWR_THR1,
- cycpwrThr1[level]);
- if (level > aniState->spurImmunityLevel)
- ah->stats.ast_ani_spurup++;
- else if (level < aniState->spurImmunityLevel)
- ah->stats.ast_ani_spurdown++;
- aniState->spurImmunityLevel = level;
- break;
- }
- case ATH9K_ANI_PRESENT:
- break;
- default:
- ath_print(common, ATH_DBG_ANI,
- "invalid cmd %u\n", cmd);
- return false;
- }
-
- ath_print(common, ATH_DBG_ANI, "ANI parameters:\n");
- ath_print(common, ATH_DBG_ANI,
- "noiseImmunityLevel=%d, spurImmunityLevel=%d, "
- "ofdmWeakSigDetectOff=%d\n",
- aniState->noiseImmunityLevel,
- aniState->spurImmunityLevel,
- !aniState->ofdmWeakSigDetectOff);
- ath_print(common, ATH_DBG_ANI,
- "cckWeakSigThreshold=%d, "
- "firstepLevel=%d, listenTime=%d\n",
- aniState->cckWeakSigThreshold,
- aniState->firstepLevel,
- aniState->listenTime);
- ath_print(common, ATH_DBG_ANI,
- "cycleCount=%d, ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
- aniState->cycleCount,
- aniState->ofdmPhyErrCount,
- aniState->cckPhyErrCount);
-
- return true;
-}
-
static void ath9k_hw_update_mibstats(struct ath_hw *ah,
struct ath9k_mib_stats *stats)
{
@@ -262,11 +79,17 @@ static void ath9k_ani_restart(struct ath_hw *ah)
"Writing ofdmbase=%u cckbase=%u\n",
aniState->ofdmPhyErrBase,
aniState->cckPhyErrBase);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_PHY_ERR_1, aniState->ofdmPhyErrBase);
REG_WRITE(ah, AR_PHY_ERR_2, aniState->cckPhyErrBase);
REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
aniState->ofdmPhyErrCount = 0;
@@ -540,8 +363,14 @@ void ath9k_ani_reset(struct ath_hw *ah)
ath9k_hw_setrxfilter(ah, ath9k_hw_getrxfilter(ah) &
~ATH9K_RX_FILTER_PHYERR);
ath9k_ani_restart(ah);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
}
void ath9k_hw_ani_monitor(struct ath_hw *ah,
@@ -639,6 +468,8 @@ void ath9k_enable_mib_counters(struct ath_hw *ah)
ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_FILT_OFDM, 0);
REG_WRITE(ah, AR_FILT_CCK, 0);
REG_WRITE(ah, AR_MIBC,
@@ -646,6 +477,9 @@ void ath9k_enable_mib_counters(struct ath_hw *ah)
& 0x0f);
REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
}
/* Freeze the MIB counters, get the stats and then clear them */
@@ -809,20 +643,17 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
ath_print(common, ATH_DBG_ANI, "Setting cckErrBase = 0x%08x\n",
ah->ani[0].cckPhyErrBase);
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_PHY_ERR_1, ah->ani[0].ofdmPhyErrBase);
REG_WRITE(ah, AR_PHY_ERR_2, ah->ani[0].cckPhyErrBase);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
ath9k_enable_mib_counters(ah);
ah->aniperiod = ATH9K_ANI_PERIOD;
if (ah->config.enable_ani)
ah->proc_phyerr |= HAL_PROCESS_ANI;
}
-
-void ath9k_hw_ani_disable(struct ath_hw *ah)
-{
- ath_print(ath9k_hw_common(ah), ATH_DBG_ANI, "Disabling ANI\n");
-
- ath9k_hw_disable_mib_counters(ah);
- REG_WRITE(ah, AR_PHY_ERR_1, 0);
- REG_WRITE(ah, AR_PHY_ERR_2, 0);
-}
diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h
index 4e1ab94a5153..3356762ea384 100644
--- a/drivers/net/wireless/ath/ath9k/ani.h
+++ b/drivers/net/wireless/ath/ath9k/ani.h
@@ -118,6 +118,5 @@ u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hw *ah, u32 *rxc_pcnt,
void ath9k_hw_procmibevent(struct ath_hw *ah);
void ath9k_hw_ani_setup(struct ath_hw *ah);
void ath9k_hw_ani_init(struct ath_hw *ah);
-void ath9k_hw_ani_disable(struct ath_hw *ah);
#endif /* ANI_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_initvals.h b/drivers/net/wireless/ath/ath9k/ar5008_initvals.h
new file mode 100644
index 000000000000..025c31ac6146
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar5008_initvals.h
@@ -0,0 +1,742 @@
+/*
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_AR5008_H
+#define INITVALS_AR5008_H
+
+static const u32 ar5416Modes[][6] = {
+ { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
+ { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
+ { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
+ { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
+ { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
+ { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
+ { 0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810 },
+ { 0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a, 0x0000320a },
+ { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
+ { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
+ { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
+ { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
+ { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
+ { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
+ { 0x00009844, 0x1372161e, 0x1372161e, 0x137216a0, 0x137216a0, 0x137216a0 },
+ { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x00009850, 0x6c48b4e0, 0x6d48b4e0, 0x6d48b0de, 0x6c48b0de, 0x6c48b0de },
+ { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
+ { 0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e, 0x31395d5e },
+ { 0x00009860, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18 },
+ { 0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
+ { 0x00009868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190 },
+ { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
+ { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
+ { 0x00009918, 0x000001b8, 0x00000370, 0x00000268, 0x00000134, 0x00000134 },
+ { 0x00009924, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b },
+ { 0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020 },
+ { 0x00009960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
+ { 0x0000a960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
+ { 0x0000b960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
+ { 0x00009964, 0x00000000, 0x00000000, 0x00001120, 0x00001120, 0x00001120 },
+ { 0x000099bc, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00 },
+ { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
+ { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
+ { 0x000099c8, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c },
+ { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
+ { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
+ { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
+ { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
+ { 0x0000a20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000b20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000c20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
+ { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
+ { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
+ { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
+ { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
+ { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
+ { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
+ { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
+ { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
+ { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
+ { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
+ { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
+ { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
+ { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+};
+
+static const u32 ar5416Common[][2] = {
+ { 0x0000000c, 0x00000000 },
+ { 0x00000030, 0x00020015 },
+ { 0x00000034, 0x00000005 },
+ { 0x00000040, 0x00000000 },
+ { 0x00000044, 0x00000008 },
+ { 0x00000048, 0x00000008 },
+ { 0x0000004c, 0x00000010 },
+ { 0x00000050, 0x00000000 },
+ { 0x00000054, 0x0000001f },
+ { 0x00000800, 0x00000000 },
+ { 0x00000804, 0x00000000 },
+ { 0x00000808, 0x00000000 },
+ { 0x0000080c, 0x00000000 },
+ { 0x00000810, 0x00000000 },
+ { 0x00000814, 0x00000000 },
+ { 0x00000818, 0x00000000 },
+ { 0x0000081c, 0x00000000 },
+ { 0x00000820, 0x00000000 },
+ { 0x00000824, 0x00000000 },
+ { 0x00001040, 0x002ffc0f },
+ { 0x00001044, 0x002ffc0f },
+ { 0x00001048, 0x002ffc0f },
+ { 0x0000104c, 0x002ffc0f },
+ { 0x00001050, 0x002ffc0f },
+ { 0x00001054, 0x002ffc0f },
+ { 0x00001058, 0x002ffc0f },
+ { 0x0000105c, 0x002ffc0f },
+ { 0x00001060, 0x002ffc0f },
+ { 0x00001064, 0x002ffc0f },
+ { 0x00001230, 0x00000000 },
+ { 0x00001270, 0x00000000 },
+ { 0x00001038, 0x00000000 },
+ { 0x00001078, 0x00000000 },
+ { 0x000010b8, 0x00000000 },
+ { 0x000010f8, 0x00000000 },
+ { 0x00001138, 0x00000000 },
+ { 0x00001178, 0x00000000 },
+ { 0x000011b8, 0x00000000 },
+ { 0x000011f8, 0x00000000 },
+ { 0x00001238, 0x00000000 },
+ { 0x00001278, 0x00000000 },
+ { 0x000012b8, 0x00000000 },
+ { 0x000012f8, 0x00000000 },
+ { 0x00001338, 0x00000000 },
+ { 0x00001378, 0x00000000 },
+ { 0x000013b8, 0x00000000 },
+ { 0x000013f8, 0x00000000 },
+ { 0x00001438, 0x00000000 },
+ { 0x00001478, 0x00000000 },
+ { 0x000014b8, 0x00000000 },
+ { 0x000014f8, 0x00000000 },
+ { 0x00001538, 0x00000000 },
+ { 0x00001578, 0x00000000 },
+ { 0x000015b8, 0x00000000 },
+ { 0x000015f8, 0x00000000 },
+ { 0x00001638, 0x00000000 },
+ { 0x00001678, 0x00000000 },
+ { 0x000016b8, 0x00000000 },
+ { 0x000016f8, 0x00000000 },
+ { 0x00001738, 0x00000000 },
+ { 0x00001778, 0x00000000 },
+ { 0x000017b8, 0x00000000 },
+ { 0x000017f8, 0x00000000 },
+ { 0x0000103c, 0x00000000 },
+ { 0x0000107c, 0x00000000 },
+ { 0x000010bc, 0x00000000 },
+ { 0x000010fc, 0x00000000 },
+ { 0x0000113c, 0x00000000 },
+ { 0x0000117c, 0x00000000 },
+ { 0x000011bc, 0x00000000 },
+ { 0x000011fc, 0x00000000 },
+ { 0x0000123c, 0x00000000 },
+ { 0x0000127c, 0x00000000 },
+ { 0x000012bc, 0x00000000 },
+ { 0x000012fc, 0x00000000 },
+ { 0x0000133c, 0x00000000 },
+ { 0x0000137c, 0x00000000 },
+ { 0x000013bc, 0x00000000 },
+ { 0x000013fc, 0x00000000 },
+ { 0x0000143c, 0x00000000 },
+ { 0x0000147c, 0x00000000 },
+ { 0x00004030, 0x00000002 },
+ { 0x0000403c, 0x00000002 },
+ { 0x00007010, 0x00000000 },
+ { 0x00007038, 0x000004c2 },
+ { 0x00008004, 0x00000000 },
+ { 0x00008008, 0x00000000 },
+ { 0x0000800c, 0x00000000 },
+ { 0x00008018, 0x00000700 },
+ { 0x00008020, 0x00000000 },
+ { 0x00008038, 0x00000000 },
+ { 0x0000803c, 0x00000000 },
+ { 0x00008048, 0x40000000 },
+ { 0x00008054, 0x00000000 },
+ { 0x00008058, 0x00000000 },
+ { 0x0000805c, 0x000fc78f },
+ { 0x00008060, 0x0000000f },
+ { 0x00008064, 0x00000000 },
+ { 0x000080c0, 0x2a82301a },
+ { 0x000080c4, 0x05dc01e0 },
+ { 0x000080c8, 0x1f402710 },
+ { 0x000080cc, 0x01f40000 },
+ { 0x000080d0, 0x00001e00 },
+ { 0x000080d4, 0x00000000 },
+ { 0x000080d8, 0x00400000 },
+ { 0x000080e0, 0xffffffff },
+ { 0x000080e4, 0x0000ffff },
+ { 0x000080e8, 0x003f3f3f },
+ { 0x000080ec, 0x00000000 },
+ { 0x000080f0, 0x00000000 },
+ { 0x000080f4, 0x00000000 },
+ { 0x000080f8, 0x00000000 },
+ { 0x000080fc, 0x00020000 },
+ { 0x00008100, 0x00020000 },
+ { 0x00008104, 0x00000001 },
+ { 0x00008108, 0x00000052 },
+ { 0x0000810c, 0x00000000 },
+ { 0x00008110, 0x00000168 },
+ { 0x00008118, 0x000100aa },
+ { 0x0000811c, 0x00003210 },
+ { 0x00008124, 0x00000000 },
+ { 0x00008128, 0x00000000 },
+ { 0x0000812c, 0x00000000 },
+ { 0x00008130, 0x00000000 },
+ { 0x00008134, 0x00000000 },
+ { 0x00008138, 0x00000000 },
+ { 0x0000813c, 0x00000000 },
+ { 0x00008144, 0xffffffff },
+ { 0x00008168, 0x00000000 },
+ { 0x0000816c, 0x00000000 },
+ { 0x00008170, 0x32143320 },
+ { 0x00008174, 0xfaa4fa50 },
+ { 0x00008178, 0x00000100 },
+ { 0x0000817c, 0x00000000 },
+ { 0x000081c4, 0x00000000 },
+ { 0x000081ec, 0x00000000 },
+ { 0x000081f0, 0x00000000 },
+ { 0x000081f4, 0x00000000 },
+ { 0x000081f8, 0x00000000 },
+ { 0x000081fc, 0x00000000 },
+ { 0x00008200, 0x00000000 },
+ { 0x00008204, 0x00000000 },
+ { 0x00008208, 0x00000000 },
+ { 0x0000820c, 0x00000000 },
+ { 0x00008210, 0x00000000 },
+ { 0x00008214, 0x00000000 },
+ { 0x00008218, 0x00000000 },
+ { 0x0000821c, 0x00000000 },
+ { 0x00008220, 0x00000000 },
+ { 0x00008224, 0x00000000 },
+ { 0x00008228, 0x00000000 },
+ { 0x0000822c, 0x00000000 },
+ { 0x00008230, 0x00000000 },
+ { 0x00008234, 0x00000000 },
+ { 0x00008238, 0x00000000 },
+ { 0x0000823c, 0x00000000 },
+ { 0x00008240, 0x00100000 },
+ { 0x00008244, 0x0010f400 },
+ { 0x00008248, 0x00000100 },
+ { 0x0000824c, 0x0001e800 },
+ { 0x00008250, 0x00000000 },
+ { 0x00008254, 0x00000000 },
+ { 0x00008258, 0x00000000 },
+ { 0x0000825c, 0x400000ff },
+ { 0x00008260, 0x00080922 },
+ { 0x00008264, 0x88000010 },
+ { 0x00008270, 0x00000000 },
+ { 0x00008274, 0x40000000 },
+ { 0x00008278, 0x003e4180 },
+ { 0x0000827c, 0x00000000 },
+ { 0x00008284, 0x0000002c },
+ { 0x00008288, 0x0000002c },
+ { 0x0000828c, 0x00000000 },
+ { 0x00008294, 0x00000000 },
+ { 0x00008298, 0x00000000 },
+ { 0x00008300, 0x00000000 },
+ { 0x00008304, 0x00000000 },
+ { 0x00008308, 0x00000000 },
+ { 0x0000830c, 0x00000000 },
+ { 0x00008310, 0x00000000 },
+ { 0x00008314, 0x00000000 },
+ { 0x00008318, 0x00000000 },
+ { 0x00008328, 0x00000000 },
+ { 0x0000832c, 0x00000007 },
+ { 0x00008330, 0x00000302 },
+ { 0x00008334, 0x00000e00 },
+ { 0x00008338, 0x00070000 },
+ { 0x0000833c, 0x00000000 },
+ { 0x00008340, 0x000107ff },
+ { 0x00009808, 0x00000000 },
+ { 0x0000980c, 0xad848e19 },
+ { 0x00009810, 0x7d14e000 },
+ { 0x00009814, 0x9c0a9f6b },
+ { 0x0000981c, 0x00000000 },
+ { 0x0000982c, 0x0000a000 },
+ { 0x00009830, 0x00000000 },
+ { 0x0000983c, 0x00200400 },
+ { 0x00009840, 0x206a002e },
+ { 0x0000984c, 0x1284233c },
+ { 0x00009854, 0x00000859 },
+ { 0x00009900, 0x00000000 },
+ { 0x00009904, 0x00000000 },
+ { 0x00009908, 0x00000000 },
+ { 0x0000990c, 0x00000000 },
+ { 0x0000991c, 0x10000fff },
+ { 0x00009920, 0x05100000 },
+ { 0x0000a920, 0x05100000 },
+ { 0x0000b920, 0x05100000 },
+ { 0x00009928, 0x00000001 },
+ { 0x0000992c, 0x00000004 },
+ { 0x00009934, 0x1e1f2022 },
+ { 0x00009938, 0x0a0b0c0d },
+ { 0x0000993c, 0x00000000 },
+ { 0x00009948, 0x9280b212 },
+ { 0x0000994c, 0x00020028 },
+ { 0x00009954, 0x5d50e188 },
+ { 0x00009958, 0x00081fff },
+ { 0x0000c95c, 0x004b6a8e },
+ { 0x0000c968, 0x000003ce },
+ { 0x00009970, 0x190fb515 },
+ { 0x00009974, 0x00000000 },
+ { 0x00009978, 0x00000001 },
+ { 0x0000997c, 0x00000000 },
+ { 0x00009980, 0x00000000 },
+ { 0x00009984, 0x00000000 },
+ { 0x00009988, 0x00000000 },
+ { 0x0000998c, 0x00000000 },
+ { 0x00009990, 0x00000000 },
+ { 0x00009994, 0x00000000 },
+ { 0x00009998, 0x00000000 },
+ { 0x0000999c, 0x00000000 },
+ { 0x000099a0, 0x00000000 },
+ { 0x000099a4, 0x00000001 },
+ { 0x000099a8, 0x001fff00 },
+ { 0x000099ac, 0x00000000 },
+ { 0x000099b0, 0x03051000 },
+ { 0x000099dc, 0x00000000 },
+ { 0x000099e0, 0x00000200 },
+ { 0x000099e4, 0xaaaaaaaa },
+ { 0x000099e8, 0x3c466478 },
+ { 0x000099ec, 0x000000aa },
+ { 0x000099fc, 0x00001042 },
+ { 0x00009b00, 0x00000000 },
+ { 0x00009b04, 0x00000001 },
+ { 0x00009b08, 0x00000002 },
+ { 0x00009b0c, 0x00000003 },
+ { 0x00009b10, 0x00000004 },
+ { 0x00009b14, 0x00000005 },
+ { 0x00009b18, 0x00000008 },
+ { 0x00009b1c, 0x00000009 },
+ { 0x00009b20, 0x0000000a },
+ { 0x00009b24, 0x0000000b },
+ { 0x00009b28, 0x0000000c },
+ { 0x00009b2c, 0x0000000d },
+ { 0x00009b30, 0x00000010 },
+ { 0x00009b34, 0x00000011 },
+ { 0x00009b38, 0x00000012 },
+ { 0x00009b3c, 0x00000013 },
+ { 0x00009b40, 0x00000014 },
+ { 0x00009b44, 0x00000015 },
+ { 0x00009b48, 0x00000018 },
+ { 0x00009b4c, 0x00000019 },
+ { 0x00009b50, 0x0000001a },
+ { 0x00009b54, 0x0000001b },
+ { 0x00009b58, 0x0000001c },
+ { 0x00009b5c, 0x0000001d },
+ { 0x00009b60, 0x00000020 },
+ { 0x00009b64, 0x00000021 },
+ { 0x00009b68, 0x00000022 },
+ { 0x00009b6c, 0x00000023 },
+ { 0x00009b70, 0x00000024 },
+ { 0x00009b74, 0x00000025 },
+ { 0x00009b78, 0x00000028 },
+ { 0x00009b7c, 0x00000029 },
+ { 0x00009b80, 0x0000002a },
+ { 0x00009b84, 0x0000002b },
+ { 0x00009b88, 0x0000002c },
+ { 0x00009b8c, 0x0000002d },
+ { 0x00009b90, 0x00000030 },
+ { 0x00009b94, 0x00000031 },
+ { 0x00009b98, 0x00000032 },
+ { 0x00009b9c, 0x00000033 },
+ { 0x00009ba0, 0x00000034 },
+ { 0x00009ba4, 0x00000035 },
+ { 0x00009ba8, 0x00000035 },
+ { 0x00009bac, 0x00000035 },
+ { 0x00009bb0, 0x00000035 },
+ { 0x00009bb4, 0x00000035 },
+ { 0x00009bb8, 0x00000035 },
+ { 0x00009bbc, 0x00000035 },
+ { 0x00009bc0, 0x00000035 },
+ { 0x00009bc4, 0x00000035 },
+ { 0x00009bc8, 0x00000035 },
+ { 0x00009bcc, 0x00000035 },
+ { 0x00009bd0, 0x00000035 },
+ { 0x00009bd4, 0x00000035 },
+ { 0x00009bd8, 0x00000035 },
+ { 0x00009bdc, 0x00000035 },
+ { 0x00009be0, 0x00000035 },
+ { 0x00009be4, 0x00000035 },
+ { 0x00009be8, 0x00000035 },
+ { 0x00009bec, 0x00000035 },
+ { 0x00009bf0, 0x00000035 },
+ { 0x00009bf4, 0x00000035 },
+ { 0x00009bf8, 0x00000010 },
+ { 0x00009bfc, 0x0000001a },
+ { 0x0000a210, 0x40806333 },
+ { 0x0000a214, 0x00106c10 },
+ { 0x0000a218, 0x009c4060 },
+ { 0x0000a220, 0x018830c6 },
+ { 0x0000a224, 0x00000400 },
+ { 0x0000a228, 0x00000bb5 },
+ { 0x0000a22c, 0x00000011 },
+ { 0x0000a234, 0x20202020 },
+ { 0x0000a238, 0x20202020 },
+ { 0x0000a23c, 0x13c889af },
+ { 0x0000a240, 0x38490a20 },
+ { 0x0000a244, 0x00007bb6 },
+ { 0x0000a248, 0x0fff3ffc },
+ { 0x0000a24c, 0x00000001 },
+ { 0x0000a250, 0x0000a000 },
+ { 0x0000a254, 0x00000000 },
+ { 0x0000a258, 0x0cc75380 },
+ { 0x0000a25c, 0x0f0f0f01 },
+ { 0x0000a260, 0xdfa91f01 },
+ { 0x0000a268, 0x00000000 },
+ { 0x0000a26c, 0x0e79e5c6 },
+ { 0x0000b26c, 0x0e79e5c6 },
+ { 0x0000c26c, 0x0e79e5c6 },
+ { 0x0000d270, 0x00820820 },
+ { 0x0000a278, 0x1ce739ce },
+ { 0x0000a27c, 0x051701ce },
+ { 0x0000a338, 0x00000000 },
+ { 0x0000a33c, 0x00000000 },
+ { 0x0000a340, 0x00000000 },
+ { 0x0000a344, 0x00000000 },
+ { 0x0000a348, 0x3fffffff },
+ { 0x0000a34c, 0x3fffffff },
+ { 0x0000a350, 0x3fffffff },
+ { 0x0000a354, 0x0003ffff },
+ { 0x0000a358, 0x79a8aa1f },
+ { 0x0000d35c, 0x07ffffef },
+ { 0x0000d360, 0x0fffffe7 },
+ { 0x0000d364, 0x17ffffe5 },
+ { 0x0000d368, 0x1fffffe4 },
+ { 0x0000d36c, 0x37ffffe3 },
+ { 0x0000d370, 0x3fffffe3 },
+ { 0x0000d374, 0x57ffffe3 },
+ { 0x0000d378, 0x5fffffe2 },
+ { 0x0000d37c, 0x7fffffe2 },
+ { 0x0000d380, 0x7f3c7bba },
+ { 0x0000d384, 0xf3307ff0 },
+ { 0x0000a388, 0x08000000 },
+ { 0x0000a38c, 0x20202020 },
+ { 0x0000a390, 0x20202020 },
+ { 0x0000a394, 0x1ce739ce },
+ { 0x0000a398, 0x000001ce },
+ { 0x0000a39c, 0x00000001 },
+ { 0x0000a3a0, 0x00000000 },
+ { 0x0000a3a4, 0x00000000 },
+ { 0x0000a3a8, 0x00000000 },
+ { 0x0000a3ac, 0x00000000 },
+ { 0x0000a3b0, 0x00000000 },
+ { 0x0000a3b4, 0x00000000 },
+ { 0x0000a3b8, 0x00000000 },
+ { 0x0000a3bc, 0x00000000 },
+ { 0x0000a3c0, 0x00000000 },
+ { 0x0000a3c4, 0x00000000 },
+ { 0x0000a3c8, 0x00000246 },
+ { 0x0000a3cc, 0x20202020 },
+ { 0x0000a3d0, 0x20202020 },
+ { 0x0000a3d4, 0x20202020 },
+ { 0x0000a3dc, 0x1ce739ce },
+ { 0x0000a3e0, 0x000001ce },
+};
+
+static const u32 ar5416Bank0[][2] = {
+ { 0x000098b0, 0x1e5795e5 },
+ { 0x000098e0, 0x02008020 },
+};
+
+static const u32 ar5416BB_RfGain[][3] = {
+ { 0x00009a00, 0x00000000, 0x00000000 },
+ { 0x00009a04, 0x00000040, 0x00000040 },
+ { 0x00009a08, 0x00000080, 0x00000080 },
+ { 0x00009a0c, 0x000001a1, 0x00000141 },
+ { 0x00009a10, 0x000001e1, 0x00000181 },
+ { 0x00009a14, 0x00000021, 0x000001c1 },
+ { 0x00009a18, 0x00000061, 0x00000001 },
+ { 0x00009a1c, 0x00000168, 0x00000041 },
+ { 0x00009a20, 0x000001a8, 0x000001a8 },
+ { 0x00009a24, 0x000001e8, 0x000001e8 },
+ { 0x00009a28, 0x00000028, 0x00000028 },
+ { 0x00009a2c, 0x00000068, 0x00000068 },
+ { 0x00009a30, 0x00000189, 0x000000a8 },
+ { 0x00009a34, 0x000001c9, 0x00000169 },
+ { 0x00009a38, 0x00000009, 0x000001a9 },
+ { 0x00009a3c, 0x00000049, 0x000001e9 },
+ { 0x00009a40, 0x00000089, 0x00000029 },
+ { 0x00009a44, 0x00000170, 0x00000069 },
+ { 0x00009a48, 0x000001b0, 0x00000190 },
+ { 0x00009a4c, 0x000001f0, 0x000001d0 },
+ { 0x00009a50, 0x00000030, 0x00000010 },
+ { 0x00009a54, 0x00000070, 0x00000050 },
+ { 0x00009a58, 0x00000191, 0x00000090 },
+ { 0x00009a5c, 0x000001d1, 0x00000151 },
+ { 0x00009a60, 0x00000011, 0x00000191 },
+ { 0x00009a64, 0x00000051, 0x000001d1 },
+ { 0x00009a68, 0x00000091, 0x00000011 },
+ { 0x00009a6c, 0x000001b8, 0x00000051 },
+ { 0x00009a70, 0x000001f8, 0x00000198 },
+ { 0x00009a74, 0x00000038, 0x000001d8 },
+ { 0x00009a78, 0x00000078, 0x00000018 },
+ { 0x00009a7c, 0x00000199, 0x00000058 },
+ { 0x00009a80, 0x000001d9, 0x00000098 },
+ { 0x00009a84, 0x00000019, 0x00000159 },
+ { 0x00009a88, 0x00000059, 0x00000199 },
+ { 0x00009a8c, 0x00000099, 0x000001d9 },
+ { 0x00009a90, 0x000000d9, 0x00000019 },
+ { 0x00009a94, 0x000000f9, 0x00000059 },
+ { 0x00009a98, 0x000000f9, 0x00000099 },
+ { 0x00009a9c, 0x000000f9, 0x000000d9 },
+ { 0x00009aa0, 0x000000f9, 0x000000f9 },
+ { 0x00009aa4, 0x000000f9, 0x000000f9 },
+ { 0x00009aa8, 0x000000f9, 0x000000f9 },
+ { 0x00009aac, 0x000000f9, 0x000000f9 },
+ { 0x00009ab0, 0x000000f9, 0x000000f9 },
+ { 0x00009ab4, 0x000000f9, 0x000000f9 },
+ { 0x00009ab8, 0x000000f9, 0x000000f9 },
+ { 0x00009abc, 0x000000f9, 0x000000f9 },
+ { 0x00009ac0, 0x000000f9, 0x000000f9 },
+ { 0x00009ac4, 0x000000f9, 0x000000f9 },
+ { 0x00009ac8, 0x000000f9, 0x000000f9 },
+ { 0x00009acc, 0x000000f9, 0x000000f9 },
+ { 0x00009ad0, 0x000000f9, 0x000000f9 },
+ { 0x00009ad4, 0x000000f9, 0x000000f9 },
+ { 0x00009ad8, 0x000000f9, 0x000000f9 },
+ { 0x00009adc, 0x000000f9, 0x000000f9 },
+ { 0x00009ae0, 0x000000f9, 0x000000f9 },
+ { 0x00009ae4, 0x000000f9, 0x000000f9 },
+ { 0x00009ae8, 0x000000f9, 0x000000f9 },
+ { 0x00009aec, 0x000000f9, 0x000000f9 },
+ { 0x00009af0, 0x000000f9, 0x000000f9 },
+ { 0x00009af4, 0x000000f9, 0x000000f9 },
+ { 0x00009af8, 0x000000f9, 0x000000f9 },
+ { 0x00009afc, 0x000000f9, 0x000000f9 },
+};
+
+static const u32 ar5416Bank1[][2] = {
+ { 0x000098b0, 0x02108421 },
+ { 0x000098ec, 0x00000008 },
+};
+
+static const u32 ar5416Bank2[][2] = {
+ { 0x000098b0, 0x0e73ff17 },
+ { 0x000098e0, 0x00000420 },
+};
+
+static const u32 ar5416Bank3[][3] = {
+ { 0x000098f0, 0x01400018, 0x01c00018 },
+};
+
+static const u32 ar5416Bank6[][3] = {
+
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00e00000, 0x00e00000 },
+ { 0x0000989c, 0x005e0000, 0x005e0000 },
+ { 0x0000989c, 0x00120000, 0x00120000 },
+ { 0x0000989c, 0x00620000, 0x00620000 },
+ { 0x0000989c, 0x00020000, 0x00020000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x40ff0000, 0x40ff0000 },
+ { 0x0000989c, 0x005f0000, 0x005f0000 },
+ { 0x0000989c, 0x00870000, 0x00870000 },
+ { 0x0000989c, 0x00f90000, 0x00f90000 },
+ { 0x0000989c, 0x007b0000, 0x007b0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00f50000, 0x00f50000 },
+ { 0x0000989c, 0x00dc0000, 0x00dc0000 },
+ { 0x0000989c, 0x00110000, 0x00110000 },
+ { 0x0000989c, 0x006100a8, 0x006100a8 },
+ { 0x0000989c, 0x004210a2, 0x004210a2 },
+ { 0x0000989c, 0x0014008f, 0x0014008f },
+ { 0x0000989c, 0x00c40003, 0x00c40003 },
+ { 0x0000989c, 0x003000f2, 0x003000f2 },
+ { 0x0000989c, 0x00440016, 0x00440016 },
+ { 0x0000989c, 0x00410040, 0x00410040 },
+ { 0x0000989c, 0x0001805e, 0x0001805e },
+ { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
+ { 0x0000989c, 0x000000f1, 0x000000f1 },
+ { 0x0000989c, 0x00002081, 0x00002081 },
+ { 0x0000989c, 0x000000d4, 0x000000d4 },
+ { 0x000098d0, 0x0000000f, 0x0010000f },
+};
+
+static const u32 ar5416Bank6TPC[][3] = {
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00e00000, 0x00e00000 },
+ { 0x0000989c, 0x005e0000, 0x005e0000 },
+ { 0x0000989c, 0x00120000, 0x00120000 },
+ { 0x0000989c, 0x00620000, 0x00620000 },
+ { 0x0000989c, 0x00020000, 0x00020000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x40ff0000, 0x40ff0000 },
+ { 0x0000989c, 0x005f0000, 0x005f0000 },
+ { 0x0000989c, 0x00870000, 0x00870000 },
+ { 0x0000989c, 0x00f90000, 0x00f90000 },
+ { 0x0000989c, 0x007b0000, 0x007b0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00f50000, 0x00f50000 },
+ { 0x0000989c, 0x00dc0000, 0x00dc0000 },
+ { 0x0000989c, 0x00110000, 0x00110000 },
+ { 0x0000989c, 0x006100a8, 0x006100a8 },
+ { 0x0000989c, 0x00423022, 0x00423022 },
+ { 0x0000989c, 0x201400df, 0x201400df },
+ { 0x0000989c, 0x00c40002, 0x00c40002 },
+ { 0x0000989c, 0x003000f2, 0x003000f2 },
+ { 0x0000989c, 0x00440016, 0x00440016 },
+ { 0x0000989c, 0x00410040, 0x00410040 },
+ { 0x0000989c, 0x0001805e, 0x0001805e },
+ { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
+ { 0x0000989c, 0x000000e1, 0x000000e1 },
+ { 0x0000989c, 0x00007081, 0x00007081 },
+ { 0x0000989c, 0x000000d4, 0x000000d4 },
+ { 0x000098d0, 0x0000000f, 0x0010000f },
+};
+
+static const u32 ar5416Bank7[][2] = {
+ { 0x0000989c, 0x00000500 },
+ { 0x0000989c, 0x00000800 },
+ { 0x000098cc, 0x0000000e },
+};
+
+static const u32 ar5416Addac[][2] = {
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000003 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x0000000c },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000030 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000060 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000058 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x000098cc, 0x00000000 },
+};
+
+static const u32 ar5416Modes_9100[][6] = {
+ { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
+ { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
+ { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
+ { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
+ { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
+ { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
+ { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
+ { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
+ { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
+ { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
+ { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
+ { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
+ { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
+ { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x00009850, 0x6d48b4e2, 0x6d48b4e2, 0x6d48b0e2, 0x6d48b0e2, 0x6d48b0e2 },
+ { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec86d2e, 0x7ec84d2e, 0x7ec82d2e },
+ { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e },
+ { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
+ { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
+ { 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 },
+ { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
+ { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 },
+ { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
+ { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a11, 0xd00a8a0d, 0xd00a8a0d },
+ { 0x00009940, 0x00754604, 0x00754604, 0xfff81204, 0xfff81204, 0xfff81204 },
+ { 0x00009944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020 },
+ { 0x00009954, 0x5f3ca3de, 0x5f3ca3de, 0xe250a51e, 0xe250a51e, 0xe250a51e },
+ { 0x00009958, 0x2108ecff, 0x2108ecff, 0x3388ffff, 0x3388ffff, 0x3388ffff },
+#ifdef TB243
+ { 0x00009960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
+ { 0x0000a960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
+ { 0x0000b960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
+ { 0x00009964, 0x00000000, 0x00000000, 0x00002210, 0x00002210, 0x00001120 },
+#else
+ { 0x00009960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
+ { 0x0000a960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
+ { 0x0000b960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
+ { 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 },
+#endif
+ { 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a1000, 0x001a0c00, 0x001a0c00 },
+ { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
+ { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
+ { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
+ { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
+ { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
+ { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
+ { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
+ { 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
+ { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
+ { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
+ { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
+ { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
+ { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
+ { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
+ { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
+ { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
+ { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
+ { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
+ { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
+ { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
+ { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+};
+
+#endif /* INITVALS_AR5008_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
new file mode 100644
index 000000000000..b2c17c98bb38
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -0,0 +1,1374 @@
+/*
+ * Copyright (c) 2008-2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "hw-ops.h"
+#include "../regd.h"
+#include "ar9002_phy.h"
+
+/* All code below is for non single-chip solutions */
+
+/**
+ * ar5008_hw_phy_modify_rx_buffer() - perform analog swizzling of parameters
+ * @rfBuf: RF register buffer to modify
+ * @reg32: register value to pack into the buffer
+ * @numBits: number of bits to pack
+ * @firstBit: position of the first bit (1-based)
+ * @column: byte lane within each 32-bit word of the buffer
+ *
+ * Performs analog "swizzling" of parameters into their location.
+ * Used on external AR2133/AR5133 radios.
+ */
+static void ar5008_hw_phy_modify_rx_buffer(u32 *rfBuf, u32 reg32,
+ u32 numBits, u32 firstBit,
+ u32 column)
+{
+ u32 tmp32, mask, arrayEntry, lastBit;
+ int32_t bitPosition, bitsLeft;
+
+ tmp32 = ath9k_hw_reverse_bits(reg32, numBits);
+ arrayEntry = (firstBit - 1) / 8;
+ bitPosition = (firstBit - 1) % 8;
+ bitsLeft = numBits;
+ while (bitsLeft > 0) {
+ lastBit = (bitPosition + bitsLeft > 8) ?
+ 8 : bitPosition + bitsLeft;
+ mask = (((1 << lastBit) - 1) ^ ((1 << bitPosition) - 1)) <<
+ (column * 8);
+ rfBuf[arrayEntry] &= ~mask;
+ rfBuf[arrayEntry] |= ((tmp32 << bitPosition) <<
+ (column * 8)) & mask;
+ bitsLeft -= 8 - bitPosition;
+ tmp32 = tmp32 >> (8 - bitPosition);
+ bitPosition = 0;
+ arrayEntry++;
+ }
+}
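+
+/*
+ * Worked example (informational): the rf_pwd_icsyndiv write below passes
+ * numBits = 3, firstBit = 181, column = 3, so the bits land in entry 22 of
+ * the bank 6 buffer ((181 - 1) / 8) at bit position 4 ((181 - 1) % 8),
+ * shifted into byte lane 3 (column * 8) of that word.
+ */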
+
+/*
+ * Fix on 2.4 GHz band for orientation sensitivity issue by increasing
+ * rf_pwd_icsyndiv.
+ *
+ * Theoretical Rules:
+ * if 2 GHz band
+ * if forceBiasAuto
+ * if synth_freq < 2412
+ * bias = 0
+ * else if 2412 <= synth_freq <= 2422
+ * bias = 1
+ * else // synth_freq > 2422
+ * bias = 2
+ * else if forceBias > 0
+ * bias = forceBias & 7
+ * else
+ * no change, use value from ini file
+ * else
+ * no change, invalid band
+ *
+ * 1st Mod:
+ * 2422 also uses value of 2
+ * <approved>
+ *
+ * 2nd Mod:
+ * Less than 2412 uses value of 0, 2412 and above uses value of 2
+ */
+static void ar5008_hw_force_bias(struct ath_hw *ah, u16 synth_freq)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 tmp_reg;
+ int reg_writes = 0;
+ u32 new_bias = 0;
+
+ if (!AR_SREV_5416(ah) || synth_freq >= 3000)
+ return;
+
+ BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
+
+ if (synth_freq < 2412)
+ new_bias = 0;
+ else if (synth_freq < 2422)
+ new_bias = 1;
+ else
+ new_bias = 2;
+
+ /* pre-reverse this field */
+ tmp_reg = ath9k_hw_reverse_bits(new_bias, 3);
+
+ ath_print(common, ATH_DBG_CONFIG,
+ "Force rf_pwd_icsyndiv to %1d on %4d\n",
+ new_bias, synth_freq);
+
+ /* swizzle rf_pwd_icsyndiv */
+ ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data, tmp_reg, 3, 181, 3);
+
+ /* write Bank 6 with new params */
+ REG_WRITE_RF_ARRAY(&ah->iniBank6, ah->analogBank6Data, reg_writes);
+}
+
+/**
+ * ar5008_hw_set_channel - tune to a channel on the external AR2133/AR5133 radios
+ * @ah: atheros hardware structure
+ * @chan: channel to tune to
+ *
+ * For the external AR2133/AR5133 radios, takes the MHz channel value and sets
+ * the channel value. Assumes writes have been enabled to the analog bus and
+ * that the bank6 register cache is in ah->analogBank6Data.
+ */
+static int ar5008_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 channelSel = 0;
+ u32 bModeSynth = 0;
+ u32 aModeRefSel = 0;
+ u32 reg32 = 0;
+ u16 freq;
+ struct chan_centers centers;
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ freq = centers.synth_center;
+
+ if (freq < 4800) {
+ u32 txctl;
+
+ if (((freq - 2192) % 5) == 0) {
+ channelSel = ((freq - 672) * 2 - 3040) / 10;
+ bModeSynth = 0;
+ } else if (((freq - 2224) % 5) == 0) {
+ channelSel = ((freq - 704) * 2 - 3040) / 10;
+ bModeSynth = 1;
+ } else {
+ ath_print(common, ATH_DBG_FATAL,
+ "Invalid channel %u MHz\n", freq);
+ return -EINVAL;
+ }
+
+ channelSel = (channelSel << 2) & 0xff;
+ channelSel = ath9k_hw_reverse_bits(channelSel, 8);
+
+ txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
+ if (freq == 2484) {
+
+ REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
+ txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
+ } else {
+ REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
+ txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN);
+ }
+
+ } else if ((freq % 20) == 0 && freq >= 5120) {
+ channelSel =
+ ath9k_hw_reverse_bits(((freq - 4800) / 20 << 2), 8);
+ aModeRefSel = ath9k_hw_reverse_bits(1, 2);
+ } else if ((freq % 10) == 0) {
+ channelSel =
+ ath9k_hw_reverse_bits(((freq - 4800) / 10 << 1), 8);
+ if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah))
+ aModeRefSel = ath9k_hw_reverse_bits(2, 2);
+ else
+ aModeRefSel = ath9k_hw_reverse_bits(1, 2);
+ } else if ((freq % 5) == 0) {
+ channelSel = ath9k_hw_reverse_bits((freq - 4800) / 5, 8);
+ aModeRefSel = ath9k_hw_reverse_bits(1, 2);
+ } else {
+ ath_print(common, ATH_DBG_FATAL,
+ "Invalid channel %u MHz\n", freq);
+ return -EINVAL;
+ }
+
+ ar5008_hw_force_bias(ah, freq);
+
+ reg32 =
+ (channelSel << 8) | (aModeRefSel << 2) | (bModeSynth << 1) |
+ (1 << 5) | 0x1;
+
+ REG_WRITE(ah, AR_PHY(0x37), reg32);
+
+ ah->curchan = chan;
+ ah->curchan_rad_index = -1;
+
+ return 0;
+}
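+
+/*
+ * Worked example (informational): for a 2.4 GHz channel at 2437 MHz,
+ * (2437 - 2192) % 5 == 0, so bModeSynth = 0 and
+ * channelSel = ((2437 - 672) * 2 - 3040) / 10 = 49; after
+ * (channelSel << 2) & 0xff this is 0xc4, which is bit-reversed over 8 bits
+ * to 0x23 before being combined into the value written to AR_PHY(0x37).
+ */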
+
+/**
+ * ar5008_hw_spur_mitigate - convert baseband spur frequency for external radios
+ * @ah: atheros hardware structure
+ * @chan: channel for which to mitigate baseband spurs
+ *
+ * For non single-chip solutions. Converts to a baseband spur frequency given
+ * the input channel frequency and computes the register settings below.
+ */
+static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ int bb_spur = AR_NO_SPUR;
+ int bin, cur_bin;
+ int spur_freq_sd;
+ int spur_delta_phase;
+ int denominator;
+ int upper, lower, cur_vit_mask;
+ int tmp, new;
+ int i;
+ int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
+ AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
+ };
+ int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
+ AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
+ };
+ int inc[4] = { 0, 100, 0, 0 };
+
+ int8_t mask_m[123];
+ int8_t mask_p[123];
+ int8_t mask_amt;
+ int tmp_mask;
+ int cur_bb_spur;
+ bool is2GHz = IS_CHAN_2GHZ(chan);
+
+ memset(&mask_m, 0, sizeof(int8_t) * 123);
+ memset(&mask_p, 0, sizeof(int8_t) * 123);
+
+ for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
+ cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
+ if (AR_NO_SPUR == cur_bb_spur)
+ break;
+ cur_bb_spur = cur_bb_spur - (chan->channel * 10);
+ if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) {
+ bb_spur = cur_bb_spur;
+ break;
+ }
+ }
+
+ if (AR_NO_SPUR == bb_spur)
+ return;
+
+ bin = bb_spur * 32;
+
+ tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
+ new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
+ AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
+ AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
+ AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
+
+ REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new);
+
+ new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
+ AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
+ AR_PHY_SPUR_REG_MASK_RATE_SELECT |
+ AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
+ SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
+ REG_WRITE(ah, AR_PHY_SPUR_REG, new);
+
+ spur_delta_phase = ((bb_spur * 524288) / 100) &
+ AR_PHY_TIMING11_SPUR_DELTA_PHASE;
+
+ denominator = IS_CHAN_2GHZ(chan) ? 440 : 400;
+ spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff;
+
+ new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
+ SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
+ SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
+ REG_WRITE(ah, AR_PHY_TIMING11, new);
+
+ cur_bin = -6000;
+ upper = bin + 100;
+ lower = bin - 100;
+
+ for (i = 0; i < 4; i++) {
+ int pilot_mask = 0;
+ int chan_mask = 0;
+ int bp = 0;
+ for (bp = 0; bp < 30; bp++) {
+ if ((cur_bin > lower) && (cur_bin < upper)) {
+ pilot_mask = pilot_mask | 0x1 << bp;
+ chan_mask = chan_mask | 0x1 << bp;
+ }
+ cur_bin += 100;
+ }
+ cur_bin += inc[i];
+ REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
+ REG_WRITE(ah, chan_mask_reg[i], chan_mask);
+ }
+
+ cur_vit_mask = 6100;
+ upper = bin + 120;
+ lower = bin - 120;
+
+ for (i = 0; i < 123; i++) {
+ if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
+
+ /* workaround for gcc bug #37014 */
+ volatile int tmp_v = abs(cur_vit_mask - bin);
+
+ if (tmp_v < 75)
+ mask_amt = 1;
+ else
+ mask_amt = 0;
+ if (cur_vit_mask < 0)
+ mask_m[abs(cur_vit_mask / 100)] = mask_amt;
+ else
+ mask_p[cur_vit_mask / 100] = mask_amt;
+ }
+ cur_vit_mask -= 100;
+ }
+
+ tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
+ | (mask_m[48] << 26) | (mask_m[49] << 24)
+ | (mask_m[50] << 22) | (mask_m[51] << 20)
+ | (mask_m[52] << 18) | (mask_m[53] << 16)
+ | (mask_m[54] << 14) | (mask_m[55] << 12)
+ | (mask_m[56] << 10) | (mask_m[57] << 8)
+ | (mask_m[58] << 6) | (mask_m[59] << 4)
+ | (mask_m[60] << 2) | (mask_m[61] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
+ REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
+
+ tmp_mask = (mask_m[31] << 28)
+ | (mask_m[32] << 26) | (mask_m[33] << 24)
+ | (mask_m[34] << 22) | (mask_m[35] << 20)
+ | (mask_m[36] << 18) | (mask_m[37] << 16)
+ | (mask_m[48] << 14) | (mask_m[39] << 12)
+ | (mask_m[40] << 10) | (mask_m[41] << 8)
+ | (mask_m[42] << 6) | (mask_m[43] << 4)
+ | (mask_m[44] << 2) | (mask_m[45] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
+
+ tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
+ | (mask_m[18] << 26) | (mask_m[18] << 24)
+ | (mask_m[20] << 22) | (mask_m[20] << 20)
+ | (mask_m[22] << 18) | (mask_m[22] << 16)
+ | (mask_m[24] << 14) | (mask_m[24] << 12)
+ | (mask_m[25] << 10) | (mask_m[26] << 8)
+ | (mask_m[27] << 6) | (mask_m[28] << 4)
+ | (mask_m[29] << 2) | (mask_m[30] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
+
+ tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
+ | (mask_m[2] << 26) | (mask_m[3] << 24)
+ | (mask_m[4] << 22) | (mask_m[5] << 20)
+ | (mask_m[6] << 18) | (mask_m[7] << 16)
+ | (mask_m[8] << 14) | (mask_m[9] << 12)
+ | (mask_m[10] << 10) | (mask_m[11] << 8)
+ | (mask_m[12] << 6) | (mask_m[13] << 4)
+ | (mask_m[14] << 2) | (mask_m[15] << 0);
+ REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
+
+ tmp_mask = (mask_p[15] << 28)
+ | (mask_p[14] << 26) | (mask_p[13] << 24)
+ | (mask_p[12] << 22) | (mask_p[11] << 20)
+ | (mask_p[10] << 18) | (mask_p[9] << 16)
+ | (mask_p[8] << 14) | (mask_p[7] << 12)
+ | (mask_p[6] << 10) | (mask_p[5] << 8)
+ | (mask_p[4] << 6) | (mask_p[3] << 4)
+ | (mask_p[2] << 2) | (mask_p[1] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
+
+ tmp_mask = (mask_p[30] << 28)
+ | (mask_p[29] << 26) | (mask_p[28] << 24)
+ | (mask_p[27] << 22) | (mask_p[26] << 20)
+ | (mask_p[25] << 18) | (mask_p[24] << 16)
+ | (mask_p[23] << 14) | (mask_p[22] << 12)
+ | (mask_p[21] << 10) | (mask_p[20] << 8)
+ | (mask_p[19] << 6) | (mask_p[18] << 4)
+ | (mask_p[17] << 2) | (mask_p[16] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
+
+ tmp_mask = (mask_p[45] << 28)
+ | (mask_p[44] << 26) | (mask_p[43] << 24)
+ | (mask_p[42] << 22) | (mask_p[41] << 20)
+ | (mask_p[40] << 18) | (mask_p[39] << 16)
+ | (mask_p[38] << 14) | (mask_p[37] << 12)
+ | (mask_p[36] << 10) | (mask_p[35] << 8)
+ | (mask_p[34] << 6) | (mask_p[33] << 4)
+ | (mask_p[32] << 2) | (mask_p[31] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
+
+ tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
+ | (mask_p[59] << 26) | (mask_p[58] << 24)
+ | (mask_p[57] << 22) | (mask_p[56] << 20)
+ | (mask_p[55] << 18) | (mask_p[54] << 16)
+ | (mask_p[53] << 14) | (mask_p[52] << 12)
+ | (mask_p[51] << 10) | (mask_p[50] << 8)
+ | (mask_p[49] << 6) | (mask_p[48] << 4)
+ | (mask_p[47] << 2) | (mask_p[46] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
+}
+
+/**
+ * ar5008_hw_rf_alloc_ext_banks - allocates banks for external radio programming
+ * @ah: atheros hardware structure
+ *
+ * Only required for older devices with external AR2133/AR5133 radios.
+ */
+static int ar5008_hw_rf_alloc_ext_banks(struct ath_hw *ah)
+{
+#define ATH_ALLOC_BANK(bank, size) do { \
+ bank = kzalloc((sizeof(u32) * size), GFP_KERNEL); \
+ if (!bank) { \
+ ath_print(common, ATH_DBG_FATAL, \
+ "Cannot allocate RF banks\n"); \
+ return -ENOMEM; \
+ } \
+ } while (0);
+
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
+
+ ATH_ALLOC_BANK(ah->analogBank0Data, ah->iniBank0.ia_rows);
+ ATH_ALLOC_BANK(ah->analogBank1Data, ah->iniBank1.ia_rows);
+ ATH_ALLOC_BANK(ah->analogBank2Data, ah->iniBank2.ia_rows);
+ ATH_ALLOC_BANK(ah->analogBank3Data, ah->iniBank3.ia_rows);
+ ATH_ALLOC_BANK(ah->analogBank6Data, ah->iniBank6.ia_rows);
+ ATH_ALLOC_BANK(ah->analogBank6TPCData, ah->iniBank6TPC.ia_rows);
+ ATH_ALLOC_BANK(ah->analogBank7Data, ah->iniBank7.ia_rows);
+ ATH_ALLOC_BANK(ah->addac5416_21,
+ ah->iniAddac.ia_rows * ah->iniAddac.ia_columns);
+ ATH_ALLOC_BANK(ah->bank6Temp, ah->iniBank6.ia_rows);
+
+ return 0;
+#undef ATH_ALLOC_BANK
+}
+
+
+/**
+ * ar5008_hw_rf_free_ext_banks - Free memory for analog bank scratch buffers
+ * @ah: atheros hardware structure
+ *
+ * Frees the scratch buffers used for the external AR2133/AR5133 radio banks.
+ */
+static void ar5008_hw_rf_free_ext_banks(struct ath_hw *ah)
+{
+#define ATH_FREE_BANK(bank) do { \
+ kfree(bank); \
+ bank = NULL; \
+ } while (0);
+
+ BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
+
+ ATH_FREE_BANK(ah->analogBank0Data);
+ ATH_FREE_BANK(ah->analogBank1Data);
+ ATH_FREE_BANK(ah->analogBank2Data);
+ ATH_FREE_BANK(ah->analogBank3Data);
+ ATH_FREE_BANK(ah->analogBank6Data);
+ ATH_FREE_BANK(ah->analogBank6TPCData);
+ ATH_FREE_BANK(ah->analogBank7Data);
+ ATH_FREE_BANK(ah->addac5416_21);
+ ATH_FREE_BANK(ah->bank6Temp);
+
+#undef ATH_FREE_BANK
+}
+
+/**
+ * ar5008_hw_set_rf_regs - programs rf registers based on EEPROM
+ * @ah: atheros hardware structure
+ * @chan: channel to program the rf registers for
+ * @modesIndex: mode index into the initval tables
+ *
+ * Used for the external AR2133/AR5133 radios.
+ *
+ * Reads the EEPROM header info from the device structure and programs
+ * all rf registers. This routine requires access to the analog
+ * rf device. This is not required for single-chip devices.
+ */
+static bool ar5008_hw_set_rf_regs(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u16 modesIndex)
+{
+ u32 eepMinorRev;
+ u32 ob5GHz = 0, db5GHz = 0;
+ u32 ob2GHz = 0, db2GHz = 0;
+ int regWrites = 0;
+
+ /*
+ * Software does not need to program bank data
+	 * for single chip devices, i.e. AR9280 and later.
+ */
+ if (AR_SREV_9280_10_OR_LATER(ah))
+ return true;
+
+ /* Setup rf parameters */
+ eepMinorRev = ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV);
+
+ /* Setup Bank 0 Write */
+ RF_BANK_SETUP(ah->analogBank0Data, &ah->iniBank0, 1);
+
+ /* Setup Bank 1 Write */
+ RF_BANK_SETUP(ah->analogBank1Data, &ah->iniBank1, 1);
+
+ /* Setup Bank 2 Write */
+ RF_BANK_SETUP(ah->analogBank2Data, &ah->iniBank2, 1);
+
+	/* Setup Bank 3 Write */
+	RF_BANK_SETUP(ah->analogBank3Data, &ah->iniBank3,
+		      modesIndex);
+
+	/* Setup Bank 6 Write (from the TPC table) */
+	{
+ int i;
+ for (i = 0; i < ah->iniBank6TPC.ia_rows; i++) {
+ ah->analogBank6Data[i] =
+ INI_RA(&ah->iniBank6TPC, i, modesIndex);
+ }
+ }
+
+ /* Only the 5 or 2 GHz OB/DB need to be set for a mode */
+ if (eepMinorRev >= 2) {
+ if (IS_CHAN_2GHZ(chan)) {
+ ob2GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_2);
+ db2GHz = ah->eep_ops->get_eeprom(ah, EEP_DB_2);
+ ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data,
+ ob2GHz, 3, 197, 0);
+ ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data,
+ db2GHz, 3, 194, 0);
+ } else {
+ ob5GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_5);
+ db5GHz = ah->eep_ops->get_eeprom(ah, EEP_DB_5);
+ ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data,
+ ob5GHz, 3, 203, 0);
+ ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data,
+ db5GHz, 3, 200, 0);
+ }
+ }
+
+	/* Setup Bank 7 Write */
+ RF_BANK_SETUP(ah->analogBank7Data, &ah->iniBank7, 1);
+
+ /* Write Analog registers */
+ REG_WRITE_RF_ARRAY(&ah->iniBank0, ah->analogBank0Data,
+ regWrites);
+ REG_WRITE_RF_ARRAY(&ah->iniBank1, ah->analogBank1Data,
+ regWrites);
+ REG_WRITE_RF_ARRAY(&ah->iniBank2, ah->analogBank2Data,
+ regWrites);
+ REG_WRITE_RF_ARRAY(&ah->iniBank3, ah->analogBank3Data,
+ regWrites);
+ REG_WRITE_RF_ARRAY(&ah->iniBank6TPC, ah->analogBank6Data,
+ regWrites);
+ REG_WRITE_RF_ARRAY(&ah->iniBank7, ah->analogBank7Data,
+ regWrites);
+
+ return true;
+}
+
+static void ar5008_hw_init_bb(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 synthDelay;
+
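+	/*
+	 * Wait below for the frequency synth to settle once it is turned
+	 * on via AR_PHY_ACTIVE_EN. AR_PHY_RX_DELAY appears to hold the
+	 * settle time in 100 ns steps for OFDM (hence the /10 conversion
+	 * to microseconds); 11b channels use a different 4/22 scaling.
+	 */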
+ synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
+ if (IS_CHAN_B(chan))
+ synthDelay = (4 * synthDelay) / 22;
+ else
+ synthDelay /= 10;
+
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+
+ udelay(synthDelay + BASE_ACTIVATE_DELAY);
+}
+
+static void ar5008_hw_init_chain_masks(struct ath_hw *ah)
+{
+ int rx_chainmask, tx_chainmask;
+
+ rx_chainmask = ah->rxchainmask;
+ tx_chainmask = ah->txchainmask;
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ switch (rx_chainmask) {
+ case 0x5:
+ DISABLE_REGWRITE_BUFFER(ah);
+ REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
+ AR_PHY_SWAP_ALT_CHAIN);
+ ENABLE_REGWRITE_BUFFER(ah);
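+		/* fall through */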
+ case 0x3:
+ if (ah->hw_version.macVersion == AR_SREV_REVISION_5416_10) {
+ REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7);
+ REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7);
+ break;
+ }
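+		/* fall through */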
+ case 0x1:
+ case 0x2:
+ case 0x7:
+ REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
+ REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
+ break;
+ default:
+ break;
+ }
+
+ REG_WRITE(ah, AR_SELFGEN_MASK, tx_chainmask);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
+ if (tx_chainmask == 0x5) {
+ REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
+ AR_PHY_SWAP_ALT_CHAIN);
+ }
+ if (AR_SREV_9100(ah))
+ REG_WRITE(ah, AR_PHY_ANALOG_SWAP,
+ REG_READ(ah, AR_PHY_ANALOG_SWAP) | 0x00000001);
+}
+
+static void ar5008_hw_override_ini(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 val;
+
+ /*
+	 * Set the RX_ABORT and RX_DIS bits and clear them only after
+	 * RXE is set for the MAC. This prevents frames with corrupted
+	 * descriptor status.
+ */
+ REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
+
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ val = REG_READ(ah, AR_PCU_MISC_MODE2);
+
+ if (!AR_SREV_9271(ah))
+ val &= ~AR_PCU_MISC_MODE2_HWWAR1;
+
+ if (AR_SREV_9287_10_OR_LATER(ah))
+			val &= ~AR_PCU_MISC_MODE2_HWWAR2;
+
+ REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
+ }
+
+ if (!AR_SREV_5416_20_OR_LATER(ah) ||
+ AR_SREV_9280_10_OR_LATER(ah))
+ return;
+ /*
+ * Disable BB clock gating
+ * Necessary to avoid issues on AR5416 2.0
+ */
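+	/* 0x9800 + (651 << 2) = 0xa22c, i.e. baseband register 651 */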
+ REG_WRITE(ah, 0x9800 + (651 << 2), 0x11);
+
+ /*
+ * Disable RIFS search on some chips to avoid baseband
+ * hang issues.
+ */
+ if (AR_SREV_9100(ah) || AR_SREV_9160(ah)) {
+ val = REG_READ(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS);
+ val &= ~AR_PHY_RIFS_INIT_DELAY;
+ REG_WRITE(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS, val);
+ }
+}
+
+static void ar5008_hw_set_channel_regs(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 phymode;
+ u32 enableDacFifo = 0;
+
+ if (AR_SREV_9285_10_OR_LATER(ah))
+ enableDacFifo = (REG_READ(ah, AR_PHY_TURBO) &
+ AR_PHY_FC_ENABLE_DAC_FIFO);
+
+ phymode = AR_PHY_FC_HT_EN | AR_PHY_FC_SHORT_GI_40
+ | AR_PHY_FC_SINGLE_HT_LTF1 | AR_PHY_FC_WALSH | enableDacFifo;
+
+ if (IS_CHAN_HT40(chan)) {
+ phymode |= AR_PHY_FC_DYN2040_EN;
+
+ if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
+ (chan->chanmode == CHANNEL_G_HT40PLUS))
+ phymode |= AR_PHY_FC_DYN2040_PRI_CH;
+
+ }
+ REG_WRITE(ah, AR_PHY_TURBO, phymode);
+
+ ath9k_hw_set11nmac2040(ah);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
+ REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+}
+
+static int ar5008_hw_process_ini(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
+ int i, regWrites = 0;
+ struct ieee80211_channel *channel = chan->chan;
+ u32 modesIndex, freqIndex;
+
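+	/*
+	 * modesIndex selects the per-mode column in the ini arrays
+	 * (1: 5 GHz legacy/HT20, 2: 5 GHz HT40, 3: 2.4 GHz HT40,
+	 * 4: 2.4 GHz legacy/HT20/11b); freqIndex selects the band
+	 * column (1: 5 GHz, 2: 2.4 GHz).
+	 */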
+ switch (chan->chanmode) {
+ case CHANNEL_A:
+ case CHANNEL_A_HT20:
+ modesIndex = 1;
+ freqIndex = 1;
+ break;
+ case CHANNEL_A_HT40PLUS:
+ case CHANNEL_A_HT40MINUS:
+ modesIndex = 2;
+ freqIndex = 1;
+ break;
+ case CHANNEL_G:
+ case CHANNEL_G_HT20:
+ case CHANNEL_B:
+ modesIndex = 4;
+ freqIndex = 2;
+ break;
+ case CHANNEL_G_HT40PLUS:
+ case CHANNEL_G_HT40MINUS:
+ modesIndex = 3;
+ freqIndex = 2;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (AR_SREV_9287_12_OR_LATER(ah)) {
+ /* Enable ASYNC FIFO */
+ REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
+ AR_MAC_PCU_ASYNC_FIFO_REG3_DATAPATH_SEL);
+ REG_SET_BIT(ah, AR_PHY_MODE, AR_PHY_MODE_ASYNCFIFO);
+ REG_CLR_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
+ AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
+ REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
+ AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
+ }
+
+ /*
+ * Set correct baseband to analog shift setting to
+ * access analog chips.
+ */
+ REG_WRITE(ah, AR_PHY(0), 0x00000007);
+
+ /* Write ADDAC shifts */
+ REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_EXTERNAL_RADIO);
+ ah->eep_ops->set_addac(ah, chan);
+
+ if (AR_SREV_5416_22_OR_LATER(ah)) {
+ REG_WRITE_ARRAY(&ah->iniAddac, 1, regWrites);
+ } else {
+ struct ar5416IniArray temp;
+ u32 addacSize =
+ sizeof(u32) * ah->iniAddac.ia_rows *
+ ah->iniAddac.ia_columns;
+
+ /* For AR5416 2.0/2.1 */
+ memcpy(ah->addac5416_21,
+ ah->iniAddac.ia_array, addacSize);
+
+ /* override CLKDRV value at [row, column] = [31, 1] */
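+	/* ia_array is row-major, so [row][col] lives at row * ia_columns + col */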
+ (ah->addac5416_21)[31 * ah->iniAddac.ia_columns + 1] = 0;
+
+ temp.ia_array = ah->addac5416_21;
+ temp.ia_columns = ah->iniAddac.ia_columns;
+ temp.ia_rows = ah->iniAddac.ia_rows;
+ REG_WRITE_ARRAY(&temp, 1, regWrites);
+ }
+
+ REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_INTERNAL_ADDAC);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ for (i = 0; i < ah->iniModes.ia_rows; i++) {
+ u32 reg = INI_RA(&ah->iniModes, i, 0);
+ u32 val = INI_RA(&ah->iniModes, i, modesIndex);
+
+ if (reg == AR_AN_TOP2 && ah->need_an_top2_fixup)
+ val &= ~AR_AN_TOP2_PWDCLKIND;
+
+ REG_WRITE(ah, reg, val);
+
+ if (reg >= 0x7800 && reg < 0x78a0
+ && ah->config.analog_shiftreg) {
+ udelay(100);
+ }
+
+ DO_DELAY(regWrites);
+ }
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
+ if (AR_SREV_9280(ah) || AR_SREV_9287_10_OR_LATER(ah))
+ REG_WRITE_ARRAY(&ah->iniModesRxGain, modesIndex, regWrites);
+
+ if (AR_SREV_9280(ah) || AR_SREV_9285_12_OR_LATER(ah) ||
+ AR_SREV_9287_10_OR_LATER(ah))
+ REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
+
+ if (AR_SREV_9271_10(ah))
+ REG_WRITE_ARRAY(&ah->iniModes_9271_1_0_only,
+ modesIndex, regWrites);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ /* Write common array parameters */
+ for (i = 0; i < ah->iniCommon.ia_rows; i++) {
+ u32 reg = INI_RA(&ah->iniCommon, i, 0);
+ u32 val = INI_RA(&ah->iniCommon, i, 1);
+
+ REG_WRITE(ah, reg, val);
+
+ if (reg >= 0x7800 && reg < 0x78a0
+ && ah->config.analog_shiftreg) {
+ udelay(100);
+ }
+
+ DO_DELAY(regWrites);
+ }
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
+ if (AR_SREV_9271(ah)) {
+ if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) == 1)
+ REG_WRITE_ARRAY(&ah->iniModes_high_power_tx_gain_9271,
+ modesIndex, regWrites);
+ else
+ REG_WRITE_ARRAY(&ah->iniModes_normal_power_tx_gain_9271,
+ modesIndex, regWrites);
+ }
+
+ REG_WRITE_ARRAY(&ah->iniBB_RfGain, freqIndex, regWrites);
+
+ if (IS_CHAN_A_FAST_CLOCK(ah, chan)) {
+ REG_WRITE_ARRAY(&ah->iniModesAdditional, modesIndex,
+ regWrites);
+ }
+
+ ar5008_hw_override_ini(ah, chan);
+ ar5008_hw_set_channel_regs(ah, chan);
+ ar5008_hw_init_chain_masks(ah);
+ ath9k_olc_init(ah);
+
+ /* Set TX power */
+ ah->eep_ops->set_txpower(ah, chan,
+ ath9k_regd_get_ctl(regulatory, chan),
+ channel->max_antenna_gain * 2,
+ channel->max_power * 2,
+ min((u32) MAX_RATE_POWER,
+ (u32) regulatory->power_limit));
+
+ /* Write analog registers */
+ if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) {
+ ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
+ "ar5416SetRfRegs failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void ar5008_hw_set_rfmode(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ u32 rfMode = 0;
+
+ if (chan == NULL)
+ return;
+
+ rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
+ ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
+
+ if (!AR_SREV_9280_10_OR_LATER(ah))
+ rfMode |= (IS_CHAN_5GHZ(chan)) ?
+ AR_PHY_MODE_RF5GHZ : AR_PHY_MODE_RF2GHZ;
+
+ if (IS_CHAN_A_FAST_CLOCK(ah, chan))
+ rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
+
+ REG_WRITE(ah, AR_PHY_MODE, rfMode);
+}
+
+static void ar5008_hw_mark_phy_inactive(struct ath_hw *ah)
+{
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
+}
+
+static void ar5008_hw_set_delta_slope(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 coef_scaled, ds_coef_exp, ds_coef_man;
+ u32 clockMhzScaled = 0x64000000;
+ struct chan_centers centers;
+
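+	/*
+	 * 0x64000000 is presumably 100 MHz in 8.24 fixed point, so
+	 * coef_scaled below works out to (100 / synth_center_in_MHz) in
+	 * the same fixed-point format; ath9k_hw_get_delta_slope_vals()
+	 * then splits it into the mantissa/exponent pair the hardware
+	 * expects. Half/quarter-rate channels halve/quarter the clock,
+	 * hence the shifts.
+	 */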
+ if (IS_CHAN_HALF_RATE(chan))
+ clockMhzScaled = clockMhzScaled >> 1;
+ else if (IS_CHAN_QUARTER_RATE(chan))
+ clockMhzScaled = clockMhzScaled >> 2;
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ coef_scaled = clockMhzScaled / centers.synth_center;
+
+ ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
+ &ds_coef_exp);
+
+ REG_RMW_FIELD(ah, AR_PHY_TIMING3,
+ AR_PHY_TIMING3_DSC_MAN, ds_coef_man);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING3,
+ AR_PHY_TIMING3_DSC_EXP, ds_coef_exp);
+
+ coef_scaled = (9 * coef_scaled) / 10;
+
+ ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
+ &ds_coef_exp);
+
+ REG_RMW_FIELD(ah, AR_PHY_HALFGI,
+ AR_PHY_HALFGI_DSC_MAN, ds_coef_man);
+ REG_RMW_FIELD(ah, AR_PHY_HALFGI,
+ AR_PHY_HALFGI_DSC_EXP, ds_coef_exp);
+}
+
+static bool ar5008_hw_rfbus_req(struct ath_hw *ah)
+{
+ REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN);
+ return ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN,
+ AR_PHY_RFBUS_GRANT_EN, AH_WAIT_TIMEOUT);
+}
+
+static void ar5008_hw_rfbus_done(struct ath_hw *ah)
+{
+ u32 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
+ if (IS_CHAN_B(ah->curchan))
+ synthDelay = (4 * synthDelay) / 22;
+ else
+ synthDelay /= 10;
+
+ udelay(synthDelay + BASE_ACTIVATE_DELAY);
+
+ REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
+}
+
+static void ar5008_hw_enable_rfkill(struct ath_hw *ah)
+{
+ REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
+ AR_GPIO_INPUT_EN_VAL_RFSILENT_BB);
+
+ REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2,
+ AR_GPIO_INPUT_MUX2_RFSILENT);
+
+ ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio);
+ REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB);
+}
+
+static void ar5008_restore_chainmask(struct ath_hw *ah)
+{
+ int rx_chainmask = ah->rxchainmask;
+
+ if ((rx_chainmask == 0x5) || (rx_chainmask == 0x3)) {
+ REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
+ REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
+ }
+}
+
+static void ar5008_set_diversity(struct ath_hw *ah, bool value)
+{
+ u32 v = REG_READ(ah, AR_PHY_CCK_DETECT);
+ if (value)
+ v |= AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
+ else
+ v &= ~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
+ REG_WRITE(ah, AR_PHY_CCK_DETECT, v);
+}
+
+static u32 ar9100_hw_compute_pll_control(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ if (chan && IS_CHAN_5GHZ(chan))
+ return 0x1450;
+ return 0x1458;
+}
+
+static u32 ar9160_hw_compute_pll_control(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 pll;
+
+ pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
+
+ if (chan && IS_CHAN_HALF_RATE(chan))
+ pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
+ else if (chan && IS_CHAN_QUARTER_RATE(chan))
+ pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
+
+ if (chan && IS_CHAN_5GHZ(chan))
+ pll |= SM(0x50, AR_RTC_9160_PLL_DIV);
+ else
+ pll |= SM(0x58, AR_RTC_9160_PLL_DIV);
+
+ return pll;
+}
+
+static u32 ar5008_hw_compute_pll_control(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 pll;
+
+ pll = AR_RTC_PLL_REFDIV_5 | AR_RTC_PLL_DIV2;
+
+ if (chan && IS_CHAN_HALF_RATE(chan))
+ pll |= SM(0x1, AR_RTC_PLL_CLKSEL);
+ else if (chan && IS_CHAN_QUARTER_RATE(chan))
+ pll |= SM(0x2, AR_RTC_PLL_CLKSEL);
+
+ if (chan && IS_CHAN_5GHZ(chan))
+ pll |= SM(0xa, AR_RTC_PLL_DIV);
+ else
+ pll |= SM(0xb, AR_RTC_PLL_DIV);
+
+ return pll;
+}
+
+static bool ar5008_hw_ani_control(struct ath_hw *ah,
+ enum ath9k_ani_cmd cmd, int param)
+{
+ struct ar5416AniState *aniState = ah->curani;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ switch (cmd & ah->ani_function) {
+ case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{
+ u32 level = param;
+
+ if (level >= ARRAY_SIZE(ah->totalSizeDesired)) {
+ ath_print(common, ATH_DBG_ANI,
+ "level out of range (%u > %u)\n",
+ level,
+ (unsigned)ARRAY_SIZE(ah->totalSizeDesired));
+ return false;
+ }
+
+ REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
+ AR_PHY_DESIRED_SZ_TOT_DES,
+ ah->totalSizeDesired[level]);
+ REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
+ AR_PHY_AGC_CTL1_COARSE_LOW,
+ ah->coarse_low[level]);
+ REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
+ AR_PHY_AGC_CTL1_COARSE_HIGH,
+ ah->coarse_high[level]);
+ REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
+ AR_PHY_FIND_SIG_FIRPWR,
+ ah->firpwr[level]);
+
+ if (level > aniState->noiseImmunityLevel)
+ ah->stats.ast_ani_niup++;
+ else if (level < aniState->noiseImmunityLevel)
+ ah->stats.ast_ani_nidown++;
+ aniState->noiseImmunityLevel = level;
+ break;
+ }
+ case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
+ const int m1ThreshLow[] = { 127, 50 };
+ const int m2ThreshLow[] = { 127, 40 };
+ const int m1Thresh[] = { 127, 0x4d };
+ const int m2Thresh[] = { 127, 0x40 };
+ const int m2CountThr[] = { 31, 16 };
+ const int m2CountThrLow[] = { 63, 48 };
+ u32 on = param ? 1 : 0;
+
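+		/* each table above is indexed { detection off, detection on } by "on" */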
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
+ m1ThreshLow[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
+ m2ThreshLow[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M1_THRESH,
+ m1Thresh[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M2_THRESH,
+ m2Thresh[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M2COUNT_THR,
+ m2CountThr[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
+ m2CountThrLow[on]);
+
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M1_THRESH_LOW,
+ m1ThreshLow[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M2_THRESH_LOW,
+ m2ThreshLow[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M1_THRESH,
+ m1Thresh[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M2_THRESH,
+ m2Thresh[on]);
+
+ if (on)
+ REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
+ else
+ REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
+
+ if (!on != aniState->ofdmWeakSigDetectOff) {
+ if (on)
+ ah->stats.ast_ani_ofdmon++;
+ else
+ ah->stats.ast_ani_ofdmoff++;
+ aniState->ofdmWeakSigDetectOff = !on;
+ }
+ break;
+ }
+ case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{
+ const int weakSigThrCck[] = { 8, 6 };
+ u32 high = param ? 1 : 0;
+
+ REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT,
+ AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK,
+ weakSigThrCck[high]);
+ if (high != aniState->cckWeakSigThreshold) {
+ if (high)
+ ah->stats.ast_ani_cckhigh++;
+ else
+ ah->stats.ast_ani_ccklow++;
+ aniState->cckWeakSigThreshold = high;
+ }
+ break;
+ }
+ case ATH9K_ANI_FIRSTEP_LEVEL:{
+ const int firstep[] = { 0, 4, 8 };
+ u32 level = param;
+
+ if (level >= ARRAY_SIZE(firstep)) {
+ ath_print(common, ATH_DBG_ANI,
+ "level out of range (%u > %u)\n",
+ level,
+ (unsigned) ARRAY_SIZE(firstep));
+ return false;
+ }
+ REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
+ AR_PHY_FIND_SIG_FIRSTEP,
+ firstep[level]);
+ if (level > aniState->firstepLevel)
+ ah->stats.ast_ani_stepup++;
+ else if (level < aniState->firstepLevel)
+ ah->stats.ast_ani_stepdown++;
+ aniState->firstepLevel = level;
+ break;
+ }
+ case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
+ const int cycpwrThr1[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
+ u32 level = param;
+
+ if (level >= ARRAY_SIZE(cycpwrThr1)) {
+ ath_print(common, ATH_DBG_ANI,
+ "level out of range (%u > %u)\n",
+ level,
+ (unsigned) ARRAY_SIZE(cycpwrThr1));
+ return false;
+ }
+ REG_RMW_FIELD(ah, AR_PHY_TIMING5,
+ AR_PHY_TIMING5_CYCPWR_THR1,
+ cycpwrThr1[level]);
+ if (level > aniState->spurImmunityLevel)
+ ah->stats.ast_ani_spurup++;
+ else if (level < aniState->spurImmunityLevel)
+ ah->stats.ast_ani_spurdown++;
+ aniState->spurImmunityLevel = level;
+ break;
+ }
+ case ATH9K_ANI_PRESENT:
+ break;
+ default:
+ ath_print(common, ATH_DBG_ANI,
+ "invalid cmd %u\n", cmd);
+ return false;
+ }
+
+ ath_print(common, ATH_DBG_ANI, "ANI parameters:\n");
+ ath_print(common, ATH_DBG_ANI,
+ "noiseImmunityLevel=%d, spurImmunityLevel=%d, "
+ "ofdmWeakSigDetectOff=%d\n",
+ aniState->noiseImmunityLevel,
+ aniState->spurImmunityLevel,
+ !aniState->ofdmWeakSigDetectOff);
+ ath_print(common, ATH_DBG_ANI,
+ "cckWeakSigThreshold=%d, "
+ "firstepLevel=%d, listenTime=%d\n",
+ aniState->cckWeakSigThreshold,
+ aniState->firstepLevel,
+ aniState->listenTime);
+ ath_print(common, ATH_DBG_ANI,
+ "cycleCount=%d, ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
+ aniState->cycleCount,
+ aniState->ofdmPhyErrCount,
+ aniState->cckPhyErrCount);
+
+ return true;
+}
+
+static void ar5008_hw_do_getnf(struct ath_hw *ah,
+ int16_t nfarray[NUM_NF_READINGS])
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int16_t nf;
+
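+	/*
+	 * The minCCApwr fields are 9-bit two's-complement values, so
+	 * sign-extend them by hand; e.g. a raw reading of 0x1a6 becomes
+	 * -((0x1a6 ^ 0x1ff) + 1) = -90.
+	 */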
+ nf = MS(REG_READ(ah, AR_PHY_CCA), AR_PHY_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ctl] [chain 0] is %d\n", nf);
+ nfarray[0] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR_PHY_CH1_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ctl] [chain 1] is %d\n", nf);
+ nfarray[1] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_CH2_CCA), AR_PHY_CH2_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ctl] [chain 2] is %d\n", nf);
+ nfarray[2] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ext] [chain 0] is %d\n", nf);
+ nfarray[3] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), AR_PHY_CH1_EXT_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ext] [chain 1] is %d\n", nf);
+ nfarray[4] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_CH2_EXT_CCA), AR_PHY_CH2_EXT_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ext] [chain 2] is %d\n", nf);
+ nfarray[5] = nf;
+}
+
+static void ar5008_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ struct ath9k_nfcal_hist *h;
+ int i, j;
+ int32_t val;
+ const u32 ar5416_cca_regs[6] = {
+ AR_PHY_CCA,
+ AR_PHY_CH1_CCA,
+ AR_PHY_CH2_CCA,
+ AR_PHY_EXT_CCA,
+ AR_PHY_CH1_EXT_CCA,
+ AR_PHY_CH2_EXT_CCA
+ };
+ u8 chainmask, rx_chain_status;
+
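+	/*
+	 * chainmask is a bitmap over the six entries of ar5416_cca_regs[]
+	 * (ctl chains 0-2, then ext chains 0-2): 0x09 covers chain 0
+	 * only, 0x1b chains 0-1, 0x3f all three chains.
+	 */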
+ rx_chain_status = REG_READ(ah, AR_PHY_RX_CHAINMASK);
+ if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
+ chainmask = 0x9;
+ else if (AR_SREV_9280(ah) || AR_SREV_9287(ah)) {
+ if ((rx_chain_status & 0x2) || (rx_chain_status & 0x4))
+ chainmask = 0x1B;
+ else
+ chainmask = 0x09;
+ } else {
+ if (rx_chain_status & 0x4)
+ chainmask = 0x3F;
+ else if (rx_chain_status & 0x2)
+ chainmask = 0x1B;
+ else
+ chainmask = 0x09;
+ }
+
+ h = ah->nfCalHist;
+
+ for (i = 0; i < NUM_NF_READINGS; i++) {
+ if (chainmask & (1 << i)) {
+ val = REG_READ(ah, ar5416_cca_regs[i]);
+ val &= 0xFFFFFE00;
+ val |= (((u32) (h[i].privNF) << 1) & 0x1ff);
+ REG_WRITE(ah, ar5416_cca_regs[i], val);
+ }
+ }
+
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_ENABLE_NF);
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
+
+ for (j = 0; j < 5; j++) {
+ if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
+ AR_PHY_AGC_CONTROL_NF) == 0)
+ break;
+ udelay(50);
+ }
+
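+	/*
+	 * The loop below seems to restore the CCA registers to a default
+	 * of -50 so the baseband is not left capped at the values just
+	 * loaded; this presumably also seeds the next noise floor
+	 * calibration.
+	 */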
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ for (i = 0; i < NUM_NF_READINGS; i++) {
+ if (chainmask & (1 << i)) {
+ val = REG_READ(ah, ar5416_cca_regs[i]);
+ val &= 0xFFFFFE00;
+ val |= (((u32) (-50) << 1) & 0x1ff);
+ REG_WRITE(ah, ar5416_cca_regs[i], val);
+ }
+ }
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+}
+
+void ar5008_hw_attach_phy_ops(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+
+ priv_ops->rf_set_freq = ar5008_hw_set_channel;
+ priv_ops->spur_mitigate_freq = ar5008_hw_spur_mitigate;
+
+ priv_ops->rf_alloc_ext_banks = ar5008_hw_rf_alloc_ext_banks;
+ priv_ops->rf_free_ext_banks = ar5008_hw_rf_free_ext_banks;
+ priv_ops->set_rf_regs = ar5008_hw_set_rf_regs;
+ priv_ops->set_channel_regs = ar5008_hw_set_channel_regs;
+ priv_ops->init_bb = ar5008_hw_init_bb;
+ priv_ops->process_ini = ar5008_hw_process_ini;
+ priv_ops->set_rfmode = ar5008_hw_set_rfmode;
+ priv_ops->mark_phy_inactive = ar5008_hw_mark_phy_inactive;
+ priv_ops->set_delta_slope = ar5008_hw_set_delta_slope;
+ priv_ops->rfbus_req = ar5008_hw_rfbus_req;
+ priv_ops->rfbus_done = ar5008_hw_rfbus_done;
+ priv_ops->enable_rfkill = ar5008_hw_enable_rfkill;
+ priv_ops->restore_chainmask = ar5008_restore_chainmask;
+ priv_ops->set_diversity = ar5008_set_diversity;
+ priv_ops->ani_control = ar5008_hw_ani_control;
+ priv_ops->do_getnf = ar5008_hw_do_getnf;
+ priv_ops->loadnf = ar5008_hw_loadnf;
+
+ if (AR_SREV_9100(ah))
+ priv_ops->compute_pll_control = ar9100_hw_compute_pll_control;
+ else if (AR_SREV_9160_10_OR_LATER(ah))
+ priv_ops->compute_pll_control = ar9160_hw_compute_pll_control;
+ else
+ priv_ops->compute_pll_control = ar5008_hw_compute_pll_control;
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9001_initvals.h b/drivers/net/wireless/ath/ath9k/ar9001_initvals.h
new file mode 100644
index 000000000000..0b94bd385b0a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9001_initvals.h
@@ -0,0 +1,1254 @@
+
+static const u32 ar5416Common_9100[][2] = {
+ { 0x0000000c, 0x00000000 },
+ { 0x00000030, 0x00020015 },
+ { 0x00000034, 0x00000005 },
+ { 0x00000040, 0x00000000 },
+ { 0x00000044, 0x00000008 },
+ { 0x00000048, 0x00000008 },
+ { 0x0000004c, 0x00000010 },
+ { 0x00000050, 0x00000000 },
+ { 0x00000054, 0x0000001f },
+ { 0x00000800, 0x00000000 },
+ { 0x00000804, 0x00000000 },
+ { 0x00000808, 0x00000000 },
+ { 0x0000080c, 0x00000000 },
+ { 0x00000810, 0x00000000 },
+ { 0x00000814, 0x00000000 },
+ { 0x00000818, 0x00000000 },
+ { 0x0000081c, 0x00000000 },
+ { 0x00000820, 0x00000000 },
+ { 0x00000824, 0x00000000 },
+ { 0x00001040, 0x002ffc0f },
+ { 0x00001044, 0x002ffc0f },
+ { 0x00001048, 0x002ffc0f },
+ { 0x0000104c, 0x002ffc0f },
+ { 0x00001050, 0x002ffc0f },
+ { 0x00001054, 0x002ffc0f },
+ { 0x00001058, 0x002ffc0f },
+ { 0x0000105c, 0x002ffc0f },
+ { 0x00001060, 0x002ffc0f },
+ { 0x00001064, 0x002ffc0f },
+ { 0x00001230, 0x00000000 },
+ { 0x00001270, 0x00000000 },
+ { 0x00001038, 0x00000000 },
+ { 0x00001078, 0x00000000 },
+ { 0x000010b8, 0x00000000 },
+ { 0x000010f8, 0x00000000 },
+ { 0x00001138, 0x00000000 },
+ { 0x00001178, 0x00000000 },
+ { 0x000011b8, 0x00000000 },
+ { 0x000011f8, 0x00000000 },
+ { 0x00001238, 0x00000000 },
+ { 0x00001278, 0x00000000 },
+ { 0x000012b8, 0x00000000 },
+ { 0x000012f8, 0x00000000 },
+ { 0x00001338, 0x00000000 },
+ { 0x00001378, 0x00000000 },
+ { 0x000013b8, 0x00000000 },
+ { 0x000013f8, 0x00000000 },
+ { 0x00001438, 0x00000000 },
+ { 0x00001478, 0x00000000 },
+ { 0x000014b8, 0x00000000 },
+ { 0x000014f8, 0x00000000 },
+ { 0x00001538, 0x00000000 },
+ { 0x00001578, 0x00000000 },
+ { 0x000015b8, 0x00000000 },
+ { 0x000015f8, 0x00000000 },
+ { 0x00001638, 0x00000000 },
+ { 0x00001678, 0x00000000 },
+ { 0x000016b8, 0x00000000 },
+ { 0x000016f8, 0x00000000 },
+ { 0x00001738, 0x00000000 },
+ { 0x00001778, 0x00000000 },
+ { 0x000017b8, 0x00000000 },
+ { 0x000017f8, 0x00000000 },
+ { 0x0000103c, 0x00000000 },
+ { 0x0000107c, 0x00000000 },
+ { 0x000010bc, 0x00000000 },
+ { 0x000010fc, 0x00000000 },
+ { 0x0000113c, 0x00000000 },
+ { 0x0000117c, 0x00000000 },
+ { 0x000011bc, 0x00000000 },
+ { 0x000011fc, 0x00000000 },
+ { 0x0000123c, 0x00000000 },
+ { 0x0000127c, 0x00000000 },
+ { 0x000012bc, 0x00000000 },
+ { 0x000012fc, 0x00000000 },
+ { 0x0000133c, 0x00000000 },
+ { 0x0000137c, 0x00000000 },
+ { 0x000013bc, 0x00000000 },
+ { 0x000013fc, 0x00000000 },
+ { 0x0000143c, 0x00000000 },
+ { 0x0000147c, 0x00000000 },
+ { 0x00020010, 0x00000003 },
+ { 0x00020038, 0x000004c2 },
+ { 0x00008004, 0x00000000 },
+ { 0x00008008, 0x00000000 },
+ { 0x0000800c, 0x00000000 },
+ { 0x00008018, 0x00000700 },
+ { 0x00008020, 0x00000000 },
+ { 0x00008038, 0x00000000 },
+ { 0x0000803c, 0x00000000 },
+ { 0x00008048, 0x40000000 },
+ { 0x00008054, 0x00004000 },
+ { 0x00008058, 0x00000000 },
+ { 0x0000805c, 0x000fc78f },
+ { 0x00008060, 0x0000000f },
+ { 0x00008064, 0x00000000 },
+ { 0x000080c0, 0x2a82301a },
+ { 0x000080c4, 0x05dc01e0 },
+ { 0x000080c8, 0x1f402710 },
+ { 0x000080cc, 0x01f40000 },
+ { 0x000080d0, 0x00001e00 },
+ { 0x000080d4, 0x00000000 },
+ { 0x000080d8, 0x00400000 },
+ { 0x000080e0, 0xffffffff },
+ { 0x000080e4, 0x0000ffff },
+ { 0x000080e8, 0x003f3f3f },
+ { 0x000080ec, 0x00000000 },
+ { 0x000080f0, 0x00000000 },
+ { 0x000080f4, 0x00000000 },
+ { 0x000080f8, 0x00000000 },
+ { 0x000080fc, 0x00020000 },
+ { 0x00008100, 0x00020000 },
+ { 0x00008104, 0x00000001 },
+ { 0x00008108, 0x00000052 },
+ { 0x0000810c, 0x00000000 },
+ { 0x00008110, 0x00000168 },
+ { 0x00008118, 0x000100aa },
+ { 0x0000811c, 0x00003210 },
+ { 0x00008120, 0x08f04800 },
+ { 0x00008124, 0x00000000 },
+ { 0x00008128, 0x00000000 },
+ { 0x0000812c, 0x00000000 },
+ { 0x00008130, 0x00000000 },
+ { 0x00008134, 0x00000000 },
+ { 0x00008138, 0x00000000 },
+ { 0x0000813c, 0x00000000 },
+ { 0x00008144, 0x00000000 },
+ { 0x00008168, 0x00000000 },
+ { 0x0000816c, 0x00000000 },
+ { 0x00008170, 0x32143320 },
+ { 0x00008174, 0xfaa4fa50 },
+ { 0x00008178, 0x00000100 },
+ { 0x0000817c, 0x00000000 },
+ { 0x000081c4, 0x00000000 },
+ { 0x000081d0, 0x00003210 },
+ { 0x000081ec, 0x00000000 },
+ { 0x000081f0, 0x00000000 },
+ { 0x000081f4, 0x00000000 },
+ { 0x000081f8, 0x00000000 },
+ { 0x000081fc, 0x00000000 },
+ { 0x00008200, 0x00000000 },
+ { 0x00008204, 0x00000000 },
+ { 0x00008208, 0x00000000 },
+ { 0x0000820c, 0x00000000 },
+ { 0x00008210, 0x00000000 },
+ { 0x00008214, 0x00000000 },
+ { 0x00008218, 0x00000000 },
+ { 0x0000821c, 0x00000000 },
+ { 0x00008220, 0x00000000 },
+ { 0x00008224, 0x00000000 },
+ { 0x00008228, 0x00000000 },
+ { 0x0000822c, 0x00000000 },
+ { 0x00008230, 0x00000000 },
+ { 0x00008234, 0x00000000 },
+ { 0x00008238, 0x00000000 },
+ { 0x0000823c, 0x00000000 },
+ { 0x00008240, 0x00100000 },
+ { 0x00008244, 0x0010f400 },
+ { 0x00008248, 0x00000100 },
+ { 0x0000824c, 0x0001e800 },
+ { 0x00008250, 0x00000000 },
+ { 0x00008254, 0x00000000 },
+ { 0x00008258, 0x00000000 },
+ { 0x0000825c, 0x400000ff },
+ { 0x00008260, 0x00080922 },
+ { 0x00008270, 0x00000000 },
+ { 0x00008274, 0x40000000 },
+ { 0x00008278, 0x003e4180 },
+ { 0x0000827c, 0x00000000 },
+ { 0x00008284, 0x0000002c },
+ { 0x00008288, 0x0000002c },
+ { 0x0000828c, 0x00000000 },
+ { 0x00008294, 0x00000000 },
+ { 0x00008298, 0x00000000 },
+ { 0x00008300, 0x00000000 },
+ { 0x00008304, 0x00000000 },
+ { 0x00008308, 0x00000000 },
+ { 0x0000830c, 0x00000000 },
+ { 0x00008310, 0x00000000 },
+ { 0x00008314, 0x00000000 },
+ { 0x00008318, 0x00000000 },
+ { 0x00008328, 0x00000000 },
+ { 0x0000832c, 0x00000007 },
+ { 0x00008330, 0x00000302 },
+ { 0x00008334, 0x00000e00 },
+ { 0x00008338, 0x00000000 },
+ { 0x0000833c, 0x00000000 },
+ { 0x00008340, 0x000107ff },
+ { 0x00009808, 0x00000000 },
+ { 0x0000980c, 0xad848e19 },
+ { 0x00009810, 0x7d14e000 },
+ { 0x00009814, 0x9c0a9f6b },
+ { 0x0000981c, 0x00000000 },
+ { 0x0000982c, 0x0000a000 },
+ { 0x00009830, 0x00000000 },
+ { 0x0000983c, 0x00200400 },
+ { 0x00009840, 0x206a01ae },
+ { 0x0000984c, 0x1284233c },
+ { 0x00009854, 0x00000859 },
+ { 0x00009900, 0x00000000 },
+ { 0x00009904, 0x00000000 },
+ { 0x00009908, 0x00000000 },
+ { 0x0000990c, 0x00000000 },
+ { 0x0000991c, 0x10000fff },
+ { 0x00009920, 0x05100000 },
+ { 0x0000a920, 0x05100000 },
+ { 0x0000b920, 0x05100000 },
+ { 0x00009928, 0x00000001 },
+ { 0x0000992c, 0x00000004 },
+ { 0x00009934, 0x1e1f2022 },
+ { 0x00009938, 0x0a0b0c0d },
+ { 0x0000993c, 0x00000000 },
+ { 0x00009948, 0x9280b212 },
+ { 0x0000994c, 0x00020028 },
+ { 0x0000c95c, 0x004b6a8e },
+ { 0x0000c968, 0x000003ce },
+ { 0x00009970, 0x190fb515 },
+ { 0x00009974, 0x00000000 },
+ { 0x00009978, 0x00000001 },
+ { 0x0000997c, 0x00000000 },
+ { 0x00009980, 0x00000000 },
+ { 0x00009984, 0x00000000 },
+ { 0x00009988, 0x00000000 },
+ { 0x0000998c, 0x00000000 },
+ { 0x00009990, 0x00000000 },
+ { 0x00009994, 0x00000000 },
+ { 0x00009998, 0x00000000 },
+ { 0x0000999c, 0x00000000 },
+ { 0x000099a0, 0x00000000 },
+ { 0x000099a4, 0x00000001 },
+ { 0x000099a8, 0x201fff00 },
+ { 0x000099ac, 0x006f0000 },
+ { 0x000099b0, 0x03051000 },
+ { 0x000099dc, 0x00000000 },
+ { 0x000099e0, 0x00000200 },
+ { 0x000099e4, 0xaaaaaaaa },
+ { 0x000099e8, 0x3c466478 },
+ { 0x000099ec, 0x0cc80caa },
+ { 0x000099fc, 0x00001042 },
+ { 0x00009b00, 0x00000000 },
+ { 0x00009b04, 0x00000001 },
+ { 0x00009b08, 0x00000002 },
+ { 0x00009b0c, 0x00000003 },
+ { 0x00009b10, 0x00000004 },
+ { 0x00009b14, 0x00000005 },
+ { 0x00009b18, 0x00000008 },
+ { 0x00009b1c, 0x00000009 },
+ { 0x00009b20, 0x0000000a },
+ { 0x00009b24, 0x0000000b },
+ { 0x00009b28, 0x0000000c },
+ { 0x00009b2c, 0x0000000d },
+ { 0x00009b30, 0x00000010 },
+ { 0x00009b34, 0x00000011 },
+ { 0x00009b38, 0x00000012 },
+ { 0x00009b3c, 0x00000013 },
+ { 0x00009b40, 0x00000014 },
+ { 0x00009b44, 0x00000015 },
+ { 0x00009b48, 0x00000018 },
+ { 0x00009b4c, 0x00000019 },
+ { 0x00009b50, 0x0000001a },
+ { 0x00009b54, 0x0000001b },
+ { 0x00009b58, 0x0000001c },
+ { 0x00009b5c, 0x0000001d },
+ { 0x00009b60, 0x00000020 },
+ { 0x00009b64, 0x00000021 },
+ { 0x00009b68, 0x00000022 },
+ { 0x00009b6c, 0x00000023 },
+ { 0x00009b70, 0x00000024 },
+ { 0x00009b74, 0x00000025 },
+ { 0x00009b78, 0x00000028 },
+ { 0x00009b7c, 0x00000029 },
+ { 0x00009b80, 0x0000002a },
+ { 0x00009b84, 0x0000002b },
+ { 0x00009b88, 0x0000002c },
+ { 0x00009b8c, 0x0000002d },
+ { 0x00009b90, 0x00000030 },
+ { 0x00009b94, 0x00000031 },
+ { 0x00009b98, 0x00000032 },
+ { 0x00009b9c, 0x00000033 },
+ { 0x00009ba0, 0x00000034 },
+ { 0x00009ba4, 0x00000035 },
+ { 0x00009ba8, 0x00000035 },
+ { 0x00009bac, 0x00000035 },
+ { 0x00009bb0, 0x00000035 },
+ { 0x00009bb4, 0x00000035 },
+ { 0x00009bb8, 0x00000035 },
+ { 0x00009bbc, 0x00000035 },
+ { 0x00009bc0, 0x00000035 },
+ { 0x00009bc4, 0x00000035 },
+ { 0x00009bc8, 0x00000035 },
+ { 0x00009bcc, 0x00000035 },
+ { 0x00009bd0, 0x00000035 },
+ { 0x00009bd4, 0x00000035 },
+ { 0x00009bd8, 0x00000035 },
+ { 0x00009bdc, 0x00000035 },
+ { 0x00009be0, 0x00000035 },
+ { 0x00009be4, 0x00000035 },
+ { 0x00009be8, 0x00000035 },
+ { 0x00009bec, 0x00000035 },
+ { 0x00009bf0, 0x00000035 },
+ { 0x00009bf4, 0x00000035 },
+ { 0x00009bf8, 0x00000010 },
+ { 0x00009bfc, 0x0000001a },
+ { 0x0000a210, 0x40806333 },
+ { 0x0000a214, 0x00106c10 },
+ { 0x0000a218, 0x009c4060 },
+ { 0x0000a220, 0x018830c6 },
+ { 0x0000a224, 0x00000400 },
+ { 0x0000a228, 0x001a0bb5 },
+ { 0x0000a22c, 0x00000000 },
+ { 0x0000a234, 0x20202020 },
+ { 0x0000a238, 0x20202020 },
+ { 0x0000a23c, 0x13c889ae },
+ { 0x0000a240, 0x38490a20 },
+ { 0x0000a244, 0x00007bb6 },
+ { 0x0000a248, 0x0fff3ffc },
+ { 0x0000a24c, 0x00000001 },
+ { 0x0000a250, 0x0000a000 },
+ { 0x0000a254, 0x00000000 },
+ { 0x0000a258, 0x0cc75380 },
+ { 0x0000a25c, 0x0f0f0f01 },
+ { 0x0000a260, 0xdfa91f01 },
+ { 0x0000a268, 0x00000001 },
+ { 0x0000a26c, 0x0ebae9c6 },
+ { 0x0000b26c, 0x0ebae9c6 },
+ { 0x0000c26c, 0x0ebae9c6 },
+ { 0x0000d270, 0x00820820 },
+ { 0x0000a278, 0x1ce739ce },
+ { 0x0000a27c, 0x050701ce },
+ { 0x0000a338, 0x00000000 },
+ { 0x0000a33c, 0x00000000 },
+ { 0x0000a340, 0x00000000 },
+ { 0x0000a344, 0x00000000 },
+ { 0x0000a348, 0x3fffffff },
+ { 0x0000a34c, 0x3fffffff },
+ { 0x0000a350, 0x3fffffff },
+ { 0x0000a354, 0x0003ffff },
+ { 0x0000a358, 0x79a8aa33 },
+ { 0x0000d35c, 0x07ffffef },
+ { 0x0000d360, 0x0fffffe7 },
+ { 0x0000d364, 0x17ffffe5 },
+ { 0x0000d368, 0x1fffffe4 },
+ { 0x0000d36c, 0x37ffffe3 },
+ { 0x0000d370, 0x3fffffe3 },
+ { 0x0000d374, 0x57ffffe3 },
+ { 0x0000d378, 0x5fffffe2 },
+ { 0x0000d37c, 0x7fffffe2 },
+ { 0x0000d380, 0x7f3c7bba },
+ { 0x0000d384, 0xf3307ff0 },
+ { 0x0000a388, 0x0c000000 },
+ { 0x0000a38c, 0x20202020 },
+ { 0x0000a390, 0x20202020 },
+ { 0x0000a394, 0x1ce739ce },
+ { 0x0000a398, 0x000001ce },
+ { 0x0000a39c, 0x00000001 },
+ { 0x0000a3a0, 0x00000000 },
+ { 0x0000a3a4, 0x00000000 },
+ { 0x0000a3a8, 0x00000000 },
+ { 0x0000a3ac, 0x00000000 },
+ { 0x0000a3b0, 0x00000000 },
+ { 0x0000a3b4, 0x00000000 },
+ { 0x0000a3b8, 0x00000000 },
+ { 0x0000a3bc, 0x00000000 },
+ { 0x0000a3c0, 0x00000000 },
+ { 0x0000a3c4, 0x00000000 },
+ { 0x0000a3c8, 0x00000246 },
+ { 0x0000a3cc, 0x20202020 },
+ { 0x0000a3d0, 0x20202020 },
+ { 0x0000a3d4, 0x20202020 },
+ { 0x0000a3dc, 0x1ce739ce },
+ { 0x0000a3e0, 0x000001ce },
+};
+
+static const u32 ar5416Bank0_9100[][2] = {
+ { 0x000098b0, 0x1e5795e5 },
+ { 0x000098e0, 0x02008020 },
+};
+
+static const u32 ar5416BB_RfGain_9100[][3] = {
+ { 0x00009a00, 0x00000000, 0x00000000 },
+ { 0x00009a04, 0x00000040, 0x00000040 },
+ { 0x00009a08, 0x00000080, 0x00000080 },
+ { 0x00009a0c, 0x000001a1, 0x00000141 },
+ { 0x00009a10, 0x000001e1, 0x00000181 },
+ { 0x00009a14, 0x00000021, 0x000001c1 },
+ { 0x00009a18, 0x00000061, 0x00000001 },
+ { 0x00009a1c, 0x00000168, 0x00000041 },
+ { 0x00009a20, 0x000001a8, 0x000001a8 },
+ { 0x00009a24, 0x000001e8, 0x000001e8 },
+ { 0x00009a28, 0x00000028, 0x00000028 },
+ { 0x00009a2c, 0x00000068, 0x00000068 },
+ { 0x00009a30, 0x00000189, 0x000000a8 },
+ { 0x00009a34, 0x000001c9, 0x00000169 },
+ { 0x00009a38, 0x00000009, 0x000001a9 },
+ { 0x00009a3c, 0x00000049, 0x000001e9 },
+ { 0x00009a40, 0x00000089, 0x00000029 },
+ { 0x00009a44, 0x00000170, 0x00000069 },
+ { 0x00009a48, 0x000001b0, 0x00000190 },
+ { 0x00009a4c, 0x000001f0, 0x000001d0 },
+ { 0x00009a50, 0x00000030, 0x00000010 },
+ { 0x00009a54, 0x00000070, 0x00000050 },
+ { 0x00009a58, 0x00000191, 0x00000090 },
+ { 0x00009a5c, 0x000001d1, 0x00000151 },
+ { 0x00009a60, 0x00000011, 0x00000191 },
+ { 0x00009a64, 0x00000051, 0x000001d1 },
+ { 0x00009a68, 0x00000091, 0x00000011 },
+ { 0x00009a6c, 0x000001b8, 0x00000051 },
+ { 0x00009a70, 0x000001f8, 0x00000198 },
+ { 0x00009a74, 0x00000038, 0x000001d8 },
+ { 0x00009a78, 0x00000078, 0x00000018 },
+ { 0x00009a7c, 0x00000199, 0x00000058 },
+ { 0x00009a80, 0x000001d9, 0x00000098 },
+ { 0x00009a84, 0x00000019, 0x00000159 },
+ { 0x00009a88, 0x00000059, 0x00000199 },
+ { 0x00009a8c, 0x00000099, 0x000001d9 },
+ { 0x00009a90, 0x000000d9, 0x00000019 },
+ { 0x00009a94, 0x000000f9, 0x00000059 },
+ { 0x00009a98, 0x000000f9, 0x00000099 },
+ { 0x00009a9c, 0x000000f9, 0x000000d9 },
+ { 0x00009aa0, 0x000000f9, 0x000000f9 },
+ { 0x00009aa4, 0x000000f9, 0x000000f9 },
+ { 0x00009aa8, 0x000000f9, 0x000000f9 },
+ { 0x00009aac, 0x000000f9, 0x000000f9 },
+ { 0x00009ab0, 0x000000f9, 0x000000f9 },
+ { 0x00009ab4, 0x000000f9, 0x000000f9 },
+ { 0x00009ab8, 0x000000f9, 0x000000f9 },
+ { 0x00009abc, 0x000000f9, 0x000000f9 },
+ { 0x00009ac0, 0x000000f9, 0x000000f9 },
+ { 0x00009ac4, 0x000000f9, 0x000000f9 },
+ { 0x00009ac8, 0x000000f9, 0x000000f9 },
+ { 0x00009acc, 0x000000f9, 0x000000f9 },
+ { 0x00009ad0, 0x000000f9, 0x000000f9 },
+ { 0x00009ad4, 0x000000f9, 0x000000f9 },
+ { 0x00009ad8, 0x000000f9, 0x000000f9 },
+ { 0x00009adc, 0x000000f9, 0x000000f9 },
+ { 0x00009ae0, 0x000000f9, 0x000000f9 },
+ { 0x00009ae4, 0x000000f9, 0x000000f9 },
+ { 0x00009ae8, 0x000000f9, 0x000000f9 },
+ { 0x00009aec, 0x000000f9, 0x000000f9 },
+ { 0x00009af0, 0x000000f9, 0x000000f9 },
+ { 0x00009af4, 0x000000f9, 0x000000f9 },
+ { 0x00009af8, 0x000000f9, 0x000000f9 },
+ { 0x00009afc, 0x000000f9, 0x000000f9 },
+};
+
+static const u32 ar5416Bank1_9100[][2] = {
+ { 0x000098b0, 0x02108421},
+ { 0x000098ec, 0x00000008},
+};
+
+static const u32 ar5416Bank2_9100[][2] = {
+ { 0x000098b0, 0x0e73ff17},
+ { 0x000098e0, 0x00000420},
+};
+
+static const u32 ar5416Bank3_9100[][3] = {
+ { 0x000098f0, 0x01400018, 0x01c00018 },
+};
+
+static const u32 ar5416Bank6_9100[][3] = {
+
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00e00000, 0x00e00000 },
+ { 0x0000989c, 0x005e0000, 0x005e0000 },
+ { 0x0000989c, 0x00120000, 0x00120000 },
+ { 0x0000989c, 0x00620000, 0x00620000 },
+ { 0x0000989c, 0x00020000, 0x00020000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x005f0000, 0x005f0000 },
+ { 0x0000989c, 0x00870000, 0x00870000 },
+ { 0x0000989c, 0x00f90000, 0x00f90000 },
+ { 0x0000989c, 0x007b0000, 0x007b0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00f50000, 0x00f50000 },
+ { 0x0000989c, 0x00dc0000, 0x00dc0000 },
+ { 0x0000989c, 0x00110000, 0x00110000 },
+ { 0x0000989c, 0x006100a8, 0x006100a8 },
+ { 0x0000989c, 0x004210a2, 0x004210a2 },
+ { 0x0000989c, 0x0014000f, 0x0014000f },
+ { 0x0000989c, 0x00c40002, 0x00c40002 },
+ { 0x0000989c, 0x003000f2, 0x003000f2 },
+ { 0x0000989c, 0x00440016, 0x00440016 },
+ { 0x0000989c, 0x00410040, 0x00410040 },
+ { 0x0000989c, 0x000180d6, 0x000180d6 },
+ { 0x0000989c, 0x0000c0aa, 0x0000c0aa },
+ { 0x0000989c, 0x000000b1, 0x000000b1 },
+ { 0x0000989c, 0x00002000, 0x00002000 },
+ { 0x0000989c, 0x000000d4, 0x000000d4 },
+ { 0x000098d0, 0x0000000f, 0x0010000f },
+};
+
+
+static const u32 ar5416Bank6TPC_9100[][3] = {
+
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00e00000, 0x00e00000 },
+ { 0x0000989c, 0x005e0000, 0x005e0000 },
+ { 0x0000989c, 0x00120000, 0x00120000 },
+ { 0x0000989c, 0x00620000, 0x00620000 },
+ { 0x0000989c, 0x00020000, 0x00020000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x40ff0000, 0x40ff0000 },
+ { 0x0000989c, 0x005f0000, 0x005f0000 },
+ { 0x0000989c, 0x00870000, 0x00870000 },
+ { 0x0000989c, 0x00f90000, 0x00f90000 },
+ { 0x0000989c, 0x007b0000, 0x007b0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00f50000, 0x00f50000 },
+ { 0x0000989c, 0x00dc0000, 0x00dc0000 },
+ { 0x0000989c, 0x00110000, 0x00110000 },
+ { 0x0000989c, 0x006100a8, 0x006100a8 },
+ { 0x0000989c, 0x00423022, 0x00423022 },
+ { 0x0000989c, 0x2014008f, 0x2014008f },
+ { 0x0000989c, 0x00c40002, 0x00c40002 },
+ { 0x0000989c, 0x003000f2, 0x003000f2 },
+ { 0x0000989c, 0x00440016, 0x00440016 },
+ { 0x0000989c, 0x00410040, 0x00410040 },
+ { 0x0000989c, 0x0001805e, 0x0001805e },
+ { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
+ { 0x0000989c, 0x000000e1, 0x000000e1 },
+ { 0x0000989c, 0x00007080, 0x00007080 },
+ { 0x0000989c, 0x000000d4, 0x000000d4 },
+ { 0x000098d0, 0x0000000f, 0x0010000f },
+};
+
+static const u32 ar5416Bank7_9100[][2] = {
+ { 0x0000989c, 0x00000500 },
+ { 0x0000989c, 0x00000800 },
+ { 0x000098cc, 0x0000000e },
+};
+
+static const u32 ar5416Addac_9100[][2] = {
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000010 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x000000c0 },
+ {0x0000989c, 0x00000015 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x000098cc, 0x00000000 },
+};
+
+static const u32 ar5416Modes_9160[][6] = {
+ { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
+ { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
+ { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
+ { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
+ { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
+ { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
+ { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
+ { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
+ { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
+ { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
+ { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
+ { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
+ { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
+ { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x00009850, 0x6c48b4e2, 0x6c48b4e2, 0x6c48b0e2, 0x6c48b0e2, 0x6c48b0e2 },
+ { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
+ { 0x0000985c, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e },
+ { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
+ { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
+ { 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 },
+ { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
+ { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
+ { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
+ { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d },
+ { 0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020 },
+ { 0x00009960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
+ { 0x0000a960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
+ { 0x0000b960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
+ { 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 },
+ { 0x0000c968, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce, 0x000003ce },
+ { 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a0c00, 0x001a0c00, 0x001a0c00 },
+ { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
+ { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
+ { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
+ { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
+ { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
+ { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
+ { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
+ { 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
+ { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
+ { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
+ { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
+ { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
+ { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
+ { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
+ { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
+ { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
+ { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
+ { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
+ { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
+ { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
+ { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+};
+
+static const u32 ar5416Common_9160[][2] = {
+ { 0x0000000c, 0x00000000 },
+ { 0x00000030, 0x00020015 },
+ { 0x00000034, 0x00000005 },
+ { 0x00000040, 0x00000000 },
+ { 0x00000044, 0x00000008 },
+ { 0x00000048, 0x00000008 },
+ { 0x0000004c, 0x00000010 },
+ { 0x00000050, 0x00000000 },
+ { 0x00000054, 0x0000001f },
+ { 0x00000800, 0x00000000 },
+ { 0x00000804, 0x00000000 },
+ { 0x00000808, 0x00000000 },
+ { 0x0000080c, 0x00000000 },
+ { 0x00000810, 0x00000000 },
+ { 0x00000814, 0x00000000 },
+ { 0x00000818, 0x00000000 },
+ { 0x0000081c, 0x00000000 },
+ { 0x00000820, 0x00000000 },
+ { 0x00000824, 0x00000000 },
+ { 0x00001040, 0x002ffc0f },
+ { 0x00001044, 0x002ffc0f },
+ { 0x00001048, 0x002ffc0f },
+ { 0x0000104c, 0x002ffc0f },
+ { 0x00001050, 0x002ffc0f },
+ { 0x00001054, 0x002ffc0f },
+ { 0x00001058, 0x002ffc0f },
+ { 0x0000105c, 0x002ffc0f },
+ { 0x00001060, 0x002ffc0f },
+ { 0x00001064, 0x002ffc0f },
+ { 0x00001230, 0x00000000 },
+ { 0x00001270, 0x00000000 },
+ { 0x00001038, 0x00000000 },
+ { 0x00001078, 0x00000000 },
+ { 0x000010b8, 0x00000000 },
+ { 0x000010f8, 0x00000000 },
+ { 0x00001138, 0x00000000 },
+ { 0x00001178, 0x00000000 },
+ { 0x000011b8, 0x00000000 },
+ { 0x000011f8, 0x00000000 },
+ { 0x00001238, 0x00000000 },
+ { 0x00001278, 0x00000000 },
+ { 0x000012b8, 0x00000000 },
+ { 0x000012f8, 0x00000000 },
+ { 0x00001338, 0x00000000 },
+ { 0x00001378, 0x00000000 },
+ { 0x000013b8, 0x00000000 },
+ { 0x000013f8, 0x00000000 },
+ { 0x00001438, 0x00000000 },
+ { 0x00001478, 0x00000000 },
+ { 0x000014b8, 0x00000000 },
+ { 0x000014f8, 0x00000000 },
+ { 0x00001538, 0x00000000 },
+ { 0x00001578, 0x00000000 },
+ { 0x000015b8, 0x00000000 },
+ { 0x000015f8, 0x00000000 },
+ { 0x00001638, 0x00000000 },
+ { 0x00001678, 0x00000000 },
+ { 0x000016b8, 0x00000000 },
+ { 0x000016f8, 0x00000000 },
+ { 0x00001738, 0x00000000 },
+ { 0x00001778, 0x00000000 },
+ { 0x000017b8, 0x00000000 },
+ { 0x000017f8, 0x00000000 },
+ { 0x0000103c, 0x00000000 },
+ { 0x0000107c, 0x00000000 },
+ { 0x000010bc, 0x00000000 },
+ { 0x000010fc, 0x00000000 },
+ { 0x0000113c, 0x00000000 },
+ { 0x0000117c, 0x00000000 },
+ { 0x000011bc, 0x00000000 },
+ { 0x000011fc, 0x00000000 },
+ { 0x0000123c, 0x00000000 },
+ { 0x0000127c, 0x00000000 },
+ { 0x000012bc, 0x00000000 },
+ { 0x000012fc, 0x00000000 },
+ { 0x0000133c, 0x00000000 },
+ { 0x0000137c, 0x00000000 },
+ { 0x000013bc, 0x00000000 },
+ { 0x000013fc, 0x00000000 },
+ { 0x0000143c, 0x00000000 },
+ { 0x0000147c, 0x00000000 },
+ { 0x00004030, 0x00000002 },
+ { 0x0000403c, 0x00000002 },
+ { 0x00007010, 0x00000020 },
+ { 0x00007038, 0x000004c2 },
+ { 0x00008004, 0x00000000 },
+ { 0x00008008, 0x00000000 },
+ { 0x0000800c, 0x00000000 },
+ { 0x00008018, 0x00000700 },
+ { 0x00008020, 0x00000000 },
+ { 0x00008038, 0x00000000 },
+ { 0x0000803c, 0x00000000 },
+ { 0x00008048, 0x40000000 },
+ { 0x00008054, 0x00000000 },
+ { 0x00008058, 0x00000000 },
+ { 0x0000805c, 0x000fc78f },
+ { 0x00008060, 0x0000000f },
+ { 0x00008064, 0x00000000 },
+ { 0x000080c0, 0x2a82301a },
+ { 0x000080c4, 0x05dc01e0 },
+ { 0x000080c8, 0x1f402710 },
+ { 0x000080cc, 0x01f40000 },
+ { 0x000080d0, 0x00001e00 },
+ { 0x000080d4, 0x00000000 },
+ { 0x000080d8, 0x00400000 },
+ { 0x000080e0, 0xffffffff },
+ { 0x000080e4, 0x0000ffff },
+ { 0x000080e8, 0x003f3f3f },
+ { 0x000080ec, 0x00000000 },
+ { 0x000080f0, 0x00000000 },
+ { 0x000080f4, 0x00000000 },
+ { 0x000080f8, 0x00000000 },
+ { 0x000080fc, 0x00020000 },
+ { 0x00008100, 0x00020000 },
+ { 0x00008104, 0x00000001 },
+ { 0x00008108, 0x00000052 },
+ { 0x0000810c, 0x00000000 },
+ { 0x00008110, 0x00000168 },
+ { 0x00008118, 0x000100aa },
+ { 0x0000811c, 0x00003210 },
+ { 0x00008120, 0x08f04800 },
+ { 0x00008124, 0x00000000 },
+ { 0x00008128, 0x00000000 },
+ { 0x0000812c, 0x00000000 },
+ { 0x00008130, 0x00000000 },
+ { 0x00008134, 0x00000000 },
+ { 0x00008138, 0x00000000 },
+ { 0x0000813c, 0x00000000 },
+ { 0x00008144, 0xffffffff },
+ { 0x00008168, 0x00000000 },
+ { 0x0000816c, 0x00000000 },
+ { 0x00008170, 0x32143320 },
+ { 0x00008174, 0xfaa4fa50 },
+ { 0x00008178, 0x00000100 },
+ { 0x0000817c, 0x00000000 },
+ { 0x000081c4, 0x00000000 },
+ { 0x000081d0, 0x00003210 },
+ { 0x000081ec, 0x00000000 },
+ { 0x000081f0, 0x00000000 },
+ { 0x000081f4, 0x00000000 },
+ { 0x000081f8, 0x00000000 },
+ { 0x000081fc, 0x00000000 },
+ { 0x00008200, 0x00000000 },
+ { 0x00008204, 0x00000000 },
+ { 0x00008208, 0x00000000 },
+ { 0x0000820c, 0x00000000 },
+ { 0x00008210, 0x00000000 },
+ { 0x00008214, 0x00000000 },
+ { 0x00008218, 0x00000000 },
+ { 0x0000821c, 0x00000000 },
+ { 0x00008220, 0x00000000 },
+ { 0x00008224, 0x00000000 },
+ { 0x00008228, 0x00000000 },
+ { 0x0000822c, 0x00000000 },
+ { 0x00008230, 0x00000000 },
+ { 0x00008234, 0x00000000 },
+ { 0x00008238, 0x00000000 },
+ { 0x0000823c, 0x00000000 },
+ { 0x00008240, 0x00100000 },
+ { 0x00008244, 0x0010f400 },
+ { 0x00008248, 0x00000100 },
+ { 0x0000824c, 0x0001e800 },
+ { 0x00008250, 0x00000000 },
+ { 0x00008254, 0x00000000 },
+ { 0x00008258, 0x00000000 },
+ { 0x0000825c, 0x400000ff },
+ { 0x00008260, 0x00080922 },
+ { 0x00008270, 0x00000000 },
+ { 0x00008274, 0x40000000 },
+ { 0x00008278, 0x003e4180 },
+ { 0x0000827c, 0x00000000 },
+ { 0x00008284, 0x0000002c },
+ { 0x00008288, 0x0000002c },
+ { 0x0000828c, 0x00000000 },
+ { 0x00008294, 0x00000000 },
+ { 0x00008298, 0x00000000 },
+ { 0x00008300, 0x00000000 },
+ { 0x00008304, 0x00000000 },
+ { 0x00008308, 0x00000000 },
+ { 0x0000830c, 0x00000000 },
+ { 0x00008310, 0x00000000 },
+ { 0x00008314, 0x00000000 },
+ { 0x00008318, 0x00000000 },
+ { 0x00008328, 0x00000000 },
+ { 0x0000832c, 0x00000007 },
+ { 0x00008330, 0x00000302 },
+ { 0x00008334, 0x00000e00 },
+ { 0x00008338, 0x00ff0000 },
+ { 0x0000833c, 0x00000000 },
+ { 0x00008340, 0x000107ff },
+ { 0x00009808, 0x00000000 },
+ { 0x0000980c, 0xad848e19 },
+ { 0x00009810, 0x7d14e000 },
+ { 0x00009814, 0x9c0a9f6b },
+ { 0x0000981c, 0x00000000 },
+ { 0x0000982c, 0x0000a000 },
+ { 0x00009830, 0x00000000 },
+ { 0x0000983c, 0x00200400 },
+ { 0x00009840, 0x206a01ae },
+ { 0x0000984c, 0x1284233c },
+ { 0x00009854, 0x00000859 },
+ { 0x00009900, 0x00000000 },
+ { 0x00009904, 0x00000000 },
+ { 0x00009908, 0x00000000 },
+ { 0x0000990c, 0x00000000 },
+ { 0x0000991c, 0x10000fff },
+ { 0x00009920, 0x05100000 },
+ { 0x0000a920, 0x05100000 },
+ { 0x0000b920, 0x05100000 },
+ { 0x00009928, 0x00000001 },
+ { 0x0000992c, 0x00000004 },
+ { 0x00009934, 0x1e1f2022 },
+ { 0x00009938, 0x0a0b0c0d },
+ { 0x0000993c, 0x00000000 },
+ { 0x00009948, 0x9280b212 },
+ { 0x0000994c, 0x00020028 },
+ { 0x00009954, 0x5f3ca3de },
+ { 0x00009958, 0x2108ecff },
+ { 0x00009940, 0x00750604 },
+ { 0x0000c95c, 0x004b6a8e },
+ { 0x00009970, 0x190fb515 },
+ { 0x00009974, 0x00000000 },
+ { 0x00009978, 0x00000001 },
+ { 0x0000997c, 0x00000000 },
+ { 0x00009980, 0x00000000 },
+ { 0x00009984, 0x00000000 },
+ { 0x00009988, 0x00000000 },
+ { 0x0000998c, 0x00000000 },
+ { 0x00009990, 0x00000000 },
+ { 0x00009994, 0x00000000 },
+ { 0x00009998, 0x00000000 },
+ { 0x0000999c, 0x00000000 },
+ { 0x000099a0, 0x00000000 },
+ { 0x000099a4, 0x00000001 },
+ { 0x000099a8, 0x201fff00 },
+ { 0x000099ac, 0x006f0000 },
+ { 0x000099b0, 0x03051000 },
+ { 0x000099dc, 0x00000000 },
+ { 0x000099e0, 0x00000200 },
+ { 0x000099e4, 0xaaaaaaaa },
+ { 0x000099e8, 0x3c466478 },
+ { 0x000099ec, 0x0cc80caa },
+ { 0x000099fc, 0x00001042 },
+ { 0x00009b00, 0x00000000 },
+ { 0x00009b04, 0x00000001 },
+ { 0x00009b08, 0x00000002 },
+ { 0x00009b0c, 0x00000003 },
+ { 0x00009b10, 0x00000004 },
+ { 0x00009b14, 0x00000005 },
+ { 0x00009b18, 0x00000008 },
+ { 0x00009b1c, 0x00000009 },
+ { 0x00009b20, 0x0000000a },
+ { 0x00009b24, 0x0000000b },
+ { 0x00009b28, 0x0000000c },
+ { 0x00009b2c, 0x0000000d },
+ { 0x00009b30, 0x00000010 },
+ { 0x00009b34, 0x00000011 },
+ { 0x00009b38, 0x00000012 },
+ { 0x00009b3c, 0x00000013 },
+ { 0x00009b40, 0x00000014 },
+ { 0x00009b44, 0x00000015 },
+ { 0x00009b48, 0x00000018 },
+ { 0x00009b4c, 0x00000019 },
+ { 0x00009b50, 0x0000001a },
+ { 0x00009b54, 0x0000001b },
+ { 0x00009b58, 0x0000001c },
+ { 0x00009b5c, 0x0000001d },
+ { 0x00009b60, 0x00000020 },
+ { 0x00009b64, 0x00000021 },
+ { 0x00009b68, 0x00000022 },
+ { 0x00009b6c, 0x00000023 },
+ { 0x00009b70, 0x00000024 },
+ { 0x00009b74, 0x00000025 },
+ { 0x00009b78, 0x00000028 },
+ { 0x00009b7c, 0x00000029 },
+ { 0x00009b80, 0x0000002a },
+ { 0x00009b84, 0x0000002b },
+ { 0x00009b88, 0x0000002c },
+ { 0x00009b8c, 0x0000002d },
+ { 0x00009b90, 0x00000030 },
+ { 0x00009b94, 0x00000031 },
+ { 0x00009b98, 0x00000032 },
+ { 0x00009b9c, 0x00000033 },
+ { 0x00009ba0, 0x00000034 },
+ { 0x00009ba4, 0x00000035 },
+ { 0x00009ba8, 0x00000035 },
+ { 0x00009bac, 0x00000035 },
+ { 0x00009bb0, 0x00000035 },
+ { 0x00009bb4, 0x00000035 },
+ { 0x00009bb8, 0x00000035 },
+ { 0x00009bbc, 0x00000035 },
+ { 0x00009bc0, 0x00000035 },
+ { 0x00009bc4, 0x00000035 },
+ { 0x00009bc8, 0x00000035 },
+ { 0x00009bcc, 0x00000035 },
+ { 0x00009bd0, 0x00000035 },
+ { 0x00009bd4, 0x00000035 },
+ { 0x00009bd8, 0x00000035 },
+ { 0x00009bdc, 0x00000035 },
+ { 0x00009be0, 0x00000035 },
+ { 0x00009be4, 0x00000035 },
+ { 0x00009be8, 0x00000035 },
+ { 0x00009bec, 0x00000035 },
+ { 0x00009bf0, 0x00000035 },
+ { 0x00009bf4, 0x00000035 },
+ { 0x00009bf8, 0x00000010 },
+ { 0x00009bfc, 0x0000001a },
+ { 0x0000a210, 0x40806333 },
+ { 0x0000a214, 0x00106c10 },
+ { 0x0000a218, 0x009c4060 },
+ { 0x0000a220, 0x018830c6 },
+ { 0x0000a224, 0x00000400 },
+ { 0x0000a228, 0x001a0bb5 },
+ { 0x0000a22c, 0x00000000 },
+ { 0x0000a234, 0x20202020 },
+ { 0x0000a238, 0x20202020 },
+ { 0x0000a23c, 0x13c889af },
+ { 0x0000a240, 0x38490a20 },
+ { 0x0000a244, 0x00007bb6 },
+ { 0x0000a248, 0x0fff3ffc },
+ { 0x0000a24c, 0x00000001 },
+ { 0x0000a250, 0x0000e000 },
+ { 0x0000a254, 0x00000000 },
+ { 0x0000a258, 0x0cc75380 },
+ { 0x0000a25c, 0x0f0f0f01 },
+ { 0x0000a260, 0xdfa91f01 },
+ { 0x0000a268, 0x00000001 },
+ { 0x0000a26c, 0x0ebae9c6 },
+ { 0x0000b26c, 0x0ebae9c6 },
+ { 0x0000c26c, 0x0ebae9c6 },
+ { 0x0000d270, 0x00820820 },
+ { 0x0000a278, 0x1ce739ce },
+ { 0x0000a27c, 0x050701ce },
+ { 0x0000a338, 0x00000000 },
+ { 0x0000a33c, 0x00000000 },
+ { 0x0000a340, 0x00000000 },
+ { 0x0000a344, 0x00000000 },
+ { 0x0000a348, 0x3fffffff },
+ { 0x0000a34c, 0x3fffffff },
+ { 0x0000a350, 0x3fffffff },
+ { 0x0000a354, 0x0003ffff },
+ { 0x0000a358, 0x79bfaa03 },
+ { 0x0000d35c, 0x07ffffef },
+ { 0x0000d360, 0x0fffffe7 },
+ { 0x0000d364, 0x17ffffe5 },
+ { 0x0000d368, 0x1fffffe4 },
+ { 0x0000d36c, 0x37ffffe3 },
+ { 0x0000d370, 0x3fffffe3 },
+ { 0x0000d374, 0x57ffffe3 },
+ { 0x0000d378, 0x5fffffe2 },
+ { 0x0000d37c, 0x7fffffe2 },
+ { 0x0000d380, 0x7f3c7bba },
+ { 0x0000d384, 0xf3307ff0 },
+ { 0x0000a388, 0x0c000000 },
+ { 0x0000a38c, 0x20202020 },
+ { 0x0000a390, 0x20202020 },
+ { 0x0000a394, 0x1ce739ce },
+ { 0x0000a398, 0x000001ce },
+ { 0x0000a39c, 0x00000001 },
+ { 0x0000a3a0, 0x00000000 },
+ { 0x0000a3a4, 0x00000000 },
+ { 0x0000a3a8, 0x00000000 },
+ { 0x0000a3ac, 0x00000000 },
+ { 0x0000a3b0, 0x00000000 },
+ { 0x0000a3b4, 0x00000000 },
+ { 0x0000a3b8, 0x00000000 },
+ { 0x0000a3bc, 0x00000000 },
+ { 0x0000a3c0, 0x00000000 },
+ { 0x0000a3c4, 0x00000000 },
+ { 0x0000a3c8, 0x00000246 },
+ { 0x0000a3cc, 0x20202020 },
+ { 0x0000a3d0, 0x20202020 },
+ { 0x0000a3d4, 0x20202020 },
+ { 0x0000a3dc, 0x1ce739ce },
+ { 0x0000a3e0, 0x000001ce },
+};
+
+static const u32 ar5416Bank0_9160[][2] = {
+ { 0x000098b0, 0x1e5795e5 },
+ { 0x000098e0, 0x02008020 },
+};
+
+static const u32 ar5416BB_RfGain_9160[][3] = {
+ { 0x00009a00, 0x00000000, 0x00000000 },
+ { 0x00009a04, 0x00000040, 0x00000040 },
+ { 0x00009a08, 0x00000080, 0x00000080 },
+ { 0x00009a0c, 0x000001a1, 0x00000141 },
+ { 0x00009a10, 0x000001e1, 0x00000181 },
+ { 0x00009a14, 0x00000021, 0x000001c1 },
+ { 0x00009a18, 0x00000061, 0x00000001 },
+ { 0x00009a1c, 0x00000168, 0x00000041 },
+ { 0x00009a20, 0x000001a8, 0x000001a8 },
+ { 0x00009a24, 0x000001e8, 0x000001e8 },
+ { 0x00009a28, 0x00000028, 0x00000028 },
+ { 0x00009a2c, 0x00000068, 0x00000068 },
+ { 0x00009a30, 0x00000189, 0x000000a8 },
+ { 0x00009a34, 0x000001c9, 0x00000169 },
+ { 0x00009a38, 0x00000009, 0x000001a9 },
+ { 0x00009a3c, 0x00000049, 0x000001e9 },
+ { 0x00009a40, 0x00000089, 0x00000029 },
+ { 0x00009a44, 0x00000170, 0x00000069 },
+ { 0x00009a48, 0x000001b0, 0x00000190 },
+ { 0x00009a4c, 0x000001f0, 0x000001d0 },
+ { 0x00009a50, 0x00000030, 0x00000010 },
+ { 0x00009a54, 0x00000070, 0x00000050 },
+ { 0x00009a58, 0x00000191, 0x00000090 },
+ { 0x00009a5c, 0x000001d1, 0x00000151 },
+ { 0x00009a60, 0x00000011, 0x00000191 },
+ { 0x00009a64, 0x00000051, 0x000001d1 },
+ { 0x00009a68, 0x00000091, 0x00000011 },
+ { 0x00009a6c, 0x000001b8, 0x00000051 },
+ { 0x00009a70, 0x000001f8, 0x00000198 },
+ { 0x00009a74, 0x00000038, 0x000001d8 },
+ { 0x00009a78, 0x00000078, 0x00000018 },
+ { 0x00009a7c, 0x00000199, 0x00000058 },
+ { 0x00009a80, 0x000001d9, 0x00000098 },
+ { 0x00009a84, 0x00000019, 0x00000159 },
+ { 0x00009a88, 0x00000059, 0x00000199 },
+ { 0x00009a8c, 0x00000099, 0x000001d9 },
+ { 0x00009a90, 0x000000d9, 0x00000019 },
+ { 0x00009a94, 0x000000f9, 0x00000059 },
+ { 0x00009a98, 0x000000f9, 0x00000099 },
+ { 0x00009a9c, 0x000000f9, 0x000000d9 },
+ { 0x00009aa0, 0x000000f9, 0x000000f9 },
+ { 0x00009aa4, 0x000000f9, 0x000000f9 },
+ { 0x00009aa8, 0x000000f9, 0x000000f9 },
+ { 0x00009aac, 0x000000f9, 0x000000f9 },
+ { 0x00009ab0, 0x000000f9, 0x000000f9 },
+ { 0x00009ab4, 0x000000f9, 0x000000f9 },
+ { 0x00009ab8, 0x000000f9, 0x000000f9 },
+ { 0x00009abc, 0x000000f9, 0x000000f9 },
+ { 0x00009ac0, 0x000000f9, 0x000000f9 },
+ { 0x00009ac4, 0x000000f9, 0x000000f9 },
+ { 0x00009ac8, 0x000000f9, 0x000000f9 },
+ { 0x00009acc, 0x000000f9, 0x000000f9 },
+ { 0x00009ad0, 0x000000f9, 0x000000f9 },
+ { 0x00009ad4, 0x000000f9, 0x000000f9 },
+ { 0x00009ad8, 0x000000f9, 0x000000f9 },
+ { 0x00009adc, 0x000000f9, 0x000000f9 },
+ { 0x00009ae0, 0x000000f9, 0x000000f9 },
+ { 0x00009ae4, 0x000000f9, 0x000000f9 },
+ { 0x00009ae8, 0x000000f9, 0x000000f9 },
+ { 0x00009aec, 0x000000f9, 0x000000f9 },
+ { 0x00009af0, 0x000000f9, 0x000000f9 },
+ { 0x00009af4, 0x000000f9, 0x000000f9 },
+ { 0x00009af8, 0x000000f9, 0x000000f9 },
+ { 0x00009afc, 0x000000f9, 0x000000f9 },
+};
+
+static const u32 ar5416Bank1_9160[][2] = {
+ { 0x000098b0, 0x02108421 },
+ { 0x000098ec, 0x00000008 },
+};
+
+static const u32 ar5416Bank2_9160[][2] = {
+ { 0x000098b0, 0x0e73ff17 },
+ { 0x000098e0, 0x00000420 },
+};
+
+static const u32 ar5416Bank3_9160[][3] = {
+ { 0x000098f0, 0x01400018, 0x01c00018 },
+};
+
+static const u32 ar5416Bank6_9160[][3] = {
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00e00000, 0x00e00000 },
+ { 0x0000989c, 0x005e0000, 0x005e0000 },
+ { 0x0000989c, 0x00120000, 0x00120000 },
+ { 0x0000989c, 0x00620000, 0x00620000 },
+ { 0x0000989c, 0x00020000, 0x00020000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x40ff0000, 0x40ff0000 },
+ { 0x0000989c, 0x005f0000, 0x005f0000 },
+ { 0x0000989c, 0x00870000, 0x00870000 },
+ { 0x0000989c, 0x00f90000, 0x00f90000 },
+ { 0x0000989c, 0x007b0000, 0x007b0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00f50000, 0x00f50000 },
+ { 0x0000989c, 0x00dc0000, 0x00dc0000 },
+ { 0x0000989c, 0x00110000, 0x00110000 },
+ { 0x0000989c, 0x006100a8, 0x006100a8 },
+ { 0x0000989c, 0x004210a2, 0x004210a2 },
+ { 0x0000989c, 0x0014008f, 0x0014008f },
+ { 0x0000989c, 0x00c40003, 0x00c40003 },
+ { 0x0000989c, 0x003000f2, 0x003000f2 },
+ { 0x0000989c, 0x00440016, 0x00440016 },
+ { 0x0000989c, 0x00410040, 0x00410040 },
+ { 0x0000989c, 0x0001805e, 0x0001805e },
+ { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
+ { 0x0000989c, 0x000000f1, 0x000000f1 },
+ { 0x0000989c, 0x00002081, 0x00002081 },
+ { 0x0000989c, 0x000000d4, 0x000000d4 },
+ { 0x000098d0, 0x0000000f, 0x0010000f },
+};
+
+static const u32 ar5416Bank6TPC_9160[][3] = {
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00e00000, 0x00e00000 },
+ { 0x0000989c, 0x005e0000, 0x005e0000 },
+ { 0x0000989c, 0x00120000, 0x00120000 },
+ { 0x0000989c, 0x00620000, 0x00620000 },
+ { 0x0000989c, 0x00020000, 0x00020000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x40ff0000, 0x40ff0000 },
+ { 0x0000989c, 0x005f0000, 0x005f0000 },
+ { 0x0000989c, 0x00870000, 0x00870000 },
+ { 0x0000989c, 0x00f90000, 0x00f90000 },
+ { 0x0000989c, 0x007b0000, 0x007b0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00f50000, 0x00f50000 },
+ { 0x0000989c, 0x00dc0000, 0x00dc0000 },
+ { 0x0000989c, 0x00110000, 0x00110000 },
+ { 0x0000989c, 0x006100a8, 0x006100a8 },
+ { 0x0000989c, 0x00423022, 0x00423022 },
+ { 0x0000989c, 0x2014008f, 0x2014008f },
+ { 0x0000989c, 0x00c40002, 0x00c40002 },
+ { 0x0000989c, 0x003000f2, 0x003000f2 },
+ { 0x0000989c, 0x00440016, 0x00440016 },
+ { 0x0000989c, 0x00410040, 0x00410040 },
+ { 0x0000989c, 0x0001805e, 0x0001805e },
+ { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
+ { 0x0000989c, 0x000000e1, 0x000000e1 },
+ { 0x0000989c, 0x00007080, 0x00007080 },
+ { 0x0000989c, 0x000000d4, 0x000000d4 },
+ { 0x000098d0, 0x0000000f, 0x0010000f },
+};
+
+static const u32 ar5416Bank7_9160[][2] = {
+ { 0x0000989c, 0x00000500 },
+ { 0x0000989c, 0x00000800 },
+ { 0x000098cc, 0x0000000e },
+};
+
+static const u32 ar5416Addac_9160[][2] = {
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x000000c0 },
+ {0x0000989c, 0x00000018 },
+ {0x0000989c, 0x00000004 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x000000c0 },
+ {0x0000989c, 0x00000019 },
+ {0x0000989c, 0x00000004 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000004 },
+ {0x0000989c, 0x00000003 },
+ {0x0000989c, 0x00000008 },
+ {0x0000989c, 0x00000000 },
+ {0x000098cc, 0x00000000 },
+};
+
+static const u32 ar5416Addac_91601_1[][2] = {
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x000000c0 },
+ {0x0000989c, 0x00000018 },
+ {0x0000989c, 0x00000004 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x000000c0 },
+ {0x0000989c, 0x00000019 },
+ {0x0000989c, 0x00000004 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x000098cc, 0x00000000 },
+};
+
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
new file mode 100644
index 000000000000..5fdbb53b47e0
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -0,0 +1,1000 @@
+/*
+ * Copyright (c) 2008-2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "hw-ops.h"
+#include "ar9002_phy.h"
+
+#define AR9285_CLCAL_REDO_THRESH 1
+
+static void ar9002_hw_setup_calibration(struct ath_hw *ah,
+ struct ath9k_cal_list *currCal)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(0),
+ AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX,
+ currCal->calData->calCountMax);
+
+ switch (currCal->calData->calType) {
+ case IQ_MISMATCH_CAL:
+ REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "starting IQ Mismatch Calibration\n");
+ break;
+ case ADC_GAIN_CAL:
+ REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_GAIN);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "starting ADC Gain Calibration\n");
+ break;
+ case ADC_DC_CAL:
+ REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_PER);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "starting ADC DC Calibration\n");
+ break;
+ case ADC_DC_INIT_CAL:
+ REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_INIT);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "starting Init ADC DC Calibration\n");
+ break;
+ case TEMP_COMP_CAL:
+ break; /* Not supported */
+ }
+
+ REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
+ AR_PHY_TIMING_CTRL4_DO_CAL);
+}
+
+static bool ar9002_hw_per_calibration(struct ath_hw *ah,
+ struct ath9k_channel *ichan,
+ u8 rxchainmask,
+ struct ath9k_cal_list *currCal)
+{
+ bool iscaldone = false;
+
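+ /* If a calibration is in progress and the hardware has cleared the
+  * DO_CAL bit, the pass is complete: collect the results and either
+  * post-process them or start another pass until enough samples
+  * have been gathered. */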
+ if (currCal->calState == CAL_RUNNING) {
+ if (!(REG_READ(ah, AR_PHY_TIMING_CTRL4(0)) &
+ AR_PHY_TIMING_CTRL4_DO_CAL)) {
+
+ currCal->calData->calCollect(ah);
+ ah->cal_samples++;
+
+ if (ah->cal_samples >=
+ currCal->calData->calNumSamples) {
+ int i, numChains = 0;
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ if (rxchainmask & (1 << i))
+ numChains++;
+ }
+
+ currCal->calData->calPostProc(ah, numChains);
+ ichan->CalValid |= currCal->calData->calType;
+ currCal->calState = CAL_DONE;
+ iscaldone = true;
+ } else {
+ ar9002_hw_setup_calibration(ah, currCal);
+ }
+ }
+ } else if (!(ichan->CalValid & currCal->calData->calType)) {
+ ath9k_hw_reset_calibration(ah, currCal);
+ }
+
+ return iscaldone;
+}
+
+/* Assumes you are talking about the currently configured channel */
+static bool ar9002_hw_iscal_supported(struct ath_hw *ah,
+ enum ath9k_cal_types calType)
+{
+ struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
+
+ switch (calType & ah->supp_cals) {
+ case IQ_MISMATCH_CAL: /* Both 2 GHz and 5 GHz support OFDM */
+ return true;
+ case ADC_GAIN_CAL:
+ case ADC_DC_CAL:
+ if (!(conf->channel->band == IEEE80211_BAND_2GHZ &&
+ conf_is_ht20(conf)))
+ return true;
+ break;
+ }
+ return false;
+}
+
+static void ar9002_hw_iqcal_collect(struct ath_hw *ah)
+{
+ int i;
+
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ ah->totalPowerMeasI[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
+ ah->totalPowerMeasQ[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
+ ah->totalIqCorrMeas[i] +=
+ (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
+ ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
+ ah->cal_samples, i, ah->totalPowerMeasI[i],
+ ah->totalPowerMeasQ[i],
+ ah->totalIqCorrMeas[i]);
+ }
+}
+
+static void ar9002_hw_adc_gaincal_collect(struct ath_hw *ah)
+{
+ int i;
+
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ ah->totalAdcIOddPhase[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
+ ah->totalAdcIEvenPhase[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
+ ah->totalAdcQOddPhase[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
+ ah->totalAdcQEvenPhase[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
+
+ ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
+ "oddq=0x%08x; evenq=0x%08x;\n",
+ ah->cal_samples, i,
+ ah->totalAdcIOddPhase[i],
+ ah->totalAdcIEvenPhase[i],
+ ah->totalAdcQOddPhase[i],
+ ah->totalAdcQEvenPhase[i]);
+ }
+}
+
+static void ar9002_hw_adc_dccal_collect(struct ath_hw *ah)
+{
+ int i;
+
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ ah->totalAdcDcOffsetIOddPhase[i] +=
+ (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
+ ah->totalAdcDcOffsetIEvenPhase[i] +=
+ (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
+ ah->totalAdcDcOffsetQOddPhase[i] +=
+ (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
+ ah->totalAdcDcOffsetQEvenPhase[i] +=
+ (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
+
+ ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
+ "oddq=0x%08x; evenq=0x%08x;\n",
+ ah->cal_samples, i,
+ ah->totalAdcDcOffsetIOddPhase[i],
+ ah->totalAdcDcOffsetIEvenPhase[i],
+ ah->totalAdcDcOffsetQOddPhase[i],
+ ah->totalAdcDcOffsetQEvenPhase[i]);
+ }
+}
+
+static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 powerMeasQ, powerMeasI, iqCorrMeas;
+ u32 qCoffDenom, iCoffDenom;
+ int32_t qCoff, iCoff;
+ int iqCorrNeg, i;
+
+ for (i = 0; i < numChains; i++) {
+ powerMeasI = ah->totalPowerMeasI[i];
+ powerMeasQ = ah->totalPowerMeasQ[i];
+ iqCorrMeas = ah->totalIqCorrMeas[i];
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Starting IQ Cal and Correction for Chain %d\n",
+ i);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Orignal: Chn %diq_corr_meas = 0x%08x\n",
+ i, ah->totalIqCorrMeas[i]);
+
+ iqCorrNeg = 0;
+
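+ /* iqCorrMeas is reported as a 32-bit two's-complement value; convert
+  * negative readings to magnitude and remember the sign separately. */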
+ if (iqCorrMeas > 0x80000000) {
+ iqCorrMeas = (0xffffffff - iqCorrMeas) + 1;
+ iqCorrNeg = 1;
+ }
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
+ ath_print(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
+ iqCorrNeg);
+
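+ /* Scale the accumulated power measurements down to form the divisors
+  * used for the I and Q correction coefficients. */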
+ iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 128;
+ qCoffDenom = powerMeasQ / 64;
+
+ if ((powerMeasQ != 0) && (iCoffDenom != 0) &&
+ (qCoffDenom != 0)) {
+ iCoff = iqCorrMeas / iCoffDenom;
+ qCoff = powerMeasI / qCoffDenom - 64;
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d iCoff = 0x%08x\n", i, iCoff);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d qCoff = 0x%08x\n", i, qCoff);
+
+ iCoff = iCoff & 0x3f;
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "New: Chn %d iCoff = 0x%08x\n", i, iCoff);
+ if (iqCorrNeg == 0x0)
+ iCoff = 0x40 - iCoff;
+
+ if (qCoff > 15)
+ qCoff = 15;
+ else if (qCoff <= -16)
+ qCoff = -16;
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
+ i, iCoff, qCoff);
+
+ REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
+ AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF,
+ iCoff);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
+ AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF,
+ qCoff);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "IQ Cal and Correction done for Chain %d\n",
+ i);
+ }
+ }
+
+ REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
+ AR_PHY_TIMING_CTRL4_IQCORR_ENABLE);
+}
+
+static void ar9002_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 iOddMeasOffset, iEvenMeasOffset, qOddMeasOffset, qEvenMeasOffset;
+ u32 qGainMismatch, iGainMismatch, val, i;
+
+ for (i = 0; i < numChains; i++) {
+ iOddMeasOffset = ah->totalAdcIOddPhase[i];
+ iEvenMeasOffset = ah->totalAdcIEvenPhase[i];
+ qOddMeasOffset = ah->totalAdcQOddPhase[i];
+ qEvenMeasOffset = ah->totalAdcQEvenPhase[i];
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Starting ADC Gain Cal for Chain %d\n", i);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_odd_i = 0x%08x\n", i,
+ iOddMeasOffset);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_even_i = 0x%08x\n", i,
+ iEvenMeasOffset);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_odd_q = 0x%08x\n", i,
+ qOddMeasOffset);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_even_q = 0x%08x\n", i,
+ qEvenMeasOffset);
+
+ if (iOddMeasOffset != 0 && qEvenMeasOffset != 0) {
+ iGainMismatch =
+ ((iEvenMeasOffset * 32) /
+ iOddMeasOffset) & 0x3f;
+ qGainMismatch =
+ ((qOddMeasOffset * 32) /
+ qEvenMeasOffset) & 0x3f;
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d gain_mismatch_i = 0x%08x\n", i,
+ iGainMismatch);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d gain_mismatch_q = 0x%08x\n", i,
+ qGainMismatch);
+
+ val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
+ val &= 0xfffff000;
+ val |= (qGainMismatch) | (iGainMismatch << 6);
+ REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "ADC Gain Cal done for Chain %d\n", i);
+ }
+ }
+
+ REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
+ REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) |
+ AR_PHY_NEW_ADC_GAIN_CORR_ENABLE);
+}
+
+static void ar9002_hw_adc_dccal_calibrate(struct ath_hw *ah, u8 numChains)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 iOddMeasOffset, iEvenMeasOffset, val, i;
+ int32_t qOddMeasOffset, qEvenMeasOffset, qDcMismatch, iDcMismatch;
+ const struct ath9k_percal_data *calData =
+ ah->cal_list_curr->calData;
+ u32 numSamples =
+ (1 << (calData->calCountMax + 5)) * calData->calNumSamples;
+
+ for (i = 0; i < numChains; i++) {
+ iOddMeasOffset = ah->totalAdcDcOffsetIOddPhase[i];
+ iEvenMeasOffset = ah->totalAdcDcOffsetIEvenPhase[i];
+ qOddMeasOffset = ah->totalAdcDcOffsetQOddPhase[i];
+ qEvenMeasOffset = ah->totalAdcDcOffsetQEvenPhase[i];
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Starting ADC DC Offset Cal for Chain %d\n", i);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_odd_i = %d\n", i,
+ iOddMeasOffset);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_even_i = %d\n", i,
+ iEvenMeasOffset);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_odd_q = %d\n", i,
+ qOddMeasOffset);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_even_q = %d\n", i,
+ qEvenMeasOffset);
+
+ iDcMismatch = (((iEvenMeasOffset - iOddMeasOffset) * 2) /
+ numSamples) & 0x1ff;
+ qDcMismatch = (((qOddMeasOffset - qEvenMeasOffset) * 2) /
+ numSamples) & 0x1ff;
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d dc_offset_mismatch_i = 0x%08x\n", i,
+ iDcMismatch);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d dc_offset_mismatch_q = 0x%08x\n", i,
+ qDcMismatch);
+
+ val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
+ val &= 0xc0000fff;
+ val |= (qDcMismatch << 12) | (iDcMismatch << 21);
+ REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "ADC DC Offset Cal done for Chain %d\n", i);
+ }
+
+ REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
+ REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) |
+ AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE);
+}
+
+static void ar9287_hw_olc_temp_compensation(struct ath_hw *ah)
+{
+ u32 rddata;
+ int32_t delta, currPDADC, slope;
+
+ rddata = REG_READ(ah, AR_PHY_TX_PWRCTRL4);
+ currPDADC = MS(rddata, AR_PHY_TX_PWRCTRL_PD_AVG_OUT);
+
+ if (ah->initPDADC == 0 || currPDADC == 0) {
+ /*
+ * A zero value indicates that no frames have been transmitted
+ * yet; temperature compensation cannot be done until frames
+ * are transmitted.
+ */
+ return;
+ } else {
+ slope = ah->eep_ops->get_eeprom(ah, EEP_TEMPSENSE_SLOPE);
+
+ if (slope == 0) { /* to avoid divide by zero case */
+ delta = 0;
+ } else {
+ delta = ((currPDADC - ah->initPDADC)*4) / slope;
+ }
+ REG_RMW_FIELD(ah, AR_PHY_CH0_TX_PWRCTRL11,
+ AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta);
+ REG_RMW_FIELD(ah, AR_PHY_CH1_TX_PWRCTRL11,
+ AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta);
+ }
+}
+
+static void ar9280_hw_olc_temp_compensation(struct ath_hw *ah)
+{
+ u32 rddata, i;
+ int delta, currPDADC, regval;
+
+ rddata = REG_READ(ah, AR_PHY_TX_PWRCTRL4);
+ currPDADC = MS(rddata, AR_PHY_TX_PWRCTRL_PD_AVG_OUT);
+
+ if (ah->initPDADC == 0 || currPDADC == 0)
+ return;
+
+ if (ah->eep_ops->get_eeprom(ah, EEP_DAC_HPWR_5G))
+ delta = (currPDADC - ah->initPDADC + 4) / 8;
+ else
+ delta = (currPDADC - ah->initPDADC + 5) / 10;
+
+ if (delta != ah->PDADCdelta) {
+ ah->PDADCdelta = delta;
+ for (i = 1; i < AR9280_TX_GAIN_TABLE_SIZE; i++) {
+ regval = ah->originalGain[i] - delta;
+ if (regval < 0)
+ regval = 0;
+
+ REG_RMW_FIELD(ah,
+ AR_PHY_TX_GAIN_TBL1 + i * 4,
+ AR_PHY_TX_GAIN, regval);
+ }
+ }
+}
+
+static void ar9271_hw_pa_cal(struct ath_hw *ah, bool is_reset)
+{
+ u32 regVal;
+ unsigned int i;
+ u32 regList[][2] = {
+ { 0x786c, 0 },
+ { 0x7854, 0 },
+ { 0x7820, 0 },
+ { 0x7824, 0 },
+ { 0x7868, 0 },
+ { 0x783c, 0 },
+ { 0x7838, 0 } ,
+ { 0x7828, 0 } ,
+ };
+
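+ /* Save the analog registers that are about to be modified so they can
+  * be restored once the PA calibration has finished. */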
+ for (i = 0; i < ARRAY_SIZE(regList); i++)
+ regList[i][1] = REG_READ(ah, regList[i][0]);
+
+ regVal = REG_READ(ah, 0x7834);
+ regVal &= (~(0x1));
+ REG_WRITE(ah, 0x7834, regVal);
+ regVal = REG_READ(ah, 0x9808);
+ regVal |= (0x1 << 27);
+ REG_WRITE(ah, 0x9808, regVal);
+
+ /* 786c,b23,1, pwddac=1 */
+ REG_RMW_FIELD(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC, 1);
+ /* 7854, b5,1, pdrxtxbb=1 */
+ REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1, 1);
+ /* 7854, b7,1, pdv2i=1 */
+ REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I, 1);
+ /* 7854, b8,1, pddacinterface=1 */
+ REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF, 1);
+ /* 7824,b12,0, offcal=0 */
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL, 0);
+ /* 7838, b1,0, pwddb=0 */
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB, 0);
+ /* 7820,b11,0, enpacal=0 */
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL, 0);
+ /* 7820,b25,1, pdpadrv1=0 */
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1, 0);
+ /* 7820,b24,0, pdpadrv2=0 */
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV2, 0);
+ /* 7820,b23,0, pdpaout=0 */
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT, 0);
+ /* 783c,b14-16,7, padrvgn2tab_0=7 */
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G8, AR9285_AN_RF2G8_PADRVGN2TAB0, 7);
+ /*
+ * 7838,b29-31,0, padrvgn1tab_0=0
+ * does not matter since we turn it off
+ */
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PADRVGN2TAB0, 0);
+
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9271_AN_RF2G3_CCOMP, 0xfff);
+
+ /* Set:
+ * localmode=1,bmode=1,bmoderxtx=1,synthon=1,
+ * txon=1,paon=1,oscon=1,synthon_force=1
+ */
+ REG_WRITE(ah, AR9285_AN_TOP2, 0xca0358a0);
+ udelay(30);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9271_AN_RF2G6_OFFS, 0);
+
+ /* find off_6_1; */
+ for (i = 6; i > 0; i--) {
+ regVal = REG_READ(ah, 0x7834);
+ regVal |= (1 << (20 + i));
+ REG_WRITE(ah, 0x7834, regVal);
+ udelay(1);
+ /* regVal = REG_READ(ah, 0x7834); */
+ regVal &= (~(0x1 << (20 + i)));
+ regVal |= (MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9)
+ << (20 + i));
+ REG_WRITE(ah, 0x7834, regVal);
+ }
+
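+ /* Bits 20-26 of the last value written to 0x7834 now hold the result;
+  * extract them as the 7-bit PA offset. */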
+ regVal = (regVal >> 20) & 0x7f;
+
+ /* Update PA cal info */
+ if ((!is_reset) && (ah->pacal_info.prev_offset == regVal)) {
+ if (ah->pacal_info.max_skipcount < MAX_PACAL_SKIPCOUNT)
+ ah->pacal_info.max_skipcount =
+ 2 * ah->pacal_info.max_skipcount;
+ ah->pacal_info.skipcount = ah->pacal_info.max_skipcount;
+ } else {
+ ah->pacal_info.max_skipcount = 1;
+ ah->pacal_info.skipcount = 0;
+ ah->pacal_info.prev_offset = regVal;
+ }
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ regVal = REG_READ(ah, 0x7834);
+ regVal |= 0x1;
+ REG_WRITE(ah, 0x7834, regVal);
+ regVal = REG_READ(ah, 0x9808);
+ regVal &= (~(0x1 << 27));
+ REG_WRITE(ah, 0x9808, regVal);
+
+ for (i = 0; i < ARRAY_SIZE(regList); i++)
+ REG_WRITE(ah, regList[i][0], regList[i][1]);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+}
+
+static inline void ar9285_hw_pa_cal(struct ath_hw *ah, bool is_reset)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 regVal;
+ int i, offset, offs_6_1, offs_0;
+ u32 ccomp_org, reg_field;
+ u32 regList[][2] = {
+ { 0x786c, 0 },
+ { 0x7854, 0 },
+ { 0x7820, 0 },
+ { 0x7824, 0 },
+ { 0x7868, 0 },
+ { 0x783c, 0 },
+ { 0x7838, 0 },
+ };
+
+ ath_print(common, ATH_DBG_CALIBRATE, "Running PA Calibration\n");
+
+ /* PA CAL is not needed for high power solution */
+ if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) ==
+ AR5416_EEP_TXGAIN_HIGH_POWER)
+ return;
+
+ if (AR_SREV_9285_11(ah)) {
+ REG_WRITE(ah, AR9285_AN_TOP4, (AR9285_AN_TOP4_DEFAULT | 0x14));
+ udelay(10);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(regList); i++)
+ regList[i][1] = REG_READ(ah, regList[i][0]);
+
+ regVal = REG_READ(ah, 0x7834);
+ regVal &= (~(0x1));
+ REG_WRITE(ah, 0x7834, regVal);
+ regVal = REG_READ(ah, 0x9808);
+ regVal |= (0x1 << 27);
+ REG_WRITE(ah, 0x9808, regVal);
+
+ REG_RMW_FIELD(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC, 1);
+ REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1, 1);
+ REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I, 1);
+ REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF, 1);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL, 0);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB, 0);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL, 0);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1, 0);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV2, 0);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT, 0);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G8, AR9285_AN_RF2G8_PADRVGN2TAB0, 7);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PADRVGN2TAB0, 0);
+ ccomp_org = MS(REG_READ(ah, AR9285_AN_RF2G6), AR9285_AN_RF2G6_CCOMP);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_CCOMP, 0xf);
+
+ REG_WRITE(ah, AR9285_AN_TOP2, 0xca0358a0);
+ udelay(30);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_OFFS, 0);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, 0);
+
+ for (i = 6; i > 0; i--) {
+ regVal = REG_READ(ah, 0x7834);
+ regVal |= (1 << (19 + i));
+ REG_WRITE(ah, 0x7834, regVal);
+ udelay(1);
+ regVal = REG_READ(ah, 0x7834);
+ regVal &= (~(0x1 << (19 + i)));
+ reg_field = MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9);
+ regVal |= (reg_field << (19 + i));
+ REG_WRITE(ah, 0x7834, regVal);
+ }
+
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, 1);
+ udelay(1);
+ reg_field = MS(REG_READ(ah, AR9285_AN_RF2G9), AR9285_AN_RXTXBB1_SPARE9);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, reg_field);
+ offs_6_1 = MS(REG_READ(ah, AR9285_AN_RF2G6), AR9285_AN_RF2G6_OFFS);
+ offs_0 = MS(REG_READ(ah, AR9285_AN_RF2G3), AR9285_AN_RF2G3_PDVCCOMP);
+
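+ /* Combine the 6-bit offs_6_1 and 1-bit offs_0 fields into a single
+  * 7-bit PA offset value. */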
+ offset = (offs_6_1<<1) | offs_0;
+ offset = offset - 0;
+ offs_6_1 = offset>>1;
+ offs_0 = offset & 1;
+
+ if ((!is_reset) && (ah->pacal_info.prev_offset == offset)) {
+ if (ah->pacal_info.max_skipcount < MAX_PACAL_SKIPCOUNT)
+ ah->pacal_info.max_skipcount =
+ 2 * ah->pacal_info.max_skipcount;
+ ah->pacal_info.skipcount = ah->pacal_info.max_skipcount;
+ } else {
+ ah->pacal_info.max_skipcount = 1;
+ ah->pacal_info.skipcount = 0;
+ ah->pacal_info.prev_offset = offset;
+ }
+
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_OFFS, offs_6_1);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, offs_0);
+
+ regVal = REG_READ(ah, 0x7834);
+ regVal |= 0x1;
+ REG_WRITE(ah, 0x7834, regVal);
+ regVal = REG_READ(ah, 0x9808);
+ regVal &= (~(0x1 << 27));
+ REG_WRITE(ah, 0x9808, regVal);
+
+ for (i = 0; i < ARRAY_SIZE(regList); i++)
+ REG_WRITE(ah, regList[i][0], regList[i][1]);
+
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_CCOMP, ccomp_org);
+
+ if (AR_SREV_9285_11(ah))
+ REG_WRITE(ah, AR9285_AN_TOP4, AR9285_AN_TOP4_DEFAULT);
+
+}
+
+static void ar9002_hw_pa_cal(struct ath_hw *ah, bool is_reset)
+{
+ if (AR_SREV_9271(ah)) {
+ if (is_reset || !ah->pacal_info.skipcount)
+ ar9271_hw_pa_cal(ah, is_reset);
+ else
+ ah->pacal_info.skipcount--;
+ } else if (AR_SREV_9285_11_OR_LATER(ah)) {
+ if (is_reset || !ah->pacal_info.skipcount)
+ ar9285_hw_pa_cal(ah, is_reset);
+ else
+ ah->pacal_info.skipcount--;
+ }
+}
+
+static void ar9002_hw_olc_temp_compensation(struct ath_hw *ah)
+{
+ if (OLC_FOR_AR9287_10_LATER)
+ ar9287_hw_olc_temp_compensation(ah);
+ else if (OLC_FOR_AR9280_20_LATER)
+ ar9280_hw_olc_temp_compensation(ah);
+}
+
+static bool ar9002_hw_calibrate(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u8 rxchainmask,
+ bool longcal)
+{
+ bool iscaldone = true;
+ struct ath9k_cal_list *currCal = ah->cal_list_curr;
+
+ if (currCal &&
+ (currCal->calState == CAL_RUNNING ||
+ currCal->calState == CAL_WAITING)) {
+ iscaldone = ar9002_hw_per_calibration(ah, chan,
+ rxchainmask, currCal);
+ if (iscaldone) {
+ ah->cal_list_curr = currCal = currCal->calNext;
+
+ if (currCal->calState == CAL_WAITING) {
+ iscaldone = false;
+ ath9k_hw_reset_calibration(ah, currCal);
+ }
+ }
+ }
+
+ /* Do NF cal only at longer intervals */
+ if (longcal) {
+ /* Do periodic PAOffset Cal */
+ ar9002_hw_pa_cal(ah, false);
+ ar9002_hw_olc_temp_compensation(ah);
+
+ /*
+ * Get the value from the previous NF cal and update
+ * history buffer.
+ */
+ ath9k_hw_getnf(ah, chan);
+
+ /*
+ * Load the NF from history buffer of the current channel.
+ * NF is slow time-variant, so it is OK to use a historical
+ * value.
+ */
+ ath9k_hw_loadnf(ah, ah->curchan);
+
+ ath9k_hw_start_nfcal(ah);
+ }
+
+ return iscaldone;
+}
+
+/* Carrier leakage Calibration fix */
+static bool ar9285_hw_cl_cal(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
+ if (IS_CHAN_HT20(chan)) {
+ REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_PARALLEL_CAL_ENABLE);
+ REG_SET_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN);
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_FLTR_CAL);
+ REG_CLR_BIT(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_CAL_ENABLE);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
+ if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) {
+ ath_print(common, ATH_DBG_CALIBRATE, "offset "
+ "calibration failed to complete in "
+ "1ms; noisy ??\n");
+ return false;
+ }
+ REG_CLR_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN);
+ REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_PARALLEL_CAL_ENABLE);
+ REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
+ }
+ REG_CLR_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL);
+ REG_SET_BIT(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_CAL_ENABLE);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
+ if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
+ 0, AH_WAIT_TIMEOUT)) {
+ ath_print(common, ATH_DBG_CALIBRATE, "offset calibration "
+ "failed to complete in 1ms; noisy ??\n");
+ return false;
+ }
+
+ REG_SET_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC);
+ REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL);
+
+ return true;
+}
+
+static bool ar9285_hw_clc(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ int i;
+ u_int32_t txgain_max;
+ u_int32_t clc_gain, gain_mask = 0, clc_num = 0;
+ u_int32_t reg_clc_I0, reg_clc_Q0;
+ u_int32_t i0_num = 0;
+ u_int32_t q0_num = 0;
+ u_int32_t total_num = 0;
+ u_int32_t reg_rf2g5_org;
+ bool retv = true;
+
+ if (!(ar9285_hw_cl_cal(ah, chan)))
+ return false;
+
+ txgain_max = MS(REG_READ(ah, AR_PHY_TX_PWRCTRL7),
+ AR_PHY_TX_PWRCTRL_TX_GAIN_TAB_MAX);
+
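+ /* Count the number of distinct CLC gain indices referenced by the
+  * TX gain table. */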
+ for (i = 0; i < (txgain_max+1); i++) {
+ clc_gain = (REG_READ(ah, (AR_PHY_TX_GAIN_TBL1+(i<<2))) &
+ AR_PHY_TX_GAIN_CLC) >> AR_PHY_TX_GAIN_CLC_S;
+ if (!(gain_mask & (1 << clc_gain))) {
+ gain_mask |= (1 << clc_gain);
+ clc_num++;
+ }
+ }
+
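+ /* Count how many of those CLC entries have a zero I0 or Q0 component;
+  * too many zeros means the carrier leak calibration should be redone
+  * with a different IC50TX setting. */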
+ for (i = 0; i < clc_num; i++) {
+ reg_clc_I0 = (REG_READ(ah, (AR_PHY_CLC_TBL1 + (i << 2)))
+ & AR_PHY_CLC_I0) >> AR_PHY_CLC_I0_S;
+ reg_clc_Q0 = (REG_READ(ah, (AR_PHY_CLC_TBL1 + (i << 2)))
+ & AR_PHY_CLC_Q0) >> AR_PHY_CLC_Q0_S;
+ if (reg_clc_I0 == 0)
+ i0_num++;
+
+ if (reg_clc_Q0 == 0)
+ q0_num++;
+ }
+ total_num = i0_num + q0_num;
+ if (total_num > AR9285_CLCAL_REDO_THRESH) {
+ reg_rf2g5_org = REG_READ(ah, AR9285_RF2G5);
+ if (AR_SREV_9285E_20(ah)) {
+ REG_WRITE(ah, AR9285_RF2G5,
+ (reg_rf2g5_org & AR9285_RF2G5_IC50TX) |
+ AR9285_RF2G5_IC50TX_XE_SET);
+ } else {
+ REG_WRITE(ah, AR9285_RF2G5,
+ (reg_rf2g5_org & AR9285_RF2G5_IC50TX) |
+ AR9285_RF2G5_IC50TX_SET);
+ }
+ retv = ar9285_hw_cl_cal(ah, chan);
+ REG_WRITE(ah, AR9285_RF2G5, reg_rf2g5_org);
+ }
+ return retv;
+}
+
+static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (AR_SREV_9271(ah) || AR_SREV_9285_12_OR_LATER(ah)) {
+ if (!ar9285_hw_clc(ah, chan))
+ return false;
+ } else {
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ if (!AR_SREV_9287_10_OR_LATER(ah))
+ REG_CLR_BIT(ah, AR_PHY_ADC_CTL,
+ AR_PHY_ADC_CTL_OFF_PWDADC);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_FLTR_CAL);
+ }
+
+ /* Calibrate the AGC */
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL,
+ REG_READ(ah, AR_PHY_AGC_CONTROL) |
+ AR_PHY_AGC_CONTROL_CAL);
+
+ /* Poll for offset calibration complete */
+ if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_CAL,
+ 0, AH_WAIT_TIMEOUT)) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "offset calibration failed to "
+ "complete in 1ms; noisy environment?\n");
+ return false;
+ }
+
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ if (!AR_SREV_9287_10_OR_LATER(ah))
+ REG_SET_BIT(ah, AR_PHY_ADC_CTL,
+ AR_PHY_ADC_CTL_OFF_PWDADC);
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_FLTR_CAL);
+ }
+ }
+
+ /* Do PA Calibration */
+ ar9002_hw_pa_cal(ah, true);
+
+ /* Do NF Calibration after DC offset and other calibrations */
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL,
+ REG_READ(ah, AR_PHY_AGC_CONTROL) | AR_PHY_AGC_CONTROL_NF);
+
+ ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
+
+ /* Enable IQ, ADC Gain and ADC DC offset CALs */
+ if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah)) {
+ if (ar9002_hw_iscal_supported(ah, ADC_GAIN_CAL)) {
+ INIT_CAL(&ah->adcgain_caldata);
+ INSERT_CAL(ah, &ah->adcgain_caldata);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "enabling ADC Gain Calibration.\n");
+ }
+ if (ar9002_hw_iscal_supported(ah, ADC_DC_CAL)) {
+ INIT_CAL(&ah->adcdc_caldata);
+ INSERT_CAL(ah, &ah->adcdc_caldata);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "enabling ADC DC Calibration.\n");
+ }
+ if (ar9002_hw_iscal_supported(ah, IQ_MISMATCH_CAL)) {
+ INIT_CAL(&ah->iq_caldata);
+ INSERT_CAL(ah, &ah->iq_caldata);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "enabling IQ Calibration.\n");
+ }
+
+ ah->cal_list_curr = ah->cal_list;
+
+ if (ah->cal_list_curr)
+ ath9k_hw_reset_calibration(ah, ah->cal_list_curr);
+ }
+
+ chan->CalValid = 0;
+
+ return true;
+}
+
+static const struct ath9k_percal_data iq_cal_multi_sample = {
+ IQ_MISMATCH_CAL,
+ MAX_CAL_SAMPLES,
+ PER_MIN_LOG_COUNT,
+ ar9002_hw_iqcal_collect,
+ ar9002_hw_iqcalibrate
+};
+static const struct ath9k_percal_data iq_cal_single_sample = {
+ IQ_MISMATCH_CAL,
+ MIN_CAL_SAMPLES,
+ PER_MAX_LOG_COUNT,
+ ar9002_hw_iqcal_collect,
+ ar9002_hw_iqcalibrate
+};
+static const struct ath9k_percal_data adc_gain_cal_multi_sample = {
+ ADC_GAIN_CAL,
+ MAX_CAL_SAMPLES,
+ PER_MIN_LOG_COUNT,
+ ar9002_hw_adc_gaincal_collect,
+ ar9002_hw_adc_gaincal_calibrate
+};
+static const struct ath9k_percal_data adc_gain_cal_single_sample = {
+ ADC_GAIN_CAL,
+ MIN_CAL_SAMPLES,
+ PER_MAX_LOG_COUNT,
+ ar9002_hw_adc_gaincal_collect,
+ ar9002_hw_adc_gaincal_calibrate
+};
+static const struct ath9k_percal_data adc_dc_cal_multi_sample = {
+ ADC_DC_CAL,
+ MAX_CAL_SAMPLES,
+ PER_MIN_LOG_COUNT,
+ ar9002_hw_adc_dccal_collect,
+ ar9002_hw_adc_dccal_calibrate
+};
+static const struct ath9k_percal_data adc_dc_cal_single_sample = {
+ ADC_DC_CAL,
+ MIN_CAL_SAMPLES,
+ PER_MAX_LOG_COUNT,
+ ar9002_hw_adc_dccal_collect,
+ ar9002_hw_adc_dccal_calibrate
+};
+static const struct ath9k_percal_data adc_init_dc_cal = {
+ ADC_DC_INIT_CAL,
+ MIN_CAL_SAMPLES,
+ INIT_LOG_COUNT,
+ ar9002_hw_adc_dccal_collect,
+ ar9002_hw_adc_dccal_calibrate
+};
+
+static void ar9002_hw_init_cal_settings(struct ath_hw *ah)
+{
+ if (AR_SREV_9100(ah)) {
+ ah->iq_caldata.calData = &iq_cal_multi_sample;
+ ah->supp_cals = IQ_MISMATCH_CAL;
+ return;
+ }
+
+ if (AR_SREV_9160_10_OR_LATER(ah)) {
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ ah->iq_caldata.calData = &iq_cal_single_sample;
+ ah->adcgain_caldata.calData =
+ &adc_gain_cal_single_sample;
+ ah->adcdc_caldata.calData =
+ &adc_dc_cal_single_sample;
+ ah->adcdc_calinitdata.calData =
+ &adc_init_dc_cal;
+ } else {
+ ah->iq_caldata.calData = &iq_cal_multi_sample;
+ ah->adcgain_caldata.calData =
+ &adc_gain_cal_multi_sample;
+ ah->adcdc_caldata.calData =
+ &adc_dc_cal_multi_sample;
+ ah->adcdc_calinitdata.calData =
+ &adc_init_dc_cal;
+ }
+ ah->supp_cals = ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL;
+ }
+}
+
+void ar9002_hw_attach_calib_ops(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+ struct ath_hw_ops *ops = ath9k_hw_ops(ah);
+
+ priv_ops->init_cal_settings = ar9002_hw_init_cal_settings;
+ priv_ops->init_cal = ar9002_hw_init_cal;
+ priv_ops->setup_calibration = ar9002_hw_setup_calibration;
+ priv_ops->iscal_supported = ar9002_hw_iscal_supported;
+
+ ops->calibrate = ar9002_hw_calibrate;
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
new file mode 100644
index 000000000000..a8a8cdc04afa
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -0,0 +1,598 @@
+/*
+ * Copyright (c) 2008-2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "ar5008_initvals.h"
+#include "ar9001_initvals.h"
+#include "ar9002_initvals.h"
+
+/* General hardware code for the AR5008/AR9001/AR9002 hardware families */
+
+static bool ar9002_hw_macversion_supported(u32 macversion)
+{
+ switch (macversion) {
+ case AR_SREV_VERSION_5416_PCI:
+ case AR_SREV_VERSION_5416_PCIE:
+ case AR_SREV_VERSION_9160:
+ case AR_SREV_VERSION_9100:
+ case AR_SREV_VERSION_9280:
+ case AR_SREV_VERSION_9285:
+ case AR_SREV_VERSION_9287:
+ case AR_SREV_VERSION_9271:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
+{
+ if (AR_SREV_9271(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar9271Modes_9271,
+ ARRAY_SIZE(ar9271Modes_9271), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9271Common_9271,
+ ARRAY_SIZE(ar9271Common_9271), 2);
+ INIT_INI_ARRAY(&ah->iniCommon_normal_cck_fir_coeff_9271,
+ ar9271Common_normal_cck_fir_coeff_9271,
+ ARRAY_SIZE(ar9271Common_normal_cck_fir_coeff_9271), 2);
+ INIT_INI_ARRAY(&ah->iniCommon_japan_2484_cck_fir_coeff_9271,
+ ar9271Common_japan_2484_cck_fir_coeff_9271,
+ ARRAY_SIZE(ar9271Common_japan_2484_cck_fir_coeff_9271), 2);
+ INIT_INI_ARRAY(&ah->iniModes_9271_1_0_only,
+ ar9271Modes_9271_1_0_only,
+ ARRAY_SIZE(ar9271Modes_9271_1_0_only), 6);
+ INIT_INI_ARRAY(&ah->iniModes_9271_ANI_reg, ar9271Modes_9271_ANI_reg,
+ ARRAY_SIZE(ar9271Modes_9271_ANI_reg), 6);
+ INIT_INI_ARRAY(&ah->iniModes_high_power_tx_gain_9271,
+ ar9271Modes_high_power_tx_gain_9271,
+ ARRAY_SIZE(ar9271Modes_high_power_tx_gain_9271), 6);
+ INIT_INI_ARRAY(&ah->iniModes_normal_power_tx_gain_9271,
+ ar9271Modes_normal_power_tx_gain_9271,
+ ARRAY_SIZE(ar9271Modes_normal_power_tx_gain_9271), 6);
+ return;
+ }
+
+ if (AR_SREV_9287_11_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_1,
+ ARRAY_SIZE(ar9287Modes_9287_1_1), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_1,
+ ARRAY_SIZE(ar9287Common_9287_1_1), 2);
+ if (ah->config.pcie_clock_req)
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9287PciePhy_clkreq_off_L1_9287_1_1,
+ ARRAY_SIZE(ar9287PciePhy_clkreq_off_L1_9287_1_1), 2);
+ else
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9287PciePhy_clkreq_always_on_L1_9287_1_1,
+ ARRAY_SIZE(ar9287PciePhy_clkreq_always_on_L1_9287_1_1),
+ 2);
+ } else if (AR_SREV_9287_10_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_0,
+ ARRAY_SIZE(ar9287Modes_9287_1_0), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_0,
+ ARRAY_SIZE(ar9287Common_9287_1_0), 2);
+
+ if (ah->config.pcie_clock_req)
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9287PciePhy_clkreq_off_L1_9287_1_0,
+ ARRAY_SIZE(ar9287PciePhy_clkreq_off_L1_9287_1_0), 2);
+ else
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9287PciePhy_clkreq_always_on_L1_9287_1_0,
+ ARRAY_SIZE(ar9287PciePhy_clkreq_always_on_L1_9287_1_0),
+ 2);
+ } else if (AR_SREV_9285_12_OR_LATER(ah)) {
+
+
+ INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285_1_2,
+ ARRAY_SIZE(ar9285Modes_9285_1_2), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285_1_2,
+ ARRAY_SIZE(ar9285Common_9285_1_2), 2);
+
+ if (ah->config.pcie_clock_req) {
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9285PciePhy_clkreq_off_L1_9285_1_2,
+ ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285_1_2), 2);
+ } else {
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9285PciePhy_clkreq_always_on_L1_9285_1_2,
+ ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285_1_2),
+ 2);
+ }
+ } else if (AR_SREV_9285_10_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285,
+ ARRAY_SIZE(ar9285Modes_9285), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285,
+ ARRAY_SIZE(ar9285Common_9285), 2);
+
+ if (ah->config.pcie_clock_req) {
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9285PciePhy_clkreq_off_L1_9285,
+ ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285), 2);
+ } else {
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9285PciePhy_clkreq_always_on_L1_9285,
+ ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285), 2);
+ }
+ } else if (AR_SREV_9280_20_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280_2,
+ ARRAY_SIZE(ar9280Modes_9280_2), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280_2,
+ ARRAY_SIZE(ar9280Common_9280_2), 2);
+
+ if (ah->config.pcie_clock_req) {
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9280PciePhy_clkreq_off_L1_9280,
+ ARRAY_SIZE(ar9280PciePhy_clkreq_off_L1_9280), 2);
+ } else {
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9280PciePhy_clkreq_always_on_L1_9280,
+ ARRAY_SIZE(ar9280PciePhy_clkreq_always_on_L1_9280), 2);
+ }
+ INIT_INI_ARRAY(&ah->iniModesAdditional,
+ ar9280Modes_fast_clock_9280_2,
+ ARRAY_SIZE(ar9280Modes_fast_clock_9280_2), 3);
+ } else if (AR_SREV_9280_10_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280,
+ ARRAY_SIZE(ar9280Modes_9280), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280,
+ ARRAY_SIZE(ar9280Common_9280), 2);
+ } else if (AR_SREV_9160_10_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9160,
+ ARRAY_SIZE(ar5416Modes_9160), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9160,
+ ARRAY_SIZE(ar5416Common_9160), 2);
+ INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0_9160,
+ ARRAY_SIZE(ar5416Bank0_9160), 2);
+ INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain_9160,
+ ARRAY_SIZE(ar5416BB_RfGain_9160), 3);
+ INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1_9160,
+ ARRAY_SIZE(ar5416Bank1_9160), 2);
+ INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2_9160,
+ ARRAY_SIZE(ar5416Bank2_9160), 2);
+ INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3_9160,
+ ARRAY_SIZE(ar5416Bank3_9160), 3);
+ INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9160,
+ ARRAY_SIZE(ar5416Bank6_9160), 3);
+ INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9160,
+ ARRAY_SIZE(ar5416Bank6TPC_9160), 3);
+ INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7_9160,
+ ARRAY_SIZE(ar5416Bank7_9160), 2);
+ if (AR_SREV_9160_11(ah)) {
+ INIT_INI_ARRAY(&ah->iniAddac,
+ ar5416Addac_91601_1,
+ ARRAY_SIZE(ar5416Addac_91601_1), 2);
+ } else {
+ INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9160,
+ ARRAY_SIZE(ar5416Addac_9160), 2);
+ }
+ } else if (AR_SREV_9100_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9100,
+ ARRAY_SIZE(ar5416Modes_9100), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9100,
+ ARRAY_SIZE(ar5416Common_9100), 2);
+ INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0_9100,
+ ARRAY_SIZE(ar5416Bank0_9100), 2);
+ INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain_9100,
+ ARRAY_SIZE(ar5416BB_RfGain_9100), 3);
+ INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1_9100,
+ ARRAY_SIZE(ar5416Bank1_9100), 2);
+ INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2_9100,
+ ARRAY_SIZE(ar5416Bank2_9100), 2);
+ INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3_9100,
+ ARRAY_SIZE(ar5416Bank3_9100), 3);
+ INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9100,
+ ARRAY_SIZE(ar5416Bank6_9100), 3);
+ INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9100,
+ ARRAY_SIZE(ar5416Bank6TPC_9100), 3);
+ INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7_9100,
+ ARRAY_SIZE(ar5416Bank7_9100), 2);
+ INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9100,
+ ARRAY_SIZE(ar5416Addac_9100), 2);
+ } else {
+ INIT_INI_ARRAY(&ah->iniModes, ar5416Modes,
+ ARRAY_SIZE(ar5416Modes), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar5416Common,
+ ARRAY_SIZE(ar5416Common), 2);
+ INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0,
+ ARRAY_SIZE(ar5416Bank0), 2);
+ INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain,
+ ARRAY_SIZE(ar5416BB_RfGain), 3);
+ INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1,
+ ARRAY_SIZE(ar5416Bank1), 2);
+ INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2,
+ ARRAY_SIZE(ar5416Bank2), 2);
+ INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3,
+ ARRAY_SIZE(ar5416Bank3), 3);
+ INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6,
+ ARRAY_SIZE(ar5416Bank6), 3);
+ INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC,
+ ARRAY_SIZE(ar5416Bank6TPC), 3);
+ INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7,
+ ARRAY_SIZE(ar5416Bank7), 2);
+ INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac,
+ ARRAY_SIZE(ar5416Addac), 2);
+ }
+}
+
+/* Support for Japan ch.14 (2484) spread */
+void ar9002_hw_cck_chan14_spread(struct ath_hw *ah)
+{
+ if (AR_SREV_9287_11_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniCckfirNormal,
+ ar9287Common_normal_cck_fir_coeff_92871_1,
+ ARRAY_SIZE(ar9287Common_normal_cck_fir_coeff_92871_1),
+ 2);
+ INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+ ar9287Common_japan_2484_cck_fir_coeff_92871_1,
+ ARRAY_SIZE(ar9287Common_japan_2484_cck_fir_coeff_92871_1),
+ 2);
+ }
+}
+
+static void ar9280_20_hw_init_rxgain_ini(struct ath_hw *ah)
+{
+ u32 rxgain_type;
+
+ if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >=
+ AR5416_EEP_MINOR_VER_17) {
+ rxgain_type = ah->eep_ops->get_eeprom(ah, EEP_RXGAIN_TYPE);
+
+ if (rxgain_type == AR5416_EEP_RXGAIN_13DB_BACKOFF)
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9280Modes_backoff_13db_rxgain_9280_2,
+ ARRAY_SIZE(ar9280Modes_backoff_13db_rxgain_9280_2), 6);
+ else if (rxgain_type == AR5416_EEP_RXGAIN_23DB_BACKOFF)
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9280Modes_backoff_23db_rxgain_9280_2,
+ ARRAY_SIZE(ar9280Modes_backoff_23db_rxgain_9280_2), 6);
+ else
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9280Modes_original_rxgain_9280_2,
+ ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6);
+ } else {
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9280Modes_original_rxgain_9280_2,
+ ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6);
+ }
+}
+
+static void ar9280_20_hw_init_txgain_ini(struct ath_hw *ah)
+{
+ u32 txgain_type;
+
+ if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >=
+ AR5416_EEP_MINOR_VER_19) {
+ txgain_type = ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE);
+
+ if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER)
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9280Modes_high_power_tx_gain_9280_2,
+ ARRAY_SIZE(ar9280Modes_high_power_tx_gain_9280_2), 6);
+ else
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9280Modes_original_tx_gain_9280_2,
+ ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6);
+ } else {
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9280Modes_original_tx_gain_9280_2,
+ ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6);
+ }
+}
+
+static void ar9002_hw_init_mode_gain_regs(struct ath_hw *ah)
+{
+ if (AR_SREV_9287_11_OR_LATER(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9287Modes_rx_gain_9287_1_1,
+ ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_1), 6);
+ else if (AR_SREV_9287_10(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9287Modes_rx_gain_9287_1_0,
+ ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_0), 6);
+ else if (AR_SREV_9280_20(ah))
+ ar9280_20_hw_init_rxgain_ini(ah);
+
+ if (AR_SREV_9287_11_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9287Modes_tx_gain_9287_1_1,
+ ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_1), 6);
+ } else if (AR_SREV_9287_10(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9287Modes_tx_gain_9287_1_0,
+ ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_0), 6);
+ } else if (AR_SREV_9280_20(ah)) {
+ ar9280_20_hw_init_txgain_ini(ah);
+ } else if (AR_SREV_9285_12_OR_LATER(ah)) {
+ u32 txgain_type = ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE);
+
+ /* txgain table */
+ if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER) {
+ if (AR_SREV_9285E_20(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9285Modes_XE2_0_high_power,
+ ARRAY_SIZE(
+ ar9285Modes_XE2_0_high_power), 6);
+ } else {
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9285Modes_high_power_tx_gain_9285_1_2,
+ ARRAY_SIZE(
+ ar9285Modes_high_power_tx_gain_9285_1_2), 6);
+ }
+ } else {
+ if (AR_SREV_9285E_20(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9285Modes_XE2_0_normal_power,
+ ARRAY_SIZE(
+ ar9285Modes_XE2_0_normal_power), 6);
+ } else {
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9285Modes_original_tx_gain_9285_1_2,
+ ARRAY_SIZE(
+ ar9285Modes_original_tx_gain_9285_1_2), 6);
+ }
+ }
+ }
+}
+
+/*
+ * Helper for ASPM support.
+ *
+ * Disable PLL when in L0s as well as receiver clock when in L1.
+ * This power saving option must be enabled through the SerDes.
+ *
+ * Programming the SerDes must go through the same 288-bit serial shift
+ * register as the other analog registers. Hence the 9 writes.
+ */
+static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
+ int restore,
+ int power_off)
+{
+ u8 i;
+ u32 val;
+
+ if (ah->is_pciexpress != true)
+ return;
+
+ /* Do not touch SerDes registers */
+ if (ah->config.pcie_powersave_enable == 2)
+ return;
+
+ /* Nothing to do on restore for 11N */
+ if (!restore) {
+ if (AR_SREV_9280_20_OR_LATER(ah)) {
+ /*
+ * AR9280 2.0 or later chips use the SerDes values from
+ * initvals.h, initialized per chipset during
+ * __ath9k_hw_init().
+ */
+ for (i = 0; i < ah->iniPcieSerdes.ia_rows; i++) {
+ REG_WRITE(ah, INI_RA(&ah->iniPcieSerdes, i, 0),
+ INI_RA(&ah->iniPcieSerdes, i, 1));
+ }
+ } else if (AR_SREV_9280(ah) &&
+ (ah->hw_version.macRev == AR_SREV_REVISION_9280_10)) {
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fd00);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
+
+ /* RX shut off when elecidle is asserted */
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xa8000019);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x13160820);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980560);
+
+ /* Shut off CLKREQ active in L1 */
+ if (ah->config.pcie_clock_req)
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffc);
+ else
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffd);
+
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x00043007);
+
+ /* Load the new settings */
+ REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
+
+ } else {
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
+
+ /* RX shut off when elecidle is asserted */
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x28000039);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x53160824);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980579);
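+ /* Only rewrite the TX gain table when the PDADC delta has actually
+  * changed since the last compensation. */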
+
+ /*
+ * Ignore ah->ah_config.pcie_clock_req setting for
+ * pre-AR9280 11n
+ */
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x001defff);
+
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x000e3007);
+
+ /* Load the new settings */
+ REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+ }
+
+ udelay(1000);
+
+ /* set bit 19 to allow forcing of pcie core into L1 state */
+ REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
+
+ /* Several PCIe massages to ensure proper behaviour */
+ if (ah->config.pcie_waen) {
+ val = ah->config.pcie_waen;
+ if (!power_off)
+ val &= (~AR_WA_D3_L1_DISABLE);
+ } else {
+ if (AR_SREV_9285(ah) || AR_SREV_9271(ah) ||
+ AR_SREV_9287(ah)) {
+ val = AR9285_WA_DEFAULT;
+ if (!power_off)
+ val &= (~AR_WA_D3_L1_DISABLE);
+ } else if (AR_SREV_9280(ah)) {
+ /*
+ * On AR9280 chips, bit 22 of 0x4004 needs to be set,
+ * otherwise the card may disappear.
+ */
+ val = AR9280_WA_DEFAULT;
+ if (!power_off)
+ val &= (~AR_WA_D3_L1_DISABLE);
+ } else
+ val = AR_WA_DEFAULT;
+ }
+
+ REG_WRITE(ah, AR_WA, val);
+ }
+
+ if (power_off) {
+ /*
+ * Set PCIe workaround bits
+ * bit 14 in WA register (disable L1) should only
+ * be set when device enters D3 and be cleared
+ * when device comes back to D0.
+ */
+ if (ah->config.pcie_waen) {
+ if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE)
+ REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE);
+ } else {
+ if (((AR_SREV_9285(ah) || AR_SREV_9271(ah) ||
+ AR_SREV_9287(ah)) &&
+ (AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE)) ||
+ (AR_SREV_9280(ah) &&
+ (AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE))) {
+ REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE);
+ }
+ }
+ }
+}
+
+static int ar9002_hw_get_radiorev(struct ath_hw *ah)
+{
+ u32 val;
+ int i;
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ REG_WRITE(ah, AR_PHY(0x36), 0x00007058);
+ for (i = 0; i < 8; i++)
+ REG_WRITE(ah, AR_PHY(0x20), 0x00010000);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
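+ /* Read back the revision, swap its nibbles and reverse the bit order
+  * to recover the radio revision value. */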
+ val = (REG_READ(ah, AR_PHY(256)) >> 24) & 0xff;
+ val = ((val & 0xf0) >> 4) | ((val & 0x0f) << 4);
+
+ return ath9k_hw_reverse_bits(val, 8);
+}
+
+int ar9002_hw_rf_claim(struct ath_hw *ah)
+{
+ u32 val;
+
+ REG_WRITE(ah, AR_PHY(0), 0x00000007);
+
+ val = ar9002_hw_get_radiorev(ah);
+ switch (val & AR_RADIO_SREV_MAJOR) {
+ case 0:
+ val = AR_RAD5133_SREV_MAJOR;
+ break;
+ case AR_RAD5133_SREV_MAJOR:
+ case AR_RAD5122_SREV_MAJOR:
+ case AR_RAD2133_SREV_MAJOR:
+ case AR_RAD2122_SREV_MAJOR:
+ break;
+ default:
+ ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
+ "Radio Chip Rev 0x%02X not supported\n",
+ val & AR_RADIO_SREV_MAJOR);
+ return -EOPNOTSUPP;
+ }
+
+ ah->hw_version.analog5GhzRev = val;
+
+ return 0;
+}
+
+/*
+ * Enable ASYNC FIFO
+ *
+ * If Async FIFO is enabled, the following counters change because the MAC
+ * now runs at 117 MHz instead of the 88/44 MHz used when Async FIFO is disabled.
+ *
+ * The values below tested for ht40 2 chain.
+ * Overwrite the delay/timeouts initialized in process ini.
+ */
+void ar9002_hw_enable_async_fifo(struct ath_hw *ah)
+{
+ if (AR_SREV_9287_12_OR_LATER(ah)) {
+ REG_WRITE(ah, AR_D_GBL_IFS_SIFS,
+ AR_D_GBL_IFS_SIFS_ASYNC_FIFO_DUR);
+ REG_WRITE(ah, AR_D_GBL_IFS_SLOT,
+ AR_D_GBL_IFS_SLOT_ASYNC_FIFO_DUR);
+ REG_WRITE(ah, AR_D_GBL_IFS_EIFS,
+ AR_D_GBL_IFS_EIFS_ASYNC_FIFO_DUR);
+
+ REG_WRITE(ah, AR_TIME_OUT, AR_TIME_OUT_ACK_CTS_ASYNC_FIFO_DUR);
+ REG_WRITE(ah, AR_USEC, AR_USEC_ASYNC_FIFO_DUR);
+
+ REG_SET_BIT(ah, AR_MAC_PCU_LOGIC_ANALYZER,
+ AR_MAC_PCU_LOGIC_ANALYZER_DISBUG20768);
+ REG_RMW_FIELD(ah, AR_AHB_MODE, AR_AHB_CUSTOM_BURST_EN,
+ AR_AHB_CUSTOM_BURST_ASYNC_FIFO_VAL);
+ }
+}
+
+/*
+ * We don't enable WEP aggregation in mac80211, but we keep this
+ * around for HAL unification purposes.
+ */
+void ar9002_hw_enable_wep_aggregation(struct ath_hw *ah)
+{
+ if (AR_SREV_9287_12_OR_LATER(ah)) {
+ REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
+ AR_PCU_MISC_MODE2_ENABLE_AGGWEP);
+ }
+}
+
+/* Sets up the AR5008/AR9001/AR9002 hardware family callbacks */
+void ar9002_hw_attach_ops(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+ struct ath_hw_ops *ops = ath9k_hw_ops(ah);
+
+ priv_ops->init_mode_regs = ar9002_hw_init_mode_regs;
+ priv_ops->init_mode_gain_regs = ar9002_hw_init_mode_gain_regs;
+ priv_ops->macversion_supported = ar9002_hw_macversion_supported;
+
+ ops->config_pci_powersave = ar9002_hw_configpcipowersave;
+
+ ar5008_hw_attach_phy_ops(ah);
+ if (AR_SREV_9280_10_OR_LATER(ah))
+ ar9002_hw_attach_phy_ops(ah);
+
+ ar9002_hw_attach_calib_ops(ah);
+ ar9002_hw_attach_mac_ops(ah);
+}
diff --git a/drivers/net/wireless/ath/ath9k/initvals.h b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
index 8a3bf3ab998d..dae7f3304eb8 100644
--- a/drivers/net/wireless/ath/ath9k/initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008-2009 Atheros Communications Inc.
+ * Copyright (c) 2010 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -14,1982 +14,9 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-static const u32 ar5416Modes[][6] = {
- { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
- { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
- { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
- { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
- { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
- { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
- { 0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810 },
- { 0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a, 0x0000320a },
- { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
- { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
- { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
- { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
- { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
- { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
- { 0x00009844, 0x1372161e, 0x1372161e, 0x137216a0, 0x137216a0, 0x137216a0 },
- { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
- { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
- { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
- { 0x00009850, 0x6c48b4e0, 0x6d48b4e0, 0x6d48b0de, 0x6c48b0de, 0x6c48b0de },
- { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
- { 0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e, 0x31395d5e },
- { 0x00009860, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18 },
- { 0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
- { 0x00009868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190 },
- { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
- { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
- { 0x00009918, 0x000001b8, 0x00000370, 0x00000268, 0x00000134, 0x00000134 },
- { 0x00009924, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b },
- { 0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020 },
- { 0x00009960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
- { 0x0000a960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
- { 0x0000b960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
- { 0x00009964, 0x00000000, 0x00000000, 0x00001120, 0x00001120, 0x00001120 },
- { 0x000099bc, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00 },
- { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
- { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
- { 0x000099c8, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c },
- { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
- { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
- { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
- { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
- { 0x0000a20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
- { 0x0000b20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
- { 0x0000c20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
- { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
- { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
- { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
- { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
- { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
- { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
- { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
- { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
- { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
- { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
- { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
- { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
- { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
- { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
-};
-
-static const u32 ar5416Common[][2] = {
- { 0x0000000c, 0x00000000 },
- { 0x00000030, 0x00020015 },
- { 0x00000034, 0x00000005 },
- { 0x00000040, 0x00000000 },
- { 0x00000044, 0x00000008 },
- { 0x00000048, 0x00000008 },
- { 0x0000004c, 0x00000010 },
- { 0x00000050, 0x00000000 },
- { 0x00000054, 0x0000001f },
- { 0x00000800, 0x00000000 },
- { 0x00000804, 0x00000000 },
- { 0x00000808, 0x00000000 },
- { 0x0000080c, 0x00000000 },
- { 0x00000810, 0x00000000 },
- { 0x00000814, 0x00000000 },
- { 0x00000818, 0x00000000 },
- { 0x0000081c, 0x00000000 },
- { 0x00000820, 0x00000000 },
- { 0x00000824, 0x00000000 },
- { 0x00001040, 0x002ffc0f },
- { 0x00001044, 0x002ffc0f },
- { 0x00001048, 0x002ffc0f },
- { 0x0000104c, 0x002ffc0f },
- { 0x00001050, 0x002ffc0f },
- { 0x00001054, 0x002ffc0f },
- { 0x00001058, 0x002ffc0f },
- { 0x0000105c, 0x002ffc0f },
- { 0x00001060, 0x002ffc0f },
- { 0x00001064, 0x002ffc0f },
- { 0x00001230, 0x00000000 },
- { 0x00001270, 0x00000000 },
- { 0x00001038, 0x00000000 },
- { 0x00001078, 0x00000000 },
- { 0x000010b8, 0x00000000 },
- { 0x000010f8, 0x00000000 },
- { 0x00001138, 0x00000000 },
- { 0x00001178, 0x00000000 },
- { 0x000011b8, 0x00000000 },
- { 0x000011f8, 0x00000000 },
- { 0x00001238, 0x00000000 },
- { 0x00001278, 0x00000000 },
- { 0x000012b8, 0x00000000 },
- { 0x000012f8, 0x00000000 },
- { 0x00001338, 0x00000000 },
- { 0x00001378, 0x00000000 },
- { 0x000013b8, 0x00000000 },
- { 0x000013f8, 0x00000000 },
- { 0x00001438, 0x00000000 },
- { 0x00001478, 0x00000000 },
- { 0x000014b8, 0x00000000 },
- { 0x000014f8, 0x00000000 },
- { 0x00001538, 0x00000000 },
- { 0x00001578, 0x00000000 },
- { 0x000015b8, 0x00000000 },
- { 0x000015f8, 0x00000000 },
- { 0x00001638, 0x00000000 },
- { 0x00001678, 0x00000000 },
- { 0x000016b8, 0x00000000 },
- { 0x000016f8, 0x00000000 },
- { 0x00001738, 0x00000000 },
- { 0x00001778, 0x00000000 },
- { 0x000017b8, 0x00000000 },
- { 0x000017f8, 0x00000000 },
- { 0x0000103c, 0x00000000 },
- { 0x0000107c, 0x00000000 },
- { 0x000010bc, 0x00000000 },
- { 0x000010fc, 0x00000000 },
- { 0x0000113c, 0x00000000 },
- { 0x0000117c, 0x00000000 },
- { 0x000011bc, 0x00000000 },
- { 0x000011fc, 0x00000000 },
- { 0x0000123c, 0x00000000 },
- { 0x0000127c, 0x00000000 },
- { 0x000012bc, 0x00000000 },
- { 0x000012fc, 0x00000000 },
- { 0x0000133c, 0x00000000 },
- { 0x0000137c, 0x00000000 },
- { 0x000013bc, 0x00000000 },
- { 0x000013fc, 0x00000000 },
- { 0x0000143c, 0x00000000 },
- { 0x0000147c, 0x00000000 },
- { 0x00004030, 0x00000002 },
- { 0x0000403c, 0x00000002 },
- { 0x00007010, 0x00000000 },
- { 0x00007038, 0x000004c2 },
- { 0x00008004, 0x00000000 },
- { 0x00008008, 0x00000000 },
- { 0x0000800c, 0x00000000 },
- { 0x00008018, 0x00000700 },
- { 0x00008020, 0x00000000 },
- { 0x00008038, 0x00000000 },
- { 0x0000803c, 0x00000000 },
- { 0x00008048, 0x40000000 },
- { 0x00008054, 0x00000000 },
- { 0x00008058, 0x00000000 },
- { 0x0000805c, 0x000fc78f },
- { 0x00008060, 0x0000000f },
- { 0x00008064, 0x00000000 },
- { 0x000080c0, 0x2a82301a },
- { 0x000080c4, 0x05dc01e0 },
- { 0x000080c8, 0x1f402710 },
- { 0x000080cc, 0x01f40000 },
- { 0x000080d0, 0x00001e00 },
- { 0x000080d4, 0x00000000 },
- { 0x000080d8, 0x00400000 },
- { 0x000080e0, 0xffffffff },
- { 0x000080e4, 0x0000ffff },
- { 0x000080e8, 0x003f3f3f },
- { 0x000080ec, 0x00000000 },
- { 0x000080f0, 0x00000000 },
- { 0x000080f4, 0x00000000 },
- { 0x000080f8, 0x00000000 },
- { 0x000080fc, 0x00020000 },
- { 0x00008100, 0x00020000 },
- { 0x00008104, 0x00000001 },
- { 0x00008108, 0x00000052 },
- { 0x0000810c, 0x00000000 },
- { 0x00008110, 0x00000168 },
- { 0x00008118, 0x000100aa },
- { 0x0000811c, 0x00003210 },
- { 0x00008124, 0x00000000 },
- { 0x00008128, 0x00000000 },
- { 0x0000812c, 0x00000000 },
- { 0x00008130, 0x00000000 },
- { 0x00008134, 0x00000000 },
- { 0x00008138, 0x00000000 },
- { 0x0000813c, 0x00000000 },
- { 0x00008144, 0xffffffff },
- { 0x00008168, 0x00000000 },
- { 0x0000816c, 0x00000000 },
- { 0x00008170, 0x32143320 },
- { 0x00008174, 0xfaa4fa50 },
- { 0x00008178, 0x00000100 },
- { 0x0000817c, 0x00000000 },
- { 0x000081c4, 0x00000000 },
- { 0x000081ec, 0x00000000 },
- { 0x000081f0, 0x00000000 },
- { 0x000081f4, 0x00000000 },
- { 0x000081f8, 0x00000000 },
- { 0x000081fc, 0x00000000 },
- { 0x00008200, 0x00000000 },
- { 0x00008204, 0x00000000 },
- { 0x00008208, 0x00000000 },
- { 0x0000820c, 0x00000000 },
- { 0x00008210, 0x00000000 },
- { 0x00008214, 0x00000000 },
- { 0x00008218, 0x00000000 },
- { 0x0000821c, 0x00000000 },
- { 0x00008220, 0x00000000 },
- { 0x00008224, 0x00000000 },
- { 0x00008228, 0x00000000 },
- { 0x0000822c, 0x00000000 },
- { 0x00008230, 0x00000000 },
- { 0x00008234, 0x00000000 },
- { 0x00008238, 0x00000000 },
- { 0x0000823c, 0x00000000 },
- { 0x00008240, 0x00100000 },
- { 0x00008244, 0x0010f400 },
- { 0x00008248, 0x00000100 },
- { 0x0000824c, 0x0001e800 },
- { 0x00008250, 0x00000000 },
- { 0x00008254, 0x00000000 },
- { 0x00008258, 0x00000000 },
- { 0x0000825c, 0x400000ff },
- { 0x00008260, 0x00080922 },
- { 0x00008264, 0xa8000010 },
- { 0x00008270, 0x00000000 },
- { 0x00008274, 0x40000000 },
- { 0x00008278, 0x003e4180 },
- { 0x0000827c, 0x00000000 },
- { 0x00008284, 0x0000002c },
- { 0x00008288, 0x0000002c },
- { 0x0000828c, 0x00000000 },
- { 0x00008294, 0x00000000 },
- { 0x00008298, 0x00000000 },
- { 0x00008300, 0x00000000 },
- { 0x00008304, 0x00000000 },
- { 0x00008308, 0x00000000 },
- { 0x0000830c, 0x00000000 },
- { 0x00008310, 0x00000000 },
- { 0x00008314, 0x00000000 },
- { 0x00008318, 0x00000000 },
- { 0x00008328, 0x00000000 },
- { 0x0000832c, 0x00000007 },
- { 0x00008330, 0x00000302 },
- { 0x00008334, 0x00000e00 },
- { 0x00008338, 0x00070000 },
- { 0x0000833c, 0x00000000 },
- { 0x00008340, 0x000107ff },
- { 0x00009808, 0x00000000 },
- { 0x0000980c, 0xad848e19 },
- { 0x00009810, 0x7d14e000 },
- { 0x00009814, 0x9c0a9f6b },
- { 0x0000981c, 0x00000000 },
- { 0x0000982c, 0x0000a000 },
- { 0x00009830, 0x00000000 },
- { 0x0000983c, 0x00200400 },
- { 0x00009840, 0x206a002e },
- { 0x0000984c, 0x1284233c },
- { 0x00009854, 0x00000859 },
- { 0x00009900, 0x00000000 },
- { 0x00009904, 0x00000000 },
- { 0x00009908, 0x00000000 },
- { 0x0000990c, 0x00000000 },
- { 0x0000991c, 0x10000fff },
- { 0x00009920, 0x05100000 },
- { 0x0000a920, 0x05100000 },
- { 0x0000b920, 0x05100000 },
- { 0x00009928, 0x00000001 },
- { 0x0000992c, 0x00000004 },
- { 0x00009934, 0x1e1f2022 },
- { 0x00009938, 0x0a0b0c0d },
- { 0x0000993c, 0x00000000 },
- { 0x00009948, 0x9280b212 },
- { 0x0000994c, 0x00020028 },
- { 0x00009954, 0x5d50e188 },
- { 0x00009958, 0x00081fff },
- { 0x0000c95c, 0x004b6a8e },
- { 0x0000c968, 0x000003ce },
- { 0x00009970, 0x190fb515 },
- { 0x00009974, 0x00000000 },
- { 0x00009978, 0x00000001 },
- { 0x0000997c, 0x00000000 },
- { 0x00009980, 0x00000000 },
- { 0x00009984, 0x00000000 },
- { 0x00009988, 0x00000000 },
- { 0x0000998c, 0x00000000 },
- { 0x00009990, 0x00000000 },
- { 0x00009994, 0x00000000 },
- { 0x00009998, 0x00000000 },
- { 0x0000999c, 0x00000000 },
- { 0x000099a0, 0x00000000 },
- { 0x000099a4, 0x00000001 },
- { 0x000099a8, 0x001fff00 },
- { 0x000099ac, 0x00000000 },
- { 0x000099b0, 0x03051000 },
- { 0x000099dc, 0x00000000 },
- { 0x000099e0, 0x00000200 },
- { 0x000099e4, 0xaaaaaaaa },
- { 0x000099e8, 0x3c466478 },
- { 0x000099ec, 0x000000aa },
- { 0x000099fc, 0x00001042 },
- { 0x00009b00, 0x00000000 },
- { 0x00009b04, 0x00000001 },
- { 0x00009b08, 0x00000002 },
- { 0x00009b0c, 0x00000003 },
- { 0x00009b10, 0x00000004 },
- { 0x00009b14, 0x00000005 },
- { 0x00009b18, 0x00000008 },
- { 0x00009b1c, 0x00000009 },
- { 0x00009b20, 0x0000000a },
- { 0x00009b24, 0x0000000b },
- { 0x00009b28, 0x0000000c },
- { 0x00009b2c, 0x0000000d },
- { 0x00009b30, 0x00000010 },
- { 0x00009b34, 0x00000011 },
- { 0x00009b38, 0x00000012 },
- { 0x00009b3c, 0x00000013 },
- { 0x00009b40, 0x00000014 },
- { 0x00009b44, 0x00000015 },
- { 0x00009b48, 0x00000018 },
- { 0x00009b4c, 0x00000019 },
- { 0x00009b50, 0x0000001a },
- { 0x00009b54, 0x0000001b },
- { 0x00009b58, 0x0000001c },
- { 0x00009b5c, 0x0000001d },
- { 0x00009b60, 0x00000020 },
- { 0x00009b64, 0x00000021 },
- { 0x00009b68, 0x00000022 },
- { 0x00009b6c, 0x00000023 },
- { 0x00009b70, 0x00000024 },
- { 0x00009b74, 0x00000025 },
- { 0x00009b78, 0x00000028 },
- { 0x00009b7c, 0x00000029 },
- { 0x00009b80, 0x0000002a },
- { 0x00009b84, 0x0000002b },
- { 0x00009b88, 0x0000002c },
- { 0x00009b8c, 0x0000002d },
- { 0x00009b90, 0x00000030 },
- { 0x00009b94, 0x00000031 },
- { 0x00009b98, 0x00000032 },
- { 0x00009b9c, 0x00000033 },
- { 0x00009ba0, 0x00000034 },
- { 0x00009ba4, 0x00000035 },
- { 0x00009ba8, 0x00000035 },
- { 0x00009bac, 0x00000035 },
- { 0x00009bb0, 0x00000035 },
- { 0x00009bb4, 0x00000035 },
- { 0x00009bb8, 0x00000035 },
- { 0x00009bbc, 0x00000035 },
- { 0x00009bc0, 0x00000035 },
- { 0x00009bc4, 0x00000035 },
- { 0x00009bc8, 0x00000035 },
- { 0x00009bcc, 0x00000035 },
- { 0x00009bd0, 0x00000035 },
- { 0x00009bd4, 0x00000035 },
- { 0x00009bd8, 0x00000035 },
- { 0x00009bdc, 0x00000035 },
- { 0x00009be0, 0x00000035 },
- { 0x00009be4, 0x00000035 },
- { 0x00009be8, 0x00000035 },
- { 0x00009bec, 0x00000035 },
- { 0x00009bf0, 0x00000035 },
- { 0x00009bf4, 0x00000035 },
- { 0x00009bf8, 0x00000010 },
- { 0x00009bfc, 0x0000001a },
- { 0x0000a210, 0x40806333 },
- { 0x0000a214, 0x00106c10 },
- { 0x0000a218, 0x009c4060 },
- { 0x0000a220, 0x018830c6 },
- { 0x0000a224, 0x00000400 },
- { 0x0000a228, 0x00000bb5 },
- { 0x0000a22c, 0x00000011 },
- { 0x0000a234, 0x20202020 },
- { 0x0000a238, 0x20202020 },
- { 0x0000a23c, 0x13c889af },
- { 0x0000a240, 0x38490a20 },
- { 0x0000a244, 0x00007bb6 },
- { 0x0000a248, 0x0fff3ffc },
- { 0x0000a24c, 0x00000001 },
- { 0x0000a250, 0x0000a000 },
- { 0x0000a254, 0x00000000 },
- { 0x0000a258, 0x0cc75380 },
- { 0x0000a25c, 0x0f0f0f01 },
- { 0x0000a260, 0xdfa91f01 },
- { 0x0000a268, 0x00000000 },
- { 0x0000a26c, 0x0e79e5c6 },
- { 0x0000b26c, 0x0e79e5c6 },
- { 0x0000c26c, 0x0e79e5c6 },
- { 0x0000d270, 0x00820820 },
- { 0x0000a278, 0x1ce739ce },
- { 0x0000a27c, 0x051701ce },
- { 0x0000a338, 0x00000000 },
- { 0x0000a33c, 0x00000000 },
- { 0x0000a340, 0x00000000 },
- { 0x0000a344, 0x00000000 },
- { 0x0000a348, 0x3fffffff },
- { 0x0000a34c, 0x3fffffff },
- { 0x0000a350, 0x3fffffff },
- { 0x0000a354, 0x0003ffff },
- { 0x0000a358, 0x79a8aa1f },
- { 0x0000d35c, 0x07ffffef },
- { 0x0000d360, 0x0fffffe7 },
- { 0x0000d364, 0x17ffffe5 },
- { 0x0000d368, 0x1fffffe4 },
- { 0x0000d36c, 0x37ffffe3 },
- { 0x0000d370, 0x3fffffe3 },
- { 0x0000d374, 0x57ffffe3 },
- { 0x0000d378, 0x5fffffe2 },
- { 0x0000d37c, 0x7fffffe2 },
- { 0x0000d380, 0x7f3c7bba },
- { 0x0000d384, 0xf3307ff0 },
- { 0x0000a388, 0x08000000 },
- { 0x0000a38c, 0x20202020 },
- { 0x0000a390, 0x20202020 },
- { 0x0000a394, 0x1ce739ce },
- { 0x0000a398, 0x000001ce },
- { 0x0000a39c, 0x00000001 },
- { 0x0000a3a0, 0x00000000 },
- { 0x0000a3a4, 0x00000000 },
- { 0x0000a3a8, 0x00000000 },
- { 0x0000a3ac, 0x00000000 },
- { 0x0000a3b0, 0x00000000 },
- { 0x0000a3b4, 0x00000000 },
- { 0x0000a3b8, 0x00000000 },
- { 0x0000a3bc, 0x00000000 },
- { 0x0000a3c0, 0x00000000 },
- { 0x0000a3c4, 0x00000000 },
- { 0x0000a3c8, 0x00000246 },
- { 0x0000a3cc, 0x20202020 },
- { 0x0000a3d0, 0x20202020 },
- { 0x0000a3d4, 0x20202020 },
- { 0x0000a3dc, 0x1ce739ce },
- { 0x0000a3e0, 0x000001ce },
-};
-
-static const u32 ar5416Bank0[][2] = {
- { 0x000098b0, 0x1e5795e5 },
- { 0x000098e0, 0x02008020 },
-};
-
-static const u32 ar5416BB_RfGain[][3] = {
- { 0x00009a00, 0x00000000, 0x00000000 },
- { 0x00009a04, 0x00000040, 0x00000040 },
- { 0x00009a08, 0x00000080, 0x00000080 },
- { 0x00009a0c, 0x000001a1, 0x00000141 },
- { 0x00009a10, 0x000001e1, 0x00000181 },
- { 0x00009a14, 0x00000021, 0x000001c1 },
- { 0x00009a18, 0x00000061, 0x00000001 },
- { 0x00009a1c, 0x00000168, 0x00000041 },
- { 0x00009a20, 0x000001a8, 0x000001a8 },
- { 0x00009a24, 0x000001e8, 0x000001e8 },
- { 0x00009a28, 0x00000028, 0x00000028 },
- { 0x00009a2c, 0x00000068, 0x00000068 },
- { 0x00009a30, 0x00000189, 0x000000a8 },
- { 0x00009a34, 0x000001c9, 0x00000169 },
- { 0x00009a38, 0x00000009, 0x000001a9 },
- { 0x00009a3c, 0x00000049, 0x000001e9 },
- { 0x00009a40, 0x00000089, 0x00000029 },
- { 0x00009a44, 0x00000170, 0x00000069 },
- { 0x00009a48, 0x000001b0, 0x00000190 },
- { 0x00009a4c, 0x000001f0, 0x000001d0 },
- { 0x00009a50, 0x00000030, 0x00000010 },
- { 0x00009a54, 0x00000070, 0x00000050 },
- { 0x00009a58, 0x00000191, 0x00000090 },
- { 0x00009a5c, 0x000001d1, 0x00000151 },
- { 0x00009a60, 0x00000011, 0x00000191 },
- { 0x00009a64, 0x00000051, 0x000001d1 },
- { 0x00009a68, 0x00000091, 0x00000011 },
- { 0x00009a6c, 0x000001b8, 0x00000051 },
- { 0x00009a70, 0x000001f8, 0x00000198 },
- { 0x00009a74, 0x00000038, 0x000001d8 },
- { 0x00009a78, 0x00000078, 0x00000018 },
- { 0x00009a7c, 0x00000199, 0x00000058 },
- { 0x00009a80, 0x000001d9, 0x00000098 },
- { 0x00009a84, 0x00000019, 0x00000159 },
- { 0x00009a88, 0x00000059, 0x00000199 },
- { 0x00009a8c, 0x00000099, 0x000001d9 },
- { 0x00009a90, 0x000000d9, 0x00000019 },
- { 0x00009a94, 0x000000f9, 0x00000059 },
- { 0x00009a98, 0x000000f9, 0x00000099 },
- { 0x00009a9c, 0x000000f9, 0x000000d9 },
- { 0x00009aa0, 0x000000f9, 0x000000f9 },
- { 0x00009aa4, 0x000000f9, 0x000000f9 },
- { 0x00009aa8, 0x000000f9, 0x000000f9 },
- { 0x00009aac, 0x000000f9, 0x000000f9 },
- { 0x00009ab0, 0x000000f9, 0x000000f9 },
- { 0x00009ab4, 0x000000f9, 0x000000f9 },
- { 0x00009ab8, 0x000000f9, 0x000000f9 },
- { 0x00009abc, 0x000000f9, 0x000000f9 },
- { 0x00009ac0, 0x000000f9, 0x000000f9 },
- { 0x00009ac4, 0x000000f9, 0x000000f9 },
- { 0x00009ac8, 0x000000f9, 0x000000f9 },
- { 0x00009acc, 0x000000f9, 0x000000f9 },
- { 0x00009ad0, 0x000000f9, 0x000000f9 },
- { 0x00009ad4, 0x000000f9, 0x000000f9 },
- { 0x00009ad8, 0x000000f9, 0x000000f9 },
- { 0x00009adc, 0x000000f9, 0x000000f9 },
- { 0x00009ae0, 0x000000f9, 0x000000f9 },
- { 0x00009ae4, 0x000000f9, 0x000000f9 },
- { 0x00009ae8, 0x000000f9, 0x000000f9 },
- { 0x00009aec, 0x000000f9, 0x000000f9 },
- { 0x00009af0, 0x000000f9, 0x000000f9 },
- { 0x00009af4, 0x000000f9, 0x000000f9 },
- { 0x00009af8, 0x000000f9, 0x000000f9 },
- { 0x00009afc, 0x000000f9, 0x000000f9 },
-};
-
-static const u32 ar5416Bank1[][2] = {
- { 0x000098b0, 0x02108421 },
- { 0x000098ec, 0x00000008 },
-};
-
-static const u32 ar5416Bank2[][2] = {
- { 0x000098b0, 0x0e73ff17 },
- { 0x000098e0, 0x00000420 },
-};
-
-static const u32 ar5416Bank3[][3] = {
- { 0x000098f0, 0x01400018, 0x01c00018 },
-};
-
-static const u32 ar5416Bank6[][3] = {
-
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00e00000, 0x00e00000 },
- { 0x0000989c, 0x005e0000, 0x005e0000 },
- { 0x0000989c, 0x00120000, 0x00120000 },
- { 0x0000989c, 0x00620000, 0x00620000 },
- { 0x0000989c, 0x00020000, 0x00020000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x40ff0000, 0x40ff0000 },
- { 0x0000989c, 0x005f0000, 0x005f0000 },
- { 0x0000989c, 0x00870000, 0x00870000 },
- { 0x0000989c, 0x00f90000, 0x00f90000 },
- { 0x0000989c, 0x007b0000, 0x007b0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00f50000, 0x00f50000 },
- { 0x0000989c, 0x00dc0000, 0x00dc0000 },
- { 0x0000989c, 0x00110000, 0x00110000 },
- { 0x0000989c, 0x006100a8, 0x006100a8 },
- { 0x0000989c, 0x004210a2, 0x004210a2 },
- { 0x0000989c, 0x0014008f, 0x0014008f },
- { 0x0000989c, 0x00c40003, 0x00c40003 },
- { 0x0000989c, 0x003000f2, 0x003000f2 },
- { 0x0000989c, 0x00440016, 0x00440016 },
- { 0x0000989c, 0x00410040, 0x00410040 },
- { 0x0000989c, 0x0001805e, 0x0001805e },
- { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
- { 0x0000989c, 0x000000f1, 0x000000f1 },
- { 0x0000989c, 0x00002081, 0x00002081 },
- { 0x0000989c, 0x000000d4, 0x000000d4 },
- { 0x000098d0, 0x0000000f, 0x0010000f },
-};
-
-static const u32 ar5416Bank6TPC[][3] = {
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00e00000, 0x00e00000 },
- { 0x0000989c, 0x005e0000, 0x005e0000 },
- { 0x0000989c, 0x00120000, 0x00120000 },
- { 0x0000989c, 0x00620000, 0x00620000 },
- { 0x0000989c, 0x00020000, 0x00020000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x40ff0000, 0x40ff0000 },
- { 0x0000989c, 0x005f0000, 0x005f0000 },
- { 0x0000989c, 0x00870000, 0x00870000 },
- { 0x0000989c, 0x00f90000, 0x00f90000 },
- { 0x0000989c, 0x007b0000, 0x007b0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00f50000, 0x00f50000 },
- { 0x0000989c, 0x00dc0000, 0x00dc0000 },
- { 0x0000989c, 0x00110000, 0x00110000 },
- { 0x0000989c, 0x006100a8, 0x006100a8 },
- { 0x0000989c, 0x00423022, 0x00423022 },
- { 0x0000989c, 0x201400df, 0x201400df },
- { 0x0000989c, 0x00c40002, 0x00c40002 },
- { 0x0000989c, 0x003000f2, 0x003000f2 },
- { 0x0000989c, 0x00440016, 0x00440016 },
- { 0x0000989c, 0x00410040, 0x00410040 },
- { 0x0000989c, 0x0001805e, 0x0001805e },
- { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
- { 0x0000989c, 0x000000e1, 0x000000e1 },
- { 0x0000989c, 0x00007081, 0x00007081 },
- { 0x0000989c, 0x000000d4, 0x000000d4 },
- { 0x000098d0, 0x0000000f, 0x0010000f },
-};
-
-static const u32 ar5416Bank7[][2] = {
- { 0x0000989c, 0x00000500 },
- { 0x0000989c, 0x00000800 },
- { 0x000098cc, 0x0000000e },
-};
-
-static const u32 ar5416Addac[][2] = {
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000003 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x0000000c },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000030 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000060 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000058 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x000098cc, 0x00000000 },
-};
-
-static const u32 ar5416Modes_9100[][6] = {
- { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
- { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
- { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
- { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
- { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
- { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
- { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
- { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
- { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
- { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
- { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
- { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
- { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
- { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
- { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
- { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
- { 0x00009850, 0x6d48b4e2, 0x6d48b4e2, 0x6d48b0e2, 0x6d48b0e2, 0x6d48b0e2 },
- { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec86d2e, 0x7ec84d2e, 0x7ec82d2e },
- { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e },
- { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
- { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
- { 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 },
- { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
- { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 },
- { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
- { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a11, 0xd00a8a0d, 0xd00a8a0d },
- { 0x00009940, 0x00754604, 0x00754604, 0xfff81204, 0xfff81204, 0xfff81204 },
- { 0x00009944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020 },
- { 0x00009954, 0x5f3ca3de, 0x5f3ca3de, 0xe250a51e, 0xe250a51e, 0xe250a51e },
- { 0x00009958, 0x2108ecff, 0x2108ecff, 0x3388ffff, 0x3388ffff, 0x3388ffff },
-#ifdef TB243
- { 0x00009960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
- { 0x0000a960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
- { 0x0000b960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
- { 0x00009964, 0x00000000, 0x00000000, 0x00002210, 0x00002210, 0x00001120 },
-#else
- { 0x00009960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
- { 0x0000a960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
- { 0x0000b960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
- { 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 },
-#endif
- { 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a1000, 0x001a0c00, 0x001a0c00 },
- { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
- { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
- { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
- { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
- { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
- { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
- { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
- { 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
- { 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
- { 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
- { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
- { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
- { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
- { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
- { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
- { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
- { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
- { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
- { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
- { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
- { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
- { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
- { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
- { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
-};
-
-static const u32 ar5416Common_9100[][2] = {
- { 0x0000000c, 0x00000000 },
- { 0x00000030, 0x00020015 },
- { 0x00000034, 0x00000005 },
- { 0x00000040, 0x00000000 },
- { 0x00000044, 0x00000008 },
- { 0x00000048, 0x00000008 },
- { 0x0000004c, 0x00000010 },
- { 0x00000050, 0x00000000 },
- { 0x00000054, 0x0000001f },
- { 0x00000800, 0x00000000 },
- { 0x00000804, 0x00000000 },
- { 0x00000808, 0x00000000 },
- { 0x0000080c, 0x00000000 },
- { 0x00000810, 0x00000000 },
- { 0x00000814, 0x00000000 },
- { 0x00000818, 0x00000000 },
- { 0x0000081c, 0x00000000 },
- { 0x00000820, 0x00000000 },
- { 0x00000824, 0x00000000 },
- { 0x00001040, 0x002ffc0f },
- { 0x00001044, 0x002ffc0f },
- { 0x00001048, 0x002ffc0f },
- { 0x0000104c, 0x002ffc0f },
- { 0x00001050, 0x002ffc0f },
- { 0x00001054, 0x002ffc0f },
- { 0x00001058, 0x002ffc0f },
- { 0x0000105c, 0x002ffc0f },
- { 0x00001060, 0x002ffc0f },
- { 0x00001064, 0x002ffc0f },
- { 0x00001230, 0x00000000 },
- { 0x00001270, 0x00000000 },
- { 0x00001038, 0x00000000 },
- { 0x00001078, 0x00000000 },
- { 0x000010b8, 0x00000000 },
- { 0x000010f8, 0x00000000 },
- { 0x00001138, 0x00000000 },
- { 0x00001178, 0x00000000 },
- { 0x000011b8, 0x00000000 },
- { 0x000011f8, 0x00000000 },
- { 0x00001238, 0x00000000 },
- { 0x00001278, 0x00000000 },
- { 0x000012b8, 0x00000000 },
- { 0x000012f8, 0x00000000 },
- { 0x00001338, 0x00000000 },
- { 0x00001378, 0x00000000 },
- { 0x000013b8, 0x00000000 },
- { 0x000013f8, 0x00000000 },
- { 0x00001438, 0x00000000 },
- { 0x00001478, 0x00000000 },
- { 0x000014b8, 0x00000000 },
- { 0x000014f8, 0x00000000 },
- { 0x00001538, 0x00000000 },
- { 0x00001578, 0x00000000 },
- { 0x000015b8, 0x00000000 },
- { 0x000015f8, 0x00000000 },
- { 0x00001638, 0x00000000 },
- { 0x00001678, 0x00000000 },
- { 0x000016b8, 0x00000000 },
- { 0x000016f8, 0x00000000 },
- { 0x00001738, 0x00000000 },
- { 0x00001778, 0x00000000 },
- { 0x000017b8, 0x00000000 },
- { 0x000017f8, 0x00000000 },
- { 0x0000103c, 0x00000000 },
- { 0x0000107c, 0x00000000 },
- { 0x000010bc, 0x00000000 },
- { 0x000010fc, 0x00000000 },
- { 0x0000113c, 0x00000000 },
- { 0x0000117c, 0x00000000 },
- { 0x000011bc, 0x00000000 },
- { 0x000011fc, 0x00000000 },
- { 0x0000123c, 0x00000000 },
- { 0x0000127c, 0x00000000 },
- { 0x000012bc, 0x00000000 },
- { 0x000012fc, 0x00000000 },
- { 0x0000133c, 0x00000000 },
- { 0x0000137c, 0x00000000 },
- { 0x000013bc, 0x00000000 },
- { 0x000013fc, 0x00000000 },
- { 0x0000143c, 0x00000000 },
- { 0x0000147c, 0x00000000 },
- { 0x00020010, 0x00000003 },
- { 0x00020038, 0x000004c2 },
- { 0x00008004, 0x00000000 },
- { 0x00008008, 0x00000000 },
- { 0x0000800c, 0x00000000 },
- { 0x00008018, 0x00000700 },
- { 0x00008020, 0x00000000 },
- { 0x00008038, 0x00000000 },
- { 0x0000803c, 0x00000000 },
- { 0x00008048, 0x40000000 },
- { 0x00008054, 0x00004000 },
- { 0x00008058, 0x00000000 },
- { 0x0000805c, 0x000fc78f },
- { 0x00008060, 0x0000000f },
- { 0x00008064, 0x00000000 },
- { 0x000080c0, 0x2a82301a },
- { 0x000080c4, 0x05dc01e0 },
- { 0x000080c8, 0x1f402710 },
- { 0x000080cc, 0x01f40000 },
- { 0x000080d0, 0x00001e00 },
- { 0x000080d4, 0x00000000 },
- { 0x000080d8, 0x00400000 },
- { 0x000080e0, 0xffffffff },
- { 0x000080e4, 0x0000ffff },
- { 0x000080e8, 0x003f3f3f },
- { 0x000080ec, 0x00000000 },
- { 0x000080f0, 0x00000000 },
- { 0x000080f4, 0x00000000 },
- { 0x000080f8, 0x00000000 },
- { 0x000080fc, 0x00020000 },
- { 0x00008100, 0x00020000 },
- { 0x00008104, 0x00000001 },
- { 0x00008108, 0x00000052 },
- { 0x0000810c, 0x00000000 },
- { 0x00008110, 0x00000168 },
- { 0x00008118, 0x000100aa },
- { 0x0000811c, 0x00003210 },
- { 0x00008120, 0x08f04800 },
- { 0x00008124, 0x00000000 },
- { 0x00008128, 0x00000000 },
- { 0x0000812c, 0x00000000 },
- { 0x00008130, 0x00000000 },
- { 0x00008134, 0x00000000 },
- { 0x00008138, 0x00000000 },
- { 0x0000813c, 0x00000000 },
- { 0x00008144, 0x00000000 },
- { 0x00008168, 0x00000000 },
- { 0x0000816c, 0x00000000 },
- { 0x00008170, 0x32143320 },
- { 0x00008174, 0xfaa4fa50 },
- { 0x00008178, 0x00000100 },
- { 0x0000817c, 0x00000000 },
- { 0x000081c4, 0x00000000 },
- { 0x000081d0, 0x00003210 },
- { 0x000081ec, 0x00000000 },
- { 0x000081f0, 0x00000000 },
- { 0x000081f4, 0x00000000 },
- { 0x000081f8, 0x00000000 },
- { 0x000081fc, 0x00000000 },
- { 0x00008200, 0x00000000 },
- { 0x00008204, 0x00000000 },
- { 0x00008208, 0x00000000 },
- { 0x0000820c, 0x00000000 },
- { 0x00008210, 0x00000000 },
- { 0x00008214, 0x00000000 },
- { 0x00008218, 0x00000000 },
- { 0x0000821c, 0x00000000 },
- { 0x00008220, 0x00000000 },
- { 0x00008224, 0x00000000 },
- { 0x00008228, 0x00000000 },
- { 0x0000822c, 0x00000000 },
- { 0x00008230, 0x00000000 },
- { 0x00008234, 0x00000000 },
- { 0x00008238, 0x00000000 },
- { 0x0000823c, 0x00000000 },
- { 0x00008240, 0x00100000 },
- { 0x00008244, 0x0010f400 },
- { 0x00008248, 0x00000100 },
- { 0x0000824c, 0x0001e800 },
- { 0x00008250, 0x00000000 },
- { 0x00008254, 0x00000000 },
- { 0x00008258, 0x00000000 },
- { 0x0000825c, 0x400000ff },
- { 0x00008260, 0x00080922 },
- { 0x00008270, 0x00000000 },
- { 0x00008274, 0x40000000 },
- { 0x00008278, 0x003e4180 },
- { 0x0000827c, 0x00000000 },
- { 0x00008284, 0x0000002c },
- { 0x00008288, 0x0000002c },
- { 0x0000828c, 0x00000000 },
- { 0x00008294, 0x00000000 },
- { 0x00008298, 0x00000000 },
- { 0x00008300, 0x00000000 },
- { 0x00008304, 0x00000000 },
- { 0x00008308, 0x00000000 },
- { 0x0000830c, 0x00000000 },
- { 0x00008310, 0x00000000 },
- { 0x00008314, 0x00000000 },
- { 0x00008318, 0x00000000 },
- { 0x00008328, 0x00000000 },
- { 0x0000832c, 0x00000007 },
- { 0x00008330, 0x00000302 },
- { 0x00008334, 0x00000e00 },
- { 0x00008338, 0x00000000 },
- { 0x0000833c, 0x00000000 },
- { 0x00008340, 0x000107ff },
- { 0x00009808, 0x00000000 },
- { 0x0000980c, 0xad848e19 },
- { 0x00009810, 0x7d14e000 },
- { 0x00009814, 0x9c0a9f6b },
- { 0x0000981c, 0x00000000 },
- { 0x0000982c, 0x0000a000 },
- { 0x00009830, 0x00000000 },
- { 0x0000983c, 0x00200400 },
- { 0x00009840, 0x206a01ae },
- { 0x0000984c, 0x1284233c },
- { 0x00009854, 0x00000859 },
- { 0x00009900, 0x00000000 },
- { 0x00009904, 0x00000000 },
- { 0x00009908, 0x00000000 },
- { 0x0000990c, 0x00000000 },
- { 0x0000991c, 0x10000fff },
- { 0x00009920, 0x05100000 },
- { 0x0000a920, 0x05100000 },
- { 0x0000b920, 0x05100000 },
- { 0x00009928, 0x00000001 },
- { 0x0000992c, 0x00000004 },
- { 0x00009934, 0x1e1f2022 },
- { 0x00009938, 0x0a0b0c0d },
- { 0x0000993c, 0x00000000 },
- { 0x00009948, 0x9280b212 },
- { 0x0000994c, 0x00020028 },
- { 0x0000c95c, 0x004b6a8e },
- { 0x0000c968, 0x000003ce },
- { 0x00009970, 0x190fb515 },
- { 0x00009974, 0x00000000 },
- { 0x00009978, 0x00000001 },
- { 0x0000997c, 0x00000000 },
- { 0x00009980, 0x00000000 },
- { 0x00009984, 0x00000000 },
- { 0x00009988, 0x00000000 },
- { 0x0000998c, 0x00000000 },
- { 0x00009990, 0x00000000 },
- { 0x00009994, 0x00000000 },
- { 0x00009998, 0x00000000 },
- { 0x0000999c, 0x00000000 },
- { 0x000099a0, 0x00000000 },
- { 0x000099a4, 0x00000001 },
- { 0x000099a8, 0x201fff00 },
- { 0x000099ac, 0x006f0000 },
- { 0x000099b0, 0x03051000 },
- { 0x000099dc, 0x00000000 },
- { 0x000099e0, 0x00000200 },
- { 0x000099e4, 0xaaaaaaaa },
- { 0x000099e8, 0x3c466478 },
- { 0x000099ec, 0x0cc80caa },
- { 0x000099fc, 0x00001042 },
- { 0x00009b00, 0x00000000 },
- { 0x00009b04, 0x00000001 },
- { 0x00009b08, 0x00000002 },
- { 0x00009b0c, 0x00000003 },
- { 0x00009b10, 0x00000004 },
- { 0x00009b14, 0x00000005 },
- { 0x00009b18, 0x00000008 },
- { 0x00009b1c, 0x00000009 },
- { 0x00009b20, 0x0000000a },
- { 0x00009b24, 0x0000000b },
- { 0x00009b28, 0x0000000c },
- { 0x00009b2c, 0x0000000d },
- { 0x00009b30, 0x00000010 },
- { 0x00009b34, 0x00000011 },
- { 0x00009b38, 0x00000012 },
- { 0x00009b3c, 0x00000013 },
- { 0x00009b40, 0x00000014 },
- { 0x00009b44, 0x00000015 },
- { 0x00009b48, 0x00000018 },
- { 0x00009b4c, 0x00000019 },
- { 0x00009b50, 0x0000001a },
- { 0x00009b54, 0x0000001b },
- { 0x00009b58, 0x0000001c },
- { 0x00009b5c, 0x0000001d },
- { 0x00009b60, 0x00000020 },
- { 0x00009b64, 0x00000021 },
- { 0x00009b68, 0x00000022 },
- { 0x00009b6c, 0x00000023 },
- { 0x00009b70, 0x00000024 },
- { 0x00009b74, 0x00000025 },
- { 0x00009b78, 0x00000028 },
- { 0x00009b7c, 0x00000029 },
- { 0x00009b80, 0x0000002a },
- { 0x00009b84, 0x0000002b },
- { 0x00009b88, 0x0000002c },
- { 0x00009b8c, 0x0000002d },
- { 0x00009b90, 0x00000030 },
- { 0x00009b94, 0x00000031 },
- { 0x00009b98, 0x00000032 },
- { 0x00009b9c, 0x00000033 },
- { 0x00009ba0, 0x00000034 },
- { 0x00009ba4, 0x00000035 },
- { 0x00009ba8, 0x00000035 },
- { 0x00009bac, 0x00000035 },
- { 0x00009bb0, 0x00000035 },
- { 0x00009bb4, 0x00000035 },
- { 0x00009bb8, 0x00000035 },
- { 0x00009bbc, 0x00000035 },
- { 0x00009bc0, 0x00000035 },
- { 0x00009bc4, 0x00000035 },
- { 0x00009bc8, 0x00000035 },
- { 0x00009bcc, 0x00000035 },
- { 0x00009bd0, 0x00000035 },
- { 0x00009bd4, 0x00000035 },
- { 0x00009bd8, 0x00000035 },
- { 0x00009bdc, 0x00000035 },
- { 0x00009be0, 0x00000035 },
- { 0x00009be4, 0x00000035 },
- { 0x00009be8, 0x00000035 },
- { 0x00009bec, 0x00000035 },
- { 0x00009bf0, 0x00000035 },
- { 0x00009bf4, 0x00000035 },
- { 0x00009bf8, 0x00000010 },
- { 0x00009bfc, 0x0000001a },
- { 0x0000a210, 0x40806333 },
- { 0x0000a214, 0x00106c10 },
- { 0x0000a218, 0x009c4060 },
- { 0x0000a220, 0x018830c6 },
- { 0x0000a224, 0x00000400 },
- { 0x0000a228, 0x001a0bb5 },
- { 0x0000a22c, 0x00000000 },
- { 0x0000a234, 0x20202020 },
- { 0x0000a238, 0x20202020 },
- { 0x0000a23c, 0x13c889ae },
- { 0x0000a240, 0x38490a20 },
- { 0x0000a244, 0x00007bb6 },
- { 0x0000a248, 0x0fff3ffc },
- { 0x0000a24c, 0x00000001 },
- { 0x0000a250, 0x0000a000 },
- { 0x0000a254, 0x00000000 },
- { 0x0000a258, 0x0cc75380 },
- { 0x0000a25c, 0x0f0f0f01 },
- { 0x0000a260, 0xdfa91f01 },
- { 0x0000a268, 0x00000001 },
- { 0x0000a26c, 0x0ebae9c6 },
- { 0x0000b26c, 0x0ebae9c6 },
- { 0x0000c26c, 0x0ebae9c6 },
- { 0x0000d270, 0x00820820 },
- { 0x0000a278, 0x1ce739ce },
- { 0x0000a27c, 0x050701ce },
- { 0x0000a338, 0x00000000 },
- { 0x0000a33c, 0x00000000 },
- { 0x0000a340, 0x00000000 },
- { 0x0000a344, 0x00000000 },
- { 0x0000a348, 0x3fffffff },
- { 0x0000a34c, 0x3fffffff },
- { 0x0000a350, 0x3fffffff },
- { 0x0000a354, 0x0003ffff },
- { 0x0000a358, 0x79a8aa33 },
- { 0x0000d35c, 0x07ffffef },
- { 0x0000d360, 0x0fffffe7 },
- { 0x0000d364, 0x17ffffe5 },
- { 0x0000d368, 0x1fffffe4 },
- { 0x0000d36c, 0x37ffffe3 },
- { 0x0000d370, 0x3fffffe3 },
- { 0x0000d374, 0x57ffffe3 },
- { 0x0000d378, 0x5fffffe2 },
- { 0x0000d37c, 0x7fffffe2 },
- { 0x0000d380, 0x7f3c7bba },
- { 0x0000d384, 0xf3307ff0 },
- { 0x0000a388, 0x0c000000 },
- { 0x0000a38c, 0x20202020 },
- { 0x0000a390, 0x20202020 },
- { 0x0000a394, 0x1ce739ce },
- { 0x0000a398, 0x000001ce },
- { 0x0000a39c, 0x00000001 },
- { 0x0000a3a0, 0x00000000 },
- { 0x0000a3a4, 0x00000000 },
- { 0x0000a3a8, 0x00000000 },
- { 0x0000a3ac, 0x00000000 },
- { 0x0000a3b0, 0x00000000 },
- { 0x0000a3b4, 0x00000000 },
- { 0x0000a3b8, 0x00000000 },
- { 0x0000a3bc, 0x00000000 },
- { 0x0000a3c0, 0x00000000 },
- { 0x0000a3c4, 0x00000000 },
- { 0x0000a3c8, 0x00000246 },
- { 0x0000a3cc, 0x20202020 },
- { 0x0000a3d0, 0x20202020 },
- { 0x0000a3d4, 0x20202020 },
- { 0x0000a3dc, 0x1ce739ce },
- { 0x0000a3e0, 0x000001ce },
-};
-
-static const u32 ar5416Bank0_9100[][2] = {
- { 0x000098b0, 0x1e5795e5 },
- { 0x000098e0, 0x02008020 },
-};
-
-static const u32 ar5416BB_RfGain_9100[][3] = {
- { 0x00009a00, 0x00000000, 0x00000000 },
- { 0x00009a04, 0x00000040, 0x00000040 },
- { 0x00009a08, 0x00000080, 0x00000080 },
- { 0x00009a0c, 0x000001a1, 0x00000141 },
- { 0x00009a10, 0x000001e1, 0x00000181 },
- { 0x00009a14, 0x00000021, 0x000001c1 },
- { 0x00009a18, 0x00000061, 0x00000001 },
- { 0x00009a1c, 0x00000168, 0x00000041 },
- { 0x00009a20, 0x000001a8, 0x000001a8 },
- { 0x00009a24, 0x000001e8, 0x000001e8 },
- { 0x00009a28, 0x00000028, 0x00000028 },
- { 0x00009a2c, 0x00000068, 0x00000068 },
- { 0x00009a30, 0x00000189, 0x000000a8 },
- { 0x00009a34, 0x000001c9, 0x00000169 },
- { 0x00009a38, 0x00000009, 0x000001a9 },
- { 0x00009a3c, 0x00000049, 0x000001e9 },
- { 0x00009a40, 0x00000089, 0x00000029 },
- { 0x00009a44, 0x00000170, 0x00000069 },
- { 0x00009a48, 0x000001b0, 0x00000190 },
- { 0x00009a4c, 0x000001f0, 0x000001d0 },
- { 0x00009a50, 0x00000030, 0x00000010 },
- { 0x00009a54, 0x00000070, 0x00000050 },
- { 0x00009a58, 0x00000191, 0x00000090 },
- { 0x00009a5c, 0x000001d1, 0x00000151 },
- { 0x00009a60, 0x00000011, 0x00000191 },
- { 0x00009a64, 0x00000051, 0x000001d1 },
- { 0x00009a68, 0x00000091, 0x00000011 },
- { 0x00009a6c, 0x000001b8, 0x00000051 },
- { 0x00009a70, 0x000001f8, 0x00000198 },
- { 0x00009a74, 0x00000038, 0x000001d8 },
- { 0x00009a78, 0x00000078, 0x00000018 },
- { 0x00009a7c, 0x00000199, 0x00000058 },
- { 0x00009a80, 0x000001d9, 0x00000098 },
- { 0x00009a84, 0x00000019, 0x00000159 },
- { 0x00009a88, 0x00000059, 0x00000199 },
- { 0x00009a8c, 0x00000099, 0x000001d9 },
- { 0x00009a90, 0x000000d9, 0x00000019 },
- { 0x00009a94, 0x000000f9, 0x00000059 },
- { 0x00009a98, 0x000000f9, 0x00000099 },
- { 0x00009a9c, 0x000000f9, 0x000000d9 },
- { 0x00009aa0, 0x000000f9, 0x000000f9 },
- { 0x00009aa4, 0x000000f9, 0x000000f9 },
- { 0x00009aa8, 0x000000f9, 0x000000f9 },
- { 0x00009aac, 0x000000f9, 0x000000f9 },
- { 0x00009ab0, 0x000000f9, 0x000000f9 },
- { 0x00009ab4, 0x000000f9, 0x000000f9 },
- { 0x00009ab8, 0x000000f9, 0x000000f9 },
- { 0x00009abc, 0x000000f9, 0x000000f9 },
- { 0x00009ac0, 0x000000f9, 0x000000f9 },
- { 0x00009ac4, 0x000000f9, 0x000000f9 },
- { 0x00009ac8, 0x000000f9, 0x000000f9 },
- { 0x00009acc, 0x000000f9, 0x000000f9 },
- { 0x00009ad0, 0x000000f9, 0x000000f9 },
- { 0x00009ad4, 0x000000f9, 0x000000f9 },
- { 0x00009ad8, 0x000000f9, 0x000000f9 },
- { 0x00009adc, 0x000000f9, 0x000000f9 },
- { 0x00009ae0, 0x000000f9, 0x000000f9 },
- { 0x00009ae4, 0x000000f9, 0x000000f9 },
- { 0x00009ae8, 0x000000f9, 0x000000f9 },
- { 0x00009aec, 0x000000f9, 0x000000f9 },
- { 0x00009af0, 0x000000f9, 0x000000f9 },
- { 0x00009af4, 0x000000f9, 0x000000f9 },
- { 0x00009af8, 0x000000f9, 0x000000f9 },
- { 0x00009afc, 0x000000f9, 0x000000f9 },
-};
-
-static const u32 ar5416Bank1_9100[][2] = {
- { 0x000098b0, 0x02108421},
- { 0x000098ec, 0x00000008},
-};
-
-static const u32 ar5416Bank2_9100[][2] = {
- { 0x000098b0, 0x0e73ff17},
- { 0x000098e0, 0x00000420},
-};
-
-static const u32 ar5416Bank3_9100[][3] = {
- { 0x000098f0, 0x01400018, 0x01c00018 },
-};
-
-static const u32 ar5416Bank6_9100[][3] = {
-
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00e00000, 0x00e00000 },
- { 0x0000989c, 0x005e0000, 0x005e0000 },
- { 0x0000989c, 0x00120000, 0x00120000 },
- { 0x0000989c, 0x00620000, 0x00620000 },
- { 0x0000989c, 0x00020000, 0x00020000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x005f0000, 0x005f0000 },
- { 0x0000989c, 0x00870000, 0x00870000 },
- { 0x0000989c, 0x00f90000, 0x00f90000 },
- { 0x0000989c, 0x007b0000, 0x007b0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00f50000, 0x00f50000 },
- { 0x0000989c, 0x00dc0000, 0x00dc0000 },
- { 0x0000989c, 0x00110000, 0x00110000 },
- { 0x0000989c, 0x006100a8, 0x006100a8 },
- { 0x0000989c, 0x004210a2, 0x004210a2 },
- { 0x0000989c, 0x0014000f, 0x0014000f },
- { 0x0000989c, 0x00c40002, 0x00c40002 },
- { 0x0000989c, 0x003000f2, 0x003000f2 },
- { 0x0000989c, 0x00440016, 0x00440016 },
- { 0x0000989c, 0x00410040, 0x00410040 },
- { 0x0000989c, 0x000180d6, 0x000180d6 },
- { 0x0000989c, 0x0000c0aa, 0x0000c0aa },
- { 0x0000989c, 0x000000b1, 0x000000b1 },
- { 0x0000989c, 0x00002000, 0x00002000 },
- { 0x0000989c, 0x000000d4, 0x000000d4 },
- { 0x000098d0, 0x0000000f, 0x0010000f },
-};
-
-
-static const u32 ar5416Bank6TPC_9100[][3] = {
-
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00e00000, 0x00e00000 },
- { 0x0000989c, 0x005e0000, 0x005e0000 },
- { 0x0000989c, 0x00120000, 0x00120000 },
- { 0x0000989c, 0x00620000, 0x00620000 },
- { 0x0000989c, 0x00020000, 0x00020000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x40ff0000, 0x40ff0000 },
- { 0x0000989c, 0x005f0000, 0x005f0000 },
- { 0x0000989c, 0x00870000, 0x00870000 },
- { 0x0000989c, 0x00f90000, 0x00f90000 },
- { 0x0000989c, 0x007b0000, 0x007b0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00f50000, 0x00f50000 },
- { 0x0000989c, 0x00dc0000, 0x00dc0000 },
- { 0x0000989c, 0x00110000, 0x00110000 },
- { 0x0000989c, 0x006100a8, 0x006100a8 },
- { 0x0000989c, 0x00423022, 0x00423022 },
- { 0x0000989c, 0x2014008f, 0x2014008f },
- { 0x0000989c, 0x00c40002, 0x00c40002 },
- { 0x0000989c, 0x003000f2, 0x003000f2 },
- { 0x0000989c, 0x00440016, 0x00440016 },
- { 0x0000989c, 0x00410040, 0x00410040 },
- { 0x0000989c, 0x0001805e, 0x0001805e },
- { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
- { 0x0000989c, 0x000000e1, 0x000000e1 },
- { 0x0000989c, 0x00007080, 0x00007080 },
- { 0x0000989c, 0x000000d4, 0x000000d4 },
- { 0x000098d0, 0x0000000f, 0x0010000f },
-};
-
-static const u32 ar5416Bank7_9100[][2] = {
- { 0x0000989c, 0x00000500 },
- { 0x0000989c, 0x00000800 },
- { 0x000098cc, 0x0000000e },
-};
-
-static const u32 ar5416Addac_9100[][2] = {
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000010 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x000000c0 },
- {0x0000989c, 0x00000015 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x000098cc, 0x00000000 },
-};
-
-static const u32 ar5416Modes_9160[][6] = {
- { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
- { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
- { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
- { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
- { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
- { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
- { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
- { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
- { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
- { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
- { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
- { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
- { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
- { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
- { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
- { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
- { 0x00009850, 0x6c48b4e2, 0x6c48b4e2, 0x6c48b0e2, 0x6c48b0e2, 0x6c48b0e2 },
- { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
- { 0x0000985c, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e },
- { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
- { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
- { 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 },
- { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
- { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
- { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
- { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d },
- { 0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020 },
- { 0x00009960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
- { 0x0000a960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
- { 0x0000b960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
- { 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 },
- { 0x0000c968, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce, 0x000003ce },
- { 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a0c00, 0x001a0c00, 0x001a0c00 },
- { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
- { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
- { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
- { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
- { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
- { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
- { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
- { 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
- { 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
- { 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
- { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
- { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
- { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
- { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
- { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
- { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
- { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
- { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
- { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
- { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
- { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
- { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
- { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
- { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
-};
-
-static const u32 ar5416Common_9160[][2] = {
- { 0x0000000c, 0x00000000 },
- { 0x00000030, 0x00020015 },
- { 0x00000034, 0x00000005 },
- { 0x00000040, 0x00000000 },
- { 0x00000044, 0x00000008 },
- { 0x00000048, 0x00000008 },
- { 0x0000004c, 0x00000010 },
- { 0x00000050, 0x00000000 },
- { 0x00000054, 0x0000001f },
- { 0x00000800, 0x00000000 },
- { 0x00000804, 0x00000000 },
- { 0x00000808, 0x00000000 },
- { 0x0000080c, 0x00000000 },
- { 0x00000810, 0x00000000 },
- { 0x00000814, 0x00000000 },
- { 0x00000818, 0x00000000 },
- { 0x0000081c, 0x00000000 },
- { 0x00000820, 0x00000000 },
- { 0x00000824, 0x00000000 },
- { 0x00001040, 0x002ffc0f },
- { 0x00001044, 0x002ffc0f },
- { 0x00001048, 0x002ffc0f },
- { 0x0000104c, 0x002ffc0f },
- { 0x00001050, 0x002ffc0f },
- { 0x00001054, 0x002ffc0f },
- { 0x00001058, 0x002ffc0f },
- { 0x0000105c, 0x002ffc0f },
- { 0x00001060, 0x002ffc0f },
- { 0x00001064, 0x002ffc0f },
- { 0x00001230, 0x00000000 },
- { 0x00001270, 0x00000000 },
- { 0x00001038, 0x00000000 },
- { 0x00001078, 0x00000000 },
- { 0x000010b8, 0x00000000 },
- { 0x000010f8, 0x00000000 },
- { 0x00001138, 0x00000000 },
- { 0x00001178, 0x00000000 },
- { 0x000011b8, 0x00000000 },
- { 0x000011f8, 0x00000000 },
- { 0x00001238, 0x00000000 },
- { 0x00001278, 0x00000000 },
- { 0x000012b8, 0x00000000 },
- { 0x000012f8, 0x00000000 },
- { 0x00001338, 0x00000000 },
- { 0x00001378, 0x00000000 },
- { 0x000013b8, 0x00000000 },
- { 0x000013f8, 0x00000000 },
- { 0x00001438, 0x00000000 },
- { 0x00001478, 0x00000000 },
- { 0x000014b8, 0x00000000 },
- { 0x000014f8, 0x00000000 },
- { 0x00001538, 0x00000000 },
- { 0x00001578, 0x00000000 },
- { 0x000015b8, 0x00000000 },
- { 0x000015f8, 0x00000000 },
- { 0x00001638, 0x00000000 },
- { 0x00001678, 0x00000000 },
- { 0x000016b8, 0x00000000 },
- { 0x000016f8, 0x00000000 },
- { 0x00001738, 0x00000000 },
- { 0x00001778, 0x00000000 },
- { 0x000017b8, 0x00000000 },
- { 0x000017f8, 0x00000000 },
- { 0x0000103c, 0x00000000 },
- { 0x0000107c, 0x00000000 },
- { 0x000010bc, 0x00000000 },
- { 0x000010fc, 0x00000000 },
- { 0x0000113c, 0x00000000 },
- { 0x0000117c, 0x00000000 },
- { 0x000011bc, 0x00000000 },
- { 0x000011fc, 0x00000000 },
- { 0x0000123c, 0x00000000 },
- { 0x0000127c, 0x00000000 },
- { 0x000012bc, 0x00000000 },
- { 0x000012fc, 0x00000000 },
- { 0x0000133c, 0x00000000 },
- { 0x0000137c, 0x00000000 },
- { 0x000013bc, 0x00000000 },
- { 0x000013fc, 0x00000000 },
- { 0x0000143c, 0x00000000 },
- { 0x0000147c, 0x00000000 },
- { 0x00004030, 0x00000002 },
- { 0x0000403c, 0x00000002 },
- { 0x00007010, 0x00000020 },
- { 0x00007038, 0x000004c2 },
- { 0x00008004, 0x00000000 },
- { 0x00008008, 0x00000000 },
- { 0x0000800c, 0x00000000 },
- { 0x00008018, 0x00000700 },
- { 0x00008020, 0x00000000 },
- { 0x00008038, 0x00000000 },
- { 0x0000803c, 0x00000000 },
- { 0x00008048, 0x40000000 },
- { 0x00008054, 0x00000000 },
- { 0x00008058, 0x00000000 },
- { 0x0000805c, 0x000fc78f },
- { 0x00008060, 0x0000000f },
- { 0x00008064, 0x00000000 },
- { 0x000080c0, 0x2a82301a },
- { 0x000080c4, 0x05dc01e0 },
- { 0x000080c8, 0x1f402710 },
- { 0x000080cc, 0x01f40000 },
- { 0x000080d0, 0x00001e00 },
- { 0x000080d4, 0x00000000 },
- { 0x000080d8, 0x00400000 },
- { 0x000080e0, 0xffffffff },
- { 0x000080e4, 0x0000ffff },
- { 0x000080e8, 0x003f3f3f },
- { 0x000080ec, 0x00000000 },
- { 0x000080f0, 0x00000000 },
- { 0x000080f4, 0x00000000 },
- { 0x000080f8, 0x00000000 },
- { 0x000080fc, 0x00020000 },
- { 0x00008100, 0x00020000 },
- { 0x00008104, 0x00000001 },
- { 0x00008108, 0x00000052 },
- { 0x0000810c, 0x00000000 },
- { 0x00008110, 0x00000168 },
- { 0x00008118, 0x000100aa },
- { 0x0000811c, 0x00003210 },
- { 0x00008120, 0x08f04800 },
- { 0x00008124, 0x00000000 },
- { 0x00008128, 0x00000000 },
- { 0x0000812c, 0x00000000 },
- { 0x00008130, 0x00000000 },
- { 0x00008134, 0x00000000 },
- { 0x00008138, 0x00000000 },
- { 0x0000813c, 0x00000000 },
- { 0x00008144, 0xffffffff },
- { 0x00008168, 0x00000000 },
- { 0x0000816c, 0x00000000 },
- { 0x00008170, 0x32143320 },
- { 0x00008174, 0xfaa4fa50 },
- { 0x00008178, 0x00000100 },
- { 0x0000817c, 0x00000000 },
- { 0x000081c4, 0x00000000 },
- { 0x000081d0, 0x00003210 },
- { 0x000081ec, 0x00000000 },
- { 0x000081f0, 0x00000000 },
- { 0x000081f4, 0x00000000 },
- { 0x000081f8, 0x00000000 },
- { 0x000081fc, 0x00000000 },
- { 0x00008200, 0x00000000 },
- { 0x00008204, 0x00000000 },
- { 0x00008208, 0x00000000 },
- { 0x0000820c, 0x00000000 },
- { 0x00008210, 0x00000000 },
- { 0x00008214, 0x00000000 },
- { 0x00008218, 0x00000000 },
- { 0x0000821c, 0x00000000 },
- { 0x00008220, 0x00000000 },
- { 0x00008224, 0x00000000 },
- { 0x00008228, 0x00000000 },
- { 0x0000822c, 0x00000000 },
- { 0x00008230, 0x00000000 },
- { 0x00008234, 0x00000000 },
- { 0x00008238, 0x00000000 },
- { 0x0000823c, 0x00000000 },
- { 0x00008240, 0x00100000 },
- { 0x00008244, 0x0010f400 },
- { 0x00008248, 0x00000100 },
- { 0x0000824c, 0x0001e800 },
- { 0x00008250, 0x00000000 },
- { 0x00008254, 0x00000000 },
- { 0x00008258, 0x00000000 },
- { 0x0000825c, 0x400000ff },
- { 0x00008260, 0x00080922 },
- { 0x00008270, 0x00000000 },
- { 0x00008274, 0x40000000 },
- { 0x00008278, 0x003e4180 },
- { 0x0000827c, 0x00000000 },
- { 0x00008284, 0x0000002c },
- { 0x00008288, 0x0000002c },
- { 0x0000828c, 0x00000000 },
- { 0x00008294, 0x00000000 },
- { 0x00008298, 0x00000000 },
- { 0x00008300, 0x00000000 },
- { 0x00008304, 0x00000000 },
- { 0x00008308, 0x00000000 },
- { 0x0000830c, 0x00000000 },
- { 0x00008310, 0x00000000 },
- { 0x00008314, 0x00000000 },
- { 0x00008318, 0x00000000 },
- { 0x00008328, 0x00000000 },
- { 0x0000832c, 0x00000007 },
- { 0x00008330, 0x00000302 },
- { 0x00008334, 0x00000e00 },
- { 0x00008338, 0x00ff0000 },
- { 0x0000833c, 0x00000000 },
- { 0x00008340, 0x000107ff },
- { 0x00009808, 0x00000000 },
- { 0x0000980c, 0xad848e19 },
- { 0x00009810, 0x7d14e000 },
- { 0x00009814, 0x9c0a9f6b },
- { 0x0000981c, 0x00000000 },
- { 0x0000982c, 0x0000a000 },
- { 0x00009830, 0x00000000 },
- { 0x0000983c, 0x00200400 },
- { 0x00009840, 0x206a01ae },
- { 0x0000984c, 0x1284233c },
- { 0x00009854, 0x00000859 },
- { 0x00009900, 0x00000000 },
- { 0x00009904, 0x00000000 },
- { 0x00009908, 0x00000000 },
- { 0x0000990c, 0x00000000 },
- { 0x0000991c, 0x10000fff },
- { 0x00009920, 0x05100000 },
- { 0x0000a920, 0x05100000 },
- { 0x0000b920, 0x05100000 },
- { 0x00009928, 0x00000001 },
- { 0x0000992c, 0x00000004 },
- { 0x00009934, 0x1e1f2022 },
- { 0x00009938, 0x0a0b0c0d },
- { 0x0000993c, 0x00000000 },
- { 0x00009948, 0x9280b212 },
- { 0x0000994c, 0x00020028 },
- { 0x00009954, 0x5f3ca3de },
- { 0x00009958, 0x2108ecff },
- { 0x00009940, 0x00750604 },
- { 0x0000c95c, 0x004b6a8e },
- { 0x00009970, 0x190fb515 },
- { 0x00009974, 0x00000000 },
- { 0x00009978, 0x00000001 },
- { 0x0000997c, 0x00000000 },
- { 0x00009980, 0x00000000 },
- { 0x00009984, 0x00000000 },
- { 0x00009988, 0x00000000 },
- { 0x0000998c, 0x00000000 },
- { 0x00009990, 0x00000000 },
- { 0x00009994, 0x00000000 },
- { 0x00009998, 0x00000000 },
- { 0x0000999c, 0x00000000 },
- { 0x000099a0, 0x00000000 },
- { 0x000099a4, 0x00000001 },
- { 0x000099a8, 0x201fff00 },
- { 0x000099ac, 0x006f0000 },
- { 0x000099b0, 0x03051000 },
- { 0x000099dc, 0x00000000 },
- { 0x000099e0, 0x00000200 },
- { 0x000099e4, 0xaaaaaaaa },
- { 0x000099e8, 0x3c466478 },
- { 0x000099ec, 0x0cc80caa },
- { 0x000099fc, 0x00001042 },
- { 0x00009b00, 0x00000000 },
- { 0x00009b04, 0x00000001 },
- { 0x00009b08, 0x00000002 },
- { 0x00009b0c, 0x00000003 },
- { 0x00009b10, 0x00000004 },
- { 0x00009b14, 0x00000005 },
- { 0x00009b18, 0x00000008 },
- { 0x00009b1c, 0x00000009 },
- { 0x00009b20, 0x0000000a },
- { 0x00009b24, 0x0000000b },
- { 0x00009b28, 0x0000000c },
- { 0x00009b2c, 0x0000000d },
- { 0x00009b30, 0x00000010 },
- { 0x00009b34, 0x00000011 },
- { 0x00009b38, 0x00000012 },
- { 0x00009b3c, 0x00000013 },
- { 0x00009b40, 0x00000014 },
- { 0x00009b44, 0x00000015 },
- { 0x00009b48, 0x00000018 },
- { 0x00009b4c, 0x00000019 },
- { 0x00009b50, 0x0000001a },
- { 0x00009b54, 0x0000001b },
- { 0x00009b58, 0x0000001c },
- { 0x00009b5c, 0x0000001d },
- { 0x00009b60, 0x00000020 },
- { 0x00009b64, 0x00000021 },
- { 0x00009b68, 0x00000022 },
- { 0x00009b6c, 0x00000023 },
- { 0x00009b70, 0x00000024 },
- { 0x00009b74, 0x00000025 },
- { 0x00009b78, 0x00000028 },
- { 0x00009b7c, 0x00000029 },
- { 0x00009b80, 0x0000002a },
- { 0x00009b84, 0x0000002b },
- { 0x00009b88, 0x0000002c },
- { 0x00009b8c, 0x0000002d },
- { 0x00009b90, 0x00000030 },
- { 0x00009b94, 0x00000031 },
- { 0x00009b98, 0x00000032 },
- { 0x00009b9c, 0x00000033 },
- { 0x00009ba0, 0x00000034 },
- { 0x00009ba4, 0x00000035 },
- { 0x00009ba8, 0x00000035 },
- { 0x00009bac, 0x00000035 },
- { 0x00009bb0, 0x00000035 },
- { 0x00009bb4, 0x00000035 },
- { 0x00009bb8, 0x00000035 },
- { 0x00009bbc, 0x00000035 },
- { 0x00009bc0, 0x00000035 },
- { 0x00009bc4, 0x00000035 },
- { 0x00009bc8, 0x00000035 },
- { 0x00009bcc, 0x00000035 },
- { 0x00009bd0, 0x00000035 },
- { 0x00009bd4, 0x00000035 },
- { 0x00009bd8, 0x00000035 },
- { 0x00009bdc, 0x00000035 },
- { 0x00009be0, 0x00000035 },
- { 0x00009be4, 0x00000035 },
- { 0x00009be8, 0x00000035 },
- { 0x00009bec, 0x00000035 },
- { 0x00009bf0, 0x00000035 },
- { 0x00009bf4, 0x00000035 },
- { 0x00009bf8, 0x00000010 },
- { 0x00009bfc, 0x0000001a },
- { 0x0000a210, 0x40806333 },
- { 0x0000a214, 0x00106c10 },
- { 0x0000a218, 0x009c4060 },
- { 0x0000a220, 0x018830c6 },
- { 0x0000a224, 0x00000400 },
- { 0x0000a228, 0x001a0bb5 },
- { 0x0000a22c, 0x00000000 },
- { 0x0000a234, 0x20202020 },
- { 0x0000a238, 0x20202020 },
- { 0x0000a23c, 0x13c889af },
- { 0x0000a240, 0x38490a20 },
- { 0x0000a244, 0x00007bb6 },
- { 0x0000a248, 0x0fff3ffc },
- { 0x0000a24c, 0x00000001 },
- { 0x0000a250, 0x0000e000 },
- { 0x0000a254, 0x00000000 },
- { 0x0000a258, 0x0cc75380 },
- { 0x0000a25c, 0x0f0f0f01 },
- { 0x0000a260, 0xdfa91f01 },
- { 0x0000a268, 0x00000001 },
- { 0x0000a26c, 0x0ebae9c6 },
- { 0x0000b26c, 0x0ebae9c6 },
- { 0x0000c26c, 0x0ebae9c6 },
- { 0x0000d270, 0x00820820 },
- { 0x0000a278, 0x1ce739ce },
- { 0x0000a27c, 0x050701ce },
- { 0x0000a338, 0x00000000 },
- { 0x0000a33c, 0x00000000 },
- { 0x0000a340, 0x00000000 },
- { 0x0000a344, 0x00000000 },
- { 0x0000a348, 0x3fffffff },
- { 0x0000a34c, 0x3fffffff },
- { 0x0000a350, 0x3fffffff },
- { 0x0000a354, 0x0003ffff },
- { 0x0000a358, 0x79bfaa03 },
- { 0x0000d35c, 0x07ffffef },
- { 0x0000d360, 0x0fffffe7 },
- { 0x0000d364, 0x17ffffe5 },
- { 0x0000d368, 0x1fffffe4 },
- { 0x0000d36c, 0x37ffffe3 },
- { 0x0000d370, 0x3fffffe3 },
- { 0x0000d374, 0x57ffffe3 },
- { 0x0000d378, 0x5fffffe2 },
- { 0x0000d37c, 0x7fffffe2 },
- { 0x0000d380, 0x7f3c7bba },
- { 0x0000d384, 0xf3307ff0 },
- { 0x0000a388, 0x0c000000 },
- { 0x0000a38c, 0x20202020 },
- { 0x0000a390, 0x20202020 },
- { 0x0000a394, 0x1ce739ce },
- { 0x0000a398, 0x000001ce },
- { 0x0000a39c, 0x00000001 },
- { 0x0000a3a0, 0x00000000 },
- { 0x0000a3a4, 0x00000000 },
- { 0x0000a3a8, 0x00000000 },
- { 0x0000a3ac, 0x00000000 },
- { 0x0000a3b0, 0x00000000 },
- { 0x0000a3b4, 0x00000000 },
- { 0x0000a3b8, 0x00000000 },
- { 0x0000a3bc, 0x00000000 },
- { 0x0000a3c0, 0x00000000 },
- { 0x0000a3c4, 0x00000000 },
- { 0x0000a3c8, 0x00000246 },
- { 0x0000a3cc, 0x20202020 },
- { 0x0000a3d0, 0x20202020 },
- { 0x0000a3d4, 0x20202020 },
- { 0x0000a3dc, 0x1ce739ce },
- { 0x0000a3e0, 0x000001ce },
-};
-
-static const u32 ar5416Bank0_9160[][2] = {
- { 0x000098b0, 0x1e5795e5 },
- { 0x000098e0, 0x02008020 },
-};
-
-static const u32 ar5416BB_RfGain_9160[][3] = {
- { 0x00009a00, 0x00000000, 0x00000000 },
- { 0x00009a04, 0x00000040, 0x00000040 },
- { 0x00009a08, 0x00000080, 0x00000080 },
- { 0x00009a0c, 0x000001a1, 0x00000141 },
- { 0x00009a10, 0x000001e1, 0x00000181 },
- { 0x00009a14, 0x00000021, 0x000001c1 },
- { 0x00009a18, 0x00000061, 0x00000001 },
- { 0x00009a1c, 0x00000168, 0x00000041 },
- { 0x00009a20, 0x000001a8, 0x000001a8 },
- { 0x00009a24, 0x000001e8, 0x000001e8 },
- { 0x00009a28, 0x00000028, 0x00000028 },
- { 0x00009a2c, 0x00000068, 0x00000068 },
- { 0x00009a30, 0x00000189, 0x000000a8 },
- { 0x00009a34, 0x000001c9, 0x00000169 },
- { 0x00009a38, 0x00000009, 0x000001a9 },
- { 0x00009a3c, 0x00000049, 0x000001e9 },
- { 0x00009a40, 0x00000089, 0x00000029 },
- { 0x00009a44, 0x00000170, 0x00000069 },
- { 0x00009a48, 0x000001b0, 0x00000190 },
- { 0x00009a4c, 0x000001f0, 0x000001d0 },
- { 0x00009a50, 0x00000030, 0x00000010 },
- { 0x00009a54, 0x00000070, 0x00000050 },
- { 0x00009a58, 0x00000191, 0x00000090 },
- { 0x00009a5c, 0x000001d1, 0x00000151 },
- { 0x00009a60, 0x00000011, 0x00000191 },
- { 0x00009a64, 0x00000051, 0x000001d1 },
- { 0x00009a68, 0x00000091, 0x00000011 },
- { 0x00009a6c, 0x000001b8, 0x00000051 },
- { 0x00009a70, 0x000001f8, 0x00000198 },
- { 0x00009a74, 0x00000038, 0x000001d8 },
- { 0x00009a78, 0x00000078, 0x00000018 },
- { 0x00009a7c, 0x00000199, 0x00000058 },
- { 0x00009a80, 0x000001d9, 0x00000098 },
- { 0x00009a84, 0x00000019, 0x00000159 },
- { 0x00009a88, 0x00000059, 0x00000199 },
- { 0x00009a8c, 0x00000099, 0x000001d9 },
- { 0x00009a90, 0x000000d9, 0x00000019 },
- { 0x00009a94, 0x000000f9, 0x00000059 },
- { 0x00009a98, 0x000000f9, 0x00000099 },
- { 0x00009a9c, 0x000000f9, 0x000000d9 },
- { 0x00009aa0, 0x000000f9, 0x000000f9 },
- { 0x00009aa4, 0x000000f9, 0x000000f9 },
- { 0x00009aa8, 0x000000f9, 0x000000f9 },
- { 0x00009aac, 0x000000f9, 0x000000f9 },
- { 0x00009ab0, 0x000000f9, 0x000000f9 },
- { 0x00009ab4, 0x000000f9, 0x000000f9 },
- { 0x00009ab8, 0x000000f9, 0x000000f9 },
- { 0x00009abc, 0x000000f9, 0x000000f9 },
- { 0x00009ac0, 0x000000f9, 0x000000f9 },
- { 0x00009ac4, 0x000000f9, 0x000000f9 },
- { 0x00009ac8, 0x000000f9, 0x000000f9 },
- { 0x00009acc, 0x000000f9, 0x000000f9 },
- { 0x00009ad0, 0x000000f9, 0x000000f9 },
- { 0x00009ad4, 0x000000f9, 0x000000f9 },
- { 0x00009ad8, 0x000000f9, 0x000000f9 },
- { 0x00009adc, 0x000000f9, 0x000000f9 },
- { 0x00009ae0, 0x000000f9, 0x000000f9 },
- { 0x00009ae4, 0x000000f9, 0x000000f9 },
- { 0x00009ae8, 0x000000f9, 0x000000f9 },
- { 0x00009aec, 0x000000f9, 0x000000f9 },
- { 0x00009af0, 0x000000f9, 0x000000f9 },
- { 0x00009af4, 0x000000f9, 0x000000f9 },
- { 0x00009af8, 0x000000f9, 0x000000f9 },
- { 0x00009afc, 0x000000f9, 0x000000f9 },
-};
-
-static const u32 ar5416Bank1_9160[][2] = {
- { 0x000098b0, 0x02108421 },
- { 0x000098ec, 0x00000008 },
-};
-
-static const u32 ar5416Bank2_9160[][2] = {
- { 0x000098b0, 0x0e73ff17 },
- { 0x000098e0, 0x00000420 },
-};
-
-static const u32 ar5416Bank3_9160[][3] = {
- { 0x000098f0, 0x01400018, 0x01c00018 },
-};
-
-static const u32 ar5416Bank6_9160[][3] = {
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00e00000, 0x00e00000 },
- { 0x0000989c, 0x005e0000, 0x005e0000 },
- { 0x0000989c, 0x00120000, 0x00120000 },
- { 0x0000989c, 0x00620000, 0x00620000 },
- { 0x0000989c, 0x00020000, 0x00020000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x40ff0000, 0x40ff0000 },
- { 0x0000989c, 0x005f0000, 0x005f0000 },
- { 0x0000989c, 0x00870000, 0x00870000 },
- { 0x0000989c, 0x00f90000, 0x00f90000 },
- { 0x0000989c, 0x007b0000, 0x007b0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00f50000, 0x00f50000 },
- { 0x0000989c, 0x00dc0000, 0x00dc0000 },
- { 0x0000989c, 0x00110000, 0x00110000 },
- { 0x0000989c, 0x006100a8, 0x006100a8 },
- { 0x0000989c, 0x004210a2, 0x004210a2 },
- { 0x0000989c, 0x0014008f, 0x0014008f },
- { 0x0000989c, 0x00c40003, 0x00c40003 },
- { 0x0000989c, 0x003000f2, 0x003000f2 },
- { 0x0000989c, 0x00440016, 0x00440016 },
- { 0x0000989c, 0x00410040, 0x00410040 },
- { 0x0000989c, 0x0001805e, 0x0001805e },
- { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
- { 0x0000989c, 0x000000f1, 0x000000f1 },
- { 0x0000989c, 0x00002081, 0x00002081 },
- { 0x0000989c, 0x000000d4, 0x000000d4 },
- { 0x000098d0, 0x0000000f, 0x0010000f },
-};
-
-static const u32 ar5416Bank6TPC_9160[][3] = {
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00e00000, 0x00e00000 },
- { 0x0000989c, 0x005e0000, 0x005e0000 },
- { 0x0000989c, 0x00120000, 0x00120000 },
- { 0x0000989c, 0x00620000, 0x00620000 },
- { 0x0000989c, 0x00020000, 0x00020000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x40ff0000, 0x40ff0000 },
- { 0x0000989c, 0x005f0000, 0x005f0000 },
- { 0x0000989c, 0x00870000, 0x00870000 },
- { 0x0000989c, 0x00f90000, 0x00f90000 },
- { 0x0000989c, 0x007b0000, 0x007b0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00f50000, 0x00f50000 },
- { 0x0000989c, 0x00dc0000, 0x00dc0000 },
- { 0x0000989c, 0x00110000, 0x00110000 },
- { 0x0000989c, 0x006100a8, 0x006100a8 },
- { 0x0000989c, 0x00423022, 0x00423022 },
- { 0x0000989c, 0x2014008f, 0x2014008f },
- { 0x0000989c, 0x00c40002, 0x00c40002 },
- { 0x0000989c, 0x003000f2, 0x003000f2 },
- { 0x0000989c, 0x00440016, 0x00440016 },
- { 0x0000989c, 0x00410040, 0x00410040 },
- { 0x0000989c, 0x0001805e, 0x0001805e },
- { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
- { 0x0000989c, 0x000000e1, 0x000000e1 },
- { 0x0000989c, 0x00007080, 0x00007080 },
- { 0x0000989c, 0x000000d4, 0x000000d4 },
- { 0x000098d0, 0x0000000f, 0x0010000f },
-};
-
-static const u32 ar5416Bank7_9160[][2] = {
- { 0x0000989c, 0x00000500 },
- { 0x0000989c, 0x00000800 },
- { 0x000098cc, 0x0000000e },
-};
+#ifndef INITVALS_9002_10_H
+#define INITVALS_9002_10_H
-static u32 ar5416Addac_9160[][2] = {
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x000000c0 },
- {0x0000989c, 0x00000018 },
- {0x0000989c, 0x00000004 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x000000c0 },
- {0x0000989c, 0x00000019 },
- {0x0000989c, 0x00000004 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000004 },
- {0x0000989c, 0x00000003 },
- {0x0000989c, 0x00000008 },
- {0x0000989c, 0x00000000 },
- {0x000098cc, 0x00000000 },
-};
-
-static u32 ar5416Addac_91601_1[][2] = {
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x000000c0 },
- {0x0000989c, 0x00000018 },
- {0x0000989c, 0x00000004 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x000000c0 },
- {0x0000989c, 0x00000019 },
- {0x0000989c, 0x00000004 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x000098cc, 0x00000000 },
-};
-
-/* XXX 9280 1 */
static const u32 ar9280Modes_9280[][6] = {
{ 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
{ 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
@@ -2766,7 +793,7 @@ static const u32 ar9280Common_9280_2[][2] = {
{ 0x00008258, 0x00000000 },
{ 0x0000825c, 0x400000ff },
{ 0x00008260, 0x00080922 },
- { 0x00008264, 0xa8a00010 },
+ { 0x00008264, 0x88a00010 },
{ 0x00008270, 0x00000000 },
{ 0x00008274, 0x40000000 },
{ 0x00008278, 0x003e4180 },
@@ -3441,7 +1468,7 @@ static const u32 ar9280PciePhy_clkreq_always_on_L1_9280[][2] = {
};
/* AR9285 Revision 10 */
-static const u_int32_t ar9285Modes_9285[][6] = {
+static const u32 ar9285Modes_9285[][6] = {
{ 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
{ 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
{ 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
@@ -3763,7 +1790,7 @@ static const u_int32_t ar9285Modes_9285[][6] = {
{ 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e },
};
-static const u_int32_t ar9285Common_9285[][2] = {
+static const u32 ar9285Common_9285[][2] = {
{ 0x0000000c, 0x00000000 },
{ 0x00000030, 0x00020045 },
{ 0x00000034, 0x00000005 },
@@ -3936,7 +1963,7 @@ static const u_int32_t ar9285Common_9285[][2] = {
{ 0x00008258, 0x00000000 },
{ 0x0000825c, 0x400000ff },
{ 0x00008260, 0x00080922 },
- { 0x00008264, 0xa8a00010 },
+ { 0x00008264, 0x88a00010 },
{ 0x00008270, 0x00000000 },
{ 0x00008274, 0x40000000 },
{ 0x00008278, 0x003e4180 },
@@ -4096,7 +2123,7 @@ static const u_int32_t ar9285Common_9285[][2] = {
{ 0x00007870, 0x10142c00 },
};
-static const u_int32_t ar9285PciePhy_clkreq_always_on_L1_9285[][2] = {
+static const u32 ar9285PciePhy_clkreq_always_on_L1_9285[][2] = {
{0x00004040, 0x9248fd00 },
{0x00004040, 0x24924924 },
{0x00004040, 0xa8000019 },
@@ -4109,7 +2136,7 @@ static const u_int32_t ar9285PciePhy_clkreq_always_on_L1_9285[][2] = {
{0x00004044, 0x00000000 },
};
-static const u_int32_t ar9285PciePhy_clkreq_off_L1_9285[][2] = {
+static const u32 ar9285PciePhy_clkreq_off_L1_9285[][2] = {
{0x00004040, 0x9248fd00 },
{0x00004040, 0x24924924 },
{0x00004040, 0xa8000019 },
@@ -4123,7 +2150,7 @@ static const u_int32_t ar9285PciePhy_clkreq_off_L1_9285[][2] = {
};
/* AR9285 v1_2 PCI Register Writes. Created: 04/13/09 */
-static const u_int32_t ar9285Modes_9285_1_2[][6] = {
+static const u32 ar9285Modes_9285_1_2[][6] = {
/* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
{ 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
{ 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
@@ -4184,7 +2211,7 @@ static const u_int32_t ar9285Modes_9285_1_2[][6] = {
{ 0x00009a44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 },
{ 0x00009a48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 },
{ 0x00009a4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 },
- { 0x00009a50, 0x00000000, 0x00000000, 0x00058220, 0x00058220, 0x00000000 },
+ { 0x00009a50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000 },
{ 0x00009a54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 },
{ 0x00009a58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 },
{ 0x00009a5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 },
@@ -4198,8 +2225,8 @@ static const u_int32_t ar9285Modes_9285_1_2[][6] = {
{ 0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 },
{ 0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 },
{ 0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 },
- { 0x00009a88, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
- { 0x00009a8c, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
+ { 0x00009a88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000 },
+ { 0x00009a8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
{ 0x00009a90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
{ 0x00009a94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 },
{ 0x00009a98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 },
@@ -4312,7 +2339,7 @@ static const u_int32_t ar9285Modes_9285_1_2[][6] = {
{ 0x0000aa44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 },
{ 0x0000aa48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 },
{ 0x0000aa4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 },
- { 0x0000aa50, 0x00000000, 0x00000000, 0x00058220, 0x00058220, 0x00000000 },
+ { 0x0000aa50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000 },
{ 0x0000aa54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 },
{ 0x0000aa58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 },
{ 0x0000aa5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 },
@@ -4326,8 +2353,8 @@ static const u_int32_t ar9285Modes_9285_1_2[][6] = {
{ 0x0000aa7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 },
{ 0x0000aa80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 },
{ 0x0000aa84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 },
- { 0x0000aa88, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
- { 0x0000aa8c, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
+ { 0x0000aa88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000 },
+ { 0x0000aa8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
{ 0x0000aa90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
{ 0x0000aa94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 },
{ 0x0000aa98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 },
@@ -4429,7 +2456,7 @@ static const u_int32_t ar9285Modes_9285_1_2[][6] = {
{ 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e },
};
-static const u_int32_t ar9285Common_9285_1_2[][2] = {
+static const u32 ar9285Common_9285_1_2[][2] = {
{ 0x0000000c, 0x00000000 },
{ 0x00000030, 0x00020045 },
{ 0x00000034, 0x00000005 },
@@ -4731,17 +2758,12 @@ static const u_int32_t ar9285Common_9285_1_2[][2] = {
{ 0x00007808, 0x54214514 },
{ 0x0000780c, 0x02025830 },
{ 0x00007810, 0x71c0d388 },
- { 0x00007814, 0x924934a8 },
{ 0x0000781c, 0x00000000 },
{ 0x00007824, 0x00d86fff },
- { 0x00007828, 0x26d2491b },
{ 0x0000782c, 0x6e36d97b },
- { 0x00007830, 0xedb6d96e },
{ 0x00007834, 0x71400087 },
- { 0x0000783c, 0x0001fffe },
- { 0x00007840, 0xffeb1a20 },
{ 0x00007844, 0x000c0db6 },
- { 0x00007848, 0x6db61b6f },
+ { 0x00007848, 0x6db6246f },
{ 0x0000784c, 0x6d9b66db },
{ 0x00007850, 0x6d8c6dba },
{ 0x00007854, 0x00040000 },
@@ -4753,7 +2775,7 @@ static const u_int32_t ar9285Common_9285_1_2[][2] = {
{ 0x00007870, 0x10142c00 },
};
-static const u_int32_t ar9285Modes_high_power_tx_gain_9285_1_2[][6] = {
+static const u32 ar9285Modes_high_power_tx_gain_9285_1_2[][6] = {
/* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
{ 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
{ 0x0000a304, 0x00000000, 0x00000000, 0x00006200, 0x00006200, 0x00000000 },
@@ -4777,7 +2799,12 @@ static const u_int32_t ar9285Modes_high_power_tx_gain_9285_1_2[][6] = {
{ 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
{ 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
{ 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x00007814, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8 },
+ { 0x00007828, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b },
+ { 0x00007830, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e },
{ 0x00007838, 0xfac68803, 0xfac68803, 0xfac68803, 0xfac68803, 0xfac68803 },
+ { 0x0000783c, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe },
+ { 0x00007840, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20 },
{ 0x0000786c, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe },
{ 0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00 },
{ 0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a216652, 0x0a216652, 0x0a22a652 },
@@ -4789,7 +2816,7 @@ static const u_int32_t ar9285Modes_high_power_tx_gain_9285_1_2[][6] = {
{ 0x0000a3e0, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7 },
};
-static const u_int32_t ar9285Modes_original_tx_gain_9285_1_2[][6] = {
+static const u32 ar9285Modes_original_tx_gain_9285_1_2[][6] = {
/* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
{ 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
{ 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 },
@@ -4813,7 +2840,52 @@ static const u_int32_t ar9285Modes_original_tx_gain_9285_1_2[][6] = {
{ 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
{ 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
{ 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x00007814, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8 },
+ { 0x00007828, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b },
+ { 0x00007830, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e },
{ 0x00007838, 0xfac68801, 0xfac68801, 0xfac68801, 0xfac68801, 0xfac68801 },
+ { 0x0000783c, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe },
+ { 0x00007840, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20 },
+ { 0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4 },
+ { 0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04 },
+ { 0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a21a652, 0x0a21a652, 0x0a22a652 },
+ { 0x0000a278, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c },
+ { 0x0000a27c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c },
+ { 0x0000a394, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c },
+ { 0x0000a398, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c },
+ { 0x0000a3dc, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c },
+ { 0x0000a3e0, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c },
+};
+
+static const u32 ar9285Modes_XE2_0_normal_power[][6] = {
+ { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 },
+ { 0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000 },
+ { 0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608, 0x00000000 },
+ { 0x0000a310, 0x00000000, 0x00000000, 0x00022618, 0x00022618, 0x00000000 },
+ { 0x0000a314, 0x00000000, 0x00000000, 0x0002a6c9, 0x0002a6c9, 0x00000000 },
+ { 0x0000a318, 0x00000000, 0x00000000, 0x00031710, 0x00031710, 0x00000000 },
+ { 0x0000a31c, 0x00000000, 0x00000000, 0x00035718, 0x00035718, 0x00000000 },
+ { 0x0000a320, 0x00000000, 0x00000000, 0x00038758, 0x00038758, 0x00000000 },
+ { 0x0000a324, 0x00000000, 0x00000000, 0x0003c75a, 0x0003c75a, 0x00000000 },
+ { 0x0000a328, 0x00000000, 0x00000000, 0x0004075c, 0x0004075c, 0x00000000 },
+ { 0x0000a32c, 0x00000000, 0x00000000, 0x0004475e, 0x0004475e, 0x00000000 },
+ { 0x0000a330, 0x00000000, 0x00000000, 0x0004679f, 0x0004679f, 0x00000000 },
+ { 0x0000a334, 0x00000000, 0x00000000, 0x000487df, 0x000487df, 0x00000000 },
+ { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 },
+ { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 },
+ { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x00007814, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8 },
+ { 0x00007828, 0x4ad2491b, 0x4ad2491b, 0x2ad2491b, 0x4ad2491b, 0x4ad2491b },
+ { 0x00007830, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6dbae },
+ { 0x00007838, 0xdac71441, 0xdac71441, 0xdac71441, 0xdac71441, 0xdac71441 },
+ { 0x0000783c, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe },
+ { 0x00007840, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c },
{ 0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4 },
{ 0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04 },
{ 0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a21a652, 0x0a21a652, 0x0a22a652 },
@@ -4825,7 +2897,47 @@ static const u_int32_t ar9285Modes_original_tx_gain_9285_1_2[][6] = {
{ 0x0000a3e0, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c },
};
-static const u_int32_t ar9285PciePhy_clkreq_always_on_L1_9285_1_2[][2] = {
+static const u32 ar9285Modes_XE2_0_high_power[][6] = {
+ { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a304, 0x00000000, 0x00000000, 0x00006200, 0x00006200, 0x00000000 },
+ { 0x0000a308, 0x00000000, 0x00000000, 0x00008201, 0x00008201, 0x00000000 },
+ { 0x0000a30c, 0x00000000, 0x00000000, 0x0000b240, 0x0000b240, 0x00000000 },
+ { 0x0000a310, 0x00000000, 0x00000000, 0x0000d241, 0x0000d241, 0x00000000 },
+ { 0x0000a314, 0x00000000, 0x00000000, 0x0000f600, 0x0000f600, 0x00000000 },
+ { 0x0000a318, 0x00000000, 0x00000000, 0x00012800, 0x00012800, 0x00000000 },
+ { 0x0000a31c, 0x00000000, 0x00000000, 0x00016802, 0x00016802, 0x00000000 },
+ { 0x0000a320, 0x00000000, 0x00000000, 0x0001b805, 0x0001b805, 0x00000000 },
+ { 0x0000a324, 0x00000000, 0x00000000, 0x00021a80, 0x00021a80, 0x00000000 },
+ { 0x0000a328, 0x00000000, 0x00000000, 0x00028b00, 0x00028b00, 0x00000000 },
+ { 0x0000a32c, 0x00000000, 0x00000000, 0x0002ab40, 0x0002ab40, 0x00000000 },
+ { 0x0000a330, 0x00000000, 0x00000000, 0x0002cd80, 0x0002cd80, 0x00000000 },
+ { 0x0000a334, 0x00000000, 0x00000000, 0x00033d82, 0x00033d82, 0x00000000 },
+ { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 },
+ { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 },
+ { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x00007814, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8 },
+ { 0x00007828, 0x4ad2491b, 0x4ad2491b, 0x2ad2491b, 0x4ad2491b, 0x4ad2491b },
+ { 0x00007830, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e },
+ { 0x00007838, 0xdac71443, 0xdac71443, 0xdac71443, 0xdac71443, 0xdac71443 },
+ { 0x0000783c, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe },
+ { 0x00007840, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c },
+ { 0x0000786c, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe },
+ { 0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00 },
+ { 0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a216652, 0x0a216652, 0x0a22a652 },
+ { 0x0000a278, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 },
+ { 0x0000a27c, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7 },
+ { 0x0000a394, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 },
+ { 0x0000a398, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7 },
+ { 0x0000a3dc, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 },
+ { 0x0000a3e0, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7 },
+};
+
+static const u32 ar9285PciePhy_clkreq_always_on_L1_9285_1_2[][2] = {
{0x00004040, 0x9248fd00 },
{0x00004040, 0x24924924 },
{0x00004040, 0xa8000019 },
@@ -4838,7 +2950,7 @@ static const u_int32_t ar9285PciePhy_clkreq_always_on_L1_9285_1_2[][2] = {
{0x00004044, 0x00000000 },
};
-static const u_int32_t ar9285PciePhy_clkreq_off_L1_9285_1_2[][2] = {
+static const u32 ar9285PciePhy_clkreq_off_L1_9285_1_2[][2] = {
{0x00004040, 0x9248fd00 },
{0x00004040, 0x24924924 },
{0x00004040, 0xa8000019 },
@@ -4852,7 +2964,7 @@ static const u_int32_t ar9285PciePhy_clkreq_off_L1_9285_1_2[][2] = {
};
/* AR9287 Revision 10 */
-static const u_int32_t ar9287Modes_9287_1_0[][6] = {
+static const u32 ar9287Modes_9287_1_0[][6] = {
/* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
{ 0x00001030, 0x00000000, 0x00000000, 0x000002c0, 0x00000160, 0x000001e0 },
{ 0x00001070, 0x00000000, 0x00000000, 0x00000318, 0x0000018c, 0x000001e0 },
@@ -4899,7 +3011,7 @@ static const u_int32_t ar9287Modes_9287_1_0[][6] = {
{ 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
};
-static const u_int32_t ar9287Common_9287_1_0[][2] = {
+static const u32 ar9287Common_9287_1_0[][2] = {
{ 0x0000000c, 0x00000000 },
{ 0x00000030, 0x00020015 },
{ 0x00000034, 0x00000005 },
@@ -5073,7 +3185,7 @@ static const u_int32_t ar9287Common_9287_1_0[][2] = {
{ 0x00008258, 0x00000000 },
{ 0x0000825c, 0x400000ff },
{ 0x00008260, 0x00080922 },
- { 0x00008264, 0xa8a00010 },
+ { 0x00008264, 0x88a00010 },
{ 0x00008270, 0x00000000 },
{ 0x00008274, 0x40000000 },
{ 0x00008278, 0x003e4180 },
@@ -5270,7 +3382,7 @@ static const u_int32_t ar9287Common_9287_1_0[][2] = {
{ 0x000078b8, 0x2a850160 },
};
-static const u_int32_t ar9287Modes_tx_gain_9287_1_0[][6] = {
+static const u32 ar9287Modes_tx_gain_9287_1_0[][6] = {
/* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
{ 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
{ 0x0000a304, 0x00000000, 0x00000000, 0x00004002, 0x00004002, 0x00004002 },
@@ -5320,7 +3432,7 @@ static const u_int32_t ar9287Modes_tx_gain_9287_1_0[][6] = {
};
-static const u_int32_t ar9287Modes_rx_gain_9287_1_0[][6] = {
+static const u32 ar9287Modes_rx_gain_9287_1_0[][6] = {
/* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
{ 0x00009a00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120, 0x0000a120 },
{ 0x00009a04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124, 0x0000a124 },
@@ -5582,7 +3694,7 @@ static const u_int32_t ar9287Modes_rx_gain_9287_1_0[][6] = {
{ 0x0000a848, 0x00000000, 0x00000000, 0x00001067, 0x00001067, 0x00001067 },
};
-static const u_int32_t ar9287PciePhy_clkreq_always_on_L1_9287_1_0[][2] = {
+static const u32 ar9287PciePhy_clkreq_always_on_L1_9287_1_0[][2] = {
{0x00004040, 0x9248fd00 },
{0x00004040, 0x24924924 },
{0x00004040, 0xa8000019 },
@@ -5595,7 +3707,7 @@ static const u_int32_t ar9287PciePhy_clkreq_always_on_L1_9287_1_0[][2] = {
{0x00004044, 0x00000000 },
};
-static const u_int32_t ar9287PciePhy_clkreq_off_L1_9287_1_0[][2] = {
+static const u32 ar9287PciePhy_clkreq_off_L1_9287_1_0[][2] = {
{0x00004040, 0x9248fd00 },
{0x00004040, 0x24924924 },
{0x00004040, 0xa8000019 },
@@ -5610,7 +3722,7 @@ static const u_int32_t ar9287PciePhy_clkreq_off_L1_9287_1_0[][2] = {
/* AR9287 Revision 11 */
-static const u_int32_t ar9287Modes_9287_1_1[][6] = {
+static const u32 ar9287Modes_9287_1_1[][6] = {
/* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
{ 0x00001030, 0x00000000, 0x00000000, 0x000002c0, 0x00000160, 0x000001e0 },
{ 0x00001070, 0x00000000, 0x00000000, 0x00000318, 0x0000018c, 0x000001e0 },
@@ -5657,7 +3769,7 @@ static const u_int32_t ar9287Modes_9287_1_1[][6] = {
{ 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
};
-static const u_int32_t ar9287Common_9287_1_1[][2] = {
+static const u32 ar9287Common_9287_1_1[][2] = {
{ 0x0000000c, 0x00000000 },
{ 0x00000030, 0x00020015 },
{ 0x00000034, 0x00000005 },
@@ -6027,21 +4139,22 @@ static const u_int32_t ar9287Common_9287_1_1[][2] = {
/*
* For Japanese regulatory requirements, 2484 MHz requires the following three
- * registers be programmed differently from the channel between 2412 and 2472 MHz.
+ * registers be programmed differently from the channel between 2412 and
+ * 2472 MHz.
*/
-static const u_int32_t ar9287Common_normal_cck_fir_coeff_92871_1[][2] = {
+static const u32 ar9287Common_normal_cck_fir_coeff_92871_1[][2] = {
{ 0x0000a1f4, 0x00fffeff },
{ 0x0000a1f8, 0x00f5f9ff },
{ 0x0000a1fc, 0xb79f6427 },
};
-static const u_int32_t ar9287Common_japan_2484_cck_fir_coeff_92871_1[][2] = {
+static const u32 ar9287Common_japan_2484_cck_fir_coeff_92871_1[][2] = {
{ 0x0000a1f4, 0x00000000 },
{ 0x0000a1f8, 0xefff0301 },
{ 0x0000a1fc, 0xca9228ee },
};
-static const u_int32_t ar9287Modes_tx_gain_9287_1_1[][6] = {
+static const u32 ar9287Modes_tx_gain_9287_1_1[][6] = {
/* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
{ 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
{ 0x0000a304, 0x00000000, 0x00000000, 0x00004002, 0x00004002, 0x00004002 },
@@ -6090,7 +4203,7 @@ static const u_int32_t ar9287Modes_tx_gain_9287_1_1[][6] = {
{ 0x0000a274, 0x0a180000, 0x0a180000, 0x0a1aa000, 0x0a1aa000, 0x0a1aa000 },
};
-static const u_int32_t ar9287Modes_rx_gain_9287_1_1[][6] = {
+static const u32 ar9287Modes_rx_gain_9287_1_1[][6] = {
/* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
{ 0x00009a00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120, 0x0000a120 },
{ 0x00009a04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124, 0x0000a124 },
@@ -6352,7 +4465,7 @@ static const u_int32_t ar9287Modes_rx_gain_9287_1_1[][6] = {
{ 0x0000a848, 0x00000000, 0x00000000, 0x00001067, 0x00001067, 0x00001067 },
};
-static const u_int32_t ar9287PciePhy_clkreq_always_on_L1_9287_1_1[][2] = {
+static const u32 ar9287PciePhy_clkreq_always_on_L1_9287_1_1[][2] = {
{0x00004040, 0x9248fd00 },
{0x00004040, 0x24924924 },
{0x00004040, 0xa8000019 },
@@ -6365,7 +4478,7 @@ static const u_int32_t ar9287PciePhy_clkreq_always_on_L1_9287_1_1[][2] = {
{0x00004044, 0x00000000 },
};
-static const u_int32_t ar9287PciePhy_clkreq_off_L1_9287_1_1[][2] = {
+static const u32 ar9287PciePhy_clkreq_off_L1_9287_1_1[][2] = {
{0x00004040, 0x9248fd00 },
{0x00004040, 0x24924924 },
{0x00004040, 0xa8000019 },
@@ -6380,7 +4493,7 @@ static const u_int32_t ar9287PciePhy_clkreq_off_L1_9287_1_1[][2] = {
/* AR9271 initialization values automatically created: 06/04/09 */
-static const u_int32_t ar9271Modes_9271[][6] = {
+static const u32 ar9271Modes_9271[][6] = {
{ 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
{ 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
{ 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
@@ -6441,7 +4554,7 @@ static const u_int32_t ar9271Modes_9271[][6] = {
{ 0x00009a44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 },
{ 0x00009a48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 },
{ 0x00009a4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 },
- { 0x00009a50, 0x00000000, 0x00000000, 0x00058220, 0x00058220, 0x00000000 },
+ { 0x00009a50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000 },
{ 0x00009a54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 },
{ 0x00009a58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 },
{ 0x00009a5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 },
@@ -6455,8 +4568,8 @@ static const u_int32_t ar9271Modes_9271[][6] = {
{ 0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 },
{ 0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 },
{ 0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 },
- { 0x00009a88, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
- { 0x00009a8c, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
+ { 0x00009a88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000 },
+ { 0x00009a8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
{ 0x00009a90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
{ 0x00009a94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 },
{ 0x00009a98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 },
@@ -6569,7 +4682,7 @@ static const u_int32_t ar9271Modes_9271[][6] = {
{ 0x0000aa44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 },
{ 0x0000aa48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 },
{ 0x0000aa4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 },
- { 0x0000aa50, 0x00000000, 0x00000000, 0x00058220, 0x00058220, 0x00000000 },
+ { 0x0000aa50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000 },
{ 0x0000aa54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 },
{ 0x0000aa58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 },
{ 0x0000aa5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 },
@@ -6583,8 +4696,8 @@ static const u_int32_t ar9271Modes_9271[][6] = {
{ 0x0000aa7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 },
{ 0x0000aa80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 },
{ 0x0000aa84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 },
- { 0x0000aa88, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
- { 0x0000aa8c, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
+ { 0x0000aa88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000 },
+ { 0x0000aa8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
{ 0x0000aa90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
{ 0x0000aa94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 },
{ 0x0000aa98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 },
@@ -6683,29 +4796,10 @@ static const u_int32_t ar9271Modes_9271[][6] = {
{ 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
{ 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
{ 0x0000a250, 0x0004f000, 0x0004f000, 0x0004a000, 0x0004a000, 0x0004a000 },
- { 0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a218652, 0x0a218652, 0x0a22a652 },
- { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 },
- { 0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000 },
- { 0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608, 0x00000000 },
- { 0x0000a310, 0x00000000, 0x00000000, 0x0001e610, 0x0001e610, 0x00000000 },
- { 0x0000a314, 0x00000000, 0x00000000, 0x0002d6d0, 0x0002d6d0, 0x00000000 },
- { 0x0000a318, 0x00000000, 0x00000000, 0x00039758, 0x00039758, 0x00000000 },
- { 0x0000a31c, 0x00000000, 0x00000000, 0x0003b759, 0x0003b759, 0x00000000 },
- { 0x0000a320, 0x00000000, 0x00000000, 0x0003d75a, 0x0003d75a, 0x00000000 },
- { 0x0000a324, 0x00000000, 0x00000000, 0x0004175c, 0x0004175c, 0x00000000 },
- { 0x0000a328, 0x00000000, 0x00000000, 0x0004575e, 0x0004575e, 0x00000000 },
- { 0x0000a32c, 0x00000000, 0x00000000, 0x0004979f, 0x0004979f, 0x00000000 },
- { 0x0000a330, 0x00000000, 0x00000000, 0x0004d7df, 0x0004d7df, 0x00000000 },
- { 0x0000a334, 0x000368de, 0x000368de, 0x000368de, 0x000368de, 0x00000000 },
- { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 },
- { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 },
- { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
- { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
{ 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e },
};
-static const u_int32_t ar9271Common_9271[][2] = {
+static const u32 ar9271Common_9271[][2] = {
{ 0x0000000c, 0x00000000 },
{ 0x00000030, 0x00020045 },
{ 0x00000034, 0x00000005 },
@@ -6910,13 +5004,10 @@ static const u_int32_t ar9271Common_9271[][2] = {
{ 0x00007810, 0x71c0d388 },
{ 0x00007814, 0x924934a8 },
{ 0x0000781c, 0x00000000 },
- { 0x00007820, 0x00000c04 },
- { 0x00007824, 0x00d8abff },
{ 0x00007828, 0x66964300 },
{ 0x0000782c, 0x8db6d961 },
{ 0x00007830, 0x8db6d96c },
{ 0x00007834, 0x6140008b },
- { 0x00007838, 0x00000029 },
{ 0x0000783c, 0x72ee0a72 },
{ 0x00007840, 0xbbfffffc },
{ 0x00007844, 0x000c0db6 },
@@ -6929,7 +5020,6 @@ static const u_int32_t ar9271Common_9271[][2] = {
{ 0x00007860, 0x21084210 },
{ 0x00007864, 0xf7d7ffde },
{ 0x00007868, 0xc2034080 },
- { 0x0000786c, 0x48609eb4 },
{ 0x00007870, 0x10142c00 },
{ 0x00009808, 0x00000000 },
{ 0x0000980c, 0xafe68e30 },
@@ -6982,9 +5072,6 @@ static const u_int32_t ar9271Common_9271[][2] = {
{ 0x000099e8, 0x3c466478 },
{ 0x000099ec, 0x0cc80caa },
{ 0x000099f0, 0x00000000 },
- { 0x0000a1f4, 0x00000000 },
- { 0x0000a1f8, 0x71733d01 },
- { 0x0000a1fc, 0xd0ad5c12 },
{ 0x0000a208, 0x803e68c8 },
{ 0x0000a210, 0x4080a333 },
{ 0x0000a214, 0x00206c10 },
@@ -7004,13 +5091,9 @@ static const u_int32_t ar9271Common_9271[][2] = {
{ 0x0000a260, 0xdfa90f01 },
{ 0x0000a268, 0x00000000 },
{ 0x0000a26c, 0x0ebae9e6 },
- { 0x0000a278, 0x3bdef7bd },
- { 0x0000a27c, 0x050e83bd },
{ 0x0000a388, 0x0c000000 },
{ 0x0000a38c, 0x20202020 },
{ 0x0000a390, 0x20202020 },
- { 0x0000a394, 0x3bdef7bd },
- { 0x0000a398, 0x000003bd },
{ 0x0000a39c, 0x00000001 },
{ 0x0000a3a0, 0x00000000 },
{ 0x0000a3a4, 0x00000000 },
@@ -7025,8 +5108,6 @@ static const u_int32_t ar9271Common_9271[][2] = {
{ 0x0000a3cc, 0x20202020 },
{ 0x0000a3d0, 0x20202020 },
{ 0x0000a3d4, 0x20202020 },
- { 0x0000a3dc, 0x3bdef7bd },
- { 0x0000a3e0, 0x000003bd },
{ 0x0000a3e4, 0x00000000 },
{ 0x0000a3e8, 0x18c43433 },
{ 0x0000a3ec, 0x00f70081 },
@@ -7046,7 +5127,104 @@ static const u_int32_t ar9271Common_9271[][2] = {
{ 0x0000d384, 0xf3307ff0 },
};
-static const u_int32_t ar9271Modes_9271_1_0_only[][6] = {
+static const u32 ar9271Common_normal_cck_fir_coeff_9271[][2] = {
+ { 0x0000a1f4, 0x00fffeff },
+ { 0x0000a1f8, 0x00f5f9ff },
+ { 0x0000a1fc, 0xb79f6427 },
+};
+
+static const u32 ar9271Common_japan_2484_cck_fir_coeff_9271[][2] = {
+ { 0x0000a1f4, 0x00000000 },
+ { 0x0000a1f8, 0xefff0301 },
+ { 0x0000a1fc, 0xca9228ee },
+};
+
+static const u32 ar9271Modes_9271_1_0_only[][6] = {
{ 0x00009910, 0x30002311, 0x30002311, 0x30002311, 0x30002311, 0x30002311 },
{ 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
};
+
+static const u32 ar9271Modes_9271_ANI_reg[][6] = {
+ { 0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2 },
+ { 0x0000985c, 0x3139605e, 0x3139605e, 0x3137605e, 0x3137605e, 0x3139605e },
+ { 0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e },
+ { 0x0000986c, 0x06903881, 0x06903881, 0x06903881, 0x06903881, 0x06903881 },
+ { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 },
+ { 0x0000a208, 0x803e68c8, 0x803e68c8, 0x803e68c8, 0x803e68c8, 0x803e68c8 },
+ { 0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d, 0xd00a800d },
+ { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 },
+};
+
+static const u32 ar9271Modes_normal_power_tx_gain_9271[][6] = {
+ { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 },
+ { 0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000 },
+ { 0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608, 0x00000000 },
+ { 0x0000a310, 0x00000000, 0x00000000, 0x0001e610, 0x0001e610, 0x00000000 },
+ { 0x0000a314, 0x00000000, 0x00000000, 0x0002d6d0, 0x0002d6d0, 0x00000000 },
+ { 0x0000a318, 0x00000000, 0x00000000, 0x00039758, 0x00039758, 0x00000000 },
+ { 0x0000a31c, 0x00000000, 0x00000000, 0x0003b759, 0x0003b759, 0x00000000 },
+ { 0x0000a320, 0x00000000, 0x00000000, 0x0003d75a, 0x0003d75a, 0x00000000 },
+ { 0x0000a324, 0x00000000, 0x00000000, 0x0004175c, 0x0004175c, 0x00000000 },
+ { 0x0000a328, 0x00000000, 0x00000000, 0x0004575e, 0x0004575e, 0x00000000 },
+ { 0x0000a32c, 0x00000000, 0x00000000, 0x0004979f, 0x0004979f, 0x00000000 },
+ { 0x0000a330, 0x00000000, 0x00000000, 0x0004d7df, 0x0004d7df, 0x00000000 },
+ { 0x0000a334, 0x000368de, 0x000368de, 0x000368de, 0x000368de, 0x00000000 },
+ { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 },
+ { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 },
+ { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x00007838, 0x00000029, 0x00000029, 0x00000029, 0x00000029, 0x00000029 },
+ { 0x00007824, 0x00d8abff, 0x00d8abff, 0x00d8abff, 0x00d8abff, 0x00d8abff },
+ { 0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4 },
+ { 0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04 },
+ { 0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a218652, 0x0a218652, 0x0a22a652 },
+ { 0x0000a278, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd },
+ { 0x0000a27c, 0x050e83bd, 0x050e83bd, 0x050e83bd, 0x050e83bd, 0x050e83bd },
+ { 0x0000a394, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd },
+ { 0x0000a398, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd },
+ { 0x0000a3dc, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd },
+ { 0x0000a3e0, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd },
+};
+
+static const u32 ar9271Modes_high_power_tx_gain_9271[][6] = {
+ { 0x0000a300, 0x00000000, 0x00000000, 0x00010000, 0x00010000, 0x00000000 },
+ { 0x0000a304, 0x00000000, 0x00000000, 0x00016200, 0x00016200, 0x00000000 },
+ { 0x0000a308, 0x00000000, 0x00000000, 0x00018201, 0x00018201, 0x00000000 },
+ { 0x0000a30c, 0x00000000, 0x00000000, 0x0001b240, 0x0001b240, 0x00000000 },
+ { 0x0000a310, 0x00000000, 0x00000000, 0x0001d241, 0x0001d241, 0x00000000 },
+ { 0x0000a314, 0x00000000, 0x00000000, 0x0001f600, 0x0001f600, 0x00000000 },
+ { 0x0000a318, 0x00000000, 0x00000000, 0x00022800, 0x00022800, 0x00000000 },
+ { 0x0000a31c, 0x00000000, 0x00000000, 0x00026802, 0x00026802, 0x00000000 },
+ { 0x0000a320, 0x00000000, 0x00000000, 0x0002b805, 0x0002b805, 0x00000000 },
+ { 0x0000a324, 0x00000000, 0x00000000, 0x0002ea41, 0x0002ea41, 0x00000000 },
+ { 0x0000a328, 0x00000000, 0x00000000, 0x00038b00, 0x00038b00, 0x00000000 },
+ { 0x0000a32c, 0x00000000, 0x00000000, 0x0003ab40, 0x0003ab40, 0x00000000 },
+ { 0x0000a330, 0x00000000, 0x00000000, 0x0003cd80, 0x0003cd80, 0x00000000 },
+ { 0x0000a334, 0x000368de, 0x000368de, 0x000368de, 0x000368de, 0x00000000 },
+ { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 },
+ { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 },
+ { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x00007838, 0x0000002b, 0x0000002b, 0x0000002b, 0x0000002b, 0x0000002b },
+ { 0x00007824, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff },
+ { 0x0000786c, 0x08609eb6, 0x08609eb6, 0x08609eba, 0x08609eba, 0x08609eb6 },
+ { 0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00 },
+ { 0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a212652, 0x0a212652, 0x0a22a652 },
+ { 0x0000a278, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 },
+ { 0x0000a27c, 0x05018063, 0x05038063, 0x05018063, 0x05018063, 0x05018063 },
+ { 0x0000a394, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63 },
+ { 0x0000a398, 0x00000063, 0x00000063, 0x00000063, 0x00000063, 0x00000063 },
+ { 0x0000a3dc, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63 },
+ { 0x0000a3e0, 0x00000063, 0x00000063, 0x00000063, 0x00000063, 0x00000063 },
+};
+
+#endif /* INITVALS_9002_10_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
new file mode 100644
index 000000000000..2be20d2070c4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -0,0 +1,480 @@
+/*
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+
+#define AR_BufLen 0x00000fff
+
+static void ar9002_hw_rx_enable(struct ath_hw *ah)
+{
+ REG_WRITE(ah, AR_CR, AR_CR_RXE);
+}
+
+static void ar9002_hw_set_desc_link(void *ds, u32 ds_link)
+{
+ ((struct ath_desc*) ds)->ds_link = ds_link;
+}
+
+static void ar9002_hw_get_desc_link(void *ds, u32 **ds_link)
+{
+ *ds_link = &((struct ath_desc *)ds)->ds_link;
+}
+
+static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
+{
+ u32 isr = 0;
+ u32 mask2 = 0;
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+ u32 sync_cause = 0;
+ bool fatal_int = false;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!AR_SREV_9100(ah)) {
+ if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
+ if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
+ == AR_RTC_STATUS_ON) {
+ isr = REG_READ(ah, AR_ISR);
+ }
+ }
+
+ sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) &
+ AR_INTR_SYNC_DEFAULT;
+
+ *masked = 0;
+
+ if (!isr && !sync_cause)
+ return false;
+ } else {
+ *masked = 0;
+ isr = REG_READ(ah, AR_ISR);
+ }
+
+ if (isr) {
+ if (isr & AR_ISR_BCNMISC) {
+ u32 isr2;
+ isr2 = REG_READ(ah, AR_ISR_S2);
+ if (isr2 & AR_ISR_S2_TIM)
+ mask2 |= ATH9K_INT_TIM;
+ if (isr2 & AR_ISR_S2_DTIM)
+ mask2 |= ATH9K_INT_DTIM;
+ if (isr2 & AR_ISR_S2_DTIMSYNC)
+ mask2 |= ATH9K_INT_DTIMSYNC;
+ if (isr2 & (AR_ISR_S2_CABEND))
+ mask2 |= ATH9K_INT_CABEND;
+ if (isr2 & AR_ISR_S2_GTT)
+ mask2 |= ATH9K_INT_GTT;
+ if (isr2 & AR_ISR_S2_CST)
+ mask2 |= ATH9K_INT_CST;
+ if (isr2 & AR_ISR_S2_TSFOOR)
+ mask2 |= ATH9K_INT_TSFOOR;
+ }
+
+ isr = REG_READ(ah, AR_ISR_RAC);
+ if (isr == 0xffffffff) {
+ *masked = 0;
+ return false;
+ }
+
+ *masked = isr & ATH9K_INT_COMMON;
+
+ if (ah->config.rx_intr_mitigation) {
+ if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
+ *masked |= ATH9K_INT_RX;
+ }
+
+ if (isr & (AR_ISR_RXOK | AR_ISR_RXERR))
+ *masked |= ATH9K_INT_RX;
+ if (isr &
+ (AR_ISR_TXOK | AR_ISR_TXDESC | AR_ISR_TXERR |
+ AR_ISR_TXEOL)) {
+ u32 s0_s, s1_s;
+
+ *masked |= ATH9K_INT_TX;
+
+ s0_s = REG_READ(ah, AR_ISR_S0_S);
+ ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
+ ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
+
+ s1_s = REG_READ(ah, AR_ISR_S1_S);
+ ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
+ ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
+ }
+
+ if (isr & AR_ISR_RXORN) {
+ ath_print(common, ATH_DBG_INTERRUPT,
+ "receive FIFO overrun interrupt\n");
+ }
+
+ if (!AR_SREV_9100(ah)) {
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
+ u32 isr5 = REG_READ(ah, AR_ISR_S5_S);
+ if (isr5 & AR_ISR_S5_TIM_TIMER)
+ *masked |= ATH9K_INT_TIM_TIMER;
+ }
+ }
+
+ *masked |= mask2;
+ }
+
+ if (AR_SREV_9100(ah))
+ return true;
+
+ if (isr & AR_ISR_GENTMR) {
+ u32 s5_s;
+
+ s5_s = REG_READ(ah, AR_ISR_S5_S);
+ if (isr & AR_ISR_GENTMR) {
+ ah->intr_gen_timer_trigger =
+ MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);
+
+ ah->intr_gen_timer_thresh =
+ MS(s5_s, AR_ISR_S5_GENTIMER_THRESH);
+
+ if (ah->intr_gen_timer_trigger)
+ *masked |= ATH9K_INT_GENTIMER;
+
+ }
+ }
+
+ if (sync_cause) {
+ fatal_int =
+ (sync_cause &
+ (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
+ ? true : false;
+
+ if (fatal_int) {
+ if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) {
+ ath_print(common, ATH_DBG_ANY,
+ "received PCI FATAL interrupt\n");
+ }
+ if (sync_cause & AR_INTR_SYNC_HOST1_PERR) {
+ ath_print(common, ATH_DBG_ANY,
+ "received PCI PERR interrupt\n");
+ }
+ *masked |= ATH9K_INT_FATAL;
+ }
+ if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
+ ath_print(common, ATH_DBG_INTERRUPT,
+ "AR_INTR_SYNC_RADM_CPL_TIMEOUT\n");
+ REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
+ REG_WRITE(ah, AR_RC, 0);
+ *masked |= ATH9K_INT_FATAL;
+ }
+ if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) {
+ ath_print(common, ATH_DBG_INTERRUPT,
+ "AR_INTR_SYNC_LOCAL_TIMEOUT\n");
+ }
+
+ REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
+ (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);
+ }
+
+ return true;
+}
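The function above folds the hardware's primary and sync interrupt cause registers into a single ATH9K_INT_* bitmask for the caller. A minimal sketch of how a top-half interrupt handler might consume that mask follows; the ath9k_hw_getisr() wrapper name and the handler shape are assumptions for illustration, not code added by this patch.

    /* Illustrative sketch only: consuming the mask produced by get_isr().
     * ath9k_hw_getisr() is assumed to be a thin wrapper around ops->get_isr(). */
    static irqreturn_t example_isr(int irq, void *dev)
    {
            struct ath_hw *ah = dev;
            enum ath9k_int masked = 0;

            if (!ath9k_hw_getisr(ah, &masked))
                    return IRQ_NONE;        /* not ours / nothing pending */

            if (masked & ATH9K_INT_FATAL) {
                    /* queue a chip reset */
            }
            if (masked & ATH9K_INT_RX) {
                    /* schedule the RX tasklet */
            }
            if (masked & ATH9K_INT_TX) {
                    /* schedule TX completion processing */
            }

            return IRQ_HANDLED;
    }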
+
+static void ar9002_hw_fill_txdesc(struct ath_hw *ah, void *ds, u32 seglen,
+ bool is_firstseg, bool is_lastseg,
+ const void *ds0, dma_addr_t buf_addr,
+ unsigned int qcu)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ ads->ds_data = buf_addr;
+
+ if (is_firstseg) {
+ ads->ds_ctl1 |= seglen | (is_lastseg ? 0 : AR_TxMore);
+ } else if (is_lastseg) {
+ ads->ds_ctl0 = 0;
+ ads->ds_ctl1 = seglen;
+ ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
+ ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
+ } else {
+ ads->ds_ctl0 = 0;
+ ads->ds_ctl1 = seglen | AR_TxMore;
+ ads->ds_ctl2 = 0;
+ ads->ds_ctl3 = 0;
+ }
+ ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
+ ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
+ ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
+ ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
+ ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
+}
+
+static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
+ struct ath_tx_status *ts)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ if ((ads->ds_txstatus9 & AR_TxDone) == 0)
+ return -EINPROGRESS;
+
+ ts->ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
+ ts->ts_tstamp = ads->AR_SendTimestamp;
+ ts->ts_status = 0;
+ ts->ts_flags = 0;
+
+ if (ads->ds_txstatus1 & AR_FrmXmitOK)
+ ts->ts_status |= ATH9K_TX_ACKED;
+ if (ads->ds_txstatus1 & AR_ExcessiveRetries)
+ ts->ts_status |= ATH9K_TXERR_XRETRY;
+ if (ads->ds_txstatus1 & AR_Filtered)
+ ts->ts_status |= ATH9K_TXERR_FILT;
+ if (ads->ds_txstatus1 & AR_FIFOUnderrun) {
+ ts->ts_status |= ATH9K_TXERR_FIFO;
+ ath9k_hw_updatetxtriglevel(ah, true);
+ }
+ if (ads->ds_txstatus9 & AR_TxOpExceeded)
+ ts->ts_status |= ATH9K_TXERR_XTXOP;
+ if (ads->ds_txstatus1 & AR_TxTimerExpired)
+ ts->ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
+
+ if (ads->ds_txstatus1 & AR_DescCfgErr)
+ ts->ts_flags |= ATH9K_TX_DESC_CFG_ERR;
+ if (ads->ds_txstatus1 & AR_TxDataUnderrun) {
+ ts->ts_flags |= ATH9K_TX_DATA_UNDERRUN;
+ ath9k_hw_updatetxtriglevel(ah, true);
+ }
+ if (ads->ds_txstatus1 & AR_TxDelimUnderrun) {
+ ts->ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
+ ath9k_hw_updatetxtriglevel(ah, true);
+ }
+ if (ads->ds_txstatus0 & AR_TxBaStatus) {
+ ts->ts_flags |= ATH9K_TX_BA;
+ ts->ba_low = ads->AR_BaBitmapLow;
+ ts->ba_high = ads->AR_BaBitmapHigh;
+ }
+
+ ts->ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx);
+ switch (ts->ts_rateindex) {
+ case 0:
+ ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0);
+ break;
+ case 1:
+ ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1);
+ break;
+ case 2:
+ ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2);
+ break;
+ case 3:
+ ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3);
+ break;
+ }
+
+ ts->ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined);
+ ts->ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
+ ts->ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
+ ts->ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
+ ts->ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
+ ts->ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
+ ts->ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
+ ts->evm0 = ads->AR_TxEVM0;
+ ts->evm1 = ads->AR_TxEVM1;
+ ts->evm2 = ads->AR_TxEVM2;
+ ts->ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
+ ts->ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
+ ts->ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);
+ ts->ts_antenna = 0;
+
+ return 0;
+}
+
+static void ar9002_hw_set11n_txdesc(struct ath_hw *ah, void *ds,
+ u32 pktLen, enum ath9k_pkt_type type,
+ u32 txPower, u32 keyIx,
+ enum ath9k_key_type keyType, u32 flags)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ txPower += ah->txpower_indexoffset;
+ if (txPower > 63)
+ txPower = 63;
+
+ ads->ds_ctl0 = (pktLen & AR_FrameLen)
+ | (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
+ | SM(txPower, AR_XmitPower)
+ | (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
+ | (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
+ | (flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0)
+ | (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0);
+
+ ads->ds_ctl1 =
+ (keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
+ | SM(type, AR_FrameType)
+ | (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
+ | (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
+ | (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);
+
+ ads->ds_ctl6 = SM(keyType, AR_EncrType);
+
+ if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) {
+ ads->ds_ctl8 = 0;
+ ads->ds_ctl9 = 0;
+ ads->ds_ctl10 = 0;
+ ads->ds_ctl11 = 0;
+ }
+}
+
+static void ar9002_hw_set11n_ratescenario(struct ath_hw *ah, void *ds,
+ void *lastds,
+ u32 durUpdateEn, u32 rtsctsRate,
+ u32 rtsctsDuration,
+ struct ath9k_11n_rate_series series[],
+ u32 nseries, u32 flags)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+ struct ar5416_desc *last_ads = AR5416DESC(lastds);
+ u32 ds_ctl0;
+
+ if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
+ ds_ctl0 = ads->ds_ctl0;
+
+ if (flags & ATH9K_TXDESC_RTSENA) {
+ ds_ctl0 &= ~AR_CTSEnable;
+ ds_ctl0 |= AR_RTSEnable;
+ } else {
+ ds_ctl0 &= ~AR_RTSEnable;
+ ds_ctl0 |= AR_CTSEnable;
+ }
+
+ ads->ds_ctl0 = ds_ctl0;
+ } else {
+ ads->ds_ctl0 =
+ (ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
+ }
+
+ ads->ds_ctl2 = set11nTries(series, 0)
+ | set11nTries(series, 1)
+ | set11nTries(series, 2)
+ | set11nTries(series, 3)
+ | (durUpdateEn ? AR_DurUpdateEna : 0)
+ | SM(0, AR_BurstDur);
+
+ ads->ds_ctl3 = set11nRate(series, 0)
+ | set11nRate(series, 1)
+ | set11nRate(series, 2)
+ | set11nRate(series, 3);
+
+ ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
+ | set11nPktDurRTSCTS(series, 1);
+
+ ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
+ | set11nPktDurRTSCTS(series, 3);
+
+ ads->ds_ctl7 = set11nRateFlags(series, 0)
+ | set11nRateFlags(series, 1)
+ | set11nRateFlags(series, 2)
+ | set11nRateFlags(series, 3)
+ | SM(rtsctsRate, AR_RTSCTSRate);
+ last_ads->ds_ctl2 = ads->ds_ctl2;
+ last_ads->ds_ctl3 = ads->ds_ctl3;
+}
+
+static void ar9002_hw_set11n_aggr_first(struct ath_hw *ah, void *ds,
+ u32 aggrLen)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
+ ads->ds_ctl6 &= ~AR_AggrLen;
+ ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
+}
+
+static void ar9002_hw_set11n_aggr_middle(struct ath_hw *ah, void *ds,
+ u32 numDelims)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+ unsigned int ctl6;
+
+ ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
+
+ ctl6 = ads->ds_ctl6;
+ ctl6 &= ~AR_PadDelim;
+ ctl6 |= SM(numDelims, AR_PadDelim);
+ ads->ds_ctl6 = ctl6;
+}
+
+static void ar9002_hw_set11n_aggr_last(struct ath_hw *ah, void *ds)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ ads->ds_ctl1 |= AR_IsAggr;
+ ads->ds_ctl1 &= ~AR_MoreAggr;
+ ads->ds_ctl6 &= ~AR_PadDelim;
+}
+
+static void ar9002_hw_clr11n_aggr(struct ath_hw *ah, void *ds)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
+}
+
+static void ar9002_hw_set11n_burstduration(struct ath_hw *ah, void *ds,
+ u32 burstDuration)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ ads->ds_ctl2 &= ~AR_BurstDur;
+ ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
+}
+
+static void ar9002_hw_set11n_virtualmorefrag(struct ath_hw *ah, void *ds,
+ u32 vmf)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ if (vmf)
+ ads->ds_ctl0 |= AR_VirtMoreFrag;
+ else
+ ads->ds_ctl0 &= ~AR_VirtMoreFrag;
+}
+
+void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
+ u32 size, u32 flags)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+
+ ads->ds_ctl1 = size & AR_BufLen;
+ if (flags & ATH9K_RXDESC_INTREQ)
+ ads->ds_ctl1 |= AR_RxIntrReq;
+
+ ads->ds_rxstatus8 &= ~AR_RxDone;
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
+ memset(&(ads->u), 0, sizeof(ads->u));
+}
+EXPORT_SYMBOL(ath9k_hw_setuprxdesc);
+
+void ar9002_hw_attach_mac_ops(struct ath_hw *ah)
+{
+ struct ath_hw_ops *ops = ath9k_hw_ops(ah);
+
+ ops->rx_enable = ar9002_hw_rx_enable;
+ ops->set_desc_link = ar9002_hw_set_desc_link;
+ ops->get_desc_link = ar9002_hw_get_desc_link;
+ ops->get_isr = ar9002_hw_get_isr;
+ ops->fill_txdesc = ar9002_hw_fill_txdesc;
+ ops->proc_txdesc = ar9002_hw_proc_txdesc;
+ ops->set11n_txdesc = ar9002_hw_set11n_txdesc;
+ ops->set11n_ratescenario = ar9002_hw_set11n_ratescenario;
+ ops->set11n_aggr_first = ar9002_hw_set11n_aggr_first;
+ ops->set11n_aggr_middle = ar9002_hw_set11n_aggr_middle;
+ ops->set11n_aggr_last = ar9002_hw_set11n_aggr_last;
+ ops->clr11n_aggr = ar9002_hw_clr11n_aggr;
+ ops->set11n_burstduration = ar9002_hw_set11n_burstduration;
+ ops->set11n_virtualmorefrag = ar9002_hw_set11n_virtualmorefrag;
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
new file mode 100644
index 000000000000..ed314e89bfe1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -0,0 +1,535 @@
+/*
+ * Copyright (c) 2008-2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * DOC: Programming Atheros 802.11n analog front end radios
+ *
+ * AR5416 MAC based PCI devices and AR5418 MAC based PCI-Express
+ * devices have either an external AR2133 analog front end radio for single
+ * band 2.4 GHz communication or an AR5133 analog front end radio for dual
+ * band 2.4 GHz / 5 GHz communication.
+ *
+ * All devices after the AR5416 and AR5418 family starting with the AR9280
+ * have their analog front end radio, MAC/BB and host PCIe/USB interface
+ * embedded into a single chip and require less programming.
+ *
+ * The following single-chips exist with a respective embedded radio:
+ *
+ * AR9280 - 11n dual-band 2x2 MIMO for PCIe
+ * AR9281 - 11n single-band 1x2 MIMO for PCIe
+ * AR9285 - 11n single-band 1x1 for PCIe
+ * AR9287 - 11n single-band 2x2 MIMO for PCIe
+ *
+ * AR9220 - 11n dual-band 2x2 MIMO for PCI
+ * AR9223 - 11n single-band 2x2 MIMO for PCI
+ *
+ * AR9287 - 11n single-band 1x1 MIMO for USB
+ */
+
+#include "hw.h"
+#include "ar9002_phy.h"
+
+/**
+ * ar9002_hw_set_channel - set channel on single-chip device
+ * @ah: atheros hardware structure
+ * @chan: channel to tune to
+ *
+ * This is the function to change channel on single-chip devices, that is,
+ * all single-chip devices from the AR9280 onwards.
+ *
+ * This function takes the channel value in MHz and programs the
+ * corresponding hardware channel value. It assumes writes to the analog bus
+ * have been enabled.
+ *
+ * Actual Expression,
+ *
+ * For 2GHz channel,
+ * Channel Frequency = (3/4) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17)
+ * (freq_ref = 40MHz)
+ *
+ * For 5GHz channel,
+ * Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^10)
+ * (freq_ref = 40MHz/(24>>amodeRefSel))
+ */
+static int ar9002_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ u16 bMode, fracMode, aModeRefSel = 0;
+ u32 freq, ndiv, channelSel = 0, channelFrac = 0, reg32 = 0;
+ struct chan_centers centers;
+ u32 refDivA = 24;
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ freq = centers.synth_center;
+
+ reg32 = REG_READ(ah, AR_PHY_SYNTH_CONTROL);
+ reg32 &= 0xc0000000;
+
+ if (freq < 4800) { /* 2 GHz, fractional mode */
+ u32 txctl;
+ int regWrites = 0;
+
+ bMode = 1;
+ fracMode = 1;
+ aModeRefSel = 0;
+ channelSel = CHANSEL_2G(freq);
+
+ if (AR_SREV_9287_11_OR_LATER(ah)) {
+ if (freq == 2484) {
+ /* Enable channel spreading for channel 14 */
+ REG_WRITE_ARRAY(&ah->iniCckfirJapan2484,
+ 1, regWrites);
+ } else {
+ REG_WRITE_ARRAY(&ah->iniCckfirNormal,
+ 1, regWrites);
+ }
+ } else {
+ txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
+ if (freq == 2484) {
+ /* Enable channel spreading for channel 14 */
+ REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
+ txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
+ } else {
+ REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
+ txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN);
+ }
+ }
+ } else {
+ bMode = 0;
+ fracMode = 0;
+
+ switch (ah->eep_ops->get_eeprom(ah, EEP_FRAC_N_5G)) {
+ case 0:
+ if ((freq % 20) == 0)
+ aModeRefSel = 3;
+ else if ((freq % 10) == 0)
+ aModeRefSel = 2;
+ if (aModeRefSel)
+ break;
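+ /* 5 MHz-spaced channel: fall through to fractional mode */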
+ case 1:
+ default:
+ aModeRefSel = 0;
+ /*
+ * Enable 2G (fractional) mode for channels
+ * which are 5MHz spaced.
+ */
+ fracMode = 1;
+ refDivA = 1;
+ channelSel = CHANSEL_5G(freq);
+
+ /* RefDivA setting */
+ REG_RMW_FIELD(ah, AR_AN_SYNTH9,
+ AR_AN_SYNTH9_REFDIVA, refDivA);
+
+ }
+
+ if (!fracMode) {
+ ndiv = (freq * (refDivA >> aModeRefSel)) / 60;
+ channelSel = ndiv & 0x1ff;
+ channelFrac = (ndiv & 0xfffffe00) * 2;
+ channelSel = (channelSel << 17) | channelFrac;
+ }
+ }
+
+ reg32 = reg32 |
+ (bMode << 29) |
+ (fracMode << 28) | (aModeRefSel << 26) | (channelSel);
+
+ REG_WRITE(ah, AR_PHY_SYNTH_CONTROL, reg32);
+
+ ah->curchan = chan;
+ ah->curchan_rad_index = -1;
+
+ return 0;
+}
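As a rough check of the 2 GHz expression in the kernel-doc above (Channel Frequency = (3/4) * 40 MHz * (chansel + chanfrac/2^17)), the sketch below derives the two fields for a given frequency in MHz. It is illustrative arithmetic only and deliberately avoids the driver's own CHANSEL_2G() helper.

    /* Illustrative only: chansel + chanfrac/2^17 = freq / 30 for the 2 GHz band. */
    static void example_chansel_2g(u32 freq_mhz, u32 *chansel, u32 *chanfrac)
    {
            u64 total = ((u64)freq_mhz << 17) / 30; /* freq/30 in units of 2^-17 */

            *chansel  = (u32)(total >> 17);         /* integer part (9 bits used) */
            *chanfrac = (u32)(total & 0x1ffff);     /* fractional part (17 bits) */
    }

For example, freq_mhz = 2412 yields chansel = 80 and chanfrac = 52428, and 30 * (80 + 52428/131072) is approximately 2412 MHz.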
+
+/**
+ * ar9002_hw_spur_mitigate - convert baseband spur frequency
+ * @ah: atheros hardware structure
+ * @chan: channel being configured
+ *
+ * For single-chip solutions. Converts the input channel frequency to a
+ * baseband spur frequency and computes the register settings below.
+ */
+static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ int bb_spur = AR_NO_SPUR;
+ int freq;
+ int bin, cur_bin;
+ int bb_spur_off, spur_subchannel_sd;
+ int spur_freq_sd;
+ int spur_delta_phase;
+ int denominator;
+ int upper, lower, cur_vit_mask;
+ int tmp, newVal;
+ int i;
+ int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
+ AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
+ };
+ int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
+ AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
+ };
+ int inc[4] = { 0, 100, 0, 0 };
+ struct chan_centers centers;
+
+ int8_t mask_m[123];
+ int8_t mask_p[123];
+ int8_t mask_amt;
+ int tmp_mask;
+ int cur_bb_spur;
+ bool is2GHz = IS_CHAN_2GHZ(chan);
+
+ memset(&mask_m, 0, sizeof(int8_t) * 123);
+ memset(&mask_p, 0, sizeof(int8_t) * 123);
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ freq = centers.synth_center;
+
+ ah->config.spurmode = SPUR_ENABLE_EEPROM;
+ for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
+ cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
+
+ if (is2GHz)
+ cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_2GHZ;
+ else
+ cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_5GHZ;
+
+ if (AR_NO_SPUR == cur_bb_spur)
+ break;
+ cur_bb_spur = cur_bb_spur - freq;
+
+ if (IS_CHAN_HT40(chan)) {
+ if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT40) &&
+ (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT40)) {
+ bb_spur = cur_bb_spur;
+ break;
+ }
+ } else if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT20) &&
+ (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT20)) {
+ bb_spur = cur_bb_spur;
+ break;
+ }
+ }
+
+ if (AR_NO_SPUR == bb_spur) {
+ REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
+ AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
+ return;
+ } else {
+ REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
+ AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
+ }
+
+ bin = bb_spur * 320;
+
+ tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ newVal = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
+ AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
+ AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
+ AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
+ REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), newVal);
+
+ newVal = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
+ AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
+ AR_PHY_SPUR_REG_MASK_RATE_SELECT |
+ AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
+ SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
+ REG_WRITE(ah, AR_PHY_SPUR_REG, newVal);
+
+ if (IS_CHAN_HT40(chan)) {
+ if (bb_spur < 0) {
+ spur_subchannel_sd = 1;
+ bb_spur_off = bb_spur + 10;
+ } else {
+ spur_subchannel_sd = 0;
+ bb_spur_off = bb_spur - 10;
+ }
+ } else {
+ spur_subchannel_sd = 0;
+ bb_spur_off = bb_spur;
+ }
+
+ if (IS_CHAN_HT40(chan))
+ spur_delta_phase =
+ ((bb_spur * 262144) /
+ 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
+ else
+ spur_delta_phase =
+ ((bb_spur * 524288) /
+ 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
+
+ denominator = IS_CHAN_2GHZ(chan) ? 44 : 40;
+ spur_freq_sd = ((bb_spur_off * 2048) / denominator) & 0x3ff;
+
+ newVal = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
+ SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
+ SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
+ REG_WRITE(ah, AR_PHY_TIMING11, newVal);
+
+ newVal = spur_subchannel_sd << AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S;
+ REG_WRITE(ah, AR_PHY_SFCORR_EXT, newVal);
+
+ cur_bin = -6000;
+ upper = bin + 100;
+ lower = bin - 100;
+
+ for (i = 0; i < 4; i++) {
+ int pilot_mask = 0;
+ int chan_mask = 0;
+ int bp = 0;
+ for (bp = 0; bp < 30; bp++) {
+ if ((cur_bin > lower) && (cur_bin < upper)) {
+ pilot_mask = pilot_mask | 0x1 << bp;
+ chan_mask = chan_mask | 0x1 << bp;
+ }
+ cur_bin += 100;
+ }
+ cur_bin += inc[i];
+ REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
+ REG_WRITE(ah, chan_mask_reg[i], chan_mask);
+ }
+
+ cur_vit_mask = 6100;
+ upper = bin + 120;
+ lower = bin - 120;
+
+ for (i = 0; i < 123; i++) {
+ if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
+
+ /* workaround for gcc bug #37014 */
+ volatile int tmp_v = abs(cur_vit_mask - bin);
+
+ if (tmp_v < 75)
+ mask_amt = 1;
+ else
+ mask_amt = 0;
+ if (cur_vit_mask < 0)
+ mask_m[abs(cur_vit_mask / 100)] = mask_amt;
+ else
+ mask_p[cur_vit_mask / 100] = mask_amt;
+ }
+ cur_vit_mask -= 100;
+ }
+
+ tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
+ | (mask_m[48] << 26) | (mask_m[49] << 24)
+ | (mask_m[50] << 22) | (mask_m[51] << 20)
+ | (mask_m[52] << 18) | (mask_m[53] << 16)
+ | (mask_m[54] << 14) | (mask_m[55] << 12)
+ | (mask_m[56] << 10) | (mask_m[57] << 8)
+ | (mask_m[58] << 6) | (mask_m[59] << 4)
+ | (mask_m[60] << 2) | (mask_m[61] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
+ REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
+
+ tmp_mask = (mask_m[31] << 28)
+ | (mask_m[32] << 26) | (mask_m[33] << 24)
+ | (mask_m[34] << 22) | (mask_m[35] << 20)
+ | (mask_m[36] << 18) | (mask_m[37] << 16)
+ | (mask_m[48] << 14) | (mask_m[39] << 12)
+ | (mask_m[40] << 10) | (mask_m[41] << 8)
+ | (mask_m[42] << 6) | (mask_m[43] << 4)
+ | (mask_m[44] << 2) | (mask_m[45] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
+
+ tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
+ | (mask_m[18] << 26) | (mask_m[18] << 24)
+ | (mask_m[20] << 22) | (mask_m[20] << 20)
+ | (mask_m[22] << 18) | (mask_m[22] << 16)
+ | (mask_m[24] << 14) | (mask_m[24] << 12)
+ | (mask_m[25] << 10) | (mask_m[26] << 8)
+ | (mask_m[27] << 6) | (mask_m[28] << 4)
+ | (mask_m[29] << 2) | (mask_m[30] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
+
+ tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
+ | (mask_m[2] << 26) | (mask_m[3] << 24)
+ | (mask_m[4] << 22) | (mask_m[5] << 20)
+ | (mask_m[6] << 18) | (mask_m[7] << 16)
+ | (mask_m[8] << 14) | (mask_m[9] << 12)
+ | (mask_m[10] << 10) | (mask_m[11] << 8)
+ | (mask_m[12] << 6) | (mask_m[13] << 4)
+ | (mask_m[14] << 2) | (mask_m[15] << 0);
+ REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
+
+ tmp_mask = (mask_p[15] << 28)
+ | (mask_p[14] << 26) | (mask_p[13] << 24)
+ | (mask_p[12] << 22) | (mask_p[11] << 20)
+ | (mask_p[10] << 18) | (mask_p[9] << 16)
+ | (mask_p[8] << 14) | (mask_p[7] << 12)
+ | (mask_p[6] << 10) | (mask_p[5] << 8)
+ | (mask_p[4] << 6) | (mask_p[3] << 4)
+ | (mask_p[2] << 2) | (mask_p[1] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
+
+ tmp_mask = (mask_p[30] << 28)
+ | (mask_p[29] << 26) | (mask_p[28] << 24)
+ | (mask_p[27] << 22) | (mask_p[26] << 20)
+ | (mask_p[25] << 18) | (mask_p[24] << 16)
+ | (mask_p[23] << 14) | (mask_p[22] << 12)
+ | (mask_p[21] << 10) | (mask_p[20] << 8)
+ | (mask_p[19] << 6) | (mask_p[18] << 4)
+ | (mask_p[17] << 2) | (mask_p[16] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
+
+ tmp_mask = (mask_p[45] << 28)
+ | (mask_p[44] << 26) | (mask_p[43] << 24)
+ | (mask_p[42] << 22) | (mask_p[41] << 20)
+ | (mask_p[40] << 18) | (mask_p[39] << 16)
+ | (mask_p[38] << 14) | (mask_p[37] << 12)
+ | (mask_p[36] << 10) | (mask_p[35] << 8)
+ | (mask_p[34] << 6) | (mask_p[33] << 4)
+ | (mask_p[32] << 2) | (mask_p[31] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
+
+ tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
+ | (mask_p[59] << 26) | (mask_p[58] << 24)
+ | (mask_p[57] << 22) | (mask_p[56] << 20)
+ | (mask_p[55] << 18) | (mask_p[54] << 16)
+ | (mask_p[53] << 14) | (mask_p[52] << 12)
+ | (mask_p[51] << 10) | (mask_p[50] << 8)
+ | (mask_p[49] << 6) | (mask_p[48] << 4)
+ | (mask_p[47] << 2) | (mask_p[46] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+}
+
+static void ar9002_olc_init(struct ath_hw *ah)
+{
+ u32 i;
+
+ if (!OLC_FOR_AR9280_20_LATER)
+ return;
+
+ if (OLC_FOR_AR9287_10_LATER) {
+ REG_SET_BIT(ah, AR_PHY_TX_PWRCTRL9,
+ AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL);
+ ath9k_hw_analog_shift_rmw(ah, AR9287_AN_TXPC0,
+ AR9287_AN_TXPC0_TXPCMODE,
+ AR9287_AN_TXPC0_TXPCMODE_S,
+ AR9287_AN_TXPC0_TXPCMODE_TEMPSENSE);
+ udelay(100);
+ } else {
+ for (i = 0; i < AR9280_TX_GAIN_TABLE_SIZE; i++)
+ ah->originalGain[i] =
+ MS(REG_READ(ah, AR_PHY_TX_GAIN_TBL1 + i * 4),
+ AR_PHY_TX_GAIN);
+ ah->PDADCdelta = 0;
+ }
+}
+
+static u32 ar9002_hw_compute_pll_control(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 pll;
+
+ pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
+
+ if (chan && IS_CHAN_HALF_RATE(chan))
+ pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
+ else if (chan && IS_CHAN_QUARTER_RATE(chan))
+ pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
+
+ if (chan && IS_CHAN_5GHZ(chan)) {
+ if (IS_CHAN_A_FAST_CLOCK(ah, chan))
+ pll = 0x142c;
+ else if (AR_SREV_9280_20(ah))
+ pll = 0x2850;
+ else
+ pll |= SM(0x28, AR_RTC_9160_PLL_DIV);
+ } else {
+ pll |= SM(0x2c, AR_RTC_9160_PLL_DIV);
+ }
+
+ return pll;
+}
+
+static void ar9002_hw_do_getnf(struct ath_hw *ah,
+ int16_t nfarray[NUM_NF_READINGS])
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int16_t nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_CCA), AR9280_PHY_MINCCA_PWR);
+
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ctl] [chain 0] is %d\n", nf);
+
+ if (AR_SREV_9271(ah) && (nf >= -114))
+ nf = -116;
+
+ nfarray[0] = nf;
+
+ if (!AR_SREV_9285(ah) && !AR_SREV_9271(ah)) {
+ nf = MS(REG_READ(ah, AR_PHY_CH1_CCA),
+ AR9280_PHY_CH1_MINCCA_PWR);
+
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ctl] [chain 1] is %d\n", nf);
+ nfarray[1] = nf;
+ }
+
+ nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR9280_PHY_EXT_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ext] [chain 0] is %d\n", nf);
+
+ if (AR_SREV_9271(ah) && (nf >= -114))
+ nf = -116;
+
+ nfarray[3] = nf;
+
+ if (!AR_SREV_9285(ah) && !AR_SREV_9271(ah)) {
+ nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA),
+ AR9280_PHY_CH1_EXT_MINCCA_PWR);
+
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ext] [chain 1] is %d\n", nf);
+ nfarray[4] = nf;
+ }
+}
+
+void ar9002_hw_attach_phy_ops(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+
+ priv_ops->set_rf_regs = NULL;
+ priv_ops->rf_alloc_ext_banks = NULL;
+ priv_ops->rf_free_ext_banks = NULL;
+ priv_ops->rf_set_freq = ar9002_hw_set_channel;
+ priv_ops->spur_mitigate_freq = ar9002_hw_spur_mitigate;
+ priv_ops->olc_init = ar9002_olc_init;
+ priv_ops->compute_pll_control = ar9002_hw_compute_pll_control;
+ priv_ops->do_getnf = ar9002_hw_do_getnf;
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.h b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
new file mode 100644
index 000000000000..81bf6e5840e1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
@@ -0,0 +1,572 @@
+/*
+ * Copyright (c) 2008-2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef AR9002_PHY_H
+#define AR9002_PHY_H
+
+#define AR_PHY_TEST 0x9800
+#define PHY_AGC_CLR 0x10000000
+#define RFSILENT_BB 0x00002000
+
+#define AR_PHY_TURBO 0x9804
+#define AR_PHY_FC_TURBO_MODE 0x00000001
+#define AR_PHY_FC_TURBO_SHORT 0x00000002
+#define AR_PHY_FC_DYN2040_EN 0x00000004
+#define AR_PHY_FC_DYN2040_PRI_ONLY 0x00000008
+#define AR_PHY_FC_DYN2040_PRI_CH 0x00000010
+/* For 25 MHz channel spacing -- not used but supported by hw */
+#define AR_PHY_FC_DYN2040_EXT_CH 0x00000020
+#define AR_PHY_FC_HT_EN 0x00000040
+#define AR_PHY_FC_SHORT_GI_40 0x00000080
+#define AR_PHY_FC_WALSH 0x00000100
+#define AR_PHY_FC_SINGLE_HT_LTF1 0x00000200
+#define AR_PHY_FC_ENABLE_DAC_FIFO 0x00000800
+
+#define AR_PHY_TEST2 0x9808
+
+#define AR_PHY_TIMING2 0x9810
+#define AR_PHY_TIMING3 0x9814
+#define AR_PHY_TIMING3_DSC_MAN 0xFFFE0000
+#define AR_PHY_TIMING3_DSC_MAN_S 17
+#define AR_PHY_TIMING3_DSC_EXP 0x0001E000
+#define AR_PHY_TIMING3_DSC_EXP_S 13
+
+#define AR_PHY_CHIP_ID_REV_0 0x80
+#define AR_PHY_CHIP_ID_REV_1 0x81
+#define AR_PHY_CHIP_ID_9160_REV_0 0xb0
+
+#define AR_PHY_ACTIVE 0x981C
+#define AR_PHY_ACTIVE_EN 0x00000001
+#define AR_PHY_ACTIVE_DIS 0x00000000
+
+#define AR_PHY_RF_CTL2 0x9824
+#define AR_PHY_TX_END_DATA_START 0x000000FF
+#define AR_PHY_TX_END_DATA_START_S 0
+#define AR_PHY_TX_END_PA_ON 0x0000FF00
+#define AR_PHY_TX_END_PA_ON_S 8
+
+#define AR_PHY_RF_CTL3 0x9828
+#define AR_PHY_TX_END_TO_A2_RX_ON 0x00FF0000
+#define AR_PHY_TX_END_TO_A2_RX_ON_S 16
+
+#define AR_PHY_ADC_CTL 0x982C
+#define AR_PHY_ADC_CTL_OFF_INBUFGAIN 0x00000003
+#define AR_PHY_ADC_CTL_OFF_INBUFGAIN_S 0
+#define AR_PHY_ADC_CTL_OFF_PWDDAC 0x00002000
+#define AR_PHY_ADC_CTL_OFF_PWDBANDGAP 0x00004000
+#define AR_PHY_ADC_CTL_OFF_PWDADC 0x00008000
+#define AR_PHY_ADC_CTL_ON_INBUFGAIN 0x00030000
+#define AR_PHY_ADC_CTL_ON_INBUFGAIN_S 16
+
+#define AR_PHY_ADC_SERIAL_CTL 0x9830
+#define AR_PHY_SEL_INTERNAL_ADDAC 0x00000000
+#define AR_PHY_SEL_EXTERNAL_RADIO 0x00000001
+
+#define AR_PHY_RF_CTL4 0x9834
+#define AR_PHY_RF_CTL4_TX_END_XPAB_OFF 0xFF000000
+#define AR_PHY_RF_CTL4_TX_END_XPAB_OFF_S 24
+#define AR_PHY_RF_CTL4_TX_END_XPAA_OFF 0x00FF0000
+#define AR_PHY_RF_CTL4_TX_END_XPAA_OFF_S 16
+#define AR_PHY_RF_CTL4_FRAME_XPAB_ON 0x0000FF00
+#define AR_PHY_RF_CTL4_FRAME_XPAB_ON_S 8
+#define AR_PHY_RF_CTL4_FRAME_XPAA_ON 0x000000FF
+#define AR_PHY_RF_CTL4_FRAME_XPAA_ON_S 0
+
+#define AR_PHY_TSTDAC_CONST 0x983c
+
+#define AR_PHY_SETTLING 0x9844
+#define AR_PHY_SETTLING_SWITCH 0x00003F80
+#define AR_PHY_SETTLING_SWITCH_S 7
+
+#define AR_PHY_RXGAIN 0x9848
+#define AR_PHY_RXGAIN_TXRX_ATTEN 0x0003F000
+#define AR_PHY_RXGAIN_TXRX_ATTEN_S 12
+#define AR_PHY_RXGAIN_TXRX_RF_MAX 0x007C0000
+#define AR_PHY_RXGAIN_TXRX_RF_MAX_S 18
+#define AR9280_PHY_RXGAIN_TXRX_ATTEN 0x00003F80
+#define AR9280_PHY_RXGAIN_TXRX_ATTEN_S 7
+#define AR9280_PHY_RXGAIN_TXRX_MARGIN 0x001FC000
+#define AR9280_PHY_RXGAIN_TXRX_MARGIN_S 14
+
+#define AR_PHY_DESIRED_SZ 0x9850
+#define AR_PHY_DESIRED_SZ_ADC 0x000000FF
+#define AR_PHY_DESIRED_SZ_ADC_S 0
+#define AR_PHY_DESIRED_SZ_PGA 0x0000FF00
+#define AR_PHY_DESIRED_SZ_PGA_S 8
+#define AR_PHY_DESIRED_SZ_TOT_DES 0x0FF00000
+#define AR_PHY_DESIRED_SZ_TOT_DES_S 20
+
+#define AR_PHY_FIND_SIG 0x9858
+#define AR_PHY_FIND_SIG_FIRSTEP 0x0003F000
+#define AR_PHY_FIND_SIG_FIRSTEP_S 12
+#define AR_PHY_FIND_SIG_FIRPWR 0x03FC0000
+#define AR_PHY_FIND_SIG_FIRPWR_S 18
+
+#define AR_PHY_AGC_CTL1 0x985C
+#define AR_PHY_AGC_CTL1_COARSE_LOW 0x00007F80
+#define AR_PHY_AGC_CTL1_COARSE_LOW_S 7
+#define AR_PHY_AGC_CTL1_COARSE_HIGH 0x003F8000
+#define AR_PHY_AGC_CTL1_COARSE_HIGH_S 15
+
+#define AR_PHY_CCA 0x9864
+#define AR_PHY_MINCCA_PWR 0x0FF80000
+#define AR_PHY_MINCCA_PWR_S 19
+#define AR_PHY_CCA_THRESH62 0x0007F000
+#define AR_PHY_CCA_THRESH62_S 12
+#define AR9280_PHY_MINCCA_PWR 0x1FF00000
+#define AR9280_PHY_MINCCA_PWR_S 20
+#define AR9280_PHY_CCA_THRESH62 0x000FF000
+#define AR9280_PHY_CCA_THRESH62_S 12
+
+#define AR_PHY_SFCORR_LOW 0x986C
+#define AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW 0x00000001
+#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW 0x00003F00
+#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW_S 8
+#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW 0x001FC000
+#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW_S 14
+#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW 0x0FE00000
+#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW_S 21
+
+#define AR_PHY_SFCORR 0x9868
+#define AR_PHY_SFCORR_M2COUNT_THR 0x0000001F
+#define AR_PHY_SFCORR_M2COUNT_THR_S 0
+#define AR_PHY_SFCORR_M1_THRESH 0x00FE0000
+#define AR_PHY_SFCORR_M1_THRESH_S 17
+#define AR_PHY_SFCORR_M2_THRESH 0x7F000000
+#define AR_PHY_SFCORR_M2_THRESH_S 24
+
+#define AR_PHY_SLEEP_CTR_CONTROL 0x9870
+#define AR_PHY_SLEEP_CTR_LIMIT 0x9874
+#define AR_PHY_SYNTH_CONTROL 0x9874
+#define AR_PHY_SLEEP_SCAL 0x9878
+
+#define AR_PHY_PLL_CTL 0x987c
+#define AR_PHY_PLL_CTL_40 0xaa
+#define AR_PHY_PLL_CTL_40_5413 0x04
+#define AR_PHY_PLL_CTL_44 0xab
+#define AR_PHY_PLL_CTL_44_2133 0xeb
+#define AR_PHY_PLL_CTL_40_2133 0xea
+
+#define AR_PHY_SPECTRAL_SCAN 0x9910 /* AR9280 spectral scan configuration register */
+#define AR_PHY_SPECTRAL_SCAN_ENABLE 0x1
+#define AR_PHY_SPECTRAL_SCAN_ENA 0x00000001 /* Enable spectral scan, reg 68, bit 0 */
+#define AR_PHY_SPECTRAL_SCAN_ENA_S 0 /* Enable spectral scan, reg 68, bit 0 */
+#define AR_PHY_SPECTRAL_SCAN_ACTIVE 0x00000002 /* Activate spectral scan reg 68, bit 1*/
+#define AR_PHY_SPECTRAL_SCAN_ACTIVE_S 1 /* Activate spectral scan reg 68, bit 1*/
+#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD 0x000000F0 /* Interval for FFT reports, reg 68, bits 4-7*/
+#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD_S 4
+#define AR_PHY_SPECTRAL_SCAN_PERIOD 0x0000FF00 /* Interval for FFT reports, reg 68, bits 8-15*/
+#define AR_PHY_SPECTRAL_SCAN_PERIOD_S 8
+#define AR_PHY_SPECTRAL_SCAN_COUNT 0x00FF0000 /* Number of reports, reg 68, bits 16-23*/
+#define AR_PHY_SPECTRAL_SCAN_COUNT_S 16
+#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT 0x01000000 /* Short repeat, reg 68, bit 24*/
+#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_S 24 /* Short repeat, reg 68, bit 24*/
+
+#define AR_PHY_RX_DELAY 0x9914
+#define AR_PHY_SEARCH_START_DELAY 0x9918
+#define AR_PHY_RX_DELAY_DELAY 0x00003FFF
+
+#define AR_PHY_TIMING_CTRL4(_i) (0x9920 + ((_i) << 12))
+#define AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF 0x01F
+#define AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF_S 0
+#define AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF 0x7E0
+#define AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF_S 5
+#define AR_PHY_TIMING_CTRL4_IQCORR_ENABLE 0x800
+#define AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX 0xF000
+#define AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX_S 12
+#define AR_PHY_TIMING_CTRL4_DO_CAL 0x10000
+
+#define AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI 0x80000000
+#define AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER 0x40000000
+#define AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK 0x20000000
+#define AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK 0x10000000
+
+#define AR_PHY_TIMING5 0x9924
+#define AR_PHY_TIMING5_CYCPWR_THR1 0x000000FE
+#define AR_PHY_TIMING5_CYCPWR_THR1_S 1
+
+#define AR_PHY_POWER_TX_RATE1 0x9934
+#define AR_PHY_POWER_TX_RATE2 0x9938
+#define AR_PHY_POWER_TX_RATE_MAX 0x993c
+#define AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE 0x00000040
+
+#define AR_PHY_FRAME_CTL 0x9944
+#define AR_PHY_FRAME_CTL_TX_CLIP 0x00000038
+#define AR_PHY_FRAME_CTL_TX_CLIP_S 3
+
+#define AR_PHY_TXPWRADJ 0x994C
+#define AR_PHY_TXPWRADJ_CCK_GAIN_DELTA 0x00000FC0
+#define AR_PHY_TXPWRADJ_CCK_GAIN_DELTA_S 6
+#define AR_PHY_TXPWRADJ_CCK_PCDAC_INDEX 0x00FC0000
+#define AR_PHY_TXPWRADJ_CCK_PCDAC_INDEX_S 18
+
+#define AR_PHY_RADAR_EXT 0x9940
+#define AR_PHY_RADAR_EXT_ENA 0x00004000
+
+#define AR_PHY_RADAR_0 0x9954
+#define AR_PHY_RADAR_0_ENA 0x00000001
+#define AR_PHY_RADAR_0_FFT_ENA 0x80000000
+#define AR_PHY_RADAR_0_INBAND 0x0000003e
+#define AR_PHY_RADAR_0_INBAND_S 1
+#define AR_PHY_RADAR_0_PRSSI 0x00000FC0
+#define AR_PHY_RADAR_0_PRSSI_S 6
+#define AR_PHY_RADAR_0_HEIGHT 0x0003F000
+#define AR_PHY_RADAR_0_HEIGHT_S 12
+#define AR_PHY_RADAR_0_RRSSI 0x00FC0000
+#define AR_PHY_RADAR_0_RRSSI_S 18
+#define AR_PHY_RADAR_0_FIRPWR 0x7F000000
+#define AR_PHY_RADAR_0_FIRPWR_S 24
+
+#define AR_PHY_RADAR_1 0x9958
+#define AR_PHY_RADAR_1_RELPWR_ENA 0x00800000
+#define AR_PHY_RADAR_1_USE_FIR128 0x00400000
+#define AR_PHY_RADAR_1_RELPWR_THRESH 0x003F0000
+#define AR_PHY_RADAR_1_RELPWR_THRESH_S 16
+#define AR_PHY_RADAR_1_BLOCK_CHECK 0x00008000
+#define AR_PHY_RADAR_1_MAX_RRSSI 0x00004000
+#define AR_PHY_RADAR_1_RELSTEP_CHECK 0x00002000
+#define AR_PHY_RADAR_1_RELSTEP_THRESH 0x00001F00
+#define AR_PHY_RADAR_1_RELSTEP_THRESH_S 8
+#define AR_PHY_RADAR_1_MAXLEN 0x000000FF
+#define AR_PHY_RADAR_1_MAXLEN_S 0
+
+#define AR_PHY_SWITCH_CHAIN_0 0x9960
+#define AR_PHY_SWITCH_COM 0x9964
+
+#define AR_PHY_SIGMA_DELTA 0x996C
+#define AR_PHY_SIGMA_DELTA_ADC_SEL 0x00000003
+#define AR_PHY_SIGMA_DELTA_ADC_SEL_S 0
+#define AR_PHY_SIGMA_DELTA_FILT2 0x000000F8
+#define AR_PHY_SIGMA_DELTA_FILT2_S 3
+#define AR_PHY_SIGMA_DELTA_FILT1 0x00001F00
+#define AR_PHY_SIGMA_DELTA_FILT1_S 8
+#define AR_PHY_SIGMA_DELTA_ADC_CLIP 0x01FFE000
+#define AR_PHY_SIGMA_DELTA_ADC_CLIP_S 13
+
+#define AR_PHY_RESTART 0x9970
+#define AR_PHY_RESTART_DIV_GC 0x001C0000
+#define AR_PHY_RESTART_DIV_GC_S 18
+
+#define AR_PHY_RFBUS_REQ 0x997C
+#define AR_PHY_RFBUS_REQ_EN 0x00000001
+
+#define AR_PHY_TIMING7 0x9980
+#define AR_PHY_TIMING8 0x9984
+#define AR_PHY_TIMING8_PILOT_MASK_2 0x000FFFFF
+#define AR_PHY_TIMING8_PILOT_MASK_2_S 0
+
+#define AR_PHY_BIN_MASK2_1 0x9988
+#define AR_PHY_BIN_MASK2_2 0x998c
+#define AR_PHY_BIN_MASK2_3 0x9990
+#define AR_PHY_BIN_MASK2_4 0x9994
+
+#define AR_PHY_BIN_MASK_1 0x9900
+#define AR_PHY_BIN_MASK_2 0x9904
+#define AR_PHY_BIN_MASK_3 0x9908
+
+#define AR_PHY_MASK_CTL 0x990c
+
+#define AR_PHY_BIN_MASK2_4_MASK_4 0x00003FFF
+#define AR_PHY_BIN_MASK2_4_MASK_4_S 0
+
+#define AR_PHY_TIMING9 0x9998
+#define AR_PHY_TIMING10 0x999c
+#define AR_PHY_TIMING10_PILOT_MASK_2 0x000FFFFF
+#define AR_PHY_TIMING10_PILOT_MASK_2_S 0
+
+#define AR_PHY_TIMING11 0x99a0
+#define AR_PHY_TIMING11_SPUR_DELTA_PHASE 0x000FFFFF
+#define AR_PHY_TIMING11_SPUR_DELTA_PHASE_S 0
+#define AR_PHY_TIMING11_USE_SPUR_IN_AGC 0x40000000
+#define AR_PHY_TIMING11_USE_SPUR_IN_SELFCOR 0x80000000
+
+#define AR_PHY_RX_CHAINMASK 0x99a4
+#define AR_PHY_NEW_ADC_DC_GAIN_CORR(_i) (0x99b4 + ((_i) << 12))
+#define AR_PHY_NEW_ADC_GAIN_CORR_ENABLE 0x40000000
+#define AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE 0x80000000
+
+#define AR_PHY_MULTICHAIN_GAIN_CTL 0x99ac
+#define AR_PHY_9285_ANT_DIV_CTL_ALL 0x7f000000
+#define AR_PHY_9285_ANT_DIV_CTL 0x01000000
+#define AR_PHY_9285_ANT_DIV_CTL_S 24
+#define AR_PHY_9285_ANT_DIV_ALT_LNACONF 0x06000000
+#define AR_PHY_9285_ANT_DIV_ALT_LNACONF_S 25
+#define AR_PHY_9285_ANT_DIV_MAIN_LNACONF 0x18000000
+#define AR_PHY_9285_ANT_DIV_MAIN_LNACONF_S 27
+#define AR_PHY_9285_ANT_DIV_ALT_GAINTB 0x20000000
+#define AR_PHY_9285_ANT_DIV_ALT_GAINTB_S 29
+#define AR_PHY_9285_ANT_DIV_MAIN_GAINTB 0x40000000
+#define AR_PHY_9285_ANT_DIV_MAIN_GAINTB_S 30
+#define AR_PHY_9285_ANT_DIV_LNA1 2
+#define AR_PHY_9285_ANT_DIV_LNA2 1
+#define AR_PHY_9285_ANT_DIV_LNA1_PLUS_LNA2 3
+#define AR_PHY_9285_ANT_DIV_LNA1_MINUS_LNA2 0
+#define AR_PHY_9285_ANT_DIV_GAINTB_0 0
+#define AR_PHY_9285_ANT_DIV_GAINTB_1 1
+
+#define AR_PHY_EXT_CCA0 0x99b8
+#define AR_PHY_EXT_CCA0_THRESH62 0x000000FF
+#define AR_PHY_EXT_CCA0_THRESH62_S 0
+
+#define AR_PHY_EXT_CCA 0x99bc
+#define AR_PHY_EXT_CCA_CYCPWR_THR1 0x0000FE00
+#define AR_PHY_EXT_CCA_CYCPWR_THR1_S 9
+#define AR_PHY_EXT_CCA_THRESH62 0x007F0000
+#define AR_PHY_EXT_CCA_THRESH62_S 16
+#define AR_PHY_EXT_MINCCA_PWR 0xFF800000
+#define AR_PHY_EXT_MINCCA_PWR_S 23
+#define AR9280_PHY_EXT_MINCCA_PWR 0x01FF0000
+#define AR9280_PHY_EXT_MINCCA_PWR_S 16
+
+#define AR_PHY_SFCORR_EXT 0x99c0
+#define AR_PHY_SFCORR_EXT_M1_THRESH 0x0000007F
+#define AR_PHY_SFCORR_EXT_M1_THRESH_S 0
+#define AR_PHY_SFCORR_EXT_M2_THRESH 0x00003F80
+#define AR_PHY_SFCORR_EXT_M2_THRESH_S 7
+#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW 0x001FC000
+#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW_S 14
+#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW 0x0FE00000
+#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW_S 21
+#define AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S 28
+
+#define AR_PHY_HALFGI 0x99D0
+#define AR_PHY_HALFGI_DSC_MAN 0x0007FFF0
+#define AR_PHY_HALFGI_DSC_MAN_S 4
+#define AR_PHY_HALFGI_DSC_EXP 0x0000000F
+#define AR_PHY_HALFGI_DSC_EXP_S 0
+
+#define AR_PHY_CHAN_INFO_MEMORY 0x99DC
+#define AR_PHY_CHAN_INFO_MEMORY_CAPTURE_MASK 0x0001
+
+#define AR_PHY_HEAVY_CLIP_ENABLE 0x99E0
+
+#define AR_PHY_HEAVY_CLIP_FACTOR_RIFS 0x99EC
+#define AR_PHY_RIFS_INIT_DELAY 0x03ff0000
+
+#define AR_PHY_M_SLEEP 0x99f0
+#define AR_PHY_REFCLKDLY 0x99f4
+#define AR_PHY_REFCLKPD 0x99f8
+
+#define AR_PHY_CALMODE 0x99f0
+
+#define AR_PHY_CALMODE_IQ 0x00000000
+#define AR_PHY_CALMODE_ADC_GAIN 0x00000001
+#define AR_PHY_CALMODE_ADC_DC_PER 0x00000002
+#define AR_PHY_CALMODE_ADC_DC_INIT 0x00000003
+
+#define AR_PHY_CAL_MEAS_0(_i) (0x9c10 + ((_i) << 12))
+#define AR_PHY_CAL_MEAS_1(_i) (0x9c14 + ((_i) << 12))
+#define AR_PHY_CAL_MEAS_2(_i) (0x9c18 + ((_i) << 12))
+#define AR_PHY_CAL_MEAS_3(_i) (0x9c1c + ((_i) << 12))
+
+#define AR_PHY_CURRENT_RSSI 0x9c1c
+#define AR9280_PHY_CURRENT_RSSI 0x9c3c
+
+#define AR_PHY_RFBUS_GRANT 0x9C20
+#define AR_PHY_RFBUS_GRANT_EN 0x00000001
+
+#define AR_PHY_CHAN_INFO_GAIN_DIFF 0x9CF4
+#define AR_PHY_CHAN_INFO_GAIN_DIFF_UPPER_LIMIT 320
+
+#define AR_PHY_CHAN_INFO_GAIN 0x9CFC
+
+#define AR_PHY_MODE 0xA200
+#define AR_PHY_MODE_ASYNCFIFO 0x80
+#define AR_PHY_MODE_AR2133 0x08
+#define AR_PHY_MODE_AR5111 0x00
+#define AR_PHY_MODE_AR5112 0x08
+#define AR_PHY_MODE_DYNAMIC 0x04
+#define AR_PHY_MODE_RF2GHZ 0x02
+#define AR_PHY_MODE_RF5GHZ 0x00
+#define AR_PHY_MODE_CCK 0x01
+#define AR_PHY_MODE_OFDM 0x00
+#define AR_PHY_MODE_DYN_CCK_DISABLE 0x100
+
+#define AR_PHY_CCK_TX_CTRL 0xA204
+#define AR_PHY_CCK_TX_CTRL_JAPAN 0x00000010
+#define AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK 0x0000000C
+#define AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK_S 2
+
+#define AR_PHY_CCK_DETECT 0xA208
+#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK 0x0000003F
+#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK_S 0
+/* [12:6] settling time for antenna switch */
+#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME 0x00001FC0
+#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME_S 6
+#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV 0x2000
+#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV_S 13
+
+#define AR_PHY_GAIN_2GHZ 0xA20C
+#define AR_PHY_GAIN_2GHZ_RXTX_MARGIN 0x00FC0000
+#define AR_PHY_GAIN_2GHZ_RXTX_MARGIN_S 18
+#define AR_PHY_GAIN_2GHZ_BSW_MARGIN 0x00003C00
+#define AR_PHY_GAIN_2GHZ_BSW_MARGIN_S 10
+#define AR_PHY_GAIN_2GHZ_BSW_ATTEN 0x0000001F
+#define AR_PHY_GAIN_2GHZ_BSW_ATTEN_S 0
+
+#define AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN 0x003E0000
+#define AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN_S 17
+#define AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN 0x0001F000
+#define AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN_S 12
+#define AR_PHY_GAIN_2GHZ_XATTEN2_DB 0x00000FC0
+#define AR_PHY_GAIN_2GHZ_XATTEN2_DB_S 6
+#define AR_PHY_GAIN_2GHZ_XATTEN1_DB 0x0000003F
+#define AR_PHY_GAIN_2GHZ_XATTEN1_DB_S 0
+
+#define AR_PHY_CCK_RXCTRL4 0xA21C
+#define AR_PHY_CCK_RXCTRL4_FREQ_EST_SHORT 0x01F80000
+#define AR_PHY_CCK_RXCTRL4_FREQ_EST_SHORT_S 19
+
+#define AR_PHY_DAG_CTRLCCK 0xA228
+#define AR_PHY_DAG_CTRLCCK_EN_RSSI_THR 0x00000200
+#define AR_PHY_DAG_CTRLCCK_RSSI_THR 0x0001FC00
+#define AR_PHY_DAG_CTRLCCK_RSSI_THR_S 10
+
+#define AR_PHY_FORCE_CLKEN_CCK 0xA22C
+#define AR_PHY_FORCE_CLKEN_CCK_MRC_MUX 0x00000040
+
+#define AR_PHY_POWER_TX_RATE3 0xA234
+#define AR_PHY_POWER_TX_RATE4 0xA238
+
+#define AR_PHY_SCRM_SEQ_XR 0xA23C
+#define AR_PHY_HEADER_DETECT_XR 0xA240
+#define AR_PHY_CHIRP_DETECTED_XR 0xA244
+#define AR_PHY_BLUETOOTH 0xA254
+
+#define AR_PHY_TPCRG1 0xA258
+#define AR_PHY_TPCRG1_NUM_PD_GAIN 0x0000c000
+#define AR_PHY_TPCRG1_NUM_PD_GAIN_S 14
+
+#define AR_PHY_TPCRG1_PD_GAIN_1 0x00030000
+#define AR_PHY_TPCRG1_PD_GAIN_1_S 16
+#define AR_PHY_TPCRG1_PD_GAIN_2 0x000C0000
+#define AR_PHY_TPCRG1_PD_GAIN_2_S 18
+#define AR_PHY_TPCRG1_PD_GAIN_3 0x00300000
+#define AR_PHY_TPCRG1_PD_GAIN_3_S 20
+
+#define AR_PHY_TPCRG1_PD_CAL_ENABLE 0x00400000
+#define AR_PHY_TPCRG1_PD_CAL_ENABLE_S 22
+
+#define AR_PHY_TX_PWRCTRL4 0xa264
+#define AR_PHY_TX_PWRCTRL_PD_AVG_VALID 0x00000001
+#define AR_PHY_TX_PWRCTRL_PD_AVG_VALID_S 0
+#define AR_PHY_TX_PWRCTRL_PD_AVG_OUT 0x000001FE
+#define AR_PHY_TX_PWRCTRL_PD_AVG_OUT_S 1
+
+#define AR_PHY_TX_PWRCTRL6_0 0xa270
+#define AR_PHY_TX_PWRCTRL6_1 0xb270
+#define AR_PHY_TX_PWRCTRL_ERR_EST_MODE 0x03000000
+#define AR_PHY_TX_PWRCTRL_ERR_EST_MODE_S 24
+
+#define AR_PHY_TX_PWRCTRL7 0xa274
+#define AR_PHY_TX_PWRCTRL_INIT_TX_GAIN 0x01F80000
+#define AR_PHY_TX_PWRCTRL_INIT_TX_GAIN_S 19
+
+#define AR_PHY_TX_PWRCTRL9 0xa27C
+#define AR_PHY_TX_DESIRED_SCALE_CCK 0x00007C00
+#define AR_PHY_TX_DESIRED_SCALE_CCK_S 10
+#define AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL 0x80000000
+#define AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL_S 31
+
+#define AR_PHY_TX_GAIN_TBL1 0xa300
+#define AR_PHY_TX_GAIN 0x0007F000
+#define AR_PHY_TX_GAIN_S 12
+
+#define AR_PHY_CH0_TX_PWRCTRL11 0xa398
+#define AR_PHY_CH1_TX_PWRCTRL11 0xb398
+#define AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP 0x0000FC00
+#define AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP_S 10
+
+#define AR_PHY_VIT_MASK2_M_46_61 0xa3a0
+#define AR_PHY_MASK2_M_31_45 0xa3a4
+#define AR_PHY_MASK2_M_16_30 0xa3a8
+#define AR_PHY_MASK2_M_00_15 0xa3ac
+#define AR_PHY_MASK2_P_15_01 0xa3b8
+#define AR_PHY_MASK2_P_30_16 0xa3bc
+#define AR_PHY_MASK2_P_45_31 0xa3c0
+#define AR_PHY_MASK2_P_61_45 0xa3c4
+#define AR_PHY_SPUR_REG 0x994c
+
+#define AR_PHY_SPUR_REG_MASK_RATE_CNTL (0xFF << 18)
+#define AR_PHY_SPUR_REG_MASK_RATE_CNTL_S 18
+
+#define AR_PHY_SPUR_REG_ENABLE_MASK_PPM 0x20000
+#define AR_PHY_SPUR_REG_MASK_RATE_SELECT (0xFF << 9)
+#define AR_PHY_SPUR_REG_MASK_RATE_SELECT_S 9
+#define AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI 0x100
+#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH 0x7F
+#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH_S 0
+
+#define AR_PHY_PILOT_MASK_01_30 0xa3b0
+#define AR_PHY_PILOT_MASK_31_60 0xa3b4
+
+#define AR_PHY_CHANNEL_MASK_01_30 0x99d4
+#define AR_PHY_CHANNEL_MASK_31_60 0x99d8
+
+#define AR_PHY_ANALOG_SWAP 0xa268
+#define AR_PHY_SWAP_ALT_CHAIN 0x00000040
+
+#define AR_PHY_TPCRG5 0xA26C
+#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP 0x0000000F
+#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP_S 0
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1 0x000003F0
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1_S 4
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2 0x0000FC00
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2_S 10
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3 0x003F0000
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3_S 16
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4 0x0FC00000
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4_S 22
+
+/* Carrier leak calibration control, do it after AGC calibration */
+#define AR_PHY_CL_CAL_CTL 0xA358
+#define AR_PHY_CL_CAL_ENABLE 0x00000002
+#define AR_PHY_PARALLEL_CAL_ENABLE 0x00000001
+
+#define AR_PHY_POWER_TX_RATE5 0xA38C
+#define AR_PHY_POWER_TX_RATE6 0xA390
+
+#define AR_PHY_CAL_CHAINMASK 0xA39C
+
+#define AR_PHY_POWER_TX_SUB 0xA3C8
+#define AR_PHY_POWER_TX_RATE7 0xA3CC
+#define AR_PHY_POWER_TX_RATE8 0xA3D0
+#define AR_PHY_POWER_TX_RATE9 0xA3D4
+
+#define AR_PHY_XPA_CFG 0xA3D8
+#define AR_PHY_FORCE_XPA_CFG 0x00000001
+#define AR_PHY_FORCE_XPA_CFG_S 0
+
+#define AR_PHY_CH1_CCA 0xa864
+#define AR_PHY_CH1_MINCCA_PWR 0x0FF80000
+#define AR_PHY_CH1_MINCCA_PWR_S 19
+#define AR9280_PHY_CH1_MINCCA_PWR 0x1FF00000
+#define AR9280_PHY_CH1_MINCCA_PWR_S 20
+
+#define AR_PHY_CH2_CCA 0xb864
+#define AR_PHY_CH2_MINCCA_PWR 0x0FF80000
+#define AR_PHY_CH2_MINCCA_PWR_S 19
+
+#define AR_PHY_CH1_EXT_CCA 0xa9bc
+#define AR_PHY_CH1_EXT_MINCCA_PWR 0xFF800000
+#define AR_PHY_CH1_EXT_MINCCA_PWR_S 23
+#define AR9280_PHY_CH1_EXT_MINCCA_PWR 0x01FF0000
+#define AR9280_PHY_CH1_EXT_MINCCA_PWR_S 16
+
+#define AR_PHY_CH2_EXT_CCA 0xb9bc
+#define AR_PHY_CH2_EXT_MINCCA_PWR 0xFF800000
+#define AR_PHY_CH2_EXT_MINCCA_PWR_S 23
+
+#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
new file mode 100644
index 000000000000..5fcafb460877
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -0,0 +1,803 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "hw-ops.h"
+#include "ar9003_phy.h"
+
+static void ar9003_hw_setup_calibration(struct ath_hw *ah,
+ struct ath9k_cal_list *currCal)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ /* Select calibration to run */
+ switch (currCal->calData->calType) {
+ case IQ_MISMATCH_CAL:
+ /*
+ * Start calibration with
+ * 2^(INIT_IQCAL_LOG_COUNT_MAX+1) samples
+ */
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_IQCAL_LOG_COUNT_MAX,
+ currCal->calData->calCountMax);
+ REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "starting IQ Mismatch Calibration\n");
+
+ /* Kick-off cal */
+ REG_SET_BIT(ah, AR_PHY_TIMING4, AR_PHY_TIMING4_DO_CAL);
+ break;
+ case TEMP_COMP_CAL:
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_THERM,
+ AR_PHY_65NM_CH0_THERM_LOCAL, 1);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_THERM,
+ AR_PHY_65NM_CH0_THERM_START, 1);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "starting Temperature Compensation Calibration\n");
+ break;
+ case ADC_DC_INIT_CAL:
+ case ADC_GAIN_CAL:
+ case ADC_DC_CAL:
+ /* Not yet */
+ break;
+ }
+}
+
+/*
+ * Generic calibration routine.
+ * Recalibrate the lower PHY chips to account for temperature/environment
+ * changes.
+ */
+static bool ar9003_hw_per_calibration(struct ath_hw *ah,
+ struct ath9k_channel *ichan,
+ u8 rxchainmask,
+ struct ath9k_cal_list *currCal)
+{
+ /* Cal is assumed not done until explicitly set below */
+ bool iscaldone = false;
+
+ /* Calibration in progress. */
+ if (currCal->calState == CAL_RUNNING) {
+ /* Check to see if it has finished. */
+ if (!(REG_READ(ah, AR_PHY_TIMING4) & AR_PHY_TIMING4_DO_CAL)) {
+ /*
+ * Accumulate cal measures for active chains
+ */
+ currCal->calData->calCollect(ah);
+ ah->cal_samples++;
+
+ if (ah->cal_samples >=
+ currCal->calData->calNumSamples) {
+ unsigned int i, numChains = 0;
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (rxchainmask & (1 << i))
+ numChains++;
+ }
+
+ /*
+ * Process accumulated data
+ */
+ currCal->calData->calPostProc(ah, numChains);
+
+ /* Calibration has finished. */
+ ichan->CalValid |= currCal->calData->calType;
+ currCal->calState = CAL_DONE;
+ iscaldone = true;
+ } else {
+ /*
+ * Set-up collection of another sub-sample until we
+ * get desired number
+ */
+ ar9003_hw_setup_calibration(ah, currCal);
+ }
+ }
+ } else if (!(ichan->CalValid & currCal->calData->calType)) {
+ /* If current cal is marked invalid in channel, kick it off */
+ ath9k_hw_reset_calibration(ah, currCal);
+ }
+
+ return iscaldone;
+}
+
+static bool ar9003_hw_calibrate(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u8 rxchainmask,
+ bool longcal)
+{
+ bool iscaldone = true;
+ struct ath9k_cal_list *currCal = ah->cal_list_curr;
+
+ /*
+ * For the given calibration:
+ * 1. Call the generic cal routine.
+ * 2. When this cal is done (isCalDone) and more cals are waiting
+ * (e.g. after a reset), hide that from the upper layers by not
+ * propagating isCalDone even though it is TRUE.
+ * Instead, change isCalDone to FALSE and set up the waiting cal(s)
+ * to be run.
+ */
+ if (currCal &&
+ (currCal->calState == CAL_RUNNING ||
+ currCal->calState == CAL_WAITING)) {
+ iscaldone = ar9003_hw_per_calibration(ah, chan,
+ rxchainmask, currCal);
+ if (iscaldone) {
+ ah->cal_list_curr = currCal = currCal->calNext;
+
+ if (currCal->calState == CAL_WAITING) {
+ iscaldone = false;
+ ath9k_hw_reset_calibration(ah, currCal);
+ }
+ }
+ }
+
+ /* Do NF cal only at longer intervals */
+ if (longcal) {
+ /*
+ * Load the NF from history buffer of the current channel.
+ * NF is slow time-variant, so it is OK to use a historical
+ * value.
+ */
+ ath9k_hw_loadnf(ah, ah->curchan);
+
+ /* start NF calibration, without updating BB NF register */
+ ath9k_hw_start_nfcal(ah);
+ }
+
+ return iscaldone;
+}
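The comment above describes how a still-pending calibration chain is hidden from the caller by reporting isCalDone as false. A minimal sketch of the periodic caller this implies is shown below; ath9k_hw_calibrate() is assumed to be the ops wrapper that lands in this function, and the long-cal cadence is an arbitrary example, not something defined by this patch.

    /* Illustrative sketch only: a periodic poll driving the state machine above. */
    static void example_cal_poll(struct ath_hw *ah, u8 rxchainmask)
    {
            static unsigned int polls;
            bool longcal = (++polls % 10) == 0;     /* arbitrary long-cal cadence */
            bool done;

            done = ath9k_hw_calibrate(ah, ah->curchan, rxchainmask, longcal);
            if (!done) {
                    /* A cal in the chain is still running or waiting; poll again. */
            }
    }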
+
+static void ar9003_hw_iqcal_collect(struct ath_hw *ah)
+{
+ int i;
+
+ /* Accumulate IQ cal measures for active chains */
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ ah->totalPowerMeasI[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
+ ah->totalPowerMeasQ[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
+ ah->totalIqCorrMeas[i] +=
+ (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
+ ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
+ ah->cal_samples, i, ah->totalPowerMeasI[i],
+ ah->totalPowerMeasQ[i],
+ ah->totalIqCorrMeas[i]);
+ }
+}
+
+static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 powerMeasQ, powerMeasI, iqCorrMeas;
+ u32 qCoffDenom, iCoffDenom;
+ int32_t qCoff, iCoff;
+ int iqCorrNeg, i;
+ const u_int32_t offset_array[3] = {
+ AR_PHY_RX_IQCAL_CORR_B0,
+ AR_PHY_RX_IQCAL_CORR_B1,
+ AR_PHY_RX_IQCAL_CORR_B2,
+ };
+
+ for (i = 0; i < numChains; i++) {
+ powerMeasI = ah->totalPowerMeasI[i];
+ powerMeasQ = ah->totalPowerMeasQ[i];
+ iqCorrMeas = ah->totalIqCorrMeas[i];
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Starting IQ Cal and Correction for Chain %d\n",
+ i);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Orignal: Chn %diq_corr_meas = 0x%08x\n",
+ i, ah->totalIqCorrMeas[i]);
+
+ iqCorrNeg = 0;
+
+ if (iqCorrMeas > 0x80000000) {
+ iqCorrMeas = (0xffffffff - iqCorrMeas) + 1;
+ iqCorrNeg = 1;
+ }
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
+ ath_print(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
+ iqCorrNeg);
+
+ iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 256;
+ qCoffDenom = powerMeasQ / 64;
+
+ if ((iCoffDenom != 0) && (qCoffDenom != 0)) {
+ iCoff = iqCorrMeas / iCoffDenom;
+ qCoff = powerMeasI / qCoffDenom - 64;
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d iCoff = 0x%08x\n", i, iCoff);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d qCoff = 0x%08x\n", i, qCoff);
+
+ /* Force bounds on iCoff */
+ if (iCoff >= 63)
+ iCoff = 63;
+ else if (iCoff <= -63)
+ iCoff = -63;
+
+ /* Negate iCoff if iqCorrNeg == 0 */
+ if (iqCorrNeg == 0x0)
+ iCoff = -iCoff;
+
+ /* Force bounds on qCoff */
+ if (qCoff >= 63)
+ qCoff = 63;
+ else if (qCoff <= -63)
+ qCoff = -63;
+
+ iCoff = iCoff & 0x7f;
+ qCoff = qCoff & 0x7f;
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
+ i, iCoff, qCoff);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Register offset (0x%04x) "
+ "before update = 0x%x\n",
+ offset_array[i],
+ REG_READ(ah, offset_array[i]));
+
+ REG_RMW_FIELD(ah, offset_array[i],
+ AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF,
+ iCoff);
+ REG_RMW_FIELD(ah, offset_array[i],
+ AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF,
+ qCoff);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Register offset (0x%04x) QI COFF "
+ "(bitfields 0x%08x) after update = 0x%x\n",
+ offset_array[i],
+ AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF,
+ REG_READ(ah, offset_array[i]));
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Register offset (0x%04x) QQ COFF "
+ "(bitfields 0x%08x) after update = 0x%x\n",
+ offset_array[i],
+ AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF,
+ REG_READ(ah, offset_array[i]));
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "IQ Cal and Correction done for Chain %d\n",
+ i);
+ }
+ }
+
+ REG_SET_BIT(ah, AR_PHY_RX_IQCAL_CORR_B0,
+ AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "IQ Cal and Correction (offset 0x%04x) enabled "
+ "(bit position 0x%08x). New Value 0x%08x\n",
+ (unsigned) (AR_PHY_RX_IQCAL_CORR_B0),
+ AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE,
+ REG_READ(ah, AR_PHY_RX_IQCAL_CORR_B0));
+}
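Stripped of the fixed-point bookkeeping, the per-chain correction applied above amounts to the following (a reconstruction from the code, not text from the patch), with both coefficients clamped to [-63, 63], written as 7-bit fields, and I_coff negated when the accumulated correlation was non-negative:

$$ I_{coff} \approx \frac{\mathrm{iq\_corr\_meas}}{(P_I + P_Q)/512}, \qquad Q_{coff} \approx \frac{P_I}{P_Q/64} - 64 $$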
+
+static const struct ath9k_percal_data iq_cal_single_sample = {
+ IQ_MISMATCH_CAL,
+ MIN_CAL_SAMPLES,
+ PER_MAX_LOG_COUNT,
+ ar9003_hw_iqcal_collect,
+ ar9003_hw_iqcalibrate
+};
+
+static void ar9003_hw_init_cal_settings(struct ath_hw *ah)
+{
+ ah->iq_caldata.calData = &iq_cal_single_sample;
+ ah->supp_cals = IQ_MISMATCH_CAL;
+}
+
+static bool ar9003_hw_iscal_supported(struct ath_hw *ah,
+ enum ath9k_cal_types calType)
+{
+ switch (calType & ah->supp_cals) {
+ case IQ_MISMATCH_CAL:
+ /*
+ * XXX: Run IQ Mismatch for non-CCK only
+ * Note that CHANNEL_B is never set though.
+ */
+ return true;
+ case ADC_GAIN_CAL:
+ case ADC_DC_CAL:
+ return false;
+ case TEMP_COMP_CAL:
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * solve 4x4 linear equation used in loopback iq cal.
+ */
+static bool ar9003_hw_solve_iq_cal(struct ath_hw *ah,
+ s32 sin_2phi_1,
+ s32 cos_2phi_1,
+ s32 sin_2phi_2,
+ s32 cos_2phi_2,
+ s32 mag_a0_d0,
+ s32 phs_a0_d0,
+ s32 mag_a1_d0,
+ s32 phs_a1_d0,
+ s32 solved_eq[])
+{
+ s32 f1 = cos_2phi_1 - cos_2phi_2,
+ f3 = sin_2phi_1 - sin_2phi_2,
+ f2;
+ s32 mag_tx, phs_tx, mag_rx, phs_rx;
+ const s32 result_shift = 1 << 15;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ f2 = (f1 * f1 + f3 * f3) / result_shift;
+
+ if (!f2) {
+ ath_print(common, ATH_DBG_CALIBRATE, "Divide by 0\n");
+ return false;
+ }
+
+ /* mag mismatch, tx */
+ mag_tx = f1 * (mag_a0_d0 - mag_a1_d0) + f3 * (phs_a0_d0 - phs_a1_d0);
+ /* phs mismatch, tx */
+ phs_tx = f3 * (-mag_a0_d0 + mag_a1_d0) + f1 * (phs_a0_d0 - phs_a1_d0);
+
+ mag_tx = (mag_tx / f2);
+ phs_tx = (phs_tx / f2);
+
+ /* mag mismatch, rx */
+ mag_rx = mag_a0_d0 - (cos_2phi_1 * mag_tx + sin_2phi_1 * phs_tx) /
+ result_shift;
+ /* phs mismatch, rx */
+ phs_rx = phs_a0_d0 + (sin_2phi_1 * mag_tx - cos_2phi_1 * phs_tx) /
+ result_shift;
+
+ solved_eq[0] = mag_tx;
+ solved_eq[1] = phs_tx;
+ solved_eq[2] = mag_rx;
+ solved_eq[3] = phs_rx;
+
+ return true;
+}
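Read back from the fixed-point arithmetic above (a reconstruction, not text from the patch), the routine solves for the TX and RX magnitude/phase mismatches from two measurements (m_0, p_0) and (m_1, p_1) taken at phases phi_1 and phi_2:

$$
\begin{aligned}
f_1 &= \cos 2\phi_1 - \cos 2\phi_2, \quad f_3 = \sin 2\phi_1 - \sin 2\phi_2, \quad f_2 = \frac{f_1^2 + f_3^2}{2^{15}},\\
m_{tx} &= \frac{f_1 (m_0 - m_1) + f_3 (p_0 - p_1)}{f_2}, \qquad
p_{tx} = \frac{f_1 (p_0 - p_1) - f_3 (m_0 - m_1)}{f_2},\\
m_{rx} &= m_0 - \frac{m_{tx}\cos 2\phi_1 + p_{tx}\sin 2\phi_1}{2^{15}}, \qquad
p_{rx} = p_0 + \frac{m_{tx}\sin 2\phi_1 - p_{tx}\cos 2\phi_1}{2^{15}}.
\end{aligned}
$$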
+
+static s32 ar9003_hw_find_mag_approx(struct ath_hw *ah, s32 in_re, s32 in_im)
+{
+ s32 abs_i = abs(in_re),
+ abs_q = abs(in_im),
+ max_abs, min_abs;
+
+ if (abs_i > abs_q) {
+ max_abs = abs_i;
+ min_abs = abs_q;
+ } else {
+ max_abs = abs_q;
+ min_abs = abs_i;
+ }
+
+ return max_abs - (max_abs / 32) + (min_abs / 8) + (min_abs / 4);
+}
+
+#define DELPT 32
+
+static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
+ s32 chain_idx,
+ const s32 iq_res[],
+ s32 iqc_coeff[])
+{
+ s32 i2_m_q2_a0_d0, i2_p_q2_a0_d0, iq_corr_a0_d0,
+ i2_m_q2_a0_d1, i2_p_q2_a0_d1, iq_corr_a0_d1,
+ i2_m_q2_a1_d0, i2_p_q2_a1_d0, iq_corr_a1_d0,
+ i2_m_q2_a1_d1, i2_p_q2_a1_d1, iq_corr_a1_d1;
+ s32 mag_a0_d0, mag_a1_d0, mag_a0_d1, mag_a1_d1,
+ phs_a0_d0, phs_a1_d0, phs_a0_d1, phs_a1_d1,
+ sin_2phi_1, cos_2phi_1,
+ sin_2phi_2, cos_2phi_2;
+ s32 mag_tx, phs_tx, mag_rx, phs_rx;
+ s32 solved_eq[4], mag_corr_tx, phs_corr_tx, mag_corr_rx, phs_corr_rx,
+ q_q_coff, q_i_coff;
+ const s32 res_scale = 1 << 15;
+ const s32 delpt_shift = 1 << 8;
+ s32 mag1, mag2;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ i2_m_q2_a0_d0 = iq_res[0] & 0xfff;
+ i2_p_q2_a0_d0 = (iq_res[0] >> 12) & 0xfff;
+ iq_corr_a0_d0 = ((iq_res[0] >> 24) & 0xff) + ((iq_res[1] & 0xf) << 8);
+
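+ /*
+ * The hardware reports these as 12-bit two's-complement values;
+ * convert anything above 0x800 back to a negative s32.
+ */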
+ if (i2_m_q2_a0_d0 > 0x800)
+ i2_m_q2_a0_d0 = -((0xfff - i2_m_q2_a0_d0) + 1);
+
+ if (i2_p_q2_a0_d0 > 0x800)
+ i2_p_q2_a0_d0 = -((0xfff - i2_p_q2_a0_d0) + 1);
+
+ if (iq_corr_a0_d0 > 0x800)
+ iq_corr_a0_d0 = -((0xfff - iq_corr_a0_d0) + 1);
+
+ i2_m_q2_a0_d1 = (iq_res[1] >> 4) & 0xfff;
+ i2_p_q2_a0_d1 = (iq_res[2] & 0xfff);
+ iq_corr_a0_d1 = (iq_res[2] >> 12) & 0xfff;
+
+ if (i2_m_q2_a0_d1 > 0x800)
+ i2_m_q2_a0_d1 = -((0xfff - i2_m_q2_a0_d1) + 1);
+
+ if (i2_p_q2_a0_d1 > 0x800)
+ i2_p_q2_a0_d1 = -((0xfff - i2_p_q2_a0_d1) + 1);
+
+ if (iq_corr_a0_d1 > 0x800)
+ iq_corr_a0_d1 = -((0xfff - iq_corr_a0_d1) + 1);
+
+ i2_m_q2_a1_d0 = ((iq_res[2] >> 24) & 0xff) + ((iq_res[3] & 0xf) << 8);
+ i2_p_q2_a1_d0 = (iq_res[3] >> 4) & 0xfff;
+ iq_corr_a1_d0 = iq_res[4] & 0xfff;
+
+ if (i2_m_q2_a1_d0 > 0x800)
+ i2_m_q2_a1_d0 = -((0xfff - i2_m_q2_a1_d0) + 1);
+
+ if (i2_p_q2_a1_d0 > 0x800)
+ i2_p_q2_a1_d0 = -((0xfff - i2_p_q2_a1_d0) + 1);
+
+ if (iq_corr_a1_d0 > 0x800)
+ iq_corr_a1_d0 = -((0xfff - iq_corr_a1_d0) + 1);
+
+ i2_m_q2_a1_d1 = (iq_res[4] >> 12) & 0xfff;
+ i2_p_q2_a1_d1 = ((iq_res[4] >> 24) & 0xff) + ((iq_res[5] & 0xf) << 8);
+ iq_corr_a1_d1 = (iq_res[5] >> 4) & 0xfff;
+
+ if (i2_m_q2_a1_d1 > 0x800)
+ i2_m_q2_a1_d1 = -((0xfff - i2_m_q2_a1_d1) + 1);
+
+ if (i2_p_q2_a1_d1 > 0x800)
+ i2_p_q2_a1_d1 = -((0xfff - i2_p_q2_a1_d1) + 1);
+
+ if (iq_corr_a1_d1 > 0x800)
+ iq_corr_a1_d1 = -((0xfff - iq_corr_a1_d1) + 1);
+
+ if ((i2_p_q2_a0_d0 == 0) || (i2_p_q2_a0_d1 == 0) ||
+ (i2_p_q2_a1_d0 == 0) || (i2_p_q2_a1_d1 == 0)) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Divide by 0:\na0_d0=%d\n"
+ "a0_d1=%d\na1_d0=%d\na1_d1=%d\n",
+ i2_p_q2_a0_d0, i2_p_q2_a0_d1,
+ i2_p_q2_a1_d0, i2_p_q2_a1_d1);
+ return false;
+ }
+
+ mag_a0_d0 = (i2_m_q2_a0_d0 * res_scale) / i2_p_q2_a0_d0;
+ phs_a0_d0 = (iq_corr_a0_d0 * res_scale) / i2_p_q2_a0_d0;
+
+ mag_a0_d1 = (i2_m_q2_a0_d1 * res_scale) / i2_p_q2_a0_d1;
+ phs_a0_d1 = (iq_corr_a0_d1 * res_scale) / i2_p_q2_a0_d1;
+
+ mag_a1_d0 = (i2_m_q2_a1_d0 * res_scale) / i2_p_q2_a1_d0;
+ phs_a1_d0 = (iq_corr_a1_d0 * res_scale) / i2_p_q2_a1_d0;
+
+ mag_a1_d1 = (i2_m_q2_a1_d1 * res_scale) / i2_p_q2_a1_d1;
+ phs_a1_d1 = (iq_corr_a1_d1 * res_scale) / i2_p_q2_a1_d1;
+
+ /* w/o analog phase shift */
+ sin_2phi_1 = (((mag_a0_d0 - mag_a0_d1) * delpt_shift) / DELPT);
+ /* w/o analog phase shift */
+ cos_2phi_1 = (((phs_a0_d1 - phs_a0_d0) * delpt_shift) / DELPT);
+ /* w/ analog phase shift */
+ sin_2phi_2 = (((mag_a1_d0 - mag_a1_d1) * delpt_shift) / DELPT);
+ /* w/ analog phase shift */
+ cos_2phi_2 = (((phs_a1_d1 - phs_a1_d0) * delpt_shift) / DELPT);
+
+ /*
+ * force sin^2 + cos^2 = 1;
+ * find magnitude by approximation
+ */
+ mag1 = ar9003_hw_find_mag_approx(ah, cos_2phi_1, sin_2phi_1);
+ mag2 = ar9003_hw_find_mag_approx(ah, cos_2phi_2, sin_2phi_2);
+
+ if ((mag1 == 0) || (mag2 == 0)) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Divide by 0: mag1=%d, mag2=%d\n",
+ mag1, mag2);
+ return false;
+ }
+
+ /* normalize sin and cos by mag */
+ sin_2phi_1 = (sin_2phi_1 * res_scale / mag1);
+ cos_2phi_1 = (cos_2phi_1 * res_scale / mag1);
+ sin_2phi_2 = (sin_2phi_2 * res_scale / mag2);
+ cos_2phi_2 = (cos_2phi_2 * res_scale / mag2);
+
+ /* calculate IQ mismatch */
+ if (!ar9003_hw_solve_iq_cal(ah,
+ sin_2phi_1, cos_2phi_1,
+ sin_2phi_2, cos_2phi_2,
+ mag_a0_d0, phs_a0_d0,
+ mag_a1_d0,
+ phs_a1_d0, solved_eq)) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Call to ar9003_hw_solve_iq_cal() failed.\n");
+ return false;
+ }
+
+ mag_tx = solved_eq[0];
+ phs_tx = solved_eq[1];
+ mag_rx = solved_eq[2];
+ phs_rx = solved_eq[3];
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "chain %d: mag mismatch=%d phase mismatch=%d\n",
+ chain_idx, mag_tx/res_scale, phs_tx/res_scale);
+
+ if (res_scale == mag_tx) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Divide by 0: mag_tx=%d, res_scale=%d\n",
+ mag_tx, res_scale);
+ return false;
+ }
+
+ /* calculate and quantize Tx IQ correction factor */
+ mag_corr_tx = (mag_tx * res_scale) / (res_scale - mag_tx);
+ phs_corr_tx = -phs_tx;
+
+ q_q_coff = (mag_corr_tx * 128 / res_scale);
+ q_i_coff = (phs_corr_tx * 256 / res_scale);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "tx chain %d: mag corr=%d phase corr=%d\n",
+ chain_idx, q_q_coff, q_i_coff);
+
+ if (q_i_coff < -63)
+ q_i_coff = -63;
+ if (q_i_coff > 63)
+ q_i_coff = 63;
+ if (q_q_coff < -63)
+ q_q_coff = -63;
+ if (q_q_coff > 63)
+ q_q_coff = 63;
+
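+ /*
+ * Combine the two clamped corrections into a single coefficient:
+ * q_q_coff scaled by 128 plus q_i_coff.
+ */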
+ iqc_coeff[0] = (q_q_coff * 128) + q_i_coff;
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "tx chain %d: iq corr coeff=%x\n",
+ chain_idx, iqc_coeff[0]);
+
+ if (-mag_rx == res_scale) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Divide by 0: mag_rx=%d, res_scale=%d\n",
+ mag_rx, res_scale);
+ return false;
+ }
+
+ /* calculate and quantize Rx IQ correction factors */
+ mag_corr_rx = (-mag_rx * res_scale) / (res_scale + mag_rx);
+ phs_corr_rx = -phs_rx;
+
+ q_q_coff = (mag_corr_rx * 128 / res_scale);
+ q_i_coff = (phs_corr_rx * 256 / res_scale);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "rx chain %d: mag corr=%d phase corr=%d\n",
+ chain_idx, q_q_coff, q_i_coff);
+
+ if (q_i_coff < -63)
+ q_i_coff = -63;
+ if (q_i_coff > 63)
+ q_i_coff = 63;
+ if (q_q_coff < -63)
+ q_q_coff = -63;
+ if (q_q_coff > 63)
+ q_q_coff = 63;
+
+ iqc_coeff[1] = (q_q_coff * 128) + q_i_coff;
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "rx chain %d: iq corr coeff=%x\n",
+ chain_idx, iqc_coeff[1]);
+
+ return true;
+}
+
+static void ar9003_hw_tx_iq_cal(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ const u32 txiqcal_status[AR9300_MAX_CHAINS] = {
+ AR_PHY_TX_IQCAL_STATUS_B0,
+ AR_PHY_TX_IQCAL_STATUS_B1,
+ AR_PHY_TX_IQCAL_STATUS_B2,
+ };
+ const u32 tx_corr_coeff[AR9300_MAX_CHAINS] = {
+ AR_PHY_TX_IQCAL_CORR_COEFF_01_B0,
+ AR_PHY_TX_IQCAL_CORR_COEFF_01_B1,
+ AR_PHY_TX_IQCAL_CORR_COEFF_01_B2,
+ };
+ const u32 rx_corr[AR9300_MAX_CHAINS] = {
+ AR_PHY_RX_IQCAL_CORR_B0,
+ AR_PHY_RX_IQCAL_CORR_B1,
+ AR_PHY_RX_IQCAL_CORR_B2,
+ };
+ const u_int32_t chan_info_tab[] = {
+ AR_PHY_CHAN_INFO_TAB_0,
+ AR_PHY_CHAN_INFO_TAB_1,
+ AR_PHY_CHAN_INFO_TAB_2,
+ };
+ s32 iq_res[6];
+ s32 iqc_coeff[2];
+ s32 i, j;
+ u32 num_chains = 0;
+
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (ah->txchainmask & (1 << i))
+ num_chains++;
+ }
+
+ REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1,
+ AR_PHY_TX_IQCAQL_CONTROL_1_IQCORR_I_Q_COFF_DELPT,
+ DELPT);
+ REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_START,
+ AR_PHY_TX_IQCAL_START_DO_CAL,
+ AR_PHY_TX_IQCAL_START_DO_CAL);
+
+ if (!ath9k_hw_wait(ah, AR_PHY_TX_IQCAL_START,
+ AR_PHY_TX_IQCAL_START_DO_CAL,
+ 0, AH_WAIT_TIMEOUT)) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Tx IQ Cal not complete.\n");
+ goto TX_IQ_CAL_FAILED;
+ }
+
+ for (i = 0; i < num_chains; i++) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Doing Tx IQ Cal for chain %d.\n", i);
+
+ if (REG_READ(ah, txiqcal_status[i]) &
+ AR_PHY_TX_IQCAL_STATUS_FAILED) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Tx IQ Cal failed for chain %d.\n", i);
+ goto TX_IQ_CAL_FAILED;
+ }
+
+ for (j = 0; j < 3; j++) {
+ u_int8_t idx = 2 * j,
+ offset = 4 * j;
+
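+ /*
+ * Each channel-info word holds 48 bits of result: the lower
+ * 32 bits are read with S2_READ cleared, the upper 16 bits
+ * with S2_READ set.
+ */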
+ REG_RMW_FIELD(ah, AR_PHY_CHAN_INFO_MEMORY,
+ AR_PHY_CHAN_INFO_TAB_S2_READ, 0);
+
+ /* 32 bits */
+ iq_res[idx] = REG_READ(ah, chan_info_tab[i] + offset);
+
+ REG_RMW_FIELD(ah, AR_PHY_CHAN_INFO_MEMORY,
+ AR_PHY_CHAN_INFO_TAB_S2_READ, 1);
+
+ /* 16 bits */
+ iq_res[idx+1] = 0xffff & REG_READ(ah,
+ chan_info_tab[i] +
+ offset);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "IQ RES[%d]=0x%x IQ_RES[%d]=0x%x\n",
+ idx, iq_res[idx], idx+1, iq_res[idx+1]);
+ }
+
+ if (!ar9003_hw_calc_iq_corr(ah, i, iq_res, iqc_coeff)) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Failed in calculation of IQ correction.\n");
+ goto TX_IQ_CAL_FAILED;
+ }
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "IQ_COEFF[0] = 0x%x IQ_COEFF[1] = 0x%x\n",
+ iqc_coeff[0], iqc_coeff[1]);
+
+ REG_RMW_FIELD(ah, tx_corr_coeff[i],
+ AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE,
+ iqc_coeff[0]);
+ REG_RMW_FIELD(ah, rx_corr[i],
+ AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_Q_COFF,
+ iqc_coeff[1] >> 7);
+ REG_RMW_FIELD(ah, rx_corr[i],
+ AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_I_COFF,
+ iqc_coeff[1]);
+ }
+
+ REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_3,
+ AR_PHY_TX_IQCAL_CONTROL_3_IQCORR_EN, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_RX_IQCAL_CORR_B0,
+ AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN, 0x1);
+
+ return;
+
+TX_IQ_CAL_FAILED:
+ ath_print(common, ATH_DBG_CALIBRATE, "Tx IQ Cal failed\n");
+ return;
+}
+
+static bool ar9003_hw_init_cal(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ /*
+ * 0x7 = 0b111; AR9003 needs to be configured for 3-chain mode before
+ * running AGC/TxIQ cals
+ */
+ ar9003_hw_set_chain_masks(ah, 0x7, 0x7);
+
+ /* Calibrate the AGC */
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL,
+ REG_READ(ah, AR_PHY_AGC_CONTROL) |
+ AR_PHY_AGC_CONTROL_CAL);
+
+ /* Poll for offset calibration complete */
+ if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
+ 0, AH_WAIT_TIMEOUT)) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "offset calibration failed to "
+ "complete in 1ms; noisy environment?\n");
+ return false;
+ }
+
+ /* Do Tx IQ Calibration */
+ if (ah->config.tx_iq_calibration)
+ ar9003_hw_tx_iq_cal(ah);
+
+ /* Revert chainmasks to their original values before NF cal */
+ ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
+
+ /* Initialize list pointers */
+ ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
+
+ if (ar9003_hw_iscal_supported(ah, IQ_MISMATCH_CAL)) {
+ INIT_CAL(&ah->iq_caldata);
+ INSERT_CAL(ah, &ah->iq_caldata);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "enabling IQ Calibration.\n");
+ }
+
+ if (ar9003_hw_iscal_supported(ah, TEMP_COMP_CAL)) {
+ INIT_CAL(&ah->tempCompCalData);
+ INSERT_CAL(ah, &ah->tempCompCalData);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "enabling Temperature Compensation Calibration.\n");
+ }
+
+ /* Initialize current pointer to first element in list */
+ ah->cal_list_curr = ah->cal_list;
+
+ if (ah->cal_list_curr)
+ ath9k_hw_reset_calibration(ah, ah->cal_list_curr);
+
+ chan->CalValid = 0;
+
+ return true;
+}
+
+void ar9003_hw_attach_calib_ops(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+ struct ath_hw_ops *ops = ath9k_hw_ops(ah);
+
+ priv_ops->init_cal_settings = ar9003_hw_init_cal_settings;
+ priv_ops->init_cal = ar9003_hw_init_cal;
+ priv_ops->setup_calibration = ar9003_hw_setup_calibration;
+ priv_ops->iscal_supported = ar9003_hw_iscal_supported;
+
+ ops->calibrate = ar9003_hw_calibrate;
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
new file mode 100644
index 000000000000..8a79550dff71
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -0,0 +1,1860 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "ar9003_phy.h"
+#include "ar9003_eeprom.h"
+
+#define COMP_HDR_LEN 4
+#define COMP_CKSUM_LEN 2
+
+#define AR_CH0_TOP (0x00016288)
+#define AR_CH0_TOP_XPABIASLVL (0x3)
+#define AR_CH0_TOP_XPABIASLVL_S (8)
+
+#define AR_CH0_THERM (0x00016290)
+#define AR_CH0_THERM_SPARE (0x3f)
+#define AR_CH0_THERM_SPARE_S (0)
+
+#define AR_SWITCH_TABLE_COM_ALL (0xffff)
+#define AR_SWITCH_TABLE_COM_ALL_S (0)
+
+#define AR_SWITCH_TABLE_COM2_ALL (0xffffff)
+#define AR_SWITCH_TABLE_COM2_ALL_S (0)
+
+#define AR_SWITCH_TABLE_ALL (0xfff)
+#define AR_SWITCH_TABLE_ALL_S (0)
+
+static const struct ar9300_eeprom ar9300_default = {
+ .eepromVersion = 2,
+ .templateVersion = 2,
+ .macAddr = {1, 2, 3, 4, 5, 6},
+ .custData = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .baseEepHeader = {
+ .regDmn = {0, 0x1f},
+ .txrxMask = 0x77, /* 4 bits tx and 4 bits rx */
+ .opCapFlags = {
+ .opFlags = AR9300_OPFLAGS_11G | AR9300_OPFLAGS_11A,
+ .eepMisc = 0,
+ },
+ .rfSilent = 0,
+ .blueToothOptions = 0,
+ .deviceCap = 0,
+ .deviceType = 5, /* takes lower byte in eeprom location */
+ .pwrTableOffset = AR9300_PWR_TABLE_OFFSET,
+ .params_for_tuning_caps = {0, 0},
+ .featureEnable = 0x0c,
+ /*
+ * bit0 - enable tx temp comp - disabled
+ * bit1 - enable tx volt comp - disabled
+ * bit2 - enable fastClock - enabled
+ * bit3 - enable doubling - enabled
+ * bit4 - enable internal regulator - disabled
+ */
+ .miscConfiguration = 0, /* bit0 - turn down drivestrength */
+ .eepromWriteEnableGpio = 3,
+ .wlanDisableGpio = 0,
+ .wlanLedGpio = 8,
+ .rxBandSelectGpio = 0xff,
+ .txrxgain = 0,
+ .swreg = 0,
+ },
+ .modalHeader2G = {
+ /* ar9300_modal_eep_header 2g */
+ /* 4 idle,t1,t2,b(4 bits per setting) */
+ .antCtrlCommon = 0x110,
+ /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */
+ .antCtrlCommon2 = 0x22222,
+
+ /*
+ * antCtrlChain[AR9300_MAX_CHAINS]; 6 idle, t, r,
+ * rx1, rx12, b (2 bits each)
+ */
+ .antCtrlChain = {0x150, 0x150, 0x150},
+
+ /*
+ * xatten1DB[AR9300_MAX_CHAINS]; 3 xatten1_db
+ * for ar9280 (0xa20c/b20c 5:0)
+ */
+ .xatten1DB = {0, 0, 0},
+
+ /*
+ * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
+ * for ar9280 (0xa20c/b20c 16:12)
+ */
+ .xatten1Margin = {0, 0, 0},
+ .tempSlope = 36,
+ .voltSlope = 0,
+
+ /*
+ * spurChans[OSPREY_EEPROM_MODAL_SPURS]; spur
+ * channels in usual fbin coding format
+ */
+ .spurChans = {0, 0, 0, 0, 0},
+
+ /*
+ * noiseFloorThreshCh[AR9300_MAX_CHAINS]; 3 Check
+ * if the register is per chain
+ */
+ .noiseFloorThreshCh = {-1, 0, 0},
+ .ob = {1, 1, 1},/* 3 chain */
+ .db_stage2 = {1, 1, 1}, /* 3 chain */
+ .db_stage3 = {0, 0, 0},
+ .db_stage4 = {0, 0, 0},
+ .xpaBiasLvl = 0,
+ .txFrameToDataStart = 0x0e,
+ .txFrameToPaOn = 0x0e,
+ .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
+ .antennaGain = 0,
+ .switchSettling = 0x2c,
+ .adcDesiredSize = -30,
+ .txEndToXpaOff = 0,
+ .txEndToRxOn = 0x2,
+ .txFrameToXpaOn = 0xe,
+ .thresh62 = 28,
+ .futureModal = { /* [32] */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ },
+ },
+ .calFreqPier2G = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1),
+ },
+ /* ar9300_cal_data_per_freq_op_loop 2g */
+ .calPierData2G = {
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ },
+ .calTarget_freqbin_Cck = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2484, 1),
+ },
+ .calTarget_freqbin_2G = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTarget_freqbin_2GHT20 = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTarget_freqbin_2GHT40 = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTargetPowerCck = {
+ /* 1L-5L,5S,11L,11S */
+ { {36, 36, 36, 36} },
+ { {36, 36, 36, 36} },
+ },
+ .calTargetPower2G = {
+ /* 6-24,36,48,54 */
+ { {32, 32, 28, 24} },
+ { {32, 32, 28, 24} },
+ { {32, 32, 28, 24} },
+ },
+ .calTargetPower2GHT20 = {
+ { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
+ { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
+ { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
+ },
+ .calTargetPower2GHT40 = {
+ { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
+ { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
+ { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
+ },
+ .ctlIndex_2G = {
+ 0x11, 0x12, 0x15, 0x17, 0x41, 0x42,
+ 0x45, 0x47, 0x31, 0x32, 0x35, 0x37,
+ },
+ .ctl_freqbin_2G = {
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2457, 1),
+ FREQ2FBIN(2462, 1)
+ },
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2462, 1),
+ 0xFF,
+ },
+
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2462, 1),
+ 0xFF,
+ },
+ {
+ FREQ2FBIN(2422, 1),
+ FREQ2FBIN(2427, 1),
+ FREQ2FBIN(2447, 1),
+ FREQ2FBIN(2452, 1)
+ },
+
+ {
+ /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(2484, 1),
+ },
+
+ {
+ /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ 0,
+ },
+
+ {
+ /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2472, 1),
+ 0,
+ },
+
+ {
+ /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
+ /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
+ /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
+ /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
+ },
+
+ {
+ /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ },
+
+ {
+ /* Data[9].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[9].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[9].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ 0
+ },
+
+ {
+ /* Data[10].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[10].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[10].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ 0
+ },
+
+ {
+ /* Data[11].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
+ /* Data[11].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
+ /* Data[11].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
+ /* Data[11].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
+ }
+ },
+ .ctlPowerData_2G = {
+ { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
+ { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
+ { { {60, 1}, {60, 0}, {60, 0}, {60, 1} } },
+
+ { { {60, 1}, {60, 0}, {0, 0}, {0, 0} } },
+ { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
+ { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
+
+ { { {60, 0}, {60, 1}, {60, 1}, {60, 0} } },
+ { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
+ { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
+
+ { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
+ { { {60, 0}, {60, 1}, {60, 1}, {60, 1} } },
+ },
+ .modalHeader5G = {
+ /* 4 idle,t1,t2,b (4 bits per setting) */
+ .antCtrlCommon = 0x110,
+ /* 4 ra1l1, ra2l1, ra1l2,ra2l2,ra12 */
+ .antCtrlCommon2 = 0x22222,
+ /* antCtrlChain 6 idle, t,r,rx1,rx12,b (2 bits each) */
+ .antCtrlChain = {
+ 0x000, 0x000, 0x000,
+ },
+ /* xatten1DB 3 xatten1_db for AR9280 (0xa20c/b20c 5:0) */
+ .xatten1DB = {0, 0, 0},
+
+ /*
+ * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
+ * for merlin (0xa20c/b20c 16:12)
+ */
+ .xatten1Margin = {0, 0, 0},
+ .tempSlope = 68,
+ .voltSlope = 0,
+ /* spurChans spur channels in usual fbin coding format */
+ .spurChans = {0, 0, 0, 0, 0},
+ /* noiseFloorThreshCh Check if the register is per chain */
+ .noiseFloorThreshCh = {-1, 0, 0},
+ .ob = {3, 3, 3}, /* 3 chain */
+ .db_stage2 = {3, 3, 3}, /* 3 chain */
+ .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
+ .db_stage4 = {3, 3, 3}, /* doesn't exist for 2G */
+ .xpaBiasLvl = 0,
+ .txFrameToDataStart = 0x0e,
+ .txFrameToPaOn = 0x0e,
+ .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
+ .antennaGain = 0,
+ .switchSettling = 0x2d,
+ .adcDesiredSize = -30,
+ .txEndToXpaOff = 0,
+ .txEndToRxOn = 0x2,
+ .txFrameToXpaOn = 0xe,
+ .thresh62 = 28,
+ .futureModal = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ },
+ },
+ .calFreqPier5G = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5220, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5600, 0),
+ FREQ2FBIN(5725, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calPierData5G = {
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+
+ },
+ .calTarget_freqbin_5G = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5220, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5600, 0),
+ FREQ2FBIN(5725, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calTarget_freqbin_5GHT20 = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5240, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5700, 0),
+ FREQ2FBIN(5745, 0),
+ FREQ2FBIN(5725, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calTarget_freqbin_5GHT40 = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5240, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5700, 0),
+ FREQ2FBIN(5745, 0),
+ FREQ2FBIN(5725, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calTargetPower5G = {
+ /* 6-24,36,48,54 */
+ { {20, 20, 20, 10} },
+ { {20, 20, 20, 10} },
+ { {20, 20, 20, 10} },
+ { {20, 20, 20, 10} },
+ { {20, 20, 20, 10} },
+ { {20, 20, 20, 10} },
+ { {20, 20, 20, 10} },
+ { {20, 20, 20, 10} },
+ },
+ .calTargetPower5GHT20 = {
+ /*
+ * 0_8_16,1-3_9-11_17-19,
+ * 4,5,6,7,12,13,14,15,20,21,22,23
+ */
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ },
+ .calTargetPower5GHT40 = {
+ /*
+ * 0_8_16,1-3_9-11_17-19,
+ * 4,5,6,7,12,13,14,15,20,21,22,23
+ */
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ },
+ .ctlIndex_5G = {
+ 0x10, 0x16, 0x18, 0x40, 0x46,
+ 0x48, 0x30, 0x36, 0x38
+ },
+ .ctl_freqbin_5G = {
+ {
+ /* Data[0].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[0].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[0].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
+ /* Data[0].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[0].ctlEdges[4].bChannel */ FREQ2FBIN(5600, 0),
+ /* Data[0].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[0].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
+ /* Data[0].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
+ },
+ {
+ /* Data[1].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[1].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[1].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
+ /* Data[1].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[1].ctlEdges[4].bChannel */ FREQ2FBIN(5520, 0),
+ /* Data[1].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[1].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
+ /* Data[1].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
+ },
+
+ {
+ /* Data[2].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
+ /* Data[2].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
+ /* Data[2].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
+ /* Data[2].ctlEdges[3].bChannel */ FREQ2FBIN(5310, 0),
+ /* Data[2].ctlEdges[4].bChannel */ FREQ2FBIN(5510, 0),
+ /* Data[2].ctlEdges[5].bChannel */ FREQ2FBIN(5550, 0),
+ /* Data[2].ctlEdges[6].bChannel */ FREQ2FBIN(5670, 0),
+ /* Data[2].ctlEdges[7].bChannel */ FREQ2FBIN(5755, 0)
+ },
+
+ {
+ /* Data[3].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[3].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
+ /* Data[3].ctlEdges[2].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[3].ctlEdges[3].bChannel */ FREQ2FBIN(5320, 0),
+ /* Data[3].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[3].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[3].ctlEdges[6].bChannel */ 0xFF,
+ /* Data[3].ctlEdges[7].bChannel */ 0xFF,
+ },
+
+ {
+ /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[4].ctlEdges[4].bChannel */ 0xFF,
+ /* Data[4].ctlEdges[5].bChannel */ 0xFF,
+ /* Data[4].ctlEdges[6].bChannel */ 0xFF,
+ /* Data[4].ctlEdges[7].bChannel */ 0xFF,
+ },
+
+ {
+ /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
+ /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(5270, 0),
+ /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(5310, 0),
+ /* Data[5].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
+ /* Data[5].ctlEdges[4].bChannel */ FREQ2FBIN(5590, 0),
+ /* Data[5].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
+ /* Data[5].ctlEdges[6].bChannel */ 0xFF,
+ /* Data[5].ctlEdges[7].bChannel */ 0xFF
+ },
+
+ {
+ /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
+ /* Data[6].ctlEdges[2].bChannel */ FREQ2FBIN(5220, 0),
+ /* Data[6].ctlEdges[3].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[6].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[6].ctlEdges[5].bChannel */ FREQ2FBIN(5600, 0),
+ /* Data[6].ctlEdges[6].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[6].ctlEdges[7].bChannel */ FREQ2FBIN(5745, 0)
+ },
+
+ {
+ /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(5320, 0),
+ /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[7].ctlEdges[4].bChannel */ FREQ2FBIN(5560, 0),
+ /* Data[7].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[7].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
+ /* Data[7].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
+ },
+
+ {
+ /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
+ /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
+ /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
+ /* Data[8].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
+ /* Data[8].ctlEdges[4].bChannel */ FREQ2FBIN(5550, 0),
+ /* Data[8].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
+ /* Data[8].ctlEdges[6].bChannel */ FREQ2FBIN(5755, 0),
+ /* Data[8].ctlEdges[7].bChannel */ FREQ2FBIN(5795, 0)
+ }
+ },
+ .ctlPowerData_5G = {
+ {
+ {
+ {60, 1}, {60, 1}, {60, 1}, {60, 1},
+ {60, 1}, {60, 1}, {60, 1}, {60, 0},
+ }
+ },
+ {
+ {
+ {60, 1}, {60, 1}, {60, 1}, {60, 1},
+ {60, 1}, {60, 1}, {60, 1}, {60, 0},
+ }
+ },
+ {
+ {
+ {60, 0}, {60, 1}, {60, 0}, {60, 1},
+ {60, 1}, {60, 1}, {60, 1}, {60, 1},
+ }
+ },
+ {
+ {
+ {60, 0}, {60, 1}, {60, 1}, {60, 0},
+ {60, 1}, {60, 0}, {60, 0}, {60, 0},
+ }
+ },
+ {
+ {
+ {60, 1}, {60, 1}, {60, 1}, {60, 0},
+ {60, 0}, {60, 0}, {60, 0}, {60, 0},
+ }
+ },
+ {
+ {
+ {60, 1}, {60, 1}, {60, 1}, {60, 1},
+ {60, 1}, {60, 0}, {60, 0}, {60, 0},
+ }
+ },
+ {
+ {
+ {60, 1}, {60, 1}, {60, 1}, {60, 1},
+ {60, 1}, {60, 1}, {60, 1}, {60, 1},
+ }
+ },
+ {
+ {
+ {60, 1}, {60, 1}, {60, 0}, {60, 1},
+ {60, 1}, {60, 1}, {60, 1}, {60, 0},
+ }
+ },
+ {
+ {
+ {60, 1}, {60, 0}, {60, 1}, {60, 1},
+ {60, 1}, {60, 1}, {60, 0}, {60, 1},
+ }
+ },
+ }
+};
+
+static int ath9k_hw_ar9300_check_eeprom(struct ath_hw *ah)
+{
+ return 0;
+}
+
+static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
+ enum eeprom_param param)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct ar9300_base_eep_hdr *pBase = &eep->baseEepHeader;
+
+ switch (param) {
+ case EEP_MAC_LSW:
+ return eep->macAddr[0] << 8 | eep->macAddr[1];
+ case EEP_MAC_MID:
+ return eep->macAddr[2] << 8 | eep->macAddr[3];
+ case EEP_MAC_MSW:
+ return eep->macAddr[4] << 8 | eep->macAddr[5];
+ case EEP_REG_0:
+ return pBase->regDmn[0];
+ case EEP_REG_1:
+ return pBase->regDmn[1];
+ case EEP_OP_CAP:
+ return pBase->deviceCap;
+ case EEP_OP_MODE:
+ return pBase->opCapFlags.opFlags;
+ case EEP_RF_SILENT:
+ return pBase->rfSilent;
+ case EEP_TX_MASK:
+ return (pBase->txrxMask >> 4) & 0xf;
+ case EEP_RX_MASK:
+ return pBase->txrxMask & 0xf;
+ case EEP_DRIVE_STRENGTH:
+#define AR9300_EEP_BASE_DRIV_STRENGTH 0x1
+ return pBase->miscConfiguration & AR9300_EEP_BASE_DRIV_STRENGTH;
+ case EEP_INTERNAL_REGULATOR:
+ /* Bit 4 is internal regulator flag */
+ return (pBase->featureEnable & 0x10) >> 4;
+ case EEP_SWREG:
+ return pBase->swreg;
+ default:
+ return 0;
+ }
+}
+
+#ifdef __BIG_ENDIAN
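+/*
+ * On big-endian hosts, byte-swap the multi-byte fields of the eeprom
+ * template so it matches the little-endian on-eeprom layout (and can
+ * be swapped back once the restore is done).
+ */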
+static void ar9300_swap_eeprom(struct ar9300_eeprom *eep)
+{
+ u32 dword;
+ u16 word;
+ int i;
+
+ word = swab16(eep->baseEepHeader.regDmn[0]);
+ eep->baseEepHeader.regDmn[0] = word;
+
+ word = swab16(eep->baseEepHeader.regDmn[1]);
+ eep->baseEepHeader.regDmn[1] = word;
+
+ dword = swab32(eep->baseEepHeader.swreg);
+ eep->baseEepHeader.swreg = dword;
+
+ dword = swab32(eep->modalHeader2G.antCtrlCommon);
+ eep->modalHeader2G.antCtrlCommon = dword;
+
+ dword = swab32(eep->modalHeader2G.antCtrlCommon2);
+ eep->modalHeader2G.antCtrlCommon2 = dword;
+
+ dword = swab32(eep->modalHeader5G.antCtrlCommon);
+ eep->modalHeader5G.antCtrlCommon = dword;
+
+ dword = swab32(eep->modalHeader5G.antCtrlCommon2);
+ eep->modalHeader5G.antCtrlCommon2 = dword;
+
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ word = swab16(eep->modalHeader2G.antCtrlChain[i]);
+ eep->modalHeader2G.antCtrlChain[i] = word;
+
+ word = swab16(eep->modalHeader5G.antCtrlChain[i]);
+ eep->modalHeader5G.antCtrlChain[i] = word;
+ }
+}
+#endif
+
+static bool ar9300_hw_read_eeprom(struct ath_hw *ah,
+ long address, u8 *buffer, int many)
+{
+ int i;
+ u8 value[2];
+ unsigned long eepAddr;
+ unsigned long byteAddr;
+ u16 *svalue;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if ((address < 0) || ((address + many) > AR9300_EEPROM_SIZE - 1)) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "eeprom address not in range\n");
+ return false;
+ }
+
+ for (i = 0; i < many; i++) {
+ eepAddr = (u16) (address + i) / 2;
+ byteAddr = (u16) (address + i) % 2;
+ svalue = (u16 *) value;
+ if (!ath9k_hw_nvram_read(common, eepAddr, svalue)) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "unable to read eeprom region\n");
+ return false;
+ }
+ *svalue = le16_to_cpu(*svalue);
+ buffer[i] = value[byteAddr];
+ }
+
+ return true;
+}
+
+static bool ar9300_read_eeprom(struct ath_hw *ah,
+ int address, u8 *buffer, int many)
+{
+ int it;
+
+ for (it = 0; it < many; it++)
+ if (!ar9300_hw_read_eeprom(ah,
+ (address - it),
+ (buffer + it), 1))
+ return false;
+ return true;
+}
+
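+/*
+ * The 4-byte compression header packs, per the masks below: a 3-bit
+ * compression code, a 6-bit reference template id, an 11-bit block
+ * length and 4-bit major / 8-bit minor version numbers.
+ */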
+static void ar9300_comp_hdr_unpack(u8 *best, int *code, int *reference,
+ int *length, int *major, int *minor)
+{
+ unsigned long value[4];
+
+ value[0] = best[0];
+ value[1] = best[1];
+ value[2] = best[2];
+ value[3] = best[3];
+ *code = ((value[0] >> 5) & 0x0007);
+ *reference = (value[0] & 0x001f) | ((value[1] >> 2) & 0x0020);
+ *length = ((value[1] << 4) & 0x07f0) | ((value[2] >> 4) & 0x000f);
+ *major = (value[2] & 0x000f);
+ *minor = (value[3] & 0x00ff);
+}
+
+static u16 ar9300_comp_cksum(u8 *data, int dsize)
+{
+ int it, checksum = 0;
+
+ for (it = 0; it < dsize; it++) {
+ checksum += data[it];
+ checksum &= 0xffff;
+ }
+
+ return checksum;
+}
+
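+/*
+ * A compressed block is a sequence of (offset, length, data) records:
+ * advance the write position by 'offset', then copy 'length' bytes.
+ */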
+static bool ar9300_uncompress_block(struct ath_hw *ah,
+ u8 *mptr,
+ int mdataSize,
+ u8 *block,
+ int size)
+{
+ int it;
+ int spot;
+ int offset;
+ int length;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ spot = 0;
+
+ for (it = 0; it < size; it += (length+2)) {
+ offset = block[it];
+ offset &= 0xff;
+ spot += offset;
+ length = block[it+1];
+ length &= 0xff;
+
+ if (length > 0 && spot >= 0 && spot+length < mdataSize) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "Restore at %d: spot=%d "
+ "offset=%d length=%d\n",
+ it, spot, offset, length);
+ memcpy(&mptr[spot], &block[it+2], length);
+ spot += length;
+ } else if (length > 0) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "Bad restore at %d: spot=%d "
+ "offset=%d length=%d\n",
+ it, spot, offset, length);
+ return false;
+ }
+ }
+ return true;
+}
+
+static int ar9300_compress_decision(struct ath_hw *ah,
+ int it,
+ int code,
+ int reference,
+ u8 *mptr,
+ u8 *word, int length, int mdata_size)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u8 *dptr;
+
+ switch (code) {
+ case _CompressNone:
+ if (length != mdata_size) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "EEPROM structure size mismatch "
+ "memory=%d eeprom=%d\n", mdata_size, length);
+ return -1;
+ }
+ memcpy(mptr, (u8 *) (word + COMP_HDR_LEN), length);
+ ath_print(common, ATH_DBG_EEPROM, "restored eeprom %d:"
+ " uncompressed, length %d\n", it, length);
+ break;
+ case _CompressBlock:
+ if (reference == 0) {
+ dptr = mptr;
+ } else {
+ if (reference != 2) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "can't find reference eeprom "
+ "struct %d\n", reference);
+ return -1;
+ }
+ memcpy(mptr, &ar9300_default, mdata_size);
+ }
+ ath_print(common, ATH_DBG_EEPROM,
+ "restore eeprom %d: block, reference %d,"
+ " length %d\n", it, reference, length);
+ ar9300_uncompress_block(ah, mptr, mdata_size,
+ (u8 *) (word + COMP_HDR_LEN), length);
+ break;
+ default:
+ ath_print(common, ATH_DBG_EEPROM, "unknown compression"
+ " code %d\n", code);
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ * Read the configuration data from the eeprom.
+ * The data can be put in any specified memory buffer.
+ *
+ * Returns -1 on error.
+ * Returns address of next memory location on success.
+ */
+static int ar9300_eeprom_restore_internal(struct ath_hw *ah,
+ u8 *mptr, int mdata_size)
+{
+#define MDEFAULT 15
+#define MSTATE 100
+ int cptr;
+ u8 *word;
+ int code;
+ int reference, length, major, minor;
+ int osize;
+ int it;
+ u16 checksum, mchecksum;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ word = kzalloc(2048, GFP_KERNEL);
+ if (!word)
+ return -1;
+
+ memcpy(mptr, &ar9300_default, mdata_size);
+
+ cptr = AR9300_BASE_ADDR;
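+ /*
+ * Blocks are stored from the top of the eeprom downwards:
+ * ar9300_read_eeprom() reads at descending addresses and cptr
+ * moves down past each block (header + data + checksum).
+ */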
+ for (it = 0; it < MSTATE; it++) {
+ if (!ar9300_read_eeprom(ah, cptr, word, COMP_HDR_LEN))
+ goto fail;
+
+ if ((word[0] == 0 && word[1] == 0 && word[2] == 0 &&
+ word[3] == 0) || (word[0] == 0xff && word[1] == 0xff
+ && word[2] == 0xff && word[3] == 0xff))
+ break;
+
+ ar9300_comp_hdr_unpack(word, &code, &reference,
+ &length, &major, &minor);
+ ath_print(common, ATH_DBG_EEPROM,
+ "Found block at %x: code=%d ref=%d "
+ "length=%d major=%d minor=%d\n", cptr, code,
+ reference, length, major, minor);
+ if (length >= 1024) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "Skipping bad header\n");
+ cptr -= COMP_HDR_LEN;
+ continue;
+ }
+
+ osize = length;
+ ar9300_read_eeprom(ah, cptr, word,
+ COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
+ checksum = ar9300_comp_cksum(&word[COMP_HDR_LEN], length);
+ mchecksum = word[COMP_HDR_LEN + osize] |
+ (word[COMP_HDR_LEN + osize + 1] << 8);
+ ath_print(common, ATH_DBG_EEPROM,
+ "checksum %x %x\n", checksum, mchecksum);
+ if (checksum == mchecksum) {
+ ar9300_compress_decision(ah, it, code, reference, mptr,
+ word, length, mdata_size);
+ } else {
+ ath_print(common, ATH_DBG_EEPROM,
+ "skipping block with bad checksum\n");
+ }
+ cptr -= (COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
+ }
+
+ kfree(word);
+ return cptr;
+
+fail:
+ kfree(word);
+ return -1;
+}
+
+/*
+ * Restore the configuration structure by reading the eeprom.
+ * This function destroys any existing in-memory structure
+ * content.
+ */
+static bool ath9k_hw_ar9300_fill_eeprom(struct ath_hw *ah)
+{
+ u8 *mptr = NULL;
+ int mdata_size;
+
+ mptr = (u8 *) &ah->eeprom.ar9300_eep;
+ mdata_size = sizeof(struct ar9300_eeprom);
+
+ if (mptr && mdata_size > 0) {
+ /* At this point, mptr points to the eeprom data structure
+ * in its "default" state. If the host is big endian, swap the
+ * data structures back to "little endian"
+ */
+ /* First swap, default to Little Endian */
+#ifdef __BIG_ENDIAN
+ ar9300_swap_eeprom((struct ar9300_eeprom *)mptr);
+#endif
+ if (ar9300_eeprom_restore_internal(ah, mptr, mdata_size) >= 0)
+ return true;
+
+ /* Second Swap, back to Big Endian */
+#ifdef __BIG_ENDIAN
+ ar9300_swap_eeprom((struct ar9300_eeprom *)mptr);
+#endif
+ }
+ return false;
+}
+
+/* XXX: review hardware docs */
+static int ath9k_hw_ar9300_get_eeprom_ver(struct ath_hw *ah)
+{
+ return ah->eeprom.ar9300_eep.eepromVersion;
+}
+
+/* XXX: could be read from the eepromVersion, not sure yet */
+static int ath9k_hw_ar9300_get_eeprom_rev(struct ath_hw *ah)
+{
+ return 0;
+}
+
+static u8 ath9k_hw_ar9300_get_num_ant_config(struct ath_hw *ah,
+ enum ieee80211_band freq_band)
+{
+ return 1;
+}
+
+static u16 ath9k_hw_ar9300_get_eeprom_antenna_cfg(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return -EINVAL;
+}
+
+static s32 ar9003_hw_xpa_bias_level_get(struct ath_hw *ah, bool is2ghz)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+
+ if (is2ghz)
+ return eep->modalHeader2G.xpaBiasLvl;
+ else
+ return eep->modalHeader5G.xpaBiasLvl;
+}
+
+static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz)
+{
+ int bias = ar9003_hw_xpa_bias_level_get(ah, is2ghz);
+ REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, (bias & 0x3));
+ REG_RMW_FIELD(ah, AR_CH0_THERM, AR_CH0_THERM_SPARE,
+ ((bias >> 2) & 0x3));
+}
+
+static u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+
+ if (is2ghz)
+ return eep->modalHeader2G.antCtrlCommon;
+ else
+ return eep->modalHeader5G.antCtrlCommon;
+}
+
+static u32 ar9003_hw_ant_ctrl_common_2_get(struct ath_hw *ah, bool is2ghz)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+
+ if (is2ghz)
+ return eep->modalHeader2G.antCtrlCommon2;
+ else
+ return eep->modalHeader5G.antCtrlCommon2;
+}
+
+static u16 ar9003_hw_ant_ctrl_chain_get(struct ath_hw *ah,
+ int chain,
+ bool is2ghz)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+
+ if (chain >= 0 && chain < AR9300_MAX_CHAINS) {
+ if (is2ghz)
+ return eep->modalHeader2G.antCtrlChain[chain];
+ else
+ return eep->modalHeader5G.antCtrlChain[chain];
+ }
+
+ return 0;
+}
+
+static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
+{
+ u32 value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
+ REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM, AR_SWITCH_TABLE_COM_ALL, value);
+
+ value = ar9003_hw_ant_ctrl_common_2_get(ah, is2ghz);
+ REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM_2, AR_SWITCH_TABLE_COM2_ALL, value);
+
+ value = ar9003_hw_ant_ctrl_chain_get(ah, 0, is2ghz);
+ REG_RMW_FIELD(ah, AR_PHY_SWITCH_CHAIN_0, AR_SWITCH_TABLE_ALL, value);
+
+ value = ar9003_hw_ant_ctrl_chain_get(ah, 1, is2ghz);
+ REG_RMW_FIELD(ah, AR_PHY_SWITCH_CHAIN_1, AR_SWITCH_TABLE_ALL, value);
+
+ value = ar9003_hw_ant_ctrl_chain_get(ah, 2, is2ghz);
+ REG_RMW_FIELD(ah, AR_PHY_SWITCH_CHAIN_2, AR_SWITCH_TABLE_ALL, value);
+}
+
+static void ar9003_hw_drive_strength_apply(struct ath_hw *ah)
+{
+ int drive_strength;
+ unsigned long reg;
+
+ drive_strength = ath9k_hw_ar9300_get_eeprom(ah, EEP_DRIVE_STRENGTH);
+
+ if (!drive_strength)
+ return;
+
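+ /*
+ * The eeprom's "turn down drivestrength" flag is set: force each
+ * 3-bit bias field in the CH0 BIAS1/2/4 registers to 5.
+ */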
+ reg = REG_READ(ah, AR_PHY_65NM_CH0_BIAS1);
+ reg &= ~0x00ffffc0;
+ reg |= 0x5 << 21;
+ reg |= 0x5 << 18;
+ reg |= 0x5 << 15;
+ reg |= 0x5 << 12;
+ reg |= 0x5 << 9;
+ reg |= 0x5 << 6;
+ REG_WRITE(ah, AR_PHY_65NM_CH0_BIAS1, reg);
+
+ reg = REG_READ(ah, AR_PHY_65NM_CH0_BIAS2);
+ reg &= ~0xffffffe0;
+ reg |= 0x5 << 29;
+ reg |= 0x5 << 26;
+ reg |= 0x5 << 23;
+ reg |= 0x5 << 20;
+ reg |= 0x5 << 17;
+ reg |= 0x5 << 14;
+ reg |= 0x5 << 11;
+ reg |= 0x5 << 8;
+ reg |= 0x5 << 5;
+ REG_WRITE(ah, AR_PHY_65NM_CH0_BIAS2, reg);
+
+ reg = REG_READ(ah, AR_PHY_65NM_CH0_BIAS4);
+ reg &= ~0xff800000;
+ reg |= 0x5 << 29;
+ reg |= 0x5 << 26;
+ reg |= 0x5 << 23;
+ REG_WRITE(ah, AR_PHY_65NM_CH0_BIAS4, reg);
+}
+
+static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
+{
+ int internal_regulator =
+ ath9k_hw_ar9300_get_eeprom(ah, EEP_INTERNAL_REGULATOR);
+
+ if (internal_regulator) {
+ /* Internal regulator is ON. Write swreg register. */
+ int swreg = ath9k_hw_ar9300_get_eeprom(ah, EEP_SWREG);
+ REG_WRITE(ah, AR_RTC_REG_CONTROL1,
+ REG_READ(ah, AR_RTC_REG_CONTROL1) &
+ (~AR_RTC_REG_CONTROL1_SWREG_PROGRAM));
+ REG_WRITE(ah, AR_RTC_REG_CONTROL0, swreg);
+ /* Set REG_CONTROL1.SWREG_PROGRAM */
+ REG_WRITE(ah, AR_RTC_REG_CONTROL1,
+ REG_READ(ah,
+ AR_RTC_REG_CONTROL1) |
+ AR_RTC_REG_CONTROL1_SWREG_PROGRAM);
+ } else {
+ REG_WRITE(ah, AR_RTC_SLEEP_CLK,
+ (REG_READ(ah,
+ AR_RTC_SLEEP_CLK) |
+ AR_RTC_FORCE_SWREG_PRD));
+ }
+}
+
+static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ ar9003_hw_xpa_bias_level_apply(ah, IS_CHAN_2GHZ(chan));
+ ar9003_hw_ant_ctrl_apply(ah, IS_CHAN_2GHZ(chan));
+ ar9003_hw_drive_strength_apply(ah);
+ ar9003_hw_internal_regulator_apply(ah);
+}
+
+static void ath9k_hw_ar9300_set_addac(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+}
+
+/*
+ * Returns the interpolated y value corresponding to the specified x value
+ * from the np ordered pairs of data (px,py).
+ * The pairs do not have to be in any order.
+ * If the specified x value is below the lowest px, the returned y
+ * value is the py for the lowest px.
+ * If the specified x value is above the highest px, the returned y
+ * value is the py for the highest px.
+ */
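+/*
+ * For example (values chosen purely for illustration): with piers
+ * (5180, 10) and (5500, 20), a request for x = 5340 returns
+ * 10 + ((5340 - 5180) * (20 - 10)) / (5500 - 5180) = 15.
+ */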
+static int ar9003_hw_power_interpolate(int32_t x,
+ int32_t *px, int32_t *py, u_int16_t np)
+{
+ int ip = 0;
+ int lx = 0, ly = 0, lhave = 0;
+ int hx = 0, hy = 0, hhave = 0;
+ int dx = 0;
+ int y = 0;
+
+ lhave = 0;
+ hhave = 0;
+
+ /* identify best lower and higher x calibration measurement */
+ for (ip = 0; ip < np; ip++) {
+ dx = x - px[ip];
+
+ /* this measurement is higher than our desired x */
+ if (dx <= 0) {
+ if (!hhave || dx > (x - hx)) {
+ /* new best higher x measurement */
+ hx = px[ip];
+ hy = py[ip];
+ hhave = 1;
+ }
+ }
+ /* this measurement is lower than our desired x */
+ if (dx >= 0) {
+ if (!lhave || dx < (x - lx)) {
+ /* new best lower x measurement */
+ lx = px[ip];
+ ly = py[ip];
+ lhave = 1;
+ }
+ }
+ }
+
+ /* the low x is good */
+ if (lhave) {
+ /* so is the high x */
+ if (hhave) {
+ /* they're the same, so just pick one */
+ if (hx == lx)
+ y = ly;
+ else /* interpolate */
+ y = ly + (((x - lx) * (hy - ly)) / (hx - lx));
+ } else /* only low is good, use it */
+ y = ly;
+ } else if (hhave) /* only high is good, use it */
+ y = hy;
+ else /* nothing is good; this should never happen unless np == 0 */
+ y = -(1 << 30);
+ return y;
+}
+
+static u8 ar9003_hw_eeprom_get_tgt_pwr(struct ath_hw *ah,
+ u16 rateIndex, u16 freq, bool is2GHz)
+{
+ u16 numPiers, i;
+ s32 targetPowerArray[AR9300_NUM_5G_20_TARGET_POWERS];
+ s32 freqArray[AR9300_NUM_5G_20_TARGET_POWERS];
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct cal_tgt_pow_legacy *pEepromTargetPwr;
+ u8 *pFreqBin;
+
+ if (is2GHz) {
+ numPiers = AR9300_NUM_2G_20_TARGET_POWERS;
+ pEepromTargetPwr = eep->calTargetPower2G;
+ pFreqBin = eep->calTarget_freqbin_2G;
+ } else {
+ numPiers = AR9300_NUM_5G_20_TARGET_POWERS;
+ pEepromTargetPwr = eep->calTargetPower5G;
+ pFreqBin = eep->calTarget_freqbin_5G;
+ }
+
+ /*
+ * create array of channels and targetpower from
+ * targetpower piers stored on eeprom
+ */
+ for (i = 0; i < numPiers; i++) {
+ freqArray[i] = FBIN2FREQ(pFreqBin[i], is2GHz);
+ targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
+ }
+
+ /* interpolate to get target power for given frequency */
+ return (u8) ar9003_hw_power_interpolate((s32) freq,
+ freqArray,
+ targetPowerArray, numPiers);
+}
+
+static u8 ar9003_hw_eeprom_get_ht20_tgt_pwr(struct ath_hw *ah,
+ u16 rateIndex,
+ u16 freq, bool is2GHz)
+{
+ u16 numPiers, i;
+ s32 targetPowerArray[AR9300_NUM_5G_20_TARGET_POWERS];
+ s32 freqArray[AR9300_NUM_5G_20_TARGET_POWERS];
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct cal_tgt_pow_ht *pEepromTargetPwr;
+ u8 *pFreqBin;
+
+ if (is2GHz) {
+ numPiers = AR9300_NUM_2G_20_TARGET_POWERS;
+ pEepromTargetPwr = eep->calTargetPower2GHT20;
+ pFreqBin = eep->calTarget_freqbin_2GHT20;
+ } else {
+ numPiers = AR9300_NUM_5G_20_TARGET_POWERS;
+ pEepromTargetPwr = eep->calTargetPower5GHT20;
+ pFreqBin = eep->calTarget_freqbin_5GHT20;
+ }
+
+ /*
+ * create array of channels and targetpower
+ * from targetpower piers stored on eeprom
+ */
+ for (i = 0; i < numPiers; i++) {
+ freqArray[i] = FBIN2FREQ(pFreqBin[i], is2GHz);
+ targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
+ }
+
+ /* interpolate to get target power for given frequency */
+ return (u8) ar9003_hw_power_interpolate((s32) freq,
+ freqArray,
+ targetPowerArray, numPiers);
+}
+
+static u8 ar9003_hw_eeprom_get_ht40_tgt_pwr(struct ath_hw *ah,
+ u16 rateIndex,
+ u16 freq, bool is2GHz)
+{
+ u16 numPiers, i;
+ s32 targetPowerArray[AR9300_NUM_5G_40_TARGET_POWERS];
+ s32 freqArray[AR9300_NUM_5G_40_TARGET_POWERS];
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct cal_tgt_pow_ht *pEepromTargetPwr;
+ u8 *pFreqBin;
+
+ if (is2GHz) {
+ numPiers = AR9300_NUM_2G_40_TARGET_POWERS;
+ pEepromTargetPwr = eep->calTargetPower2GHT40;
+ pFreqBin = eep->calTarget_freqbin_2GHT40;
+ } else {
+ numPiers = AR9300_NUM_5G_40_TARGET_POWERS;
+ pEepromTargetPwr = eep->calTargetPower5GHT40;
+ pFreqBin = eep->calTarget_freqbin_5GHT40;
+ }
+
+ /*
+ * create array of channels and targetpower from
+ * targetpower piers stored on eeprom
+ */
+ for (i = 0; i < numPiers; i++) {
+ freqArray[i] = FBIN2FREQ(pFreqBin[i], is2GHz);
+ targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
+ }
+
+ /* interpolate to get target power for given frequency */
+ return (u8) ar9003_hw_power_interpolate((s32) freq,
+ freqArray,
+ targetPowerArray, numPiers);
+}
+
+static u8 ar9003_hw_eeprom_get_cck_tgt_pwr(struct ath_hw *ah,
+ u16 rateIndex, u16 freq)
+{
+ u16 numPiers = AR9300_NUM_2G_CCK_TARGET_POWERS, i;
+ s32 targetPowerArray[AR9300_NUM_2G_CCK_TARGET_POWERS];
+ s32 freqArray[AR9300_NUM_2G_CCK_TARGET_POWERS];
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct cal_tgt_pow_legacy *pEepromTargetPwr = eep->calTargetPowerCck;
+ u8 *pFreqBin = eep->calTarget_freqbin_Cck;
+
+ /*
+ * create array of channels and targetpower from
+ * targetpower piers stored on eeprom
+ */
+ for (i = 0; i < numPiers; i++) {
+ freqArray[i] = FBIN2FREQ(pFreqBin[i], 1);
+ targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
+ }
+
+ /* interpolate to get target power for given frequency */
+ return (u8) ar9003_hw_power_interpolate((s32) freq,
+ freqArray,
+ targetPowerArray, numPiers);
+}
+
+/* Set tx power registers to array of values passed in */
+static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
+{
+#define POW_SM(_r, _s) (((_r) & 0x3f) << (_s))
+ /* make sure forced gain is not set */
+ REG_WRITE(ah, 0xa458, 0);
+
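+ /*
+ * Each 32-bit per-rate register below packs four 6-bit target
+ * power values, one per byte lane; POW_SM() masks a value to
+ * 6 bits and shifts it into position.
+ */
+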
+ /* Write the OFDM power per rate set */
+
+ /* 6 (LSB), 9, 12, 18 (MSB) */
+ REG_WRITE(ah, 0xa3c0,
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 0));
+
+ /* 24 (LSB), 36, 48, 54 (MSB) */
+ REG_WRITE(ah, 0xa3c4,
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_54], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_48], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_36], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 0));
+
+ /* Write the CCK power per rate set */
+
+ /* 1L (LSB), reserved, 2L, 2S (MSB) */
+ REG_WRITE(ah, 0xa3c8,
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 16) |
+ /* POW_SM(txPowerTimes2, 8) | this is reserved for AR9003 */
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0));
+
+ /* 5.5L (LSB), 5.5S, 11L, 11S (MSB) */
+ REG_WRITE(ah, 0xa3cc,
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_11S], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_11L], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_5S], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0)
+ );
+
+ /* Write the HT20 power per rate set */
+
+ /* 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB) */
+ REG_WRITE(ah, 0xa3d0,
+ POW_SM(pPwrArray[ALL_TARGET_HT20_5], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_4], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_1_3_9_11_17_19], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_0_8_16], 0)
+ );
+
+ /* 6 (LSB), 7, 12, 13 (MSB) */
+ REG_WRITE(ah, 0xa3d4,
+ POW_SM(pPwrArray[ALL_TARGET_HT20_13], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_12], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_7], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_6], 0)
+ );
+
+ /* 14 (LSB), 15, 20, 21 */
+ REG_WRITE(ah, 0xa3e4,
+ POW_SM(pPwrArray[ALL_TARGET_HT20_21], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_20], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_15], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_14], 0)
+ );
+
+ /* Mixed HT20 and HT40 rates */
+
+ /* HT20 22 (LSB), HT20 23, HT40 22, HT40 23 (MSB) */
+ REG_WRITE(ah, 0xa3e8,
+ POW_SM(pPwrArray[ALL_TARGET_HT40_23], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_22], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_23], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_22], 0)
+ );
+
+ /*
+ * Write the HT40 power per rate set
+ * correct PAR difference between HT40 and HT20/LEGACY
+ * 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB)
+ */
+ REG_WRITE(ah, 0xa3d8,
+ POW_SM(pPwrArray[ALL_TARGET_HT40_5], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_4], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_1_3_9_11_17_19], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_0_8_16], 0)
+ );
+
+ /* 6 (LSB), 7, 12, 13 (MSB) */
+ REG_WRITE(ah, 0xa3dc,
+ POW_SM(pPwrArray[ALL_TARGET_HT40_13], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_12], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_7], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_6], 0)
+ );
+
+ /* 14 (LSB), 15, 20, 21 */
+ REG_WRITE(ah, 0xa3ec,
+ POW_SM(pPwrArray[ALL_TARGET_HT40_21], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_20], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_15], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_14], 0)
+ );
+
+ return 0;
+#undef POW_SM
+}
+
+static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq)
+{
+ u8 targetPowerValT2[ar9300RateSize];
+ /* XXX: hard code for now, need to get from eeprom struct */
+ u8 ht40PowerIncForPdadc = 0;
+ bool is2GHz = false;
+ unsigned int i = 0;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (freq < 4000)
+ is2GHz = true;
+
+ targetPowerValT2[ALL_TARGET_LEGACY_6_24] =
+ ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_6_24, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_LEGACY_36] =
+ ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_36, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_LEGACY_48] =
+ ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_48, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_LEGACY_54] =
+ ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_54, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_LEGACY_1L_5L] =
+ ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_1L_5L,
+ freq);
+ targetPowerValT2[ALL_TARGET_LEGACY_5S] =
+ ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_5S, freq);
+ targetPowerValT2[ALL_TARGET_LEGACY_11L] =
+ ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_11L, freq);
+ targetPowerValT2[ALL_TARGET_LEGACY_11S] =
+ ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_11S, freq);
+ targetPowerValT2[ALL_TARGET_HT20_0_8_16] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_0_8_16, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_1_3_9_11_17_19] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_1_3_9_11_17_19,
+ freq, is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_4] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_4, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_5] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_5, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_6] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_6, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_7] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_7, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_12] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_12, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_13] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_13, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_14] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_14, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_15] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_15, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_20] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_20, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_21] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_21, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_22] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_22, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_23] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_23, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT40_0_8_16] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_0_8_16, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_1_3_9_11_17_19] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_1_3_9_11_17_19,
+ freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_4] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_4, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_5] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_5, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_6] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_6, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_7] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_7, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_12] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_12, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_13] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_13, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_14] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_14, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_15] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_15, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_20] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_20, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_21] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_21, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_22] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_22, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_23] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_23, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+
+ while (i < ar9300RateSize) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]);
+ i++;
+
+ ath_print(common, ATH_DBG_EEPROM,
+ "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]);
+ i++;
+
+ ath_print(common, ATH_DBG_EEPROM,
+ "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]);
+ i++;
+
+ ath_print(common, ATH_DBG_EEPROM,
+ "TPC[%02d] 0x%08x\n", i, targetPowerValT2[i]);
+ i++;
+ }
+
+ /* Write target power array to registers */
+ ar9003_hw_tx_power_regwrite(ah, targetPowerValT2);
+}
+
+static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
+ int mode,
+ int ipier,
+ int ichain,
+ int *pfrequency,
+ int *pcorrection,
+ int *ptemperature, int *pvoltage)
+{
+ u8 *pCalPier;
+ struct ar9300_cal_data_per_freq_op_loop *pCalPierStruct;
+ int is2GHz;
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (ichain >= AR9300_MAX_CHAINS) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "Invalid chain index, must be less than %d\n",
+ AR9300_MAX_CHAINS);
+ return -1;
+ }
+
+ if (mode) { /* 5GHz */
+ if (ipier >= AR9300_NUM_5G_CAL_PIERS) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "Invalid 5GHz cal pier index, must "
+ "be less than %d\n",
+ AR9300_NUM_5G_CAL_PIERS);
+ return -1;
+ }
+ pCalPier = &(eep->calFreqPier5G[ipier]);
+ pCalPierStruct = &(eep->calPierData5G[ichain][ipier]);
+ is2GHz = 0;
+ } else {
+ if (ipier >= AR9300_NUM_2G_CAL_PIERS) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "Invalid 2GHz cal pier index, must "
+ "be less than %d\n", AR9300_NUM_2G_CAL_PIERS);
+ return -1;
+ }
+
+ pCalPier = &(eep->calFreqPier2G[ipier]);
+ pCalPierStruct = &(eep->calPierData2G[ichain][ipier]);
+ is2GHz = 1;
+ }
+
+ *pfrequency = FBIN2FREQ(*pCalPier, is2GHz);
+ *pcorrection = pCalPierStruct->refPower;
+ *ptemperature = pCalPierStruct->tempMeas;
+ *pvoltage = pCalPierStruct->voltMeas;
+
+ return 0;
+}
+
+static int ar9003_hw_power_control_override(struct ath_hw *ah,
+ int frequency,
+ int *correction,
+ int *voltage, int *temperature)
+{
+ int tempSlope = 0;
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+
+ REG_RMW(ah, AR_PHY_TPC_11_B0,
+ (correction[0] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
+ AR_PHY_TPC_OLPC_GAIN_DELTA);
+ REG_RMW(ah, AR_PHY_TPC_11_B1,
+ (correction[1] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
+ AR_PHY_TPC_OLPC_GAIN_DELTA);
+ REG_RMW(ah, AR_PHY_TPC_11_B2,
+ (correction[2] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
+ AR_PHY_TPC_OLPC_GAIN_DELTA);
+
+ /* enable open loop power control on chip */
+ REG_RMW(ah, AR_PHY_TPC_6_B0,
+ (3 << AR_PHY_TPC_6_ERROR_EST_MODE_S),
+ AR_PHY_TPC_6_ERROR_EST_MODE);
+ REG_RMW(ah, AR_PHY_TPC_6_B1,
+ (3 << AR_PHY_TPC_6_ERROR_EST_MODE_S),
+ AR_PHY_TPC_6_ERROR_EST_MODE);
+ REG_RMW(ah, AR_PHY_TPC_6_B2,
+ (3 << AR_PHY_TPC_6_ERROR_EST_MODE_S),
+ AR_PHY_TPC_6_ERROR_EST_MODE);
+
+ /*
+ * enable temperature compensation
+ * Need to use register names
+ */
+ if (frequency < 4000)
+ tempSlope = eep->modalHeader2G.tempSlope;
+ else
+ tempSlope = eep->modalHeader5G.tempSlope;
+
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19, AR_PHY_TPC_19_ALPHA_THERM, tempSlope);
+ REG_RMW_FIELD(ah, AR_PHY_TPC_18, AR_PHY_TPC_18_THERM_CAL_VALUE,
+ temperature[0]);
+
+ return 0;
+}
+
+/* Apply the recorded correction values. */
+static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
+{
+ int ichain, ipier, npier;
+ int mode;
+ int lfrequency[AR9300_MAX_CHAINS],
+ lcorrection[AR9300_MAX_CHAINS],
+ ltemperature[AR9300_MAX_CHAINS], lvoltage[AR9300_MAX_CHAINS];
+ int hfrequency[AR9300_MAX_CHAINS],
+ hcorrection[AR9300_MAX_CHAINS],
+ htemperature[AR9300_MAX_CHAINS], hvoltage[AR9300_MAX_CHAINS];
+ int fdiff;
+ int correction[AR9300_MAX_CHAINS],
+ voltage[AR9300_MAX_CHAINS], temperature[AR9300_MAX_CHAINS];
+ int pfrequency, pcorrection, ptemperature, pvoltage;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ mode = (frequency >= 4000);
+ if (mode)
+ npier = AR9300_NUM_5G_CAL_PIERS;
+ else
+ npier = AR9300_NUM_2G_CAL_PIERS;
+
+ for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
+ lfrequency[ichain] = 0;
+ hfrequency[ichain] = 100000;
+ }
+ /* identify best lower and higher frequency calibration measurement */
+ for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
+ for (ipier = 0; ipier < npier; ipier++) {
+ if (!ar9003_hw_cal_pier_get(ah, mode, ipier, ichain,
+ &pfrequency, &pcorrection,
+ &ptemperature, &pvoltage)) {
+ fdiff = frequency - pfrequency;
+
+ /*
+ * this measurement is higher than
+ * our desired frequency
+ */
+ if (fdiff <= 0) {
+ if (hfrequency[ichain] <= 0 ||
+ hfrequency[ichain] >= 100000 ||
+ fdiff >
+ (frequency - hfrequency[ichain])) {
+ /*
+ * new best higher
+ * frequency measurement
+ */
+ hfrequency[ichain] = pfrequency;
+ hcorrection[ichain] =
+ pcorrection;
+ htemperature[ichain] =
+ ptemperature;
+ hvoltage[ichain] = pvoltage;
+ }
+ }
+ if (fdiff >= 0) {
+ if (lfrequency[ichain] <= 0
+ || fdiff <
+ (frequency - lfrequency[ichain])) {
+ /*
+ * new best lower
+ * frequency measurement
+ */
+ lfrequency[ichain] = pfrequency;
+ lcorrection[ichain] =
+ pcorrection;
+ ltemperature[ichain] =
+ ptemperature;
+ lvoltage[ichain] = pvoltage;
+ }
+ }
+ }
+ }
+ }
+
+ /* interpolate */
+ for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "ch=%d f=%d low=%d %d h=%d %d\n",
+ ichain, frequency, lfrequency[ichain],
+ lcorrection[ichain], hfrequency[ichain],
+ hcorrection[ichain]);
+ /* they're the same, so just pick one */
+ if (hfrequency[ichain] == lfrequency[ichain]) {
+ correction[ichain] = lcorrection[ichain];
+ voltage[ichain] = lvoltage[ichain];
+ temperature[ichain] = ltemperature[ichain];
+ }
+ /* the low frequency is good */
+ else if (frequency - lfrequency[ichain] < 1000) {
+ /* so is the high frequency, interpolate */
+ if (hfrequency[ichain] - frequency < 1000) {
+
+ correction[ichain] = lcorrection[ichain] +
+ (((frequency - lfrequency[ichain]) *
+ (hcorrection[ichain] -
+ lcorrection[ichain])) /
+ (hfrequency[ichain] - lfrequency[ichain]));
+
+ temperature[ichain] = ltemperature[ichain] +
+ (((frequency - lfrequency[ichain]) *
+ (htemperature[ichain] -
+ ltemperature[ichain])) /
+ (hfrequency[ichain] - lfrequency[ichain]));
+
+ voltage[ichain] =
+ lvoltage[ichain] +
+ (((frequency -
+ lfrequency[ichain]) * (hvoltage[ichain] -
+ lvoltage[ichain]))
+ / (hfrequency[ichain] -
+ lfrequency[ichain]));
+ }
+ /* only low is good, use it */
+ else {
+ correction[ichain] = lcorrection[ichain];
+ temperature[ichain] = ltemperature[ichain];
+ voltage[ichain] = lvoltage[ichain];
+ }
+ }
+ /* only high is good, use it */
+ else if (hfrequency[ichain] - frequency < 1000) {
+ correction[ichain] = hcorrection[ichain];
+ temperature[ichain] = htemperature[ichain];
+ voltage[ichain] = hvoltage[ichain];
+ } else { /* nothing is good, fall back to 0 */
+ correction[ichain] = 0;
+ temperature[ichain] = 0;
+ voltage[ichain] = 0;
+ }
+ }
+
+ ar9003_hw_power_control_override(ah, frequency, correction, voltage,
+ temperature);
+
+ ath_print(common, ATH_DBG_EEPROM,
+ "for frequency=%d, calibration correction = %d %d %d\n",
+ frequency, correction[0], correction[1], correction[2]);
+
+ return 0;
+}
+
+static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
+ struct ath9k_channel *chan, u16 cfgCtl,
+ u8 twiceAntennaReduction,
+ u8 twiceMaxRegulatoryPower,
+ u8 powerLimit)
+{
+ ah->txpower_limit = powerLimit;
+ ar9003_hw_set_target_power_eeprom(ah, chan->channel);
+ ar9003_hw_calibration_apply(ah, chan->channel);
+}
+
+static u16 ath9k_hw_ar9300_get_spur_channel(struct ath_hw *ah,
+ u16 i, bool is2GHz)
+{
+ return AR_NO_SPUR;
+}
+
+s32 ar9003_hw_get_tx_gain_idx(struct ath_hw *ah)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+
+ return (eep->baseEepHeader.txrxgain >> 4) & 0xf; /* bits 7:4 */
+}
+
+s32 ar9003_hw_get_rx_gain_idx(struct ath_hw *ah)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+
+ return (eep->baseEepHeader.txrxgain) & 0xf; /* bits 3:0 */
+}
+
+const struct eeprom_ops eep_ar9300_ops = {
+ .check_eeprom = ath9k_hw_ar9300_check_eeprom,
+ .get_eeprom = ath9k_hw_ar9300_get_eeprom,
+ .fill_eeprom = ath9k_hw_ar9300_fill_eeprom,
+ .get_eeprom_ver = ath9k_hw_ar9300_get_eeprom_ver,
+ .get_eeprom_rev = ath9k_hw_ar9300_get_eeprom_rev,
+ .get_num_ant_config = ath9k_hw_ar9300_get_num_ant_config,
+ .get_eeprom_antenna_cfg = ath9k_hw_ar9300_get_eeprom_antenna_cfg,
+ .set_board_values = ath9k_hw_ar9300_set_board_values,
+ .set_addac = ath9k_hw_ar9300_set_addac,
+ .set_txpower = ath9k_hw_ar9300_set_txpower,
+ .get_spur_channel = ath9k_hw_ar9300_get_spur_channel
+};
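
The correction/temperature/voltage interpolation in ar9003_hw_calibration_apply() above is plain integer linear interpolation between the nearest lower and higher calibration piers. A minimal standalone sketch of the same arithmetic (illustrative only, not part of the patch; the helper name is made up):

/*
 * Illustrative only: linear interpolation of a calibration value between
 * the nearest lower and higher pier, as done per chain above.
 */
static int interp_cal_value(int freq, int lfreq, int hfreq, int lval, int hval)
{
	if (hfreq == lfreq)	/* identical piers: nothing to interpolate */
		return lval;

	return lval + ((freq - lfreq) * (hval - lval)) / (hfreq - lfreq);
}

/*
 * Example: piers at 5180 and 5320 MHz with corrections 10 and 14 give
 * interp_cal_value(5240, 5180, 5320, 10, 14) == 11 (integer division).
 */
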
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
new file mode 100644
index 000000000000..d8c0318f416f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -0,0 +1,323 @@
+#ifndef AR9003_EEPROM_H
+#define AR9003_EEPROM_H
+
+#include <linux/types.h>
+
+#define AR9300_EEP_VER 0xD000
+#define AR9300_EEP_VER_MINOR_MASK 0xFFF
+#define AR9300_EEP_MINOR_VER_1 0x1
+#define AR9300_EEP_MINOR_VER AR9300_EEP_MINOR_VER_1
+
+/* 16-bit offset location start of calibration struct */
+#define AR9300_EEP_START_LOC 256
+#define AR9300_NUM_5G_CAL_PIERS 8
+#define AR9300_NUM_2G_CAL_PIERS 3
+#define AR9300_NUM_5G_20_TARGET_POWERS 8
+#define AR9300_NUM_5G_40_TARGET_POWERS 8
+#define AR9300_NUM_2G_CCK_TARGET_POWERS 2
+#define AR9300_NUM_2G_20_TARGET_POWERS 3
+#define AR9300_NUM_2G_40_TARGET_POWERS 3
+/* #define AR9300_NUM_CTLS 21 */
+#define AR9300_NUM_CTLS_5G 9
+#define AR9300_NUM_CTLS_2G 12
+#define AR9300_CTL_MODE_M 0xF
+#define AR9300_NUM_BAND_EDGES_5G 8
+#define AR9300_NUM_BAND_EDGES_2G 4
+#define AR9300_NUM_PD_GAINS 4
+#define AR9300_PD_GAINS_IN_MASK 4
+#define AR9300_PD_GAIN_ICEPTS 5
+#define AR9300_EEPROM_MODAL_SPURS 5
+#define AR9300_MAX_RATE_POWER 63
+#define AR9300_NUM_PDADC_VALUES 128
+#define AR9300_NUM_RATES 16
+#define AR9300_BCHAN_UNUSED 0xFF
+#define AR9300_MAX_PWR_RANGE_IN_HALF_DB 64
+#define AR9300_OPFLAGS_11A 0x01
+#define AR9300_OPFLAGS_11G 0x02
+#define AR9300_OPFLAGS_5G_HT40 0x04
+#define AR9300_OPFLAGS_2G_HT40 0x08
+#define AR9300_OPFLAGS_5G_HT20 0x10
+#define AR9300_OPFLAGS_2G_HT20 0x20
+#define AR9300_EEPMISC_BIG_ENDIAN 0x01
+#define AR9300_EEPMISC_WOW 0x02
+#define AR9300_CUSTOMER_DATA_SIZE 20
+
+#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5))
+#define FBIN2FREQ(x, y) ((y) ? (2300 + (x)) : (4800 + 5 * (x)))
+#define AR9300_MAX_CHAINS 3
+#define AR9300_ANT_16S 25
+#define AR9300_FUTURE_MODAL_SZ 6
+
+#define AR9300_NUM_ANT_CHAIN_FIELDS 7
+#define AR9300_NUM_ANT_COMMON_FIELDS 4
+#define AR9300_SIZE_ANT_CHAIN_FIELD 3
+#define AR9300_SIZE_ANT_COMMON_FIELD 4
+#define AR9300_ANT_CHAIN_MASK 0x7
+#define AR9300_ANT_COMMON_MASK 0xf
+#define AR9300_CHAIN_0_IDX 0
+#define AR9300_CHAIN_1_IDX 1
+#define AR9300_CHAIN_2_IDX 2
+
+#define AR928X_NUM_ANT_CHAIN_FIELDS 6
+#define AR928X_SIZE_ANT_CHAIN_FIELD 2
+#define AR928X_ANT_CHAIN_MASK 0x3
+
+/* Delta from which to start power to pdadc table */
+/* This offset is used in both open loop and closed loop power control
+ * schemes. In open loop power control, it is not really needed, but for
+ * the "sake of consistency" it was kept. For certain AP designs, this
+ * value is overwritten by the value in the flag "pwrTableOffset" just
+ * before writing the pdadc vs pwr into the chip registers.
+ */
+#define AR9300_PWR_TABLE_OFFSET 0
+
+/* enable flags for voltage and temp compensation */
+#define ENABLE_TEMP_COMPENSATION 0x01
+#define ENABLE_VOLT_COMPENSATION 0x02
+/* byte addressable */
+#define AR9300_EEPROM_SIZE (16*1024)
+#define FIXED_CCA_THRESHOLD 15
+
+#define AR9300_BASE_ADDR 0x3ff
+
+enum targetPowerHTRates {
+ HT_TARGET_RATE_0_8_16,
+ HT_TARGET_RATE_1_3_9_11_17_19,
+ HT_TARGET_RATE_4,
+ HT_TARGET_RATE_5,
+ HT_TARGET_RATE_6,
+ HT_TARGET_RATE_7,
+ HT_TARGET_RATE_12,
+ HT_TARGET_RATE_13,
+ HT_TARGET_RATE_14,
+ HT_TARGET_RATE_15,
+ HT_TARGET_RATE_20,
+ HT_TARGET_RATE_21,
+ HT_TARGET_RATE_22,
+ HT_TARGET_RATE_23
+};
+
+enum targetPowerLegacyRates {
+ LEGACY_TARGET_RATE_6_24,
+ LEGACY_TARGET_RATE_36,
+ LEGACY_TARGET_RATE_48,
+ LEGACY_TARGET_RATE_54
+};
+
+enum targetPowerCckRates {
+ LEGACY_TARGET_RATE_1L_5L,
+ LEGACY_TARGET_RATE_5S,
+ LEGACY_TARGET_RATE_11L,
+ LEGACY_TARGET_RATE_11S
+};
+
+enum ar9300_Rates {
+ ALL_TARGET_LEGACY_6_24,
+ ALL_TARGET_LEGACY_36,
+ ALL_TARGET_LEGACY_48,
+ ALL_TARGET_LEGACY_54,
+ ALL_TARGET_LEGACY_1L_5L,
+ ALL_TARGET_LEGACY_5S,
+ ALL_TARGET_LEGACY_11L,
+ ALL_TARGET_LEGACY_11S,
+ ALL_TARGET_HT20_0_8_16,
+ ALL_TARGET_HT20_1_3_9_11_17_19,
+ ALL_TARGET_HT20_4,
+ ALL_TARGET_HT20_5,
+ ALL_TARGET_HT20_6,
+ ALL_TARGET_HT20_7,
+ ALL_TARGET_HT20_12,
+ ALL_TARGET_HT20_13,
+ ALL_TARGET_HT20_14,
+ ALL_TARGET_HT20_15,
+ ALL_TARGET_HT20_20,
+ ALL_TARGET_HT20_21,
+ ALL_TARGET_HT20_22,
+ ALL_TARGET_HT20_23,
+ ALL_TARGET_HT40_0_8_16,
+ ALL_TARGET_HT40_1_3_9_11_17_19,
+ ALL_TARGET_HT40_4,
+ ALL_TARGET_HT40_5,
+ ALL_TARGET_HT40_6,
+ ALL_TARGET_HT40_7,
+ ALL_TARGET_HT40_12,
+ ALL_TARGET_HT40_13,
+ ALL_TARGET_HT40_14,
+ ALL_TARGET_HT40_15,
+ ALL_TARGET_HT40_20,
+ ALL_TARGET_HT40_21,
+ ALL_TARGET_HT40_22,
+ ALL_TARGET_HT40_23,
+ ar9300RateSize,
+};
+
+
+struct eepFlags {
+ u8 opFlags;
+ u8 eepMisc;
+} __packed;
+
+enum CompressAlgorithm {
+ _CompressNone = 0,
+ _CompressLzma,
+ _CompressPairs,
+ _CompressBlock,
+ _Compress4,
+ _Compress5,
+ _Compress6,
+ _Compress7,
+};
+
+struct ar9300_base_eep_hdr {
+ u16 regDmn[2];
+ /* 4 bits tx and 4 bits rx */
+ u8 txrxMask;
+ struct eepFlags opCapFlags;
+ u8 rfSilent;
+ u8 blueToothOptions;
+ u8 deviceCap;
+ /* takes lower byte in eeprom location */
+ u8 deviceType;
+ /* offset in dB to be added to beginning
+ * of pdadc table in calibration
+ */
+ int8_t pwrTableOffset;
+ u8 params_for_tuning_caps[2];
+ /*
+ * bit0 - enable tx temp comp
+ * bit1 - enable tx volt comp
+ * bit2 - enable fastClock - default to 1
+ * bit3 - enable doubling - default to 1
+ * bit4 - enable internal regulator - default to 1
+ */
+ u8 featureEnable;
+ /* misc flags: bit0 - turn down drivestrength */
+ u8 miscConfiguration;
+ u8 eepromWriteEnableGpio;
+ u8 wlanDisableGpio;
+ u8 wlanLedGpio;
+ u8 rxBandSelectGpio;
+ u8 txrxgain;
+ /* SW controlled internal regulator fields */
+ u32 swreg;
+} __packed;
+
+struct ar9300_modal_eep_header {
+ /* 4 idle, t1, t2, b (4 bits per setting) */
+ u32 antCtrlCommon;
+ /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */
+ u32 antCtrlCommon2;
+ /* 6 idle, t, r, rx1, rx12, b (2 bits each) */
+ u16 antCtrlChain[AR9300_MAX_CHAINS];
+ /* 3 xatten1_db for AR9280 (0xa20c/b20c 5:0) */
+ u8 xatten1DB[AR9300_MAX_CHAINS];
+ /* 3 xatten1_margin for merlin (0xa20c/b20c 16:12) */
+ u8 xatten1Margin[AR9300_MAX_CHAINS];
+ int8_t tempSlope;
+ int8_t voltSlope;
+ /* spur channels in usual fbin coding format */
+ u8 spurChans[AR9300_EEPROM_MODAL_SPURS];
+ /* 3 Check if the register is per chain */
+ int8_t noiseFloorThreshCh[AR9300_MAX_CHAINS];
+ u8 ob[AR9300_MAX_CHAINS];
+ u8 db_stage2[AR9300_MAX_CHAINS];
+ u8 db_stage3[AR9300_MAX_CHAINS];
+ u8 db_stage4[AR9300_MAX_CHAINS];
+ u8 xpaBiasLvl;
+ u8 txFrameToDataStart;
+ u8 txFrameToPaOn;
+ u8 txClip;
+ int8_t antennaGain;
+ u8 switchSettling;
+ int8_t adcDesiredSize;
+ u8 txEndToXpaOff;
+ u8 txEndToRxOn;
+ u8 txFrameToXpaOn;
+ u8 thresh62;
+ u8 futureModal[32];
+} __packed;
+
+struct ar9300_cal_data_per_freq_op_loop {
+ int8_t refPower;
+ /* pdadc voltage at power measurement */
+ u8 voltMeas;
+ /* pcdac used for power measurement */
+ u8 tempMeas;
+ /* range is -60 to -127; create a mapping equation with 1 dB resolution */
+ int8_t rxNoisefloorCal;
+ /* range is same as noisefloor */
+ int8_t rxNoisefloorPower;
+ /* temp measured when noisefloor cal was performed */
+ u8 rxTempMeas;
+} __packed;
+
+struct cal_tgt_pow_legacy {
+ u8 tPow2x[4];
+} __packed;
+
+struct cal_tgt_pow_ht {
+ u8 tPow2x[14];
+} __packed;
+
+struct cal_ctl_edge_pwr {
+ u8 tPower:6,
+ flag:2;
+} __packed;
+
+struct cal_ctl_data_2g {
+ struct cal_ctl_edge_pwr ctlEdges[AR9300_NUM_BAND_EDGES_2G];
+} __packed;
+
+struct cal_ctl_data_5g {
+ struct cal_ctl_edge_pwr ctlEdges[AR9300_NUM_BAND_EDGES_5G];
+} __packed;
+
+struct ar9300_eeprom {
+ u8 eepromVersion;
+ u8 templateVersion;
+ u8 macAddr[6];
+ u8 custData[AR9300_CUSTOMER_DATA_SIZE];
+
+ struct ar9300_base_eep_hdr baseEepHeader;
+
+ struct ar9300_modal_eep_header modalHeader2G;
+ u8 calFreqPier2G[AR9300_NUM_2G_CAL_PIERS];
+ struct ar9300_cal_data_per_freq_op_loop
+ calPierData2G[AR9300_MAX_CHAINS][AR9300_NUM_2G_CAL_PIERS];
+ u8 calTarget_freqbin_Cck[AR9300_NUM_2G_CCK_TARGET_POWERS];
+ u8 calTarget_freqbin_2G[AR9300_NUM_2G_20_TARGET_POWERS];
+ u8 calTarget_freqbin_2GHT20[AR9300_NUM_2G_20_TARGET_POWERS];
+ u8 calTarget_freqbin_2GHT40[AR9300_NUM_2G_40_TARGET_POWERS];
+ struct cal_tgt_pow_legacy
+ calTargetPowerCck[AR9300_NUM_2G_CCK_TARGET_POWERS];
+ struct cal_tgt_pow_legacy
+ calTargetPower2G[AR9300_NUM_2G_20_TARGET_POWERS];
+ struct cal_tgt_pow_ht
+ calTargetPower2GHT20[AR9300_NUM_2G_20_TARGET_POWERS];
+ struct cal_tgt_pow_ht
+ calTargetPower2GHT40[AR9300_NUM_2G_40_TARGET_POWERS];
+ u8 ctlIndex_2G[AR9300_NUM_CTLS_2G];
+ u8 ctl_freqbin_2G[AR9300_NUM_CTLS_2G][AR9300_NUM_BAND_EDGES_2G];
+ struct cal_ctl_data_2g ctlPowerData_2G[AR9300_NUM_CTLS_2G];
+ struct ar9300_modal_eep_header modalHeader5G;
+ u8 calFreqPier5G[AR9300_NUM_5G_CAL_PIERS];
+ struct ar9300_cal_data_per_freq_op_loop
+ calPierData5G[AR9300_MAX_CHAINS][AR9300_NUM_5G_CAL_PIERS];
+ u8 calTarget_freqbin_5G[AR9300_NUM_5G_20_TARGET_POWERS];
+ u8 calTarget_freqbin_5GHT20[AR9300_NUM_5G_20_TARGET_POWERS];
+ u8 calTarget_freqbin_5GHT40[AR9300_NUM_5G_40_TARGET_POWERS];
+ struct cal_tgt_pow_legacy
+ calTargetPower5G[AR9300_NUM_5G_20_TARGET_POWERS];
+ struct cal_tgt_pow_ht
+ calTargetPower5GHT20[AR9300_NUM_5G_20_TARGET_POWERS];
+ struct cal_tgt_pow_ht
+ calTargetPower5GHT40[AR9300_NUM_5G_40_TARGET_POWERS];
+ u8 ctlIndex_5G[AR9300_NUM_CTLS_5G];
+ u8 ctl_freqbin_5G[AR9300_NUM_CTLS_5G][AR9300_NUM_BAND_EDGES_5G];
+ struct cal_ctl_data_5g ctlPowerData_5G[AR9300_NUM_CTLS_5G];
+} __packed;
+
+s32 ar9003_hw_get_tx_gain_idx(struct ath_hw *ah);
+s32 ar9003_hw_get_rx_gain_idx(struct ath_hw *ah);
+
+#endif
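
The FREQ2FBIN/FBIN2FREQ macros above encode a channel frequency into one byte: 1 MHz steps above 2300 MHz for 2.4 GHz, 5 MHz steps above 4800 MHz for 5 GHz. A quick round-trip sanity check (illustrative only, not part of the patch; assumes the macros above are in scope):

/* Illustrative only: round-tripping frequencies through the fbin coding. */
#include <assert.h>

static void fbin_example(void)
{
	/* 2 GHz: 2412 MHz -> fbin 112 -> 2412 MHz */
	assert(FREQ2FBIN(2412, 1) == 112);
	assert(FBIN2FREQ(112, 1) == 2412);

	/* 5 GHz: 5180 MHz -> fbin 76 -> 5180 MHz */
	assert(FREQ2FBIN(5180, 0) == 76);
	assert(FBIN2FREQ(76, 0) == 5180);
}
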
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
new file mode 100644
index 000000000000..b15309caf1da
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2008-2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "ar9003_mac.h"
+#include "ar9003_initvals.h"
+
+/* General hardware code for the AR9003 hardware family */
+
+static bool ar9003_hw_macversion_supported(u32 macversion)
+{
+ switch (macversion) {
+ case AR_SREV_VERSION_9300:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+/* AR9003 2.0 - new INI format (pre, core, post arrays per subsystem) */
+/*
+ * XXX: move TX/RX gain INI to its own init_mode_gain_regs after
+ * ensuring it does not affect hardware bring up
+ */
+static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
+{
+ /* mac */
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+ ar9300_2p0_mac_core,
+ ARRAY_SIZE(ar9300_2p0_mac_core), 2);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+ ar9300_2p0_mac_postamble,
+ ARRAY_SIZE(ar9300_2p0_mac_postamble), 5);
+
+ /* bb */
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+ ar9300_2p0_baseband_core,
+ ARRAY_SIZE(ar9300_2p0_baseband_core), 2);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+ ar9300_2p0_baseband_postamble,
+ ARRAY_SIZE(ar9300_2p0_baseband_postamble), 5);
+
+ /* radio */
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+ ar9300_2p0_radio_core,
+ ARRAY_SIZE(ar9300_2p0_radio_core), 2);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+ ar9300_2p0_radio_postamble,
+ ARRAY_SIZE(ar9300_2p0_radio_postamble), 5);
+
+ /* soc */
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+ ar9300_2p0_soc_preamble,
+ ARRAY_SIZE(ar9300_2p0_soc_preamble), 2);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
+ ar9300_2p0_soc_postamble,
+ ARRAY_SIZE(ar9300_2p0_soc_postamble), 5);
+
+ /* rx/tx gain */
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9300Common_rx_gain_table_2p0,
+ ARRAY_SIZE(ar9300Common_rx_gain_table_2p0), 2);
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_lowest_ob_db_tx_gain_table_2p0,
+ ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p0),
+ 5);
+
+ /* Load PCIE SERDES settings from INI */
+
+ /* Awake Setting */
+
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9300PciePhy_pll_on_clkreq_disable_L1_2p0,
+ ARRAY_SIZE(ar9300PciePhy_pll_on_clkreq_disable_L1_2p0),
+ 2);
+
+ /* Sleep Setting */
+
+ INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+ ar9300PciePhy_clkreq_enable_L1_2p0,
+ ARRAY_SIZE(ar9300PciePhy_clkreq_enable_L1_2p0),
+ 2);
+
+ /* Fast clock modal settings */
+ INIT_INI_ARRAY(&ah->iniModesAdditional,
+ ar9300Modes_fast_clock_2p0,
+ ARRAY_SIZE(ar9300Modes_fast_clock_2p0),
+ 3);
+}
+
+static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
+{
+ switch (ar9003_hw_get_tx_gain_idx(ah)) {
+ case 0:
+ default:
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_lowest_ob_db_tx_gain_table_2p0,
+ ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p0),
+ 5);
+ break;
+ case 1:
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_high_ob_db_tx_gain_table_2p0,
+ ARRAY_SIZE(ar9300Modes_high_ob_db_tx_gain_table_2p0),
+ 5);
+ break;
+ case 2:
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_low_ob_db_tx_gain_table_2p0,
+ ARRAY_SIZE(ar9300Modes_low_ob_db_tx_gain_table_2p0),
+ 5);
+ break;
+ }
+}
+
+static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
+{
+ switch (ar9003_hw_get_rx_gain_idx(ah)) {
+ case 0:
+ default:
+ INIT_INI_ARRAY(&ah->iniModesRxGain, ar9300Common_rx_gain_table_2p0,
+ ARRAY_SIZE(ar9300Common_rx_gain_table_2p0),
+ 2);
+ break;
+ case 1:
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9300Common_wo_xlna_rx_gain_table_2p0,
+ ARRAY_SIZE(ar9300Common_wo_xlna_rx_gain_table_2p0),
+ 2);
+ break;
+ }
+}
+
+/* set gain table pointers according to values read from the eeprom */
+static void ar9003_hw_init_mode_gain_regs(struct ath_hw *ah)
+{
+ ar9003_tx_gain_table_apply(ah);
+ ar9003_rx_gain_table_apply(ah);
+}
+
+/*
+ * Helper for ASPM support.
+ *
+ * Disable PLL when in L0s as well as receiver clock when in L1.
+ * This power saving option must be enabled through the SerDes.
+ *
+ * Programming the SerDes must go through the same 288 bit serial shift
+ * register as the other analog registers. Hence the 9 writes.
+ */
+static void ar9003_hw_configpcipowersave(struct ath_hw *ah,
+ int restore,
+ int power_off)
+{
+ if (!ah->is_pciexpress)
+ return;
+
+ /* Do not touch SerDes registers */
+ if (ah->config.pcie_powersave_enable == 2)
+ return;
+
+ /* Nothing to do on restore for 11N */
+ if (!restore) {
+ /* set bit 19 to allow forcing of pcie core into L1 state */
+ REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
+
+ /* Several PCIe messages to ensure proper behaviour */
+ if (ah->config.pcie_waen)
+ REG_WRITE(ah, AR_WA, ah->config.pcie_waen);
+ }
+}
+
+/* Sets up the AR9003 hardware family callbacks */
+void ar9003_hw_attach_ops(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+ struct ath_hw_ops *ops = ath9k_hw_ops(ah);
+
+ priv_ops->init_mode_regs = ar9003_hw_init_mode_regs;
+ priv_ops->init_mode_gain_regs = ar9003_hw_init_mode_gain_regs;
+ priv_ops->macversion_supported = ar9003_hw_macversion_supported;
+
+ ops->config_pci_powersave = ar9003_hw_configpcipowersave;
+
+ ar9003_hw_attach_phy_ops(ah);
+ ar9003_hw_attach_calib_ops(ah);
+ ar9003_hw_attach_mac_ops(ah);
+}
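
The initvals tables in the next file use column 0 for the register address; the 5-column tables carry one value per mode (5G_HT20, 5G_HT40, 2G_HT40, 2G_HT20) and the 2-column tables a single all-modes value, matching the column counts passed to INIT_INI_ARRAY above. A minimal sketch of walking such a table (illustrative only, not part of the patch; assumes a REG_WRITE-style helper and a caller-chosen mode column):

/*
 * Illustrative only: write a 5-column initvals table, where column 0 is
 * the register address and columns 1..4 hold the per-mode values.
 * The real driver goes through INIT_INI_ARRAY and its own helpers.
 */
static void ini_table_write_example(struct ath_hw *ah,
				    const u32 table[][5], int rows, int column)
{
	int i;

	for (i = 0; i < rows; i++)
		REG_WRITE(ah, table[i][0], table[i][column]);
}
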
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_initvals.h
new file mode 100644
index 000000000000..db019dd220b7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_initvals.h
@@ -0,0 +1,1784 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_9003_H
+#define INITVALS_9003_H
+
+/* AR9003 2.0 */
+
+static const u32 ar9300_2p0_radio_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0001609c, 0x0dd08f29, 0x0dd08f29, 0x0b283f31, 0x0b283f31},
+ {0x000160ac, 0xa4653c00, 0xa4653c00, 0x24652800, 0x24652800},
+ {0x000160b0, 0x03284f3e, 0x03284f3e, 0x05d08f20, 0x05d08f20},
+ {0x0001610c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+ {0x0001650c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+ {0x0001690c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016940, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+};
+
+static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p0[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
+ {0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
+ {0x0000a598, 0x21820220, 0x21820220, 0x16800402, 0x16800402},
+ {0x0000a59c, 0x27820223, 0x27820223, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
+ {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
+ {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
+ {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
+ {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
+ {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
+ {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
+ {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
+ {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
+ {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3f801861, 0x3f801861},
+ {0x0000a5c8, 0x5782286c, 0x5782286c, 0x43801a81, 0x43801a81},
+ {0x0000a5cc, 0x5c84286b, 0x5c84286b, 0x47801a83, 0x47801a83},
+ {0x0000a5d0, 0x61842a6c, 0x61842a6c, 0x4a801c84, 0x4a801c84},
+ {0x0000a5d4, 0x66862a6c, 0x66862a6c, 0x4e801ce3, 0x4e801ce3},
+ {0x0000a5d8, 0x6b862e6c, 0x6b862e6c, 0x52801ce5, 0x52801ce5},
+ {0x0000a5dc, 0x7086308c, 0x7086308c, 0x56801ce9, 0x56801ce9},
+ {0x0000a5e0, 0x738a308a, 0x738a308a, 0x5a801ceb, 0x5a801ceb},
+ {0x0000a5e4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5e8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5ec, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f0, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5fc, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016048, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016448, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016848, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
+static const u32 ar9300Modes_fast_clock_2p0[][3] = {
+ /* Addr 5G_HT20 5G_HT40 */
+ {0x00001030, 0x00000268, 0x000004d0},
+ {0x00001070, 0x0000018c, 0x00000318},
+ {0x000010b0, 0x00000fd0, 0x00001fa0},
+ {0x00008014, 0x044c044c, 0x08980898},
+ {0x0000801c, 0x148ec02b, 0x148ec057},
+ {0x00008318, 0x000044c0, 0x00008980},
+ {0x00009e00, 0x03721821, 0x03721821},
+ {0x0000a230, 0x0000000b, 0x00000016},
+ {0x0000a254, 0x00000898, 0x00001130},
+};
+
+static const u32 ar9300_2p0_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00016000, 0x36db6db6},
+ {0x00016004, 0x6db6db40},
+ {0x00016008, 0x73f00000},
+ {0x0001600c, 0x00000000},
+ {0x00016040, 0x7f80fff8},
+ {0x0001604c, 0x76d005b5},
+ {0x00016050, 0x556cf031},
+ {0x00016054, 0x13449440},
+ {0x00016058, 0x0c51c92c},
+ {0x0001605c, 0x3db7fffc},
+ {0x00016060, 0xfffffffc},
+ {0x00016064, 0x000f0278},
+ {0x0001606c, 0x6db60000},
+ {0x00016080, 0x00000000},
+ {0x00016084, 0x0e48048c},
+ {0x00016088, 0x54214514},
+ {0x0001608c, 0x119f481e},
+ {0x00016090, 0x24926490},
+ {0x00016098, 0xd2888888},
+ {0x000160a0, 0x0a108ffe},
+ {0x000160a4, 0x812fc370},
+ {0x000160a8, 0x423c8000},
+ {0x000160b4, 0x92480080},
+ {0x000160c0, 0x00adb6d0},
+ {0x000160c4, 0x6db6db60},
+ {0x000160c8, 0x6db6db6c},
+ {0x000160cc, 0x01e6c000},
+ {0x00016100, 0x3fffbe01},
+ {0x00016104, 0xfff80000},
+ {0x00016108, 0x00080010},
+ {0x00016144, 0x02084080},
+ {0x00016148, 0x00000000},
+ {0x00016280, 0x058a0001},
+ {0x00016284, 0x3d840208},
+ {0x00016288, 0x05a20408},
+ {0x0001628c, 0x00038c07},
+ {0x00016290, 0x40000004},
+ {0x00016294, 0x458aa14f},
+ {0x00016380, 0x00000000},
+ {0x00016384, 0x00000000},
+ {0x00016388, 0x00800700},
+ {0x0001638c, 0x00800700},
+ {0x00016390, 0x00800700},
+ {0x00016394, 0x00000000},
+ {0x00016398, 0x00000000},
+ {0x0001639c, 0x00000000},
+ {0x000163a0, 0x00000001},
+ {0x000163a4, 0x00000001},
+ {0x000163a8, 0x00000000},
+ {0x000163ac, 0x00000000},
+ {0x000163b0, 0x00000000},
+ {0x000163b4, 0x00000000},
+ {0x000163b8, 0x00000000},
+ {0x000163bc, 0x00000000},
+ {0x000163c0, 0x000000a0},
+ {0x000163c4, 0x000c0000},
+ {0x000163c8, 0x14021402},
+ {0x000163cc, 0x00001402},
+ {0x000163d0, 0x00000000},
+ {0x000163d4, 0x00000000},
+ {0x00016400, 0x36db6db6},
+ {0x00016404, 0x6db6db40},
+ {0x00016408, 0x73f00000},
+ {0x0001640c, 0x00000000},
+ {0x00016440, 0x7f80fff8},
+ {0x0001644c, 0x76d005b5},
+ {0x00016450, 0x556cf031},
+ {0x00016454, 0x13449440},
+ {0x00016458, 0x0c51c92c},
+ {0x0001645c, 0x3db7fffc},
+ {0x00016460, 0xfffffffc},
+ {0x00016464, 0x000f0278},
+ {0x0001646c, 0x6db60000},
+ {0x00016500, 0x3fffbe01},
+ {0x00016504, 0xfff80000},
+ {0x00016508, 0x00080010},
+ {0x00016544, 0x02084080},
+ {0x00016548, 0x00000000},
+ {0x00016780, 0x00000000},
+ {0x00016784, 0x00000000},
+ {0x00016788, 0x00800700},
+ {0x0001678c, 0x00800700},
+ {0x00016790, 0x00800700},
+ {0x00016794, 0x00000000},
+ {0x00016798, 0x00000000},
+ {0x0001679c, 0x00000000},
+ {0x000167a0, 0x00000001},
+ {0x000167a4, 0x00000001},
+ {0x000167a8, 0x00000000},
+ {0x000167ac, 0x00000000},
+ {0x000167b0, 0x00000000},
+ {0x000167b4, 0x00000000},
+ {0x000167b8, 0x00000000},
+ {0x000167bc, 0x00000000},
+ {0x000167c0, 0x000000a0},
+ {0x000167c4, 0x000c0000},
+ {0x000167c8, 0x14021402},
+ {0x000167cc, 0x00001402},
+ {0x000167d0, 0x00000000},
+ {0x000167d4, 0x00000000},
+ {0x00016800, 0x36db6db6},
+ {0x00016804, 0x6db6db40},
+ {0x00016808, 0x73f00000},
+ {0x0001680c, 0x00000000},
+ {0x00016840, 0x7f80fff8},
+ {0x0001684c, 0x76d005b5},
+ {0x00016850, 0x556cf031},
+ {0x00016854, 0x13449440},
+ {0x00016858, 0x0c51c92c},
+ {0x0001685c, 0x3db7fffc},
+ {0x00016860, 0xfffffffc},
+ {0x00016864, 0x000f0278},
+ {0x0001686c, 0x6db60000},
+ {0x00016900, 0x3fffbe01},
+ {0x00016904, 0xfff80000},
+ {0x00016908, 0x00080010},
+ {0x00016944, 0x02084080},
+ {0x00016948, 0x00000000},
+ {0x00016b80, 0x00000000},
+ {0x00016b84, 0x00000000},
+ {0x00016b88, 0x00800700},
+ {0x00016b8c, 0x00800700},
+ {0x00016b90, 0x00800700},
+ {0x00016b94, 0x00000000},
+ {0x00016b98, 0x00000000},
+ {0x00016b9c, 0x00000000},
+ {0x00016ba0, 0x00000001},
+ {0x00016ba4, 0x00000001},
+ {0x00016ba8, 0x00000000},
+ {0x00016bac, 0x00000000},
+ {0x00016bb0, 0x00000000},
+ {0x00016bb4, 0x00000000},
+ {0x00016bb8, 0x00000000},
+ {0x00016bbc, 0x00000000},
+ {0x00016bc0, 0x000000a0},
+ {0x00016bc4, 0x000c0000},
+ {0x00016bc8, 0x14021402},
+ {0x00016bcc, 0x00001402},
+ {0x00016bd0, 0x00000000},
+ {0x00016bd4, 0x00000000},
+};
+
+static const u32 ar9300Common_rx_gain_table_merlin_2p0[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x02000101},
+ {0x0000a004, 0x02000102},
+ {0x0000a008, 0x02000103},
+ {0x0000a00c, 0x02000104},
+ {0x0000a010, 0x02000200},
+ {0x0000a014, 0x02000201},
+ {0x0000a018, 0x02000202},
+ {0x0000a01c, 0x02000203},
+ {0x0000a020, 0x02000204},
+ {0x0000a024, 0x02000205},
+ {0x0000a028, 0x02000208},
+ {0x0000a02c, 0x02000302},
+ {0x0000a030, 0x02000303},
+ {0x0000a034, 0x02000304},
+ {0x0000a038, 0x02000400},
+ {0x0000a03c, 0x02010300},
+ {0x0000a040, 0x02010301},
+ {0x0000a044, 0x02010302},
+ {0x0000a048, 0x02000500},
+ {0x0000a04c, 0x02010400},
+ {0x0000a050, 0x02020300},
+ {0x0000a054, 0x02020301},
+ {0x0000a058, 0x02020302},
+ {0x0000a05c, 0x02020303},
+ {0x0000a060, 0x02020400},
+ {0x0000a064, 0x02030300},
+ {0x0000a068, 0x02030301},
+ {0x0000a06c, 0x02030302},
+ {0x0000a070, 0x02030303},
+ {0x0000a074, 0x02030400},
+ {0x0000a078, 0x02040300},
+ {0x0000a07c, 0x02040301},
+ {0x0000a080, 0x02040302},
+ {0x0000a084, 0x02040303},
+ {0x0000a088, 0x02030500},
+ {0x0000a08c, 0x02040400},
+ {0x0000a090, 0x02050203},
+ {0x0000a094, 0x02050204},
+ {0x0000a098, 0x02050205},
+ {0x0000a09c, 0x02040500},
+ {0x0000a0a0, 0x02050301},
+ {0x0000a0a4, 0x02050302},
+ {0x0000a0a8, 0x02050303},
+ {0x0000a0ac, 0x02050400},
+ {0x0000a0b0, 0x02050401},
+ {0x0000a0b4, 0x02050402},
+ {0x0000a0b8, 0x02050403},
+ {0x0000a0bc, 0x02050500},
+ {0x0000a0c0, 0x02050501},
+ {0x0000a0c4, 0x02050502},
+ {0x0000a0c8, 0x02050503},
+ {0x0000a0cc, 0x02050504},
+ {0x0000a0d0, 0x02050600},
+ {0x0000a0d4, 0x02050601},
+ {0x0000a0d8, 0x02050602},
+ {0x0000a0dc, 0x02050603},
+ {0x0000a0e0, 0x02050604},
+ {0x0000a0e4, 0x02050700},
+ {0x0000a0e8, 0x02050701},
+ {0x0000a0ec, 0x02050702},
+ {0x0000a0f0, 0x02050703},
+ {0x0000a0f4, 0x02050704},
+ {0x0000a0f8, 0x02050705},
+ {0x0000a0fc, 0x02050708},
+ {0x0000a100, 0x02050709},
+ {0x0000a104, 0x0205070a},
+ {0x0000a108, 0x0205070b},
+ {0x0000a10c, 0x0205070c},
+ {0x0000a110, 0x0205070d},
+ {0x0000a114, 0x02050710},
+ {0x0000a118, 0x02050711},
+ {0x0000a11c, 0x02050712},
+ {0x0000a120, 0x02050713},
+ {0x0000a124, 0x02050714},
+ {0x0000a128, 0x02050715},
+ {0x0000a12c, 0x02050730},
+ {0x0000a130, 0x02050731},
+ {0x0000a134, 0x02050732},
+ {0x0000a138, 0x02050733},
+ {0x0000a13c, 0x02050734},
+ {0x0000a140, 0x02050735},
+ {0x0000a144, 0x02050750},
+ {0x0000a148, 0x02050751},
+ {0x0000a14c, 0x02050752},
+ {0x0000a150, 0x02050753},
+ {0x0000a154, 0x02050754},
+ {0x0000a158, 0x02050755},
+ {0x0000a15c, 0x02050770},
+ {0x0000a160, 0x02050771},
+ {0x0000a164, 0x02050772},
+ {0x0000a168, 0x02050773},
+ {0x0000a16c, 0x02050774},
+ {0x0000a170, 0x02050775},
+ {0x0000a174, 0x00000776},
+ {0x0000a178, 0x00000776},
+ {0x0000a17c, 0x00000776},
+ {0x0000a180, 0x00000776},
+ {0x0000a184, 0x00000776},
+ {0x0000a188, 0x00000776},
+ {0x0000a18c, 0x00000776},
+ {0x0000a190, 0x00000776},
+ {0x0000a194, 0x00000776},
+ {0x0000a198, 0x00000776},
+ {0x0000a19c, 0x00000776},
+ {0x0000a1a0, 0x00000776},
+ {0x0000a1a4, 0x00000776},
+ {0x0000a1a8, 0x00000776},
+ {0x0000a1ac, 0x00000776},
+ {0x0000a1b0, 0x00000776},
+ {0x0000a1b4, 0x00000776},
+ {0x0000a1b8, 0x00000776},
+ {0x0000a1bc, 0x00000776},
+ {0x0000a1c0, 0x00000776},
+ {0x0000a1c4, 0x00000776},
+ {0x0000a1c8, 0x00000776},
+ {0x0000a1cc, 0x00000776},
+ {0x0000a1d0, 0x00000776},
+ {0x0000a1d4, 0x00000776},
+ {0x0000a1d8, 0x00000776},
+ {0x0000a1dc, 0x00000776},
+ {0x0000a1e0, 0x00000776},
+ {0x0000a1e4, 0x00000776},
+ {0x0000a1e8, 0x00000776},
+ {0x0000a1ec, 0x00000776},
+ {0x0000a1f0, 0x00000776},
+ {0x0000a1f4, 0x00000776},
+ {0x0000a1f8, 0x00000776},
+ {0x0000a1fc, 0x00000776},
+ {0x0000b000, 0x02000101},
+ {0x0000b004, 0x02000102},
+ {0x0000b008, 0x02000103},
+ {0x0000b00c, 0x02000104},
+ {0x0000b010, 0x02000200},
+ {0x0000b014, 0x02000201},
+ {0x0000b018, 0x02000202},
+ {0x0000b01c, 0x02000203},
+ {0x0000b020, 0x02000204},
+ {0x0000b024, 0x02000205},
+ {0x0000b028, 0x02000208},
+ {0x0000b02c, 0x02000302},
+ {0x0000b030, 0x02000303},
+ {0x0000b034, 0x02000304},
+ {0x0000b038, 0x02000400},
+ {0x0000b03c, 0x02010300},
+ {0x0000b040, 0x02010301},
+ {0x0000b044, 0x02010302},
+ {0x0000b048, 0x02000500},
+ {0x0000b04c, 0x02010400},
+ {0x0000b050, 0x02020300},
+ {0x0000b054, 0x02020301},
+ {0x0000b058, 0x02020302},
+ {0x0000b05c, 0x02020303},
+ {0x0000b060, 0x02020400},
+ {0x0000b064, 0x02030300},
+ {0x0000b068, 0x02030301},
+ {0x0000b06c, 0x02030302},
+ {0x0000b070, 0x02030303},
+ {0x0000b074, 0x02030400},
+ {0x0000b078, 0x02040300},
+ {0x0000b07c, 0x02040301},
+ {0x0000b080, 0x02040302},
+ {0x0000b084, 0x02040303},
+ {0x0000b088, 0x02030500},
+ {0x0000b08c, 0x02040400},
+ {0x0000b090, 0x02050203},
+ {0x0000b094, 0x02050204},
+ {0x0000b098, 0x02050205},
+ {0x0000b09c, 0x02040500},
+ {0x0000b0a0, 0x02050301},
+ {0x0000b0a4, 0x02050302},
+ {0x0000b0a8, 0x02050303},
+ {0x0000b0ac, 0x02050400},
+ {0x0000b0b0, 0x02050401},
+ {0x0000b0b4, 0x02050402},
+ {0x0000b0b8, 0x02050403},
+ {0x0000b0bc, 0x02050500},
+ {0x0000b0c0, 0x02050501},
+ {0x0000b0c4, 0x02050502},
+ {0x0000b0c8, 0x02050503},
+ {0x0000b0cc, 0x02050504},
+ {0x0000b0d0, 0x02050600},
+ {0x0000b0d4, 0x02050601},
+ {0x0000b0d8, 0x02050602},
+ {0x0000b0dc, 0x02050603},
+ {0x0000b0e0, 0x02050604},
+ {0x0000b0e4, 0x02050700},
+ {0x0000b0e8, 0x02050701},
+ {0x0000b0ec, 0x02050702},
+ {0x0000b0f0, 0x02050703},
+ {0x0000b0f4, 0x02050704},
+ {0x0000b0f8, 0x02050705},
+ {0x0000b0fc, 0x02050708},
+ {0x0000b100, 0x02050709},
+ {0x0000b104, 0x0205070a},
+ {0x0000b108, 0x0205070b},
+ {0x0000b10c, 0x0205070c},
+ {0x0000b110, 0x0205070d},
+ {0x0000b114, 0x02050710},
+ {0x0000b118, 0x02050711},
+ {0x0000b11c, 0x02050712},
+ {0x0000b120, 0x02050713},
+ {0x0000b124, 0x02050714},
+ {0x0000b128, 0x02050715},
+ {0x0000b12c, 0x02050730},
+ {0x0000b130, 0x02050731},
+ {0x0000b134, 0x02050732},
+ {0x0000b138, 0x02050733},
+ {0x0000b13c, 0x02050734},
+ {0x0000b140, 0x02050735},
+ {0x0000b144, 0x02050750},
+ {0x0000b148, 0x02050751},
+ {0x0000b14c, 0x02050752},
+ {0x0000b150, 0x02050753},
+ {0x0000b154, 0x02050754},
+ {0x0000b158, 0x02050755},
+ {0x0000b15c, 0x02050770},
+ {0x0000b160, 0x02050771},
+ {0x0000b164, 0x02050772},
+ {0x0000b168, 0x02050773},
+ {0x0000b16c, 0x02050774},
+ {0x0000b170, 0x02050775},
+ {0x0000b174, 0x00000776},
+ {0x0000b178, 0x00000776},
+ {0x0000b17c, 0x00000776},
+ {0x0000b180, 0x00000776},
+ {0x0000b184, 0x00000776},
+ {0x0000b188, 0x00000776},
+ {0x0000b18c, 0x00000776},
+ {0x0000b190, 0x00000776},
+ {0x0000b194, 0x00000776},
+ {0x0000b198, 0x00000776},
+ {0x0000b19c, 0x00000776},
+ {0x0000b1a0, 0x00000776},
+ {0x0000b1a4, 0x00000776},
+ {0x0000b1a8, 0x00000776},
+ {0x0000b1ac, 0x00000776},
+ {0x0000b1b0, 0x00000776},
+ {0x0000b1b4, 0x00000776},
+ {0x0000b1b8, 0x00000776},
+ {0x0000b1bc, 0x00000776},
+ {0x0000b1c0, 0x00000776},
+ {0x0000b1c4, 0x00000776},
+ {0x0000b1c8, 0x00000776},
+ {0x0000b1cc, 0x00000776},
+ {0x0000b1d0, 0x00000776},
+ {0x0000b1d4, 0x00000776},
+ {0x0000b1d8, 0x00000776},
+ {0x0000b1dc, 0x00000776},
+ {0x0000b1e0, 0x00000776},
+ {0x0000b1e4, 0x00000776},
+ {0x0000b1e8, 0x00000776},
+ {0x0000b1ec, 0x00000776},
+ {0x0000b1f0, 0x00000776},
+ {0x0000b1f4, 0x00000776},
+ {0x0000b1f8, 0x00000776},
+ {0x0000b1fc, 0x00000776},
+};
+
+static const u32 ar9300_2p0_mac_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+ {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+ {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+ {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+ {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
+};
+
+static const u32 ar9300_2p0_soc_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00007010, 0x00000023, 0x00000023, 0x00000023, 0x00000023},
+};
+
+static const u32 ar9200_merlin_2p0_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00007800, 0x00040000},
+ {0x00007804, 0xdb005012},
+ {0x00007808, 0x04924914},
+ {0x0000780c, 0x21084210},
+ {0x00007810, 0x6d801300},
+ {0x00007814, 0x0019beff},
+ {0x00007818, 0x07e41000},
+ {0x0000781c, 0x00392000},
+ {0x00007820, 0x92592480},
+ {0x00007824, 0x00040000},
+ {0x00007828, 0xdb005012},
+ {0x0000782c, 0x04924914},
+ {0x00007830, 0x21084210},
+ {0x00007834, 0x6d801300},
+ {0x00007838, 0x0019beff},
+ {0x0000783c, 0x07e40000},
+ {0x00007840, 0x00392000},
+ {0x00007844, 0x92592480},
+ {0x00007848, 0x00100000},
+ {0x0000784c, 0x773f0567},
+ {0x00007850, 0x54214514},
+ {0x00007854, 0x12035828},
+ {0x00007858, 0x92592692},
+ {0x0000785c, 0x00000000},
+ {0x00007860, 0x56400000},
+ {0x00007864, 0x0a8e370e},
+ {0x00007868, 0xc0102850},
+ {0x0000786c, 0x812d4000},
+ {0x00007870, 0x807ec400},
+ {0x00007874, 0x001b6db0},
+ {0x00007878, 0x00376b63},
+ {0x0000787c, 0x06db6db6},
+ {0x00007880, 0x006d8000},
+ {0x00007884, 0xffeffffe},
+ {0x00007888, 0xffeffffe},
+ {0x0000788c, 0x00010000},
+ {0x00007890, 0x02060aeb},
+ {0x00007894, 0x5a108000},
+};
+
+static const u32 ar9300_2p0_baseband_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
+ {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e},
+ {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
+ {0x00009c00, 0x00000044, 0x000000c4, 0x000000c4, 0x00000044},
+ {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
+ {0x00009e04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+ {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
+ {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+ {0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+ {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+ {0x0000a204, 0x000037c0, 0x000037c4, 0x000037c4, 0x000037c0},
+ {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
+ {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
+ {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+ {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
+ {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
+ {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071982},
+ {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
+ {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae04, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+ {0x0000b284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
+ {0x0000b830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000be04, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000be1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000be20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+ {0x0000c284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
+};
+
+static const u32 ar9300_2p0_baseband_core[][2] = {
+ /* Addr allmodes */
+ {0x00009800, 0xafe68e30},
+ {0x00009804, 0xfd14e000},
+ {0x00009808, 0x9c0a9f6b},
+ {0x0000980c, 0x04900000},
+ {0x00009814, 0x9280c00a},
+ {0x00009818, 0x00000000},
+ {0x0000981c, 0x00020028},
+ {0x00009834, 0x5f3ca3de},
+ {0x00009838, 0x0108ecff},
+ {0x0000983c, 0x14750600},
+ {0x00009880, 0x201fff00},
+ {0x00009884, 0x00001042},
+ {0x000098a4, 0x00200400},
+ {0x000098b0, 0x52440bbe},
+ {0x000098d0, 0x004b6a8e},
+ {0x000098d4, 0x00000820},
+ {0x000098dc, 0x00000000},
+ {0x000098f0, 0x00000000},
+ {0x000098f4, 0x00000000},
+ {0x00009c04, 0xff55ff55},
+ {0x00009c08, 0x0320ff55},
+ {0x00009c0c, 0x00000000},
+ {0x00009c10, 0x00000000},
+ {0x00009c14, 0x00046384},
+ {0x00009c18, 0x05b6b440},
+ {0x00009c1c, 0x00b6b440},
+ {0x00009d00, 0xc080a333},
+ {0x00009d04, 0x40206c10},
+ {0x00009d08, 0x009c4060},
+ {0x00009d0c, 0x9883800a},
+ {0x00009d10, 0x01834061},
+ {0x00009d14, 0x00c0040b},
+ {0x00009d18, 0x00000000},
+ {0x00009e08, 0x0038230c},
+ {0x00009e24, 0x990bb515},
+ {0x00009e28, 0x0c6f0000},
+ {0x00009e30, 0x06336f77},
+ {0x00009e34, 0x6af6532f},
+ {0x00009e38, 0x0cc80c00},
+ {0x00009e3c, 0xcf946222},
+ {0x00009e40, 0x0d261820},
+ {0x00009e4c, 0x00001004},
+ {0x00009e50, 0x00ff03f1},
+ {0x00009e54, 0x00000000},
+ {0x00009fc0, 0x803e4788},
+ {0x00009fc4, 0x0001efb5},
+ {0x00009fcc, 0x40000014},
+ {0x00009fd0, 0x01193b93},
+ {0x0000a20c, 0x00000000},
+ {0x0000a220, 0x00000000},
+ {0x0000a224, 0x00000000},
+ {0x0000a228, 0x10002310},
+ {0x0000a22c, 0x01036a1e},
+ {0x0000a234, 0x10000fff},
+ {0x0000a23c, 0x00000000},
+ {0x0000a244, 0x0c000000},
+ {0x0000a2a0, 0x00000001},
+ {0x0000a2c0, 0x00000001},
+ {0x0000a2c8, 0x00000000},
+ {0x0000a2cc, 0x18c43433},
+ {0x0000a2d4, 0x00000000},
+ {0x0000a2dc, 0x00000000},
+ {0x0000a2e0, 0x00000000},
+ {0x0000a2e4, 0x00000000},
+ {0x0000a2e8, 0x00000000},
+ {0x0000a2ec, 0x00000000},
+ {0x0000a2f0, 0x00000000},
+ {0x0000a2f4, 0x00000000},
+ {0x0000a2f8, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a34c, 0x00000000},
+ {0x0000a350, 0x0000a000},
+ {0x0000a364, 0x00000000},
+ {0x0000a370, 0x00000000},
+ {0x0000a390, 0x00000001},
+ {0x0000a394, 0x00000444},
+ {0x0000a398, 0x001f0e0f},
+ {0x0000a39c, 0x0075393f},
+ {0x0000a3a0, 0xb79f6427},
+ {0x0000a3a4, 0x00000000},
+ {0x0000a3a8, 0xaaaaaaaa},
+ {0x0000a3ac, 0x3c466478},
+ {0x0000a3c0, 0x20202020},
+ {0x0000a3c4, 0x22222220},
+ {0x0000a3c8, 0x20200020},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3d8, 0x20202020},
+ {0x0000a3dc, 0x20202020},
+ {0x0000a3e0, 0x20202020},
+ {0x0000a3e4, 0x20202020},
+ {0x0000a3e8, 0x20202020},
+ {0x0000a3ec, 0x20202020},
+ {0x0000a3f0, 0x00000000},
+ {0x0000a3f4, 0x00000246},
+ {0x0000a3f8, 0x0cdbd380},
+ {0x0000a3fc, 0x000f0f01},
+ {0x0000a400, 0x8fa91f01},
+ {0x0000a404, 0x00000000},
+ {0x0000a408, 0x0e79e5c6},
+ {0x0000a40c, 0x00820820},
+ {0x0000a414, 0x1ce739ce},
+ {0x0000a418, 0x2d001dce},
+ {0x0000a41c, 0x1ce739ce},
+ {0x0000a420, 0x000001ce},
+ {0x0000a424, 0x1ce739ce},
+ {0x0000a428, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce},
+ {0x0000a434, 0x00000000},
+ {0x0000a438, 0x00001801},
+ {0x0000a43c, 0x00000000},
+ {0x0000a440, 0x00000000},
+ {0x0000a444, 0x00000000},
+ {0x0000a448, 0x04000080},
+ {0x0000a44c, 0x00000001},
+ {0x0000a450, 0x00010000},
+ {0x0000a458, 0x00000000},
+ {0x0000a600, 0x00000000},
+ {0x0000a604, 0x00000000},
+ {0x0000a608, 0x00000000},
+ {0x0000a60c, 0x00000000},
+ {0x0000a610, 0x00000000},
+ {0x0000a614, 0x00000000},
+ {0x0000a618, 0x00000000},
+ {0x0000a61c, 0x00000000},
+ {0x0000a620, 0x00000000},
+ {0x0000a624, 0x00000000},
+ {0x0000a628, 0x00000000},
+ {0x0000a62c, 0x00000000},
+ {0x0000a630, 0x00000000},
+ {0x0000a634, 0x00000000},
+ {0x0000a638, 0x00000000},
+ {0x0000a63c, 0x00000000},
+ {0x0000a640, 0x00000000},
+ {0x0000a644, 0x3fad9d74},
+ {0x0000a648, 0x0048060a},
+ {0x0000a64c, 0x00000637},
+ {0x0000a670, 0x03020100},
+ {0x0000a674, 0x09080504},
+ {0x0000a678, 0x0d0c0b0a},
+ {0x0000a67c, 0x13121110},
+ {0x0000a680, 0x31301514},
+ {0x0000a684, 0x35343332},
+ {0x0000a688, 0x00000036},
+ {0x0000a690, 0x00000838},
+ {0x0000a7c0, 0x00000000},
+ {0x0000a7c4, 0xfffffffc},
+ {0x0000a7c8, 0x00000000},
+ {0x0000a7cc, 0x00000000},
+ {0x0000a7d0, 0x00000000},
+ {0x0000a7d4, 0x00000004},
+ {0x0000a7dc, 0x00000001},
+ {0x0000a8d0, 0x004b6a8e},
+ {0x0000a8d4, 0x00000820},
+ {0x0000a8dc, 0x00000000},
+ {0x0000a8f0, 0x00000000},
+ {0x0000a8f4, 0x00000000},
+ {0x0000b2d0, 0x00000080},
+ {0x0000b2d4, 0x00000000},
+ {0x0000b2dc, 0x00000000},
+ {0x0000b2e0, 0x00000000},
+ {0x0000b2e4, 0x00000000},
+ {0x0000b2e8, 0x00000000},
+ {0x0000b2ec, 0x00000000},
+ {0x0000b2f0, 0x00000000},
+ {0x0000b2f4, 0x00000000},
+ {0x0000b2f8, 0x00000000},
+ {0x0000b408, 0x0e79e5c0},
+ {0x0000b40c, 0x00820820},
+ {0x0000b420, 0x00000000},
+ {0x0000b8d0, 0x004b6a8e},
+ {0x0000b8d4, 0x00000820},
+ {0x0000b8dc, 0x00000000},
+ {0x0000b8f0, 0x00000000},
+ {0x0000b8f4, 0x00000000},
+ {0x0000c2d0, 0x00000080},
+ {0x0000c2d4, 0x00000000},
+ {0x0000c2dc, 0x00000000},
+ {0x0000c2e0, 0x00000000},
+ {0x0000c2e4, 0x00000000},
+ {0x0000c2e8, 0x00000000},
+ {0x0000c2ec, 0x00000000},
+ {0x0000c2f0, 0x00000000},
+ {0x0000c2f4, 0x00000000},
+ {0x0000c2f8, 0x00000000},
+ {0x0000c408, 0x0e79e5c0},
+ {0x0000c40c, 0x00820820},
+ {0x0000c420, 0x00000000},
+};
+
+static const u32 ar9300Modes_high_power_tx_gain_table_2p0[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
+ {0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
+ {0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
+ {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
+ {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
+ {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06802223, 0x06802223, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a822220, 0x0a822220, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x0f822223, 0x0f822223, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x14822620, 0x14822620, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x18822622, 0x18822622, 0x11800400, 0x11800400},
+ {0x0000a598, 0x1b822822, 0x1b822822, 0x15800402, 0x15800402},
+ {0x0000a59c, 0x20822842, 0x20822842, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x22822c41, 0x22822c41, 0x1b800603, 0x1b800603},
+ {0x0000a5a4, 0x28823042, 0x28823042, 0x1f800a02, 0x1f800a02},
+ {0x0000a5a8, 0x2c823044, 0x2c823044, 0x23800a04, 0x23800a04},
+ {0x0000a5ac, 0x2f823644, 0x2f823644, 0x26800a20, 0x26800a20},
+ {0x0000a5b0, 0x34825643, 0x34825643, 0x2a800e20, 0x2a800e20},
+ {0x0000a5b4, 0x38825a44, 0x38825a44, 0x2e800e22, 0x2e800e22},
+ {0x0000a5b8, 0x3b825e45, 0x3b825e45, 0x31800e24, 0x31800e24},
+ {0x0000a5bc, 0x41825e4a, 0x41825e4a, 0x34801640, 0x34801640},
+ {0x0000a5c0, 0x48825e6c, 0x48825e6c, 0x38801660, 0x38801660},
+ {0x0000a5c4, 0x4e825e8e, 0x4e825e8e, 0x3b801861, 0x3b801861},
+ {0x0000a5c8, 0x53825eb2, 0x53825eb2, 0x3e801a81, 0x3e801a81},
+ {0x0000a5cc, 0x59825eb5, 0x59825eb5, 0x42801a83, 0x42801a83},
+ {0x0000a5d0, 0x5f825ef6, 0x5f825ef6, 0x44801c84, 0x44801c84},
+ {0x0000a5d4, 0x62825f56, 0x62825f56, 0x48801ce3, 0x48801ce3},
+ {0x0000a5d8, 0x66827f56, 0x66827f56, 0x4c801ce5, 0x4c801ce5},
+ {0x0000a5dc, 0x6a829f56, 0x6a829f56, 0x50801ce9, 0x50801ce9},
+ {0x0000a5e0, 0x70849f56, 0x70849f56, 0x54801ceb, 0x54801ceb},
+ {0x0000a5e4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5e8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5ec, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5f0, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5f4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5f8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5fc, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x00016044, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
+ {0x00016048, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
+ {0x00016068, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
+ {0x00016444, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
+ {0x00016448, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
+ {0x00016468, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
+ {0x00016844, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
+ {0x00016848, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
+ {0x00016868, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
+};
+
+static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p0[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
+ {0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
+ {0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
+ {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
+ {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
+ {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06802223, 0x06802223, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a822220, 0x0a822220, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x0f822223, 0x0f822223, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x14822620, 0x14822620, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x18822622, 0x18822622, 0x11800400, 0x11800400},
+ {0x0000a598, 0x1b822822, 0x1b822822, 0x15800402, 0x15800402},
+ {0x0000a59c, 0x20822842, 0x20822842, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x22822c41, 0x22822c41, 0x1b800603, 0x1b800603},
+ {0x0000a5a4, 0x28823042, 0x28823042, 0x1f800a02, 0x1f800a02},
+ {0x0000a5a8, 0x2c823044, 0x2c823044, 0x23800a04, 0x23800a04},
+ {0x0000a5ac, 0x2f823644, 0x2f823644, 0x26800a20, 0x26800a20},
+ {0x0000a5b0, 0x34825643, 0x34825643, 0x2a800e20, 0x2a800e20},
+ {0x0000a5b4, 0x38825a44, 0x38825a44, 0x2e800e22, 0x2e800e22},
+ {0x0000a5b8, 0x3b825e45, 0x3b825e45, 0x31800e24, 0x31800e24},
+ {0x0000a5bc, 0x41825e4a, 0x41825e4a, 0x34801640, 0x34801640},
+ {0x0000a5c0, 0x48825e6c, 0x48825e6c, 0x38801660, 0x38801660},
+ {0x0000a5c4, 0x4e825e8e, 0x4e825e8e, 0x3b801861, 0x3b801861},
+ {0x0000a5c8, 0x53825eb2, 0x53825eb2, 0x3e801a81, 0x3e801a81},
+ {0x0000a5cc, 0x59825eb5, 0x59825eb5, 0x42801a83, 0x42801a83},
+ {0x0000a5d0, 0x5f825ef6, 0x5f825ef6, 0x44801c84, 0x44801c84},
+ {0x0000a5d4, 0x62825f56, 0x62825f56, 0x48801ce3, 0x48801ce3},
+ {0x0000a5d8, 0x66827f56, 0x66827f56, 0x4c801ce5, 0x4c801ce5},
+ {0x0000a5dc, 0x6a829f56, 0x6a829f56, 0x50801ce9, 0x50801ce9},
+ {0x0000a5e0, 0x70849f56, 0x70849f56, 0x54801ceb, 0x54801ceb},
+ {0x0000a5e4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5e8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5ec, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5f0, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5f4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5f8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5fc, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x00016044, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
+ {0x00016048, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
+ {0x00016448, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
+ {0x00016848, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
+static const u32 ar9300Common_rx_gain_table_2p0[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x01910190},
+ {0x0000a030, 0x01930192},
+ {0x0000a034, 0x01950194},
+ {0x0000a038, 0x038a0196},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x22222229},
+ {0x0000a084, 0x1d1d1d1d},
+ {0x0000a088, 0x1d1d1d1d},
+ {0x0000a08c, 0x1d1d1d1d},
+ {0x0000a090, 0x171d1d1d},
+ {0x0000a094, 0x11111717},
+ {0x0000a098, 0x00030311},
+ {0x0000a09c, 0x00000000},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x32323232},
+ {0x0000b084, 0x2f2f3232},
+ {0x0000b088, 0x23282a2d},
+ {0x0000b08c, 0x1c1e2123},
+ {0x0000b090, 0x14171919},
+ {0x0000b094, 0x0e0e1214},
+ {0x0000b098, 0x03050707},
+ {0x0000b09c, 0x00030303},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9300Modes_low_ob_db_tx_gain_table_2p0[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
+ {0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
+ {0x0000a598, 0x21820220, 0x21820220, 0x16800402, 0x16800402},
+ {0x0000a59c, 0x27820223, 0x27820223, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
+ {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
+ {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
+ {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
+ {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
+ {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
+ {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
+ {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
+ {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
+ {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3f801861, 0x3f801861},
+ {0x0000a5c8, 0x5782286c, 0x5782286c, 0x43801a81, 0x43801a81},
+ {0x0000a5cc, 0x5c84286b, 0x5c84286b, 0x47801a83, 0x47801a83},
+ {0x0000a5d0, 0x61842a6c, 0x61842a6c, 0x4a801c84, 0x4a801c84},
+ {0x0000a5d4, 0x66862a6c, 0x66862a6c, 0x4e801ce3, 0x4e801ce3},
+ {0x0000a5d8, 0x6b862e6c, 0x6b862e6c, 0x52801ce5, 0x52801ce5},
+ {0x0000a5dc, 0x7086308c, 0x7086308c, 0x56801ce9, 0x56801ce9},
+ {0x0000a5e0, 0x738a308a, 0x738a308a, 0x5a801ceb, 0x5a801ceb},
+ {0x0000a5e4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5e8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5ec, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f0, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5fc, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016048, 0x64000001, 0x64000001, 0x64000001, 0x64000001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016448, 0x64000001, 0x64000001, 0x64000001, 0x64000001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016848, 0x64000001, 0x64000001, 0x64000001, 0x64000001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
+static const u32 ar9300_2p0_mac_core[][2] = {
+ /* Addr allmodes */
+ {0x00000008, 0x00000000},
+ {0x00000030, 0x00020085},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000000},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x000010f0, 0x00000100},
+ {0x00001270, 0x00000000},
+ {0x000012b0, 0x00000000},
+ {0x000012f0, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00008000, 0x00000000},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008018, 0x00000000},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00000000},
+ {0x00008040, 0x00000000},
+ {0x00008044, 0x00000000},
+ {0x00008048, 0x00000000},
+ {0x0000804c, 0xffffffff},
+ {0x00008054, 0x00000000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x00008070, 0x00000310},
+ {0x00008074, 0x00000020},
+ {0x00008078, 0x00000000},
+ {0x0000809c, 0x0000000f},
+ {0x000080a0, 0x00000000},
+ {0x000080a4, 0x02ff0000},
+ {0x000080a8, 0x0e070605},
+ {0x000080ac, 0x0000000d},
+ {0x000080b0, 0x00000000},
+ {0x000080b4, 0x00000000},
+ {0x000080b8, 0x00000000},
+ {0x000080bc, 0x00000000},
+ {0x000080c0, 0x2a800000},
+ {0x000080c4, 0x06900168},
+ {0x000080c8, 0x13881c20},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00252500},
+ {0x000080d4, 0x00a00000},
+ {0x000080d8, 0x00400000},
+ {0x000080dc, 0x00000000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x3f3f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00000000},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000000},
+ {0x00008114, 0x000007ff},
+ {0x00008118, 0x000000aa},
+ {0x0000811c, 0x00003210},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x0000ffff},
+ {0x00008144, 0xffffffff},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x00008170, 0x18486200},
+ {0x00008174, 0x33332210},
+ {0x00008178, 0x00000000},
+ {0x0000817c, 0x00020000},
+ {0x000081c0, 0x00000000},
+ {0x000081c4, 0x33332210},
+ {0x000081c8, 0x00000000},
+ {0x000081cc, 0x00000000},
+ {0x000081d4, 0x00000000},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008244, 0x0010f424},
+ {0x00008248, 0x00000800},
+ {0x0000824c, 0x0001e848},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x40000000},
+ {0x00008260, 0x00080922},
+ {0x00008264, 0x98a00010},
+ {0x00008268, 0xffffffff},
+ {0x0000826c, 0x0000ffff},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000004},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x000000ff},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x0000829c, 0x00000000},
+ {0x00008300, 0x00000140},
+ {0x00008314, 0x00000000},
+ {0x0000831c, 0x0000010d},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x00000007},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000700},
+ {0x00008338, 0x00ff0000},
+ {0x0000833c, 0x02400000},
+ {0x00008340, 0x000107ff},
+ {0x00008344, 0xaa48105b},
+ {0x00008348, 0x008f0000},
+ {0x0000835c, 0x00000000},
+ {0x00008360, 0xffffffff},
+ {0x00008364, 0xffffffff},
+ {0x00008368, 0x00000000},
+ {0x00008370, 0x00000000},
+ {0x00008374, 0x000000ff},
+ {0x00008378, 0x00000000},
+ {0x0000837c, 0x00000000},
+ {0x00008380, 0xffffffff},
+ {0x00008384, 0xffffffff},
+ {0x00008390, 0xffffffff},
+ {0x00008394, 0xffffffff},
+ {0x00008398, 0x00000000},
+ {0x0000839c, 0x00000000},
+ {0x000083a0, 0x00000000},
+ {0x000083a4, 0x0000fa14},
+ {0x000083a8, 0x000f0c00},
+ {0x000083ac, 0x33332210},
+ {0x000083b0, 0x33332210},
+ {0x000083b4, 0x33332210},
+ {0x000083b8, 0x33332210},
+ {0x000083bc, 0x00000000},
+ {0x000083c0, 0x00000000},
+ {0x000083c4, 0x00000000},
+ {0x000083c8, 0x00000000},
+ {0x000083cc, 0x00000200},
+ {0x000083d0, 0x000301ff},
+};
+
+static const u32 ar9300Common_wo_xlna_rx_gain_table_2p0[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x03820190},
+ {0x0000a030, 0x03840383},
+ {0x0000a034, 0x03880385},
+ {0x0000a038, 0x038a0389},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x29292929},
+ {0x0000a084, 0x29292929},
+ {0x0000a088, 0x29292929},
+ {0x0000a08c, 0x29292929},
+ {0x0000a090, 0x22292929},
+ {0x0000a094, 0x1d1d2222},
+ {0x0000a098, 0x0c111117},
+ {0x0000a09c, 0x00030303},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x32323232},
+ {0x0000b084, 0x2f2f3232},
+ {0x0000b088, 0x23282a2d},
+ {0x0000b08c, 0x1c1e2123},
+ {0x0000b090, 0x14171919},
+ {0x0000b094, 0x0e0e1214},
+ {0x0000b098, 0x03050707},
+ {0x0000b09c, 0x00030303},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9300_2p0_soc_preamble[][2] = {
+ /* Addr allmodes */
+ {0x000040a4, 0x00a0c1c9},
+ {0x00007008, 0x00000000},
+ {0x00007020, 0x00000000},
+ {0x00007034, 0x00000002},
+ {0x00007038, 0x000004c2},
+};
+
+static const u32 ar9300PciePhy_pll_on_clkreq_disable_L1_2p0[][2] = {
+ /* Addr allmodes */
+ {0x00004040, 0x08212e5e},
+ {0x00004040, 0x0008003b},
+ {0x00004044, 0x00000000},
+};
+
+static const u32 ar9300PciePhy_clkreq_enable_L1_2p0[][2] = {
+ /* Addr allmodes */
+ {0x00004040, 0x08253e5e},
+ {0x00004040, 0x0008003b},
+ {0x00004044, 0x00000000},
+};
+
+static const u32 ar9300PciePhy_clkreq_disable_L1_2p0[][2] = {
+ /* Addr allmodes */
+ {0x00004040, 0x08213e5e},
+ {0x00004040, 0x0008003b},
+ {0x00004044, 0x00000000},
+};
+
+#endif /* INITVALS_9003_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
new file mode 100644
index 000000000000..37ba37481a47
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -0,0 +1,614 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include "hw.h"
+#include "ar9003_mac.h"
+
+static void ar9003_hw_rx_enable(struct ath_hw *hw)
+{
+ REG_WRITE(hw, AR_CR, 0);
+}
+
+static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
+{
+ int checksum;
+
+ checksum = ads->info + ads->link
+ + ads->data0 + ads->ctl3
+ + ads->data1 + ads->ctl5
+ + ads->data2 + ads->ctl7
+ + ads->data3 + ads->ctl9;
+
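+ /* Fold any carry back into the low 16 bits so the result fits AR_TxPtrChkSum. */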
+ return ((checksum & 0xffff) + (checksum >> 16)) & AR_TxPtrChkSum;
+}
+
+static void ar9003_hw_set_desc_link(void *ds, u32 ds_link)
+{
+ struct ar9003_txc *ads = ds;
+
+ ads->link = ds_link;
+ ads->ctl10 &= ~AR_TxPtrChkSum;
+ ads->ctl10 |= ar9003_calc_ptr_chksum(ads);
+}
+
+static void ar9003_hw_get_desc_link(void *ds, u32 **ds_link)
+{
+ struct ar9003_txc *ads = ds;
+
+ *ds_link = &ads->link;
+}
+
+static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
+{
+ u32 isr = 0;
+ u32 mask2 = 0;
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+ u32 sync_cause = 0;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
+ if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
+ == AR_RTC_STATUS_ON)
+ isr = REG_READ(ah, AR_ISR);
+ }
+
+ sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) & AR_INTR_SYNC_DEFAULT;
+
+ *masked = 0;
+
+ if (!isr && !sync_cause)
+ return false;
+
+ if (isr) {
+ if (isr & AR_ISR_BCNMISC) {
+ u32 isr2;
+ isr2 = REG_READ(ah, AR_ISR_S2);
+
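+ /* Map secondary (AR_ISR_S2) status bits into the primary interrupt mask. */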
+ mask2 |= ((isr2 & AR_ISR_S2_TIM) >>
+ MAP_ISR_S2_TIM);
+ mask2 |= ((isr2 & AR_ISR_S2_DTIM) >>
+ MAP_ISR_S2_DTIM);
+ mask2 |= ((isr2 & AR_ISR_S2_DTIMSYNC) >>
+ MAP_ISR_S2_DTIMSYNC);
+ mask2 |= ((isr2 & AR_ISR_S2_CABEND) >>
+ MAP_ISR_S2_CABEND);
+ mask2 |= ((isr2 & AR_ISR_S2_GTT) <<
+ MAP_ISR_S2_GTT);
+ mask2 |= ((isr2 & AR_ISR_S2_CST) <<
+ MAP_ISR_S2_CST);
+ mask2 |= ((isr2 & AR_ISR_S2_TSFOOR) >>
+ MAP_ISR_S2_TSFOOR);
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+ REG_WRITE(ah, AR_ISR_S2, isr2);
+ isr &= ~AR_ISR_BCNMISC;
+ }
+ }
+
+ if ((pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED))
+ isr = REG_READ(ah, AR_ISR_RAC);
+
+ if (isr == 0xffffffff) {
+ *masked = 0;
+ return false;
+ }
+
+ *masked = isr & ATH9K_INT_COMMON;
+
+ if (ah->config.rx_intr_mitigation)
+ if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
+ *masked |= ATH9K_INT_RXLP;
+
+ if (ah->config.tx_intr_mitigation)
+ if (isr & (AR_ISR_TXMINTR | AR_ISR_TXINTM))
+ *masked |= ATH9K_INT_TX;
+
+ if (isr & (AR_ISR_LP_RXOK | AR_ISR_RXERR))
+ *masked |= ATH9K_INT_RXLP;
+
+ if (isr & AR_ISR_HP_RXOK)
+ *masked |= ATH9K_INT_RXHP;
+
+ if (isr & (AR_ISR_TXOK | AR_ISR_TXERR | AR_ISR_TXEOL)) {
+ *masked |= ATH9K_INT_TX;
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+ u32 s0, s1;
+ s0 = REG_READ(ah, AR_ISR_S0);
+ REG_WRITE(ah, AR_ISR_S0, s0);
+ s1 = REG_READ(ah, AR_ISR_S1);
+ REG_WRITE(ah, AR_ISR_S1, s1);
+
+ isr &= ~(AR_ISR_TXOK | AR_ISR_TXERR |
+ AR_ISR_TXEOL);
+ }
+ }
+
+ if (isr & AR_ISR_GENTMR) {
+ u32 s5;
+
+ if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)
+ s5 = REG_READ(ah, AR_ISR_S5_S);
+ else
+ s5 = REG_READ(ah, AR_ISR_S5);
+
+ ah->intr_gen_timer_trigger =
+ MS(s5, AR_ISR_S5_GENTIMER_TRIG);
+
+ ah->intr_gen_timer_thresh =
+ MS(s5, AR_ISR_S5_GENTIMER_THRESH);
+
+ if (ah->intr_gen_timer_trigger)
+ *masked |= ATH9K_INT_GENTIMER;
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+ REG_WRITE(ah, AR_ISR_S5, s5);
+ isr &= ~AR_ISR_GENTMR;
+ }
+
+ }
+
+ *masked |= mask2;
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+ REG_WRITE(ah, AR_ISR, isr);
+
+ (void) REG_READ(ah, AR_ISR);
+ }
+ }
+
+ if (sync_cause) {
+ if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
+ REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
+ REG_WRITE(ah, AR_RC, 0);
+ *masked |= ATH9K_INT_FATAL;
+ }
+
+ if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT)
+ ath_print(common, ATH_DBG_INTERRUPT,
+ "AR_INTR_SYNC_LOCAL_TIMEOUT\n");
+
+ REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
+ (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);
+
+ }
+ return true;
+}
+
+static void ar9003_hw_fill_txdesc(struct ath_hw *ah, void *ds, u32 seglen,
+ bool is_firstseg, bool is_lastseg,
+ const void *ds0, dma_addr_t buf_addr,
+ unsigned int qcu)
+{
+ struct ar9003_txc *ads = (struct ar9003_txc *) ds;
+ unsigned int descid = 0;
+
+ ads->info = (ATHEROS_VENDOR_ID << AR_DescId_S) |
+ (1 << AR_TxRxDesc_S) |
+ (1 << AR_CtrlStat_S) |
+ (qcu << AR_TxQcuNum_S) | 0x17;
+
+ ads->data0 = buf_addr;
+ ads->data1 = 0;
+ ads->data2 = 0;
+ ads->data3 = 0;
+
+ ads->ctl3 = (seglen << AR_BufLen_S);
+ ads->ctl3 &= AR_BufLen;
+
+ /* Fill in pointer checksum and descriptor id */
+ ads->ctl10 = ar9003_calc_ptr_chksum(ads);
+ ads->ctl10 |= (descid << AR_TxDescId_S);
+
+ if (is_firstseg) {
+ ads->ctl12 |= (is_lastseg ? 0 : AR_TxMore);
+ } else if (is_lastseg) {
+ ads->ctl11 = 0;
+ ads->ctl12 = 0;
+ ads->ctl13 = AR9003TXC_CONST(ds0)->ctl13;
+ ads->ctl14 = AR9003TXC_CONST(ds0)->ctl14;
+ } else {
+ /* XXX Intermediate descriptor in a multi-descriptor frame. */
+ ads->ctl11 = 0;
+ ads->ctl12 = AR_TxMore;
+ ads->ctl13 = 0;
+ ads->ctl14 = 0;
+ }
+}
+
+static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
+ struct ath_tx_status *ts)
+{
+ struct ar9003_txs *ads;
+
+ ads = &ah->ts_ring[ah->ts_tail];
+
+ if ((ads->status8 & AR_TxDone) == 0)
+ return -EINPROGRESS;
+
+ ah->ts_tail = (ah->ts_tail + 1) % ah->ts_size;
+
+ if ((MS(ads->ds_info, AR_DescId) != ATHEROS_VENDOR_ID) ||
+ (MS(ads->ds_info, AR_TxRxDesc) != 1)) {
+ ath_print(ath9k_hw_common(ah), ATH_DBG_XMIT,
+ "Tx Descriptor error %x\n", ads->ds_info);
+ memset(ads, 0, sizeof(*ads));
+ return -EIO;
+ }
+
+ ts->qid = MS(ads->ds_info, AR_TxQcuNum);
+ ts->desc_id = MS(ads->status1, AR_TxDescId);
+ ts->ts_seqnum = MS(ads->status8, AR_SeqNum);
+ ts->ts_tstamp = ads->status4;
+ ts->ts_status = 0;
+ ts->ts_flags = 0;
+
+ if (ads->status3 & AR_ExcessiveRetries)
+ ts->ts_status |= ATH9K_TXERR_XRETRY;
+ if (ads->status3 & AR_Filtered)
+ ts->ts_status |= ATH9K_TXERR_FILT;
+ if (ads->status3 & AR_FIFOUnderrun) {
+ ts->ts_status |= ATH9K_TXERR_FIFO;
+ ath9k_hw_updatetxtriglevel(ah, true);
+ }
+ if (ads->status8 & AR_TxOpExceeded)
+ ts->ts_status |= ATH9K_TXERR_XTXOP;
+ if (ads->status3 & AR_TxTimerExpired)
+ ts->ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
+
+ if (ads->status3 & AR_DescCfgErr)
+ ts->ts_flags |= ATH9K_TX_DESC_CFG_ERR;
+ if (ads->status3 & AR_TxDataUnderrun) {
+ ts->ts_flags |= ATH9K_TX_DATA_UNDERRUN;
+ ath9k_hw_updatetxtriglevel(ah, true);
+ }
+ if (ads->status3 & AR_TxDelimUnderrun) {
+ ts->ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
+ ath9k_hw_updatetxtriglevel(ah, true);
+ }
+ if (ads->status2 & AR_TxBaStatus) {
+ ts->ts_flags |= ATH9K_TX_BA;
+ ts->ba_low = ads->status5;
+ ts->ba_high = ads->status6;
+ }
+
+ ts->ts_rateindex = MS(ads->status8, AR_FinalTxIdx);
+
+ ts->ts_rssi = MS(ads->status7, AR_TxRSSICombined);
+ ts->ts_rssi_ctl0 = MS(ads->status2, AR_TxRSSIAnt00);
+ ts->ts_rssi_ctl1 = MS(ads->status2, AR_TxRSSIAnt01);
+ ts->ts_rssi_ctl2 = MS(ads->status2, AR_TxRSSIAnt02);
+ ts->ts_rssi_ext0 = MS(ads->status7, AR_TxRSSIAnt10);
+ ts->ts_rssi_ext1 = MS(ads->status7, AR_TxRSSIAnt11);
+ ts->ts_rssi_ext2 = MS(ads->status7, AR_TxRSSIAnt12);
+ ts->ts_shortretry = MS(ads->status3, AR_RTSFailCnt);
+ ts->ts_longretry = MS(ads->status3, AR_DataFailCnt);
+ ts->ts_virtcol = MS(ads->status3, AR_VirtRetryCnt);
+ ts->ts_antenna = 0;
+
+ ts->tid = MS(ads->status8, AR_TxTid);
+
+ memset(ads, 0, sizeof(*ads));
+
+ return 0;
+}
+
+static void ar9003_hw_set11n_txdesc(struct ath_hw *ah, void *ds,
+ u32 pktlen, enum ath9k_pkt_type type, u32 txpower,
+ u32 keyIx, enum ath9k_key_type keyType, u32 flags)
+{
+ struct ar9003_txc *ads = (struct ar9003_txc *) ds;
+
+ if (txpower > ah->txpower_limit)
+ txpower = ah->txpower_limit;
+
+ txpower += ah->txpower_indexoffset;
+ if (txpower > 63)
+ txpower = 63;
+
+ ads->ctl11 = (pktlen & AR_FrameLen)
+ | (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
+ | SM(txpower, AR_XmitPower)
+ | (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
+ | (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
+ | (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0)
+ | (flags & ATH9K_TXDESC_LOWRXCHAIN ? AR_LowRxChain : 0);
+
+ ads->ctl12 =
+ (keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
+ | SM(type, AR_FrameType)
+ | (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
+ | (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
+ | (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);
+
+ ads->ctl17 = SM(keyType, AR_EncrType) |
+ (flags & ATH9K_TXDESC_LDPC ? AR_LDPC : 0);
+ ads->ctl18 = 0;
+ ads->ctl19 = AR_Not_Sounding;
+
+ ads->ctl20 = 0;
+ ads->ctl21 = 0;
+ ads->ctl22 = 0;
+}
+
+static void ar9003_hw_set11n_ratescenario(struct ath_hw *ah, void *ds,
+ void *lastds,
+ u32 durUpdateEn, u32 rtsctsRate,
+ u32 rtsctsDuration,
+ struct ath9k_11n_rate_series series[],
+ u32 nseries, u32 flags)
+{
+ struct ar9003_txc *ads = (struct ar9003_txc *) ds;
+ struct ar9003_txc *last_ads = (struct ar9003_txc *) lastds;
+ u_int32_t ctl11;
+
+ if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
+ ctl11 = ads->ctl11;
+
+ if (flags & ATH9K_TXDESC_RTSENA) {
+ ctl11 &= ~AR_CTSEnable;
+ ctl11 |= AR_RTSEnable;
+ } else {
+ ctl11 &= ~AR_RTSEnable;
+ ctl11 |= AR_CTSEnable;
+ }
+
+ ads->ctl11 = ctl11;
+ } else {
+ ads->ctl11 = (ads->ctl11 & ~(AR_RTSEnable | AR_CTSEnable));
+ }
+
+ ads->ctl13 = set11nTries(series, 0)
+ | set11nTries(series, 1)
+ | set11nTries(series, 2)
+ | set11nTries(series, 3)
+ | (durUpdateEn ? AR_DurUpdateEna : 0)
+ | SM(0, AR_BurstDur);
+
+ ads->ctl14 = set11nRate(series, 0)
+ | set11nRate(series, 1)
+ | set11nRate(series, 2)
+ | set11nRate(series, 3);
+
+ ads->ctl15 = set11nPktDurRTSCTS(series, 0)
+ | set11nPktDurRTSCTS(series, 1);
+
+ ads->ctl16 = set11nPktDurRTSCTS(series, 2)
+ | set11nPktDurRTSCTS(series, 3);
+
+ ads->ctl18 = set11nRateFlags(series, 0)
+ | set11nRateFlags(series, 1)
+ | set11nRateFlags(series, 2)
+ | set11nRateFlags(series, 3)
+ | SM(rtsctsRate, AR_RTSCTSRate);
+ ads->ctl19 = AR_Not_Sounding;
+
+ last_ads->ctl13 = ads->ctl13;
+ last_ads->ctl14 = ads->ctl14;
+}
+
+static void ar9003_hw_set11n_aggr_first(struct ath_hw *ah, void *ds,
+ u32 aggrLen)
+{
+ struct ar9003_txc *ads = (struct ar9003_txc *) ds;
+
+ ads->ctl12 |= (AR_IsAggr | AR_MoreAggr);
+
+ ads->ctl17 &= ~AR_AggrLen;
+ ads->ctl17 |= SM(aggrLen, AR_AggrLen);
+}
+
+static void ar9003_hw_set11n_aggr_middle(struct ath_hw *ah, void *ds,
+ u32 numDelims)
+{
+ struct ar9003_txc *ads = (struct ar9003_txc *) ds;
+ unsigned int ctl17;
+
+ ads->ctl12 |= (AR_IsAggr | AR_MoreAggr);
+
+ /*
+ * We use a stack variable to manipulate ctl17 to reduce uncached
+ * read-modify-write cycles.
+ */
+ ctl17 = ads->ctl17;
+ ctl17 &= ~AR_PadDelim;
+ ctl17 |= SM(numDelims, AR_PadDelim);
+ ads->ctl17 = ctl17;
+}
+
+static void ar9003_hw_set11n_aggr_last(struct ath_hw *ah, void *ds)
+{
+ struct ar9003_txc *ads = (struct ar9003_txc *) ds;
+
+ ads->ctl12 |= AR_IsAggr;
+ ads->ctl12 &= ~AR_MoreAggr;
+ ads->ctl17 &= ~AR_PadDelim;
+}
+
+static void ar9003_hw_clr11n_aggr(struct ath_hw *ah, void *ds)
+{
+ struct ar9003_txc *ads = (struct ar9003_txc *) ds;
+
+ ads->ctl12 &= (~AR_IsAggr & ~AR_MoreAggr);
+}
+
+static void ar9003_hw_set11n_burstduration(struct ath_hw *ah, void *ds,
+ u32 burstDuration)
+{
+ struct ar9003_txc *ads = (struct ar9003_txc *) ds;
+
+ ads->ctl13 &= ~AR_BurstDur;
+ ads->ctl13 |= SM(burstDuration, AR_BurstDur);
+
+}
+
+static void ar9003_hw_set11n_virtualmorefrag(struct ath_hw *ah, void *ds,
+ u32 vmf)
+{
+ struct ar9003_txc *ads = (struct ar9003_txc *) ds;
+
+ if (vmf)
+ ads->ctl11 |= AR_VirtMoreFrag;
+ else
+ ads->ctl11 &= ~AR_VirtMoreFrag;
+}
+
+void ar9003_hw_attach_mac_ops(struct ath_hw *hw)
+{
+ struct ath_hw_ops *ops = ath9k_hw_ops(hw);
+
+ ops->rx_enable = ar9003_hw_rx_enable;
+ ops->set_desc_link = ar9003_hw_set_desc_link;
+ ops->get_desc_link = ar9003_hw_get_desc_link;
+ ops->get_isr = ar9003_hw_get_isr;
+ ops->fill_txdesc = ar9003_hw_fill_txdesc;
+ ops->proc_txdesc = ar9003_hw_proc_txdesc;
+ ops->set11n_txdesc = ar9003_hw_set11n_txdesc;
+ ops->set11n_ratescenario = ar9003_hw_set11n_ratescenario;
+ ops->set11n_aggr_first = ar9003_hw_set11n_aggr_first;
+ ops->set11n_aggr_middle = ar9003_hw_set11n_aggr_middle;
+ ops->set11n_aggr_last = ar9003_hw_set11n_aggr_last;
+ ops->clr11n_aggr = ar9003_hw_clr11n_aggr;
+ ops->set11n_burstduration = ar9003_hw_set11n_burstduration;
+ ops->set11n_virtualmorefrag = ar9003_hw_set11n_virtualmorefrag;
+}
+
+void ath9k_hw_set_rx_bufsize(struct ath_hw *ah, u16 buf_size)
+{
+ REG_WRITE(ah, AR_DATABUF_SIZE, buf_size & AR_DATABUF_SIZE_MASK);
+}
+EXPORT_SYMBOL(ath9k_hw_set_rx_bufsize);
+
+void ath9k_hw_addrxbuf_edma(struct ath_hw *ah, u32 rxdp,
+ enum ath9k_rx_qtype qtype)
+{
+ if (qtype == ATH9K_RX_QUEUE_HP)
+ REG_WRITE(ah, AR_HP_RXDP, rxdp);
+ else
+ REG_WRITE(ah, AR_LP_RXDP, rxdp);
+}
+EXPORT_SYMBOL(ath9k_hw_addrxbuf_edma);
+
+int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
+ void *buf_addr)
+{
+ struct ar9003_rxs *rxsp = (struct ar9003_rxs *) buf_addr;
+ unsigned int phyerr;
+
+ /* TODO: byte swap on big endian for ar9300_10 */
+
+ if ((rxsp->status11 & AR_RxDone) == 0)
+ return -EINPROGRESS;
+
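+ /* A valid RX status descriptor carries the Atheros vendor ID (0x168c) in ds_info. */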
+ if (MS(rxsp->ds_info, AR_DescId) != 0x168c)
+ return -EINVAL;
+
+ if ((rxsp->ds_info & (AR_TxRxDesc | AR_CtrlStat)) != 0)
+ return -EINPROGRESS;
+
+ if (!rxs)
+ return 0;
+
+ rxs->rs_status = 0;
+ rxs->rs_flags = 0;
+
+ rxs->rs_datalen = rxsp->status2 & AR_DataLen;
+ rxs->rs_tstamp = rxsp->status3;
+
+ /* XXX: Keycache */
+ rxs->rs_rssi = MS(rxsp->status5, AR_RxRSSICombined);
+ rxs->rs_rssi_ctl0 = MS(rxsp->status1, AR_RxRSSIAnt00);
+ rxs->rs_rssi_ctl1 = MS(rxsp->status1, AR_RxRSSIAnt01);
+ rxs->rs_rssi_ctl2 = MS(rxsp->status1, AR_RxRSSIAnt02);
+ rxs->rs_rssi_ext0 = MS(rxsp->status5, AR_RxRSSIAnt10);
+ rxs->rs_rssi_ext1 = MS(rxsp->status5, AR_RxRSSIAnt11);
+ rxs->rs_rssi_ext2 = MS(rxsp->status5, AR_RxRSSIAnt12);
+
+ if (rxsp->status11 & AR_RxKeyIdxValid)
+ rxs->rs_keyix = MS(rxsp->status11, AR_KeyIdx);
+ else
+ rxs->rs_keyix = ATH9K_RXKEYIX_INVALID;
+
+ rxs->rs_rate = MS(rxsp->status1, AR_RxRate);
+ rxs->rs_more = (rxsp->status2 & AR_RxMore) ? 1 : 0;
+
+ rxs->rs_isaggr = (rxsp->status11 & AR_RxAggr) ? 1 : 0;
+ rxs->rs_moreaggr = (rxsp->status11 & AR_RxMoreAggr) ? 1 : 0;
+ rxs->rs_antenna = (MS(rxsp->status4, AR_RxAntenna) & 0x7);
+ rxs->rs_flags = (rxsp->status4 & AR_GI) ? ATH9K_RX_GI : 0;
+ rxs->rs_flags |= (rxsp->status4 & AR_2040) ? ATH9K_RX_2040 : 0;
+
+ rxs->evm0 = rxsp->status6;
+ rxs->evm1 = rxsp->status7;
+ rxs->evm2 = rxsp->status8;
+ rxs->evm3 = rxsp->status9;
+ rxs->evm4 = (rxsp->status10 & 0xffff);
+
+ if (rxsp->status11 & AR_PreDelimCRCErr)
+ rxs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
+
+ if (rxsp->status11 & AR_PostDelimCRCErr)
+ rxs->rs_flags |= ATH9K_RX_DELIM_CRC_POST;
+
+ if (rxsp->status11 & AR_DecryptBusyErr)
+ rxs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;
+
+ if ((rxsp->status11 & AR_RxFrameOK) == 0) {
+ if (rxsp->status11 & AR_CRCErr) {
+ rxs->rs_status |= ATH9K_RXERR_CRC;
+ } else if (rxsp->status11 & AR_PHYErr) {
+ rxs->rs_status |= ATH9K_RXERR_PHY;
+ phyerr = MS(rxsp->status11, AR_PHYErrCode);
+ rxs->rs_phyerr = phyerr;
+ } else if (rxsp->status11 & AR_DecryptCRCErr) {
+ rxs->rs_status |= ATH9K_RXERR_DECRYPT;
+ } else if (rxsp->status11 & AR_MichaelErr) {
+ rxs->rs_status |= ATH9K_RXERR_MIC;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ath9k_hw_process_rxdesc_edma);
+
+void ath9k_hw_reset_txstatus_ring(struct ath_hw *ah)
+{
+ ah->ts_tail = 0;
+
+ memset((void *) ah->ts_ring, 0,
+ ah->ts_size * sizeof(struct ar9003_txs));
+
+ ath_print(ath9k_hw_common(ah), ATH_DBG_XMIT,
+ "TS Start 0x%x End 0x%x Virt %p, Size %d\n",
+ ah->ts_paddr_start, ah->ts_paddr_end,
+ ah->ts_ring, ah->ts_size);
+
+ REG_WRITE(ah, AR_Q_STATUS_RING_START, ah->ts_paddr_start);
+ REG_WRITE(ah, AR_Q_STATUS_RING_END, ah->ts_paddr_end);
+}
+
+void ath9k_hw_setup_statusring(struct ath_hw *ah, void *ts_start,
+ u32 ts_paddr_start,
+ u8 size)
+{
+
+ ah->ts_paddr_start = ts_paddr_start;
+ ah->ts_paddr_end = ts_paddr_start + (size * sizeof(struct ar9003_txs));
+ ah->ts_size = size;
+ ah->ts_ring = (struct ar9003_txs *) ts_start;
+
+ ath9k_hw_reset_txstatus_ring(ah);
+}
+EXPORT_SYMBOL(ath9k_hw_setup_statusring);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.h b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
new file mode 100644
index 000000000000..f17558b14539
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef AR9003_MAC_H
+#define AR9003_MAC_H
+
+#define AR_DescId 0xffff0000
+#define AR_DescId_S 16
+#define AR_CtrlStat 0x00004000
+#define AR_CtrlStat_S 14
+#define AR_TxRxDesc 0x00008000
+#define AR_TxRxDesc_S 15
+#define AR_TxQcuNum 0x00000f00
+#define AR_TxQcuNum_S 8
+
+#define AR_BufLen 0x0fff0000
+#define AR_BufLen_S 16
+
+#define AR_TxDescId 0xffff0000
+#define AR_TxDescId_S 16
+#define AR_TxPtrChkSum 0x0000ffff
+
+#define AR_TxTid 0xf0000000
+#define AR_TxTid_S 28
+
+#define AR_LowRxChain 0x00004000
+
+#define AR_Not_Sounding 0x20000000
+
+#define MAP_ISR_S2_CST 6
+#define MAP_ISR_S2_GTT 6
+#define MAP_ISR_S2_TIM 3
+#define MAP_ISR_S2_CABEND 0
+#define MAP_ISR_S2_DTIMSYNC 7
+#define MAP_ISR_S2_DTIM 7
+#define MAP_ISR_S2_TSFOOR 4
+
+#define AR9003TXC_CONST(_ds) ((const struct ar9003_txc *) _ds)
+
+struct ar9003_rxs {
+ u32 ds_info;
+ u32 status1;
+ u32 status2;
+ u32 status3;
+ u32 status4;
+ u32 status5;
+ u32 status6;
+ u32 status7;
+ u32 status8;
+ u32 status9;
+ u32 status10;
+ u32 status11;
+} __packed;
+
+/* Transmit Control Descriptor */
+struct ar9003_txc {
+ u32 info; /* descriptor information */
+ u32 link; /* link pointer */
+ u32 data0; /* data pointer to 1st buffer */
+ u32 ctl3; /* DMA control 3 */
+ u32 data1; /* data pointer to 2nd buffer */
+ u32 ctl5; /* DMA control 5 */
+ u32 data2; /* data pointer to 3rd buffer */
+ u32 ctl7; /* DMA control 7 */
+ u32 data3; /* data pointer to 4th buffer */
+ u32 ctl9; /* DMA control 9 */
+ u32 ctl10; /* DMA control 10 */
+ u32 ctl11; /* DMA control 11 */
+ u32 ctl12; /* DMA control 12 */
+ u32 ctl13; /* DMA control 13 */
+ u32 ctl14; /* DMA control 14 */
+ u32 ctl15; /* DMA control 15 */
+ u32 ctl16; /* DMA control 16 */
+ u32 ctl17; /* DMA control 17 */
+ u32 ctl18; /* DMA control 18 */
+ u32 ctl19; /* DMA control 19 */
+ u32 ctl20; /* DMA control 20 */
+ u32 ctl21; /* DMA control 21 */
+ u32 ctl22; /* DMA control 22 */
+ u32 pad[9]; /* pad to cache line (128 bytes/32 dwords) */
+} __packed;
+
+struct ar9003_txs {
+ u32 ds_info;
+ u32 status1;
+ u32 status2;
+ u32 status3;
+ u32 status4;
+ u32 status5;
+ u32 status6;
+ u32 status7;
+ u32 status8;
+} __packed;
+
+void ar9003_hw_attach_mac_ops(struct ath_hw *hw);
+void ath9k_hw_set_rx_bufsize(struct ath_hw *ah, u16 buf_size);
+void ath9k_hw_addrxbuf_edma(struct ath_hw *ah, u32 rxdp,
+ enum ath9k_rx_qtype qtype);
+
+int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah,
+ struct ath_rx_status *rxs,
+ void *buf_addr);
+void ath9k_hw_reset_txstatus_ring(struct ath_hw *ah);
+void ath9k_hw_setup_statusring(struct ath_hw *ah, void *ts_start,
+ u32 ts_paddr_start,
+ u8 size);
+#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
new file mode 100644
index 000000000000..80431a2f6dc1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -0,0 +1,1134 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "ar9003_phy.h"
+
+/**
+ * ar9003_hw_set_channel - set channel on single-chip device
+ * @ah: atheros hardware structure
+ * @chan:
+ *
+ * This is the function to change channel on single-chip devices, that is,
+ * all devices after ar9280.
+ *
+ * This function takes the channel value in MHz and sets the
+ * hardware channel value. It assumes writes to the analog bus have been enabled.
+ *
+ * Actual Expression,
+ *
+ * For 2GHz channel,
+ * Channel Frequency = (3/4) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17)
+ * (freq_ref = 40MHz)
+ *
+ * For 5GHz channel,
+ * Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^10)
+ * (freq_ref = 40MHz/(24>>amodeRefSel))
+ *
+ * For 5GHz channels which are 5MHz spaced,
+ * Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17)
+ * (freq_ref = 40MHz)
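+ *
+ * Illustrative example for the 2GHz case: a 2412 MHz channel needs
+ * chansel + chanfrac/2^17 = 2412 / ((3/4) * 40) = 80.4, i.e. chansel = 80
+ * and chanfrac ~= 52429.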
+ */
+static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ u16 bMode, fracMode = 0, aModeRefSel = 0;
+ u32 freq, channelSel = 0, reg32 = 0;
+ struct chan_centers centers;
+ int loadSynthChannel;
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ freq = centers.synth_center;
+
+ if (freq < 4800) { /* 2 GHz, fractional mode */
+ channelSel = CHANSEL_2G(freq);
+ /* Set to 2G mode */
+ bMode = 1;
+ } else {
+ channelSel = CHANSEL_5G(freq);
+ /* Doubler is ON, so divide channelSel by 2. */
+ channelSel >>= 1;
+ /* Set to 5G mode */
+ bMode = 0;
+ }
+
+ /* Enable fractional mode for all channels */
+ fracMode = 1;
+ aModeRefSel = 0;
+ loadSynthChannel = 0;
+
+ reg32 = (bMode << 29);
+ REG_WRITE(ah, AR_PHY_SYNTH_CONTROL, reg32);
+
+ /* Enable Long shift Select for Synthesizer */
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_SYNTH4,
+ AR_PHY_SYNTH4_LONG_SHIFT_SELECT, 1);
+
+ /* Program Synth. setting */
+ reg32 = (channelSel << 2) | (fracMode << 30) |
+ (aModeRefSel << 28) | (loadSynthChannel << 31);
+ REG_WRITE(ah, AR_PHY_65NM_CH0_SYNTH7, reg32);
+
+ /* Toggle Load Synth channel bit */
+ loadSynthChannel = 1;
+ reg32 = (channelSel << 2) | (fracMode << 30) |
+ (aModeRefSel << 28) | (loadSynthChannel << 31);
+ REG_WRITE(ah, AR_PHY_65NM_CH0_SYNTH7, reg32);
+
+ ah->curchan = chan;
+ ah->curchan_rad_index = -1;
+
+ return 0;
+}
+
+/**
+ * ar9003_hw_spur_mitigate_mrc_cck - convert baseband spur frequency
+ * @ah: atheros hardware structure
+ * @chan:
+ *
+ * For single-chip solutions. Converts to baseband spur frequency given the
+ * input channel frequency and computes the register settings below.
+ *
+ * Spur mitigation for MRC CCK
+ */
+static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 spur_freq[4] = { 2420, 2440, 2464, 2480 };
+ int cur_bb_spur, negative = 0, cck_spur_freq;
+ int i;
+
+ /*
+ * Need to verify range +/- 10 MHz in control channel, otherwise spur
+ * is out-of-band and can be ignored.
+ */
+
+ for (i = 0; i < 4; i++) {
+ negative = 0;
+ cur_bb_spur = spur_freq[i] - chan->channel;
+
+ if (cur_bb_spur < 0) {
+ negative = 1;
+ cur_bb_spur = -cur_bb_spur;
+ }
+ if (cur_bb_spur < 10) {
+ cck_spur_freq = (int)((cur_bb_spur << 19) / 11);
+
+ if (negative == 1)
+ cck_spur_freq = -cck_spur_freq;
+
+ cck_spur_freq = cck_spur_freq & 0xfffff;
+
+ REG_RMW_FIELD(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_YCOK_MAX, 0x7);
+ REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
+ AR_PHY_CCK_SPUR_MIT_SPUR_RSSI_THR, 0x7f);
+ REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
+ AR_PHY_CCK_SPUR_MIT_SPUR_FILTER_TYPE,
+ 0x2);
+ REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
+ AR_PHY_CCK_SPUR_MIT_USE_CCK_SPUR_MIT,
+ 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
+ AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ,
+ cck_spur_freq);
+
+ return;
+ }
+ }
+
+ REG_RMW_FIELD(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_YCOK_MAX, 0x5);
+ REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
+ AR_PHY_CCK_SPUR_MIT_USE_CCK_SPUR_MIT, 0x0);
+ REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
+ AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ, 0x0);
+}
+
+/* Clean all spur register fields */
+static void ar9003_hw_spur_ofdm_clear(struct ath_hw *ah)
+{
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_SPUR_FILTER, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_SPUR_FREQ_SD, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_SPUR_DELTA_PHASE, 0);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_SPUR_RSSI, 0);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_EN_VIT_SPUR_RSSI, 0);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_ENABLE_NF_RSSI_SPUR_MIT, 0);
+
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_ENABLE_MASK_PPM, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_PILOT_MASK, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_CHAN_MASK, 0);
+ REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
+ AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A, 0);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
+ AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A, 0);
+ REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
+ AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A, 0);
+ REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
+ AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A, 0);
+ REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
+ AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A, 0);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
+ AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A, 0);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_MASK_RATE_CNTL, 0);
+}
+
+static void ar9003_hw_spur_ofdm(struct ath_hw *ah,
+ int freq_offset,
+ int spur_freq_sd,
+ int spur_delta_phase,
+ int spur_subchannel_sd)
+{
+ int mask_index = 0;
+
+ /* OFDM Spur mitigation */
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_SPUR_FILTER, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_SPUR_FREQ_SD, spur_freq_sd);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_SPUR_DELTA_PHASE, spur_delta_phase);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD, spur_subchannel_sd);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_SPUR_RSSI, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_SPUR_RSSI_THRESH, 34);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_EN_VIT_SPUR_RSSI, 1);
+
+ if (REG_READ_FIELD(ah, AR_PHY_MODE,
+ AR_PHY_MODE_DYNAMIC) == 0x1)
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_ENABLE_NF_RSSI_SPUR_MIT, 1);
+
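+ /*
+ * mask_index is the spur offset expressed in OFDM bins
+ * (312.5 kHz subcarrier spacing, i.e. 3.2 bins/MHz), kept to 7 bits.
+ */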
+ mask_index = (freq_offset << 4) / 5;
+ if (mask_index < 0)
+ mask_index = mask_index - 1;
+
+ mask_index = mask_index & 0x7f;
+
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_ENABLE_MASK_PPM, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_PILOT_MASK, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_CHAN_MASK, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
+ AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A, mask_index);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
+ AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A, mask_index);
+ REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
+ AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A, mask_index);
+ REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
+ AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A, 0xc);
+ REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
+ AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A, 0xc);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
+ AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A, 0xa0);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_MASK_RATE_CNTL, 0xff);
+}
+
+static void ar9003_hw_spur_ofdm_work(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ int freq_offset)
+{
+ int spur_freq_sd = 0;
+ int spur_subchannel_sd = 0;
+ int spur_delta_phase = 0;
+
+ if (IS_CHAN_HT40(chan)) {
+ if (freq_offset < 0) {
+ if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL,
+ AR_PHY_GC_DYN2040_PRI_CH) == 0x0)
+ spur_subchannel_sd = 1;
+ else
+ spur_subchannel_sd = 0;
+
+ spur_freq_sd = ((freq_offset + 10) << 9) / 11;
+
+ } else {
+ if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL,
+ AR_PHY_GC_DYN2040_PRI_CH) == 0x0)
+ spur_subchannel_sd = 0;
+ else
+ spur_subchannel_sd = 1;
+
+ spur_freq_sd = ((freq_offset - 10) << 9) / 11;
+
+ }
+
+ spur_delta_phase = (freq_offset << 17) / 5;
+
+ } else {
+ spur_subchannel_sd = 0;
+ spur_freq_sd = (freq_offset << 9) / 11;
+ spur_delta_phase = (freq_offset << 18) / 5;
+ }
+
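+ /*
+ * Both values are fixed point: spur_freq_sd is 10 bits wide and
+ * spur_delta_phase is 20 bits, matching the AR_PHY_TIMING11 field widths.
+ */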
+ spur_freq_sd = spur_freq_sd & 0x3ff;
+ spur_delta_phase = spur_delta_phase & 0xfffff;
+
+ ar9003_hw_spur_ofdm(ah,
+ freq_offset,
+ spur_freq_sd,
+ spur_delta_phase,
+ spur_subchannel_sd);
+}
+
+/* Spur mitigation for OFDM */
+static void ar9003_hw_spur_mitigate_ofdm(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ int synth_freq;
+ int range = 10;
+ int freq_offset = 0;
+ int mode;
+ u8 *spurChansPtr;
+ unsigned int i;
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+
+ if (IS_CHAN_5GHZ(chan)) {
+ spurChansPtr = &(eep->modalHeader5G.spurChans[0]);
+ mode = 0;
+ } else {
+ spurChansPtr = &(eep->modalHeader2G.spurChans[0]);
+ mode = 1;
+ }
+
+ if (spurChansPtr[0] == 0)
+ return; /* No spur in the mode */
+
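+ /*
+ * Spur search window around the synthesized center frequency:
+ * +/- 19 MHz for HT40, +/- 10 MHz otherwise.
+ */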
+ if (IS_CHAN_HT40(chan)) {
+ range = 19;
+ if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL,
+ AR_PHY_GC_DYN2040_PRI_CH) == 0x0)
+ synth_freq = chan->channel - 10;
+ else
+ synth_freq = chan->channel + 10;
+ } else {
+ range = 10;
+ synth_freq = chan->channel;
+ }
+
+ ar9003_hw_spur_ofdm_clear(ah);
+
+ for (i = 0; i < 5 && spurChansPtr[i]; i++) {
+ freq_offset = FBIN2FREQ(spurChansPtr[i], mode) - synth_freq;
+ if (abs(freq_offset) < range) {
+ ar9003_hw_spur_ofdm_work(ah, chan, freq_offset);
+ break;
+ }
+ }
+}
+
+static void ar9003_hw_spur_mitigate(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ ar9003_hw_spur_mitigate_mrc_cck(ah, chan);
+ ar9003_hw_spur_mitigate_ofdm(ah, chan);
+}
+
+static u32 ar9003_hw_compute_pll_control(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 pll;
+
+ pll = SM(0x5, AR_RTC_9300_PLL_REFDIV);
+
+ if (chan && IS_CHAN_HALF_RATE(chan))
+ pll |= SM(0x1, AR_RTC_9300_PLL_CLKSEL);
+ else if (chan && IS_CHAN_QUARTER_RATE(chan))
+ pll |= SM(0x2, AR_RTC_9300_PLL_CLKSEL);
+
+ pll |= SM(0x2c, AR_RTC_9300_PLL_DIV);
+
+ return pll;
+}
+
+static void ar9003_hw_set_channel_regs(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 phymode;
+ u32 enableDacFifo = 0;
+
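+ /* Preserve the current DAC FIFO enable setting from AR_PHY_GEN_CTRL. */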
+ enableDacFifo =
+ (REG_READ(ah, AR_PHY_GEN_CTRL) & AR_PHY_GC_ENABLE_DAC_FIFO);
+
+ /* Enable 11n HT, 20 MHz */
+ phymode = AR_PHY_GC_HT_EN | AR_PHY_GC_SINGLE_HT_LTF1 | AR_PHY_GC_WALSH |
+ AR_PHY_GC_SHORT_GI_40 | enableDacFifo;
+
+ /* Configure baseband for dynamic 20/40 operation */
+ if (IS_CHAN_HT40(chan)) {
+ phymode |= AR_PHY_GC_DYN2040_EN;
+ /* Configure control (primary) channel at +-10MHz */
+ if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
+ (chan->chanmode == CHANNEL_G_HT40PLUS))
+ phymode |= AR_PHY_GC_DYN2040_PRI_CH;
+
+ }
+
+ /* make sure we preserve INI settings */
+ phymode |= REG_READ(ah, AR_PHY_GEN_CTRL);
+ /* turn off Green Field detection for STA for now */
+ phymode &= ~AR_PHY_GC_GF_DETECT_EN;
+
+ REG_WRITE(ah, AR_PHY_GEN_CTRL, phymode);
+
+ /* Configure MAC for 20/40 operation */
+ ath9k_hw_set11nmac2040(ah);
+
+ /* global transmit timeout (25 TUs default)*/
+ REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
+ /* carrier sense timeout */
+ REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
+}
+
+static void ar9003_hw_init_bb(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 synthDelay;
+
+ /*
+ * Wait for the frequency synth to settle (synth goes on
+ * via AR_PHY_ACTIVE_EN). Read the phy active delay register.
+ * Value is in 100ns increments.
+ */
+ synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
+ if (IS_CHAN_B(chan))
+ synthDelay = (4 * synthDelay) / 22;
+ else
+ synthDelay /= 10;
+
+ /* Activate the PHY (includes baseband activate + synthesizer on) */
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+
+ /*
+ * There is an issue if the AP starts the calibration before
+ * the base band timeout completes. This could result in the
+ * rx_clear false triggering. As a workaround we add an extra
+ * BASE_ACTIVATE_DELAY usecs of delay to ensure this condition
+ * does not happen.
+ */
+ udelay(synthDelay + BASE_ACTIVATE_DELAY);
+}
+
+void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
+{
+ switch (rx) {
+ case 0x5:
+ REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
+ AR_PHY_SWAP_ALT_CHAIN);
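+ /* fall through */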
+ case 0x3:
+ case 0x1:
+ case 0x2:
+ case 0x7:
+ REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx);
+ REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx);
+ break;
+ default:
+ break;
+ }
+
+ REG_WRITE(ah, AR_SELFGEN_MASK, tx);
+ if (tx == 0x5) {
+ REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
+ AR_PHY_SWAP_ALT_CHAIN);
+ }
+}
+
+/*
+ * Override INI values with chip specific configuration.
+ */
+static void ar9003_hw_override_ini(struct ath_hw *ah)
+{
+ u32 val;
+
+ /*
+ * Set the RX_ABORT and RX_DIS and clear it only after
+ * RXE is set for MAC. This prevents frames with
+ * corrupted descriptor status.
+ */
+ REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
+
+ /*
+ * For AR9280 and above, there is a new feature that allows
+ * Multicast search based on both MAC Address and Key ID. By default,
+ * this feature is enabled. But since the driver is not using this
+ * feature, we switch it off; otherwise multicast search based on
+ * MAC addr only will fail.
+ */
+ val = REG_READ(ah, AR_PCU_MISC_MODE2) & (~AR_ADHOC_MCAST_KEYID_ENABLE);
+ REG_WRITE(ah, AR_PCU_MISC_MODE2,
+ val | AR_AGG_WEP_ENABLE_FIX | AR_AGG_WEP_ENABLE);
+}
+
+static void ar9003_hw_prog_ini(struct ath_hw *ah,
+ struct ar5416IniArray *iniArr,
+ int column)
+{
+ unsigned int i, regWrites = 0;
+
+ /* New INI format: Array may be undefined (pre, core, post arrays) */
+ if (!iniArr->ia_array)
+ return;
+
+ /*
+ * New INI format: Pre, core, and post arrays for a given subsystem
+ * may be modal (> 2 columns) or non-modal (2 columns). Determine if
+ * the array is non-modal and force the column to 1.
+ */
+ if (column >= iniArr->ia_columns)
+ column = 1;
+
+ for (i = 0; i < iniArr->ia_rows; i++) {
+ u32 reg = INI_RA(iniArr, i, 0);
+ u32 val = INI_RA(iniArr, i, column);
+
+ REG_WRITE(ah, reg, val);
+
+ /*
+ * Determine if this is a shift register value, and insert the
+ * configured delay if so.
+ */
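+ /*
+ * Registers in the 0x16000-0x16fff range are the 65 nm analog
+ * shift registers (AR_PHY_65NM_*).
+ */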
+ if (reg >= 0x16000 && reg < 0x17000 &&
+     ah->config.analog_shiftreg)
+ udelay(100);
+
+ DO_DELAY(regWrites);
+ }
+}
+
+static int ar9003_hw_process_ini(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
+ unsigned int regWrites = 0, i;
+ struct ieee80211_channel *channel = chan->chan;
+ u32 modesIndex, freqIndex;
+
+ switch (chan->chanmode) {
+ case CHANNEL_A:
+ case CHANNEL_A_HT20:
+ modesIndex = 1;
+ freqIndex = 1;
+ break;
+ case CHANNEL_A_HT40PLUS:
+ case CHANNEL_A_HT40MINUS:
+ modesIndex = 2;
+ freqIndex = 1;
+ break;
+ case CHANNEL_G:
+ case CHANNEL_G_HT20:
+ case CHANNEL_B:
+ modesIndex = 4;
+ freqIndex = 2;
+ break;
+ case CHANNEL_G_HT40PLUS:
+ case CHANNEL_G_HT40MINUS:
+ modesIndex = 3;
+ freqIndex = 2;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ATH_INI_NUM_SPLIT; i++) {
+ ar9003_hw_prog_ini(ah, &ah->iniSOC[i], modesIndex);
+ ar9003_hw_prog_ini(ah, &ah->iniMac[i], modesIndex);
+ ar9003_hw_prog_ini(ah, &ah->iniBB[i], modesIndex);
+ ar9003_hw_prog_ini(ah, &ah->iniRadio[i], modesIndex);
+ }
+
+ REG_WRITE_ARRAY(&ah->iniModesRxGain, 1, regWrites);
+ REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
+
+ /*
+ * For 5GHz channels requiring Fast Clock, apply
+ * different modal values.
+ */
+ if (IS_CHAN_A_FAST_CLOCK(ah, chan))
+ REG_WRITE_ARRAY(&ah->iniModesAdditional,
+ modesIndex, regWrites);
+
+ ar9003_hw_override_ini(ah);
+ ar9003_hw_set_channel_regs(ah, chan);
+ ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
+
+ /* Set TX power */
+ ah->eep_ops->set_txpower(ah, chan,
+ ath9k_regd_get_ctl(regulatory, chan),
+ channel->max_antenna_gain * 2,
+ channel->max_power * 2,
+ min((u32) MAX_RATE_POWER,
+ (u32) regulatory->power_limit));
+
+ return 0;
+}
+
+static void ar9003_hw_set_rfmode(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 rfMode = 0;
+
+ if (chan == NULL)
+ return;
+
+ rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
+ ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
+
+ if (IS_CHAN_A_FAST_CLOCK(ah, chan))
+ rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
+
+ REG_WRITE(ah, AR_PHY_MODE, rfMode);
+}
+
+static void ar9003_hw_mark_phy_inactive(struct ath_hw *ah)
+{
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
+}
+
+static void ar9003_hw_set_delta_slope(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 coef_scaled, ds_coef_exp, ds_coef_man;
+ u32 clockMhzScaled = 0x64000000;
+ struct chan_centers centers;
+
+ /*
+ * Half and quarter rate channels divide the scaled clock by 2 or 4;
+ * scale for the selected channel bandwidth.
+ */
+ if (IS_CHAN_HALF_RATE(chan))
+ clockMhzScaled = clockMhzScaled >> 1;
+ else if (IS_CHAN_QUARTER_RATE(chan))
+ clockMhzScaled = clockMhzScaled >> 2;
+
+ /*
+ * ALGO -> coef = 1e8/fcarrier*fclock/40;
+ * the coefficient is scaled to provide precision for this floating-point calculation
+ */
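+ /*
+ * 0x64000000 is 100 (MHz) << 24, so coef_scaled below carries the
+ * coefficient in Q24 fixed point.
+ */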
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ coef_scaled = clockMhzScaled / centers.synth_center;
+
+ ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
+ &ds_coef_exp);
+
+ REG_RMW_FIELD(ah, AR_PHY_TIMING3,
+ AR_PHY_TIMING3_DSC_MAN, ds_coef_man);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING3,
+ AR_PHY_TIMING3_DSC_EXP, ds_coef_exp);
+
+ /*
+ * For Short GI,
+ * scaled coeff is 9/10 that of normal coeff
+ */
+ coef_scaled = (9 * coef_scaled) / 10;
+
+ ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
+ &ds_coef_exp);
+
+ /* for short gi */
+ REG_RMW_FIELD(ah, AR_PHY_SGI_DELTA,
+ AR_PHY_SGI_DSC_MAN, ds_coef_man);
+ REG_RMW_FIELD(ah, AR_PHY_SGI_DELTA,
+ AR_PHY_SGI_DSC_EXP, ds_coef_exp);
+}
+
+static bool ar9003_hw_rfbus_req(struct ath_hw *ah)
+{
+ REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN);
+ return ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN,
+ AR_PHY_RFBUS_GRANT_EN, AH_WAIT_TIMEOUT);
+}
+
+/*
+ * Wait for the frequency synth to settle (synth goes on via PHY_ACTIVE_EN).
+ * Read the phy active delay register. Value is in 100ns increments.
+ */
+static void ar9003_hw_rfbus_done(struct ath_hw *ah)
+{
+ u32 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
+ if (IS_CHAN_B(ah->curchan))
+ synthDelay = (4 * synthDelay) / 22;
+ else
+ synthDelay /= 10;
+
+ udelay(synthDelay + BASE_ACTIVATE_DELAY);
+
+ REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
+}
+
+/*
+ * Set the interrupt and GPIO values so the ISR can disable RF
+ * on a switch signal. Assumes GPIO port and interrupt polarity
+ * are set prior to call.
+ */
+static void ar9003_hw_enable_rfkill(struct ath_hw *ah)
+{
+ /* Connect rfsilent_bb_l to baseband */
+ REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
+ AR_GPIO_INPUT_EN_VAL_RFSILENT_BB);
+ /* Set input mux for rfsilent_bb_l to GPIO #0 */
+ REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2,
+ AR_GPIO_INPUT_MUX2_RFSILENT);
+
+ /*
+ * Configure the desired GPIO port for input and
+ * enable baseband rf silence.
+ */
+ ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio);
+ REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB);
+}
+
+static void ar9003_hw_set_diversity(struct ath_hw *ah, bool value)
+{
+ u32 v = REG_READ(ah, AR_PHY_CCK_DETECT);
+ if (value)
+ v |= AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
+ else
+ v &= ~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
+ REG_WRITE(ah, AR_PHY_CCK_DETECT, v);
+}
+
+static bool ar9003_hw_ani_control(struct ath_hw *ah,
+ enum ath9k_ani_cmd cmd, int param)
+{
+ struct ar5416AniState *aniState = ah->curani;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ switch (cmd & ah->ani_function) {
+ case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{
+ u32 level = param;
+
+ if (level >= ARRAY_SIZE(ah->totalSizeDesired)) {
+ ath_print(common, ATH_DBG_ANI,
+ "level out of range (%u > %u)\n",
+ level,
+ (unsigned)ARRAY_SIZE(ah->totalSizeDesired));
+ return false;
+ }
+
+ REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
+ AR_PHY_DESIRED_SZ_TOT_DES,
+ ah->totalSizeDesired[level]);
+ REG_RMW_FIELD(ah, AR_PHY_AGC,
+ AR_PHY_AGC_COARSE_LOW,
+ ah->coarse_low[level]);
+ REG_RMW_FIELD(ah, AR_PHY_AGC,
+ AR_PHY_AGC_COARSE_HIGH,
+ ah->coarse_high[level]);
+ REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
+ AR_PHY_FIND_SIG_FIRPWR, ah->firpwr[level]);
+
+ if (level > aniState->noiseImmunityLevel)
+ ah->stats.ast_ani_niup++;
+ else if (level < aniState->noiseImmunityLevel)
+ ah->stats.ast_ani_nidown++;
+ aniState->noiseImmunityLevel = level;
+ break;
+ }
+ case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
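+ /* Threshold tables are indexed by 'on': [0] = detection off, [1] = on. */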
+ const int m1ThreshLow[] = { 127, 50 };
+ const int m2ThreshLow[] = { 127, 40 };
+ const int m1Thresh[] = { 127, 0x4d };
+ const int m2Thresh[] = { 127, 0x40 };
+ const int m2CountThr[] = { 31, 16 };
+ const int m2CountThrLow[] = { 63, 48 };
+ u32 on = param ? 1 : 0;
+
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
+ m1ThreshLow[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
+ m2ThreshLow[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M1_THRESH, m1Thresh[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M2_THRESH, m2Thresh[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M2COUNT_THR, m2CountThr[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
+ m2CountThrLow[on]);
+
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M1_THRESH_LOW, m1ThreshLow[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M2_THRESH_LOW, m2ThreshLow[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M1_THRESH, m1Thresh[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M2_THRESH, m2Thresh[on]);
+
+ if (on)
+ REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
+ else
+ REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
+
+ if (!on != aniState->ofdmWeakSigDetectOff) {
+ if (on)
+ ah->stats.ast_ani_ofdmon++;
+ else
+ ah->stats.ast_ani_ofdmoff++;
+ aniState->ofdmWeakSigDetectOff = !on;
+ }
+ break;
+ }
+ case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{
+ const int weakSigThrCck[] = { 8, 6 };
+ u32 high = param ? 1 : 0;
+
+ REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT,
+ AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK,
+ weakSigThrCck[high]);
+ if (high != aniState->cckWeakSigThreshold) {
+ if (high)
+ ah->stats.ast_ani_cckhigh++;
+ else
+ ah->stats.ast_ani_ccklow++;
+ aniState->cckWeakSigThreshold = high;
+ }
+ break;
+ }
+ case ATH9K_ANI_FIRSTEP_LEVEL:{
+ const int firstep[] = { 0, 4, 8 };
+ u32 level = param;
+
+ if (level >= ARRAY_SIZE(firstep)) {
+ ath_print(common, ATH_DBG_ANI,
+ "level out of range (%u > %u)\n",
+ level,
+ (unsigned) ARRAY_SIZE(firstep));
+ return false;
+ }
+ REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
+ AR_PHY_FIND_SIG_FIRSTEP,
+ firstep[level]);
+ if (level > aniState->firstepLevel)
+ ah->stats.ast_ani_stepup++;
+ else if (level < aniState->firstepLevel)
+ ah->stats.ast_ani_stepdown++;
+ aniState->firstepLevel = level;
+ break;
+ }
+ case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
+ const int cycpwrThr1[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
+ u32 level = param;
+
+ if (level >= ARRAY_SIZE(cycpwrThr1)) {
+ ath_print(common, ATH_DBG_ANI,
+ "level out of range (%u > %u)\n",
+ level,
+ (unsigned) ARRAY_SIZE(cycpwrThr1));
+ return false;
+ }
+ REG_RMW_FIELD(ah, AR_PHY_TIMING5,
+ AR_PHY_TIMING5_CYCPWR_THR1,
+ cycpwrThr1[level]);
+ if (level > aniState->spurImmunityLevel)
+ ah->stats.ast_ani_spurup++;
+ else if (level < aniState->spurImmunityLevel)
+ ah->stats.ast_ani_spurdown++;
+ aniState->spurImmunityLevel = level;
+ break;
+ }
+ case ATH9K_ANI_PRESENT:
+ break;
+ default:
+ ath_print(common, ATH_DBG_ANI,
+ "invalid cmd %u\n", cmd);
+ return false;
+ }
+
+ ath_print(common, ATH_DBG_ANI, "ANI parameters:\n");
+ ath_print(common, ATH_DBG_ANI,
+ "noiseImmunityLevel=%d, spurImmunityLevel=%d, "
+ "ofdmWeakSigDetectOff=%d\n",
+ aniState->noiseImmunityLevel,
+ aniState->spurImmunityLevel,
+ !aniState->ofdmWeakSigDetectOff);
+ ath_print(common, ATH_DBG_ANI,
+ "cckWeakSigThreshold=%d, "
+ "firstepLevel=%d, listenTime=%d\n",
+ aniState->cckWeakSigThreshold,
+ aniState->firstepLevel,
+ aniState->listenTime);
+ ath_print(common, ATH_DBG_ANI,
+ "cycleCount=%d, ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
+ aniState->cycleCount,
+ aniState->ofdmPhyErrCount,
+ aniState->cckPhyErrCount);
+
+ return true;
+}
+
+static void ar9003_hw_nf_sanitize_2g(struct ath_hw *ah, s16 *nf)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (*nf > ah->nf_2g_max) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "2 GHz NF (%d) > MAX (%d), "
+ "correcting to MAX",
+ *nf, ah->nf_2g_max);
+ *nf = ah->nf_2g_max;
+ } else if (*nf < ah->nf_2g_min) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "2 GHz NF (%d) < MIN (%d), "
+ "correcting to MIN",
+ *nf, ah->nf_2g_min);
+ *nf = ah->nf_2g_min;
+ }
+}
+
+static void ar9003_hw_nf_sanitize_5g(struct ath_hw *ah, s16 *nf)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (*nf > ah->nf_5g_max) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "5 GHz NF (%d) > MAX (%d), "
+ "correcting to MAX",
+ *nf, ah->nf_5g_max);
+ *nf = ah->nf_5g_max;
+ } else if (*nf < ah->nf_5g_min) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "5 GHz NF (%d) < MIN (%d), "
+ "correcting to MIN",
+ *nf, ah->nf_5g_min);
+ *nf = ah->nf_5g_min;
+ }
+}
+
+static void ar9003_hw_nf_sanitize(struct ath_hw *ah, s16 *nf)
+{
+ if (IS_CHAN_2GHZ(ah->curchan))
+ ar9003_hw_nf_sanitize_2g(ah, nf);
+ else
+ ar9003_hw_nf_sanitize_5g(ah, nf);
+}
+
+static void ar9003_hw_do_getnf(struct ath_hw *ah,
+ int16_t nfarray[NUM_NF_READINGS])
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int16_t nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_CCA_0), AR_PHY_MINCCA_PWR);
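+ /* The minCCApwr fields are 9-bit two's complement; sign-extend. */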
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ar9003_hw_nf_sanitize(ah, &nf);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ctl] [chain 0] is %d\n", nf);
+ nfarray[0] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_CCA_1), AR_PHY_CH1_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ar9003_hw_nf_sanitize(ah, &nf);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ctl] [chain 1] is %d\n", nf);
+ nfarray[1] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_CCA_2), AR_PHY_CH2_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ar9003_hw_nf_sanitize(ah, &nf);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ctl] [chain 2] is %d\n", nf);
+ nfarray[2] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ar9003_hw_nf_sanitize(ah, &nf);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ext] [chain 0] is %d\n", nf);
+ nfarray[3] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_1), AR_PHY_CH1_EXT_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ar9003_hw_nf_sanitize(ah, &nf);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ext] [chain 1] is %d\n", nf);
+ nfarray[4] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_2), AR_PHY_CH2_EXT_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ar9003_hw_nf_sanitize(ah, &nf);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ext] [chain 2] is %d\n", nf);
+ nfarray[5] = nf;
+}
+
+void ar9003_hw_set_nf_limits(struct ath_hw *ah)
+{
+ ah->nf_2g_max = AR_PHY_CCA_MAX_GOOD_VAL_9300_2GHZ;
+ ah->nf_2g_min = AR_PHY_CCA_MIN_GOOD_VAL_9300_2GHZ;
+ ah->nf_5g_max = AR_PHY_CCA_MAX_GOOD_VAL_9300_5GHZ;
+ ah->nf_5g_min = AR_PHY_CCA_MIN_GOOD_VAL_9300_5GHZ;
+}
+
+/*
+ * Find out which of the RX chains are enabled
+ */
+static u32 ar9003_hw_get_rx_chainmask(struct ath_hw *ah)
+{
+ u32 chain = REG_READ(ah, AR_PHY_RX_CHAINMASK);
+ /*
+ * The bits [2:0] indicate the rx chain mask and are to be
+ * interpreted as follows:
+ * 00x => Only chain 0 is enabled
+ * 01x => Chains 1 and 0 enabled
+ * 1xx => Chains 2, 1 and 0 enabled
+ */
+ return chain & 0x7;
+}
+
+static void ar9003_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ struct ath9k_nfcal_hist *h;
+ unsigned i, j;
+ int32_t val;
+ const u32 ar9300_cca_regs[6] = {
+ AR_PHY_CCA_0,
+ AR_PHY_CCA_1,
+ AR_PHY_CCA_2,
+ AR_PHY_EXT_CCA,
+ AR_PHY_EXT_CCA_1,
+ AR_PHY_EXT_CCA_2,
+ };
+ u8 chainmask, rx_chain_status;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ rx_chain_status = ar9003_hw_get_rx_chainmask(ah);
+
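+ /* Update all six CCA registers: control and extension for chains 0-2. */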
+ chainmask = 0x3F;
+ h = ah->nfCalHist;
+
+ for (i = 0; i < NUM_NF_READINGS; i++) {
+ if (chainmask & (1 << i)) {
+ val = REG_READ(ah, ar9300_cca_regs[i]);
+ val &= 0xFFFFFE00;
+ val |= (((u32) (h[i].privNF) << 1) & 0x1ff);
+ REG_WRITE(ah, ar9300_cca_regs[i], val);
+ }
+ }
+
+ /*
+ * Load software filtered NF value into baseband internal minCCApwr
+ * variable.
+ */
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_ENABLE_NF);
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
+
+ /*
+ * Wait for the load to complete; it should be fast, a few tens of us.
+ * The max delay was changed from the original 250us to 10000us, since
+ * 250us often resulted in an NF load timeout and caused a deaf
+ * condition during stress testing 12/12/2009.
+ */
+ for (j = 0; j < 1000; j++) {
+ if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
+ AR_PHY_AGC_CONTROL_NF) == 0)
+ break;
+ udelay(10);
+ }
+
+ /*
+ * We timed out waiting for the noisefloor to load, probably due to an
+ * in-progress rx. Simply return here and allow the load plenty of time
+ * to complete before the next calibration interval. We need to avoid
+ * trying to load -50 (which happens below) while the previous load is
+ * still in progress, as this can cause rx deafness. Instead, by returning
+ * here, the baseband nf cal will just be capped by our present
+ * noisefloor until the next calibration timer.
+ */
+ if (j == 1000) {
+ ath_print(common, ATH_DBG_ANY, "Timeout while waiting for nf "
+ "to load: AR_PHY_AGC_CONTROL=0x%x\n",
+ REG_READ(ah, AR_PHY_AGC_CONTROL));
+ return;
+ }
+
+ /*
+ * Restore the maxCCAPower register parameter so that we're not capped
+ * by the median we just loaded. This will be the initial (and max)
+ * value of the next noise floor calibration the baseband does.
+ */
+ for (i = 0; i < NUM_NF_READINGS; i++) {
+ if (chainmask & (1 << i)) {
+ val = REG_READ(ah, ar9300_cca_regs[i]);
+ val &= 0xFFFFFE00;
+ val |= (((u32) (-50) << 1) & 0x1ff);
+ REG_WRITE(ah, ar9300_cca_regs[i], val);
+ }
+ }
+}
+
+void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+
+ priv_ops->rf_set_freq = ar9003_hw_set_channel;
+ priv_ops->spur_mitigate_freq = ar9003_hw_spur_mitigate;
+ priv_ops->compute_pll_control = ar9003_hw_compute_pll_control;
+ priv_ops->set_channel_regs = ar9003_hw_set_channel_regs;
+ priv_ops->init_bb = ar9003_hw_init_bb;
+ priv_ops->process_ini = ar9003_hw_process_ini;
+ priv_ops->set_rfmode = ar9003_hw_set_rfmode;
+ priv_ops->mark_phy_inactive = ar9003_hw_mark_phy_inactive;
+ priv_ops->set_delta_slope = ar9003_hw_set_delta_slope;
+ priv_ops->rfbus_req = ar9003_hw_rfbus_req;
+ priv_ops->rfbus_done = ar9003_hw_rfbus_done;
+ priv_ops->enable_rfkill = ar9003_hw_enable_rfkill;
+ priv_ops->set_diversity = ar9003_hw_set_diversity;
+ priv_ops->ani_control = ar9003_hw_ani_control;
+ priv_ops->do_getnf = ar9003_hw_do_getnf;
+ priv_ops->loadnf = ar9003_hw_loadnf;
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
new file mode 100644
index 000000000000..f08cc8bda005
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -0,0 +1,847 @@
+/*
+ * Copyright (c) 2002-2010 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef AR9003_PHY_H
+#define AR9003_PHY_H
+
+/*
+ * Channel Register Map
+ */
+#define AR_CHAN_BASE 0x9800
+
+#define AR_PHY_TIMING1 (AR_CHAN_BASE + 0x0)
+#define AR_PHY_TIMING2 (AR_CHAN_BASE + 0x4)
+#define AR_PHY_TIMING3 (AR_CHAN_BASE + 0x8)
+#define AR_PHY_TIMING4 (AR_CHAN_BASE + 0xc)
+#define AR_PHY_TIMING5 (AR_CHAN_BASE + 0x10)
+#define AR_PHY_TIMING6 (AR_CHAN_BASE + 0x14)
+#define AR_PHY_TIMING11 (AR_CHAN_BASE + 0x18)
+#define AR_PHY_SPUR_REG (AR_CHAN_BASE + 0x1c)
+#define AR_PHY_RX_IQCAL_CORR_B0 (AR_CHAN_BASE + 0xdc)
+#define AR_PHY_TX_IQCAL_CONTROL_3 (AR_CHAN_BASE + 0xb0)
+
+#define AR_PHY_TIMING11_SPUR_FREQ_SD 0x3FF00000
+#define AR_PHY_TIMING11_SPUR_FREQ_SD_S 20
+
+#define AR_PHY_TIMING11_SPUR_DELTA_PHASE 0x000FFFFF
+#define AR_PHY_TIMING11_SPUR_DELTA_PHASE_S 0
+
+#define AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC 0x40000000
+#define AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC_S 30
+
+#define AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR 0x80000000
+#define AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR_S 31
+
+#define AR_PHY_SPUR_REG_ENABLE_NF_RSSI_SPUR_MIT 0x4000000
+#define AR_PHY_SPUR_REG_ENABLE_NF_RSSI_SPUR_MIT_S 26
+
+#define AR_PHY_SPUR_REG_ENABLE_MASK_PPM 0x20000 /* bins move with freq offset */
+#define AR_PHY_SPUR_REG_ENABLE_MASK_PPM_S 17
+#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH 0x000000FF
+#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH_S 0
+#define AR_PHY_SPUR_REG_EN_VIT_SPUR_RSSI 0x00000100
+#define AR_PHY_SPUR_REG_EN_VIT_SPUR_RSSI_S 8
+#define AR_PHY_SPUR_REG_MASK_RATE_CNTL 0x03FC0000
+#define AR_PHY_SPUR_REG_MASK_RATE_CNTL_S 18
+
+#define AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN 0x20000000
+#define AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN_S 29
+
+#define AR_PHY_TX_IQCAL_CONTROL_3_IQCORR_EN 0x80000000
+#define AR_PHY_TX_IQCAL_CONTROL_3_IQCORR_EN_S 31
+
+#define AR_PHY_FIND_SIG_LOW (AR_CHAN_BASE + 0x20)
+
+#define AR_PHY_SFCORR (AR_CHAN_BASE + 0x24)
+#define AR_PHY_SFCORR_LOW (AR_CHAN_BASE + 0x28)
+#define AR_PHY_SFCORR_EXT (AR_CHAN_BASE + 0x2c)
+
+#define AR_PHY_EXT_CCA (AR_CHAN_BASE + 0x30)
+#define AR_PHY_RADAR_0 (AR_CHAN_BASE + 0x34)
+#define AR_PHY_RADAR_1 (AR_CHAN_BASE + 0x38)
+#define AR_PHY_RADAR_EXT (AR_CHAN_BASE + 0x3c)
+#define AR_PHY_MULTICHAIN_CTRL (AR_CHAN_BASE + 0x80)
+#define AR_PHY_PERCHAIN_CSD (AR_CHAN_BASE + 0x84)
+
+#define AR_PHY_TX_PHASE_RAMP_0 (AR_CHAN_BASE + 0xd0)
+#define AR_PHY_ADC_GAIN_DC_CORR_0 (AR_CHAN_BASE + 0xd4)
+#define AR_PHY_IQ_ADC_MEAS_0_B0 (AR_CHAN_BASE + 0xc0)
+#define AR_PHY_IQ_ADC_MEAS_1_B0 (AR_CHAN_BASE + 0xc4)
+#define AR_PHY_IQ_ADC_MEAS_2_B0 (AR_CHAN_BASE + 0xc8)
+#define AR_PHY_IQ_ADC_MEAS_3_B0 (AR_CHAN_BASE + 0xcc)
+
+/* The following registers changed position from AR9300 1.0 to AR9300 2.0 */
+#define AR_PHY_TX_PHASE_RAMP_0_9300_10 (AR_CHAN_BASE + 0xd0 - 0x10)
+#define AR_PHY_ADC_GAIN_DC_CORR_0_9300_10 (AR_CHAN_BASE + 0xd4 - 0x10)
+#define AR_PHY_IQ_ADC_MEAS_0_B0_9300_10 (AR_CHAN_BASE + 0xc0 + 0x8)
+#define AR_PHY_IQ_ADC_MEAS_1_B0_9300_10 (AR_CHAN_BASE + 0xc4 + 0x8)
+#define AR_PHY_IQ_ADC_MEAS_2_B0_9300_10 (AR_CHAN_BASE + 0xc8 + 0x8)
+#define AR_PHY_IQ_ADC_MEAS_3_B0_9300_10 (AR_CHAN_BASE + 0xcc + 0x8)
+
+#define AR_PHY_TX_CRC (AR_CHAN_BASE + 0xa0)
+#define AR_PHY_TST_DAC_CONST (AR_CHAN_BASE + 0xa4)
+#define AR_PHY_SPUR_REPORT_0 (AR_CHAN_BASE + 0xa8)
+#define AR_PHY_CHAN_INFO_TAB_0 (AR_CHAN_BASE + 0x300)
+
+/*
+ * Channel Field Definitions
+ */
+#define AR_PHY_TIMING2_USE_FORCE_PPM 0x00001000
+#define AR_PHY_TIMING2_FORCE_PPM_VAL 0x00000fff
+#define AR_PHY_TIMING3_DSC_MAN 0xFFFE0000
+#define AR_PHY_TIMING3_DSC_MAN_S 17
+#define AR_PHY_TIMING3_DSC_EXP 0x0001E000
+#define AR_PHY_TIMING3_DSC_EXP_S 13
+#define AR_PHY_TIMING4_IQCAL_LOG_COUNT_MAX 0xF000
+#define AR_PHY_TIMING4_IQCAL_LOG_COUNT_MAX_S 12
+#define AR_PHY_TIMING4_DO_CAL 0x10000
+
+#define AR_PHY_TIMING4_ENABLE_PILOT_MASK 0x10000000
+#define AR_PHY_TIMING4_ENABLE_PILOT_MASK_S 28
+#define AR_PHY_TIMING4_ENABLE_CHAN_MASK 0x20000000
+#define AR_PHY_TIMING4_ENABLE_CHAN_MASK_S 29
+
+#define AR_PHY_TIMING4_ENABLE_SPUR_FILTER 0x40000000
+#define AR_PHY_TIMING4_ENABLE_SPUR_FILTER_S 30
+#define AR_PHY_TIMING4_ENABLE_SPUR_RSSI 0x80000000
+#define AR_PHY_TIMING4_ENABLE_SPUR_RSSI_S 31
+
+#define AR_PHY_NEW_ADC_GAIN_CORR_ENABLE 0x40000000
+#define AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE 0x80000000
+#define AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW 0x00000001
+#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW 0x00003F00
+#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW_S 8
+#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW 0x001FC000
+#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW_S 14
+#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW 0x0FE00000
+#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW_S 21
+#define AR_PHY_SFCORR_M2COUNT_THR 0x0000001F
+#define AR_PHY_SFCORR_M2COUNT_THR_S 0
+#define AR_PHY_SFCORR_M1_THRESH 0x00FE0000
+#define AR_PHY_SFCORR_M1_THRESH_S 17
+#define AR_PHY_SFCORR_M2_THRESH 0x7F000000
+#define AR_PHY_SFCORR_M2_THRESH_S 24
+#define AR_PHY_SFCORR_EXT_M1_THRESH 0x0000007F
+#define AR_PHY_SFCORR_EXT_M1_THRESH_S 0
+#define AR_PHY_SFCORR_EXT_M2_THRESH 0x00003F80
+#define AR_PHY_SFCORR_EXT_M2_THRESH_S 7
+#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW 0x001FC000
+#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW_S 14
+#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW 0x0FE00000
+#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW_S 21
+#define AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD 0x10000000
+#define AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD_S 28
+#define AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S 28
+#define AR_PHY_EXT_CCA_THRESH62 0x007F0000
+#define AR_PHY_EXT_CCA_THRESH62_S 16
+#define AR_PHY_EXT_MINCCA_PWR 0x01FF0000
+#define AR_PHY_EXT_MINCCA_PWR_S 16
+#define AR_PHY_TIMING5_CYCPWR_THR1 0x000000FE
+#define AR_PHY_TIMING5_CYCPWR_THR1_S 1
+#define AR_PHY_TIMING5_CYCPWR_THR1_ENABLE 0x00000001
+#define AR_PHY_TIMING5_CYCPWR_THR1_ENABLE_S 0
+#define AR_PHY_TIMING5_CYCPWR_THR1A 0x007F0000
+#define AR_PHY_TIMING5_CYCPWR_THR1A_S 16
+#define AR_PHY_TIMING5_RSSI_THR1A (0x7F << 16)
+#define AR_PHY_TIMING5_RSSI_THR1A_S 16
+#define AR_PHY_TIMING5_RSSI_THR1A_ENA (0x1 << 15)
+#define AR_PHY_RADAR_0_ENA 0x00000001
+#define AR_PHY_RADAR_0_FFT_ENA 0x80000000
+#define AR_PHY_RADAR_0_INBAND 0x0000003e
+#define AR_PHY_RADAR_0_INBAND_S 1
+#define AR_PHY_RADAR_0_PRSSI 0x00000FC0
+#define AR_PHY_RADAR_0_PRSSI_S 6
+#define AR_PHY_RADAR_0_HEIGHT 0x0003F000
+#define AR_PHY_RADAR_0_HEIGHT_S 12
+#define AR_PHY_RADAR_0_RRSSI 0x00FC0000
+#define AR_PHY_RADAR_0_RRSSI_S 18
+#define AR_PHY_RADAR_0_FIRPWR 0x7F000000
+#define AR_PHY_RADAR_0_FIRPWR_S 24
+#define AR_PHY_RADAR_1_RELPWR_ENA 0x00800000
+#define AR_PHY_RADAR_1_USE_FIR128 0x00400000
+#define AR_PHY_RADAR_1_RELPWR_THRESH 0x003F0000
+#define AR_PHY_RADAR_1_RELPWR_THRESH_S 16
+#define AR_PHY_RADAR_1_BLOCK_CHECK 0x00008000
+#define AR_PHY_RADAR_1_MAX_RRSSI 0x00004000
+#define AR_PHY_RADAR_1_RELSTEP_CHECK 0x00002000
+#define AR_PHY_RADAR_1_RELSTEP_THRESH 0x00001F00
+#define AR_PHY_RADAR_1_RELSTEP_THRESH_S 8
+#define AR_PHY_RADAR_1_MAXLEN 0x000000FF
+#define AR_PHY_RADAR_1_MAXLEN_S 0
+#define AR_PHY_RADAR_EXT_ENA 0x00004000
+#define AR_PHY_RADAR_DC_PWR_THRESH 0x007f8000
+#define AR_PHY_RADAR_DC_PWR_THRESH_S 15
+#define AR_PHY_RADAR_LB_DC_CAP 0x7f800000
+#define AR_PHY_RADAR_LB_DC_CAP_S 23
+#define AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW (0x3f << 6)
+#define AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW_S 6
+#define AR_PHY_FIND_SIG_LOW_FIRPWR (0x7f << 12)
+#define AR_PHY_FIND_SIG_LOW_FIRPWR_S 12
+#define AR_PHY_FIND_SIG_LOW_FIRPWR_SIGN_BIT 19
+#define AR_PHY_FIND_SIG_LOW_RELSTEP 0x1f
+#define AR_PHY_FIND_SIG_LOW_RELSTEP_S 0
+#define AR_PHY_FIND_SIG_LOW_RELSTEP_SIGN_BIT 5
+#define AR_PHY_CHAN_INFO_TAB_S2_READ 0x00000008
+#define AR_PHY_CHAN_INFO_TAB_S2_READ_S 3
+#define AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF 0x0000007F
+#define AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF_S 0
+#define AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF 0x00003F80
+#define AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF_S 7
+#define AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE 0x00004000
+#define AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_Q_COFF 0x003f8000
+#define AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_Q_COFF_S 15
+#define AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_I_COFF 0x1fc00000
+#define AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_I_COFF_S 22
+
+/*
+ * MRC Register Map
+ */
+#define AR_MRC_BASE 0x9c00
+
+#define AR_PHY_TIMING_3A (AR_MRC_BASE + 0x0)
+#define AR_PHY_LDPC_CNTL1 (AR_MRC_BASE + 0x4)
+#define AR_PHY_LDPC_CNTL2 (AR_MRC_BASE + 0x8)
+#define AR_PHY_PILOT_SPUR_MASK (AR_MRC_BASE + 0xc)
+#define AR_PHY_CHAN_SPUR_MASK (AR_MRC_BASE + 0x10)
+#define AR_PHY_SGI_DELTA (AR_MRC_BASE + 0x14)
+#define AR_PHY_ML_CNTL_1 (AR_MRC_BASE + 0x18)
+#define AR_PHY_ML_CNTL_2 (AR_MRC_BASE + 0x1c)
+#define AR_PHY_TST_ADC (AR_MRC_BASE + 0x20)
+
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A 0x00000FE0
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A_S 5
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A 0x1F
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A_S 0
+
+#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A 0x00000FE0
+#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A_S 5
+#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A 0x1F
+#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A_S 0
+
+/*
+ * MRC Field Definitions
+ */
+#define AR_PHY_SGI_DSC_MAN 0x0007FFF0
+#define AR_PHY_SGI_DSC_MAN_S 4
+#define AR_PHY_SGI_DSC_EXP 0x0000000F
+#define AR_PHY_SGI_DSC_EXP_S 0
+/*
+ * BBB Register Map
+ */
+#define AR_BBB_BASE 0x9d00
+
+/*
+ * AGC Register Map
+ */
+#define AR_AGC_BASE 0x9e00
+
+#define AR_PHY_SETTLING (AR_AGC_BASE + 0x0)
+#define AR_PHY_FORCEMAX_GAINS_0 (AR_AGC_BASE + 0x4)
+#define AR_PHY_GAINS_MINOFF0 (AR_AGC_BASE + 0x8)
+#define AR_PHY_DESIRED_SZ (AR_AGC_BASE + 0xc)
+#define AR_PHY_FIND_SIG (AR_AGC_BASE + 0x10)
+#define AR_PHY_AGC (AR_AGC_BASE + 0x14)
+#define AR_PHY_EXT_ATTEN_CTL_0 (AR_AGC_BASE + 0x18)
+#define AR_PHY_CCA_0 (AR_AGC_BASE + 0x1c)
+#define AR_PHY_EXT_CCA0 (AR_AGC_BASE + 0x20)
+#define AR_PHY_RESTART (AR_AGC_BASE + 0x24)
+#define AR_PHY_MC_GAIN_CTRL (AR_AGC_BASE + 0x28)
+#define AR_PHY_EXTCHN_PWRTHR1 (AR_AGC_BASE + 0x2c)
+#define AR_PHY_EXT_CHN_WIN (AR_AGC_BASE + 0x30)
+#define AR_PHY_20_40_DET_THR (AR_AGC_BASE + 0x34)
+#define AR_PHY_RIFS_SRCH (AR_AGC_BASE + 0x38)
+#define AR_PHY_PEAK_DET_CTRL_1 (AR_AGC_BASE + 0x3c)
+#define AR_PHY_PEAK_DET_CTRL_2 (AR_AGC_BASE + 0x40)
+#define AR_PHY_RX_GAIN_BOUNDS_1 (AR_AGC_BASE + 0x44)
+#define AR_PHY_RX_GAIN_BOUNDS_2 (AR_AGC_BASE + 0x48)
+#define AR_PHY_RSSI_0 (AR_AGC_BASE + 0x180)
+#define AR_PHY_SPUR_CCK_REP0 (AR_AGC_BASE + 0x184)
+#define AR_PHY_CCK_DETECT (AR_AGC_BASE + 0x1c0)
+#define AR_PHY_DAG_CTRLCCK (AR_AGC_BASE + 0x1c4)
+#define AR_PHY_IQCORR_CTRL_CCK (AR_AGC_BASE + 0x1c8)
+
+#define AR_PHY_CCK_SPUR_MIT (AR_AGC_BASE + 0x1cc)
+#define AR_PHY_CCK_SPUR_MIT_SPUR_RSSI_THR 0x000001fe
+#define AR_PHY_CCK_SPUR_MIT_SPUR_RSSI_THR_S 1
+#define AR_PHY_CCK_SPUR_MIT_SPUR_FILTER_TYPE 0x60000000
+#define AR_PHY_CCK_SPUR_MIT_SPUR_FILTER_TYPE_S 29
+#define AR_PHY_CCK_SPUR_MIT_USE_CCK_SPUR_MIT 0x00000001
+#define AR_PHY_CCK_SPUR_MIT_USE_CCK_SPUR_MIT_S 0
+#define AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ 0x1ffffe00
+#define AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ_S 9
+
+#define AR_PHY_RX_OCGAIN (AR_AGC_BASE + 0x200)
+
+#define AR_PHY_CCA_NOM_VAL_9300_2GHZ -110
+#define AR_PHY_CCA_NOM_VAL_9300_5GHZ -115
+#define AR_PHY_CCA_MIN_GOOD_VAL_9300_2GHZ -125
+#define AR_PHY_CCA_MIN_GOOD_VAL_9300_5GHZ -125
+#define AR_PHY_CCA_MAX_GOOD_VAL_9300_2GHZ -95
+#define AR_PHY_CCA_MAX_GOOD_VAL_9300_5GHZ -100
+
+/*
+ * AGC Field Definitions
+ */
+#define AR_PHY_EXT_ATTEN_CTL_RXTX_MARGIN 0x00FC0000
+#define AR_PHY_EXT_ATTEN_CTL_RXTX_MARGIN_S 18
+#define AR_PHY_EXT_ATTEN_CTL_BSW_MARGIN 0x00003C00
+#define AR_PHY_EXT_ATTEN_CTL_BSW_MARGIN_S 10
+#define AR_PHY_EXT_ATTEN_CTL_BSW_ATTEN 0x0000001F
+#define AR_PHY_EXT_ATTEN_CTL_BSW_ATTEN_S 0
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN2_MARGIN 0x003E0000
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN2_MARGIN_S 17
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN 0x0001F000
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN_S 12
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN2_DB 0x00000FC0
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN2_DB_S 6
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN1_DB 0x0000003F
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN1_DB_S 0
+#define AR_PHY_RXGAIN_TXRX_ATTEN 0x0003F000
+#define AR_PHY_RXGAIN_TXRX_ATTEN_S 12
+#define AR_PHY_RXGAIN_TXRX_RF_MAX 0x007C0000
+#define AR_PHY_RXGAIN_TXRX_RF_MAX_S 18
+#define AR9280_PHY_RXGAIN_TXRX_ATTEN 0x00003F80
+#define AR9280_PHY_RXGAIN_TXRX_ATTEN_S 7
+#define AR9280_PHY_RXGAIN_TXRX_MARGIN 0x001FC000
+#define AR9280_PHY_RXGAIN_TXRX_MARGIN_S 14
+#define AR_PHY_SETTLING_SWITCH 0x00003F80
+#define AR_PHY_SETTLING_SWITCH_S 7
+#define AR_PHY_DESIRED_SZ_ADC 0x000000FF
+#define AR_PHY_DESIRED_SZ_ADC_S 0
+#define AR_PHY_DESIRED_SZ_PGA 0x0000FF00
+#define AR_PHY_DESIRED_SZ_PGA_S 8
+#define AR_PHY_DESIRED_SZ_TOT_DES 0x0FF00000
+#define AR_PHY_DESIRED_SZ_TOT_DES_S 20
+#define AR_PHY_MINCCA_PWR 0x1FF00000
+#define AR_PHY_MINCCA_PWR_S 20
+#define AR_PHY_CCA_THRESH62 0x0007F000
+#define AR_PHY_CCA_THRESH62_S 12
+#define AR9280_PHY_MINCCA_PWR 0x1FF00000
+#define AR9280_PHY_MINCCA_PWR_S 20
+#define AR9280_PHY_CCA_THRESH62 0x000FF000
+#define AR9280_PHY_CCA_THRESH62_S 12
+#define AR_PHY_EXT_CCA0_THRESH62 0x000000FF
+#define AR_PHY_EXT_CCA0_THRESH62_S 0
+#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK 0x0000003F
+#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK_S 0
+#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME 0x00001FC0
+#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME_S 6
+#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV 0x2000
+
+#define AR_PHY_DAG_CTRLCCK_EN_RSSI_THR 0x00000200
+#define AR_PHY_DAG_CTRLCCK_EN_RSSI_THR_S 9
+#define AR_PHY_DAG_CTRLCCK_RSSI_THR 0x0001FC00
+#define AR_PHY_DAG_CTRLCCK_RSSI_THR_S 10
+
+#define AR_PHY_RIFS_INIT_DELAY 0x3ff0000
+#define AR_PHY_AGC_COARSE_LOW 0x00007F80
+#define AR_PHY_AGC_COARSE_LOW_S 7
+#define AR_PHY_AGC_COARSE_HIGH 0x003F8000
+#define AR_PHY_AGC_COARSE_HIGH_S 15
+#define AR_PHY_AGC_COARSE_PWR_CONST 0x0000007F
+#define AR_PHY_AGC_COARSE_PWR_CONST_S 0
+#define AR_PHY_FIND_SIG_FIRSTEP 0x0003F000
+#define AR_PHY_FIND_SIG_FIRSTEP_S 12
+#define AR_PHY_FIND_SIG_FIRPWR 0x03FC0000
+#define AR_PHY_FIND_SIG_FIRPWR_S 18
+#define AR_PHY_FIND_SIG_FIRPWR_SIGN_BIT 25
+#define AR_PHY_FIND_SIG_RELPWR (0x1f << 6)
+#define AR_PHY_FIND_SIG_RELPWR_S 6
+#define AR_PHY_FIND_SIG_RELPWR_SIGN_BIT 11
+#define AR_PHY_FIND_SIG_RELSTEP 0x1f
+#define AR_PHY_FIND_SIG_RELSTEP_S 0
+#define AR_PHY_FIND_SIG_RELSTEP_SIGN_BIT 5
+#define AR_PHY_RESTART_DIV_GC 0x001C0000
+#define AR_PHY_RESTART_DIV_GC_S 18
+#define AR_PHY_RESTART_ENA 0x01
+#define AR_PHY_DC_RESTART_DIS 0x40000000
+
+#define AR_PHY_TPC_OLPC_GAIN_DELTA_PAL_ON 0xFF000000
+#define AR_PHY_TPC_OLPC_GAIN_DELTA_PAL_ON_S 24
+#define AR_PHY_TPC_OLPC_GAIN_DELTA 0x00FF0000
+#define AR_PHY_TPC_OLPC_GAIN_DELTA_S 16
+
+#define AR_PHY_TPC_6_ERROR_EST_MODE 0x03000000
+#define AR_PHY_TPC_6_ERROR_EST_MODE_S 24
+
+/*
+ * SM Register Map
+ */
+#define AR_SM_BASE 0xa200
+
+#define AR_PHY_D2_CHIP_ID (AR_SM_BASE + 0x0)
+#define AR_PHY_GEN_CTRL (AR_SM_BASE + 0x4)
+#define AR_PHY_MODE (AR_SM_BASE + 0x8)
+#define AR_PHY_ACTIVE (AR_SM_BASE + 0xc)
+#define AR_PHY_SPUR_MASK_A (AR_SM_BASE + 0x20)
+#define AR_PHY_SPUR_MASK_B (AR_SM_BASE + 0x24)
+#define AR_PHY_SPECTRAL_SCAN (AR_SM_BASE + 0x28)
+#define AR_PHY_RADAR_BW_FILTER (AR_SM_BASE + 0x2c)
+#define AR_PHY_SEARCH_START_DELAY (AR_SM_BASE + 0x30)
+#define AR_PHY_MAX_RX_LEN (AR_SM_BASE + 0x34)
+#define AR_PHY_FRAME_CTL (AR_SM_BASE + 0x38)
+#define AR_PHY_RFBUS_REQ (AR_SM_BASE + 0x3c)
+#define AR_PHY_RFBUS_GRANT (AR_SM_BASE + 0x40)
+#define AR_PHY_RIFS (AR_SM_BASE + 0x44)
+#define AR_PHY_RX_CLR_DELAY (AR_SM_BASE + 0x50)
+#define AR_PHY_RX_DELAY (AR_SM_BASE + 0x54)
+
+#define AR_PHY_XPA_TIMING_CTL (AR_SM_BASE + 0x64)
+#define AR_PHY_MISC_PA_CTL (AR_SM_BASE + 0x80)
+#define AR_PHY_SWITCH_CHAIN_0 (AR_SM_BASE + 0x84)
+#define AR_PHY_SWITCH_COM (AR_SM_BASE + 0x88)
+#define AR_PHY_SWITCH_COM_2 (AR_SM_BASE + 0x8c)
+#define AR_PHY_RX_CHAINMASK (AR_SM_BASE + 0xa0)
+#define AR_PHY_CAL_CHAINMASK (AR_SM_BASE + 0xc0)
+#define AR_PHY_CALMODE (AR_SM_BASE + 0xc8)
+#define AR_PHY_FCAL_1 (AR_SM_BASE + 0xcc)
+#define AR_PHY_FCAL_2_0 (AR_SM_BASE + 0xd0)
+#define AR_PHY_DFT_TONE_CTL_0 (AR_SM_BASE + 0xd4)
+#define AR_PHY_CL_CAL_CTL (AR_SM_BASE + 0xd8)
+#define AR_PHY_CL_TAB_0 (AR_SM_BASE + 0x100)
+#define AR_PHY_SYNTH_CONTROL (AR_SM_BASE + 0x140)
+#define AR_PHY_ADDAC_CLK_SEL (AR_SM_BASE + 0x144)
+#define AR_PHY_PLL_CTL (AR_SM_BASE + 0x148)
+#define AR_PHY_ANALOG_SWAP (AR_SM_BASE + 0x14c)
+#define AR_PHY_ADDAC_PARA_CTL (AR_SM_BASE + 0x150)
+#define AR_PHY_XPA_CFG (AR_SM_BASE + 0x158)
+
+#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A 0x0001FC00
+#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A_S 10
+#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A 0x3FF
+#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A_S 0
+
+#define AR_PHY_TEST (AR_SM_BASE + 0x160)
+
+#define AR_PHY_TEST_BBB_OBS_SEL 0x780000
+#define AR_PHY_TEST_BBB_OBS_SEL_S 19
+
+#define AR_PHY_TEST_RX_OBS_SEL_BIT5_S 23
+#define AR_PHY_TEST_RX_OBS_SEL_BIT5 (1 << AR_PHY_TEST_RX_OBS_SEL_BIT5_S)
+
+#define AR_PHY_TEST_CHAIN_SEL 0xC0000000
+#define AR_PHY_TEST_CHAIN_SEL_S 30
+
+#define AR_PHY_TEST_CTL_STATUS (AR_SM_BASE + 0x164)
+#define AR_PHY_TEST_CTL_TSTDAC_EN 0x1
+#define AR_PHY_TEST_CTL_TSTDAC_EN_S 0
+#define AR_PHY_TEST_CTL_TX_OBS_SEL 0x1C
+#define AR_PHY_TEST_CTL_TX_OBS_SEL_S 2
+#define AR_PHY_TEST_CTL_TX_OBS_MUX_SEL 0x60
+#define AR_PHY_TEST_CTL_TX_OBS_MUX_SEL_S 5
+#define AR_PHY_TEST_CTL_TSTADC_EN 0x100
+#define AR_PHY_TEST_CTL_TSTADC_EN_S 8
+#define AR_PHY_TEST_CTL_RX_OBS_SEL 0x3C00
+#define AR_PHY_TEST_CTL_RX_OBS_SEL_S 10
+
+
+#define AR_PHY_TSTDAC (AR_SM_BASE + 0x168)
+
+#define AR_PHY_CHAN_STATUS (AR_SM_BASE + 0x16c)
+#define AR_PHY_CHAN_INFO_MEMORY (AR_SM_BASE + 0x170)
+#define AR_PHY_CHNINFO_NOISEPWR (AR_SM_BASE + 0x174)
+#define AR_PHY_CHNINFO_GAINDIFF (AR_SM_BASE + 0x178)
+#define AR_PHY_CHNINFO_FINETIM (AR_SM_BASE + 0x17c)
+#define AR_PHY_CHAN_INFO_GAIN_0 (AR_SM_BASE + 0x180)
+#define AR_PHY_SCRAMBLER_SEED (AR_SM_BASE + 0x190)
+#define AR_PHY_CCK_TX_CTRL (AR_SM_BASE + 0x194)
+
+#define AR_PHY_HEAVYCLIP_CTL (AR_SM_BASE + 0x1a4)
+#define AR_PHY_HEAVYCLIP_20 (AR_SM_BASE + 0x1a8)
+#define AR_PHY_HEAVYCLIP_40 (AR_SM_BASE + 0x1ac)
+#define AR_PHY_ILLEGAL_TXRATE (AR_SM_BASE + 0x1b0)
+
+#define AR_PHY_PWRTX_MAX (AR_SM_BASE + 0x1f0)
+#define AR_PHY_POWER_TX_SUB (AR_SM_BASE + 0x1f4)
+
+#define AR_PHY_TPC_4_B0 (AR_SM_BASE + 0x204)
+#define AR_PHY_TPC_5_B0 (AR_SM_BASE + 0x208)
+#define AR_PHY_TPC_6_B0 (AR_SM_BASE + 0x20c)
+#define AR_PHY_TPC_11_B0 (AR_SM_BASE + 0x220)
+#define AR_PHY_TPC_18 (AR_SM_BASE + 0x23c)
+#define AR_PHY_TPC_19 (AR_SM_BASE + 0x240)
+
+#define AR_PHY_TX_FORCED_GAIN (AR_SM_BASE + 0x258)
+
+#define AR_PHY_PDADC_TAB_0 (AR_SM_BASE + 0x280)
+
+#define AR_PHY_TX_IQCAL_CONTROL_1 (AR_SM_BASE + 0x448)
+#define AR_PHY_TX_IQCAL_START (AR_SM_BASE + 0x440)
+#define AR_PHY_TX_IQCAL_STATUS_B0 (AR_SM_BASE + 0x48c)
+#define AR_PHY_TX_IQCAL_CORR_COEFF_01_B0 (AR_SM_BASE + 0x450)
+
+#define AR_PHY_PANIC_WD_STATUS (AR_SM_BASE + 0x5c0)
+#define AR_PHY_PANIC_WD_CTL_1 (AR_SM_BASE + 0x5c4)
+#define AR_PHY_PANIC_WD_CTL_2 (AR_SM_BASE + 0x5c8)
+#define AR_PHY_BT_CTL (AR_SM_BASE + 0x5cc)
+#define AR_PHY_ONLY_WARMRESET (AR_SM_BASE + 0x5d0)
+#define AR_PHY_ONLY_CTL (AR_SM_BASE + 0x5d4)
+#define AR_PHY_ECO_CTRL (AR_SM_BASE + 0x5dc)
+#define AR_PHY_BB_THERM_ADC_1 (AR_SM_BASE + 0x248)
+
+#define AR_PHY_65NM_CH0_SYNTH4 0x1608c
+#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT 0x00000002
+#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT_S 1
+#define AR_PHY_65NM_CH0_SYNTH7 0x16098
+#define AR_PHY_65NM_CH0_BIAS1 0x160c0
+#define AR_PHY_65NM_CH0_BIAS2 0x160c4
+#define AR_PHY_65NM_CH0_BIAS4 0x160cc
+#define AR_PHY_65NM_CH0_RXTX4 0x1610c
+#define AR_PHY_65NM_CH0_THERM 0x16290
+
+#define AR_PHY_65NM_CH0_THERM_LOCAL 0x80000000
+#define AR_PHY_65NM_CH0_THERM_LOCAL_S 31
+#define AR_PHY_65NM_CH0_THERM_START 0x20000000
+#define AR_PHY_65NM_CH0_THERM_START_S 29
+#define AR_PHY_65NM_CH0_THERM_SAR_ADC_OUT 0x0000ff00
+#define AR_PHY_65NM_CH0_THERM_SAR_ADC_OUT_S 8
+
+#define AR_PHY_65NM_CH0_RXTX1 0x16100
+#define AR_PHY_65NM_CH0_RXTX2 0x16104
+#define AR_PHY_65NM_CH1_RXTX1 0x16500
+#define AR_PHY_65NM_CH1_RXTX2 0x16504
+#define AR_PHY_65NM_CH2_RXTX1 0x16900
+#define AR_PHY_65NM_CH2_RXTX2 0x16904
+
+#define AR_PHY_RX1DB_BIQUAD_LONG_SHIFT 0x00380000
+#define AR_PHY_RX1DB_BIQUAD_LONG_SHIFT_S 19
+#define AR_PHY_RX6DB_BIQUAD_LONG_SHIFT 0x00c00000
+#define AR_PHY_RX6DB_BIQUAD_LONG_SHIFT_S 22
+#define AR_PHY_LNAGAIN_LONG_SHIFT 0xe0000000
+#define AR_PHY_LNAGAIN_LONG_SHIFT_S 29
+#define AR_PHY_MXRGAIN_LONG_SHIFT 0x03000000
+#define AR_PHY_MXRGAIN_LONG_SHIFT_S 24
+#define AR_PHY_VGAGAIN_LONG_SHIFT 0x1c000000
+#define AR_PHY_VGAGAIN_LONG_SHIFT_S 26
+#define AR_PHY_SCFIR_GAIN_LONG_SHIFT 0x00000001
+#define AR_PHY_SCFIR_GAIN_LONG_SHIFT_S 0
+#define AR_PHY_MANRXGAIN_LONG_SHIFT 0x00000002
+#define AR_PHY_MANRXGAIN_LONG_SHIFT_S 1
+
+/*
+ * SM Field Definitions
+ */
+#define AR_PHY_CL_CAL_ENABLE 0x00000002
+#define AR_PHY_PARALLEL_CAL_ENABLE 0x00000001
+#define AR_PHY_TPCRG1_PD_CAL_ENABLE 0x00400000
+#define AR_PHY_TPCRG1_PD_CAL_ENABLE_S 22
+
+#define AR_PHY_ADDAC_PARACTL_OFF_PWDADC 0x00008000
+
+#define AR_PHY_FCAL20_CAP_STATUS_0 0x01f00000
+#define AR_PHY_FCAL20_CAP_STATUS_0_S 20
+
+#define AR_PHY_RFBUS_REQ_EN 0x00000001 /* request for RF bus */
+#define AR_PHY_RFBUS_GRANT_EN 0x00000001 /* RF bus granted */
+#define AR_PHY_GC_TURBO_MODE 0x00000001 /* set turbo mode bits */
+#define AR_PHY_GC_TURBO_SHORT 0x00000002 /* set short symbols to turbo mode setting */
+#define AR_PHY_GC_DYN2040_EN 0x00000004 /* enable dyn 20/40 mode */
+#define AR_PHY_GC_DYN2040_PRI_ONLY 0x00000008 /* dyn 20/40 - primary only */
+#define AR_PHY_GC_DYN2040_PRI_CH 0x00000010 /* dyn 20/40 - primary ch offset (0=+10MHz, 1=-10MHz)*/
+#define AR_PHY_GC_DYN2040_PRI_CH_S 4
+#define AR_PHY_GC_DYN2040_EXT_CH 0x00000020 /* dyn 20/40 - ext ch spacing (0=20MHz/ 1=25MHz) */
+#define AR_PHY_GC_HT_EN 0x00000040 /* ht enable */
+#define AR_PHY_GC_SHORT_GI_40 0x00000080 /* allow short GI for HT 40 */
+#define AR_PHY_GC_WALSH 0x00000100 /* walsh spatial spreading for 2 chains,2 streams TX */
+#define AR_PHY_GC_SINGLE_HT_LTF1 0x00000200 /* single length (4us) 1st HT long training symbol */
+#define AR_PHY_GC_GF_DETECT_EN 0x00000400 /* enable Green Field detection. Only affects rx, not tx */
+#define AR_PHY_GC_ENABLE_DAC_FIFO 0x00000800 /* fifo between bb and dac */
+#define AR_PHY_RX_DELAY_DELAY 0x00003FFF /* delay from wakeup to rx ena */
+
+#define AR_PHY_CALMODE_IQ 0x00000000
+#define AR_PHY_CALMODE_ADC_GAIN 0x00000001
+#define AR_PHY_CALMODE_ADC_DC_PER 0x00000002
+#define AR_PHY_CALMODE_ADC_DC_INIT 0x00000003
+#define AR_PHY_SWAP_ALT_CHAIN 0x00000040
+#define AR_PHY_MODE_OFDM 0x00000000
+#define AR_PHY_MODE_CCK 0x00000001
+#define AR_PHY_MODE_DYNAMIC 0x00000004
+#define AR_PHY_MODE_DYNAMIC_S 2
+#define AR_PHY_MODE_HALF 0x00000020
+#define AR_PHY_MODE_QUARTER 0x00000040
+#define AR_PHY_MAC_CLK_MODE 0x00000080
+#define AR_PHY_MODE_DYN_CCK_DISABLE 0x00000100
+#define AR_PHY_MODE_SVD_HALF 0x00000200
+#define AR_PHY_ACTIVE_EN 0x00000001
+#define AR_PHY_ACTIVE_DIS 0x00000000
+#define AR_PHY_FORCE_XPA_CFG 0x000000001
+#define AR_PHY_FORCE_XPA_CFG_S 0
+#define AR_PHY_XPA_TIMING_CTL_TX_END_XPAB_OFF 0xFF000000
+#define AR_PHY_XPA_TIMING_CTL_TX_END_XPAB_OFF_S 24
+#define AR_PHY_XPA_TIMING_CTL_TX_END_XPAA_OFF 0x00FF0000
+#define AR_PHY_XPA_TIMING_CTL_TX_END_XPAA_OFF_S 16
+#define AR_PHY_XPA_TIMING_CTL_FRAME_XPAB_ON 0x0000FF00
+#define AR_PHY_XPA_TIMING_CTL_FRAME_XPAB_ON_S 8
+#define AR_PHY_XPA_TIMING_CTL_FRAME_XPAA_ON 0x000000FF
+#define AR_PHY_XPA_TIMING_CTL_FRAME_XPAA_ON_S 0
+#define AR_PHY_TX_END_TO_A2_RX_ON 0x00FF0000
+#define AR_PHY_TX_END_TO_A2_RX_ON_S 16
+#define AR_PHY_TX_END_DATA_START 0x000000FF
+#define AR_PHY_TX_END_DATA_START_S 0
+#define AR_PHY_TX_END_PA_ON 0x0000FF00
+#define AR_PHY_TX_END_PA_ON_S 8
+#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP 0x0000000F
+#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP_S 0
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1 0x000003F0
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1_S 4
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2 0x0000FC00
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2_S 10
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3 0x003F0000
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3_S 16
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4 0x0FC00000
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4_S 22
+#define AR_PHY_TPCRG1_NUM_PD_GAIN 0x0000c000
+#define AR_PHY_TPCRG1_NUM_PD_GAIN_S 14
+#define AR_PHY_TPCRG1_PD_GAIN_1 0x00030000
+#define AR_PHY_TPCRG1_PD_GAIN_1_S 16
+#define AR_PHY_TPCRG1_PD_GAIN_2 0x000C0000
+#define AR_PHY_TPCRG1_PD_GAIN_2_S 18
+#define AR_PHY_TPCRG1_PD_GAIN_3 0x00300000
+#define AR_PHY_TPCRG1_PD_GAIN_3_S 20
+#define AR_PHY_TPCGR1_FORCED_DAC_GAIN 0x0000003e
+#define AR_PHY_TPCGR1_FORCED_DAC_GAIN_S 1
+#define AR_PHY_TPCGR1_FORCE_DAC_GAIN 0x00000001
+#define AR_PHY_TXGAIN_FORCE 0x00000001
+#define AR_PHY_TXGAIN_FORCED_PADVGNRA 0x00003c00
+#define AR_PHY_TXGAIN_FORCED_PADVGNRA_S 10
+#define AR_PHY_TXGAIN_FORCED_PADVGNRB 0x0003c000
+#define AR_PHY_TXGAIN_FORCED_PADVGNRB_S 14
+#define AR_PHY_TXGAIN_FORCED_PADVGNRD 0x00c00000
+#define AR_PHY_TXGAIN_FORCED_PADVGNRD_S 22
+#define AR_PHY_TXGAIN_FORCED_TXMXRGAIN 0x000003c0
+#define AR_PHY_TXGAIN_FORCED_TXMXRGAIN_S 6
+#define AR_PHY_TXGAIN_FORCED_TXBB1DBGAIN 0x0000000e
+#define AR_PHY_TXGAIN_FORCED_TXBB1DBGAIN_S 1
+
+#define AR_PHY_POWER_TX_RATE1 0x9934
+#define AR_PHY_POWER_TX_RATE2 0x9938
+#define AR_PHY_POWER_TX_RATE_MAX 0x993c
+#define AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE 0x00000040
+#define PHY_AGC_CLR 0x10000000
+#define RFSILENT_BB 0x00002000
+#define AR_PHY_CHAN_INFO_GAIN_DIFF_PPM_MASK 0xFFF
+#define AR_PHY_CHAN_INFO_GAIN_DIFF_PPM_SIGNED_BIT 0x800
+#define AR_PHY_CHAN_INFO_GAIN_DIFF_UPPER_LIMIT 320
+#define AR_PHY_CHAN_INFO_MEMORY_CAPTURE_MASK 0x0001
+#define AR_PHY_RX_DELAY_DELAY 0x00003FFF
+#define AR_PHY_CCK_TX_CTRL_JAPAN 0x00000010
+#define AR_PHY_SPECTRAL_SCAN_ENABLE 0x00000001
+#define AR_PHY_SPECTRAL_SCAN_ENABLE_S 0
+#define AR_PHY_SPECTRAL_SCAN_ACTIVE 0x00000002
+#define AR_PHY_SPECTRAL_SCAN_ACTIVE_S 1
+#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD 0x000000F0
+#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD_S 4
+#define AR_PHY_SPECTRAL_SCAN_PERIOD 0x0000FF00
+#define AR_PHY_SPECTRAL_SCAN_PERIOD_S 8
+#define AR_PHY_SPECTRAL_SCAN_COUNT 0x00FF0000
+#define AR_PHY_SPECTRAL_SCAN_COUNT_S 16
+#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT 0x01000000
+#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_S 24
+#define AR_PHY_CHANNEL_STATUS_RX_CLEAR 0x00000004
+#define AR_PHY_TX_IQCAQL_CONTROL_1_IQCORR_I_Q_COFF_DELPT 0x01fc0000
+#define AR_PHY_TX_IQCAQL_CONTROL_1_IQCORR_I_Q_COFF_DELPT_S 18
+#define AR_PHY_TX_IQCAL_START_DO_CAL 0x00000001
+#define AR_PHY_TX_IQCAL_START_DO_CAL_S 0
+
+#define AR_PHY_TX_IQCAL_STATUS_FAILED 0x00000001
+#define AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE 0x00003fff
+#define AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE_S 0
+
+#define AR_PHY_TPC_18_THERM_CAL_VALUE 0xff
+#define AR_PHY_TPC_18_THERM_CAL_VALUE_S 0
+#define AR_PHY_TPC_19_ALPHA_THERM 0xff
+#define AR_PHY_TPC_19_ALPHA_THERM_S 0
+
+#define AR_PHY_65NM_CH0_RXTX4_THERM_ON 0x10000000
+#define AR_PHY_65NM_CH0_RXTX4_THERM_ON_S 28
+
+#define AR_PHY_BB_THERM_ADC_1_INIT_THERM 0x000000ff
+#define AR_PHY_BB_THERM_ADC_1_INIT_THERM_S 0
+
+/*
+ * Channel 1 Register Map
+ */
+#define AR_CHAN1_BASE 0xa800
+
+#define AR_PHY_EXT_CCA_1 (AR_CHAN1_BASE + 0x30)
+#define AR_PHY_TX_PHASE_RAMP_1 (AR_CHAN1_BASE + 0xd0)
+#define AR_PHY_ADC_GAIN_DC_CORR_1 (AR_CHAN1_BASE + 0xd4)
+
+#define AR_PHY_SPUR_REPORT_1 (AR_CHAN1_BASE + 0xa8)
+#define AR_PHY_CHAN_INFO_TAB_1 (AR_CHAN1_BASE + 0x300)
+#define AR_PHY_RX_IQCAL_CORR_B1 (AR_CHAN1_BASE + 0xdc)
+
+/*
+ * Channel 1 Field Definitions
+ */
+#define AR_PHY_CH1_EXT_MINCCA_PWR 0x01FF0000
+#define AR_PHY_CH1_EXT_MINCCA_PWR_S 16
+
+/*
+ * AGC 1 Register Map
+ */
+#define AR_AGC1_BASE 0xae00
+
+#define AR_PHY_FORCEMAX_GAINS_1 (AR_AGC1_BASE + 0x4)
+#define AR_PHY_EXT_ATTEN_CTL_1 (AR_AGC1_BASE + 0x18)
+#define AR_PHY_CCA_1 (AR_AGC1_BASE + 0x1c)
+#define AR_PHY_CCA_CTRL_1 (AR_AGC1_BASE + 0x20)
+#define AR_PHY_RSSI_1 (AR_AGC1_BASE + 0x180)
+#define AR_PHY_SPUR_CCK_REP_1 (AR_AGC1_BASE + 0x184)
+#define AR_PHY_RX_OCGAIN_2 (AR_AGC1_BASE + 0x200)
+
+/*
+ * AGC 1 Field Definitions
+ */
+#define AR_PHY_CH1_MINCCA_PWR 0x1FF00000
+#define AR_PHY_CH1_MINCCA_PWR_S 20
+
+/*
+ * SM 1 Register Map
+ */
+#define AR_SM1_BASE 0xb200
+
+#define AR_PHY_SWITCH_CHAIN_1 (AR_SM1_BASE + 0x84)
+#define AR_PHY_FCAL_2_1 (AR_SM1_BASE + 0xd0)
+#define AR_PHY_DFT_TONE_CTL_1 (AR_SM1_BASE + 0xd4)
+#define AR_PHY_CL_TAB_1 (AR_SM1_BASE + 0x100)
+#define AR_PHY_CHAN_INFO_GAIN_1 (AR_SM1_BASE + 0x180)
+#define AR_PHY_TPC_4_B1 (AR_SM1_BASE + 0x204)
+#define AR_PHY_TPC_5_B1 (AR_SM1_BASE + 0x208)
+#define AR_PHY_TPC_6_B1 (AR_SM1_BASE + 0x20c)
+#define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220)
+#define AR_PHY_PDADC_TAB_1 (AR_SM1_BASE + 0x240)
+#define AR_PHY_TX_IQCAL_STATUS_B1 (AR_SM1_BASE + 0x48c)
+#define AR_PHY_TX_IQCAL_CORR_COEFF_01_B1 (AR_SM1_BASE + 0x450)
+
+/*
+ * Channel 2 Register Map
+ */
+#define AR_CHAN2_BASE 0xb800
+
+#define AR_PHY_EXT_CCA_2 (AR_CHAN2_BASE + 0x30)
+#define AR_PHY_TX_PHASE_RAMP_2 (AR_CHAN2_BASE + 0xd0)
+#define AR_PHY_ADC_GAIN_DC_CORR_2 (AR_CHAN2_BASE + 0xd4)
+
+#define AR_PHY_SPUR_REPORT_2 (AR_CHAN2_BASE + 0xa8)
+#define AR_PHY_CHAN_INFO_TAB_2 (AR_CHAN2_BASE + 0x300)
+#define AR_PHY_RX_IQCAL_CORR_B2 (AR_CHAN2_BASE + 0xdc)
+
+/*
+ * Channel 2 Field Definitions
+ */
+#define AR_PHY_CH2_EXT_MINCCA_PWR 0x01FF0000
+#define AR_PHY_CH2_EXT_MINCCA_PWR_S 16
+/*
+ * AGC 2 Register Map
+ */
+#define AR_AGC2_BASE 0xbe00
+
+#define AR_PHY_FORCEMAX_GAINS_2 (AR_AGC2_BASE + 0x4)
+#define AR_PHY_EXT_ATTEN_CTL_2 (AR_AGC2_BASE + 0x18)
+#define AR_PHY_CCA_2 (AR_AGC2_BASE + 0x1c)
+#define AR_PHY_CCA_CTRL_2 (AR_AGC2_BASE + 0x20)
+#define AR_PHY_RSSI_2 (AR_AGC2_BASE + 0x180)
+
+/*
+ * AGC 2 Field Definitions
+ */
+#define AR_PHY_CH2_MINCCA_PWR 0x1FF00000
+#define AR_PHY_CH2_MINCCA_PWR_S 20
+
+/*
+ * SM 2 Register Map
+ */
+#define AR_SM2_BASE 0xc200
+
+#define AR_PHY_SWITCH_CHAIN_2 (AR_SM2_BASE + 0x84)
+#define AR_PHY_FCAL_2_2 (AR_SM2_BASE + 0xd0)
+#define AR_PHY_DFT_TONE_CTL_2 (AR_SM2_BASE + 0xd4)
+#define AR_PHY_CL_TAB_2 (AR_SM2_BASE + 0x100)
+#define AR_PHY_CHAN_INFO_GAIN_2 (AR_SM2_BASE + 0x180)
+#define AR_PHY_TPC_4_B2 (AR_SM2_BASE + 0x204)
+#define AR_PHY_TPC_5_B2 (AR_SM2_BASE + 0x208)
+#define AR_PHY_TPC_6_B2 (AR_SM2_BASE + 0x20c)
+#define AR_PHY_TPC_11_B2 (AR_SM2_BASE + 0x220)
+#define AR_PHY_PDADC_TAB_2 (AR_SM2_BASE + 0x240)
+#define AR_PHY_TX_IQCAL_STATUS_B2 (AR_SM2_BASE + 0x48c)
+#define AR_PHY_TX_IQCAL_CORR_COEFF_01_B2 (AR_SM2_BASE + 0x450)
+
+#define AR_PHY_TX_IQCAL_STATUS_B2_FAILED 0x00000001
+
+/*
+ * AGC 3 Register Map
+ */
+#define AR_AGC3_BASE 0xce00
+
+#define AR_PHY_RSSI_3 (AR_AGC3_BASE + 0x180)
+
+/*
+ * Misc helper defines
+ */
+#define AR_PHY_CHAIN_OFFSET (AR_CHAN1_BASE - AR_CHAN_BASE)
+
+#define AR_PHY_NEW_ADC_DC_GAIN_CORR(_i) (AR_PHY_ADC_GAIN_DC_CORR_0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_NEW_ADC_DC_GAIN_CORR_9300_10(_i) (AR_PHY_ADC_GAIN_DC_CORR_0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_SWITCH_CHAIN(_i) (AR_PHY_SWITCH_CHAIN_0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_EXT_ATTEN_CTL(_i) (AR_PHY_EXT_ATTEN_CTL_0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+
+#define AR_PHY_RXGAIN(_i) (AR_PHY_FORCEMAX_GAINS_0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_TPCRG5(_i) (AR_PHY_TPC_5_B0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_PDADC_TAB(_i) (AR_PHY_PDADC_TAB_0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+
+#define AR_PHY_CAL_MEAS_0(_i) (AR_PHY_IQ_ADC_MEAS_0_B0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_CAL_MEAS_1(_i) (AR_PHY_IQ_ADC_MEAS_1_B0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_CAL_MEAS_2(_i) (AR_PHY_IQ_ADC_MEAS_2_B0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_CAL_MEAS_3(_i) (AR_PHY_IQ_ADC_MEAS_3_B0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_CAL_MEAS_0_9300_10(_i) (AR_PHY_IQ_ADC_MEAS_0_B0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_CAL_MEAS_1_9300_10(_i) (AR_PHY_IQ_ADC_MEAS_1_B0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_CAL_MEAS_2_9300_10(_i) (AR_PHY_IQ_ADC_MEAS_2_B0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_CAL_MEAS_3_9300_10(_i) (AR_PHY_IQ_ADC_MEAS_3_B0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
+
+#define AR_PHY_BB_PANIC_NON_IDLE_ENABLE 0x00000001
+#define AR_PHY_BB_PANIC_IDLE_ENABLE 0x00000002
+#define AR_PHY_BB_PANIC_IDLE_MASK 0xFFFF0000
+#define AR_PHY_BB_PANIC_NON_IDLE_MASK 0x0000FFFC
+
+#define AR_PHY_BB_PANIC_RST_ENABLE 0x00000002
+#define AR_PHY_BB_PANIC_IRQ_ENABLE 0x00000004
+#define AR_PHY_BB_PANIC_CNTL2_MASK 0xFFFFFFF9
+
+#define AR_PHY_BB_WD_STATUS 0x00000007
+#define AR_PHY_BB_WD_STATUS_S 0
+#define AR_PHY_BB_WD_DET_HANG 0x00000008
+#define AR_PHY_BB_WD_DET_HANG_S 3
+#define AR_PHY_BB_WD_RADAR_SM 0x000000F0
+#define AR_PHY_BB_WD_RADAR_SM_S 4
+#define AR_PHY_BB_WD_RX_OFDM_SM 0x00000F00
+#define AR_PHY_BB_WD_RX_OFDM_SM_S 8
+#define AR_PHY_BB_WD_RX_CCK_SM 0x0000F000
+#define AR_PHY_BB_WD_RX_CCK_SM_S 12
+#define AR_PHY_BB_WD_TX_OFDM_SM 0x000F0000
+#define AR_PHY_BB_WD_TX_OFDM_SM_S 16
+#define AR_PHY_BB_WD_TX_CCK_SM 0x00F00000
+#define AR_PHY_BB_WD_TX_CCK_SM_S 20
+#define AR_PHY_BB_WD_AGC_SM 0x0F000000
+#define AR_PHY_BB_WD_AGC_SM_S 24
+#define AR_PHY_BB_WD_SRCH_SM 0xF0000000
+#define AR_PHY_BB_WD_SRCH_SM_S 28
+
+#define AR_PHY_BB_WD_STATUS_CLR 0x00000008
+
+void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx);
+
+#endif /* AR9003_PHY_H */
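/*
 * Illustrative sketch, not part of the patch: every field above is a mask
 * paired with a matching _S shift, which the driver combines through its
 * MS()/SM()-style helpers to read or build a register field, and the
 * AR_PHY_*(_i) macros at the end index per-chain register copies by adding
 * AR_PHY_CHAIN_OFFSET times the chain number to the chain-0 address. Local
 * stand-ins (EX_MS/EX_SM) are spelled out so the example is self-contained;
 * the register and field names are the ones defined above.
 */
#include <linux/types.h>

#define EX_MS(_v, _f)	(((_v) & _f) >> _f##_S)		/* extract a field */
#define EX_SM(_v, _f)	(((_v) << _f##_S) & _f)		/* build a field   */

static inline u32 example_get_fft_period(u32 regval)
{
	return EX_MS(regval, AR_PHY_SPECTRAL_SCAN_FFT_PERIOD);
}

static inline u32 example_set_fft_period(u32 regval, u32 period)
{
	/* Clear the FFT-period field, then insert the new value. */
	regval &= ~AR_PHY_SPECTRAL_SCAN_FFT_PERIOD;
	regval |= EX_SM(period, AR_PHY_SPECTRAL_SCAN_FFT_PERIOD);
	return regval;
}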
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 83c7ea4c007f..fbb7dec6ddeb 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -114,8 +114,10 @@ enum buffer_type {
#define bf_isretried(bf) (bf->bf_state.bf_type & BUF_RETRY)
#define bf_isxretried(bf) (bf->bf_state.bf_type & BUF_XRETRY)
+#define ATH_TXSTATUS_RING_SIZE 64
+
struct ath_descdma {
- struct ath_desc *dd_desc;
+ void *dd_desc;
dma_addr_t dd_desc_paddr;
u32 dd_desc_len;
struct ath_buf *dd_bufptr;
@@ -123,7 +125,7 @@ struct ath_descdma {
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
struct list_head *head, const char *name,
- int nbuf, int ndesc);
+ int nbuf, int ndesc, bool is_tx);
void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
struct list_head *head);
@@ -178,9 +180,6 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
#define BAW_WITHIN(_start, _bawsz, _seqno) \
((((_seqno) - (_start)) & 4095) < (_bawsz))
-#define ATH_DS_BA_SEQ(_ds) ((_ds)->ds_us.tx.ts_seqnum)
-#define ATH_DS_BA_BITMAP(_ds) (&(_ds)->ds_us.tx.ba_low)
-#define ATH_DS_TX_BA(_ds) ((_ds)->ds_us.tx.ts_flags & ATH9K_TX_BA)
#define ATH_AN_2_TID(_an, _tidno) (&(_an)->tid[(_tidno)])
#define ATH_TX_COMPLETE_POLL_INT 1000
@@ -191,6 +190,7 @@ enum ATH_AGGR_STATUS {
ATH_AGGR_LIMITED,
};
+#define ATH_TXFIFO_DEPTH 8
struct ath_txq {
u32 axq_qnum;
u32 *axq_link;
@@ -200,6 +200,10 @@ struct ath_txq {
bool stopped;
bool axq_tx_inprogress;
struct list_head axq_acq;
+ struct list_head txq_fifo[ATH_TXFIFO_DEPTH];
+ struct list_head txq_fifo_pending;
+ u8 txq_headidx;
+ u8 txq_tailidx;
};
#define AGGR_CLEANUP BIT(1)
@@ -226,6 +230,12 @@ struct ath_tx {
struct ath_descdma txdma;
};
+struct ath_rx_edma {
+ struct sk_buff_head rx_fifo;
+ struct sk_buff_head rx_buffers;
+ u32 rx_fifo_hwsize;
+};
+
struct ath_rx {
u8 defant;
u8 rxotherant;
@@ -235,6 +245,8 @@ struct ath_rx {
spinlock_t rxbuflock;
struct list_head rxbuf;
struct ath_descdma rxdma;
+ struct ath_buf *rx_bufptr;
+ struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
};
int ath_startrecv(struct ath_softc *sc);
@@ -243,7 +255,7 @@ void ath_flushrecv(struct ath_softc *sc);
u32 ath_calcrxfilter(struct ath_softc *sc);
int ath_rx_init(struct ath_softc *sc, int nbufs);
void ath_rx_cleanup(struct ath_softc *sc);
-int ath_rx_tasklet(struct ath_softc *sc, int flush);
+int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp);
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype);
void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq);
int ath_tx_setup(struct ath_softc *sc, int haltype);
@@ -261,6 +273,7 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ath_tx_control *txctl);
void ath_tx_tasklet(struct ath_softc *sc);
+void ath_tx_edma_tasklet(struct ath_softc *sc);
void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb);
bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno);
void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
@@ -483,7 +496,6 @@ struct ath_softc {
bool ps_enabled;
bool ps_idle;
unsigned long ps_usecount;
- enum ath9k_int imask;
struct ath_config config;
struct ath_rx rx;
@@ -511,6 +523,8 @@ struct ath_softc {
struct ath_beacon_config cur_beacon_conf;
struct delayed_work tx_complete_work;
struct ath_btcoex btcoex;
+
+ struct ath_descdma txsdma;
};
struct ath_wiphy {
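/*
 * Illustrative sketch, not part of the patch: the new txq_headidx and
 * txq_tailidx fields index into txq_fifo[ATH_TXFIFO_DEPTH] as a small ring,
 * so advancing either index is an increment modulo the FIFO depth. The
 * helper name below is hypothetical.
 */
#include <linux/types.h>

static inline u8 example_txq_fifo_next(u8 idx)
{
	return (idx + 1) % ATH_TXFIFO_DEPTH;	/* 7 wraps back to 0 for depth 8 */
}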
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index b4a31a43a62c..c8a4558f79ba 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -93,8 +93,6 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
antenna = ((sc->beacon.ast_be_xmit / sc->nbcnvifs) & 1 ? 2 : 1);
}
- ds->ds_data = bf->bf_buf_addr;
-
sband = &sc->sbands[common->hw->conf.channel->band];
rate = sband->bitrates[rateidx].hw_value;
if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
@@ -109,7 +107,8 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
/* NB: beacon's BufLen must be a multiple of 4 bytes */
ath9k_hw_filltxdesc(ah, ds, roundup(skb->len, 4),
- true, true, ds);
+ true, true, ds, bf->bf_buf_addr,
+ sc->beacon.beaconq);
memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
series[0].Tries = 1;
@@ -524,6 +523,7 @@ static void ath9k_beacon_init(struct ath_softc *sc,
static void ath_beacon_config_ap(struct ath_softc *sc,
struct ath_beacon_config *conf)
{
+ struct ath_hw *ah = sc->sc_ah;
u32 nexttbtt, intval;
/* NB: the beacon interval is kept internally in TU's */
@@ -539,15 +539,15 @@ static void ath_beacon_config_ap(struct ath_softc *sc,
* prepare beacon frames.
*/
intval |= ATH9K_BEACON_ENA;
- sc->imask |= ATH9K_INT_SWBA;
+ ah->imask |= ATH9K_INT_SWBA;
ath_beaconq_config(sc);
/* Set the computed AP beacon timers */
- ath9k_hw_set_interrupts(sc->sc_ah, 0);
+ ath9k_hw_set_interrupts(ah, 0);
ath9k_beacon_init(sc, nexttbtt, intval);
sc->beacon.bmisscnt = 0;
- ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
+ ath9k_hw_set_interrupts(ah, ah->imask);
/* Clear the reset TSF flag, so that subsequent beacon updation
will not reset the HW TSF. */
@@ -566,7 +566,8 @@ static void ath_beacon_config_ap(struct ath_softc *sc,
static void ath_beacon_config_sta(struct ath_softc *sc,
struct ath_beacon_config *conf)
{
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_beacon_state bs;
int dtimperiod, dtimcount, sleepduration;
int cfpperiod, cfpcount;
@@ -605,7 +606,7 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
* Pull nexttbtt forward to reflect the current
* TSF and calculate dtim+cfp state for the result.
*/
- tsf = ath9k_hw_gettsf64(sc->sc_ah);
+ tsf = ath9k_hw_gettsf64(ah);
tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
num_beacons = tsftu / intval + 1;
@@ -678,17 +679,18 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
/* Set the computed STA beacon timers */
- ath9k_hw_set_interrupts(sc->sc_ah, 0);
- ath9k_hw_set_sta_beacon_timers(sc->sc_ah, &bs);
- sc->imask |= ATH9K_INT_BMISS;
- ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
+ ath9k_hw_set_interrupts(ah, 0);
+ ath9k_hw_set_sta_beacon_timers(ah, &bs);
+ ah->imask |= ATH9K_INT_BMISS;
+ ath9k_hw_set_interrupts(ah, ah->imask);
}
static void ath_beacon_config_adhoc(struct ath_softc *sc,
struct ath_beacon_config *conf,
struct ieee80211_vif *vif)
{
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
u64 tsf;
u32 tsftu, intval, nexttbtt;
@@ -703,7 +705,7 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
else if (intval)
nexttbtt = roundup(nexttbtt, intval);
- tsf = ath9k_hw_gettsf64(sc->sc_ah);
+ tsf = ath9k_hw_gettsf64(ah);
tsftu = TSF_TO_TU((u32)(tsf>>32), (u32)tsf) + FUDGE;
do {
nexttbtt += intval;
@@ -719,20 +721,20 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
* self-linked tx descriptor and let the hardware deal with things.
*/
intval |= ATH9K_BEACON_ENA;
- if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_VEOL))
- sc->imask |= ATH9K_INT_SWBA;
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_VEOL))
+ ah->imask |= ATH9K_INT_SWBA;
ath_beaconq_config(sc);
/* Set the computed ADHOC beacon timers */
- ath9k_hw_set_interrupts(sc->sc_ah, 0);
+ ath9k_hw_set_interrupts(ah, 0);
ath9k_beacon_init(sc, nexttbtt, intval);
sc->beacon.bmisscnt = 0;
- ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
+ ath9k_hw_set_interrupts(ah, ah->imask);
/* FIXME: Handle properly when vif is NULL */
- if (vif && sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)
+ if (vif && ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)
ath_beacon_start_adhoc(sc, vif);
}
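/*
 * Illustrative sketch, not part of the patch: the beacon timer code above
 * works in TUs (1 TU = 1024 us) while the hardware TSF is a 64-bit
 * microsecond counter, so TSF_TO_TU(tsf >> 32, tsf), assuming its usual
 * definition in this driver, reduces to the conversion below: split the TSF
 * into 32-bit halves and shift the whole value right by 10 bits. The helper
 * name is hypothetical.
 */
#include <linux/types.h>

static inline u32 example_tsf_to_tu(u64 tsf)
{
	u32 hi = tsf >> 32;
	u32 lo = (u32)tsf;

	return (hi << 22) | (lo >> 10);		/* same as (u32)(tsf >> 10) */
}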
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 238a5744d8e9..6982577043b8 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -15,6 +15,9 @@
*/
#include "hw.h"
+#include "hw-ops.h"
+
+/* Common calibration code */
/* We can tune this as we go by monitoring really low values */
#define ATH9K_NF_TOO_LOW -60
@@ -86,90 +89,9 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath9k_nfcal_hist *h,
return;
}
-static void ath9k_hw_do_getnf(struct ath_hw *ah,
- int16_t nfarray[NUM_NF_READINGS])
-{
- struct ath_common *common = ath9k_hw_common(ah);
- int16_t nf;
-
- if (AR_SREV_9280_10_OR_LATER(ah))
- nf = MS(REG_READ(ah, AR_PHY_CCA), AR9280_PHY_MINCCA_PWR);
- else
- nf = MS(REG_READ(ah, AR_PHY_CCA), AR_PHY_MINCCA_PWR);
-
- if (nf & 0x100)
- nf = 0 - ((nf ^ 0x1ff) + 1);
- ath_print(common, ATH_DBG_CALIBRATE,
- "NF calibrated [ctl] [chain 0] is %d\n", nf);
- nfarray[0] = nf;
-
- if (!AR_SREV_9285(ah)) {
- if (AR_SREV_9280_10_OR_LATER(ah))
- nf = MS(REG_READ(ah, AR_PHY_CH1_CCA),
- AR9280_PHY_CH1_MINCCA_PWR);
- else
- nf = MS(REG_READ(ah, AR_PHY_CH1_CCA),
- AR_PHY_CH1_MINCCA_PWR);
-
- if (nf & 0x100)
- nf = 0 - ((nf ^ 0x1ff) + 1);
- ath_print(common, ATH_DBG_CALIBRATE,
- "NF calibrated [ctl] [chain 1] is %d\n", nf);
- nfarray[1] = nf;
-
- if (!AR_SREV_9280(ah) && !AR_SREV_9287(ah)) {
- nf = MS(REG_READ(ah, AR_PHY_CH2_CCA),
- AR_PHY_CH2_MINCCA_PWR);
- if (nf & 0x100)
- nf = 0 - ((nf ^ 0x1ff) + 1);
- ath_print(common, ATH_DBG_CALIBRATE,
- "NF calibrated [ctl] [chain 2] is %d\n", nf);
- nfarray[2] = nf;
- }
- }
-
- if (AR_SREV_9280_10_OR_LATER(ah))
- nf = MS(REG_READ(ah, AR_PHY_EXT_CCA),
- AR9280_PHY_EXT_MINCCA_PWR);
- else
- nf = MS(REG_READ(ah, AR_PHY_EXT_CCA),
- AR_PHY_EXT_MINCCA_PWR);
-
- if (nf & 0x100)
- nf = 0 - ((nf ^ 0x1ff) + 1);
- ath_print(common, ATH_DBG_CALIBRATE,
- "NF calibrated [ext] [chain 0] is %d\n", nf);
- nfarray[3] = nf;
-
- if (!AR_SREV_9285(ah)) {
- if (AR_SREV_9280_10_OR_LATER(ah))
- nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA),
- AR9280_PHY_CH1_EXT_MINCCA_PWR);
- else
- nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA),
- AR_PHY_CH1_EXT_MINCCA_PWR);
-
- if (nf & 0x100)
- nf = 0 - ((nf ^ 0x1ff) + 1);
- ath_print(common, ATH_DBG_CALIBRATE,
- "NF calibrated [ext] [chain 1] is %d\n", nf);
- nfarray[4] = nf;
-
- if (!AR_SREV_9280(ah) && !AR_SREV_9287(ah)) {
- nf = MS(REG_READ(ah, AR_PHY_CH2_EXT_CCA),
- AR_PHY_CH2_EXT_MINCCA_PWR);
- if (nf & 0x100)
- nf = 0 - ((nf ^ 0x1ff) + 1);
- ath_print(common, ATH_DBG_CALIBRATE,
- "NF calibrated [ext] [chain 2] is %d\n", nf);
- nfarray[5] = nf;
- }
- }
-}
-
-static bool getNoiseFloorThresh(struct ath_hw *ah,
- enum ieee80211_band band,
- int16_t *nft)
+static bool ath9k_hw_get_nf_thresh(struct ath_hw *ah,
+ enum ieee80211_band band,
+ int16_t *nft)
{
switch (band) {
case IEEE80211_BAND_5GHZ:
@@ -186,44 +108,8 @@ static bool getNoiseFloorThresh(struct ath_hw *ah,
return true;
}
-static void ath9k_hw_setup_calibration(struct ath_hw *ah,
- struct ath9k_cal_list *currCal)
-{
- struct ath_common *common = ath9k_hw_common(ah);
-
- REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(0),
- AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX,
- currCal->calData->calCountMax);
-
- switch (currCal->calData->calType) {
- case IQ_MISMATCH_CAL:
- REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
- ath_print(common, ATH_DBG_CALIBRATE,
- "starting IQ Mismatch Calibration\n");
- break;
- case ADC_GAIN_CAL:
- REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_GAIN);
- ath_print(common, ATH_DBG_CALIBRATE,
- "starting ADC Gain Calibration\n");
- break;
- case ADC_DC_CAL:
- REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_PER);
- ath_print(common, ATH_DBG_CALIBRATE,
- "starting ADC DC Calibration\n");
- break;
- case ADC_DC_INIT_CAL:
- REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_INIT);
- ath_print(common, ATH_DBG_CALIBRATE,
- "starting Init ADC DC Calibration\n");
- break;
- }
-
- REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
- AR_PHY_TIMING_CTRL4_DO_CAL);
-}
-
-static void ath9k_hw_reset_calibration(struct ath_hw *ah,
- struct ath9k_cal_list *currCal)
+void ath9k_hw_reset_calibration(struct ath_hw *ah,
+ struct ath9k_cal_list *currCal)
{
int i;
@@ -241,324 +127,6 @@ static void ath9k_hw_reset_calibration(struct ath_hw *ah,
ah->cal_samples = 0;
}
-static bool ath9k_hw_per_calibration(struct ath_hw *ah,
- struct ath9k_channel *ichan,
- u8 rxchainmask,
- struct ath9k_cal_list *currCal)
-{
- bool iscaldone = false;
-
- if (currCal->calState == CAL_RUNNING) {
- if (!(REG_READ(ah, AR_PHY_TIMING_CTRL4(0)) &
- AR_PHY_TIMING_CTRL4_DO_CAL)) {
-
- currCal->calData->calCollect(ah);
- ah->cal_samples++;
-
- if (ah->cal_samples >= currCal->calData->calNumSamples) {
- int i, numChains = 0;
- for (i = 0; i < AR5416_MAX_CHAINS; i++) {
- if (rxchainmask & (1 << i))
- numChains++;
- }
-
- currCal->calData->calPostProc(ah, numChains);
- ichan->CalValid |= currCal->calData->calType;
- currCal->calState = CAL_DONE;
- iscaldone = true;
- } else {
- ath9k_hw_setup_calibration(ah, currCal);
- }
- }
- } else if (!(ichan->CalValid & currCal->calData->calType)) {
- ath9k_hw_reset_calibration(ah, currCal);
- }
-
- return iscaldone;
-}
-
-/* Assumes you are talking about the currently configured channel */
-static bool ath9k_hw_iscal_supported(struct ath_hw *ah,
- enum ath9k_cal_types calType)
-{
- struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
-
- switch (calType & ah->supp_cals) {
- case IQ_MISMATCH_CAL: /* Both 2 GHz and 5 GHz support OFDM */
- return true;
- case ADC_GAIN_CAL:
- case ADC_DC_CAL:
- if (!(conf->channel->band == IEEE80211_BAND_2GHZ &&
- conf_is_ht20(conf)))
- return true;
- break;
- }
- return false;
-}
-
-static void ath9k_hw_iqcal_collect(struct ath_hw *ah)
-{
- int i;
-
- for (i = 0; i < AR5416_MAX_CHAINS; i++) {
- ah->totalPowerMeasI[i] +=
- REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
- ah->totalPowerMeasQ[i] +=
- REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
- ah->totalIqCorrMeas[i] +=
- (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
- ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
- "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
- ah->cal_samples, i, ah->totalPowerMeasI[i],
- ah->totalPowerMeasQ[i],
- ah->totalIqCorrMeas[i]);
- }
-}
-
-static void ath9k_hw_adc_gaincal_collect(struct ath_hw *ah)
-{
- int i;
-
- for (i = 0; i < AR5416_MAX_CHAINS; i++) {
- ah->totalAdcIOddPhase[i] +=
- REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
- ah->totalAdcIEvenPhase[i] +=
- REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
- ah->totalAdcQOddPhase[i] +=
- REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
- ah->totalAdcQEvenPhase[i] +=
- REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
-
- ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
- "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
- "oddq=0x%08x; evenq=0x%08x;\n",
- ah->cal_samples, i,
- ah->totalAdcIOddPhase[i],
- ah->totalAdcIEvenPhase[i],
- ah->totalAdcQOddPhase[i],
- ah->totalAdcQEvenPhase[i]);
- }
-}
-
-static void ath9k_hw_adc_dccal_collect(struct ath_hw *ah)
-{
- int i;
-
- for (i = 0; i < AR5416_MAX_CHAINS; i++) {
- ah->totalAdcDcOffsetIOddPhase[i] +=
- (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
- ah->totalAdcDcOffsetIEvenPhase[i] +=
- (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
- ah->totalAdcDcOffsetQOddPhase[i] +=
- (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
- ah->totalAdcDcOffsetQEvenPhase[i] +=
- (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
-
- ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
- "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
- "oddq=0x%08x; evenq=0x%08x;\n",
- ah->cal_samples, i,
- ah->totalAdcDcOffsetIOddPhase[i],
- ah->totalAdcDcOffsetIEvenPhase[i],
- ah->totalAdcDcOffsetQOddPhase[i],
- ah->totalAdcDcOffsetQEvenPhase[i]);
- }
-}
-
-static void ath9k_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
-{
- struct ath_common *common = ath9k_hw_common(ah);
- u32 powerMeasQ, powerMeasI, iqCorrMeas;
- u32 qCoffDenom, iCoffDenom;
- int32_t qCoff, iCoff;
- int iqCorrNeg, i;
-
- for (i = 0; i < numChains; i++) {
- powerMeasI = ah->totalPowerMeasI[i];
- powerMeasQ = ah->totalPowerMeasQ[i];
- iqCorrMeas = ah->totalIqCorrMeas[i];
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Starting IQ Cal and Correction for Chain %d\n",
- i);
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Orignal: Chn %diq_corr_meas = 0x%08x\n",
- i, ah->totalIqCorrMeas[i]);
-
- iqCorrNeg = 0;
-
- if (iqCorrMeas > 0x80000000) {
- iqCorrMeas = (0xffffffff - iqCorrMeas) + 1;
- iqCorrNeg = 1;
- }
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
- ath_print(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
- iqCorrNeg);
-
- iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 128;
- qCoffDenom = powerMeasQ / 64;
-
- if ((powerMeasQ != 0) && (iCoffDenom != 0) &&
- (qCoffDenom != 0)) {
- iCoff = iqCorrMeas / iCoffDenom;
- qCoff = powerMeasI / qCoffDenom - 64;
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d iCoff = 0x%08x\n", i, iCoff);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d qCoff = 0x%08x\n", i, qCoff);
-
- iCoff = iCoff & 0x3f;
- ath_print(common, ATH_DBG_CALIBRATE,
- "New: Chn %d iCoff = 0x%08x\n", i, iCoff);
- if (iqCorrNeg == 0x0)
- iCoff = 0x40 - iCoff;
-
- if (qCoff > 15)
- qCoff = 15;
- else if (qCoff <= -16)
- qCoff = 16;
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
- i, iCoff, qCoff);
-
- REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
- AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF,
- iCoff);
- REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
- AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF,
- qCoff);
- ath_print(common, ATH_DBG_CALIBRATE,
- "IQ Cal and Correction done for Chain %d\n",
- i);
- }
- }
-
- REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
- AR_PHY_TIMING_CTRL4_IQCORR_ENABLE);
-}
-
-static void ath9k_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains)
-{
- struct ath_common *common = ath9k_hw_common(ah);
- u32 iOddMeasOffset, iEvenMeasOffset, qOddMeasOffset, qEvenMeasOffset;
- u32 qGainMismatch, iGainMismatch, val, i;
-
- for (i = 0; i < numChains; i++) {
- iOddMeasOffset = ah->totalAdcIOddPhase[i];
- iEvenMeasOffset = ah->totalAdcIEvenPhase[i];
- qOddMeasOffset = ah->totalAdcQOddPhase[i];
- qEvenMeasOffset = ah->totalAdcQEvenPhase[i];
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Starting ADC Gain Cal for Chain %d\n", i);
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_odd_i = 0x%08x\n", i,
- iOddMeasOffset);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_even_i = 0x%08x\n", i,
- iEvenMeasOffset);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_odd_q = 0x%08x\n", i,
- qOddMeasOffset);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_even_q = 0x%08x\n", i,
- qEvenMeasOffset);
-
- if (iOddMeasOffset != 0 && qEvenMeasOffset != 0) {
- iGainMismatch =
- ((iEvenMeasOffset * 32) /
- iOddMeasOffset) & 0x3f;
- qGainMismatch =
- ((qOddMeasOffset * 32) /
- qEvenMeasOffset) & 0x3f;
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d gain_mismatch_i = 0x%08x\n", i,
- iGainMismatch);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d gain_mismatch_q = 0x%08x\n", i,
- qGainMismatch);
-
- val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
- val &= 0xfffff000;
- val |= (qGainMismatch) | (iGainMismatch << 6);
- REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "ADC Gain Cal done for Chain %d\n", i);
- }
- }
-
- REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
- REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) |
- AR_PHY_NEW_ADC_GAIN_CORR_ENABLE);
-}
-
-static void ath9k_hw_adc_dccal_calibrate(struct ath_hw *ah, u8 numChains)
-{
- struct ath_common *common = ath9k_hw_common(ah);
- u32 iOddMeasOffset, iEvenMeasOffset, val, i;
- int32_t qOddMeasOffset, qEvenMeasOffset, qDcMismatch, iDcMismatch;
- const struct ath9k_percal_data *calData =
- ah->cal_list_curr->calData;
- u32 numSamples =
- (1 << (calData->calCountMax + 5)) * calData->calNumSamples;
-
- for (i = 0; i < numChains; i++) {
- iOddMeasOffset = ah->totalAdcDcOffsetIOddPhase[i];
- iEvenMeasOffset = ah->totalAdcDcOffsetIEvenPhase[i];
- qOddMeasOffset = ah->totalAdcDcOffsetQOddPhase[i];
- qEvenMeasOffset = ah->totalAdcDcOffsetQEvenPhase[i];
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Starting ADC DC Offset Cal for Chain %d\n", i);
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_odd_i = %d\n", i,
- iOddMeasOffset);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_even_i = %d\n", i,
- iEvenMeasOffset);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_odd_q = %d\n", i,
- qOddMeasOffset);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_even_q = %d\n", i,
- qEvenMeasOffset);
-
- iDcMismatch = (((iEvenMeasOffset - iOddMeasOffset) * 2) /
- numSamples) & 0x1ff;
- qDcMismatch = (((qOddMeasOffset - qEvenMeasOffset) * 2) /
- numSamples) & 0x1ff;
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d dc_offset_mismatch_i = 0x%08x\n", i,
- iDcMismatch);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d dc_offset_mismatch_q = 0x%08x\n", i,
- qDcMismatch);
-
- val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
- val &= 0xc0000fff;
- val |= (qDcMismatch << 12) | (iDcMismatch << 21);
- REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "ADC DC Offset Cal done for Chain %d\n", i);
- }
-
- REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
- REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) |
- AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE);
-}
-
/* This is done for the currently configured channel */
bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
{
@@ -605,72 +173,6 @@ void ath9k_hw_start_nfcal(struct ath_hw *ah)
REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
}
-void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
-{
- struct ath9k_nfcal_hist *h;
- int i, j;
- int32_t val;
- const u32 ar5416_cca_regs[6] = {
- AR_PHY_CCA,
- AR_PHY_CH1_CCA,
- AR_PHY_CH2_CCA,
- AR_PHY_EXT_CCA,
- AR_PHY_CH1_EXT_CCA,
- AR_PHY_CH2_EXT_CCA
- };
- u8 chainmask, rx_chain_status;
-
- rx_chain_status = REG_READ(ah, AR_PHY_RX_CHAINMASK);
- if (AR_SREV_9285(ah))
- chainmask = 0x9;
- else if (AR_SREV_9280(ah) || AR_SREV_9287(ah)) {
- if ((rx_chain_status & 0x2) || (rx_chain_status & 0x4))
- chainmask = 0x1B;
- else
- chainmask = 0x09;
- } else {
- if (rx_chain_status & 0x4)
- chainmask = 0x3F;
- else if (rx_chain_status & 0x2)
- chainmask = 0x1B;
- else
- chainmask = 0x09;
- }
-
- h = ah->nfCalHist;
-
- for (i = 0; i < NUM_NF_READINGS; i++) {
- if (chainmask & (1 << i)) {
- val = REG_READ(ah, ar5416_cca_regs[i]);
- val &= 0xFFFFFE00;
- val |= (((u32) (h[i].privNF) << 1) & 0x1ff);
- REG_WRITE(ah, ar5416_cca_regs[i], val);
- }
- }
-
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
- AR_PHY_AGC_CONTROL_ENABLE_NF);
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
- AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
-
- for (j = 0; j < 5; j++) {
- if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
- AR_PHY_AGC_CONTROL_NF) == 0)
- break;
- udelay(50);
- }
-
- for (i = 0; i < NUM_NF_READINGS; i++) {
- if (chainmask & (1 << i)) {
- val = REG_READ(ah, ar5416_cca_regs[i]);
- val &= 0xFFFFFE00;
- val |= (((u32) (-50) << 1) & 0x1ff);
- REG_WRITE(ah, ar5416_cca_regs[i], val);
- }
- }
-}
-
int16_t ath9k_hw_getnf(struct ath_hw *ah,
struct ath9k_channel *chan)
{
@@ -690,7 +192,7 @@ int16_t ath9k_hw_getnf(struct ath_hw *ah,
} else {
ath9k_hw_do_getnf(ah, nfarray);
nf = nfarray[0];
- if (getNoiseFloorThresh(ah, c->band, &nfThresh)
+ if (ath9k_hw_get_nf_thresh(ah, c->band, &nfThresh)
&& nf > nfThresh) {
ath_print(common, ATH_DBG_CALIBRATE,
"noise floor failed detected; "
@@ -715,7 +217,7 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah)
if (AR_SREV_9280(ah))
noise_floor = AR_PHY_CCA_MAX_AR9280_GOOD_VALUE;
- else if (AR_SREV_9285(ah))
+ else if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
noise_floor = AR_PHY_CCA_MAX_AR9285_GOOD_VALUE;
else if (AR_SREV_9287(ah))
noise_floor = AR_PHY_CCA_MAX_AR9287_GOOD_VALUE;
@@ -748,508 +250,3 @@ s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan)
return nf;
}
EXPORT_SYMBOL(ath9k_hw_getchan_noise);
-
-static void ath9k_olc_temp_compensation_9287(struct ath_hw *ah)
-{
- u32 rddata;
- int32_t delta, currPDADC, slope;
-
- rddata = REG_READ(ah, AR_PHY_TX_PWRCTRL4);
- currPDADC = MS(rddata, AR_PHY_TX_PWRCTRL_PD_AVG_OUT);
-
- if (ah->initPDADC == 0 || currPDADC == 0) {
- /*
- * Zero value indicates that no frames have been transmitted yet,
- * can't do temperature compensation until frames are transmitted.
- */
- return;
- } else {
- slope = ah->eep_ops->get_eeprom(ah, EEP_TEMPSENSE_SLOPE);
-
- if (slope == 0) { /* to avoid divide by zero case */
- delta = 0;
- } else {
- delta = ((currPDADC - ah->initPDADC)*4) / slope;
- }
- REG_RMW_FIELD(ah, AR_PHY_CH0_TX_PWRCTRL11,
- AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta);
- REG_RMW_FIELD(ah, AR_PHY_CH1_TX_PWRCTRL11,
- AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta);
- }
-}
-
-static void ath9k_olc_temp_compensation(struct ath_hw *ah)
-{
- u32 rddata, i;
- int delta, currPDADC, regval;
-
- if (OLC_FOR_AR9287_10_LATER) {
- ath9k_olc_temp_compensation_9287(ah);
- } else {
- rddata = REG_READ(ah, AR_PHY_TX_PWRCTRL4);
- currPDADC = MS(rddata, AR_PHY_TX_PWRCTRL_PD_AVG_OUT);
-
- if (ah->initPDADC == 0 || currPDADC == 0) {
- return;
- } else {
- if (ah->eep_ops->get_eeprom(ah, EEP_DAC_HPWR_5G))
- delta = (currPDADC - ah->initPDADC + 4) / 8;
- else
- delta = (currPDADC - ah->initPDADC + 5) / 10;
-
- if (delta != ah->PDADCdelta) {
- ah->PDADCdelta = delta;
- for (i = 1; i < AR9280_TX_GAIN_TABLE_SIZE; i++) {
- regval = ah->originalGain[i] - delta;
- if (regval < 0)
- regval = 0;
-
- REG_RMW_FIELD(ah,
- AR_PHY_TX_GAIN_TBL1 + i * 4,
- AR_PHY_TX_GAIN, regval);
- }
- }
- }
- }
-}
-
-static void ath9k_hw_9271_pa_cal(struct ath_hw *ah, bool is_reset)
-{
- u32 regVal;
- unsigned int i;
- u32 regList [][2] = {
- { 0x786c, 0 },
- { 0x7854, 0 },
- { 0x7820, 0 },
- { 0x7824, 0 },
- { 0x7868, 0 },
- { 0x783c, 0 },
- { 0x7838, 0 } ,
- { 0x7828, 0 } ,
- };
-
- for (i = 0; i < ARRAY_SIZE(regList); i++)
- regList[i][1] = REG_READ(ah, regList[i][0]);
-
- regVal = REG_READ(ah, 0x7834);
- regVal &= (~(0x1));
- REG_WRITE(ah, 0x7834, regVal);
- regVal = REG_READ(ah, 0x9808);
- regVal |= (0x1 << 27);
- REG_WRITE(ah, 0x9808, regVal);
-
- /* 786c,b23,1, pwddac=1 */
- REG_RMW_FIELD(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC, 1);
- /* 7854, b5,1, pdrxtxbb=1 */
- REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1, 1);
- /* 7854, b7,1, pdv2i=1 */
- REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I, 1);
- /* 7854, b8,1, pddacinterface=1 */
- REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF, 1);
- /* 7824,b12,0, offcal=0 */
- REG_RMW_FIELD(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL, 0);
- /* 7838, b1,0, pwddb=0 */
- REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB, 0);
- /* 7820,b11,0, enpacal=0 */
- REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL, 0);
- /* 7820,b25,1, pdpadrv1=0 */
- REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1, 0);
- /* 7820,b24,0, pdpadrv2=0 */
- REG_RMW_FIELD(ah, AR9285_AN_RF2G1,AR9285_AN_RF2G1_PDPADRV2,0);
- /* 7820,b23,0, pdpaout=0 */
- REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT, 0);
- /* 783c,b14-16,7, padrvgn2tab_0=7 */
- REG_RMW_FIELD(ah, AR9285_AN_RF2G8,AR9285_AN_RF2G8_PADRVGN2TAB0, 7);
- /*
- * 7838,b29-31,0, padrvgn1tab_0=0
- * does not matter since we turn it off
- */
- REG_RMW_FIELD(ah, AR9285_AN_RF2G7,AR9285_AN_RF2G7_PADRVGN2TAB0, 0);
-
- REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9271_AN_RF2G3_CCOMP, 0xfff);
-
- /* Set:
- * localmode=1,bmode=1,bmoderxtx=1,synthon=1,
- * txon=1,paon=1,oscon=1,synthon_force=1
- */
- REG_WRITE(ah, AR9285_AN_TOP2, 0xca0358a0);
- udelay(30);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9271_AN_RF2G6_OFFS, 0);
-
- /* find off_6_1; */
- for (i = 6; i > 0; i--) {
- regVal = REG_READ(ah, 0x7834);
- regVal |= (1 << (20 + i));
- REG_WRITE(ah, 0x7834, regVal);
- udelay(1);
- //regVal = REG_READ(ah, 0x7834);
- regVal &= (~(0x1 << (20 + i)));
- regVal |= (MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9)
- << (20 + i));
- REG_WRITE(ah, 0x7834, regVal);
- }
-
- regVal = (regVal >>20) & 0x7f;
-
- /* Update PA cal info */
- if ((!is_reset) && (ah->pacal_info.prev_offset == regVal)) {
- if (ah->pacal_info.max_skipcount < MAX_PACAL_SKIPCOUNT)
- ah->pacal_info.max_skipcount =
- 2 * ah->pacal_info.max_skipcount;
- ah->pacal_info.skipcount = ah->pacal_info.max_skipcount;
- } else {
- ah->pacal_info.max_skipcount = 1;
- ah->pacal_info.skipcount = 0;
- ah->pacal_info.prev_offset = regVal;
- }
-
- regVal = REG_READ(ah, 0x7834);
- regVal |= 0x1;
- REG_WRITE(ah, 0x7834, regVal);
- regVal = REG_READ(ah, 0x9808);
- regVal &= (~(0x1 << 27));
- REG_WRITE(ah, 0x9808, regVal);
-
- for (i = 0; i < ARRAY_SIZE(regList); i++)
- REG_WRITE(ah, regList[i][0], regList[i][1]);
-}
-
-static inline void ath9k_hw_9285_pa_cal(struct ath_hw *ah, bool is_reset)
-{
- struct ath_common *common = ath9k_hw_common(ah);
- u32 regVal;
- int i, offset, offs_6_1, offs_0;
- u32 ccomp_org, reg_field;
- u32 regList[][2] = {
- { 0x786c, 0 },
- { 0x7854, 0 },
- { 0x7820, 0 },
- { 0x7824, 0 },
- { 0x7868, 0 },
- { 0x783c, 0 },
- { 0x7838, 0 },
- };
-
- ath_print(common, ATH_DBG_CALIBRATE, "Running PA Calibration\n");
-
- /* PA CAL is not needed for high power solution */
- if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) ==
- AR5416_EEP_TXGAIN_HIGH_POWER)
- return;
-
- if (AR_SREV_9285_11(ah)) {
- REG_WRITE(ah, AR9285_AN_TOP4, (AR9285_AN_TOP4_DEFAULT | 0x14));
- udelay(10);
- }
-
- for (i = 0; i < ARRAY_SIZE(regList); i++)
- regList[i][1] = REG_READ(ah, regList[i][0]);
-
- regVal = REG_READ(ah, 0x7834);
- regVal &= (~(0x1));
- REG_WRITE(ah, 0x7834, regVal);
- regVal = REG_READ(ah, 0x9808);
- regVal |= (0x1 << 27);
- REG_WRITE(ah, 0x9808, regVal);
-
- REG_RMW_FIELD(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC, 1);
- REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1, 1);
- REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I, 1);
- REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF, 1);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL, 0);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB, 0);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL, 0);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1, 0);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV2, 0);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT, 0);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G8, AR9285_AN_RF2G8_PADRVGN2TAB0, 7);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PADRVGN2TAB0, 0);
- ccomp_org = MS(REG_READ(ah, AR9285_AN_RF2G6), AR9285_AN_RF2G6_CCOMP);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_CCOMP, 0xf);
-
- REG_WRITE(ah, AR9285_AN_TOP2, 0xca0358a0);
- udelay(30);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_OFFS, 0);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, 0);
-
- for (i = 6; i > 0; i--) {
- regVal = REG_READ(ah, 0x7834);
- regVal |= (1 << (19 + i));
- REG_WRITE(ah, 0x7834, regVal);
- udelay(1);
- regVal = REG_READ(ah, 0x7834);
- regVal &= (~(0x1 << (19 + i)));
- reg_field = MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9);
- regVal |= (reg_field << (19 + i));
- REG_WRITE(ah, 0x7834, regVal);
- }
-
- REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, 1);
- udelay(1);
- reg_field = MS(REG_READ(ah, AR9285_AN_RF2G9), AR9285_AN_RXTXBB1_SPARE9);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, reg_field);
- offs_6_1 = MS(REG_READ(ah, AR9285_AN_RF2G6), AR9285_AN_RF2G6_OFFS);
- offs_0 = MS(REG_READ(ah, AR9285_AN_RF2G3), AR9285_AN_RF2G3_PDVCCOMP);
-
- offset = (offs_6_1<<1) | offs_0;
- offset = offset - 0;
- offs_6_1 = offset>>1;
- offs_0 = offset & 1;
-
- if ((!is_reset) && (ah->pacal_info.prev_offset == offset)) {
- if (ah->pacal_info.max_skipcount < MAX_PACAL_SKIPCOUNT)
- ah->pacal_info.max_skipcount =
- 2 * ah->pacal_info.max_skipcount;
- ah->pacal_info.skipcount = ah->pacal_info.max_skipcount;
- } else {
- ah->pacal_info.max_skipcount = 1;
- ah->pacal_info.skipcount = 0;
- ah->pacal_info.prev_offset = offset;
- }
-
- REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_OFFS, offs_6_1);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, offs_0);
-
- regVal = REG_READ(ah, 0x7834);
- regVal |= 0x1;
- REG_WRITE(ah, 0x7834, regVal);
- regVal = REG_READ(ah, 0x9808);
- regVal &= (~(0x1 << 27));
- REG_WRITE(ah, 0x9808, regVal);
-
- for (i = 0; i < ARRAY_SIZE(regList); i++)
- REG_WRITE(ah, regList[i][0], regList[i][1]);
-
- REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_CCOMP, ccomp_org);
-
- if (AR_SREV_9285_11(ah))
- REG_WRITE(ah, AR9285_AN_TOP4, AR9285_AN_TOP4_DEFAULT);
-
-}
-
-bool ath9k_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
- u8 rxchainmask, bool longcal)
-{
- bool iscaldone = true;
- struct ath9k_cal_list *currCal = ah->cal_list_curr;
-
- if (currCal &&
- (currCal->calState == CAL_RUNNING ||
- currCal->calState == CAL_WAITING)) {
- iscaldone = ath9k_hw_per_calibration(ah, chan,
- rxchainmask, currCal);
- if (iscaldone) {
- ah->cal_list_curr = currCal = currCal->calNext;
-
- if (currCal->calState == CAL_WAITING) {
- iscaldone = false;
- ath9k_hw_reset_calibration(ah, currCal);
- }
- }
- }
-
- /* Do NF cal only at longer intervals */
- if (longcal) {
- /* Do periodic PAOffset Cal */
- if (AR_SREV_9271(ah))
- ath9k_hw_9271_pa_cal(ah, false);
- else if (AR_SREV_9285_11_OR_LATER(ah)) {
- if (!ah->pacal_info.skipcount)
- ath9k_hw_9285_pa_cal(ah, false);
- else
- ah->pacal_info.skipcount--;
- }
-
- if (OLC_FOR_AR9280_20_LATER || OLC_FOR_AR9287_10_LATER)
- ath9k_olc_temp_compensation(ah);
-
- /* Get the value from the previous NF cal and update history buffer */
- ath9k_hw_getnf(ah, chan);
-
- /*
- * Load the NF from history buffer of the current channel.
- * NF is slow time-variant, so it is OK to use a historical value.
- */
- ath9k_hw_loadnf(ah, ah->curchan);
-
- ath9k_hw_start_nfcal(ah);
- }
-
- return iscaldone;
-}
-EXPORT_SYMBOL(ath9k_hw_calibrate);
-
-/* Carrier leakage Calibration fix */
-static bool ar9285_clc(struct ath_hw *ah, struct ath9k_channel *chan)
-{
- struct ath_common *common = ath9k_hw_common(ah);
-
- REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
- if (IS_CHAN_HT20(chan)) {
- REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_PARALLEL_CAL_ENABLE);
- REG_SET_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN);
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
- AR_PHY_AGC_CONTROL_FLTR_CAL);
- REG_CLR_BIT(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_CAL_ENABLE);
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
- if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
- AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) {
- ath_print(common, ATH_DBG_CALIBRATE, "offset "
- "calibration failed to complete in "
- "1ms; noisy ??\n");
- return false;
- }
- REG_CLR_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN);
- REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_PARALLEL_CAL_ENABLE);
- REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
- }
- REG_CLR_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC);
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL);
- REG_SET_BIT(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_CAL_ENABLE);
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
- if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
- 0, AH_WAIT_TIMEOUT)) {
- ath_print(common, ATH_DBG_CALIBRATE, "offset calibration "
- "failed to complete in 1ms; noisy ??\n");
- return false;
- }
-
- REG_SET_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC);
- REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL);
-
- return true;
-}
-
-bool ath9k_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
-{
- struct ath_common *common = ath9k_hw_common(ah);
-
- if (AR_SREV_9271(ah) || AR_SREV_9285_12_OR_LATER(ah)) {
- if (!ar9285_clc(ah, chan))
- return false;
- } else {
- if (AR_SREV_9280_10_OR_LATER(ah)) {
- if (!AR_SREV_9287_10_OR_LATER(ah))
- REG_CLR_BIT(ah, AR_PHY_ADC_CTL,
- AR_PHY_ADC_CTL_OFF_PWDADC);
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
- AR_PHY_AGC_CONTROL_FLTR_CAL);
- }
-
- /* Calibrate the AGC */
- REG_WRITE(ah, AR_PHY_AGC_CONTROL,
- REG_READ(ah, AR_PHY_AGC_CONTROL) |
- AR_PHY_AGC_CONTROL_CAL);
-
- /* Poll for offset calibration complete */
- if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
- 0, AH_WAIT_TIMEOUT)) {
- ath_print(common, ATH_DBG_CALIBRATE,
- "offset calibration failed to "
- "complete in 1ms; noisy environment?\n");
- return false;
- }
-
- if (AR_SREV_9280_10_OR_LATER(ah)) {
- if (!AR_SREV_9287_10_OR_LATER(ah))
- REG_SET_BIT(ah, AR_PHY_ADC_CTL,
- AR_PHY_ADC_CTL_OFF_PWDADC);
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
- AR_PHY_AGC_CONTROL_FLTR_CAL);
- }
- }
-
- /* Do PA Calibration */
- if (AR_SREV_9271(ah))
- ath9k_hw_9271_pa_cal(ah, true);
- else if (AR_SREV_9285_11_OR_LATER(ah))
- ath9k_hw_9285_pa_cal(ah, true);
-
- /* Do NF Calibration after DC offset and other calibrations */
- REG_WRITE(ah, AR_PHY_AGC_CONTROL,
- REG_READ(ah, AR_PHY_AGC_CONTROL) | AR_PHY_AGC_CONTROL_NF);
-
- ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
-
- /* Enable IQ, ADC Gain and ADC DC offset CALs */
- if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah)) {
- if (ath9k_hw_iscal_supported(ah, ADC_GAIN_CAL)) {
- INIT_CAL(&ah->adcgain_caldata);
- INSERT_CAL(ah, &ah->adcgain_caldata);
- ath_print(common, ATH_DBG_CALIBRATE,
- "enabling ADC Gain Calibration.\n");
- }
- if (ath9k_hw_iscal_supported(ah, ADC_DC_CAL)) {
- INIT_CAL(&ah->adcdc_caldata);
- INSERT_CAL(ah, &ah->adcdc_caldata);
- ath_print(common, ATH_DBG_CALIBRATE,
- "enabling ADC DC Calibration.\n");
- }
- if (ath9k_hw_iscal_supported(ah, IQ_MISMATCH_CAL)) {
- INIT_CAL(&ah->iq_caldata);
- INSERT_CAL(ah, &ah->iq_caldata);
- ath_print(common, ATH_DBG_CALIBRATE,
- "enabling IQ Calibration.\n");
- }
-
- ah->cal_list_curr = ah->cal_list;
-
- if (ah->cal_list_curr)
- ath9k_hw_reset_calibration(ah, ah->cal_list_curr);
- }
-
- chan->CalValid = 0;
-
- return true;
-}
-
-const struct ath9k_percal_data iq_cal_multi_sample = {
- IQ_MISMATCH_CAL,
- MAX_CAL_SAMPLES,
- PER_MIN_LOG_COUNT,
- ath9k_hw_iqcal_collect,
- ath9k_hw_iqcalibrate
-};
-const struct ath9k_percal_data iq_cal_single_sample = {
- IQ_MISMATCH_CAL,
- MIN_CAL_SAMPLES,
- PER_MAX_LOG_COUNT,
- ath9k_hw_iqcal_collect,
- ath9k_hw_iqcalibrate
-};
-const struct ath9k_percal_data adc_gain_cal_multi_sample = {
- ADC_GAIN_CAL,
- MAX_CAL_SAMPLES,
- PER_MIN_LOG_COUNT,
- ath9k_hw_adc_gaincal_collect,
- ath9k_hw_adc_gaincal_calibrate
-};
-const struct ath9k_percal_data adc_gain_cal_single_sample = {
- ADC_GAIN_CAL,
- MIN_CAL_SAMPLES,
- PER_MAX_LOG_COUNT,
- ath9k_hw_adc_gaincal_collect,
- ath9k_hw_adc_gaincal_calibrate
-};
-const struct ath9k_percal_data adc_dc_cal_multi_sample = {
- ADC_DC_CAL,
- MAX_CAL_SAMPLES,
- PER_MIN_LOG_COUNT,
- ath9k_hw_adc_dccal_collect,
- ath9k_hw_adc_dccal_calibrate
-};
-const struct ath9k_percal_data adc_dc_cal_single_sample = {
- ADC_DC_CAL,
- MIN_CAL_SAMPLES,
- PER_MAX_LOG_COUNT,
- ath9k_hw_adc_dccal_collect,
- ath9k_hw_adc_dccal_calibrate
-};
-const struct ath9k_percal_data adc_init_dc_cal = {
- ADC_DC_INIT_CAL,
- MIN_CAL_SAMPLES,
- INIT_LOG_COUNT,
- ath9k_hw_adc_dccal_collect,
- ath9k_hw_adc_dccal_calibrate
-};
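/*
 * Illustrative sketch, not part of the patch: the noise-floor values read in
 * the code above are 9-bit two's-complement numbers, and the repeated
 *     if (nf & 0x100) nf = 0 - ((nf ^ 0x1ff) + 1);
 * idiom is simply sign extension from 9 bits. The helper name is
 * hypothetical.
 */
#include <linux/types.h>

static inline int16_t example_sign_extend9(int16_t nf)
{
	/* A raw reading of 0x1F0 (496) becomes 496 - 512 = -16. */
	return (nf & 0x100) ? -(((nf ^ 0x1ff) & 0x1ff) + 1) : nf;
}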
diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
index b2c873e97485..24538bdb9126 100644
--- a/drivers/net/wireless/ath/ath9k/calib.h
+++ b/drivers/net/wireless/ath/ath9k/calib.h
@@ -19,14 +19,6 @@
#include "hw.h"
-extern const struct ath9k_percal_data iq_cal_multi_sample;
-extern const struct ath9k_percal_data iq_cal_single_sample;
-extern const struct ath9k_percal_data adc_gain_cal_multi_sample;
-extern const struct ath9k_percal_data adc_gain_cal_single_sample;
-extern const struct ath9k_percal_data adc_dc_cal_multi_sample;
-extern const struct ath9k_percal_data adc_dc_cal_single_sample;
-extern const struct ath9k_percal_data adc_init_dc_cal;
-
#define AR_PHY_CCA_MAX_AR5416_GOOD_VALUE -85
#define AR_PHY_CCA_MAX_AR9280_GOOD_VALUE -112
#define AR_PHY_CCA_MAX_AR9285_GOOD_VALUE -118
@@ -76,7 +68,8 @@ enum ath9k_cal_types {
ADC_DC_INIT_CAL = 0x1,
ADC_GAIN_CAL = 0x2,
ADC_DC_CAL = 0x4,
- IQ_MISMATCH_CAL = 0x8
+ IQ_MISMATCH_CAL = 0x8,
+ TEMP_COMP_CAL = 0x10,
};
enum ath9k_cal_state {
@@ -122,14 +115,12 @@ struct ath9k_pacal_info{
bool ath9k_hw_reset_calvalid(struct ath_hw *ah);
void ath9k_hw_start_nfcal(struct ath_hw *ah);
-void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan);
int16_t ath9k_hw_getnf(struct ath_hw *ah,
struct ath9k_channel *chan);
void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah);
s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan);
-bool ath9k_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
- u8 rxchainmask, bool longcal);
-bool ath9k_hw_init_cal(struct ath_hw *ah,
- struct ath9k_channel *chan);
+void ath9k_hw_reset_calibration(struct ath_hw *ah,
+ struct ath9k_cal_list *currCal);
+
#endif /* CALIB_H */
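/*
 * Illustrative sketch, not part of the patch: the ath9k_cal_types values are
 * bit flags, which is why a channel's CalValid word can record each completed
 * calibration independently (including the new TEMP_COMP_CAL bit). The helper
 * names below are hypothetical; the enum is the one declared above.
 */
#include <linux/types.h>

static inline bool example_cal_done(u32 cal_valid, enum ath9k_cal_types type)
{
	return (cal_valid & type) != 0;
}

static inline u32 example_mark_cal_done(u32 cal_valid, enum ath9k_cal_types type)
{
	return cal_valid | type;	/* e.g. IQ_MISMATCH_CAL | ADC_GAIN_CAL */
}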
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index 4d775ae141db..7707341cd0d3 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -57,13 +57,19 @@ static bool ath9k_rx_accept(struct ath_common *common,
* rs_more indicates chained descriptors which can be used
* to link buffers together for a sort of scatter-gather
* operation.
- *
+ * Reject the frame: we don't support scatter-gather yet, and
+ * the frame is probably corrupt anyway.
+ */
+ if (rx_stats->rs_more)
+ return false;
+
+ /*
* The rx_stats->rs_status will not be set until the end of the
* chained descriptors so it can be ignored if rs_more is set. The
* rs_more will be false at the last element of the chained
* descriptors.
*/
- if (!rx_stats->rs_more && rx_stats->rs_status != 0) {
+ if (rx_stats->rs_status != 0) {
if (rx_stats->rs_status & ATH9K_RXERR_CRC)
rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
if (rx_stats->rs_status & ATH9K_RXERR_PHY)
@@ -102,11 +108,11 @@ static bool ath9k_rx_accept(struct ath_common *common,
return true;
}
-static u8 ath9k_process_rate(struct ath_common *common,
- struct ieee80211_hw *hw,
- struct ath_rx_status *rx_stats,
- struct ieee80211_rx_status *rxs,
- struct sk_buff *skb)
+static int ath9k_process_rate(struct ath_common *common,
+ struct ieee80211_hw *hw,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs,
+ struct sk_buff *skb)
{
struct ieee80211_supported_band *sband;
enum ieee80211_band band;
@@ -122,25 +128,32 @@ static u8 ath9k_process_rate(struct ath_common *common,
rxs->flag |= RX_FLAG_40MHZ;
if (rx_stats->rs_flags & ATH9K_RX_GI)
rxs->flag |= RX_FLAG_SHORT_GI;
- return rx_stats->rs_rate & 0x7f;
+ rxs->rate_idx = rx_stats->rs_rate & 0x7f;
+ return 0;
}
for (i = 0; i < sband->n_bitrates; i++) {
- if (sband->bitrates[i].hw_value == rx_stats->rs_rate)
- return i;
+ if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
+ rxs->rate_idx = i;
+ return 0;
+ }
if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
rxs->flag |= RX_FLAG_SHORTPRE;
- return i;
+ rxs->rate_idx = i;
+ return 0;
}
}
- /* No valid hardware bitrate found -- we should not get here */
+ /*
+ * No valid hardware bitrate found -- we should not get here
+ * because hardware has already validated this frame as OK.
+ */
ath_print(common, ATH_DBG_XMIT, "unsupported hw bitrate detected "
"0x%02x using 1 Mbit\n", rx_stats->rs_rate);
if ((common->debug_mask & ATH_DBG_XMIT))
print_hex_dump_bytes("", DUMP_PREFIX_NONE, skb->data, skb->len);
- return 0;
+ return -EINVAL;
}
static void ath9k_process_rssi(struct ath_common *common,
@@ -202,17 +215,22 @@ int ath9k_cmn_rx_skb_preprocess(struct ath_common *common,
struct ath_hw *ah = common->ah;
memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
+
+ /*
+ * everything but the rate is checked here, the rate check is done
+ * separately to avoid doing two lookups for a rate for each frame.
+ */
if (!ath9k_rx_accept(common, skb, rx_status, rx_stats, decrypt_error))
return -EINVAL;
ath9k_process_rssi(common, hw, skb, rx_stats);
- rx_status->rate_idx = ath9k_process_rate(common, hw,
- rx_stats, rx_status, skb);
+ if (ath9k_process_rate(common, hw, rx_stats, rx_status, skb))
+ return -EINVAL;
+
rx_status->mactime = ath9k_hw_extend_tsf(ah, rx_stats->rs_tstamp);
rx_status->band = hw->conf.channel->band;
rx_status->freq = hw->conf.channel->center_freq;
- rx_status->noise = common->ani.noise_floor;
rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
rx_status->antenna = rx_stats->rs_antenna;
rx_status->flag |= RX_FLAG_TSFT;
@@ -255,7 +273,8 @@ void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
keyix = rx_stats->rs_keyix;
- if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error) {
+ if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
+ ieee80211_has_protected(fc)) {
rxs->flag |= RX_FLAG_DECRYPTED;
} else if (ieee80211_has_protected(fc)
&& !decrypt_error && skb->len >= hdrlen + 4) {
@@ -286,6 +305,345 @@ int ath9k_cmn_padpos(__le16 frame_control)
}
EXPORT_SYMBOL(ath9k_cmn_padpos);
+int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+
+ if (tx_info->control.hw_key) {
+ if (tx_info->control.hw_key->alg == ALG_WEP)
+ return ATH9K_KEY_TYPE_WEP;
+ else if (tx_info->control.hw_key->alg == ALG_TKIP)
+ return ATH9K_KEY_TYPE_TKIP;
+ else if (tx_info->control.hw_key->alg == ALG_CCMP)
+ return ATH9K_KEY_TYPE_AES;
+ }
+
+ return ATH9K_KEY_TYPE_CLEAR;
+}
+EXPORT_SYMBOL(ath9k_cmn_get_hw_crypto_keytype);
+
+static u32 ath9k_get_extchanmode(struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type)
+{
+ u32 chanmode = 0;
+
+ switch (chan->band) {
+ case IEEE80211_BAND_2GHZ:
+ switch (channel_type) {
+ case NL80211_CHAN_NO_HT:
+ case NL80211_CHAN_HT20:
+ chanmode = CHANNEL_G_HT20;
+ break;
+ case NL80211_CHAN_HT40PLUS:
+ chanmode = CHANNEL_G_HT40PLUS;
+ break;
+ case NL80211_CHAN_HT40MINUS:
+ chanmode = CHANNEL_G_HT40MINUS;
+ break;
+ }
+ break;
+ case IEEE80211_BAND_5GHZ:
+ switch (channel_type) {
+ case NL80211_CHAN_NO_HT:
+ case NL80211_CHAN_HT20:
+ chanmode = CHANNEL_A_HT20;
+ break;
+ case NL80211_CHAN_HT40PLUS:
+ chanmode = CHANNEL_A_HT40PLUS;
+ break;
+ case NL80211_CHAN_HT40MINUS:
+ chanmode = CHANNEL_A_HT40MINUS;
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return chanmode;
+}
+
+/*
+ * Update internal channel flags.
+ */
+void ath9k_cmn_update_ichannel(struct ieee80211_hw *hw,
+ struct ath9k_channel *ichan)
+{
+ struct ieee80211_channel *chan = hw->conf.channel;
+ struct ieee80211_conf *conf = &hw->conf;
+
+ ichan->channel = chan->center_freq;
+ ichan->chan = chan;
+
+ if (chan->band == IEEE80211_BAND_2GHZ) {
+ ichan->chanmode = CHANNEL_G;
+ ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM | CHANNEL_G;
+ } else {
+ ichan->chanmode = CHANNEL_A;
+ ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
+ }
+
+ if (conf_is_ht(conf))
+ ichan->chanmode = ath9k_get_extchanmode(chan,
+ conf->channel_type);
+}
+EXPORT_SYMBOL(ath9k_cmn_update_ichannel);
+
+/*
+ * Get the internal channel reference.
+ */
+struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
+ struct ath_hw *ah)
+{
+ struct ieee80211_channel *curchan = hw->conf.channel;
+ struct ath9k_channel *channel;
+ u8 chan_idx;
+
+ chan_idx = curchan->hw_value;
+ channel = &ah->channels[chan_idx];
+ ath9k_cmn_update_ichannel(hw, channel);
+
+ return channel;
+}
+EXPORT_SYMBOL(ath9k_cmn_get_curchannel);
+
+static int ath_setkey_tkip(struct ath_common *common, u16 keyix, const u8 *key,
+ struct ath9k_keyval *hk, const u8 *addr,
+ bool authenticator)
+{
+ struct ath_hw *ah = common->ah;
+ const u8 *key_rxmic;
+ const u8 *key_txmic;
+
+ key_txmic = key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY;
+ key_rxmic = key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY;
+
+ if (addr == NULL) {
+ /*
+ * Group key installation - only two key cache entries are used
+ * regardless of splitmic capability since group key is only
+ * used either for TX or RX.
+ */
+ if (authenticator) {
+ memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
+ memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_mic));
+ } else {
+ memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
+ memcpy(hk->kv_txmic, key_rxmic, sizeof(hk->kv_mic));
+ }
+ return ath9k_hw_set_keycache_entry(ah, keyix, hk, addr);
+ }
+ if (!common->splitmic) {
+ /* TX and RX keys share the same key cache entry. */
+ memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
+ memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_txmic));
+ return ath9k_hw_set_keycache_entry(ah, keyix, hk, addr);
+ }
+
+ /* Separate key cache entries for TX and RX */
+
+ /* TX key goes at first index, RX key at +32. */
+ memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
+ if (!ath9k_hw_set_keycache_entry(ah, keyix, hk, NULL)) {
+ /* TX MIC entry failed. No need to proceed further */
+ ath_print(common, ATH_DBG_FATAL,
+ "Setting TX MIC Key Failed\n");
+ return 0;
+ }
+
+ memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
+ /* XXX delete tx key on failure? */
+ return ath9k_hw_set_keycache_entry(ah, keyix + 32, hk, addr);
+}
+
+static int ath_reserve_key_cache_slot_tkip(struct ath_common *common)
+{
+ int i;
+
+ for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) {
+ if (test_bit(i, common->keymap) ||
+ test_bit(i + 64, common->keymap))
+ continue; /* At least one part of TKIP key allocated */
+ if (common->splitmic &&
+ (test_bit(i + 32, common->keymap) ||
+ test_bit(i + 64 + 32, common->keymap)))
+ continue; /* At least one part of TKIP key allocated */
+
+ /* Found a free slot for a TKIP key */
+ return i;
+ }
+ return -1;
+}
+
+static int ath_reserve_key_cache_slot(struct ath_common *common)
+{
+ int i;
+
+ /* First, try to find slots that would not be available for TKIP. */
+ if (common->splitmic) {
+ for (i = IEEE80211_WEP_NKID; i < common->keymax / 4; i++) {
+ if (!test_bit(i, common->keymap) &&
+ (test_bit(i + 32, common->keymap) ||
+ test_bit(i + 64, common->keymap) ||
+ test_bit(i + 64 + 32, common->keymap)))
+ return i;
+ if (!test_bit(i + 32, common->keymap) &&
+ (test_bit(i, common->keymap) ||
+ test_bit(i + 64, common->keymap) ||
+ test_bit(i + 64 + 32, common->keymap)))
+ return i + 32;
+ if (!test_bit(i + 64, common->keymap) &&
+ (test_bit(i , common->keymap) ||
+ test_bit(i + 32, common->keymap) ||
+ test_bit(i + 64 + 32, common->keymap)))
+ return i + 64;
+ if (!test_bit(i + 64 + 32, common->keymap) &&
+ (test_bit(i, common->keymap) ||
+ test_bit(i + 32, common->keymap) ||
+ test_bit(i + 64, common->keymap)))
+ return i + 64 + 32;
+ }
+ } else {
+ for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) {
+ if (!test_bit(i, common->keymap) &&
+ test_bit(i + 64, common->keymap))
+ return i;
+ if (test_bit(i, common->keymap) &&
+ !test_bit(i + 64, common->keymap))
+ return i + 64;
+ }
+ }
+
+ /* No partially used TKIP slots, pick any available slot */
+ for (i = IEEE80211_WEP_NKID; i < common->keymax; i++) {
+ /* Do not allow slots that could be needed for TKIP group keys
+ * to be used. This limitation could be removed if we know that
+ * TKIP will not be used. */
+ if (i >= 64 && i < 64 + IEEE80211_WEP_NKID)
+ continue;
+ if (common->splitmic) {
+ if (i >= 32 && i < 32 + IEEE80211_WEP_NKID)
+ continue;
+ if (i >= 64 + 32 && i < 64 + 32 + IEEE80211_WEP_NKID)
+ continue;
+ }
+
+ if (!test_bit(i, common->keymap))
+ return i; /* Found a free slot for a key */
+ }
+
+ /* No free slot found */
+ return -1;
+}
+
+/*
+ * Configure encryption in the HW.
+ */
+int ath9k_cmn_key_config(struct ath_common *common,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct ath_hw *ah = common->ah;
+ struct ath9k_keyval hk;
+ const u8 *mac = NULL;
+ int ret = 0;
+ int idx;
+
+ memset(&hk, 0, sizeof(hk));
+
+ switch (key->alg) {
+ case ALG_WEP:
+ hk.kv_type = ATH9K_CIPHER_WEP;
+ break;
+ case ALG_TKIP:
+ hk.kv_type = ATH9K_CIPHER_TKIP;
+ break;
+ case ALG_CCMP:
+ hk.kv_type = ATH9K_CIPHER_AES_CCM;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ hk.kv_len = key->keylen;
+ memcpy(hk.kv_val, key->key, key->keylen);
+
+ if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+ /* For now, use the default keys for broadcast keys. This may
+ * need to change with virtual interfaces. */
+ idx = key->keyidx;
+ } else if (key->keyidx) {
+ if (WARN_ON(!sta))
+ return -EOPNOTSUPP;
+ mac = sta->addr;
+
+ if (vif->type != NL80211_IFTYPE_AP) {
+ /* Only keyidx 0 should be used with unicast key, but
+ * allow this for client mode for now. */
+ idx = key->keyidx;
+ } else
+ return -EIO;
+ } else {
+ if (WARN_ON(!sta))
+ return -EOPNOTSUPP;
+ mac = sta->addr;
+
+ if (key->alg == ALG_TKIP)
+ idx = ath_reserve_key_cache_slot_tkip(common);
+ else
+ idx = ath_reserve_key_cache_slot(common);
+ if (idx < 0)
+ return -ENOSPC; /* no free key cache entries */
+ }
+
+ if (key->alg == ALG_TKIP)
+ ret = ath_setkey_tkip(common, idx, key->key, &hk, mac,
+ vif->type == NL80211_IFTYPE_AP);
+ else
+ ret = ath9k_hw_set_keycache_entry(ah, idx, &hk, mac);
+
+ if (!ret)
+ return -EIO;
+
+ set_bit(idx, common->keymap);
+ if (key->alg == ALG_TKIP) {
+ set_bit(idx + 64, common->keymap);
+ if (common->splitmic) {
+ set_bit(idx + 32, common->keymap);
+ set_bit(idx + 64 + 32, common->keymap);
+ }
+ }
+
+ return idx;
+}
+EXPORT_SYMBOL(ath9k_cmn_key_config);
+
+/*
+ * Delete Key.
+ */
+void ath9k_cmn_key_delete(struct ath_common *common,
+ struct ieee80211_key_conf *key)
+{
+ struct ath_hw *ah = common->ah;
+
+ ath9k_hw_keyreset(ah, key->hw_key_idx);
+ if (key->hw_key_idx < IEEE80211_WEP_NKID)
+ return;
+
+ clear_bit(key->hw_key_idx, common->keymap);
+ if (key->alg != ALG_TKIP)
+ return;
+
+ clear_bit(key->hw_key_idx + 64, common->keymap);
+ if (common->splitmic) {
+ ath9k_hw_keyreset(ah, key->hw_key_idx + 32);
+ clear_bit(key->hw_key_idx + 32, common->keymap);
+ clear_bit(key->hw_key_idx + 64 + 32, common->keymap);
+ }
+}
+EXPORT_SYMBOL(ath9k_cmn_key_delete);
+
static int __init ath9k_cmn_init(void)
{
return 0;
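
The two reservation helpers above treat common->keymap as a flat bitmap in which a TKIP key claims several related entries: the slot itself, the slot 64 entries further on, and, on split-MIC hardware, the two slots offset by 32 from each of those. A minimal stand-alone sketch of that bookkeeping (illustrative only; KEYMAX, WEP_NKID and the plain bool array are stand-ins, not the driver's data structures):

	/*
	 * Illustrative sketch, not driver code: which key-cache entries a
	 * TKIP key at slot 'idx' would occupy, mirroring the set_bit()
	 * calls in ath9k_cmn_key_config() above.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	#define KEYMAX   128	/* assumed cache size for the sketch */
	#define WEP_NKID 4

	static bool keymap[KEYMAX];

	static void mark_tkip_slot(int idx, bool splitmic)
	{
		keymap[idx] = true;		/* the slot itself */
		keymap[idx + 64] = true;	/* paired entry at +64 */
		if (splitmic) {
			keymap[idx + 32] = true;	/* extra entries on */
			keymap[idx + 64 + 32] = true;	/* split-MIC hardware */
		}
	}

	int main(void)
	{
		int i;

		mark_tkip_slot(WEP_NKID, true);
		for (i = 0; i < KEYMAX; i++)
			if (keymap[i])
				printf("slot %d in use\n", i);
		return 0;
	}
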
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index 042999c2fe9c..e08f7e5a26e0 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -20,9 +20,12 @@
#include "../debug.h"
#include "hw.h"
+#include "hw-ops.h"
/* Common header for Atheros 802.11n base driver cores */
+#define IEEE80211_WEP_NKID 4
+
#define WME_NUM_TID 16
#define WME_BA_BMP_SIZE 64
#define WME_MAX_BA WME_BA_BMP_SIZE
@@ -74,11 +77,12 @@ struct ath_buf {
an aggregate) */
struct ath_buf *bf_next; /* next subframe in the aggregate */
struct sk_buff *bf_mpdu; /* enclosing frame structure */
- struct ath_desc *bf_desc; /* virtual addr of desc */
+ void *bf_desc; /* virtual addr of desc */
dma_addr_t bf_daddr; /* physical addr of desc */
dma_addr_t bf_buf_addr; /* physical addr of data buffer */
bool bf_stale;
bool bf_isnullfunc;
+ bool bf_tx_aborted;
u16 bf_flags;
struct ath_buf_state bf_state;
dma_addr_t bf_dmacontext;
@@ -125,3 +129,14 @@ void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
bool decrypt_error);
int ath9k_cmn_padpos(__le16 frame_control);
+int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
+void ath9k_cmn_update_ichannel(struct ieee80211_hw *hw,
+ struct ath9k_channel *ichan);
+struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
+ struct ath_hw *ah);
+int ath9k_cmn_key_config(struct ath_common *common,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key);
+void ath9k_cmn_key_delete(struct ath_common *common,
+ struct ieee80211_key_conf *key);
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 081e0085ed4c..64e30cd45d05 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -157,10 +157,10 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
"txfifo_dcu_num_0: %2d txfifo_dcu_num_1: %2d\n",
(val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17);
- len += snprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x \n",
+ len += snprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x\n",
REG_READ_D(ah, AR_OBS_BUS_1));
len += snprintf(buf + len, DMA_BUF_LEN - len,
- "AR_CR: 0x%x \n", REG_READ_D(ah, AR_CR));
+ "AR_CR: 0x%x\n", REG_READ_D(ah, AR_CR));
ath9k_ps_restore(sc);
@@ -180,8 +180,15 @@ void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status)
{
if (status)
sc->debug.stats.istats.total++;
- if (status & ATH9K_INT_RX)
- sc->debug.stats.istats.rxok++;
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ if (status & ATH9K_INT_RXLP)
+ sc->debug.stats.istats.rxlp++;
+ if (status & ATH9K_INT_RXHP)
+ sc->debug.stats.istats.rxhp++;
+ } else {
+ if (status & ATH9K_INT_RX)
+ sc->debug.stats.istats.rxok++;
+ }
if (status & ATH9K_INT_RXEOL)
sc->debug.stats.istats.rxeol++;
if (status & ATH9K_INT_RXORN)
@@ -223,8 +230,15 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
char buf[512];
unsigned int len = 0;
- len += snprintf(buf + len, sizeof(buf) - len,
- "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%8s: %10u\n", "RXHP", sc->debug.stats.istats.rxhp);
+ } else {
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
+ }
len += snprintf(buf + len, sizeof(buf) - len,
"%8s: %10u\n", "RXEOL", sc->debug.stats.istats.rxeol);
len += snprintf(buf + len, sizeof(buf) - len,
@@ -557,10 +571,8 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
}
void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
- struct ath_buf *bf)
+ struct ath_buf *bf, struct ath_tx_status *ts)
{
- struct ath_desc *ds = bf->bf_desc;
-
if (bf_isampdu(bf)) {
if (bf_isxretried(bf))
TX_STAT_INC(txq->axq_qnum, a_xretries);
@@ -570,17 +582,17 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
TX_STAT_INC(txq->axq_qnum, completed);
}
- if (ds->ds_txstat.ts_status & ATH9K_TXERR_FIFO)
+ if (ts->ts_status & ATH9K_TXERR_FIFO)
TX_STAT_INC(txq->axq_qnum, fifo_underrun);
- if (ds->ds_txstat.ts_status & ATH9K_TXERR_XTXOP)
+ if (ts->ts_status & ATH9K_TXERR_XTXOP)
TX_STAT_INC(txq->axq_qnum, xtxop);
- if (ds->ds_txstat.ts_status & ATH9K_TXERR_TIMER_EXPIRED)
+ if (ts->ts_status & ATH9K_TXERR_TIMER_EXPIRED)
TX_STAT_INC(txq->axq_qnum, timer_exp);
- if (ds->ds_txstat.ts_flags & ATH9K_TX_DESC_CFG_ERR)
+ if (ts->ts_flags & ATH9K_TX_DESC_CFG_ERR)
TX_STAT_INC(txq->axq_qnum, desc_cfg_err);
- if (ds->ds_txstat.ts_flags & ATH9K_TX_DATA_UNDERRUN)
+ if (ts->ts_flags & ATH9K_TX_DATA_UNDERRUN)
TX_STAT_INC(txq->axq_qnum, data_underrun);
- if (ds->ds_txstat.ts_flags & ATH9K_TX_DELIM_UNDERRUN)
+ if (ts->ts_flags & ATH9K_TX_DELIM_UNDERRUN)
TX_STAT_INC(txq->axq_qnum, delim_underrun);
}
@@ -663,30 +675,29 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
#undef PHY_ERR
}
-void ath_debug_stat_rx(struct ath_softc *sc, struct ath_buf *bf)
+void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
{
#define RX_STAT_INC(c) sc->debug.stats.rxstats.c++
#define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++
- struct ath_desc *ds = bf->bf_desc;
u32 phyerr;
- if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
+ if (rs->rs_status & ATH9K_RXERR_CRC)
RX_STAT_INC(crc_err);
- if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT)
+ if (rs->rs_status & ATH9K_RXERR_DECRYPT)
RX_STAT_INC(decrypt_crc_err);
- if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC)
+ if (rs->rs_status & ATH9K_RXERR_MIC)
RX_STAT_INC(mic_err);
- if (ds->ds_rxstat.rs_status & ATH9K_RX_DELIM_CRC_PRE)
+ if (rs->rs_status & ATH9K_RX_DELIM_CRC_PRE)
RX_STAT_INC(pre_delim_crc_err);
- if (ds->ds_rxstat.rs_status & ATH9K_RX_DELIM_CRC_POST)
+ if (rs->rs_status & ATH9K_RX_DELIM_CRC_POST)
RX_STAT_INC(post_delim_crc_err);
- if (ds->ds_rxstat.rs_status & ATH9K_RX_DECRYPT_BUSY)
+ if (rs->rs_status & ATH9K_RX_DECRYPT_BUSY)
RX_STAT_INC(decrypt_busy_err);
- if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY) {
+ if (rs->rs_status & ATH9K_RXERR_PHY) {
RX_STAT_INC(phy_err);
- phyerr = ds->ds_rxstat.rs_phyerr & 0x24;
+ phyerr = rs->rs_phyerr & 0x24;
RX_PHY_ERR_INC(phyerr);
}
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 86780e68b31e..c545960e7ec5 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -35,6 +35,8 @@ struct ath_buf;
* struct ath_interrupt_stats - Contains statistics about interrupts
* @total: Total no. of interrupts generated so far
* @rxok: RX with no errors
+ * @rxlp: RX on the low priority RX queue
+ * @rxhp: RX on the high priority RX queue (uapsd only)
* @rxeol: RX with no more RXDESC available
* @rxorn: RX FIFO overrun
* @txok: TX completed at the requested rate
@@ -55,6 +57,8 @@ struct ath_buf;
struct ath_interrupt_stats {
u32 total;
u32 rxok;
+ u32 rxlp;
+ u32 rxhp;
u32 rxeol;
u32 rxorn;
u32 txok;
@@ -167,8 +171,8 @@ void ath9k_debug_remove_root(void);
void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
void ath_debug_stat_rc(struct ath_softc *sc, int final_rate);
void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
- struct ath_buf *bf);
-void ath_debug_stat_rx(struct ath_softc *sc, struct ath_buf *bf);
+ struct ath_buf *bf, struct ath_tx_status *ts);
+void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs);
void ath_debug_stat_retries(struct ath_softc *sc, int rix,
int xretries, int retries, u8 per);
@@ -204,12 +208,13 @@ static inline void ath_debug_stat_rc(struct ath_softc *sc,
static inline void ath_debug_stat_tx(struct ath_softc *sc,
struct ath_txq *txq,
- struct ath_buf *bf)
+ struct ath_buf *bf,
+ struct ath_tx_status *ts)
{
}
static inline void ath_debug_stat_rx(struct ath_softc *sc,
- struct ath_buf *bf)
+ struct ath_rx_status *rs)
{
}
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index dacaae934148..bd9dff3293dc 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -256,14 +256,13 @@ int ath9k_hw_eeprom_init(struct ath_hw *ah)
{
int status;
- if (AR_SREV_9287(ah)) {
- ah->eep_map = EEP_MAP_AR9287;
- ah->eep_ops = &eep_AR9287_ops;
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ ah->eep_ops = &eep_ar9300_ops;
+ else if (AR_SREV_9287(ah)) {
+ ah->eep_ops = &eep_ar9287_ops;
} else if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) {
- ah->eep_map = EEP_MAP_4KBITS;
ah->eep_ops = &eep_4k_ops;
} else {
- ah->eep_map = EEP_MAP_DEFAULT;
ah->eep_ops = &eep_def_ops;
}
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 2f2993b50e2f..21354c15a9a9 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -19,6 +19,7 @@
#include "../ath.h"
#include <net/cfg80211.h>
+#include "ar9003_eeprom.h"
#define AH_USE_EEPROM 0x1
@@ -93,7 +94,6 @@
*/
#define AR9285_RDEXT_DEFAULT 0x1F
-#define AR_EEPROM_MAC(i) (0x1d+(i))
#define ATH9K_POW_SM(_r, _s) (((_r) & 0x3f) << (_s))
#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5))
#define ath9k_hw_use_flash(_ah) (!(_ah->ah_flags & AH_USE_EEPROM))
@@ -155,6 +155,7 @@
#define AR5416_BCHAN_UNUSED 0xFF
#define AR5416_MAX_PWR_RANGE_IN_HALF_DB 64
#define AR5416_MAX_CHAINS 3
+#define AR9300_MAX_CHAINS 3
#define AR5416_PWR_TABLE_OFFSET_DB -5
/* Rx gain type values */
@@ -249,16 +250,20 @@ enum eeprom_param {
EEP_MINOR_REV,
EEP_TX_MASK,
EEP_RX_MASK,
+ EEP_FSTCLK_5G,
EEP_RXGAIN_TYPE,
- EEP_TXGAIN_TYPE,
EEP_OL_PWRCTRL,
+ EEP_TXGAIN_TYPE,
EEP_RC_CHAIN_MASK,
EEP_DAC_HPWR_5G,
EEP_FRAC_N_5G,
EEP_DEV_TYPE,
EEP_TEMPSENSE_SLOPE,
EEP_TEMPSENSE_SLOPE_PAL_ON,
- EEP_PWR_TABLE_OFFSET
+ EEP_PWR_TABLE_OFFSET,
+ EEP_DRIVE_STRENGTH,
+ EEP_INTERNAL_REGULATOR,
+ EEP_SWREG
};
enum ar5416_rates {
@@ -295,7 +300,8 @@ struct base_eep_header {
u32 binBuildNumber;
u8 deviceType;
u8 pwdclkind;
- u8 futureBase_1[2];
+ u8 fastClk5g;
+ u8 divChain;
u8 rxGainType;
u8 dacHiPwrMode_5G;
u8 openLoopPwrCntl;
@@ -656,13 +662,6 @@ struct ath9k_country_entry {
u8 iso[3];
};
-enum ath9k_eep_map {
- EEP_MAP_DEFAULT = 0x0,
- EEP_MAP_4KBITS,
- EEP_MAP_AR9287,
- EEP_MAP_MAX
-};
-
struct eeprom_ops {
int (*check_eeprom)(struct ath_hw *hw);
u32 (*get_eeprom)(struct ath_hw *hw, enum eeprom_param param);
@@ -713,6 +712,8 @@ int ath9k_hw_eeprom_init(struct ath_hw *ah);
extern const struct eeprom_ops eep_def_ops;
extern const struct eeprom_ops eep_4k_ops;
-extern const struct eeprom_ops eep_AR9287_ops;
+extern const struct eeprom_ops eep_ar9287_ops;
+extern const struct eeprom_ops eep_ar9287_ops;
+extern const struct eeprom_ops eep_ar9300_ops;
#endif /* EEPROM_H */
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 68db16690abf..41a77d1bd439 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -15,6 +15,7 @@
*/
#include "hw.h"
+#include "ar9002_phy.h"
static int ath9k_hw_4k_get_eeprom_ver(struct ath_hw *ah)
{
@@ -43,7 +44,7 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
for (addr = 0; addr < SIZE_EEPROM_4K; addr++) {
if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) {
ath_print(common, ATH_DBG_EEPROM,
- "Unable to read eeprom region \n");
+ "Unable to read eeprom region\n");
return false;
}
eep_data++;
@@ -182,11 +183,11 @@ static u32 ath9k_hw_4k_get_eeprom(struct ath_hw *ah,
switch (param) {
case EEP_NFTHRESH_2:
return pModal->noiseFloorThreshCh[0];
- case AR_EEPROM_MAC(0):
+ case EEP_MAC_LSW:
return pBase->macAddr[0] << 8 | pBase->macAddr[1];
- case AR_EEPROM_MAC(1):
+ case EEP_MAC_MID:
return pBase->macAddr[2] << 8 | pBase->macAddr[3];
- case AR_EEPROM_MAC(2):
+ case EEP_MAC_MSW:
return pBase->macAddr[4] << 8 | pBase->macAddr[5];
case EEP_REG_0:
return pBase->regDmn[0];
@@ -453,6 +454,8 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
&tMinCalPower, gainBoundaries,
pdadcValues, numXpdGain);
+ ENABLE_REGWRITE_BUFFER(ah);
+
if ((i == 0) || AR_SREV_5416_20_OR_LATER(ah)) {
REG_WRITE(ah, AR_PHY_TPCRG5 + regChainOffset,
SM(pdGainOverlap_t2,
@@ -493,6 +496,9 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
regOffset += 4;
}
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
}
}
@@ -758,6 +764,8 @@ static void ath9k_hw_4k_set_txpower(struct ath_hw *ah,
ratesArray[i] -= AR5416_PWR_TABLE_OFFSET_DB * 2;
}
+ ENABLE_REGWRITE_BUFFER(ah);
+
/* OFDM power per rate */
REG_WRITE(ah, AR_PHY_POWER_TX_RATE1,
ATH9K_POW_SM(ratesArray[rate18mb], 24)
@@ -820,6 +828,9 @@ static void ath9k_hw_4k_set_txpower(struct ath_hw *ah,
| ATH9K_POW_SM(ratesArray[rateDupOfdm], 8)
| ATH9K_POW_SM(ratesArray[rateDupCck], 0));
}
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
}
static void ath9k_hw_4k_set_addac(struct ath_hw *ah,
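
The ENABLE_REGWRITE_BUFFER()/REGWRITE_BUFFER_FLUSH() pairs added above wrap the per-chain calibration and TX-power register writes so the hardware layer can batch them instead of issuing one bus access per write. A rough stand-alone sketch of the idea, with a plain array standing in for the ath9k write buffer (the register numbers below are hypothetical):

	/*
	 * Sketch of buffered register writes; the array is a stand-in, not
	 * the ath9k implementation, and the registers are made up.
	 */
	#include <stdint.h>
	#include <stdio.h>

	struct regwrite { uint32_t reg, val; };

	static struct regwrite buffer[64];
	static int buffered;

	static void reg_write_buffered(uint32_t reg, uint32_t val)
	{
		buffer[buffered].reg = reg;
		buffer[buffered].val = val;
		buffered++;
	}

	static void regwrite_buffer_flush(void)
	{
		for (int i = 0; i < buffered; i++)	/* one burst out */
			printf("write 0x%08x -> reg 0x%04x\n",
			       buffer[i].val, buffer[i].reg);
		buffered = 0;
	}

	int main(void)
	{
		reg_write_buffered(0xa234, 0x1);
		reg_write_buffered(0xa238, 0x2);
		regwrite_buffer_flush();
		return 0;
	}
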
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 839d05a1df29..b471db5fb82d 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -15,6 +15,7 @@
*/
#include "hw.h"
+#include "ar9002_phy.h"
static int ath9k_hw_AR9287_get_eeprom_ver(struct ath_hw *ah)
{
@@ -44,7 +45,7 @@ static bool ath9k_hw_AR9287_fill_eeprom(struct ath_hw *ah)
if (!ath9k_hw_nvram_read(common,
addr + eep_start_loc, eep_data)) {
ath_print(common, ATH_DBG_EEPROM,
- "Unable to read eeprom region \n");
+ "Unable to read eeprom region\n");
return false;
}
eep_data++;
@@ -172,11 +173,11 @@ static u32 ath9k_hw_AR9287_get_eeprom(struct ath_hw *ah,
switch (param) {
case EEP_NFTHRESH_2:
return pModal->noiseFloorThreshCh[0];
- case AR_EEPROM_MAC(0):
+ case EEP_MAC_LSW:
return pBase->macAddr[0] << 8 | pBase->macAddr[1];
- case AR_EEPROM_MAC(1):
+ case EEP_MAC_MID:
return pBase->macAddr[2] << 8 | pBase->macAddr[3];
- case AR_EEPROM_MAC(2):
+ case EEP_MAC_MSW:
return pBase->macAddr[4] << 8 | pBase->macAddr[5];
case EEP_REG_0:
return pBase->regDmn[0];
@@ -1169,7 +1170,7 @@ static u16 ath9k_hw_AR9287_get_spur_channel(struct ath_hw *ah,
#undef EEP_MAP9287_SPURCHAN
}
-const struct eeprom_ops eep_AR9287_ops = {
+const struct eeprom_ops eep_ar9287_ops = {
.check_eeprom = ath9k_hw_AR9287_check_eeprom,
.get_eeprom = ath9k_hw_AR9287_get_eeprom,
.fill_eeprom = ath9k_hw_AR9287_fill_eeprom,
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 404a0341242c..e591ad6016e5 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -15,6 +15,7 @@
*/
#include "hw.h"
+#include "ar9002_phy.h"
static void ath9k_get_txgain_index(struct ath_hw *ah,
struct ath9k_channel *chan,
@@ -222,6 +223,12 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
return -EINVAL;
}
+ /* Enable fixup for AR_AN_TOP2 if necessary */
+ if (AR_SREV_9280_10_OR_LATER(ah) &&
+ (eep->baseEepHeader.version & 0xff) > 0x0a &&
+ eep->baseEepHeader.pwdclkind == 0)
+ ah->need_an_top2_fixup = 1;
+
return 0;
}
@@ -237,11 +244,11 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah,
return pModal[0].noiseFloorThreshCh[0];
case EEP_NFTHRESH_2:
return pModal[1].noiseFloorThreshCh[0];
- case AR_EEPROM_MAC(0):
+ case EEP_MAC_LSW:
return pBase->macAddr[0] << 8 | pBase->macAddr[1];
- case AR_EEPROM_MAC(1):
+ case EEP_MAC_MID:
return pBase->macAddr[2] << 8 | pBase->macAddr[3];
- case AR_EEPROM_MAC(2):
+ case EEP_MAC_MSW:
return pBase->macAddr[4] << 8 | pBase->macAddr[5];
case EEP_REG_0:
return pBase->regDmn[0];
@@ -267,6 +274,8 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah,
return pBase->txMask;
case EEP_RX_MASK:
return pBase->rxMask;
+ case EEP_FSTCLK_5G:
+ return pBase->fastClk5g;
case EEP_RXGAIN_TYPE:
return pBase->rxGainType;
case EEP_TXGAIN_TYPE:
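
The EEP_MAC_LSW/EEP_MAC_MID/EEP_MAC_MSW cases above each hand back two bytes of the MAC address packed big-endian into a 16-bit word. A small sketch of reassembling the six-byte address from those three words (the sample values are made up):

	/* Illustrative only: unpack the three EEPROM MAC words. */
	#include <stdint.h>
	#include <stdio.h>

	static void mac_from_eeprom_words(uint16_t lsw, uint16_t mid,
					  uint16_t msw, uint8_t mac[6])
	{
		mac[0] = lsw >> 8;  mac[1] = lsw & 0xff;
		mac[2] = mid >> 8;  mac[3] = mid & 0xff;
		mac[4] = msw >> 8;  mac[5] = msw & 0xff;
	}

	int main(void)
	{
		uint8_t mac[6];

		mac_from_eeprom_words(0x0003, 0x7f11, 0x2233, mac);
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		return 0;
	}
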
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index deab8beb0680..0ee75e79fe35 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -283,22 +283,17 @@ static void ath9k_gen_timer_start(struct ath_hw *ah,
u32 timer_next,
u32 timer_period)
{
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath_softc *sc = (struct ath_softc *) common->priv;
-
ath9k_hw_gen_timer_start(ah, timer, timer_next, timer_period);
- if ((sc->imask & ATH9K_INT_GENTIMER) == 0) {
+ if ((ah->imask & ATH9K_INT_GENTIMER) == 0) {
ath9k_hw_set_interrupts(ah, 0);
- sc->imask |= ATH9K_INT_GENTIMER;
- ath9k_hw_set_interrupts(ah, sc->imask);
+ ah->imask |= ATH9K_INT_GENTIMER;
+ ath9k_hw_set_interrupts(ah, ah->imask);
}
}
static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
{
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath_softc *sc = (struct ath_softc *) common->priv;
struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
ath9k_hw_gen_timer_stop(ah, timer);
@@ -306,8 +301,8 @@ static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
/* if no timer is enabled, turn off interrupt mask */
if (timer_table->timer_mask.val == 0) {
ath9k_hw_set_interrupts(ah, 0);
- sc->imask &= ~ATH9K_INT_GENTIMER;
- ath9k_hw_set_interrupts(ah, sc->imask);
+ ah->imask &= ~ATH9K_INT_GENTIMER;
+ ath9k_hw_set_interrupts(ah, ah->imask);
}
}
@@ -364,7 +359,7 @@ static void ath_btcoex_no_stomp_timer(void *arg)
bool is_btscan = sc->sc_flags & SC_OP_BT_SCAN;
ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
- "no stomp timer running \n");
+ "no stomp timer running\n");
spin_lock_bh(&btcoex->btcoex_lock);
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
new file mode 100644
index 000000000000..74872ca76f9a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -0,0 +1,1008 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "htc.h"
+
+#define ATH9K_FW_USB_DEV(devid, fw) \
+ { USB_DEVICE(0x0cf3, devid), .driver_info = (unsigned long) fw }
+
+static struct usb_device_id ath9k_hif_usb_ids[] = {
+ ATH9K_FW_USB_DEV(0x9271, "ar9271.fw"),
+ ATH9K_FW_USB_DEV(0x1006, "ar9271.fw"),
+ { },
+};
+
+MODULE_DEVICE_TABLE(usb, ath9k_hif_usb_ids);
+
+static int __hif_usb_tx(struct hif_device_usb *hif_dev);
+
+static void hif_usb_regout_cb(struct urb *urb)
+{
+ struct cmd_buf *cmd = (struct cmd_buf *)urb->context;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ENODEV:
+ case -ESHUTDOWN:
+ goto free;
+ default:
+ break;
+ }
+
+ if (cmd) {
+ ath9k_htc_txcompletion_cb(cmd->hif_dev->htc_handle,
+ cmd->skb, 1);
+ kfree(cmd);
+ }
+
+ return;
+free:
+ kfree_skb(cmd->skb);
+ kfree(cmd);
+}
+
+static int hif_usb_send_regout(struct hif_device_usb *hif_dev,
+ struct sk_buff *skb)
+{
+ struct urb *urb;
+ struct cmd_buf *cmd;
+ int ret = 0;
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (urb == NULL)
+ return -ENOMEM;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL) {
+ usb_free_urb(urb);
+ return -ENOMEM;
+ }
+
+ cmd->skb = skb;
+ cmd->hif_dev = hif_dev;
+
+ usb_fill_int_urb(urb, hif_dev->udev,
+ usb_sndintpipe(hif_dev->udev, USB_REG_OUT_PIPE),
+ skb->data, skb->len,
+ hif_usb_regout_cb, cmd, 1);
+
+ usb_anchor_urb(urb, &hif_dev->regout_submitted);
+ ret = usb_submit_urb(urb, GFP_KERNEL);
+ if (ret) {
+ usb_unanchor_urb(urb);
+ kfree(cmd);
+ }
+ usb_free_urb(urb);
+
+ return ret;
+}
+
+static inline void ath9k_skb_queue_purge(struct hif_device_usb *hif_dev,
+ struct sk_buff_head *list)
+{
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(list)) != NULL) {
+ dev_kfree_skb_any(skb);
+ TX_STAT_INC(skb_dropped);
+ }
+}
+
+static void hif_usb_tx_cb(struct urb *urb)
+{
+ struct tx_buf *tx_buf = (struct tx_buf *) urb->context;
+ struct hif_device_usb *hif_dev = tx_buf->hif_dev;
+ struct sk_buff *skb;
+
+ if (!hif_dev || !tx_buf)
+ return;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ENODEV:
+ case -ESHUTDOWN:
+ /*
+ * The URB has been killed, free the SKBs
+ * and return.
+ */
+ ath9k_skb_queue_purge(hif_dev, &tx_buf->skb_queue);
+ return;
+ default:
+ break;
+ }
+
+ /* Check if TX has been stopped */
+ spin_lock(&hif_dev->tx.tx_lock);
+ if (hif_dev->tx.flags & HIF_USB_TX_STOP) {
+ spin_unlock(&hif_dev->tx.tx_lock);
+ ath9k_skb_queue_purge(hif_dev, &tx_buf->skb_queue);
+ goto add_free;
+ }
+ spin_unlock(&hif_dev->tx.tx_lock);
+
+ /* Complete the queued SKBs. */
+ while ((skb = __skb_dequeue(&tx_buf->skb_queue)) != NULL) {
+ ath9k_htc_txcompletion_cb(hif_dev->htc_handle,
+ skb, 1);
+ TX_STAT_INC(skb_completed);
+ }
+
+add_free:
+ /* Re-initialize the SKB queue */
+ tx_buf->len = tx_buf->offset = 0;
+ __skb_queue_head_init(&tx_buf->skb_queue);
+
+ /* Add this TX buffer to the free list */
+ spin_lock(&hif_dev->tx.tx_lock);
+ list_move_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
+ hif_dev->tx.tx_buf_cnt++;
+ if (!(hif_dev->tx.flags & HIF_USB_TX_STOP))
+ __hif_usb_tx(hif_dev); /* Check for pending SKBs */
+ TX_STAT_INC(buf_completed);
+ spin_unlock(&hif_dev->tx.tx_lock);
+}
+
+/* TX lock has to be taken */
+static int __hif_usb_tx(struct hif_device_usb *hif_dev)
+{
+ struct tx_buf *tx_buf = NULL;
+ struct sk_buff *nskb = NULL;
+ int ret = 0, i;
+ u16 *hdr, tx_skb_cnt = 0;
+ u8 *buf;
+
+ if (hif_dev->tx.tx_skb_cnt == 0)
+ return 0;
+
+ /* Check if a free TX buffer is available */
+ if (list_empty(&hif_dev->tx.tx_buf))
+ return 0;
+
+ tx_buf = list_first_entry(&hif_dev->tx.tx_buf, struct tx_buf, list);
+ list_move_tail(&tx_buf->list, &hif_dev->tx.tx_pending);
+ hif_dev->tx.tx_buf_cnt--;
+
+ tx_skb_cnt = min_t(u16, hif_dev->tx.tx_skb_cnt, MAX_TX_AGGR_NUM);
+
+ for (i = 0; i < tx_skb_cnt; i++) {
+ nskb = __skb_dequeue(&hif_dev->tx.tx_skb_queue);
+
+ /* Should never be NULL */
+ BUG_ON(!nskb);
+
+ hif_dev->tx.tx_skb_cnt--;
+
+ buf = tx_buf->buf;
+ buf += tx_buf->offset;
+ hdr = (u16 *)buf;
+ *hdr++ = nskb->len;
+ *hdr++ = ATH_USB_TX_STREAM_MODE_TAG;
+ buf += 4;
+ memcpy(buf, nskb->data, nskb->len);
+ tx_buf->len = nskb->len + 4;
+
+ if (i < (tx_skb_cnt - 1))
+ tx_buf->offset += (((tx_buf->len - 1) / 4) + 1) * 4;
+
+ if (i == (tx_skb_cnt - 1))
+ tx_buf->len += tx_buf->offset;
+
+ __skb_queue_tail(&tx_buf->skb_queue, nskb);
+ TX_STAT_INC(skb_queued);
+ }
+
+ usb_fill_bulk_urb(tx_buf->urb, hif_dev->udev,
+ usb_sndbulkpipe(hif_dev->udev, USB_WLAN_TX_PIPE),
+ tx_buf->buf, tx_buf->len,
+ hif_usb_tx_cb, tx_buf);
+
+ ret = usb_submit_urb(tx_buf->urb, GFP_ATOMIC);
+ if (ret) {
+ tx_buf->len = tx_buf->offset = 0;
+ ath9k_skb_queue_purge(hif_dev, &tx_buf->skb_queue);
+ __skb_queue_head_init(&tx_buf->skb_queue);
+ list_move_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
+ hif_dev->tx.tx_buf_cnt++;
+ }
+
+ if (!ret)
+ TX_STAT_INC(buf_queued);
+
+ return ret;
+}
+
+static int hif_usb_send_tx(struct hif_device_usb *hif_dev, struct sk_buff *skb,
+ struct ath9k_htc_tx_ctl *tx_ctl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+
+ if (hif_dev->tx.flags & HIF_USB_TX_STOP) {
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+ return -ENODEV;
+ }
+
+ /* Check if the max queue count has been reached */
+ if (hif_dev->tx.tx_skb_cnt > MAX_TX_BUF_NUM) {
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+ return -ENOMEM;
+ }
+
+ __skb_queue_tail(&hif_dev->tx.tx_skb_queue, skb);
+ hif_dev->tx.tx_skb_cnt++;
+
+ /* Send normal frames immediately */
+ if (!tx_ctl || (tx_ctl && (tx_ctl->type == ATH9K_HTC_NORMAL)))
+ __hif_usb_tx(hif_dev);
+
+ /* Check if AMPDUs have to be sent immediately */
+ if (tx_ctl && (tx_ctl->type == ATH9K_HTC_AMPDU) &&
+ (hif_dev->tx.tx_buf_cnt == MAX_TX_URB_NUM) &&
+ (hif_dev->tx.tx_skb_cnt < 2)) {
+ __hif_usb_tx(hif_dev);
+ }
+
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+
+ return 0;
+}
+
+static void hif_usb_start(void *hif_handle, u8 pipe_id)
+{
+ struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
+ unsigned long flags;
+
+ hif_dev->flags |= HIF_USB_START;
+
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ hif_dev->tx.flags &= ~HIF_USB_TX_STOP;
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+}
+
+static void hif_usb_stop(void *hif_handle, u8 pipe_id)
+{
+ struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ ath9k_skb_queue_purge(hif_dev, &hif_dev->tx.tx_skb_queue);
+ hif_dev->tx.tx_skb_cnt = 0;
+ hif_dev->tx.flags |= HIF_USB_TX_STOP;
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+}
+
+static int hif_usb_send(void *hif_handle, u8 pipe_id, struct sk_buff *skb,
+ struct ath9k_htc_tx_ctl *tx_ctl)
+{
+ struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
+ int ret = 0;
+
+ switch (pipe_id) {
+ case USB_WLAN_TX_PIPE:
+ ret = hif_usb_send_tx(hif_dev, skb, tx_ctl);
+ break;
+ case USB_REG_OUT_PIPE:
+ ret = hif_usb_send_regout(hif_dev, skb);
+ break;
+ default:
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: Invalid TX pipe: %d\n", pipe_id);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static struct ath9k_htc_hif hif_usb = {
+ .transport = ATH9K_HIF_USB,
+ .name = "ath9k_hif_usb",
+
+ .control_ul_pipe = USB_REG_OUT_PIPE,
+ .control_dl_pipe = USB_REG_IN_PIPE,
+
+ .start = hif_usb_start,
+ .stop = hif_usb_stop,
+ .send = hif_usb_send,
+};
+
+static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
+ struct sk_buff *skb)
+{
+ struct sk_buff *nskb, *skb_pool[MAX_PKT_NUM_IN_TRANSFER];
+ int index = 0, i = 0, chk_idx, len = skb->len;
+ int rx_remain_len = 0, rx_pkt_len = 0;
+ u16 pkt_len, pkt_tag, pool_index = 0;
+ u8 *ptr;
+
+ spin_lock(&hif_dev->rx_lock);
+
+ rx_remain_len = hif_dev->rx_remain_len;
+ rx_pkt_len = hif_dev->rx_transfer_len;
+
+ if (rx_remain_len != 0) {
+ struct sk_buff *remain_skb = hif_dev->remain_skb;
+
+ if (remain_skb) {
+ ptr = (u8 *) remain_skb->data;
+
+ index = rx_remain_len;
+ rx_remain_len -= hif_dev->rx_pad_len;
+ ptr += rx_pkt_len;
+
+ memcpy(ptr, skb->data, rx_remain_len);
+
+ rx_pkt_len += rx_remain_len;
+ hif_dev->rx_remain_len = 0;
+ skb_put(remain_skb, rx_pkt_len);
+
+ skb_pool[pool_index++] = remain_skb;
+
+ } else {
+ index = rx_remain_len;
+ }
+ }
+
+ spin_unlock(&hif_dev->rx_lock);
+
+ while (index < len) {
+ ptr = (u8 *) skb->data;
+
+ pkt_len = ptr[index] + (ptr[index+1] << 8);
+ pkt_tag = ptr[index+2] + (ptr[index+3] << 8);
+
+ if (pkt_tag == ATH_USB_RX_STREAM_MODE_TAG) {
+ u16 pad_len;
+
+ pad_len = 4 - (pkt_len & 0x3);
+ if (pad_len == 4)
+ pad_len = 0;
+
+ chk_idx = index;
+ index = index + 4 + pkt_len + pad_len;
+
+ if (index > MAX_RX_BUF_SIZE) {
+ spin_lock(&hif_dev->rx_lock);
+ hif_dev->rx_remain_len = index - MAX_RX_BUF_SIZE;
+ hif_dev->rx_transfer_len =
+ MAX_RX_BUF_SIZE - chk_idx - 4;
+ hif_dev->rx_pad_len = pad_len;
+
+ nskb = __dev_alloc_skb(pkt_len + 32,
+ GFP_ATOMIC);
+ if (!nskb) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: RX memory allocation"
+ " error\n");
+ spin_unlock(&hif_dev->rx_lock);
+ goto err;
+ }
+ skb_reserve(nskb, 32);
+ RX_STAT_INC(skb_allocated);
+
+ memcpy(nskb->data, &(skb->data[chk_idx+4]),
+ hif_dev->rx_transfer_len);
+
+ /* Record the buffer pointer */
+ hif_dev->remain_skb = nskb;
+ spin_unlock(&hif_dev->rx_lock);
+ } else {
+ nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC);
+ if (!nskb) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: RX memory allocation"
+ " error\n");
+ goto err;
+ }
+ skb_reserve(nskb, 32);
+ RX_STAT_INC(skb_allocated);
+
+ memcpy(nskb->data, &(skb->data[chk_idx+4]), pkt_len);
+ skb_put(nskb, pkt_len);
+ skb_pool[pool_index++] = nskb;
+ }
+ } else {
+ RX_STAT_INC(skb_dropped);
+ return;
+ }
+ }
+
+err:
+ for (i = 0; i < pool_index; i++) {
+ ath9k_htc_rx_msg(hif_dev->htc_handle, skb_pool[i],
+ skb_pool[i]->len, USB_WLAN_RX_PIPE);
+ RX_STAT_INC(skb_completed);
+ }
+}
+
+static void ath9k_hif_usb_rx_cb(struct urb *urb)
+{
+ struct sk_buff *skb = (struct sk_buff *) urb->context;
+ struct hif_device_usb *hif_dev = (struct hif_device_usb *)
+ usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
+ int ret;
+
+ if (!skb)
+ return;
+
+ if (!hif_dev)
+ goto free;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ENODEV:
+ case -ESHUTDOWN:
+ goto free;
+ default:
+ goto resubmit;
+ }
+
+ if (likely(urb->actual_length != 0)) {
+ skb_put(skb, urb->actual_length);
+ ath9k_hif_usb_rx_stream(hif_dev, skb);
+ }
+
+resubmit:
+ skb_reset_tail_pointer(skb);
+ skb_trim(skb, 0);
+
+ usb_anchor_urb(urb, &hif_dev->rx_submitted);
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret) {
+ usb_unanchor_urb(urb);
+ goto free;
+ }
+
+ return;
+free:
+ kfree_skb(skb);
+}
+
+static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
+{
+ struct sk_buff *skb = (struct sk_buff *) urb->context;
+ struct sk_buff *nskb;
+ struct hif_device_usb *hif_dev = (struct hif_device_usb *)
+ usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
+ int ret;
+
+ if (!skb)
+ return;
+
+ if (!hif_dev)
+ goto free;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ENODEV:
+ case -ESHUTDOWN:
+ goto free;
+ default:
+ goto resubmit;
+ }
+
+ if (likely(urb->actual_length != 0)) {
+ skb_put(skb, urb->actual_length);
+
+ /* Process the command first */
+ ath9k_htc_rx_msg(hif_dev->htc_handle, skb,
+ skb->len, USB_REG_IN_PIPE);
+
+
+ nskb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_ATOMIC);
+ if (!nskb) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: REG_IN memory allocation failure\n");
+ urb->context = NULL;
+ return;
+ }
+
+ usb_fill_int_urb(urb, hif_dev->udev,
+ usb_rcvintpipe(hif_dev->udev, USB_REG_IN_PIPE),
+ nskb->data, MAX_REG_IN_BUF_SIZE,
+ ath9k_hif_usb_reg_in_cb, nskb, 1);
+
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret) {
+ kfree_skb(nskb);
+ urb->context = NULL;
+ }
+
+ return;
+ }
+
+resubmit:
+ skb_reset_tail_pointer(skb);
+ skb_trim(skb, 0);
+
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret)
+ goto free;
+
+ return;
+free:
+ kfree_skb(skb);
+ urb->context = NULL;
+}
+
+static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev)
+{
+ struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL;
+
+ list_for_each_entry_safe(tx_buf, tx_buf_tmp,
+ &hif_dev->tx.tx_buf, list) {
+ usb_kill_urb(tx_buf->urb);
+ list_del(&tx_buf->list);
+ usb_free_urb(tx_buf->urb);
+ kfree(tx_buf->buf);
+ kfree(tx_buf);
+ }
+
+ list_for_each_entry_safe(tx_buf, tx_buf_tmp,
+ &hif_dev->tx.tx_pending, list) {
+ usb_kill_urb(tx_buf->urb);
+ list_del(&tx_buf->list);
+ usb_free_urb(tx_buf->urb);
+ kfree(tx_buf->buf);
+ kfree(tx_buf);
+ }
+}
+
+static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev)
+{
+ struct tx_buf *tx_buf;
+ int i;
+
+ INIT_LIST_HEAD(&hif_dev->tx.tx_buf);
+ INIT_LIST_HEAD(&hif_dev->tx.tx_pending);
+ spin_lock_init(&hif_dev->tx.tx_lock);
+ __skb_queue_head_init(&hif_dev->tx.tx_skb_queue);
+
+ for (i = 0; i < MAX_TX_URB_NUM; i++) {
+ tx_buf = kzalloc(sizeof(struct tx_buf), GFP_KERNEL);
+ if (!tx_buf)
+ goto err;
+
+ tx_buf->buf = kzalloc(MAX_TX_BUF_SIZE, GFP_KERNEL);
+ if (!tx_buf->buf)
+ goto err;
+
+ tx_buf->urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!tx_buf->urb)
+ goto err;
+
+ tx_buf->hif_dev = hif_dev;
+ __skb_queue_head_init(&tx_buf->skb_queue);
+
+ list_add_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
+ }
+
+ hif_dev->tx.tx_buf_cnt = MAX_TX_URB_NUM;
+
+ return 0;
+err:
+ ath9k_hif_usb_dealloc_tx_urbs(hif_dev);
+ return -ENOMEM;
+}
+
+static void ath9k_hif_usb_dealloc_rx_urbs(struct hif_device_usb *hif_dev)
+{
+ usb_kill_anchored_urbs(&hif_dev->rx_submitted);
+}
+
+static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
+{
+ struct urb *urb = NULL;
+ struct sk_buff *skb = NULL;
+ int i, ret;
+
+ init_usb_anchor(&hif_dev->rx_submitted);
+ spin_lock_init(&hif_dev->rx_lock);
+
+ for (i = 0; i < MAX_RX_URB_NUM; i++) {
+
+ /* Allocate URB */
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (urb == NULL) {
+ ret = -ENOMEM;
+ goto err_urb;
+ }
+
+ /* Allocate buffer */
+ skb = alloc_skb(MAX_RX_BUF_SIZE, GFP_KERNEL);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto err_skb;
+ }
+
+ usb_fill_bulk_urb(urb, hif_dev->udev,
+ usb_rcvbulkpipe(hif_dev->udev,
+ USB_WLAN_RX_PIPE),
+ skb->data, MAX_RX_BUF_SIZE,
+ ath9k_hif_usb_rx_cb, skb);
+
+ /* Anchor URB */
+ usb_anchor_urb(urb, &hif_dev->rx_submitted);
+
+ /* Submit URB */
+ ret = usb_submit_urb(urb, GFP_KERNEL);
+ if (ret) {
+ usb_unanchor_urb(urb);
+ goto err_submit;
+ }
+
+ /*
+ * Drop the reference count.
+ * This ensures that the URB is freed when the anchored URBs are killed.
+ */
+ usb_free_urb(urb);
+ }
+
+ return 0;
+
+err_submit:
+ kfree_skb(skb);
+err_skb:
+ usb_free_urb(urb);
+err_urb:
+ ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
+ return ret;
+}
+
+static void ath9k_hif_usb_dealloc_reg_in_urb(struct hif_device_usb *hif_dev)
+{
+ if (hif_dev->reg_in_urb) {
+ usb_kill_urb(hif_dev->reg_in_urb);
+ if (hif_dev->reg_in_urb->context)
+ kfree_skb((void *)hif_dev->reg_in_urb->context);
+ usb_free_urb(hif_dev->reg_in_urb);
+ hif_dev->reg_in_urb = NULL;
+ }
+}
+
+static int ath9k_hif_usb_alloc_reg_in_urb(struct hif_device_usb *hif_dev)
+{
+ struct sk_buff *skb;
+
+ hif_dev->reg_in_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (hif_dev->reg_in_urb == NULL)
+ return -ENOMEM;
+
+ skb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_KERNEL);
+ if (!skb)
+ goto err;
+
+ usb_fill_int_urb(hif_dev->reg_in_urb, hif_dev->udev,
+ usb_rcvintpipe(hif_dev->udev, USB_REG_IN_PIPE),
+ skb->data, MAX_REG_IN_BUF_SIZE,
+ ath9k_hif_usb_reg_in_cb, skb, 1);
+
+ if (usb_submit_urb(hif_dev->reg_in_urb, GFP_KERNEL) != 0)
+ goto err;
+
+ return 0;
+
+err:
+ ath9k_hif_usb_dealloc_reg_in_urb(hif_dev);
+ return -ENOMEM;
+}
+
+static int ath9k_hif_usb_alloc_urbs(struct hif_device_usb *hif_dev)
+{
+ /* Register Write */
+ init_usb_anchor(&hif_dev->regout_submitted);
+
+ /* TX */
+ if (ath9k_hif_usb_alloc_tx_urbs(hif_dev) < 0)
+ goto err;
+
+ /* RX */
+ if (ath9k_hif_usb_alloc_rx_urbs(hif_dev) < 0)
+ goto err;
+
+ /* Register Read */
+ if (ath9k_hif_usb_alloc_reg_in_urb(hif_dev) < 0)
+ goto err;
+
+ return 0;
+err:
+ return -ENOMEM;
+}
+
+static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
+{
+ int transfer, err;
+ const void *data = hif_dev->firmware->data;
+ size_t len = hif_dev->firmware->size;
+ u32 addr = AR9271_FIRMWARE;
+ u8 *buf = kzalloc(4096, GFP_KERNEL);
+
+ if (!buf)
+ return -ENOMEM;
+
+ while (len) {
+ transfer = min_t(int, len, 4096);
+ memcpy(buf, data, transfer);
+
+ err = usb_control_msg(hif_dev->udev,
+ usb_sndctrlpipe(hif_dev->udev, 0),
+ FIRMWARE_DOWNLOAD, 0x40 | USB_DIR_OUT,
+ addr >> 8, 0, buf, transfer, HZ);
+ if (err < 0) {
+ kfree(buf);
+ return err;
+ }
+
+ len -= transfer;
+ data += transfer;
+ addr += transfer;
+ }
+ kfree(buf);
+
+ /*
+ * Issue FW download complete command to firmware.
+ */
+ err = usb_control_msg(hif_dev->udev, usb_sndctrlpipe(hif_dev->udev, 0),
+ FIRMWARE_DOWNLOAD_COMP,
+ 0x40 | USB_DIR_OUT,
+ AR9271_FIRMWARE_TEXT >> 8, 0, NULL, 0, HZ);
+ if (err)
+ return -EIO;
+
+ dev_info(&hif_dev->udev->dev, "ath9k_htc: Transferred FW: %s, size: %ld\n",
+ "ar9271.fw", (unsigned long) hif_dev->firmware->size);
+
+ return 0;
+}
+
+static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev,
+ const char *fw_name)
+{
+ int ret;
+
+ /* Request firmware */
+ ret = request_firmware(&hif_dev->firmware, fw_name, &hif_dev->udev->dev);
+ if (ret) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: Firmware - %s not found\n", fw_name);
+ goto err_fw_req;
+ }
+
+ /* Download firmware */
+ ret = ath9k_hif_usb_download_fw(hif_dev);
+ if (ret) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: Firmware - %s download failed\n", fw_name);
+ goto err_fw_download;
+ }
+
+ /* Alloc URBs */
+ ret = ath9k_hif_usb_alloc_urbs(hif_dev);
+ if (ret) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: Unable to allocate URBs\n");
+ goto err_urb;
+ }
+
+ return 0;
+
+err_urb:
+ /* Nothing */
+err_fw_download:
+ release_firmware(hif_dev->firmware);
+err_fw_req:
+ hif_dev->firmware = NULL;
+ return ret;
+}
+
+static void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev)
+{
+ usb_kill_anchored_urbs(&hif_dev->regout_submitted);
+ ath9k_hif_usb_dealloc_reg_in_urb(hif_dev);
+ ath9k_hif_usb_dealloc_tx_urbs(hif_dev);
+ ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
+}
+
+static void ath9k_hif_usb_dev_deinit(struct hif_device_usb *hif_dev)
+{
+ ath9k_hif_usb_dealloc_urbs(hif_dev);
+ if (hif_dev->firmware)
+ release_firmware(hif_dev->firmware);
+}
+
+static int ath9k_hif_usb_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct usb_device *udev = interface_to_usbdev(interface);
+ struct hif_device_usb *hif_dev;
+ const char *fw_name = (const char *) id->driver_info;
+ int ret = 0;
+
+ hif_dev = kzalloc(sizeof(struct hif_device_usb), GFP_KERNEL);
+ if (!hif_dev) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ usb_get_dev(udev);
+ hif_dev->udev = udev;
+ hif_dev->interface = interface;
+ hif_dev->device_id = id->idProduct;
+#ifdef CONFIG_PM
+ udev->reset_resume = 1;
+#endif
+ usb_set_intfdata(interface, hif_dev);
+
+ ret = ath9k_hif_usb_dev_init(hif_dev, fw_name);
+ if (ret) {
+ ret = -EINVAL;
+ goto err_hif_init_usb;
+ }
+
+ hif_dev->htc_handle = ath9k_htc_hw_alloc(hif_dev);
+ if (hif_dev->htc_handle == NULL) {
+ ret = -ENOMEM;
+ goto err_htc_hw_alloc;
+ }
+
+ ret = ath9k_htc_hw_init(&hif_usb, hif_dev->htc_handle, hif_dev,
+ &hif_dev->udev->dev, hif_dev->device_id,
+ ATH9K_HIF_USB);
+ if (ret) {
+ ret = -EINVAL;
+ goto err_htc_hw_init;
+ }
+
+ dev_info(&hif_dev->udev->dev, "ath9k_htc: USB layer initialized\n");
+
+ return 0;
+
+err_htc_hw_init:
+ ath9k_htc_hw_free(hif_dev->htc_handle);
+err_htc_hw_alloc:
+ ath9k_hif_usb_dev_deinit(hif_dev);
+err_hif_init_usb:
+ usb_set_intfdata(interface, NULL);
+ kfree(hif_dev);
+ usb_put_dev(udev);
+err_alloc:
+ return ret;
+}
+
+static void ath9k_hif_usb_reboot(struct usb_device *udev)
+{
+ u32 reboot_cmd = 0xffffffff;
+ void *buf;
+ int ret;
+
+ buf = kmalloc(4, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ memcpy(buf, &reboot_cmd, 4);
+
+ ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, USB_REG_OUT_PIPE),
+ buf, 4, NULL, HZ);
+ if (ret)
+ dev_err(&udev->dev, "ath9k_htc: USB reboot failed\n");
+
+ kfree(buf);
+}
+
+static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
+{
+ struct usb_device *udev = interface_to_usbdev(interface);
+ struct hif_device_usb *hif_dev =
+ (struct hif_device_usb *) usb_get_intfdata(interface);
+
+ if (hif_dev) {
+ ath9k_htc_hw_deinit(hif_dev->htc_handle,
+ (udev->state == USB_STATE_NOTATTACHED) ? true : false);
+ ath9k_htc_hw_free(hif_dev->htc_handle);
+ ath9k_hif_usb_dev_deinit(hif_dev);
+ usb_set_intfdata(interface, NULL);
+ }
+
+ if (hif_dev->flags & HIF_USB_START)
+ ath9k_hif_usb_reboot(udev);
+
+ kfree(hif_dev);
+ dev_info(&udev->dev, "ath9k_htc: USB layer deinitialized\n");
+ usb_put_dev(udev);
+}
+
+#ifdef CONFIG_PM
+static int ath9k_hif_usb_suspend(struct usb_interface *interface,
+ pm_message_t message)
+{
+ struct hif_device_usb *hif_dev =
+ (struct hif_device_usb *) usb_get_intfdata(interface);
+
+ ath9k_hif_usb_dealloc_urbs(hif_dev);
+
+ return 0;
+}
+
+static int ath9k_hif_usb_resume(struct usb_interface *interface)
+{
+ struct hif_device_usb *hif_dev =
+ (struct hif_device_usb *) usb_get_intfdata(interface);
+ int ret;
+
+ ret = ath9k_hif_usb_alloc_urbs(hif_dev);
+ if (ret)
+ return ret;
+
+ if (hif_dev->firmware) {
+ ret = ath9k_hif_usb_download_fw(hif_dev);
+ if (ret)
+ goto fail_resume;
+ } else {
+ ath9k_hif_usb_dealloc_urbs(hif_dev);
+ return -EIO;
+ }
+
+ mdelay(100);
+
+ ret = ath9k_htc_resume(hif_dev->htc_handle);
+
+ if (ret)
+ goto fail_resume;
+
+ return 0;
+
+fail_resume:
+ ath9k_hif_usb_dealloc_urbs(hif_dev);
+
+ return ret;
+}
+#endif
+
+static struct usb_driver ath9k_hif_usb_driver = {
+ .name = "ath9k_hif_usb",
+ .probe = ath9k_hif_usb_probe,
+ .disconnect = ath9k_hif_usb_disconnect,
+#ifdef CONFIG_PM
+ .suspend = ath9k_hif_usb_suspend,
+ .resume = ath9k_hif_usb_resume,
+ .reset_resume = ath9k_hif_usb_resume,
+#endif
+ .id_table = ath9k_hif_usb_ids,
+ .soft_unbind = 1,
+};
+
+int ath9k_hif_usb_init(void)
+{
+ return usb_register(&ath9k_hif_usb_driver);
+}
+
+void ath9k_hif_usb_exit(void)
+{
+ usb_deregister(&ath9k_hif_usb_driver);
+}
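
hif_usb.c above aggregates frames with a simple stream framing: each frame inside a USB transfer is preceded by a 4-byte header carrying its length and a stream-mode tag, and the next frame starts on a 4-byte boundary; the RX path reverses this by reading the length/tag pair and skipping the pad. A stand-alone sketch of that arithmetic (illustrative only, not part of the patch):

	/* Sketch of the stream-mode length/padding math used above. */
	#include <stdint.h>
	#include <stdio.h>

	#define STREAM_TAG 0x697e	/* ATH_USB_TX_STREAM_MODE_TAG */

	/* Bytes one framed packet occupies inside an aggregate transfer. */
	static int framed_len(int payload_len)
	{
		int len = payload_len + 4;		/* header + payload */

		return (((len - 1) / 4) + 1) * 4;	/* round up to 4 */
	}

	/* RX side: padding that follows a payload of 'pkt_len' bytes. */
	static int rx_pad_len(int pkt_len)
	{
		int pad = 4 - (pkt_len & 0x3);

		return pad == 4 ? 0 : pad;
	}

	int main(void)
	{
		printf("tag 0x%04x, 101-byte frame occupies %d bytes, pad %d\n",
		       STREAM_TAG, framed_len(101), rx_pad_len(101));
		return 0;
	}
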
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h
new file mode 100644
index 000000000000..0aca49b6fcb6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef HTC_USB_H
+#define HTC_USB_H
+
+#define AR9271_FIRMWARE 0x501000
+#define AR9271_FIRMWARE_TEXT 0x903000
+
+#define FIRMWARE_DOWNLOAD 0x30
+#define FIRMWARE_DOWNLOAD_COMP 0x31
+
+#define ATH_USB_RX_STREAM_MODE_TAG 0x4e00
+#define ATH_USB_TX_STREAM_MODE_TAG 0x697e
+
+/* FIXME: Verify these numbers (with Windows) */
+#define MAX_TX_URB_NUM 8
+#define MAX_TX_BUF_NUM 1024
+#define MAX_TX_BUF_SIZE 32768
+#define MAX_TX_AGGR_NUM 20
+
+#define MAX_RX_URB_NUM 8
+#define MAX_RX_BUF_SIZE 16384
+#define MAX_PKT_NUM_IN_TRANSFER 10
+
+#define MAX_REG_OUT_URB_NUM 1
+#define MAX_REG_OUT_BUF_NUM 8
+
+#define MAX_REG_IN_BUF_SIZE 64
+
+/* USB Endpoint definition */
+#define USB_WLAN_TX_PIPE 1
+#define USB_WLAN_RX_PIPE 2
+#define USB_REG_IN_PIPE 3
+#define USB_REG_OUT_PIPE 4
+
+#define HIF_USB_MAX_RXPIPES 2
+#define HIF_USB_MAX_TXPIPES 4
+
+struct tx_buf {
+ u8 *buf;
+ u16 len;
+ u16 offset;
+ struct urb *urb;
+ struct sk_buff_head skb_queue;
+ struct hif_device_usb *hif_dev;
+ struct list_head list;
+};
+
+#define HIF_USB_TX_STOP BIT(0)
+
+struct hif_usb_tx {
+ u8 flags;
+ u8 tx_buf_cnt;
+ u16 tx_skb_cnt;
+ struct sk_buff_head tx_skb_queue;
+ struct list_head tx_buf;
+ struct list_head tx_pending;
+ spinlock_t tx_lock;
+};
+
+struct cmd_buf {
+ struct sk_buff *skb;
+ struct hif_device_usb *hif_dev;
+};
+
+#define HIF_USB_START BIT(0)
+
+struct hif_device_usb {
+ u16 device_id;
+ struct usb_device *udev;
+ struct usb_interface *interface;
+ const struct firmware *firmware;
+ struct htc_target *htc_handle;
+ struct hif_usb_tx tx;
+ struct urb *reg_in_urb;
+ struct usb_anchor regout_submitted;
+ struct usb_anchor rx_submitted;
+ struct sk_buff *remain_skb;
+ int rx_remain_len;
+ int rx_pkt_len;
+ int rx_transfer_len;
+ int rx_pad_len;
+ spinlock_t rx_lock;
+ u8 flags; /* HIF_USB_* */
+};
+
+int ath9k_hif_usb_init(void);
+void ath9k_hif_usb_exit(void);
+
+#endif /* HTC_USB_H */
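
The TX buffer sizing constants above have to accommodate MAX_TX_AGGR_NUM stream-framed packets per URB. A quick back-of-the-envelope check, assuming a hypothetical worst-case frame of 1600 bytes (the real limit depends on the 802.11 frame sizes the target accepts):

	/*
	 * Sanity check on the aggregation constants above; the 1600-byte
	 * frame size is a hypothetical worst case, not a driver value.
	 */
	#include <stdio.h>

	#define MAX_TX_AGGR_NUM  20
	#define MAX_TX_BUF_SIZE  32768

	static int framed_len(int payload)
	{
		int len = payload + 4;			/* 4-byte stream header */

		return (((len - 1) / 4) + 1) * 4;	/* align to 4 bytes */
	}

	int main(void)
	{
		int total = MAX_TX_AGGR_NUM * framed_len(1600);

		printf("%d bytes of %d used\n", total, MAX_TX_BUF_SIZE);
		return total <= MAX_TX_BUF_SIZE ? 0 : 1;
	}
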
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
new file mode 100644
index 000000000000..c251603ab032
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -0,0 +1,465 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef HTC_H
+#define HTC_H
+
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/firmware.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/leds.h>
+#include <linux/slab.h>
+#include <net/mac80211.h>
+
+#include "common.h"
+#include "htc_hst.h"
+#include "hif_usb.h"
+#include "wmi.h"
+
+#define ATH_STA_SHORT_CALINTERVAL 1000 /* 1 second */
+#define ATH_ANI_POLLINTERVAL 100 /* 100 ms */
+#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */
+#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
+
+#define ATH_DEFAULT_BMISS_LIMIT 10
+#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
+#define TSF_TO_TU(_h, _l) \
+ ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
+
+extern struct ieee80211_ops ath9k_htc_ops;
+extern int htc_modparam_nohwcrypt;
+
+enum htc_phymode {
+ HTC_MODE_AUTO = 0,
+ HTC_MODE_11A = 1,
+ HTC_MODE_11B = 2,
+ HTC_MODE_11G = 3,
+ HTC_MODE_FH = 4,
+ HTC_MODE_TURBO_A = 5,
+ HTC_MODE_TURBO_G = 6,
+ HTC_MODE_11NA = 7,
+ HTC_MODE_11NG = 8
+};
+
+enum htc_opmode {
+ HTC_M_STA = 1,
+ HTC_M_IBSS = 0,
+ HTC_M_AHDEMO = 3,
+ HTC_M_HOSTAP = 6,
+ HTC_M_MONITOR = 8,
+ HTC_M_WDS = 2
+};
+
+#define ATH9K_HTC_HDRSPACE sizeof(struct htc_frame_hdr)
+#define ATH9K_HTC_AMPDU 1
+#define ATH9K_HTC_NORMAL 2
+
+#define ATH9K_HTC_TX_CTSONLY 0x1
+#define ATH9K_HTC_TX_RTSCTS 0x2
+#define ATH9K_HTC_TX_USE_MIN_RATE 0x100
+
+struct tx_frame_hdr {
+ u8 data_type;
+ u8 node_idx;
+ u8 vif_idx;
+ u8 tidno;
+ u32 flags; /* ATH9K_HTC_TX_* */
+ u8 key_type;
+ u8 keyix;
+ u8 reserved[26];
+} __packed;
+
+struct tx_mgmt_hdr {
+ u8 node_idx;
+ u8 vif_idx;
+ u8 tidno;
+ u8 flags;
+ u8 key_type;
+ u8 keyix;
+ u16 reserved;
+} __packed;
+
+struct tx_beacon_header {
+ u8 len_changed;
+ u8 vif_index;
+ u16 rev;
+} __packed;
+
+struct ath9k_htc_target_hw {
+ u32 flags;
+ u32 flags_ext;
+ u32 ampdu_limit;
+ u8 ampdu_subframes;
+ u8 tx_chainmask;
+ u8 tx_chainmask_legacy;
+ u8 rtscts_ratecode;
+ u8 protmode;
+} __packed;
+
+struct ath9k_htc_cap_target {
+ u32 flags;
+ u32 flags_ext;
+ u32 ampdu_limit;
+ u8 ampdu_subframes;
+ u8 tx_chainmask;
+ u8 tx_chainmask_legacy;
+ u8 rtscts_ratecode;
+ u8 protmode;
+} __packed;
+
+struct ath9k_htc_target_vif {
+ u8 index;
+ u8 des_bssid[ETH_ALEN];
+ __be32 opmode;
+ u8 myaddr[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
+ u32 flags;
+ u32 flags_ext;
+ u16 ps_sta;
+ __be16 rtsthreshold;
+ u8 ath_cap;
+ u8 node;
+ s8 mcast_rate;
+} __packed;
+
+#define ATH_HTC_STA_AUTH 0x0001
+#define ATH_HTC_STA_QOS 0x0002
+#define ATH_HTC_STA_ERP 0x0004
+#define ATH_HTC_STA_HT 0x0008
+
+/* FIXME: UAPSD variables */
+struct ath9k_htc_target_sta {
+ u16 associd;
+ u16 txpower;
+ u32 ucastkey;
+ u8 macaddr[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
+ u8 sta_index;
+ u8 vif_index;
+ u8 vif_sta;
+ __be16 flags; /* ATH_HTC_STA_* */
+ u16 htcap;
+ u8 valid;
+ u16 capinfo;
+ struct ath9k_htc_target_hw *hw;
+ struct ath9k_htc_target_vif *vif;
+ u16 txseqmgmt;
+ u8 is_vif_sta;
+ u16 maxampdu;
+ u16 iv16;
+ u32 iv32;
+} __packed;
+
+struct ath9k_htc_target_aggr {
+ u8 sta_index;
+ u8 tidno;
+ u8 aggr_enable;
+ u8 padding;
+} __packed;
+
+#define ATH_HTC_RATE_MAX 30
+
+#define WLAN_RC_DS_FLAG 0x01
+#define WLAN_RC_40_FLAG 0x02
+#define WLAN_RC_SGI_FLAG 0x04
+#define WLAN_RC_HT_FLAG 0x08
+
+struct ath9k_htc_rateset {
+ u8 rs_nrates;
+ u8 rs_rates[ATH_HTC_RATE_MAX];
+};
+
+struct ath9k_htc_rate {
+ struct ath9k_htc_rateset legacy_rates;
+ struct ath9k_htc_rateset ht_rates;
+} __packed;
+
+struct ath9k_htc_target_rate {
+ u8 sta_index;
+ u8 isnew;
+ __be32 capflags;
+ struct ath9k_htc_rate rates;
+};
+
+struct ath9k_htc_target_stats {
+ __be32 tx_shortretry;
+ __be32 tx_longretry;
+ __be32 tx_xretries;
+ __be32 ht_txunaggr_xretry;
+ __be32 ht_tx_xretries;
+} __packed;
+
+struct ath9k_htc_vif {
+ u8 index;
+};
+
+#define ATH9K_HTC_MAX_STA 8
+#define ATH9K_HTC_MAX_TID 8
+
+enum tid_aggr_state {
+ AGGR_STOP = 0,
+ AGGR_PROGRESS,
+ AGGR_START,
+ AGGR_OPERATIONAL
+};
+
+struct ath9k_htc_sta {
+ u8 index;
+ enum tid_aggr_state tid_state[ATH9K_HTC_MAX_TID];
+};
+
+struct ath9k_htc_aggr_work {
+ u16 tid;
+ u8 sta_addr[ETH_ALEN];
+ struct ieee80211_hw *hw;
+ struct ieee80211_vif *vif;
+ enum ieee80211_ampdu_mlme_action action;
+ struct mutex mutex;
+};
+
+#define ATH9K_HTC_RXBUF 256
+#define HTC_RX_FRAME_HEADER_SIZE 40
+
+struct ath9k_htc_rxbuf {
+ bool in_process;
+ struct sk_buff *skb;
+ struct ath_htc_rx_status rxstatus;
+ struct list_head list;
+};
+
+struct ath9k_htc_rx {
+ int last_rssi; /* FIXME: per-STA */
+ struct list_head rxbuf;
+ spinlock_t rxbuflock;
+};
+
+struct ath9k_htc_tx_ctl {
+ u8 type; /* ATH9K_HTC_* */
+};
+
+#ifdef CONFIG_ATH9K_HTC_DEBUGFS
+
+#define TX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c++)
+#define RX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.rx_stats.c++)
+
+struct ath_tx_stats {
+ u32 buf_queued;
+ u32 buf_completed;
+ u32 skb_queued;
+ u32 skb_completed;
+ u32 skb_dropped;
+};
+
+struct ath_rx_stats {
+ u32 skb_allocated;
+ u32 skb_completed;
+ u32 skb_dropped;
+};
+
+struct ath9k_debug {
+ struct dentry *debugfs_phy;
+ struct dentry *debugfs_tgt_stats;
+ struct dentry *debugfs_xmit;
+ struct dentry *debugfs_recv;
+ struct ath_tx_stats tx_stats;
+ struct ath_rx_stats rx_stats;
+ u32 txrate;
+};
+
+#else
+
+#define TX_STAT_INC(c) do { } while (0)
+#define RX_STAT_INC(c) do { } while (0)
+
+#endif /* CONFIG_ATH9K_HTC_DEBUGFS */
+
+#define ATH_LED_PIN_DEF 1
+#define ATH_LED_PIN_9287 8
+#define ATH_LED_PIN_9271 15
+#define ATH_LED_ON_DURATION_IDLE 350 /* in msecs */
+#define ATH_LED_OFF_DURATION_IDLE 250 /* in msecs */
+
+enum ath_led_type {
+ ATH_LED_RADIO,
+ ATH_LED_ASSOC,
+ ATH_LED_TX,
+ ATH_LED_RX
+};
+
+struct ath_led {
+ struct ath9k_htc_priv *priv;
+ struct led_classdev led_cdev;
+ enum ath_led_type led_type;
+ struct delayed_work brightness_work;
+ char name[32];
+ bool registered;
+ int brightness;
+};
+
+struct htc_beacon_config {
+ u16 beacon_interval;
+ u16 listen_interval;
+ u16 dtim_period;
+ u16 bmiss_timeout;
+ u8 dtim_count;
+};
+
+#define OP_INVALID BIT(0)
+#define OP_SCANNING BIT(1)
+#define OP_FULL_RESET BIT(2)
+#define OP_LED_ASSOCIATED BIT(3)
+#define OP_LED_ON BIT(4)
+#define OP_PREAMBLE_SHORT BIT(5)
+#define OP_PROTECT_ENABLE BIT(6)
+#define OP_TXAGGR BIT(7)
+#define OP_ASSOCIATED BIT(8)
+#define OP_ENABLE_BEACON BIT(9)
+#define OP_LED_DEINIT BIT(10)
+#define OP_UNPLUGGED BIT(11)
+
+struct ath9k_htc_priv {
+ struct device *dev;
+ struct ieee80211_hw *hw;
+ struct ath_hw *ah;
+ struct htc_target *htc;
+ struct wmi *wmi;
+
+ enum htc_endpoint_id wmi_cmd_ep;
+ enum htc_endpoint_id beacon_ep;
+ enum htc_endpoint_id cab_ep;
+ enum htc_endpoint_id uapsd_ep;
+ enum htc_endpoint_id mgmt_ep;
+ enum htc_endpoint_id data_be_ep;
+ enum htc_endpoint_id data_bk_ep;
+ enum htc_endpoint_id data_vi_ep;
+ enum htc_endpoint_id data_vo_ep;
+
+ u16 op_flags;
+ u16 curtxpow;
+ u16 txpowlimit;
+ u16 nvifs;
+ u16 nstations;
+ u16 seq_no;
+ u32 bmiss_cnt;
+
+ spinlock_t beacon_lock;
+
+ bool tx_queues_stop;
+ spinlock_t tx_lock;
+
+ struct ieee80211_vif *vif;
+ struct htc_beacon_config cur_beacon_conf;
+ unsigned int rxfilter;
+ struct tasklet_struct wmi_tasklet;
+ struct tasklet_struct rx_tasklet;
+ struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
+ struct ath9k_htc_rx rx;
+ struct tasklet_struct tx_tasklet;
+ struct sk_buff_head tx_queue;
+ struct ath9k_htc_aggr_work aggr_work;
+ struct delayed_work ath9k_aggr_work;
+ struct delayed_work ath9k_ani_work;
+ struct work_struct ps_work;
+
+ struct mutex htc_pm_lock;
+ unsigned long ps_usecount;
+ bool ps_enabled;
+ bool ps_idle;
+
+ struct ath_led radio_led;
+ struct ath_led assoc_led;
+ struct ath_led tx_led;
+ struct ath_led rx_led;
+ struct delayed_work ath9k_led_blink_work;
+ int led_on_duration;
+ int led_off_duration;
+ int led_on_cnt;
+ int led_off_cnt;
+ int hwq_map[ATH9K_WME_AC_VO+1];
+
+#ifdef CONFIG_ATH9K_HTC_DEBUGFS
+ struct ath9k_debug debug;
+#endif
+ struct ath9k_htc_target_rate tgt_rate;
+
+ struct mutex mutex;
+};
+
+static inline void ath_read_cachesize(struct ath_common *common, int *csz)
+{
+ common->bus_ops->read_cachesize(common, csz);
+}
+
+void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif);
+void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending);
+
+void ath9k_htc_rxep(void *priv, struct sk_buff *skb,
+ enum htc_endpoint_id ep_id);
+void ath9k_htc_txep(void *priv, struct sk_buff *skb, enum htc_endpoint_id ep_id,
+ bool txok);
+void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb,
+ enum htc_endpoint_id ep_id, bool txok);
+
+void ath9k_htc_station_work(struct work_struct *work);
+void ath9k_htc_aggr_work(struct work_struct *work);
+void ath9k_ani_work(struct work_struct *work);
+
+int ath9k_tx_init(struct ath9k_htc_priv *priv);
+void ath9k_tx_tasklet(unsigned long data);
+int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb);
+void ath9k_tx_cleanup(struct ath9k_htc_priv *priv);
+bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv,
+ enum ath9k_tx_queue_subtype qtype);
+int get_hw_qnum(u16 queue, int *hwq_map);
+int ath_htc_txq_update(struct ath9k_htc_priv *priv, int qnum,
+ struct ath9k_tx_queue_info *qinfo);
+
+int ath9k_rx_init(struct ath9k_htc_priv *priv);
+void ath9k_rx_cleanup(struct ath9k_htc_priv *priv);
+void ath9k_host_rx_init(struct ath9k_htc_priv *priv);
+void ath9k_rx_tasklet(unsigned long data);
+u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv);
+
+void ath9k_htc_ps_wakeup(struct ath9k_htc_priv *priv);
+void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv);
+void ath9k_ps_work(struct work_struct *work);
+
+void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv);
+void ath9k_init_leds(struct ath9k_htc_priv *priv);
+void ath9k_deinit_leds(struct ath9k_htc_priv *priv);
+
+int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
+ u16 devid);
+void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug);
+#ifdef CONFIG_PM
+int ath9k_htc_resume(struct htc_target *htc_handle);
+#endif
+#ifdef CONFIG_ATH9K_HTC_DEBUGFS
+int ath9k_htc_debug_create_root(void);
+void ath9k_htc_debug_remove_root(void);
+int ath9k_htc_init_debug(struct ath_hw *ah);
+void ath9k_htc_exit_debug(struct ath_hw *ah);
+#else
+static inline int ath9k_htc_debug_create_root(void) { return 0; }
+static inline void ath9k_htc_debug_remove_root(void) {}
+static inline int ath9k_htc_init_debug(struct ath_hw *ah) { return 0; }
+static inline void ath9k_htc_exit_debug(struct ath_hw *ah) {}
+#endif /* CONFIG_ATH9K_HTC_DEBUGFS */
+
+#endif /* HTC_H */
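
The TSF_TO_TU() macro defined in htc.h above converts a 64-bit TSF, which counts microseconds, into 1024-microsecond time units, keeping only the low 32 bits; the station beacon code below derives the next TBTT from that value. A simplified sketch of the conversion and the next-TBTT rounding (the TSF and interval values are hypothetical, and the driver additionally adds a small FUDGE offset):

	/* Sketch only: TSF (us) -> TU and a simplified next-TBTT. */
	#include <stdint.h>
	#include <stdio.h>

	#define TSF_TO_TU(_h, _l) \
		((((uint32_t)(_h)) << 22) | (((uint32_t)(_l)) >> 10))

	int main(void)
	{
		uint64_t tsf = 123456789ULL;	/* hypothetical TSF, in us */
		uint32_t tsftu = TSF_TO_TU(tsf >> 32, tsf);
		uint32_t intval = 100;		/* beacon interval in TU */
		uint32_t nexttbtt = tsftu - (tsftu % intval) + intval;

		printf("tsftu=%u next tbtt=%u TU\n", tsftu, nexttbtt);
		return 0;
	}
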
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
new file mode 100644
index 000000000000..c10c7d002eb7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "htc.h"
+
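+/*
+ * FUDGE is a small margin (in TU) added to the current TSF before the
+ * next TBTT is computed, so the programmed beacon timers always land
+ * slightly in the future rather than on a boundary that has just passed.
+ */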
+#define FUDGE 2
+
+static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
+ struct htc_beacon_config *bss_conf)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_beacon_state bs;
+ enum ath9k_int imask = 0;
+ int dtimperiod, dtimcount, sleepduration;
+ int cfpperiod, cfpcount, bmiss_timeout;
+ u32 nexttbtt = 0, intval, tsftu;
+ __be32 htc_imask = 0;
+ u64 tsf;
+ int num_beacons, offset, dtim_dec_count, cfp_dec_count;
+ int ret;
+ u8 cmd_rsp;
+
+ memset(&bs, 0, sizeof(bs));
+
+ intval = bss_conf->beacon_interval & ATH9K_BEACON_PERIOD;
+ bmiss_timeout = (ATH_DEFAULT_BMISS_LIMIT * bss_conf->beacon_interval);
+
+ /*
+ * Setup dtim and cfp parameters according to
+ * last beacon we received (which may be none).
+ */
+ dtimperiod = bss_conf->dtim_period;
+ if (dtimperiod <= 0) /* NB: 0 if not known */
+ dtimperiod = 1;
+ dtimcount = 1;
+ if (dtimcount >= dtimperiod) /* NB: sanity check */
+ dtimcount = 0;
+ cfpperiod = 1; /* NB: no PCF support yet */
+ cfpcount = 0;
+
+ sleepduration = intval;
+ if (sleepduration <= 0)
+ sleepduration = intval;
+
+ /*
+ * Pull nexttbtt forward to reflect the current
+ * TSF and calculate dtim+cfp state for the result.
+ */
+ tsf = ath9k_hw_gettsf64(priv->ah);
+ tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
+
+ num_beacons = tsftu / intval + 1;
+ offset = tsftu % intval;
+ nexttbtt = tsftu - offset;
+ if (offset)
+ nexttbtt += intval;
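+ /*
+ * For example, with intval = 100 TU and tsftu = 12345 TU:
+ * num_beacons = 124, offset = 45, nexttbtt = 12300 + 100 = 12400,
+ * i.e. the first beacon boundary after the current TSF.
+ */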
+
+ /* DTIM Beacon every dtimperiod Beacon */
+ dtim_dec_count = num_beacons % dtimperiod;
+ /* CFP every cfpperiod DTIM Beacon */
+ cfp_dec_count = (num_beacons / dtimperiod) % cfpperiod;
+ if (dtim_dec_count)
+ cfp_dec_count++;
+
+ dtimcount -= dtim_dec_count;
+ if (dtimcount < 0)
+ dtimcount += dtimperiod;
+
+ cfpcount -= cfp_dec_count;
+ if (cfpcount < 0)
+ cfpcount += cfpperiod;
+
+ bs.bs_intval = intval;
+ bs.bs_nexttbtt = nexttbtt;
+ bs.bs_dtimperiod = dtimperiod*intval;
+ bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval;
+ bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
+ bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
+ bs.bs_cfpmaxduration = 0;
+
+ /*
+ * Calculate the number of consecutive beacons to miss before taking
+ * a BMISS interrupt. The configuration is specified in TU, so we only
+ * need to calculate based on the beacon interval. Note that we clamp the
+ * result to at most 15 beacons.
+ */
+ if (sleepduration > intval) {
+ bs.bs_bmissthreshold = ATH_DEFAULT_BMISS_LIMIT / 2;
+ } else {
+ bs.bs_bmissthreshold = DIV_ROUND_UP(bmiss_timeout, intval);
+ if (bs.bs_bmissthreshold > 15)
+ bs.bs_bmissthreshold = 15;
+ else if (bs.bs_bmissthreshold <= 0)
+ bs.bs_bmissthreshold = 1;
+ }
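+ /*
+ * With sleepduration equal to the beacon interval this works out to
+ * roughly ATH_DEFAULT_BMISS_LIMIT missed beacons (clamped to 15)
+ * before a BMISS interrupt is raised.
+ */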
+
+ /*
+ * Calculate sleep duration. The configuration is given in ms.
+ * We ensure a multiple of the beacon period is used. Also, if the sleep
+ * duration is greater than the DTIM period then it makes sense
+ * to make it a multiple of that.
+ *
+ * XXX fixed at 100ms
+ */
+
+ bs.bs_sleepduration = roundup(IEEE80211_MS_TO_TU(100), sleepduration);
+ if (bs.bs_sleepduration > bs.bs_dtimperiod)
+ bs.bs_sleepduration = bs.bs_dtimperiod;
+
+ /* TSF out of range threshold fixed at 1 second */
+ bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
+
+ ath_print(common, ATH_DBG_BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu);
+ ath_print(common, ATH_DBG_BEACON,
+ "bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n",
+ bs.bs_bmissthreshold, bs.bs_sleepduration,
+ bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext);
+
+ /* Set the computed STA beacon timers */
+
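+ /*
+ * The WMI_CMD()/WMI_CMD_BUF() helpers below rely on the local 'ret'
+ * and 'cmd_rsp' variables declared at the top of this function.
+ */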
+ WMI_CMD(WMI_DISABLE_INTR_CMDID);
+ ath9k_hw_set_sta_beacon_timers(priv->ah, &bs);
+ imask |= ATH9K_INT_BMISS;
+ htc_imask = cpu_to_be32(imask);
+ WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
+}
+
+static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv,
+ struct htc_beacon_config *bss_conf)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ enum ath9k_int imask = 0;
+ u32 nexttbtt, intval;
+ __be32 htc_imask = 0;
+ int ret;
+ u8 cmd_rsp;
+
+ intval = bss_conf->beacon_interval & ATH9K_BEACON_PERIOD;
+ nexttbtt = intval;
+ intval |= ATH9K_BEACON_ENA;
+ if (priv->op_flags & OP_ENABLE_BEACON)
+ imask |= ATH9K_INT_SWBA;
+
+ ath_print(common, ATH_DBG_BEACON,
+ "IBSS Beacon config, intval: %d, imask: 0x%x\n",
+ bss_conf->beacon_interval, imask);
+
+ WMI_CMD(WMI_DISABLE_INTR_CMDID);
+ ath9k_hw_beaconinit(priv->ah, nexttbtt, intval);
+ priv->bmiss_cnt = 0;
+ htc_imask = cpu_to_be32(imask);
+ WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
+}
+
+void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb,
+ enum htc_endpoint_id ep_id, bool txok)
+{
+ dev_kfree_skb_any(skb);
+}
+
+void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending)
+{
+ struct ath9k_htc_vif *avp = (void *)priv->vif->drv_priv;
+ struct tx_beacon_header beacon_hdr;
+ struct ath9k_htc_tx_ctl tx_ctl;
+ struct ieee80211_tx_info *info;
+ struct sk_buff *beacon;
+ u8 *tx_fhdr;
+
+ memset(&beacon_hdr, 0, sizeof(struct tx_beacon_header));
+ memset(&tx_ctl, 0, sizeof(struct ath9k_htc_tx_ctl));
+
+ /* FIXME: Handle BMISS */
+ if (beacon_pending != 0) {
+ priv->bmiss_cnt++;
+ return;
+ }
+
+ spin_lock_bh(&priv->beacon_lock);
+
+ if (unlikely(priv->op_flags & OP_SCANNING)) {
+ spin_unlock_bh(&priv->beacon_lock);
+ return;
+ }
+
+ /* Get a new beacon */
+ beacon = ieee80211_beacon_get(priv->hw, priv->vif);
+ if (!beacon) {
+ spin_unlock_bh(&priv->beacon_lock);
+ return;
+ }
+
+ info = IEEE80211_SKB_CB(beacon);
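+ /*
+ * The sequence number occupies the upper 12 bits of seq_ctrl, so
+ * adding 0x10 advances it by one; the IEEE80211_SCTL_FRAG mask below
+ * preserves the fragment number bits.
+ */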
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
+ struct ieee80211_hdr *hdr =
+ (struct ieee80211_hdr *) beacon->data;
+ priv->seq_no += 0x10;
+ hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(priv->seq_no);
+ }
+
+ tx_ctl.type = ATH9K_HTC_NORMAL;
+ beacon_hdr.vif_index = avp->index;
+ tx_fhdr = skb_push(beacon, sizeof(beacon_hdr));
+ memcpy(tx_fhdr, (u8 *) &beacon_hdr, sizeof(beacon_hdr));
+
+ htc_send(priv->htc, beacon, priv->beacon_ep, &tx_ctl);
+
+ spin_unlock_bh(&priv->beacon_lock);
+}
+
+void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+
+ cur_conf->beacon_interval = bss_conf->beacon_int;
+ if (cur_conf->beacon_interval == 0)
+ cur_conf->beacon_interval = 100;
+
+ cur_conf->dtim_period = bss_conf->dtim_period;
+ cur_conf->listen_interval = 1;
+ cur_conf->dtim_count = 1;
+ cur_conf->bmiss_timeout =
+ ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ ath9k_htc_beacon_config_sta(priv, cur_conf);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ ath9k_htc_beacon_config_adhoc(priv, cur_conf);
+ break;
+ default:
+ ath_print(common, ATH_DBG_CONFIG,
+ "Unsupported beaconing mode\n");
+ return;
+ }
+}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
new file mode 100644
index 000000000000..17111fc1d2cc
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -0,0 +1,827 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "htc.h"
+
+MODULE_AUTHOR("Atheros Communications");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Atheros driver for 802.11n HTC based wireless devices");
+
+static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
+module_param_named(debug, ath9k_debug, uint, 0);
+MODULE_PARM_DESC(debug, "Debugging mask");
+
+int htc_modparam_nohwcrypt;
+module_param_named(nohwcrypt, htc_modparam_nohwcrypt, int, 0444);
+MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
+
+#define CHAN2G(_freq, _idx) { \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 20, \
+}
+
+static struct ieee80211_channel ath9k_2ghz_channels[] = {
+ CHAN2G(2412, 0), /* Channel 1 */
+ CHAN2G(2417, 1), /* Channel 2 */
+ CHAN2G(2422, 2), /* Channel 3 */
+ CHAN2G(2427, 3), /* Channel 4 */
+ CHAN2G(2432, 4), /* Channel 5 */
+ CHAN2G(2437, 5), /* Channel 6 */
+ CHAN2G(2442, 6), /* Channel 7 */
+ CHAN2G(2447, 7), /* Channel 8 */
+ CHAN2G(2452, 8), /* Channel 9 */
+ CHAN2G(2457, 9), /* Channel 10 */
+ CHAN2G(2462, 10), /* Channel 11 */
+ CHAN2G(2467, 11), /* Channel 12 */
+ CHAN2G(2472, 12), /* Channel 13 */
+ CHAN2G(2484, 13), /* Channel 14 */
+};
+
+/* Atheros hardware rate code addition for short preamble */
+#define SHPCHECK(__hw_rate, __flags) \
+ ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)
+
+#define RATE(_bitrate, _hw_rate, _flags) { \
+ .bitrate = (_bitrate), \
+ .flags = (_flags), \
+ .hw_value = (_hw_rate), \
+ .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
+}
+
+static struct ieee80211_rate ath9k_legacy_rates[] = {
+ RATE(10, 0x1b, 0),
+ RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE), /* short preamble: 0x1e */
+ RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE), /* short preamble: 0x1d */
+ RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE), /* short preamble: 0x1c */
+ RATE(60, 0x0b, 0),
+ RATE(90, 0x0f, 0),
+ RATE(120, 0x0a, 0),
+ RATE(180, 0x0e, 0),
+ RATE(240, 0x09, 0),
+ RATE(360, 0x0d, 0),
+ RATE(480, 0x08, 0),
+ RATE(540, 0x0c, 0),
+};
+
+static int ath9k_htc_wait_for_target(struct ath9k_htc_priv *priv)
+{
+ int time_left;
+
+ /* Firmware can take up to 50ms to get ready; to be safe, wait up to 1 second */
+ time_left = wait_for_completion_timeout(&priv->htc->target_wait, HZ);
+ if (!time_left) {
+ dev_err(priv->dev, "ath9k_htc: Target is unresponsive\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void ath9k_deinit_priv(struct ath9k_htc_priv *priv)
+{
+ ath9k_htc_exit_debug(priv->ah);
+ ath9k_hw_deinit(priv->ah);
+ tasklet_kill(&priv->wmi_tasklet);
+ tasklet_kill(&priv->rx_tasklet);
+ tasklet_kill(&priv->tx_tasklet);
+ kfree(priv->ah);
+ priv->ah = NULL;
+}
+
+static void ath9k_deinit_device(struct ath9k_htc_priv *priv)
+{
+ struct ieee80211_hw *hw = priv->hw;
+
+ wiphy_rfkill_stop_polling(hw->wiphy);
+ ath9k_deinit_leds(priv);
+ ieee80211_unregister_hw(hw);
+ ath9k_rx_cleanup(priv);
+ ath9k_tx_cleanup(priv);
+ ath9k_deinit_priv(priv);
+}
+
+static inline int ath9k_htc_connect_svc(struct ath9k_htc_priv *priv,
+ u16 service_id,
+ void (*tx) (void *,
+ struct sk_buff *,
+ enum htc_endpoint_id,
+ bool txok),
+ enum htc_endpoint_id *ep_id)
+{
+ struct htc_service_connreq req;
+
+ memset(&req, 0, sizeof(struct htc_service_connreq));
+
+ req.service_id = service_id;
+ req.ep_callbacks.priv = priv;
+ req.ep_callbacks.rx = ath9k_htc_rxep;
+ req.ep_callbacks.tx = tx;
+
+ return htc_connect_service(priv->htc, &req, ep_id);
+}
+
+static int ath9k_init_htc_services(struct ath9k_htc_priv *priv)
+{
+ int ret;
+
+ /* WMI CMD */
+ ret = ath9k_wmi_connect(priv->htc, priv->wmi, &priv->wmi_cmd_ep);
+ if (ret)
+ goto err;
+
+ /* Beacon */
+ ret = ath9k_htc_connect_svc(priv, WMI_BEACON_SVC, ath9k_htc_beaconep,
+ &priv->beacon_ep);
+ if (ret)
+ goto err;
+
+ /* CAB */
+ ret = ath9k_htc_connect_svc(priv, WMI_CAB_SVC, ath9k_htc_txep,
+ &priv->cab_ep);
+ if (ret)
+ goto err;
+
+ /* UAPSD */
+ ret = ath9k_htc_connect_svc(priv, WMI_UAPSD_SVC, ath9k_htc_txep,
+ &priv->uapsd_ep);
+ if (ret)
+ goto err;
+
+ /* MGMT */
+ ret = ath9k_htc_connect_svc(priv, WMI_MGMT_SVC, ath9k_htc_txep,
+ &priv->mgmt_ep);
+ if (ret)
+ goto err;
+
+ /* DATA BE */
+ ret = ath9k_htc_connect_svc(priv, WMI_DATA_BE_SVC, ath9k_htc_txep,
+ &priv->data_be_ep);
+ if (ret)
+ goto err;
+
+ /* DATA BK */
+ ret = ath9k_htc_connect_svc(priv, WMI_DATA_BK_SVC, ath9k_htc_txep,
+ &priv->data_bk_ep);
+ if (ret)
+ goto err;
+
+ /* DATA VI */
+ ret = ath9k_htc_connect_svc(priv, WMI_DATA_VI_SVC, ath9k_htc_txep,
+ &priv->data_vi_ep);
+ if (ret)
+ goto err;
+
+ /* DATA VO */
+ ret = ath9k_htc_connect_svc(priv, WMI_DATA_VO_SVC, ath9k_htc_txep,
+ &priv->data_vo_ep);
+ if (ret)
+ goto err;
+
+ ret = htc_init(priv->htc);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ dev_err(priv->dev, "ath9k_htc: Unable to initialize HTC services\n");
+ return ret;
+}
+
+static int ath9k_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct ath9k_htc_priv *priv = hw->priv;
+
+ return ath_reg_notifier_apply(wiphy, request,
+ ath9k_hw_regulatory(priv->ah));
+}
+
+static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ __be32 val, reg = cpu_to_be32(reg_offset);
+ int r;
+
+ r = ath9k_wmi_cmd(priv->wmi, WMI_REG_READ_CMDID,
+ (u8 *) &reg, sizeof(reg),
+ (u8 *) &val, sizeof(val),
+ 100);
+ if (unlikely(r)) {
+ ath_print(common, ATH_DBG_WMI,
+ "REGISTER READ FAILED: (0x%04x, %d)\n",
+ reg_offset, r);
+ return -EIO;
+ }
+
+ return be32_to_cpu(val);
+}
+
+static void ath9k_regwrite_single(void *hw_priv, u32 val, u32 reg_offset)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ __be32 buf[2] = {
+ cpu_to_be32(reg_offset),
+ cpu_to_be32(val),
+ };
+ int r;
+
+ r = ath9k_wmi_cmd(priv->wmi, WMI_REG_WRITE_CMDID,
+ (u8 *) &buf, sizeof(buf),
+ (u8 *) &val, sizeof(val),
+ 100);
+ if (unlikely(r)) {
+ ath_print(common, ATH_DBG_WMI,
+ "REGISTER WRITE FAILED:(0x%04x, %d)\n",
+ reg_offset, r);
+ }
+}
+
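+/*
+ * Buffered register writes: while mwrite_cnt is non-zero (i.e. between
+ * enable_write_buffer and write_flush), individual writes are queued in
+ * wmi->multi_write[] and sent as a single WMI_REG_WRITE_CMDID once
+ * MAX_CMD_NUMBER entries have accumulated or the buffer is flushed.
+ */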
+static void ath9k_regwrite_buffer(void *hw_priv, u32 val, u32 reg_offset)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ u32 rsp_status;
+ int r;
+
+ mutex_lock(&priv->wmi->multi_write_mutex);
+
+ /* Store the register/value */
+ priv->wmi->multi_write[priv->wmi->multi_write_idx].reg =
+ cpu_to_be32(reg_offset);
+ priv->wmi->multi_write[priv->wmi->multi_write_idx].val =
+ cpu_to_be32(val);
+
+ priv->wmi->multi_write_idx++;
+
+ /* If the buffer is full, send it out. */
+ if (priv->wmi->multi_write_idx == MAX_CMD_NUMBER) {
+ r = ath9k_wmi_cmd(priv->wmi, WMI_REG_WRITE_CMDID,
+ (u8 *) &priv->wmi->multi_write,
+ sizeof(struct register_write) * priv->wmi->multi_write_idx,
+ (u8 *) &rsp_status, sizeof(rsp_status),
+ 100);
+ if (unlikely(r)) {
+ ath_print(common, ATH_DBG_WMI,
+ "REGISTER WRITE FAILED, multi len: %d\n",
+ priv->wmi->multi_write_idx);
+ }
+ priv->wmi->multi_write_idx = 0;
+ }
+
+ mutex_unlock(&priv->wmi->multi_write_mutex);
+}
+
+static void ath9k_regwrite(void *hw_priv, u32 val, u32 reg_offset)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+
+ if (atomic_read(&priv->wmi->mwrite_cnt))
+ ath9k_regwrite_buffer(hw_priv, val, reg_offset);
+ else
+ ath9k_regwrite_single(hw_priv, val, reg_offset);
+}
+
+static void ath9k_enable_regwrite_buffer(void *hw_priv)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+
+ atomic_inc(&priv->wmi->mwrite_cnt);
+}
+
+static void ath9k_disable_regwrite_buffer(void *hw_priv)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+
+ atomic_dec(&priv->wmi->mwrite_cnt);
+}
+
+static void ath9k_regwrite_flush(void *hw_priv)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ u32 rsp_status;
+ int r;
+
+ mutex_lock(&priv->wmi->multi_write_mutex);
+
+ if (priv->wmi->multi_write_idx) {
+ r = ath9k_wmi_cmd(priv->wmi, WMI_REG_WRITE_CMDID,
+ (u8 *) &priv->wmi->multi_write,
+ sizeof(struct register_write) * priv->wmi->multi_write_idx,
+ (u8 *) &rsp_status, sizeof(rsp_status),
+ 100);
+ if (unlikely(r)) {
+ ath_print(common, ATH_DBG_WMI,
+ "REGISTER WRITE FAILED, multi len: %d\n",
+ priv->wmi->multi_write_idx);
+ }
+ priv->wmi->multi_write_idx = 0;
+ }
+
+ mutex_unlock(&priv->wmi->multi_write_mutex);
+}
+
+static const struct ath_ops ath9k_common_ops = {
+ .read = ath9k_regread,
+ .write = ath9k_regwrite,
+ .enable_write_buffer = ath9k_enable_regwrite_buffer,
+ .disable_write_buffer = ath9k_disable_regwrite_buffer,
+ .write_flush = ath9k_regwrite_flush,
+};
+
+static void ath_usb_read_cachesize(struct ath_common *common, int *csz)
+{
+ *csz = L1_CACHE_BYTES >> 2;
+}
+
+static bool ath_usb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
+{
+ struct ath_hw *ah = (struct ath_hw *) common->ah;
+
+ (void)REG_READ(ah, AR5416_EEPROM_OFFSET + (off << AR5416_EEPROM_S));
+
+ if (!ath9k_hw_wait(ah,
+ AR_EEPROM_STATUS_DATA,
+ AR_EEPROM_STATUS_DATA_BUSY |
+ AR_EEPROM_STATUS_DATA_PROT_ACCESS, 0,
+ AH_WAIT_TIMEOUT))
+ return false;
+
+ *data = MS(REG_READ(ah, AR_EEPROM_STATUS_DATA),
+ AR_EEPROM_STATUS_DATA_VAL);
+
+ return true;
+}
+
+static const struct ath_bus_ops ath9k_usb_bus_ops = {
+ .ath_bus_type = ATH_USB,
+ .read_cachesize = ath_usb_read_cachesize,
+ .eeprom_read = ath_usb_eeprom_read,
+};
+
+static void setup_ht_cap(struct ath9k_htc_priv *priv,
+ struct ieee80211_sta_ht_cap *ht_info)
+{
+ ht_info->ht_supported = true;
+ ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_SM_PS |
+ IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_DSSSCCK40;
+
+ ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
+
+ memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
+ ht_info->mcs.rx_mask[0] = 0xff;
+ ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
+}
+
+static int ath9k_init_queues(struct ath9k_htc_priv *priv)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(priv->hwq_map); i++)
+ priv->hwq_map[i] = -1;
+
+ if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_BE)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup xmit queue for BE traffic\n");
+ goto err;
+ }
+
+ if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_BK)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup xmit queue for BK traffic\n");
+ goto err;
+ }
+ if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_VI)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup xmit queue for VI traffic\n");
+ goto err;
+ }
+ if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_VO)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup xmit queue for VO traffic\n");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ return -EINVAL;
+}
+
+static void ath9k_init_crypto(struct ath9k_htc_priv *priv)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ int i = 0;
+
+ /* Get the hardware key cache size. */
+ common->keymax = priv->ah->caps.keycache_size;
+ if (common->keymax > ATH_KEYMAX) {
+ ath_print(common, ATH_DBG_ANY,
+ "Warning, using only %u entries in %u key cache\n",
+ ATH_KEYMAX, common->keymax);
+ common->keymax = ATH_KEYMAX;
+ }
+
+ /*
+ * Reset the key cache since some parts do not
+ * reset the contents on initial power up.
+ */
+ for (i = 0; i < common->keymax; i++)
+ ath9k_hw_keyreset(priv->ah, (u16) i);
+
+ if (ath9k_hw_getcapability(priv->ah, ATH9K_CAP_CIPHER,
+ ATH9K_CIPHER_TKIP, NULL)) {
+ /*
+ * Whether we should enable h/w TKIP MIC.
+ * XXX: if we don't support WME TKIP MIC, then we wouldn't
+ * report WMM capable, so it's always safe to turn on
+ * TKIP MIC in this case.
+ */
+ ath9k_hw_setcapability(priv->ah, ATH9K_CAP_TKIP_MIC, 0, 1, NULL);
+ }
+
+ /*
+ * Check whether the separate key cache entries
+ * are required to handle both tx+rx MIC keys.
+ * With split MIC keys the number of stations is limited
+ * to 27, otherwise 59.
+ */
+ if (ath9k_hw_getcapability(priv->ah, ATH9K_CAP_CIPHER,
+ ATH9K_CIPHER_TKIP, NULL)
+ && ath9k_hw_getcapability(priv->ah, ATH9K_CAP_CIPHER,
+ ATH9K_CIPHER_MIC, NULL)
+ && ath9k_hw_getcapability(priv->ah, ATH9K_CAP_TKIP_SPLIT,
+ 0, NULL))
+ common->splitmic = 1;
+
+ /* turn on mcast key search if possible */
+ if (!ath9k_hw_getcapability(priv->ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
+ (void)ath9k_hw_setcapability(priv->ah, ATH9K_CAP_MCAST_KEYSRCH,
+ 1, 1, NULL);
+}
+
+static void ath9k_init_channels_rates(struct ath9k_htc_priv *priv)
+{
+ if (test_bit(ATH9K_MODE_11G, priv->ah->caps.wireless_modes)) {
+ priv->sbands[IEEE80211_BAND_2GHZ].channels =
+ ath9k_2ghz_channels;
+ priv->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
+ priv->sbands[IEEE80211_BAND_2GHZ].n_channels =
+ ARRAY_SIZE(ath9k_2ghz_channels);
+ priv->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
+ priv->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
+ ARRAY_SIZE(ath9k_legacy_rates);
+ }
+}
+
+static void ath9k_init_misc(struct ath9k_htc_priv *priv)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+
+ common->tx_chainmask = priv->ah->caps.tx_chainmask;
+ common->rx_chainmask = priv->ah->caps.rx_chainmask;
+
+ if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
+ memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
+
+ priv->op_flags |= OP_TXAGGR;
+ priv->ah->opmode = NL80211_IFTYPE_STATION;
+}
+
+static int ath9k_init_priv(struct ath9k_htc_priv *priv, u16 devid)
+{
+ struct ath_hw *ah = NULL;
+ struct ath_common *common;
+ int ret = 0, csz = 0;
+
+ priv->op_flags |= OP_INVALID;
+
+ ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
+ if (!ah)
+ return -ENOMEM;
+
+ ah->hw_version.devid = devid;
+ ah->hw_version.subsysid = 0; /* FIXME */
+ priv->ah = ah;
+
+ common = ath9k_hw_common(ah);
+ common->ops = &ath9k_common_ops;
+ common->bus_ops = &ath9k_usb_bus_ops;
+ common->ah = ah;
+ common->hw = priv->hw;
+ common->priv = priv;
+ common->debug_mask = ath9k_debug;
+
+ spin_lock_init(&priv->wmi->wmi_lock);
+ spin_lock_init(&priv->beacon_lock);
+ spin_lock_init(&priv->tx_lock);
+ mutex_init(&priv->mutex);
+ mutex_init(&priv->aggr_work.mutex);
+ mutex_init(&priv->htc_pm_lock);
+ tasklet_init(&priv->wmi_tasklet, ath9k_wmi_tasklet,
+ (unsigned long)priv);
+ tasklet_init(&priv->rx_tasklet, ath9k_rx_tasklet,
+ (unsigned long)priv);
+ tasklet_init(&priv->tx_tasklet, ath9k_tx_tasklet, (unsigned long)priv);
+ INIT_DELAYED_WORK(&priv->ath9k_aggr_work, ath9k_htc_aggr_work);
+ INIT_DELAYED_WORK(&priv->ath9k_ani_work, ath9k_ani_work);
+ INIT_WORK(&priv->ps_work, ath9k_ps_work);
+
+ /*
+ * Cache line size is used to size and align various
+ * structures used to communicate with the hardware.
+ */
+ ath_read_cachesize(common, &csz);
+ common->cachelsz = csz << 2; /* convert to bytes */
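+ /*
+ * ath_usb_read_cachesize() reports the size in 32-bit words
+ * (L1_CACHE_BYTES >> 2), hence the << 2 conversion above.
+ */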
+
+ ret = ath9k_hw_init(ah);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to initialize hardware; "
+ "initialization status: %d\n", ret);
+ goto err_hw;
+ }
+
+ ret = ath9k_htc_init_debug(ah);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to create debugfs files\n");
+ goto err_debug;
+ }
+
+ ret = ath9k_init_queues(priv);
+ if (ret)
+ goto err_queues;
+
+ ath9k_init_crypto(priv);
+ ath9k_init_channels_rates(priv);
+ ath9k_init_misc(priv);
+
+ return 0;
+
+err_queues:
+ ath9k_htc_exit_debug(ah);
+err_debug:
+ ath9k_hw_deinit(ah);
+err_hw:
+
+ kfree(ah);
+ priv->ah = NULL;
+
+ return ret;
+}
+
+static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
+ struct ieee80211_hw *hw)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+
+ hw->flags = IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_AMPDU_AGGREGATION |
+ IEEE80211_HW_SPECTRUM_MGMT |
+ IEEE80211_HW_HAS_RATE_CONTROL |
+ IEEE80211_HW_RX_INCLUDES_FCS |
+ IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_PS_NULLFUNC_STACK;
+
+ hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC);
+
+ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ hw->queues = 4;
+ hw->channel_change_time = 5000;
+ hw->max_listen_interval = 10;
+ hw->vif_data_size = sizeof(struct ath9k_htc_vif);
+ hw->sta_data_size = sizeof(struct ath9k_htc_sta);
+
+ /* tx_frame_hdr is larger than tx_mgmt_hdr anyway */
+ hw->extra_tx_headroom = sizeof(struct tx_frame_hdr) +
+ sizeof(struct htc_frame_hdr) + 4;
+
+ if (test_bit(ATH9K_MODE_11G, priv->ah->caps.wireless_modes))
+ hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &priv->sbands[IEEE80211_BAND_2GHZ];
+
+ if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
+ if (test_bit(ATH9K_MODE_11G, priv->ah->caps.wireless_modes))
+ setup_ht_cap(priv,
+ &priv->sbands[IEEE80211_BAND_2GHZ].ht_cap);
+ }
+
+ SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
+}
+
+static int ath9k_init_device(struct ath9k_htc_priv *priv, u16 devid)
+{
+ struct ieee80211_hw *hw = priv->hw;
+ struct ath_common *common;
+ struct ath_hw *ah;
+ int error = 0;
+ struct ath_regulatory *reg;
+
+ /* Bring up device */
+ error = ath9k_init_priv(priv, devid);
+ if (error != 0)
+ goto err_init;
+
+ ah = priv->ah;
+ common = ath9k_hw_common(ah);
+ ath9k_set_hw_capab(priv, hw);
+
+ /* Initialize regulatory */
+ error = ath_regd_init(&common->regulatory, priv->hw->wiphy,
+ ath9k_reg_notifier);
+ if (error)
+ goto err_regd;
+
+ reg = &common->regulatory;
+
+ /* Setup TX */
+ error = ath9k_tx_init(priv);
+ if (error != 0)
+ goto err_tx;
+
+ /* Setup RX */
+ error = ath9k_rx_init(priv);
+ if (error != 0)
+ goto err_rx;
+
+ /* Register with mac80211 */
+ error = ieee80211_register_hw(hw);
+ if (error)
+ goto err_register;
+
+ /* Handle world regulatory */
+ if (!ath_is_world_regd(reg)) {
+ error = regulatory_hint(hw->wiphy, reg->alpha2);
+ if (error)
+ goto err_world;
+ }
+
+ ath9k_init_leds(priv);
+ ath9k_start_rfkill_poll(priv);
+
+ return 0;
+
+err_world:
+ ieee80211_unregister_hw(hw);
+err_register:
+ ath9k_rx_cleanup(priv);
+err_rx:
+ ath9k_tx_cleanup(priv);
+err_tx:
+ /* Nothing */
+err_regd:
+ ath9k_deinit_priv(priv);
+err_init:
+ return error;
+}
+
+int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
+ u16 devid)
+{
+ struct ieee80211_hw *hw;
+ struct ath9k_htc_priv *priv;
+ int ret;
+
+ hw = ieee80211_alloc_hw(sizeof(struct ath9k_htc_priv), &ath9k_htc_ops);
+ if (!hw)
+ return -ENOMEM;
+
+ priv = hw->priv;
+ priv->hw = hw;
+ priv->htc = htc_handle;
+ priv->dev = dev;
+ htc_handle->drv_priv = priv;
+ SET_IEEE80211_DEV(hw, priv->dev);
+
+ ret = ath9k_htc_wait_for_target(priv);
+ if (ret)
+ goto err_free;
+
+ priv->wmi = ath9k_init_wmi(priv);
+ if (!priv->wmi) {
+ ret = -EINVAL;
+ goto err_free;
+ }
+
+ ret = ath9k_init_htc_services(priv);
+ if (ret)
+ goto err_init;
+
+ /* The device may have been unplugged earlier. */
+ priv->op_flags &= ~OP_UNPLUGGED;
+
+ ret = ath9k_init_device(priv, devid);
+ if (ret)
+ goto err_init;
+
+ return 0;
+
+err_init:
+ ath9k_deinit_wmi(priv);
+err_free:
+ ieee80211_free_hw(hw);
+ return ret;
+}
+
+void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug)
+{
+ if (htc_handle->drv_priv) {
+
+ /* Check if the device has been yanked out. */
+ if (hotunplug)
+ htc_handle->drv_priv->op_flags |= OP_UNPLUGGED;
+
+ ath9k_deinit_device(htc_handle->drv_priv);
+ ath9k_deinit_wmi(htc_handle->drv_priv);
+ ieee80211_free_hw(htc_handle->drv_priv->hw);
+ }
+}
+
+#ifdef CONFIG_PM
+int ath9k_htc_resume(struct htc_target *htc_handle)
+{
+ int ret;
+
+ ret = ath9k_htc_wait_for_target(htc_handle->drv_priv);
+ if (ret)
+ return ret;
+
+ ret = ath9k_init_htc_services(htc_handle->drv_priv);
+ return ret;
+}
+#endif
+
+static int __init ath9k_htc_init(void)
+{
+ int error;
+
+ error = ath9k_htc_debug_create_root();
+ if (error < 0) {
+ printk(KERN_ERR
+ "ath9k_htc: Unable to create debugfs root: %d\n",
+ error);
+ goto err_dbg;
+ }
+
+ error = ath9k_hif_usb_init();
+ if (error < 0) {
+ printk(KERN_ERR
+ "ath9k_htc: No USB devices found,"
+ " driver not installed.\n");
+ error = -ENODEV;
+ goto err_usb;
+ }
+
+ return 0;
+
+err_usb:
+ ath9k_htc_debug_remove_root();
+err_dbg:
+ return error;
+}
+module_init(ath9k_htc_init);
+
+static void __exit ath9k_htc_exit(void)
+{
+ ath9k_hif_usb_exit();
+ ath9k_htc_debug_remove_root();
+ printk(KERN_INFO "ath9k_htc: Driver unloaded\n");
+}
+module_exit(ath9k_htc_exit);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
new file mode 100644
index 000000000000..6c386dad1d40
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -0,0 +1,1771 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "htc.h"
+
+#ifdef CONFIG_ATH9K_HTC_DEBUGFS
+static struct dentry *ath9k_debugfs_root;
+#endif
+
+/*************/
+/* Utilities */
+/*************/
+
+static void ath_update_txpow(struct ath9k_htc_priv *priv)
+{
+ struct ath_hw *ah = priv->ah;
+ u32 txpow;
+
+ if (priv->curtxpow != priv->txpowlimit) {
+ ath9k_hw_set_txpowerlimit(ah, priv->txpowlimit);
+ /* read back in case value is clamped */
+ ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
+ priv->curtxpow = txpow;
+ }
+}
+
+/* HACK Alert: Use 11NG for 2.4 GHz, use 11NA for 5 GHz */
+static enum htc_phymode ath9k_htc_get_curmode(struct ath9k_htc_priv *priv,
+ struct ath9k_channel *ichan)
+{
+ enum htc_phymode mode;
+
+ mode = HTC_MODE_AUTO;
+
+ switch (ichan->chanmode) {
+ case CHANNEL_G:
+ case CHANNEL_G_HT20:
+ case CHANNEL_G_HT40PLUS:
+ case CHANNEL_G_HT40MINUS:
+ mode = HTC_MODE_11NG;
+ break;
+ case CHANNEL_A:
+ case CHANNEL_A_HT20:
+ case CHANNEL_A_HT40PLUS:
+ case CHANNEL_A_HT40MINUS:
+ mode = HTC_MODE_11NA;
+ break;
+ default:
+ break;
+ }
+
+ return mode;
+}
+
+static bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
+ enum ath9k_power_mode mode)
+{
+ bool ret;
+
+ mutex_lock(&priv->htc_pm_lock);
+ ret = ath9k_hw_setpower(priv->ah, mode);
+ mutex_unlock(&priv->htc_pm_lock);
+
+ return ret;
+}
+
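+/*
+ * Power save handling is reference counted: ath9k_htc_ps_wakeup() and
+ * ath9k_htc_ps_restore() must be called in pairs; the chip is put back
+ * to full or network sleep only when ps_usecount drops to zero.
+ */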
+void ath9k_htc_ps_wakeup(struct ath9k_htc_priv *priv)
+{
+ mutex_lock(&priv->htc_pm_lock);
+ if (++priv->ps_usecount != 1)
+ goto unlock;
+ ath9k_hw_setpower(priv->ah, ATH9K_PM_AWAKE);
+
+unlock:
+ mutex_unlock(&priv->htc_pm_lock);
+}
+
+void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv)
+{
+ mutex_lock(&priv->htc_pm_lock);
+ if (--priv->ps_usecount != 0)
+ goto unlock;
+
+ if (priv->ps_idle)
+ ath9k_hw_setpower(priv->ah, ATH9K_PM_FULL_SLEEP);
+ else if (priv->ps_enabled)
+ ath9k_hw_setpower(priv->ah, ATH9K_PM_NETWORK_SLEEP);
+
+unlock:
+ mutex_unlock(&priv->htc_pm_lock);
+}
+
+void ath9k_ps_work(struct work_struct *work)
+{
+ struct ath9k_htc_priv *priv =
+ container_of(work, struct ath9k_htc_priv,
+ ps_work);
+ ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
+
+ /* The chip wakes up after receiving the first beacon
+ while network sleep is enabled. For the driver to
+ be in sync with the hw, set the chip to awake and
+ only then set it to sleep.
+ */
+ ath9k_htc_setpower(priv, ATH9K_PM_NETWORK_SLEEP);
+}
+
+static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
+ struct ieee80211_hw *hw,
+ struct ath9k_channel *hchan)
+{
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_conf *conf = &common->hw->conf;
+ bool fastcc = true;
+ struct ieee80211_channel *channel = hw->conf.channel;
+ enum htc_phymode mode;
+ __be16 htc_mode;
+ u8 cmd_rsp;
+ int ret;
+
+ if (priv->op_flags & OP_INVALID)
+ return -EIO;
+
+ if (priv->op_flags & OP_FULL_RESET)
+ fastcc = false;
+
+ /* Fiddle around with fastcc later on; for now just use a full reset */
+ fastcc = false;
+ ath9k_htc_ps_wakeup(priv);
+ htc_stop(priv->htc);
+ WMI_CMD(WMI_DISABLE_INTR_CMDID);
+ WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
+ WMI_CMD(WMI_STOP_RECV_CMDID);
+
+ ath_print(common, ATH_DBG_CONFIG,
+ "(%u MHz) -> (%u MHz), HT: %d, HT40: %d\n",
+ priv->ah->curchan->channel,
+ channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf));
+
+ ret = ath9k_hw_reset(ah, hchan, fastcc);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to reset channel (%u Mhz) "
+ "reset status %d\n", channel->center_freq, ret);
+ goto err;
+ }
+
+ ath_update_txpow(priv);
+
+ WMI_CMD(WMI_START_RECV_CMDID);
+ if (ret)
+ goto err;
+
+ ath9k_host_rx_init(priv);
+
+ mode = ath9k_htc_get_curmode(priv, hchan);
+ htc_mode = cpu_to_be16(mode);
+ WMI_CMD_BUF(WMI_SET_MODE_CMDID, &htc_mode);
+ if (ret)
+ goto err;
+
+ WMI_CMD(WMI_ENABLE_INTR_CMDID);
+ if (ret)
+ goto err;
+
+ htc_start(priv->htc);
+
+ priv->op_flags &= ~OP_FULL_RESET;
+err:
+ ath9k_htc_ps_restore(priv);
+ return ret;
+}
+
+static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_vif hvif;
+ int ret = 0;
+ u8 cmd_rsp;
+
+ if (priv->nvifs > 0)
+ return -ENOBUFS;
+
+ memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
+ memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN);
+
+ hvif.opmode = cpu_to_be32(HTC_M_MONITOR);
+ priv->ah->opmode = NL80211_IFTYPE_MONITOR;
+ hvif.index = priv->nvifs;
+
+ WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif);
+ if (ret)
+ return ret;
+
+ priv->nvifs++;
+ return 0;
+}
+
+static int ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_vif hvif;
+ int ret = 0;
+ u8 cmd_rsp;
+
+ memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
+ memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN);
+ hvif.index = 0; /* Should do for now */
+ WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
+ priv->nvifs--;
+
+ return ret;
+}
+
+static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_sta tsta;
+ struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
+ struct ath9k_htc_sta *ista;
+ int ret;
+ u8 cmd_rsp;
+
+ if (priv->nstations >= ATH9K_HTC_MAX_STA)
+ return -ENOBUFS;
+
+ memset(&tsta, 0, sizeof(struct ath9k_htc_target_sta));
+
+ if (sta) {
+ ista = (struct ath9k_htc_sta *) sta->drv_priv;
+ memcpy(&tsta.macaddr, sta->addr, ETH_ALEN);
+ memcpy(&tsta.bssid, common->curbssid, ETH_ALEN);
+ tsta.associd = common->curaid;
+ tsta.is_vif_sta = 0;
+ tsta.valid = true;
+ ista->index = priv->nstations;
+ } else {
+ memcpy(&tsta.macaddr, vif->addr, ETH_ALEN);
+ tsta.is_vif_sta = 1;
+ }
+
+ tsta.sta_index = priv->nstations;
+ tsta.vif_index = avp->index;
+ tsta.maxampdu = 0xffff;
+ if (sta && sta->ht_cap.ht_supported)
+ tsta.flags = cpu_to_be16(ATH_HTC_STA_HT);
+
+ WMI_CMD_BUF(WMI_NODE_CREATE_CMDID, &tsta);
+ if (ret) {
+ if (sta)
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to add station entry for: %pM\n", sta->addr);
+ return ret;
+ }
+
+ if (sta)
+ ath_print(common, ATH_DBG_CONFIG,
+ "Added a station entry for: %pM (idx: %d)\n",
+ sta->addr, tsta.sta_index);
+
+ priv->nstations++;
+ return 0;
+}
+
+static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_sta *ista;
+ int ret;
+ u8 cmd_rsp, sta_idx;
+
+ if (sta) {
+ ista = (struct ath9k_htc_sta *) sta->drv_priv;
+ sta_idx = ista->index;
+ } else {
+ sta_idx = 0;
+ }
+
+ WMI_CMD_BUF(WMI_NODE_REMOVE_CMDID, &sta_idx);
+ if (ret) {
+ if (sta)
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to remove station entry for: %pM\n",
+ sta->addr);
+ return ret;
+ }
+
+ if (sta)
+ ath_print(common, ATH_DBG_CONFIG,
+ "Removed a station entry for: %pM (idx: %d)\n",
+ sta->addr, sta_idx);
+
+ priv->nstations--;
+ return 0;
+}
+
+static int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv)
+{
+ struct ath9k_htc_cap_target tcap;
+ int ret;
+ u8 cmd_rsp;
+
+ memset(&tcap, 0, sizeof(struct ath9k_htc_cap_target));
+
+ /* FIXME: Values are hardcoded */
+ tcap.flags = 0x240c40;
+ tcap.flags_ext = 0x80601000;
+ tcap.ampdu_limit = 0xffff0000;
+ tcap.ampdu_subframes = 20;
+ tcap.tx_chainmask_legacy = 1;
+ tcap.protmode = 1;
+ tcap.tx_chainmask = 1;
+
+ WMI_CMD_BUF(WMI_TARGET_IC_UPDATE_CMDID, &tcap);
+
+ return ret;
+}
+
+static int ath9k_htc_init_rate(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv;
+ struct ieee80211_supported_band *sband;
+ struct ath9k_htc_target_rate trate;
+ u32 caps = 0;
+ u8 cmd_rsp;
+ int i, j, ret;
+
+ memset(&trate, 0, sizeof(trate));
+
+ /* Only 2GHz is supported */
+ sband = priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ];
+
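+ /*
+ * mac80211 bitrates are given in 100 kbps units, while the target's
+ * rate table appears to use 500 kbps units, hence the
+ * (bitrate * 2) / 10 conversion below (e.g. 54 Mbps: 540 -> 108).
+ */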
+ for (i = 0, j = 0; i < sband->n_bitrates; i++) {
+ if (sta->supp_rates[sband->band] & BIT(i)) {
+ priv->tgt_rate.rates.legacy_rates.rs_rates[j]
+ = (sband->bitrates[i].bitrate * 2) / 10;
+ j++;
+ }
+ }
+ priv->tgt_rate.rates.legacy_rates.rs_nrates = j;
+
+ if (sta->ht_cap.ht_supported) {
+ for (i = 0, j = 0; i < 77; i++) {
+ if (sta->ht_cap.mcs.rx_mask[i/8] & (1<<(i%8)))
+ priv->tgt_rate.rates.ht_rates.rs_rates[j++] = i;
+ if (j == ATH_HTC_RATE_MAX)
+ break;
+ }
+ priv->tgt_rate.rates.ht_rates.rs_nrates = j;
+
+ caps = WLAN_RC_HT_FLAG;
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
+ caps |= WLAN_RC_40_FLAG;
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
+ caps |= WLAN_RC_SGI_FLAG;
+
+ }
+
+ priv->tgt_rate.sta_index = ista->index;
+ priv->tgt_rate.isnew = 1;
+ trate = priv->tgt_rate;
+ priv->tgt_rate.capflags = cpu_to_be32(caps);
+ trate.capflags = cpu_to_be32(caps);
+
+ WMI_CMD_BUF(WMI_RC_RATE_UPDATE_CMDID, &trate);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to initialize Rate information on target\n");
+ return ret;
+ }
+
+ ath_print(common, ATH_DBG_CONFIG,
+ "Updated target STA: %pM (caps: 0x%x)\n", sta->addr, caps);
+ return 0;
+}
+
+static bool check_rc_update(struct ieee80211_hw *hw, bool *cw40)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+
+ if (!conf_is_ht(conf))
+ return false;
+
+ if (!(priv->op_flags & OP_ASSOCIATED) ||
+ (priv->op_flags & OP_SCANNING))
+ return false;
+
+ if (conf_is_ht40(conf)) {
+ if (priv->ah->curchan->chanmode &
+ (CHANNEL_HT40PLUS | CHANNEL_HT40MINUS)) {
+ return false;
+ } else {
+ *cw40 = true;
+ return true;
+ }
+ } else { /* ht20 */
+ if (priv->ah->curchan->chanmode & CHANNEL_HT20)
+ return false;
+ else
+ return true;
+ }
+}
+
+static void ath9k_htc_rc_update(struct ath9k_htc_priv *priv, bool is_cw40)
+{
+ struct ath9k_htc_target_rate trate;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ int ret;
+ u32 caps = be32_to_cpu(priv->tgt_rate.capflags);
+ u8 cmd_rsp;
+
+ memset(&trate, 0, sizeof(trate));
+
+ trate = priv->tgt_rate;
+
+ if (is_cw40)
+ caps |= WLAN_RC_40_FLAG;
+ else
+ caps &= ~WLAN_RC_40_FLAG;
+
+ priv->tgt_rate.capflags = cpu_to_be32(caps);
+ trate.capflags = cpu_to_be32(caps);
+
+ WMI_CMD_BUF(WMI_RC_RATE_UPDATE_CMDID, &trate);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to update Rate information on target\n");
+ return;
+ }
+
+ ath_print(common, ATH_DBG_CONFIG, "Rate control updated with "
+ "caps:0x%x on target\n", caps);
+}
+
+static int ath9k_htc_aggr_oper(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif,
+ u8 *sta_addr, u8 tid, bool oper)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_aggr aggr;
+ struct ieee80211_sta *sta = NULL;
+ struct ath9k_htc_sta *ista;
+ int ret = 0;
+ u8 cmd_rsp;
+
+ if (tid >= ATH9K_HTC_MAX_TID)
+ return -EINVAL;
+
+ memset(&aggr, 0, sizeof(struct ath9k_htc_target_aggr));
+
+ rcu_read_lock();
+
+ /* Check if we are able to retrieve the station */
+ sta = ieee80211_find_sta(vif, sta_addr);
+ if (!sta) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+ ista = (struct ath9k_htc_sta *) sta->drv_priv;
+
+ if (oper)
+ ista->tid_state[tid] = AGGR_START;
+ else
+ ista->tid_state[tid] = AGGR_STOP;
+
+ aggr.sta_index = ista->index;
+
+ rcu_read_unlock();
+
+ aggr.tidno = tid;
+ aggr.aggr_enable = oper;
+
+ WMI_CMD_BUF(WMI_TX_AGGR_ENABLE_CMDID, &aggr);
+ if (ret)
+ ath_print(common, ATH_DBG_CONFIG,
+ "Unable to %s TX aggregation for (%pM, %d)\n",
+ (oper) ? "start" : "stop", sta->addr, tid);
+ else
+ ath_print(common, ATH_DBG_CONFIG,
+ "%s aggregation for (%pM, %d)\n",
+ (oper) ? "Starting" : "Stopping", sta->addr, tid);
+
+ return ret;
+}
+
+void ath9k_htc_aggr_work(struct work_struct *work)
+{
+ int ret = 0;
+ struct ath9k_htc_priv *priv =
+ container_of(work, struct ath9k_htc_priv,
+ ath9k_aggr_work.work);
+ struct ath9k_htc_aggr_work *wk = &priv->aggr_work;
+
+ mutex_lock(&wk->mutex);
+
+ switch (wk->action) {
+ case IEEE80211_AMPDU_TX_START:
+ ret = ath9k_htc_aggr_oper(priv, wk->vif, wk->sta_addr,
+ wk->tid, true);
+ if (!ret)
+ ieee80211_start_tx_ba_cb(wk->vif, wk->sta_addr,
+ wk->tid);
+ break;
+ case IEEE80211_AMPDU_TX_STOP:
+ ath9k_htc_aggr_oper(priv, wk->vif, wk->sta_addr,
+ wk->tid, false);
+ ieee80211_stop_tx_ba_cb(wk->vif, wk->sta_addr, wk->tid);
+ break;
+ default:
+ ath_print(ath9k_hw_common(priv->ah), ATH_DBG_FATAL,
+ "Unknown AMPDU action\n");
+ }
+
+ mutex_unlock(&wk->mutex);
+}
+
+/*********/
+/* DEBUG */
+/*********/
+
+#ifdef CONFIG_ATH9K_HTC_DEBUGFS
+
+static int ath9k_debugfs_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath9k_htc_priv *priv =
+ (struct ath9k_htc_priv *) file->private_data;
+ struct ath9k_htc_target_stats cmd_rsp;
+ char buf[512];
+ unsigned int len = 0;
+ int ret = 0;
+
+ memset(&cmd_rsp, 0, sizeof(cmd_rsp));
+
+ WMI_CMD(WMI_TGT_STATS_CMDID);
+ if (ret)
+ return -EINVAL;
+
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%19s : %10u\n", "TX Short Retries",
+ be32_to_cpu(cmd_rsp.tx_shortretry));
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%19s : %10u\n", "TX Long Retries",
+ be32_to_cpu(cmd_rsp.tx_longretry));
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%19s : %10u\n", "TX Xretries",
+ be32_to_cpu(cmd_rsp.tx_xretries));
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%19s : %10u\n", "TX Unaggr. Xretries",
+ be32_to_cpu(cmd_rsp.ht_txunaggr_xretry));
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%19s : %10u\n", "TX Xretries (HT)",
+ be32_to_cpu(cmd_rsp.ht_tx_xretries));
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%19s : %10u\n", "TX Rate", priv->debug.txrate);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_tgt_stats = {
+ .read = read_file_tgt_stats,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE
+};
+
+static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath9k_htc_priv *priv =
+ (struct ath9k_htc_priv *) file->private_data;
+ char buf[512];
+ unsigned int len = 0;
+
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "Buffers queued",
+ priv->debug.tx_stats.buf_queued);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "Buffers completed",
+ priv->debug.tx_stats.buf_completed);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs queued",
+ priv->debug.tx_stats.skb_queued);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs completed",
+ priv->debug.tx_stats.skb_completed);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs dropped",
+ priv->debug.tx_stats.skb_dropped);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_xmit = {
+ .read = read_file_xmit,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE
+};
+
+static ssize_t read_file_recv(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath9k_htc_priv *priv =
+ (struct ath9k_htc_priv *) file->private_data;
+ char buf[512];
+ unsigned int len = 0;
+
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs allocated",
+ priv->debug.rx_stats.skb_allocated);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs completed",
+ priv->debug.rx_stats.skb_completed);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs Dropped",
+ priv->debug.rx_stats.skb_dropped);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_recv = {
+ .read = read_file_recv,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE
+};
+
+int ath9k_htc_init_debug(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+
+ if (!ath9k_debugfs_root)
+ return -ENOENT;
+
+ priv->debug.debugfs_phy = debugfs_create_dir(wiphy_name(priv->hw->wiphy),
+ ath9k_debugfs_root);
+ if (!priv->debug.debugfs_phy)
+ goto err;
+
+ priv->debug.debugfs_tgt_stats = debugfs_create_file("tgt_stats", S_IRUSR,
+ priv->debug.debugfs_phy,
+ priv, &fops_tgt_stats);
+ if (!priv->debug.debugfs_tgt_stats)
+ goto err;
+
+ priv->debug.debugfs_xmit = debugfs_create_file("xmit", S_IRUSR,
+ priv->debug.debugfs_phy,
+ priv, &fops_xmit);
+ if (!priv->debug.debugfs_xmit)
+ goto err;
+
+ priv->debug.debugfs_recv = debugfs_create_file("recv", S_IRUSR,
+ priv->debug.debugfs_phy,
+ priv, &fops_recv);
+ if (!priv->debug.debugfs_recv)
+ goto err;
+
+ return 0;
+
+err:
+ ath9k_htc_exit_debug(ah);
+ return -ENOMEM;
+}
+
+void ath9k_htc_exit_debug(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+
+ debugfs_remove(priv->debug.debugfs_recv);
+ debugfs_remove(priv->debug.debugfs_xmit);
+ debugfs_remove(priv->debug.debugfs_tgt_stats);
+ debugfs_remove(priv->debug.debugfs_phy);
+}
+
+int ath9k_htc_debug_create_root(void)
+{
+ ath9k_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ if (!ath9k_debugfs_root)
+ return -ENOENT;
+
+ return 0;
+}
+
+void ath9k_htc_debug_remove_root(void)
+{
+ debugfs_remove(ath9k_debugfs_root);
+ ath9k_debugfs_root = NULL;
+}
+
+#endif /* CONFIG_ATH9K_HTC_DEBUGFS */
+
+/*******/
+/* ANI */
+/*******/
+
+static void ath_start_ani(struct ath9k_htc_priv *priv)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ unsigned long timestamp = jiffies_to_msecs(jiffies);
+
+ common->ani.longcal_timer = timestamp;
+ common->ani.shortcal_timer = timestamp;
+ common->ani.checkani_timer = timestamp;
+
+ ieee80211_queue_delayed_work(common->hw, &priv->ath9k_ani_work,
+ msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
+}
+
+void ath9k_ani_work(struct work_struct *work)
+{
+ struct ath9k_htc_priv *priv =
+ container_of(work, struct ath9k_htc_priv,
+ ath9k_ani_work.work);
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ bool longcal = false;
+ bool shortcal = false;
+ bool aniflag = false;
+ unsigned int timestamp = jiffies_to_msecs(jiffies);
+ u32 cal_interval, short_cal_interval;
+
+ short_cal_interval = ATH_STA_SHORT_CALINTERVAL;
+
+ /* Only calibrate if awake */
+ if (ah->power_mode != ATH9K_PM_AWAKE)
+ goto set_timer;
+
+ /* Long calibration runs independently of short calibration. */
+ if ((timestamp - common->ani.longcal_timer) >= ATH_LONG_CALINTERVAL) {
+ longcal = true;
+ ath_print(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
+ common->ani.longcal_timer = timestamp;
+ }
+
+ /* Short calibration applies only while caldone is false */
+ if (!common->ani.caldone) {
+ if ((timestamp - common->ani.shortcal_timer) >=
+ short_cal_interval) {
+ shortcal = true;
+ ath_print(common, ATH_DBG_ANI,
+ "shortcal @%lu\n", jiffies);
+ common->ani.shortcal_timer = timestamp;
+ common->ani.resetcal_timer = timestamp;
+ }
+ } else {
+ if ((timestamp - common->ani.resetcal_timer) >=
+ ATH_RESTART_CALINTERVAL) {
+ common->ani.caldone = ath9k_hw_reset_calvalid(ah);
+ if (common->ani.caldone)
+ common->ani.resetcal_timer = timestamp;
+ }
+ }
+
+ /* Verify whether we must check ANI */
+ if ((timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
+ aniflag = true;
+ common->ani.checkani_timer = timestamp;
+ }
+
+ /* Skip all processing if there's nothing to do. */
+ if (longcal || shortcal || aniflag) {
+
+ ath9k_htc_ps_wakeup(priv);
+
+ /* Call ANI routine if necessary */
+ if (aniflag)
+ ath9k_hw_ani_monitor(ah, ah->curchan);
+
+ /* Perform calibration if necessary */
+ if (longcal || shortcal) {
+ common->ani.caldone =
+ ath9k_hw_calibrate(ah, ah->curchan,
+ common->rx_chainmask,
+ longcal);
+
+ if (longcal)
+ common->ani.noise_floor =
+ ath9k_hw_getchan_noise(ah, ah->curchan);
+
+ ath_print(common, ATH_DBG_ANI,
+ " calibrate chan %u/%x nf: %d\n",
+ ah->curchan->channel,
+ ah->curchan->channelFlags,
+ common->ani.noise_floor);
+ }
+
+ ath9k_htc_ps_restore(priv);
+ }
+
+set_timer:
+ /*
+ * Set timer interval based on previous results.
+ * The interval must be the shortest necessary to satisfy ANI,
+ * short calibration and long calibration.
+ */
+ cal_interval = ATH_LONG_CALINTERVAL;
+ if (priv->ah->config.enable_ani)
+ cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
+ if (!common->ani.caldone)
+ cal_interval = min(cal_interval, (u32)short_cal_interval);
+
+ ieee80211_queue_delayed_work(common->hw, &priv->ath9k_ani_work,
+ msecs_to_jiffies(cal_interval));
+}
+
+/*******/
+/* LED */
+/*******/
+
+static void ath9k_led_blink_work(struct work_struct *work)
+{
+ struct ath9k_htc_priv *priv = container_of(work, struct ath9k_htc_priv,
+ ath9k_led_blink_work.work);
+
+ if (!(priv->op_flags & OP_LED_ASSOCIATED))
+ return;
+
+ if ((priv->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
+ (priv->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
+ ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 0);
+ else
+ ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin,
+ (priv->op_flags & OP_LED_ON) ? 1 : 0);
+
+ ieee80211_queue_delayed_work(priv->hw,
+ &priv->ath9k_led_blink_work,
+ (priv->op_flags & OP_LED_ON) ?
+ msecs_to_jiffies(priv->led_off_duration) :
+ msecs_to_jiffies(priv->led_on_duration));
+
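+ /*
+ * Scale the next on/off durations with the tx/rx activity seen since
+ * the last blink: more activity yields shorter intervals, clamped to
+ * at least 25ms on / 10ms off.
+ */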
+ priv->led_on_duration = priv->led_on_cnt ?
+ max((ATH_LED_ON_DURATION_IDLE - priv->led_on_cnt), 25) :
+ ATH_LED_ON_DURATION_IDLE;
+ priv->led_off_duration = priv->led_off_cnt ?
+ max((ATH_LED_OFF_DURATION_IDLE - priv->led_off_cnt), 10) :
+ ATH_LED_OFF_DURATION_IDLE;
+ priv->led_on_cnt = priv->led_off_cnt = 0;
+
+ if (priv->op_flags & OP_LED_ON)
+ priv->op_flags &= ~OP_LED_ON;
+ else
+ priv->op_flags |= OP_LED_ON;
+}
+
+static void ath9k_led_brightness_work(struct work_struct *work)
+{
+ struct ath_led *led = container_of(work, struct ath_led,
+ brightness_work.work);
+ struct ath9k_htc_priv *priv = led->priv;
+
+ switch (led->brightness) {
+ case LED_OFF:
+ if (led->led_type == ATH_LED_ASSOC ||
+ led->led_type == ATH_LED_RADIO) {
+ ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin,
+ (led->led_type == ATH_LED_RADIO));
+ priv->op_flags &= ~OP_LED_ASSOCIATED;
+ if (led->led_type == ATH_LED_RADIO)
+ priv->op_flags &= ~OP_LED_ON;
+ } else {
+ priv->led_off_cnt++;
+ }
+ break;
+ case LED_FULL:
+ if (led->led_type == ATH_LED_ASSOC) {
+ priv->op_flags |= OP_LED_ASSOCIATED;
+ ieee80211_queue_delayed_work(priv->hw,
+ &priv->ath9k_led_blink_work, 0);
+ } else if (led->led_type == ATH_LED_RADIO) {
+ ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 0);
+ priv->op_flags |= OP_LED_ON;
+ } else {
+ priv->led_on_cnt++;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void ath9k_led_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
+ struct ath9k_htc_priv *priv = led->priv;
+
+ led->brightness = brightness;
+ if (!(priv->op_flags & OP_LED_DEINIT))
+ ieee80211_queue_delayed_work(priv->hw,
+ &led->brightness_work, 0);
+}
+
+static void ath9k_led_stop_brightness(struct ath9k_htc_priv *priv)
+{
+ cancel_delayed_work_sync(&priv->radio_led.brightness_work);
+ cancel_delayed_work_sync(&priv->assoc_led.brightness_work);
+ cancel_delayed_work_sync(&priv->tx_led.brightness_work);
+ cancel_delayed_work_sync(&priv->rx_led.brightness_work);
+}
+
+static int ath9k_register_led(struct ath9k_htc_priv *priv, struct ath_led *led,
+ char *trigger)
+{
+ int ret;
+
+ led->priv = priv;
+ led->led_cdev.name = led->name;
+ led->led_cdev.default_trigger = trigger;
+ led->led_cdev.brightness_set = ath9k_led_brightness;
+
+ ret = led_classdev_register(wiphy_dev(priv->hw->wiphy), &led->led_cdev);
+ if (ret)
+ ath_print(ath9k_hw_common(priv->ah), ATH_DBG_FATAL,
+ "Failed to register led: %s\n", led->name);
+ else
+ led->registered = 1;
+
+ INIT_DELAYED_WORK(&led->brightness_work, ath9k_led_brightness_work);
+
+ return ret;
+}
+
+static void ath9k_unregister_led(struct ath_led *led)
+{
+ if (led->registered) {
+ led_classdev_unregister(&led->led_cdev);
+ led->registered = 0;
+ }
+}
+
+void ath9k_deinit_leds(struct ath9k_htc_priv *priv)
+{
+ priv->op_flags |= OP_LED_DEINIT;
+ ath9k_unregister_led(&priv->assoc_led);
+ priv->op_flags &= ~OP_LED_ASSOCIATED;
+ ath9k_unregister_led(&priv->tx_led);
+ ath9k_unregister_led(&priv->rx_led);
+ ath9k_unregister_led(&priv->radio_led);
+}
+
+void ath9k_init_leds(struct ath9k_htc_priv *priv)
+{
+ char *trigger;
+ int ret;
+
+ if (AR_SREV_9287(priv->ah))
+ priv->ah->led_pin = ATH_LED_PIN_9287;
+ else if (AR_SREV_9271(priv->ah))
+ priv->ah->led_pin = ATH_LED_PIN_9271;
+ else
+ priv->ah->led_pin = ATH_LED_PIN_DEF;
+
+ /* Configure gpio 1 for output */
+ ath9k_hw_cfg_output(priv->ah, priv->ah->led_pin,
+ AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ /* LED off, active low */
+ ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 1);
+
+ INIT_DELAYED_WORK(&priv->ath9k_led_blink_work, ath9k_led_blink_work);
+
+ trigger = ieee80211_get_radio_led_name(priv->hw);
+ snprintf(priv->radio_led.name, sizeof(priv->radio_led.name),
+ "ath9k-%s::radio", wiphy_name(priv->hw->wiphy));
+ ret = ath9k_register_led(priv, &priv->radio_led, trigger);
+ priv->radio_led.led_type = ATH_LED_RADIO;
+ if (ret)
+ goto fail;
+
+ trigger = ieee80211_get_assoc_led_name(priv->hw);
+ snprintf(priv->assoc_led.name, sizeof(priv->assoc_led.name),
+ "ath9k-%s::assoc", wiphy_name(priv->hw->wiphy));
+ ret = ath9k_register_led(priv, &priv->assoc_led, trigger);
+ priv->assoc_led.led_type = ATH_LED_ASSOC;
+ if (ret)
+ goto fail;
+
+ trigger = ieee80211_get_tx_led_name(priv->hw);
+ snprintf(priv->tx_led.name, sizeof(priv->tx_led.name),
+ "ath9k-%s::tx", wiphy_name(priv->hw->wiphy));
+ ret = ath9k_register_led(priv, &priv->tx_led, trigger);
+ priv->tx_led.led_type = ATH_LED_TX;
+ if (ret)
+ goto fail;
+
+ trigger = ieee80211_get_rx_led_name(priv->hw);
+ snprintf(priv->rx_led.name, sizeof(priv->rx_led.name),
+ "ath9k-%s::rx", wiphy_name(priv->hw->wiphy));
+ ret = ath9k_register_led(priv, &priv->rx_led, trigger);
+ priv->rx_led.led_type = ATH_LED_RX;
+ if (ret)
+ goto fail;
+
+ priv->op_flags &= ~OP_LED_DEINIT;
+
+ return;
+
+fail:
+ cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
+ ath9k_deinit_leds(priv);
+}
+
+/*******************/
+/* Rfkill */
+/*******************/
+
+static bool ath_is_rfkill_set(struct ath9k_htc_priv *priv)
+{
+ return ath9k_hw_gpio_get(priv->ah, priv->ah->rfkill_gpio) ==
+ priv->ah->rfkill_polarity;
+}
+
+static void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ bool blocked = !!ath_is_rfkill_set(priv);
+
+ wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
+}
+
+void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv)
+{
+ if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
+ wiphy_rfkill_start_polling(priv->hw->wiphy);
+}
+
+/**********************/
+/* mac80211 Callbacks */
+/**********************/
+
+static int ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+ struct ath9k_htc_priv *priv = hw->priv;
+ int padpos, padsize, ret;
+
+ hdr = (struct ieee80211_hdr *) skb->data;
+
+ /* Add the padding after the header if this is not already done */
+ padpos = ath9k_cmn_padpos(hdr->frame_control);
+ padsize = padpos & 3;
+ if (padsize && skb->len > padpos) {
+ if (skb_headroom(skb) < padsize)
+ return -1;
+ skb_push(skb, padsize);
+ memmove(skb->data, skb->data + padsize, padpos);
+ }
+
+ ret = ath9k_htc_tx_start(priv, skb);
+ if (ret != 0) {
+ if (ret == -ENOMEM) {
+ ath_print(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
+ "Stopping TX queues\n");
+ ieee80211_stop_queues(hw);
+ spin_lock_bh(&priv->tx_lock);
+ priv->tx_queues_stop = true;
+ spin_unlock_bh(&priv->tx_lock);
+ } else {
+ ath_print(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
+ "Tx failed");
+ }
+ goto fail_tx;
+ }
+
+ return 0;
+
+fail_tx:
+ dev_kfree_skb_any(skb);
+ return 0;
+}
+
+static int ath9k_htc_radio_enable(struct ieee80211_hw *hw, bool led)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_channel *curchan = hw->conf.channel;
+ struct ath9k_channel *init_channel;
+ int ret = 0;
+ enum htc_phymode mode;
+ __be16 htc_mode;
+ u8 cmd_rsp;
+
+ ath_print(common, ATH_DBG_CONFIG,
+ "Starting driver with initial channel: %d MHz\n",
+ curchan->center_freq);
+
+ /* setup initial channel */
+ init_channel = ath9k_cmn_get_curchannel(hw, ah);
+
+ /* Reset SERDES registers */
+ ath9k_hw_configpcipowersave(ah, 0, 0);
+
+ ath9k_hw_htc_resetinit(ah);
+ ret = ath9k_hw_reset(ah, init_channel, false);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to reset hardware; reset status %d "
+ "(freq %u MHz)\n", ret, curchan->center_freq);
+ return ret;
+ }
+
+ ath_update_txpow(priv);
+
+ mode = ath9k_htc_get_curmode(priv, init_channel);
+ htc_mode = cpu_to_be16(mode);
+ WMI_CMD_BUF(WMI_SET_MODE_CMDID, &htc_mode);
+ WMI_CMD(WMI_ATH_INIT_CMDID);
+ WMI_CMD(WMI_START_RECV_CMDID);
+
+ ath9k_host_rx_init(priv);
+
+ priv->op_flags &= ~OP_INVALID;
+ htc_start(priv->htc);
+
+ spin_lock_bh(&priv->tx_lock);
+ priv->tx_queues_stop = false;
+ spin_unlock_bh(&priv->tx_lock);
+
+ if (led) {
+ /* Enable LED */
+ ath9k_hw_cfg_output(ah, ah->led_pin,
+ AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ ath9k_hw_set_gpio(ah, ah->led_pin, 0);
+ }
+
+ ieee80211_wake_queues(hw);
+
+ return ret;
+}
+
+static int ath9k_htc_start(struct ieee80211_hw *hw)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ int ret = 0;
+
+ mutex_lock(&priv->mutex);
+ ret = ath9k_htc_radio_enable(hw, false);
+ mutex_unlock(&priv->mutex);
+
+ return ret;
+}
+
+static void ath9k_htc_radio_disable(struct ieee80211_hw *hw, bool led)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ int ret = 0;
+ u8 cmd_rsp;
+
+ if (priv->op_flags & OP_INVALID) {
+ ath_print(common, ATH_DBG_ANY, "Device not present\n");
+ return;
+ }
+
+ if (led) {
+ /* Disable LED */
+ ath9k_hw_set_gpio(ah, ah->led_pin, 1);
+ ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
+ }
+
+ /* Cancel all the running timers/work .. */
+ cancel_work_sync(&priv->ps_work);
+ cancel_delayed_work_sync(&priv->ath9k_ani_work);
+ cancel_delayed_work_sync(&priv->ath9k_aggr_work);
+ cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
+ ath9k_led_stop_brightness(priv);
+
+ ath9k_htc_ps_wakeup(priv);
+ htc_stop(priv->htc);
+ WMI_CMD(WMI_DISABLE_INTR_CMDID);
+ WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
+ WMI_CMD(WMI_STOP_RECV_CMDID);
+ ath9k_hw_phy_disable(ah);
+ ath9k_hw_disable(ah);
+ ath9k_hw_configpcipowersave(ah, 1, 1);
+ ath9k_htc_ps_restore(priv);
+ ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP);
+
+ skb_queue_purge(&priv->tx_queue);
+
+ /* Remove monitor interface here */
+ if (ah->opmode == NL80211_IFTYPE_MONITOR) {
+ if (ath9k_htc_remove_monitor_interface(priv))
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to remove monitor interface\n");
+ else
+ ath_print(common, ATH_DBG_CONFIG,
+ "Monitor interface removed\n");
+ }
+
+ priv->op_flags |= OP_INVALID;
+
+ ath_print(common, ATH_DBG_CONFIG, "Driver halt\n");
+}
+
+static void ath9k_htc_stop(struct ieee80211_hw *hw)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+
+ mutex_lock(&priv->mutex);
+ ath9k_htc_radio_disable(hw, false);
+ mutex_unlock(&priv->mutex);
+}
+
+static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath9k_htc_vif *avp = (void *)vif->drv_priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_vif hvif;
+ int ret = 0;
+ u8 cmd_rsp;
+
+ mutex_lock(&priv->mutex);
+
+ /* Only one interface for now */
+ if (priv->nvifs > 0) {
+ ret = -ENOBUFS;
+ goto out;
+ }
+
+ ath9k_htc_ps_wakeup(priv);
+ memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
+ memcpy(&hvif.myaddr, vif->addr, ETH_ALEN);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ hvif.opmode = cpu_to_be32(HTC_M_STA);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ hvif.opmode = cpu_to_be32(HTC_M_IBSS);
+ break;
+ default:
+ ath_print(common, ATH_DBG_FATAL,
+ "Interface type %d not yet supported\n", vif->type);
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ ath_print(common, ATH_DBG_CONFIG,
+ "Attach a VIF of type: %d\n", vif->type);
+
+ priv->ah->opmode = vif->type;
+
+ /* Index starts from zero on the target */
+ avp->index = hvif.index = priv->nvifs;
+ hvif.rtsthreshold = cpu_to_be16(2304);
+ WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif);
+ if (ret)
+ goto out;
+
+ priv->nvifs++;
+
+ /*
+ * We need a node in target to tx mgmt frames
+ * before association.
+ */
+ ret = ath9k_htc_add_station(priv, vif, NULL);
+ if (ret)
+ goto out;
+
+ ret = ath9k_htc_update_cap_target(priv);
+ if (ret)
+ ath_print(common, ATH_DBG_CONFIG, "Failed to update"
+ " capability in target \n");
+
+ priv->vif = vif;
+out:
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+ return ret;
+}
+
+static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_vif *avp = (void *)vif->drv_priv;
+ struct ath9k_htc_target_vif hvif;
+ int ret = 0;
+ u8 cmd_rsp;
+
+ ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n");
+
+ mutex_lock(&priv->mutex);
+
+ memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
+ memcpy(&hvif.myaddr, vif->addr, ETH_ALEN);
+ hvif.index = avp->index;
+ WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
+ priv->nvifs--;
+
+ ath9k_htc_remove_station(priv, vif, NULL);
+ priv->vif = NULL;
+
+ mutex_unlock(&priv->mutex);
+}
+
+static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ieee80211_conf *conf = &hw->conf;
+
+ mutex_lock(&priv->mutex);
+
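+ /*
+ * Turn the radio back on if mac80211 clears the idle flag while
+ * the chip had been powered down for being idle.
+ */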
+ if (changed & IEEE80211_CONF_CHANGE_IDLE) {
+ bool enable_radio = false;
+ bool idle = !!(conf->flags & IEEE80211_CONF_IDLE);
+
+ if (!idle && priv->ps_idle)
+ enable_radio = true;
+
+ priv->ps_idle = idle;
+
+ if (enable_radio) {
+ ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
+ ath9k_htc_radio_enable(hw, true);
+ ath_print(common, ATH_DBG_CONFIG,
+ "not-idle: enabling radio\n");
+ }
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ struct ieee80211_channel *curchan = hw->conf.channel;
+ int pos = curchan->hw_value;
+ bool is_cw40 = false;
+
+ ath_print(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
+ curchan->center_freq);
+
+ if (check_rc_update(hw, &is_cw40))
+ ath9k_htc_rc_update(priv, is_cw40);
+
+ ath9k_cmn_update_ichannel(hw, &priv->ah->channels[pos]);
+
+ if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to set channel\n");
+ mutex_unlock(&priv->mutex);
+ return -EINVAL;
+ }
+
+ }
+ if (changed & IEEE80211_CONF_CHANGE_PS) {
+ if (conf->flags & IEEE80211_CONF_PS) {
+ ath9k_htc_setpower(priv, ATH9K_PM_NETWORK_SLEEP);
+ priv->ps_enabled = true;
+ } else {
+ priv->ps_enabled = false;
+ cancel_work_sync(&priv->ps_work);
+ ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
+ }
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ if (conf->flags & IEEE80211_CONF_MONITOR) {
+ if (ath9k_htc_add_monitor_interface(priv))
+ ath_print(common, ATH_DBG_FATAL,
+ "Failed to set monitor mode\n");
+ else
+ ath_print(common, ATH_DBG_CONFIG,
+ "HW opmode set to Monitor mode\n");
+ }
+ }
+
+ if (priv->ps_idle) {
+ ath_print(common, ATH_DBG_CONFIG,
+ "idle: disabling radio\n");
+ ath9k_htc_radio_disable(hw, true);
+ }
+
+ mutex_unlock(&priv->mutex);
+
+ return 0;
+}
+
+#define SUPPORTED_FILTERS \
+ (FIF_PROMISC_IN_BSS | \
+ FIF_ALLMULTI | \
+ FIF_CONTROL | \
+ FIF_PSPOLL | \
+ FIF_OTHER_BSS | \
+ FIF_BCN_PRBRESP_PROMISC | \
+ FIF_FCSFAIL)
+
+static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ u32 rfilt;
+
+ mutex_lock(&priv->mutex);
+
+ ath9k_htc_ps_wakeup(priv);
+ changed_flags &= SUPPORTED_FILTERS;
+ *total_flags &= SUPPORTED_FILTERS;
+
+ priv->rxfilter = *total_flags;
+ rfilt = ath9k_htc_calcrxfilter(priv);
+ ath9k_hw_setrxfilter(priv->ah, rfilt);
+
+ ath_print(ath9k_hw_common(priv->ah), ATH_DBG_CONFIG,
+ "Set HW RX filter: 0x%x\n", rfilt);
+
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+}
+
+static void ath9k_htc_sta_notify(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd,
+ struct ieee80211_sta *sta)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ int ret;
+
+ switch (cmd) {
+ case STA_NOTIFY_ADD:
+ ret = ath9k_htc_add_station(priv, vif, sta);
+ if (!ret)
+ ath9k_htc_init_rate(priv, vif, sta);
+ break;
+ case STA_NOTIFY_REMOVE:
+ ath9k_htc_remove_station(priv, vif, sta);
+ break;
+ default:
+ break;
+ }
+}
+
+static int ath9k_htc_conf_tx(struct ieee80211_hw *hw, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_tx_queue_info qi;
+ int ret = 0, qnum;
+
+ if (queue >= WME_NUM_AC)
+ return 0;
+
+ mutex_lock(&priv->mutex);
+
+ memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
+
+ qi.tqi_aifs = params->aifs;
+ qi.tqi_cwmin = params->cw_min;
+ qi.tqi_cwmax = params->cw_max;
+ qi.tqi_burstTime = params->txop;
+
+ qnum = get_hw_qnum(queue, priv->hwq_map);
+
+ ath_print(common, ATH_DBG_CONFIG,
+ "Configure tx [queue/hwq] [%d/%d], "
+ "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
+ queue, qnum, params->aifs, params->cw_min,
+ params->cw_max, params->txop);
+
+ ret = ath_htc_txq_update(priv, qnum, &qi);
+ if (ret)
+ ath_print(common, ATH_DBG_FATAL, "TXQ Update failed\n");
+
+ mutex_unlock(&priv->mutex);
+
+ return ret;
+}
+
+static int ath9k_htc_set_key(struct ieee80211_hw *hw,
+ enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ int ret = 0;
+
+ if (htc_modparam_nohwcrypt)
+ return -ENOSPC;
+
+ mutex_lock(&priv->mutex);
+ ath_print(common, ATH_DBG_CONFIG, "Set HW Key\n");
+ ath9k_htc_ps_wakeup(priv);
+
+ switch (cmd) {
+ case SET_KEY:
+ ret = ath9k_cmn_key_config(common, vif, sta, key);
+ if (ret >= 0) {
+ key->hw_key_idx = ret;
+ /* push IV and Michael MIC generation to stack */
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ if (key->alg == ALG_TKIP)
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ if (priv->ah->sw_mgmt_crypto && key->alg == ALG_CCMP)
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT;
+ ret = 0;
+ }
+ break;
+ case DISABLE_KEY:
+ ath9k_cmn_key_delete(common, key);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+
+ return ret;
+}
+
+static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changed)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ mutex_lock(&priv->mutex);
+ ath9k_htc_ps_wakeup(priv);
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ common->curaid = bss_conf->assoc ?
+ bss_conf->aid : 0;
+ ath_print(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
+ bss_conf->assoc);
+
+ if (bss_conf->assoc) {
+ priv->op_flags |= OP_ASSOCIATED;
+ ath_start_ani(priv);
+ } else {
+ priv->op_flags &= ~OP_ASSOCIATED;
+ cancel_work_sync(&priv->ps_work);
+ cancel_delayed_work_sync(&priv->ath9k_ani_work);
+ }
+ }
+
+ if (changed & BSS_CHANGED_BSSID) {
+ /* Set BSSID */
+ memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
+ ath9k_hw_write_associd(ah);
+
+ ath_print(common, ATH_DBG_CONFIG,
+ "BSSID: %pM aid: 0x%x\n",
+ common->curbssid, common->curaid);
+ }
+
+ if ((changed & BSS_CHANGED_BEACON_INT) ||
+ (changed & BSS_CHANGED_BEACON) ||
+ ((changed & BSS_CHANGED_BEACON_ENABLED) &&
+ bss_conf->enable_beacon)) {
+ priv->op_flags |= OP_ENABLE_BEACON;
+ ath9k_htc_beacon_config(priv, vif);
+ }
+
+ if ((changed & BSS_CHANGED_BEACON_ENABLED) &&
+ !bss_conf->enable_beacon) {
+ priv->op_flags &= ~OP_ENABLE_BEACON;
+ ath9k_htc_beacon_config(priv, vif);
+ }
+
+ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+ ath_print(common, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n",
+ bss_conf->use_short_preamble);
+ if (bss_conf->use_short_preamble)
+ priv->op_flags |= OP_PREAMBLE_SHORT;
+ else
+ priv->op_flags &= ~OP_PREAMBLE_SHORT;
+ }
+
+ if (changed & BSS_CHANGED_ERP_CTS_PROT) {
+ ath_print(common, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n",
+ bss_conf->use_cts_prot);
+ if (bss_conf->use_cts_prot &&
+ hw->conf.channel->band != IEEE80211_BAND_5GHZ)
+ priv->op_flags |= OP_PROTECT_ENABLE;
+ else
+ priv->op_flags &= ~OP_PROTECT_ENABLE;
+ }
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ if (bss_conf->use_short_slot)
+ ah->slottime = 9;
+ else
+ ah->slottime = 20;
+
+ ath9k_hw_init_global_settings(ah);
+ }
+
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+}
+
+static u64 ath9k_htc_get_tsf(struct ieee80211_hw *hw)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ u64 tsf;
+
+ mutex_lock(&priv->mutex);
+ tsf = ath9k_hw_gettsf64(priv->ah);
+ mutex_unlock(&priv->mutex);
+
+ return tsf;
+}
+
+static void ath9k_htc_set_tsf(struct ieee80211_hw *hw, u64 tsf)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+
+ mutex_lock(&priv->mutex);
+ ath9k_hw_settsf64(priv->ah, tsf);
+ mutex_unlock(&priv->mutex);
+}
+
+static void ath9k_htc_reset_tsf(struct ieee80211_hw *hw)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+
+ ath9k_htc_ps_wakeup(priv);
+ mutex_lock(&priv->mutex);
+ ath9k_hw_reset_tsf(priv->ah);
+ mutex_unlock(&priv->mutex);
+ ath9k_htc_ps_restore(priv);
+}
+
+static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta,
+ u16 tid, u16 *ssn)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath9k_htc_aggr_work *work = &priv->aggr_work;
+ struct ath9k_htc_sta *ista;
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ case IEEE80211_AMPDU_TX_STOP:
+ if (!(priv->op_flags & OP_TXAGGR))
+ return -ENOTSUPP;
+ memcpy(work->sta_addr, sta->addr, ETH_ALEN);
+ work->hw = hw;
+ work->vif = vif;
+ work->action = action;
+ work->tid = tid;
+ ieee80211_queue_delayed_work(hw, &priv->ath9k_aggr_work, 0);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ ista = (struct ath9k_htc_sta *) sta->drv_priv;
+ ista->tid_state[tid] = AGGR_OPERATIONAL;
+ break;
+ default:
+ ath_print(ath9k_hw_common(priv->ah), ATH_DBG_FATAL,
+ "Unknown AMPDU action\n");
+ }
+
+ return 0;
+}
+
+static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+
+ mutex_lock(&priv->mutex);
+ spin_lock_bh(&priv->beacon_lock);
+ priv->op_flags |= OP_SCANNING;
+ spin_unlock_bh(&priv->beacon_lock);
+ cancel_work_sync(&priv->ps_work);
+ cancel_delayed_work_sync(&priv->ath9k_ani_work);
+ mutex_unlock(&priv->mutex);
+}
+
+static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+
+ ath9k_htc_ps_wakeup(priv);
+ mutex_lock(&priv->mutex);
+ spin_lock_bh(&priv->beacon_lock);
+ priv->op_flags &= ~OP_SCANNING;
+ spin_unlock_bh(&priv->beacon_lock);
+ priv->op_flags |= OP_FULL_RESET;
+ if (priv->op_flags & OP_ASSOCIATED)
+ ath9k_htc_beacon_config(priv, priv->vif);
+ ath_start_ani(priv);
+ mutex_unlock(&priv->mutex);
+ ath9k_htc_ps_restore(priv);
+}
+
+static int ath9k_htc_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ return 0;
+}
+
+static void ath9k_htc_set_coverage_class(struct ieee80211_hw *hw,
+ u8 coverage_class)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+
+ mutex_lock(&priv->mutex);
+ priv->ah->coverage_class = coverage_class;
+ ath9k_hw_init_global_settings(priv->ah);
+ mutex_unlock(&priv->mutex);
+}
+
+struct ieee80211_ops ath9k_htc_ops = {
+ .tx = ath9k_htc_tx,
+ .start = ath9k_htc_start,
+ .stop = ath9k_htc_stop,
+ .add_interface = ath9k_htc_add_interface,
+ .remove_interface = ath9k_htc_remove_interface,
+ .config = ath9k_htc_config,
+ .configure_filter = ath9k_htc_configure_filter,
+ .sta_notify = ath9k_htc_sta_notify,
+ .conf_tx = ath9k_htc_conf_tx,
+ .bss_info_changed = ath9k_htc_bss_info_changed,
+ .set_key = ath9k_htc_set_key,
+ .get_tsf = ath9k_htc_get_tsf,
+ .set_tsf = ath9k_htc_set_tsf,
+ .reset_tsf = ath9k_htc_reset_tsf,
+ .ampdu_action = ath9k_htc_ampdu_action,
+ .sw_scan_start = ath9k_htc_sw_scan_start,
+ .sw_scan_complete = ath9k_htc_sw_scan_complete,
+ .set_rts_threshold = ath9k_htc_set_rts_threshold,
+ .rfkill_poll = ath9k_htc_rfkill_poll_state,
+ .set_coverage_class = ath9k_htc_set_coverage_class,
+};
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
new file mode 100644
index 000000000000..28abc7d5e909
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -0,0 +1,708 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "htc.h"
+
+/******/
+/* TX */
+/******/
+
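+/*
+ * Map a mac80211 queue number to the hardware queue configured for the
+ * corresponding WME access category (0 = VO, 1 = VI, 2 = BE, 3 = BK).
+ * Anything unexpected falls back to the best-effort queue.
+ */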
+int get_hw_qnum(u16 queue, int *hwq_map)
+{
+ switch (queue) {
+ case 0:
+ return hwq_map[ATH9K_WME_AC_VO];
+ case 1:
+ return hwq_map[ATH9K_WME_AC_VI];
+ case 2:
+ return hwq_map[ATH9K_WME_AC_BE];
+ case 3:
+ return hwq_map[ATH9K_WME_AC_BK];
+ default:
+ return hwq_map[ATH9K_WME_AC_BE];
+ }
+}
+
+int ath_htc_txq_update(struct ath9k_htc_priv *priv, int qnum,
+ struct ath9k_tx_queue_info *qinfo)
+{
+ struct ath_hw *ah = priv->ah;
+ int error = 0;
+ struct ath9k_tx_queue_info qi;
+
+ ath9k_hw_get_txq_props(ah, qnum, &qi);
+
+ qi.tqi_aifs = qinfo->tqi_aifs;
+ qi.tqi_cwmin = qinfo->tqi_cwmin / 2; /* XXX */
+ qi.tqi_cwmax = qinfo->tqi_cwmax;
+ qi.tqi_burstTime = qinfo->tqi_burstTime;
+ qi.tqi_readyTime = qinfo->tqi_readyTime;
+
+ if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
+ ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
+ "Unable to update hardware queue %u!\n", qnum);
+ error = -EIO;
+ } else {
+ ath9k_hw_resettxqueue(ah, qnum);
+ }
+
+ return error;
+}
+
+int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_sta *sta = tx_info->control.sta;
+ struct ath9k_htc_sta *ista;
+ struct ath9k_htc_vif *avp;
+ struct ath9k_htc_tx_ctl tx_ctl;
+ enum htc_endpoint_id epid;
+ u16 qnum, hw_qnum;
+ __le16 fc;
+ u8 *tx_fhdr;
+ u8 sta_idx;
+
+ hdr = (struct ieee80211_hdr *) skb->data;
+ fc = hdr->frame_control;
+
+ avp = (struct ath9k_htc_vif *) tx_info->control.vif->drv_priv;
+ if (sta) {
+ ista = (struct ath9k_htc_sta *) sta->drv_priv;
+ sta_idx = ista->index;
+ } else {
+ sta_idx = 0;
+ }
+
+ memset(&tx_ctl, 0, sizeof(struct ath9k_htc_tx_ctl));
+
+ if (ieee80211_is_data(fc)) {
+ struct tx_frame_hdr tx_hdr;
+ u8 *qc;
+
+ memset(&tx_hdr, 0, sizeof(struct tx_frame_hdr));
+
+ tx_hdr.node_idx = sta_idx;
+ tx_hdr.vif_idx = avp->index;
+
+ if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
+ tx_ctl.type = ATH9K_HTC_AMPDU;
+ tx_hdr.data_type = ATH9K_HTC_AMPDU;
+ } else {
+ tx_ctl.type = ATH9K_HTC_NORMAL;
+ tx_hdr.data_type = ATH9K_HTC_NORMAL;
+ }
+
+ /* The QoS control field is present only in QoS data frames */
+ if (ieee80211_is_data_qos(fc)) {
+ qc = ieee80211_get_qos_ctl(hdr);
+ tx_hdr.tidno = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ }
+
+ /* Check for RTS protection */
+ if (priv->hw->wiphy->rts_threshold != (u32) -1)
+ if (skb->len > priv->hw->wiphy->rts_threshold)
+ tx_hdr.flags |= ATH9K_HTC_TX_RTSCTS;
+
+ /* CTS-to-self */
+ if (!(tx_hdr.flags & ATH9K_HTC_TX_RTSCTS) &&
+ (priv->op_flags & OP_PROTECT_ENABLE))
+ tx_hdr.flags |= ATH9K_HTC_TX_CTSONLY;
+
+ tx_hdr.key_type = ath9k_cmn_get_hw_crypto_keytype(skb);
+ if (tx_hdr.key_type == ATH9K_KEY_TYPE_CLEAR)
+ tx_hdr.keyix = (u8) ATH9K_TXKEYIX_INVALID;
+ else
+ tx_hdr.keyix = tx_info->control.hw_key->hw_key_idx;
+
+ tx_fhdr = skb_push(skb, sizeof(tx_hdr));
+ memcpy(tx_fhdr, (u8 *) &tx_hdr, sizeof(tx_hdr));
+
+ qnum = skb_get_queue_mapping(skb);
+ hw_qnum = get_hw_qnum(qnum, priv->hwq_map);
+
+ switch (hw_qnum) {
+ case 0:
+ epid = priv->data_be_ep;
+ break;
+ case 2:
+ epid = priv->data_vi_ep;
+ break;
+ case 3:
+ epid = priv->data_vo_ep;
+ break;
+ case 1:
+ default:
+ epid = priv->data_bk_ep;
+ break;
+ }
+ } else {
+ struct tx_mgmt_hdr mgmt_hdr;
+
+ memset(&mgmt_hdr, 0, sizeof(struct tx_mgmt_hdr));
+
+ tx_ctl.type = ATH9K_HTC_NORMAL;
+
+ mgmt_hdr.node_idx = sta_idx;
+ mgmt_hdr.vif_idx = avp->index;
+ mgmt_hdr.tidno = 0;
+ mgmt_hdr.flags = 0;
+
+ mgmt_hdr.key_type = ath9k_cmn_get_hw_crypto_keytype(skb);
+ if (mgmt_hdr.key_type == ATH9K_KEY_TYPE_CLEAR)
+ mgmt_hdr.keyix = (u8) ATH9K_TXKEYIX_INVALID;
+ else
+ mgmt_hdr.keyix = tx_info->control.hw_key->hw_key_idx;
+
+ tx_fhdr = skb_push(skb, sizeof(mgmt_hdr));
+ memcpy(tx_fhdr, (u8 *) &mgmt_hdr, sizeof(mgmt_hdr));
+ epid = priv->mgmt_ep;
+ }
+
+ return htc_send(priv->htc, skb, epid, &tx_ctl);
+}
+
+void ath9k_tx_tasklet(unsigned long data)
+{
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
+ struct ieee80211_sta *sta;
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_tx_info *tx_info;
+ struct sk_buff *skb = NULL;
+ __le16 fc;
+
+ while ((skb = skb_dequeue(&priv->tx_queue)) != NULL) {
+
+ hdr = (struct ieee80211_hdr *) skb->data;
+ fc = hdr->frame_control;
+ tx_info = IEEE80211_SKB_CB(skb);
+
+ memset(&tx_info->status, 0, sizeof(tx_info->status));
+
+ rcu_read_lock();
+
+ sta = ieee80211_find_sta(priv->vif, hdr->addr1);
+ if (!sta) {
+ rcu_read_unlock();
+ ieee80211_tx_status(priv->hw, skb);
+ continue;
+ }
+
+ /* Check if we need to start aggregation */
+
+ if (sta && conf_is_ht(&priv->hw->conf) &&
+ (priv->op_flags & OP_TXAGGR)
+ && !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
+ if (ieee80211_is_data_qos(fc)) {
+ u8 *qc, tid;
+ struct ath9k_htc_sta *ista;
+
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & 0xf;
+ ista = (struct ath9k_htc_sta *)sta->drv_priv;
+
+ if ((tid < ATH9K_HTC_MAX_TID) &&
+ ista->tid_state[tid] == AGGR_STOP) {
+ ieee80211_start_tx_ba_session(sta, tid);
+ ista->tid_state[tid] = AGGR_PROGRESS;
+ }
+ }
+ }
+
+ rcu_read_unlock();
+
+ /* Send status to mac80211 */
+ ieee80211_tx_status(priv->hw, skb);
+ }
+
+ /* Wake TX queues if needed */
+ spin_lock_bh(&priv->tx_lock);
+ if (priv->tx_queues_stop) {
+ priv->tx_queues_stop = false;
+ spin_unlock_bh(&priv->tx_lock);
+ ath_print(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
+ "Waking up TX queues\n");
+ ieee80211_wake_queues(priv->hw);
+ return;
+ }
+ spin_unlock_bh(&priv->tx_lock);
+}
+
+void ath9k_htc_txep(void *drv_priv, struct sk_buff *skb,
+ enum htc_endpoint_id ep_id, bool txok)
+{
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) drv_priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ieee80211_tx_info *tx_info;
+
+ if (!skb)
+ return;
+
+ if (ep_id == priv->mgmt_ep) {
+ skb_pull(skb, sizeof(struct tx_mgmt_hdr));
+ } else if ((ep_id == priv->data_bk_ep) ||
+ (ep_id == priv->data_be_ep) ||
+ (ep_id == priv->data_vi_ep) ||
+ (ep_id == priv->data_vo_ep)) {
+ skb_pull(skb, sizeof(struct tx_frame_hdr));
+ } else {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unsupported TX EPID: %d\n", ep_id);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ tx_info = IEEE80211_SKB_CB(skb);
+
+ if (txok)
+ tx_info->flags |= IEEE80211_TX_STAT_ACK;
+
+ skb_queue_tail(&priv->tx_queue, skb);
+ tasklet_schedule(&priv->tx_tasklet);
+}
+
+int ath9k_tx_init(struct ath9k_htc_priv *priv)
+{
+ skb_queue_head_init(&priv->tx_queue);
+ return 0;
+}
+
+void ath9k_tx_cleanup(struct ath9k_htc_priv *priv)
+{
+
+}
+
+bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv,
+ enum ath9k_tx_queue_subtype subtype)
+{
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_tx_queue_info qi;
+ int qnum;
+
+ memset(&qi, 0, sizeof(qi));
+
+ qi.tqi_subtype = subtype;
+ qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
+ qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
+ qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
+ qi.tqi_physCompBuf = 0;
+ qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE | TXQ_FLAG_TXDESCINT_ENABLE;
+
+ qnum = ath9k_hw_setuptxqueue(priv->ah, ATH9K_TX_QUEUE_DATA, &qi);
+ if (qnum == -1)
+ return false;
+
+ if (qnum >= ARRAY_SIZE(priv->hwq_map)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "qnum %u out of range, max %u!\n",
+ qnum, (unsigned int)ARRAY_SIZE(priv->hwq_map));
+ ath9k_hw_releasetxqueue(ah, qnum);
+ return false;
+ }
+
+ priv->hwq_map[subtype] = qnum;
+ return true;
+}
+
+/******/
+/* RX */
+/******/
+
+/*
+ * Calculate the RX filter to be set in the HW.
+ */
+u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv)
+{
+#define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)
+
+ struct ath_hw *ah = priv->ah;
+ u32 rfilt;
+
+ rfilt = (ath9k_hw_getrxfilter(ah) & RX_FILTER_PRESERVE)
+ | ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
+ | ATH9K_RX_FILTER_MCAST;
+
+ /* If not a STA, enable processing of Probe Requests */
+ if (ah->opmode != NL80211_IFTYPE_STATION)
+ rfilt |= ATH9K_RX_FILTER_PROBEREQ;
+
+ /*
+ * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
+ * mode interface or when in monitor mode. AP mode does not need this
+ * since it receives all in-BSS frames anyway.
+ */
+ if (((ah->opmode != NL80211_IFTYPE_AP) &&
+ (priv->rxfilter & FIF_PROMISC_IN_BSS)) ||
+ (ah->opmode == NL80211_IFTYPE_MONITOR))
+ rfilt |= ATH9K_RX_FILTER_PROM;
+
+ if (priv->rxfilter & FIF_CONTROL)
+ rfilt |= ATH9K_RX_FILTER_CONTROL;
+
+ if ((ah->opmode == NL80211_IFTYPE_STATION) &&
+ !(priv->rxfilter & FIF_BCN_PRBRESP_PROMISC))
+ rfilt |= ATH9K_RX_FILTER_MYBEACON;
+ else
+ rfilt |= ATH9K_RX_FILTER_BEACON;
+
+ if (conf_is_ht(&priv->hw->conf))
+ rfilt |= ATH9K_RX_FILTER_COMP_BAR;
+
+ return rfilt;
+
+#undef RX_FILTER_PRESERVE
+}
+
+/*
+ * Recv initialization for opmode change.
+ */
+static void ath9k_htc_opmode_init(struct ath9k_htc_priv *priv)
+{
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ u32 rfilt, mfilt[2];
+
+ /* configure rx filter */
+ rfilt = ath9k_htc_calcrxfilter(priv);
+ ath9k_hw_setrxfilter(ah, rfilt);
+
+ /* configure bssid mask */
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
+ ath_hw_setbssidmask(common);
+
+ /* configure operational mode */
+ ath9k_hw_setopmode(ah);
+
+ /* Handle any link-level address change. */
+ ath9k_hw_setmac(ah, common->macaddr);
+
+ /* calculate and install multicast filter */
+ mfilt[0] = mfilt[1] = ~0;
+ ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
+}
+
+void ath9k_host_rx_init(struct ath9k_htc_priv *priv)
+{
+ ath9k_hw_rxena(priv->ah);
+ ath9k_htc_opmode_init(priv);
+ ath9k_hw_startpcureceive(priv->ah);
+ priv->rx.last_rssi = ATH_RSSI_DUMMY_MARKER;
+}
+
+static void ath9k_process_rate(struct ieee80211_hw *hw,
+ struct ieee80211_rx_status *rxs,
+ u8 rx_rate, u8 rs_flags)
+{
+ struct ieee80211_supported_band *sband;
+ enum ieee80211_band band;
+ unsigned int i = 0;
+
+ if (rx_rate & 0x80) {
+ /* HT rate */
+ rxs->flag |= RX_FLAG_HT;
+ if (rs_flags & ATH9K_RX_2040)
+ rxs->flag |= RX_FLAG_40MHZ;
+ if (rs_flags & ATH9K_RX_GI)
+ rxs->flag |= RX_FLAG_SHORT_GI;
+ rxs->rate_idx = rx_rate & 0x7f;
+ return;
+ }
+
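+ /*
+ * Legacy rate: look up the rate index in the current band's bitrate
+ * table, matching either the long or short preamble hardware value.
+ */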
+ band = hw->conf.channel->band;
+ sband = hw->wiphy->bands[band];
+
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if (sband->bitrates[i].hw_value == rx_rate) {
+ rxs->rate_idx = i;
+ return;
+ }
+ if (sband->bitrates[i].hw_value_short == rx_rate) {
+ rxs->rate_idx = i;
+ rxs->flag |= RX_FLAG_SHORTPRE;
+ return;
+ }
+ }
+}
+
+static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
+ struct ath9k_htc_rxbuf *rxbuf,
+ struct ieee80211_rx_status *rx_status)
+
+{
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_hw *hw = priv->hw;
+ struct sk_buff *skb = rxbuf->skb;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath_htc_rx_status *rxstatus;
+ int hdrlen, padpos, padsize;
+ int last_rssi = ATH_RSSI_DUMMY_MARKER;
+ __le16 fc;
+
+ if (skb->len <= HTC_RX_FRAME_HEADER_SIZE) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Corrupted RX frame, dropping\n");
+ goto rx_next;
+ }
+
+ rxstatus = (struct ath_htc_rx_status *)skb->data;
+
+ if (be16_to_cpu(rxstatus->rs_datalen) -
+ (skb->len - HTC_RX_FRAME_HEADER_SIZE) != 0) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Corrupted RX data len, dropping "
+ "(dlen: %d, skblen: %d)\n",
+ be16_to_cpu(rxstatus->rs_datalen), skb->len);
+ goto rx_next;
+ }
+
+ /* Get the RX status information */
+ memcpy(&rxbuf->rxstatus, rxstatus, HTC_RX_FRAME_HEADER_SIZE);
+ skb_pull(skb, HTC_RX_FRAME_HEADER_SIZE);
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = hdr->frame_control;
+ hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+
+ padpos = ath9k_cmn_padpos(fc);
+
+ padsize = padpos & 3;
+ if (padsize && skb->len >= padpos+padsize+FCS_LEN) {
+ memmove(skb->data + padsize, skb->data, padpos);
+ skb_pull(skb, padsize);
+ }
+
+ memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
+
+ if (rxbuf->rxstatus.rs_status != 0) {
+ if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_CRC)
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_PHY)
+ goto rx_next;
+
+ if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_DECRYPT) {
+ /* FIXME */
+ } else if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_MIC) {
+ if (ieee80211_is_ctl(fc))
+ /*
+ * Sometimes, we get invalid
+ * MIC failures on valid control frames.
+ * Remove these mic errors.
+ */
+ rxbuf->rxstatus.rs_status &= ~ATH9K_RXERR_MIC;
+ else
+ rx_status->flag |= RX_FLAG_MMIC_ERROR;
+ }
+
+ /*
+ * Reject error frames with the exception of
+ * decryption and MIC failures. For monitor mode,
+ * we also ignore the CRC error.
+ */
+ if (priv->ah->opmode == NL80211_IFTYPE_MONITOR) {
+ if (rxbuf->rxstatus.rs_status &
+ ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
+ ATH9K_RXERR_CRC))
+ goto rx_next;
+ } else {
+ if (rxbuf->rxstatus.rs_status &
+ ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
+ goto rx_next;
+ }
+ }
+ }
+
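+ /*
+ * Mark frames decrypted by hardware. If no valid key index was
+ * reported, recover it from the IV of protected frames and check it
+ * against the key map.
+ */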
+ if (!(rxbuf->rxstatus.rs_status & ATH9K_RXERR_DECRYPT)) {
+ u8 keyix;
+ keyix = rxbuf->rxstatus.rs_keyix;
+ if (keyix != ATH9K_RXKEYIX_INVALID) {
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+ } else if (ieee80211_has_protected(fc) &&
+ skb->len >= hdrlen + 4) {
+ keyix = skb->data[hdrlen + 3] >> 6;
+ if (test_bit(keyix, common->keymap))
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+ }
+ }
+
+ ath9k_process_rate(hw, rx_status, rxbuf->rxstatus.rs_rate,
+ rxbuf->rxstatus.rs_flags);
+
+ if (priv->op_flags & OP_ASSOCIATED) {
+ if (rxbuf->rxstatus.rs_rssi != ATH9K_RSSI_BAD &&
+ !rxbuf->rxstatus.rs_moreaggr)
+ ATH_RSSI_LPF(priv->rx.last_rssi,
+ rxbuf->rxstatus.rs_rssi);
+
+ last_rssi = priv->rx.last_rssi;
+
+ if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
+ rxbuf->rxstatus.rs_rssi = ATH_EP_RND(last_rssi,
+ ATH_RSSI_EP_MULTIPLIER);
+
+ if (rxbuf->rxstatus.rs_rssi < 0)
+ rxbuf->rxstatus.rs_rssi = 0;
+
+ if (ieee80211_is_beacon(fc))
+ priv->ah->stats.avgbrssi = rxbuf->rxstatus.rs_rssi;
+ }
+
+ rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp);
+ rx_status->band = hw->conf.channel->band;
+ rx_status->freq = hw->conf.channel->center_freq;
+ rx_status->signal = rxbuf->rxstatus.rs_rssi + ATH_DEFAULT_NOISE_FLOOR;
+ rx_status->antenna = rxbuf->rxstatus.rs_antenna;
+ rx_status->flag |= RX_FLAG_TSFT;
+
+ return true;
+
+rx_next:
+ return false;
+}
+
+/*
+ * FIXME: Handle FLUSH later on.
+ */
+void ath9k_rx_tasklet(unsigned long data)
+{
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
+ struct ath9k_htc_rxbuf *rxbuf = NULL, *tmp_buf = NULL;
+ struct ieee80211_rx_status rx_status;
+ struct sk_buff *skb;
+ unsigned long flags;
+ struct ieee80211_hdr *hdr;
+
+ do {
+ spin_lock_irqsave(&priv->rx.rxbuflock, flags);
+ list_for_each_entry(tmp_buf, &priv->rx.rxbuf, list) {
+ if (tmp_buf->in_process) {
+ rxbuf = tmp_buf;
+ break;
+ }
+ }
+
+ if (rxbuf == NULL) {
+ spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
+ break;
+ }
+
+ if (!rxbuf->skb)
+ goto requeue;
+
+ if (!ath9k_rx_prepare(priv, rxbuf, &rx_status)) {
+ dev_kfree_skb_any(rxbuf->skb);
+ goto requeue;
+ }
+
+ memcpy(IEEE80211_SKB_RXCB(rxbuf->skb), &rx_status,
+ sizeof(struct ieee80211_rx_status));
+ skb = rxbuf->skb;
+ hdr = (struct ieee80211_hdr *) skb->data;
+
+ if (ieee80211_is_beacon(hdr->frame_control) && priv->ps_enabled)
+ ieee80211_queue_work(priv->hw, &priv->ps_work);
+
+ spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
+
+ ieee80211_rx(priv->hw, skb);
+
+ spin_lock_irqsave(&priv->rx.rxbuflock, flags);
+requeue:
+ rxbuf->in_process = false;
+ rxbuf->skb = NULL;
+ list_move_tail(&rxbuf->list, &priv->rx.rxbuf);
+ rxbuf = NULL;
+ spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
+ } while (1);
+}
+
+void ath9k_htc_rxep(void *drv_priv, struct sk_buff *skb,
+ enum htc_endpoint_id ep_id)
+{
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)drv_priv;
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_rxbuf *rxbuf = NULL, *tmp_buf = NULL;
+
+ spin_lock(&priv->rx.rxbuflock);
+ list_for_each_entry(tmp_buf, &priv->rx.rxbuf, list) {
+ if (!tmp_buf->in_process) {
+ rxbuf = tmp_buf;
+ break;
+ }
+ }
+ spin_unlock(&priv->rx.rxbuflock);
+
+ if (rxbuf == NULL) {
+ ath_print(common, ATH_DBG_ANY,
+ "No free RX buffer\n");
+ goto err;
+ }
+
+ spin_lock(&priv->rx.rxbuflock);
+ rxbuf->skb = skb;
+ rxbuf->in_process = true;
+ spin_unlock(&priv->rx.rxbuflock);
+
+ tasklet_schedule(&priv->rx_tasklet);
+ return;
+err:
+ dev_kfree_skb_any(skb);
+ return;
+}
+
+/* FIXME: Locking for cleanup/init */
+
+void ath9k_rx_cleanup(struct ath9k_htc_priv *priv)
+{
+ struct ath9k_htc_rxbuf *rxbuf, *tbuf;
+
+ list_for_each_entry_safe(rxbuf, tbuf, &priv->rx.rxbuf, list) {
+ list_del(&rxbuf->list);
+ if (rxbuf->skb)
+ dev_kfree_skb_any(rxbuf->skb);
+ kfree(rxbuf);
+ }
+}
+
+int ath9k_rx_init(struct ath9k_htc_priv *priv)
+{
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_rxbuf *rxbuf;
+ int i = 0;
+
+ INIT_LIST_HEAD(&priv->rx.rxbuf);
+ spin_lock_init(&priv->rx.rxbuflock);
+
+ for (i = 0; i < ATH9K_HTC_RXBUF; i++) {
+ rxbuf = kzalloc(sizeof(struct ath9k_htc_rxbuf), GFP_KERNEL);
+ if (rxbuf == NULL) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to allocate RX buffers\n");
+ goto err;
+ }
+ list_add_tail(&rxbuf->list, &priv->rx.rxbuf);
+ }
+
+ return 0;
+
+err:
+ ath9k_rx_cleanup(priv);
+ return -ENOMEM;
+}
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
new file mode 100644
index 000000000000..7bf6ce1e7e2e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -0,0 +1,477 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "htc.h"
+
+static int htc_issue_send(struct htc_target *target, struct sk_buff *skb,
+ u16 len, u8 flags, u8 epid,
+ struct ath9k_htc_tx_ctl *tx_ctl)
+{
+ struct htc_frame_hdr *hdr;
+ struct htc_endpoint *endpoint = &target->endpoint[epid];
+ int status;
+
+ hdr = (struct htc_frame_hdr *)
+ skb_push(skb, sizeof(struct htc_frame_hdr));
+ hdr->endpoint_id = epid;
+ hdr->flags = flags;
+ hdr->payload_len = cpu_to_be16(len);
+
+ status = target->hif->send(target->hif_dev, endpoint->ul_pipeid, skb,
+ tx_ctl);
+ return status;
+}
+
+static struct htc_endpoint *get_next_avail_ep(struct htc_endpoint *endpoint)
+{
+ enum htc_endpoint_id avail_epid;
+
+ for (avail_epid = ENDPOINT_MAX; avail_epid > ENDPOINT0; avail_epid--)
+ if (endpoint[avail_epid].service_id == 0)
+ return &endpoint[avail_epid];
+ return NULL;
+}
+
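+/*
+ * USB pipe assignment: the WMI control service uses its own pipe pair,
+ * while all other services share the bulk data pipes.
+ */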
+static u8 service_to_ulpipe(u16 service_id)
+{
+ switch (service_id) {
+ case WMI_CONTROL_SVC:
+ return 4;
+ case WMI_BEACON_SVC:
+ case WMI_CAB_SVC:
+ case WMI_UAPSD_SVC:
+ case WMI_MGMT_SVC:
+ case WMI_DATA_VO_SVC:
+ case WMI_DATA_VI_SVC:
+ case WMI_DATA_BE_SVC:
+ case WMI_DATA_BK_SVC:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static u8 service_to_dlpipe(u16 service_id)
+{
+ switch (service_id) {
+ case WMI_CONTROL_SVC:
+ return 3;
+ case WMI_BEACON_SVC:
+ case WMI_CAB_SVC:
+ case WMI_UAPSD_SVC:
+ case WMI_MGMT_SVC:
+ case WMI_DATA_VO_SVC:
+ case WMI_DATA_VI_SVC:
+ case WMI_DATA_BE_SVC:
+ case WMI_DATA_BK_SVC:
+ return 2;
+ default:
+ return 0;
+ }
+}
+
+static void htc_process_target_rdy(struct htc_target *target,
+ void *buf)
+{
+ struct htc_endpoint *endpoint;
+ struct htc_ready_msg *htc_ready_msg = (struct htc_ready_msg *) buf;
+
+ target->credits = be16_to_cpu(htc_ready_msg->credits);
+ target->credit_size = be16_to_cpu(htc_ready_msg->credit_size);
+
+ endpoint = &target->endpoint[ENDPOINT0];
+ endpoint->service_id = HTC_CTRL_RSVD_SVC;
+ endpoint->max_msglen = HTC_MAX_CONTROL_MESSAGE_LENGTH;
+ complete(&target->target_wait);
+}
+
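+/*
+ * Match a connection response against the endpoint provisionally set up
+ * by htc_connect_service() and move its callbacks and pipe IDs to the
+ * endpoint ID assigned by the target.
+ */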
+static void htc_process_conn_rsp(struct htc_target *target,
+ struct htc_frame_hdr *htc_hdr)
+{
+ struct htc_conn_svc_rspmsg *svc_rspmsg;
+ struct htc_endpoint *endpoint, *tmp_endpoint = NULL;
+ u16 service_id;
+ u16 max_msglen;
+ enum htc_endpoint_id epid, tepid;
+
+ svc_rspmsg = (struct htc_conn_svc_rspmsg *)
+ ((void *) htc_hdr + sizeof(struct htc_frame_hdr));
+
+ if (svc_rspmsg->status == HTC_SERVICE_SUCCESS) {
+ epid = svc_rspmsg->endpoint_id;
+ service_id = be16_to_cpu(svc_rspmsg->service_id);
+ max_msglen = be16_to_cpu(svc_rspmsg->max_msg_len);
+ endpoint = &target->endpoint[epid];
+
+ for (tepid = ENDPOINT_MAX; tepid > ENDPOINT0; tepid--) {
+ tmp_endpoint = &target->endpoint[tepid];
+ if (tmp_endpoint->service_id == service_id) {
+ tmp_endpoint->service_id = 0;
+ break;
+ }
+ }
+
+ if (!tmp_endpoint)
+ return;
+
+ endpoint->service_id = service_id;
+ endpoint->max_txqdepth = tmp_endpoint->max_txqdepth;
+ endpoint->ep_callbacks = tmp_endpoint->ep_callbacks;
+ endpoint->ul_pipeid = tmp_endpoint->ul_pipeid;
+ endpoint->dl_pipeid = tmp_endpoint->dl_pipeid;
+ endpoint->max_msglen = max_msglen;
+ target->conn_rsp_epid = epid;
+ complete(&target->cmd_wait);
+ } else {
+ target->conn_rsp_epid = ENDPOINT_UNUSED;
+ }
+}
+
+static int htc_config_pipe_credits(struct htc_target *target)
+{
+ struct sk_buff *skb;
+ struct htc_config_pipe_msg *cp_msg;
+ int ret, time_left;
+
+ skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC);
+ if (!skb) {
+ dev_err(target->dev, "failed to allocate send buffer\n");
+ return -ENOMEM;
+ }
+ skb_reserve(skb, sizeof(struct htc_frame_hdr));
+
+ cp_msg = (struct htc_config_pipe_msg *)
+ skb_put(skb, sizeof(struct htc_config_pipe_msg));
+
+ cp_msg->message_id = cpu_to_be16(HTC_MSG_CONFIG_PIPE_ID);
+ cp_msg->pipe_id = USB_WLAN_TX_PIPE;
+ cp_msg->credits = 28;
+
+ target->htc_flags |= HTC_OP_CONFIG_PIPE_CREDITS;
+
+ ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0, NULL);
+ if (ret)
+ goto err;
+
+ time_left = wait_for_completion_timeout(&target->cmd_wait, HZ);
+ if (!time_left) {
+ dev_err(target->dev, "HTC credit config timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+err:
+ kfree_skb(skb);
+ return -EINVAL;
+}
+
+static int htc_setup_complete(struct htc_target *target)
+{
+ struct sk_buff *skb;
+ struct htc_comp_msg *comp_msg;
+ int ret = 0, time_left;
+
+ skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC);
+ if (!skb) {
+ dev_err(target->dev, "failed to allocate send buffer\n");
+ return -ENOMEM;
+ }
+ skb_reserve(skb, sizeof(struct htc_frame_hdr));
+
+ comp_msg = (struct htc_comp_msg *)
+ skb_put(skb, sizeof(struct htc_comp_msg));
+ comp_msg->msg_id = cpu_to_be16(HTC_MSG_SETUP_COMPLETE_ID);
+
+ target->htc_flags |= HTC_OP_START_WAIT;
+
+ ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0, NULL);
+ if (ret)
+ goto err;
+
+ time_left = wait_for_completion_timeout(&target->cmd_wait, HZ);
+ if (!time_left) {
+ dev_err(target->dev, "HTC start timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+
+err:
+ kfree_skb(skb);
+ return -EINVAL;
+}
+
+/* HTC APIs */
+
+int htc_init(struct htc_target *target)
+{
+ int ret;
+
+ ret = htc_config_pipe_credits(target);
+ if (ret)
+ return ret;
+
+ return htc_setup_complete(target);
+}
+
+int htc_connect_service(struct htc_target *target,
+ struct htc_service_connreq *service_connreq,
+ enum htc_endpoint_id *conn_rsp_epid)
+{
+ struct sk_buff *skb;
+ struct htc_endpoint *endpoint;
+ struct htc_conn_svc_msg *conn_msg;
+ int ret, time_left;
+
+ /* Find an available endpoint */
+ endpoint = get_next_avail_ep(target->endpoint);
+ if (!endpoint) {
+ dev_err(target->dev, "Endpoint is not available for"
+ "service %d\n", service_connreq->service_id);
+ return -EINVAL;
+ }
+
+ endpoint->service_id = service_connreq->service_id;
+ endpoint->max_txqdepth = service_connreq->max_send_qdepth;
+ endpoint->ul_pipeid = service_to_ulpipe(service_connreq->service_id);
+ endpoint->dl_pipeid = service_to_dlpipe(service_connreq->service_id);
+ endpoint->ep_callbacks = service_connreq->ep_callbacks;
+
+ skb = alloc_skb(sizeof(struct htc_conn_svc_msg) +
+ sizeof(struct htc_frame_hdr), GFP_ATOMIC);
+ if (!skb) {
+ dev_err(target->dev, "Failed to allocate buf to send"
+ "service connect req\n");
+ return -ENOMEM;
+ }
+
+ skb_reserve(skb, sizeof(struct htc_frame_hdr));
+
+ conn_msg = (struct htc_conn_svc_msg *)
+ skb_put(skb, sizeof(struct htc_conn_svc_msg));
+ conn_msg->service_id = cpu_to_be16(service_connreq->service_id);
+ conn_msg->msg_id = cpu_to_be16(HTC_MSG_CONNECT_SERVICE_ID);
+ conn_msg->con_flags = cpu_to_be16(service_connreq->con_flags);
+ conn_msg->dl_pipeid = endpoint->dl_pipeid;
+ conn_msg->ul_pipeid = endpoint->ul_pipeid;
+
+ ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0, NULL);
+ if (ret)
+ goto err;
+
+ time_left = wait_for_completion_timeout(&target->cmd_wait, HZ);
+ if (!time_left) {
+ dev_err(target->dev, "Service connection timeout for: %d\n",
+ service_connreq->service_id);
+ return -ETIMEDOUT;
+ }
+
+ *conn_rsp_epid = target->conn_rsp_epid;
+ return 0;
+err:
+ kfree_skb(skb);
+ return ret;
+}
+
+int htc_send(struct htc_target *target, struct sk_buff *skb,
+ enum htc_endpoint_id epid, struct ath9k_htc_tx_ctl *tx_ctl)
+{
+ return htc_issue_send(target, skb, skb->len, 0, epid, tx_ctl);
+}
+
+void htc_stop(struct htc_target *target)
+{
+ enum htc_endpoint_id epid;
+ struct htc_endpoint *endpoint;
+
+ for (epid = ENDPOINT0; epid <= ENDPOINT_MAX; epid++) {
+ endpoint = &target->endpoint[epid];
+ if (endpoint->service_id != 0)
+ target->hif->stop(target->hif_dev, endpoint->ul_pipeid);
+ }
+}
+
+void htc_start(struct htc_target *target)
+{
+ enum htc_endpoint_id epid;
+ struct htc_endpoint *endpoint;
+
+ for (epid = ENDPOINT0; epid <= ENDPOINT_MAX; epid++) {
+ endpoint = &target->endpoint[epid];
+ if (endpoint->service_id != 0)
+ target->hif->start(target->hif_dev,
+ endpoint->ul_pipeid);
+ }
+}
+
+void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
+ struct sk_buff *skb, bool txok)
+{
+ struct htc_endpoint *endpoint;
+ struct htc_frame_hdr *htc_hdr = NULL;
+
+ if (htc_handle->htc_flags & HTC_OP_CONFIG_PIPE_CREDITS) {
+ complete(&htc_handle->cmd_wait);
+ htc_handle->htc_flags &= ~HTC_OP_CONFIG_PIPE_CREDITS;
+ goto ret;
+ }
+
+ if (htc_handle->htc_flags & HTC_OP_START_WAIT) {
+ complete(&htc_handle->cmd_wait);
+ htc_handle->htc_flags &= ~HTC_OP_START_WAIT;
+ goto ret;
+ }
+
+ if (skb) {
+ htc_hdr = (struct htc_frame_hdr *) skb->data;
+ endpoint = &htc_handle->endpoint[htc_hdr->endpoint_id];
+ skb_pull(skb, sizeof(struct htc_frame_hdr));
+
+ if (endpoint->ep_callbacks.tx) {
+ endpoint->ep_callbacks.tx(endpoint->ep_callbacks.priv,
+ skb, htc_hdr->endpoint_id,
+ txok);
+ }
+ }
+
+ return;
+ret:
+ /* HTC-generated packets are freed here. */
+ if (htc_hdr && htc_hdr->endpoint_id != ENDPOINT0)
+ dev_kfree_skb_any(skb);
+ else
+ kfree_skb(skb);
+}
+
+/*
+ * HTC Messages are handled directly here and the obtained SKB
+ * is freed.
+ *
+ * Service messages (Data, WMI) are passed to the corresponding
+ * endpoint RX handlers, which have to free the SKB.
+ */
+void ath9k_htc_rx_msg(struct htc_target *htc_handle,
+ struct sk_buff *skb, u32 len, u8 pipe_id)
+{
+ struct htc_frame_hdr *htc_hdr;
+ enum htc_endpoint_id epid;
+ struct htc_endpoint *endpoint;
+ __be16 *msg_id;
+
+ if (!htc_handle || !skb)
+ return;
+
+ htc_hdr = (struct htc_frame_hdr *) skb->data;
+ epid = htc_hdr->endpoint_id;
+
+ if (epid >= ENDPOINT_MAX) {
+ if (pipe_id != USB_REG_IN_PIPE)
+ dev_kfree_skb_any(skb);
+ else
+ kfree_skb(skb);
+ return;
+ }
+
+ if (epid == ENDPOINT0) {
+
+ /* Handle trailer */
+ if (htc_hdr->flags & HTC_FLAGS_RECV_TRAILER) {
+ if (be32_to_cpu(*(__be32 *) skb->data) == 0x00C60000)
+ /* Move past the Watchdog pattern */
+ htc_hdr = (struct htc_frame_hdr *)(skb->data + 4);
+ }
+
+ /* Get the message ID */
+ msg_id = (__be16 *) ((void *) htc_hdr +
+ sizeof(struct htc_frame_hdr));
+
+ /* Now process HTC messages */
+ switch (be16_to_cpu(*msg_id)) {
+ case HTC_MSG_READY_ID:
+ htc_process_target_rdy(htc_handle, htc_hdr);
+ break;
+ case HTC_MSG_CONNECT_SERVICE_RESPONSE_ID:
+ htc_process_conn_rsp(htc_handle, htc_hdr);
+ break;
+ default:
+ break;
+ }
+
+ kfree_skb(skb);
+
+ } else {
+ if (htc_hdr->flags & HTC_FLAGS_RECV_TRAILER)
+ skb_trim(skb, len - htc_hdr->control[0]);
+
+ skb_pull(skb, sizeof(struct htc_frame_hdr));
+
+ endpoint = &htc_handle->endpoint[epid];
+ if (endpoint->ep_callbacks.rx)
+ endpoint->ep_callbacks.rx(endpoint->ep_callbacks.priv,
+ skb, epid);
+ }
+}
+
+struct htc_target *ath9k_htc_hw_alloc(void *hif_handle)
+{
+ struct htc_target *target;
+
+ target = kzalloc(sizeof(struct htc_target), GFP_KERNEL);
+ if (!target)
+ printk(KERN_ERR "Unable to allocate memory for"
+ "target device\n");
+
+ return target;
+}
+
+void ath9k_htc_hw_free(struct htc_target *htc)
+{
+ kfree(htc);
+}
+
+int ath9k_htc_hw_init(struct ath9k_htc_hif *hif, struct htc_target *target,
+ void *hif_handle, struct device *dev, u16 devid,
+ enum ath9k_hif_transports transport)
+{
+ struct htc_endpoint *endpoint;
+ int err = 0;
+
+ init_completion(&target->target_wait);
+ init_completion(&target->cmd_wait);
+
+ target->hif = hif;
+ target->hif_dev = hif_handle;
+ target->dev = dev;
+
+ /* Assign control endpoint pipe IDs */
+ endpoint = &target->endpoint[ENDPOINT0];
+ endpoint->ul_pipeid = hif->control_ul_pipe;
+ endpoint->dl_pipeid = hif->control_dl_pipe;
+
+ err = ath9k_htc_probe_device(target, dev, devid);
+ if (err) {
+ printk(KERN_ERR "Failed to initialize the device\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+void ath9k_htc_hw_deinit(struct htc_target *target, bool hot_unplug)
+{
+ if (target)
+ ath9k_htc_disconnect_device(target, hot_unplug);
+}
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.h b/drivers/net/wireless/ath/ath9k/htc_hst.h
new file mode 100644
index 000000000000..ea50ab032d20
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.h
@@ -0,0 +1,246 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef HTC_HST_H
+#define HTC_HST_H
+
+struct ath9k_htc_priv;
+struct htc_target;
+struct ath9k_htc_tx_ctl;
+
+enum ath9k_hif_transports {
+ ATH9K_HIF_USB,
+};
+
+struct ath9k_htc_hif {
+ struct list_head list;
+ const enum ath9k_hif_transports transport;
+ const char *name;
+
+ u8 control_dl_pipe;
+ u8 control_ul_pipe;
+
+ void (*start) (void *hif_handle, u8 pipe);
+ void (*stop) (void *hif_handle, u8 pipe);
+ int (*send) (void *hif_handle, u8 pipe, struct sk_buff *buf,
+ struct ath9k_htc_tx_ctl *tx_ctl);
+};
+
+enum htc_endpoint_id {
+ ENDPOINT_UNUSED = -1,
+ ENDPOINT0 = 0,
+ ENDPOINT1 = 1,
+ ENDPOINT2 = 2,
+ ENDPOINT3 = 3,
+ ENDPOINT4 = 4,
+ ENDPOINT5 = 5,
+ ENDPOINT6 = 6,
+ ENDPOINT7 = 7,
+ ENDPOINT8 = 8,
+ ENDPOINT_MAX = 22
+};
+
+/* Htc frame hdr flags */
+#define HTC_FLAGS_RECV_TRAILER (1 << 1)
+
+struct htc_frame_hdr {
+ u8 endpoint_id;
+ u8 flags;
+ __be16 payload_len;
+ u8 control[4];
+} __packed;
+
+struct htc_ready_msg {
+ __be16 message_id;
+ __be16 credits;
+ __be16 credit_size;
+ u8 max_endpoints;
+ u8 pad;
+} __packed;
+
+struct htc_config_pipe_msg {
+ __be16 message_id;
+ u8 pipe_id;
+ u8 credits;
+} __packed;
+
+struct htc_packet {
+ void *pktcontext;
+ u8 *buf;
+ u8 *buf_payload;
+ u32 buflen;
+ u32 payload_len;
+
+ int endpoint;
+ int status;
+
+ void *context;
+ u32 reserved;
+};
+
+struct htc_ep_callbacks {
+ void *priv;
+ void (*tx) (void *, struct sk_buff *, enum htc_endpoint_id, bool txok);
+ void (*rx) (void *, struct sk_buff *, enum htc_endpoint_id);
+};
+
+#define HTC_TX_QUEUE_SIZE 256
+
+struct htc_txq {
+ struct sk_buff *buf[HTC_TX_QUEUE_SIZE];
+ u32 txqdepth;
+ u16 txbuf_cnt;
+ u16 txq_head;
+ u16 txq_tail;
+};
+
+struct htc_endpoint {
+ u16 service_id;
+
+ struct htc_ep_callbacks ep_callbacks;
+ struct htc_txq htc_txq;
+ u32 max_txqdepth;
+ int max_msglen;
+
+ u8 ul_pipeid;
+ u8 dl_pipeid;
+};
+
+#define HTC_MAX_CONTROL_MESSAGE_LENGTH 255
+#define HTC_CONTROL_BUFFER_SIZE \
+ (HTC_MAX_CONTROL_MESSAGE_LENGTH + sizeof(struct htc_frame_hdr))
+
+#define NUM_CONTROL_BUFFERS 8
+#define HST_ENDPOINT_MAX 8
+
+struct htc_control_buf {
+ struct htc_packet htc_pkt;
+ u8 buf[HTC_CONTROL_BUFFER_SIZE];
+};
+
+#define HTC_OP_START_WAIT BIT(0)
+#define HTC_OP_CONFIG_PIPE_CREDITS BIT(1)
+
+struct htc_target {
+ void *hif_dev;
+ struct ath9k_htc_priv *drv_priv;
+ struct device *dev;
+ struct ath9k_htc_hif *hif;
+ struct htc_endpoint endpoint[HST_ENDPOINT_MAX];
+ struct completion target_wait;
+ struct completion cmd_wait;
+ struct list_head list;
+ enum htc_endpoint_id conn_rsp_epid;
+ u16 credits;
+ u16 credit_size;
+ u8 htc_flags;
+};
+
+enum htc_msg_id {
+ HTC_MSG_READY_ID = 1,
+ HTC_MSG_CONNECT_SERVICE_ID,
+ HTC_MSG_CONNECT_SERVICE_RESPONSE_ID,
+ HTC_MSG_SETUP_COMPLETE_ID,
+ HTC_MSG_CONFIG_PIPE_ID,
+ HTC_MSG_CONFIG_PIPE_RESPONSE_ID,
+};
+
+struct htc_service_connreq {
+ u16 service_id;
+ u16 con_flags;
+ u32 max_send_qdepth;
+ struct htc_ep_callbacks ep_callbacks;
+};
+
+/* Current service IDs */
+
+enum htc_service_group_ids {
+ RSVD_SERVICE_GROUP = 0,
+ WMI_SERVICE_GROUP = 1,
+
+ HTC_SERVICE_GROUP_LAST = 255
+};
+
+#define MAKE_SERVICE_ID(group, index) \
+ (int)(((int)group << 8) | (int)(index))
+
+/* NOTE: service ID of 0x0000 is reserved and should never be used */
+#define HTC_CTRL_RSVD_SVC MAKE_SERVICE_ID(RSVD_SERVICE_GROUP, 1)
+#define HTC_LOOPBACK_RSVD_SVC MAKE_SERVICE_ID(RSVD_SERVICE_GROUP, 2)
+
+#define WMI_CONTROL_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 0)
+#define WMI_BEACON_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 1)
+#define WMI_CAB_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 2)
+#define WMI_UAPSD_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 3)
+#define WMI_MGMT_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 4)
+#define WMI_DATA_VO_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 5)
+#define WMI_DATA_VI_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 6)
+#define WMI_DATA_BE_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 7)
+#define WMI_DATA_BK_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 8)
+
+struct htc_conn_svc_msg {
+ __be16 msg_id;
+ __be16 service_id;
+ __be16 con_flags;
+ u8 dl_pipeid;
+ u8 ul_pipeid;
+ u8 svc_meta_len;
+ u8 pad;
+} __packed;
+
+/* connect response status codes */
+#define HTC_SERVICE_SUCCESS 0
+#define HTC_SERVICE_NOT_FOUND 1
+#define HTC_SERVICE_FAILED 2
+#define HTC_SERVICE_NO_RESOURCES 3
+#define HTC_SERVICE_NO_MORE_EP 4
+
+struct htc_conn_svc_rspmsg {
+ __be16 msg_id;
+ __be16 service_id;
+ u8 status;
+ u8 endpoint_id;
+ __be16 max_msg_len;
+ u8 svc_meta_len;
+ u8 pad;
+} __packed;
+
+struct htc_comp_msg {
+ __be16 msg_id;
+} __packed;
+
+int htc_init(struct htc_target *target);
+int htc_connect_service(struct htc_target *target,
+ struct htc_service_connreq *service_connreq,
+ enum htc_endpoint_id *conn_rsp_eid);
+int htc_send(struct htc_target *target, struct sk_buff *skb,
+ enum htc_endpoint_id eid, struct ath9k_htc_tx_ctl *tx_ctl);
+void htc_stop(struct htc_target *target);
+void htc_start(struct htc_target *target);
+
+void ath9k_htc_rx_msg(struct htc_target *htc_handle,
+ struct sk_buff *skb, u32 len, u8 pipe_id);
+void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
+ struct sk_buff *skb, bool txok);
+
+struct htc_target *ath9k_htc_hw_alloc(void *hif_handle);
+void ath9k_htc_hw_free(struct htc_target *htc);
+int ath9k_htc_hw_init(struct ath9k_htc_hif *hif, struct htc_target *target,
+ void *hif_handle, struct device *dev, u16 devid,
+ enum ath9k_hif_transports transport);
+void ath9k_htc_hw_deinit(struct htc_target *target, bool hot_unplug);
+
+#endif /* HTC_HST_H */
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
new file mode 100644
index 000000000000..624422a8169e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -0,0 +1,280 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef ATH9K_HW_OPS_H
+#define ATH9K_HW_OPS_H
+
+#include "hw.h"
+
+/* Hardware core and driver accessible callbacks */
+
+static inline void ath9k_hw_configpcipowersave(struct ath_hw *ah,
+ int restore,
+ int power_off)
+{
+ ath9k_hw_ops(ah)->config_pci_powersave(ah, restore, power_off);
+}
+
+static inline void ath9k_hw_rxena(struct ath_hw *ah)
+{
+ ath9k_hw_ops(ah)->rx_enable(ah);
+}
+
+static inline void ath9k_hw_set_desc_link(struct ath_hw *ah, void *ds,
+ u32 link)
+{
+ ath9k_hw_ops(ah)->set_desc_link(ds, link);
+}
+
+static inline void ath9k_hw_get_desc_link(struct ath_hw *ah, void *ds,
+ u32 **link)
+{
+ ath9k_hw_ops(ah)->get_desc_link(ds, link);
+}
+static inline bool ath9k_hw_calibrate(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u8 rxchainmask,
+ bool longcal)
+{
+ return ath9k_hw_ops(ah)->calibrate(ah, chan, rxchainmask, longcal);
+}
+
+static inline bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
+{
+ return ath9k_hw_ops(ah)->get_isr(ah, masked);
+}
+
+static inline void ath9k_hw_filltxdesc(struct ath_hw *ah, void *ds, u32 seglen,
+ bool is_firstseg, bool is_lastseg,
+ const void *ds0, dma_addr_t buf_addr,
+ unsigned int qcu)
+{
+ ath9k_hw_ops(ah)->fill_txdesc(ah, ds, seglen, is_firstseg, is_lastseg,
+ ds0, buf_addr, qcu);
+}
+
+static inline int ath9k_hw_txprocdesc(struct ath_hw *ah, void *ds,
+ struct ath_tx_status *ts)
+{
+ return ath9k_hw_ops(ah)->proc_txdesc(ah, ds, ts);
+}
+
+static inline void ath9k_hw_set11n_txdesc(struct ath_hw *ah, void *ds,
+ u32 pktLen, enum ath9k_pkt_type type,
+ u32 txPower, u32 keyIx,
+ enum ath9k_key_type keyType,
+ u32 flags)
+{
+ ath9k_hw_ops(ah)->set11n_txdesc(ah, ds, pktLen, type, txPower, keyIx,
+ keyType, flags);
+}
+
+static inline void ath9k_hw_set11n_ratescenario(struct ath_hw *ah, void *ds,
+ void *lastds,
+ u32 durUpdateEn, u32 rtsctsRate,
+ u32 rtsctsDuration,
+ struct ath9k_11n_rate_series series[],
+ u32 nseries, u32 flags)
+{
+ ath9k_hw_ops(ah)->set11n_ratescenario(ah, ds, lastds, durUpdateEn,
+ rtsctsRate, rtsctsDuration, series,
+ nseries, flags);
+}
+
+static inline void ath9k_hw_set11n_aggr_first(struct ath_hw *ah, void *ds,
+ u32 aggrLen)
+{
+ ath9k_hw_ops(ah)->set11n_aggr_first(ah, ds, aggrLen);
+}
+
+static inline void ath9k_hw_set11n_aggr_middle(struct ath_hw *ah, void *ds,
+ u32 numDelims)
+{
+ ath9k_hw_ops(ah)->set11n_aggr_middle(ah, ds, numDelims);
+}
+
+static inline void ath9k_hw_set11n_aggr_last(struct ath_hw *ah, void *ds)
+{
+ ath9k_hw_ops(ah)->set11n_aggr_last(ah, ds);
+}
+
+static inline void ath9k_hw_clr11n_aggr(struct ath_hw *ah, void *ds)
+{
+ ath9k_hw_ops(ah)->clr11n_aggr(ah, ds);
+}
+
+static inline void ath9k_hw_set11n_burstduration(struct ath_hw *ah, void *ds,
+ u32 burstDuration)
+{
+ ath9k_hw_ops(ah)->set11n_burstduration(ah, ds, burstDuration);
+}
+
+static inline void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, void *ds,
+ u32 vmf)
+{
+ ath9k_hw_ops(ah)->set11n_virtualmorefrag(ah, ds, vmf);
+}
+
+/* Private hardware call ops */
+
+/* PHY ops */
+
+static inline int ath9k_hw_rf_set_freq(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->rf_set_freq(ah, chan);
+}
+
+static inline void ath9k_hw_spur_mitigate_freq(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ ath9k_hw_private_ops(ah)->spur_mitigate_freq(ah, chan);
+}
+
+static inline int ath9k_hw_rf_alloc_ext_banks(struct ath_hw *ah)
+{
+ if (!ath9k_hw_private_ops(ah)->rf_alloc_ext_banks)
+ return 0;
+
+ return ath9k_hw_private_ops(ah)->rf_alloc_ext_banks(ah);
+}
+
+static inline void ath9k_hw_rf_free_ext_banks(struct ath_hw *ah)
+{
+ if (!ath9k_hw_private_ops(ah)->rf_free_ext_banks)
+ return;
+
+ ath9k_hw_private_ops(ah)->rf_free_ext_banks(ah);
+}
+
+static inline bool ath9k_hw_set_rf_regs(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u16 modesIndex)
+{
+ if (!ath9k_hw_private_ops(ah)->set_rf_regs)
+ return true;
+
+ return ath9k_hw_private_ops(ah)->set_rf_regs(ah, chan, modesIndex);
+}
+
+static inline void ath9k_hw_init_bb(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->init_bb(ah, chan);
+}
+
+static inline void ath9k_hw_set_channel_regs(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->set_channel_regs(ah, chan);
+}
+
+static inline int ath9k_hw_process_ini(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->process_ini(ah, chan);
+}
+
+static inline void ath9k_olc_init(struct ath_hw *ah)
+{
+ if (!ath9k_hw_private_ops(ah)->olc_init)
+ return;
+
+ return ath9k_hw_private_ops(ah)->olc_init(ah);
+}
+
+static inline void ath9k_hw_set_rfmode(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->set_rfmode(ah, chan);
+}
+
+static inline void ath9k_hw_mark_phy_inactive(struct ath_hw *ah)
+{
+ return ath9k_hw_private_ops(ah)->mark_phy_inactive(ah);
+}
+
+static inline void ath9k_hw_set_delta_slope(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->set_delta_slope(ah, chan);
+}
+
+static inline bool ath9k_hw_rfbus_req(struct ath_hw *ah)
+{
+ return ath9k_hw_private_ops(ah)->rfbus_req(ah);
+}
+
+static inline void ath9k_hw_rfbus_done(struct ath_hw *ah)
+{
+ return ath9k_hw_private_ops(ah)->rfbus_done(ah);
+}
+
+static inline void ath9k_enable_rfkill(struct ath_hw *ah)
+{
+ return ath9k_hw_private_ops(ah)->enable_rfkill(ah);
+}
+
+static inline void ath9k_hw_restore_chainmask(struct ath_hw *ah)
+{
+ if (!ath9k_hw_private_ops(ah)->restore_chainmask)
+ return;
+
+ return ath9k_hw_private_ops(ah)->restore_chainmask(ah);
+}
+
+static inline void ath9k_hw_set_diversity(struct ath_hw *ah, bool value)
+{
+ return ath9k_hw_private_ops(ah)->set_diversity(ah, value);
+}
+
+static inline bool ath9k_hw_ani_control(struct ath_hw *ah,
+ enum ath9k_ani_cmd cmd, int param)
+{
+ return ath9k_hw_private_ops(ah)->ani_control(ah, cmd, param);
+}
+
+static inline void ath9k_hw_do_getnf(struct ath_hw *ah,
+ int16_t nfarray[NUM_NF_READINGS])
+{
+ ath9k_hw_private_ops(ah)->do_getnf(ah, nfarray);
+}
+
+static inline void ath9k_hw_loadnf(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ ath9k_hw_private_ops(ah)->loadnf(ah, chan);
+}
+
+static inline bool ath9k_hw_init_cal(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->init_cal(ah, chan);
+}
+
+static inline void ath9k_hw_setup_calibration(struct ath_hw *ah,
+ struct ath9k_cal_list *currCal)
+{
+ ath9k_hw_private_ops(ah)->setup_calibration(ah, currCal);
+}
+
+static inline bool ath9k_hw_iscal_supported(struct ath_hw *ah,
+ enum ath9k_cal_types calType)
+{
+ return ath9k_hw_private_ops(ah)->iscal_supported(ah, calType);
+}
+
+#endif /* ATH9K_HW_OPS_H */
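
Editor's note: the inline wrappers above simply dispatch through per-family ops tables; the sketch below shows roughly how a hardware family could register its callbacks (not part of the patch; the ops struct names and the example_* functions are assumptions, while the field names come from the wrappers above).

/* Sketch: a family attach routine filling in the ops the wrappers call. */
static int example_rf_set_freq(struct ath_hw *ah, struct ath9k_channel *chan);
static void example_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan);
static void example_rx_enable(struct ath_hw *ah);

static void example_attach_family_ops(struct ath_hw *ah)
{
	struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
	struct ath_hw_ops *ops = ath9k_hw_ops(ah);

	priv_ops->rf_set_freq = example_rf_set_freq;
	priv_ops->spur_mitigate_freq = example_spur_mitigate;
	ops->rx_enable = example_rx_enable;
}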
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 78b571129c92..c33f17dbe6f1 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008-2009 Atheros Communications Inc.
+ * Copyright (c) 2008-2010 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -19,18 +19,16 @@
#include <asm/unaligned.h>
#include "hw.h"
+#include "hw-ops.h"
#include "rc.h"
-#include "initvals.h"
+#include "ar9003_mac.h"
#define ATH9K_CLOCK_RATE_CCK 22
#define ATH9K_CLOCK_RATE_5GHZ_OFDM 40
#define ATH9K_CLOCK_RATE_2GHZ_OFDM 44
+#define ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM 44
static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type);
-static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan);
-static u32 ath9k_hw_ini_fixup(struct ath_hw *ah,
- struct ar5416_eeprom_def *pEepData,
- u32 reg, u32 value);
MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
@@ -49,6 +47,39 @@ static void __exit ath9k_exit(void)
}
module_exit(ath9k_exit);
+/* Private hardware callbacks */
+
+static void ath9k_hw_init_cal_settings(struct ath_hw *ah)
+{
+ ath9k_hw_private_ops(ah)->init_cal_settings(ah);
+}
+
+static void ath9k_hw_init_mode_regs(struct ath_hw *ah)
+{
+ ath9k_hw_private_ops(ah)->init_mode_regs(ah);
+}
+
+static bool ath9k_hw_macversion_supported(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+
+ return priv_ops->macversion_supported(ah->hw_version.macVersion);
+}
+
+static u32 ath9k_hw_compute_pll_control(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->compute_pll_control(ah, chan);
+}
+
+static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
+{
+ if (!ath9k_hw_private_ops(ah)->init_mode_gain_regs)
+ return;
+
+ ath9k_hw_private_ops(ah)->init_mode_gain_regs(ah);
+}
+
/********************/
/* Helper Functions */
/********************/
@@ -61,7 +92,11 @@ static u32 ath9k_hw_mac_clks(struct ath_hw *ah, u32 usecs)
return usecs *ATH9K_CLOCK_RATE_CCK;
if (conf->channel->band == IEEE80211_BAND_2GHZ)
return usecs *ATH9K_CLOCK_RATE_2GHZ_OFDM;
- return usecs *ATH9K_CLOCK_RATE_5GHZ_OFDM;
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK)
+ return usecs * ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM;
+ else
+ return usecs * ATH9K_CLOCK_RATE_5GHZ_OFDM;
}
static u32 ath9k_hw_mac_to_clks(struct ath_hw *ah, u32 usecs)
@@ -236,21 +271,6 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
}
}
-static int ath9k_hw_get_radiorev(struct ath_hw *ah)
-{
- u32 val;
- int i;
-
- REG_WRITE(ah, AR_PHY(0x36), 0x00007058);
-
- for (i = 0; i < 8; i++)
- REG_WRITE(ah, AR_PHY(0x20), 0x00010000);
- val = (REG_READ(ah, AR_PHY(256)) >> 24) & 0xff;
- val = ((val & 0xf0) >> 4) | ((val & 0x0f) << 4);
-
- return ath9k_hw_reverse_bits(val, 8);
-}
-
/************************************/
/* HW Attach, Detach, Init Routines */
/************************************/
@@ -260,6 +280,8 @@ static void ath9k_hw_disablepcie(struct ath_hw *ah)
if (AR_SREV_9100(ah))
return;
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
REG_WRITE(ah, AR_PCIE_SERDES, 0x28000029);
@@ -271,20 +293,30 @@ static void ath9k_hw_disablepcie(struct ath_hw *ah)
REG_WRITE(ah, AR_PCIE_SERDES, 0x000e1007);
REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
}
+/* This should work for all families, including legacy */
static bool ath9k_hw_chip_test(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
- u32 regAddr[2] = { AR_STA_ID0, AR_PHY_BASE + (8 << 2) };
+ u32 regAddr[2] = { AR_STA_ID0 };
u32 regHold[2];
u32 patternData[4] = { 0x55555555,
0xaaaaaaaa,
0x66666666,
0x99999999 };
- int i, j;
+ int i, j, loop_max;
+
+ if (!AR_SREV_9300_20_OR_LATER(ah)) {
+ loop_max = 2;
+ regAddr[1] = AR_PHY_BASE + (8 << 2);
+ } else
+ loop_max = 1;
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < loop_max; i++) {
u32 addr = regAddr[i];
u32 wrData, rdData;
@@ -339,7 +371,13 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
ah->config.ofdm_trig_high = 500;
ah->config.cck_trig_high = 200;
ah->config.cck_trig_low = 100;
- ah->config.enable_ani = 1;
+
+ /*
+ * For now ANI is disabled for AR9003, as it is still
+ * being tested.
+ */
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ ah->config.enable_ani = 1;
for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
ah->config.spurchans[i][0] = AR_NO_SPUR;
@@ -354,6 +392,12 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
ah->config.rx_intr_mitigation = true;
/*
+ * Tx IQ Calibration (ah->config.tx_iq_calibration) is only
+ * used by AR9003, but it is showing reliability issues.
+ * It will take a while to fix, so it is currently disabled.
+ */
+
+ /*
* We need this for PCI devices only (Cardbus, PCI, miniPCI)
* _and_ if on non-uniprocessor systems (Multiprocessor/HT).
* This means we use it for all AR5416 devices, and the few
@@ -372,7 +416,6 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
if (num_possible_cpus() > 1)
ah->config.serialize_regmode = SER_REG_MODE_AUTO;
}
-EXPORT_SYMBOL(ath9k_hw_init);
static void ath9k_hw_init_defaults(struct ath_hw *ah)
{
@@ -386,8 +429,6 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
ah->hw_version.subvendorid = 0;
ah->ah_flags = 0;
- if (ah->hw_version.devid == AR5416_AR9100_DEVID)
- ah->hw_version.macVersion = AR_SREV_VERSION_9100;
if (!AR_SREV_9100(ah))
ah->ah_flags = AH_USE_EEPROM;
@@ -400,44 +441,17 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
ah->power_mode = ATH9K_PM_UNDEFINED;
}
-static int ath9k_hw_rf_claim(struct ath_hw *ah)
-{
- u32 val;
-
- REG_WRITE(ah, AR_PHY(0), 0x00000007);
-
- val = ath9k_hw_get_radiorev(ah);
- switch (val & AR_RADIO_SREV_MAJOR) {
- case 0:
- val = AR_RAD5133_SREV_MAJOR;
- break;
- case AR_RAD5133_SREV_MAJOR:
- case AR_RAD5122_SREV_MAJOR:
- case AR_RAD2133_SREV_MAJOR:
- case AR_RAD2122_SREV_MAJOR:
- break;
- default:
- ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
- "Radio Chip Rev 0x%02X not supported\n",
- val & AR_RADIO_SREV_MAJOR);
- return -EOPNOTSUPP;
- }
-
- ah->hw_version.analog5GhzRev = val;
-
- return 0;
-}
-
static int ath9k_hw_init_macaddr(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
u32 sum;
int i;
u16 eeval;
+ u32 EEP_MAC[] = { EEP_MAC_LSW, EEP_MAC_MID, EEP_MAC_MSW };
sum = 0;
for (i = 0; i < 3; i++) {
- eeval = ah->eep_ops->get_eeprom(ah, AR_EEPROM_MAC(i));
+ eeval = ah->eep_ops->get_eeprom(ah, EEP_MAC[i]);
sum += eeval;
common->macaddr[2 * i] = eeval >> 8;
common->macaddr[2 * i + 1] = eeval & 0xff;
@@ -448,64 +462,20 @@ static int ath9k_hw_init_macaddr(struct ath_hw *ah)
return 0;
}
-static void ath9k_hw_init_rxgain_ini(struct ath_hw *ah)
-{
- u32 rxgain_type;
-
- if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >= AR5416_EEP_MINOR_VER_17) {
- rxgain_type = ah->eep_ops->get_eeprom(ah, EEP_RXGAIN_TYPE);
-
- if (rxgain_type == AR5416_EEP_RXGAIN_13DB_BACKOFF)
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9280Modes_backoff_13db_rxgain_9280_2,
- ARRAY_SIZE(ar9280Modes_backoff_13db_rxgain_9280_2), 6);
- else if (rxgain_type == AR5416_EEP_RXGAIN_23DB_BACKOFF)
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9280Modes_backoff_23db_rxgain_9280_2,
- ARRAY_SIZE(ar9280Modes_backoff_23db_rxgain_9280_2), 6);
- else
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9280Modes_original_rxgain_9280_2,
- ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6);
- } else {
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9280Modes_original_rxgain_9280_2,
- ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6);
- }
-}
-
-static void ath9k_hw_init_txgain_ini(struct ath_hw *ah)
-{
- u32 txgain_type;
-
- if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >= AR5416_EEP_MINOR_VER_19) {
- txgain_type = ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE);
-
- if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER)
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9280Modes_high_power_tx_gain_9280_2,
- ARRAY_SIZE(ar9280Modes_high_power_tx_gain_9280_2), 6);
- else
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9280Modes_original_tx_gain_9280_2,
- ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6);
- } else {
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9280Modes_original_tx_gain_9280_2,
- ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6);
- }
-}
-
static int ath9k_hw_post_init(struct ath_hw *ah)
{
int ecode;
- if (!ath9k_hw_chip_test(ah))
- return -ENODEV;
+ if (!AR_SREV_9271(ah)) {
+ if (!ath9k_hw_chip_test(ah))
+ return -ENODEV;
+ }
- ecode = ath9k_hw_rf_claim(ah);
- if (ecode != 0)
- return ecode;
+ if (!AR_SREV_9300_20_OR_LATER(ah)) {
+ ecode = ar9002_hw_rf_claim(ah);
+ if (ecode != 0)
+ return ecode;
+ }
ecode = ath9k_hw_eeprom_init(ah);
if (ecode != 0)
@@ -516,14 +486,12 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
ah->eep_ops->get_eeprom_ver(ah),
ah->eep_ops->get_eeprom_rev(ah));
- if (!AR_SREV_9280_10_OR_LATER(ah)) {
- ecode = ath9k_hw_rf_alloc_ext_banks(ah);
- if (ecode) {
- ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
- "Failed allocating banks for "
- "external radio\n");
- return ecode;
- }
+ ecode = ath9k_hw_rf_alloc_ext_banks(ah);
+ if (ecode) {
+ ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
+ "Failed allocating banks for "
+ "external radio\n");
+ return ecode;
}
if (!AR_SREV_9100(ah)) {
@@ -534,321 +502,22 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
return 0;
}
-static bool ath9k_hw_devid_supported(u16 devid)
+static void ath9k_hw_attach_ops(struct ath_hw *ah)
{
- switch (devid) {
- case AR5416_DEVID_PCI:
- case AR5416_DEVID_PCIE:
- case AR5416_AR9100_DEVID:
- case AR9160_DEVID_PCI:
- case AR9280_DEVID_PCI:
- case AR9280_DEVID_PCIE:
- case AR9285_DEVID_PCIE:
- case AR5416_DEVID_AR9287_PCI:
- case AR5416_DEVID_AR9287_PCIE:
- case AR9271_USB:
- case AR2427_DEVID_PCIE:
- return true;
- default:
- break;
- }
- return false;
-}
-
-static bool ath9k_hw_macversion_supported(u32 macversion)
-{
- switch (macversion) {
- case AR_SREV_VERSION_5416_PCI:
- case AR_SREV_VERSION_5416_PCIE:
- case AR_SREV_VERSION_9160:
- case AR_SREV_VERSION_9100:
- case AR_SREV_VERSION_9280:
- case AR_SREV_VERSION_9285:
- case AR_SREV_VERSION_9287:
- case AR_SREV_VERSION_9271:
- return true;
- default:
- break;
- }
- return false;
-}
-
-static void ath9k_hw_init_cal_settings(struct ath_hw *ah)
-{
- if (AR_SREV_9160_10_OR_LATER(ah)) {
- if (AR_SREV_9280_10_OR_LATER(ah)) {
- ah->iq_caldata.calData = &iq_cal_single_sample;
- ah->adcgain_caldata.calData =
- &adc_gain_cal_single_sample;
- ah->adcdc_caldata.calData =
- &adc_dc_cal_single_sample;
- ah->adcdc_calinitdata.calData =
- &adc_init_dc_cal;
- } else {
- ah->iq_caldata.calData = &iq_cal_multi_sample;
- ah->adcgain_caldata.calData =
- &adc_gain_cal_multi_sample;
- ah->adcdc_caldata.calData =
- &adc_dc_cal_multi_sample;
- ah->adcdc_calinitdata.calData =
- &adc_init_dc_cal;
- }
- ah->supp_cals = ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL;
- }
-}
-
-static void ath9k_hw_init_mode_regs(struct ath_hw *ah)
-{
- if (AR_SREV_9271(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar9271Modes_9271,
- ARRAY_SIZE(ar9271Modes_9271), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar9271Common_9271,
- ARRAY_SIZE(ar9271Common_9271), 2);
- INIT_INI_ARRAY(&ah->iniModes_9271_1_0_only,
- ar9271Modes_9271_1_0_only,
- ARRAY_SIZE(ar9271Modes_9271_1_0_only), 6);
- return;
- }
-
- if (AR_SREV_9287_11_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_1,
- ARRAY_SIZE(ar9287Modes_9287_1_1), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_1,
- ARRAY_SIZE(ar9287Common_9287_1_1), 2);
- if (ah->config.pcie_clock_req)
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9287PciePhy_clkreq_off_L1_9287_1_1,
- ARRAY_SIZE(ar9287PciePhy_clkreq_off_L1_9287_1_1), 2);
- else
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9287PciePhy_clkreq_always_on_L1_9287_1_1,
- ARRAY_SIZE(ar9287PciePhy_clkreq_always_on_L1_9287_1_1),
- 2);
- } else if (AR_SREV_9287_10_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_0,
- ARRAY_SIZE(ar9287Modes_9287_1_0), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_0,
- ARRAY_SIZE(ar9287Common_9287_1_0), 2);
-
- if (ah->config.pcie_clock_req)
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9287PciePhy_clkreq_off_L1_9287_1_0,
- ARRAY_SIZE(ar9287PciePhy_clkreq_off_L1_9287_1_0), 2);
- else
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9287PciePhy_clkreq_always_on_L1_9287_1_0,
- ARRAY_SIZE(ar9287PciePhy_clkreq_always_on_L1_9287_1_0),
- 2);
- } else if (AR_SREV_9285_12_OR_LATER(ah)) {
-
-
- INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285_1_2,
- ARRAY_SIZE(ar9285Modes_9285_1_2), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285_1_2,
- ARRAY_SIZE(ar9285Common_9285_1_2), 2);
-
- if (ah->config.pcie_clock_req) {
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9285PciePhy_clkreq_off_L1_9285_1_2,
- ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285_1_2), 2);
- } else {
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9285PciePhy_clkreq_always_on_L1_9285_1_2,
- ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285_1_2),
- 2);
- }
- } else if (AR_SREV_9285_10_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285,
- ARRAY_SIZE(ar9285Modes_9285), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285,
- ARRAY_SIZE(ar9285Common_9285), 2);
-
- if (ah->config.pcie_clock_req) {
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9285PciePhy_clkreq_off_L1_9285,
- ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285), 2);
- } else {
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9285PciePhy_clkreq_always_on_L1_9285,
- ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285), 2);
- }
- } else if (AR_SREV_9280_20_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280_2,
- ARRAY_SIZE(ar9280Modes_9280_2), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280_2,
- ARRAY_SIZE(ar9280Common_9280_2), 2);
-
- if (ah->config.pcie_clock_req) {
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9280PciePhy_clkreq_off_L1_9280,
- ARRAY_SIZE(ar9280PciePhy_clkreq_off_L1_9280),2);
- } else {
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9280PciePhy_clkreq_always_on_L1_9280,
- ARRAY_SIZE(ar9280PciePhy_clkreq_always_on_L1_9280), 2);
- }
- INIT_INI_ARRAY(&ah->iniModesAdditional,
- ar9280Modes_fast_clock_9280_2,
- ARRAY_SIZE(ar9280Modes_fast_clock_9280_2), 3);
- } else if (AR_SREV_9280_10_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280,
- ARRAY_SIZE(ar9280Modes_9280), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280,
- ARRAY_SIZE(ar9280Common_9280), 2);
- } else if (AR_SREV_9160_10_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9160,
- ARRAY_SIZE(ar5416Modes_9160), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9160,
- ARRAY_SIZE(ar5416Common_9160), 2);
- INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0_9160,
- ARRAY_SIZE(ar5416Bank0_9160), 2);
- INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain_9160,
- ARRAY_SIZE(ar5416BB_RfGain_9160), 3);
- INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1_9160,
- ARRAY_SIZE(ar5416Bank1_9160), 2);
- INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2_9160,
- ARRAY_SIZE(ar5416Bank2_9160), 2);
- INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3_9160,
- ARRAY_SIZE(ar5416Bank3_9160), 3);
- INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9160,
- ARRAY_SIZE(ar5416Bank6_9160), 3);
- INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9160,
- ARRAY_SIZE(ar5416Bank6TPC_9160), 3);
- INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7_9160,
- ARRAY_SIZE(ar5416Bank7_9160), 2);
- if (AR_SREV_9160_11(ah)) {
- INIT_INI_ARRAY(&ah->iniAddac,
- ar5416Addac_91601_1,
- ARRAY_SIZE(ar5416Addac_91601_1), 2);
- } else {
- INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9160,
- ARRAY_SIZE(ar5416Addac_9160), 2);
- }
- } else if (AR_SREV_9100_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9100,
- ARRAY_SIZE(ar5416Modes_9100), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9100,
- ARRAY_SIZE(ar5416Common_9100), 2);
- INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0_9100,
- ARRAY_SIZE(ar5416Bank0_9100), 2);
- INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain_9100,
- ARRAY_SIZE(ar5416BB_RfGain_9100), 3);
- INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1_9100,
- ARRAY_SIZE(ar5416Bank1_9100), 2);
- INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2_9100,
- ARRAY_SIZE(ar5416Bank2_9100), 2);
- INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3_9100,
- ARRAY_SIZE(ar5416Bank3_9100), 3);
- INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9100,
- ARRAY_SIZE(ar5416Bank6_9100), 3);
- INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9100,
- ARRAY_SIZE(ar5416Bank6TPC_9100), 3);
- INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7_9100,
- ARRAY_SIZE(ar5416Bank7_9100), 2);
- INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9100,
- ARRAY_SIZE(ar5416Addac_9100), 2);
- } else {
- INIT_INI_ARRAY(&ah->iniModes, ar5416Modes,
- ARRAY_SIZE(ar5416Modes), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar5416Common,
- ARRAY_SIZE(ar5416Common), 2);
- INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0,
- ARRAY_SIZE(ar5416Bank0), 2);
- INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain,
- ARRAY_SIZE(ar5416BB_RfGain), 3);
- INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1,
- ARRAY_SIZE(ar5416Bank1), 2);
- INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2,
- ARRAY_SIZE(ar5416Bank2), 2);
- INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3,
- ARRAY_SIZE(ar5416Bank3), 3);
- INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6,
- ARRAY_SIZE(ar5416Bank6), 3);
- INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC,
- ARRAY_SIZE(ar5416Bank6TPC), 3);
- INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7,
- ARRAY_SIZE(ar5416Bank7), 2);
- INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac,
- ARRAY_SIZE(ar5416Addac), 2);
- }
-}
-
-static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
-{
- if (AR_SREV_9287_11_OR_LATER(ah))
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9287Modes_rx_gain_9287_1_1,
- ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_1), 6);
- else if (AR_SREV_9287_10(ah))
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9287Modes_rx_gain_9287_1_0,
- ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_0), 6);
- else if (AR_SREV_9280_20(ah))
- ath9k_hw_init_rxgain_ini(ah);
-
- if (AR_SREV_9287_11_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9287Modes_tx_gain_9287_1_1,
- ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_1), 6);
- } else if (AR_SREV_9287_10(ah)) {
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9287Modes_tx_gain_9287_1_0,
- ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_0), 6);
- } else if (AR_SREV_9280_20(ah)) {
- ath9k_hw_init_txgain_ini(ah);
- } else if (AR_SREV_9285_12_OR_LATER(ah)) {
- u32 txgain_type = ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE);
-
- /* txgain table */
- if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER) {
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9285Modes_high_power_tx_gain_9285_1_2,
- ARRAY_SIZE(ar9285Modes_high_power_tx_gain_9285_1_2), 6);
- } else {
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9285Modes_original_tx_gain_9285_1_2,
- ARRAY_SIZE(ar9285Modes_original_tx_gain_9285_1_2), 6);
- }
-
- }
-}
-
-static void ath9k_hw_init_eeprom_fix(struct ath_hw *ah)
-{
- u32 i, j;
-
- if (ah->hw_version.devid == AR9280_DEVID_PCI) {
-
- /* EEPROM Fixup */
- for (i = 0; i < ah->iniModes.ia_rows; i++) {
- u32 reg = INI_RA(&ah->iniModes, i, 0);
-
- for (j = 1; j < ah->iniModes.ia_columns; j++) {
- u32 val = INI_RA(&ah->iniModes, i, j);
-
- INI_RA(&ah->iniModes, i, j) =
- ath9k_hw_ini_fixup(ah,
- &ah->eeprom.def,
- reg, val);
- }
- }
- }
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ ar9003_hw_attach_ops(ah);
+ else
+ ar9002_hw_attach_ops(ah);
}
-int ath9k_hw_init(struct ath_hw *ah)
+/* Called for all hardware families */
+static int __ath9k_hw_init(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
int r = 0;
- if (!ath9k_hw_devid_supported(ah->hw_version.devid)) {
- ath_print(common, ATH_DBG_FATAL,
- "Unsupported device ID: 0x%0x\n",
- ah->hw_version.devid);
- return -EOPNOTSUPP;
- }
-
- ath9k_hw_init_defaults(ah);
- ath9k_hw_init_config(ah);
+ if (ah->hw_version.devid == AR5416_AR9100_DEVID)
+ ah->hw_version.macVersion = AR_SREV_VERSION_9100;
if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
ath_print(common, ATH_DBG_FATAL,
@@ -856,6 +525,11 @@ int ath9k_hw_init(struct ath_hw *ah)
return -EIO;
}
+ ath9k_hw_init_defaults(ah);
+ ath9k_hw_init_config(ah);
+
+ ath9k_hw_attach_ops(ah);
+
if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) {
ath_print(common, ATH_DBG_FATAL, "Couldn't wakeup chip\n");
return -EIO;
@@ -880,7 +554,7 @@ int ath9k_hw_init(struct ath_hw *ah)
else
ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD;
- if (!ath9k_hw_macversion_supported(ah->hw_version.macVersion)) {
+ if (!ath9k_hw_macversion_supported(ah)) {
ath_print(common, ATH_DBG_FATAL,
"Mac Chip Rev 0x%02x.%x is not supported by "
"this driver\n", ah->hw_version.macVersion,
@@ -888,45 +562,45 @@ int ath9k_hw_init(struct ath_hw *ah)
return -EOPNOTSUPP;
}
- if (AR_SREV_9100(ah)) {
- ah->iq_caldata.calData = &iq_cal_multi_sample;
- ah->supp_cals = IQ_MISMATCH_CAL;
- ah->is_pciexpress = false;
- }
-
- if (AR_SREV_9271(ah))
+ if (AR_SREV_9271(ah) || AR_SREV_9100(ah))
ah->is_pciexpress = false;
ah->hw_version.phyRev = REG_READ(ah, AR_PHY_CHIP_ID);
-
ath9k_hw_init_cal_settings(ah);
ah->ani_function = ATH9K_ANI_ALL;
- if (AR_SREV_9280_10_OR_LATER(ah)) {
+ if (AR_SREV_9280_10_OR_LATER(ah) && !AR_SREV_9300_20_OR_LATER(ah))
ah->ani_function &= ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL;
- ah->ath9k_hw_rf_set_freq = &ath9k_hw_ar9280_set_channel;
- ah->ath9k_hw_spur_mitigate_freq = &ath9k_hw_9280_spur_mitigate;
- } else {
- ah->ath9k_hw_rf_set_freq = &ath9k_hw_set_channel;
- ah->ath9k_hw_spur_mitigate_freq = &ath9k_hw_spur_mitigate;
- }
ath9k_hw_init_mode_regs(ah);
+ /*
+ * Configure PCIe after INI init. SERDES values now come from the INI file.
+ * This enables PCIe low power mode.
+ */
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ u32 regval;
+ unsigned int i;
+
+ /* Set Bits 16 and 17 in the AR_WA register. */
+ regval = REG_READ(ah, AR_WA);
+ regval |= 0x00030000;
+ REG_WRITE(ah, AR_WA, regval);
+
+ for (i = 0; i < ah->iniPcieSerdesLowPower.ia_rows; i++) {
+ REG_WRITE(ah,
+ INI_RA(&ah->iniPcieSerdesLowPower, i, 0),
+ INI_RA(&ah->iniPcieSerdesLowPower, i, 1));
+ }
+ }
+
if (ah->is_pciexpress)
ath9k_hw_configpcipowersave(ah, 0, 0);
else
ath9k_hw_disablepcie(ah);
- /* Support for Japan ch.14 (2484) spread */
- if (AR_SREV_9287_11_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniCckfirNormal,
- ar9287Common_normal_cck_fir_coeff_92871_1,
- ARRAY_SIZE(ar9287Common_normal_cck_fir_coeff_92871_1), 2);
- INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
- ar9287Common_japan_2484_cck_fir_coeff_92871_1,
- ARRAY_SIZE(ar9287Common_japan_2484_cck_fir_coeff_92871_1), 2);
- }
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ ar9002_hw_cck_chan14_spread(ah);
r = ath9k_hw_post_init(ah);
if (r)
@@ -937,8 +611,6 @@ int ath9k_hw_init(struct ath_hw *ah)
if (r)
return r;
- ath9k_hw_init_eeprom_fix(ah);
-
r = ath9k_hw_init_macaddr(ah);
if (r) {
ath_print(common, ATH_DBG_FATAL,
@@ -951,6 +623,9 @@ int ath9k_hw_init(struct ath_hw *ah)
else
ah->tx_trig_level = (AR_FTRIG_512B >> AR_FTRIG_S);
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ ar9003_hw_set_nf_limits(ah);
+
ath9k_init_nfcal_hist_buffer(ah);
common->state = ATH_HW_INITIALIZED;
@@ -958,24 +633,50 @@ int ath9k_hw_init(struct ath_hw *ah)
return 0;
}
-static void ath9k_hw_init_bb(struct ath_hw *ah,
- struct ath9k_channel *chan)
+int ath9k_hw_init(struct ath_hw *ah)
{
- u32 synthDelay;
+ int ret;
+ struct ath_common *common = ath9k_hw_common(ah);
- synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
- if (IS_CHAN_B(chan))
- synthDelay = (4 * synthDelay) / 22;
- else
- synthDelay /= 10;
+ /* These cover the AR5008/AR9001/AR9002/AR9003 hardware families of chipsets */
+ switch (ah->hw_version.devid) {
+ case AR5416_DEVID_PCI:
+ case AR5416_DEVID_PCIE:
+ case AR5416_AR9100_DEVID:
+ case AR9160_DEVID_PCI:
+ case AR9280_DEVID_PCI:
+ case AR9280_DEVID_PCIE:
+ case AR9285_DEVID_PCIE:
+ case AR9287_DEVID_PCI:
+ case AR9287_DEVID_PCIE:
+ case AR2427_DEVID_PCIE:
+ case AR9300_DEVID_PCIE:
+ break;
+ default:
+ if (common->bus_ops->ath_bus_type == ATH_USB)
+ break;
+ ath_print(common, ATH_DBG_FATAL,
+ "Hardware device ID 0x%04x not supported\n",
+ ah->hw_version.devid);
+ return -EOPNOTSUPP;
+ }
- REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+ ret = __ath9k_hw_init(ah);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to initialize hardware; "
+ "initialization status: %d\n", ret);
+ return ret;
+ }
- udelay(synthDelay + BASE_ACTIVATE_DELAY);
+ return 0;
}
+EXPORT_SYMBOL(ath9k_hw_init);
static void ath9k_hw_init_qos(struct ath_hw *ah)
{
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_MIC_QOS_CONTROL, 0x100aa);
REG_WRITE(ah, AR_MIC_QOS_SELECT, 0x3210);
@@ -989,105 +690,22 @@ static void ath9k_hw_init_qos(struct ath_hw *ah)
REG_WRITE(ah, AR_TXOP_4_7, 0xFFFFFFFF);
REG_WRITE(ah, AR_TXOP_8_11, 0xFFFFFFFF);
REG_WRITE(ah, AR_TXOP_12_15, 0xFFFFFFFF);
-}
-
-static void ath9k_hw_change_target_baud(struct ath_hw *ah, u32 freq, u32 baud)
-{
- u32 lcr;
- u32 baud_divider = freq * 1000 * 1000 / 16 / baud;
-
- lcr = REG_READ(ah , 0x5100c);
- lcr |= 0x80;
- REG_WRITE(ah, 0x5100c, lcr);
- REG_WRITE(ah, 0x51004, (baud_divider >> 8));
- REG_WRITE(ah, 0x51000, (baud_divider & 0xff));
-
- lcr &= ~0x80;
- REG_WRITE(ah, 0x5100c, lcr);
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
}
static void ath9k_hw_init_pll(struct ath_hw *ah,
struct ath9k_channel *chan)
{
- u32 pll;
-
- if (AR_SREV_9100(ah)) {
- if (chan && IS_CHAN_5GHZ(chan))
- pll = 0x1450;
- else
- pll = 0x1458;
- } else {
- if (AR_SREV_9280_10_OR_LATER(ah)) {
- pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
-
- if (chan && IS_CHAN_HALF_RATE(chan))
- pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
- else if (chan && IS_CHAN_QUARTER_RATE(chan))
- pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
-
- if (chan && IS_CHAN_5GHZ(chan)) {
- pll |= SM(0x28, AR_RTC_9160_PLL_DIV);
-
-
- if (AR_SREV_9280_20(ah)) {
- if (((chan->channel % 20) == 0)
- || ((chan->channel % 10) == 0))
- pll = 0x2850;
- else
- pll = 0x142c;
- }
- } else {
- pll |= SM(0x2c, AR_RTC_9160_PLL_DIV);
- }
-
- } else if (AR_SREV_9160_10_OR_LATER(ah)) {
-
- pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
-
- if (chan && IS_CHAN_HALF_RATE(chan))
- pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
- else if (chan && IS_CHAN_QUARTER_RATE(chan))
- pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
-
- if (chan && IS_CHAN_5GHZ(chan))
- pll |= SM(0x50, AR_RTC_9160_PLL_DIV);
- else
- pll |= SM(0x58, AR_RTC_9160_PLL_DIV);
- } else {
- pll = AR_RTC_PLL_REFDIV_5 | AR_RTC_PLL_DIV2;
-
- if (chan && IS_CHAN_HALF_RATE(chan))
- pll |= SM(0x1, AR_RTC_PLL_CLKSEL);
- else if (chan && IS_CHAN_QUARTER_RATE(chan))
- pll |= SM(0x2, AR_RTC_PLL_CLKSEL);
+ u32 pll = ath9k_hw_compute_pll_control(ah, chan);
- if (chan && IS_CHAN_5GHZ(chan))
- pll |= SM(0xa, AR_RTC_PLL_DIV);
- else
- pll |= SM(0xb, AR_RTC_PLL_DIV);
- }
- }
REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);
/* Switch the core clock for ar9271 to 117Mhz */
if (AR_SREV_9271(ah)) {
- if ((pll == 0x142c) || (pll == 0x2850) ) {
- udelay(500);
- /* set CLKOBS to output AHB clock */
- REG_WRITE(ah, 0x7020, 0xe);
- /*
- * 0x304: 117Mhz, ahb_ratio: 1x1
- * 0x306: 40Mhz, ahb_ratio: 1x1
- */
- REG_WRITE(ah, 0x50040, 0x304);
- /*
- * makes adjustments for the baud dividor to keep the
- * targetted baud rate based on the used core clock.
- */
- ath9k_hw_change_target_baud(ah, AR9271_CORE_CLOCK,
- AR9271_TARGET_BAUD_RATE);
- }
+ udelay(500);
+ REG_WRITE(ah, 0x50040, 0x304);
}
udelay(RTC_PLL_SETTLE_DELAY);
@@ -1095,70 +713,58 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK);
}
-static void ath9k_hw_init_chain_masks(struct ath_hw *ah)
-{
- int rx_chainmask, tx_chainmask;
-
- rx_chainmask = ah->rxchainmask;
- tx_chainmask = ah->txchainmask;
-
- switch (rx_chainmask) {
- case 0x5:
- REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
- AR_PHY_SWAP_ALT_CHAIN);
- case 0x3:
- if (ah->hw_version.macVersion == AR_SREV_REVISION_5416_10) {
- REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7);
- REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7);
- break;
- }
- case 0x1:
- case 0x2:
- case 0x7:
- REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
- REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
- break;
- default:
- break;
- }
-
- REG_WRITE(ah, AR_SELFGEN_MASK, tx_chainmask);
- if (tx_chainmask == 0x5) {
- REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
- AR_PHY_SWAP_ALT_CHAIN);
- }
- if (AR_SREV_9100(ah))
- REG_WRITE(ah, AR_PHY_ANALOG_SWAP,
- REG_READ(ah, AR_PHY_ANALOG_SWAP) | 0x00000001);
-}
-
static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
enum nl80211_iftype opmode)
{
- ah->mask_reg = AR_IMR_TXERR |
+ u32 imr_reg = AR_IMR_TXERR |
AR_IMR_TXURN |
AR_IMR_RXERR |
AR_IMR_RXORN |
AR_IMR_BCNMISC;
- if (ah->config.rx_intr_mitigation)
- ah->mask_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
- else
- ah->mask_reg |= AR_IMR_RXOK;
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ imr_reg |= AR_IMR_RXOK_HP;
+ if (ah->config.rx_intr_mitigation)
+ imr_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
+ else
+ imr_reg |= AR_IMR_RXOK_LP;
- ah->mask_reg |= AR_IMR_TXOK;
+ } else {
+ if (ah->config.rx_intr_mitigation)
+ imr_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
+ else
+ imr_reg |= AR_IMR_RXOK;
+ }
+
+ if (ah->config.tx_intr_mitigation)
+ imr_reg |= AR_IMR_TXINTM | AR_IMR_TXMINTR;
+ else
+ imr_reg |= AR_IMR_TXOK;
if (opmode == NL80211_IFTYPE_AP)
- ah->mask_reg |= AR_IMR_MIB;
+ imr_reg |= AR_IMR_MIB;
+
+ ENABLE_REGWRITE_BUFFER(ah);
- REG_WRITE(ah, AR_IMR, ah->mask_reg);
- REG_WRITE(ah, AR_IMR_S2, REG_READ(ah, AR_IMR_S2) | AR_IMR_S2_GTT);
+ REG_WRITE(ah, AR_IMR, imr_reg);
+ ah->imrs2_reg |= AR_IMR_S2_GTT;
+ REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
if (!AR_SREV_9100(ah)) {
REG_WRITE(ah, AR_INTR_SYNC_CAUSE, 0xFFFFFFFF);
REG_WRITE(ah, AR_INTR_SYNC_ENABLE, AR_INTR_SYNC_DEFAULT);
REG_WRITE(ah, AR_INTR_SYNC_MASK, 0);
}
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE, 0);
+ REG_WRITE(ah, AR_INTR_PRIO_ASYNC_MASK, 0);
+ REG_WRITE(ah, AR_INTR_PRIO_SYNC_ENABLE, 0);
+ REG_WRITE(ah, AR_INTR_PRIO_SYNC_MASK, 0);
+ }
}
static void ath9k_hw_setslottime(struct ath_hw *ah, u32 us)
@@ -1241,19 +847,13 @@ void ath9k_hw_deinit(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
- if (common->state <= ATH_HW_INITIALIZED)
+ if (common->state < ATH_HW_INITIALIZED)
goto free_hw;
- if (!AR_SREV_9100(ah))
- ath9k_hw_ani_disable(ah);
-
ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
free_hw:
- if (!AR_SREV_9280_10_OR_LATER(ah))
- ath9k_hw_rf_free_ext_banks(ah);
- kfree(ah);
- ah = NULL;
+ ath9k_hw_rf_free_ext_banks(ah);
}
EXPORT_SYMBOL(ath9k_hw_deinit);
@@ -1261,136 +861,7 @@ EXPORT_SYMBOL(ath9k_hw_deinit);
/* INI */
/*******/
-static void ath9k_hw_override_ini(struct ath_hw *ah,
- struct ath9k_channel *chan)
-{
- u32 val;
-
- if (AR_SREV_9271(ah)) {
- /*
- * Enable spectral scan to solution for issues with stuck
- * beacons on AR9271 1.0. The beacon stuck issue is not seeon on
- * AR9271 1.1
- */
- if (AR_SREV_9271_10(ah)) {
- val = REG_READ(ah, AR_PHY_SPECTRAL_SCAN) |
- AR_PHY_SPECTRAL_SCAN_ENABLE;
- REG_WRITE(ah, AR_PHY_SPECTRAL_SCAN, val);
- }
- else if (AR_SREV_9271_11(ah))
- /*
- * change AR_PHY_RF_CTL3 setting to fix MAC issue
- * present on AR9271 1.1
- */
- REG_WRITE(ah, AR_PHY_RF_CTL3, 0x3a020001);
- return;
- }
-
- /*
- * Set the RX_ABORT and RX_DIS and clear if off only after
- * RXE is set for MAC. This prevents frames with corrupted
- * descriptor status.
- */
- REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
-
- if (AR_SREV_9280_10_OR_LATER(ah)) {
- val = REG_READ(ah, AR_PCU_MISC_MODE2) &
- (~AR_PCU_MISC_MODE2_HWWAR1);
-
- if (AR_SREV_9287_10_OR_LATER(ah))
- val = val & (~AR_PCU_MISC_MODE2_HWWAR2);
-
- REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
- }
-
- if (!AR_SREV_5416_20_OR_LATER(ah) ||
- AR_SREV_9280_10_OR_LATER(ah))
- return;
- /*
- * Disable BB clock gating
- * Necessary to avoid issues on AR5416 2.0
- */
- REG_WRITE(ah, 0x9800 + (651 << 2), 0x11);
-
- /*
- * Disable RIFS search on some chips to avoid baseband
- * hang issues.
- */
- if (AR_SREV_9100(ah) || AR_SREV_9160(ah)) {
- val = REG_READ(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS);
- val &= ~AR_PHY_RIFS_INIT_DELAY;
- REG_WRITE(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS, val);
- }
-}
-
-static u32 ath9k_hw_def_ini_fixup(struct ath_hw *ah,
- struct ar5416_eeprom_def *pEepData,
- u32 reg, u32 value)
-{
- struct base_eep_header *pBase = &(pEepData->baseEepHeader);
- struct ath_common *common = ath9k_hw_common(ah);
-
- switch (ah->hw_version.devid) {
- case AR9280_DEVID_PCI:
- if (reg == 0x7894) {
- ath_print(common, ATH_DBG_EEPROM,
- "ini VAL: %x EEPROM: %x\n", value,
- (pBase->version & 0xff));
-
- if ((pBase->version & 0xff) > 0x0a) {
- ath_print(common, ATH_DBG_EEPROM,
- "PWDCLKIND: %d\n",
- pBase->pwdclkind);
- value &= ~AR_AN_TOP2_PWDCLKIND;
- value |= AR_AN_TOP2_PWDCLKIND &
- (pBase->pwdclkind << AR_AN_TOP2_PWDCLKIND_S);
- } else {
- ath_print(common, ATH_DBG_EEPROM,
- "PWDCLKIND Earlier Rev\n");
- }
-
- ath_print(common, ATH_DBG_EEPROM,
- "final ini VAL: %x\n", value);
- }
- break;
- }
-
- return value;
-}
-
-static u32 ath9k_hw_ini_fixup(struct ath_hw *ah,
- struct ar5416_eeprom_def *pEepData,
- u32 reg, u32 value)
-{
- if (ah->eep_map == EEP_MAP_4KBITS)
- return value;
- else
- return ath9k_hw_def_ini_fixup(ah, pEepData, reg, value);
-}
-
-static void ath9k_olc_init(struct ath_hw *ah)
-{
- u32 i;
-
- if (OLC_FOR_AR9287_10_LATER) {
- REG_SET_BIT(ah, AR_PHY_TX_PWRCTRL9,
- AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL);
- ath9k_hw_analog_shift_rmw(ah, AR9287_AN_TXPC0,
- AR9287_AN_TXPC0_TXPCMODE,
- AR9287_AN_TXPC0_TXPCMODE_S,
- AR9287_AN_TXPC0_TXPCMODE_TEMPSENSE);
- udelay(100);
- } else {
- for (i = 0; i < AR9280_TX_GAIN_TABLE_SIZE; i++)
- ah->originalGain[i] =
- MS(REG_READ(ah, AR_PHY_TX_GAIN_TBL1 + i * 4),
- AR_PHY_TX_GAIN);
- ah->PDADCdelta = 0;
- }
-}
-
-static u32 ath9k_regd_get_ctl(struct ath_regulatory *reg,
- struct ath9k_channel *chan)
+u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan)
{
u32 ctl = ath_regd_get_band_ctl(reg, chan->chan->band);
@@ -1404,173 +875,24 @@ static u32 ath9k_regd_get_ctl(struct ath_regulatory *reg,
return ctl;
}
-static int ath9k_hw_process_ini(struct ath_hw *ah,
- struct ath9k_channel *chan)
-{
- struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
- int i, regWrites = 0;
- struct ieee80211_channel *channel = chan->chan;
- u32 modesIndex, freqIndex;
-
- switch (chan->chanmode) {
- case CHANNEL_A:
- case CHANNEL_A_HT20:
- modesIndex = 1;
- freqIndex = 1;
- break;
- case CHANNEL_A_HT40PLUS:
- case CHANNEL_A_HT40MINUS:
- modesIndex = 2;
- freqIndex = 1;
- break;
- case CHANNEL_G:
- case CHANNEL_G_HT20:
- case CHANNEL_B:
- modesIndex = 4;
- freqIndex = 2;
- break;
- case CHANNEL_G_HT40PLUS:
- case CHANNEL_G_HT40MINUS:
- modesIndex = 3;
- freqIndex = 2;
- break;
-
- default:
- return -EINVAL;
- }
-
- REG_WRITE(ah, AR_PHY(0), 0x00000007);
- REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_EXTERNAL_RADIO);
- ah->eep_ops->set_addac(ah, chan);
-
- if (AR_SREV_5416_22_OR_LATER(ah)) {
- REG_WRITE_ARRAY(&ah->iniAddac, 1, regWrites);
- } else {
- struct ar5416IniArray temp;
- u32 addacSize =
- sizeof(u32) * ah->iniAddac.ia_rows *
- ah->iniAddac.ia_columns;
-
- memcpy(ah->addac5416_21,
- ah->iniAddac.ia_array, addacSize);
-
- (ah->addac5416_21)[31 * ah->iniAddac.ia_columns + 1] = 0;
-
- temp.ia_array = ah->addac5416_21;
- temp.ia_columns = ah->iniAddac.ia_columns;
- temp.ia_rows = ah->iniAddac.ia_rows;
- REG_WRITE_ARRAY(&temp, 1, regWrites);
- }
-
- REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_INTERNAL_ADDAC);
-
- for (i = 0; i < ah->iniModes.ia_rows; i++) {
- u32 reg = INI_RA(&ah->iniModes, i, 0);
- u32 val = INI_RA(&ah->iniModes, i, modesIndex);
-
- REG_WRITE(ah, reg, val);
-
- if (reg >= 0x7800 && reg < 0x78a0
- && ah->config.analog_shiftreg) {
- udelay(100);
- }
-
- DO_DELAY(regWrites);
- }
-
- if (AR_SREV_9280(ah) || AR_SREV_9287_10_OR_LATER(ah))
- REG_WRITE_ARRAY(&ah->iniModesRxGain, modesIndex, regWrites);
-
- if (AR_SREV_9280(ah) || AR_SREV_9285_12_OR_LATER(ah) ||
- AR_SREV_9287_10_OR_LATER(ah))
- REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
-
- for (i = 0; i < ah->iniCommon.ia_rows; i++) {
- u32 reg = INI_RA(&ah->iniCommon, i, 0);
- u32 val = INI_RA(&ah->iniCommon, i, 1);
-
- REG_WRITE(ah, reg, val);
-
- if (reg >= 0x7800 && reg < 0x78a0
- && ah->config.analog_shiftreg) {
- udelay(100);
- }
-
- DO_DELAY(regWrites);
- }
-
- ath9k_hw_write_regs(ah, freqIndex, regWrites);
-
- if (AR_SREV_9271_10(ah))
- REG_WRITE_ARRAY(&ah->iniModes_9271_1_0_only,
- modesIndex, regWrites);
-
- if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan)) {
- REG_WRITE_ARRAY(&ah->iniModesAdditional, modesIndex,
- regWrites);
- }
-
- ath9k_hw_override_ini(ah, chan);
- ath9k_hw_set_regs(ah, chan);
- ath9k_hw_init_chain_masks(ah);
-
- if (OLC_FOR_AR9280_20_LATER)
- ath9k_olc_init(ah);
-
- ah->eep_ops->set_txpower(ah, chan,
- ath9k_regd_get_ctl(regulatory, chan),
- channel->max_antenna_gain * 2,
- channel->max_power * 2,
- min((u32) MAX_RATE_POWER,
- (u32) regulatory->power_limit));
-
- if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) {
- ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
- "ar5416SetRfRegs failed\n");
- return -EIO;
- }
-
- return 0;
-}
-
/****************************************/
/* Reset and Channel Switching Routines */
/****************************************/
-static void ath9k_hw_set_rfmode(struct ath_hw *ah, struct ath9k_channel *chan)
-{
- u32 rfMode = 0;
-
- if (chan == NULL)
- return;
-
- rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
- ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
-
- if (!AR_SREV_9280_10_OR_LATER(ah))
- rfMode |= (IS_CHAN_5GHZ(chan)) ?
- AR_PHY_MODE_RF5GHZ : AR_PHY_MODE_RF2GHZ;
-
- if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan))
- rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
-
- REG_WRITE(ah, AR_PHY_MODE, rfMode);
-}
-
-static void ath9k_hw_mark_phy_inactive(struct ath_hw *ah)
-{
- REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
-}
-
static inline void ath9k_hw_set_dma(struct ath_hw *ah)
{
+ struct ath_common *common = ath9k_hw_common(ah);
u32 regval;
+ ENABLE_REGWRITE_BUFFER(ah);
+
/*
* set AHB_MODE not to do cacheline prefetches
*/
- regval = REG_READ(ah, AR_AHB_MODE);
- REG_WRITE(ah, AR_AHB_MODE, regval | AR_AHB_PREFETCH_RD_EN);
+ if (!AR_SREV_9300_20_OR_LATER(ah)) {
+ regval = REG_READ(ah, AR_AHB_MODE);
+ REG_WRITE(ah, AR_AHB_MODE, regval | AR_AHB_PREFETCH_RD_EN);
+ }
/*
* let mac dma reads be in 128 byte chunks
@@ -1578,12 +900,18 @@ static inline void ath9k_hw_set_dma(struct ath_hw *ah)
regval = REG_READ(ah, AR_TXCFG) & ~AR_TXCFG_DMASZ_MASK;
REG_WRITE(ah, AR_TXCFG, regval | AR_TXCFG_DMASZ_128B);
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
/*
* Restore TX Trigger Level to its pre-reset value.
* The initial value depends on whether aggregation is enabled, and is
* adjusted whenever underruns are detected.
*/
- REG_RMW_FIELD(ah, AR_TXCFG, AR_FTRIG, ah->tx_trig_level);
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ REG_RMW_FIELD(ah, AR_TXCFG, AR_FTRIG, ah->tx_trig_level);
+
+ ENABLE_REGWRITE_BUFFER(ah);
/*
* let mac dma writes be in 128 byte chunks
@@ -1596,6 +924,14 @@ static inline void ath9k_hw_set_dma(struct ath_hw *ah)
*/
REG_WRITE(ah, AR_RXFIFO_CFG, 0x200);
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ REG_RMW_FIELD(ah, AR_RXBP_THRESH, AR_RXBP_THRESH_HP, 0x1);
+ REG_RMW_FIELD(ah, AR_RXBP_THRESH, AR_RXBP_THRESH_LP, 0x1);
+
+ ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
+ ah->caps.rx_status_len);
+ }
+
/*
* reduce the number of usable entries in PCU TXBUF to avoid
* wrap around issues.
@@ -1611,6 +947,12 @@ static inline void ath9k_hw_set_dma(struct ath_hw *ah)
REG_WRITE(ah, AR_PCU_TXBUF_CTRL,
AR_PCU_TXBUF_CTRL_USABLE_SIZE);
}
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ ath9k_hw_reset_txstatus_ring(ah);
}
static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
@@ -1638,10 +980,8 @@ static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
}
}
-static inline void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah,
- u32 coef_scaled,
- u32 *coef_mantissa,
- u32 *coef_exponent)
+void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
+ u32 *coef_mantissa, u32 *coef_exponent)
{
u32 coef_exp, coef_man;
@@ -1657,40 +997,6 @@ static inline void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah,
*coef_exponent = coef_exp - 16;
}
-static void ath9k_hw_set_delta_slope(struct ath_hw *ah,
- struct ath9k_channel *chan)
-{
- u32 coef_scaled, ds_coef_exp, ds_coef_man;
- u32 clockMhzScaled = 0x64000000;
- struct chan_centers centers;
-
- if (IS_CHAN_HALF_RATE(chan))
- clockMhzScaled = clockMhzScaled >> 1;
- else if (IS_CHAN_QUARTER_RATE(chan))
- clockMhzScaled = clockMhzScaled >> 2;
-
- ath9k_hw_get_channel_centers(ah, chan, &centers);
- coef_scaled = clockMhzScaled / centers.synth_center;
-
- ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
- &ds_coef_exp);
-
- REG_RMW_FIELD(ah, AR_PHY_TIMING3,
- AR_PHY_TIMING3_DSC_MAN, ds_coef_man);
- REG_RMW_FIELD(ah, AR_PHY_TIMING3,
- AR_PHY_TIMING3_DSC_EXP, ds_coef_exp);
-
- coef_scaled = (9 * coef_scaled) / 10;
-
- ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
- &ds_coef_exp);
-
- REG_RMW_FIELD(ah, AR_PHY_HALFGI,
- AR_PHY_HALFGI_DSC_MAN, ds_coef_man);
- REG_RMW_FIELD(ah, AR_PHY_HALFGI,
- AR_PHY_HALFGI_DSC_EXP, ds_coef_exp);
-}
-
static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
{
u32 rst_flags;
@@ -1704,6 +1010,8 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
(void)REG_READ(ah, AR_RTC_DERIVED_CLK);
}
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
AR_RTC_FORCE_WAKE_ON_INT);
@@ -1715,11 +1023,16 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
if (tmpReg &
(AR_INTR_SYNC_LOCAL_TIMEOUT |
AR_INTR_SYNC_RADM_CPL_TIMEOUT)) {
+ u32 val;
REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
- REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);
- } else {
+
+ val = AR_RC_HOSTIF;
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ val |= AR_RC_AHB;
+ REG_WRITE(ah, AR_RC, val);
+
+ } else if (!AR_SREV_9300_20_OR_LATER(ah))
REG_WRITE(ah, AR_RC, AR_RC_AHB);
- }
rst_flags = AR_RTC_RC_MAC_WARM;
if (type == ATH9K_RESET_COLD)
@@ -1727,6 +1040,10 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
}
REG_WRITE(ah, AR_RTC_RC, rst_flags);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
udelay(50);
REG_WRITE(ah, AR_RTC_RC, 0);
@@ -1747,16 +1064,23 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
{
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
AR_RTC_FORCE_WAKE_ON_INT);
- if (!AR_SREV_9100(ah))
+ if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
REG_WRITE(ah, AR_RC, AR_RC_AHB);
REG_WRITE(ah, AR_RTC_RESET, 0);
- udelay(2);
- if (!AR_SREV_9100(ah))
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ udelay(2);
+
+ if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
REG_WRITE(ah, AR_RC, 0);
REG_WRITE(ah, AR_RTC_RESET, 1);
@@ -1792,34 +1116,6 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
}
}
-static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan)
-{
- u32 phymode;
- u32 enableDacFifo = 0;
-
- if (AR_SREV_9285_10_OR_LATER(ah))
- enableDacFifo = (REG_READ(ah, AR_PHY_TURBO) &
- AR_PHY_FC_ENABLE_DAC_FIFO);
-
- phymode = AR_PHY_FC_HT_EN | AR_PHY_FC_SHORT_GI_40
- | AR_PHY_FC_SINGLE_HT_LTF1 | AR_PHY_FC_WALSH | enableDacFifo;
-
- if (IS_CHAN_HT40(chan)) {
- phymode |= AR_PHY_FC_DYN2040_EN;
-
- if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
- (chan->chanmode == CHANNEL_G_HT40PLUS))
- phymode |= AR_PHY_FC_DYN2040_PRI_CH;
-
- }
- REG_WRITE(ah, AR_PHY_TURBO, phymode);
-
- ath9k_hw_set11nmac2040(ah);
-
- REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
- REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
-}
-
static bool ath9k_hw_chip_reset(struct ath_hw *ah,
struct ath9k_channel *chan)
{
@@ -1845,7 +1141,7 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_channel *channel = chan->chan;
- u32 synthDelay, qnum;
+ u32 qnum;
int r;
for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
@@ -1857,17 +1153,15 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
}
}
- REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN);
- if (!ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN,
- AR_PHY_RFBUS_GRANT_EN, AH_WAIT_TIMEOUT)) {
+ if (!ath9k_hw_rfbus_req(ah)) {
ath_print(common, ATH_DBG_FATAL,
"Could not kill baseband RX\n");
return false;
}
- ath9k_hw_set_regs(ah, chan);
+ ath9k_hw_set_channel_regs(ah, chan);
- r = ah->ath9k_hw_rf_set_freq(ah, chan);
+ r = ath9k_hw_rf_set_freq(ah, chan);
if (r) {
ath_print(common, ATH_DBG_FATAL,
"Failed to set channel\n");
@@ -1881,20 +1175,12 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
min((u32) MAX_RATE_POWER,
(u32) regulatory->power_limit));
- synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
- if (IS_CHAN_B(chan))
- synthDelay = (4 * synthDelay) / 22;
- else
- synthDelay /= 10;
-
- udelay(synthDelay + BASE_ACTIVATE_DELAY);
-
- REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
+ ath9k_hw_rfbus_done(ah);
if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
ath9k_hw_set_delta_slope(ah, chan);
- ah->ath9k_hw_spur_mitigate_freq(ah, chan);
+ ath9k_hw_spur_mitigate_freq(ah, chan);
if (!chan->oneTimeCalsDone)
chan->oneTimeCalsDone = true;
@@ -1902,17 +1188,33 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
return true;
}
-static void ath9k_enable_rfkill(struct ath_hw *ah)
+bool ath9k_hw_check_alive(struct ath_hw *ah)
{
- REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
- AR_GPIO_INPUT_EN_VAL_RFSILENT_BB);
+ int count = 50;
+ u32 reg;
+
+ if (AR_SREV_9285_10_OR_LATER(ah))
+ return true;
+
+ do {
+ reg = REG_READ(ah, AR_OBS_BUS_1);
- REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2,
- AR_GPIO_INPUT_MUX2_RFSILENT);
+ if ((reg & 0x7E7FFFEF) == 0x00702400)
+ continue;
- ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio);
- REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB);
+ switch (reg & 0x7E000B00) {
+ case 0x1E000000:
+ case 0x52000B00:
+ case 0x18000B00:
+ continue;
+ default:
+ return true;
+ }
+ } while (count-- > 0);
+
+ return false;
}
+EXPORT_SYMBOL(ath9k_hw_check_alive);
int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
bool bChannelChange)
@@ -1923,11 +1225,18 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
u32 saveDefAntenna;
u32 macStaId1;
u64 tsf = 0;
- int i, rx_chainmask, r;
+ int i, r;
ah->txchainmask = common->tx_chainmask;
ah->rxchainmask = common->rx_chainmask;
+ if (!ah->chip_fullsleep) {
+ ath9k_hw_abortpcurecv(ah);
+ if (!ath9k_hw_stopdmarecv(ah))
+ ath_print(common, ATH_DBG_XMIT,
+ "Failed to stop receive dma\n");
+ }
+
if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
return -EIO;
@@ -1940,8 +1249,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
(chan->channel != ah->curchan->channel) &&
((chan->channelFlags & CHANNEL_ALL) ==
(ah->curchan->channelFlags & CHANNEL_ALL)) &&
- !(AR_SREV_9280(ah) || IS_CHAN_A_5MHZ_SPACED(chan) ||
- IS_CHAN_A_5MHZ_SPACED(ah->curchan))) {
+ !AR_SREV_9280(ah)) {
if (ath9k_hw_channel_change(ah, chan)) {
ath9k_hw_loadnf(ah, ah->curchan);
@@ -1966,6 +1274,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_mark_phy_inactive(ah);
+ /* Only required on the first reset */
if (AR_SREV_9271(ah) && ah->htc_reset_init) {
REG_WRITE(ah,
AR9271_RESET_POWER_DOWN_CONTROL,
@@ -1978,6 +1287,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
return -EINVAL;
}
+ /* Only required on the first reset */
if (AR_SREV_9271(ah) && ah->htc_reset_init) {
ah->htc_reset_init = false;
REG_WRITE(ah,
@@ -1993,16 +1303,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (AR_SREV_9280_10_OR_LATER(ah))
REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);
- if (AR_SREV_9287_12_OR_LATER(ah)) {
- /* Enable ASYNC FIFO */
- REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
- AR_MAC_PCU_ASYNC_FIFO_REG3_DATAPATH_SEL);
- REG_SET_BIT(ah, AR_PHY_MODE, AR_PHY_MODE_ASYNCFIFO);
- REG_CLR_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
- AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
- REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
- AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
- }
r = ath9k_hw_process_ini(ah, chan);
if (r)
return r;
@@ -2027,9 +1327,13 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
ath9k_hw_set_delta_slope(ah, chan);
- ah->ath9k_hw_spur_mitigate_freq(ah, chan);
+ ath9k_hw_spur_mitigate_freq(ah, chan);
ah->eep_ops->set_board_values(ah, chan);
+ ath9k_hw_set_operating_mode(ah, ah->opmode);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(common->macaddr));
REG_WRITE(ah, AR_STA_ID1, get_unaligned_le16(common->macaddr + 4)
| macStaId1
@@ -2037,25 +1341,27 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
| (ah->config.
ack_6mb ? AR_STA_ID1_ACKCTS_6MB : 0)
| ah->sta_id1_defaults);
- ath9k_hw_set_operating_mode(ah, ah->opmode);
-
ath_hw_setbssidmask(common);
-
REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna);
-
ath9k_hw_write_associd(ah);
-
REG_WRITE(ah, AR_ISR, ~0);
-
REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);
- r = ah->ath9k_hw_rf_set_freq(ah, chan);
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
+ r = ath9k_hw_rf_set_freq(ah, chan);
if (r)
return r;
+ ENABLE_REGWRITE_BUFFER(ah);
+
for (i = 0; i < AR_NUM_DCU; i++)
REG_WRITE(ah, AR_DQCUMASK(i), 1 << i);
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
ah->intr_txqs = 0;
for (i = 0; i < ah->caps.total_queues; i++)
ath9k_hw_resettxqueue(ah, i);
@@ -2068,25 +1374,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_init_global_settings(ah);
- if (AR_SREV_9287_12_OR_LATER(ah)) {
- REG_WRITE(ah, AR_D_GBL_IFS_SIFS,
- AR_D_GBL_IFS_SIFS_ASYNC_FIFO_DUR);
- REG_WRITE(ah, AR_D_GBL_IFS_SLOT,
- AR_D_GBL_IFS_SLOT_ASYNC_FIFO_DUR);
- REG_WRITE(ah, AR_D_GBL_IFS_EIFS,
- AR_D_GBL_IFS_EIFS_ASYNC_FIFO_DUR);
-
- REG_WRITE(ah, AR_TIME_OUT, AR_TIME_OUT_ACK_CTS_ASYNC_FIFO_DUR);
- REG_WRITE(ah, AR_USEC, AR_USEC_ASYNC_FIFO_DUR);
-
- REG_SET_BIT(ah, AR_MAC_PCU_LOGIC_ANALYZER,
- AR_MAC_PCU_LOGIC_ANALYZER_DISBUG20768);
- REG_RMW_FIELD(ah, AR_AHB_MODE, AR_AHB_CUSTOM_BURST_EN,
- AR_AHB_CUSTOM_BURST_ASYNC_FIFO_VAL);
- }
- if (AR_SREV_9287_12_OR_LATER(ah)) {
- REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
- AR_PCU_MISC_MODE2_ENABLE_AGGWEP);
+ if (!AR_SREV_9300_20_OR_LATER(ah)) {
+ ar9002_hw_enable_async_fifo(ah);
+ ar9002_hw_enable_wep_aggregation(ah);
}
REG_WRITE(ah, AR_STA_ID1,
@@ -2101,19 +1391,24 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, 2000);
}
+ if (ah->config.tx_intr_mitigation) {
+ REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_LAST, 300);
+ REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_FIRST, 750);
+ }
+
ath9k_hw_init_bb(ah, chan);
if (!ath9k_hw_init_cal(ah, chan))
return -EIO;
- rx_chainmask = ah->rxchainmask;
- if ((rx_chainmask == 0x5) || (rx_chainmask == 0x3)) {
- REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
- REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
- }
+ ENABLE_REGWRITE_BUFFER(ah);
+ ath9k_hw_restore_chainmask(ah);
REG_WRITE(ah, AR_CFG_LED, saveLedState | AR_CFG_SCLK_32KHZ);
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
/*
* For big endian systems turn on swapping for descriptors
*/
@@ -2143,6 +1438,11 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (ah->btcoex_hw.enabled)
ath9k_hw_btcoex_enable(ah);
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ ath9k_hw_loadnf(ah, curchan);
+ ath9k_hw_start_nfcal(ah);
+ }
+
return 0;
}
EXPORT_SYMBOL(ath9k_hw_reset);
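The hunk above adds an RX quiesce step at the top of ath9k_hw_reset(): when the chip is not in full sleep, the PCU receive logic is aborted and RX DMA is stopped before the power state is touched. A minimal sketch of that ordering, using only helpers already referenced in this patch (the wrapper function and its placement are hypothetical; the patch itself logs the failure under ATH_DBG_XMIT):

	static void example_quiesce_rx_before_reset(struct ath_hw *ah)
	{
		struct ath_common *common = ath9k_hw_common(ah);

		/* Nothing to stop if the chip is already in full sleep */
		if (ah->chip_fullsleep)
			return;

		/* Stop the PCU from accepting new frames ... */
		ath9k_hw_abortpcurecv(ah);

		/* ... then make sure the RX DMA engine has drained */
		if (!ath9k_hw_stopdmarecv(ah))
			ath_print(common, ATH_DBG_XMIT,
				  "Failed to stop receive dma\n");
	}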
@@ -2429,21 +1729,35 @@ EXPORT_SYMBOL(ath9k_hw_keyisvalid);
/* Power Management (Chipset) */
/******************************/
+/*
+ * Notify Power Mgt is disabled in self-generated frames.
+ * If requested, force chip to sleep.
+ */
static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip)
{
REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
if (setChip) {
+ /*
+ * Clear the RTC force wake bit to allow the
+ * mac to go to sleep.
+ */
REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE,
AR_RTC_FORCE_WAKE_EN);
- if (!AR_SREV_9100(ah))
+ if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);
- if(!AR_SREV_5416(ah))
+ /* Shutdown chip. Active low */
+ if (!AR_SREV_5416(ah) && !AR_SREV_9271(ah))
REG_CLR_BIT(ah, (AR_RTC_RESET),
AR_RTC_RESET_EN);
}
}
+/*
+ * Notify Power Management is enabled in self-generated
+ * frames. If requested, set power mode of chip to
+ * auto/normal. Duration in units of 128us (1/8 TU).
+ */
static void ath9k_set_power_network_sleep(struct ath_hw *ah, int setChip)
{
REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
@@ -2451,9 +1765,14 @@ static void ath9k_set_power_network_sleep(struct ath_hw *ah, int setChip)
struct ath9k_hw_capabilities *pCap = &ah->caps;
if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
+ /* Set WakeOnInterrupt bit; clear ForceWake bit */
REG_WRITE(ah, AR_RTC_FORCE_WAKE,
AR_RTC_FORCE_WAKE_ON_INT);
} else {
+ /*
+ * Clear the RTC force wake bit to allow the
+ * mac to go to sleep.
+ */
REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE,
AR_RTC_FORCE_WAKE_EN);
}
@@ -2472,7 +1791,8 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
ATH9K_RESET_POWER_ON) != true) {
return false;
}
- ath9k_hw_init_pll(ah, NULL);
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ ath9k_hw_init_pll(ah, NULL);
}
if (AR_SREV_9100(ah))
REG_SET_BIT(ah, AR_RTC_RESET,
@@ -2542,424 +1862,6 @@ bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
}
EXPORT_SYMBOL(ath9k_hw_setpower);
-/*
- * Helper for ASPM support.
- *
- * Disable PLL when in L0s as well as receiver clock when in L1.
- * This power saving option must be enabled through the SerDes.
- *
- * Programming the SerDes must go through the same 288 bit serial shift
- * register as the other analog registers. Hence the 9 writes.
- */
-void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore, int power_off)
-{
- u8 i;
- u32 val;
-
- if (ah->is_pciexpress != true)
- return;
-
- /* Do not touch SerDes registers */
- if (ah->config.pcie_powersave_enable == 2)
- return;
-
- /* Nothing to do on restore for 11N */
- if (!restore) {
- if (AR_SREV_9280_20_OR_LATER(ah)) {
- /*
- * AR9280 2.0 or later chips use SerDes values from the
- * initvals.h initialized depending on chipset during
- * ath9k_hw_init()
- */
- for (i = 0; i < ah->iniPcieSerdes.ia_rows; i++) {
- REG_WRITE(ah, INI_RA(&ah->iniPcieSerdes, i, 0),
- INI_RA(&ah->iniPcieSerdes, i, 1));
- }
- } else if (AR_SREV_9280(ah) &&
- (ah->hw_version.macRev == AR_SREV_REVISION_9280_10)) {
- REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fd00);
- REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
-
- /* RX shut off when elecidle is asserted */
- REG_WRITE(ah, AR_PCIE_SERDES, 0xa8000019);
- REG_WRITE(ah, AR_PCIE_SERDES, 0x13160820);
- REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980560);
-
- /* Shut off CLKREQ active in L1 */
- if (ah->config.pcie_clock_req)
- REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffc);
- else
- REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffd);
-
- REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
- REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
- REG_WRITE(ah, AR_PCIE_SERDES, 0x00043007);
-
- /* Load the new settings */
- REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
-
- } else {
- REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
- REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
-
- /* RX shut off when elecidle is asserted */
- REG_WRITE(ah, AR_PCIE_SERDES, 0x28000039);
- REG_WRITE(ah, AR_PCIE_SERDES, 0x53160824);
- REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980579);
-
- /*
- * Ignore ah->ah_config.pcie_clock_req setting for
- * pre-AR9280 11n
- */
- REG_WRITE(ah, AR_PCIE_SERDES, 0x001defff);
-
- REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
- REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
- REG_WRITE(ah, AR_PCIE_SERDES, 0x000e3007);
-
- /* Load the new settings */
- REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
- }
-
- udelay(1000);
-
- /* set bit 19 to allow forcing of pcie core into L1 state */
- REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
-
- /* Several PCIe massages to ensure proper behaviour */
- if (ah->config.pcie_waen) {
- val = ah->config.pcie_waen;
- if (!power_off)
- val &= (~AR_WA_D3_L1_DISABLE);
- } else {
- if (AR_SREV_9285(ah) || AR_SREV_9271(ah) ||
- AR_SREV_9287(ah)) {
- val = AR9285_WA_DEFAULT;
- if (!power_off)
- val &= (~AR_WA_D3_L1_DISABLE);
- } else if (AR_SREV_9280(ah)) {
- /*
- * On AR9280 chips bit 22 of 0x4004 needs to be
- * set otherwise card may disappear.
- */
- val = AR9280_WA_DEFAULT;
- if (!power_off)
- val &= (~AR_WA_D3_L1_DISABLE);
- } else
- val = AR_WA_DEFAULT;
- }
-
- REG_WRITE(ah, AR_WA, val);
- }
-
- if (power_off) {
- /*
- * Set PCIe workaround bits
- * bit 14 in WA register (disable L1) should only
- * be set when device enters D3 and be cleared
- * when device comes back to D0.
- */
- if (ah->config.pcie_waen) {
- if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE)
- REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE);
- } else {
- if (((AR_SREV_9285(ah) || AR_SREV_9271(ah) ||
- AR_SREV_9287(ah)) &&
- (AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE)) ||
- (AR_SREV_9280(ah) &&
- (AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE))) {
- REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE);
- }
- }
- }
-}
-EXPORT_SYMBOL(ath9k_hw_configpcipowersave);
-
-/**********************/
-/* Interrupt Handling */
-/**********************/
-
-bool ath9k_hw_intrpend(struct ath_hw *ah)
-{
- u32 host_isr;
-
- if (AR_SREV_9100(ah))
- return true;
-
- host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
- if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS))
- return true;
-
- host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
- if ((host_isr & AR_INTR_SYNC_DEFAULT)
- && (host_isr != AR_INTR_SPURIOUS))
- return true;
-
- return false;
-}
-EXPORT_SYMBOL(ath9k_hw_intrpend);
-
-bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
-{
- u32 isr = 0;
- u32 mask2 = 0;
- struct ath9k_hw_capabilities *pCap = &ah->caps;
- u32 sync_cause = 0;
- bool fatal_int = false;
- struct ath_common *common = ath9k_hw_common(ah);
-
- if (!AR_SREV_9100(ah)) {
- if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
- if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
- == AR_RTC_STATUS_ON) {
- isr = REG_READ(ah, AR_ISR);
- }
- }
-
- sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) &
- AR_INTR_SYNC_DEFAULT;
-
- *masked = 0;
-
- if (!isr && !sync_cause)
- return false;
- } else {
- *masked = 0;
- isr = REG_READ(ah, AR_ISR);
- }
-
- if (isr) {
- if (isr & AR_ISR_BCNMISC) {
- u32 isr2;
- isr2 = REG_READ(ah, AR_ISR_S2);
- if (isr2 & AR_ISR_S2_TIM)
- mask2 |= ATH9K_INT_TIM;
- if (isr2 & AR_ISR_S2_DTIM)
- mask2 |= ATH9K_INT_DTIM;
- if (isr2 & AR_ISR_S2_DTIMSYNC)
- mask2 |= ATH9K_INT_DTIMSYNC;
- if (isr2 & (AR_ISR_S2_CABEND))
- mask2 |= ATH9K_INT_CABEND;
- if (isr2 & AR_ISR_S2_GTT)
- mask2 |= ATH9K_INT_GTT;
- if (isr2 & AR_ISR_S2_CST)
- mask2 |= ATH9K_INT_CST;
- if (isr2 & AR_ISR_S2_TSFOOR)
- mask2 |= ATH9K_INT_TSFOOR;
- }
-
- isr = REG_READ(ah, AR_ISR_RAC);
- if (isr == 0xffffffff) {
- *masked = 0;
- return false;
- }
-
- *masked = isr & ATH9K_INT_COMMON;
-
- if (ah->config.rx_intr_mitigation) {
- if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
- *masked |= ATH9K_INT_RX;
- }
-
- if (isr & (AR_ISR_RXOK | AR_ISR_RXERR))
- *masked |= ATH9K_INT_RX;
- if (isr &
- (AR_ISR_TXOK | AR_ISR_TXDESC | AR_ISR_TXERR |
- AR_ISR_TXEOL)) {
- u32 s0_s, s1_s;
-
- *masked |= ATH9K_INT_TX;
-
- s0_s = REG_READ(ah, AR_ISR_S0_S);
- ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
- ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
-
- s1_s = REG_READ(ah, AR_ISR_S1_S);
- ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
- ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
- }
-
- if (isr & AR_ISR_RXORN) {
- ath_print(common, ATH_DBG_INTERRUPT,
- "receive FIFO overrun interrupt\n");
- }
-
- if (!AR_SREV_9100(ah)) {
- if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
- u32 isr5 = REG_READ(ah, AR_ISR_S5_S);
- if (isr5 & AR_ISR_S5_TIM_TIMER)
- *masked |= ATH9K_INT_TIM_TIMER;
- }
- }
-
- *masked |= mask2;
- }
-
- if (AR_SREV_9100(ah))
- return true;
-
- if (isr & AR_ISR_GENTMR) {
- u32 s5_s;
-
- s5_s = REG_READ(ah, AR_ISR_S5_S);
- if (isr & AR_ISR_GENTMR) {
- ah->intr_gen_timer_trigger =
- MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);
-
- ah->intr_gen_timer_thresh =
- MS(s5_s, AR_ISR_S5_GENTIMER_THRESH);
-
- if (ah->intr_gen_timer_trigger)
- *masked |= ATH9K_INT_GENTIMER;
-
- }
- }
-
- if (sync_cause) {
- fatal_int =
- (sync_cause &
- (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
- ? true : false;
-
- if (fatal_int) {
- if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) {
- ath_print(common, ATH_DBG_ANY,
- "received PCI FATAL interrupt\n");
- }
- if (sync_cause & AR_INTR_SYNC_HOST1_PERR) {
- ath_print(common, ATH_DBG_ANY,
- "received PCI PERR interrupt\n");
- }
- *masked |= ATH9K_INT_FATAL;
- }
- if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
- ath_print(common, ATH_DBG_INTERRUPT,
- "AR_INTR_SYNC_RADM_CPL_TIMEOUT\n");
- REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
- REG_WRITE(ah, AR_RC, 0);
- *masked |= ATH9K_INT_FATAL;
- }
- if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) {
- ath_print(common, ATH_DBG_INTERRUPT,
- "AR_INTR_SYNC_LOCAL_TIMEOUT\n");
- }
-
- REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
- (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);
- }
-
- return true;
-}
-EXPORT_SYMBOL(ath9k_hw_getisr);
-
-enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
-{
- u32 omask = ah->mask_reg;
- u32 mask, mask2;
- struct ath9k_hw_capabilities *pCap = &ah->caps;
- struct ath_common *common = ath9k_hw_common(ah);
-
- ath_print(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);
-
- if (omask & ATH9K_INT_GLOBAL) {
- ath_print(common, ATH_DBG_INTERRUPT, "disable IER\n");
- REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
- (void) REG_READ(ah, AR_IER);
- if (!AR_SREV_9100(ah)) {
- REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
- (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);
-
- REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
- (void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
- }
- }
-
- mask = ints & ATH9K_INT_COMMON;
- mask2 = 0;
-
- if (ints & ATH9K_INT_TX) {
- if (ah->txok_interrupt_mask)
- mask |= AR_IMR_TXOK;
- if (ah->txdesc_interrupt_mask)
- mask |= AR_IMR_TXDESC;
- if (ah->txerr_interrupt_mask)
- mask |= AR_IMR_TXERR;
- if (ah->txeol_interrupt_mask)
- mask |= AR_IMR_TXEOL;
- }
- if (ints & ATH9K_INT_RX) {
- mask |= AR_IMR_RXERR;
- if (ah->config.rx_intr_mitigation)
- mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
- else
- mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
- if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
- mask |= AR_IMR_GENTMR;
- }
-
- if (ints & (ATH9K_INT_BMISC)) {
- mask |= AR_IMR_BCNMISC;
- if (ints & ATH9K_INT_TIM)
- mask2 |= AR_IMR_S2_TIM;
- if (ints & ATH9K_INT_DTIM)
- mask2 |= AR_IMR_S2_DTIM;
- if (ints & ATH9K_INT_DTIMSYNC)
- mask2 |= AR_IMR_S2_DTIMSYNC;
- if (ints & ATH9K_INT_CABEND)
- mask2 |= AR_IMR_S2_CABEND;
- if (ints & ATH9K_INT_TSFOOR)
- mask2 |= AR_IMR_S2_TSFOOR;
- }
-
- if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) {
- mask |= AR_IMR_BCNMISC;
- if (ints & ATH9K_INT_GTT)
- mask2 |= AR_IMR_S2_GTT;
- if (ints & ATH9K_INT_CST)
- mask2 |= AR_IMR_S2_CST;
- }
-
- ath_print(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
- REG_WRITE(ah, AR_IMR, mask);
- mask = REG_READ(ah, AR_IMR_S2) & ~(AR_IMR_S2_TIM |
- AR_IMR_S2_DTIM |
- AR_IMR_S2_DTIMSYNC |
- AR_IMR_S2_CABEND |
- AR_IMR_S2_CABTO |
- AR_IMR_S2_TSFOOR |
- AR_IMR_S2_GTT | AR_IMR_S2_CST);
- REG_WRITE(ah, AR_IMR_S2, mask | mask2);
- ah->mask_reg = ints;
-
- if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
- if (ints & ATH9K_INT_TIM_TIMER)
- REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
- else
- REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
- }
-
- if (ints & ATH9K_INT_GLOBAL) {
- ath_print(common, ATH_DBG_INTERRUPT, "enable IER\n");
- REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
- if (!AR_SREV_9100(ah)) {
- REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
- AR_INTR_MAC_IRQ);
- REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);
-
-
- REG_WRITE(ah, AR_INTR_SYNC_ENABLE,
- AR_INTR_SYNC_DEFAULT);
- REG_WRITE(ah, AR_INTR_SYNC_MASK,
- AR_INTR_SYNC_DEFAULT);
- }
- ath_print(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
- REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
- }
-
- return omask;
-}
-EXPORT_SYMBOL(ath9k_hw_set_interrupts);
-
/*******************/
/* Beacon Handling */
/*******************/
@@ -2970,6 +1872,8 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
ah->beacon_interval = beacon_period;
+ ENABLE_REGWRITE_BUFFER(ah);
+
switch (ah->opmode) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_MONITOR:
@@ -3013,6 +1917,9 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
REG_WRITE(ah, AR_SWBA_PERIOD, TU_TO_USEC(beacon_period));
REG_WRITE(ah, AR_NDP_PERIOD, TU_TO_USEC(beacon_period));
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
beacon_period &= ~ATH9K_BEACON_ENA;
if (beacon_period & ATH9K_BEACON_RESET_TSF) {
ath9k_hw_reset_tsf(ah);
@@ -3029,6 +1936,8 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
struct ath9k_hw_capabilities *pCap = &ah->caps;
struct ath_common *common = ath9k_hw_common(ah);
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(bs->bs_nexttbtt));
REG_WRITE(ah, AR_BEACON_PERIOD,
@@ -3036,6 +1945,9 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
REG_WRITE(ah, AR_DMA_BEACON_PERIOD,
TU_TO_USEC(bs->bs_intval & ATH9K_BEACON_PERIOD));
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
REG_RMW_FIELD(ah, AR_RSSI_THR,
AR_RSSI_THR_BM_THR, bs->bs_bmissthreshold);
@@ -3058,6 +1970,8 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
ath_print(common, ATH_DBG_BEACON, "beacon period %d\n", beaconintval);
ath_print(common, ATH_DBG_BEACON, "DTIM period %d\n", dtimperiod);
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_NEXT_DTIM,
TU_TO_USEC(bs->bs_nextdtim - SLEEP_SLOP));
REG_WRITE(ah, AR_NEXT_TIM, TU_TO_USEC(nextTbtt - SLEEP_SLOP));
@@ -3077,6 +1991,9 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
REG_WRITE(ah, AR_TIM_PERIOD, TU_TO_USEC(beaconintval));
REG_WRITE(ah, AR_DTIM_PERIOD, TU_TO_USEC(dtimperiod));
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
REG_SET_BIT(ah, AR_TIMER_MODE,
AR_TBTT_TIMER_EN | AR_TIM_TIMER_EN |
AR_DTIM_TIMER_EN);
@@ -3219,7 +2136,9 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
else
pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD;
- if (AR_SREV_9285_10_OR_LATER(ah))
+ if (AR_SREV_9271(ah))
+ pCap->num_gpio_pins = AR9271_NUM_GPIO;
+ else if (AR_SREV_9285_10_OR_LATER(ah))
pCap->num_gpio_pins = AR9285_NUM_GPIO;
else if (AR_SREV_9280_10_OR_LATER(ah))
pCap->num_gpio_pins = AR928X_NUM_GPIO;
@@ -3246,8 +2165,10 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
pCap->hw_caps |= ATH9K_HW_CAP_RFSILENT;
}
#endif
-
- pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP;
+ if (AR_SREV_9271(ah))
+ pCap->hw_caps |= ATH9K_HW_CAP_AUTOSLEEP;
+ else
+ pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP;
if (AR_SREV_9280(ah) || AR_SREV_9285(ah))
pCap->hw_caps &= ~ATH9K_HW_CAP_4KB_SPLITTRANS;
@@ -3291,6 +2212,26 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
btcoex_hw->scheme = ATH_BTCOEX_CFG_NONE;
}
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ pCap->hw_caps |= ATH9K_HW_CAP_EDMA | ATH9K_HW_CAP_LDPC |
+ ATH9K_HW_CAP_FASTCLOCK;
+ pCap->rx_hp_qdepth = ATH9K_HW_RX_HP_QDEPTH;
+ pCap->rx_lp_qdepth = ATH9K_HW_RX_LP_QDEPTH;
+ pCap->rx_status_len = sizeof(struct ar9003_rxs);
+ pCap->tx_desc_len = sizeof(struct ar9003_txc);
+ pCap->txs_len = sizeof(struct ar9003_txs);
+ } else {
+ pCap->tx_desc_len = sizeof(struct ath_desc);
+ if (AR_SREV_9280_20(ah) &&
+ ((ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) <=
+ AR5416_EEP_MINOR_VER_16) ||
+ ah->eep_ops->get_eeprom(ah, EEP_FSTCLK_5G)))
+ pCap->hw_caps |= ATH9K_HW_CAP_FASTCLOCK;
+ }
+
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ pCap->hw_caps |= ATH9K_HW_CAP_RAC_SUPPORTED;
+
return 0;
}
@@ -3323,10 +2264,6 @@ bool ath9k_hw_getcapability(struct ath_hw *ah, enum ath9k_capability_type type,
case ATH9K_CAP_TKIP_SPLIT:
return (ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA) ?
false : true;
- case ATH9K_CAP_DIVERSITY:
- return (REG_READ(ah, AR_PHY_CCK_DETECT) &
- AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV) ?
- true : false;
case ATH9K_CAP_MCAST_KEYSRCH:
switch (capability) {
case 0:
@@ -3369,8 +2306,6 @@ EXPORT_SYMBOL(ath9k_hw_getcapability);
bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type,
u32 capability, u32 setting, int *status)
{
- u32 v;
-
switch (type) {
case ATH9K_CAP_TKIP_MIC:
if (setting)
@@ -3380,14 +2315,6 @@ bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type,
ah->sta_id1_defaults &=
~AR_STA_ID1_CRPT_MIC_ENABLE;
return true;
- case ATH9K_CAP_DIVERSITY:
- v = REG_READ(ah, AR_PHY_CCK_DETECT);
- if (setting)
- v |= AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
- else
- v &= ~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
- REG_WRITE(ah, AR_PHY_CCK_DETECT, v);
- return true;
case ATH9K_CAP_MCAST_KEYSRCH:
if (setting)
ah->sta_id1_defaults |= AR_STA_ID1_MCAST_KSRCH;
@@ -3455,7 +2382,11 @@ u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
if (gpio >= ah->caps.num_gpio_pins)
return 0xffffffff;
- if (AR_SREV_9287_10_OR_LATER(ah))
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ return MS_REG_READ(AR9300, gpio) != 0;
+ else if (AR_SREV_9271(ah))
+ return MS_REG_READ(AR9271, gpio) != 0;
+ else if (AR_SREV_9287_10_OR_LATER(ah))
return MS_REG_READ(AR9287, gpio) != 0;
else if (AR_SREV_9285_10_OR_LATER(ah))
return MS_REG_READ(AR9285, gpio) != 0;
@@ -3484,6 +2415,9 @@ EXPORT_SYMBOL(ath9k_hw_cfg_output);
void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val)
{
+ if (AR_SREV_9271(ah))
+ val = ~val;
+
REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio),
AR_GPIO_BIT(gpio));
}
@@ -3523,6 +2457,8 @@ void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits)
{
u32 phybits;
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_RX_FILTER, bits);
phybits = 0;
@@ -3538,6 +2474,9 @@ void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits)
else
REG_WRITE(ah, AR_RXCFG,
REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_ZLFDMA);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
}
EXPORT_SYMBOL(ath9k_hw_setrxfilter);
@@ -3610,14 +2549,25 @@ void ath9k_hw_write_associd(struct ath_hw *ah)
}
EXPORT_SYMBOL(ath9k_hw_write_associd);
+#define ATH9K_MAX_TSF_READ 10
+
u64 ath9k_hw_gettsf64(struct ath_hw *ah)
{
- u64 tsf;
+ u32 tsf_lower, tsf_upper1, tsf_upper2;
+ int i;
+
+ tsf_upper1 = REG_READ(ah, AR_TSF_U32);
+ for (i = 0; i < ATH9K_MAX_TSF_READ; i++) {
+ tsf_lower = REG_READ(ah, AR_TSF_L32);
+ tsf_upper2 = REG_READ(ah, AR_TSF_U32);
+ if (tsf_upper2 == tsf_upper1)
+ break;
+ tsf_upper1 = tsf_upper2;
+ }
- tsf = REG_READ(ah, AR_TSF_U32);
- tsf = (tsf << 32) | REG_READ(ah, AR_TSF_L32);
+ WARN_ON(i == ATH9K_MAX_TSF_READ);
- return tsf;
+ return (((u64)tsf_upper1 << 32) | tsf_lower);
}
EXPORT_SYMBOL(ath9k_hw_gettsf64);
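The rewritten ath9k_hw_gettsf64() avoids a torn 64-bit read: the upper 32 bits are sampled, then the lower 32 bits, then the upper half again, retrying up to ATH9K_MAX_TSF_READ times until both samples of the upper word agree, so a carry between the two reads cannot produce a value off by 2^32. A minimal sketch of the same technique as a generic helper (lo_reg and hi_reg are placeholder register offsets, not driver API):

	static u64 example_read_split_counter(struct ath_hw *ah, u32 lo_reg, u32 hi_reg)
	{
		u32 lo, hi1, hi2;
		int i;

		hi1 = REG_READ(ah, hi_reg);
		for (i = 0; i < 10; i++) {
			lo = REG_READ(ah, lo_reg);
			hi2 = REG_READ(ah, hi_reg);
			if (hi2 == hi1)		/* no rollover between reads */
				break;
			hi1 = hi2;		/* rolled over; sample again */
		}

		return ((u64)hi1 << 32) | lo;
	}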
@@ -3868,6 +2818,16 @@ void ath_gen_timer_isr(struct ath_hw *ah)
}
EXPORT_SYMBOL(ath_gen_timer_isr);
+/********/
+/* HTC */
+/********/
+
+void ath9k_hw_htc_resetinit(struct ath_hw *ah)
+{
+ ah->htc_reset_init = true;
+}
+EXPORT_SYMBOL(ath9k_hw_htc_resetinit);
+
static struct {
u32 version;
const char * name;
@@ -3882,6 +2842,7 @@ static struct {
{ AR_SREV_VERSION_9285, "9285" },
{ AR_SREV_VERSION_9287, "9287" },
{ AR_SREV_VERSION_9271, "9271" },
+ { AR_SREV_VERSION_9300, "9300" },
};
/* For devices with external radios */
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index dbbf7ca5f97d..77245dff5993 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008-2009 Atheros Communications Inc.
+ * Copyright (c) 2008-2010 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -41,18 +41,16 @@
#define AR9280_DEVID_PCIE 0x002a
#define AR9285_DEVID_PCIE 0x002b
#define AR2427_DEVID_PCIE 0x002c
+#define AR9287_DEVID_PCI 0x002d
+#define AR9287_DEVID_PCIE 0x002e
+#define AR9300_DEVID_PCIE 0x0030
#define AR5416_AR9100_DEVID 0x000b
-#define AR9271_USB 0x9271
-
#define AR_SUBVENDOR_ID_NOG 0x0e11
#define AR_SUBVENDOR_ID_NEW_A 0x7065
#define AR5416_MAGIC 0x19641014
-#define AR5416_DEVID_AR9287_PCI 0x002D
-#define AR5416_DEVID_AR9287_PCIE 0x002E
-
#define AR9280_COEX2WIRE_SUBSYSID 0x309b
#define AT9285_COEX3WIRE_SA_SUBSYSID 0x30aa
#define AT9285_COEX3WIRE_DA_SUBSYSID 0x30ab
@@ -70,6 +68,24 @@
#define REG_READ(_ah, _reg) \
ath9k_hw_common(_ah)->ops->read((_ah), (_reg))
+#define ENABLE_REGWRITE_BUFFER(_ah) \
+ do { \
+ if (AR_SREV_9271(_ah)) \
+ ath9k_hw_common(_ah)->ops->enable_write_buffer((_ah)); \
+ } while (0)
+
+#define DISABLE_REGWRITE_BUFFER(_ah) \
+ do { \
+ if (AR_SREV_9271(_ah)) \
+ ath9k_hw_common(_ah)->ops->disable_write_buffer((_ah)); \
+ } while (0)
+
+#define REGWRITE_BUFFER_FLUSH(_ah) \
+ do { \
+ if (AR_SREV_9271(_ah)) \
+ ath9k_hw_common(_ah)->ops->write_flush((_ah)); \
+ } while (0)
+
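The three macros above make register-write buffering a no-op everywhere except AR9271, where consecutive REG_WRITE() calls issued between ENABLE_REGWRITE_BUFFER() and REGWRITE_BUFFER_FLUSH() can be pushed to the USB-attached target as one batched transfer instead of one transaction per write. A minimal usage sketch mirroring the call sites added throughout this patch (the function name is illustrative):

	static void example_batched_regwrites(struct ath_hw *ah)
	{
		/* Starts buffering on AR9271, no-op on PCI/PCIe parts */
		ENABLE_REGWRITE_BUFFER(ah);

		REG_WRITE(ah, AR_ISR, ~0);
		REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);

		/* Push any buffered writes out, then stop buffering */
		REGWRITE_BUFFER_FLUSH(ah);
		DISABLE_REGWRITE_BUFFER(ah);
	}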
#define SM(_v, _f) (((_v) << _f##_S) & _f)
#define MS(_v, _f) (((_v) & _f) >> _f##_S)
#define REG_RMW(_a, _r, _set, _clr) \
@@ -77,6 +93,8 @@
#define REG_RMW_FIELD(_a, _r, _f, _v) \
REG_WRITE(_a, _r, \
(REG_READ(_a, _r) & ~_f) | (((_v) << _f##_S) & _f))
+#define REG_READ_FIELD(_a, _r, _f) \
+ (((REG_READ(_a, _r) & _f) >> _f##_S))
#define REG_SET_BIT(_a, _r, _f) \
REG_WRITE(_a, _r, REG_READ(_a, _r) | _f)
#define REG_CLR_BIT(_a, _r, _f) \
@@ -137,6 +155,16 @@
#define TU_TO_USEC(_tu) ((_tu) << 10)
+#define ATH9K_HW_RX_HP_QDEPTH 16
+#define ATH9K_HW_RX_LP_QDEPTH 128
+
+enum ath_ini_subsys {
+ ATH_INI_PRE = 0,
+ ATH_INI_CORE,
+ ATH_INI_POST,
+ ATH_INI_NUM_SPLIT,
+};
+
enum wireless_mode {
ATH9K_MODE_11A = 0,
ATH9K_MODE_11G,
@@ -167,13 +195,16 @@ enum ath9k_hw_caps {
ATH9K_HW_CAP_ENHANCEDPM = BIT(14),
ATH9K_HW_CAP_AUTOSLEEP = BIT(15),
ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(16),
+ ATH9K_HW_CAP_EDMA = BIT(17),
+ ATH9K_HW_CAP_RAC_SUPPORTED = BIT(18),
+ ATH9K_HW_CAP_LDPC = BIT(19),
+ ATH9K_HW_CAP_FASTCLOCK = BIT(20),
};
enum ath9k_capability_type {
ATH9K_CAP_CIPHER = 0,
ATH9K_CAP_TKIP_MIC,
ATH9K_CAP_TKIP_SPLIT,
- ATH9K_CAP_DIVERSITY,
ATH9K_CAP_TXPOW,
ATH9K_CAP_MCAST_KEYSRCH,
ATH9K_CAP_DS
@@ -194,6 +225,11 @@ struct ath9k_hw_capabilities {
u8 num_gpio_pins;
u8 num_antcfg_2ghz;
u8 num_antcfg_5ghz;
+ u8 rx_hp_qdepth;
+ u8 rx_lp_qdepth;
+ u8 rx_status_len;
+ u8 tx_desc_len;
+ u8 txs_len;
};
struct ath9k_ops_config {
@@ -214,6 +250,7 @@ struct ath9k_ops_config {
u32 enable_ani;
int serialize_regmode;
bool rx_intr_mitigation;
+ bool tx_intr_mitigation;
#define SPUR_DISABLE 0
#define SPUR_ENABLE_IOCTL 1
#define SPUR_ENABLE_EEPROM 2
@@ -225,6 +262,7 @@ struct ath9k_ops_config {
#define AR_BASE_FREQ_5GHZ 4900
#define AR_SPUR_FEEQ_BOUND_HT40 19
#define AR_SPUR_FEEQ_BOUND_HT20 10
+ bool tx_iq_calibration; /* Only available for >= AR9003 */
int spurmode;
u16 spurchans[AR_EEPROM_MODAL_SPURS][2];
u8 max_txtrig_level;
@@ -233,6 +271,8 @@ struct ath9k_ops_config {
enum ath9k_int {
ATH9K_INT_RX = 0x00000001,
ATH9K_INT_RXDESC = 0x00000002,
+ ATH9K_INT_RXHP = 0x00000001,
+ ATH9K_INT_RXLP = 0x00000002,
ATH9K_INT_RXNOFRM = 0x00000008,
ATH9K_INT_RXEOL = 0x00000010,
ATH9K_INT_RXORN = 0x00000020,
@@ -329,10 +369,9 @@ struct ath9k_channel {
#define IS_CHAN_2GHZ(_c) (((_c)->channelFlags & CHANNEL_2GHZ) != 0)
#define IS_CHAN_HALF_RATE(_c) (((_c)->channelFlags & CHANNEL_HALF) != 0)
#define IS_CHAN_QUARTER_RATE(_c) (((_c)->channelFlags & CHANNEL_QUARTER) != 0)
-#define IS_CHAN_A_5MHZ_SPACED(_c) \
+#define IS_CHAN_A_FAST_CLOCK(_ah, _c) \
((((_c)->channelFlags & CHANNEL_5GHZ) != 0) && \
- (((_c)->channel % 20) != 0) && \
- (((_c)->channel % 10) != 0))
+ ((_ah)->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK))
/* These macros check chanmode and not channelFlags */
#define IS_CHAN_B(_c) ((_c)->chanmode == CHANNEL_B)
@@ -365,6 +404,12 @@ enum ser_reg_mode {
SER_REG_MODE_AUTO = 2,
};
+enum ath9k_rx_qtype {
+ ATH9K_RX_QUEUE_HP,
+ ATH9K_RX_QUEUE_LP,
+ ATH9K_RX_QUEUE_MAX,
+};
+
struct ath9k_beacon_state {
u32 bs_nexttbtt;
u32 bs_nextdtim;
@@ -442,6 +487,124 @@ struct ath_gen_timer_table {
} timer_mask;
};
+/**
+ * struct ath_hw_private_ops - callbacks used internally by hardware code
+ *
+ * This structure contains private callbacks designed to only be used internally
+ * by the hardware core.
+ *
+ * @init_cal_settings: setup types of calibrations supported
+ * @init_cal: starts actual calibration
+ *
+ * @init_mode_regs: Initializes mode registers
+ * @init_mode_gain_regs: Initialize TX/RX gain registers
+ * @macversion_supported: If this specific mac revision is supported
+ *
+ * @rf_set_freq: change frequency
+ * @spur_mitigate_freq: spur mitigation
+ * @rf_alloc_ext_banks:
+ * @rf_free_ext_banks:
+ * @set_rf_regs:
+ * @compute_pll_control: compute the PLL control value to use for
+ * AR_RTC_PLL_CONTROL for a given channel
+ * @setup_calibration: set up calibration
+ * @iscal_supported: used to query if a type of calibration is supported
+ * @loadnf: load noise floor read from each chain on the CCA registers
+ */
+struct ath_hw_private_ops {
+ /* Calibration ops */
+ void (*init_cal_settings)(struct ath_hw *ah);
+ bool (*init_cal)(struct ath_hw *ah, struct ath9k_channel *chan);
+
+ void (*init_mode_regs)(struct ath_hw *ah);
+ void (*init_mode_gain_regs)(struct ath_hw *ah);
+ bool (*macversion_supported)(u32 macversion);
+ void (*setup_calibration)(struct ath_hw *ah,
+ struct ath9k_cal_list *currCal);
+ bool (*iscal_supported)(struct ath_hw *ah,
+ enum ath9k_cal_types calType);
+
+ /* PHY ops */
+ int (*rf_set_freq)(struct ath_hw *ah,
+ struct ath9k_channel *chan);
+ void (*spur_mitigate_freq)(struct ath_hw *ah,
+ struct ath9k_channel *chan);
+ int (*rf_alloc_ext_banks)(struct ath_hw *ah);
+ void (*rf_free_ext_banks)(struct ath_hw *ah);
+ bool (*set_rf_regs)(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u16 modesIndex);
+ void (*set_channel_regs)(struct ath_hw *ah, struct ath9k_channel *chan);
+ void (*init_bb)(struct ath_hw *ah,
+ struct ath9k_channel *chan);
+ int (*process_ini)(struct ath_hw *ah, struct ath9k_channel *chan);
+ void (*olc_init)(struct ath_hw *ah);
+ void (*set_rfmode)(struct ath_hw *ah, struct ath9k_channel *chan);
+ void (*mark_phy_inactive)(struct ath_hw *ah);
+ void (*set_delta_slope)(struct ath_hw *ah, struct ath9k_channel *chan);
+ bool (*rfbus_req)(struct ath_hw *ah);
+ void (*rfbus_done)(struct ath_hw *ah);
+ void (*enable_rfkill)(struct ath_hw *ah);
+ void (*restore_chainmask)(struct ath_hw *ah);
+ void (*set_diversity)(struct ath_hw *ah, bool value);
+ u32 (*compute_pll_control)(struct ath_hw *ah,
+ struct ath9k_channel *chan);
+ bool (*ani_control)(struct ath_hw *ah, enum ath9k_ani_cmd cmd,
+ int param);
+ void (*do_getnf)(struct ath_hw *ah, int16_t nfarray[NUM_NF_READINGS]);
+ void (*loadnf)(struct ath_hw *ah, struct ath9k_channel *chan);
+};
+
+/**
+ * struct ath_hw_ops - callbacks used by hardware code and driver code
+ *
+ * This structure contains callbacks designed to to be used internally by
+ * hardware code and also by the lower level driver.
+ *
+ * @config_pci_powersave:
+ * @calibrate: periodic calibration for NF, ANI, IQ, ADC gain, ADC-DC
+ */
+struct ath_hw_ops {
+ void (*config_pci_powersave)(struct ath_hw *ah,
+ int restore,
+ int power_off);
+ void (*rx_enable)(struct ath_hw *ah);
+ void (*set_desc_link)(void *ds, u32 link);
+ void (*get_desc_link)(void *ds, u32 **link);
+ bool (*calibrate)(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u8 rxchainmask,
+ bool longcal);
+ bool (*get_isr)(struct ath_hw *ah, enum ath9k_int *masked);
+ void (*fill_txdesc)(struct ath_hw *ah, void *ds, u32 seglen,
+ bool is_firstseg, bool is_is_lastseg,
+ const void *ds0, dma_addr_t buf_addr,
+ unsigned int qcu);
+ int (*proc_txdesc)(struct ath_hw *ah, void *ds,
+ struct ath_tx_status *ts);
+ void (*set11n_txdesc)(struct ath_hw *ah, void *ds,
+ u32 pktLen, enum ath9k_pkt_type type,
+ u32 txPower, u32 keyIx,
+ enum ath9k_key_type keyType,
+ u32 flags);
+ void (*set11n_ratescenario)(struct ath_hw *ah, void *ds,
+ void *lastds,
+ u32 durUpdateEn, u32 rtsctsRate,
+ u32 rtsctsDuration,
+ struct ath9k_11n_rate_series series[],
+ u32 nseries, u32 flags);
+ void (*set11n_aggr_first)(struct ath_hw *ah, void *ds,
+ u32 aggrLen);
+ void (*set11n_aggr_middle)(struct ath_hw *ah, void *ds,
+ u32 numDelims);
+ void (*set11n_aggr_last)(struct ath_hw *ah, void *ds);
+ void (*clr11n_aggr)(struct ath_hw *ah, void *ds);
+ void (*set11n_burstduration)(struct ath_hw *ah, void *ds,
+ u32 burstDuration);
+ void (*set11n_virtualmorefrag)(struct ath_hw *ah, void *ds,
+ u32 vmf);
+};
+
struct ath_hw {
struct ieee80211_hw *hw;
struct ath_common common;
@@ -455,13 +618,18 @@ struct ath_hw {
struct ar5416_eeprom_def def;
struct ar5416_eeprom_4k map4k;
struct ar9287_eeprom map9287;
+ struct ar9300_eeprom ar9300_eep;
} eeprom;
const struct eeprom_ops *eep_ops;
- enum ath9k_eep_map eep_map;
bool sw_mgmt_crypto;
bool is_pciexpress;
+ bool need_an_top2_fixup;
u16 tx_trig_level;
+ s16 nf_2g_max;
+ s16 nf_2g_min;
+ s16 nf_5g_max;
+ s16 nf_5g_min;
u16 rfsilent;
u32 rfkill_gpio;
u32 rfkill_polarity;
@@ -478,7 +646,8 @@ struct ath_hw {
struct ath9k_tx_queue_info txq[ATH9K_NUM_TX_QUEUES];
int16_t curchan_rad_index;
- u32 mask_reg;
+ enum ath9k_int imask;
+ u32 imrs2_reg;
u32 txok_interrupt_mask;
u32 txerr_interrupt_mask;
u32 txdesc_interrupt_mask;
@@ -493,6 +662,7 @@ struct ath_hw {
struct ath9k_cal_list adcgain_caldata;
struct ath9k_cal_list adcdc_calinitdata;
struct ath9k_cal_list adcdc_caldata;
+ struct ath9k_cal_list tempCompCalData;
struct ath9k_cal_list *cal_list;
struct ath9k_cal_list *cal_list_last;
struct ath9k_cal_list *cal_list_curr;
@@ -533,12 +703,10 @@ struct ath_hw {
DONT_USE_32KHZ,
} enable_32kHz_clock;
- /* Callback for radio frequency change */
- int (*ath9k_hw_rf_set_freq)(struct ath_hw *ah, struct ath9k_channel *chan);
-
- /* Callback for baseband spur frequency */
- void (*ath9k_hw_spur_mitigate_freq)(struct ath_hw *ah,
- struct ath9k_channel *chan);
+ /* Private to hardware code */
+ struct ath_hw_private_ops private_ops;
+ /* Accessed by the lower level driver */
+ struct ath_hw_ops ops;
/* Used to program the radio on non single-chip devices */
u32 *analogBank0Data;
@@ -551,6 +719,7 @@ struct ath_hw {
u32 *addac5416_21;
u32 *bank6Temp;
+ u8 txpower_limit;
int16_t txpower_indexoffset;
int coverage_class;
u32 beacon_interval;
@@ -592,16 +761,34 @@ struct ath_hw {
struct ar5416IniArray iniBank7;
struct ar5416IniArray iniAddac;
struct ar5416IniArray iniPcieSerdes;
+ struct ar5416IniArray iniPcieSerdesLowPower;
struct ar5416IniArray iniModesAdditional;
struct ar5416IniArray iniModesRxGain;
struct ar5416IniArray iniModesTxGain;
struct ar5416IniArray iniModes_9271_1_0_only;
struct ar5416IniArray iniCckfirNormal;
struct ar5416IniArray iniCckfirJapan2484;
+ struct ar5416IniArray iniCommon_normal_cck_fir_coeff_9271;
+ struct ar5416IniArray iniCommon_japan_2484_cck_fir_coeff_9271;
+ struct ar5416IniArray iniModes_9271_ANI_reg;
+ struct ar5416IniArray iniModes_high_power_tx_gain_9271;
+ struct ar5416IniArray iniModes_normal_power_tx_gain_9271;
+
+ struct ar5416IniArray iniMac[ATH_INI_NUM_SPLIT];
+ struct ar5416IniArray iniBB[ATH_INI_NUM_SPLIT];
+ struct ar5416IniArray iniRadio[ATH_INI_NUM_SPLIT];
+ struct ar5416IniArray iniSOC[ATH_INI_NUM_SPLIT];
u32 intr_gen_timer_trigger;
u32 intr_gen_timer_thresh;
struct ath_gen_timer_table hw_gen_timers;
+
+ struct ar9003_txs *ts_ring;
+ void *ts_start;
+ u32 ts_paddr_start;
+ u32 ts_paddr_end;
+ u16 ts_tail;
+ u8 ts_size;
};
static inline struct ath_common *ath9k_hw_common(struct ath_hw *ah)
@@ -614,6 +801,16 @@ static inline struct ath_regulatory *ath9k_hw_regulatory(struct ath_hw *ah)
return &(ath9k_hw_common(ah)->regulatory);
}
+static inline struct ath_hw_private_ops *ath9k_hw_private_ops(struct ath_hw *ah)
+{
+ return &ah->private_ops;
+}
+
+static inline struct ath_hw_ops *ath9k_hw_ops(struct ath_hw *ah)
+{
+ return &ah->ops;
+}
+
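With the callbacks split into private_ops (hardware core only) and ops (shared with the lower level driver), callers reach family-specific code through the two accessors above instead of the per-function pointers removed from struct ath_hw. A minimal sketch of the dispatch, assuming the tables were already filled in by ar9002_hw_attach_ops() or ar9003_hw_attach_ops() (the wrapper function itself is hypothetical):

	static int example_tune_channel(struct ath_hw *ah, struct ath9k_channel *chan)
	{
		struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);

		/* Dispatches to the AR5008/AR9002 or AR9003 implementation */
		priv_ops->spur_mitigate_freq(ah, chan);
		return priv_ops->rf_set_freq(ah, chan);
	}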
/* Initialization, Detach, Reset */
const char *ath9k_hw_probe(u16 vendorid, u16 devid);
void ath9k_hw_deinit(struct ath_hw *ah);
@@ -625,6 +822,7 @@ bool ath9k_hw_getcapability(struct ath_hw *ah, enum ath9k_capability_type type,
u32 capability, u32 *result);
bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type,
u32 capability, u32 setting, int *status);
+u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan);
/* Key Cache Management */
bool ath9k_hw_keyreset(struct ath_hw *ah, u16 entry);
@@ -673,16 +871,10 @@ void ath9k_hw_set11nmac2040(struct ath_hw *ah);
void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period);
void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
const struct ath9k_beacon_state *bs);
+bool ath9k_hw_check_alive(struct ath_hw *ah);
bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode);
-void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore, int power_off);
-
-/* Interrupt Handling */
-bool ath9k_hw_intrpend(struct ath_hw *ah);
-bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked);
-enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints);
-
/* Generic hw timer primitives */
struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
void (*trigger)(void *),
@@ -701,6 +893,39 @@ u32 ath9k_hw_gettsf32(struct ath_hw *ah);
void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len);
+/* HTC */
+void ath9k_hw_htc_resetinit(struct ath_hw *ah);
+
+/* PHY */
+void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
+ u32 *coef_mantissa, u32 *coef_exponent);
+
+/*
+ * Code Specific to AR5008, AR9001 or AR9002,
+ * we stuff these here to avoid callbacks for AR9003.
+ */
+void ar9002_hw_cck_chan14_spread(struct ath_hw *ah);
+int ar9002_hw_rf_claim(struct ath_hw *ah);
+void ar9002_hw_enable_async_fifo(struct ath_hw *ah);
+void ar9002_hw_enable_wep_aggregation(struct ath_hw *ah);
+
+/*
+ * Code specific to AR9003, we stuff these here to avoid callbacks
+ * for older families
+ */
+void ar9003_hw_set_nf_limits(struct ath_hw *ah);
+
+/* Hardware family op attach helpers */
+void ar5008_hw_attach_phy_ops(struct ath_hw *ah);
+void ar9002_hw_attach_phy_ops(struct ath_hw *ah);
+void ar9003_hw_attach_phy_ops(struct ath_hw *ah);
+
+void ar9002_hw_attach_calib_ops(struct ath_hw *ah);
+void ar9003_hw_attach_calib_ops(struct ath_hw *ah);
+
+void ar9002_hw_attach_ops(struct ath_hw *ah);
+void ar9003_hw_attach_ops(struct ath_hw *ah);
+
#define ATH_PCIE_CAP_LINK_CTRL 0x70
#define ATH_PCIE_CAP_LINK_L0S 1
#define ATH_PCIE_CAP_LINK_L1 2
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 3d4d897add6d..8c795488ebc3 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -175,6 +175,18 @@ static const struct ath_ops ath9k_common_ops = {
.write = ath9k_iowrite32,
};
+static int count_streams(unsigned int chainmask, int max)
+{
+ int streams = 0;
+
+ do {
+ if (++streams == max)
+ break;
+ } while ((chainmask = chainmask & (chainmask - 1)));
+
+ return streams;
+}
+
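count_streams() counts the set chains in a chainmask, capped at max, by clearing the lowest set bit on each pass (chainmask & (chainmask - 1)): a three-chain mask of 0x7 gives 3, a single-chain mask of 0x1 gives 1. setup_ht_cap() below uses the result to size the per-stream MCS receive mask; a minimal sketch of that relationship, assuming max_streams has already been derived from the chip revision (the helper name is illustrative):

	static void example_fill_rx_mcs_mask(struct ieee80211_sta_ht_cap *ht_info,
					     u8 rx_chainmask, int max_streams)
	{
		int i, rx_streams = count_streams(rx_chainmask, max_streams);

		/* One 8-bit MCS group (MCS 0-7, 8-15, 16-23) per RX stream */
		for (i = 0; i < rx_streams; i++)
			ht_info->mcs.rx_mask[i] = 0xff;
	}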
/**************************/
/* Initialization */
/**************************/
@@ -182,8 +194,10 @@ static const struct ath_ops ath9k_common_ops = {
static void setup_ht_cap(struct ath_softc *sc,
struct ieee80211_sta_ht_cap *ht_info)
{
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
u8 tx_streams, rx_streams;
+ int i, max_streams;
ht_info->ht_supported = true;
ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
@@ -191,28 +205,40 @@ static void setup_ht_cap(struct ath_softc *sc,
IEEE80211_HT_CAP_SGI_40 |
IEEE80211_HT_CAP_DSSSCCK40;
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
+ ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ max_streams = 3;
+ else
+ max_streams = 2;
+
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ if (max_streams >= 2)
+ ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
+ ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+ }
+
/* set up supported mcs set */
memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
- tx_streams = !(common->tx_chainmask & (common->tx_chainmask - 1)) ?
- 1 : 2;
- rx_streams = !(common->rx_chainmask & (common->rx_chainmask - 1)) ?
- 1 : 2;
+ tx_streams = count_streams(common->tx_chainmask, max_streams);
+ rx_streams = count_streams(common->rx_chainmask, max_streams);
+
+ ath_print(common, ATH_DBG_CONFIG,
+ "TX streams %d, RX streams: %d\n",
+ tx_streams, rx_streams);
if (tx_streams != rx_streams) {
- ath_print(common, ATH_DBG_CONFIG,
- "TX streams %d, RX streams: %d\n",
- tx_streams, rx_streams);
ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
ht_info->mcs.tx_params |= ((tx_streams - 1) <<
IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
}
- ht_info->mcs.rx_mask[0] = 0xff;
- if (rx_streams >= 2)
- ht_info->mcs.rx_mask[1] = 0xff;
+ for (i = 0; i < rx_streams; i++)
+ ht_info->mcs.rx_mask[i] = 0xff;
ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
}
@@ -235,31 +261,37 @@ static int ath9k_reg_notifier(struct wiphy *wiphy,
*/
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
struct list_head *head, const char *name,
- int nbuf, int ndesc)
+ int nbuf, int ndesc, bool is_tx)
{
#define DS2PHYS(_dd, _ds) \
((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_desc *ds;
+ u8 *ds;
struct ath_buf *bf;
- int i, bsize, error;
+ int i, bsize, error, desc_len;
ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
name, nbuf, ndesc);
INIT_LIST_HEAD(head);
+
+ if (is_tx)
+ desc_len = sc->sc_ah->caps.tx_desc_len;
+ else
+ desc_len = sizeof(struct ath_desc);
+
/* ath_desc must be a multiple of DWORDs */
- if ((sizeof(struct ath_desc) % 4) != 0) {
+ if ((desc_len % 4) != 0) {
ath_print(common, ATH_DBG_FATAL,
"ath_desc not DWORD aligned\n");
- BUG_ON((sizeof(struct ath_desc) % 4) != 0);
+ BUG_ON((desc_len % 4) != 0);
error = -ENOMEM;
goto fail;
}
- dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
+ dd->dd_desc_len = desc_len * nbuf * ndesc;
/*
* Need additional DMA memory because we can't use
@@ -272,7 +304,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
u32 dma_len;
while (ndesc_skipped) {
- dma_len = ndesc_skipped * sizeof(struct ath_desc);
+ dma_len = ndesc_skipped * desc_len;
dd->dd_desc_len += dma_len;
ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
@@ -286,7 +318,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
error = -ENOMEM;
goto fail;
}
- ds = dd->dd_desc;
+ ds = (u8 *) dd->dd_desc;
ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
name, ds, (u32) dd->dd_desc_len,
ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
@@ -300,7 +332,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
}
dd->dd_bufptr = bf;
- for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
+ for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
bf->bf_desc = ds;
bf->bf_daddr = DS2PHYS(dd, ds);
@@ -316,7 +348,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
((caddr_t) dd->dd_desc +
dd->dd_desc_len));
- ds += ndesc;
+ ds += (desc_len * ndesc);
bf->bf_desc = ds;
bf->bf_daddr = DS2PHYS(dd, ds);
}
@@ -514,7 +546,7 @@ static void ath9k_init_misc(struct ath_softc *sc)
common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;
- ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
+ ath9k_hw_set_diversity(sc->sc_ah, true);
sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
@@ -568,13 +600,10 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
ath_read_cachesize(common, &csz);
common->cachelsz = csz << 2; /* convert to bytes */
+ /* Initializes the hardware for all supported chipsets */
ret = ath9k_hw_init(ah);
- if (ret) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to initialize hardware; "
- "initialization status: %d\n", ret);
+ if (ret)
goto err_hw;
- }
ret = ath9k_init_debug(ah);
if (ret) {
@@ -760,6 +789,9 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
tasklet_kill(&sc->intr_tq);
tasklet_kill(&sc->bcon_tasklet);
+
+ kfree(sc->sc_ah);
+ sc->sc_ah = NULL;
}
void ath9k_deinit_device(struct ath_softc *sc)
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index efc420cd42bf..0e425cb4bbb1 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -25,14 +25,21 @@ static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
ah->txurn_interrupt_mask);
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_IMR_S0,
SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK)
| SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC));
REG_WRITE(ah, AR_IMR_S1,
SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR)
| SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL));
- REG_RMW_FIELD(ah, AR_IMR_S2,
- AR_IMR_S2_QCU_TXURN, ah->txurn_interrupt_mask);
+
+ ah->imrs2_reg &= ~AR_IMR_S2_QCU_TXURN;
+ ah->imrs2_reg |= (ah->txurn_interrupt_mask & AR_IMR_S2_QCU_TXURN);
+ REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
}
u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
@@ -55,6 +62,18 @@ void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
}
EXPORT_SYMBOL(ath9k_hw_txstart);
+void ath9k_hw_cleartxdesc(struct ath_hw *ah, void *ds)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
+ ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
+ ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
+ ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
+ ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
+}
+EXPORT_SYMBOL(ath9k_hw_cleartxdesc);
+
u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
{
u32 npend;
@@ -103,7 +122,7 @@ bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
if (ah->tx_trig_level >= ah->config.max_txtrig_level)
return false;
- omask = ath9k_hw_set_interrupts(ah, ah->mask_reg & ~ATH9K_INT_GLOBAL);
+ omask = ath9k_hw_set_interrupts(ah, ah->imask & ~ATH9K_INT_GLOBAL);
txcfg = REG_READ(ah, AR_TXCFG);
curLevel = MS(txcfg, AR_FTRIG);
@@ -205,280 +224,6 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
}
EXPORT_SYMBOL(ath9k_hw_stoptxdma);
-void ath9k_hw_filltxdesc(struct ath_hw *ah, struct ath_desc *ds,
- u32 segLen, bool firstSeg,
- bool lastSeg, const struct ath_desc *ds0)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- if (firstSeg) {
- ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
- } else if (lastSeg) {
- ads->ds_ctl0 = 0;
- ads->ds_ctl1 = segLen;
- ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
- ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
- } else {
- ads->ds_ctl0 = 0;
- ads->ds_ctl1 = segLen | AR_TxMore;
- ads->ds_ctl2 = 0;
- ads->ds_ctl3 = 0;
- }
- ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
- ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
- ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
- ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
- ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
-}
-EXPORT_SYMBOL(ath9k_hw_filltxdesc);
-
-void ath9k_hw_cleartxdesc(struct ath_hw *ah, struct ath_desc *ds)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
- ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
- ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
- ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
- ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
-}
-EXPORT_SYMBOL(ath9k_hw_cleartxdesc);
-
-int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- if ((ads->ds_txstatus9 & AR_TxDone) == 0)
- return -EINPROGRESS;
-
- ds->ds_txstat.ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
- ds->ds_txstat.ts_tstamp = ads->AR_SendTimestamp;
- ds->ds_txstat.ts_status = 0;
- ds->ds_txstat.ts_flags = 0;
-
- if (ads->ds_txstatus1 & AR_FrmXmitOK)
- ds->ds_txstat.ts_status |= ATH9K_TX_ACKED;
- if (ads->ds_txstatus1 & AR_ExcessiveRetries)
- ds->ds_txstat.ts_status |= ATH9K_TXERR_XRETRY;
- if (ads->ds_txstatus1 & AR_Filtered)
- ds->ds_txstat.ts_status |= ATH9K_TXERR_FILT;
- if (ads->ds_txstatus1 & AR_FIFOUnderrun) {
- ds->ds_txstat.ts_status |= ATH9K_TXERR_FIFO;
- ath9k_hw_updatetxtriglevel(ah, true);
- }
- if (ads->ds_txstatus9 & AR_TxOpExceeded)
- ds->ds_txstat.ts_status |= ATH9K_TXERR_XTXOP;
- if (ads->ds_txstatus1 & AR_TxTimerExpired)
- ds->ds_txstat.ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
-
- if (ads->ds_txstatus1 & AR_DescCfgErr)
- ds->ds_txstat.ts_flags |= ATH9K_TX_DESC_CFG_ERR;
- if (ads->ds_txstatus1 & AR_TxDataUnderrun) {
- ds->ds_txstat.ts_flags |= ATH9K_TX_DATA_UNDERRUN;
- ath9k_hw_updatetxtriglevel(ah, true);
- }
- if (ads->ds_txstatus1 & AR_TxDelimUnderrun) {
- ds->ds_txstat.ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
- ath9k_hw_updatetxtriglevel(ah, true);
- }
- if (ads->ds_txstatus0 & AR_TxBaStatus) {
- ds->ds_txstat.ts_flags |= ATH9K_TX_BA;
- ds->ds_txstat.ba_low = ads->AR_BaBitmapLow;
- ds->ds_txstat.ba_high = ads->AR_BaBitmapHigh;
- }
-
- ds->ds_txstat.ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx);
- switch (ds->ds_txstat.ts_rateindex) {
- case 0:
- ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0);
- break;
- case 1:
- ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1);
- break;
- case 2:
- ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2);
- break;
- case 3:
- ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3);
- break;
- }
-
- ds->ds_txstat.ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined);
- ds->ds_txstat.ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
- ds->ds_txstat.ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
- ds->ds_txstat.ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
- ds->ds_txstat.ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
- ds->ds_txstat.ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
- ds->ds_txstat.ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
- ds->ds_txstat.evm0 = ads->AR_TxEVM0;
- ds->ds_txstat.evm1 = ads->AR_TxEVM1;
- ds->ds_txstat.evm2 = ads->AR_TxEVM2;
- ds->ds_txstat.ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
- ds->ds_txstat.ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
- ds->ds_txstat.ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);
- ds->ds_txstat.ts_antenna = 0;
-
- return 0;
-}
-EXPORT_SYMBOL(ath9k_hw_txprocdesc);
-
-void ath9k_hw_set11n_txdesc(struct ath_hw *ah, struct ath_desc *ds,
- u32 pktLen, enum ath9k_pkt_type type, u32 txPower,
- u32 keyIx, enum ath9k_key_type keyType, u32 flags)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- txPower += ah->txpower_indexoffset;
- if (txPower > 63)
- txPower = 63;
-
- ads->ds_ctl0 = (pktLen & AR_FrameLen)
- | (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
- | SM(txPower, AR_XmitPower)
- | (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
- | (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
- | (flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0)
- | (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0);
-
- ads->ds_ctl1 =
- (keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
- | SM(type, AR_FrameType)
- | (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
- | (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
- | (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);
-
- ads->ds_ctl6 = SM(keyType, AR_EncrType);
-
- if (AR_SREV_9285(ah)) {
- ads->ds_ctl8 = 0;
- ads->ds_ctl9 = 0;
- ads->ds_ctl10 = 0;
- ads->ds_ctl11 = 0;
- }
-}
-EXPORT_SYMBOL(ath9k_hw_set11n_txdesc);
-
-void ath9k_hw_set11n_ratescenario(struct ath_hw *ah, struct ath_desc *ds,
- struct ath_desc *lastds,
- u32 durUpdateEn, u32 rtsctsRate,
- u32 rtsctsDuration,
- struct ath9k_11n_rate_series series[],
- u32 nseries, u32 flags)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
- struct ar5416_desc *last_ads = AR5416DESC(lastds);
- u32 ds_ctl0;
-
- if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
- ds_ctl0 = ads->ds_ctl0;
-
- if (flags & ATH9K_TXDESC_RTSENA) {
- ds_ctl0 &= ~AR_CTSEnable;
- ds_ctl0 |= AR_RTSEnable;
- } else {
- ds_ctl0 &= ~AR_RTSEnable;
- ds_ctl0 |= AR_CTSEnable;
- }
-
- ads->ds_ctl0 = ds_ctl0;
- } else {
- ads->ds_ctl0 =
- (ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
- }
-
- ads->ds_ctl2 = set11nTries(series, 0)
- | set11nTries(series, 1)
- | set11nTries(series, 2)
- | set11nTries(series, 3)
- | (durUpdateEn ? AR_DurUpdateEna : 0)
- | SM(0, AR_BurstDur);
-
- ads->ds_ctl3 = set11nRate(series, 0)
- | set11nRate(series, 1)
- | set11nRate(series, 2)
- | set11nRate(series, 3);
-
- ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
- | set11nPktDurRTSCTS(series, 1);
-
- ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
- | set11nPktDurRTSCTS(series, 3);
-
- ads->ds_ctl7 = set11nRateFlags(series, 0)
- | set11nRateFlags(series, 1)
- | set11nRateFlags(series, 2)
- | set11nRateFlags(series, 3)
- | SM(rtsctsRate, AR_RTSCTSRate);
- last_ads->ds_ctl2 = ads->ds_ctl2;
- last_ads->ds_ctl3 = ads->ds_ctl3;
-}
-EXPORT_SYMBOL(ath9k_hw_set11n_ratescenario);
-
-void ath9k_hw_set11n_aggr_first(struct ath_hw *ah, struct ath_desc *ds,
- u32 aggrLen)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
- ads->ds_ctl6 &= ~AR_AggrLen;
- ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
-}
-EXPORT_SYMBOL(ath9k_hw_set11n_aggr_first);
-
-void ath9k_hw_set11n_aggr_middle(struct ath_hw *ah, struct ath_desc *ds,
- u32 numDelims)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
- unsigned int ctl6;
-
- ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
-
- ctl6 = ads->ds_ctl6;
- ctl6 &= ~AR_PadDelim;
- ctl6 |= SM(numDelims, AR_PadDelim);
- ads->ds_ctl6 = ctl6;
-}
-EXPORT_SYMBOL(ath9k_hw_set11n_aggr_middle);
-
-void ath9k_hw_set11n_aggr_last(struct ath_hw *ah, struct ath_desc *ds)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- ads->ds_ctl1 |= AR_IsAggr;
- ads->ds_ctl1 &= ~AR_MoreAggr;
- ads->ds_ctl6 &= ~AR_PadDelim;
-}
-EXPORT_SYMBOL(ath9k_hw_set11n_aggr_last);
-
-void ath9k_hw_clr11n_aggr(struct ath_hw *ah, struct ath_desc *ds)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
-}
-EXPORT_SYMBOL(ath9k_hw_clr11n_aggr);
-
-void ath9k_hw_set11n_burstduration(struct ath_hw *ah, struct ath_desc *ds,
- u32 burstDuration)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- ads->ds_ctl2 &= ~AR_BurstDur;
- ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
-}
-EXPORT_SYMBOL(ath9k_hw_set11n_burstduration);
-
-void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, struct ath_desc *ds,
- u32 vmf)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- if (vmf)
- ads->ds_ctl0 |= AR_VirtMoreFrag;
- else
- ads->ds_ctl0 &= ~AR_VirtMoreFrag;
-}
-
void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
{
*txqs &= ah->intr_txqs;
@@ -730,6 +475,8 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
} else
cwMin = qi->tqi_cwmin;
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_DLCL_IFS(q),
SM(cwMin, AR_D_LCL_IFS_CWMIN) |
SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
@@ -744,6 +491,8 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
REG_WRITE(ah, AR_DMISC(q),
AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);
+ REGWRITE_BUFFER_FLUSH(ah);
+
if (qi->tqi_cbrPeriod) {
REG_WRITE(ah, AR_QCBRCFG(q),
SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
@@ -759,6 +508,8 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
AR_Q_RDYTIMECFG_EN);
}
+ REGWRITE_BUFFER_FLUSH(ah);
+
REG_WRITE(ah, AR_DCHNTIME(q),
SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
(qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));
@@ -776,6 +527,10 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
REG_READ(ah, AR_DMISC(q)) |
AR_D_MISC_POST_FR_BKOFF_DIS);
}
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) {
REG_WRITE(ah, AR_DMISC(q),
REG_READ(ah, AR_DMISC(q)) |
@@ -783,6 +538,8 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
}
switch (qi->tqi_type) {
case ATH9K_TX_QUEUE_BEACON:
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
| AR_Q_MISC_FSP_DBA_GATED
| AR_Q_MISC_BEACON_USE
@@ -793,8 +550,20 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
| AR_D_MISC_BEACON_USE
| AR_D_MISC_POST_FR_BKOFF_DIS);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
+ /* cwmin and cwmax should be 0 for beacon queue */
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ REG_WRITE(ah, AR_DLCL_IFS(q), SM(0, AR_D_LCL_IFS_CWMIN)
+ | SM(0, AR_D_LCL_IFS_CWMAX)
+ | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
+ }
break;
case ATH9K_TX_QUEUE_CAB:
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
| AR_Q_MISC_FSP_DBA_GATED
| AR_Q_MISC_CBR_INCR_DIS1
@@ -808,6 +577,10 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
| (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
AR_D_MISC_ARB_LOCKOUT_CNTRL_S));
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
break;
case ATH9K_TX_QUEUE_PSPOLL:
REG_WRITE(ah, AR_QMISC(q),
@@ -829,6 +602,9 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
AR_D_MISC_POST_FR_BKOFF_DIS);
}
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ REG_WRITE(ah, AR_Q_DESC_CRCCHK, AR_Q_DESC_CRCCHK_EN);
+
if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
ah->txok_interrupt_mask |= 1 << q;
else
@@ -856,7 +632,7 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
EXPORT_SYMBOL(ath9k_hw_resettxqueue);
int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
- u32 pa, struct ath_desc *nds, u64 tsf)
+ struct ath_rx_status *rs, u64 tsf)
{
struct ar5416_desc ads;
struct ar5416_desc *adsp = AR5416DESC(ds);
@@ -867,92 +643,76 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
ads.u.rx = adsp->u.rx;
- ds->ds_rxstat.rs_status = 0;
- ds->ds_rxstat.rs_flags = 0;
+ rs->rs_status = 0;
+ rs->rs_flags = 0;
- ds->ds_rxstat.rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
- ds->ds_rxstat.rs_tstamp = ads.AR_RcvTimestamp;
+ rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
+ rs->rs_tstamp = ads.AR_RcvTimestamp;
if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) {
- ds->ds_rxstat.rs_rssi = ATH9K_RSSI_BAD;
- ds->ds_rxstat.rs_rssi_ctl0 = ATH9K_RSSI_BAD;
- ds->ds_rxstat.rs_rssi_ctl1 = ATH9K_RSSI_BAD;
- ds->ds_rxstat.rs_rssi_ctl2 = ATH9K_RSSI_BAD;
- ds->ds_rxstat.rs_rssi_ext0 = ATH9K_RSSI_BAD;
- ds->ds_rxstat.rs_rssi_ext1 = ATH9K_RSSI_BAD;
- ds->ds_rxstat.rs_rssi_ext2 = ATH9K_RSSI_BAD;
+ rs->rs_rssi = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ctl0 = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ctl1 = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ctl2 = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ext0 = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ext1 = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ext2 = ATH9K_RSSI_BAD;
} else {
- ds->ds_rxstat.rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
- ds->ds_rxstat.rs_rssi_ctl0 = MS(ads.ds_rxstatus0,
+ rs->rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
+ rs->rs_rssi_ctl0 = MS(ads.ds_rxstatus0,
AR_RxRSSIAnt00);
- ds->ds_rxstat.rs_rssi_ctl1 = MS(ads.ds_rxstatus0,
+ rs->rs_rssi_ctl1 = MS(ads.ds_rxstatus0,
AR_RxRSSIAnt01);
- ds->ds_rxstat.rs_rssi_ctl2 = MS(ads.ds_rxstatus0,
+ rs->rs_rssi_ctl2 = MS(ads.ds_rxstatus0,
AR_RxRSSIAnt02);
- ds->ds_rxstat.rs_rssi_ext0 = MS(ads.ds_rxstatus4,
+ rs->rs_rssi_ext0 = MS(ads.ds_rxstatus4,
AR_RxRSSIAnt10);
- ds->ds_rxstat.rs_rssi_ext1 = MS(ads.ds_rxstatus4,
+ rs->rs_rssi_ext1 = MS(ads.ds_rxstatus4,
AR_RxRSSIAnt11);
- ds->ds_rxstat.rs_rssi_ext2 = MS(ads.ds_rxstatus4,
+ rs->rs_rssi_ext2 = MS(ads.ds_rxstatus4,
AR_RxRSSIAnt12);
}
if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
- ds->ds_rxstat.rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
+ rs->rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
else
- ds->ds_rxstat.rs_keyix = ATH9K_RXKEYIX_INVALID;
+ rs->rs_keyix = ATH9K_RXKEYIX_INVALID;
- ds->ds_rxstat.rs_rate = RXSTATUS_RATE(ah, (&ads));
- ds->ds_rxstat.rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;
+ rs->rs_rate = RXSTATUS_RATE(ah, (&ads));
+ rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;
- ds->ds_rxstat.rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
- ds->ds_rxstat.rs_moreaggr =
+ rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
+ rs->rs_moreaggr =
(ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
- ds->ds_rxstat.rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
- ds->ds_rxstat.rs_flags =
+ rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
+ rs->rs_flags =
(ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
- ds->ds_rxstat.rs_flags |=
+ rs->rs_flags |=
(ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;
if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
- ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
+ rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
- ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_POST;
+ rs->rs_flags |= ATH9K_RX_DELIM_CRC_POST;
if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
- ds->ds_rxstat.rs_flags |= ATH9K_RX_DECRYPT_BUSY;
+ rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;
if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
if (ads.ds_rxstatus8 & AR_CRCErr)
- ds->ds_rxstat.rs_status |= ATH9K_RXERR_CRC;
+ rs->rs_status |= ATH9K_RXERR_CRC;
else if (ads.ds_rxstatus8 & AR_PHYErr) {
- ds->ds_rxstat.rs_status |= ATH9K_RXERR_PHY;
+ rs->rs_status |= ATH9K_RXERR_PHY;
phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
- ds->ds_rxstat.rs_phyerr = phyerr;
+ rs->rs_phyerr = phyerr;
} else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
- ds->ds_rxstat.rs_status |= ATH9K_RXERR_DECRYPT;
+ rs->rs_status |= ATH9K_RXERR_DECRYPT;
else if (ads.ds_rxstatus8 & AR_MichaelErr)
- ds->ds_rxstat.rs_status |= ATH9K_RXERR_MIC;
+ rs->rs_status |= ATH9K_RXERR_MIC;
}
return 0;
}
EXPORT_SYMBOL(ath9k_hw_rxprocdesc);
-void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
- u32 size, u32 flags)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
- struct ath9k_hw_capabilities *pCap = &ah->caps;
-
- ads->ds_ctl1 = size & AR_BufLen;
- if (flags & ATH9K_RXDESC_INTREQ)
- ads->ds_ctl1 |= AR_RxIntrReq;
-
- ads->ds_rxstatus8 &= ~AR_RxDone;
- if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
- memset(&(ads->u), 0, sizeof(ads->u));
-}
-EXPORT_SYMBOL(ath9k_hw_setuprxdesc);
-
/*
* This can stop or re-enable RX.
*
@@ -996,12 +756,6 @@ void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
}
EXPORT_SYMBOL(ath9k_hw_putrxbuf);
-void ath9k_hw_rxena(struct ath_hw *ah)
-{
- REG_WRITE(ah, AR_CR, AR_CR_RXE);
-}
-EXPORT_SYMBOL(ath9k_hw_rxena);
-
void ath9k_hw_startpcureceive(struct ath_hw *ah)
{
ath9k_enable_mib_counters(ah);
@@ -1020,6 +774,14 @@ void ath9k_hw_stoppcurecv(struct ath_hw *ah)
}
EXPORT_SYMBOL(ath9k_hw_stoppcurecv);
+void ath9k_hw_abortpcurecv(struct ath_hw *ah)
+{
+ REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_ABORT | AR_DIAG_RX_DIS);
+
+ ath9k_hw_disable_mib_counters(ah);
+}
+EXPORT_SYMBOL(ath9k_hw_abortpcurecv);
+
bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
{
#define AH_RX_STOP_DMA_TIMEOUT 10000 /* usec */
@@ -1065,3 +827,142 @@ int ath9k_hw_beaconq_setup(struct ath_hw *ah)
return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
}
EXPORT_SYMBOL(ath9k_hw_beaconq_setup);
+
+bool ath9k_hw_intrpend(struct ath_hw *ah)
+{
+ u32 host_isr;
+
+ if (AR_SREV_9100(ah))
+ return true;
+
+ host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
+ if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS))
+ return true;
+
+ host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
+ if ((host_isr & AR_INTR_SYNC_DEFAULT)
+ && (host_isr != AR_INTR_SPURIOUS))
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(ath9k_hw_intrpend);
+
+enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah,
+ enum ath9k_int ints)
+{
+ enum ath9k_int omask = ah->imask;
+ u32 mask, mask2;
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ ath_print(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);
+
+ if (omask & ATH9K_INT_GLOBAL) {
+ ath_print(common, ATH_DBG_INTERRUPT, "disable IER\n");
+ REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
+ (void) REG_READ(ah, AR_IER);
+ if (!AR_SREV_9100(ah)) {
+ REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
+ (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);
+
+ REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
+ (void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
+ }
+ }
+
+ /* TODO: global int Ref count */
+ mask = ints & ATH9K_INT_COMMON;
+ mask2 = 0;
+
+ if (ints & ATH9K_INT_TX) {
+ if (ah->config.tx_intr_mitigation)
+ mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM;
+ else {
+ if (ah->txok_interrupt_mask)
+ mask |= AR_IMR_TXOK;
+ if (ah->txdesc_interrupt_mask)
+ mask |= AR_IMR_TXDESC;
+ }
+ if (ah->txerr_interrupt_mask)
+ mask |= AR_IMR_TXERR;
+ if (ah->txeol_interrupt_mask)
+ mask |= AR_IMR_TXEOL;
+ }
+ if (ints & ATH9K_INT_RX) {
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP;
+ if (ah->config.rx_intr_mitigation) {
+ mask &= ~AR_IMR_RXOK_LP;
+ mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
+ } else {
+ mask |= AR_IMR_RXOK_LP;
+ }
+ } else {
+ if (ah->config.rx_intr_mitigation)
+ mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
+ else
+ mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
+ }
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
+ mask |= AR_IMR_GENTMR;
+ }
+
+ if (ints & (ATH9K_INT_BMISC)) {
+ mask |= AR_IMR_BCNMISC;
+ if (ints & ATH9K_INT_TIM)
+ mask2 |= AR_IMR_S2_TIM;
+ if (ints & ATH9K_INT_DTIM)
+ mask2 |= AR_IMR_S2_DTIM;
+ if (ints & ATH9K_INT_DTIMSYNC)
+ mask2 |= AR_IMR_S2_DTIMSYNC;
+ if (ints & ATH9K_INT_CABEND)
+ mask2 |= AR_IMR_S2_CABEND;
+ if (ints & ATH9K_INT_TSFOOR)
+ mask2 |= AR_IMR_S2_TSFOOR;
+ }
+
+ if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) {
+ mask |= AR_IMR_BCNMISC;
+ if (ints & ATH9K_INT_GTT)
+ mask2 |= AR_IMR_S2_GTT;
+ if (ints & ATH9K_INT_CST)
+ mask2 |= AR_IMR_S2_CST;
+ }
+
+ ath_print(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
+ REG_WRITE(ah, AR_IMR, mask);
+ ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC |
+ AR_IMR_S2_CABEND | AR_IMR_S2_CABTO |
+ AR_IMR_S2_TSFOOR | AR_IMR_S2_GTT | AR_IMR_S2_CST);
+ ah->imrs2_reg |= mask2;
+ REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
+ if (ints & ATH9K_INT_TIM_TIMER)
+ REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
+ else
+ REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
+ }
+
+ if (ints & ATH9K_INT_GLOBAL) {
+ ath_print(common, ATH_DBG_INTERRUPT, "enable IER\n");
+ REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
+ if (!AR_SREV_9100(ah)) {
+ REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
+ AR_INTR_MAC_IRQ);
+ REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);
+
+
+ REG_WRITE(ah, AR_INTR_SYNC_ENABLE,
+ AR_INTR_SYNC_DEFAULT);
+ REG_WRITE(ah, AR_INTR_SYNC_MASK,
+ AR_INTR_SYNC_DEFAULT);
+ }
+ ath_print(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
+ REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
+ }
+
+ return omask;
+}
+EXPORT_SYMBOL(ath9k_hw_set_interrupts);
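The new helper returns the mask that was programmed before the call (omask above), which allows a save/mask/restore pattern around sections that must run with the chip silenced. A minimal, hypothetical sketch of that pattern (quiesce_example() is illustrative only and not part of this patch):

static void quiesce_example(struct ath_hw *ah)
{
	enum ath9k_int saved;

	/* Mask everything; the helper hands back the previously active mask. */
	saved = ath9k_hw_set_interrupts(ah, 0);

	/* ... touch hardware without interrupts firing ... */

	/* Restore exactly what was enabled before. */
	ath9k_hw_set_interrupts(ah, saved);
}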
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 29851e6376a9..00f3e0c7528a 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -37,6 +37,8 @@
AR_2040_##_index : 0) \
|((_series)[_index].RateFlags & ATH9K_RATESERIES_HALFGI ? \
AR_GI##_index : 0) \
+ |((_series)[_index].RateFlags & ATH9K_RATESERIES_STBC ? \
+ AR_STBC##_index : 0) \
|SM((_series)[_index].ChSel, AR_ChainSel##_index))
#define CCK_SIFS_TIME 10
@@ -86,7 +88,6 @@
#define ATH9K_TX_DESC_CFG_ERR 0x04
#define ATH9K_TX_DATA_UNDERRUN 0x08
#define ATH9K_TX_DELIM_UNDERRUN 0x10
-#define ATH9K_TX_SW_ABORTED 0x40
#define ATH9K_TX_SW_FILTERED 0x80
/* 64 bytes */
@@ -117,7 +118,10 @@ struct ath_tx_status {
int8_t ts_rssi_ext0;
int8_t ts_rssi_ext1;
int8_t ts_rssi_ext2;
- u8 pad[3];
+ u8 qid;
+ u16 desc_id;
+ u8 tid;
+ u8 pad[2];
u32 ba_low;
u32 ba_high;
u32 evm0;
@@ -148,6 +152,34 @@ struct ath_rx_status {
u32 evm0;
u32 evm1;
u32 evm2;
+ u32 evm3;
+ u32 evm4;
+};
+
+struct ath_htc_rx_status {
+ __be64 rs_tstamp;
+ __be16 rs_datalen;
+ u8 rs_status;
+ u8 rs_phyerr;
+ int8_t rs_rssi;
+ int8_t rs_rssi_ctl0;
+ int8_t rs_rssi_ctl1;
+ int8_t rs_rssi_ctl2;
+ int8_t rs_rssi_ext0;
+ int8_t rs_rssi_ext1;
+ int8_t rs_rssi_ext2;
+ u8 rs_keyix;
+ u8 rs_rate;
+ u8 rs_antenna;
+ u8 rs_more;
+ u8 rs_isaggr;
+ u8 rs_moreaggr;
+ u8 rs_num_delims;
+ u8 rs_flags;
+ u8 rs_dummy;
+ __be32 evm0;
+ __be32 evm1;
+ __be32 evm2;
};
#define ATH9K_RXERR_CRC 0x01
@@ -207,18 +239,9 @@ struct ath_desc {
u32 ds_ctl0;
u32 ds_ctl1;
u32 ds_hw[20];
- union {
- struct ath_tx_status tx;
- struct ath_rx_status rx;
- void *stats;
- } ds_us;
void *ds_vdata;
} __packed;
-#define ds_txstat ds_us.tx
-#define ds_rxstat ds_us.rx
-#define ds_stat ds_us.stats
-
#define ATH9K_TXDESC_CLRDMASK 0x0001
#define ATH9K_TXDESC_NOACK 0x0002
#define ATH9K_TXDESC_RTSENA 0x0004
@@ -242,7 +265,8 @@ struct ath_desc {
#define ATH9K_TXDESC_EXT_AND_CTL 0x0080
#define ATH9K_TXDESC_VMF 0x0100
#define ATH9K_TXDESC_FRAG_IS_ON 0x0200
-#define ATH9K_TXDESC_CAB 0x0400
+#define ATH9K_TXDESC_LOWRXCHAIN 0x0400
+#define ATH9K_TXDESC_LDPC 0x00010000
#define ATH9K_RXDESC_INTREQ 0x0020
@@ -336,7 +360,6 @@ struct ar5416_desc {
#define AR_DestIdxValid 0x40000000
#define AR_CTSEnable 0x80000000
-#define AR_BufLen 0x00000fff
#define AR_TxMore 0x00001000
#define AR_DestIdx 0x000fe000
#define AR_DestIdx_S 13
@@ -393,6 +416,7 @@ struct ar5416_desc {
#define AR_EncrType 0x0c000000
#define AR_EncrType_S 26
#define AR_TxCtlRsvd61 0xf0000000
+#define AR_LDPC 0x80000000
#define AR_2040_0 0x00000001
#define AR_GI0 0x00000002
@@ -412,7 +436,10 @@ struct ar5416_desc {
#define AR_ChainSel3_S 17
#define AR_RTSCTSRate 0x0ff00000
#define AR_RTSCTSRate_S 20
-#define AR_TxCtlRsvd70 0xf0000000
+#define AR_STBC0 0x10000000
+#define AR_STBC1 0x20000000
+#define AR_STBC2 0x40000000
+#define AR_STBC3 0x80000000
#define AR_TxRSSIAnt00 0x000000ff
#define AR_TxRSSIAnt00_S 0
@@ -476,7 +503,6 @@ struct ar5416_desc {
#define AR_RxCTLRsvd00 0xffffffff
-#define AR_BufLen 0x00000fff
#define AR_RxCtlRsvd00 0x00001000
#define AR_RxIntrReq 0x00002000
#define AR_RxCtlRsvd01 0xffffc000
@@ -626,6 +652,7 @@ enum ath9k_rx_filter {
#define ATH9K_RATESERIES_RTS_CTS 0x0001
#define ATH9K_RATESERIES_2040 0x0002
#define ATH9K_RATESERIES_HALFGI 0x0004
+#define ATH9K_RATESERIES_STBC 0x0008
struct ath9k_11n_rate_series {
u32 Tries;
@@ -669,33 +696,10 @@ struct ath9k_channel;
u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q);
void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp);
void ath9k_hw_txstart(struct ath_hw *ah, u32 q);
+void ath9k_hw_cleartxdesc(struct ath_hw *ah, void *ds);
u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q);
bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel);
bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q);
-void ath9k_hw_filltxdesc(struct ath_hw *ah, struct ath_desc *ds,
- u32 segLen, bool firstSeg,
- bool lastSeg, const struct ath_desc *ds0);
-void ath9k_hw_cleartxdesc(struct ath_hw *ah, struct ath_desc *ds);
-int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds);
-void ath9k_hw_set11n_txdesc(struct ath_hw *ah, struct ath_desc *ds,
- u32 pktLen, enum ath9k_pkt_type type, u32 txPower,
- u32 keyIx, enum ath9k_key_type keyType, u32 flags);
-void ath9k_hw_set11n_ratescenario(struct ath_hw *ah, struct ath_desc *ds,
- struct ath_desc *lastds,
- u32 durUpdateEn, u32 rtsctsRate,
- u32 rtsctsDuration,
- struct ath9k_11n_rate_series series[],
- u32 nseries, u32 flags);
-void ath9k_hw_set11n_aggr_first(struct ath_hw *ah, struct ath_desc *ds,
- u32 aggrLen);
-void ath9k_hw_set11n_aggr_middle(struct ath_hw *ah, struct ath_desc *ds,
- u32 numDelims);
-void ath9k_hw_set11n_aggr_last(struct ath_hw *ah, struct ath_desc *ds);
-void ath9k_hw_clr11n_aggr(struct ath_hw *ah, struct ath_desc *ds);
-void ath9k_hw_set11n_burstduration(struct ath_hw *ah, struct ath_desc *ds,
- u32 burstDuration);
-void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, struct ath_desc *ds,
- u32 vmf);
void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs);
bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
const struct ath9k_tx_queue_info *qinfo);
@@ -706,15 +710,22 @@ int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q);
bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q);
int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
- u32 pa, struct ath_desc *nds, u64 tsf);
+ struct ath_rx_status *rs, u64 tsf);
void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
u32 size, u32 flags);
bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set);
void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp);
-void ath9k_hw_rxena(struct ath_hw *ah);
void ath9k_hw_startpcureceive(struct ath_hw *ah);
void ath9k_hw_stoppcurecv(struct ath_hw *ah);
+void ath9k_hw_abortpcurecv(struct ath_hw *ah);
bool ath9k_hw_stopdmarecv(struct ath_hw *ah);
int ath9k_hw_beaconq_setup(struct ath_hw *ah);
+/* Interrupt Handling */
+bool ath9k_hw_intrpend(struct ath_hw *ah);
+enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah,
+ enum ath9k_int ints);
+
+void ar9002_hw_attach_mac_ops(struct ath_hw *ah);
+
#endif /* MAC_H */
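With the ds_us union gone from struct ath_desc, RX status no longer lives inside the descriptor: callers keep their own struct ath_rx_status and hand it to ath9k_hw_rxprocdesc() through the new parameter. A rough caller sketch, assuming the usual ath9k driver includes (rx_poll_example() is a hypothetical name, not from this patch):

static int rx_poll_example(struct ath_hw *ah, struct ath_desc *ds, u64 tsf)
{
	struct ath_rx_status rs;	/* caller-owned; replaces the old ds->ds_rxstat */
	int ret;

	ret = ath9k_hw_rxprocdesc(ah, ds, &rs, tsf);
	if (ret)
		return ret;	/* non-zero: descriptor not done (early-out not shown in this hunk) */

	if (rs.rs_status & ATH9K_RXERR_CRC)
		return -EIO;	/* hypothetical caller policy for CRC errors */

	return rs.rs_datalen;
}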
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 115e1aeedb59..893b552981a0 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -225,7 +225,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
ath_cache_conf_rate(sc, &hw->conf);
ath_update_txpow(sc);
- ath9k_hw_set_interrupts(ah, sc->imask);
+ ath9k_hw_set_interrupts(ah, ah->imask);
ps_restore:
ath9k_ps_restore(sc);
@@ -401,23 +401,41 @@ void ath9k_tasklet(unsigned long data)
struct ath_common *common = ath9k_hw_common(ah);
u32 status = sc->intrstatus;
+ u32 rxmask;
ath9k_ps_wakeup(sc);
- if (status & ATH9K_INT_FATAL) {
+ if ((status & ATH9K_INT_FATAL) ||
+ !ath9k_hw_check_alive(ah)) {
ath_reset(sc, false);
ath9k_ps_restore(sc);
return;
}
- if (status & (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) {
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+ rxmask = (ATH9K_INT_RXHP | ATH9K_INT_RXLP | ATH9K_INT_RXEOL |
+ ATH9K_INT_RXORN);
+ else
+ rxmask = (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
+
+ if (status & rxmask) {
spin_lock_bh(&sc->rx.rxflushlock);
- ath_rx_tasklet(sc, 0);
+
+ /* Check for high priority Rx first */
+ if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
+ (status & ATH9K_INT_RXHP))
+ ath_rx_tasklet(sc, 0, true);
+
+ ath_rx_tasklet(sc, 0, false);
spin_unlock_bh(&sc->rx.rxflushlock);
}
- if (status & ATH9K_INT_TX)
- ath_tx_tasklet(sc);
+ if (status & ATH9K_INT_TX) {
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+ ath_tx_edma_tasklet(sc);
+ else
+ ath_tx_tasklet(sc);
+ }
if ((status & ATH9K_INT_TSFOOR) && sc->ps_enabled) {
/*
@@ -434,7 +452,7 @@ void ath9k_tasklet(unsigned long data)
ath_gen_timer_isr(sc->sc_ah);
/* re-enable hardware interrupt */
- ath9k_hw_set_interrupts(ah, sc->imask);
+ ath9k_hw_set_interrupts(ah, ah->imask);
ath9k_ps_restore(sc);
}
@@ -445,6 +463,8 @@ irqreturn_t ath_isr(int irq, void *dev)
ATH9K_INT_RXORN | \
ATH9K_INT_RXEOL | \
ATH9K_INT_RX | \
+ ATH9K_INT_RXLP | \
+ ATH9K_INT_RXHP | \
ATH9K_INT_TX | \
ATH9K_INT_BMISS | \
ATH9K_INT_CST | \
@@ -477,7 +497,7 @@ irqreturn_t ath_isr(int irq, void *dev)
* value to ensure we only process bits we requested.
*/
ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */
- status &= sc->imask; /* discard unasked-for bits */
+ status &= ah->imask; /* discard unasked-for bits */
/*
* If there are no status bits set, then this interrupt was not
@@ -496,7 +516,8 @@ irqreturn_t ath_isr(int irq, void *dev)
* If a FATAL or RXORN interrupt is received, we have to reset the
* chip immediately.
*/
- if (status & (ATH9K_INT_FATAL | ATH9K_INT_RXORN))
+ if ((status & ATH9K_INT_FATAL) || ((status & ATH9K_INT_RXORN) &&
+ !(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)))
goto chip_reset;
if (status & ATH9K_INT_SWBA)
@@ -505,6 +526,13 @@ irqreturn_t ath_isr(int irq, void *dev)
if (status & ATH9K_INT_TXURN)
ath9k_hw_updatetxtriglevel(ah, true);
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ if (status & ATH9K_INT_RXEOL) {
+ ah->imask &= ~(ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
+ ath9k_hw_set_interrupts(ah, ah->imask);
+ }
+ }
+
if (status & ATH9K_INT_MIB) {
/*
* Disable interrupts until we service the MIB
@@ -518,7 +546,7 @@ irqreturn_t ath_isr(int irq, void *dev)
* the interrupt.
*/
ath9k_hw_procmibevent(ah);
- ath9k_hw_set_interrupts(ah, sc->imask);
+ ath9k_hw_set_interrupts(ah, ah->imask);
}
if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
@@ -536,7 +564,7 @@ chip_reset:
if (sched) {
/* turn off every interrupt except SWBA */
- ath9k_hw_set_interrupts(ah, (sc->imask & ATH9K_INT_SWBA));
+ ath9k_hw_set_interrupts(ah, (ah->imask & ATH9K_INT_SWBA));
tasklet_schedule(&sc->intr_tq);
}
@@ -724,6 +752,7 @@ static int ath_key_config(struct ath_common *common,
struct ath_hw *ah = common->ah;
struct ath9k_keyval hk;
const u8 *mac = NULL;
+ u8 gmac[ETH_ALEN];
int ret = 0;
int idx;
@@ -747,9 +776,30 @@ static int ath_key_config(struct ath_common *common,
memcpy(hk.kv_val, key->key, key->keylen);
if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
- /* For now, use the default keys for broadcast keys. This may
- * need to change with virtual interfaces. */
- idx = key->keyidx;
+
+ if (key->ap_addr) {
+ /*
+ * Group keys on hardware that supports multicast frame
+ * key search use a mac that is the sender's address with
+ * the high bit set instead of the app-specified address.
+ */
+ memcpy(gmac, key->ap_addr, ETH_ALEN);
+ gmac[0] |= 0x80;
+ mac = gmac;
+
+ if (key->alg == ALG_TKIP)
+ idx = ath_reserve_key_cache_slot_tkip(common);
+ else
+ idx = ath_reserve_key_cache_slot(common);
+ if (idx < 0)
+ mac = NULL; /* no free key cache entries */
+ }
+
+ if (!mac) {
+ /* For now, use the default keys for broadcast keys. This may
+ * need to change with virtual interfaces. */
+ idx = key->keyidx;
+ }
} else if (key->keyidx) {
if (WARN_ON(!sta))
return -EOPNOTSUPP;
@@ -887,7 +937,7 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
ath_beacon_config(sc, NULL); /* restart beacons */
/* Re-Enable interrupts */
- ath9k_hw_set_interrupts(ah, sc->imask);
+ ath9k_hw_set_interrupts(ah, ah->imask);
/* Enable LED */
ath9k_hw_cfg_output(ah, ah->led_pin,
@@ -977,7 +1027,7 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
if (sc->sc_flags & SC_OP_BEACONS)
ath_beacon_config(sc, NULL); /* restart beacons */
- ath9k_hw_set_interrupts(ah, sc->imask);
+ ath9k_hw_set_interrupts(ah, ah->imask);
if (retry_tx) {
int i;
@@ -1162,23 +1212,28 @@ static int ath9k_start(struct ieee80211_hw *hw)
}
/* Setup our intr mask. */
- sc->imask = ATH9K_INT_RX | ATH9K_INT_TX
- | ATH9K_INT_RXEOL | ATH9K_INT_RXORN
- | ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;
+ ah->imask = ATH9K_INT_TX | ATH9K_INT_RXEOL |
+ ATH9K_INT_RXORN | ATH9K_INT_FATAL |
+ ATH9K_INT_GLOBAL;
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+ ah->imask |= ATH9K_INT_RXHP | ATH9K_INT_RXLP;
+ else
+ ah->imask |= ATH9K_INT_RX;
if (ah->caps.hw_caps & ATH9K_HW_CAP_GTT)
- sc->imask |= ATH9K_INT_GTT;
+ ah->imask |= ATH9K_INT_GTT;
if (ah->caps.hw_caps & ATH9K_HW_CAP_HT)
- sc->imask |= ATH9K_INT_CST;
+ ah->imask |= ATH9K_INT_CST;
ath_cache_conf_rate(sc, &hw->conf);
sc->sc_flags &= ~SC_OP_INVALID;
/* Disable BMISS interrupt when we're not associated */
- sc->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
- ath9k_hw_set_interrupts(ah, sc->imask);
+ ah->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
+ ath9k_hw_set_interrupts(ah, ah->imask);
ieee80211_wake_queues(hw);
@@ -1372,14 +1427,15 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
{
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
struct ath_vif *avp = (void *)vif->drv_priv;
enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED;
int ret = 0;
mutex_lock(&sc->mutex);
- if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) &&
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) &&
sc->nvifs > 0) {
ret = -ENOBUFS;
goto out;
@@ -1414,19 +1470,19 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
sc->nvifs++;
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
ath9k_set_bssid_mask(hw);
if (sc->nvifs > 1)
goto out; /* skip global settings for secondary vif */
if (ic_opmode == NL80211_IFTYPE_AP) {
- ath9k_hw_set_tsfadjust(sc->sc_ah, 1);
+ ath9k_hw_set_tsfadjust(ah, 1);
sc->sc_flags |= SC_OP_TSF_RESET;
}
/* Set the device opmode */
- sc->sc_ah->opmode = ic_opmode;
+ ah->opmode = ic_opmode;
/*
* Enable MIB interrupts when there are hardware phy counters.
@@ -1435,11 +1491,12 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
if ((vif->type == NL80211_IFTYPE_STATION) ||
(vif->type == NL80211_IFTYPE_ADHOC) ||
(vif->type == NL80211_IFTYPE_MESH_POINT)) {
- sc->imask |= ATH9K_INT_MIB;
- sc->imask |= ATH9K_INT_TSFOOR;
+ if (ah->config.enable_ani)
+ ah->imask |= ATH9K_INT_MIB;
+ ah->imask |= ATH9K_INT_TSFOOR;
}
- ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
+ ath9k_hw_set_interrupts(ah, ah->imask);
if (vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_ADHOC ||
@@ -1495,15 +1552,16 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
void ath9k_enable_ps(struct ath_softc *sc)
{
+ struct ath_hw *ah = sc->sc_ah;
+
sc->ps_enabled = true;
- if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
- if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
- sc->imask |= ATH9K_INT_TIM_TIMER;
- ath9k_hw_set_interrupts(sc->sc_ah,
- sc->imask);
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
+ if ((ah->imask & ATH9K_INT_TIM_TIMER) == 0) {
+ ah->imask |= ATH9K_INT_TIM_TIMER;
+ ath9k_hw_set_interrupts(ah, ah->imask);
}
}
- ath9k_hw_setrxabort(sc->sc_ah, 1);
+ ath9k_hw_setrxabort(ah, 1);
}
static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
@@ -1579,10 +1637,10 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
PS_WAIT_FOR_CAB |
PS_WAIT_FOR_PSPOLL_DATA |
PS_WAIT_FOR_TX_ACK);
- if (sc->imask & ATH9K_INT_TIM_TIMER) {
- sc->imask &= ~ATH9K_INT_TIM_TIMER;
+ if (ah->imask & ATH9K_INT_TIM_TIMER) {
+ ah->imask &= ~ATH9K_INT_TIM_TIMER;
ath9k_hw_set_interrupts(sc->sc_ah,
- sc->imask);
+ ah->imask);
}
}
}
@@ -1986,6 +2044,25 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
return ret;
}
+static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct ath_wiphy *aphy = hw->priv;
+ struct ath_softc *sc = aphy->sc;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_conf *conf = &hw->conf;
+
+ if (idx != 0)
+ return -ENOENT;
+
+ survey->channel = conf->channel;
+ survey->filled = SURVEY_INFO_NOISE_DBM;
+ survey->noise = common->ani.noise_floor;
+
+ return 0;
+}
+
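ath9k_get_survey() backs mac80211's survey dump (what "iw dev <iface> survey dump" ends up calling); the core asks with idx = 0, 1, ... until an error is returned, and this driver only ever reports the ANI noise floor for the current channel. A hedged illustration of that iteration (dump_surveys() is a hypothetical consumer, not part of the patch):

static void dump_surveys(struct ieee80211_hw *hw)
{
	struct survey_info survey;
	int idx;

	for (idx = 0; ; idx++) {
		if (ath9k_get_survey(hw, idx, &survey))
			break;	/* -ENOENT once the single entry is exhausted */
		if (survey.filled & SURVEY_INFO_NOISE_DBM)
			printk(KERN_INFO "survey %d: noise %d dBm\n",
			       idx, survey.noise);
	}
}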
static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
{
struct ath_wiphy *aphy = hw->priv;
@@ -2057,6 +2134,7 @@ struct ieee80211_ops ath9k_ops = {
.set_tsf = ath9k_set_tsf,
.reset_tsf = ath9k_reset_tsf,
.ampdu_action = ath9k_ampdu_action,
+ .get_survey = ath9k_get_survey,
.sw_scan_start = ath9k_sw_scan_start,
.sw_scan_complete = ath9k_sw_scan_complete,
.rfkill_poll = ath9k_rfkill_poll_state,
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 9441c6718a30..257b10ba6f57 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -28,6 +28,7 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
{ PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */
{ PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */
+ { PCI_VDEVICE(ATHEROS, 0x0030) }, /* PCI-E AR9300 */
{ 0 }
};
@@ -88,6 +89,7 @@ static void ath_pci_bt_coex_prep(struct ath_common *common)
}
static const struct ath_bus_ops ath_pci_bus_ops = {
+ .ath_bus_type = ATH_PCI,
.read_cachesize = ath_pci_read_cachesize,
.eeprom_read = ath_pci_eeprom_read,
.bt_coex_prep = ath_pci_bt_coex_prep,
diff --git a/drivers/net/wireless/ath/ath9k/phy.c b/drivers/net/wireless/ath/ath9k/phy.c
deleted file mode 100644
index 2547b3c4a26c..000000000000
--- a/drivers/net/wireless/ath/ath9k/phy.c
+++ /dev/null
@@ -1,978 +0,0 @@
-/*
- * Copyright (c) 2008-2009 Atheros Communications Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-/**
- * DOC: Programming Atheros 802.11n analog front end radios
- *
- * AR5416 MAC based PCI devices and AR5418 MAC based PCI-Express
- * devices have either an external AR2133 analog front end radio for single
- * band 2.4 GHz communication or an AR5133 analog front end radio for dual
- * band 2.4 GHz / 5 GHz communication.
- *
- * All devices after the AR5416 and AR5418 family starting with the AR9280
- * have their analog front end radios, MAC/BB and host PCIe/USB interface embedded
- * into a single chip and require less programming.
- *
- * The following single-chips exist with a respective embedded radio:
- *
- * AR9280 - 11n dual-band 2x2 MIMO for PCIe
- * AR9281 - 11n single-band 1x2 MIMO for PCIe
- * AR9285 - 11n single-band 1x1 for PCIe
- * AR9287 - 11n single-band 2x2 MIMO for PCIe
- *
- * AR9220 - 11n dual-band 2x2 MIMO for PCI
- * AR9223 - 11n single-band 2x2 MIMO for PCI
- *
- * AR9271 - 11n single-band 1x1 MIMO for USB
- */
-
-#include <linux/slab.h>
-
-#include "hw.h"
-
-/**
- * ath9k_hw_write_regs - ??
- *
- * @ah: atheros hardware structure
- * @freqIndex:
- * @regWrites:
- *
- * Used for both the chipsets with an external AR2133/AR5133 radios and
- * single-chip devices.
- */
-void ath9k_hw_write_regs(struct ath_hw *ah, u32 freqIndex, int regWrites)
-{
- REG_WRITE_ARRAY(&ah->iniBB_RfGain, freqIndex, regWrites);
-}
-
-/**
- * ath9k_hw_ar9280_set_channel - set channel on single-chip device
- * @ah: atheros hardware structure
- * @chan:
- *
- * This is the function to change channel on single-chip devices, that is,
- * all devices after the AR9280.
- *
- * This function takes the channel value in MHz and sets the
- * hardware channel value. Assumes writes to the analog bus have been enabled.
- *
- * Actual Expression,
- *
- * For 2GHz channel,
- * Channel Frequency = (3/4) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17)
- * (freq_ref = 40MHz)
- *
- * For 5GHz channel,
- * Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^10)
- * (freq_ref = 40MHz/(24>>amodeRefSel))
- */
-int ath9k_hw_ar9280_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
-{
- u16 bMode, fracMode, aModeRefSel = 0;
- u32 freq, ndiv, channelSel = 0, channelFrac = 0, reg32 = 0;
- struct chan_centers centers;
- u32 refDivA = 24;
-
- ath9k_hw_get_channel_centers(ah, chan, &centers);
- freq = centers.synth_center;
-
- reg32 = REG_READ(ah, AR_PHY_SYNTH_CONTROL);
- reg32 &= 0xc0000000;
-
- if (freq < 4800) { /* 2 GHz, fractional mode */
- u32 txctl;
- int regWrites = 0;
-
- bMode = 1;
- fracMode = 1;
- aModeRefSel = 0;
- channelSel = (freq * 0x10000) / 15;
-
- if (AR_SREV_9287_11_OR_LATER(ah)) {
- if (freq == 2484) {
- /* Enable channel spreading for channel 14 */
- REG_WRITE_ARRAY(&ah->iniCckfirJapan2484,
- 1, regWrites);
- } else {
- REG_WRITE_ARRAY(&ah->iniCckfirNormal,
- 1, regWrites);
- }
- } else {
- txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
- if (freq == 2484) {
- /* Enable channel spreading for channel 14 */
- REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
- txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
- } else {
- REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
- txctl &~ AR_PHY_CCK_TX_CTRL_JAPAN);
- }
- }
- } else {
- bMode = 0;
- fracMode = 0;
-
- switch(ah->eep_ops->get_eeprom(ah, EEP_FRAC_N_5G)) {
- case 0:
- if ((freq % 20) == 0) {
- aModeRefSel = 3;
- } else if ((freq % 10) == 0) {
- aModeRefSel = 2;
- }
- if (aModeRefSel)
- break;
- case 1:
- default:
- aModeRefSel = 0;
- /*
- * Enable 2G (fractional) mode for channels
- * which are 5MHz spaced.
- */
- fracMode = 1;
- refDivA = 1;
- channelSel = (freq * 0x8000) / 15;
-
- /* RefDivA setting */
- REG_RMW_FIELD(ah, AR_AN_SYNTH9,
- AR_AN_SYNTH9_REFDIVA, refDivA);
-
- }
-
- if (!fracMode) {
- ndiv = (freq * (refDivA >> aModeRefSel)) / 60;
- channelSel = ndiv & 0x1ff;
- channelFrac = (ndiv & 0xfffffe00) * 2;
- channelSel = (channelSel << 17) | channelFrac;
- }
- }
-
- reg32 = reg32 |
- (bMode << 29) |
- (fracMode << 28) | (aModeRefSel << 26) | (channelSel);
-
- REG_WRITE(ah, AR_PHY_SYNTH_CONTROL, reg32);
-
- ah->curchan = chan;
- ah->curchan_rad_index = -1;
-
- return 0;
-}
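The fractional-mode expression in the comment above is what the constant in channelSel = (freq * 0x10000) / 15 encodes, and what the new CHANSEL_2G() macro added to phy.h keeps: the programmed word is freq * 2^17 / 30, so the synthesizer reproduces (3/4) * 40 MHz * word / 2^17. A small stand-alone check, purely illustrative and not part of the driver:

#include <stdio.h>

int main(void)
{
	unsigned int freq = 2412;			/* 2.4 GHz channel 1, in MHz */
	unsigned int word = (freq * 0x10000) / 15;	/* same as CHANSEL_2G(freq) */

	/* Invert the programming formula: f = (3/4) * 40 MHz * word / 2^17 */
	double tuned = 30.0 * word / 131072.0;

	printf("word=0x%x tuned=%.4f MHz\n", word, tuned);	/* ~2412 MHz */
	return 0;
}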
-
-/**
- * ath9k_hw_9280_spur_mitigate - convert baseband spur frequency
- * @ah: atheros hardware structure
- * @chan:
- *
- * For single-chip solutions. Converts to baseband spur frequency given the
- * input channel frequency and compute register settings below.
- */
-void ath9k_hw_9280_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan)
-{
- int bb_spur = AR_NO_SPUR;
- int freq;
- int bin, cur_bin;
- int bb_spur_off, spur_subchannel_sd;
- int spur_freq_sd;
- int spur_delta_phase;
- int denominator;
- int upper, lower, cur_vit_mask;
- int tmp, newVal;
- int i;
- int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
- AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
- };
- int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
- AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
- };
- int inc[4] = { 0, 100, 0, 0 };
- struct chan_centers centers;
-
- int8_t mask_m[123];
- int8_t mask_p[123];
- int8_t mask_amt;
- int tmp_mask;
- int cur_bb_spur;
- bool is2GHz = IS_CHAN_2GHZ(chan);
-
- memset(&mask_m, 0, sizeof(int8_t) * 123);
- memset(&mask_p, 0, sizeof(int8_t) * 123);
-
- ath9k_hw_get_channel_centers(ah, chan, &centers);
- freq = centers.synth_center;
-
- ah->config.spurmode = SPUR_ENABLE_EEPROM;
- for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
- cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
-
- if (is2GHz)
- cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_2GHZ;
- else
- cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_5GHZ;
-
- if (AR_NO_SPUR == cur_bb_spur)
- break;
- cur_bb_spur = cur_bb_spur - freq;
-
- if (IS_CHAN_HT40(chan)) {
- if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT40) &&
- (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT40)) {
- bb_spur = cur_bb_spur;
- break;
- }
- } else if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT20) &&
- (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT20)) {
- bb_spur = cur_bb_spur;
- break;
- }
- }
-
- if (AR_NO_SPUR == bb_spur) {
- REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
- AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
- return;
- } else {
- REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
- AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
- }
-
- bin = bb_spur * 320;
-
- tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
-
- newVal = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
- AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
- AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
- AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
- REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), newVal);
-
- newVal = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
- AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
- AR_PHY_SPUR_REG_MASK_RATE_SELECT |
- AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
- SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
- REG_WRITE(ah, AR_PHY_SPUR_REG, newVal);
-
- if (IS_CHAN_HT40(chan)) {
- if (bb_spur < 0) {
- spur_subchannel_sd = 1;
- bb_spur_off = bb_spur + 10;
- } else {
- spur_subchannel_sd = 0;
- bb_spur_off = bb_spur - 10;
- }
- } else {
- spur_subchannel_sd = 0;
- bb_spur_off = bb_spur;
- }
-
- if (IS_CHAN_HT40(chan))
- spur_delta_phase =
- ((bb_spur * 262144) /
- 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
- else
- spur_delta_phase =
- ((bb_spur * 524288) /
- 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
-
- denominator = IS_CHAN_2GHZ(chan) ? 44 : 40;
- spur_freq_sd = ((bb_spur_off * 2048) / denominator) & 0x3ff;
-
- newVal = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
- SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
- SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
- REG_WRITE(ah, AR_PHY_TIMING11, newVal);
-
- newVal = spur_subchannel_sd << AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S;
- REG_WRITE(ah, AR_PHY_SFCORR_EXT, newVal);
-
- cur_bin = -6000;
- upper = bin + 100;
- lower = bin - 100;
-
- for (i = 0; i < 4; i++) {
- int pilot_mask = 0;
- int chan_mask = 0;
- int bp = 0;
- for (bp = 0; bp < 30; bp++) {
- if ((cur_bin > lower) && (cur_bin < upper)) {
- pilot_mask = pilot_mask | 0x1 << bp;
- chan_mask = chan_mask | 0x1 << bp;
- }
- cur_bin += 100;
- }
- cur_bin += inc[i];
- REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
- REG_WRITE(ah, chan_mask_reg[i], chan_mask);
- }
-
- cur_vit_mask = 6100;
- upper = bin + 120;
- lower = bin - 120;
-
- for (i = 0; i < 123; i++) {
- if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
-
- /* workaround for gcc bug #37014 */
- volatile int tmp_v = abs(cur_vit_mask - bin);
-
- if (tmp_v < 75)
- mask_amt = 1;
- else
- mask_amt = 0;
- if (cur_vit_mask < 0)
- mask_m[abs(cur_vit_mask / 100)] = mask_amt;
- else
- mask_p[cur_vit_mask / 100] = mask_amt;
- }
- cur_vit_mask -= 100;
- }
-
- tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
- | (mask_m[48] << 26) | (mask_m[49] << 24)
- | (mask_m[50] << 22) | (mask_m[51] << 20)
- | (mask_m[52] << 18) | (mask_m[53] << 16)
- | (mask_m[54] << 14) | (mask_m[55] << 12)
- | (mask_m[56] << 10) | (mask_m[57] << 8)
- | (mask_m[58] << 6) | (mask_m[59] << 4)
- | (mask_m[60] << 2) | (mask_m[61] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
- REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
-
- tmp_mask = (mask_m[31] << 28)
- | (mask_m[32] << 26) | (mask_m[33] << 24)
- | (mask_m[34] << 22) | (mask_m[35] << 20)
- | (mask_m[36] << 18) | (mask_m[37] << 16)
- | (mask_m[48] << 14) | (mask_m[39] << 12)
- | (mask_m[40] << 10) | (mask_m[41] << 8)
- | (mask_m[42] << 6) | (mask_m[43] << 4)
- | (mask_m[44] << 2) | (mask_m[45] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
-
- tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
- | (mask_m[18] << 26) | (mask_m[18] << 24)
- | (mask_m[20] << 22) | (mask_m[20] << 20)
- | (mask_m[22] << 18) | (mask_m[22] << 16)
- | (mask_m[24] << 14) | (mask_m[24] << 12)
- | (mask_m[25] << 10) | (mask_m[26] << 8)
- | (mask_m[27] << 6) | (mask_m[28] << 4)
- | (mask_m[29] << 2) | (mask_m[30] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
-
- tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
- | (mask_m[2] << 26) | (mask_m[3] << 24)
- | (mask_m[4] << 22) | (mask_m[5] << 20)
- | (mask_m[6] << 18) | (mask_m[7] << 16)
- | (mask_m[8] << 14) | (mask_m[9] << 12)
- | (mask_m[10] << 10) | (mask_m[11] << 8)
- | (mask_m[12] << 6) | (mask_m[13] << 4)
- | (mask_m[14] << 2) | (mask_m[15] << 0);
- REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
-
- tmp_mask = (mask_p[15] << 28)
- | (mask_p[14] << 26) | (mask_p[13] << 24)
- | (mask_p[12] << 22) | (mask_p[11] << 20)
- | (mask_p[10] << 18) | (mask_p[9] << 16)
- | (mask_p[8] << 14) | (mask_p[7] << 12)
- | (mask_p[6] << 10) | (mask_p[5] << 8)
- | (mask_p[4] << 6) | (mask_p[3] << 4)
- | (mask_p[2] << 2) | (mask_p[1] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
-
- tmp_mask = (mask_p[30] << 28)
- | (mask_p[29] << 26) | (mask_p[28] << 24)
- | (mask_p[27] << 22) | (mask_p[26] << 20)
- | (mask_p[25] << 18) | (mask_p[24] << 16)
- | (mask_p[23] << 14) | (mask_p[22] << 12)
- | (mask_p[21] << 10) | (mask_p[20] << 8)
- | (mask_p[19] << 6) | (mask_p[18] << 4)
- | (mask_p[17] << 2) | (mask_p[16] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
-
- tmp_mask = (mask_p[45] << 28)
- | (mask_p[44] << 26) | (mask_p[43] << 24)
- | (mask_p[42] << 22) | (mask_p[41] << 20)
- | (mask_p[40] << 18) | (mask_p[39] << 16)
- | (mask_p[38] << 14) | (mask_p[37] << 12)
- | (mask_p[36] << 10) | (mask_p[35] << 8)
- | (mask_p[34] << 6) | (mask_p[33] << 4)
- | (mask_p[32] << 2) | (mask_p[31] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
-
- tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
- | (mask_p[59] << 26) | (mask_p[58] << 24)
- | (mask_p[57] << 22) | (mask_p[56] << 20)
- | (mask_p[55] << 18) | (mask_p[54] << 16)
- | (mask_p[53] << 14) | (mask_p[52] << 12)
- | (mask_p[51] << 10) | (mask_p[50] << 8)
- | (mask_p[49] << 6) | (mask_p[48] << 4)
- | (mask_p[47] << 2) | (mask_p[46] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
-}
-
-/* All code below is for non single-chip solutions */
-
-/**
- * ath9k_phy_modify_rx_buffer() - perform analog swizzling of parameters
- * @rfbuf:
- * @reg32:
- * @numBits:
- * @firstBit:
- * @column:
- *
- * Performs analog "swizzling" of parameters into their location.
- * Used on external AR2133/AR5133 radios.
- */
-static void ath9k_phy_modify_rx_buffer(u32 *rfBuf, u32 reg32,
- u32 numBits, u32 firstBit,
- u32 column)
-{
- u32 tmp32, mask, arrayEntry, lastBit;
- int32_t bitPosition, bitsLeft;
-
- tmp32 = ath9k_hw_reverse_bits(reg32, numBits);
- arrayEntry = (firstBit - 1) / 8;
- bitPosition = (firstBit - 1) % 8;
- bitsLeft = numBits;
- while (bitsLeft > 0) {
- lastBit = (bitPosition + bitsLeft > 8) ?
- 8 : bitPosition + bitsLeft;
- mask = (((1 << lastBit) - 1) ^ ((1 << bitPosition) - 1)) <<
- (column * 8);
- rfBuf[arrayEntry] &= ~mask;
- rfBuf[arrayEntry] |= ((tmp32 << bitPosition) <<
- (column * 8)) & mask;
- bitsLeft -= 8 - bitPosition;
- tmp32 = tmp32 >> (8 - bitPosition);
- bitPosition = 0;
- arrayEntry++;
- }
-}
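To make the swizzling concrete: ath9k_hw_force_bias() below calls this with (numBits, firstBit, column) = (3, 181, 3), which lands the three reversed bias bits in bits 28..30 of rfBuf[22]. A tiny stand-alone check of that index/mask arithmetic (illustrative only, not driver code):

#include <stdio.h>

int main(void)
{
	unsigned int first_bit = 181, num_bits = 3, column = 3;
	unsigned int entry = (first_bit - 1) / 8;		/* -> 22 */
	unsigned int bit_pos = (first_bit - 1) % 8;		/* -> 4 */
	unsigned int last_bit = (bit_pos + num_bits > 8) ? 8 : bit_pos + num_bits;
	unsigned int mask = (((1u << last_bit) - 1) ^ ((1u << bit_pos) - 1)) << (column * 8);

	printf("entry=%u mask=0x%08x\n", entry, mask);	/* entry=22 mask=0x70000000 */
	return 0;
}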
-
-/*
- * Fix on 2.4 GHz band for orientation sensitivity issue by increasing
- * rf_pwd_icsyndiv.
- *
- * Theoretical Rules:
- * if 2 GHz band
- * if forceBiasAuto
- * if synth_freq < 2412
- * bias = 0
- * else if 2412 <= synth_freq <= 2422
- * bias = 1
- * else // synth_freq > 2422
- * bias = 2
- * else if forceBias > 0
- * bias = forceBias & 7
- * else
- * no change, use value from ini file
- * else
- * no change, invalid band
- *
- * 1st Mod:
- * 2422 also uses value of 2
- * <approved>
- *
- * 2nd Mod:
- * Less than 2412 uses value of 0, 2412 and above uses value of 2
- */
-static void ath9k_hw_force_bias(struct ath_hw *ah, u16 synth_freq)
-{
- struct ath_common *common = ath9k_hw_common(ah);
- u32 tmp_reg;
- int reg_writes = 0;
- u32 new_bias = 0;
-
- if (!AR_SREV_5416(ah) || synth_freq >= 3000) {
- return;
- }
-
- BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
-
- if (synth_freq < 2412)
- new_bias = 0;
- else if (synth_freq < 2422)
- new_bias = 1;
- else
- new_bias = 2;
-
- /* pre-reverse this field */
- tmp_reg = ath9k_hw_reverse_bits(new_bias, 3);
-
- ath_print(common, ATH_DBG_CONFIG,
- "Force rf_pwd_icsyndiv to %1d on %4d\n",
- new_bias, synth_freq);
-
- /* swizzle rf_pwd_icsyndiv */
- ath9k_phy_modify_rx_buffer(ah->analogBank6Data, tmp_reg, 3, 181, 3);
-
- /* write Bank 6 with new params */
- REG_WRITE_RF_ARRAY(&ah->iniBank6, ah->analogBank6Data, reg_writes);
-}
-
-/**
- * ath9k_hw_set_channel - tune to a channel on the external AR2133/AR5133 radios
- * @ah: atheros hardware structure
- * @chan:
- *
- * For the external AR2133/AR5133 radios, takes the MHz channel value and sets
- * the channel value. Assumes writes to the analog bus are enabled and the bank6
- * register cache is in ah->analogBank6Data.
- */
-int ath9k_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
-{
- struct ath_common *common = ath9k_hw_common(ah);
- u32 channelSel = 0;
- u32 bModeSynth = 0;
- u32 aModeRefSel = 0;
- u32 reg32 = 0;
- u16 freq;
- struct chan_centers centers;
-
- ath9k_hw_get_channel_centers(ah, chan, &centers);
- freq = centers.synth_center;
-
- if (freq < 4800) {
- u32 txctl;
-
- if (((freq - 2192) % 5) == 0) {
- channelSel = ((freq - 672) * 2 - 3040) / 10;
- bModeSynth = 0;
- } else if (((freq - 2224) % 5) == 0) {
- channelSel = ((freq - 704) * 2 - 3040) / 10;
- bModeSynth = 1;
- } else {
- ath_print(common, ATH_DBG_FATAL,
- "Invalid channel %u MHz\n", freq);
- return -EINVAL;
- }
-
- channelSel = (channelSel << 2) & 0xff;
- channelSel = ath9k_hw_reverse_bits(channelSel, 8);
-
- txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
- if (freq == 2484) {
-
- REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
- txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
- } else {
- REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
- txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN);
- }
-
- } else if ((freq % 20) == 0 && freq >= 5120) {
- channelSel =
- ath9k_hw_reverse_bits(((freq - 4800) / 20 << 2), 8);
- aModeRefSel = ath9k_hw_reverse_bits(1, 2);
- } else if ((freq % 10) == 0) {
- channelSel =
- ath9k_hw_reverse_bits(((freq - 4800) / 10 << 1), 8);
- if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah))
- aModeRefSel = ath9k_hw_reverse_bits(2, 2);
- else
- aModeRefSel = ath9k_hw_reverse_bits(1, 2);
- } else if ((freq % 5) == 0) {
- channelSel = ath9k_hw_reverse_bits((freq - 4800) / 5, 8);
- aModeRefSel = ath9k_hw_reverse_bits(1, 2);
- } else {
- ath_print(common, ATH_DBG_FATAL,
- "Invalid channel %u MHz\n", freq);
- return -EINVAL;
- }
-
- ath9k_hw_force_bias(ah, freq);
-
- reg32 =
- (channelSel << 8) | (aModeRefSel << 2) | (bModeSynth << 1) |
- (1 << 5) | 0x1;
-
- REG_WRITE(ah, AR_PHY(0x37), reg32);
-
- ah->curchan = chan;
- ah->curchan_rad_index = -1;
-
- return 0;
-}
-
-/**
- * ath9k_hw_spur_mitigate - convert baseband spur frequency for external radios
- * @ah: atheros hardware structure
- * @chan:
- *
- * For non-single-chip solutions. Converts to baseband spur frequency given the
- * input channel frequency and computes the register settings below.
- */
-void ath9k_hw_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan)
-{
- int bb_spur = AR_NO_SPUR;
- int bin, cur_bin;
- int spur_freq_sd;
- int spur_delta_phase;
- int denominator;
- int upper, lower, cur_vit_mask;
- int tmp, new;
- int i;
- int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
- AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
- };
- int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
- AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
- };
- int inc[4] = { 0, 100, 0, 0 };
-
- int8_t mask_m[123];
- int8_t mask_p[123];
- int8_t mask_amt;
- int tmp_mask;
- int cur_bb_spur;
- bool is2GHz = IS_CHAN_2GHZ(chan);
-
- memset(&mask_m, 0, sizeof(int8_t) * 123);
- memset(&mask_p, 0, sizeof(int8_t) * 123);
-
- for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
- cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
- if (AR_NO_SPUR == cur_bb_spur)
- break;
- cur_bb_spur = cur_bb_spur - (chan->channel * 10);
- if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) {
- bb_spur = cur_bb_spur;
- break;
- }
- }
-
- if (AR_NO_SPUR == bb_spur)
- return;
-
- bin = bb_spur * 32;
-
- tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
- new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
- AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
- AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
- AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
-
- REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new);
-
- new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
- AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
- AR_PHY_SPUR_REG_MASK_RATE_SELECT |
- AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
- SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
- REG_WRITE(ah, AR_PHY_SPUR_REG, new);
-
- spur_delta_phase = ((bb_spur * 524288) / 100) &
- AR_PHY_TIMING11_SPUR_DELTA_PHASE;
-
- denominator = IS_CHAN_2GHZ(chan) ? 440 : 400;
- spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff;
-
- new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
- SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
- SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
- REG_WRITE(ah, AR_PHY_TIMING11, new);
-
- cur_bin = -6000;
- upper = bin + 100;
- lower = bin - 100;
-
- for (i = 0; i < 4; i++) {
- int pilot_mask = 0;
- int chan_mask = 0;
- int bp = 0;
- for (bp = 0; bp < 30; bp++) {
- if ((cur_bin > lower) && (cur_bin < upper)) {
- pilot_mask = pilot_mask | 0x1 << bp;
- chan_mask = chan_mask | 0x1 << bp;
- }
- cur_bin += 100;
- }
- cur_bin += inc[i];
- REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
- REG_WRITE(ah, chan_mask_reg[i], chan_mask);
- }
-
- cur_vit_mask = 6100;
- upper = bin + 120;
- lower = bin - 120;
-
- for (i = 0; i < 123; i++) {
- if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
-
- /* workaround for gcc bug #37014 */
- volatile int tmp_v = abs(cur_vit_mask - bin);
-
- if (tmp_v < 75)
- mask_amt = 1;
- else
- mask_amt = 0;
- if (cur_vit_mask < 0)
- mask_m[abs(cur_vit_mask / 100)] = mask_amt;
- else
- mask_p[cur_vit_mask / 100] = mask_amt;
- }
- cur_vit_mask -= 100;
- }
-
- tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
- | (mask_m[48] << 26) | (mask_m[49] << 24)
- | (mask_m[50] << 22) | (mask_m[51] << 20)
- | (mask_m[52] << 18) | (mask_m[53] << 16)
- | (mask_m[54] << 14) | (mask_m[55] << 12)
- | (mask_m[56] << 10) | (mask_m[57] << 8)
- | (mask_m[58] << 6) | (mask_m[59] << 4)
- | (mask_m[60] << 2) | (mask_m[61] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
- REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
-
- tmp_mask = (mask_m[31] << 28)
- | (mask_m[32] << 26) | (mask_m[33] << 24)
- | (mask_m[34] << 22) | (mask_m[35] << 20)
- | (mask_m[36] << 18) | (mask_m[37] << 16)
- | (mask_m[48] << 14) | (mask_m[39] << 12)
- | (mask_m[40] << 10) | (mask_m[41] << 8)
- | (mask_m[42] << 6) | (mask_m[43] << 4)
- | (mask_m[44] << 2) | (mask_m[45] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
-
- tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
- | (mask_m[18] << 26) | (mask_m[18] << 24)
- | (mask_m[20] << 22) | (mask_m[20] << 20)
- | (mask_m[22] << 18) | (mask_m[22] << 16)
- | (mask_m[24] << 14) | (mask_m[24] << 12)
- | (mask_m[25] << 10) | (mask_m[26] << 8)
- | (mask_m[27] << 6) | (mask_m[28] << 4)
- | (mask_m[29] << 2) | (mask_m[30] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
-
- tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
- | (mask_m[2] << 26) | (mask_m[3] << 24)
- | (mask_m[4] << 22) | (mask_m[5] << 20)
- | (mask_m[6] << 18) | (mask_m[7] << 16)
- | (mask_m[8] << 14) | (mask_m[9] << 12)
- | (mask_m[10] << 10) | (mask_m[11] << 8)
- | (mask_m[12] << 6) | (mask_m[13] << 4)
- | (mask_m[14] << 2) | (mask_m[15] << 0);
- REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
-
- tmp_mask = (mask_p[15] << 28)
- | (mask_p[14] << 26) | (mask_p[13] << 24)
- | (mask_p[12] << 22) | (mask_p[11] << 20)
- | (mask_p[10] << 18) | (mask_p[9] << 16)
- | (mask_p[8] << 14) | (mask_p[7] << 12)
- | (mask_p[6] << 10) | (mask_p[5] << 8)
- | (mask_p[4] << 6) | (mask_p[3] << 4)
- | (mask_p[2] << 2) | (mask_p[1] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
-
- tmp_mask = (mask_p[30] << 28)
- | (mask_p[29] << 26) | (mask_p[28] << 24)
- | (mask_p[27] << 22) | (mask_p[26] << 20)
- | (mask_p[25] << 18) | (mask_p[24] << 16)
- | (mask_p[23] << 14) | (mask_p[22] << 12)
- | (mask_p[21] << 10) | (mask_p[20] << 8)
- | (mask_p[19] << 6) | (mask_p[18] << 4)
- | (mask_p[17] << 2) | (mask_p[16] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
-
- tmp_mask = (mask_p[45] << 28)
- | (mask_p[44] << 26) | (mask_p[43] << 24)
- | (mask_p[42] << 22) | (mask_p[41] << 20)
- | (mask_p[40] << 18) | (mask_p[39] << 16)
- | (mask_p[38] << 14) | (mask_p[37] << 12)
- | (mask_p[36] << 10) | (mask_p[35] << 8)
- | (mask_p[34] << 6) | (mask_p[33] << 4)
- | (mask_p[32] << 2) | (mask_p[31] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
-
- tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
- | (mask_p[59] << 26) | (mask_p[58] << 24)
- | (mask_p[57] << 22) | (mask_p[56] << 20)
- | (mask_p[55] << 18) | (mask_p[54] << 16)
- | (mask_p[53] << 14) | (mask_p[52] << 12)
- | (mask_p[51] << 10) | (mask_p[50] << 8)
- | (mask_p[49] << 6) | (mask_p[48] << 4)
- | (mask_p[47] << 2) | (mask_p[46] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
-}
-
-/**
- * ath9k_hw_rf_alloc_ext_banks - allocates banks for external radio programming
- * @ah: atheros hardware structure
- *
- * Only required for older devices with external AR2133/AR5133 radios.
- */
-int ath9k_hw_rf_alloc_ext_banks(struct ath_hw *ah)
-{
-#define ATH_ALLOC_BANK(bank, size) do { \
- bank = kzalloc((sizeof(u32) * size), GFP_KERNEL); \
- if (!bank) { \
- ath_print(common, ATH_DBG_FATAL, \
- "Cannot allocate RF banks\n"); \
- return -ENOMEM; \
- } \
- } while (0);
-
- struct ath_common *common = ath9k_hw_common(ah);
-
- BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
-
- ATH_ALLOC_BANK(ah->analogBank0Data, ah->iniBank0.ia_rows);
- ATH_ALLOC_BANK(ah->analogBank1Data, ah->iniBank1.ia_rows);
- ATH_ALLOC_BANK(ah->analogBank2Data, ah->iniBank2.ia_rows);
- ATH_ALLOC_BANK(ah->analogBank3Data, ah->iniBank3.ia_rows);
- ATH_ALLOC_BANK(ah->analogBank6Data, ah->iniBank6.ia_rows);
- ATH_ALLOC_BANK(ah->analogBank6TPCData, ah->iniBank6TPC.ia_rows);
- ATH_ALLOC_BANK(ah->analogBank7Data, ah->iniBank7.ia_rows);
- ATH_ALLOC_BANK(ah->addac5416_21,
- ah->iniAddac.ia_rows * ah->iniAddac.ia_columns);
- ATH_ALLOC_BANK(ah->bank6Temp, ah->iniBank6.ia_rows);
-
- return 0;
-#undef ATH_ALLOC_BANK
-}
-
-
-/**
- * ath9k_hw_rf_free_ext_banks - Free memory for analog bank scratch buffers
- * @ah: atheros hardware structure
- * For the external AR2133/AR5133 radio banks.
- */
-void
-ath9k_hw_rf_free_ext_banks(struct ath_hw *ah)
-{
-#define ATH_FREE_BANK(bank) do { \
- kfree(bank); \
- bank = NULL; \
- } while (0);
-
- BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
-
- ATH_FREE_BANK(ah->analogBank0Data);
- ATH_FREE_BANK(ah->analogBank1Data);
- ATH_FREE_BANK(ah->analogBank2Data);
- ATH_FREE_BANK(ah->analogBank3Data);
- ATH_FREE_BANK(ah->analogBank6Data);
- ATH_FREE_BANK(ah->analogBank6TPCData);
- ATH_FREE_BANK(ah->analogBank7Data);
- ATH_FREE_BANK(ah->addac5416_21);
- ATH_FREE_BANK(ah->bank6Temp);
-
-#undef ATH_FREE_BANK
-}
-
-/**
- * ath9k_hw_set_rf_regs - programs rf registers based on EEPROM
- * @ah: atheros hardware structure
- * @chan:
- * @modesIndex:
- *
- * Used for the external AR2133/AR5133 radios.
- *
- * Reads the EEPROM header info from the device structure and programs
- * all rf registers. This routine requires access to the analog
- * rf device. This is not required for single-chip devices.
- */
-bool ath9k_hw_set_rf_regs(struct ath_hw *ah, struct ath9k_channel *chan,
- u16 modesIndex)
-{
- u32 eepMinorRev;
- u32 ob5GHz = 0, db5GHz = 0;
- u32 ob2GHz = 0, db2GHz = 0;
- int regWrites = 0;
-
- /*
- * Software does not need to program bank data
- * for single chip devices, that is AR9280 or anything
- * after that.
- */
- if (AR_SREV_9280_10_OR_LATER(ah))
- return true;
-
- /* Setup rf parameters */
- eepMinorRev = ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV);
-
- /* Setup Bank 0 Write */
- RF_BANK_SETUP(ah->analogBank0Data, &ah->iniBank0, 1);
-
- /* Setup Bank 1 Write */
- RF_BANK_SETUP(ah->analogBank1Data, &ah->iniBank1, 1);
-
- /* Setup Bank 2 Write */
- RF_BANK_SETUP(ah->analogBank2Data, &ah->iniBank2, 1);
-
- /* Setup Bank 6 Write */
- RF_BANK_SETUP(ah->analogBank3Data, &ah->iniBank3,
- modesIndex);
- {
- int i;
- for (i = 0; i < ah->iniBank6TPC.ia_rows; i++) {
- ah->analogBank6Data[i] =
- INI_RA(&ah->iniBank6TPC, i, modesIndex);
- }
- }
-
- /* Only the 5 or 2 GHz OB/DB need to be set for a mode */
- if (eepMinorRev >= 2) {
- if (IS_CHAN_2GHZ(chan)) {
- ob2GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_2);
- db2GHz = ah->eep_ops->get_eeprom(ah, EEP_DB_2);
- ath9k_phy_modify_rx_buffer(ah->analogBank6Data,
- ob2GHz, 3, 197, 0);
- ath9k_phy_modify_rx_buffer(ah->analogBank6Data,
- db2GHz, 3, 194, 0);
- } else {
- ob5GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_5);
- db5GHz = ah->eep_ops->get_eeprom(ah, EEP_DB_5);
- ath9k_phy_modify_rx_buffer(ah->analogBank6Data,
- ob5GHz, 3, 203, 0);
- ath9k_phy_modify_rx_buffer(ah->analogBank6Data,
- db5GHz, 3, 200, 0);
- }
- }
-
- /* Setup Bank 7 Write */
- RF_BANK_SETUP(ah->analogBank7Data, &ah->iniBank7, 1);
-
- /* Write Analog registers */
- REG_WRITE_RF_ARRAY(&ah->iniBank0, ah->analogBank0Data,
- regWrites);
- REG_WRITE_RF_ARRAY(&ah->iniBank1, ah->analogBank1Data,
- regWrites);
- REG_WRITE_RF_ARRAY(&ah->iniBank2, ah->analogBank2Data,
- regWrites);
- REG_WRITE_RF_ARRAY(&ah->iniBank3, ah->analogBank3Data,
- regWrites);
- REG_WRITE_RF_ARRAY(&ah->iniBank6TPC, ah->analogBank6Data,
- regWrites);
- REG_WRITE_RF_ARRAY(&ah->iniBank7, ah->analogBank7Data,
- regWrites);
-
- return true;
-}
diff --git a/drivers/net/wireless/ath/ath9k/phy.h b/drivers/net/wireless/ath/ath9k/phy.h
index 0999a495fd46..e724c2c1ae2a 100644
--- a/drivers/net/wireless/ath/ath9k/phy.h
+++ b/drivers/net/wireless/ath/ath9k/phy.h
@@ -17,589 +17,25 @@
#ifndef PHY_H
#define PHY_H
-/* Common between single chip and non single-chip solutions */
-void ath9k_hw_write_regs(struct ath_hw *ah, u32 freqIndex, int regWrites);
-
-/* Single chip radio settings */
-int ath9k_hw_ar9280_set_channel(struct ath_hw *ah, struct ath9k_channel *chan);
-void ath9k_hw_9280_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan);
-
-/* Routines below are for non single-chip solutions */
-int ath9k_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan);
-void ath9k_hw_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan);
-
-int ath9k_hw_rf_alloc_ext_banks(struct ath_hw *ah);
-void ath9k_hw_rf_free_ext_banks(struct ath_hw *ah);
-
-bool ath9k_hw_set_rf_regs(struct ath_hw *ah,
- struct ath9k_channel *chan,
- u16 modesIndex);
+#define CHANSEL_DIV 15
+#define CHANSEL_2G(_freq) (((_freq) * 0x10000) / CHANSEL_DIV)
+#define CHANSEL_5G(_freq) (((_freq) * 0x8000) / CHANSEL_DIV)
#define AR_PHY_BASE 0x9800
#define AR_PHY(_n) (AR_PHY_BASE + ((_n)<<2))
-#define AR_PHY_TEST 0x9800
-#define PHY_AGC_CLR 0x10000000
-#define RFSILENT_BB 0x00002000
-
-#define AR_PHY_TURBO 0x9804
-#define AR_PHY_FC_TURBO_MODE 0x00000001
-#define AR_PHY_FC_TURBO_SHORT 0x00000002
-#define AR_PHY_FC_DYN2040_EN 0x00000004
-#define AR_PHY_FC_DYN2040_PRI_ONLY 0x00000008
-#define AR_PHY_FC_DYN2040_PRI_CH 0x00000010
-/* For 25 MHz channel spacing -- not used but supported by hw */
-#define AR_PHY_FC_DYN2040_EXT_CH 0x00000020
-#define AR_PHY_FC_HT_EN 0x00000040
-#define AR_PHY_FC_SHORT_GI_40 0x00000080
-#define AR_PHY_FC_WALSH 0x00000100
-#define AR_PHY_FC_SINGLE_HT_LTF1 0x00000200
-#define AR_PHY_FC_ENABLE_DAC_FIFO 0x00000800
-
-#define AR_PHY_TEST2 0x9808
-
-#define AR_PHY_TIMING2 0x9810
-#define AR_PHY_TIMING3 0x9814
-#define AR_PHY_TIMING3_DSC_MAN 0xFFFE0000
-#define AR_PHY_TIMING3_DSC_MAN_S 17
-#define AR_PHY_TIMING3_DSC_EXP 0x0001E000
-#define AR_PHY_TIMING3_DSC_EXP_S 13
-
-#define AR_PHY_CHIP_ID 0x9818
-#define AR_PHY_CHIP_ID_REV_0 0x80
-#define AR_PHY_CHIP_ID_REV_1 0x81
-#define AR_PHY_CHIP_ID_9160_REV_0 0xb0
-
-#define AR_PHY_ACTIVE 0x981C
-#define AR_PHY_ACTIVE_EN 0x00000001
-#define AR_PHY_ACTIVE_DIS 0x00000000
-
-#define AR_PHY_RF_CTL2 0x9824
-#define AR_PHY_TX_END_DATA_START 0x000000FF
-#define AR_PHY_TX_END_DATA_START_S 0
-#define AR_PHY_TX_END_PA_ON 0x0000FF00
-#define AR_PHY_TX_END_PA_ON_S 8
-
-#define AR_PHY_RF_CTL3 0x9828
-#define AR_PHY_TX_END_TO_A2_RX_ON 0x00FF0000
-#define AR_PHY_TX_END_TO_A2_RX_ON_S 16
-
-#define AR_PHY_ADC_CTL 0x982C
-#define AR_PHY_ADC_CTL_OFF_INBUFGAIN 0x00000003
-#define AR_PHY_ADC_CTL_OFF_INBUFGAIN_S 0
-#define AR_PHY_ADC_CTL_OFF_PWDDAC 0x00002000
-#define AR_PHY_ADC_CTL_OFF_PWDBANDGAP 0x00004000
-#define AR_PHY_ADC_CTL_OFF_PWDADC 0x00008000
-#define AR_PHY_ADC_CTL_ON_INBUFGAIN 0x00030000
-#define AR_PHY_ADC_CTL_ON_INBUFGAIN_S 16
-
-#define AR_PHY_ADC_SERIAL_CTL 0x9830
-#define AR_PHY_SEL_INTERNAL_ADDAC 0x00000000
-#define AR_PHY_SEL_EXTERNAL_RADIO 0x00000001
-
-#define AR_PHY_RF_CTL4 0x9834
-#define AR_PHY_RF_CTL4_TX_END_XPAB_OFF 0xFF000000
-#define AR_PHY_RF_CTL4_TX_END_XPAB_OFF_S 24
-#define AR_PHY_RF_CTL4_TX_END_XPAA_OFF 0x00FF0000
-#define AR_PHY_RF_CTL4_TX_END_XPAA_OFF_S 16
-#define AR_PHY_RF_CTL4_FRAME_XPAB_ON 0x0000FF00
-#define AR_PHY_RF_CTL4_FRAME_XPAB_ON_S 8
-#define AR_PHY_RF_CTL4_FRAME_XPAA_ON 0x000000FF
-#define AR_PHY_RF_CTL4_FRAME_XPAA_ON_S 0
-
-#define AR_PHY_TSTDAC_CONST 0x983c
-
-#define AR_PHY_SETTLING 0x9844
-#define AR_PHY_SETTLING_SWITCH 0x00003F80
-#define AR_PHY_SETTLING_SWITCH_S 7
-
-#define AR_PHY_RXGAIN 0x9848
-#define AR_PHY_RXGAIN_TXRX_ATTEN 0x0003F000
-#define AR_PHY_RXGAIN_TXRX_ATTEN_S 12
-#define AR_PHY_RXGAIN_TXRX_RF_MAX 0x007C0000
-#define AR_PHY_RXGAIN_TXRX_RF_MAX_S 18
-#define AR9280_PHY_RXGAIN_TXRX_ATTEN 0x00003F80
-#define AR9280_PHY_RXGAIN_TXRX_ATTEN_S 7
-#define AR9280_PHY_RXGAIN_TXRX_MARGIN 0x001FC000
-#define AR9280_PHY_RXGAIN_TXRX_MARGIN_S 14
-
-#define AR_PHY_DESIRED_SZ 0x9850
-#define AR_PHY_DESIRED_SZ_ADC 0x000000FF
-#define AR_PHY_DESIRED_SZ_ADC_S 0
-#define AR_PHY_DESIRED_SZ_PGA 0x0000FF00
-#define AR_PHY_DESIRED_SZ_PGA_S 8
-#define AR_PHY_DESIRED_SZ_TOT_DES 0x0FF00000
-#define AR_PHY_DESIRED_SZ_TOT_DES_S 20
-
-#define AR_PHY_FIND_SIG 0x9858
-#define AR_PHY_FIND_SIG_FIRSTEP 0x0003F000
-#define AR_PHY_FIND_SIG_FIRSTEP_S 12
-#define AR_PHY_FIND_SIG_FIRPWR 0x03FC0000
-#define AR_PHY_FIND_SIG_FIRPWR_S 18
-
-#define AR_PHY_AGC_CTL1 0x985C
-#define AR_PHY_AGC_CTL1_COARSE_LOW 0x00007F80
-#define AR_PHY_AGC_CTL1_COARSE_LOW_S 7
-#define AR_PHY_AGC_CTL1_COARSE_HIGH 0x003F8000
-#define AR_PHY_AGC_CTL1_COARSE_HIGH_S 15
-
-#define AR_PHY_AGC_CONTROL 0x9860
-#define AR_PHY_AGC_CONTROL_CAL 0x00000001
-#define AR_PHY_AGC_CONTROL_NF 0x00000002
-#define AR_PHY_AGC_CONTROL_ENABLE_NF 0x00008000
-#define AR_PHY_AGC_CONTROL_FLTR_CAL 0x00010000
-#define AR_PHY_AGC_CONTROL_NO_UPDATE_NF 0x00020000
-
-#define AR_PHY_CCA 0x9864
-#define AR_PHY_MINCCA_PWR 0x0FF80000
-#define AR_PHY_MINCCA_PWR_S 19
-#define AR_PHY_CCA_THRESH62 0x0007F000
-#define AR_PHY_CCA_THRESH62_S 12
-#define AR9280_PHY_MINCCA_PWR 0x1FF00000
-#define AR9280_PHY_MINCCA_PWR_S 20
-#define AR9280_PHY_CCA_THRESH62 0x000FF000
-#define AR9280_PHY_CCA_THRESH62_S 12
-
-#define AR_PHY_SFCORR_LOW 0x986C
-#define AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW 0x00000001
-#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW 0x00003F00
-#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW_S 8
-#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW 0x001FC000
-#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW_S 14
-#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW 0x0FE00000
-#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW_S 21
-
-#define AR_PHY_SFCORR 0x9868
-#define AR_PHY_SFCORR_M2COUNT_THR 0x0000001F
-#define AR_PHY_SFCORR_M2COUNT_THR_S 0
-#define AR_PHY_SFCORR_M1_THRESH 0x00FE0000
-#define AR_PHY_SFCORR_M1_THRESH_S 17
-#define AR_PHY_SFCORR_M2_THRESH 0x7F000000
-#define AR_PHY_SFCORR_M2_THRESH_S 24
-
-#define AR_PHY_SLEEP_CTR_CONTROL 0x9870
-#define AR_PHY_SLEEP_CTR_LIMIT 0x9874
-#define AR_PHY_SYNTH_CONTROL 0x9874
-#define AR_PHY_SLEEP_SCAL 0x9878
-
-#define AR_PHY_PLL_CTL 0x987c
-#define AR_PHY_PLL_CTL_40 0xaa
-#define AR_PHY_PLL_CTL_40_5413 0x04
-#define AR_PHY_PLL_CTL_44 0xab
-#define AR_PHY_PLL_CTL_44_2133 0xeb
-#define AR_PHY_PLL_CTL_40_2133 0xea
-
-#define AR_PHY_SPECTRAL_SCAN 0x9910 /* AR9280 spectral scan configuration register */
-#define AR_PHY_SPECTRAL_SCAN_ENABLE 0x1
-#define AR_PHY_SPECTRAL_SCAN_ENA 0x00000001 /* Enable spectral scan, reg 68, bit 0 */
-#define AR_PHY_SPECTRAL_SCAN_ENA_S 0 /* Enable spectral scan, reg 68, bit 0 */
-#define AR_PHY_SPECTRAL_SCAN_ACTIVE 0x00000002 /* Activate spectral scan reg 68, bit 1*/
-#define AR_PHY_SPECTRAL_SCAN_ACTIVE_S 1 /* Activate spectral scan reg 68, bit 1*/
-#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD 0x000000F0 /* Interval for FFT reports, reg 68, bits 4-7*/
-#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD_S 4
-#define AR_PHY_SPECTRAL_SCAN_PERIOD 0x0000FF00 /* Interval for FFT reports, reg 68, bits 8-15*/
-#define AR_PHY_SPECTRAL_SCAN_PERIOD_S 8
-#define AR_PHY_SPECTRAL_SCAN_COUNT 0x00FF0000 /* Number of reports, reg 68, bits 16-23*/
-#define AR_PHY_SPECTRAL_SCAN_COUNT_S 16
-#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT 0x01000000 /* Short repeat, reg 68, bit 24*/
-#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_S 24 /* Short repeat, reg 68, bit 24*/
-
-#define AR_PHY_RX_DELAY 0x9914
-#define AR_PHY_SEARCH_START_DELAY 0x9918
-#define AR_PHY_RX_DELAY_DELAY 0x00003FFF
-
-#define AR_PHY_TIMING_CTRL4(_i) (0x9920 + ((_i) << 12))
-#define AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF 0x01F
-#define AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF_S 0
-#define AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF 0x7E0
-#define AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF_S 5
-#define AR_PHY_TIMING_CTRL4_IQCORR_ENABLE 0x800
-#define AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX 0xF000
-#define AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX_S 12
-#define AR_PHY_TIMING_CTRL4_DO_CAL 0x10000
-
-#define AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI 0x80000000
-#define AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER 0x40000000
-#define AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK 0x20000000
-#define AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK 0x10000000
-
-#define AR_PHY_TIMING5 0x9924
-#define AR_PHY_TIMING5_CYCPWR_THR1 0x000000FE
-#define AR_PHY_TIMING5_CYCPWR_THR1_S 1
-
-#define AR_PHY_POWER_TX_RATE1 0x9934
-#define AR_PHY_POWER_TX_RATE2 0x9938
-#define AR_PHY_POWER_TX_RATE_MAX 0x993c
-#define AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE 0x00000040
-
-#define AR_PHY_FRAME_CTL 0x9944
-#define AR_PHY_FRAME_CTL_TX_CLIP 0x00000038
-#define AR_PHY_FRAME_CTL_TX_CLIP_S 3
-
-#define AR_PHY_TXPWRADJ 0x994C
-#define AR_PHY_TXPWRADJ_CCK_GAIN_DELTA 0x00000FC0
-#define AR_PHY_TXPWRADJ_CCK_GAIN_DELTA_S 6
-#define AR_PHY_TXPWRADJ_CCK_PCDAC_INDEX 0x00FC0000
-#define AR_PHY_TXPWRADJ_CCK_PCDAC_INDEX_S 18
-
-#define AR_PHY_RADAR_EXT 0x9940
-#define AR_PHY_RADAR_EXT_ENA 0x00004000
-
-#define AR_PHY_RADAR_0 0x9954
-#define AR_PHY_RADAR_0_ENA 0x00000001
-#define AR_PHY_RADAR_0_FFT_ENA 0x80000000
-#define AR_PHY_RADAR_0_INBAND 0x0000003e
-#define AR_PHY_RADAR_0_INBAND_S 1
-#define AR_PHY_RADAR_0_PRSSI 0x00000FC0
-#define AR_PHY_RADAR_0_PRSSI_S 6
-#define AR_PHY_RADAR_0_HEIGHT 0x0003F000
-#define AR_PHY_RADAR_0_HEIGHT_S 12
-#define AR_PHY_RADAR_0_RRSSI 0x00FC0000
-#define AR_PHY_RADAR_0_RRSSI_S 18
-#define AR_PHY_RADAR_0_FIRPWR 0x7F000000
-#define AR_PHY_RADAR_0_FIRPWR_S 24
-
-#define AR_PHY_RADAR_1 0x9958
-#define AR_PHY_RADAR_1_RELPWR_ENA 0x00800000
-#define AR_PHY_RADAR_1_USE_FIR128 0x00400000
-#define AR_PHY_RADAR_1_RELPWR_THRESH 0x003F0000
-#define AR_PHY_RADAR_1_RELPWR_THRESH_S 16
-#define AR_PHY_RADAR_1_BLOCK_CHECK 0x00008000
-#define AR_PHY_RADAR_1_MAX_RRSSI 0x00004000
-#define AR_PHY_RADAR_1_RELSTEP_CHECK 0x00002000
-#define AR_PHY_RADAR_1_RELSTEP_THRESH 0x00001F00
-#define AR_PHY_RADAR_1_RELSTEP_THRESH_S 8
-#define AR_PHY_RADAR_1_MAXLEN 0x000000FF
-#define AR_PHY_RADAR_1_MAXLEN_S 0
-
-#define AR_PHY_SWITCH_CHAIN_0 0x9960
-#define AR_PHY_SWITCH_COM 0x9964
-
-#define AR_PHY_SIGMA_DELTA 0x996C
-#define AR_PHY_SIGMA_DELTA_ADC_SEL 0x00000003
-#define AR_PHY_SIGMA_DELTA_ADC_SEL_S 0
-#define AR_PHY_SIGMA_DELTA_FILT2 0x000000F8
-#define AR_PHY_SIGMA_DELTA_FILT2_S 3
-#define AR_PHY_SIGMA_DELTA_FILT1 0x00001F00
-#define AR_PHY_SIGMA_DELTA_FILT1_S 8
-#define AR_PHY_SIGMA_DELTA_ADC_CLIP 0x01FFE000
-#define AR_PHY_SIGMA_DELTA_ADC_CLIP_S 13
-
-#define AR_PHY_RESTART 0x9970
-#define AR_PHY_RESTART_DIV_GC 0x001C0000
-#define AR_PHY_RESTART_DIV_GC_S 18
-
-#define AR_PHY_RFBUS_REQ 0x997C
-#define AR_PHY_RFBUS_REQ_EN 0x00000001
-
-#define AR_PHY_TIMING7 0x9980
-#define AR_PHY_TIMING8 0x9984
-#define AR_PHY_TIMING8_PILOT_MASK_2 0x000FFFFF
-#define AR_PHY_TIMING8_PILOT_MASK_2_S 0
-
-#define AR_PHY_BIN_MASK2_1 0x9988
-#define AR_PHY_BIN_MASK2_2 0x998c
-#define AR_PHY_BIN_MASK2_3 0x9990
-#define AR_PHY_BIN_MASK2_4 0x9994
-
-#define AR_PHY_BIN_MASK_1 0x9900
-#define AR_PHY_BIN_MASK_2 0x9904
-#define AR_PHY_BIN_MASK_3 0x9908
-
-#define AR_PHY_MASK_CTL 0x990c
-
-#define AR_PHY_BIN_MASK2_4_MASK_4 0x00003FFF
-#define AR_PHY_BIN_MASK2_4_MASK_4_S 0
-
-#define AR_PHY_TIMING9 0x9998
-#define AR_PHY_TIMING10 0x999c
-#define AR_PHY_TIMING10_PILOT_MASK_2 0x000FFFFF
-#define AR_PHY_TIMING10_PILOT_MASK_2_S 0
-
-#define AR_PHY_TIMING11 0x99a0
-#define AR_PHY_TIMING11_SPUR_DELTA_PHASE 0x000FFFFF
-#define AR_PHY_TIMING11_SPUR_DELTA_PHASE_S 0
-#define AR_PHY_TIMING11_SPUR_FREQ_SD 0x3FF00000
-#define AR_PHY_TIMING11_SPUR_FREQ_SD_S 20
-#define AR_PHY_TIMING11_USE_SPUR_IN_AGC 0x40000000
-#define AR_PHY_TIMING11_USE_SPUR_IN_SELFCOR 0x80000000
-
-#define AR_PHY_RX_CHAINMASK 0x99a4
-#define AR_PHY_NEW_ADC_DC_GAIN_CORR(_i) (0x99b4 + ((_i) << 12))
-#define AR_PHY_NEW_ADC_GAIN_CORR_ENABLE 0x40000000
-#define AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE 0x80000000
-
-#define AR_PHY_MULTICHAIN_GAIN_CTL 0x99ac
-#define AR_PHY_9285_ANT_DIV_CTL_ALL 0x7f000000
-#define AR_PHY_9285_ANT_DIV_CTL 0x01000000
-#define AR_PHY_9285_ANT_DIV_CTL_S 24
-#define AR_PHY_9285_ANT_DIV_ALT_LNACONF 0x06000000
-#define AR_PHY_9285_ANT_DIV_ALT_LNACONF_S 25
-#define AR_PHY_9285_ANT_DIV_MAIN_LNACONF 0x18000000
-#define AR_PHY_9285_ANT_DIV_MAIN_LNACONF_S 27
-#define AR_PHY_9285_ANT_DIV_ALT_GAINTB 0x20000000
-#define AR_PHY_9285_ANT_DIV_ALT_GAINTB_S 29
-#define AR_PHY_9285_ANT_DIV_MAIN_GAINTB 0x40000000
-#define AR_PHY_9285_ANT_DIV_MAIN_GAINTB_S 30
-#define AR_PHY_9285_ANT_DIV_LNA1 2
-#define AR_PHY_9285_ANT_DIV_LNA2 1
-#define AR_PHY_9285_ANT_DIV_LNA1_PLUS_LNA2 3
-#define AR_PHY_9285_ANT_DIV_LNA1_MINUS_LNA2 0
-#define AR_PHY_9285_ANT_DIV_GAINTB_0 0
-#define AR_PHY_9285_ANT_DIV_GAINTB_1 1
+#define AR_PHY_TX_PWRCTRL_TX_GAIN_TAB_MAX 0x0007E000
+#define AR_PHY_TX_PWRCTRL_TX_GAIN_TAB_MAX_S 13
+#define AR_PHY_TX_GAIN_CLC 0x0000001E
+#define AR_PHY_TX_GAIN_CLC_S 1
+#define AR_PHY_TX_GAIN 0x0007F000
+#define AR_PHY_TX_GAIN_S 12
-#define AR_PHY_EXT_CCA0 0x99b8
-#define AR_PHY_EXT_CCA0_THRESH62 0x000000FF
-#define AR_PHY_EXT_CCA0_THRESH62_S 0
-
-#define AR_PHY_EXT_CCA 0x99bc
-#define AR_PHY_EXT_CCA_CYCPWR_THR1 0x0000FE00
-#define AR_PHY_EXT_CCA_CYCPWR_THR1_S 9
-#define AR_PHY_EXT_CCA_THRESH62 0x007F0000
-#define AR_PHY_EXT_CCA_THRESH62_S 16
-#define AR_PHY_EXT_MINCCA_PWR 0xFF800000
-#define AR_PHY_EXT_MINCCA_PWR_S 23
-#define AR9280_PHY_EXT_MINCCA_PWR 0x01FF0000
-#define AR9280_PHY_EXT_MINCCA_PWR_S 16
-
-#define AR_PHY_SFCORR_EXT 0x99c0
-#define AR_PHY_SFCORR_EXT_M1_THRESH 0x0000007F
-#define AR_PHY_SFCORR_EXT_M1_THRESH_S 0
-#define AR_PHY_SFCORR_EXT_M2_THRESH 0x00003F80
-#define AR_PHY_SFCORR_EXT_M2_THRESH_S 7
-#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW 0x001FC000
-#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW_S 14
-#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW 0x0FE00000
-#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW_S 21
-#define AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S 28
-
-#define AR_PHY_HALFGI 0x99D0
-#define AR_PHY_HALFGI_DSC_MAN 0x0007FFF0
-#define AR_PHY_HALFGI_DSC_MAN_S 4
-#define AR_PHY_HALFGI_DSC_EXP 0x0000000F
-#define AR_PHY_HALFGI_DSC_EXP_S 0
-
-#define AR_PHY_CHAN_INFO_MEMORY 0x99DC
-#define AR_PHY_CHAN_INFO_MEMORY_CAPTURE_MASK 0x0001
-
-#define AR_PHY_HEAVY_CLIP_ENABLE 0x99E0
-
-#define AR_PHY_HEAVY_CLIP_FACTOR_RIFS 0x99EC
-#define AR_PHY_RIFS_INIT_DELAY 0x03ff0000
-
-#define AR_PHY_M_SLEEP 0x99f0
-#define AR_PHY_REFCLKDLY 0x99f4
-#define AR_PHY_REFCLKPD 0x99f8
-
-#define AR_PHY_CALMODE 0x99f0
-
-#define AR_PHY_CALMODE_IQ 0x00000000
-#define AR_PHY_CALMODE_ADC_GAIN 0x00000001
-#define AR_PHY_CALMODE_ADC_DC_PER 0x00000002
-#define AR_PHY_CALMODE_ADC_DC_INIT 0x00000003
-
-#define AR_PHY_CAL_MEAS_0(_i) (0x9c10 + ((_i) << 12))
-#define AR_PHY_CAL_MEAS_1(_i) (0x9c14 + ((_i) << 12))
-#define AR_PHY_CAL_MEAS_2(_i) (0x9c18 + ((_i) << 12))
-#define AR_PHY_CAL_MEAS_3(_i) (0x9c1c + ((_i) << 12))
-
-#define AR_PHY_CURRENT_RSSI 0x9c1c
-#define AR9280_PHY_CURRENT_RSSI 0x9c3c
-
-#define AR_PHY_RFBUS_GRANT 0x9C20
-#define AR_PHY_RFBUS_GRANT_EN 0x00000001
-
-#define AR_PHY_CHAN_INFO_GAIN_DIFF 0x9CF4
-#define AR_PHY_CHAN_INFO_GAIN_DIFF_UPPER_LIMIT 320
-
-#define AR_PHY_CHAN_INFO_GAIN 0x9CFC
-
-#define AR_PHY_MODE 0xA200
-#define AR_PHY_MODE_ASYNCFIFO 0x80
-#define AR_PHY_MODE_AR2133 0x08
-#define AR_PHY_MODE_AR5111 0x00
-#define AR_PHY_MODE_AR5112 0x08
-#define AR_PHY_MODE_DYNAMIC 0x04
-#define AR_PHY_MODE_RF2GHZ 0x02
-#define AR_PHY_MODE_RF5GHZ 0x00
-#define AR_PHY_MODE_CCK 0x01
-#define AR_PHY_MODE_OFDM 0x00
-#define AR_PHY_MODE_DYN_CCK_DISABLE 0x100
-
-#define AR_PHY_CCK_TX_CTRL 0xA204
-#define AR_PHY_CCK_TX_CTRL_JAPAN 0x00000010
-#define AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK 0x0000000C
-#define AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK_S 2
-
-#define AR_PHY_CCK_DETECT 0xA208
-#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK 0x0000003F
-#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK_S 0
-/* [12:6] settling time for antenna switch */
-#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME 0x00001FC0
-#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME_S 6
-#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV 0x2000
-#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV_S 13
-
-#define AR_PHY_GAIN_2GHZ 0xA20C
-#define AR_PHY_GAIN_2GHZ_RXTX_MARGIN 0x00FC0000
-#define AR_PHY_GAIN_2GHZ_RXTX_MARGIN_S 18
-#define AR_PHY_GAIN_2GHZ_BSW_MARGIN 0x00003C00
-#define AR_PHY_GAIN_2GHZ_BSW_MARGIN_S 10
-#define AR_PHY_GAIN_2GHZ_BSW_ATTEN 0x0000001F
-#define AR_PHY_GAIN_2GHZ_BSW_ATTEN_S 0
-
-#define AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN 0x003E0000
-#define AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN_S 17
-#define AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN 0x0001F000
-#define AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN_S 12
-#define AR_PHY_GAIN_2GHZ_XATTEN2_DB 0x00000FC0
-#define AR_PHY_GAIN_2GHZ_XATTEN2_DB_S 6
-#define AR_PHY_GAIN_2GHZ_XATTEN1_DB 0x0000003F
-#define AR_PHY_GAIN_2GHZ_XATTEN1_DB_S 0
-
-#define AR_PHY_CCK_RXCTRL4 0xA21C
-#define AR_PHY_CCK_RXCTRL4_FREQ_EST_SHORT 0x01F80000
-#define AR_PHY_CCK_RXCTRL4_FREQ_EST_SHORT_S 19
-
-#define AR_PHY_DAG_CTRLCCK 0xA228
-#define AR_PHY_DAG_CTRLCCK_EN_RSSI_THR 0x00000200
-#define AR_PHY_DAG_CTRLCCK_RSSI_THR 0x0001FC00
-#define AR_PHY_DAG_CTRLCCK_RSSI_THR_S 10
-
-#define AR_PHY_FORCE_CLKEN_CCK 0xA22C
-#define AR_PHY_FORCE_CLKEN_CCK_MRC_MUX 0x00000040
-
-#define AR_PHY_POWER_TX_RATE3 0xA234
-#define AR_PHY_POWER_TX_RATE4 0xA238
-
-#define AR_PHY_SCRM_SEQ_XR 0xA23C
-#define AR_PHY_HEADER_DETECT_XR 0xA240
-#define AR_PHY_CHIRP_DETECTED_XR 0xA244
-#define AR_PHY_BLUETOOTH 0xA254
-
-#define AR_PHY_TPCRG1 0xA258
-#define AR_PHY_TPCRG1_NUM_PD_GAIN 0x0000c000
-#define AR_PHY_TPCRG1_NUM_PD_GAIN_S 14
-
-#define AR_PHY_TPCRG1_PD_GAIN_1 0x00030000
-#define AR_PHY_TPCRG1_PD_GAIN_1_S 16
-#define AR_PHY_TPCRG1_PD_GAIN_2 0x000C0000
-#define AR_PHY_TPCRG1_PD_GAIN_2_S 18
-#define AR_PHY_TPCRG1_PD_GAIN_3 0x00300000
-#define AR_PHY_TPCRG1_PD_GAIN_3_S 20
-
-#define AR_PHY_TPCRG1_PD_CAL_ENABLE 0x00400000
-#define AR_PHY_TPCRG1_PD_CAL_ENABLE_S 22
-
-#define AR_PHY_TX_PWRCTRL4 0xa264
-#define AR_PHY_TX_PWRCTRL_PD_AVG_VALID 0x00000001
-#define AR_PHY_TX_PWRCTRL_PD_AVG_VALID_S 0
-#define AR_PHY_TX_PWRCTRL_PD_AVG_OUT 0x000001FE
-#define AR_PHY_TX_PWRCTRL_PD_AVG_OUT_S 1
-
-#define AR_PHY_TX_PWRCTRL6_0 0xa270
-#define AR_PHY_TX_PWRCTRL6_1 0xb270
-#define AR_PHY_TX_PWRCTRL_ERR_EST_MODE 0x03000000
-#define AR_PHY_TX_PWRCTRL_ERR_EST_MODE_S 24
-
-#define AR_PHY_TX_PWRCTRL7 0xa274
-#define AR_PHY_TX_PWRCTRL_INIT_TX_GAIN 0x01F80000
-#define AR_PHY_TX_PWRCTRL_INIT_TX_GAIN_S 19
-
-#define AR_PHY_TX_PWRCTRL9 0xa27C
-#define AR_PHY_TX_DESIRED_SCALE_CCK 0x00007C00
-#define AR_PHY_TX_DESIRED_SCALE_CCK_S 10
-#define AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL 0x80000000
-#define AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL_S 31
-
-#define AR_PHY_TX_GAIN_TBL1 0xa300
-#define AR_PHY_TX_GAIN 0x0007F000
-#define AR_PHY_TX_GAIN_S 12
-
-#define AR_PHY_CH0_TX_PWRCTRL11 0xa398
-#define AR_PHY_CH1_TX_PWRCTRL11 0xb398
-#define AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP 0x0000FC00
-#define AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP_S 10
-
-#define AR_PHY_VIT_MASK2_M_46_61 0xa3a0
-#define AR_PHY_MASK2_M_31_45 0xa3a4
-#define AR_PHY_MASK2_M_16_30 0xa3a8
-#define AR_PHY_MASK2_M_00_15 0xa3ac
-#define AR_PHY_MASK2_P_15_01 0xa3b8
-#define AR_PHY_MASK2_P_30_16 0xa3bc
-#define AR_PHY_MASK2_P_45_31 0xa3c0
-#define AR_PHY_MASK2_P_61_45 0xa3c4
-#define AR_PHY_SPUR_REG 0x994c
-
-#define AR_PHY_SPUR_REG_MASK_RATE_CNTL (0xFF << 18)
-#define AR_PHY_SPUR_REG_MASK_RATE_CNTL_S 18
-
-#define AR_PHY_SPUR_REG_ENABLE_MASK_PPM 0x20000
-#define AR_PHY_SPUR_REG_MASK_RATE_SELECT (0xFF << 9)
-#define AR_PHY_SPUR_REG_MASK_RATE_SELECT_S 9
-#define AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI 0x100
-#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH 0x7F
-#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH_S 0
-
-#define AR_PHY_PILOT_MASK_01_30 0xa3b0
-#define AR_PHY_PILOT_MASK_31_60 0xa3b4
-
-#define AR_PHY_CHANNEL_MASK_01_30 0x99d4
-#define AR_PHY_CHANNEL_MASK_31_60 0x99d8
-
-#define AR_PHY_ANALOG_SWAP 0xa268
-#define AR_PHY_SWAP_ALT_CHAIN 0x00000040
-
-#define AR_PHY_TPCRG5 0xA26C
-#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP 0x0000000F
-#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP_S 0
-#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1 0x000003F0
-#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1_S 4
-#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2 0x0000FC00
-#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2_S 10
-#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3 0x003F0000
-#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3_S 16
-#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4 0x0FC00000
-#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4_S 22
-
-/* Carrier leak calibration control, do it after AGC calibration */
-#define AR_PHY_CL_CAL_CTL 0xA358
-#define AR_PHY_CL_CAL_ENABLE 0x00000002
-#define AR_PHY_PARALLEL_CAL_ENABLE 0x00000001
-
-#define AR_PHY_POWER_TX_RATE5 0xA38C
-#define AR_PHY_POWER_TX_RATE6 0xA390
-
-#define AR_PHY_CAL_CHAINMASK 0xA39C
-
-#define AR_PHY_POWER_TX_SUB 0xA3C8
-#define AR_PHY_POWER_TX_RATE7 0xA3CC
-#define AR_PHY_POWER_TX_RATE8 0xA3D0
-#define AR_PHY_POWER_TX_RATE9 0xA3D4
-
-#define AR_PHY_XPA_CFG 0xA3D8
-#define AR_PHY_FORCE_XPA_CFG 0x000000001
-#define AR_PHY_FORCE_XPA_CFG_S 0
-
-#define AR_PHY_CH1_CCA 0xa864
-#define AR_PHY_CH1_MINCCA_PWR 0x0FF80000
-#define AR_PHY_CH1_MINCCA_PWR_S 19
-#define AR9280_PHY_CH1_MINCCA_PWR 0x1FF00000
-#define AR9280_PHY_CH1_MINCCA_PWR_S 20
-
-#define AR_PHY_CH2_CCA 0xb864
-#define AR_PHY_CH2_MINCCA_PWR 0x0FF80000
-#define AR_PHY_CH2_MINCCA_PWR_S 19
-
-#define AR_PHY_CH1_EXT_CCA 0xa9bc
-#define AR_PHY_CH1_EXT_MINCCA_PWR 0xFF800000
-#define AR_PHY_CH1_EXT_MINCCA_PWR_S 23
-#define AR9280_PHY_CH1_EXT_MINCCA_PWR 0x01FF0000
-#define AR9280_PHY_CH1_EXT_MINCCA_PWR_S 16
-
-#define AR_PHY_CH2_EXT_CCA 0xb9bc
-#define AR_PHY_CH2_EXT_MINCCA_PWR 0xFF800000
-#define AR_PHY_CH2_EXT_MINCCA_PWR_S 23
+#define AR_PHY_CLC_TBL1 0xa35c
+#define AR_PHY_CLC_I0 0x07ff0000
+#define AR_PHY_CLC_I0_S 16
+#define AR_PHY_CLC_Q0 0x0000ffd0
+#define AR_PHY_CLC_Q0_S 5
#define REG_WRITE_RF_ARRAY(iniarray, regData, regWr) do { \
int r; \
@@ -615,6 +51,7 @@ bool ath9k_hw_set_rf_regs(struct ath_hw *ah,
#define ANTSWAP_AB 0x0001
#define REDUCE_CHAIN_0 0x00000050
#define REDUCE_CHAIN_1 0x00000051
+#define AR_PHY_CHIP_ID 0x9818
#define RF_BANK_SETUP(_bank, _iniarray, _col) do { \
int i; \
@@ -622,4 +59,7 @@ bool ath9k_hw_set_rf_regs(struct ath_hw *ah,
(_bank)[i] = INI_RA((_iniarray), i, _col);; \
} while (0)
+#define AR_PHY_TIMING11_SPUR_FREQ_SD 0x3FF00000
+#define AR_PHY_TIMING11_SPUR_FREQ_SD_S 20
+
#endif
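
The CHANSEL_2G()/CHANSEL_5G() macros added at the top of phy.h convert a centre frequency in MHz into the channel-select word used by the synthesizer programming: the frequency is scaled by 0x10000 (2 GHz) or 0x8000 (5 GHz) and divided by CHANSEL_DIV. A minimal stand-alone sketch of that arithmetic, assuming nothing beyond the two macro definitions in this hunk; the actual channel programming sequence lives elsewhere in the driver:

#include <stdio.h>

#define CHANSEL_DIV        15
#define CHANSEL_2G(_freq)  (((_freq) * 0x10000) / CHANSEL_DIV)
#define CHANSEL_5G(_freq)  (((_freq) * 0x8000) / CHANSEL_DIV)

int main(void)
{
        unsigned int f2 = 2412; /* 2.4 GHz channel 1 */
        unsigned int f5 = 5180; /* 5 GHz channel 36 */

        /* Same integer math the macros above perform. */
        printf("CHANSEL_2G(%u) = 0x%x\n", f2, CHANSEL_2G(f2));
        printf("CHANSEL_5G(%u) = 0x%x\n", f5, CHANSEL_5G(f5));
        return 0;
}
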
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 244e1c629177..8519452c95f1 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -691,6 +691,19 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
rate_table = sc->cur_rate_table;
rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table, &is_probe);
+ /*
+ * If we're in HT mode and both we and our peer support LDPC, enable it.
+ * We don't need to check our own device's capabilities as our own
+ * HT capabilities would have already been intersected with our peer's.
+ */
+ if (conf_is_ht(&sc->hw->conf) &&
+ (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING))
+ tx_info->flags |= IEEE80211_TX_CTL_LDPC;
+
+ if (conf_is_ht(&sc->hw->conf) &&
+ (sta->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC))
+ tx_info->flags |= (1 << IEEE80211_TX_CTL_STBC_SHIFT);
+
if (is_probe) {
/* set one try for probe rates. For the
* probes don't enable rts */
@@ -1228,8 +1241,12 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
long_retry = rate->count - 1;
}
- if (!priv_sta || !ieee80211_is_data(fc) ||
- !(tx_info->pad[0] & ATH_TX_INFO_UPDATE_RC))
+ if (!priv_sta || !ieee80211_is_data(fc))
+ return;
+
+ /* This packet was aggregated but doesn't carry status info */
+ if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
+ !(tx_info->flags & IEEE80211_TX_STAT_AMPDU))
return;
if (tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED)
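
The LDPC/STBC hunk above keys both transmit flags off the peer's HT capability bits alone, on the grounds that mac80211 has already intersected them with our own device's capabilities. A self-contained sketch of that derivation; the HT capability bit values mirror mac80211, but the TX-flag bit positions below are placeholders, not the real IEEE80211_TX_CTL_* values:

#include <stdbool.h>
#include <stdio.h>

#define HT_CAP_LDPC_CODING 0x0001   /* as in mac80211 */
#define HT_CAP_TX_STBC     0x0080   /* as in mac80211 */
#define TX_CTL_LDPC        (1u << 0)  /* placeholder flag bit */
#define TX_CTL_STBC_SHIFT  1          /* placeholder shift */

static unsigned int rc_tx_flags(bool is_ht, unsigned int peer_ht_cap)
{
        unsigned int flags = 0;

        /* Peer caps were already intersected with ours, so they alone decide. */
        if (is_ht && (peer_ht_cap & HT_CAP_LDPC_CODING))
                flags |= TX_CTL_LDPC;
        if (is_ht && (peer_ht_cap & HT_CAP_TX_STBC))
                flags |= 1u << TX_CTL_STBC_SHIFT;

        return flags;
}

int main(void)
{
        printf("flags = 0x%x\n",
               rc_tx_flags(true, HT_CAP_LDPC_CODING | HT_CAP_TX_STBC));
        return 0;
}

The point of the sketch is only the short-circuit: once the intersection has happened, a single peer-capability test per feature is enough.
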
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
index 4f6d6fd442f4..3d8d40cdc99e 100644
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ b/drivers/net/wireless/ath/ath9k/rc.h
@@ -110,8 +110,8 @@ struct ath_rate_table {
int rate_cnt;
int mcs_start;
struct {
- int valid;
- int valid_single_stream;
+ u8 valid;
+ u8 valid_single_stream;
u8 phy;
u32 ratekbps;
u32 user_ratekbps;
@@ -172,14 +172,13 @@ struct ath_rate_priv {
#define ATH_TX_INFO_FRAME_TYPE_INTERNAL (1 << 0)
#define ATH_TX_INFO_FRAME_TYPE_PAUSE (1 << 1)
-#define ATH_TX_INFO_UPDATE_RC (1 << 2)
#define ATH_TX_INFO_XRETRY (1 << 3)
#define ATH_TX_INFO_UNDERRUN (1 << 4)
enum ath9k_internal_frame_type {
- ATH9K_NOT_INTERNAL,
- ATH9K_INT_PAUSE,
- ATH9K_INT_UNPAUSE
+ ATH9K_IFT_NOT_INTERNAL,
+ ATH9K_IFT_PAUSE,
+ ATH9K_IFT_UNPAUSE
};
int ath_rate_control_register(void);
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 1ca42e5148c8..ba139132c85f 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -15,6 +15,9 @@
*/
#include "ath9k.h"
+#include "ar9003_mac.h"
+
+#define SKB_CB_ATHBUF(__skb) (*((struct ath_buf **)__skb->cb))
static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc,
struct ieee80211_hdr *hdr)
@@ -115,56 +118,244 @@ static void ath_opmode_init(struct ath_softc *sc)
ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}
-int ath_rx_init(struct ath_softc *sc, int nbufs)
+static bool ath_rx_edma_buf_link(struct ath_softc *sc,
+ enum ath9k_rx_qtype qtype)
{
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_rx_edma *rx_edma;
struct sk_buff *skb;
struct ath_buf *bf;
- int error = 0;
- spin_lock_init(&sc->rx.rxflushlock);
- sc->sc_flags &= ~SC_OP_RXFLUSH;
- spin_lock_init(&sc->rx.rxbuflock);
+ rx_edma = &sc->rx.rx_edma[qtype];
+ if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
+ return false;
- common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
- min(common->cachelsz, (u16)64));
+ bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
+ list_del_init(&bf->list);
- ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
- common->cachelsz, common->rx_bufsize);
+ skb = bf->bf_mpdu;
+
+ ATH_RXBUF_RESET(bf);
+ memset(skb->data, 0, ah->caps.rx_status_len);
+ dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
+ ah->caps.rx_status_len, DMA_TO_DEVICE);
- /* Initialize rx descriptors */
+ SKB_CB_ATHBUF(skb) = bf;
+ ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
+ skb_queue_tail(&rx_edma->rx_fifo, skb);
- error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
- "rx", nbufs, 1);
- if (error != 0) {
- ath_print(common, ATH_DBG_FATAL,
- "failed to allocate rx descriptors: %d\n", error);
- goto err;
+ return true;
+}
+
+static void ath_rx_addbuffer_edma(struct ath_softc *sc,
+ enum ath9k_rx_qtype qtype, int size)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ u32 nbuf = 0;
+
+ if (list_empty(&sc->rx.rxbuf)) {
+ ath_print(common, ATH_DBG_QUEUE, "No free rx buf available\n");
+ return;
}
+ while (!list_empty(&sc->rx.rxbuf)) {
+ nbuf++;
+
+ if (!ath_rx_edma_buf_link(sc, qtype))
+ break;
+
+ if (nbuf >= size)
+ break;
+ }
+}
+
+static void ath_rx_remove_buffer(struct ath_softc *sc,
+ enum ath9k_rx_qtype qtype)
+{
+ struct ath_buf *bf;
+ struct ath_rx_edma *rx_edma;
+ struct sk_buff *skb;
+
+ rx_edma = &sc->rx.rx_edma[qtype];
+
+ while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
+ bf = SKB_CB_ATHBUF(skb);
+ BUG_ON(!bf);
+ list_add_tail(&bf->list, &sc->rx.rxbuf);
+ }
+}
+
+static void ath_rx_edma_cleanup(struct ath_softc *sc)
+{
+ struct ath_buf *bf;
+
+ ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
+ ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
+
list_for_each_entry(bf, &sc->rx.rxbuf, list) {
+ if (bf->bf_mpdu)
+ dev_kfree_skb_any(bf->bf_mpdu);
+ }
+
+ INIT_LIST_HEAD(&sc->rx.rxbuf);
+
+ kfree(sc->rx.rx_bufptr);
+ sc->rx.rx_bufptr = NULL;
+}
+
+static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
+{
+ skb_queue_head_init(&rx_edma->rx_fifo);
+ skb_queue_head_init(&rx_edma->rx_buffers);
+ rx_edma->rx_fifo_hwsize = size;
+}
+
+static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
+ struct sk_buff *skb;
+ struct ath_buf *bf;
+ int error = 0, i;
+ u32 size;
+
+
+ common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN +
+ ah->caps.rx_status_len,
+ min(common->cachelsz, (u16)64));
+
+ ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
+ ah->caps.rx_status_len);
+
+ ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
+ ah->caps.rx_lp_qdepth);
+ ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
+ ah->caps.rx_hp_qdepth);
+
+ size = sizeof(struct ath_buf) * nbufs;
+ bf = kzalloc(size, GFP_KERNEL);
+ if (!bf)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&sc->rx.rxbuf);
+ sc->rx.rx_bufptr = bf;
+
+ for (i = 0; i < nbufs; i++, bf++) {
skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
- if (skb == NULL) {
+ if (!skb) {
error = -ENOMEM;
- goto err;
+ goto rx_init_fail;
}
+ memset(skb->data, 0, common->rx_bufsize);
bf->bf_mpdu = skb;
+
bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
common->rx_bufsize,
- DMA_FROM_DEVICE);
+ DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(sc->dev,
- bf->bf_buf_addr))) {
- dev_kfree_skb_any(skb);
- bf->bf_mpdu = NULL;
+ bf->bf_buf_addr))) {
+ dev_kfree_skb_any(skb);
+ bf->bf_mpdu = NULL;
+ ath_print(common, ATH_DBG_FATAL,
+ "dma_mapping_error() on RX init\n");
+ error = -ENOMEM;
+ goto rx_init_fail;
+ }
+
+ list_add_tail(&bf->list, &sc->rx.rxbuf);
+ }
+
+ return 0;
+
+rx_init_fail:
+ ath_rx_edma_cleanup(sc);
+ return error;
+}
+
+static void ath_edma_start_recv(struct ath_softc *sc)
+{
+ spin_lock_bh(&sc->rx.rxbuflock);
+
+ ath9k_hw_rxena(sc->sc_ah);
+
+ ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
+ sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);
+
+ ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
+ sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);
+
+ spin_unlock_bh(&sc->rx.rxbuflock);
+
+ ath_opmode_init(sc);
+
+ ath9k_hw_startpcureceive(sc->sc_ah);
+}
+
+static void ath_edma_stop_recv(struct ath_softc *sc)
+{
+ spin_lock_bh(&sc->rx.rxbuflock);
+ ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
+ ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
+ spin_unlock_bh(&sc->rx.rxbuflock);
+}
+
+int ath_rx_init(struct ath_softc *sc, int nbufs)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct sk_buff *skb;
+ struct ath_buf *bf;
+ int error = 0;
+
+ spin_lock_init(&sc->rx.rxflushlock);
+ sc->sc_flags &= ~SC_OP_RXFLUSH;
+ spin_lock_init(&sc->rx.rxbuflock);
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ return ath_rx_edma_init(sc, nbufs);
+ } else {
+ common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
+ min(common->cachelsz, (u16)64));
+
+ ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
+ common->cachelsz, common->rx_bufsize);
+
+ /* Initialize rx descriptors */
+
+ error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
+ "rx", nbufs, 1, 0);
+ if (error != 0) {
ath_print(common, ATH_DBG_FATAL,
- "dma_mapping_error() on RX init\n");
- error = -ENOMEM;
+ "failed to allocate rx descriptors: %d\n",
+ error);
goto err;
}
- bf->bf_dmacontext = bf->bf_buf_addr;
+
+ list_for_each_entry(bf, &sc->rx.rxbuf, list) {
+ skb = ath_rxbuf_alloc(common, common->rx_bufsize,
+ GFP_KERNEL);
+ if (skb == NULL) {
+ error = -ENOMEM;
+ goto err;
+ }
+
+ bf->bf_mpdu = skb;
+ bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
+ common->rx_bufsize,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(sc->dev,
+ bf->bf_buf_addr))) {
+ dev_kfree_skb_any(skb);
+ bf->bf_mpdu = NULL;
+ ath_print(common, ATH_DBG_FATAL,
+ "dma_mapping_error() on RX init\n");
+ error = -ENOMEM;
+ goto err;
+ }
+ bf->bf_dmacontext = bf->bf_buf_addr;
+ }
+ sc->rx.rxlink = NULL;
}
- sc->rx.rxlink = NULL;
err:
if (error)
@@ -180,17 +371,23 @@ void ath_rx_cleanup(struct ath_softc *sc)
struct sk_buff *skb;
struct ath_buf *bf;
- list_for_each_entry(bf, &sc->rx.rxbuf, list) {
- skb = bf->bf_mpdu;
- if (skb) {
- dma_unmap_single(sc->dev, bf->bf_buf_addr,
- common->rx_bufsize, DMA_FROM_DEVICE);
- dev_kfree_skb(skb);
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ ath_rx_edma_cleanup(sc);
+ return;
+ } else {
+ list_for_each_entry(bf, &sc->rx.rxbuf, list) {
+ skb = bf->bf_mpdu;
+ if (skb) {
+ dma_unmap_single(sc->dev, bf->bf_buf_addr,
+ common->rx_bufsize,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb(skb);
+ }
}
- }
- if (sc->rx.rxdma.dd_desc_len != 0)
- ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
+ if (sc->rx.rxdma.dd_desc_len != 0)
+ ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
+ }
}
/*
@@ -273,6 +470,11 @@ int ath_startrecv(struct ath_softc *sc)
struct ath_hw *ah = sc->sc_ah;
struct ath_buf *bf, *tbf;
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ ath_edma_start_recv(sc);
+ return 0;
+ }
+
spin_lock_bh(&sc->rx.rxbuflock);
if (list_empty(&sc->rx.rxbuf))
goto start_recv;
@@ -306,7 +508,11 @@ bool ath_stoprecv(struct ath_softc *sc)
ath9k_hw_stoppcurecv(ah);
ath9k_hw_setrxfilter(ah, 0);
stopped = ath9k_hw_stopdmarecv(ah);
- sc->rx.rxlink = NULL;
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+ ath_edma_stop_recv(sc);
+ else
+ sc->rx.rxlink = NULL;
return stopped;
}
@@ -315,7 +521,9 @@ void ath_flushrecv(struct ath_softc *sc)
{
spin_lock_bh(&sc->rx.rxflushlock);
sc->sc_flags |= SC_OP_RXFLUSH;
- ath_rx_tasklet(sc, 1);
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+ ath_rx_tasklet(sc, 1, true);
+ ath_rx_tasklet(sc, 1, false);
sc->sc_flags &= ~SC_OP_RXFLUSH;
spin_unlock_bh(&sc->rx.rxflushlock);
}
@@ -469,15 +677,148 @@ static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw,
ieee80211_rx(hw, skb);
}
-int ath_rx_tasklet(struct ath_softc *sc, int flush)
+static bool ath_edma_get_buffers(struct ath_softc *sc,
+ enum ath9k_rx_qtype qtype)
{
-#define PA2DESC(_sc, _pa) \
- ((struct ath_desc *)((caddr_t)(_sc)->rx.rxdma.dd_desc + \
- ((_pa) - (_sc)->rx.rxdma.dd_desc_paddr)))
+ struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct sk_buff *skb;
+ struct ath_buf *bf;
+ int ret;
+
+ skb = skb_peek(&rx_edma->rx_fifo);
+ if (!skb)
+ return false;
+
+ bf = SKB_CB_ATHBUF(skb);
+ BUG_ON(!bf);
+
+ dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
+ common->rx_bufsize, DMA_FROM_DEVICE);
+
+ ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
+ if (ret == -EINPROGRESS)
+ return false;
+
+ __skb_unlink(skb, &rx_edma->rx_fifo);
+ if (ret == -EINVAL) {
+ /* corrupt descriptor, skip this one and the following one */
+ list_add_tail(&bf->list, &sc->rx.rxbuf);
+ ath_rx_edma_buf_link(sc, qtype);
+ skb = skb_peek(&rx_edma->rx_fifo);
+ if (!skb)
+ return true;
+
+ bf = SKB_CB_ATHBUF(skb);
+ BUG_ON(!bf);
+
+ __skb_unlink(skb, &rx_edma->rx_fifo);
+ list_add_tail(&bf->list, &sc->rx.rxbuf);
+ ath_rx_edma_buf_link(sc, qtype);
+ return true;
+ }
+ skb_queue_tail(&rx_edma->rx_buffers, skb);
+
+ return true;
+}
+static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
+ struct ath_rx_status *rs,
+ enum ath9k_rx_qtype qtype)
+{
+ struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
+ struct sk_buff *skb;
struct ath_buf *bf;
+
+ while (ath_edma_get_buffers(sc, qtype));
+ skb = __skb_dequeue(&rx_edma->rx_buffers);
+ if (!skb)
+ return NULL;
+
+ bf = SKB_CB_ATHBUF(skb);
+ ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data);
+ return bf;
+}
+
+static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
+ struct ath_rx_status *rs)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
struct ath_desc *ds;
- struct ath_rx_status *rx_stats;
+ struct ath_buf *bf;
+ int ret;
+
+ if (list_empty(&sc->rx.rxbuf)) {
+ sc->rx.rxlink = NULL;
+ return NULL;
+ }
+
+ bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
+ ds = bf->bf_desc;
+
+ /*
+ * Must provide the virtual address of the current
+ * descriptor, the physical address, and the virtual
+ * address of the next descriptor in the h/w chain.
+ * This allows the HAL to look ahead to see if the
+ * hardware is done with a descriptor by checking the
+ * done bit in the following descriptor and the address
+ * of the current descriptor the DMA engine is working
+ * on. All this is necessary because of our use of
+ * a self-linked list to avoid rx overruns.
+ */
+ ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0);
+ if (ret == -EINPROGRESS) {
+ struct ath_rx_status trs;
+ struct ath_buf *tbf;
+ struct ath_desc *tds;
+
+ memset(&trs, 0, sizeof(trs));
+ if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
+ sc->rx.rxlink = NULL;
+ return NULL;
+ }
+
+ tbf = list_entry(bf->list.next, struct ath_buf, list);
+
+ /*
+ * On some hardware the descriptor status words could
+ * get corrupted, including the done bit. Because of
+ * this, check if the next descriptor's done bit is
+ * set or not.
+ *
+ * If the next descriptor's done bit is set, the current
+ * descriptor has been corrupted. Force s/w to discard
+ * this descriptor and continue...
+ */
+
+ tds = tbf->bf_desc;
+ ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
+ if (ret == -EINPROGRESS)
+ return NULL;
+ }
+
+ if (!bf->bf_mpdu)
+ return bf;
+
+ /*
+ * Synchronize the DMA transfer with CPU before
+ * 1. accessing the frame
+ * 2. requeueing the same buffer to h/w
+ */
+ dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
+ common->rx_bufsize,
+ DMA_FROM_DEVICE);
+
+ return bf;
+}
+
+
+int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
+{
+ struct ath_buf *bf;
struct sk_buff *skb = NULL, *requeue_skb;
struct ieee80211_rx_status *rxs;
struct ath_hw *ah = sc->sc_ah;
@@ -491,7 +832,17 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
struct ieee80211_hdr *hdr;
int retval;
bool decrypt_error = false;
+ struct ath_rx_status rs;
+ enum ath9k_rx_qtype qtype;
+ bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
+ int dma_type;
+ if (edma)
+ dma_type = DMA_FROM_DEVICE;
+ else
+ dma_type = DMA_BIDIRECTIONAL;
+
+ qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
spin_lock_bh(&sc->rx.rxbuflock);
do {
@@ -499,79 +850,25 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
break;
- if (list_empty(&sc->rx.rxbuf)) {
- sc->rx.rxlink = NULL;
- break;
- }
-
- bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
- ds = bf->bf_desc;
-
- /*
- * Must provide the virtual address of the current
- * descriptor, the physical address, and the virtual
- * address of the next descriptor in the h/w chain.
- * This allows the HAL to look ahead to see if the
- * hardware is done with a descriptor by checking the
- * done bit in the following descriptor and the address
- * of the current descriptor the DMA engine is working
- * on. All this is necessary because of our use of
- * a self-linked list to avoid rx overruns.
- */
- retval = ath9k_hw_rxprocdesc(ah, ds,
- bf->bf_daddr,
- PA2DESC(sc, ds->ds_link),
- 0);
- if (retval == -EINPROGRESS) {
- struct ath_buf *tbf;
- struct ath_desc *tds;
-
- if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
- sc->rx.rxlink = NULL;
- break;
- }
+ memset(&rs, 0, sizeof(rs));
+ if (edma)
+ bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
+ else
+ bf = ath_get_next_rx_buf(sc, &rs);
- tbf = list_entry(bf->list.next, struct ath_buf, list);
-
- /*
- * On some hardware the descriptor status words could
- * get corrupted, including the done bit. Because of
- * this, check if the next descriptor's done bit is
- * set or not.
- *
- * If the next descriptor's done bit is set, the current
- * descriptor has been corrupted. Force s/w to discard
- * this descriptor and continue...
- */
-
- tds = tbf->bf_desc;
- retval = ath9k_hw_rxprocdesc(ah, tds, tbf->bf_daddr,
- PA2DESC(sc, tds->ds_link), 0);
- if (retval == -EINPROGRESS) {
- break;
- }
- }
+ if (!bf)
+ break;
skb = bf->bf_mpdu;
if (!skb)
continue;
- /*
- * Synchronize the DMA transfer with CPU before
- * 1. accessing the frame
- * 2. requeueing the same buffer to h/w
- */
- dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
- common->rx_bufsize,
- DMA_FROM_DEVICE);
-
hdr = (struct ieee80211_hdr *) skb->data;
rxs = IEEE80211_SKB_RXCB(skb);
hw = ath_get_virt_hw(sc, hdr);
- rx_stats = &ds->ds_rxstat;
- ath_debug_stat_rx(sc, bf);
+ ath_debug_stat_rx(sc, &rs);
/*
* If we're asked to flush receive queue, directly
@@ -580,7 +877,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
if (flush)
goto requeue;
- retval = ath9k_cmn_rx_skb_preprocess(common, hw, skb, rx_stats,
+ retval = ath9k_cmn_rx_skb_preprocess(common, hw, skb, &rs,
rxs, &decrypt_error);
if (retval)
goto requeue;
@@ -599,18 +896,20 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
/* Unmap the frame */
dma_unmap_single(sc->dev, bf->bf_buf_addr,
common->rx_bufsize,
- DMA_FROM_DEVICE);
+ dma_type);
- skb_put(skb, rx_stats->rs_datalen);
+ skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
+ if (ah->caps.rx_status_len)
+ skb_pull(skb, ah->caps.rx_status_len);
- ath9k_cmn_rx_skb_postprocess(common, skb, rx_stats,
+ ath9k_cmn_rx_skb_postprocess(common, skb, &rs,
rxs, decrypt_error);
/* We will now give hardware our shiny new allocated skb */
bf->bf_mpdu = requeue_skb;
bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
common->rx_bufsize,
- DMA_FROM_DEVICE);
+ dma_type);
if (unlikely(dma_mapping_error(sc->dev,
bf->bf_buf_addr))) {
dev_kfree_skb_any(requeue_skb);
@@ -626,9 +925,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
* change the default rx antenna if rx diversity chooses the
* other antenna 3 times in a row.
*/
- if (sc->rx.defant != ds->ds_rxstat.rs_antenna) {
+ if (sc->rx.defant != rs.rs_antenna) {
if (++sc->rx.rxotherant >= 3)
- ath_setdefantenna(sc, rx_stats->rs_antenna);
+ ath_setdefantenna(sc, rs.rs_antenna);
} else {
sc->rx.rxotherant = 0;
}
@@ -641,12 +940,16 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
ath_rx_send_to_mac80211(hw, sc, skb, rxs);
requeue:
- list_move_tail(&bf->list, &sc->rx.rxbuf);
- ath_rx_buf_link(sc, bf);
+ if (edma) {
+ list_add_tail(&bf->list, &sc->rx.rxbuf);
+ ath_rx_edma_buf_link(sc, qtype);
+ } else {
+ list_move_tail(&bf->list, &sc->rx.rxbuf);
+ ath_rx_buf_link(sc, bf);
+ }
} while (1);
spin_unlock_bh(&sc->rx.rxbuflock);
return 0;
-#undef PA2DESC
}
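
The new EDMA receive path above pushes skbs into per-priority hardware FIFOs and later has to recover the owning ath_buf from a completed skb; SKB_CB_ATHBUF() does this by stashing a back-pointer in the skb's 48-byte control block. A toy, self-contained sketch of that back-pointer pattern (struct sk_buff and struct ath_buf are replaced by stand-in types, so this is illustrative only):

#include <stdio.h>
#include <string.h>

struct fake_skb {
        char cb[48];            /* stands in for sk_buff::cb */
};

struct fake_buf {
        int id;                 /* stands in for struct ath_buf */
};

#define SKB_CB_BUF(__skb) (*((struct fake_buf **)(__skb)->cb))

int main(void)
{
        struct fake_buf bf = { .id = 7 };
        struct fake_skb skb;

        memset(&skb, 0, sizeof(skb));
        SKB_CB_BUF(&skb) = &bf;         /* link when queueing to the RX FIFO */

        /* On completion the driver only has the skb; the buffer descriptor
         * comes back out of the control block. */
        printf("recovered ath_buf id = %d\n", SKB_CB_BUF(&skb)->id);
        return 0;
}
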
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 72cfa8ebd9ae..d4371a43bdaa 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -20,7 +20,7 @@
#include "../reg.h"
#define AR_CR 0x0008
-#define AR_CR_RXE 0x00000004
+#define AR_CR_RXE (AR_SREV_9300_20_OR_LATER(ah) ? 0x0000000c : 0x00000004)
#define AR_CR_RXD 0x00000020
#define AR_CR_SWI 0x00000040
@@ -39,6 +39,12 @@
#define AR_CFG_PCI_MASTER_REQ_Q_THRESH 0x00060000
#define AR_CFG_PCI_MASTER_REQ_Q_THRESH_S 17
+#define AR_RXBP_THRESH 0x0018
+#define AR_RXBP_THRESH_HP 0x0000000f
+#define AR_RXBP_THRESH_HP_S 0
+#define AR_RXBP_THRESH_LP 0x00003f00
+#define AR_RXBP_THRESH_LP_S 8
+
#define AR_MIRT 0x0020
#define AR_MIRT_VAL 0x0000ffff
#define AR_MIRT_VAL_S 16
@@ -144,6 +150,9 @@
#define AR_MACMISC_MISC_OBS_BUS_MSB_S 15
#define AR_MACMISC_MISC_OBS_BUS_1 1
+#define AR_DATABUF_SIZE 0x0060
+#define AR_DATABUF_SIZE_MASK 0x00000FFF
+
#define AR_GTXTO 0x0064
#define AR_GTXTO_TIMEOUT_COUNTER 0x0000FFFF
#define AR_GTXTO_TIMEOUT_LIMIT 0xFFFF0000
@@ -160,9 +169,14 @@
#define AR_CST_TIMEOUT_LIMIT 0xFFFF0000
#define AR_CST_TIMEOUT_LIMIT_S 16
+#define AR_HP_RXDP 0x0074
+#define AR_LP_RXDP 0x0078
+
#define AR_ISR 0x0080
#define AR_ISR_RXOK 0x00000001
#define AR_ISR_RXDESC 0x00000002
+#define AR_ISR_HP_RXOK 0x00000001
+#define AR_ISR_LP_RXOK 0x00000002
#define AR_ISR_RXERR 0x00000004
#define AR_ISR_RXNOPKT 0x00000008
#define AR_ISR_RXEOL 0x00000010
@@ -232,7 +246,6 @@
#define AR_ISR_S5_TIMER_THRESH 0x0007FE00
#define AR_ISR_S5_TIM_TIMER 0x00000010
#define AR_ISR_S5_DTIM_TIMER 0x00000020
-#define AR_ISR_S5_S 0x00d8
#define AR_IMR_S5 0x00b8
#define AR_IMR_S5_TIM_TIMER 0x00000010
#define AR_IMR_S5_DTIM_TIMER 0x00000020
@@ -240,7 +253,6 @@
#define AR_ISR_S5_GENTIMER_TRIG_S 0
#define AR_ISR_S5_GENTIMER_THRESH 0xFF800000
#define AR_ISR_S5_GENTIMER_THRESH_S 16
-#define AR_ISR_S5_S 0x00d8
#define AR_IMR_S5_GENTIMER_TRIG 0x0000FF80
#define AR_IMR_S5_GENTIMER_TRIG_S 0
#define AR_IMR_S5_GENTIMER_THRESH 0xFF800000
@@ -249,6 +261,8 @@
#define AR_IMR 0x00a0
#define AR_IMR_RXOK 0x00000001
#define AR_IMR_RXDESC 0x00000002
+#define AR_IMR_RXOK_HP 0x00000001
+#define AR_IMR_RXOK_LP 0x00000002
#define AR_IMR_RXERR 0x00000004
#define AR_IMR_RXNOPKT 0x00000008
#define AR_IMR_RXEOL 0x00000010
@@ -332,10 +346,10 @@
#define AR_ISR_S1_QCU_TXEOL 0x03FF0000
#define AR_ISR_S1_QCU_TXEOL_S 16
-#define AR_ISR_S2_S 0x00cc
-#define AR_ISR_S3_S 0x00d0
-#define AR_ISR_S4_S 0x00d4
-#define AR_ISR_S5_S 0x00d8
+#define AR_ISR_S2_S (AR_SREV_9300_20_OR_LATER(ah) ? 0x00d0 : 0x00cc)
+#define AR_ISR_S3_S (AR_SREV_9300_20_OR_LATER(ah) ? 0x00d4 : 0x00d0)
+#define AR_ISR_S4_S (AR_SREV_9300_20_OR_LATER(ah) ? 0x00d8 : 0x00d4)
+#define AR_ISR_S5_S (AR_SREV_9300_20_OR_LATER(ah) ? 0x00dc : 0x00d8)
#define AR_DMADBG_0 0x00e0
#define AR_DMADBG_1 0x00e4
#define AR_DMADBG_2 0x00e8
@@ -369,6 +383,9 @@
#define AR_Q9_TXDP 0x0824
#define AR_QTXDP(_i) (AR_Q0_TXDP + ((_i)<<2))
+#define AR_Q_STATUS_RING_START 0x830
+#define AR_Q_STATUS_RING_END 0x834
+
#define AR_Q_TXE 0x0840
#define AR_Q_TXE_M 0x000003FF
@@ -461,6 +478,10 @@
#define AR_Q_RDYTIMESHDN 0x0a40
#define AR_Q_RDYTIMESHDN_M 0x000003FF
+/* MAC Descriptor CRC check */
+#define AR_Q_DESC_CRCCHK 0xa44
+/* Enable CRC check on the descriptor fetched from host */
+#define AR_Q_DESC_CRCCHK_EN 1
#define AR_NUM_DCU 10
#define AR_DCU_0 0x0001
@@ -679,7 +700,7 @@
#define AR_WA 0x4004
#define AR_WA_D3_L1_DISABLE (1 << 14)
-#define AR9285_WA_DEFAULT 0x004a05cb
+#define AR9285_WA_DEFAULT 0x004a050b
#define AR9280_WA_DEFAULT 0x0040073b
#define AR_WA_DEFAULT 0x0000073f
@@ -759,6 +780,8 @@
#define AR_SREV_VERSION_9271 0x140
#define AR_SREV_REVISION_9271_10 0
#define AR_SREV_REVISION_9271_11 1
+#define AR_SREV_VERSION_9300 0x1c0
+#define AR_SREV_REVISION_9300_20 2 /* 2.0 and 2.1 */
#define AR_SREV_5416(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \
@@ -844,6 +867,19 @@
#define AR_SREV_9271_11(_ah) \
(AR_SREV_9271(_ah) && \
((_ah)->hw_version.macRev == AR_SREV_REVISION_9271_11))
+#define AR_SREV_9300(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9300))
+#define AR_SREV_9300_20(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9300) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9300_20))
+#define AR_SREV_9300_20_OR_LATER(_ah) \
+ (((_ah)->hw_version.macVersion > AR_SREV_VERSION_9300) || \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9300) && \
+ ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9300_20)))
+
+#define AR_SREV_9285E_20(_ah) \
+ (AR_SREV_9285_12_OR_LATER(_ah) && \
+ ((REG_READ(_ah, AR_AN_SYNTH9) & 0x7) == 0x1))
#define AR_RADIO_SREV_MAJOR 0xf0
#define AR_RAD5133_SREV_MAJOR 0xc0
@@ -940,6 +976,8 @@ enum {
#define AR928X_NUM_GPIO 10
#define AR9285_NUM_GPIO 12
#define AR9287_NUM_GPIO 11
+#define AR9271_NUM_GPIO 16
+#define AR9300_NUM_GPIO 17
#define AR_GPIO_IN_OUT 0x4048
#define AR_GPIO_IN_VAL 0x0FFFC000
@@ -950,19 +988,23 @@ enum {
#define AR9285_GPIO_IN_VAL_S 12
#define AR9287_GPIO_IN_VAL 0x003FF800
#define AR9287_GPIO_IN_VAL_S 11
+#define AR9271_GPIO_IN_VAL 0xFFFF0000
+#define AR9271_GPIO_IN_VAL_S 16
+#define AR9300_GPIO_IN_VAL 0x0001FFFF
+#define AR9300_GPIO_IN_VAL_S 0
-#define AR_GPIO_OE_OUT 0x404c
+#define AR_GPIO_OE_OUT (AR_SREV_9300_20_OR_LATER(ah) ? 0x4050 : 0x404c)
#define AR_GPIO_OE_OUT_DRV 0x3
#define AR_GPIO_OE_OUT_DRV_NO 0x0
#define AR_GPIO_OE_OUT_DRV_LOW 0x1
#define AR_GPIO_OE_OUT_DRV_HI 0x2
#define AR_GPIO_OE_OUT_DRV_ALL 0x3
-#define AR_GPIO_INTR_POL 0x4050
-#define AR_GPIO_INTR_POL_VAL 0x00001FFF
+#define AR_GPIO_INTR_POL (AR_SREV_9300_20_OR_LATER(ah) ? 0x4058 : 0x4050)
+#define AR_GPIO_INTR_POL_VAL 0x0001FFFF
#define AR_GPIO_INTR_POL_VAL_S 0
-#define AR_GPIO_INPUT_EN_VAL 0x4054
+#define AR_GPIO_INPUT_EN_VAL (AR_SREV_9300_20_OR_LATER(ah) ? 0x405c : 0x4054)
#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_DEF 0x00000004
#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_S 2
#define AR_GPIO_INPUT_EN_VAL_BT_FREQUENCY_DEF 0x00000008
@@ -980,13 +1022,13 @@ enum {
#define AR_GPIO_RTC_RESET_OVERRIDE_ENABLE 0x00010000
#define AR_GPIO_JTAG_DISABLE 0x00020000
-#define AR_GPIO_INPUT_MUX1 0x4058
+#define AR_GPIO_INPUT_MUX1 (AR_SREV_9300_20_OR_LATER(ah) ? 0x4060 : 0x4058)
#define AR_GPIO_INPUT_MUX1_BT_ACTIVE 0x000f0000
#define AR_GPIO_INPUT_MUX1_BT_ACTIVE_S 16
#define AR_GPIO_INPUT_MUX1_BT_PRIORITY 0x00000f00
#define AR_GPIO_INPUT_MUX1_BT_PRIORITY_S 8
-#define AR_GPIO_INPUT_MUX2 0x405c
+#define AR_GPIO_INPUT_MUX2 (AR_SREV_9300_20_OR_LATER(ah) ? 0x4064 : 0x405c)
#define AR_GPIO_INPUT_MUX2_CLK25 0x0000000f
#define AR_GPIO_INPUT_MUX2_CLK25_S 0
#define AR_GPIO_INPUT_MUX2_RFSILENT 0x000000f0
@@ -994,13 +1036,13 @@ enum {
#define AR_GPIO_INPUT_MUX2_RTC_RESET 0x00000f00
#define AR_GPIO_INPUT_MUX2_RTC_RESET_S 8
-#define AR_GPIO_OUTPUT_MUX1 0x4060
-#define AR_GPIO_OUTPUT_MUX2 0x4064
-#define AR_GPIO_OUTPUT_MUX3 0x4068
+#define AR_GPIO_OUTPUT_MUX1 (AR_SREV_9300_20_OR_LATER(ah) ? 0x4068 : 0x4060)
+#define AR_GPIO_OUTPUT_MUX2 (AR_SREV_9300_20_OR_LATER(ah) ? 0x406c : 0x4064)
+#define AR_GPIO_OUTPUT_MUX3 (AR_SREV_9300_20_OR_LATER(ah) ? 0x4070 : 0x4068)
-#define AR_INPUT_STATE 0x406c
+#define AR_INPUT_STATE (AR_SREV_9300_20_OR_LATER(ah) ? 0x4074 : 0x406c)
-#define AR_EEPROM_STATUS_DATA 0x407c
+#define AR_EEPROM_STATUS_DATA (AR_SREV_9300_20_OR_LATER(ah) ? 0x4084 : 0x407c)
#define AR_EEPROM_STATUS_DATA_VAL 0x0000ffff
#define AR_EEPROM_STATUS_DATA_VAL_S 0
#define AR_EEPROM_STATUS_DATA_BUSY 0x00010000
@@ -1008,13 +1050,24 @@ enum {
#define AR_EEPROM_STATUS_DATA_PROT_ACCESS 0x00040000
#define AR_EEPROM_STATUS_DATA_ABSENT_ACCESS 0x00080000
-#define AR_OBS 0x4080
+#define AR_OBS (AR_SREV_9300_20_OR_LATER(ah) ? 0x4088 : 0x4080)
-#define AR_GPIO_PDPU 0x4088
+#define AR_GPIO_PDPU (AR_SREV_9300_20_OR_LATER(ah) ? 0x4090 : 0x4088)
-#define AR_PCIE_MSI 0x4094
+#define AR_PCIE_MSI (AR_SREV_9300_20_OR_LATER(ah) ? 0x40a4 : 0x4094)
#define AR_PCIE_MSI_ENABLE 0x00000001
+#define AR_INTR_PRIO_SYNC_ENABLE 0x40c4
+#define AR_INTR_PRIO_ASYNC_MASK 0x40c8
+#define AR_INTR_PRIO_SYNC_MASK 0x40cc
+#define AR_INTR_PRIO_ASYNC_ENABLE 0x40d4
+
+#define AR_RTC_9300_PLL_DIV 0x000003ff
+#define AR_RTC_9300_PLL_DIV_S 0
+#define AR_RTC_9300_PLL_REFDIV 0x00003C00
+#define AR_RTC_9300_PLL_REFDIV_S 10
+#define AR_RTC_9300_PLL_CLKSEL 0x0000C000
+#define AR_RTC_9300_PLL_CLKSEL_S 14
#define AR_RTC_9160_PLL_DIV 0x000003ff
#define AR_RTC_9160_PLL_DIV_S 0
@@ -1032,6 +1085,16 @@ enum {
#define AR_RTC_RC_COLD_RESET 0x00000004
#define AR_RTC_RC_WARM_RESET 0x00000008
+/* Crystal Control */
+#define AR_RTC_XTAL_CONTROL 0x7004
+
+/* Reg Control 0 */
+#define AR_RTC_REG_CONTROL0 0x7008
+
+/* Reg Control 1 */
+#define AR_RTC_REG_CONTROL1 0x700c
+#define AR_RTC_REG_CONTROL1_SWREG_PROGRAM 0x00000001
+
#define AR_RTC_PLL_CONTROL \
((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0014) : 0x7014)
@@ -1062,6 +1125,7 @@ enum {
#define AR_RTC_SLEEP_CLK \
((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0048) : 0x7048)
#define AR_RTC_FORCE_DERIVED_CLK 0x2
+#define AR_RTC_FORCE_SWREG_PRD 0x00000004
#define AR_RTC_FORCE_WAKE \
((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x004c) : 0x704c)
@@ -1178,6 +1242,13 @@ enum {
#define AR9285_AN_RF2G4_DB2_4 0x00003800
#define AR9285_AN_RF2G4_DB2_4_S 11
+#define AR9285_RF2G5 0x7830
+#define AR9285_RF2G5_IC50TX 0xfffff8ff
+#define AR9285_RF2G5_IC50TX_SET 0x00000400
+#define AR9285_RF2G5_IC50TX_XE_SET 0x00000500
+#define AR9285_RF2G5_IC50TX_CLEAR 0x00000700
+#define AR9285_RF2G5_IC50TX_CLEAR_S 8
+
/* AR9271 : 0x7828, 0x782c different setting from AR9285 */
#define AR9271_AN_RF2G3_OB_cck 0x001C0000
#define AR9271_AN_RF2G3_OB_cck_S 18
@@ -1519,7 +1590,7 @@ enum {
#define AR_TSFOOR_THRESHOLD 0x813c
#define AR_TSFOOR_THRESHOLD_VAL 0x0000FFFF
-#define AR_PHY_ERR_EIFS_MASK 8144
+#define AR_PHY_ERR_EIFS_MASK 0x8144
#define AR_PHY_ERR_3 0x8168
#define AR_PHY_ERR_3_COUNT 0x00FFFFFF
@@ -1585,24 +1656,26 @@ enum {
#define AR_FIRST_NDP_TIMER 7
#define AR_NDP2_PERIOD 0x81a0
#define AR_NDP2_TIMER_MODE 0x81c0
-#define AR_NEXT_TBTT_TIMER 0x8200
-#define AR_NEXT_DMA_BEACON_ALERT 0x8204
-#define AR_NEXT_SWBA 0x8208
-#define AR_NEXT_CFP 0x8208
-#define AR_NEXT_HCF 0x820C
-#define AR_NEXT_TIM 0x8210
-#define AR_NEXT_DTIM 0x8214
-#define AR_NEXT_QUIET_TIMER 0x8218
-#define AR_NEXT_NDP_TIMER 0x821C
-
-#define AR_BEACON_PERIOD 0x8220
-#define AR_DMA_BEACON_PERIOD 0x8224
-#define AR_SWBA_PERIOD 0x8228
-#define AR_HCF_PERIOD 0x822C
-#define AR_TIM_PERIOD 0x8230
-#define AR_DTIM_PERIOD 0x8234
-#define AR_QUIET_PERIOD 0x8238
-#define AR_NDP_PERIOD 0x823C
+
+#define AR_GEN_TIMERS(_i) (0x8200 + ((_i) << 2))
+#define AR_NEXT_TBTT_TIMER AR_GEN_TIMERS(0)
+#define AR_NEXT_DMA_BEACON_ALERT AR_GEN_TIMERS(1)
+#define AR_NEXT_SWBA AR_GEN_TIMERS(2)
+#define AR_NEXT_CFP AR_GEN_TIMERS(2)
+#define AR_NEXT_HCF AR_GEN_TIMERS(3)
+#define AR_NEXT_TIM AR_GEN_TIMERS(4)
+#define AR_NEXT_DTIM AR_GEN_TIMERS(5)
+#define AR_NEXT_QUIET_TIMER AR_GEN_TIMERS(6)
+#define AR_NEXT_NDP_TIMER AR_GEN_TIMERS(7)
+
+#define AR_BEACON_PERIOD AR_GEN_TIMERS(8)
+#define AR_DMA_BEACON_PERIOD AR_GEN_TIMERS(9)
+#define AR_SWBA_PERIOD AR_GEN_TIMERS(10)
+#define AR_HCF_PERIOD AR_GEN_TIMERS(11)
+#define AR_TIM_PERIOD AR_GEN_TIMERS(12)
+#define AR_DTIM_PERIOD AR_GEN_TIMERS(13)
+#define AR_QUIET_PERIOD AR_GEN_TIMERS(14)
+#define AR_NDP_PERIOD AR_GEN_TIMERS(15)
#define AR_TIMER_MODE 0x8240
#define AR_TBTT_TIMER_EN 0x00000001
@@ -1716,4 +1789,32 @@ enum {
#define AR9271_CORE_CLOCK 117 /* clock to 117Mhz */
#define AR9271_TARGET_BAUD_RATE 19200 /* 115200 */
+#define AR_AGG_WEP_ENABLE_FIX 0x00000008 /* This allows the use of AR_AGG_WEP_ENABLE */
+#define AR_ADHOC_MCAST_KEYID_ENABLE 0x00000040 /* This bit enables the Multicast search
+ * based on both MAC Address and Key ID.
+ * If this bit is 0, the multicast search
+ * is based on MAC address only.
+ * For Merlin and above only.
+ */
+#define AR_AGG_WEP_ENABLE 0x00020000 /* This field enables the AGG_WEP feature;
+ * when it is enabled, AGG_WEP takes
+ * charge of the encryption interface of
+ * pcu_txsm.
+ */
+
+#define AR9300_SM_BASE 0xa200
+#define AR9002_PHY_AGC_CONTROL 0x9860
+#define AR9003_PHY_AGC_CONTROL (AR9300_SM_BASE + 0xc4)
+#define AR_PHY_AGC_CONTROL (AR_SREV_9300_20_OR_LATER(ah) ? AR9003_PHY_AGC_CONTROL : AR9002_PHY_AGC_CONTROL)
+#define AR_PHY_AGC_CONTROL_CAL 0x00000001 /* do internal calibration */
+#define AR_PHY_AGC_CONTROL_NF 0x00000002 /* do noise-floor calibration */
+#define AR_PHY_AGC_CONTROL_OFFSET_CAL 0x00000800 /* allow offset calibration */
+#define AR_PHY_AGC_CONTROL_ENABLE_NF 0x00008000 /* enable noise floor calibration to happen */
+#define AR_PHY_AGC_CONTROL_FLTR_CAL 0x00010000 /* allow tx filter calibration */
+#define AR_PHY_AGC_CONTROL_NO_UPDATE_NF 0x00020000 /* don't update noise floor automatically */
+#define AR_PHY_AGC_CONTROL_EXT_NF_PWR_MEAS 0x00040000 /* extend noise floor power measurement */
+#define AR_PHY_AGC_CONTROL_CLC_SUCCESS 0x00080000 /* carrier leak calibration done */
+#define AR_PHY_AGC_CONTROL_YCOK_MAX 0x000003c0
+#define AR_PHY_AGC_CONTROL_YCOK_MAX_S 6
+
#endif
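
Several of the reg.h changes above (AR_CR_RXE, AR_ISR_S2_S through AR_ISR_S5_S, the GPIO/MUX block, AR_EEPROM_STATUS_DATA, AR_PHY_AGC_CONTROL) replace fixed offsets with expressions evaluated against the hardware revision, so each macro silently assumes a variable named ah is in scope at the point of use. A stand-alone sketch of that pattern with stand-in types and a simplified revision check; the two offsets are the ones from the AR_GPIO_OE_OUT hunk above:

#include <stdio.h>

struct fake_hw {
        unsigned int mac_version;       /* stands in for ah->hw_version.macVersion */
};

#define FAKE_VERSION_9300       0x1c0
#define IS_9300_OR_LATER(_ah)   ((_ah)->mac_version >= FAKE_VERSION_9300)

/* Like AR_GPIO_OE_OUT above: the offset is chosen at run time and the
 * macro quietly expects a local variable called 'ah'. */
#define GPIO_OE_OUT             (IS_9300_OR_LATER(ah) ? 0x4050 : 0x404c)

int main(void)
{
        struct fake_hw legacy = { .mac_version = 0x0080 };
        struct fake_hw ar9300 = { .mac_version = FAKE_VERSION_9300 };
        struct fake_hw *ah;

        ah = &legacy;
        printf("pre-AR9300 GPIO_OE_OUT = 0x%x\n", GPIO_OE_OUT);
        ah = &ar9300;
        printf("AR9300     GPIO_OE_OUT = 0x%x\n", GPIO_OE_OUT);
        return 0;
}
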
diff --git a/drivers/net/wireless/ath/ath9k/virtual.c b/drivers/net/wireless/ath/ath9k/virtual.c
index 00c0e21a4af7..105ad40968f6 100644
--- a/drivers/net/wireless/ath/ath9k/virtual.c
+++ b/drivers/net/wireless/ath/ath9k/virtual.c
@@ -220,7 +220,7 @@ static int ath9k_send_nullfunc(struct ath_wiphy *aphy,
memset(&txctl, 0, sizeof(struct ath_tx_control));
txctl.txq = &sc->tx.txq[sc->tx.hwq_map[ATH9K_WME_AC_VO]];
- txctl.frame_type = ps ? ATH9K_INT_PAUSE : ATH9K_INT_UNPAUSE;
+ txctl.frame_type = ps ? ATH9K_IFT_PAUSE : ATH9K_IFT_UNPAUSE;
if (ath_tx_start(aphy->hw, skb, &txctl) != 0)
goto exit;
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
new file mode 100644
index 000000000000..e23172c9caaf
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -0,0 +1,336 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "htc.h"
+
+static const char *wmi_cmd_to_name(enum wmi_cmd_id wmi_cmd)
+{
+ switch (wmi_cmd) {
+ case WMI_ECHO_CMDID:
+ return "WMI_ECHO_CMDID";
+ case WMI_ACCESS_MEMORY_CMDID:
+ return "WMI_ACCESS_MEMORY_CMDID";
+ case WMI_DISABLE_INTR_CMDID:
+ return "WMI_DISABLE_INTR_CMDID";
+ case WMI_ENABLE_INTR_CMDID:
+ return "WMI_ENABLE_INTR_CMDID";
+ case WMI_RX_LINK_CMDID:
+ return "WMI_RX_LINK_CMDID";
+ case WMI_ATH_INIT_CMDID:
+ return "WMI_ATH_INIT_CMDID";
+ case WMI_ABORT_TXQ_CMDID:
+ return "WMI_ABORT_TXQ_CMDID";
+ case WMI_STOP_TX_DMA_CMDID:
+ return "WMI_STOP_TX_DMA_CMDID";
+ case WMI_STOP_DMA_RECV_CMDID:
+ return "WMI_STOP_DMA_RECV_CMDID";
+ case WMI_ABORT_TX_DMA_CMDID:
+ return "WMI_ABORT_TX_DMA_CMDID";
+ case WMI_DRAIN_TXQ_CMDID:
+ return "WMI_DRAIN_TXQ_CMDID";
+ case WMI_DRAIN_TXQ_ALL_CMDID:
+ return "WMI_DRAIN_TXQ_ALL_CMDID";
+ case WMI_START_RECV_CMDID:
+ return "WMI_START_RECV_CMDID";
+ case WMI_STOP_RECV_CMDID:
+ return "WMI_STOP_RECV_CMDID";
+ case WMI_FLUSH_RECV_CMDID:
+ return "WMI_FLUSH_RECV_CMDID";
+ case WMI_SET_MODE_CMDID:
+ return "WMI_SET_MODE_CMDID";
+ case WMI_RESET_CMDID:
+ return "WMI_RESET_CMDID";
+ case WMI_NODE_CREATE_CMDID:
+ return "WMI_NODE_CREATE_CMDID";
+ case WMI_NODE_REMOVE_CMDID:
+ return "WMI_NODE_REMOVE_CMDID";
+ case WMI_VAP_REMOVE_CMDID:
+ return "WMI_VAP_REMOVE_CMDID";
+ case WMI_VAP_CREATE_CMDID:
+ return "WMI_VAP_CREATE_CMDID";
+ case WMI_BEACON_UPDATE_CMDID:
+ return "WMI_BEACON_UPDATE_CMDID";
+ case WMI_REG_READ_CMDID:
+ return "WMI_REG_READ_CMDID";
+ case WMI_REG_WRITE_CMDID:
+ return "WMI_REG_WRITE_CMDID";
+ case WMI_RC_STATE_CHANGE_CMDID:
+ return "WMI_RC_STATE_CHANGE_CMDID";
+ case WMI_RC_RATE_UPDATE_CMDID:
+ return "WMI_RC_RATE_UPDATE_CMDID";
+ case WMI_DEBUG_INFO_CMDID:
+ return "WMI_DEBUG_INFO_CMDID";
+ case WMI_HOST_ATTACH:
+ return "WMI_HOST_ATTACH";
+ case WMI_TARGET_IC_UPDATE_CMDID:
+ return "WMI_TARGET_IC_UPDATE_CMDID";
+ case WMI_TGT_STATS_CMDID:
+ return "WMI_TGT_STATS_CMDID";
+ case WMI_TX_AGGR_ENABLE_CMDID:
+ return "WMI_TX_AGGR_ENABLE_CMDID";
+ case WMI_TGT_DETACH_CMDID:
+ return "WMI_TGT_DETACH_CMDID";
+ case WMI_TGT_TXQ_ENABLE_CMDID:
+ return "WMI_TGT_TXQ_ENABLE_CMDID";
+ }
+
+ return "Bogus";
+}
+
+struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv)
+{
+ struct wmi *wmi;
+
+ wmi = kzalloc(sizeof(struct wmi), GFP_KERNEL);
+ if (!wmi)
+ return NULL;
+
+ wmi->drv_priv = priv;
+ wmi->stopped = false;
+ mutex_init(&wmi->op_mutex);
+ mutex_init(&wmi->multi_write_mutex);
+ init_completion(&wmi->cmd_wait);
+
+ return wmi;
+}
+
+void ath9k_deinit_wmi(struct ath9k_htc_priv *priv)
+{
+ struct wmi *wmi = priv->wmi;
+
+ mutex_lock(&wmi->op_mutex);
+ wmi->stopped = true;
+ mutex_unlock(&wmi->op_mutex);
+
+ kfree(priv->wmi);
+}
+
+void ath9k_wmi_tasklet(unsigned long data)
+{
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct wmi_cmd_hdr *hdr;
+ struct wmi_swba *swba_hdr;
+ enum wmi_event_id event;
+ struct sk_buff *skb;
+ void *wmi_event;
+ unsigned long flags;
+#ifdef CONFIG_ATH9K_HTC_DEBUGFS
+ __be32 txrate;
+#endif
+
+ spin_lock_irqsave(&priv->wmi->wmi_lock, flags);
+ skb = priv->wmi->wmi_skb;
+ spin_unlock_irqrestore(&priv->wmi->wmi_lock, flags);
+
+ hdr = (struct wmi_cmd_hdr *) skb->data;
+ event = be16_to_cpu(hdr->command_id);
+ wmi_event = skb_pull(skb, sizeof(struct wmi_cmd_hdr));
+
+ ath_print(common, ATH_DBG_WMI,
+ "WMI Event: 0x%x\n", event);
+
+ switch (event) {
+ case WMI_TGT_RDY_EVENTID:
+ break;
+ case WMI_SWBA_EVENTID:
+ swba_hdr = (struct wmi_swba *) wmi_event;
+ ath9k_htc_swba(priv, swba_hdr->beacon_pending);
+ break;
+ case WMI_FATAL_EVENTID:
+ break;
+ case WMI_TXTO_EVENTID:
+ break;
+ case WMI_BMISS_EVENTID:
+ break;
+ case WMI_WLAN_TXCOMP_EVENTID:
+ break;
+ case WMI_DELBA_EVENTID:
+ break;
+ case WMI_TXRATE_EVENTID:
+#ifdef CONFIG_ATH9K_HTC_DEBUGFS
+ txrate = ((struct wmi_event_txrate *)wmi_event)->txrate;
+ priv->debug.txrate = be32_to_cpu(txrate);
+#endif
+ break;
+ default:
+ break;
+ }
+
+ kfree_skb(skb);
+}
+
+static void ath9k_wmi_rsp_callback(struct wmi *wmi, struct sk_buff *skb)
+{
+ skb_pull(skb, sizeof(struct wmi_cmd_hdr));
+
+ if (wmi->cmd_rsp_buf != NULL && wmi->cmd_rsp_len != 0)
+ memcpy(wmi->cmd_rsp_buf, skb->data, wmi->cmd_rsp_len);
+
+ complete(&wmi->cmd_wait);
+}
+
+static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
+ enum htc_endpoint_id epid)
+{
+ struct wmi *wmi = (struct wmi *) priv;
+ struct wmi_cmd_hdr *hdr;
+ u16 cmd_id;
+
+ if (unlikely(wmi->stopped))
+ goto free_skb;
+
+ hdr = (struct wmi_cmd_hdr *) skb->data;
+ cmd_id = be16_to_cpu(hdr->command_id);
+
+ if (cmd_id & 0x1000) {
+ spin_lock(&wmi->wmi_lock);
+ wmi->wmi_skb = skb;
+ spin_unlock(&wmi->wmi_lock);
+ tasklet_schedule(&wmi->drv_priv->wmi_tasklet);
+ return;
+ }
+
+ /* Check if there has been a timeout. */
+ spin_lock(&wmi->wmi_lock);
+ if (cmd_id != wmi->last_cmd_id) {
+ spin_unlock(&wmi->wmi_lock);
+ goto free_skb;
+ }
+ spin_unlock(&wmi->wmi_lock);
+
+ /* WMI command response */
+ ath9k_wmi_rsp_callback(wmi, skb);
+
+free_skb:
+ kfree_skb(skb);
+}
+
+static void ath9k_wmi_ctrl_tx(void *priv, struct sk_buff *skb,
+ enum htc_endpoint_id epid, bool txok)
+{
+ kfree_skb(skb);
+}
+
+int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi,
+ enum htc_endpoint_id *wmi_ctrl_epid)
+{
+ struct htc_service_connreq connect;
+ int ret;
+
+ wmi->htc = htc;
+
+ memset(&connect, 0, sizeof(connect));
+
+ connect.ep_callbacks.priv = wmi;
+ connect.ep_callbacks.tx = ath9k_wmi_ctrl_tx;
+ connect.ep_callbacks.rx = ath9k_wmi_ctrl_rx;
+ connect.service_id = WMI_CONTROL_SVC;
+
+ ret = htc_connect_service(htc, &connect, &wmi->ctrl_epid);
+ if (ret)
+ return ret;
+
+ *wmi_ctrl_epid = wmi->ctrl_epid;
+
+ return 0;
+}
+
+static int ath9k_wmi_cmd_issue(struct wmi *wmi,
+ struct sk_buff *skb,
+ enum wmi_cmd_id cmd, u16 len)
+{
+ struct wmi_cmd_hdr *hdr;
+
+ hdr = (struct wmi_cmd_hdr *) skb_push(skb, sizeof(struct wmi_cmd_hdr));
+ hdr->command_id = cpu_to_be16(cmd);
+ hdr->seq_no = cpu_to_be16(++wmi->tx_seq_id);
+
+ return htc_send(wmi->htc, skb, wmi->ctrl_epid, NULL);
+}
+
+int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
+ u8 *cmd_buf, u32 cmd_len,
+ u8 *rsp_buf, u32 rsp_len,
+ u32 timeout)
+{
+ struct ath_hw *ah = wmi->drv_priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ u16 headroom = sizeof(struct htc_frame_hdr) +
+ sizeof(struct wmi_cmd_hdr);
+ struct sk_buff *skb;
+ u8 *data;
+ int time_left, ret = 0;
+ unsigned long flags;
+
+ if (wmi->drv_priv->op_flags & OP_UNPLUGGED)
+ return 0;
+
+ if (!wmi)
+ return -EINVAL;
+
+ skb = alloc_skb(headroom + cmd_len, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_reserve(skb, headroom);
+
+ if (cmd_len != 0 && cmd_buf != NULL) {
+ data = (u8 *) skb_put(skb, cmd_len);
+ memcpy(data, cmd_buf, cmd_len);
+ }
+
+ mutex_lock(&wmi->op_mutex);
+
+ /* check if wmi stopped flag is set */
+ if (unlikely(wmi->stopped)) {
+ ret = -EPROTO;
+ goto out;
+ }
+
+ /* record the rsp buffer and length */
+ wmi->cmd_rsp_buf = rsp_buf;
+ wmi->cmd_rsp_len = rsp_len;
+
+ spin_lock_irqsave(&wmi->wmi_lock, flags);
+ wmi->last_cmd_id = cmd_id;
+ spin_unlock_irqrestore(&wmi->wmi_lock, flags);
+
+ ret = ath9k_wmi_cmd_issue(wmi, skb, cmd_id, cmd_len);
+ if (ret)
+ goto out;
+
+ time_left = wait_for_completion_timeout(&wmi->cmd_wait, timeout);
+ if (!time_left) {
+ ath_print(common, ATH_DBG_WMI,
+ "Timeout waiting for WMI command: %s\n",
+ wmi_cmd_to_name(cmd_id));
+ mutex_unlock(&wmi->op_mutex);
+ return -ETIMEDOUT;
+ }
+
+ mutex_unlock(&wmi->op_mutex);
+
+ return 0;
+
+out:
+ ath_print(common, ATH_DBG_WMI,
+ "WMI failure for: %s\n", wmi_cmd_to_name(cmd_id));
+ mutex_unlock(&wmi->op_mutex);
+ kfree_skb(skb);
+
+ return ret;
+}
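For reference, a typical caller of the API added in this file pairs a command buffer with a response buffer and a timeout in jiffies. The helper below is only an illustrative sketch, not part of the patch; the function name and error handling are assumptions.

	/* Illustrative sketch: read a target register over WMI.
	 * Assumes priv->wmi was created by ath9k_init_wmi() and
	 * connected via ath9k_wmi_connect(). */
	static u32 example_wmi_regread(struct ath9k_htc_priv *priv, u32 reg_offset)
	{
		__be32 addr = cpu_to_be32(reg_offset);
		__be32 val = 0;
		int r;

		r = ath9k_wmi_cmd(priv->wmi, WMI_REG_READ_CMDID,
				  (u8 *) &addr, sizeof(addr),
				  (u8 *) &val, sizeof(val),
				  HZ);
		if (r)
			return 0;	/* command failed or timed out */

		return be32_to_cpu(val);
	}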
diff --git a/drivers/net/wireless/ath/ath9k/wmi.h b/drivers/net/wireless/ath/ath9k/wmi.h
new file mode 100644
index 000000000000..765db5faa2d3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/wmi.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef WMI_H
+#define WMI_H
+
+
+struct wmi_event_txrate {
+ __be32 txrate;
+ struct {
+ u8 rssi_thresh;
+ u8 per;
+ } rc_stats;
+} __packed;
+
+struct wmi_cmd_hdr {
+ __be16 command_id;
+ __be16 seq_no;
+} __packed;
+
+struct wmi_swba {
+ u8 beacon_pending;
+} __packed;
+
+enum wmi_cmd_id {
+ WMI_ECHO_CMDID = 0x0001,
+ WMI_ACCESS_MEMORY_CMDID,
+
+ /* Commands to Target */
+ WMI_DISABLE_INTR_CMDID,
+ WMI_ENABLE_INTR_CMDID,
+ WMI_RX_LINK_CMDID,
+ WMI_ATH_INIT_CMDID,
+ WMI_ABORT_TXQ_CMDID,
+ WMI_STOP_TX_DMA_CMDID,
+ WMI_STOP_DMA_RECV_CMDID,
+ WMI_ABORT_TX_DMA_CMDID,
+ WMI_DRAIN_TXQ_CMDID,
+ WMI_DRAIN_TXQ_ALL_CMDID,
+ WMI_START_RECV_CMDID,
+ WMI_STOP_RECV_CMDID,
+ WMI_FLUSH_RECV_CMDID,
+ WMI_SET_MODE_CMDID,
+ WMI_RESET_CMDID,
+ WMI_NODE_CREATE_CMDID,
+ WMI_NODE_REMOVE_CMDID,
+ WMI_VAP_REMOVE_CMDID,
+ WMI_VAP_CREATE_CMDID,
+ WMI_BEACON_UPDATE_CMDID,
+ WMI_REG_READ_CMDID,
+ WMI_REG_WRITE_CMDID,
+ WMI_RC_STATE_CHANGE_CMDID,
+ WMI_RC_RATE_UPDATE_CMDID,
+ WMI_DEBUG_INFO_CMDID,
+ WMI_HOST_ATTACH,
+ WMI_TARGET_IC_UPDATE_CMDID,
+ WMI_TGT_STATS_CMDID,
+ WMI_TX_AGGR_ENABLE_CMDID,
+ WMI_TGT_DETACH_CMDID,
+ WMI_TGT_TXQ_ENABLE_CMDID,
+};
+
+enum wmi_event_id {
+ WMI_TGT_RDY_EVENTID = 0x1001,
+ WMI_SWBA_EVENTID,
+ WMI_FATAL_EVENTID,
+ WMI_TXTO_EVENTID,
+ WMI_BMISS_EVENTID,
+ WMI_WLAN_TXCOMP_EVENTID,
+ WMI_DELBA_EVENTID,
+ WMI_TXRATE_EVENTID,
+};
+
+#define MAX_CMD_NUMBER 62
+
+struct register_write {
+ __be32 reg;
+ __be32 val;
+};
+
+struct wmi {
+ struct ath9k_htc_priv *drv_priv;
+ struct htc_target *htc;
+ enum htc_endpoint_id ctrl_epid;
+ struct mutex op_mutex;
+ struct completion cmd_wait;
+ enum wmi_cmd_id last_cmd_id;
+ u16 tx_seq_id;
+ u8 *cmd_rsp_buf;
+ u32 cmd_rsp_len;
+ bool stopped;
+
+ struct sk_buff *wmi_skb;
+ spinlock_t wmi_lock;
+
+ atomic_t mwrite_cnt;
+ struct register_write multi_write[MAX_CMD_NUMBER];
+ u32 multi_write_idx;
+ struct mutex multi_write_mutex;
+};
+
+struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv);
+void ath9k_deinit_wmi(struct ath9k_htc_priv *priv);
+int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi,
+ enum htc_endpoint_id *wmi_ctrl_epid);
+int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
+ u8 *cmd_buf, u32 cmd_len,
+ u8 *rsp_buf, u32 rsp_len,
+ u32 timeout);
+void ath9k_wmi_tasklet(unsigned long data);
+
+#define WMI_CMD(_wmi_cmd) \
+ do { \
+ ret = ath9k_wmi_cmd(priv->wmi, _wmi_cmd, NULL, 0, \
+ (u8 *) &cmd_rsp, \
+ sizeof(cmd_rsp), HZ*2); \
+ } while (0)
+
+#define WMI_CMD_BUF(_wmi_cmd, _buf) \
+ do { \
+ ret = ath9k_wmi_cmd(priv->wmi, _wmi_cmd, \
+ (u8 *) _buf, sizeof(*_buf), \
+ &cmd_rsp, sizeof(cmd_rsp), HZ*2); \
+ } while (0)
+
+#endif /* WMI_H */
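Note that the WMI_CMD()/WMI_CMD_BUF() helpers expand against names in the calling scope: they expect a local 'priv' (struct ath9k_htc_priv *), an 'int ret' and a 'cmd_rsp' response variable to exist at the call site. A minimal usage sketch follows; the response type here is chosen only for illustration.

	/* Illustrative call site for the convenience macros above;
	 * 'cmd_rsp' must be sized for whatever the firmware returns
	 * for the command being issued. */
	static int example_start_recv(struct ath9k_htc_priv *priv)
	{
		u32 cmd_rsp;
		int ret;

		WMI_CMD(WMI_START_RECV_CMDID);
		return ret;
	}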
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 294b486bc3ed..3db19172b43b 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -15,10 +15,11 @@
*/
#include "ath9k.h"
+#include "ar9003_mac.h"
#define BITS_PER_BYTE 8
#define OFDM_PLCP_BITS 22
-#define HT_RC_2_MCS(_rc) ((_rc) & 0x0f)
+#define HT_RC_2_MCS(_rc) ((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
#define L_STF 8
#define L_LTF 8
@@ -33,7 +34,7 @@
#define OFDM_SIFS_TIME 16
-static u32 bits_per_symbol[][2] = {
+static u16 bits_per_symbol[][2] = {
/* 20MHz 40MHz */
{ 26, 54 }, /* 0: BPSK */
{ 52, 108 }, /* 1: QPSK 1/2 */
@@ -43,14 +44,6 @@ static u32 bits_per_symbol[][2] = {
{ 208, 432 }, /* 5: 64-QAM 2/3 */
{ 234, 486 }, /* 6: 64-QAM 3/4 */
{ 260, 540 }, /* 7: 64-QAM 5/6 */
- { 52, 108 }, /* 8: BPSK */
- { 104, 216 }, /* 9: QPSK 1/2 */
- { 156, 324 }, /* 10: QPSK 3/4 */
- { 208, 432 }, /* 11: 16-QAM 1/2 */
- { 312, 648 }, /* 12: 16-QAM 3/4 */
- { 416, 864 }, /* 13: 64-QAM 2/3 */
- { 468, 972 }, /* 14: 64-QAM 3/4 */
- { 520, 1080 }, /* 15: 64-QAM 5/6 */
};
#define IS_HT_RATE(_rate) ((_rate) & 0x80)
@@ -59,40 +52,50 @@ static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
struct ath_atx_tid *tid,
struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
- struct ath_txq *txq,
- struct list_head *bf_q,
- int txok, int sendbar);
+ struct ath_txq *txq, struct list_head *bf_q,
+ struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
- int txok);
-static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds,
+ struct ath_tx_status *ts, int txok);
+static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
int nbad, int txok, bool update_rc);
enum {
- MCS_DEFAULT,
+ MCS_HT20,
+ MCS_HT20_SGI,
MCS_HT40,
MCS_HT40_SGI,
};
-static int ath_max_4ms_framelen[3][16] = {
- [MCS_DEFAULT] = {
- 3216, 6434, 9650, 12868, 19304, 25740, 28956, 32180,
- 6430, 12860, 19300, 25736, 38600, 51472, 57890, 64320,
+static int ath_max_4ms_framelen[4][32] = {
+ [MCS_HT20] = {
+ 3212, 6432, 9648, 12864, 19300, 25736, 28952, 32172,
+ 6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
+ 9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
+ 12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
+ },
+ [MCS_HT20_SGI] = {
+ 3572, 7144, 10720, 14296, 21444, 28596, 32172, 35744,
+ 7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
+ 10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
+ 14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
},
[MCS_HT40] = {
- 6684, 13368, 20052, 26738, 40104, 53476, 60156, 66840,
- 13360, 26720, 40080, 53440, 80160, 106880, 120240, 133600,
+ 6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
+ 13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
+ 20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
+ 26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
},
[MCS_HT40_SGI] = {
- /* TODO: Only MCS 7 and 15 updated, recalculate the rest */
- 6684, 13368, 20052, 26738, 40104, 53476, 60156, 74200,
- 13360, 26720, 40080, 53440, 80160, 106880, 120240, 148400,
+ 7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
+ 14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
+ 22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
+ 29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
}
};
-
/*********************/
/* Aggregation logic */
/*********************/
@@ -223,6 +226,9 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
{
struct ath_buf *bf;
struct list_head bf_head;
+ struct ath_tx_status ts;
+
+ memset(&ts, 0, sizeof(ts));
INIT_LIST_HEAD(&bf_head);
for (;;) {
@@ -236,7 +242,7 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
ath_tx_update_baw(sc, tid, bf->bf_seqno);
spin_unlock(&txq->axq_lock);
- ath_tx_complete_buf(sc, bf, txq, &bf_head, 0, 0);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
spin_lock(&txq->axq_lock);
}
@@ -259,25 +265,46 @@ static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}
-static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
+static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
- struct ath_buf *tbf;
+ struct ath_buf *bf = NULL;
spin_lock_bh(&sc->tx.txbuflock);
- if (WARN_ON(list_empty(&sc->tx.txbuf))) {
+
+ if (unlikely(list_empty(&sc->tx.txbuf))) {
spin_unlock_bh(&sc->tx.txbuflock);
return NULL;
}
- tbf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
- list_del(&tbf->list);
+
+ bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
+ list_del(&bf->list);
+
spin_unlock_bh(&sc->tx.txbuflock);
+ return bf;
+}
+
+static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
+{
+ spin_lock_bh(&sc->tx.txbuflock);
+ list_add_tail(&bf->list, &sc->tx.txbuf);
+ spin_unlock_bh(&sc->tx.txbuflock);
+}
+
+static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
+{
+ struct ath_buf *tbf;
+
+ tbf = ath_tx_get_buffer(sc);
+ if (WARN_ON(!tbf))
+ return NULL;
+
ATH_TXBUF_RESET(tbf);
tbf->aphy = bf->aphy;
tbf->bf_mpdu = bf->bf_mpdu;
tbf->bf_buf_addr = bf->bf_buf_addr;
- *(tbf->bf_desc) = *(bf->bf_desc);
+ memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
tbf->bf_state = bf->bf_state;
tbf->bf_dmacontext = bf->bf_dmacontext;
@@ -286,7 +313,7 @@ static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
struct ath_buf *bf, struct list_head *bf_q,
- int txok)
+ struct ath_tx_status *ts, int txok)
{
struct ath_node *an = NULL;
struct sk_buff *skb;
@@ -296,7 +323,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
struct ieee80211_tx_info *tx_info;
struct ath_atx_tid *tid = NULL;
struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
- struct ath_desc *ds = bf_last->bf_desc;
struct list_head bf_head, bf_pending;
u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
u32 ba[WME_BA_BMP_SIZE >> 5];
@@ -325,10 +351,9 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
memset(ba, 0, WME_BA_BMP_SIZE >> 3);
if (isaggr && txok) {
- if (ATH_DS_TX_BA(ds)) {
- seq_st = ATH_DS_BA_SEQ(ds);
- memcpy(ba, ATH_DS_BA_BITMAP(ds),
- WME_BA_BMP_SIZE >> 3);
+ if (ts->ts_flags & ATH9K_TX_BA) {
+ seq_st = ts->ts_seqnum;
+ memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
} else {
/*
* AR5416 can become deaf/mute when BA
@@ -345,7 +370,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
INIT_LIST_HEAD(&bf_pending);
INIT_LIST_HEAD(&bf_head);
- nbad = ath_tx_num_badfrms(sc, bf, txok);
+ nbad = ath_tx_num_badfrms(sc, bf, ts, txok);
while (bf) {
txfail = txpending = 0;
bf_next = bf->bf_next;
@@ -359,7 +384,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
acked_cnt++;
} else {
if (!(tid->state & AGGR_CLEANUP) &&
- ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) {
+ !bf_last->bf_tx_aborted) {
if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
ath_tx_set_retry(sc, txq, bf);
txpending = 1;
@@ -378,7 +403,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
}
}
- if (bf_next == NULL) {
+ if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
+ bf_next == NULL) {
/*
* Make sure the last desc is reclaimed if it
* not a holding desc.
@@ -402,45 +428,53 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
spin_unlock_bh(&txq->axq_lock);
if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
- ath_tx_rc_status(bf, ds, nbad, txok, true);
+ ath_tx_rc_status(bf, ts, nbad, txok, true);
rc_update = false;
} else {
- ath_tx_rc_status(bf, ds, nbad, txok, false);
+ ath_tx_rc_status(bf, ts, nbad, txok, false);
}
- ath_tx_complete_buf(sc, bf, txq, &bf_head, !txfail, sendbar);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
+ !txfail, sendbar);
} else {
/* retry the un-acked ones */
- if (bf->bf_next == NULL && bf_last->bf_stale) {
- struct ath_buf *tbf;
-
- tbf = ath_clone_txbuf(sc, bf_last);
- /*
- * Update tx baw and complete the frame with
- * failed status if we run out of tx buf
- */
- if (!tbf) {
- spin_lock_bh(&txq->axq_lock);
- ath_tx_update_baw(sc, tid,
- bf->bf_seqno);
- spin_unlock_bh(&txq->axq_lock);
-
- bf->bf_state.bf_type |= BUF_XRETRY;
- ath_tx_rc_status(bf, ds, nbad,
- 0, false);
- ath_tx_complete_buf(sc, bf, txq,
- &bf_head, 0, 0);
- break;
+ if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
+ if (bf->bf_next == NULL && bf_last->bf_stale) {
+ struct ath_buf *tbf;
+
+ tbf = ath_clone_txbuf(sc, bf_last);
+ /*
+ * Update tx baw and complete the
+ * frame with failed status if we
+ * run out of tx buf.
+ */
+ if (!tbf) {
+ spin_lock_bh(&txq->axq_lock);
+ ath_tx_update_baw(sc, tid,
+ bf->bf_seqno);
+ spin_unlock_bh(&txq->axq_lock);
+
+ bf->bf_state.bf_type |=
+ BUF_XRETRY;
+ ath_tx_rc_status(bf, ts, nbad,
+ 0, false);
+ ath_tx_complete_buf(sc, bf, txq,
+ &bf_head,
+ ts, 0, 0);
+ break;
+ }
+
+ ath9k_hw_cleartxdesc(sc->sc_ah,
+ tbf->bf_desc);
+ list_add_tail(&tbf->list, &bf_head);
+ } else {
+ /*
+ * Clear descriptor status words for
+ * software retry
+ */
+ ath9k_hw_cleartxdesc(sc->sc_ah,
+ bf->bf_desc);
}
-
- ath9k_hw_cleartxdesc(sc->sc_ah, tbf->bf_desc);
- list_add_tail(&tbf->list, &bf_head);
- } else {
- /*
- * Clear descriptor status words for
- * software retry
- */
- ath9k_hw_cleartxdesc(sc->sc_ah, bf->bf_desc);
}
/*
@@ -508,12 +542,13 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
break;
}
- if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
- modeidx = MCS_HT40_SGI;
- else if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
modeidx = MCS_HT40;
else
- modeidx = MCS_DEFAULT;
+ modeidx = MCS_HT20;
+
+ if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
+ modeidx++;
frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
max_4ms_framelen = min(max_4ms_framelen, frmlen);
@@ -558,7 +593,7 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
u32 nsymbits, nsymbols;
u16 minlen;
u8 flags, rix;
- int width, half_gi, ndelim, mindelim;
+ int width, streams, half_gi, ndelim, mindelim;
/* Select standard number of delimiters based on frame length alone */
ndelim = ATH_AGGR_GET_NDELIM(frmlen);
@@ -598,7 +633,8 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
if (nsymbols == 0)
nsymbols = 1;
- nsymbits = bits_per_symbol[rix][width];
+ streams = HT_RC_2_STREAMS(rix);
+ nsymbits = bits_per_symbol[rix % 8][width] * streams;
minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
if (frmlen < minlen) {
@@ -664,7 +700,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
bpad = PADBYTES(al_delta) + (ndelim << 2);
bf->bf_next = NULL;
- bf->bf_desc->ds_link = 0;
+ ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);
/* link buffers of this frame to the aggregate */
ath_tx_addto_baw(sc, tid, bf);
@@ -672,7 +708,8 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
list_move_tail(&bf->list, bf_q);
if (bf_prev) {
bf_prev->bf_next = bf;
- bf_prev->bf_desc->ds_link = bf->bf_daddr;
+ ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
+ bf->bf_daddr);
}
bf_prev = bf;
@@ -752,8 +789,11 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
struct ath_node *an = (struct ath_node *)sta->drv_priv;
struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
+ struct ath_tx_status ts;
struct ath_buf *bf;
struct list_head bf_head;
+
+ memset(&ts, 0, sizeof(ts));
INIT_LIST_HEAD(&bf_head);
if (txtid->state & AGGR_CLEANUP)
@@ -780,7 +820,7 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
}
list_move_tail(&bf->list, &bf_head);
ath_tx_update_baw(sc, txtid, bf->bf_seqno);
- ath_tx_complete_buf(sc, bf, txq, &bf_head, 0, 0);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
}
spin_unlock_bh(&txq->axq_lock);
@@ -849,7 +889,7 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_tx_queue_info qi;
- int qnum;
+ int qnum, i;
memset(&qi, 0, sizeof(qi));
qi.tqi_subtype = subtype;
@@ -873,11 +913,16 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
* The UAPSD queue is an exception, since we take a desc-
* based intr on the EOSP frames.
*/
- if (qtype == ATH9K_TX_QUEUE_UAPSD)
- qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
- else
- qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
- TXQ_FLAG_TXDESCINT_ENABLE;
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
+ TXQ_FLAG_TXERRINT_ENABLE;
+ } else {
+ if (qtype == ATH9K_TX_QUEUE_UAPSD)
+ qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
+ else
+ qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
+ TXQ_FLAG_TXDESCINT_ENABLE;
+ }
qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
if (qnum == -1) {
/*
@@ -904,6 +949,11 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
txq->axq_depth = 0;
txq->axq_tx_inprogress = false;
sc->tx.txqsetup |= 1<<qnum;
+
+ txq->txq_headidx = txq->txq_tailidx = 0;
+ for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
+ INIT_LIST_HEAD(&txq->txq_fifo[i]);
+ INIT_LIST_HEAD(&txq->txq_fifo_pending);
}
return &sc->tx.txq[qnum];
}
@@ -1028,45 +1078,63 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
struct ath_buf *bf, *lastbf;
struct list_head bf_head;
+ struct ath_tx_status ts;
+ memset(&ts, 0, sizeof(ts));
INIT_LIST_HEAD(&bf_head);
for (;;) {
spin_lock_bh(&txq->axq_lock);
- if (list_empty(&txq->axq_q)) {
- txq->axq_link = NULL;
- spin_unlock_bh(&txq->axq_lock);
- break;
- }
-
- bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
+ txq->txq_headidx = txq->txq_tailidx = 0;
+ spin_unlock_bh(&txq->axq_lock);
+ break;
+ } else {
+ bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
+ struct ath_buf, list);
+ }
+ } else {
+ if (list_empty(&txq->axq_q)) {
+ txq->axq_link = NULL;
+ spin_unlock_bh(&txq->axq_lock);
+ break;
+ }
+ bf = list_first_entry(&txq->axq_q, struct ath_buf,
+ list);
- if (bf->bf_stale) {
- list_del(&bf->list);
- spin_unlock_bh(&txq->axq_lock);
+ if (bf->bf_stale) {
+ list_del(&bf->list);
+ spin_unlock_bh(&txq->axq_lock);
- spin_lock_bh(&sc->tx.txbuflock);
- list_add_tail(&bf->list, &sc->tx.txbuf);
- spin_unlock_bh(&sc->tx.txbuflock);
- continue;
+ ath_tx_return_buffer(sc, bf);
+ continue;
+ }
}
lastbf = bf->bf_lastbf;
if (!retry_tx)
- lastbf->bf_desc->ds_txstat.ts_flags =
- ATH9K_TX_SW_ABORTED;
+ lastbf->bf_tx_aborted = true;
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ list_cut_position(&bf_head,
+ &txq->txq_fifo[txq->txq_tailidx],
+ &lastbf->list);
+ INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
+ } else {
+ /* remove ath_buf's of the same mpdu from txq */
+ list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
+ }
- /* remove ath_buf's of the same mpdu from txq */
- list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
txq->axq_depth--;
spin_unlock_bh(&txq->axq_lock);
if (bf_isampdu(bf))
- ath_tx_complete_aggr(sc, txq, bf, &bf_head, 0);
+ ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
else
- ath_tx_complete_buf(sc, bf, txq, &bf_head, 0, 0);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
}
spin_lock_bh(&txq->axq_lock);
@@ -1081,6 +1149,27 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
spin_unlock_bh(&txq->axq_lock);
}
}
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ spin_lock_bh(&txq->axq_lock);
+ while (!list_empty(&txq->txq_fifo_pending)) {
+ bf = list_first_entry(&txq->txq_fifo_pending,
+ struct ath_buf, list);
+ list_cut_position(&bf_head,
+ &txq->txq_fifo_pending,
+ &bf->bf_lastbf->list);
+ spin_unlock_bh(&txq->axq_lock);
+
+ if (bf_isampdu(bf))
+ ath_tx_complete_aggr(sc, txq, bf, &bf_head,
+ &ts, 0);
+ else
+ ath_tx_complete_buf(sc, bf, txq, &bf_head,
+ &ts, 0, 0);
+ spin_lock_bh(&txq->axq_lock);
+ }
+ spin_unlock_bh(&txq->axq_lock);
+ }
}
void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
@@ -1218,44 +1307,47 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
bf = list_first_entry(head, struct ath_buf, list);
- list_splice_tail_init(head, &txq->axq_q);
- txq->axq_depth++;
-
ath_print(common, ATH_DBG_QUEUE,
"qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
- if (txq->axq_link == NULL) {
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
+ list_splice_tail_init(head, &txq->txq_fifo_pending);
+ return;
+ }
+ if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
+ ath_print(common, ATH_DBG_XMIT,
+ "Initializing tx fifo %d which "
+ "is non-empty\n",
+ txq->txq_headidx);
+ INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
+ list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
+ INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
ath_print(common, ATH_DBG_XMIT,
"TXDP[%u] = %llx (%p)\n",
txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
} else {
- *txq->axq_link = bf->bf_daddr;
- ath_print(common, ATH_DBG_XMIT, "link[%u] (%p)=%llx (%p)\n",
- txq->axq_qnum, txq->axq_link,
- ito64(bf->bf_daddr), bf->bf_desc);
- }
- txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
- ath9k_hw_txstart(ah, txq->axq_qnum);
-}
+ list_splice_tail_init(head, &txq->axq_q);
-static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
-{
- struct ath_buf *bf = NULL;
-
- spin_lock_bh(&sc->tx.txbuflock);
-
- if (unlikely(list_empty(&sc->tx.txbuf))) {
- spin_unlock_bh(&sc->tx.txbuflock);
- return NULL;
+ if (txq->axq_link == NULL) {
+ ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
+ ath_print(common, ATH_DBG_XMIT,
+ "TXDP[%u] = %llx (%p)\n",
+ txq->axq_qnum, ito64(bf->bf_daddr),
+ bf->bf_desc);
+ } else {
+ *txq->axq_link = bf->bf_daddr;
+ ath_print(common, ATH_DBG_XMIT,
+ "link[%u] (%p)=%llx (%p)\n",
+ txq->axq_qnum, txq->axq_link,
+ ito64(bf->bf_daddr), bf->bf_desc);
+ }
+ ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
+ &txq->axq_link);
+ ath9k_hw_txstart(ah, txq->axq_qnum);
}
-
- bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
- list_del(&bf->list);
-
- spin_unlock_bh(&sc->tx.txbuflock);
-
- return bf;
+ txq->axq_depth++;
}
static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
@@ -1402,8 +1494,7 @@ static void assign_aggr_tid_seqno(struct sk_buff *skb,
INCR(tid->seq_next, IEEE80211_SEQ_MAX);
}
-static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb,
- struct ath_txq *txq)
+static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
int flags = 0;
@@ -1414,6 +1505,9 @@ static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb,
if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
flags |= ATH9K_TXDESC_NOACK;
+ if (use_ldpc)
+ flags |= ATH9K_TXDESC_LDPC;
+
return flags;
}
@@ -1432,8 +1526,9 @@ static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
/* find number of symbols: PLCP + data */
+ streams = HT_RC_2_STREAMS(rix);
nbits = (pktlen << 3) + OFDM_PLCP_BITS;
- nsymbits = bits_per_symbol[rix][width];
+ nsymbits = bits_per_symbol[rix % 8][width] * streams;
nsymbols = (nbits + nsymbits - 1) / nsymbits;
if (!half_gi)
@@ -1442,7 +1537,6 @@ static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
duration = SYMBOL_TIME_HALFGI(nsymbols);
/* addup duration for legacy/ht training and signal fields */
- streams = HT_RC_2_STREAMS(rix);
duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
return duration;
@@ -1513,6 +1607,8 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
series[i].Rate = rix | 0x80;
series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
is_40, is_sgi, is_sp);
+ if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
+ series[i].RateFlags |= ATH9K_RATESERIES_STBC;
continue;
}
@@ -1565,15 +1661,16 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
int hdrlen;
__le16 fc;
int padpos, padsize;
+ bool use_ldpc = false;
tx_info->pad[0] = 0;
switch (txctl->frame_type) {
- case ATH9K_NOT_INTERNAL:
+ case ATH9K_IFT_NOT_INTERNAL:
break;
- case ATH9K_INT_PAUSE:
+ case ATH9K_IFT_PAUSE:
tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
/* fall through */
- case ATH9K_INT_UNPAUSE:
+ case ATH9K_IFT_UNPAUSE:
tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
break;
}
@@ -1591,10 +1688,13 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
bf->bf_frmlen -= padsize;
}
- if (conf_is_ht(&hw->conf))
+ if (conf_is_ht(&hw->conf)) {
bf->bf_state.bf_type |= BUF_HT;
+ if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
+ use_ldpc = true;
+ }
- bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq);
+ bf->bf_flags = setup_tx_flags(skb, use_ldpc);
bf->bf_keytype = get_hw_crypto_keytype(skb);
if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
@@ -1653,8 +1753,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
list_add_tail(&bf->list, &bf_head);
ds = bf->bf_desc;
- ds->ds_link = 0;
- ds->ds_data = bf->bf_buf_addr;
+ ath9k_hw_set_desc_link(ah, ds, 0);
ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
bf->bf_keyix, bf->bf_keytype, bf->bf_flags);
@@ -1663,7 +1762,9 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
skb->len, /* segment length */
true, /* first segment */
true, /* last segment */
- ds); /* first descriptor */
+ ds, /* first descriptor */
+ bf->bf_buf_addr,
+ txctl->txq->axq_qnum);
spin_lock_bh(&txctl->txq->axq_lock);
@@ -1732,9 +1833,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
}
spin_unlock_bh(&txq->axq_lock);
- spin_lock_bh(&sc->tx.txbuflock);
- list_add_tail(&bf->list, &sc->tx.txbuf);
- spin_unlock_bh(&sc->tx.txbuflock);
+ ath_tx_return_buffer(sc, bf);
return r;
}
@@ -1852,9 +1951,8 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
}
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
- struct ath_txq *txq,
- struct list_head *bf_q,
- int txok, int sendbar)
+ struct ath_txq *txq, struct list_head *bf_q,
+ struct ath_tx_status *ts, int txok, int sendbar)
{
struct sk_buff *skb = bf->bf_mpdu;
unsigned long flags;
@@ -1872,7 +1970,7 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
ath_tx_complete(sc, skb, bf->aphy, tx_flags);
- ath_debug_stat_tx(sc, txq, bf);
+ ath_debug_stat_tx(sc, txq, bf, ts);
/*
* Return the list of ath_buf of this mpdu to free queue
@@ -1883,23 +1981,21 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
}
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
- int txok)
+ struct ath_tx_status *ts, int txok)
{
- struct ath_buf *bf_last = bf->bf_lastbf;
- struct ath_desc *ds = bf_last->bf_desc;
u16 seq_st = 0;
u32 ba[WME_BA_BMP_SIZE >> 5];
int ba_index;
int nbad = 0;
int isaggr = 0;
- if (ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED)
+ if (bf->bf_tx_aborted)
return 0;
isaggr = bf_isaggr(bf);
if (isaggr) {
- seq_st = ATH_DS_BA_SEQ(ds);
- memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3);
+ seq_st = ts->ts_seqnum;
+ memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
}
while (bf) {
@@ -1913,7 +2009,7 @@ static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
return nbad;
}
-static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds,
+static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
int nbad, int txok, bool update_rc)
{
struct sk_buff *skb = bf->bf_mpdu;
@@ -1923,24 +2019,24 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds,
u8 i, tx_rateindex;
if (txok)
- tx_info->status.ack_signal = ds->ds_txstat.ts_rssi;
+ tx_info->status.ack_signal = ts->ts_rssi;
- tx_rateindex = ds->ds_txstat.ts_rateindex;
+ tx_rateindex = ts->ts_rateindex;
WARN_ON(tx_rateindex >= hw->max_rates);
- if (update_rc)
- tx_info->pad[0] |= ATH_TX_INFO_UPDATE_RC;
- if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT)
+ if (ts->ts_status & ATH9K_TXERR_FILT)
tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+ if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc)
+ tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
- if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
+ if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
(bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
if (ieee80211_is_data(hdr->frame_control)) {
- if (ds->ds_txstat.ts_flags &
+ if (ts->ts_flags &
(ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN))
tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN;
- if ((ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY) ||
- (ds->ds_txstat.ts_status & ATH9K_TXERR_FIFO))
+ if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
+ (ts->ts_status & ATH9K_TXERR_FIFO))
tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
tx_info->status.ampdu_len = bf->bf_nframes;
tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
@@ -1978,6 +2074,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
struct ath_buf *bf, *lastbf, *bf_held = NULL;
struct list_head bf_head;
struct ath_desc *ds;
+ struct ath_tx_status ts;
int txok;
int status;
@@ -2017,7 +2114,8 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
lastbf = bf->bf_lastbf;
ds = lastbf->bf_desc;
- status = ath9k_hw_txprocdesc(ah, ds);
+ memset(&ts, 0, sizeof(ts));
+ status = ath9k_hw_txprocdesc(ah, ds, &ts);
if (status == -EINPROGRESS) {
spin_unlock_bh(&txq->axq_lock);
break;
@@ -2028,7 +2126,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
* can disable RX.
*/
if (bf->bf_isnullfunc &&
- (ds->ds_txstat.ts_status & ATH9K_TX_ACKED)) {
+ (ts.ts_status & ATH9K_TX_ACKED)) {
if ((sc->ps_flags & PS_ENABLED))
ath9k_enable_ps(sc);
else
@@ -2047,31 +2145,30 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
&txq->axq_q, lastbf->list.prev);
txq->axq_depth--;
- txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_MASK);
+ txok = !(ts.ts_status & ATH9K_TXERR_MASK);
txq->axq_tx_inprogress = false;
+ if (bf_held)
+ list_del(&bf_held->list);
spin_unlock_bh(&txq->axq_lock);
- if (bf_held) {
- spin_lock_bh(&sc->tx.txbuflock);
- list_move_tail(&bf_held->list, &sc->tx.txbuf);
- spin_unlock_bh(&sc->tx.txbuflock);
- }
+ if (bf_held)
+ ath_tx_return_buffer(sc, bf_held);
if (!bf_isampdu(bf)) {
/*
* This frame is sent out as a single frame.
* Use hardware retry status for this frame.
*/
- bf->bf_retries = ds->ds_txstat.ts_longretry;
- if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
+ bf->bf_retries = ts.ts_longretry;
+ if (ts.ts_status & ATH9K_TXERR_XRETRY)
bf->bf_state.bf_type |= BUF_XRETRY;
- ath_tx_rc_status(bf, ds, 0, txok, true);
+ ath_tx_rc_status(bf, &ts, 0, txok, true);
}
if (bf_isampdu(bf))
- ath_tx_complete_aggr(sc, txq, bf, &bf_head, txok);
+ ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
else
- ath_tx_complete_buf(sc, bf, txq, &bf_head, txok, 0);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
ath_wake_mac80211_queue(sc, txq);
@@ -2133,10 +2230,121 @@ void ath_tx_tasklet(struct ath_softc *sc)
}
}
+void ath_tx_edma_tasklet(struct ath_softc *sc)
+{
+ struct ath_tx_status txs;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_txq *txq;
+ struct ath_buf *bf, *lastbf;
+ struct list_head bf_head;
+ int status;
+ int txok;
+
+ for (;;) {
+ status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
+ if (status == -EINPROGRESS)
+ break;
+ if (status == -EIO) {
+ ath_print(common, ATH_DBG_XMIT,
+ "Error processing tx status\n");
+ break;
+ }
+
+ /* Skip beacon completions */
+ if (txs.qid == sc->beacon.beaconq)
+ continue;
+
+ txq = &sc->tx.txq[txs.qid];
+
+ spin_lock_bh(&txq->axq_lock);
+ if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
+ spin_unlock_bh(&txq->axq_lock);
+ return;
+ }
+
+ bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
+ struct ath_buf, list);
+ lastbf = bf->bf_lastbf;
+
+ INIT_LIST_HEAD(&bf_head);
+ list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
+ &lastbf->list);
+ INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
+ txq->axq_depth--;
+ txq->axq_tx_inprogress = false;
+ spin_unlock_bh(&txq->axq_lock);
+
+ txok = !(txs.ts_status & ATH9K_TXERR_MASK);
+
+ if (!bf_isampdu(bf)) {
+ bf->bf_retries = txs.ts_longretry;
+ if (txs.ts_status & ATH9K_TXERR_XRETRY)
+ bf->bf_state.bf_type |= BUF_XRETRY;
+ ath_tx_rc_status(bf, &txs, 0, txok, true);
+ }
+
+ if (bf_isampdu(bf))
+ ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok);
+ else
+ ath_tx_complete_buf(sc, bf, txq, &bf_head,
+ &txs, txok, 0);
+
+ ath_wake_mac80211_queue(sc, txq);
+
+ spin_lock_bh(&txq->axq_lock);
+ if (!list_empty(&txq->txq_fifo_pending)) {
+ INIT_LIST_HEAD(&bf_head);
+ bf = list_first_entry(&txq->txq_fifo_pending,
+ struct ath_buf, list);
+ list_cut_position(&bf_head, &txq->txq_fifo_pending,
+ &bf->bf_lastbf->list);
+ ath_tx_txqaddbuf(sc, txq, &bf_head);
+ } else if (sc->sc_flags & SC_OP_TXAGGR)
+ ath_txq_schedule(sc, txq);
+ spin_unlock_bh(&txq->axq_lock);
+ }
+}
+
/*****************/
/* Init, Cleanup */
/*****************/
+static int ath_txstatus_setup(struct ath_softc *sc, int size)
+{
+ struct ath_descdma *dd = &sc->txsdma;
+ u8 txs_len = sc->sc_ah->caps.txs_len;
+
+ dd->dd_desc_len = size * txs_len;
+ dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
+ &dd->dd_desc_paddr, GFP_KERNEL);
+ if (!dd->dd_desc)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int ath_tx_edma_init(struct ath_softc *sc)
+{
+ int err;
+
+ err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
+ if (!err)
+ ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
+ sc->txsdma.dd_desc_paddr,
+ ATH_TXSTATUS_RING_SIZE);
+
+ return err;
+}
+
+static void ath_tx_edma_cleanup(struct ath_softc *sc)
+{
+ struct ath_descdma *dd = &sc->txsdma;
+
+ dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
+ dd->dd_desc_paddr);
+}
+
int ath_tx_init(struct ath_softc *sc, int nbufs)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -2145,7 +2353,7 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
spin_lock_init(&sc->tx.txbuflock);
error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
- "tx", nbufs, 1);
+ "tx", nbufs, 1, 1);
if (error != 0) {
ath_print(common, ATH_DBG_FATAL,
"Failed to allocate tx descriptors: %d\n", error);
@@ -2153,7 +2361,7 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
}
error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
- "beacon", ATH_BCBUF, 1);
+ "beacon", ATH_BCBUF, 1, 1);
if (error != 0) {
ath_print(common, ATH_DBG_FATAL,
"Failed to allocate beacon descriptors: %d\n", error);
@@ -2162,6 +2370,12 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ error = ath_tx_edma_init(sc);
+ if (error)
+ goto err;
+ }
+
err:
if (error != 0)
ath_tx_cleanup(sc);
@@ -2176,6 +2390,9 @@ void ath_tx_cleanup(struct ath_softc *sc)
if (sc->tx.txdma.dd_desc_len != 0)
ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+ ath_tx_edma_cleanup(sc);
}
void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
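The EDMA additions above manage each hardware queue as a small ring of TX FIFO slots: frames are spliced into txq_fifo[txq_headidx] on submission and drained from txq_fifo[txq_tailidx] on completion, both indices wrapping at ATH_TXFIFO_DEPTH via INCR(); once the queue holds ATH_TXFIFO_DEPTH frames, further submissions are parked on txq_fifo_pending. A standalone sketch of that index discipline (example names only, not driver code):

	/* Ring-index sketch mirroring the head/tail handling in the
	 * EDMA TX path; EXAMPLE_FIFO_DEPTH stands in for ATH_TXFIFO_DEPTH. */
	#define EXAMPLE_FIFO_DEPTH 8

	static unsigned int fifo_next(unsigned int idx)
	{
		return (idx + 1) % EXAMPLE_FIFO_DEPTH;	/* wraps like INCR() */
	}

	/*
	 * submit:      fill slot[head], head = fifo_next(head), depth++;
	 * completion:  drain slot[tail], tail = fifo_next(tail), depth--;
	 * depth == EXAMPLE_FIFO_DEPTH means the FIFO is full and new
	 * frames wait on a pending list until a completion frees a slot.
	 */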
diff --git a/drivers/net/wireless/ath/debug.h b/drivers/net/wireless/ath/debug.h
index 8263633c003c..873bf526e11f 100644
--- a/drivers/net/wireless/ath/debug.h
+++ b/drivers/net/wireless/ath/debug.h
@@ -59,6 +59,7 @@ enum ATH_DEBUG {
ATH_DBG_PS = 0x00000800,
ATH_DBG_HWTIMER = 0x00001000,
ATH_DBG_BTCOEX = 0x00002000,
+ ATH_DBG_WMI = 0x00004000,
ATH_DBG_ANY = 0xffffffff
};
diff --git a/drivers/net/wireless/ath/hw.c b/drivers/net/wireless/ath/hw.c
index ecc9eb01f4fa..a8f81ea09f14 100644
--- a/drivers/net/wireless/ath/hw.c
+++ b/drivers/net/wireless/ath/hw.c
@@ -19,8 +19,8 @@
#include "ath.h"
#include "reg.h"
-#define REG_READ common->ops->read
-#define REG_WRITE common->ops->write
+#define REG_READ (common->ops->read)
+#define REG_WRITE (common->ops->write)
/**
* ath_hw_set_bssid_mask - filter out bssids we listen
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 00489c40be0c..24d59883d944 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -50,6 +50,7 @@
#define ATH9K_5GHZ_ALL ATH9K_5GHZ_5150_5350, \
ATH9K_5GHZ_5470_5850
+
/* This one skips what we call "mid band" */
#define ATH9K_5GHZ_NO_MIDBAND ATH9K_5GHZ_5150_5350, \
ATH9K_5GHZ_5725_5850
@@ -360,7 +361,7 @@ EXPORT_SYMBOL(ath_reg_notifier_apply);
static bool ath_regd_is_eeprom_valid(struct ath_regulatory *reg)
{
- u16 rd = ath_regd_get_eepromRD(reg);
+ u16 rd = ath_regd_get_eepromRD(reg);
int i;
if (rd & COUNTRY_ERD_FLAG) {
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 3edbbcf0f548..c8f7090b27d3 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -865,7 +865,6 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
/* low bit of first byte of destination tells us if broadcast */
tx_update_descriptor(priv, *(skb->data) & 0x01, len + 18, buff, TX_PACKET_TYPE_DATA);
- dev->trans_start = jiffies;
dev->stats.tx_bytes += len;
spin_unlock_irqrestore(&priv->irqlock, flags);
diff --git a/drivers/net/wireless/atmel_cs.c b/drivers/net/wireless/atmel_cs.c
index 32407911842f..c2746fc7f2be 100644
--- a/drivers/net/wireless/atmel_cs.c
+++ b/drivers/net/wireless/atmel_cs.c
@@ -85,41 +85,7 @@ static void atmel_release(struct pcmcia_device *link);
static void atmel_detach(struct pcmcia_device *p_dev);
-/*
- You'll also need to prototype all the functions that will actually
- be used to talk to your device. See 'pcmem_cs' for a good example
- of a fully self-sufficient driver; the other drivers rely more or
- less on other parts of the kernel.
-*/
-
-/*
- A linked list of "instances" of the atmelnet device. Each actual
- PCMCIA card corresponds to one device instance, and is described
- by one struct pcmcia_device structure (defined in ds.h).
-
- You may not want to use a linked list for this -- for example, the
- memory card driver uses an array of struct pcmcia_device pointers, where minor
- device numbers are used to derive the corresponding array index.
-*/
-
-/*
- A driver needs to provide a dev_node_t structure for each device
- on a card. In some cases, there is only one device per card (for
- example, ethernet cards, modems). In other cases, there may be
- many actual or logical devices (SCSI adapters, memory cards with
- multiple partitions). The dev_node_t structures need to be kept
- in a linked list starting at the 'dev' field of a struct pcmcia_device
- structure. We allocate them in the card's private data structure,
- because they generally shouldn't be allocated dynamically.
-
- In this case, we also provide a flag to indicate if a device is
- "stopped" due to a power management event, or card ejection. The
- device IO routines can use a flag like this to throttle IO to a
- card that is not ready to accept it.
-*/
-
typedef struct local_info_t {
- dev_node_t node;
struct net_device *eth_dev;
} local_info_t;
@@ -141,10 +107,6 @@ static int atmel_probe(struct pcmcia_device *p_dev)
dev_dbg(&p_dev->dev, "atmel_attach()\n");
- /* Interrupt setup */
- p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- p_dev->irq.Handler = NULL;
-
/*
General socket configuration defaults can go here. In this
client, we assume very little, and rely on the CIS for almost
@@ -226,9 +188,7 @@ static int atmel_config_check(struct pcmcia_device *p_dev,
else if (dflt->vpp1.present & (1<<CISTPL_POWER_VNOM))
p_dev->conf.Vpp = dflt->vpp1.param[CISTPL_POWER_VNOM]/10000;
- /* Do we need to allocate an interrupt? */
- if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
- p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
+ p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
@@ -278,15 +238,9 @@ static int atmel_config(struct pcmcia_device *link)
if (pcmcia_loop_config(link, atmel_config_check, NULL))
goto failed;
- /*
- Allocate an interrupt line. Note that this does not assign a
- handler to the interrupt, unless the 'Handler' member of the
- irq structure is initialized.
- */
- if (link->conf.Attributes & CONF_ENABLE_IRQ) {
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
- goto failed;
+ if (!link->irq) {
+ dev_err(&link->dev, "atmel: cannot assign IRQ: check that CONFIG_ISA is set in kernel config.");
+ goto failed;
}
/*
@@ -298,14 +252,8 @@ static int atmel_config(struct pcmcia_device *link)
if (ret)
goto failed;
- if (link->irq.AssignedIRQ == 0) {
- printk(KERN_ALERT
- "atmel: cannot assign IRQ: check that CONFIG_ISA is set in kernel config.");
- goto failed;
- }
-
((local_info_t*)link->priv)->eth_dev =
- init_atmel_card(link->irq.AssignedIRQ,
+ init_atmel_card(link->irq,
link->io.BasePort1,
did ? did->driver_info : ATMEL_FW_TYPE_NONE,
&link->dev,
@@ -315,14 +263,6 @@ static int atmel_config(struct pcmcia_device *link)
goto failed;
- /*
- At this point, the dev_node_t structure(s) need to be
- initialized and arranged in a linked list at link->dev_node.
- */
- strcpy(dev->node.dev_name, ((local_info_t*)link->priv)->eth_dev->name );
- dev->node.major = dev->node.minor = 0;
- link->dev_node = &dev->node;
-
return 0;
failed:
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index b8807fb12c92..3a003e6803a5 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -104,6 +104,7 @@
#define B43_MMIO_MACFILTER_CONTROL 0x420
#define B43_MMIO_MACFILTER_DATA 0x422
#define B43_MMIO_RCMTA_COUNT 0x43C
+#define B43_MMIO_PSM_PHY_HDR 0x492
#define B43_MMIO_RADIO_HWENABLED_LO 0x49A
#define B43_MMIO_GPIO_CONTROL 0x49C
#define B43_MMIO_GPIO_MASK 0x49E
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 9a374ef83a22..7965b70efbab 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -4349,11 +4349,10 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
b43_set_phytxctl_defaults(dev);
/* Minimum Contention Window */
- if (phy->type == B43_PHYTYPE_B) {
+ if (phy->type == B43_PHYTYPE_B)
b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MINCONT, 0x1F);
- } else {
+ else
b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MINCONT, 0xF);
- }
/* Maximum Contention Window */
b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MAXCONT, 0x3FF);
@@ -4572,6 +4571,23 @@ static void b43_op_sw_scan_complete_notifier(struct ieee80211_hw *hw)
mutex_unlock(&wl->mutex);
}
+static int b43_op_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct b43_wl *wl = hw_to_b43_wl(hw);
+ struct b43_wldev *dev = wl->current_dev;
+ struct ieee80211_conf *conf = &hw->conf;
+
+ if (idx != 0)
+ return -ENOENT;
+
+ survey->channel = conf->channel;
+ survey->filled = SURVEY_INFO_NOISE_DBM;
+ survey->noise = dev->stats.link_noise;
+
+ return 0;
+}
+
static const struct ieee80211_ops b43_hw_ops = {
.tx = b43_op_tx,
.conf_tx = b43_op_conf_tx,
@@ -4591,6 +4607,7 @@ static const struct ieee80211_ops b43_hw_ops = {
.sta_notify = b43_op_sta_notify,
.sw_scan_start = b43_op_sw_scan_start_notifier,
.sw_scan_complete = b43_op_sw_scan_complete_notifier,
+ .get_survey = b43_op_get_survey,
.rfkill_poll = b43_rfkill_poll,
};
@@ -4906,8 +4923,7 @@ static int b43_wireless_init(struct ssb_device *dev)
/* fill hw info */
hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_NOISE_DBM;
+ IEEE80211_HW_SIGNAL_DBM;
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_AP) |
diff --git a/drivers/net/wireless/b43/pcmcia.c b/drivers/net/wireless/b43/pcmcia.c
index 609e7051e018..0e99b634267c 100644
--- a/drivers/net/wireless/b43/pcmcia.c
+++ b/drivers/net/wireless/b43/pcmcia.c
@@ -98,10 +98,7 @@ static int __devinit b43_pcmcia_probe(struct pcmcia_device *dev)
if (res != 0)
goto err_disable;
- dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- dev->irq.Handler = NULL; /* The handler is registered later. */
- res = pcmcia_request_irq(dev, &dev->irq);
- if (res != 0)
+ if (!dev->irq)
goto err_disable;
res = pcmcia_request_configuration(dev, &dev->conf);
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 9c7cd282e46c..3d6b33775964 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -73,6 +73,22 @@ static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
u16 value, u8 core, bool off);
static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
u16 value, u8 core);
+static int nphy_channel_switch(struct b43_wldev *dev, unsigned int channel);
+
+static inline bool b43_empty_chanspec(struct b43_chanspec *chanspec)
+{
+ return !chanspec->channel && !chanspec->sideband &&
+ !chanspec->b_width && !chanspec->b_freq;
+}
+
+static inline bool b43_eq_chanspecs(struct b43_chanspec *chanspec1,
+ struct b43_chanspec *chanspec2)
+{
+ return (chanspec1->channel == chanspec2->channel &&
+ chanspec1->sideband == chanspec2->sideband &&
+ chanspec1->b_width == chanspec2->b_width &&
+ chanspec1->b_freq == chanspec2->b_freq);
+}
void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna)
{//TODO
@@ -89,34 +105,44 @@ static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev,
}
static void b43_chantab_radio_upload(struct b43_wldev *dev,
- const struct b43_nphy_channeltab_entry *e)
-{
- b43_radio_write16(dev, B2055_PLL_REF, e->radio_pll_ref);
- b43_radio_write16(dev, B2055_RF_PLLMOD0, e->radio_rf_pllmod0);
- b43_radio_write16(dev, B2055_RF_PLLMOD1, e->radio_rf_pllmod1);
- b43_radio_write16(dev, B2055_VCO_CAPTAIL, e->radio_vco_captail);
- b43_radio_write16(dev, B2055_VCO_CAL1, e->radio_vco_cal1);
- b43_radio_write16(dev, B2055_VCO_CAL2, e->radio_vco_cal2);
- b43_radio_write16(dev, B2055_PLL_LFC1, e->radio_pll_lfc1);
- b43_radio_write16(dev, B2055_PLL_LFR1, e->radio_pll_lfr1);
- b43_radio_write16(dev, B2055_PLL_LFC2, e->radio_pll_lfc2);
- b43_radio_write16(dev, B2055_LGBUF_CENBUF, e->radio_lgbuf_cenbuf);
- b43_radio_write16(dev, B2055_LGEN_TUNE1, e->radio_lgen_tune1);
- b43_radio_write16(dev, B2055_LGEN_TUNE2, e->radio_lgen_tune2);
- b43_radio_write16(dev, B2055_C1_LGBUF_ATUNE, e->radio_c1_lgbuf_atune);
- b43_radio_write16(dev, B2055_C1_LGBUF_GTUNE, e->radio_c1_lgbuf_gtune);
- b43_radio_write16(dev, B2055_C1_RX_RFR1, e->radio_c1_rx_rfr1);
- b43_radio_write16(dev, B2055_C1_TX_PGAPADTN, e->radio_c1_tx_pgapadtn);
- b43_radio_write16(dev, B2055_C1_TX_MXBGTRIM, e->radio_c1_tx_mxbgtrim);
- b43_radio_write16(dev, B2055_C2_LGBUF_ATUNE, e->radio_c2_lgbuf_atune);
- b43_radio_write16(dev, B2055_C2_LGBUF_GTUNE, e->radio_c2_lgbuf_gtune);
- b43_radio_write16(dev, B2055_C2_RX_RFR1, e->radio_c2_rx_rfr1);
- b43_radio_write16(dev, B2055_C2_TX_PGAPADTN, e->radio_c2_tx_pgapadtn);
- b43_radio_write16(dev, B2055_C2_TX_MXBGTRIM, e->radio_c2_tx_mxbgtrim);
+ const struct b43_nphy_channeltab_entry_rev2 *e)
+{
+ b43_radio_write(dev, B2055_PLL_REF, e->radio_pll_ref);
+ b43_radio_write(dev, B2055_RF_PLLMOD0, e->radio_rf_pllmod0);
+ b43_radio_write(dev, B2055_RF_PLLMOD1, e->radio_rf_pllmod1);
+ b43_radio_write(dev, B2055_VCO_CAPTAIL, e->radio_vco_captail);
+ b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+
+ b43_radio_write(dev, B2055_VCO_CAL1, e->radio_vco_cal1);
+ b43_radio_write(dev, B2055_VCO_CAL2, e->radio_vco_cal2);
+ b43_radio_write(dev, B2055_PLL_LFC1, e->radio_pll_lfc1);
+ b43_radio_write(dev, B2055_PLL_LFR1, e->radio_pll_lfr1);
+ b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+
+ b43_radio_write(dev, B2055_PLL_LFC2, e->radio_pll_lfc2);
+ b43_radio_write(dev, B2055_LGBUF_CENBUF, e->radio_lgbuf_cenbuf);
+ b43_radio_write(dev, B2055_LGEN_TUNE1, e->radio_lgen_tune1);
+ b43_radio_write(dev, B2055_LGEN_TUNE2, e->radio_lgen_tune2);
+ b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+
+ b43_radio_write(dev, B2055_C1_LGBUF_ATUNE, e->radio_c1_lgbuf_atune);
+ b43_radio_write(dev, B2055_C1_LGBUF_GTUNE, e->radio_c1_lgbuf_gtune);
+ b43_radio_write(dev, B2055_C1_RX_RFR1, e->radio_c1_rx_rfr1);
+ b43_radio_write(dev, B2055_C1_TX_PGAPADTN, e->radio_c1_tx_pgapadtn);
+ b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+
+ b43_radio_write(dev, B2055_C1_TX_MXBGTRIM, e->radio_c1_tx_mxbgtrim);
+ b43_radio_write(dev, B2055_C2_LGBUF_ATUNE, e->radio_c2_lgbuf_atune);
+ b43_radio_write(dev, B2055_C2_LGBUF_GTUNE, e->radio_c2_lgbuf_gtune);
+ b43_radio_write(dev, B2055_C2_RX_RFR1, e->radio_c2_rx_rfr1);
+ b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+
+ b43_radio_write(dev, B2055_C2_TX_PGAPADTN, e->radio_c2_tx_pgapadtn);
+ b43_radio_write(dev, B2055_C2_TX_MXBGTRIM, e->radio_c2_tx_mxbgtrim);
}
static void b43_chantab_phy_upload(struct b43_wldev *dev,
- const struct b43_nphy_channeltab_entry *e)
+ const struct b43_phy_n_sfo_cfg *e)
{
b43_phy_write(dev, B43_NPHY_BW1A, e->phy_bw1a);
b43_phy_write(dev, B43_NPHY_BW2, e->phy_bw2);
@@ -131,34 +157,20 @@ static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
//TODO
}
-/* Tune the hardware to a new channel. */
-static int nphy_channel_switch(struct b43_wldev *dev, unsigned int channel)
-{
- const struct b43_nphy_channeltab_entry *tabent;
- tabent = b43_nphy_get_chantabent(dev, channel);
- if (!tabent)
- return -ESRCH;
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/Radio/2055Setup */
+static void b43_radio_2055_setup(struct b43_wldev *dev,
+ const struct b43_nphy_channeltab_entry_rev2 *e)
+{
+ B43_WARN_ON(dev->phy.rev >= 3);
- //FIXME enable/disable band select upper20 in RXCTL
- if (0 /*FIXME 5Ghz*/)
- b43_radio_maskset(dev, B2055_MASTER1, 0xFF8F, 0x20);
- else
- b43_radio_maskset(dev, B2055_MASTER1, 0xFF8F, 0x50);
- b43_chantab_radio_upload(dev, tabent);
+ b43_chantab_radio_upload(dev, e);
udelay(50);
- b43_radio_write16(dev, B2055_VCO_CAL10, 5);
- b43_radio_write16(dev, B2055_VCO_CAL10, 45);
- b43_radio_write16(dev, B2055_VCO_CAL10, 65);
+ b43_radio_write(dev, B2055_VCO_CAL10, 0x05);
+ b43_radio_write(dev, B2055_VCO_CAL10, 0x45);
+ b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+ b43_radio_write(dev, B2055_VCO_CAL10, 0x65);
udelay(300);
- if (0 /*FIXME 5Ghz*/)
- b43_phy_set(dev, B43_NPHY_BANDCTL, B43_NPHY_BANDCTL_5GHZ);
- else
- b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ);
- b43_chantab_phy_upload(dev, tabent);
- b43_nphy_tx_power_fix(dev);
-
- return 0;
}
static void b43_radio_init2055_pre(struct b43_wldev *dev)
@@ -174,52 +186,64 @@ static void b43_radio_init2055_pre(struct b43_wldev *dev)
static void b43_radio_init2055_post(struct b43_wldev *dev)
{
+ struct b43_phy_n *nphy = dev->phy.n;
struct ssb_sprom *sprom = &(dev->dev->bus->sprom);
struct ssb_boardinfo *binfo = &(dev->dev->bus->boardinfo);
int i;
u16 val;
+ bool workaround = false;
+
+ if (sprom->revision < 4)
+ workaround = (binfo->vendor != PCI_VENDOR_ID_BROADCOM ||
+ binfo->type != 0x46D ||
+ binfo->rev < 0x41);
+ else
+ workaround = ((sprom->boardflags_hi & B43_BFH_NOPA) == 0);
b43_radio_mask(dev, B2055_MASTER1, 0xFFF3);
- msleep(1);
- if ((sprom->revision != 4) ||
- !(sprom->boardflags_hi & B43_BFH_RSSIINV)) {
- if ((binfo->vendor != PCI_VENDOR_ID_BROADCOM) ||
- (binfo->type != 0x46D) ||
- (binfo->rev < 0x41)) {
- b43_radio_mask(dev, B2055_C1_RX_BB_REG, 0x7F);
- b43_radio_mask(dev, B2055_C1_RX_BB_REG, 0x7F);
- msleep(1);
- }
+ if (workaround) {
+ b43_radio_mask(dev, B2055_C1_RX_BB_REG, 0x7F);
+ b43_radio_mask(dev, B2055_C2_RX_BB_REG, 0x7F);
}
- b43_radio_maskset(dev, B2055_RRCCAL_NOPTSEL, 0x3F, 0x2C);
- msleep(1);
- b43_radio_write16(dev, B2055_CAL_MISC, 0x3C);
- msleep(1);
+ b43_radio_maskset(dev, B2055_RRCCAL_NOPTSEL, 0xFFC0, 0x2C);
+ b43_radio_write(dev, B2055_CAL_MISC, 0x3C);
b43_radio_mask(dev, B2055_CAL_MISC, 0xFFBE);
- msleep(1);
b43_radio_set(dev, B2055_CAL_LPOCTL, 0x80);
- msleep(1);
b43_radio_set(dev, B2055_CAL_MISC, 0x1);
msleep(1);
b43_radio_set(dev, B2055_CAL_MISC, 0x40);
- msleep(1);
- for (i = 0; i < 100; i++) {
- val = b43_radio_read16(dev, B2055_CAL_COUT2);
- if (val & 0x80)
+ for (i = 0; i < 200; i++) {
+ val = b43_radio_read(dev, B2055_CAL_COUT2);
+ if (val & 0x80) {
+ i = 0;
break;
+ }
udelay(10);
}
- msleep(1);
+ if (i)
+ b43err(dev->wl, "radio post init timeout\n");
b43_radio_mask(dev, B2055_CAL_LPOCTL, 0xFF7F);
- msleep(1);
nphy_channel_switch(dev, dev->phy.channel);
- b43_radio_write16(dev, B2055_C1_RX_BB_LPF, 0x9);
- b43_radio_write16(dev, B2055_C2_RX_BB_LPF, 0x9);
- b43_radio_write16(dev, B2055_C1_RX_BB_MIDACHP, 0x83);
- b43_radio_write16(dev, B2055_C2_RX_BB_MIDACHP, 0x83);
+ b43_radio_write(dev, B2055_C1_RX_BB_LPF, 0x9);
+ b43_radio_write(dev, B2055_C2_RX_BB_LPF, 0x9);
+ b43_radio_write(dev, B2055_C1_RX_BB_MIDACHP, 0x83);
+ b43_radio_write(dev, B2055_C2_RX_BB_MIDACHP, 0x83);
+ b43_radio_maskset(dev, B2055_C1_LNA_GAINBST, 0xFFF8, 0x6);
+ b43_radio_maskset(dev, B2055_C2_LNA_GAINBST, 0xFFF8, 0x6);
+ if (!nphy->gain_boost) {
+ b43_radio_set(dev, B2055_C1_RX_RFSPC1, 0x2);
+ b43_radio_set(dev, B2055_C2_RX_RFSPC1, 0x2);
+ } else {
+ b43_radio_mask(dev, B2055_C1_RX_RFSPC1, 0xFFFD);
+ b43_radio_mask(dev, B2055_C2_RX_RFSPC1, 0xFFFD);
+ }
+ udelay(2);
}
-/* Initialize a Broadcom 2055 N-radio */
+/*
+ * Initialize a Broadcom 2055 N-radio
+ * http://bcm-v4.sipsolutions.net/802.11/Radio/2055/Init
+ */
static void b43_radio_init2055(struct b43_wldev *dev)
{
b43_radio_init2055_pre(dev);
@@ -230,16 +254,15 @@ static void b43_radio_init2055(struct b43_wldev *dev)
b43_radio_init2055_post(dev);
}
-void b43_nphy_radio_turn_on(struct b43_wldev *dev)
+/*
+ * Initialize a Broadcom 2056 N-radio
+ * http://bcm-v4.sipsolutions.net/802.11/Radio/2056/Init
+ */
+static void b43_radio_init2056(struct b43_wldev *dev)
{
- b43_radio_init2055(dev);
+ /* TODO */
}
-void b43_nphy_radio_turn_off(struct b43_wldev *dev)
-{
- b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
- ~B43_NPHY_RFCTL_CMD_EN);
-}
/*
* Upload the N-PHY tables.
@@ -647,6 +670,41 @@ static void b43_nphy_read_clip_detection(struct b43_wldev *dev, u16 *clip_st)
clip_st[1] = b43_phy_read(dev, B43_NPHY_C2_CLIP1THRES);
}
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SuperSwitchInit */
+static void b43_nphy_superswitch_init(struct b43_wldev *dev, bool init)
+{
+ if (dev->phy.rev >= 3) {
+ if (!init)
+ return;
+ if (0 /* FIXME */) {
+ b43_ntab_write(dev, B43_NTAB16(9, 2), 0x211);
+ b43_ntab_write(dev, B43_NTAB16(9, 3), 0x222);
+ b43_ntab_write(dev, B43_NTAB16(9, 8), 0x144);
+ b43_ntab_write(dev, B43_NTAB16(9, 12), 0x188);
+ }
+ } else {
+ b43_phy_write(dev, B43_NPHY_GPIO_LOOEN, 0);
+ b43_phy_write(dev, B43_NPHY_GPIO_HIOEN, 0);
+
+ ssb_chipco_gpio_control(&dev->dev->bus->chipco, 0xFC00,
+ 0xFC00);
+ b43_write32(dev, B43_MMIO_MACCTL,
+ b43_read32(dev, B43_MMIO_MACCTL) &
+ ~B43_MACCTL_GPOUTSMSK);
+ b43_write16(dev, B43_MMIO_GPIO_MASK,
+ b43_read16(dev, B43_MMIO_GPIO_MASK) | 0xFC00);
+ b43_write16(dev, B43_MMIO_GPIO_CONTROL,
+ b43_read16(dev, B43_MMIO_GPIO_CONTROL) & ~0xFC00);
+
+ if (init) {
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
+ }
+ }
+}
+
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/classifier */
static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val)
{
@@ -723,7 +781,7 @@ static void b43_nphy_spur_workaround(struct b43_wldev *dev)
{
struct b43_phy_n *nphy = dev->phy.n;
- unsigned int channel;
+ u8 channel = nphy->radio_chanspec.channel;
int tone[2] = { 57, 58 };
u32 noise[2] = { 0x3FF, 0x3FF };
@@ -732,8 +790,6 @@ static void b43_nphy_spur_workaround(struct b43_wldev *dev)
if (nphy->hang_avoid)
b43_nphy_stay_in_carrier_search(dev, 1);
- /* FIXME: channel = radio_chanspec */
-
if (nphy->gband_spurwar_en) {
/* TODO: N PHY Adjust Analog Pfbw (7) */
if (channel == 11 && dev->phy.is_40mhz)
@@ -779,6 +835,62 @@ static void b43_nphy_spur_workaround(struct b43_wldev *dev)
b43_nphy_stay_in_carrier_search(dev, 0);
}
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/AdjustLnaGainTbl */
+static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ u8 i;
+ s16 tmp;
+ u16 data[4];
+ s16 gain[2];
+ u16 minmax[2];
+ u16 lna_gain[4] = { -2, 10, 19, 25 };
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ if (nphy->gain_boost) {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ gain[0] = 6;
+ gain[1] = 6;
+ } else {
+ tmp = 40370 - 315 * nphy->radio_chanspec.channel;
+ gain[0] = ((tmp >> 13) + ((tmp >> 12) & 1));
+ tmp = 23242 - 224 * nphy->radio_chanspec.channel;
+ gain[1] = ((tmp >> 13) + ((tmp >> 12) & 1));
+ }
+ } else {
+ gain[0] = 0;
+ gain[1] = 0;
+ }
+
+ for (i = 0; i < 2; i++) {
+ if (nphy->elna_gain_config) {
+ data[0] = 19 + gain[i];
+ data[1] = 25 + gain[i];
+ data[2] = 25 + gain[i];
+ data[3] = 25 + gain[i];
+ } else {
+ data[0] = lna_gain[0] + gain[i];
+ data[1] = lna_gain[1] + gain[i];
+ data[2] = lna_gain[2] + gain[i];
+ data[3] = lna_gain[3] + gain[i];
+ }
+ b43_ntab_write_bulk(dev, B43_NTAB16(10, 8), 4, data);
+
+ minmax[i] = 23 + gain[i];
+ }
+
+ b43_phy_maskset(dev, B43_NPHY_C1_MINMAX_GAIN, ~B43_NPHY_C1_MINGAIN,
+ minmax[0] << B43_NPHY_C1_MINGAIN_SHIFT);
+ b43_phy_maskset(dev, B43_NPHY_C2_MINMAX_GAIN, ~B43_NPHY_C2_MINGAIN,
+ minmax[1] << B43_NPHY_C2_MINGAIN_SHIFT);
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
+}
+
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */
static void b43_nphy_gain_crtl_workarounds(struct b43_wldev *dev)
{
@@ -863,7 +975,7 @@ static void b43_nphy_gain_crtl_workarounds(struct b43_wldev *dev)
b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
(code << 8 | 0x7C));
- /* TODO: b43_nphy_adjust_lna_gain_table(dev); */
+ b43_nphy_adjust_lna_gain_table(dev);
if (nphy->elna_gain_config) {
b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0808);
@@ -1970,12 +2082,12 @@ static void b43_nphy_restore_rssi_cal(struct b43_wldev *dev)
u16 *rssical_phy_regs = NULL;
if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
- if (!nphy->rssical_chanspec_2G)
+ if (b43_empty_chanspec(&nphy->rssical_chanspec_2G))
return;
rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_2G;
rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_2G;
} else {
- if (!nphy->rssical_chanspec_5G)
+ if (b43_empty_chanspec(&nphy->rssical_chanspec_5G))
return;
rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_5G;
rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_5G;
@@ -2395,7 +2507,7 @@ static void b43_nphy_save_cal(struct b43_wldev *dev)
struct b43_phy_n_iq_comp *rxcal_coeffs = NULL;
u16 *txcal_radio_regs = NULL;
- u8 *iqcal_chanspec;
+ struct b43_chanspec *iqcal_chanspec;
u16 *table = NULL;
if (nphy->hang_avoid)
@@ -2451,12 +2563,12 @@ static void b43_nphy_restore_cal(struct b43_wldev *dev)
struct b43_phy_n_iq_comp *rxcal_coeffs = NULL;
if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
- if (nphy->iqcal_chanspec_2G == 0)
+ if (b43_empty_chanspec(&nphy->iqcal_chanspec_2G))
return;
table = nphy->cal_cache.txcal_coeffs_2G;
loft = &nphy->cal_cache.txcal_coeffs_2G[5];
} else {
- if (nphy->iqcal_chanspec_5G == 0)
+ if (b43_empty_chanspec(&nphy->iqcal_chanspec_5G))
return;
table = nphy->cal_cache.txcal_coeffs_5G;
loft = &nphy->cal_cache.txcal_coeffs_5G[5];
@@ -2689,7 +2801,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
}
b43_ntab_write_bulk(dev, B43_NTAB16(15, 88), 4,
buffer);
- b43_ntab_write_bulk(dev, B43_NTAB16(15, 101), 2,
+ b43_ntab_read_bulk(dev, B43_NTAB16(15, 101), 2,
buffer);
b43_ntab_write_bulk(dev, B43_NTAB16(15, 85), 2,
buffer);
@@ -2701,8 +2813,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
b43_ntab_read_bulk(dev, B43_NTAB16(15, 96), length,
nphy->txiqlocal_bestc);
nphy->txiqlocal_coeffsvalid = true;
- /* TODO: Set nphy->txiqlocal_chanspec to
- the current channel */
+ nphy->txiqlocal_chanspec = nphy->radio_chanspec;
} else {
length = 11;
if (dev->phy.rev < 3)
@@ -2737,7 +2848,8 @@ static void b43_nphy_reapply_tx_cal_coeffs(struct b43_wldev *dev)
u16 buffer[7];
bool equal = true;
- if (!nphy->txiqlocal_coeffsvalid || 1 /* FIXME */)
+ if (!nphy->txiqlocal_coeffsvalid ||
+ b43_eq_chanspecs(&nphy->txiqlocal_chanspec, &nphy->radio_chanspec))
return;
b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 7, buffer);
@@ -3092,9 +3204,11 @@ int b43_phy_initn(struct b43_wldev *dev)
do_rssi_cal = false;
if (phy->rev >= 3) {
if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
- do_rssi_cal = (nphy->rssical_chanspec_2G == 0);
+ do_rssi_cal =
+ b43_empty_chanspec(&nphy->rssical_chanspec_2G);
else
- do_rssi_cal = (nphy->rssical_chanspec_5G == 0);
+ do_rssi_cal =
+ b43_empty_chanspec(&nphy->rssical_chanspec_5G);
if (do_rssi_cal)
b43_nphy_rssi_cal(dev);
@@ -3106,9 +3220,9 @@ int b43_phy_initn(struct b43_wldev *dev)
if (!((nphy->measure_hold & 0x6) != 0)) {
if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
- do_cal = (nphy->iqcal_chanspec_2G == 0);
+ do_cal = b43_empty_chanspec(&nphy->iqcal_chanspec_2G);
else
- do_cal = (nphy->iqcal_chanspec_5G == 0);
+ do_cal = b43_empty_chanspec(&nphy->iqcal_chanspec_5G);
if (nphy->mute)
do_cal = false;
@@ -3117,7 +3231,7 @@ int b43_phy_initn(struct b43_wldev *dev)
target = b43_nphy_get_tx_gains(dev);
if (nphy->antsel_type == 2)
- ;/*TODO NPHY Superswitch Init with argument 1*/
+ b43_nphy_superswitch_init(dev, true);
if (nphy->perical != 2) {
b43_nphy_rssi_cal(dev);
if (phy->rev >= 3) {
@@ -3155,6 +3269,133 @@ int b43_phy_initn(struct b43_wldev *dev)
return 0;
}
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ChanspecSetup */
+static void b43_nphy_chanspec_setup(struct b43_wldev *dev,
+ const struct b43_phy_n_sfo_cfg *e,
+ struct b43_chanspec chanspec)
+{
+ struct b43_phy *phy = &dev->phy;
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ u16 tmp;
+ u32 tmp32;
+
+ tmp = b43_phy_read(dev, B43_NPHY_BANDCTL) & B43_NPHY_BANDCTL_5GHZ;
+ if (chanspec.b_freq == 1 && tmp == 0) {
+ tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR);
+ b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4);
+ b43_phy_set(dev, B43_PHY_B_BBCFG, 0xC000);
+ b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32);
+ b43_phy_set(dev, B43_NPHY_BANDCTL, B43_NPHY_BANDCTL_5GHZ);
+ } else if (chanspec.b_freq == 1) {
+ b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ);
+ tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR);
+ b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4);
+ b43_phy_mask(dev, B43_PHY_B_BBCFG, (u16)~0xC000);
+ b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32);
+ }
+
+ b43_chantab_phy_upload(dev, e);
+
+ tmp = chanspec.channel;
+ if (chanspec.b_freq == 1)
+ tmp |= 0x0100;
+ if (chanspec.b_width == 3)
+ tmp |= 0x0200;
+ b43_shm_write16(dev, B43_SHM_SHARED, 0xA0, tmp);
+
+ if (nphy->radio_chanspec.channel == 14) {
+ b43_nphy_classifier(dev, 2, 0);
+ b43_phy_set(dev, B43_PHY_B_TEST, 0x0800);
+ } else {
+ b43_nphy_classifier(dev, 2, 2);
+ if (chanspec.b_freq == 2)
+ b43_phy_mask(dev, B43_PHY_B_TEST, ~0x840);
+ }
+
+ if (nphy->txpwrctrl)
+ b43_nphy_tx_power_fix(dev);
+
+ if (dev->phy.rev < 3)
+ b43_nphy_adjust_lna_gain_table(dev);
+
+ b43_nphy_tx_lp_fbw(dev);
+
+ if (dev->phy.rev >= 3 && 0) {
+ /* TODO */
+ }
+
+ b43_phy_write(dev, B43_NPHY_NDATAT_DUP40, 0x3830);
+
+ if (phy->rev >= 3)
+ b43_nphy_spur_workaround(dev);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetChanspec */
+static int b43_nphy_set_chanspec(struct b43_wldev *dev,
+ struct b43_chanspec chanspec)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ const struct b43_nphy_channeltab_entry_rev2 *tabent_r2;
+ const struct b43_nphy_channeltab_entry_rev3 *tabent_r3;
+
+ u8 tmp;
+ u8 channel = chanspec.channel;
+
+ if (dev->phy.rev >= 3) {
+ /* TODO */
+ tabent_r3 = NULL;
+ if (!tabent_r3)
+ return -ESRCH;
+ } else {
+ tabent_r2 = b43_nphy_get_chantabent_rev2(dev, channel);
+ if (!tabent_r2)
+ return -ESRCH;
+ }
+
+ nphy->radio_chanspec = chanspec;
+
+ if (chanspec.b_width != nphy->b_width)
+ ; /* TODO: BMAC BW Set (chanspec.b_width) */
+
+ /* TODO: use defines */
+ if (chanspec.b_width == 3) {
+ if (chanspec.sideband == 2)
+ b43_phy_set(dev, B43_NPHY_RXCTL,
+ B43_NPHY_RXCTL_BSELU20);
+ else
+ b43_phy_mask(dev, B43_NPHY_RXCTL,
+ ~B43_NPHY_RXCTL_BSELU20);
+ }
+
+ if (dev->phy.rev >= 3) {
+ tmp = (chanspec.b_freq == 1) ? 4 : 0;
+ b43_radio_maskset(dev, 0x08, 0xFFFB, tmp);
+ /* TODO: PHY Radio2056 Setup (dev, tabent_r3); */
+ b43_nphy_chanspec_setup(dev, &(tabent_r3->phy_regs), chanspec);
+ } else {
+ tmp = (chanspec.b_freq == 1) ? 0x0020 : 0x0050;
+ b43_radio_maskset(dev, B2055_MASTER1, 0xFF8F, tmp);
+ b43_radio_2055_setup(dev, tabent_r2);
+ b43_nphy_chanspec_setup(dev, &(tabent_r2->phy_regs), chanspec);
+ }
+
+ return 0;
+}
+
+/* Tune the hardware to a new channel */
+static int nphy_channel_switch(struct b43_wldev *dev, unsigned int channel)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ struct b43_chanspec chanspec;
+ chanspec = nphy->radio_chanspec;
+ chanspec.channel = channel;
+
+ return b43_nphy_set_chanspec(dev, chanspec);
+}
+
static int b43_nphy_op_allocate(struct b43_wldev *dev)
{
struct b43_phy_n *nphy;
@@ -3243,9 +3484,43 @@ static void b43_nphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value);
}
+/* http://bcm-v4.sipsolutions.net/802.11/Radio/Switch%20Radio */
static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
bool blocked)
-{//TODO
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ if (b43_read32(dev, B43_MMIO_MACCTL) & B43_MACCTL_ENABLED)
+ b43err(dev->wl, "MAC not suspended\n");
+
+ if (blocked) {
+ b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
+ ~B43_NPHY_RFCTL_CMD_CHIP0PU);
+ if (dev->phy.rev >= 3) {
+ b43_radio_mask(dev, 0x09, ~0x2);
+
+ b43_radio_write(dev, 0x204D, 0);
+ b43_radio_write(dev, 0x2053, 0);
+ b43_radio_write(dev, 0x2058, 0);
+ b43_radio_write(dev, 0x205E, 0);
+ b43_radio_mask(dev, 0x2062, ~0xF0);
+ b43_radio_write(dev, 0x2064, 0);
+
+ b43_radio_write(dev, 0x304D, 0);
+ b43_radio_write(dev, 0x3053, 0);
+ b43_radio_write(dev, 0x3058, 0);
+ b43_radio_write(dev, 0x305E, 0);
+ b43_radio_mask(dev, 0x3062, ~0xF0);
+ b43_radio_write(dev, 0x3064, 0);
+ }
+ } else {
+ if (dev->phy.rev >= 3) {
+ b43_radio_init2056(dev);
+ b43_nphy_set_chanspec(dev, nphy->radio_chanspec);
+ } else {
+ b43_radio_init2055(dev);
+ }
+ }
}
static void b43_nphy_op_switch_analog(struct b43_wldev *dev, bool on)
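The hunks above test the saved calibration state with b43_empty_chanspec() and b43_eq_chanspecs(); their definitions fall outside this excerpt. Going by the struct b43_chanspec layout added to phy_n.h below and by how the helpers are called, plausible field-wise implementations would look roughly like this (a sketch under that assumption, not necessarily the committed code):

static inline bool b43_empty_chanspec(struct b43_chanspec *chanspec)
{
	/* All-zero fields mean "no chanspec recorded yet". */
	return !chanspec->channel && !chanspec->sideband &&
	       !chanspec->b_width && !chanspec->b_freq;
}

static inline bool b43_eq_chanspecs(struct b43_chanspec *chanspec1,
				    struct b43_chanspec *chanspec2)
{
	return (chanspec1->channel == chanspec2->channel &&
		chanspec1->sideband == chanspec2->sideband &&
		chanspec1->b_width == chanspec2->b_width &&
		chanspec1->b_freq == chanspec2->b_freq);
}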
diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h
index 403aad3f894f..8b6d570dd0aa 100644
--- a/drivers/net/wireless/b43/phy_n.h
+++ b/drivers/net/wireless/b43/phy_n.h
@@ -711,6 +711,8 @@
#define B43_NPHY_PAPD_EN1 B43_PHY_N(0x29B) /* PAPD Enable1 TBD */
#define B43_NPHY_EPS_TABLE_ADJ1 B43_PHY_N(0x29C) /* EPS Table Adj1 TBD */
+#define B43_PHY_B_BBCFG B43_PHY_N_BMODE(0x001) /* BB config */
+#define B43_PHY_B_TEST B43_PHY_N_BMODE(0x00A)
/* Broadcom 2055 radio registers */
@@ -924,6 +926,13 @@
struct b43_wldev;
+struct b43_chanspec {
+ u8 channel;
+ u8 sideband;
+ u8 b_width;
+ u8 b_freq;
+};
+
struct b43_phy_n_iq_comp {
s16 a0;
s16 b0;
@@ -975,7 +984,8 @@ struct b43_phy_n {
u16 papd_epsilon_offset[2];
s32 preamble_override;
u32 bb_mult_save;
- u16 radio_chanspec;
+ u8 b_width;
+ struct b43_chanspec radio_chanspec;
bool gain_boost;
bool elna_gain_config;
@@ -991,6 +1001,7 @@ struct b43_phy_n {
u16 txiqlocal_bestc[11];
bool txiqlocal_coeffsvalid;
struct b43_phy_n_txpwrindex txpwrindex[2];
+ struct b43_chanspec txiqlocal_chanspec;
u8 txrx_chain;
u16 tx_rx_cal_phy_saveregs[11];
@@ -1006,12 +1017,12 @@ struct b43_phy_n {
bool gband_spurwar_en;
bool ipa2g_on;
- u8 iqcal_chanspec_2G;
- u8 rssical_chanspec_2G;
+ struct b43_chanspec iqcal_chanspec_2G;
+ struct b43_chanspec rssical_chanspec_2G;
bool ipa5g_on;
- u8 iqcal_chanspec_5G;
- u8 rssical_chanspec_5G;
+ struct b43_chanspec iqcal_chanspec_5G;
+ struct b43_chanspec rssical_chanspec_5G;
struct b43_phy_n_rssical_cache rssical_cache;
struct b43_phy_n_cal_cache cal_cache;
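The new struct b43_chanspec above carries no comments of its own. Reading the code paths in b43_nphy_set_chanspec() and b43_nphy_chanspec_setup() earlier in this diff, the fields appear to be interpreted as follows (inferred from usage, not authoritative): b_freq == 1 selects the 5 GHz branch, b_width == 3 selects the 40 MHz branch, and sideband == 2 picks the upper 20 MHz half (B43_NPHY_RXCTL_BSELU20). A purely illustrative initializer under that reading:

struct b43_chanspec cs = {
	.channel  = 36,	/* channel number, as passed to nphy_channel_switch() */
	.b_freq   = 1,	/* value the 5 GHz code path checks for */
	.b_width  = 3,	/* value the 40 MHz code path checks for */
	.sideband = 2,	/* upper 20 MHz half when 40 MHz is active */
};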
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index a00d509150f7..d96e870ab8fe 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -318,14 +318,14 @@ void b2055_upload_inittab(struct b43_wldev *dev,
.radio_c2_tx_mxbgtrim = r21
#define PHYREGS(r0, r1, r2, r3, r4, r5) \
- .phy_bw1a = r0, \
- .phy_bw2 = r1, \
- .phy_bw3 = r2, \
- .phy_bw4 = r3, \
- .phy_bw5 = r4, \
- .phy_bw6 = r5
-
-static const struct b43_nphy_channeltab_entry b43_nphy_channeltab[] = {
+ .phy_regs.phy_bw1a = r0, \
+ .phy_regs.phy_bw2 = r1, \
+ .phy_regs.phy_bw3 = r2, \
+ .phy_regs.phy_bw4 = r3, \
+ .phy_regs.phy_bw5 = r4, \
+ .phy_regs.phy_bw6 = r5
+
+static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab[] = {
{ .channel = 184,
.freq = 4920, /* MHz */
.unk2 = 3280,
@@ -1320,10 +1320,10 @@ static const struct b43_nphy_channeltab_entry b43_nphy_channeltab[] = {
},
};
-const struct b43_nphy_channeltab_entry *
-b43_nphy_get_chantabent(struct b43_wldev *dev, u8 channel)
+const struct b43_nphy_channeltab_entry_rev2 *
+b43_nphy_get_chantabent_rev2(struct b43_wldev *dev, u8 channel)
{
- const struct b43_nphy_channeltab_entry *e;
+ const struct b43_nphy_channeltab_entry_rev2 *e;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(b43_nphy_channeltab); i++) {
diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h
index 9c1c6ecd3672..8fc1da9f8fe5 100644
--- a/drivers/net/wireless/b43/tables_nphy.h
+++ b/drivers/net/wireless/b43/tables_nphy.h
@@ -4,9 +4,22 @@
#include <linux/types.h>
-struct b43_nphy_channeltab_entry {
+struct b43_phy_n_sfo_cfg {
+ u16 phy_bw1a;
+ u16 phy_bw2;
+ u16 phy_bw3;
+ u16 phy_bw4;
+ u16 phy_bw5;
+ u16 phy_bw6;
+};
+
+struct b43_nphy_channeltab_entry_rev2 {
/* The channel number */
u8 channel;
+ /* The channel frequency in MHz */
+ u16 freq;
+ /* An unknown value */
+ u16 unk2;
/* Radio register values on channelswitch */
u8 radio_pll_ref;
u8 radio_rf_pllmod0;
@@ -31,16 +44,18 @@ struct b43_nphy_channeltab_entry {
u8 radio_c2_tx_pgapadtn;
u8 radio_c2_tx_mxbgtrim;
/* PHY register values on channelswitch */
- u16 phy_bw1a;
- u16 phy_bw2;
- u16 phy_bw3;
- u16 phy_bw4;
- u16 phy_bw5;
- u16 phy_bw6;
+ struct b43_phy_n_sfo_cfg phy_regs;
+};
+
+struct b43_nphy_channeltab_entry_rev3 {
+ /* The channel number */
+ u8 channel;
/* The channel frequency in MHz */
u16 freq;
- /* An unknown value */
- u16 unk2;
+ /* Radio register values on channelswitch */
+ /* TODO */
+ /* PHY register values on channelswitch */
+ struct b43_phy_n_sfo_cfg phy_regs;
};
@@ -77,8 +92,8 @@ void b2055_upload_inittab(struct b43_wldev *dev,
/* Get the NPHY Channel Switch Table entry for a channel number.
* Returns NULL on failure to find an entry. */
-const struct b43_nphy_channeltab_entry *
-b43_nphy_get_chantabent(struct b43_wldev *dev, u8 channel);
+const struct b43_nphy_channeltab_entry_rev2 *
+b43_nphy_get_chantabent_rev2(struct b43_wldev *dev, u8 channel);
/* The N-PHY tables. */
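After this split only the rev2 lookup helper exists; the rev3 branch in b43_nphy_set_chanspec() earlier in the diff still leaves tabent_r3 NULL under a TODO. A rev3 getter is not part of this patch; if added later it would presumably mirror the rev2 prototype above, e.g.:

/* Hypothetical follow-up, not in this patch: */
const struct b43_nphy_channeltab_entry_rev3 *
b43_nphy_get_chantabent_rev3(struct b43_wldev *dev, u16 freq);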
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index eda06529ef5f..e6b0528f3b52 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -610,7 +610,6 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
}
/* Link quality statistics */
- status.noise = dev->stats.link_noise;
if ((chanstat & B43_RX_CHAN_PHYTYPE) == B43_PHYTYPE_N) {
// s8 rssi = max(rxhdr->power0, rxhdr->power1);
//TODO: Find out what the rssi value is (dBm or percentage?)
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index bb2dd9329aa0..1713f5f7a58b 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -3482,6 +3482,23 @@ static int b43legacy_op_beacon_set_tim(struct ieee80211_hw *hw,
return 0;
}
+static int b43legacy_op_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
+ struct b43legacy_wldev *dev = wl->current_dev;
+ struct ieee80211_conf *conf = &hw->conf;
+
+ if (idx != 0)
+ return -ENOENT;
+
+ survey->channel = conf->channel;
+ survey->filled = SURVEY_INFO_NOISE_DBM;
+ survey->noise = dev->stats.link_noise;
+
+ return 0;
+}
+
static const struct ieee80211_ops b43legacy_hw_ops = {
.tx = b43legacy_op_tx,
.conf_tx = b43legacy_op_conf_tx,
@@ -3494,6 +3511,7 @@ static const struct ieee80211_ops b43legacy_hw_ops = {
.start = b43legacy_op_start,
.stop = b43legacy_op_stop,
.set_tim = b43legacy_op_beacon_set_tim,
+ .get_survey = b43legacy_op_get_survey,
.rfkill_poll = b43legacy_rfkill_poll,
};
@@ -3769,8 +3787,7 @@ static int b43legacy_wireless_init(struct ssb_device *dev)
/* fill hw info */
hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_NOISE_DBM;
+ IEEE80211_HW_SIGNAL_DBM;
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_STATION) |
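The b43legacy hunks above move the noise figure from the per-frame RX status (status.noise, IEEE80211_HW_NOISE_DBM) to the mac80211 survey callback. With get_survey wired up, the same value is reachable from userspace through nl80211 survey dumps (e.g. `iw dev wlan0 survey dump`), which is presumably why the per-packet field can be dropped from xmit.c below.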
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index 9c8882d9275e..7d177d97f1f7 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -548,7 +548,6 @@ void b43legacy_rx(struct b43legacy_wldev *dev,
(phystat0 & B43legacy_RX_PHYST0_OFDM),
(phystat0 & B43legacy_RX_PHYST0_GAINCTL),
(phystat3 & B43legacy_RX_PHYST3_TRSTATE));
- status.noise = dev->stats.link_noise;
/* change to support A PHY */
if (phystat0 & B43legacy_RX_PHYST0_OFDM)
status.rate_idx = b43legacy_plcp_get_bitrate_idx_ofdm(plcp, false);
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index a36501dbbe02..db72461c486b 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -39,7 +39,6 @@ MODULE_PARM_DESC(ignore_cis_vcc, "Ignore broken CIS VCC entry");
/* struct local_info::hw_priv */
struct hostap_cs_priv {
- dev_node_t node;
struct pcmcia_device *link;
int sandisk_connectplus;
};
@@ -556,15 +555,7 @@ static int prism2_config_check(struct pcmcia_device *p_dev,
p_dev->conf.Vpp = dflt->vpp1.param[CISTPL_POWER_VNOM] / 10000;
/* Do we need to allocate an interrupt? */
- if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
- p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
- else if (!(p_dev->conf.Attributes & CONF_ENABLE_IRQ)) {
- /* At least Compaq WL200 does not have IRQInfo1 set,
- * but it does not work without interrupts.. */
- printk(KERN_WARNING "Config has no IRQ info, but trying to "
- "enable IRQ anyway..\n");
- p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
- }
+ p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
PDEBUG(DEBUG_EXTRA, "IO window settings: cfg->io.nwin=%d "
@@ -633,21 +624,10 @@ static int prism2_config(struct pcmcia_device *link)
local = iface->local;
local->hw_priv = hw_priv;
hw_priv->link = link;
- strcpy(hw_priv->node.dev_name, dev->name);
- link->dev_node = &hw_priv->node;
- /*
- * Allocate an interrupt line. Note that this does not assign a
- * handler to the interrupt, unless the 'Handler' member of the
- * irq structure is initialized.
- */
- if (link->conf.Attributes & CONF_ENABLE_IRQ) {
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = prism2_interrupt;
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
- goto failed;
- }
+ ret = pcmcia_request_irq(link, prism2_interrupt);
+ if (ret)
+ goto failed;
/*
* This actually configures the PCMCIA socket -- setting up
@@ -658,7 +638,7 @@ static int prism2_config(struct pcmcia_device *link)
if (ret)
goto failed;
- dev->irq = link->irq.AssignedIRQ;
+ dev->irq = link->irq;
dev->base_addr = link->io.BasePort1;
/* Finally, report what we've done */
@@ -668,7 +648,7 @@ static int prism2_config(struct pcmcia_device *link)
printk(", Vpp %d.%d", link->conf.Vpp / 10,
link->conf.Vpp % 10);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
- printk(", irq %d", link->irq.AssignedIRQ);
+ printk(", irq %d", link->irq);
if (link->io.NumPorts1)
printk(", io 0x%04x-0x%04x", link->io.BasePort1,
link->io.BasePort1+link->io.NumPorts1-1);
@@ -682,11 +662,9 @@ static int prism2_config(struct pcmcia_device *link)
sandisk_enable_wireless(dev);
ret = prism2_hw_config(dev, 1);
- if (!ret) {
+ if (!ret)
ret = hostap_hw_ready(dev);
- if (ret == 0 && local->ddev)
- strcpy(hw_priv->node.dev_name, local->ddev->name);
- }
+
return ret;
failed:
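The hostap_cs changes above follow the PCMCIA dynamic-IRQ conversion: the driver no longer fills in a dev_node_t or an irq request structure; the handler is handed to pcmcia_request_irq() directly and the assigned number is read back from link->irq. The resulting pattern, as it appears in the prism2_config() hunk:

ret = pcmcia_request_irq(link, prism2_interrupt);
if (ret)
	goto failed;
/* once the socket configuration call has succeeded: */
dev->irq = link->irq;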
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 9b72c45a7748..7040e3ba77b8 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -174,6 +174,8 @@ that only one external action is invoked at a time.
#define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2100 Network Driver"
#define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
+struct pm_qos_request_list *ipw2100_pm_qos_req;
+
/* Debugging stuff */
#ifdef CONFIG_IPW2100_DEBUG
#define IPW2100_RX_DEBUG /* Reception debugging */
@@ -1739,7 +1741,7 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred)
/* the ipw2100 hardware really doesn't want power management delays
* longer than 175usec
*/
- pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100", 175);
+ pm_qos_update_request(ipw2100_pm_qos_req, 175);
/* If the interrupt is enabled, turn it off... */
spin_lock_irqsave(&priv->low_lock, flags);
@@ -1887,8 +1889,7 @@ static void ipw2100_down(struct ipw2100_priv *priv)
ipw2100_disable_interrupts(priv);
spin_unlock_irqrestore(&priv->low_lock, flags);
- pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100",
- PM_QOS_DEFAULT_VALUE);
+ pm_qos_update_request(ipw2100_pm_qos_req, PM_QOS_DEFAULT_VALUE);
/* We have to signal any supplicant if we are disassociating */
if (associated)
@@ -2140,7 +2141,7 @@ static void isr_indicate_association_lost(struct ipw2100_priv *priv, u32 status)
DECLARE_SSID_BUF(ssid);
IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
- "disassociated: '%s' %pM \n",
+ "disassociated: '%s' %pM\n",
print_ssid(ssid, priv->essid, priv->essid_len),
priv->bssid);
@@ -3285,7 +3286,7 @@ static void ipw2100_irq_tasklet(struct ipw2100_priv *priv)
if (inta & IPW2100_INTA_PARITY_ERROR) {
printk(KERN_ERR DRV_NAME
- ": ***** PARITY ERROR INTERRUPT !!!! \n");
+ ": ***** PARITY ERROR INTERRUPT !!!!\n");
priv->inta_other++;
write_register(dev, IPW_REG_INTA, IPW2100_INTA_PARITY_ERROR);
}
@@ -6102,7 +6103,7 @@ static const struct net_device_ops ipw2100_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-/* Look into using netdev destructor to shutdown ieee80211? */
+/* Look into using netdev destructor to shutdown libipw? */
static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
void __iomem * base_addr,
@@ -6112,7 +6113,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
struct ipw2100_priv *priv;
struct net_device *dev;
- dev = alloc_ieee80211(sizeof(struct ipw2100_priv), 0);
+ dev = alloc_libipw(sizeof(struct ipw2100_priv), 0);
if (!dev)
return NULL;
priv = libipw_priv(dev);
@@ -6425,7 +6426,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
sysfs_remove_group(&pci_dev->dev.kobj,
&ipw2100_attribute_group);
- free_ieee80211(dev, 0);
+ free_libipw(dev, 0);
pci_set_drvdata(pci_dev, NULL);
}
@@ -6483,10 +6484,10 @@ static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev)
if (dev->base_addr)
iounmap((void __iomem *)dev->base_addr);
- /* wiphy_unregister needs to be here, before free_ieee80211 */
+ /* wiphy_unregister needs to be here, before free_libipw */
wiphy_unregister(priv->ieee->wdev.wiphy);
kfree(priv->ieee->bg_band.channels);
- free_ieee80211(dev, 0);
+ free_libipw(dev, 0);
}
pci_release_regions(pci_dev);
@@ -6669,7 +6670,7 @@ static int __init ipw2100_init(void)
if (ret)
goto out;
- pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100",
+ ipw2100_pm_qos_req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
#ifdef CONFIG_IPW2100_DEBUG
ipw2100_debug_level = debug;
@@ -6692,7 +6693,7 @@ static void __exit ipw2100_exit(void)
&driver_attr_debug_level);
#endif
pci_unregister_driver(&ipw2100_pci_driver);
- pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100");
+ pm_qos_remove_request(ipw2100_pm_qos_req);
}
module_init(ipw2100_init);
@@ -6753,7 +6754,7 @@ static int ipw2100_wx_set_freq(struct net_device *dev,
err = -EOPNOTSUPP;
goto done;
} else { /* Set the channel */
- IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
+ IPW_DEBUG_WX("SET Freq/Channel -> %d\n", fwrq->m);
err = ipw2100_set_channel(priv, fwrq->m, 0);
}
@@ -6782,7 +6783,7 @@ static int ipw2100_wx_get_freq(struct net_device *dev,
else
wrqu->freq.m = 0;
- IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
+ IPW_DEBUG_WX("GET Freq/Channel -> %d\n", priv->channel);
return 0;
}
@@ -6794,7 +6795,7 @@ static int ipw2100_wx_set_mode(struct net_device *dev,
struct ipw2100_priv *priv = libipw_priv(dev);
int err = 0;
- IPW_DEBUG_WX("SET Mode -> %d \n", wrqu->mode);
+ IPW_DEBUG_WX("SET Mode -> %d\n", wrqu->mode);
if (wrqu->mode == priv->ieee->iw_mode)
return 0;
@@ -7149,7 +7150,7 @@ static int ipw2100_wx_set_nick(struct net_device *dev,
memset(priv->nick, 0, sizeof(priv->nick));
memcpy(priv->nick, extra, wrqu->data.length);
- IPW_DEBUG_WX("SET Nickname -> %s \n", priv->nick);
+ IPW_DEBUG_WX("SET Nickname -> %s\n", priv->nick);
return 0;
}
@@ -7168,7 +7169,7 @@ static int ipw2100_wx_get_nick(struct net_device *dev,
memcpy(extra, priv->nick, wrqu->data.length);
wrqu->data.flags = 1; /* active */
- IPW_DEBUG_WX("GET Nickname -> %s \n", extra);
+ IPW_DEBUG_WX("GET Nickname -> %s\n", extra);
return 0;
}
@@ -7207,7 +7208,7 @@ static int ipw2100_wx_set_rate(struct net_device *dev,
err = ipw2100_set_tx_rates(priv, rate, 0);
- IPW_DEBUG_WX("SET Rate -> %04X \n", rate);
+ IPW_DEBUG_WX("SET Rate -> %04X\n", rate);
done:
mutex_unlock(&priv->action_mutex);
return err;
@@ -7258,7 +7259,7 @@ static int ipw2100_wx_get_rate(struct net_device *dev,
wrqu->bitrate.value = 0;
}
- IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
+ IPW_DEBUG_WX("GET Rate -> %d\n", wrqu->bitrate.value);
done:
mutex_unlock(&priv->action_mutex);
@@ -7294,7 +7295,7 @@ static int ipw2100_wx_set_rts(struct net_device *dev,
err = ipw2100_set_rts_threshold(priv, value);
- IPW_DEBUG_WX("SET RTS Threshold -> 0x%08X \n", value);
+ IPW_DEBUG_WX("SET RTS Threshold -> 0x%08X\n", value);
done:
mutex_unlock(&priv->action_mutex);
return err;
@@ -7316,7 +7317,7 @@ static int ipw2100_wx_get_rts(struct net_device *dev,
/* If RTS is set to the default value, then it is disabled */
wrqu->rts.disabled = (priv->rts_threshold & RTS_DISABLED) ? 1 : 0;
- IPW_DEBUG_WX("GET RTS Threshold -> 0x%08X \n", wrqu->rts.value);
+ IPW_DEBUG_WX("GET RTS Threshold -> 0x%08X\n", wrqu->rts.value);
return 0;
}
@@ -7355,7 +7356,7 @@ static int ipw2100_wx_set_txpow(struct net_device *dev,
err = ipw2100_set_tx_power(priv, value);
- IPW_DEBUG_WX("SET TX Power -> %d \n", value);
+ IPW_DEBUG_WX("SET TX Power -> %d\n", value);
done:
mutex_unlock(&priv->action_mutex);
@@ -7384,7 +7385,7 @@ static int ipw2100_wx_get_txpow(struct net_device *dev,
wrqu->txpower.flags = IW_TXPOW_DBM;
- IPW_DEBUG_WX("GET TX Power -> %d \n", wrqu->txpower.value);
+ IPW_DEBUG_WX("GET TX Power -> %d\n", wrqu->txpower.value);
return 0;
}
@@ -7414,7 +7415,7 @@ static int ipw2100_wx_set_frag(struct net_device *dev,
priv->frag_threshold = priv->ieee->fts;
}
- IPW_DEBUG_WX("SET Frag Threshold -> %d \n", priv->ieee->fts);
+ IPW_DEBUG_WX("SET Frag Threshold -> %d\n", priv->ieee->fts);
return 0;
}
@@ -7432,7 +7433,7 @@ static int ipw2100_wx_get_frag(struct net_device *dev,
wrqu->frag.fixed = 0; /* no auto select */
wrqu->frag.disabled = (priv->frag_threshold & FRAG_DISABLED) ? 1 : 0;
- IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
+ IPW_DEBUG_WX("GET Frag Threshold -> %d\n", wrqu->frag.value);
return 0;
}
@@ -7458,14 +7459,14 @@ static int ipw2100_wx_set_retry(struct net_device *dev,
if (wrqu->retry.flags & IW_RETRY_SHORT) {
err = ipw2100_set_short_retry(priv, wrqu->retry.value);
- IPW_DEBUG_WX("SET Short Retry Limit -> %d \n",
+ IPW_DEBUG_WX("SET Short Retry Limit -> %d\n",
wrqu->retry.value);
goto done;
}
if (wrqu->retry.flags & IW_RETRY_LONG) {
err = ipw2100_set_long_retry(priv, wrqu->retry.value);
- IPW_DEBUG_WX("SET Long Retry Limit -> %d \n",
+ IPW_DEBUG_WX("SET Long Retry Limit -> %d\n",
wrqu->retry.value);
goto done;
}
@@ -7474,7 +7475,7 @@ static int ipw2100_wx_set_retry(struct net_device *dev,
if (!err)
err = ipw2100_set_long_retry(priv, wrqu->retry.value);
- IPW_DEBUG_WX("SET Both Retry Limits -> %d \n", wrqu->retry.value);
+ IPW_DEBUG_WX("SET Both Retry Limits -> %d\n", wrqu->retry.value);
done:
mutex_unlock(&priv->action_mutex);
@@ -7508,7 +7509,7 @@ static int ipw2100_wx_get_retry(struct net_device *dev,
wrqu->retry.value = priv->short_retry_limit;
}
- IPW_DEBUG_WX("GET Retry -> %d \n", wrqu->retry.value);
+ IPW_DEBUG_WX("GET Retry -> %d\n", wrqu->retry.value);
return 0;
}
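The ipw2100 changes above track the pm_qos rework from name-keyed requirements to request handles. The full lifecycle, assembled from the ipw2100_init/up/down/exit hunks in this diff:

struct pm_qos_request_list *req;

req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
pm_qos_update_request(req, 175);			/* tighten while the NIC is up */
pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);	/* relax again on down */
pm_qos_remove_request(req);				/* drop the request on module exit */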
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 8d72e3d19586..82de71a3aea7 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -459,7 +459,7 @@ static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
{
u32 word;
_ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
- IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
+ IPW_DEBUG_IO(" reg = 0x%8X :\n", reg);
word = _ipw_read32(priv, IPW_INDIRECT_DATA);
return (word >> ((reg & 0x3) * 8)) & 0xff;
}
@@ -473,7 +473,7 @@ static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
_ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
value = _ipw_read32(priv, IPW_INDIRECT_DATA);
- IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
+ IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x\n", reg, value);
return value;
}
@@ -2349,16 +2349,25 @@ static void ipw_bg_adapter_restart(struct work_struct *work)
mutex_unlock(&priv->mutex);
}
-#define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
+static void ipw_abort_scan(struct ipw_priv *priv);
+
+#define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
static void ipw_scan_check(void *data)
{
struct ipw_priv *priv = data;
- if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
+
+ if (priv->status & STATUS_SCAN_ABORTING) {
IPW_DEBUG_SCAN("Scan completion watchdog resetting "
"adapter after (%dms).\n",
jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
queue_work(priv->workqueue, &priv->adapter_restart);
+ } else if (priv->status & STATUS_SCANNING) {
+ IPW_DEBUG_SCAN("Scan completion watchdog aborting scan "
+ "after (%dms).\n",
+ jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
+ ipw_abort_scan(priv);
+ queue_delayed_work(priv->workqueue, &priv->scan_check, HZ);
}
}
@@ -2739,7 +2748,7 @@ static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
static int ipw_fw_dma_enable(struct ipw_priv *priv)
{ /* start dma engine but no transfers yet */
- IPW_DEBUG_FW(">> : \n");
+ IPW_DEBUG_FW(">> :\n");
/* Start the dma */
ipw_fw_dma_reset_command_blocks(priv);
@@ -2747,7 +2756,7 @@ static int ipw_fw_dma_enable(struct ipw_priv *priv)
/* Write CB base address */
ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
- IPW_DEBUG_FW("<< : \n");
+ IPW_DEBUG_FW("<< :\n");
return 0;
}
@@ -2762,7 +2771,7 @@ static void ipw_fw_dma_abort(struct ipw_priv *priv)
ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
priv->sram_desc.last_cb_index = 0;
- IPW_DEBUG_FW("<< \n");
+ IPW_DEBUG_FW("<<\n");
}
static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
@@ -2813,29 +2822,29 @@ static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
IPW_DEBUG_FW(">> :\n");
address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
- IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
+ IPW_DEBUG_FW_INFO("Current CB is 0x%x\n", address);
/* Read the DMA Controlor register */
register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
- IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
+ IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x\n", register_value);
/* Print the CB values */
cb_fields_address = address;
register_value = ipw_read_reg32(priv, cb_fields_address);
- IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
+ IPW_DEBUG_FW_INFO("Current CB Control Field is 0x%x\n", register_value);
cb_fields_address += sizeof(u32);
register_value = ipw_read_reg32(priv, cb_fields_address);
- IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
+ IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x\n", register_value);
cb_fields_address += sizeof(u32);
register_value = ipw_read_reg32(priv, cb_fields_address);
- IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
+ IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x\n",
register_value);
cb_fields_address += sizeof(u32);
register_value = ipw_read_reg32(priv, cb_fields_address);
- IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
+ IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x\n", register_value);
IPW_DEBUG_FW(">> :\n");
}
@@ -2851,7 +2860,7 @@ static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
sizeof(struct command_block);
- IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
+ IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X\n",
current_cb_index, current_cb_address);
IPW_DEBUG_FW(">> :\n");
@@ -2910,7 +2919,7 @@ static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
int ret, i;
u32 size;
- IPW_DEBUG_FW(">> \n");
+ IPW_DEBUG_FW(">>\n");
IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n",
nr, dest_address, len);
@@ -2927,7 +2936,7 @@ static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
IPW_DEBUG_FW_INFO(": Added new cb\n");
}
- IPW_DEBUG_FW("<< \n");
+ IPW_DEBUG_FW("<<\n");
return 0;
}
@@ -2936,7 +2945,7 @@ static int ipw_fw_dma_wait(struct ipw_priv *priv)
u32 current_index = 0, previous_index;
u32 watchdog = 0;
- IPW_DEBUG_FW(">> : \n");
+ IPW_DEBUG_FW(">> :\n");
current_index = ipw_fw_dma_command_block_index(priv);
IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
@@ -2965,7 +2974,7 @@ static int ipw_fw_dma_wait(struct ipw_priv *priv)
ipw_set_bit(priv, IPW_RESET_REG,
IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
- IPW_DEBUG_FW("<< dmaWaitSync \n");
+ IPW_DEBUG_FW("<< dmaWaitSync\n");
return 0;
}
@@ -3026,7 +3035,7 @@ static int ipw_stop_master(struct ipw_priv *priv)
{
int rc;
- IPW_DEBUG_TRACE(">> \n");
+ IPW_DEBUG_TRACE(">>\n");
/* stop master. typical delay - 0 */
ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
@@ -3045,7 +3054,7 @@ static int ipw_stop_master(struct ipw_priv *priv)
static void ipw_arc_release(struct ipw_priv *priv)
{
- IPW_DEBUG_TRACE(">> \n");
+ IPW_DEBUG_TRACE(">>\n");
mdelay(5);
ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
@@ -3067,7 +3076,7 @@ static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
image = (__le16 *) data;
- IPW_DEBUG_TRACE(">> \n");
+ IPW_DEBUG_TRACE(">>\n");
rc = ipw_stop_master(priv);
@@ -3181,7 +3190,7 @@ static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
void **virts;
dma_addr_t *phys;
- IPW_DEBUG_TRACE("<< : \n");
+ IPW_DEBUG_TRACE("<< :\n");
virts = kmalloc(sizeof(void *) * CB_NUMBER_OF_ELEMENTS_SMALL,
GFP_KERNEL);
@@ -4482,7 +4491,7 @@ static void ipw_rx_notification(struct ipw_priv *priv,
case CMAS_ASSOCIATED:{
IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
IPW_DL_ASSOC,
- "associated: '%s' %pM \n",
+ "associated: '%s' %pM\n",
print_ssid(ssid, priv->essid,
priv->essid_len),
priv->bssid);
@@ -4563,7 +4572,7 @@ static void ipw_rx_notification(struct ipw_priv *priv,
IPW_DL_ASSOC,
"deauthenticated: '%s' "
"%pM"
- ": (0x%04X) - %s \n",
+ ": (0x%04X) - %s\n",
print_ssid(ssid,
priv->
essid,
@@ -4614,7 +4623,7 @@ static void ipw_rx_notification(struct ipw_priv *priv,
IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
IPW_DL_ASSOC,
- "disassociated: '%s' %pM \n",
+ "disassociated: '%s' %pM\n",
print_ssid(ssid, priv->essid,
priv->essid_len),
priv->bssid);
@@ -4652,7 +4661,7 @@ static void ipw_rx_notification(struct ipw_priv *priv,
switch (auth->state) {
case CMAS_AUTHENTICATED:
IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
- "authenticated: '%s' %pM \n",
+ "authenticated: '%s' %pM\n",
print_ssid(ssid, priv->essid,
priv->essid_len),
priv->bssid);
@@ -6925,7 +6934,7 @@ static u8 ipw_qos_current_mode(struct ipw_priv * priv)
} else {
mode = priv->ieee->mode;
}
- IPW_DEBUG_QOS("QoS network/card mode %d \n", mode);
+ IPW_DEBUG_QOS("QoS network/card mode %d\n", mode);
return mode;
}
@@ -6965,7 +6974,7 @@ static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
&def_parameters_OFDM, size);
if ((network->qos_data.active == 1) && (active_network == 1)) {
- IPW_DEBUG_QOS("QoS was disabled call qos_activate \n");
+ IPW_DEBUG_QOS("QoS was disabled call qos_activate\n");
schedule_work(&priv->qos_activate);
}
@@ -7542,7 +7551,7 @@ static int ipw_associate_network(struct ipw_priv *priv,
return err;
}
- IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %pM \n",
+ IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %pM\n",
print_ssid(ssid, priv->essid, priv->essid_len),
priv->bssid);
@@ -8793,7 +8802,7 @@ static int ipw_wx_set_freq(struct net_device *dev,
}
}
- IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
+ IPW_DEBUG_WX("SET Freq/Channel -> %d\n", fwrq->m);
mutex_lock(&priv->mutex);
ret = ipw_set_channel(priv, channel);
mutex_unlock(&priv->mutex);
@@ -8835,7 +8844,7 @@ static int ipw_wx_get_freq(struct net_device *dev,
wrqu->freq.m = 0;
mutex_unlock(&priv->mutex);
- IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
+ IPW_DEBUG_WX("GET Freq/Channel -> %d\n", priv->channel);
return 0;
}
@@ -9230,7 +9239,7 @@ static int ipw_wx_get_sens(struct net_device *dev,
wrqu->sens.value = priv->roaming_threshold;
mutex_unlock(&priv->mutex);
- IPW_DEBUG_WX("GET roaming threshold -> %s %d \n",
+ IPW_DEBUG_WX("GET roaming threshold -> %s %d\n",
wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
return 0;
@@ -9358,7 +9367,7 @@ static int ipw_wx_get_rate(struct net_device *dev,
wrqu->bitrate.value = priv->last_rate;
wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
mutex_unlock(&priv->mutex);
- IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
+ IPW_DEBUG_WX("GET Rate -> %d\n", wrqu->bitrate.value);
return 0;
}
@@ -9381,7 +9390,7 @@ static int ipw_wx_set_rts(struct net_device *dev,
ipw_send_rts_threshold(priv, priv->rts_threshold);
mutex_unlock(&priv->mutex);
- IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
+ IPW_DEBUG_WX("SET RTS Threshold -> %d\n", priv->rts_threshold);
return 0;
}
@@ -9395,7 +9404,7 @@ static int ipw_wx_get_rts(struct net_device *dev,
wrqu->rts.fixed = 0; /* no auto select */
wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
mutex_unlock(&priv->mutex);
- IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
+ IPW_DEBUG_WX("GET RTS Threshold -> %d\n", wrqu->rts.value);
return 0;
}
@@ -9445,7 +9454,7 @@ static int ipw_wx_get_txpow(struct net_device *dev,
wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
mutex_unlock(&priv->mutex);
- IPW_DEBUG_WX("GET TX Power -> %s %d \n",
+ IPW_DEBUG_WX("GET TX Power -> %s %d\n",
wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
return 0;
@@ -9471,7 +9480,7 @@ static int ipw_wx_set_frag(struct net_device *dev,
ipw_send_frag_threshold(priv, wrqu->frag.value);
mutex_unlock(&priv->mutex);
- IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
+ IPW_DEBUG_WX("SET Frag Threshold -> %d\n", wrqu->frag.value);
return 0;
}
@@ -9485,7 +9494,7 @@ static int ipw_wx_get_frag(struct net_device *dev,
wrqu->frag.fixed = 0; /* no auto select */
wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
mutex_unlock(&priv->mutex);
- IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
+ IPW_DEBUG_WX("GET Frag Threshold -> %d\n", wrqu->frag.value);
return 0;
}
@@ -9549,7 +9558,7 @@ static int ipw_wx_get_retry(struct net_device *dev,
}
mutex_unlock(&priv->mutex);
- IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value);
+ IPW_DEBUG_WX("GET retry -> %d\n", wrqu->retry.value);
return 0;
}
@@ -9996,49 +10005,48 @@ static int ipw_wx_sw_reset(struct net_device *dev,
}
/* Rebase the WE IOCTLs to zero for the handler array */
-#define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
static iw_handler ipw_wx_handlers[] = {
- IW_IOCTL(SIOCGIWNAME) = (iw_handler) cfg80211_wext_giwname,
- IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
- IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
- IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
- IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
- IW_IOCTL(SIOCSIWSENS) = ipw_wx_set_sens,
- IW_IOCTL(SIOCGIWSENS) = ipw_wx_get_sens,
- IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
- IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
- IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
- IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan,
- IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan,
- IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid,
- IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid,
- IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick,
- IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick,
- IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate,
- IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate,
- IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts,
- IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts,
- IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag,
- IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag,
- IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow,
- IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow,
- IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry,
- IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry,
- IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
- IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
- IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power,
- IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
- IW_IOCTL(SIOCSIWSPY) = iw_handler_set_spy,
- IW_IOCTL(SIOCGIWSPY) = iw_handler_get_spy,
- IW_IOCTL(SIOCSIWTHRSPY) = iw_handler_set_thrspy,
- IW_IOCTL(SIOCGIWTHRSPY) = iw_handler_get_thrspy,
- IW_IOCTL(SIOCSIWGENIE) = ipw_wx_set_genie,
- IW_IOCTL(SIOCGIWGENIE) = ipw_wx_get_genie,
- IW_IOCTL(SIOCSIWMLME) = ipw_wx_set_mlme,
- IW_IOCTL(SIOCSIWAUTH) = ipw_wx_set_auth,
- IW_IOCTL(SIOCGIWAUTH) = ipw_wx_get_auth,
- IW_IOCTL(SIOCSIWENCODEEXT) = ipw_wx_set_encodeext,
- IW_IOCTL(SIOCGIWENCODEEXT) = ipw_wx_get_encodeext,
+ IW_HANDLER(SIOCGIWNAME, (iw_handler)cfg80211_wext_giwname),
+ IW_HANDLER(SIOCSIWFREQ, ipw_wx_set_freq),
+ IW_HANDLER(SIOCGIWFREQ, ipw_wx_get_freq),
+ IW_HANDLER(SIOCSIWMODE, ipw_wx_set_mode),
+ IW_HANDLER(SIOCGIWMODE, ipw_wx_get_mode),
+ IW_HANDLER(SIOCSIWSENS, ipw_wx_set_sens),
+ IW_HANDLER(SIOCGIWSENS, ipw_wx_get_sens),
+ IW_HANDLER(SIOCGIWRANGE, ipw_wx_get_range),
+ IW_HANDLER(SIOCSIWAP, ipw_wx_set_wap),
+ IW_HANDLER(SIOCGIWAP, ipw_wx_get_wap),
+ IW_HANDLER(SIOCSIWSCAN, ipw_wx_set_scan),
+ IW_HANDLER(SIOCGIWSCAN, ipw_wx_get_scan),
+ IW_HANDLER(SIOCSIWESSID, ipw_wx_set_essid),
+ IW_HANDLER(SIOCGIWESSID, ipw_wx_get_essid),
+ IW_HANDLER(SIOCSIWNICKN, ipw_wx_set_nick),
+ IW_HANDLER(SIOCGIWNICKN, ipw_wx_get_nick),
+ IW_HANDLER(SIOCSIWRATE, ipw_wx_set_rate),
+ IW_HANDLER(SIOCGIWRATE, ipw_wx_get_rate),
+ IW_HANDLER(SIOCSIWRTS, ipw_wx_set_rts),
+ IW_HANDLER(SIOCGIWRTS, ipw_wx_get_rts),
+ IW_HANDLER(SIOCSIWFRAG, ipw_wx_set_frag),
+ IW_HANDLER(SIOCGIWFRAG, ipw_wx_get_frag),
+ IW_HANDLER(SIOCSIWTXPOW, ipw_wx_set_txpow),
+ IW_HANDLER(SIOCGIWTXPOW, ipw_wx_get_txpow),
+ IW_HANDLER(SIOCSIWRETRY, ipw_wx_set_retry),
+ IW_HANDLER(SIOCGIWRETRY, ipw_wx_get_retry),
+ IW_HANDLER(SIOCSIWENCODE, ipw_wx_set_encode),
+ IW_HANDLER(SIOCGIWENCODE, ipw_wx_get_encode),
+ IW_HANDLER(SIOCSIWPOWER, ipw_wx_set_power),
+ IW_HANDLER(SIOCGIWPOWER, ipw_wx_get_power),
+ IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
+ IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
+ IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
+ IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
+ IW_HANDLER(SIOCSIWGENIE, ipw_wx_set_genie),
+ IW_HANDLER(SIOCGIWGENIE, ipw_wx_get_genie),
+ IW_HANDLER(SIOCSIWMLME, ipw_wx_set_mlme),
+ IW_HANDLER(SIOCSIWAUTH, ipw_wx_set_auth),
+ IW_HANDLER(SIOCGIWAUTH, ipw_wx_get_auth),
+ IW_HANDLER(SIOCSIWENCODEEXT, ipw_wx_set_encodeext),
+ IW_HANDLER(SIOCGIWENCODEEXT, ipw_wx_get_encodeext),
};
enum {
@@ -11667,7 +11675,7 @@ static int ipw_prom_alloc(struct ipw_priv *priv)
if (priv->prom_net_dev)
return -EPERM;
- priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv), 1);
+ priv->prom_net_dev = alloc_libipw(sizeof(struct ipw_prom_priv), 1);
if (priv->prom_net_dev == NULL)
return -ENOMEM;
@@ -11686,7 +11694,7 @@ static int ipw_prom_alloc(struct ipw_priv *priv)
rc = register_netdev(priv->prom_net_dev);
if (rc) {
- free_ieee80211(priv->prom_net_dev, 1);
+ free_libipw(priv->prom_net_dev, 1);
priv->prom_net_dev = NULL;
return rc;
}
@@ -11700,7 +11708,7 @@ static void ipw_prom_free(struct ipw_priv *priv)
return;
unregister_netdev(priv->prom_net_dev);
- free_ieee80211(priv->prom_net_dev, 1);
+ free_libipw(priv->prom_net_dev, 1);
priv->prom_net_dev = NULL;
}
@@ -11728,7 +11736,7 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
struct ipw_priv *priv;
int i;
- net_dev = alloc_ieee80211(sizeof(struct ipw_priv), 0);
+ net_dev = alloc_libipw(sizeof(struct ipw_priv), 0);
if (net_dev == NULL) {
err = -ENOMEM;
goto out;
@@ -11748,7 +11756,7 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
mutex_init(&priv->mutex);
if (pci_enable_device(pdev)) {
err = -ENODEV;
- goto out_free_ieee80211;
+ goto out_free_libipw;
}
pci_set_master(pdev);
@@ -11875,8 +11883,8 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
out_pci_disable_device:
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
- out_free_ieee80211:
- free_ieee80211(priv->net_dev, 0);
+ out_free_libipw:
+ free_libipw(priv->net_dev, 0);
out:
return err;
}
@@ -11943,11 +11951,11 @@ static void __devexit ipw_pci_remove(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
- /* wiphy_unregister needs to be here, before free_ieee80211 */
+ /* wiphy_unregister needs to be here, before free_libipw */
wiphy_unregister(priv->ieee->wdev.wiphy);
kfree(priv->ieee->a_band.channels);
kfree(priv->ieee->bg_band.channels);
- free_ieee80211(priv->net_dev, 0);
+ free_libipw(priv->net_dev, 0);
free_firmware();
}
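The handler-table rewrite above drops the driver-local IW_IOCTL() rebasing macro in favour of IW_HANDLER() from the wireless-extensions headers, which presumably expands to the same zero-based designated initializer, roughly:

#define IW_HANDLER(id, func)	[IW_IOCTL_IDX(id)] = (func)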
diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h
index a6d5e42647e4..284b0e4cb815 100644
--- a/drivers/net/wireless/ipw2x00/libipw.h
+++ b/drivers/net/wireless/ipw2x00/libipw.h
@@ -64,7 +64,7 @@
extern u32 libipw_debug_level;
#define LIBIPW_DEBUG(level, fmt, args...) \
do { if (libipw_debug_level & (level)) \
- printk(KERN_DEBUG "ieee80211: %c %s " fmt, \
+ printk(KERN_DEBUG "libipw: %c %s " fmt, \
in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
static inline bool libipw_ratelimit_debug(u32 level)
{
@@ -116,8 +116,8 @@ static inline bool libipw_ratelimit_debug(u32 level)
#define LIBIPW_DL_RX (1<<9)
#define LIBIPW_DL_QOS (1<<31)
-#define LIBIPW_ERROR(f, a...) printk(KERN_ERR "ieee80211: " f, ## a)
-#define LIBIPW_WARNING(f, a...) printk(KERN_WARNING "ieee80211: " f, ## a)
+#define LIBIPW_ERROR(f, a...) printk(KERN_ERR "libipw: " f, ## a)
+#define LIBIPW_WARNING(f, a...) printk(KERN_WARNING "libipw: " f, ## a)
#define LIBIPW_DEBUG_INFO(f, a...) LIBIPW_DEBUG(LIBIPW_DL_INFO, f, ## a)
#define LIBIPW_DEBUG_WX(f, a...) LIBIPW_DEBUG(LIBIPW_DL_WX, f, ## a)
@@ -905,7 +905,7 @@ struct libipw_device {
struct libipw_reassoc_request * req);
/* This must be the last item so that it points to the data
- * allocated beyond this structure by alloc_ieee80211 */
+ * allocated beyond this structure by alloc_libipw */
u8 priv[0];
};
@@ -1017,9 +1017,9 @@ static inline int libipw_is_cck_rate(u8 rate)
return 0;
}
-/* ieee80211.c */
-extern void free_ieee80211(struct net_device *dev, int monitor);
-extern struct net_device *alloc_ieee80211(int sizeof_priv, int monitor);
+/* libipw.c */
+extern void free_libipw(struct net_device *dev, int monitor);
+extern struct net_device *alloc_libipw(int sizeof_priv, int monitor);
extern int libipw_change_mtu(struct net_device *dev, int new_mtu);
extern void libipw_networks_age(struct libipw_device *ieee,
diff --git a/drivers/net/wireless/ipw2x00/libipw_module.c b/drivers/net/wireless/ipw2x00/libipw_module.c
index 2fa55867bd8b..55965408ff3f 100644
--- a/drivers/net/wireless/ipw2x00/libipw_module.c
+++ b/drivers/net/wireless/ipw2x00/libipw_module.c
@@ -53,7 +53,7 @@
#include "libipw.h"
#define DRV_DESCRIPTION "802.11 data/management/control stack"
-#define DRV_NAME "ieee80211"
+#define DRV_NAME "libipw"
#define DRV_VERSION LIBIPW_VERSION
#define DRV_COPYRIGHT "Copyright (C) 2004-2005 Intel Corporation <jketreno@linux.intel.com>"
@@ -140,7 +140,7 @@ int libipw_change_mtu(struct net_device *dev, int new_mtu)
}
EXPORT_SYMBOL(libipw_change_mtu);
-struct net_device *alloc_ieee80211(int sizeof_priv, int monitor)
+struct net_device *alloc_libipw(int sizeof_priv, int monitor)
{
struct libipw_device *ieee;
struct net_device *dev;
@@ -222,8 +222,9 @@ failed_free_netdev:
failed:
return NULL;
}
+EXPORT_SYMBOL(alloc_libipw);
-void free_ieee80211(struct net_device *dev, int monitor)
+void free_libipw(struct net_device *dev, int monitor)
{
struct libipw_device *ieee = netdev_priv(dev);
@@ -237,6 +238,7 @@ void free_ieee80211(struct net_device *dev, int monitor)
free_netdev(dev);
}
+EXPORT_SYMBOL(free_libipw);
#ifdef CONFIG_LIBIPW_DEBUG
@@ -291,7 +293,7 @@ static int __init libipw_init(void)
struct proc_dir_entry *e;
libipw_debug_level = debug;
- libipw_proc = proc_mkdir(DRV_NAME, init_net.proc_net);
+ libipw_proc = proc_mkdir("ieee80211", init_net.proc_net);
if (libipw_proc == NULL) {
LIBIPW_ERROR("Unable to create " DRV_NAME
" proc directory\n");
@@ -331,6 +333,3 @@ MODULE_PARM_DESC(debug, "debug output mask");
module_exit(libipw_exit);
module_init(libipw_init);
-
-EXPORT_SYMBOL(alloc_ieee80211);
-EXPORT_SYMBOL(free_ieee80211);
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 4e378faee650..7c7235385513 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -9,7 +9,10 @@ CFLAGS_iwl-devtrace.o := -I$(src)
# AGN
obj-$(CONFIG_IWLAGN) += iwlagn.o
-iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o
+iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o iwl-agn-ict.o
+iwlagn-objs += iwl-agn-ucode.o iwl-agn-hcmd.o iwl-agn-tx.o
+iwlagn-objs += iwl-agn-lib.o
+iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-agn-debugfs.o
iwlagn-$(CONFIG_IWL4965) += iwl-4965.o
iwlagn-$(CONFIG_IWL5000) += iwl-5000.o
@@ -19,5 +22,6 @@ iwlagn-$(CONFIG_IWL5000) += iwl-1000.o
# 3945
obj-$(CONFIG_IWL3945) += iwl3945.o
iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o
+iwl3945-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-3945-debugfs.o
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 3bf2e6e9b2d9..6be2992f8f21 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -42,9 +42,11 @@
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-sta.h"
+#include "iwl-agn.h"
#include "iwl-helpers.h"
-#include "iwl-5000-hw.h"
+#include "iwl-agn-hw.h"
#include "iwl-agn-led.h"
+#include "iwl-agn-debugfs.h"
/* Highest firmware API version supported */
#define IWL1000_UCODE_API_MAX 3
@@ -117,7 +119,7 @@ static struct iwl_sensitivity_ranges iwl1000_sensitivity = {
static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
{
if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
- priv->cfg->mod_params->num_of_queues <= IWL50_NUM_QUEUES)
+ priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
priv->cfg->num_of_queues =
priv->cfg->mod_params->num_of_queues;
@@ -125,13 +127,13 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
priv->hw_params.scd_bc_tbls_size =
priv->cfg->num_of_queues *
- sizeof(struct iwl5000_scd_bc_tbl);
+ sizeof(struct iwlagn_scd_bc_tbl);
priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
priv->hw_params.max_stations = IWL5000_STATION_COUNT;
priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
- priv->hw_params.max_data_size = IWL50_RTC_DATA_SIZE;
- priv->hw_params.max_inst_size = IWL50_RTC_INST_SIZE;
+ priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
+ priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
priv->hw_params.max_bsm_size = 0;
priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
@@ -161,25 +163,25 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
static struct iwl_lib_ops iwl1000_lib = {
.set_hw_params = iwl1000_hw_set_hw_params,
- .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
- .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl,
- .txq_set_sched = iwl5000_txq_set_sched,
- .txq_agg_enable = iwl5000_txq_agg_enable,
- .txq_agg_disable = iwl5000_txq_agg_disable,
+ .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
+ .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
+ .txq_set_sched = iwlagn_txq_set_sched,
+ .txq_agg_enable = iwlagn_txq_agg_enable,
+ .txq_agg_disable = iwlagn_txq_agg_disable,
.txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
.txq_free_tfd = iwl_hw_txq_free_tfd,
.txq_init = iwl_hw_tx_queue_init,
- .rx_handler_setup = iwl5000_rx_handler_setup,
- .setup_deferred_work = iwl5000_setup_deferred_work,
- .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
- .load_ucode = iwl5000_load_ucode,
+ .rx_handler_setup = iwlagn_rx_handler_setup,
+ .setup_deferred_work = iwlagn_setup_deferred_work,
+ .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
+ .load_ucode = iwlagn_load_ucode,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
.dump_csr = iwl_dump_csr,
.dump_fh = iwl_dump_fh,
- .init_alive_start = iwl5000_init_alive_start,
- .alive_notify = iwl5000_alive_notify,
- .send_tx_power = iwl5000_send_tx_power,
+ .init_alive_start = iwlagn_init_alive_start,
+ .alive_notify = iwlagn_alive_notify,
+ .send_tx_power = iwlagn_send_tx_power,
.update_chain_flags = iwl_update_chain_flags,
.apm_ops = {
.init = iwl_apm_init,
@@ -189,40 +191,47 @@ static struct iwl_lib_ops iwl1000_lib = {
},
.eeprom_ops = {
.regulatory_bands = {
- EEPROM_5000_REG_BAND_1_CHANNELS,
- EEPROM_5000_REG_BAND_2_CHANNELS,
- EEPROM_5000_REG_BAND_3_CHANNELS,
- EEPROM_5000_REG_BAND_4_CHANNELS,
- EEPROM_5000_REG_BAND_5_CHANNELS,
- EEPROM_5000_REG_BAND_24_HT40_CHANNELS,
- EEPROM_5000_REG_BAND_52_HT40_CHANNELS
+ EEPROM_REG_BAND_1_CHANNELS,
+ EEPROM_REG_BAND_2_CHANNELS,
+ EEPROM_REG_BAND_3_CHANNELS,
+ EEPROM_REG_BAND_4_CHANNELS,
+ EEPROM_REG_BAND_5_CHANNELS,
+ EEPROM_REG_BAND_24_HT40_CHANNELS,
+ EEPROM_REG_BAND_52_HT40_CHANNELS
},
.verify_signature = iwlcore_eeprom_verify_signature,
.acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
.release_semaphore = iwlcore_eeprom_release_semaphore,
- .calib_version = iwl5000_eeprom_calib_version,
- .query_addr = iwl5000_eeprom_query_addr,
+ .calib_version = iwlagn_eeprom_calib_version,
+ .query_addr = iwlagn_eeprom_query_addr,
},
.post_associate = iwl_post_associate,
.isr = iwl_isr_ict,
.config_ap = iwl_config_ap,
.temp_ops = {
- .temperature = iwl5000_temperature,
+ .temperature = iwlagn_temperature,
.set_ct_kill = iwl1000_set_ct_threshold,
},
- .add_bcast_station = iwl_add_bcast_station,
+ .manage_ibss_station = iwlagn_manage_ibss_station,
+ .debugfs_ops = {
+ .rx_stats_read = iwl_ucode_rx_stats_read,
+ .tx_stats_read = iwl_ucode_tx_stats_read,
+ .general_stats_read = iwl_ucode_general_stats_read,
+ },
+ .recover_from_tx_stall = iwl_bg_monitor_recover,
+ .check_plcp_health = iwl_good_plcp_health,
+ .check_ack_health = iwl_good_ack_health,
};
static const struct iwl_ops iwl1000_ops = {
- .ucode = &iwl5000_ucode,
.lib = &iwl1000_lib,
- .hcmd = &iwl5000_hcmd,
- .utils = &iwl5000_hcmd_utils,
+ .hcmd = &iwlagn_hcmd,
+ .utils = &iwlagn_hcmd_utils,
.led = &iwlagn_led_ops,
};
struct iwl_cfg iwl1000_bgn_cfg = {
- .name = "1000 Series BGN",
+ .name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
.fw_name_pre = IWL1000_FW_PRE,
.ucode_api_max = IWL1000_UCODE_API_MAX,
.ucode_api_min = IWL1000_UCODE_API_MIN,
@@ -230,10 +239,10 @@ struct iwl_cfg iwl1000_bgn_cfg = {
.ops = &iwl1000_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_1000_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_A,
.valid_rx_ant = ANT_AB,
.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -248,10 +257,15 @@ struct iwl_cfg iwl1000_bgn_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 128,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
struct iwl_cfg iwl1000_bg_cfg = {
- .name = "1000 Series BG",
+ .name = "Intel(R) Centrino(R) Wireless-N 1000 BG",
.fw_name_pre = IWL1000_FW_PRE,
.ucode_api_max = IWL1000_UCODE_API_MAX,
.ucode_api_min = IWL1000_UCODE_API_MIN,
@@ -259,10 +273,10 @@ struct iwl_cfg iwl1000_bg_cfg = {
.ops = &iwl1000_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_1000_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_A,
.valid_rx_ant = ANT_AB,
.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -270,12 +284,16 @@ struct iwl_cfg iwl1000_bg_cfg = {
.use_bsm = false,
.max_ll_items = OTP_MAX_LL_ITEMS_1000,
.shadow_ram_support = false,
- .ht_greenfield_support = true,
.led_compensation = 51,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 128,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c
new file mode 100644
index 000000000000..6a9c64a50e36
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c
@@ -0,0 +1,500 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include "iwl-3945-debugfs.h"
+
+ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = sizeof(struct iwl39_statistics_rx_phy) * 40 +
+ sizeof(struct iwl39_statistics_rx_non_phy) * 40 + 400;
+ ssize_t ret;
+ struct iwl39_statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
+ struct iwl39_statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
+ struct iwl39_statistics_rx_non_phy *general, *accum_general;
+ struct iwl39_statistics_rx_non_phy *delta_general, *max_general;
+
+ if (!iwl_is_alive(priv))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IWL_ERR(priv, "Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * The statistics displayed here are based on the last
+ * statistics notification from uCode and might not
+ * reflect the current uCode activity.
+ */
+ ofdm = &priv->_3945.statistics.rx.ofdm;
+ cck = &priv->_3945.statistics.rx.cck;
+ general = &priv->_3945.statistics.rx.general;
+ accum_ofdm = &priv->_3945.accum_statistics.rx.ofdm;
+ accum_cck = &priv->_3945.accum_statistics.rx.cck;
+ accum_general = &priv->_3945.accum_statistics.rx.general;
+ delta_ofdm = &priv->_3945.delta_statistics.rx.ofdm;
+ delta_cck = &priv->_3945.delta_statistics.rx.cck;
+ delta_general = &priv->_3945.delta_statistics.rx.general;
+ max_ofdm = &priv->_3945.max_delta.rx.ofdm;
+ max_cck = &priv->_3945.max_delta.rx.cck;
+ max_general = &priv->_3945.max_delta.rx.general;
+
+ pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
+ pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
+ "acumulative delta max\n",
+ "Statistics_Rx - OFDM:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "ina_cnt:", le32_to_cpu(ofdm->ina_cnt),
+ accum_ofdm->ina_cnt,
+ delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "fina_cnt:",
+ le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
+ delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "plcp_err:",
+ le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
+ delta_ofdm->plcp_err, max_ofdm->plcp_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "crc32_err:",
+ le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
+ delta_ofdm->crc32_err, max_ofdm->crc32_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "overrun_err:",
+ le32_to_cpu(ofdm->overrun_err),
+ accum_ofdm->overrun_err, delta_ofdm->overrun_err,
+ max_ofdm->overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "early_overrun_err:",
+ le32_to_cpu(ofdm->early_overrun_err),
+ accum_ofdm->early_overrun_err,
+ delta_ofdm->early_overrun_err,
+ max_ofdm->early_overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "crc32_good:", le32_to_cpu(ofdm->crc32_good),
+ accum_ofdm->crc32_good, delta_ofdm->crc32_good,
+ max_ofdm->crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:",
+ le32_to_cpu(ofdm->false_alarm_cnt),
+ accum_ofdm->false_alarm_cnt,
+ delta_ofdm->false_alarm_cnt,
+ max_ofdm->false_alarm_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "fina_sync_err_cnt:",
+ le32_to_cpu(ofdm->fina_sync_err_cnt),
+ accum_ofdm->fina_sync_err_cnt,
+ delta_ofdm->fina_sync_err_cnt,
+ max_ofdm->fina_sync_err_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "sfd_timeout:",
+ le32_to_cpu(ofdm->sfd_timeout),
+ accum_ofdm->sfd_timeout,
+ delta_ofdm->sfd_timeout,
+ max_ofdm->sfd_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "fina_timeout:",
+ le32_to_cpu(ofdm->fina_timeout),
+ accum_ofdm->fina_timeout,
+ delta_ofdm->fina_timeout,
+ max_ofdm->fina_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "unresponded_rts:",
+ le32_to_cpu(ofdm->unresponded_rts),
+ accum_ofdm->unresponded_rts,
+ delta_ofdm->unresponded_rts,
+ max_ofdm->unresponded_rts);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "rxe_frame_lmt_ovrun:",
+ le32_to_cpu(ofdm->rxe_frame_limit_overrun),
+ accum_ofdm->rxe_frame_limit_overrun,
+ delta_ofdm->rxe_frame_limit_overrun,
+ max_ofdm->rxe_frame_limit_overrun);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "sent_ack_cnt:",
+ le32_to_cpu(ofdm->sent_ack_cnt),
+ accum_ofdm->sent_ack_cnt,
+ delta_ofdm->sent_ack_cnt,
+ max_ofdm->sent_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "sent_cts_cnt:",
+ le32_to_cpu(ofdm->sent_cts_cnt),
+ accum_ofdm->sent_cts_cnt,
+ delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
+ "acumulative delta max\n",
+ "Statistics_Rx - CCK:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "ina_cnt:",
+ le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
+ delta_cck->ina_cnt, max_cck->ina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "fina_cnt:",
+ le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
+ delta_cck->fina_cnt, max_cck->fina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "plcp_err:",
+ le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
+ delta_cck->plcp_err, max_cck->plcp_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "crc32_err:",
+ le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
+ delta_cck->crc32_err, max_cck->crc32_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "overrun_err:",
+ le32_to_cpu(cck->overrun_err),
+ accum_cck->overrun_err,
+ delta_cck->overrun_err, max_cck->overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "early_overrun_err:",
+ le32_to_cpu(cck->early_overrun_err),
+ accum_cck->early_overrun_err,
+ delta_cck->early_overrun_err,
+ max_cck->early_overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "crc32_good:",
+ le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
+ delta_cck->crc32_good,
+ max_cck->crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "false_alarm_cnt:",
+ le32_to_cpu(cck->false_alarm_cnt),
+ accum_cck->false_alarm_cnt,
+ delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "fina_sync_err_cnt:",
+ le32_to_cpu(cck->fina_sync_err_cnt),
+ accum_cck->fina_sync_err_cnt,
+ delta_cck->fina_sync_err_cnt,
+ max_cck->fina_sync_err_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "sfd_timeout:",
+ le32_to_cpu(cck->sfd_timeout),
+ accum_cck->sfd_timeout,
+ delta_cck->sfd_timeout, max_cck->sfd_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "fina_timeout:",
+ le32_to_cpu(cck->fina_timeout),
+ accum_cck->fina_timeout,
+ delta_cck->fina_timeout, max_cck->fina_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "unresponded_rts:",
+ le32_to_cpu(cck->unresponded_rts),
+ accum_cck->unresponded_rts,
+ delta_cck->unresponded_rts,
+ max_cck->unresponded_rts);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "rxe_frame_lmt_ovrun:",
+ le32_to_cpu(cck->rxe_frame_limit_overrun),
+ accum_cck->rxe_frame_limit_overrun,
+ delta_cck->rxe_frame_limit_overrun,
+ max_cck->rxe_frame_limit_overrun);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "sent_ack_cnt:",
+ le32_to_cpu(cck->sent_ack_cnt),
+ accum_cck->sent_ack_cnt,
+ delta_cck->sent_ack_cnt,
+ max_cck->sent_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "sent_cts_cnt:",
+ le32_to_cpu(cck->sent_cts_cnt),
+ accum_cck->sent_cts_cnt,
+ delta_cck->sent_cts_cnt,
+ max_cck->sent_cts_cnt);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
+ "acumulative delta max\n",
+ "Statistics_Rx - GENERAL:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "bogus_cts:",
+ le32_to_cpu(general->bogus_cts),
+ accum_general->bogus_cts,
+ delta_general->bogus_cts, max_general->bogus_cts);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "bogus_ack:",
+ le32_to_cpu(general->bogus_ack),
+ accum_general->bogus_ack,
+ delta_general->bogus_ack, max_general->bogus_ack);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "non_bssid_frames:",
+ le32_to_cpu(general->non_bssid_frames),
+ accum_general->non_bssid_frames,
+ delta_general->non_bssid_frames,
+ max_general->non_bssid_frames);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "filtered_frames:",
+ le32_to_cpu(general->filtered_frames),
+ accum_general->filtered_frames,
+ delta_general->filtered_frames,
+ max_general->filtered_frames);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "non_channel_beacons:",
+ le32_to_cpu(general->non_channel_beacons),
+ accum_general->non_channel_beacons,
+ delta_general->non_channel_beacons,
+ max_general->non_channel_beacons);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = (sizeof(struct iwl39_statistics_tx) * 48) + 250;
+ ssize_t ret;
+ struct iwl39_statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
+
+ if (!iwl_is_alive(priv))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IWL_ERR(priv, "Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * The statistics displayed here are based on the last
+ * statistics notification from uCode and might not
+ * reflect the current uCode activity.
+ */
+ tx = &priv->_3945.statistics.tx;
+ accum_tx = &priv->_3945.accum_statistics.tx;
+ delta_tx = &priv->_3945.delta_statistics.tx;
+ max_tx = &priv->_3945.max_delta.tx;
+ pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
+ pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
+ "acumulative delta max\n",
+ "Statistics_Tx:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "preamble:",
+ le32_to_cpu(tx->preamble_cnt),
+ accum_tx->preamble_cnt,
+ delta_tx->preamble_cnt, max_tx->preamble_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "rx_detected_cnt:",
+ le32_to_cpu(tx->rx_detected_cnt),
+ accum_tx->rx_detected_cnt,
+ delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "bt_prio_defer_cnt:",
+ le32_to_cpu(tx->bt_prio_defer_cnt),
+ accum_tx->bt_prio_defer_cnt,
+ delta_tx->bt_prio_defer_cnt,
+ max_tx->bt_prio_defer_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "bt_prio_kill_cnt:",
+ le32_to_cpu(tx->bt_prio_kill_cnt),
+ accum_tx->bt_prio_kill_cnt,
+ delta_tx->bt_prio_kill_cnt,
+ max_tx->bt_prio_kill_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "few_bytes_cnt:",
+ le32_to_cpu(tx->few_bytes_cnt),
+ accum_tx->few_bytes_cnt,
+ delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "cts_timeout:",
+ le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
+ delta_tx->cts_timeout, max_tx->cts_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "ack_timeout:",
+ le32_to_cpu(tx->ack_timeout),
+ accum_tx->ack_timeout,
+ delta_tx->ack_timeout, max_tx->ack_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "expected_ack_cnt:",
+ le32_to_cpu(tx->expected_ack_cnt),
+ accum_tx->expected_ack_cnt,
+ delta_tx->expected_ack_cnt,
+ max_tx->expected_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "actual_ack_cnt:",
+ le32_to_cpu(tx->actual_ack_cnt),
+ accum_tx->actual_ack_cnt,
+ delta_tx->actual_ack_cnt,
+ max_tx->actual_ack_cnt);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+ssize_t iwl3945_ucode_general_stats_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = sizeof(struct iwl39_statistics_general) * 10 + 300;
+ ssize_t ret;
+ struct iwl39_statistics_general *general, *accum_general;
+ struct iwl39_statistics_general *delta_general, *max_general;
+ struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
+ struct iwl39_statistics_div *div, *accum_div, *delta_div, *max_div;
+
+ if (!iwl_is_alive(priv))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IWL_ERR(priv, "Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * The statistics displayed here are based on the last
+ * statistics notification from uCode and might not
+ * reflect the current uCode activity.
+ */
+ general = &priv->_3945.statistics.general;
+ dbg = &priv->_3945.statistics.general.dbg;
+ div = &priv->_3945.statistics.general.div;
+ accum_general = &priv->_3945.accum_statistics.general;
+ delta_general = &priv->_3945.delta_statistics.general;
+ max_general = &priv->_3945.max_delta.general;
+ accum_dbg = &priv->_3945.accum_statistics.general.dbg;
+ delta_dbg = &priv->_3945.delta_statistics.general.dbg;
+ max_dbg = &priv->_3945.max_delta.general.dbg;
+ accum_div = &priv->_3945.accum_statistics.general.div;
+ delta_div = &priv->_3945.delta_statistics.general.div;
+ max_div = &priv->_3945.max_delta.general.div;
+ pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
+ pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
+ "acumulative delta max\n",
+ "Statistics_General:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "burst_check:",
+ le32_to_cpu(dbg->burst_check),
+ accum_dbg->burst_check,
+ delta_dbg->burst_check, max_dbg->burst_check);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "burst_count:",
+ le32_to_cpu(dbg->burst_count),
+ accum_dbg->burst_count,
+ delta_dbg->burst_count, max_dbg->burst_count);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "sleep_time:",
+ le32_to_cpu(general->sleep_time),
+ accum_general->sleep_time,
+ delta_general->sleep_time, max_general->sleep_time);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "slots_out:",
+ le32_to_cpu(general->slots_out),
+ accum_general->slots_out,
+ delta_general->slots_out, max_general->slots_out);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "slots_idle:",
+ le32_to_cpu(general->slots_idle),
+ accum_general->slots_idle,
+ delta_general->slots_idle, max_general->slots_idle);
+ pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
+ le32_to_cpu(general->ttl_timestamp));
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "tx_on_a:",
+ le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
+ delta_div->tx_on_a, max_div->tx_on_a);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "tx_on_b:",
+ le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
+ delta_div->tx_on_b, max_div->tx_on_b);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "exec_time:",
+ le32_to_cpu(div->exec_time), accum_div->exec_time,
+ delta_div->exec_time, max_div->exec_time);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "probe_time:",
+ le32_to_cpu(div->probe_time), accum_div->probe_time,
+ delta_div->probe_time, max_div->probe_time);
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
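Each of the three debugfs read handlers added above follows the same shape: return -EAGAIN if the device is not alive, kzalloc() a text buffer, append formatted statistics with scnprintf(), copy the result to user space with simple_read_from_buffer(), and free the buffer. A condensed sketch of that pattern only (the function and counter names below are illustrative, not part of the patch):

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

static ssize_t example_stats_read(struct file *file, char __user *user_buf,
				  size_t count, loff_t *ppos)
{
	char *buf;
	int pos = 0;
	const int bufsz = 256;
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* scnprintf() never overruns bufsz and returns the bytes written */
	pos += scnprintf(buf + pos, bufsz - pos, "example_counter:\t%u\n", 42u);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}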
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h b/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h
new file mode 100644
index 000000000000..70809c53c215
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h
@@ -0,0 +1,60 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-debug.h"
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ssize_t iwl3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ssize_t iwl3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ssize_t iwl3945_ucode_general_stats_read(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos);
+#else
+static ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ return 0;
+}
+static ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ return 0;
+}
+static ssize_t iwl3945_ucode_general_stats_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
index 3a876a8ece38..91bcb4e3cdfb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
@@ -71,13 +71,11 @@
#include "iwl-eeprom.h"
-/* Time constants */
-#define SHORT_SLOT_TIME 9
-#define LONG_SLOT_TIME 20
-
/* RSSI to dBm */
#define IWL39_RSSI_OFFSET 95
+#define IWL_DEFAULT_TX_POWER 0x0F
+
/*
* EEPROM related constants, enums, and structures.
*/
@@ -228,7 +226,6 @@ struct iwl3945_eeprom {
/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */
#define IWL39_NUM_QUEUES 5
-#define IWL_NUM_SCAN_RATES (2)
#define IWL_DEFAULT_TX_RETRY 15
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index 902c4d4293e9..32eb4709acac 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -330,16 +330,25 @@ static void iwl3945_collect_tx_data(struct iwl3945_rs_sta *rs_sta,
}
-static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta)
+/*
+ * Called after adding a new station to initialize rate scaling
+ */
+void iwl3945_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_id)
{
- struct iwl3945_rs_sta *rs_sta = priv_sta;
- struct iwl_priv *priv = (struct iwl_priv *)priv_r;
+ struct ieee80211_hw *hw = priv->hw;
+ struct ieee80211_conf *conf = &priv->hw->conf;
+ struct iwl3945_sta_priv *psta;
+ struct iwl3945_rs_sta *rs_sta;
+ struct ieee80211_supported_band *sband;
int i;
- IWL_DEBUG_RATE(priv, "enter\n");
+ IWL_DEBUG_INFO(priv, "enter\n");
+ if (sta_id == priv->hw_params.bcast_sta_id)
+ goto out;
- spin_lock_init(&rs_sta->lock);
+ psta = (struct iwl3945_sta_priv *) sta->drv_priv;
+ rs_sta = &psta->rs_sta;
+ sband = hw->wiphy->bands[conf->channel->band];
rs_sta->priv = priv;
@@ -352,9 +361,7 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
rs_sta->last_flush = jiffies;
rs_sta->flush_time = IWL_RATE_FLUSH;
rs_sta->last_tx_packets = 0;
- rs_sta->ibss_sta_added = 0;
- init_timer(&rs_sta->rate_scale_flush);
rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
rs_sta->rate_scale_flush.function = iwl3945_bg_rate_scale_flush;
@@ -373,16 +380,18 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
}
}
- priv->sta_supp_rates = sta->supp_rates[sband->band];
+ priv->_3945.sta_supp_rates = sta->supp_rates[sband->band];
/* For 5 GHz band it start at IWL_FIRST_OFDM_RATE */
if (sband->band == IEEE80211_BAND_5GHZ) {
rs_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
- priv->sta_supp_rates = priv->sta_supp_rates <<
+ priv->_3945.sta_supp_rates = priv->_3945.sta_supp_rates <<
IWL_FIRST_OFDM_RATE;
}
+out:
+ priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
- IWL_DEBUG_RATE(priv, "leave\n");
+ IWL_DEBUG_INFO(priv, "leave\n");
}
static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
@@ -406,6 +415,9 @@ static void *rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
rs_sta = &psta->rs_sta;
+ spin_lock_init(&rs_sta->lock);
+ init_timer(&rs_sta->rate_scale_flush);
+
IWL_DEBUG_RATE(priv, "leave\n");
return rs_sta;
@@ -414,13 +426,14 @@ static void *rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
static void rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta,
void *priv_sta)
{
- struct iwl3945_sta_priv *psta = (void *) sta->drv_priv;
- struct iwl3945_rs_sta *rs_sta = &psta->rs_sta;
- struct iwl_priv *priv __maybe_unused = rs_sta->priv;
+ struct iwl3945_rs_sta *rs_sta = priv_sta;
- IWL_DEBUG_RATE(priv, "enter\n");
+ /*
+ * Be careful not to use any members of iwl3945_rs_sta (like trying
+ * to use iwl_priv to print out debugging) since it may not be fully
+ * initialized at this point.
+ */
del_timer_sync(&rs_sta->rate_scale_flush);
- IWL_DEBUG_RATE(priv, "leave\n");
}
@@ -459,6 +472,13 @@ static void rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband
return;
}
+ /* Treat uninitialized rate scaling data same as non-existing. */
+ if (!rs_sta->priv) {
+ IWL_DEBUG_RATE(priv, "leave: STA priv data uninitialized!\n");
+ return;
+ }
+
+
rs_sta->tx_packets++;
scale_rate_index = first_index;
@@ -626,14 +646,19 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
u32 fail_count;
s8 scale_action = 0;
unsigned long flags;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
u16 rate_mask = sta ? sta->supp_rates[sband->band] : 0;
s8 max_rate_idx = -1;
- struct iwl_priv *priv = (struct iwl_priv *)priv_r;
+ struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
IWL_DEBUG_RATE(priv, "enter\n");
+ /* Treat uninitialized rate scaling data same as non-existing. */
+ if (rs_sta && !rs_sta->priv) {
+ IWL_DEBUG_RATE(priv, "Rate scaling information not initialized yet.\n");
+ priv_sta = NULL;
+ }
+
if (rate_control_send_low(sta, priv_sta, txrc))
return;
@@ -651,20 +676,6 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
if (sband->band == IEEE80211_BAND_5GHZ)
rate_mask = rate_mask << IWL_FIRST_OFDM_RATE;
- if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
- !rs_sta->ibss_sta_added) {
- u8 sta_id = iwl_find_station(priv, hdr->addr1);
-
- if (sta_id == IWL_INVALID_STATION) {
- IWL_DEBUG_RATE(priv, "LQ: ADD station %pM\n",
- hdr->addr1);
- sta_id = iwl_add_station(priv, hdr->addr1, false,
- CMD_ASYNC, NULL);
- }
- if (sta_id != IWL_INVALID_STATION)
- rs_sta->ibss_sta_added = 1;
- }
-
spin_lock_irqsave(&rs_sta->lock, flags);
/* for recent assoc, choose best rate regarding
@@ -884,12 +895,22 @@ static void iwl3945_remove_debugfs(void *priv, void *priv_sta)
}
#endif
+/*
+ * Initialization of rate scaling information is done by the driver
+ * after the station is added. Since mac80211 calls this function
+ * before a station is added, we ignore it.
+ */
+static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *priv_sta)
+{
+}
+
static struct rate_control_ops rs_ops = {
.module = NULL,
.name = RS_NAME,
.tx_status = rs_tx_status,
.get_rate = rs_get_rate,
- .rate_init = rs_rate_init,
+ .rate_init = rs_rate_init_stub,
.alloc = rs_alloc,
.free = rs_free,
.alloc_sta = rs_alloc_sta,
@@ -900,7 +921,6 @@ static struct rate_control_ops rs_ops = {
#endif
};
-
void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
{
struct iwl_priv *priv = hw->priv;
@@ -917,6 +937,7 @@ void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
sta = ieee80211_find_sta(priv->vif,
priv->stations[sta_id].sta.sta.addr);
if (!sta) {
+ IWL_DEBUG_RATE(priv, "Unable to find station to initialize rate scaling.\n");
rcu_read_unlock();
return;
}
@@ -947,7 +968,7 @@ void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
spin_unlock_irqrestore(&rs_sta->lock, flags);
- rssi = priv->last_rx_rssi;
+ rssi = priv->_3945.last_rx_rssi;
if (rssi == 0)
rssi = IWL_MIN_RSSI_VAL;
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 0728054a22d4..068f7f8435c5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -50,6 +50,7 @@
#include "iwl-helpers.h"
#include "iwl-led.h"
#include "iwl-3945-led.h"
+#include "iwl-3945-debugfs.h"
#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \
[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
@@ -192,12 +193,12 @@ static int iwl3945_hwrate_to_plcp_idx(u8 plcp)
}
#ifdef CONFIG_IWLWIFI_DEBUG
-#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
+#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x
static const char *iwl3945_get_tx_fail_reason(u32 status)
{
switch (status & TX_STATUS_MSK) {
- case TX_STATUS_SUCCESS:
+ case TX_3945_STATUS_SUCCESS:
return "SUCCESS";
TX_STATUS_ENTRY(SHORT_LIMIT);
TX_STATUS_ENTRY(LONG_LIMIT);
@@ -243,7 +244,7 @@ int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate)
next_rate = IWL_RATE_6M_INDEX;
break;
case IEEE80211_BAND_2GHZ:
- if (!(priv->sta_supp_rates & IWL_OFDM_RATES_MASK) &&
+ if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
iwl_is_associated(priv)) {
if (rate == IWL_RATE_11M_INDEX)
next_rate = IWL_RATE_5M_INDEX;
@@ -293,7 +294,7 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
* iwl3945_rx_reply_tx - Handle Tx response
*/
static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+ struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
@@ -351,18 +352,143 @@ static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
* RX handler implementations
*
*****************************************************************************/
+#ifdef CONFIG_IWLWIFI_DEBUG
+/*
+ * based on the assumption of all statistics counter are in DWORD
+ * FIXME: This function is for debugging, do not deal with
+ * the case of counters roll-over.
+ */
+static void iwl3945_accumulative_statistics(struct iwl_priv *priv,
+ __le32 *stats)
+{
+ int i;
+ __le32 *prev_stats;
+ u32 *accum_stats;
+ u32 *delta, *max_delta;
+
+ prev_stats = (__le32 *)&priv->_3945.statistics;
+ accum_stats = (u32 *)&priv->_3945.accum_statistics;
+ delta = (u32 *)&priv->_3945.delta_statistics;
+ max_delta = (u32 *)&priv->_3945.max_delta;
+
+ for (i = sizeof(__le32); i < sizeof(struct iwl3945_notif_statistics);
+ i += sizeof(__le32), stats++, prev_stats++, delta++,
+ max_delta++, accum_stats++) {
+ if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
+ *delta = (le32_to_cpu(*stats) -
+ le32_to_cpu(*prev_stats));
+ *accum_stats += *delta;
+ if (*delta > *max_delta)
+ *max_delta = *delta;
+ }
+ }
+
+ /* reset accumulative statistics for "no-counter" type statistics */
+ priv->_3945.accum_statistics.general.temperature =
+ priv->_3945.statistics.general.temperature;
+ priv->_3945.accum_statistics.general.ttl_timestamp =
+ priv->_3945.statistics.general.ttl_timestamp;
+}
+#endif
+
+/**
+ * iwl3945_good_plcp_health - checks for plcp error.
+ *
+ * When the plcp error rate exceeds the threshold, reset the radio
+ * to improve the throughput.
+ */
+static bool iwl3945_good_plcp_health(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt)
+{
+ bool rc = true;
+ struct iwl3945_notif_statistics current_stat;
+ int combined_plcp_delta;
+ unsigned int plcp_msec;
+ unsigned long plcp_received_jiffies;
+
+ memcpy(&current_stat, pkt->u.raw, sizeof(struct
+ iwl3945_notif_statistics));
+ /*
+ * check for plcp_err and trigger radio reset if it exceeds
+ * the plcp error threshold plcp_delta.
+ */
+ plcp_received_jiffies = jiffies;
+ plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
+ (long) priv->plcp_jiffies);
+ priv->plcp_jiffies = plcp_received_jiffies;
+ /*
+ * check to make sure plcp_msec is not 0 to prevent division
+ * by zero.
+ */
+ if (plcp_msec) {
+ combined_plcp_delta =
+ (le32_to_cpu(current_stat.rx.ofdm.plcp_err) -
+ le32_to_cpu(priv->_3945.statistics.rx.ofdm.plcp_err));
+
+ if ((combined_plcp_delta > 0) &&
+ ((combined_plcp_delta * 100) / plcp_msec) >
+ priv->cfg->plcp_delta_threshold) {
+ /*
+ * if plcp_err exceeds the threshold, the following
+ * data is printed in csv format:
+ * Text: plcp_err exceeded %d,
+ * Received ofdm.plcp_err,
+ * Current ofdm.plcp_err,
+ * combined_plcp_delta,
+ * plcp_msec
+ */
+ IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
+ "%u, %d, %u mSecs\n",
+ priv->cfg->plcp_delta_threshold,
+ le32_to_cpu(current_stat.rx.ofdm.plcp_err),
+ combined_plcp_delta, plcp_msec);
+ /*
+ * Reset the RF radio due to the high plcp
+ * error rate
+ */
+ rc = false;
+ }
+ }
+ return rc;
+}
void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
(int)sizeof(struct iwl3945_notif_statistics),
le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
+#ifdef CONFIG_IWLWIFI_DEBUG
+ iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw);
+#endif
+ iwl_recover_from_statistics(priv, pkt);
+
+ memcpy(&priv->_3945.statistics, pkt->u.raw, sizeof(priv->_3945.statistics));
+}
+
+void iwl3945_reply_statistics(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ __le32 *flag = (__le32 *)&pkt->u.raw;
- memcpy(&priv->statistics_39, pkt->u.raw, sizeof(priv->statistics_39));
+ if (le32_to_cpu(*flag) & UCODE_STATISTICS_CLEAR_MSK) {
+#ifdef CONFIG_IWLWIFI_DEBUG
+ memset(&priv->_3945.accum_statistics, 0,
+ sizeof(struct iwl3945_notif_statistics));
+ memset(&priv->_3945.delta_statistics, 0,
+ sizeof(struct iwl3945_notif_statistics));
+ memset(&priv->_3945.max_delta, 0,
+ sizeof(struct iwl3945_notif_statistics));
+#endif
+ IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
+ }
+ iwl3945_hw_rx_statistics(priv, rxb);
}
+
/******************************************************************************
*
* Misc. internal state and helper functions
@@ -487,7 +613,7 @@ static void _iwl3945_dbg_report_frame(struct iwl_priv *priv,
* but you can hack it to show more, if you'd like to. */
if (dataframe)
IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, "
- "len=%u, rssi=%d, chnl=%d, rate=%d, \n",
+ "len=%u, rssi=%d, chnl=%d, rate=%d,\n",
title, le16_to_cpu(fc), header->addr1[5],
length, rssi, channel, rate);
else {
@@ -549,7 +675,6 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
u16 len = le16_to_cpu(rx_hdr->len);
struct sk_buff *skb;
- int ret;
__le16 fc = hdr->frame_control;
/* We received data from the HW, so stop the watchdog */
@@ -566,9 +691,9 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
return;
}
- skb = alloc_skb(IWL_LINK_HDR_MAX * 2, GFP_ATOMIC);
+ skb = dev_alloc_skb(128);
if (!skb) {
- IWL_ERR(priv, "alloc_skb failed\n");
+ IWL_ERR(priv, "dev_alloc_skb failed\n");
return;
}
@@ -577,37 +702,13 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
(struct ieee80211_hdr *)rxb_addr(rxb),
le32_to_cpu(rx_end->status), stats);
- skb_reserve(skb, IWL_LINK_HDR_MAX);
skb_add_rx_frag(skb, 0, rxb->page,
(void *)rx_hdr->payload - (void *)pkt, len);
- /* mac80211 currently doesn't support paged SKB. Convert it to
- * linear SKB for management frame and data frame requires
- * software decryption or software defragementation. */
- if (ieee80211_is_mgmt(fc) ||
- ieee80211_has_protected(fc) ||
- ieee80211_has_morefrags(fc) ||
- le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)
- ret = skb_linearize(skb);
- else
- ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
- 0 : -ENOMEM;
-
- if (ret) {
- kfree_skb(skb);
- goto out;
- }
-
- /*
- * XXX: We cannot touch the page and its virtual memory (pkt) after
- * here. It might have already been freed by the above skb change.
- */
-
iwl_update_stats(priv, false, fc, len);
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
ieee80211_rx(priv->hw, skb);
- out:
priv->alloc_rxb_page--;
rxb->page = NULL;
}
@@ -623,9 +724,8 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
- int snr;
- u16 rx_stats_sig_avg = le16_to_cpu(rx_stats->sig_avg);
- u16 rx_stats_noise_diff = le16_to_cpu(rx_stats->noise_diff);
+ u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg);
+ u16 rx_stats_noise_diff __maybe_unused = le16_to_cpu(rx_stats->noise_diff);
u8 network_packet;
rx_status.flag = 0;
@@ -663,53 +763,29 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
/* Convert 3945's rssi indicator to dBm */
rx_status.signal = rx_stats->rssi - IWL39_RSSI_OFFSET;
- /* Set default noise value to -127 */
- if (priv->last_rx_noise == 0)
- priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
-
- /* 3945 provides noise info for OFDM frames only.
- * sig_avg and noise_diff are measured by the 3945's digital signal
- * processor (DSP), and indicate linear levels of signal level and
- * distortion/noise within the packet preamble after
- * automatic gain control (AGC). sig_avg should stay fairly
- * constant if the radio's AGC is working well.
- * Since these values are linear (not dB or dBm), linear
- * signal-to-noise ratio (SNR) is (sig_avg / noise_diff).
- * Convert linear SNR to dB SNR, then subtract that from rssi dBm
- * to obtain noise level in dBm.
- * Calculate rx_status.signal (quality indicator in %) based on SNR. */
- if (rx_stats_noise_diff) {
- snr = rx_stats_sig_avg / rx_stats_noise_diff;
- rx_status.noise = rx_status.signal -
- iwl3945_calc_db_from_ratio(snr);
- } else {
- rx_status.noise = priv->last_rx_noise;
- }
-
-
- IWL_DEBUG_STATS(priv, "Rssi %d noise %d sig_avg %d noise_diff %d\n",
- rx_status.signal, rx_status.noise,
- rx_stats_sig_avg, rx_stats_noise_diff);
+ IWL_DEBUG_STATS(priv, "Rssi %d sig_avg %d noise_diff %d\n",
+ rx_status.signal, rx_stats_sig_avg,
+ rx_stats_noise_diff);
header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
network_packet = iwl3945_is_network_packet(priv, header);
- IWL_DEBUG_STATS_LIMIT(priv, "[%c] %d RSSI:%d Signal:%u, Noise:%u, Rate:%u\n",
+ IWL_DEBUG_STATS_LIMIT(priv, "[%c] %d RSSI:%d Signal:%u, Rate:%u\n",
network_packet ? '*' : ' ',
le16_to_cpu(rx_hdr->channel),
rx_status.signal, rx_status.signal,
- rx_status.noise, rx_status.rate_idx);
+ rx_status.rate_idx);
/* Set "1" to report good data frames in groups of 100 */
iwl3945_dbg_report_frame(priv, pkt, header, 1);
iwl_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len), header);
if (network_packet) {
- priv->last_beacon_time = le32_to_cpu(rx_end->beacon_timestamp);
- priv->last_tsf = le64_to_cpu(rx_end->timestamp);
- priv->last_rx_rssi = rx_status.signal;
- priv->last_rx_noise = rx_status.noise;
+ priv->_3945.last_beacon_time =
+ le32_to_cpu(rx_end->beacon_timestamp);
+ priv->_3945.last_tsf = le64_to_cpu(rx_end->timestamp);
+ priv->_3945.last_rx_rssi = rx_status.signal;
}
iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
@@ -871,7 +947,8 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
tx_cmd->supp_rates[1], tx_cmd->supp_rates[0]);
}
-u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate, u8 flags)
+static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id,
+ u16 tx_rate, u8 flags)
{
unsigned long flags_spin;
struct iwl_station_entry *station;
@@ -957,7 +1034,7 @@ static int iwl3945_tx_reset(struct iwl_priv *priv)
iwl_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005);
iwl_write_direct32(priv, FH39_TSSR_CBB_BASE,
- priv->shared_phys);
+ priv->_3945.shared_phys);
iwl_write_direct32(priv, FH39_TSSR_MSG_CONFIG,
FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
@@ -1049,7 +1126,7 @@ static void iwl3945_nic_config(struct iwl_priv *priv)
IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id);
if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
- IWL_DEBUG_INFO(priv, "RTP type \n");
+ IWL_DEBUG_INFO(priv, "RTP type\n");
else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n");
iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
@@ -1607,7 +1684,7 @@ static int iwl3945_hw_reg_set_new_power(struct iwl_priv *priv,
int power;
/* Get this chnlgrp's rate-to-max/clip-powers table */
- clip_pwrs = priv->clip39_groups[ch_info->group_index].clip_powers;
+ clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
/* Get this channel's rate-to-current-power settings table */
power_info = ch_info->power_info;
@@ -1701,6 +1778,11 @@ static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
int ref_temp;
int temperature = priv->temperature;
+ if (priv->disable_tx_power_cal ||
+ test_bit(STATUS_SCANNING, &priv->status)) {
+ /* do not perform tx power calibration */
+ return 0;
+ }
/* set up new Tx power info for each and every channel, 2.4 and 5.x */
for (i = 0; i < priv->channel_count; i++) {
ch_info = &priv->channel_info[i];
@@ -1733,7 +1815,7 @@ static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
}
/* Get this chnlgrp's rate-to-max/clip-powers table */
- clip_pwrs = priv->clip39_groups[ch_info->group_index].clip_powers;
+ clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
/* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
for (scan_tbl_index = 0;
@@ -1911,6 +1993,8 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
"configuration (%d).\n", rc);
return rc;
}
+ iwl_clear_ucode_stations(priv);
+ iwl_restore_stations(priv);
}
IWL_DEBUG_INFO(priv, "Sending RXON\n"
@@ -1941,7 +2025,10 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
- iwl_clear_stations_table(priv);
+ if (!new_assoc) {
+ iwl_clear_ucode_stations(priv);
+ iwl_restore_stations(priv);
+ }
/* If we issue a new RXON command which required a tune then we must
* send a new TXPOWER command or we won't be able to Tx any frames */
@@ -1951,19 +2038,6 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
return rc;
}
- /* Add the broadcast address so we can send broadcast frames */
- priv->cfg->ops->lib->add_bcast_station(priv);
-
- /* If we have set the ASSOC_MSK and we are in BSS mode then
- * add the IWL_AP_ID to the station rate table */
- if (iwl_is_associated(priv) &&
- (priv->iw_mode == NL80211_IFTYPE_STATION))
- if (iwl_add_station(priv, priv->active_rxon.bssid_addr,
- true, CMD_SYNC, NULL) == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Error adding AP address for transmit\n");
- return -EIO;
- }
-
/* Init the hardware's rate fallback order based on the band */
rc = iwl3945_init_hw_rate_table(priv);
if (rc) {
@@ -1998,13 +2072,13 @@ void iwl3945_reg_txpower_periodic(struct iwl_priv *priv)
reschedule:
queue_delayed_work(priv->workqueue,
- &priv->thermal_periodic, REG_RECALIB_PERIOD * HZ);
+ &priv->_3945.thermal_periodic, REG_RECALIB_PERIOD * HZ);
}
static void iwl3945_bg_reg_txpower_periodic(struct work_struct *work)
{
struct iwl_priv *priv = container_of(work, struct iwl_priv,
- thermal_periodic.work);
+ _3945.thermal_periodic.work);
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
@@ -2140,7 +2214,7 @@ static void iwl3945_hw_reg_init_channel_groups(struct iwl_priv *priv)
* power peaks, without too much distortion (clipping).
*/
/* we'll fill in this array with h/w max power levels */
- clip_pwrs = (s8 *) priv->clip39_groups[i].clip_powers;
+ clip_pwrs = (s8 *) priv->_3945.clip_groups[i].clip_powers;
/* divide factory saturation power by 2 to find -3dB level */
satur_pwr = (s8) (group->saturation_power >> 1);
@@ -2224,7 +2298,7 @@ int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv)
iwl3945_hw_reg_get_ch_grp_index(priv, ch_info);
/* Get this chnlgrp's rate->max/clip-powers table */
- clip_pwrs = priv->clip39_groups[ch_info->group_index].clip_powers;
+ clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
/* calculate power index *adjustment* value according to
* diff between current temperature and factory temperature */
@@ -2332,7 +2406,7 @@ int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
int txq_id = txq->q.id;
- struct iwl3945_shared *shared_data = priv->shared_virt;
+ struct iwl3945_shared *shared_data = priv->_3945.shared_virt;
shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr);
@@ -2385,6 +2459,30 @@ static u16 iwl3945_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
return (u16)sizeof(struct iwl3945_addsta_cmd);
}
+static int iwl3945_manage_ibss_station(struct iwl_priv *priv,
+ struct ieee80211_vif *vif, bool add)
+{
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ int ret;
+
+ if (add) {
+ ret = iwl_add_bssid_station(priv, vif->bss_conf.bssid, false,
+ &vif_priv->ibss_bssid_sta_id);
+ if (ret)
+ return ret;
+
+ iwl3945_sync_sta(priv, vif_priv->ibss_bssid_sta_id,
+ (priv->band == IEEE80211_BAND_5GHZ) ?
+ IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP,
+ CMD_ASYNC);
+ iwl3945_rate_scale_init(priv->hw, vif_priv->ibss_bssid_sta_id);
+
+ return 0;
+ }
+
+ return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id,
+ vif->bss_conf.bssid);
+}
/**
* iwl3945_init_hw_rate_table - Initialize the hardware rate fallback table
@@ -2432,7 +2530,7 @@ int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
/* If an OFDM rate is used, have it fall back to the
* 1M CCK rates */
- if (!(priv->sta_supp_rates & IWL_OFDM_RATES_MASK) &&
+ if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
iwl_is_associated(priv)) {
index = IWL_FIRST_CCK_RATE;
@@ -2471,12 +2569,12 @@ int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
memset((void *)&priv->hw_params, 0,
sizeof(struct iwl_hw_params));
- priv->shared_virt = dma_alloc_coherent(&priv->pci_dev->dev,
- sizeof(struct iwl3945_shared),
- &priv->shared_phys, GFP_KERNEL);
- if (!priv->shared_virt) {
+ priv->_3945.shared_virt =
+ dma_alloc_coherent(&priv->pci_dev->dev,
+ sizeof(struct iwl3945_shared),
+ &priv->_3945.shared_phys, GFP_KERNEL);
+ if (!priv->_3945.shared_virt) {
IWL_ERR(priv, "failed to allocate pci memory\n");
- mutex_unlock(&priv->mutex);
return -ENOMEM;
}
@@ -2537,13 +2635,13 @@ void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv)
void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv)
{
- INIT_DELAYED_WORK(&priv->thermal_periodic,
+ INIT_DELAYED_WORK(&priv->_3945.thermal_periodic,
iwl3945_bg_reg_txpower_periodic);
}
void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv)
{
- cancel_delayed_work(&priv->thermal_periodic);
+ cancel_delayed_work(&priv->_3945.thermal_periodic);
}
/* check contents of special bootstrap uCode SRAM */
@@ -2714,48 +2812,10 @@ static int iwl3945_load_bsm(struct iwl_priv *priv)
return 0;
}
-#define IWL3945_UCODE_GET(item) \
-static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode,\
- u32 api_ver) \
-{ \
- return le32_to_cpu(ucode->u.v1.item); \
-}
-
-static u32 iwl3945_ucode_get_header_size(u32 api_ver)
-{
- return UCODE_HEADER_SIZE(1);
-}
-static u32 iwl3945_ucode_get_build(const struct iwl_ucode_header *ucode,
- u32 api_ver)
-{
- return 0;
-}
-static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode,
- u32 api_ver)
-{
- return (u8 *) ucode->u.v1.data;
-}
-
-IWL3945_UCODE_GET(inst_size);
-IWL3945_UCODE_GET(data_size);
-IWL3945_UCODE_GET(init_size);
-IWL3945_UCODE_GET(init_data_size);
-IWL3945_UCODE_GET(boot_size);
-
static struct iwl_hcmd_ops iwl3945_hcmd = {
.rxon_assoc = iwl3945_send_rxon_assoc,
.commit_rxon = iwl3945_commit_rxon,
-};
-
-static struct iwl_ucode_ops iwl3945_ucode = {
- .get_header_size = iwl3945_ucode_get_header_size,
- .get_build = iwl3945_ucode_get_build,
- .get_inst_size = iwl3945_ucode_get_inst_size,
- .get_data_size = iwl3945_ucode_get_data_size,
- .get_init_size = iwl3945_ucode_get_init_size,
- .get_init_data_size = iwl3945_ucode_get_init_data_size,
- .get_boot_size = iwl3945_ucode_get_boot_size,
- .get_data = iwl3945_ucode_get_data,
+ .send_bt_config = iwl_send_bt_config,
};
static struct iwl_lib_ops iwl3945_lib = {
@@ -2791,17 +2851,24 @@ static struct iwl_lib_ops iwl3945_lib = {
.post_associate = iwl3945_post_associate,
.isr = iwl_isr_legacy,
.config_ap = iwl3945_config_ap,
- .add_bcast_station = iwl3945_add_bcast_station,
+ .manage_ibss_station = iwl3945_manage_ibss_station,
+ .check_plcp_health = iwl3945_good_plcp_health,
+
+ .debugfs_ops = {
+ .rx_stats_read = iwl3945_ucode_rx_stats_read,
+ .tx_stats_read = iwl3945_ucode_tx_stats_read,
+ .general_stats_read = iwl3945_ucode_general_stats_read,
+ },
};
static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
.get_hcmd_size = iwl3945_get_hcmd_size,
.build_addsta_hcmd = iwl3945_build_addsta_hcmd,
.rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag,
+ .request_scan = iwl3945_request_scan,
};
static const struct iwl_ops iwl3945_ops = {
- .ucode = &iwl3945_ucode,
.lib = &iwl3945_lib,
.hcmd = &iwl3945_hcmd,
.utils = &iwl3945_hcmd_utils,
@@ -2826,7 +2893,10 @@ static struct iwl_cfg iwl3945_bg_cfg = {
.ht_greenfield_support = false,
.led_compensation = 64,
.broken_powersave = true,
- .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 512,
+ .tx_power_by_driver = true,
};
static struct iwl_cfg iwl3945_abg_cfg = {
@@ -2844,7 +2914,10 @@ static struct iwl_cfg iwl3945_abg_cfg = {
.ht_greenfield_support = false,
.led_compensation = 64,
.broken_powersave = true,
- .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 512,
+ .tx_power_by_driver = true,
};
DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = {
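The iwl3945_good_plcp_health() helper introduced in iwl-3945.c above normalizes the OFDM PLCP error delta by the milliseconds elapsed since the previous statistics notification and compares (delta * 100) / msec against the configured plcp_delta_threshold; when the rate is too high it returns false so that iwl_recover_from_statistics() can reset the radio. A free-standing sketch of just that arithmetic (the names and the stand-alone form are illustrative, not part of the patch):

#include <stdbool.h>

static bool plcp_rate_ok(long plcp_err_delta, unsigned int elapsed_msec,
			 unsigned int threshold)
{
	if (plcp_err_delta <= 0)	/* counters cleared or no new errors */
		return true;
	if (!elapsed_msec)		/* avoid division by zero */
		return true;

	/* same scaling as the driver: PLCP errors per 100 ms vs. threshold */
	return (plcp_err_delta * 100) / elapsed_msec <= threshold;
}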
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index 452dfd5456c6..bb2aeebf3652 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -95,7 +95,6 @@ struct iwl3945_rs_sta {
u8 tgg;
u8 flush_pending;
u8 start_rate;
- u8 ibss_sta_added;
struct timer_list rate_scale_flush;
struct iwl3945_rate_scale_data win[IWL_RATE_COUNT_3945];
#ifdef CONFIG_MAC80211_DEBUGFS
@@ -107,7 +106,12 @@ struct iwl3945_rs_sta {
};
+/*
+ * The common struct MUST be first because it is shared between
+ * 3945 and agn!
+ */
struct iwl3945_sta_priv {
+ struct iwl_station_priv_common common;
struct iwl3945_rs_sta rs_sta;
};
@@ -212,13 +216,6 @@ extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
char **buf, bool display);
extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
-/*
- * Currently used by iwl-3945-rs... look at restructuring so that it doesn't
- * call this... todo... fix that.
-*/
-extern u8 iwl3945_sync_station(struct iwl_priv *priv, int sta_id,
- u16 tx_rate, u8 flags);
-
/******************************************************************************
*
* Functions implemented in iwl-[34]*.c which are forward declared here
@@ -265,10 +262,14 @@ extern int iwl3945_hw_reg_send_txpower(struct iwl_priv *priv);
extern int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power);
extern void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
+void iwl3945_reply_statistics(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
extern void iwl3945_disable_events(struct iwl_priv *priv);
extern int iwl4965_get_temperature(const struct iwl_priv *priv);
-extern void iwl3945_post_associate(struct iwl_priv *priv);
-extern void iwl3945_config_ap(struct iwl_priv *priv);
+extern void iwl3945_post_associate(struct iwl_priv *priv,
+ struct ieee80211_vif *vif);
+extern void iwl3945_config_ap(struct iwl_priv *priv,
+ struct ieee80211_vif *vif);
/**
* iwl3945_hw_find_station - Find station id for a given BSSID
@@ -287,14 +288,15 @@ extern __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv);
extern int iwl3945_init_hw_rate_table(struct iwl_priv *priv);
extern void iwl3945_reg_txpower_periodic(struct iwl_priv *priv);
extern int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv);
-extern u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id,
- u16 tx_rate, u8 flags);
extern const struct iwl_channel_info *iwl3945_get_channel_info(
const struct iwl_priv *priv, enum ieee80211_band band, u16 channel);
extern int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate);
+/* scanning */
+void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
+
/* Requires full declaration of iwl_priv before including */
#include "iwl-io.h"
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
index 67ef562e8db1..cd4b61ae25b7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
@@ -81,26 +81,6 @@
*/
#define IWL49_FIRST_AMPDU_QUEUE 7
-/* Time constants */
-#define SHORT_SLOT_TIME 9
-#define LONG_SLOT_TIME 20
-
-/* RSSI to dBm */
-#define IWL49_RSSI_OFFSET 44
-
-
-/* PCI registers */
-#define PCI_CFG_RETRY_TIMEOUT 0x041
-
-/* PCI register values */
-#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
-#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
-
-#define IWL_NUM_SCAN_RATES (2)
-
-#define IWL_DEFAULT_TX_RETRY 15
-
-
/* Sizes and addresses for instruction and data memory (SRAM) in
* 4965's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
#define IWL49_RTC_INST_LOWER_BOUND (0x000000)
@@ -393,10 +373,6 @@ static inline int iwl4965_hw_valid_rtc_data_addr(u32 addr)
* location(s) in command (struct iwl4965_txpowertable_cmd).
*/
-/* Limit range of txpower output target to be between these values */
-#define IWL_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm = 1 milliwatt */
-#define IWL_TX_POWER_TARGET_POWER_MAX (16) /* 16 dBm */
-
/**
* When MIMO is used (2 transmitters operating simultaneously), driver should
* limit each transmitter to deliver a max of 3 dB below the regulatory limit
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 8972166386cb..d3afddae8d9f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -46,6 +46,8 @@
#include "iwl-calib.h"
#include "iwl-sta.h"
#include "iwl-agn-led.h"
+#include "iwl-agn.h"
+#include "iwl-agn-debugfs.h"
static int iwl4965_send_tx_power(struct iwl_priv *priv);
static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
@@ -60,14 +62,6 @@ static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
#define _IWL4965_MODULE_FIRMWARE(api) IWL4965_FW_PRE #api ".ucode"
#define IWL4965_MODULE_FIRMWARE(api) _IWL4965_MODULE_FIRMWARE(api)
-
-/* module parameters */
-static struct iwl_mod_params iwl4965_mod_params = {
- .amsdu_size_8K = 1,
- .restart_fw = 1,
- /* the rest are 0 by default */
-};
-
/* check contents of special bootstrap uCode SRAM */
static int iwl4965_verify_bsm(struct iwl_priv *priv)
{
@@ -417,7 +411,7 @@ static void iwl4965_gain_computation(struct iwl_priv *priv,
sizeof(cmd), &cmd);
if (ret)
IWL_DEBUG_CALIB(priv, "fail sending cmd "
- "REPLY_PHY_CALIBRATION_CMD \n");
+ "REPLY_PHY_CALIBRATION_CMD\n");
/* TODO we might want recalculate
* rx_chain in rxon cmd */
@@ -502,14 +496,14 @@ static void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
}
-static const u16 default_queue_to_tx_fifo[] = {
- IWL_TX_FIFO_AC3,
- IWL_TX_FIFO_AC2,
- IWL_TX_FIFO_AC1,
- IWL_TX_FIFO_AC0,
+static const s8 default_queue_to_tx_fifo[] = {
+ IWL_TX_FIFO_VO,
+ IWL_TX_FIFO_VI,
+ IWL_TX_FIFO_BE,
+ IWL_TX_FIFO_BK,
IWL49_CMD_FIFO_NUM,
- IWL_TX_FIFO_HCCA_1,
- IWL_TX_FIFO_HCCA_2
+ IWL_TX_FIFO_UNUSED,
+ IWL_TX_FIFO_UNUSED,
};
static int iwl4965_alive_notify(struct iwl_priv *priv)
@@ -589,9 +583,15 @@ static int iwl4965_alive_notify(struct iwl_priv *priv)
/* reset to 0 to enable all the queue first */
priv->txq_ctx_active_msk = 0;
/* Map each Tx/cmd queue to its corresponding fifo */
+ BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
int ac = default_queue_to_tx_fifo[i];
+
iwl_txq_ctx_activate(priv, i);
+
+ if (ac == IWL_TX_FIFO_UNUSED)
+ continue;
+
iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
}
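
The reworked table maps the four AC queues to the VO/VI/BE/BK FIFOs, keeps the command FIFO, and marks the two former HCCA slots IWL_TX_FIFO_UNUSED; the loop now activates every queue but skips FIFO/status programming for the unused slots. A userspace sketch of the same sentinel-and-skip pattern (queue and FIFO numbers here are illustrative, not the hardware's):

#include <stdio.h>

#define FIFO_UNUSED	(-1)	/* sentinel: queue exists but maps to no FIFO */

/* Queue index -> FIFO id; mirrors the shape of default_queue_to_tx_fifo. */
static const signed char queue_to_fifo[] = {
	3,		/* voice */
	2,		/* video */
	1,		/* best effort */
	0,		/* background */
	7,		/* command FIFO */
	FIFO_UNUSED,
	FIFO_UNUSED,
};

int main(void)
{
	unsigned long active_mask = 0;
	size_t i;

	for (i = 0; i < sizeof(queue_to_fifo) / sizeof(queue_to_fifo[0]); i++) {
		int fifo = queue_to_fifo[i];

		/* Every queue is marked active ... */
		active_mask |= 1UL << i;

		/* ... but unused slots get no FIFO/status programming. */
		if (fifo == FIFO_UNUSED)
			continue;

		printf("queue %zu -> fifo %d\n", i, fifo);
	}

	printf("active queue mask: 0x%lx\n", active_mask);
	return 0;
}
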
@@ -1613,19 +1613,19 @@ static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv)
/* get absolute value */
if (temp_diff < 0) {
- IWL_DEBUG_POWER(priv, "Getting cooler, delta %d, \n", temp_diff);
+ IWL_DEBUG_POWER(priv, "Getting cooler, delta %d\n", temp_diff);
temp_diff = -temp_diff;
} else if (temp_diff == 0)
- IWL_DEBUG_POWER(priv, "Same temp, \n");
+ IWL_DEBUG_POWER(priv, "Temperature unchanged\n");
else
- IWL_DEBUG_POWER(priv, "Getting warmer, delta %d, \n", temp_diff);
+ IWL_DEBUG_POWER(priv, "Getting warmer, delta %d\n", temp_diff);
if (temp_diff < IWL_TEMPERATURE_THRESHOLD) {
- IWL_DEBUG_POWER(priv, "Thermal txpower calib not needed\n");
+ IWL_DEBUG_POWER(priv, " => thermal txpower calib not needed\n");
return 0;
}
- IWL_DEBUG_POWER(priv, "Thermal txpower calib needed\n");
+ IWL_DEBUG_POWER(priv, " => thermal txpower calib needed\n");
return 1;
}
@@ -1874,7 +1874,7 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
info->status.rates[0].count = tx_resp->failure_frame + 1;
info->flags &= ~IEEE80211_TX_CTL_AMPDU;
info->flags |= iwl_tx_status_to_mac80211(status);
- iwl_hwrate_to_tx_control(priv, rate_n_flags, info);
+ iwlagn_hwrate_to_tx_control(priv, rate_n_flags, info);
/* FIXME: code repetition end */
IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
@@ -1953,6 +1953,60 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
return 0;
}
+static u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr)
+{
+ int i;
+ int start = 0;
+ int ret = IWL_INVALID_STATION;
+ unsigned long flags;
+
+ if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) ||
+ (priv->iw_mode == NL80211_IFTYPE_AP))
+ start = IWL_STA_ID;
+
+ if (is_broadcast_ether_addr(addr))
+ return priv->hw_params.bcast_sta_id;
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ for (i = start; i < priv->hw_params.max_stations; i++)
+ if (priv->stations[i].used &&
+ (!compare_ether_addr(priv->stations[i].sta.sta.addr,
+ addr))) {
+ ret = i;
+ goto out;
+ }
+
+ IWL_DEBUG_ASSOC_LIMIT(priv, "can not find STA %pM total %d\n",
+ addr, priv->num_stations);
+
+ out:
+ /*
+ * More commands that interact with stations may arrive before we
+ * have finished processing the addition of this station.
+ */
+ if (ret != IWL_INVALID_STATION &&
+ (!(priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) ||
+ ((priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) &&
+ (priv->stations[ret].used & IWL_STA_UCODE_INPROGRESS)))) {
+ IWL_ERR(priv, "Requested station info for sta %d before ready.\n",
+ ret);
+ ret = IWL_INVALID_STATION;
+ }
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+ return ret;
+}
+
+static int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
+{
+ if (priv->iw_mode == NL80211_IFTYPE_STATION) {
+ return IWL_AP_ID;
+ } else {
+ u8 *da = ieee80211_get_DA(hdr);
+ return iwl_find_station(priv, da);
+ }
+}
+
/**
* iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
*/
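
iwl_find_station(), added above, short-circuits broadcast addresses, scans the station table under the station lock, and refuses to return an entry whose uCode add has not completed yet. A much-simplified, single-threaded sketch of that lookup; the table size and flag names below are invented for illustration:

#include <stdio.h>
#include <string.h>

#define MAX_STATIONS		32
#define INVALID_STATION		0xff

#define STA_USED		0x1	/* slot occupied in the driver table */
#define STA_UCODE_ACTIVE	0x2	/* uCode has acknowledged the add */

struct sta_entry {
	unsigned char addr[6];
	unsigned int flags;
};

static struct sta_entry stations[MAX_STATIONS];

/* Return the table index for a MAC address, or INVALID_STATION if the
 * entry is missing or the firmware has not finished adding it yet. */
static unsigned char find_station(const unsigned char *addr)
{
	int i;

	for (i = 0; i < MAX_STATIONS; i++) {
		if (!(stations[i].flags & STA_USED))
			continue;
		if (memcmp(stations[i].addr, addr, 6))
			continue;
		if (!(stations[i].flags & STA_UCODE_ACTIVE))
			return INVALID_STATION;	/* add still in progress */
		return (unsigned char)i;
	}
	return INVALID_STATION;
}

int main(void)
{
	const unsigned char peer[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	memcpy(stations[2].addr, peer, 6);
	stations[2].flags = STA_USED | STA_UCODE_ACTIVE;

	printf("station index: %u\n", find_station(peer));
	return 0;
}
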
@@ -2014,7 +2068,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
"%d index %d\n", scd_ssn , index);
- freed = iwl_tx_queue_reclaim(priv, txq_id, index);
+ freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
if (qc)
iwl_free_tfds_in_queue(priv, sta_id,
tid, freed);
@@ -2031,7 +2085,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
} else {
info->status.rates[0].count = tx_resp->failure_frame + 1;
info->flags |= iwl_tx_status_to_mac80211(status);
- iwl_hwrate_to_tx_control(priv,
+ iwlagn_hwrate_to_tx_control(priv,
le32_to_cpu(tx_resp->rate_n_flags),
info);
@@ -2042,7 +2096,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
le32_to_cpu(tx_resp->rate_n_flags),
tx_resp->failure_frame);
- freed = iwl_tx_queue_reclaim(priv, txq_id, index);
+ freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
if (qc && likely(sta_id != IWL_INVALID_STATION))
iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
else if (sta_id == IWL_INVALID_STATION)
@@ -2053,10 +2107,9 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
iwl_wake_queue(priv, txq_id);
}
if (qc && likely(sta_id != IWL_INVALID_STATION))
- iwl_txq_check_empty(priv, sta_id, tid, txq_id);
+ iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
- if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
- IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
+ iwl_check_abort_status(priv, tx_resp->frame_count, status);
}
static int iwl4965_calc_rssi(struct iwl_priv *priv,
@@ -2090,7 +2143,7 @@ static int iwl4965_calc_rssi(struct iwl_priv *priv,
/* dBm = max_rssi dB - agc dB - constant.
* Higher AGC (higher radio gain) means lower signal. */
- return max_rssi - agc - IWL49_RSSI_OFFSET;
+ return max_rssi - agc - IWLAGN_RSSI_OFFSET;
}
@@ -2098,7 +2151,7 @@ static int iwl4965_calc_rssi(struct iwl_priv *priv,
static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
{
/* Legacy Rx frames */
- priv->rx_handlers[REPLY_RX] = iwl_rx_reply_rx;
+ priv->rx_handlers[REPLY_RX] = iwlagn_rx_reply_rx;
/* Tx response */
priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
}
@@ -2113,50 +2166,13 @@ static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
cancel_work_sync(&priv->txpower_work);
}
-#define IWL4965_UCODE_GET(item) \
-static u32 iwl4965_ucode_get_##item(const struct iwl_ucode_header *ucode,\
- u32 api_ver) \
-{ \
- return le32_to_cpu(ucode->u.v1.item); \
-}
-
-static u32 iwl4965_ucode_get_header_size(u32 api_ver)
-{
- return UCODE_HEADER_SIZE(1);
-}
-static u32 iwl4965_ucode_get_build(const struct iwl_ucode_header *ucode,
- u32 api_ver)
-{
- return 0;
-}
-static u8 *iwl4965_ucode_get_data(const struct iwl_ucode_header *ucode,
- u32 api_ver)
-{
- return (u8 *) ucode->u.v1.data;
-}
-
-IWL4965_UCODE_GET(inst_size);
-IWL4965_UCODE_GET(data_size);
-IWL4965_UCODE_GET(init_size);
-IWL4965_UCODE_GET(init_data_size);
-IWL4965_UCODE_GET(boot_size);
-
static struct iwl_hcmd_ops iwl4965_hcmd = {
.rxon_assoc = iwl4965_send_rxon_assoc,
.commit_rxon = iwl_commit_rxon,
.set_rxon_chain = iwl_set_rxon_chain,
+ .send_bt_config = iwl_send_bt_config,
};
-static struct iwl_ucode_ops iwl4965_ucode = {
- .get_header_size = iwl4965_ucode_get_header_size,
- .get_build = iwl4965_ucode_get_build,
- .get_inst_size = iwl4965_ucode_get_inst_size,
- .get_data_size = iwl4965_ucode_get_data_size,
- .get_init_size = iwl4965_ucode_get_init_size,
- .get_init_data_size = iwl4965_ucode_get_init_data_size,
- .get_boot_size = iwl4965_ucode_get_boot_size,
- .get_data = iwl4965_ucode_get_data,
-};
static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
.get_hcmd_size = iwl4965_get_hcmd_size,
.build_addsta_hcmd = iwl4965_build_addsta_hcmd,
@@ -2164,6 +2180,7 @@ static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
.gain_computation = iwl4965_gain_computation,
.rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag,
.calc_rssi = iwl4965_calc_rssi,
+ .request_scan = iwlagn_request_scan,
};
static struct iwl_lib_ops iwl4965_lib = {
@@ -2184,6 +2201,7 @@ static struct iwl_lib_ops iwl4965_lib = {
.load_ucode = iwl4965_load_bsm,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
+ .dump_fh = iwl_dump_fh,
.set_channel_switch = iwl4965_hw_channel_switch,
.apm_ops = {
.init = iwl_apm_init,
@@ -2216,11 +2234,16 @@ static struct iwl_lib_ops iwl4965_lib = {
.temperature = iwl4965_temperature_calib,
.set_ct_kill = iwl4965_set_ct_threshold,
},
- .add_bcast_station = iwl_add_bcast_station,
+ .manage_ibss_station = iwlagn_manage_ibss_station,
+ .debugfs_ops = {
+ .rx_stats_read = iwl_ucode_rx_stats_read,
+ .tx_stats_read = iwl_ucode_tx_stats_read,
+ .general_stats_read = iwl_ucode_general_stats_read,
+ },
+ .check_plcp_health = iwl_good_plcp_health,
};
static const struct iwl_ops iwl4965_ops = {
- .ucode = &iwl4965_ucode,
.lib = &iwl4965_lib,
.hcmd = &iwl4965_hcmd,
.utils = &iwl4965_hcmd_utils,
@@ -2228,7 +2251,7 @@ static const struct iwl_ops iwl4965_ops = {
};
struct iwl_cfg iwl4965_agn_cfg = {
- .name = "4965AGN",
+ .name = "Intel(R) Wireless WiFi Link 4965AGN",
.fw_name_pre = IWL4965_FW_PRE,
.ucode_api_max = IWL4965_UCODE_API_MAX,
.ucode_api_min = IWL4965_UCODE_API_MIN,
@@ -2239,7 +2262,7 @@ struct iwl_cfg iwl4965_agn_cfg = {
.ops = &iwl4965_ops,
.num_of_queues = IWL49_NUM_QUEUES,
.num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
- .mod_params = &iwl4965_mod_params,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_AB,
.valid_rx_ant = ANT_ABC,
.pll_cfg_val = 0,
@@ -2251,27 +2274,20 @@ struct iwl_cfg iwl4965_agn_cfg = {
.led_compensation = 61,
.chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .temperature_kelvin = true,
+ .max_event_log_size = 512,
+ .tx_power_by_driver = true,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
+ /*
+ * Force use of chains B and C for scan RX on 5 GHz band
+ * because the device has off-channel reception on chain A.
+ */
+ .scan_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
};
/* Module firmware */
MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX));
-module_param_named(antenna, iwl4965_mod_params.antenna, int, S_IRUGO);
-MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
-module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, S_IRUGO);
-MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
-module_param_named(
- disable_hw_scan, iwl4965_mod_params.disable_hw_scan, int, S_IRUGO);
-MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
-
-module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, S_IRUGO);
-MODULE_PARM_DESC(queues_num, "number of hw queues.");
-/* 11n */
-module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, S_IRUGO);
-MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
-module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K,
- int, S_IRUGO);
-MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
-
-module_param_named(fw_restart4965, iwl4965_mod_params.restart_fw, int, S_IRUGO);
-MODULE_PARM_DESC(fw_restart4965, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
index 714e032f6217..146e6431ae95 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
@@ -68,25 +68,6 @@
#ifndef __iwl_5000_hw_h__
#define __iwl_5000_hw_h__
-#define IWL50_RTC_INST_LOWER_BOUND (0x000000)
-#define IWL50_RTC_INST_UPPER_BOUND (0x020000)
-
-#define IWL50_RTC_DATA_LOWER_BOUND (0x800000)
-#define IWL50_RTC_DATA_UPPER_BOUND (0x80C000)
-
-#define IWL50_RTC_INST_SIZE (IWL50_RTC_INST_UPPER_BOUND - \
- IWL50_RTC_INST_LOWER_BOUND)
-#define IWL50_RTC_DATA_SIZE (IWL50_RTC_DATA_UPPER_BOUND - \
- IWL50_RTC_DATA_LOWER_BOUND)
-
-/* EEPROM */
-#define IWL_5000_EEPROM_IMG_SIZE 2048
-
-#define IWL50_CMD_FIFO_NUM 7
-#define IWL50_NUM_QUEUES 20
-#define IWL50_NUM_AMPDU_QUEUES 10
-#define IWL50_FIRST_AMPDU_QUEUE 10
-
/* 5150 only */
#define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF (-5)
@@ -103,19 +84,5 @@ static inline s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
return (s32)(temperature - voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
}
-/* Fixed (non-configurable) rx data from phy */
-
-/**
- * struct iwl5000_schedq_bc_tbl scheduler byte count table
- * base physical address of iwl5000_shared
- * is provided to SCD_DRAM_BASE_ADDR
- * @tfd_offset 0-12 - tx command byte count
- * 12-16 - station index
- */
-struct iwl5000_scd_bc_tbl {
- __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
-} __attribute__ ((packed));
-
-
#endif /* __iwl_5000_hw_h__ */
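
The 5150-only voltage-to-temperature coefficient and iwl_temp_calib_to_offset() stay in this header; the helper subtracts the scaled voltage reading from the EEPROM temperature. A tiny sketch of that arithmetic with made-up EEPROM values:

#include <stdio.h>

#define VOLTAGE_TO_TEMPERATURE_COEFF	(-5)	/* 5150-only scaling factor */

/* Same arithmetic as iwl_temp_calib_to_offset(): the calibration offset is
 * the EEPROM temperature minus the voltage reading scaled by the coefficient. */
static int temp_calib_to_offset(int eeprom_temperature, int eeprom_voltage)
{
	return eeprom_temperature - eeprom_voltage / VOLTAGE_TO_TEMPERATURE_COEFF;
}

int main(void)
{
	/* Hypothetical EEPROM readings, just to exercise the formula. */
	int temperature = 292;
	int voltage = -40;

	printf("calibration offset: %d\n", temp_calib_to_offset(temperature, voltage));
	return 0;
}
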
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index e476acb53aa7..a28af7eb67eb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -19,6 +19,7 @@
* file called LICENSE.
*
* Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
@@ -43,9 +44,11 @@
#include "iwl-io.h"
#include "iwl-sta.h"
#include "iwl-helpers.h"
+#include "iwl-agn.h"
#include "iwl-agn-led.h"
+#include "iwl-agn-hw.h"
#include "iwl-5000-hw.h"
-#include "iwl-6000-hw.h"
+#include "iwl-agn-debugfs.h"
/* Highest firmware API version supported */
#define IWL5000_UCODE_API_MAX 2
@@ -63,18 +66,8 @@
#define _IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE #api ".ucode"
#define IWL5150_MODULE_FIRMWARE(api) _IWL5150_MODULE_FIRMWARE(api)
-static const u16 iwl5000_default_queue_to_tx_fifo[] = {
- IWL_TX_FIFO_AC3,
- IWL_TX_FIFO_AC2,
- IWL_TX_FIFO_AC1,
- IWL_TX_FIFO_AC0,
- IWL50_CMD_FIFO_NUM,
- IWL_TX_FIFO_HCCA_1,
- IWL_TX_FIFO_HCCA_2
-};
-
/* NIC configuration for 5000 series */
-void iwl5000_nic_config(struct iwl_priv *priv)
+static void iwl5000_nic_config(struct iwl_priv *priv)
{
unsigned long flags;
u16 radio_cfg;
@@ -107,162 +100,6 @@ void iwl5000_nic_config(struct iwl_priv *priv)
spin_unlock_irqrestore(&priv->lock, flags);
}
-
-/*
- * EEPROM
- */
-static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
-{
- u16 offset = 0;
-
- if ((address & INDIRECT_ADDRESS) == 0)
- return address;
-
- switch (address & INDIRECT_TYPE_MSK) {
- case INDIRECT_HOST:
- offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_HOST);
- break;
- case INDIRECT_GENERAL:
- offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_GENERAL);
- break;
- case INDIRECT_REGULATORY:
- offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_REGULATORY);
- break;
- case INDIRECT_CALIBRATION:
- offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_CALIBRATION);
- break;
- case INDIRECT_PROCESS_ADJST:
- offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_PROCESS_ADJST);
- break;
- case INDIRECT_OTHERS:
- offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_OTHERS);
- break;
- default:
- IWL_ERR(priv, "illegal indirect type: 0x%X\n",
- address & INDIRECT_TYPE_MSK);
- break;
- }
-
- /* translate the offset from words to byte */
- return (address & ADDRESS_MSK) + (offset << 1);
-}
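
eeprom_indirect_address(), removed above now that the agn code provides the equivalent, resolves an indirect EEPROM reference by fetching the link word for the address type and adding it, converted from 16-bit words to bytes, to the masked offset. A standalone sketch of that translation; the bit masks and link table below are placeholders, not the real EEPROM layout:

#include <stdio.h>

#define INDIRECT_ADDRESS	0x00100000	/* marks an indirect reference */
#define INDIRECT_TYPE_MSK	0x000f0000	/* selects which link word to use */
#define ADDRESS_MSK		0x0000ffff	/* offset within the linked block */

/* Pretend link table: link-word value per indirect type, in 16-bit words. */
static unsigned short link_offset_words(unsigned int type)
{
	return (unsigned short)(0x40 * (type >> 16));
}

/* Resolve an EEPROM address: direct addresses pass through, indirect ones
 * add the linked block's offset converted from words to bytes. */
static unsigned int resolve_address(unsigned int address)
{
	unsigned short offset;

	if (!(address & INDIRECT_ADDRESS))
		return address;

	offset = link_offset_words(address & INDIRECT_TYPE_MSK);
	return (address & ADDRESS_MSK) + ((unsigned int)offset << 1);
}

int main(void)
{
	unsigned int direct = 0x0010;
	unsigned int indirect = INDIRECT_ADDRESS | 0x00020000 | 0x0004;

	printf("direct   -> 0x%x\n", resolve_address(direct));
	printf("indirect -> 0x%x\n", resolve_address(indirect));
	return 0;
}
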
-
-u16 iwl5000_eeprom_calib_version(struct iwl_priv *priv)
-{
- struct iwl_eeprom_calib_hdr {
- u8 version;
- u8 pa_type;
- u16 voltage;
- } *hdr;
-
- hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
- EEPROM_5000_CALIB_ALL);
- return hdr->version;
-
-}
-
-static void iwl5000_gain_computation(struct iwl_priv *priv,
- u32 average_noise[NUM_RX_CHAINS],
- u16 min_average_noise_antenna_i,
- u32 min_average_noise,
- u8 default_chain)
-{
- int i;
- s32 delta_g;
- struct iwl_chain_noise_data *data = &priv->chain_noise_data;
-
- /*
- * Find Gain Code for the chains based on "default chain"
- */
- for (i = default_chain + 1; i < NUM_RX_CHAINS; i++) {
- if ((data->disconn_array[i])) {
- data->delta_gain_code[i] = 0;
- continue;
- }
-
- delta_g = (priv->cfg->chain_noise_scale *
- ((s32)average_noise[default_chain] -
- (s32)average_noise[i])) / 1500;
-
- /* bound gain by 2 bits value max, 3rd bit is sign */
- data->delta_gain_code[i] =
- min(abs(delta_g), (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
-
- if (delta_g < 0)
- /*
- * set negative sign ...
- * note to Intel developers: This is uCode API format,
- * not the format of any internal device registers.
- * Do not change this format for e.g. 6050 or similar
- * devices. Change format only if more resolution
- * (i.e. more than 2 bits magnitude) is needed.
- */
- data->delta_gain_code[i] |= (1 << 2);
- }
-
- IWL_DEBUG_CALIB(priv, "Delta gains: ANT_B = %d ANT_C = %d\n",
- data->delta_gain_code[1], data->delta_gain_code[2]);
-
- if (!data->radio_write) {
- struct iwl_calib_chain_noise_gain_cmd cmd;
-
- memset(&cmd, 0, sizeof(cmd));
-
- cmd.hdr.op_code = IWL_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD;
- cmd.hdr.first_group = 0;
- cmd.hdr.groups_num = 1;
- cmd.hdr.data_valid = 1;
- cmd.delta_gain_1 = data->delta_gain_code[1];
- cmd.delta_gain_2 = data->delta_gain_code[2];
- iwl_send_cmd_pdu_async(priv, REPLY_PHY_CALIBRATION_CMD,
- sizeof(cmd), &cmd, NULL);
-
- data->radio_write = 1;
- data->state = IWL_CHAIN_NOISE_CALIBRATED;
- }
-
- data->chain_noise_a = 0;
- data->chain_noise_b = 0;
- data->chain_noise_c = 0;
- data->chain_signal_a = 0;
- data->chain_signal_b = 0;
- data->chain_signal_c = 0;
- data->beacon_count = 0;
-}
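
The removed gain computation clamps each chain's delta to a two-bit magnitude and sets bit 2 for negative values, the sign/magnitude format the in-code comment says the uCode API expects. A small sketch of that encoding:

#include <stdio.h>
#include <stdlib.h>

#define MAX_DELTA_GAIN_CODE	3	/* two bits of magnitude */

/* Encode a signed gain delta as the uCode-style sign/magnitude byte:
 * bits 0-1 hold the clamped magnitude, bit 2 is set for negative deltas. */
static unsigned char encode_delta_gain(int delta)
{
	unsigned char code = (unsigned char)(abs(delta) > MAX_DELTA_GAIN_CODE ?
					     MAX_DELTA_GAIN_CODE : abs(delta));

	if (delta < 0)
		code |= 1 << 2;

	return code;
}

int main(void)
{
	int deltas[] = { 0, 1, -2, 5, -7 };
	size_t i;

	for (i = 0; i < sizeof(deltas) / sizeof(deltas[0]); i++)
		printf("delta %3d -> code 0x%x\n", deltas[i],
		       encode_delta_gain(deltas[i]));
	return 0;
}
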
-
-static void iwl5000_chain_noise_reset(struct iwl_priv *priv)
-{
- struct iwl_chain_noise_data *data = &priv->chain_noise_data;
- int ret;
-
- if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) {
- struct iwl_calib_chain_noise_reset_cmd cmd;
- memset(&cmd, 0, sizeof(cmd));
-
- cmd.hdr.op_code = IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD;
- cmd.hdr.first_group = 0;
- cmd.hdr.groups_num = 1;
- cmd.hdr.data_valid = 1;
- ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
- sizeof(cmd), &cmd);
- if (ret)
- IWL_ERR(priv,
- "Could not send REPLY_PHY_CALIBRATION_CMD\n");
- data->state = IWL_CHAIN_NOISE_ACCUMULATE;
- IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
- }
-}
-
-void iwl5000_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
- __le32 *tx_flags)
-{
- if ((info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
- (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
- *tx_flags |= TX_CMD_FLG_RTS_CTS_MSK;
- else
- *tx_flags &= ~TX_CMD_FLG_RTS_CTS_MSK;
-}
-
static struct iwl_sensitivity_ranges iwl5000_sensitivity = {
.min_nrg_cck = 95,
.max_nrg_cck = 0, /* not used, set to 0 */
@@ -314,14 +151,6 @@ static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
.nrg_th_cca = 62,
};
-const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv,
- size_t offset)
-{
- u32 address = eeprom_indirect_address(priv, offset);
- BUG_ON(address >= priv->cfg->eeprom_size);
- return &priv->eeprom[address];
-}
-
static void iwl5150_set_ct_threshold(struct iwl_priv *priv)
{
const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF;
@@ -337,356 +166,10 @@ static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
}
-/*
- * Calibration
- */
-static int iwl5000_set_Xtal_calib(struct iwl_priv *priv)
-{
- struct iwl_calib_xtal_freq_cmd cmd;
- __le16 *xtal_calib =
- (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL);
-
- cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
- cmd.hdr.first_group = 0;
- cmd.hdr.groups_num = 1;
- cmd.hdr.data_valid = 1;
- cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
- cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
- return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
- (u8 *)&cmd, sizeof(cmd));
-}
-
-static int iwl5000_send_calib_cfg(struct iwl_priv *priv)
-{
- struct iwl_calib_cfg_cmd calib_cfg_cmd;
- struct iwl_host_cmd cmd = {
- .id = CALIBRATION_CFG_CMD,
- .len = sizeof(struct iwl_calib_cfg_cmd),
- .data = &calib_cfg_cmd,
- };
-
- memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
- calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
- calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL;
- calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL;
- calib_cfg_cmd.ucd_calib_cfg.flags = IWL_CALIB_INIT_CFG_ALL;
-
- return iwl_send_cmd(priv, &cmd);
-}
-
-static void iwl5000_rx_calib_result(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
- int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- int index;
-
- /* reduce the size of the length field itself */
- len -= 4;
-
- /* Define the order in which the results will be sent to the runtime
- * uCode. iwl_send_calib_results sends them in a row according to their
- * index. We sort them here */
- switch (hdr->op_code) {
- case IWL_PHY_CALIBRATE_DC_CMD:
- index = IWL_CALIB_DC;
- break;
- case IWL_PHY_CALIBRATE_LO_CMD:
- index = IWL_CALIB_LO;
- break;
- case IWL_PHY_CALIBRATE_TX_IQ_CMD:
- index = IWL_CALIB_TX_IQ;
- break;
- case IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD:
- index = IWL_CALIB_TX_IQ_PERD;
- break;
- case IWL_PHY_CALIBRATE_BASE_BAND_CMD:
- index = IWL_CALIB_BASE_BAND;
- break;
- default:
- IWL_ERR(priv, "Unknown calibration notification %d\n",
- hdr->op_code);
- return;
- }
- iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len);
-}
-
-static void iwl5000_rx_calib_complete(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- IWL_DEBUG_INFO(priv, "Init. calibration is completed, restarting fw.\n");
- queue_work(priv->workqueue, &priv->restart);
-}
-
-/*
- * ucode
- */
-static int iwl5000_load_section(struct iwl_priv *priv, const char *name,
- struct fw_desc *image, u32 dst_addr)
-{
- dma_addr_t phy_addr = image->p_addr;
- u32 byte_cnt = image->len;
- int ret;
-
- priv->ucode_write_complete = 0;
-
- iwl_write_direct32(priv,
- FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
-
- iwl_write_direct32(priv,
- FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
-
- iwl_write_direct32(priv,
- FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
- phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
-
- iwl_write_direct32(priv,
- FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
- (iwl_get_dma_hi_addr(phy_addr)
- << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
-
- iwl_write_direct32(priv,
- FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
- 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
- 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
- FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
-
- iwl_write_direct32(priv,
- FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
- FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
-
- IWL_DEBUG_INFO(priv, "%s uCode section being loaded...\n", name);
- ret = wait_event_interruptible_timeout(priv->wait_command_queue,
- priv->ucode_write_complete, 5 * HZ);
- if (ret == -ERESTARTSYS) {
- IWL_ERR(priv, "Could not load the %s uCode section due "
- "to interrupt\n", name);
- return ret;
- }
- if (!ret) {
- IWL_ERR(priv, "Could not load the %s uCode section\n",
- name);
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-static int iwl5000_load_given_ucode(struct iwl_priv *priv,
- struct fw_desc *inst_image,
- struct fw_desc *data_image)
-{
- int ret = 0;
-
- ret = iwl5000_load_section(priv, "INST", inst_image,
- IWL50_RTC_INST_LOWER_BOUND);
- if (ret)
- return ret;
-
- return iwl5000_load_section(priv, "DATA", data_image,
- IWL50_RTC_DATA_LOWER_BOUND);
-}
-
-int iwl5000_load_ucode(struct iwl_priv *priv)
-{
- int ret = 0;
-
- /* check whether init ucode should be loaded, or rather runtime ucode */
- if (priv->ucode_init.len && (priv->ucode_type == UCODE_NONE)) {
- IWL_DEBUG_INFO(priv, "Init ucode found. Loading init ucode...\n");
- ret = iwl5000_load_given_ucode(priv,
- &priv->ucode_init, &priv->ucode_init_data);
- if (!ret) {
- IWL_DEBUG_INFO(priv, "Init ucode load complete.\n");
- priv->ucode_type = UCODE_INIT;
- }
- } else {
- IWL_DEBUG_INFO(priv, "Init ucode not found, or already loaded. "
- "Loading runtime ucode...\n");
- ret = iwl5000_load_given_ucode(priv,
- &priv->ucode_code, &priv->ucode_data);
- if (!ret) {
- IWL_DEBUG_INFO(priv, "Runtime ucode load complete.\n");
- priv->ucode_type = UCODE_RT;
- }
- }
-
- return ret;
-}
-
-void iwl5000_init_alive_start(struct iwl_priv *priv)
-{
- int ret = 0;
-
- /* Check alive response for "valid" sign from uCode */
- if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
- /* We had an error bringing up the hardware, so take it
- * all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
- goto restart;
- }
-
- /* initialize uCode was loaded... verify inst image.
- * This is a paranoid check, because we would not have gotten the
- * "initialize" alive if code weren't properly loaded. */
- if (iwl_verify_ucode(priv)) {
- /* Runtime instruction load was bad;
- * take it all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
- goto restart;
- }
-
- iwl_clear_stations_table(priv);
- ret = priv->cfg->ops->lib->alive_notify(priv);
- if (ret) {
- IWL_WARN(priv,
- "Could not complete ALIVE transition: %d\n", ret);
- goto restart;
- }
-
- iwl5000_send_calib_cfg(priv);
- return;
-
-restart:
- /* real restart (first load init_ucode) */
- queue_work(priv->workqueue, &priv->restart);
-}
-
-static void iwl5000_set_wr_ptrs(struct iwl_priv *priv,
- int txq_id, u32 index)
-{
- iwl_write_direct32(priv, HBUS_TARG_WRPTR,
- (index & 0xff) | (txq_id << 8));
- iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(txq_id), index);
-}
-
-static void iwl5000_tx_queue_set_status(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- int tx_fifo_id, int scd_retry)
-{
- int txq_id = txq->q.id;
- int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
-
- iwl_write_prph(priv, IWL50_SCD_QUEUE_STATUS_BITS(txq_id),
- (active << IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
- (tx_fifo_id << IWL50_SCD_QUEUE_STTS_REG_POS_TXF) |
- (1 << IWL50_SCD_QUEUE_STTS_REG_POS_WSL) |
- IWL50_SCD_QUEUE_STTS_REG_MSK);
-
- txq->sched_retry = scd_retry;
-
- IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n",
- active ? "Activate" : "Deactivate",
- scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
-}
-
-int iwl5000_alive_notify(struct iwl_priv *priv)
-{
- u32 a;
- unsigned long flags;
- int i, chan;
- u32 reg_val;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- priv->scd_base_addr = iwl_read_prph(priv, IWL50_SCD_SRAM_BASE_ADDR);
- a = priv->scd_base_addr + IWL50_SCD_CONTEXT_DATA_OFFSET;
- for (; a < priv->scd_base_addr + IWL50_SCD_TX_STTS_BITMAP_OFFSET;
- a += 4)
- iwl_write_targ_mem(priv, a, 0);
- for (; a < priv->scd_base_addr + IWL50_SCD_TRANSLATE_TBL_OFFSET;
- a += 4)
- iwl_write_targ_mem(priv, a, 0);
- for (; a < priv->scd_base_addr +
- IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
- iwl_write_targ_mem(priv, a, 0);
-
- iwl_write_prph(priv, IWL50_SCD_DRAM_BASE_ADDR,
- priv->scd_bc_tbls.dma >> 10);
-
- /* Enable DMA channel */
- for (chan = 0; chan < FH50_TCSR_CHNL_NUM ; chan++)
- iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
-
- /* Update FH chicken bits */
- reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
- iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
- reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
-
- iwl_write_prph(priv, IWL50_SCD_QUEUECHAIN_SEL,
- IWL50_SCD_QUEUECHAIN_SEL_ALL(priv->hw_params.max_txq_num));
- iwl_write_prph(priv, IWL50_SCD_AGGR_SEL, 0);
-
- /* initiate the queues */
- for (i = 0; i < priv->hw_params.max_txq_num; i++) {
- iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(i), 0);
- iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
- iwl_write_targ_mem(priv, priv->scd_base_addr +
- IWL50_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
- iwl_write_targ_mem(priv, priv->scd_base_addr +
- IWL50_SCD_CONTEXT_QUEUE_OFFSET(i) +
- sizeof(u32),
- ((SCD_WIN_SIZE <<
- IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
- IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
- ((SCD_FRAME_LIMIT <<
- IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
- IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
- }
-
- iwl_write_prph(priv, IWL50_SCD_INTERRUPT_MASK,
- IWL_MASK(0, priv->hw_params.max_txq_num));
-
- /* Activate all Tx DMA/FIFO channels */
- priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));
-
- iwl5000_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
-
- /* make sure all queue are not stopped */
- memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
- for (i = 0; i < 4; i++)
- atomic_set(&priv->queue_stop_count[i], 0);
-
- /* reset to 0 to enable all the queue first */
- priv->txq_ctx_active_msk = 0;
- /* map qos queues to fifos one-to-one */
- for (i = 0; i < ARRAY_SIZE(iwl5000_default_queue_to_tx_fifo); i++) {
- int ac = iwl5000_default_queue_to_tx_fifo[i];
- iwl_txq_ctx_activate(priv, i);
- iwl5000_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
- }
-
- /*
- * TODO - need to initialize these queues and map them to FIFOs
- * in the loop above, not only mark them as active. We do this
- * because we want the first aggregation queue to be queue #10,
- * but do not use 8 or 9 otherwise yet.
- */
- iwl_txq_ctx_activate(priv, 7);
- iwl_txq_ctx_activate(priv, 8);
- iwl_txq_ctx_activate(priv, 9);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
-
- iwl_send_wimax_coex(priv);
-
- iwl5000_set_Xtal_calib(priv);
- iwl_send_calib_results(priv);
-
- return 0;
-}
-
-int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
+static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
{
if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
- priv->cfg->mod_params->num_of_queues <= IWL50_NUM_QUEUES)
+ priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
priv->cfg->num_of_queues =
priv->cfg->mod_params->num_of_queues;
@@ -694,13 +177,13 @@ int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
priv->hw_params.scd_bc_tbls_size =
priv->cfg->num_of_queues *
- sizeof(struct iwl5000_scd_bc_tbl);
+ sizeof(struct iwlagn_scd_bc_tbl);
priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
priv->hw_params.max_stations = IWL5000_STATION_COUNT;
priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
- priv->hw_params.max_data_size = IWL50_RTC_DATA_SIZE;
- priv->hw_params.max_inst_size = IWL50_RTC_INST_SIZE;
+ priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
+ priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
priv->hw_params.max_bsm_size = 0;
priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
@@ -717,571 +200,61 @@ int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
/* Set initial sensitivity parameters */
/* Set initial calibration set */
- switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) {
- case CSR_HW_REV_TYPE_5150:
- priv->hw_params.sens = &iwl5150_sensitivity;
- priv->hw_params.calib_init_cfg =
- BIT(IWL_CALIB_DC) |
- BIT(IWL_CALIB_LO) |
- BIT(IWL_CALIB_TX_IQ) |
- BIT(IWL_CALIB_BASE_BAND);
-
- break;
- default:
- priv->hw_params.sens = &iwl5000_sensitivity;
- priv->hw_params.calib_init_cfg =
- BIT(IWL_CALIB_XTAL) |
- BIT(IWL_CALIB_LO) |
- BIT(IWL_CALIB_TX_IQ) |
- BIT(IWL_CALIB_TX_IQ_PERD) |
- BIT(IWL_CALIB_BASE_BAND);
- break;
- }
-
- return 0;
-}
-
-/**
- * iwl5000_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
- */
-void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- u16 byte_cnt)
-{
- struct iwl5000_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
- int write_ptr = txq->q.write_ptr;
- int txq_id = txq->q.id;
- u8 sec_ctl = 0;
- u8 sta_id = 0;
- u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
- __le16 bc_ent;
-
- WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
-
- if (txq_id != IWL_CMD_QUEUE_NUM) {
- sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
- sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
-
- switch (sec_ctl & TX_CMD_SEC_MSK) {
- case TX_CMD_SEC_CCM:
- len += CCMP_MIC_LEN;
- break;
- case TX_CMD_SEC_TKIP:
- len += TKIP_ICV_LEN;
- break;
- case TX_CMD_SEC_WEP:
- len += WEP_IV_LEN + WEP_ICV_LEN;
- break;
- }
- }
-
- bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));
-
- scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
-
- if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
- scd_bc_tbl[txq_id].
- tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
-}
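
The removed byte-count helpers pack the frame length (after adding MIC/ICV/IV overhead) and the station id into a single little-endian 16-bit scheduler entry, and mirror low-index entries past the end of the table so the scheduler can read a wrapped window. A host-endian sketch of the packing itself; the table sizes here are illustrative:

#include <stdio.h>
#include <stdint.h>

#define QUEUE_SIZE_MAX	256
#define QUEUE_SIZE_DUP	64	/* first entries are mirrored past the end */

static uint16_t bc_table[QUEUE_SIZE_MAX + QUEUE_SIZE_DUP];

/* Pack length (12 bits) and station id (4 bits) into one scheduler entry,
 * mirroring the bc_ent = (len & 0xFFF) | (sta_id << 12) layout above. */
static void set_byte_count(int write_ptr, uint16_t len, uint8_t sta_id)
{
	uint16_t bc_ent = (uint16_t)((len & 0xFFF) | ((uint16_t)sta_id << 12));

	bc_table[write_ptr] = bc_ent;

	/* The scheduler reads a wrapped window, so low entries are duplicated. */
	if (write_ptr < QUEUE_SIZE_DUP)
		bc_table[QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}

int main(void)
{
	set_byte_count(3, 1500, 5);
	printf("entry 3: 0x%04x (dup: 0x%04x)\n",
	       bc_table[3], bc_table[QUEUE_SIZE_MAX + 3]);
	return 0;
}
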
-
-void iwl5000_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
- struct iwl_tx_queue *txq)
-{
- struct iwl5000_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
- int txq_id = txq->q.id;
- int read_ptr = txq->q.read_ptr;
- u8 sta_id = 0;
- __le16 bc_ent;
-
- WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
-
- if (txq_id != IWL_CMD_QUEUE_NUM)
- sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;
-
- bc_ent = cpu_to_le16(1 | (sta_id << 12));
- scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
-
- if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
- scd_bc_tbl[txq_id].
- tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
-}
-
-static int iwl5000_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
- u16 txq_id)
-{
- u32 tbl_dw_addr;
- u32 tbl_dw;
- u16 scd_q2ratid;
-
- scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
-
- tbl_dw_addr = priv->scd_base_addr +
- IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
-
- tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
-
- if (txq_id & 0x1)
- tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
- else
- tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
-
- iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
-
- return 0;
-}
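
iwl5000_tx_queue_set_q2ratid(), removed here, stores the RA/TID mapping for two queues per 32-bit translate-table word, using the queue number's low bit to choose the upper or lower half. A sketch of that packing:

#include <stdio.h>
#include <stdint.h>

/* Each 32-bit translate-table word holds two queue entries: odd queues use
 * the upper 16 bits, even queues the lower 16 bits, as in the removed helper. */
static uint32_t set_q2ratid(uint32_t tbl_dw, unsigned txq_id, uint16_t ra_tid)
{
	if (txq_id & 0x1)
		return ((uint32_t)ra_tid << 16) | (tbl_dw & 0x0000FFFF);
	return ra_tid | (tbl_dw & 0xFFFF0000);
}

int main(void)
{
	uint32_t word = 0;

	word = set_q2ratid(word, 10, 0x0123);	/* even queue -> low half */
	word = set_q2ratid(word, 11, 0x0456);	/* odd queue  -> high half */
	printf("translate word: 0x%08x\n", word);
	return 0;
}
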
-static void iwl5000_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id)
-{
- /* Simply stop the queue, but don't change any configuration;
- * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
- iwl_write_prph(priv,
- IWL50_SCD_QUEUE_STATUS_BITS(txq_id),
- (0 << IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
- (1 << IWL50_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
-}
-
-int iwl5000_txq_agg_enable(struct iwl_priv *priv, int txq_id,
- int tx_fifo, int sta_id, int tid, u16 ssn_idx)
-{
- unsigned long flags;
- u16 ra_tid;
-
- if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) ||
- (IWL50_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
- <= txq_id)) {
- IWL_WARN(priv,
- "queue number out of range: %d, must be %d to %d\n",
- txq_id, IWL50_FIRST_AMPDU_QUEUE,
- IWL50_FIRST_AMPDU_QUEUE +
- priv->cfg->num_of_ampdu_queues - 1);
- return -EINVAL;
- }
-
- ra_tid = BUILD_RAxTID(sta_id, tid);
-
- /* Modify device's station table to Tx this TID */
- iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Stop this Tx queue before configuring it */
- iwl5000_tx_queue_stop_scheduler(priv, txq_id);
-
- /* Map receiver-address / traffic-ID to this queue */
- iwl5000_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
-
- /* Set this queue as a chain-building queue */
- iwl_set_bits_prph(priv, IWL50_SCD_QUEUECHAIN_SEL, (1<<txq_id));
-
- /* enable aggregations for the queue */
- iwl_set_bits_prph(priv, IWL50_SCD_AGGR_SEL, (1<<txq_id));
-
- /* Place first TFD at index corresponding to start sequence number.
- * Assumes that ssn_idx is valid (!= 0xFFF) */
- priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
- priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
- iwl5000_set_wr_ptrs(priv, txq_id, ssn_idx);
-
- /* Set up Tx window size and frame limit for this queue */
- iwl_write_targ_mem(priv, priv->scd_base_addr +
- IWL50_SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
- sizeof(u32),
- ((SCD_WIN_SIZE <<
- IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
- IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
- ((SCD_FRAME_LIMIT <<
- IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
- IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
-
- iwl_set_bits_prph(priv, IWL50_SCD_INTERRUPT_MASK, (1 << txq_id));
-
- /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
- iwl5000_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
-
- spin_unlock_irqrestore(&priv->lock, flags);
+ priv->hw_params.sens = &iwl5000_sensitivity;
+ priv->hw_params.calib_init_cfg =
+ BIT(IWL_CALIB_XTAL) |
+ BIT(IWL_CALIB_LO) |
+ BIT(IWL_CALIB_TX_IQ) |
+ BIT(IWL_CALIB_TX_IQ_PERD) |
+ BIT(IWL_CALIB_BASE_BAND);
return 0;
}
-int iwl5000_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
- u16 ssn_idx, u8 tx_fifo)
+static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
{
- if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) ||
- (IWL50_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
- <= txq_id)) {
- IWL_ERR(priv,
- "queue number out of range: %d, must be %d to %d\n",
- txq_id, IWL50_FIRST_AMPDU_QUEUE,
- IWL50_FIRST_AMPDU_QUEUE +
- priv->cfg->num_of_ampdu_queues - 1);
- return -EINVAL;
- }
-
- iwl5000_tx_queue_stop_scheduler(priv, txq_id);
-
- iwl_clear_bits_prph(priv, IWL50_SCD_AGGR_SEL, (1 << txq_id));
-
- priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
- priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
- /* supposes that ssn_idx is valid (!= 0xFFF) */
- iwl5000_set_wr_ptrs(priv, txq_id, ssn_idx);
-
- iwl_clear_bits_prph(priv, IWL50_SCD_INTERRUPT_MASK, (1 << txq_id));
- iwl_txq_ctx_deactivate(priv, txq_id);
- iwl5000_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
-
- return 0;
-}
-
-u16 iwl5000_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
-{
- u16 size = (u16)sizeof(struct iwl_addsta_cmd);
- struct iwl_addsta_cmd *addsta = (struct iwl_addsta_cmd *)data;
- memcpy(addsta, cmd, size);
- /* resrved in 5000 */
- addsta->rate_n_flags = cpu_to_le16(0);
- return size;
-}
-
-
-/*
- * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
- * must be called under priv->lock and mac access
- */
-void iwl5000_txq_set_sched(struct iwl_priv *priv, u32 mask)
-{
- iwl_write_prph(priv, IWL50_SCD_TXFACT, mask);
-}
-
-
-static inline u32 iwl5000_get_scd_ssn(struct iwl5000_tx_resp *tx_resp)
-{
- return le32_to_cpup((__le32 *)&tx_resp->status +
- tx_resp->frame_count) & MAX_SN;
-}
-
-static int iwl5000_tx_status_reply_tx(struct iwl_priv *priv,
- struct iwl_ht_agg *agg,
- struct iwl5000_tx_resp *tx_resp,
- int txq_id, u16 start_idx)
-{
- u16 status;
- struct agg_tx_status *frame_status = &tx_resp->status;
- struct ieee80211_tx_info *info = NULL;
- struct ieee80211_hdr *hdr = NULL;
- u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
- int i, sh, idx;
- u16 seq;
-
- if (agg->wait_for_ba)
- IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");
-
- agg->frame_count = tx_resp->frame_count;
- agg->start_idx = start_idx;
- agg->rate_n_flags = rate_n_flags;
- agg->bitmap = 0;
-
- /* # frames attempted by Tx command */
- if (agg->frame_count == 1) {
- /* Only one frame was attempted; no block-ack will arrive */
- status = le16_to_cpu(frame_status[0].status);
- idx = start_idx;
-
- /* FIXME: code repetition */
- IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
- agg->frame_count, agg->start_idx, idx);
-
- info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]);
- info->status.rates[0].count = tx_resp->failure_frame + 1;
- info->flags &= ~IEEE80211_TX_CTL_AMPDU;
- info->flags |= iwl_tx_status_to_mac80211(status);
- iwl_hwrate_to_tx_control(priv, rate_n_flags, info);
-
- /* FIXME: code repetition end */
-
- IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
- status & 0xff, tx_resp->failure_frame);
- IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags);
-
- agg->wait_for_ba = 0;
- } else {
- /* Two or more frames were attempted; expect block-ack */
- u64 bitmap = 0;
- int start = agg->start_idx;
-
- /* Construct bit-map of pending frames within Tx window */
- for (i = 0; i < agg->frame_count; i++) {
- u16 sc;
- status = le16_to_cpu(frame_status[i].status);
- seq = le16_to_cpu(frame_status[i].sequence);
- idx = SEQ_TO_INDEX(seq);
- txq_id = SEQ_TO_QUEUE(seq);
-
- if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
- AGG_TX_STATE_ABORT_MSK))
- continue;
+ if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
+ priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
+ priv->cfg->num_of_queues =
+ priv->cfg->mod_params->num_of_queues;
- IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
- agg->frame_count, txq_id, idx);
+ priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
+ priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
+ priv->hw_params.scd_bc_tbls_size =
+ priv->cfg->num_of_queues *
+ sizeof(struct iwlagn_scd_bc_tbl);
+ priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
+ priv->hw_params.max_stations = IWL5000_STATION_COUNT;
+ priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
- hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
- if (!hdr) {
- IWL_ERR(priv,
- "BUG_ON idx doesn't point to valid skb"
- " idx=%d, txq_id=%d\n", idx, txq_id);
- return -1;
- }
+ priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
+ priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
- sc = le16_to_cpu(hdr->seq_ctrl);
- if (idx != (SEQ_TO_SN(sc) & 0xff)) {
- IWL_ERR(priv,
- "BUG_ON idx doesn't match seq control"
- " idx=%d, seq_idx=%d, seq=%d\n",
- idx, SEQ_TO_SN(sc),
- hdr->seq_ctrl);
- return -1;
- }
+ priv->hw_params.max_bsm_size = 0;
+ priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
+ BIT(IEEE80211_BAND_5GHZ);
+ priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
- IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
- i, idx, SEQ_TO_SN(sc));
+ priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
+ priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
+ priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
+ priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
- sh = idx - start;
- if (sh > 64) {
- sh = (start - idx) + 0xff;
- bitmap = bitmap << sh;
- sh = 0;
- start = idx;
- } else if (sh < -64)
- sh = 0xff - (start - idx);
- else if (sh < 0) {
- sh = start - idx;
- start = idx;
- bitmap = bitmap << sh;
- sh = 0;
- }
- bitmap |= 1ULL << sh;
- IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
- start, (unsigned long long)bitmap);
- }
+ if (priv->cfg->ops->lib->temp_ops.set_ct_kill)
+ priv->cfg->ops->lib->temp_ops.set_ct_kill(priv);
- agg->bitmap = bitmap;
- agg->start_idx = start;
- IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
- agg->frame_count, agg->start_idx,
- (unsigned long long)agg->bitmap);
+ /* Set initial sensitivity parameters */
+ /* Set initial calibration set */
+ priv->hw_params.sens = &iwl5150_sensitivity;
+ priv->hw_params.calib_init_cfg =
+ BIT(IWL_CALIB_DC) |
+ BIT(IWL_CALIB_LO) |
+ BIT(IWL_CALIB_TX_IQ) |
+ BIT(IWL_CALIB_BASE_BAND);
- if (bitmap)
- agg->wait_for_ba = 1;
- }
return 0;
}
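
The rewritten hw_set_hw_params helpers now hard-code one calibration set per family: the 5000 set enables crystal and periodic TX IQ calibration, while the 5150 set swaps those for DC calibration. A sketch of building and inspecting such a calibration bitmask; the step numbering below is illustrative, not the driver's enum:

#include <stdio.h>

enum calib_step {
	CALIB_XTAL,
	CALIB_DC,
	CALIB_LO,
	CALIB_TX_IQ,
	CALIB_TX_IQ_PERD,
	CALIB_BASE_BAND,
	CALIB_MAX,
};

static const char * const calib_names[CALIB_MAX] = {
	"xtal", "dc", "lo", "tx_iq", "tx_iq_perd", "base_band",
};

#define BIT(n)	(1UL << (n))

int main(void)
{
	/* 5000-style set: crystal calibration, no DC calibration. */
	unsigned long cfg_5000 = BIT(CALIB_XTAL) | BIT(CALIB_LO) |
				 BIT(CALIB_TX_IQ) | BIT(CALIB_TX_IQ_PERD) |
				 BIT(CALIB_BASE_BAND);
	/* 5150-style set: DC calibration instead of crystal/periodic TX IQ. */
	unsigned long cfg_5150 = BIT(CALIB_DC) | BIT(CALIB_LO) |
				 BIT(CALIB_TX_IQ) | BIT(CALIB_BASE_BAND);
	int i;

	for (i = 0; i < CALIB_MAX; i++)
		printf("%-11s 5000:%d 5150:%d\n", calib_names[i],
		       !!(cfg_5000 & BIT(i)), !!(cfg_5150 & BIT(i)));
	return 0;
}
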
-static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u16 sequence = le16_to_cpu(pkt->hdr.sequence);
- int txq_id = SEQ_TO_QUEUE(sequence);
- int index = SEQ_TO_INDEX(sequence);
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct ieee80211_tx_info *info;
- struct iwl5000_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
- u32 status = le16_to_cpu(tx_resp->status.status);
- int tid;
- int sta_id;
- int freed;
-
- if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
- IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
- "is out of range [0-%d] %d %d\n", txq_id,
- index, txq->q.n_bd, txq->q.write_ptr,
- txq->q.read_ptr);
- return;
- }
-
- info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]);
- memset(&info->status, 0, sizeof(info->status));
-
- tid = (tx_resp->ra_tid & IWL50_TX_RES_TID_MSK) >> IWL50_TX_RES_TID_POS;
- sta_id = (tx_resp->ra_tid & IWL50_TX_RES_RA_MSK) >> IWL50_TX_RES_RA_POS;
-
- if (txq->sched_retry) {
- const u32 scd_ssn = iwl5000_get_scd_ssn(tx_resp);
- struct iwl_ht_agg *agg = NULL;
-
- agg = &priv->stations[sta_id].tid[tid].agg;
-
- iwl5000_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
-
- /* check if BAR is needed */
- if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
- info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
-
- if (txq->q.read_ptr != (scd_ssn & 0xff)) {
- index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
- IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim "
- "scd_ssn=%d idx=%d txq=%d swq=%d\n",
- scd_ssn , index, txq_id, txq->swq_id);
-
- freed = iwl_tx_queue_reclaim(priv, txq_id, index);
- iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
-
- if (priv->mac80211_registered &&
- (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
- (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) {
- if (agg->state == IWL_AGG_OFF)
- iwl_wake_queue(priv, txq_id);
- else
- iwl_wake_queue(priv, txq->swq_id);
- }
- }
- } else {
- BUG_ON(txq_id != txq->swq_id);
-
- info->status.rates[0].count = tx_resp->failure_frame + 1;
- info->flags |= iwl_tx_status_to_mac80211(status);
- iwl_hwrate_to_tx_control(priv,
- le32_to_cpu(tx_resp->rate_n_flags),
- info);
-
- IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags "
- "0x%x retries %d\n",
- txq_id,
- iwl_get_tx_fail_reason(status), status,
- le32_to_cpu(tx_resp->rate_n_flags),
- tx_resp->failure_frame);
-
- freed = iwl_tx_queue_reclaim(priv, txq_id, index);
- iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
-
- if (priv->mac80211_registered &&
- (iwl_queue_space(&txq->q) > txq->q.low_mark))
- iwl_wake_queue(priv, txq_id);
- }
-
- iwl_txq_check_empty(priv, sta_id, tid, txq_id);
-
- if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
- IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
-}
-
-/* Currently 5000 is the superset of everything */
-u16 iwl5000_get_hcmd_size(u8 cmd_id, u16 len)
-{
- return len;
-}
-
-void iwl5000_setup_deferred_work(struct iwl_priv *priv)
-{
- /* in 5000 the tx power calibration is done in uCode */
- priv->disable_tx_power_cal = 1;
-}
-
-void iwl5000_rx_handler_setup(struct iwl_priv *priv)
-{
- /* init calibration handlers */
- priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
- iwl5000_rx_calib_result;
- priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] =
- iwl5000_rx_calib_complete;
- priv->rx_handlers[REPLY_TX] = iwl5000_rx_reply_tx;
-}
-
-
-int iwl5000_hw_valid_rtc_data_addr(u32 addr)
-{
- return (addr >= IWL50_RTC_DATA_LOWER_BOUND) &&
- (addr < IWL50_RTC_DATA_UPPER_BOUND);
-}
-
-static int iwl5000_send_rxon_assoc(struct iwl_priv *priv)
-{
- int ret = 0;
- struct iwl5000_rxon_assoc_cmd rxon_assoc;
- const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
- const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;
-
- if ((rxon1->flags == rxon2->flags) &&
- (rxon1->filter_flags == rxon2->filter_flags) &&
- (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
- (rxon1->ofdm_ht_single_stream_basic_rates ==
- rxon2->ofdm_ht_single_stream_basic_rates) &&
- (rxon1->ofdm_ht_dual_stream_basic_rates ==
- rxon2->ofdm_ht_dual_stream_basic_rates) &&
- (rxon1->ofdm_ht_triple_stream_basic_rates ==
- rxon2->ofdm_ht_triple_stream_basic_rates) &&
- (rxon1->acquisition_data == rxon2->acquisition_data) &&
- (rxon1->rx_chain == rxon2->rx_chain) &&
- (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
- IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
- return 0;
- }
-
- rxon_assoc.flags = priv->staging_rxon.flags;
- rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
- rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
- rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
- rxon_assoc.reserved1 = 0;
- rxon_assoc.reserved2 = 0;
- rxon_assoc.reserved3 = 0;
- rxon_assoc.ofdm_ht_single_stream_basic_rates =
- priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
- rxon_assoc.ofdm_ht_dual_stream_basic_rates =
- priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
- rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;
- rxon_assoc.ofdm_ht_triple_stream_basic_rates =
- priv->staging_rxon.ofdm_ht_triple_stream_basic_rates;
- rxon_assoc.acquisition_data = priv->staging_rxon.acquisition_data;
-
- ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
- sizeof(rxon_assoc), &rxon_assoc, NULL);
- if (ret)
- return ret;
-
- return ret;
-}
-int iwl5000_send_tx_power(struct iwl_priv *priv)
-{
- struct iwl5000_tx_power_dbm_cmd tx_power_cmd;
- u8 tx_ant_cfg_cmd;
-
- /* half dBm need to multiply */
- tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);
-
- if (priv->tx_power_lmt_in_half_dbm &&
- priv->tx_power_lmt_in_half_dbm < tx_power_cmd.global_lmt) {
- /*
- * For the newer devices which using enhanced/extend tx power
- * table in EEPROM, the format is in half dBm. driver need to
- * convert to dBm format before report to mac80211.
- * By doing so, there is a possibility of 1/2 dBm resolution
- * lost. driver will perform "round-up" operation before
- * reporting, but it will cause 1/2 dBm tx power over the
- * regulatory limit. Perform the checking here, if the
- * "tx_power_user_lmt" is higher than EEPROM value (in
- * half-dBm format), lower the tx power based on EEPROM
- */
- tx_power_cmd.global_lmt = priv->tx_power_lmt_in_half_dbm;
- }
- tx_power_cmd.flags = IWL50_TX_POWER_NO_CLOSED;
- tx_power_cmd.srv_chan_lmt = IWL50_TX_POWER_AUTO;
-
- if (IWL_UCODE_API(priv->ucode_ver) == 1)
- tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1;
- else
- tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
-
- return iwl_send_cmd_pdu_async(priv, tx_ant_cfg_cmd,
- sizeof(tx_power_cmd), &tx_power_cmd,
- NULL);
-}
-
-void iwl5000_temperature(struct iwl_priv *priv)
-{
- /* store temperature from statistics (in Celsius) */
- priv->temperature = le32_to_cpu(priv->statistics.general.temperature);
- iwl_tt_handler(priv);
-}
-
static void iwl5150_temperature(struct iwl_priv *priv)
{
u32 vt = 0;
@@ -1294,100 +267,6 @@ static void iwl5150_temperature(struct iwl_priv *priv)
iwl_tt_handler(priv);
}
-/* Calc max signal level (dBm) among 3 possible receivers */
-int iwl5000_calc_rssi(struct iwl_priv *priv,
- struct iwl_rx_phy_res *rx_resp)
-{
- /* data from PHY/DSP regarding signal strength, etc.,
- * contents are always there, not configurable by host
- */
- struct iwl5000_non_cfg_phy *ncphy =
- (struct iwl5000_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
- u32 val, rssi_a, rssi_b, rssi_c, max_rssi;
- u8 agc;
-
- val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_AGC_IDX]);
- agc = (val & IWL50_OFDM_AGC_MSK) >> IWL50_OFDM_AGC_BIT_POS;
-
- /* Find max rssi among 3 possible receivers.
- * These values are measured by the digital signal processor (DSP).
- * They should stay fairly constant even as the signal strength varies,
- * if the radio's automatic gain control (AGC) is working right.
- * AGC value (see below) will provide the "interesting" info.
- */
- val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_AB_IDX]);
- rssi_a = (val & IWL50_OFDM_RSSI_A_MSK) >> IWL50_OFDM_RSSI_A_BIT_POS;
- rssi_b = (val & IWL50_OFDM_RSSI_B_MSK) >> IWL50_OFDM_RSSI_B_BIT_POS;
- val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_C_IDX]);
- rssi_c = (val & IWL50_OFDM_RSSI_C_MSK) >> IWL50_OFDM_RSSI_C_BIT_POS;
-
- max_rssi = max_t(u32, rssi_a, rssi_b);
- max_rssi = max_t(u32, max_rssi, rssi_c);
-
- IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
- rssi_a, rssi_b, rssi_c, max_rssi, agc);
-
- /* dBm = max_rssi dB - agc dB - constant.
- * Higher AGC (higher radio gain) means lower signal. */
- return max_rssi - agc - IWL49_RSSI_OFFSET;
-}
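
The removed iwl5000_calc_rssi() takes the strongest of the three per-chain RSSI readings and subtracts the AGC value plus a fixed offset to obtain dBm (44 in the old IWL49_RSSI_OFFSET define deleted earlier in this diff; the agn path uses IWLAGN_RSSI_OFFSET instead). A sketch of that final arithmetic, assuming the 44 dB offset:

#include <stdio.h>

#define RSSI_OFFSET	44	/* fixed dB offset; assumed value for illustration */

/* dBm = strongest per-chain RSSI - AGC - constant offset:
 * higher AGC (more radio gain) means a weaker incoming signal. */
static int calc_rssi_dbm(unsigned rssi_a, unsigned rssi_b, unsigned rssi_c,
			 unsigned agc)
{
	unsigned max_rssi = rssi_a;

	if (rssi_b > max_rssi)
		max_rssi = rssi_b;
	if (rssi_c > max_rssi)
		max_rssi = rssi_c;

	return (int)max_rssi - (int)agc - RSSI_OFFSET;
}

int main(void)
{
	printf("rssi: %d dBm\n", calc_rssi_dbm(97, 102, 95, 120));
	return 0;
}
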
-
-static int iwl5000_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
-{
- struct iwl_tx_ant_config_cmd tx_ant_cmd = {
- .valid = cpu_to_le32(valid_tx_ant),
- };
-
- if (IWL_UCODE_API(priv->ucode_ver) > 1) {
- IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant);
- return iwl_send_cmd_pdu(priv, TX_ANT_CONFIGURATION_CMD,
- sizeof(struct iwl_tx_ant_config_cmd),
- &tx_ant_cmd);
- } else {
- IWL_DEBUG_HC(priv, "TX_ANT_CONFIGURATION_CMD not supported\n");
- return -EOPNOTSUPP;
- }
-}
-
-
-#define IWL5000_UCODE_GET(item) \
-static u32 iwl5000_ucode_get_##item(const struct iwl_ucode_header *ucode,\
- u32 api_ver) \
-{ \
- if (api_ver <= 2) \
- return le32_to_cpu(ucode->u.v1.item); \
- return le32_to_cpu(ucode->u.v2.item); \
-}
-
-static u32 iwl5000_ucode_get_header_size(u32 api_ver)
-{
- if (api_ver <= 2)
- return UCODE_HEADER_SIZE(1);
- return UCODE_HEADER_SIZE(2);
-}
-
-static u32 iwl5000_ucode_get_build(const struct iwl_ucode_header *ucode,
- u32 api_ver)
-{
- if (api_ver <= 2)
- return 0;
- return le32_to_cpu(ucode->u.v2.build);
-}
-
-static u8 *iwl5000_ucode_get_data(const struct iwl_ucode_header *ucode,
- u32 api_ver)
-{
- if (api_ver <= 2)
- return (u8 *) ucode->u.v1.data;
- return (u8 *) ucode->u.v2.data;
-}
-
-IWL5000_UCODE_GET(inst_size);
-IWL5000_UCODE_GET(data_size);
-IWL5000_UCODE_GET(init_size);
-IWL5000_UCODE_GET(init_data_size);
-IWL5000_UCODE_GET(boot_size);
-
static int iwl5000_hw_channel_switch(struct iwl_priv *priv, u16 channel)
{
struct iwl5000_channel_switch_cmd cmd;
@@ -1420,54 +299,27 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv, u16 channel)
return iwl_send_cmd_sync(priv, &hcmd);
}
-struct iwl_hcmd_ops iwl5000_hcmd = {
- .rxon_assoc = iwl5000_send_rxon_assoc,
- .commit_rxon = iwl_commit_rxon,
- .set_rxon_chain = iwl_set_rxon_chain,
- .set_tx_ant = iwl5000_send_tx_ant_config,
-};
-
-struct iwl_hcmd_utils_ops iwl5000_hcmd_utils = {
- .get_hcmd_size = iwl5000_get_hcmd_size,
- .build_addsta_hcmd = iwl5000_build_addsta_hcmd,
- .gain_computation = iwl5000_gain_computation,
- .chain_noise_reset = iwl5000_chain_noise_reset,
- .rts_tx_cmd_flag = iwl5000_rts_tx_cmd_flag,
- .calc_rssi = iwl5000_calc_rssi,
-};
-
-struct iwl_ucode_ops iwl5000_ucode = {
- .get_header_size = iwl5000_ucode_get_header_size,
- .get_build = iwl5000_ucode_get_build,
- .get_inst_size = iwl5000_ucode_get_inst_size,
- .get_data_size = iwl5000_ucode_get_data_size,
- .get_init_size = iwl5000_ucode_get_init_size,
- .get_init_data_size = iwl5000_ucode_get_init_data_size,
- .get_boot_size = iwl5000_ucode_get_boot_size,
- .get_data = iwl5000_ucode_get_data,
-};
-
-struct iwl_lib_ops iwl5000_lib = {
+static struct iwl_lib_ops iwl5000_lib = {
.set_hw_params = iwl5000_hw_set_hw_params,
- .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
- .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl,
- .txq_set_sched = iwl5000_txq_set_sched,
- .txq_agg_enable = iwl5000_txq_agg_enable,
- .txq_agg_disable = iwl5000_txq_agg_disable,
+ .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
+ .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
+ .txq_set_sched = iwlagn_txq_set_sched,
+ .txq_agg_enable = iwlagn_txq_agg_enable,
+ .txq_agg_disable = iwlagn_txq_agg_disable,
.txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
.txq_free_tfd = iwl_hw_txq_free_tfd,
.txq_init = iwl_hw_tx_queue_init,
- .rx_handler_setup = iwl5000_rx_handler_setup,
- .setup_deferred_work = iwl5000_setup_deferred_work,
- .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
+ .rx_handler_setup = iwlagn_rx_handler_setup,
+ .setup_deferred_work = iwlagn_setup_deferred_work,
+ .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
.dump_csr = iwl_dump_csr,
.dump_fh = iwl_dump_fh,
- .load_ucode = iwl5000_load_ucode,
- .init_alive_start = iwl5000_init_alive_start,
- .alive_notify = iwl5000_alive_notify,
- .send_tx_power = iwl5000_send_tx_power,
+ .load_ucode = iwlagn_load_ucode,
+ .init_alive_start = iwlagn_init_alive_start,
+ .alive_notify = iwlagn_alive_notify,
+ .send_tx_power = iwlagn_send_tx_power,
.update_chain_flags = iwl_update_chain_flags,
.set_channel_switch = iwl5000_hw_channel_switch,
.apm_ops = {
@@ -1478,50 +330,58 @@ struct iwl_lib_ops iwl5000_lib = {
},
.eeprom_ops = {
.regulatory_bands = {
- EEPROM_5000_REG_BAND_1_CHANNELS,
- EEPROM_5000_REG_BAND_2_CHANNELS,
- EEPROM_5000_REG_BAND_3_CHANNELS,
- EEPROM_5000_REG_BAND_4_CHANNELS,
- EEPROM_5000_REG_BAND_5_CHANNELS,
- EEPROM_5000_REG_BAND_24_HT40_CHANNELS,
- EEPROM_5000_REG_BAND_52_HT40_CHANNELS
+ EEPROM_REG_BAND_1_CHANNELS,
+ EEPROM_REG_BAND_2_CHANNELS,
+ EEPROM_REG_BAND_3_CHANNELS,
+ EEPROM_REG_BAND_4_CHANNELS,
+ EEPROM_REG_BAND_5_CHANNELS,
+ EEPROM_REG_BAND_24_HT40_CHANNELS,
+ EEPROM_REG_BAND_52_HT40_CHANNELS
},
.verify_signature = iwlcore_eeprom_verify_signature,
.acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
.release_semaphore = iwlcore_eeprom_release_semaphore,
- .calib_version = iwl5000_eeprom_calib_version,
- .query_addr = iwl5000_eeprom_query_addr,
+ .calib_version = iwlagn_eeprom_calib_version,
+ .query_addr = iwlagn_eeprom_query_addr,
},
.post_associate = iwl_post_associate,
.isr = iwl_isr_ict,
.config_ap = iwl_config_ap,
.temp_ops = {
- .temperature = iwl5000_temperature,
+ .temperature = iwlagn_temperature,
.set_ct_kill = iwl5000_set_ct_threshold,
},
- .add_bcast_station = iwl_add_bcast_station,
+ .manage_ibss_station = iwlagn_manage_ibss_station,
+ .debugfs_ops = {
+ .rx_stats_read = iwl_ucode_rx_stats_read,
+ .tx_stats_read = iwl_ucode_tx_stats_read,
+ .general_stats_read = iwl_ucode_general_stats_read,
+ },
+ .recover_from_tx_stall = iwl_bg_monitor_recover,
+ .check_plcp_health = iwl_good_plcp_health,
+ .check_ack_health = iwl_good_ack_health,
};
static struct iwl_lib_ops iwl5150_lib = {
- .set_hw_params = iwl5000_hw_set_hw_params,
- .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
- .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl,
- .txq_set_sched = iwl5000_txq_set_sched,
- .txq_agg_enable = iwl5000_txq_agg_enable,
- .txq_agg_disable = iwl5000_txq_agg_disable,
+ .set_hw_params = iwl5150_hw_set_hw_params,
+ .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
+ .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
+ .txq_set_sched = iwlagn_txq_set_sched,
+ .txq_agg_enable = iwlagn_txq_agg_enable,
+ .txq_agg_disable = iwlagn_txq_agg_disable,
.txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
.txq_free_tfd = iwl_hw_txq_free_tfd,
.txq_init = iwl_hw_tx_queue_init,
- .rx_handler_setup = iwl5000_rx_handler_setup,
- .setup_deferred_work = iwl5000_setup_deferred_work,
- .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
+ .rx_handler_setup = iwlagn_rx_handler_setup,
+ .setup_deferred_work = iwlagn_setup_deferred_work,
+ .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
.dump_csr = iwl_dump_csr,
- .load_ucode = iwl5000_load_ucode,
- .init_alive_start = iwl5000_init_alive_start,
- .alive_notify = iwl5000_alive_notify,
- .send_tx_power = iwl5000_send_tx_power,
+ .load_ucode = iwlagn_load_ucode,
+ .init_alive_start = iwlagn_init_alive_start,
+ .alive_notify = iwlagn_alive_notify,
+ .send_tx_power = iwlagn_send_tx_power,
.update_chain_flags = iwl_update_chain_flags,
.set_channel_switch = iwl5000_hw_channel_switch,
.apm_ops = {
@@ -1532,19 +392,19 @@ static struct iwl_lib_ops iwl5150_lib = {
},
.eeprom_ops = {
.regulatory_bands = {
- EEPROM_5000_REG_BAND_1_CHANNELS,
- EEPROM_5000_REG_BAND_2_CHANNELS,
- EEPROM_5000_REG_BAND_3_CHANNELS,
- EEPROM_5000_REG_BAND_4_CHANNELS,
- EEPROM_5000_REG_BAND_5_CHANNELS,
- EEPROM_5000_REG_BAND_24_HT40_CHANNELS,
- EEPROM_5000_REG_BAND_52_HT40_CHANNELS
+ EEPROM_REG_BAND_1_CHANNELS,
+ EEPROM_REG_BAND_2_CHANNELS,
+ EEPROM_REG_BAND_3_CHANNELS,
+ EEPROM_REG_BAND_4_CHANNELS,
+ EEPROM_REG_BAND_5_CHANNELS,
+ EEPROM_REG_BAND_24_HT40_CHANNELS,
+ EEPROM_REG_BAND_52_HT40_CHANNELS
},
.verify_signature = iwlcore_eeprom_verify_signature,
.acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
.release_semaphore = iwlcore_eeprom_release_semaphore,
- .calib_version = iwl5000_eeprom_calib_version,
- .query_addr = iwl5000_eeprom_query_addr,
+ .calib_version = iwlagn_eeprom_calib_version,
+ .query_addr = iwlagn_eeprom_query_addr,
},
.post_associate = iwl_post_associate,
.isr = iwl_isr_ict,
@@ -1553,45 +413,44 @@ static struct iwl_lib_ops iwl5150_lib = {
.temperature = iwl5150_temperature,
.set_ct_kill = iwl5150_set_ct_threshold,
},
- .add_bcast_station = iwl_add_bcast_station,
+ .manage_ibss_station = iwlagn_manage_ibss_station,
+ .debugfs_ops = {
+ .rx_stats_read = iwl_ucode_rx_stats_read,
+ .tx_stats_read = iwl_ucode_tx_stats_read,
+ .general_stats_read = iwl_ucode_general_stats_read,
+ },
+ .recover_from_tx_stall = iwl_bg_monitor_recover,
+ .check_plcp_health = iwl_good_plcp_health,
+ .check_ack_health = iwl_good_ack_health,
};
static const struct iwl_ops iwl5000_ops = {
- .ucode = &iwl5000_ucode,
.lib = &iwl5000_lib,
- .hcmd = &iwl5000_hcmd,
- .utils = &iwl5000_hcmd_utils,
+ .hcmd = &iwlagn_hcmd,
+ .utils = &iwlagn_hcmd_utils,
.led = &iwlagn_led_ops,
};
static const struct iwl_ops iwl5150_ops = {
- .ucode = &iwl5000_ucode,
.lib = &iwl5150_lib,
- .hcmd = &iwl5000_hcmd,
- .utils = &iwl5000_hcmd_utils,
+ .hcmd = &iwlagn_hcmd,
+ .utils = &iwlagn_hcmd_utils,
.led = &iwlagn_led_ops,
};
-struct iwl_mod_params iwl50_mod_params = {
- .amsdu_size_8K = 1,
- .restart_fw = 1,
- /* the rest are 0 by default */
-};
-
-
struct iwl_cfg iwl5300_agn_cfg = {
- .name = "5300AGN",
+ .name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
.fw_name_pre = IWL5000_FW_PRE,
.ucode_api_max = IWL5000_UCODE_API_MAX,
.ucode_api_min = IWL5000_UCODE_API_MIN,
.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
.ops = &iwl5000_ops,
- .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
+ .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
.eeprom_ver = EEPROM_5000_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_ABC,
.valid_rx_ant = ANT_ABC,
.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -1603,21 +462,26 @@ struct iwl_cfg iwl5300_agn_cfg = {
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 512,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
struct iwl_cfg iwl5100_bgn_cfg = {
- .name = "5100BGN",
+ .name = "Intel(R) WiFi Link 5100 BGN",
.fw_name_pre = IWL5000_FW_PRE,
.ucode_api_max = IWL5000_UCODE_API_MAX,
.ucode_api_min = IWL5000_UCODE_API_MIN,
.sku = IWL_SKU_G|IWL_SKU_N,
.ops = &iwl5000_ops,
- .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
+ .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
.eeprom_ver = EEPROM_5000_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_B,
.valid_rx_ant = ANT_AB,
.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -1629,21 +493,26 @@ struct iwl_cfg iwl5100_bgn_cfg = {
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 512,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
struct iwl_cfg iwl5100_abg_cfg = {
- .name = "5100ABG",
+ .name = "Intel(R) WiFi Link 5100 ABG",
.fw_name_pre = IWL5000_FW_PRE,
.ucode_api_max = IWL5000_UCODE_API_MAX,
.ucode_api_min = IWL5000_UCODE_API_MIN,
.sku = IWL_SKU_A|IWL_SKU_G,
.ops = &iwl5000_ops,
- .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
+ .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
.eeprom_ver = EEPROM_5000_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_B,
.valid_rx_ant = ANT_AB,
.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -1653,21 +522,26 @@ struct iwl_cfg iwl5100_abg_cfg = {
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 512,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
struct iwl_cfg iwl5100_agn_cfg = {
- .name = "5100AGN",
+ .name = "Intel(R) WiFi Link 5100 AGN",
.fw_name_pre = IWL5000_FW_PRE,
.ucode_api_max = IWL5000_UCODE_API_MAX,
.ucode_api_min = IWL5000_UCODE_API_MIN,
.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
.ops = &iwl5000_ops,
- .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
+ .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
.eeprom_ver = EEPROM_5000_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_B,
.valid_rx_ant = ANT_AB,
.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -1679,21 +553,26 @@ struct iwl_cfg iwl5100_agn_cfg = {
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 512,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
struct iwl_cfg iwl5350_agn_cfg = {
- .name = "5350AGN",
+ .name = "Intel(R) WiMAX/WiFi Link 5350 AGN",
.fw_name_pre = IWL5000_FW_PRE,
.ucode_api_max = IWL5000_UCODE_API_MAX,
.ucode_api_min = IWL5000_UCODE_API_MIN,
.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
.ops = &iwl5000_ops,
- .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
+ .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
.eeprom_ver = EEPROM_5050_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_ABC,
.valid_rx_ant = ANT_ABC,
.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -1705,21 +584,26 @@ struct iwl_cfg iwl5350_agn_cfg = {
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 512,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
struct iwl_cfg iwl5150_agn_cfg = {
- .name = "5150AGN",
+ .name = "Intel(R) WiMAX/WiFi Link 5150 AGN",
.fw_name_pre = IWL5150_FW_PRE,
.ucode_api_max = IWL5150_UCODE_API_MAX,
.ucode_api_min = IWL5150_UCODE_API_MIN,
.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
.ops = &iwl5150_ops,
- .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
+ .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
.eeprom_ver = EEPROM_5050_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_A,
.valid_rx_ant = ANT_AB,
.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -1731,21 +615,26 @@ struct iwl_cfg iwl5150_agn_cfg = {
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 512,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
struct iwl_cfg iwl5150_abg_cfg = {
- .name = "5150ABG",
+ .name = "Intel(R) WiMAX/WiFi Link 5150 ABG",
.fw_name_pre = IWL5150_FW_PRE,
.ucode_api_max = IWL5150_UCODE_API_MAX,
.ucode_api_min = IWL5150_UCODE_API_MIN,
.sku = IWL_SKU_A|IWL_SKU_G,
.ops = &iwl5150_ops,
- .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
+ .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
.eeprom_ver = EEPROM_5050_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_A,
.valid_rx_ant = ANT_AB,
.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -1755,20 +644,12 @@ struct iwl_cfg iwl5150_abg_cfg = {
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 512,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_MAX));
-
-module_param_named(swcrypto50, iwl50_mod_params.sw_crypto, bool, S_IRUGO);
-MODULE_PARM_DESC(swcrypto50,
- "using software crypto engine (default 0 [hardware])\n");
-module_param_named(queues_num50, iwl50_mod_params.num_of_queues, int, S_IRUGO);
-MODULE_PARM_DESC(queues_num50, "number of hw queues in 50xx series");
-module_param_named(11n_disable50, iwl50_mod_params.disable_11n, int, S_IRUGO);
-MODULE_PARM_DESC(11n_disable50, "disable 50XX 11n functionality");
-module_param_named(amsdu_size_8K50, iwl50_mod_params.amsdu_size_8K,
- int, S_IRUGO);
-MODULE_PARM_DESC(amsdu_size_8K50, "enable 8K amsdu size in 50XX series");
-module_param_named(fw_restart50, iwl50_mod_params.restart_fw, int, S_IRUGO);
-MODULE_PARM_DESC(fw_restart50, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 92b3e64fc14d..9fbf54cd3e1a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -42,18 +42,22 @@
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-sta.h"
+#include "iwl-agn.h"
#include "iwl-helpers.h"
-#include "iwl-5000-hw.h"
+#include "iwl-agn-hw.h"
#include "iwl-6000-hw.h"
#include "iwl-agn-led.h"
+#include "iwl-agn-debugfs.h"
/* Highest firmware API version supported */
#define IWL6000_UCODE_API_MAX 4
#define IWL6050_UCODE_API_MAX 4
+#define IWL6000G2_UCODE_API_MAX 4
/* Lowest firmware API version supported */
#define IWL6000_UCODE_API_MIN 4
#define IWL6050_UCODE_API_MIN 4
+#define IWL6000G2_UCODE_API_MIN 4
#define IWL6000_FW_PRE "iwlwifi-6000-"
#define _IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE #api ".ucode"
@@ -63,6 +67,11 @@
#define _IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE #api ".ucode"
#define IWL6050_MODULE_FIRMWARE(api) _IWL6050_MODULE_FIRMWARE(api)
+#define IWL6000G2A_FW_PRE "iwlwifi-6000g2a-"
+#define _IWL6000G2A_MODULE_FIRMWARE(api) IWL6000G2A_FW_PRE #api ".ucode"
+#define IWL6000G2A_MODULE_FIRMWARE(api) _IWL6000G2A_MODULE_FIRMWARE(api)
+
+
static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
{
/* want Celsius */
@@ -136,7 +145,7 @@ static struct iwl_sensitivity_ranges iwl6000_sensitivity = {
static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
{
if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
- priv->cfg->mod_params->num_of_queues <= IWL50_NUM_QUEUES)
+ priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
priv->cfg->num_of_queues =
priv->cfg->mod_params->num_of_queues;
@@ -144,7 +153,7 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
priv->hw_params.scd_bc_tbls_size =
priv->cfg->num_of_queues *
- sizeof(struct iwl5000_scd_bc_tbl);
+ sizeof(struct iwlagn_scd_bc_tbl);
priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
priv->hw_params.max_stations = IWL5000_STATION_COUNT;
priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
@@ -168,24 +177,56 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
/* Set initial sensitivity parameters */
/* Set initial calibration set */
priv->hw_params.sens = &iwl6000_sensitivity;
- switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) {
- case CSR_HW_REV_TYPE_6x50:
- priv->hw_params.calib_init_cfg =
- BIT(IWL_CALIB_XTAL) |
- BIT(IWL_CALIB_DC) |
- BIT(IWL_CALIB_LO) |
- BIT(IWL_CALIB_TX_IQ) |
- BIT(IWL_CALIB_BASE_BAND);
-
- break;
- default:
- priv->hw_params.calib_init_cfg =
- BIT(IWL_CALIB_XTAL) |
- BIT(IWL_CALIB_LO) |
- BIT(IWL_CALIB_TX_IQ) |
- BIT(IWL_CALIB_BASE_BAND);
- break;
- }
+ priv->hw_params.calib_init_cfg =
+ BIT(IWL_CALIB_XTAL) |
+ BIT(IWL_CALIB_LO) |
+ BIT(IWL_CALIB_TX_IQ) |
+ BIT(IWL_CALIB_BASE_BAND);
+
+ return 0;
+}
+
+static int iwl6050_hw_set_hw_params(struct iwl_priv *priv)
+{
+ if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
+ priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
+ priv->cfg->num_of_queues =
+ priv->cfg->mod_params->num_of_queues;
+
+ priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
+ priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
+ priv->hw_params.scd_bc_tbls_size =
+ priv->cfg->num_of_queues *
+ sizeof(struct iwlagn_scd_bc_tbl);
+ priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
+ priv->hw_params.max_stations = IWL5000_STATION_COUNT;
+ priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
+
+ priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE;
+ priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE;
+
+ priv->hw_params.max_bsm_size = 0;
+ priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
+ BIT(IEEE80211_BAND_5GHZ);
+ priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
+
+ priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
+ priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
+ priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
+ priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
+
+ if (priv->cfg->ops->lib->temp_ops.set_ct_kill)
+ priv->cfg->ops->lib->temp_ops.set_ct_kill(priv);
+
+ /* Set initial sensitivity parameters */
+ /* Set initial calibration set */
+ priv->hw_params.sens = &iwl6000_sensitivity;
+ priv->hw_params.calib_init_cfg =
+ BIT(IWL_CALIB_XTAL) |
+ BIT(IWL_CALIB_DC) |
+ BIT(IWL_CALIB_LO) |
+ BIT(IWL_CALIB_TX_IQ) |
+ BIT(IWL_CALIB_BASE_BAND);
return 0;
}
@@ -225,25 +266,25 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv, u16 channel)
static struct iwl_lib_ops iwl6000_lib = {
.set_hw_params = iwl6000_hw_set_hw_params,
- .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
- .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl,
- .txq_set_sched = iwl5000_txq_set_sched,
- .txq_agg_enable = iwl5000_txq_agg_enable,
- .txq_agg_disable = iwl5000_txq_agg_disable,
+ .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
+ .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
+ .txq_set_sched = iwlagn_txq_set_sched,
+ .txq_agg_enable = iwlagn_txq_agg_enable,
+ .txq_agg_disable = iwlagn_txq_agg_disable,
.txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
.txq_free_tfd = iwl_hw_txq_free_tfd,
.txq_init = iwl_hw_tx_queue_init,
- .rx_handler_setup = iwl5000_rx_handler_setup,
- .setup_deferred_work = iwl5000_setup_deferred_work,
- .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
- .load_ucode = iwl5000_load_ucode,
+ .rx_handler_setup = iwlagn_rx_handler_setup,
+ .setup_deferred_work = iwlagn_setup_deferred_work,
+ .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
+ .load_ucode = iwlagn_load_ucode,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
.dump_csr = iwl_dump_csr,
.dump_fh = iwl_dump_fh,
- .init_alive_start = iwl5000_init_alive_start,
- .alive_notify = iwl5000_alive_notify,
- .send_tx_power = iwl5000_send_tx_power,
+ .init_alive_start = iwlagn_init_alive_start,
+ .alive_notify = iwlagn_alive_notify,
+ .send_tx_power = iwlagn_send_tx_power,
.update_chain_flags = iwl_update_chain_flags,
.set_channel_switch = iwl6000_hw_channel_switch,
.apm_ops = {
@@ -254,60 +295,67 @@ static struct iwl_lib_ops iwl6000_lib = {
},
.eeprom_ops = {
.regulatory_bands = {
- EEPROM_5000_REG_BAND_1_CHANNELS,
- EEPROM_5000_REG_BAND_2_CHANNELS,
- EEPROM_5000_REG_BAND_3_CHANNELS,
- EEPROM_5000_REG_BAND_4_CHANNELS,
- EEPROM_5000_REG_BAND_5_CHANNELS,
+ EEPROM_REG_BAND_1_CHANNELS,
+ EEPROM_REG_BAND_2_CHANNELS,
+ EEPROM_REG_BAND_3_CHANNELS,
+ EEPROM_REG_BAND_4_CHANNELS,
+ EEPROM_REG_BAND_5_CHANNELS,
EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
- EEPROM_5000_REG_BAND_52_HT40_CHANNELS
+ EEPROM_REG_BAND_52_HT40_CHANNELS
},
.verify_signature = iwlcore_eeprom_verify_signature,
.acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
.release_semaphore = iwlcore_eeprom_release_semaphore,
- .calib_version = iwl5000_eeprom_calib_version,
- .query_addr = iwl5000_eeprom_query_addr,
+ .calib_version = iwlagn_eeprom_calib_version,
+ .query_addr = iwlagn_eeprom_query_addr,
.update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
},
.post_associate = iwl_post_associate,
.isr = iwl_isr_ict,
.config_ap = iwl_config_ap,
.temp_ops = {
- .temperature = iwl5000_temperature,
+ .temperature = iwlagn_temperature,
.set_ct_kill = iwl6000_set_ct_threshold,
},
- .add_bcast_station = iwl_add_bcast_station,
+ .manage_ibss_station = iwlagn_manage_ibss_station,
+ .debugfs_ops = {
+ .rx_stats_read = iwl_ucode_rx_stats_read,
+ .tx_stats_read = iwl_ucode_tx_stats_read,
+ .general_stats_read = iwl_ucode_general_stats_read,
+ },
+ .recover_from_tx_stall = iwl_bg_monitor_recover,
+ .check_plcp_health = iwl_good_plcp_health,
+ .check_ack_health = iwl_good_ack_health,
};
static const struct iwl_ops iwl6000_ops = {
- .ucode = &iwl5000_ucode,
.lib = &iwl6000_lib,
- .hcmd = &iwl5000_hcmd,
- .utils = &iwl5000_hcmd_utils,
+ .hcmd = &iwlagn_hcmd,
+ .utils = &iwlagn_hcmd_utils,
.led = &iwlagn_led_ops,
};
static struct iwl_lib_ops iwl6050_lib = {
- .set_hw_params = iwl6000_hw_set_hw_params,
- .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
- .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl,
- .txq_set_sched = iwl5000_txq_set_sched,
- .txq_agg_enable = iwl5000_txq_agg_enable,
- .txq_agg_disable = iwl5000_txq_agg_disable,
+ .set_hw_params = iwl6050_hw_set_hw_params,
+ .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
+ .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
+ .txq_set_sched = iwlagn_txq_set_sched,
+ .txq_agg_enable = iwlagn_txq_agg_enable,
+ .txq_agg_disable = iwlagn_txq_agg_disable,
.txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
.txq_free_tfd = iwl_hw_txq_free_tfd,
.txq_init = iwl_hw_tx_queue_init,
- .rx_handler_setup = iwl5000_rx_handler_setup,
- .setup_deferred_work = iwl5000_setup_deferred_work,
- .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
- .load_ucode = iwl5000_load_ucode,
+ .rx_handler_setup = iwlagn_rx_handler_setup,
+ .setup_deferred_work = iwlagn_setup_deferred_work,
+ .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
+ .load_ucode = iwlagn_load_ucode,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
.dump_csr = iwl_dump_csr,
.dump_fh = iwl_dump_fh,
- .init_alive_start = iwl5000_init_alive_start,
- .alive_notify = iwl5000_alive_notify,
- .send_tx_power = iwl5000_send_tx_power,
+ .init_alive_start = iwlagn_init_alive_start,
+ .alive_notify = iwlagn_alive_notify,
+ .send_tx_power = iwlagn_send_tx_power,
.update_chain_flags = iwl_update_chain_flags,
.set_channel_switch = iwl6000_hw_channel_switch,
.apm_ops = {
@@ -318,45 +366,90 @@ static struct iwl_lib_ops iwl6050_lib = {
},
.eeprom_ops = {
.regulatory_bands = {
- EEPROM_5000_REG_BAND_1_CHANNELS,
- EEPROM_5000_REG_BAND_2_CHANNELS,
- EEPROM_5000_REG_BAND_3_CHANNELS,
- EEPROM_5000_REG_BAND_4_CHANNELS,
- EEPROM_5000_REG_BAND_5_CHANNELS,
+ EEPROM_REG_BAND_1_CHANNELS,
+ EEPROM_REG_BAND_2_CHANNELS,
+ EEPROM_REG_BAND_3_CHANNELS,
+ EEPROM_REG_BAND_4_CHANNELS,
+ EEPROM_REG_BAND_5_CHANNELS,
EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
- EEPROM_5000_REG_BAND_52_HT40_CHANNELS
+ EEPROM_REG_BAND_52_HT40_CHANNELS
},
.verify_signature = iwlcore_eeprom_verify_signature,
.acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
.release_semaphore = iwlcore_eeprom_release_semaphore,
- .calib_version = iwl5000_eeprom_calib_version,
- .query_addr = iwl5000_eeprom_query_addr,
+ .calib_version = iwlagn_eeprom_calib_version,
+ .query_addr = iwlagn_eeprom_query_addr,
.update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
},
.post_associate = iwl_post_associate,
.isr = iwl_isr_ict,
.config_ap = iwl_config_ap,
.temp_ops = {
- .temperature = iwl5000_temperature,
+ .temperature = iwlagn_temperature,
.set_ct_kill = iwl6000_set_ct_threshold,
.set_calib_version = iwl6050_set_calib_version,
},
- .add_bcast_station = iwl_add_bcast_station,
+ .manage_ibss_station = iwlagn_manage_ibss_station,
+ .debugfs_ops = {
+ .rx_stats_read = iwl_ucode_rx_stats_read,
+ .tx_stats_read = iwl_ucode_tx_stats_read,
+ .general_stats_read = iwl_ucode_general_stats_read,
+ },
+ .recover_from_tx_stall = iwl_bg_monitor_recover,
+ .check_plcp_health = iwl_good_plcp_health,
+ .check_ack_health = iwl_good_ack_health,
};
static const struct iwl_ops iwl6050_ops = {
- .ucode = &iwl5000_ucode,
.lib = &iwl6050_lib,
- .hcmd = &iwl5000_hcmd,
- .utils = &iwl5000_hcmd_utils,
+ .hcmd = &iwlagn_hcmd,
+ .utils = &iwlagn_hcmd_utils,
.led = &iwlagn_led_ops,
};
+
+struct iwl_cfg iwl6000g2a_2agn_cfg = {
+ .name = "6000 Series 2x2 AGN Gen2a",
+ .fw_name_pre = IWL6000G2A_FW_PRE,
+ .ucode_api_max = IWL6000G2_UCODE_API_MAX,
+ .ucode_api_min = IWL6000G2_UCODE_API_MIN,
+ .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
+ .ops = &iwl6000_ops,
+ .eeprom_size = OTP_LOW_IMAGE_SIZE,
+ .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
+ .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
+ .valid_tx_ant = ANT_AB,
+ .valid_rx_ant = ANT_AB,
+ .pll_cfg_val = 0,
+ .set_l0s = true,
+ .use_bsm = false,
+ .pa_type = IWL_PA_SYSTEM,
+ .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
+ .shadow_ram_support = true,
+ .ht_greenfield_support = true,
+ .led_compensation = 51,
+ .use_rts_for_ht = true, /* use rts/cts protection */
+ .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+ .supports_idle = true,
+ .adv_thermal_throttle = true,
+ .support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 512,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
+};
+
/*
* "i": Internal configuration, use internal Power Amplifier
*/
struct iwl_cfg iwl6000i_2agn_cfg = {
- .name = "6000 Series 2x2 AGN",
+ .name = "Intel(R) Centrino(R) Advanced-N 6200 AGN",
.fw_name_pre = IWL6000_FW_PRE,
.ucode_api_max = IWL6000_UCODE_API_MAX,
.ucode_api_min = IWL6000_UCODE_API_MIN,
@@ -364,10 +457,10 @@ struct iwl_cfg iwl6000i_2agn_cfg = {
.ops = &iwl6000_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_BC,
.valid_rx_ant = ANT_BC,
.pll_cfg_val = 0,
@@ -385,10 +478,15 @@ struct iwl_cfg iwl6000i_2agn_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 1024,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
struct iwl_cfg iwl6000i_2abg_cfg = {
- .name = "6000 Series 2x2 ABG",
+ .name = "Intel(R) Centrino(R) Advanced-N 6200 ABG",
.fw_name_pre = IWL6000_FW_PRE,
.ucode_api_max = IWL6000_UCODE_API_MAX,
.ucode_api_min = IWL6000_UCODE_API_MIN,
@@ -396,10 +494,10 @@ struct iwl_cfg iwl6000i_2abg_cfg = {
.ops = &iwl6000_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_BC,
.valid_rx_ant = ANT_BC,
.pll_cfg_val = 0,
@@ -408,7 +506,6 @@ struct iwl_cfg iwl6000i_2abg_cfg = {
.pa_type = IWL_PA_INTERNAL,
.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
.shadow_ram_support = true,
- .ht_greenfield_support = true,
.led_compensation = 51,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.supports_idle = true,
@@ -416,10 +513,15 @@ struct iwl_cfg iwl6000i_2abg_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 1024,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
struct iwl_cfg iwl6000i_2bg_cfg = {
- .name = "6000 Series 2x2 BG",
+ .name = "Intel(R) Centrino(R) Advanced-N 6200 BG",
.fw_name_pre = IWL6000_FW_PRE,
.ucode_api_max = IWL6000_UCODE_API_MAX,
.ucode_api_min = IWL6000_UCODE_API_MIN,
@@ -427,10 +529,10 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
.ops = &iwl6000_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_BC,
.valid_rx_ant = ANT_BC,
.pll_cfg_val = 0,
@@ -439,7 +541,6 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
.pa_type = IWL_PA_INTERNAL,
.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
.shadow_ram_support = true,
- .ht_greenfield_support = true,
.led_compensation = 51,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.supports_idle = true,
@@ -447,10 +548,15 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 1024,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
struct iwl_cfg iwl6050_2agn_cfg = {
- .name = "6050 Series 2x2 AGN",
+ .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN",
.fw_name_pre = IWL6050_FW_PRE,
.ucode_api_max = IWL6050_UCODE_API_MAX,
.ucode_api_min = IWL6050_UCODE_API_MIN,
@@ -458,10 +564,10 @@ struct iwl_cfg iwl6050_2agn_cfg = {
.ops = &iwl6050_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_6050_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_AB,
.valid_rx_ant = ANT_AB,
.pll_cfg_val = 0,
@@ -479,10 +585,15 @@ struct iwl_cfg iwl6050_2agn_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1500,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 1024,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
struct iwl_cfg iwl6050_2abg_cfg = {
- .name = "6050 Series 2x2 ABG",
+ .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 ABG",
.fw_name_pre = IWL6050_FW_PRE,
.ucode_api_max = IWL6050_UCODE_API_MAX,
.ucode_api_min = IWL6050_UCODE_API_MIN,
@@ -490,10 +601,10 @@ struct iwl_cfg iwl6050_2abg_cfg = {
.ops = &iwl6050_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_6050_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_AB,
.valid_rx_ant = ANT_AB,
.pll_cfg_val = 0,
@@ -502,7 +613,6 @@ struct iwl_cfg iwl6050_2abg_cfg = {
.pa_type = IWL_PA_SYSTEM,
.max_ll_items = OTP_MAX_LL_ITEMS_6x50,
.shadow_ram_support = true,
- .ht_greenfield_support = true,
.led_compensation = 51,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.supports_idle = true,
@@ -510,10 +620,15 @@ struct iwl_cfg iwl6050_2abg_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1500,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 1024,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
struct iwl_cfg iwl6000_3agn_cfg = {
- .name = "6000 Series 3x3 AGN",
+ .name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN",
.fw_name_pre = IWL6000_FW_PRE,
.ucode_api_max = IWL6000_UCODE_API_MAX,
.ucode_api_min = IWL6000_UCODE_API_MIN,
@@ -521,10 +636,10 @@ struct iwl_cfg iwl6000_3agn_cfg = {
.ops = &iwl6000_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_ABC,
.valid_rx_ant = ANT_ABC,
.pll_cfg_val = 0,
@@ -542,7 +657,13 @@ struct iwl_cfg iwl6000_3agn_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 1024,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6000G2A_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
new file mode 100644
index 000000000000..48c023b4ca36
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
@@ -0,0 +1,850 @@
+/******************************************************************************
+*
+* GPL LICENSE SUMMARY
+*
+* Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+* USA
+*
+* The full GNU General Public License is included in this distribution
+* in the file called LICENSE.GPL.
+*
+* Contact Information:
+* Intel Linux Wireless <ilw@linux.intel.com>
+* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+*****************************************************************************/
+
+#include "iwl-agn-debugfs.h"
+
+ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+ {
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = sizeof(struct statistics_rx_phy) * 40 +
+ sizeof(struct statistics_rx_non_phy) * 40 +
+ sizeof(struct statistics_rx_ht_phy) * 40 + 400;
+ ssize_t ret;
+ struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
+ struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
+ struct statistics_rx_non_phy *general, *accum_general;
+ struct statistics_rx_non_phy *delta_general, *max_general;
+ struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
+
+ if (!iwl_is_alive(priv))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IWL_ERR(priv, "Cannot allocate buffer\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * The statistics information displayed here is based on
+ * the last statistics notification from the uCode and
+ * might not reflect the current uCode activity.
+ */
+ ofdm = &priv->statistics.rx.ofdm;
+ cck = &priv->statistics.rx.cck;
+ general = &priv->statistics.rx.general;
+ ht = &priv->statistics.rx.ofdm_ht;
+ accum_ofdm = &priv->accum_statistics.rx.ofdm;
+ accum_cck = &priv->accum_statistics.rx.cck;
+ accum_general = &priv->accum_statistics.rx.general;
+ accum_ht = &priv->accum_statistics.rx.ofdm_ht;
+ delta_ofdm = &priv->delta_statistics.rx.ofdm;
+ delta_cck = &priv->delta_statistics.rx.cck;
+ delta_general = &priv->delta_statistics.rx.general;
+ delta_ht = &priv->delta_statistics.rx.ofdm_ht;
+ max_ofdm = &priv->max_delta.rx.ofdm;
+ max_cck = &priv->max_delta.rx.cck;
+ max_general = &priv->max_delta.rx.general;
+ max_ht = &priv->max_delta.rx.ofdm_ht;
+
+ pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
+ pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
+ "acumulative delta max\n",
+ "Statistics_Rx - OFDM:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "ina_cnt:", le32_to_cpu(ofdm->ina_cnt),
+ accum_ofdm->ina_cnt,
+ delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "fina_cnt:",
+ le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
+ delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "plcp_err:",
+ le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
+ delta_ofdm->plcp_err, max_ofdm->plcp_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "crc32_err:",
+ le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
+ delta_ofdm->crc32_err, max_ofdm->crc32_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "overrun_err:",
+ le32_to_cpu(ofdm->overrun_err),
+ accum_ofdm->overrun_err, delta_ofdm->overrun_err,
+ max_ofdm->overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "early_overrun_err:",
+ le32_to_cpu(ofdm->early_overrun_err),
+ accum_ofdm->early_overrun_err,
+ delta_ofdm->early_overrun_err,
+ max_ofdm->early_overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "crc32_good:", le32_to_cpu(ofdm->crc32_good),
+ accum_ofdm->crc32_good, delta_ofdm->crc32_good,
+ max_ofdm->crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:",
+ le32_to_cpu(ofdm->false_alarm_cnt),
+ accum_ofdm->false_alarm_cnt,
+ delta_ofdm->false_alarm_cnt,
+ max_ofdm->false_alarm_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "fina_sync_err_cnt:",
+ le32_to_cpu(ofdm->fina_sync_err_cnt),
+ accum_ofdm->fina_sync_err_cnt,
+ delta_ofdm->fina_sync_err_cnt,
+ max_ofdm->fina_sync_err_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sfd_timeout:",
+ le32_to_cpu(ofdm->sfd_timeout),
+ accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout,
+ max_ofdm->sfd_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "fina_timeout:",
+ le32_to_cpu(ofdm->fina_timeout),
+ accum_ofdm->fina_timeout, delta_ofdm->fina_timeout,
+ max_ofdm->fina_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "unresponded_rts:",
+ le32_to_cpu(ofdm->unresponded_rts),
+ accum_ofdm->unresponded_rts,
+ delta_ofdm->unresponded_rts,
+ max_ofdm->unresponded_rts);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "rxe_frame_lmt_ovrun:",
+ le32_to_cpu(ofdm->rxe_frame_limit_overrun),
+ accum_ofdm->rxe_frame_limit_overrun,
+ delta_ofdm->rxe_frame_limit_overrun,
+ max_ofdm->rxe_frame_limit_overrun);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:",
+ le32_to_cpu(ofdm->sent_ack_cnt),
+ accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt,
+ max_ofdm->sent_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:",
+ le32_to_cpu(ofdm->sent_cts_cnt),
+ accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt,
+ max_ofdm->sent_cts_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "sent_ba_rsp_cnt:",
+ le32_to_cpu(ofdm->sent_ba_rsp_cnt),
+ accum_ofdm->sent_ba_rsp_cnt,
+ delta_ofdm->sent_ba_rsp_cnt,
+ max_ofdm->sent_ba_rsp_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "dsp_self_kill:",
+ le32_to_cpu(ofdm->dsp_self_kill),
+ accum_ofdm->dsp_self_kill,
+ delta_ofdm->dsp_self_kill,
+ max_ofdm->dsp_self_kill);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "mh_format_err:",
+ le32_to_cpu(ofdm->mh_format_err),
+ accum_ofdm->mh_format_err,
+ delta_ofdm->mh_format_err,
+ max_ofdm->mh_format_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "re_acq_main_rssi_sum:",
+ le32_to_cpu(ofdm->re_acq_main_rssi_sum),
+ accum_ofdm->re_acq_main_rssi_sum,
+ delta_ofdm->re_acq_main_rssi_sum,
+ max_ofdm->re_acq_main_rssi_sum);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
+ "acumulative delta max\n",
+ "Statistics_Rx - CCK:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "ina_cnt:",
+ le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
+ delta_cck->ina_cnt, max_cck->ina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "fina_cnt:",
+ le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
+ delta_cck->fina_cnt, max_cck->fina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "plcp_err:",
+ le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
+ delta_cck->plcp_err, max_cck->plcp_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "crc32_err:",
+ le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
+ delta_cck->crc32_err, max_cck->crc32_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "overrun_err:",
+ le32_to_cpu(cck->overrun_err),
+ accum_cck->overrun_err, delta_cck->overrun_err,
+ max_cck->overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "early_overrun_err:",
+ le32_to_cpu(cck->early_overrun_err),
+ accum_cck->early_overrun_err,
+ delta_cck->early_overrun_err,
+ max_cck->early_overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "crc32_good:",
+ le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
+ delta_cck->crc32_good, max_cck->crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "false_alarm_cnt:",
+ le32_to_cpu(cck->false_alarm_cnt),
+ accum_cck->false_alarm_cnt,
+ delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "fina_sync_err_cnt:",
+ le32_to_cpu(cck->fina_sync_err_cnt),
+ accum_cck->fina_sync_err_cnt,
+ delta_cck->fina_sync_err_cnt,
+ max_cck->fina_sync_err_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "sfd_timeout:",
+ le32_to_cpu(cck->sfd_timeout),
+ accum_cck->sfd_timeout, delta_cck->sfd_timeout,
+ max_cck->sfd_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "fina_timeout:",
+ le32_to_cpu(cck->fina_timeout),
+ accum_cck->fina_timeout, delta_cck->fina_timeout,
+ max_cck->fina_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "unresponded_rts:",
+ le32_to_cpu(cck->unresponded_rts),
+ accum_cck->unresponded_rts, delta_cck->unresponded_rts,
+ max_cck->unresponded_rts);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "rxe_frame_lmt_ovrun:",
+ le32_to_cpu(cck->rxe_frame_limit_overrun),
+ accum_cck->rxe_frame_limit_overrun,
+ delta_cck->rxe_frame_limit_overrun,
+ max_cck->rxe_frame_limit_overrun);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:",
+ le32_to_cpu(cck->sent_ack_cnt),
+ accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt,
+ max_cck->sent_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:",
+ le32_to_cpu(cck->sent_cts_cnt),
+ accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt,
+ max_cck->sent_cts_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sent_ba_rsp_cnt:",
+ le32_to_cpu(cck->sent_ba_rsp_cnt),
+ accum_cck->sent_ba_rsp_cnt,
+ delta_cck->sent_ba_rsp_cnt,
+ max_cck->sent_ba_rsp_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "dsp_self_kill:",
+ le32_to_cpu(cck->dsp_self_kill),
+ accum_cck->dsp_self_kill, delta_cck->dsp_self_kill,
+ max_cck->dsp_self_kill);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "mh_format_err:",
+ le32_to_cpu(cck->mh_format_err),
+ accum_cck->mh_format_err, delta_cck->mh_format_err,
+ max_cck->mh_format_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "re_acq_main_rssi_sum:",
+ le32_to_cpu(cck->re_acq_main_rssi_sum),
+ accum_cck->re_acq_main_rssi_sum,
+ delta_cck->re_acq_main_rssi_sum,
+ max_cck->re_acq_main_rssi_sum);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
+ "acumulative delta max\n",
+ "Statistics_Rx - GENERAL:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "bogus_cts:",
+ le32_to_cpu(general->bogus_cts),
+ accum_general->bogus_cts, delta_general->bogus_cts,
+ max_general->bogus_cts);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "bogus_ack:",
+ le32_to_cpu(general->bogus_ack),
+ accum_general->bogus_ack, delta_general->bogus_ack,
+ max_general->bogus_ack);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "non_bssid_frames:",
+ le32_to_cpu(general->non_bssid_frames),
+ accum_general->non_bssid_frames,
+ delta_general->non_bssid_frames,
+ max_general->non_bssid_frames);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "filtered_frames:",
+ le32_to_cpu(general->filtered_frames),
+ accum_general->filtered_frames,
+ delta_general->filtered_frames,
+ max_general->filtered_frames);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "non_channel_beacons:",
+ le32_to_cpu(general->non_channel_beacons),
+ accum_general->non_channel_beacons,
+ delta_general->non_channel_beacons,
+ max_general->non_channel_beacons);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "channel_beacons:",
+ le32_to_cpu(general->channel_beacons),
+ accum_general->channel_beacons,
+ delta_general->channel_beacons,
+ max_general->channel_beacons);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "num_missed_bcon:",
+ le32_to_cpu(general->num_missed_bcon),
+ accum_general->num_missed_bcon,
+ delta_general->num_missed_bcon,
+ max_general->num_missed_bcon);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "adc_rx_saturation_time:",
+ le32_to_cpu(general->adc_rx_saturation_time),
+ accum_general->adc_rx_saturation_time,
+ delta_general->adc_rx_saturation_time,
+ max_general->adc_rx_saturation_time);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "ina_detect_search_tm:",
+ le32_to_cpu(general->ina_detection_search_time),
+ accum_general->ina_detection_search_time,
+ delta_general->ina_detection_search_time,
+ max_general->ina_detection_search_time);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "beacon_silence_rssi_a:",
+ le32_to_cpu(general->beacon_silence_rssi_a),
+ accum_general->beacon_silence_rssi_a,
+ delta_general->beacon_silence_rssi_a,
+ max_general->beacon_silence_rssi_a);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "beacon_silence_rssi_b:",
+ le32_to_cpu(general->beacon_silence_rssi_b),
+ accum_general->beacon_silence_rssi_b,
+ delta_general->beacon_silence_rssi_b,
+ max_general->beacon_silence_rssi_b);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "beacon_silence_rssi_c:",
+ le32_to_cpu(general->beacon_silence_rssi_c),
+ accum_general->beacon_silence_rssi_c,
+ delta_general->beacon_silence_rssi_c,
+ max_general->beacon_silence_rssi_c);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "interference_data_flag:",
+ le32_to_cpu(general->interference_data_flag),
+ accum_general->interference_data_flag,
+ delta_general->interference_data_flag,
+ max_general->interference_data_flag);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "channel_load:",
+ le32_to_cpu(general->channel_load),
+ accum_general->channel_load,
+ delta_general->channel_load,
+ max_general->channel_load);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "dsp_false_alarms:",
+ le32_to_cpu(general->dsp_false_alarms),
+ accum_general->dsp_false_alarms,
+ delta_general->dsp_false_alarms,
+ max_general->dsp_false_alarms);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "beacon_rssi_a:",
+ le32_to_cpu(general->beacon_rssi_a),
+ accum_general->beacon_rssi_a,
+ delta_general->beacon_rssi_a,
+ max_general->beacon_rssi_a);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "beacon_rssi_b:",
+ le32_to_cpu(general->beacon_rssi_b),
+ accum_general->beacon_rssi_b,
+ delta_general->beacon_rssi_b,
+ max_general->beacon_rssi_b);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "beacon_rssi_c:",
+ le32_to_cpu(general->beacon_rssi_c),
+ accum_general->beacon_rssi_c,
+ delta_general->beacon_rssi_c,
+ max_general->beacon_rssi_c);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "beacon_energy_a:",
+ le32_to_cpu(general->beacon_energy_a),
+ accum_general->beacon_energy_a,
+ delta_general->beacon_energy_a,
+ max_general->beacon_energy_a);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "beacon_energy_b:",
+ le32_to_cpu(general->beacon_energy_b),
+ accum_general->beacon_energy_b,
+ delta_general->beacon_energy_b,
+ max_general->beacon_energy_b);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "beacon_energy_c:",
+ le32_to_cpu(general->beacon_energy_c),
+ accum_general->beacon_energy_c,
+ delta_general->beacon_energy_c,
+ max_general->beacon_energy_c);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM_HT:\n");
+ pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
+ "acumulative delta max\n",
+ "Statistics_Rx - OFDM_HT:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "plcp_err:",
+ le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
+ delta_ht->plcp_err, max_ht->plcp_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "overrun_err:",
+ le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
+ delta_ht->overrun_err, max_ht->overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "early_overrun_err:",
+ le32_to_cpu(ht->early_overrun_err),
+ accum_ht->early_overrun_err,
+ delta_ht->early_overrun_err,
+ max_ht->early_overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "crc32_good:",
+ le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
+ delta_ht->crc32_good, max_ht->crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "crc32_err:",
+ le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
+ delta_ht->crc32_err, max_ht->crc32_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "mh_format_err:",
+ le32_to_cpu(ht->mh_format_err),
+ accum_ht->mh_format_err,
+ delta_ht->mh_format_err, max_ht->mh_format_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg_crc32_good:",
+ le32_to_cpu(ht->agg_crc32_good),
+ accum_ht->agg_crc32_good,
+ delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg_mpdu_cnt:",
+ le32_to_cpu(ht->agg_mpdu_cnt),
+ accum_ht->agg_mpdu_cnt,
+ delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg_cnt:",
+ le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
+ delta_ht->agg_cnt, max_ht->agg_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "unsupport_mcs:",
+ le32_to_cpu(ht->unsupport_mcs),
+ accum_ht->unsupport_mcs,
+ delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+ssize_t iwl_ucode_tx_stats_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
+ ssize_t ret;
+ struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
+
+ if (!iwl_is_alive(priv))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IWL_ERR(priv, "Cannot allocate buffer\n");
+ return -ENOMEM;
+ }
+
+ /* The statistics information displayed here is based on
+ * the last statistics notification from the uCode and
+ * might not reflect the current uCode activity.
+ */
+ tx = &priv->statistics.tx;
+ accum_tx = &priv->accum_statistics.tx;
+ delta_tx = &priv->delta_statistics.tx;
+ max_tx = &priv->max_delta.tx;
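+ /* Column meaning, as suggested by the field names: "current" is the value
+ * from the last statistics notification, "accumulative" is the running
+ * total kept by the driver, "delta" is the change since the previous
+ * notification, and "max" is the largest delta observed. */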
+ pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
+ pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
+ "acumulative delta max\n",
+ "Statistics_Tx:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "preamble:",
+ le32_to_cpu(tx->preamble_cnt),
+ accum_tx->preamble_cnt,
+ delta_tx->preamble_cnt, max_tx->preamble_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "rx_detected_cnt:",
+ le32_to_cpu(tx->rx_detected_cnt),
+ accum_tx->rx_detected_cnt,
+ delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "bt_prio_defer_cnt:",
+ le32_to_cpu(tx->bt_prio_defer_cnt),
+ accum_tx->bt_prio_defer_cnt,
+ delta_tx->bt_prio_defer_cnt,
+ max_tx->bt_prio_defer_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "bt_prio_kill_cnt:",
+ le32_to_cpu(tx->bt_prio_kill_cnt),
+ accum_tx->bt_prio_kill_cnt,
+ delta_tx->bt_prio_kill_cnt,
+ max_tx->bt_prio_kill_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "few_bytes_cnt:",
+ le32_to_cpu(tx->few_bytes_cnt),
+ accum_tx->few_bytes_cnt,
+ delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "cts_timeout:",
+ le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
+ delta_tx->cts_timeout, max_tx->cts_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "ack_timeout:",
+ le32_to_cpu(tx->ack_timeout),
+ accum_tx->ack_timeout,
+ delta_tx->ack_timeout, max_tx->ack_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "expected_ack_cnt:",
+ le32_to_cpu(tx->expected_ack_cnt),
+ accum_tx->expected_ack_cnt,
+ delta_tx->expected_ack_cnt,
+ max_tx->expected_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "actual_ack_cnt:",
+ le32_to_cpu(tx->actual_ack_cnt),
+ accum_tx->actual_ack_cnt,
+ delta_tx->actual_ack_cnt,
+ max_tx->actual_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "dump_msdu_cnt:",
+ le32_to_cpu(tx->dump_msdu_cnt),
+ accum_tx->dump_msdu_cnt,
+ delta_tx->dump_msdu_cnt,
+ max_tx->dump_msdu_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "abort_nxt_frame_mismatch:",
+ le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
+ accum_tx->burst_abort_next_frame_mismatch_cnt,
+ delta_tx->burst_abort_next_frame_mismatch_cnt,
+ max_tx->burst_abort_next_frame_mismatch_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "abort_missing_nxt_frame:",
+ le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
+ accum_tx->burst_abort_missing_next_frame_cnt,
+ delta_tx->burst_abort_missing_next_frame_cnt,
+ max_tx->burst_abort_missing_next_frame_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "cts_timeout_collision:",
+ le32_to_cpu(tx->cts_timeout_collision),
+ accum_tx->cts_timeout_collision,
+ delta_tx->cts_timeout_collision,
+ max_tx->cts_timeout_collision);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "ack_ba_timeout_collision:",
+ le32_to_cpu(tx->ack_or_ba_timeout_collision),
+ accum_tx->ack_or_ba_timeout_collision,
+ delta_tx->ack_or_ba_timeout_collision,
+ max_tx->ack_or_ba_timeout_collision);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg ba_timeout:",
+ le32_to_cpu(tx->agg.ba_timeout),
+ accum_tx->agg.ba_timeout,
+ delta_tx->agg.ba_timeout,
+ max_tx->agg.ba_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg ba_resched_frames:",
+ le32_to_cpu(tx->agg.ba_reschedule_frames),
+ accum_tx->agg.ba_reschedule_frames,
+ delta_tx->agg.ba_reschedule_frames,
+ max_tx->agg.ba_reschedule_frames);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg scd_query_agg_frame:",
+ le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
+ accum_tx->agg.scd_query_agg_frame_cnt,
+ delta_tx->agg.scd_query_agg_frame_cnt,
+ max_tx->agg.scd_query_agg_frame_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg scd_query_no_agg:",
+ le32_to_cpu(tx->agg.scd_query_no_agg),
+ accum_tx->agg.scd_query_no_agg,
+ delta_tx->agg.scd_query_no_agg,
+ max_tx->agg.scd_query_no_agg);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg scd_query_agg:",
+ le32_to_cpu(tx->agg.scd_query_agg),
+ accum_tx->agg.scd_query_agg,
+ delta_tx->agg.scd_query_agg,
+ max_tx->agg.scd_query_agg);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg scd_query_mismatch:",
+ le32_to_cpu(tx->agg.scd_query_mismatch),
+ accum_tx->agg.scd_query_mismatch,
+ delta_tx->agg.scd_query_mismatch,
+ max_tx->agg.scd_query_mismatch);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg frame_not_ready:",
+ le32_to_cpu(tx->agg.frame_not_ready),
+ accum_tx->agg.frame_not_ready,
+ delta_tx->agg.frame_not_ready,
+ max_tx->agg.frame_not_ready);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg underrun:",
+ le32_to_cpu(tx->agg.underrun),
+ accum_tx->agg.underrun,
+ delta_tx->agg.underrun, max_tx->agg.underrun);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg bt_prio_kill:",
+ le32_to_cpu(tx->agg.bt_prio_kill),
+ accum_tx->agg.bt_prio_kill,
+ delta_tx->agg.bt_prio_kill,
+ max_tx->agg.bt_prio_kill);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg rx_ba_rsp_cnt:",
+ le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
+ accum_tx->agg.rx_ba_rsp_cnt,
+ delta_tx->agg.rx_ba_rsp_cnt,
+ max_tx->agg.rx_ba_rsp_cnt);
+
+ if (tx->tx_power.ant_a || tx->tx_power.ant_b || tx->tx_power.ant_c) {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "tx power: (1/2 dB step)\n");
+ if ((priv->cfg->valid_tx_ant & ANT_A) && tx->tx_power.ant_a)
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tantenna A: 0x%X\n",
+ tx->tx_power.ant_a);
+ if ((priv->cfg->valid_tx_ant & ANT_B) && tx->tx_power.ant_b)
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tantenna B: 0x%X\n",
+ tx->tx_power.ant_b);
+ if ((priv->cfg->valid_tx_ant & ANT_C) && tx->tx_power.ant_c)
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tantenna C: 0x%X\n",
+ tx->tx_power.ant_c);
+ }
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = sizeof(struct statistics_general) * 10 + 300;
+ ssize_t ret;
+ struct statistics_general *general, *accum_general;
+ struct statistics_general *delta_general, *max_general;
+ struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
+ struct statistics_div *div, *accum_div, *delta_div, *max_div;
+
+ if (!iwl_is_alive(priv))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IWL_ERR(priv, "Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ /* The statistics information displayed here is based on
+ * the last statistics notification from the uCode and
+ * might not reflect the current uCode activity.
+ */
+ general = &priv->statistics.general;
+ dbg = &priv->statistics.general.dbg;
+ div = &priv->statistics.general.div;
+ accum_general = &priv->accum_statistics.general;
+ delta_general = &priv->delta_statistics.general;
+ max_general = &priv->max_delta.general;
+ accum_dbg = &priv->accum_statistics.general.dbg;
+ delta_dbg = &priv->delta_statistics.general.dbg;
+ max_dbg = &priv->max_delta.general.dbg;
+ accum_div = &priv->accum_statistics.general.div;
+ delta_div = &priv->delta_statistics.general.div;
+ max_div = &priv->max_delta.general.div;
+ pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
+ pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
+ "acumulative delta max\n",
+ "Statistics_General:");
+ pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u\n",
+ "temperature:",
+ le32_to_cpu(general->temperature));
+ pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u\n",
+ "temperature_m:",
+ le32_to_cpu(general->temperature_m));
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "burst_check:",
+ le32_to_cpu(dbg->burst_check),
+ accum_dbg->burst_check,
+ delta_dbg->burst_check, max_dbg->burst_check);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "burst_count:",
+ le32_to_cpu(dbg->burst_count),
+ accum_dbg->burst_count,
+ delta_dbg->burst_count, max_dbg->burst_count);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "sleep_time:",
+ le32_to_cpu(general->sleep_time),
+ accum_general->sleep_time,
+ delta_general->sleep_time, max_general->sleep_time);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "slots_out:",
+ le32_to_cpu(general->slots_out),
+ accum_general->slots_out,
+ delta_general->slots_out, max_general->slots_out);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "slots_idle:",
+ le32_to_cpu(general->slots_idle),
+ accum_general->slots_idle,
+ delta_general->slots_idle, max_general->slots_idle);
+ pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
+ le32_to_cpu(general->ttl_timestamp));
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "tx_on_a:",
+ le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
+ delta_div->tx_on_a, max_div->tx_on_a);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "tx_on_b:",
+ le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
+ delta_div->tx_on_b, max_div->tx_on_b);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "exec_time:",
+ le32_to_cpu(div->exec_time), accum_div->exec_time,
+ delta_div->exec_time, max_div->exec_time);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "probe_time:",
+ le32_to_cpu(div->probe_time), accum_div->probe_time,
+ delta_div->probe_time, max_div->probe_time);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "rx_enable_counter:",
+ le32_to_cpu(general->rx_enable_counter),
+ accum_general->rx_enable_counter,
+ delta_general->rx_enable_counter,
+ max_general->rx_enable_counter);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "num_of_sos_states:",
+ le32_to_cpu(general->num_of_sos_states),
+ accum_general->num_of_sos_states,
+ delta_general->num_of_sos_states,
+ max_general->num_of_sos_states);
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h
new file mode 100644
index 000000000000..59b1f25f0d85
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h
@@ -0,0 +1,56 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-debug.h"
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ssize_t iwl_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+#else
+static ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+static ssize_t iwl_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+static ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
new file mode 100644
index 000000000000..44ef5d93befc
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
@@ -0,0 +1,276 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-agn.h"
+
+static int iwlagn_send_rxon_assoc(struct iwl_priv *priv)
+{
+ int ret = 0;
+ struct iwl5000_rxon_assoc_cmd rxon_assoc;
+ const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
+ const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;
+
+ if ((rxon1->flags == rxon2->flags) &&
+ (rxon1->filter_flags == rxon2->filter_flags) &&
+ (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
+ (rxon1->ofdm_ht_single_stream_basic_rates ==
+ rxon2->ofdm_ht_single_stream_basic_rates) &&
+ (rxon1->ofdm_ht_dual_stream_basic_rates ==
+ rxon2->ofdm_ht_dual_stream_basic_rates) &&
+ (rxon1->ofdm_ht_triple_stream_basic_rates ==
+ rxon2->ofdm_ht_triple_stream_basic_rates) &&
+ (rxon1->acquisition_data == rxon2->acquisition_data) &&
+ (rxon1->rx_chain == rxon2->rx_chain) &&
+ (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
+ IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
+ return 0;
+ }
+
+ rxon_assoc.flags = priv->staging_rxon.flags;
+ rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
+ rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
+ rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
+ rxon_assoc.reserved1 = 0;
+ rxon_assoc.reserved2 = 0;
+ rxon_assoc.reserved3 = 0;
+ rxon_assoc.ofdm_ht_single_stream_basic_rates =
+ priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
+ rxon_assoc.ofdm_ht_dual_stream_basic_rates =
+ priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
+ rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;
+ rxon_assoc.ofdm_ht_triple_stream_basic_rates =
+ priv->staging_rxon.ofdm_ht_triple_stream_basic_rates;
+ rxon_assoc.acquisition_data = priv->staging_rxon.acquisition_data;
+
+ ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
+ sizeof(rxon_assoc), &rxon_assoc, NULL);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
+{
+ struct iwl_tx_ant_config_cmd tx_ant_cmd = {
+ .valid = cpu_to_le32(valid_tx_ant),
+ };
+
+ if (IWL_UCODE_API(priv->ucode_ver) > 1) {
+ IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant);
+ return iwl_send_cmd_pdu(priv, TX_ANT_CONFIGURATION_CMD,
+ sizeof(struct iwl_tx_ant_config_cmd),
+ &tx_ant_cmd);
+ } else {
+ IWL_DEBUG_HC(priv, "TX_ANT_CONFIGURATION_CMD not supported\n");
+ return -EOPNOTSUPP;
+ }
+}
+
+/* Currently this is the superset of everything */
+static u16 iwlagn_get_hcmd_size(u8 cmd_id, u16 len)
+{
+ return len;
+}
+
+static u16 iwlagn_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
+{
+ u16 size = (u16)sizeof(struct iwl_addsta_cmd);
+ struct iwl_addsta_cmd *addsta = (struct iwl_addsta_cmd *)data;
+ memcpy(addsta, cmd, size);
+ /* reserved in 5000 */
+ addsta->rate_n_flags = cpu_to_le16(0);
+ return size;
+}
+
+static void iwlagn_gain_computation(struct iwl_priv *priv,
+ u32 average_noise[NUM_RX_CHAINS],
+ u16 min_average_noise_antenna_i,
+ u32 min_average_noise,
+ u8 default_chain)
+{
+ int i;
+ s32 delta_g;
+ struct iwl_chain_noise_data *data = &priv->chain_noise_data;
+
+ /*
+ * Find Gain Code for the chains based on "default chain"
+ */
+ for (i = default_chain + 1; i < NUM_RX_CHAINS; i++) {
+ if ((data->disconn_array[i])) {
+ data->delta_gain_code[i] = 0;
+ continue;
+ }
+
+ delta_g = (priv->cfg->chain_noise_scale *
+ ((s32)average_noise[default_chain] -
+ (s32)average_noise[i])) / 1500;
+
+ /* bound the gain magnitude to 2 bits max; the 3rd bit is the sign */
+ data->delta_gain_code[i] =
+ min(abs(delta_g), (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
+
+ if (delta_g < 0)
+ /*
+ * set negative sign ...
+ * note to Intel developers: This is uCode API format,
+ * not the format of any internal device registers.
+ * Do not change this format for e.g. 6050 or similar
+ * devices. Change format only if more resolution
+ * (i.e. more than 2 bits magnitude) is needed.
+ */
+ data->delta_gain_code[i] |= (1 << 2);
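+ /* Hypothetical example: delta_g = -3 yields a magnitude of
+ * min(3, CHAIN_NOISE_MAX_DELTA_GAIN_CODE) and, with the sign bit
+ * ORed in above, a code of 0b111 (assuming the maximum allowed
+ * magnitude is at least 3). */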
+ }
+
+ IWL_DEBUG_CALIB(priv, "Delta gains: ANT_B = %d ANT_C = %d\n",
+ data->delta_gain_code[1], data->delta_gain_code[2]);
+
+ if (!data->radio_write) {
+ struct iwl_calib_chain_noise_gain_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.hdr.op_code = IWL_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD;
+ cmd.hdr.first_group = 0;
+ cmd.hdr.groups_num = 1;
+ cmd.hdr.data_valid = 1;
+ cmd.delta_gain_1 = data->delta_gain_code[1];
+ cmd.delta_gain_2 = data->delta_gain_code[2];
+ iwl_send_cmd_pdu_async(priv, REPLY_PHY_CALIBRATION_CMD,
+ sizeof(cmd), &cmd, NULL);
+
+ data->radio_write = 1;
+ data->state = IWL_CHAIN_NOISE_CALIBRATED;
+ }
+
+ data->chain_noise_a = 0;
+ data->chain_noise_b = 0;
+ data->chain_noise_c = 0;
+ data->chain_signal_a = 0;
+ data->chain_signal_b = 0;
+ data->chain_signal_c = 0;
+ data->beacon_count = 0;
+}
+
+static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
+{
+ struct iwl_chain_noise_data *data = &priv->chain_noise_data;
+ int ret;
+
+ if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) {
+ struct iwl_calib_chain_noise_reset_cmd cmd;
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.hdr.op_code = IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD;
+ cmd.hdr.first_group = 0;
+ cmd.hdr.groups_num = 1;
+ cmd.hdr.data_valid = 1;
+ ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
+ sizeof(cmd), &cmd);
+ if (ret)
+ IWL_ERR(priv,
+ "Could not send REPLY_PHY_CALIBRATION_CMD\n");
+ data->state = IWL_CHAIN_NOISE_ACCUMULATE;
+ IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
+ }
+}
+
+static void iwlagn_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
+ __le32 *tx_flags)
+{
+ if ((info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
+ (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
+ *tx_flags |= TX_CMD_FLG_RTS_CTS_MSK;
+ else
+ *tx_flags &= ~TX_CMD_FLG_RTS_CTS_MSK;
+}
+
+/* Calc max signal level (dBm) among 3 possible receivers */
+static int iwlagn_calc_rssi(struct iwl_priv *priv,
+ struct iwl_rx_phy_res *rx_resp)
+{
+ /* data from PHY/DSP regarding signal strength, etc.,
+ * contents are always there, not configurable by host
+ */
+ struct iwl5000_non_cfg_phy *ncphy =
+ (struct iwl5000_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
+ u32 val, rssi_a, rssi_b, rssi_c, max_rssi;
+ u8 agc;
+
+ val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_AGC_IDX]);
+ agc = (val & IWL50_OFDM_AGC_MSK) >> IWL50_OFDM_AGC_BIT_POS;
+
+ /* Find max rssi among 3 possible receivers.
+ * These values are measured by the digital signal processor (DSP).
+ * They should stay fairly constant even as the signal strength varies,
+ * if the radio's automatic gain control (AGC) is working right.
+ * AGC value (see below) will provide the "interesting" info.
+ */
+ val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_AB_IDX]);
+ rssi_a = (val & IWL50_OFDM_RSSI_A_MSK) >> IWL50_OFDM_RSSI_A_BIT_POS;
+ rssi_b = (val & IWL50_OFDM_RSSI_B_MSK) >> IWL50_OFDM_RSSI_B_BIT_POS;
+ val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_C_IDX]);
+ rssi_c = (val & IWL50_OFDM_RSSI_C_MSK) >> IWL50_OFDM_RSSI_C_BIT_POS;
+
+ max_rssi = max_t(u32, rssi_a, rssi_b);
+ max_rssi = max_t(u32, max_rssi, rssi_c);
+
+ IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
+ rssi_a, rssi_b, rssi_c, max_rssi, agc);
+
+ /* dBm = max_rssi dB - agc dB - constant.
+ * Higher AGC (higher radio gain) means lower signal. */
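+ /* Hypothetical numbers: max_rssi = 80 and agc = 30 with
+ * IWLAGN_RSSI_OFFSET = 44 would report 80 - 30 - 44 = 6 dBm. */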
+ return max_rssi - agc - IWLAGN_RSSI_OFFSET;
+}
+
+struct iwl_hcmd_ops iwlagn_hcmd = {
+ .rxon_assoc = iwlagn_send_rxon_assoc,
+ .commit_rxon = iwl_commit_rxon,
+ .set_rxon_chain = iwl_set_rxon_chain,
+ .set_tx_ant = iwlagn_send_tx_ant_config,
+ .send_bt_config = iwl_send_bt_config,
+};
+
+struct iwl_hcmd_utils_ops iwlagn_hcmd_utils = {
+ .get_hcmd_size = iwlagn_get_hcmd_size,
+ .build_addsta_hcmd = iwlagn_build_addsta_hcmd,
+ .gain_computation = iwlagn_gain_computation,
+ .chain_noise_reset = iwlagn_chain_noise_reset,
+ .rts_tx_cmd_flag = iwlagn_rts_tx_cmd_flag,
+ .calc_rssi = iwlagn_calc_rssi,
+ .request_scan = iwlagn_request_scan,
+};
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
new file mode 100644
index 000000000000..f9a3fbb6338f
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
@@ -0,0 +1,118 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+/*
+ * Please use this file (iwl-agn-hw.h) only for hardware-related definitions.
+ */
+
+#ifndef __iwl_agn_hw_h__
+#define __iwl_agn_hw_h__
+
+#define IWLAGN_RTC_INST_LOWER_BOUND (0x000000)
+#define IWLAGN_RTC_INST_UPPER_BOUND (0x020000)
+
+#define IWLAGN_RTC_DATA_LOWER_BOUND (0x800000)
+#define IWLAGN_RTC_DATA_UPPER_BOUND (0x80C000)
+
+#define IWLAGN_RTC_INST_SIZE (IWLAGN_RTC_INST_UPPER_BOUND - \
+ IWLAGN_RTC_INST_LOWER_BOUND)
+#define IWLAGN_RTC_DATA_SIZE (IWLAGN_RTC_DATA_UPPER_BOUND - \
+ IWLAGN_RTC_DATA_LOWER_BOUND)
+
+/* RSSI to dBm */
+#define IWLAGN_RSSI_OFFSET 44
+
+/* PCI registers */
+#define PCI_CFG_RETRY_TIMEOUT 0x041
+
+/* PCI register values */
+#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
+#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
+
+#define IWLAGN_DEFAULT_TX_RETRY 15
+
+/* Limit range of txpower output target to be between these values */
+#define IWLAGN_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm: 1 milliwatt */
+#define IWLAGN_TX_POWER_TARGET_POWER_MAX (16) /* 16 dBm */
+
+/* EEPROM */
+#define IWLAGN_EEPROM_IMG_SIZE 2048
+
+#define IWLAGN_CMD_FIFO_NUM 7
+#define IWLAGN_NUM_QUEUES 20
+#define IWLAGN_NUM_AMPDU_QUEUES 10
+#define IWLAGN_FIRST_AMPDU_QUEUE 10
+
+/* Fixed (non-configurable) rx data from phy */
+
+/**
+ * struct iwlagn_scd_bc_tbl - scheduler byte count table
+ * base physical address provided by SCD_DRAM_BASE_ADDR
+ * @tfd_offset: bits 0-12 - tx command byte count
+ * bits 12-16 - station index
+ */
+struct iwlagn_scd_bc_tbl {
+ __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
+} __attribute__ ((packed));
+
+
+#endif /* __iwl_agn_hw_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ict.c b/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
new file mode 100644
index 000000000000..c92b2c0cbd91
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
@@ -0,0 +1,308 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/sched.h>
+#include <linux/gfp.h>
+#include <net/mac80211.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-agn.h"
+#include "iwl-helpers.h"
+
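+/* Number of 32-bit ICT entries that fit in one page: 1024 with 4 KiB pages */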
+#define ICT_COUNT (PAGE_SIZE/sizeof(u32))
+
+/* Free dram table */
+void iwl_free_isr_ict(struct iwl_priv *priv)
+{
+ if (priv->_agn.ict_tbl_vir) {
+ dma_free_coherent(&priv->pci_dev->dev,
+ (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
+ priv->_agn.ict_tbl_vir,
+ priv->_agn.ict_tbl_dma);
+ priv->_agn.ict_tbl_vir = NULL;
+ }
+}
+
+
+/* Allocate the DRAM shared table; it is PAGE_SIZE aligned.
+ * Also reset all data related to the ICT table interrupt.
+ */
+int iwl_alloc_isr_ict(struct iwl_priv *priv)
+{
+
+ if (priv->cfg->use_isr_legacy)
+ return 0;
+ /* allocate shared data table */
+ priv->_agn.ict_tbl_vir =
+ dma_alloc_coherent(&priv->pci_dev->dev,
+ (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
+ &priv->_agn.ict_tbl_dma, GFP_KERNEL);
+ if (!priv->_agn.ict_tbl_vir)
+ return -ENOMEM;
+
+ /* align table to PAGE_SIZE boundary */
+ priv->_agn.aligned_ict_tbl_dma = ALIGN(priv->_agn.ict_tbl_dma, PAGE_SIZE);
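+ /* The extra PAGE_SIZE allocated above guarantees that a fully page-aligned
+ * table of ICT_COUNT entries fits within the buffer no matter where the
+ * allocation itself starts. */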
+
+ IWL_DEBUG_ISR(priv, "ict dma addr %Lx dma aligned %Lx diff %d\n",
+ (unsigned long long)priv->_agn.ict_tbl_dma,
+ (unsigned long long)priv->_agn.aligned_ict_tbl_dma,
+ (int)(priv->_agn.aligned_ict_tbl_dma - priv->_agn.ict_tbl_dma));
+
+ priv->_agn.ict_tbl = priv->_agn.ict_tbl_vir +
+ (priv->_agn.aligned_ict_tbl_dma - priv->_agn.ict_tbl_dma);
+
+ IWL_DEBUG_ISR(priv, "ict vir addr %p vir aligned %p diff %d\n",
+ priv->_agn.ict_tbl, priv->_agn.ict_tbl_vir,
+ (int)(priv->_agn.aligned_ict_tbl_dma - priv->_agn.ict_tbl_dma));
+
+ /* reset table and index to all 0 */
+ memset(priv->_agn.ict_tbl_vir, 0, (sizeof(u32) * ICT_COUNT) + PAGE_SIZE);
+ priv->_agn.ict_index = 0;
+
+ /* add periodic RX interrupt */
+ priv->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
+ return 0;
+}
+
+/* The device is going up; inform it that it should use the ICT interrupt
+ * table, and tell the driver to start using ICT interrupts.
+ */
+int iwl_reset_ict(struct iwl_priv *priv)
+{
+ u32 val;
+ unsigned long flags;
+
+ if (!priv->_agn.ict_tbl_vir)
+ return 0;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ iwl_disable_interrupts(priv);
+
+ memset(&priv->_agn.ict_tbl[0], 0, sizeof(u32) * ICT_COUNT);
+
+ val = priv->_agn.aligned_ict_tbl_dma >> PAGE_SHIFT;
+
+ val |= CSR_DRAM_INT_TBL_ENABLE;
+ val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
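+ /* The table address is programmed in page units (hence the PAGE_SHIFT
+ * above), leaving the low register bits free for the enable and
+ * wrap-check flags ORed in here. */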
+
+ IWL_DEBUG_ISR(priv, "CSR_DRAM_INT_TBL_REG =0x%X "
+ "aligned dma address %Lx\n",
+ val, (unsigned long long)priv->_agn.aligned_ict_tbl_dma);
+
+ iwl_write32(priv, CSR_DRAM_INT_TBL_REG, val);
+ priv->_agn.use_ict = true;
+ priv->_agn.ict_index = 0;
+ iwl_write32(priv, CSR_INT, priv->inta_mask);
+ iwl_enable_interrupts(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+/* The device is going down; disable ICT interrupt usage */
+void iwl_disable_ict(struct iwl_priv *priv)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->_agn.use_ict = false;
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static irqreturn_t iwl_isr(int irq, void *data)
+{
+ struct iwl_priv *priv = data;
+ u32 inta, inta_mask;
+ unsigned long flags;
+#ifdef CONFIG_IWLWIFI_DEBUG
+ u32 inta_fh;
+#endif
+ if (!priv)
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Disable (but don't clear!) interrupts here to avoid
+ * back-to-back ISRs and sporadic interrupts from our NIC.
+ * If we have something to service, the tasklet will re-enable ints.
+ * If we *don't* have something, we'll re-enable before leaving here. */
+ inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
+ iwl_write32(priv, CSR_INT_MASK, 0x00000000);
+
+ /* Discover which interrupts are active/pending */
+ inta = iwl_read32(priv, CSR_INT);
+
+ /* Ignore interrupt if there's nothing in NIC to service.
+ * This may be due to IRQ shared with another device,
+ * or due to sporadic interrupts thrown from our NIC. */
+ if (!inta) {
+ IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
+ goto none;
+ }
+
+ if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
+ /* Hardware disappeared. It might have already raised
+ * an interrupt */
+ IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
+ goto unplugged;
+ }
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
+ inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
+ IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, "
+ "fh 0x%08x\n", inta, inta_mask, inta_fh);
+ }
+#endif
+
+ priv->_agn.inta |= inta;
+ /* iwl_irq_tasklet() will service interrupts and re-enable them */
+ if (likely(inta))
+ tasklet_schedule(&priv->irq_tasklet);
+ else if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta)
+ iwl_enable_interrupts(priv);
+
+ unplugged:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return IRQ_HANDLED;
+
+ none:
+ /* re-enable interrupts here since we don't have anything to service. */
+ /* only re-enable if disabled by this irq handler and no tasklet was scheduled. */
+ if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta)
+ iwl_enable_interrupts(priv);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return IRQ_NONE;
+}
+
+/* Interrupt handler using the ICT table. With this handler the driver stops
+ * reading the INTA register to fetch the device's interrupts, since reading
+ * that register is expensive. Instead, the device writes interrupt causes
+ * into the ICT DRAM table, increments its index and fires an interrupt to
+ * the driver. The driver ORs all ICT table entries from the current index up
+ * to the first entry with a 0 value; the result is the set of interrupts to
+ * service. The driver then clears the processed entries and updates its index.
+ */
+irqreturn_t iwl_isr_ict(int irq, void *data)
+{
+ struct iwl_priv *priv = data;
+ u32 inta, inta_mask;
+ u32 val = 0;
+ unsigned long flags;
+
+ if (!priv)
+ return IRQ_NONE;
+
+ /* dram interrupt table not set yet,
+ * use legacy interrupt.
+ */
+ if (!priv->_agn.use_ict)
+ return iwl_isr(irq, data);
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Disable (but don't clear!) interrupts here to avoid
+ * back-to-back ISRs and sporadic interrupts from our NIC.
+ * If we have something to service, the tasklet will re-enable ints.
+ * If we *don't* have something, we'll re-enable before leaving here.
+ */
+ inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
+ iwl_write32(priv, CSR_INT_MASK, 0x00000000);
+
+
+ /* Ignore interrupt if there's nothing in NIC to service.
+ * This may be due to IRQ shared with another device,
+ * or due to sporadic interrupts thrown from our NIC. */
+ if (!priv->_agn.ict_tbl[priv->_agn.ict_index]) {
+ IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
+ goto none;
+ }
+
+ /* read all entries that are not 0, starting from ict_index */
+ while (priv->_agn.ict_tbl[priv->_agn.ict_index]) {
+
+ val |= le32_to_cpu(priv->_agn.ict_tbl[priv->_agn.ict_index]);
+ IWL_DEBUG_ISR(priv, "ICT index %d value 0x%08X\n",
+ priv->_agn.ict_index,
+ le32_to_cpu(priv->_agn.ict_tbl[priv->_agn.ict_index]));
+ priv->_agn.ict_tbl[priv->_agn.ict_index] = 0;
+ priv->_agn.ict_index = iwl_queue_inc_wrap(priv->_agn.ict_index,
+ ICT_COUNT);
+
+ }
+
+ /* We should not get this value, just ignore it. */
+ if (val == 0xffffffff)
+ val = 0;
+
+ /*
+ * This is a w/a for a h/w bug. The h/w bug may cause the Rx bit
+ * (bit 15 before shifting it to 31) to clear when using interrupt
+ * coalescing. Fortunately, bits 18 and 19 stay set when this happens,
+ * so we use them to decide on the real state of the Rx bit.
+ * In other words, bit 15 is set if bit 18 or bit 19 are set.
+ */
+ if (val & 0xC0000)
+ val |= 0x8000;
+
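+ /* Unpack the ICT value back into the INTA layout: ICT bits 0-7 map to
+ * INTA bits 0-7, and ICT bits 8-15 map to INTA bits 24-31. */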
+ inta = (0xff & val) | ((0xff00 & val) << 16);
+ IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
+ inta, inta_mask, val);
+
+ inta &= priv->inta_mask;
+ priv->_agn.inta |= inta;
+
+ /* iwl_irq_tasklet() will service interrupts and re-enable them */
+ if (likely(inta))
+ tasklet_schedule(&priv->irq_tasklet);
+ else if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta) {
+ /* Re-enable interrupts only if they were disabled by this handler
+ * and no tasklet was scheduled; otherwise the tasklet will
+ * re-enable them itself.
+ */
+ iwl_enable_interrupts(priv);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return IRQ_HANDLED;
+
+ none:
+ /* re-enable interrupts here since we don't have anything to service.
+ * only Re-enable if disabled by irq.
+ */
+ if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta)
+ iwl_enable_interrupts(priv);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return IRQ_NONE;
+}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
new file mode 100644
index 000000000000..637d7b62fb56
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -0,0 +1,1530 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+#include "iwl-agn-hw.h"
+#include "iwl-agn.h"
+#include "iwl-sta.h"
+
+static inline u32 iwlagn_get_scd_ssn(struct iwl5000_tx_resp *tx_resp)
+{
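+ /* Each per-frame status entry is one 32-bit word, so indexing past
+ * frame_count entries lands on the scheduler SSN word that follows them;
+ * mask it down to the sequence-number range. */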
+ return le32_to_cpup((__le32 *)&tx_resp->status +
+ tx_resp->frame_count) & MAX_SN;
+}
+
+static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv,
+ struct iwl_ht_agg *agg,
+ struct iwl5000_tx_resp *tx_resp,
+ int txq_id, u16 start_idx)
+{
+ u16 status;
+ struct agg_tx_status *frame_status = &tx_resp->status;
+ struct ieee80211_tx_info *info = NULL;
+ struct ieee80211_hdr *hdr = NULL;
+ u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
+ int i, sh, idx;
+ u16 seq;
+
+ if (agg->wait_for_ba)
+ IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");
+
+ agg->frame_count = tx_resp->frame_count;
+ agg->start_idx = start_idx;
+ agg->rate_n_flags = rate_n_flags;
+ agg->bitmap = 0;
+
+ /* # frames attempted by Tx command */
+ if (agg->frame_count == 1) {
+ /* Only one frame was attempted; no block-ack will arrive */
+ status = le16_to_cpu(frame_status[0].status);
+ idx = start_idx;
+
+ /* FIXME: code repetition */
+ IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
+ agg->frame_count, agg->start_idx, idx);
+
+ info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]);
+ info->status.rates[0].count = tx_resp->failure_frame + 1;
+ info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+ info->flags |= iwl_tx_status_to_mac80211(status);
+ iwlagn_hwrate_to_tx_control(priv, rate_n_flags, info);
+
+ /* FIXME: code repetition end */
+
+ IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
+ status & 0xff, tx_resp->failure_frame);
+ IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags);
+
+ agg->wait_for_ba = 0;
+ } else {
+ /* Two or more frames were attempted; expect block-ack */
+ u64 bitmap = 0;
+ int start = agg->start_idx;
+
+ /* Construct bit-map of pending frames within Tx window */
+ for (i = 0; i < agg->frame_count; i++) {
+ u16 sc;
+ status = le16_to_cpu(frame_status[i].status);
+ seq = le16_to_cpu(frame_status[i].sequence);
+ idx = SEQ_TO_INDEX(seq);
+ txq_id = SEQ_TO_QUEUE(seq);
+
+ if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
+ AGG_TX_STATE_ABORT_MSK))
+ continue;
+
+ IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
+ agg->frame_count, txq_id, idx);
+
+ hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
+ if (!hdr) {
+ IWL_ERR(priv,
+ "BUG_ON idx doesn't point to valid skb"
+ " idx=%d, txq_id=%d\n", idx, txq_id);
+ return -1;
+ }
+
+ sc = le16_to_cpu(hdr->seq_ctrl);
+ if (idx != (SEQ_TO_SN(sc) & 0xff)) {
+ IWL_ERR(priv,
+ "BUG_ON idx doesn't match seq control"
+ " idx=%d, seq_idx=%d, seq=%d\n",
+ idx, SEQ_TO_SN(sc),
+ hdr->seq_ctrl);
+ return -1;
+ }
+
+ IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
+ i, idx, SEQ_TO_SN(sc));
+
+ sh = idx - start;
+ if (sh > 64) {
+ sh = (start - idx) + 0xff;
+ bitmap = bitmap << sh;
+ sh = 0;
+ start = idx;
+ } else if (sh < -64)
+ sh = 0xff - (start - idx);
+ else if (sh < 0) {
+ sh = start - idx;
+ start = idx;
+ bitmap = bitmap << sh;
+ sh = 0;
+ }
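+ /* 'sh' is now the frame's offset from 'start' within the (possibly
+ * wrapped) sequence window; mark the frame as pending in the bitmap. */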
+ bitmap |= 1ULL << sh;
+ IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
+ start, (unsigned long long)bitmap);
+ }
+
+ agg->bitmap = bitmap;
+ agg->start_idx = start;
+ IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
+ agg->frame_count, agg->start_idx,
+ (unsigned long long)agg->bitmap);
+
+ if (bitmap)
+ agg->wait_for_ba = 1;
+ }
+ return 0;
+}
+
+void iwl_check_abort_status(struct iwl_priv *priv,
+ u8 frame_count, u32 status)
+{
+ if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
+ IWL_ERR(priv, "TODO: Implement Tx flush command!!!\n");
+ }
+}
+
+static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+ int txq_id = SEQ_TO_QUEUE(sequence);
+ int index = SEQ_TO_INDEX(sequence);
+ struct iwl_tx_queue *txq = &priv->txq[txq_id];
+ struct ieee80211_tx_info *info;
+ struct iwl5000_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
+ u32 status = le16_to_cpu(tx_resp->status.status);
+ int tid;
+ int sta_id;
+ int freed;
+
+ if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
+ IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
+ "is out of range [0-%d] %d %d\n", txq_id,
+ index, txq->q.n_bd, txq->q.write_ptr,
+ txq->q.read_ptr);
+ return;
+ }
+
+ info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]);
+ memset(&info->status, 0, sizeof(info->status));
+
+ tid = (tx_resp->ra_tid & IWL50_TX_RES_TID_MSK) >> IWL50_TX_RES_TID_POS;
+ sta_id = (tx_resp->ra_tid & IWL50_TX_RES_RA_MSK) >> IWL50_TX_RES_RA_POS;
+
+ if (txq->sched_retry) {
+ const u32 scd_ssn = iwlagn_get_scd_ssn(tx_resp);
+ struct iwl_ht_agg *agg = NULL;
+
+ agg = &priv->stations[sta_id].tid[tid].agg;
+
+ iwlagn_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
+
+ /* check if BAR is needed */
+ if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
+ info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
+
+ if (txq->q.read_ptr != (scd_ssn & 0xff)) {
+ index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
+ IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim "
+ "scd_ssn=%d idx=%d txq=%d swq=%d\n",
+ scd_ssn , index, txq_id, txq->swq_id);
+
+ freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
+ iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
+
+ if (priv->mac80211_registered &&
+ (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
+ (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) {
+ if (agg->state == IWL_AGG_OFF)
+ iwl_wake_queue(priv, txq_id);
+ else
+ iwl_wake_queue(priv, txq->swq_id);
+ }
+ }
+ } else {
+ BUG_ON(txq_id != txq->swq_id);
+
+ info->status.rates[0].count = tx_resp->failure_frame + 1;
+ info->flags |= iwl_tx_status_to_mac80211(status);
+ iwlagn_hwrate_to_tx_control(priv,
+ le32_to_cpu(tx_resp->rate_n_flags),
+ info);
+
+ IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags "
+ "0x%x retries %d\n",
+ txq_id,
+ iwl_get_tx_fail_reason(status), status,
+ le32_to_cpu(tx_resp->rate_n_flags),
+ tx_resp->failure_frame);
+
+ freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
+ iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
+
+ if (priv->mac80211_registered &&
+ (iwl_queue_space(&txq->q) > txq->q.low_mark))
+ iwl_wake_queue(priv, txq_id);
+ }
+
+ iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
+
+ iwl_check_abort_status(priv, tx_resp->frame_count, status);
+}
+
+void iwlagn_rx_handler_setup(struct iwl_priv *priv)
+{
+ /* init calibration handlers */
+ priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
+ iwlagn_rx_calib_result;
+ priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] =
+ iwlagn_rx_calib_complete;
+ priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
+}
+
+void iwlagn_setup_deferred_work(struct iwl_priv *priv)
+{
+ /* in agn, the tx power calibration is done in uCode */
+ priv->disable_tx_power_cal = 1;
+}
+
+int iwlagn_hw_valid_rtc_data_addr(u32 addr)
+{
+ return (addr >= IWLAGN_RTC_DATA_LOWER_BOUND) &&
+ (addr < IWLAGN_RTC_DATA_UPPER_BOUND);
+}
+
+int iwlagn_send_tx_power(struct iwl_priv *priv)
+{
+ struct iwl5000_tx_power_dbm_cmd tx_power_cmd;
+ u8 tx_ant_cfg_cmd;
+
+ /* the command takes half-dBm units, so multiply the dBm limit by 2 */
+ tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);
+
+ if (priv->tx_power_lmt_in_half_dbm &&
+ priv->tx_power_lmt_in_half_dbm < tx_power_cmd.global_lmt) {
+ /*
+ * For newer devices that use the enhanced/extended tx power
+ * table in the EEPROM, the format is in half dBm. The driver
+ * needs to convert it to dBm before reporting to mac80211.
+ * Doing so can lose 1/2 dBm of resolution: the driver rounds
+ * up before reporting, which may push the tx power 1/2 dBm
+ * over the regulatory limit. Check for that here: if
+ * "tx_power_user_lmt" is higher than the EEPROM value (in
+ * half-dBm format), lower the tx power to the EEPROM value.
+ */
+ tx_power_cmd.global_lmt = priv->tx_power_lmt_in_half_dbm;
+ }
+ tx_power_cmd.flags = IWL50_TX_POWER_NO_CLOSED;
+ tx_power_cmd.srv_chan_lmt = IWL50_TX_POWER_AUTO;
+
+ if (IWL_UCODE_API(priv->ucode_ver) == 1)
+ tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1;
+ else
+ tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
+
+ return iwl_send_cmd_pdu_async(priv, tx_ant_cfg_cmd,
+ sizeof(tx_power_cmd), &tx_power_cmd,
+ NULL);
+}
+
+void iwlagn_temperature(struct iwl_priv *priv)
+{
+ /* store temperature from statistics (in Celsius) */
+ priv->temperature = le32_to_cpu(priv->statistics.general.temperature);
+ iwl_tt_handler(priv);
+}
+
+u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv)
+{
+ struct iwl_eeprom_calib_hdr {
+ u8 version;
+ u8 pa_type;
+ u16 voltage;
+ } *hdr;
+
+ hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
+ EEPROM_CALIB_ALL);
+ return hdr->version;
+
+}
+
+/*
+ * EEPROM
+ */
+static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
+{
+ u16 offset = 0;
+
+ if ((address & INDIRECT_ADDRESS) == 0)
+ return address;
+
+ switch (address & INDIRECT_TYPE_MSK) {
+ case INDIRECT_HOST:
+ offset = iwl_eeprom_query16(priv, EEPROM_LINK_HOST);
+ break;
+ case INDIRECT_GENERAL:
+ offset = iwl_eeprom_query16(priv, EEPROM_LINK_GENERAL);
+ break;
+ case INDIRECT_REGULATORY:
+ offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY);
+ break;
+ case INDIRECT_CALIBRATION:
+ offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION);
+ break;
+ case INDIRECT_PROCESS_ADJST:
+ offset = iwl_eeprom_query16(priv, EEPROM_LINK_PROCESS_ADJST);
+ break;
+ case INDIRECT_OTHERS:
+ offset = iwl_eeprom_query16(priv, EEPROM_LINK_OTHERS);
+ break;
+ default:
+ IWL_ERR(priv, "illegal indirect type: 0x%X\n",
+ address & INDIRECT_TYPE_MSK);
+ break;
+ }
+
+ /* translate the offset from words to bytes */
+ return (address & ADDRESS_MSK) + (offset << 1);
+}
+
+const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv,
+ size_t offset)
+{
+ u32 address = eeprom_indirect_address(priv, offset);
+ BUG_ON(address >= priv->cfg->eeprom_size);
+ return &priv->eeprom[address];
+}
+
+struct iwl_mod_params iwlagn_mod_params = {
+ .amsdu_size_8K = 1,
+ .restart_fw = 1,
+ /* the rest are 0 by default */
+};
+
+void iwlagn_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
+{
+ unsigned long flags;
+ int i;
+ spin_lock_irqsave(&rxq->lock, flags);
+ INIT_LIST_HEAD(&rxq->rx_free);
+ INIT_LIST_HEAD(&rxq->rx_used);
+ /* Fill the rx_used queue with _all_ of the Rx buffers */
+ for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+ /* In the reset function, these buffers may have been allocated
+ * to an SKB, so we need to unmap and free potential storage */
+ if (rxq->pool[i].page != NULL) {
+ pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+ PAGE_SIZE << priv->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ __iwl_free_pages(priv, rxq->pool[i].page);
+ rxq->pool[i].page = NULL;
+ }
+ list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+ }
+
+ for (i = 0; i < RX_QUEUE_SIZE; i++)
+ rxq->queue[i] = NULL;
+
+ /* Set us so that we have processed and used all buffers, but have
+ * not restocked the Rx queue with fresh buffers */
+ rxq->read = rxq->write = 0;
+ rxq->write_actual = 0;
+ rxq->free_count = 0;
+ spin_unlock_irqrestore(&rxq->lock, flags);
+}
+
+int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
+{
+ u32 rb_size;
+ const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
+ u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */
+
+ if (!priv->cfg->use_isr_legacy)
+ rb_timeout = RX_RB_TIMEOUT;
+
+ if (priv->cfg->mod_params->amsdu_size_8K)
+ rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
+ else
+ rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
+
+ /* Stop Rx DMA */
+ iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+
+ /* Reset driver's Rx queue write index */
+ iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+
+ /* Tell device where to find RBD circular buffer in DRAM */
+ iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+ (u32)(rxq->dma_addr >> 8));
+
+ /* Tell device where in DRAM to update its Rx status */
+ iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
+ rxq->rb_stts_dma >> 4);
+
+ /* Enable Rx DMA
+ * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
+ * the credit mechanism in 5000 HW RX FIFO
+ * Direct rx interrupts to hosts
+ * Rx buffer size 4 or 8k
+ * RB timeout 0x10
+ * 256 RBDs
+ */
+ iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
+ FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+ FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
+ FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+ FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
+ rb_size|
+ (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
+ (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+
+ /* Set interrupt coalescing timer to default (2048 usecs) */
+ iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+
+ return 0;
+}
+
+int iwlagn_hw_nic_init(struct iwl_priv *priv)
+{
+ unsigned long flags;
+ struct iwl_rx_queue *rxq = &priv->rxq;
+ int ret;
+
+ /* nic_init */
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->cfg->ops->lib->apm_ops.init(priv);
+
+ /* Set interrupt coalescing calibration timer to default (512 usecs) */
+ iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN);
+
+ priv->cfg->ops->lib->apm_ops.config(priv);
+
+ /* Allocate the RX queue, or reset if it is already allocated */
+ if (!rxq->bd) {
+ ret = iwl_rx_queue_alloc(priv);
+ if (ret) {
+ IWL_ERR(priv, "Unable to initialize Rx queue\n");
+ return -ENOMEM;
+ }
+ } else
+ iwlagn_rx_queue_reset(priv, rxq);
+
+ iwlagn_rx_replenish(priv);
+
+ iwlagn_rx_init(priv, rxq);
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ rxq->need_update = 1;
+ iwl_rx_queue_update_write_ptr(priv, rxq);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* Allocate or reset and init all Tx and Command queues */
+ if (!priv->txq) {
+ ret = iwlagn_txq_ctx_alloc(priv);
+ if (ret)
+ return ret;
+ } else
+ iwlagn_txq_ctx_reset(priv);
+
+ set_bit(STATUS_INIT, &priv->status);
+
+ return 0;
+}
+
+/**
+ * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
+ */
+static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv,
+ dma_addr_t dma_addr)
+{
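+ /* RBD pointers hold the DMA address in 256-byte units (shifted right by
+ * 8), which is why Rx buffers must be 256-byte aligned. */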
+ return cpu_to_le32((u32)(dma_addr >> 8));
+}
+
+/**
+ * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
+ *
+ * If there are slots in the RX queue that need to be restocked,
+ * and we have free pre-allocated buffers, fill the ranks as much
+ * as we can, pulling from rx_free.
+ *
+ * This moves the 'write' index forward to catch up with 'processed', and
+ * also updates the memory address in the firmware to reference the new
+ * target buffer.
+ */
+void iwlagn_rx_queue_restock(struct iwl_priv *priv)
+{
+ struct iwl_rx_queue *rxq = &priv->rxq;
+ struct list_head *element;
+ struct iwl_rx_mem_buffer *rxb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rxq->lock, flags);
+ while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
+ /* The overwritten rxb must be a used one */
+ rxb = rxq->queue[rxq->write];
+ BUG_ON(rxb && rxb->page);
+
+ /* Get next free Rx buffer, remove from free list */
+ element = rxq->rx_free.next;
+ rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+ list_del(element);
+
+ /* Point to Rx buffer via next RBD in circular buffer */
+ rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(priv,
+ rxb->page_dma);
+ rxq->queue[rxq->write] = rxb;
+ rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
+ rxq->free_count--;
+ }
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ /* If the pre-allocated buffer pool is dropping low, schedule to
+ * refill it */
+ if (rxq->free_count <= RX_LOW_WATERMARK)
+ queue_work(priv->workqueue, &priv->rx_replenish);
+
+
+ /* If we've added more space for the firmware to place data, tell it.
+ * Increment device's write pointer in multiples of 8. */
+ if (rxq->write_actual != (rxq->write & ~0x7)) {
+ spin_lock_irqsave(&rxq->lock, flags);
+ rxq->need_update = 1;
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ iwl_rx_queue_update_write_ptr(priv, rxq);
+ }
+}
+
+/**
+ * iwlagn_rx_replenish - Move all used packets from rx_used to rx_free
+ *
+ * When moving to rx_free an SKB is allocated for the slot.
+ *
+ * Also restock the Rx queue via iwl_rx_queue_restock.
+ * This is called as a scheduled work item (except during initialization).
+ */
+void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
+{
+ struct iwl_rx_queue *rxq = &priv->rxq;
+ struct list_head *element;
+ struct iwl_rx_mem_buffer *rxb;
+ struct page *page;
+ unsigned long flags;
+ gfp_t gfp_mask = priority;
+
+ while (1) {
+ spin_lock_irqsave(&rxq->lock, flags);
+ if (list_empty(&rxq->rx_used)) {
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ if (rxq->free_count > RX_LOW_WATERMARK)
+ gfp_mask |= __GFP_NOWARN;
+
+ if (priv->hw_params.rx_page_order > 0)
+ gfp_mask |= __GFP_COMP;
+
+ /* Alloc a new receive buffer */
+ page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
+ if (!page) {
+ if (net_ratelimit())
+ IWL_DEBUG_INFO(priv, "alloc_pages failed, "
+ "order: %d\n",
+ priv->hw_params.rx_page_order);
+
+ if ((rxq->free_count <= RX_LOW_WATERMARK) &&
+ net_ratelimit())
+ IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
+ priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
+ rxq->free_count);
+ /* We don't reschedule replenish work here -- we will
+ * call the restock method and if it still needs
+ * more buffers it will schedule replenish */
+ return;
+ }
+
+ spin_lock_irqsave(&rxq->lock, flags);
+
+ if (list_empty(&rxq->rx_used)) {
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ __free_pages(page, priv->hw_params.rx_page_order);
+ return;
+ }
+ element = rxq->rx_used.next;
+ rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+ list_del(element);
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ BUG_ON(rxb->page);
+ rxb->page = page;
+ /* Get physical address of the RB */
+ rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
+ PAGE_SIZE << priv->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ /* dma address must be no more than 36 bits */
+ BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
+ /* and also 256 byte aligned! */
+ BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
+
+ spin_lock_irqsave(&rxq->lock, flags);
+
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ priv->alloc_rxb_page++;
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ }
+}
+
+void iwlagn_rx_replenish(struct iwl_priv *priv)
+{
+ unsigned long flags;
+
+ iwlagn_rx_allocate(priv, GFP_KERNEL);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ iwlagn_rx_queue_restock(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+void iwlagn_rx_replenish_now(struct iwl_priv *priv)
+{
+ iwlagn_rx_allocate(priv, GFP_ATOMIC);
+
+ iwlagn_rx_queue_restock(priv);
+}
+
+/* Assumes that the page field of the buffers in 'pool' is kept accurate.
+ * If a page has been detached, the pool entry must have its page set to NULL.
+ * This free routine walks the list of pool entries and, for each non-NULL
+ * page, unmaps and frees it.
+ */
+void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
+{
+ int i;
+ for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
+ if (rxq->pool[i].page != NULL) {
+ pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+ PAGE_SIZE << priv->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ __iwl_free_pages(priv, rxq->pool[i].page);
+ rxq->pool[i].page = NULL;
+ }
+ }
+
+ dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+ rxq->dma_addr);
+ dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
+ rxq->rb_stts, rxq->rb_stts_dma);
+ rxq->bd = NULL;
+ rxq->rb_stts = NULL;
+}
+
+int iwlagn_rxq_stop(struct iwl_priv *priv)
+{
+
+ /* stop Rx DMA */
+ iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+ iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
+ FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+
+ return 0;
+}
+
+int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
+{
+ int idx = 0;
+ int band_offset = 0;
+
+ /* HT rate format: mac80211 wants an MCS number, which is just LSB */
+ if (rate_n_flags & RATE_MCS_HT_MSK) {
+ idx = (rate_n_flags & 0xff);
+ return idx;
+ /* Legacy rate format, search for match in table */
+ } else {
+ if (band == IEEE80211_BAND_5GHZ)
+ band_offset = IWL_FIRST_OFDM_RATE;
+ for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
+ if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
+ return idx - band_offset;
+ }
+
+ return -1;
+}
+
+/* Calc max signal level (dBm) among 3 possible receivers */
+static inline int iwlagn_calc_rssi(struct iwl_priv *priv,
+ struct iwl_rx_phy_res *rx_resp)
+{
+ return priv->cfg->ops->utils->calc_rssi(priv, rx_resp);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+/**
+ * iwlagn_dbg_report_frame - dump frame to syslog during debug sessions
+ *
+ * You may hack this function to show different aspects of received frames,
+ * including selective frame dumps.
+ * group100 parameter selects whether to show 1 out of 100 good data frames.
+ * All beacon and probe response frames are printed.
+ */
+static void iwlagn_dbg_report_frame(struct iwl_priv *priv,
+ struct iwl_rx_phy_res *phy_res, u16 length,
+ struct ieee80211_hdr *header, int group100)
+{
+ u32 to_us;
+ u32 print_summary = 0;
+ u32 print_dump = 0; /* set to 1 to dump all frames' contents */
+ u32 hundred = 0;
+ u32 dataframe = 0;
+ __le16 fc;
+ u16 seq_ctl;
+ u16 channel;
+ u16 phy_flags;
+ u32 rate_n_flags;
+ u32 tsf_low;
+ int rssi;
+
+ if (likely(!(iwl_get_debug_level(priv) & IWL_DL_RX)))
+ return;
+
+ /* MAC header */
+ fc = header->frame_control;
+ seq_ctl = le16_to_cpu(header->seq_ctrl);
+
+ /* metadata */
+ channel = le16_to_cpu(phy_res->channel);
+ phy_flags = le16_to_cpu(phy_res->phy_flags);
+ rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
+
+ /* signal statistics */
+ rssi = iwlagn_calc_rssi(priv, phy_res);
+ tsf_low = le64_to_cpu(phy_res->timestamp) & 0x0ffffffff;
+
+ to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
+
+ /* if data frame is to us and all is good,
+ * (optionally) print summary for only 1 out of every 100 */
+ if (to_us && (fc & ~cpu_to_le16(IEEE80211_FCTL_PROTECTED)) ==
+ cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
+ dataframe = 1;
+ if (!group100)
+ print_summary = 1; /* print each frame */
+ else if (priv->framecnt_to_us < 100) {
+ priv->framecnt_to_us++;
+ print_summary = 0;
+ } else {
+ priv->framecnt_to_us = 0;
+ print_summary = 1;
+ hundred = 1;
+ }
+ } else {
+ /* print summary for all other frames */
+ print_summary = 1;
+ }
+
+ if (print_summary) {
+ char *title;
+ int rate_idx;
+ u32 bitrate;
+
+ if (hundred)
+ title = "100Frames";
+ else if (ieee80211_has_retry(fc))
+ title = "Retry";
+ else if (ieee80211_is_assoc_resp(fc))
+ title = "AscRsp";
+ else if (ieee80211_is_reassoc_resp(fc))
+ title = "RasRsp";
+ else if (ieee80211_is_probe_resp(fc)) {
+ title = "PrbRsp";
+ print_dump = 1; /* dump frame contents */
+ } else if (ieee80211_is_beacon(fc)) {
+ title = "Beacon";
+ print_dump = 1; /* dump frame contents */
+ } else if (ieee80211_is_atim(fc))
+ title = "ATIM";
+ else if (ieee80211_is_auth(fc))
+ title = "Auth";
+ else if (ieee80211_is_deauth(fc))
+ title = "DeAuth";
+ else if (ieee80211_is_disassoc(fc))
+ title = "DisAssoc";
+ else
+ title = "Frame";
+
+ rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
+ if (unlikely((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT))) {
+ bitrate = 0;
+ WARN_ON_ONCE(1);
+ } else {
+ bitrate = iwl_rates[rate_idx].ieee / 2;
+ }
+
+ /* print frame summary.
+ * MAC addresses show just the last byte (for brevity),
+ * but you can hack it to show more, if you'd like to. */
+ if (dataframe)
+ IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, "
+ "len=%u, rssi=%d, chnl=%d, rate=%u,\n",
+ title, le16_to_cpu(fc), header->addr1[5],
+ length, rssi, channel, bitrate);
+ else {
+ /* src/dst addresses assume managed mode */
+ IWL_DEBUG_RX(priv, "%s: 0x%04x, dst=0x%02x, src=0x%02x, "
+ "len=%u, rssi=%d, tim=%lu usec, "
+ "phy=0x%02x, chnl=%d\n",
+ title, le16_to_cpu(fc), header->addr1[5],
+ header->addr3[5], length, rssi,
+ tsf_low - priv->scan_start_tsf,
+ phy_flags, channel);
+ }
+ }
+ if (print_dump)
+ iwl_print_hex_dump(priv, IWL_DL_RX, header, length);
+}
+#endif
+
+static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
+{
+ u32 decrypt_out = 0;
+
+ if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
+ RX_RES_STATUS_STATION_FOUND)
+ decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
+ RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
+
+ decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
+
+ /* packet was not encrypted */
+ if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
+ RX_RES_STATUS_SEC_TYPE_NONE)
+ return decrypt_out;
+
+ /* packet was encrypted with unknown alg */
+ if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
+ RX_RES_STATUS_SEC_TYPE_ERR)
+ return decrypt_out;
+
+ /* decryption was not done in HW */
+ if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
+ RX_MPDU_RES_STATUS_DEC_DONE_MSK)
+ return decrypt_out;
+
+ switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
+
+ case RX_RES_STATUS_SEC_TYPE_CCMP:
+ /* alg is CCM: check MIC only */
+ if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
+ /* Bad MIC */
+ decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
+ else
+ decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
+
+ break;
+
+ case RX_RES_STATUS_SEC_TYPE_TKIP:
+ if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
+ /* Bad TTAK */
+ decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
+ break;
+ }
+ /* fall through if TTAK OK */
+ default:
+ if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
+ decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
+ else
+ decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
+ break;
+ };
+
+ IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
+ decrypt_in, decrypt_out);
+
+ return decrypt_out;
+}
+
+static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
+ struct ieee80211_hdr *hdr,
+ u16 len,
+ u32 ampdu_status,
+ struct iwl_rx_mem_buffer *rxb,
+ struct ieee80211_rx_status *stats)
+{
+ struct sk_buff *skb;
+ __le16 fc = hdr->frame_control;
+
+ /* We only process data packets if the interface is open */
+ if (unlikely(!priv->is_open)) {
+ IWL_DEBUG_DROP_LIMIT(priv,
+ "Dropping packet while interface is not open.\n");
+ return;
+ }
+
+ /* In case of HW accelerated crypto and bad decryption, drop */
+ if (!priv->cfg->mod_params->sw_crypto &&
+ iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
+ return;
+
+ skb = dev_alloc_skb(128);
+ if (!skb) {
+ IWL_ERR(priv, "dev_alloc_skb failed\n");
+ return;
+ }
+
+ skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
+
+ iwl_update_stats(priv, false, fc, len);
+ memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
+
+ ieee80211_rx(priv->hw, skb);
+ priv->alloc_rxb_page--;
+ rxb->page = NULL;
+}
+
+/* Called for REPLY_RX (legacy ABG frames), or
+ * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
+void iwlagn_rx_reply_rx(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct ieee80211_hdr *header;
+ struct ieee80211_rx_status rx_status;
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_rx_phy_res *phy_res;
+ __le32 rx_pkt_status;
+ struct iwl4965_rx_mpdu_res_start *amsdu;
+ u32 len;
+ u32 ampdu_status;
+ u32 rate_n_flags;
+
+ /**
+ * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
+ * REPLY_RX: physical layer info is in this buffer
+ * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
+ * command and cached in priv->last_phy_res
+ *
+ * Here we set up local variables depending on which command is
+ * received.
+ */
+ if (pkt->hdr.cmd == REPLY_RX) {
+ phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
+ header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
+ + phy_res->cfg_phy_cnt);
+
+ len = le16_to_cpu(phy_res->byte_count);
+ rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
+ phy_res->cfg_phy_cnt + len);
+ ampdu_status = le32_to_cpu(rx_pkt_status);
+ } else {
+ if (!priv->_agn.last_phy_res_valid) {
+ IWL_ERR(priv, "MPDU frame without cached PHY data\n");
+ return;
+ }
+ phy_res = &priv->_agn.last_phy_res;
+ amsdu = (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
+ header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
+ len = le16_to_cpu(amsdu->byte_count);
+ rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
+ ampdu_status = iwlagn_translate_rx_status(priv,
+ le32_to_cpu(rx_pkt_status));
+ }
+
+ if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
+ IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
+ phy_res->cfg_phy_cnt);
+ return;
+ }
+
+ if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
+ !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
+ IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
+ le32_to_cpu(rx_pkt_status));
+ return;
+ }
+
+ /* This will be used in several places later */
+ rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
+
+ /* rx_status carries information about the packet to mac80211 */
+ rx_status.mactime = le64_to_cpu(phy_res->timestamp);
+ rx_status.freq =
+ ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel));
+ rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
+ IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ rx_status.rate_idx =
+ iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
+ rx_status.flag = 0;
+
+ /* TSF isn't reliable. In order to allow smooth user experience,
+ * this workaround doesn't propagate it to mac80211 */
+ /*rx_status.flag |= RX_FLAG_TSFT;*/
+
+ priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
+
+ /* Find max signal strength (dBm) among 3 antenna/receiver chains */
+ rx_status.signal = iwlagn_calc_rssi(priv, phy_res);
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ /* Set "1" to report good data frames in groups of 100 */
+ if (unlikely(iwl_get_debug_level(priv) & IWL_DL_RX))
+ iwlagn_dbg_report_frame(priv, phy_res, len, header, 1);
+#endif
+ iwl_dbg_log_rx_data_frame(priv, len, header);
+ IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
+ rx_status.signal, (unsigned long long)rx_status.mactime);
+
+ /*
+ * "antenna number"
+ *
+ * It seems that the antenna field in the phy flags value
+ * is actually a bit field. This is undefined by radiotap,
+ * it wants an actual antenna number but I always get "7"
+ * for most legacy frames I receive indicating that the
+ * same frame was received on all three RX chains.
+ *
+ * I think this field should be removed in favor of a
+ * new 802.11n radiotap field "RX chains" that is defined
+ * as a bitmask.
+ */
+ rx_status.antenna =
+ (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
+ >> RX_RES_PHY_FLAGS_ANTENNA_POS;
+
+ /* set the preamble flag if appropriate */
+ if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
+ rx_status.flag |= RX_FLAG_SHORTPRE;
+
+ /* Set up the HT phy flags */
+ if (rate_n_flags & RATE_MCS_HT_MSK)
+ rx_status.flag |= RX_FLAG_HT;
+ if (rate_n_flags & RATE_MCS_HT40_MSK)
+ rx_status.flag |= RX_FLAG_40MHZ;
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ rx_status.flag |= RX_FLAG_SHORT_GI;
+
+ iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status,
+ rxb, &rx_status);
+}
+
+/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
+ * This will be used later in iwlagn_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
+void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ priv->_agn.last_phy_res_valid = true;
+ memcpy(&priv->_agn.last_phy_res, pkt->u.raw,
+ sizeof(struct iwl_rx_phy_res));
+}
+
+static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
+ struct ieee80211_vif *vif,
+ enum ieee80211_band band,
+ struct iwl_scan_channel *scan_ch)
+{
+ const struct ieee80211_supported_band *sband;
+ const struct iwl_channel_info *ch_info;
+ u16 passive_dwell = 0;
+ u16 active_dwell = 0;
+ int i, added = 0;
+ u16 channel = 0;
+
+ sband = iwl_get_hw_mode(priv, band);
+ if (!sband) {
+ IWL_ERR(priv, "invalid band\n");
+ return added;
+ }
+
+ active_dwell = iwl_get_active_dwell_time(priv, band, 0);
+ passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);
+
+ if (passive_dwell <= active_dwell)
+ passive_dwell = active_dwell + 1;
+
+ /* only scan a single channel; good enough to reset the RF */
+ /* pick the first valid channel that is not in use */
+ if (band == IEEE80211_BAND_5GHZ) {
+ for (i = 14; i < priv->channel_count; i++) {
+ if (priv->channel_info[i].channel !=
+ le16_to_cpu(priv->staging_rxon.channel)) {
+ channel = priv->channel_info[i].channel;
+ ch_info = iwl_get_channel_info(priv,
+ band, channel);
+ if (is_channel_valid(ch_info))
+ break;
+ }
+ }
+ } else {
+ for (i = 0; i < 14; i++) {
+ if (priv->channel_info[i].channel !=
+ le16_to_cpu(priv->staging_rxon.channel)) {
+ channel =
+ priv->channel_info[i].channel;
+ ch_info = iwl_get_channel_info(priv,
+ band, channel);
+ if (is_channel_valid(ch_info))
+ break;
+ }
+ }
+ }
+ if (channel) {
+ scan_ch->channel = cpu_to_le16(channel);
+ scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
+ scan_ch->active_dwell = cpu_to_le16(active_dwell);
+ scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+ /* Set txpower levels to defaults */
+ scan_ch->dsp_atten = 110;
+ if (band == IEEE80211_BAND_5GHZ)
+ scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
+ else
+ scan_ch->tx_gain = ((1 << 5) | (5 << 3));
+ added++;
+ } else
+ IWL_ERR(priv, "no valid channel found\n");
+ return added;
+}
+
+static int iwl_get_channels_for_scan(struct iwl_priv *priv,
+ struct ieee80211_vif *vif,
+ enum ieee80211_band band,
+ u8 is_active, u8 n_probes,
+ struct iwl_scan_channel *scan_ch)
+{
+ struct ieee80211_channel *chan;
+ const struct ieee80211_supported_band *sband;
+ const struct iwl_channel_info *ch_info;
+ u16 passive_dwell = 0;
+ u16 active_dwell = 0;
+ int added, i;
+ u16 channel;
+
+ sband = iwl_get_hw_mode(priv, band);
+ if (!sband)
+ return 0;
+
+ active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
+ passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);
+
+ if (passive_dwell <= active_dwell)
+ passive_dwell = active_dwell + 1;
+
+ for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
+ chan = priv->scan_request->channels[i];
+
+ if (chan->band != band)
+ continue;
+
+ channel = ieee80211_frequency_to_channel(chan->center_freq);
+ scan_ch->channel = cpu_to_le16(channel);
+
+ ch_info = iwl_get_channel_info(priv, band, channel);
+ if (!is_channel_valid(ch_info)) {
+ IWL_DEBUG_SCAN(priv, "Channel %d is INVALID for this band.\n",
+ channel);
+ continue;
+ }
+
+ if (!is_active || is_channel_passive(ch_info) ||
+ (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
+ scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
+ else
+ scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
+
+ if (n_probes)
+ scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
+
+ scan_ch->active_dwell = cpu_to_le16(active_dwell);
+ scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+
+ /* Set txpower levels to defaults */
+ scan_ch->dsp_atten = 110;
+
+ /* NOTE: if we were doing 6Mb OFDM for scans we'd use
+ * power level:
+ * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
+ */
+ if (band == IEEE80211_BAND_5GHZ)
+ scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
+ else
+ scan_ch->tx_gain = ((1 << 5) | (5 << 3));
+
+ IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
+ channel, le32_to_cpu(scan_ch->type),
+ (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
+ "ACTIVE" : "PASSIVE",
+ (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
+ active_dwell : passive_dwell);
+
+ scan_ch++;
+ added++;
+ }
+
+ IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
+ return added;
+}
+
+void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
+{
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_SCAN_CMD,
+ .len = sizeof(struct iwl_scan_cmd),
+ .flags = CMD_SIZE_HUGE,
+ };
+ struct iwl_scan_cmd *scan;
+ struct ieee80211_conf *conf = NULL;
+ u32 rate_flags = 0;
+ u16 cmd_len;
+ u16 rx_chain = 0;
+ enum ieee80211_band band;
+ u8 n_probes = 0;
+ u8 rx_ant = priv->hw_params.valid_rx_ant;
+ u8 rate;
+ bool is_active = false;
+ int chan_mod;
+ u8 active_chains;
+
+ conf = ieee80211_get_hw_conf(priv->hw);
+
+ cancel_delayed_work(&priv->scan_check);
+
+ if (!iwl_is_ready(priv)) {
+ IWL_WARN(priv, "request scan called when driver not ready.\n");
+ goto done;
+ }
+
+ /* Make sure the scan wasn't canceled before this queued work
+ * was given the chance to run... */
+ if (!test_bit(STATUS_SCANNING, &priv->status))
+ goto done;
+
+ /* This should never be called or scheduled if there is currently
+ * a scan active in the hardware. */
+ if (test_bit(STATUS_SCAN_HW, &priv->status)) {
+ IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests in parallel. "
+ "Ignoring second request.\n");
+ goto done;
+ }
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
+ IWL_DEBUG_SCAN(priv, "Aborting scan due to device shutdown\n");
+ goto done;
+ }
+
+ if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
+ IWL_DEBUG_HC(priv, "Scan request while abort pending. Queuing.\n");
+ goto done;
+ }
+
+ if (iwl_is_rfkill(priv)) {
+ IWL_DEBUG_HC(priv, "Aborting scan due to RF Kill activation\n");
+ goto done;
+ }
+
+ if (!test_bit(STATUS_READY, &priv->status)) {
+ IWL_DEBUG_HC(priv, "Scan request while uninitialized. Queuing.\n");
+ goto done;
+ }
+
+ if (!priv->scan_cmd) {
+ priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
+ IWL_MAX_SCAN_SIZE, GFP_KERNEL);
+ if (!priv->scan_cmd) {
+ IWL_DEBUG_SCAN(priv,
+ "fail to allocate memory for scan\n");
+ goto done;
+ }
+ }
+ scan = priv->scan_cmd;
+ memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
+
+ scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
+ scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
+
+ if (iwl_is_associated(priv)) {
+ u16 interval = 0;
+ u32 extra;
+ u32 suspend_time = 100;
+ u32 scan_suspend_time = 100;
+ unsigned long flags;
+
+ IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
+ spin_lock_irqsave(&priv->lock, flags);
+ interval = vif ? vif->bss_conf.beacon_int : 0;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ scan->suspend_time = 0;
+ scan->max_out_time = cpu_to_le32(200 * 1024);
+ if (!interval)
+ interval = suspend_time;
+
+ extra = (suspend_time / interval) << 22;
+ scan_suspend_time = (extra |
+ ((suspend_time % interval) * 1024));
+ scan->suspend_time = cpu_to_le32(scan_suspend_time);
+ IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
+ scan_suspend_time, interval);
+ }
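The packing above stores the number of whole beacon intervals, shifted to bit 22, and the remainder scaled by 1024 in the lower bits. A worked example with the defaults used in this block (suspend_time = 100, interval defaulting to 100), for illustration only:

    /* quotient  = 100 / 100 = 1  ->  1 << 22  = 0x00400000 */
    /* remainder = 100 % 100 = 0  ->  0 * 1024 = 0x00000000 */
    /* scan->suspend_time = cpu_to_le32(0x00400000)          */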
+
+ if (priv->is_internal_short_scan) {
+ IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
+ } else if (priv->scan_request->n_ssids) {
+ int i, p = 0;
+ IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
+ for (i = 0; i < priv->scan_request->n_ssids; i++) {
+ /* always does wildcard anyway */
+ if (!priv->scan_request->ssids[i].ssid_len)
+ continue;
+ scan->direct_scan[p].id = WLAN_EID_SSID;
+ scan->direct_scan[p].len =
+ priv->scan_request->ssids[i].ssid_len;
+ memcpy(scan->direct_scan[p].ssid,
+ priv->scan_request->ssids[i].ssid,
+ priv->scan_request->ssids[i].ssid_len);
+ n_probes++;
+ p++;
+ }
+ is_active = true;
+ } else
+ IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
+
+ scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
+ scan->tx_cmd.sta_id = priv->hw_params.bcast_sta_id;
+ scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+
+ switch (priv->scan_band) {
+ case IEEE80211_BAND_2GHZ:
+ scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
+ chan_mod = le32_to_cpu(priv->active_rxon.flags & RXON_FLG_CHANNEL_MODE_MSK)
+ >> RXON_FLG_CHANNEL_MODE_POS;
+ if (chan_mod == CHANNEL_MODE_PURE_40) {
+ rate = IWL_RATE_6M_PLCP;
+ } else {
+ rate = IWL_RATE_1M_PLCP;
+ rate_flags = RATE_MCS_CCK_MSK;
+ }
+ scan->good_CRC_th = IWL_GOOD_CRC_TH_DISABLED;
+ break;
+ case IEEE80211_BAND_5GHZ:
+ rate = IWL_RATE_6M_PLCP;
+ /*
+ * If active scanning is requested but a certain channel is
+ * marked passive, we can do active scanning if we detect
+ * transmissions.
+ *
+ * There is an issue with some firmware versions that triggers
+ * a sysassert on a "good CRC threshold" of zero (== disabled),
+ * on a radar channel even though this means that we should NOT
+ * send probes.
+ *
+ * The "good CRC threshold" is the number of frames that we
+ * need to receive during our dwell time on a channel before
+ * sending out probes -- setting this to a huge value will
+ * mean we never reach it, but at the same time work around
+ * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
+ * here instead of IWL_GOOD_CRC_TH_DISABLED.
+ */
+ scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
+ IWL_GOOD_CRC_TH_NEVER;
+ break;
+ default:
+ IWL_WARN(priv, "Invalid scan band count\n");
+ goto done;
+ }
+
+ band = priv->scan_band;
+
+ if (priv->cfg->scan_antennas[band])
+ rx_ant = priv->cfg->scan_antennas[band];
+
+ priv->scan_tx_ant[band] =
+ iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band]);
+ rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
+ scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);
+
+ /* In power save mode use one chain, otherwise use all chains */
+ if (test_bit(STATUS_POWER_PMI, &priv->status)) {
+ /* rx_ant has been set to all valid chains previously */
+ active_chains = rx_ant &
+ ((u8)(priv->chain_noise_data.active_chains));
+ if (!active_chains)
+ active_chains = rx_ant;
+
+ IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
+ priv->chain_noise_data.active_chains);
+
+ rx_ant = first_antenna(active_chains);
+ }
+ /* MIMO is not used here, but value is required */
+ rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
+ rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
+ rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
+ rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
+ scan->rx_chain = cpu_to_le16(rx_chain);
+ if (!priv->is_internal_short_scan) {
+ cmd_len = iwl_fill_probe_req(priv,
+ (struct ieee80211_mgmt *)scan->data,
+ priv->scan_request->ie,
+ priv->scan_request->ie_len,
+ IWL_MAX_SCAN_SIZE - sizeof(*scan));
+ } else {
+ cmd_len = iwl_fill_probe_req(priv,
+ (struct ieee80211_mgmt *)scan->data,
+ NULL, 0,
+ IWL_MAX_SCAN_SIZE - sizeof(*scan));
+
+ }
+ scan->tx_cmd.len = cpu_to_le16(cmd_len);
+
+ scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
+ RXON_FILTER_BCON_AWARE_MSK);
+
+ if (priv->is_internal_short_scan) {
+ scan->channel_count =
+ iwl_get_single_channel_for_scan(priv, vif, band,
+ (void *)&scan->data[le16_to_cpu(
+ scan->tx_cmd.len)]);
+ } else {
+ scan->channel_count =
+ iwl_get_channels_for_scan(priv, vif, band,
+ is_active, n_probes,
+ (void *)&scan->data[le16_to_cpu(
+ scan->tx_cmd.len)]);
+ }
+ if (scan->channel_count == 0) {
+ IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
+ goto done;
+ }
+
+ cmd.len += le16_to_cpu(scan->tx_cmd.len) +
+ scan->channel_count * sizeof(struct iwl_scan_channel);
+ cmd.data = scan;
+ scan->len = cpu_to_le16(cmd.len);
+
+ set_bit(STATUS_SCAN_HW, &priv->status);
+ if (iwl_send_cmd_sync(priv, &cmd))
+ goto done;
+
+ queue_delayed_work(priv->workqueue, &priv->scan_check,
+ IWL_SCAN_CHECK_WATCHDOG);
+
+ return;
+
+ done:
+ /* Cannot perform scan. Make sure we clear the scanning
+ * bits from status so the next scan request can be performed.
+ * If we don't clear the scanning status bits here, all
+ * subsequent scan requests will fail.
+ */
+ clear_bit(STATUS_SCAN_HW, &priv->status);
+ clear_bit(STATUS_SCANNING, &priv->status);
+ /* inform mac80211 scan aborted */
+ queue_work(priv->workqueue, &priv->scan_completed);
+}
+
+int iwlagn_manage_ibss_station(struct iwl_priv *priv,
+ struct ieee80211_vif *vif, bool add)
+{
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+ if (add)
+ return iwl_add_bssid_station(priv, vif->bss_conf.bssid, true,
+ &vif_priv->ibss_bssid_sta_id);
+ return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id,
+ vif->bss_conf.bssid);
+}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 1460116d329f..bfcac5608d87 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -295,11 +295,11 @@ static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
return tl->total;
}
-static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
+static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
struct iwl_lq_sta *lq_data, u8 tid,
struct ieee80211_sta *sta)
{
- int ret;
+ int ret = -EAGAIN;
if (rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) {
IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
@@ -313,29 +313,29 @@ static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
*/
IWL_DEBUG_HT(priv, "Fail start Tx agg on tid: %d\n",
tid);
- ret = ieee80211_stop_tx_ba_session(sta, tid,
+ ieee80211_stop_tx_ba_session(sta, tid,
WLAN_BACK_INITIATOR);
}
- }
+ } else
+ IWL_ERR(priv, "Fail finding valid aggregation tid: %d\n", tid);
+ return ret;
}
static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
struct iwl_lq_sta *lq_data,
struct ieee80211_sta *sta)
{
- if ((tid < TID_MAX_LOAD_COUNT))
- rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
- else if (tid == IWL_AGG_ALL_TID)
- for (tid = 0; tid < TID_MAX_LOAD_COUNT; tid++)
- rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
- if (priv->cfg->use_rts_for_ht) {
- /*
- * switch to RTS/CTS if it is the prefer protection method
- * for HT traffic
- */
- IWL_DEBUG_HT(priv, "use RTS/CTS protection for HT\n");
- priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN;
- iwlcore_commit_rxon(priv);
+ if ((tid < TID_MAX_LOAD_COUNT) &&
+ !rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta)) {
+ if (priv->cfg->use_rts_for_ht) {
+ /*
+ * switch to RTS/CTS if it is the preferred protection
+ * method for HT traffic
+ */
+ IWL_DEBUG_HT(priv, "use RTS/CTS protection for HT\n");
+ priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN;
+ iwlcore_commit_rxon(priv);
+ }
}
}
@@ -611,10 +611,6 @@ static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
struct ieee80211_hdr *hdr,
enum iwl_table_type rate_type)
{
- if (hdr && is_multicast_ether_addr(hdr->addr1) &&
- lq_sta->active_rate_basic)
- return lq_sta->active_rate_basic;
-
if (is_legacy(rate_type)) {
return lq_sta->active_legacy_rate;
} else {
@@ -775,6 +771,15 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
+ /* Treat uninitialized rate scaling data same as non-existing. */
+ if (!lq_sta) {
+ IWL_DEBUG_RATE(priv, "Station rate scaling not created yet.\n");
+ return;
+ } else if (!lq_sta->drv) {
+ IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
+ return;
+ }
+
if (!ieee80211_is_data(hdr->frame_control) ||
info->flags & IEEE80211_TX_CTL_NO_ACK)
return;
@@ -784,10 +789,6 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
!(info->flags & IEEE80211_TX_STAT_AMPDU))
return;
- if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
- !lq_sta->ibss_sta_added)
- return;
-
/*
* Ignore this Tx frame response if its initial rate doesn't match
* that of latest Link Quality command. There may be stragglers
@@ -833,7 +834,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
lq_sta->missed_rate_counter++;
if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
lq_sta->missed_rate_counter = 0;
- iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC);
+ iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC, false);
}
/* Regardless, ignore this status info for outdated rate */
return;
@@ -867,14 +868,14 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type,
&rs_index);
rs_collect_tx_data(curr_tbl, rs_index,
- info->status.ampdu_ack_len,
- info->status.ampdu_ack_map);
+ info->status.ampdu_len,
+ info->status.ampdu_ack_len);
/* Update success/fail counts if not searching for new mode */
if (lq_sta->stay_in_tbl) {
- lq_sta->total_success += info->status.ampdu_ack_map;
- lq_sta->total_failed += (info->status.ampdu_ack_len -
- info->status.ampdu_ack_map);
+ lq_sta->total_success += info->status.ampdu_ack_len;
+ lq_sta->total_failed += (info->status.ampdu_len -
+ info->status.ampdu_ack_len);
}
} else {
/*
@@ -1913,7 +1914,7 @@ static u32 rs_update_rate_tbl(struct iwl_priv *priv,
/* Update uCode's rate table. */
rate = rate_n_flags_from_tbl(priv, tbl, index, is_green);
rs_fill_link_cmd(priv, lq_sta, rate);
- iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC);
+ iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC, false);
return rate;
}
@@ -2002,7 +2003,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
/* rates available for this association, and for modulation mode */
rate_mask = rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
- IWL_DEBUG_RATE(priv, "mask 0x%04X \n", rate_mask);
+ IWL_DEBUG_RATE(priv, "mask 0x%04X\n", rate_mask);
/* mask with station rate restriction */
if (is_legacy(tbl->lq_type)) {
@@ -2077,10 +2078,12 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
}
/* Else we have enough samples; calculate estimate of
* actual average throughput */
-
- /* Sanity-check TPT calculations */
- BUG_ON(window->average_tpt != ((window->success_ratio *
- tbl->expected_tpt[index] + 64) / 128));
+ if (window->average_tpt != ((window->success_ratio *
+ tbl->expected_tpt[index] + 64) / 128)) {
+ IWL_ERR(priv, "expected_tpt should have been calculated by now\n");
+ window->average_tpt = ((window->success_ratio *
+ tbl->expected_tpt[index] + 64) / 128);
+ }
/* If we are searching for better modulation mode, check success. */
if (lq_sta->search_better_tbl &&
@@ -2289,7 +2292,7 @@ lq_update:
IWL_DEBUG_RATE(priv, "Switch current mcs: %X index: %d\n",
tbl->current_rate, index);
rs_fill_link_cmd(priv, lq_sta, tbl->current_rate);
- iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC);
+ iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC, false);
} else
done_search = 1;
}
@@ -2338,7 +2341,20 @@ out:
return;
}
-
+/**
+ * rs_initialize_lq - Initialize a station's hardware rate table
+ *
+ * The uCode's station table contains a table of fallback rates
+ * for automatic fallback during transmission.
+ *
+ * NOTE: This sets up a default set of values. These will be replaced later
+ * if the driver's iwl-agn-rs rate scaling algorithm is used, instead of
+ * rc80211_simple.
+ *
+ * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
+ * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
+ * which requires station table entry to exist).
+ */
static void rs_initialize_lq(struct iwl_priv *priv,
struct ieee80211_conf *conf,
struct ieee80211_sta *sta,
@@ -2357,10 +2373,6 @@ static void rs_initialize_lq(struct iwl_priv *priv,
i = lq_sta->last_txrate_idx;
- if ((lq_sta->lq.sta_id == 0xff) &&
- (priv->iw_mode == NL80211_IFTYPE_ADHOC))
- goto out;
-
valid_tx_ant = priv->hw_params.valid_tx_ant;
if (!lq_sta->search_better_tbl)
@@ -2388,7 +2400,8 @@ static void rs_initialize_lq(struct iwl_priv *priv,
tbl->current_rate = rate;
rs_set_expected_tpt_table(lq_sta, tbl);
rs_fill_link_cmd(NULL, lq_sta, rate);
- iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC);
+ priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
+ iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_SYNC, true);
out:
return;
}
@@ -2399,10 +2412,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
struct sk_buff *skb = txrc->skb;
struct ieee80211_supported_band *sband = txrc->sband;
- struct iwl_priv *priv = (struct iwl_priv *)priv_r;
- struct ieee80211_conf *conf = &priv->hw->conf;
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct iwl_lq_sta *lq_sta = priv_sta;
int rate_idx;
@@ -2420,30 +2430,18 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
lq_sta->max_rate_idx = -1;
}
+ /* Treat uninitialized rate scaling data same as non-existing. */
+ if (lq_sta && !lq_sta->drv) {
+ IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
+ priv_sta = NULL;
+ }
+
/* Send management frames and NO_ACK data using lowest rate. */
if (rate_control_send_low(sta, priv_sta, txrc))
return;
rate_idx = lq_sta->last_txrate_idx;
- if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
- !lq_sta->ibss_sta_added) {
- u8 sta_id = iwl_find_station(priv, hdr->addr1);
-
- if (sta_id == IWL_INVALID_STATION) {
- IWL_DEBUG_RATE(priv, "LQ: ADD station %pM\n",
- hdr->addr1);
- sta_id = iwl_add_station(priv, hdr->addr1,
- false, CMD_ASYNC, ht_cap);
- }
- if ((sta_id != IWL_INVALID_STATION)) {
- lq_sta->lq.sta_id = sta_id;
- lq_sta->lq.rs_table[0].rate_n_flags = 0;
- lq_sta->ibss_sta_added = 1;
- rs_initialize_lq(priv, conf, sta, lq_sta);
- }
- }
-
if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
rate_idx -= IWL_FIRST_OFDM_RATE;
/* 6M and 9M shared same MCS index */
@@ -2493,16 +2491,25 @@ static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
return lq_sta;
}
-static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta)
+/*
+ * Called after adding a new station to initialize rate scaling
+ */
+void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_id)
{
int i, j;
- struct iwl_priv *priv = (struct iwl_priv *)priv_r;
+ struct ieee80211_hw *hw = priv->hw;
struct ieee80211_conf *conf = &priv->hw->conf;
struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- struct iwl_lq_sta *lq_sta = priv_sta;
+ struct iwl_station_priv *sta_priv;
+ struct iwl_lq_sta *lq_sta;
+ struct ieee80211_supported_band *sband;
+
+ sta_priv = (struct iwl_station_priv *) sta->drv_priv;
+ lq_sta = &sta_priv->lq_sta;
+ sband = hw->wiphy->bands[conf->channel->band];
- lq_sta->lq.sta_id = 0xff;
+
+ lq_sta->lq.sta_id = sta_id;
for (j = 0; j < LQ_SIZE; j++)
for (i = 0; i < IWL_RATE_COUNT; i++)
@@ -2514,39 +2521,18 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
for (i = 0; i < IWL_RATE_COUNT; i++)
rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
- IWL_DEBUG_RATE(priv, "LQ: *** rate scale station global init ***\n");
+ IWL_DEBUG_RATE(priv, "LQ: *** rate scale station global init for station %d ***\n",
+ sta_id);
/* TODO: what is a good starting rate for STA? About middle? Maybe not
* the lowest or the highest rate.. Could consider using RSSI from
* previous packets? Need to have IEEE 802.1X auth succeed immediately
* after assoc.. */
- lq_sta->ibss_sta_added = 0;
- if (priv->iw_mode == NL80211_IFTYPE_AP) {
- u8 sta_id = iwl_find_station(priv,
- sta->addr);
-
- /* for IBSS the call are from tasklet */
- IWL_DEBUG_RATE(priv, "LQ: ADD station %pM\n", sta->addr);
-
- if (sta_id == IWL_INVALID_STATION) {
- IWL_DEBUG_RATE(priv, "LQ: ADD station %pM\n", sta->addr);
- sta_id = iwl_add_station(priv, sta->addr, false,
- CMD_ASYNC, ht_cap);
- }
- if ((sta_id != IWL_INVALID_STATION)) {
- lq_sta->lq.sta_id = sta_id;
- lq_sta->lq.rs_table[0].rate_n_flags = 0;
- }
- /* FIXME: this is w/a remove it later */
- priv->assoc_station_added = 1;
- }
-
lq_sta->is_dup = 0;
lq_sta->max_rate_idx = -1;
lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
lq_sta->is_green = rs_use_green(sta, &priv->current_ht_config);
lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000);
- lq_sta->active_rate_basic = priv->active_rate_basic;
lq_sta->band = priv->band;
/*
* active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
@@ -2574,8 +2560,17 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
lq_sta->active_mimo3_rate);
/* These values will be overridden later */
- lq_sta->lq.general_params.single_stream_ant_msk = ANT_A;
- lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
+ lq_sta->lq.general_params.single_stream_ant_msk =
+ first_antenna(priv->hw_params.valid_tx_ant);
+ lq_sta->lq.general_params.dual_stream_ant_msk =
+ priv->hw_params.valid_tx_ant &
+ ~first_antenna(priv->hw_params.valid_tx_ant);
+ if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
+ lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
+ } else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
+ lq_sta->lq.general_params.dual_stream_ant_msk =
+ priv->hw_params.valid_tx_ant;
+ }
/* as default allow aggregation for all tids */
lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
@@ -2794,7 +2789,7 @@ static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
if (lq_sta->dbg_fixed_rate) {
rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
- iwl_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC);
+ iwl_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC, false);
}
return count;
@@ -2950,12 +2945,6 @@ static ssize_t rs_sta_dbgfs_rate_scale_data_read(struct file *file,
desc += sprintf(buff+desc,
"Bit Rate= %d Mb/s\n",
iwl_rates[lq_sta->last_txrate_idx].ieee >> 1);
- desc += sprintf(buff+desc,
- "Signal Level= %d dBm\tNoise Level= %d dBm\n",
- priv->last_rx_rssi, priv->last_rx_noise);
- desc += sprintf(buff+desc,
- "Tsf= 0x%llx\tBeacon time= 0x%08X\n",
- priv->last_tsf, priv->last_beacon_time);
ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
return ret;
@@ -2995,12 +2984,21 @@ static void rs_remove_debugfs(void *priv, void *priv_sta)
}
#endif
+/*
+ * Initialization of rate scaling information is done by the driver after
+ * the station is added. Since mac80211 calls this function before the
+ * station is added, we ignore it.
+ */
+static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *priv_sta)
+{
+}
static struct rate_control_ops rs_ops = {
.module = NULL,
.name = RS_NAME,
.tx_status = rs_tx_status,
.get_rate = rs_get_rate,
- .rate_init = rs_rate_init,
+ .rate_init = rs_rate_init_stub,
.alloc = rs_alloc,
.free = rs_free,
.alloc_sta = rs_alloc_sta,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index e71923961e69..8292f6d48ec6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -403,7 +403,6 @@ struct iwl_lq_sta {
u8 is_green;
u8 is_dup;
enum ieee80211_band band;
- u8 ibss_sta_added;
/* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
u32 supp_rates;
@@ -411,7 +410,6 @@ struct iwl_lq_sta {
u16 active_siso_rate;
u16 active_mimo2_rate;
u16 active_mimo3_rate;
- u16 active_rate_basic;
s8 max_rate_idx; /* Max rate set by user */
u8 missed_rate_counter;
@@ -479,6 +477,12 @@ static inline u8 iwl3945_get_prev_ieee_rate(u8 rate_index)
*/
extern void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
+/* Initialize station's rate scaling information after adding station */
+extern void iwl_rs_rate_init(struct iwl_priv *priv,
+ struct ieee80211_sta *sta, u8 sta_id);
+extern void iwl3945_rs_rate_init(struct iwl_priv *priv,
+ struct ieee80211_sta *sta, u8 sta_id);
+
/**
* iwl_rate_control_register - Register the rate control algorithm callbacks
*
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
new file mode 100644
index 000000000000..c402bfc83f36
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -0,0 +1,1340 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-sta.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+#include "iwl-agn-hw.h"
+#include "iwl-agn.h"
+
+/*
+ * mac80211 queues, ACs, hardware queues, FIFOs.
+ *
+ * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
+ *
+ * Mac80211 uses the following numbers, which we get from it
+ * by way of skb_get_queue_mapping(skb):
+ *
+ * VO 0
+ * VI 1
+ * BE 2
+ * BK 3
+ *
+ *
+ * Regular (not A-MPDU) frames are put into hardware queues corresponding
+ * to the FIFOs; see the comments in iwl-prph.h. Aggregated frames get their
+ * own queue per aggregation session (RA/TID combination); such queues are
+ * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
+ * order to map frames to the right queue, we also need an AC->hw queue
+ * mapping. This is implemented here.
+ *
+ * Due to the way hw queues are set up (by the hw specific modules like
+ * iwl-4965.c, iwl-5000.c etc.), the AC->hw queue mapping is the identity
+ * mapping.
+ */
+
+static const u8 tid_to_ac[] = {
+ /* this matches the mac80211 numbers */
+ 2, 3, 3, 2, 1, 1, 0, 0
+};
+
+static const u8 ac_to_fifo[] = {
+ IWL_TX_FIFO_VO,
+ IWL_TX_FIFO_VI,
+ IWL_TX_FIFO_BE,
+ IWL_TX_FIFO_BK,
+};
+
+static inline int get_fifo_from_ac(u8 ac)
+{
+ return ac_to_fifo[ac];
+}
+
+static inline int get_ac_from_tid(u16 tid)
+{
+ if (likely(tid < ARRAY_SIZE(tid_to_ac)))
+ return tid_to_ac[tid];
+
+ /* no support for TIDs 8-15 yet */
+ return -EINVAL;
+}
+
+static inline int get_fifo_from_tid(u16 tid)
+{
+ if (likely(tid < ARRAY_SIZE(tid_to_ac)))
+ return get_fifo_from_ac(tid_to_ac[tid]);
+
+ /* no support for TIDs 8-15 yet */
+ return -EINVAL;
+}
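Reading the two tables together gives the TID -> AC -> FIFO path described in the comment block above. A few concrete mappings, for illustration only:

    /* TID 0 -> AC 2 (BE) -> IWL_TX_FIFO_BE                  */
    /* TID 5 -> AC 1 (VI) -> IWL_TX_FIFO_VI                  */
    /* TID 6 -> AC 0 (VO) -> IWL_TX_FIFO_VO                  */
    /* TID 9 -> -EINVAL (TIDs 8-15 are not supported yet)    */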
+
+/**
+ * iwlagn_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
+ */
+void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq,
+ u16 byte_cnt)
+{
+ struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
+ int write_ptr = txq->q.write_ptr;
+ int txq_id = txq->q.id;
+ u8 sec_ctl = 0;
+ u8 sta_id = 0;
+ u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
+ __le16 bc_ent;
+
+ WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
+
+ if (txq_id != IWL_CMD_QUEUE_NUM) {
+ sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
+ sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
+
+ switch (sec_ctl & TX_CMD_SEC_MSK) {
+ case TX_CMD_SEC_CCM:
+ len += CCMP_MIC_LEN;
+ break;
+ case TX_CMD_SEC_TKIP:
+ len += TKIP_ICV_LEN;
+ break;
+ case TX_CMD_SEC_WEP:
+ len += WEP_IV_LEN + WEP_ICV_LEN;
+ break;
+ }
+ }
+
+ bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));
+
+ scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
+
+ if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ scd_bc_tbl[txq_id].
+ tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
+}
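The byte-count table keeps a mirrored copy of its first TFD_QUEUE_SIZE_BC_DUP entries past index TFD_QUEUE_SIZE_MAX, presumably so the scheduler can read a window of byte counts that wraps around the end of the queue without special-casing. A sketch of the layout, with constant values assumed for illustration (TFD_QUEUE_SIZE_MAX = 256, TFD_QUEUE_SIZE_BC_DUP = 64):

    /* tfd_offset[0 .. 255]   : one byte-count entry per TFD slot          */
    /* tfd_offset[256 .. 319] : copy of entries 0 .. 63 (the wrap window)  */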
+
+void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq)
+{
+ struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
+ int txq_id = txq->q.id;
+ int read_ptr = txq->q.read_ptr;
+ u8 sta_id = 0;
+ __le16 bc_ent;
+
+ WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
+
+ if (txq_id != IWL_CMD_QUEUE_NUM)
+ sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;
+
+ bc_ent = cpu_to_le16(1 | (sta_id << 12));
+ scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
+
+ if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ scd_bc_tbl[txq_id].
+ tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
+}
+
+static int iwlagn_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
+ u16 txq_id)
+{
+ u32 tbl_dw_addr;
+ u32 tbl_dw;
+ u16 scd_q2ratid;
+
+ scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
+
+ tbl_dw_addr = priv->scd_base_addr +
+ IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
+
+ tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
+
+ if (txq_id & 0x1)
+ tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
+ else
+ tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
+
+ iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
+
+ return 0;
+}
+
+static void iwlagn_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id)
+{
+ /* Simply stop the queue, but don't change any configuration;
+ * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
+ iwl_write_prph(priv,
+ IWLAGN_SCD_QUEUE_STATUS_BITS(txq_id),
+ (0 << IWLAGN_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
+ (1 << IWLAGN_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
+}
+
+void iwlagn_set_wr_ptrs(struct iwl_priv *priv,
+ int txq_id, u32 index)
+{
+ iwl_write_direct32(priv, HBUS_TARG_WRPTR,
+ (index & 0xff) | (txq_id << 8));
+ iwl_write_prph(priv, IWLAGN_SCD_QUEUE_RDPTR(txq_id), index);
+}
+
+void iwlagn_tx_queue_set_status(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq,
+ int tx_fifo_id, int scd_retry)
+{
+ int txq_id = txq->q.id;
+ int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
+
+ iwl_write_prph(priv, IWLAGN_SCD_QUEUE_STATUS_BITS(txq_id),
+ (active << IWLAGN_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+ (tx_fifo_id << IWLAGN_SCD_QUEUE_STTS_REG_POS_TXF) |
+ (1 << IWLAGN_SCD_QUEUE_STTS_REG_POS_WSL) |
+ IWLAGN_SCD_QUEUE_STTS_REG_MSK);
+
+ txq->sched_retry = scd_retry;
+
+ IWL_DEBUG_INFO(priv, "%s %s Queue %d on FIFO %d\n",
+ active ? "Activate" : "Deactivate",
+ scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
+}
+
+int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id,
+ int tx_fifo, int sta_id, int tid, u16 ssn_idx)
+{
+ unsigned long flags;
+ u16 ra_tid;
+
+ if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
+ (IWLAGN_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
+ <= txq_id)) {
+ IWL_WARN(priv,
+ "queue number out of range: %d, must be %d to %d\n",
+ txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
+ IWLAGN_FIRST_AMPDU_QUEUE +
+ priv->cfg->num_of_ampdu_queues - 1);
+ return -EINVAL;
+ }
+
+ ra_tid = BUILD_RAxTID(sta_id, tid);
+
+ /* Modify device's station table to Tx this TID */
+ iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Stop this Tx queue before configuring it */
+ iwlagn_tx_queue_stop_scheduler(priv, txq_id);
+
+ /* Map receiver-address / traffic-ID to this queue */
+ iwlagn_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
+
+ /* Set this queue as a chain-building queue */
+ iwl_set_bits_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL, (1<<txq_id));
+
+ /* enable aggregations for the queue */
+ iwl_set_bits_prph(priv, IWLAGN_SCD_AGGR_SEL, (1<<txq_id));
+
+ /* Place first TFD at index corresponding to start sequence number.
+ * Assumes that ssn_idx is valid (!= 0xFFF) */
+ priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+ priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
+ iwlagn_set_wr_ptrs(priv, txq_id, ssn_idx);
+
+ /* Set up Tx window size and frame limit for this queue */
+ iwl_write_targ_mem(priv, priv->scd_base_addr +
+ IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
+ sizeof(u32),
+ ((SCD_WIN_SIZE <<
+ IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
+ IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
+ ((SCD_FRAME_LIMIT <<
+ IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+ IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
+
+ iwl_set_bits_prph(priv, IWLAGN_SCD_INTERRUPT_MASK, (1 << txq_id));
+
+ /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
+ iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
+ u16 ssn_idx, u8 tx_fifo)
+{
+ if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
+ (IWLAGN_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
+ <= txq_id)) {
+ IWL_ERR(priv,
+ "queue number out of range: %d, must be %d to %d\n",
+ txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
+ IWLAGN_FIRST_AMPDU_QUEUE +
+ priv->cfg->num_of_ampdu_queues - 1);
+ return -EINVAL;
+ }
+
+ iwlagn_tx_queue_stop_scheduler(priv, txq_id);
+
+ iwl_clear_bits_prph(priv, IWLAGN_SCD_AGGR_SEL, (1 << txq_id));
+
+ priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+ priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
+ /* assumes that ssn_idx is valid (!= 0xFFF) */
+ iwlagn_set_wr_ptrs(priv, txq_id, ssn_idx);
+
+ iwl_clear_bits_prph(priv, IWLAGN_SCD_INTERRUPT_MASK, (1 << txq_id));
+ iwl_txq_ctx_deactivate(priv, txq_id);
+ iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
+
+ return 0;
+}
+
+/*
+ * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFO mask;
+ * must be called under priv->lock and with MAC access enabled
+ */
+void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask)
+{
+ iwl_write_prph(priv, IWLAGN_SCD_TXFACT, mask);
+}
+
+static inline int get_queue_from_ac(u16 ac)
+{
+ return ac;
+}
+
+/*
+ * Build the basic portion of the REPLY_TX command.
+ */
+static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
+ struct iwl_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_hdr *hdr,
+ u8 std_id)
+{
+ __le16 fc = hdr->frame_control;
+ __le32 tx_flags = tx_cmd->tx_flags;
+
+ tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ tx_flags |= TX_CMD_FLG_ACK_MSK;
+ if (ieee80211_is_mgmt(fc))
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ if (ieee80211_is_probe_resp(fc) &&
+ !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
+ tx_flags |= TX_CMD_FLG_TSF_MSK;
+ } else {
+ tx_flags &= (~TX_CMD_FLG_ACK_MSK);
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ }
+
+ if (ieee80211_is_back_req(fc))
+ tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
+
+
+ tx_cmd->sta_id = std_id;
+ if (ieee80211_has_morefrags(fc))
+ tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
+
+ if (ieee80211_is_data_qos(fc)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ tx_cmd->tid_tspec = qc[0] & 0xf;
+ tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
+ } else {
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ }
+
+ priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);
+
+ if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
+ tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+
+ tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
+ if (ieee80211_is_mgmt(fc)) {
+ if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
+ tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
+ else
+ tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
+ } else {
+ tx_cmd->timeout.pm_frame_timeout = 0;
+ }
+
+ tx_cmd->driver_txop = 0;
+ tx_cmd->tx_flags = tx_flags;
+ tx_cmd->next_frame_len = 0;
+}
+
+#define RTS_DFAULT_RETRY_LIMIT 60
+
+static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
+ struct iwl_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info,
+ __le16 fc)
+{
+ u32 rate_flags;
+ int rate_idx;
+ u8 rts_retry_limit;
+ u8 data_retry_limit;
+ u8 rate_plcp;
+
+ /* Set retry limit on DATA packets and Probe Responses */
+ if (ieee80211_is_probe_resp(fc))
+ data_retry_limit = 3;
+ else
+ data_retry_limit = IWLAGN_DEFAULT_TX_RETRY;
+ tx_cmd->data_retry_limit = data_retry_limit;
+
+ /* Set retry limit on RTS packets */
+ rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
+ if (data_retry_limit < rts_retry_limit)
+ rts_retry_limit = data_retry_limit;
+ tx_cmd->rts_retry_limit = rts_retry_limit;
+
+ /* DATA packets will use the uCode station table for rate/antenna
+ * selection */
+ if (ieee80211_is_data(fc)) {
+ tx_cmd->initial_rate_index = 0;
+ tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
+ return;
+ }
+
+ /**
+ * If the current TX rate stored in mac80211 has the MCS bit set, it's
+ * not really a TX rate. Thus, we use the lowest supported rate for
+ * this band. Also use the lowest supported rate if the stored rate
+ * index is invalid.
+ */
+ rate_idx = info->control.rates[0].idx;
+ if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
+ (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
+ rate_idx = rate_lowest_index(&priv->bands[info->band],
+ info->control.sta);
+ /* For 5 GHz band, remap mac80211 rate indices into driver indices */
+ if (info->band == IEEE80211_BAND_5GHZ)
+ rate_idx += IWL_FIRST_OFDM_RATE;
+ /* Get PLCP rate for tx_cmd->rate_n_flags */
+ rate_plcp = iwl_rates[rate_idx].plcp;
+ /* Zero out flags for this packet */
+ rate_flags = 0;
+
+ /* Set CCK flag as needed */
+ if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
+ rate_flags |= RATE_MCS_CCK_MSK;
+
+ /* Set up RTS and CTS flags for certain packets */
+ switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+ case cpu_to_le16(IEEE80211_STYPE_AUTH):
+ case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+ case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
+ case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
+ if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
+ tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+ tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* Set up antennas */
+ priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
+ rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
+
+ /* Set the rate in the TX cmd */
+ tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
+}
+
+static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
+ struct ieee80211_tx_info *info,
+ struct iwl_tx_cmd *tx_cmd,
+ struct sk_buff *skb_frag,
+ int sta_id)
+{
+ struct ieee80211_key_conf *keyconf = info->control.hw_key;
+
+ switch (keyconf->alg) {
+ case ALG_CCMP:
+ tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+ memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
+ IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
+ break;
+
+ case ALG_TKIP:
+ tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
+ ieee80211_get_tkip_key(keyconf, skb_frag,
+ IEEE80211_TKIP_P2_KEY, tx_cmd->key);
+ IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
+ break;
+
+ case ALG_WEP:
+ tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
+ (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
+
+ if (keyconf->keylen == WEP_KEY_LEN_128)
+ tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
+
+ memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
+
+ IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
+ "with key %d\n", keyconf->keyidx);
+ break;
+
+ default:
+ IWL_ERR(priv, "Unknown encode alg %d\n", keyconf->alg);
+ break;
+ }
+}
+
+/*
+ * start REPLY_TX command process
+ */
+int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_sta *sta = info->control.sta;
+ struct iwl_station_priv *sta_priv = NULL;
+ struct iwl_tx_queue *txq;
+ struct iwl_queue *q;
+ struct iwl_device_cmd *out_cmd;
+ struct iwl_cmd_meta *out_meta;
+ struct iwl_tx_cmd *tx_cmd;
+ int swq_id, txq_id;
+ dma_addr_t phys_addr;
+ dma_addr_t txcmd_phys;
+ dma_addr_t scratch_phys;
+ u16 len, len_org, firstlen, secondlen;
+ u16 seq_number = 0;
+ __le16 fc;
+ u8 hdr_len;
+ u8 sta_id;
+ u8 wait_write_ptr = 0;
+ u8 tid = 0;
+ u8 *qc = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (iwl_is_rfkill(priv)) {
+ IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
+ goto drop_unlock;
+ }
+
+ fc = hdr->frame_control;
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ if (ieee80211_is_auth(fc))
+ IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
+ else if (ieee80211_is_assoc_req(fc))
+ IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
+ else if (ieee80211_is_reassoc_req(fc))
+ IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
+#endif
+
+ hdr_len = ieee80211_hdrlen(fc);
+
+ /* Find index into station table for destination station */
+ if (!info->control.sta)
+ sta_id = priv->hw_params.bcast_sta_id;
+ else
+ sta_id = iwl_sta_id(info->control.sta);
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
+ hdr->addr1);
+ goto drop_unlock;
+ }
+
+ IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
+
+ if (sta)
+ sta_priv = (void *)sta->drv_priv;
+
+ if (sta_priv && sta_id != priv->hw_params.bcast_sta_id &&
+ sta_priv->asleep) {
+ WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE));
+ /*
+ * This sends an asynchronous command to the device,
+ * but we can rely on it being processed before the
+ * next frame is processed -- and the next frame to
+ * this station is the one that will consume this
+ * counter.
+ * For now set the counter to just 1 since we do not
+ * support uAPSD yet.
+ */
+ iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
+ }
+
+ txq_id = get_queue_from_ac(skb_get_queue_mapping(skb));
+ if (ieee80211_is_data_qos(fc)) {
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ if (unlikely(tid >= MAX_TID_COUNT))
+ goto drop_unlock;
+ seq_number = priv->stations[sta_id].tid[tid].seq_number;
+ seq_number &= IEEE80211_SCTL_SEQ;
+ hdr->seq_ctrl = hdr->seq_ctrl &
+ cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(seq_number);
+ seq_number += 0x10;
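+ /* (The 802.11 sequence number occupies bits 4-15 of seq_ctrl;
+ * bits 0-3 hold the fragment number, so adding 0x10 advances
+ * the stored sequence number by one frame.) */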
+ /* aggregation is on for this <sta,tid> */
+ if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+ priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
+ txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
+ }
+ }
+
+ txq = &priv->txq[txq_id];
+ swq_id = txq->swq_id;
+ q = &txq->q;
+
+ if (unlikely(iwl_queue_space(q) < q->high_mark))
+ goto drop_unlock;
+
+ if (ieee80211_is_data_qos(fc))
+ priv->stations[sta_id].tid[tid].tfds_in_queue++;
+
+ /* Set up driver data for this TFD */
+ memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
+ txq->txb[q->write_ptr].skb[0] = skb;
+
+ /* Set up first empty entry in queue's array of Tx/cmd buffers */
+ out_cmd = txq->cmd[q->write_ptr];
+ out_meta = &txq->meta[q->write_ptr];
+ tx_cmd = &out_cmd->cmd.tx;
+ memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
+ memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
+
+ /*
+ * Set up the Tx-command (not MAC!) header.
+ * Store the chosen Tx queue and TFD index within the sequence field;
+ * after Tx, uCode's Tx response will return this value so driver can
+ * locate the frame within the tx queue and do post-tx processing.
+ */
+ out_cmd->hdr.cmd = REPLY_TX;
+ out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
+ INDEX_TO_SEQ(q->write_ptr)));
+
+ /* Copy MAC header from skb into command buffer */
+ memcpy(tx_cmd->hdr, hdr, hdr_len);
+
+
+ /* Total # bytes to be transmitted */
+ len = (u16)skb->len;
+ tx_cmd->len = cpu_to_le16(len);
+
+ if (info->control.hw_key)
+ iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
+
+ /* TODO need this for burst mode later on */
+ iwlagn_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
+ iwl_dbg_log_tx_data_frame(priv, len, hdr);
+
+ iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc);
+
+ iwl_update_stats(priv, true, fc, len);
+ /*
+ * Use the first empty entry in this queue's command buffer array
+ * to contain the Tx command and MAC header concatenated together
+ * (payload data will be in another buffer).
+ * Size of this varies, due to varying MAC header length.
+ * If end is not dword aligned, we'll have 2 extra bytes at the end
+ * of the MAC header (device reads on dword boundaries).
+ * We'll tell device about this padding later.
+ */
+ len = sizeof(struct iwl_tx_cmd) +
+ sizeof(struct iwl_cmd_header) + hdr_len;
+
+ len_org = len;
+ firstlen = len = (len + 3) & ~3;
+
+ if (len_org != len)
+ len_org = 1;
+ else
+ len_org = 0;
+
+ /* Tell NIC about any 2-byte padding after MAC header */
+ if (len_org)
+ tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
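+ /* (For example: if the Tx command, command header and MAC header add
+ * up to 62 bytes, firstlen is rounded up to 64 and the MH_PAD flag
+ * above tells the device to skip the 2 pad bytes after the header.) */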
+
+ /* Physical address of this Tx command's header (not MAC header!),
+ * within command buffer array. */
+ txcmd_phys = pci_map_single(priv->pci_dev,
+ &out_cmd->hdr, len,
+ PCI_DMA_BIDIRECTIONAL);
+ pci_unmap_addr_set(out_meta, mapping, txcmd_phys);
+ pci_unmap_len_set(out_meta, len, len);
+ /* Add buffer containing Tx command and MAC(!) header to TFD's
+ * first entry */
+ priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+ txcmd_phys, len, 1, 0);
+
+ if (!ieee80211_has_morefrags(hdr->frame_control)) {
+ txq->need_update = 1;
+ if (qc)
+ priv->stations[sta_id].tid[tid].seq_number = seq_number;
+ } else {
+ wait_write_ptr = 1;
+ txq->need_update = 0;
+ }
+
+ /* Set up TFD's 2nd entry to point directly to remainder of skb,
+ * if any (802.11 null frames have no payload). */
+ secondlen = len = skb->len - hdr_len;
+ if (len) {
+ phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
+ len, PCI_DMA_TODEVICE);
+ priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+ phys_addr, len,
+ 0, 0);
+ }
+
+ scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
+ offsetof(struct iwl_tx_cmd, scratch);
+
+ len = sizeof(struct iwl_tx_cmd) +
+ sizeof(struct iwl_cmd_header) + hdr_len;
+ /* take back ownership of DMA buffer to enable update */
+ pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
+ len, PCI_DMA_BIDIRECTIONAL);
+ tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
+ tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
+
+ IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
+ le16_to_cpu(out_cmd->hdr.sequence));
+ IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
+ iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
+ iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
+
+ /* Set up entry for this TFD in Tx byte-count array */
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
+ le16_to_cpu(tx_cmd->len));
+
+ pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
+ len, PCI_DMA_BIDIRECTIONAL);
+
+ trace_iwlwifi_dev_tx(priv,
+ &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
+ sizeof(struct iwl_tfd),
+ &out_cmd->hdr, firstlen,
+ skb->data + hdr_len, secondlen);
+
+ /* Tell device the write index *just past* this latest filled TFD */
+ q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
+ iwl_txq_update_write_ptr(priv, txq);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /*
+ * At this point the frame is "transmitted" successfully
+ * and we will get a TX status notification eventually;
+ * the code below only decides whether the write pointer
+ * needs a further update before we return.
+ */
+
+ /* avoid atomic ops if it isn't an associated client */
+ if (sta_priv && sta_priv->client)
+ atomic_inc(&sta_priv->pending_frames);
+
+ if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
+ if (wait_write_ptr) {
+ spin_lock_irqsave(&priv->lock, flags);
+ txq->need_update = 1;
+ iwl_txq_update_write_ptr(priv, txq);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ } else {
+ iwl_stop_queue(priv, txq->swq_id);
+ }
+ }
+
+ return 0;
+
+drop_unlock:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return -1;
+}
+
+static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv,
+ struct iwl_dma_ptr *ptr, size_t size)
+{
+ ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
+ GFP_KERNEL);
+ if (!ptr->addr)
+ return -ENOMEM;
+ ptr->size = size;
+ return 0;
+}
+
+static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv,
+ struct iwl_dma_ptr *ptr)
+{
+ if (unlikely(!ptr->addr))
+ return;
+
+ dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
+ memset(ptr, 0, sizeof(*ptr));
+}
+
+/**
+ * iwlagn_hw_txq_ctx_free - Free TXQ Context
+ *
+ * Destroy all TX DMA queues and structures
+ */
+void iwlagn_hw_txq_ctx_free(struct iwl_priv *priv)
+{
+ int txq_id;
+
+ /* Tx queues */
+ if (priv->txq) {
+ for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
+ if (txq_id == IWL_CMD_QUEUE_NUM)
+ iwl_cmd_queue_free(priv);
+ else
+ iwl_tx_queue_free(priv, txq_id);
+ }
+ iwlagn_free_dma_ptr(priv, &priv->kw);
+
+ iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
+
+ /* free tx queue structure */
+ iwl_free_txq_mem(priv);
+}
+
+/**
+ * iwlagn_txq_ctx_alloc - allocate TX queue context
+ * Allocate all Tx DMA structures and initialize them
+ *
+ * @param priv
+ * @return error code
+ */
+int iwlagn_txq_ctx_alloc(struct iwl_priv *priv)
+{
+ int ret;
+ int txq_id, slots_num;
+ unsigned long flags;
+
+ /* Free all tx/cmd queues and keep-warm buffer */
+ iwlagn_hw_txq_ctx_free(priv);
+
+ ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
+ priv->hw_params.scd_bc_tbls_size);
+ if (ret) {
+ IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
+ goto error_bc_tbls;
+ }
+ /* Alloc keep-warm buffer */
+ ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
+ if (ret) {
+ IWL_ERR(priv, "Keep Warm allocation failed\n");
+ goto error_kw;
+ }
+
+ /* allocate tx queue structure */
+ ret = iwl_alloc_txq_mem(priv);
+ if (ret)
+ goto error;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Turn off all Tx DMA fifos */
+ priv->cfg->ops->lib->txq_set_sched(priv, 0);
+
+ /* Tell NIC where to find the "keep warm" buffer */
+ iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* Alloc and init all Tx queues, including the command queue (#4) */
+ for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
+ slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
+ TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
+ txq_id);
+ if (ret) {
+ IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
+ goto error;
+ }
+ }
+
+ return ret;
+
+ error:
+ iwlagn_hw_txq_ctx_free(priv);
+ iwlagn_free_dma_ptr(priv, &priv->kw);
+ error_kw:
+ iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
+ error_bc_tbls:
+ return ret;
+}
+
+void iwlagn_txq_ctx_reset(struct iwl_priv *priv)
+{
+ int txq_id, slots_num;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Turn off all Tx DMA fifos */
+ priv->cfg->ops->lib->txq_set_sched(priv, 0);
+
+ /* Tell NIC where to find the "keep warm" buffer */
+ iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* Alloc and init all Tx queues, including the command queue (#4) */
+ for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
+ slots_num = txq_id == IWL_CMD_QUEUE_NUM ?
+ TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id);
+ }
+}
+
+/**
+ * iwlagn_txq_ctx_stop - Stop all Tx DMA channels
+ */
+void iwlagn_txq_ctx_stop(struct iwl_priv *priv)
+{
+ int ch;
+ unsigned long flags;
+
+ /* Turn off all Tx DMA fifos */
+ spin_lock_irqsave(&priv->lock, flags);
+
+ priv->cfg->ops->lib->txq_set_sched(priv, 0);
+
+ /* Stop each Tx DMA channel, and wait for it to be idle */
+ for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
+ iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
+ iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
+ FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
+ 1000);
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+/*
+ * Find first available (lowest unused) Tx Queue, mark it "active".
+ * Called only when finding queue for aggregation.
+ * Should never return anything < 7, because those queues should already
+ * be in use as EDCA ACs (0-3), Command (4), and reserved (5, 6).
+ */
+static int iwlagn_txq_ctx_activate_free(struct iwl_priv *priv)
+{
+ int txq_id;
+
+ for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
+ if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
+ return txq_id;
+ return -1;
+}
+
+int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+{
+ int sta_id;
+ int tx_fifo;
+ int txq_id;
+ int ret;
+ unsigned long flags;
+ struct iwl_tid_data *tid_data;
+
+ tx_fifo = get_fifo_from_tid(tid);
+ if (unlikely(tx_fifo < 0))
+ return tx_fifo;
+
+ IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
+ __func__, sta->addr, tid);
+
+ sta_id = iwl_sta_id(sta);
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_ERR(priv, "Start AGG on invalid station\n");
+ return -ENXIO;
+ }
+ if (unlikely(tid >= MAX_TID_COUNT))
+ return -EINVAL;
+
+ if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
+ IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
+ return -ENXIO;
+ }
+
+ txq_id = iwlagn_txq_ctx_activate_free(priv);
+ if (txq_id == -1) {
+ IWL_ERR(priv, "No free aggregation queue available\n");
+ return -ENXIO;
+ }
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ tid_data = &priv->stations[sta_id].tid[tid];
+ *ssn = SEQ_TO_SN(tid_data->seq_number);
+ tid_data->agg.txq_id = txq_id;
+ priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(get_ac_from_tid(tid), txq_id);
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
+ sta_id, tid, *ssn);
+ if (ret)
+ return ret;
+
+ if (tid_data->tfds_in_queue == 0) {
+ IWL_DEBUG_HT(priv, "HW queue is empty\n");
+ tid_data->agg.state = IWL_AGG_ON;
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ } else {
+ IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
+ tid_data->tfds_in_queue);
+ tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
+ }
+ return ret;
+}
+
+int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid)
+{
+ int tx_fifo_id, txq_id, sta_id, ssn = -1;
+ struct iwl_tid_data *tid_data;
+ int write_ptr, read_ptr;
+ unsigned long flags;
+
+ tx_fifo_id = get_fifo_from_tid(tid);
+ if (unlikely(tx_fifo_id < 0))
+ return tx_fifo_id;
+
+ sta_id = iwl_sta_id(sta);
+
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
+ return -ENXIO;
+ }
+
+ if (priv->stations[sta_id].tid[tid].agg.state ==
+ IWL_EMPTYING_HW_QUEUE_ADDBA) {
+ IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
+ return 0;
+ }
+
+ if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
+ IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
+
+ tid_data = &priv->stations[sta_id].tid[tid];
+ ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
+ txq_id = tid_data->agg.txq_id;
+ write_ptr = priv->txq[txq_id].q.write_ptr;
+ read_ptr = priv->txq[txq_id].q.read_ptr;
+
+ /* The queue is not empty */
+ if (write_ptr != read_ptr) {
+ IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
+ priv->stations[sta_id].tid[tid].agg.state =
+ IWL_EMPTYING_HW_QUEUE_DELBA;
+ return 0;
+ }
+
+ IWL_DEBUG_HT(priv, "HW queue is empty\n");
+ priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ /*
+ * The only reason this call can fail is a queue number out of range,
+ * which can happen if uCode is reloaded and all the station
+ * information is lost. If it is outside the range, there is no need
+ * to deactivate the uCode queue; just return "success" to allow
+ * mac80211 to clean up its own data.
+ */
+ priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
+ tx_fifo_id);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+
+ return 0;
+}
+
+int iwlagn_txq_check_empty(struct iwl_priv *priv,
+ int sta_id, u8 tid, int txq_id)
+{
+ struct iwl_queue *q = &priv->txq[txq_id].q;
+ u8 *addr = priv->stations[sta_id].sta.sta.addr;
+ struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
+
+ switch (priv->stations[sta_id].tid[tid].agg.state) {
+ case IWL_EMPTYING_HW_QUEUE_DELBA:
+ /* We are reclaiming the last packet of the
+ * aggregated HW queue */
+ if ((txq_id == tid_data->agg.txq_id) &&
+ (q->read_ptr == q->write_ptr)) {
+ u16 ssn = SEQ_TO_SN(tid_data->seq_number);
+ int tx_fifo = get_fifo_from_tid(tid);
+ IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
+ priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
+ ssn, tx_fifo);
+ tid_data->agg.state = IWL_AGG_OFF;
+ ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, addr, tid);
+ }
+ break;
+ case IWL_EMPTYING_HW_QUEUE_ADDBA:
+ /* We are reclaiming the last packet of the queue */
+ if (tid_data->tfds_in_queue == 0) {
+ IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
+ tid_data->agg.state = IWL_AGG_ON;
+ ieee80211_start_tx_ba_cb_irqsafe(priv->vif, addr, tid);
+ }
+ break;
+ }
+ return 0;
+}
+
+static void iwlagn_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct ieee80211_sta *sta;
+ struct iwl_station_priv *sta_priv;
+
+ sta = ieee80211_find_sta(priv->vif, hdr->addr1);
+ if (sta) {
+ sta_priv = (void *)sta->drv_priv;
+ /* avoid atomic ops if this isn't a client */
+ if (sta_priv->client &&
+ atomic_dec_return(&sta_priv->pending_frames) == 0)
+ ieee80211_sta_block_awake(priv->hw, sta, false);
+ }
+
+ ieee80211_tx_status_irqsafe(priv->hw, skb);
+}
+
+int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
+{
+ struct iwl_tx_queue *txq = &priv->txq[txq_id];
+ struct iwl_queue *q = &txq->q;
+ struct iwl_tx_info *tx_info;
+ int nfreed = 0;
+ struct ieee80211_hdr *hdr;
+
+ if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
+ IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
+ "is out of range [0-%d] %d %d.\n", txq_id,
+ index, q->n_bd, q->write_ptr, q->read_ptr);
+ return 0;
+ }
+
+ for (index = iwl_queue_inc_wrap(index, q->n_bd);
+ q->read_ptr != index;
+ q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+ tx_info = &txq->txb[txq->q.read_ptr];
+ iwlagn_tx_status(priv, tx_info->skb[0]);
+
+ hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data;
+ if (hdr && ieee80211_is_data_qos(hdr->frame_control))
+ nfreed++;
+ tx_info->skb[0] = NULL;
+
+ if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
+ priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
+
+ priv->cfg->ops->lib->txq_free_tfd(priv, txq);
+ }
+ return nfreed;
+}
+
+/**
+ * iwlagn_tx_status_reply_compressed_ba - Update tx status from block-ack
+ *
+ * Go through the block-ack's bitmap of ACK'd frames and update the driver's
+ * record of which frames were acknowledged. This is reported to mac80211,
+ * then to the rate scaling algorithm.
+ */
+static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
+ struct iwl_ht_agg *agg,
+ struct iwl_compressed_ba_resp *ba_resp)
+
+{
+ int i, sh, ack;
+ u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
+ u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
+ u64 bitmap;
+ int successes = 0;
+ struct ieee80211_tx_info *info;
+
+ if (unlikely(!agg->wait_for_ba)) {
+ IWL_ERR(priv, "Received BA when not expected\n");
+ return -EINVAL;
+ }
+
+ /* Mark that the expected block-ack response arrived */
+ agg->wait_for_ba = 0;
+ IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
+
+ /* Calculate shift to align block-ack bits with our Tx window bits */
+ sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
+ if (sh < 0) /* something is wrong with the indices */
+ sh += 0x100;
+
+ /* Align the block-ack bitmap with the start of our Tx window */
+ bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
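+ /* (Illustration: if our Tx window started at start_idx = 10 but the
+ * BA's seq_ctl maps to index 7, sh = 3 and the shift above drops the
+ * bits for frames sent before our window's first frame, so bit 0
+ * lines up with start_idx.) */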
+
+ if (agg->frame_count > (64 - sh)) {
+ IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
+ return -1;
+ }
+
+ /* check for success or failure according to the
+ * transmitted bitmap and block-ack bitmap */
+ bitmap &= agg->bitmap;
+
+ /* For each frame attempted in aggregation,
+ * update driver's record of tx frame's status. */
+ for (i = 0; i < agg->frame_count ; i++) {
+ ack = bitmap & (1ULL << i);
+ successes += !!ack;
+ IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
+ ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
+ agg->start_idx + i);
+ }
+
+ info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
+ memset(&info->status, 0, sizeof(info->status));
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ info->flags |= IEEE80211_TX_STAT_AMPDU;
+ info->status.ampdu_ack_len = successes;
+ info->status.ampdu_ack_map = bitmap;
+ info->status.ampdu_len = agg->frame_count;
+ iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
+
+ IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap);
+
+ return 0;
+}
+
+/**
+ * translate ucode response to mac80211 tx status control values
+ */
+void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
+ struct ieee80211_tx_info *info)
+{
+ struct ieee80211_tx_rate *r = &info->control.rates[0];
+
+ info->antenna_sel_tx =
+ ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
+ if (rate_n_flags & RATE_MCS_HT_MSK)
+ r->flags |= IEEE80211_TX_RC_MCS;
+ if (rate_n_flags & RATE_MCS_GF_MSK)
+ r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+ if (rate_n_flags & RATE_MCS_HT40_MSK)
+ r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ if (rate_n_flags & RATE_MCS_DUP_MSK)
+ r->flags |= IEEE80211_TX_RC_DUP_DATA;
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ r->flags |= IEEE80211_TX_RC_SHORT_GI;
+ r->idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, info->band);
+}
+
+/**
+ * iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
+ *
+ * Handles block-acknowledge notification from device, which reports success
+ * of frames sent via aggregation.
+ */
+void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
+ struct iwl_tx_queue *txq = NULL;
+ struct iwl_ht_agg *agg;
+ int index;
+ int sta_id;
+ int tid;
+
+ /* "flow" corresponds to Tx queue */
+ u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
+
+ /* "ssn" is start of block-ack Tx window, corresponds to index
+ * (in Tx queue's circular buffer) of first TFD/frame in window */
+ u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
+
+ if (scd_flow >= priv->hw_params.max_txq_num) {
+ IWL_ERR(priv,
+ "BUG_ON scd_flow is bigger than number of queues\n");
+ return;
+ }
+
+ txq = &priv->txq[scd_flow];
+ sta_id = ba_resp->sta_id;
+ tid = ba_resp->tid;
+ agg = &priv->stations[sta_id].tid[tid].agg;
+
+ /* Find index just before block-ack window */
+ index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
+
+ /* TODO: Need to get this copy more safely - now good for debug */
+
+ IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
+ "sta_id = %d\n",
+ agg->wait_for_ba,
+ (u8 *) &ba_resp->sta_addr_lo32,
+ ba_resp->sta_id);
+ IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
+ "%d, scd_ssn = %d\n",
+ ba_resp->tid,
+ ba_resp->seq_ctl,
+ (unsigned long long)le64_to_cpu(ba_resp->bitmap),
+ ba_resp->scd_flow,
+ ba_resp->scd_ssn);
+ IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
+ agg->start_idx,
+ (unsigned long long)agg->bitmap);
+
+ /* Update driver's record of ACK vs. not for each frame in window */
+ iwlagn_tx_status_reply_compressed_ba(priv, agg, ba_resp);
+
+ /* Release all TFDs before the SSN, i.e. all TFDs in front of
+ * block-ack window (we assume that they've been successfully
+ * transmitted ... if not, it's too late anyway). */
+ if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
+ /* calculate mac80211 ampdu sw queue to wake */
+ int freed = iwlagn_tx_queue_reclaim(priv, scd_flow, index);
+ iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
+
+ if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
+ priv->mac80211_registered &&
+ (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
+ iwl_wake_queue(priv, txq->swq_id);
+
+ iwlagn_txq_check_empty(priv, sta_id, tid, scd_flow);
+ }
+}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
new file mode 100644
index 000000000000..637286c396fe
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
@@ -0,0 +1,425 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+#include "iwl-agn-hw.h"
+#include "iwl-agn.h"
+
+static const s8 iwlagn_default_queue_to_tx_fifo[] = {
+ IWL_TX_FIFO_VO,
+ IWL_TX_FIFO_VI,
+ IWL_TX_FIFO_BE,
+ IWL_TX_FIFO_BK,
+ IWLAGN_CMD_FIFO_NUM,
+ IWL_TX_FIFO_UNUSED,
+ IWL_TX_FIFO_UNUSED,
+ IWL_TX_FIFO_UNUSED,
+ IWL_TX_FIFO_UNUSED,
+ IWL_TX_FIFO_UNUSED,
+};
+
+static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
+ {COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
+ 0, COEX_UNASSOC_IDLE_FLAGS},
+ {COEX_CU_UNASSOC_MANUAL_SCAN_RP, COEX_CU_UNASSOC_MANUAL_SCAN_WP,
+ 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
+ {COEX_CU_UNASSOC_AUTO_SCAN_RP, COEX_CU_UNASSOC_AUTO_SCAN_WP,
+ 0, COEX_UNASSOC_AUTO_SCAN_FLAGS},
+ {COEX_CU_CALIBRATION_RP, COEX_CU_CALIBRATION_WP,
+ 0, COEX_CALIBRATION_FLAGS},
+ {COEX_CU_PERIODIC_CALIBRATION_RP, COEX_CU_PERIODIC_CALIBRATION_WP,
+ 0, COEX_PERIODIC_CALIBRATION_FLAGS},
+ {COEX_CU_CONNECTION_ESTAB_RP, COEX_CU_CONNECTION_ESTAB_WP,
+ 0, COEX_CONNECTION_ESTAB_FLAGS},
+ {COEX_CU_ASSOCIATED_IDLE_RP, COEX_CU_ASSOCIATED_IDLE_WP,
+ 0, COEX_ASSOCIATED_IDLE_FLAGS},
+ {COEX_CU_ASSOC_MANUAL_SCAN_RP, COEX_CU_ASSOC_MANUAL_SCAN_WP,
+ 0, COEX_ASSOC_MANUAL_SCAN_FLAGS},
+ {COEX_CU_ASSOC_AUTO_SCAN_RP, COEX_CU_ASSOC_AUTO_SCAN_WP,
+ 0, COEX_ASSOC_AUTO_SCAN_FLAGS},
+ {COEX_CU_ASSOC_ACTIVE_LEVEL_RP, COEX_CU_ASSOC_ACTIVE_LEVEL_WP,
+ 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS},
+ {COEX_CU_RF_ON_RP, COEX_CU_RF_ON_WP, 0, COEX_CU_RF_ON_FLAGS},
+ {COEX_CU_RF_OFF_RP, COEX_CU_RF_OFF_WP, 0, COEX_RF_OFF_FLAGS},
+ {COEX_CU_STAND_ALONE_DEBUG_RP, COEX_CU_STAND_ALONE_DEBUG_WP,
+ 0, COEX_STAND_ALONE_DEBUG_FLAGS},
+ {COEX_CU_IPAN_ASSOC_LEVEL_RP, COEX_CU_IPAN_ASSOC_LEVEL_WP,
+ 0, COEX_IPAN_ASSOC_LEVEL_FLAGS},
+ {COEX_CU_RSRVD1_RP, COEX_CU_RSRVD1_WP, 0, COEX_RSRVD1_FLAGS},
+ {COEX_CU_RSRVD2_RP, COEX_CU_RSRVD2_WP, 0, COEX_RSRVD2_FLAGS}
+};
+
+/*
+ * ucode
+ */
+static int iwlagn_load_section(struct iwl_priv *priv, const char *name,
+ struct fw_desc *image, u32 dst_addr)
+{
+ dma_addr_t phy_addr = image->p_addr;
+ u32 byte_cnt = image->len;
+ int ret;
+
+ priv->ucode_write_complete = 0;
+
+ iwl_write_direct32(priv,
+ FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
+
+ iwl_write_direct32(priv,
+ FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
+
+ iwl_write_direct32(priv,
+ FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
+ phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
+
+ iwl_write_direct32(priv,
+ FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
+ (iwl_get_dma_hi_addr(phy_addr)
+ << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
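+ /* (Note: CTRL0 above takes the low DMA address bits, while CTRL1
+ * packs the high address bits and the transfer byte count into a
+ * single register write.) */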
+
+ iwl_write_direct32(priv,
+ FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
+ 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
+ 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
+ FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
+
+ iwl_write_direct32(priv,
+ FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
+ FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
+
+ IWL_DEBUG_INFO(priv, "%s uCode section being loaded...\n", name);
+ ret = wait_event_interruptible_timeout(priv->wait_command_queue,
+ priv->ucode_write_complete, 5 * HZ);
+ if (ret == -ERESTARTSYS) {
+ IWL_ERR(priv, "Could not load the %s uCode section due "
+ "to interrupt\n", name);
+ return ret;
+ }
+ if (!ret) {
+ IWL_ERR(priv, "Could not load the %s uCode section\n",
+ name);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int iwlagn_load_given_ucode(struct iwl_priv *priv,
+ struct fw_desc *inst_image,
+ struct fw_desc *data_image)
+{
+ int ret = 0;
+
+ ret = iwlagn_load_section(priv, "INST", inst_image,
+ IWLAGN_RTC_INST_LOWER_BOUND);
+ if (ret)
+ return ret;
+
+ return iwlagn_load_section(priv, "DATA", data_image,
+ IWLAGN_RTC_DATA_LOWER_BOUND);
+}
+
+int iwlagn_load_ucode(struct iwl_priv *priv)
+{
+ int ret = 0;
+
+ /* check whether init ucode should be loaded, or rather runtime ucode */
+ if (priv->ucode_init.len && (priv->ucode_type == UCODE_NONE)) {
+ IWL_DEBUG_INFO(priv, "Init ucode found. Loading init ucode...\n");
+ ret = iwlagn_load_given_ucode(priv,
+ &priv->ucode_init, &priv->ucode_init_data);
+ if (!ret) {
+ IWL_DEBUG_INFO(priv, "Init ucode load complete.\n");
+ priv->ucode_type = UCODE_INIT;
+ }
+ } else {
+ IWL_DEBUG_INFO(priv, "Init ucode not found, or already loaded. "
+ "Loading runtime ucode...\n");
+ ret = iwlagn_load_given_ucode(priv,
+ &priv->ucode_code, &priv->ucode_data);
+ if (!ret) {
+ IWL_DEBUG_INFO(priv, "Runtime ucode load complete.\n");
+ priv->ucode_type = UCODE_RT;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * Calibration
+ */
+static int iwlagn_set_Xtal_calib(struct iwl_priv *priv)
+{
+ struct iwl_calib_xtal_freq_cmd cmd;
+ __le16 *xtal_calib =
+ (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_XTAL);
+
+ cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
+ cmd.hdr.first_group = 0;
+ cmd.hdr.groups_num = 1;
+ cmd.hdr.data_valid = 1;
+ cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
+ cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
+ return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
+ (u8 *)&cmd, sizeof(cmd));
+}
+
+static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
+{
+ struct iwl_calib_cfg_cmd calib_cfg_cmd;
+ struct iwl_host_cmd cmd = {
+ .id = CALIBRATION_CFG_CMD,
+ .len = sizeof(struct iwl_calib_cfg_cmd),
+ .data = &calib_cfg_cmd,
+ };
+
+ memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
+ calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
+ calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL;
+ calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL;
+ calib_cfg_cmd.ucd_calib_cfg.flags = IWL_CALIB_INIT_CFG_ALL;
+
+ return iwl_send_cmd(priv, &cmd);
+}
+
+void iwlagn_rx_calib_result(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
+ int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ int index;
+
+ /* subtract the size of the length field itself */
+ len -= 4;
+
+ /* Define the order in which the results will be sent to the runtime
+ * uCode. iwl_send_calib_results sends them in order of increasing
+ * index, so we sort them here.
+ */
+ switch (hdr->op_code) {
+ case IWL_PHY_CALIBRATE_DC_CMD:
+ index = IWL_CALIB_DC;
+ break;
+ case IWL_PHY_CALIBRATE_LO_CMD:
+ index = IWL_CALIB_LO;
+ break;
+ case IWL_PHY_CALIBRATE_TX_IQ_CMD:
+ index = IWL_CALIB_TX_IQ;
+ break;
+ case IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD:
+ index = IWL_CALIB_TX_IQ_PERD;
+ break;
+ case IWL_PHY_CALIBRATE_BASE_BAND_CMD:
+ index = IWL_CALIB_BASE_BAND;
+ break;
+ default:
+ IWL_ERR(priv, "Unknown calibration notification %d\n",
+ hdr->op_code);
+ return;
+ }
+ iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len);
+}
+
+void iwlagn_rx_calib_complete(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ IWL_DEBUG_INFO(priv, "Init. calibration is completed, restarting fw.\n");
+ queue_work(priv->workqueue, &priv->restart);
+}
+
+void iwlagn_init_alive_start(struct iwl_priv *priv)
+{
+ int ret = 0;
+
+ /* Check alive response for "valid" sign from uCode */
+ if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
+ /* We had an error bringing up the hardware, so take it
+ * all the way back down so we can try again */
+ IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
+ goto restart;
+ }
+
+ /* initialize uCode was loaded... verify inst image.
+ * This is a paranoid check, because we would not have gotten the
+ * "initialize" alive if code weren't properly loaded. */
+ if (iwl_verify_ucode(priv)) {
+ /* Runtime instruction load was bad;
+ * take it all the way back down so we can try again */
+ IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
+ goto restart;
+ }
+
+ ret = priv->cfg->ops->lib->alive_notify(priv);
+ if (ret) {
+ IWL_WARN(priv,
+ "Could not complete ALIVE transition: %d\n", ret);
+ goto restart;
+ }
+
+ iwlagn_send_calib_cfg(priv);
+ return;
+
+restart:
+ /* real restart (first load init_ucode) */
+ queue_work(priv->workqueue, &priv->restart);
+}
+
+static int iwlagn_send_wimax_coex(struct iwl_priv *priv)
+{
+ struct iwl_wimax_coex_cmd coex_cmd;
+
+ if (priv->cfg->support_wimax_coexist) {
+ /* Unmask wake-up source at associated sleep */
+ coex_cmd.flags = COEX_FLAGS_ASSOC_WA_UNMASK_MSK;
+
+ /* Unmask wake-up source at unassociated sleep */
+ coex_cmd.flags |= COEX_FLAGS_UNASSOC_WA_UNMASK_MSK;
+ memcpy(coex_cmd.sta_prio, cu_priorities,
+ sizeof(struct iwl_wimax_coex_event_entry) *
+ COEX_NUM_OF_EVENTS);
+
+ /* enabling the coexistence feature */
+ coex_cmd.flags |= COEX_FLAGS_COEX_ENABLE_MSK;
+
+ /* enabling the priorities tables */
+ coex_cmd.flags |= COEX_FLAGS_STA_TABLE_VALID_MSK;
+ } else {
+ /* coexistence is disabled */
+ memset(&coex_cmd, 0, sizeof(coex_cmd));
+ }
+ return iwl_send_cmd_pdu(priv, COEX_PRIORITY_TABLE_CMD,
+ sizeof(coex_cmd), &coex_cmd);
+}
+
+int iwlagn_alive_notify(struct iwl_priv *priv)
+{
+ u32 a;
+ unsigned long flags;
+ int i, chan;
+ u32 reg_val;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ priv->scd_base_addr = iwl_read_prph(priv, IWLAGN_SCD_SRAM_BASE_ADDR);
+ a = priv->scd_base_addr + IWLAGN_SCD_CONTEXT_DATA_OFFSET;
+ for (; a < priv->scd_base_addr + IWLAGN_SCD_TX_STTS_BITMAP_OFFSET;
+ a += 4)
+ iwl_write_targ_mem(priv, a, 0);
+ for (; a < priv->scd_base_addr + IWLAGN_SCD_TRANSLATE_TBL_OFFSET;
+ a += 4)
+ iwl_write_targ_mem(priv, a, 0);
+ for (; a < priv->scd_base_addr +
+ IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
+ iwl_write_targ_mem(priv, a, 0);
+
+ iwl_write_prph(priv, IWLAGN_SCD_DRAM_BASE_ADDR,
+ priv->scd_bc_tbls.dma >> 10);
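+ /* (the >> 10 suggests the scheduler register takes the byte-count
+ * table base address in 1 KB units) */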
+
+ /* Enable DMA channel */
+ for (chan = 0; chan < FH50_TCSR_CHNL_NUM ; chan++)
+ iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
+
+ /* Update FH chicken bits */
+ reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
+ iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
+ reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
+
+ iwl_write_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL,
+ IWLAGN_SCD_QUEUECHAIN_SEL_ALL(priv->hw_params.max_txq_num));
+ iwl_write_prph(priv, IWLAGN_SCD_AGGR_SEL, 0);
+
+ /* initiate the queues */
+ for (i = 0; i < priv->hw_params.max_txq_num; i++) {
+ iwl_write_prph(priv, IWLAGN_SCD_QUEUE_RDPTR(i), 0);
+ iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
+ iwl_write_targ_mem(priv, priv->scd_base_addr +
+ IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
+ iwl_write_targ_mem(priv, priv->scd_base_addr +
+ IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i) +
+ sizeof(u32),
+ ((SCD_WIN_SIZE <<
+ IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
+ IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
+ ((SCD_FRAME_LIMIT <<
+ IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+ IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
+ }
+
+ iwl_write_prph(priv, IWLAGN_SCD_INTERRUPT_MASK,
+ IWL_MASK(0, priv->hw_params.max_txq_num));
+
+ /* Activate all Tx DMA/FIFO channels */
+ priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));
+
+ iwlagn_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
+
+ /* make sure all queues are not stopped */
+ memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
+ for (i = 0; i < 4; i++)
+ atomic_set(&priv->queue_stop_count[i], 0);
+
+ /* reset to 0 to enable all the queues first */
+ priv->txq_ctx_active_msk = 0;
+ /* map qos queues to fifos one-to-one */
+ BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 10);
+
+ for (i = 0; i < ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo); i++) {
+ int ac = iwlagn_default_queue_to_tx_fifo[i];
+
+ iwl_txq_ctx_activate(priv, i);
+
+ if (ac == IWL_TX_FIFO_UNUSED)
+ continue;
+
+ iwlagn_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ iwlagn_send_wimax_coex(priv);
+
+ iwlagn_set_Xtal_calib(priv);
+ iwl_send_calib_results(priv);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index bdff56583e11..dc747ad988b4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -55,6 +55,7 @@
#include "iwl-helpers.h"
#include "iwl-sta.h"
#include "iwl-calib.h"
+#include "iwl-agn.h"
/******************************************************************************
@@ -83,13 +84,6 @@ MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");
MODULE_ALIAS("iwl4965");
-/*************** STATION TABLE MANAGEMENT ****
- * mac80211 should be examined to determine if sta_info is duplicating
- * the functionality provided here
- */
-
-/**************************************************************/
-
/**
* iwl_commit_rxon - commit staging_rxon to hardware
*
@@ -144,9 +138,6 @@ int iwl_commit_rxon(struct iwl_priv *priv)
return 0;
}
- /* station table will be cleared */
- priv->assoc_station_added = 0;
-
/* If we are currently associated and the new config requires
* an RXON_ASSOC and the new config wants the associated mask enabled,
* we must clear the associated from the active configuration
@@ -166,6 +157,13 @@ int iwl_commit_rxon(struct iwl_priv *priv)
IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
return ret;
}
+ iwl_clear_ucode_stations(priv);
+ iwl_restore_stations(priv);
+ ret = iwl_restore_default_wep_keys(priv);
+ if (ret) {
+ IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
+ return ret;
+ }
}
IWL_DEBUG_INFO(priv, "Sending RXON\n"
@@ -179,9 +177,8 @@ int iwl_commit_rxon(struct iwl_priv *priv)
iwl_set_rxon_hwcrypto(priv, !priv->cfg->mod_params->sw_crypto);
/* Apply the new configuration
- * RXON unassoc clears the station table in uCode, send it before
- * we add the bcast station. If assoc bit is set, we will send RXON
- * after having added the bcast and bssid station.
+ * RXON unassoc clears the station table in uCode so restoration of
+ * stations is needed after it (the RXON command) completes
*/
if (!new_assoc) {
ret = iwl_send_cmd_pdu(priv, REPLY_RXON,
@@ -190,35 +187,19 @@ int iwl_commit_rxon(struct iwl_priv *priv)
IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
return ret;
}
+ IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n");
memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
+ iwl_clear_ucode_stations(priv);
+ iwl_restore_stations(priv);
+ ret = iwl_restore_default_wep_keys(priv);
+ if (ret) {
+ IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
+ return ret;
+ }
}
- iwl_clear_stations_table(priv);
-
priv->start_calib = 0;
-
- /* Add the broadcast address so we can send broadcast frames */
- priv->cfg->ops->lib->add_bcast_station(priv);
-
-
- /* If we have set the ASSOC_MSK and we are in BSS mode then
- * add the IWL_AP_ID to the station rate table */
if (new_assoc) {
- if (priv->iw_mode == NL80211_IFTYPE_STATION) {
- ret = iwl_rxon_add_station(priv,
- priv->active_rxon.bssid_addr, 1);
- if (ret == IWL_INVALID_STATION) {
- IWL_ERR(priv,
- "Error adding AP address for TX.\n");
- return -EIO;
- }
- priv->assoc_station_added = 1;
- if (priv->default_wep_key &&
- iwl_send_static_wepkey_cmd(priv, 0))
- IWL_ERR(priv,
- "Could not send WEP static key.\n");
- }
-
/*
* allow CTS-to-self if possible for new association.
* this is relevant only for 5000 series and up,
@@ -907,10 +888,10 @@ static void iwl_setup_rx_handlers(struct iwl_priv *priv)
priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
iwl_rx_missed_beacon_notif;
/* Rx handlers */
- priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl_rx_reply_rx_phy;
- priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl_rx_reply_rx;
+ priv->rx_handlers[REPLY_RX_PHY_CMD] = iwlagn_rx_reply_rx_phy;
+ priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwlagn_rx_reply_rx;
/* block ack */
- priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl_rx_reply_compressed_ba;
+ priv->rx_handlers[REPLY_COMPRESSED_BA] = iwlagn_rx_reply_compressed_ba;
/* Set up hardware specific Rx handlers */
priv->cfg->ops->lib->rx_handler_setup(priv);
}
@@ -1038,7 +1019,7 @@ void iwl_rx_handle(struct iwl_priv *priv)
count++;
if (count >= 8) {
rxq->read = i;
- iwl_rx_replenish_now(priv);
+ iwlagn_rx_replenish_now(priv);
count = 0;
}
}
@@ -1047,9 +1028,9 @@ void iwl_rx_handle(struct iwl_priv *priv)
/* Backtrack one entry */
rxq->read = i;
if (fill_rx)
- iwl_rx_replenish_now(priv);
+ iwlagn_rx_replenish_now(priv);
else
- iwl_rx_queue_restock(priv);
+ iwlagn_rx_queue_restock(priv);
}
/* call this function to flush any scheduled tasklet */
@@ -1267,9 +1248,9 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
* hardware bugs here by ACKing all the possible interrupts so that
* interrupt coalescing can still be achieved.
*/
- iwl_write32(priv, CSR_INT, priv->inta | ~priv->inta_mask);
+ iwl_write32(priv, CSR_INT, priv->_agn.inta | ~priv->inta_mask);
- inta = priv->inta;
+ inta = priv->_agn.inta;
#ifdef CONFIG_IWLWIFI_DEBUG
if (iwl_get_debug_level(priv) & IWL_DL_ISR) {
@@ -1282,8 +1263,8 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
spin_unlock_irqrestore(&priv->lock, flags);
- /* saved interrupt in inta variable now we can reset priv->inta */
- priv->inta = 0;
+ /* saved interrupt in inta variable now we can reset priv->_agn.inta */
+ priv->_agn.inta = 0;
/* Now service all interrupt bits discovered above. */
if (inta & CSR_INT_BIT_HW_ERR) {
@@ -1448,6 +1429,60 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
iwl_enable_interrupts(priv);
}
+/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
+#define ACK_CNT_RATIO (50)
+#define BA_TIMEOUT_CNT (5)
+#define BA_TIMEOUT_MAX (16)
+
+/**
+ * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries.
+ *
+ * When the ACK count ratio is 0 and the aggregated BA timeout retries exceed
+ * BA_TIMEOUT_MAX, reload the firmware and bring the system back to normal
+ * operation state.
+ */
+bool iwl_good_ack_health(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt)
+{
+ bool rc = true;
+ int actual_ack_cnt_delta, expected_ack_cnt_delta;
+ int ba_timeout_delta;
+
+ actual_ack_cnt_delta =
+ le32_to_cpu(pkt->u.stats.tx.actual_ack_cnt) -
+ le32_to_cpu(priv->statistics.tx.actual_ack_cnt);
+ expected_ack_cnt_delta =
+ le32_to_cpu(pkt->u.stats.tx.expected_ack_cnt) -
+ le32_to_cpu(priv->statistics.tx.expected_ack_cnt);
+ ba_timeout_delta =
+ le32_to_cpu(pkt->u.stats.tx.agg.ba_timeout) -
+ le32_to_cpu(priv->statistics.tx.agg.ba_timeout);
+ if ((priv->_agn.agg_tids_count > 0) &&
+ (expected_ack_cnt_delta > 0) &&
+ (((actual_ack_cnt_delta * 100) / expected_ack_cnt_delta)
+ < ACK_CNT_RATIO) &&
+ (ba_timeout_delta > BA_TIMEOUT_CNT)) {
+ IWL_DEBUG_RADIO(priv, "actual_ack_cnt delta = %d,"
+ " expected_ack_cnt = %d\n",
+ actual_ack_cnt_delta, expected_ack_cnt_delta);
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta = %d\n",
+ priv->delta_statistics.tx.rx_detected_cnt);
+ IWL_DEBUG_RADIO(priv,
+ "ack_or_ba_timeout_collision delta = %d\n",
+ priv->delta_statistics.tx.
+ ack_or_ba_timeout_collision);
+#endif
+ IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n",
+ ba_timeout_delta);
+ if (!actual_ack_cnt_delta &&
+ (ba_timeout_delta >= BA_TIMEOUT_MAX))
+ rc = false;
+ }
+ return rc;
+}
+
/******************************************************************************
*
@@ -1471,9 +1506,13 @@ static void iwl_nic_start(struct iwl_priv *priv)
iwl_write32(priv, CSR_RESET, 0);
}
+struct iwlagn_ucode_capabilities {
+ u32 max_probe_length;
+};
static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context);
-static int iwl_mac_setup_register(struct iwl_priv *priv);
+static int iwl_mac_setup_register(struct iwl_priv *priv,
+ struct iwlagn_ucode_capabilities *capa);
static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first)
{
@@ -1500,6 +1539,199 @@ static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first)
iwl_ucode_callback);
}
+struct iwlagn_firmware_pieces {
+ const void *inst, *data, *init, *init_data, *boot;
+ size_t inst_size, data_size, init_size, init_data_size, boot_size;
+
+ u32 build;
+};
+
+static int iwlagn_load_legacy_firmware(struct iwl_priv *priv,
+ const struct firmware *ucode_raw,
+ struct iwlagn_firmware_pieces *pieces)
+{
+ struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
+ u32 api_ver, hdr_size;
+ const u8 *src;
+
+ priv->ucode_ver = le32_to_cpu(ucode->ver);
+ api_ver = IWL_UCODE_API(priv->ucode_ver);
+
+ switch (api_ver) {
+ default:
+ /*
+ * The 4965 doesn't revise the firmware file format
+ * along with the API version; it always uses the v1
+ * file format.
+ */
+ if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) !=
+ CSR_HW_REV_TYPE_4965) {
+ hdr_size = 28;
+ if (ucode_raw->size < hdr_size) {
+ IWL_ERR(priv, "File size too small!\n");
+ return -EINVAL;
+ }
+ pieces->build = le32_to_cpu(ucode->u.v2.build);
+ pieces->inst_size = le32_to_cpu(ucode->u.v2.inst_size);
+ pieces->data_size = le32_to_cpu(ucode->u.v2.data_size);
+ pieces->init_size = le32_to_cpu(ucode->u.v2.init_size);
+ pieces->init_data_size = le32_to_cpu(ucode->u.v2.init_data_size);
+ pieces->boot_size = le32_to_cpu(ucode->u.v2.boot_size);
+ src = ucode->u.v2.data;
+ break;
+ }
+ /* fall through for 4965 */
+ case 0:
+ case 1:
+ case 2:
+ hdr_size = 24;
+ if (ucode_raw->size < hdr_size) {
+ IWL_ERR(priv, "File size too small!\n");
+ return -EINVAL;
+ }
+ pieces->build = 0;
+ pieces->inst_size = le32_to_cpu(ucode->u.v1.inst_size);
+ pieces->data_size = le32_to_cpu(ucode->u.v1.data_size);
+ pieces->init_size = le32_to_cpu(ucode->u.v1.init_size);
+ pieces->init_data_size = le32_to_cpu(ucode->u.v1.init_data_size);
+ pieces->boot_size = le32_to_cpu(ucode->u.v1.boot_size);
+ src = ucode->u.v1.data;
+ break;
+ }
+
+ /* Verify size of file vs. image size info in file's header */
+ if (ucode_raw->size != hdr_size + pieces->inst_size +
+ pieces->data_size + pieces->init_size +
+ pieces->init_data_size + pieces->boot_size) {
+
+ IWL_ERR(priv,
+ "uCode file size %d does not match expected size\n",
+ (int)ucode_raw->size);
+ return -EINVAL;
+ }
+
+ pieces->inst = src;
+ src += pieces->inst_size;
+ pieces->data = src;
+ src += pieces->data_size;
+ pieces->init = src;
+ src += pieces->init_size;
+ pieces->init_data = src;
+ src += pieces->init_data_size;
+ pieces->boot = src;
+ src += pieces->boot_size;
+
+ return 0;
+}
+
+static int iwlagn_wanted_ucode_alternative = 1;
+
+static int iwlagn_load_firmware(struct iwl_priv *priv,
+ const struct firmware *ucode_raw,
+ struct iwlagn_firmware_pieces *pieces,
+ struct iwlagn_ucode_capabilities *capa)
+{
+ struct iwl_tlv_ucode_header *ucode = (void *)ucode_raw->data;
+ struct iwl_ucode_tlv *tlv;
+ size_t len = ucode_raw->size;
+ const u8 *data;
+ int wanted_alternative = iwlagn_wanted_ucode_alternative, tmp;
+ u64 alternatives;
+
+ if (len < sizeof(*ucode))
+ return -EINVAL;
+
+ if (ucode->magic != cpu_to_le32(IWL_TLV_UCODE_MAGIC))
+ return -EINVAL;
+
+ /*
+ * Check which alternatives are present, and "downgrade"
+ * when the chosen alternative is not present, warning
+ * the user when that happens. Some files may not have
+ * any alternatives, so don't warn in that case.
+ */
+ alternatives = le64_to_cpu(ucode->alternatives);
+ tmp = wanted_alternative;
+ if (wanted_alternative > 63)
+ wanted_alternative = 63;
+ while (wanted_alternative && !(alternatives & BIT(wanted_alternative)))
+ wanted_alternative--;
+ if (wanted_alternative && wanted_alternative != tmp)
+ IWL_WARN(priv,
+ "uCode alternative %d not available, choosing %d\n",
+ tmp, wanted_alternative);
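+
+ /* (Example with assumed values: if the file advertises alternatives
+ * 0 and 2 and the driver asked for 3, the loop above steps down to 2
+ * and the warning reports the substitution.) */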
+
+ priv->ucode_ver = le32_to_cpu(ucode->ver);
+ pieces->build = le32_to_cpu(ucode->build);
+ data = ucode->data;
+
+ len -= sizeof(*ucode);
+
+ while (len >= sizeof(*tlv)) {
+ u32 tlv_len;
+ enum iwl_ucode_tlv_type tlv_type;
+ u16 tlv_alt;
+ const u8 *tlv_data;
+
+ len -= sizeof(*tlv);
+ tlv = (void *)data;
+
+ tlv_len = le32_to_cpu(tlv->length);
+ tlv_type = le16_to_cpu(tlv->type);
+ tlv_alt = le16_to_cpu(tlv->alternative);
+ tlv_data = tlv->data;
+
+ if (len < tlv_len)
+ return -EINVAL;
+ len -= ALIGN(tlv_len, 4);
+ data += sizeof(*tlv) + ALIGN(tlv_len, 4);
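+ /* (Note: the parser assumes each TLV payload is padded to a 4-byte
+ * boundary in the file, so it advances by the aligned length even
+ * though tlv_len records the unpadded payload size.) */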
+
+ /*
+ * Alternative 0 is always valid.
+ *
+ * Skip alternative TLVs that are not selected.
+ */
+ if (tlv_alt != 0 && tlv_alt != wanted_alternative)
+ continue;
+
+ switch (tlv_type) {
+ case IWL_UCODE_TLV_INST:
+ pieces->inst = tlv_data;
+ pieces->inst_size = tlv_len;
+ break;
+ case IWL_UCODE_TLV_DATA:
+ pieces->data = tlv_data;
+ pieces->data_size = tlv_len;
+ break;
+ case IWL_UCODE_TLV_INIT:
+ pieces->init = tlv_data;
+ pieces->init_size = tlv_len;
+ break;
+ case IWL_UCODE_TLV_INIT_DATA:
+ pieces->init_data = tlv_data;
+ pieces->init_data_size = tlv_len;
+ break;
+ case IWL_UCODE_TLV_BOOT:
+ pieces->boot = tlv_data;
+ pieces->boot_size = tlv_len;
+ break;
+ case IWL_UCODE_TLV_PROBE_MAX_LEN:
+ if (tlv_len != 4)
+ return -EINVAL;
+ capa->max_probe_length =
+ le32_to_cpup((__le32 *)tlv_data);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (len)
+ return -EINVAL;
+
+ return 0;
+}
+
/**
* iwl_ucode_callback - callback when firmware was loaded
*
@@ -1510,14 +1742,18 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
{
struct iwl_priv *priv = context;
struct iwl_ucode_header *ucode;
+ int err;
+ struct iwlagn_firmware_pieces pieces;
const unsigned int api_max = priv->cfg->ucode_api_max;
const unsigned int api_min = priv->cfg->ucode_api_min;
- u8 *src;
- size_t len;
- u32 api_ver, build;
- u32 inst_size, data_size, init_size, init_data_size, boot_size;
- int err;
- u16 eeprom_ver;
+ u32 api_ver;
+ char buildstr[25];
+ u32 build;
+ struct iwlagn_ucode_capabilities ucode_capa = {
+ .max_probe_length = 200,
+ };
+
+ memset(&pieces, 0, sizeof(pieces));
if (!ucode_raw) {
IWL_ERR(priv, "request for firmware file '%s' failed.\n",
@@ -1528,8 +1764,8 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n",
priv->firmware_name, ucode_raw->size);
- /* Make sure that we got at least the v1 header! */
- if (ucode_raw->size < priv->cfg->ops->ucode->get_header_size(1)) {
+ /* Make sure that we got at least the API version number */
+ if (ucode_raw->size < 4) {
IWL_ERR(priv, "File size way too small!\n");
goto try_again;
}
@@ -1537,21 +1773,23 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
/* Data from ucode file: header followed by uCode images */
ucode = (struct iwl_ucode_header *)ucode_raw->data;
- priv->ucode_ver = le32_to_cpu(ucode->ver);
+ if (ucode->ver)
+ err = iwlagn_load_legacy_firmware(priv, ucode_raw, &pieces);
+ else
+ err = iwlagn_load_firmware(priv, ucode_raw, &pieces,
+ &ucode_capa);
+
+ if (err)
+ goto try_again;
+
api_ver = IWL_UCODE_API(priv->ucode_ver);
- build = priv->cfg->ops->ucode->get_build(ucode, api_ver);
- inst_size = priv->cfg->ops->ucode->get_inst_size(ucode, api_ver);
- data_size = priv->cfg->ops->ucode->get_data_size(ucode, api_ver);
- init_size = priv->cfg->ops->ucode->get_init_size(ucode, api_ver);
- init_data_size =
- priv->cfg->ops->ucode->get_init_data_size(ucode, api_ver);
- boot_size = priv->cfg->ops->ucode->get_boot_size(ucode, api_ver);
- src = priv->cfg->ops->ucode->get_data(ucode, api_ver);
-
- /* api_ver should match the api version forming part of the
- * firmware filename ... but we don't check for that and only rely
- * on the API version read from firmware header from here on forward */
+ build = pieces.build;
+ /*
+ * api_ver should match the api version forming part of the
+ * firmware filename ... but we don't check for that and only rely
+ * on the API version read from firmware header from here on forward
+ */
if (api_ver < api_min || api_ver > api_max) {
IWL_ERR(priv, "Driver unable to support your firmware API. "
"Driver supports v%u, firmware is v%u.\n",
@@ -1565,40 +1803,26 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
"from http://www.intellinuxwireless.org.\n",
api_max, api_ver);
- IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
- IWL_UCODE_MAJOR(priv->ucode_ver),
- IWL_UCODE_MINOR(priv->ucode_ver),
- IWL_UCODE_API(priv->ucode_ver),
- IWL_UCODE_SERIAL(priv->ucode_ver));
+ if (build)
+ sprintf(buildstr, " build %u", build);
+ else
+ buildstr[0] = '\0';
+
+ IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u%s\n",
+ IWL_UCODE_MAJOR(priv->ucode_ver),
+ IWL_UCODE_MINOR(priv->ucode_ver),
+ IWL_UCODE_API(priv->ucode_ver),
+ IWL_UCODE_SERIAL(priv->ucode_ver),
+ buildstr);
snprintf(priv->hw->wiphy->fw_version,
sizeof(priv->hw->wiphy->fw_version),
- "%u.%u.%u.%u",
+ "%u.%u.%u.%u%s",
IWL_UCODE_MAJOR(priv->ucode_ver),
IWL_UCODE_MINOR(priv->ucode_ver),
IWL_UCODE_API(priv->ucode_ver),
- IWL_UCODE_SERIAL(priv->ucode_ver));
-
- if (build)
- IWL_DEBUG_INFO(priv, "Build %u\n", build);
-
- eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
- IWL_DEBUG_INFO(priv, "NVM Type: %s, version: 0x%x\n",
- (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
- ? "OTP" : "EEPROM", eeprom_ver);
-
- IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
- priv->ucode_ver);
- IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n",
- inst_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %u\n",
- data_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %u\n",
- init_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %u\n",
- init_data_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %u\n",
- boot_size);
+ IWL_UCODE_SERIAL(priv->ucode_ver),
+ buildstr);
/*
* For any of the failures below (before allocating pci memory)
@@ -1606,43 +1830,47 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
* user just got a corrupted version of the latest API.
*/
- /* Verify size of file vs. image size info in file's header */
- if (ucode_raw->size !=
- priv->cfg->ops->ucode->get_header_size(api_ver) +
- inst_size + data_size + init_size +
- init_data_size + boot_size) {
-
- IWL_DEBUG_INFO(priv,
- "uCode file size %d does not match expected size\n",
- (int)ucode_raw->size);
- goto try_again;
- }
+ IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
+ priv->ucode_ver);
+ IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %Zd\n",
+ pieces.inst_size);
+ IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %Zd\n",
+ pieces.data_size);
+ IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %Zd\n",
+ pieces.init_size);
+ IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n",
+ pieces.init_data_size);
+ IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %Zd\n",
+ pieces.boot_size);
/* Verify that uCode images will fit in card's SRAM */
- if (inst_size > priv->hw_params.max_inst_size) {
- IWL_DEBUG_INFO(priv, "uCode instr len %d too large to fit in\n",
- inst_size);
+ if (pieces.inst_size > priv->hw_params.max_inst_size) {
+ IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n",
+ pieces.inst_size);
goto try_again;
}
- if (data_size > priv->hw_params.max_data_size) {
- IWL_DEBUG_INFO(priv, "uCode data len %d too large to fit in\n",
- data_size);
+ if (pieces.data_size > priv->hw_params.max_data_size) {
+ IWL_ERR(priv, "uCode data len %Zd too large to fit in\n",
+ pieces.data_size);
goto try_again;
}
- if (init_size > priv->hw_params.max_inst_size) {
- IWL_INFO(priv, "uCode init instr len %d too large to fit in\n",
- init_size);
+
+ if (pieces.init_size > priv->hw_params.max_inst_size) {
+ IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n",
+ pieces.init_size);
goto try_again;
}
- if (init_data_size > priv->hw_params.max_data_size) {
- IWL_INFO(priv, "uCode init data len %d too large to fit in\n",
- init_data_size);
+
+ if (pieces.init_data_size > priv->hw_params.max_data_size) {
+ IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n",
+ pieces.init_data_size);
goto try_again;
}
- if (boot_size > priv->hw_params.max_bsm_size) {
- IWL_INFO(priv, "uCode boot instr len %d too large to fit in\n",
- boot_size);
+
+ if (pieces.boot_size > priv->hw_params.max_bsm_size) {
+ IWL_ERR(priv, "uCode boot instr len %Zd too large to fit in\n",
+ pieces.boot_size);
goto try_again;
}
@@ -1651,13 +1879,13 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
/* Runtime instructions and 2 copies of data:
* 1) unmodified from disk
* 2) backup cache for save/restore during power-downs */
- priv->ucode_code.len = inst_size;
+ priv->ucode_code.len = pieces.inst_size;
iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
- priv->ucode_data.len = data_size;
+ priv->ucode_data.len = pieces.data_size;
iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
- priv->ucode_data_backup.len = data_size;
+ priv->ucode_data_backup.len = pieces.data_size;
iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
@@ -1665,11 +1893,11 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
goto err_pci_alloc;
/* Initialization instructions and data */
- if (init_size && init_data_size) {
- priv->ucode_init.len = init_size;
+ if (pieces.init_size && pieces.init_data_size) {
+ priv->ucode_init.len = pieces.init_size;
iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
- priv->ucode_init_data.len = init_data_size;
+ priv->ucode_init_data.len = pieces.init_data_size;
iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
@@ -1677,8 +1905,8 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
}
/* Bootstrap (instructions only, no data) */
- if (boot_size) {
- priv->ucode_boot.len = boot_size;
+ if (pieces.boot_size) {
+ priv->ucode_boot.len = pieces.boot_size;
iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
if (!priv->ucode_boot.v_addr)
@@ -1688,51 +1916,48 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
/* Copy images into buffers for card's bus-master reads ... */
/* Runtime instructions (first block of data in file) */
- len = inst_size;
- IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n", len);
- memcpy(priv->ucode_code.v_addr, src, len);
- src += len;
+ IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n",
+ pieces.inst_size);
+ memcpy(priv->ucode_code.v_addr, pieces.inst, pieces.inst_size);
IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
- /* Runtime data (2nd block)
- * NOTE: Copy into backup buffer will be done in iwl_up() */
- len = data_size;
- IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n", len);
- memcpy(priv->ucode_data.v_addr, src, len);
- memcpy(priv->ucode_data_backup.v_addr, src, len);
- src += len;
-
- /* Initialization instructions (3rd block) */
- if (init_size) {
- len = init_size;
+ /*
+ * Runtime data
+ * NOTE: Copy into backup buffer will be done in iwl_up()
+ */
+ IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n",
+ pieces.data_size);
+ memcpy(priv->ucode_data.v_addr, pieces.data, pieces.data_size);
+ memcpy(priv->ucode_data_backup.v_addr, pieces.data, pieces.data_size);
+
+ /* Initialization instructions */
+ if (pieces.init_size) {
IWL_DEBUG_INFO(priv, "Copying (but not loading) init instr len %Zd\n",
- len);
- memcpy(priv->ucode_init.v_addr, src, len);
- src += len;
+ pieces.init_size);
+ memcpy(priv->ucode_init.v_addr, pieces.init, pieces.init_size);
}
- /* Initialization data (4th block) */
- if (init_data_size) {
- len = init_data_size;
+ /* Initialization data */
+ if (pieces.init_data_size) {
IWL_DEBUG_INFO(priv, "Copying (but not loading) init data len %Zd\n",
- len);
- memcpy(priv->ucode_init_data.v_addr, src, len);
- src += len;
+ pieces.init_data_size);
+ memcpy(priv->ucode_init_data.v_addr, pieces.init_data,
+ pieces.init_data_size);
}
- /* Bootstrap instructions (5th block) */
- len = boot_size;
- IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n", len);
- memcpy(priv->ucode_boot.v_addr, src, len);
+ /* Bootstrap instructions */
+ IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n",
+ pieces.boot_size);
+ memcpy(priv->ucode_boot.v_addr, pieces.boot, pieces.boot_size);
/**************************************************
* This is still part of probe() in a sense...
*
* 9. Setup and register with mac80211 and debugfs
**************************************************/
- err = iwl_mac_setup_register(priv);
+ err = iwl_mac_setup_register(priv, &ucode_capa);
if (err)
goto out_unbind;
@@ -1742,6 +1967,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
/* We have our copies now, allow OS release its copies */
release_firmware(ucode_raw);
+ complete(&priv->_agn.firmware_loading_complete);
return;
try_again:
@@ -1755,6 +1981,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
IWL_ERR(priv, "failed to allocate pci memory\n");
iwl_dealloc_ucode_pci(priv);
out_unbind:
+ complete(&priv->_agn.firmware_loading_complete);
device_release_driver(&priv->pci_dev->dev);
release_firmware(ucode_raw);
}
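The complete() calls on both exit paths pair with the init_completion() in iwl_pci_probe() and the wait_for_completion() in iwl_pci_remove() added later in this patch, so the asynchronous firmware callback is guaranteed to have finished before the device is torn down. A minimal sketch of that pattern, with hypothetical names:

#include <linux/completion.h>

struct my_priv {
	struct completion loading_done;	/* init_completion() done in probe() */
};

static void my_fw_callback(void *context)
{
	struct my_priv *priv = context;
	/* ... consume or reject the firmware image ... */
	complete(&priv->loading_done);	/* signalled on every exit path */
}

static void my_remove(struct my_priv *priv)
{
	wait_for_completion(&priv->loading_done);
	/* only now is it safe to unwind what probe() set up */
}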
@@ -1809,6 +2036,7 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
u32 data2, line;
u32 desc, time, count, base, data1;
u32 blink1, blink2, ilink1, ilink2;
+ u32 pc, hcmd;
if (priv->ucode_type == UCODE_INIT)
base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
@@ -1831,6 +2059,7 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
}
desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32));
+ pc = iwl_read_targ_mem(priv, base + 2 * sizeof(u32));
blink1 = iwl_read_targ_mem(priv, base + 3 * sizeof(u32));
blink2 = iwl_read_targ_mem(priv, base + 4 * sizeof(u32));
ilink1 = iwl_read_targ_mem(priv, base + 5 * sizeof(u32));
@@ -1839,6 +2068,7 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
data2 = iwl_read_targ_mem(priv, base + 8 * sizeof(u32));
line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32));
time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32));
+ hcmd = iwl_read_targ_mem(priv, base + 22 * sizeof(u32));
trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, data2, line,
blink1, blink2, ilink1, ilink2);
@@ -1847,10 +2077,9 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
"data1 data2 line\n");
IWL_ERR(priv, "%-28s (#%02d) %010u 0x%08X 0x%08X %u\n",
desc_lookup(desc), desc, time, data1, data2, line);
- IWL_ERR(priv, "blink1 blink2 ilink1 ilink2\n");
- IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2,
- ilink1, ilink2);
-
+ IWL_ERR(priv, "pc blink1 blink2 ilink1 ilink2 hcmd\n");
+ IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n",
+ pc, blink1, blink2, ilink1, ilink2, hcmd);
}
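The new pc and hcmd values come from fixed word offsets (2 and 22) in the on-device error event table, matching the extended layout documented in iwl-commands.h later in this patch. A hedged helper sketch for reading one word of that table:

/* sketch only: a field at word index idx lives at base + idx * sizeof(u32) */
static inline u32 error_table_word(struct iwl_priv *priv, u32 base, int idx)
{
	return iwl_read_targ_mem(priv, base + idx * sizeof(u32));
}
/* e.g. pc = error_table_word(priv, base, 2); hcmd = error_table_word(priv, base, 22); */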
#define EVENT_START_OFFSET (4 * sizeof(u32))
@@ -1966,9 +2195,6 @@ static int iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
return pos;
}
-/* For sanity check only. Actual size is determined by uCode, typ. 512 */
-#define MAX_EVENT_LOG_SIZE (512)
-
#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
@@ -2001,16 +2227,16 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
- if (capacity > MAX_EVENT_LOG_SIZE) {
+ if (capacity > priv->cfg->max_event_log_size) {
IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
- capacity, MAX_EVENT_LOG_SIZE);
- capacity = MAX_EVENT_LOG_SIZE;
+ capacity, priv->cfg->max_event_log_size);
+ capacity = priv->cfg->max_event_log_size;
}
- if (next_entry > MAX_EVENT_LOG_SIZE) {
+ if (next_entry > priv->cfg->max_event_log_size) {
IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
- next_entry, MAX_EVENT_LOG_SIZE);
- next_entry = MAX_EVENT_LOG_SIZE;
+ next_entry, priv->cfg->max_event_log_size);
+ next_entry = priv->cfg->max_event_log_size;
}
size = num_wraps ? capacity : next_entry;
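The clamping above and the size computation rely on the event log being a circular buffer: once it has wrapped at least once, all capacity slots hold valid entries; before the first wrap, only the next_entry slots written so far do. A minimal sketch of that rule:

static u32 valid_log_entries(u32 capacity, u32 next_entry, u32 num_wraps)
{
	/* after a wrap every slot is valid; before it, only those written */
	return num_wraps ? capacity : next_entry;
}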
@@ -2095,7 +2321,6 @@ static void iwl_alive_start(struct iwl_priv *priv)
goto restart;
}
- iwl_clear_stations_table(priv);
ret = priv->cfg->ops->lib->alive_notify(priv);
if (ret) {
IWL_WARN(priv,
@@ -2106,13 +2331,19 @@ static void iwl_alive_start(struct iwl_priv *priv)
/* After the ALIVE response, we can send host commands to the uCode */
set_bit(STATUS_ALIVE, &priv->status);
+ if (priv->cfg->ops->lib->recover_from_tx_stall) {
+ /* Enable timer to monitor the driver queues */
+ mod_timer(&priv->monitor_recover,
+ jiffies +
+ msecs_to_jiffies(priv->cfg->monitor_recover_period));
+ }
+
if (iwl_is_rfkill(priv))
return;
ieee80211_wake_queues(priv->hw);
- priv->active_rate = priv->rates_mask;
- priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
+ priv->active_rate = IWL_RATES_MASK;
/* Configure Tx antenna selection based on H/W config */
if (priv->cfg->ops->hcmd->set_tx_ant)
@@ -2126,7 +2357,7 @@ static void iwl_alive_start(struct iwl_priv *priv)
active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
} else {
/* Initialize our rx_config data */
- iwl_connection_init_rx_config(priv, priv->iw_mode);
+ iwl_connection_init_rx_config(priv, NULL);
if (priv->cfg->ops->hcmd->set_rxon_chain)
priv->cfg->ops->hcmd->set_rxon_chain(priv);
@@ -2135,7 +2366,7 @@ static void iwl_alive_start(struct iwl_priv *priv)
}
/* Configure Bluetooth device coexistence support */
- iwl_send_bt_config(priv);
+ priv->cfg->ops->hcmd->send_bt_config(priv);
iwl_reset_run_time_calib(priv);
@@ -2152,18 +2383,8 @@ static void iwl_alive_start(struct iwl_priv *priv)
wake_up_interruptible(&priv->wait_command_queue);
iwl_power_update_mode(priv, true);
+ IWL_DEBUG_INFO(priv, "Updated power mode\n");
- /* reassociate for ADHOC mode */
- if (priv->vif && (priv->iw_mode == NL80211_IFTYPE_ADHOC)) {
- struct sk_buff *beacon = ieee80211_beacon_get(priv->hw,
- priv->vif);
- if (beacon)
- iwl_mac_beacon_update(priv->hw, beacon);
- }
-
-
- if (test_and_clear_bit(STATUS_MODE_PENDING, &priv->status))
- iwl_set_mode(priv, priv->iw_mode);
return;
@@ -2183,7 +2404,9 @@ static void __iwl_down(struct iwl_priv *priv)
if (!exit_pending)
set_bit(STATUS_EXIT_PENDING, &priv->status);
- iwl_clear_stations_table(priv);
+ iwl_clear_ucode_stations(priv);
+ iwl_dealloc_bcast_station(priv);
+ iwl_clear_driver_stations(priv);
/* Unblock any waiting calls */
wake_up_interruptible_all(&priv->wait_command_queue);
@@ -2231,8 +2454,8 @@ static void __iwl_down(struct iwl_priv *priv)
/* device going down, Stop using ICT table */
iwl_disable_ict(priv);
- iwl_txq_ctx_stop(priv);
- iwl_rxq_stop(priv);
+ iwlagn_txq_ctx_stop(priv);
+ iwlagn_rxq_stop(priv);
/* Power-down device's busmaster DMA clocks */
iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
@@ -2292,7 +2515,7 @@ static int iwl_prepare_card_hw(struct iwl_priv *priv)
{
int ret = 0;
- IWL_DEBUG_INFO(priv, "iwl_prepare_card_hw enter \n");
+ IWL_DEBUG_INFO(priv, "iwl_prepare_card_hw enter\n");
ret = iwl_set_hw_ready(priv);
if (priv->hw_ready)
@@ -2330,6 +2553,10 @@ static int __iwl_up(struct iwl_priv *priv)
return -EIO;
}
+ ret = iwl_alloc_bcast_station(priv, true);
+ if (ret)
+ return ret;
+
iwl_prepare_card_hw(priv);
if (!priv->hw_ready) {
@@ -2353,7 +2580,7 @@ static int __iwl_up(struct iwl_priv *priv)
iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
- ret = iwl_hw_nic_init(priv);
+ ret = iwlagn_hw_nic_init(priv);
if (ret) {
IWL_ERR(priv, "Unable to init nic\n");
return ret;
@@ -2380,8 +2607,6 @@ static int __iwl_up(struct iwl_priv *priv)
for (i = 0; i < MAX_HW_RESTARTS; i++) {
- iwl_clear_stations_table(priv);
-
/* load bootstrap state machine,
* load bootstrap program into processor's memory,
* prepare to load the "initialize" uCode */
@@ -2505,34 +2730,28 @@ static void iwl_bg_rx_replenish(struct work_struct *data)
return;
mutex_lock(&priv->mutex);
- iwl_rx_replenish(priv);
+ iwlagn_rx_replenish(priv);
mutex_unlock(&priv->mutex);
}
#define IWL_DELAY_NEXT_SCAN (HZ*2)
-void iwl_post_associate(struct iwl_priv *priv)
+void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
struct ieee80211_conf *conf = NULL;
int ret = 0;
- unsigned long flags;
- if (priv->iw_mode == NL80211_IFTYPE_AP) {
+ if (!vif || !priv->is_open)
+ return;
+
+ if (vif->type == NL80211_IFTYPE_AP) {
IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
return;
}
- IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
- priv->assoc_id, priv->active_rxon.bssid_addr);
-
-
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
-
- if (!priv->vif || !priv->is_open)
- return;
-
iwl_scan_cancel_timeout(priv, 200);
conf = ieee80211_get_hw_conf(priv->hw);
@@ -2540,7 +2759,7 @@ void iwl_post_associate(struct iwl_priv *priv)
priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
iwlcore_commit_rxon(priv);
- iwl_setup_rxon_timing(priv);
+ iwl_setup_rxon_timing(priv, vif);
ret = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
sizeof(priv->rxon_timing), &priv->rxon_timing);
if (ret)
@@ -2554,56 +2773,44 @@ void iwl_post_associate(struct iwl_priv *priv)
if (priv->cfg->ops->hcmd->set_rxon_chain)
priv->cfg->ops->hcmd->set_rxon_chain(priv);
- priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
+ priv->staging_rxon.assoc_id = cpu_to_le16(vif->bss_conf.aid);
IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
- priv->assoc_id, priv->beacon_int);
+ vif->bss_conf.aid, vif->bss_conf.beacon_int);
- if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
+ if (vif->bss_conf.assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
else
priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
- if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
+ if (vif->bss_conf.assoc_capability &
+ WLAN_CAPABILITY_SHORT_SLOT_TIME)
priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
else
priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
+ if (vif->type == NL80211_IFTYPE_ADHOC)
priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
-
}
iwlcore_commit_rxon(priv);
- switch (priv->iw_mode) {
+ IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
+ vif->bss_conf.aid, priv->active_rxon.bssid_addr);
+
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
break;
-
case NL80211_IFTYPE_ADHOC:
-
- /* assume default assoc id */
- priv->assoc_id = 1;
-
- iwl_rxon_add_station(priv, priv->bssid, 0);
iwl_send_beacon_cmd(priv);
-
break;
-
default:
IWL_ERR(priv, "%s Should not be called in %d mode\n",
- __func__, priv->iw_mode);
+ __func__, vif->type);
break;
}
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
- priv->assoc_station_added = 1;
-
- spin_lock_irqsave(&priv->lock, flags);
- iwl_activate_qos(priv, 0);
- spin_unlock_irqrestore(&priv->lock, flags);
-
	/* the chain noise calibration will enable PM upon completion.
* If chain noise has already been run, then we need to enable
* power management here */
@@ -2628,7 +2835,8 @@ void iwl_post_associate(struct iwl_priv *priv)
* Not a mac80211 entry point function, but it fits in with all the
* other mac80211 functions grouped here.
*/
-static int iwl_mac_setup_register(struct iwl_priv *priv)
+static int iwl_mac_setup_register(struct iwl_priv *priv,
+ struct iwlagn_ucode_capabilities *capa)
{
int ret;
struct ieee80211_hw *hw = priv->hw;
@@ -2636,7 +2844,6 @@ static int iwl_mac_setup_register(struct iwl_priv *priv)
/* Tell mac80211 our characteristics */
hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_NOISE_DBM |
IEEE80211_HW_AMPDU_AGGREGATION |
IEEE80211_HW_SPECTRUM_MGMT;
@@ -2649,6 +2856,8 @@ static int iwl_mac_setup_register(struct iwl_priv *priv)
IEEE80211_HW_SUPPORTS_STATIC_SMPS;
hw->sta_data_size = sizeof(struct iwl_station_priv);
+ hw->vif_data_size = sizeof(struct iwl_vif_priv);
+
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC);
@@ -2664,7 +2873,7 @@ static int iwl_mac_setup_register(struct iwl_priv *priv)
hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
/* we create the 802.11 header and a zero-length SSID element */
- hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2;
+ hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 2;
/* Default value; 4 EDCA QOS priorities */
hw->queues = 4;
@@ -2770,17 +2979,16 @@ static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
- if (iwl_tx_skb(priv, skb))
+ if (iwlagn_tx_skb(priv, skb))
dev_kfree_skb_any(skb);
IWL_DEBUG_MACDUMP(priv, "leave\n");
return NETDEV_TX_OK;
}
-void iwl_config_ap(struct iwl_priv *priv)
+void iwl_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
int ret = 0;
- unsigned long flags;
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
@@ -2793,7 +3001,7 @@ void iwl_config_ap(struct iwl_priv *priv)
iwlcore_commit_rxon(priv);
/* RXON Timing */
- iwl_setup_rxon_timing(priv);
+ iwl_setup_rxon_timing(priv, vif);
ret = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
sizeof(priv->rxon_timing), &priv->rxon_timing);
if (ret)
@@ -2807,9 +3015,10 @@ void iwl_config_ap(struct iwl_priv *priv)
if (priv->cfg->ops->hcmd->set_rxon_chain)
priv->cfg->ops->hcmd->set_rxon_chain(priv);
- /* FIXME: what should be the assoc_id for AP? */
- priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
- if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
+ priv->staging_rxon.assoc_id = 0;
+
+ if (vif->bss_conf.assoc_capability &
+ WLAN_CAPABILITY_SHORT_PREAMBLE)
priv->staging_rxon.flags |=
RXON_FLG_SHORT_PREAMBLE_MSK;
else
@@ -2817,26 +3026,21 @@ void iwl_config_ap(struct iwl_priv *priv)
~RXON_FLG_SHORT_PREAMBLE_MSK;
if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
- if (priv->assoc_capability &
- WLAN_CAPABILITY_SHORT_SLOT_TIME)
+ if (vif->bss_conf.assoc_capability &
+ WLAN_CAPABILITY_SHORT_SLOT_TIME)
priv->staging_rxon.flags |=
RXON_FLG_SHORT_SLOT_MSK;
else
priv->staging_rxon.flags &=
~RXON_FLG_SHORT_SLOT_MSK;
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
+ if (vif->type == NL80211_IFTYPE_ADHOC)
priv->staging_rxon.flags &=
~RXON_FLG_SHORT_SLOT_MSK;
}
/* restore RXON assoc */
priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
iwlcore_commit_rxon(priv);
- iwl_reset_qos(priv);
- spin_lock_irqsave(&priv->lock, flags);
- iwl_activate_qos(priv, 1);
- spin_unlock_irqrestore(&priv->lock, flags);
- iwl_add_bcast_station(priv);
}
iwl_send_beacon_cmd(priv);
@@ -2855,8 +3059,7 @@ static void iwl_mac_update_tkip_key(struct ieee80211_hw *hw,
struct iwl_priv *priv = hw->priv;
IWL_DEBUG_MAC80211(priv, "enter\n");
- iwl_update_tkip_key(priv, keyconf,
- sta ? sta->addr : iwl_bcast_addr,
+ iwl_update_tkip_key(priv, keyconf, sta,
iv32, phase1key);
IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -2868,7 +3071,6 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_key_conf *key)
{
struct iwl_priv *priv = hw->priv;
- const u8 *addr;
int ret;
u8 sta_id;
bool is_default_wep_key = false;
@@ -2879,25 +3081,29 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
return -EOPNOTSUPP;
}
- addr = sta ? sta->addr : iwl_bcast_addr;
- sta_id = iwl_find_station(priv, addr);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_DEBUG_MAC80211(priv, "leave - %pM not in station map.\n",
- addr);
- return -EINVAL;
+ if (sta) {
+ sta_id = iwl_sta_id(sta);
+
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_DEBUG_MAC80211(priv, "leave - %pM not in station map.\n",
+ sta->addr);
+ return -EINVAL;
+ }
+ } else {
+ sta_id = priv->hw_params.bcast_sta_id;
}
mutex_lock(&priv->mutex);
iwl_scan_cancel_timeout(priv, 100);
- mutex_unlock(&priv->mutex);
- /* If we are getting WEP group key and we didn't receive any key mapping
+ /*
+ * If we are getting WEP group key and we didn't receive any key mapping
* so far, we are in legacy wep mode (group key only), otherwise we are
* in 1X mode.
- * In legacy wep mode, we use another host command to the uCode */
- if (key->alg == ALG_WEP && sta_id == priv->hw_params.bcast_sta_id &&
- priv->iw_mode != NL80211_IFTYPE_AP) {
+ * In legacy wep mode, we use another host command to the uCode.
+ */
+ if (key->alg == ALG_WEP && !sta && vif->type != NL80211_IFTYPE_AP) {
if (cmd == SET_KEY)
is_default_wep_key = !priv->key_mapping_key;
else
@@ -2926,6 +3132,7 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
ret = -EINVAL;
}
+ mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
return ret;
@@ -2933,8 +3140,8 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
struct iwl_priv *priv = hw->priv;
int ret;
@@ -2948,20 +3155,31 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
switch (action) {
case IEEE80211_AMPDU_RX_START:
IWL_DEBUG_HT(priv, "start Rx\n");
- return iwl_sta_rx_agg_start(priv, sta->addr, tid, *ssn);
+ return iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
case IEEE80211_AMPDU_RX_STOP:
IWL_DEBUG_HT(priv, "stop Rx\n");
- ret = iwl_sta_rx_agg_stop(priv, sta->addr, tid);
+ ret = iwl_sta_rx_agg_stop(priv, sta, tid);
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return 0;
else
return ret;
case IEEE80211_AMPDU_TX_START:
IWL_DEBUG_HT(priv, "start Tx\n");
- return iwl_tx_agg_start(priv, sta->addr, tid, ssn);
+ ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
+ if (ret == 0) {
+ priv->_agn.agg_tids_count++;
+ IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n",
+ priv->_agn.agg_tids_count);
+ }
+ return ret;
case IEEE80211_AMPDU_TX_STOP:
IWL_DEBUG_HT(priv, "stop Tx\n");
- ret = iwl_tx_agg_stop(priv, sta->addr, tid);
+ ret = iwlagn_tx_agg_stop(priv, vif, sta, tid);
+ if ((ret == 0) && (priv->_agn.agg_tids_count > 0)) {
+ priv->_agn.agg_tids_count--;
+ IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n",
+ priv->_agn.agg_tids_count);
+ }
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return 0;
else
@@ -2977,18 +3195,6 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
return 0;
}
-static int iwl_mac_get_stats(struct ieee80211_hw *hw,
- struct ieee80211_low_level_stats *stats)
-{
- struct iwl_priv *priv = hw->priv;
-
- priv = hw->priv;
- IWL_DEBUG_MAC80211(priv, "enter\n");
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return 0;
-}
-
static void iwl_mac_sta_notify(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum sta_notify_cmd cmd,
@@ -2998,18 +3204,7 @@ static void iwl_mac_sta_notify(struct ieee80211_hw *hw,
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
int sta_id;
- /*
- * TODO: We really should use this callback to
- * actually maintain the station table in
- * the device.
- */
-
switch (cmd) {
- case STA_NOTIFY_ADD:
- atomic_set(&sta_priv->pending_frames, 0);
- if (vif->type == NL80211_IFTYPE_AP)
- sta_priv->client = true;
- break;
case STA_NOTIFY_SLEEP:
WARN_ON(!sta_priv->client);
sta_priv->asleep = true;
@@ -3021,7 +3216,7 @@ static void iwl_mac_sta_notify(struct ieee80211_hw *hw,
if (!sta_priv->asleep)
break;
sta_priv->asleep = false;
- sta_id = iwl_find_station(priv, sta->addr);
+ sta_id = iwl_sta_id(sta);
if (sta_id != IWL_INVALID_STATION)
iwl_sta_modify_ps_wake(priv, sta_id);
break;
@@ -3030,6 +3225,44 @@ static void iwl_mac_sta_notify(struct ieee80211_hw *hw,
}
}
+static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+ bool is_ap = vif->type == NL80211_IFTYPE_STATION;
+ int ret;
+ u8 sta_id;
+
+ sta_priv->common.sta_id = IWL_INVALID_STATION;
+
+ IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
+ sta->addr);
+
+ atomic_set(&sta_priv->pending_frames, 0);
+ if (vif->type == NL80211_IFTYPE_AP)
+ sta_priv->client = true;
+
+ ret = iwl_add_station_common(priv, sta->addr, is_ap, &sta->ht_cap,
+ &sta_id);
+ if (ret) {
+ IWL_ERR(priv, "Unable to add station %pM (%d)\n",
+ sta->addr, ret);
+ /* Should we return success if return code is EEXIST ? */
+ return ret;
+ }
+
+ sta_priv->common.sta_id = sta_id;
+
+ /* Initialize rate scaling */
+ IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
+ sta->addr);
+ iwl_rs_rate_init(priv, sta, sta_id);
+
+ return 0;
+}
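mac80211 invokes this sta_add callback whenever a peer station is added, replacing the STA_NOTIFY_ADD handling removed from iwl_mac_sta_notify() above; note that is_ap is true when our own interface is a station, because the peer being added is then the AP. The per-station state lives directly in sta->drv_priv, sized by the hw->sta_data_size assignment in iwl_mac_setup_register(). A hedged sketch of that layout with illustrative names:

struct example_sta_priv {
	u8 sta_id;	/* filled in once the uCode station entry exists */
};

static struct example_sta_priv *example_sta_state(struct ieee80211_sta *sta)
{
	/* drv_priv is driver-owned storage embedded in the mac80211 station */
	return (struct example_sta_priv *)sta->drv_priv;
}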
+
/*****************************************************************************
*
* sysfs attributes
@@ -3130,125 +3363,6 @@ static ssize_t store_tx_power(struct device *d,
static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
-static ssize_t show_flags(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
-
- return sprintf(buf, "0x%04X\n", priv->active_rxon.flags);
-}
-
-static ssize_t store_flags(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- unsigned long val;
- u32 flags;
- int ret = strict_strtoul(buf, 0, &val);
- if (ret)
- return ret;
- flags = (u32)val;
-
- mutex_lock(&priv->mutex);
- if (le32_to_cpu(priv->staging_rxon.flags) != flags) {
- /* Cancel any currently running scans... */
- if (iwl_scan_cancel_timeout(priv, 100))
- IWL_WARN(priv, "Could not cancel scan.\n");
- else {
- IWL_DEBUG_INFO(priv, "Commit rxon.flags = 0x%04X\n", flags);
- priv->staging_rxon.flags = cpu_to_le32(flags);
- iwlcore_commit_rxon(priv);
- }
- }
- mutex_unlock(&priv->mutex);
-
- return count;
-}
-
-static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags);
-
-static ssize_t show_filter_flags(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
-
- return sprintf(buf, "0x%04X\n",
- le32_to_cpu(priv->active_rxon.filter_flags));
-}
-
-static ssize_t store_filter_flags(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- unsigned long val;
- u32 filter_flags;
- int ret = strict_strtoul(buf, 0, &val);
- if (ret)
- return ret;
- filter_flags = (u32)val;
-
- mutex_lock(&priv->mutex);
- if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) {
- /* Cancel any currently running scans... */
- if (iwl_scan_cancel_timeout(priv, 100))
- IWL_WARN(priv, "Could not cancel scan.\n");
- else {
- IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = "
- "0x%04X\n", filter_flags);
- priv->staging_rxon.filter_flags =
- cpu_to_le32(filter_flags);
- iwlcore_commit_rxon(priv);
- }
- }
- mutex_unlock(&priv->mutex);
-
- return count;
-}
-
-static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
- store_filter_flags);
-
-
-static ssize_t show_statistics(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- u32 size = sizeof(struct iwl_notif_statistics);
- u32 len = 0, ofs = 0;
- u8 *data = (u8 *)&priv->statistics;
- int rc = 0;
-
- if (!iwl_is_alive(priv))
- return -EAGAIN;
-
- mutex_lock(&priv->mutex);
- rc = iwl_send_statistics_request(priv, CMD_SYNC, false);
- mutex_unlock(&priv->mutex);
-
- if (rc) {
- len = sprintf(buf,
- "Error sending statistics request: 0x%08X\n", rc);
- return len;
- }
-
- while (size && (PAGE_SIZE - len)) {
- hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
- PAGE_SIZE - len, 1);
- len = strlen(buf);
- if (PAGE_SIZE - len)
- buf[len++] = '\n';
-
- ofs += 16;
- size -= min(size, 16U);
- }
-
- return len;
-}
-
-static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL);
-
static ssize_t show_rts_ht_protection(struct device *d,
struct device_attribute *attr, char *buf)
{
@@ -3316,6 +3430,13 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
priv->ucode_trace.data = (unsigned long)priv;
priv->ucode_trace.function = iwl_bg_ucode_trace;
+ if (priv->cfg->ops->lib->recover_from_tx_stall) {
+ init_timer(&priv->monitor_recover);
+ priv->monitor_recover.data = (unsigned long)priv;
+ priv->monitor_recover.function =
+ priv->cfg->ops->lib->recover_from_tx_stall;
+ }
+
if (!priv->cfg->use_isr_legacy)
tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
iwl_irq_tasklet, (unsigned long)priv);
@@ -3336,6 +3457,8 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv)
cancel_work_sync(&priv->beacon_update);
del_timer_sync(&priv->statistics_periodic);
del_timer_sync(&priv->ucode_trace);
+ if (priv->cfg->ops->lib->recover_from_tx_stall)
+ del_timer_sync(&priv->monitor_recover);
}
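The monitor_recover timer is only created when the configuration supplies a recover_from_tx_stall handler; it is armed in iwl_alive_start() (earlier in this patch) and stopped here with del_timer_sync(). A minimal sketch of that lifecycle using the timer API of this kernel generation, with hypothetical names:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_state {
	struct timer_list watchdog;
	unsigned long period_ms;
};

static void my_recover(unsigned long data)
{
	struct my_state *s = (struct my_state *)data;
	/* inspect the tx queues, then re-arm while monitoring is still wanted */
	mod_timer(&s->watchdog, jiffies + msecs_to_jiffies(s->period_ms));
}

static void my_setup(struct my_state *s)
{
	init_timer(&s->watchdog);
	s->watchdog.data = (unsigned long)s;
	s->watchdog.function = my_recover;
}
/* arm with mod_timer(), tear down with del_timer_sync() */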
static void iwl_init_hw_rates(struct iwl_priv *priv,
@@ -3373,9 +3496,6 @@ static int iwl_init_drv(struct iwl_priv *priv)
mutex_init(&priv->mutex);
mutex_init(&priv->sync_cmd_mutex);
- /* Clear the driver's (not device's) station table */
- iwl_clear_stations_table(priv);
-
priv->ieee_channels = NULL;
priv->ieee_rates = NULL;
priv->band = IEEE80211_BAND_2GHZ;
@@ -3383,6 +3503,7 @@ static int iwl_init_drv(struct iwl_priv *priv)
priv->iw_mode = NL80211_IFTYPE_STATION;
priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
+ priv->_agn.agg_tids_count = 0;
/* initialize force reset */
priv->force_reset[IWL_RF_RESET].reset_duration =
@@ -3396,16 +3517,10 @@ static int iwl_init_drv(struct iwl_priv *priv)
iwl_init_scan_params(priv);
- iwl_reset_qos(priv);
-
- priv->qos_data.qos_active = 0;
- priv->qos_data.qos_cap.val = 0;
-
- priv->rates_mask = IWL_RATES_MASK;
/* Set the tx_power_user_lmt to the lowest power level
* this value will get overwritten by channel max power avg
* from eeprom */
- priv->tx_power_user_lmt = IWL_TX_POWER_TARGET_POWER_MIN;
+ priv->tx_power_user_lmt = IWLAGN_TX_POWER_TARGET_POWER_MIN;
ret = iwl_init_channel_map(priv);
if (ret) {
@@ -3433,13 +3548,10 @@ static void iwl_uninit_drv(struct iwl_priv *priv)
iwl_calib_free_results(priv);
iwlcore_free_geos(priv);
iwl_free_channel_map(priv);
- kfree(priv->scan);
+ kfree(priv->scan_cmd);
}
static struct attribute *iwl_sysfs_entries[] = {
- &dev_attr_flags.attr,
- &dev_attr_filter_flags.attr,
- &dev_attr_statistics.attr,
&dev_attr_temperature.attr,
&dev_attr_tx_power.attr,
&dev_attr_rts_ht_protection.attr,
@@ -3464,13 +3576,14 @@ static struct ieee80211_ops iwl_hw_ops = {
.configure_filter = iwl_configure_filter,
.set_key = iwl_mac_set_key,
.update_tkip_key = iwl_mac_update_tkip_key,
- .get_stats = iwl_mac_get_stats,
.conf_tx = iwl_mac_conf_tx,
.reset_tsf = iwl_mac_reset_tsf,
.bss_info_changed = iwl_bss_info_changed,
.ampdu_action = iwl_mac_ampdu_action,
.hw_scan = iwl_mac_hw_scan,
.sta_notify = iwl_mac_sta_notify,
+ .sta_add = iwlagn_mac_sta_add,
+ .sta_remove = iwl_mac_sta_remove,
};
static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -3574,7 +3687,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
iwl_hw_detect(priv);
- IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s REV=0x%X\n",
+ IWL_INFO(priv, "Detected %s, REV=0x%X\n",
priv->cfg->name, priv->hw_rev);
/* We disable the RETRY_TIMEOUT register (0x41) to keep
@@ -3672,6 +3785,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
iwl_power_initialize(priv);
iwl_tt_initialize(priv);
+ init_completion(&priv->_agn.firmware_loading_complete);
+
err = iwl_request_firmware(priv, true);
if (err)
goto out_remove_sysfs;
@@ -3712,6 +3827,8 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
if (!priv)
return;
+ wait_for_completion(&priv->_agn.firmware_loading_complete);
+
IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
iwl_dbgfs_unregister(priv);
@@ -3752,10 +3869,9 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
iwl_dealloc_ucode_pci(priv);
if (priv->rxq.bd)
- iwl_rx_queue_free(priv, &priv->rxq);
- iwl_hw_txq_ctx_free(priv);
+ iwlagn_rx_queue_free(priv, &priv->rxq);
+ iwlagn_hw_txq_ctx_free(priv);
- iwl_clear_stations_table(priv);
iwl_eeprom_free(priv);
@@ -3870,6 +3986,11 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
{IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
+/* 6x00 Series Gen2a */
+ {IWL_PCI_DEVICE(0x0082, 0x1201, iwl6000g2a_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0x1211, iwl6000g2a_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1221, iwl6000g2a_2agn_cfg)},
+
/* 6x50 WiFi/WiMax Series */
{IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)},
{IWL_PCI_DEVICE(0x0087, 0x1306, iwl6050_2abg_cfg)},
@@ -3951,3 +4072,38 @@ module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "debug output mask");
#endif
+module_param_named(swcrypto50, iwlagn_mod_params.sw_crypto, bool, S_IRUGO);
+MODULE_PARM_DESC(swcrypto50,
+ "using crypto in software (default 0 [hardware]) (deprecated)");
+module_param_named(swcrypto, iwlagn_mod_params.sw_crypto, int, S_IRUGO);
+MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
+module_param_named(queues_num50,
+ iwlagn_mod_params.num_of_queues, int, S_IRUGO);
+MODULE_PARM_DESC(queues_num50,
+ "number of hw queues in 50xx series (deprecated)");
+module_param_named(queues_num, iwlagn_mod_params.num_of_queues, int, S_IRUGO);
+MODULE_PARM_DESC(queues_num, "number of hw queues.");
+module_param_named(11n_disable50, iwlagn_mod_params.disable_11n, int, S_IRUGO);
+MODULE_PARM_DESC(11n_disable50, "disable 50XX 11n functionality (deprecated)");
+module_param_named(11n_disable, iwlagn_mod_params.disable_11n, int, S_IRUGO);
+MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
+module_param_named(amsdu_size_8K50, iwlagn_mod_params.amsdu_size_8K,
+ int, S_IRUGO);
+MODULE_PARM_DESC(amsdu_size_8K50,
+ "enable 8K amsdu size in 50XX series (deprecated)");
+module_param_named(amsdu_size_8K, iwlagn_mod_params.amsdu_size_8K,
+ int, S_IRUGO);
+MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
+module_param_named(fw_restart50, iwlagn_mod_params.restart_fw, int, S_IRUGO);
+MODULE_PARM_DESC(fw_restart50,
+ "restart firmware in case of error (deprecated)");
+module_param_named(fw_restart, iwlagn_mod_params.restart_fw, int, S_IRUGO);
+MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
+module_param_named(
+ disable_hw_scan, iwlagn_mod_params.disable_hw_scan, int, S_IRUGO);
+MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
+
+module_param_named(ucode_alternative, iwlagn_wanted_ucode_alternative, int,
+ S_IRUGO);
+MODULE_PARM_DESC(ucode_alternative,
+ "specify ucode alternative to use from ucode file");
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
new file mode 100644
index 000000000000..2d748053358e
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -0,0 +1,181 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __iwl_agn_h__
+#define __iwl_agn_h__
+
+#include "iwl-dev.h"
+
+extern struct iwl_mod_params iwlagn_mod_params;
+extern struct iwl_hcmd_ops iwlagn_hcmd;
+extern struct iwl_hcmd_utils_ops iwlagn_hcmd_utils;
+
+int iwl_reset_ict(struct iwl_priv *priv);
+void iwl_disable_ict(struct iwl_priv *priv);
+int iwl_alloc_isr_ict(struct iwl_priv *priv);
+void iwl_free_isr_ict(struct iwl_priv *priv);
+irqreturn_t iwl_isr_ict(int irq, void *data);
+bool iwl_good_ack_health(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt);
+
+/* tx queue */
+void iwlagn_set_wr_ptrs(struct iwl_priv *priv,
+ int txq_id, u32 index);
+void iwlagn_tx_queue_set_status(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq,
+ int tx_fifo_id, int scd_retry);
+void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq,
+ u16 byte_cnt);
+void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq);
+int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id,
+ int tx_fifo, int sta_id, int tid, u16 ssn_idx);
+int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
+ u16 ssn_idx, u8 tx_fifo);
+void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask);
+
+/* uCode */
+int iwlagn_load_ucode(struct iwl_priv *priv);
+void iwlagn_rx_calib_result(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+void iwlagn_rx_calib_complete(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+void iwlagn_init_alive_start(struct iwl_priv *priv);
+int iwlagn_alive_notify(struct iwl_priv *priv);
+
+/* lib */
+void iwl_check_abort_status(struct iwl_priv *priv,
+ u8 frame_count, u32 status);
+void iwlagn_rx_handler_setup(struct iwl_priv *priv);
+void iwlagn_setup_deferred_work(struct iwl_priv *priv);
+int iwlagn_hw_valid_rtc_data_addr(u32 addr);
+int iwlagn_send_tx_power(struct iwl_priv *priv);
+void iwlagn_temperature(struct iwl_priv *priv);
+u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv);
+const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv,
+ size_t offset);
+void iwlagn_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
+int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
+int iwlagn_hw_nic_init(struct iwl_priv *priv);
+
+/* rx */
+void iwlagn_rx_queue_restock(struct iwl_priv *priv);
+void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority);
+void iwlagn_rx_replenish(struct iwl_priv *priv);
+void iwlagn_rx_replenish_now(struct iwl_priv *priv);
+void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
+int iwlagn_rxq_stop(struct iwl_priv *priv);
+int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
+void iwlagn_rx_reply_rx(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+
+/* tx */
+void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
+ struct ieee80211_tx_info *info);
+int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
+int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn);
+int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid);
+int iwlagn_txq_check_empty(struct iwl_priv *priv,
+ int sta_id, u8 tid, int txq_id);
+void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
+void iwlagn_hw_txq_ctx_free(struct iwl_priv *priv);
+int iwlagn_txq_ctx_alloc(struct iwl_priv *priv);
+void iwlagn_txq_ctx_reset(struct iwl_priv *priv);
+void iwlagn_txq_ctx_stop(struct iwl_priv *priv);
+
+static inline u32 iwl_tx_status_to_mac80211(u32 status)
+{
+ status &= TX_STATUS_MSK;
+
+ switch (status) {
+ case TX_STATUS_SUCCESS:
+ case TX_STATUS_DIRECT_DONE:
+ return IEEE80211_TX_STAT_ACK;
+ case TX_STATUS_FAIL_DEST_PS:
+ return IEEE80211_TX_STAT_TX_FILTERED;
+ default:
+ return 0;
+ }
+}
+
+static inline bool iwl_is_tx_success(u32 status)
+{
+ status &= TX_STATUS_MSK;
+ return (status == TX_STATUS_SUCCESS) ||
+ (status == TX_STATUS_DIRECT_DONE);
+}
+
+/* scan */
+void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
+
+/* station mgmt */
+int iwlagn_manage_ibss_station(struct iwl_priv *priv,
+ struct ieee80211_vif *vif, bool add);
+
+#endif /* __iwl_agn_h__ */
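iwl_tx_status_to_mac80211() and iwl_is_tx_success() move into this new header as static inlines; the copies in iwl-commands.h are removed later in this patch. A hedged usage sketch (handler and field names are illustrative only) of how a TX reply path maps the uCode status onto mac80211 tx-info flags:

static void example_handle_tx_status(struct ieee80211_tx_info *info, u32 status)
{
	/* translate the raw uCode status into IEEE80211_TX_STAT_* flags */
	info->flags |= iwl_tx_status_to_mac80211(status);

	if (!iwl_is_tx_success(status))
		pr_debug("tx failed, status 0x%08x\n", status);
}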
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c
index 8b516c5ff0bb..f1fd00b1a65d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.c
@@ -593,7 +593,7 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv,
IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time);
if (!rx_enable_time) {
- IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0! \n");
+ IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0!\n");
return;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 6383d9f8c9b3..9aab020c474b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -106,7 +106,7 @@ enum {
REPLY_TX = 0x1c,
REPLY_RATE_SCALE = 0x47, /* 3945 only */
REPLY_LEDS_CMD = 0x48,
- REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* 4965 only */
+ REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */
/* WiMAX coexistence */
COEX_PRIORITY_TABLE_CMD = 0x5a, /* for 5000 series and up */
@@ -512,8 +512,9 @@ struct iwl_init_alive_resp {
*
* Entries without timestamps contain only event_id and data.
*
+ *
* 2) error_event_table_ptr indicates base of the error log. This contains
- * information about any uCode error that occurs. For 4965, the format
+ * information about any uCode error that occurs. For agn, the format
* of the error log is:
*
* __le32 valid; (nonzero) valid, (0) log is empty
@@ -529,6 +530,30 @@ struct iwl_init_alive_resp {
* __le32 bcon_time; beacon timer
* __le32 tsf_low; network timestamp function timer
* __le32 tsf_hi; network timestamp function timer
+ * __le32 gp1; GP1 timer register
+ * __le32 gp2; GP2 timer register
+ * __le32 gp3; GP3 timer register
+ * __le32 ucode_ver; uCode version
+ * __le32 hw_ver; HW Silicon version
+ * __le32 brd_ver; HW board version
+ * __le32 log_pc; log program counter
+ * __le32 frame_ptr; frame pointer
+ * __le32 stack_ptr; stack pointer
+ * __le32 hcmd; last host command
+ * __le32 isr0; isr status register LMPM_NIC_ISR0: rxtx_flag
+ * __le32 isr1; isr status register LMPM_NIC_ISR1: host_flag
+ * __le32 isr2; isr status register LMPM_NIC_ISR2: enc_flag
+ * __le32 isr3; isr status register LMPM_NIC_ISR3: time_flag
+ * __le32 isr4; isr status register LMPM_NIC_ISR4: wico interrupt
+ * __le32 isr_pref; isr status register LMPM_NIC_PREF_STAT
+ * __le32 wait_event; wait event() caller address
+ * __le32 l2p_control; L2pControlField
+ * __le32 l2p_duration; L2pDurationField
+ * __le32 l2p_mhvalid; L2pMhValidBits
+ * __le32 l2p_addr_match; L2pAddrMatchStat
+ * __le32 lmpm_pmg_sel; indicate which clocks are turned on (LMPM_PMG_SEL)
+ *		__le32 u_timestamp;	indicates the date and time of the compilation
+ * __le32 reserved;
*
* The Linux driver can print both logs to the system log when a uCode error
* occurs.
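Field for field, the layout documented above corresponds to a packed array of little-endian words; the driver reads the individual words with iwl_read_targ_mem() rather than mapping a structure. A hedged sketch restricted to the fields named in this comment:

struct error_event_table_sketch {
	__le32 valid;			/* (nonzero) valid, (0) log is empty */
	/* ... error id, pc, blink/ilink, data1/2, line, bcon_time ... */
	__le32 tsf_low, tsf_hi;		/* network timestamp function timer */
	__le32 gp1, gp2, gp3;		/* GP timer registers */
	__le32 ucode_ver, hw_ver, brd_ver;
	__le32 log_pc, frame_ptr, stack_ptr;
	__le32 hcmd;			/* last host command */
	__le32 isr0, isr1, isr2, isr3, isr4, isr_pref;
	__le32 wait_event;
	__le32 l2p_control, l2p_duration, l2p_mhvalid, l2p_addr_match;
	__le32 lmpm_pmg_sel;
	__le32 u_timestamp;
	__le32 reserved;
} __attribute__ ((packed));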
@@ -1418,7 +1443,7 @@ struct iwl4965_rx_mpdu_res_start {
/* 1: Ignore Bluetooth priority for this frame.
* 0: Delay Tx until Bluetooth device is done (normal usage). */
-#define TX_CMD_FLG_BT_DIS_MSK cpu_to_le32(1 << 12)
+#define TX_CMD_FLG_IGNORE_BT cpu_to_le32(1 << 12)
/* 1: uCode overrides sequence control field in MAC header.
* 0: Driver provides sequence control field in MAC header.
@@ -1637,7 +1662,7 @@ struct iwl_tx_cmd {
struct ieee80211_hdr hdr[0];
} __attribute__ ((packed));
-/* TX command response is sent after *all* transmission attempts.
+/* TX command response is sent after *3945* transmission attempts.
*
* NOTES:
*
@@ -1665,24 +1690,65 @@ struct iwl_tx_cmd {
* control line. Receiving is still allowed in this case.
*/
enum {
+ TX_3945_STATUS_SUCCESS = 0x01,
+ TX_3945_STATUS_DIRECT_DONE = 0x02,
+ TX_3945_STATUS_FAIL_SHORT_LIMIT = 0x82,
+ TX_3945_STATUS_FAIL_LONG_LIMIT = 0x83,
+ TX_3945_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
+ TX_3945_STATUS_FAIL_MGMNT_ABORT = 0x85,
+ TX_3945_STATUS_FAIL_NEXT_FRAG = 0x86,
+ TX_3945_STATUS_FAIL_LIFE_EXPIRE = 0x87,
+ TX_3945_STATUS_FAIL_DEST_PS = 0x88,
+ TX_3945_STATUS_FAIL_ABORTED = 0x89,
+ TX_3945_STATUS_FAIL_BT_RETRY = 0x8a,
+ TX_3945_STATUS_FAIL_STA_INVALID = 0x8b,
+ TX_3945_STATUS_FAIL_FRAG_DROPPED = 0x8c,
+ TX_3945_STATUS_FAIL_TID_DISABLE = 0x8d,
+ TX_3945_STATUS_FAIL_FRAME_FLUSHED = 0x8e,
+ TX_3945_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
+ TX_3945_STATUS_FAIL_TX_LOCKED = 0x90,
+ TX_3945_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
+};
+
+/*
+ * TX command response is sent after *agn* transmission attempts.
+ *
+ * Both postpone and abort status are expected behavior from uCode. There is
+ * no special operation required from the driver, except for RFKILL_FLUSH,
+ * which requires a tx flush host command to flush all the tx frames in queues.
+ */
+enum {
TX_STATUS_SUCCESS = 0x01,
TX_STATUS_DIRECT_DONE = 0x02,
+ /* postpone TX */
+ TX_STATUS_POSTPONE_DELAY = 0x40,
+ TX_STATUS_POSTPONE_FEW_BYTES = 0x41,
+ TX_STATUS_POSTPONE_BT_PRIO = 0x42,
+ TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43,
+ TX_STATUS_POSTPONE_CALC_TTAK = 0x44,
+ /* abort TX */
+ TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81,
TX_STATUS_FAIL_SHORT_LIMIT = 0x82,
TX_STATUS_FAIL_LONG_LIMIT = 0x83,
TX_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
- TX_STATUS_FAIL_MGMNT_ABORT = 0x85,
- TX_STATUS_FAIL_NEXT_FRAG = 0x86,
+ TX_STATUS_FAIL_DRAIN_FLOW = 0x85,
+ TX_STATUS_FAIL_RFKILL_FLUSH = 0x86,
TX_STATUS_FAIL_LIFE_EXPIRE = 0x87,
TX_STATUS_FAIL_DEST_PS = 0x88,
- TX_STATUS_FAIL_ABORTED = 0x89,
+ TX_STATUS_FAIL_HOST_ABORTED = 0x89,
TX_STATUS_FAIL_BT_RETRY = 0x8a,
TX_STATUS_FAIL_STA_INVALID = 0x8b,
TX_STATUS_FAIL_FRAG_DROPPED = 0x8c,
TX_STATUS_FAIL_TID_DISABLE = 0x8d,
- TX_STATUS_FAIL_FRAME_FLUSHED = 0x8e,
+ TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e,
TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
- TX_STATUS_FAIL_TX_LOCKED = 0x90,
- TX_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
+ /* uCode drop due to FW drop request */
+ TX_STATUS_FAIL_FW_DROP = 0x90,
+ /*
+ * uCode drop due to station color mismatch
+ * between tx command and station table
+ */
+ TX_STATUS_FAIL_STA_COLOR_MISMATCH_DROP = 0x91,
};
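Per the comment above, the postpone codes occupy 0x40-0x44 and the abort codes 0x81 and up. A hedged helper sketch for classifying a reported status:

static inline bool tx_status_is_postpone(u32 status)
{
	status &= TX_STATUS_MSK;
	return status >= TX_STATUS_POSTPONE_DELAY &&
	       status <= TX_STATUS_POSTPONE_CALC_TTAK;
}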
#define TX_PACKET_MODE_REGULAR 0x0000
@@ -1704,30 +1770,6 @@ enum {
TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */
};
-static inline u32 iwl_tx_status_to_mac80211(u32 status)
-{
- status &= TX_STATUS_MSK;
-
- switch (status) {
- case TX_STATUS_SUCCESS:
- case TX_STATUS_DIRECT_DONE:
- return IEEE80211_TX_STAT_ACK;
- case TX_STATUS_FAIL_DEST_PS:
- return IEEE80211_TX_STAT_TX_FILTERED;
- default:
- return 0;
- }
-}
-
-static inline bool iwl_is_tx_success(u32 status)
-{
- status &= TX_STATUS_MSK;
- return (status == TX_STATUS_SUCCESS) ||
- (status == TX_STATUS_DIRECT_DONE);
-}
-
-
-
/* *******************************
* TX aggregation status
******************************* */
@@ -2621,10 +2663,11 @@ struct iwl_ssid_ie {
#define PROBE_OPTION_MAX_3945 4
#define PROBE_OPTION_MAX 20
#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF)
-#define IWL_GOOD_CRC_TH cpu_to_le16(1)
+#define IWL_GOOD_CRC_TH_DISABLED 0
+#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
+#define IWL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff)
#define IWL_MAX_SCAN_SIZE 1024
#define IWL_MAX_CMD_SIZE 4096
-#define IWL_MAX_PROBE_REQUEST 200
/*
* REPLY_SCAN_CMD = 0x80 (command)
@@ -3084,6 +3127,11 @@ struct statistics_tx {
__le32 cts_timeout_collision;
__le32 ack_or_ba_timeout_collision;
struct statistics_tx_non_phy_agg agg;
+ /*
+ * "tx_power" are optional parameters provided by uCode,
+ * 6000 series is the only device provide the information,
+ * Those are reserved fields for all the other devices
+ */
struct statistics_tx_power tx_power;
__le32 reserved1;
} __attribute__ ((packed));
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 049b652bcb5e..e39086821077 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -66,38 +66,7 @@ MODULE_LICENSE("GPL");
*/
static bool bt_coex_active = true;
module_param(bt_coex_active, bool, S_IRUGO);
-MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist\n");
-
-static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
- {COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
- 0, COEX_UNASSOC_IDLE_FLAGS},
- {COEX_CU_UNASSOC_MANUAL_SCAN_RP, COEX_CU_UNASSOC_MANUAL_SCAN_WP,
- 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
- {COEX_CU_UNASSOC_AUTO_SCAN_RP, COEX_CU_UNASSOC_AUTO_SCAN_WP,
- 0, COEX_UNASSOC_AUTO_SCAN_FLAGS},
- {COEX_CU_CALIBRATION_RP, COEX_CU_CALIBRATION_WP,
- 0, COEX_CALIBRATION_FLAGS},
- {COEX_CU_PERIODIC_CALIBRATION_RP, COEX_CU_PERIODIC_CALIBRATION_WP,
- 0, COEX_PERIODIC_CALIBRATION_FLAGS},
- {COEX_CU_CONNECTION_ESTAB_RP, COEX_CU_CONNECTION_ESTAB_WP,
- 0, COEX_CONNECTION_ESTAB_FLAGS},
- {COEX_CU_ASSOCIATED_IDLE_RP, COEX_CU_ASSOCIATED_IDLE_WP,
- 0, COEX_ASSOCIATED_IDLE_FLAGS},
- {COEX_CU_ASSOC_MANUAL_SCAN_RP, COEX_CU_ASSOC_MANUAL_SCAN_WP,
- 0, COEX_ASSOC_MANUAL_SCAN_FLAGS},
- {COEX_CU_ASSOC_AUTO_SCAN_RP, COEX_CU_ASSOC_AUTO_SCAN_WP,
- 0, COEX_ASSOC_AUTO_SCAN_FLAGS},
- {COEX_CU_ASSOC_ACTIVE_LEVEL_RP, COEX_CU_ASSOC_ACTIVE_LEVEL_WP,
- 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS},
- {COEX_CU_RF_ON_RP, COEX_CU_RF_ON_WP, 0, COEX_CU_RF_ON_FLAGS},
- {COEX_CU_RF_OFF_RP, COEX_CU_RF_OFF_WP, 0, COEX_RF_OFF_FLAGS},
- {COEX_CU_STAND_ALONE_DEBUG_RP, COEX_CU_STAND_ALONE_DEBUG_WP,
- 0, COEX_STAND_ALONE_DEBUG_FLAGS},
- {COEX_CU_IPAN_ASSOC_LEVEL_RP, COEX_CU_IPAN_ASSOC_LEVEL_WP,
- 0, COEX_IPAN_ASSOC_LEVEL_FLAGS},
- {COEX_CU_RSRVD1_RP, COEX_CU_RSRVD1_WP, 0, COEX_RSRVD1_FLAGS},
- {COEX_CU_RSRVD2_RP, COEX_CU_RSRVD2_WP, 0, COEX_RSRVD2_FLAGS}
-};
+MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
@@ -115,8 +84,6 @@ static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
u32 iwl_debug_level;
EXPORT_SYMBOL(iwl_debug_level);
-static irqreturn_t iwl_isr(int irq, void *data);
-
/*
* Parameter order:
* rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
@@ -143,30 +110,6 @@ const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = {
};
EXPORT_SYMBOL(iwl_rates);
-/**
- * translate ucode response to mac80211 tx status control values
- */
-void iwl_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
- struct ieee80211_tx_info *info)
-{
- struct ieee80211_tx_rate *r = &info->control.rates[0];
-
- info->antenna_sel_tx =
- ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
- if (rate_n_flags & RATE_MCS_HT_MSK)
- r->flags |= IEEE80211_TX_RC_MCS;
- if (rate_n_flags & RATE_MCS_GF_MSK)
- r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
- if (rate_n_flags & RATE_MCS_HT40_MSK)
- r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
- if (rate_n_flags & RATE_MCS_DUP_MSK)
- r->flags |= IEEE80211_TX_RC_DUP_DATA;
- if (rate_n_flags & RATE_MCS_SGI_MSK)
- r->flags |= IEEE80211_TX_RC_SHORT_GI;
- r->idx = iwl_hwrate_to_mac80211_idx(rate_n_flags, info->band);
-}
-EXPORT_SYMBOL(iwl_hwrate_to_tx_control);
-
int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
{
int idx = 0;
@@ -198,27 +141,6 @@ int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
}
EXPORT_SYMBOL(iwl_hwrate_to_plcp_idx);
-int iwl_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
-{
- int idx = 0;
- int band_offset = 0;
-
- /* HT rate format: mac80211 wants an MCS number, which is just LSB */
- if (rate_n_flags & RATE_MCS_HT_MSK) {
- idx = (rate_n_flags & 0xff);
- return idx;
- /* Legacy rate format, search for match in table */
- } else {
- if (band == IEEE80211_BAND_5GHZ)
- band_offset = IWL_FIRST_OFDM_RATE;
- for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
- if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
- return idx - band_offset;
- }
-
- return -1;
-}
-
u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant)
{
int i;
@@ -268,74 +190,16 @@ void iwl_hw_detect(struct iwl_priv *priv)
}
EXPORT_SYMBOL(iwl_hw_detect);
-int iwl_hw_nic_init(struct iwl_priv *priv)
-{
- unsigned long flags;
- struct iwl_rx_queue *rxq = &priv->rxq;
- int ret;
-
- /* nic_init */
- spin_lock_irqsave(&priv->lock, flags);
- priv->cfg->ops->lib->apm_ops.init(priv);
-
- /* Set interrupt coalescing calibration timer to default (512 usecs) */
- iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN);
-
- priv->cfg->ops->lib->apm_ops.config(priv);
-
- /* Allocate the RX queue, or reset if it is already allocated */
- if (!rxq->bd) {
- ret = iwl_rx_queue_alloc(priv);
- if (ret) {
- IWL_ERR(priv, "Unable to initialize Rx queue\n");
- return -ENOMEM;
- }
- } else
- iwl_rx_queue_reset(priv, rxq);
-
- iwl_rx_replenish(priv);
-
- iwl_rx_init(priv, rxq);
-
- spin_lock_irqsave(&priv->lock, flags);
-
- rxq->need_update = 1;
- iwl_rx_queue_update_write_ptr(priv, rxq);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Allocate or reset and init all Tx and Command queues */
- if (!priv->txq) {
- ret = iwl_txq_ctx_alloc(priv);
- if (ret)
- return ret;
- } else
- iwl_txq_ctx_reset(priv);
-
- set_bit(STATUS_INIT, &priv->status);
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_hw_nic_init);
-
/*
* QoS support
*/
-void iwl_activate_qos(struct iwl_priv *priv, u8 force)
+static void iwl_update_qos(struct iwl_priv *priv)
{
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
priv->qos_data.def_qos_parm.qos_flags = 0;
- if (priv->qos_data.qos_cap.q_AP.queue_request &&
- !priv->qos_data.qos_cap.q_AP.txop_request)
- priv->qos_data.def_qos_parm.qos_flags |=
- QOS_PARAM_FLG_TXOP_TYPE_MSK;
if (priv->qos_data.qos_active)
priv->qos_data.def_qos_parm.qos_flags |=
QOS_PARAM_FLG_UPDATE_EDCA_MSK;
@@ -343,118 +207,14 @@ void iwl_activate_qos(struct iwl_priv *priv, u8 force)
if (priv->current_ht_config.is_ht)
priv->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
- if (force || iwl_is_associated(priv)) {
- IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
- priv->qos_data.qos_active,
- priv->qos_data.def_qos_parm.qos_flags);
+ IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
+ priv->qos_data.qos_active,
+ priv->qos_data.def_qos_parm.qos_flags);
- iwl_send_cmd_pdu_async(priv, REPLY_QOS_PARAM,
- sizeof(struct iwl_qosparam_cmd),
- &priv->qos_data.def_qos_parm, NULL);
- }
+ iwl_send_cmd_pdu_async(priv, REPLY_QOS_PARAM,
+ sizeof(struct iwl_qosparam_cmd),
+ &priv->qos_data.def_qos_parm, NULL);
}
-EXPORT_SYMBOL(iwl_activate_qos);
-
-/*
- * AC CWmin CW max AIFSN TXOP Limit TXOP Limit
- * (802.11b) (802.11a/g)
- * AC_BK 15 1023 7 0 0
- * AC_BE 15 1023 3 0 0
- * AC_VI 7 15 2 6.016ms 3.008ms
- * AC_VO 3 7 2 3.264ms 1.504ms
- */
-void iwl_reset_qos(struct iwl_priv *priv)
-{
- u16 cw_min = 15;
- u16 cw_max = 1023;
- u8 aifs = 2;
- bool is_legacy = false;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&priv->lock, flags);
- /* QoS always active in AP and ADHOC mode
- * In STA mode wait for association
- */
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC ||
- priv->iw_mode == NL80211_IFTYPE_AP)
- priv->qos_data.qos_active = 1;
- else
- priv->qos_data.qos_active = 0;
-
- /* check for legacy mode */
- if ((priv->iw_mode == NL80211_IFTYPE_ADHOC &&
- (priv->active_rate & IWL_OFDM_RATES_MASK) == 0) ||
- (priv->iw_mode == NL80211_IFTYPE_STATION &&
- (priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK) == 0)) {
- cw_min = 31;
- is_legacy = 1;
- }
-
- if (priv->qos_data.qos_active)
- aifs = 3;
-
- /* AC_BE */
- priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min);
- priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max);
- priv->qos_data.def_qos_parm.ac[0].aifsn = aifs;
- priv->qos_data.def_qos_parm.ac[0].edca_txop = 0;
- priv->qos_data.def_qos_parm.ac[0].reserved1 = 0;
-
- if (priv->qos_data.qos_active) {
- /* AC_BK */
- i = 1;
- priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min);
- priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max);
- priv->qos_data.def_qos_parm.ac[i].aifsn = 7;
- priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
- priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
-
- /* AC_VI */
- i = 2;
- priv->qos_data.def_qos_parm.ac[i].cw_min =
- cpu_to_le16((cw_min + 1) / 2 - 1);
- priv->qos_data.def_qos_parm.ac[i].cw_max =
- cpu_to_le16(cw_min);
- priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
- if (is_legacy)
- priv->qos_data.def_qos_parm.ac[i].edca_txop =
- cpu_to_le16(6016);
- else
- priv->qos_data.def_qos_parm.ac[i].edca_txop =
- cpu_to_le16(3008);
- priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
-
- /* AC_VO */
- i = 3;
- priv->qos_data.def_qos_parm.ac[i].cw_min =
- cpu_to_le16((cw_min + 1) / 4 - 1);
- priv->qos_data.def_qos_parm.ac[i].cw_max =
- cpu_to_le16((cw_min + 1) / 2 - 1);
- priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
- priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
- if (is_legacy)
- priv->qos_data.def_qos_parm.ac[i].edca_txop =
- cpu_to_le16(3264);
- else
- priv->qos_data.def_qos_parm.ac[i].edca_txop =
- cpu_to_le16(1504);
- } else {
- for (i = 1; i < 4; i++) {
- priv->qos_data.def_qos_parm.ac[i].cw_min =
- cpu_to_le16(cw_min);
- priv->qos_data.def_qos_parm.ac[i].cw_max =
- cpu_to_le16(cw_max);
- priv->qos_data.def_qos_parm.ac[i].aifsn = aifs;
- priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
- priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
- }
- }
- IWL_DEBUG_QOS(priv, "set QoS to default \n");
-
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-EXPORT_SYMBOL(iwl_reset_qos);
#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
@@ -721,7 +481,7 @@ static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
return new_val;
}
-void iwl_setup_rxon_timing(struct iwl_priv *priv)
+void iwl_setup_rxon_timing(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
u64 tsf;
s32 interval_tm, rem;
@@ -735,15 +495,14 @@ void iwl_setup_rxon_timing(struct iwl_priv *priv)
priv->rxon_timing.timestamp = cpu_to_le64(priv->timestamp);
priv->rxon_timing.listen_interval = cpu_to_le16(conf->listen_interval);
- if (priv->iw_mode == NL80211_IFTYPE_STATION) {
- beacon_int = priv->beacon_int;
- priv->rxon_timing.atim_window = 0;
- } else {
- beacon_int = priv->vif->bss_conf.beacon_int;
+ beacon_int = vif->bss_conf.beacon_int;
+ if (vif->type == NL80211_IFTYPE_ADHOC) {
/* TODO: we need to get atim_window from upper stack
* for now we set to 0 */
priv->rxon_timing.atim_window = 0;
+ } else {
+ priv->rxon_timing.atim_window = 0;
}
beacon_int = iwl_adjust_beacon_interval(beacon_int,
@@ -903,23 +662,10 @@ EXPORT_SYMBOL(iwl_full_rxon_required);
u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv)
{
- int i;
- int rate_mask;
-
- /* Set rate mask*/
- if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)
- rate_mask = priv->active_rate_basic & IWL_CCK_RATES_MASK;
- else
- rate_mask = priv->active_rate_basic & IWL_OFDM_RATES_MASK;
-
- /* Find lowest valid rate */
- for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID;
- i = iwl_rates[i].next_ieee) {
- if (rate_mask & (1 << i))
- return iwl_rates[i].plcp;
- }
-
- /* No valid rate was found. Assign the lowest one */
+ /*
+ * Assign the lowest rate -- should really get this from
+ * the beacon skb from mac80211.
+ */
if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)
return IWL_RATE_1M_PLCP;
else
@@ -1051,19 +797,6 @@ static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
}
/**
- * iwl_is_monitor_mode - Determine if interface in monitor mode
- *
- * priv->iw_mode is set in add_interface, but add_interface is
- * never called for monitor mode. The only way mac80211 informs us about
- * monitor mode is through configuring filters (call to configure_filter).
- */
-bool iwl_is_monitor_mode(struct iwl_priv *priv)
-{
- return !!(priv->staging_rxon.filter_flags & RXON_FILTER_PROMISC_MSK);
-}
-EXPORT_SYMBOL(iwl_is_monitor_mode);
-
-/**
* iwl_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
*
* Selects how many and which Rx receivers/antennas/chains to use.
@@ -1106,19 +839,6 @@ void iwl_set_rxon_chain(struct iwl_priv *priv)
rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
- /* copied from 'iwl_bg_request_scan()' */
- /* Force use of chains B and C (0x6) for Rx for 4965
- * Avoid A (0x1) because of its off-channel reception on A-band.
- * MIMO is not used here, but value is required */
- if (iwl_is_monitor_mode(priv) &&
- !(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) &&
- ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965)) {
- rx_chain = ANT_ABC << RXON_RX_CHAIN_VALID_POS;
- rx_chain |= ANT_BC << RXON_RX_CHAIN_FORCE_SEL_POS;
- rx_chain |= ANT_ABC << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
- rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
- }
-
priv->staging_rxon.rx_chain = cpu_to_le16(rx_chain);
if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
@@ -1174,8 +894,9 @@ int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch)
}
EXPORT_SYMBOL(iwl_set_rxon_channel);
-void iwl_set_flags_for_band(struct iwl_priv *priv,
- enum ieee80211_band band)
+static void iwl_set_flags_for_band(struct iwl_priv *priv,
+ enum ieee80211_band band,
+ struct ieee80211_vif *vif)
{
if (band == IEEE80211_BAND_5GHZ) {
priv->staging_rxon.flags &=
@@ -1184,12 +905,12 @@ void iwl_set_flags_for_band(struct iwl_priv *priv,
priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
} else {
/* Copied from iwl_post_associate() */
- if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
+ if (vif && vif->bss_conf.assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
else
priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
+ if (vif && vif->type == NL80211_IFTYPE_ADHOC)
priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
@@ -1201,13 +922,18 @@ void iwl_set_flags_for_band(struct iwl_priv *priv,
/*
* initialize rxon structure with default values from eeprom
*/
-void iwl_connection_init_rx_config(struct iwl_priv *priv, int mode)
+void iwl_connection_init_rx_config(struct iwl_priv *priv,
+ struct ieee80211_vif *vif)
{
const struct iwl_channel_info *ch_info;
+ enum nl80211_iftype type = NL80211_IFTYPE_STATION;
+
+ if (vif)
+ type = vif->type;
memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
- switch (mode) {
+ switch (type) {
case NL80211_IFTYPE_AP:
priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP;
break;
@@ -1225,7 +951,7 @@ void iwl_connection_init_rx_config(struct iwl_priv *priv, int mode)
break;
default:
- IWL_ERR(priv, "Unsupported interface type %d\n", mode);
+ IWL_ERR(priv, "Unsupported interface type %d\n", type);
break;
}
@@ -1244,18 +970,10 @@ void iwl_connection_init_rx_config(struct iwl_priv *priv, int mode)
if (!ch_info)
ch_info = &priv->channel_info[0];
- /*
- * in some case A channels are all non IBSS
- * in this case force B/G channel
- */
- if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
- !(is_channel_ibss(ch_info)))
- ch_info = &priv->channel_info[0];
-
priv->staging_rxon.channel = cpu_to_le16(ch_info->channel);
priv->band = ch_info->band;
- iwl_set_flags_for_band(priv, priv->band);
+ iwl_set_flags_for_band(priv, priv->band, vif);
priv->staging_rxon.ofdm_basic_rates =
(IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
@@ -1286,7 +1004,6 @@ static void iwl_set_rate(struct iwl_priv *priv)
}
priv->active_rate = 0;
- priv->active_rate_basic = 0;
for (i = 0; i < hw->n_bitrates; i++) {
rate = &(hw->bitrates[i]);
@@ -1294,30 +1011,13 @@ static void iwl_set_rate(struct iwl_priv *priv)
priv->active_rate |= (1 << rate->hw_value);
}
- IWL_DEBUG_RATE(priv, "Set active_rate = %0x, active_rate_basic = %0x\n",
- priv->active_rate, priv->active_rate_basic);
+ IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate);
- /*
- * If a basic rate is configured, then use it (adding IWL_RATE_1M_MASK)
- * otherwise set it to the default of all CCK rates and 6, 12, 24 for
- * OFDM
- */
- if (priv->active_rate_basic & IWL_CCK_BASIC_RATES_MASK)
- priv->staging_rxon.cck_basic_rates =
- ((priv->active_rate_basic &
- IWL_CCK_RATES_MASK) >> IWL_FIRST_CCK_RATE) & 0xF;
- else
- priv->staging_rxon.cck_basic_rates =
- (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
-
- if (priv->active_rate_basic & IWL_OFDM_BASIC_RATES_MASK)
- priv->staging_rxon.ofdm_basic_rates =
- ((priv->active_rate_basic &
- (IWL_OFDM_BASIC_RATES_MASK | IWL_RATE_6M_MASK)) >>
- IWL_FIRST_OFDM_RATE) & 0xFF;
- else
- priv->staging_rxon.ofdm_basic_rates =
- (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
+ priv->staging_rxon.cck_basic_rates =
+ (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
+
+ priv->staging_rxon.ofdm_basic_rates =
+ (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
}
void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
@@ -1374,6 +1074,9 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
/* Cancel currently queued command. */
clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
+ IWL_ERR(priv, "Loaded firmware version: %s\n",
+ priv->hw->wiphy->fw_version);
+
priv->cfg->ops->lib->dump_nic_error_log(priv);
if (priv->cfg->ops->lib->dump_csr)
priv->cfg->ops->lib->dump_csr(priv);
@@ -1401,7 +1104,7 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
}
EXPORT_SYMBOL(iwl_irq_handle_error);
-int iwl_apm_stop_master(struct iwl_priv *priv)
+static int iwl_apm_stop_master(struct iwl_priv *priv)
{
int ret = 0;
@@ -1417,7 +1120,6 @@ int iwl_apm_stop_master(struct iwl_priv *priv)
return ret;
}
-EXPORT_SYMBOL(iwl_apm_stop_master);
void iwl_apm_stop(struct iwl_priv *priv)
{
@@ -1561,41 +1263,33 @@ void iwl_configure_filter(struct ieee80211_hw *hw,
u64 multicast)
{
struct iwl_priv *priv = hw->priv;
- __le32 *filter_flags = &priv->staging_rxon.filter_flags;
+ __le32 filter_or = 0, filter_nand = 0;
+
+#define CHK(test, flag) do { \
+ if (*total_flags & (test)) \
+ filter_or |= (flag); \
+ else \
+ filter_nand |= (flag); \
+ } while (0)
IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
changed_flags, *total_flags);
- if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) {
- if (*total_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS))
- *filter_flags |= RXON_FILTER_PROMISC_MSK;
- else
- *filter_flags &= ~RXON_FILTER_PROMISC_MSK;
- }
- if (changed_flags & FIF_ALLMULTI) {
- if (*total_flags & FIF_ALLMULTI)
- *filter_flags |= RXON_FILTER_ACCEPT_GRP_MSK;
- else
- *filter_flags &= ~RXON_FILTER_ACCEPT_GRP_MSK;
- }
- if (changed_flags & FIF_CONTROL) {
- if (*total_flags & FIF_CONTROL)
- *filter_flags |= RXON_FILTER_CTL2HOST_MSK;
- else
- *filter_flags &= ~RXON_FILTER_CTL2HOST_MSK;
- }
- if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
- if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
- *filter_flags |= RXON_FILTER_BCON_AWARE_MSK;
- else
- *filter_flags &= ~RXON_FILTER_BCON_AWARE_MSK;
- }
+ CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+ CHK(FIF_ALLMULTI, RXON_FILTER_ACCEPT_GRP_MSK);
+ CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
+ CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
- /* We avoid iwl_commit_rxon here to commit the new filter flags
- * since mac80211 will call ieee80211_hw_config immediately.
- * (mc_list is not supported at this time). Otherwise, we need to
- * queue a background iwl_commit_rxon work.
- */
+#undef CHK
+
+ mutex_lock(&priv->mutex);
+
+ priv->staging_rxon.filter_flags &= ~filter_nand;
+ priv->staging_rxon.filter_flags |= filter_or;
+
+ iwlcore_commit_rxon(priv);
+
+ mutex_unlock(&priv->mutex);
*total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
@@ -1626,10 +1320,11 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
int ret = 0;
s8 prev_tx_power = priv->tx_power_user_lmt;
- if (tx_power < IWL_TX_POWER_TARGET_POWER_MIN) {
- IWL_WARN(priv, "Requested user TXPOWER %d below lower limit %d.\n",
+ if (tx_power < IWLAGN_TX_POWER_TARGET_POWER_MIN) {
+ IWL_WARN(priv,
+ "Requested user TXPOWER %d below lower limit %d.\n",
tx_power,
- IWL_TX_POWER_TARGET_POWER_MIN);
+ IWLAGN_TX_POWER_TARGET_POWER_MIN);
return -EINVAL;
}
@@ -1668,286 +1363,16 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
}
EXPORT_SYMBOL(iwl_set_tx_power);
-#define ICT_COUNT (PAGE_SIZE/sizeof(u32))
-
-/* Free dram table */
-void iwl_free_isr_ict(struct iwl_priv *priv)
-{
- if (priv->ict_tbl_vir) {
- dma_free_coherent(&priv->pci_dev->dev,
- (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
- priv->ict_tbl_vir, priv->ict_tbl_dma);
- priv->ict_tbl_vir = NULL;
- }
-}
-EXPORT_SYMBOL(iwl_free_isr_ict);
-
-
-/* allocate dram shared table it is a PAGE_SIZE aligned
- * also reset all data related to ICT table interrupt.
- */
-int iwl_alloc_isr_ict(struct iwl_priv *priv)
-{
-
- if (priv->cfg->use_isr_legacy)
- return 0;
- /* allocate shrared data table */
- priv->ict_tbl_vir = dma_alloc_coherent(&priv->pci_dev->dev,
- (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
- &priv->ict_tbl_dma, GFP_KERNEL);
- if (!priv->ict_tbl_vir)
- return -ENOMEM;
-
- /* align table to PAGE_SIZE boundry */
- priv->aligned_ict_tbl_dma = ALIGN(priv->ict_tbl_dma, PAGE_SIZE);
-
- IWL_DEBUG_ISR(priv, "ict dma addr %Lx dma aligned %Lx diff %d\n",
- (unsigned long long)priv->ict_tbl_dma,
- (unsigned long long)priv->aligned_ict_tbl_dma,
- (int)(priv->aligned_ict_tbl_dma - priv->ict_tbl_dma));
-
- priv->ict_tbl = priv->ict_tbl_vir +
- (priv->aligned_ict_tbl_dma - priv->ict_tbl_dma);
-
- IWL_DEBUG_ISR(priv, "ict vir addr %p vir aligned %p diff %d\n",
- priv->ict_tbl, priv->ict_tbl_vir,
- (int)(priv->aligned_ict_tbl_dma - priv->ict_tbl_dma));
-
- /* reset table and index to all 0 */
- memset(priv->ict_tbl_vir,0, (sizeof(u32) * ICT_COUNT) + PAGE_SIZE);
- priv->ict_index = 0;
-
- /* add periodic RX interrupt */
- priv->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
- return 0;
-}
-EXPORT_SYMBOL(iwl_alloc_isr_ict);
-
-/* Device is going up inform it about using ICT interrupt table,
- * also we need to tell the driver to start using ICT interrupt.
- */
-int iwl_reset_ict(struct iwl_priv *priv)
-{
- u32 val;
- unsigned long flags;
-
- if (!priv->ict_tbl_vir)
- return 0;
-
- spin_lock_irqsave(&priv->lock, flags);
- iwl_disable_interrupts(priv);
-
- memset(&priv->ict_tbl[0], 0, sizeof(u32) * ICT_COUNT);
-
- val = priv->aligned_ict_tbl_dma >> PAGE_SHIFT;
-
- val |= CSR_DRAM_INT_TBL_ENABLE;
- val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
-
- IWL_DEBUG_ISR(priv, "CSR_DRAM_INT_TBL_REG =0x%X "
- "aligned dma address %Lx\n",
- val, (unsigned long long)priv->aligned_ict_tbl_dma);
-
- iwl_write32(priv, CSR_DRAM_INT_TBL_REG, val);
- priv->use_ict = true;
- priv->ict_index = 0;
- iwl_write32(priv, CSR_INT, priv->inta_mask);
- iwl_enable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_reset_ict);
-
-/* Device is going down disable ict interrupt usage */
-void iwl_disable_ict(struct iwl_priv *priv)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- priv->use_ict = false;
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-EXPORT_SYMBOL(iwl_disable_ict);
-
-/* interrupt handler using ict table, with this interrupt driver will
- * stop using INTA register to get device's interrupt, reading this register
- * is expensive, device will write interrupts in ICT dram table, increment
- * index then will fire interrupt to driver, driver will OR all ICT table
- * entries from current index up to table entry with 0 value. the result is
- * the interrupt we need to service, driver will set the entries back to 0 and
- * set index.
- */
-irqreturn_t iwl_isr_ict(int irq, void *data)
-{
- struct iwl_priv *priv = data;
- u32 inta, inta_mask;
- u32 val = 0;
-
- if (!priv)
- return IRQ_NONE;
-
- /* dram interrupt table not set yet,
- * use legacy interrupt.
- */
- if (!priv->use_ict)
- return iwl_isr(irq, data);
-
- spin_lock(&priv->lock);
-
- /* Disable (but don't clear!) interrupts here to avoid
- * back-to-back ISRs and sporadic interrupts from our NIC.
- * If we have something to service, the tasklet will re-enable ints.
- * If we *don't* have something, we'll re-enable before leaving here.
- */
- inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
- iwl_write32(priv, CSR_INT_MASK, 0x00000000);
-
-
- /* Ignore interrupt if there's nothing in NIC to service.
- * This may be due to IRQ shared with another device,
- * or due to sporadic interrupts thrown from our NIC. */
- if (!priv->ict_tbl[priv->ict_index]) {
- IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
- goto none;
- }
-
- /* read all entries that not 0 start with ict_index */
- while (priv->ict_tbl[priv->ict_index]) {
-
- val |= le32_to_cpu(priv->ict_tbl[priv->ict_index]);
- IWL_DEBUG_ISR(priv, "ICT index %d value 0x%08X\n",
- priv->ict_index,
- le32_to_cpu(priv->ict_tbl[priv->ict_index]));
- priv->ict_tbl[priv->ict_index] = 0;
- priv->ict_index = iwl_queue_inc_wrap(priv->ict_index,
- ICT_COUNT);
-
- }
-
- /* We should not get this value, just ignore it. */
- if (val == 0xffffffff)
- val = 0;
-
- /*
- * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
- * (bit 15 before shifting it to 31) to clear when using interrupt
- * coalescing. fortunately, bits 18 and 19 stay set when this happens
- * so we use them to decide on the real state of the Rx bit.
- * In order words, bit 15 is set if bit 18 or bit 19 are set.
- */
- if (val & 0xC0000)
- val |= 0x8000;
-
- inta = (0xff & val) | ((0xff00 & val) << 16);
- IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
- inta, inta_mask, val);
-
- inta &= priv->inta_mask;
- priv->inta |= inta;
-
- /* iwl_irq_tasklet() will service interrupts and re-enable them */
- if (likely(inta))
- tasklet_schedule(&priv->irq_tasklet);
- else if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta) {
- /* Allow interrupt if was disabled by this handler and
- * no tasklet was schedules, We should not enable interrupt,
- * tasklet will enable it.
- */
- iwl_enable_interrupts(priv);
- }
-
- spin_unlock(&priv->lock);
- return IRQ_HANDLED;
-
- none:
- /* re-enable interrupts here since we don't have anything to service.
- * only Re-enable if disabled by irq.
- */
- if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
- iwl_enable_interrupts(priv);
-
- spin_unlock(&priv->lock);
- return IRQ_NONE;
-}
-EXPORT_SYMBOL(iwl_isr_ict);
-
-
-static irqreturn_t iwl_isr(int irq, void *data)
-{
- struct iwl_priv *priv = data;
- u32 inta, inta_mask;
-#ifdef CONFIG_IWLWIFI_DEBUG
- u32 inta_fh;
-#endif
- if (!priv)
- return IRQ_NONE;
-
- spin_lock(&priv->lock);
-
- /* Disable (but don't clear!) interrupts here to avoid
- * back-to-back ISRs and sporadic interrupts from our NIC.
- * If we have something to service, the tasklet will re-enable ints.
- * If we *don't* have something, we'll re-enable before leaving here. */
- inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
- iwl_write32(priv, CSR_INT_MASK, 0x00000000);
-
- /* Discover which interrupts are active/pending */
- inta = iwl_read32(priv, CSR_INT);
-
- /* Ignore interrupt if there's nothing in NIC to service.
- * This may be due to IRQ shared with another device,
- * or due to sporadic interrupts thrown from our NIC. */
- if (!inta) {
- IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
- goto none;
- }
-
- if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
- /* Hardware disappeared. It might have already raised
- * an interrupt */
- IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
- goto unplugged;
- }
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
- inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
- IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, "
- "fh 0x%08x\n", inta, inta_mask, inta_fh);
- }
-#endif
-
- priv->inta |= inta;
- /* iwl_irq_tasklet() will service interrupts and re-enable them */
- if (likely(inta))
- tasklet_schedule(&priv->irq_tasklet);
- else if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
- iwl_enable_interrupts(priv);
-
- unplugged:
- spin_unlock(&priv->lock);
- return IRQ_HANDLED;
-
- none:
- /* re-enable interrupts here since we don't have anything to service. */
- /* only Re-enable if diabled by irq and no schedules tasklet. */
- if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
- iwl_enable_interrupts(priv);
-
- spin_unlock(&priv->lock);
- return IRQ_NONE;
-}
-
irqreturn_t iwl_isr_legacy(int irq, void *data)
{
struct iwl_priv *priv = data;
u32 inta, inta_mask;
u32 inta_fh;
+ unsigned long flags;
if (!priv)
return IRQ_NONE;
- spin_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, flags);
/* Disable (but don't clear!) interrupts here to avoid
* back-to-back ISRs and sporadic interrupts from our NIC.
@@ -1985,7 +1410,7 @@ irqreturn_t iwl_isr_legacy(int irq, void *data)
tasklet_schedule(&priv->irq_tasklet);
unplugged:
- spin_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
return IRQ_HANDLED;
none:
@@ -1993,12 +1418,12 @@ irqreturn_t iwl_isr_legacy(int irq, void *data)
/* only Re-enable if disabled by irq */
if (test_bit(STATUS_INT_ENABLED, &priv->status))
iwl_enable_interrupts(priv);
- spin_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
return IRQ_NONE;
}
EXPORT_SYMBOL(iwl_isr_legacy);
-int iwl_send_bt_config(struct iwl_priv *priv)
+void iwl_send_bt_config(struct iwl_priv *priv)
{
struct iwl_bt_cmd bt_cmd = {
.lead_time = BT_LEAD_TIME_DEF,
@@ -2015,8 +1440,9 @@ int iwl_send_bt_config(struct iwl_priv *priv)
IWL_DEBUG_INFO(priv, "BT coex %s\n",
(bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
- return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
- sizeof(struct iwl_bt_cmd), &bt_cmd);
+ if (iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
+ sizeof(struct iwl_bt_cmd), &bt_cmd))
+ IWL_ERR(priv, "failed to send BT Coex Config\n");
}
EXPORT_SYMBOL(iwl_send_bt_config);
@@ -2306,12 +1732,6 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
cpu_to_le16((params->txop * 32));
priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
- priv->qos_data.qos_active = 1;
-
- if (priv->iw_mode == NL80211_IFTYPE_AP)
- iwl_activate_qos(priv, 1);
- else if (priv->assoc_id && iwl_is_associated(priv))
- iwl_activate_qos(priv, 0);
spin_unlock_irqrestore(&priv->lock, flags);
@@ -2321,12 +1741,13 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
EXPORT_SYMBOL(iwl_mac_conf_tx);
static void iwl_ht_conf(struct iwl_priv *priv,
- struct ieee80211_bss_conf *bss_conf)
+ struct ieee80211_vif *vif)
{
struct iwl_ht_config *ht_conf = &priv->current_ht_config;
struct ieee80211_sta *sta;
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
- IWL_DEBUG_MAC80211(priv, "enter: \n");
+ IWL_DEBUG_MAC80211(priv, "enter:\n");
if (!ht_conf->is_ht)
return;
@@ -2338,10 +1759,10 @@ static void iwl_ht_conf(struct iwl_priv *priv,
ht_conf->single_chain_sufficient = false;
- switch (priv->iw_mode) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
rcu_read_lock();
- sta = ieee80211_find_sta(priv->vif, priv->bssid);
+ sta = ieee80211_find_sta(vif, bss_conf->bssid);
if (sta) {
struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
int maxstreams;
@@ -2379,7 +1800,6 @@ static void iwl_ht_conf(struct iwl_priv *priv,
static inline void iwl_set_no_assoc(struct iwl_priv *priv)
{
- priv->assoc_id = 0;
iwl_led_disassociate(priv);
/*
* inform the ucode that there is no longer an
@@ -2392,7 +1812,6 @@ static inline void iwl_set_no_assoc(struct iwl_priv *priv)
iwlcore_commit_rxon(priv);
}
-#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
void iwl_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
@@ -2408,14 +1827,12 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
mutex_lock(&priv->mutex);
- if (changes & BSS_CHANGED_BEACON &&
- priv->iw_mode == NL80211_IFTYPE_AP) {
+ if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_AP) {
dev_kfree_skb(priv->ibss_beacon);
priv->ibss_beacon = ieee80211_beacon_get(hw, vif);
}
if (changes & BSS_CHANGED_BEACON_INT) {
- priv->beacon_int = bss_conf->beacon_int;
/* TODO: in AP mode, do something to make this take effect */
}
@@ -2435,8 +1852,7 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
}
/* mac80211 only sets assoc when in STATION mode */
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC ||
- bss_conf->assoc) {
+ if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
memcpy(priv->staging_rxon.bssid_addr,
bss_conf->bssid, ETH_ALEN);
@@ -2454,7 +1870,7 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
* mac80211 decides to do both changes at once because
* it will invoke post_associate.
*/
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
+ if (vif->type == NL80211_IFTYPE_ADHOC &&
changes & BSS_CHANGED_BEACON) {
struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
@@ -2497,7 +1913,7 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
}
if (changes & BSS_CHANGED_HT) {
- iwl_ht_conf(priv, bss_conf);
+ iwl_ht_conf(priv, vif);
if (priv->cfg->ops->hcmd->set_rxon_chain)
priv->cfg->ops->hcmd->set_rxon_chain(priv);
@@ -2506,28 +1922,17 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
if (changes & BSS_CHANGED_ASSOC) {
IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
if (bss_conf->assoc) {
- priv->assoc_id = bss_conf->aid;
- priv->beacon_int = bss_conf->beacon_int;
priv->timestamp = bss_conf->timestamp;
- priv->assoc_capability = bss_conf->assoc_capability;
iwl_led_associate(priv);
- /*
- * We have just associated, don't start scan too early
- * leave time for EAPOL exchange to complete.
- *
- * XXX: do this in mac80211
- */
- priv->next_scan_jiffies = jiffies +
- IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
if (!iwl_is_rfkill(priv))
- priv->cfg->ops->lib->post_associate(priv);
+ priv->cfg->ops->lib->post_associate(priv, vif);
} else
iwl_set_no_assoc(priv);
}
- if (changes && iwl_is_associated(priv) && priv->assoc_id) {
+ if (changes && iwl_is_associated(priv) && bss_conf->aid) {
IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
changes);
ret = iwl_send_rxon_assoc(priv);
@@ -2544,11 +1949,20 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
memcpy(priv->staging_rxon.bssid_addr,
bss_conf->bssid, ETH_ALEN);
memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
- iwlcore_config_ap(priv);
+ iwlcore_config_ap(priv, vif);
} else
iwl_set_no_assoc(priv);
}
+ if (changes & BSS_CHANGED_IBSS) {
+ ret = priv->cfg->ops->lib->manage_ibss_station(priv, vif,
+ bss_conf->ibss_joined);
+ if (ret)
+ IWL_ERR(priv, "failed to %s IBSS station %pM\n",
+ bss_conf->ibss_joined ? "add" : "remove",
+ bss_conf->bssid);
+ }
+
mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -2568,11 +1982,6 @@ int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
return -EIO;
}
- if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
- IWL_DEBUG_MAC80211(priv, "leave - not IBSS\n");
- return -EIO;
- }
-
spin_lock_irqsave(&priv->lock, flags);
if (priv->ibss_beacon)
@@ -2580,59 +1989,31 @@ int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
priv->ibss_beacon = skb;
- priv->assoc_id = 0;
timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
priv->timestamp = le64_to_cpu(timestamp);
IWL_DEBUG_MAC80211(priv, "leave\n");
spin_unlock_irqrestore(&priv->lock, flags);
- iwl_reset_qos(priv);
-
- priv->cfg->ops->lib->post_associate(priv);
-
+ priv->cfg->ops->lib->post_associate(priv, priv->vif);
return 0;
}
EXPORT_SYMBOL(iwl_mac_beacon_update);
-int iwl_set_mode(struct iwl_priv *priv, int mode)
+static int iwl_set_mode(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
- if (mode == NL80211_IFTYPE_ADHOC) {
- const struct iwl_channel_info *ch_info;
-
- ch_info = iwl_get_channel_info(priv,
- priv->band,
- le16_to_cpu(priv->staging_rxon.channel));
-
- if (!ch_info || !is_channel_ibss(ch_info)) {
- IWL_ERR(priv, "channel %d not IBSS channel\n",
- le16_to_cpu(priv->staging_rxon.channel));
- return -EINVAL;
- }
- }
-
- iwl_connection_init_rx_config(priv, mode);
+ iwl_connection_init_rx_config(priv, vif);
if (priv->cfg->ops->hcmd->set_rxon_chain)
priv->cfg->ops->hcmd->set_rxon_chain(priv);
memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
- iwl_clear_stations_table(priv);
-
- /* dont commit rxon if rf-kill is on*/
- if (!iwl_is_ready_rf(priv))
- return -EAGAIN;
-
- iwlcore_commit_rxon(priv);
-
- return 0;
+ return iwlcore_commit_rxon(priv);
}
-EXPORT_SYMBOL(iwl_set_mode);
-int iwl_mac_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct iwl_priv *priv = hw->priv;
int err = 0;
@@ -2641,6 +2022,11 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw,
mutex_lock(&priv->mutex);
+ if (WARN_ON(!iwl_is_ready_rf(priv))) {
+ err = -EINVAL;
+ goto out;
+ }
+
if (priv->vif) {
IWL_DEBUG_MAC80211(priv, "leave - vif != NULL\n");
err = -EOPNOTSUPP;
@@ -2650,15 +2036,18 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw,
priv->vif = vif;
priv->iw_mode = vif->type;
- if (vif->addr) {
- IWL_DEBUG_MAC80211(priv, "Set %pM\n", vif->addr);
- memcpy(priv->mac_addr, vif->addr, ETH_ALEN);
- }
+ IWL_DEBUG_MAC80211(priv, "Set %pM\n", vif->addr);
+ memcpy(priv->mac_addr, vif->addr, ETH_ALEN);
+
+ err = iwl_set_mode(priv, vif);
+ if (err)
+ goto out_err;
- if (iwl_set_mode(priv, vif->type) == -EAGAIN)
- /* we are not ready, will run again when ready */
- set_bit(STATUS_MODE_PENDING, &priv->status);
+ goto out;
+ out_err:
+ priv->vif = NULL;
+ priv->iw_mode = NL80211_IFTYPE_STATION;
out:
mutex_unlock(&priv->mutex);
@@ -2668,7 +2057,7 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw,
EXPORT_SYMBOL(iwl_mac_add_interface);
void iwl_mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif)
{
struct iwl_priv *priv = hw->priv;
@@ -2694,10 +2083,6 @@ EXPORT_SYMBOL(iwl_mac_remove_interface);
/**
* iwl_mac_config - mac80211 config callback
- *
- * We ignore conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME since it seems to
- * be set inappropriately and the driver currently sets the hardware up to
- * use it whenever needed.
*/
int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
{
@@ -2752,15 +2137,6 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
goto set_ch_out;
}
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
- !is_channel_ibss(ch_info)) {
- IWL_ERR(priv, "channel %d in band %d not "
- "IBSS channel\n",
- conf->channel->hw_value, conf->channel->band);
- ret = -EINVAL;
- goto set_ch_out;
- }
-
spin_lock_irqsave(&priv->lock, flags);
/* Configure HT40 channels */
@@ -2794,7 +2170,7 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
iwl_set_rxon_channel(priv, conf->channel);
iwl_set_rxon_ht(priv, ht_conf);
- iwl_set_flags_for_band(priv, conf->channel->band);
+ iwl_set_flags_for_band(priv, conf->channel->band, priv->vif);
spin_unlock_irqrestore(&priv->lock, flags);
if (iwl_is_associated(priv) &&
(le16_to_cpu(priv->active_rxon.channel) != ch) &&
@@ -2833,6 +2209,15 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
iwl_set_tx_power(priv, conf->power_level, false);
}
+ if (changed & IEEE80211_CONF_CHANGE_QOS) {
+ bool qos_active = !!(conf->flags & IEEE80211_CONF_QOS);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->qos_data.qos_active = qos_active;
+ iwl_update_qos(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
if (!iwl_is_ready(priv)) {
IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
goto out;
@@ -2867,12 +2252,7 @@ void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
spin_unlock_irqrestore(&priv->lock, flags);
- iwl_reset_qos(priv);
-
spin_lock_irqsave(&priv->lock, flags);
- priv->assoc_id = 0;
- priv->assoc_capability = 0;
- priv->assoc_station_added = 0;
/* new association get rid of ibss beacon skb */
if (priv->ibss_beacon)
@@ -2880,10 +2260,7 @@ void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
priv->ibss_beacon = NULL;
- priv->beacon_int = priv->vif->bss_conf.beacon_int;
priv->timestamp = 0;
- if ((priv->iw_mode == NL80211_IFTYPE_STATION))
- priv->beacon_int = 0;
spin_unlock_irqrestore(&priv->lock, flags);
@@ -2896,17 +2273,9 @@ void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
/* we are restarting association process
* clear RXON_FILTER_ASSOC_MSK bit
*/
- if (priv->iw_mode != NL80211_IFTYPE_AP) {
- iwl_scan_cancel_timeout(priv, 100);
- priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwlcore_commit_rxon(priv);
- }
-
- if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
- IWL_DEBUG_MAC80211(priv, "leave - not in IBSS\n");
- mutex_unlock(&priv->mutex);
- return;
- }
+ iwl_scan_cancel_timeout(priv, 100);
+ priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ iwlcore_commit_rxon(priv);
iwl_set_rate(priv);
@@ -2923,7 +2292,7 @@ int iwl_alloc_txq_mem(struct iwl_priv *priv)
sizeof(struct iwl_tx_queue) * priv->cfg->num_of_queues,
GFP_KERNEL);
if (!priv->txq) {
- IWL_ERR(priv, "Not enough memory for txq \n");
+ IWL_ERR(priv, "Not enough memory for txq\n");
return -ENOMEM;
}
return 0;
@@ -2937,34 +2306,6 @@ void iwl_free_txq_mem(struct iwl_priv *priv)
}
EXPORT_SYMBOL(iwl_free_txq_mem);
-int iwl_send_wimax_coex(struct iwl_priv *priv)
-{
- struct iwl_wimax_coex_cmd uninitialized_var(coex_cmd);
-
- if (priv->cfg->support_wimax_coexist) {
- /* UnMask wake up src at associated sleep */
- coex_cmd.flags |= COEX_FLAGS_ASSOC_WA_UNMASK_MSK;
-
- /* UnMask wake up src at unassociated sleep */
- coex_cmd.flags |= COEX_FLAGS_UNASSOC_WA_UNMASK_MSK;
- memcpy(coex_cmd.sta_prio, cu_priorities,
- sizeof(struct iwl_wimax_coex_event_entry) *
- COEX_NUM_OF_EVENTS);
-
- /* enabling the coexistence feature */
- coex_cmd.flags |= COEX_FLAGS_COEX_ENABLE_MSK;
-
- /* enabling the priorities tables */
- coex_cmd.flags |= COEX_FLAGS_STA_TABLE_VALID_MSK;
- } else {
- /* coexistence is disabled */
- memset(&coex_cmd, 0, sizeof(coex_cmd));
- }
- return iwl_send_cmd_pdu(priv, COEX_PRIORITY_TABLE_CMD,
- sizeof(coex_cmd), &coex_cmd);
-}
-EXPORT_SYMBOL(iwl_send_wimax_coex);
-
#ifdef CONFIG_IWLWIFI_DEBUGFS
#define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
@@ -3403,6 +2744,99 @@ int iwl_force_reset(struct iwl_priv *priv, int mode)
}
return 0;
}
+EXPORT_SYMBOL(iwl_force_reset);
+
+/**
+ * iwl_bg_monitor_recover - Timer callback to check for stuck queue and recover
+ *
+ * Under normal conditions (no queue is stuck), the timer is re-armed to
+ * fire monitor_recover_period milliseconds after the previous expiry.
+ * When a queue's read_ptr has not moved, the timer is shortened to
+ * 100 ms. This is
+ * 1) to reduce the chance that the read_ptr merely wrapped around (not stuck)
+ * 2) to detect stuck queues quickly, before the station and AP
+ *    disassociate from each other.
+ *
+ * This function monitors all the tx queues and recovers if any of
+ * them is stuck.
+ * 1. It first checks the command queue for a stuck condition. If it is
+ *    stuck, it recovers by reloading the firmware and returns.
+ * 2. Then, if the station is associated, it checks the remaining queues.
+ *    If any queue is stuck, it recovers by reloading the firmware.
+ * Note: the queue read_ptr must be found at the same place MAX_REPEAT+1
+ *    times before the queue is considered stuck.
+ */
+/*
+ * The maximum number of times the tx queue read pointer may stay at the
+ * same place before the queue is considered stuck.
+ */
+#define MAX_REPEAT (2)
+static int iwl_check_stuck_queue(struct iwl_priv *priv, int cnt)
+{
+ struct iwl_tx_queue *txq;
+ struct iwl_queue *q;
+
+ txq = &priv->txq[cnt];
+ q = &txq->q;
+ /* skip empty queues (read_ptr == write_ptr) */
+ if (q->read_ptr != q->write_ptr) {
+ if (q->read_ptr == q->last_read_ptr) {
+ /* the queue read pointer has not advanced since the last check */
+ if (q->repeat_same_read_ptr > MAX_REPEAT) {
+ IWL_ERR(priv,
+ "queue %d stuck %d time. Fw reload.\n",
+ q->id, q->repeat_same_read_ptr);
+ q->repeat_same_read_ptr = 0;
+ iwl_force_reset(priv, IWL_FW_RESET);
+ } else {
+ q->repeat_same_read_ptr++;
+ IWL_DEBUG_RADIO(priv,
+ "queue %d, not read %d time\n",
+ q->id,
+ q->repeat_same_read_ptr);
+ mod_timer(&priv->monitor_recover, jiffies +
+ msecs_to_jiffies(IWL_ONE_HUNDRED_MSECS));
+ }
+ return 1;
+ } else {
+ q->last_read_ptr = q->read_ptr;
+ q->repeat_same_read_ptr = 0;
+ }
+ }
+ return 0;
+}
+
+void iwl_bg_monitor_recover(unsigned long data)
+{
+ struct iwl_priv *priv = (struct iwl_priv *)data;
+ int cnt;
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ /* monitor and check for stuck cmd queue */
+ if (iwl_check_stuck_queue(priv, IWL_CMD_QUEUE_NUM))
+ return;
+
+ /* monitor and check for other stuck queues */
+ if (iwl_is_associated(priv)) {
+ for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
+ /* skip as we already checked the command queue */
+ if (cnt == IWL_CMD_QUEUE_NUM)
+ continue;
+ if (iwl_check_stuck_queue(priv, cnt))
+ return;
+ }
+ }
+ /*
+ * Reschedule the timer to occur in
+ * priv->cfg->monitor_recover_period
+ */
+ mod_timer(&priv->monitor_recover,
+ jiffies + msecs_to_jiffies(priv->cfg->monitor_recover_period));
+}
+EXPORT_SYMBOL(iwl_bg_monitor_recover);
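[Editor's illustration, not part of the patch.] The watchdog above only works if the driver arms priv->monitor_recover with the per-device callback and period. A minimal sketch of how that wiring might look is shown below; the helper name and the exact setup hook are assumptions for illustration, only the recover_from_tx_stall op, monitor_recover timer and monitor_recover_period field come from this patch.

/* Sketch only: arm the tx-stall watchdog during driver setup (assumed hook). */
static void iwl_setup_recovery_timer_sketch(struct iwl_priv *priv)
{
	/* Let the per-device lib ops choose the stall-recovery callback. */
	if (priv->cfg->ops->lib->recover_from_tx_stall)
		setup_timer(&priv->monitor_recover,
			    priv->cfg->ops->lib->recover_from_tx_stall,
			    (unsigned long)priv);

	/* First expiry one monitor_recover_period from now. */
	mod_timer(&priv->monitor_recover,
		  jiffies +
		  msecs_to_jiffies(priv->cfg->monitor_recover_period));
}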
#ifdef CONFIG_PM
@@ -3432,6 +2866,12 @@ int iwl_pci_resume(struct pci_dev *pdev)
struct iwl_priv *priv = pci_get_drvdata(pdev);
int ret;
+ /*
+ * We disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state.
+ */
+ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+
pci_set_power_state(pdev, PCI_D0);
ret = pci_enable_device(pdev);
if (ret)
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 36940a9ec6b9..7e5a5ba41fd2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -90,6 +90,7 @@ struct iwl_hcmd_ops {
int (*commit_rxon)(struct iwl_priv *priv);
void (*set_rxon_chain)(struct iwl_priv *priv);
int (*set_tx_ant)(struct iwl_priv *priv, u8 valid_tx_ant);
+ void (*send_bt_config)(struct iwl_priv *priv);
};
struct iwl_hcmd_utils_ops {
@@ -105,6 +106,7 @@ struct iwl_hcmd_utils_ops {
__le32 *tx_flags);
int (*calc_rssi)(struct iwl_priv *priv,
struct iwl_rx_phy_res *rx_resp);
+ void (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif);
};
struct iwl_apm_ops {
@@ -114,23 +116,21 @@ struct iwl_apm_ops {
int (*set_pwr_src)(struct iwl_priv *priv, enum iwl_pwr_src src);
};
+struct iwl_debugfs_ops {
+ ssize_t (*rx_stats_read)(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ ssize_t (*tx_stats_read)(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ ssize_t (*general_stats_read)(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+};
+
struct iwl_temp_ops {
void (*temperature)(struct iwl_priv *priv);
void (*set_ct_kill)(struct iwl_priv *priv);
void (*set_calib_version)(struct iwl_priv *priv);
};
-struct iwl_ucode_ops {
- u32 (*get_header_size)(u32);
- u32 (*get_build)(const struct iwl_ucode_header *, u32);
- u32 (*get_inst_size)(const struct iwl_ucode_header *, u32);
- u32 (*get_data_size)(const struct iwl_ucode_header *, u32);
- u32 (*get_init_size)(const struct iwl_ucode_header *, u32);
- u32 (*get_init_data_size)(const struct iwl_ucode_header *, u32);
- u32 (*get_boot_size)(const struct iwl_ucode_header *, u32);
- u8 * (*get_data)(const struct iwl_ucode_header *, u32);
-};
-
struct iwl_lib_ops {
/* set hw dependent parameters */
int (*set_hw_params)(struct iwl_priv *priv);
@@ -180,8 +180,9 @@ struct iwl_lib_ops {
/* power */
int (*send_tx_power) (struct iwl_priv *priv);
void (*update_chain_flags)(struct iwl_priv *priv);
- void (*post_associate) (struct iwl_priv *priv);
- void (*config_ap) (struct iwl_priv *priv);
+ void (*post_associate)(struct iwl_priv *priv,
+ struct ieee80211_vif *vif);
+ void (*config_ap)(struct iwl_priv *priv, struct ieee80211_vif *vif);
irqreturn_t (*isr) (int irq, void *data);
/* eeprom operations (as defined in iwl-eeprom.h) */
@@ -190,7 +191,17 @@ struct iwl_lib_ops {
/* temperature */
struct iwl_temp_ops temp_ops;
/* station management */
- void (*add_bcast_station)(struct iwl_priv *priv);
+ int (*manage_ibss_station)(struct iwl_priv *priv,
+ struct ieee80211_vif *vif, bool add);
+ /* recover from tx queue stall */
+ void (*recover_from_tx_stall)(unsigned long data);
+ /* check for plcp health */
+ bool (*check_plcp_health)(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt);
+ /* check for ack health */
+ bool (*check_ack_health)(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt);
+ struct iwl_debugfs_ops debugfs_ops;
};
struct iwl_led_ops {
@@ -200,7 +211,6 @@ struct iwl_led_ops {
};
struct iwl_ops {
- const struct iwl_ucode_ops *ucode;
const struct iwl_lib_ops *lib;
const struct iwl_hcmd_ops *hcmd;
const struct iwl_hcmd_utils_ops *utils;
@@ -237,6 +247,18 @@ struct iwl_mod_params {
* @support_wimax_coexist: support wimax/wifi co-exist
* @plcp_delta_threshold: plcp error rate threshold used to trigger
* radio tuning when there is a high receiving plcp error rate
+ * @chain_noise_scale: default chain noise scale used for gain computation
+ * @monitor_recover_period: default timer period used to check for stuck queues
+ * @temperature_kelvin: temperature reported by uCode in kelvin
+ * @max_event_log_size: size of the event log buffer for uCode event logging
+ * @tx_power_by_driver: tx power calibration performed by driver
+ * instead of uCode
+ * @ucode_tracing: support ucode continuous tracing
+ * @sensitivity_calib_by_driver: driver has the capability to perform
+ * sensitivity calibration operation
+ * @chain_noise_calib_by_driver: driver has the capability to perform
+ * chain noise calibration operation
+ * @scan_antennas: available antennas for the scan operation
*
* We enable the driver to be backward compatible wrt API version. The
* driver specifies which APIs it supports (with @ucode_api_max being the
@@ -295,6 +317,15 @@ struct iwl_cfg {
const bool support_wimax_coexist;
u8 plcp_delta_threshold;
s32 chain_noise_scale;
+ /* timer period for monitoring the driver queues */
+ u32 monitor_recover_period;
+ bool temperature_kelvin;
+ u32 max_event_log_size;
+ const bool tx_power_by_driver;
+ const bool ucode_tracing;
+ const bool sensitivity_calib_by_driver;
+ const bool chain_noise_calib_by_driver;
+ u8 scan_antennas[IEEE80211_NUM_BANDS];
};
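[Editor's illustration, not part of the patch.] For orientation, a per-device configuration would populate the new iwl_cfg fields roughly as below. The config name, the numeric values and the antenna masks are illustrative assumptions; only the field names come from the structure added above.

/* Hypothetical example only -- name and values are made up. */
static struct iwl_cfg iwl_example_cfg_sketch = {
	.monitor_recover_period		= 1000,	/* check queues every 1000 ms */
	.temperature_kelvin		= true,	/* uCode reports kelvin */
	.max_event_log_size		= 512,
	.tx_power_by_driver		= false,
	.ucode_tracing			= true,
	.sensitivity_calib_by_driver	= true,
	.chain_noise_calib_by_driver	= true,
	.scan_antennas[IEEE80211_BAND_2GHZ] = ANT_AB,
	.scan_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
};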
/***************************
@@ -304,8 +335,7 @@ struct iwl_cfg {
struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
struct ieee80211_ops *hw_ops);
void iwl_hw_detect(struct iwl_priv *priv);
-void iwl_reset_qos(struct iwl_priv *priv);
-void iwl_activate_qos(struct iwl_priv *priv, u8 force);
+void iwl_activate_qos(struct iwl_priv *priv);
int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
const struct ieee80211_tx_queue_params *params);
void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt);
@@ -316,8 +346,8 @@ int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch);
void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf);
u8 iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
struct ieee80211_sta_ht_cap *sta_ht_inf);
-void iwl_set_flags_for_band(struct iwl_priv *priv, enum ieee80211_band band);
-void iwl_connection_init_rx_config(struct iwl_priv *priv, int mode);
+void iwl_connection_init_rx_config(struct iwl_priv *priv,
+ struct ieee80211_vif *vif);
int iwl_set_decrypted_flag(struct iwl_priv *priv,
struct ieee80211_hdr *hdr,
u32 decrypt_res,
@@ -326,29 +356,25 @@ void iwl_irq_handle_error(struct iwl_priv *priv);
void iwl_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *total_flags, u64 multicast);
-int iwl_hw_nic_init(struct iwl_priv *priv);
int iwl_set_hw_params(struct iwl_priv *priv);
-bool iwl_is_monitor_mode(struct iwl_priv *priv);
-void iwl_post_associate(struct iwl_priv *priv);
+void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif);
void iwl_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
u32 changes);
int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb);
int iwl_commit_rxon(struct iwl_priv *priv);
-int iwl_set_mode(struct iwl_priv *priv, int mode);
int iwl_mac_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
void iwl_mac_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
int iwl_mac_config(struct ieee80211_hw *hw, u32 changed);
-void iwl_config_ap(struct iwl_priv *priv);
+void iwl_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif);
void iwl_mac_reset_tsf(struct ieee80211_hw *hw);
int iwl_alloc_txq_mem(struct iwl_priv *priv);
void iwl_free_txq_mem(struct iwl_priv *priv);
void iwlcore_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
__le32 *tx_flags);
-int iwl_send_wimax_coex(struct iwl_priv *priv);
#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_alloc_traffic_mem(struct iwl_priv *priv);
void iwl_free_traffic_mem(struct iwl_priv *priv);
@@ -411,26 +437,24 @@ void iwl_rx_reply_error(struct iwl_priv *priv,
/*****************************************************
* RX
******************************************************/
-void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
void iwl_cmd_queue_free(struct iwl_priv *priv);
int iwl_rx_queue_alloc(struct iwl_priv *priv);
void iwl_rx_handle(struct iwl_priv *priv);
void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
struct iwl_rx_queue *q);
-void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
-void iwl_rx_replenish(struct iwl_priv *priv);
-void iwl_rx_replenish_now(struct iwl_priv *priv);
-int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
-void iwl_rx_queue_restock(struct iwl_priv *priv);
int iwl_rx_queue_space(const struct iwl_rx_queue *q);
-void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority);
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
-int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
/* Handlers */
void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
+bool iwl_good_plcp_health(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt);
+bool iwl_good_ack_health(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt);
+void iwl_recover_from_statistics(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt);
void iwl_rx_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwl_reply_statistics(struct iwl_priv *priv,
@@ -442,14 +466,10 @@ void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
/*****************************************************
* TX
******************************************************/
-int iwl_txq_ctx_alloc(struct iwl_priv *priv);
-void iwl_txq_ctx_reset(struct iwl_priv *priv);
void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
struct iwl_tx_queue *txq,
dma_addr_t addr, u16 len, u8 reset, u8 pad);
-int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
-void iwl_hw_txq_ctx_free(struct iwl_priv *priv);
int iwl_hw_tx_queue_init(struct iwl_priv *priv,
struct iwl_tx_queue *txq);
void iwl_free_tfds_in_queue(struct iwl_priv *priv,
@@ -460,9 +480,6 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
int slots_num, u32 txq_id);
void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id);
-int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn);
-int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid);
-int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id);
/*****************************************************
* TX power
****************************************************/
@@ -472,10 +489,7 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
* Rate
******************************************************************************/
-void iwl_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
- struct ieee80211_tx_info *info);
int iwl_hwrate_to_plcp_idx(u32 rate_n_flags);
-int iwl_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv);
@@ -505,7 +519,10 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
void iwl_init_scan_params(struct iwl_priv *priv);
int iwl_scan_cancel(struct iwl_priv *priv);
int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
-int iwl_mac_hw_scan(struct ieee80211_hw *hw, struct cfg80211_scan_request *req);
+int iwl_mac_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req);
+void iwl_bg_start_internal_scan(struct work_struct *work);
void iwl_internal_short_hw_scan(struct iwl_priv *priv);
int iwl_force_reset(struct iwl_priv *priv, int mode);
u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
@@ -515,7 +532,8 @@ u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
enum ieee80211_band band,
u8 n_probes);
u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
- enum ieee80211_band band);
+ enum ieee80211_band band,
+ struct ieee80211_vif *vif);
void iwl_bg_scan_check(struct work_struct *data);
void iwl_bg_abort_scan(struct work_struct *work);
void iwl_bg_scan_completed(struct work_struct *work);
@@ -530,6 +548,7 @@ void iwl_setup_scan_deferred_work(struct iwl_priv *priv);
#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
+#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7)
/*******************************************************************************
* Calibrations - implemented in iwl-calib.c
@@ -563,11 +582,6 @@ int iwl_send_card_state(struct iwl_priv *priv, u32 flags,
* PCI *
*****************************************************/
irqreturn_t iwl_isr_legacy(int irq, void *data);
-int iwl_reset_ict(struct iwl_priv *priv);
-void iwl_disable_ict(struct iwl_priv *priv);
-int iwl_alloc_isr_ict(struct iwl_priv *priv);
-void iwl_free_isr_ict(struct iwl_priv *priv);
-irqreturn_t iwl_isr_ict(int irq, void *data);
static inline u16 iwl_pcie_link_ctl(struct iwl_priv *priv)
{
@@ -577,6 +591,9 @@ static inline u16 iwl_pcie_link_ctl(struct iwl_priv *priv)
pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
return pci_lnk_ctl;
}
+
+void iwl_bg_monitor_recover(unsigned long data);
+
#ifdef CONFIG_PM
int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state);
int iwl_pci_resume(struct pci_dev *pdev);
@@ -625,7 +642,6 @@ void iwlcore_free_geos(struct iwl_priv *priv);
#define STATUS_SCAN_HW 15
#define STATUS_POWER_PMI 16
#define STATUS_FW_ERROR 17
-#define STATUS_MODE_PENDING 18
static inline int iwl_is_ready(struct iwl_priv *priv)
@@ -672,23 +688,16 @@ static inline int iwl_is_ready_rf(struct iwl_priv *priv)
}
extern void iwl_rf_kill_ct_config(struct iwl_priv *priv);
-extern int iwl_send_bt_config(struct iwl_priv *priv);
+extern void iwl_send_bt_config(struct iwl_priv *priv);
extern int iwl_send_statistics_request(struct iwl_priv *priv,
u8 flags, bool clear);
extern int iwl_verify_ucode(struct iwl_priv *priv);
extern int iwl_send_lq_cmd(struct iwl_priv *priv,
- struct iwl_link_quality_cmd *lq, u8 flags);
-extern void iwl_rx_reply_rx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-extern void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
+ struct iwl_link_quality_cmd *lq, u8 flags, bool init);
void iwl_apm_stop(struct iwl_priv *priv);
-int iwl_apm_stop_master(struct iwl_priv *priv);
int iwl_apm_init(struct iwl_priv *priv);
-void iwl_setup_rxon_timing(struct iwl_priv *priv);
+void iwl_setup_rxon_timing(struct iwl_priv *priv, struct ieee80211_vif *vif);
static inline int iwl_send_rxon_assoc(struct iwl_priv *priv)
{
return priv->cfg->ops->hcmd->rxon_assoc(priv);
@@ -697,9 +706,10 @@ static inline int iwlcore_commit_rxon(struct iwl_priv *priv)
{
return priv->cfg->ops->hcmd->commit_rxon(priv);
}
-static inline void iwlcore_config_ap(struct iwl_priv *priv)
+static inline void iwlcore_config_ap(struct iwl_priv *priv,
+ struct ieee80211_vif *vif)
{
- priv->cfg->ops->lib->config_ap(priv);
+ priv->cfg->ops->lib->config_ap(priv, vif);
}
static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
struct iwl_priv *priv, enum ieee80211_band band)
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 808b7146bead..254c35ae8b38 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -298,6 +298,7 @@
#define CSR_HW_REV_TYPE_1000 (0x0000060)
#define CSR_HW_REV_TYPE_6x00 (0x0000070)
#define CSR_HW_REV_TYPE_6x50 (0x0000080)
+#define CSR_HW_REV_TYPE_6x00g2 (0x00000B0)
#define CSR_HW_REV_TYPE_NONE (0x00000F0)
/* EEPROM REG */
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 1c7b53d511c7..5c2bcef5df0c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -78,6 +78,8 @@ static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_dbgfs_register(struct iwl_priv *priv, const char *name);
void iwl_dbgfs_unregister(struct iwl_priv *priv);
+extern int iwl_dbgfs_statistics_flag(struct iwl_priv *priv, char *buf,
+ int bufsz);
#else
static inline int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
{
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index b6e1b0ebe230..9659c5d01df9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -106,6 +106,26 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
.open = iwl_dbgfs_open_file_generic, \
};
+int iwl_dbgfs_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
+{
+ int p = 0;
+
+ p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
+ le32_to_cpu(priv->statistics.flag));
+ if (le32_to_cpu(priv->statistics.flag) & UCODE_STATISTICS_CLEAR_MSK)
+ p += scnprintf(buf + p, bufsz - p,
+ "\tStatistics have been cleared\n");
+ p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
+ (le32_to_cpu(priv->statistics.flag) &
+ UCODE_STATISTICS_FREQUENCY_MSK)
+ ? "2.4 GHz" : "5.2 GHz");
+ p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
+ (le32_to_cpu(priv->statistics.flag) &
+ UCODE_STATISTICS_NARROW_BAND_MSK)
+ ? "enabled" : "disabled");
+ return p;
+}
+EXPORT_SYMBOL(iwl_dbgfs_statistics_flag);
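Based on how the previously static copy was called (see the removed code further down), per-device debugfs readers are expected to prepend this flag summary before their own statistics output, e.g.:

        pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);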
static ssize_t iwl_dbgfs_tx_statistics_read(struct file *file,
char __user *user_buf,
@@ -561,8 +581,6 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
test_bit(STATUS_POWER_PMI, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
test_bit(STATUS_FW_ERROR, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_MODE_PENDING:\t %d\n",
- test_bit(STATUS_MODE_PENDING, &priv->status));
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
@@ -661,7 +679,6 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
int pos = 0, i;
char buf[256];
const size_t bufsz = sizeof(buf);
- ssize_t ret;
for (i = 0; i < AC_NUM; i++) {
pos += scnprintf(buf + pos, bufsz - pos,
@@ -673,8 +690,7 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
priv->qos_data.def_qos_parm.ac[i].aifsn,
priv->qos_data.def_qos_parm.ac[i].edca_txop);
}
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- return ret;
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf,
@@ -684,7 +700,6 @@ static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf,
int pos = 0;
char buf[256];
const size_t bufsz = sizeof(buf);
- ssize_t ret;
pos += scnprintf(buf + pos, bufsz - pos,
"allow blinking: %s\n",
@@ -698,8 +713,7 @@ static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf,
priv->last_blink_time);
}
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- return ret;
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file,
@@ -712,7 +726,6 @@ static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file,
char buf[100];
int pos = 0;
const size_t bufsz = sizeof(buf);
- ssize_t ret;
pos += scnprintf(buf + pos, bufsz - pos,
"Thermal Throttling Mode: %s\n",
@@ -732,8 +745,7 @@ static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file,
"HT mode: %d\n",
restriction->is_ht);
}
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- return ret;
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
@@ -770,13 +782,11 @@ static ssize_t iwl_dbgfs_disable_ht40_read(struct file *file,
char buf[100];
int pos = 0;
const size_t bufsz = sizeof(buf);
- ssize_t ret;
pos += scnprintf(buf + pos, bufsz - pos,
"11n 40MHz Mode: %s\n",
priv->disable_ht40 ? "Disabled" : "Enabled");
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- return ret;
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
@@ -1044,474 +1054,13 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
-static int iwl_dbgfs_statistics_flag(struct iwl_priv *priv, char *buf,
- int bufsz)
-{
- int p = 0;
-
- p += scnprintf(buf + p, bufsz - p,
- "Statistics Flag(0x%X):\n",
- le32_to_cpu(priv->statistics.flag));
- if (le32_to_cpu(priv->statistics.flag) & UCODE_STATISTICS_CLEAR_MSK)
- p += scnprintf(buf + p, bufsz - p,
- "\tStatistics have been cleared\n");
- p += scnprintf(buf + p, bufsz - p,
- "\tOperational Frequency: %s\n",
- (le32_to_cpu(priv->statistics.flag) &
- UCODE_STATISTICS_FREQUENCY_MSK)
- ? "2.4 GHz" : "5.2 GHz");
- p += scnprintf(buf + p, bufsz - p,
- "\tTGj Narrow Band: %s\n",
- (le32_to_cpu(priv->statistics.flag) &
- UCODE_STATISTICS_NARROW_BAND_MSK)
- ? "enabled" : "disabled");
- return p;
-}
-
-static const char ucode_stats_header[] =
- "%-32s current acumulative delta max\n";
-static const char ucode_stats_short_format[] =
- " %-30s %10u\n";
-static const char ucode_stats_format[] =
- " %-30s %10u %10u %10u %10u\n";
-
static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char *buf;
- int bufsz = sizeof(struct statistics_rx_phy) * 40 +
- sizeof(struct statistics_rx_non_phy) * 40 +
- sizeof(struct statistics_rx_ht_phy) * 40 + 400;
- ssize_t ret;
- struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
- struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
- struct statistics_rx_non_phy *general, *accum_general;
- struct statistics_rx_non_phy *delta_general, *max_general;
- struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
-
- if (!iwl_is_alive(priv))
- return -EAGAIN;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- /* the statistic information display here is based on
- * the last statistics notification from uCode
- * might not reflect the current uCode activity
- */
- ofdm = &priv->statistics.rx.ofdm;
- cck = &priv->statistics.rx.cck;
- general = &priv->statistics.rx.general;
- ht = &priv->statistics.rx.ofdm_ht;
- accum_ofdm = &priv->accum_statistics.rx.ofdm;
- accum_cck = &priv->accum_statistics.rx.cck;
- accum_general = &priv->accum_statistics.rx.general;
- accum_ht = &priv->accum_statistics.rx.ofdm_ht;
- delta_ofdm = &priv->delta_statistics.rx.ofdm;
- delta_cck = &priv->delta_statistics.rx.cck;
- delta_general = &priv->delta_statistics.rx.general;
- delta_ht = &priv->delta_statistics.rx.ofdm_ht;
- max_ofdm = &priv->max_delta.rx.ofdm;
- max_cck = &priv->max_delta.rx.cck;
- max_general = &priv->max_delta.rx.general;
- max_ht = &priv->max_delta.rx.ofdm_ht;
-
- pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
- "Statistics_Rx - OFDM:");
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "ina_cnt:", le32_to_cpu(ofdm->ina_cnt),
- accum_ofdm->ina_cnt,
- delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "fina_cnt:",
- le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
- delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "plcp_err:",
- le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
- delta_ofdm->plcp_err, max_ofdm->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "crc32_err:",
- le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
- delta_ofdm->crc32_err, max_ofdm->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "overrun_err:",
- le32_to_cpu(ofdm->overrun_err),
- accum_ofdm->overrun_err,
- delta_ofdm->overrun_err, max_ofdm->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "early_overrun_err:",
- le32_to_cpu(ofdm->early_overrun_err),
- accum_ofdm->early_overrun_err,
- delta_ofdm->early_overrun_err,
- max_ofdm->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "crc32_good:",
- le32_to_cpu(ofdm->crc32_good),
- accum_ofdm->crc32_good,
- delta_ofdm->crc32_good, max_ofdm->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "false_alarm_cnt:",
- le32_to_cpu(ofdm->false_alarm_cnt),
- accum_ofdm->false_alarm_cnt,
- delta_ofdm->false_alarm_cnt,
- max_ofdm->false_alarm_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "fina_sync_err_cnt:",
- le32_to_cpu(ofdm->fina_sync_err_cnt),
- accum_ofdm->fina_sync_err_cnt,
- delta_ofdm->fina_sync_err_cnt,
- max_ofdm->fina_sync_err_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "sfd_timeout:",
- le32_to_cpu(ofdm->sfd_timeout),
- accum_ofdm->sfd_timeout,
- delta_ofdm->sfd_timeout,
- max_ofdm->sfd_timeout);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "fina_timeout:",
- le32_to_cpu(ofdm->fina_timeout),
- accum_ofdm->fina_timeout,
- delta_ofdm->fina_timeout,
- max_ofdm->fina_timeout);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "unresponded_rts:",
- le32_to_cpu(ofdm->unresponded_rts),
- accum_ofdm->unresponded_rts,
- delta_ofdm->unresponded_rts,
- max_ofdm->unresponded_rts);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "rxe_frame_lmt_ovrun:",
- le32_to_cpu(ofdm->rxe_frame_limit_overrun),
- accum_ofdm->rxe_frame_limit_overrun,
- delta_ofdm->rxe_frame_limit_overrun,
- max_ofdm->rxe_frame_limit_overrun);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "sent_ack_cnt:",
- le32_to_cpu(ofdm->sent_ack_cnt),
- accum_ofdm->sent_ack_cnt,
- delta_ofdm->sent_ack_cnt,
- max_ofdm->sent_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "sent_cts_cnt:",
- le32_to_cpu(ofdm->sent_cts_cnt),
- accum_ofdm->sent_cts_cnt,
- delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "sent_ba_rsp_cnt:",
- le32_to_cpu(ofdm->sent_ba_rsp_cnt),
- accum_ofdm->sent_ba_rsp_cnt,
- delta_ofdm->sent_ba_rsp_cnt,
- max_ofdm->sent_ba_rsp_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "dsp_self_kill:",
- le32_to_cpu(ofdm->dsp_self_kill),
- accum_ofdm->dsp_self_kill,
- delta_ofdm->dsp_self_kill,
- max_ofdm->dsp_self_kill);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "mh_format_err:",
- le32_to_cpu(ofdm->mh_format_err),
- accum_ofdm->mh_format_err,
- delta_ofdm->mh_format_err,
- max_ofdm->mh_format_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "re_acq_main_rssi_sum:",
- le32_to_cpu(ofdm->re_acq_main_rssi_sum),
- accum_ofdm->re_acq_main_rssi_sum,
- delta_ofdm->re_acq_main_rssi_sum,
- max_ofdm->re_acq_main_rssi_sum);
-
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
- "Statistics_Rx - CCK:");
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "ina_cnt:",
- le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
- delta_cck->ina_cnt, max_cck->ina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "fina_cnt:",
- le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
- delta_cck->fina_cnt, max_cck->fina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "plcp_err:",
- le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
- delta_cck->plcp_err, max_cck->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "crc32_err:",
- le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
- delta_cck->crc32_err, max_cck->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "overrun_err:",
- le32_to_cpu(cck->overrun_err),
- accum_cck->overrun_err,
- delta_cck->overrun_err, max_cck->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "early_overrun_err:",
- le32_to_cpu(cck->early_overrun_err),
- accum_cck->early_overrun_err,
- delta_cck->early_overrun_err,
- max_cck->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "crc32_good:",
- le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
- delta_cck->crc32_good,
- max_cck->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "false_alarm_cnt:",
- le32_to_cpu(cck->false_alarm_cnt),
- accum_cck->false_alarm_cnt,
- delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "fina_sync_err_cnt:",
- le32_to_cpu(cck->fina_sync_err_cnt),
- accum_cck->fina_sync_err_cnt,
- delta_cck->fina_sync_err_cnt,
- max_cck->fina_sync_err_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "sfd_timeout:",
- le32_to_cpu(cck->sfd_timeout),
- accum_cck->sfd_timeout,
- delta_cck->sfd_timeout, max_cck->sfd_timeout);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "fina_timeout:",
- le32_to_cpu(cck->fina_timeout),
- accum_cck->fina_timeout,
- delta_cck->fina_timeout, max_cck->fina_timeout);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "unresponded_rts:",
- le32_to_cpu(cck->unresponded_rts),
- accum_cck->unresponded_rts,
- delta_cck->unresponded_rts,
- max_cck->unresponded_rts);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "rxe_frame_lmt_ovrun:",
- le32_to_cpu(cck->rxe_frame_limit_overrun),
- accum_cck->rxe_frame_limit_overrun,
- delta_cck->rxe_frame_limit_overrun,
- max_cck->rxe_frame_limit_overrun);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "sent_ack_cnt:",
- le32_to_cpu(cck->sent_ack_cnt),
- accum_cck->sent_ack_cnt,
- delta_cck->sent_ack_cnt,
- max_cck->sent_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "sent_cts_cnt:",
- le32_to_cpu(cck->sent_cts_cnt),
- accum_cck->sent_cts_cnt,
- delta_cck->sent_cts_cnt,
- max_cck->sent_cts_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "sent_ba_rsp_cnt:",
- le32_to_cpu(cck->sent_ba_rsp_cnt),
- accum_cck->sent_ba_rsp_cnt,
- delta_cck->sent_ba_rsp_cnt,
- max_cck->sent_ba_rsp_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "dsp_self_kill:",
- le32_to_cpu(cck->dsp_self_kill),
- accum_cck->dsp_self_kill,
- delta_cck->dsp_self_kill,
- max_cck->dsp_self_kill);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "mh_format_err:",
- le32_to_cpu(cck->mh_format_err),
- accum_cck->mh_format_err,
- delta_cck->mh_format_err, max_cck->mh_format_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "re_acq_main_rssi_sum:",
- le32_to_cpu(cck->re_acq_main_rssi_sum),
- accum_cck->re_acq_main_rssi_sum,
- delta_cck->re_acq_main_rssi_sum,
- max_cck->re_acq_main_rssi_sum);
-
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
- "Statistics_Rx - GENERAL:");
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "bogus_cts:",
- le32_to_cpu(general->bogus_cts),
- accum_general->bogus_cts,
- delta_general->bogus_cts, max_general->bogus_cts);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "bogus_ack:",
- le32_to_cpu(general->bogus_ack),
- accum_general->bogus_ack,
- delta_general->bogus_ack, max_general->bogus_ack);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "non_bssid_frames:",
- le32_to_cpu(general->non_bssid_frames),
- accum_general->non_bssid_frames,
- delta_general->non_bssid_frames,
- max_general->non_bssid_frames);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "filtered_frames:",
- le32_to_cpu(general->filtered_frames),
- accum_general->filtered_frames,
- delta_general->filtered_frames,
- max_general->filtered_frames);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "non_channel_beacons:",
- le32_to_cpu(general->non_channel_beacons),
- accum_general->non_channel_beacons,
- delta_general->non_channel_beacons,
- max_general->non_channel_beacons);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "channel_beacons:",
- le32_to_cpu(general->channel_beacons),
- accum_general->channel_beacons,
- delta_general->channel_beacons,
- max_general->channel_beacons);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "num_missed_bcon:",
- le32_to_cpu(general->num_missed_bcon),
- accum_general->num_missed_bcon,
- delta_general->num_missed_bcon,
- max_general->num_missed_bcon);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "adc_rx_saturation_time:",
- le32_to_cpu(general->adc_rx_saturation_time),
- accum_general->adc_rx_saturation_time,
- delta_general->adc_rx_saturation_time,
- max_general->adc_rx_saturation_time);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "ina_detect_search_tm:",
- le32_to_cpu(general->ina_detection_search_time),
- accum_general->ina_detection_search_time,
- delta_general->ina_detection_search_time,
- max_general->ina_detection_search_time);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "beacon_silence_rssi_a:",
- le32_to_cpu(general->beacon_silence_rssi_a),
- accum_general->beacon_silence_rssi_a,
- delta_general->beacon_silence_rssi_a,
- max_general->beacon_silence_rssi_a);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "beacon_silence_rssi_b:",
- le32_to_cpu(general->beacon_silence_rssi_b),
- accum_general->beacon_silence_rssi_b,
- delta_general->beacon_silence_rssi_b,
- max_general->beacon_silence_rssi_b);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "beacon_silence_rssi_c:",
- le32_to_cpu(general->beacon_silence_rssi_c),
- accum_general->beacon_silence_rssi_c,
- delta_general->beacon_silence_rssi_c,
- max_general->beacon_silence_rssi_c);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "interference_data_flag:",
- le32_to_cpu(general->interference_data_flag),
- accum_general->interference_data_flag,
- delta_general->interference_data_flag,
- max_general->interference_data_flag);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "channel_load:",
- le32_to_cpu(general->channel_load),
- accum_general->channel_load,
- delta_general->channel_load,
- max_general->channel_load);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "dsp_false_alarms:",
- le32_to_cpu(general->dsp_false_alarms),
- accum_general->dsp_false_alarms,
- delta_general->dsp_false_alarms,
- max_general->dsp_false_alarms);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "beacon_rssi_a:",
- le32_to_cpu(general->beacon_rssi_a),
- accum_general->beacon_rssi_a,
- delta_general->beacon_rssi_a,
- max_general->beacon_rssi_a);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "beacon_rssi_b:",
- le32_to_cpu(general->beacon_rssi_b),
- accum_general->beacon_rssi_b,
- delta_general->beacon_rssi_b,
- max_general->beacon_rssi_b);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "beacon_rssi_c:",
- le32_to_cpu(general->beacon_rssi_c),
- accum_general->beacon_rssi_c,
- delta_general->beacon_rssi_c,
- max_general->beacon_rssi_c);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "beacon_energy_a:",
- le32_to_cpu(general->beacon_energy_a),
- accum_general->beacon_energy_a,
- delta_general->beacon_energy_a,
- max_general->beacon_energy_a);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "beacon_energy_b:",
- le32_to_cpu(general->beacon_energy_b),
- accum_general->beacon_energy_b,
- delta_general->beacon_energy_b,
- max_general->beacon_energy_b);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "beacon_energy_c:",
- le32_to_cpu(general->beacon_energy_c),
- accum_general->beacon_energy_c,
- delta_general->beacon_energy_c,
- max_general->beacon_energy_c);
-
- pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM_HT:\n");
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
- "Statistics_Rx - OFDM_HT:");
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "plcp_err:",
- le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
- delta_ht->plcp_err, max_ht->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "overrun_err:",
- le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
- delta_ht->overrun_err, max_ht->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "early_overrun_err:",
- le32_to_cpu(ht->early_overrun_err),
- accum_ht->early_overrun_err,
- delta_ht->early_overrun_err,
- max_ht->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "crc32_good:",
- le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
- delta_ht->crc32_good, max_ht->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "crc32_err:",
- le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
- delta_ht->crc32_err, max_ht->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "mh_format_err:",
- le32_to_cpu(ht->mh_format_err),
- accum_ht->mh_format_err,
- delta_ht->mh_format_err, max_ht->mh_format_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg_crc32_good:",
- le32_to_cpu(ht->agg_crc32_good),
- accum_ht->agg_crc32_good,
- delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg_mpdu_cnt:",
- le32_to_cpu(ht->agg_mpdu_cnt),
- accum_ht->agg_mpdu_cnt,
- delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg_cnt:",
- le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
- delta_ht->agg_cnt, max_ht->agg_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "unsupport_mcs:",
- le32_to_cpu(ht->unsupport_mcs),
- accum_ht->unsupport_mcs,
- delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
+ return priv->cfg->ops->lib->debugfs_ops.rx_stats_read(file,
+ user_buf, count, ppos);
}
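The layout of the new debugfs_ops hook is not visible in this hunk; inferring only from the three call sites (rx_stats_read, tx_stats_read, general_stats_read), a minimal sketch of the indirection it implies would be the following, with struct and field names assumed:

        struct iwl_debugfs_ops {
                ssize_t (*rx_stats_read)(struct file *file, char __user *user_buf,
                                         size_t count, loff_t *ppos);
                ssize_t (*tx_stats_read)(struct file *file, char __user *user_buf,
                                         size_t count, loff_t *ppos);
                ssize_t (*general_stats_read)(struct file *file, char __user *user_buf,
                                              size_t count, loff_t *ppos);
        };

Each device family can then plug in its own formatting routines instead of sharing one giant reader in iwl-debugfs.c.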
static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
@@ -1519,173 +1068,8 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
size_t count, loff_t *ppos)
{
struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char *buf;
- int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
- ssize_t ret;
- struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
-
- if (!iwl_is_alive(priv))
- return -EAGAIN;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- /* the statistic information display here is based on
- * the last statistics notification from uCode
- * might not reflect the current uCode activity
- */
- tx = &priv->statistics.tx;
- accum_tx = &priv->accum_statistics.tx;
- delta_tx = &priv->delta_statistics.tx;
- max_tx = &priv->max_delta.tx;
- pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
- "Statistics_Tx:");
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "preamble:",
- le32_to_cpu(tx->preamble_cnt),
- accum_tx->preamble_cnt,
- delta_tx->preamble_cnt, max_tx->preamble_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "rx_detected_cnt:",
- le32_to_cpu(tx->rx_detected_cnt),
- accum_tx->rx_detected_cnt,
- delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "bt_prio_defer_cnt:",
- le32_to_cpu(tx->bt_prio_defer_cnt),
- accum_tx->bt_prio_defer_cnt,
- delta_tx->bt_prio_defer_cnt,
- max_tx->bt_prio_defer_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "bt_prio_kill_cnt:",
- le32_to_cpu(tx->bt_prio_kill_cnt),
- accum_tx->bt_prio_kill_cnt,
- delta_tx->bt_prio_kill_cnt,
- max_tx->bt_prio_kill_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "few_bytes_cnt:",
- le32_to_cpu(tx->few_bytes_cnt),
- accum_tx->few_bytes_cnt,
- delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "cts_timeout:",
- le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
- delta_tx->cts_timeout, max_tx->cts_timeout);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "ack_timeout:",
- le32_to_cpu(tx->ack_timeout),
- accum_tx->ack_timeout,
- delta_tx->ack_timeout, max_tx->ack_timeout);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "expected_ack_cnt:",
- le32_to_cpu(tx->expected_ack_cnt),
- accum_tx->expected_ack_cnt,
- delta_tx->expected_ack_cnt,
- max_tx->expected_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "actual_ack_cnt:",
- le32_to_cpu(tx->actual_ack_cnt),
- accum_tx->actual_ack_cnt,
- delta_tx->actual_ack_cnt,
- max_tx->actual_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "dump_msdu_cnt:",
- le32_to_cpu(tx->dump_msdu_cnt),
- accum_tx->dump_msdu_cnt,
- delta_tx->dump_msdu_cnt,
- max_tx->dump_msdu_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "abort_nxt_frame_mismatch:",
- le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
- accum_tx->burst_abort_next_frame_mismatch_cnt,
- delta_tx->burst_abort_next_frame_mismatch_cnt,
- max_tx->burst_abort_next_frame_mismatch_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "abort_missing_nxt_frame:",
- le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
- accum_tx->burst_abort_missing_next_frame_cnt,
- delta_tx->burst_abort_missing_next_frame_cnt,
- max_tx->burst_abort_missing_next_frame_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "cts_timeout_collision:",
- le32_to_cpu(tx->cts_timeout_collision),
- accum_tx->cts_timeout_collision,
- delta_tx->cts_timeout_collision,
- max_tx->cts_timeout_collision);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "ack_ba_timeout_collision:",
- le32_to_cpu(tx->ack_or_ba_timeout_collision),
- accum_tx->ack_or_ba_timeout_collision,
- delta_tx->ack_or_ba_timeout_collision,
- max_tx->ack_or_ba_timeout_collision);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg ba_timeout:",
- le32_to_cpu(tx->agg.ba_timeout),
- accum_tx->agg.ba_timeout,
- delta_tx->agg.ba_timeout,
- max_tx->agg.ba_timeout);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg ba_resched_frames:",
- le32_to_cpu(tx->agg.ba_reschedule_frames),
- accum_tx->agg.ba_reschedule_frames,
- delta_tx->agg.ba_reschedule_frames,
- max_tx->agg.ba_reschedule_frames);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg scd_query_agg_frame:",
- le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
- accum_tx->agg.scd_query_agg_frame_cnt,
- delta_tx->agg.scd_query_agg_frame_cnt,
- max_tx->agg.scd_query_agg_frame_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg scd_query_no_agg:",
- le32_to_cpu(tx->agg.scd_query_no_agg),
- accum_tx->agg.scd_query_no_agg,
- delta_tx->agg.scd_query_no_agg,
- max_tx->agg.scd_query_no_agg);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg scd_query_agg:",
- le32_to_cpu(tx->agg.scd_query_agg),
- accum_tx->agg.scd_query_agg,
- delta_tx->agg.scd_query_agg,
- max_tx->agg.scd_query_agg);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg scd_query_mismatch:",
- le32_to_cpu(tx->agg.scd_query_mismatch),
- accum_tx->agg.scd_query_mismatch,
- delta_tx->agg.scd_query_mismatch,
- max_tx->agg.scd_query_mismatch);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg frame_not_ready:",
- le32_to_cpu(tx->agg.frame_not_ready),
- accum_tx->agg.frame_not_ready,
- delta_tx->agg.frame_not_ready,
- max_tx->agg.frame_not_ready);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg underrun:",
- le32_to_cpu(tx->agg.underrun),
- accum_tx->agg.underrun,
- delta_tx->agg.underrun, max_tx->agg.underrun);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg bt_prio_kill:",
- le32_to_cpu(tx->agg.bt_prio_kill),
- accum_tx->agg.bt_prio_kill,
- delta_tx->agg.bt_prio_kill,
- max_tx->agg.bt_prio_kill);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg rx_ba_rsp_cnt:",
- le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
- accum_tx->agg.rx_ba_rsp_cnt,
- delta_tx->agg.rx_ba_rsp_cnt,
- max_tx->agg.rx_ba_rsp_cnt);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
+ return priv->cfg->ops->lib->debugfs_ops.tx_stats_read(file,
+ user_buf, count, ppos);
}
static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
@@ -1693,107 +1077,8 @@ static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
size_t count, loff_t *ppos)
{
struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char *buf;
- int bufsz = sizeof(struct statistics_general) * 10 + 300;
- ssize_t ret;
- struct statistics_general *general, *accum_general;
- struct statistics_general *delta_general, *max_general;
- struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
- struct statistics_div *div, *accum_div, *delta_div, *max_div;
-
- if (!iwl_is_alive(priv))
- return -EAGAIN;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- /* the statistic information display here is based on
- * the last statistics notification from uCode
- * might not reflect the current uCode activity
- */
- general = &priv->statistics.general;
- dbg = &priv->statistics.general.dbg;
- div = &priv->statistics.general.div;
- accum_general = &priv->accum_statistics.general;
- delta_general = &priv->delta_statistics.general;
- max_general = &priv->max_delta.general;
- accum_dbg = &priv->accum_statistics.general.dbg;
- delta_dbg = &priv->delta_statistics.general.dbg;
- max_dbg = &priv->max_delta.general.dbg;
- accum_div = &priv->accum_statistics.general.div;
- delta_div = &priv->delta_statistics.general.div;
- max_div = &priv->max_delta.general.div;
- pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
- "Statistics_General:");
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_short_format,
- "temperature:",
- le32_to_cpu(general->temperature));
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_short_format,
- "temperature_m:",
- le32_to_cpu(general->temperature_m));
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "burst_check:",
- le32_to_cpu(dbg->burst_check),
- accum_dbg->burst_check,
- delta_dbg->burst_check, max_dbg->burst_check);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "burst_count:",
- le32_to_cpu(dbg->burst_count),
- accum_dbg->burst_count,
- delta_dbg->burst_count, max_dbg->burst_count);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "sleep_time:",
- le32_to_cpu(general->sleep_time),
- accum_general->sleep_time,
- delta_general->sleep_time, max_general->sleep_time);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "slots_out:",
- le32_to_cpu(general->slots_out),
- accum_general->slots_out,
- delta_general->slots_out, max_general->slots_out);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "slots_idle:",
- le32_to_cpu(general->slots_idle),
- accum_general->slots_idle,
- delta_general->slots_idle, max_general->slots_idle);
- pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
- le32_to_cpu(general->ttl_timestamp));
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "tx_on_a:",
- le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
- delta_div->tx_on_a, max_div->tx_on_a);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "tx_on_b:",
- le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
- delta_div->tx_on_b, max_div->tx_on_b);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "exec_time:",
- le32_to_cpu(div->exec_time), accum_div->exec_time,
- delta_div->exec_time, max_div->exec_time);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "probe_time:",
- le32_to_cpu(div->probe_time), accum_div->probe_time,
- delta_div->probe_time, max_div->probe_time);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "rx_enable_counter:",
- le32_to_cpu(general->rx_enable_counter),
- accum_general->rx_enable_counter,
- delta_general->rx_enable_counter,
- max_general->rx_enable_counter);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "num_of_sos_states:",
- le32_to_cpu(general->num_of_sos_states),
- accum_general->num_of_sos_states,
- delta_general->num_of_sos_states,
- max_general->num_of_sos_states);
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
+ return priv->cfg->ops->lib->debugfs_ops.general_stats_read(file,
+ user_buf, count, ppos);
}
static ssize_t iwl_dbgfs_sensitivity_read(struct file *file,
@@ -1935,46 +1220,6 @@ static ssize_t iwl_dbgfs_chain_noise_read(struct file *file,
return ret;
}
-static ssize_t iwl_dbgfs_tx_power_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- char buf[128];
- int pos = 0;
- const size_t bufsz = sizeof(buf);
- struct statistics_tx *tx;
-
- if (!iwl_is_alive(priv))
- pos += scnprintf(buf + pos, bufsz - pos, "N/A\n");
- else {
- tx = &priv->statistics.tx;
- if (tx->tx_power.ant_a ||
- tx->tx_power.ant_b ||
- tx->tx_power.ant_c) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "tx power: (1/2 dB step)\n");
- if ((priv->cfg->valid_tx_ant & ANT_A) &&
- tx->tx_power.ant_a)
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tantenna A: 0x%X\n",
- tx->tx_power.ant_a);
- if ((priv->cfg->valid_tx_ant & ANT_B) &&
- tx->tx_power.ant_b)
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tantenna B: 0x%X\n",
- tx->tx_power.ant_b);
- if ((priv->cfg->valid_tx_ant & ANT_C) &&
- tx->tx_power.ant_c)
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tantenna C: 0x%X\n",
- tx->tx_power.ant_c);
- } else
- pos += scnprintf(buf + pos, bufsz - pos, "N/A\n");
- }
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
static ssize_t iwl_dbgfs_power_save_status_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
@@ -2052,7 +1297,6 @@ static ssize_t iwl_dbgfs_ucode_tracing_read(struct file *file,
int pos = 0;
char buf[128];
const size_t bufsz = sizeof(buf);
- ssize_t ret;
pos += scnprintf(buf + pos, bufsz - pos, "ucode trace timer is %s\n",
priv->event_log.ucode_trace ? "On" : "Off");
@@ -2063,8 +1307,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_read(struct file *file,
pos += scnprintf(buf + pos, bufsz - pos, "wraps_more_count:\t\t %u\n",
priv->event_log.wraps_more_count);
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- return ret;
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
@@ -2096,6 +1339,31 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
return count;
}
+static ssize_t iwl_dbgfs_rxon_flags_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ int len = 0;
+ char buf[20];
+
+ len = sprintf(buf, "0x%04X\n", le32_to_cpu(priv->active_rxon.flags));
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t iwl_dbgfs_rxon_filter_flags_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ int len = 0;
+ char buf[20];
+
+ len = sprintf(buf, "0x%04X\n",
+ le32_to_cpu(priv->active_rxon.filter_flags));
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
@@ -2125,13 +1393,11 @@ static ssize_t iwl_dbgfs_missed_beacon_read(struct file *file,
int pos = 0;
char buf[12];
const size_t bufsz = sizeof(buf);
- ssize_t ret;
pos += scnprintf(buf + pos, bufsz - pos, "%d\n",
priv->missed_beacon_threshold);
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- return ret;
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
@@ -2160,27 +1426,6 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
return count;
}
-static ssize_t iwl_dbgfs_internal_scan_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char buf[8];
- int buf_size;
- int scan;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%d", &scan) != 1)
- return -EINVAL;
-
- iwl_internal_short_hw_scan(priv);
-
- return count;
-}
-
static ssize_t iwl_dbgfs_plcp_delta_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
@@ -2189,13 +1434,11 @@ static ssize_t iwl_dbgfs_plcp_delta_read(struct file *file,
int pos = 0;
char buf[12];
const size_t bufsz = sizeof(buf);
- ssize_t ret;
pos += scnprintf(buf + pos, bufsz - pos, "%u\n",
priv->cfg->plcp_delta_threshold);
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- return ret;
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
@@ -2288,7 +1531,6 @@ DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
DEBUGFS_READ_FILE_OPS(ucode_general_stats);
DEBUGFS_READ_FILE_OPS(sensitivity);
DEBUGFS_READ_FILE_OPS(chain_noise);
-DEBUGFS_READ_FILE_OPS(tx_power);
DEBUGFS_READ_FILE_OPS(power_save_status);
DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
@@ -2296,9 +1538,10 @@ DEBUGFS_WRITE_FILE_OPS(csr);
DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
-DEBUGFS_WRITE_FILE_OPS(internal_scan);
DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta);
DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
+DEBUGFS_READ_FILE_OPS(rxon_flags);
+DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
/*
* Create the debugfs files and directories
@@ -2334,8 +1577,11 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
DEBUGFS_ADD_FILE(led, dir_data, S_IRUSR);
- DEBUGFS_ADD_FILE(sleep_level_override, dir_data, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(current_sleep_command, dir_data, S_IRUSR);
+ if (!priv->cfg->broken_powersave) {
+ DEBUGFS_ADD_FILE(sleep_level_override, dir_data,
+ S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(current_sleep_command, dir_data, S_IRUSR);
+ }
DEBUGFS_ADD_FILE(thermal_throttling, dir_data, S_IRUSR);
DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR);
@@ -2343,29 +1589,33 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(tx_power, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(csr, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
- DEBUGFS_ADD_FILE(internal_scan, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
- if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) {
- DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
+
+ if (priv->cfg->sensitivity_calib_by_driver)
DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
+ if (priv->cfg->chain_noise_calib_by_driver)
DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
+ if (priv->cfg->ucode_tracing)
DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR);
- }
- DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf, &priv->disable_sens_cal);
- DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
- &priv->disable_chain_noise_cal);
- if (((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965) ||
- ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_3945))
+ DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
+ if (priv->cfg->sensitivity_calib_by_driver)
+ DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
+ &priv->disable_sens_cal);
+ if (priv->cfg->chain_noise_calib_by_driver)
+ DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
+ &priv->disable_chain_noise_cal);
+ if (priv->cfg->tx_power_by_driver)
DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf,
&priv->disable_tx_power_cal);
return 0;
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index ef1720a852e9..19b06629fe41 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -43,6 +43,7 @@
#include "iwl-debug.h"
#include "iwl-4965-hw.h"
#include "iwl-3945-hw.h"
+#include "iwl-agn-hw.h"
#include "iwl-led.h"
#include "iwl-power.h"
#include "iwl-agn-rs.h"
@@ -56,6 +57,7 @@ extern struct iwl_cfg iwl5100_bgn_cfg;
extern struct iwl_cfg iwl5100_abg_cfg;
extern struct iwl_cfg iwl5150_agn_cfg;
extern struct iwl_cfg iwl5150_abg_cfg;
+extern struct iwl_cfg iwl6000g2a_2agn_cfg;
extern struct iwl_cfg iwl6000i_2agn_cfg;
extern struct iwl_cfg iwl6000i_2abg_cfg;
extern struct iwl_cfg iwl6000i_2bg_cfg;
@@ -67,45 +69,6 @@ extern struct iwl_cfg iwl1000_bg_cfg;
struct iwl_tx_queue;
-/* shared structures from iwl-5000.c */
-extern struct iwl_mod_params iwl50_mod_params;
-extern struct iwl_ucode_ops iwl5000_ucode;
-extern struct iwl_lib_ops iwl5000_lib;
-extern struct iwl_hcmd_ops iwl5000_hcmd;
-extern struct iwl_hcmd_utils_ops iwl5000_hcmd_utils;
-
-/* shared functions from iwl-5000.c */
-extern u16 iwl5000_get_hcmd_size(u8 cmd_id, u16 len);
-extern u16 iwl5000_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd,
- u8 *data);
-extern void iwl5000_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
- __le32 *tx_flags);
-extern int iwl5000_calc_rssi(struct iwl_priv *priv,
- struct iwl_rx_phy_res *rx_resp);
-extern void iwl5000_nic_config(struct iwl_priv *priv);
-extern u16 iwl5000_eeprom_calib_version(struct iwl_priv *priv);
-extern const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv,
- size_t offset);
-extern void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- u16 byte_cnt);
-extern void iwl5000_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
- struct iwl_tx_queue *txq);
-extern int iwl5000_load_ucode(struct iwl_priv *priv);
-extern void iwl5000_init_alive_start(struct iwl_priv *priv);
-extern int iwl5000_alive_notify(struct iwl_priv *priv);
-extern int iwl5000_hw_set_hw_params(struct iwl_priv *priv);
-extern int iwl5000_txq_agg_enable(struct iwl_priv *priv, int txq_id,
- int tx_fifo, int sta_id, int tid, u16 ssn_idx);
-extern int iwl5000_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
- u16 ssn_idx, u8 tx_fifo);
-extern void iwl5000_txq_set_sched(struct iwl_priv *priv, u32 mask);
-extern void iwl5000_setup_deferred_work(struct iwl_priv *priv);
-extern void iwl5000_rx_handler_setup(struct iwl_priv *priv);
-extern int iwl5000_hw_valid_rtc_data_addr(u32 addr);
-extern int iwl5000_send_tx_power(struct iwl_priv *priv);
-extern void iwl5000_temperature(struct iwl_priv *priv);
-
/* CT-KILL constants */
#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
#define CT_KILL_THRESHOLD 114 /* in Celsius */
@@ -183,6 +146,10 @@ struct iwl_queue {
int n_bd; /* number of BDs in this queue */
int write_ptr; /* 1-st empty entry (index) host_w*/
int read_ptr; /* last used entry (index) host_r*/
+ /* used for monitoring and recovering a stuck queue */
+ int last_read_ptr; /* storing the last read_ptr */
+ /* number of times read_ptr and last_read_ptr have been the same */
+ u8 repeat_same_read_ptr;
dma_addr_t dma_addr; /* physical addr for BD's */
int n_window; /* safe queue window */
u32 id;
@@ -304,13 +271,11 @@ struct iwl_channel_info {
struct iwl3945_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES];
};
-#define IWL_TX_FIFO_AC0 0
-#define IWL_TX_FIFO_AC1 1
-#define IWL_TX_FIFO_AC2 2
-#define IWL_TX_FIFO_AC3 3
-#define IWL_TX_FIFO_HCCA_1 5
-#define IWL_TX_FIFO_HCCA_2 6
-#define IWL_TX_FIFO_NONE 7
+#define IWL_TX_FIFO_BK 0
+#define IWL_TX_FIFO_BE 1
+#define IWL_TX_FIFO_VI 2
+#define IWL_TX_FIFO_VO 3
+#define IWL_TX_FIFO_UNUSED -1
/* Minimum number of queues. MAX_NUM is defined in hw specific files.
* Set the minimum to accommodate the 4 standard TX queues, 1 command
@@ -361,13 +326,6 @@ enum {
#define DEF_CMD_PAYLOAD_SIZE 320
-/*
- * IWL_LINK_HDR_MAX should include ieee80211_hdr, radiotap header,
- * SNAP header and alignment. It should also be big enough for 802.11
- * control frames.
- */
-#define IWL_LINK_HDR_MAX 64
-
/**
* struct iwl_device_cmd
*
@@ -519,38 +477,28 @@ struct iwl_ht_config {
u8 non_GF_STA_present;
};
-union iwl_qos_capabity {
- struct {
- u8 edca_count:4; /* bit 0-3 */
- u8 q_ack:1; /* bit 4 */
- u8 queue_request:1; /* bit 5 */
- u8 txop_request:1; /* bit 6 */
- u8 reserved:1; /* bit 7 */
- } q_AP;
- struct {
- u8 acvo_APSD:1; /* bit 0 */
- u8 acvi_APSD:1; /* bit 1 */
- u8 ac_bk_APSD:1; /* bit 2 */
- u8 ac_be_APSD:1; /* bit 3 */
- u8 q_ack:1; /* bit 4 */
- u8 max_len:2; /* bit 5-6 */
- u8 more_data_ack:1; /* bit 7 */
- } q_STA;
- u8 val;
-};
-
/* QoS structures */
struct iwl_qos_info {
int qos_active;
- union iwl_qos_capabity qos_cap;
struct iwl_qosparam_cmd def_qos_parm;
};
+/*
+ * This structure should be accessed with sta_lock held. While a station
+ * addition is in progress (IWL_STA_UCODE_INPROGRESS), only the commands
+ * (iwl_addsta_cmd and iwl_link_quality_cmd) may be accessed without
+ * sta_lock held.
+ */
struct iwl_station_entry {
struct iwl_addsta_cmd sta;
struct iwl_tid_data tid[MAX_TID_COUNT];
u8 used;
struct iwl_hw_key keyinfo;
+ struct iwl_link_quality_cmd *lq;
+};
+
+struct iwl_station_priv_common {
+ u8 sta_id;
};
/*
@@ -559,14 +507,28 @@ struct iwl_station_entry {
* When mac80211 creates a station it reserves some space (hw->sta_data_size)
* in the structure for use by the driver. This structure is placed in that
* space.
+ *
+ * The common struct MUST be first because it is shared between
+ * 3945 and agn!
*/
struct iwl_station_priv {
+ struct iwl_station_priv_common common;
struct iwl_lq_sta lq_sta;
atomic_t pending_frames;
bool client;
bool asleep;
};
+/**
+ * struct iwl_vif_priv - driver's private per-interface information
+ *
+ * When mac80211 allocates a virtual interface, it can allocate
+ * space for us to put data into.
+ */
+struct iwl_vif_priv {
+ u8 ibss_bssid_sta_id;
+};
+
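For reference, a minimal sketch (not part of this patch) of how the mac80211 per-station and per-interface areas declared above are typically sized and accessed:

        /* sizing, done once when the hw is set up (sketch) */
        hw->sta_data_size = sizeof(struct iwl_station_priv);
        hw->vif_data_size = sizeof(struct iwl_vif_priv);

        /* access from callbacks that receive the mac80211 objects (sketch) */
        struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
        struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;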
/* one for each uCode image (inst/data, boot/init/runtime) */
struct fw_desc {
void *v_addr; /* access by driver */
@@ -574,7 +536,7 @@ struct fw_desc {
u32 len; /* bytes */
};
-/* uCode file layout */
+/* v1/v2 uCode file layout */
struct iwl_ucode_header {
__le32 ver; /* major/minor/API/serial */
union {
@@ -597,7 +559,62 @@ struct iwl_ucode_header {
} v2;
} u;
};
-#define UCODE_HEADER_SIZE(ver) ((ver) == 1 ? 24 : 28)
+
+/*
+ * new TLV uCode file layout
+ *
+ * The new TLV file format contains TLVs that each specify
+ * some piece of data. To facilitate "groups", for example
+ * different instruction images with different capabilities
+ * bundled with the same init image, an alternative mechanism
+ * is provided:
+ * when the alternative field is 0, the item is always valid.
+ * When it is non-zero, it is only valid in conjunction with
+ * items of the same alternative, in which case the driver
+ * (user) selects one alternative to use.
+ */
+
+enum iwl_ucode_tlv_type {
+ IWL_UCODE_TLV_INVALID = 0, /* unused */
+ IWL_UCODE_TLV_INST = 1,
+ IWL_UCODE_TLV_DATA = 2,
+ IWL_UCODE_TLV_INIT = 3,
+ IWL_UCODE_TLV_INIT_DATA = 4,
+ IWL_UCODE_TLV_BOOT = 5,
+ IWL_UCODE_TLV_PROBE_MAX_LEN = 6, /* a u32 value */
+};
+
+struct iwl_ucode_tlv {
+ __le16 type; /* see above */
+ __le16 alternative; /* see comment */
+ __le32 length; /* not including type/length fields */
+ u8 data[0];
+} __attribute__ ((packed));
+
+#define IWL_TLV_UCODE_MAGIC 0x0a4c5749
+
+struct iwl_tlv_ucode_header {
+ /*
+ * The TLV style ucode header is distinguished from
+ * the v1/v2 style header by the first four bytes being
+ * zero, since that is an invalid combination of
+ * major/minor/API/serial versions.
+ */
+ __le32 zero;
+ __le32 magic;
+ u8 human_readable[64];
+ __le32 ver; /* major/minor/API/serial */
+ __le32 build;
+ __le64 alternatives; /* bitmask of valid alternatives */
+ /*
+ * The data contained herein has a TLV layout,
+ * see above for the TLV header and types.
+ * Note that each TLV is padded to a length
+ * that is a multiple of 4 for alignment.
+ */
+ u8 data[0];
+};
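A rough illustration of walking the TLV area under the layout described above (a sketch only, not the driver's actual parser; ucode and fw_size stand for the mapped firmware image and its total size, and ALIGN() rounds up to the 4-byte padding noted in the comment):

        const u8 *data = ucode->data;
        size_t len = fw_size - sizeof(struct iwl_tlv_ucode_header);

        while (len >= sizeof(struct iwl_ucode_tlv)) {
                const struct iwl_ucode_tlv *tlv = (const void *)data;
                u32 tlv_len = le32_to_cpu(tlv->length);

                if (sizeof(*tlv) + ALIGN(tlv_len, 4) > len)
                        break;  /* truncated file */
                /* skip TLVs whose non-zero alternative was not selected,
                 * then dispatch on le16_to_cpu(tlv->type) */
                data += sizeof(*tlv) + ALIGN(tlv_len, 4);
                len -= sizeof(*tlv) + ALIGN(tlv_len, 4);
        }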
struct iwl4965_ibss_seq {
u8 mac[ETH_ALEN];
@@ -1039,6 +1056,11 @@ struct iwl_event_log {
#define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3)
#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
+/* timer constants used to monitor and recover stuck tx queues, in msecs */
+#define IWL_MONITORING_PERIOD (1000)
+#define IWL_ONE_HUNDRED_MSECS (100)
+#define IWL_SIXTY_SECS (60000)
+
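The new queue fields (last_read_ptr, repeat_same_read_ptr) together with the constants above suggest a periodic watchdog; a sketch of the per-queue check such a timer might perform (assumed logic, not taken from this patch; "threshold" and iwl_force_reset() are placeholders):

        /* assumed per-queue check run from the monitor_recover timer */
        if (q->read_ptr == q->last_read_ptr && q->read_ptr != q->write_ptr) {
                /* no progress on a non-empty queue since the last check */
                q->repeat_same_read_ptr++;
                if (q->repeat_same_read_ptr > threshold)        /* assumed threshold */
                        iwl_force_reset(priv, IWL_FW_RESET);    /* assumed recovery hook */
        } else {
                q->repeat_same_read_ptr = 0;
                q->last_read_ptr = q->read_ptr;
        }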
enum iwl_reset {
IWL_RF_RESET = 0,
IWL_FW_RESET,
@@ -1092,10 +1114,6 @@ struct iwl_priv {
struct iwl_channel_info *channel_info; /* channel info array */
u8 channel_count; /* # of channels */
- /* each calibration channel group in the EEPROM has a derived
- * clip setting for each rate. 3945 only.*/
- const struct iwl3945_clip_group clip39_groups[5];
-
/* thermal calibration */
s32 temperature; /* degrees Kelvin */
s32 last_temperature;
@@ -1104,12 +1122,10 @@ struct iwl_priv {
struct iwl_calib_result calib_results[IWL_CALIB_MAX];
/* Scan related variables */
- unsigned long next_scan_jiffies;
unsigned long scan_start;
- unsigned long scan_pass_start;
unsigned long scan_start_tsf;
- void *scan;
- int scan_bands;
+ void *scan_cmd;
+ enum ieee80211_band scan_band;
struct cfg80211_scan_request *scan_request;
bool is_internal_short_scan;
u8 scan_tx_ant[IEEE80211_NUM_BANDS];
@@ -1168,16 +1184,13 @@ struct iwl_priv {
u64 led_tpt;
u16 active_rate;
- u16 active_rate_basic;
- u8 assoc_station_added;
u8 start_calib;
struct iwl_sensitivity_data sensitivity_data;
struct iwl_chain_noise_data chain_noise_data;
__le16 sensitivity_tbl[HD_TABLE_SIZE];
struct iwl_ht_config current_ht_config;
- u8 last_phy_res[100];
/* Rate scaling data */
u8 retry_rate;
@@ -1197,9 +1210,6 @@ struct iwl_priv {
unsigned long status;
- int last_rx_rssi; /* From Rx packet statistics */
- int last_rx_noise; /* From beacon statistics */
-
/* counts mgmt, ctl, and data packets */
struct traffic_stats tx_stats;
struct traffic_stats rx_stats;
@@ -1218,18 +1228,14 @@ struct iwl_priv {
#endif
/* context information */
- u16 rates_mask;
-
- u8 bssid[ETH_ALEN];
- u16 rts_threshold;
+ u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */
u8 mac_addr[ETH_ALEN];
/*station table variables */
spinlock_t sta_lock;
int num_stations;
struct iwl_station_entry stations[IWL_STATION_COUNT];
- struct iwl_wep_key wep_keys[WEP_KEYS_MAX];
- u8 default_wep_key;
+ struct iwl_wep_key wep_keys[WEP_KEYS_MAX]; /* protected by mutex */
u8 key_mapping_key;
unsigned long ucode_key_table;
@@ -1244,10 +1250,6 @@ struct iwl_priv {
u8 mac80211_registered;
- /* Rx'd packet timing information */
- u32 last_beacon_time;
- u64 last_tsf;
-
/* eeprom -- this is in the card's little endian byte order */
u8 *eeprom;
int nvm_device_type;
@@ -1259,29 +1261,67 @@ struct iwl_priv {
/* Last Rx'd beacon timestamp */
u64 timestamp;
- u16 beacon_int;
struct ieee80211_vif *vif;
- /*Added for 3945 */
- void *shared_virt;
- dma_addr_t shared_phys;
- /*End*/
- struct iwl_hw_params hw_params;
+ union {
+#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
+ struct {
+ void *shared_virt;
+ dma_addr_t shared_phys;
+
+ struct delayed_work thermal_periodic;
+ struct delayed_work rfkill_poll;
+
+ struct iwl3945_notif_statistics statistics;
+#ifdef CONFIG_IWLWIFI_DEBUG
+ struct iwl3945_notif_statistics accum_statistics;
+ struct iwl3945_notif_statistics delta_statistics;
+ struct iwl3945_notif_statistics max_delta;
+#endif
+
+ u32 sta_supp_rates;
+ int last_rx_rssi; /* From Rx packet statistics */
+
+ /* Rx'd packet timing information */
+ u32 last_beacon_time;
+ u64 last_tsf;
+
+ /*
+ * each calibration channel group in the
+ * EEPROM has a derived clip setting for
+ * each rate.
+ */
+ const struct iwl3945_clip_group clip_groups[5];
+
+ } _3945;
+#endif
+#if defined(CONFIG_IWLAGN) || defined(CONFIG_IWLAGN_MODULE)
+ struct {
+ /* INT ICT Table */
+ __le32 *ict_tbl;
+ void *ict_tbl_vir;
+ dma_addr_t ict_tbl_dma;
+ dma_addr_t aligned_ict_tbl_dma;
+ int ict_index;
+ u32 inta;
+ bool use_ict;
+ /*
+ * reports the number of tids that have AGG on;
+ * 0 means no aggregation
+ */
+ u8 agg_tids_count;
+
+ struct iwl_rx_phy_res last_phy_res;
+ bool last_phy_res_valid;
+
+ struct completion firmware_loading_complete;
+ } _agn;
+#endif
+ };
- /* INT ICT Table */
- __le32 *ict_tbl;
- dma_addr_t ict_tbl_dma;
- dma_addr_t aligned_ict_tbl_dma;
- int ict_index;
- void *ict_tbl_vir;
- u32 inta;
- bool use_ict;
+ struct iwl_hw_params hw_params;
u32 inta_mask;
- /* Current association information needed to configure the
- * hardware */
- u16 assoc_id;
- u16 assoc_capability;
struct iwl_qos_info qos_data;
@@ -1291,7 +1331,6 @@ struct iwl_priv {
struct work_struct scan_completed;
struct work_struct rx_replenish;
struct work_struct abort_scan;
- struct work_struct request_scan;
struct work_struct beacon_update;
struct work_struct tt_work;
struct work_struct ct_enter;
@@ -1304,9 +1343,7 @@ struct iwl_priv {
struct delayed_work alive_start;
struct delayed_work scan_check;
- /*For 3945 only*/
- struct delayed_work thermal_periodic;
- struct delayed_work rfkill_poll;
+ struct completion firmware_loading_complete;
/* TX Power */
s8 tx_power_user_lmt;
@@ -1339,13 +1376,8 @@ struct iwl_priv {
struct work_struct run_time_calib_work;
struct timer_list statistics_periodic;
struct timer_list ucode_trace;
+ struct timer_list monitor_recover;
bool hw_ready;
- /*For 3945*/
-#define IWL_DEFAULT_TX_POWER 0x0F
-
- struct iwl3945_notif_statistics statistics_39;
-
- u32 sta_supp_rates;
struct iwl_event_log event_log;
}; /*iwl_priv */
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
index 2ffc2edbf4f0..4a487639d932 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
@@ -37,6 +37,7 @@ EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite8);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ioread32);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite32);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_rx);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_tx);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event);
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index fb5bb487f3bc..ee11452519e6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -590,9 +590,16 @@ int iwl_eeprom_init(struct iwl_priv *priv)
e[addr / 2] = cpu_to_le16(r >> 16);
}
}
+
+ IWL_DEBUG_INFO(priv, "NVM Type: %s, version: 0x%x\n",
+ (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
+ ? "OTP" : "EEPROM",
+ iwl_eeprom_query16(priv, EEPROM_VERSION));
+
ret = 0;
done:
priv->cfg->ops->lib->eeprom_ops.release_semaphore(priv);
+
err:
if (ret)
iwl_eeprom_free(priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index 8171c701e4e1..95aa202c85e3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -172,35 +172,35 @@ struct iwl_eeprom_enhanced_txpwr {
#define EEPROM_5000_TX_POWER_VERSION (4)
#define EEPROM_5000_EEPROM_VERSION (0x11A)
-/*5000 calibrations */
-#define EEPROM_5000_CALIB_ALL (INDIRECT_ADDRESS | INDIRECT_CALIBRATION)
-#define EEPROM_5000_XTAL ((2*0x128) | EEPROM_5000_CALIB_ALL)
-#define EEPROM_5000_TEMPERATURE ((2*0x12A) | EEPROM_5000_CALIB_ALL)
-
-/* 5000 links */
-#define EEPROM_5000_LINK_HOST (2*0x64)
-#define EEPROM_5000_LINK_GENERAL (2*0x65)
-#define EEPROM_5000_LINK_REGULATORY (2*0x66)
-#define EEPROM_5000_LINK_CALIBRATION (2*0x67)
-#define EEPROM_5000_LINK_PROCESS_ADJST (2*0x68)
-#define EEPROM_5000_LINK_OTHERS (2*0x69)
-
-/* 5000 regulatory - indirect access */
-#define EEPROM_5000_REG_SKU_ID ((0x02)\
- | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 4 bytes */
-#define EEPROM_5000_REG_BAND_1_CHANNELS ((0x08)\
+/* 5000 and up calibration */
+#define EEPROM_CALIB_ALL (INDIRECT_ADDRESS | INDIRECT_CALIBRATION)
+#define EEPROM_XTAL ((2*0x128) | EEPROM_CALIB_ALL)
+
+/* 5000 temperature */
+#define EEPROM_5000_TEMPERATURE ((2*0x12A) | EEPROM_CALIB_ALL)
+
+/* agn links */
+#define EEPROM_LINK_HOST (2*0x64)
+#define EEPROM_LINK_GENERAL (2*0x65)
+#define EEPROM_LINK_REGULATORY (2*0x66)
+#define EEPROM_LINK_CALIBRATION (2*0x67)
+#define EEPROM_LINK_PROCESS_ADJST (2*0x68)
+#define EEPROM_LINK_OTHERS (2*0x69)
+
+/* agn regulatory - indirect access */
+#define EEPROM_REG_BAND_1_CHANNELS ((0x08)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 28 bytes */
-#define EEPROM_5000_REG_BAND_2_CHANNELS ((0x26)\
+#define EEPROM_REG_BAND_2_CHANNELS ((0x26)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 26 bytes */
-#define EEPROM_5000_REG_BAND_3_CHANNELS ((0x42)\
+#define EEPROM_REG_BAND_3_CHANNELS ((0x42)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */
-#define EEPROM_5000_REG_BAND_4_CHANNELS ((0x5C)\
+#define EEPROM_REG_BAND_4_CHANNELS ((0x5C)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */
-#define EEPROM_5000_REG_BAND_5_CHANNELS ((0x74)\
+#define EEPROM_REG_BAND_5_CHANNELS ((0x74)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 12 bytes */
-#define EEPROM_5000_REG_BAND_24_HT40_CHANNELS ((0x82)\
+#define EEPROM_REG_BAND_24_HT40_CHANNELS ((0x82)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */
-#define EEPROM_5000_REG_BAND_52_HT40_CHANNELS ((0x92)\
+#define EEPROM_REG_BAND_52_HT40_CHANNELS ((0x92)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */
/* 6000 regulatory - indirect access */
@@ -265,14 +265,21 @@ struct iwl_eeprom_enhanced_txpwr {
#define EEPROM_5050_EEPROM_VERSION (0x21E)
/* 1000 Specific */
+#define EEPROM_1000_TX_POWER_VERSION (4)
#define EEPROM_1000_EEPROM_VERSION (0x15C)
/* 6x00 Specific */
+#define EEPROM_6000_TX_POWER_VERSION (4)
#define EEPROM_6000_EEPROM_VERSION (0x434)
/* 6x50 Specific */
+#define EEPROM_6050_TX_POWER_VERSION (4)
#define EEPROM_6050_EEPROM_VERSION (0x532)
+/* 6x00g2 Specific */
+#define EEPROM_6000G2_TX_POWER_VERSION (6)
+#define EEPROM_6000G2_EEPROM_VERSION (0x709)
+
/* OTP */
/* lower blocks contain EEPROM image and calibration data */
#define OTP_LOW_IMAGE_SIZE (2 * 512 * sizeof(u16)) /* 2 KB */
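Offsets such as EEPROM_REG_BAND_1_CHANNELS carry an INDIRECT_ADDRESS flag plus a section flag; a rough sketch of how such an address can be resolved through the EEPROM_LINK_* table (simplified and hypothetical — the real driver dispatches on more section types than shown here):

	#define EXAMPLE_ADDRESS_MSK	0x0000FFFF

	static u32 example_indirect_address(struct iwl_priv *priv, u32 address)
	{
		u16 base;

		if (!(address & INDIRECT_ADDRESS))
			return address;		/* plain byte offset */

		if (address & INDIRECT_REGULATORY)
			base = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY);
		else if (address & INDIRECT_CALIBRATION)
			base = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION);
		else
			base = iwl_eeprom_query16(priv, EEPROM_LINK_OTHERS);

		/* link entries hold word offsets; convert to bytes before adding */
		return (base << 1) + (address & EXAMPLE_ADDRESS_MSK);
	}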
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index 73681c4fefe7..51f89e7ba681 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -169,7 +169,7 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
mutex_lock(&priv->sync_cmd_mutex);
set_bit(STATUS_HCMD_ACTIVE, &priv->status);
- IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s \n",
+ IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
get_cmd_string(cmd->id));
cmd_idx = iwl_enqueue_hcmd(priv, cmd);
@@ -191,7 +191,7 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
- IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s \n",
+ IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
get_cmd_string(cmd->id));
ret = -ETIMEDOUT;
goto cancel;
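The two strings above frame the synchronous command path: the caller flags HCMD_ACTIVE, enqueues, and sleeps until the response handler clears the bit or the timeout expires. A condensed sketch of that pattern (assuming a wait queue such as priv->wait_command_queue; error paths trimmed):

	static int example_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
	{
		int ret;

		set_bit(STATUS_HCMD_ACTIVE, &priv->status);

		ret = iwl_enqueue_hcmd(priv, cmd);
		if (ret < 0)
			goto out;

		ret = wait_event_interruptible_timeout(priv->wait_command_queue,
				!test_bit(STATUS_HCMD_ACTIVE, &priv->status),
				HOST_COMPLETE_TIMEOUT);
		if (ret == 0) {
			/* nobody cleared the bit for us: give up and undo it here */
			clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
			ret = -ETIMEDOUT;
		} else if (ret > 0) {
			ret = 0;	/* response arrived in time */
		}
	out:
		return ret;
	}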
diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
index 51a67fb2e185..3ff6b9d25a10 100644
--- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
+++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
@@ -31,6 +31,9 @@
#define __iwl_helpers_h__
#include <linux/ctype.h>
+#include <net/mac80211.h>
+
+#include "iwl-io.h"
#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index 16eb3ced9b30..0203a3bbf872 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -298,7 +298,7 @@ static inline u32 __iwl_read_direct32(const char *f, u32 l,
struct iwl_priv *priv, u32 reg)
{
u32 value = _iwl_read_direct32(priv, reg);
- IWL_DEBUG_IO(priv, "read_direct32(0x%4X) = 0x%08x - %s %d \n", reg, value,
+ IWL_DEBUG_IO(priv, "read_direct32(0x%4X) = 0x%08x - %s %d\n", reg, value,
f, l);
return value;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index a6f9c918aabc..db5bfcb036ca 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -46,7 +46,7 @@
static int led_mode;
module_param(led_mode, int, S_IRUGO);
MODULE_PARM_DESC(led_mode, "led mode: 0=blinking, 1=On(RF On)/Off(RF Off), "
- "(default 0)\n");
+ "(default 0)");
static const struct {
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 548dac2f6a96..cda6a94d6cc9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -318,10 +318,7 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
- if (priv->vif)
- dtimper = priv->hw->conf.ps_dtim_period;
- else
- dtimper = 1;
+ dtimper = priv->hw->conf.ps_dtim_period ?: 1;
if (priv->cfg->broken_powersave)
iwl_power_sleep_cam_cmd(priv, &cmd);
@@ -384,10 +381,10 @@ EXPORT_SYMBOL(iwl_ht_enabled);
bool iwl_within_ct_kill_margin(struct iwl_priv *priv)
{
- s32 temp = priv->temperature; /* degrees CELSIUS except 4965 */
+ s32 temp = priv->temperature; /* degrees CELSIUS unless specified otherwise */
bool within_margin = false;
- if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965)
+ if (priv->cfg->temperature_kelvin)
temp = KELVIN_TO_CELSIUS(priv->temperature);
if (!priv->thermal_throttle.advanced_tt)
@@ -840,12 +837,12 @@ EXPORT_SYMBOL(iwl_tt_exit_ct_kill);
static void iwl_bg_tt_work(struct work_struct *work)
{
struct iwl_priv *priv = container_of(work, struct iwl_priv, tt_work);
- s32 temp = priv->temperature; /* degrees CELSIUS except 4965 */
+ s32 temp = priv->temperature; /* degrees CELSIUS unless specified otherwise */
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
- if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965)
+ if (priv->cfg->temperature_kelvin)
temp = KELVIN_TO_CELSIUS(priv->temperature);
if (!priv->thermal_throttle.advanced_tt)
@@ -875,7 +872,7 @@ void iwl_tt_initialize(struct iwl_priv *priv)
int size = sizeof(struct iwl_tt_trans) * (IWL_TI_STATE_MAX - 1);
struct iwl_tt_trans *transaction;
- IWL_DEBUG_POWER(priv, "Initialize Thermal Throttling \n");
+ IWL_DEBUG_POWER(priv, "Initialize Thermal Throttling\n");
memset(tt, 0, sizeof(struct iwl_tt_mgmt));
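The dtim-period assignment earlier in this file's diff uses the GNU "?:" shorthand, which reuses the tested value when it is non-zero; spelled out, it is equivalent to the following sketch:

	static int example_dtim_period(struct iwl_priv *priv)
	{
		int dtimper = priv->hw->conf.ps_dtim_period;

		if (!dtimper)
			dtimper = 1;	/* no DTIM information yet: assume every beacon */

		return dtimper;
	}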
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index d2d2a9174900..b1f101caf19d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -254,7 +254,7 @@
* device. A queue maps to only one (selectable by driver) Tx DMA channel,
* but one DMA channel may take input from several queues.
*
- * Tx DMA channels have dedicated purposes. For 4965, they are used as follows
+ * Tx DMA FIFOs have dedicated purposes. For 4965, they are used as follows
* (cf. default_queue_to_tx_fifo in iwl-4965.c):
*
* 0 -- EDCA BK (background) frames, lowest priority
@@ -262,20 +262,20 @@
* 2 -- EDCA VI (video) frames, higher priority
* 3 -- EDCA VO (voice) and management frames, highest priority
* 4 -- Commands (e.g. RXON, etc.)
- * 5 -- HCCA short frames
- * 6 -- HCCA long frames
+ * 5 -- unused (HCCA)
+ * 6 -- unused (HCCA)
* 7 -- not used by driver (device-internal only)
*
- * For 5000 series and up, they are used slightly differently
+ * For 5000 series and up, they are used differently
* (cf. iwl5000_default_queue_to_tx_fifo in iwl-5000.c):
*
* 0 -- EDCA BK (background) frames, lowest priority
* 1 -- EDCA BE (best effort) frames, normal priority
* 2 -- EDCA VI (video) frames, higher priority
* 3 -- EDCA VO (voice) and management frames, highest priority
- * 4 -- (TBD)
- * 5 -- HCCA short frames
- * 6 -- HCCA long frames
+ * 4 -- unused
+ * 5 -- unused
+ * 6 -- unused
* 7 -- Commands
*
* Driver should normally map queues 0-6 to Tx DMA/FIFO channels 0-6.
@@ -529,48 +529,48 @@
#define IWL_SCD_TXFIFO_POS_RA (4)
#define IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF)
-/* 5000 SCD */
-#define IWL50_SCD_QUEUE_STTS_REG_POS_TXF (0)
-#define IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE (3)
-#define IWL50_SCD_QUEUE_STTS_REG_POS_WSL (4)
-#define IWL50_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (19)
-#define IWL50_SCD_QUEUE_STTS_REG_MSK (0x00FF0000)
-
-#define IWL50_SCD_QUEUE_CTX_REG1_CREDIT_POS (8)
-#define IWL50_SCD_QUEUE_CTX_REG1_CREDIT_MSK (0x00FFFF00)
-#define IWL50_SCD_QUEUE_CTX_REG1_SUPER_CREDIT_POS (24)
-#define IWL50_SCD_QUEUE_CTX_REG1_SUPER_CREDIT_MSK (0xFF000000)
-#define IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS (0)
-#define IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK (0x0000007F)
-#define IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
-#define IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
-
-#define IWL50_SCD_CONTEXT_DATA_OFFSET (0x600)
-#define IWL50_SCD_TX_STTS_BITMAP_OFFSET (0x7B1)
-#define IWL50_SCD_TRANSLATE_TBL_OFFSET (0x7E0)
-
-#define IWL50_SCD_CONTEXT_QUEUE_OFFSET(x)\
- (IWL50_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
-
-#define IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
- ((IWL50_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffc)
-
-#define IWL50_SCD_QUEUECHAIN_SEL_ALL(x) (((1<<(x)) - 1) &\
+/* agn SCD */
+#define IWLAGN_SCD_QUEUE_STTS_REG_POS_TXF (0)
+#define IWLAGN_SCD_QUEUE_STTS_REG_POS_ACTIVE (3)
+#define IWLAGN_SCD_QUEUE_STTS_REG_POS_WSL (4)
+#define IWLAGN_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (19)
+#define IWLAGN_SCD_QUEUE_STTS_REG_MSK (0x00FF0000)
+
+#define IWLAGN_SCD_QUEUE_CTX_REG1_CREDIT_POS (8)
+#define IWLAGN_SCD_QUEUE_CTX_REG1_CREDIT_MSK (0x00FFFF00)
+#define IWLAGN_SCD_QUEUE_CTX_REG1_SUPER_CREDIT_POS (24)
+#define IWLAGN_SCD_QUEUE_CTX_REG1_SUPER_CREDIT_MSK (0xFF000000)
+#define IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS (0)
+#define IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK (0x0000007F)
+#define IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
+#define IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
+
+#define IWLAGN_SCD_CONTEXT_DATA_OFFSET (0x600)
+#define IWLAGN_SCD_TX_STTS_BITMAP_OFFSET (0x7B1)
+#define IWLAGN_SCD_TRANSLATE_TBL_OFFSET (0x7E0)
+
+#define IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(x)\
+ (IWLAGN_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
+
+#define IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
+ ((IWLAGN_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffc)
+
+#define IWLAGN_SCD_QUEUECHAIN_SEL_ALL(x) (((1<<(x)) - 1) &\
(~(1<<IWL_CMD_QUEUE_NUM)))
-#define IWL50_SCD_BASE (PRPH_BASE + 0xa02c00)
-
-#define IWL50_SCD_SRAM_BASE_ADDR (IWL50_SCD_BASE + 0x0)
-#define IWL50_SCD_DRAM_BASE_ADDR (IWL50_SCD_BASE + 0x8)
-#define IWL50_SCD_AIT (IWL50_SCD_BASE + 0x0c)
-#define IWL50_SCD_TXFACT (IWL50_SCD_BASE + 0x10)
-#define IWL50_SCD_ACTIVE (IWL50_SCD_BASE + 0x14)
-#define IWL50_SCD_QUEUE_WRPTR(x) (IWL50_SCD_BASE + 0x18 + (x) * 4)
-#define IWL50_SCD_QUEUE_RDPTR(x) (IWL50_SCD_BASE + 0x68 + (x) * 4)
-#define IWL50_SCD_QUEUECHAIN_SEL (IWL50_SCD_BASE + 0xe8)
-#define IWL50_SCD_AGGR_SEL (IWL50_SCD_BASE + 0x248)
-#define IWL50_SCD_INTERRUPT_MASK (IWL50_SCD_BASE + 0x108)
-#define IWL50_SCD_QUEUE_STATUS_BITS(x) (IWL50_SCD_BASE + 0x10c + (x) * 4)
+#define IWLAGN_SCD_BASE (PRPH_BASE + 0xa02c00)
+
+#define IWLAGN_SCD_SRAM_BASE_ADDR (IWLAGN_SCD_BASE + 0x0)
+#define IWLAGN_SCD_DRAM_BASE_ADDR (IWLAGN_SCD_BASE + 0x8)
+#define IWLAGN_SCD_AIT (IWLAGN_SCD_BASE + 0x0c)
+#define IWLAGN_SCD_TXFACT (IWLAGN_SCD_BASE + 0x10)
+#define IWLAGN_SCD_ACTIVE (IWLAGN_SCD_BASE + 0x14)
+#define IWLAGN_SCD_QUEUE_WRPTR(x) (IWLAGN_SCD_BASE + 0x18 + (x) * 4)
+#define IWLAGN_SCD_QUEUE_RDPTR(x) (IWLAGN_SCD_BASE + 0x68 + (x) * 4)
+#define IWLAGN_SCD_QUEUECHAIN_SEL (IWLAGN_SCD_BASE + 0xe8)
+#define IWLAGN_SCD_AGGR_SEL (IWLAGN_SCD_BASE + 0x248)
+#define IWLAGN_SCD_INTERRUPT_MASK (IWLAGN_SCD_BASE + 0x108)
+#define IWLAGN_SCD_QUEUE_STATUS_BITS(x) (IWLAGN_SCD_BASE + 0x10c + (x) * 4)
/*********************** END TX SCHEDULER *************************************/
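For reference, a small lookup table restating, in code form, the agn Tx FIFO assignment described in the comment earlier in this header (illustrative only, not part of the patch):

	/* FIFO purposes for 5000 series and up, as described in the comment above */
	static const char * const example_agn_tx_fifo_purpose[] = {
		[0] = "EDCA BK (background), lowest priority",
		[1] = "EDCA BE (best effort), normal priority",
		[2] = "EDCA VI (video), higher priority",
		[3] = "EDCA VO (voice) and management, highest priority",
		[4] = "unused",
		[5] = "unused",
		[6] = "unused",
		[7] = "host commands",
	};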
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index e5eb339107dd..0a5d7cf25196 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -163,197 +163,6 @@ void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q
spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr);
-/**
- * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
- */
-static inline __le32 iwl_dma_addr2rbd_ptr(struct iwl_priv *priv,
- dma_addr_t dma_addr)
-{
- return cpu_to_le32((u32)(dma_addr >> 8));
-}
-
-/**
- * iwl_rx_queue_restock - refill RX queue from pre-allocated pool
- *
- * If there are slots in the RX queue that need to be restocked,
- * and we have free pre-allocated buffers, fill the ranks as much
- * as we can, pulling from rx_free.
- *
- * This moves the 'write' index forward to catch up with 'processed', and
- * also updates the memory address in the firmware to reference the new
- * target buffer.
- */
-void iwl_rx_queue_restock(struct iwl_priv *priv)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct list_head *element;
- struct iwl_rx_mem_buffer *rxb;
- unsigned long flags;
- int write;
-
- spin_lock_irqsave(&rxq->lock, flags);
- write = rxq->write & ~0x7;
- while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
- /* Get next free Rx buffer, remove from free list */
- element = rxq->rx_free.next;
- rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
- list_del(element);
-
- /* Point to Rx buffer via next RBD in circular buffer */
- rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->page_dma);
- rxq->queue[rxq->write] = rxb;
- rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
- rxq->free_count--;
- }
- spin_unlock_irqrestore(&rxq->lock, flags);
- /* If the pre-allocated buffer pool is dropping low, schedule to
- * refill it */
- if (rxq->free_count <= RX_LOW_WATERMARK)
- queue_work(priv->workqueue, &priv->rx_replenish);
-
-
- /* If we've added more space for the firmware to place data, tell it.
- * Increment device's write pointer in multiples of 8. */
- if (rxq->write_actual != (rxq->write & ~0x7)) {
- spin_lock_irqsave(&rxq->lock, flags);
- rxq->need_update = 1;
- spin_unlock_irqrestore(&rxq->lock, flags);
- iwl_rx_queue_update_write_ptr(priv, rxq);
- }
-}
-EXPORT_SYMBOL(iwl_rx_queue_restock);
-
-
-/**
- * iwl_rx_replenish - Move all used packet from rx_used to rx_free
- *
- * When moving to rx_free an SKB is allocated for the slot.
- *
- * Also restock the Rx queue via iwl_rx_queue_restock.
- * This is called as a scheduled work item (except for during initialization)
- */
-void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct list_head *element;
- struct iwl_rx_mem_buffer *rxb;
- struct page *page;
- unsigned long flags;
- gfp_t gfp_mask = priority;
-
- while (1) {
- spin_lock_irqsave(&rxq->lock, flags);
- if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
- return;
- }
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- if (rxq->free_count > RX_LOW_WATERMARK)
- gfp_mask |= __GFP_NOWARN;
-
- if (priv->hw_params.rx_page_order > 0)
- gfp_mask |= __GFP_COMP;
-
- /* Alloc a new receive buffer */
- page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
- if (!page) {
- if (net_ratelimit())
- IWL_DEBUG_INFO(priv, "alloc_pages failed, "
- "order: %d\n",
- priv->hw_params.rx_page_order);
-
- if ((rxq->free_count <= RX_LOW_WATERMARK) &&
- net_ratelimit())
- IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
- priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
- rxq->free_count);
- /* We don't reschedule replenish work here -- we will
- * call the restock method and if it still needs
- * more buffers it will schedule replenish */
- return;
- }
-
- spin_lock_irqsave(&rxq->lock, flags);
-
- if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
- __free_pages(page, priv->hw_params.rx_page_order);
- return;
- }
- element = rxq->rx_used.next;
- rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
- list_del(element);
-
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- rxb->page = page;
- /* Get physical address of the RB */
- rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- /* dma address must be no more than 36 bits */
- BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
- /* and also 256 byte aligned! */
- BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
-
- spin_lock_irqsave(&rxq->lock, flags);
-
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
- priv->alloc_rxb_page++;
-
- spin_unlock_irqrestore(&rxq->lock, flags);
- }
-}
-
-void iwl_rx_replenish(struct iwl_priv *priv)
-{
- unsigned long flags;
-
- iwl_rx_allocate(priv, GFP_KERNEL);
-
- spin_lock_irqsave(&priv->lock, flags);
- iwl_rx_queue_restock(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-EXPORT_SYMBOL(iwl_rx_replenish);
-
-void iwl_rx_replenish_now(struct iwl_priv *priv)
-{
- iwl_rx_allocate(priv, GFP_ATOMIC);
-
- iwl_rx_queue_restock(priv);
-}
-EXPORT_SYMBOL(iwl_rx_replenish_now);
-
-
-/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
- * If an SKB has been detached, the POOL needs to have its SKB set to NULL
- * This free routine walks the list of POOL entries and if SKB is set to
- * non NULL it is unmapped and freed
- */
-void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- int i;
- for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
- if (rxq->pool[i].page != NULL) {
- pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- __iwl_free_pages(priv, rxq->pool[i].page);
- rxq->pool[i].page = NULL;
- }
- }
-
- dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
- rxq->dma_addr);
- dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
- rxq->rb_stts, rxq->rb_stts_dma);
- rxq->bd = NULL;
- rxq->rb_stts = NULL;
-}
-EXPORT_SYMBOL(iwl_rx_queue_free);
int iwl_rx_queue_alloc(struct iwl_priv *priv)
{
@@ -396,98 +205,6 @@ err_bd:
}
EXPORT_SYMBOL(iwl_rx_queue_alloc);
-void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- unsigned long flags;
- int i;
- spin_lock_irqsave(&rxq->lock, flags);
- INIT_LIST_HEAD(&rxq->rx_free);
- INIT_LIST_HEAD(&rxq->rx_used);
- /* Fill the rx_used queue with _all_ of the Rx buffers */
- for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
- /* In the reset function, these buffers may have been allocated
- * to an SKB, so we need to unmap and free potential storage */
- if (rxq->pool[i].page != NULL) {
- pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- __iwl_free_pages(priv, rxq->pool[i].page);
- rxq->pool[i].page = NULL;
- }
- list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
- }
-
- /* Set us so that we have processed and used all buffers, but have
- * not restocked the Rx queue with fresh buffers */
- rxq->read = rxq->write = 0;
- rxq->write_actual = 0;
- rxq->free_count = 0;
- spin_unlock_irqrestore(&rxq->lock, flags);
-}
-
-int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- u32 rb_size;
- const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
- u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */
-
- if (!priv->cfg->use_isr_legacy)
- rb_timeout = RX_RB_TIMEOUT;
-
- if (priv->cfg->mod_params->amsdu_size_8K)
- rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
- else
- rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
-
- /* Stop Rx DMA */
- iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
-
- /* Reset driver's Rx queue write index */
- iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
-
- /* Tell device where to find RBD circular buffer in DRAM */
- iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
- (u32)(rxq->dma_addr >> 8));
-
- /* Tell device where in DRAM to update its Rx status */
- iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
- rxq->rb_stts_dma >> 4);
-
- /* Enable Rx DMA
- * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
- * the credit mechanism in 5000 HW RX FIFO
- * Direct rx interrupts to hosts
- * Rx buffer size 4 or 8k
- * RB timeout 0x10
- * 256 RBDs
- */
- iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
- FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
- FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
- FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
- FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
- rb_size|
- (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
- (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
-
- /* Set interrupt coalescing timer to default (2048 usecs) */
- iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
-
- return 0;
-}
-
-int iwl_rxq_stop(struct iwl_priv *priv)
-{
-
- /* stop Rx DMA */
- iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
- iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
- FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_rxq_stop);
-
void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
@@ -543,6 +260,7 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
int bcn_silence_c =
le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
+ int last_rx_noise;
if (bcn_silence_a) {
total_silence += bcn_silence_a;
@@ -559,13 +277,13 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
/* Average among active antennas */
if (num_active_rx)
- priv->last_rx_noise = (total_silence / num_active_rx) - 107;
+ last_rx_noise = (total_silence / num_active_rx) - 107;
else
- priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
+ last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
bcn_silence_a, bcn_silence_b, bcn_silence_c,
- priv->last_rx_noise);
+ last_rx_noise);
}
#ifdef CONFIG_IWLWIFI_DEBUG
@@ -617,29 +335,20 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv,
#define REG_RECALIB_PERIOD (60)
-#define PLCP_MSG "plcp_err exceeded %u, %u, %u, %u, %u, %d, %u mSecs\n"
-void iwl_rx_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+/**
+ * iwl_good_plcp_health - checks for plcp error.
+ *
+ * When the plcp error exceeds the threshold, reset the radio
+ * to improve the throughput.
+ */
+bool iwl_good_plcp_health(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt)
{
- int change;
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ bool rc = true;
int combined_plcp_delta;
unsigned int plcp_msec;
unsigned long plcp_received_jiffies;
- IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
- (int)sizeof(priv->statistics),
- le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
-
- change = ((priv->statistics.general.temperature !=
- pkt->u.stats.general.temperature) ||
- ((priv->statistics.flag &
- STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
- (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
-#endif
/*
* check for plcp_err and trigger radio reset if it exceeds
* the plcp error threshold plcp_delta.
@@ -660,11 +369,11 @@ void iwl_rx_statistics(struct iwl_priv *priv,
le32_to_cpu(priv->statistics.rx.ofdm_ht.plcp_err));
if ((combined_plcp_delta > 0) &&
- ((combined_plcp_delta * 100) / plcp_msec) >
+ ((combined_plcp_delta * 100) / plcp_msec) >
priv->cfg->plcp_delta_threshold) {
/*
- * if plcp_err exceed the threshold, the following
- * data is printed in csv format:
+ * if plcp_err exceeds the threshold,
+ * the following data is printed in csv format:
* Text: plcp_err exceeded %d,
* Received ofdm.plcp_err,
* Current ofdm.plcp_err,
@@ -673,22 +382,76 @@ void iwl_rx_statistics(struct iwl_priv *priv,
* combined_plcp_delta,
* plcp_msec
*/
- IWL_DEBUG_RADIO(priv, PLCP_MSG,
+ IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
+ "%u, %u, %u, %u, %d, %u mSecs\n",
priv->cfg->plcp_delta_threshold,
le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err),
le32_to_cpu(priv->statistics.rx.ofdm.plcp_err),
le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err),
le32_to_cpu(
- priv->statistics.rx.ofdm_ht.plcp_err),
+ priv->statistics.rx.ofdm_ht.plcp_err),
combined_plcp_delta, plcp_msec);
+ rc = false;
+ }
+ }
+ return rc;
+}
+EXPORT_SYMBOL(iwl_good_plcp_health);
- /*
- * Reset the RF radio due to the high plcp
- * error rate
- */
- iwl_force_reset(priv, IWL_RF_RESET);
+void iwl_recover_from_statistics(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt)
+{
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+ if (iwl_is_associated(priv)) {
+ if (priv->cfg->ops->lib->check_ack_health) {
+ if (!priv->cfg->ops->lib->check_ack_health(
+ priv, pkt)) {
+ /*
+ * low ack count detected
+ * restart Firmware
+ */
+ IWL_ERR(priv, "low ack count detected, "
+ "restart firmware\n");
+ if (!iwl_force_reset(priv, IWL_FW_RESET))
+ return;
+ }
+ }
+ if (priv->cfg->ops->lib->check_plcp_health) {
+ if (!priv->cfg->ops->lib->check_plcp_health(
+ priv, pkt)) {
+ /*
+ * high plcp error detected
+ * reset Radio
+ */
+ iwl_force_reset(priv, IWL_RF_RESET);
+ }
}
}
+}
+EXPORT_SYMBOL(iwl_recover_from_statistics);
+
+void iwl_rx_statistics(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ int change;
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+
+ IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
+ (int)sizeof(priv->statistics),
+ le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
+
+ change = ((priv->statistics.general.temperature !=
+ pkt->u.stats.general.temperature) ||
+ ((priv->statistics.flag &
+ STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
+ (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
+#endif
+ iwl_recover_from_statistics(priv, pkt);
memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));
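The plcp health check introduced above reduces to a rate comparison: the growth in OFDM plus OFDM-HT plcp errors, scaled against the elapsed milliseconds, versus the per-device plcp_delta_threshold. A distilled sketch of that arithmetic (helper name illustrative):

	static bool example_plcp_healthy(u32 delta_ofdm, u32 delta_ofdm_ht,
					 unsigned int plcp_msec, u32 threshold)
	{
		u32 combined_delta = delta_ofdm + delta_ofdm_ht;

		if (!plcp_msec)
			return true;	/* no time base yet, nothing to judge */

		/* unhealthy once the error rate exceeds the configured threshold */
		return (combined_delta * 100) / plcp_msec <= threshold;
	}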
@@ -731,139 +494,6 @@ void iwl_reply_statistics(struct iwl_priv *priv,
}
EXPORT_SYMBOL(iwl_reply_statistics);
-/* Calc max signal level (dBm) among 3 possible receivers */
-static inline int iwl_calc_rssi(struct iwl_priv *priv,
- struct iwl_rx_phy_res *rx_resp)
-{
- return priv->cfg->ops->utils->calc_rssi(priv, rx_resp);
-}
-
-#ifdef CONFIG_IWLWIFI_DEBUG
-/**
- * iwl_dbg_report_frame - dump frame to syslog during debug sessions
- *
- * You may hack this function to show different aspects of received frames,
- * including selective frame dumps.
- * group100 parameter selects whether to show 1 out of 100 good data frames.
- * All beacon and probe response frames are printed.
- */
-static void iwl_dbg_report_frame(struct iwl_priv *priv,
- struct iwl_rx_phy_res *phy_res, u16 length,
- struct ieee80211_hdr *header, int group100)
-{
- u32 to_us;
- u32 print_summary = 0;
- u32 print_dump = 0; /* set to 1 to dump all frames' contents */
- u32 hundred = 0;
- u32 dataframe = 0;
- __le16 fc;
- u16 seq_ctl;
- u16 channel;
- u16 phy_flags;
- u32 rate_n_flags;
- u32 tsf_low;
- int rssi;
-
- if (likely(!(iwl_get_debug_level(priv) & IWL_DL_RX)))
- return;
-
- /* MAC header */
- fc = header->frame_control;
- seq_ctl = le16_to_cpu(header->seq_ctrl);
-
- /* metadata */
- channel = le16_to_cpu(phy_res->channel);
- phy_flags = le16_to_cpu(phy_res->phy_flags);
- rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
-
- /* signal statistics */
- rssi = iwl_calc_rssi(priv, phy_res);
- tsf_low = le64_to_cpu(phy_res->timestamp) & 0x0ffffffff;
-
- to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
-
- /* if data frame is to us and all is good,
- * (optionally) print summary for only 1 out of every 100 */
- if (to_us && (fc & ~cpu_to_le16(IEEE80211_FCTL_PROTECTED)) ==
- cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
- dataframe = 1;
- if (!group100)
- print_summary = 1; /* print each frame */
- else if (priv->framecnt_to_us < 100) {
- priv->framecnt_to_us++;
- print_summary = 0;
- } else {
- priv->framecnt_to_us = 0;
- print_summary = 1;
- hundred = 1;
- }
- } else {
- /* print summary for all other frames */
- print_summary = 1;
- }
-
- if (print_summary) {
- char *title;
- int rate_idx;
- u32 bitrate;
-
- if (hundred)
- title = "100Frames";
- else if (ieee80211_has_retry(fc))
- title = "Retry";
- else if (ieee80211_is_assoc_resp(fc))
- title = "AscRsp";
- else if (ieee80211_is_reassoc_resp(fc))
- title = "RasRsp";
- else if (ieee80211_is_probe_resp(fc)) {
- title = "PrbRsp";
- print_dump = 1; /* dump frame contents */
- } else if (ieee80211_is_beacon(fc)) {
- title = "Beacon";
- print_dump = 1; /* dump frame contents */
- } else if (ieee80211_is_atim(fc))
- title = "ATIM";
- else if (ieee80211_is_auth(fc))
- title = "Auth";
- else if (ieee80211_is_deauth(fc))
- title = "DeAuth";
- else if (ieee80211_is_disassoc(fc))
- title = "DisAssoc";
- else
- title = "Frame";
-
- rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
- if (unlikely((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT))) {
- bitrate = 0;
- WARN_ON_ONCE(1);
- } else {
- bitrate = iwl_rates[rate_idx].ieee / 2;
- }
-
- /* print frame summary.
- * MAC addresses show just the last byte (for brevity),
- * but you can hack it to show more, if you'd like to. */
- if (dataframe)
- IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, "
- "len=%u, rssi=%d, chnl=%d, rate=%u, \n",
- title, le16_to_cpu(fc), header->addr1[5],
- length, rssi, channel, bitrate);
- else {
- /* src/dst addresses assume managed mode */
- IWL_DEBUG_RX(priv, "%s: 0x%04x, dst=0x%02x, src=0x%02x, "
- "len=%u, rssi=%d, tim=%lu usec, "
- "phy=0x%02x, chnl=%d\n",
- title, le16_to_cpu(fc), header->addr1[5],
- header->addr3[5], length, rssi,
- tsf_low - priv->scan_start_tsf,
- phy_flags, channel);
- }
- }
- if (print_dump)
- iwl_print_hex_dump(priv, IWL_DL_RX, header, length);
-}
-#endif
-
/*
* returns non-zero if packet should be dropped
*/
@@ -911,305 +541,3 @@ int iwl_set_decrypted_flag(struct iwl_priv *priv,
return 0;
}
EXPORT_SYMBOL(iwl_set_decrypted_flag);
-
-static u32 iwl_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
-{
- u32 decrypt_out = 0;
-
- if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
- RX_RES_STATUS_STATION_FOUND)
- decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
- RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
-
- decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
-
- /* packet was not encrypted */
- if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
- RX_RES_STATUS_SEC_TYPE_NONE)
- return decrypt_out;
-
- /* packet was encrypted with unknown alg */
- if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
- RX_RES_STATUS_SEC_TYPE_ERR)
- return decrypt_out;
-
- /* decryption was not done in HW */
- if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
- RX_MPDU_RES_STATUS_DEC_DONE_MSK)
- return decrypt_out;
-
- switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
-
- case RX_RES_STATUS_SEC_TYPE_CCMP:
- /* alg is CCM: check MIC only */
- if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
- /* Bad MIC */
- decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
- else
- decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
-
- break;
-
- case RX_RES_STATUS_SEC_TYPE_TKIP:
- if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
- /* Bad TTAK */
- decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
- break;
- }
- /* fall through if TTAK OK */
- default:
- if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
- decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
- else
- decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
- break;
- };
-
- IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
- decrypt_in, decrypt_out);
-
- return decrypt_out;
-}
-
-static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
- struct ieee80211_hdr *hdr,
- u16 len,
- u32 ampdu_status,
- struct iwl_rx_mem_buffer *rxb,
- struct ieee80211_rx_status *stats)
-{
- struct sk_buff *skb;
- int ret = 0;
- __le16 fc = hdr->frame_control;
-
- /* We only process data packets if the interface is open */
- if (unlikely(!priv->is_open)) {
- IWL_DEBUG_DROP_LIMIT(priv,
- "Dropping packet while interface is not open.\n");
- return;
- }
-
- /* In case of HW accelerated crypto and bad decryption, drop */
- if (!priv->cfg->mod_params->sw_crypto &&
- iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
- return;
-
- skb = alloc_skb(IWL_LINK_HDR_MAX * 2, GFP_ATOMIC);
- if (!skb) {
- IWL_ERR(priv, "alloc_skb failed\n");
- return;
- }
-
- skb_reserve(skb, IWL_LINK_HDR_MAX);
- skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
-
- /* mac80211 currently doesn't support paged SKB. Convert it to
- * linear SKB for management frame and data frame requires
- * software decryption or software defragementation. */
- if (ieee80211_is_mgmt(fc) ||
- ieee80211_has_protected(fc) ||
- ieee80211_has_morefrags(fc) ||
- le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG ||
- (ieee80211_is_data_qos(fc) &&
- *ieee80211_get_qos_ctl(hdr) &
- IEEE80211_QOS_CONTROL_A_MSDU_PRESENT))
- ret = skb_linearize(skb);
- else
- ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
- 0 : -ENOMEM;
-
- if (ret) {
- kfree_skb(skb);
- goto out;
- }
-
- /*
- * XXX: We cannot touch the page and its virtual memory (hdr) after
- * here. It might have already been freed by the above skb change.
- */
-
- iwl_update_stats(priv, false, fc, len);
- memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
-
- ieee80211_rx(priv->hw, skb);
- out:
- priv->alloc_rxb_page--;
- rxb->page = NULL;
-}
-
-/* This is necessary only for a number of statistics, see the caller. */
-static int iwl_is_network_packet(struct iwl_priv *priv,
- struct ieee80211_hdr *header)
-{
- /* Filter incoming packets to determine if they are targeted toward
- * this network, discarding packets coming from ourselves */
- switch (priv->iw_mode) {
- case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source | BSSID */
- /* packets to our IBSS update information */
- return !compare_ether_addr(header->addr3, priv->bssid);
- case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */
- /* packets to our IBSS update information */
- return !compare_ether_addr(header->addr2, priv->bssid);
- default:
- return 1;
- }
-}
-
-/* Called for REPLY_RX (legacy ABG frames), or
- * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
-void iwl_rx_reply_rx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct ieee80211_hdr *header;
- struct ieee80211_rx_status rx_status;
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_rx_phy_res *phy_res;
- __le32 rx_pkt_status;
- struct iwl4965_rx_mpdu_res_start *amsdu;
- u32 len;
- u32 ampdu_status;
- u32 rate_n_flags;
-
- /**
- * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
- * REPLY_RX: physical layer info is in this buffer
- * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
- * command and cached in priv->last_phy_res
- *
- * Here we set up local variables depending on which command is
- * received.
- */
- if (pkt->hdr.cmd == REPLY_RX) {
- phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
- header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
- + phy_res->cfg_phy_cnt);
-
- len = le16_to_cpu(phy_res->byte_count);
- rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
- phy_res->cfg_phy_cnt + len);
- ampdu_status = le32_to_cpu(rx_pkt_status);
- } else {
- if (!priv->last_phy_res[0]) {
- IWL_ERR(priv, "MPDU frame without cached PHY data\n");
- return;
- }
- phy_res = (struct iwl_rx_phy_res *)&priv->last_phy_res[1];
- amsdu = (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
- header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
- len = le16_to_cpu(amsdu->byte_count);
- rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
- ampdu_status = iwl_translate_rx_status(priv,
- le32_to_cpu(rx_pkt_status));
- }
-
- if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
- IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
- phy_res->cfg_phy_cnt);
- return;
- }
-
- if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
- !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
- IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
- le32_to_cpu(rx_pkt_status));
- return;
- }
-
- /* This will be used in several places later */
- rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
-
- /* rx_status carries information about the packet to mac80211 */
- rx_status.mactime = le64_to_cpu(phy_res->timestamp);
- rx_status.freq =
- ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel));
- rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
- IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
- rx_status.rate_idx =
- iwl_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
- rx_status.flag = 0;
-
- /* TSF isn't reliable. In order to allow smooth user experience,
- * this W/A doesn't propagate it to the mac80211 */
- /*rx_status.flag |= RX_FLAG_TSFT;*/
-
- priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
-
- /* Find max signal strength (dBm) among 3 antenna/receiver chains */
- rx_status.signal = iwl_calc_rssi(priv, phy_res);
-
- /* Meaningful noise values are available only from beacon statistics,
- * which are gathered only when associated, and indicate noise
- * only for the associated network channel ...
- * Ignore these noise values while scanning (other channels) */
- if (iwl_is_associated(priv) &&
- !test_bit(STATUS_SCANNING, &priv->status)) {
- rx_status.noise = priv->last_rx_noise;
- } else {
- rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
- }
-
- /* Reset beacon noise level if not associated. */
- if (!iwl_is_associated(priv))
- priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- /* Set "1" to report good data frames in groups of 100 */
- if (unlikely(iwl_get_debug_level(priv) & IWL_DL_RX))
- iwl_dbg_report_frame(priv, phy_res, len, header, 1);
-#endif
- iwl_dbg_log_rx_data_frame(priv, len, header);
- IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, noise %d, TSF %llu\n",
- rx_status.signal, rx_status.noise,
- (unsigned long long)rx_status.mactime);
-
- /*
- * "antenna number"
- *
- * It seems that the antenna field in the phy flags value
- * is actually a bit field. This is undefined by radiotap,
- * it wants an actual antenna number but I always get "7"
- * for most legacy frames I receive indicating that the
- * same frame was received on all three RX chains.
- *
- * I think this field should be removed in favor of a
- * new 802.11n radiotap field "RX chains" that is defined
- * as a bitmask.
- */
- rx_status.antenna =
- (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
- >> RX_RES_PHY_FLAGS_ANTENNA_POS;
-
- /* set the preamble flag if appropriate */
- if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
- rx_status.flag |= RX_FLAG_SHORTPRE;
-
- /* Set up the HT phy flags */
- if (rate_n_flags & RATE_MCS_HT_MSK)
- rx_status.flag |= RX_FLAG_HT;
- if (rate_n_flags & RATE_MCS_HT40_MSK)
- rx_status.flag |= RX_FLAG_40MHZ;
- if (rate_n_flags & RATE_MCS_SGI_MSK)
- rx_status.flag |= RX_FLAG_SHORT_GI;
-
- if (iwl_is_network_packet(priv, header)) {
- priv->last_rx_rssi = rx_status.signal;
- priv->last_beacon_time = priv->ucode_beacon_time;
- priv->last_tsf = le64_to_cpu(phy_res->timestamp);
- }
-
- iwl_pass_packet_to_mac80211(priv, header, len, ampdu_status,
- rxb, &rx_status);
-}
-EXPORT_SYMBOL(iwl_rx_reply_rx);
-
-/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
- * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
-void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- priv->last_phy_res[0] = 1;
- memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]),
- sizeof(struct iwl_rx_phy_res));
-}
-EXPORT_SYMBOL(iwl_rx_reply_rx_phy);
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 12e455a4b90e..107e173112f6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -69,9 +69,8 @@ int iwl_scan_cancel(struct iwl_priv *priv)
}
if (test_bit(STATUS_SCANNING, &priv->status)) {
- if (!test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
+ if (!test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) {
IWL_DEBUG_SCAN(priv, "Queuing scan abort.\n");
- set_bit(STATUS_SCAN_ABORTING, &priv->status);
queue_work(priv->workqueue, &priv->abort_scan);
} else
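The switch to test_and_set_bit() above closes a small race: with separate test_bit()/set_bit() calls, two contexts could both observe the flag clear and both queue the abort work. A minimal sketch of the atomic form (helper name illustrative):

	static void example_queue_scan_abort_once(struct iwl_priv *priv)
	{
		/* returns the old value, so only the first caller queues the work */
		if (!test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status))
			queue_work(priv->workqueue, &priv->abort_scan);
	}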
@@ -201,9 +200,6 @@ static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
le32_to_cpu(notif->statistics[0]),
le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
#endif
-
- if (!priv->is_internal_short_scan)
- priv->next_scan_jiffies = 0;
}
/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
@@ -223,49 +219,24 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
/* The HW is no longer scanning */
clear_bit(STATUS_SCAN_HW, &priv->status);
- IWL_DEBUG_INFO(priv, "Scan pass on %sGHz took %dms\n",
- (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) ?
- "2.4" : "5.2",
+ IWL_DEBUG_INFO(priv, "Scan on %sGHz took %dms\n",
+ (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
jiffies_to_msecs(elapsed_jiffies
- (priv->scan_pass_start, jiffies)));
+ (priv->scan_start, jiffies)));
- /* Remove this scanned band from the list of pending
- * bands to scan, band G precedes A in order of scanning
- * as seen in iwl_bg_request_scan */
- if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ))
- priv->scan_bands &= ~BIT(IEEE80211_BAND_2GHZ);
- else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ))
- priv->scan_bands &= ~BIT(IEEE80211_BAND_5GHZ);
-
- /* If a request to abort was given, or the scan did not succeed
+ /*
+ * If a request to abort was given, or the scan did not succeed
* then we reset the scan state machine and terminate,
- * re-queuing another scan if one has been requested */
- if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
+ * re-queuing another scan if one has been requested
+ */
+ if (test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status))
IWL_DEBUG_INFO(priv, "Aborted scan completed.\n");
- clear_bit(STATUS_SCAN_ABORTING, &priv->status);
- } else {
- /* If there are more bands on this scan pass reschedule */
- if (priv->scan_bands)
- goto reschedule;
- }
-
- if (!priv->is_internal_short_scan)
- priv->next_scan_jiffies = 0;
IWL_DEBUG_INFO(priv, "Setting scan to off\n");
clear_bit(STATUS_SCANNING, &priv->status);
- IWL_DEBUG_INFO(priv, "Scan took %dms\n",
- jiffies_to_msecs(elapsed_jiffies(priv->scan_start, jiffies)));
-
queue_work(priv->workqueue, &priv->scan_completed);
-
- return;
-
-reschedule:
- priv->scan_pass_start = jiffies;
- queue_work(priv->workqueue, &priv->request_scan);
}
void iwl_setup_rx_scan_handlers(struct iwl_priv *priv)
@@ -294,7 +265,8 @@ inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
EXPORT_SYMBOL(iwl_get_active_dwell_time);
u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
- enum ieee80211_band band)
+ enum ieee80211_band band,
+ struct ieee80211_vif *vif)
{
u16 passive = (band == IEEE80211_BAND_2GHZ) ?
IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
@@ -304,7 +276,7 @@ u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
/* If we're associated, we clamp the maximum passive
* dwell time to be 98% of the beacon interval (minus
* 2 * channel tune time) */
- passive = priv->beacon_int;
+ passive = vif ? vif->bss_conf.beacon_int : 0;
if ((passive > IWL_PASSIVE_DWELL_BASE) || !passive)
passive = IWL_PASSIVE_DWELL_BASE;
passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
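When associated, the passive dwell computed above has to fit inside the beacon interval; the clamp works out to 98% of the interval minus two channel-tune periods, falling back to the base dwell when no (or an oversized) interval is known. As a distilled sketch:

	static u16 example_passive_dwell(u16 band_default, u16 beacon_int, bool associated)
	{
		u16 passive = band_default;

		if (associated) {
			passive = beacon_int;
			if (passive > IWL_PASSIVE_DWELL_BASE || !passive)
				passive = IWL_PASSIVE_DWELL_BASE;
			passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
		}

		return passive;
	}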
@@ -314,150 +286,6 @@ u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
}
EXPORT_SYMBOL(iwl_get_passive_dwell_time);
-static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
- enum ieee80211_band band,
- struct iwl_scan_channel *scan_ch)
-{
- const struct ieee80211_supported_band *sband;
- const struct iwl_channel_info *ch_info;
- u16 passive_dwell = 0;
- u16 active_dwell = 0;
- int i, added = 0;
- u16 channel = 0;
-
- sband = iwl_get_hw_mode(priv, band);
- if (!sband) {
- IWL_ERR(priv, "invalid band\n");
- return added;
- }
-
- active_dwell = iwl_get_active_dwell_time(priv, band, 0);
- passive_dwell = iwl_get_passive_dwell_time(priv, band);
-
- if (passive_dwell <= active_dwell)
- passive_dwell = active_dwell + 1;
-
- /* only scan single channel, good enough to reset the RF */
- /* pick the first valid not in-use channel */
- if (band == IEEE80211_BAND_5GHZ) {
- for (i = 14; i < priv->channel_count; i++) {
- if (priv->channel_info[i].channel !=
- le16_to_cpu(priv->staging_rxon.channel)) {
- channel = priv->channel_info[i].channel;
- ch_info = iwl_get_channel_info(priv,
- band, channel);
- if (is_channel_valid(ch_info))
- break;
- }
- }
- } else {
- for (i = 0; i < 14; i++) {
- if (priv->channel_info[i].channel !=
- le16_to_cpu(priv->staging_rxon.channel)) {
- channel =
- priv->channel_info[i].channel;
- ch_info = iwl_get_channel_info(priv,
- band, channel);
- if (is_channel_valid(ch_info))
- break;
- }
- }
- }
- if (channel) {
- scan_ch->channel = cpu_to_le16(channel);
- scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
- scan_ch->active_dwell = cpu_to_le16(active_dwell);
- scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
- /* Set txpower levels to defaults */
- scan_ch->dsp_atten = 110;
- if (band == IEEE80211_BAND_5GHZ)
- scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
- else
- scan_ch->tx_gain = ((1 << 5) | (5 << 3));
- added++;
- } else
- IWL_ERR(priv, "no valid channel found\n");
- return added;
-}
-
-static int iwl_get_channels_for_scan(struct iwl_priv *priv,
- enum ieee80211_band band,
- u8 is_active, u8 n_probes,
- struct iwl_scan_channel *scan_ch)
-{
- struct ieee80211_channel *chan;
- const struct ieee80211_supported_band *sband;
- const struct iwl_channel_info *ch_info;
- u16 passive_dwell = 0;
- u16 active_dwell = 0;
- int added, i;
- u16 channel;
-
- sband = iwl_get_hw_mode(priv, band);
- if (!sband)
- return 0;
-
- active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
- passive_dwell = iwl_get_passive_dwell_time(priv, band);
-
- if (passive_dwell <= active_dwell)
- passive_dwell = active_dwell + 1;
-
- for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
- chan = priv->scan_request->channels[i];
-
- if (chan->band != band)
- continue;
-
- channel = ieee80211_frequency_to_channel(chan->center_freq);
- scan_ch->channel = cpu_to_le16(channel);
-
- ch_info = iwl_get_channel_info(priv, band, channel);
- if (!is_channel_valid(ch_info)) {
- IWL_DEBUG_SCAN(priv, "Channel %d is INVALID for this band.\n",
- channel);
- continue;
- }
-
- if (!is_active || is_channel_passive(ch_info) ||
- (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
- scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
- else
- scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
-
- if (n_probes)
- scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
-
- scan_ch->active_dwell = cpu_to_le16(active_dwell);
- scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
-
- /* Set txpower levels to defaults */
- scan_ch->dsp_atten = 110;
-
- /* NOTE: if we were doing 6Mb OFDM for scans we'd use
- * power level:
- * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
- */
- if (band == IEEE80211_BAND_5GHZ)
- scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
- else
- scan_ch->tx_gain = ((1 << 5) | (5 << 3));
-
- IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
- channel, le32_to_cpu(scan_ch->type),
- (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
- "ACTIVE" : "PASSIVE",
- (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
- active_dwell : passive_dwell);
-
- scan_ch++;
- added++;
- }
-
- IWL_DEBUG_SCAN(priv, "total channels to scan %d \n", added);
- return added;
-}
-
void iwl_init_scan_params(struct iwl_priv *priv)
{
u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1;
@@ -468,7 +296,7 @@ void iwl_init_scan_params(struct iwl_priv *priv)
}
EXPORT_SYMBOL(iwl_init_scan_params);
-static int iwl_scan_initiate(struct iwl_priv *priv)
+static int iwl_scan_initiate(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
WARN_ON(!mutex_is_locked(&priv->mutex));
@@ -476,26 +304,28 @@ static int iwl_scan_initiate(struct iwl_priv *priv)
set_bit(STATUS_SCANNING, &priv->status);
priv->is_internal_short_scan = false;
priv->scan_start = jiffies;
- priv->scan_pass_start = priv->scan_start;
- queue_work(priv->workqueue, &priv->request_scan);
+ if (WARN_ON(!priv->cfg->ops->utils->request_scan))
+ return -EOPNOTSUPP;
+
+ priv->cfg->ops->utils->request_scan(priv, vif);
return 0;
}
-#define IWL_DELAY_NEXT_SCAN (HZ*2)
-
int iwl_mac_hw_scan(struct ieee80211_hw *hw,
- struct cfg80211_scan_request *req)
+ struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req)
{
- unsigned long flags;
struct iwl_priv *priv = hw->priv;
- int ret, i;
+ int ret;
IWL_DEBUG_MAC80211(priv, "enter\n");
+ if (req->n_channels == 0)
+ return -EINVAL;
+
mutex_lock(&priv->mutex);
- spin_lock_irqsave(&priv->lock, flags);
if (!iwl_is_ready_rf(priv)) {
ret = -EIO;
@@ -515,30 +345,15 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
goto out_unlock;
}
- /* We don't schedule scan within next_scan_jiffies period.
- * Avoid scanning during possible EAPOL exchange, return
- * success immediately.
- */
- if (priv->next_scan_jiffies &&
- time_after(priv->next_scan_jiffies, jiffies)) {
- IWL_DEBUG_SCAN(priv, "scan rejected: within next scan period\n");
- queue_work(priv->workqueue, &priv->scan_completed);
- ret = 0;
- goto out_unlock;
- }
-
- priv->scan_bands = 0;
- for (i = 0; i < req->n_channels; i++)
- priv->scan_bands |= BIT(req->channels[i]->band);
-
+ /* mac80211 will only ask for one band at a time */
+ priv->scan_band = req->channels[0]->band;
priv->scan_request = req;
- ret = iwl_scan_initiate(priv);
+ ret = iwl_scan_initiate(priv, vif);
IWL_DEBUG_MAC80211(priv, "leave\n");
out_unlock:
- spin_unlock_irqrestore(&priv->lock, flags);
mutex_unlock(&priv->mutex);
return ret;
@@ -554,7 +369,7 @@ void iwl_internal_short_hw_scan(struct iwl_priv *priv)
queue_work(priv->workqueue, &priv->start_internal_scan);
}
-static void iwl_bg_start_internal_scan(struct work_struct *work)
+void iwl_bg_start_internal_scan(struct work_struct *work)
{
struct iwl_priv *priv =
container_of(work, struct iwl_priv, start_internal_scan);
@@ -576,22 +391,20 @@ static void iwl_bg_start_internal_scan(struct work_struct *work)
goto unlock;
}
- priv->scan_bands = 0;
- if (priv->band == IEEE80211_BAND_5GHZ)
- priv->scan_bands |= BIT(IEEE80211_BAND_5GHZ);
- else
- priv->scan_bands |= BIT(IEEE80211_BAND_2GHZ);
+ priv->scan_band = priv->band;
IWL_DEBUG_SCAN(priv, "Start internal short scan...\n");
set_bit(STATUS_SCANNING, &priv->status);
priv->is_internal_short_scan = true;
- queue_work(priv->workqueue, &priv->request_scan);
+
+ if (WARN_ON(!priv->cfg->ops->utils->request_scan))
+ goto unlock;
+
+ priv->cfg->ops->utils->request_scan(priv, NULL);
unlock:
mutex_unlock(&priv->mutex);
}
-EXPORT_SYMBOL(iwl_internal_short_hw_scan);
-
-#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ)
+EXPORT_SYMBOL(iwl_bg_start_internal_scan);
void iwl_bg_scan_check(struct work_struct *data)
{
@@ -654,276 +467,15 @@ u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
if (WARN_ON(left < ie_len))
return len;
- if (ies)
+ if (ies && ie_len) {
memcpy(pos, ies, ie_len);
- len += ie_len;
- left -= ie_len;
+ len += ie_len;
+ }
return (u16)len;
}
EXPORT_SYMBOL(iwl_fill_probe_req);
-static void iwl_bg_request_scan(struct work_struct *data)
-{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, request_scan);
- struct iwl_host_cmd cmd = {
- .id = REPLY_SCAN_CMD,
- .len = sizeof(struct iwl_scan_cmd),
- .flags = CMD_SIZE_HUGE,
- };
- struct iwl_scan_cmd *scan;
- struct ieee80211_conf *conf = NULL;
- int ret = 0;
- u32 rate_flags = 0;
- u16 cmd_len;
- u16 rx_chain = 0;
- enum ieee80211_band band;
- u8 n_probes = 0;
- u8 rx_ant = priv->hw_params.valid_rx_ant;
- u8 rate;
- bool is_active = false;
- int chan_mod;
- u8 active_chains;
-
- conf = ieee80211_get_hw_conf(priv->hw);
-
- mutex_lock(&priv->mutex);
-
- cancel_delayed_work(&priv->scan_check);
-
- if (!iwl_is_ready(priv)) {
- IWL_WARN(priv, "request scan called when driver not ready.\n");
- goto done;
- }
-
- /* Make sure the scan wasn't canceled before this queued work
- * was given the chance to run... */
- if (!test_bit(STATUS_SCANNING, &priv->status))
- goto done;
-
- /* This should never be called or scheduled if there is currently
- * a scan active in the hardware. */
- if (test_bit(STATUS_SCAN_HW, &priv->status)) {
- IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests in parallel. "
- "Ignoring second request.\n");
- ret = -EIO;
- goto done;
- }
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Aborting scan due to device shutdown\n");
- goto done;
- }
-
- if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
- IWL_DEBUG_HC(priv, "Scan request while abort pending. Queuing.\n");
- goto done;
- }
-
- if (iwl_is_rfkill(priv)) {
- IWL_DEBUG_HC(priv, "Aborting scan due to RF Kill activation\n");
- goto done;
- }
-
- if (!test_bit(STATUS_READY, &priv->status)) {
- IWL_DEBUG_HC(priv, "Scan request while uninitialized. Queuing.\n");
- goto done;
- }
-
- if (!priv->scan_bands) {
- IWL_DEBUG_HC(priv, "Aborting scan due to no requested bands\n");
- goto done;
- }
-
- if (!priv->scan) {
- priv->scan = kmalloc(sizeof(struct iwl_scan_cmd) +
- IWL_MAX_SCAN_SIZE, GFP_KERNEL);
- if (!priv->scan) {
- ret = -ENOMEM;
- goto done;
- }
- }
- scan = priv->scan;
- memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
-
- scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
- scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
-
- if (iwl_is_associated(priv)) {
- u16 interval = 0;
- u32 extra;
- u32 suspend_time = 100;
- u32 scan_suspend_time = 100;
- unsigned long flags;
-
- IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
- spin_lock_irqsave(&priv->lock, flags);
- interval = priv->beacon_int;
- spin_unlock_irqrestore(&priv->lock, flags);
-
- scan->suspend_time = 0;
- scan->max_out_time = cpu_to_le32(200 * 1024);
- if (!interval)
- interval = suspend_time;
-
- extra = (suspend_time / interval) << 22;
- scan_suspend_time = (extra |
- ((suspend_time % interval) * 1024));
- scan->suspend_time = cpu_to_le32(scan_suspend_time);
- IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
- scan_suspend_time, interval);
- }
-
- if (priv->is_internal_short_scan) {
- IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
- } else if (priv->scan_request->n_ssids) {
- int i, p = 0;
- IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
- for (i = 0; i < priv->scan_request->n_ssids; i++) {
- /* always does wildcard anyway */
- if (!priv->scan_request->ssids[i].ssid_len)
- continue;
- scan->direct_scan[p].id = WLAN_EID_SSID;
- scan->direct_scan[p].len =
- priv->scan_request->ssids[i].ssid_len;
- memcpy(scan->direct_scan[p].ssid,
- priv->scan_request->ssids[i].ssid,
- priv->scan_request->ssids[i].ssid_len);
- n_probes++;
- p++;
- }
- is_active = true;
- } else
- IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
-
- scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
- scan->tx_cmd.sta_id = priv->hw_params.bcast_sta_id;
- scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
-
-
- if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) {
- band = IEEE80211_BAND_2GHZ;
- scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
- chan_mod = le32_to_cpu(priv->active_rxon.flags & RXON_FLG_CHANNEL_MODE_MSK)
- >> RXON_FLG_CHANNEL_MODE_POS;
- if (chan_mod == CHANNEL_MODE_PURE_40) {
- rate = IWL_RATE_6M_PLCP;
- } else {
- rate = IWL_RATE_1M_PLCP;
- rate_flags = RATE_MCS_CCK_MSK;
- }
- scan->good_CRC_th = 0;
- } else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) {
- band = IEEE80211_BAND_5GHZ;
- rate = IWL_RATE_6M_PLCP;
- /*
- * If active scaning is requested but a certain channel
- * is marked passive, we can do active scanning if we
- * detect transmissions.
- */
- scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH : 0;
-
- /* Force use of chains B and C (0x6) for scan Rx for 4965
- * Avoid A (0x1) because of its off-channel reception on A-band.
- */
- if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965)
- rx_ant = ANT_BC;
- } else {
- IWL_WARN(priv, "Invalid scan band count\n");
- goto done;
- }
-
- priv->scan_tx_ant[band] =
- iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band]);
- rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
- scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);
-
- /* In power save mode use one chain, otherwise use all chains */
- if (test_bit(STATUS_POWER_PMI, &priv->status)) {
- /* rx_ant has been set to all valid chains previously */
- active_chains = rx_ant &
- ((u8)(priv->chain_noise_data.active_chains));
- if (!active_chains)
- active_chains = rx_ant;
-
- IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
- priv->chain_noise_data.active_chains);
-
- rx_ant = first_antenna(active_chains);
- }
- /* MIMO is not used here, but value is required */
- rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
- rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
- rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
- rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
- scan->rx_chain = cpu_to_le16(rx_chain);
- if (!priv->is_internal_short_scan) {
- cmd_len = iwl_fill_probe_req(priv,
- (struct ieee80211_mgmt *)scan->data,
- priv->scan_request->ie,
- priv->scan_request->ie_len,
- IWL_MAX_SCAN_SIZE - sizeof(*scan));
- } else {
- cmd_len = iwl_fill_probe_req(priv,
- (struct ieee80211_mgmt *)scan->data,
- NULL, 0,
- IWL_MAX_SCAN_SIZE - sizeof(*scan));
-
- }
- scan->tx_cmd.len = cpu_to_le16(cmd_len);
- if (iwl_is_monitor_mode(priv))
- scan->filter_flags = RXON_FILTER_PROMISC_MSK;
-
- scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
- RXON_FILTER_BCON_AWARE_MSK);
-
- if (priv->is_internal_short_scan) {
- scan->channel_count =
- iwl_get_single_channel_for_scan(priv, band,
- (void *)&scan->data[le16_to_cpu(
- scan->tx_cmd.len)]);
- } else {
- scan->channel_count =
- iwl_get_channels_for_scan(priv, band,
- is_active, n_probes,
- (void *)&scan->data[le16_to_cpu(
- scan->tx_cmd.len)]);
- }
- if (scan->channel_count == 0) {
- IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
- goto done;
- }
-
- cmd.len += le16_to_cpu(scan->tx_cmd.len) +
- scan->channel_count * sizeof(struct iwl_scan_channel);
- cmd.data = scan;
- scan->len = cpu_to_le16(cmd.len);
-
- set_bit(STATUS_SCAN_HW, &priv->status);
- ret = iwl_send_cmd_sync(priv, &cmd);
- if (ret)
- goto done;
-
- queue_delayed_work(priv->workqueue, &priv->scan_check,
- IWL_SCAN_CHECK_WATCHDOG);
-
- mutex_unlock(&priv->mutex);
- return;
-
- done:
- /* Cannot perform scan. Make sure we clear scanning
- * bits from status so next scan request can be performed.
- * If we don't clear scanning status bit here all next scan
- * will fail
- */
- clear_bit(STATUS_SCAN_HW, &priv->status);
- clear_bit(STATUS_SCANNING, &priv->status);
- /* inform mac80211 scan aborted */
- queue_work(priv->workqueue, &priv->scan_completed);
- mutex_unlock(&priv->mutex);
-}
-
void iwl_bg_abort_scan(struct work_struct *work)
{
struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);
@@ -971,7 +523,6 @@ EXPORT_SYMBOL(iwl_bg_scan_completed);
void iwl_setup_scan_deferred_work(struct iwl_priv *priv)
{
INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed);
- INIT_WORK(&priv->request_scan, iwl_bg_request_scan);
INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan);
INIT_WORK(&priv->start_internal_scan, iwl_bg_start_internal_scan);
INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check);
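The scan request path removed from this file above encodes the firmware's suspend_time by packing the number of whole beacon intervals into bits 22 and up, and the remainder, multiplied by 1024, into the low bits. A minimal stand-alone sketch of that encoding (the helper name, main() wrapper, and example values are illustrative only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the removed iwl_bg_request_scan() code: the quotient of
 * whole beacon intervals goes in bits 22 and up, the remainder (times 1024)
 * in the low bits.  Falls back to suspend_time itself when no beacon
 * interval is known, as the driver did. */
static uint32_t encode_suspend_time(uint32_t suspend_time, uint32_t interval)
{
	if (!interval)
		interval = suspend_time;

	return ((suspend_time / interval) << 22) |
	       ((suspend_time % interval) * 1024);
}

int main(void)
{
	/* e.g. suspend for 100 units with a 24-unit beacon interval */
	printf("0x%08x\n", encode_suspend_time(100, 24));
	return 0;
}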
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 4a6686fa6b36..85ed235ac901 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -29,57 +29,12 @@
#include <net/mac80211.h>
#include <linux/etherdevice.h>
+#include <linux/sched.h>
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
-#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
-#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
-
-u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr)
-{
- int i;
- int start = 0;
- int ret = IWL_INVALID_STATION;
- unsigned long flags;
-
- if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) ||
- (priv->iw_mode == NL80211_IFTYPE_AP))
- start = IWL_STA_ID;
-
- if (is_broadcast_ether_addr(addr))
- return priv->hw_params.bcast_sta_id;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- for (i = start; i < priv->hw_params.max_stations; i++)
- if (priv->stations[i].used &&
- (!compare_ether_addr(priv->stations[i].sta.sta.addr,
- addr))) {
- ret = i;
- goto out;
- }
-
- IWL_DEBUG_ASSOC_LIMIT(priv, "can not find STA %pM total %d\n",
- addr, priv->num_stations);
-
- out:
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return ret;
-}
-EXPORT_SYMBOL(iwl_find_station);
-
-int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
-{
- if (priv->iw_mode == NL80211_IFTYPE_STATION) {
- return IWL_AP_ID;
- } else {
- u8 *da = ieee80211_get_DA(hdr);
- return iwl_find_station(priv, da);
- }
-}
-EXPORT_SYMBOL(iwl_get_ra_sta_id);
-
/* priv->sta_lock must be held */
static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
{
@@ -132,7 +87,7 @@ static void iwl_process_add_sta_resp(struct iwl_priv *priv,
sta_id);
break;
case ADD_STA_MODIFY_NON_EXIST_STA:
- IWL_ERR(priv, "Attempting to modify non-existing station %d \n",
+ IWL_ERR(priv, "Attempting to modify non-existing station %d\n",
sta_id);
break;
default:
@@ -158,13 +113,6 @@ static void iwl_process_add_sta_resp(struct iwl_priv *priv,
priv->stations[sta_id].sta.mode ==
STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
addsta->sta.addr);
-
- /*
- * Determine if we wanted to modify or add a station,
- * if adding a station succeeded we have some more initialization
- * to do when using station notification. TODO
- */
-
spin_unlock_irqrestore(&priv->sta_lock, flags);
}
@@ -190,6 +138,10 @@ int iwl_send_add_sta(struct iwl_priv *priv,
.flags = flags,
.data = data,
};
+ u8 sta_id __maybe_unused = sta->sta.sta_id;
+
+ IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n",
+ sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
if (flags & CMD_ASYNC)
cmd.callback = iwl_add_sta_callback;
@@ -263,18 +215,19 @@ static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
}
/**
- * iwl_add_station - Add station to tables in driver and device
+ * iwl_prep_station - Prepare station information for addition
+ *
+ * should be called with sta_lock held
*/
-u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap, u8 flags,
- struct ieee80211_sta_ht_cap *ht_info)
+static u8 iwl_prep_station(struct iwl_priv *priv, const u8 *addr,
+ bool is_ap,
+ struct ieee80211_sta_ht_cap *ht_info)
{
struct iwl_station_entry *station;
- unsigned long flags_spin;
int i;
- int sta_id = IWL_INVALID_STATION;
+ u8 sta_id = IWL_INVALID_STATION;
u16 rate;
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
if (is_ap)
sta_id = IWL_AP_ID;
else if (is_broadcast_ether_addr(addr))
@@ -292,20 +245,32 @@ u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap, u8 flags,
sta_id = i;
}
- /* These two conditions have the same outcome, but keep them separate
- since they have different meanings */
- if (unlikely(sta_id == IWL_INVALID_STATION)) {
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ /*
+ * These two conditions have the same outcome, but keep them
+ * separate
+ */
+ if (unlikely(sta_id == IWL_INVALID_STATION))
+ return sta_id;
+
+ /*
+ * uCode is not able to deal with multiple requests to add a
+ * station. Keep track if one is in progress so that we do not send
+ * another.
+ */
+ if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
+ IWL_DEBUG_INFO(priv, "STA %d already in process of being added.\n",
+ sta_id);
return sta_id;
}
- if (priv->stations[sta_id].used &&
+ if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
+ (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) &&
!compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) {
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not adding again.\n",
+ sta_id, addr);
return sta_id;
}
-
station = &priv->stations[sta_id];
station->used = IWL_STA_DRIVER_ACTIVE;
IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n",
@@ -319,10 +284,12 @@ u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap, u8 flags,
station->sta.sta.sta_id = sta_id;
station->sta.station_flags = 0;
- /* BCAST station and IBSS stations do not work in HT mode */
- if (sta_id != priv->hw_params.bcast_sta_id &&
- priv->iw_mode != NL80211_IFTYPE_ADHOC)
- iwl_set_ht_add_station(priv, sta_id, ht_info);
+ /*
+ * OK to call unconditionally, since local stations (IBSS BSSID
+ * STA and broadcast STA) pass in a NULL ht_info, and mac80211
+ * doesn't allow HT IBSS.
+ */
+ iwl_set_ht_add_station(priv, sta_id, ht_info);
/* 3945 only */
rate = (priv->band == IEEE80211_BAND_5GHZ) ?
@@ -330,86 +297,221 @@ u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap, u8 flags,
/* Turn on both antennas for the station... */
station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
+ return sta_id;
+
+}
+
+#define STA_WAIT_TIMEOUT (HZ/2)
+
+/**
+ * iwl_add_station_common - add a station to the driver and uCode tables
+ */
+int iwl_add_station_common(struct iwl_priv *priv, const u8 *addr,
+ bool is_ap,
+ struct ieee80211_sta_ht_cap *ht_info,
+ u8 *sta_id_r)
+{
+ struct iwl_station_entry *station;
+ unsigned long flags_spin;
+ int ret = 0;
+ u8 sta_id;
+
+ *sta_id_r = 0;
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ sta_id = iwl_prep_station(priv, addr, is_ap, ht_info);
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_ERR(priv, "Unable to prepare station %pM for addition\n",
+ addr);
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ return -EINVAL;
+ }
+
+ /*
+ * uCode is not able to deal with multiple requests to add a
+ * station. Keep track if one is in progress so that we do not send
+ * another.
+ */
+ if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
+ IWL_DEBUG_INFO(priv, "STA %d already in process of being added.\n",
+ sta_id);
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ return -EEXIST;
+ }
+
+ if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
+ (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
+ IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not adding again.\n",
+ sta_id, addr);
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ return -EEXIST;
+ }
+
+ priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS;
+ station = &priv->stations[sta_id];
spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
/* Add station to device's station table */
- iwl_send_add_sta(priv, &station->sta, flags);
- return sta_id;
+ ret = iwl_send_add_sta(priv, &station->sta, CMD_SYNC);
+ if (ret) {
+ IWL_ERR(priv, "Adding station %pM failed.\n", station->sta.sta.addr);
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
+ priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ }
+ *sta_id_r = sta_id;
+ return ret;
+}
+EXPORT_SYMBOL(iwl_add_station_common);
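The common add path above relies on the per-entry state bits defined in iwl-sta.h later in this patch: IWL_STA_DRIVER_ACTIVE is set when the entry is prepared, IWL_STA_UCODE_INPROGRESS brackets the synchronous REPLY_ADD_STA, and the add-sta response handling marks the entry IWL_STA_UCODE_ACTIVE. A small debug helper decoding those bits is sketched below; it is not part of the patch and assumes the flag definitions from iwl-sta.h.

/* Illustrative helper only -- decodes the new station "used" bits. */
static const char *iwl_sta_state_str(u8 used)
{
	if (used & IWL_STA_UCODE_INPROGRESS)
		return "add in progress";
	if ((used & IWL_STA_DRIVER_ACTIVE) && (used & IWL_STA_UCODE_ACTIVE))
		return "active in driver and ucode";
	if (used & IWL_STA_DRIVER_ACTIVE)
		return "driver only (awaiting restore)";
	if (used & IWL_STA_UCODE_ACTIVE)
		return "ucode only (stale)";
	return "unused";
}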
+
+static struct iwl_link_quality_cmd *iwl_sta_alloc_lq(struct iwl_priv *priv,
+ u8 sta_id)
+{
+ int i, r;
+ struct iwl_link_quality_cmd *link_cmd;
+ u32 rate_flags;
+
+ link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL);
+ if (!link_cmd) {
+ IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n");
+ return NULL;
+ }
+ /* Set up the rate scaling to start at selected rate, fall back
+ * all the way down to 1M in IEEE order, and then spin on 1M */
+ if (priv->band == IEEE80211_BAND_5GHZ)
+ r = IWL_RATE_6M_INDEX;
+ else
+ r = IWL_RATE_1M_INDEX;
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+ rate_flags = 0;
+ if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
+ rate_flags |= RATE_MCS_CCK_MSK;
+
+ rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) <<
+ RATE_MCS_ANT_POS;
+
+ link_cmd->rs_table[i].rate_n_flags =
+ iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
+ r = iwl_get_prev_ieee_rate(r);
+ }
+
+ link_cmd->general_params.single_stream_ant_msk =
+ first_antenna(priv->hw_params.valid_tx_ant);
+
+ link_cmd->general_params.dual_stream_ant_msk =
+ priv->hw_params.valid_tx_ant &
+ ~first_antenna(priv->hw_params.valid_tx_ant);
+ if (!link_cmd->general_params.dual_stream_ant_msk) {
+ link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
+ } else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
+ link_cmd->general_params.dual_stream_ant_msk =
+ priv->hw_params.valid_tx_ant;
+ }
+
+ link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+ link_cmd->agg_params.agg_time_limit =
+ cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+
+ link_cmd->sta_id = sta_id;
+
+ return link_cmd;
}
-EXPORT_SYMBOL(iwl_add_station);
-static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, const u8 *addr)
+/*
+ * iwl_add_bssid_station - Add the special IBSS BSSID station
+ *
+ * Function sleeps.
+ */
+int iwl_add_bssid_station(struct iwl_priv *priv, const u8 *addr, bool init_rs,
+ u8 *sta_id_r)
{
+ int ret;
+ u8 sta_id;
+ struct iwl_link_quality_cmd *link_cmd;
unsigned long flags;
- u8 sta_id = iwl_find_station(priv, addr);
- BUG_ON(sta_id == IWL_INVALID_STATION);
+	if (sta_id_r)
+ *sta_id_r = IWL_INVALID_STATION;
- IWL_DEBUG_ASSOC(priv, "Removed STA from Ucode: %pM\n", addr);
+ ret = iwl_add_station_common(priv, addr, 0, NULL, &sta_id);
+ if (ret) {
+ IWL_ERR(priv, "Unable to add station %pM\n", addr);
+ return ret;
+ }
+
+ if (sta_id_r)
+ *sta_id_r = sta_id;
spin_lock_irqsave(&priv->sta_lock, flags);
+ priv->stations[sta_id].used |= IWL_STA_LOCAL;
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
- /* Ucode must be active and driver must be non active */
- if (priv->stations[sta_id].used != IWL_STA_UCODE_ACTIVE)
- IWL_ERR(priv, "removed non active STA %d\n", sta_id);
+ if (init_rs) {
+ /* Set up default rate scaling table in device's station table */
+ link_cmd = iwl_sta_alloc_lq(priv, sta_id);
+ if (!link_cmd) {
+ IWL_ERR(priv, "Unable to initialize rate scaling for station %pM.\n",
+ addr);
+ return -ENOMEM;
+ }
- priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE;
+ ret = iwl_send_lq_cmd(priv, link_cmd, CMD_SYNC, true);
+ if (ret)
+ IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
- memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ priv->stations[sta_id].lq = link_cmd;
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+ }
+
+ return 0;
}
+EXPORT_SYMBOL(iwl_add_bssid_station);
-static void iwl_remove_sta_callback(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt)
+/**
+ * iwl_sta_ucode_deactivate - deactivate ucode status for a station
+ *
+ * priv->sta_lock must be held
+ */
+static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
{
- struct iwl_rem_sta_cmd *rm_sta =
- (struct iwl_rem_sta_cmd *)cmd->cmd.payload;
- const u8 *addr = rm_sta->addr;
+ /* Ucode must be active and driver must be non active */
+ if ((priv->stations[sta_id].used &
+ (IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) != IWL_STA_UCODE_ACTIVE)
+ IWL_ERR(priv, "removed non active STA %u\n", sta_id);
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
- pkt->hdr.flags);
- return;
- }
+ priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE;
- switch (pkt->u.rem_sta.status) {
- case REM_STA_SUCCESS_MSK:
- iwl_sta_ucode_deactivate(priv, addr);
- break;
- default:
- IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
- break;
- }
+ memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry));
+ IWL_DEBUG_ASSOC(priv, "Removed STA %u\n", sta_id);
}
-static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
- u8 flags)
+static int iwl_send_remove_station(struct iwl_priv *priv,
+ struct iwl_station_entry *station)
{
struct iwl_rx_packet *pkt;
int ret;
+ unsigned long flags_spin;
struct iwl_rem_sta_cmd rm_sta_cmd;
struct iwl_host_cmd cmd = {
.id = REPLY_REMOVE_STA,
.len = sizeof(struct iwl_rem_sta_cmd),
- .flags = flags,
+ .flags = CMD_SYNC,
.data = &rm_sta_cmd,
};
memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
rm_sta_cmd.num_sta = 1;
- memcpy(&rm_sta_cmd.addr, addr , ETH_ALEN);
+ memcpy(&rm_sta_cmd.addr, &station->sta.sta.addr , ETH_ALEN);
+
+ cmd.flags |= CMD_WANT_SKB;
- if (flags & CMD_ASYNC)
- cmd.callback = iwl_remove_sta_callback;
- else
- cmd.flags |= CMD_WANT_SKB;
ret = iwl_send_cmd(priv, &cmd);
- if (ret || (flags & CMD_ASYNC))
+ if (ret)
return ret;
pkt = (struct iwl_rx_packet *)cmd.reply_page;
@@ -422,7 +524,9 @@ static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
if (!ret) {
switch (pkt->u.rem_sta.status) {
case REM_STA_SUCCESS_MSK:
- iwl_sta_ucode_deactivate(priv, addr);
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ iwl_sta_ucode_deactivate(priv, station->sta.sta.sta_id);
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
break;
default:
@@ -439,45 +543,48 @@ static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
/**
* iwl_remove_station - Remove driver's knowledge of station.
*/
-int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, bool is_ap)
+int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
+ const u8 *addr)
{
- int sta_id = IWL_INVALID_STATION;
- int i, ret = -EINVAL;
+ struct iwl_station_entry *station;
unsigned long flags;
- spin_lock_irqsave(&priv->sta_lock, flags);
+ if (!iwl_is_ready(priv)) {
+ IWL_DEBUG_INFO(priv,
+ "Unable to remove station %pM, device not ready.\n",
+ addr);
+ /*
+ * It is typical for stations to be removed when we are
+ * going down. Return success since device will be down
+ * soon anyway
+ */
+ return 0;
+ }
- if (is_ap)
- sta_id = IWL_AP_ID;
- else if (is_broadcast_ether_addr(addr))
- sta_id = priv->hw_params.bcast_sta_id;
- else
- for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++)
- if (priv->stations[i].used &&
- !compare_ether_addr(priv->stations[i].sta.sta.addr,
- addr)) {
- sta_id = i;
- break;
- }
+ IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d %pM\n",
+ sta_id, addr);
- if (unlikely(sta_id == IWL_INVALID_STATION))
- goto out;
+ if (WARN_ON(sta_id == IWL_INVALID_STATION))
+ return -EINVAL;
- IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d %pM\n",
- sta_id, addr);
+ spin_lock_irqsave(&priv->sta_lock, flags);
if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
- IWL_ERR(priv, "Removing %pM but non DRIVER active\n",
+ IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n",
addr);
- goto out;
+ goto out_err;
}
if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
- IWL_ERR(priv, "Removing %pM but non UCODE active\n",
+ IWL_DEBUG_INFO(priv, "Removing %pM but non UCODE active\n",
addr);
- goto out;
+ goto out_err;
}
+ if (priv->stations[sta_id].used & IWL_STA_LOCAL) {
+ kfree(priv->stations[sta_id].lq);
+ priv->stations[sta_id].lq = NULL;
+ }
priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
@@ -485,47 +592,112 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, bool is_ap)
BUG_ON(priv->num_stations < 0);
+ station = &priv->stations[sta_id];
spin_unlock_irqrestore(&priv->sta_lock, flags);
- ret = iwl_send_remove_station(priv, addr, CMD_ASYNC);
- return ret;
-out:
+ return iwl_send_remove_station(priv, station);
+out_err:
spin_unlock_irqrestore(&priv->sta_lock, flags);
- return ret;
+ return -EINVAL;
}
+EXPORT_SYMBOL_GPL(iwl_remove_station);
/**
- * iwl_clear_stations_table - Clear the driver's station table
+ * iwl_clear_ucode_stations - clear ucode station table bits
*
- * NOTE: This does not clear or otherwise alter the device's station table.
+ * This function clears all the bits in the driver indicating
+ * which stations are active in the ucode. Call it when something
+ * other than explicit station management has cleared the ucode's
+ * station table, e.g. an unassociated RXON.
*/
-void iwl_clear_stations_table(struct iwl_priv *priv)
+void iwl_clear_ucode_stations(struct iwl_priv *priv)
{
- unsigned long flags;
int i;
+ unsigned long flags_spin;
+ bool cleared = false;
- spin_lock_irqsave(&priv->sta_lock, flags);
+ IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n");
- if (iwl_is_alive(priv) &&
- !test_bit(STATUS_EXIT_PENDING, &priv->status) &&
- iwl_send_cmd_pdu_async(priv, REPLY_REMOVE_ALL_STA, 0, NULL, NULL))
- IWL_ERR(priv, "Couldn't clear the station table\n");
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ for (i = 0; i < priv->hw_params.max_stations; i++) {
+ if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) {
+ IWL_DEBUG_INFO(priv, "Clearing ucode active for station %d\n", i);
+ priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
+ cleared = true;
+ }
+ }
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+
+ if (!cleared)
+ IWL_DEBUG_INFO(priv, "No active stations found to be cleared\n");
+}
+EXPORT_SYMBOL(iwl_clear_ucode_stations);
+
+/**
+ * iwl_restore_stations() - Restore driver known stations to device
+ *
+ * All stations considered active by the driver, but not present in the
+ * ucode, are restored.
+ *
+ * Function sleeps.
+ */
+void iwl_restore_stations(struct iwl_priv *priv)
+{
+ struct iwl_station_entry *station;
+ unsigned long flags_spin;
+ int i;
+ bool found = false;
+ int ret;
- priv->num_stations = 0;
- memset(priv->stations, 0, sizeof(priv->stations));
+ if (!iwl_is_ready(priv)) {
+ IWL_DEBUG_INFO(priv, "Not ready yet, not restoring any stations.\n");
+ return;
+ }
- /* clean ucode key table bit map */
- priv->ucode_key_table = 0;
+ IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n");
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ for (i = 0; i < priv->hw_params.max_stations; i++) {
+ if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) &&
+ !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) {
+ IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n",
+ priv->stations[i].sta.sta.addr);
+ priv->stations[i].sta.mode = 0;
+ priv->stations[i].used |= IWL_STA_UCODE_INPROGRESS;
+ found = true;
+ }
+ }
- /* keep track of static keys */
- for (i = 0; i < WEP_KEYS_MAX ; i++) {
- if (priv->wep_keys[i].key_size)
- set_bit(i, &priv->ucode_key_table);
+ for (i = 0; i < priv->hw_params.max_stations; i++) {
+ if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) {
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ station = &priv->stations[i];
+ ret = iwl_send_add_sta(priv, &priv->stations[i].sta, CMD_SYNC);
+ if (ret) {
+ IWL_ERR(priv, "Adding station %pM failed.\n",
+ station->sta.sta.addr);
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ priv->stations[i].used &= ~IWL_STA_DRIVER_ACTIVE;
+ priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ }
+ /*
+ * Rate scaling has already been initialized, send
+ * current LQ command
+ */
+ if (station->lq)
+ iwl_send_lq_cmd(priv, station->lq, CMD_SYNC, true);
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
+ }
}
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ if (!found)
+ IWL_DEBUG_INFO(priv, "Restoring all known stations .... no stations to be restored.\n");
+ else
+ IWL_DEBUG_INFO(priv, "Restoring all known stations .... complete.\n");
}
-EXPORT_SYMBOL(iwl_clear_stations_table);
+EXPORT_SYMBOL(iwl_restore_stations);
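Per the comments above, the intended calling pattern is to clear the driver's notion of ucode-active stations whenever an operation outside explicit station management wipes the ucode's station table, and then push the driver-active entries back. A sketch of that sequence around an unassociated RXON follows; the surrounding function and the RXON commit itself are illustrative, since the real callers live in the per-device files and are not part of this diff.

static void example_commit_unassoc_rxon(struct iwl_priv *priv)
{
	/* The unassociated RXON makes the ucode forget all stations ... */
	iwl_clear_ucode_stations(priv);

	/* ... the device-specific RXON commit would go here (omitted) ... */

	/* ... then re-add every driver-active station; this sleeps. */
	iwl_restore_stations(priv);
}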
int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
{
@@ -539,7 +711,7 @@ int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
}
EXPORT_SYMBOL(iwl_get_free_ucode_key_index);
-int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
+static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
{
int i, not_empty = 0;
u8 buff[sizeof(struct iwl_wep_cmd) +
@@ -549,9 +721,11 @@ int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
struct iwl_host_cmd cmd = {
.id = REPLY_WEPKEY,
.data = wep_cmd,
- .flags = CMD_ASYNC,
+ .flags = CMD_SYNC,
};
+ might_sleep();
+
memset(wep_cmd, 0, cmd_size +
(sizeof(struct iwl_wep_key) * WEP_KEYS_MAX));
@@ -581,33 +755,34 @@ int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
else
return 0;
}
-EXPORT_SYMBOL(iwl_send_static_wepkey_cmd);
+
+int iwl_restore_default_wep_keys(struct iwl_priv *priv)
+{
+ WARN_ON(!mutex_is_locked(&priv->mutex));
+
+ return iwl_send_static_wepkey_cmd(priv, 0);
+}
+EXPORT_SYMBOL(iwl_restore_default_wep_keys);
int iwl_remove_default_wep_key(struct iwl_priv *priv,
struct ieee80211_key_conf *keyconf)
{
int ret;
- unsigned long flags;
- spin_lock_irqsave(&priv->sta_lock, flags);
+ WARN_ON(!mutex_is_locked(&priv->mutex));
+
IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
keyconf->keyidx);
- if (!test_and_clear_bit(keyconf->keyidx, &priv->ucode_key_table))
- IWL_ERR(priv, "index %d not used in uCode key table.\n",
- keyconf->keyidx);
-
- priv->default_wep_key--;
memset(&priv->wep_keys[keyconf->keyidx], 0, sizeof(priv->wep_keys[0]));
if (iwl_is_rfkill(priv)) {
IWL_DEBUG_WEP(priv, "Not sending REPLY_WEPKEY command due to RFKILL.\n");
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ /* but keys in device are clear anyway so return success */
return 0;
}
ret = iwl_send_static_wepkey_cmd(priv, 1);
IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
keyconf->keyidx, ret);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
return ret;
}
@@ -617,7 +792,8 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
struct ieee80211_key_conf *keyconf)
{
int ret;
- unsigned long flags;
+
+ WARN_ON(!mutex_is_locked(&priv->mutex));
if (keyconf->keylen != WEP_KEY_LEN_128 &&
keyconf->keylen != WEP_KEY_LEN_64) {
@@ -629,13 +805,6 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
keyconf->hw_key_idx = HW_KEY_DEFAULT;
priv->stations[IWL_AP_ID].keyinfo.alg = ALG_WEP;
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->default_wep_key++;
-
- if (test_and_set_bit(keyconf->keyidx, &priv->ucode_key_table))
- IWL_ERR(priv, "index %d already used in uCode key table.\n",
- keyconf->keyidx);
-
priv->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
memcpy(&priv->wep_keys[keyconf->keyidx].key, &keyconf->key,
keyconf->keylen);
@@ -643,7 +812,6 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
ret = iwl_send_static_wepkey_cmd(priv, 0);
IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n",
keyconf->keylen, keyconf->keyidx, ret);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
return ret;
}
@@ -798,18 +966,23 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
void iwl_update_tkip_key(struct iwl_priv *priv,
struct ieee80211_key_conf *keyconf,
- const u8 *addr, u32 iv32, u16 *phase1key)
+ struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
{
- u8 sta_id = IWL_INVALID_STATION;
+ u8 sta_id;
unsigned long flags;
int i;
- sta_id = iwl_find_station(priv, addr);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_DEBUG_MAC80211(priv, "leave - %pM not in station map.\n",
- addr);
- return;
- }
+ if (sta) {
+ sta_id = iwl_sta_id(sta);
+
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_DEBUG_MAC80211(priv, "leave - %pM not initialised.\n",
+ sta->addr);
+ return;
+ }
+ } else
+ sta_id = priv->hw_params.bcast_sta_id;
+
if (iwl_scan_cancel(priv)) {
/* cancel scan failed, just live w/ bad key and rely
@@ -885,7 +1058,7 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
if (iwl_is_rfkill(priv)) {
- IWL_DEBUG_WEP(priv, "Not sending REPLY_ADD_STA command because RFKILL enabled. \n");
+ IWL_DEBUG_WEP(priv, "Not sending REPLY_ADD_STA command because RFKILL enabled.\n");
spin_unlock_irqrestore(&priv->sta_lock, flags);
return 0;
}
@@ -948,253 +1121,149 @@ static inline void iwl_dump_lq_cmd(struct iwl_priv *priv,
}
#endif
-int iwl_send_lq_cmd(struct iwl_priv *priv,
- struct iwl_link_quality_cmd *lq, u8 flags)
-{
- struct iwl_host_cmd cmd = {
- .id = REPLY_TX_LINK_QUALITY_CMD,
- .len = sizeof(struct iwl_link_quality_cmd),
- .flags = flags,
- .data = lq,
- };
-
- if ((lq->sta_id == 0xFF) &&
- (priv->iw_mode == NL80211_IFTYPE_ADHOC))
- return -EINVAL;
-
- if (lq->sta_id == 0xFF)
- lq->sta_id = IWL_AP_ID;
-
- iwl_dump_lq_cmd(priv, lq);
-
- if (iwl_is_associated(priv) && priv->assoc_station_added)
- return iwl_send_cmd(priv, &cmd);
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_send_lq_cmd);
-
/**
- * iwl_sta_init_lq - Initialize a station's hardware rate table
- *
- * The uCode's station table contains a table of fallback rates
- * for automatic fallback during transmission.
- *
- * NOTE: This sets up a default set of values. These will be replaced later
- * if the driver's iwl-agn-rs rate scaling algorithm is used, instead of
- * rc80211_simple.
+ * is_lq_table_valid() - Test one aspect of LQ cmd for validity
*
- * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
- * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
- * which requires station table entry to exist).
+ * It sometimes happens that, when an HT rate has been in use and we
+ * lose connectivity with the AP, mac80211 will first tell us that the
+ * current channel is not HT anymore before removing the station. In such a
+ * scenario the RXON flags will be updated to indicate we are not
+ * communicating HT anymore, but the LQ command may still contain HT rates.
+ * Test for this to prevent the driver from sending an LQ command between
+ * the time the RXON flags are updated and the time the LQ command is updated.
*/
-static void iwl_sta_init_lq(struct iwl_priv *priv, const u8 *addr, bool is_ap)
+static bool is_lq_table_valid(struct iwl_priv *priv,
+ struct iwl_link_quality_cmd *lq)
{
- int i, r;
- struct iwl_link_quality_cmd link_cmd = {
- .reserved1 = 0,
- };
- u32 rate_flags;
+ int i;
+ struct iwl_ht_config *ht_conf = &priv->current_ht_config;
- /* Set up the rate scaling to start at selected rate, fall back
- * all the way down to 1M in IEEE order, and then spin on 1M */
- if (is_ap)
- r = IWL_RATE_54M_INDEX;
- else if (priv->band == IEEE80211_BAND_5GHZ)
- r = IWL_RATE_6M_INDEX;
- else
- r = IWL_RATE_1M_INDEX;
+ if (ht_conf->is_ht)
+ return true;
+ IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n",
+ priv->active_rxon.channel);
for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
- rate_flags = 0;
- if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
- rate_flags |= RATE_MCS_CCK_MSK;
-
- rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) <<
- RATE_MCS_ANT_POS;
-
- link_cmd.rs_table[i].rate_n_flags =
- iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
- r = iwl_get_prev_ieee_rate(r);
+ if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) {
+ IWL_DEBUG_INFO(priv,
+ "index %d of LQ expects HT channel\n",
+ i);
+ return false;
+ }
}
-
- link_cmd.general_params.single_stream_ant_msk =
- first_antenna(priv->hw_params.valid_tx_ant);
- link_cmd.general_params.dual_stream_ant_msk = 3;
- link_cmd.agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
- link_cmd.agg_params.agg_time_limit =
- cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
-
- /* Update the rate scaling for control frame Tx to AP */
- link_cmd.sta_id = is_ap ? IWL_AP_ID : priv->hw_params.bcast_sta_id;
-
- iwl_send_cmd_pdu_async(priv, REPLY_TX_LINK_QUALITY_CMD,
- sizeof(link_cmd), &link_cmd, NULL);
+ return true;
}
/**
- * iwl_rxon_add_station - add station into station table.
+ * iwl_send_lq_cmd() - Send link quality command
+ * @init: This command is sent as part of station initialization right
+ * after station has been added.
*
- * there is only one AP station with id= IWL_AP_ID
- * NOTE: mutex must be held before calling this function
+ * The link quality command is sent as the last step of station creation.
+ * When init is set we are handling the special case of station creation:
+ * once the command completes, the driver clears the state indicating that
+ * station creation is still in progress.
*/
-int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap)
+int iwl_send_lq_cmd(struct iwl_priv *priv,
+ struct iwl_link_quality_cmd *lq, u8 flags, bool init)
{
- struct ieee80211_sta *sta;
- struct ieee80211_sta_ht_cap ht_config;
- struct ieee80211_sta_ht_cap *cur_ht_config = NULL;
- u8 sta_id;
+ int ret = 0;
+ unsigned long flags_spin;
- /*
- * Set HT capabilities. It is ok to set this struct even if not using
- * HT config: the priv->current_ht_config.is_ht flag will just be false
- */
- rcu_read_lock();
- sta = ieee80211_find_sta(priv->vif, addr);
- if (sta) {
- memcpy(&ht_config, &sta->ht_cap, sizeof(ht_config));
- cur_ht_config = &ht_config;
- }
- rcu_read_unlock();
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_TX_LINK_QUALITY_CMD,
+ .len = sizeof(struct iwl_link_quality_cmd),
+ .flags = flags,
+ .data = lq,
+ };
- /* Add station to device's station table */
- sta_id = iwl_add_station(priv, addr, is_ap, CMD_SYNC, cur_ht_config);
+ if (WARN_ON(lq->sta_id == IWL_INVALID_STATION))
+ return -EINVAL;
- /* Set up default rate scaling table in device's station table */
- iwl_sta_init_lq(priv, addr, is_ap);
+ iwl_dump_lq_cmd(priv, lq);
+ BUG_ON(init && (cmd.flags & CMD_ASYNC));
- return sta_id;
+ if (is_lq_table_valid(priv, lq))
+ ret = iwl_send_cmd(priv, &cmd);
+ else
+ ret = -EINVAL;
+
+ if (cmd.flags & CMD_ASYNC)
+ return ret;
+
+ if (init) {
+ IWL_DEBUG_INFO(priv, "init LQ command complete, clearing sta addition status for sta %d\n",
+ lq->sta_id);
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ }
+ return ret;
}
-EXPORT_SYMBOL(iwl_rxon_add_station);
+EXPORT_SYMBOL(iwl_send_lq_cmd);
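For a station that is already active, callers such as the rate scaling code would send an updated link quality command asynchronously with init set to false, so the BUG_ON above is not hit and no in-progress bit needs clearing. A sketch of such a caller (illustrative only; the actual rate scaling callers are not part of this diff):

static void example_update_lq(struct iwl_priv *priv,
			      struct iwl_link_quality_cmd *lq)
{
	int ret = iwl_send_lq_cmd(priv, lq, CMD_ASYNC, false);

	if (ret)
		IWL_ERR(priv, "Updated LQ command failed (%d)\n", ret);
}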
/**
- * iwl_sta_init_bcast_lq - Initialize a bcast station's hardware rate table
+ * iwl_alloc_bcast_station - add broadcast station into driver's station table.
*
- * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
- * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
- * which requires station table entry to exist).
+ * This adds the broadcast station into the driver's station table
+ * and marks it driver active, so that it will be restored to the
+ * device at the next best time.
*/
-static void iwl_sta_init_bcast_lq(struct iwl_priv *priv)
+int iwl_alloc_bcast_station(struct iwl_priv *priv, bool init_lq)
{
- int i, r;
- struct iwl_link_quality_cmd link_cmd = {
- .reserved1 = 0,
- };
- u32 rate_flags;
-
- /* Set up the rate scaling to start at selected rate, fall back
- * all the way down to 1M in IEEE order, and then spin on 1M */
- if (priv->band == IEEE80211_BAND_5GHZ)
- r = IWL_RATE_6M_INDEX;
- else
- r = IWL_RATE_1M_INDEX;
-
- for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
- rate_flags = 0;
- if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
- rate_flags |= RATE_MCS_CCK_MSK;
+ struct iwl_link_quality_cmd *link_cmd;
+ unsigned long flags;
+ u8 sta_id;
- rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) <<
- RATE_MCS_ANT_POS;
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ sta_id = iwl_prep_station(priv, iwl_bcast_addr, false, NULL);
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_ERR(priv, "Unable to prepare broadcast station\n");
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
- link_cmd.rs_table[i].rate_n_flags =
- iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
- r = iwl_get_prev_ieee_rate(r);
+ return -EINVAL;
}
- link_cmd.general_params.single_stream_ant_msk =
- first_antenna(priv->hw_params.valid_tx_ant);
- link_cmd.general_params.dual_stream_ant_msk = 3;
- link_cmd.agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
- link_cmd.agg_params.agg_time_limit =
- cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
-
- /* Update the rate scaling for control frame Tx to AP */
- link_cmd.sta_id = priv->hw_params.bcast_sta_id;
-
- iwl_send_cmd_pdu_async(priv, REPLY_TX_LINK_QUALITY_CMD,
- sizeof(link_cmd), &link_cmd, NULL);
-}
-
+ priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
+ priv->stations[sta_id].used |= IWL_STA_BCAST;
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
-/**
- * iwl_add_bcast_station - add broadcast station into station table.
- */
-void iwl_add_bcast_station(struct iwl_priv *priv)
-{
- IWL_DEBUG_INFO(priv, "Adding broadcast station to station table\n");
- iwl_add_station(priv, iwl_bcast_addr, false, CMD_SYNC, NULL);
+ if (init_lq) {
+ link_cmd = iwl_sta_alloc_lq(priv, sta_id);
+ if (!link_cmd) {
+ IWL_ERR(priv,
+ "Unable to initialize rate scaling for bcast station.\n");
+ return -ENOMEM;
+ }
- /* Set up default rate scaling table in device's station table */
- iwl_sta_init_bcast_lq(priv);
-}
-EXPORT_SYMBOL(iwl_add_bcast_station);
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ priv->stations[sta_id].lq = link_cmd;
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+ }
-/**
- * iwl3945_add_bcast_station - add broadcast station into station table.
- */
-void iwl3945_add_bcast_station(struct iwl_priv *priv)
-{
- IWL_DEBUG_INFO(priv, "Adding broadcast station to station table\n");
- iwl_add_station(priv, iwl_bcast_addr, false, CMD_SYNC, NULL);
+ return 0;
}
-EXPORT_SYMBOL(iwl3945_add_bcast_station);
+EXPORT_SYMBOL_GPL(iwl_alloc_bcast_station);
-/**
- * iwl_get_sta_id - Find station's index within station table
- *
- * If new IBSS station, create new entry in station table
- */
-int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
+void iwl_dealloc_bcast_station(struct iwl_priv *priv)
{
- int sta_id;
- __le16 fc = hdr->frame_control;
-
- /* If this frame is broadcast or management, use broadcast station id */
- if (!ieee80211_is_data(fc) || is_multicast_ether_addr(hdr->addr1))
- return priv->hw_params.bcast_sta_id;
-
- switch (priv->iw_mode) {
-
- /* If we are a client station in a BSS network, use the special
- * AP station entry (that's the only station we communicate with) */
- case NL80211_IFTYPE_STATION:
- return IWL_AP_ID;
-
- /* If we are an AP, then find the station, or use BCAST */
- case NL80211_IFTYPE_AP:
- sta_id = iwl_find_station(priv, hdr->addr1);
- if (sta_id != IWL_INVALID_STATION)
- return sta_id;
- return priv->hw_params.bcast_sta_id;
-
- /* If this frame is going out to an IBSS network, find the station,
- * or create a new station table entry */
- case NL80211_IFTYPE_ADHOC:
- sta_id = iwl_find_station(priv, hdr->addr1);
- if (sta_id != IWL_INVALID_STATION)
- return sta_id;
-
- /* Create new station table entry */
- sta_id = iwl_add_station(priv, hdr->addr1, false,
- CMD_ASYNC, NULL);
-
- if (sta_id != IWL_INVALID_STATION)
- return sta_id;
-
- IWL_DEBUG_DROP(priv, "Station %pM not in station map. "
- "Defaulting to broadcast...\n",
- hdr->addr1);
- iwl_print_hex_dump(priv, IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
- return priv->hw_params.bcast_sta_id;
+ unsigned long flags;
+ int i;
- default:
- IWL_WARN(priv, "Unknown mode of operation: %d\n",
- priv->iw_mode);
- return priv->hw_params.bcast_sta_id;
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ for (i = 0; i < priv->hw_params.max_stations; i++) {
+ if (!(priv->stations[i].used & IWL_STA_BCAST))
+ continue;
+
+ priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
+ priv->num_stations--;
+ BUG_ON(priv->num_stations < 0);
+ kfree(priv->stations[i].lq);
+ priv->stations[i].lq = NULL;
}
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
}
-EXPORT_SYMBOL(iwl_get_sta_id);
+EXPORT_SYMBOL_GPL(iwl_dealloc_bcast_station);
/**
* iwl_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
@@ -1214,13 +1283,13 @@ void iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
}
EXPORT_SYMBOL(iwl_sta_tx_modify_enable_tid);
-int iwl_sta_rx_agg_start(struct iwl_priv *priv,
- const u8 *addr, int tid, u16 ssn)
+int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
+ int tid, u16 ssn)
{
unsigned long flags;
int sta_id;
- sta_id = iwl_find_station(priv, addr);
+ sta_id = iwl_sta_id(sta);
if (sta_id == IWL_INVALID_STATION)
return -ENXIO;
@@ -1233,16 +1302,17 @@ int iwl_sta_rx_agg_start(struct iwl_priv *priv,
spin_unlock_irqrestore(&priv->sta_lock, flags);
return iwl_send_add_sta(priv, &priv->stations[sta_id].sta,
- CMD_ASYNC);
+ CMD_ASYNC);
}
EXPORT_SYMBOL(iwl_sta_rx_agg_start);
-int iwl_sta_rx_agg_stop(struct iwl_priv *priv, const u8 *addr, int tid)
+int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
+ int tid)
{
unsigned long flags;
int sta_id;
- sta_id = iwl_find_station(priv, addr);
+ sta_id = iwl_sta_id(sta);
if (sta_id == IWL_INVALID_STATION) {
IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
return -ENXIO;
@@ -1291,3 +1361,22 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
}
+EXPORT_SYMBOL(iwl_sta_modify_sleep_tx_count);
+
+int iwl_mac_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_station_priv_common *sta_common = (void *)sta->drv_priv;
+ int ret;
+
+ IWL_DEBUG_INFO(priv, "received request to remove station %pM\n",
+ sta->addr);
+ ret = iwl_remove_station(priv, sta_common->sta_id, sta->addr);
+ if (ret)
+ IWL_ERR(priv, "Error removing station %pM\n",
+ sta->addr);
+ return ret;
+}
+EXPORT_SYMBOL(iwl_mac_sta_remove);
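iwl_mac_sta_remove matches the sta_remove callback in this kernel's struct ieee80211_ops; the per-device ops tables that would reference it live in iwl-agn.c and iwl3945-base.c and are not shown in this diff. A sketch of the expected wiring (the ops table name is illustrative):

static struct ieee80211_ops example_iwl_hw_ops = {
	/* ... tx, start, stop, add_interface, etc. ... */
	.sta_remove = iwl_mac_sta_remove,
};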
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h
index 2dc35fe28f56..c2a453a1a991 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.h
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.h
@@ -29,44 +29,82 @@
#ifndef __iwl_sta_h__
#define __iwl_sta_h__
+#include "iwl-dev.h"
+
#define HW_KEY_DYNAMIC 0
#define HW_KEY_DEFAULT 1
-/**
- * iwl_find_station - Find station id for a given BSSID
- * @bssid: MAC address of station ID to find
- */
-u8 iwl_find_station(struct iwl_priv *priv, const u8 *bssid);
+#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
+#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
+#define IWL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of
+ being activated */
+#define IWL_STA_LOCAL BIT(3) /* station state not directed by mac80211;
+ (this is for the IBSS BSSID stations) */
+#define IWL_STA_BCAST BIT(4) /* this station is the special bcast station */
+
-int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty);
int iwl_remove_default_wep_key(struct iwl_priv *priv,
struct ieee80211_key_conf *key);
int iwl_set_default_wep_key(struct iwl_priv *priv,
struct ieee80211_key_conf *key);
+int iwl_restore_default_wep_keys(struct iwl_priv *priv);
int iwl_set_dynamic_key(struct iwl_priv *priv,
struct ieee80211_key_conf *key, u8 sta_id);
int iwl_remove_dynamic_key(struct iwl_priv *priv,
struct ieee80211_key_conf *key, u8 sta_id);
void iwl_update_tkip_key(struct iwl_priv *priv,
struct ieee80211_key_conf *keyconf,
- const u8 *addr, u32 iv32, u16 *phase1key);
+ struct ieee80211_sta *sta, u32 iv32, u16 *phase1key);
-int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap);
-void iwl_add_bcast_station(struct iwl_priv *priv);
-void iwl3945_add_bcast_station(struct iwl_priv *priv);
-int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, bool is_ap);
-void iwl_clear_stations_table(struct iwl_priv *priv);
+void iwl_restore_stations(struct iwl_priv *priv);
+void iwl_clear_ucode_stations(struct iwl_priv *priv);
+int iwl_alloc_bcast_station(struct iwl_priv *priv, bool init_lq);
+void iwl_dealloc_bcast_station(struct iwl_priv *priv);
int iwl_get_free_ucode_key_index(struct iwl_priv *priv);
-int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr);
-int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr);
int iwl_send_add_sta(struct iwl_priv *priv,
struct iwl_addsta_cmd *sta, u8 flags);
-u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap, u8 flags,
- struct ieee80211_sta_ht_cap *ht_info);
+int iwl_add_bssid_station(struct iwl_priv *priv, const u8 *addr, bool init_rs,
+ u8 *sta_id_r);
+int iwl_add_station_common(struct iwl_priv *priv, const u8 *addr,
+ bool is_ap,
+ struct ieee80211_sta_ht_cap *ht_info,
+ u8 *sta_id_r);
+int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
+ const u8 *addr);
+int iwl_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
void iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid);
-int iwl_sta_rx_agg_start(struct iwl_priv *priv,
- const u8 *addr, int tid, u16 ssn);
-int iwl_sta_rx_agg_stop(struct iwl_priv *priv, const u8 *addr, int tid);
+int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
+ int tid, u16 ssn);
+int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
+ int tid);
void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id);
void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt);
+
+/**
+ * iwl_clear_driver_stations - clear knowledge of all stations from driver
+ * @priv: iwl priv struct
+ *
+ * This is called during iwl_down() to make sure that if we are
+ * coming here from a hardware restart, mac80211 will be able to
+ * reconfigure stations -- in the normal down flow the stations
+ * will already have been cleared.
+ */
+static inline void iwl_clear_driver_stations(struct iwl_priv *priv)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ memset(priv->stations, 0, sizeof(priv->stations));
+ priv->num_stations = 0;
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+}
+
+static inline int iwl_sta_id(struct ieee80211_sta *sta)
+{
+ if (WARN_ON(!sta))
+ return IWL_INVALID_STATION;
+
+ return ((struct iwl_station_priv_common *)sta->drv_priv)->sta_id;
+}
#endif /* __iwl_sta_h__ */
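iwl_sta_id() only works once the add side has stored the driver's station index into the station's drv_priv. That add side lives in the per-device mac80211 callbacks, which are not part of this diff; a hedged sketch of what it is expected to look like (the function name and the is_ap heuristic are assumptions):

static int example_sta_add(struct iwl_priv *priv, struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta)
{
	struct iwl_station_priv_common *sta_common = (void *)sta->drv_priv;
	/* In managed mode the only station mac80211 adds is the AP itself. */
	bool is_ap = vif->type == NL80211_IFTYPE_STATION;

	/* Stash the returned index so iwl_sta_id() can recover it later. */
	return iwl_add_station_common(priv, sta->addr, is_ap,
				      &sta->ht_cap, &sta_common->sta_id);
}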
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 8dd0c036d547..1ece2ea09773 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -38,47 +38,6 @@
#include "iwl-io.h"
#include "iwl-helpers.h"
-static const u16 default_tid_to_tx_fifo[] = {
- IWL_TX_FIFO_AC1,
- IWL_TX_FIFO_AC0,
- IWL_TX_FIFO_AC0,
- IWL_TX_FIFO_AC1,
- IWL_TX_FIFO_AC2,
- IWL_TX_FIFO_AC2,
- IWL_TX_FIFO_AC3,
- IWL_TX_FIFO_AC3,
- IWL_TX_FIFO_NONE,
- IWL_TX_FIFO_NONE,
- IWL_TX_FIFO_NONE,
- IWL_TX_FIFO_NONE,
- IWL_TX_FIFO_NONE,
- IWL_TX_FIFO_NONE,
- IWL_TX_FIFO_NONE,
- IWL_TX_FIFO_NONE,
- IWL_TX_FIFO_AC3
-};
-
-static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv,
- struct iwl_dma_ptr *ptr, size_t size)
-{
- ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
- GFP_KERNEL);
- if (!ptr->addr)
- return -ENOMEM;
- ptr->size = size;
- return 0;
-}
-
-static inline void iwl_free_dma_ptr(struct iwl_priv *priv,
- struct iwl_dma_ptr *ptr)
-{
- if (unlikely(!ptr->addr))
- return;
-
- dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
- memset(ptr, 0, sizeof(*ptr));
-}
-
/**
* iwl_txq_update_write_ptr - Send new write index to hardware
*/
@@ -310,6 +269,8 @@ static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
q->high_mark = 2;
q->write_ptr = q->read_ptr = 0;
+ q->last_read_ptr = 0;
+ q->repeat_same_read_ptr = 0;
return 0;
}
@@ -454,611 +415,6 @@ void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
}
EXPORT_SYMBOL(iwl_tx_queue_reset);
-/**
- * iwl_hw_txq_ctx_free - Free TXQ Context
- *
- * Destroy all TX DMA queues and structures
- */
-void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
-{
- int txq_id;
-
- /* Tx queues */
- if (priv->txq) {
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
- if (txq_id == IWL_CMD_QUEUE_NUM)
- iwl_cmd_queue_free(priv);
- else
- iwl_tx_queue_free(priv, txq_id);
- }
- iwl_free_dma_ptr(priv, &priv->kw);
-
- iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
-
- /* free tx queue structure */
- iwl_free_txq_mem(priv);
-}
-EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
-
-/**
- * iwl_txq_ctx_alloc - allocate TX queue context
- * Allocate all Tx DMA structures and initialize them
- *
- * @param priv
- * @return error code
- */
-int iwl_txq_ctx_alloc(struct iwl_priv *priv)
-{
- int ret;
- int txq_id, slots_num;
- unsigned long flags;
-
- /* Free all tx/cmd queues and keep-warm buffer */
- iwl_hw_txq_ctx_free(priv);
-
- ret = iwl_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
- priv->hw_params.scd_bc_tbls_size);
- if (ret) {
- IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
- goto error_bc_tbls;
- }
- /* Alloc keep-warm buffer */
- ret = iwl_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
- if (ret) {
- IWL_ERR(priv, "Keep Warm allocation failed\n");
- goto error_kw;
- }
-
- /* allocate tx queue structure */
- ret = iwl_alloc_txq_mem(priv);
- if (ret)
- goto error;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Turn off all Tx DMA fifos */
- priv->cfg->ops->lib->txq_set_sched(priv, 0);
-
- /* Tell NIC where to find the "keep warm" buffer */
- iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Alloc and init all Tx queues, including the command queue (#4) */
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
- slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
- TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
- txq_id);
- if (ret) {
- IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
- goto error;
- }
- }
-
- return ret;
-
- error:
- iwl_hw_txq_ctx_free(priv);
- iwl_free_dma_ptr(priv, &priv->kw);
- error_kw:
- iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
- error_bc_tbls:
- return ret;
-}
-
-void iwl_txq_ctx_reset(struct iwl_priv *priv)
-{
- int txq_id, slots_num;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Turn off all Tx DMA fifos */
- priv->cfg->ops->lib->txq_set_sched(priv, 0);
-
- /* Tell NIC where to find the "keep warm" buffer */
- iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Alloc and init all Tx queues, including the command queue (#4) */
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
- slots_num = txq_id == IWL_CMD_QUEUE_NUM ?
- TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id);
- }
-}
-
-/**
- * iwl_txq_ctx_stop - Stop all Tx DMA channels
- */
-void iwl_txq_ctx_stop(struct iwl_priv *priv)
-{
- int ch;
- unsigned long flags;
-
- /* Turn off all Tx DMA fifos */
- spin_lock_irqsave(&priv->lock, flags);
-
- priv->cfg->ops->lib->txq_set_sched(priv, 0);
-
- /* Stop each Tx DMA channel, and wait for it to be idle */
- for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
- iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
- iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
- FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
- 1000);
- }
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-EXPORT_SYMBOL(iwl_txq_ctx_stop);
-
-/*
- * handle build REPLY_TX command notification.
- */
-static void iwl_tx_cmd_build_basic(struct iwl_priv *priv,
- struct iwl_tx_cmd *tx_cmd,
- struct ieee80211_tx_info *info,
- struct ieee80211_hdr *hdr,
- u8 std_id)
-{
- __le16 fc = hdr->frame_control;
- __le32 tx_flags = tx_cmd->tx_flags;
-
- tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
- if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
- tx_flags |= TX_CMD_FLG_ACK_MSK;
- if (ieee80211_is_mgmt(fc))
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
- if (ieee80211_is_probe_resp(fc) &&
- !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
- tx_flags |= TX_CMD_FLG_TSF_MSK;
- } else {
- tx_flags &= (~TX_CMD_FLG_ACK_MSK);
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
- }
-
- if (ieee80211_is_back_req(fc))
- tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
-
-
- tx_cmd->sta_id = std_id;
- if (ieee80211_has_morefrags(fc))
- tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
-
- if (ieee80211_is_data_qos(fc)) {
- u8 *qc = ieee80211_get_qos_ctl(hdr);
- tx_cmd->tid_tspec = qc[0] & 0xf;
- tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
- } else {
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
- }
-
- priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);
-
- if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
- tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
-
- tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
- if (ieee80211_is_mgmt(fc)) {
- if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
- tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
- else
- tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
- } else {
- tx_cmd->timeout.pm_frame_timeout = 0;
- }
-
- tx_cmd->driver_txop = 0;
- tx_cmd->tx_flags = tx_flags;
- tx_cmd->next_frame_len = 0;
-}
-
-#define RTS_HCCA_RETRY_LIMIT 3
-#define RTS_DFAULT_RETRY_LIMIT 60
-
-static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
- struct iwl_tx_cmd *tx_cmd,
- struct ieee80211_tx_info *info,
- __le16 fc, int is_hcca)
-{
- u32 rate_flags;
- int rate_idx;
- u8 rts_retry_limit;
- u8 data_retry_limit;
- u8 rate_plcp;
-
- /* Set retry limit on DATA packets and Probe Responses*/
- if (ieee80211_is_probe_resp(fc))
- data_retry_limit = 3;
- else
- data_retry_limit = IWL_DEFAULT_TX_RETRY;
- tx_cmd->data_retry_limit = data_retry_limit;
-
- /* Set retry limit on RTS packets */
- rts_retry_limit = (is_hcca) ? RTS_HCCA_RETRY_LIMIT :
- RTS_DFAULT_RETRY_LIMIT;
- if (data_retry_limit < rts_retry_limit)
- rts_retry_limit = data_retry_limit;
- tx_cmd->rts_retry_limit = rts_retry_limit;
-
- /* DATA packets will use the uCode station table for rate/antenna
- * selection */
- if (ieee80211_is_data(fc)) {
- tx_cmd->initial_rate_index = 0;
- tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
- return;
- }
-
- /**
- * If the current TX rate stored in mac80211 has the MCS bit set, it's
- * not really a TX rate. Thus, we use the lowest supported rate for
- * this band. Also use the lowest supported rate if the stored rate
- * index is invalid.
- */
- rate_idx = info->control.rates[0].idx;
- if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
- (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
- rate_idx = rate_lowest_index(&priv->bands[info->band],
- info->control.sta);
- /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
- if (info->band == IEEE80211_BAND_5GHZ)
- rate_idx += IWL_FIRST_OFDM_RATE;
- /* Get PLCP rate for tx_cmd->rate_n_flags */
- rate_plcp = iwl_rates[rate_idx].plcp;
- /* Zero out flags for this packet */
- rate_flags = 0;
-
- /* Set CCK flag as needed */
- if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
- rate_flags |= RATE_MCS_CCK_MSK;
-
- /* Set up RTS and CTS flags for certain packets */
- switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
- case cpu_to_le16(IEEE80211_STYPE_AUTH):
- case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
- case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
- case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
- if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
- tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
- tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
- }
- break;
- default:
- break;
- }
-
- /* Set up antennas */
- priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
- rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
-
- /* Set the rate in the TX cmd */
- tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
-}
-
-static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
- struct ieee80211_tx_info *info,
- struct iwl_tx_cmd *tx_cmd,
- struct sk_buff *skb_frag,
- int sta_id)
-{
- struct ieee80211_key_conf *keyconf = info->control.hw_key;
-
- switch (keyconf->alg) {
- case ALG_CCMP:
- tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
- memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
- tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
- IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
- break;
-
- case ALG_TKIP:
- tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
- ieee80211_get_tkip_key(keyconf, skb_frag,
- IEEE80211_TKIP_P2_KEY, tx_cmd->key);
- IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
- break;
-
- case ALG_WEP:
- tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
- (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
-
- if (keyconf->keylen == WEP_KEY_LEN_128)
- tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
-
- memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
-
- IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
- "with key %d\n", keyconf->keyidx);
- break;
-
- default:
- IWL_ERR(priv, "Unknown encode alg %d\n", keyconf->alg);
- break;
- }
-}
-
-/*
- * start REPLY_TX command process
- */
-int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_sta *sta = info->control.sta;
- struct iwl_station_priv *sta_priv = NULL;
- struct iwl_tx_queue *txq;
- struct iwl_queue *q;
- struct iwl_device_cmd *out_cmd;
- struct iwl_cmd_meta *out_meta;
- struct iwl_tx_cmd *tx_cmd;
- int swq_id, txq_id;
- dma_addr_t phys_addr;
- dma_addr_t txcmd_phys;
- dma_addr_t scratch_phys;
- u16 len, len_org, firstlen, secondlen;
- u16 seq_number = 0;
- __le16 fc;
- u8 hdr_len;
- u8 sta_id;
- u8 wait_write_ptr = 0;
- u8 tid = 0;
- u8 *qc = NULL;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- if (iwl_is_rfkill(priv)) {
- IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
- goto drop_unlock;
- }
-
- fc = hdr->frame_control;
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (ieee80211_is_auth(fc))
- IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
- else if (ieee80211_is_assoc_req(fc))
- IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
- else if (ieee80211_is_reassoc_req(fc))
- IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
-#endif
-
- /* drop all non-injected data frame if we are not associated */
- if (ieee80211_is_data(fc) &&
- !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
- (!iwl_is_associated(priv) ||
- ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id) ||
- !priv->assoc_station_added)) {
- IWL_DEBUG_DROP(priv, "Dropping - !iwl_is_associated\n");
- goto drop_unlock;
- }
-
- hdr_len = ieee80211_hdrlen(fc);
-
- /* Find (or create) index into station table for destination station */
- if (info->flags & IEEE80211_TX_CTL_INJECTED)
- sta_id = priv->hw_params.bcast_sta_id;
- else
- sta_id = iwl_get_sta_id(priv, hdr);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
- hdr->addr1);
- goto drop_unlock;
- }
-
- IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
-
- if (sta)
- sta_priv = (void *)sta->drv_priv;
-
- if (sta_priv && sta_id != priv->hw_params.bcast_sta_id &&
- sta_priv->asleep) {
- WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE));
- /*
- * This sends an asynchronous command to the device,
- * but we can rely on it being processed before the
- * next frame is processed -- and the next frame to
- * this station is the one that will consume this
- * counter.
- * For now set the counter to just 1 since we do not
- * support uAPSD yet.
- */
- iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
- }
-
- txq_id = skb_get_queue_mapping(skb);
- if (ieee80211_is_data_qos(fc)) {
- qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
- if (unlikely(tid >= MAX_TID_COUNT))
- goto drop_unlock;
- seq_number = priv->stations[sta_id].tid[tid].seq_number;
- seq_number &= IEEE80211_SCTL_SEQ;
- hdr->seq_ctrl = hdr->seq_ctrl &
- cpu_to_le16(IEEE80211_SCTL_FRAG);
- hdr->seq_ctrl |= cpu_to_le16(seq_number);
- seq_number += 0x10;
- /* aggregation is on for this <sta,tid> */
- if (info->flags & IEEE80211_TX_CTL_AMPDU &&
- priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
- txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
- }
- }
-
- txq = &priv->txq[txq_id];
- swq_id = txq->swq_id;
- q = &txq->q;
-
- if (unlikely(iwl_queue_space(q) < q->high_mark))
- goto drop_unlock;
-
- if (ieee80211_is_data_qos(fc))
- priv->stations[sta_id].tid[tid].tfds_in_queue++;
-
- /* Set up driver data for this TFD */
- memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
- txq->txb[q->write_ptr].skb[0] = skb;
-
- /* Set up first empty entry in queue's array of Tx/cmd buffers */
- out_cmd = txq->cmd[q->write_ptr];
- out_meta = &txq->meta[q->write_ptr];
- tx_cmd = &out_cmd->cmd.tx;
- memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
- memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
-
- /*
- * Set up the Tx-command (not MAC!) header.
- * Store the chosen Tx queue and TFD index within the sequence field;
- * after Tx, uCode's Tx response will return this value so driver can
- * locate the frame within the tx queue and do post-tx processing.
- */
- out_cmd->hdr.cmd = REPLY_TX;
- out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
- INDEX_TO_SEQ(q->write_ptr)));
-
- /* Copy MAC header from skb into command buffer */
- memcpy(tx_cmd->hdr, hdr, hdr_len);
-
-
- /* Total # bytes to be transmitted */
- len = (u16)skb->len;
- tx_cmd->len = cpu_to_le16(len);
-
- if (info->control.hw_key)
- iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
-
- /* TODO need this for burst mode later on */
- iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
- iwl_dbg_log_tx_data_frame(priv, len, hdr);
-
- /* set is_hcca to 0; it probably will never be implemented */
- iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, 0);
-
- iwl_update_stats(priv, true, fc, len);
- /*
- * Use the first empty entry in this queue's command buffer array
- * to contain the Tx command and MAC header concatenated together
- * (payload data will be in another buffer).
- * Size of this varies, due to varying MAC header length.
- * If end is not dword aligned, we'll have 2 extra bytes at the end
- * of the MAC header (device reads on dword boundaries).
- * We'll tell device about this padding later.
- */
- len = sizeof(struct iwl_tx_cmd) +
- sizeof(struct iwl_cmd_header) + hdr_len;
-
- len_org = len;
- firstlen = len = (len + 3) & ~3;
-
- if (len_org != len)
- len_org = 1;
- else
- len_org = 0;
-
- /* Tell NIC about any 2-byte padding after MAC header */
- if (len_org)
- tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
-
- /* Physical address of this Tx command's header (not MAC header!),
- * within command buffer array. */
- txcmd_phys = pci_map_single(priv->pci_dev,
- &out_cmd->hdr, len,
- PCI_DMA_BIDIRECTIONAL);
- pci_unmap_addr_set(out_meta, mapping, txcmd_phys);
- pci_unmap_len_set(out_meta, len, len);
- /* Add buffer containing Tx command and MAC(!) header to TFD's
- * first entry */
- priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
- txcmd_phys, len, 1, 0);
-
- if (!ieee80211_has_morefrags(hdr->frame_control)) {
- txq->need_update = 1;
- if (qc)
- priv->stations[sta_id].tid[tid].seq_number = seq_number;
- } else {
- wait_write_ptr = 1;
- txq->need_update = 0;
- }
-
- /* Set up TFD's 2nd entry to point directly to remainder of skb,
- * if any (802.11 null frames have no payload). */
- secondlen = len = skb->len - hdr_len;
- if (len) {
- phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
- len, PCI_DMA_TODEVICE);
- priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
- phys_addr, len,
- 0, 0);
- }
-
- scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
- offsetof(struct iwl_tx_cmd, scratch);
-
- len = sizeof(struct iwl_tx_cmd) +
- sizeof(struct iwl_cmd_header) + hdr_len;
- /* take back ownership of DMA buffer to enable update */
- pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
- len, PCI_DMA_BIDIRECTIONAL);
- tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
- tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
-
- IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n",
- le16_to_cpu(out_cmd->hdr.sequence));
- IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx_cmd->tx_flags));
- iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
- iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
-
- /* Set up entry for this TFD in Tx byte-count array */
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
- priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
- le16_to_cpu(tx_cmd->len));
-
- pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
- len, PCI_DMA_BIDIRECTIONAL);
-
- trace_iwlwifi_dev_tx(priv,
- &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
- sizeof(struct iwl_tfd),
- &out_cmd->hdr, firstlen,
- skb->data + hdr_len, secondlen);
-
- /* Tell device the write index *just past* this latest filled TFD */
- q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
- iwl_txq_update_write_ptr(priv, txq);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /*
- * At this point the frame is "transmitted" successfully
- * and we will get a TX status notification eventually,
- * regardless of the value of ret. "ret" only indicates
- * whether or not we should update the write pointer.
- */
-
- /* avoid atomic ops if it isn't an associated client */
- if (sta_priv && sta_priv->client)
- atomic_inc(&sta_priv->pending_frames);
-
- if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
- if (wait_write_ptr) {
- spin_lock_irqsave(&priv->lock, flags);
- txq->need_update = 1;
- iwl_txq_update_write_ptr(priv, txq);
- spin_unlock_irqrestore(&priv->lock, flags);
- } else {
- iwl_stop_queue(priv, txq->swq_id);
- }
- }
-
- return 0;
-
-drop_unlock:
- spin_unlock_irqrestore(&priv->lock, flags);
- return -1;
-}
-EXPORT_SYMBOL(iwl_tx_skb);
-
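Two small calculations in the removed iwl_tx_skb() are worth spelling out: the command sequence field packs the TX queue number together with the write index so the later TX response can be matched back to its TFD, and the command-plus-MAC-header length is rounded up to a 4-byte boundary, with a flag telling the device when padding was added. A standalone sketch of both, using assumed field widths:

#include <stdint.h>

/* Assumed layout: queue number in the high byte, ring index in the low byte. */
#define SEQ_FROM_QUEUE(q)	(((uint16_t)(q) & 0xff) << 8)
#define SEQ_FROM_INDEX(i)	((uint16_t)(i) & 0xff)

static uint16_t build_sequence(int txq_id, int write_ptr)
{
	return SEQ_FROM_QUEUE(txq_id) | SEQ_FROM_INDEX(write_ptr);
}

/* Round the command + MAC header length up to a dword boundary and report
 * whether padding bytes were introduced (the device reads dwords). */
static uint16_t align_cmd_len(uint16_t len, int *padded)
{
	uint16_t aligned = (len + 3) & ~3;

	*padded = (aligned != len);
	return aligned;
}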
/*************** HOST COMMAND QUEUE FUNCTIONS *****/
/**
@@ -1192,61 +548,6 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
return idx;
}
-static void iwl_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- struct ieee80211_sta *sta;
- struct iwl_station_priv *sta_priv;
-
- sta = ieee80211_find_sta(priv->vif, hdr->addr1);
- if (sta) {
- sta_priv = (void *)sta->drv_priv;
- /* avoid atomic ops if this isn't a client */
- if (sta_priv->client &&
- atomic_dec_return(&sta_priv->pending_frames) == 0)
- ieee80211_sta_block_awake(priv->hw, sta, false);
- }
-
- ieee80211_tx_status_irqsafe(priv->hw, skb);
-}
-
-int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
-{
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct iwl_queue *q = &txq->q;
- struct iwl_tx_info *tx_info;
- int nfreed = 0;
- struct ieee80211_hdr *hdr;
-
- if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
- IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
- "is out of range [0-%d] %d %d.\n", txq_id,
- index, q->n_bd, q->write_ptr, q->read_ptr);
- return 0;
- }
-
- for (index = iwl_queue_inc_wrap(index, q->n_bd);
- q->read_ptr != index;
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
-
- tx_info = &txq->txb[txq->q.read_ptr];
- iwl_tx_status(priv, tx_info->skb[0]);
-
- hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data;
- if (hdr && ieee80211_is_data_qos(hdr->frame_control))
- nfreed++;
- tx_info->skb[0] = NULL;
-
- if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
- priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
-
- priv->cfg->ops->lib->txq_free_tfd(priv, txq);
- }
- return nfreed;
-}
-EXPORT_SYMBOL(iwl_tx_queue_reclaim);
-
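iwl_tx_queue_reclaim() above walks the TX ring's read pointer forward, with wrap-around, until it reaches the index the firmware reports as completed. The core loop reduced to plain C (buffer freeing and per-frame status reporting elided):

/* Advance index by one position in a ring of n_entries slots. */
static inline int queue_inc_wrap(int index, int n_entries)
{
	return (index + 1) % n_entries;
}

/* Reclaim every slot from *read_ptr up to and including hw_index,
 * returning how many slots were released. */
static int reclaim_up_to(int *read_ptr, int hw_index, int n_entries)
{
	int freed = 0;

	for (hw_index = queue_inc_wrap(hw_index, n_entries);
	     *read_ptr != hw_index;
	     *read_ptr = queue_inc_wrap(*read_ptr, n_entries))
		freed++;	/* release the skb/TFD at *read_ptr here */

	return freed;
}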
-
/**
* iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
*
@@ -1340,7 +641,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
if (!(meta->flags & CMD_ASYNC)) {
clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
- IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s \n",
+ IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
get_cmd_string(cmd->hdr.cmd));
wake_up_interruptible(&priv->wait_command_queue);
}
@@ -1348,358 +649,37 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
}
EXPORT_SYMBOL(iwl_tx_cmd_complete);
-/*
- * Find first available (lowest unused) Tx Queue, mark it "active".
- * Called only when finding queue for aggregation.
- * Should never return anything < 7, because they should already
- * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
- */
-static int iwl_txq_ctx_activate_free(struct iwl_priv *priv)
-{
- int txq_id;
-
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
- if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
- return txq_id;
- return -1;
-}
-
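iwl_txq_ctx_activate_free() above claims the first unused TX queue by doing an atomic test-and-set on a bitmask of active queues. The same idea without the atomicity, as a plain-C sketch:

/* Simplified (non-atomic) version of "find the first free queue and mark
 * it active"; the driver does this with an atomic test-and-set. */
static int activate_first_free(unsigned long *active_mask, int max_queues)
{
	int id;

	for (id = 0; id < max_queues; id++) {
		if (!(*active_mask & (1UL << id))) {
			*active_mask |= 1UL << id;	/* mark it active */
			return id;
		}
	}
	return -1;	/* all queues are in use */
}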
-int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
-{
- int sta_id;
- int tx_fifo;
- int txq_id;
- int ret;
- unsigned long flags;
- struct iwl_tid_data *tid_data;
-
- if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
- tx_fifo = default_tid_to_tx_fifo[tid];
- else
- return -EINVAL;
-
- IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
- __func__, ra, tid);
-
- sta_id = iwl_find_station(priv, ra);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Start AGG on invalid station\n");
- return -ENXIO;
- }
- if (unlikely(tid >= MAX_TID_COUNT))
- return -EINVAL;
-
- if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
- IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
- return -ENXIO;
- }
-
- txq_id = iwl_txq_ctx_activate_free(priv);
- if (txq_id == -1) {
- IWL_ERR(priv, "No free aggregation queue available\n");
- return -ENXIO;
- }
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- tid_data = &priv->stations[sta_id].tid[tid];
- *ssn = SEQ_TO_SN(tid_data->seq_number);
- tid_data->agg.txq_id = txq_id;
- priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(tx_fifo, txq_id);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
- sta_id, tid, *ssn);
- if (ret)
- return ret;
-
- if (tid_data->tfds_in_queue == 0) {
- IWL_DEBUG_HT(priv, "HW queue is empty\n");
- tid_data->agg.state = IWL_AGG_ON;
- ieee80211_start_tx_ba_cb_irqsafe(priv->vif, ra, tid);
- } else {
- IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
- tid_data->tfds_in_queue);
- tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
- }
- return ret;
-}
-EXPORT_SYMBOL(iwl_tx_agg_start);
-
-int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
-{
- int tx_fifo_id, txq_id, sta_id, ssn = -1;
- struct iwl_tid_data *tid_data;
- int write_ptr, read_ptr;
- unsigned long flags;
-
- if (!ra) {
- IWL_ERR(priv, "ra = NULL\n");
- return -EINVAL;
- }
-
- if (unlikely(tid >= MAX_TID_COUNT))
- return -EINVAL;
-
- if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
- tx_fifo_id = default_tid_to_tx_fifo[tid];
- else
- return -EINVAL;
-
- sta_id = iwl_find_station(priv, ra);
-
- if (sta_id == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
- return -ENXIO;
- }
-
- if (priv->stations[sta_id].tid[tid].agg.state ==
- IWL_EMPTYING_HW_QUEUE_ADDBA) {
- IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
- ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
- priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
- return 0;
- }
-
- if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
- IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
-
- tid_data = &priv->stations[sta_id].tid[tid];
- ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
- txq_id = tid_data->agg.txq_id;
- write_ptr = priv->txq[txq_id].q.write_ptr;
- read_ptr = priv->txq[txq_id].q.read_ptr;
-
- /* The queue is not empty */
- if (write_ptr != read_ptr) {
- IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
- priv->stations[sta_id].tid[tid].agg.state =
- IWL_EMPTYING_HW_QUEUE_DELBA;
- return 0;
- }
-
- IWL_DEBUG_HT(priv, "HW queue is empty\n");
- priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
-
- spin_lock_irqsave(&priv->lock, flags);
- /*
- * The only reason this call can fail is a queue number out of range,
- * which can happen if uCode is reloaded and all the station
- * information is lost. If it is outside the range, there is no need
- * to deactivate the uCode queue; just return "success" to allow
- * mac80211 to clean up its own data.

- */
- priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
- tx_fifo_id);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_tx_agg_stop);
-
-int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
-{
- struct iwl_queue *q = &priv->txq[txq_id].q;
- u8 *addr = priv->stations[sta_id].sta.sta.addr;
- struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
-
- switch (priv->stations[sta_id].tid[tid].agg.state) {
- case IWL_EMPTYING_HW_QUEUE_DELBA:
- /* We are reclaiming the last packet of the */
- /* aggregated HW queue */
- if ((txq_id == tid_data->agg.txq_id) &&
- (q->read_ptr == q->write_ptr)) {
- u16 ssn = SEQ_TO_SN(tid_data->seq_number);
- int tx_fifo = default_tid_to_tx_fifo[tid];
- IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
- priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
- ssn, tx_fifo);
- tid_data->agg.state = IWL_AGG_OFF;
- ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, addr, tid);
- }
- break;
- case IWL_EMPTYING_HW_QUEUE_ADDBA:
- /* We are reclaiming the last packet of the queue */
- if (tid_data->tfds_in_queue == 0) {
- IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
- tid_data->agg.state = IWL_AGG_ON;
- ieee80211_start_tx_ba_cb_irqsafe(priv->vif, addr, tid);
- }
- break;
- }
- return 0;
-}
-EXPORT_SYMBOL(iwl_txq_check_empty);
-
-/**
- * iwl_tx_status_reply_compressed_ba - Update tx status from block-ack
- *
- * Go through block-ack's bitmap of ACK'd frames, update driver's record of
- * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
- */
-static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
- struct iwl_ht_agg *agg,
- struct iwl_compressed_ba_resp *ba_resp)
-
-{
- int i, sh, ack;
- u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
- u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
- u64 bitmap;
- int successes = 0;
- struct ieee80211_tx_info *info;
-
- if (unlikely(!agg->wait_for_ba)) {
- IWL_ERR(priv, "Received BA when not expected\n");
- return -EINVAL;
- }
-
- /* Mark that the expected block-ack response arrived */
- agg->wait_for_ba = 0;
- IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
-
- /* Calculate shift to align block-ack bits with our Tx window bits */
- sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
- if (sh < 0) /* btw, something is wrong with indices */
- sh += 0x100;
-
- /* don't use 64-bit values for now */
- bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
-
- if (agg->frame_count > (64 - sh)) {
- IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
- return -1;
- }
-
- /* check for success or failure according to the
- * transmitted bitmap and block-ack bitmap */
- bitmap &= agg->bitmap;
-
- /* For each frame attempted in aggregation,
- * update driver's record of tx frame's status. */
- for (i = 0; i < agg->frame_count ; i++) {
- ack = bitmap & (1ULL << i);
- successes += !!ack;
- IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
- ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
- agg->start_idx + i);
- }
-
- info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
- memset(&info->status, 0, sizeof(info->status));
- info->flags |= IEEE80211_TX_STAT_ACK;
- info->flags |= IEEE80211_TX_STAT_AMPDU;
- info->status.ampdu_ack_map = successes;
- info->status.ampdu_ack_len = agg->frame_count;
- iwl_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
-
- IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap);
-
- return 0;
-}
-
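The compressed block-ack handler above shifts the firmware's ACK bitmap so it lines up with the driver's transmit window, masks it with the bitmap of frames actually sent, and counts the set bits. A self-contained sketch of that accounting (the 0x100 wrap constant mirrors the 8-bit index space used here; names are illustrative):

/* Count acknowledged frames given the BA bitmap reported by the device
 * and the bitmap of frames the driver transmitted in this window. */
static int count_acked(unsigned long long ba_bitmap,
		       unsigned long long sent_bitmap,
		       int win_start_idx, int ba_start_idx, int frame_count)
{
	int shift = win_start_idx - ba_start_idx;
	int i, acked = 0;

	if (shift < 0)
		shift += 0x100;		/* index space wrapped around */
	if (shift >= 64 || frame_count > 64 - shift)
		return -1;		/* window and bitmap don't line up */

	ba_bitmap >>= shift;		/* align BA bits with our window */
	ba_bitmap &= sent_bitmap;	/* ignore frames we never sent */

	for (i = 0; i < frame_count; i++)
		if (ba_bitmap & (1ULL << i))
			acked++;

	return acked;
}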
-/**
- * iwl_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
- *
- * Handles block-acknowledge notification from device, which reports success
- * of frames sent via aggregation.
- */
-void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
- struct iwl_tx_queue *txq = NULL;
- struct iwl_ht_agg *agg;
- int index;
- int sta_id;
- int tid;
-
- /* "flow" corresponds to Tx queue */
- u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
-
- /* "ssn" is start of block-ack Tx window, corresponds to index
- * (in Tx queue's circular buffer) of first TFD/frame in window */
- u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
-
- if (scd_flow >= priv->hw_params.max_txq_num) {
- IWL_ERR(priv,
- "BUG_ON scd_flow is bigger than number of queues\n");
- return;
- }
-
- txq = &priv->txq[scd_flow];
- sta_id = ba_resp->sta_id;
- tid = ba_resp->tid;
- agg = &priv->stations[sta_id].tid[tid].agg;
-
- /* Find index just before block-ack window */
- index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
-
- /* TODO: Need to get this copy more safely - now good for debug */
-
- IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
- "sta_id = %d\n",
- agg->wait_for_ba,
- (u8 *) &ba_resp->sta_addr_lo32,
- ba_resp->sta_id);
- IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
- "%d, scd_ssn = %d\n",
- ba_resp->tid,
- ba_resp->seq_ctl,
- (unsigned long long)le64_to_cpu(ba_resp->bitmap),
- ba_resp->scd_flow,
- ba_resp->scd_ssn);
- IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx \n",
- agg->start_idx,
- (unsigned long long)agg->bitmap);
-
- /* Update driver's record of ACK vs. not for each frame in window */
- iwl_tx_status_reply_compressed_ba(priv, agg, ba_resp);
-
- /* Release all TFDs before the SSN, i.e. all TFDs in front of
- * block-ack window (we assume that they've been successfully
- * transmitted ... if not, it's too late anyway). */
- if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
- /* calculate mac80211 ampdu sw queue to wake */
- int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
- iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
-
- if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
- priv->mac80211_registered &&
- (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
- iwl_wake_queue(priv, txq->swq_id);
-
- iwl_txq_check_empty(priv, sta_id, tid, scd_flow);
- }
-}
-EXPORT_SYMBOL(iwl_rx_reply_compressed_ba);
-
#ifdef CONFIG_IWLWIFI_DEBUG
-#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
+#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
+#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
const char *iwl_get_tx_fail_reason(u32 status)
{
switch (status & TX_STATUS_MSK) {
case TX_STATUS_SUCCESS:
return "SUCCESS";
- TX_STATUS_ENTRY(SHORT_LIMIT);
- TX_STATUS_ENTRY(LONG_LIMIT);
- TX_STATUS_ENTRY(FIFO_UNDERRUN);
- TX_STATUS_ENTRY(MGMNT_ABORT);
- TX_STATUS_ENTRY(NEXT_FRAG);
- TX_STATUS_ENTRY(LIFE_EXPIRE);
- TX_STATUS_ENTRY(DEST_PS);
- TX_STATUS_ENTRY(ABORTED);
- TX_STATUS_ENTRY(BT_RETRY);
- TX_STATUS_ENTRY(STA_INVALID);
- TX_STATUS_ENTRY(FRAG_DROPPED);
- TX_STATUS_ENTRY(TID_DISABLE);
- TX_STATUS_ENTRY(FRAME_FLUSHED);
- TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
- TX_STATUS_ENTRY(TX_LOCKED);
- TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
+ TX_STATUS_POSTPONE(DELAY);
+ TX_STATUS_POSTPONE(FEW_BYTES);
+ TX_STATUS_POSTPONE(BT_PRIO);
+ TX_STATUS_POSTPONE(QUIET_PERIOD);
+ TX_STATUS_POSTPONE(CALC_TTAK);
+ TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
+ TX_STATUS_FAIL(SHORT_LIMIT);
+ TX_STATUS_FAIL(LONG_LIMIT);
+ TX_STATUS_FAIL(FIFO_UNDERRUN);
+ TX_STATUS_FAIL(DRAIN_FLOW);
+ TX_STATUS_FAIL(RFKILL_FLUSH);
+ TX_STATUS_FAIL(LIFE_EXPIRE);
+ TX_STATUS_FAIL(DEST_PS);
+ TX_STATUS_FAIL(HOST_ABORTED);
+ TX_STATUS_FAIL(BT_RETRY);
+ TX_STATUS_FAIL(STA_INVALID);
+ TX_STATUS_FAIL(FRAG_DROPPED);
+ TX_STATUS_FAIL(TID_DISABLE);
+ TX_STATUS_FAIL(FIFO_FLUSHED);
+ TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
+ TX_STATUS_FAIL(FW_DROP);
+ TX_STATUS_FAIL(STA_COLOR_MISMATCH_DROP);
}
return "UNKNOWN";
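The TX_STATUS_FAIL/TX_STATUS_POSTPONE macros added above rely on the preprocessor's stringize operator to turn each case label into its printable name. A generic, runnable example of the same pattern:

#include <stdio.h>

/* '#' turns the macro argument into a string literal, '##' pastes it
 * onto the enum prefix to form the case label. */
#define ERR_CASE(x) case ERR_ ## x: return #x

enum { ERR_TIMEOUT = 1, ERR_OVERRUN = 2 };

static const char *err_name(int code)
{
	switch (code) {
	ERR_CASE(TIMEOUT);
	ERR_CASE(OVERRUN);
	}
	return "UNKNOWN";
}

int main(void)
{
	printf("%s\n", err_name(ERR_OVERRUN));	/* prints "OVERRUN" */
	return 0;
}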
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index b55e4f39a9e1..935e64311d39 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -352,11 +352,11 @@ static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
static void iwl3945_unset_hw_params(struct iwl_priv *priv)
{
- if (priv->shared_virt)
+ if (priv->_3945.shared_virt)
dma_free_coherent(&priv->pci_dev->dev,
sizeof(struct iwl3945_shared),
- priv->shared_virt,
- priv->shared_phys);
+ priv->_3945.shared_virt,
+ priv->_3945.shared_phys);
}
static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
@@ -505,24 +505,15 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif
- /* drop all non-injected data frame if we are not associated */
- if (ieee80211_is_data(fc) &&
- !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
- (!iwl_is_associated(priv) ||
- ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id))) {
- IWL_DEBUG_DROP(priv, "Dropping - !iwl_is_associated\n");
- goto drop_unlock;
- }
-
spin_unlock_irqrestore(&priv->lock, flags);
hdr_len = ieee80211_hdrlen(fc);
- /* Find (or create) index into station table for destination station */
- if (info->flags & IEEE80211_TX_CTL_INJECTED)
+ /* Find index into station table for destination station */
+ if (!info->control.sta)
sta_id = priv->hw_params.bcast_sta_id;
else
- sta_id = iwl_get_sta_id(priv, hdr);
+ sta_id = iwl_sta_id(info->control.sta);
if (sta_id == IWL_INVALID_STATION) {
IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
hdr->addr1);
@@ -607,9 +598,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
txq->need_update = 0;
}
- IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n",
+ IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
le16_to_cpu(out_cmd->hdr.sequence));
- IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx_cmd->tx_flags));
+ IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd));
iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr,
ieee80211_hdrlen(fc));
@@ -754,7 +745,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
if (iwl_is_associated(priv))
add_time =
iwl3945_usecs_to_beacons(
- le64_to_cpu(params->start_time) - priv->last_tsf,
+ le64_to_cpu(params->start_time) - priv->_3945.last_tsf,
le16_to_cpu(priv->rxon_timing.beacon_interval));
memset(&spectrum, 0, sizeof(spectrum));
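The measurement path above converts the offset between the requested start time and the last TSF into whole beacon intervals. As a reminder of the units involved, a hedged sketch (beacon intervals are expressed in TU, and 1 TU = 1024 µs; the exact rounding the driver applies may differ):

#include <stdint.h>

/* Convert a microsecond offset into whole beacon intervals.
 * beacon_interval is in TU (time units); 1 TU = 1024 microseconds. */
static uint32_t usecs_to_beacons(uint64_t usec, uint16_t beacon_interval)
{
	uint64_t interval_usec = (uint64_t)beacon_interval * 1024;

	if (!interval_usec)
		return 0;
	return (uint32_t)(usec / interval_usec);
}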
@@ -768,7 +759,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
if (iwl_is_associated(priv))
spectrum.start_time =
- iwl3945_add_beacon_time(priv->last_beacon_time,
+ iwl3945_add_beacon_time(priv->_3945.last_beacon_time,
add_time,
le16_to_cpu(priv->rxon_timing.beacon_interval));
else
@@ -966,7 +957,7 @@ static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
* statistics request from the host as well as for the periodic
* statistics notifications (after received beacons) from the uCode.
*/
- priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_hw_rx_statistics;
+ priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_reply_statistics;
priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;
iwl_setup_rx_scan_handlers(priv);
@@ -1613,9 +1604,6 @@ static int iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
return pos;
}
-/* For sanity check only. Actual size is determined by uCode, typ. 512 */
-#define IWL3945_MAX_EVENT_LOG_SIZE (512)
-
#define DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES (20)
int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
@@ -1642,16 +1630,16 @@ int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
- if (capacity > IWL3945_MAX_EVENT_LOG_SIZE) {
+ if (capacity > priv->cfg->max_event_log_size) {
IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
- capacity, IWL3945_MAX_EVENT_LOG_SIZE);
- capacity = IWL3945_MAX_EVENT_LOG_SIZE;
+ capacity, priv->cfg->max_event_log_size);
+ capacity = priv->cfg->max_event_log_size;
}
- if (next_entry > IWL3945_MAX_EVENT_LOG_SIZE) {
+ if (next_entry > priv->cfg->max_event_log_size) {
IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
- next_entry, IWL3945_MAX_EVENT_LOG_SIZE);
- next_entry = IWL3945_MAX_EVENT_LOG_SIZE;
+ next_entry, priv->cfg->max_event_log_size);
+ next_entry = priv->cfg->max_event_log_size;
}
size = num_wraps ? capacity : next_entry;
@@ -1860,7 +1848,8 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
enum ieee80211_band band,
u8 is_active, u8 n_probes,
- struct iwl3945_scan_channel *scan_ch)
+ struct iwl3945_scan_channel *scan_ch,
+ struct ieee80211_vif *vif)
{
struct ieee80211_channel *chan;
const struct ieee80211_supported_band *sband;
@@ -1874,7 +1863,7 @@ static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
return 0;
active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
- passive_dwell = iwl_get_passive_dwell_time(priv, band);
+ passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);
if (passive_dwell <= active_dwell)
passive_dwell = active_dwell + 1;
@@ -1947,7 +1936,7 @@ static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
added++;
}
- IWL_DEBUG_SCAN(priv, "total channels to scan %d \n", added);
+ IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
return added;
}
@@ -2122,6 +2111,28 @@ static void iwl3945_nic_start(struct iwl_priv *priv)
iwl_write32(priv, CSR_RESET, 0);
}
+#define IWL3945_UCODE_GET(item) \
+static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode)\
+{ \
+ return le32_to_cpu(ucode->u.v1.item); \
+}
+
+static u32 iwl3945_ucode_get_header_size(u32 api_ver)
+{
+ return 24;
+}
+
+static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode)
+{
+ return (u8 *) ucode->u.v1.data;
+}
+
+IWL3945_UCODE_GET(inst_size);
+IWL3945_UCODE_GET(data_size);
+IWL3945_UCODE_GET(init_size);
+IWL3945_UCODE_GET(init_data_size);
+IWL3945_UCODE_GET(boot_size);
+
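The IWL3945_UCODE_GET macro added above generates one accessor per firmware-header field by token-pasting the field name into the function name. A stripped-down, standalone example of the same code-generation trick (struct and field names are illustrative; endianness conversion is omitted):

#include <stdint.h>

struct fw_header { uint32_t inst_size; uint32_t data_size; };

/* "##" glues the field name into the generated function name. */
#define FW_GET(field) \
static uint32_t fw_get_##field(const struct fw_header *h) \
{ \
	return h->field; \
}

FW_GET(inst_size)
FW_GET(data_size)

/* usage: fw_get_inst_size(&hdr), fw_get_data_size(&hdr) */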
/**
* iwl3945_read_ucode - Read uCode images from disk file.
*
@@ -2170,7 +2181,7 @@ static int iwl3945_read_ucode(struct iwl_priv *priv)
goto error;
/* Make sure that we got at least our header! */
- if (ucode_raw->size < priv->cfg->ops->ucode->get_header_size(1)) {
+ if (ucode_raw->size < iwl3945_ucode_get_header_size(1)) {
IWL_ERR(priv, "File size way too small!\n");
ret = -EINVAL;
goto err_release;
@@ -2181,13 +2192,12 @@ static int iwl3945_read_ucode(struct iwl_priv *priv)
priv->ucode_ver = le32_to_cpu(ucode->ver);
api_ver = IWL_UCODE_API(priv->ucode_ver);
- inst_size = priv->cfg->ops->ucode->get_inst_size(ucode, api_ver);
- data_size = priv->cfg->ops->ucode->get_data_size(ucode, api_ver);
- init_size = priv->cfg->ops->ucode->get_init_size(ucode, api_ver);
- init_data_size =
- priv->cfg->ops->ucode->get_init_data_size(ucode, api_ver);
- boot_size = priv->cfg->ops->ucode->get_boot_size(ucode, api_ver);
- src = priv->cfg->ops->ucode->get_data(ucode, api_ver);
+ inst_size = iwl3945_ucode_get_inst_size(ucode);
+ data_size = iwl3945_ucode_get_data_size(ucode);
+ init_size = iwl3945_ucode_get_init_size(ucode);
+ init_data_size = iwl3945_ucode_get_init_data_size(ucode);
+ boot_size = iwl3945_ucode_get_boot_size(ucode);
+ src = iwl3945_ucode_get_data(ucode);
/* api_ver should match the api version forming part of the
* firmware filename ... but we don't check for that and only rely
@@ -2236,7 +2246,7 @@ static int iwl3945_read_ucode(struct iwl_priv *priv)
/* Verify size of file vs. image size info in file's header */
- if (ucode_raw->size != priv->cfg->ops->ucode->get_header_size(api_ver) +
+ if (ucode_raw->size != iwl3945_ucode_get_header_size(api_ver) +
inst_size + data_size + init_size +
init_data_size + boot_size) {
@@ -2490,8 +2500,6 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
goto restart;
}
- iwl_clear_stations_table(priv);
-
rfkill = iwl_read_prph(priv, APMG_RFKILL_REG);
IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill);
@@ -2513,13 +2521,19 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
/* After the ALIVE response, we can send commands to 3945 uCode */
set_bit(STATUS_ALIVE, &priv->status);
+ if (priv->cfg->ops->lib->recover_from_tx_stall) {
+ /* Enable timer to monitor the driver queues */
+ mod_timer(&priv->monitor_recover,
+ jiffies +
+ msecs_to_jiffies(priv->cfg->monitor_recover_period));
+ }
+
if (iwl_is_rfkill(priv))
return;
ieee80211_wake_queues(priv->hw);
- priv->active_rate = priv->rates_mask;
- priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
+ priv->active_rate = IWL_RATES_MASK;
iwl_power_update_mode(priv, true);
@@ -2531,11 +2545,11 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
} else {
/* Initialize our rx_config data */
- iwl_connection_init_rx_config(priv, priv->iw_mode);
+ iwl_connection_init_rx_config(priv, NULL);
}
/* Configure Bluetooth device coexistence support */
- iwl_send_bt_config(priv);
+ priv->cfg->ops->hcmd->send_bt_config(priv);
/* Configure the adapter for unassociated operation */
iwlcore_commit_rxon(priv);
@@ -2548,17 +2562,6 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
set_bit(STATUS_READY, &priv->status);
wake_up_interruptible(&priv->wait_command_queue);
- /* reassociate for ADHOC mode */
- if (priv->vif && (priv->iw_mode == NL80211_IFTYPE_ADHOC)) {
- struct sk_buff *beacon = ieee80211_beacon_get(priv->hw,
- priv->vif);
- if (beacon)
- iwl_mac_beacon_update(priv->hw, beacon);
- }
-
- if (test_and_clear_bit(STATUS_MODE_PENDING, &priv->status))
- iwl_set_mode(priv, priv->iw_mode);
-
return;
restart:
@@ -2580,7 +2583,10 @@ static void __iwl3945_down(struct iwl_priv *priv)
if (!exit_pending)
set_bit(STATUS_EXIT_PENDING, &priv->status);
- iwl_clear_stations_table(priv);
+ /* Station information will now be cleared in device */
+ iwl_clear_ucode_stations(priv);
+ iwl_dealloc_bcast_station(priv);
+ iwl_clear_driver_stations(priv);
/* Unblock any waiting calls */
wake_up_interruptible_all(&priv->wait_command_queue);
@@ -2661,6 +2667,10 @@ static int __iwl3945_up(struct iwl_priv *priv)
{
int rc, i;
+ rc = iwl_alloc_bcast_station(priv, false);
+ if (rc)
+ return rc;
+
if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
return -EIO;
@@ -2714,12 +2724,10 @@ static int __iwl3945_up(struct iwl_priv *priv)
for (i = 0; i < MAX_HW_RESTARTS; i++) {
- iwl_clear_stations_table(priv);
-
/* load bootstrap state machine,
* load bootstrap program into processor's memory,
* prepare to load the "initialize" uCode */
- priv->cfg->ops->lib->load_ucode(priv);
+ rc = priv->cfg->ops->lib->load_ucode(priv);
if (rc) {
IWL_ERR(priv,
@@ -2787,7 +2795,7 @@ static void iwl3945_bg_alive_start(struct work_struct *data)
static void iwl3945_rfkill_poll(struct work_struct *data)
{
struct iwl_priv *priv =
- container_of(data, struct iwl_priv, rfkill_poll.work);
+ container_of(data, struct iwl_priv, _3945.rfkill_poll.work);
bool old_rfkill = test_bit(STATUS_RF_KILL_HW, &priv->status);
bool new_rfkill = !(iwl_read32(priv, CSR_GP_CNTRL)
& CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
@@ -2806,22 +2814,18 @@ static void iwl3945_rfkill_poll(struct work_struct *data)
/* Keep this running, even if radio now enabled. This will be
* cancelled in mac_start() if system decides to start again */
- queue_delayed_work(priv->workqueue, &priv->rfkill_poll,
+ queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
round_jiffies_relative(2 * HZ));
}
-#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ)
-static void iwl3945_bg_request_scan(struct work_struct *data)
+void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, request_scan);
struct iwl_host_cmd cmd = {
.id = REPLY_SCAN_CMD,
.len = sizeof(struct iwl3945_scan_cmd),
.flags = CMD_SIZE_HUGE,
};
- int rc = 0;
struct iwl3945_scan_cmd *scan;
struct ieee80211_conf *conf = NULL;
u8 n_probes = 0;
@@ -2830,8 +2834,6 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
conf = ieee80211_get_hw_conf(priv->hw);
- mutex_lock(&priv->mutex);
-
cancel_delayed_work(&priv->scan_check);
if (!iwl_is_ready(priv)) {
@@ -2849,7 +2851,6 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
if (test_bit(STATUS_SCAN_HW, &priv->status)) {
IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests "
"Ignoring second request.\n");
- rc = -EIO;
goto done;
}
@@ -2875,20 +2876,15 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
goto done;
}
- if (!priv->scan_bands) {
- IWL_DEBUG_HC(priv, "Aborting scan due to no requested bands\n");
- goto done;
- }
-
- if (!priv->scan) {
- priv->scan = kmalloc(sizeof(struct iwl3945_scan_cmd) +
- IWL_MAX_SCAN_SIZE, GFP_KERNEL);
- if (!priv->scan) {
- rc = -ENOMEM;
+ if (!priv->scan_cmd) {
+ priv->scan_cmd = kmalloc(sizeof(struct iwl3945_scan_cmd) +
+ IWL_MAX_SCAN_SIZE, GFP_KERNEL);
+ if (!priv->scan_cmd) {
+ IWL_DEBUG_SCAN(priv, "Fail to allocate scan memory\n");
goto done;
}
}
- scan = priv->scan;
+ scan = priv->scan_cmd;
memset(scan, 0, sizeof(struct iwl3945_scan_cmd) + IWL_MAX_SCAN_SIZE);
scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
@@ -2904,7 +2900,7 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
spin_lock_irqsave(&priv->lock, flags);
- interval = priv->beacon_int;
+ interval = vif ? vif->bss_conf.beacon_int : 0;
spin_unlock_irqrestore(&priv->lock, flags);
scan->suspend_time = 0;
@@ -2927,7 +2923,9 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
scan_suspend_time, interval);
}
- if (priv->scan_request->n_ssids) {
+ if (priv->is_internal_short_scan) {
+ IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
+ } else if (priv->scan_request->n_ssids) {
int i, p = 0;
IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
for (i = 0; i < priv->scan_request->n_ssids; i++) {
@@ -2955,41 +2953,49 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
/* flags + rate selection */
- if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) {
+ switch (priv->scan_band) {
+ case IEEE80211_BAND_2GHZ:
scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
scan->tx_cmd.rate = IWL_RATE_1M_PLCP;
scan->good_CRC_th = 0;
band = IEEE80211_BAND_2GHZ;
- } else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) {
+ break;
+ case IEEE80211_BAND_5GHZ:
scan->tx_cmd.rate = IWL_RATE_6M_PLCP;
/*
* If active scanning is requested but a certain channel
* is marked passive, we can do active scanning if we
* detect transmissions.
*/
- scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH : 0;
+ scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
+ IWL_GOOD_CRC_TH_DISABLED;
band = IEEE80211_BAND_5GHZ;
- } else {
- IWL_WARN(priv, "Invalid scan band count\n");
+ break;
+ default:
+ IWL_WARN(priv, "Invalid scan band\n");
goto done;
}
- scan->tx_cmd.len = cpu_to_le16(
+ if (!priv->is_internal_short_scan) {
+ scan->tx_cmd.len = cpu_to_le16(
iwl_fill_probe_req(priv,
(struct ieee80211_mgmt *)scan->data,
priv->scan_request->ie,
priv->scan_request->ie_len,
IWL_MAX_SCAN_SIZE - sizeof(*scan)));
-
+ } else {
+ scan->tx_cmd.len = cpu_to_le16(
+ iwl_fill_probe_req(priv,
+ (struct ieee80211_mgmt *)scan->data,
+ NULL, 0,
+ IWL_MAX_SCAN_SIZE - sizeof(*scan)));
+ }
/* select Rx antennas */
scan->flags |= iwl3945_get_antenna_flags(priv);
- if (iwl_is_monitor_mode(priv))
- scan->filter_flags = RXON_FILTER_PROMISC_MSK;
-
scan->channel_count =
iwl3945_get_channels_for_scan(priv, band, is_active, n_probes,
- (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
+ (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)], vif);
if (scan->channel_count == 0) {
IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
@@ -3002,14 +3008,12 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
scan->len = cpu_to_le16(cmd.len);
set_bit(STATUS_SCAN_HW, &priv->status);
- rc = iwl_send_cmd_sync(priv, &cmd);
- if (rc)
+ if (iwl_send_cmd_sync(priv, &cmd))
goto done;
queue_delayed_work(priv->workqueue, &priv->scan_check,
IWL_SCAN_CHECK_WATCHDOG);
- mutex_unlock(&priv->mutex);
return;
done:
@@ -3023,7 +3027,6 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
/* inform mac80211 scan aborted */
queue_work(priv->workqueue, &priv->scan_completed);
- mutex_unlock(&priv->mutex);
}
static void iwl3945_bg_restart(struct work_struct *data)
@@ -3065,28 +3068,25 @@ static void iwl3945_bg_rx_replenish(struct work_struct *data)
mutex_unlock(&priv->mutex);
}
-#define IWL_DELAY_NEXT_SCAN (HZ*2)
-
-void iwl3945_post_associate(struct iwl_priv *priv)
+void iwl3945_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
int rc = 0;
struct ieee80211_conf *conf = NULL;
- if (priv->iw_mode == NL80211_IFTYPE_AP) {
+ if (!vif || !priv->is_open)
+ return;
+
+ if (vif->type == NL80211_IFTYPE_AP) {
IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
return;
}
-
IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
- priv->assoc_id, priv->active_rxon.bssid_addr);
+ vif->bss_conf.aid, priv->active_rxon.bssid_addr);
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
- if (!priv->vif || !priv->is_open)
- return;
-
iwl_scan_cancel_timeout(priv, 200);
conf = ieee80211_get_hw_conf(priv->hw);
@@ -3095,7 +3095,7 @@ void iwl3945_post_associate(struct iwl_priv *priv)
iwlcore_commit_rxon(priv);
memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd));
- iwl_setup_rxon_timing(priv);
+ iwl_setup_rxon_timing(priv, vif);
rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
sizeof(priv->rxon_timing), &priv->rxon_timing);
if (rc)
@@ -3104,57 +3104,40 @@ void iwl3945_post_associate(struct iwl_priv *priv)
priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
- priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
+ priv->staging_rxon.assoc_id = cpu_to_le16(vif->bss_conf.aid);
IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
- priv->assoc_id, priv->beacon_int);
+ vif->bss_conf.aid, vif->bss_conf.beacon_int);
- if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
+ if (vif->bss_conf.assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
else
priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
- if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
+ if (vif->bss_conf.assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
else
priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
+ if (vif->type == NL80211_IFTYPE_ADHOC)
priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
-
}
iwlcore_commit_rxon(priv);
- switch (priv->iw_mode) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
iwl3945_rate_scale_init(priv->hw, IWL_AP_ID);
break;
-
case NL80211_IFTYPE_ADHOC:
-
- priv->assoc_id = 1;
- iwl_add_station(priv, priv->bssid, 0, CMD_SYNC, NULL);
- iwl3945_sync_sta(priv, IWL_STA_ID,
- (priv->band == IEEE80211_BAND_5GHZ) ?
- IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP,
- CMD_ASYNC);
- iwl3945_rate_scale_init(priv->hw, IWL_STA_ID);
iwl3945_send_beacon_cmd(priv);
-
break;
-
default:
- IWL_ERR(priv, "%s Should not be called in %d mode\n",
- __func__, priv->iw_mode);
+ IWL_ERR(priv, "%s Should not be called in %d mode\n",
+ __func__, vif->type);
break;
}
-
- iwl_activate_qos(priv, 0);
-
- /* we have just associated, don't start scan too early */
- priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN;
}
/*****************************************************************************
@@ -3213,7 +3196,7 @@ static int iwl3945_mac_start(struct ieee80211_hw *hw)
/* ucode is running and will send rfkill notifications,
* no need to poll the killswitch state anymore */
- cancel_delayed_work(&priv->rfkill_poll);
+ cancel_delayed_work(&priv->_3945.rfkill_poll);
iwl_led_start(priv);
@@ -3254,7 +3237,7 @@ static void iwl3945_mac_stop(struct ieee80211_hw *hw)
flush_workqueue(priv->workqueue);
/* start polling the killswitch state again */
- queue_delayed_work(priv->workqueue, &priv->rfkill_poll,
+ queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
round_jiffies_relative(2 * HZ));
IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -3276,7 +3259,7 @@ static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
return NETDEV_TX_OK;
}
-void iwl3945_config_ap(struct iwl_priv *priv)
+void iwl3945_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
int rc = 0;
@@ -3292,7 +3275,7 @@ void iwl3945_config_ap(struct iwl_priv *priv)
/* RXON Timing */
memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd));
- iwl_setup_rxon_timing(priv);
+ iwl_setup_rxon_timing(priv, vif);
rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
sizeof(priv->rxon_timing),
&priv->rxon_timing);
@@ -3300,9 +3283,10 @@ void iwl3945_config_ap(struct iwl_priv *priv)
IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
"Attempting to continue.\n");
- /* FIXME: what should be the assoc_id for AP? */
- priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
- if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
+ priv->staging_rxon.assoc_id = 0;
+
+ if (vif->bss_conf.assoc_capability &
+ WLAN_CAPABILITY_SHORT_PREAMBLE)
priv->staging_rxon.flags |=
RXON_FLG_SHORT_PREAMBLE_MSK;
else
@@ -3310,22 +3294,21 @@ void iwl3945_config_ap(struct iwl_priv *priv)
~RXON_FLG_SHORT_PREAMBLE_MSK;
if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
- if (priv->assoc_capability &
- WLAN_CAPABILITY_SHORT_SLOT_TIME)
+ if (vif->bss_conf.assoc_capability &
+ WLAN_CAPABILITY_SHORT_SLOT_TIME)
priv->staging_rxon.flags |=
RXON_FLG_SHORT_SLOT_MSK;
else
priv->staging_rxon.flags &=
~RXON_FLG_SHORT_SLOT_MSK;
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
+ if (vif->type == NL80211_IFTYPE_ADHOC)
priv->staging_rxon.flags &=
~RXON_FLG_SHORT_SLOT_MSK;
}
/* restore RXON assoc */
priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
iwlcore_commit_rxon(priv);
- iwl_add_station(priv, iwl_bcast_addr, 0, CMD_SYNC, NULL);
}
iwl3945_send_beacon_cmd(priv);
@@ -3340,7 +3323,6 @@ static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_key_conf *key)
{
struct iwl_priv *priv = hw->priv;
- const u8 *addr;
int ret = 0;
u8 sta_id = IWL_INVALID_STATION;
u8 static_key;
@@ -3352,21 +3334,24 @@ static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return -EOPNOTSUPP;
}
- addr = sta ? sta->addr : iwl_bcast_addr;
static_key = !iwl_is_associated(priv);
if (!static_key) {
- sta_id = iwl_find_station(priv, addr);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_DEBUG_MAC80211(priv, "leave - %pM not in station map.\n",
- addr);
- return -EINVAL;
+ if (!sta) {
+ sta_id = priv->hw_params.bcast_sta_id;
+ } else {
+ sta_id = iwl_sta_id(sta);
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_DEBUG_MAC80211(priv,
+ "leave - %pM not in station map.\n",
+ sta->addr);
+ return -EINVAL;
+ }
}
}
mutex_lock(&priv->mutex);
iwl_scan_cancel_timeout(priv, 100);
- mutex_unlock(&priv->mutex);
switch (cmd) {
case SET_KEY:
@@ -3387,11 +3372,45 @@ static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
ret = -EINVAL;
}
+ mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
return ret;
}
+static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl3945_sta_priv *sta_priv = (void *)sta->drv_priv;
+ int ret;
+ bool is_ap = vif->type == NL80211_IFTYPE_STATION;
+ u8 sta_id;
+
+ sta_priv->common.sta_id = IWL_INVALID_STATION;
+
+ IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
+ sta->addr);
+
+ ret = iwl_add_station_common(priv, sta->addr, is_ap, &sta->ht_cap,
+ &sta_id);
+ if (ret) {
+ IWL_ERR(priv, "Unable to add station %pM (%d)\n",
+ sta->addr, ret);
+ /* Should we return success if return code is EEXIST ? */
+ return ret;
+ }
+
+ sta_priv->common.sta_id = sta_id;
+
+ /* Initialize rate scaling */
+ IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
+ sta->addr);
+ iwl3945_rs_rate_init(priv, sta, sta_id);
+
+ return 0;
+}
/*****************************************************************************
*
* sysfs attributes
@@ -3591,7 +3610,7 @@ static ssize_t store_measurement(struct device *d,
struct iwl_priv *priv = dev_get_drvdata(d);
struct ieee80211_measurement_params params = {
.channel = le16_to_cpu(priv->active_rxon.channel),
- .start_time = cpu_to_le64(priv->last_tsf),
+ .start_time = cpu_to_le64(priv->_3945.last_tsf),
.duration = cpu_to_le16(1),
};
u8 type = IWL_MEASURE_BASIC;
@@ -3655,44 +3674,6 @@ static ssize_t show_channels(struct device *d,
static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
-static ssize_t show_statistics(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- u32 size = sizeof(struct iwl3945_notif_statistics);
- u32 len = 0, ofs = 0;
- u8 *data = (u8 *)&priv->statistics_39;
- int rc = 0;
-
- if (!iwl_is_alive(priv))
- return -EAGAIN;
-
- mutex_lock(&priv->mutex);
- rc = iwl_send_statistics_request(priv, CMD_SYNC, false);
- mutex_unlock(&priv->mutex);
-
- if (rc) {
- len = sprintf(buf,
- "Error sending statistics request: 0x%08X\n", rc);
- return len;
- }
-
- while (size && (PAGE_SIZE - len)) {
- hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
- PAGE_SIZE - len, 1);
- len = strlen(buf);
- if (PAGE_SIZE - len)
- buf[len++] = '\n';
-
- ofs += 16;
- size -= min(size, 16U);
- }
-
- return len;
-}
-
-static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL);
-
static ssize_t show_antenna(struct device *d,
struct device_attribute *attr, char *buf)
{
@@ -3774,14 +3755,21 @@ static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update);
INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
- INIT_DELAYED_WORK(&priv->rfkill_poll, iwl3945_rfkill_poll);
+ INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll);
INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed);
- INIT_WORK(&priv->request_scan, iwl3945_bg_request_scan);
INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan);
+ INIT_WORK(&priv->start_internal_scan, iwl_bg_start_internal_scan);
INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check);
iwl3945_hw_setup_deferred_work(priv);
+ if (priv->cfg->ops->lib->recover_from_tx_stall) {
+ init_timer(&priv->monitor_recover);
+ priv->monitor_recover.data = (unsigned long)priv;
+ priv->monitor_recover.function =
+ priv->cfg->ops->lib->recover_from_tx_stall;
+ }
+
tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
iwl3945_irq_tasklet, (unsigned long)priv);
}
@@ -3793,7 +3781,10 @@ static void iwl3945_cancel_deferred_work(struct iwl_priv *priv)
cancel_delayed_work_sync(&priv->init_alive_start);
cancel_delayed_work(&priv->scan_check);
cancel_delayed_work(&priv->alive_start);
+ cancel_work_sync(&priv->start_internal_scan);
cancel_work_sync(&priv->beacon_update);
+ if (priv->cfg->ops->lib->recover_from_tx_stall)
+ del_timer_sync(&priv->monitor_recover);
}
static struct attribute *iwl3945_sysfs_entries[] = {
@@ -3804,7 +3795,6 @@ static struct attribute *iwl3945_sysfs_entries[] = {
&dev_attr_filter_flags.attr,
&dev_attr_measurement.attr,
&dev_attr_retry_rate.attr,
- &dev_attr_statistics.attr,
&dev_attr_status.attr,
&dev_attr_temperature.attr,
&dev_attr_tx_power.attr,
@@ -3831,7 +3821,9 @@ static struct ieee80211_ops iwl3945_hw_ops = {
.conf_tx = iwl_mac_conf_tx,
.reset_tsf = iwl_mac_reset_tsf,
.bss_info_changed = iwl_bss_info_changed,
- .hw_scan = iwl_mac_hw_scan
+ .hw_scan = iwl_mac_hw_scan,
+ .sta_add = iwl3945_mac_sta_add,
+ .sta_remove = iwl_mac_sta_remove,
};
static int iwl3945_init_drv(struct iwl_priv *priv)
@@ -3850,9 +3842,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
mutex_init(&priv->mutex);
mutex_init(&priv->sync_cmd_mutex);
- /* Clear the driver's (not device's) station table */
- iwl_clear_stations_table(priv);
-
priv->ieee_channels = NULL;
priv->ieee_rates = NULL;
priv->band = IEEE80211_BAND_2GHZ;
@@ -3860,12 +3849,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
priv->iw_mode = NL80211_IFTYPE_STATION;
priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
- iwl_reset_qos(priv);
-
- priv->qos_data.qos_active = 0;
- priv->qos_data.qos_cap.val = 0;
-
- priv->rates_mask = IWL_RATES_MASK;
priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER;
if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
@@ -3901,6 +3884,8 @@ err:
return ret;
}
+#define IWL3945_MAX_PROBE_REQUEST 200
+
static int iwl3945_setup_mac(struct iwl_priv *priv)
{
int ret;
@@ -3908,10 +3893,10 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
hw->rate_control_algorithm = "iwl-3945-rs";
hw->sta_data_size = sizeof(struct iwl3945_sta_priv);
+ hw->vif_data_size = sizeof(struct iwl_vif_priv);
/* Tell mac80211 our characteristics */
hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_NOISE_DBM |
IEEE80211_HW_SPECTRUM_MGMT;
if (!priv->cfg->broken_powersave)
@@ -3927,7 +3912,7 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
/* we create the 802.11 header and a zero-length SSID element */
- hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2;
+ hw->wiphy->max_scan_ie_len = IWL3945_MAX_PROBE_REQUEST - 24 - 2;
/* Default value; 4 EDCA QOS priorities */
hw->queues = 4;
@@ -4130,7 +4115,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
/* Start monitoring the killswitch */
- queue_delayed_work(priv->workqueue, &priv->rfkill_poll,
+ queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
2 * HZ);
return 0;
@@ -4204,7 +4189,7 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
- cancel_delayed_work_sync(&priv->rfkill_poll);
+ cancel_delayed_work_sync(&priv->_3945.rfkill_poll);
iwl3945_dealloc_ucode_pci(priv);
@@ -4213,7 +4198,6 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
iwl3945_hw_txq_ctx_free(priv);
iwl3945_unset_hw_params(priv);
- iwl_clear_stations_table(priv);
/*netif_stop_queue(dev); */
flush_workqueue(priv->workqueue);
@@ -4235,7 +4219,7 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
iwl_free_channel_map(priv);
iwlcore_free_geos(priv);
- kfree(priv->scan);
+ kfree(priv->scan_cmd);
if (priv->ibss_beacon)
dev_kfree_skb(priv->ibss_beacon);
diff --git a/drivers/net/wireless/iwmc3200wifi/Kconfig b/drivers/net/wireless/iwmc3200wifi/Kconfig
index b9d34a766964..03f998d098c5 100644
--- a/drivers/net/wireless/iwmc3200wifi/Kconfig
+++ b/drivers/net/wireless/iwmc3200wifi/Kconfig
@@ -17,7 +17,7 @@ config IWM
config IWM_DEBUG
bool "Enable full debugging output in iwmc3200wifi"
depends on IWM && DEBUG_FS
- ---help---
+ help
This option will enable debug tracing and setting for iwm
You can set the debug level and module through debugfs. By
@@ -30,3 +30,10 @@ config IWM_DEBUG
Or, if you want the full debug, for all modules:
echo 0xff > /sys/kernel/debug/iwm/phyN/debug/level
echo 0xff > /sys/kernel/debug/iwm/phyN/debug/modules
+
+config IWM_TRACING
+ bool "Enable event tracing for iwmc3200wifi"
+ depends on IWM && EVENT_TRACING
+ help
+ Say Y here to trace all the commands and responses between
+ the driver and firmware (including TX/RX frames) with ftrace.
diff --git a/drivers/net/wireless/iwmc3200wifi/Makefile b/drivers/net/wireless/iwmc3200wifi/Makefile
index d34291b652d3..cdc7e07ba113 100644
--- a/drivers/net/wireless/iwmc3200wifi/Makefile
+++ b/drivers/net/wireless/iwmc3200wifi/Makefile
@@ -3,3 +3,8 @@ iwmc3200wifi-objs += main.o netdev.o rx.o tx.o sdio.o hal.o fw.o
iwmc3200wifi-objs += commands.o cfg80211.o eeprom.o
iwmc3200wifi-$(CONFIG_IWM_DEBUG) += debugfs.o
+iwmc3200wifi-$(CONFIG_IWM_TRACING) += trace.o
+
+CFLAGS_trace.o := -I$(src)
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/iwmc3200wifi/bus.h b/drivers/net/wireless/iwmc3200wifi/bus.h
index 836663eec257..62edd5888a7b 100644
--- a/drivers/net/wireless/iwmc3200wifi/bus.h
+++ b/drivers/net/wireless/iwmc3200wifi/bus.h
@@ -31,7 +31,7 @@ struct iwm_if_ops {
int (*disable)(struct iwm_priv *iwm);
int (*send_chunk)(struct iwm_priv *iwm, u8* buf, int count);
- int (*debugfs_init)(struct iwm_priv *iwm, struct dentry *parent_dir);
+ void (*debugfs_init)(struct iwm_priv *iwm, struct dentry *parent_dir);
void (*debugfs_exit)(struct iwm_priv *iwm);
const char *umac_name;
diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
index a1d45cce0ebc..902e95f70f6e 100644
--- a/drivers/net/wireless/iwmc3200wifi/cfg80211.c
+++ b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
@@ -264,7 +264,7 @@ static int iwm_cfg80211_get_station(struct wiphy *wiphy,
int iwm_cfg80211_inform_bss(struct iwm_priv *iwm)
{
struct wiphy *wiphy = iwm_to_wiphy(iwm);
- struct iwm_bss_info *bss, *next;
+ struct iwm_bss_info *bss;
struct iwm_umac_notif_bss_info *umac_bss;
struct ieee80211_mgmt *mgmt;
struct ieee80211_channel *channel;
@@ -272,7 +272,7 @@ int iwm_cfg80211_inform_bss(struct iwm_priv *iwm)
s32 signal;
int freq;
- list_for_each_entry_safe(bss, next, &iwm->bss_list, node) {
+ list_for_each_entry(bss, &iwm->bss_list, node) {
umac_bss = bss->bss;
mgmt = (struct ieee80211_mgmt *)(umac_bss->frame_buf);
@@ -726,23 +726,26 @@ static int iwm_cfg80211_set_power_mgmt(struct wiphy *wiphy,
CFG_POWER_INDEX, iwm->conf.power_index);
}
-int iwm_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
- struct cfg80211_pmksa *pmksa)
+static int iwm_cfg80211_set_pmksa(struct wiphy *wiphy,
+ struct net_device *netdev,
+ struct cfg80211_pmksa *pmksa)
{
struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
return iwm_send_pmkid_update(iwm, pmksa, IWM_CMD_PMKID_ADD);
}
-int iwm_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
- struct cfg80211_pmksa *pmksa)
+static int iwm_cfg80211_del_pmksa(struct wiphy *wiphy,
+ struct net_device *netdev,
+ struct cfg80211_pmksa *pmksa)
{
struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
return iwm_send_pmkid_update(iwm, pmksa, IWM_CMD_PMKID_DEL);
}
-int iwm_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
+static int iwm_cfg80211_flush_pmksa(struct wiphy *wiphy,
+ struct net_device *netdev)
{
struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
struct cfg80211_pmksa pmksa;
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.c b/drivers/net/wireless/iwmc3200wifi/commands.c
index 42df7262f9f7..330c7d9cf101 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.c
+++ b/drivers/net/wireless/iwmc3200wifi/commands.c
@@ -507,7 +507,7 @@ static int iwm_target_read(struct iwm_priv *iwm, __le32 address,
return ret;
}
- /* When succeding, the send_target routine returns the seq number */
+ /* When succeeding, the send_target routine returns the seq number */
seq_num = ret;
ret = wait_event_interruptible_timeout(iwm->nonwifi_queue,
@@ -782,10 +782,9 @@ int iwm_send_mlme_profile(struct iwm_priv *iwm)
return 0;
}
-int iwm_invalidate_mlme_profile(struct iwm_priv *iwm)
+int __iwm_invalidate_mlme_profile(struct iwm_priv *iwm)
{
struct iwm_umac_invalidate_profile invalid;
- int ret;
invalid.hdr.oid = UMAC_WIFI_IF_CMD_INVALIDATE_PROFILE;
invalid.hdr.buf_size =
@@ -794,7 +793,14 @@ int iwm_invalidate_mlme_profile(struct iwm_priv *iwm)
invalid.reason = WLAN_REASON_UNSPECIFIED;
- ret = iwm_send_wifi_if_cmd(iwm, &invalid, sizeof(invalid), 1);
+ return iwm_send_wifi_if_cmd(iwm, &invalid, sizeof(invalid), 1);
+}
+
+int iwm_invalidate_mlme_profile(struct iwm_priv *iwm)
+{
+ int ret;
+
+ ret = __iwm_invalidate_mlme_profile(iwm);
if (ret)
return ret;
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.h b/drivers/net/wireless/iwmc3200wifi/commands.h
index 3dfd9f0e9003..7e16bcf59978 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.h
+++ b/drivers/net/wireless/iwmc3200wifi/commands.h
@@ -488,6 +488,7 @@ int iwm_umac_set_config_var(struct iwm_priv *iwm, u16 key,
void *payload, u16 payload_size);
int iwm_send_umac_config(struct iwm_priv *iwm, __le32 reset_flags);
int iwm_send_mlme_profile(struct iwm_priv *iwm);
+int __iwm_invalidate_mlme_profile(struct iwm_priv *iwm);
int iwm_invalidate_mlme_profile(struct iwm_priv *iwm);
int iwm_send_packet(struct iwm_priv *iwm, struct sk_buff *skb, int pool_id);
int iwm_set_tx_key(struct iwm_priv *iwm, u8 key_idx);
diff --git a/drivers/net/wireless/iwmc3200wifi/debug.h b/drivers/net/wireless/iwmc3200wifi/debug.h
index e35c9b693d1f..a0c13a49ab3c 100644
--- a/drivers/net/wireless/iwmc3200wifi/debug.h
+++ b/drivers/net/wireless/iwmc3200wifi/debug.h
@@ -113,13 +113,10 @@ struct iwm_debugfs {
};
#ifdef CONFIG_IWM_DEBUG
-int iwm_debugfs_init(struct iwm_priv *iwm);
+void iwm_debugfs_init(struct iwm_priv *iwm);
void iwm_debugfs_exit(struct iwm_priv *iwm);
#else
-static inline int iwm_debugfs_init(struct iwm_priv *iwm)
-{
- return 0;
-}
+static inline void iwm_debugfs_init(struct iwm_priv *iwm) {}
static inline void iwm_debugfs_exit(struct iwm_priv *iwm) {}
#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
index cbb81befdb55..53b0b7711f02 100644
--- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
+++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
@@ -48,12 +48,11 @@ static struct {
#define add_dbg_module(dbg, name, id, initlevel) \
do { \
- struct dentry *d; \
dbg.dbg_module[id] = (initlevel); \
- d = debugfs_create_x8(name, 0600, dbg.dbgdir, \
- &(dbg.dbg_module[id])); \
- if (!IS_ERR(d)) \
- dbg.dbg_module_dentries[id] = d; \
+ dbg.dbg_module_dentries[id] = \
+ debugfs_create_x8(name, 0600, \
+ dbg.dbgdir, \
+ &(dbg.dbg_module[id])); \
} while (0)
static int iwm_debugfs_u32_read(void *data, u64 *val)
@@ -266,7 +265,7 @@ static ssize_t iwm_debugfs_rx_ticket_read(struct file *filp,
size_t count, loff_t *ppos)
{
struct iwm_priv *iwm = filp->private_data;
- struct iwm_rx_ticket_node *ticket, *next;
+ struct iwm_rx_ticket_node *ticket;
char *buf;
int buf_len = 4096, i;
size_t len = 0;
@@ -281,7 +280,8 @@ static ssize_t iwm_debugfs_rx_ticket_read(struct file *filp,
if (!buf)
return -ENOMEM;
- list_for_each_entry_safe(ticket, next, &iwm->rx_tickets, node) {
+ spin_lock(&iwm->ticket_lock);
+ list_for_each_entry(ticket, &iwm->rx_tickets, node) {
len += snprintf(buf + len, buf_len - len, "Ticket #%d\n",
ticket->ticket->id);
len += snprintf(buf + len, buf_len - len, "\taction: 0x%x\n",
@@ -289,14 +289,17 @@ static ssize_t iwm_debugfs_rx_ticket_read(struct file *filp,
len += snprintf(buf + len, buf_len - len, "\tflags: 0x%x\n",
ticket->ticket->flags);
}
+ spin_unlock(&iwm->ticket_lock);
for (i = 0; i < IWM_RX_ID_HASH; i++) {
- struct iwm_rx_packet *packet, *nxt;
+ struct iwm_rx_packet *packet;
struct list_head *pkt_list = &iwm->rx_packets[i];
+
if (!list_empty(pkt_list)) {
len += snprintf(buf + len, buf_len - len,
"Packet hash #%d\n", i);
- list_for_each_entry_safe(packet, nxt, pkt_list, node) {
+ spin_lock(&iwm->packet_lock[i]);
+ list_for_each_entry(packet, pkt_list, node) {
len += snprintf(buf + len, buf_len - len,
"\tPacket id: %d\n",
packet->id);
@@ -304,6 +307,7 @@ static ssize_t iwm_debugfs_rx_ticket_read(struct file *filp,
"\tPacket length: %lu\n",
packet->pkt_size);
}
+ spin_unlock(&iwm->packet_lock[i]);
}
}
@@ -418,89 +422,29 @@ static const struct file_operations iwm_debugfs_fw_err_fops = {
.read = iwm_debugfs_fw_err_read,
};
-int iwm_debugfs_init(struct iwm_priv *iwm)
+void iwm_debugfs_init(struct iwm_priv *iwm)
{
- int i, result;
- char devdir[16];
+ int i;
iwm->dbg.rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
- result = PTR_ERR(iwm->dbg.rootdir);
- if (!result || IS_ERR(iwm->dbg.rootdir)) {
- if (result == -ENODEV) {
- IWM_ERR(iwm, "DebugFS (CONFIG_DEBUG_FS) not "
- "enabled in kernel config\n");
- result = 0; /* No debugfs support */
- }
- IWM_ERR(iwm, "Couldn't create rootdir: %d\n", result);
- goto error;
- }
-
- snprintf(devdir, sizeof(devdir), "%s", wiphy_name(iwm_to_wiphy(iwm)));
-
- iwm->dbg.devdir = debugfs_create_dir(devdir, iwm->dbg.rootdir);
- result = PTR_ERR(iwm->dbg.devdir);
- if (IS_ERR(iwm->dbg.devdir) && (result != -ENODEV)) {
- IWM_ERR(iwm, "Couldn't create devdir: %d\n", result);
- goto error;
- }
-
+ iwm->dbg.devdir = debugfs_create_dir(wiphy_name(iwm_to_wiphy(iwm)),
+ iwm->dbg.rootdir);
iwm->dbg.dbgdir = debugfs_create_dir("debug", iwm->dbg.devdir);
- result = PTR_ERR(iwm->dbg.dbgdir);
- if (IS_ERR(iwm->dbg.dbgdir) && (result != -ENODEV)) {
- IWM_ERR(iwm, "Couldn't create dbgdir: %d\n", result);
- goto error;
- }
-
iwm->dbg.rxdir = debugfs_create_dir("rx", iwm->dbg.devdir);
- result = PTR_ERR(iwm->dbg.rxdir);
- if (IS_ERR(iwm->dbg.rxdir) && (result != -ENODEV)) {
- IWM_ERR(iwm, "Couldn't create rx dir: %d\n", result);
- goto error;
- }
-
iwm->dbg.txdir = debugfs_create_dir("tx", iwm->dbg.devdir);
- result = PTR_ERR(iwm->dbg.txdir);
- if (IS_ERR(iwm->dbg.txdir) && (result != -ENODEV)) {
- IWM_ERR(iwm, "Couldn't create tx dir: %d\n", result);
- goto error;
- }
-
iwm->dbg.busdir = debugfs_create_dir("bus", iwm->dbg.devdir);
- result = PTR_ERR(iwm->dbg.busdir);
- if (IS_ERR(iwm->dbg.busdir) && (result != -ENODEV)) {
- IWM_ERR(iwm, "Couldn't create bus dir: %d\n", result);
- goto error;
- }
-
- if (iwm->bus_ops->debugfs_init) {
- result = iwm->bus_ops->debugfs_init(iwm, iwm->dbg.busdir);
- if (result < 0) {
- IWM_ERR(iwm, "Couldn't create bus entry: %d\n", result);
- goto error;
- }
- }
-
+ if (iwm->bus_ops->debugfs_init)
+ iwm->bus_ops->debugfs_init(iwm, iwm->dbg.busdir);
iwm->dbg.dbg_level = IWM_DL_NONE;
iwm->dbg.dbg_level_dentry =
debugfs_create_file("level", 0200, iwm->dbg.dbgdir, iwm,
&fops_iwm_dbg_level);
- result = PTR_ERR(iwm->dbg.dbg_level_dentry);
- if (IS_ERR(iwm->dbg.dbg_level_dentry) && (result != -ENODEV)) {
- IWM_ERR(iwm, "Couldn't create dbg_level: %d\n", result);
- goto error;
- }
-
iwm->dbg.dbg_modules = IWM_DM_DEFAULT;
iwm->dbg.dbg_modules_dentry =
debugfs_create_file("modules", 0200, iwm->dbg.dbgdir, iwm,
&fops_iwm_dbg_modules);
- result = PTR_ERR(iwm->dbg.dbg_modules_dentry);
- if (IS_ERR(iwm->dbg.dbg_modules_dentry) && (result != -ENODEV)) {
- IWM_ERR(iwm, "Couldn't create dbg_modules: %d\n", result);
- goto error;
- }
for (i = 0; i < __IWM_DM_NR; i++)
add_dbg_module(iwm->dbg, iwm_debug_module[i].name,
@@ -509,44 +453,15 @@ int iwm_debugfs_init(struct iwm_priv *iwm)
iwm->dbg.txq_dentry = debugfs_create_file("queues", 0200,
iwm->dbg.txdir, iwm,
&iwm_debugfs_txq_fops);
- result = PTR_ERR(iwm->dbg.txq_dentry);
- if (IS_ERR(iwm->dbg.txq_dentry) && (result != -ENODEV)) {
- IWM_ERR(iwm, "Couldn't create tx queue: %d\n", result);
- goto error;
- }
-
iwm->dbg.tx_credit_dentry = debugfs_create_file("credits", 0200,
iwm->dbg.txdir, iwm,
&iwm_debugfs_tx_credit_fops);
- result = PTR_ERR(iwm->dbg.tx_credit_dentry);
- if (IS_ERR(iwm->dbg.tx_credit_dentry) && (result != -ENODEV)) {
- IWM_ERR(iwm, "Couldn't create tx credit: %d\n", result);
- goto error;
- }
-
iwm->dbg.rx_ticket_dentry = debugfs_create_file("tickets", 0200,
iwm->dbg.rxdir, iwm,
&iwm_debugfs_rx_ticket_fops);
- result = PTR_ERR(iwm->dbg.rx_ticket_dentry);
- if (IS_ERR(iwm->dbg.rx_ticket_dentry) && (result != -ENODEV)) {
- IWM_ERR(iwm, "Couldn't create rx ticket: %d\n", result);
- goto error;
- }
-
iwm->dbg.fw_err_dentry = debugfs_create_file("last_fw_err", 0200,
iwm->dbg.dbgdir, iwm,
&iwm_debugfs_fw_err_fops);
- result = PTR_ERR(iwm->dbg.fw_err_dentry);
- if (IS_ERR(iwm->dbg.fw_err_dentry) && (result != -ENODEV)) {
- IWM_ERR(iwm, "Couldn't create last FW err: %d\n", result);
- goto error;
- }
-
-
- return 0;
-
- error:
- return result;
}
void iwm_debugfs_exit(struct iwm_priv *iwm)
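The simplification above relies on the convention that debugfs is best-effort: creation failures are not propagated, and cleanup is one recursive remove. A minimal standalone sketch of that pattern (names are illustrative, not part of the driver):

#include <linux/debugfs.h>

static struct dentry *example_dir;
static u32 example_value;

/* Create a directory and one file; keep only the root dentry for
 * later removal. Failures are deliberately ignored, since a missing
 * debugfs entry should never fail driver initialisation. */
static void example_debugfs_init(void)
{
	example_dir = debugfs_create_dir("example", NULL);
	debugfs_create_u32("value", 0600, example_dir, &example_value);
}

static void example_debugfs_exit(void)
{
	debugfs_remove_recursive(example_dir);
}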
diff --git a/drivers/net/wireless/iwmc3200wifi/hal.c b/drivers/net/wireless/iwmc3200wifi/hal.c
index 229de990379c..9531b18cf72a 100644
--- a/drivers/net/wireless/iwmc3200wifi/hal.c
+++ b/drivers/net/wireless/iwmc3200wifi/hal.c
@@ -105,6 +105,7 @@
#include "hal.h"
#include "umac.h"
#include "debug.h"
+#include "trace.h"
static int iwm_nonwifi_cmd_init(struct iwm_priv *iwm,
struct iwm_nonwifi_cmd *cmd,
@@ -207,9 +208,9 @@ void iwm_cmd_flush(struct iwm_priv *iwm)
struct iwm_wifi_cmd *iwm_get_pending_wifi_cmd(struct iwm_priv *iwm, u16 seq_num)
{
- struct iwm_wifi_cmd *cmd, *next;
+ struct iwm_wifi_cmd *cmd;
- list_for_each_entry_safe(cmd, next, &iwm->wifi_pending_cmd, pending)
+ list_for_each_entry(cmd, &iwm->wifi_pending_cmd, pending)
if (cmd->seq_num == seq_num) {
list_del(&cmd->pending);
return cmd;
@@ -218,12 +219,12 @@ struct iwm_wifi_cmd *iwm_get_pending_wifi_cmd(struct iwm_priv *iwm, u16 seq_num)
return NULL;
}
-struct iwm_nonwifi_cmd *
-iwm_get_pending_nonwifi_cmd(struct iwm_priv *iwm, u8 seq_num, u8 cmd_opcode)
+struct iwm_nonwifi_cmd *iwm_get_pending_nonwifi_cmd(struct iwm_priv *iwm,
+ u8 seq_num, u8 cmd_opcode)
{
- struct iwm_nonwifi_cmd *cmd, *next;
+ struct iwm_nonwifi_cmd *cmd;
- list_for_each_entry_safe(cmd, next, &iwm->nonwifi_pending_cmd, pending)
+ list_for_each_entry(cmd, &iwm->nonwifi_pending_cmd, pending)
if ((cmd->seq_num == seq_num) &&
(cmd->udma_cmd.opcode == cmd_opcode) &&
(cmd->resp_received)) {
@@ -277,6 +278,7 @@ static int iwm_send_udma_nonwifi_cmd(struct iwm_priv *iwm,
udma_cmd->handle_by_hw, cmd->seq_num, udma_cmd->addr,
udma_cmd->op1_sz, udma_cmd->op2);
+ trace_iwm_tx_nonwifi_cmd(iwm, udma_hdr);
return iwm_bus_send_chunk(iwm, buf->start, buf->len);
}
@@ -363,6 +365,7 @@ static int iwm_send_udma_wifi_cmd(struct iwm_priv *iwm,
return ret;
}
+ trace_iwm_tx_wifi_cmd(iwm, umac_hdr);
return iwm_bus_send_chunk(iwm, buf->start, buf->len);
}
diff --git a/drivers/net/wireless/iwmc3200wifi/hal.h b/drivers/net/wireless/iwmc3200wifi/hal.h
index 0adfdc85765d..c20936d9b6b7 100644
--- a/drivers/net/wireless/iwmc3200wifi/hal.h
+++ b/drivers/net/wireless/iwmc3200wifi/hal.h
@@ -75,7 +75,8 @@ do { \
/* UDMA IN OP CODE -- cmd bits [3:0] */
-#define UDMA_IN_OPCODE_MASK 0xF
+#define UDMA_HDI_IN_NW_CMD_OPCODE_POS 0
+#define UDMA_HDI_IN_NW_CMD_OPCODE_SEED 0xF
#define UDMA_IN_OPCODE_GENERAL_RESP 0x0
#define UDMA_IN_OPCODE_READ_RESP 0x1
@@ -130,7 +131,7 @@ do { \
#define IWM_MAX_WIFI_CMD_BUFF_SIZE (IWM_SDIO_FW_MAX_CHUNK_SIZE - \
IWM_MAX_WIFI_HEADERS_SIZE)
-#define IWM_HAL_CONCATENATE_BUF_SIZE 8192
+#define IWM_HAL_CONCATENATE_BUF_SIZE (32 * 1024)
struct iwm_wifi_cmd_buff {
u16 len;
diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
index 79ffa3b98d73..13266c3842f8 100644
--- a/drivers/net/wireless/iwmc3200wifi/iwm.h
+++ b/drivers/net/wireless/iwmc3200wifi/iwm.h
@@ -48,6 +48,7 @@
#include "umac.h"
#include "lmac.h"
#include "eeprom.h"
+#include "trace.h"
#define IWM_COPYRIGHT "Copyright(c) 2009 Intel Corporation"
#define IWM_AUTHOR "<ilw@linux.intel.com>"
@@ -268,7 +269,9 @@ struct iwm_priv {
struct sk_buff_head rx_list;
struct list_head rx_tickets;
+ spinlock_t ticket_lock;
struct list_head rx_packets[IWM_RX_ID_HASH];
+ spinlock_t packet_lock[IWM_RX_ID_HASH];
struct workqueue_struct *rx_wq;
struct work_struct rx_worker;
diff --git a/drivers/net/wireless/iwmc3200wifi/main.c b/drivers/net/wireless/iwmc3200wifi/main.c
index 23856d359e12..362002735b12 100644
--- a/drivers/net/wireless/iwmc3200wifi/main.c
+++ b/drivers/net/wireless/iwmc3200wifi/main.c
@@ -277,8 +277,11 @@ int iwm_priv_init(struct iwm_priv *iwm)
skb_queue_head_init(&iwm->rx_list);
INIT_LIST_HEAD(&iwm->rx_tickets);
- for (i = 0; i < IWM_RX_ID_HASH; i++)
+ spin_lock_init(&iwm->ticket_lock);
+ for (i = 0; i < IWM_RX_ID_HASH; i++) {
INIT_LIST_HEAD(&iwm->rx_packets[i]);
+ spin_lock_init(&iwm->packet_lock[i]);
+ }
INIT_WORK(&iwm->rx_worker, iwm_rx_worker);
@@ -424,9 +427,9 @@ int iwm_notif_send(struct iwm_priv *iwm, struct iwm_wifi_cmd *cmd,
static struct iwm_notif *iwm_notif_find(struct iwm_priv *iwm, u32 cmd,
u8 source)
{
- struct iwm_notif *notif, *next;
+ struct iwm_notif *notif;
- list_for_each_entry_safe(notif, next, &iwm->pending_notif, pending) {
+ list_for_each_entry(notif, &iwm->pending_notif, pending) {
if ((notif->cmd_id == cmd) && (notif->src == source)) {
list_del(&notif->pending);
return notif;
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index 3257d4fad835..e1184deca559 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -343,15 +343,17 @@ static void iwm_rx_ticket_node_free(struct iwm_rx_ticket_node *ticket_node)
static struct iwm_rx_packet *iwm_rx_packet_get(struct iwm_priv *iwm, u16 id)
{
u8 id_hash = IWM_RX_ID_GET_HASH(id);
- struct list_head *packet_list;
- struct iwm_rx_packet *packet, *next;
-
- packet_list = &iwm->rx_packets[id_hash];
+ struct iwm_rx_packet *packet;
- list_for_each_entry_safe(packet, next, packet_list, node)
- if (packet->id == id)
+ spin_lock(&iwm->packet_lock[id_hash]);
+ list_for_each_entry(packet, &iwm->rx_packets[id_hash], node)
+ if (packet->id == id) {
+ list_del(&packet->node);
+ spin_unlock(&iwm->packet_lock[id_hash]);
return packet;
+ }
+ spin_unlock(&iwm->packet_lock[id_hash]);
return NULL;
}
@@ -389,18 +391,22 @@ void iwm_rx_free(struct iwm_priv *iwm)
struct iwm_rx_packet *packet, *np;
int i;
+ spin_lock(&iwm->ticket_lock);
list_for_each_entry_safe(ticket, nt, &iwm->rx_tickets, node) {
list_del(&ticket->node);
iwm_rx_ticket_node_free(ticket);
}
+ spin_unlock(&iwm->ticket_lock);
for (i = 0; i < IWM_RX_ID_HASH; i++) {
+ spin_lock(&iwm->packet_lock[i]);
list_for_each_entry_safe(packet, np, &iwm->rx_packets[i],
node) {
list_del(&packet->node);
kfree_skb(packet->skb);
kfree(packet);
}
+ spin_unlock(&iwm->packet_lock[i]);
}
}
@@ -425,10 +431,13 @@ static int iwm_ntf_rx_ticket(struct iwm_priv *iwm, u8 *buf,
return PTR_ERR(ticket_node);
IWM_DBG_RX(iwm, DBG, "TICKET %s(%d)\n",
- ticket->action == IWM_RX_TICKET_RELEASE ?
+ __le16_to_cpu(ticket->action) ==
+ IWM_RX_TICKET_RELEASE ?
"RELEASE" : "DROP",
ticket->id);
+ spin_lock(&iwm->ticket_lock);
list_add_tail(&ticket_node->node, &iwm->rx_tickets);
+ spin_unlock(&iwm->ticket_lock);
/*
* We received an Rx ticket, most likely there's
@@ -461,6 +470,7 @@ static int iwm_ntf_rx_packet(struct iwm_priv *iwm, u8 *buf,
struct iwm_rx_packet *packet;
u16 id, buf_offset;
u32 packet_size;
+ u8 id_hash;
IWM_DBG_RX(iwm, DBG, "\n");
@@ -478,7 +488,10 @@ static int iwm_ntf_rx_packet(struct iwm_priv *iwm, u8 *buf,
if (IS_ERR(packet))
return PTR_ERR(packet);
- list_add_tail(&packet->node, &iwm->rx_packets[IWM_RX_ID_GET_HASH(id)]);
+ id_hash = IWM_RX_ID_GET_HASH(id);
+ spin_lock(&iwm->packet_lock[id_hash]);
+ list_add_tail(&packet->node, &iwm->rx_packets[id_hash]);
+ spin_unlock(&iwm->packet_lock[id_hash]);
/* We might (unlikely) have received the packet _after_ the ticket */
queue_work(iwm->rx_wq, &iwm->rx_worker);
@@ -519,6 +532,8 @@ static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
unsigned long buf_size,
struct iwm_wifi_cmd *cmd)
{
+ struct wiphy *wiphy = iwm_to_wiphy(iwm);
+ struct ieee80211_channel *chan;
struct iwm_umac_notif_assoc_complete *complete =
(struct iwm_umac_notif_assoc_complete *)buf;
@@ -527,6 +542,18 @@ static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
switch (le32_to_cpu(complete->status)) {
case UMAC_ASSOC_COMPLETE_SUCCESS:
+ chan = ieee80211_get_channel(wiphy,
+ ieee80211_channel_to_frequency(complete->channel));
+ if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) {
+ /* Associated to a disallowed channel, disassociate. */
+ __iwm_invalidate_mlme_profile(iwm);
+ IWM_WARN(iwm, "Couldn't associate with %pM due to "
+ "channel %d is disabled. Check your local "
+ "regulatory setting.\n",
+ complete->bssid, complete->channel);
+ goto failure;
+ }
+
set_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
memcpy(iwm->bssid, complete->bssid, ETH_ALEN);
iwm->channel = complete->channel;
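A condensed sketch of the channel validation introduced above: translate the channel number to a frequency, look it up in the wiphy, and reject missing or disabled channels. The single-argument ieee80211_channel_to_frequency() matches the kernel this patch targets; the helper name is illustrative.

#include <net/cfg80211.h>

/* Returns true if the channel number maps to a channel known to the
 * wiphy and not disabled by the current regulatory settings. */
static bool channel_allowed(struct wiphy *wiphy, int chan_no)
{
	struct ieee80211_channel *chan;

	chan = ieee80211_get_channel(wiphy,
				     ieee80211_channel_to_frequency(chan_no));

	return chan && !(chan->flags & IEEE80211_CHAN_DISABLED);
}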
@@ -563,6 +590,7 @@ static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
GFP_KERNEL);
break;
case UMAC_ASSOC_COMPLETE_FAILURE:
+ failure:
clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
memset(iwm->bssid, 0, ETH_ALEN);
iwm->channel = 0;
@@ -757,7 +785,7 @@ static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf,
(struct iwm_umac_notif_bss_info *)buf;
struct ieee80211_channel *channel;
struct ieee80211_supported_band *band;
- struct iwm_bss_info *bss, *next;
+ struct iwm_bss_info *bss;
s32 signal;
int freq;
u16 frame_len = le16_to_cpu(umac_bss->frame_len);
@@ -776,7 +804,7 @@ static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf,
IWM_DBG_MLME(iwm, DBG, "\tRSSI: %d\n", umac_bss->rssi);
IWM_DBG_MLME(iwm, DBG, "\tFrame Length: %d\n", frame_len);
- list_for_each_entry_safe(bss, next, &iwm->bss_list, node)
+ list_for_each_entry(bss, &iwm->bss_list, node)
if (bss->bss->table_idx == umac_bss->table_idx)
break;
@@ -843,16 +871,15 @@ static int iwm_mlme_remove_bss(struct iwm_priv *iwm, u8 *buf,
int i;
for (i = 0; i < le32_to_cpu(bss_rm->count); i++) {
- table_idx = (le16_to_cpu(bss_rm->entries[i])
- & IWM_BSS_REMOVE_INDEX_MSK);
+ table_idx = le16_to_cpu(bss_rm->entries[i]) &
+ IWM_BSS_REMOVE_INDEX_MSK;
list_for_each_entry_safe(bss, next, &iwm->bss_list, node)
if (bss->bss->table_idx == cpu_to_le16(table_idx)) {
struct ieee80211_mgmt *mgmt;
mgmt = (struct ieee80211_mgmt *)
(bss->bss->frame_buf);
- IWM_DBG_MLME(iwm, ERR,
- "BSS removed: %pM\n",
+ IWM_DBG_MLME(iwm, ERR, "BSS removed: %pM\n",
mgmt->bssid);
list_del(&bss->node);
kfree(bss->bss);
@@ -1224,18 +1251,24 @@ static int iwm_rx_handle_wifi(struct iwm_priv *iwm, u8 *buf,
u8 source, cmd_id;
u16 seq_num;
u32 count;
- u8 resp;
wifi_hdr = (struct iwm_umac_wifi_in_hdr *)buf;
cmd_id = wifi_hdr->sw_hdr.cmd.cmd;
-
source = GET_VAL32(wifi_hdr->hw_hdr.cmd, UMAC_HDI_IN_CMD_SOURCE);
if (source >= IWM_SRC_NUM) {
IWM_CRIT(iwm, "invalid source %d\n", source);
return -EINVAL;
}
- count = (GET_VAL32(wifi_hdr->sw_hdr.meta_data, UMAC_FW_CMD_BYTE_COUNT));
+ if (cmd_id == REPLY_RX_MPDU_CMD)
+ trace_iwm_rx_packet(iwm, buf, buf_size);
+ else if ((cmd_id == UMAC_NOTIFY_OPCODE_RX_TICKET) &&
+ (source == UMAC_HDI_IN_SOURCE_FW))
+ trace_iwm_rx_ticket(iwm, buf, buf_size);
+ else
+ trace_iwm_rx_wifi_cmd(iwm, wifi_hdr);
+
+ count = GET_VAL32(wifi_hdr->sw_hdr.meta_data, UMAC_FW_CMD_BYTE_COUNT);
count += sizeof(struct iwm_umac_wifi_in_hdr) -
sizeof(struct iwm_dev_cmd_hdr);
if (count > buf_size) {
@@ -1243,8 +1276,6 @@ static int iwm_rx_handle_wifi(struct iwm_priv *iwm, u8 *buf,
return -EINVAL;
}
- resp = GET_VAL32(wifi_hdr->sw_hdr.meta_data, UMAC_FW_CMD_STATUS);
-
seq_num = le16_to_cpu(wifi_hdr->sw_hdr.cmd.seq_num);
IWM_DBG_RX(iwm, DBG, "CMD:0x%x, source: 0x%x, seqnum: %d\n",
@@ -1317,8 +1348,9 @@ static int iwm_rx_handle_nonwifi(struct iwm_priv *iwm, u8 *buf,
{
u8 seq_num;
struct iwm_udma_in_hdr *hdr = (struct iwm_udma_in_hdr *)buf;
- struct iwm_nonwifi_cmd *cmd, *next;
+ struct iwm_nonwifi_cmd *cmd;
+ trace_iwm_rx_nonwifi_cmd(iwm, buf, buf_size);
seq_num = GET_VAL32(hdr->cmd, UDMA_HDI_IN_CMD_NON_WIFI_HW_SEQ_NUM);
/*
@@ -1329,7 +1361,7 @@ static int iwm_rx_handle_nonwifi(struct iwm_priv *iwm, u8 *buf,
* That means we only support synchronised non wifi command response
* schemes.
*/
- list_for_each_entry_safe(cmd, next, &iwm->nonwifi_pending_cmd, pending)
+ list_for_each_entry(cmd, &iwm->nonwifi_pending_cmd, pending)
if (cmd->seq_num == seq_num) {
cmd->resp_received = 1;
cmd->buf.len = buf_size;
@@ -1648,6 +1680,7 @@ void iwm_rx_worker(struct work_struct *work)
* We stop whenever a ticket is missing its packet, as we're
* supposed to send the packets in order.
*/
+ spin_lock(&iwm->ticket_lock);
list_for_each_entry_safe(ticket, next, &iwm->rx_tickets, node) {
struct iwm_rx_packet *packet =
iwm_rx_packet_get(iwm, le16_to_cpu(ticket->ticket->id));
@@ -1656,12 +1689,12 @@ void iwm_rx_worker(struct work_struct *work)
IWM_DBG_RX(iwm, DBG, "Skip rx_work: Wait for ticket %d "
"to be handled first\n",
le16_to_cpu(ticket->ticket->id));
- return;
+ break;
}
list_del(&ticket->node);
- list_del(&packet->node);
iwm_rx_process_packet(iwm, packet, ticket);
}
+ spin_unlock(&iwm->ticket_lock);
}
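The ticket_lock/packet_lock changes above all follow one pattern: traverse under a spinlock and, when removing, unlink while the lock is still held, so plain list_for_each_entry() (rather than the _safe variant) is sufficient. A minimal sketch of that lookup-and-unlink idiom, with a hypothetical item type:

#include <linux/list.h>
#include <linux/spinlock.h>

struct item {
	u16 id;
	struct list_head node;
};

static struct item *item_get(struct list_head *head, spinlock_t *lock, u16 id)
{
	struct item *it;

	spin_lock(lock);
	list_for_each_entry(it, head, node) {
		if (it->id == id) {
			/* Unlink while still holding the lock so no other
			 * context can observe a half-removed entry. */
			list_del(&it->node);
			spin_unlock(lock);
			return it;
		}
	}
	spin_unlock(lock);

	return NULL;
}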
diff --git a/drivers/net/wireless/iwmc3200wifi/sdio.c b/drivers/net/wireless/iwmc3200wifi/sdio.c
index 1eafd6dec3fd..1acea37f39f8 100644
--- a/drivers/net/wireless/iwmc3200wifi/sdio.c
+++ b/drivers/net/wireless/iwmc3200wifi/sdio.c
@@ -366,21 +366,13 @@ static const struct file_operations iwm_debugfs_sdio_fops = {
.read = iwm_debugfs_sdio_read,
};
-static int if_sdio_debugfs_init(struct iwm_priv *iwm, struct dentry *parent_dir)
+static void if_sdio_debugfs_init(struct iwm_priv *iwm, struct dentry *parent_dir)
{
- int result;
struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
hw->cccr_dentry = debugfs_create_file("cccr", 0200,
parent_dir, iwm,
&iwm_debugfs_sdio_fops);
- result = PTR_ERR(hw->cccr_dentry);
- if (IS_ERR(hw->cccr_dentry) && (result != -ENODEV)) {
- IWM_ERR(iwm, "Couldn't create CCCR entry: %d\n", result);
- return result;
- }
-
- return 0;
}
static void if_sdio_debugfs_exit(struct iwm_priv *iwm)
@@ -440,11 +432,7 @@ static int iwm_sdio_probe(struct sdio_func *func,
hw = iwm_private(iwm);
hw->iwm = iwm;
- ret = iwm_debugfs_init(iwm);
- if (ret < 0) {
- IWM_ERR(iwm, "Debugfs registration failed\n");
- goto if_free;
- }
+ iwm_debugfs_init(iwm);
sdio_set_drvdata(func, hw);
@@ -473,7 +461,6 @@ static int iwm_sdio_probe(struct sdio_func *func,
destroy_workqueue(hw->isr_wq);
debugfs_exit:
iwm_debugfs_exit(iwm);
- if_free:
iwm_if_free(iwm);
return ret;
}
diff --git a/drivers/net/wireless/iwmc3200wifi/trace.c b/drivers/net/wireless/iwmc3200wifi/trace.c
new file mode 100644
index 000000000000..904d36f22311
--- /dev/null
+++ b/drivers/net/wireless/iwmc3200wifi/trace.c
@@ -0,0 +1,3 @@
+#include "iwm.h"
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/net/wireless/iwmc3200wifi/trace.h b/drivers/net/wireless/iwmc3200wifi/trace.h
new file mode 100644
index 000000000000..abb4805fa8df
--- /dev/null
+++ b/drivers/net/wireless/iwmc3200wifi/trace.h
@@ -0,0 +1,283 @@
+#if !defined(__IWM_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ)
+#define __IWM_TRACE_H__
+
+#include <linux/tracepoint.h>
+
+#if !defined(CONFIG_IWM_TRACING)
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#endif
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iwm
+
+#define IWM_ENTRY __array(char, ndev_name, 16)
+#define IWM_ASSIGN strlcpy(__entry->ndev_name, iwm_to_ndev(iwm)->name, 16)
+#define IWM_PR_FMT "%s"
+#define IWM_PR_ARG __entry->ndev_name
+
+TRACE_EVENT(iwm_tx_nonwifi_cmd,
+ TP_PROTO(struct iwm_priv *iwm, struct iwm_udma_out_nonwifi_hdr *hdr),
+
+ TP_ARGS(iwm, hdr),
+
+ TP_STRUCT__entry(
+ IWM_ENTRY
+ __field(u8, opcode)
+ __field(u8, resp)
+ __field(u8, eot)
+ __field(u8, hw)
+ __field(u16, seq)
+ __field(u32, addr)
+ __field(u32, op1)
+ __field(u32, op2)
+ ),
+
+ TP_fast_assign(
+ IWM_ASSIGN;
+ __entry->opcode = GET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_OPCODE);
+ __entry->resp = GET_VAL32(hdr->cmd, UDMA_HDI_OUT_NW_CMD_RESP);
+ __entry->eot = GET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_EOT);
+ __entry->hw = GET_VAL32(hdr->cmd, UDMA_HDI_OUT_NW_CMD_HANDLE_BY_HW);
+ __entry->seq = GET_VAL32(hdr->cmd, UDMA_HDI_OUT_CMD_NON_WIFI_HW_SEQ_NUM);
+ __entry->addr = le32_to_cpu(hdr->addr);
+ __entry->op1 = le32_to_cpu(hdr->op1_sz);
+ __entry->op2 = le32_to_cpu(hdr->op2);
+ ),
+
+ TP_printk(
+ IWM_PR_FMT " Tx TARGET CMD: opcode 0x%x, resp %d, eot %d, "
+ "hw %d, seq 0x%x, addr 0x%x, op1 0x%x, op2 0x%x",
+ IWM_PR_ARG, __entry->opcode, __entry->resp, __entry->eot,
+ __entry->hw, __entry->seq, __entry->addr, __entry->op1,
+ __entry->op2
+ )
+);
+
+TRACE_EVENT(iwm_tx_wifi_cmd,
+ TP_PROTO(struct iwm_priv *iwm, struct iwm_umac_wifi_out_hdr *hdr),
+
+ TP_ARGS(iwm, hdr),
+
+ TP_STRUCT__entry(
+ IWM_ENTRY
+ __field(u8, opcode)
+ __field(u8, lmac)
+ __field(u8, resp)
+ __field(u8, eot)
+ __field(u8, ra_tid)
+ __field(u8, credit_group)
+ __field(u8, color)
+ __field(u16, seq)
+ ),
+
+ TP_fast_assign(
+ IWM_ASSIGN;
+ __entry->opcode = hdr->sw_hdr.cmd.cmd;
+ __entry->lmac = 0;
+ __entry->seq = __le16_to_cpu(hdr->sw_hdr.cmd.seq_num);
+ __entry->resp = GET_VAL8(hdr->sw_hdr.cmd.flags, UMAC_DEV_CMD_FLAGS_RESP_REQ);
+ __entry->color = GET_VAL32(hdr->sw_hdr.meta_data, UMAC_FW_CMD_TX_STA_COLOR);
+ __entry->eot = GET_VAL32(hdr->hw_hdr.cmd, UMAC_HDI_OUT_CMD_EOT);
+ __entry->ra_tid = GET_VAL32(hdr->hw_hdr.meta_data, UMAC_HDI_OUT_RATID);
+ __entry->credit_group = GET_VAL32(hdr->hw_hdr.meta_data, UMAC_HDI_OUT_CREDIT_GRP);
+ if (__entry->opcode == UMAC_CMD_OPCODE_WIFI_PASS_THROUGH ||
+ __entry->opcode == UMAC_CMD_OPCODE_WIFI_IF_WRAPPER) {
+ __entry->lmac = 1;
+ __entry->opcode = ((struct iwm_lmac_hdr *)(hdr + 1))->id;
+ }
+ ),
+
+ TP_printk(
+ IWM_PR_FMT " Tx %cMAC CMD: opcode 0x%x, resp %d, eot %d, "
+ "seq 0x%x, sta_color 0x%x, ra_tid 0x%x, credit_group 0x%x",
+ IWM_PR_ARG, __entry->lmac ? 'L' : 'U', __entry->opcode,
+ __entry->resp, __entry->eot, __entry->seq, __entry->color,
+ __entry->ra_tid, __entry->credit_group
+ )
+);
+
+TRACE_EVENT(iwm_tx_packets,
+ TP_PROTO(struct iwm_priv *iwm, u8 *buf, int len),
+
+ TP_ARGS(iwm, buf, len),
+
+ TP_STRUCT__entry(
+ IWM_ENTRY
+ __field(u8, eot)
+ __field(u8, ra_tid)
+ __field(u8, credit_group)
+ __field(u8, color)
+ __field(u16, seq)
+ __field(u8, npkt)
+ __field(u32, bytes)
+ ),
+
+ TP_fast_assign(
+ struct iwm_umac_wifi_out_hdr *hdr =
+ (struct iwm_umac_wifi_out_hdr *)buf;
+
+ IWM_ASSIGN;
+ __entry->eot = GET_VAL32(hdr->hw_hdr.cmd, UMAC_HDI_OUT_CMD_EOT);
+ __entry->ra_tid = GET_VAL32(hdr->hw_hdr.meta_data, UMAC_HDI_OUT_RATID);
+ __entry->credit_group = GET_VAL32(hdr->hw_hdr.meta_data, UMAC_HDI_OUT_CREDIT_GRP);
+ __entry->color = GET_VAL32(hdr->sw_hdr.meta_data, UMAC_FW_CMD_TX_STA_COLOR);
+ __entry->seq = __le16_to_cpu(hdr->sw_hdr.cmd.seq_num);
+ __entry->npkt = 1;
+ __entry->bytes = len;
+
+ if (!__entry->eot) {
+ int count;
+ u8 *ptr = buf;
+
+ __entry->npkt = 0;
+ while (ptr < buf + len) {
+ count = GET_VAL32(hdr->sw_hdr.meta_data,
+ UMAC_FW_CMD_BYTE_COUNT);
+ ptr += ALIGN(sizeof(*hdr) + count, 16);
+ hdr = (struct iwm_umac_wifi_out_hdr *)ptr;
+ __entry->npkt++;
+ }
+ }
+ ),
+
+ TP_printk(
+ IWM_PR_FMT " Tx %spacket: eot %d, seq 0x%x, sta_color 0x%x, "
+ "ra_tid 0x%x, credit_group 0x%x, embeded_packets %d, %d bytes",
+ IWM_PR_ARG, !__entry->eot ? "concatenated " : "",
+ __entry->eot, __entry->seq, __entry->color, __entry->ra_tid,
+ __entry->credit_group, __entry->npkt, __entry->bytes
+ )
+);
+
+TRACE_EVENT(iwm_rx_nonwifi_cmd,
+ TP_PROTO(struct iwm_priv *iwm, void *buf, int len),
+
+ TP_ARGS(iwm, buf, len),
+
+ TP_STRUCT__entry(
+ IWM_ENTRY
+ __field(u8, opcode)
+ __field(u16, seq)
+ __field(u32, len)
+ ),
+
+ TP_fast_assign(
+ struct iwm_udma_in_hdr *hdr = buf;
+
+ IWM_ASSIGN;
+ __entry->opcode = GET_VAL32(hdr->cmd, UDMA_HDI_IN_NW_CMD_OPCODE);
+ __entry->seq = GET_VAL32(hdr->cmd, UDMA_HDI_IN_CMD_NON_WIFI_HW_SEQ_NUM);
+ __entry->len = len;
+ ),
+
+ TP_printk(
+ IWM_PR_FMT " Rx TARGET RESP: opcode 0x%x, seq 0x%x, len 0x%x",
+ IWM_PR_ARG, __entry->opcode, __entry->seq, __entry->len
+ )
+);
+
+TRACE_EVENT(iwm_rx_wifi_cmd,
+ TP_PROTO(struct iwm_priv *iwm, struct iwm_umac_wifi_in_hdr *hdr),
+
+ TP_ARGS(iwm, hdr),
+
+ TP_STRUCT__entry(
+ IWM_ENTRY
+ __field(u8, cmd)
+ __field(u8, source)
+ __field(u16, seq)
+ __field(u32, count)
+ ),
+
+ TP_fast_assign(
+ IWM_ASSIGN;
+ __entry->cmd = hdr->sw_hdr.cmd.cmd;
+ __entry->source = GET_VAL32(hdr->hw_hdr.cmd, UMAC_HDI_IN_CMD_SOURCE);
+ __entry->count = GET_VAL32(hdr->sw_hdr.meta_data, UMAC_FW_CMD_BYTE_COUNT);
+ __entry->seq = le16_to_cpu(hdr->sw_hdr.cmd.seq_num);
+ ),
+
+ TP_printk(
+ IWM_PR_FMT " Rx %s RESP: cmd 0x%x, seq 0x%x, count 0x%x",
+ IWM_PR_ARG, __entry->source == UMAC_HDI_IN_SOURCE_FHRX ? "LMAC" :
+ __entry->source == UMAC_HDI_IN_SOURCE_FW ? "UMAC" : "UDMA",
+ __entry->cmd, __entry->seq, __entry->count
+ )
+);
+
+#define iwm_ticket_action_symbol \
+ { IWM_RX_TICKET_DROP, "DROP" }, \
+ { IWM_RX_TICKET_RELEASE, "RELEASE" }, \
+ { IWM_RX_TICKET_SNIFFER, "SNIFFER" }, \
+ { IWM_RX_TICKET_ENQUEUE, "ENQUEUE" }
+
+TRACE_EVENT(iwm_rx_ticket,
+ TP_PROTO(struct iwm_priv *iwm, void *buf, int len),
+
+ TP_ARGS(iwm, buf, len),
+
+ TP_STRUCT__entry(
+ IWM_ENTRY
+ __field(u8, action)
+ __field(u8, reason)
+ __field(u16, id)
+ __field(u16, flags)
+ ),
+
+ TP_fast_assign(
+ struct iwm_rx_ticket *ticket =
+ ((struct iwm_umac_notif_rx_ticket *)buf)->tickets;
+
+ IWM_ASSIGN;
+ __entry->id = le16_to_cpu(ticket->id);
+ __entry->action = le16_to_cpu(ticket->action);
+ __entry->flags = le16_to_cpu(ticket->flags);
+ __entry->reason = (__entry->flags & IWM_RX_TICKET_DROP_REASON_MSK) >> IWM_RX_TICKET_DROP_REASON_POS;
+ ),
+
+ TP_printk(
+ IWM_PR_FMT " Rx ticket: id 0x%x, action %s, %s 0x%x%s",
+ IWM_PR_ARG, __entry->id,
+ __print_symbolic(__entry->action, iwm_ticket_action_symbol),
+ __entry->reason ? "reason" : "flags",
+ __entry->reason ? __entry->reason : __entry->flags,
+ __entry->flags & IWM_RX_TICKET_AMSDU_MSK ? ", AMSDU frame" : ""
+ )
+);
+
+TRACE_EVENT(iwm_rx_packet,
+ TP_PROTO(struct iwm_priv *iwm, void *buf, int len),
+
+ TP_ARGS(iwm, buf, len),
+
+ TP_STRUCT__entry(
+ IWM_ENTRY
+ __field(u8, source)
+ __field(u16, id)
+ __field(u32, len)
+ ),
+
+ TP_fast_assign(
+ struct iwm_umac_wifi_in_hdr *hdr = buf;
+
+ IWM_ASSIGN;
+ __entry->source = GET_VAL32(hdr->hw_hdr.cmd, UMAC_HDI_IN_CMD_SOURCE);
+ __entry->id = le16_to_cpu(hdr->sw_hdr.cmd.seq_num);
+ __entry->len = len - sizeof(*hdr);
+ ),
+
+ TP_printk(
+ IWM_PR_FMT " Rx %s packet: id 0x%x, %d bytes",
+ IWM_PR_ARG, __entry->source == UMAC_HDI_IN_SOURCE_FHRX ?
+ "LMAC" : "UMAC", __entry->id, __entry->len
+ )
+);
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
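The new trace.h/trace.c pair and the Makefile change earlier in this patch follow the standard tracepoint boilerplate: the header is written so it can be included more than once, exactly one source file defines CREATE_TRACE_POINTS before including it, and that file is built with -I$(src) so define_trace.h can find the header again. A stripped-down sketch of the same layout, with a hypothetical event:

/* sample_trace.h */
#if !defined(__SAMPLE_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ)
#define __SAMPLE_TRACE_H__

#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM sample

TRACE_EVENT(sample_event,
	TP_PROTO(int value),
	TP_ARGS(value),

	TP_STRUCT__entry(
		__field(int, value)
	),

	TP_fast_assign(
		__entry->value = value;
	),

	TP_printk("value %d", __entry->value)
);

#endif /* __SAMPLE_TRACE_H__ */

/* These lines must stay outside the multi-read guard. */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE sample_trace
#include <trace/define_trace.h>

/* sample_trace.c -- the only file that defines CREATE_TRACE_POINTS. */
#define CREATE_TRACE_POINTS
#include "sample_trace.h"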
diff --git a/drivers/net/wireless/iwmc3200wifi/tx.c b/drivers/net/wireless/iwmc3200wifi/tx.c
index f6a02f123f31..3216621fc55a 100644
--- a/drivers/net/wireless/iwmc3200wifi/tx.c
+++ b/drivers/net/wireless/iwmc3200wifi/tx.c
@@ -302,8 +302,8 @@ void iwm_tx_credit_init_pools(struct iwm_priv *iwm,
#define IWM_UDMA_HDR_LEN sizeof(struct iwm_umac_wifi_out_hdr)
-static int iwm_tx_build_packet(struct iwm_priv *iwm, struct sk_buff *skb,
- int pool_id, u8 *buf)
+static __le16 iwm_tx_build_packet(struct iwm_priv *iwm, struct sk_buff *skb,
+ int pool_id, u8 *buf)
{
struct iwm_umac_wifi_out_hdr *hdr = (struct iwm_umac_wifi_out_hdr *)buf;
struct iwm_udma_wifi_cmd udma_cmd;
@@ -347,6 +347,7 @@ static int iwm_tx_send_concat_packets(struct iwm_priv *iwm,
/* mark EOP for the last packet */
iwm_udma_wifi_hdr_set_eop(iwm, txq->concat_ptr, 1);
+ trace_iwm_tx_packets(iwm, txq->concat_buf, txq->concat_count);
ret = iwm_bus_send_chunk(iwm, txq->concat_buf, txq->concat_count);
txq->concat_count = 0;
@@ -451,7 +452,6 @@ void iwm_tx_worker(struct work_struct *work)
int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct iwm_priv *iwm = ndev_to_iwm(netdev);
- struct net_device *ndev = iwm_to_ndev(iwm);
struct wireless_dev *wdev = iwm_to_wdev(iwm);
struct iwm_tx_info *tx_info;
struct iwm_tx_queue *txq;
@@ -518,12 +518,12 @@ int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
queue_work(iwm->txq[queue].wq, &iwm->txq[queue].worker);
- ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += skb->len;
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += skb->len;
return NETDEV_TX_OK;
drop:
- ndev->stats.tx_dropped++;
+ netdev->stats.tx_dropped++;
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/wireless/iwmc3200wifi/umac.h b/drivers/net/wireless/iwmc3200wifi/umac.h
index 7f54a145ca65..0cbba3ecc813 100644
--- a/drivers/net/wireless/iwmc3200wifi/umac.h
+++ b/drivers/net/wireless/iwmc3200wifi/umac.h
@@ -362,7 +362,7 @@ struct iwm_udma_out_wifi_hdr {
#define IWM_RX_TICKET_SPECIAL_SNAP_MSK 0x4
#define IWM_RX_TICKET_AMSDU_MSK 0x8
#define IWM_RX_TICKET_DROP_REASON_POS 4
-#define IWM_RX_TICKET_DROP_REASON_MSK (0x1F << RX_TICKET_FLAGS_DROP_REASON_POS)
+#define IWM_RX_TICKET_DROP_REASON_MSK (0x1F << IWM_RX_TICKET_DROP_REASON_POS)
#define IWM_RX_DROP_NO_DROP 0x0
#define IWM_RX_DROP_BAD_CRC 0x1
diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c
index 12a2ef9dacea..aa06070e5eab 100644
--- a/drivers/net/wireless/libertas/assoc.c
+++ b/drivers/net/wireless/libertas/assoc.c
@@ -32,6 +32,9 @@ u8 lbs_bg_rates[MAX_RATES] =
0x00, 0x00 };
+static int assoc_helper_wep_keys(struct lbs_private *priv,
+ struct assoc_request *assoc_req);
+
/**
* @brief This function finds common rates between rates and card rates.
*
@@ -611,7 +614,7 @@ static int lbs_assoc_post(struct lbs_private *priv,
if (status_code) {
lbs_mac_event_disconnected(priv);
- ret = -1;
+ ret = status_code;
goto done;
}
@@ -814,7 +817,24 @@ static int lbs_try_associate(struct lbs_private *priv,
goto out;
ret = lbs_associate(priv, assoc_req, CMD_802_11_ASSOCIATE);
+ /* If the association fails with the current auth mode,
+ * retry with the other auth mode.
+ */
+ if ((priv->authtype_auto) &&
+ (ret == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG) &&
+ (assoc_req->secinfo.wep_enabled) &&
+ (priv->connect_status != LBS_CONNECTED)) {
+ if (priv->secinfo.auth_mode == IW_AUTH_ALG_OPEN_SYSTEM)
+ priv->secinfo.auth_mode = IW_AUTH_ALG_SHARED_KEY;
+ else
+ priv->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
+ if (!assoc_helper_wep_keys(priv, assoc_req))
+ ret = lbs_associate(priv, assoc_req,
+ CMD_802_11_ASSOCIATE);
+ }
+ if (ret)
+ ret = -1;
out:
lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
return ret;
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index ce7bec402a33..9d5d3ccf08c8 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -79,6 +79,7 @@ static const u32 cipher_suites[] = {
static int lbs_cfg_set_channel(struct wiphy *wiphy,
+ struct net_device *netdev,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type)
{
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index a48ccaffb288..6f5b843c1f44 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -75,7 +75,7 @@ static ssize_t lbs_getscantable(struct file *file, char __user *userbuf,
return -ENOMEM;
pos += snprintf(buf+pos, len-pos,
- "# | ch | rssi | bssid | cap | Qual | SSID \n");
+ "# | ch | rssi | bssid | cap | Qual | SSID\n");
mutex_lock(&priv->lock);
list_for_each_entry (iter_bss, &priv->network_list, list) {
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index 6875e1498bd5..a54880e4ad2b 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -134,6 +134,7 @@ struct lbs_private {
u8 wpa_ie_len;
u16 wep_tx_keyidx;
struct enc_key wep_keys[4];
+ u8 authtype_auto;
/* Wake On LAN */
uint32_t wol_criteria;
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index 6d55439a7b97..08e4e3908003 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -777,7 +777,7 @@ static void if_cs_release(struct pcmcia_device *p_dev)
lbs_deb_enter(LBS_DEB_CS);
- free_irq(p_dev->irq.AssignedIRQ, card);
+ free_irq(p_dev->irq, card);
pcmcia_disable_device(p_dev);
if (card->iobase)
ioport_unmap(card->iobase);
@@ -807,8 +807,7 @@ static int if_cs_ioprobe(struct pcmcia_device *p_dev,
p_dev->io.NumPorts1 = cfg->io.win[0].len;
/* Do we need to allocate an interrupt? */
- if (cfg->irq.IRQInfo1)
- p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
+ p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
if (cfg->io.nwin != 1) {
@@ -837,9 +836,6 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
card->p_dev = p_dev;
p_dev->priv = card;
- p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- p_dev->irq.Handler = NULL;
-
p_dev->conf.Attributes = 0;
p_dev->conf.IntType = INT_MEMORY_AND_IO;
@@ -854,13 +850,8 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
* a handler to the interrupt, unless the 'Handler' member of
* the irq structure is initialized.
*/
- if (p_dev->conf.Attributes & CONF_ENABLE_IRQ) {
- ret = pcmcia_request_irq(p_dev, &p_dev->irq);
- if (ret) {
- lbs_pr_err("error in pcmcia_request_irq\n");
- goto out1;
- }
- }
+ if (!p_dev->irq)
+ goto out1;
/* Initialize io access */
card->iobase = ioport_map(p_dev->io.BasePort1, p_dev->io.NumPorts1);
@@ -883,7 +874,7 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
/* Finally, report what we've done */
lbs_deb_cs("irq %d, io 0x%04x-0x%04x\n",
- p_dev->irq.AssignedIRQ, p_dev->io.BasePort1,
+ p_dev->irq, p_dev->io.BasePort1,
p_dev->io.BasePort1 + p_dev->io.NumPorts1 - 1);
/*
@@ -940,7 +931,7 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
priv->fw_ready = 1;
/* Now actually get the IRQ */
- ret = request_irq(p_dev->irq.AssignedIRQ, if_cs_interrupt,
+ ret = request_irq(p_dev->irq, if_cs_interrupt,
IRQF_SHARED, DRV_NAME, card);
if (ret) {
lbs_pr_err("error in request_irq\n");
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 7d1a3c6b6ce0..7a59574cbb04 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -35,6 +35,8 @@
#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/host.h>
#include "host.h"
#include "decl.h"
@@ -123,6 +125,8 @@ struct if_sdio_card {
const char *helper;
const char *firmware;
+ bool helper_allocated;
+ bool firmware_allocated;
u8 buffer[65536];
@@ -313,12 +317,30 @@ out:
return ret;
}
+static int if_sdio_wait_status(struct if_sdio_card *card, const u8 condition)
+{
+ u8 status;
+ unsigned long timeout;
+ int ret = 0;
+
+ timeout = jiffies + HZ;
+ while (1) {
+ status = sdio_readb(card->func, IF_SDIO_STATUS, &ret);
+ if (ret)
+ return ret;
+ if ((status & condition) == condition)
+ break;
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ mdelay(1);
+ }
+ return ret;
+}
+
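if_sdio_wait_status() above factors three copies of the same busy-wait into one helper. The core idiom, restated as a self-contained sketch with a hypothetical read_status() callback standing in for sdio_readb():

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>

/* Poll until every bit in 'condition' is set in the status byte, or
 * give up after roughly one second. */
static int wait_status(u8 (*read_status)(int *err), u8 condition)
{
	unsigned long timeout = jiffies + HZ;
	u8 status;
	int err = 0;

	while (1) {
		status = read_status(&err);
		if (err)
			return err;
		if ((status & condition) == condition)
			return 0;
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		mdelay(1);
	}
}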
static int if_sdio_card_to_host(struct if_sdio_card *card)
{
int ret;
- u8 status;
u16 size, type, chunk;
- unsigned long timeout;
lbs_deb_enter(LBS_DEB_SDIO);
@@ -333,19 +355,9 @@ static int if_sdio_card_to_host(struct if_sdio_card *card)
goto out;
}
- timeout = jiffies + HZ;
- while (1) {
- status = sdio_readb(card->func, IF_SDIO_STATUS, &ret);
- if (ret)
- goto out;
- if (status & IF_SDIO_IO_RDY)
- break;
- if (time_after(jiffies, timeout)) {
- ret = -ETIMEDOUT;
- goto out;
- }
- mdelay(1);
- }
+ ret = if_sdio_wait_status(card, IF_SDIO_IO_RDY);
+ if (ret)
+ goto out;
/*
* The transfer must be in one transaction or the firmware
@@ -412,8 +424,6 @@ static void if_sdio_host_to_card_worker(struct work_struct *work)
{
struct if_sdio_card *card;
struct if_sdio_packet *packet;
- unsigned long timeout;
- u8 status;
int ret;
unsigned long flags;
@@ -433,25 +443,15 @@ static void if_sdio_host_to_card_worker(struct work_struct *work)
sdio_claim_host(card->func);
- timeout = jiffies + HZ;
- while (1) {
- status = sdio_readb(card->func, IF_SDIO_STATUS, &ret);
- if (ret)
- goto release;
- if (status & IF_SDIO_IO_RDY)
- break;
- if (time_after(jiffies, timeout)) {
- ret = -ETIMEDOUT;
- goto release;
- }
- mdelay(1);
+ ret = if_sdio_wait_status(card, IF_SDIO_IO_RDY);
+ if (ret == 0) {
+ ret = sdio_writesb(card->func, card->ioport,
+ packet->buffer, packet->nb);
}
- ret = sdio_writesb(card->func, card->ioport,
- packet->buffer, packet->nb);
if (ret)
- goto release;
-release:
+ lbs_pr_err("error %d sending packet to firmware\n", ret);
+
sdio_release_host(card->func);
kfree(packet);
@@ -464,10 +464,11 @@ release:
/* Firmware */
/********************************************************************/
+#define FW_DL_READY_STATUS (IF_SDIO_IO_RDY | IF_SDIO_DL_RDY)
+
static int if_sdio_prog_helper(struct if_sdio_card *card)
{
int ret;
- u8 status;
const struct firmware *fw;
unsigned long timeout;
u8 *chunk_buffer;
@@ -499,20 +500,14 @@ static int if_sdio_prog_helper(struct if_sdio_card *card)
size = fw->size;
while (size) {
- timeout = jiffies + HZ;
- while (1) {
- status = sdio_readb(card->func, IF_SDIO_STATUS, &ret);
- if (ret)
- goto release;
- if ((status & IF_SDIO_IO_RDY) &&
- (status & IF_SDIO_DL_RDY))
- break;
- if (time_after(jiffies, timeout)) {
- ret = -ETIMEDOUT;
- goto release;
- }
- mdelay(1);
- }
+ ret = if_sdio_wait_status(card, FW_DL_READY_STATUS);
+ if (ret)
+ goto release;
+
+ /* On some platforms (like Davinci) the chip needs more time
+ * between helper blocks.
+ */
+ mdelay(2);
chunk_size = min(size, (size_t)60);
@@ -582,7 +577,6 @@ out:
static int if_sdio_prog_real(struct if_sdio_card *card)
{
int ret;
- u8 status;
const struct firmware *fw;
unsigned long timeout;
u8 *chunk_buffer;
@@ -614,20 +608,9 @@ static int if_sdio_prog_real(struct if_sdio_card *card)
size = fw->size;
while (size) {
- timeout = jiffies + HZ;
- while (1) {
- status = sdio_readb(card->func, IF_SDIO_STATUS, &ret);
- if (ret)
- goto release;
- if ((status & IF_SDIO_IO_RDY) &&
- (status & IF_SDIO_DL_RDY))
- break;
- if (time_after(jiffies, timeout)) {
- ret = -ETIMEDOUT;
- goto release;
- }
- mdelay(1);
- }
+ ret = if_sdio_wait_status(card, FW_DL_READY_STATUS);
+ if (ret)
+ goto release;
req_size = sdio_readb(card->func, IF_SDIO_RD_BASE, &ret);
if (ret)
@@ -943,6 +926,7 @@ static int if_sdio_probe(struct sdio_func *func,
int ret, i;
unsigned int model;
struct if_sdio_packet *packet;
+ struct mmc_host *host = func->card->host;
lbs_deb_enter(LBS_DEB_SDIO);
@@ -1002,16 +986,34 @@ static int if_sdio_probe(struct sdio_func *func,
card->helper = if_sdio_models[i].helper;
card->firmware = if_sdio_models[i].firmware;
+ kparam_block_sysfs_write(helper_name);
if (lbs_helper_name) {
+ char *helper = kstrdup(lbs_helper_name, GFP_KERNEL);
+ if (!helper) {
+ kparam_unblock_sysfs_write(helper_name);
+ ret = -ENOMEM;
+ goto free;
+ }
lbs_deb_sdio("overriding helper firmware: %s\n",
lbs_helper_name);
- card->helper = lbs_helper_name;
+ card->helper = helper;
+ card->helper_allocated = true;
}
+ kparam_unblock_sysfs_write(helper_name);
+ kparam_block_sysfs_write(fw_name);
if (lbs_fw_name) {
+ char *fw_name = kstrdup(lbs_fw_name, GFP_KERNEL);
+ if (!fw_name) {
+ kparam_unblock_sysfs_write(fw_name);
+ ret = -ENOMEM;
+ goto free;
+ }
lbs_deb_sdio("overriding firmware: %s\n", lbs_fw_name);
- card->firmware = lbs_fw_name;
+ card->firmware = fw_name;
+ card->firmware_allocated = true;
}
+ kparam_unblock_sysfs_write(fw_name);
sdio_claim_host(func);
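The kstrdup()/kparam_block_sysfs_write() dance above exists because charp module parameters can be rewritten through sysfs at any time; the probe path therefore takes a private copy while writes are blocked. A minimal sketch of the same idea, using a hypothetical fw_override parameter:

#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/string.h>

static char *fw_override;
module_param(fw_override, charp, 0644);

/* Return a private copy of the parameter (or NULL), so later sysfs
 * writes cannot change or free the string while it is in use. */
static char *snapshot_fw_override(void)
{
	char *copy = NULL;

	kparam_block_sysfs_write(fw_override);
	if (fw_override)
		copy = kstrdup(fw_override, GFP_KERNEL);
	kparam_unblock_sysfs_write(fw_override);

	return copy;
}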
@@ -1023,6 +1025,25 @@ static int if_sdio_probe(struct sdio_func *func,
if (ret)
goto disable;
+ /* For 1-bit transfers to the 8686 model, we need to enable the
+ * interrupt flag in the CCCR register. Set the MMC_QUIRK_LENIENT_FN0
+ * bit to allow access to non-vendor registers. */
+ if ((card->model == IF_SDIO_MODEL_8686) &&
+ (host->caps & MMC_CAP_SDIO_IRQ) &&
+ (host->ios.bus_width == MMC_BUS_WIDTH_1)) {
+ u8 reg;
+
+ func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
+ reg = sdio_f0_readb(func, SDIO_CCCR_IF, &ret);
+ if (ret)
+ goto release_int;
+
+ reg |= SDIO_BUS_ECSI;
+ sdio_f0_writeb(func, reg, SDIO_CCCR_IF, &ret);
+ if (ret)
+ goto release_int;
+ }
+
card->ioport = sdio_readb(func, IF_SDIO_IOPORT, &ret);
if (ret)
goto release_int;
@@ -1126,6 +1147,10 @@ free:
kfree(packet);
}
+ if (card->helper_allocated)
+ kfree(card->helper);
+ if (card->firmware_allocated)
+ kfree(card->firmware);
kfree(card);
goto out;
@@ -1176,6 +1201,10 @@ static void if_sdio_remove(struct sdio_func *func)
kfree(packet);
}
+ if (card->helper_allocated)
+ kfree(card->helper);
+ if (card->firmware_allocated)
+ kfree(card->firmware);
kfree(card);
lbs_deb_leave(LBS_DEB_SDIO);
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index fcea5741ba62..331e3b724874 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -291,10 +291,13 @@ static int if_usb_probe(struct usb_interface *intf,
}
/* Upload firmware */
+ kparam_block_sysfs_write(fw_name);
if (__if_usb_prog_firmware(cardp, lbs_fw_name, BOOT_CMD_FW_BY_USB)) {
+ kparam_unblock_sysfs_write(fw_name);
lbs_deb_usbd(&udev->dev, "FW upload failed\n");
goto err_prog_firmware;
}
+ kparam_unblock_sysfs_write(fw_name);
if (!(priv = lbs_add_card(cardp, &udev->dev)))
goto err_prog_firmware;
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 598080414b17..d9b8ee130c45 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -229,7 +229,7 @@ static void lbs_tx_timeout(struct net_device *dev)
lbs_pr_err("tx watch dog timeout\n");
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
if (priv->currenttxskb)
lbs_send_tx_feedback(priv, 0);
@@ -319,7 +319,7 @@ static int lbs_add_mcast_addrs(struct cmd_ds_mac_multicast_adr *cmd,
struct net_device *dev, int nr_addrs)
{
int i = nr_addrs;
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
int cnt;
if ((dev->flags & (IFF_UP|IFF_MULTICAST)) != (IFF_UP|IFF_MULTICAST))
@@ -327,19 +327,19 @@ static int lbs_add_mcast_addrs(struct cmd_ds_mac_multicast_adr *cmd,
netif_addr_lock_bh(dev);
cnt = netdev_mc_count(dev);
- netdev_for_each_mc_addr(mc_list, dev) {
- if (mac_in_list(cmd->maclist, nr_addrs, mc_list->dmi_addr)) {
+ netdev_for_each_mc_addr(ha, dev) {
+ if (mac_in_list(cmd->maclist, nr_addrs, ha->addr)) {
lbs_deb_net("mcast address %s:%pM skipped\n", dev->name,
- mc_list->dmi_addr);
+ ha->addr);
cnt--;
continue;
}
if (i == MRVDRV_MAX_MULTICAST_LIST_SIZE)
break;
- memcpy(&cmd->maclist[6*i], mc_list->dmi_addr, ETH_ALEN);
+ memcpy(&cmd->maclist[6*i], ha->addr, ETH_ALEN);
lbs_deb_net("mcast address %s:%pM added to filter\n", dev->name,
- mc_list->dmi_addr);
+ ha->addr);
i++;
cnt--;
}
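The conversion above moves from the removed dev_mc_list to netdev_hw_addr, where the address lives in ha->addr and iteration goes through netdev_for_each_mc_addr() under the address lock. A small sketch of that walk (function and buffer names are illustrative):

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>

/* Copy up to 'max' multicast addresses out of the device list and
 * return how many were copied. */
static int copy_mcast_list(struct net_device *dev, u8 list[][ETH_ALEN], int max)
{
	struct netdev_hw_addr *ha;
	int i = 0;

	netif_addr_lock_bh(dev);
	netdev_for_each_mc_addr(ha, dev) {
		if (i == max)
			break;
		memcpy(list[i++], ha->addr, ETH_ALEN);
	}
	netif_addr_unlock_bh(dev);

	return i;
}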
@@ -836,6 +836,7 @@ static int lbs_init_adapter(struct lbs_private *priv)
priv->is_auto_deep_sleep_enabled = 0;
priv->wakeup_dev_required = 0;
init_waitqueue_head(&priv->ds_awake_q);
+ priv->authtype_auto = 1;
mutex_init(&priv->lock);
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index 784dae714705..e2b8d886b091 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -39,10 +39,10 @@ static int process_rxed_802_11_packet(struct lbs_private *priv,
struct sk_buff *skb);
/**
- * @brief This function computes the avgSNR .
+ * @brief This function computes the avgSNR .
*
- * @param priv A pointer to struct lbs_private structure
- * @return avgSNR
+ * @param priv A pointer to struct lbs_private structure
+ * @return avgSNR
*/
static u8 lbs_getavgsnr(struct lbs_private *priv)
{
@@ -57,10 +57,10 @@ static u8 lbs_getavgsnr(struct lbs_private *priv)
}
/**
- * @brief This function computes the AvgNF
+ * @brief This function computes the AvgNF
*
- * @param priv A pointer to struct lbs_private structure
- * @return AvgNF
+ * @param priv A pointer to struct lbs_private structure
+ * @return AvgNF
*/
static u8 lbs_getavgnf(struct lbs_private *priv)
{
@@ -75,11 +75,11 @@ static u8 lbs_getavgnf(struct lbs_private *priv)
}
/**
- * @brief This function save the raw SNR/NF to our internel buffer
+ * @brief This function save the raw SNR/NF to our internel buffer
*
- * @param priv A pointer to struct lbs_private structure
- * @param prxpd A pointer to rxpd structure of received packet
- * @return n/a
+ * @param priv A pointer to struct lbs_private structure
+ * @param prxpd A pointer to rxpd structure of received packet
+ * @return n/a
*/
static void lbs_save_rawSNRNF(struct lbs_private *priv, struct rxpd *p_rx_pd)
{
@@ -94,11 +94,11 @@ static void lbs_save_rawSNRNF(struct lbs_private *priv, struct rxpd *p_rx_pd)
}
/**
- * @brief This function computes the RSSI in received packet.
+ * @brief This function computes the RSSI in received packet.
*
- * @param priv A pointer to struct lbs_private structure
- * @param prxpd A pointer to rxpd structure of received packet
- * @return n/a
+ * @param priv A pointer to struct lbs_private structure
+ * @param prxpd A pointer to rxpd structure of received packet
+ * @return n/a
*/
static void lbs_compute_rssi(struct lbs_private *priv, struct rxpd *p_rx_pd)
{
@@ -135,9 +135,9 @@ static void lbs_compute_rssi(struct lbs_private *priv, struct rxpd *p_rx_pd)
* @brief This function processes received packet and forwards it
* to kernel/upper layer
*
- * @param priv A pointer to struct lbs_private
- * @param skb A pointer to skb which includes the received packet
- * @return 0 or -1
+ * @param priv A pointer to struct lbs_private
+ * @param skb A pointer to skb which includes the received packet
+ * @return 0 or -1
*/
int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
{
@@ -197,7 +197,7 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
* before the snap_type.
*/
p_ethhdr = (struct ethhdr *)
- ((u8 *) & p_rx_pkt->eth803_hdr
+ ((u8 *) &p_rx_pkt->eth803_hdr
+ sizeof(p_rx_pkt->eth803_hdr) + sizeof(p_rx_pkt->rfc1042_hdr)
- sizeof(p_rx_pkt->eth803_hdr.dest_addr)
- sizeof(p_rx_pkt->eth803_hdr.src_addr)
@@ -214,7 +214,7 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
hdrchop = (u8 *)p_ethhdr - (u8 *)p_rx_pd;
} else {
lbs_deb_hex(LBS_DEB_RX, "RX Data: LLC/SNAP",
- (u8 *) & p_rx_pkt->rfc1042_hdr,
+ (u8 *) &p_rx_pkt->rfc1042_hdr,
sizeof(p_rx_pkt->rfc1042_hdr));
/* Chop off the rxpd */
@@ -255,8 +255,8 @@ EXPORT_SYMBOL_GPL(lbs_process_rxed_packet);
* @brief This function converts Tx/Rx rates from the Marvell WLAN format
* (see Table 2 in Section 3.1) to IEEE80211_RADIOTAP_RATE units (500 Kb/s)
*
- * @param rate Input rate
- * @return Output Rate (0 if invalid)
+ * @param rate Input rate
+ * @return Output Rate (0 if invalid)
*/
static u8 convert_mv_rate_to_radiotap(u8 rate)
{
@@ -295,9 +295,9 @@ static u8 convert_mv_rate_to_radiotap(u8 rate)
* @brief This function processes a received 802.11 packet and forwards it
* to kernel/upper layer
*
- * @param priv A pointer to struct lbs_private
- * @param skb A pointer to skb which includes the received packet
- * @return 0 or -1
+ * @param priv A pointer to struct lbs_private
+ * @param skb A pointer to skb which includes the received packet
+ * @return 0 or -1
*/
static int process_rxed_802_11_packet(struct lbs_private *priv,
struct sk_buff *skb)
@@ -314,7 +314,7 @@ static int process_rxed_802_11_packet(struct lbs_private *priv,
p_rx_pkt = (struct rx80211packethdr *) skb->data;
prxpd = &p_rx_pkt->rx_pd;
- // lbs_deb_hex(LBS_DEB_RX, "RX Data: Before chop rxpd", skb->data, min(skb->len, 100));
+ /* lbs_deb_hex(LBS_DEB_RX, "RX Data: Before chop rxpd", skb->data, min(skb->len, 100)); */
if (skb->len < (ETH_HLEN + 8 + sizeof(struct rxpd))) {
lbs_deb_rx("rx err: frame received with bad length\n");
diff --git a/drivers/net/wireless/libertas/tx.c b/drivers/net/wireless/libertas/tx.c
index 52d244ea3d97..a9bf658659eb 100644
--- a/drivers/net/wireless/libertas/tx.c
+++ b/drivers/net/wireless/libertas/tx.c
@@ -147,8 +147,6 @@ netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
- dev->trans_start = jiffies;
-
if (priv->monitormode) {
/* Keep the skb to echo it back once Tx feedback is
received from FW */
diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
index 9b555884b08a..f96a96031a50 100644
--- a/drivers/net/wireless/libertas/wext.c
+++ b/drivers/net/wireless/libertas/wext.c
@@ -1441,8 +1441,10 @@ static int lbs_set_encode(struct net_device *dev,
set_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags);
if (dwrq->flags & IW_ENCODE_RESTRICTED) {
+ priv->authtype_auto = 0;
assoc_req->secinfo.auth_mode = IW_AUTH_ALG_SHARED_KEY;
} else if (dwrq->flags & IW_ENCODE_OPEN) {
+ priv->authtype_auto = 0;
assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
}
@@ -1621,8 +1623,10 @@ static int lbs_set_encodeext(struct net_device *dev,
goto out;
if (dwrq->flags & IW_ENCODE_RESTRICTED) {
+ priv->authtype_auto = 0;
assoc_req->secinfo.auth_mode = IW_AUTH_ALG_SHARED_KEY;
} else if (dwrq->flags & IW_ENCODE_OPEN) {
+ priv->authtype_auto = 0;
assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
}
diff --git a/drivers/net/wireless/libertas_tf/cmd.c b/drivers/net/wireless/libertas_tf/cmd.c
index b620daf59ef7..8945afd6ce3e 100644
--- a/drivers/net/wireless/libertas_tf/cmd.c
+++ b/drivers/net/wireless/libertas_tf/cmd.c
@@ -7,6 +7,8 @@
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/slab.h>
#include "libertas_tf.h"
@@ -82,6 +84,8 @@ int lbtf_update_hw_spec(struct lbtf_private *priv)
int ret = -1;
u32 i;
+ lbtf_deb_enter(LBTF_DEB_CMD);
+
memset(&cmd, 0, sizeof(cmd));
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
memcpy(cmd.permanentaddr, priv->current_addr, ETH_ALEN);
@@ -104,6 +108,8 @@ int lbtf_update_hw_spec(struct lbtf_private *priv)
priv->fwrelease >> 8 & 0xff,
priv->fwrelease & 0xff,
priv->fwcapinfo);
+ lbtf_deb_cmd("GET_HW_SPEC: hardware interface 0x%x, hardware spec 0x%04x\n",
+ cmd.hwifversion, cmd.version);
/* Clamp region code to 8-bit since FW spec indicates that it should
* only ever be 8-bit, even though the field size is 16-bit. Some
@@ -118,8 +124,10 @@ int lbtf_update_hw_spec(struct lbtf_private *priv)
}
/* if it's unidentified region code, use the default (USA) */
- if (i >= MRVDRV_MAX_REGION_CODE)
+ if (i >= MRVDRV_MAX_REGION_CODE) {
priv->regioncode = 0x10;
+ pr_info("unidentified region code; using the default (USA)\n");
+ }
if (priv->current_addr[0] == 0xff)
memmove(priv->current_addr, cmd.permanentaddr, ETH_ALEN);
@@ -128,6 +136,7 @@ int lbtf_update_hw_spec(struct lbtf_private *priv)
lbtf_geo_init(priv);
out:
+ lbtf_deb_leave(LBTF_DEB_CMD);
return ret;
}
@@ -141,13 +150,18 @@ out:
*/
int lbtf_set_channel(struct lbtf_private *priv, u8 channel)
{
+ int ret = 0;
struct cmd_ds_802_11_rf_channel cmd;
+ lbtf_deb_enter(LBTF_DEB_CMD);
+
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
cmd.action = cpu_to_le16(CMD_OPT_802_11_RF_CHANNEL_SET);
cmd.channel = cpu_to_le16(channel);
- return lbtf_cmd_with_response(priv, CMD_802_11_RF_CHANNEL, &cmd);
+ ret = lbtf_cmd_with_response(priv, CMD_802_11_RF_CHANNEL, &cmd);
+ lbtf_deb_leave_args(LBTF_DEB_CMD, "ret %d", ret);
+ return ret;
}
int lbtf_beacon_set(struct lbtf_private *priv, struct sk_buff *beacon)
@@ -155,20 +169,28 @@ int lbtf_beacon_set(struct lbtf_private *priv, struct sk_buff *beacon)
struct cmd_ds_802_11_beacon_set cmd;
int size;
- if (beacon->len > MRVL_MAX_BCN_SIZE)
+ lbtf_deb_enter(LBTF_DEB_CMD);
+
+ if (beacon->len > MRVL_MAX_BCN_SIZE) {
+ lbtf_deb_leave_args(LBTF_DEB_CMD, "ret %d", -1);
return -1;
+ }
size = sizeof(cmd) - sizeof(cmd.beacon) + beacon->len;
cmd.hdr.size = cpu_to_le16(size);
cmd.len = cpu_to_le16(beacon->len);
memcpy(cmd.beacon, (u8 *) beacon->data, beacon->len);
lbtf_cmd_async(priv, CMD_802_11_BEACON_SET, &cmd.hdr, size);
+
+ lbtf_deb_leave_args(LBTF_DEB_CMD, "ret %d", 0);
return 0;
}
int lbtf_beacon_ctrl(struct lbtf_private *priv, bool beacon_enable,
- int beacon_int) {
+ int beacon_int)
+{
struct cmd_ds_802_11_beacon_control cmd;
+ lbtf_deb_enter(LBTF_DEB_CMD);
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
cmd.action = cpu_to_le16(CMD_ACT_SET);
@@ -176,6 +198,8 @@ int lbtf_beacon_ctrl(struct lbtf_private *priv, bool beacon_enable,
cmd.beacon_period = cpu_to_le16(beacon_int);
lbtf_cmd_async(priv, CMD_802_11_BEACON_CTRL, &cmd.hdr, sizeof(cmd));
+
+ lbtf_deb_leave(LBTF_DEB_CMD);
return 0;
}
@@ -183,17 +207,28 @@ static void lbtf_queue_cmd(struct lbtf_private *priv,
struct cmd_ctrl_node *cmdnode)
{
unsigned long flags;
+ lbtf_deb_enter(LBTF_DEB_HOST);
- if (!cmdnode)
- return;
+ if (!cmdnode) {
+ lbtf_deb_host("QUEUE_CMD: cmdnode is NULL\n");
+ goto qcmd_done;
+ }
- if (!cmdnode->cmdbuf->size)
- return;
+ if (!cmdnode->cmdbuf->size) {
+ lbtf_deb_host("DNLD_CMD: cmd size is zero\n");
+ goto qcmd_done;
+ }
cmdnode->result = 0;
spin_lock_irqsave(&priv->driver_lock, flags);
list_add_tail(&cmdnode->list, &priv->cmdpendingq);
spin_unlock_irqrestore(&priv->driver_lock, flags);
+
+ lbtf_deb_host("QUEUE_CMD: inserted command 0x%04x into cmdpendingq\n",
+ le16_to_cpu(cmdnode->cmdbuf->command));
+
+qcmd_done:
+ lbtf_deb_leave(LBTF_DEB_HOST);
}
static void lbtf_submit_command(struct lbtf_private *priv,
@@ -206,22 +241,33 @@ static void lbtf_submit_command(struct lbtf_private *priv,
int timeo = 5 * HZ;
int ret;
+ lbtf_deb_enter(LBTF_DEB_HOST);
+
cmd = cmdnode->cmdbuf;
spin_lock_irqsave(&priv->driver_lock, flags);
priv->cur_cmd = cmdnode;
cmdsize = le16_to_cpu(cmd->size);
command = le16_to_cpu(cmd->command);
+
+ lbtf_deb_cmd("DNLD_CMD: command 0x%04x, seq %d, size %d\n",
+ command, le16_to_cpu(cmd->seqnum), cmdsize);
+ lbtf_deb_hex(LBTF_DEB_CMD, "DNLD_CMD", (void *) cmdnode->cmdbuf, cmdsize);
+
ret = priv->hw_host_to_card(priv, MVMS_CMD, (u8 *) cmd, cmdsize);
spin_unlock_irqrestore(&priv->driver_lock, flags);
- if (ret)
+ if (ret) {
+ pr_info("DNLD_CMD: hw_host_to_card failed: %d\n", ret);
/* Let the timer kick in and retry, and potentially reset
the whole thing if the condition persists */
timeo = HZ;
+ }
/* Setup the timer after transmit command */
mod_timer(&priv->command_timer, jiffies + timeo);
+
+ lbtf_deb_leave(LBTF_DEB_HOST);
}
/**
@@ -231,8 +277,10 @@ static void lbtf_submit_command(struct lbtf_private *priv,
static void __lbtf_cleanup_and_insert_cmd(struct lbtf_private *priv,
struct cmd_ctrl_node *cmdnode)
{
+ lbtf_deb_enter(LBTF_DEB_HOST);
+
if (!cmdnode)
- return;
+ goto cl_ins_out;
cmdnode->callback = NULL;
cmdnode->callback_arg = 0;
@@ -240,6 +288,9 @@ static void __lbtf_cleanup_and_insert_cmd(struct lbtf_private *priv,
memset(cmdnode->cmdbuf, 0, LBS_CMD_BUFFER_SIZE);
list_add_tail(&cmdnode->list, &priv->cmdfreeq);
+
+cl_ins_out:
+ lbtf_deb_leave(LBTF_DEB_HOST);
}
static void lbtf_cleanup_and_insert_cmd(struct lbtf_private *priv,
@@ -268,29 +319,41 @@ int lbtf_cmd_set_mac_multicast_addr(struct lbtf_private *priv)
{
struct cmd_ds_mac_multicast_addr cmd;
+ lbtf_deb_enter(LBTF_DEB_CMD);
+
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
cmd.action = cpu_to_le16(CMD_ACT_SET);
cmd.nr_of_adrs = cpu_to_le16((u16) priv->nr_of_multicastmacaddr);
+
+ lbtf_deb_cmd("MULTICAST_ADR: setting %d addresses\n", cmd.nr_of_adrs);
+
memcpy(cmd.maclist, priv->multicastlist,
priv->nr_of_multicastmacaddr * ETH_ALEN);
lbtf_cmd_async(priv, CMD_MAC_MULTICAST_ADR, &cmd.hdr, sizeof(cmd));
+
+ lbtf_deb_leave(LBTF_DEB_CMD);
return 0;
}
void lbtf_set_mode(struct lbtf_private *priv, enum lbtf_mode mode)
{
struct cmd_ds_set_mode cmd;
+ lbtf_deb_enter(LBTF_DEB_WEXT);
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
cmd.mode = cpu_to_le16(mode);
+ lbtf_deb_wext("Switching to mode: 0x%x\n", mode);
lbtf_cmd_async(priv, CMD_802_11_SET_MODE, &cmd.hdr, sizeof(cmd));
+
+ lbtf_deb_leave(LBTF_DEB_WEXT);
}
void lbtf_set_bssid(struct lbtf_private *priv, bool activate, const u8 *bssid)
{
struct cmd_ds_set_bssid cmd;
+ lbtf_deb_enter(LBTF_DEB_CMD);
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
cmd.activate = activate ? 1 : 0;
@@ -298,11 +361,13 @@ void lbtf_set_bssid(struct lbtf_private *priv, bool activate, const u8 *bssid)
memcpy(cmd.bssid, bssid, ETH_ALEN);
lbtf_cmd_async(priv, CMD_802_11_SET_BSSID, &cmd.hdr, sizeof(cmd));
+ lbtf_deb_leave(LBTF_DEB_CMD);
}
int lbtf_set_mac_address(struct lbtf_private *priv, uint8_t *mac_addr)
{
struct cmd_ds_802_11_mac_address cmd;
+ lbtf_deb_enter(LBTF_DEB_CMD);
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
cmd.action = cpu_to_le16(CMD_ACT_SET);
@@ -310,6 +375,7 @@ int lbtf_set_mac_address(struct lbtf_private *priv, uint8_t *mac_addr)
memcpy(cmd.macadd, mac_addr, ETH_ALEN);
lbtf_cmd_async(priv, CMD_802_11_MAC_ADDRESS, &cmd.hdr, sizeof(cmd));
+ lbtf_deb_leave(LBTF_DEB_CMD);
return 0;
}
@@ -318,6 +384,8 @@ int lbtf_set_radio_control(struct lbtf_private *priv)
int ret = 0;
struct cmd_ds_802_11_radio_control cmd;
+ lbtf_deb_enter(LBTF_DEB_CMD);
+
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
cmd.action = cpu_to_le16(CMD_ACT_SET);
@@ -341,19 +409,28 @@ int lbtf_set_radio_control(struct lbtf_private *priv)
else
cmd.control &= cpu_to_le16(~TURN_ON_RF);
+ lbtf_deb_cmd("RADIO_SET: radio %d, preamble %d\n", priv->radioon,
+ priv->preamble);
+
ret = lbtf_cmd_with_response(priv, CMD_802_11_RADIO_CONTROL, &cmd);
+
+ lbtf_deb_leave_args(LBTF_DEB_CMD, "ret %d", ret);
return ret;
}
void lbtf_set_mac_control(struct lbtf_private *priv)
{
struct cmd_ds_mac_control cmd;
+ lbtf_deb_enter(LBTF_DEB_CMD);
+
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
cmd.action = cpu_to_le16(priv->mac_control);
cmd.reserved = 0;
lbtf_cmd_async(priv, CMD_MAC_CONTROL,
&cmd.hdr, sizeof(cmd));
+
+ lbtf_deb_leave(LBTF_DEB_CMD);
}
/**
@@ -365,29 +442,43 @@ void lbtf_set_mac_control(struct lbtf_private *priv)
*/
int lbtf_allocate_cmd_buffer(struct lbtf_private *priv)
{
+ int ret = 0;
u32 bufsize;
u32 i;
struct cmd_ctrl_node *cmdarray;
+ lbtf_deb_enter(LBTF_DEB_HOST);
+
/* Allocate and initialize the command array */
bufsize = sizeof(struct cmd_ctrl_node) * LBS_NUM_CMD_BUFFERS;
cmdarray = kzalloc(bufsize, GFP_KERNEL);
- if (!cmdarray)
- return -1;
+ if (!cmdarray) {
+ lbtf_deb_host("ALLOC_CMD_BUF: tempcmd_array is NULL\n");
+ ret = -1;
+ goto done;
+ }
priv->cmd_array = cmdarray;
/* Allocate and initialize each command buffer in the command array */
for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) {
cmdarray[i].cmdbuf = kzalloc(LBS_CMD_BUFFER_SIZE, GFP_KERNEL);
- if (!cmdarray[i].cmdbuf)
- return -1;
+ if (!cmdarray[i].cmdbuf) {
+ lbtf_deb_host("ALLOC_CMD_BUF: ptempvirtualaddr is NULL\n");
+ ret = -1;
+ goto done;
+ }
}
for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) {
init_waitqueue_head(&cmdarray[i].cmdwait_q);
lbtf_cleanup_and_insert_cmd(priv, &cmdarray[i]);
}
- return 0;
+
+ ret = 0;
+
+done:
+ lbtf_deb_leave_args(LBTF_DEB_HOST, "ret %d", ret);
+ return ret;
}
/**
@@ -402,9 +493,13 @@ int lbtf_free_cmd_buffer(struct lbtf_private *priv)
struct cmd_ctrl_node *cmdarray;
unsigned int i;
+ lbtf_deb_enter(LBTF_DEB_HOST);
+
/* need to check if cmd array is allocated or not */
- if (priv->cmd_array == NULL)
- return 0;
+ if (priv->cmd_array == NULL) {
+ lbtf_deb_host("FREE_CMD_BUF: cmd_array is NULL\n");
+ goto done;
+ }
cmdarray = priv->cmd_array;
@@ -418,6 +513,8 @@ int lbtf_free_cmd_buffer(struct lbtf_private *priv)
kfree(priv->cmd_array);
priv->cmd_array = NULL;
+done:
+ lbtf_deb_leave(LBTF_DEB_HOST);
return 0;
}
@@ -433,6 +530,8 @@ static struct cmd_ctrl_node *lbtf_get_cmd_ctrl_node(struct lbtf_private *priv)
struct cmd_ctrl_node *tempnode;
unsigned long flags;
+ lbtf_deb_enter(LBTF_DEB_HOST);
+
if (!priv)
return NULL;
@@ -442,11 +541,14 @@ static struct cmd_ctrl_node *lbtf_get_cmd_ctrl_node(struct lbtf_private *priv)
tempnode = list_first_entry(&priv->cmdfreeq,
struct cmd_ctrl_node, list);
list_del(&tempnode->list);
- } else
+ } else {
+ lbtf_deb_host("GET_CMD_NODE: cmd_ctrl_node is not available\n");
tempnode = NULL;
+ }
spin_unlock_irqrestore(&priv->driver_lock, flags);
+ lbtf_deb_leave(LBTF_DEB_HOST);
return tempnode;
}
@@ -462,16 +564,20 @@ int lbtf_execute_next_command(struct lbtf_private *priv)
struct cmd_ctrl_node *cmdnode = NULL;
struct cmd_header *cmd;
unsigned long flags;
+ int ret = 0;
- /* Debug group is LBS_DEB_THREAD and not LBS_DEB_HOST, because the
+ /* Debug group is lbtf_deb_THREAD and not lbtf_deb_HOST, because the
* only caller to us is lbtf_thread() and we get even when a
* data packet is received */
+ lbtf_deb_enter(LBTF_DEB_THREAD);
spin_lock_irqsave(&priv->driver_lock, flags);
if (priv->cur_cmd) {
+ pr_alert("EXEC_NEXT_CMD: already processing command!\n");
spin_unlock_irqrestore(&priv->driver_lock, flags);
- return -1;
+ ret = -1;
+ goto done;
}
if (!list_empty(&priv->cmdpendingq)) {
@@ -483,11 +589,17 @@ int lbtf_execute_next_command(struct lbtf_private *priv)
cmd = cmdnode->cmdbuf;
list_del(&cmdnode->list);
+ lbtf_deb_host("EXEC_NEXT_CMD: sending command 0x%04x\n",
+ le16_to_cpu(cmd->command));
spin_unlock_irqrestore(&priv->driver_lock, flags);
lbtf_submit_command(priv, cmdnode);
} else
spin_unlock_irqrestore(&priv->driver_lock, flags);
- return 0;
+
+ ret = 0;
+done:
+ lbtf_deb_leave(LBTF_DEB_THREAD);
+ return ret;
}
static struct cmd_ctrl_node *__lbtf_cmd_async(struct lbtf_private *priv,
@@ -498,14 +610,22 @@ static struct cmd_ctrl_node *__lbtf_cmd_async(struct lbtf_private *priv,
{
struct cmd_ctrl_node *cmdnode;
- if (priv->surpriseremoved)
- return ERR_PTR(-ENOENT);
+ lbtf_deb_enter(LBTF_DEB_HOST);
+
+ if (priv->surpriseremoved) {
+ lbtf_deb_host("PREP_CMD: card removed\n");
+ cmdnode = ERR_PTR(-ENOENT);
+ goto done;
+ }
cmdnode = lbtf_get_cmd_ctrl_node(priv);
if (cmdnode == NULL) {
+ lbtf_deb_host("PREP_CMD: cmdnode is NULL\n");
+
/* Wake up main thread to execute next command */
queue_work(lbtf_wq, &priv->cmd_work);
- return ERR_PTR(-ENOBUFS);
+ cmdnode = ERR_PTR(-ENOBUFS);
+ goto done;
}
cmdnode->callback = callback;
@@ -520,17 +640,24 @@ static struct cmd_ctrl_node *__lbtf_cmd_async(struct lbtf_private *priv,
cmdnode->cmdbuf->size = cpu_to_le16(in_cmd_size);
cmdnode->cmdbuf->seqnum = cpu_to_le16(priv->seqnum);
cmdnode->cmdbuf->result = 0;
+
+ lbtf_deb_host("PREP_CMD: command 0x%04x\n", command);
+
cmdnode->cmdwaitqwoken = 0;
lbtf_queue_cmd(priv, cmdnode);
queue_work(lbtf_wq, &priv->cmd_work);
+ done:
+ lbtf_deb_leave_args(LBTF_DEB_HOST, "ret %p", cmdnode);
return cmdnode;
}
void lbtf_cmd_async(struct lbtf_private *priv, uint16_t command,
struct cmd_header *in_cmd, int in_cmd_size)
{
+ lbtf_deb_enter(LBTF_DEB_CMD);
__lbtf_cmd_async(priv, command, in_cmd, in_cmd_size, NULL, 0);
+ lbtf_deb_leave(LBTF_DEB_CMD);
}
int __lbtf_cmd(struct lbtf_private *priv, uint16_t command,
@@ -543,30 +670,35 @@ int __lbtf_cmd(struct lbtf_private *priv, uint16_t command,
unsigned long flags;
int ret = 0;
+ lbtf_deb_enter(LBTF_DEB_HOST);
+
cmdnode = __lbtf_cmd_async(priv, command, in_cmd, in_cmd_size,
callback, callback_arg);
- if (IS_ERR(cmdnode))
- return PTR_ERR(cmdnode);
+ if (IS_ERR(cmdnode)) {
+ ret = PTR_ERR(cmdnode);
+ goto done;
+ }
might_sleep();
ret = wait_event_interruptible(cmdnode->cmdwait_q,
cmdnode->cmdwaitqwoken);
- if (ret) {
- printk(KERN_DEBUG
- "libertastf: command 0x%04x interrupted by signal",
- command);
- return ret;
+ if (ret) {
+ pr_info("PREP_CMD: command 0x%04x interrupted by signal: %d\n",
+ command, ret);
+ goto done;
}
spin_lock_irqsave(&priv->driver_lock, flags);
ret = cmdnode->result;
if (ret)
- printk(KERN_DEBUG "libertastf: command 0x%04x failed: %d\n",
+ pr_info("PREP_CMD: command 0x%04x failed: %d\n",
command, ret);
__lbtf_cleanup_and_insert_cmd(priv, cmdnode);
spin_unlock_irqrestore(&priv->driver_lock, flags);
+done:
+ lbtf_deb_leave_args(LBTF_DEB_HOST, "ret %d", ret);
return ret;
}
EXPORT_SYMBOL_GPL(__lbtf_cmd);
@@ -587,6 +719,8 @@ int lbtf_process_rx_command(struct lbtf_private *priv)
unsigned long flags;
uint16_t result;
+ lbtf_deb_enter(LBTF_DEB_CMD);
+
mutex_lock(&priv->lock);
spin_lock_irqsave(&priv->driver_lock, flags);
@@ -602,7 +736,7 @@ int lbtf_process_rx_command(struct lbtf_private *priv)
result = le16_to_cpu(resp->result);
if (net_ratelimit())
- printk(KERN_DEBUG "libertastf: cmd response 0x%04x, seq %d, size %d\n",
+ pr_info("libertastf: cmd response 0x%04x, seq %d, size %d\n",
respcmd, le16_to_cpu(resp->seqnum),
le16_to_cpu(resp->size));
@@ -639,7 +773,7 @@ int lbtf_process_rx_command(struct lbtf_private *priv)
switch (respcmd) {
case CMD_RET(CMD_GET_HW_SPEC):
case CMD_RET(CMD_802_11_RESET):
- printk(KERN_DEBUG "libertastf: reset failed\n");
+ pr_info("libertastf: reset failed\n");
break;
}
@@ -666,5 +800,6 @@ int lbtf_process_rx_command(struct lbtf_private *priv)
done:
mutex_unlock(&priv->lock);
+ lbtf_deb_leave_args(LBTF_DEB_CMD, "ret %d", ret);
return ret;
}
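Every hunk in cmd.c above applies the same transformation: enter/leave tracing is added, and early returns are converted into jumps to a single exit label so the leave trace always fires. A condensed sketch of the resulting shape (the function body, message text and label name here are illustrative, not part of the patch):

int lbtf_example_cmd(struct lbtf_private *priv)
{
	int ret = 0;

	lbtf_deb_enter(LBTF_DEB_HOST);

	if (!priv) {
		lbtf_deb_host("example: priv is NULL\n");
		ret = -1;
		goto done;		/* was: return -1; */
	}

	/* ... command work ... */

done:
	lbtf_deb_leave_args(LBTF_DEB_HOST, "ret %d", ret);
	return ret;
}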
diff --git a/drivers/net/wireless/libertas_tf/deb_defs.h b/drivers/net/wireless/libertas_tf/deb_defs.h
new file mode 100644
index 000000000000..ae753962d8b5
--- /dev/null
+++ b/drivers/net/wireless/libertas_tf/deb_defs.h
@@ -0,0 +1,104 @@
+/**
+ * This header file contains global constant/enum definitions,
+ * global variable declarations.
+ */
+#ifndef _LBS_DEB_DEFS_H_
+#define _LBS_DEB_DEFS_H_
+
+#ifndef DRV_NAME
+#define DRV_NAME "libertas_tf"
+#endif
+
+#include <linux/spinlock.h>
+
+#ifdef CONFIG_LIBERTAS_THINFIRM_DEBUG
+#define DEBUG
+#define PROC_DEBUG
+#endif
+
+#define LBTF_DEB_ENTER 0x00000001
+#define LBTF_DEB_LEAVE 0x00000002
+#define LBTF_DEB_MAIN 0x00000004
+#define LBTF_DEB_NET 0x00000008
+#define LBTF_DEB_MESH 0x00000010
+#define LBTF_DEB_WEXT 0x00000020
+#define LBTF_DEB_IOCTL 0x00000040
+#define LBTF_DEB_SCAN 0x00000080
+#define LBTF_DEB_ASSOC 0x00000100
+#define LBTF_DEB_JOIN 0x00000200
+#define LBTF_DEB_11D 0x00000400
+#define LBTF_DEB_DEBUGFS 0x00000800
+#define LBTF_DEB_ETHTOOL 0x00001000
+#define LBTF_DEB_HOST 0x00002000
+#define LBTF_DEB_CMD 0x00004000
+#define LBTF_DEB_RX 0x00008000
+#define LBTF_DEB_TX 0x00010000
+#define LBTF_DEB_USB 0x00020000
+#define LBTF_DEB_CS 0x00040000
+#define LBTF_DEB_FW 0x00080000
+#define LBTF_DEB_THREAD 0x00100000
+#define LBTF_DEB_HEX 0x00200000
+#define LBTF_DEB_SDIO 0x00400000
+#define LBTF_DEB_MACOPS 0x00800000
+
+extern unsigned int lbtf_debug;
+
+
+#ifdef DEBUG
+#define LBTF_DEB_LL(grp, grpnam, fmt, args...) \
+do { if ((lbtf_debug & (grp)) == (grp)) \
+ printk(KERN_DEBUG DRV_NAME grpnam "%s: " fmt, \
+ in_interrupt() ? " (INT)" : "", ## args); } while (0)
+#else
+#define LBTF_DEB_LL(grp, grpnam, fmt, args...) do {} while (0)
+#endif
+
+#define lbtf_deb_enter(grp) \
+ LBTF_DEB_LL(grp | LBTF_DEB_ENTER, " enter", "%s()\n", __func__);
+#define lbtf_deb_enter_args(grp, fmt, args...) \
+ LBTF_DEB_LL(grp | LBTF_DEB_ENTER, " enter", "%s(" fmt ")\n", __func__, ## args);
+#define lbtf_deb_leave(grp) \
+ LBTF_DEB_LL(grp | LBTF_DEB_LEAVE, " leave", "%s()\n", __func__);
+#define lbtf_deb_leave_args(grp, fmt, args...) \
+ LBTF_DEB_LL(grp | LBTF_DEB_LEAVE, " leave", "%s(), " fmt "\n", \
+ __func__, ##args);
+#define lbtf_deb_main(fmt, args...) LBTF_DEB_LL(LBTF_DEB_MAIN, " main", fmt, ##args)
+#define lbtf_deb_net(fmt, args...) LBTF_DEB_LL(LBTF_DEB_NET, " net", fmt, ##args)
+#define lbtf_deb_mesh(fmt, args...) LBTF_DEB_LL(LBTF_DEB_MESH, " mesh", fmt, ##args)
+#define lbtf_deb_wext(fmt, args...) LBTF_DEB_LL(LBTF_DEB_WEXT, " wext", fmt, ##args)
+#define lbtf_deb_ioctl(fmt, args...) LBTF_DEB_LL(LBTF_DEB_IOCTL, " ioctl", fmt, ##args)
+#define lbtf_deb_scan(fmt, args...) LBTF_DEB_LL(LBTF_DEB_SCAN, " scan", fmt, ##args)
+#define lbtf_deb_assoc(fmt, args...) LBTF_DEB_LL(LBTF_DEB_ASSOC, " assoc", fmt, ##args)
+#define lbtf_deb_join(fmt, args...) LBTF_DEB_LL(LBTF_DEB_JOIN, " join", fmt, ##args)
+#define lbtf_deb_11d(fmt, args...) LBTF_DEB_LL(LBTF_DEB_11D, " 11d", fmt, ##args)
+#define lbtf_deb_debugfs(fmt, args...) LBTF_DEB_LL(LBTF_DEB_DEBUGFS, " debugfs", fmt, ##args)
+#define lbtf_deb_ethtool(fmt, args...) LBTF_DEB_LL(LBTF_DEB_ETHTOOL, " ethtool", fmt, ##args)
+#define lbtf_deb_host(fmt, args...) LBTF_DEB_LL(LBTF_DEB_HOST, " host", fmt, ##args)
+#define lbtf_deb_cmd(fmt, args...) LBTF_DEB_LL(LBTF_DEB_CMD, " cmd", fmt, ##args)
+#define lbtf_deb_rx(fmt, args...) LBTF_DEB_LL(LBTF_DEB_RX, " rx", fmt, ##args)
+#define lbtf_deb_tx(fmt, args...) LBTF_DEB_LL(LBTF_DEB_TX, " tx", fmt, ##args)
+#define lbtf_deb_fw(fmt, args...) LBTF_DEB_LL(LBTF_DEB_FW, " fw", fmt, ##args)
+#define lbtf_deb_usb(fmt, args...) LBTF_DEB_LL(LBTF_DEB_USB, " usb", fmt, ##args)
+#define lbtf_deb_usbd(dev, fmt, args...) LBTF_DEB_LL(LBTF_DEB_USB, " usbd", "%s:" fmt, dev_name(dev), ##args)
+#define lbtf_deb_cs(fmt, args...) LBTF_DEB_LL(LBTF_DEB_CS, " cs", fmt, ##args)
+#define lbtf_deb_thread(fmt, args...) LBTF_DEB_LL(LBTF_DEB_THREAD, " thread", fmt, ##args)
+#define lbtf_deb_sdio(fmt, args...) LBTF_DEB_LL(LBTF_DEB_SDIO, " sdio", fmt, ##args)
+#define lbtf_deb_macops(fmt, args...) LBTF_DEB_LL(LBTF_DEB_MACOPS, " macops", fmt, ##args)
+
+#ifdef DEBUG
+static inline void lbtf_deb_hex(unsigned int grp, const char *prompt, u8 *buf, int len)
+{
+ char newprompt[32];
+
+ if (len &&
+ (lbtf_debug & LBTF_DEB_HEX) &&
+ (lbtf_debug & grp)) {
+ snprintf(newprompt, sizeof(newprompt), DRV_NAME " %s: ", prompt);
+ print_hex_dump_bytes(newprompt, DUMP_PREFIX_NONE, buf, len);
+ }
+}
+#else
+#define lbtf_deb_hex(grp, prompt, buf, len) do {} while (0)
+#endif
+
+#endif
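As a quick orientation on the bitmask gating above: a call such as lbtf_deb_cmd() expands to LBTF_DEB_LL(LBTF_DEB_CMD, " cmd", ...) and prints only when the group bit is set in lbtf_debug (exposed further down in this patch as the libertas_tf_debug module parameter) and the driver was built with CONFIG_LIBERTAS_THINFIRM_DEBUG. A hedged sketch of equivalent open-coded logic, for illustration only (the helper name is hypothetical):

/* Roughly what LBTF_DEB_LL(LBTF_DEB_CMD, " cmd", ...) boils down to. */
static void example_deb_cmd(const char *msg)
{
	if ((lbtf_debug & LBTF_DEB_CMD) == LBTF_DEB_CMD)
		printk(KERN_DEBUG DRV_NAME " cmd%s: %s",
		       in_interrupt() ? " (INT)" : "", msg);
}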
diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c
index 8cc9db60c14b..c1c9e31e29a1 100644
--- a/drivers/net/wireless/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/libertas_tf/if_usb.c
@@ -7,6 +7,13 @@
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*/
+#define DRV_NAME "lbtf_usb"
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "libertas_tf.h"
+#include "if_usb.h"
+
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/firmware.h>
@@ -14,10 +21,8 @@
#include <linux/slab.h>
#include <linux/usb.h>
-#define DRV_NAME "lbtf_usb"
-
-#include "libertas_tf.h"
-#include "if_usb.h"
+#define INSANEDEBUG 0
+#define lbtf_deb_usb2(...) do { if (INSANEDEBUG) lbtf_deb_usbd(__VA_ARGS__); } while (0)
#define MESSAGE_HEADER_LEN 4
@@ -53,9 +58,14 @@ static int if_usb_reset_device(struct if_usb_card *cardp);
*/
static void if_usb_write_bulk_callback(struct urb *urb)
{
- if (urb->status != 0)
- printk(KERN_INFO "libertastf: URB in failure status: %d\n",
- urb->status);
+ if (urb->status != 0) {
+ /* print the failure status number for debug */
+ pr_info("URB in failure status: %d\n", urb->status);
+ } else {
+ lbtf_deb_usb2(&urb->dev->dev, "URB status is successful\n");
+ lbtf_deb_usb2(&urb->dev->dev, "Actual length transmitted %d\n",
+ urb->actual_length);
+ }
}
/**
@@ -65,6 +75,8 @@ static void if_usb_write_bulk_callback(struct urb *urb)
*/
static void if_usb_free(struct if_usb_card *cardp)
{
+ lbtf_deb_enter(LBTF_DEB_USB);
+
/* Unlink tx & rx urb */
usb_kill_urb(cardp->tx_urb);
usb_kill_urb(cardp->rx_urb);
@@ -81,6 +93,8 @@ static void if_usb_free(struct if_usb_card *cardp)
kfree(cardp->ep_out_buf);
cardp->ep_out_buf = NULL;
+
+ lbtf_deb_leave(LBTF_DEB_USB);
}
static void if_usb_setup_firmware(struct lbtf_private *priv)
@@ -88,23 +102,33 @@ static void if_usb_setup_firmware(struct lbtf_private *priv)
struct if_usb_card *cardp = priv->card;
struct cmd_ds_set_boot2_ver b2_cmd;
+ lbtf_deb_enter(LBTF_DEB_USB);
+
if_usb_submit_rx_urb(cardp);
b2_cmd.hdr.size = cpu_to_le16(sizeof(b2_cmd));
b2_cmd.action = 0;
b2_cmd.version = cardp->boot2_version;
if (lbtf_cmd_with_response(priv, CMD_SET_BOOT2_VER, &b2_cmd))
- printk(KERN_INFO "libertastf: setting boot2 version failed\n");
+ lbtf_deb_usb("Setting boot2 version failed\n");
+
+ lbtf_deb_leave(LBTF_DEB_USB);
}
static void if_usb_fw_timeo(unsigned long priv)
{
struct if_usb_card *cardp = (void *)priv;
- if (!cardp->fwdnldover)
+ lbtf_deb_enter(LBTF_DEB_USB);
+ if (!cardp->fwdnldover) {
/* Download timed out */
cardp->priv->surpriseremoved = 1;
+ pr_err("Download timed out\n");
+ } else {
+ lbtf_deb_usb("Download complete, no event. Assuming success\n");
+ }
wake_up(&cardp->fw_wq);
+ lbtf_deb_leave(LBTF_DEB_USB);
}
/**
@@ -125,11 +149,14 @@ static int if_usb_probe(struct usb_interface *intf,
struct if_usb_card *cardp;
int i;
+ lbtf_deb_enter(LBTF_DEB_USB);
udev = interface_to_usbdev(intf);
cardp = kzalloc(sizeof(struct if_usb_card), GFP_KERNEL);
- if (!cardp)
+ if (!cardp) {
+ pr_err("Out of memory allocating private data.\n");
goto error;
+ }
setup_timer(&cardp->fw_timeout, if_usb_fw_timeo, (unsigned long)cardp);
init_waitqueue_head(&cardp->fw_wq);
@@ -137,38 +164,62 @@ static int if_usb_probe(struct usb_interface *intf,
cardp->udev = udev;
iface_desc = intf->cur_altsetting;
+ lbtf_deb_usbd(&udev->dev, "bcdUSB = 0x%X bDeviceClass = 0x%X"
+ " bDeviceSubClass = 0x%X, bDeviceProtocol = 0x%X\n",
+ le16_to_cpu(udev->descriptor.bcdUSB),
+ udev->descriptor.bDeviceClass,
+ udev->descriptor.bDeviceSubClass,
+ udev->descriptor.bDeviceProtocol);
+
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
endpoint = &iface_desc->endpoint[i].desc;
if (usb_endpoint_is_bulk_in(endpoint)) {
cardp->ep_in_size =
le16_to_cpu(endpoint->wMaxPacketSize);
cardp->ep_in = usb_endpoint_num(endpoint);
+
+ lbtf_deb_usbd(&udev->dev, "in_endpoint = %d\n", cardp->ep_in);
+ lbtf_deb_usbd(&udev->dev, "Bulk in size is %d\n", cardp->ep_in_size);
} else if (usb_endpoint_is_bulk_out(endpoint)) {
cardp->ep_out_size =
le16_to_cpu(endpoint->wMaxPacketSize);
cardp->ep_out = usb_endpoint_num(endpoint);
+
+ lbtf_deb_usbd(&udev->dev, "out_endpoint = %d\n", cardp->ep_out);
+ lbtf_deb_usbd(&udev->dev, "Bulk out size is %d\n",
+ cardp->ep_out_size);
}
}
- if (!cardp->ep_out_size || !cardp->ep_in_size)
+ if (!cardp->ep_out_size || !cardp->ep_in_size) {
+ lbtf_deb_usbd(&udev->dev, "Endpoints not found\n");
/* Endpoints not found */
goto dealloc;
+ }
cardp->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!cardp->rx_urb)
+ if (!cardp->rx_urb) {
+ lbtf_deb_usbd(&udev->dev, "Rx URB allocation failed\n");
goto dealloc;
+ }
cardp->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!cardp->tx_urb)
+ if (!cardp->tx_urb) {
+ lbtf_deb_usbd(&udev->dev, "Tx URB allocation failed\n");
goto dealloc;
+ }
cardp->cmd_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!cardp->cmd_urb)
+ if (!cardp->cmd_urb) {
+ lbtf_deb_usbd(&udev->dev, "Cmd URB allocation failed\n");
goto dealloc;
+ }
cardp->ep_out_buf = kmalloc(MRVDRV_ETH_TX_PACKET_BUFFER_SIZE,
GFP_KERNEL);
- if (!cardp->ep_out_buf)
+ if (!cardp->ep_out_buf) {
+ lbtf_deb_usbd(&udev->dev, "Could not allocate buffer\n");
goto dealloc;
+ }
priv = lbtf_add_card(cardp, &udev->dev);
if (!priv)
@@ -189,6 +240,7 @@ static int if_usb_probe(struct usb_interface *intf,
dealloc:
if_usb_free(cardp);
error:
+ lbtf_deb_leave(LBTF_DEB_USB);
return -ENOMEM;
}
@@ -202,6 +254,8 @@ static void if_usb_disconnect(struct usb_interface *intf)
struct if_usb_card *cardp = usb_get_intfdata(intf);
struct lbtf_private *priv = (struct lbtf_private *) cardp->priv;
+ lbtf_deb_enter(LBTF_DEB_MAIN);
+
if_usb_reset_device(cardp);
if (priv)
@@ -212,6 +266,8 @@ static void if_usb_disconnect(struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
usb_put_dev(interface_to_usbdev(intf));
+
+ lbtf_deb_leave(LBTF_DEB_MAIN);
}
/**
@@ -226,6 +282,8 @@ static int if_usb_send_fw_pkt(struct if_usb_card *cardp)
struct fwdata *fwdata = cardp->ep_out_buf;
u8 *firmware = (u8 *) cardp->fw->data;
+ lbtf_deb_enter(LBTF_DEB_FW);
+
/* If we got a CRC failure on the last block, back
up and retry it */
if (!cardp->CRC_OK) {
@@ -233,6 +291,9 @@ static int if_usb_send_fw_pkt(struct if_usb_card *cardp)
cardp->fwseqnum--;
}
+ lbtf_deb_usb2(&cardp->udev->dev, "totalbytes = %d\n",
+ cardp->totalbytes);
+
/* struct fwdata (which we sent to the card) has an
extra __le32 field in between the header and the data,
which is not in the struct fwheader in the actual
@@ -246,18 +307,33 @@ static int if_usb_send_fw_pkt(struct if_usb_card *cardp)
memcpy(fwdata->data, &firmware[cardp->totalbytes],
le32_to_cpu(fwdata->hdr.datalength));
+ lbtf_deb_usb2(&cardp->udev->dev, "Data length = %d\n",
+ le32_to_cpu(fwdata->hdr.datalength));
+
fwdata->seqnum = cpu_to_le32(++cardp->fwseqnum);
cardp->totalbytes += le32_to_cpu(fwdata->hdr.datalength);
usb_tx_block(cardp, cardp->ep_out_buf, sizeof(struct fwdata) +
le32_to_cpu(fwdata->hdr.datalength), 0);
- if (fwdata->hdr.dnldcmd == cpu_to_le32(FW_HAS_LAST_BLOCK))
+ if (fwdata->hdr.dnldcmd == cpu_to_le32(FW_HAS_DATA_TO_RECV)) {
+ lbtf_deb_usb2(&cardp->udev->dev, "There are data to follow\n");
+ lbtf_deb_usb2(&cardp->udev->dev, "seqnum = %d totalbytes = %d\n",
+ cardp->fwseqnum, cardp->totalbytes);
+ } else if (fwdata->hdr.dnldcmd == cpu_to_le32(FW_HAS_LAST_BLOCK)) {
+ lbtf_deb_usb2(&cardp->udev->dev, "Host has finished FW downloading\n");
+ lbtf_deb_usb2(&cardp->udev->dev, "Donwloading FW JUMP BLOCK\n");
+
/* Host has finished FW downloading
* Donwloading FW JUMP BLOCK
*/
cardp->fwfinalblk = 1;
+ }
+ lbtf_deb_usb2(&cardp->udev->dev, "Firmware download done; size %d\n",
+ cardp->totalbytes);
+
+ lbtf_deb_leave(LBTF_DEB_FW);
return 0;
}
@@ -266,6 +342,8 @@ static int if_usb_reset_device(struct if_usb_card *cardp)
struct cmd_ds_802_11_reset *cmd = cardp->ep_out_buf + 4;
int ret;
+ lbtf_deb_enter(LBTF_DEB_USB);
+
*(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_REQUEST);
cmd->hdr.command = cpu_to_le16(CMD_802_11_RESET);
@@ -280,6 +358,8 @@ static int if_usb_reset_device(struct if_usb_card *cardp)
ret = usb_reset_device(cardp->udev);
msleep(100);
+ lbtf_deb_leave_args(LBTF_DEB_USB, "ret %d", ret);
+
return ret;
}
EXPORT_SYMBOL_GPL(if_usb_reset_device);
@@ -297,11 +377,15 @@ EXPORT_SYMBOL_GPL(if_usb_reset_device);
static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload,
uint16_t nb, u8 data)
{
+ int ret = -1;
struct urb *urb;
+ lbtf_deb_enter(LBTF_DEB_USB);
/* check if device is removed */
- if (cardp->priv->surpriseremoved)
- return -1;
+ if (cardp->priv->surpriseremoved) {
+ lbtf_deb_usbd(&cardp->udev->dev, "Device removed\n");
+ goto tx_ret;
+ }
if (data)
urb = cardp->tx_urb;
@@ -315,19 +399,34 @@ static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload,
urb->transfer_flags |= URB_ZERO_PACKET;
- if (usb_submit_urb(urb, GFP_ATOMIC))
- return -1;
- return 0;
+ if (usb_submit_urb(urb, GFP_ATOMIC)) {
+ lbtf_deb_usbd(&cardp->udev->dev, "usb_submit_urb failed: %d\n", ret);
+ goto tx_ret;
+ }
+
+ lbtf_deb_usb2(&cardp->udev->dev, "usb_submit_urb success\n");
+
+ ret = 0;
+
+tx_ret:
+ lbtf_deb_leave(LBTF_DEB_USB);
+ return ret;
}
static int __if_usb_submit_rx_urb(struct if_usb_card *cardp,
void (*callbackfn)(struct urb *urb))
{
struct sk_buff *skb;
+ int ret = -1;
+
+ lbtf_deb_enter(LBTF_DEB_USB);
skb = dev_alloc_skb(MRVDRV_ETH_RX_PACKET_BUFFER_SIZE);
- if (!skb)
+ if (!skb) {
+ pr_err("No free skb\n");
+ lbtf_deb_leave(LBTF_DEB_USB);
return -1;
+ }
cardp->rx_skb = skb;
@@ -339,12 +438,19 @@ static int __if_usb_submit_rx_urb(struct if_usb_card *cardp,
cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET;
- if (usb_submit_urb(cardp->rx_urb, GFP_ATOMIC)) {
+ lbtf_deb_usb2(&cardp->udev->dev, "Pointer for rx_urb %p\n", cardp->rx_urb);
+ ret = usb_submit_urb(cardp->rx_urb, GFP_ATOMIC);
+ if (ret) {
+ lbtf_deb_usbd(&cardp->udev->dev, "Submit Rx URB failed: %d\n", ret);
kfree_skb(skb);
cardp->rx_skb = NULL;
+ lbtf_deb_leave(LBTF_DEB_USB);
return -1;
- } else
+ } else {
+ lbtf_deb_usb2(&cardp->udev->dev, "Submit Rx URB success\n");
+ lbtf_deb_leave(LBTF_DEB_USB);
return 0;
+ }
}
static int if_usb_submit_rx_urb_fwload(struct if_usb_card *cardp)
@@ -364,8 +470,12 @@ static void if_usb_receive_fwload(struct urb *urb)
struct fwsyncheader *syncfwheader;
struct bootcmdresp bcmdresp;
+ lbtf_deb_enter(LBTF_DEB_USB);
if (urb->status) {
+ lbtf_deb_usbd(&cardp->udev->dev,
+ "URB status is failed during fw load\n");
kfree_skb(skb);
+ lbtf_deb_leave(LBTF_DEB_USB);
return;
}
@@ -373,12 +483,17 @@ static void if_usb_receive_fwload(struct urb *urb)
__le32 *tmp = (__le32 *)(skb->data);
if (tmp[0] == cpu_to_le32(CMD_TYPE_INDICATION) &&
- tmp[1] == cpu_to_le32(MACREG_INT_CODE_FIRMWARE_READY))
+ tmp[1] == cpu_to_le32(MACREG_INT_CODE_FIRMWARE_READY)) {
/* Firmware ready event received */
+ pr_info("Firmware ready event received\n");
wake_up(&cardp->fw_wq);
- else
+ } else {
+ lbtf_deb_usb("Waiting for confirmation; got %x %x\n",
+ le32_to_cpu(tmp[0]), le32_to_cpu(tmp[1]));
if_usb_submit_rx_urb_fwload(cardp);
+ }
kfree_skb(skb);
+ lbtf_deb_leave(LBTF_DEB_USB);
return;
}
if (cardp->bootcmdresp <= 0) {
@@ -389,34 +504,60 @@ static void if_usb_receive_fwload(struct urb *urb)
if_usb_submit_rx_urb_fwload(cardp);
cardp->bootcmdresp = 1;
/* Received valid boot command response */
+ lbtf_deb_usbd(&cardp->udev->dev,
+ "Received valid boot command response\n");
+ lbtf_deb_leave(LBTF_DEB_USB);
return;
}
if (bcmdresp.magic != cpu_to_le32(BOOT_CMD_MAGIC_NUMBER)) {
if (bcmdresp.magic == cpu_to_le32(CMD_TYPE_REQUEST) ||
bcmdresp.magic == cpu_to_le32(CMD_TYPE_DATA) ||
- bcmdresp.magic == cpu_to_le32(CMD_TYPE_INDICATION))
+ bcmdresp.magic == cpu_to_le32(CMD_TYPE_INDICATION)) {
+ if (!cardp->bootcmdresp)
+ pr_info("Firmware already seems alive; resetting\n");
cardp->bootcmdresp = -1;
- } else if (bcmdresp.cmd == BOOT_CMD_FW_BY_USB &&
- bcmdresp.result == BOOT_CMD_RESP_OK)
+ } else {
+ pr_info("boot cmd response wrong magic number (0x%x)\n",
+ le32_to_cpu(bcmdresp.magic));
+ }
+ } else if (bcmdresp.cmd != BOOT_CMD_FW_BY_USB) {
+ pr_info("boot cmd response cmd_tag error (%d)\n",
+ bcmdresp.cmd);
+ } else if (bcmdresp.result != BOOT_CMD_RESP_OK) {
+ pr_info("boot cmd response result error (%d)\n",
+ bcmdresp.result);
+ } else {
cardp->bootcmdresp = 1;
+ lbtf_deb_usbd(&cardp->udev->dev,
+ "Received valid boot command response\n");
+ }
kfree_skb(skb);
if_usb_submit_rx_urb_fwload(cardp);
+ lbtf_deb_leave(LBTF_DEB_USB);
return;
}
syncfwheader = kmalloc(sizeof(struct fwsyncheader), GFP_ATOMIC);
if (!syncfwheader) {
+ lbtf_deb_usbd(&cardp->udev->dev, "Failure to allocate syncfwheader\n");
kfree_skb(skb);
+ lbtf_deb_leave(LBTF_DEB_USB);
return;
}
memcpy(syncfwheader, skb->data, sizeof(struct fwsyncheader));
- if (!syncfwheader->cmd)
+ if (!syncfwheader->cmd) {
+ lbtf_deb_usb2(&cardp->udev->dev, "FW received Blk with correct CRC\n");
+ lbtf_deb_usb2(&cardp->udev->dev, "FW received Blk seqnum = %d\n",
+ le32_to_cpu(syncfwheader->seqnum));
cardp->CRC_OK = 1;
- else
+ } else {
+ lbtf_deb_usbd(&cardp->udev->dev, "FW received Blk with CRC error\n");
cardp->CRC_OK = 0;
+ }
+
kfree_skb(skb);
/* reschedule timer for 200ms hence */
@@ -434,6 +575,7 @@ static void if_usb_receive_fwload(struct urb *urb)
kfree(syncfwheader);
+ lbtf_deb_leave(LBTF_DEB_USB);
return;
}
@@ -445,6 +587,7 @@ static inline void process_cmdtypedata(int recvlength, struct sk_buff *skb,
{
if (recvlength > MRVDRV_ETH_RX_PACKET_BUFFER_SIZE + MESSAGE_HEADER_LEN
|| recvlength < MRVDRV_MIN_PKT_LEN) {
+ lbtf_deb_usbd(&cardp->udev->dev, "Packet length is Invalid\n");
kfree_skb(skb);
return;
}
@@ -460,6 +603,8 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
struct lbtf_private *priv)
{
if (recvlength > LBS_CMD_BUFFER_SIZE) {
+ lbtf_deb_usbd(&cardp->udev->dev,
+ "The receive buffer is too large\n");
kfree_skb(skb);
return;
}
@@ -489,16 +634,24 @@ static void if_usb_receive(struct urb *urb)
uint32_t recvtype = 0;
__le32 *pkt = (__le32 *) skb->data;
+ lbtf_deb_enter(LBTF_DEB_USB);
+
if (recvlength) {
if (urb->status) {
+ lbtf_deb_usbd(&cardp->udev->dev, "RX URB failed: %d\n",
+ urb->status);
kfree_skb(skb);
goto setup_for_next;
}
recvbuff = skb->data;
recvtype = le32_to_cpu(pkt[0]);
+ lbtf_deb_usbd(&cardp->udev->dev,
+ "Recv length = 0x%x, Recv type = 0x%X\n",
+ recvlength, recvtype);
} else if (urb->status) {
kfree_skb(skb);
+ lbtf_deb_leave(LBTF_DEB_USB);
return;
}
@@ -515,6 +668,7 @@ static void if_usb_receive(struct urb *urb)
{
/* Event cause handling */
u32 event_cause = le32_to_cpu(pkt[1]);
+ lbtf_deb_usbd(&cardp->udev->dev, "**EVENT** 0x%X\n", event_cause);
/* Icky undocumented magic special case */
if (event_cause & 0xffff0000) {
@@ -529,21 +683,22 @@ static void if_usb_receive(struct urb *urb)
} else if (event_cause == LBTF_EVENT_BCN_SENT)
lbtf_bcn_sent(priv);
else
- printk(KERN_DEBUG
+ lbtf_deb_usbd(&cardp->udev->dev,
"Unsupported notification %d received\n",
event_cause);
kfree_skb(skb);
break;
}
default:
- printk(KERN_DEBUG "libertastf: unknown command type 0x%X\n",
- recvtype);
+ lbtf_deb_usbd(&cardp->udev->dev,
+ "libertastf: unknown command type 0x%X\n", recvtype);
kfree_skb(skb);
break;
}
setup_for_next:
if_usb_submit_rx_urb(cardp);
+ lbtf_deb_leave(LBTF_DEB_USB);
}
/**
@@ -562,6 +717,9 @@ static int if_usb_host_to_card(struct lbtf_private *priv, uint8_t type,
struct if_usb_card *cardp = priv->card;
u8 data = 0;
+ lbtf_deb_usbd(&cardp->udev->dev, "*** type = %u\n", type);
+ lbtf_deb_usbd(&cardp->udev->dev, "size after = %d\n", nb);
+
if (type == MVMS_CMD) {
*(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_REQUEST);
} else {
@@ -639,8 +797,10 @@ static int check_fwfile_format(const u8 *data, u32 totlen)
} while (!exit);
if (ret)
- printk(KERN_INFO
- "libertastf: firmware file format check failed\n");
+ pr_err("firmware file format check FAIL\n");
+ else
+ lbtf_deb_fw("firmware file format check PASS\n");
+
return ret;
}
@@ -651,18 +811,24 @@ static int if_usb_prog_firmware(struct if_usb_card *cardp)
static int reset_count = 10;
int ret = 0;
+ lbtf_deb_enter(LBTF_DEB_USB);
+
+ kparam_block_sysfs_write(fw_name);
ret = request_firmware(&cardp->fw, lbtf_fw_name, &cardp->udev->dev);
if (ret < 0) {
- printk(KERN_INFO "libertastf: firmware %s not found\n",
- lbtf_fw_name);
+ pr_err("request_firmware() failed with %#x\n", ret);
+ pr_err("firmware %s not found\n", lbtf_fw_name);
+ kparam_unblock_sysfs_write(fw_name);
goto done;
}
+ kparam_unblock_sysfs_write(fw_name);
if (check_fwfile_format(cardp->fw->data, cardp->fw->size))
goto release_fw;
restart:
if (if_usb_submit_rx_urb_fwload(cardp) < 0) {
+ lbtf_deb_usbd(&cardp->udev->dev, "URB submission is failed\n");
ret = -1;
goto release_fw;
}
@@ -709,14 +875,13 @@ restart:
usb_kill_urb(cardp->rx_urb);
if (!cardp->fwdnldover) {
- printk(KERN_INFO "libertastf: failed to load fw,"
- " resetting device!\n");
+ pr_info("failed to load fw, resetting device!\n");
if (--reset_count >= 0) {
if_usb_reset_device(cardp);
goto restart;
}
- printk(KERN_INFO "libertastf: fw download failure\n");
+ pr_info("FW download failure, time = %d ms\n", i * 100);
ret = -1;
goto release_fw;
}
@@ -730,6 +895,7 @@ restart:
if_usb_setup_firmware(cardp->priv);
done:
+ lbtf_deb_leave_args(LBTF_DEB_USB, "ret %d", ret);
return ret;
}
EXPORT_SYMBOL_GPL(if_usb_prog_firmware);
@@ -751,13 +917,19 @@ static int __init if_usb_init_module(void)
{
int ret = 0;
+ lbtf_deb_enter(LBTF_DEB_MAIN);
+
ret = usb_register(&if_usb_driver);
+
+ lbtf_deb_leave_args(LBTF_DEB_MAIN, "ret %d", ret);
return ret;
}
static void __exit if_usb_exit_module(void)
{
+ lbtf_deb_enter(LBTF_DEB_MAIN);
usb_deregister(&if_usb_driver);
+ lbtf_deb_leave(LBTF_DEB_MAIN);
}
module_init(if_usb_init_module);
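The hunk at the top of if_usb.c defines pr_fmt before any includes, so the pr_err()/pr_info() calls added throughout the file are automatically prefixed with the module name instead of hard-coding "libertastf: " in each format string. A minimal, self-contained illustration of that idiom (module and message names are made up):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>

static int __init prfmt_demo_init(void)
{
	pr_info("loaded\n");	/* logged as "<modname>: loaded" */
	return 0;
}

static void __exit prfmt_demo_exit(void)
{
	pr_info("unloaded\n");
}

module_init(prfmt_demo_init);
module_exit(prfmt_demo_exit);
MODULE_LICENSE("GPL");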
diff --git a/drivers/net/wireless/libertas_tf/libertas_tf.h b/drivers/net/wireless/libertas_tf/libertas_tf.h
index 4cc42dd5a005..fbbaaae7a1ae 100644
--- a/drivers/net/wireless/libertas_tf/libertas_tf.h
+++ b/drivers/net/wireless/libertas_tf/libertas_tf.h
@@ -13,6 +13,8 @@
#include <linux/kthread.h>
#include <net/mac80211.h>
+#include "deb_defs.h"
+
#ifndef DRV_NAME
#define DRV_NAME "libertas_tf"
#endif
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index 7945ff5aa334..60787de56f3a 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -7,10 +7,12 @@
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/slab.h>
+#include <linux/etherdevice.h>
#include "libertas_tf.h"
-#include "linux/etherdevice.h"
#define DRIVER_RELEASE_VERSION "004.p0"
/* thinfirm version: 5.132.X.pX */
@@ -18,7 +20,17 @@
#define LBTF_FW_VER_MAX 0x0584ffff
#define QOS_CONTROL_LEN 2
-static const char lbtf_driver_version[] = "THINFIRM-USB8388-" DRIVER_RELEASE_VERSION;
+/* Module parameters */
+unsigned int lbtf_debug;
+EXPORT_SYMBOL_GPL(lbtf_debug);
+module_param_named(libertas_tf_debug, lbtf_debug, int, 0644);
+
+static const char lbtf_driver_version[] = "THINFIRM-USB8388-" DRIVER_RELEASE_VERSION
+#ifdef DEBUG
+ "-dbg"
+#endif
+ "";
+
struct workqueue_struct *lbtf_wq;
static const struct ieee80211_channel lbtf_channels[] = {
@@ -81,6 +93,9 @@ static void lbtf_cmd_work(struct work_struct *work)
{
struct lbtf_private *priv = container_of(work, struct lbtf_private,
cmd_work);
+
+ lbtf_deb_enter(LBTF_DEB_CMD);
+
spin_lock_irq(&priv->driver_lock);
/* command response? */
if (priv->cmd_response_rxed) {
@@ -108,11 +123,16 @@ static void lbtf_cmd_work(struct work_struct *work)
priv->cmd_timed_out = 0;
spin_unlock_irq(&priv->driver_lock);
- if (!priv->fw_ready)
+ if (!priv->fw_ready) {
+ lbtf_deb_leave_args(LBTF_DEB_CMD, "fw not ready");
return;
+ }
+
/* Execute the next command */
if (!priv->cur_cmd)
lbtf_execute_next_command(priv);
+
+ lbtf_deb_leave(LBTF_DEB_CMD);
}
/**
@@ -126,6 +146,7 @@ static int lbtf_setup_firmware(struct lbtf_private *priv)
{
int ret = -1;
+ lbtf_deb_enter(LBTF_DEB_FW);
/*
* Read priv address from HW
*/
@@ -141,6 +162,7 @@ static int lbtf_setup_firmware(struct lbtf_private *priv)
ret = 0;
done:
+ lbtf_deb_leave_args(LBTF_DEB_FW, "ret: %d", ret);
return ret;
}
@@ -152,6 +174,7 @@ static void command_timer_fn(unsigned long data)
{
struct lbtf_private *priv = (struct lbtf_private *)data;
unsigned long flags;
+ lbtf_deb_enter(LBTF_DEB_CMD);
spin_lock_irqsave(&priv->driver_lock, flags);
@@ -168,10 +191,12 @@ static void command_timer_fn(unsigned long data)
queue_work(lbtf_wq, &priv->cmd_work);
out:
spin_unlock_irqrestore(&priv->driver_lock, flags);
+ lbtf_deb_leave(LBTF_DEB_CMD);
}
static int lbtf_init_adapter(struct lbtf_private *priv)
{
+ lbtf_deb_enter(LBTF_DEB_MAIN);
memset(priv->current_addr, 0xff, ETH_ALEN);
mutex_init(&priv->lock);
@@ -188,13 +213,16 @@ static int lbtf_init_adapter(struct lbtf_private *priv)
if (lbtf_allocate_cmd_buffer(priv))
return -1;
+ lbtf_deb_leave(LBTF_DEB_MAIN);
return 0;
}
static void lbtf_free_adapter(struct lbtf_private *priv)
{
+ lbtf_deb_enter(LBTF_DEB_MAIN);
lbtf_free_cmd_buffer(priv);
del_timer(&priv->command_timer);
+ lbtf_deb_leave(LBTF_DEB_MAIN);
}
static int lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
@@ -221,14 +249,18 @@ static void lbtf_tx_work(struct work_struct *work)
struct sk_buff *skb = NULL;
int err;
+ lbtf_deb_enter(LBTF_DEB_MACOPS | LBTF_DEB_TX);
+
if ((priv->vif->type == NL80211_IFTYPE_AP) &&
(!skb_queue_empty(&priv->bc_ps_buf)))
skb = skb_dequeue(&priv->bc_ps_buf);
else if (priv->skb_to_tx) {
skb = priv->skb_to_tx;
priv->skb_to_tx = NULL;
- } else
+ } else {
+ lbtf_deb_leave(LBTF_DEB_MACOPS | LBTF_DEB_TX);
return;
+ }
len = skb->len;
info = IEEE80211_SKB_CB(skb);
@@ -236,6 +268,7 @@ static void lbtf_tx_work(struct work_struct *work)
if (priv->surpriseremoved) {
dev_kfree_skb_any(skb);
+ lbtf_deb_leave(LBTF_DEB_MACOPS | LBTF_DEB_TX);
return;
}
@@ -249,6 +282,7 @@ static void lbtf_tx_work(struct work_struct *work)
ETH_ALEN);
txpd->tx_packet_length = cpu_to_le16(len);
txpd->tx_packet_location = cpu_to_le32(sizeof(struct txpd));
+ lbtf_deb_hex(LBTF_DEB_TX, "TX Data", skb->data, min_t(unsigned int, skb->len, 100));
BUG_ON(priv->tx_skb);
spin_lock_irq(&priv->driver_lock);
priv->tx_skb = skb;
@@ -257,7 +291,9 @@ static void lbtf_tx_work(struct work_struct *work)
if (err) {
dev_kfree_skb_any(skb);
priv->tx_skb = NULL;
+ pr_err("TX error: %d", err);
}
+ lbtf_deb_leave(LBTF_DEB_MACOPS | LBTF_DEB_TX);
}
static int lbtf_op_start(struct ieee80211_hw *hw)
@@ -266,6 +302,8 @@ static int lbtf_op_start(struct ieee80211_hw *hw)
void *card = priv->card;
int ret = -1;
+ lbtf_deb_enter(LBTF_DEB_MACOPS);
+
if (!priv->fw_ready)
/* Upload firmware */
if (priv->hw_prog_firmware(card))
@@ -286,10 +324,12 @@ static int lbtf_op_start(struct ieee80211_hw *hw)
}
printk(KERN_INFO "libertastf: Marvell WLAN 802.11 thinfirm adapter\n");
+ lbtf_deb_leave(LBTF_DEB_MACOPS);
return 0;
err_prog_firmware:
priv->hw_reset_device(card);
+ lbtf_deb_leave_args(LBTF_DEB_MACOPS, "error programming fw; ret=%d", ret);
return ret;
}
@@ -300,6 +340,9 @@ static void lbtf_op_stop(struct ieee80211_hw *hw)
struct sk_buff *skb;
struct cmd_ctrl_node *cmdnode;
+
+ lbtf_deb_enter(LBTF_DEB_MACOPS);
+
/* Flush pending command nodes */
spin_lock_irqsave(&priv->driver_lock, flags);
list_for_each_entry(cmdnode, &priv->cmdpendingq, list) {
@@ -316,6 +359,7 @@ static void lbtf_op_stop(struct ieee80211_hw *hw)
priv->radioon = RADIO_OFF;
lbtf_set_radio_control(priv);
+ lbtf_deb_leave(LBTF_DEB_MACOPS);
return;
}
@@ -323,6 +367,7 @@ static int lbtf_op_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct lbtf_private *priv = hw->priv;
+ lbtf_deb_enter(LBTF_DEB_MACOPS);
if (priv->vif != NULL)
return -EOPNOTSUPP;
@@ -340,6 +385,7 @@ static int lbtf_op_add_interface(struct ieee80211_hw *hw,
return -EOPNOTSUPP;
}
lbtf_set_mac_address(priv, (u8 *) vif->addr);
+ lbtf_deb_leave(LBTF_DEB_MACOPS);
return 0;
}
@@ -347,6 +393,7 @@ static void lbtf_op_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct lbtf_private *priv = hw->priv;
+ lbtf_deb_enter(LBTF_DEB_MACOPS);
if (priv->vif->type == NL80211_IFTYPE_AP ||
priv->vif->type == NL80211_IFTYPE_MESH_POINT)
@@ -354,37 +401,38 @@ static void lbtf_op_remove_interface(struct ieee80211_hw *hw,
lbtf_set_mode(priv, LBTF_PASSIVE_MODE);
lbtf_set_bssid(priv, 0, NULL);
priv->vif = NULL;
+ lbtf_deb_leave(LBTF_DEB_MACOPS);
}
static int lbtf_op_config(struct ieee80211_hw *hw, u32 changed)
{
struct lbtf_private *priv = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
+ lbtf_deb_enter(LBTF_DEB_MACOPS);
if (conf->channel->center_freq != priv->cur_freq) {
priv->cur_freq = conf->channel->center_freq;
lbtf_set_channel(priv, conf->channel->hw_value);
}
+ lbtf_deb_leave(LBTF_DEB_MACOPS);
return 0;
}
static u64 lbtf_op_prepare_multicast(struct ieee80211_hw *hw,
- int mc_count, struct dev_addr_list *mclist)
+ struct netdev_hw_addr_list *mc_list)
{
struct lbtf_private *priv = hw->priv;
int i;
+ struct netdev_hw_addr *ha;
+ int mc_count = netdev_hw_addr_list_count(mc_list);
if (!mc_count || mc_count > MRVDRV_MAX_MULTICAST_LIST_SIZE)
return mc_count;
priv->nr_of_multicastmacaddr = mc_count;
- for (i = 0; i < mc_count; i++) {
- if (!mclist)
- break;
- memcpy(&priv->multicastlist[i], mclist->da_addr,
- ETH_ALEN);
- mclist = mclist->next;
- }
+ i = 0;
+ netdev_hw_addr_list_for_each(ha, mc_list)
+ memcpy(&priv->multicastlist[i++], ha->addr, ETH_ALEN);
return mc_count;
}
@@ -397,11 +445,16 @@ static void lbtf_op_configure_filter(struct ieee80211_hw *hw,
{
struct lbtf_private *priv = hw->priv;
int old_mac_control = priv->mac_control;
+
+ lbtf_deb_enter(LBTF_DEB_MACOPS);
+
changed_flags &= SUPPORTED_FIF_FLAGS;
*new_flags &= SUPPORTED_FIF_FLAGS;
- if (!changed_flags)
+ if (!changed_flags) {
+ lbtf_deb_leave(LBTF_DEB_MACOPS);
return;
+ }
if (*new_flags & (FIF_PROMISC_IN_BSS))
priv->mac_control |= CMD_ACT_MAC_PROMISCUOUS_ENABLE;
@@ -427,6 +480,8 @@ static void lbtf_op_configure_filter(struct ieee80211_hw *hw,
if (priv->mac_control != old_mac_control)
lbtf_set_mac_control(priv);
+
+ lbtf_deb_leave(LBTF_DEB_MACOPS);
}
static void lbtf_op_bss_info_changed(struct ieee80211_hw *hw,
@@ -436,6 +491,7 @@ static void lbtf_op_bss_info_changed(struct ieee80211_hw *hw,
{
struct lbtf_private *priv = hw->priv;
struct sk_buff *beacon;
+ lbtf_deb_enter(LBTF_DEB_MACOPS);
if (changes & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_INT)) {
switch (priv->vif->type) {
@@ -466,6 +522,8 @@ static void lbtf_op_bss_info_changed(struct ieee80211_hw *hw,
priv->preamble = CMD_TYPE_LONG_PREAMBLE;
lbtf_set_radio_control(priv);
}
+
+ lbtf_deb_leave(LBTF_DEB_MACOPS);
}
static const struct ieee80211_ops lbtf_ops = {
@@ -488,6 +546,8 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
unsigned int flags;
struct ieee80211_hdr *hdr;
+ lbtf_deb_enter(LBTF_DEB_RX);
+
prxpd = (struct rxpd *) skb->data;
stats.flag = 0;
@@ -496,7 +556,6 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
stats.freq = priv->cur_freq;
stats.band = IEEE80211_BAND_2GHZ;
stats.signal = prxpd->snr;
- stats.noise = prxpd->nf;
/* Marvell rate index has a hole at value 4 */
if (prxpd->rx_rate > 4)
--prxpd->rx_rate;
@@ -518,7 +577,15 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
}
memcpy(IEEE80211_SKB_RXCB(skb), &stats, sizeof(stats));
+
+ lbtf_deb_rx("rx data: skb->len-sizeof(RxPd) = %d-%zd = %zd\n",
+ skb->len, sizeof(struct rxpd), skb->len - sizeof(struct rxpd));
+ lbtf_deb_hex(LBTF_DEB_RX, "RX Data", skb->data,
+ min_t(unsigned int, skb->len, 100));
+
ieee80211_rx_irqsafe(priv->hw, skb);
+
+ lbtf_deb_leave(LBTF_DEB_RX);
return 0;
}
EXPORT_SYMBOL_GPL(lbtf_rx);
@@ -535,6 +602,8 @@ struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev)
struct ieee80211_hw *hw;
struct lbtf_private *priv = NULL;
+ lbtf_deb_enter(LBTF_DEB_MAIN);
+
hw = ieee80211_alloc_hw(sizeof(struct lbtf_private), &lbtf_ops);
if (!hw)
goto done;
@@ -577,6 +646,7 @@ err_init_adapter:
priv = NULL;
done:
+ lbtf_deb_leave_args(LBTF_DEB_MAIN, "priv %p", priv);
return priv;
}
EXPORT_SYMBOL_GPL(lbtf_add_card);
@@ -586,6 +656,8 @@ int lbtf_remove_card(struct lbtf_private *priv)
{
struct ieee80211_hw *hw = priv->hw;
+ lbtf_deb_enter(LBTF_DEB_MAIN);
+
priv->surpriseremoved = 1;
del_timer(&priv->command_timer);
lbtf_free_adapter(priv);
@@ -593,6 +665,7 @@ int lbtf_remove_card(struct lbtf_private *priv)
ieee80211_unregister_hw(hw);
ieee80211_free_hw(hw);
+ lbtf_deb_leave(LBTF_DEB_MAIN);
return 0;
}
EXPORT_SYMBOL_GPL(lbtf_remove_card);
@@ -651,17 +724,21 @@ EXPORT_SYMBOL_GPL(lbtf_bcn_sent);
static int __init lbtf_init_module(void)
{
+ lbtf_deb_enter(LBTF_DEB_MAIN);
lbtf_wq = create_workqueue("libertastf");
if (lbtf_wq == NULL) {
printk(KERN_ERR "libertastf: couldn't create workqueue\n");
return -ENOMEM;
}
+ lbtf_deb_leave(LBTF_DEB_MAIN);
return 0;
}
static void __exit lbtf_exit_module(void)
{
+ lbtf_deb_enter(LBTF_DEB_MAIN);
destroy_workqueue(lbtf_wq);
+ lbtf_deb_leave(LBTF_DEB_MAIN);
}
module_init(lbtf_init_module);
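lbtf_op_prepare_multicast() above (and the mwl8k change further down) switch from walking struct dev_addr_list by hand to the netdev_hw_addr_list helpers. A condensed sketch of the new iteration pattern, assuming a caller-supplied destination array (the helper name and buffer are illustrative):

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

/* Illustrative helper: copy at most max_addrs multicast addresses. */
static int example_copy_mc_addrs(struct netdev_hw_addr_list *mc_list,
				 u8 (*dst)[ETH_ALEN], int max_addrs)
{
	struct netdev_hw_addr *ha;
	int i = 0;

	if (netdev_hw_addr_list_count(mc_list) > max_addrs)
		return -EINVAL;

	netdev_hw_addr_list_for_each(ha, mc_list)
		memcpy(dst[i++], ha->addr, ETH_ALEN);

	return i;
}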
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 7cd5f56662fc..6f8cb3ee6fed 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -291,7 +291,8 @@ struct mac80211_hwsim_data {
struct ieee80211_channel *channel;
unsigned long beacon_int; /* in jiffies unit */
unsigned int rx_filter;
- bool started, idle;
+ bool started, idle, scanning;
+ struct mutex mutex;
struct timer_list beacon_timer;
enum ps_mode {
PS_DISABLED, PS_ENABLED, PS_AUTO_POLL, PS_MANUAL_POLL
@@ -651,17 +652,17 @@ static void mac80211_hwsim_beacon(unsigned long arg)
add_timer(&data->beacon_timer);
}
+static const char *hwsim_chantypes[] = {
+ [NL80211_CHAN_NO_HT] = "noht",
+ [NL80211_CHAN_HT20] = "ht20",
+ [NL80211_CHAN_HT40MINUS] = "ht40-",
+ [NL80211_CHAN_HT40PLUS] = "ht40+",
+};
static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
{
struct mac80211_hwsim_data *data = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
- static const char *chantypes[4] = {
- [NL80211_CHAN_NO_HT] = "noht",
- [NL80211_CHAN_HT20] = "ht20",
- [NL80211_CHAN_HT40MINUS] = "ht40-",
- [NL80211_CHAN_HT40PLUS] = "ht40+",
- };
static const char *smps_modes[IEEE80211_SMPS_NUM_MODES] = {
[IEEE80211_SMPS_AUTOMATIC] = "auto",
[IEEE80211_SMPS_OFF] = "off",
@@ -672,7 +673,7 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
printk(KERN_DEBUG "%s:%s (freq=%d/%s idle=%d ps=%d smps=%s)\n",
wiphy_name(hw->wiphy), __func__,
conf->channel->center_freq,
- chantypes[conf->channel_type],
+ hwsim_chantypes[conf->channel_type],
!!(conf->flags & IEEE80211_CONF_IDLE),
!!(conf->flags & IEEE80211_CONF_PS),
smps_modes[conf->smps_mode]);
@@ -760,9 +761,10 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_HT) {
- printk(KERN_DEBUG " %s: HT: op_mode=0x%x\n",
+ printk(KERN_DEBUG " %s: HT: op_mode=0x%x, chantype=%s\n",
wiphy_name(hw->wiphy),
- info->ht_operation_mode);
+ info->ht_operation_mode,
+ hwsim_chantypes[info->channel_type]);
}
if (changed & BSS_CHANGED_BASIC_RATES) {
@@ -829,6 +831,33 @@ static int mac80211_hwsim_conf_tx(
return 0;
}
+static int mac80211_hwsim_get_survey(
+ struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct ieee80211_conf *conf = &hw->conf;
+
+ printk(KERN_DEBUG "%s:%s (idx=%d)\n",
+ wiphy_name(hw->wiphy), __func__, idx);
+
+ if (idx != 0)
+ return -ENOENT;
+
+ /* Current channel */
+ survey->channel = conf->channel;
+
+ /*
+ * Magically conjured noise level --- this is only ok for simulated hardware.
+ *
+ * A real driver which cannot determine the real channel noise MUST NOT
+ * report any noise, especially not a magically conjured one :-)
+ */
+ survey->filled = SURVEY_INFO_NOISE_DBM;
+ survey->noise = -92;
+
+ return 0;
+}
+
#ifdef CONFIG_NL80211_TESTMODE
/*
* This section contains example code for using netlink
@@ -946,6 +975,7 @@ static void hw_scan_done(struct work_struct *work)
}
static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
struct cfg80211_scan_request *req)
{
struct hw_scan_done *hsd = kzalloc(sizeof(*hsd), GFP_KERNEL);
@@ -957,9 +987,9 @@ static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw,
hsd->hw = hw;
INIT_DELAYED_WORK(&hsd->w, hw_scan_done);
- printk(KERN_DEBUG "hwsim scan request\n");
+ printk(KERN_DEBUG "hwsim hw_scan request\n");
for (i = 0; i < req->n_channels; i++)
- printk(KERN_DEBUG "hwsim scan freq %d\n",
+ printk(KERN_DEBUG "hwsim hw_scan freq %d\n",
req->channels[i]->center_freq);
ieee80211_queue_delayed_work(hw, &hsd->w, 2 * HZ);
@@ -967,6 +997,36 @@ static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw,
return 0;
}
+static void mac80211_hwsim_sw_scan(struct ieee80211_hw *hw)
+{
+ struct mac80211_hwsim_data *hwsim = hw->priv;
+
+ mutex_lock(&hwsim->mutex);
+
+ if (hwsim->scanning) {
+ printk(KERN_DEBUG "two hwsim sw_scans detected!\n");
+ goto out;
+ }
+
+ printk(KERN_DEBUG "hwsim sw_scan request, prepping stuff\n");
+ hwsim->scanning = true;
+
+out:
+ mutex_unlock(&hwsim->mutex);
+}
+
+static void mac80211_hwsim_sw_scan_complete(struct ieee80211_hw *hw)
+{
+ struct mac80211_hwsim_data *hwsim = hw->priv;
+
+ mutex_lock(&hwsim->mutex);
+
+ printk(KERN_DEBUG "hwsim sw_scan_complete\n");
+ hwsim->scanning = false;
+
+ mutex_unlock(&hwsim->mutex);
+}
+
static struct ieee80211_ops mac80211_hwsim_ops =
{
.tx = mac80211_hwsim_tx,
@@ -982,8 +1042,11 @@ static struct ieee80211_ops mac80211_hwsim_ops =
.sta_notify = mac80211_hwsim_sta_notify,
.set_tim = mac80211_hwsim_set_tim,
.conf_tx = mac80211_hwsim_conf_tx,
+ .get_survey = mac80211_hwsim_get_survey,
CFG80211_TESTMODE_CMD(mac80211_hwsim_testmode_cmd)
.ampdu_action = mac80211_hwsim_ampdu_action,
+ .sw_scan_start = mac80211_hwsim_sw_scan,
+ .sw_scan_complete = mac80211_hwsim_sw_scan_complete,
.flush = mac80211_hwsim_flush,
};
@@ -1179,8 +1242,11 @@ static int __init init_mac80211_hwsim(void)
if (radios < 1 || radios > 100)
return -EINVAL;
- if (fake_hw_scan)
+ if (fake_hw_scan) {
mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
+ mac80211_hwsim_ops.sw_scan_start = NULL;
+ mac80211_hwsim_ops.sw_scan_complete = NULL;
+ }
spin_lock_init(&hwsim_radio_lock);
INIT_LIST_HEAD(&hwsim_radios);
@@ -1235,7 +1301,8 @@ static int __init init_mac80211_hwsim(void)
hw->flags = IEEE80211_HW_MFP_CAPABLE |
IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_SUPPORTS_STATIC_SMPS |
- IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS;
+ IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
+ IEEE80211_HW_AMPDU_AGGREGATION;
/* ask mac80211 to reserve space for magic */
hw->vif_data_size = sizeof(struct hwsim_vif_priv);
@@ -1285,6 +1352,7 @@ static int __init init_mac80211_hwsim(void)
}
/* By default all radios are belonging to the first group */
data->group = 1;
+ mutex_init(&data->mutex);
/* Work to be done prior to ieee80211_register_hw() */
switch (regtest) {
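The comment inside mac80211_hwsim_get_survey() above stresses that only simulated hardware may invent a noise figure. For contrast, a hedged sketch of what a driver with no real noise measurement might return from the same callback (the function name and body are illustrative, not from this patch):

static int example_get_survey(struct ieee80211_hw *hw, int idx,
			      struct survey_info *survey)
{
	if (idx != 0)
		return -ENOENT;

	survey->channel = hw->conf.channel;
	/* No measured noise: leave SURVEY_INFO_NOISE_DBM unset rather
	 * than reporting an invented value. */
	survey->filled = 0;

	return 0;
}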
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 12fdcb25fd38..808adb909095 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -750,7 +750,6 @@ mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status,
memset(status, 0, sizeof(*status));
status->signal = -rxd->rssi;
- status->noise = -rxd->noise_floor;
if (rxd->rate & MWL8K_8366_AP_RATE_INFO_MCS_FORMAT) {
status->flag |= RX_FLAG_HT;
@@ -852,7 +851,6 @@ mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status,
memset(status, 0, sizeof(*status));
status->signal = -rxd->rssi;
- status->noise = -rxd->noise_level;
status->antenna = MWL8K_STA_RATE_INFO_ANTSELECT(rate_info);
status->rate_idx = MWL8K_STA_RATE_INFO_RATEID(rate_info);
@@ -1939,11 +1937,15 @@ struct mwl8k_cmd_mac_multicast_adr {
static struct mwl8k_cmd_pkt *
__mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti,
- int mc_count, struct dev_addr_list *mclist)
+ struct netdev_hw_addr_list *mc_list)
{
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_cmd_mac_multicast_adr *cmd;
int size;
+ int mc_count = 0;
+
+ if (mc_list)
+ mc_count = netdev_hw_addr_list_count(mc_list);
if (allmulti || mc_count > priv->num_mcaddrs) {
allmulti = 1;
@@ -1964,17 +1966,13 @@ __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti,
if (allmulti) {
cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_ALL_MULTICAST);
} else if (mc_count) {
- int i;
+ struct netdev_hw_addr *ha;
+ int i = 0;
cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_MULTICAST);
cmd->numaddr = cpu_to_le16(mc_count);
- for (i = 0; i < mc_count && mclist; i++) {
- if (mclist->da_addrlen != ETH_ALEN) {
- kfree(cmd);
- return NULL;
- }
- memcpy(cmd->addr[i], mclist->da_addr, ETH_ALEN);
- mclist = mclist->next;
+ netdev_hw_addr_list_for_each(ha, mc_list) {
+ memcpy(cmd->addr[i], ha->addr, ETH_ALEN);
}
}
@@ -3553,7 +3551,7 @@ mwl8k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw,
- int mc_count, struct dev_addr_list *mclist)
+ struct netdev_hw_addr_list *mc_list)
{
struct mwl8k_cmd_pkt *cmd;
@@ -3564,7 +3562,7 @@ static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw,
* we'll end up throwing this packet away and creating a new
* one in mwl8k_configure_filter().
*/
- cmd = __mwl8k_cmd_mac_multicast_adr(hw, 0, mc_count, mclist);
+ cmd = __mwl8k_cmd_mac_multicast_adr(hw, 0, mc_list);
return (unsigned long)cmd;
}
@@ -3687,7 +3685,7 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
*/
if (*total_flags & FIF_ALLMULTI) {
kfree(cmd);
- cmd = __mwl8k_cmd_mac_multicast_adr(hw, 1, 0, NULL);
+ cmd = __mwl8k_cmd_mac_multicast_adr(hw, 1, NULL);
}
if (cmd != NULL) {
@@ -3984,8 +3982,8 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
hw->queues = MWL8K_TX_QUEUES;
- /* Set rssi and noise values to dBm */
- hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_NOISE_DBM;
+ /* Set rssi values to dBm */
+ hw->flags |= IEEE80211_HW_SIGNAL_DBM;
hw->vif_data_size = sizeof(struct mwl8k_vif);
hw->sta_data_size = sizeof(struct mwl8k_sta);
diff --git a/drivers/net/wireless/orinoco/Kconfig b/drivers/net/wireless/orinoco/Kconfig
index e2a2c18920aa..60819bcf4377 100644
--- a/drivers/net/wireless/orinoco/Kconfig
+++ b/drivers/net/wireless/orinoco/Kconfig
@@ -27,6 +27,17 @@ config HERMES
configure your card and that /etc/pcmcia/wireless.opts works :
<http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>
+config HERMES_PRISM
+ bool "Support Prism 2/2.5 chipset"
+ depends on HERMES
+ ---help---
+
+ Say Y to enable support for Prism 2 and 2.5 chipsets. These
+ chipsets are better handled by the hostap driver. This driver
+ does not support WPA or firmware download for Prism chipsets.
+
+ If you are not sure, say N.
+
config HERMES_CACHE_FW_ON_INIT
bool "Cache Hermes firmware on driver initialisation"
depends on HERMES
@@ -86,7 +97,7 @@ config NORTEL_HERMES
config PCI_HERMES
tristate "Prism 2.5 PCI 802.11b adaptor support"
- depends on PCI && HERMES
+ depends on PCI && HERMES && HERMES_PRISM
help
Enable support for PCI and mini-PCI 802.11b wireless NICs based on
the Prism 2.5 chipset. These are true PCI cards, not the 802.11b
@@ -121,3 +132,10 @@ config PCMCIA_SPECTRUM
This driver requires firmware download on startup. Utilities
for downloading Symbol firmware are available at
<http://sourceforge.net/projects/orinoco/>
+
+config ORINOCO_USB
+ tristate "Agere Orinoco USB support"
+ depends on USB && HERMES
+ select FW_LOADER
+ ---help---
+ This driver is for USB versions of the Agere Orinoco card.
diff --git a/drivers/net/wireless/orinoco/Makefile b/drivers/net/wireless/orinoco/Makefile
index 9abd6329bcbd..bfdefb85abcd 100644
--- a/drivers/net/wireless/orinoco/Makefile
+++ b/drivers/net/wireless/orinoco/Makefile
@@ -11,3 +11,7 @@ obj-$(CONFIG_PCI_HERMES) += orinoco_pci.o
obj-$(CONFIG_TMD_HERMES) += orinoco_tmd.o
obj-$(CONFIG_NORTEL_HERMES) += orinoco_nortel.o
obj-$(CONFIG_PCMCIA_SPECTRUM) += spectrum_cs.o
+obj-$(CONFIG_ORINOCO_USB) += orinoco_usb.o
+
+# Orinoco should be endian clean.
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/orinoco/airport.c b/drivers/net/wireless/orinoco/airport.c
index c60df2c1aca3..9bcee10c9308 100644
--- a/drivers/net/wireless/orinoco/airport.c
+++ b/drivers/net/wireless/orinoco/airport.c
@@ -77,9 +77,9 @@ airport_resume(struct macio_dev *mdev)
enable_irq(card->irq);
- spin_lock_irqsave(&priv->lock, flags);
+ priv->hw.ops->lock_irqsave(&priv->lock, &flags);
err = orinoco_up(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
+ priv->hw.ops->unlock_irqrestore(&priv->lock, &flags);
return err;
}
@@ -195,7 +195,7 @@ airport_attach(struct macio_dev *mdev, const struct of_device_id *match)
ssleep(1);
/* Reset it before we get the interrupt */
- hermes_init(hw);
+ hw->ops->init(hw);
if (request_irq(card->irq, orinoco_interrupt, 0, DRIVER_NAME, priv)) {
printk(KERN_ERR PFX "Couldn't get IRQ %d\n", card->irq);
@@ -210,7 +210,7 @@ airport_attach(struct macio_dev *mdev, const struct of_device_id *match)
}
/* Register an interface with the stack */
- if (orinoco_if_add(priv, phys_addr, card->irq) != 0) {
+ if (orinoco_if_add(priv, phys_addr, card->irq, NULL) != 0) {
printk(KERN_ERR PFX "orinoco_if_add() failed\n");
goto failed;
}
diff --git a/drivers/net/wireless/orinoco/cfg.c b/drivers/net/wireless/orinoco/cfg.c
index 27f2d3342645..8c4169c227ae 100644
--- a/drivers/net/wireless/orinoco/cfg.c
+++ b/drivers/net/wireless/orinoco/cfg.c
@@ -88,7 +88,9 @@ int orinoco_wiphy_register(struct wiphy *wiphy)
wiphy->rts_threshold = priv->rts_thresh;
if (!priv->has_mwo)
- wiphy->frag_threshold = priv->frag_thresh;
+ wiphy->frag_threshold = priv->frag_thresh + 1;
+ wiphy->retry_short = priv->short_retry_limit;
+ wiphy->retry_long = priv->long_retry_limit;
return wiphy_register(wiphy);
}
@@ -157,6 +159,7 @@ static int orinoco_scan(struct wiphy *wiphy, struct net_device *dev,
}
static int orinoco_set_channel(struct wiphy *wiphy,
+ struct net_device *netdev,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type)
{
@@ -187,7 +190,7 @@ static int orinoco_set_channel(struct wiphy *wiphy,
if (priv->iw_mode == NL80211_IFTYPE_MONITOR) {
/* Fast channel change - no commit if successful */
hermes_t *hw = &priv->hw;
- err = hermes_docmd_wait(hw, HERMES_CMD_TEST |
+ err = hw->ops->cmd_wait(hw, HERMES_CMD_TEST |
HERMES_TEST_SET_CHANNEL,
channel, NULL);
}
@@ -196,8 +199,92 @@ static int orinoco_set_channel(struct wiphy *wiphy,
return err;
}
+static int orinoco_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+{
+ struct orinoco_private *priv = wiphy_priv(wiphy);
+ int frag_value = -1;
+ int rts_value = -1;
+ int err = 0;
+
+ if (changed & WIPHY_PARAM_RETRY_SHORT) {
+ /* Setting short retry not supported */
+ err = -EINVAL;
+ }
+
+ if (changed & WIPHY_PARAM_RETRY_LONG) {
+ /* Setting long retry not supported */
+ err = -EINVAL;
+ }
+
+ if (changed & WIPHY_PARAM_FRAG_THRESHOLD) {
+ /* Set fragmentation */
+ if (priv->has_mwo) {
+ if (wiphy->frag_threshold < 0)
+ frag_value = 0;
+ else {
+ printk(KERN_WARNING "%s: Fixed fragmentation "
+ "is not supported on this firmware. "
+ "Using MWO robust instead.\n",
+ priv->ndev->name);
+ frag_value = 1;
+ }
+ } else {
+ if (wiphy->frag_threshold < 0)
+ frag_value = 2346;
+ else if ((wiphy->frag_threshold < 257) ||
+ (wiphy->frag_threshold > 2347))
+ err = -EINVAL;
+ else
+ /* cfg80211 value is 257-2347 (odd only)
+ * orinoco rid has range 256-2346 (even only) */
+ frag_value = wiphy->frag_threshold & ~0x1;
+ }
+ }
+
+ if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
+ /* Set RTS.
+ *
+ * Prism documentation suggests default of 2432,
+ * and a range of 0-3000.
+ *
+ * Current implementation uses 2347 as the default and
+ * the upper limit.
+ */
+
+ if (wiphy->rts_threshold < 0)
+ rts_value = 2347;
+ else if (wiphy->rts_threshold > 2347)
+ err = -EINVAL;
+ else
+ rts_value = wiphy->rts_threshold;
+ }
+
+ if (!err) {
+ unsigned long flags;
+
+ if (orinoco_lock(priv, &flags) != 0)
+ return -EBUSY;
+
+ if (frag_value >= 0) {
+ if (priv->has_mwo)
+ priv->mwo_robust = frag_value;
+ else
+ priv->frag_thresh = frag_value;
+ }
+ if (rts_value >= 0)
+ priv->rts_thresh = rts_value;
+
+ err = orinoco_commit(priv);
+
+ orinoco_unlock(priv, &flags);
+ }
+
+ return err;
+}
+
const struct cfg80211_ops orinoco_cfg_ops = {
.change_virtual_intf = orinoco_change_vif,
.set_channel = orinoco_set_channel,
.scan = orinoco_scan,
+ .set_wiphy_params = orinoco_set_wiphy_params,
};
diff --git a/drivers/net/wireless/orinoco/fw.c b/drivers/net/wireless/orinoco/fw.c
index 5ea0f7cf85b1..3e1947d097ca 100644
--- a/drivers/net/wireless/orinoco/fw.c
+++ b/drivers/net/wireless/orinoco/fw.c
@@ -122,7 +122,7 @@ orinoco_dl_firmware(struct orinoco_private *priv,
dev_dbg(dev, "Attempting to download firmware %s\n", firmware);
/* Read current plug data */
- err = hermes_read_pda(hw, pda, fw->pda_addr, fw->pda_size, 0);
+ err = hw->ops->read_pda(hw, pda, fw->pda_addr, fw->pda_size);
dev_dbg(dev, "Read PDA returned %d\n", err);
if (err)
goto free;
@@ -149,7 +149,7 @@ orinoco_dl_firmware(struct orinoco_private *priv,
}
/* Enable aux port to allow programming */
- err = hermesi_program_init(hw, le32_to_cpu(hdr->entry_point));
+ err = hw->ops->program_init(hw, le32_to_cpu(hdr->entry_point));
dev_dbg(dev, "Program init returned %d\n", err);
if (err != 0)
goto abort;
@@ -177,7 +177,7 @@ orinoco_dl_firmware(struct orinoco_private *priv,
goto abort;
/* Tell card we've finished */
- err = hermesi_program_end(hw);
+ err = hw->ops->program_end(hw);
dev_dbg(dev, "Program end returned %d\n", err);
if (err != 0)
goto abort;
@@ -224,7 +224,7 @@ symbol_dl_image(struct orinoco_private *priv, const struct fw_info *fw,
if (!pda)
return -ENOMEM;
- ret = hermes_read_pda(hw, pda, fw->pda_addr, fw->pda_size, 1);
+ ret = hw->ops->read_pda(hw, pda, fw->pda_addr, fw->pda_size);
if (ret)
goto free;
}
@@ -260,7 +260,7 @@ symbol_dl_image(struct orinoco_private *priv, const struct fw_info *fw,
}
/* Reset hermes chip and make sure it responds */
- ret = hermes_init(hw);
+ ret = hw->ops->init(hw);
/* hermes_reset() should return 0 with the secondary firmware */
if (secondary && ret != 0)
diff --git a/drivers/net/wireless/orinoco/hermes.c b/drivers/net/wireless/orinoco/hermes.c
index 1a2fca76fd3c..6c6a23e08df6 100644
--- a/drivers/net/wireless/orinoco/hermes.c
+++ b/drivers/net/wireless/orinoco/hermes.c
@@ -52,6 +52,26 @@
#define ALLOC_COMPL_TIMEOUT (1000) /* in iterations of ~10us */
/*
+ * AUX port access. To unlock the AUX port write the access keys to the
+ * PARAM0-2 registers, then write HERMES_AUX_ENABLE to the HERMES_CONTROL
+ * register. Then read it and make sure it's HERMES_AUX_ENABLED.
+ */
+#define HERMES_AUX_ENABLE 0x8000 /* Enable auxiliary port access */
+#define HERMES_AUX_DISABLE 0x4000 /* Disable auxiliary port access */
+#define HERMES_AUX_ENABLED 0xC000 /* Auxiliary port is open */
+#define HERMES_AUX_DISABLED 0x0000 /* Auxiliary port is closed */
+
+#define HERMES_AUX_PW0 0xFE01
+#define HERMES_AUX_PW1 0xDC23
+#define HERMES_AUX_PW2 0xBA45
+
+/* HERMES_CMD_DOWNLD */
+#define HERMES_PROGRAM_DISABLE (0x0000 | HERMES_CMD_DOWNLD)
+#define HERMES_PROGRAM_ENABLE_VOLATILE (0x0100 | HERMES_CMD_DOWNLD)
+#define HERMES_PROGRAM_ENABLE_NON_VOLATILE (0x0200 | HERMES_CMD_DOWNLD)
+#define HERMES_PROGRAM_NON_VOLATILE (0x0300 | HERMES_CMD_DOWNLD)
+
+/*
* Debugging helpers
*/
@@ -70,6 +90,7 @@
#endif /* ! HERMES_DEBUG */
+static const struct hermes_ops hermes_ops_local;
/*
* Internal functions
@@ -111,9 +132,9 @@ static int hermes_issue_cmd(hermes_t *hw, u16 cmd, u16 param0,
*/
/* For doing cmds that wipe the magic constant in SWSUPPORT0 */
-int hermes_doicmd_wait(hermes_t *hw, u16 cmd,
- u16 parm0, u16 parm1, u16 parm2,
- struct hermes_response *resp)
+static int hermes_doicmd_wait(hermes_t *hw, u16 cmd,
+ u16 parm0, u16 parm1, u16 parm2,
+ struct hermes_response *resp)
{
int err = 0;
int k;
@@ -163,17 +184,18 @@ int hermes_doicmd_wait(hermes_t *hw, u16 cmd,
out:
return err;
}
-EXPORT_SYMBOL(hermes_doicmd_wait);
void hermes_struct_init(hermes_t *hw, void __iomem *address, int reg_spacing)
{
hw->iobase = address;
hw->reg_spacing = reg_spacing;
hw->inten = 0x0;
+ hw->eeprom_pda = false;
+ hw->ops = &hermes_ops_local;
}
EXPORT_SYMBOL(hermes_struct_init);
-int hermes_init(hermes_t *hw)
+static int hermes_init(hermes_t *hw)
{
u16 reg;
int err = 0;
@@ -217,7 +239,6 @@ int hermes_init(hermes_t *hw)
return err;
}
-EXPORT_SYMBOL(hermes_init);
/* Issue a command to the chip, and (busy!) wait for it to
* complete.
@@ -228,8 +249,8 @@ EXPORT_SYMBOL(hermes_init);
* > 0 on error returned by the firmware
*
* Callable from any context, but locking is your problem. */
-int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
- struct hermes_response *resp)
+static int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
+ struct hermes_response *resp)
{
int err;
int k;
@@ -291,9 +312,8 @@ int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
out:
return err;
}
-EXPORT_SYMBOL(hermes_docmd_wait);
-int hermes_allocate(hermes_t *hw, u16 size, u16 *fid)
+static int hermes_allocate(hermes_t *hw, u16 size, u16 *fid)
{
int err = 0;
int k;
@@ -333,7 +353,6 @@ int hermes_allocate(hermes_t *hw, u16 size, u16 *fid)
return 0;
}
-EXPORT_SYMBOL(hermes_allocate);
/* Set up a BAP to read a particular chunk of data from card's internal buffer.
*
@@ -403,8 +422,8 @@ static int hermes_bap_seek(hermes_t *hw, int bap, u16 id, u16 offset)
* 0 on success
* > 0 on error from firmware
*/
-int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len,
- u16 id, u16 offset)
+static int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len,
+ u16 id, u16 offset)
{
int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
int err = 0;
@@ -422,7 +441,6 @@ int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len,
out:
return err;
}
-EXPORT_SYMBOL(hermes_bap_pread);
/* Write a block of data to the chip's buffer, via the
* BAP. Synchronization/serialization is the caller's problem.
@@ -432,8 +450,8 @@ EXPORT_SYMBOL(hermes_bap_pread);
* 0 on success
* > 0 on error from firmware
*/
-int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len,
- u16 id, u16 offset)
+static int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len,
+ u16 id, u16 offset)
{
int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
int err = 0;
@@ -451,7 +469,6 @@ int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len,
out:
return err;
}
-EXPORT_SYMBOL(hermes_bap_pwrite);
/* Read a Length-Type-Value record from the card.
*
@@ -461,8 +478,8 @@ EXPORT_SYMBOL(hermes_bap_pwrite);
* practice.
*
* Callable from user or bh context. */
-int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned bufsize,
- u16 *length, void *buf)
+static int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned bufsize,
+ u16 *length, void *buf)
{
int err = 0;
int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
@@ -505,10 +522,9 @@ int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned bufsize,
return 0;
}
-EXPORT_SYMBOL(hermes_read_ltv);
-int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
- u16 length, const void *value)
+static int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
+ u16 length, const void *value)
{
int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
int err = 0;
@@ -533,4 +549,228 @@ int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
return err;
}
-EXPORT_SYMBOL(hermes_write_ltv);
+
+/*** Hermes AUX control ***/
+
+static inline void
+hermes_aux_setaddr(hermes_t *hw, u32 addr)
+{
+ hermes_write_reg(hw, HERMES_AUXPAGE, (u16) (addr >> 7));
+ hermes_write_reg(hw, HERMES_AUXOFFSET, (u16) (addr & 0x7F));
+}
+
+static inline int
+hermes_aux_control(hermes_t *hw, int enabled)
+{
+ int desired_state = enabled ? HERMES_AUX_ENABLED : HERMES_AUX_DISABLED;
+ int action = enabled ? HERMES_AUX_ENABLE : HERMES_AUX_DISABLE;
+ int i;
+
+ /* Already open? */
+ if (hermes_read_reg(hw, HERMES_CONTROL) == desired_state)
+ return 0;
+
+ hermes_write_reg(hw, HERMES_PARAM0, HERMES_AUX_PW0);
+ hermes_write_reg(hw, HERMES_PARAM1, HERMES_AUX_PW1);
+ hermes_write_reg(hw, HERMES_PARAM2, HERMES_AUX_PW2);
+ hermes_write_reg(hw, HERMES_CONTROL, action);
+
+ for (i = 0; i < 20; i++) {
+ udelay(10);
+ if (hermes_read_reg(hw, HERMES_CONTROL) ==
+ desired_state)
+ return 0;
+ }
+
+ return -EBUSY;
+}
+
+/*** Hermes programming ***/
+
+/* About to start programming data (Hermes I)
+ * offset is the entry point
+ *
+ * Spectrum_cs' Symbol fw does not require this
+ * wl_lkm Agere fw does
+ * Don't know about intersil
+ */
+static int hermesi_program_init(hermes_t *hw, u32 offset)
+{
+ int err;
+
+ /* Disable interrupts? */
+ /*hw->inten = 0x0;*/
+ /*hermes_write_regn(hw, INTEN, 0);*/
+ /*hermes_set_irqmask(hw, 0);*/
+
+ /* Acknowledge any outstanding command */
+ hermes_write_regn(hw, EVACK, 0xFFFF);
+
+ /* Using init_cmd_wait rather than cmd_wait */
+ err = hw->ops->init_cmd_wait(hw,
+ 0x0100 | HERMES_CMD_INIT,
+ 0, 0, 0, NULL);
+ if (err)
+ return err;
+
+ err = hw->ops->init_cmd_wait(hw,
+ 0x0000 | HERMES_CMD_INIT,
+ 0, 0, 0, NULL);
+ if (err)
+ return err;
+
+ err = hermes_aux_control(hw, 1);
+ pr_debug("AUX enable returned %d\n", err);
+
+ if (err)
+ return err;
+
+ pr_debug("Enabling volatile, EP 0x%08x\n", offset);
+ err = hw->ops->init_cmd_wait(hw,
+ HERMES_PROGRAM_ENABLE_VOLATILE,
+ offset & 0xFFFFu,
+ offset >> 16,
+ 0,
+ NULL);
+ pr_debug("PROGRAM_ENABLE returned %d\n", err);
+
+ return err;
+}
+
+/* Done programming data (Hermes I)
+ *
+ * Spectrum_cs' Symbol fw does not require this
+ * wl_lkm Agere fw does
+ * Don't know about intersil
+ */
+static int hermesi_program_end(hermes_t *hw)
+{
+ struct hermes_response resp;
+ int rc = 0;
+ int err;
+
+ rc = hw->ops->cmd_wait(hw, HERMES_PROGRAM_DISABLE, 0, &resp);
+
+ pr_debug("PROGRAM_DISABLE returned %d, "
+ "r0 0x%04x, r1 0x%04x, r2 0x%04x\n",
+ rc, resp.resp0, resp.resp1, resp.resp2);
+
+ if ((rc == 0) &&
+ ((resp.status & HERMES_STATUS_CMDCODE) != HERMES_CMD_DOWNLD))
+ rc = -EIO;
+
+ err = hermes_aux_control(hw, 0);
+ pr_debug("AUX disable returned %d\n", err);
+
+ /* Acknowledge any outstanding command */
+ hermes_write_regn(hw, EVACK, 0xFFFF);
+
+ /* Reinitialise, ignoring return */
+ (void) hw->ops->init_cmd_wait(hw, 0x0000 | HERMES_CMD_INIT,
+ 0, 0, 0, NULL);
+
+ return rc ? rc : err;
+}
+
+static int hermes_program_bytes(struct hermes *hw, const char *data,
+ u32 addr, u32 len)
+{
+ /* wl lkm splits the programming into chunks of 2000 bytes.
+ * This restriction appears to come from USB. The PCMCIA
+ * adapters can program the whole lot in one go */
+ hermes_aux_setaddr(hw, addr);
+ hermes_write_bytes(hw, HERMES_AUXDATA, data, len);
+ return 0;
+}
+
+/* Read PDA from the adapter */
+static int hermes_read_pda(hermes_t *hw, __le16 *pda, u32 pda_addr, u16 pda_len)
+{
+ int ret;
+ u16 pda_size;
+ u16 data_len = pda_len;
+ __le16 *data = pda;
+
+ if (hw->eeprom_pda) {
+ /* PDA of spectrum symbol is in eeprom */
+
+ /* Issue command to read EEPROM */
+ ret = hw->ops->cmd_wait(hw, HERMES_CMD_READMIF, 0, NULL);
+ if (ret)
+ return ret;
+ } else {
+ /* wl_lkm does not include PDA size in the PDA area.
+ * We will pad the information into pda, so other routines
+ * don't have to be modified */
+ pda[0] = cpu_to_le16(pda_len - 2);
+ /* Includes CFG_PROD_DATA but not itself */
+ pda[1] = cpu_to_le16(0x0800); /* CFG_PROD_DATA */
+ data_len = pda_len - 4;
+ data = pda + 2;
+ }
+
+ /* Open auxiliary port */
+ ret = hermes_aux_control(hw, 1);
+ pr_debug("AUX enable returned %d\n", ret);
+ if (ret)
+ return ret;
+
+ /* Read PDA */
+ hermes_aux_setaddr(hw, pda_addr);
+ hermes_read_words(hw, HERMES_AUXDATA, data, data_len / 2);
+
+ /* Close aux port */
+ ret = hermes_aux_control(hw, 0);
+ pr_debug("AUX disable returned %d\n", ret);
+
+ /* Check PDA length */
+ pda_size = le16_to_cpu(pda[0]);
+ pr_debug("Actual PDA length %d, Max allowed %d\n",
+ pda_size, pda_len);
+ if (pda_size > pda_len)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void hermes_lock_irqsave(spinlock_t *lock,
+ unsigned long *flags) __acquires(lock)
+{
+ spin_lock_irqsave(lock, *flags);
+}
+
+static void hermes_unlock_irqrestore(spinlock_t *lock,
+ unsigned long *flags) __releases(lock)
+{
+ spin_unlock_irqrestore(lock, *flags);
+}
+
+static void hermes_lock_irq(spinlock_t *lock) __acquires(lock)
+{
+ spin_lock_irq(lock);
+}
+
+static void hermes_unlock_irq(spinlock_t *lock) __releases(lock)
+{
+ spin_unlock_irq(lock);
+}
+
+/* Hermes operations for local buses */
+static const struct hermes_ops hermes_ops_local = {
+ .init = hermes_init,
+ .cmd_wait = hermes_docmd_wait,
+ .init_cmd_wait = hermes_doicmd_wait,
+ .allocate = hermes_allocate,
+ .read_ltv = hermes_read_ltv,
+ .write_ltv = hermes_write_ltv,
+ .bap_pread = hermes_bap_pread,
+ .bap_pwrite = hermes_bap_pwrite,
+ .read_pda = hermes_read_pda,
+ .program_init = hermesi_program_init,
+ .program_end = hermesi_program_end,
+ .program = hermes_program_bytes,
+ .lock_irqsave = hermes_lock_irqsave,
+ .unlock_irqrestore = hermes_unlock_irqrestore,
+ .lock_irq = hermes_lock_irq,
+ .unlock_irq = hermes_unlock_irq,
+};
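The table above wires the generic driver to memory-mapped (local bus) access. As a rough sketch of how the new abstraction is meant to be used, and not part of this patch, a different backend would fill the same struct hermes_ops hooks with its own transport routines and install them in place of hermes_ops_local; the example_* names below are hypothetical placeholders.

#include "hermes.h"

/* Hypothetical backend ops; bodies are illustrative stubs only. */
static int example_bus_init(struct hermes *hw)
{
	/* reset and initialise the firmware over the backend's transport */
	return 0;
}

static int example_bus_program(struct hermes *hw, const char *data,
			       u32 addr, u32 len)
{
	/* write len bytes of firmware image data at addr */
	return 0;
}

static const struct hermes_ops example_bus_ops = {
	.init		= example_bus_init,
	.program	= example_bus_program,
	/* a real backend fills in every remaining hook as well */
};

Since hermes_struct_init() installs hermes_ops_local by default, a backend would switch hw->ops (and, if needed, set hw->priv) after calling it.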
diff --git a/drivers/net/wireless/orinoco/hermes.h b/drivers/net/wireless/orinoco/hermes.h
index 2dddbb597c4d..9ca34e722b45 100644
--- a/drivers/net/wireless/orinoco/hermes.h
+++ b/drivers/net/wireless/orinoco/hermes.h
@@ -374,6 +374,37 @@ struct hermes_multicast {
/* Timeouts */
#define HERMES_BAP_BUSY_TIMEOUT (10000) /* In iterations of ~1us */
+struct hermes;
+
+/* Functions to access hardware */
+struct hermes_ops {
+ int (*init)(struct hermes *hw);
+ int (*cmd_wait)(struct hermes *hw, u16 cmd, u16 parm0,
+ struct hermes_response *resp);
+ int (*init_cmd_wait)(struct hermes *hw, u16 cmd,
+ u16 parm0, u16 parm1, u16 parm2,
+ struct hermes_response *resp);
+ int (*allocate)(struct hermes *hw, u16 size, u16 *fid);
+ int (*read_ltv)(struct hermes *hw, int bap, u16 rid, unsigned buflen,
+ u16 *length, void *buf);
+ int (*write_ltv)(struct hermes *hw, int bap, u16 rid,
+ u16 length, const void *value);
+ int (*bap_pread)(struct hermes *hw, int bap, void *buf, int len,
+ u16 id, u16 offset);
+ int (*bap_pwrite)(struct hermes *hw, int bap, const void *buf,
+ int len, u16 id, u16 offset);
+ int (*read_pda)(struct hermes *hw, __le16 *pda,
+ u32 pda_addr, u16 pda_len);
+ int (*program_init)(struct hermes *hw, u32 entry_point);
+ int (*program_end)(struct hermes *hw);
+ int (*program)(struct hermes *hw, const char *buf,
+ u32 addr, u32 len);
+ void (*lock_irqsave)(spinlock_t *lock, unsigned long *flags);
+ void (*unlock_irqrestore)(spinlock_t *lock, unsigned long *flags);
+ void (*lock_irq)(spinlock_t *lock);
+ void (*unlock_irq)(spinlock_t *lock);
+};
+
/* Basic control structure */
typedef struct hermes {
void __iomem *iobase;
@@ -381,6 +412,9 @@ typedef struct hermes {
#define HERMES_16BIT_REGSPACING 0
#define HERMES_32BIT_REGSPACING 1
u16 inten; /* Which interrupts should be enabled? */
+ bool eeprom_pda;
+ const struct hermes_ops *ops;
+ void *priv;
} hermes_t;
/* Register access convenience macros */
@@ -394,22 +428,6 @@ typedef struct hermes {
/* Function prototypes */
void hermes_struct_init(hermes_t *hw, void __iomem *address, int reg_spacing);
-int hermes_init(hermes_t *hw);
-int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
- struct hermes_response *resp);
-int hermes_doicmd_wait(hermes_t *hw, u16 cmd,
- u16 parm0, u16 parm1, u16 parm2,
- struct hermes_response *resp);
-int hermes_allocate(hermes_t *hw, u16 size, u16 *fid);
-
-int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len,
- u16 id, u16 offset);
-int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len,
- u16 id, u16 offset);
-int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned buflen,
- u16 *length, void *buf);
-int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
- u16 length, const void *value);
/* Inline functions */
@@ -426,13 +444,13 @@ static inline void hermes_set_irqmask(hermes_t *hw, u16 events)
static inline int hermes_enable_port(hermes_t *hw, int port)
{
- return hermes_docmd_wait(hw, HERMES_CMD_ENABLE | (port << 8),
+ return hw->ops->cmd_wait(hw, HERMES_CMD_ENABLE | (port << 8),
0, NULL);
}
static inline int hermes_disable_port(hermes_t *hw, int port)
{
- return hermes_docmd_wait(hw, HERMES_CMD_DISABLE | (port << 8),
+ return hw->ops->cmd_wait(hw, HERMES_CMD_DISABLE | (port << 8),
0, NULL);
}
@@ -440,7 +458,7 @@ static inline int hermes_disable_port(hermes_t *hw, int port)
* information frame in __orinoco_ev_info() */
static inline int hermes_inquire(hermes_t *hw, u16 rid)
{
- return hermes_docmd_wait(hw, HERMES_CMD_INQUIRE, rid, NULL);
+ return hw->ops->cmd_wait(hw, HERMES_CMD_INQUIRE, rid, NULL);
}
#define HERMES_BYTES_TO_RECLEN(n) ((((n)+1)/2) + 1)
@@ -475,10 +493,10 @@ static inline void hermes_clear_words(struct hermes *hw, int off,
}
#define HERMES_READ_RECORD(hw, bap, rid, buf) \
- (hermes_read_ltv((hw), (bap), (rid), sizeof(*buf), NULL, (buf)))
+ (hw->ops->read_ltv((hw), (bap), (rid), sizeof(*buf), NULL, (buf)))
#define HERMES_WRITE_RECORD(hw, bap, rid, buf) \
- (hermes_write_ltv((hw), (bap), (rid), \
- HERMES_BYTES_TO_RECLEN(sizeof(*buf)), (buf)))
+ (hw->ops->write_ltv((hw), (bap), (rid), \
+ HERMES_BYTES_TO_RECLEN(sizeof(*buf)), (buf)))
static inline int hermes_read_wordrec(hermes_t *hw, int bap, u16 rid, u16 *word)
{
diff --git a/drivers/net/wireless/orinoco/hermes_dld.c b/drivers/net/wireless/orinoco/hermes_dld.c
index fb157eb889ca..6da85e75fce0 100644
--- a/drivers/net/wireless/orinoco/hermes_dld.c
+++ b/drivers/net/wireless/orinoco/hermes_dld.c
@@ -46,37 +46,11 @@
#define PFX "hermes_dld: "
-/*
- * AUX port access. To unlock the AUX port write the access keys to the
- * PARAM0-2 registers, then write HERMES_AUX_ENABLE to the HERMES_CONTROL
- * register. Then read it and make sure it's HERMES_AUX_ENABLED.
- */
-#define HERMES_AUX_ENABLE 0x8000 /* Enable auxiliary port access */
-#define HERMES_AUX_DISABLE 0x4000 /* Disable to auxiliary port access */
-#define HERMES_AUX_ENABLED 0xC000 /* Auxiliary port is open */
-#define HERMES_AUX_DISABLED 0x0000 /* Auxiliary port is closed */
-
-#define HERMES_AUX_PW0 0xFE01
-#define HERMES_AUX_PW1 0xDC23
-#define HERMES_AUX_PW2 0xBA45
-
-/* HERMES_CMD_DOWNLD */
-#define HERMES_PROGRAM_DISABLE (0x0000 | HERMES_CMD_DOWNLD)
-#define HERMES_PROGRAM_ENABLE_VOLATILE (0x0100 | HERMES_CMD_DOWNLD)
-#define HERMES_PROGRAM_ENABLE_NON_VOLATILE (0x0200 | HERMES_CMD_DOWNLD)
-#define HERMES_PROGRAM_NON_VOLATILE (0x0300 | HERMES_CMD_DOWNLD)
-
/* End markers used in dblocks */
#define PDI_END 0x00000000 /* End of PDA */
#define BLOCK_END 0xFFFFFFFF /* Last image block */
#define TEXT_END 0x1A /* End of text header */
-/* Limit the amout we try to download in a single shot.
- * Size is in bytes.
- */
-#define MAX_DL_SIZE 1024
-#define LIMIT_PROGRAM_SIZE 0
-
/*
* The following structures have little-endian fields denoted by
* the leading underscore. Don't access them directly - use inline
@@ -165,41 +139,6 @@ pdi_len(const struct pdi *pdi)
return 2 * (le16_to_cpu(pdi->len) - 1);
}
-/*** Hermes AUX control ***/
-
-static inline void
-hermes_aux_setaddr(hermes_t *hw, u32 addr)
-{
- hermes_write_reg(hw, HERMES_AUXPAGE, (u16) (addr >> 7));
- hermes_write_reg(hw, HERMES_AUXOFFSET, (u16) (addr & 0x7F));
-}
-
-static inline int
-hermes_aux_control(hermes_t *hw, int enabled)
-{
- int desired_state = enabled ? HERMES_AUX_ENABLED : HERMES_AUX_DISABLED;
- int action = enabled ? HERMES_AUX_ENABLE : HERMES_AUX_DISABLE;
- int i;
-
- /* Already open? */
- if (hermes_read_reg(hw, HERMES_CONTROL) == desired_state)
- return 0;
-
- hermes_write_reg(hw, HERMES_PARAM0, HERMES_AUX_PW0);
- hermes_write_reg(hw, HERMES_PARAM1, HERMES_AUX_PW1);
- hermes_write_reg(hw, HERMES_PARAM2, HERMES_AUX_PW2);
- hermes_write_reg(hw, HERMES_CONTROL, action);
-
- for (i = 0; i < 20; i++) {
- udelay(10);
- if (hermes_read_reg(hw, HERMES_CONTROL) ==
- desired_state)
- return 0;
- }
-
- return -EBUSY;
-}
-
/*** Plug Data Functions ***/
/*
@@ -271,62 +210,7 @@ hermes_plug_pdi(hermes_t *hw, const struct pdr *first_pdr,
return -EINVAL;
/* do the actual plugging */
- hermes_aux_setaddr(hw, pdr_addr(pdr));
- hermes_write_bytes(hw, HERMES_AUXDATA, pdi->data, pdi_len(pdi));
-
- return 0;
-}
-
-/* Read PDA from the adapter */
-int hermes_read_pda(hermes_t *hw,
- __le16 *pda,
- u32 pda_addr,
- u16 pda_len,
- int use_eeprom) /* can we get this into hw? */
-{
- int ret;
- u16 pda_size;
- u16 data_len = pda_len;
- __le16 *data = pda;
-
- if (use_eeprom) {
- /* PDA of spectrum symbol is in eeprom */
-
- /* Issue command to read EEPROM */
- ret = hermes_docmd_wait(hw, HERMES_CMD_READMIF, 0, NULL);
- if (ret)
- return ret;
- } else {
- /* wl_lkm does not include PDA size in the PDA area.
- * We will pad the information into pda, so other routines
- * don't have to be modified */
- pda[0] = cpu_to_le16(pda_len - 2);
- /* Includes CFG_PROD_DATA but not itself */
- pda[1] = cpu_to_le16(0x0800); /* CFG_PROD_DATA */
- data_len = pda_len - 4;
- data = pda + 2;
- }
-
- /* Open auxiliary port */
- ret = hermes_aux_control(hw, 1);
- pr_debug(PFX "AUX enable returned %d\n", ret);
- if (ret)
- return ret;
-
- /* read PDA from EEPROM */
- hermes_aux_setaddr(hw, pda_addr);
- hermes_read_words(hw, HERMES_AUXDATA, data, data_len / 2);
-
- /* Close aux port */
- ret = hermes_aux_control(hw, 0);
- pr_debug(PFX "AUX disable returned %d\n", ret);
-
- /* Check PDA length */
- pda_size = le16_to_cpu(pda[0]);
- pr_debug(PFX "Actual PDA length %d, Max allowed %d\n",
- pda_size, pda_len);
- if (pda_size > pda_len)
- return -EINVAL;
+ hw->ops->program(hw, pdi->data, pdr_addr(pdr), pdi_len(pdi));
return 0;
}
@@ -389,101 +273,13 @@ hermes_blocks_length(const char *first_block, const void *end)
/*** Hermes programming ***/
-/* About to start programming data (Hermes I)
- * offset is the entry point
- *
- * Spectrum_cs' Symbol fw does not require this
- * wl_lkm Agere fw does
- * Don't know about intersil
- */
-int hermesi_program_init(hermes_t *hw, u32 offset)
-{
- int err;
-
- /* Disable interrupts?*/
- /*hw->inten = 0x0;*/
- /*hermes_write_regn(hw, INTEN, 0);*/
- /*hermes_set_irqmask(hw, 0);*/
-
- /* Acknowledge any outstanding command */
- hermes_write_regn(hw, EVACK, 0xFFFF);
-
- /* Using doicmd_wait rather than docmd_wait */
- err = hermes_doicmd_wait(hw,
- 0x0100 | HERMES_CMD_INIT,
- 0, 0, 0, NULL);
- if (err)
- return err;
-
- err = hermes_doicmd_wait(hw,
- 0x0000 | HERMES_CMD_INIT,
- 0, 0, 0, NULL);
- if (err)
- return err;
-
- err = hermes_aux_control(hw, 1);
- pr_debug(PFX "AUX enable returned %d\n", err);
-
- if (err)
- return err;
-
- pr_debug(PFX "Enabling volatile, EP 0x%08x\n", offset);
- err = hermes_doicmd_wait(hw,
- HERMES_PROGRAM_ENABLE_VOLATILE,
- offset & 0xFFFFu,
- offset >> 16,
- 0,
- NULL);
- pr_debug(PFX "PROGRAM_ENABLE returned %d\n", err);
-
- return err;
-}
-
-/* Done programming data (Hermes I)
- *
- * Spectrum_cs' Symbol fw does not require this
- * wl_lkm Agere fw does
- * Don't know about intersil
- */
-int hermesi_program_end(hermes_t *hw)
-{
- struct hermes_response resp;
- int rc = 0;
- int err;
-
- rc = hermes_docmd_wait(hw, HERMES_PROGRAM_DISABLE, 0, &resp);
-
- pr_debug(PFX "PROGRAM_DISABLE returned %d, "
- "r0 0x%04x, r1 0x%04x, r2 0x%04x\n",
- rc, resp.resp0, resp.resp1, resp.resp2);
-
- if ((rc == 0) &&
- ((resp.status & HERMES_STATUS_CMDCODE) != HERMES_CMD_DOWNLD))
- rc = -EIO;
-
- err = hermes_aux_control(hw, 0);
- pr_debug(PFX "AUX disable returned %d\n", err);
-
- /* Acknowledge any outstanding command */
- hermes_write_regn(hw, EVACK, 0xFFFF);
-
- /* Reinitialise, ignoring return */
- (void) hermes_doicmd_wait(hw, 0x0000 | HERMES_CMD_INIT,
- 0, 0, 0, NULL);
-
- return rc ? rc : err;
-}
-
/* Program the data blocks */
int hermes_program(hermes_t *hw, const char *first_block, const void *end)
{
const struct dblock *blk;
u32 blkaddr;
u32 blklen;
-#if LIMIT_PROGRAM_SIZE
- u32 addr;
- u32 len;
-#endif
+ int err = 0;
blk = (const struct dblock *) first_block;
@@ -498,30 +294,10 @@ int hermes_program(hermes_t *hw, const char *first_block, const void *end)
pr_debug(PFX "Programming block of length %d "
"to address 0x%08x\n", blklen, blkaddr);
-#if !LIMIT_PROGRAM_SIZE
- /* wl_lkm driver splits this into writes of 2000 bytes */
- hermes_aux_setaddr(hw, blkaddr);
- hermes_write_bytes(hw, HERMES_AUXDATA, blk->data,
- blklen);
-#else
- len = (blklen < MAX_DL_SIZE) ? blklen : MAX_DL_SIZE;
- addr = blkaddr;
-
- while (addr < (blkaddr + blklen)) {
- pr_debug(PFX "Programming subblock of length %d "
- "to address 0x%08x. Data @ %p\n",
- len, addr, &blk->data[addr - blkaddr]);
-
- hermes_aux_setaddr(hw, addr);
- hermes_write_bytes(hw, HERMES_AUXDATA,
- &blk->data[addr - blkaddr],
- len);
-
- addr += len;
- len = ((blkaddr + blklen - addr) < MAX_DL_SIZE) ?
- (blkaddr + blklen - addr) : MAX_DL_SIZE;
- }
-#endif
+ err = hw->ops->program(hw, blk->data, blkaddr, blklen);
+ if (err)
+ break;
+
blk = (const struct dblock *) &blk->data[blklen];
if ((void *) blk > (end - sizeof(*blk)))
@@ -530,7 +306,7 @@ int hermes_program(hermes_t *hw, const char *first_block, const void *end)
blkaddr = dblock_addr(blk);
blklen = dblock_len(blk);
}
- return 0;
+ return err;
}
/*** Default plugging data for Hermes I ***/
@@ -690,9 +466,8 @@ int hermes_apply_pda_with_defaults(hermes_t *hw,
if ((pdi_len(pdi) == pdr_len(pdr)) &&
((void *) pdi->data + pdi_len(pdi) < pda_end)) {
/* do the actual plugging */
- hermes_aux_setaddr(hw, pdr_addr(pdr));
- hermes_write_bytes(hw, HERMES_AUXDATA,
- pdi->data, pdi_len(pdi));
+ hw->ops->program(hw, pdi->data, pdr_addr(pdr),
+ pdi_len(pdi));
}
}
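hermes_program() above now delegates each block write to hw->ops->program(), so any per-transfer size limit belongs in the backend rather than in the generic block walker (which is why the old MAX_DL_SIZE path could go). A minimal sketch, assuming a backend whose bus caps single writes at some EXAMPLE_MAX_CHUNK and using a hypothetical example_bus_write() helper:

#define EXAMPLE_MAX_CHUNK 2000	/* assumed per-transfer limit, in bytes */

static int example_program_chunked(struct hermes *hw, const char *data,
				   u32 addr, u32 len)
{
	while (len) {
		u32 chunk = min_t(u32, len, EXAMPLE_MAX_CHUNK);
		/* example_bus_write() stands in for the real transport call */
		int err = example_bus_write(hw, addr, data, chunk);

		if (err)
			return err;
		addr += chunk;
		data += chunk;
		len -= chunk;
	}
	return 0;
}

The generic loop stops at the first non-zero return, so a chunked backend reports errors exactly as the memory-mapped hermes_program_bytes() does.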
diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c
index e6369242e49c..6fbd78850123 100644
--- a/drivers/net/wireless/orinoco/hw.c
+++ b/drivers/net/wireless/orinoco/hw.c
@@ -177,9 +177,9 @@ int determine_fw_capabilities(struct orinoco_private *priv,
/* 3Com MAC : 00:50:DA:* */
memset(tmp, 0, sizeof(tmp));
/* Get the Symbol firmware version */
- err = hermes_read_ltv(hw, USER_BAP,
- HERMES_RID_SECONDARYVERSION_SYMBOL,
- SYMBOL_MAX_VER_LEN, NULL, &tmp);
+ err = hw->ops->read_ltv(hw, USER_BAP,
+ HERMES_RID_SECONDARYVERSION_SYMBOL,
+ SYMBOL_MAX_VER_LEN, NULL, &tmp);
if (err) {
dev_warn(dev, "Error %d reading Symbol firmware info. "
"Wildly guessing capabilities...\n", err);
@@ -262,6 +262,13 @@ int determine_fw_capabilities(struct orinoco_private *priv,
if (fw_name)
dev_info(dev, "Firmware determined as %s\n", fw_name);
+#ifndef CONFIG_HERMES_PRISM
+ if (priv->firmware_type == FIRMWARE_TYPE_INTERSIL) {
+ dev_err(dev, "Support for Prism chipset is not enabled\n");
+ return -ENODEV;
+ }
+#endif
+
return 0;
}
@@ -279,8 +286,8 @@ int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr)
u16 reclen;
/* Get the MAC address */
- err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR,
- ETH_ALEN, NULL, dev_addr);
+ err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR,
+ ETH_ALEN, NULL, dev_addr);
if (err) {
dev_warn(dev, "Failed to read MAC address!\n");
goto out;
@@ -289,8 +296,8 @@ int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr)
dev_dbg(dev, "MAC address %pM\n", dev_addr);
/* Get the station name */
- err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME,
- sizeof(nickbuf), &reclen, &nickbuf);
+ err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME,
+ sizeof(nickbuf), &reclen, &nickbuf);
if (err) {
dev_err(dev, "failed to read station name\n");
goto out;
@@ -367,6 +374,32 @@ int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr)
err = hermes_read_wordrec(hw, USER_BAP,
HERMES_RID_CNFPREAMBLE_SYMBOL,
&priv->preamble);
+ if (err) {
+ dev_err(dev, "Failed to read preamble setup\n");
+ goto out;
+ }
+ }
+
+ /* Retry settings */
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_SHORTRETRYLIMIT,
+ &priv->short_retry_limit);
+ if (err) {
+ dev_err(dev, "Failed to read short retry limit\n");
+ goto out;
+ }
+
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_LONGRETRYLIMIT,
+ &priv->long_retry_limit);
+ if (err) {
+ dev_err(dev, "Failed to read long retry limit\n");
+ goto out;
+ }
+
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_MAXTRANSMITLIFETIME,
+ &priv->retry_lifetime);
+ if (err) {
+ dev_err(dev, "Failed to read max retry lifetime\n");
+ goto out;
}
out:
@@ -380,11 +413,11 @@ int orinoco_hw_allocate_fid(struct orinoco_private *priv)
struct hermes *hw = &priv->hw;
int err;
- err = hermes_allocate(hw, priv->nicbuf_size, &priv->txfid);
+ err = hw->ops->allocate(hw, priv->nicbuf_size, &priv->txfid);
if (err == -EIO && priv->nicbuf_size > TX_NICBUF_SIZE_BUG) {
/* Try workaround for old Symbol firmware bug */
priv->nicbuf_size = TX_NICBUF_SIZE_BUG;
- err = hermes_allocate(hw, priv->nicbuf_size, &priv->txfid);
+ err = hw->ops->allocate(hw, priv->nicbuf_size, &priv->txfid);
dev_warn(dev, "Firmware ALLOC bug detected "
"(old Symbol firmware?). Work around %s\n",
@@ -430,8 +463,9 @@ int orinoco_hw_program_rids(struct orinoco_private *priv)
struct hermes_idstring idbuf;
/* Set the MAC address */
- err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR,
- HERMES_BYTES_TO_RECLEN(ETH_ALEN), dev->dev_addr);
+ err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR,
+ HERMES_BYTES_TO_RECLEN(ETH_ALEN),
+ dev->dev_addr);
if (err) {
printk(KERN_ERR "%s: Error %d setting MAC address\n",
dev->name, err);
@@ -494,7 +528,7 @@ int orinoco_hw_program_rids(struct orinoco_private *priv)
idbuf.len = cpu_to_le16(strlen(priv->desired_essid));
memcpy(&idbuf.val, priv->desired_essid, sizeof(idbuf.val));
/* WinXP wants partner to configure OWNSSID even in IBSS mode. (jimc) */
- err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNSSID,
+ err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNSSID,
HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid)+2),
&idbuf);
if (err) {
@@ -502,7 +536,7 @@ int orinoco_hw_program_rids(struct orinoco_private *priv)
dev->name, err);
return err;
}
- err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFDESIREDSSID,
+ err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFDESIREDSSID,
HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid)+2),
&idbuf);
if (err) {
@@ -514,9 +548,9 @@ int orinoco_hw_program_rids(struct orinoco_private *priv)
/* Set the station name */
idbuf.len = cpu_to_le16(strlen(priv->nick));
memcpy(&idbuf.val, priv->nick, sizeof(idbuf.val));
- err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME,
- HERMES_BYTES_TO_RECLEN(strlen(priv->nick)+2),
- &idbuf);
+ err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME,
+ HERMES_BYTES_TO_RECLEN(strlen(priv->nick)+2),
+ &idbuf);
if (err) {
printk(KERN_ERR "%s: Error %d setting nickname\n",
dev->name, err);
@@ -631,12 +665,12 @@ int orinoco_hw_program_rids(struct orinoco_private *priv)
if (priv->iw_mode == NL80211_IFTYPE_MONITOR) {
/* Enable monitor mode */
dev->type = ARPHRD_IEEE80211;
- err = hermes_docmd_wait(hw, HERMES_CMD_TEST |
+ err = hw->ops->cmd_wait(hw, HERMES_CMD_TEST |
HERMES_TEST_MONITOR, 0, NULL);
} else {
/* Disable monitor mode */
dev->type = ARPHRD_ETHER;
- err = hermes_docmd_wait(hw, HERMES_CMD_TEST |
+ err = hw->ops->cmd_wait(hw, HERMES_CMD_TEST |
HERMES_TEST_STOP, 0, NULL);
}
if (err)
@@ -662,8 +696,8 @@ int orinoco_hw_get_tkip_iv(struct orinoco_private *priv, int key, u8 *tsc)
if ((key < 0) || (key >= 4))
return -EINVAL;
- err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_TKIP_IV,
- sizeof(tsc_arr), NULL, &tsc_arr);
+ err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_TKIP_IV,
+ sizeof(tsc_arr), NULL, &tsc_arr);
if (!err)
memcpy(tsc, &tsc_arr[key][0], sizeof(tsc_arr[0]));
@@ -842,7 +876,7 @@ int __orinoco_hw_setup_wepkeys(struct orinoco_private *priv)
memcpy(key, priv->keys[i].key,
priv->keys[i].key_len);
- err = hermes_write_ltv(hw, USER_BAP,
+ err = hw->ops->write_ltv(hw, USER_BAP,
HERMES_RID_CNFDEFAULTKEY0 + i,
HERMES_BYTES_TO_RECLEN(keylen),
key);
@@ -1049,17 +1083,17 @@ int __orinoco_hw_set_multicast_list(struct orinoco_private *priv,
* group address if either we want to multicast, or if we were
* multicasting and want to stop */
if (!promisc && (mc_count || priv->mc_count)) {
- struct dev_mc_list *p;
+ struct netdev_hw_addr *ha;
struct hermes_multicast mclist;
int i = 0;
- netdev_for_each_mc_addr(p, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (i == mc_count)
break;
- memcpy(mclist.addr[i++], p->dmi_addr, ETH_ALEN);
+ memcpy(mclist.addr[i++], ha->addr, ETH_ALEN);
}
- err = hermes_write_ltv(hw, USER_BAP,
+ err = hw->ops->write_ltv(hw, USER_BAP,
HERMES_RID_CNFGROUPADDRESSES,
HERMES_BYTES_TO_RECLEN(mc_count * ETH_ALEN),
&mclist);
@@ -1101,15 +1135,15 @@ int orinoco_hw_get_essid(struct orinoco_private *priv, int *active,
rid = (priv->port_type == 3) ? HERMES_RID_CNFOWNSSID :
HERMES_RID_CNFDESIREDSSID;
- err = hermes_read_ltv(hw, USER_BAP, rid, sizeof(essidbuf),
- NULL, &essidbuf);
+ err = hw->ops->read_ltv(hw, USER_BAP, rid, sizeof(essidbuf),
+ NULL, &essidbuf);
if (err)
goto fail_unlock;
} else {
*active = 0;
- err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENTSSID,
- sizeof(essidbuf), NULL, &essidbuf);
+ err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENTSSID,
+ sizeof(essidbuf), NULL, &essidbuf);
if (err)
goto fail_unlock;
}
@@ -1180,8 +1214,8 @@ int orinoco_hw_get_bitratelist(struct orinoco_private *priv,
if (orinoco_lock(priv, &flags) != 0)
return -EBUSY;
- err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_SUPPORTEDDATARATES,
- sizeof(list), NULL, &list);
+ err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_SUPPORTEDDATARATES,
+ sizeof(list), NULL, &list);
orinoco_unlock(priv, &flags);
if (err)
@@ -1248,7 +1282,7 @@ int orinoco_hw_trigger_scan(struct orinoco_private *priv,
idbuf.len = cpu_to_le16(len);
memcpy(idbuf.val, ssid->ssid, len);
- err = hermes_write_ltv(hw, USER_BAP,
+ err = hw->ops->write_ltv(hw, USER_BAP,
HERMES_RID_CNFSCANSSID_AGERE,
HERMES_BYTES_TO_RECLEN(len + 2),
&idbuf);
@@ -1312,8 +1346,8 @@ int orinoco_hw_get_current_bssid(struct orinoco_private *priv,
hermes_t *hw = &priv->hw;
int err;
- err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENTBSSID,
- ETH_ALEN, NULL, addr);
+ err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENTBSSID,
+ ETH_ALEN, NULL, addr);
return err;
}
diff --git a/drivers/net/wireless/orinoco/hw.h b/drivers/net/wireless/orinoco/hw.h
index 9799a1d14a63..97af71e79950 100644
--- a/drivers/net/wireless/orinoco/hw.h
+++ b/drivers/net/wireless/orinoco/hw.h
@@ -22,7 +22,6 @@
/* Forward declarations */
struct orinoco_private;
-struct dev_addr_list;
int determine_fw_capabilities(struct orinoco_private *priv, char *fw_name,
size_t fw_name_len, u32 *hw_ver);
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index 413e9ab6cab3..97e954ee17f8 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -254,7 +254,7 @@ void set_port_type(struct orinoco_private *priv)
/* Device methods */
/********************************************************************/
-static int orinoco_open(struct net_device *dev)
+int orinoco_open(struct net_device *dev)
{
struct orinoco_private *priv = ndev_priv(dev);
unsigned long flags;
@@ -272,8 +272,9 @@ static int orinoco_open(struct net_device *dev)
return err;
}
+EXPORT_SYMBOL(orinoco_open);
-static int orinoco_stop(struct net_device *dev)
+int orinoco_stop(struct net_device *dev)
{
struct orinoco_private *priv = ndev_priv(dev);
int err = 0;
@@ -281,25 +282,27 @@ static int orinoco_stop(struct net_device *dev)
/* We mustn't use orinoco_lock() here, because we need to be
able to close the interface even if hw_unavailable is set
(e.g. as we're released after a PC Card removal) */
- spin_lock_irq(&priv->lock);
+ orinoco_lock_irq(priv);
priv->open = 0;
err = __orinoco_down(priv);
- spin_unlock_irq(&priv->lock);
+ orinoco_unlock_irq(priv);
return err;
}
+EXPORT_SYMBOL(orinoco_stop);
-static struct net_device_stats *orinoco_get_stats(struct net_device *dev)
+struct net_device_stats *orinoco_get_stats(struct net_device *dev)
{
struct orinoco_private *priv = ndev_priv(dev);
return &priv->stats;
}
+EXPORT_SYMBOL(orinoco_get_stats);
-static void orinoco_set_multicast_list(struct net_device *dev)
+void orinoco_set_multicast_list(struct net_device *dev)
{
struct orinoco_private *priv = ndev_priv(dev);
unsigned long flags;
@@ -313,8 +316,9 @@ static void orinoco_set_multicast_list(struct net_device *dev)
__orinoco_set_multicast_list(dev);
orinoco_unlock(priv, &flags);
}
+EXPORT_SYMBOL(orinoco_set_multicast_list);
-static int orinoco_change_mtu(struct net_device *dev, int new_mtu)
+int orinoco_change_mtu(struct net_device *dev, int new_mtu)
{
struct orinoco_private *priv = ndev_priv(dev);
@@ -330,23 +334,115 @@ static int orinoco_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
+EXPORT_SYMBOL(orinoco_change_mtu);
/********************************************************************/
/* Tx path */
/********************************************************************/
+/* Add encapsulation and MIC to the existing SKB.
+ * The main xmit routine will then send the whole lot to the card.
+ * Need 8 bytes headroom
+ * Need 8 bytes tailroom
+ *
+ *                           With encapsulated Ethernet-II frame
+ *                           --------
+ *                           802.3 header (14 bytes)
+ *                            dst[6]
+ * --------                   src[6]
+ * 802.3 header (14 bytes)    len[2]
+ *  dst[6]                   802.2 header (8 bytes)
+ *  src[6]                    encaps[6]
+ *  len[2] <- leave alone ->  len[2]
+ * --------                  -------- <-- 0
+ * Payload                   Payload
+ * ...                       ...
+ *
+ * --------                  --------
+ *                            MIC (8 bytes)
+ *                           --------
+ *
+ * returns 0 on success, -ENOMEM on error.
+ */
+int orinoco_process_xmit_skb(struct sk_buff *skb,
+ struct net_device *dev,
+ struct orinoco_private *priv,
+ int *tx_control,
+ u8 *mic_buf)
+{
+ struct orinoco_tkip_key *key;
+ struct ethhdr *eh;
+ int do_mic;
+
+ key = (struct orinoco_tkip_key *) priv->keys[priv->tx_key].key;
+
+ do_mic = ((priv->encode_alg == ORINOCO_ALG_TKIP) &&
+ (key != NULL));
+
+ if (do_mic)
+ *tx_control |= (priv->tx_key << HERMES_MIC_KEY_ID_SHIFT) |
+ HERMES_TXCTRL_MIC;
+
+ eh = (struct ethhdr *)skb->data;
+
+ /* Encapsulate Ethernet-II frames */
+ if (ntohs(eh->h_proto) > ETH_DATA_LEN) { /* Ethernet-II frame */
+ struct header_struct {
+ struct ethhdr eth; /* 802.3 header */
+ u8 encap[6]; /* 802.2 header */
+ } __attribute__ ((packed)) hdr;
+ int len = skb->len + sizeof(encaps_hdr) - (2 * ETH_ALEN);
+
+ if (skb_headroom(skb) < ENCAPS_OVERHEAD) {
+ if (net_ratelimit())
+ printk(KERN_ERR
+ "%s: Not enough headroom for 802.2 headers %d\n",
+ dev->name, skb_headroom(skb));
+ return -ENOMEM;
+ }
+
+ /* Fill in new header */
+ memcpy(&hdr.eth, eh, 2 * ETH_ALEN);
+ hdr.eth.h_proto = htons(len);
+ memcpy(hdr.encap, encaps_hdr, sizeof(encaps_hdr));
+
+ /* Make room for the new header, and copy it in */
+ eh = (struct ethhdr *) skb_push(skb, ENCAPS_OVERHEAD);
+ memcpy(eh, &hdr, sizeof(hdr));
+ }
+
+ /* Calculate Michael MIC */
+ if (do_mic) {
+ size_t len = skb->len - ETH_HLEN;
+ u8 *mic = &mic_buf[0];
+
+ /* Have to write to an even address, so copy the spare
+ * byte across */
+ if (skb->len % 2) {
+ *mic = skb->data[skb->len - 1];
+ mic++;
+ }
+
+ orinoco_mic(priv->tx_tfm_mic, key->tx_mic,
+ eh->h_dest, eh->h_source, 0 /* priority */,
+ skb->data + ETH_HLEN,
+ len, mic);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(orinoco_process_xmit_skb);
+
static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct orinoco_private *priv = ndev_priv(dev);
struct net_device_stats *stats = &priv->stats;
- struct orinoco_tkip_key *key;
hermes_t *hw = &priv->hw;
int err = 0;
u16 txfid = priv->txfid;
- struct ethhdr *eh;
int tx_control;
unsigned long flags;
- int do_mic;
+ u8 mic_buf[MICHAEL_MIC_LEN+1];
if (!netif_running(dev)) {
printk(KERN_ERR "%s: Tx on stopped device!\n",
@@ -378,16 +474,12 @@ static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
if (skb->len < ETH_HLEN)
goto drop;
- key = (struct orinoco_tkip_key *) priv->keys[priv->tx_key].key;
-
- do_mic = ((priv->encode_alg == ORINOCO_ALG_TKIP) &&
- (key != NULL));
-
tx_control = HERMES_TXCTRL_TX_OK | HERMES_TXCTRL_TX_EX;
- if (do_mic)
- tx_control |= (priv->tx_key << HERMES_MIC_KEY_ID_SHIFT) |
- HERMES_TXCTRL_MIC;
+ err = orinoco_process_xmit_skb(skb, dev, priv, &tx_control,
+ &mic_buf[0]);
+ if (err)
+ goto drop;
if (priv->has_alt_txcntl) {
/* WPA enabled firmwares have tx_cntl at the end of
@@ -400,8 +492,8 @@ static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
memset(&desc, 0, sizeof(desc));
*txcntl = cpu_to_le16(tx_control);
- err = hermes_bap_pwrite(hw, USER_BAP, &desc, sizeof(desc),
- txfid, 0);
+ err = hw->ops->bap_pwrite(hw, USER_BAP, &desc, sizeof(desc),
+ txfid, 0);
if (err) {
if (net_ratelimit())
printk(KERN_ERR "%s: Error %d writing Tx "
@@ -414,8 +506,8 @@ static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
memset(&desc, 0, sizeof(desc));
desc.tx_control = cpu_to_le16(tx_control);
- err = hermes_bap_pwrite(hw, USER_BAP, &desc, sizeof(desc),
- txfid, 0);
+ err = hw->ops->bap_pwrite(hw, USER_BAP, &desc, sizeof(desc),
+ txfid, 0);
if (err) {
if (net_ratelimit())
printk(KERN_ERR "%s: Error %d writing Tx "
@@ -430,68 +522,24 @@ static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
HERMES_802_3_OFFSET - HERMES_802_11_OFFSET);
}
- eh = (struct ethhdr *)skb->data;
-
- /* Encapsulate Ethernet-II frames */
- if (ntohs(eh->h_proto) > ETH_DATA_LEN) { /* Ethernet-II frame */
- struct header_struct {
- struct ethhdr eth; /* 802.3 header */
- u8 encap[6]; /* 802.2 header */
- } __attribute__ ((packed)) hdr;
-
- /* Strip destination and source from the data */
- skb_pull(skb, 2 * ETH_ALEN);
-
- /* And move them to a separate header */
- memcpy(&hdr.eth, eh, 2 * ETH_ALEN);
- hdr.eth.h_proto = htons(sizeof(encaps_hdr) + skb->len);
- memcpy(hdr.encap, encaps_hdr, sizeof(encaps_hdr));
-
- /* Insert the SNAP header */
- if (skb_headroom(skb) < sizeof(hdr)) {
- printk(KERN_ERR
- "%s: Not enough headroom for 802.2 headers %d\n",
- dev->name, skb_headroom(skb));
- goto drop;
- }
- eh = (struct ethhdr *) skb_push(skb, sizeof(hdr));
- memcpy(eh, &hdr, sizeof(hdr));
- }
-
- err = hermes_bap_pwrite(hw, USER_BAP, skb->data, skb->len,
- txfid, HERMES_802_3_OFFSET);
+ err = hw->ops->bap_pwrite(hw, USER_BAP, skb->data, skb->len,
+ txfid, HERMES_802_3_OFFSET);
if (err) {
printk(KERN_ERR "%s: Error %d writing packet to BAP\n",
dev->name, err);
goto busy;
}
- /* Calculate Michael MIC */
- if (do_mic) {
- u8 mic_buf[MICHAEL_MIC_LEN + 1];
- u8 *mic;
- size_t offset;
- size_t len;
+ if (tx_control & HERMES_TXCTRL_MIC) {
+ size_t offset = HERMES_802_3_OFFSET + skb->len;
+ size_t len = MICHAEL_MIC_LEN;
- if (skb->len % 2) {
- /* MIC start is on an odd boundary */
- mic_buf[0] = skb->data[skb->len - 1];
- mic = &mic_buf[1];
- offset = skb->len - 1;
- len = MICHAEL_MIC_LEN + 1;
- } else {
- mic = &mic_buf[0];
- offset = skb->len;
- len = MICHAEL_MIC_LEN;
+ if (offset % 2) {
+ offset--;
+ len++;
}
-
- orinoco_mic(priv->tx_tfm_mic, key->tx_mic,
- eh->h_dest, eh->h_source, 0 /* priority */,
- skb->data + ETH_HLEN, skb->len - ETH_HLEN, mic);
-
- /* Write the MIC */
- err = hermes_bap_pwrite(hw, USER_BAP, &mic_buf[0], len,
- txfid, HERMES_802_3_OFFSET + offset);
+ err = hw->ops->bap_pwrite(hw, USER_BAP, &mic_buf[0], len,
+ txfid, offset);
if (err) {
printk(KERN_ERR "%s: Error %d writing MIC to BAP\n",
dev->name, err);
@@ -502,7 +550,7 @@ static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
/* Finally, we actually initiate the send */
netif_stop_queue(dev);
- err = hermes_docmd_wait(hw, HERMES_CMD_TX | HERMES_CMD_RECL,
+ err = hw->ops->cmd_wait(hw, HERMES_CMD_TX | HERMES_CMD_RECL,
txfid, NULL);
if (err) {
netif_start_queue(dev);
@@ -512,7 +560,6 @@ static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
goto busy;
}
- dev->trans_start = jiffies;
stats->tx_bytes += HERMES_802_3_OFFSET + skb->len;
goto ok;
@@ -572,9 +619,9 @@ static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw)
return; /* Nothing's really happened */
/* Read part of the frame header - we need status and addr1 */
- err = hermes_bap_pread(hw, IRQ_BAP, &hdr,
- sizeof(struct hermes_txexc_data),
- fid, 0);
+ err = hw->ops->bap_pread(hw, IRQ_BAP, &hdr,
+ sizeof(struct hermes_txexc_data),
+ fid, 0);
hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID);
stats->tx_errors++;
@@ -615,7 +662,7 @@ static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw)
netif_wake_queue(dev);
}
-static void orinoco_tx_timeout(struct net_device *dev)
+void orinoco_tx_timeout(struct net_device *dev)
{
struct orinoco_private *priv = ndev_priv(dev);
struct net_device_stats *stats = &priv->stats;
@@ -630,6 +677,7 @@ static void orinoco_tx_timeout(struct net_device *dev)
schedule_work(&priv->reset_work);
}
+EXPORT_SYMBOL(orinoco_tx_timeout);
/********************************************************************/
/* Rx path (data frames) */
@@ -764,9 +812,9 @@ static void orinoco_rx_monitor(struct net_device *dev, u16 rxfid,
/* If any, copy the data from the card to the skb */
if (datalen > 0) {
- err = hermes_bap_pread(hw, IRQ_BAP, skb_put(skb, datalen),
- ALIGN(datalen, 2), rxfid,
- HERMES_802_2_OFFSET);
+ err = hw->ops->bap_pread(hw, IRQ_BAP, skb_put(skb, datalen),
+ ALIGN(datalen, 2), rxfid,
+ HERMES_802_2_OFFSET);
if (err) {
printk(KERN_ERR "%s: error %d reading monitor frame\n",
dev->name, err);
@@ -792,7 +840,7 @@ static void orinoco_rx_monitor(struct net_device *dev, u16 rxfid,
stats->rx_dropped++;
}
-static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
+void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
{
struct orinoco_private *priv = ndev_priv(dev);
struct net_device_stats *stats = &priv->stats;
@@ -814,8 +862,8 @@ static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
rxfid = hermes_read_regn(hw, RXFID);
- err = hermes_bap_pread(hw, IRQ_BAP, desc, sizeof(*desc),
- rxfid, 0);
+ err = hw->ops->bap_pread(hw, IRQ_BAP, desc, sizeof(*desc),
+ rxfid, 0);
if (err) {
printk(KERN_ERR "%s: error %d reading Rx descriptor. "
"Frame dropped.\n", dev->name, err);
@@ -882,9 +930,9 @@ static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
nothing is removed. 2 is for aligning the IP header. */
skb_reserve(skb, ETH_HLEN + 2);
- err = hermes_bap_pread(hw, IRQ_BAP, skb_put(skb, length),
- ALIGN(length, 2), rxfid,
- HERMES_802_2_OFFSET);
+ err = hw->ops->bap_pread(hw, IRQ_BAP, skb_put(skb, length),
+ ALIGN(length, 2), rxfid,
+ HERMES_802_2_OFFSET);
if (err) {
printk(KERN_ERR "%s: error %d reading frame. "
"Frame dropped.\n", dev->name, err);
@@ -913,6 +961,7 @@ update_stats:
out:
kfree(desc);
}
+EXPORT_SYMBOL(__orinoco_ev_rx);
static void orinoco_rx(struct net_device *dev,
struct hermes_rx_descriptor *desc,
@@ -1145,9 +1194,9 @@ static void orinoco_join_ap(struct work_struct *work)
goto out;
/* Read scan results from the firmware */
- err = hermes_read_ltv(hw, USER_BAP,
- HERMES_RID_SCANRESULTSTABLE,
- MAX_SCAN_LEN, &len, buf);
+ err = hw->ops->read_ltv(hw, USER_BAP,
+ HERMES_RID_SCANRESULTSTABLE,
+ MAX_SCAN_LEN, &len, buf);
if (err) {
printk(KERN_ERR "%s: Cannot read scan results\n",
dev->name);
@@ -1194,8 +1243,8 @@ static void orinoco_send_bssid_wevent(struct orinoco_private *priv)
union iwreq_data wrqu;
int err;
- err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENTBSSID,
- ETH_ALEN, NULL, wrqu.ap_addr.sa_data);
+ err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENTBSSID,
+ ETH_ALEN, NULL, wrqu.ap_addr.sa_data);
if (err != 0)
return;
@@ -1217,8 +1266,8 @@ static void orinoco_send_assocreqie_wevent(struct orinoco_private *priv)
if (!priv->has_wpa)
return;
- err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_ASSOC_REQ_INFO,
- sizeof(buf), NULL, &buf);
+ err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_ASSOC_REQ_INFO,
+ sizeof(buf), NULL, &buf);
if (err != 0)
return;
@@ -1247,8 +1296,9 @@ static void orinoco_send_assocrespie_wevent(struct orinoco_private *priv)
if (!priv->has_wpa)
return;
- err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_ASSOC_RESP_INFO,
- sizeof(buf), NULL, &buf);
+ err = hw->ops->read_ltv(hw, USER_BAP,
+ HERMES_RID_CURRENT_ASSOC_RESP_INFO,
+ sizeof(buf), NULL, &buf);
if (err != 0)
return;
@@ -1353,7 +1403,7 @@ static void orinoco_process_scan_results(struct work_struct *work)
spin_unlock_irqrestore(&priv->scan_lock, flags);
}
-static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
+void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
{
struct orinoco_private *priv = ndev_priv(dev);
u16 infofid;
@@ -1371,8 +1421,8 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
infofid = hermes_read_regn(hw, INFOFID);
/* Read the info frame header - don't try too hard */
- err = hermes_bap_pread(hw, IRQ_BAP, &info, sizeof(info),
- infofid, 0);
+ err = hw->ops->bap_pread(hw, IRQ_BAP, &info, sizeof(info),
+ infofid, 0);
if (err) {
printk(KERN_ERR "%s: error %d reading info frame. "
"Frame dropped.\n", dev->name, err);
@@ -1393,8 +1443,8 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
len = sizeof(tallies);
}
- err = hermes_bap_pread(hw, IRQ_BAP, &tallies, len,
- infofid, sizeof(info));
+ err = hw->ops->bap_pread(hw, IRQ_BAP, &tallies, len,
+ infofid, sizeof(info));
if (err)
break;
@@ -1429,8 +1479,8 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
break;
}
- err = hermes_bap_pread(hw, IRQ_BAP, &linkstatus, len,
- infofid, sizeof(info));
+ err = hw->ops->bap_pread(hw, IRQ_BAP, &linkstatus, len,
+ infofid, sizeof(info));
if (err)
break;
newstatus = le16_to_cpu(linkstatus.linkstatus);
@@ -1494,8 +1544,8 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
}
/* Read scan data */
- err = hermes_bap_pread(hw, IRQ_BAP, (void *) buf, len,
- infofid, sizeof(info));
+ err = hw->ops->bap_pread(hw, IRQ_BAP, (void *) buf, len,
+ infofid, sizeof(info));
if (err) {
kfree(buf);
qabort_scan(priv);
@@ -1547,8 +1597,8 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
break;
/* Read scan data */
- err = hermes_bap_pread(hw, IRQ_BAP, (void *) bss, len,
- infofid, sizeof(info));
+ err = hw->ops->bap_pread(hw, IRQ_BAP, (void *) bss, len,
+ infofid, sizeof(info));
if (err)
kfree(bss);
else
@@ -1571,6 +1621,7 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
return;
}
+EXPORT_SYMBOL(__orinoco_ev_info);
static void __orinoco_ev_infdrop(struct net_device *dev, hermes_t *hw)
{
@@ -1647,7 +1698,7 @@ static int orinoco_reinit_firmware(struct orinoco_private *priv)
struct hermes *hw = &priv->hw;
int err;
- err = hermes_init(hw);
+ err = hw->ops->init(hw);
if (priv->do_fw_download && !err) {
err = orinoco_download(priv);
if (err)
@@ -1735,7 +1786,7 @@ void orinoco_reset(struct work_struct *work)
}
/* This has to be called from user context */
- spin_lock_irq(&priv->lock);
+ orinoco_lock_irq(priv);
priv->hw_unavailable--;
@@ -1750,7 +1801,7 @@ void orinoco_reset(struct work_struct *work)
dev->trans_start = jiffies;
}
- spin_unlock_irq(&priv->lock);
+ orinoco_unlock_irq(priv);
return;
disable:
@@ -1984,7 +2035,7 @@ int orinoco_init(struct orinoco_private *priv)
priv->nicbuf_size = IEEE80211_MAX_FRAME_LEN + ETH_HLEN;
/* Initialize the firmware */
- err = hermes_init(hw);
+ err = hw->ops->init(hw);
if (err != 0) {
dev_err(dev, "Failed to initialize firmware (err = %d)\n",
err);
@@ -2067,9 +2118,9 @@ int orinoco_init(struct orinoco_private *priv)
/* Make the hardware available, as long as it hasn't been
* removed elsewhere (e.g. by PCMCIA hot unplug) */
- spin_lock_irq(&priv->lock);
+ orinoco_lock_irq(priv);
priv->hw_unavailable--;
- spin_unlock_irq(&priv->lock);
+ orinoco_unlock_irq(priv);
dev_dbg(dev, "Ready\n");
@@ -2192,7 +2243,8 @@ EXPORT_SYMBOL(alloc_orinocodev);
*/
int orinoco_if_add(struct orinoco_private *priv,
unsigned long base_addr,
- unsigned int irq)
+ unsigned int irq,
+ const struct net_device_ops *ops)
{
struct wiphy *wiphy = priv_to_wiphy(priv);
struct wireless_dev *wdev;
@@ -2211,16 +2263,21 @@ int orinoco_if_add(struct orinoco_private *priv,
/* Setup / override net_device fields */
dev->ieee80211_ptr = wdev;
- dev->netdev_ops = &orinoco_netdev_ops;
dev->watchdog_timeo = HZ; /* 1 second timeout */
dev->wireless_handlers = &orinoco_handler_def;
#ifdef WIRELESS_SPY
dev->wireless_data = &priv->wireless_data;
#endif
+ /* Default to standard ops if not set */
+ if (ops)
+ dev->netdev_ops = ops;
+ else
+ dev->netdev_ops = &orinoco_netdev_ops;
+
/* we use the default eth_mac_addr for setting the MAC addr */
/* Reserve space in skb for the SNAP header */
- dev->hard_header_len += ENCAPS_OVERHEAD;
+ dev->needed_headroom = ENCAPS_OVERHEAD;
netif_carrier_off(dev);
@@ -2305,7 +2362,7 @@ int orinoco_up(struct orinoco_private *priv)
unsigned long flags;
int err;
- spin_lock_irqsave(&priv->lock, flags);
+ priv->hw.ops->lock_irqsave(&priv->lock, &flags);
err = orinoco_reinit_firmware(priv);
if (err) {
@@ -2325,7 +2382,7 @@ int orinoco_up(struct orinoco_private *priv)
}
exit:
- spin_unlock_irqrestore(&priv->lock, flags);
+ priv->hw.ops->unlock_irqrestore(&priv->lock, &flags);
return 0;
}
@@ -2337,7 +2394,7 @@ void orinoco_down(struct orinoco_private *priv)
unsigned long flags;
int err;
- spin_lock_irqsave(&priv->lock, flags);
+ priv->hw.ops->lock_irqsave(&priv->lock, &flags);
err = __orinoco_down(priv);
if (err)
printk(KERN_WARNING "%s: Error %d downing interface\n",
@@ -2345,7 +2402,7 @@ void orinoco_down(struct orinoco_private *priv)
netif_device_detach(dev);
priv->hw_unavailable++;
- spin_unlock_irqrestore(&priv->lock, flags);
+ priv->hw.ops->unlock_irqrestore(&priv->lock, &flags);
}
EXPORT_SYMBOL(orinoco_down);
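Because orinoco_if_add() now takes an optional struct net_device_ops and the common handlers above are exported, a bus driver that only needs its own transmit path can reuse everything else. A sketch under those assumptions; example_xmit and the probe wiring are hypothetical, not part of this patch:

#include <linux/netdevice.h>
#include "orinoco.h"

/* bus-specific transmit handler (hypothetical) */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev);

static const struct net_device_ops example_netdev_ops = {
	.ndo_open		= orinoco_open,
	.ndo_stop		= orinoco_stop,
	.ndo_start_xmit		= example_xmit,
	.ndo_set_multicast_list	= orinoco_set_multicast_list,
	.ndo_change_mtu		= orinoco_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= orinoco_tx_timeout,
	.ndo_get_stats		= orinoco_get_stats,
};

/* In the bus driver's probe path, after alloc_orinocodev() and orinoco_init():
 *	err = orinoco_if_add(priv, base_addr, irq, &example_netdev_ops);
 * Passing NULL keeps the default orinoco_netdev_ops.
 */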
diff --git a/drivers/net/wireless/orinoco/main.h b/drivers/net/wireless/orinoco/main.h
index 21ab36cd76c7..4dadf9880a97 100644
--- a/drivers/net/wireless/orinoco/main.h
+++ b/drivers/net/wireless/orinoco/main.h
@@ -33,18 +33,6 @@ int orinoco_commit(struct orinoco_private *priv);
void orinoco_reset(struct work_struct *work);
/* Information element helpers - find a home for these... */
-static inline u8 *orinoco_get_ie(u8 *data, size_t len,
- enum ieee80211_eid eid)
-{
- u8 *p = data;
- while ((p + 2) < (data + len)) {
- if (p[0] == eid)
- return p;
- p += p[1] + 2;
- }
- return NULL;
-}
-
#define WPA_OUI_TYPE "\x00\x50\xF2\x01"
#define WPA_SELECTOR_LEN 4
static inline u8 *orinoco_get_wpa_ie(u8 *data, size_t len)
diff --git a/drivers/net/wireless/orinoco/orinoco.h b/drivers/net/wireless/orinoco/orinoco.h
index 665ef56f8382..a6da86e0a70f 100644
--- a/drivers/net/wireless/orinoco/orinoco.h
+++ b/drivers/net/wireless/orinoco/orinoco.h
@@ -131,6 +131,8 @@ struct orinoco_private {
u16 ap_density, rts_thresh;
u16 pm_on, pm_mcast, pm_period, pm_timeout;
u16 preamble;
+ u16 short_retry_limit, long_retry_limit;
+ u16 retry_lifetime;
#ifdef WIRELESS_SPY
struct iw_spy_data spy_data; /* iwspy support */
struct iw_public_data wireless_data;
@@ -188,12 +190,30 @@ extern void free_orinocodev(struct orinoco_private *priv);
extern int orinoco_init(struct orinoco_private *priv);
extern int orinoco_if_add(struct orinoco_private *priv,
unsigned long base_addr,
- unsigned int irq);
+ unsigned int irq,
+ const struct net_device_ops *ops);
extern void orinoco_if_del(struct orinoco_private *priv);
extern int orinoco_up(struct orinoco_private *priv);
extern void orinoco_down(struct orinoco_private *priv);
extern irqreturn_t orinoco_interrupt(int irq, void *dev_id);
+extern void __orinoco_ev_info(struct net_device *dev, hermes_t *hw);
+extern void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw);
+
+int orinoco_process_xmit_skb(struct sk_buff *skb,
+ struct net_device *dev,
+ struct orinoco_private *priv,
+ int *tx_control,
+ u8 *mic);
+
+/* Common ndo functions exported for reuse by orinoco_usb */
+int orinoco_open(struct net_device *dev);
+int orinoco_stop(struct net_device *dev);
+struct net_device_stats *orinoco_get_stats(struct net_device *dev);
+void orinoco_set_multicast_list(struct net_device *dev);
+int orinoco_change_mtu(struct net_device *dev, int new_mtu);
+void orinoco_tx_timeout(struct net_device *dev);
+
/********************************************************************/
/* Locking and synchronization functions */
/********************************************************************/
@@ -201,11 +221,11 @@ extern irqreturn_t orinoco_interrupt(int irq, void *dev_id);
static inline int orinoco_lock(struct orinoco_private *priv,
unsigned long *flags)
{
- spin_lock_irqsave(&priv->lock, *flags);
+ priv->hw.ops->lock_irqsave(&priv->lock, flags);
if (priv->hw_unavailable) {
DEBUG(1, "orinoco_lock() called with hw_unavailable (dev=%p)\n",
priv->ndev);
- spin_unlock_irqrestore(&priv->lock, *flags);
+ priv->hw.ops->unlock_irqrestore(&priv->lock, flags);
return -EBUSY;
}
return 0;
@@ -214,7 +234,17 @@ static inline int orinoco_lock(struct orinoco_private *priv,
static inline void orinoco_unlock(struct orinoco_private *priv,
unsigned long *flags)
{
- spin_unlock_irqrestore(&priv->lock, *flags);
+ priv->hw.ops->unlock_irqrestore(&priv->lock, flags);
+}
+
+static inline void orinoco_lock_irq(struct orinoco_private *priv)
+{
+ priv->hw.ops->lock_irq(&priv->lock);
+}
+
+static inline void orinoco_unlock_irq(struct orinoco_private *priv)
+{
+ priv->hw.ops->unlock_irq(&priv->lock);
}
/*** Navigate from net_device to orinoco_private ***/
diff --git a/drivers/net/wireless/orinoco/orinoco_cs.c b/drivers/net/wireless/orinoco/orinoco_cs.c
index 1d4ada188eda..b16d5db52a4d 100644
--- a/drivers/net/wireless/orinoco/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco/orinoco_cs.c
@@ -50,7 +50,6 @@ MODULE_PARM_DESC(ignore_cis_vcc, "Allow voltage mismatch between card and socket
* struct orinoco_private */
struct orinoco_pccard {
struct pcmcia_device *p_dev;
- dev_node_t node;
/* Used to handle hard reset */
/* yuck, we need this hack to work around the insanity of the
@@ -119,10 +118,6 @@ orinoco_cs_probe(struct pcmcia_device *link)
card->p_dev = link;
link->priv = priv;
- /* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = orinoco_interrupt;
-
/* General socket configuration defaults can go here. In this
* client, we assume very little, and rely on the CIS for
* almost everything. In most clients, many details (i.e.,
@@ -144,8 +139,7 @@ static void orinoco_cs_detach(struct pcmcia_device *link)
{
struct orinoco_private *priv = link->priv;
- if (link->dev_node)
- orinoco_if_del(priv);
+ orinoco_if_del(priv);
orinoco_cs_release(link);
@@ -230,7 +224,6 @@ static int
orinoco_cs_config(struct pcmcia_device *link)
{
struct orinoco_private *priv = link->priv;
- struct orinoco_pccard *card = priv->card;
hermes_t *hw = &priv->hw;
int ret;
void __iomem *mem;
@@ -258,12 +251,7 @@ orinoco_cs_config(struct pcmcia_device *link)
goto failed;
}
- /*
- * Allocate an interrupt line. Note that this does not assign
- * a handler to the interrupt, unless the 'Handler' member of
- * the irq structure is initialized.
- */
- ret = pcmcia_request_irq(link, &link->irq);
+ ret = pcmcia_request_irq(link, orinoco_interrupt);
if (ret)
goto failed;
@@ -285,9 +273,6 @@ orinoco_cs_config(struct pcmcia_device *link)
if (ret)
goto failed;
- /* Ok, we have the configuration, prepare to register the netdev */
- card->node.major = card->node.minor = 0;
-
/* Initialise the main driver */
if (orinoco_init(priv) != 0) {
printk(KERN_ERR PFX "orinoco_init() failed\n");
@@ -296,17 +281,11 @@ orinoco_cs_config(struct pcmcia_device *link)
/* Register an interface with the stack */
if (orinoco_if_add(priv, link->io.BasePort1,
- link->irq.AssignedIRQ) != 0) {
+ link->irq, NULL) != 0) {
printk(KERN_ERR PFX "orinoco_if_add() failed\n");
goto failed;
}
- /* At this point, the dev_node_t structure(s) needs to be
- * initialized and arranged in a linked list at link->dev_node. */
- strcpy(card->node.dev_name, priv->ndev->name);
- link->dev_node = &card->node; /* link->dev_node being non-NULL is also
- * used to indicate that the
- * net_device has been registered */
return 0;
failed:
@@ -327,9 +306,9 @@ orinoco_cs_release(struct pcmcia_device *link)
/* We're committed to taking the device away now, so mark the
* hardware as unavailable */
- spin_lock_irqsave(&priv->lock, flags);
+ priv->hw.ops->lock_irqsave(&priv->lock, &flags);
priv->hw_unavailable++;
- spin_unlock_irqrestore(&priv->lock, flags);
+ priv->hw.ops->unlock_irqrestore(&priv->lock, &flags);
pcmcia_disable_device(link);
if (priv->hw.iobase)
@@ -374,87 +353,90 @@ static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
"Pavel Roskin <proski@gnu.org>, et al)";
static struct pcmcia_device_id orinoco_cs_ids[] = {
- PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7100), /* SonicWALL Long Range Wireless Card */
- PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7300), /* Sohoware NCP110, Philips 802.11b */
- PCMCIA_DEVICE_MANF_CARD(0x0089, 0x0002), /* AnyPoint(TM) Wireless II PC Card */
PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777), /* 3Com AirConnect PCI 777A */
- PCMCIA_DEVICE_MANF_CARD(0x0126, 0x8000), /* PROXIM RangeLAN-DS/LAN PC CARD */
- PCMCIA_DEVICE_MANF_CARD(0x0138, 0x0002), /* Compaq WL100 11 Mbps Wireless Adapter */
PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0002), /* Lucent Orinoco and old Intersil */
PCMCIA_DEVICE_MANF_CARD(0x016b, 0x0001), /* Ericsson WLAN Card C11 */
PCMCIA_DEVICE_MANF_CARD(0x01eb, 0x080a), /* Nortel Networks eMobility 802.11 Wireless Adapter */
- PCMCIA_DEVICE_MANF_CARD(0x01ff, 0x0008), /* Intermec MobileLAN 11Mbps 802.11b WLAN Card */
- PCMCIA_DEVICE_MANF_CARD(0x0250, 0x0002), /* Samsung SWL2000-N 11Mb/s WLAN Card */
PCMCIA_DEVICE_MANF_CARD(0x0261, 0x0002), /* AirWay 802.11 Adapter (PCMCIA) */
PCMCIA_DEVICE_MANF_CARD(0x0268, 0x0001), /* ARtem Onair */
PCMCIA_DEVICE_MANF_CARD(0x0268, 0x0003), /* ARtem Onair Comcard 11 */
PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0305), /* Buffalo WLI-PCM-S11 */
- PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1612), /* Linksys WPC11 Version 2.5 */
- PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1613), /* Linksys WPC11 Version 3 */
- PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0002), /* Compaq HNW-100 11 Mbps Wireless Adapter */
- PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0673), /* Linksys WCF12 Wireless CompactFlash Card */
PCMCIA_DEVICE_MANF_CARD(0x02aa, 0x0002), /* ASUS SpaceLink WL-100 */
PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x0002), /* SpeedStream SS1021 Wireless Adapter */
PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x3021), /* SpeedStream Wireless Adapter */
PCMCIA_DEVICE_MANF_CARD(0x14ea, 0xb001), /* PLANEX RoadLannerWave GW-NS11H */
+ PCMCIA_DEVICE_PROD_ID12("3Com", "3CRWE737A AirConnect Wireless LAN PC Card", 0x41240e5b, 0x56010af3),
+ PCMCIA_DEVICE_PROD_ID12("Allied Telesyn", "AT-WCL452 Wireless PCMCIA Radio", 0x5cd01705, 0x4271660f),
+ PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11B_CF_CARD_25", 0x78fc06ee, 0x45a50c1e),
+ PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11b_PC_CARD_25", 0x78fc06ee, 0xdb9aa842),
+ PCMCIA_DEVICE_PROD_ID12("Avaya Communication", "Avaya Wireless PC Card", 0xd8a43b78, 0x0d341169),
+ PCMCIA_DEVICE_PROD_ID12("BENQ", "AWL100 PCMCIA ADAPTER", 0x35dadc74, 0x01f7fedb),
+ PCMCIA_DEVICE_PROD_ID12("Cabletron", "RoamAbout 802.11 DS", 0x32d445f5, 0xedeffd90),
+ PCMCIA_DEVICE_PROD_ID12("D-Link Corporation", "D-Link DWL-650H 11Mbps WLAN Adapter", 0xef544d24, 0xcd8ea916),
+ PCMCIA_DEVICE_PROD_ID12("ELSA", "AirLancer MC-11", 0x4507a33a, 0xef54f0e3),
+ PCMCIA_DEVICE_PROD_ID12("HyperLink", "Wireless PC Card 11Mbps", 0x56cc3f1a, 0x0bcf220c),
+ PCMCIA_DEVICE_PROD_ID12("Intel", "PRO/Wireless 2011 LAN PC Card", 0x816cc815, 0x07f58077),
+ PCMCIA_DEVICE_PROD_ID12("LeArtery", "SYNCBYAIR 11Mbps Wireless LAN PC Card", 0x7e3b326a, 0x49893e92),
+ PCMCIA_DEVICE_PROD_ID12("Lucent Technologies", "WaveLAN/IEEE", 0x23eb9949, 0xc562e72a),
+ PCMCIA_DEVICE_PROD_ID12("MELCO", "WLI-PCM-L11", 0x481e0094, 0x7360e410),
+ PCMCIA_DEVICE_PROD_ID12("MELCO", "WLI-PCM-L11G", 0x481e0094, 0xf57ca4b3),
+ PCMCIA_DEVICE_PROD_ID12("NCR", "WaveLAN/IEEE", 0x24358cd4, 0xc562e72a),
+ PCMCIA_DEVICE_PROD_ID12("Nortel Networks", "emobility 802.11 Wireless LAN PC Card", 0x2d617ea0, 0x88cd5767),
+ PCMCIA_DEVICE_PROD_ID12("OTC", "Wireless AirEZY 2411-PCC WLAN Card", 0x4ac44287, 0x235a6bed),
+ PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PC CARD HARMONY 80211B", 0xc6536a5e, 0x090c3cd9),
+ PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PCI CARD HARMONY 80211B", 0xc6536a5e, 0x9f494e26),
+ PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "11Mbps WLAN Card", 0x43d74cb4, 0x579bd91b),
+ PCMCIA_DEVICE_PROD_ID12("Symbol Technologies", "LA4111 Spectrum24 Wireless LAN PC Card", 0x3f02b4d6, 0x3663cb0e),
+#ifdef CONFIG_HERMES_PRISM
+ /* Only entries that certainly identify Prism chipset */
+ PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7100), /* SonicWALL Long Range Wireless Card */
+ PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7300), /* Sohoware NCP110, Philips 802.11b */
+ PCMCIA_DEVICE_MANF_CARD(0x0089, 0x0002), /* AnyPoint(TM) Wireless II PC Card */
+ PCMCIA_DEVICE_MANF_CARD(0x0126, 0x8000), /* PROXIM RangeLAN-DS/LAN PC CARD */
+ PCMCIA_DEVICE_MANF_CARD(0x0138, 0x0002), /* Compaq WL100 11 Mbps Wireless Adapter */
+ PCMCIA_DEVICE_MANF_CARD(0x01ff, 0x0008), /* Intermec MobileLAN 11Mbps 802.11b WLAN Card */
+ PCMCIA_DEVICE_MANF_CARD(0x0250, 0x0002), /* Samsung SWL2000-N 11Mb/s WLAN Card */
+ PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1612), /* Linksys WPC11 Version 2.5 */
+ PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1613), /* Linksys WPC11 Version 3 */
+ PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0002), /* Compaq HNW-100 11 Mbps Wireless Adapter */
+ PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0673), /* Linksys WCF12 Wireless CompactFlash Card */
PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300), /* Airvast WN-100 */
PCMCIA_DEVICE_MANF_CARD(0x9005, 0x0021), /* Adaptec Ultra Wireless ANW-8030 */
PCMCIA_DEVICE_MANF_CARD(0xc001, 0x0008), /* CONTEC FLEXSCAN/FX-DDS110-PCC */
PCMCIA_DEVICE_MANF_CARD(0xc250, 0x0002), /* Conceptronic CON11Cpro, EMTAC A2424i */
PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0002), /* Safeway 802.11b, ZCOMAX AirRunner/XI-300 */
PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005), /* D-Link DCF660, Sandisk Connect SDWCFB-000 */
- PCMCIA_DEVICE_PROD_ID12(" ", "IEEE 802.11 Wireless LAN/PC Card", 0x3b6e20c8, 0xefccafe9),
- PCMCIA_DEVICE_PROD_ID12("3Com", "3CRWE737A AirConnect Wireless LAN PC Card", 0x41240e5b, 0x56010af3),
+ PCMCIA_DEVICE_PROD_ID123("Instant Wireless ", " Network PC CARD", "Version 01.02", 0x11d901af, 0x6e9bd926, 0x4b74baa0),
PCMCIA_DEVICE_PROD_ID12("ACTIONTEC", "PRISM Wireless LAN PC Card", 0x393089da, 0xa71e69d5),
PCMCIA_DEVICE_PROD_ID12("Addtron", "AWP-100 Wireless PCMCIA", 0xe6ec52ce, 0x08649af2),
- PCMCIA_DEVICE_PROD_ID12("Allied Telesyn", "AT-WCL452 Wireless PCMCIA Radio", 0x5cd01705, 0x4271660f),
- PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11b_PC_CARD_25", 0x78fc06ee, 0xdb9aa842),
- PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11B_CF_CARD_25", 0x78fc06ee, 0x45a50c1e),
- PCMCIA_DEVICE_PROD_ID12("Avaya Communication", "Avaya Wireless PC Card", 0xd8a43b78, 0x0d341169),
- PCMCIA_DEVICE_PROD_ID12("BENQ", "AWL100 PCMCIA ADAPTER", 0x35dadc74, 0x01f7fedb),
- PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-PCM-L11G", 0x2decece3, 0xf57ca4b3),
PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-CF-S11G", 0x2decece3, 0x82067c18),
- PCMCIA_DEVICE_PROD_ID12("Cabletron", "RoamAbout 802.11 DS", 0x32d445f5, 0xedeffd90),
+ PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-PCM-L11G", 0x2decece3, 0xf57ca4b3),
PCMCIA_DEVICE_PROD_ID12("Compaq", "WL200_11Mbps_Wireless_PCI_Card", 0x54f7c49c, 0x15a75e5b),
PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCC-11", 0x5261440f, 0xa6405584),
PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCCA-11", 0x5261440f, 0xdf6115f9),
PCMCIA_DEVICE_PROD_ID12("corega_K.K.", "Wireless_LAN_PCCB-11", 0x29e33311, 0xee7a27ae),
+ PCMCIA_DEVICE_PROD_ID12("Digital Data Communications", "WPC-0100", 0xfdd73470, 0xe0b6f146),
PCMCIA_DEVICE_PROD_ID12("D", "Link DRC-650 11Mbps WLAN Card", 0x71b18589, 0xf144e3ac),
PCMCIA_DEVICE_PROD_ID12("D", "Link DWL-650 11Mbps WLAN Card", 0x71b18589, 0xb6f1b0ab),
- PCMCIA_DEVICE_PROD_ID12("D-Link Corporation", "D-Link DWL-650H 11Mbps WLAN Adapter", 0xef544d24, 0xcd8ea916),
- PCMCIA_DEVICE_PROD_ID12("Digital Data Communications", "WPC-0100", 0xfdd73470, 0xe0b6f146),
- PCMCIA_DEVICE_PROD_ID12("ELSA", "AirLancer MC-11", 0x4507a33a, 0xef54f0e3),
- PCMCIA_DEVICE_PROD_ID12("HyperLink", "Wireless PC Card 11Mbps", 0x56cc3f1a, 0x0bcf220c),
- PCMCIA_DEVICE_PROD_ID123("Instant Wireless ", " Network PC CARD", "Version 01.02", 0x11d901af, 0x6e9bd926, 0x4b74baa0),
- PCMCIA_DEVICE_PROD_ID12("Intel", "PRO/Wireless 2011 LAN PC Card", 0x816cc815, 0x07f58077),
+ PCMCIA_DEVICE_PROD_ID12(" ", "IEEE 802.11 Wireless LAN/PC Card", 0x3b6e20c8, 0xefccafe9),
PCMCIA_DEVICE_PROD_ID12("INTERSIL", "HFA384x/IEEE", 0x74c5e40d, 0xdb472a18),
PCMCIA_DEVICE_PROD_ID12("INTERSIL", "I-GATE 11M PC Card / PC Card plus", 0x74c5e40d, 0x8304ff77),
PCMCIA_DEVICE_PROD_ID12("Intersil", "PRISM 2_5 PCMCIA ADAPTER", 0x4b801a17, 0x6345a0bf),
- PCMCIA_DEVICE_PROD_ID12("LeArtery", "SYNCBYAIR 11Mbps Wireless LAN PC Card", 0x7e3b326a, 0x49893e92),
PCMCIA_DEVICE_PROD_ID12("Linksys", "Wireless CompactFlash Card", 0x0733cc81, 0x0c52f395),
- PCMCIA_DEVICE_PROD_ID12("Lucent Technologies", "WaveLAN/IEEE", 0x23eb9949, 0xc562e72a),
- PCMCIA_DEVICE_PROD_ID12("MELCO", "WLI-PCM-L11", 0x481e0094, 0x7360e410),
- PCMCIA_DEVICE_PROD_ID12("MELCO", "WLI-PCM-L11G", 0x481e0094, 0xf57ca4b3),
PCMCIA_DEVICE_PROD_ID12("Microsoft", "Wireless Notebook Adapter MN-520", 0x5961bf85, 0x6eec8c01),
- PCMCIA_DEVICE_PROD_ID12("NCR", "WaveLAN/IEEE", 0x24358cd4, 0xc562e72a),
- PCMCIA_DEVICE_PROD_ID12("NETGEAR MA401 Wireless PC", "Card", 0xa37434e9, 0x9762e8f1),
PCMCIA_DEVICE_PROD_ID12("NETGEAR MA401RA Wireless PC", "Card", 0x0306467f, 0x9762e8f1),
- PCMCIA_DEVICE_PROD_ID12("Nortel Networks", "emobility 802.11 Wireless LAN PC Card", 0x2d617ea0, 0x88cd5767),
+ PCMCIA_DEVICE_PROD_ID12("NETGEAR MA401 Wireless PC", "Card", 0xa37434e9, 0x9762e8f1),
PCMCIA_DEVICE_PROD_ID12("OEM", "PRISM2 IEEE 802.11 PC-Card", 0xfea54c90, 0x48f2bdd6),
- PCMCIA_DEVICE_PROD_ID12("OTC", "Wireless AirEZY 2411-PCC WLAN Card", 0x4ac44287, 0x235a6bed),
PCMCIA_DEVICE_PROD_ID12("PLANEX", "GeoWave/GW-CF110", 0x209f40ab, 0xd9715264),
PCMCIA_DEVICE_PROD_ID12("PLANEX", "GeoWave/GW-NS110", 0x209f40ab, 0x46263178),
- PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PC CARD HARMONY 80211B", 0xc6536a5e, 0x090c3cd9),
- PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PCI CARD HARMONY 80211B", 0xc6536a5e, 0x9f494e26),
- PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "11Mbps WLAN Card", 0x43d74cb4, 0x579bd91b),
PCMCIA_DEVICE_PROD_ID12("SMC", "SMC2532W-B EliteConnect Wireless Adapter", 0xc4f8b18b, 0x196bd757),
PCMCIA_DEVICE_PROD_ID12("SMC", "SMC2632W", 0xc4f8b18b, 0x474a1f2a),
- PCMCIA_DEVICE_PROD_ID12("Symbol Technologies", "LA4111 Spectrum24 Wireless LAN PC Card", 0x3f02b4d6, 0x3663cb0e),
PCMCIA_DEVICE_PROD_ID12("ZoomAir 11Mbps High", "Rate wireless Networking", 0x273fe3db, 0x32a1eaee),
PCMCIA_DEVICE_PROD_ID3("HFA3863", 0x355cb092),
PCMCIA_DEVICE_PROD_ID3("ISL37100P", 0x630d52b2),
PCMCIA_DEVICE_PROD_ID3("ISL37101P-10", 0xdd97a26b),
PCMCIA_DEVICE_PROD_ID3("ISL37300P", 0xc9049a39),
+#endif
PCMCIA_DEVICE_NULL,
};
MODULE_DEVICE_TABLE(pcmcia, orinoco_cs_ids);
diff --git a/drivers/net/wireless/orinoco/orinoco_nortel.c b/drivers/net/wireless/orinoco/orinoco_nortel.c
index 075f446b3139..bc3ea0b67a4f 100644
--- a/drivers/net/wireless/orinoco/orinoco_nortel.c
+++ b/drivers/net/wireless/orinoco/orinoco_nortel.c
@@ -220,7 +220,7 @@ static int orinoco_nortel_init_one(struct pci_dev *pdev,
goto fail;
}
- err = orinoco_if_add(priv, 0, 0);
+ err = orinoco_if_add(priv, 0, 0, NULL);
if (err) {
printk(KERN_ERR PFX "orinoco_if_add() failed\n");
goto fail;
diff --git a/drivers/net/wireless/orinoco/orinoco_pci.c b/drivers/net/wireless/orinoco/orinoco_pci.c
index bda5317cc596..468197f86673 100644
--- a/drivers/net/wireless/orinoco/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco/orinoco_pci.c
@@ -170,7 +170,7 @@ static int orinoco_pci_init_one(struct pci_dev *pdev,
goto fail;
}
- err = orinoco_if_add(priv, 0, 0);
+ err = orinoco_if_add(priv, 0, 0, NULL);
if (err) {
printk(KERN_ERR PFX "orinoco_if_add() failed\n");
goto fail;
diff --git a/drivers/net/wireless/orinoco/orinoco_plx.c b/drivers/net/wireless/orinoco/orinoco_plx.c
index e0d5874ab42f..9358f4d2307b 100644
--- a/drivers/net/wireless/orinoco/orinoco_plx.c
+++ b/drivers/net/wireless/orinoco/orinoco_plx.c
@@ -259,7 +259,7 @@ static int orinoco_plx_init_one(struct pci_dev *pdev,
goto fail;
}
- err = orinoco_if_add(priv, 0, 0);
+ err = orinoco_if_add(priv, 0, 0, NULL);
if (err) {
printk(KERN_ERR PFX "orinoco_if_add() failed\n");
goto fail;
diff --git a/drivers/net/wireless/orinoco/orinoco_tmd.c b/drivers/net/wireless/orinoco/orinoco_tmd.c
index 88cbc7902aa0..784605f0af15 100644
--- a/drivers/net/wireless/orinoco/orinoco_tmd.c
+++ b/drivers/net/wireless/orinoco/orinoco_tmd.c
@@ -156,7 +156,7 @@ static int orinoco_tmd_init_one(struct pci_dev *pdev,
goto fail;
}
- err = orinoco_if_add(priv, 0, 0);
+ err = orinoco_if_add(priv, 0, 0, NULL);
if (err) {
printk(KERN_ERR PFX "orinoco_if_add() failed\n");
goto fail;
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
new file mode 100644
index 000000000000..78f089baa8c9
--- /dev/null
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -0,0 +1,1795 @@
+/*
+ * USB Orinoco driver
+ *
+ * Copyright (c) 2003 Manuel Estrada Sainz
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use your
+ * version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ *
+ * Queueing code based on linux-wlan-ng 0.2.1-pre5
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ *
+ * The license is the same as above.
+ *
+ * Initially based on USB Skeleton driver - 0.7
+ *
+ * Copyright (c) 2001 Greg Kroah-Hartman (greg@kroah.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * NOTE: The original USB Skeleton driver is GPL, but all that code is
+ * gone so MPL/GPL applies.
+ */
+
+#define DRIVER_NAME "orinoco_usb"
+#define PFX DRIVER_NAME ": "
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/poll.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/fcntl.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/smp_lock.h>
+#include <linux/usb.h>
+#include <linux/timer.h>
+
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+#include <linux/firmware.h>
+
+#include "mic.h"
+#include "orinoco.h"
+
+#ifndef URB_ASYNC_UNLINK
+#define URB_ASYNC_UNLINK 0
+#endif
+
+/* 802.2 LLC/SNAP header used for Ethernet encapsulation over 802.11 */
+static const u8 encaps_hdr[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
+#define ENCAPS_OVERHEAD (sizeof(encaps_hdr) + 2)
+
+struct header_struct {
+ /* 802.3 */
+ u8 dest[ETH_ALEN];
+ u8 src[ETH_ALEN];
+ __be16 len;
+ /* 802.2 */
+ u8 dsap;
+ u8 ssap;
+ u8 ctrl;
+ /* SNAP */
+ u8 oui[3];
+ __be16 ethertype;
+} __attribute__ ((packed));
+
+struct ez_usb_fw {
+ u16 size;
+ const u8 *code;
+};
+
+static struct ez_usb_fw firmware = {
+ .size = 0,
+ .code = NULL,
+};
+
+#ifdef CONFIG_USB_DEBUG
+static int debug = 1;
+#else
+static int debug;
+#endif
+
+/* Debugging macros */
+#undef dbg
+#define dbg(format, arg...) \
+ do { if (debug) printk(KERN_DEBUG PFX "%s: " format "\n", \
+ __func__ , ## arg); } while (0)
+#undef err
+#define err(format, arg...) \
+ do { printk(KERN_ERR PFX format "\n", ## arg); } while (0)
+
+/* Module parameters */
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debug enabled or not");
+
+MODULE_FIRMWARE("orinoco_ezusb_fw");
+
+/*
+ * Under some conditions, the card gets stuck and stops paying attention
+ * to the world (i.e. data communication stalls) until we do something to
+ * it. Sending an INQ_TALLIES command seems to be enough and should be
+ * harmless otherwise. This behaviour has been observed when using the
+ * driver on a systemimager client during installation. In the past a
+ * timer was used to send INQ_TALLIES commands when there was no other
+ * activity, but it was troublesome and was removed.
+ */
+
+#define USB_COMPAQ_VENDOR_ID 0x049f /* Compaq Computer Corp. */
+#define USB_COMPAQ_WL215_ID 0x001f /* Compaq WL215 USB Adapter */
+#define USB_COMPAQ_W200_ID 0x0076 /* Compaq W200 USB Adapter */
+#define USB_HP_WL215_ID 0x0082 /* Compaq WL215 USB Adapter */
+
+#define USB_MELCO_VENDOR_ID 0x0411
+#define USB_BUFFALO_L11_ID 0x0006 /* BUFFALO WLI-USB-L11 */
+#define USB_BUFFALO_L11G_WR_ID 0x000B /* BUFFALO WLI-USB-L11G-WR */
+#define USB_BUFFALO_L11G_ID 0x000D /* BUFFALO WLI-USB-L11G */
+
+#define USB_LUCENT_VENDOR_ID 0x047E /* Lucent Technologies */
+#define USB_LUCENT_ORINOCO_ID 0x0300 /* Lucent/Agere Orinoco USB Client */
+
+#define USB_AVAYA8_VENDOR_ID 0x0D98
+#define USB_AVAYAE_VENDOR_ID 0x0D9E
+#define USB_AVAYA_WIRELESS_ID 0x0300 /* Avaya Wireless USB Card */
+
+#define USB_AGERE_VENDOR_ID 0x0D4E /* Agere Systems */
+#define USB_AGERE_MODEL0801_ID 0x1000 /* Wireless USB Card Model 0801 */
+#define USB_AGERE_MODEL0802_ID 0x1001 /* Wireless USB Card Model 0802 */
+#define USB_AGERE_REBRANDED_ID 0x047A /* WLAN USB Card */
+
+#define USB_ELSA_VENDOR_ID 0x05CC
+#define USB_ELSA_AIRLANCER_ID 0x3100 /* ELSA AirLancer USB-11 */
+
+#define USB_LEGEND_VENDOR_ID 0x0E7C
+#define USB_LEGEND_JOYNET_ID 0x0300 /* Joynet WLAN USB Card */
+
+#define USB_SAMSUNG_VENDOR_ID 0x04E8
+#define USB_SAMSUNG_SEW2001U1_ID 0x5002 /* Samsung SEW-2001u Card */
+#define USB_SAMSUNG_SEW2001U2_ID 0x5B11 /* Samsung SEW-2001u Card */
+#define USB_SAMSUNG_SEW2003U_ID 0x7011 /* Samsung SEW-2003U Card */
+
+#define USB_IGATE_VENDOR_ID 0x0681
+#define USB_IGATE_IGATE_11M_ID 0x0012 /* I-GATE 11M USB Card */
+
+#define USB_FUJITSU_VENDOR_ID 0x0BF8
+#define USB_FUJITSU_E1100_ID 0x1002 /* connect2AIR WLAN E-1100 USB */
+
+#define USB_2WIRE_VENDOR_ID 0x1630
+#define USB_2WIRE_WIRELESS_ID 0xff81 /* 2Wire Wireless USB adapter */
+
+
+#define EZUSB_REQUEST_FW_TRANS 0xA0
+#define EZUSB_REQUEST_TRIGER 0xAA
+#define EZUSB_REQUEST_TRIG_AC 0xAC
+#define EZUSB_CPUCS_REG 0x7F92
+
+#define EZUSB_RID_TX 0x0700
+#define EZUSB_RID_RX 0x0701
+#define EZUSB_RID_INIT1 0x0702
+#define EZUSB_RID_ACK 0x0710
+#define EZUSB_RID_READ_PDA 0x0800
+#define EZUSB_RID_PROG_INIT 0x0852
+#define EZUSB_RID_PROG_SET_ADDR 0x0853
+#define EZUSB_RID_PROG_BYTES 0x0854
+#define EZUSB_RID_PROG_END 0x0855
+#define EZUSB_RID_DOCMD 0x0860
+
+/* Recognize info frames */
+#define EZUSB_IS_INFO(id) ((id >= 0xF000) && (id <= 0xF2FF))
+
+#define EZUSB_MAGIC 0x0210
+
+#define EZUSB_FRAME_DATA 1
+#define EZUSB_FRAME_CONTROL 2
+
+#define DEF_TIMEOUT (3*HZ)
+
+#define BULK_BUF_SIZE 2048
+
+#define MAX_DL_SIZE (BULK_BUF_SIZE - sizeof(struct ezusb_packet))
+
+#define FW_BUF_SIZE 64
+#define FW_VAR_OFFSET_PTR 0x359
+#define FW_VAR_VALUE 0
+#define FW_HOLE_START 0x100
+#define FW_HOLE_END 0x300
+
+struct ezusb_packet {
+ __le16 magic; /* 0x0210 */
+ u8 req_reply_count;
+ u8 ans_reply_count;
+ __le16 frame_type; /* 0x01 for data frames, 0x02 otherwise */
+ __le16 size; /* transport size */
+ __le16 crc; /* CRC up to here */
+ __le16 hermes_len;
+ __le16 hermes_rid;
+ u8 data[0];
+} __attribute__ ((packed));
+
+/* Table of devices that work or may work with this driver */
+static struct usb_device_id ezusb_table[] = {
+ {USB_DEVICE(USB_COMPAQ_VENDOR_ID, USB_COMPAQ_WL215_ID)},
+ {USB_DEVICE(USB_COMPAQ_VENDOR_ID, USB_HP_WL215_ID)},
+ {USB_DEVICE(USB_COMPAQ_VENDOR_ID, USB_COMPAQ_W200_ID)},
+ {USB_DEVICE(USB_MELCO_VENDOR_ID, USB_BUFFALO_L11_ID)},
+ {USB_DEVICE(USB_MELCO_VENDOR_ID, USB_BUFFALO_L11G_WR_ID)},
+ {USB_DEVICE(USB_MELCO_VENDOR_ID, USB_BUFFALO_L11G_ID)},
+ {USB_DEVICE(USB_LUCENT_VENDOR_ID, USB_LUCENT_ORINOCO_ID)},
+ {USB_DEVICE(USB_AVAYA8_VENDOR_ID, USB_AVAYA_WIRELESS_ID)},
+ {USB_DEVICE(USB_AVAYAE_VENDOR_ID, USB_AVAYA_WIRELESS_ID)},
+ {USB_DEVICE(USB_AGERE_VENDOR_ID, USB_AGERE_MODEL0801_ID)},
+ {USB_DEVICE(USB_AGERE_VENDOR_ID, USB_AGERE_MODEL0802_ID)},
+ {USB_DEVICE(USB_ELSA_VENDOR_ID, USB_ELSA_AIRLANCER_ID)},
+ {USB_DEVICE(USB_LEGEND_VENDOR_ID, USB_LEGEND_JOYNET_ID)},
+ {USB_DEVICE_VER(USB_SAMSUNG_VENDOR_ID, USB_SAMSUNG_SEW2001U1_ID,
+ 0, 0)},
+ {USB_DEVICE(USB_SAMSUNG_VENDOR_ID, USB_SAMSUNG_SEW2001U2_ID)},
+ {USB_DEVICE(USB_SAMSUNG_VENDOR_ID, USB_SAMSUNG_SEW2003U_ID)},
+ {USB_DEVICE(USB_IGATE_VENDOR_ID, USB_IGATE_IGATE_11M_ID)},
+ {USB_DEVICE(USB_FUJITSU_VENDOR_ID, USB_FUJITSU_E1100_ID)},
+ {USB_DEVICE(USB_2WIRE_VENDOR_ID, USB_2WIRE_WIRELESS_ID)},
+ {USB_DEVICE(USB_AGERE_VENDOR_ID, USB_AGERE_REBRANDED_ID)},
+ {} /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, ezusb_table);
+
+/* Structure to hold all of our device specific stuff */
+struct ezusb_priv {
+ struct usb_device *udev;
+ struct net_device *dev;
+ struct mutex mtx;
+ spinlock_t req_lock;
+ struct list_head req_pending;
+ struct list_head req_active;
+ spinlock_t reply_count_lock;
+ u16 hermes_reg_fake[0x40];
+ u8 *bap_buf;
+ struct urb *read_urb;
+ int read_pipe;
+ int write_pipe;
+ u8 reply_count;
+};
+
+enum ezusb_state {
+ EZUSB_CTX_START,
+ EZUSB_CTX_QUEUED,
+ EZUSB_CTX_REQ_SUBMITTED,
+ EZUSB_CTX_REQ_COMPLETE,
+ EZUSB_CTX_RESP_RECEIVED,
+ EZUSB_CTX_REQ_TIMEOUT,
+ EZUSB_CTX_REQ_FAILED,
+ EZUSB_CTX_RESP_TIMEOUT,
+ EZUSB_CTX_REQSUBMIT_FAIL,
+ EZUSB_CTX_COMPLETE,
+};
+
+struct request_context {
+ struct list_head list;
+ atomic_t refcount;
+ struct completion done; /* Signals that CTX is dead */
+ int killed;
+ struct urb *outurb; /* OUT for req pkt */
+ struct ezusb_priv *upriv;
+ struct ezusb_packet *buf;
+ int buf_length;
+ struct timer_list timer; /* Timeout handling */
+ enum ezusb_state state; /* Current state */
+ /* the RID that we will wait for */
+ u16 out_rid;
+ u16 in_rid;
+};
+
+
+/* Forward declarations */
+static void ezusb_ctx_complete(struct request_context *ctx);
+static void ezusb_req_queue_run(struct ezusb_priv *upriv);
+static void ezusb_bulk_in_callback(struct urb *urb);
+
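+/* Reply sequence counter helper: increments wrap from 0x7F back to 1,
+ * so the value 0 is never produced. */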
+static inline u8 ezusb_reply_inc(u8 count)
+{
+ if (count < 0x7F)
+ return count + 1;
+ else
+ return 1;
+}
+
+static void ezusb_request_context_put(struct request_context *ctx)
+{
+ if (!atomic_dec_and_test(&ctx->refcount))
+ return;
+
+ WARN_ON(!ctx->done.done);
+ BUG_ON(ctx->outurb->status == -EINPROGRESS);
+ BUG_ON(timer_pending(&ctx->timer));
+ usb_free_urb(ctx->outurb);
+ kfree(ctx->buf);
+ kfree(ctx);
+}
+
+static inline void ezusb_mod_timer(struct ezusb_priv *upriv,
+ struct timer_list *timer,
+ unsigned long expire)
+{
+ if (!upriv->udev)
+ return;
+ mod_timer(timer, expire);
+}
+
+static void ezusb_request_timerfn(u_long _ctx)
+{
+ struct request_context *ctx = (void *) _ctx;
+
+ ctx->outurb->transfer_flags |= URB_ASYNC_UNLINK;
+ if (usb_unlink_urb(ctx->outurb) == -EINPROGRESS) {
+ ctx->state = EZUSB_CTX_REQ_TIMEOUT;
+ } else {
+ ctx->state = EZUSB_CTX_RESP_TIMEOUT;
+ dbg("couldn't unlink");
+ atomic_inc(&ctx->refcount);
+ ctx->killed = 1;
+ ezusb_ctx_complete(ctx);
+ ezusb_request_context_put(ctx);
+ }
+}
+
+static struct request_context *ezusb_alloc_ctx(struct ezusb_priv *upriv,
+ u16 out_rid, u16 in_rid)
+{
+ struct request_context *ctx;
+
+ ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
+ if (!ctx)
+ return NULL;
+
+ memset(ctx, 0, sizeof(*ctx));
+
+ ctx->buf = kmalloc(BULK_BUF_SIZE, GFP_ATOMIC);
+ if (!ctx->buf) {
+ kfree(ctx);
+ return NULL;
+ }
+ ctx->outurb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!ctx->outurb) {
+ kfree(ctx->buf);
+ kfree(ctx);
+ return NULL;
+ }
+
+ ctx->upriv = upriv;
+ ctx->state = EZUSB_CTX_START;
+ ctx->out_rid = out_rid;
+ ctx->in_rid = in_rid;
+
+ atomic_set(&ctx->refcount, 1);
+ init_completion(&ctx->done);
+
+ init_timer(&ctx->timer);
+ ctx->timer.function = ezusb_request_timerfn;
+ ctx->timer.data = (u_long) ctx;
+ return ctx;
+}
+
+
+/* Hopefully the real complete_all will soon be exported; in the
+ * meantime this should work. */
+static inline void ezusb_complete_all(struct completion *comp)
+{
+ complete(comp);
+ complete(comp);
+ complete(comp);
+ complete(comp);
+}
+
+static void ezusb_ctx_complete(struct request_context *ctx)
+{
+ struct ezusb_priv *upriv = ctx->upriv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&upriv->req_lock, flags);
+
+ list_del_init(&ctx->list);
+ if (upriv->udev) {
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+ ezusb_req_queue_run(upriv);
+ spin_lock_irqsave(&upriv->req_lock, flags);
+ }
+
+ switch (ctx->state) {
+ case EZUSB_CTX_COMPLETE:
+ case EZUSB_CTX_REQSUBMIT_FAIL:
+ case EZUSB_CTX_REQ_FAILED:
+ case EZUSB_CTX_REQ_TIMEOUT:
+ case EZUSB_CTX_RESP_TIMEOUT:
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+
+ if ((ctx->out_rid == EZUSB_RID_TX) && upriv->dev) {
+ struct net_device *dev = upriv->dev;
+ struct orinoco_private *priv = ndev_priv(dev);
+ struct net_device_stats *stats = &priv->stats;
+
+ if (ctx->state != EZUSB_CTX_COMPLETE)
+ stats->tx_errors++;
+ else
+ stats->tx_packets++;
+
+ netif_wake_queue(dev);
+ }
+ ezusb_complete_all(&ctx->done);
+ ezusb_request_context_put(ctx);
+ break;
+
+ default:
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+ if (!upriv->udev) {
+ /* This is normal, as all request contexts get flushed
+ * when the device is disconnected */
+ err("Called, CTX not terminating, but device gone");
+ ezusb_complete_all(&ctx->done);
+ ezusb_request_context_put(ctx);
+ break;
+ }
+
+ err("Called, CTX not in terminating state.");
+ /* Things are really bad if this happens. Just leak
+ * the CTX because it may still be linked to the
+ * queue or the OUT urb may still be active.
+ * Just leaking at least prevents an Oops or Panic.
+ */
+ break;
+ }
+}
+
+/**
+ * ezusb_req_queue_run:
+ * Description:
+ * Note: Only one active CTX at any one time, because there's no
+ * other (reliable) way to match the response URB to the correct
+ * CTX.
+ **/
+static void ezusb_req_queue_run(struct ezusb_priv *upriv)
+{
+ unsigned long flags;
+ struct request_context *ctx;
+ int result;
+
+ spin_lock_irqsave(&upriv->req_lock, flags);
+
+ if (!list_empty(&upriv->req_active))
+ goto unlock;
+
+ if (list_empty(&upriv->req_pending))
+ goto unlock;
+
+ ctx =
+ list_entry(upriv->req_pending.next, struct request_context,
+ list);
+
+ if (!ctx->upriv->udev)
+ goto unlock;
+
+ /* We need to split this off to avoid a race condition */
+ list_move_tail(&ctx->list, &upriv->req_active);
+
+ if (ctx->state == EZUSB_CTX_QUEUED) {
+ atomic_inc(&ctx->refcount);
+ result = usb_submit_urb(ctx->outurb, GFP_ATOMIC);
+ if (result) {
+ ctx->state = EZUSB_CTX_REQSUBMIT_FAIL;
+
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+
+ err("Fatal, failed to submit command urb."
+ " error=%d\n", result);
+
+ ezusb_ctx_complete(ctx);
+ ezusb_request_context_put(ctx);
+ goto done;
+ }
+
+ ctx->state = EZUSB_CTX_REQ_SUBMITTED;
+ ezusb_mod_timer(ctx->upriv, &ctx->timer,
+ jiffies + DEF_TIMEOUT);
+ }
+
+ unlock:
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+
+ done:
+ return;
+}
+
+static void ezusb_req_enqueue_run(struct ezusb_priv *upriv,
+ struct request_context *ctx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&upriv->req_lock, flags);
+
+ if (!ctx->upriv->udev) {
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+ goto done;
+ }
+ atomic_inc(&ctx->refcount);
+ list_add_tail(&ctx->list, &upriv->req_pending);
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+
+ ctx->state = EZUSB_CTX_QUEUED;
+ ezusb_req_queue_run(upriv);
+
+ done:
+ return;
+}
+
+static void ezusb_request_out_callback(struct urb *urb)
+{
+ unsigned long flags;
+ enum ezusb_state state;
+ struct request_context *ctx = urb->context;
+ struct ezusb_priv *upriv = ctx->upriv;
+
+ spin_lock_irqsave(&upriv->req_lock, flags);
+
+ del_timer(&ctx->timer);
+
+ if (ctx->killed) {
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+ pr_warning("interrupt called with dead ctx");
+ goto out;
+ }
+
+ state = ctx->state;
+
+ if (urb->status == 0) {
+ switch (state) {
+ case EZUSB_CTX_REQ_SUBMITTED:
+ if (ctx->in_rid) {
+ ctx->state = EZUSB_CTX_REQ_COMPLETE;
+ /* reply URB still pending */
+ ezusb_mod_timer(upriv, &ctx->timer,
+ jiffies + DEF_TIMEOUT);
+ spin_unlock_irqrestore(&upriv->req_lock,
+ flags);
+ break;
+ }
+ /* fall through */
+ case EZUSB_CTX_RESP_RECEIVED:
+ /* IN already received before this OUT-ACK */
+ ctx->state = EZUSB_CTX_COMPLETE;
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+ ezusb_ctx_complete(ctx);
+ break;
+
+ default:
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+ err("Unexpected state(0x%x, %d) in OUT URB",
+ state, urb->status);
+ break;
+ }
+ } else {
+ /* If someone cancels the OUT URB then its status
+ * should be either -ECONNRESET or -ENOENT.
+ */
+ switch (state) {
+ case EZUSB_CTX_REQ_SUBMITTED:
+ case EZUSB_CTX_RESP_RECEIVED:
+ ctx->state = EZUSB_CTX_REQ_FAILED;
+ /* fall through */
+
+ case EZUSB_CTX_REQ_FAILED:
+ case EZUSB_CTX_REQ_TIMEOUT:
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+
+ ezusb_ctx_complete(ctx);
+ break;
+
+ default:
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+
+ err("Unexpected state(0x%x, %d) in OUT URB",
+ state, urb->status);
+ break;
+ }
+ }
+ out:
+ ezusb_request_context_put(ctx);
+}
+
+static void ezusb_request_in_callback(struct ezusb_priv *upriv,
+ struct urb *urb)
+{
+ struct ezusb_packet *ans = urb->transfer_buffer;
+ struct request_context *ctx = NULL;
+ enum ezusb_state state;
+ unsigned long flags;
+
+ /* Find the CTX on the active queue that requested this URB */
+ spin_lock_irqsave(&upriv->req_lock, flags);
+ if (upriv->udev) {
+ struct list_head *item;
+
+ list_for_each(item, &upriv->req_active) {
+ struct request_context *c;
+ int reply_count;
+
+ c = list_entry(item, struct request_context, list);
+ reply_count =
+ ezusb_reply_inc(c->buf->req_reply_count);
+ if ((ans->ans_reply_count == reply_count)
+ && (le16_to_cpu(ans->hermes_rid) == c->in_rid)) {
+ ctx = c;
+ break;
+ }
+ dbg("Skipped (0x%x/0x%x) (%d/%d)",
+ le16_to_cpu(ans->hermes_rid),
+ c->in_rid, ans->ans_reply_count, reply_count);
+ }
+ }
+
+ if (ctx == NULL) {
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+ err("%s: got unexpected RID: 0x%04X", __func__,
+ le16_to_cpu(ans->hermes_rid));
+ ezusb_req_queue_run(upriv);
+ return;
+ }
+
+ /* The data we want is in the in buffer, exchange */
+ urb->transfer_buffer = ctx->buf;
+ ctx->buf = (void *) ans;
+ ctx->buf_length = urb->actual_length;
+
+ state = ctx->state;
+ switch (state) {
+ case EZUSB_CTX_REQ_SUBMITTED:
+ /* We have received our response URB before
+ * our request has been acknowledged. Do NOT
+ * destroy our CTX yet, because our OUT URB
+ * is still alive ...
+ */
+ ctx->state = EZUSB_CTX_RESP_RECEIVED;
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+
+ /* Let the machine continue running. */
+ break;
+
+ case EZUSB_CTX_REQ_COMPLETE:
+ /* This is the usual path: our request
+ * has already been acknowledged, and
+ * we have now received the reply.
+ */
+ ctx->state = EZUSB_CTX_COMPLETE;
+
+ /* Stop the IN-URB timer */
+ del_timer(&ctx->timer);
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+
+ /* Call the completion handler */
+ ezusb_ctx_complete(ctx);
+ break;
+
+ default:
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+
+ pr_warning("Matched IN URB, unexpected context state(0x%x)",
+ state);
+ /* Throw this CTX away and try submitting another */
+ del_timer(&ctx->timer);
+ ctx->outurb->transfer_flags |= URB_ASYNC_UNLINK;
+ usb_unlink_urb(ctx->outurb);
+ ezusb_req_queue_run(upriv);
+ break;
+ } /* switch */
+}
+
+
+static void ezusb_req_ctx_wait(struct ezusb_priv *upriv,
+ struct request_context *ctx)
+{
+ switch (ctx->state) {
+ case EZUSB_CTX_QUEUED:
+ case EZUSB_CTX_REQ_SUBMITTED:
+ case EZUSB_CTX_REQ_COMPLETE:
+ case EZUSB_CTX_RESP_RECEIVED:
+ if (in_softirq()) {
+ /* If we get called from a timer, timeout timers don't
+ * get the chance to run themselves. So we make sure
+ * that we don't sleep forever */
+ int msecs = DEF_TIMEOUT * (1000 / HZ);
+ while (!ctx->done.done && msecs--)
+ udelay(1000);
+ } else {
+ wait_event_interruptible(ctx->done.wait,
+ ctx->done.done);
+ }
+ break;
+ default:
+ /* Done or failed - nothing to wait for */
+ break;
+ }
+}
+
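+/* Not a true CRC despite the name: a simple shift-and-add checksum
+ * over the first 8 bytes of the packet header. */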
+static inline u16 build_crc(struct ezusb_packet *data)
+{
+ u16 crc = 0;
+ u8 *bytes = (u8 *)data;
+ int i;
+
+ for (i = 0; i < 8; i++)
+ crc = (crc << 1) + bytes[i];
+
+ return crc;
+}
+
+/**
+ * ezusb_fill_req:
+ *
+ * If data == NULL and length > 0, the data is assumed to be already in
+ * the target buffer and only the header is filled.
+ *
+ */
+static int ezusb_fill_req(struct ezusb_packet *req, u16 length, u16 rid,
+ const void *data, u16 frame_type, u8 reply_count)
+{
+ int total_size = sizeof(*req) + length;
+
+ BUG_ON(total_size > BULK_BUF_SIZE);
+
+ req->magic = cpu_to_le16(EZUSB_MAGIC);
+ req->req_reply_count = reply_count;
+ req->ans_reply_count = 0;
+ req->frame_type = cpu_to_le16(frame_type);
+ req->size = cpu_to_le16(length + 4);
+ req->crc = cpu_to_le16(build_crc(req));
+ req->hermes_len = cpu_to_le16(HERMES_BYTES_TO_RECLEN(length));
+ req->hermes_rid = cpu_to_le16(rid);
+ if (data)
+ memcpy(req->data, data, length);
+ return total_size;
+}
+
+static int ezusb_submit_in_urb(struct ezusb_priv *upriv)
+{
+ int retval = 0;
+ void *cur_buf = upriv->read_urb->transfer_buffer;
+
+ if (upriv->read_urb->status == -EINPROGRESS) {
+ dbg("urb busy, not resubmiting");
+ retval = -EBUSY;
+ goto exit;
+ }
+ usb_fill_bulk_urb(upriv->read_urb, upriv->udev, upriv->read_pipe,
+ cur_buf, BULK_BUF_SIZE,
+ ezusb_bulk_in_callback, upriv);
+ upriv->read_urb->transfer_flags = 0;
+ retval = usb_submit_urb(upriv->read_urb, GFP_ATOMIC);
+ if (retval)
+ err("%s submit failed %d", __func__, retval);
+
+ exit:
+ return retval;
+}
+
+static inline int ezusb_8051_cpucs(struct ezusb_priv *upriv, int reset)
+{
+ u8 res_val = reset; /* avoid argument promotion */
+
+ if (!upriv->udev) {
+ err("%s: !upriv->udev", __func__);
+ return -EFAULT;
+ }
+ return usb_control_msg(upriv->udev,
+ usb_sndctrlpipe(upriv->udev, 0),
+ EZUSB_REQUEST_FW_TRANS,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE |
+ USB_DIR_OUT, EZUSB_CPUCS_REG, 0, &res_val,
+ sizeof(res_val), DEF_TIMEOUT);
+}
+
+static int ezusb_firmware_download(struct ezusb_priv *upriv,
+ struct ez_usb_fw *fw)
+{
+ u8 fw_buffer[FW_BUF_SIZE];
+ int retval, addr;
+ int variant_offset;
+
+ /*
+ * This byte is 1 and should be replaced with 0. The offset is
+ * 0x10AD in version 0.0.6. The byte in question should follow
+ * the end of the code pointed to by the jump in the beginning
+ * of the firmware. Also, it is read by code located at 0x358.
+ */
+ variant_offset = be16_to_cpup((__be16 *) &fw->code[FW_VAR_OFFSET_PTR]);
+ if (variant_offset >= fw->size) {
+ printk(KERN_ERR PFX "Invalid firmware variant offset: "
+ "0x%04x\n", variant_offset);
+ retval = -EINVAL;
+ goto fail;
+ }
+
+ retval = ezusb_8051_cpucs(upriv, 1);
+ if (retval < 0)
+ goto fail;
+ for (addr = 0; addr < fw->size; addr += FW_BUF_SIZE) {
+ /* 0x100-0x300 should be left alone; it contains
+ * card-specific data, like USB enumeration information */
+ if ((addr >= FW_HOLE_START) && (addr < FW_HOLE_END))
+ continue;
+
+ memcpy(fw_buffer, &fw->code[addr], FW_BUF_SIZE);
+ if (variant_offset >= addr &&
+ variant_offset < addr + FW_BUF_SIZE) {
+ dbg("Patching card_variant byte at 0x%04X",
+ variant_offset);
+ fw_buffer[variant_offset - addr] = FW_VAR_VALUE;
+ }
+ retval = usb_control_msg(upriv->udev,
+ usb_sndctrlpipe(upriv->udev, 0),
+ EZUSB_REQUEST_FW_TRANS,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE
+ | USB_DIR_OUT,
+ addr, 0x0,
+ fw_buffer, FW_BUF_SIZE,
+ DEF_TIMEOUT);
+
+ if (retval < 0)
+ goto fail;
+ }
+ retval = ezusb_8051_cpucs(upriv, 0);
+ if (retval < 0)
+ goto fail;
+
+ goto exit;
+ fail:
+ printk(KERN_ERR PFX "Firmware download failed, error %d\n",
+ retval);
+ exit:
+ return retval;
+}
+
+static int ezusb_access_ltv(struct ezusb_priv *upriv,
+ struct request_context *ctx,
+ u16 length, const void *data, u16 frame_type,
+ void *ans_buff, int ans_size, u16 *ans_length)
+{
+ int req_size;
+ int retval = 0;
+ enum ezusb_state state;
+
+ BUG_ON(in_irq());
+
+ if (!upriv->udev) {
+ dbg("Device disconnected");
+ return -ENODEV;
+ }
+
+ if (upriv->read_urb->status != -EINPROGRESS)
+ err("%s: in urb not pending", __func__);
+
+ /* protect upriv->reply_count, guarantee sequential numbers */
+ spin_lock_bh(&upriv->reply_count_lock);
+ req_size = ezusb_fill_req(ctx->buf, length, ctx->out_rid, data,
+ frame_type, upriv->reply_count);
+ usb_fill_bulk_urb(ctx->outurb, upriv->udev, upriv->write_pipe,
+ ctx->buf, req_size,
+ ezusb_request_out_callback, ctx);
+
+ if (ctx->in_rid)
+ upriv->reply_count = ezusb_reply_inc(upriv->reply_count);
+
+ ezusb_req_enqueue_run(upriv, ctx);
+
+ spin_unlock_bh(&upriv->reply_count_lock);
+
+ if (ctx->in_rid)
+ ezusb_req_ctx_wait(upriv, ctx);
+
+ state = ctx->state;
+ switch (state) {
+ case EZUSB_CTX_COMPLETE:
+ retval = ctx->outurb->status;
+ break;
+
+ case EZUSB_CTX_QUEUED:
+ case EZUSB_CTX_REQ_SUBMITTED:
+ if (!ctx->in_rid)
+ break;
+ default:
+ err("%s: Unexpected context state %d", __func__,
+ state);
+ /* fall through */
+ case EZUSB_CTX_REQ_TIMEOUT:
+ case EZUSB_CTX_REQ_FAILED:
+ case EZUSB_CTX_RESP_TIMEOUT:
+ case EZUSB_CTX_REQSUBMIT_FAIL:
+ printk(KERN_ERR PFX "Access failed, resetting (state %d,"
+ " reply_count %d)\n", state, upriv->reply_count);
+ upriv->reply_count = 0;
+ if (state == EZUSB_CTX_REQ_TIMEOUT
+ || state == EZUSB_CTX_RESP_TIMEOUT) {
+ printk(KERN_ERR PFX "ctx timed out\n");
+ retval = -ETIMEDOUT;
+ } else {
+ printk(KERN_ERR PFX "ctx failed\n");
+ retval = -EFAULT;
+ }
+ goto exit;
+ break;
+ }
+ if (ctx->in_rid) {
+ struct ezusb_packet *ans = ctx->buf;
+ int exp_len;
+
+ if (ans->hermes_len != 0)
+ exp_len = le16_to_cpu(ans->hermes_len) * 2 + 12;
+ else
+ exp_len = 14;
+
+ if (exp_len != ctx->buf_length) {
+ err("%s: length mismatch for RID 0x%04x: "
+ "expected %d, got %d", __func__,
+ ctx->in_rid, exp_len, ctx->buf_length);
+ retval = -EIO;
+ goto exit;
+ }
+
+ if (ans_buff)
+ memcpy(ans_buff, ans->data,
+ min_t(int, exp_len, ans_size));
+ if (ans_length)
+ *ans_length = le16_to_cpu(ans->hermes_len);
+ }
+ exit:
+ ezusb_request_context_put(ctx);
+ return retval;
+}
+
+static int ezusb_write_ltv(hermes_t *hw, int bap, u16 rid,
+ u16 length, const void *data)
+{
+ struct ezusb_priv *upriv = hw->priv;
+ u16 frame_type;
+ struct request_context *ctx;
+
+ if (length == 0)
+ return -EINVAL;
+
+ length = HERMES_RECLEN_TO_BYTES(length);
+
+ /* On memory mapped devices HERMES_RID_CNFGROUPADDRESSES can be
+ * set to be empty, but the USB bridge doesn't like it */
+ if (length == 0)
+ return 0;
+
+ ctx = ezusb_alloc_ctx(upriv, rid, EZUSB_RID_ACK);
+ if (!ctx)
+ return -ENOMEM;
+
+ if (rid == EZUSB_RID_TX)
+ frame_type = EZUSB_FRAME_DATA;
+ else
+ frame_type = EZUSB_FRAME_CONTROL;
+
+ return ezusb_access_ltv(upriv, ctx, length, data, frame_type,
+ NULL, 0, NULL);
+}
+
+static int ezusb_read_ltv(hermes_t *hw, int bap, u16 rid,
+ unsigned bufsize, u16 *length, void *buf)
+{
+ struct ezusb_priv *upriv = hw->priv;
+ struct request_context *ctx;
+
+ if ((bufsize < 0) || (bufsize % 2))
+ return -EINVAL;
+
+ ctx = ezusb_alloc_ctx(upriv, rid, rid);
+ if (!ctx)
+ return -ENOMEM;
+
+ return ezusb_access_ltv(upriv, ctx, 0, NULL, EZUSB_FRAME_CONTROL,
+ buf, bufsize, length);
+}
+
+static int ezusb_doicmd_wait(hermes_t *hw, u16 cmd, u16 parm0, u16 parm1,
+ u16 parm2, struct hermes_response *resp)
+{
+ struct ezusb_priv *upriv = hw->priv;
+ struct request_context *ctx;
+
+ __le16 data[4] = {
+ cpu_to_le16(cmd),
+ cpu_to_le16(parm0),
+ cpu_to_le16(parm1),
+ cpu_to_le16(parm2),
+ };
+ dbg("0x%04X, parm0 0x%04X, parm1 0x%04X, parm2 0x%04X",
+ cmd, parm0, parm1, parm2);
+ ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_DOCMD, EZUSB_RID_ACK);
+ if (!ctx)
+ return -ENOMEM;
+
+ return ezusb_access_ltv(upriv, ctx, sizeof(data), &data,
+ EZUSB_FRAME_CONTROL, NULL, 0, NULL);
+}
+
+static int ezusb_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
+ struct hermes_response *resp)
+{
+ struct ezusb_priv *upriv = hw->priv;
+ struct request_context *ctx;
+
+ __le16 data[4] = {
+ cpu_to_le16(cmd),
+ cpu_to_le16(parm0),
+ 0,
+ 0,
+ };
+ dbg("0x%04X, parm0 0x%04X", cmd, parm0);
+ ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_DOCMD, EZUSB_RID_ACK);
+ if (!ctx)
+ return -ENOMEM;
+
+ return ezusb_access_ltv(upriv, ctx, sizeof(data), &data,
+ EZUSB_FRAME_CONTROL, NULL, 0, NULL);
+}
+
+static int ezusb_bap_pread(struct hermes *hw, int bap,
+ void *buf, int len, u16 id, u16 offset)
+{
+ struct ezusb_priv *upriv = hw->priv;
+ struct ezusb_packet *ans = (void *) upriv->read_urb->transfer_buffer;
+ int actual_length = upriv->read_urb->actual_length;
+
+ if (id == EZUSB_RID_RX) {
+ if ((sizeof(*ans) + offset + len) > actual_length) {
+ printk(KERN_ERR PFX "BAP read beyond buffer end "
+ "in rx frame\n");
+ return -EINVAL;
+ }
+ memcpy(buf, ans->data + offset, len);
+ return 0;
+ }
+
+ if (EZUSB_IS_INFO(id)) {
+ /* Include 4 bytes for length/type */
+ if ((sizeof(*ans) + offset + len - 4) > actual_length) {
+ printk(KERN_ERR PFX "BAP read beyond buffer end "
+ "in info frame\n");
+ return -EFAULT;
+ }
+ memcpy(buf, ans->data + offset - 4, len);
+ } else {
+ printk(KERN_ERR PFX "Unexpected fid 0x%04x\n", id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ezusb_read_pda(struct hermes *hw, __le16 *pda,
+ u32 pda_addr, u16 pda_len)
+{
+ struct ezusb_priv *upriv = hw->priv;
+ struct request_context *ctx;
+ __le16 data[] = {
+ cpu_to_le16(pda_addr & 0xffff),
+ cpu_to_le16(pda_len - 4)
+ };
+ ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_READ_PDA, EZUSB_RID_READ_PDA);
+ if (!ctx)
+ return -ENOMEM;
+
+ /* wl_lkm does not include PDA size in the PDA area.
+ * We will pad the information into pda, so other routines
+ * don't have to be modified */
+ pda[0] = cpu_to_le16(pda_len - 2);
+ /* Includes CFG_PROD_DATA but not itself */
+ pda[1] = cpu_to_le16(0x0800); /* CFG_PROD_DATA */
+
+ return ezusb_access_ltv(upriv, ctx, sizeof(data), &data,
+ EZUSB_FRAME_CONTROL, &pda[2], pda_len - 4,
+ NULL);
+}
+
+static int ezusb_program_init(struct hermes *hw, u32 entry_point)
+{
+ struct ezusb_priv *upriv = hw->priv;
+ struct request_context *ctx;
+ __le32 data = cpu_to_le32(entry_point);
+
+ ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_PROG_INIT, EZUSB_RID_ACK);
+ if (!ctx)
+ return -ENOMEM;
+
+ return ezusb_access_ltv(upriv, ctx, sizeof(data), &data,
+ EZUSB_FRAME_CONTROL, NULL, 0, NULL);
+}
+
+static int ezusb_program_end(struct hermes *hw)
+{
+ struct ezusb_priv *upriv = hw->priv;
+ struct request_context *ctx;
+
+ ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_PROG_END, EZUSB_RID_ACK);
+ if (!ctx)
+ return -ENOMEM;
+
+ return ezusb_access_ltv(upriv, ctx, 0, NULL,
+ EZUSB_FRAME_CONTROL, NULL, 0, NULL);
+}
+
+static int ezusb_program_bytes(struct hermes *hw, const char *buf,
+ u32 addr, u32 len)
+{
+ struct ezusb_priv *upriv = hw->priv;
+ struct request_context *ctx;
+ __le32 data = cpu_to_le32(addr);
+ int err;
+
+ ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_PROG_SET_ADDR, EZUSB_RID_ACK);
+ if (!ctx)
+ return -ENOMEM;
+
+ err = ezusb_access_ltv(upriv, ctx, sizeof(data), &data,
+ EZUSB_FRAME_CONTROL, NULL, 0, NULL);
+ if (err)
+ return err;
+
+ ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_PROG_BYTES, EZUSB_RID_ACK);
+ if (!ctx)
+ return -ENOMEM;
+
+ return ezusb_access_ltv(upriv, ctx, len, buf,
+ EZUSB_FRAME_CONTROL, NULL, 0, NULL);
+}
+
+static int ezusb_program(struct hermes *hw, const char *buf,
+ u32 addr, u32 len)
+{
+ u32 ch_addr;
+ u32 ch_len;
+ int err = 0;
+
+ /* We can only send 2048 bytes out of the bulk xmit at a time,
+ * so we have to split any programming into chunks of <2048
+ * bytes. */
+
+ ch_len = (len < MAX_DL_SIZE) ? len : MAX_DL_SIZE;
+ ch_addr = addr;
+
+ while (ch_addr < (addr + len)) {
+ pr_debug("Programming subblock of length %d "
+ "to address 0x%08x. Data @ %p\n",
+ ch_len, ch_addr, &buf[ch_addr - addr]);
+
+ err = ezusb_program_bytes(hw, &buf[ch_addr - addr],
+ ch_addr, ch_len);
+ if (err)
+ break;
+
+ ch_addr += ch_len;
+ ch_len = ((addr + len - ch_addr) < MAX_DL_SIZE) ?
+ (addr + len - ch_addr) : MAX_DL_SIZE;
+ }
+
+ return err;
+}
+
+static netdev_tx_t ezusb_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct orinoco_private *priv = ndev_priv(dev);
+ struct net_device_stats *stats = &priv->stats;
+ struct ezusb_priv *upriv = priv->card;
+ u8 mic[MICHAEL_MIC_LEN+1];
+ int err = 0;
+ int tx_control;
+ unsigned long flags;
+ struct request_context *ctx;
+ u8 *buf;
+ int tx_size;
+
+ if (!netif_running(dev)) {
+ printk(KERN_ERR "%s: Tx on stopped device!\n",
+ dev->name);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (netif_queue_stopped(dev)) {
+ printk(KERN_DEBUG "%s: Tx while transmitter busy!\n",
+ dev->name);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (orinoco_lock(priv, &flags) != 0) {
+ printk(KERN_ERR
+ "%s: ezusb_xmit() called while hw_unavailable\n",
+ dev->name);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (!netif_carrier_ok(dev) ||
+ (priv->iw_mode == NL80211_IFTYPE_MONITOR)) {
+ /* Oops, the firmware hasn't established a connection;
+ silently drop the packet (this seems to be the
+ safest approach). */
+ goto drop;
+ }
+
+ /* Check packet length */
+ if (skb->len < ETH_HLEN)
+ goto drop;
+
+ ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_TX, 0);
+ if (!ctx)
+ goto busy;
+
+ memset(ctx->buf, 0, BULK_BUF_SIZE);
+ buf = ctx->buf->data;
+
+ tx_control = 0;
+
+ err = orinoco_process_xmit_skb(skb, dev, priv, &tx_control,
+ &mic[0]);
+ if (err)
+ goto drop;
+
+ {
+ __le16 *tx_cntl = (__le16 *)buf;
+ *tx_cntl = cpu_to_le16(tx_control);
+ buf += sizeof(*tx_cntl);
+ }
+
+ memcpy(buf, skb->data, skb->len);
+ buf += skb->len;
+
+ if (tx_control & HERMES_TXCTRL_MIC) {
+ u8 *m = mic;
+ /* Mic has been offset so it can be copied to an even
+ * address. We're copying everything anyway, so we
+ * don't need to copy that first byte. */
+ if (skb->len % 2)
+ m++;
+ memcpy(buf, m, MICHAEL_MIC_LEN);
+ buf += MICHAEL_MIC_LEN;
+ }
+
+ /* Finally, we actually initiate the send */
+ netif_stop_queue(dev);
+
+ /* The card may behave better if we send evenly sized usb transfers */
+ tx_size = ALIGN(buf - ctx->buf->data, 2);
+
+ err = ezusb_access_ltv(upriv, ctx, tx_size, NULL,
+ EZUSB_FRAME_DATA, NULL, 0, NULL);
+
+ if (err) {
+ netif_start_queue(dev);
+ if (net_ratelimit())
+ printk(KERN_ERR "%s: Error %d transmitting packet\n",
+ dev->name, err);
+ goto busy;
+ }
+
+ dev->trans_start = jiffies;
+ stats->tx_bytes += skb->len;
+ goto ok;
+
+ drop:
+ stats->tx_errors++;
+ stats->tx_dropped++;
+
+ ok:
+ orinoco_unlock(priv, &flags);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+
+ busy:
+ orinoco_unlock(priv, &flags);
+ return NETDEV_TX_BUSY;
+}
+
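+/* No on-card buffer allocation over USB; transmit frames always use
+ * the fixed EZUSB_RID_TX identifier. */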
+static int ezusb_allocate(struct hermes *hw, u16 size, u16 *fid)
+{
+ *fid = EZUSB_RID_TX;
+ return 0;
+}
+
+
+static int ezusb_hard_reset(struct orinoco_private *priv)
+{
+ struct ezusb_priv *upriv = priv->card;
+ int retval = ezusb_8051_cpucs(upriv, 1);
+
+ if (retval < 0) {
+ err("Failed to reset");
+ return retval;
+ }
+
+ retval = ezusb_8051_cpucs(upriv, 0);
+ if (retval < 0) {
+ err("Failed to unreset");
+ return retval;
+ }
+
+ dbg("sending control message");
+ retval = usb_control_msg(upriv->udev,
+ usb_sndctrlpipe(upriv->udev, 0),
+ EZUSB_REQUEST_TRIGER,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE |
+ USB_DIR_OUT, 0x0, 0x0, NULL, 0,
+ DEF_TIMEOUT);
+ if (retval < 0) {
+ err("EZUSB_REQUEST_TRIGER failed retval %d", retval);
+ return retval;
+ }
+#if 0
+ dbg("Sending EZUSB_REQUEST_TRIG_AC");
+ retval = usb_control_msg(upriv->udev,
+ usb_sndctrlpipe(upriv->udev, 0),
+ EZUSB_REQUEST_TRIG_AC,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE |
+ USB_DIR_OUT, 0x00FA, 0x0, NULL, 0,
+ DEF_TIMEOUT);
+ if (retval < 0) {
+ err("EZUSB_REQUEST_TRIG_AC failed retval %d", retval);
+ return retval;
+ }
+#endif
+
+ return 0;
+}
+
+
+static int ezusb_init(hermes_t *hw)
+{
+ struct ezusb_priv *upriv = hw->priv;
+ int retval;
+
+ BUG_ON(in_interrupt());
+ BUG_ON(!upriv);
+
+ upriv->reply_count = 0;
+ /* Write the MAGIC number on the simulated registers to keep
+ * orinoco.c happy */
+ hermes_write_regn(hw, SWSUPPORT0, HERMES_MAGIC);
+ hermes_write_regn(hw, RXFID, EZUSB_RID_RX);
+
+ usb_kill_urb(upriv->read_urb);
+ ezusb_submit_in_urb(upriv);
+
+ retval = ezusb_write_ltv(hw, 0, EZUSB_RID_INIT1,
+ HERMES_BYTES_TO_RECLEN(2), "\x10\x00");
+ if (retval < 0) {
+ printk(KERN_ERR PFX "EZUSB_RID_INIT1 error %d\n", retval);
+ return retval;
+ }
+
+ retval = ezusb_docmd_wait(hw, HERMES_CMD_INIT, 0, NULL);
+ if (retval < 0) {
+ printk(KERN_ERR PFX "HERMES_CMD_INIT error %d\n", retval);
+ return retval;
+ }
+
+ return 0;
+}
+
+static void ezusb_bulk_in_callback(struct urb *urb)
+{
+ struct ezusb_priv *upriv = (struct ezusb_priv *) urb->context;
+ struct ezusb_packet *ans = urb->transfer_buffer;
+ u16 crc;
+ u16 hermes_rid;
+
+ if (upriv->udev == NULL) {
+ dbg("disconnected");
+ return;
+ }
+
+ if (urb->status == -ETIMEDOUT) {
+ /* When a device gets unplugged we get this every time
+ * we resubmit, flooding the logs. Since we don't use
+ * USB timeouts, it shouldn't happen any other time */
+ pr_warning("%s: urb timed out, not resubmitting", __func__);
+ return;
+ }
+ if (urb->status == -ECONNABORTED) {
+ pr_warning("%s: connection abort, resubmiting urb",
+ __func__);
+ goto resubmit;
+ }
+ if ((urb->status == -EILSEQ)
+ || (urb->status == -ENOENT)
+ || (urb->status == -ECONNRESET)) {
+ dbg("status %d, not resubmiting", urb->status);
+ return;
+ }
+ if (urb->status)
+ dbg("status: %d length: %d",
+ urb->status, urb->actual_length);
+ if (urb->actual_length < sizeof(*ans)) {
+ err("%s: short read, ignoring", __func__);
+ goto resubmit;
+ }
+ crc = build_crc(ans);
+ if (le16_to_cpu(ans->crc) != crc) {
+ err("CRC error, ignoring packet");
+ goto resubmit;
+ }
+
+ hermes_rid = le16_to_cpu(ans->hermes_rid);
+ if ((hermes_rid != EZUSB_RID_RX) && !EZUSB_IS_INFO(hermes_rid)) {
+ ezusb_request_in_callback(upriv, urb);
+ } else if (upriv->dev) {
+ struct net_device *dev = upriv->dev;
+ struct orinoco_private *priv = ndev_priv(dev);
+ hermes_t *hw = &priv->hw;
+
+ if (hermes_rid == EZUSB_RID_RX) {
+ __orinoco_ev_rx(dev, hw);
+ } else {
+ hermes_write_regn(hw, INFOFID,
+ le16_to_cpu(ans->hermes_rid));
+ __orinoco_ev_info(dev, hw);
+ }
+ }
+
+ resubmit:
+ if (upriv->udev)
+ ezusb_submit_in_urb(upriv);
+}
+
+static inline void ezusb_delete(struct ezusb_priv *upriv)
+{
+ struct net_device *dev;
+ struct list_head *item;
+ struct list_head *tmp_item;
+ unsigned long flags;
+
+ BUG_ON(in_interrupt());
+ BUG_ON(!upriv);
+
+ dev = upriv->dev;
+ mutex_lock(&upriv->mtx);
+
+ upriv->udev = NULL; /* No timer will be rearmed from here */
+
+ usb_kill_urb(upriv->read_urb);
+
+ spin_lock_irqsave(&upriv->req_lock, flags);
+ list_for_each_safe(item, tmp_item, &upriv->req_active) {
+ struct request_context *ctx;
+ int err;
+
+ ctx = list_entry(item, struct request_context, list);
+ atomic_inc(&ctx->refcount);
+
+ ctx->outurb->transfer_flags |= URB_ASYNC_UNLINK;
+ err = usb_unlink_urb(ctx->outurb);
+
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+ if (err == -EINPROGRESS)
+ wait_for_completion(&ctx->done);
+
+ del_timer_sync(&ctx->timer);
+ /* FIXME: there is a slight chance that the irq handler is
+ * still running */
+ if (!list_empty(&ctx->list))
+ ezusb_ctx_complete(ctx);
+
+ ezusb_request_context_put(ctx);
+ spin_lock_irqsave(&upriv->req_lock, flags);
+ }
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+
+ list_for_each_safe(item, tmp_item, &upriv->req_pending)
+ ezusb_ctx_complete(list_entry(item,
+ struct request_context, list));
+
+ if (upriv->read_urb->status == -EINPROGRESS)
+ printk(KERN_ERR PFX "Some URB in progress\n");
+
+ mutex_unlock(&upriv->mtx);
+
+ kfree(upriv->read_urb->transfer_buffer);
+ if (upriv->bap_buf != NULL)
+ kfree(upriv->bap_buf);
+ if (upriv->read_urb != NULL)
+ usb_free_urb(upriv->read_urb);
+ if (upriv->dev) {
+ struct orinoco_private *priv = ndev_priv(upriv->dev);
+ orinoco_if_del(priv);
+ free_orinocodev(priv);
+ }
+}
+
+static void ezusb_lock_irqsave(spinlock_t *lock,
+ unsigned long *flags) __acquires(lock)
+{
+ spin_lock_bh(lock);
+}
+
+static void ezusb_unlock_irqrestore(spinlock_t *lock,
+ unsigned long *flags) __releases(lock)
+{
+ spin_unlock_bh(lock);
+}
+
+static void ezusb_lock_irq(spinlock_t *lock) __acquires(lock)
+{
+ spin_lock_bh(lock);
+}
+
+static void ezusb_unlock_irq(spinlock_t *lock) __releases(lock)
+{
+ spin_unlock_bh(lock);
+}
+
+static const struct hermes_ops ezusb_ops = {
+ .init = ezusb_init,
+ .cmd_wait = ezusb_docmd_wait,
+ .init_cmd_wait = ezusb_doicmd_wait,
+ .allocate = ezusb_allocate,
+ .read_ltv = ezusb_read_ltv,
+ .write_ltv = ezusb_write_ltv,
+ .bap_pread = ezusb_bap_pread,
+ .read_pda = ezusb_read_pda,
+ .program_init = ezusb_program_init,
+ .program_end = ezusb_program_end,
+ .program = ezusb_program,
+ .lock_irqsave = ezusb_lock_irqsave,
+ .unlock_irqrestore = ezusb_unlock_irqrestore,
+ .lock_irq = ezusb_lock_irq,
+ .unlock_irq = ezusb_unlock_irq,
+};
+
+static const struct net_device_ops ezusb_netdev_ops = {
+ .ndo_open = orinoco_open,
+ .ndo_stop = orinoco_stop,
+ .ndo_start_xmit = ezusb_xmit,
+ .ndo_set_multicast_list = orinoco_set_multicast_list,
+ .ndo_change_mtu = orinoco_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_tx_timeout = orinoco_tx_timeout,
+ .ndo_get_stats = orinoco_get_stats,
+};
+
+static int ezusb_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct usb_device *udev = interface_to_usbdev(interface);
+ struct orinoco_private *priv;
+ hermes_t *hw;
+ struct ezusb_priv *upriv = NULL;
+ struct usb_interface_descriptor *iface_desc;
+ struct usb_endpoint_descriptor *ep;
+ const struct firmware *fw_entry = NULL; /* checked on the exit path */
+ int retval = 0;
+ int i;
+
+ priv = alloc_orinocodev(sizeof(*upriv), &udev->dev,
+ ezusb_hard_reset, NULL);
+ if (!priv) {
+ err("Couldn't allocate orinocodev");
+ goto exit;
+ }
+
+ hw = &priv->hw;
+
+ upriv = priv->card;
+
+ mutex_init(&upriv->mtx);
+ spin_lock_init(&upriv->reply_count_lock);
+
+ spin_lock_init(&upriv->req_lock);
+ INIT_LIST_HEAD(&upriv->req_pending);
+ INIT_LIST_HEAD(&upriv->req_active);
+
+ upriv->udev = udev;
+
+ hw->iobase = (void __force __iomem *) &upriv->hermes_reg_fake;
+ hw->reg_spacing = HERMES_16BIT_REGSPACING;
+ hw->priv = upriv;
+ hw->ops = &ezusb_ops;
+
+ /* Set up the endpoint information: check out the endpoints */
+
+ iface_desc = &interface->altsetting[0].desc;
+ for (i = 0; i < iface_desc->bNumEndpoints; ++i) {
+ ep = &interface->altsetting[0].endpoint[i].desc;
+
+ if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
+ == USB_DIR_IN) &&
+ ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+ == USB_ENDPOINT_XFER_BULK)) {
+ /* we found a bulk in endpoint */
+ if (upriv->read_urb != NULL) {
+ pr_warning("Found a second bulk in ep, ignored");
+ continue;
+ }
+
+ upriv->read_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!upriv->read_urb) {
+ err("No free urbs available");
+ goto error;
+ }
+ if (le16_to_cpu(ep->wMaxPacketSize) != 64)
+ pr_warning("bulk in: wMaxPacketSize!= 64");
+ if (ep->bEndpointAddress != (2 | USB_DIR_IN))
+ pr_warning("bulk in: bEndpointAddress: %d",
+ ep->bEndpointAddress);
+ upriv->read_pipe = usb_rcvbulkpipe(udev,
+ ep->bEndpointAddress);
+ upriv->read_urb->transfer_buffer =
+ kmalloc(BULK_BUF_SIZE, GFP_KERNEL);
+ if (!upriv->read_urb->transfer_buffer) {
+ err("Couldn't allocate IN buffer");
+ goto error;
+ }
+ }
+
+ if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
+ == USB_DIR_OUT) &&
+ ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+ == USB_ENDPOINT_XFER_BULK)) {
+ /* we found a bulk out endpoint */
+ if (upriv->bap_buf != NULL) {
+ pr_warning("Found a second bulk out ep, ignored");
+ continue;
+ }
+
+ if (le16_to_cpu(ep->wMaxPacketSize) != 64)
+ pr_warning("bulk out: wMaxPacketSize != 64");
+ if (ep->bEndpointAddress != 2)
+ pr_warning("bulk out: bEndpointAddress: %d",
+ ep->bEndpointAddress);
+ upriv->write_pipe = usb_sndbulkpipe(udev,
+ ep->bEndpointAddress);
+ upriv->bap_buf = kmalloc(BULK_BUF_SIZE, GFP_KERNEL);
+ if (!upriv->bap_buf) {
+ err("Couldn't allocate bulk_out_buffer");
+ goto error;
+ }
+ }
+ }
+ if (!upriv->bap_buf || !upriv->read_urb) {
+ err("Didn't find the required bulk endpoints");
+ goto error;
+ }
+
+ if (request_firmware(&fw_entry, "orinoco_ezusb_fw",
+ &interface->dev) == 0) {
+ firmware.size = fw_entry->size;
+ firmware.code = fw_entry->data;
+ }
+ if (firmware.size && firmware.code) {
+ ezusb_firmware_download(upriv, &firmware);
+ } else {
+ err("No firmware to download");
+ goto error;
+ }
+
+ if (ezusb_hard_reset(priv) < 0) {
+ err("Cannot reset the device");
+ goto error;
+ }
+
+ /* If the firmware is already downloaded, orinoco.c will call
+ * ezusb_init; but if the firmware is not there yet, that would make
+ * the kernel very unstable, so we try initializing here and quit in
+ * case of error. */
+ if (ezusb_init(hw) < 0) {
+ err("Couldn't initialize the device");
+ err("Firmware may not be downloaded or may be wrong.");
+ goto error;
+ }
+
+ /* Initialise the main driver */
+ if (orinoco_init(priv) != 0) {
+ err("orinoco_init() failed\n");
+ goto error;
+ }
+
+ if (orinoco_if_add(priv, 0, 0, &ezusb_netdev_ops) != 0) {
+ upriv->dev = NULL;
+ err("%s: orinoco_if_add() failed", __func__);
+ goto error;
+ }
+ upriv->dev = priv->ndev;
+
+ goto exit;
+
+ error:
+ ezusb_delete(upriv);
+ if (upriv->dev) {
+ /* upriv->dev was 0, so ezusb_delete() didn't free it */
+ free_orinocodev(priv);
+ }
+ upriv = NULL;
+ retval = -EFAULT;
+ exit:
+ if (fw_entry) {
+ firmware.code = NULL;
+ firmware.size = 0;
+ release_firmware(fw_entry);
+ }
+ usb_set_intfdata(interface, upriv);
+ return retval;
+}
+
+
+static void ezusb_disconnect(struct usb_interface *intf)
+{
+ struct ezusb_priv *upriv = usb_get_intfdata(intf);
+ usb_set_intfdata(intf, NULL);
+ ezusb_delete(upriv);
+ printk(KERN_INFO PFX "Disconnected\n");
+}
+
+
+/* usb specific object needed to register this driver with the usb subsystem */
+static struct usb_driver orinoco_driver = {
+ .name = DRIVER_NAME,
+ .probe = ezusb_probe,
+ .disconnect = ezusb_disconnect,
+ .id_table = ezusb_table,
+};
+
+/* Can't be declared "const" or the whole __initdata section will
+ * become const */
+static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
+ " (Manuel Estrada Sainz)";
+
+static int __init ezusb_module_init(void)
+{
+ int err;
+
+ printk(KERN_DEBUG "%s\n", version);
+
+ /* register this driver with the USB subsystem */
+ err = usb_register(&orinoco_driver);
+ if (err < 0) {
+ printk(KERN_ERR PFX "usb_register failed, error %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void __exit ezusb_module_exit(void)
+{
+ /* deregister this driver with the USB subsystem */
+ usb_deregister(&orinoco_driver);
+}
+
+
+module_init(ezusb_module_init);
+module_exit(ezusb_module_exit);
+
+MODULE_AUTHOR("Manuel Estrada Sainz");
+MODULE_DESCRIPTION
+ ("Driver for Orinoco wireless LAN cards using EZUSB bridge");
+MODULE_LICENSE("Dual MPL/GPL");
diff --git a/drivers/net/wireless/orinoco/scan.c b/drivers/net/wireless/orinoco/scan.c
index 330d42d45333..4300d9db7d8c 100644
--- a/drivers/net/wireless/orinoco/scan.c
+++ b/drivers/net/wireless/orinoco/scan.c
@@ -127,7 +127,7 @@ void orinoco_add_extscan_result(struct orinoco_private *priv,
{
struct wiphy *wiphy = priv_to_wiphy(priv);
struct ieee80211_channel *channel;
- u8 *ie;
+ const u8 *ie;
u64 timestamp;
s32 signal;
u16 capability;
@@ -136,7 +136,7 @@ void orinoco_add_extscan_result(struct orinoco_private *priv,
int chan, freq;
ie_len = len - sizeof(*bss);
- ie = orinoco_get_ie(bss->data, ie_len, WLAN_EID_DS_PARAMS);
+ ie = cfg80211_find_ie(WLAN_EID_DS_PARAMS, bss->data, ie_len);
chan = ie ? ie[2] : 0;
freq = ieee80211_dsss_chan_to_freq(chan);
channel = ieee80211_get_channel(wiphy, freq);
diff --git a/drivers/net/wireless/orinoco/spectrum_cs.c b/drivers/net/wireless/orinoco/spectrum_cs.c
index 59bda240fdc2..b51a9adc80f6 100644
--- a/drivers/net/wireless/orinoco/spectrum_cs.c
+++ b/drivers/net/wireless/orinoco/spectrum_cs.c
@@ -57,7 +57,6 @@ MODULE_PARM_DESC(ignore_cis_vcc, "Allow voltage mismatch between card and socket
* struct orinoco_private */
struct orinoco_pccard {
struct pcmcia_device *p_dev;
- dev_node_t node;
};
/********************************************************************/
@@ -193,10 +192,6 @@ spectrum_cs_probe(struct pcmcia_device *link)
card->p_dev = link;
link->priv = priv;
- /* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = orinoco_interrupt;
-
/* General socket configuration defaults can go here. In this
* client, we assume very little, and rely on the CIS for
* almost everything. In most clients, many details (i.e.,
@@ -218,8 +213,7 @@ static void spectrum_cs_detach(struct pcmcia_device *link)
{
struct orinoco_private *priv = link->priv;
- if (link->dev_node)
- orinoco_if_del(priv);
+ orinoco_if_del(priv);
spectrum_cs_release(link);
@@ -304,7 +298,6 @@ static int
spectrum_cs_config(struct pcmcia_device *link)
{
struct orinoco_private *priv = link->priv;
- struct orinoco_pccard *card = priv->card;
hermes_t *hw = &priv->hw;
int ret;
void __iomem *mem;
@@ -332,12 +325,7 @@ spectrum_cs_config(struct pcmcia_device *link)
goto failed;
}
- /*
- * Allocate an interrupt line. Note that this does not assign
- * a handler to the interrupt, unless the 'Handler' member of
- * the irq structure is initialized.
- */
- ret = pcmcia_request_irq(link, &link->irq);
+ ret = pcmcia_request_irq(link, orinoco_interrupt);
if (ret)
goto failed;
@@ -349,6 +337,7 @@ spectrum_cs_config(struct pcmcia_device *link)
goto failed;
hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING);
+ hw->eeprom_pda = true;
/*
* This actually configures the PCMCIA socket -- setting up
@@ -359,9 +348,6 @@ spectrum_cs_config(struct pcmcia_device *link)
if (ret)
goto failed;
- /* Ok, we have the configuration, prepare to register the netdev */
- card->node.major = card->node.minor = 0;
-
/* Reset card */
if (spectrum_cs_hard_reset(priv) != 0)
goto failed;
@@ -374,17 +360,11 @@ spectrum_cs_config(struct pcmcia_device *link)
/* Register an interface with the stack */
if (orinoco_if_add(priv, link->io.BasePort1,
- link->irq.AssignedIRQ) != 0) {
+ link->irq, NULL) != 0) {
printk(KERN_ERR PFX "orinoco_if_add() failed\n");
goto failed;
}
- /* At this point, the dev_node_t structure(s) needs to be
- * initialized and arranged in a linked list at link->dev_node. */
- strcpy(card->node.dev_name, priv->ndev->name);
- link->dev_node = &card->node; /* link->dev_node being non-NULL is also
- * used to indicate that the
- * net_device has been registered */
return 0;
failed:
@@ -405,9 +385,9 @@ spectrum_cs_release(struct pcmcia_device *link)
/* We're committed to taking the device away now, so mark the
* hardware as unavailable */
- spin_lock_irqsave(&priv->lock, flags);
+ priv->hw.ops->lock_irqsave(&priv->lock, &flags);
priv->hw_unavailable++;
- spin_unlock_irqrestore(&priv->lock, flags);
+ priv->hw.ops->unlock_irqrestore(&priv->lock, &flags);
pcmcia_disable_device(link);
if (priv->hw.iobase)
diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
index fbcc6e1a2e1d..5775124e2aee 100644
--- a/drivers/net/wireless/orinoco/wext.c
+++ b/drivers/net/wireless/orinoco/wext.c
@@ -458,7 +458,7 @@ static int orinoco_ioctl_setfreq(struct net_device *dev,
if (priv->iw_mode == NL80211_IFTYPE_MONITOR) {
/* Fast channel change - no commit if successful */
hermes_t *hw = &priv->hw;
- err = hermes_docmd_wait(hw, HERMES_CMD_TEST |
+ err = hw->ops->cmd_wait(hw, HERMES_CMD_TEST |
HERMES_TEST_SET_CHANNEL,
chan, NULL);
}
@@ -538,125 +538,6 @@ static int orinoco_ioctl_setsens(struct net_device *dev,
return -EINPROGRESS; /* Call commit handler */
}
-static int orinoco_ioctl_setrts(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *rrq,
- char *extra)
-{
- struct orinoco_private *priv = ndev_priv(dev);
- int val = rrq->value;
- unsigned long flags;
-
- if (rrq->disabled)
- val = 2347;
-
- if ((val < 0) || (val > 2347))
- return -EINVAL;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- priv->rts_thresh = val;
- orinoco_unlock(priv, &flags);
-
- return -EINPROGRESS; /* Call commit handler */
-}
-
-static int orinoco_ioctl_getrts(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *rrq,
- char *extra)
-{
- struct orinoco_private *priv = ndev_priv(dev);
-
- rrq->value = priv->rts_thresh;
- rrq->disabled = (rrq->value == 2347);
- rrq->fixed = 1;
-
- return 0;
-}
-
-static int orinoco_ioctl_setfrag(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *frq,
- char *extra)
-{
- struct orinoco_private *priv = ndev_priv(dev);
- int err = -EINPROGRESS; /* Call commit handler */
- unsigned long flags;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- if (priv->has_mwo) {
- if (frq->disabled)
- priv->mwo_robust = 0;
- else {
- if (frq->fixed)
- printk(KERN_WARNING "%s: Fixed fragmentation "
- "is not supported on this firmware. "
- "Using MWO robust instead.\n",
- dev->name);
- priv->mwo_robust = 1;
- }
- } else {
- if (frq->disabled)
- priv->frag_thresh = 2346;
- else {
- if ((frq->value < 256) || (frq->value > 2346))
- err = -EINVAL;
- else
- /* must be even */
- priv->frag_thresh = frq->value & ~0x1;
- }
- }
-
- orinoco_unlock(priv, &flags);
-
- return err;
-}
-
-static int orinoco_ioctl_getfrag(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *frq,
- char *extra)
-{
- struct orinoco_private *priv = ndev_priv(dev);
- hermes_t *hw = &priv->hw;
- int err;
- u16 val;
- unsigned long flags;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- if (priv->has_mwo) {
- err = hermes_read_wordrec(hw, USER_BAP,
- HERMES_RID_CNFMWOROBUST_AGERE,
- &val);
- if (err)
- val = 0;
-
- frq->value = val ? 2347 : 0;
- frq->disabled = !val;
- frq->fixed = 0;
- } else {
- err = hermes_read_wordrec(hw, USER_BAP,
- HERMES_RID_CNFFRAGMENTATIONTHRESHOLD,
- &val);
- if (err)
- val = 0;
-
- frq->value = val;
- frq->disabled = (val >= 2346);
- frq->fixed = 1;
- }
-
- orinoco_unlock(priv, &flags);
-
- return err;
-}
-
static int orinoco_ioctl_setrate(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *rrq,
@@ -1201,60 +1082,6 @@ static int orinoco_ioctl_set_mlme(struct net_device *dev,
return ret;
}
-static int orinoco_ioctl_getretry(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *rrq,
- char *extra)
-{
- struct orinoco_private *priv = ndev_priv(dev);
- hermes_t *hw = &priv->hw;
- int err = 0;
- u16 short_limit, long_limit, lifetime;
- unsigned long flags;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_SHORTRETRYLIMIT,
- &short_limit);
- if (err)
- goto out;
-
- err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_LONGRETRYLIMIT,
- &long_limit);
- if (err)
- goto out;
-
- err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_MAXTRANSMITLIFETIME,
- &lifetime);
- if (err)
- goto out;
-
- rrq->disabled = 0; /* Can't be disabled */
-
- /* Note : by default, display the retry number */
- if ((rrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
- rrq->flags = IW_RETRY_LIFETIME;
- rrq->value = lifetime * 1000; /* ??? */
- } else {
- /* By default, display the min number */
- if ((rrq->flags & IW_RETRY_LONG)) {
- rrq->flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
- rrq->value = long_limit;
- } else {
- rrq->flags = IW_RETRY_LIMIT;
- rrq->value = short_limit;
- if (short_limit != long_limit)
- rrq->flags |= IW_RETRY_SHORT;
- }
- }
-
- out:
- orinoco_unlock(priv, &flags);
-
- return err;
-}
-
static int orinoco_ioctl_reset(struct net_device *dev,
struct iw_request_info *info,
void *wrqu,
@@ -1446,8 +1273,8 @@ static int orinoco_ioctl_getrid(struct net_device *dev,
if (orinoco_lock(priv, &flags) != 0)
return -EBUSY;
- err = hermes_read_ltv(hw, USER_BAP, rid, MAX_RID_LEN, &length,
- extra);
+ err = hw->ops->read_ltv(hw, USER_BAP, rid, MAX_RID_LEN, &length,
+ extra);
if (err)
goto out;
@@ -1506,46 +1333,44 @@ static const struct iw_priv_args orinoco_privtab[] = {
* Structures to export the Wireless Handlers
*/
-#define STD_IW_HANDLER(id, func) \
- [IW_IOCTL_IDX(id)] = (iw_handler) func
static const iw_handler orinoco_handler[] = {
- STD_IW_HANDLER(SIOCSIWCOMMIT, orinoco_ioctl_commit),
- STD_IW_HANDLER(SIOCGIWNAME, cfg80211_wext_giwname),
- STD_IW_HANDLER(SIOCSIWFREQ, orinoco_ioctl_setfreq),
- STD_IW_HANDLER(SIOCGIWFREQ, orinoco_ioctl_getfreq),
- STD_IW_HANDLER(SIOCSIWMODE, cfg80211_wext_siwmode),
- STD_IW_HANDLER(SIOCGIWMODE, cfg80211_wext_giwmode),
- STD_IW_HANDLER(SIOCSIWSENS, orinoco_ioctl_setsens),
- STD_IW_HANDLER(SIOCGIWSENS, orinoco_ioctl_getsens),
- STD_IW_HANDLER(SIOCGIWRANGE, cfg80211_wext_giwrange),
- STD_IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
- STD_IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
- STD_IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
- STD_IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
- STD_IW_HANDLER(SIOCSIWAP, orinoco_ioctl_setwap),
- STD_IW_HANDLER(SIOCGIWAP, orinoco_ioctl_getwap),
- STD_IW_HANDLER(SIOCSIWSCAN, cfg80211_wext_siwscan),
- STD_IW_HANDLER(SIOCGIWSCAN, cfg80211_wext_giwscan),
- STD_IW_HANDLER(SIOCSIWESSID, orinoco_ioctl_setessid),
- STD_IW_HANDLER(SIOCGIWESSID, orinoco_ioctl_getessid),
- STD_IW_HANDLER(SIOCSIWRATE, orinoco_ioctl_setrate),
- STD_IW_HANDLER(SIOCGIWRATE, orinoco_ioctl_getrate),
- STD_IW_HANDLER(SIOCSIWRTS, orinoco_ioctl_setrts),
- STD_IW_HANDLER(SIOCGIWRTS, orinoco_ioctl_getrts),
- STD_IW_HANDLER(SIOCSIWFRAG, orinoco_ioctl_setfrag),
- STD_IW_HANDLER(SIOCGIWFRAG, orinoco_ioctl_getfrag),
- STD_IW_HANDLER(SIOCGIWRETRY, orinoco_ioctl_getretry),
- STD_IW_HANDLER(SIOCSIWENCODE, orinoco_ioctl_setiwencode),
- STD_IW_HANDLER(SIOCGIWENCODE, orinoco_ioctl_getiwencode),
- STD_IW_HANDLER(SIOCSIWPOWER, orinoco_ioctl_setpower),
- STD_IW_HANDLER(SIOCGIWPOWER, orinoco_ioctl_getpower),
- STD_IW_HANDLER(SIOCSIWGENIE, orinoco_ioctl_set_genie),
- STD_IW_HANDLER(SIOCGIWGENIE, orinoco_ioctl_get_genie),
- STD_IW_HANDLER(SIOCSIWMLME, orinoco_ioctl_set_mlme),
- STD_IW_HANDLER(SIOCSIWAUTH, orinoco_ioctl_set_auth),
- STD_IW_HANDLER(SIOCGIWAUTH, orinoco_ioctl_get_auth),
- STD_IW_HANDLER(SIOCSIWENCODEEXT, orinoco_ioctl_set_encodeext),
- STD_IW_HANDLER(SIOCGIWENCODEEXT, orinoco_ioctl_get_encodeext),
+ IW_HANDLER(SIOCSIWCOMMIT, (iw_handler)orinoco_ioctl_commit),
+ IW_HANDLER(SIOCGIWNAME, (iw_handler)cfg80211_wext_giwname),
+ IW_HANDLER(SIOCSIWFREQ, (iw_handler)orinoco_ioctl_setfreq),
+ IW_HANDLER(SIOCGIWFREQ, (iw_handler)orinoco_ioctl_getfreq),
+ IW_HANDLER(SIOCSIWMODE, (iw_handler)cfg80211_wext_siwmode),
+ IW_HANDLER(SIOCGIWMODE, (iw_handler)cfg80211_wext_giwmode),
+ IW_HANDLER(SIOCSIWSENS, (iw_handler)orinoco_ioctl_setsens),
+ IW_HANDLER(SIOCGIWSENS, (iw_handler)orinoco_ioctl_getsens),
+ IW_HANDLER(SIOCGIWRANGE, (iw_handler)cfg80211_wext_giwrange),
+ IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
+ IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
+ IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
+ IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
+ IW_HANDLER(SIOCSIWAP, (iw_handler)orinoco_ioctl_setwap),
+ IW_HANDLER(SIOCGIWAP, (iw_handler)orinoco_ioctl_getwap),
+ IW_HANDLER(SIOCSIWSCAN, (iw_handler)cfg80211_wext_siwscan),
+ IW_HANDLER(SIOCGIWSCAN, (iw_handler)cfg80211_wext_giwscan),
+ IW_HANDLER(SIOCSIWESSID, (iw_handler)orinoco_ioctl_setessid),
+ IW_HANDLER(SIOCGIWESSID, (iw_handler)orinoco_ioctl_getessid),
+ IW_HANDLER(SIOCSIWRATE, (iw_handler)orinoco_ioctl_setrate),
+ IW_HANDLER(SIOCGIWRATE, (iw_handler)orinoco_ioctl_getrate),
+ IW_HANDLER(SIOCSIWRTS, (iw_handler)cfg80211_wext_siwrts),
+ IW_HANDLER(SIOCGIWRTS, (iw_handler)cfg80211_wext_giwrts),
+ IW_HANDLER(SIOCSIWFRAG, (iw_handler)cfg80211_wext_siwfrag),
+ IW_HANDLER(SIOCGIWFRAG, (iw_handler)cfg80211_wext_giwfrag),
+ IW_HANDLER(SIOCGIWRETRY, (iw_handler)cfg80211_wext_giwretry),
+ IW_HANDLER(SIOCSIWENCODE, (iw_handler)orinoco_ioctl_setiwencode),
+ IW_HANDLER(SIOCGIWENCODE, (iw_handler)orinoco_ioctl_getiwencode),
+ IW_HANDLER(SIOCSIWPOWER, (iw_handler)orinoco_ioctl_setpower),
+ IW_HANDLER(SIOCGIWPOWER, (iw_handler)orinoco_ioctl_getpower),
+ IW_HANDLER(SIOCSIWGENIE, orinoco_ioctl_set_genie),
+ IW_HANDLER(SIOCGIWGENIE, orinoco_ioctl_get_genie),
+ IW_HANDLER(SIOCSIWMLME, orinoco_ioctl_set_mlme),
+ IW_HANDLER(SIOCSIWAUTH, orinoco_ioctl_set_auth),
+ IW_HANDLER(SIOCGIWAUTH, orinoco_ioctl_get_auth),
+ IW_HANDLER(SIOCSIWENCODEEXT, orinoco_ioctl_set_encodeext),
+ IW_HANDLER(SIOCGIWENCODEEXT, orinoco_ioctl_get_encodeext),
};
@@ -1553,15 +1378,15 @@ static const iw_handler orinoco_handler[] = {
Added typecasting since we no longer use iwreq_data -- Moustafa
*/
static const iw_handler orinoco_private_handler[] = {
- [0] = (iw_handler) orinoco_ioctl_reset,
- [1] = (iw_handler) orinoco_ioctl_reset,
- [2] = (iw_handler) orinoco_ioctl_setport3,
- [3] = (iw_handler) orinoco_ioctl_getport3,
- [4] = (iw_handler) orinoco_ioctl_setpreamble,
- [5] = (iw_handler) orinoco_ioctl_getpreamble,
- [6] = (iw_handler) orinoco_ioctl_setibssport,
- [7] = (iw_handler) orinoco_ioctl_getibssport,
- [9] = (iw_handler) orinoco_ioctl_getrid,
+ [0] = (iw_handler)orinoco_ioctl_reset,
+ [1] = (iw_handler)orinoco_ioctl_reset,
+ [2] = (iw_handler)orinoco_ioctl_setport3,
+ [3] = (iw_handler)orinoco_ioctl_getport3,
+ [4] = (iw_handler)orinoco_ioctl_setpreamble,
+ [5] = (iw_handler)orinoco_ioctl_getpreamble,
+ [6] = (iw_handler)orinoco_ioctl_setibssport,
+ [7] = (iw_handler)orinoco_ioctl_getibssport,
+ [9] = (iw_handler)orinoco_ioctl_getrid,
};
const struct iw_handler_def orinoco_handler_def = {
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index a7cb9eb759a1..c072f41747ca 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -546,7 +546,7 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_PS_NULLFUNC_STACK |
IEEE80211_HW_BEACON_FILTER |
- IEEE80211_HW_NOISE_DBM;
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS;
dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) |
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index c24067f1a0cb..07c4528f6e6b 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -132,7 +132,7 @@ static int p54p_upload_firmware(struct ieee80211_hw *dev)
static void p54p_refill_rx_ring(struct ieee80211_hw *dev,
int ring_index, struct p54p_desc *ring, u32 ring_limit,
- struct sk_buff **rx_buf)
+ struct sk_buff **rx_buf, u32 index)
{
struct p54p_priv *priv = dev->priv;
struct p54p_ring_control *ring_control = priv->ring_control;
@@ -140,7 +140,7 @@ static void p54p_refill_rx_ring(struct ieee80211_hw *dev,
idx = le32_to_cpu(ring_control->host_idx[ring_index]);
limit = idx;
- limit -= le32_to_cpu(ring_control->device_idx[ring_index]);
+ limit -= index;
limit = ring_limit - limit;
i = idx % ring_limit;
@@ -232,7 +232,7 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
i %= ring_limit;
}
- p54p_refill_rx_ring(dev, ring_index, ring, ring_limit, rx_buf);
+ p54p_refill_rx_ring(dev, ring_index, ring, ring_limit, rx_buf, *index);
}
static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
@@ -445,10 +445,10 @@ static int p54p_open(struct ieee80211_hw *dev)
priv->rx_idx_mgmt = priv->tx_idx_mgmt = 0;
p54p_refill_rx_ring(dev, 0, priv->ring_control->rx_data,
- ARRAY_SIZE(priv->ring_control->rx_data), priv->rx_buf_data);
+ ARRAY_SIZE(priv->ring_control->rx_data), priv->rx_buf_data, 0);
p54p_refill_rx_ring(dev, 2, priv->ring_control->rx_mgmt,
- ARRAY_SIZE(priv->ring_control->rx_mgmt), priv->rx_buf_mgmt);
+ ARRAY_SIZE(priv->ring_control->rx_mgmt), priv->rx_buf_mgmt, 0);
P54P_WRITE(ring_control_base, cpu_to_le32(priv->ring_control_dma));
P54P_READ(ring_control_base);
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 66057999a93c..4e6891099d43 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -38,7 +38,7 @@ static void p54_dump_tx_queue(struct p54_common *priv)
u32 largest_hole = 0, free;
spin_lock_irqsave(&priv->tx_queue.lock, flags);
- printk(KERN_DEBUG "%s: / --- tx queue dump (%d entries) --- \n",
+ printk(KERN_DEBUG "%s: / --- tx queue dump (%d entries) ---\n",
wiphy_name(priv->hw->wiphy), skb_queue_len(&priv->tx_queue));
prev_addr = priv->rx_start;
@@ -350,7 +350,6 @@ static int p54_rx_data(struct p54_common *priv, struct sk_buff *skb)
rx_status->flag |= RX_FLAG_MMIC_ERROR;
rx_status->signal = p54_rssi_to_dbm(priv, hdr->rssi);
- rx_status->noise = priv->noise;
if (hdr->rate & 0x10)
rx_status->flag |= RX_FLAG_SHORTPRE;
if (priv->hw->conf.channel->band == IEEE80211_BAND_5GHZ)
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 689d59a13d5b..10d91afefa33 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -228,14 +228,14 @@ islpci_interrupt(int irq, void *config)
#if VERBOSE > SHOW_ERROR_MESSAGES
DEBUG(SHOW_FUNCTION_CALLS,
- "IRQ: Identification register 0x%p 0x%x \n", device, reg);
+ "IRQ: Identification register 0x%p 0x%x\n", device, reg);
#endif
/* check for each bit in the register separately */
if (reg & ISL38XX_INT_IDENT_UPDATE) {
#if VERBOSE > SHOW_ERROR_MESSAGES
/* Queue has been updated */
- DEBUG(SHOW_TRACING, "IRQ: Update flag \n");
+ DEBUG(SHOW_TRACING, "IRQ: Update flag\n");
DEBUG(SHOW_QUEUE_INDEXES,
"CB drv Qs: [%i][%i][%i][%i][%i][%i]\n",
@@ -301,7 +301,7 @@ islpci_interrupt(int irq, void *config)
ISL38XX_CB_RX_DATA_LQ) != 0) {
#if VERBOSE > SHOW_ERROR_MESSAGES
DEBUG(SHOW_TRACING,
- "Received frame in Data Low Queue \n");
+ "Received frame in Data Low Queue\n");
#endif
islpci_eth_receive(priv);
}
@@ -326,7 +326,7 @@ islpci_interrupt(int irq, void *config)
/* Device has been initialized */
#if VERBOSE > SHOW_ERROR_MESSAGES
DEBUG(SHOW_TRACING,
- "IRQ: Init flag, device initialized \n");
+ "IRQ: Init flag, device initialized\n");
#endif
wake_up(&priv->reset_done);
}
@@ -334,7 +334,7 @@ islpci_interrupt(int irq, void *config)
if (reg & ISL38XX_INT_IDENT_SLEEP) {
/* Device intends to move to powersave state */
#if VERBOSE > SHOW_ERROR_MESSAGES
- DEBUG(SHOW_TRACING, "IRQ: Sleep flag \n");
+ DEBUG(SHOW_TRACING, "IRQ: Sleep flag\n");
#endif
isl38xx_handle_sleep_request(priv->control_block,
&powerstate,
@@ -344,7 +344,7 @@ islpci_interrupt(int irq, void *config)
if (reg & ISL38XX_INT_IDENT_WAKEUP) {
/* Device has been woken up to active state */
#if VERBOSE > SHOW_ERROR_MESSAGES
- DEBUG(SHOW_TRACING, "IRQ: Wakeup flag \n");
+ DEBUG(SHOW_TRACING, "IRQ: Wakeup flag\n");
#endif
isl38xx_handle_wakeup(priv->control_block,
@@ -635,7 +635,7 @@ islpci_alloc_memory(islpci_private *priv)
ioremap(pci_resource_start(priv->pdev, 0),
ISL38XX_PCI_MEM_SIZE))) {
/* error in remapping the PCI device memory address range */
- printk(KERN_ERR "PCI memory remapping failed \n");
+ printk(KERN_ERR "PCI memory remapping failed\n");
return -1;
}
@@ -902,7 +902,7 @@ islpci_setup(struct pci_dev *pdev)
if (register_netdev(ndev)) {
DEBUG(SHOW_ERROR_MESSAGES,
- "ERROR: register_netdev() failed \n");
+ "ERROR: register_netdev() failed\n");
goto do_islpci_free_memory;
}
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c
index ac99eaaeabce..2fc52bc2d7dd 100644
--- a/drivers/net/wireless/prism54/islpci_eth.c
+++ b/drivers/net/wireless/prism54/islpci_eth.c
@@ -90,7 +90,7 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
u32 curr_frag;
#if VERBOSE > SHOW_ERROR_MESSAGES
- DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_transmit \n");
+ DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_transmit\n");
#endif
/* lock the driver code */
@@ -141,7 +141,7 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
}
#if VERBOSE > SHOW_ERROR_MESSAGES
- DEBUG(SHOW_TRACING, "memmove %p %p %i \n", skb->data,
+ DEBUG(SHOW_TRACING, "memmove %p %p %i\n", skb->data,
src, skb->len);
#endif
} else {
@@ -224,8 +224,6 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
priv->data_low_tx_full = 1;
}
- /* set the transmission time */
- ndev->trans_start = jiffies;
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += skb->len;
@@ -320,7 +318,7 @@ islpci_eth_receive(islpci_private *priv)
int discard = 0;
#if VERBOSE > SHOW_ERROR_MESSAGES
- DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_receive \n");
+ DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_receive\n");
#endif
/* the device has written an Ethernet frame in the data area
@@ -432,7 +430,7 @@ islpci_eth_receive(islpci_private *priv)
skb = dev_alloc_skb(MAX_FRAGMENT_SIZE_RX + 2);
if (unlikely(skb == NULL)) {
/* error allocating an sk_buff structure elements */
- DEBUG(SHOW_ERROR_MESSAGES, "Error allocating skb \n");
+ DEBUG(SHOW_ERROR_MESSAGES, "Error allocating skb\n");
break;
}
skb_reserve(skb, (4 - (long) skb->data) & 0x03);
diff --git a/drivers/net/wireless/prism54/islpci_mgt.c b/drivers/net/wireless/prism54/islpci_mgt.c
index adb289723a96..a5224f6160e4 100644
--- a/drivers/net/wireless/prism54/islpci_mgt.c
+++ b/drivers/net/wireless/prism54/islpci_mgt.c
@@ -114,7 +114,7 @@ islpci_mgmt_rx_fill(struct net_device *ndev)
u32 curr = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_RX_MGMTQ]);
#if VERBOSE > SHOW_ERROR_MESSAGES
- DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgmt_rx_fill \n");
+ DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgmt_rx_fill\n");
#endif
while (curr - priv->index_mgmt_rx < ISL38XX_CB_MGMT_QSIZE) {
@@ -212,7 +212,7 @@ islpci_mgt_transmit(struct net_device *ndev, int operation, unsigned long oid,
{
pimfor_header_t *h = buf.mem;
DEBUG(SHOW_PIMFOR_FRAMES,
- "PIMFOR: op %i, oid 0x%08lx, device %i, flags 0x%x length 0x%x \n",
+ "PIMFOR: op %i, oid 0x%08lx, device %i, flags 0x%x length 0x%x\n",
h->operation, oid, h->device_id, h->flags, length);
/* display the buffer contents for debugging */
@@ -280,7 +280,7 @@ islpci_mgt_receive(struct net_device *ndev)
u32 curr_frag;
#if VERBOSE > SHOW_ERROR_MESSAGES
- DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_receive \n");
+ DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_receive\n");
#endif
/* Only once per interrupt, determine fragment range to
@@ -339,7 +339,7 @@ islpci_mgt_receive(struct net_device *ndev)
#if VERBOSE > SHOW_ERROR_MESSAGES
DEBUG(SHOW_PIMFOR_FRAMES,
- "PIMFOR: op %i, oid 0x%08x, device %i, flags 0x%x length 0x%x \n",
+ "PIMFOR: op %i, oid 0x%08x, device %i, flags 0x%x length 0x%x\n",
header->operation, header->oid, header->device_id,
header->flags, header->length);
diff --git a/drivers/net/wireless/prism54/oid_mgt.c b/drivers/net/wireless/prism54/oid_mgt.c
index d66933d70fb9..9b796cae4afe 100644
--- a/drivers/net/wireless/prism54/oid_mgt.c
+++ b/drivers/net/wireless/prism54/oid_mgt.c
@@ -820,7 +820,7 @@ mgt_response_to_str(enum oid_num_t n, union oid_res_t *r, char *str)
k = snprintf(str, PRIV_STR_SIZE, "nr=%u\n", list->nr);
for (i = 0; i < list->nr; i++)
k += snprintf(str + k, PRIV_STR_SIZE - k,
- "bss[%u] : \nage=%u\nchannel=%u\n"
+ "bss[%u] :\nage=%u\nchannel=%u\n"
"capinfo=0x%X\nrates=0x%X\n"
"basic_rates=0x%X\n",
i, list->bsslist[i].age,
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 11865ea21875..3f50ce4bc9c2 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -51,7 +51,6 @@
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/ds.h>
-#include <pcmcia/mem_op.h>
#include <linux/wireless.h>
#include <net/iw_handler.h>
@@ -321,10 +320,6 @@ static int ray_probe(struct pcmcia_device *p_dev)
p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
p_dev->io.IOAddrLines = 5;
- /* Interrupt setup. For PCMCIA, driver takes what's given */
- p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- p_dev->irq.Handler = &ray_interrupt;
-
/* General socket configuration */
p_dev->conf.Attributes = CONF_ENABLE_IRQ;
p_dev->conf.IntType = INT_MEMORY_AND_IO;
@@ -383,8 +378,7 @@ static void ray_detach(struct pcmcia_device *link)
del_timer(&local->timer);
if (link->priv) {
- if (link->dev_node)
- unregister_netdev(dev);
+ unregister_netdev(dev);
free_netdev(dev);
}
dev_dbg(&link->dev, "ray_cs ray_detach ending\n");
@@ -417,10 +411,10 @@ static int ray_config(struct pcmcia_device *link)
/* Now allocate an interrupt line. Note that this does not
actually assign a handler to the interrupt.
*/
- ret = pcmcia_request_irq(link, &link->irq);
+ ret = pcmcia_request_irq(link, ray_interrupt);
if (ret)
goto failed;
- dev->irq = link->irq.AssignedIRQ;
+ dev->irq = link->irq;
/* This actually configures the PCMCIA socket -- setting up
the I/O windows and the interrupt mapping.
@@ -493,9 +487,6 @@ static int ray_config(struct pcmcia_device *link)
return i;
}
- strcpy(local->node.dev_name, dev->name);
- link->dev_node = &local->node;
-
printk(KERN_INFO "%s: RayLink, irq %d, hw_addr %pM\n",
dev->name, dev->irq, dev->dev_addr);
@@ -555,7 +546,7 @@ static int ray_init(struct net_device *dev)
local->fw_ver = local->startup_res.firmware_version[0];
local->fw_bld = local->startup_res.firmware_version[1];
local->fw_var = local->startup_res.firmware_version[2];
- dev_dbg(&link->dev, "ray_init firmware version %d.%d \n", local->fw_ver,
+ dev_dbg(&link->dev, "ray_init firmware version %d.%d\n", local->fw_ver,
local->fw_bld);
local->tib_length = 0x20;
@@ -941,7 +932,6 @@ static netdev_tx_t ray_dev_start_xmit(struct sk_buff *skb,
case XMIT_MSG_BAD:
case XMIT_OK:
default:
- dev->trans_start = jiffies;
dev_kfree_skb(skb);
}
@@ -1112,10 +1102,10 @@ static const struct ethtool_ops netdev_ethtool_ops = {
/*
* Wireless Handler : get protocol name
*/
-static int ray_get_name(struct net_device *dev,
- struct iw_request_info *info, char *cwrq, char *extra)
+static int ray_get_name(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
- strcpy(cwrq, "IEEE 802.11-FH");
+ strcpy(wrqu->name, "IEEE 802.11-FH");
return 0;
}
@@ -1123,9 +1113,8 @@ static int ray_get_name(struct net_device *dev,
/*
* Wireless Handler : set frequency
*/
-static int ray_set_freq(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_freq *fwrq, char *extra)
+static int ray_set_freq(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
int err = -EINPROGRESS; /* Call commit handler */
@@ -1135,10 +1124,10 @@ static int ray_set_freq(struct net_device *dev,
return -EBUSY;
/* Setting by channel number */
- if ((fwrq->m > USA_HOP_MOD) || (fwrq->e > 0))
+ if ((wrqu->freq.m > USA_HOP_MOD) || (wrqu->freq.e > 0))
err = -EOPNOTSUPP;
else
- local->sparm.b5.a_hop_pattern = fwrq->m;
+ local->sparm.b5.a_hop_pattern = wrqu->freq.m;
return err;
}
@@ -1147,14 +1136,13 @@ static int ray_set_freq(struct net_device *dev,
/*
* Wireless Handler : get frequency
*/
-static int ray_get_freq(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_freq *fwrq, char *extra)
+static int ray_get_freq(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
- fwrq->m = local->sparm.b5.a_hop_pattern;
- fwrq->e = 0;
+ wrqu->freq.m = local->sparm.b5.a_hop_pattern;
+ wrqu->freq.e = 0;
return 0;
}
@@ -1162,9 +1150,8 @@ static int ray_get_freq(struct net_device *dev,
/*
* Wireless Handler : set ESSID
*/
-static int ray_set_essid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
+static int ray_set_essid(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
@@ -1173,19 +1160,17 @@ static int ray_set_essid(struct net_device *dev,
return -EBUSY;
/* Check if we asked for `any' */
- if (dwrq->flags == 0) {
+ if (wrqu->essid.flags == 0)
/* Corey : can you do that ? */
return -EOPNOTSUPP;
- } else {
- /* Check the size of the string */
- if (dwrq->length > IW_ESSID_MAX_SIZE) {
- return -E2BIG;
- }
- /* Set the ESSID in the card */
- memset(local->sparm.b5.a_current_ess_id, 0, IW_ESSID_MAX_SIZE);
- memcpy(local->sparm.b5.a_current_ess_id, extra, dwrq->length);
- }
+ /* Check the size of the string */
+ if (wrqu->essid.length > IW_ESSID_MAX_SIZE)
+ return -E2BIG;
+
+ /* Set the ESSID in the card */
+ memset(local->sparm.b5.a_current_ess_id, 0, IW_ESSID_MAX_SIZE);
+ memcpy(local->sparm.b5.a_current_ess_id, extra, wrqu->essid.length);
return -EINPROGRESS; /* Call commit handler */
}
@@ -1194,9 +1179,8 @@ static int ray_set_essid(struct net_device *dev,
/*
* Wireless Handler : get ESSID
*/
-static int ray_get_essid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
+static int ray_get_essid(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
@@ -1204,8 +1188,8 @@ static int ray_get_essid(struct net_device *dev,
memcpy(extra, local->sparm.b5.a_current_ess_id, IW_ESSID_MAX_SIZE);
/* Push it out ! */
- dwrq->length = strlen(extra);
- dwrq->flags = 1; /* active */
+ wrqu->essid.length = strlen(extra);
+ wrqu->essid.flags = 1; /* active */
return 0;
}
@@ -1214,14 +1198,13 @@ static int ray_get_essid(struct net_device *dev,
/*
* Wireless Handler : get AP address
*/
-static int ray_get_wap(struct net_device *dev,
- struct iw_request_info *info,
- struct sockaddr *awrq, char *extra)
+static int ray_get_wap(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
- memcpy(awrq->sa_data, local->bss_id, ETH_ALEN);
- awrq->sa_family = ARPHRD_ETHER;
+ memcpy(wrqu->ap_addr.sa_data, local->bss_id, ETH_ALEN);
+ wrqu->ap_addr.sa_family = ARPHRD_ETHER;
return 0;
}
@@ -1230,9 +1213,8 @@ static int ray_get_wap(struct net_device *dev,
/*
* Wireless Handler : set Bit-Rate
*/
-static int ray_set_rate(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
+static int ray_set_rate(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
@@ -1241,15 +1223,15 @@ static int ray_set_rate(struct net_device *dev,
return -EBUSY;
/* Check if rate is in range */
- if ((vwrq->value != 1000000) && (vwrq->value != 2000000))
+ if ((wrqu->bitrate.value != 1000000) && (wrqu->bitrate.value != 2000000))
return -EINVAL;
/* Hack for 1.5 Mb/s instead of 2 Mb/s */
if ((local->fw_ver == 0x55) && /* Please check */
- (vwrq->value == 2000000))
+ (wrqu->bitrate.value == 2000000))
local->net_default_tx_rate = 3;
else
- local->net_default_tx_rate = vwrq->value / 500000;
+ local->net_default_tx_rate = wrqu->bitrate.value / 500000;
return 0;
}
@@ -1258,17 +1240,16 @@ static int ray_set_rate(struct net_device *dev,
/*
* Wireless Handler : get Bit-Rate
*/
-static int ray_get_rate(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
+static int ray_get_rate(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
if (local->net_default_tx_rate == 3)
- vwrq->value = 2000000; /* Hum... */
+ wrqu->bitrate.value = 2000000; /* Hum... */
else
- vwrq->value = local->net_default_tx_rate * 500000;
- vwrq->fixed = 0; /* We are in auto mode */
+ wrqu->bitrate.value = local->net_default_tx_rate * 500000;
+ wrqu->bitrate.fixed = 0; /* We are in auto mode */
return 0;
}
@@ -1277,19 +1258,18 @@ static int ray_get_rate(struct net_device *dev,
/*
* Wireless Handler : set RTS threshold
*/
-static int ray_set_rts(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
+static int ray_set_rts(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
- int rthr = vwrq->value;
+ int rthr = wrqu->rts.value;
/* Reject if card is already initialised */
if (local->card_status != CARD_AWAITING_PARAM)
return -EBUSY;
/* if(wrq->u.rts.fixed == 0) we should complain */
- if (vwrq->disabled)
+ if (wrqu->rts.disabled)
rthr = 32767;
else {
if ((rthr < 0) || (rthr > 2347)) /* What's the max packet size ??? */
@@ -1305,16 +1285,15 @@ static int ray_set_rts(struct net_device *dev,
/*
* Wireless Handler : get RTS threshold
*/
-static int ray_get_rts(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
+static int ray_get_rts(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
- vwrq->value = (local->sparm.b5.a_rts_threshold[0] << 8)
+ wrqu->rts.value = (local->sparm.b5.a_rts_threshold[0] << 8)
+ local->sparm.b5.a_rts_threshold[1];
- vwrq->disabled = (vwrq->value == 32767);
- vwrq->fixed = 1;
+ wrqu->rts.disabled = (wrqu->rts.value == 32767);
+ wrqu->rts.fixed = 1;
return 0;
}
@@ -1323,19 +1302,18 @@ static int ray_get_rts(struct net_device *dev,
/*
* Wireless Handler : set Fragmentation threshold
*/
-static int ray_set_frag(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
+static int ray_set_frag(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
- int fthr = vwrq->value;
+ int fthr = wrqu->frag.value;
/* Reject if card is already initialised */
if (local->card_status != CARD_AWAITING_PARAM)
return -EBUSY;
/* if(wrq->u.frag.fixed == 0) should complain */
- if (vwrq->disabled)
+ if (wrqu->frag.disabled)
fthr = 32767;
else {
if ((fthr < 256) || (fthr > 2347)) /* To check out ! */
@@ -1351,16 +1329,15 @@ static int ray_set_frag(struct net_device *dev,
/*
* Wireless Handler : get Fragmentation threshold
*/
-static int ray_get_frag(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
+static int ray_get_frag(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
- vwrq->value = (local->sparm.b5.a_frag_threshold[0] << 8)
+ wrqu->frag.value = (local->sparm.b5.a_frag_threshold[0] << 8)
+ local->sparm.b5.a_frag_threshold[1];
- vwrq->disabled = (vwrq->value == 32767);
- vwrq->fixed = 1;
+ wrqu->frag.disabled = (wrqu->frag.value == 32767);
+ wrqu->frag.fixed = 1;
return 0;
}
@@ -1369,8 +1346,8 @@ static int ray_get_frag(struct net_device *dev,
/*
* Wireless Handler : set Mode of Operation
*/
-static int ray_set_mode(struct net_device *dev,
- struct iw_request_info *info, __u32 *uwrq, char *extra)
+static int ray_set_mode(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
int err = -EINPROGRESS; /* Call commit handler */
@@ -1380,7 +1357,7 @@ static int ray_set_mode(struct net_device *dev,
if (local->card_status != CARD_AWAITING_PARAM)
return -EBUSY;
- switch (*uwrq) {
+ switch (wrqu->mode) {
case IW_MODE_ADHOC:
card_mode = 0;
/* Fall through */
@@ -1398,15 +1375,15 @@ static int ray_set_mode(struct net_device *dev,
/*
* Wireless Handler : get Mode of Operation
*/
-static int ray_get_mode(struct net_device *dev,
- struct iw_request_info *info, __u32 *uwrq, char *extra)
+static int ray_get_mode(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
if (local->sparm.b5.a_network_type)
- *uwrq = IW_MODE_INFRA;
+ wrqu->mode = IW_MODE_INFRA;
else
- *uwrq = IW_MODE_ADHOC;
+ wrqu->mode = IW_MODE_ADHOC;
return 0;
}
@@ -1415,16 +1392,15 @@ static int ray_get_mode(struct net_device *dev,
/*
* Wireless Handler : get range info
*/
-static int ray_get_range(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
+static int ray_get_range(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct iw_range *range = (struct iw_range *)extra;
- memset((char *)range, 0, sizeof(struct iw_range));
+ memset(range, 0, sizeof(struct iw_range));
/* Set the length (very important for backward compatibility) */
- dwrq->length = sizeof(struct iw_range);
+ wrqu->data.length = sizeof(struct iw_range);
/* Set the Wireless Extension versions */
range->we_version_compiled = WIRELESS_EXT;
@@ -1447,8 +1423,7 @@ static int ray_get_range(struct net_device *dev,
/*
* Wireless Private Handler : set framing mode
*/
-static int ray_set_framing(struct net_device *dev,
- struct iw_request_info *info,
+static int ray_set_framing(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
translate = *(extra); /* Set framing mode */
@@ -1460,8 +1435,7 @@ static int ray_set_framing(struct net_device *dev,
/*
* Wireless Private Handler : get framing mode
*/
-static int ray_get_framing(struct net_device *dev,
- struct iw_request_info *info,
+static int ray_get_framing(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
*(extra) = translate;
@@ -1473,8 +1447,7 @@ static int ray_get_framing(struct net_device *dev,
/*
* Wireless Private Handler : get country
*/
-static int ray_get_country(struct net_device *dev,
- struct iw_request_info *info,
+static int ray_get_country(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
*(extra) = country;
@@ -1486,10 +1459,9 @@ static int ray_get_country(struct net_device *dev,
/*
* Commit handler : called after a bunch of SET operations
*/
-static int ray_commit(struct net_device *dev, struct iw_request_info *info, /* NULL */
- void *zwrq, /* NULL */
- char *extra)
-{ /* NULL */
+static int ray_commit(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
return 0;
}
@@ -1530,28 +1502,28 @@ static iw_stats *ray_get_wireless_stats(struct net_device *dev)
*/
static const iw_handler ray_handler[] = {
- [SIOCSIWCOMMIT - SIOCIWFIRST] = (iw_handler) ray_commit,
- [SIOCGIWNAME - SIOCIWFIRST] = (iw_handler) ray_get_name,
- [SIOCSIWFREQ - SIOCIWFIRST] = (iw_handler) ray_set_freq,
- [SIOCGIWFREQ - SIOCIWFIRST] = (iw_handler) ray_get_freq,
- [SIOCSIWMODE - SIOCIWFIRST] = (iw_handler) ray_set_mode,
- [SIOCGIWMODE - SIOCIWFIRST] = (iw_handler) ray_get_mode,
- [SIOCGIWRANGE - SIOCIWFIRST] = (iw_handler) ray_get_range,
+ IW_HANDLER(SIOCSIWCOMMIT, ray_commit),
+ IW_HANDLER(SIOCGIWNAME, ray_get_name),
+ IW_HANDLER(SIOCSIWFREQ, ray_set_freq),
+ IW_HANDLER(SIOCGIWFREQ, ray_get_freq),
+ IW_HANDLER(SIOCSIWMODE, ray_set_mode),
+ IW_HANDLER(SIOCGIWMODE, ray_get_mode),
+ IW_HANDLER(SIOCGIWRANGE, ray_get_range),
#ifdef WIRELESS_SPY
- [SIOCSIWSPY - SIOCIWFIRST] = (iw_handler) iw_handler_set_spy,
- [SIOCGIWSPY - SIOCIWFIRST] = (iw_handler) iw_handler_get_spy,
- [SIOCSIWTHRSPY - SIOCIWFIRST] = (iw_handler) iw_handler_set_thrspy,
- [SIOCGIWTHRSPY - SIOCIWFIRST] = (iw_handler) iw_handler_get_thrspy,
+ IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
+ IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
+ IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
+ IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
#endif /* WIRELESS_SPY */
- [SIOCGIWAP - SIOCIWFIRST] = (iw_handler) ray_get_wap,
- [SIOCSIWESSID - SIOCIWFIRST] = (iw_handler) ray_set_essid,
- [SIOCGIWESSID - SIOCIWFIRST] = (iw_handler) ray_get_essid,
- [SIOCSIWRATE - SIOCIWFIRST] = (iw_handler) ray_set_rate,
- [SIOCGIWRATE - SIOCIWFIRST] = (iw_handler) ray_get_rate,
- [SIOCSIWRTS - SIOCIWFIRST] = (iw_handler) ray_set_rts,
- [SIOCGIWRTS - SIOCIWFIRST] = (iw_handler) ray_get_rts,
- [SIOCSIWFRAG - SIOCIWFIRST] = (iw_handler) ray_set_frag,
- [SIOCGIWFRAG - SIOCIWFIRST] = (iw_handler) ray_get_frag,
+ IW_HANDLER(SIOCGIWAP, ray_get_wap),
+ IW_HANDLER(SIOCSIWESSID, ray_set_essid),
+ IW_HANDLER(SIOCGIWESSID, ray_get_essid),
+ IW_HANDLER(SIOCSIWRATE, ray_set_rate),
+ IW_HANDLER(SIOCGIWRATE, ray_get_rate),
+ IW_HANDLER(SIOCSIWRTS, ray_set_rts),
+ IW_HANDLER(SIOCGIWRTS, ray_get_rts),
+ IW_HANDLER(SIOCSIWFRAG, ray_set_frag),
+ IW_HANDLER(SIOCGIWFRAG, ray_get_frag),
};
#define SIOCSIPFRAMING SIOCIWFIRSTPRIV /* Set framing mode */
@@ -1559,9 +1531,9 @@ static const iw_handler ray_handler[] = {
#define SIOCGIPCOUNTRY SIOCIWFIRSTPRIV + 3 /* Get country code */
static const iw_handler ray_private_handler[] = {
- [0] = (iw_handler) ray_set_framing,
- [1] = (iw_handler) ray_get_framing,
- [3] = (iw_handler) ray_get_country,
+ [0] = ray_set_framing,
+ [1] = ray_get_framing,
+ [3] = ray_get_country,
};
static const struct iw_priv_args ray_private_args[] = {
@@ -1892,17 +1864,17 @@ static void ray_update_multi_list(struct net_device *dev, int all)
writeb(0xff, &pccs->var);
local->num_multi = 0xff;
} else {
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int i = 0;
/* Copy the kernel's list of MC addresses to card */
- netdev_for_each_mc_addr(dmi, dev) {
- memcpy_toio(p, dmi->dmi_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, dev) {
+ memcpy_toio(p, ha->addr, ETH_ALEN);
dev_dbg(&link->dev,
"ray_update_multi add addr %02x%02x%02x%02x%02x%02x\n",
- dmi->dmi_addr[0], dmi->dmi_addr[1],
- dmi->dmi_addr[2], dmi->dmi_addr[3],
- dmi->dmi_addr[4], dmi->dmi_addr[5]);
+ ha->addr[0], ha->addr[1],
+ ha->addr[2], ha->addr[3],
+ ha->addr[4], ha->addr[5]);
p += ETH_ALEN;
i++;
}
@@ -2251,7 +2223,7 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs,
(dev->mtu + RX_MAC_HEADER_LENGTH + ETH_HLEN +
FCS_LEN)) {
pr_debug(
- "ray_cs invalid packet length %d received \n",
+ "ray_cs invalid packet length %d received\n",
rx_len);
return;
}
@@ -2262,7 +2234,7 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs,
(dev->mtu + RX_MAC_HEADER_LENGTH + ETH_HLEN +
FCS_LEN)) {
pr_debug(
- "ray_cs invalid packet length %d received \n",
+ "ray_cs invalid packet length %d received\n",
rx_len);
return;
}
@@ -2770,11 +2742,11 @@ static int ray_cs_proc_show(struct seq_file *m, void *v)
seq_printf(m, "Hop dwell = %d Kus\n",
pfh->dwell_time[0] +
256 * pfh->dwell_time[1]);
- seq_printf(m, "Hop set = %d \n",
+ seq_printf(m, "Hop set = %d\n",
pfh->hop_set);
- seq_printf(m, "Hop pattern = %d \n",
+ seq_printf(m, "Hop pattern = %d\n",
pfh->hop_pattern);
- seq_printf(m, "Hop index = %d \n",
+ seq_printf(m, "Hop index = %d\n",
pfh->hop_index);
p += p[1] + 2;
} else {
diff --git a/drivers/net/wireless/ray_cs.h b/drivers/net/wireless/ray_cs.h
index 1e23b7f4cca7..9f01ddb19748 100644
--- a/drivers/net/wireless/ray_cs.h
+++ b/drivers/net/wireless/ray_cs.h
@@ -25,7 +25,6 @@ struct beacon_rx {
typedef struct ray_dev_t {
int card_status;
int authentication_state;
- dev_node_t node;
window_handle_t amem_handle; /* handle to window for attribute memory */
window_handle_t rmem_handle; /* handle to window for rx buffer on card */
void __iomem *sram; /* pointer to beginning of shared RAM */
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 1de5b22d3efe..2d2890878dea 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -118,6 +118,7 @@ MODULE_PARM_DESC(workaround_interval,
#define OID_802_11_ADD_KEY cpu_to_le32(0x0d01011d)
#define OID_802_11_REMOVE_KEY cpu_to_le32(0x0d01011e)
#define OID_802_11_ASSOCIATION_INFORMATION cpu_to_le32(0x0d01011f)
+#define OID_802_11_CAPABILITY cpu_to_le32(0x0d010122)
#define OID_802_11_PMKID cpu_to_le32(0x0d010123)
#define OID_802_11_NETWORK_TYPES_SUPPORTED cpu_to_le32(0x0d010203)
#define OID_802_11_NETWORK_TYPE_IN_USE cpu_to_le32(0x0d010204)
@@ -359,6 +360,30 @@ struct ndis_80211_assoc_info {
__le32 offset_resp_ies;
} __attribute__((packed));
+struct ndis_80211_auth_encr_pair {
+ __le32 auth_mode;
+ __le32 encr_mode;
+} __attribute__((packed));
+
+struct ndis_80211_capability {
+ __le32 length;
+ __le32 version;
+ __le32 num_pmkids;
+ __le32 num_auth_encr_pair;
+ struct ndis_80211_auth_encr_pair auth_encr_pair[0];
+} __attribute__((packed));
+
+struct ndis_80211_bssid_info {
+ u8 bssid[6];
+ u8 pmkid[16];
+};
+
+struct ndis_80211_pmkid {
+ __le32 length;
+ __le32 bssid_info_count;
+ struct ndis_80211_bssid_info bssid_info[0];
+};
+
/*
* private data
*/
@@ -477,13 +502,7 @@ struct rndis_wlan_private {
/* encryption stuff */
int encr_tx_key_index;
struct rndis_wlan_encr_key encr_keys[4];
- enum nl80211_auth_type wpa_auth_type;
int wpa_version;
- int wpa_keymgmt;
- int wpa_ie_len;
- u8 *wpa_ie;
- int wpa_cipher_pair;
- int wpa_cipher_group;
u8 command_buffer[COMMAND_BUFFER_SIZE];
};
@@ -516,7 +535,7 @@ static int rndis_join_ibss(struct wiphy *wiphy, struct net_device *dev,
static int rndis_leave_ibss(struct wiphy *wiphy, struct net_device *dev);
-static int rndis_set_channel(struct wiphy *wiphy,
+static int rndis_set_channel(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_channel *chan, enum nl80211_channel_type channel_type);
static int rndis_add_key(struct wiphy *wiphy, struct net_device *netdev,
@@ -535,6 +554,14 @@ static int rndis_get_station(struct wiphy *wiphy, struct net_device *dev,
static int rndis_dump_station(struct wiphy *wiphy, struct net_device *dev,
int idx, u8 *mac, struct station_info *sinfo);
+static int rndis_set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_pmksa *pmksa);
+
+static int rndis_del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_pmksa *pmksa);
+
+static int rndis_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev);
+
static struct cfg80211_ops rndis_config_ops = {
.change_virtual_intf = rndis_change_virtual_intf,
.scan = rndis_scan,
@@ -551,6 +578,9 @@ static struct cfg80211_ops rndis_config_ops = {
.set_default_key = rndis_set_default_key,
.get_station = rndis_get_station,
.dump_station = rndis_dump_station,
+ .set_pmksa = rndis_set_pmksa,
+ .del_pmksa = rndis_del_pmksa,
+ .flush_pmksa = rndis_flush_pmksa,
};
static void *rndis_wiphy_privid = &rndis_wiphy_privid;
@@ -705,6 +735,7 @@ static int rndis_query_oid(struct usbnet *dev, __le32 oid, void *data, int *len)
struct rndis_query_c *get_c;
} u;
int ret, buflen;
+ int resplen, respoffs, copylen;
buflen = *len + sizeof(*u.get);
if (buflen < CONTROL_BUFFER_SIZE)
@@ -734,11 +765,34 @@ static int rndis_query_oid(struct usbnet *dev, __le32 oid, void *data, int *len)
le32_to_cpu(u.get_c->status));
if (ret == 0) {
- memcpy(data, u.buf + le32_to_cpu(u.get_c->offset) + 8, *len);
+ resplen = le32_to_cpu(u.get_c->len);
+ respoffs = le32_to_cpu(u.get_c->offset) + 8;
- ret = le32_to_cpu(u.get_c->len);
- if (ret > *len)
- *len = ret;
+ if (respoffs > buflen) {
+ /* Device returned data offset outside buffer, error. */
+ netdev_dbg(dev->net, "%s(%s): received invalid "
+ "data offset: %d > %d\n", __func__,
+ oid_to_string(oid), respoffs, buflen);
+
+ ret = -EINVAL;
+ goto exit_unlock;
+ }
+
+ if ((resplen + respoffs) > buflen) {
+ /* Device would have returned more data if the buffer had
+ * been big enough. Copy just the bits that we got.
+ */
+ copylen = buflen - respoffs;
+ } else {
+ copylen = resplen;
+ }
+
+ if (copylen > *len)
+ copylen = *len;
+
+ memcpy(data, u.buf + respoffs, copylen);
+
+ *len = resplen;
ret = rndis_error_status(u.get_c->status);
if (ret < 0)
@@ -747,6 +801,7 @@ static int rndis_query_oid(struct usbnet *dev, __le32 oid, void *data, int *len)
le32_to_cpu(u.get_c->status), ret);
}
+exit_unlock:
mutex_unlock(&priv->command_lock);
if (u.buf != priv->command_buffer)
@@ -1092,8 +1147,6 @@ static int set_auth_mode(struct usbnet *usbdev, u32 wpa_version,
}
priv->wpa_version = wpa_version;
- priv->wpa_auth_type = auth_type;
- priv->wpa_keymgmt = keymgmt;
return 0;
}
@@ -1118,7 +1171,6 @@ static int set_priv_filter(struct usbnet *usbdev)
static int set_encr_mode(struct usbnet *usbdev, int pairwise, int groupwise)
{
- struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
__le32 tmp;
int encr_mode, ret;
@@ -1147,8 +1199,6 @@ static int set_encr_mode(struct usbnet *usbdev, int pairwise, int groupwise)
return ret;
}
- priv->wpa_cipher_pair = pairwise;
- priv->wpa_cipher_group = groupwise;
return 0;
}
@@ -1496,7 +1546,7 @@ static int remove_key(struct usbnet *usbdev, int index, const u8 *bssid)
static void set_multicast_list(struct usbnet *usbdev)
{
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
__le32 filter, basefilter;
int ret;
char *mc_addrs = NULL;
@@ -1535,9 +1585,9 @@ static void set_multicast_list(struct usbnet *usbdev)
return;
}
- netdev_for_each_mc_addr(mclist, usbdev->net)
+ netdev_for_each_mc_addr(ha, usbdev->net)
memcpy(mc_addrs + i++ * ETH_ALEN,
- mclist->dmi_addr, ETH_ALEN);
+ ha->addr, ETH_ALEN);
}
netif_addr_unlock_bh(usbdev->net);
@@ -1569,6 +1619,194 @@ set_filter:
le32_to_cpu(filter), ret);
}
+#ifdef DEBUG
+static void debug_print_pmkids(struct usbnet *usbdev,
+ struct ndis_80211_pmkid *pmkids,
+ const char *func_str)
+{
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
+ int i, len, count, max_pmkids, entry_len;
+
+ max_pmkids = priv->wdev.wiphy->max_num_pmkids;
+ len = le32_to_cpu(pmkids->length);
+ count = le32_to_cpu(pmkids->bssid_info_count);
+
+ entry_len = (count > 0) ? (len - sizeof(*pmkids)) / count : -1;
+
+ netdev_dbg(usbdev->net, "%s(): %d PMKIDs (data len: %d, entry len: "
+ "%d)\n", func_str, count, len, entry_len);
+
+ if (count > max_pmkids)
+ count = max_pmkids;
+
+ for (i = 0; i < count; i++) {
+ u32 *tmp = (u32 *)pmkids->bssid_info[i].pmkid;
+
+ netdev_dbg(usbdev->net, "%s(): bssid: %pM, "
+ "pmkid: %08X:%08X:%08X:%08X\n",
+ func_str, pmkids->bssid_info[i].bssid,
+ cpu_to_be32(tmp[0]), cpu_to_be32(tmp[1]),
+ cpu_to_be32(tmp[2]), cpu_to_be32(tmp[3]));
+ }
+}
+#else
+static void debug_print_pmkids(struct usbnet *usbdev,
+ struct ndis_80211_pmkid *pmkids,
+ const char *func_str)
+{
+ return;
+}
+#endif
+
+static struct ndis_80211_pmkid *get_device_pmkids(struct usbnet *usbdev)
+{
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
+ struct ndis_80211_pmkid *pmkids;
+ int len, ret, max_pmkids;
+
+ max_pmkids = priv->wdev.wiphy->max_num_pmkids;
+ len = sizeof(*pmkids) + max_pmkids * sizeof(pmkids->bssid_info[0]);
+
+ pmkids = kzalloc(len, GFP_KERNEL);
+ if (!pmkids)
+ return ERR_PTR(-ENOMEM);
+
+ pmkids->length = cpu_to_le32(len);
+ pmkids->bssid_info_count = cpu_to_le32(max_pmkids);
+
+ ret = rndis_query_oid(usbdev, OID_802_11_PMKID, pmkids, &len);
+ if (ret < 0) {
+ netdev_dbg(usbdev->net, "%s(): OID_802_11_PMKID(%d, %d)"
+ " -> %d\n", __func__, len, max_pmkids, ret);
+
+ kfree(pmkids);
+ return ERR_PTR(ret);
+ }
+
+ if (le32_to_cpu(pmkids->bssid_info_count) > max_pmkids)
+ pmkids->bssid_info_count = cpu_to_le32(max_pmkids);
+
+ debug_print_pmkids(usbdev, pmkids, __func__);
+
+ return pmkids;
+}
+
+static int set_device_pmkids(struct usbnet *usbdev,
+ struct ndis_80211_pmkid *pmkids)
+{
+ int ret, len, num_pmkids;
+
+ num_pmkids = le32_to_cpu(pmkids->bssid_info_count);
+ len = sizeof(*pmkids) + num_pmkids * sizeof(pmkids->bssid_info[0]);
+ pmkids->length = cpu_to_le32(len);
+
+ debug_print_pmkids(usbdev, pmkids, __func__);
+
+ ret = rndis_set_oid(usbdev, OID_802_11_PMKID, pmkids,
+ le32_to_cpu(pmkids->length));
+ if (ret < 0) {
+ netdev_dbg(usbdev->net, "%s(): OID_802_11_PMKID(%d, %d) -> %d"
+ "\n", __func__, len, num_pmkids, ret);
+ }
+
+ kfree(pmkids);
+ return ret;
+}
+
+static struct ndis_80211_pmkid *remove_pmkid(struct usbnet *usbdev,
+ struct ndis_80211_pmkid *pmkids,
+ struct cfg80211_pmksa *pmksa,
+ int max_pmkids)
+{
+ int i, len, count, newlen, err;
+
+ len = le32_to_cpu(pmkids->length);
+ count = le32_to_cpu(pmkids->bssid_info_count);
+
+ if (count > max_pmkids)
+ count = max_pmkids;
+
+ for (i = 0; i < count; i++)
+ if (!compare_ether_addr(pmkids->bssid_info[i].bssid,
+ pmksa->bssid))
+ break;
+
+ /* pmkid not found */
+ if (i == count) {
+ netdev_dbg(usbdev->net, "%s(): bssid not found (%pM)\n",
+ __func__, pmksa->bssid);
+ err = -ENOENT;
+ goto error;
+ }
+
+ for (; i + 1 < count; i++)
+ pmkids->bssid_info[i] = pmkids->bssid_info[i + 1];
+
+ count--;
+ newlen = sizeof(*pmkids) + count * sizeof(pmkids->bssid_info[0]);
+
+ pmkids->length = cpu_to_le32(newlen);
+ pmkids->bssid_info_count = cpu_to_le32(count);
+
+ return pmkids;
+error:
+ kfree(pmkids);
+ return ERR_PTR(err);
+}
+
+static struct ndis_80211_pmkid *update_pmkid(struct usbnet *usbdev,
+ struct ndis_80211_pmkid *pmkids,
+ struct cfg80211_pmksa *pmksa,
+ int max_pmkids)
+{
+ int i, err, len, count, newlen;
+
+ len = le32_to_cpu(pmkids->length);
+ count = le32_to_cpu(pmkids->bssid_info_count);
+
+ if (count > max_pmkids)
+ count = max_pmkids;
+
+ /* update with new pmkid */
+ for (i = 0; i < count; i++) {
+ if (compare_ether_addr(pmkids->bssid_info[i].bssid,
+ pmksa->bssid))
+ continue;
+
+ memcpy(pmkids->bssid_info[i].pmkid, pmksa->pmkid,
+ WLAN_PMKID_LEN);
+
+ return pmkids;
+ }
+
+ /* out of space, return error */
+ if (i == max_pmkids) {
+ netdev_dbg(usbdev->net, "%s(): out of space\n", __func__);
+ err = -ENOSPC;
+ goto error;
+ }
+
+ /* add new pmkid */
+ newlen = sizeof(*pmkids) + (count + 1) * sizeof(pmkids->bssid_info[0]);
+
+ pmkids = krealloc(pmkids, newlen, GFP_KERNEL);
+ if (!pmkids) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ pmkids->length = cpu_to_le32(newlen);
+ pmkids->bssid_info_count = cpu_to_le32(count + 1);
+
+ memcpy(pmkids->bssid_info[count].bssid, pmksa->bssid, ETH_ALEN);
+ memcpy(pmkids->bssid_info[count].pmkid, pmksa->pmkid, WLAN_PMKID_LEN);
+
+ return pmkids;
+error:
+ kfree(pmkids);
+ return ERR_PTR(err);
+}
+
/*
* cfg80211 ops
*/
@@ -2053,7 +2291,7 @@ static int rndis_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
return deauthenticate(usbdev);
}
-static int rndis_set_channel(struct wiphy *wiphy,
+static int rndis_set_channel(struct wiphy *wiphy, struct net_device *netdev,
struct ieee80211_channel *chan, enum nl80211_channel_type channel_type)
{
struct rndis_wlan_private *priv = wiphy_priv(wiphy);
@@ -2179,6 +2417,78 @@ static int rndis_dump_station(struct wiphy *wiphy, struct net_device *dev,
return 0;
}
+static int rndis_set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_pmksa *pmksa)
+{
+ struct rndis_wlan_private *priv = wiphy_priv(wiphy);
+ struct usbnet *usbdev = priv->usbdev;
+ struct ndis_80211_pmkid *pmkids;
+ u32 *tmp = (u32 *)pmksa->pmkid;
+
+ netdev_dbg(usbdev->net, "%s(%pM, %08X:%08X:%08X:%08X)\n", __func__,
+ pmksa->bssid,
+ cpu_to_be32(tmp[0]), cpu_to_be32(tmp[1]),
+ cpu_to_be32(tmp[2]), cpu_to_be32(tmp[3]));
+
+ pmkids = get_device_pmkids(usbdev);
+ if (IS_ERR(pmkids)) {
+ /* couldn't read PMKID cache from device */
+ return PTR_ERR(pmkids);
+ }
+
+ pmkids = update_pmkid(usbdev, pmkids, pmksa, wiphy->max_num_pmkids);
+ if (IS_ERR(pmkids)) {
+ /* not found, list full, etc */
+ return PTR_ERR(pmkids);
+ }
+
+ return set_device_pmkids(usbdev, pmkids);
+}
+
+static int rndis_del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_pmksa *pmksa)
+{
+ struct rndis_wlan_private *priv = wiphy_priv(wiphy);
+ struct usbnet *usbdev = priv->usbdev;
+ struct ndis_80211_pmkid *pmkids;
+ u32 *tmp = (u32 *)pmksa->pmkid;
+
+ netdev_dbg(usbdev->net, "%s(%pM, %08X:%08X:%08X:%08X)\n", __func__,
+ pmksa->bssid,
+ cpu_to_be32(tmp[0]), cpu_to_be32(tmp[1]),
+ cpu_to_be32(tmp[2]), cpu_to_be32(tmp[3]));
+
+ pmkids = get_device_pmkids(usbdev);
+ if (IS_ERR(pmkids)) {
+ /* Couldn't read PMKID cache from device */
+ return PTR_ERR(pmkids);
+ }
+
+ pmkids = remove_pmkid(usbdev, pmkids, pmksa, wiphy->max_num_pmkids);
+ if (IS_ERR(pmkids)) {
+ /* not found, etc */
+ return PTR_ERR(pmkids);
+ }
+
+ return set_device_pmkids(usbdev, pmkids);
+}
+
+static int rndis_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
+{
+ struct rndis_wlan_private *priv = wiphy_priv(wiphy);
+ struct usbnet *usbdev = priv->usbdev;
+ struct ndis_80211_pmkid pmkid;
+
+ netdev_dbg(usbdev->net, "%s()\n", __func__);
+
+ memset(&pmkid, 0, sizeof(pmkid));
+
+ pmkid.length = cpu_to_le32(sizeof(pmkid));
+ pmkid.bssid_info_count = cpu_to_le32(0);
+
+ return rndis_set_oid(usbdev, OID_802_11_PMKID, &pmkid, sizeof(pmkid));
+}
+
/*
* workers, indication handlers, device poller
*/
@@ -2523,12 +2833,14 @@ static void rndis_wlan_indication(struct usbnet *usbdev, void *ind, int buflen)
}
}
-static int rndis_wlan_get_caps(struct usbnet *usbdev)
+static int rndis_wlan_get_caps(struct usbnet *usbdev, struct wiphy *wiphy)
{
struct {
__le32 num_items;
__le32 items[8];
} networks_supported;
+ struct ndis_80211_capability *caps;
+ u8 caps_buf[sizeof(*caps) + sizeof(caps->auth_encr_pair) * 16];
int len, retval, i, n;
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
@@ -2556,6 +2868,21 @@ static int rndis_wlan_get_caps(struct usbnet *usbdev)
}
}
+ /* get device 802.11 capabilities, number of PMKIDs */
+ caps = (struct ndis_80211_capability *)caps_buf;
+ len = sizeof(caps_buf);
+ retval = rndis_query_oid(usbdev, OID_802_11_CAPABILITY, caps, &len);
+ if (retval >= 0) {
+ netdev_dbg(usbdev->net, "OID_802_11_CAPABILITY -> len %d, "
+ "ver %d, pmkids %d, auth-encr-pairs %d\n",
+ le32_to_cpu(caps->length),
+ le32_to_cpu(caps->version),
+ le32_to_cpu(caps->num_pmkids),
+ le32_to_cpu(caps->num_auth_encr_pair));
+ wiphy->max_num_pmkids = le32_to_cpu(caps->num_pmkids);
+ } else
+ wiphy->max_num_pmkids = 0;
+
return retval;
}
@@ -2803,7 +3130,7 @@ static int rndis_wlan_bind(struct usbnet *usbdev, struct usb_interface *intf)
wiphy->max_scan_ssids = 1;
/* TODO: fill-out band/encr information based on priv->caps */
- rndis_wlan_get_caps(usbdev);
+ rndis_wlan_get_caps(usbdev, wiphy);
memcpy(priv->channels, rndis_channels, sizeof(rndis_channels));
memcpy(priv->rates, rndis_rates, sizeof(rndis_rates));
@@ -2863,9 +3190,6 @@ static void rndis_wlan_unbind(struct usbnet *usbdev, struct usb_interface *intf)
flush_workqueue(priv->workqueue);
destroy_workqueue(priv->workqueue);
- if (priv && priv->wpa_ie_len)
- kfree(priv->wpa_ie);
-
rndis_unbind(usbdev, intf);
wiphy_unregister(priv->wdev.wiphy);
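
A minimal standalone sketch of the offset/length clamping that the rndis_query_oid() hunk above introduces; the function and parameter names here are illustrative only and are not part of the patch:

#include <string.h>
#include <errno.h>

/* Copy an OID response without trusting the device-reported offset/length. */
static int copy_oid_response(void *data, size_t datalen,
			     const void *buf, size_t buflen,
			     size_t respoffs, size_t resplen,
			     size_t *outlen)
{
	size_t copylen;

	if (respoffs > buflen)
		return -EINVAL;		/* offset points outside our buffer */

	/* Device may claim more data than the buffer actually holds. */
	copylen = (respoffs + resplen > buflen) ? buflen - respoffs : resplen;
	if (copylen > datalen)
		copylen = datalen;	/* never overflow the caller's buffer */

	memcpy(data, (const char *)buf + respoffs, copylen);
	*outlen = resplen;		/* report the length the device claimed */
	return 0;
}
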
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 5239e082cd0f..eea1ef2f502b 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -87,7 +87,7 @@ if RT2800PCI
config RT2800PCI_RT30XX
bool "rt2800pci - Include support for rt30xx (PCI/PCIe/PCMCIA) devices"
- default n
+ default y
---help---
This adds support for rt30xx wireless chipset family to the
rt2800pci driver.
@@ -156,7 +156,7 @@ if RT2800USB
config RT2800USB_RT30XX
bool "rt2800usb - Include support for rt30xx (USB) devices"
- default n
+ default y
---help---
This adds support for rt30xx wireless chipset family to the
rt2800usb driver.
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 5f5204b82891..6126c0ab5880 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -526,6 +526,10 @@ static void rt2400pci_config_ps(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 1);
rt2x00pci_register_write(rt2x00dev, CSR20, reg);
+ } else {
+ rt2x00pci_register_read(rt2x00dev, CSR20, &reg);
+ rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 0);
+ rt2x00pci_register_write(rt2x00dev, CSR20, reg);
}
rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
@@ -1014,8 +1018,8 @@ static void rt2400pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
rt2x00_desc_write(entry_priv->desc, 1, word);
rt2x00_desc_read(txd, 2, &word);
- rt2x00_set_field32(&word, TXD_W2_BUFFER_LENGTH, skb->len);
- rt2x00_set_field32(&word, TXD_W2_DATABYTE_COUNT, skb->len);
+ rt2x00_set_field32(&word, TXD_W2_BUFFER_LENGTH, txdesc->length);
+ rt2x00_set_field32(&word, TXD_W2_DATABYTE_COUNT, txdesc->length);
rt2x00_desc_write(txd, 2, word);
rt2x00_desc_read(txd, 3, &word);
@@ -1056,7 +1060,8 @@ static void rt2400pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
/*
* TX data initialization
*/
-static void rt2400pci_write_beacon(struct queue_entry *entry)
+static void rt2400pci_write_beacon(struct queue_entry *entry,
+ struct txentry_desc *txdesc)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct queue_entry_priv_pci *entry_priv = entry->priv_data;
@@ -1086,6 +1091,14 @@ static void rt2400pci_write_beacon(struct queue_entry *entry)
rt2x00_desc_read(entry_priv->desc, 1, &word);
rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma);
rt2x00_desc_write(entry_priv->desc, 1, word);
+
+ /*
+ * Enable beaconing again.
+ */
+ rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
+ rt2x00_set_field32(&reg, CSR14_TBCN, 1);
+ rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
+ rt2x00pci_register_write(rt2x00dev, CSR14, reg);
}
static void rt2400pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
@@ -1093,17 +1106,6 @@ static void rt2400pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
{
u32 reg;
- if (queue == QID_BEACON) {
- rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
- if (!rt2x00_get_field32(reg, CSR14_BEACON_GEN)) {
- rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
- rt2x00_set_field32(&reg, CSR14_TBCN, 1);
- rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
- rt2x00pci_register_write(rt2x00dev, CSR14, reg);
- }
- return;
- }
-
rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, (queue == QID_AC_BE));
rt2x00_set_field32(&reg, TXCSR0_KICK_TX, (queue == QID_AC_BK));
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 2a73f593aab0..2e4f461406ae 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -574,6 +574,10 @@ static void rt2500pci_config_ps(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 1);
rt2x00pci_register_write(rt2x00dev, CSR20, reg);
+ } else {
+ rt2x00pci_register_read(rt2x00dev, CSR20, &reg);
+ rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 0);
+ rt2x00pci_register_write(rt2x00dev, CSR20, reg);
}
rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
@@ -1205,7 +1209,7 @@ static void rt2500pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len);
+ rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length);
rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, CIPHER_NONE);
rt2x00_desc_write(txd, 0, word);
}
@@ -1213,7 +1217,8 @@ static void rt2500pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
/*
* TX data initialization
*/
-static void rt2500pci_write_beacon(struct queue_entry *entry)
+static void rt2500pci_write_beacon(struct queue_entry *entry,
+ struct txentry_desc *txdesc)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct queue_entry_priv_pci *entry_priv = entry->priv_data;
@@ -1243,6 +1248,14 @@ static void rt2500pci_write_beacon(struct queue_entry *entry)
rt2x00_desc_read(entry_priv->desc, 1, &word);
rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma);
rt2x00_desc_write(entry_priv->desc, 1, word);
+
+ /*
+ * Enable beaconing again.
+ */
+ rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
+ rt2x00_set_field32(&reg, CSR14_TBCN, 1);
+ rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
+ rt2x00pci_register_write(rt2x00dev, CSR14, reg);
}
static void rt2500pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
@@ -1250,17 +1263,6 @@ static void rt2500pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
{
u32 reg;
- if (queue == QID_BEACON) {
- rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
- if (!rt2x00_get_field32(reg, CSR14_BEACON_GEN)) {
- rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
- rt2x00_set_field32(&reg, CSR14_TBCN, 1);
- rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
- rt2x00pci_register_write(rt2x00dev, CSR14, reg);
- }
- return;
- }
-
rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, (queue == QID_AC_BE));
rt2x00_set_field32(&reg, TXCSR0_KICK_TX, (queue == QID_AC_BK));
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 8ebb705fe106..e88d7033fbc9 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -649,6 +649,10 @@ static void rt2500usb_config_ps(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field16(&reg, MAC_CSR18_AUTO_WAKE, 1);
rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg);
+ } else {
+ rt2500usb_register_read(rt2x00dev, MAC_CSR18, &reg);
+ rt2x00_set_field16(&reg, MAC_CSR18_AUTO_WAKE, 0);
+ rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg);
}
rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
@@ -1068,7 +1072,7 @@ static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&word, TXD_W0_NEW_SEQ,
test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
- rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len);
+ rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length);
rt2x00_set_field32(&word, TXD_W0_CIPHER, !!txdesc->cipher);
rt2x00_set_field32(&word, TXD_W0_KEY_ID, txdesc->key_idx);
rt2x00_desc_write(txd, 0, word);
@@ -1079,7 +1083,8 @@ static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
*/
static void rt2500usb_beacondone(struct urb *urb);
-static void rt2500usb_write_beacon(struct queue_entry *entry)
+static void rt2500usb_write_beacon(struct queue_entry *entry,
+ struct txentry_desc *txdesc)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
@@ -1087,7 +1092,7 @@ static void rt2500usb_write_beacon(struct queue_entry *entry)
struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
int pipe = usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint);
int length;
- u16 reg;
+ u16 reg, reg0;
/*
* Add the descriptor in front of the skb.
@@ -1129,6 +1134,26 @@ static void rt2500usb_write_beacon(struct queue_entry *entry)
* Send out the guardian byte.
*/
usb_submit_urb(bcn_priv->guardian_urb, GFP_ATOMIC);
+
+ /*
+ * Enable beaconing again.
+ */
+ rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 1);
+ rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 1);
+ reg0 = reg;
+ rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 1);
+ /*
+ * Beacon generation will fail initially.
+ * To prevent this we need to change the TXRX_CSR19
+ * register several times (reg0 is the same as reg
+ * except for TXRX_CSR19_BEACON_GEN, which is 0 in reg0
+ * and 1 in reg).
+ */
+ rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
+ rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg0);
+ rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
+ rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg0);
+ rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
}
static int rt2500usb_get_tx_data_len(struct queue_entry *entry)
@@ -1145,37 +1170,6 @@ static int rt2500usb_get_tx_data_len(struct queue_entry *entry)
return length;
}
-static void rt2500usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
- const enum data_queue_qid queue)
-{
- u16 reg, reg0;
-
- if (queue != QID_BEACON) {
- rt2x00usb_kick_tx_queue(rt2x00dev, queue);
- return;
- }
-
- rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg);
- if (!rt2x00_get_field16(reg, TXRX_CSR19_BEACON_GEN)) {
- rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 1);
- rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 1);
- reg0 = reg;
- rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 1);
- /*
- * Beacon generation will fail initially.
- * To prevent this we need to change the TXRX_CSR19
- * register several times (reg0 is the same as reg
- * except for TXRX_CSR19_BEACON_GEN, which is 0 in reg0
- * and 1 in reg).
- */
- rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
- rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg0);
- rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
- rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg0);
- rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
- }
-}
-
/*
* RX control handlers
*/
@@ -1210,11 +1204,9 @@ static void rt2500usb_fill_rxdone(struct queue_entry *entry,
if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR))
rxdesc->flags |= RX_FLAG_FAILED_PLCP_CRC;
- if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
- rxdesc->cipher = rt2x00_get_field32(word0, RXD_W0_CIPHER);
- if (rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR))
- rxdesc->cipher_status = RX_CRYPTO_FAIL_KEY;
- }
+ rxdesc->cipher = rt2x00_get_field32(word0, RXD_W0_CIPHER);
+ if (rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR))
+ rxdesc->cipher_status = RX_CRYPTO_FAIL_KEY;
if (rxdesc->cipher != CIPHER_NONE) {
_rt2x00_desc_read(rxd, 2, &rxdesc->iv[0]);
@@ -1644,11 +1636,6 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
unsigned int i;
/*
- * Disable powersaving as default.
- */
- rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
-
- /*
* Initialize all hw fields.
*/
rt2x00dev->hw->flags =
@@ -1781,7 +1768,7 @@ static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = {
.write_tx_data = rt2x00usb_write_tx_data,
.write_beacon = rt2500usb_write_beacon,
.get_tx_data_len = rt2500usb_get_tx_data_len,
- .kick_tx_queue = rt2500usb_kick_tx_queue,
+ .kick_tx_queue = rt2x00usb_kick_tx_queue,
.kill_tx_queue = rt2x00usb_kill_tx_queue,
.fill_rxdone = rt2500usb_fill_rxdone,
.config_shared_key = rt2500usb_config_key,
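
The rt2500usb beacon hunk above toggles TXRX_CSR19 several times because a single write does not reliably start beacon generation. A self-contained sketch of that write pattern, with csr19_write() standing in for rt2500usb_register_write() and only logging the value:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the real USB register write; here it only logs the value. */
static void csr19_write(uint16_t value)
{
	printf("TXRX_CSR19 <- 0x%04x\n", value);
}

/*
 * reg has TSF_COUNT, TBCN and BEACON_GEN set; reg0 is identical except
 * that BEACON_GEN is cleared.  Alternating the two values and finishing
 * with reg is the workaround the patch carries over from the removed
 * kick_tx_queue() path.
 */
static void kick_beacon_generation(uint16_t reg, uint16_t reg0)
{
	csr19_write(reg);
	csr19_write(reg0);
	csr19_write(reg);
	csr19_write(reg0);
	csr19_write(reg);
}
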
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index 74c0433dba37..2aa03751c341 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -56,15 +56,20 @@
#define RF3021 0x0007
#define RF3022 0x0008
#define RF3052 0x0009
+#define RF3320 0x000b
/*
- * Chipset version.
+ * Chipset revisions.
*/
-#define RT2860C_VERSION 0x0100
-#define RT2860D_VERSION 0x0101
-#define RT2880E_VERSION 0x0200
-#define RT2883_VERSION 0x0300
-#define RT3070_VERSION 0x0200
+#define REV_RT2860C 0x0100
+#define REV_RT2860D 0x0101
+#define REV_RT2870D 0x0101
+#define REV_RT2872E 0x0200
+#define REV_RT3070E 0x0200
+#define REV_RT3070F 0x0201
+#define REV_RT3071E 0x0211
+#define REV_RT3090E 0x0211
+#define REV_RT3390E 0x0211
/*
* Signal information.
@@ -90,13 +95,19 @@
#define NUM_TX_QUEUES 4
/*
- * USB registers.
+ * Registers.
*/
/*
+ * OPT_14: Unknown register used by rt3xxx devices.
+ */
+#define OPT_14_CSR 0x0114
+#define OPT_14_CSR_BIT0 FIELD32(0x00000001)
+
+/*
* INT_SOURCE_CSR: Interrupt source register.
* Write one to clear corresponding bit.
- * TX_FIFO_STATUS: FIFO Statistics is full, sw should read 0x171c
+ * TX_FIFO_STATUS: FIFO Statistics is full, sw should read TX_STA_FIFO
*/
#define INT_SOURCE_CSR 0x0200
#define INT_SOURCE_CSR_RXDELAYINT FIELD32(0x00000001)
@@ -398,6 +409,31 @@
#define EFUSE_DATA3 0x059c
/*
+ * LDO_CFG0
+ */
+#define LDO_CFG0 0x05d4
+#define LDO_CFG0_DELAY3 FIELD32(0x000000ff)
+#define LDO_CFG0_DELAY2 FIELD32(0x0000ff00)
+#define LDO_CFG0_DELAY1 FIELD32(0x00ff0000)
+#define LDO_CFG0_BGSEL FIELD32(0x03000000)
+#define LDO_CFG0_LDO_CORE_VLEVEL FIELD32(0x1c000000)
+#define LD0_CFG0_LDO25_LEVEL FIELD32(0x60000000)
+#define LDO_CFG0_LDO25_LARGEA FIELD32(0x80000000)
+
+/*
+ * GPIO_SWITCH
+ */
+#define GPIO_SWITCH 0x05dc
+#define GPIO_SWITCH_0 FIELD32(0x00000001)
+#define GPIO_SWITCH_1 FIELD32(0x00000002)
+#define GPIO_SWITCH_2 FIELD32(0x00000004)
+#define GPIO_SWITCH_3 FIELD32(0x00000008)
+#define GPIO_SWITCH_4 FIELD32(0x00000010)
+#define GPIO_SWITCH_5 FIELD32(0x00000020)
+#define GPIO_SWITCH_6 FIELD32(0x00000040)
+#define GPIO_SWITCH_7 FIELD32(0x00000080)
+
+/*
* MAC Control/Status Registers(CSR).
* Some values are set in TU, whereas 1 TU == 1024 us.
*/
@@ -809,7 +845,7 @@
* TX_BAND_CFG: 0x1 use upper 20MHz, 0x0 use lower 20MHz
*/
#define TX_BAND_CFG 0x132c
-#define TX_BAND_CFG_HT40_PLUS FIELD32(0x00000001)
+#define TX_BAND_CFG_HT40_MINUS FIELD32(0x00000001)
#define TX_BAND_CFG_A FIELD32(0x00000002)
#define TX_BAND_CFG_BG FIELD32(0x00000004)
@@ -1483,7 +1519,7 @@ struct mac_iveiv_entry {
* BBP 3: RX Antenna
*/
#define BBP3_RX_ANTENNA FIELD8(0x18)
-#define BBP3_HT40_PLUS FIELD8(0x20)
+#define BBP3_HT40_MINUS FIELD8(0x20)
/*
* BBP 4: Bandwidth
@@ -1492,14 +1528,32 @@ struct mac_iveiv_entry {
#define BBP4_BANDWIDTH FIELD8(0x18)
/*
+ * BBP 138: Unknown
+ */
+#define BBP138_RX_ADC1 FIELD8(0x02)
+#define BBP138_RX_ADC2 FIELD8(0x04)
+#define BBP138_TX_DAC1 FIELD8(0x20)
+#define BBP138_TX_DAC2 FIELD8(0x40)
+
+/*
* RFCSR registers
* The wordsize of the RFCSR is 8 bits.
*/
/*
+ * RFCSR 1:
+ */
+#define RFCSR1_RF_BLOCK_EN FIELD8(0x01)
+#define RFCSR1_RX0_PD FIELD8(0x04)
+#define RFCSR1_TX0_PD FIELD8(0x08)
+#define RFCSR1_RX1_PD FIELD8(0x10)
+#define RFCSR1_TX1_PD FIELD8(0x20)
+
+/*
* RFCSR 6:
*/
-#define RFCSR6_R FIELD8(0x03)
+#define RFCSR6_R1 FIELD8(0x03)
+#define RFCSR6_R2 FIELD8(0x40)
/*
* RFCSR 7:
@@ -1512,6 +1566,33 @@ struct mac_iveiv_entry {
#define RFCSR12_TX_POWER FIELD8(0x1f)
/*
+ * RFCSR 13:
+ */
+#define RFCSR13_TX_POWER FIELD8(0x1f)
+
+/*
+ * RFCSR 15:
+ */
+#define RFCSR15_TX_LO2_EN FIELD8(0x08)
+
+/*
+ * RFCSR 17:
+ */
+#define RFCSR17_TXMIXER_GAIN FIELD8(0x07)
+#define RFCSR17_TX_LO1_EN FIELD8(0x08)
+#define RFCSR17_R FIELD8(0x20)
+
+/*
+ * RFCSR 20:
+ */
+#define RFCSR20_RX_LO1_EN FIELD8(0x08)
+
+/*
+ * RFCSR 21:
+ */
+#define RFCSR21_RX_LO2_EN FIELD8(0x08)
+
+/*
* RFCSR 22:
*/
#define RFCSR22_BASEBAND_LOOPBACK FIELD8(0x01)
@@ -1522,6 +1603,14 @@ struct mac_iveiv_entry {
#define RFCSR23_FREQ_OFFSET FIELD8(0x7f)
/*
+ * RFCSR 27:
+ */
+#define RFCSR27_R1 FIELD8(0x03)
+#define RFCSR27_R2 FIELD8(0x04)
+#define RFCSR27_R3 FIELD8(0x30)
+#define RFCSR27_R4 FIELD8(0x40)
+
+/*
* RFCSR 30:
*/
#define RFCSR30_RF_CALIBRATION FIELD8(0x80)
@@ -1603,6 +1692,8 @@ struct mac_iveiv_entry {
#define EEPROM_NIC_WPS_PBC FIELD16(0x0080)
#define EEPROM_NIC_BW40M_BG FIELD16(0x0100)
#define EEPROM_NIC_BW40M_A FIELD16(0x0200)
+#define EEPROM_NIC_ANT_DIVERSITY FIELD16(0x0800)
+#define EEPROM_NIC_DAC_TEST FIELD16(0x8000)
/*
* EEPROM frequency
@@ -1659,6 +1750,12 @@ struct mac_iveiv_entry {
#define EEPROM_RSSI_BG2_LNA_A1 FIELD16(0xff00)
/*
+ * EEPROM TXMIXER GAIN BG offset (note overlaps with EEPROM RSSI BG2).
+ */
+#define EEPROM_TXMIXER_GAIN_BG 0x0024
+#define EEPROM_TXMIXER_GAIN_BG_VAL FIELD16(0x0007)
+
+/*
* EEPROM RSSI A offset
*/
#define EEPROM_RSSI_A 0x0025
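
The FIELD32()/FIELD8() definitions added above are plain bitmasks; the rt2x00 set/get helpers derive the shift amount from the mask itself. A hedged, standalone approximation of those helpers (not the driver's actual implementation) looks like this:

#include <stdint.h>

/* Shift amount = position of the lowest set bit; mask must be non-zero. */
static inline unsigned int field_shift(uint32_t mask)
{
	unsigned int shift = 0;

	while (!(mask & 1)) {
		mask >>= 1;
		shift++;
	}
	return shift;
}

static inline void set_field32(uint32_t *reg, uint32_t mask, uint32_t value)
{
	*reg = (*reg & ~mask) | ((value << field_shift(mask)) & mask);
}

static inline uint32_t get_field32(uint32_t reg, uint32_t mask)
{
	return (reg & mask) >> field_shift(mask);
}
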
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index c015ce9fdd09..db4250d1c8b3 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -41,9 +41,6 @@
#if defined(CONFIG_RT2X00_LIB_USB) || defined(CONFIG_RT2X00_LIB_USB_MODULE)
#include "rt2x00usb.h"
#endif
-#if defined(CONFIG_RT2X00_LIB_PCI) || defined(CONFIG_RT2X00_LIB_PCI_MODULE)
-#include "rt2x00pci.h"
-#endif
#include "rt2800lib.h"
#include "rt2800.h"
#include "rt2800usb.h"
@@ -76,6 +73,23 @@ MODULE_LICENSE("GPL");
rt2800_regbusy_read((__dev), H2M_MAILBOX_CSR, \
H2M_MAILBOX_CSR_OWNER, (__reg))
+static inline bool rt2800_is_305x_soc(struct rt2x00_dev *rt2x00dev)
+{
+ /* check for rt2872 on SoC */
+ if (!rt2x00_is_soc(rt2x00dev) ||
+ !rt2x00_rt(rt2x00dev, RT2872))
+ return false;
+
+ /* we know for sure that these rf chipsets are used on rt305x boards */
+ if (rt2x00_rf(rt2x00dev, RF3020) ||
+ rt2x00_rf(rt2x00dev, RF3021) ||
+ rt2x00_rf(rt2x00dev, RF3022))
+ return true;
+
+ NOTICE(rt2x00dev, "Unknown RF chipset on rt305x\n");
+ return false;
+}
+
static void rt2800_bbp_write(struct rt2x00_dev *rt2x00dev,
const unsigned int word, const u8 value)
{
@@ -268,6 +282,104 @@ int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
}
EXPORT_SYMBOL_GPL(rt2800_wait_wpdma_ready);
+void rt2800_write_txwi(struct sk_buff *skb, struct txentry_desc *txdesc)
+{
+ __le32 *txwi = (__le32 *)(skb->data - TXWI_DESC_SIZE);
+ u32 word;
+
+ /*
+ * Initialize TX Info descriptor
+ */
+ rt2x00_desc_read(txwi, 0, &word);
+ rt2x00_set_field32(&word, TXWI_W0_FRAG,
+ test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
+ rt2x00_set_field32(&word, TXWI_W0_MIMO_PS, 0);
+ rt2x00_set_field32(&word, TXWI_W0_CF_ACK, 0);
+ rt2x00_set_field32(&word, TXWI_W0_TS,
+ test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
+ rt2x00_set_field32(&word, TXWI_W0_AMPDU,
+ test_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags));
+ rt2x00_set_field32(&word, TXWI_W0_MPDU_DENSITY, txdesc->mpdu_density);
+ rt2x00_set_field32(&word, TXWI_W0_TX_OP, txdesc->txop);
+ rt2x00_set_field32(&word, TXWI_W0_MCS, txdesc->mcs);
+ rt2x00_set_field32(&word, TXWI_W0_BW,
+ test_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags));
+ rt2x00_set_field32(&word, TXWI_W0_SHORT_GI,
+ test_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags));
+ rt2x00_set_field32(&word, TXWI_W0_STBC, txdesc->stbc);
+ rt2x00_set_field32(&word, TXWI_W0_PHYMODE, txdesc->rate_mode);
+ rt2x00_desc_write(txwi, 0, word);
+
+ rt2x00_desc_read(txwi, 1, &word);
+ rt2x00_set_field32(&word, TXWI_W1_ACK,
+ test_bit(ENTRY_TXD_ACK, &txdesc->flags));
+ rt2x00_set_field32(&word, TXWI_W1_NSEQ,
+ test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
+ rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->ba_size);
+ rt2x00_set_field32(&word, TXWI_W1_WIRELESS_CLI_ID,
+ test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ?
+ txdesc->key_idx : 0xff);
+ rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT,
+ txdesc->length);
+ rt2x00_set_field32(&word, TXWI_W1_PACKETID, txdesc->queue + 1);
+ rt2x00_desc_write(txwi, 1, word);
+
+ /*
+ * Always write 0 to IV/EIV fields, hardware will insert the IV
+ * from the IVEIV register when TXD_W3_WIV is set to 0.
+ * When TXD_W3_WIV is set to 1 it will use the IV data
+ * from the descriptor. The TXWI_W1_WIRELESS_CLI_ID indicates which
+ * crypto entry in the registers should be used to encrypt the frame.
+ */
+ _rt2x00_desc_write(txwi, 2, 0 /* skbdesc->iv[0] */);
+ _rt2x00_desc_write(txwi, 3, 0 /* skbdesc->iv[1] */);
+}
+EXPORT_SYMBOL_GPL(rt2800_write_txwi);
+
+void rt2800_process_rxwi(struct sk_buff *skb, struct rxdone_entry_desc *rxdesc)
+{
+ __le32 *rxwi = (__le32 *) skb->data;
+ u32 word;
+
+ rt2x00_desc_read(rxwi, 0, &word);
+
+ rxdesc->cipher = rt2x00_get_field32(word, RXWI_W0_UDF);
+ rxdesc->size = rt2x00_get_field32(word, RXWI_W0_MPDU_TOTAL_BYTE_COUNT);
+
+ rt2x00_desc_read(rxwi, 1, &word);
+
+ if (rt2x00_get_field32(word, RXWI_W1_SHORT_GI))
+ rxdesc->flags |= RX_FLAG_SHORT_GI;
+
+ if (rt2x00_get_field32(word, RXWI_W1_BW))
+ rxdesc->flags |= RX_FLAG_40MHZ;
+
+ /*
+ * Detect RX rate, always use MCS as signal type.
+ */
+ rxdesc->dev_flags |= RXDONE_SIGNAL_MCS;
+ rxdesc->signal = rt2x00_get_field32(word, RXWI_W1_MCS);
+ rxdesc->rate_mode = rt2x00_get_field32(word, RXWI_W1_PHYMODE);
+
+ /*
+ * Mask out the 0x8 bit to remove the short preamble flag.
+ */
+ if (rxdesc->rate_mode == RATE_MODE_CCK)
+ rxdesc->signal &= ~0x8;
+
+ rt2x00_desc_read(rxwi, 2, &word);
+
+ rxdesc->rssi =
+ (rt2x00_get_field32(word, RXWI_W2_RSSI0) +
+ rt2x00_get_field32(word, RXWI_W2_RSSI1)) / 2;
+
+ /*
+ * Remove RXWI descriptor from start of buffer.
+ */
+ skb_pull(skb, RXWI_DESC_SIZE);
+}
+EXPORT_SYMBOL_GPL(rt2800_process_rxwi);
+
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
const struct rt2x00debug rt2800_rt2x00debug = {
.owner = THIS_MODULE,
@@ -360,11 +472,6 @@ static int rt2800_blink_set(struct led_classdev *led_cdev,
rt2800_register_read(led->rt2x00dev, LED_CFG, &reg);
rt2x00_set_field32(&reg, LED_CFG_ON_PERIOD, *delay_on);
rt2x00_set_field32(&reg, LED_CFG_OFF_PERIOD, *delay_off);
- rt2x00_set_field32(&reg, LED_CFG_SLOW_BLINK_PERIOD, 3);
- rt2x00_set_field32(&reg, LED_CFG_R_LED_MODE, 3);
- rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, 3);
- rt2x00_set_field32(&reg, LED_CFG_Y_LED_MODE, 3);
- rt2x00_set_field32(&reg, LED_CFG_LED_POLAR, 1);
rt2800_register_write(led->rt2x00dev, LED_CFG, reg);
return 0;
@@ -610,10 +717,6 @@ void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp)
{
u32 reg;
- rt2800_register_read(rt2x00dev, TX_TIMEOUT_CFG, &reg);
- rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_RX_ACK_TIMEOUT, 0x20);
- rt2800_register_write(rt2x00dev, TX_TIMEOUT_CFG, reg);
-
rt2800_register_read(rt2x00dev, AUTO_RSP_CFG, &reg);
rt2x00_set_field32(&reg, AUTO_RSP_CFG_BAC_ACK_POLICY,
!!erp->short_preamble);
@@ -632,15 +735,10 @@ void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp)
rt2800_register_read(rt2x00dev, BKOFF_SLOT_CFG, &reg);
rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_SLOT_TIME, erp->slot_time);
- rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_CC_DELAY_TIME, 2);
rt2800_register_write(rt2x00dev, BKOFF_SLOT_CFG, reg);
rt2800_register_read(rt2x00dev, XIFS_TIME_CFG, &reg);
- rt2x00_set_field32(&reg, XIFS_TIME_CFG_CCKM_SIFS_TIME, erp->sifs);
- rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_SIFS_TIME, erp->sifs);
- rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_XIFS_TIME, 4);
rt2x00_set_field32(&reg, XIFS_TIME_CFG_EIFS, erp->eifs);
- rt2x00_set_field32(&reg, XIFS_TIME_CFG_BB_RXEND_ENABLE, 1);
rt2800_register_write(rt2x00dev, XIFS_TIME_CFG, reg);
rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
@@ -718,10 +816,10 @@ static void rt2800_config_lna_gain(struct rt2x00_dev *rt2x00dev,
rt2x00dev->lna_gain = lna_gain;
}
-static void rt2800_config_channel_rt2x(struct rt2x00_dev *rt2x00dev,
- struct ieee80211_conf *conf,
- struct rf_channel *rf,
- struct channel_info *info)
+static void rt2800_config_channel_rf2xxx(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_conf *conf,
+ struct rf_channel *rf,
+ struct channel_info *info)
{
rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);
@@ -787,10 +885,10 @@ static void rt2800_config_channel_rt2x(struct rt2x00_dev *rt2x00dev,
rt2800_rf_write(rt2x00dev, 4, rf->rf4);
}
-static void rt2800_config_channel_rt3x(struct rt2x00_dev *rt2x00dev,
- struct ieee80211_conf *conf,
- struct rf_channel *rf,
- struct channel_info *info)
+static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_conf *conf,
+ struct rf_channel *rf,
+ struct channel_info *info)
{
u8 rfcsr;
@@ -798,7 +896,7 @@ static void rt2800_config_channel_rt3x(struct rt2x00_dev *rt2x00dev,
rt2800_rfcsr_write(rt2x00dev, 3, rf->rf3);
rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
- rt2x00_set_field8(&rfcsr, RFCSR6_R, rf->rf2);
+ rt2x00_set_field8(&rfcsr, RFCSR6_R1, rf->rf2);
rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
rt2800_rfcsr_read(rt2x00dev, 12, &rfcsr);
@@ -806,6 +904,11 @@ static void rt2800_config_channel_rt3x(struct rt2x00_dev *rt2x00dev,
TXPOWER_G_TO_DEV(info->tx_power1));
rt2800_rfcsr_write(rt2x00dev, 12, rfcsr);
+ rt2800_rfcsr_read(rt2x00dev, 13, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR13_TX_POWER,
+ TXPOWER_G_TO_DEV(info->tx_power2));
+ rt2800_rfcsr_write(rt2x00dev, 13, rfcsr);
+
rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR23_FREQ_OFFSET, rt2x00dev->freq_offset);
rt2800_rfcsr_write(rt2x00dev, 23, rfcsr);
@@ -827,15 +930,13 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
unsigned int tx_pin;
u8 bbp;
- if ((rt2x00_rt(rt2x00dev, RT3070) ||
- rt2x00_rt(rt2x00dev, RT3090)) &&
- (rt2x00_rf(rt2x00dev, RF2020) ||
- rt2x00_rf(rt2x00dev, RF3020) ||
- rt2x00_rf(rt2x00dev, RF3021) ||
- rt2x00_rf(rt2x00dev, RF3022)))
- rt2800_config_channel_rt3x(rt2x00dev, conf, rf, info);
+ if (rt2x00_rf(rt2x00dev, RF2020) ||
+ rt2x00_rf(rt2x00dev, RF3020) ||
+ rt2x00_rf(rt2x00dev, RF3021) ||
+ rt2x00_rf(rt2x00dev, RF3022))
+ rt2800_config_channel_rf3xxx(rt2x00dev, conf, rf, info);
else
- rt2800_config_channel_rt2x(rt2x00dev, conf, rf, info);
+ rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
/*
* Change BBP settings
@@ -863,7 +964,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
}
rt2800_register_read(rt2x00dev, TX_BAND_CFG, &reg);
- rt2x00_set_field32(&reg, TX_BAND_CFG_HT40_PLUS, conf_is_ht40_plus(conf));
+ rt2x00_set_field32(&reg, TX_BAND_CFG_HT40_MINUS, conf_is_ht40_minus(conf));
rt2x00_set_field32(&reg, TX_BAND_CFG_A, rf->channel > 14);
rt2x00_set_field32(&reg, TX_BAND_CFG_BG, rf->channel <= 14);
rt2800_register_write(rt2x00dev, TX_BAND_CFG, reg);
@@ -896,11 +997,10 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2800_bbp_write(rt2x00dev, 4, bbp);
rt2800_bbp_read(rt2x00dev, 3, &bbp);
- rt2x00_set_field8(&bbp, BBP3_HT40_PLUS, conf_is_ht40_plus(conf));
+ rt2x00_set_field8(&bbp, BBP3_HT40_MINUS, conf_is_ht40_minus(conf));
rt2800_bbp_write(rt2x00dev, 3, bbp);
- if (rt2x00_rt(rt2x00dev, RT2860) &&
- (rt2x00_rev(rt2x00dev) == RT2860C_VERSION)) {
+ if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) {
if (conf_is_ht40(conf)) {
rt2800_bbp_write(rt2x00dev, 69, 0x1a);
rt2800_bbp_write(rt2x00dev, 70, 0x0a);
@@ -988,10 +1088,6 @@ static void rt2800_config_retry_limit(struct rt2x00_dev *rt2x00dev,
libconf->conf->short_frame_max_tx_count);
rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_LIMIT,
libconf->conf->long_frame_max_tx_count);
- rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_THRE, 2000);
- rt2x00_set_field32(&reg, TX_RTY_CFG_NON_AGG_RTY_MODE, 0);
- rt2x00_set_field32(&reg, TX_RTY_CFG_AGG_RTY_MODE, 0);
- rt2x00_set_field32(&reg, TX_RTY_CFG_TX_AUTO_FB_ENABLE, 1);
rt2800_register_write(rt2x00dev, TX_RTY_CFG, reg);
}
@@ -1015,13 +1111,13 @@ static void rt2800_config_ps(struct rt2x00_dev *rt2x00dev,
rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
} else {
- rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
-
rt2800_register_read(rt2x00dev, AUTOWAKEUP_CFG, &reg);
rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTO_LEAD_TIME, 0);
rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE, 0);
rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTOWAKE, 0);
rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, reg);
+
+ rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
}
}
@@ -1062,9 +1158,10 @@ EXPORT_SYMBOL_GPL(rt2800_link_stats);
static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
{
if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
- if (rt2x00_is_usb(rt2x00dev) &&
- rt2x00_rt(rt2x00dev, RT3070) &&
- (rt2x00_rev(rt2x00dev) == RT3070_VERSION))
+ if (rt2x00_rt(rt2x00dev, RT3070) ||
+ rt2x00_rt(rt2x00dev, RT3071) ||
+ rt2x00_rt(rt2x00dev, RT3090) ||
+ rt2x00_rt(rt2x00dev, RT3390))
return 0x1c + (2 * rt2x00dev->lna_gain);
else
return 0x2e + rt2x00dev->lna_gain;
@@ -1095,8 +1192,7 @@ EXPORT_SYMBOL_GPL(rt2800_reset_tuner);
void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
const u32 count)
{
- if (rt2x00_rt(rt2x00dev, RT2860) &&
- (rt2x00_rev(rt2x00dev) == RT2860C_VERSION))
+ if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C))
return;
/*
@@ -1114,8 +1210,17 @@ EXPORT_SYMBOL_GPL(rt2800_link_tuner);
int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
+ u16 eeprom;
unsigned int i;
+ rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
+ rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
+ rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
+ rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
+ rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
+ rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
+ rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
+
if (rt2x00_is_usb(rt2x00dev)) {
/*
* Wait until BBP and RF are ready.
@@ -1135,8 +1240,25 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
rt2800_register_write(rt2x00dev, PBF_SYS_CTRL,
reg & ~0x00002000);
- } else if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev))
+ } else if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) {
+ /*
+ * Reset DMA indexes
+ */
+ rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
+ rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
+
+ rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
+ rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
+
rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
+ }
rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
@@ -1181,12 +1303,42 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, BCN_TIME_CFG_TX_TIME_COMPENSATE, 0);
rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
- if (rt2x00_is_usb(rt2x00dev) &&
- rt2x00_rt(rt2x00dev, RT3070) &&
- (rt2x00_rev(rt2x00dev) == RT3070_VERSION)) {
+ rt2800_config_filter(rt2x00dev, FIF_ALLMULTI);
+
+ rt2800_register_read(rt2x00dev, BKOFF_SLOT_CFG, &reg);
+ rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_SLOT_TIME, 9);
+ rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_CC_DELAY_TIME, 2);
+ rt2800_register_write(rt2x00dev, BKOFF_SLOT_CFG, reg);
+
+ if (rt2x00_rt(rt2x00dev, RT3071) ||
+ rt2x00_rt(rt2x00dev, RT3090) ||
+ rt2x00_rt(rt2x00dev, RT3390)) {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
- rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
+ if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
+ rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
+ rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);
+ if (rt2x00_get_field16(eeprom, EEPROM_NIC_DAC_TEST))
+ rt2800_register_write(rt2x00dev, TX_SW_CFG2,
+ 0x0000002c);
+ else
+ rt2800_register_write(rt2x00dev, TX_SW_CFG2,
+ 0x0000000f);
+ } else {
+ rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
+ }
+ rt2800_register_write(rt2x00dev, TX_SW_CFG2, reg);
+ } else if (rt2x00_rt(rt2x00dev, RT3070)) {
+ rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
+
+ if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) {
+ rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
+ rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x0000002c);
+ } else {
+ rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
+ rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
+ }
} else {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
@@ -1205,19 +1357,15 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, TX_TIMEOUT_CFG, &reg);
rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_MPDU_LIFETIME, 9);
+ rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_RX_ACK_TIMEOUT, 32);
rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_TX_OP_TIMEOUT, 10);
rt2800_register_write(rt2x00dev, TX_TIMEOUT_CFG, reg);
rt2800_register_read(rt2x00dev, MAX_LEN_CFG, &reg);
rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_MPDU, AGGREGATION_SIZE);
- if ((rt2x00_rt(rt2x00dev, RT2872) &&
- (rt2x00_rev(rt2x00dev) >= RT2880E_VERSION)) ||
- rt2x00_rt(rt2x00dev, RT2880) ||
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT2872, REV_RT2872E) ||
rt2x00_rt(rt2x00dev, RT2883) ||
- rt2x00_rt(rt2x00dev, RT2890) ||
- rt2x00_rt(rt2x00dev, RT3052) ||
- (rt2x00_rt(rt2x00dev, RT3070) &&
- (rt2x00_rev(rt2x00dev) < RT3070_VERSION)))
+ rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070E))
rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 2);
else
rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 1);
@@ -1225,38 +1373,61 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_MPDU, 0);
rt2800_register_write(rt2x00dev, MAX_LEN_CFG, reg);
+ rt2800_register_read(rt2x00dev, LED_CFG, &reg);
+ rt2x00_set_field32(&reg, LED_CFG_ON_PERIOD, 70);
+ rt2x00_set_field32(&reg, LED_CFG_OFF_PERIOD, 30);
+ rt2x00_set_field32(&reg, LED_CFG_SLOW_BLINK_PERIOD, 3);
+ rt2x00_set_field32(&reg, LED_CFG_R_LED_MODE, 3);
+ rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, 3);
+ rt2x00_set_field32(&reg, LED_CFG_Y_LED_MODE, 3);
+ rt2x00_set_field32(&reg, LED_CFG_LED_POLAR, 1);
+ rt2800_register_write(rt2x00dev, LED_CFG, reg);
+
rt2800_register_write(rt2x00dev, PBF_MAX_PCNT, 0x1f3fbf9f);
+ rt2800_register_read(rt2x00dev, TX_RTY_CFG, &reg);
+ rt2x00_set_field32(&reg, TX_RTY_CFG_SHORT_RTY_LIMIT, 15);
+ rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_LIMIT, 31);
+ rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_THRE, 2000);
+ rt2x00_set_field32(&reg, TX_RTY_CFG_NON_AGG_RTY_MODE, 0);
+ rt2x00_set_field32(&reg, TX_RTY_CFG_AGG_RTY_MODE, 0);
+ rt2x00_set_field32(&reg, TX_RTY_CFG_TX_AUTO_FB_ENABLE, 1);
+ rt2800_register_write(rt2x00dev, TX_RTY_CFG, reg);
+
rt2800_register_read(rt2x00dev, AUTO_RSP_CFG, &reg);
rt2x00_set_field32(&reg, AUTO_RSP_CFG_AUTORESPONDER, 1);
+ rt2x00_set_field32(&reg, AUTO_RSP_CFG_BAC_ACK_POLICY, 1);
rt2x00_set_field32(&reg, AUTO_RSP_CFG_CTS_40_MMODE, 0);
rt2x00_set_field32(&reg, AUTO_RSP_CFG_CTS_40_MREF, 0);
+ rt2x00_set_field32(&reg, AUTO_RSP_CFG_AR_PREAMBLE, 1);
rt2x00_set_field32(&reg, AUTO_RSP_CFG_DUAL_CTS_EN, 0);
rt2x00_set_field32(&reg, AUTO_RSP_CFG_ACK_CTS_PSM_BIT, 0);
rt2800_register_write(rt2x00dev, AUTO_RSP_CFG, reg);
rt2800_register_read(rt2x00dev, CCK_PROT_CFG, &reg);
- rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_RATE, 8);
+ rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_RATE, 3);
rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_CTRL, 0);
rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_NAV, 1);
rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_CCK, 1);
rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM20, 1);
- rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM40, 1);
+ rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM40, 0);
rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF20, 1);
- rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF40, 1);
+ rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF40, 0);
+ rt2x00_set_field32(&reg, CCK_PROT_CFG_RTS_TH_EN, 1);
rt2800_register_write(rt2x00dev, CCK_PROT_CFG, reg);
rt2800_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
- rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_RATE, 8);
+ rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_RATE, 3);
rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL, 0);
rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_NAV, 1);
rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_CCK, 1);
rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM20, 1);
- rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM40, 1);
+ rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM40, 0);
rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF20, 1);
- rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF40, 1);
+ rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF40, 0);
+ rt2x00_set_field32(&reg, OFDM_PROT_CFG_RTS_TH_EN, 1);
rt2800_register_write(rt2x00dev, OFDM_PROT_CFG, reg);
rt2800_register_read(rt2x00dev, MM20_PROT_CFG, &reg);
@@ -1269,11 +1440,13 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_MM40, 0);
rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_GF20, 1);
rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_GF40, 0);
+ rt2x00_set_field32(&reg, MM20_PROT_CFG_RTS_TH_EN, 0);
rt2800_register_write(rt2x00dev, MM20_PROT_CFG, reg);
rt2800_register_read(rt2x00dev, MM40_PROT_CFG, &reg);
rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_RATE, 0x4084);
- rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_CTRL, 0);
+ rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_CTRL,
+ !rt2x00_is_usb(rt2x00dev));
rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_NAV, 1);
rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
@@ -1281,6 +1454,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_MM40, 1);
rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_GF20, 1);
rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_GF40, 1);
+ rt2x00_set_field32(&reg, MM40_PROT_CFG_RTS_TH_EN, 0);
rt2800_register_write(rt2x00dev, MM40_PROT_CFG, reg);
rt2800_register_read(rt2x00dev, GF20_PROT_CFG, &reg);
@@ -1293,6 +1467,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_MM40, 0);
rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_GF20, 1);
rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_GF40, 0);
+ rt2x00_set_field32(&reg, GF20_PROT_CFG_RTS_TH_EN, 0);
rt2800_register_write(rt2x00dev, GF20_PROT_CFG, reg);
rt2800_register_read(rt2x00dev, GF40_PROT_CFG, &reg);
@@ -1305,6 +1480,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_MM40, 1);
rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_GF20, 1);
rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_GF40, 1);
+ rt2x00_set_field32(&reg, GF40_PROT_CFG_RTS_TH_EN, 0);
rt2800_register_write(rt2x00dev, GF40_PROT_CFG, reg);
if (rt2x00_is_usb(rt2x00dev)) {
@@ -1334,6 +1510,22 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, TX_RTS_CFG, reg);
rt2800_register_write(rt2x00dev, EXP_ACK_TIME, 0x002400ca);
+
+ /*
+ * Usually the CCK SIFS time should be set to 10 and the OFDM SIFS
+ * time should be set to 16. However, the original Ralink driver uses
+ * 16 for both and indeed using a value of 10 for CCK SIFS results in
+ * connection problems with 11g + CTS protection. Hence, use the same
+ * defaults as the Ralink driver: 16 for both, CCK and OFDM SIFS.
+ */
+ rt2800_register_read(rt2x00dev, XIFS_TIME_CFG, &reg);
+ rt2x00_set_field32(&reg, XIFS_TIME_CFG_CCKM_SIFS_TIME, 16);
+ rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_SIFS_TIME, 16);
+ rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_XIFS_TIME, 4);
+ rt2x00_set_field32(&reg, XIFS_TIME_CFG_EIFS, 314);
+ rt2x00_set_field32(&reg, XIFS_TIME_CFG_BB_RXEND_ENABLE, 1);
+ rt2800_register_write(rt2x00dev, XIFS_TIME_CFG, reg);
+
rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
/*
@@ -1481,45 +1673,79 @@ int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2800_wait_bbp_ready(rt2x00dev)))
return -EACCES;
+ if (rt2800_is_305x_soc(rt2x00dev))
+ rt2800_bbp_write(rt2x00dev, 31, 0x08);
+
rt2800_bbp_write(rt2x00dev, 65, 0x2c);
rt2800_bbp_write(rt2x00dev, 66, 0x38);
- rt2800_bbp_write(rt2x00dev, 69, 0x12);
+
+ if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) {
+ rt2800_bbp_write(rt2x00dev, 69, 0x16);
+ rt2800_bbp_write(rt2x00dev, 73, 0x12);
+ } else {
+ rt2800_bbp_write(rt2x00dev, 69, 0x12);
+ rt2800_bbp_write(rt2x00dev, 73, 0x10);
+ }
+
rt2800_bbp_write(rt2x00dev, 70, 0x0a);
- rt2800_bbp_write(rt2x00dev, 73, 0x10);
- rt2800_bbp_write(rt2x00dev, 81, 0x37);
+
+ if (rt2x00_rt(rt2x00dev, RT3070) ||
+ rt2x00_rt(rt2x00dev, RT3071) ||
+ rt2x00_rt(rt2x00dev, RT3090) ||
+ rt2x00_rt(rt2x00dev, RT3390)) {
+ rt2800_bbp_write(rt2x00dev, 79, 0x13);
+ rt2800_bbp_write(rt2x00dev, 80, 0x05);
+ rt2800_bbp_write(rt2x00dev, 81, 0x33);
+ } else if (rt2800_is_305x_soc(rt2x00dev)) {
+ rt2800_bbp_write(rt2x00dev, 78, 0x0e);
+ rt2800_bbp_write(rt2x00dev, 80, 0x08);
+ } else {
+ rt2800_bbp_write(rt2x00dev, 81, 0x37);
+ }
+
rt2800_bbp_write(rt2x00dev, 82, 0x62);
rt2800_bbp_write(rt2x00dev, 83, 0x6a);
- rt2800_bbp_write(rt2x00dev, 84, 0x99);
+
+ if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D) ||
+ rt2x00_rt_rev(rt2x00dev, RT2870, REV_RT2870D))
+ rt2800_bbp_write(rt2x00dev, 84, 0x19);
+ else
+ rt2800_bbp_write(rt2x00dev, 84, 0x99);
+
rt2800_bbp_write(rt2x00dev, 86, 0x00);
rt2800_bbp_write(rt2x00dev, 91, 0x04);
rt2800_bbp_write(rt2x00dev, 92, 0x00);
- rt2800_bbp_write(rt2x00dev, 103, 0x00);
- rt2800_bbp_write(rt2x00dev, 105, 0x05);
- if (rt2x00_rt(rt2x00dev, RT2860) &&
- (rt2x00_rev(rt2x00dev) == RT2860C_VERSION)) {
- rt2800_bbp_write(rt2x00dev, 69, 0x16);
- rt2800_bbp_write(rt2x00dev, 73, 0x12);
- }
-
- if (rt2x00_rt(rt2x00dev, RT2860) &&
- (rt2x00_rev(rt2x00dev) > RT2860D_VERSION))
- rt2800_bbp_write(rt2x00dev, 84, 0x19);
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT3070, REV_RT3070F) ||
+ rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) ||
+ rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) ||
+ rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) ||
+ rt2800_is_305x_soc(rt2x00dev))
+ rt2800_bbp_write(rt2x00dev, 103, 0xc0);
+ else
+ rt2800_bbp_write(rt2x00dev, 103, 0x00);
- if (rt2x00_is_usb(rt2x00dev) &&
- rt2x00_rt(rt2x00dev, RT3070) &&
- (rt2x00_rev(rt2x00dev) == RT3070_VERSION)) {
- rt2800_bbp_write(rt2x00dev, 70, 0x0a);
- rt2800_bbp_write(rt2x00dev, 84, 0x99);
+ if (rt2800_is_305x_soc(rt2x00dev))
+ rt2800_bbp_write(rt2x00dev, 105, 0x01);
+ else
rt2800_bbp_write(rt2x00dev, 105, 0x05);
- }
+ rt2800_bbp_write(rt2x00dev, 106, 0x35);
- if (rt2x00_rt(rt2x00dev, RT3052)) {
- rt2800_bbp_write(rt2x00dev, 31, 0x08);
- rt2800_bbp_write(rt2x00dev, 78, 0x0e);
- rt2800_bbp_write(rt2x00dev, 80, 0x08);
+ if (rt2x00_rt(rt2x00dev, RT3071) ||
+ rt2x00_rt(rt2x00dev, RT3090) ||
+ rt2x00_rt(rt2x00dev, RT3390)) {
+ rt2800_bbp_read(rt2x00dev, 138, &value);
+
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
+ if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH) == 1)
+ value |= 0x20;
+ if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH) == 1)
+ value &= ~0x02;
+
+ rt2800_bbp_write(rt2x00dev, 138, value);
}
+
for (i = 0; i < EEPROM_BBP_SIZE; i++) {
rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom);
@@ -1598,19 +1824,16 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
{
u8 rfcsr;
u8 bbp;
+ u32 reg;
+ u16 eeprom;
- if (rt2x00_is_usb(rt2x00dev) &&
- rt2x00_rt(rt2x00dev, RT3070) &&
- (rt2x00_rev(rt2x00dev) != RT3070_VERSION))
+ if (!rt2x00_rt(rt2x00dev, RT3070) &&
+ !rt2x00_rt(rt2x00dev, RT3071) &&
+ !rt2x00_rt(rt2x00dev, RT3090) &&
+ !rt2x00_rt(rt2x00dev, RT3390) &&
+ !rt2800_is_305x_soc(rt2x00dev))
return 0;
- if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) {
- if (!rt2x00_rf(rt2x00dev, RF3020) &&
- !rt2x00_rf(rt2x00dev, RF3021) &&
- !rt2x00_rf(rt2x00dev, RF3022))
- return 0;
- }
-
/*
* Init RF calibration.
*/
@@ -1621,13 +1844,15 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
- if (rt2x00_is_usb(rt2x00dev)) {
+ if (rt2x00_rt(rt2x00dev, RT3070) ||
+ rt2x00_rt(rt2x00dev, RT3071) ||
+ rt2x00_rt(rt2x00dev, RT3090)) {
rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
rt2800_rfcsr_write(rt2x00dev, 5, 0x03);
rt2800_rfcsr_write(rt2x00dev, 6, 0x02);
rt2800_rfcsr_write(rt2x00dev, 7, 0x70);
rt2800_rfcsr_write(rt2x00dev, 9, 0x0f);
- rt2800_rfcsr_write(rt2x00dev, 10, 0x71);
+ rt2800_rfcsr_write(rt2x00dev, 10, 0x41);
rt2800_rfcsr_write(rt2x00dev, 11, 0x21);
rt2800_rfcsr_write(rt2x00dev, 12, 0x7b);
rt2800_rfcsr_write(rt2x00dev, 14, 0x90);
@@ -1640,9 +1865,41 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 21, 0xdb);
rt2800_rfcsr_write(rt2x00dev, 24, 0x16);
rt2800_rfcsr_write(rt2x00dev, 25, 0x01);
- rt2800_rfcsr_write(rt2x00dev, 27, 0x03);
rt2800_rfcsr_write(rt2x00dev, 29, 0x1f);
- } else if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) {
+ } else if (rt2x00_rt(rt2x00dev, RT3390)) {
+ rt2800_rfcsr_write(rt2x00dev, 0, 0xa0);
+ rt2800_rfcsr_write(rt2x00dev, 1, 0xe1);
+ rt2800_rfcsr_write(rt2x00dev, 2, 0xf1);
+ rt2800_rfcsr_write(rt2x00dev, 3, 0x62);
+ rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
+ rt2800_rfcsr_write(rt2x00dev, 5, 0x8b);
+ rt2800_rfcsr_write(rt2x00dev, 6, 0x42);
+ rt2800_rfcsr_write(rt2x00dev, 7, 0x34);
+ rt2800_rfcsr_write(rt2x00dev, 8, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 9, 0xc0);
+ rt2800_rfcsr_write(rt2x00dev, 10, 0x61);
+ rt2800_rfcsr_write(rt2x00dev, 11, 0x21);
+ rt2800_rfcsr_write(rt2x00dev, 12, 0x3b);
+ rt2800_rfcsr_write(rt2x00dev, 13, 0xe0);
+ rt2800_rfcsr_write(rt2x00dev, 14, 0x90);
+ rt2800_rfcsr_write(rt2x00dev, 15, 0x53);
+ rt2800_rfcsr_write(rt2x00dev, 16, 0xe0);
+ rt2800_rfcsr_write(rt2x00dev, 17, 0x94);
+ rt2800_rfcsr_write(rt2x00dev, 18, 0x5c);
+ rt2800_rfcsr_write(rt2x00dev, 19, 0x4a);
+ rt2800_rfcsr_write(rt2x00dev, 20, 0xb2);
+ rt2800_rfcsr_write(rt2x00dev, 21, 0xf6);
+ rt2800_rfcsr_write(rt2x00dev, 22, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 23, 0x14);
+ rt2800_rfcsr_write(rt2x00dev, 24, 0x08);
+ rt2800_rfcsr_write(rt2x00dev, 25, 0x3d);
+ rt2800_rfcsr_write(rt2x00dev, 26, 0x85);
+ rt2800_rfcsr_write(rt2x00dev, 27, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 28, 0x41);
+ rt2800_rfcsr_write(rt2x00dev, 29, 0x8f);
+ rt2800_rfcsr_write(rt2x00dev, 30, 0x20);
+ rt2800_rfcsr_write(rt2x00dev, 31, 0x0f);
+ } else if (rt2800_is_305x_soc(rt2x00dev)) {
rt2800_rfcsr_write(rt2x00dev, 0, 0x50);
rt2800_rfcsr_write(rt2x00dev, 1, 0x01);
rt2800_rfcsr_write(rt2x00dev, 2, 0xf7);
@@ -1673,15 +1930,57 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 27, 0x23);
rt2800_rfcsr_write(rt2x00dev, 28, 0x13);
rt2800_rfcsr_write(rt2x00dev, 29, 0x83);
+ rt2800_rfcsr_write(rt2x00dev, 30, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 31, 0x00);
+ return 0;
+ }
+
+ if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) {
+ rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
+ rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
+ rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3);
+ rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
+ } else if (rt2x00_rt(rt2x00dev, RT3071) ||
+ rt2x00_rt(rt2x00dev, RT3090)) {
+ rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR6_R2, 1);
+ rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
+
+ rt2800_rfcsr_write(rt2x00dev, 31, 0x14);
+
+ rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
+ rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
+ if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
+ rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E)) {
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);
+ if (rt2x00_get_field16(eeprom, EEPROM_NIC_DAC_TEST))
+ rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3);
+ else
+ rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 0);
+ }
+ rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
+ } else if (rt2x00_rt(rt2x00dev, RT3390)) {
+ rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
+ rt2x00_set_field32(&reg, GPIO_SWITCH_5, 0);
+ rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg);
}
/*
* Set RX Filter calibration for 20MHz and 40MHz
*/
- rt2x00dev->calibration[0] =
- rt2800_init_rx_filter(rt2x00dev, false, 0x07, 0x16);
- rt2x00dev->calibration[1] =
- rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x19);
+ if (rt2x00_rt(rt2x00dev, RT3070)) {
+ rt2x00dev->calibration[0] =
+ rt2800_init_rx_filter(rt2x00dev, false, 0x07, 0x16);
+ rt2x00dev->calibration[1] =
+ rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x19);
+ } else if (rt2x00_rt(rt2x00dev, RT3071) ||
+ rt2x00_rt(rt2x00dev, RT3090) ||
+ rt2x00_rt(rt2x00dev, RT3390)) {
+ rt2x00dev->calibration[0] =
+ rt2800_init_rx_filter(rt2x00dev, false, 0x07, 0x13);
+ rt2x00dev->calibration[1] =
+ rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x15);
+ }
/*
* Set back to initial state
@@ -1699,6 +1998,81 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0);
rt2800_bbp_write(rt2x00dev, 4, bbp);
+ if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F) ||
+ rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
+ rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
+ rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E))
+ rt2800_rfcsr_write(rt2x00dev, 27, 0x03);
+
+ rt2800_register_read(rt2x00dev, OPT_14_CSR, &reg);
+ rt2x00_set_field32(&reg, OPT_14_CSR_BIT0, 1);
+ rt2800_register_write(rt2x00dev, OPT_14_CSR, reg);
+
+ rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR17_TX_LO1_EN, 0);
+ if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
+ rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
+ rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);
+ if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_BG))
+ rt2x00_set_field8(&rfcsr, RFCSR17_R, 1);
+ }
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &eeprom);
+ if (rt2x00_get_field16(eeprom, EEPROM_TXMIXER_GAIN_BG_VAL) >= 1)
+ rt2x00_set_field8(&rfcsr, RFCSR17_TXMIXER_GAIN,
+ rt2x00_get_field16(eeprom,
+ EEPROM_TXMIXER_GAIN_BG_VAL));
+ rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
+
+ if (rt2x00_rt(rt2x00dev, RT3090)) {
+ rt2800_bbp_read(rt2x00dev, 138, &bbp);
+
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
+ if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH) == 1)
+ rt2x00_set_field8(&bbp, BBP138_RX_ADC1, 0);
+ if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH) == 1)
+ rt2x00_set_field8(&bbp, BBP138_TX_DAC1, 1);
+
+ rt2800_bbp_write(rt2x00dev, 138, bbp);
+ }
+
+ if (rt2x00_rt(rt2x00dev, RT3071) ||
+ rt2x00_rt(rt2x00dev, RT3090) ||
+ rt2x00_rt(rt2x00dev, RT3390)) {
+ rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
+ rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 15, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR15_TX_LO2_EN, 0);
+ rt2800_rfcsr_write(rt2x00dev, 15, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 20, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR20_RX_LO1_EN, 0);
+ rt2800_rfcsr_write(rt2x00dev, 20, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 21, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR21_RX_LO2_EN, 0);
+ rt2800_rfcsr_write(rt2x00dev, 21, rfcsr);
+ }
+
+ if (rt2x00_rt(rt2x00dev, RT3070) || rt2x00_rt(rt2x00dev, RT3071)) {
+ rt2800_rfcsr_read(rt2x00dev, 27, &rfcsr);
+ if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F) ||
+ rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E))
+ rt2x00_set_field8(&rfcsr, RFCSR27_R1, 3);
+ else
+ rt2x00_set_field8(&rfcsr, RFCSR27_R1, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR27_R2, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR27_R3, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR27_R4, 0);
+ rt2800_rfcsr_write(rt2x00dev, 27, rfcsr);
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(rt2800_init_rfcsr);
@@ -1774,10 +2148,7 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word);
} else if (rt2x00_rt(rt2x00dev, RT2860) ||
rt2x00_rt(rt2x00dev, RT2870) ||
- rt2x00_rt(rt2x00dev, RT2872) ||
- rt2x00_rt(rt2x00dev, RT2880) ||
- (rt2x00_rt(rt2x00dev, RT2883) &&
- (rt2x00_rev(rt2x00dev) < RT2883_VERSION))) {
+ rt2x00_rt(rt2x00dev, RT2872)) {
/*
* There is a max of 2 RX streams for RT28x0 series
*/
@@ -1882,10 +2253,7 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
if (!rt2x00_rt(rt2x00dev, RT2860) &&
!rt2x00_rt(rt2x00dev, RT2870) &&
!rt2x00_rt(rt2x00dev, RT2872) &&
- !rt2x00_rt(rt2x00dev, RT2880) &&
!rt2x00_rt(rt2x00dev, RT2883) &&
- !rt2x00_rt(rt2x00dev, RT2890) &&
- !rt2x00_rt(rt2x00dev, RT3052) &&
!rt2x00_rt(rt2x00dev, RT3070) &&
!rt2x00_rt(rt2x00dev, RT3071) &&
!rt2x00_rt(rt2x00dev, RT3090) &&
@@ -1954,7 +2322,7 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
EXPORT_SYMBOL_GPL(rt2800_init_eeprom);
/*
- * RF value list for rt28x0
+ * RF value list for rt28xx
* Supports: 2.4 GHz (all) & 5.2 GHz (RF2850 & RF2750)
*/
static const struct rf_channel rf_vals[] = {
@@ -2029,10 +2397,10 @@ static const struct rf_channel rf_vals[] = {
};
/*
- * RF value list for rt3070
- * Supports: 2.4 GHz
+ * RF value list for rt3xxx
+ * Supports: 2.4 GHz (all) & 5.2 GHz (RF3052)
*/
-static const struct rf_channel rf_vals_302x[] = {
+static const struct rf_channel rf_vals_3x[] = {
{1, 241, 2, 2 },
{2, 241, 2, 7 },
{3, 242, 2, 2 },
@@ -2047,6 +2415,51 @@ static const struct rf_channel rf_vals_302x[] = {
{12, 246, 2, 7 },
{13, 247, 2, 2 },
{14, 248, 2, 4 },
+
+ /* 802.11 UNI / HyperLan 2 */
+ {36, 0x56, 0, 4},
+ {38, 0x56, 0, 6},
+ {40, 0x56, 0, 8},
+ {44, 0x57, 0, 0},
+ {46, 0x57, 0, 2},
+ {48, 0x57, 0, 4},
+ {52, 0x57, 0, 8},
+ {54, 0x57, 0, 10},
+ {56, 0x58, 0, 0},
+ {60, 0x58, 0, 4},
+ {62, 0x58, 0, 6},
+ {64, 0x58, 0, 8},
+
+ /* 802.11 HyperLan 2 */
+ {100, 0x5b, 0, 8},
+ {102, 0x5b, 0, 10},
+ {104, 0x5c, 0, 0},
+ {108, 0x5c, 0, 4},
+ {110, 0x5c, 0, 6},
+ {112, 0x5c, 0, 8},
+ {116, 0x5d, 0, 0},
+ {118, 0x5d, 0, 2},
+ {120, 0x5d, 0, 4},
+ {124, 0x5d, 0, 8},
+ {126, 0x5d, 0, 10},
+ {128, 0x5e, 0, 0},
+ {132, 0x5e, 0, 4},
+ {134, 0x5e, 0, 6},
+ {136, 0x5e, 0, 8},
+ {140, 0x5f, 0, 0},
+
+ /* 802.11 UNII */
+ {149, 0x5f, 0, 9},
+ {151, 0x5f, 0, 11},
+ {153, 0x60, 0, 1},
+ {157, 0x60, 0, 5},
+ {159, 0x60, 0, 7},
+ {161, 0x60, 0, 9},
+ {165, 0x61, 0, 1},
+ {167, 0x61, 0, 3},
+ {169, 0x61, 0, 5},
+ {171, 0x61, 0, 7},
+ {173, 0x61, 0, 9},
};
int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
@@ -2087,11 +2500,11 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
if (rt2x00_rf(rt2x00dev, RF2820) ||
- rt2x00_rf(rt2x00dev, RF2720) ||
- rt2x00_rf(rt2x00dev, RF3052)) {
+ rt2x00_rf(rt2x00dev, RF2720)) {
spec->num_channels = 14;
spec->channels = rf_vals;
- } else if (rt2x00_rf(rt2x00dev, RF2850) || rt2x00_rf(rt2x00dev, RF2750)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2850) ||
+ rt2x00_rf(rt2x00dev, RF2750)) {
spec->supported_bands |= SUPPORT_BAND_5GHZ;
spec->num_channels = ARRAY_SIZE(rf_vals);
spec->channels = rf_vals;
@@ -2099,8 +2512,12 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
rt2x00_rf(rt2x00dev, RF2020) ||
rt2x00_rf(rt2x00dev, RF3021) ||
rt2x00_rf(rt2x00dev, RF3022)) {
- spec->num_channels = ARRAY_SIZE(rf_vals_302x);
- spec->channels = rf_vals_302x;
+ spec->num_channels = 14;
+ spec->channels = rf_vals_3x;
+ } else if (rt2x00_rf(rt2x00dev, RF3052)) {
+ spec->supported_bands |= SUPPORT_BAND_5GHZ;
+ spec->num_channels = ARRAY_SIZE(rf_vals_3x);
+ spec->channels = rf_vals_3x;
}
/*
@@ -2111,8 +2528,11 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
else
spec->ht.ht_supported = false;
+ /*
+ * Don't set IEEE80211_HT_CAP_SUP_WIDTH_20_40 for now as it causes
+ * reception problems with HT40 capable 11n APs
+ */
spec->ht.cap =
- IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
IEEE80211_HT_CAP_GRN_FLD |
IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_SGI_40 |
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index ebabeae62d1b..94de999e2290 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -111,6 +111,9 @@ void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
const u8 command, const u8 token,
const u8 arg0, const u8 arg1);
+void rt2800_write_txwi(struct sk_buff *skb, struct txentry_desc *txdesc);
+void rt2800_process_rxwi(struct sk_buff *skb, struct rxdone_entry_desc *rxdesc);
+
extern const struct rt2x00debug rt2800_rt2x00debug;
int rt2800_rfkill_poll(struct rt2x00_dev *rt2x00dev);
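Editor's note on the two new exports above (not part of the patch itself): rt2800_write_txwi() and rt2800_process_rxwi() move the TXWI/RXWI handling into rt2800lib so the PCI and USB bus drivers can share it. A minimal sketch of the intended call pattern, using only symbols that appear elsewhere in this diff:

/* Illustrative sketch only -- mirrors how rt2800pci hooks the shared
 * helpers further down in this diff.
 */
static int example_write_tx_data(struct queue_entry *entry,
				 struct txentry_desc *txdesc)
{
	int ret;

	/* Bus-specific DMA/skb bookkeeping first ... */
	ret = rt2x00pci_write_tx_data(entry, txdesc);
	if (ret)
		return ret;

	/* ... then the chipset-common TXWI is written by rt2800lib. */
	rt2800_write_txwi(entry->skb, txdesc);
	return 0;
}

static void example_fill_rxdone(struct queue_entry *entry,
				struct rxdone_entry_desc *rxdesc)
{
	/* Bus-specific RXD parsing happens here (omitted) ... */

	/* ... then the chipset-common RXWI at the head of the skb is
	 * decoded (rate, RSSI, size) by the shared helper.
	 */
	rt2800_process_rxwi(entry->skb, rxdesc);
}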
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 91cce2d0f6db..7d4778d66e77 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -60,6 +60,12 @@ static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
unsigned int i;
u32 reg;
+ /*
+ * SOC devices don't support MCU requests.
+ */
+ if (rt2x00_is_soc(rt2x00dev))
+ return;
+
for (i = 0; i < 200; i++) {
rt2800_register_read(rt2x00dev, H2M_MAILBOX_CID, &reg);
@@ -341,19 +347,6 @@ static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
struct queue_entry_priv_pci *entry_priv;
u32 reg;
- rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
- rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
-
- rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
- rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
-
/*
* Initialize registers.
*/
@@ -620,64 +613,30 @@ static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev,
/*
* TX descriptor initialization
*/
+static int rt2800pci_write_tx_data(struct queue_entry *entry,
+ struct txentry_desc *txdesc)
+{
+ int ret;
+
+ ret = rt2x00pci_write_tx_data(entry, txdesc);
+ if (ret)
+ return ret;
+
+ rt2800_write_txwi(entry->skb, txdesc);
+
+ return 0;
+}
+
static void rt2800pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
struct sk_buff *skb,
struct txentry_desc *txdesc)
{
struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
__le32 *txd = skbdesc->desc;
- __le32 *txwi = (__le32 *)(skb->data - rt2x00dev->ops->extra_tx_headroom);
u32 word;
/*
- * Initialize TX Info descriptor
- */
- rt2x00_desc_read(txwi, 0, &word);
- rt2x00_set_field32(&word, TXWI_W0_FRAG,
- test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W0_MIMO_PS, 0);
- rt2x00_set_field32(&word, TXWI_W0_CF_ACK, 0);
- rt2x00_set_field32(&word, TXWI_W0_TS,
- test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W0_AMPDU,
- test_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W0_MPDU_DENSITY, txdesc->mpdu_density);
- rt2x00_set_field32(&word, TXWI_W0_TX_OP, txdesc->ifs);
- rt2x00_set_field32(&word, TXWI_W0_MCS, txdesc->mcs);
- rt2x00_set_field32(&word, TXWI_W0_BW,
- test_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W0_SHORT_GI,
- test_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W0_STBC, txdesc->stbc);
- rt2x00_set_field32(&word, TXWI_W0_PHYMODE, txdesc->rate_mode);
- rt2x00_desc_write(txwi, 0, word);
-
- rt2x00_desc_read(txwi, 1, &word);
- rt2x00_set_field32(&word, TXWI_W1_ACK,
- test_bit(ENTRY_TXD_ACK, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W1_NSEQ,
- test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->ba_size);
- rt2x00_set_field32(&word, TXWI_W1_WIRELESS_CLI_ID,
- test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ?
- txdesc->key_idx : 0xff);
- rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT,
- skb->len - txdesc->l2pad);
- rt2x00_set_field32(&word, TXWI_W1_PACKETID,
- skbdesc->entry->queue->qid + 1);
- rt2x00_desc_write(txwi, 1, word);
-
- /*
- * Always write 0 to IV/EIV fields, hardware will insert the IV
- * from the IVEIV register when TXD_W3_WIV is set to 0.
- * When TXD_W3_WIV is set to 1 it will use the IV data
- * from the descriptor. The TXWI_W1_WIRELESS_CLI_ID indicates which
- * crypto entry in the registers should be used to encrypt the frame.
- */
- _rt2x00_desc_write(txwi, 2, 0 /* skbdesc->iv[0] */);
- _rt2x00_desc_write(txwi, 3, 0 /* skbdesc->iv[1] */);
-
- /*
* The buffers pointed by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1
* must contains a TXWI structure + 802.11 header + padding + 802.11
* data. We choose to have SD_PTR0/SD_LEN0 only contains TXWI and
@@ -719,10 +678,10 @@ static void rt2800pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
/*
* TX data initialization
*/
-static void rt2800pci_write_beacon(struct queue_entry *entry)
+static void rt2800pci_write_beacon(struct queue_entry *entry,
+ struct txentry_desc *txdesc)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
unsigned int beacon_base;
u32 reg;
@@ -735,15 +694,25 @@ static void rt2800pci_write_beacon(struct queue_entry *entry)
rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
/*
- * Write entire beacon with descriptor to register.
+ * Add the TXWI for the beacon to the skb.
+ */
+ rt2800_write_txwi(entry->skb, txdesc);
+ skb_push(entry->skb, TXWI_DESC_SIZE);
+
+ /*
+ * Write entire beacon with TXWI to register.
*/
beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
- rt2800_register_multiwrite(rt2x00dev,
- beacon_base,
- skbdesc->desc, skbdesc->desc_len);
- rt2800_register_multiwrite(rt2x00dev,
- beacon_base + skbdesc->desc_len,
- entry->skb->data, entry->skb->len);
+ rt2800_register_multiwrite(rt2x00dev, beacon_base,
+ entry->skb->data, entry->skb->len);
+
+ /*
+ * Enable beaconing again.
+ */
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
+ rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
/*
* Clean up beacon skb.
@@ -757,18 +726,6 @@ static void rt2800pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
{
struct data_queue *queue;
unsigned int idx, qidx = 0;
- u32 reg;
-
- if (queue_idx == QID_BEACON) {
- rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
- if (!rt2x00_get_field32(reg, BCN_TIME_CFG_BEACON_GEN)) {
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
- rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
- }
- return;
- }
if (queue_idx > QID_HCCA && queue_idx != QID_MGMT)
return;
@@ -811,34 +768,21 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct queue_entry_priv_pci *entry_priv = entry->priv_data;
__le32 *rxd = entry_priv->desc;
- __le32 *rxwi = (__le32 *)entry->skb->data;
- u32 rxd3;
- u32 rxwi0;
- u32 rxwi1;
- u32 rxwi2;
- u32 rxwi3;
-
- rt2x00_desc_read(rxd, 3, &rxd3);
- rt2x00_desc_read(rxwi, 0, &rxwi0);
- rt2x00_desc_read(rxwi, 1, &rxwi1);
- rt2x00_desc_read(rxwi, 2, &rxwi2);
- rt2x00_desc_read(rxwi, 3, &rxwi3);
-
- if (rt2x00_get_field32(rxd3, RXD_W3_CRC_ERROR))
+ u32 word;
+
+ rt2x00_desc_read(rxd, 3, &word);
+
+ if (rt2x00_get_field32(word, RXD_W3_CRC_ERROR))
rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
- if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
- /*
- * Unfortunately we don't know the cipher type used during
- * decryption. This prevents us from correct providing
- * correct statistics through debugfs.
- */
- rxdesc->cipher = rt2x00_get_field32(rxwi0, RXWI_W0_UDF);
- rxdesc->cipher_status =
- rt2x00_get_field32(rxd3, RXD_W3_CIPHER_ERROR);
- }
+ /*
+ * Unfortunately we don't know the cipher type used during
+	 * decryption. This prevents us from providing correct
+	 * statistics through debugfs.
+ */
+ rxdesc->cipher_status = rt2x00_get_field32(word, RXD_W3_CIPHER_ERROR);
- if (rt2x00_get_field32(rxd3, RXD_W3_DECRYPTED)) {
+ if (rt2x00_get_field32(word, RXD_W3_DECRYPTED)) {
/*
* Hardware has stripped IV/EIV data from 802.11 frame during
* decryption. Unfortunately the descriptor doesn't contain
@@ -853,51 +797,22 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
rxdesc->flags |= RX_FLAG_MMIC_ERROR;
}
- if (rt2x00_get_field32(rxd3, RXD_W3_MY_BSS))
+ if (rt2x00_get_field32(word, RXD_W3_MY_BSS))
rxdesc->dev_flags |= RXDONE_MY_BSS;
- if (rt2x00_get_field32(rxd3, RXD_W3_L2PAD))
+ if (rt2x00_get_field32(word, RXD_W3_L2PAD))
rxdesc->dev_flags |= RXDONE_L2PAD;
- if (rt2x00_get_field32(rxwi1, RXWI_W1_SHORT_GI))
- rxdesc->flags |= RX_FLAG_SHORT_GI;
-
- if (rt2x00_get_field32(rxwi1, RXWI_W1_BW))
- rxdesc->flags |= RX_FLAG_40MHZ;
-
- /*
- * Detect RX rate, always use MCS as signal type.
- */
- rxdesc->dev_flags |= RXDONE_SIGNAL_MCS;
- rxdesc->rate_mode = rt2x00_get_field32(rxwi1, RXWI_W1_PHYMODE);
- rxdesc->signal = rt2x00_get_field32(rxwi1, RXWI_W1_MCS);
-
/*
- * Mask of 0x8 bit to remove the short preamble flag.
+ * Process the RXWI structure that is at the start of the buffer.
*/
- if (rxdesc->rate_mode == RATE_MODE_CCK)
- rxdesc->signal &= ~0x8;
-
- rxdesc->rssi =
- (rt2x00_get_field32(rxwi2, RXWI_W2_RSSI0) +
- rt2x00_get_field32(rxwi2, RXWI_W2_RSSI1)) / 2;
-
- rxdesc->noise =
- (rt2x00_get_field32(rxwi3, RXWI_W3_SNR0) +
- rt2x00_get_field32(rxwi3, RXWI_W3_SNR1)) / 2;
-
- rxdesc->size = rt2x00_get_field32(rxwi0, RXWI_W0_MPDU_TOTAL_BYTE_COUNT);
+ rt2800_process_rxwi(entry->skb, rxdesc);
/*
* Set RX IDX in register to inform hardware that we have handled
* this entry and it is available for reuse again.
*/
rt2800_register_write(rt2x00dev, RX_CRX_IDX, entry->entry_idx);
-
- /*
- * Remove TXWI descriptor from start of buffer.
- */
- skb_pull(entry->skb, RXWI_DESC_SIZE);
}
/*
@@ -907,14 +822,12 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
{
struct data_queue *queue;
struct queue_entry *entry;
- struct queue_entry *entry_done;
- struct queue_entry_priv_pci *entry_priv;
+ __le32 *txwi;
struct txdone_entry_desc txdesc;
u32 word;
u32 reg;
u32 old_reg;
- unsigned int type;
- unsigned int index;
+ int wcid, ack, pid, tx_wcid, tx_ack, tx_pid;
u16 mcs, real_mcs;
/*
@@ -936,76 +849,89 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
break;
old_reg = reg;
+ wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID);
+ ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED);
+ pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE);
+
/*
* Skip this entry when it contains an invalid
* queue identication number.
*/
- type = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE) - 1;
- if (type >= QID_RX)
+ if (pid <= 0 || pid > QID_RX)
continue;
- queue = rt2x00queue_get_queue(rt2x00dev, type);
+ queue = rt2x00queue_get_queue(rt2x00dev, pid - 1);
if (unlikely(!queue))
continue;
/*
- * Skip this entry when it contains an invalid
- * index number.
+		 * Inside each queue, we process each entry in chronological
+		 * order. We first check that the queue is not empty.
*/
- index = rt2x00_get_field32(reg, TX_STA_FIFO_WCID) - 1;
- if (unlikely(index >= queue->limit))
+ if (rt2x00queue_empty(queue))
continue;
+ entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
- entry = &queue->entries[index];
- entry_priv = entry->priv_data;
- rt2x00_desc_read((__le32 *)entry->skb->data, 0, &word);
+		/*
+		 * Check if we got a match by looking at the WCID/ACK/PID
+		 * fields.
+		 */
+ txwi = (__le32 *)(entry->skb->data -
+ rt2x00dev->ops->extra_tx_headroom);
- entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
- while (entry != entry_done) {
- /*
- * Catch up.
- * Just report any entries we missed as failed.
- */
- WARNING(rt2x00dev,
- "TX status report missed for entry %d\n",
- entry_done->entry_idx);
+ rt2x00_desc_read(txwi, 1, &word);
+ tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
+ tx_ack = rt2x00_get_field32(word, TXWI_W1_ACK);
+ tx_pid = rt2x00_get_field32(word, TXWI_W1_PACKETID);
- txdesc.flags = 0;
- __set_bit(TXDONE_UNKNOWN, &txdesc.flags);
- txdesc.retry = 0;
-
- rt2x00lib_txdone(entry_done, &txdesc);
- entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
- }
+ if ((wcid != tx_wcid) || (ack != tx_ack) || (pid != tx_pid))
+ WARNING(rt2x00dev, "invalid TX_STA_FIFO content\n");
/*
* Obtain the status about this packet.
*/
txdesc.flags = 0;
- if (rt2x00_get_field32(reg, TX_STA_FIFO_TX_SUCCESS))
- __set_bit(TXDONE_SUCCESS, &txdesc.flags);
- else
- __set_bit(TXDONE_FAILURE, &txdesc.flags);
+ rt2x00_desc_read(txwi, 0, &word);
+ mcs = rt2x00_get_field32(word, TXWI_W0_MCS);
+ real_mcs = rt2x00_get_field32(reg, TX_STA_FIFO_MCS);
/*
* Ralink has a retry mechanism using a global fallback
- * table. We setup this fallback table to try immediate
- * lower rate for all rates. In the TX_STA_FIFO,
- * the MCS field contains the MCS used for the successfull
- * transmission. If the first transmission succeed,
- * we have mcs == tx_mcs. On the second transmission,
- * we have mcs = tx_mcs - 1. So the number of
- * retry is (tx_mcs - mcs).
+		 * table. We set up this fallback table to try the next
+		 * lower rate for all rates. In the TX_STA_FIFO, the MCS field
+ * always contains the MCS used for the last transmission, be
+ * it successful or not.
*/
- mcs = rt2x00_get_field32(word, TXWI_W0_MCS);
- real_mcs = rt2x00_get_field32(reg, TX_STA_FIFO_MCS);
+ if (rt2x00_get_field32(reg, TX_STA_FIFO_TX_SUCCESS)) {
+ /*
+ * Transmission succeeded. The number of retries is
+ * mcs - real_mcs
+ */
+ __set_bit(TXDONE_SUCCESS, &txdesc.flags);
+ txdesc.retry = ((mcs > real_mcs) ? mcs - real_mcs : 0);
+ } else {
+ /*
+ * Transmission failed. The number of retries is
+ * always 7 in this case (for a total number of 8
+ * frames sent).
+ */
+ __set_bit(TXDONE_FAILURE, &txdesc.flags);
+ txdesc.retry = 7;
+ }
+
__set_bit(TXDONE_FALLBACK, &txdesc.flags);
- txdesc.retry = mcs - min(mcs, real_mcs);
+
rt2x00lib_txdone(entry, &txdesc);
}
}
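Side note (not part of the patch): the retry accounting above can be read as a small worked example. If a frame was queued at MCS 7 and TX_STA_FIFO reports MCS 5, the fallback table was walked twice, so retry = 2; a failed frame always reports 7 retries (8 attempts in total). A condensed sketch of that logic:

/* Illustrative only -- condenses the TX status handling above. */
static void example_fill_txdone(struct txdone_entry_desc *txdesc,
				u16 mcs, u16 real_mcs, bool success)
{
	txdesc->flags = 0;

	if (success) {
		/* Each fallback step lowers the MCS by one. */
		__set_bit(TXDONE_SUCCESS, &txdesc->flags);
		txdesc->retry = (mcs > real_mcs) ? mcs - real_mcs : 0;
	} else {
		/* Hardware gives up after 8 attempts in total. */
		__set_bit(TXDONE_FAILURE, &txdesc->flags);
		txdesc->retry = 7;
	}

	__set_bit(TXDONE_FALLBACK, &txdesc->flags);
}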
+static void rt2800pci_wakeup(struct rt2x00_dev *rt2x00dev)
+{
+ struct ieee80211_conf conf = { .flags = 0 };
+ struct rt2x00lib_conf libconf = { .conf = &conf };
+
+ rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
+}
+
static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
{
struct rt2x00_dev *rt2x00dev = dev_instance;
@@ -1030,6 +956,9 @@ static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS))
rt2800pci_txdone(rt2x00dev);
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
+ rt2800pci_wakeup(rt2x00dev);
+
return IRQ_HANDLED;
}
@@ -1128,7 +1057,7 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
.reset_tuner = rt2800_reset_tuner,
.link_tuner = rt2800_link_tuner,
.write_tx_desc = rt2800pci_write_tx_desc,
- .write_tx_data = rt2x00pci_write_tx_data,
+ .write_tx_data = rt2800pci_write_tx_data,
.write_beacon = rt2800pci_write_beacon,
.kick_tx_queue = rt2800pci_kick_tx_queue,
.kill_tx_queue = rt2800pci_kill_tx_queue,
@@ -1184,6 +1113,7 @@ static const struct rt2x00_ops rt2800pci_ops = {
/*
* RT2800pci module information.
*/
+#ifdef CONFIG_RT2800PCI_PCI
static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
{ PCI_DEVICE(0x1814, 0x0601), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x0681), PCI_DEVICE_DATA(&rt2800pci_ops) },
@@ -1208,9 +1138,11 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
{ PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x3592), PCI_DEVICE_DATA(&rt2800pci_ops) },
+ { PCI_DEVICE(0x1814, 0x3593), PCI_DEVICE_DATA(&rt2800pci_ops) },
#endif
{ 0, }
};
+#endif /* CONFIG_RT2800PCI_PCI */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index d27d7d5d850c..8ad0669a1b99 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -401,59 +401,15 @@ static void rt2800usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
{
struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
__le32 *txi = skbdesc->desc;
- __le32 *txwi = &txi[TXINFO_DESC_SIZE / sizeof(__le32)];
u32 word;
/*
- * Initialize TX Info descriptor
+ * Initialize TXWI descriptor
*/
- rt2x00_desc_read(txwi, 0, &word);
- rt2x00_set_field32(&word, TXWI_W0_FRAG,
- test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W0_MIMO_PS, 0);
- rt2x00_set_field32(&word, TXWI_W0_CF_ACK, 0);
- rt2x00_set_field32(&word, TXWI_W0_TS,
- test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W0_AMPDU,
- test_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W0_MPDU_DENSITY, txdesc->mpdu_density);
- rt2x00_set_field32(&word, TXWI_W0_TX_OP, txdesc->ifs);
- rt2x00_set_field32(&word, TXWI_W0_MCS, txdesc->mcs);
- rt2x00_set_field32(&word, TXWI_W0_BW,
- test_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W0_SHORT_GI,
- test_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W0_STBC, txdesc->stbc);
- rt2x00_set_field32(&word, TXWI_W0_PHYMODE, txdesc->rate_mode);
- rt2x00_desc_write(txwi, 0, word);
-
- rt2x00_desc_read(txwi, 1, &word);
- rt2x00_set_field32(&word, TXWI_W1_ACK,
- test_bit(ENTRY_TXD_ACK, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W1_NSEQ,
- test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->ba_size);
- rt2x00_set_field32(&word, TXWI_W1_WIRELESS_CLI_ID,
- test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ?
- txdesc->key_idx : 0xff);
- rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT,
- skb->len - txdesc->l2pad);
- rt2x00_set_field32(&word, TXWI_W1_PACKETID,
- skbdesc->entry->queue->qid + 1);
- rt2x00_desc_write(txwi, 1, word);
+ rt2800_write_txwi(skb, txdesc);
/*
- * Always write 0 to IV/EIV fields, hardware will insert the IV
- * from the IVEIV register when TXINFO_W0_WIV is set to 0.
- * When TXINFO_W0_WIV is set to 1 it will use the IV data
- * from the descriptor. The TXWI_W1_WIRELESS_CLI_ID indicates which
- * crypto entry in the registers should be used to encrypt the frame.
- */
- _rt2x00_desc_write(txwi, 2, 0 /* skbdesc->iv[0] */);
- _rt2x00_desc_write(txwi, 3, 0 /* skbdesc->iv[1] */);
-
- /*
- * Initialize TX descriptor
+ * Initialize TXINFO descriptor
*/
rt2x00_desc_read(txi, 0, &word);
rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN,
@@ -471,21 +427,14 @@ static void rt2800usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
/*
* TX data initialization
*/
-static void rt2800usb_write_beacon(struct queue_entry *entry)
+static void rt2800usb_write_beacon(struct queue_entry *entry,
+ struct txentry_desc *txdesc)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
unsigned int beacon_base;
u32 reg;
/*
- * Add the descriptor in front of the skb.
- */
- skb_push(entry->skb, entry->queue->desc_size);
- memcpy(entry->skb->data, skbdesc->desc, skbdesc->desc_len);
- skbdesc->desc = entry->skb->data;
-
- /*
* Disable beaconing while we are reloading the beacon data,
* otherwise we might be sending out invalid data.
*/
@@ -494,6 +443,12 @@ static void rt2800usb_write_beacon(struct queue_entry *entry)
rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
/*
+ * Add the TXWI for the beacon to the skb.
+ */
+ rt2800_write_txwi(entry->skb, txdesc);
+ skb_push(entry->skb, TXWI_DESC_SIZE);
+
+ /*
* Write entire beacon with descriptor to register.
*/
beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
@@ -503,6 +458,14 @@ static void rt2800usb_write_beacon(struct queue_entry *entry)
REGISTER_TIMEOUT32(entry->skb->len));
/*
+ * Enable beaconing again.
+ */
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
+ rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+
+ /*
* Clean up the beacon skb.
*/
dev_kfree_skb(entry->skb);
@@ -524,84 +487,53 @@ static int rt2800usb_get_tx_data_len(struct queue_entry *entry)
return length;
}
-static void rt2800usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
- const enum data_queue_qid queue)
-{
- u32 reg;
-
- if (queue != QID_BEACON) {
- rt2x00usb_kick_tx_queue(rt2x00dev, queue);
- return;
- }
-
- rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
- if (!rt2x00_get_field32(reg, BCN_TIME_CFG_BEACON_GEN)) {
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
- rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
- }
-}
-
/*
* RX control handlers
*/
static void rt2800usb_fill_rxdone(struct queue_entry *entry,
struct rxdone_entry_desc *rxdesc)
{
- struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
__le32 *rxi = (__le32 *)entry->skb->data;
- __le32 *rxwi;
__le32 *rxd;
- u32 rxi0;
- u32 rxwi0;
- u32 rxwi1;
- u32 rxwi2;
- u32 rxwi3;
- u32 rxd0;
+ u32 word;
int rx_pkt_len;
/*
+ * Copy descriptor to the skbdesc->desc buffer, making it safe from
+ * moving of frame data in rt2x00usb.
+ */
+ memcpy(skbdesc->desc, rxi, skbdesc->desc_len);
+
+ /*
* RX frame format is :
* | RXINFO | RXWI | header | L2 pad | payload | pad | RXD | USB pad |
* |<------------ rx_pkt_len -------------->|
*/
- rt2x00_desc_read(rxi, 0, &rxi0);
- rx_pkt_len = rt2x00_get_field32(rxi0, RXINFO_W0_USB_DMA_RX_PKT_LEN);
-
- rxwi = (__le32 *)(entry->skb->data + RXINFO_DESC_SIZE);
+ rt2x00_desc_read(rxi, 0, &word);
+ rx_pkt_len = rt2x00_get_field32(word, RXINFO_W0_USB_DMA_RX_PKT_LEN);
/*
- * FIXME : we need to check for rx_pkt_len validity
+	 * Remove the RXINFO structure from the skb.
*/
- rxd = (__le32 *)(entry->skb->data + RXINFO_DESC_SIZE + rx_pkt_len);
+ skb_pull(entry->skb, RXINFO_DESC_SIZE);
/*
- * Copy descriptor to the skbdesc->desc buffer, making it safe from
- * moving of frame data in rt2x00usb.
+ * FIXME: we need to check for rx_pkt_len validity
*/
- memcpy(skbdesc->desc, rxi, skbdesc->desc_len);
+ rxd = (__le32 *)(entry->skb->data + rx_pkt_len);
/*
* It is now safe to read the descriptor on all architectures.
*/
- rt2x00_desc_read(rxwi, 0, &rxwi0);
- rt2x00_desc_read(rxwi, 1, &rxwi1);
- rt2x00_desc_read(rxwi, 2, &rxwi2);
- rt2x00_desc_read(rxwi, 3, &rxwi3);
- rt2x00_desc_read(rxd, 0, &rxd0);
+ rt2x00_desc_read(rxd, 0, &word);
- if (rt2x00_get_field32(rxd0, RXD_W0_CRC_ERROR))
+ if (rt2x00_get_field32(word, RXD_W0_CRC_ERROR))
rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
- if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
- rxdesc->cipher = rt2x00_get_field32(rxwi0, RXWI_W0_UDF);
- rxdesc->cipher_status =
- rt2x00_get_field32(rxd0, RXD_W0_CIPHER_ERROR);
- }
+ rxdesc->cipher_status = rt2x00_get_field32(word, RXD_W0_CIPHER_ERROR);
- if (rt2x00_get_field32(rxd0, RXD_W0_DECRYPTED)) {
+ if (rt2x00_get_field32(word, RXD_W0_DECRYPTED)) {
/*
* Hardware has stripped IV/EIV data from 802.11 frame during
* decryption. Unfortunately the descriptor doesn't contain
@@ -616,45 +548,21 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
rxdesc->flags |= RX_FLAG_MMIC_ERROR;
}
- if (rt2x00_get_field32(rxd0, RXD_W0_MY_BSS))
+ if (rt2x00_get_field32(word, RXD_W0_MY_BSS))
rxdesc->dev_flags |= RXDONE_MY_BSS;
- if (rt2x00_get_field32(rxd0, RXD_W0_L2PAD))
+ if (rt2x00_get_field32(word, RXD_W0_L2PAD))
rxdesc->dev_flags |= RXDONE_L2PAD;
- if (rt2x00_get_field32(rxwi1, RXWI_W1_SHORT_GI))
- rxdesc->flags |= RX_FLAG_SHORT_GI;
-
- if (rt2x00_get_field32(rxwi1, RXWI_W1_BW))
- rxdesc->flags |= RX_FLAG_40MHZ;
-
- /*
- * Detect RX rate, always use MCS as signal type.
- */
- rxdesc->dev_flags |= RXDONE_SIGNAL_MCS;
- rxdesc->rate_mode = rt2x00_get_field32(rxwi1, RXWI_W1_PHYMODE);
- rxdesc->signal = rt2x00_get_field32(rxwi1, RXWI_W1_MCS);
-
/*
- * Mask of 0x8 bit to remove the short preamble flag.
+ * Remove RXD descriptor from end of buffer.
*/
- if (rxdesc->rate_mode == RATE_MODE_CCK)
- rxdesc->signal &= ~0x8;
-
- rxdesc->rssi =
- (rt2x00_get_field32(rxwi2, RXWI_W2_RSSI0) +
- rt2x00_get_field32(rxwi2, RXWI_W2_RSSI1)) / 2;
-
- rxdesc->noise =
- (rt2x00_get_field32(rxwi3, RXWI_W3_SNR0) +
- rt2x00_get_field32(rxwi3, RXWI_W3_SNR1)) / 2;
-
- rxdesc->size = rt2x00_get_field32(rxwi0, RXWI_W0_MPDU_TOTAL_BYTE_COUNT);
+ skb_trim(entry->skb, rx_pkt_len);
/*
- * Remove RXWI descriptor from start of buffer.
+ * Process the RXWI structure.
*/
- skb_pull(entry->skb, skbdesc->desc_len);
+ rt2800_process_rxwi(entry->skb, rxdesc);
}
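For readers following the buffer arithmetic in rt2800usb_fill_rxdone() above, a condensed sketch of the walk over the USB RX frame (illustrative only, same symbols as the code above):

/* | RXINFO | RXWI | header | L2 pad | payload | pad | RXD | USB pad |
 *          |<------------- rx_pkt_len -------------->|
 */
static void example_usb_rx_parse(struct queue_entry *entry,
				 struct rxdone_entry_desc *rxdesc)
{
	__le32 *rxi = (__le32 *)entry->skb->data;
	__le32 *rxd;
	u32 word;
	int rx_pkt_len;

	/* RXINFO word 0 carries the length of RXWI + frame. */
	rt2x00_desc_read(rxi, 0, &word);
	rx_pkt_len = rt2x00_get_field32(word, RXINFO_W0_USB_DMA_RX_PKT_LEN);

	/* Drop RXINFO; the RXD now sits rx_pkt_len bytes into the data. */
	skb_pull(entry->skb, RXINFO_DESC_SIZE);
	rxd = (__le32 *)(entry->skb->data + rx_pkt_len);

	/* ... RXD flags would be parsed here (omitted) ... */
	(void)rxd;

	/* Cut off RXD + USB padding, then decode the RXWI header. */
	skb_trim(entry->skb, rx_pkt_len);
	rt2800_process_rxwi(entry->skb, rxdesc);
}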
/*
@@ -747,7 +655,7 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
.write_tx_data = rt2x00usb_write_tx_data,
.write_beacon = rt2800usb_write_beacon,
.get_tx_data_len = rt2800usb_get_tx_data_len,
- .kick_tx_queue = rt2800usb_kick_tx_queue,
+ .kick_tx_queue = rt2x00usb_kick_tx_queue,
.kill_tx_queue = rt2x00usb_kill_tx_queue,
.fill_rxdone = rt2800usb_fill_rxdone,
.config_shared_key = rt2800_config_shared_key,
@@ -806,6 +714,10 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x07b8, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x07b8, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1482, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Allwin */
+ { USB_DEVICE(0x8516, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x8516, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x8516, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Amit */
{ USB_DEVICE(0x15c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Askey */
@@ -841,13 +753,18 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x7392, 0x7717), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x7392, 0x7718), USB_DEVICE_DATA(&rt2800usb_ops) },
/* EnGenius */
- { USB_DEVICE(0X1740, 0x9701), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1740, 0x9701), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1740, 0x9702), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Gigabyte */
{ USB_DEVICE(0x1044, 0x800b), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Hawking */
{ USB_DEVICE(0x0e66, 0x0001), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0e66, 0x0003), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0e66, 0x0009), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0e66, 0x000b), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0e66, 0x0013), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0e66, 0x0017), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0e66, 0x0018), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Linksys */
{ USB_DEVICE(0x1737, 0x0070), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1737, 0x0071), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -876,6 +793,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x0df6, 0x002c), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0df6, 0x002d), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0df6, 0x0039), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x003b), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x003d), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0df6, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) },
/* SMC */
{ USB_DEVICE(0x083a, 0x6618), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -905,8 +824,17 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x07b8, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
/* AirTies */
{ USB_DEVICE(0x1eda, 0x2310), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Allwin */
+ { USB_DEVICE(0x8516, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x8516, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x8516, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* ASUS */
+ { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) },
/* AzureWave */
{ USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x13d3, 0x3305), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x13d3, 0x3307), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x13d3, 0x3321), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Conceptronic */
{ USB_DEVICE(0x14b2, 0x3c12), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Corega */
@@ -916,20 +844,46 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x07d1, 0x3c0d), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x07d1, 0x3c0e), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x07d1, 0x3c0f), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07d1, 0x3c16), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Draytek */
+ { USB_DEVICE(0x07fa, 0x7712), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Edimax */
{ USB_DEVICE(0x7392, 0x7711), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Encore */
{ USB_DEVICE(0x203d, 0x1480), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x203d, 0x14a9), USB_DEVICE_DATA(&rt2800usb_ops) },
/* EnGenius */
{ USB_DEVICE(0x1740, 0x9703), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1740, 0x9705), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1740, 0x9706), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1740, 0x9707), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1740, 0x9708), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1740, 0x9709), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Gigabyte */
{ USB_DEVICE(0x1044, 0x800d), USB_DEVICE_DATA(&rt2800usb_ops) },
/* I-O DATA */
{ USB_DEVICE(0x04bb, 0x0945), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x04bb, 0x0947), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x04bb, 0x0948), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Logitec */
+ { USB_DEVICE(0x0789, 0x0166), USB_DEVICE_DATA(&rt2800usb_ops) },
/* MSI */
{ USB_DEVICE(0x0db0, 0x3820), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x3821), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x3822), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x3870), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x3871), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x821a), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x822a), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x822b), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x822c), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x870a), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x871a), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x871b), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x871c), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x899a), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Para */
+ { USB_DEVICE(0x20b8, 0x8888), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Pegatron */
{ USB_DEVICE(0x1d4d, 0x000c), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1d4d, 0x000e), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -944,14 +898,22 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x148f, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Sitecom */
{ USB_DEVICE(0x0df6, 0x003e), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x0040), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0df6, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x0047), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x0048), USB_DEVICE_DATA(&rt2800usb_ops) },
/* SMC */
{ USB_DEVICE(0x083a, 0x7511), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x083a, 0xa701), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x083a, 0xa702), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x083a, 0xa703), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Zinwell */
{ USB_DEVICE(0x5a57, 0x0283), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x5a57, 0x5257), USB_DEVICE_DATA(&rt2800usb_ops) },
#endif
#ifdef CONFIG_RT2800USB_RT35XX
+ /* Allwin */
+ { USB_DEVICE(0x8516, 0x3572), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Askey */
{ USB_DEVICE(0x1690, 0x0744), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Cisco */
@@ -966,37 +928,27 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x148f, 0x8070), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Sitecom */
{ USB_DEVICE(0x0df6, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x0050), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Zinwell */
{ USB_DEVICE(0x5a57, 0x0284), USB_DEVICE_DATA(&rt2800usb_ops) },
#endif
#ifdef CONFIG_RT2800USB_UNKNOWN
/*
* Unclear what kind of devices these are (they aren't supported by the
- * vendor driver).
+	 * vendor Linux driver).
*/
- /* Allwin */
- { USB_DEVICE(0x8516, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x8516, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x8516, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x8516, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x8516, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x8516, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x8516, 0x3572), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Amigo */
{ USB_DEVICE(0x0e0b, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0e0b, 0x9041), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Askey */
- { USB_DEVICE(0x0930, 0x0a07), USB_DEVICE_DATA(&rt2800usb_ops) },
/* ASUS */
{ USB_DEVICE(0x0b05, 0x1760), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0b05, 0x1761), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0b05, 0x1790), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1761, 0x0b05), USB_DEVICE_DATA(&rt2800usb_ops) },
/* AzureWave */
{ USB_DEVICE(0x13d3, 0x3262), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x13d3, 0x3284), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x13d3, 0x3305), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x13d3, 0x3322), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Belkin */
{ USB_DEVICE(0x050d, 0x825a), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Buffalo */
@@ -1015,24 +967,13 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x07d1, 0x3c0b), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x07d1, 0x3c15), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x07d1, 0x3c16), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07d1, 0x3c17), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Encore */
{ USB_DEVICE(0x203d, 0x14a1), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x203d, 0x14a9), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* EnGenius */
- { USB_DEVICE(0x1740, 0x9707), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x1740, 0x9708), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x1740, 0x9709), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Gemtek */
{ USB_DEVICE(0x15a9, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Gigabyte */
{ USB_DEVICE(0x1044, 0x800c), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Hawking */
- { USB_DEVICE(0x0e66, 0x0009), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0e66, 0x000b), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* I-O DATA */
- { USB_DEVICE(0x04bb, 0x0947), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x04bb, 0x0948), USB_DEVICE_DATA(&rt2800usb_ops) },
/* LevelOne */
{ USB_DEVICE(0x1740, 0x0605), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1740, 0x0615), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -1042,43 +983,23 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x1737, 0x0079), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Motorola */
{ USB_DEVICE(0x100d, 0x9032), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* MSI */
- { USB_DEVICE(0x0db0, 0x3821), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x3822), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x3870), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x3871), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x821a), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x822a), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x870a), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x871a), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x899a), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Ovislink */
+ { USB_DEVICE(0x1b75, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1b75, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Para */
- { USB_DEVICE(0x20b8, 0x8888), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Pegatron */
{ USB_DEVICE(0x05a6, 0x0101), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1d4d, 0x0002), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1d4d, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1d4d, 0x0011), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Planex */
{ USB_DEVICE(0x2019, 0xab24), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Qcom */
{ USB_DEVICE(0x18e8, 0x6259), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Sitecom */
- { USB_DEVICE(0x0df6, 0x003b), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x003c), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x003d), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x0040), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x0047), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x0048), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x004a), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x004d), USB_DEVICE_DATA(&rt2800usb_ops) },
/* SMC */
{ USB_DEVICE(0x083a, 0xa512), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x083a, 0xa701), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x083a, 0xa702), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x083a, 0xc522), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x083a, 0xd522), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x083a, 0xf511), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Sweex */
{ USB_DEVICE(0x177f, 0x0153), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x177f, 0x0313), USB_DEVICE_DATA(&rt2800usb_ops) },
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.h b/drivers/net/wireless/rt2x00/rt2800usb.h
index d1d8ae94b4d4..2bca6a71a7f5 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.h
+++ b/drivers/net/wireless/rt2x00/rt2800usb.h
@@ -79,8 +79,6 @@
*/
#define TXINFO_DESC_SIZE ( 1 * sizeof(__le32) )
#define RXINFO_DESC_SIZE ( 1 * sizeof(__le32) )
-#define RXWI_DESC_SIZE ( 4 * sizeof(__le32) )
-#define RXD_DESC_SIZE ( 1 * sizeof(__le32) )
/*
* TX Info structure
@@ -113,44 +111,6 @@
#define RXINFO_W0_USB_DMA_RX_PKT_LEN FIELD32(0x0000ffff)
/*
- * RX WI structure
- */
-
-/*
- * Word0
- */
-#define RXWI_W0_WIRELESS_CLI_ID FIELD32(0x000000ff)
-#define RXWI_W0_KEY_INDEX FIELD32(0x00000300)
-#define RXWI_W0_BSSID FIELD32(0x00001c00)
-#define RXWI_W0_UDF FIELD32(0x0000e000)
-#define RXWI_W0_MPDU_TOTAL_BYTE_COUNT FIELD32(0x0fff0000)
-#define RXWI_W0_TID FIELD32(0xf0000000)
-
-/*
- * Word1
- */
-#define RXWI_W1_FRAG FIELD32(0x0000000f)
-#define RXWI_W1_SEQUENCE FIELD32(0x0000fff0)
-#define RXWI_W1_MCS FIELD32(0x007f0000)
-#define RXWI_W1_BW FIELD32(0x00800000)
-#define RXWI_W1_SHORT_GI FIELD32(0x01000000)
-#define RXWI_W1_STBC FIELD32(0x06000000)
-#define RXWI_W1_PHYMODE FIELD32(0xc0000000)
-
-/*
- * Word2
- */
-#define RXWI_W2_RSSI0 FIELD32(0x000000ff)
-#define RXWI_W2_RSSI1 FIELD32(0x0000ff00)
-#define RXWI_W2_RSSI2 FIELD32(0x00ff0000)
-
-/*
- * Word3
- */
-#define RXWI_W3_SNR0 FIELD32(0x000000ff)
-#define RXWI_W3_SNR1 FIELD32(0x0000ff00)
-
-/*
* RX descriptor format for RX Ring.
*/
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index d9daa9c406fa..6c1ff4c15c84 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -177,16 +177,15 @@ struct rt2x00_chip {
#define RT2573 0x2573
#define RT2860 0x2860 /* 2.4GHz PCI/CB */
#define RT2870 0x2870
-#define RT2872 0x2872
-#define RT2880 0x2880 /* WSOC */
+#define RT2872 0x2872 /* WSOC */
#define RT2883 0x2883 /* WSOC */
-#define RT2890 0x2890 /* 2.4GHz PCIe */
-#define RT3052 0x3052 /* WSOC */
#define RT3070 0x3070
#define RT3071 0x3071
#define RT3090 0x3090 /* 2.4GHz PCIe */
#define RT3390 0x3390
#define RT3572 0x3572
+#define RT3593 0x3593 /* PCIe */
+#define RT3883 0x3883 /* WSOC */
u16 rf;
u16 rev;
@@ -550,8 +549,10 @@ struct rt2x00lib_ops {
void (*write_tx_desc) (struct rt2x00_dev *rt2x00dev,
struct sk_buff *skb,
struct txentry_desc *txdesc);
- int (*write_tx_data) (struct queue_entry *entry);
- void (*write_beacon) (struct queue_entry *entry);
+ int (*write_tx_data) (struct queue_entry *entry,
+ struct txentry_desc *txdesc);
+ void (*write_beacon) (struct queue_entry *entry,
+ struct txentry_desc *txdesc);
int (*get_tx_data_len) (struct queue_entry *entry);
void (*kick_tx_queue) (struct rt2x00_dev *rt2x00dev,
const enum data_queue_qid queue);
@@ -930,12 +931,12 @@ static inline void rt2x00_set_chip(struct rt2x00_dev *rt2x00dev,
rt2x00dev->chip.rt, rt2x00dev->chip.rf, rt2x00dev->chip.rev);
}
-static inline char rt2x00_rt(struct rt2x00_dev *rt2x00dev, const u16 rt)
+static inline bool rt2x00_rt(struct rt2x00_dev *rt2x00dev, const u16 rt)
{
return (rt2x00dev->chip.rt == rt);
}
-static inline char rt2x00_rf(struct rt2x00_dev *rt2x00dev, const u16 rf)
+static inline bool rt2x00_rf(struct rt2x00_dev *rt2x00dev, const u16 rf)
{
return (rt2x00dev->chip.rf == rf);
}
@@ -945,6 +946,24 @@ static inline u16 rt2x00_rev(struct rt2x00_dev *rt2x00dev)
return rt2x00dev->chip.rev;
}
+static inline bool rt2x00_rt_rev(struct rt2x00_dev *rt2x00dev,
+ const u16 rt, const u16 rev)
+{
+ return (rt2x00_rt(rt2x00dev, rt) && rt2x00_rev(rt2x00dev) == rev);
+}
+
+static inline bool rt2x00_rt_rev_lt(struct rt2x00_dev *rt2x00dev,
+ const u16 rt, const u16 rev)
+{
+ return (rt2x00_rt(rt2x00dev, rt) && rt2x00_rev(rt2x00dev) < rev);
+}
+
+static inline bool rt2x00_rt_rev_gte(struct rt2x00_dev *rt2x00dev,
+ const u16 rt, const u16 rev)
+{
+ return (rt2x00_rt(rt2x00dev, rt) && rt2x00_rev(rt2x00dev) >= rev);
+}
+
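These helpers are what make the revision-gated quirks in the rt2800lib.c hunks above read cleanly; a small illustrative example of the pattern (REV_RT3070F as used earlier in this diff):

/* Illustrative only: apply a register quirk on old RT3070 silicon,
 * matching the pattern used in rt2800_init_rfcsr() above.
 */
static void example_apply_quirk(struct rt2x00_dev *rt2x00dev)
{
	if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F))
		rt2800_rfcsr_write(rt2x00dev, 27, 0x03);
}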
static inline void rt2x00_set_chip_intf(struct rt2x00_dev *rt2x00dev,
enum rt2x00_chip_intf intf)
{
diff --git a/drivers/net/wireless/rt2x00/rt2x00crypto.c b/drivers/net/wireless/rt2x00/rt2x00crypto.c
index d291c7862e10..583dacd8d241 100644
--- a/drivers/net/wireless/rt2x00/rt2x00crypto.c
+++ b/drivers/net/wireless/rt2x00/rt2x00crypto.c
@@ -128,6 +128,7 @@ void rt2x00crypto_tx_remove_iv(struct sk_buff *skb, struct txentry_desc *txdesc)
/* Pull buffer to correct size */
skb_pull(skb, txdesc->iv_len);
+ txdesc->length -= txdesc->iv_len;
/* IV/EIV data has officially been stripped */
skbdesc->flags |= SKBDESC_IV_STRIPPED;
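Side note (not part of the patch): txdesc->length is initialised from skb->len when the descriptor is created (see the rt2x00queue.c hunk below), so it must shrink here in step with the skb once the IV/EIV bytes are stripped. A minimal sketch of that invariant:

/* Illustrative only: keep the recorded frame length in sync with
 * the skb after the IV/EIV has been removed.
 */
static void example_strip_iv(struct sk_buff *skb,
			     struct txentry_desc *txdesc)
{
	skb_pull(skb, txdesc->iv_len);		/* shrink the frame ...    */
	txdesc->length -= txdesc->iv_len;	/* ... and the bookkeeping */
}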
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index eda73ba735a6..3ae468c4d760 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -435,7 +435,6 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
rx_status->mactime = rxdesc.timestamp;
rx_status->rate_idx = rate_idx;
rx_status->signal = rxdesc.rssi;
- rx_status->noise = rxdesc.noise;
rx_status->flag = rxdesc.flags;
rx_status->antenna = rt2x00dev->link.ant.active.rx;
diff --git a/drivers/net/wireless/rt2x00/rt2x00ht.c b/drivers/net/wireless/rt2x00/rt2x00ht.c
index 1056c92143a8..5a407602ce3e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00ht.c
+++ b/drivers/net/wireless/rt2x00/rt2x00ht.c
@@ -35,6 +35,7 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
if (tx_info->control.sta)
txdesc->mpdu_density =
@@ -66,4 +67,20 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
__set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);
if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
__set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);
+
+ /*
+ * Determine IFS values
+ * - Use TXOP_BACKOFF for management frames
+ * - Use TXOP_SIFS for fragment bursts
+ * - Use TXOP_HTTXOP for everything else
+ *
+ * Note: rt2800 devices won't use CTS protection (if used)
+ * for frames not transmitted with TXOP_HTTXOP
+ */
+ if (ieee80211_is_mgmt(hdr->frame_control))
+ txdesc->txop = TXOP_BACKOFF;
+ else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
+ txdesc->txop = TXOP_SIFS;
+ else
+ txdesc->txop = TXOP_HTTXOP;
}
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index cf3f1c0c4382..4b941e9c794e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -63,7 +63,8 @@ EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read);
/*
* TX data handlers.
*/
-int rt2x00pci_write_tx_data(struct queue_entry *entry)
+int rt2x00pci_write_tx_data(struct queue_entry *entry,
+ struct txentry_desc *txdesc)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct queue_entry_priv_pci *entry_priv = entry->priv_data;
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/rt2x00/rt2x00pci.h
index 8149ff68410a..51bcef3839ce 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.h
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.h
@@ -92,7 +92,8 @@ int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
* This function will initialize the DMA and skb descriptor
* to prepare the entry for the actual TX operation.
*/
-int rt2x00pci_write_tx_data(struct queue_entry *entry);
+int rt2x00pci_write_tx_data(struct queue_entry *entry,
+ struct txentry_desc *txdesc);
/**
* struct queue_entry_priv_pci: Per entry PCI specific information
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index a0bd36fc4d2e..089a12c7b90f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -334,12 +334,10 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
txdesc->aifs = entry->queue->aifs;
/*
- * Header and alignment information.
+ * Header and frame information.
*/
+ txdesc->length = entry->skb->len;
txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
- if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags) &&
- (entry->skb->len > txdesc->header_length))
- txdesc->l2pad = L2PAD_SIZE(txdesc->header_length);
/*
* Check whether this frame is to be acked.
@@ -431,20 +429,23 @@ static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
* it is now ready to be dumped to userspace through debugfs.
*/
rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TX, entry->skb);
+}
+
+static void rt2x00queue_kick_tx_queue(struct queue_entry *entry,
+ struct txentry_desc *txdesc)
+{
+ struct data_queue *queue = entry->queue;
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
/*
* Check if we need to kick the queue, there are however a few rules
- * 1) Don't kick beacon queue
- * 2) Don't kick unless this is the last in frame in a burst.
+	 * 1) Don't kick unless this is the last frame in a burst.
* When the burst flag is set, this frame is always followed
* by another frame which in some way are related to eachother.
* This is true for fragments, RTS or CTS-to-self frames.
- * 3) Rule 2 can be broken when the available entries
+ * 2) Rule 1 can be broken when the available entries
* in the queue are less then a certain threshold.
*/
- if (entry->queue->qid == QID_BEACON)
- return;
-
if (rt2x00queue_threshold(queue) ||
!test_bit(ENTRY_TXD_BURST, &txdesc->flags))
rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, queue->qid);
@@ -526,7 +527,8 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
* call failed. Since we always return NETDEV_TX_OK to mac80211,
* this frame will simply be dropped.
*/
- if (unlikely(queue->rt2x00dev->ops->lib->write_tx_data(entry))) {
+ if (unlikely(queue->rt2x00dev->ops->lib->write_tx_data(entry,
+ &txdesc))) {
clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
entry->skb = NULL;
return -EIO;
@@ -539,6 +541,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
rt2x00queue_index_inc(queue, Q_INDEX);
rt2x00queue_write_tx_descriptor(entry, &txdesc);
+ rt2x00queue_kick_tx_queue(entry, &txdesc);
return 0;
}
@@ -604,12 +607,9 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
rt2x00queue_write_tx_descriptor(intf->beacon, &txdesc);
/*
- * Send beacon to hardware.
- * Also enable beacon generation, which might have been disabled
- * by the driver during the config_beacon() callback function.
+ * Send beacon to hardware and enable beacon generation.
*/
- rt2x00dev->ops->lib->write_beacon(intf->beacon);
- rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, QID_BEACON);
+ rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);
mutex_unlock(&intf->beacon_skb_mutex);
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index c1e482bb37b3..36a957adc1f9 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -183,7 +183,6 @@ enum rxdone_entry_desc_flags {
* @timestamp: RX Timestamp
* @signal: Signal of the received frame.
* @rssi: RSSI of the received frame.
- * @noise: Measured noise during frame reception.
* @size: Data size of the received frame.
* @flags: MAC80211 receive flags (See &enum mac80211_rx_flags).
* @dev_flags: Ralink receive flags (See &enum rxdone_entry_desc_flags).
@@ -197,7 +196,6 @@ struct rxdone_entry_desc {
u64 timestamp;
int signal;
int rssi;
- int noise;
int size;
int flags;
int dev_flags;
@@ -287,8 +285,8 @@ enum txentry_desc_flags {
*
* @flags: Descriptor flags (See &enum queue_entry_flags).
* @queue: Queue identification (See &enum data_queue_qid).
+ * @length: Length of the entire frame.
* @header_length: Length of 802.11 header.
- * @l2pad: Amount of padding to align 802.11 payload to 4-byte boundrary.
* @length_high: PLCP length high word.
* @length_low: PLCP length low word.
* @signal: PLCP signal.
@@ -301,6 +299,7 @@ enum txentry_desc_flags {
* @retry_limit: Max number of retries.
* @aifs: AIFS value.
* @ifs: IFS value.
+ * @txop: IFS value for 11n capable chips.
* @cw_min: cwmin value.
* @cw_max: cwmax value.
* @cipher: Cipher type used for encryption.
@@ -313,8 +312,8 @@ struct txentry_desc {
enum data_queue_qid queue;
+ u16 length;
u16 header_length;
- u16 l2pad;
u16 length_high;
u16 length_low;
@@ -330,6 +329,7 @@ struct txentry_desc {
short retry_limit;
short aifs;
short ifs;
+ short txop;
short cw_min;
short cw_max;
diff --git a/drivers/net/wireless/rt2x00/rt2x00reg.h b/drivers/net/wireless/rt2x00/rt2x00reg.h
index 603bfc0adaa3..b9fe94873ee0 100644
--- a/drivers/net/wireless/rt2x00/rt2x00reg.h
+++ b/drivers/net/wireless/rt2x00/rt2x00reg.h
@@ -101,6 +101,16 @@ enum ifs {
};
/*
+ * IFS backoff values for HT devices
+ */
+enum txop {
+ TXOP_HTTXOP = 0,
+ TXOP_PIFS = 1,
+ TXOP_SIFS = 2,
+ TXOP_BACKOFF = 3,
+};
+
+/*
* Cipher types for hardware encryption
*/
enum cipher {
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index f9a7f8b17411..da111c0c2928 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -216,7 +216,8 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
rt2x00lib_txdone(entry, &txdesc);
}
-int rt2x00usb_write_tx_data(struct queue_entry *entry)
+int rt2x00usb_write_tx_data(struct queue_entry *entry,
+ struct txentry_desc *txdesc)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h
index 3da6841b5d42..621d0f829251 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.h
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.h
@@ -376,7 +376,8 @@ void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev);
* This function will initialize the URB and skb descriptor
* to prepare the entry for the actual TX operation.
*/
-int rt2x00usb_write_tx_data(struct queue_entry *entry);
+int rt2x00usb_write_tx_data(struct queue_entry *entry,
+ struct txentry_desc *txdesc);
/**
* struct queue_entry_priv_usb: Per entry USB specific information
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 432e75f960b7..86c75b9c3f25 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -1809,7 +1809,8 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
if (skbdesc->desc_len > TXINFO_SIZE) {
rt2x00_desc_read(txd, 11, &word);
- rt2x00_set_field32(&word, TXD_W11_BUFFER_LENGTH0, skb->len);
+ rt2x00_set_field32(&word, TXD_W11_BUFFER_LENGTH0,
+ txdesc->length);
rt2x00_desc_write(txd, 11, word);
}
@@ -1832,7 +1833,7 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&word, TXD_W0_KEY_TABLE,
test_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_KEY_INDEX, txdesc->key_idx);
- rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len);
+ rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length);
rt2x00_set_field32(&word, TXD_W0_BURST,
test_bit(ENTRY_TXD_BURST, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, txdesc->cipher);
@@ -1842,7 +1843,8 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
/*
* TX data initialization
*/
-static void rt61pci_write_beacon(struct queue_entry *entry)
+static void rt61pci_write_beacon(struct queue_entry *entry,
+ struct txentry_desc *txdesc)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
@@ -1869,6 +1871,19 @@ static void rt61pci_write_beacon(struct queue_entry *entry)
entry->skb->data, entry->skb->len);
/*
+ * Enable beaconing again.
+ *
+ * For Wi-Fi fairly generated beacons between participating
+ * stations. Set TBTT phase adaptive adjustment step to 8us.
+ */
+ rt2x00pci_register_write(rt2x00dev, TXRX_CSR10, 0x00001008);
+
+ rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
+ rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
+ rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
+ rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
+
+ /*
* Clean up beacon skb.
*/
dev_kfree_skb_any(entry->skb);
@@ -1880,23 +1895,6 @@ static void rt61pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
{
u32 reg;
- if (queue == QID_BEACON) {
- /*
- * For Wi-Fi faily generated beacons between participating
- * stations. Set TBTT phase adaptive adjustment step to 8us.
- */
- rt2x00pci_register_write(rt2x00dev, TXRX_CSR10, 0x00001008);
-
- rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
- if (!rt2x00_get_field32(reg, TXRX_CSR9_BEACON_GEN)) {
- rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
- rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
- rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
- rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
- }
- return;
- }
-
rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC0, (queue == QID_AC_BE));
rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC1, (queue == QID_AC_BK));
@@ -1968,12 +1966,8 @@ static void rt61pci_fill_rxdone(struct queue_entry *entry,
if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
- if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
- rxdesc->cipher =
- rt2x00_get_field32(word0, RXD_W0_CIPHER_ALG);
- rxdesc->cipher_status =
- rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR);
- }
+ rxdesc->cipher = rt2x00_get_field32(word0, RXD_W0_CIPHER_ALG);
+ rxdesc->cipher_status = rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR);
if (rxdesc->cipher != CIPHER_NONE) {
_rt2x00_desc_read(entry_priv->desc, 2, &rxdesc->iv[0]);
@@ -2118,6 +2112,14 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
}
}
+static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev)
+{
+ struct ieee80211_conf conf = { .flags = 0 };
+ struct rt2x00lib_conf libconf = { .conf = &conf };
+
+ rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
+}
+
static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance)
{
struct rt2x00_dev *rt2x00dev = dev_instance;
@@ -2165,6 +2167,12 @@ static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance)
rt2x00pci_register_write(rt2x00dev,
M2H_CMD_DONE_CSR, 0xffffffff);
+ /*
+ * 4 - MCU Autowakeup interrupt.
+ */
+ if (rt2x00_get_field32(reg_mcu, MCU_INT_SOURCE_CSR_TWAKEUP))
+ rt61pci_wakeup(rt2x00dev);
+
return IRQ_HANDLED;
}
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index bb58d797fb72..11c130748206 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -861,15 +861,15 @@ static void rt73usb_config_ps(struct rt2x00_dev *rt2x00dev,
rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
USB_MODE_SLEEP, REGISTER_TIMEOUT);
} else {
- rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
- USB_MODE_WAKEUP, REGISTER_TIMEOUT);
-
rt2x00usb_register_read(rt2x00dev, MAC_CSR11, &reg);
rt2x00_set_field32(&reg, MAC_CSR11_DELAY_AFTER_TBCN, 0);
rt2x00_set_field32(&reg, MAC_CSR11_TBCN_BEFORE_WAKEUP, 0);
rt2x00_set_field32(&reg, MAC_CSR11_AUTOWAKE, 0);
rt2x00_set_field32(&reg, MAC_CSR11_WAKEUP_LATENCY, 0);
rt2x00usb_register_write(rt2x00dev, MAC_CSR11, reg);
+
+ rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
+ USB_MODE_WAKEUP, REGISTER_TIMEOUT);
}
}
@@ -1495,7 +1495,7 @@ static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&word, TXD_W0_KEY_TABLE,
test_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_KEY_INDEX, txdesc->key_idx);
- rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len);
+ rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length);
rt2x00_set_field32(&word, TXD_W0_BURST2,
test_bit(ENTRY_TXD_BURST, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, txdesc->cipher);
@@ -1505,7 +1505,8 @@ static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
/*
* TX data initialization
*/
-static void rt73usb_write_beacon(struct queue_entry *entry)
+static void rt73usb_write_beacon(struct queue_entry *entry,
+ struct txentry_desc *txdesc)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
@@ -1537,6 +1538,19 @@ static void rt73usb_write_beacon(struct queue_entry *entry)
REGISTER_TIMEOUT32(entry->skb->len));
/*
+ * Enable beaconing again.
+ *
+ * For Wi-Fi fairly generated beacons between participating stations.
+ * Set TBTT phase adaptive adjustment step to 8us (default 16us)
+ */
+ rt2x00usb_register_write(rt2x00dev, TXRX_CSR10, 0x00001008);
+
+ rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
+ rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
+ rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
+ rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
+
+ /*
* Clean up the beacon skb.
*/
dev_kfree_skb(entry->skb);
@@ -1557,31 +1571,6 @@ static int rt73usb_get_tx_data_len(struct queue_entry *entry)
return length;
}
-static void rt73usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
- const enum data_queue_qid queue)
-{
- u32 reg;
-
- if (queue != QID_BEACON) {
- rt2x00usb_kick_tx_queue(rt2x00dev, queue);
- return;
- }
-
- /*
- * For Wi-Fi faily generated beacons between participating stations.
- * Set TBTT phase adaptive adjustment step to 8us (default 16us)
- */
- rt2x00usb_register_write(rt2x00dev, TXRX_CSR10, 0x00001008);
-
- rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
- if (!rt2x00_get_field32(reg, TXRX_CSR9_BEACON_GEN)) {
- rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
- rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
- rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
- rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
- }
-}
-
/*
* RX control handlers
*/
@@ -1645,12 +1634,8 @@ static void rt73usb_fill_rxdone(struct queue_entry *entry,
if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
- if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
- rxdesc->cipher =
- rt2x00_get_field32(word0, RXD_W0_CIPHER_ALG);
- rxdesc->cipher_status =
- rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR);
- }
+ rxdesc->cipher = rt2x00_get_field32(word0, RXD_W0_CIPHER_ALG);
+ rxdesc->cipher_status = rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR);
if (rxdesc->cipher != CIPHER_NONE) {
_rt2x00_desc_read(rxd, 2, &rxdesc->iv[0]);
@@ -2266,7 +2251,7 @@ static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
.write_tx_data = rt2x00usb_write_tx_data,
.write_beacon = rt73usb_write_beacon,
.get_tx_data_len = rt73usb_get_tx_data_len,
- .kick_tx_queue = rt73usb_kick_tx_queue,
+ .kick_tx_queue = rt2x00usb_kick_tx_queue,
.kill_tx_queue = rt2x00usb_kill_tx_queue,
.fill_rxdone = rt73usb_fill_rxdone,
.config_shared_key = rt73usb_config_shared_key,
diff --git a/drivers/net/wireless/rtl818x/Kconfig b/drivers/net/wireless/rtl818x/Kconfig
new file mode 100644
index 000000000000..17d80fe556de
--- /dev/null
+++ b/drivers/net/wireless/rtl818x/Kconfig
@@ -0,0 +1,88 @@
+#
+# RTL818X Wireless LAN device configuration
+#
+config RTL8180
+ tristate "Realtek 8180/8185 PCI support"
+ depends on MAC80211 && PCI && EXPERIMENTAL
+ select EEPROM_93CX6
+ ---help---
+ This is a driver for RTL8180 and RTL8185 based cards.
+ These are PCI based chips found in cards such as:
+
+ (RTL8185 802.11g)
+ A-Link WL54PC
+
+ (RTL8180 802.11b)
+ Belkin F5D6020 v3
+ Belkin F5D6020 v3
+ Dlink DWL-610
+ Dlink DWL-510
+ Netgear MA521
+ Level-One WPC-0101
+ Acer Aspire 1357 LMi
+ VCTnet PC-11B1
+ Ovislink AirLive WL-1120PCM
+ Mentor WL-PCI
+ Linksys WPC11 v4
+ TrendNET TEW-288PI
+ D-Link DWL-520 Rev D
+ Repotec RP-WP7126
+ TP-Link TL-WN250/251
+ Zonet ZEW1000
+ Longshine LCS-8031-R
+ HomeLine HLW-PCC200
+ GigaFast WF721-AEX
+ Planet WL-3553
+ Encore ENLWI-PCI1-NT
+ TrendNET TEW-266PC
+ Gigabyte GN-WLMR101
+ Siemens-fujitsu Amilo D1840W
+ Edimax EW-7126
+ PheeNet WL-11PCIR
+ Tonze PC-2100T
+ Planet WL-8303
+ Dlink DWL-650 v M1
+ Edimax EW-7106
+ Q-Tec 770WC
+ Topcom Skyr@cer 4011b
+ Roper FreeLan 802.11b (edition 2004)
+ Wistron Neweb Corp CB-200B
+ Pentagram HorNET
+ QTec 775WC
+ TwinMOS Booming B Series
+ Micronet SP906BB
+ Sweex LC700010
+ Surecom EP-9428
+ Safecom SWLCR-1100
+
+ Thanks to Realtek for their support!
+
+config RTL8187
+ tristate "Realtek 8187 and 8187B USB support"
+ depends on MAC80211 && USB
+ select EEPROM_93CX6
+ ---help---
+ This is a driver for RTL8187 and RTL8187B based cards.
+ These are USB based chips found in devices such as:
+
+ Netgear WG111v2
+ Level 1 WNC-0301USB
+ Micronet SP907GK V5
+ Encore ENUWI-G2
+ Trendnet TEW-424UB
+ ASUS P5B Deluxe/P5K Premium motherboards
+ Toshiba Satellite Pro series of laptops
+ Asus Wireless Link
+ Linksys WUSB54GC-EU v2
+ (v1 = rt73usb; v3 is rt2070-based,
+ use staging/rt3070 or try rt2800usb)
+
+ Thanks to Realtek for their support!
+
+# If possible, automatically enable LEDs for RTL8187.
+
+config RTL8187_LEDS
+ bool
+ depends on RTL8187 && MAC80211_LEDS && (LEDS_CLASS = y || LEDS_CLASS = RTL8187)
+ default y
+
diff --git a/drivers/net/wireless/rtl818x/rtl8180.h b/drivers/net/wireless/rtl818x/rtl8180.h
index de3844fe06d8..4baf0cf0826f 100644
--- a/drivers/net/wireless/rtl818x/rtl8180.h
+++ b/drivers/net/wireless/rtl818x/rtl8180.h
@@ -55,6 +55,14 @@ struct rtl8180_tx_ring {
struct sk_buff_head queue;
};
+struct rtl8180_vif {
+ struct ieee80211_hw *dev;
+
+ /* beaconing */
+ struct delayed_work beacon_work;
+ bool enable_beacon;
+};
+
struct rtl8180_priv {
/* common between rtl818x drivers */
struct rtl818x_csr __iomem *map;
@@ -78,6 +86,9 @@ struct rtl8180_priv {
u32 anaparam;
u16 rfparam;
u8 csthreshold;
+
+ /* sequence # */
+ u16 seqno;
};
void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data);
diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
index 2131a442831a..515817de2905 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
@@ -188,6 +188,7 @@ static void rtl8180_handle_tx(struct ieee80211_hw *dev, unsigned int prio)
info->flags |= IEEE80211_TX_STAT_ACK;
info->status.rates[0].count = (flags & 0xFF) + 1;
+ info->status.rates[1].idx = -1;
ieee80211_tx_status_irqsafe(dev, skb);
if (ring->entries - skb_queue_len(&ring->queue) == 2)
@@ -233,6 +234,7 @@ static irqreturn_t rtl8180_interrupt(int irq, void *dev_id)
static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct rtl8180_priv *priv = dev->priv;
struct rtl8180_tx_ring *ring;
struct rtl8180_tx_desc *entry;
@@ -284,6 +286,14 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
}
spin_lock_irqsave(&priv->lock, flags);
+
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
+ if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
+ priv->seqno += 0x10;
+ hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(priv->seqno);
+ }
+
idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries;
entry = &ring->desc[idx];
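The sequence-number block added above relies on the 802.11 sequence-control layout: bits 0-3 carry the fragment number and bits 4-15 the sequence number, which is why the counter advances in steps of 0x10. A small sketch of that packing, in host byte order and with standard mask values in place of the mac80211 constants:

#include <stdint.h>

#define SCTL_FRAG 0x000Fu	/* fragment number, bits 0-3 */
#define SCTL_SEQ  0xFFF0u	/* sequence number, bits 4-15 */

/* Bump the per-device counter on the first fragment only, then
 * overwrite the sequence bits while keeping the fragment bits.
 */
static uint16_t assign_seq_ctrl(uint16_t seq_ctrl, uint16_t *seqno,
				int first_fragment)
{
	if (first_fragment)
		*seqno = (uint16_t)(*seqno + 0x10) & SCTL_SEQ;
	return (seq_ctrl & SCTL_FRAG) | *seqno;
}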
@@ -297,7 +307,8 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
entry->flags = cpu_to_le32(tx_flags);
__skb_queue_tail(&ring->queue, skb);
if (ring->entries - skb_queue_len(&ring->queue) < 2)
- ieee80211_stop_queue(dev, skb_get_queue_mapping(skb));
+ ieee80211_stop_queue(dev, prio);
+
spin_unlock_irqrestore(&priv->lock, flags);
rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING, (1 << (prio + 4)));
@@ -652,10 +663,59 @@ static void rtl8180_stop(struct ieee80211_hw *dev)
rtl8180_free_tx_ring(dev, i);
}
+static u64 rtl8180_get_tsf(struct ieee80211_hw *dev)
+{
+ struct rtl8180_priv *priv = dev->priv;
+
+ return rtl818x_ioread32(priv, &priv->map->TSFT[0]) |
+ (u64)(rtl818x_ioread32(priv, &priv->map->TSFT[1])) << 32;
+}
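rtl8180_get_tsf() above composes the 64-bit TSF from two 32-bit register reads. The same pattern as a free-standing sketch, with a callback standing in for rtl818x_ioread32():

#include <stdint.h>

/* Combine the low and high TSF words into a 64-bit timestamp.
 * read32() is a placeholder for the device's register read; the two
 * words are read non-atomically, as in the driver itself.
 */
static uint64_t read_tsf(uint32_t (*read32)(int reg), int tsf_lo, int tsf_hi)
{
	uint64_t lo = read32(tsf_lo);
	uint64_t hi = read32(tsf_hi);

	return lo | (hi << 32);
}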
+
+void rtl8180_beacon_work(struct work_struct *work)
+{
+ struct rtl8180_vif *vif_priv =
+ container_of(work, struct rtl8180_vif, beacon_work.work);
+ struct ieee80211_vif *vif =
+ container_of((void *)vif_priv, struct ieee80211_vif, drv_priv);
+ struct ieee80211_hw *dev = vif_priv->dev;
+ struct ieee80211_mgmt *mgmt;
+ struct sk_buff *skb;
+ int err = 0;
+
+ /* don't overflow the tx ring */
+ if (ieee80211_queue_stopped(dev, 0))
+ goto resched;
+
+ /* grab a fresh beacon */
+ skb = ieee80211_beacon_get(dev, vif);
+
+ /*
+ * update beacon timestamp w/ TSF value
+ * TODO: make hardware update beacon timestamp
+ */
+ mgmt = (struct ieee80211_mgmt *)skb->data;
+ mgmt->u.beacon.timestamp = cpu_to_le64(rtl8180_get_tsf(dev));
+
+ /* TODO: use actual beacon queue */
+ skb_set_queue_mapping(skb, 0);
+
+ err = rtl8180_tx(dev, skb);
+ WARN_ON(err);
+
+resched:
+ /*
+ * schedule next beacon
+ * TODO: use hardware support for beacon timing
+ */
+ schedule_delayed_work(&vif_priv->beacon_work,
+ usecs_to_jiffies(1024 * vif->bss_conf.beacon_int));
+}
+
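The rescheduling at the end of rtl8180_beacon_work() converts the beacon interval from 802.11 time units to jiffies; one TU is 1024 microseconds. A hedged arithmetic sketch with a fixed HZ chosen only for illustration, in place of the kernel's usecs_to_jiffies():

/* Convert a beacon interval in TUs (1 TU = 1024 us) to timer ticks.
 * HZ = 100 is assumed here purely for illustration.
 */
#define HZ 100UL

static unsigned long beacon_int_to_ticks(unsigned long beacon_int_tu)
{
	unsigned long usecs = beacon_int_tu * 1024UL;

	/* round up so the next beacon is never scheduled early */
	return (usecs * HZ + 999999UL) / 1000000UL;
}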
static int rtl8180_add_interface(struct ieee80211_hw *dev,
struct ieee80211_vif *vif)
{
struct rtl8180_priv *priv = dev->priv;
+ struct rtl8180_vif *vif_priv;
/*
* We only support one active interface at a time.
@@ -665,6 +725,7 @@ static int rtl8180_add_interface(struct ieee80211_hw *dev,
switch (vif->type) {
case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_ADHOC:
break;
default:
return -EOPNOTSUPP;
@@ -672,6 +733,12 @@ static int rtl8180_add_interface(struct ieee80211_hw *dev,
priv->vif = vif;
+ /* Initialize driver private area */
+ vif_priv = (struct rtl8180_vif *)&vif->drv_priv;
+ vif_priv->dev = dev;
+ INIT_DELAYED_WORK(&vif_priv->beacon_work, rtl8180_beacon_work);
+ vif_priv->enable_beacon = false;
+
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
rtl818x_iowrite32(priv, (__le32 __iomem *)&priv->map->MAC[0],
le32_to_cpu(*(__le32 *)vif->addr));
@@ -705,8 +772,11 @@ static void rtl8180_bss_info_changed(struct ieee80211_hw *dev,
u32 changed)
{
struct rtl8180_priv *priv = dev->priv;
+ struct rtl8180_vif *vif_priv;
int i;
+ vif_priv = (struct rtl8180_vif *)&vif->drv_priv;
+
if (changed & BSS_CHANGED_BSSID) {
for (i = 0; i < ETH_ALEN; i++)
rtl818x_iowrite8(priv, &priv->map->BSSID[i],
@@ -721,13 +791,22 @@ static void rtl8180_bss_info_changed(struct ieee80211_hw *dev,
}
if (changed & BSS_CHANGED_ERP_SLOT && priv->rf->conf_erp)
- priv->rf->conf_erp(dev, info);
+ priv->rf->conf_erp(dev, info);
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED)
+ vif_priv->enable_beacon = info->enable_beacon;
+
+ if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON)) {
+ cancel_delayed_work_sync(&vif_priv->beacon_work);
+ if (vif_priv->enable_beacon)
+ schedule_work(&vif_priv->beacon_work.work);
+ }
}
-static u64 rtl8180_prepare_multicast(struct ieee80211_hw *dev, int mc_count,
- struct dev_addr_list *mc_list)
+static u64 rtl8180_prepare_multicast(struct ieee80211_hw *dev,
+ struct netdev_hw_addr_list *mc_list)
{
- return mc_count;
+ return netdev_hw_addr_list_count(mc_list);
}
static void rtl8180_configure_filter(struct ieee80211_hw *dev,
@@ -762,14 +841,6 @@ static void rtl8180_configure_filter(struct ieee80211_hw *dev,
rtl818x_iowrite32(priv, &priv->map->RX_CONF, priv->rx_conf);
}
-static u64 rtl8180_get_tsf(struct ieee80211_hw *dev)
-{
- struct rtl8180_priv *priv = dev->priv;
-
- return rtl818x_ioread32(priv, &priv->map->TSFT[0]) |
- (u64)(rtl818x_ioread32(priv, &priv->map->TSFT[1])) << 32;
-}
-
static const struct ieee80211_ops rtl8180_ops = {
.tx = rtl8180_tx,
.start = rtl8180_start,
@@ -827,6 +898,7 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
const char *chip_name, *rf_name = NULL;
u32 reg;
u16 eeprom_val;
+ u8 mac_addr[ETH_ALEN];
err = pci_enable_device(pdev);
if (err) {
@@ -855,8 +927,8 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
goto err_free_reg;
}
- if ((err = pci_set_dma_mask(pdev, 0xFFFFFF00ULL)) ||
- (err = pci_set_consistent_dma_mask(pdev, 0xFFFFFF00ULL))) {
+ if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) ||
+ (err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))) {
printk(KERN_ERR "%s (rtl8180): No suitable DMA available\n",
pci_name(pdev));
goto err_free_reg;
@@ -905,7 +977,9 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
IEEE80211_HW_RX_INCLUDES_FCS |
IEEE80211_HW_SIGNAL_UNSPEC;
- dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ dev->vif_data_size = sizeof(struct rtl8180_vif);
+ dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC);
dev->queues = 1;
dev->max_signal = 65;
@@ -987,12 +1061,13 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
eeprom_93cx6_read(&eeprom, 0x19, &priv->rfparam);
}
- eeprom_93cx6_multiread(&eeprom, 0x7, (__le16 *)dev->wiphy->perm_addr, 3);
- if (!is_valid_ether_addr(dev->wiphy->perm_addr)) {
+ eeprom_93cx6_multiread(&eeprom, 0x7, (__le16 *)mac_addr, 3);
+ if (!is_valid_ether_addr(mac_addr)) {
printk(KERN_WARNING "%s (rtl8180): Invalid hwaddr! Using"
" randomly generated MAC addr\n", pci_name(pdev));
- random_ether_addr(dev->wiphy->perm_addr);
+ random_ether_addr(mac_addr);
}
+ SET_IEEE80211_PERM_ADDR(dev, mac_addr);
/* CCK TX power */
for (i = 0; i < 14; i += 2) {
@@ -1024,7 +1099,7 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
}
printk(KERN_INFO "%s: hwaddr %pM, %s + %s\n",
- wiphy_name(dev->wiphy), dev->wiphy->perm_addr,
+ wiphy_name(dev->wiphy), mac_addr,
chip_name, priv->rf->name);
return 0;
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index 1d30792973f5..891b8490e349 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -1194,9 +1194,9 @@ static void rtl8187_bss_info_changed(struct ieee80211_hw *dev,
}
static u64 rtl8187_prepare_multicast(struct ieee80211_hw *dev,
- int mc_count, struct dev_addr_list *mc_list)
+ struct netdev_hw_addr_list *mc_list)
{
- return mc_count;
+ return netdev_hw_addr_list_count(mc_list);
}
static void rtl8187_configure_filter(struct ieee80211_hw *dev,
@@ -1333,6 +1333,7 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
u16 txpwr, reg;
u16 product_id = le16_to_cpu(udev->descriptor.idProduct);
int err, i;
+ u8 mac_addr[ETH_ALEN];
dev = ieee80211_alloc_hw(sizeof(*priv), &rtl8187_ops);
if (!dev) {
@@ -1390,12 +1391,13 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
udelay(10);
eeprom_93cx6_multiread(&eeprom, RTL8187_EEPROM_MAC_ADDR,
- (__le16 __force *)dev->wiphy->perm_addr, 3);
- if (!is_valid_ether_addr(dev->wiphy->perm_addr)) {
+ (__le16 __force *)mac_addr, 3);
+ if (!is_valid_ether_addr(mac_addr)) {
printk(KERN_WARNING "rtl8187: Invalid hwaddr! Using randomly "
"generated MAC address\n");
- random_ether_addr(dev->wiphy->perm_addr);
+ random_ether_addr(mac_addr);
}
+ SET_IEEE80211_PERM_ADDR(dev, mac_addr);
channel = priv->channels;
for (i = 0; i < 3; i++) {
@@ -1526,7 +1528,7 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
skb_queue_head_init(&priv->b_tx_status.queue);
printk(KERN_INFO "%s: hwaddr %pM, %s V%d + %s, rfkill mask %d\n",
- wiphy_name(dev->wiphy), dev->wiphy->perm_addr,
+ wiphy_name(dev->wiphy), mac_addr,
chip_name, priv->asic_rev, priv->rf->name, priv->rfkill_mask);
#ifdef CONFIG_RTL8187_LEDS
diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig
index 785e0244e305..337fc7bec5a5 100644
--- a/drivers/net/wireless/wl12xx/Kconfig
+++ b/drivers/net/wireless/wl12xx/Kconfig
@@ -51,3 +51,27 @@ config WL1271
If you choose to build a module, it'll be called wl1271. Say N if
unsure.
+
+config WL1271_SPI
+ tristate "TI wl1271 SPI support"
+ depends on WL1271 && SPI_MASTER
+ ---help---
+ This module adds support for the SPI interface of adapters using
+ TI wl1271 chipset. Select this if your platform is using
+ the SPI bus.
+
+ If you choose to build a module, it'll be called wl1271_spi.
+ Say N if unsure.
+
+config WL1271_SDIO
+ tristate "TI wl1271 SDIO support"
+ depends on WL1271 && MMC && ARM
+ ---help---
+ This module adds support for the SDIO interface of adapters using
+ TI wl1271 chipset. Select this if your platform is using
+ the SDIO bus.
+
+ If you choose to build a module, it'll be called
+ wl1271_sdio. Say N if unsure.
+
+
diff --git a/drivers/net/wireless/wl12xx/Makefile b/drivers/net/wireless/wl12xx/Makefile
index f47ec94c16dc..27ddd2be0a91 100644
--- a/drivers/net/wireless/wl12xx/Makefile
+++ b/drivers/net/wireless/wl12xx/Makefile
@@ -7,10 +7,12 @@ obj-$(CONFIG_WL1251) += wl1251.o
obj-$(CONFIG_WL1251_SPI) += wl1251_spi.o
obj-$(CONFIG_WL1251_SDIO) += wl1251_sdio.o
-wl1271-objs = wl1271_main.o wl1271_spi.o wl1271_cmd.o \
+wl1271-objs = wl1271_main.o wl1271_cmd.o wl1271_io.o \
wl1271_event.o wl1271_tx.o wl1271_rx.o \
wl1271_ps.o wl1271_acx.o wl1271_boot.o \
- wl1271_init.o wl1271_debugfs.o wl1271_io.o
+ wl1271_init.o wl1271_debugfs.o
wl1271-$(CONFIG_NL80211_TESTMODE) += wl1271_testmode.o
obj-$(CONFIG_WL1271) += wl1271.o
+obj-$(CONFIG_WL1271_SPI) += wl1271_spi.o
+obj-$(CONFIG_WL1271_SDIO) += wl1271_sdio.o
diff --git a/drivers/net/wireless/wl12xx/wl1251.h b/drivers/net/wireless/wl12xx/wl1251.h
index 37c61c19cae5..4f5f02a26e62 100644
--- a/drivers/net/wireless/wl12xx/wl1251.h
+++ b/drivers/net/wireless/wl12xx/wl1251.h
@@ -256,6 +256,8 @@ struct wl1251_debugfs {
struct wl1251_if_operations {
void (*read)(struct wl1251 *wl, int addr, void *buf, size_t len);
void (*write)(struct wl1251 *wl, int addr, void *buf, size_t len);
+ void (*read_elp)(struct wl1251 *wl, int addr, u32 *val);
+ void (*write_elp)(struct wl1251 *wl, int addr, u32 val);
void (*reset)(struct wl1251 *wl);
void (*enable_irq)(struct wl1251 *wl);
void (*disable_irq)(struct wl1251 *wl);
diff --git a/drivers/net/wireless/wl12xx/wl1251_boot.c b/drivers/net/wireless/wl12xx/wl1251_boot.c
index d5ac79aeaa73..2545123931e8 100644
--- a/drivers/net/wireless/wl12xx/wl1251_boot.c
+++ b/drivers/net/wireless/wl12xx/wl1251_boot.c
@@ -497,7 +497,8 @@ int wl1251_boot(struct wl1251 *wl)
/* 2. start processing NVS file */
if (wl->use_eeprom) {
wl1251_reg_write32(wl, ACX_REG_EE_START, START_EEPROM_MGR);
- msleep(4000);
+ /* Wait for EEPROM NVS burst read to complete */
+ msleep(40);
wl1251_reg_write32(wl, ACX_EEPROMLESS_IND_REG, USE_EEPROM);
} else {
ret = wl1251_boot_upload_nvs(wl);
diff --git a/drivers/net/wireless/wl12xx/wl1251_io.h b/drivers/net/wireless/wl12xx/wl1251_io.h
index b89d2ac62efb..c545e9d5f512 100644
--- a/drivers/net/wireless/wl12xx/wl1251_io.h
+++ b/drivers/net/wireless/wl12xx/wl1251_io.h
@@ -48,6 +48,26 @@ static inline void wl1251_write32(struct wl1251 *wl, int addr, u32 val)
wl->if_ops->write(wl, addr, &val, sizeof(u32));
}
+static inline u32 wl1251_read_elp(struct wl1251 *wl, int addr)
+{
+ u32 response;
+
+ if (wl->if_ops->read_elp)
+ wl->if_ops->read_elp(wl, addr, &response);
+ else
+ wl->if_ops->read(wl, addr, &response, sizeof(u32));
+
+ return response;
+}
+
+static inline void wl1251_write_elp(struct wl1251 *wl, int addr, u32 val)
+{
+ if (wl->if_ops->write_elp)
+ wl->if_ops->write_elp(wl, addr, val);
+ else
+ wl->if_ops->write(wl, addr, &val, sizeof(u32));
+}
+
/* Memory target IO, address is translated to partition 0 */
void wl1251_mem_read(struct wl1251 *wl, int addr, void *buf, size_t len);
void wl1251_mem_write(struct wl1251 *wl, int addr, void *buf, size_t len);
diff --git a/drivers/net/wireless/wl12xx/wl1251_main.c b/drivers/net/wireless/wl12xx/wl1251_main.c
index 1c8226eee409..00b24282fc73 100644
--- a/drivers/net/wireless/wl12xx/wl1251_main.c
+++ b/drivers/net/wireless/wl12xx/wl1251_main.c
@@ -147,8 +147,8 @@ static void wl1251_fw_wakeup(struct wl1251 *wl)
u32 elp_reg;
elp_reg = ELPCTRL_WAKE_UP;
- wl1251_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg);
- elp_reg = wl1251_read32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR);
+ wl1251_write_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg);
+ elp_reg = wl1251_read_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR);
if (!(elp_reg & ELPCTRL_WLAN_READY))
wl1251_warning("WLAN not ready");
@@ -202,8 +202,8 @@ static int wl1251_chip_wakeup(struct wl1251 *wl)
goto out;
}
- /* No NVS from netlink, try to get it from the filesystem */
- if (wl->nvs == NULL) {
+ if (wl->nvs == NULL && !wl->use_eeprom) {
+ /* No NVS from netlink, try to get it from the filesystem */
ret = wl1251_fetch_nvs(wl);
if (ret < 0)
goto out;
@@ -857,6 +857,7 @@ out:
}
static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
struct cfg80211_scan_request *req)
{
struct wl1251 *wl = hw->priv;
@@ -1196,6 +1197,66 @@ static const struct ieee80211_ops wl1251_ops = {
.conf_tx = wl1251_op_conf_tx,
};
+static int wl1251_read_eeprom_byte(struct wl1251 *wl, off_t offset, u8 *data)
+{
+ unsigned long timeout;
+
+ wl1251_reg_write32(wl, EE_ADDR, offset);
+ wl1251_reg_write32(wl, EE_CTL, EE_CTL_READ);
+
+ /* EE_CTL_READ clears when data is ready */
+ timeout = jiffies + msecs_to_jiffies(100);
+ while (1) {
+ if (!(wl1251_reg_read32(wl, EE_CTL) & EE_CTL_READ))
+ break;
+
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+
+ msleep(1);
+ }
+
+ *data = wl1251_reg_read32(wl, EE_DATA);
+ return 0;
+}
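wl1251_read_eeprom_byte() above uses a common poll-until-clear idiom: start the transaction, then poll the control register until the READ bit drops or a deadline passes. A generic sketch of that loop, with function-pointer stand-ins for the register read and the millisecond sleep:

#include <stdint.h>
#include <errno.h>

/* Poll until (read(reg) & mask) == 0, sleeping 1 ms between tries.
 * now_ms() and sleep_ms() stand in for jiffies and msleep().
 */
static int poll_bit_clear(uint32_t (*read)(int reg), int reg, uint32_t mask,
			  uint64_t (*now_ms)(void), void (*sleep_ms)(int ms),
			  unsigned int timeout_ms)
{
	uint64_t deadline = now_ms() + timeout_ms;

	while (read(reg) & mask) {
		if (now_ms() > deadline)
			return -ETIMEDOUT;
		sleep_ms(1);
	}

	return 0;
}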
+
+static int wl1251_read_eeprom(struct wl1251 *wl, off_t offset,
+ u8 *data, size_t len)
+{
+ size_t i;
+ int ret;
+
+ wl1251_reg_write32(wl, EE_START, 0);
+
+ for (i = 0; i < len; i++) {
+ ret = wl1251_read_eeprom_byte(wl, offset + i, &data[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int wl1251_read_eeprom_mac(struct wl1251 *wl)
+{
+ u8 mac[ETH_ALEN];
+ int i, ret;
+
+ wl1251_set_partition(wl, 0, 0, REGISTERS_BASE, REGISTERS_DOWN_SIZE);
+
+ ret = wl1251_read_eeprom(wl, 0x1c, mac, sizeof(mac));
+ if (ret < 0) {
+ wl1251_warning("failed to read MAC address from EEPROM");
+ return ret;
+ }
+
+ /* MAC is stored in reverse order */
+ for (i = 0; i < ETH_ALEN; i++)
+ wl->mac_addr[i] = mac[ETH_ALEN - i - 1];
+
+ return 0;
+}
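The MAC bytes read above are stored in reverse order in the EEPROM, so the loop mirrors them into wl->mac_addr. A worked example of the same reversal:

#include <stdint.h>

/* If the EEPROM at offset 0x1c holds 66 55 44 33 22 11, the
 * resulting address is 11:22:33:44:55:66.
 */
static void unreverse_mac(const uint8_t in[6], uint8_t out[6])
{
	int i;

	for (i = 0; i < 6; i++)
		out[i] = in[5 - i];
}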
+
static int wl1251_register_hw(struct wl1251 *wl)
{
int ret;
@@ -1231,7 +1292,6 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
wl->hw->channel_change_time = 10000;
wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_NOISE_DBM |
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_BEACON_FILTER |
IEEE80211_HW_SUPPORTS_UAPSD;
@@ -1242,6 +1302,9 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
wl->hw->queues = 4;
+ if (wl->use_eeprom)
+ wl1251_read_eeprom_mac(wl);
+
ret = wl1251_register_hw(wl);
if (ret)
goto out;
diff --git a/drivers/net/wireless/wl12xx/wl1251_ps.c b/drivers/net/wireless/wl12xx/wl1251_ps.c
index 851dfb65e474..b55cb2bd459a 100644
--- a/drivers/net/wireless/wl12xx/wl1251_ps.c
+++ b/drivers/net/wireless/wl12xx/wl1251_ps.c
@@ -45,7 +45,7 @@ void wl1251_elp_work(struct work_struct *work)
goto out;
wl1251_debug(DEBUG_PSM, "chip to elp");
- wl1251_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
+ wl1251_write_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
wl->elp = true;
out:
@@ -79,9 +79,9 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl)
start = jiffies;
timeout = jiffies + msecs_to_jiffies(WL1251_WAKEUP_TIMEOUT);
- wl1251_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP);
+ wl1251_write_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP);
- elp_reg = wl1251_read32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR);
+ elp_reg = wl1251_read_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR);
/*
* FIXME: we should wait for irq from chip but, as a temporary
@@ -93,7 +93,7 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl)
return -ETIMEDOUT;
}
msleep(1);
- elp_reg = wl1251_read32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR);
+ elp_reg = wl1251_read_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR);
}
wl1251_debug(DEBUG_PSM, "wakeup time: %u ms",
diff --git a/drivers/net/wireless/wl12xx/wl1251_reg.h b/drivers/net/wireless/wl12xx/wl1251_reg.h
index 0ca3b4326056..d16edd9bf06c 100644
--- a/drivers/net/wireless/wl12xx/wl1251_reg.h
+++ b/drivers/net/wireless/wl12xx/wl1251_reg.h
@@ -46,7 +46,14 @@
#define SOR_CFG (REGISTERS_BASE + 0x0800)
#define ECPU_CTRL (REGISTERS_BASE + 0x0804)
#define HI_CFG (REGISTERS_BASE + 0x0808)
+
+/* EEPROM registers */
#define EE_START (REGISTERS_BASE + 0x080C)
+#define EE_CTL (REGISTERS_BASE + 0x2000)
+#define EE_DATA (REGISTERS_BASE + 0x2004)
+#define EE_ADDR (REGISTERS_BASE + 0x2008)
+
+#define EE_CTL_READ 2
#define CHIP_ID_B (REGISTERS_BASE + 0x5674)
diff --git a/drivers/net/wireless/wl12xx/wl1251_rx.c b/drivers/net/wireless/wl12xx/wl1251_rx.c
index 6f229e0990f4..af5c67b4da95 100644
--- a/drivers/net/wireless/wl12xx/wl1251_rx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_rx.c
@@ -74,12 +74,6 @@ static void wl1251_rx_status(struct wl1251 *wl,
status->signal = desc->rssi;
- /*
- * FIXME: guessing that snr needs to be divided by two, otherwise
- * the values don't make any sense
- */
- status->noise = desc->rssi - desc->snr / 2;
-
status->freq = ieee80211_channel_to_frequency(desc->channel);
status->flag |= RX_FLAG_TSFT;
diff --git a/drivers/net/wireless/wl12xx/wl1251_sdio.c b/drivers/net/wireless/wl12xx/wl1251_sdio.c
index 9423f22bdced..d234285c2c81 100644
--- a/drivers/net/wireless/wl12xx/wl1251_sdio.c
+++ b/drivers/net/wireless/wl12xx/wl1251_sdio.c
@@ -20,20 +20,14 @@
* Copyright (C) 2009 Bob Copeland (me@bobcopeland.com)
*/
#include <linux/module.h>
-#include <linux/crc7.h>
#include <linux/mod_devicetable.h>
-#include <linux/irq.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/platform_device.h>
+#include <linux/spi/wl12xx.h>
+#include <linux/irq.h>
#include "wl1251.h"
-#include "wl12xx_80211.h"
-#include "wl1251_reg.h"
-#include "wl1251_ps.h"
-#include "wl1251_io.h"
-#include "wl1251_tx.h"
-#include "wl1251_debugfs.h"
#ifndef SDIO_VENDOR_ID_TI
#define SDIO_VENDOR_ID_TI 0x104c
@@ -43,6 +37,8 @@
#define SDIO_DEVICE_ID_TI_WL1251 0x9066
#endif
+static struct wl12xx_platform_data *wl12xx_board_data;
+
static struct sdio_func *wl_to_func(struct wl1251 *wl)
{
return wl->if_priv;
@@ -65,7 +61,8 @@ static const struct sdio_device_id wl1251_devices[] = {
MODULE_DEVICE_TABLE(sdio, wl1251_devices);
-void wl1251_sdio_read(struct wl1251 *wl, int addr, void *buf, size_t len)
+static void wl1251_sdio_read(struct wl1251 *wl, int addr,
+ void *buf, size_t len)
{
int ret;
struct sdio_func *func = wl_to_func(wl);
@@ -77,7 +74,8 @@ void wl1251_sdio_read(struct wl1251 *wl, int addr, void *buf, size_t len)
sdio_release_host(func);
}
-void wl1251_sdio_write(struct wl1251 *wl, int addr, void *buf, size_t len)
+static void wl1251_sdio_write(struct wl1251 *wl, int addr,
+ void *buf, size_t len)
{
int ret;
struct sdio_func *func = wl_to_func(wl);
@@ -89,7 +87,33 @@ void wl1251_sdio_write(struct wl1251 *wl, int addr, void *buf, size_t len)
sdio_release_host(func);
}
-void wl1251_sdio_reset(struct wl1251 *wl)
+static void wl1251_sdio_read_elp(struct wl1251 *wl, int addr, u32 *val)
+{
+ int ret = 0;
+ struct sdio_func *func = wl_to_func(wl);
+
+ sdio_claim_host(func);
+ *val = sdio_readb(func, addr, &ret);
+ sdio_release_host(func);
+
+ if (ret)
+ wl1251_error("sdio_readb failed (%d)", ret);
+}
+
+static void wl1251_sdio_write_elp(struct wl1251 *wl, int addr, u32 val)
+{
+ int ret = 0;
+ struct sdio_func *func = wl_to_func(wl);
+
+ sdio_claim_host(func);
+ sdio_writeb(func, val, addr, &ret);
+ sdio_release_host(func);
+
+ if (ret)
+ wl1251_error("sdio_writeb failed (%d)", ret);
+}
+
+static void wl1251_sdio_reset(struct wl1251 *wl)
{
}
@@ -111,19 +135,64 @@ static void wl1251_sdio_disable_irq(struct wl1251 *wl)
sdio_release_host(func);
}
-void wl1251_sdio_set_power(bool enable)
+/* Interrupts when using dedicated WLAN_IRQ pin */
+static irqreturn_t wl1251_line_irq(int irq, void *cookie)
+{
+ struct wl1251 *wl = cookie;
+
+ ieee80211_queue_work(wl->hw, &wl->irq_work);
+
+ return IRQ_HANDLED;
+}
+
+static void wl1251_enable_line_irq(struct wl1251 *wl)
{
+ return enable_irq(wl->irq);
}
-struct wl1251_if_operations wl1251_sdio_ops = {
+static void wl1251_disable_line_irq(struct wl1251 *wl)
+{
+ return disable_irq(wl->irq);
+}
+
+static void wl1251_sdio_set_power(bool enable)
+{
+}
+
+static struct wl1251_if_operations wl1251_sdio_ops = {
.read = wl1251_sdio_read,
.write = wl1251_sdio_write,
+ .write_elp = wl1251_sdio_write_elp,
+ .read_elp = wl1251_sdio_read_elp,
.reset = wl1251_sdio_reset,
- .enable_irq = wl1251_sdio_enable_irq,
- .disable_irq = wl1251_sdio_disable_irq,
};
-int wl1251_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
+static int wl1251_platform_probe(struct platform_device *pdev)
+{
+ if (pdev->id != -1) {
+ wl1251_error("can only handle single device");
+ return -ENODEV;
+ }
+
+ wl12xx_board_data = pdev->dev.platform_data;
+ return 0;
+}
+
+/*
+ * Dummy platform_driver for passing platform_data to this driver,
+ * until we have a way to pass this through SDIO subsystem or
+ * some other way.
+ */
+static struct platform_driver wl1251_platform_driver = {
+ .driver = {
+ .name = "wl1251_data",
+ .owner = THIS_MODULE,
+ },
+ .probe = wl1251_platform_probe,
+};
+
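The dummy platform driver above exists only so board code can hand platform_data to this driver before SDIO probing runs. A hypothetical board-file fragment that would feed it, using only the wl12xx_platform_data fields consumed later in probe (set_power, irq, use_eeprom); the device name must match "wl1251_data":

#include <linux/platform_device.h>
#include <linux/spi/wl12xx.h>

/* Hypothetical board power callback: toggle the WLAN enable GPIO here. */
static void board_wl1251_set_power(bool enable)
{
}

static struct wl12xx_platform_data board_wl1251_pdata = {
	.set_power	= board_wl1251_set_power,
	.use_eeprom	= true,
	/* .irq left at 0: the driver then falls back to SDIO interrupts */
};

static struct platform_device board_wl1251_device = {
	.name	= "wl1251_data",
	.id	= -1,
	.dev	= {
		.platform_data	= &board_wl1251_pdata,
	},
};

Registering board_wl1251_device from board init code with platform_device_register() makes the data visible to wl1251_platform_probe() above.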
+static int wl1251_sdio_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
{
int ret;
struct wl1251 *wl;
@@ -141,20 +210,50 @@ int wl1251_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
goto release;
sdio_set_block_size(func, 512);
+ sdio_release_host(func);
SET_IEEE80211_DEV(hw, &func->dev);
wl->if_priv = func;
wl->if_ops = &wl1251_sdio_ops;
wl->set_power = wl1251_sdio_set_power;
- sdio_release_host(func);
+ if (wl12xx_board_data != NULL) {
+ wl->set_power = wl12xx_board_data->set_power;
+ wl->irq = wl12xx_board_data->irq;
+ wl->use_eeprom = wl12xx_board_data->use_eeprom;
+ }
+
+ if (wl->irq) {
+ ret = request_irq(wl->irq, wl1251_line_irq, 0, "wl1251", wl);
+ if (ret < 0) {
+ wl1251_error("request_irq() failed: %d", ret);
+ goto disable;
+ }
+
+ set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
+ disable_irq(wl->irq);
+
+ wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
+ wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
+
+ wl1251_info("using dedicated interrupt line");
+ } else {
+ wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
+ wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
+
+ wl1251_info("using SDIO interrupt");
+ }
+
ret = wl1251_init_ieee80211(wl);
if (ret)
- goto disable;
+ goto out_free_irq;
sdio_set_drvdata(func, wl);
return ret;
+out_free_irq:
+ if (wl->irq)
+ free_irq(wl->irq, wl);
disable:
sdio_claim_host(func);
sdio_disable_func(func);
@@ -167,6 +266,8 @@ static void __devexit wl1251_sdio_remove(struct sdio_func *func)
{
struct wl1251 *wl = sdio_get_drvdata(func);
+ if (wl->irq)
+ free_irq(wl->irq, wl);
wl1251_free_hw(wl);
sdio_claim_host(func);
@@ -186,6 +287,12 @@ static int __init wl1251_sdio_init(void)
{
int err;
+ err = platform_driver_register(&wl1251_platform_driver);
+ if (err) {
+ wl1251_error("failed to register platform driver: %d", err);
+ return err;
+ }
+
err = sdio_register_driver(&wl1251_sdio_driver);
if (err)
wl1251_error("failed to register sdio driver: %d", err);
@@ -195,6 +302,7 @@ static int __init wl1251_sdio_init(void)
static void __exit wl1251_sdio_exit(void)
{
sdio_unregister_driver(&wl1251_sdio_driver);
+ platform_driver_unregister(&wl1251_platform_driver);
wl1251_notice("unloaded");
}
diff --git a/drivers/net/wireless/wl12xx/wl1251_spi.c b/drivers/net/wireless/wl12xx/wl1251_spi.c
index 3bfb59bd4635..e81474203a23 100644
--- a/drivers/net/wireless/wl12xx/wl1251_spi.c
+++ b/drivers/net/wireless/wl12xx/wl1251_spi.c
@@ -310,7 +310,7 @@ static int __devexit wl1251_spi_remove(struct spi_device *spi)
static struct spi_driver wl1251_spi_driver = {
.driver = {
- .name = "wl1251",
+ .name = DRIVER_NAME,
.bus = &spi_bus_type,
.owner = THIS_MODULE,
},
diff --git a/drivers/net/wireless/wl12xx/wl1271.h b/drivers/net/wireless/wl12xx/wl1271.h
index 97ea5096bc8c..6f1b6b5640c0 100644
--- a/drivers/net/wireless/wl12xx/wl1271.h
+++ b/drivers/net/wireless/wl12xx/wl1271.h
@@ -53,6 +53,9 @@ enum {
DEBUG_MAC80211 = BIT(11),
DEBUG_CMD = BIT(12),
DEBUG_ACX = BIT(13),
+ DEBUG_SDIO = BIT(14),
+ DEBUG_FILTERS = BIT(15),
+ DEBUG_ADHOC = BIT(16),
DEBUG_ALL = ~0,
};
@@ -110,6 +113,9 @@ enum {
#define WL1271_FW_NAME "wl1271-fw.bin"
#define WL1271_NVS_NAME "wl1271-nvs.bin"
+#define WL1271_TX_SECURITY_LO16(s) ((u16)((s) & 0xffff))
+#define WL1271_TX_SECURITY_HI32(s) ((u32)(((s) >> 16) & 0xffffffff))
+
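The WL1271_TX_SECURITY_LO16/HI32 macros added above split the driver's 64-bit security sequence counter into a 16-bit low part and a 32-bit high part, 48 bits in total. A minimal check of the arithmetic, using plain stdint types in place of the kernel's u16/u32/s64:

#include <assert.h>
#include <stdint.h>

#define TX_SECURITY_LO16(s) ((uint16_t)((s) & 0xffff))
#define TX_SECURITY_HI32(s) ((uint32_t)(((s) >> 16) & 0xffffffff))

int main(void)
{
	int64_t seq = 0x123456789ABCLL;	/* example 48-bit counter value */

	assert(TX_SECURITY_LO16(seq) == 0x9ABC);
	assert(TX_SECURITY_HI32(seq) == 0x12345678);
	return 0;
}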
/* NVS data structure */
#define WL1271_NVS_SECTION_SIZE 468
@@ -142,14 +148,7 @@ struct wl1271_nvs_file {
*/
#undef WL1271_80211A_ENABLED
-/*
- * FIXME: for the wl1271, a busy word count of 1 here will result in a more
- * optimal SPI interface. There is some SPI bug however, causing RXS time outs
- * with this mode occasionally on boot, so lets have three for now. A value of
- * three should make sure, that the chipset will always be ready, though this
- * will impact throughput and latencies slightly.
- */
-#define WL1271_BUSY_WORD_CNT 3
+#define WL1271_BUSY_WORD_CNT 1
#define WL1271_BUSY_WORD_LEN (WL1271_BUSY_WORD_CNT * sizeof(u32))
#define WL1271_ELP_HW_STATE_ASLEEP 0
@@ -334,11 +333,27 @@ struct wl1271_scan {
u8 probe_requests;
};
+struct wl1271_if_operations {
+ void (*read)(struct wl1271 *wl, int addr, void *buf, size_t len,
+ bool fixed);
+ void (*write)(struct wl1271 *wl, int addr, void *buf, size_t len,
+ bool fixed);
+ void (*reset)(struct wl1271 *wl);
+ void (*init)(struct wl1271 *wl);
+ void (*power)(struct wl1271 *wl, bool enable);
+ struct device* (*dev)(struct wl1271 *wl);
+ void (*enable_irq)(struct wl1271 *wl);
+ void (*disable_irq)(struct wl1271 *wl);
+};
+
struct wl1271 {
+ struct platform_device *plat_dev;
struct ieee80211_hw *hw;
bool mac80211_registered;
- struct spi_device *spi;
+ void *if_priv;
+
+ struct wl1271_if_operations *if_ops;
void (*set_power)(bool enable);
int irq;
@@ -357,6 +372,9 @@ struct wl1271 {
#define WL1271_FLAG_IN_ELP (6)
#define WL1271_FLAG_PSM (7)
#define WL1271_FLAG_PSM_REQUESTED (8)
+#define WL1271_FLAG_IRQ_PENDING (9)
+#define WL1271_FLAG_IRQ_RUNNING (10)
+#define WL1271_FLAG_IDLE (11)
unsigned long flags;
struct wl1271_partition_set part;
@@ -370,9 +388,12 @@ struct wl1271 {
size_t fw_len;
struct wl1271_nvs_file *nvs;
+ s8 hw_pg_ver;
+
u8 bssid[ETH_ALEN];
u8 mac_addr[ETH_ALEN];
u8 bss_type;
+ u8 set_bss_type;
u8 ssid[IW_ESSID_MAX_SIZE + 1];
u8 ssid_len;
int channel;
@@ -382,13 +403,13 @@ struct wl1271 {
/* Accounting for allocated / available TX blocks on HW */
u32 tx_blocks_freed[NUM_TX_QUEUES];
u32 tx_blocks_available;
- u8 tx_results_count;
+ u32 tx_results_count;
/* Transmitted TX packets counter for chipset interface */
- int tx_packets_count;
+ u32 tx_packets_count;
/* Time-offset between host and chipset clocks */
- int time_offset;
+ s64 time_offset;
/* Session counter for the chipset */
int session_counter;
@@ -403,8 +424,7 @@ struct wl1271 {
/* Security sequence number counters */
u8 tx_security_last_seq;
- u16 tx_security_seq_16;
- u32 tx_security_seq_32;
+ s64 tx_security_seq;
/* FW Rx counter */
u32 rx_counter;
@@ -430,14 +450,19 @@ struct wl1271 {
/* currently configured rate set */
u32 sta_rate_set;
u32 basic_rate_set;
+ u32 basic_rate;
u32 rate_set;
/* The current band */
enum ieee80211_band band;
+ /* Beaconing interval (needed for ad-hoc) */
+ u32 beacon_int;
+
/* Default key (for WEP) */
u32 default_key;
+ unsigned int filters;
unsigned int rx_config;
unsigned int rx_filter;
@@ -450,10 +475,13 @@ struct wl1271 {
/* in dBm */
int power_level;
+ int rssi_thold;
+ int last_rssi_event;
+
struct wl1271_stats stats;
struct wl1271_debugfs debugfs;
- u32 buffer_32;
+ __le32 buffer_32;
u32 buffer_cmd;
u32 buffer_busyword[WL1271_BUSY_WORD_CNT];
@@ -465,6 +493,8 @@ struct wl1271 {
/* Current chipset configuration */
struct conf_drv_settings conf;
+ bool sg_enabled;
+
struct list_head list;
};
@@ -477,7 +507,8 @@ int wl1271_plt_stop(struct wl1271 *wl);
#define WL1271_DEFAULT_POWER_LEVEL 0
-#define WL1271_TX_QUEUE_MAX_LENGTH 20
+#define WL1271_TX_QUEUE_LOW_WATERMARK 10
+#define WL1271_TX_QUEUE_HIGH_WATERMARK 25
/* WL1271 needs a 200ms sleep after power on, and a 20ms sleep before power
on in case is has been shut down shortly before */
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.c b/drivers/net/wireless/wl12xx/wl1271_acx.c
index 308782421fce..e19e2f8f1e52 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.c
@@ -32,7 +32,6 @@
#include "wl1271.h"
#include "wl12xx_80211.h"
#include "wl1271_reg.h"
-#include "wl1271_spi.h"
#include "wl1271_ps.h"
int wl1271_acx_wake_up_conditions(struct wl1271 *wl)
@@ -137,12 +136,7 @@ int wl1271_acx_tx_power(struct wl1271 *wl, int power)
goto out;
}
- /*
- * FIXME: This is a workaround needed while we don't the correct
- * calibration, to avoid distortions
- */
- /* acx->current_tx_power = power * 10; */
- acx->current_tx_power = 120;
+ acx->current_tx_power = power * 10;
ret = wl1271_cmd_configure(wl, DOT11_CUR_TX_PWR, acx, sizeof(*acx));
if (ret < 0) {
@@ -511,12 +505,17 @@ out:
return ret;
}
-int wl1271_acx_conn_monit_params(struct wl1271 *wl)
+#define ACX_CONN_MONIT_DISABLE_VALUE 0xffffffff
+
+int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable)
{
struct acx_conn_monit_params *acx;
+ u32 threshold = ACX_CONN_MONIT_DISABLE_VALUE;
+ u32 timeout = ACX_CONN_MONIT_DISABLE_VALUE;
int ret;
- wl1271_debug(DEBUG_ACX, "acx connection monitor parameters");
+ wl1271_debug(DEBUG_ACX, "acx connection monitor parameters: %s",
+ enable ? "enabled" : "disabled");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
@@ -524,8 +523,13 @@ int wl1271_acx_conn_monit_params(struct wl1271 *wl)
goto out;
}
- acx->synch_fail_thold = cpu_to_le32(wl->conf.conn.synch_fail_thold);
- acx->bss_lose_timeout = cpu_to_le32(wl->conf.conn.bss_lose_timeout);
+ if (enable) {
+ threshold = wl->conf.conn.synch_fail_thold;
+ timeout = wl->conf.conn.bss_lose_timeout;
+ }
+
+ acx->synch_fail_thold = cpu_to_le32(threshold);
+ acx->bss_lose_timeout = cpu_to_le32(timeout);
ret = wl1271_cmd_configure(wl, ACX_CONN_MONIT_PARAMS,
acx, sizeof(*acx));
@@ -541,7 +545,7 @@ out:
}
-int wl1271_acx_sg_enable(struct wl1271 *wl)
+int wl1271_acx_sg_enable(struct wl1271 *wl, bool enable)
{
struct acx_bt_wlan_coex *pta;
int ret;
@@ -554,7 +558,10 @@ int wl1271_acx_sg_enable(struct wl1271 *wl)
goto out;
}
- pta->enable = SG_ENABLE;
+ if (enable)
+ pta->enable = wl->conf.sg.state;
+ else
+ pta->enable = CONF_SG_DISABLE;
ret = wl1271_cmd_configure(wl, ACX_SG_ENABLE, pta, sizeof(*pta));
if (ret < 0) {
@@ -571,7 +578,7 @@ int wl1271_acx_sg_cfg(struct wl1271 *wl)
{
struct acx_bt_wlan_coex_param *param;
struct conf_sg_settings *c = &wl->conf.sg;
- int ret;
+ int i, ret;
wl1271_debug(DEBUG_ACX, "acx sg cfg");
@@ -582,19 +589,9 @@ int wl1271_acx_sg_cfg(struct wl1271 *wl)
}
/* BT-WLAN coext parameters */
- param->per_threshold = cpu_to_le32(c->per_threshold);
- param->max_scan_compensation_time =
- cpu_to_le32(c->max_scan_compensation_time);
- param->nfs_sample_interval = cpu_to_le16(c->nfs_sample_interval);
- param->load_ratio = c->load_ratio;
- param->auto_ps_mode = c->auto_ps_mode;
- param->probe_req_compensation = c->probe_req_compensation;
- param->scan_window_compensation = c->scan_window_compensation;
- param->antenna_config = c->antenna_config;
- param->beacon_miss_threshold = c->beacon_miss_threshold;
- param->rate_adaptation_threshold =
- cpu_to_le32(c->rate_adaptation_threshold);
- param->rate_adaptation_snr = c->rate_adaptation_snr;
+ for (i = 0; i < CONF_SG_PARAMS_MAX; i++)
+ param->params[i] = cpu_to_le32(c->params[i]);
+ param->param_idx = CONF_SG_PARAMS_ALL;
ret = wl1271_cmd_configure(wl, ACX_SG_CFG, param, sizeof(*param));
if (ret < 0) {
@@ -806,7 +803,7 @@ int wl1271_acx_rate_policies(struct wl1271 *wl)
/* configure one basic rate class */
idx = ACX_TX_BASIC_RATE;
- acx->rate_class[idx].enabled_rates = cpu_to_le32(wl->basic_rate_set);
+ acx->rate_class[idx].enabled_rates = cpu_to_le32(wl->basic_rate);
acx->rate_class[idx].short_retry_limit = c->short_retry_limit;
acx->rate_class[idx].long_retry_limit = c->long_retry_limit;
acx->rate_class[idx].aflags = c->aflags;
@@ -1143,3 +1140,129 @@ out:
kfree(acx);
return ret;
}
+
+int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable)
+{
+ struct wl1271_acx_keep_alive_mode *acx = NULL;
+ int ret = 0;
+
+ wl1271_debug(DEBUG_ACX, "acx keep alive mode: %d", enable);
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->enabled = enable;
+
+ ret = wl1271_cmd_configure(wl, ACX_KEEP_ALIVE_MODE, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx keep alive mode failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
+int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid)
+{
+ struct wl1271_acx_keep_alive_config *acx = NULL;
+ int ret = 0;
+
+ wl1271_debug(DEBUG_ACX, "acx keep alive config");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->period = cpu_to_le32(wl->conf.conn.keep_alive_interval);
+ acx->index = index;
+ acx->tpl_validation = tpl_valid;
+ acx->trigger = ACX_KEEP_ALIVE_NO_TX;
+
+ ret = wl1271_cmd_configure(wl, ACX_SET_KEEP_ALIVE_CONFIG,
+ acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx keep alive config failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
+int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
+ s16 thold, u8 hyst)
+{
+ struct wl1271_acx_rssi_snr_trigger *acx = NULL;
+ int ret = 0;
+
+ wl1271_debug(DEBUG_ACX, "acx rssi snr trigger");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ wl->last_rssi_event = -1;
+
+ acx->pacing = cpu_to_le16(wl->conf.roam_trigger.trigger_pacing);
+ acx->metric = WL1271_ACX_TRIG_METRIC_RSSI_BEACON;
+ acx->type = WL1271_ACX_TRIG_TYPE_EDGE;
+ if (enable)
+ acx->enable = WL1271_ACX_TRIG_ENABLE;
+ else
+ acx->enable = WL1271_ACX_TRIG_DISABLE;
+
+ acx->index = WL1271_ACX_TRIG_IDX_RSSI;
+ acx->dir = WL1271_ACX_TRIG_DIR_BIDIR;
+ acx->threshold = cpu_to_le16(thold);
+ acx->hysteresis = hyst;
+
+ ret = wl1271_cmd_configure(wl, ACX_RSSI_SNR_TRIGGER, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx rssi snr trigger setting failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
+int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl)
+{
+ struct wl1271_acx_rssi_snr_avg_weights *acx = NULL;
+ struct conf_roam_trigger_settings *c = &wl->conf.roam_trigger;
+ int ret = 0;
+
+ wl1271_debug(DEBUG_ACX, "acx rssi snr avg weights");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->rssi_beacon = c->avg_weight_rssi_beacon;
+ acx->rssi_data = c->avg_weight_rssi_data;
+ acx->snr_beacon = c->avg_weight_snr_beacon;
+ acx->snr_data = c->avg_weight_snr_data;
+
+ ret = wl1271_cmd_configure(wl, ACX_RSSI_SNR_WEIGHTS, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx rssi snr trigger weights failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.h b/drivers/net/wireless/wl12xx/wl1271_acx.h
index aeccc98581eb..420e7e2fc021 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.h
@@ -392,81 +392,27 @@ struct acx_conn_monit_params {
__le32 bss_lose_timeout; /* number of TU's from synch fail */
} __attribute__ ((packed));
-enum {
- SG_ENABLE = 0,
- SG_DISABLE,
- SG_SENSE_NO_ACTIVITY,
- SG_SENSE_ACTIVE
-};
-
struct acx_bt_wlan_coex {
struct acx_header header;
- /*
- * 0 -> PTA enabled
- * 1 -> PTA disabled
- * 2 -> sense no active mode, i.e.
- * an interrupt is sent upon
- * BT activity.
- * 3 -> PTA is switched on in response
- * to the interrupt sending.
- */
u8 enable;
u8 pad[3];
} __attribute__ ((packed));
-struct acx_dco_itrim_params {
+struct acx_bt_wlan_coex_param {
struct acx_header header;
- u8 enable;
+ __le32 params[CONF_SG_PARAMS_MAX];
+ u8 param_idx;
u8 padding[3];
- __le32 timeout;
} __attribute__ ((packed));
-#define PTA_ANTENNA_TYPE_DEF (0)
-#define PTA_BT_HP_MAXTIME_DEF (2000)
-#define PTA_WLAN_HP_MAX_TIME_DEF (5000)
-#define PTA_SENSE_DISABLE_TIMER_DEF (1350)
-#define PTA_PROTECTIVE_RX_TIME_DEF (1500)
-#define PTA_PROTECTIVE_TX_TIME_DEF (1500)
-#define PTA_TIMEOUT_NEXT_BT_LP_PACKET_DEF (3000)
-#define PTA_SIGNALING_TYPE_DEF (1)
-#define PTA_AFH_LEVERAGE_ON_DEF (0)
-#define PTA_NUMBER_QUIET_CYCLE_DEF (0)
-#define PTA_MAX_NUM_CTS_DEF (3)
-#define PTA_NUMBER_OF_WLAN_PACKETS_DEF (2)
-#define PTA_NUMBER_OF_BT_PACKETS_DEF (2)
-#define PTA_PROTECTIVE_RX_TIME_FAST_DEF (1500)
-#define PTA_PROTECTIVE_TX_TIME_FAST_DEF (3000)
-#define PTA_CYCLE_TIME_FAST_DEF (8700)
-#define PTA_RX_FOR_AVALANCHE_DEF (5)
-#define PTA_ELP_HP_DEF (0)
-#define PTA_ANTI_STARVE_PERIOD_DEF (500)
-#define PTA_ANTI_STARVE_NUM_CYCLE_DEF (4)
-#define PTA_ALLOW_PA_SD_DEF (1)
-#define PTA_TIME_BEFORE_BEACON_DEF (6300)
-#define PTA_HPDM_MAX_TIME_DEF (1600)
-#define PTA_TIME_OUT_NEXT_WLAN_DEF (2550)
-#define PTA_AUTO_MODE_NO_CTS_DEF (0)
-#define PTA_BT_HP_RESPECTED_DEF (3)
-#define PTA_WLAN_RX_MIN_RATE_DEF (24)
-#define PTA_ACK_MODE_DEF (1)
-
-struct acx_bt_wlan_coex_param {
+struct acx_dco_itrim_params {
struct acx_header header;
- __le32 per_threshold;
- __le32 max_scan_compensation_time;
- __le16 nfs_sample_interval;
- u8 load_ratio;
- u8 auto_ps_mode;
- u8 probe_req_compensation;
- u8 scan_window_compensation;
- u8 antenna_config;
- u8 beacon_miss_threshold;
- __le32 rate_adaptation_threshold;
- s8 rate_adaptation_snr;
+ u8 enable;
u8 padding[3];
+ __le32 timeout;
} __attribute__ ((packed));
struct acx_energy_detection {
@@ -969,6 +915,84 @@ struct wl1271_acx_pm_config {
u8 padding[3];
} __attribute__ ((packed));
+struct wl1271_acx_keep_alive_mode {
+ struct acx_header header;
+
+ u8 enabled;
+ u8 padding[3];
+} __attribute__ ((packed));
+
+enum {
+ ACX_KEEP_ALIVE_NO_TX = 0,
+ ACX_KEEP_ALIVE_PERIOD_ONLY
+};
+
+enum {
+ ACX_KEEP_ALIVE_TPL_INVALID = 0,
+ ACX_KEEP_ALIVE_TPL_VALID
+};
+
+struct wl1271_acx_keep_alive_config {
+ struct acx_header header;
+
+ __le32 period;
+ u8 index;
+ u8 tpl_validation;
+ u8 trigger;
+ u8 padding;
+} __attribute__ ((packed));
+
+enum {
+ WL1271_ACX_TRIG_TYPE_LEVEL = 0,
+ WL1271_ACX_TRIG_TYPE_EDGE,
+};
+
+enum {
+ WL1271_ACX_TRIG_DIR_LOW = 0,
+ WL1271_ACX_TRIG_DIR_HIGH,
+ WL1271_ACX_TRIG_DIR_BIDIR,
+};
+
+enum {
+ WL1271_ACX_TRIG_ENABLE = 1,
+ WL1271_ACX_TRIG_DISABLE,
+};
+
+enum {
+ WL1271_ACX_TRIG_METRIC_RSSI_BEACON = 0,
+ WL1271_ACX_TRIG_METRIC_RSSI_DATA,
+ WL1271_ACX_TRIG_METRIC_SNR_BEACON,
+ WL1271_ACX_TRIG_METRIC_SNR_DATA,
+};
+
+enum {
+ WL1271_ACX_TRIG_IDX_RSSI = 0,
+ WL1271_ACX_TRIG_COUNT = 8,
+};
+
+struct wl1271_acx_rssi_snr_trigger {
+ struct acx_header header;
+
+ __le16 threshold;
+ __le16 pacing; /* 0 - 60000 ms */
+ u8 metric;
+ u8 type;
+ u8 dir;
+ u8 hysteresis;
+ u8 index;
+ u8 enable;
+ u8 padding[2];
+};
+
+struct wl1271_acx_rssi_snr_avg_weights {
+ struct acx_header header;
+
+ u8 rssi_beacon;
+ u8 rssi_data;
+ u8 snr_beacon;
+ u8 snr_data;
+};
+
enum {
ACX_WAKE_UP_CONDITIONS = 0x0002,
ACX_MEM_CFG = 0x0003,
@@ -1017,8 +1041,8 @@ enum {
ACX_FRAG_CFG = 0x004F,
ACX_BET_ENABLE = 0x0050,
ACX_RSSI_SNR_TRIGGER = 0x0051,
- ACX_RSSI_SNR_WEIGHTS = 0x0051,
- ACX_KEEP_ALIVE_MODE = 0x0052,
+ ACX_RSSI_SNR_WEIGHTS = 0x0052,
+ ACX_KEEP_ALIVE_MODE = 0x0053,
ACX_SET_KEEP_ALIVE_CONFIG = 0x0054,
ACX_BA_SESSION_RESPONDER_POLICY = 0x0055,
ACX_BA_SESSION_INITIATOR_POLICY = 0x0056,
@@ -1058,8 +1082,8 @@ int wl1271_acx_rts_threshold(struct wl1271 *wl, u16 rts_threshold);
int wl1271_acx_dco_itrim_params(struct wl1271 *wl);
int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter);
int wl1271_acx_beacon_filter_table(struct wl1271 *wl);
-int wl1271_acx_conn_monit_params(struct wl1271 *wl);
-int wl1271_acx_sg_enable(struct wl1271 *wl);
+int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable);
+int wl1271_acx_sg_enable(struct wl1271 *wl, bool enable);
int wl1271_acx_sg_cfg(struct wl1271 *wl);
int wl1271_acx_cca_threshold(struct wl1271 *wl);
int wl1271_acx_bcn_dtim_options(struct wl1271 *wl);
@@ -1085,5 +1109,10 @@ int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable);
int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, u8 *address,
u8 version);
int wl1271_acx_pm_config(struct wl1271 *wl);
+int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable);
+int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid);
+int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
+ s16 thold, u8 hyst);
+int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl);
#endif /* __WL1271_ACX_H__ */
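With acx_bt_wlan_coex_param now carrying a flat parameter table, pushing the whole soft-gemini configuration reduces to a single configure call; a rough sketch follows, in which ACX_SG_CFG and the use of CONF_SG_PARAMS_ALL as the "write everything" index are assumptions, while the struct layout and the CONF_SG_* values are defined in this patch.

/* Illustrative sketch, not the actual wl1271_acx_sg_cfg() body. */
static int wl1271_sg_cfg_sketch(struct wl1271 *wl)
{
	struct acx_bt_wlan_coex_param *param;
	int i, ret;

	param = kzalloc(sizeof(*param), GFP_KERNEL);
	if (!param)
		return -ENOMEM;

	/* copy the host-order conf table into the little-endian ACX block */
	for (i = 0; i < CONF_SG_PARAMS_MAX; i++)
		param->params[i] = cpu_to_le32(wl->conf.sg.params[i]);
	param->param_idx = CONF_SG_PARAMS_ALL;	/* write the whole table */

	ret = wl1271_cmd_configure(wl, ACX_SG_CFG, param, sizeof(*param));
	kfree(param);
	return ret;
}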
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.c b/drivers/net/wireless/wl12xx/wl1271_boot.c
index 024356263065..1a36d8a2196e 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.c
+++ b/drivers/net/wireless/wl12xx/wl1271_boot.c
@@ -1,7 +1,7 @@
/*
* This file is part of wl1271
*
- * Copyright (C) 2008-2009 Nokia Corporation
+ * Copyright (C) 2008-2010 Nokia Corporation
*
* Contact: Luciano Coelho <luciano.coelho@nokia.com>
*
@@ -27,7 +27,6 @@
#include "wl1271_acx.h"
#include "wl1271_reg.h"
#include "wl1271_boot.h"
-#include "wl1271_spi.h"
#include "wl1271_io.h"
#include "wl1271_event.h"
@@ -230,6 +229,14 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
nvs_len = sizeof(wl->nvs->nvs);
nvs_ptr = (u8 *)wl->nvs->nvs;
+ /* update current MAC address to NVS */
+ nvs_ptr[11] = wl->mac_addr[0];
+ nvs_ptr[10] = wl->mac_addr[1];
+ nvs_ptr[6] = wl->mac_addr[2];
+ nvs_ptr[5] = wl->mac_addr[3];
+ nvs_ptr[4] = wl->mac_addr[4];
+ nvs_ptr[3] = wl->mac_addr[5];
+
/*
* Layout before the actual NVS tables:
* 1 byte : burst length.
@@ -300,7 +307,7 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
static void wl1271_boot_enable_interrupts(struct wl1271 *wl)
{
- enable_irq(wl->irq);
+ wl1271_enable_interrupts(wl);
wl1271_write32(wl, ACX_REG_INTERRUPT_MASK,
WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK));
wl1271_write32(wl, HI_CFG, HI_CFG_DEF_VAL);
@@ -344,7 +351,7 @@ static int wl1271_boot_soft_reset(struct wl1271 *wl)
static int wl1271_boot_run_firmware(struct wl1271 *wl)
{
int loop, ret;
- u32 chip_id, interrupt;
+ u32 chip_id, intr;
wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT);
@@ -361,15 +368,15 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
loop = 0;
while (loop++ < INIT_LOOP) {
udelay(INIT_LOOP_DELAY);
- interrupt = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
+ intr = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
- if (interrupt == 0xffffffff) {
+ if (intr == 0xffffffff) {
wl1271_error("error reading hardware complete "
"init indication");
return -EIO;
}
/* check that ACX_INTR_INIT_COMPLETE is enabled */
- else if (interrupt & WL1271_ACX_INTR_INIT_COMPLETE) {
+ else if (intr & WL1271_ACX_INTR_INIT_COMPLETE) {
wl1271_write32(wl, ACX_REG_INTERRUPT_ACK,
WL1271_ACX_INTR_INIT_COMPLETE);
break;
@@ -404,7 +411,10 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
/* unmask required mbox events */
wl->event_mask = BSS_LOSE_EVENT_ID |
SCAN_COMPLETE_EVENT_ID |
- PS_REPORT_EVENT_ID;
+ PS_REPORT_EVENT_ID |
+ JOIN_EVENT_COMPLETE_ID |
+ DISCONNECT_EVENT_COMPLETE_ID |
+ RSSI_SNR_TRIGGER_0_EVENT_ID;
ret = wl1271_event_unmask(wl);
if (ret < 0) {
@@ -431,11 +441,23 @@ static int wl1271_boot_write_irq_polarity(struct wl1271 *wl)
return 0;
}
+static void wl1271_boot_hw_version(struct wl1271 *wl)
+{
+ u32 fuse;
+
+ fuse = wl1271_top_reg_read(wl, REG_FUSE_DATA_2_1);
+ fuse = (fuse & PG_VER_MASK) >> PG_VER_OFFSET;
+
+ wl->hw_pg_ver = (s8)fuse;
+}
+
int wl1271_boot(struct wl1271 *wl)
{
int ret = 0;
u32 tmp, clk, pause;
+ wl1271_boot_hw_version(wl);
+
if (REF_CLOCK == 0 || REF_CLOCK == 2 || REF_CLOCK == 4)
/* ref clk: 19.2/38.4/38.4-XTAL */
clk = 0x3;
@@ -445,11 +467,15 @@ int wl1271_boot(struct wl1271 *wl)
if (REF_CLOCK != 0) {
u16 val;
- /* Set clock type */
+ /* Set clock type (open drain) */
val = wl1271_top_reg_read(wl, OCP_REG_CLK_TYPE);
val &= FREF_CLK_TYPE_BITS;
- val |= CLK_REQ_PRCM;
wl1271_top_reg_write(wl, OCP_REG_CLK_TYPE, val);
+
+ /* Set clock pull mode (no pull) */
+ val = wl1271_top_reg_read(wl, OCP_REG_CLK_PULL);
+ val |= NO_PULL;
+ wl1271_top_reg_write(wl, OCP_REG_CLK_PULL, val);
} else {
u16 val;
/* Set clock polarity */
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.h b/drivers/net/wireless/wl12xx/wl1271_boot.h
index 412443ee655a..f829699d597e 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.h
+++ b/drivers/net/wireless/wl12xx/wl1271_boot.h
@@ -53,10 +53,16 @@ struct wl1271_static_data {
#define OCP_REG_POLARITY 0x0064
#define OCP_REG_CLK_TYPE 0x0448
#define OCP_REG_CLK_POLARITY 0x0cb2
+#define OCP_REG_CLK_PULL 0x0cb4
-#define CMD_MBOX_ADDRESS 0x407B4
+#define REG_FUSE_DATA_2_1 0x050a
+#define PG_VER_MASK 0x3c
+#define PG_VER_OFFSET 2
-#define POLARITY_LOW BIT(1)
+#define CMD_MBOX_ADDRESS 0x407B4
+
+#define POLARITY_LOW BIT(1)
+#define NO_PULL (BIT(14) | BIT(15))
#define FREF_CLK_TYPE_BITS 0xfffffe7f
#define CLK_REQ_PRCM 0x100
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.c b/drivers/net/wireless/wl12xx/wl1271_cmd.c
index e7832f3318eb..19393e236e2c 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.c
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.c
@@ -1,7 +1,7 @@
/*
* This file is part of wl1271
*
- * Copyright (C) 2009 Nokia Corporation
+ * Copyright (C) 2009-2010 Nokia Corporation
*
* Contact: Luciano Coelho <luciano.coelho@nokia.com>
*
@@ -26,15 +26,18 @@
#include <linux/crc7.h>
#include <linux/spi/spi.h>
#include <linux/etherdevice.h>
+#include <linux/ieee80211.h>
#include <linux/slab.h>
#include "wl1271.h"
#include "wl1271_reg.h"
-#include "wl1271_spi.h"
#include "wl1271_io.h"
#include "wl1271_acx.h"
#include "wl12xx_80211.h"
#include "wl1271_cmd.h"
+#include "wl1271_event.h"
+
+#define WL1271_CMD_FAST_POLL_COUNT 50
/*
* send command to firmware
@@ -52,6 +55,7 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
u32 intr;
int ret = 0;
u16 status;
+ u16 poll_count = 0;
cmd = buf;
cmd->id = cpu_to_le16(id);
@@ -73,7 +77,11 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
goto out;
}
- msleep(1);
+ poll_count++;
+ if (poll_count < WL1271_CMD_FAST_POLL_COUNT)
+ udelay(10);
+ else
+ msleep(1);
intr = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
}
@@ -249,7 +257,36 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
return ret;
}
-int wl1271_cmd_join(struct wl1271 *wl)
+/*
+ * Poll the mailbox event field until any of the bits in the mask is set or a
+ * timeout occurs (WL1271_EVENT_TIMEOUT in msecs)
+ */
+static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
+{
+ u32 events_vector, event;
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT);
+
+ do {
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+
+ msleep(1);
+
+ /* read from both event fields */
+ wl1271_read(wl, wl->mbox_ptr[0], &events_vector,
+ sizeof(events_vector), false);
+ event = events_vector & mask;
+ wl1271_read(wl, wl->mbox_ptr[1], &events_vector,
+ sizeof(events_vector), false);
+ event |= events_vector & mask;
+ } while (!event);
+
+ return 0;
+}
+
+int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type)
{
static bool do_cal = true;
struct wl1271_cmd_join *join;
@@ -280,30 +317,13 @@ int wl1271_cmd_join(struct wl1271 *wl)
join->rx_config_options = cpu_to_le32(wl->rx_config);
join->rx_filter_options = cpu_to_le32(wl->rx_filter);
- join->bss_type = wl->bss_type;
+ join->bss_type = bss_type;
+ join->basic_rate_set = cpu_to_le32(wl->basic_rate_set);
- /*
- * FIXME: disable temporarily all filters because after commit
- * 9cef8737 "mac80211: fix managed mode BSSID handling" broke
- * association. The filter logic needs to be implemented properly
- * and once that is done, this hack can be removed.
- */
- join->rx_config_options = cpu_to_le32(0);
- join->rx_filter_options = cpu_to_le32(WL1271_DEFAULT_RX_FILTER);
-
- if (wl->band == IEEE80211_BAND_2GHZ)
- join->basic_rate_set = cpu_to_le32(CONF_HW_BIT_RATE_1MBPS |
- CONF_HW_BIT_RATE_2MBPS |
- CONF_HW_BIT_RATE_5_5MBPS |
- CONF_HW_BIT_RATE_11MBPS);
- else {
+ if (wl->band == IEEE80211_BAND_5GHZ)
join->bss_type |= WL1271_JOIN_CMD_BSS_TYPE_5GHZ;
- join->basic_rate_set = cpu_to_le32(CONF_HW_BIT_RATE_6MBPS |
- CONF_HW_BIT_RATE_12MBPS |
- CONF_HW_BIT_RATE_24MBPS);
- }
- join->beacon_interval = cpu_to_le16(WL1271_DEFAULT_BEACON_INT);
+ join->beacon_interval = cpu_to_le16(wl->beacon_int);
join->dtim_interval = WL1271_DEFAULT_DTIM_PERIOD;
join->channel = wl->channel;
@@ -320,8 +340,7 @@ int wl1271_cmd_join(struct wl1271 *wl)
/* reset TX security counters */
wl->tx_security_last_seq = 0;
- wl->tx_security_seq_16 = 0;
- wl->tx_security_seq_32 = 0;
+ wl->tx_security_seq = 0;
ret = wl1271_cmd_send(wl, CMD_START_JOIN, join, sizeof(*join), 0);
if (ret < 0) {
@@ -329,11 +348,9 @@ int wl1271_cmd_join(struct wl1271 *wl)
goto out_free;
}
- /*
- * ugly hack: we should wait for JOIN_EVENT_COMPLETE_ID but to
- * simplify locking we just sleep instead, for now
- */
- msleep(10);
+ ret = wl1271_cmd_wait_for_event(wl, JOIN_EVENT_COMPLETE_ID);
+ if (ret < 0)
+ wl1271_error("cmd join event completion error");
out_free:
kfree(join);
@@ -465,7 +482,7 @@ int wl1271_cmd_data_path(struct wl1271 *wl, bool enable)
if (ret < 0) {
wl1271_error("tx %s cmd for channel %d failed",
enable ? "start" : "stop", cmd->channel);
- return ret;
+ goto out;
}
wl1271_debug(DEBUG_BOOT, "tx %s cmd channel %d",
@@ -499,7 +516,7 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, bool send)
ps_params->ps_mode = ps_mode;
ps_params->send_null_data = send;
ps_params->retries = 5;
- ps_params->hang_over_period = 128;
+ ps_params->hang_over_period = 1;
ps_params->null_data_rate = cpu_to_le32(1); /* 1 Mbps */
ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params,
@@ -549,25 +566,29 @@ out:
return ret;
}
-int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
- u8 active_scan, u8 high_prio, u8 band,
- u8 probe_requests)
+int wl1271_cmd_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
+ const u8 *ie, size_t ie_len, u8 active_scan,
+ u8 high_prio, u8 band, u8 probe_requests)
{
struct wl1271_cmd_trigger_scan_to *trigger = NULL;
struct wl1271_cmd_scan *params = NULL;
struct ieee80211_channel *channels;
+ u32 rate;
int i, j, n_ch, ret;
u16 scan_options = 0;
u8 ieee_band;
- if (band == WL1271_SCAN_BAND_2_4_GHZ)
+ if (band == WL1271_SCAN_BAND_2_4_GHZ) {
ieee_band = IEEE80211_BAND_2GHZ;
- else if (band == WL1271_SCAN_BAND_DUAL && wl1271_11a_enabled())
+ rate = wl->conf.tx.basic_rate;
+ } else if (band == WL1271_SCAN_BAND_DUAL && wl1271_11a_enabled()) {
ieee_band = IEEE80211_BAND_2GHZ;
- else if (band == WL1271_SCAN_BAND_5_GHZ && wl1271_11a_enabled())
+ rate = wl->conf.tx.basic_rate;
+ } else if (band == WL1271_SCAN_BAND_5_GHZ && wl1271_11a_enabled()) {
ieee_band = IEEE80211_BAND_5GHZ;
- else
+ rate = wl->conf.tx.basic_rate_5;
+ } else
return -EINVAL;
if (wl->hw->wiphy->bands[ieee_band]->channels == NULL)
@@ -594,8 +615,7 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
params->params.scan_options = cpu_to_le16(scan_options);
params->params.num_probe_requests = probe_requests;
- /* Let the fw autodetect suitable tx_rate for probes */
- params->params.tx_rate = 0;
+ params->params.tx_rate = cpu_to_le32(rate);
params->params.tid_trigger = 0;
params->params.scan_tag = WL1271_SCAN_DEFAULT_TAG;
@@ -622,12 +642,13 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
params->params.num_channels = j;
- if (len && ssid) {
- params->params.ssid_len = len;
- memcpy(params->params.ssid, ssid, len);
+ if (ssid_len && ssid) {
+ params->params.ssid_len = ssid_len;
+ memcpy(params->params.ssid, ssid, ssid_len);
}
- ret = wl1271_cmd_build_probe_req(wl, ssid, len, ieee_band);
+ ret = wl1271_cmd_build_probe_req(wl, ssid, ssid_len,
+ ie, ie_len, ieee_band);
if (ret < 0) {
wl1271_error("PROBE request template failed");
goto out;
@@ -658,9 +679,9 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
wl->scan.active = active_scan;
wl->scan.high_prio = high_prio;
wl->scan.probe_requests = probe_requests;
- if (len && ssid) {
- wl->scan.ssid_len = len;
- memcpy(wl->scan.ssid, ssid, len);
+ if (ssid_len && ssid) {
+ wl->scan.ssid_len = ssid_len;
+ memcpy(wl->scan.ssid, ssid, ssid_len);
} else
wl->scan.ssid_len = 0;
}
@@ -675,11 +696,12 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
out:
kfree(params);
+ kfree(trigger);
return ret;
}
int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
- void *buf, size_t buf_len)
+ void *buf, size_t buf_len, int index, u32 rates)
{
struct wl1271_cmd_template_set *cmd;
int ret = 0;
@@ -697,9 +719,10 @@ int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
cmd->len = cpu_to_le16(buf_len);
cmd->template_type = template_id;
- cmd->enabled_rates = cpu_to_le32(wl->conf.tx.rc_conf.enabled_rates);
+ cmd->enabled_rates = cpu_to_le32(rates);
cmd->short_retry_limit = wl->conf.tx.rc_conf.short_retry_limit;
cmd->long_retry_limit = wl->conf.tx.rc_conf.long_retry_limit;
+ cmd->index = index;
if (buf)
memcpy(cmd->template_data, buf, buf_len);
@@ -717,155 +740,129 @@ out:
return ret;
}
-static int wl1271_build_basic_rates(u8 *rates, u8 band)
+int wl1271_cmd_build_null_data(struct wl1271 *wl)
{
- u8 index = 0;
-
- if (band == IEEE80211_BAND_2GHZ) {
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB;
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB;
- } else if (band == IEEE80211_BAND_5GHZ) {
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_6MB;
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_12MB;
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_24MB;
- } else {
- wl1271_error("build_basic_rates invalid band: %d", band);
- }
+ struct sk_buff *skb = NULL;
+ int size;
+ void *ptr;
+ int ret = -ENOMEM;
- return index;
-}
-static int wl1271_build_extended_rates(u8 *rates, u8 band)
-{
- u8 index = 0;
-
- if (band == IEEE80211_BAND_2GHZ) {
- rates[index++] = IEEE80211_OFDM_RATE_6MB;
- rates[index++] = IEEE80211_OFDM_RATE_9MB;
- rates[index++] = IEEE80211_OFDM_RATE_12MB;
- rates[index++] = IEEE80211_OFDM_RATE_18MB;
- rates[index++] = IEEE80211_OFDM_RATE_24MB;
- rates[index++] = IEEE80211_OFDM_RATE_36MB;
- rates[index++] = IEEE80211_OFDM_RATE_48MB;
- rates[index++] = IEEE80211_OFDM_RATE_54MB;
- } else if (band == IEEE80211_BAND_5GHZ) {
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_9MB;
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_18MB;
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_24MB;
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_36MB;
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_48MB;
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_54MB;
+ if (wl->bss_type == BSS_TYPE_IBSS) {
+ size = sizeof(struct wl12xx_null_data_template);
+ ptr = NULL;
} else {
- wl1271_error("build_basic_rates invalid band: %d", band);
+ skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
+ if (!skb)
+ goto out;
+ size = skb->len;
+ ptr = skb->data;
}
- return index;
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, ptr, size, 0,
+ WL1271_RATE_AUTOMATIC);
+
+out:
+ dev_kfree_skb(skb);
+ if (ret)
+ wl1271_warning("cmd buld null data failed %d", ret);
+
+ return ret;
+
}
-int wl1271_cmd_build_null_data(struct wl1271 *wl)
+int wl1271_cmd_build_klv_null_data(struct wl1271 *wl)
{
- struct wl12xx_null_data_template template;
+ struct sk_buff *skb = NULL;
+ int ret = -ENOMEM;
- if (!is_zero_ether_addr(wl->bssid)) {
- memcpy(template.header.da, wl->bssid, ETH_ALEN);
- memcpy(template.header.bssid, wl->bssid, ETH_ALEN);
- } else {
- memset(template.header.da, 0xff, ETH_ALEN);
- memset(template.header.bssid, 0xff, ETH_ALEN);
- }
+ skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
+ if (!skb)
+ goto out;
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV,
+ skb->data, skb->len,
+ CMD_TEMPL_KLV_IDX_NULL_DATA,
+ WL1271_RATE_AUTOMATIC);
- memcpy(template.header.sa, wl->mac_addr, ETH_ALEN);
- template.header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_DATA |
- IEEE80211_STYPE_NULLFUNC |
- IEEE80211_FCTL_TODS);
+out:
+ dev_kfree_skb(skb);
+ if (ret)
+ wl1271_warning("cmd build klv null data failed %d", ret);
- return wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, &template,
- sizeof(template));
+ return ret;
}
int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid)
{
- struct wl12xx_ps_poll_template template;
-
- memcpy(template.bssid, wl->bssid, ETH_ALEN);
- memcpy(template.ta, wl->mac_addr, ETH_ALEN);
-
- /* aid in PS-Poll has its two MSBs each set to 1 */
- template.aid = cpu_to_le16(1 << 15 | 1 << 14 | aid);
+ struct sk_buff *skb;
+ int ret = 0;
- template.fc = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL);
+ skb = ieee80211_pspoll_get(wl->hw, wl->vif);
+ if (!skb)
+ goto out;
- return wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, &template,
- sizeof(template));
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, skb->data,
+ skb->len, 0, wl->basic_rate);
+out:
+ dev_kfree_skb(skb);
+ return ret;
}
-int wl1271_cmd_build_probe_req(struct wl1271 *wl, u8 *ssid, size_t ssid_len,
- u8 band)
+int wl1271_cmd_build_probe_req(struct wl1271 *wl,
+ const u8 *ssid, size_t ssid_len,
+ const u8 *ie, size_t ie_len, u8 band)
{
- struct wl12xx_probe_req_template template;
- struct wl12xx_ie_rates *rates;
- char *ptr;
- u16 size;
+ struct sk_buff *skb;
int ret;
- ptr = (char *)&template;
- size = sizeof(struct ieee80211_header);
-
- memset(template.header.da, 0xff, ETH_ALEN);
- memset(template.header.bssid, 0xff, ETH_ALEN);
- memcpy(template.header.sa, wl->mac_addr, ETH_ALEN);
- template.header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
-
- /* IEs */
- /* SSID */
- template.ssid.header.id = WLAN_EID_SSID;
- template.ssid.header.len = ssid_len;
- if (ssid_len && ssid)
- memcpy(template.ssid.ssid, ssid, ssid_len);
- size += sizeof(struct wl12xx_ie_header) + ssid_len;
- ptr += size;
-
- /* Basic Rates */
- rates = (struct wl12xx_ie_rates *)ptr;
- rates->header.id = WLAN_EID_SUPP_RATES;
- rates->header.len = wl1271_build_basic_rates(rates->rates, band);
- size += sizeof(struct wl12xx_ie_header) + rates->header.len;
- ptr += sizeof(struct wl12xx_ie_header) + rates->header.len;
-
- /* Extended rates */
- rates = (struct wl12xx_ie_rates *)ptr;
- rates->header.id = WLAN_EID_EXT_SUPP_RATES;
- rates->header.len = wl1271_build_extended_rates(rates->rates, band);
- size += sizeof(struct wl12xx_ie_header) + rates->header.len;
-
- wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", &template, size);
+ skb = ieee80211_probereq_get(wl->hw, wl->vif, ssid, ssid_len,
+ ie, ie_len);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", skb->data, skb->len);
if (band == IEEE80211_BAND_2GHZ)
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
- &template, size);
+ skb->data, skb->len, 0,
+ wl->conf.tx.basic_rate);
else
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5,
- &template, size);
+ skb->data, skb->len, 0,
+ wl->conf.tx.basic_rate_5);
+
+out:
+ dev_kfree_skb(skb);
return ret;
}
+int wl1271_build_qos_null_data(struct wl1271 *wl)
+{
+ struct ieee80211_qos_hdr template;
+
+ memset(&template, 0, sizeof(template));
+
+ memcpy(template.addr1, wl->bssid, ETH_ALEN);
+ memcpy(template.addr2, wl->mac_addr, ETH_ALEN);
+ memcpy(template.addr3, wl->bssid, ETH_ALEN);
+
+ template.frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_QOS_NULLFUNC |
+ IEEE80211_FCTL_TODS);
+
+ /* FIXME: not sure what priority to use here */
+ template.qos_ctrl = cpu_to_le16(0);
+
+ return wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, &template,
+ sizeof(template), 0,
+ WL1271_RATE_AUTOMATIC);
+}
+
int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id)
{
struct wl1271_cmd_set_keys *cmd;
@@ -976,6 +973,10 @@ int wl1271_cmd_disconnect(struct wl1271 *wl)
goto out_free;
}
+ ret = wl1271_cmd_wait_for_event(wl, DISCONNECT_EVENT_COMPLETE_ID);
+ if (ret < 0)
+ wl1271_error("cmd disconnect event completion error");
+
out_free:
kfree(cmd);
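The keep-alive pieces added across this patch (KLV template upload, per-template validation and the global mode switch) are meant to be chained after association; a hedged sketch of that bring-up order follows, where the wrapper and the ordering are assumptions and only the individual calls are introduced by this patch.

/* Sketch: enable firmware keep-alive once associated.  Ordering is an
 * assumption; the calls themselves are added by this patch. */
static int wl1271_keep_alive_up(struct wl1271 *wl)
{
	int ret;

	/* upload the null-data frame into the keep-alive (KLV) template */
	ret = wl1271_cmd_build_klv_null_data(wl);
	if (ret < 0)
		return ret;

	/* mark template slot 0 as valid; the period comes from
	 * conf.conn.keep_alive_interval */
	ret = wl1271_acx_keep_alive_config(wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
					   ACX_KEEP_ALIVE_TPL_VALID);
	if (ret < 0)
		return ret;

	/* finally switch the keep-alive feature on */
	return wl1271_acx_keep_alive_mode(wl, true);
}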
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.h b/drivers/net/wireless/wl12xx/wl1271_cmd.h
index 2dc06c73532b..f2820b42a943 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.h
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.h
@@ -33,7 +33,7 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
size_t res_len);
int wl1271_cmd_general_parms(struct wl1271 *wl);
int wl1271_cmd_radio_parms(struct wl1271 *wl);
-int wl1271_cmd_join(struct wl1271 *wl);
+int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type);
int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
@@ -41,15 +41,18 @@ int wl1271_cmd_data_path(struct wl1271 *wl, bool enable);
int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, bool send);
int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
size_t len);
-int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
- u8 active_scan, u8 high_prio, u8 band,
- u8 probe_requests);
+int wl1271_cmd_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
+ const u8 *ie, size_t ie_len, u8 active_scan,
+ u8 high_prio, u8 band, u8 probe_requests);
int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
- void *buf, size_t buf_len);
+ void *buf, size_t buf_len, int index, u32 rates);
int wl1271_cmd_build_null_data(struct wl1271 *wl);
int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid);
-int wl1271_cmd_build_probe_req(struct wl1271 *wl, u8 *ssid, size_t ssid_len,
- u8 band);
+int wl1271_cmd_build_probe_req(struct wl1271 *wl,
+ const u8 *ssid, size_t ssid_len,
+ const u8 *ie, size_t ie_len, u8 band);
+int wl1271_build_qos_null_data(struct wl1271 *wl);
+int wl1271_cmd_build_klv_null_data(struct wl1271 *wl);
int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id);
int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, const u8 *addr,
@@ -99,6 +102,11 @@ enum wl1271_commands {
#define MAX_CMD_PARAMS 572
+enum {
+ CMD_TEMPL_KLV_IDX_NULL_DATA = 0,
+ CMD_TEMPL_KLV_IDX_MAX = 4
+};
+
enum cmd_templ {
CMD_TEMPL_NULL_DATA = 0,
CMD_TEMPL_BEACON,
@@ -121,6 +129,7 @@ enum cmd_templ {
/* unit ms */
#define WL1271_COMMAND_TIMEOUT 2000
#define WL1271_CMD_TEMPL_MAX_SIZE 252
+#define WL1271_EVENT_TIMEOUT 750
struct wl1271_cmd_header {
__le16 id;
@@ -243,6 +252,8 @@ struct cmd_enabledisable_path {
u8 padding[3];
} __attribute__ ((packed));
+#define WL1271_RATE_AUTOMATIC 0
+
struct wl1271_cmd_template_set {
struct wl1271_cmd_header header;
@@ -509,6 +520,8 @@ enum wl1271_disconnect_type {
};
struct wl1271_cmd_disconnect {
+ struct wl1271_cmd_header header;
+
__le32 rx_config_options;
__le32 rx_filter_options;
diff --git a/drivers/net/wireless/wl12xx/wl1271_conf.h b/drivers/net/wireless/wl12xx/wl1271_conf.h
index 6f9e75cc5640..d046d044b5bd 100644
--- a/drivers/net/wireless/wl12xx/wl1271_conf.h
+++ b/drivers/net/wireless/wl12xx/wl1271_conf.h
@@ -65,110 +65,344 @@ enum {
CONF_HW_RATE_INDEX_MAX = CONF_HW_RATE_INDEX_54MBPS,
};
-struct conf_sg_settings {
+enum {
+ CONF_HW_RXTX_RATE_MCS7 = 0,
+ CONF_HW_RXTX_RATE_MCS6,
+ CONF_HW_RXTX_RATE_MCS5,
+ CONF_HW_RXTX_RATE_MCS4,
+ CONF_HW_RXTX_RATE_MCS3,
+ CONF_HW_RXTX_RATE_MCS2,
+ CONF_HW_RXTX_RATE_MCS1,
+ CONF_HW_RXTX_RATE_MCS0,
+ CONF_HW_RXTX_RATE_54,
+ CONF_HW_RXTX_RATE_48,
+ CONF_HW_RXTX_RATE_36,
+ CONF_HW_RXTX_RATE_24,
+ CONF_HW_RXTX_RATE_22,
+ CONF_HW_RXTX_RATE_18,
+ CONF_HW_RXTX_RATE_12,
+ CONF_HW_RXTX_RATE_11,
+ CONF_HW_RXTX_RATE_9,
+ CONF_HW_RXTX_RATE_6,
+ CONF_HW_RXTX_RATE_5_5,
+ CONF_HW_RXTX_RATE_2,
+ CONF_HW_RXTX_RATE_1,
+ CONF_HW_RXTX_RATE_MAX,
+ CONF_HW_RXTX_RATE_UNSUPPORTED = 0xff
+};
+
+enum {
+ CONF_SG_DISABLE = 0,
+ CONF_SG_PROTECTIVE,
+ CONF_SG_OPPORTUNISTIC
+};
+
+enum {
/*
- * Defines the PER threshold in PPM of the BT voice of which reaching
- * this value will trigger raising the priority of the BT voice by
- * the BT IP until next NFS sample interval time as defined in
- * nfs_sample_interval.
+ * PER threshold in PPM of the BT voice
*
- * Unit: PER value in PPM (parts per million)
- * #Error_packets / #Total_packets
+ * Range: 0 - 10000000
+ */
+ CONF_SG_BT_PER_THRESHOLD = 0,
- * Range: u32
+ /*
+ * Number of consecutive RX_ACTIVE activities to override BT voice
+ * frames to ensure WLAN connection
+ *
+ * Range: 0 - 100
+ */
+ CONF_SG_HV3_MAX_OVERRIDE,
+
+ /*
+ * Defines the PER threshold of the BT voice
+ *
+ * Range: 0 - 65000
+ */
+ CONF_SG_BT_NFS_SAMPLE_INTERVAL,
+
+ /*
+ * Defines the load ratio of BT
+ *
+ * Range: 0 - 100 (%)
+ */
+ CONF_SG_BT_LOAD_RATIO,
+
+ /*
+ * Defines whether the SG will force WLAN host to enter/exit PSM
+ *
+ * Range: 1 - SG can force, 0 - host handles PSM
+ */
+ CONF_SG_AUTO_PS_MODE,
+
+ /*
+ * Compensation percentage of probe requests when scan initiated
+ * during BT voice/ACL link.
+ *
+ * Range: 0 - 255 (%)
+ */
+ CONF_SG_AUTO_SCAN_PROBE_REQ,
+
+ /*
+ * Compensation percentage of probe requests when active scan initiated
+ * during BT voice
+ *
+ * Range: 0 - 255 (%)
+ */
+ CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3,
+
+ /*
+ * Defines antenna configuration (single/dual antenna)
+ *
+ * Range: 0 - single antenna, 1 - dual antenna
+ */
+ CONF_SG_ANTENNA_CONFIGURATION,
+
+ /*
+ * The threshold (percent) of max consecutive beacon misses before
+ * increasing priority of beacon reception.
+ *
+ * Range: 0 - 100 (%)
+ */
+ CONF_SG_BEACON_MISS_PERCENT,
+
+ /*
+ * The rate threshold below which receiving a data frame from the AP
+ * will increase the priority of the data frame above BT traffic.
+ *
+ * Range: 0, 2, 5(=5.5), 6, 9, 11, 12, 18, 24, 36, 48, 54
+ */
+ CONF_SG_RATE_ADAPT_THRESH,
+
+ /*
+ * Not used currently.
+ *
+ * Range: 0
+ */
+ CONF_SG_RATE_ADAPT_SNR,
+
+ /*
+ * Configure the min and max time BT gains the antenna
+ * in WLAN PSM / BT master basic rate
+ *
+ * Range: 0 - 255 (ms)
+ */
+ CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_BR,
+ CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_BR,
+
+ /*
+ * The time after which no new WLAN trigger frame is transmitted
+ * in WLAN PSM / BT master basic rate
+ *
+ * Range: 0 - 255 (ms)
+ */
+ CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_BR,
+
+ /*
+ * Configure the min and max time BT gains the antenna
+ * in WLAN PSM / BT slave basic rate
+ *
+ * Range: 0 - 255 (ms)
+ */
+ CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_BR,
+ CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_BR,
+
+ /*
+ * The time after which no new WLAN trigger frame is transmitted
+ * in WLAN PSM / BT slave basic rate
+ *
+ * Range: 0 - 255 (ms)
+ */
+ CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_BR,
+
+ /*
+ * Configure the min and max time BT gains the antenna
+ * in WLAN PSM / BT master EDR
+ *
+ * Range: 0 - 255 (ms)
+ */
+ CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_EDR,
+ CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_EDR,
+
+ /*
+ * The time after which no new WLAN trigger frame is transmitted
+ * in WLAN PSM / BT master EDR
+ *
+ * Range: 0 - 255 (ms)
+ */
+ CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_EDR,
+
+ /*
+ * Configure the min and max time BT gains the antenna
+ * in WLAN PSM / BT slave EDR
+ *
+ * Range: 0 - 255 (ms)
+ */
+ CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_EDR,
+ CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_EDR,
+
+ /*
+ * The time after which no new WLAN trigger frame is transmitted
+ * in WLAN PSM / BT slave EDR
+ *
+ * Range: 0 - 255 (ms)
+ */
+ CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_EDR,
+
+ /*
+ * RX guard time before the beginning of a new BT voice frame during
+ * which no new WLAN trigger frame is transmitted.
+ *
+ * Range: 0 - 100000 (us)
+ */
+ CONF_SG_RXT,
+
+ /*
+ * TX guard time before the beginning of a new BT voice frame during
+ * which no new WLAN frame is transmitted.
+ *
+ * Range: 0 - 100000 (us)
+ */
+
+ CONF_SG_TXT,
+
+ /*
+ * Enable adaptive RXT/TXT algorithm. If disabled, the host values
+ * will be utilized.
+ *
+ * Range: 0 - disable, 1 - enable
+ */
+ CONF_SG_ADAPTIVE_RXT_TXT,
+
+ /*
+ * The used WLAN legacy service period during active BT ACL link
+ *
+ * Range: 0 - 255 (ms)
+ */
+ CONF_SG_PS_POLL_TIMEOUT,
+
+ /*
+ * The used WLAN UPSD service period during active BT ACL link
+ *
+ * Range: 0 - 255 (ms)
*/
- u32 per_threshold;
+ CONF_SG_UPSD_TIMEOUT,
/*
- * This value is an absolute time in micro-seconds to limit the
- * maximum scan duration compensation while in SG
+ * Configure the min and max time BT gains the antenna
+ * in WLAN Active / BT master EDR
+ *
+ * Range: 0 - 255 (ms)
*/
- u32 max_scan_compensation_time;
+ CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MIN_EDR,
+ CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MAX_EDR,
- /* Defines the PER threshold of the BT voice of which reaching this
- * value will trigger raising the priority of the BT voice until next
- * NFS sample interval time as defined in sample_interval.
+ /*
+ * The maximum time WLAN can gain the antenna for
+ * in WLAN Active / BT master EDR
*
- * Unit: msec
- * Range: 1-65000
+ * Range: 0 - 255 (ms)
*/
- u16 nfs_sample_interval;
+ CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_MASTER_EDR,
/*
- * Defines the load ratio for the BT.
- * The WLAN ratio is: 100 - load_ratio
+ * Configure the min and max time BT gains the antenna
+ * in WLAN Active / BT slave EDR
*
- * Unit: Percent
- * Range: 0-100
+ * Range: 0 - 255 (ms)
*/
- u8 load_ratio;
+ CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MIN_EDR,
+ CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MAX_EDR,
/*
- * true - Co-ex is allowed to enter/exit P.S automatically and
- * transparently to the host
+ * The maximum time WLAN can gain the antenna for
+ * in WLAN Active / BT slave EDR
*
- * false - Co-ex is disallowed to enter/exit P.S and will trigger an
- * event to the host to notify for the need to enter/exit P.S
- * due to BT change state
+ * Range: 0 - 255 (ms)
+ */
+ CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_SLAVE_EDR,
+
+ /*
+ * Configure the min and max time BT gains the antenna
+ * in WLAN Active / BT basic rate
+ *
+ * Range: 0 - 255 (ms)
+ */
+ CONF_SG_WLAN_ACTIVE_BT_ACL_MIN_BR,
+ CONF_SG_WLAN_ACTIVE_BT_ACL_MAX_BR,
+
+ /*
+ * The maximum time WLAN can gain the antenna for
+ * in WLAN Active / BT basic rate
*
+ * Range: 0 - 255 (ms)
*/
- u8 auto_ps_mode;
+ CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_BR,
/*
- * This parameter defines the compensation percentage of num of probe
- * requests in case scan is initiated during BT voice/BT ACL
- * guaranteed link.
+ * Compensation percentage of WLAN passive scan window if initiated
+ * during BT voice
*
- * Unit: Percent
- * Range: 0-255 (0 - No compensation)
+ * Range: 0 - 1000 (%)
*/
- u8 probe_req_compensation;
+ CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3,
/*
- * This parameter defines the compensation percentage of scan window
- * size in case scan is initiated during BT voice/BT ACL Guaranteed
- * link.
+ * Compensation percentage of WLAN passive scan window if initiated
+ * during BT A2DP
*
- * Unit: Percent
- * Range: 0-255 (0 - No compensation)
+ * Range: 0 - 1000 (%)
*/
- u8 scan_window_compensation;
+ CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP,
/*
- * Defines the antenna configuration.
+ * Fixed time ensured for BT traffic to gain the antenna during WLAN
+ * passive scan.
*
- * Range: 0 - Single Antenna; 1 - Dual Antenna
+ * Range: 0 - 1000 ms
*/
- u8 antenna_config;
+ CONF_SG_PASSIVE_SCAN_A2DP_BT_TIME,
/*
- * The percent out of the Max consecutive beacon miss roaming trigger
- * which is the threshold for raising the priority of beacon
- * reception.
+ * Fixed time ensured for WLAN traffic to gain the antenna during WLAN
+ * passive scan.
*
- * Range: 1-100
- * N = MaxConsecutiveBeaconMiss
- * P = coexMaxConsecutiveBeaconMissPrecent
- * Threshold = MIN( N-1, round(N * P / 100))
+ * Range: 0 - 1000 ms
*/
- u8 beacon_miss_threshold;
+ CONF_SG_PASSIVE_SCAN_A2DP_WLAN_TIME,
/*
- * The RX rate threshold below which rate adaptation is assumed to be
- * occurring at the AP which will raise priority for ACTIVE_RX and RX
- * SP.
+ * Number of consecutive BT voice frames not interrupted by WLAN
*
- * Range: HW_BIT_RATE_*
+ * Range: 0 - 100
*/
- u32 rate_adaptation_threshold;
+ CONF_SG_HV3_MAX_SERVED,
/*
- * The SNR above which the RX rate threshold indicating AP rate
- * adaptation is valid
+ * Protection time of the DHCP procedure.
*
- * Range: -128 - 127
+ * Range: 0 - 100000 (ms)
*/
- s8 rate_adaptation_snr;
+ CONF_SG_DHCP_TIME,
+
+ /*
+ * Compensation percentage of WLAN active scan window if initiated
+ * during BT A2DP
+ *
+ * Range: 0 - 1000 (%)
+ */
+ CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP,
+ CONF_SG_TEMP_PARAM_1,
+ CONF_SG_TEMP_PARAM_2,
+ CONF_SG_TEMP_PARAM_3,
+ CONF_SG_TEMP_PARAM_4,
+ CONF_SG_TEMP_PARAM_5,
+ CONF_SG_PARAMS_MAX,
+ CONF_SG_PARAMS_ALL = 0xff
+};
+
+struct conf_sg_settings {
+ u32 params[CONF_SG_PARAMS_MAX];
+ u8 state;
};
enum conf_rx_queue_type {
@@ -440,6 +674,19 @@ struct conf_tx_settings {
*/
u16 tx_compl_threshold;
+ /*
+ * The rate used for control messages and scanning on the 2.4GHz band
+ *
+ * Range: CONF_HW_BIT_RATE_* bit mask
+ */
+ u32 basic_rate;
+
+ /*
+ * The rate used for control messages and scanning on the 5GHz band
+ *
+ * Range: CONF_HW_BIT_RATE_* bit mask
+ */
+ u32 basic_rate_5;
};
enum {
@@ -509,65 +756,6 @@ enum {
CONF_TRIG_EVENT_DIR_BIDIR
};
-
-struct conf_sig_trigger {
- /*
- * The RSSI / SNR threshold value.
- *
- * FIXME: what is the range?
- */
- s16 threshold;
-
- /*
- * Minimum delay between two trigger events for this trigger in ms.
- *
- * Range: 0 - 60000
- */
- u16 pacing;
-
- /*
- * The measurement data source for this trigger.
- *
- * Range: CONF_TRIG_METRIC_*
- */
- u8 metric;
-
- /*
- * The trigger type of this trigger.
- *
- * Range: CONF_TRIG_EVENT_TYPE_*
- */
- u8 type;
-
- /*
- * The direction of the trigger.
- *
- * Range: CONF_TRIG_EVENT_DIR_*
- */
- u8 direction;
-
- /*
- * Hysteresis range of the trigger around the threshold (in dB)
- *
- * Range: u8
- */
- u8 hysteresis;
-
- /*
- * Index of the trigger rule.
- *
- * Range: 0 - CONF_MAX_RSSI_SNR_TRIGGERS-1
- */
- u8 index;
-
- /*
- * Enable / disable this rule (to use for clearing rules.)
- *
- * Range: 1 - Enabled, 2 - Not enabled
- */
- u8 enable;
-};
-
struct conf_sig_weights {
/*
@@ -686,12 +874,6 @@ struct conf_conn_settings {
u8 ps_poll_threshold;
/*
- * Configuration of signal (rssi/snr) triggers.
- */
- u8 sig_trigger_count;
- struct conf_sig_trigger sig_trigger[CONF_MAX_RSSI_SNR_TRIGGERS];
-
- /*
* Configuration of signal average weights.
*/
struct conf_sig_weights sig_weights;
@@ -721,6 +903,22 @@ struct conf_conn_settings {
* Range 0 - 255
*/
u8 psm_entry_retries;
+
+ /*
+ *
+ * Specifies the interval of the connection keep-alive null-func
+ * frame in ms.
+ *
+ * Range: 1000 - 3600000
+ */
+ u32 keep_alive_interval;
+
+ /*
+ * Maximum listen interval supported by the driver in units of beacons.
+ *
+ * Range: u16
+ */
+ u8 max_listen_interval;
};
enum {
@@ -782,6 +980,43 @@ struct conf_pm_config_settings {
bool host_fast_wakeup_support;
};
+struct conf_roam_trigger_settings {
+ /*
+ * The minimum interval between two trigger events.
+ *
+ * Range: 0 - 60000 ms
+ */
+ u16 trigger_pacing;
+
+ /*
+ * The weight for rssi/beacon average calculation
+ *
+ * Range: 0 - 255
+ */
+ u8 avg_weight_rssi_beacon;
+
+ /*
+ * The weight for rssi/data frame average calculation
+ *
+ * Range: 0 - 255
+ */
+ u8 avg_weight_rssi_data;
+
+ /*
+ * The weight for snr/beacon average calculation
+ *
+ * Range: 0 - 255
+ */
+ u8 avg_weight_snr_beacon;
+
+ /*
+ * The weight for snr/data frame average calculation
+ *
+ * Range: 0 - 255
+ */
+ u8 avg_weight_snr_data;
+};
+
struct conf_drv_settings {
struct conf_sg_settings sg;
struct conf_rx_settings rx;
@@ -790,6 +1025,7 @@ struct conf_drv_settings {
struct conf_init_settings init;
struct conf_itrim_settings itrim;
struct conf_pm_config_settings pm_config;
+ struct conf_roam_trigger_settings roam_trigger;
};
#endif
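Because the soft-gemini settings are now a flat array indexed by the CONF_SG_* enum, overriding a single value before wl1271_hw_init() is a one-line assignment; the snippet below is illustrative (the calling context is an assumption), while the index and the 5000 ms default come from this patch.

/* Illustrative: widen the DHCP protection window before hw init. */
wl->conf.sg.params[CONF_SG_DHCP_TIME] = 10000;	/* ms, default is 5000 */
wl->conf.sg.state = CONF_SG_PROTECTIVE;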
diff --git a/drivers/net/wireless/wl12xx/wl1271_debugfs.c b/drivers/net/wireless/wl12xx/wl1271_debugfs.c
index 3f7ff8d0cf5a..c239ef4d0b8d 100644
--- a/drivers/net/wireless/wl12xx/wl1271_debugfs.c
+++ b/drivers/net/wireless/wl12xx/wl1271_debugfs.c
@@ -29,6 +29,7 @@
#include "wl1271.h"
#include "wl1271_acx.h"
#include "wl1271_ps.h"
+#include "wl1271_io.h"
/* ms */
#define WL1271_DEBUGFS_STATS_LIFETIME 1000
@@ -277,13 +278,10 @@ static ssize_t gpio_power_write(struct file *file,
goto out;
}
- if (value) {
- wl->set_power(true);
- set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
- } else {
- wl->set_power(false);
- clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
- }
+ if (value)
+ wl1271_power_on(wl);
+ else
+ wl1271_power_off(wl);
out:
mutex_unlock(&wl->mutex);
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.c b/drivers/net/wireless/wl12xx/wl1271_event.c
index 7468ef10194b..cf37aa6eb137 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.c
+++ b/drivers/net/wireless/wl12xx/wl1271_event.c
@@ -23,7 +23,6 @@
#include "wl1271.h"
#include "wl1271_reg.h"
-#include "wl1271_spi.h"
#include "wl1271_io.h"
#include "wl1271_event.h"
#include "wl1271_ps.h"
@@ -32,34 +31,24 @@
static int wl1271_event_scan_complete(struct wl1271 *wl,
struct event_mailbox *mbox)
{
- int size = sizeof(struct wl12xx_probe_req_template);
wl1271_debug(DEBUG_EVENT, "status: 0x%x",
mbox->scheduled_scan_status);
if (test_bit(WL1271_FLAG_SCANNING, &wl->flags)) {
if (wl->scan.state == WL1271_SCAN_BAND_DUAL) {
- wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
- NULL, size);
/* 2.4 GHz band scanned, scan 5 GHz band, pretend
* to the wl1271_cmd_scan function that we are not
* scanning as it checks that.
*/
clear_bit(WL1271_FLAG_SCANNING, &wl->flags);
+ /* FIXME: ie missing! */
wl1271_cmd_scan(wl, wl->scan.ssid, wl->scan.ssid_len,
+ NULL, 0,
wl->scan.active,
wl->scan.high_prio,
WL1271_SCAN_BAND_5_GHZ,
wl->scan.probe_requests);
} else {
- if (wl->scan.state == WL1271_SCAN_BAND_2_4_GHZ)
- wl1271_cmd_template_set(wl,
- CMD_TEMPL_CFG_PROBE_REQ_2_4,
- NULL, size);
- else
- wl1271_cmd_template_set(wl,
- CMD_TEMPL_CFG_PROBE_REQ_5,
- NULL, size);
-
mutex_unlock(&wl->mutex);
ieee80211_scan_completed(wl->hw, false);
mutex_lock(&wl->mutex);
@@ -92,16 +81,9 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
true);
} else {
- wl1271_error("PSM entry failed, giving up.\n");
- /* FIXME: this may need to be reconsidered. for now it
- is not possible to indicate to the mac80211
- afterwards that PSM entry failed. To maximize
- functionality (receiving data and remaining
- associated) make sure that we are in sync with the
- AP in regard of PSM mode. */
- ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
- false);
+ wl1271_info("No ack to nullfunc from AP.");
wl->psm_entry_retry = 0;
+ *beacon_loss = true;
}
break;
case EVENT_ENTER_POWER_SAVE_SUCCESS:
@@ -143,6 +125,24 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
return ret;
}
+static void wl1271_event_rssi_trigger(struct wl1271 *wl,
+ struct event_mailbox *mbox)
+{
+ enum nl80211_cqm_rssi_threshold_event event;
+ s8 metric = mbox->rssi_snr_trigger_metric[0];
+
+ wl1271_debug(DEBUG_EVENT, "RSSI trigger metric: %d", metric);
+
+ if (metric <= wl->rssi_thold)
+ event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
+ else
+ event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
+
+ if (event != wl->last_rssi_event)
+ ieee80211_cqm_rssi_notify(wl->vif, event, GFP_KERNEL);
+ wl->last_rssi_event = event;
+}
+
static void wl1271_event_mbox_dump(struct event_mailbox *mbox)
{
wl1271_debug(DEBUG_EVENT, "MBOX DUMP:");
@@ -172,10 +172,13 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
* The BSS_LOSE_EVENT_ID is only needed while psm (and hence beacon
* filtering) is enabled. Without PSM, the stack will receive all
* beacons and can detect beacon loss by itself.
+ *
+ * As there's a possibility that the driver disables PSM before receiving
+ * BSS_LOSE_EVENT, beacon loss has to be reported to the stack.
+ *
*/
- if (vector & BSS_LOSE_EVENT_ID &&
- test_bit(WL1271_FLAG_PSM, &wl->flags)) {
- wl1271_debug(DEBUG_EVENT, "BSS_LOSE_EVENT");
+ if (vector & BSS_LOSE_EVENT_ID) {
+ wl1271_info("Beacon loss detected.");
/* indicate to the stack, that beacons have been lost */
beacon_loss = true;
@@ -188,17 +191,15 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
return ret;
}
- if (wl->vif && beacon_loss) {
- /* Obviously, it's dangerous to release the mutex while
- we are holding many of the variables in the wl struct.
- That's why it's done last in the function, and care must
- be taken that nothing more is done after this function
- returns. */
- mutex_unlock(&wl->mutex);
- ieee80211_beacon_loss(wl->vif);
- mutex_lock(&wl->mutex);
+ if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) {
+ wl1271_debug(DEBUG_EVENT, "RSSI_SNR_TRIGGER_0_EVENT");
+ if (wl->vif)
+ wl1271_event_rssi_trigger(wl, mbox);
}
+ if (wl->vif && beacon_loss)
+ ieee80211_connection_loss(wl->vif);
+
return 0;
}
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.h b/drivers/net/wireless/wl12xx/wl1271_event.h
index 278f9206aa56..58371008f270 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.h
+++ b/drivers/net/wireless/wl12xx/wl1271_event.h
@@ -38,6 +38,14 @@
*/
enum {
+ RSSI_SNR_TRIGGER_0_EVENT_ID = BIT(0),
+ RSSI_SNR_TRIGGER_1_EVENT_ID = BIT(1),
+ RSSI_SNR_TRIGGER_2_EVENT_ID = BIT(2),
+ RSSI_SNR_TRIGGER_3_EVENT_ID = BIT(3),
+ RSSI_SNR_TRIGGER_4_EVENT_ID = BIT(4),
+ RSSI_SNR_TRIGGER_5_EVENT_ID = BIT(5),
+ RSSI_SNR_TRIGGER_6_EVENT_ID = BIT(6),
+ RSSI_SNR_TRIGGER_7_EVENT_ID = BIT(7),
MEASUREMENT_START_EVENT_ID = BIT(8),
MEASUREMENT_COMPLETE_EVENT_ID = BIT(9),
SCAN_COMPLETE_EVENT_ID = BIT(10),
diff --git a/drivers/net/wireless/wl12xx/wl1271_init.c b/drivers/net/wireless/wl12xx/wl1271_init.c
index d189e8fe05a6..4447af1557f5 100644
--- a/drivers/net/wireless/wl12xx/wl1271_init.c
+++ b/drivers/net/wireless/wl12xx/wl1271_init.c
@@ -52,50 +52,65 @@ static int wl1271_init_hwenc_config(struct wl1271 *wl)
int wl1271_init_templates_config(struct wl1271 *wl)
{
- int ret;
+ int ret, i;
/* send empty templates for fw memory reservation */
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4, NULL,
- sizeof(struct wl12xx_probe_req_template));
+ sizeof(struct wl12xx_probe_req_template),
+ 0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
if (wl1271_11a_enabled()) {
+ size_t size = sizeof(struct wl12xx_probe_req_template);
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5,
- NULL,
- sizeof(struct wl12xx_probe_req_template));
+ NULL, size, 0,
+ WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
}
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL,
- sizeof(struct wl12xx_null_data_template));
+ sizeof(struct wl12xx_null_data_template),
+ 0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, NULL,
- sizeof(struct wl12xx_ps_poll_template));
+ sizeof(struct wl12xx_ps_poll_template),
+ 0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL,
sizeof
- (struct wl12xx_qos_null_data_template));
+ (struct wl12xx_qos_null_data_template),
+ 0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PROBE_RESPONSE, NULL,
sizeof
- (struct wl12xx_probe_resp_template));
+ (struct wl12xx_probe_resp_template),
+ 0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON, NULL,
sizeof
- (struct wl12xx_beacon_template));
+ (struct wl12xx_beacon_template),
+ 0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
+ for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV, NULL,
+ WL1271_CMD_TEMPL_MAX_SIZE, i,
+ WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+ }
+
return 0;
}
@@ -161,11 +176,11 @@ int wl1271_init_pta(struct wl1271 *wl)
{
int ret;
- ret = wl1271_acx_sg_enable(wl);
+ ret = wl1271_acx_sg_cfg(wl);
if (ret < 0)
return ret;
- ret = wl1271_acx_sg_cfg(wl);
+ ret = wl1271_acx_sg_enable(wl, wl->sg_enabled);
if (ret < 0)
return ret;
@@ -237,7 +252,7 @@ int wl1271_hw_init(struct wl1271 *wl)
goto out_free_memmap;
/* Initialize connection monitoring thresholds */
- ret = wl1271_acx_conn_monit_params(wl);
+ ret = wl1271_acx_conn_monit_params(wl, false);
if (ret < 0)
goto out_free_memmap;
@@ -325,6 +340,24 @@ int wl1271_hw_init(struct wl1271 *wl)
if (ret < 0)
goto out_free_memmap;
+ /* disable all keep-alive templates */
+ for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
+ ret = wl1271_acx_keep_alive_config(wl, i,
+ ACX_KEEP_ALIVE_TPL_INVALID);
+ if (ret < 0)
+ goto out_free_memmap;
+ }
+
+ /* disable the keep-alive feature */
+ ret = wl1271_acx_keep_alive_mode(wl, false);
+ if (ret < 0)
+ goto out_free_memmap;
+
+ /* Configure rssi/snr averaging weights */
+ ret = wl1271_acx_rssi_snr_avg_weights(wl);
+ if (ret < 0)
+ goto out_free_memmap;
+
return 0;
out_free_memmap:
diff --git a/drivers/net/wireless/wl12xx/wl1271_io.c b/drivers/net/wireless/wl12xx/wl1271_io.c
index 5cd94d5666c2..c8759acef131 100644
--- a/drivers/net/wireless/wl12xx/wl1271_io.c
+++ b/drivers/net/wireless/wl12xx/wl1271_io.c
@@ -28,30 +28,29 @@
#include "wl1271.h"
#include "wl12xx_80211.h"
-#include "wl1271_spi.h"
#include "wl1271_io.h"
-static int wl1271_translate_addr(struct wl1271 *wl, int addr)
+#define OCP_CMD_LOOP 32
+
+#define OCP_CMD_WRITE 0x1
+#define OCP_CMD_READ 0x2
+
+#define OCP_READY_MASK BIT(18)
+#define OCP_STATUS_MASK (BIT(16) | BIT(17))
+
+#define OCP_STATUS_NO_RESP 0x00000
+#define OCP_STATUS_OK 0x10000
+#define OCP_STATUS_REQ_FAILED 0x20000
+#define OCP_STATUS_RESP_ERROR 0x30000
+
+void wl1271_disable_interrupts(struct wl1271 *wl)
{
- /*
- * To translate, first check to which window of addresses the
- * particular address belongs. Then subtract the starting address
- * of that window from the address. Then, add offset of the
- * translated region.
- *
- * The translated regions occur next to each other in physical device
- * memory, so just add the sizes of the preceeding address regions to
- * get the offset to the new region.
- *
- * Currently, only the two first regions are addressed, and the
- * assumption is that all addresses will fall into either of those
- * two.
- */
- if ((addr >= wl->part.reg.start) &&
- (addr < wl->part.reg.start + wl->part.reg.size))
- return addr - wl->part.reg.start + wl->part.mem.size;
- else
- return addr - wl->part.mem.start;
+ wl->if_ops->disable_irq(wl);
+}
+
+void wl1271_enable_interrupts(struct wl1271 *wl)
+{
+ wl->if_ops->enable_irq(wl);
}
/* Set the SPI partitions to access the chip addresses
@@ -117,54 +116,12 @@ int wl1271_set_partition(struct wl1271 *wl,
void wl1271_io_reset(struct wl1271 *wl)
{
- wl1271_spi_reset(wl);
+ wl->if_ops->reset(wl);
}
void wl1271_io_init(struct wl1271 *wl)
{
- wl1271_spi_init(wl);
-}
-
-void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed)
-{
- wl1271_spi_raw_write(wl, addr, buf, len, fixed);
-}
-
-void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed)
-{
- wl1271_spi_raw_read(wl, addr, buf, len, fixed);
-}
-
-void wl1271_read(struct wl1271 *wl, int addr, void *buf, size_t len,
- bool fixed)
-{
- int physical;
-
- physical = wl1271_translate_addr(wl, addr);
-
- wl1271_spi_raw_read(wl, physical, buf, len, fixed);
-}
-
-void wl1271_write(struct wl1271 *wl, int addr, void *buf, size_t len,
- bool fixed)
-{
- int physical;
-
- physical = wl1271_translate_addr(wl, addr);
-
- wl1271_spi_raw_write(wl, physical, buf, len, fixed);
-}
-
-u32 wl1271_read32(struct wl1271 *wl, int addr)
-{
- return wl1271_raw_read32(wl, wl1271_translate_addr(wl, addr));
-}
-
-void wl1271_write32(struct wl1271 *wl, int addr, u32 val)
-{
- wl1271_raw_write32(wl, wl1271_translate_addr(wl, addr), val);
+ wl->if_ops->init(wl);
}
void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val)
diff --git a/drivers/net/wireless/wl12xx/wl1271_io.h b/drivers/net/wireless/wl12xx/wl1271_io.h
index fa9a0b35788f..bc806c74c63a 100644
--- a/drivers/net/wireless/wl12xx/wl1271_io.h
+++ b/drivers/net/wireless/wl12xx/wl1271_io.h
@@ -25,44 +25,145 @@
#ifndef __WL1271_IO_H__
#define __WL1271_IO_H__
+#include "wl1271_reg.h"
+
+#define HW_ACCESS_MEMORY_MAX_RANGE 0x1FFC0
+
+#define HW_PARTITION_REGISTERS_ADDR 0x1FFC0
+#define HW_PART0_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR)
+#define HW_PART0_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 4)
+#define HW_PART1_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 8)
+#define HW_PART1_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 12)
+#define HW_PART2_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 16)
+#define HW_PART2_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 20)
+#define HW_PART3_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 24)
+
+#define HW_ACCESS_REGISTER_SIZE 4
+
+#define HW_ACCESS_PRAM_MAX_RANGE 0x3c000
+
struct wl1271;
+void wl1271_disable_interrupts(struct wl1271 *wl);
+void wl1271_enable_interrupts(struct wl1271 *wl);
+
void wl1271_io_reset(struct wl1271 *wl);
void wl1271_io_init(struct wl1271 *wl);
-/* Raw target IO, address is not translated */
-void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed);
-void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed);
+static inline struct device *wl1271_wl_to_dev(struct wl1271 *wl)
+{
+ return wl->if_ops->dev(wl);
+}
-/* Translated target IO */
-void wl1271_read(struct wl1271 *wl, int addr, void *buf, size_t len,
- bool fixed);
-void wl1271_write(struct wl1271 *wl, int addr, void *buf, size_t len,
- bool fixed);
-u32 wl1271_read32(struct wl1271 *wl, int addr);
-void wl1271_write32(struct wl1271 *wl, int addr, u32 val);
-/* Top Register IO */
-void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val);
-u16 wl1271_top_reg_read(struct wl1271 *wl, int addr);
+/* Raw target IO, address is not translated */
+static inline void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf,
+ size_t len, bool fixed)
+{
+ wl->if_ops->write(wl, addr, buf, len, fixed);
+}
-int wl1271_set_partition(struct wl1271 *wl,
- struct wl1271_partition_set *p);
+static inline void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf,
+ size_t len, bool fixed)
+{
+ wl->if_ops->read(wl, addr, buf, len, fixed);
+}
static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr)
{
wl1271_raw_read(wl, addr, &wl->buffer_32,
sizeof(wl->buffer_32), false);
- return wl->buffer_32;
+ return le32_to_cpu(wl->buffer_32);
}
static inline void wl1271_raw_write32(struct wl1271 *wl, int addr, u32 val)
{
- wl->buffer_32 = val;
+ wl->buffer_32 = cpu_to_le32(val);
wl1271_raw_write(wl, addr, &wl->buffer_32,
sizeof(wl->buffer_32), false);
}
+
+/* Translated target IO */
+static inline int wl1271_translate_addr(struct wl1271 *wl, int addr)
+{
+ /*
+ * To translate, first check to which window of addresses the
+ * particular address belongs. Then subtract the starting address
+ * of that window from the address. Then, add offset of the
+ * translated region.
+ *
+ * The translated regions occur next to each other in physical device
+ * memory, so just add the sizes of the preceding address regions to
+ * get the offset to the new region.
+ *
+ * Currently, only the two first regions are addressed, and the
+ * assumption is that all addresses will fall into either of those
+ * two.
+ */
+ if ((addr >= wl->part.reg.start) &&
+ (addr < wl->part.reg.start + wl->part.reg.size))
+ return addr - wl->part.reg.start + wl->part.mem.size;
+ else
+ return addr - wl->part.mem.start;
+}
+
+static inline void wl1271_read(struct wl1271 *wl, int addr, void *buf,
+ size_t len, bool fixed)
+{
+ int physical;
+
+ physical = wl1271_translate_addr(wl, addr);
+
+ wl1271_raw_read(wl, physical, buf, len, fixed);
+}
+
+static inline void wl1271_write(struct wl1271 *wl, int addr, void *buf,
+ size_t len, bool fixed)
+{
+ int physical;
+
+ physical = wl1271_translate_addr(wl, addr);
+
+ wl1271_raw_write(wl, physical, buf, len, fixed);
+}
+
+static inline u32 wl1271_read32(struct wl1271 *wl, int addr)
+{
+ return wl1271_raw_read32(wl, wl1271_translate_addr(wl, addr));
+}
+
+static inline void wl1271_write32(struct wl1271 *wl, int addr, u32 val)
+{
+ wl1271_raw_write32(wl, wl1271_translate_addr(wl, addr), val);
+}
+
+static inline void wl1271_power_off(struct wl1271 *wl)
+{
+ wl->if_ops->power(wl, false);
+ clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
+}
+
+static inline void wl1271_power_on(struct wl1271 *wl)
+{
+ wl->if_ops->power(wl, true);
+ set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
+}
+
+
+/* Top Register IO */
+void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val);
+u16 wl1271_top_reg_read(struct wl1271 *wl, int addr);
+
+int wl1271_set_partition(struct wl1271 *wl,
+ struct wl1271_partition_set *p);
+
+/* Functions from wl1271_main.c */
+
+int wl1271_register_hw(struct wl1271 *wl);
+void wl1271_unregister_hw(struct wl1271 *wl);
+int wl1271_init_ieee80211(struct wl1271 *wl);
+struct ieee80211_hw *wl1271_alloc_hw(void);
+int wl1271_free_hw(struct wl1271 *wl);
+
#endif
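A worked example of the address translation now inlined above; the partition values are hypothetical and chosen only to make the arithmetic visible.

/* Hypothetical partition: memory window at 0x00000 (size 0x16800),
 * register window at 0x300000 (size 0x8800). */
wl->part.mem.start = 0x00000;  wl->part.mem.size = 0x16800;
wl->part.reg.start = 0x300000; wl->part.reg.size = 0x8800;

/* A register address falls into the second window, so it is rebased
 * right after the memory window: */
wl1271_translate_addr(wl, 0x300004);	/* 0x300004 - 0x300000 + 0x16800 = 0x16804 */

/* A memory address is simply offset from the start of its window: */
wl1271_translate_addr(wl, 0x00100);	/* 0x00100 - 0x00000 = 0x00100 */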
diff --git a/drivers/net/wireless/wl12xx/wl1271_main.c b/drivers/net/wireless/wl12xx/wl1271_main.c
index 65a1aeba2419..b7d9137851ac 100644
--- a/drivers/net/wireless/wl12xx/wl1271_main.c
+++ b/drivers/net/wireless/wl12xx/wl1271_main.c
@@ -22,23 +22,19 @@
*/
#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
#include <linux/firmware.h>
#include <linux/delay.h>
-#include <linux/irq.h>
#include <linux/spi/spi.h>
#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
-#include <linux/spi/wl12xx.h>
#include <linux/inetdevice.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include "wl1271.h"
#include "wl12xx_80211.h"
#include "wl1271_reg.h"
-#include "wl1271_spi.h"
#include "wl1271_io.h"
#include "wl1271_event.h"
#include "wl1271_tx.h"
@@ -54,17 +50,57 @@
static struct conf_drv_settings default_conf = {
.sg = {
- .per_threshold = 7500,
- .max_scan_compensation_time = 120000,
- .nfs_sample_interval = 400,
- .load_ratio = 50,
- .auto_ps_mode = 0,
- .probe_req_compensation = 170,
- .scan_window_compensation = 50,
- .antenna_config = 0,
- .beacon_miss_threshold = 60,
- .rate_adaptation_threshold = CONF_HW_BIT_RATE_12MBPS,
- .rate_adaptation_snr = 0
+ .params = {
+ [CONF_SG_BT_PER_THRESHOLD] = 7500,
+ [CONF_SG_HV3_MAX_OVERRIDE] = 0,
+ [CONF_SG_BT_NFS_SAMPLE_INTERVAL] = 400,
+ [CONF_SG_BT_LOAD_RATIO] = 50,
+ [CONF_SG_AUTO_PS_MODE] = 0,
+ [CONF_SG_AUTO_SCAN_PROBE_REQ] = 170,
+ [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50,
+ [CONF_SG_ANTENNA_CONFIGURATION] = 0,
+ [CONF_SG_BEACON_MISS_PERCENT] = 60,
+ [CONF_SG_RATE_ADAPT_THRESH] = 12,
+ [CONF_SG_RATE_ADAPT_SNR] = 0,
+ [CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_BR] = 10,
+ [CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_BR] = 30,
+ [CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_BR] = 8,
+ [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_BR] = 20,
+ [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_BR] = 50,
+ /* Note: with UPSD, this should be 4 */
+ [CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_BR] = 8,
+ [CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_EDR] = 7,
+ [CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_EDR] = 25,
+ [CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_EDR] = 20,
+ /* Note: with UPSD, this should be 15 */
+ [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_EDR] = 8,
+ /* Note: with UPSD, this should be 50 */
+ [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_EDR] = 40,
+ /* Note: with UPSD, this should be 10 */
+ [CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_EDR] = 20,
+ [CONF_SG_RXT] = 1200,
+ [CONF_SG_TXT] = 1000,
+ [CONF_SG_ADAPTIVE_RXT_TXT] = 1,
+ [CONF_SG_PS_POLL_TIMEOUT] = 10,
+ [CONF_SG_UPSD_TIMEOUT] = 10,
+ [CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MIN_EDR] = 7,
+ [CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MAX_EDR] = 15,
+ [CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_MASTER_EDR] = 15,
+ [CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MIN_EDR] = 8,
+ [CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MAX_EDR] = 20,
+ [CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_SLAVE_EDR] = 15,
+ [CONF_SG_WLAN_ACTIVE_BT_ACL_MIN_BR] = 20,
+ [CONF_SG_WLAN_ACTIVE_BT_ACL_MAX_BR] = 50,
+ [CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_BR] = 10,
+ [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3] = 200,
+ [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP] = 800,
+ [CONF_SG_PASSIVE_SCAN_A2DP_BT_TIME] = 75,
+ [CONF_SG_PASSIVE_SCAN_A2DP_WLAN_TIME] = 15,
+ [CONF_SG_HV3_MAX_SERVED] = 6,
+ [CONF_SG_DHCP_TIME] = 5000,
+ [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP] = 100,
+ },
+ .state = CONF_SG_PROTECTIVE,
},
.rx = {
.rx_msdu_life_time = 512000,
@@ -81,8 +117,7 @@ static struct conf_drv_settings default_conf = {
.tx = {
.tx_energy_detection = 0,
.rc_conf = {
- .enabled_rates = CONF_HW_BIT_RATE_1MBPS |
- CONF_HW_BIT_RATE_2MBPS,
+ .enabled_rates = 0,
.short_retry_limit = 10,
.long_retry_limit = 10,
.aflags = 0
@@ -179,11 +214,13 @@ static struct conf_drv_settings default_conf = {
},
.frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD,
.tx_compl_timeout = 700,
- .tx_compl_threshold = 4
+ .tx_compl_threshold = 4,
+ .basic_rate = CONF_HW_BIT_RATE_1MBPS,
+ .basic_rate_5 = CONF_HW_BIT_RATE_6MBPS,
},
.conn = {
.wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
- .listen_interval = 0,
+ .listen_interval = 1,
.bcn_filt_mode = CONF_BCN_FILT_MODE_ENABLED,
.bcn_filt_ie_count = 1,
.bcn_filt_ie = {
@@ -198,38 +235,11 @@ static struct conf_drv_settings default_conf = {
.broadcast_timeout = 20000,
.rx_broadcast_in_ps = 1,
.ps_poll_threshold = 20,
- .sig_trigger_count = 2,
- .sig_trigger = {
- [0] = {
- .threshold = -75,
- .pacing = 500,
- .metric = CONF_TRIG_METRIC_RSSI_BEACON,
- .type = CONF_TRIG_EVENT_TYPE_EDGE,
- .direction = CONF_TRIG_EVENT_DIR_LOW,
- .hysteresis = 2,
- .index = 0,
- .enable = 1
- },
- [1] = {
- .threshold = -75,
- .pacing = 500,
- .metric = CONF_TRIG_METRIC_RSSI_BEACON,
- .type = CONF_TRIG_EVENT_TYPE_EDGE,
- .direction = CONF_TRIG_EVENT_DIR_HIGH,
- .hysteresis = 2,
- .index = 1,
- .enable = 1
- }
- },
- .sig_weights = {
- .rssi_bcn_avg_weight = 10,
- .rssi_pkt_avg_weight = 10,
- .snr_bcn_avg_weight = 10,
- .snr_pkt_avg_weight = 10
- },
.bet_enable = CONF_BET_MODE_ENABLE,
.bet_max_consecutive = 10,
- .psm_entry_retries = 3
+ .psm_entry_retries = 3,
+ .keep_alive_interval = 55000,
+ .max_listen_interval = 20,
},
.init = {
.radioparam = {
@@ -243,9 +253,32 @@ static struct conf_drv_settings default_conf = {
.pm_config = {
.host_clk_settling_time = 5000,
.host_fast_wakeup_support = false
+ },
+ .roam_trigger = {
+ /* FIXME: due to firmware bug, must use value 1 for now */
+ .trigger_pacing = 1,
+ .avg_weight_rssi_beacon = 20,
+ .avg_weight_rssi_data = 10,
+ .avg_weight_snr_beacon = 20,
+ .avg_weight_snr_data = 10
}
};
+static void wl1271_device_release(struct device *dev)
+{
+
+}
+
+static struct platform_device wl1271_device = {
+ .name = "wl1271",
+ .id = -1,
+
+ /* device model insists on having a release function */
+ .dev = {
+ .release = wl1271_device_release,
+ },
+};
+
static LIST_HEAD(wl_list);
static void wl1271_conf_init(struct wl1271 *wl)
@@ -298,7 +331,7 @@ static int wl1271_plt_init(struct wl1271 *wl)
goto out_free_memmap;
/* Initialize connection monitoring thresholds */
- ret = wl1271_acx_conn_monit_params(wl);
+ ret = wl1271_acx_conn_monit_params(wl, false);
if (ret < 0)
goto out_free_memmap;
@@ -365,30 +398,14 @@ static int wl1271_plt_init(struct wl1271 *wl)
return ret;
}
-static void wl1271_disable_interrupts(struct wl1271 *wl)
-{
- disable_irq(wl->irq);
-}
-
-static void wl1271_power_off(struct wl1271 *wl)
-{
- wl->set_power(false);
- clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
-}
-
-static void wl1271_power_on(struct wl1271 *wl)
-{
- wl->set_power(true);
- set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
-}
-
static void wl1271_fw_status(struct wl1271 *wl,
struct wl1271_fw_status *status)
{
+ struct timespec ts;
u32 total = 0;
int i;
- wl1271_read(wl, FW_STATUS_ADDR, status, sizeof(*status), false);
+ wl1271_raw_read(wl, FW_STATUS_ADDR, status, sizeof(*status), false);
wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
"drv_rx_counter = %d, tx_results_counter = %d)",
@@ -413,14 +430,19 @@ static void wl1271_fw_status(struct wl1271 *wl,
ieee80211_queue_work(wl->hw, &wl->tx_work);
/* update the host-chipset time offset */
- wl->time_offset = jiffies_to_usecs(jiffies) -
- le32_to_cpu(status->fw_localtime);
+ getnstimeofday(&ts);
+ wl->time_offset = (timespec_to_ns(&ts) >> 10) -
+ (s64)le32_to_cpu(status->fw_localtime);
}
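
The updated host/chipset time-offset calculation shifts the host time right by 10 instead of dividing by 1000, so it effectively works in units of 1.024 microseconds, presumably to keep the math cheap. A quick standalone check of what that shift does:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        int64_t ns = 1000000000LL;        /* one second, in nanoseconds */
        int64_t approx_us = ns >> 10;     /* divide by 1024 instead of 1000 */

        /* prints 976562: one "unit" is roughly 1.024 us */
        printf("%lld\n", (long long)approx_us);
        return 0;
    }
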
+#define WL1271_IRQ_MAX_LOOPS 10
+
static void wl1271_irq_work(struct work_struct *work)
{
int ret;
u32 intr;
+ int loopcount = WL1271_IRQ_MAX_LOOPS;
+ unsigned long flags;
struct wl1271 *wl =
container_of(work, struct wl1271, irq_work);
@@ -428,91 +450,78 @@ static void wl1271_irq_work(struct work_struct *work)
wl1271_debug(DEBUG_IRQ, "IRQ work");
- if (wl->state == WL1271_STATE_OFF)
+ if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
ret = wl1271_ps_elp_wakeup(wl, true);
if (ret < 0)
goto out;
- wl1271_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
-
- wl1271_fw_status(wl, wl->fw_status);
- intr = le32_to_cpu(wl->fw_status->intr);
- if (!intr) {
- wl1271_debug(DEBUG_IRQ, "Zero interrupt received.");
- goto out_sleep;
- }
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ while (test_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags) && loopcount) {
+ clear_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+ loopcount--;
+
+ wl1271_fw_status(wl, wl->fw_status);
+ intr = le32_to_cpu(wl->fw_status->intr);
+ if (!intr) {
+ wl1271_debug(DEBUG_IRQ, "Zero interrupt received.");
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ continue;
+ }
- intr &= WL1271_INTR_MASK;
+ intr &= WL1271_INTR_MASK;
- if (intr & WL1271_ACX_INTR_EVENT_A) {
- wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
- wl1271_event_handle(wl, 0);
- }
+ if (intr & WL1271_ACX_INTR_DATA) {
+ wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
- if (intr & WL1271_ACX_INTR_EVENT_B) {
- wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
- wl1271_event_handle(wl, 1);
- }
+ /* check for tx results */
+ if (wl->fw_status->tx_results_counter !=
+ (wl->tx_results_count & 0xff))
+ wl1271_tx_complete(wl);
- if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
- wl1271_debug(DEBUG_IRQ,
- "WL1271_ACX_INTR_INIT_COMPLETE");
+ wl1271_rx(wl, wl->fw_status);
+ }
- if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
- wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
+ if (intr & WL1271_ACX_INTR_EVENT_A) {
+ wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
+ wl1271_event_handle(wl, 0);
+ }
- if (intr & WL1271_ACX_INTR_DATA) {
- u8 tx_res_cnt = wl->fw_status->tx_results_counter -
- wl->tx_results_count;
+ if (intr & WL1271_ACX_INTR_EVENT_B) {
+ wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
+ wl1271_event_handle(wl, 1);
+ }
- wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
+ if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
+ wl1271_debug(DEBUG_IRQ,
+ "WL1271_ACX_INTR_INIT_COMPLETE");
- /* check for tx results */
- if (tx_res_cnt)
- wl1271_tx_complete(wl, tx_res_cnt);
+ if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
+ wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
- wl1271_rx(wl, wl->fw_status);
+ spin_lock_irqsave(&wl->wl_lock, flags);
}
-out_sleep:
- wl1271_write32(wl, ACX_REG_INTERRUPT_MASK,
- WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK));
+ if (test_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags))
+ ieee80211_queue_work(wl->hw, &wl->irq_work);
+ else
+ clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
wl1271_ps_elp_sleep(wl);
out:
mutex_unlock(&wl->mutex);
}
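
The reworked handler above, together with the hard-IRQ path added later in this patch for SDIO, forms a simple handshake: the hard IRQ sets an IRQ_PENDING flag and only queues the work item when an IRQ_RUNNING flag is not already set; the work item then drains causes in batches of at most WL1271_IRQ_MAX_LOOPS and requeues itself if more interrupts arrived meanwhile. A bus-agnostic sketch of that pattern, with plain booleans standing in for the driver's atomic flag bits and spinlock:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_LOOPS 10       /* mirrors WL1271_IRQ_MAX_LOOPS */

    static bool irq_pending;   /* stands in for WL1271_FLAG_IRQ_PENDING */
    static bool work_running;  /* stands in for WL1271_FLAG_IRQ_RUNNING */

    /* hard-IRQ side: mark new work, queue the handler only if not running */
    static void hard_irq(void)
    {
        irq_pending = true;
        if (!work_running) {
            work_running = true;
            /* ieee80211_queue_work(...) in the real driver */
        }
    }

    /* work side: drain pending causes in bounded batches, then requeue or stop */
    static void irq_work(void)
    {
        int loops = MAX_LOOPS;

        while (irq_pending && loops--) {
            irq_pending = false;
            /* read firmware status, handle RX, TX results and events */
        }

        if (irq_pending) {
            /* still more to do: requeue the work item */
        } else {
            work_running = false;
        }
    }

    int main(void)
    {
        hard_irq();
        irq_work();
        printf("running=%d pending=%d\n", work_running, irq_pending);
        return 0;
    }
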
-static irqreturn_t wl1271_irq(int irq, void *cookie)
-{
- struct wl1271 *wl;
- unsigned long flags;
-
- wl1271_debug(DEBUG_IRQ, "IRQ");
-
- wl = cookie;
-
- /* complete the ELP completion */
- spin_lock_irqsave(&wl->wl_lock, flags);
- if (wl->elp_compl) {
- complete(wl->elp_compl);
- wl->elp_compl = NULL;
- }
-
- ieee80211_queue_work(wl->hw, &wl->irq_work);
- spin_unlock_irqrestore(&wl->wl_lock, flags);
-
- return IRQ_HANDLED;
-}
-
static int wl1271_fetch_firmware(struct wl1271 *wl)
{
const struct firmware *fw;
int ret;
- ret = request_firmware(&fw, WL1271_FW_NAME, &wl->spi->dev);
+ ret = request_firmware(&fw, WL1271_FW_NAME, wl1271_wl_to_dev(wl));
if (ret < 0) {
wl1271_error("could not get firmware: %d", ret);
@@ -545,46 +554,12 @@ out:
return ret;
}
-static int wl1271_update_mac_addr(struct wl1271 *wl)
-{
- int ret = 0;
- u8 *nvs_ptr = (u8 *)wl->nvs->nvs;
-
- /* get mac address from the NVS */
- wl->mac_addr[0] = nvs_ptr[11];
- wl->mac_addr[1] = nvs_ptr[10];
- wl->mac_addr[2] = nvs_ptr[6];
- wl->mac_addr[3] = nvs_ptr[5];
- wl->mac_addr[4] = nvs_ptr[4];
- wl->mac_addr[5] = nvs_ptr[3];
-
- /* FIXME: if it is a zero-address, we should bail out. Now, instead,
- we randomize an address */
- if (is_zero_ether_addr(wl->mac_addr)) {
- static const u8 nokia_oui[3] = {0x00, 0x1f, 0xdf};
- memcpy(wl->mac_addr, nokia_oui, 3);
- get_random_bytes(wl->mac_addr + 3, 3);
-
- /* update this address to the NVS */
- nvs_ptr[11] = wl->mac_addr[0];
- nvs_ptr[10] = wl->mac_addr[1];
- nvs_ptr[6] = wl->mac_addr[2];
- nvs_ptr[5] = wl->mac_addr[3];
- nvs_ptr[4] = wl->mac_addr[4];
- nvs_ptr[3] = wl->mac_addr[5];
- }
-
- SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr);
-
- return ret;
-}
-
static int wl1271_fetch_nvs(struct wl1271 *wl)
{
const struct firmware *fw;
int ret;
- ret = request_firmware(&fw, WL1271_NVS_NAME, &wl->spi->dev);
+ ret = request_firmware(&fw, WL1271_NVS_NAME, wl1271_wl_to_dev(wl));
if (ret < 0) {
wl1271_error("could not get nvs file: %d", ret);
@@ -608,8 +583,6 @@ static int wl1271_fetch_nvs(struct wl1271 *wl)
memcpy(wl->nvs, fw->data, sizeof(struct wl1271_nvs_file));
- ret = wl1271_update_mac_addr(wl);
-
out:
release_firmware(fw);
@@ -826,15 +799,13 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 * The workqueue is slow to process the tx_queue and we need to stop
* the queue here, otherwise the queue will get too long.
*/
- if (skb_queue_len(&wl->tx_queue) >= WL1271_TX_QUEUE_MAX_LENGTH) {
- ieee80211_stop_queues(wl->hw);
+ if (skb_queue_len(&wl->tx_queue) >= WL1271_TX_QUEUE_HIGH_WATERMARK) {
+ wl1271_debug(DEBUG_TX, "op_tx: stopping queues");
- /*
- * FIXME: this is racy, the variable is not properly
- * protected. Maybe fix this by removing the stupid
- * variable altogether and checking the real queue state?
- */
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ ieee80211_stop_queues(wl->hw);
set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
}
return NETDEV_TX_OK;
@@ -882,7 +853,7 @@ static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
if (wl == wl_temp)
break;
}
- if (wl == NULL)
+ if (wl != wl_temp)
return NOTIFY_DONE;
/* Get the interface IP address for the device. "ifa" will become
@@ -929,13 +900,60 @@ static struct notifier_block wl1271_dev_notifier = {
static int wl1271_op_start(struct ieee80211_hw *hw)
{
+ wl1271_debug(DEBUG_MAC80211, "mac80211 start");
+
+ /*
+ * We have to delay the booting of the hardware because
+ * we need to know the local MAC address before downloading and
+ * initializing the firmware. The MAC address cannot be changed
+ * after boot, and without the proper MAC address, the firmware
+ * will not function properly.
+ *
+ * The MAC address is first known when the corresponding interface
+ * is added. That is where we will initialize the hardware.
+ */
+
+ return 0;
+}
+
+static void wl1271_op_stop(struct ieee80211_hw *hw)
+{
+ wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
+}
+
+static int wl1271_op_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
struct wl1271 *wl = hw->priv;
int retries = WL1271_BOOT_RETRIES;
int ret = 0;
- wl1271_debug(DEBUG_MAC80211, "mac80211 start");
+ wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
+ vif->type, vif->addr);
mutex_lock(&wl->mutex);
+ if (wl->vif) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ wl->vif = vif;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ wl->bss_type = BSS_TYPE_STA_BSS;
+ wl->set_bss_type = BSS_TYPE_STA_BSS;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ wl->bss_type = BSS_TYPE_IBSS;
+ wl->set_bss_type = BSS_TYPE_STA_BSS;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
if (wl->state != WL1271_STATE_OFF) {
wl1271_error("cannot start because not in off state: %d",
@@ -991,19 +1009,20 @@ out:
return ret;
}
-static void wl1271_op_stop(struct ieee80211_hw *hw)
+static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct wl1271 *wl = hw->priv;
int i;
- wl1271_info("down");
-
- wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
-
unregister_inetaddr_notifier(&wl1271_dev_notifier);
- list_del(&wl->list);
mutex_lock(&wl->mutex);
+ wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
+
+ wl1271_info("down");
+
+ list_del(&wl->list);
WARN_ON(wl->state != WL1271_STATE_ON);
@@ -1032,6 +1051,7 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
memset(wl->ssid, 0, IW_ESSID_MAX_SIZE + 1);
wl->ssid_len = 0;
wl->bss_type = MAX_BSS_TYPE;
+ wl->set_bss_type = MAX_BSS_TYPE;
wl->band = IEEE80211_BAND_2GHZ;
wl->rx_counter = 0;
@@ -1041,163 +1061,142 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
wl->tx_results_count = 0;
wl->tx_packets_count = 0;
wl->tx_security_last_seq = 0;
- wl->tx_security_seq_16 = 0;
- wl->tx_security_seq_32 = 0;
+ wl->tx_security_seq = 0;
wl->time_offset = 0;
wl->session_counter = 0;
wl->rate_set = CONF_TX_RATE_MASK_BASIC;
wl->sta_rate_set = 0;
wl->flags = 0;
+ wl->vif = NULL;
+ wl->filters = 0;
for (i = 0; i < NUM_TX_QUEUES; i++)
wl->tx_blocks_freed[i] = 0;
wl1271_debugfs_reset(wl);
+
+ kfree(wl->fw_status);
+ wl->fw_status = NULL;
+ kfree(wl->tx_res_if);
+ wl->tx_res_if = NULL;
+ kfree(wl->target_mem_map);
+ wl->target_mem_map = NULL;
+
mutex_unlock(&wl->mutex);
}
-static int wl1271_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static void wl1271_configure_filters(struct wl1271 *wl, unsigned int filters)
{
- struct wl1271 *wl = hw->priv;
- int ret = 0;
+ wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
+ wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
- wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
- vif->type, vif->addr);
+ /* combine requested filters with current filter config */
+ filters = wl->filters | filters;
- mutex_lock(&wl->mutex);
- if (wl->vif) {
- ret = -EBUSY;
- goto out;
+ wl1271_debug(DEBUG_FILTERS, "RX filters set: ");
+
+ if (filters & FIF_PROMISC_IN_BSS) {
+ wl1271_debug(DEBUG_FILTERS, " - FIF_PROMISC_IN_BSS");
+ wl->rx_config &= ~CFG_UNI_FILTER_EN;
+ wl->rx_config |= CFG_BSSID_FILTER_EN;
+ }
+ if (filters & FIF_BCN_PRBRESP_PROMISC) {
+ wl1271_debug(DEBUG_FILTERS, " - FIF_BCN_PRBRESP_PROMISC");
+ wl->rx_config &= ~CFG_BSSID_FILTER_EN;
+ wl->rx_config &= ~CFG_SSID_FILTER_EN;
}
+ if (filters & FIF_OTHER_BSS) {
+ wl1271_debug(DEBUG_FILTERS, " - FIF_OTHER_BSS");
+ wl->rx_config &= ~CFG_BSSID_FILTER_EN;
+ }
+ if (filters & FIF_CONTROL) {
+ wl1271_debug(DEBUG_FILTERS, " - FIF_CONTROL");
+ wl->rx_filter |= CFG_RX_CTL_EN;
+ }
+ if (filters & FIF_FCSFAIL) {
+ wl1271_debug(DEBUG_FILTERS, " - FIF_FCSFAIL");
+ wl->rx_filter |= CFG_RX_FCS_ERROR;
+ }
+}
- wl->vif = vif;
+static int wl1271_dummy_join(struct wl1271 *wl)
+{
+ int ret = 0;
+ /* we need to use a dummy BSSID for now */
+ static const u8 dummy_bssid[ETH_ALEN] = { 0x0b, 0xad, 0xde,
+ 0xad, 0xbe, 0xef };
- switch (vif->type) {
- case NL80211_IFTYPE_STATION:
- wl->bss_type = BSS_TYPE_STA_BSS;
- break;
- case NL80211_IFTYPE_ADHOC:
- wl->bss_type = BSS_TYPE_IBSS;
- break;
- default:
- ret = -EOPNOTSUPP;
+ memcpy(wl->bssid, dummy_bssid, ETH_ALEN);
+
+ /* pass through frames from all BSS */
+ wl1271_configure_filters(wl, FIF_OTHER_BSS);
+
+ ret = wl1271_cmd_join(wl, wl->set_bss_type);
+ if (ret < 0)
goto out;
- }
- /* FIXME: what if conf->mac_addr changes? */
+ set_bit(WL1271_FLAG_JOINED, &wl->flags);
out:
- mutex_unlock(&wl->mutex);
return ret;
}
-static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct wl1271 *wl = hw->priv;
-
- mutex_lock(&wl->mutex);
- wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
- wl->vif = NULL;
- mutex_unlock(&wl->mutex);
-}
-
-#if 0
-static int wl1271_op_config_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_if_conf *conf)
+static int wl1271_join(struct wl1271 *wl, bool set_assoc)
{
- struct wl1271 *wl = hw->priv;
- struct sk_buff *beacon;
int ret;
- wl1271_debug(DEBUG_MAC80211, "mac80211 config_interface bssid %pM",
- conf->bssid);
- wl1271_dump_ascii(DEBUG_MAC80211, "ssid: ", conf->ssid,
- conf->ssid_len);
+ /*
+ * One of the side effects of the JOIN command is that it clears
+ * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
+ * to a WPA/WPA2 access point will therefore kill the data-path.
+ * Currently there is no supported scenario for JOIN during
+ * association - if it becomes a supported scenario, the WPA/WPA2 keys
+ * must be handled somehow.
+ */
+ if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+ wl1271_info("JOIN while associated.");
- mutex_lock(&wl->mutex);
+ if (set_assoc)
+ set_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_cmd_join(wl, wl->set_bss_type);
if (ret < 0)
goto out;
- if (memcmp(wl->bssid, conf->bssid, ETH_ALEN)) {
- wl1271_debug(DEBUG_MAC80211, "bssid changed");
-
- memcpy(wl->bssid, conf->bssid, ETH_ALEN);
-
- ret = wl1271_cmd_join(wl);
- if (ret < 0)
- goto out_sleep;
-
- ret = wl1271_cmd_build_null_data(wl);
- if (ret < 0)
- goto out_sleep;
- }
-
- wl->ssid_len = conf->ssid_len;
- if (wl->ssid_len)
- memcpy(wl->ssid, conf->ssid, wl->ssid_len);
-
- if (conf->changed & IEEE80211_IFCC_BEACON) {
- beacon = ieee80211_beacon_get(hw, vif);
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON,
- beacon->data, beacon->len);
-
- if (ret < 0) {
- dev_kfree_skb(beacon);
- goto out_sleep;
- }
-
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PROBE_RESPONSE,
- beacon->data, beacon->len);
-
- dev_kfree_skb(beacon);
-
- if (ret < 0)
- goto out_sleep;
- }
-
-out_sleep:
- wl1271_ps_elp_sleep(wl);
-
-out:
- mutex_unlock(&wl->mutex);
-
- return ret;
-}
-#endif
-
-static int wl1271_join_channel(struct wl1271 *wl, int channel)
-{
- int ret = 0;
- /* we need to use a dummy BSSID for now */
- static const u8 dummy_bssid[ETH_ALEN] = { 0x0b, 0xad, 0xde,
- 0xad, 0xbe, 0xef };
+ set_bit(WL1271_FLAG_JOINED, &wl->flags);
- /* the dummy join is not required for ad-hoc */
- if (wl->bss_type == BSS_TYPE_IBSS)
+ if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
goto out;
- /* disable mac filter, so we hear everything */
- wl->rx_config &= ~CFG_BSSID_FILTER_EN;
+ /*
+ * The join command disables the keep-alive mode, shuts down its process,
+ * and also clears the template config, so we need to reset it all after
+ * the join. The acx_aid starts the keep-alive process, and the order
+ * of the commands below is relevant.
+ */
+ ret = wl1271_acx_keep_alive_mode(wl, true);
+ if (ret < 0)
+ goto out;
- wl->channel = channel;
- memcpy(wl->bssid, dummy_bssid, ETH_ALEN);
+ ret = wl1271_acx_aid(wl, wl->aid);
+ if (ret < 0)
+ goto out;
- ret = wl1271_cmd_join(wl);
+ ret = wl1271_cmd_build_klv_null_data(wl);
if (ret < 0)
goto out;
- set_bit(WL1271_FLAG_JOINED, &wl->flags);
+ ret = wl1271_acx_keep_alive_config(wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
+ ACX_KEEP_ALIVE_TPL_VALID);
+ if (ret < 0)
+ goto out;
out:
return ret;
}
-static int wl1271_unjoin_channel(struct wl1271 *wl)
+static int wl1271_unjoin(struct wl1271 *wl)
{
int ret;
@@ -1207,14 +1206,41 @@ static int wl1271_unjoin_channel(struct wl1271 *wl)
goto out;
clear_bit(WL1271_FLAG_JOINED, &wl->flags);
- wl->channel = 0;
memset(wl->bssid, 0, ETH_ALEN);
- wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
+
+ /* stop filtering packets based on bssid */
+ wl1271_configure_filters(wl, FIF_OTHER_BSS);
out:
return ret;
}
+static void wl1271_set_band_rate(struct wl1271 *wl)
+{
+ if (wl->band == IEEE80211_BAND_2GHZ)
+ wl->basic_rate_set = wl->conf.tx.basic_rate;
+ else
+ wl->basic_rate_set = wl->conf.tx.basic_rate_5;
+}
+
+static u32 wl1271_min_rate_get(struct wl1271 *wl)
+{
+ int i;
+ u32 rate = 0;
+
+ if (!wl->basic_rate_set) {
+ WARN_ON(1);
+ wl->basic_rate_set = wl->conf.tx.basic_rate;
+ }
+
+ for (i = 0; !rate; i++) {
+ if ((wl->basic_rate_set >> i) & 0x1)
+ rate = 1 << i;
+ }
+
+ return rate;
+}
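
wl1271_min_rate_get() above walks the bitmap until it finds the lowest set bit; the classic x & -x idiom gives the same answer in one step, for example:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t rate_set = 0x0150;               /* hypothetical rate bitmap */
        uint32_t min_rate = rate_set & -rate_set; /* isolate lowest set bit */

        printf("0x%x\n", (unsigned)min_rate);     /* prints 0x10 */
        return 0;
    }
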
+
static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
{
struct wl1271 *wl = hw->priv;
@@ -1231,38 +1257,62 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
mutex_lock(&wl->mutex);
- wl->band = conf->channel->band;
+ if (unlikely(wl->state == WL1271_STATE_OFF))
+ goto out;
ret = wl1271_ps_elp_wakeup(wl, false);
if (ret < 0)
goto out;
+ /* if the channel changes while joined, join again */
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
+ ((wl->band != conf->channel->band) ||
+ (wl->channel != channel))) {
+ wl->band = conf->channel->band;
+ wl->channel = channel;
+
+ /*
+ * FIXME: mac80211 should really provide a fixed rate
+ * to use here. For now, just use the smallest possible rate
+ * for the band as a fixed rate for association frames and
+ * other control messages.
+ */
+ if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+ wl1271_set_band_rate(wl);
+
+ wl->basic_rate = wl1271_min_rate_get(wl);
+ ret = wl1271_acx_rate_policies(wl);
+ if (ret < 0)
+ wl1271_warning("rate policy for update channel "
+ "failed %d", ret);
+
+ if (test_bit(WL1271_FLAG_JOINED, &wl->flags)) {
+ ret = wl1271_join(wl, false);
+ if (ret < 0)
+ wl1271_warning("cmd join to update channel "
+ "failed %d", ret);
+ }
+ }
+
if (changed & IEEE80211_CONF_CHANGE_IDLE) {
if (conf->flags & IEEE80211_CONF_IDLE &&
test_bit(WL1271_FLAG_JOINED, &wl->flags))
- wl1271_unjoin_channel(wl);
+ wl1271_unjoin(wl);
else if (!(conf->flags & IEEE80211_CONF_IDLE))
- wl1271_join_channel(wl, channel);
+ wl1271_dummy_join(wl);
if (conf->flags & IEEE80211_CONF_IDLE) {
- wl->rate_set = CONF_TX_RATE_MASK_BASIC;
+ wl->rate_set = wl1271_min_rate_get(wl);
wl->sta_rate_set = 0;
wl1271_acx_rate_policies(wl);
- }
+ wl1271_acx_keep_alive_config(
+ wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
+ ACX_KEEP_ALIVE_TPL_INVALID);
+ set_bit(WL1271_FLAG_IDLE, &wl->flags);
+ } else
+ clear_bit(WL1271_FLAG_IDLE, &wl->flags);
}
- /* if the channel changes while joined, join again */
- if (channel != wl->channel &&
- test_bit(WL1271_FLAG_JOINED, &wl->flags)) {
- wl->channel = channel;
- /* FIXME: maybe use CMD_CHANNEL_SWITCH for this? */
- ret = wl1271_cmd_join(wl);
- if (ret < 0)
- wl1271_warning("cmd join to update channel failed %d",
- ret);
- } else
- wl->channel = channel;
-
if (conf->flags & IEEE80211_CONF_PS &&
!test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
set_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
@@ -1273,13 +1323,13 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
* through the bss_info_changed() hook.
*/
if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
- wl1271_info("psm enabled");
+ wl1271_debug(DEBUG_PSM, "psm enabled");
ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
true);
}
} else if (!(conf->flags & IEEE80211_CONF_PS) &&
test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
- wl1271_info("psm disabled");
+ wl1271_debug(DEBUG_PSM, "psm disabled");
clear_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
@@ -1311,11 +1361,15 @@ struct wl1271_filter_params {
u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
};
-static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw, int mc_count,
- struct dev_addr_list *mc_list)
+static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list)
{
struct wl1271_filter_params *fp;
- int i;
+ struct netdev_hw_addr *ha;
+ struct wl1271 *wl = hw->priv;
+
+ if (unlikely(wl->state == WL1271_STATE_OFF))
+ return 0;
fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
if (!fp) {
@@ -1324,21 +1378,16 @@ static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw, int mc_count,
}
/* update multicast filtering parameters */
- fp->enabled = true;
- if (mc_count > ACX_MC_ADDRESS_GROUP_MAX) {
- mc_count = 0;
- fp->enabled = false;
- }
-
fp->mc_list_length = 0;
- for (i = 0; i < mc_count; i++) {
- if (mc_list->da_addrlen == ETH_ALEN) {
+ if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
+ fp->enabled = false;
+ } else {
+ fp->enabled = true;
+ netdev_hw_addr_list_for_each(ha, mc_list) {
memcpy(fp->mc_list[fp->mc_list_length],
- mc_list->da_addr, ETH_ALEN);
+ ha->addr, ETH_ALEN);
fp->mc_list_length++;
- } else
- wl1271_warning("Unknown mc address length.");
- mc_list = mc_list->next;
+ }
}
return (u64)(unsigned long)fp;
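
prepare_multicast() hands its allocated filter-parameter block back to mac80211 as an opaque 64-bit cookie, and configure_filter() later unpacks and frees it. A small sketch of that allocate/pack/unpack/free pattern with a simplified structure (the names here are illustrative only):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* simplified stand-in for the multicast filter parameters */
    struct mc_params {
        int count;
    };

    /* prepare step: allocate, fill, and hand back as an opaque cookie */
    static uint64_t prepare(int count)
    {
        struct mc_params *p = calloc(1, sizeof(*p));

        if (!p)
            return 0;
        p->count = count;
        return (uint64_t)(uintptr_t)p;
    }

    /* configure step: unpack the cookie, use it, then free it */
    static void configure(uint64_t cookie)
    {
        struct mc_params *p = (struct mc_params *)(uintptr_t)cookie;

        if (!p)
            return;
        printf("%d multicast addresses\n", p->count);
        free(p);
    }

    int main(void)
    {
        configure(prepare(3));
        return 0;
    }
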
@@ -1363,15 +1412,16 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
- if (wl->state == WL1271_STATE_OFF)
+ *total &= WL1271_SUPPORTED_FILTERS;
+ changed &= WL1271_SUPPORTED_FILTERS;
+
+ if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
ret = wl1271_ps_elp_wakeup(wl, false);
if (ret < 0)
goto out;
- *total &= WL1271_SUPPORTED_FILTERS;
- changed &= WL1271_SUPPORTED_FILTERS;
if (*total & FIF_ALLMULTI)
ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0);
@@ -1382,14 +1432,14 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
if (ret < 0)
goto out_sleep;
- kfree(fp);
-
- /* FIXME: We still need to set our filters properly */
-
/* determine, whether supported filter values have changed */
if (changed == 0)
goto out_sleep;
+ /* configure filters */
+ wl->filters = *total;
+ wl1271_configure_filters(wl, 0);
+
/* apply configured filters */
ret = wl1271_acx_rx_config(wl, wl->rx_config, wl->rx_filter);
if (ret < 0)
@@ -1400,6 +1450,7 @@ out_sleep:
out:
mutex_unlock(&wl->mutex);
+ kfree(fp);
}
static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
@@ -1450,15 +1501,15 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
key_type = KEY_TKIP;
key_conf->hw_key_idx = key_conf->keyidx;
- tx_seq_32 = wl->tx_security_seq_32;
- tx_seq_16 = wl->tx_security_seq_16;
+ tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq);
+ tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
break;
case ALG_CCMP:
key_type = KEY_AES;
key_conf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
- tx_seq_32 = wl->tx_security_seq_32;
- tx_seq_16 = wl->tx_security_seq_16;
+ tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq);
+ tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
break;
default:
wl1271_error("Unknown key algo 0x%x", key_conf->alg);
@@ -1508,8 +1559,6 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
default:
wl1271_error("Unsupported key cmd 0x%x", cmd);
ret = -EOPNOTSUPP;
- goto out_sleep;
-
break;
}
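
The separate 16/32-bit TKIP/AES sequence counters are merged into a single 64-bit tx_security_seq, and the HI32/LO16 helpers used above presumably just slice that value; the real definitions live in wl1271.h and are not part of this hunk. A plausible sketch of such helpers:

    #include <stdio.h>
    #include <stdint.h>

    /* hypothetical stand-ins for the helpers used in the key-set path */
    #define TX_SECURITY_LO16(seq)  ((uint16_t)((seq) & 0xffff))
    #define TX_SECURITY_HI32(seq)  ((uint32_t)(((seq) >> 16) & 0xffffffff))

    int main(void)
    {
        uint64_t seq = 0x123456789abcULL;

        printf("hi32=0x%x lo16=0x%x\n",
               (unsigned)TX_SECURITY_HI32(seq),
               (unsigned)TX_SECURITY_LO16(seq));
        /* prints hi32=0x12345678 lo16=0x9abc */
        return 0;
    }
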
@@ -1524,6 +1573,7 @@ out:
}
static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
struct cfg80211_scan_request *req)
{
struct wl1271 *wl = hw->priv;
@@ -1545,10 +1595,12 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
goto out;
if (wl1271_11a_enabled())
- ret = wl1271_cmd_scan(hw->priv, ssid, len, 1, 0,
+ ret = wl1271_cmd_scan(hw->priv, ssid, len,
+ req->ie, req->ie_len, 1, 0,
WL1271_SCAN_BAND_DUAL, 3);
else
- ret = wl1271_cmd_scan(hw->priv, ssid, len, 1, 0,
+ ret = wl1271_cmd_scan(hw->priv, ssid, len,
+ req->ie, req->ie_len, 1, 0,
WL1271_SCAN_BAND_2_4_GHZ, 3);
wl1271_ps_elp_sleep(wl);
@@ -1562,10 +1614,13 @@ out:
static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
struct wl1271 *wl = hw->priv;
- int ret;
+ int ret = 0;
mutex_lock(&wl->mutex);
+ if (unlikely(wl->state == WL1271_STATE_OFF))
+ goto out;
+
ret = wl1271_ps_elp_wakeup(wl, false);
if (ret < 0)
goto out;
@@ -1607,6 +1662,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
enum wl1271_cmd_ps_mode mode;
struct wl1271 *wl = hw->priv;
bool do_join = false;
+ bool set_assoc = false;
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed");
@@ -1617,20 +1673,29 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
- if (wl->bss_type == BSS_TYPE_IBSS) {
- /* FIXME: This implements rudimentary ad-hoc support -
- proper templates are on the wish list and notification
- on when they change. This patch will update the templates
- on every call to this function. */
+ if ((changed & BSS_CHANGED_BEACON_INT) &&
+ (wl->bss_type == BSS_TYPE_IBSS)) {
+ wl1271_debug(DEBUG_ADHOC, "ad-hoc beacon interval updated: %d",
+ bss_conf->beacon_int);
+
+ wl->beacon_int = bss_conf->beacon_int;
+ do_join = true;
+ }
+
+ if ((changed & BSS_CHANGED_BEACON) &&
+ (wl->bss_type == BSS_TYPE_IBSS)) {
struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
+ wl1271_debug(DEBUG_ADHOC, "ad-hoc beacon updated");
+
if (beacon) {
struct ieee80211_hdr *hdr;
wl1271_ssid_set(wl, beacon);
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON,
beacon->data,
- beacon->len);
+ beacon->len, 0,
+ wl1271_min_rate_get(wl));
if (ret < 0) {
dev_kfree_skb(beacon);
@@ -1645,7 +1710,8 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
ret = wl1271_cmd_template_set(wl,
CMD_TEMPL_PROBE_RESPONSE,
beacon->data,
- beacon->len);
+ beacon->len, 0,
+ wl1271_min_rate_get(wl));
dev_kfree_skb(beacon);
if (ret < 0)
goto out_sleep;
@@ -1655,20 +1721,48 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
}
}
+ if ((changed & BSS_CHANGED_BEACON_ENABLED) &&
+ (wl->bss_type == BSS_TYPE_IBSS)) {
+ wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
+ bss_conf->enable_beacon ? "enabled" : "disabled");
+
+ if (bss_conf->enable_beacon)
+ wl->set_bss_type = BSS_TYPE_IBSS;
+ else
+ wl->set_bss_type = BSS_TYPE_STA_BSS;
+ do_join = true;
+ }
+
+ if (changed & BSS_CHANGED_CQM) {
+ bool enable = false;
+ if (bss_conf->cqm_rssi_thold)
+ enable = true;
+ ret = wl1271_acx_rssi_snr_trigger(wl, enable,
+ bss_conf->cqm_rssi_thold,
+ bss_conf->cqm_rssi_hyst);
+ if (ret < 0)
+ goto out;
+ wl->rssi_thold = bss_conf->cqm_rssi_thold;
+ }
+
if ((changed & BSS_CHANGED_BSSID) &&
/*
* Now we know the correct bssid, so we send a new join command
* and enable the BSSID filter
*/
memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) {
- wl->rx_config |= CFG_BSSID_FILTER_EN;
memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
+
ret = wl1271_cmd_build_null_data(wl);
- if (ret < 0) {
- wl1271_warning("cmd buld null data failed %d",
- ret);
+ if (ret < 0)
+ goto out_sleep;
+
+ ret = wl1271_build_qos_null_data(wl);
+ if (ret < 0)
goto out_sleep;
- }
+
+ /* filter out all packets not from this BSSID */
+ wl1271_configure_filters(wl, 0);
/* Need to update the BSSID (for filtering etc) */
do_join = true;
@@ -1676,8 +1770,21 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ASSOC) {
if (bss_conf->assoc) {
+ u32 rates;
wl->aid = bss_conf->aid;
- set_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
+ set_assoc = true;
+
+ /*
+ * use basic rates from AP, and determine lowest rate
+ * to use with control frames.
+ */
+ rates = bss_conf->basic_rates;
+ wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl,
+ rates);
+ wl->basic_rate = wl1271_min_rate_get(wl);
+ ret = wl1271_acx_rate_policies(wl);
+ if (ret < 0)
+ goto out_sleep;
/*
* with wl1271, we don't need to update the
@@ -1689,7 +1796,17 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
if (ret < 0)
goto out_sleep;
- ret = wl1271_acx_aid(wl, wl->aid);
+ /*
+ * The SSID is intentionally set to NULL here - the
+ * firmware will set the probe request with a
+ * broadcast SSID regardless of what we set in the
+ * template.
+ */
+ ret = wl1271_cmd_build_probe_req(wl, NULL, 0,
+ NULL, 0, wl->band);
+
+ /* enable the connection monitoring feature */
+ ret = wl1271_acx_conn_monit_params(wl, true);
if (ret < 0)
goto out_sleep;
@@ -1705,6 +1822,22 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
/* use defaults when not associated */
clear_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
wl->aid = 0;
+
+ /* revert back to minimum rates for the current band */
+ wl1271_set_band_rate(wl);
+ wl->basic_rate = wl1271_min_rate_get(wl);
+ ret = wl1271_acx_rate_policies(wl);
+ if (ret < 0)
+ goto out_sleep;
+
+ /* disable connection monitor features */
+ ret = wl1271_acx_conn_monit_params(wl, false);
+
+ /* Disable the keep-alive feature */
+ ret = wl1271_acx_keep_alive_mode(wl, false);
+
+ if (ret < 0)
+ goto out_sleep;
}
}
@@ -1739,12 +1872,11 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
}
if (do_join) {
- ret = wl1271_cmd_join(wl);
+ ret = wl1271_join(wl, set_assoc);
if (ret < 0) {
wl1271_warning("cmd join failed %d", ret);
goto out_sleep;
}
- set_bit(WL1271_FLAG_JOINED, &wl->flags);
}
out_sleep:
@@ -1758,6 +1890,7 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct wl1271 *wl = hw->priv;
+ u8 ps_scheme;
int ret;
mutex_lock(&wl->mutex);
@@ -1768,17 +1901,22 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
if (ret < 0)
goto out;
+ /* mac80211 reports the txop in units of 32us; we need microseconds */
ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue),
params->cw_min, params->cw_max,
- params->aifs, params->txop);
+ params->aifs, params->txop << 5);
if (ret < 0)
goto out_sleep;
+ if (params->uapsd)
+ ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
+ else
+ ps_scheme = CONF_PS_SCHEME_LEGACY;
+
ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue),
CONF_CHANNEL_TYPE_EDCF,
wl1271_tx_get_queue(queue),
- CONF_PS_SCHEME_LEGACY_PSPOLL,
- CONF_ACK_POLICY_LEGACY, 0, 0);
+ ps_scheme, CONF_ACK_POLICY_LEGACY, 0, 0);
if (ret < 0)
goto out_sleep;
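
As the comment notes, mac80211 supplies the txop in units of 32 microseconds, so the shift by five converts it to plain microseconds before it is sent down; for example:

    #include <stdio.h>

    int main(void)
    {
        unsigned int txop = 94;            /* example value, in 32 us units */
        unsigned int txop_us = txop << 5;  /* same as txop * 32 */

        printf("%u us\n", txop_us);        /* prints 3008 us */
        return 0;
    }
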
@@ -1852,6 +1990,36 @@ static struct ieee80211_channel wl1271_channels[] = {
{ .hw_value = 13, .center_freq = 2472, .max_power = 25 },
};
+/* mapping to indexes for wl1271_rates */
+static const u8 wl1271_rate_to_idx_2ghz[] = {
+ /* MCS rates are used only with 11n */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS7 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS6 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS5 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS4 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS3 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS2 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS1 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS0 */
+
+ 11, /* CONF_HW_RXTX_RATE_54 */
+ 10, /* CONF_HW_RXTX_RATE_48 */
+ 9, /* CONF_HW_RXTX_RATE_36 */
+ 8, /* CONF_HW_RXTX_RATE_24 */
+
+ /* TI-specific rate */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_22 */
+
+ 7, /* CONF_HW_RXTX_RATE_18 */
+ 6, /* CONF_HW_RXTX_RATE_12 */
+ 3, /* CONF_HW_RXTX_RATE_11 */
+ 5, /* CONF_HW_RXTX_RATE_9 */
+ 4, /* CONF_HW_RXTX_RATE_6 */
+ 2, /* CONF_HW_RXTX_RATE_5_5 */
+ 1, /* CONF_HW_RXTX_RATE_2 */
+ 0 /* CONF_HW_RXTX_RATE_1 */
+};
+
/* can't be const, mac80211 writes to this */
static struct ieee80211_supported_band wl1271_band_2ghz = {
.channels = wl1271_channels,
@@ -1934,6 +2102,35 @@ static struct ieee80211_channel wl1271_channels_5ghz[] = {
{ .hw_value = 165, .center_freq = 5825},
};
+/* mapping to indexes for wl1271_rates_5ghz */
+static const u8 wl1271_rate_to_idx_5ghz[] = {
+ /* MCS rates are used only with 11n */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS7 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS6 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS5 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS4 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS3 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS2 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS1 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS0 */
+
+ 7, /* CONF_HW_RXTX_RATE_54 */
+ 6, /* CONF_HW_RXTX_RATE_48 */
+ 5, /* CONF_HW_RXTX_RATE_36 */
+ 4, /* CONF_HW_RXTX_RATE_24 */
+
+ /* TI-specific rate */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_22 */
+
+ 3, /* CONF_HW_RXTX_RATE_18 */
+ 2, /* CONF_HW_RXTX_RATE_12 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_11 */
+ 1, /* CONF_HW_RXTX_RATE_9 */
+ 0, /* CONF_HW_RXTX_RATE_6 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_5_5 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_2 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED /* CONF_HW_RXTX_RATE_1 */
+};
static struct ieee80211_supported_band wl1271_band_5ghz = {
.channels = wl1271_channels_5ghz,
@@ -1942,13 +2139,17 @@ static struct ieee80211_supported_band wl1271_band_5ghz = {
.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
};
+static const u8 *wl1271_band_rate_to_idx[] = {
+ [IEEE80211_BAND_2GHZ] = wl1271_rate_to_idx_2ghz,
+ [IEEE80211_BAND_5GHZ] = wl1271_rate_to_idx_5ghz
+};
+
static const struct ieee80211_ops wl1271_ops = {
.start = wl1271_op_start,
.stop = wl1271_op_stop,
.add_interface = wl1271_op_add_interface,
.remove_interface = wl1271_op_remove_interface,
.config = wl1271_op_config,
-/* .config_interface = wl1271_op_config_interface, */
.prepare_multicast = wl1271_op_prepare_multicast,
.configure_filter = wl1271_op_configure_filter,
.tx = wl1271_op_tx,
@@ -1960,7 +2161,113 @@ static const struct ieee80211_ops wl1271_ops = {
CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
};
-static int wl1271_register_hw(struct wl1271 *wl)
+
+u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate)
+{
+ u8 idx;
+
+ BUG_ON(wl->band >= sizeof(wl1271_band_rate_to_idx)/sizeof(u8 *));
+
+ if (unlikely(rate >= CONF_HW_RXTX_RATE_MAX)) {
+ wl1271_error("Illegal RX rate from HW: %d", rate);
+ return 0;
+ }
+
+ idx = wl1271_band_rate_to_idx[wl->band][rate];
+ if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
+ wl1271_error("Unsupported RX rate from HW: %d", rate);
+ return 0;
+ }
+
+ return idx;
+}
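
wl1271_rate_to_idx() above does a two-level lookup: the band picks one of the tables defined earlier, and the hardware rate code indexes into it, with out-of-range or UNSUPPORTED entries falling back to index 0. A trimmed sketch of the same scheme with hypothetical table contents:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    #define UNSUPPORTED 0xff   /* stand-in for CONF_HW_RXTX_RATE_UNSUPPORTED */

    /* trimmed, made-up tables: hw rate code -> mac80211 bitrate index */
    static const uint8_t idx_2ghz[] = { 0, 1, 2, 3, UNSUPPORTED };
    static const uint8_t idx_5ghz[] = { 0, 1, UNSUPPORTED, UNSUPPORTED, 2 };

    static const uint8_t *band_to_idx[] = { idx_2ghz, idx_5ghz };

    static unsigned int rate_to_idx(int band, unsigned int hw_rate, size_t max)
    {
        uint8_t idx;

        if (hw_rate >= max)        /* out of range -> fall back to index 0 */
            return 0;

        idx = band_to_idx[band][hw_rate];
        if (idx == UNSUPPORTED)    /* rate not valid on this band */
            return 0;

        return idx;
    }

    int main(void)
    {
        printf("%u\n", rate_to_idx(0, 3, 5)); /* 2.4 GHz, rate code 3 -> 3 */
        printf("%u\n", rate_to_idx(1, 2, 5)); /* 5 GHz, unsupported   -> 0 */
        return 0;
    }
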
+
+static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct wl1271 *wl = dev_get_drvdata(dev);
+ ssize_t len;
+
+ /* FIXME: what's the maximum length of buf? page size? */
+ len = 500;
+
+ mutex_lock(&wl->mutex);
+ len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n",
+ wl->sg_enabled);
+ mutex_unlock(&wl->mutex);
+
+ return len;
+
+}
+
+static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct wl1271 *wl = dev_get_drvdata(dev);
+ unsigned long res;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &res);
+
+ if (ret < 0) {
+ wl1271_warning("incorrect value written to bt_coex_mode");
+ return count;
+ }
+
+ mutex_lock(&wl->mutex);
+
+ res = !!res;
+
+ if (res == wl->sg_enabled)
+ goto out;
+
+ wl->sg_enabled = res;
+
+ if (wl->state == WL1271_STATE_OFF)
+ goto out;
+
+ ret = wl1271_ps_elp_wakeup(wl, false);
+ if (ret < 0)
+ goto out;
+
+ wl1271_acx_sg_enable(wl, wl->sg_enabled);
+ wl1271_ps_elp_sleep(wl);
+
+ out:
+ mutex_unlock(&wl->mutex);
+ return count;
+}
+
+static DEVICE_ATTR(bt_coex_state, S_IRUGO | S_IWUSR,
+ wl1271_sysfs_show_bt_coex_state,
+ wl1271_sysfs_store_bt_coex_state);
+
+static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct wl1271 *wl = dev_get_drvdata(dev);
+ ssize_t len;
+
+ /* FIXME: what's the maximum length of buf? page size? */
+ len = 500;
+
+ mutex_lock(&wl->mutex);
+ if (wl->hw_pg_ver >= 0)
+ len = snprintf(buf, len, "%d\n", wl->hw_pg_ver);
+ else
+ len = snprintf(buf, len, "n/a\n");
+ mutex_unlock(&wl->mutex);
+
+ return len;
+}
+
+static DEVICE_ATTR(hw_pg_ver, S_IRUGO,
+ wl1271_sysfs_show_hw_pg_ver, NULL);
+
+int wl1271_register_hw(struct wl1271 *wl)
{
int ret;
@@ -1981,8 +2288,17 @@ static int wl1271_register_hw(struct wl1271 *wl)
return 0;
}
+EXPORT_SYMBOL_GPL(wl1271_register_hw);
+
+void wl1271_unregister_hw(struct wl1271 *wl)
+{
+ ieee80211_unregister_hw(wl->hw);
+ wl->mac80211_registered = false;
+
+}
+EXPORT_SYMBOL_GPL(wl1271_unregister_hw);
-static int wl1271_init_ieee80211(struct wl1271 *wl)
+int wl1271_init_ieee80211(struct wl1271 *wl)
{
/* The tx descriptor buffer and the TKIP space. */
wl->hw->extra_tx_headroom = WL1271_TKIP_IV_SPACE +
@@ -1991,11 +2307,15 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
/* unit us */
/* FIXME: find a proper value */
wl->hw->channel_change_time = 10000;
+ wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_NOISE_DBM |
IEEE80211_HW_BEACON_FILTER |
- IEEE80211_HW_SUPPORTS_PS;
+ IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_SUPPORTS_UAPSD |
+ IEEE80211_HW_HAS_RATE_CONTROL |
+ IEEE80211_HW_CONNECTION_MONITOR |
+ IEEE80211_HW_SUPPORTS_CQM_RSSI;
wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC);
@@ -2005,51 +2325,53 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
if (wl1271_11a_enabled())
wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wl1271_band_5ghz;
- SET_IEEE80211_DEV(wl->hw, &wl->spi->dev);
+ wl->hw->queues = 4;
+ wl->hw->max_rates = 1;
- return 0;
-}
-
-static void wl1271_device_release(struct device *dev)
-{
+ SET_IEEE80211_DEV(wl->hw, wl1271_wl_to_dev(wl));
+ return 0;
}
-
-static struct platform_device wl1271_device = {
- .name = "wl1271",
- .id = -1,
-
- /* device model insists to have a release function */
- .dev = {
- .release = wl1271_device_release,
- },
-};
+EXPORT_SYMBOL_GPL(wl1271_init_ieee80211);
#define WL1271_DEFAULT_CHANNEL 0
-static struct ieee80211_hw *wl1271_alloc_hw(void)
+struct ieee80211_hw *wl1271_alloc_hw(void)
{
struct ieee80211_hw *hw;
+ struct platform_device *plat_dev = NULL;
struct wl1271 *wl;
- int i;
+ int i, ret;
hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
if (!hw) {
wl1271_error("could not alloc ieee80211_hw");
- return ERR_PTR(-ENOMEM);
+ ret = -ENOMEM;
+ goto err_hw_alloc;
+ }
+
+ plat_dev = kmalloc(sizeof(wl1271_device), GFP_KERNEL);
+ if (!plat_dev) {
+ wl1271_error("could not allocate platform_device");
+ ret = -ENOMEM;
+ goto err_plat_alloc;
}
+ memcpy(plat_dev, &wl1271_device, sizeof(wl1271_device));
+
wl = hw->priv;
memset(wl, 0, sizeof(*wl));
INIT_LIST_HEAD(&wl->list);
wl->hw = hw;
+ wl->plat_dev = plat_dev;
skb_queue_head_init(&wl->tx_queue);
INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
wl->channel = WL1271_DEFAULT_CHANNEL;
+ wl->beacon_int = WL1271_DEFAULT_BEACON_INT;
wl->default_key = 0;
wl->rx_counter = 0;
wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
@@ -2057,11 +2379,14 @@ static struct ieee80211_hw *wl1271_alloc_hw(void)
wl->psm_entry_retry = 0;
wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
+ wl->basic_rate = CONF_TX_RATE_MASK_BASIC;
wl->rate_set = CONF_TX_RATE_MASK_BASIC;
wl->sta_rate_set = 0;
wl->band = IEEE80211_BAND_2GHZ;
wl->vif = NULL;
wl->flags = 0;
+ wl->sg_enabled = true;
+ wl->hw_pg_ver = -1;
for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
wl->tx_frames[i] = NULL;
@@ -2074,167 +2399,72 @@ static struct ieee80211_hw *wl1271_alloc_hw(void)
/* Apply default driver configuration. */
wl1271_conf_init(wl);
- return hw;
-}
-
-int wl1271_free_hw(struct wl1271 *wl)
-{
- ieee80211_unregister_hw(wl->hw);
-
- wl1271_debugfs_exit(wl);
-
- kfree(wl->target_mem_map);
- vfree(wl->fw);
- wl->fw = NULL;
- kfree(wl->nvs);
- wl->nvs = NULL;
-
- kfree(wl->fw_status);
- kfree(wl->tx_res_if);
-
- ieee80211_free_hw(wl->hw);
-
- return 0;
-}
-
-static int __devinit wl1271_probe(struct spi_device *spi)
-{
- struct wl12xx_platform_data *pdata;
- struct ieee80211_hw *hw;
- struct wl1271 *wl;
- int ret;
+ wl1271_debugfs_init(wl);
- pdata = spi->dev.platform_data;
- if (!pdata) {
- wl1271_error("no platform data");
- return -ENODEV;
+ /* Register platform device */
+ ret = platform_device_register(wl->plat_dev);
+ if (ret) {
+ wl1271_error("couldn't register platform device");
+ goto err_hw;
}
+ dev_set_drvdata(&wl->plat_dev->dev, wl);
- hw = wl1271_alloc_hw();
- if (IS_ERR(hw))
- return PTR_ERR(hw);
-
- wl = hw->priv;
-
- dev_set_drvdata(&spi->dev, wl);
- wl->spi = spi;
-
- /* This is the only SPI value that we need to set here, the rest
- * comes from the board-peripherals file */
- spi->bits_per_word = 32;
-
- ret = spi_setup(spi);
+ /* Create sysfs file to control bt coex state */
+ ret = device_create_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state);
if (ret < 0) {
- wl1271_error("spi_setup failed");
- goto out_free;
- }
-
- wl->set_power = pdata->set_power;
- if (!wl->set_power) {
- wl1271_error("set power function missing in platform data");
- ret = -ENODEV;
- goto out_free;
+ wl1271_error("failed to create sysfs file bt_coex_state");
+ goto err_platform;
}
- wl->irq = spi->irq;
- if (wl->irq < 0) {
- wl1271_error("irq missing in platform data");
- ret = -ENODEV;
- goto out_free;
- }
-
- ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl);
+ /* Create sysfs file to get HW PG version */
+ ret = device_create_file(&wl->plat_dev->dev, &dev_attr_hw_pg_ver);
if (ret < 0) {
- wl1271_error("request_irq() failed: %d", ret);
- goto out_free;
- }
-
- set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
-
- disable_irq(wl->irq);
-
- ret = platform_device_register(&wl1271_device);
- if (ret) {
- wl1271_error("couldn't register platform device");
- goto out_irq;
+ wl1271_error("failed to create sysfs file hw_pg_ver");
+ goto err_bt_coex_state;
}
- dev_set_drvdata(&wl1271_device.dev, wl);
-
- ret = wl1271_init_ieee80211(wl);
- if (ret)
- goto out_platform;
-
- ret = wl1271_register_hw(wl);
- if (ret)
- goto out_platform;
-
- wl1271_debugfs_init(wl);
- wl1271_notice("initialized");
+ return hw;
- return 0;
+err_bt_coex_state:
+ device_remove_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state);
- out_platform:
- platform_device_unregister(&wl1271_device);
+err_platform:
+ platform_device_unregister(wl->plat_dev);
- out_irq:
- free_irq(wl->irq, wl);
+err_hw:
+ wl1271_debugfs_exit(wl);
+ kfree(plat_dev);
- out_free:
+err_plat_alloc:
ieee80211_free_hw(hw);
- return ret;
-}
-
-static int __devexit wl1271_remove(struct spi_device *spi)
-{
- struct wl1271 *wl = dev_get_drvdata(&spi->dev);
+err_hw_alloc:
- platform_device_unregister(&wl1271_device);
- free_irq(wl->irq, wl);
-
- wl1271_free_hw(wl);
-
- return 0;
+ return ERR_PTR(ret);
}
+EXPORT_SYMBOL_GPL(wl1271_alloc_hw);
-
-static struct spi_driver wl1271_spi_driver = {
- .driver = {
- .name = "wl1271",
- .bus = &spi_bus_type,
- .owner = THIS_MODULE,
- },
-
- .probe = wl1271_probe,
- .remove = __devexit_p(wl1271_remove),
-};
-
-static int __init wl1271_init(void)
+int wl1271_free_hw(struct wl1271 *wl)
{
- int ret;
+ platform_device_unregister(wl->plat_dev);
+ kfree(wl->plat_dev);
- ret = spi_register_driver(&wl1271_spi_driver);
- if (ret < 0) {
- wl1271_error("failed to register spi driver: %d", ret);
- goto out;
- }
+ wl1271_debugfs_exit(wl);
-out:
- return ret;
-}
+ vfree(wl->fw);
+ wl->fw = NULL;
+ kfree(wl->nvs);
+ wl->nvs = NULL;
-static void __exit wl1271_exit(void)
-{
- spi_unregister_driver(&wl1271_spi_driver);
+ kfree(wl->fw_status);
+ kfree(wl->tx_res_if);
- wl1271_notice("unloaded");
-}
+ ieee80211_free_hw(wl->hw);
-module_init(wl1271_init);
-module_exit(wl1271_exit);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wl1271_free_hw);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
-MODULE_FIRMWARE(WL1271_FW_NAME);
diff --git a/drivers/net/wireless/wl12xx/wl1271_ps.c b/drivers/net/wireless/wl12xx/wl1271_ps.c
index e2b1ebf096e8..a5e60e0403e5 100644
--- a/drivers/net/wireless/wl12xx/wl1271_ps.c
+++ b/drivers/net/wireless/wl12xx/wl1271_ps.c
@@ -23,7 +23,6 @@
#include "wl1271_reg.h"
#include "wl1271_ps.h"
-#include "wl1271_spi.h"
#include "wl1271_io.h"
#define WL1271_WAKEUP_TIMEOUT 500
@@ -41,7 +40,8 @@ void wl1271_elp_work(struct work_struct *work)
mutex_lock(&wl->mutex);
if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags) ||
- !test_bit(WL1271_FLAG_PSM, &wl->flags))
+ (!test_bit(WL1271_FLAG_PSM, &wl->flags) &&
+ !test_bit(WL1271_FLAG_IDLE, &wl->flags)))
goto out;
wl1271_debug(DEBUG_PSM, "chip to elp");
@@ -57,7 +57,8 @@ out:
/* Routines to toggle sleep mode while in ELP */
void wl1271_ps_elp_sleep(struct wl1271 *wl)
{
- if (test_bit(WL1271_FLAG_PSM, &wl->flags)) {
+ if (test_bit(WL1271_FLAG_PSM, &wl->flags) ||
+ test_bit(WL1271_FLAG_IDLE, &wl->flags)) {
cancel_delayed_work(&wl->elp_work);
ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
msecs_to_jiffies(ELP_ENTRY_DELAY));
diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.c b/drivers/net/wireless/wl12xx/wl1271_rx.c
index c723d9c7e131..57f4bfd959c8 100644
--- a/drivers/net/wireless/wl12xx/wl1271_rx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_rx.c
@@ -27,7 +27,6 @@
#include "wl1271_acx.h"
#include "wl1271_reg.h"
#include "wl1271_rx.h"
-#include "wl1271_spi.h"
#include "wl1271_io.h"
static u8 wl1271_rx_get_mem_block(struct wl1271_fw_status *status,
@@ -44,66 +43,6 @@ static u32 wl1271_rx_get_buf_size(struct wl1271_fw_status *status,
RX_BUF_SIZE_MASK) >> RX_BUF_SIZE_SHIFT_DIV;
}
-/* The values of this table must match the wl1271_rates[] array */
-static u8 wl1271_rx_rate_to_idx[] = {
- /* MCS rates are used only with 11n */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS7 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS6 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS5 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS4 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS3 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS2 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS1 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS0 */
-
- 11, /* WL1271_RATE_54 */
- 10, /* WL1271_RATE_48 */
- 9, /* WL1271_RATE_36 */
- 8, /* WL1271_RATE_24 */
-
- /* TI-specific rate */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_22 */
-
- 7, /* WL1271_RATE_18 */
- 6, /* WL1271_RATE_12 */
- 3, /* WL1271_RATE_11 */
- 5, /* WL1271_RATE_9 */
- 4, /* WL1271_RATE_6 */
- 2, /* WL1271_RATE_5_5 */
- 1, /* WL1271_RATE_2 */
- 0 /* WL1271_RATE_1 */
-};
-
-/* The values of this table must match the wl1271_rates[] array */
-static u8 wl1271_5_ghz_rx_rate_to_idx[] = {
- /* MCS rates are used only with 11n */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS7 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS6 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS5 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS4 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS3 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS2 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS1 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS0 */
-
- 7, /* WL1271_RATE_54 */
- 6, /* WL1271_RATE_48 */
- 5, /* WL1271_RATE_36 */
- 4, /* WL1271_RATE_24 */
-
- /* TI-specific rate */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_22 */
-
- 3, /* WL1271_RATE_18 */
- 2, /* WL1271_RATE_12 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_11 */
- 1, /* WL1271_RATE_9 */
- 0, /* WL1271_RATE_6 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_5_5 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_2 */
- WL1271_RX_RATE_UNSUPPORTED /* WL1271_RATE_1 */
-};
-
static void wl1271_rx_status(struct wl1271 *wl,
struct wl1271_rx_descriptor *desc,
struct ieee80211_rx_status *status,
@@ -111,20 +50,8 @@ static void wl1271_rx_status(struct wl1271 *wl,
{
memset(status, 0, sizeof(struct ieee80211_rx_status));
- if ((desc->flags & WL1271_RX_DESC_BAND_MASK) ==
- WL1271_RX_DESC_BAND_BG) {
- status->band = IEEE80211_BAND_2GHZ;
- status->rate_idx = wl1271_rx_rate_to_idx[desc->rate];
- } else if ((desc->flags & WL1271_RX_DESC_BAND_MASK) ==
- WL1271_RX_DESC_BAND_A) {
- status->band = IEEE80211_BAND_5GHZ;
- status->rate_idx = wl1271_5_ghz_rx_rate_to_idx[desc->rate];
- } else
- wl1271_warning("unsupported band 0x%x",
- desc->flags & WL1271_RX_DESC_BAND_MASK);
-
- if (unlikely(status->rate_idx == WL1271_RX_RATE_UNSUPPORTED))
- wl1271_warning("unsupported rate");
+ status->band = wl->band;
+ status->rate_idx = wl1271_rate_to_idx(wl, desc->rate);
/*
* FIXME: Add mactime handling. For IBSS (ad-hoc) we need to get the
@@ -134,13 +61,6 @@ static void wl1271_rx_status(struct wl1271 *wl,
*/
status->signal = desc->rssi;
- /*
- * FIXME: In wl1251, the SNR should be divided by two. In wl1271 we
- * need to divide by two for now, but TI has been discussing about
- * changing it. This needs to be rechecked.
- */
- status->noise = desc->rssi - (desc->snr >> 1);
-
status->freq = ieee80211_channel_to_frequency(desc->channel);
if (desc->flags & WL1271_RX_DESC_ENCRYPT_MASK) {
@@ -162,6 +82,13 @@ static void wl1271_rx_handle_data(struct wl1271 *wl, u32 length)
u8 *buf;
u8 beacon = 0;
+ /*
+ * In PLT mode we seem to get frames and mac80211 warns about them,
+ * workaround this by not retrieving them at all.
+ */
+ if (unlikely(wl->state == WL1271_STATE_PLT))
+ return;
+
skb = __dev_alloc_skb(length, GFP_KERNEL);
if (!skb) {
wl1271_error("Couldn't allocate RX frame");
@@ -220,6 +147,7 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
wl->rx_counter++;
drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
- wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
}
+
+ wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
}
diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.h b/drivers/net/wireless/wl12xx/wl1271_rx.h
index 1ae6d1783ed4..b89be4758e78 100644
--- a/drivers/net/wireless/wl12xx/wl1271_rx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_rx.h
@@ -43,7 +43,6 @@
#define RX_MAX_PACKET_ID 3
#define NUM_RX_PKT_DESC_MOD_MASK 7
-#define WL1271_RX_RATE_UNSUPPORTED 0xFF
#define RX_DESC_VALID_FCS 0x0001
#define RX_DESC_MATCH_RXADDR1 0x0002
@@ -117,5 +116,6 @@ struct wl1271_rx_descriptor {
} __attribute__ ((packed));
void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status);
+u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate);
#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_sdio.c b/drivers/net/wireless/wl12xx/wl1271_sdio.c
new file mode 100644
index 000000000000..d3d6f302f705
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/wl1271_sdio.c
@@ -0,0 +1,291 @@
+/*
+ * This file is part of wl1271
+ *
+ * Copyright (C) 2009-2010 Nokia Corporation
+ *
+ * Contact: Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/crc7.h>
+#include <linux/vmalloc.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/card.h>
+#include <plat/gpio.h>
+
+#include "wl1271.h"
+#include "wl12xx_80211.h"
+#include "wl1271_io.h"
+
+
+#define RX71_WL1271_IRQ_GPIO 42
+
+#ifndef SDIO_VENDOR_ID_TI
+#define SDIO_VENDOR_ID_TI 0x0097
+#endif
+
+#ifndef SDIO_DEVICE_ID_TI_WL1271
+#define SDIO_DEVICE_ID_TI_WL1271 0x4076
+#endif
+
+static const struct sdio_device_id wl1271_devices[] = {
+ { SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271) },
+ {}
+};
+MODULE_DEVICE_TABLE(sdio, wl1271_devices);
+
+static inline struct sdio_func *wl_to_func(struct wl1271 *wl)
+{
+ return wl->if_priv;
+}
+
+static struct device *wl1271_sdio_wl_to_dev(struct wl1271 *wl)
+{
+ return &(wl_to_func(wl)->dev);
+}
+
+static irqreturn_t wl1271_irq(int irq, void *cookie)
+{
+ struct wl1271 *wl = cookie;
+ unsigned long flags;
+
+ wl1271_debug(DEBUG_IRQ, "IRQ");
+
+ /* complete the ELP completion */
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ if (wl->elp_compl) {
+ complete(wl->elp_compl);
+ wl->elp_compl = NULL;
+ }
+
+ if (!test_and_set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
+ ieee80211_queue_work(wl->hw, &wl->irq_work);
+ set_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static void wl1271_sdio_disable_interrupts(struct wl1271 *wl)
+{
+ disable_irq(wl->irq);
+}
+
+static void wl1271_sdio_enable_interrupts(struct wl1271 *wl)
+{
+ enable_irq(wl->irq);
+}
+
+static void wl1271_sdio_reset(struct wl1271 *wl)
+{
+}
+
+static void wl1271_sdio_init(struct wl1271 *wl)
+{
+}
+
+static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
+ size_t len, bool fixed)
+{
+ int ret;
+ struct sdio_func *func = wl_to_func(wl);
+
+ if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
+ ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
+ wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x",
+ addr, ((u8 *)buf)[0]);
+ } else {
+ if (fixed)
+ ret = sdio_readsb(func, buf, addr, len);
+ else
+ ret = sdio_memcpy_fromio(func, buf, addr, len);
+
+ wl1271_debug(DEBUG_SDIO, "sdio read 53 addr 0x%x, %zu bytes",
+ addr, len);
+ wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
+ }
+
+ if (ret)
+ wl1271_error("sdio read failed (%d)", ret);
+
+}
+
+static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
+ size_t len, bool fixed)
+{
+ int ret;
+ struct sdio_func *func = wl_to_func(wl);
+
+ if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
+ sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
+ wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x",
+ addr, ((u8 *)buf)[0]);
+ } else {
+ wl1271_debug(DEBUG_SDIO, "sdio write 53 addr 0x%x, %zu bytes",
+ addr, len);
+ wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
+
+ if (fixed)
+ ret = sdio_writesb(func, addr, buf, len);
+ else
+ ret = sdio_memcpy_toio(func, addr, buf, len);
+ }
+ if (ret)
+ wl1271_error("sdio write failed (%d)", ret);
+
+}
+
+static void wl1271_sdio_set_power(struct wl1271 *wl, bool enable)
+{
+ struct sdio_func *func = wl_to_func(wl);
+
+ /* Let the SDIO stack handle wlan_enable control, so we
+ * keep host claimed while wlan is in use to keep wl1271
+ * alive.
+ */
+ if (enable) {
+ sdio_claim_host(func);
+ sdio_enable_func(func);
+ } else {
+ sdio_disable_func(func);
+ sdio_release_host(func);
+ }
+}
+
+static struct wl1271_if_operations sdio_ops = {
+ .read = wl1271_sdio_raw_read,
+ .write = wl1271_sdio_raw_write,
+ .reset = wl1271_sdio_reset,
+ .init = wl1271_sdio_init,
+ .power = wl1271_sdio_set_power,
+ .dev = wl1271_sdio_wl_to_dev,
+ .enable_irq = wl1271_sdio_enable_interrupts,
+ .disable_irq = wl1271_sdio_disable_interrupts
+};
+
+static int __devinit wl1271_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ struct ieee80211_hw *hw;
+ struct wl1271 *wl;
+ int ret;
+
+ /* We are only able to handle the wlan function */
+ if (func->num != 0x02)
+ return -ENODEV;
+
+ hw = wl1271_alloc_hw();
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ wl = hw->priv;
+
+ wl->if_priv = func;
+ wl->if_ops = &sdio_ops;
+
+ /* Grab access to FN0 for ELP reg. */
+ func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
+
+ wl->irq = gpio_to_irq(RX71_WL1271_IRQ_GPIO);
+ if (wl->irq < 0) {
+ ret = wl->irq;
+ wl1271_error("could not get irq!");
+ goto out_free;
+ }
+
+ ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl);
+ if (ret < 0) {
+ wl1271_error("request_irq() failed: %d", ret);
+ goto out_free;
+ }
+
+ set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
+
+ disable_irq(wl->irq);
+
+ ret = wl1271_init_ieee80211(wl);
+ if (ret)
+ goto out_irq;
+
+ ret = wl1271_register_hw(wl);
+ if (ret)
+ goto out_irq;
+
+ sdio_set_drvdata(func, wl);
+
+ wl1271_notice("initialized");
+
+ return 0;
+
+ out_irq:
+ free_irq(wl->irq, wl);
+
+
+ out_free:
+ wl1271_free_hw(wl);
+
+ return ret;
+}
+
+static void __devexit wl1271_remove(struct sdio_func *func)
+{
+ struct wl1271 *wl = sdio_get_drvdata(func);
+
+ free_irq(wl->irq, wl);
+
+ wl1271_unregister_hw(wl);
+ wl1271_free_hw(wl);
+}
+
+static struct sdio_driver wl1271_sdio_driver = {
+ .name = "wl1271_sdio",
+ .id_table = wl1271_devices,
+ .probe = wl1271_probe,
+ .remove = __devexit_p(wl1271_remove),
+};
+
+static int __init wl1271_init(void)
+{
+ int ret;
+
+ ret = sdio_register_driver(&wl1271_sdio_driver);
+ if (ret < 0) {
+ wl1271_error("failed to register sdio driver: %d", ret);
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+static void __exit wl1271_exit(void)
+{
+ sdio_unregister_driver(&wl1271_sdio_driver);
+
+ wl1271_notice("unloaded");
+}
+
+module_init(wl1271_init);
+module_exit(wl1271_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
+MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
+MODULE_FIRMWARE(WL1271_FW_NAME);
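
Both bus glues in this patch plug into the core through the same wl1271_if_operations table declared in wl1271_io.h, so the rest of the driver never needs to know whether the chip sits on SDIO or SPI. A minimal sketch of how the core side can dispatch through that table; the helper names below are illustrative only and not taken from the driver:

	/*
	 * Illustrative sketch only: raw chip I/O routed through the per-bus
	 * ops table that wl1271_sdio.c (above) and wl1271_spi.c (below) fill in.
	 */
	#include "wl1271.h"
	#include "wl1271_io.h"

	static inline void wl12xx_bus_read(struct wl1271 *wl, int addr,
					   void *buf, size_t len, bool fixed)
	{
		wl->if_ops->read(wl, addr, buf, len, fixed);	/* SDIO or SPI */
	}

	static inline void wl12xx_bus_power(struct wl1271 *wl, bool enable)
	{
		wl->if_ops->power(wl, enable);
	}
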
diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.c b/drivers/net/wireless/wl12xx/wl1271_spi.c
index 053c84aceb49..5189b812f939 100644
--- a/drivers/net/wireless/wl12xx/wl1271_spi.c
+++ b/drivers/net/wireless/wl12xx/wl1271_spi.c
@@ -21,18 +21,69 @@
*
*/
+#include <linux/irq.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
#include <linux/crc7.h>
#include <linux/spi/spi.h>
+#include <linux/spi/wl12xx.h>
#include <linux/slab.h>
#include "wl1271.h"
#include "wl12xx_80211.h"
-#include "wl1271_spi.h"
+#include "wl1271_io.h"
+
+#include "wl1271_reg.h"
+
+#define WSPI_CMD_READ 0x40000000
+#define WSPI_CMD_WRITE 0x00000000
+#define WSPI_CMD_FIXED 0x20000000
+#define WSPI_CMD_BYTE_LENGTH 0x1FFE0000
+#define WSPI_CMD_BYTE_LENGTH_OFFSET 17
+#define WSPI_CMD_BYTE_ADDR 0x0001FFFF
+
+#define WSPI_INIT_CMD_CRC_LEN 5
+
+#define WSPI_INIT_CMD_START 0x00
+#define WSPI_INIT_CMD_TX 0x40
+/* the extra bypass bit is sampled by the TNET as '1' */
+#define WSPI_INIT_CMD_BYPASS_BIT 0x80
+#define WSPI_INIT_CMD_FIXEDBUSY_LEN 0x07
+#define WSPI_INIT_CMD_EN_FIXEDBUSY 0x80
+#define WSPI_INIT_CMD_DIS_FIXEDBUSY 0x00
+#define WSPI_INIT_CMD_IOD 0x40
+#define WSPI_INIT_CMD_IP 0x20
+#define WSPI_INIT_CMD_CS 0x10
+#define WSPI_INIT_CMD_WS 0x08
+#define WSPI_INIT_CMD_WSPI 0x01
+#define WSPI_INIT_CMD_END 0x01
+
+#define WSPI_INIT_CMD_LEN 8
+
+#define HW_ACCESS_WSPI_FIXED_BUSY_LEN \
+ ((WL1271_BUSY_WORD_LEN - 4) / sizeof(u32))
+#define HW_ACCESS_WSPI_INIT_CMD_MASK 0
+
+static inline struct spi_device *wl_to_spi(struct wl1271 *wl)
+{
+ return wl->if_priv;
+}
+static struct device *wl1271_spi_wl_to_dev(struct wl1271 *wl)
+{
+ return &(wl_to_spi(wl)->dev);
+}
-void wl1271_spi_reset(struct wl1271 *wl)
+static void wl1271_spi_disable_interrupts(struct wl1271 *wl)
+{
+ disable_irq(wl->irq);
+}
+
+static void wl1271_spi_enable_interrupts(struct wl1271 *wl)
+{
+ enable_irq(wl->irq);
+}
+
+static void wl1271_spi_reset(struct wl1271 *wl)
{
u8 *cmd;
struct spi_transfer t;
@@ -53,12 +104,13 @@ void wl1271_spi_reset(struct wl1271 *wl)
t.len = WSPI_INIT_CMD_LEN;
spi_message_add_tail(&t, &m);
- spi_sync(wl->spi, &m);
+ spi_sync(wl_to_spi(wl), &m);
+ kfree(cmd);
wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
}
-void wl1271_spi_init(struct wl1271 *wl)
+static void wl1271_spi_init(struct wl1271 *wl)
{
u8 crc[WSPI_INIT_CMD_CRC_LEN], *cmd;
struct spi_transfer t;
@@ -107,48 +159,25 @@ void wl1271_spi_init(struct wl1271 *wl)
t.len = WSPI_INIT_CMD_LEN;
spi_message_add_tail(&t, &m);
- spi_sync(wl->spi, &m);
+ spi_sync(wl_to_spi(wl), &m);
+ kfree(cmd);
wl1271_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN);
}
#define WL1271_BUSY_WORD_TIMEOUT 1000
-/* FIXME: Check busy words, removed due to SPI bug */
-#if 0
-static void wl1271_spi_read_busy(struct wl1271 *wl, void *buf, size_t len)
+static int wl1271_spi_read_busy(struct wl1271 *wl)
{
struct spi_transfer t[1];
struct spi_message m;
u32 *busy_buf;
int num_busy_bytes = 0;
- wl1271_info("spi read BUSY!");
-
- /*
- * Look for the non-busy word in the read buffer, and if found,
- * read in the remaining data into the buffer.
- */
- busy_buf = (u32 *)buf;
- for (; (u32)busy_buf < (u32)buf + len; busy_buf++) {
- num_busy_bytes += sizeof(u32);
- if (*busy_buf & 0x1) {
- spi_message_init(&m);
- memset(t, 0, sizeof(t));
- memmove(buf, busy_buf, len - num_busy_bytes);
- t[0].rx_buf = buf + (len - num_busy_bytes);
- t[0].len = num_busy_bytes;
- spi_message_add_tail(&t[0], &m);
- spi_sync(wl->spi, &m);
- return;
- }
- }
-
/*
* Read further busy words from SPI until a non-busy word is
* encountered, then read the data itself into the buffer.
*/
- wl1271_info("spi read BUSY-polling needed!");
num_busy_bytes = WL1271_BUSY_WORD_TIMEOUT;
busy_buf = wl->buffer_busyword;
@@ -158,28 +187,21 @@ static void wl1271_spi_read_busy(struct wl1271 *wl, void *buf, size_t len)
memset(t, 0, sizeof(t));
t[0].rx_buf = busy_buf;
t[0].len = sizeof(u32);
+ t[0].cs_change = true;
spi_message_add_tail(&t[0], &m);
- spi_sync(wl->spi, &m);
-
- if (*busy_buf & 0x1) {
- spi_message_init(&m);
- memset(t, 0, sizeof(t));
- t[0].rx_buf = buf;
- t[0].len = len;
- spi_message_add_tail(&t[0], &m);
- spi_sync(wl->spi, &m);
- return;
- }
+ spi_sync(wl_to_spi(wl), &m);
+
+ if (*busy_buf & 0x1)
+ return 0;
}
/* The SPI bus is unresponsive, the read failed. */
- memset(buf, 0, len);
wl1271_error("SPI read busy-word timeout!\n");
+ return -ETIMEDOUT;
}
-#endif
-void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed)
+static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
+ size_t len, bool fixed)
{
struct spi_transfer t[3];
struct spi_message m;
@@ -202,28 +224,38 @@ void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
t[0].tx_buf = cmd;
t[0].len = 4;
+ t[0].cs_change = true;
spi_message_add_tail(&t[0], &m);
/* Busy and non busy words read */
t[1].rx_buf = busy_buf;
t[1].len = WL1271_BUSY_WORD_LEN;
+ t[1].cs_change = true;
spi_message_add_tail(&t[1], &m);
- t[2].rx_buf = buf;
- t[2].len = len;
- spi_message_add_tail(&t[2], &m);
+ spi_sync(wl_to_spi(wl), &m);
- spi_sync(wl->spi, &m);
+ if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1) &&
+ wl1271_spi_read_busy(wl)) {
+ memset(buf, 0, len);
+ return;
+ }
- /* FIXME: Check busy words, removed due to SPI bug */
- /* if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1))
- wl1271_spi_read_busy(wl, buf, len); */
+ spi_message_init(&m);
+ memset(t, 0, sizeof(t));
+
+ t[0].rx_buf = buf;
+ t[0].len = len;
+ t[0].cs_change = true;
+ spi_message_add_tail(&t[0], &m);
+
+ spi_sync(wl_to_spi(wl), &m);
wl1271_dump(DEBUG_SPI, "spi_read cmd -> ", cmd, sizeof(*cmd));
wl1271_dump(DEBUG_SPI, "spi_read buf <- ", buf, len);
}
-void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
+static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
size_t len, bool fixed)
{
struct spi_transfer t[2];
@@ -251,8 +283,181 @@ void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
t[1].len = len;
spi_message_add_tail(&t[1], &m);
- spi_sync(wl->spi, &m);
+ spi_sync(wl_to_spi(wl), &m);
wl1271_dump(DEBUG_SPI, "spi_write cmd -> ", cmd, sizeof(*cmd));
wl1271_dump(DEBUG_SPI, "spi_write buf -> ", buf, len);
}
+
+static irqreturn_t wl1271_irq(int irq, void *cookie)
+{
+ struct wl1271 *wl;
+ unsigned long flags;
+
+ wl1271_debug(DEBUG_IRQ, "IRQ");
+
+ wl = cookie;
+
+ /* complete the ELP completion */
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ if (wl->elp_compl) {
+ complete(wl->elp_compl);
+ wl->elp_compl = NULL;
+ }
+
+ if (!test_and_set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
+ ieee80211_queue_work(wl->hw, &wl->irq_work);
+ set_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static void wl1271_spi_set_power(struct wl1271 *wl, bool enable)
+{
+ if (wl->set_power)
+ wl->set_power(enable);
+}
+
+static struct wl1271_if_operations spi_ops = {
+ .read = wl1271_spi_raw_read,
+ .write = wl1271_spi_raw_write,
+ .reset = wl1271_spi_reset,
+ .init = wl1271_spi_init,
+ .power = wl1271_spi_set_power,
+ .dev = wl1271_spi_wl_to_dev,
+ .enable_irq = wl1271_spi_enable_interrupts,
+ .disable_irq = wl1271_spi_disable_interrupts
+};
+
+static int __devinit wl1271_probe(struct spi_device *spi)
+{
+ struct wl12xx_platform_data *pdata;
+ struct ieee80211_hw *hw;
+ struct wl1271 *wl;
+ int ret;
+
+ pdata = spi->dev.platform_data;
+ if (!pdata) {
+ wl1271_error("no platform data");
+ return -ENODEV;
+ }
+
+ hw = wl1271_alloc_hw();
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ wl = hw->priv;
+
+ dev_set_drvdata(&spi->dev, wl);
+ wl->if_priv = spi;
+
+ wl->if_ops = &spi_ops;
+
+ /* This is the only SPI value that we need to set here, the rest
+ * comes from the board-peripherals file */
+ spi->bits_per_word = 32;
+
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ wl1271_error("spi_setup failed");
+ goto out_free;
+ }
+
+ wl->set_power = pdata->set_power;
+ if (!wl->set_power) {
+ wl1271_error("set power function missing in platform data");
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ wl->irq = spi->irq;
+ if (wl->irq < 0) {
+ wl1271_error("irq missing in platform data");
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl);
+ if (ret < 0) {
+ wl1271_error("request_irq() failed: %d", ret);
+ goto out_free;
+ }
+
+ set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
+
+ disable_irq(wl->irq);
+
+ ret = wl1271_init_ieee80211(wl);
+ if (ret)
+ goto out_irq;
+
+ ret = wl1271_register_hw(wl);
+ if (ret)
+ goto out_irq;
+
+ wl1271_notice("initialized");
+
+ return 0;
+
+ out_irq:
+ free_irq(wl->irq, wl);
+
+ out_free:
+ wl1271_free_hw(wl);
+
+ return ret;
+}
+
+static int __devexit wl1271_remove(struct spi_device *spi)
+{
+ struct wl1271 *wl = dev_get_drvdata(&spi->dev);
+
+ free_irq(wl->irq, wl);
+
+ wl1271_unregister_hw(wl);
+ wl1271_free_hw(wl);
+
+ return 0;
+}
+
+
+static struct spi_driver wl1271_spi_driver = {
+ .driver = {
+ .name = "wl1271_spi",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+
+ .probe = wl1271_probe,
+ .remove = __devexit_p(wl1271_remove),
+};
+
+static int __init wl1271_init(void)
+{
+ int ret;
+
+ ret = spi_register_driver(&wl1271_spi_driver);
+ if (ret < 0) {
+ wl1271_error("failed to register spi driver: %d", ret);
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+static void __exit wl1271_exit(void)
+{
+ spi_unregister_driver(&wl1271_spi_driver);
+
+ wl1271_notice("unloaded");
+}
+
+module_init(wl1271_init);
+module_exit(wl1271_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
+MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
+MODULE_FIRMWARE(WL1271_FW_NAME);
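
The reworked SPI read path above sends the WSPI command and the fixed busy words in one message and only falls back to wl1271_spi_read_busy() when the last busy word does not have its ready bit set; the data itself is then fetched in a second message. A condensed sketch of the polling logic, with read_word() standing in for the single-word SPI transfer done in the loop above (assumed helper, not part of the driver):

	/*
	 * Sketch of the busy-word polling: read one 32-bit word at a time until
	 * the chip signals ready (bit 0 set) or the retry budget runs out.
	 */
	#include <linux/errno.h>

	static int poll_not_busy(u32 (*read_word)(void *ctx), void *ctx)
	{
		int budget = 1000;		/* WL1271_BUSY_WORD_TIMEOUT above */

		while (budget--) {
			if (read_word(ctx) & 0x1)
				return 0;	/* ready, data words follow */
		}
		return -ETIMEDOUT;		/* SPI bus unresponsive */
	}
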
diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.h b/drivers/net/wireless/wl12xx/wl1271_spi.h
deleted file mode 100644
index a803596dad4a..000000000000
--- a/drivers/net/wireless/wl12xx/wl1271_spi.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * This file is part of wl1271
- *
- * Copyright (C) 1998-2009 Texas Instruments. All rights reserved.
- * Copyright (C) 2008-2009 Nokia Corporation
- *
- * Contact: Luciano Coelho <luciano.coelho@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#ifndef __WL1271_SPI_H__
-#define __WL1271_SPI_H__
-
-#include "wl1271_reg.h"
-
-#define HW_ACCESS_MEMORY_MAX_RANGE 0x1FFC0
-
-#define HW_PARTITION_REGISTERS_ADDR 0x1ffc0
-#define HW_PART0_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR)
-#define HW_PART0_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 4)
-#define HW_PART1_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 8)
-#define HW_PART1_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 12)
-#define HW_PART2_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 16)
-#define HW_PART2_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 20)
-#define HW_PART3_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 24)
-
-#define HW_ACCESS_REGISTER_SIZE 4
-
-#define HW_ACCESS_PRAM_MAX_RANGE 0x3c000
-
-#define WSPI_CMD_READ 0x40000000
-#define WSPI_CMD_WRITE 0x00000000
-#define WSPI_CMD_FIXED 0x20000000
-#define WSPI_CMD_BYTE_LENGTH 0x1FFE0000
-#define WSPI_CMD_BYTE_LENGTH_OFFSET 17
-#define WSPI_CMD_BYTE_ADDR 0x0001FFFF
-
-#define WSPI_INIT_CMD_CRC_LEN 5
-
-#define WSPI_INIT_CMD_START 0x00
-#define WSPI_INIT_CMD_TX 0x40
-/* the extra bypass bit is sampled by the TNET as '1' */
-#define WSPI_INIT_CMD_BYPASS_BIT 0x80
-#define WSPI_INIT_CMD_FIXEDBUSY_LEN 0x07
-#define WSPI_INIT_CMD_EN_FIXEDBUSY 0x80
-#define WSPI_INIT_CMD_DIS_FIXEDBUSY 0x00
-#define WSPI_INIT_CMD_IOD 0x40
-#define WSPI_INIT_CMD_IP 0x20
-#define WSPI_INIT_CMD_CS 0x10
-#define WSPI_INIT_CMD_WS 0x08
-#define WSPI_INIT_CMD_WSPI 0x01
-#define WSPI_INIT_CMD_END 0x01
-
-#define WSPI_INIT_CMD_LEN 8
-
-#define HW_ACCESS_WSPI_FIXED_BUSY_LEN \
- ((WL1271_BUSY_WORD_LEN - 4) / sizeof(u32))
-#define HW_ACCESS_WSPI_INIT_CMD_MASK 0
-
-#define OCP_CMD_LOOP 32
-
-#define OCP_CMD_WRITE 0x1
-#define OCP_CMD_READ 0x2
-
-#define OCP_READY_MASK BIT(18)
-#define OCP_STATUS_MASK (BIT(16) | BIT(17))
-
-#define OCP_STATUS_NO_RESP 0x00000
-#define OCP_STATUS_OK 0x10000
-#define OCP_STATUS_REQ_FAILED 0x20000
-#define OCP_STATUS_RESP_ERROR 0x30000
-
-/* Raw target IO, address is not translated */
-void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed);
-void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed);
-
-/* INIT and RESET words */
-void wl1271_spi_reset(struct wl1271 *wl);
-void wl1271_spi_init(struct wl1271 *wl);
-#endif /* __WL1271_SPI_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_testmode.c b/drivers/net/wireless/wl12xx/wl1271_testmode.c
index 5c1c4f565fd8..554deb4d024e 100644
--- a/drivers/net/wireless/wl12xx/wl1271_testmode.c
+++ b/drivers/net/wireless/wl12xx/wl1271_testmode.c
@@ -26,7 +26,6 @@
#include <net/genetlink.h>
#include "wl1271.h"
-#include "wl1271_spi.h"
#include "wl1271_acx.h"
#define WL1271_TM_MAX_DATA_LENGTH 1024
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.c b/drivers/net/wireless/wl12xx/wl1271_tx.c
index 811e739d05bf..62db79508ddf 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.c
@@ -25,7 +25,6 @@
#include <linux/module.h>
#include "wl1271.h"
-#include "wl1271_spi.h"
#include "wl1271_io.h"
#include "wl1271_reg.h"
#include "wl1271_ps.h"
@@ -47,7 +46,7 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra)
{
struct wl1271_tx_hw_descr *desc;
u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
- u32 total_blocks, excluded;
+ u32 total_blocks;
int id, ret = -EBUSY;
/* allocate free identifier for the packet */
@@ -57,12 +56,8 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra)
/* approximate the number of blocks required for this packet
in the firmware */
- /* FIXME: try to figure out what is done here and make it cleaner */
- total_blocks = (total_len + 20) >> TX_HW_BLOCK_SHIFT_DIV;
- excluded = (total_blocks << 2) + ((total_len + 20) & 0xff) + 34;
- total_blocks += (excluded > 252) ? 2 : 1;
- total_blocks += TX_HW_BLOCK_SPARE;
-
+ total_blocks = total_len + TX_HW_BLOCK_SIZE - 1;
+ total_blocks = total_blocks / TX_HW_BLOCK_SIZE + TX_HW_BLOCK_SPARE;
if (total_blocks <= wl->tx_blocks_available) {
desc = (struct wl1271_tx_hw_descr *)skb_push(
skb, total_len - skb->len);
@@ -87,8 +82,10 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra)
static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
u32 extra, struct ieee80211_tx_info *control)
{
+ struct timespec ts;
struct wl1271_tx_hw_descr *desc;
int pad, ac;
+ s64 hosttime;
u16 tx_attr;
desc = (struct wl1271_tx_hw_descr *) skb->data;
@@ -102,8 +99,9 @@ static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
}
/* configure packet life time */
- desc->start_time = cpu_to_le32(jiffies_to_usecs(jiffies) -
- wl->time_offset);
+ getnstimeofday(&ts);
+ hosttime = (timespec_to_ns(&ts) >> 10);
+ desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
/* configure the tx attributes */
@@ -170,7 +168,6 @@ static int wl1271_tx_send_packet(struct wl1271 *wl, struct sk_buff *skb,
/* write packet new counter into the write access register */
wl->tx_packets_count++;
- wl1271_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count);
desc = (struct wl1271_tx_hw_descr *) skb->data;
wl1271_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u (%u words)",
@@ -223,7 +220,7 @@ static int wl1271_tx_frame(struct wl1271 *wl, struct sk_buff *skb)
return ret;
}
-static u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
+u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
{
struct ieee80211_supported_band *band;
u32 enabled_rates = 0;
@@ -245,6 +242,7 @@ void wl1271_tx_work(struct work_struct *work)
struct sk_buff *skb;
bool woken_up = false;
u32 sta_rates = 0;
+ u32 prev_tx_packets_count;
int ret;
/* check if the rates supported by the AP have changed */
@@ -261,6 +259,8 @@ void wl1271_tx_work(struct work_struct *work)
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
+ prev_tx_packets_count = wl->tx_packets_count;
+
/* if rates have changed, re-configure the rate policy */
if (unlikely(sta_rates)) {
wl->rate_set = wl1271_tx_enabled_rates_get(wl, sta_rates);
@@ -271,31 +271,26 @@ void wl1271_tx_work(struct work_struct *work)
if (!woken_up) {
ret = wl1271_ps_elp_wakeup(wl, false);
if (ret < 0)
- goto out;
+ goto out_ack;
woken_up = true;
}
ret = wl1271_tx_frame(wl, skb);
if (ret == -EBUSY) {
- /* firmware buffer is full, stop queues */
- wl1271_debug(DEBUG_TX, "tx_work: fw buffer full, "
- "stop queues");
- ieee80211_stop_queues(wl->hw);
- set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
+ /* firmware buffer is full, let's stop transmitting. */
skb_queue_head(&wl->tx_queue, skb);
- goto out;
+ goto out_ack;
} else if (ret < 0) {
dev_kfree_skb(skb);
- goto out;
- } else if (test_and_clear_bit(WL1271_FLAG_TX_QUEUE_STOPPED,
- &wl->flags)) {
- /* firmware buffer has space, restart queues */
- wl1271_debug(DEBUG_TX,
- "complete_packet: waking queues");
- ieee80211_wake_queues(wl->hw);
+ goto out_ack;
}
}
+out_ack:
+ /* interrupt the firmware with the new packets */
+ if (prev_tx_packets_count != wl->tx_packets_count)
+ wl1271_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count);
+
out:
if (woken_up)
wl1271_ps_elp_sleep(wl);
@@ -308,11 +303,12 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
{
struct ieee80211_tx_info *info;
struct sk_buff *skb;
- u16 seq;
int id = result->id;
+ int rate = -1;
+ u8 retries = 0;
/* check for id legality */
- if (id >= ACX_TX_DESCRIPTORS || wl->tx_frames[id] == NULL) {
+ if (unlikely(id >= ACX_TX_DESCRIPTORS || wl->tx_frames[id] == NULL)) {
wl1271_warning("TX result illegal id: %d", id);
return;
}
@@ -320,31 +316,29 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
skb = wl->tx_frames[id];
info = IEEE80211_SKB_CB(skb);
- /* update packet status */
- if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
- if (result->status == TX_SUCCESS)
+ /* update the TX status info */
+ if (result->status == TX_SUCCESS) {
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
info->flags |= IEEE80211_TX_STAT_ACK;
- if (result->status & TX_RETRY_EXCEEDED) {
- /* FIXME */
- /* info->status.excessive_retries = 1; */
- wl->stats.excessive_retries++;
- }
+ rate = wl1271_rate_to_idx(wl, result->rate_class_index);
+ retries = result->ack_failures;
+ } else if (result->status == TX_RETRY_EXCEEDED) {
+ wl->stats.excessive_retries++;
+ retries = result->ack_failures;
}
- /* FIXME */
- /* info->status.retry_count = result->ack_failures; */
+ info->status.rates[0].idx = rate;
+ info->status.rates[0].count = retries;
+ info->status.rates[0].flags = 0;
+ info->status.ack_signal = -1;
+
wl->stats.retry_count += result->ack_failures;
/* update security sequence number */
- seq = wl->tx_security_seq_16 +
- (result->lsb_security_sequence_number -
- wl->tx_security_last_seq);
+ wl->tx_security_seq += (result->lsb_security_sequence_number -
+ wl->tx_security_last_seq);
wl->tx_security_last_seq = result->lsb_security_sequence_number;
- if (seq < wl->tx_security_seq_16)
- wl->tx_security_seq_32++;
- wl->tx_security_seq_16 = seq;
-
/* remove private header from packet */
skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
@@ -367,23 +361,29 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
}
/* Called upon reception of a TX complete interrupt */
-void wl1271_tx_complete(struct wl1271 *wl, u32 count)
+void wl1271_tx_complete(struct wl1271 *wl)
{
struct wl1271_acx_mem_map *memmap =
(struct wl1271_acx_mem_map *)wl->target_mem_map;
+ u32 count, fw_counter;
u32 i;
- wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);
-
/* read the tx results from the chipset */
wl1271_read(wl, le32_to_cpu(memmap->tx_result),
wl->tx_res_if, sizeof(*wl->tx_res_if), false);
+ fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);
+
+ /* write host counter to chipset (to ack) */
+ wl1271_write32(wl, le32_to_cpu(memmap->tx_result) +
+ offsetof(struct wl1271_tx_hw_res_if,
+ tx_result_host_counter), fw_counter);
+
+ count = fw_counter - wl->tx_results_count;
+ wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);
/* verify that the result buffer is not getting overrun */
- if (count > TX_HW_RESULT_QUEUE_LEN) {
+ if (unlikely(count > TX_HW_RESULT_QUEUE_LEN))
wl1271_warning("TX result overflow from chipset: %d", count);
- count = TX_HW_RESULT_QUEUE_LEN;
- }
/* process the results */
for (i = 0; i < count; i++) {
@@ -397,11 +397,18 @@ void wl1271_tx_complete(struct wl1271 *wl, u32 count)
wl->tx_results_count++;
}
- /* write host counter to chipset (to ack) */
- wl1271_write32(wl, le32_to_cpu(memmap->tx_result) +
- offsetof(struct wl1271_tx_hw_res_if,
- tx_result_host_counter),
- le32_to_cpu(wl->tx_res_if->tx_result_fw_counter));
+ if (test_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags) &&
+ skb_queue_len(&wl->tx_queue) <= WL1271_TX_QUEUE_LOW_WATERMARK) {
+ unsigned long flags;
+
+ /* firmware buffer has space, restart queues */
+ wl1271_debug(DEBUG_TX, "tx_complete: waking queues");
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ ieee80211_wake_queues(wl->hw);
+ clear_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+ ieee80211_queue_work(wl->hw, &wl->tx_work);
+ }
}
/* caller must hold wl->mutex */
@@ -409,31 +416,19 @@ void wl1271_tx_flush(struct wl1271 *wl)
{
int i;
struct sk_buff *skb;
- struct ieee80211_tx_info *info;
/* TX failure */
/* control->flags = 0; FIXME */
while ((skb = skb_dequeue(&wl->tx_queue))) {
- info = IEEE80211_SKB_CB(skb);
-
wl1271_debug(DEBUG_TX, "flushing skb 0x%p", skb);
-
- if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS))
- continue;
-
ieee80211_tx_status(wl->hw, skb);
}
for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
if (wl->tx_frames[i] != NULL) {
skb = wl->tx_frames[i];
- info = IEEE80211_SKB_CB(skb);
-
- if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS))
- continue;
-
- ieee80211_tx_status(wl->hw, skb);
wl->tx_frames[i] = NULL;
+ ieee80211_tx_status(wl->hw, skb);
}
}
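
wl1271_tx_complete() above now derives how many results to process from the free-running firmware counter instead of taking a count from the caller, and acks the counter back to the chipset before processing. With unsigned 32-bit counters the delta stays correct across wraparound; a minimal sketch of that arithmetic (helper name is illustrative):

	/*
	 * Sketch: TX results pending between the firmware counter and the host
	 * counter.  Unsigned subtraction handles 32-bit wraparound, e.g.
	 * fw = 5, host = 0xfffffffe  ->  5 - 0xfffffffe = 7 (mod 2^32).
	 */
	static u32 pending_tx_results(u32 fw_counter, u32 host_counter)
	{
		return fw_counter - host_counter;
	}
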
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.h b/drivers/net/wireless/wl12xx/wl1271_tx.h
index 17e405a09caa..3b8b7ac253fd 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.h
@@ -26,7 +26,7 @@
#define __WL1271_TX_H__
#define TX_HW_BLOCK_SPARE 2
-#define TX_HW_BLOCK_SHIFT_DIV 8
+#define TX_HW_BLOCK_SIZE 252
#define TX_HW_MGMT_PKT_LIFETIME_TU 2000
/* The chipset reference driver states, that the "aid" value 1
@@ -125,9 +125,6 @@ struct wl1271_tx_hw_res_if {
static inline int wl1271_tx_get_queue(int queue)
{
- /* FIXME: use best effort until WMM is enabled */
- return CONF_TX_AC_BE;
-
switch (queue) {
case 0:
return CONF_TX_AC_VO;
@@ -160,7 +157,9 @@ static inline int wl1271_tx_ac_to_tid(int ac)
}
void wl1271_tx_work(struct work_struct *work);
-void wl1271_tx_complete(struct wl1271 *wl, u32 count);
+void wl1271_tx_complete(struct wl1271 *wl);
void wl1271_tx_flush(struct wl1271 *wl);
+u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate);
+u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set);
#endif
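
The TX block accounting rewritten in wl1271_tx.c earlier in this patch reduces to ceiling division by the new TX_HW_BLOCK_SIZE plus a fixed spare, using the constants defined just above. A small worked sketch (standalone; constants copied from this header):

	#define TX_HW_BLOCK_SPARE	2
	#define TX_HW_BLOCK_SIZE	252

	/*
	 * Blocks needed for a frame of total_len bytes (descriptor and extra
	 * space already included), e.g. total_len = 300:
	 *   (300 + 251) / 252 = 2, plus 2 spare = 4 blocks.
	 */
	static unsigned int tx_blocks_needed(unsigned int total_len)
	{
		return (total_len + TX_HW_BLOCK_SIZE - 1) / TX_HW_BLOCK_SIZE
			+ TX_HW_BLOCK_SPARE;
	}
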
diff --git a/drivers/net/wireless/wl3501.h b/drivers/net/wireless/wl3501.h
index 8bce1a550a22..8816e371fd0e 100644
--- a/drivers/net/wireless/wl3501.h
+++ b/drivers/net/wireless/wl3501.h
@@ -610,7 +610,6 @@ struct wl3501_card {
struct iw_statistics wstats;
struct iw_spy_data spy_data;
struct iw_public_data wireless_data;
- struct dev_node_t node;
struct pcmcia_device *p_dev;
};
#endif
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 7b9621de239f..630e7948f52c 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1307,7 +1307,7 @@ static void wl3501_tx_timeout(struct net_device *dev)
printk(KERN_ERR "%s: Error %d resetting card on Tx timeout!\n",
dev->name, rc);
else {
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
}
@@ -1326,7 +1326,6 @@ static netdev_tx_t wl3501_hard_start_xmit(struct sk_buff *skb,
spin_lock_irqsave(&this->lock, flags);
enabled = wl3501_block_interrupt(this);
- dev->trans_start = jiffies;
rc = wl3501_send_pkt(this, skb->data, skb->len);
if (enabled)
wl3501_unblock_interrupt(this);
@@ -1451,6 +1450,8 @@ static void wl3501_detach(struct pcmcia_device *link)
netif_device_detach(dev);
wl3501_release(link);
+ unregister_netdev(dev);
+
if (link->priv)
free_netdev(link->priv);
@@ -1834,32 +1835,32 @@ out:
}
static const iw_handler wl3501_handler[] = {
- [SIOCGIWNAME - SIOCIWFIRST] = wl3501_get_name,
- [SIOCSIWFREQ - SIOCIWFIRST] = wl3501_set_freq,
- [SIOCGIWFREQ - SIOCIWFIRST] = wl3501_get_freq,
- [SIOCSIWMODE - SIOCIWFIRST] = wl3501_set_mode,
- [SIOCGIWMODE - SIOCIWFIRST] = wl3501_get_mode,
- [SIOCGIWSENS - SIOCIWFIRST] = wl3501_get_sens,
- [SIOCGIWRANGE - SIOCIWFIRST] = wl3501_get_range,
- [SIOCSIWSPY - SIOCIWFIRST] = iw_handler_set_spy,
- [SIOCGIWSPY - SIOCIWFIRST] = iw_handler_get_spy,
- [SIOCSIWTHRSPY - SIOCIWFIRST] = iw_handler_set_thrspy,
- [SIOCGIWTHRSPY - SIOCIWFIRST] = iw_handler_get_thrspy,
- [SIOCSIWAP - SIOCIWFIRST] = wl3501_set_wap,
- [SIOCGIWAP - SIOCIWFIRST] = wl3501_get_wap,
- [SIOCSIWSCAN - SIOCIWFIRST] = wl3501_set_scan,
- [SIOCGIWSCAN - SIOCIWFIRST] = wl3501_get_scan,
- [SIOCSIWESSID - SIOCIWFIRST] = wl3501_set_essid,
- [SIOCGIWESSID - SIOCIWFIRST] = wl3501_get_essid,
- [SIOCSIWNICKN - SIOCIWFIRST] = wl3501_set_nick,
- [SIOCGIWNICKN - SIOCIWFIRST] = wl3501_get_nick,
- [SIOCGIWRATE - SIOCIWFIRST] = wl3501_get_rate,
- [SIOCGIWRTS - SIOCIWFIRST] = wl3501_get_rts_threshold,
- [SIOCGIWFRAG - SIOCIWFIRST] = wl3501_get_frag_threshold,
- [SIOCGIWTXPOW - SIOCIWFIRST] = wl3501_get_txpow,
- [SIOCGIWRETRY - SIOCIWFIRST] = wl3501_get_retry,
- [SIOCGIWENCODE - SIOCIWFIRST] = wl3501_get_encode,
- [SIOCGIWPOWER - SIOCIWFIRST] = wl3501_get_power,
+ IW_HANDLER(SIOCGIWNAME, wl3501_get_name),
+ IW_HANDLER(SIOCSIWFREQ, wl3501_set_freq),
+ IW_HANDLER(SIOCGIWFREQ, wl3501_get_freq),
+ IW_HANDLER(SIOCSIWMODE, wl3501_set_mode),
+ IW_HANDLER(SIOCGIWMODE, wl3501_get_mode),
+ IW_HANDLER(SIOCGIWSENS, wl3501_get_sens),
+ IW_HANDLER(SIOCGIWRANGE, wl3501_get_range),
+ IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
+ IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
+ IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
+ IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
+ IW_HANDLER(SIOCSIWAP, wl3501_set_wap),
+ IW_HANDLER(SIOCGIWAP, wl3501_get_wap),
+ IW_HANDLER(SIOCSIWSCAN, wl3501_set_scan),
+ IW_HANDLER(SIOCGIWSCAN, wl3501_get_scan),
+ IW_HANDLER(SIOCSIWESSID, wl3501_set_essid),
+ IW_HANDLER(SIOCGIWESSID, wl3501_get_essid),
+ IW_HANDLER(SIOCSIWNICKN, wl3501_set_nick),
+ IW_HANDLER(SIOCGIWNICKN, wl3501_get_nick),
+ IW_HANDLER(SIOCGIWRATE, wl3501_get_rate),
+ IW_HANDLER(SIOCGIWRTS, wl3501_get_rts_threshold),
+ IW_HANDLER(SIOCGIWFRAG, wl3501_get_frag_threshold),
+ IW_HANDLER(SIOCGIWTXPOW, wl3501_get_txpow),
+ IW_HANDLER(SIOCGIWRETRY, wl3501_get_retry),
+ IW_HANDLER(SIOCGIWENCODE, wl3501_get_encode),
+ IW_HANDLER(SIOCGIWPOWER, wl3501_get_power),
};
static const struct iw_handler_def wl3501_handler_def = {
@@ -1897,10 +1898,6 @@ static int wl3501_probe(struct pcmcia_device *p_dev)
p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
p_dev->io.IOAddrLines = 5;
- /* Interrupt setup */
- p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- p_dev->irq.Handler = wl3501_interrupt;
-
/* General socket configuration */
p_dev->conf.Attributes = CONF_ENABLE_IRQ;
p_dev->conf.IntType = INT_MEMORY_AND_IO;
@@ -1961,7 +1958,7 @@ static int wl3501_config(struct pcmcia_device *link)
/* Now allocate an interrupt line. Note that this does not actually
* assign a handler to the interrupt. */
- ret = pcmcia_request_irq(link, &link->irq);
+ ret = pcmcia_request_irq(link, wl3501_interrupt);
if (ret)
goto failed;
@@ -1972,7 +1969,7 @@ static int wl3501_config(struct pcmcia_device *link)
if (ret)
goto failed;
- dev->irq = link->irq.AssignedIRQ;
+ dev->irq = link->irq;
dev->base_addr = link->io.BasePort1;
SET_NETDEV_DEV(dev, &link->dev);
if (register_netdev(dev)) {
@@ -1981,20 +1978,15 @@ static int wl3501_config(struct pcmcia_device *link)
}
this = netdev_priv(dev);
- /*
- * At this point, the dev_node_t structure(s) should be initialized and
- * arranged in a linked list at link->dev_node.
- */
- link->dev_node = &this->node;
this->base_addr = dev->base_addr;
if (!wl3501_get_flash_mac_addr(this)) {
printk(KERN_WARNING "%s: Cant read MAC addr in flash ROM?\n",
dev->name);
+ unregister_netdev(dev);
goto failed;
}
- strcpy(this->node.dev_name, dev->name);
for (i = 0; i < 6; i++)
dev->dev_addr[i] = ((char *)&this->mac_addr)[i];
@@ -2038,12 +2030,6 @@ failed:
*/
static void wl3501_release(struct pcmcia_device *link)
{
- struct net_device *dev = link->priv;
-
- /* Unlink the device chain */
- if (link->dev_node)
- unregister_netdev(dev);
-
pcmcia_disable_device(link);
}
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 9d1277874645..ece86a5d3355 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -827,7 +827,6 @@ static netdev_tx_t zd1201_hard_start_xmit(struct sk_buff *skb,
} else {
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
- dev->trans_start = jiffies;
}
kfree_skb(skb);
@@ -845,7 +844,7 @@ static void zd1201_tx_timeout(struct net_device *dev)
usb_unlink_urb(zd->tx_urb);
dev->stats.tx_errors++;
/* Restart the timeout to quiet the watchdog: */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
}
static int zd1201_set_mac_address(struct net_device *dev, void *p)
@@ -876,7 +875,7 @@ static struct iw_statistics *zd1201_get_wireless_stats(struct net_device *dev)
static void zd1201_set_multicast(struct net_device *dev)
{
struct zd1201 *zd = netdev_priv(dev);
- struct dev_mc_list *mc;
+ struct netdev_hw_addr *ha;
unsigned char reqbuf[ETH_ALEN*ZD1201_MAXMULTI];
int i;
@@ -884,8 +883,8 @@ static void zd1201_set_multicast(struct net_device *dev)
return;
i = 0;
- netdev_for_each_mc_addr(mc, dev)
- memcpy(reqbuf + i++ * ETH_ALEN, mc->dmi_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, dev)
+ memcpy(reqbuf + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
zd1201_setconfig(zd, ZD1201_RID_CNFGROUPADDRESS, reqbuf,
netdev_mc_count(dev) * ETH_ALEN, 0);
}
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 16fa289ad77b..b0b666019a93 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -948,20 +948,17 @@ static void set_rx_filter_handler(struct work_struct *work)
}
static u64 zd_op_prepare_multicast(struct ieee80211_hw *hw,
- int mc_count, struct dev_addr_list *mclist)
+ struct netdev_hw_addr_list *mc_list)
{
struct zd_mac *mac = zd_hw_mac(hw);
struct zd_mc_hash hash;
- int i;
+ struct netdev_hw_addr *ha;
zd_mc_clear(&hash);
- for (i = 0; i < mc_count; i++) {
- if (!mclist)
- break;
- dev_dbg_f(zd_mac_dev(mac), "mc addr %pM\n", mclist->dmi_addr);
- zd_mc_add_addr(&hash, mclist->dmi_addr);
- mclist = mclist->next;
+ netdev_hw_addr_list_for_each(ha, mc_list) {
+ dev_dbg_f(zd_mac_dev(mac), "mc addr %pM\n", ha->addr);
+ zd_mc_add_addr(&hash, ha->addr);
}
return hash.low | ((u64)hash.high << 32);
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index 1e783ccc306e..a7db68d37ee9 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -558,7 +558,7 @@ static void xemaclite_tx_timeout(struct net_device *dev)
}
/* To exclude tx timeout */
- dev->trans_start = 0xffffffff - TX_TIMEOUT - TX_TIMEOUT;
+ dev->trans_start = jiffies; /* prevent tx timeout */
/* We're all ready to go. Start the queue */
netif_wake_queue(dev);
@@ -590,7 +590,7 @@ static void xemaclite_tx_handler(struct net_device *dev)
dev->stats.tx_bytes += lp->deferred_skb->len;
dev_kfree_skb_irq(lp->deferred_skb);
lp->deferred_skb = NULL;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
}
@@ -639,7 +639,6 @@ static void xemaclite_rx_handler(struct net_device *dev)
}
skb_put(skb, len); /* Tell the skb how much data we got */
- skb->dev = dev; /* Fill out required meta-data */
skb->protocol = eth_type_trans(skb, dev);
skb->ip_summed = CHECKSUM_NONE;
@@ -1055,7 +1054,6 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
dev->stats.tx_bytes += len;
dev_kfree_skb(new_skb);
- dev->trans_start = jiffies;
return 0;
}
@@ -1172,7 +1170,7 @@ static int __devinit xemaclite_of_probe(struct of_device *ofdev,
}
/* Get the virtual base address for the device */
- lp->base_addr = ioremap(r_mem.start, r_mem.end - r_mem.start + 1);
+ lp->base_addr = ioremap(r_mem.start, resource_size(&r_mem));
if (NULL == lp->base_addr) {
dev_err(dev, "EmacLite: Could not allocate iomem\n");
rc = -EIO;
@@ -1225,7 +1223,7 @@ static int __devinit xemaclite_of_probe(struct of_device *ofdev,
return 0;
error1:
- release_mem_region(ndev->mem_start, r_mem.end - r_mem.start + 1);
+ release_mem_region(ndev->mem_start, resource_size(&r_mem));
error2:
xemaclite_remove_ndev(ndev);
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index ede5b2436f22..efbff76a9908 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -1299,25 +1299,25 @@ static void set_rx_mode(struct net_device *dev)
/* Too many to filter well, or accept all multicasts. */
iowrite16(0x000B, ioaddr + AddrMode);
} else if (!netdev_mc_empty(dev)) { /* Must use the multicast hash table. */
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
u16 hash_table[4];
int i;
memset(hash_table, 0, sizeof(hash_table));
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
unsigned int bit;
/* Due to a bug in the early chip versions, multiple filter
slots must be set for each address. */
if (yp->drv_flags & HasMulticastBug) {
- bit = (ether_crc_le(3, mclist->dmi_addr) >> 3) & 0x3f;
+ bit = (ether_crc_le(3, ha->addr) >> 3) & 0x3f;
hash_table[bit >> 4] |= (1 << bit);
- bit = (ether_crc_le(4, mclist->dmi_addr) >> 3) & 0x3f;
+ bit = (ether_crc_le(4, ha->addr) >> 3) & 0x3f;
hash_table[bit >> 4] |= (1 << bit);
- bit = (ether_crc_le(5, mclist->dmi_addr) >> 3) & 0x3f;
+ bit = (ether_crc_le(5, ha->addr) >> 3) & 0x3f;
hash_table[bit >> 4] |= (1 << bit);
}
- bit = (ether_crc_le(6, mclist->dmi_addr) >> 3) & 0x3f;
+ bit = (ether_crc_le(6, ha->addr) >> 3) & 0x3f;
hash_table[bit >> 4] |= (1 << bit);
}
/* Copy the hash table to the chip. */
diff --git a/drivers/net/znet.c b/drivers/net/znet.c
index dbfef8d70f2d..b9fd2f0cdd3d 100644
--- a/drivers/net/znet.c
+++ b/drivers/net/znet.c
@@ -587,7 +587,6 @@ static netdev_tx_t znet_send_packet(struct sk_buff *skb, struct net_device *dev)
}
spin_unlock_irqrestore (&znet->lock, flags);
- dev->trans_start = jiffies;
netif_start_queue (dev);
if (znet_debug > 4)
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index dee4fb56b094..b6987bba8556 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -556,6 +556,21 @@ void __init unflatten_device_tree(void)
pr_debug(" -> unflatten_device_tree()\n");
+ if (!initial_boot_params) {
+ pr_debug("No device tree pointer\n");
+ return;
+ }
+
+ pr_debug("Unflattening device tree:\n");
+ pr_debug("magic: %08x\n", be32_to_cpu(initial_boot_params->magic));
+ pr_debug("size: %08x\n", be32_to_cpu(initial_boot_params->totalsize));
+ pr_debug("version: %08x\n", be32_to_cpu(initial_boot_params->version));
+
+ if (be32_to_cpu(initial_boot_params->magic) != OF_DT_HEADER) {
+ pr_err("Invalid device tree blob header\n");
+ return;
+ }
+
/* First pass, scan for size */
start = ((unsigned long)initial_boot_params) +
be32_to_cpu(initial_boot_params->off_dt_struct);
diff --git a/drivers/of/of_i2c.c b/drivers/of/of_i2c.c
index a3a708e590d0..e690a2aa6fef 100644
--- a/drivers/of/of_i2c.c
+++ b/drivers/of/of_i2c.c
@@ -43,6 +43,7 @@ void of_register_i2c_devices(struct i2c_adapter *adap,
info.addr = be32_to_cpup(addr);
dev_archdata_set_node(&dev_ad, node);
+ info.of_node = node;
info.archdata = &dev_ad;
request_module("%s", info.type);
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index b4748337223b..48098cbbfd69 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -80,6 +80,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
* can be looked up later */
of_node_get(child);
dev_archdata_set_node(&phy->dev.archdata, child);
+ phy->dev.of_node = child;
/* All data is now stored in the phy struct; register it */
rc = phy_device_register(phy);
diff --git a/drivers/of/of_spi.c b/drivers/of/of_spi.c
index f65f48b98448..f3119a0836af 100644
--- a/drivers/of/of_spi.c
+++ b/drivers/of/of_spi.c
@@ -79,6 +79,7 @@ void of_register_spi_devices(struct spi_master *master, struct device_node *np)
/* Store a pointer to the node in the device structure */
of_node_get(nc);
+ spi->dev.of_node = nc;
spi->dev.archdata.of_node = nc;
/* Register the new device */
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index d58ade170c4b..9fd7f7d0a0d1 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -22,8 +22,7 @@ extern struct device_attribute of_platform_device_attrs[];
static int of_platform_bus_match(struct device *dev, struct device_driver *drv)
{
struct of_device *of_dev = to_of_device(dev);
- struct of_platform_driver *of_drv = to_of_platform_driver(drv);
- const struct of_device_id *matches = of_drv->match_table;
+ const struct of_device_id *matches = drv->of_match_table;
if (!matches)
return 0;
@@ -46,7 +45,7 @@ static int of_platform_device_probe(struct device *dev)
of_dev_get(of_dev);
- match = of_match_device(drv->match_table, of_dev);
+ match = of_match_device(drv->driver.of_match_table, of_dev);
if (match)
error = drv->probe(of_dev, match);
if (error)
@@ -391,6 +390,8 @@ int of_register_driver(struct of_platform_driver *drv, struct bus_type *bus)
drv->driver.name = drv->name;
if (!drv->driver.owner)
drv->driver.owner = drv->owner;
+ if (!drv->driver.of_match_table)
+ drv->driver.of_match_table = drv->match_table;
drv->driver.bus = bus;
/* register with core */
diff --git a/drivers/parport/parport_cs.c b/drivers/parport/parport_cs.c
index 7dd370fa3439..fd8cfe95f0a3 100644
--- a/drivers/parport/parport_cs.c
+++ b/drivers/parport/parport_cs.c
@@ -75,7 +75,6 @@ INT_MODULE_PARM(epp_mode, 1);
typedef struct parport_info_t {
struct pcmcia_device *p_dev;
int ndev;
- dev_node_t node;
struct parport *port;
} parport_info_t;
@@ -105,7 +104,6 @@ static int parport_probe(struct pcmcia_device *link)
link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -174,20 +172,19 @@ static int parport_config(struct pcmcia_device *link)
if (ret)
goto failed;
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
+ if (!link->irq)
goto failed;
ret = pcmcia_request_configuration(link, &link->conf);
if (ret)
goto failed;
p = parport_pc_probe_port(link->io.BasePort1, link->io.BasePort2,
- link->irq.AssignedIRQ, PARPORT_DMA_NONE,
+ link->irq, PARPORT_DMA_NONE,
&link->dev, IRQF_SHARED);
if (p == NULL) {
printk(KERN_NOTICE "parport_cs: parport_pc_probe_port() at "
"0x%3x, irq %u failed\n", link->io.BasePort1,
- link->irq.AssignedIRQ);
+ link->irq);
goto failed;
}
@@ -195,11 +192,7 @@ static int parport_config(struct pcmcia_device *link)
if (epp_mode)
p->modes |= PARPORT_MODE_TRISTATE | PARPORT_MODE_EPP;
info->ndev = 1;
- info->node.major = LP_MAJOR;
- info->node.minor = p->number;
info->port = p;
- strcpy(info->node.dev_name, p->name);
- link->dev_node = &info->node;
return 0;
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 7858a117e80b..34ef70d562b2 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -19,7 +19,7 @@ config PCI_MSI
by using the 'pci=nomsi' option. This disables MSI for the
entire system.
- If you don't know what to do here, say N.
+ If you don't know what to do here, say Y.
config PCI_DEBUG
bool "PCI Debugging"
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 2f646fe1260f..affb83b42ebb 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -13,7 +13,7 @@
* configuration space.
*/
-static DEFINE_SPINLOCK(pci_lock);
+static DEFINE_RAW_SPINLOCK(pci_lock);
/*
* Wrappers for all PCI configuration access functions. They just check
@@ -33,10 +33,10 @@ int pci_bus_read_config_##size \
unsigned long flags; \
u32 data = 0; \
if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
- spin_lock_irqsave(&pci_lock, flags); \
+ raw_spin_lock_irqsave(&pci_lock, flags); \
res = bus->ops->read(bus, devfn, pos, len, &data); \
*value = (type)data; \
- spin_unlock_irqrestore(&pci_lock, flags); \
+ raw_spin_unlock_irqrestore(&pci_lock, flags); \
return res; \
}
@@ -47,9 +47,9 @@ int pci_bus_write_config_##size \
int res; \
unsigned long flags; \
if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
- spin_lock_irqsave(&pci_lock, flags); \
+ raw_spin_lock_irqsave(&pci_lock, flags); \
res = bus->ops->write(bus, devfn, pos, len, value); \
- spin_unlock_irqrestore(&pci_lock, flags); \
+ raw_spin_unlock_irqrestore(&pci_lock, flags); \
return res; \
}
@@ -79,10 +79,10 @@ struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops)
struct pci_ops *old_ops;
unsigned long flags;
- spin_lock_irqsave(&pci_lock, flags);
+ raw_spin_lock_irqsave(&pci_lock, flags);
old_ops = bus->ops;
bus->ops = ops;
- spin_unlock_irqrestore(&pci_lock, flags);
+ raw_spin_unlock_irqrestore(&pci_lock, flags);
return old_ops;
}
EXPORT_SYMBOL(pci_bus_set_ops);
@@ -136,9 +136,9 @@ static noinline void pci_wait_ucfg(struct pci_dev *dev)
__add_wait_queue(&pci_ucfg_wait, &wait);
do {
set_current_state(TASK_UNINTERRUPTIBLE);
- spin_unlock_irq(&pci_lock);
+ raw_spin_unlock_irq(&pci_lock);
schedule();
- spin_lock_irq(&pci_lock);
+ raw_spin_lock_irq(&pci_lock);
} while (dev->block_ucfg_access);
__remove_wait_queue(&pci_ucfg_wait, &wait);
}
@@ -150,11 +150,11 @@ int pci_user_read_config_##size \
int ret = 0; \
u32 data = -1; \
if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
- spin_lock_irq(&pci_lock); \
+ raw_spin_lock_irq(&pci_lock); \
if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev); \
ret = dev->bus->ops->read(dev->bus, dev->devfn, \
pos, sizeof(type), &data); \
- spin_unlock_irq(&pci_lock); \
+ raw_spin_unlock_irq(&pci_lock); \
*val = (type)data; \
return ret; \
}
@@ -165,11 +165,11 @@ int pci_user_write_config_##size \
{ \
int ret = -EIO; \
if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
- spin_lock_irq(&pci_lock); \
+ raw_spin_lock_irq(&pci_lock); \
if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev); \
ret = dev->bus->ops->write(dev->bus, dev->devfn, \
pos, sizeof(type), val); \
- spin_unlock_irq(&pci_lock); \
+ raw_spin_unlock_irq(&pci_lock); \
return ret; \
}
@@ -396,10 +396,10 @@ void pci_block_user_cfg_access(struct pci_dev *dev)
unsigned long flags;
int was_blocked;
- spin_lock_irqsave(&pci_lock, flags);
+ raw_spin_lock_irqsave(&pci_lock, flags);
was_blocked = dev->block_ucfg_access;
dev->block_ucfg_access = 1;
- spin_unlock_irqrestore(&pci_lock, flags);
+ raw_spin_unlock_irqrestore(&pci_lock, flags);
/* If we BUG() inside the pci_lock, we're guaranteed to hose
* the machine */
@@ -417,7 +417,7 @@ void pci_unblock_user_cfg_access(struct pci_dev *dev)
{
unsigned long flags;
- spin_lock_irqsave(&pci_lock, flags);
+ raw_spin_lock_irqsave(&pci_lock, flags);
/* This indicates a problem in the caller, but we don't need
* to kill them, unlike a double-block above. */
@@ -425,6 +425,6 @@ void pci_unblock_user_cfg_access(struct pci_dev *dev)
dev->block_ucfg_access = 0;
wake_up_all(&pci_ucfg_wait);
- spin_unlock_irqrestore(&pci_lock, flags);
+ raw_spin_unlock_irqrestore(&pci_lock, flags);
}
EXPORT_SYMBOL_GPL(pci_unblock_user_cfg_access);
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 33ead97f0c4b..a198ce8b3faa 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -131,9 +131,10 @@ static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
(*cnt)++;
- else
+ else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
printk(KERN_WARNING PREFIX
- "Unsupported device scope\n");
+ "Unsupported device scope\n");
+ }
start += scope->length;
}
if (*cnt == 0)
@@ -309,6 +310,8 @@ int dmar_find_matched_atsr_unit(struct pci_dev *dev)
struct acpi_dmar_atsr *atsr;
struct dmar_atsr_unit *atsru;
+ dev = pci_physfn(dev);
+
list_for_each_entry(atsru, &dmar_atsr_units, list) {
atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
if (atsr->segment == pci_domain_nr(dev->bus))
@@ -507,7 +510,7 @@ parse_dmar_table(void)
return ret;
}
-int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
+static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
struct pci_dev *dev)
{
int index;
@@ -530,6 +533,8 @@ dmar_find_matched_drhd_unit(struct pci_dev *dev)
struct dmar_drhd_unit *dmaru = NULL;
struct acpi_dmar_hardware_unit *drhd;
+ dev = pci_physfn(dev);
+
list_for_each_entry(dmaru, &dmar_drhd_units, list) {
drhd = container_of(dmaru->hdr,
struct acpi_dmar_hardware_unit,
@@ -806,7 +811,8 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
}
ver = readl(iommu->reg + DMAR_VER_REG);
- pr_info("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
+ pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
+ iommu->seq_id,
(unsigned long long)drhd->reg_base_addr,
DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
(unsigned long long)iommu->cap,
@@ -1457,9 +1463,11 @@ int dmar_reenable_qi(struct intel_iommu *iommu)
/*
* Check interrupt remapping support in DMAR table description.
*/
-int dmar_ir_support(void)
+int __init dmar_ir_support(void)
{
struct acpi_table_dmar *dmar;
dmar = (struct acpi_table_dmar *)dmar_tbl;
+ if (!dmar)
+ return 0;
return dmar->flags & 0x1;
}
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index f184d1d2ecbe..6644337d63d6 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -848,7 +848,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_disable_device;
}
- /* Check for the proper subsytem ID's
+ /* Check for the proper subsystem ID's
* Intel uses a different SSID programming model than Compaq.
* For Intel, each SSID bit identifies a PHP capability.
* Also Intel HPC's may have RID=0.
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 417312528ddf..da40f0789739 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -491,13 +491,11 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
domain->iommu_coherency = 1;
- i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
- for (; i < g_num_of_iommus; ) {
+ for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
if (!ecap_coherent(g_iommus[i]->ecap)) {
domain->iommu_coherency = 0;
break;
}
- i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
}
}
@@ -507,13 +505,11 @@ static void domain_update_iommu_snooping(struct dmar_domain *domain)
domain->iommu_snooping = 1;
- i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
- for (; i < g_num_of_iommus; ) {
+ for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
if (!ecap_sc_support(g_iommus[i]->ecap)) {
domain->iommu_snooping = 0;
break;
}
- i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
}
}
@@ -1068,7 +1064,7 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
}
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
- unsigned long pfn, unsigned int pages)
+ unsigned long pfn, unsigned int pages, int map)
{
unsigned int mask = ilog2(__roundup_pow_of_two(pages));
uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
@@ -1089,10 +1085,10 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
DMA_TLB_PSI_FLUSH);
/*
- * In caching mode, domain ID 0 is reserved for non-present to present
- * mapping flush. Device IOTLB doesn't need to be flushed in this case.
+ * In caching mode, changes of pages from non-present to present require
+ * flush. However, device IOTLB doesn't need to be flushed in this case.
*/
- if (!cap_caching_mode(iommu->cap) || did)
+ if (!cap_caching_mode(iommu->cap) || !map)
iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
@@ -1154,7 +1150,8 @@ static int iommu_init_domains(struct intel_iommu *iommu)
unsigned long nlongs;
ndomains = cap_ndoms(iommu->cap);
- pr_debug("Number of Domains supportd <%ld>\n", ndomains);
+ pr_debug("IOMMU %d: Number of Domains supportd <%ld>\n", iommu->seq_id,
+ ndomains);
nlongs = BITS_TO_LONGS(ndomains);
spin_lock_init(&iommu->lock);
@@ -1194,8 +1191,7 @@ void free_dmar_iommu(struct intel_iommu *iommu)
unsigned long flags;
if ((iommu->domains) && (iommu->domain_ids)) {
- i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
- for (; i < cap_ndoms(iommu->cap); ) {
+ for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
domain = iommu->domains[i];
clear_bit(i, iommu->domain_ids);
@@ -1207,9 +1203,6 @@ void free_dmar_iommu(struct intel_iommu *iommu)
domain_exit(domain);
}
spin_unlock_irqrestore(&domain->iommu_lock, flags);
-
- i = find_next_bit(iommu->domain_ids,
- cap_ndoms(iommu->cap), i+1);
}
}
@@ -1292,14 +1285,11 @@ static void iommu_detach_domain(struct dmar_domain *domain,
spin_lock_irqsave(&iommu->lock, flags);
ndomains = cap_ndoms(iommu->cap);
- num = find_first_bit(iommu->domain_ids, ndomains);
- for (; num < ndomains; ) {
+ for_each_set_bit(num, iommu->domain_ids, ndomains) {
if (iommu->domains[num] == domain) {
found = 1;
break;
}
- num = find_next_bit(iommu->domain_ids,
- cap_ndoms(iommu->cap), num+1);
}
if (found) {
@@ -1485,15 +1475,12 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
/* find an available domain id for this device in iommu */
ndomains = cap_ndoms(iommu->cap);
- num = find_first_bit(iommu->domain_ids, ndomains);
- for (; num < ndomains; ) {
+ for_each_set_bit(num, iommu->domain_ids, ndomains) {
if (iommu->domains[num] == domain) {
id = num;
found = 1;
break;
}
- num = find_next_bit(iommu->domain_ids,
- cap_ndoms(iommu->cap), num+1);
}
if (found == 0) {
@@ -1558,7 +1545,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
(((u16)bus) << 8) | devfn,
DMA_CCMD_MASK_NOBIT,
DMA_CCMD_DEVICE_INVL);
- iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
+ iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
} else {
iommu_flush_write_buffer(iommu);
}
@@ -2333,14 +2320,16 @@ int __init init_dmars(void)
*/
iommu->flush.flush_context = __iommu_flush_context;
iommu->flush.flush_iotlb = __iommu_flush_iotlb;
- printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
+ printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
"invalidation\n",
+ iommu->seq_id,
(unsigned long long)drhd->reg_base_addr);
} else {
iommu->flush.flush_context = qi_flush_context;
iommu->flush.flush_iotlb = qi_flush_iotlb;
- printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
+ printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
"invalidation\n",
+ iommu->seq_id,
(unsigned long long)drhd->reg_base_addr);
}
}
@@ -2621,7 +2610,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
/* it's a non-present to present mapping. Only flush if caching mode */
if (cap_caching_mode(iommu->cap))
- iommu_flush_iotlb_psi(iommu, 0, mm_to_dma_pfn(iova->pfn_lo), size);
+ iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
else
iommu_flush_write_buffer(iommu);
@@ -2661,15 +2650,24 @@ static void flush_unmaps(void)
if (!deferred_flush[i].next)
continue;
- iommu->flush.flush_iotlb(iommu, 0, 0, 0,
+ /* In caching mode, global flushes make emulation expensive */
+ if (!cap_caching_mode(iommu->cap))
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0,
DMA_TLB_GLOBAL_FLUSH);
for (j = 0; j < deferred_flush[i].next; j++) {
unsigned long mask;
struct iova *iova = deferred_flush[i].iova[j];
-
- mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
- iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
- (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
+ struct dmar_domain *domain = deferred_flush[i].domain[j];
+
+ /* On real hardware multiple invalidations are expensive */
+ if (cap_caching_mode(iommu->cap))
+ iommu_flush_iotlb_psi(iommu, domain->id,
+ iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
+ else {
+ mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
+ iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
+ (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
+ }
__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
}
deferred_flush[i].next = 0;
@@ -2750,7 +2748,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
if (intel_iommu_strict) {
iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
- last_pfn - start_pfn + 1);
+ last_pfn - start_pfn + 1, 0);
/* free iova */
__free_iova(&domain->iovad, iova);
} else {
@@ -2840,7 +2838,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
if (intel_iommu_strict) {
iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
- last_pfn - start_pfn + 1);
+ last_pfn - start_pfn + 1, 0);
/* free iova */
__free_iova(&domain->iovad, iova);
} else {
@@ -2874,7 +2872,6 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
struct dmar_domain *domain;
size_t size = 0;
int prot = 0;
- size_t offset_pfn = 0;
struct iova *iova = NULL;
int ret;
struct scatterlist *sg;
@@ -2928,7 +2925,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
/* it's a non-present to present mapping. Only flush if caching mode */
if (cap_caching_mode(iommu->cap))
- iommu_flush_iotlb_psi(iommu, 0, start_vpfn, offset_pfn);
+ iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
else
iommu_flush_write_buffer(iommu);
@@ -3441,12 +3438,9 @@ static int vm_domain_min_agaw(struct dmar_domain *domain)
int i;
int min_agaw = domain->agaw;
- i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
- for (; i < g_num_of_iommus; ) {
+ for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
if (min_agaw > g_iommus[i]->agaw)
min_agaw = g_iommus[i]->agaw;
-
- i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
}
return min_agaw;
@@ -3512,8 +3506,7 @@ static void iommu_free_vm_domain(struct dmar_domain *domain)
iommu = drhd->iommu;
ndomains = cap_ndoms(iommu->cap);
- i = find_first_bit(iommu->domain_ids, ndomains);
- for (; i < ndomains; ) {
+ for_each_set_bit(i, iommu->domain_ids, ndomains) {
if (iommu->domains[i] == domain) {
spin_lock_irqsave(&iommu->lock, flags);
clear_bit(i, iommu->domain_ids);
@@ -3521,7 +3514,6 @@ static void iommu_free_vm_domain(struct dmar_domain *domain)
spin_unlock_irqrestore(&iommu->lock, flags);
break;
}
- i = find_next_bit(iommu->domain_ids, ndomains, i+1);
}
}
}
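The loop conversions in intel-iommu.c above all replace the same open-coded idiom; as a small self-contained sketch (hypothetical bitmap and function names), the two forms are equivalent:

#include <linux/bitops.h>
#include <linux/printk.h>

static void visit_set_bits(const unsigned long *bitmap, unsigned int nbits)
{
	unsigned int i;

	/* Open-coded form removed by this patch: */
	for (i = find_first_bit(bitmap, nbits); i < nbits;
	     i = find_next_bit(bitmap, nbits, i + 1))
		pr_debug("bit %u is set\n", i);

	/* Equivalent form introduced by this patch: */
	for_each_set_bit(i, bitmap, nbits)
		pr_debug("bit %u is set\n", i);
}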
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 6ee98a56946f..1315ac688aa2 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -832,9 +832,9 @@ static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
return -1;
}
- printk(KERN_INFO "IOAPIC id %d under DRHD base"
- " 0x%Lx\n", scope->enumeration_id,
- drhd->address);
+ printk(KERN_INFO "IOAPIC id %d under DRHD base "
+ " 0x%Lx IOMMU %d\n", scope->enumeration_id,
+ drhd->address, iommu->seq_id);
ir_parse_one_ioapic_scope(scope, iommu);
} else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index fad93983bfed..89a08ed39c94 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -960,7 +960,12 @@ static ssize_t reset_store(struct device *dev,
if (val != 1)
return -EINVAL;
- return pci_reset_function(pdev);
+
+ result = pci_reset_function(pdev);
+ if (result < 0)
+ return result;
+
+ return count;
}
static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_store);
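The reset_store() change above follows the usual sysfs store convention: on success the callback must return the number of bytes it consumed, not the 0 that pci_reset_function() returns, otherwise userspace sees a zero-length write. A minimal sketch of the pattern, with hypothetical attribute and helper names:

static ssize_t example_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	int err;

	err = do_example_action(dev);	/* hypothetical helper: 0 or -errno */
	if (err < 0)
		return err;		/* propagate the error to write(2) */

	return count;			/* success: report all bytes consumed */
}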
@@ -1011,6 +1016,39 @@ error:
return retval;
}
+static void pci_remove_slot_links(struct pci_dev *dev)
+{
+ char func[10];
+ struct pci_slot *slot;
+
+ sysfs_remove_link(&dev->dev.kobj, "slot");
+ list_for_each_entry(slot, &dev->bus->slots, list) {
+ if (slot->number != PCI_SLOT(dev->devfn))
+ continue;
+ snprintf(func, 10, "function%d", PCI_FUNC(dev->devfn));
+ sysfs_remove_link(&slot->kobj, func);
+ }
+}
+
+static int pci_create_slot_links(struct pci_dev *dev)
+{
+ int result = 0;
+ char func[10];
+ struct pci_slot *slot;
+
+ list_for_each_entry(slot, &dev->bus->slots, list) {
+ if (slot->number != PCI_SLOT(dev->devfn))
+ continue;
+ result = sysfs_create_link(&dev->dev.kobj, &slot->kobj, "slot");
+ if (result)
+ goto out;
+ snprintf(func, 10, "function%d", PCI_FUNC(dev->devfn));
+ result = sysfs_create_link(&slot->kobj, &dev->dev.kobj, func);
+ }
+out:
+ return result;
+}
+
int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
{
int retval;
@@ -1073,6 +1111,8 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
if (retval)
goto err_vga_file;
+ pci_create_slot_links(pdev);
+
return 0;
err_vga_file:
@@ -1122,6 +1162,8 @@ void pci_remove_sysfs_dev_files(struct pci_dev *pdev)
if (!sysfs_initialized)
return;
+ pci_remove_slot_links(pdev);
+
pci_remove_capabilities_sysfs(pdev);
if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 37499127c801..8548a85593e2 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1503,7 +1503,7 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
* pci_back_from_sleep - turn PCI device on during system-wide transition into working state
* @dev: Device to handle.
*
- * Disable device's sytem wake-up capability and put it into D0.
+ * Disable device's system wake-up capability and put it into D0.
*/
int pci_back_from_sleep(struct pci_dev *dev)
{
@@ -1631,7 +1631,6 @@ void pci_pm_init(struct pci_dev *dev)
* let the user space enable it to wake up the system as needed.
*/
device_set_wakeup_capable(&dev->dev, true);
- device_set_wakeup_enable(&dev->dev, false);
/* Disable the PME# generation functionality */
pci_pme_active(dev, false);
} else {
@@ -1655,7 +1654,6 @@ void platform_pci_wakeup_init(struct pci_dev *dev)
return;
device_set_wakeup_capable(&dev->dev, true);
- device_set_wakeup_enable(&dev->dev, false);
platform_pci_sleep_wake(dev, false);
}
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 4eb10f48d270..f8077b3c8c8c 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -244,7 +244,7 @@ struct pci_ats {
int stu; /* Smallest Translation Unit */
int qdep; /* Invalidate Queue Depth */
int ref_cnt; /* Physical Function reference count */
- int is_enabled:1; /* Enable bit is set */
+ unsigned int is_enabled:1; /* Enable bit is set */
};
#ifdef CONFIG_PCI_IOV
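The is_enabled change above avoids a classic bitfield pitfall: on compilers where a plain int bitfield is signed (GCC's default), a 1-bit field can only hold 0 and -1, so comparisons against 1 never match. A small illustration with a hypothetical struct:

struct bitfield_demo {
	int		signed_bit:1;	/* can only hold 0 or -1 */
	unsigned int	unsigned_bit:1;	/* holds 0 or 1 as expected */
};

/*
 * After assigning 1 to both fields, (demo.signed_bit == 1) is false because
 * the stored value reads back as -1, while (demo.unsigned_bit == 1) is true.
 */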
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
index f8f425b8731d..909924692b8a 100644
--- a/drivers/pci/pcie/aer/aer_inject.c
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -168,7 +168,7 @@ static u32 *find_pci_config_dword(struct aer_error *err, int where,
target = &err->root_status;
rw1cs = 1;
break;
- case PCI_ERR_ROOT_COR_SRC:
+ case PCI_ERR_ROOT_ERR_SRC:
target = &err->source_id;
break;
}
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 7a711ee314b7..484cc55194b8 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -72,13 +72,120 @@ void pci_no_aer(void)
pcie_aer_disable = 1; /* has priority over 'forceload' */
}
+static int set_device_error_reporting(struct pci_dev *dev, void *data)
+{
+ bool enable = *((bool *)data);
+
+ if ((dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) ||
+ (dev->pcie_type == PCI_EXP_TYPE_UPSTREAM) ||
+ (dev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)) {
+ if (enable)
+ pci_enable_pcie_error_reporting(dev);
+ else
+ pci_disable_pcie_error_reporting(dev);
+ }
+
+ if (enable)
+ pcie_set_ecrc_checking(dev);
+
+ return 0;
+}
+
+/**
+ * set_downstream_devices_error_reporting - enable/disable the error reporting bits on the root port and its downstream ports.
+ * @dev: pointer to root port's pci_dev data structure
+ * @enable: true = enable error reporting, false = disable error reporting.
+ */
+static void set_downstream_devices_error_reporting(struct pci_dev *dev,
+ bool enable)
+{
+ set_device_error_reporting(dev, &enable);
+
+ if (!dev->subordinate)
+ return;
+ pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable);
+}
+
+/**
+ * aer_enable_rootport - enable Root Port's interrupts when receiving messages
+ * @rpc: pointer to a Root Port data structure
+ *
+ * Invoked when PCIe bus loads AER service driver.
+ */
+static void aer_enable_rootport(struct aer_rpc *rpc)
+{
+ struct pci_dev *pdev = rpc->rpd->port;
+ int pos, aer_pos;
+ u16 reg16;
+ u32 reg32;
+
+ pos = pci_pcie_cap(pdev);
+ /* Clear PCIe Capability's Device Status */
+ pci_read_config_word(pdev, pos+PCI_EXP_DEVSTA, &reg16);
+ pci_write_config_word(pdev, pos+PCI_EXP_DEVSTA, reg16);
+
+ /* Disable system error generation in response to error messages */
+ pci_read_config_word(pdev, pos + PCI_EXP_RTCTL, &reg16);
+ reg16 &= ~(SYSTEM_ERROR_INTR_ON_MESG_MASK);
+ pci_write_config_word(pdev, pos + PCI_EXP_RTCTL, reg16);
+
+ aer_pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
+ /* Clear error status */
+ pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, &reg32);
+ pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, reg32);
+ pci_read_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, &reg32);
+ pci_write_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, reg32);
+ pci_read_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, &reg32);
+ pci_write_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, reg32);
+
+ /*
+ * Enable error reporting for the root port device and downstream port
+ * devices.
+ */
+ set_downstream_devices_error_reporting(pdev, true);
+
+ /* Enable Root Port's interrupt in response to error messages */
+ pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_COMMAND, &reg32);
+ reg32 |= ROOT_PORT_INTR_ON_MESG_MASK;
+ pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_COMMAND, reg32);
+}
+
+/**
+ * aer_disable_rootport - disable Root Port's interrupts when receiving messages
+ * @rpc: pointer to a Root Port data structure
+ *
+ * Invoked when PCIe bus unloads AER service driver.
+ */
+static void aer_disable_rootport(struct aer_rpc *rpc)
+{
+ struct pci_dev *pdev = rpc->rpd->port;
+ u32 reg32;
+ int pos;
+
+ /*
+ * Disable error reporting for the root port device and downstream port
+ * devices.
+ */
+ set_downstream_devices_error_reporting(pdev, false);
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
+ /* Disable Root's interrupt in response to error messages */
+ pci_read_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
+ reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
+ pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, reg32);
+
+ /* Clear Root's error status reg */
+ pci_read_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, &reg32);
+ pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, reg32);
+}
+
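Both helpers above clear the various status registers by reading them and writing the same value back; this works because the AER status bits are RW1C (write one to clear), so the write clears exactly the bits that were latched. A minimal sketch of the idiom, with a hypothetical helper name:

static void clear_rw1c_status(struct pci_dev *pdev, int reg)
{
	u32 status;

	pci_read_config_dword(pdev, reg, &status);
	/* Writing the latched bits back clears them; zero bits are unaffected. */
	pci_write_config_dword(pdev, reg, status);
}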
/**
* aer_irq - Root Port's ISR
* @irq: IRQ assigned to Root Port
* @context: pointer to Root Port data structure
*
* Invoked when Root Port detects AER messages.
- **/
+ */
irqreturn_t aer_irq(int irq, void *context)
{
unsigned int status, id;
@@ -97,13 +204,13 @@ irqreturn_t aer_irq(int irq, void *context)
/* Read error status */
pci_read_config_dword(pdev->port, pos + PCI_ERR_ROOT_STATUS, &status);
- if (!(status & ROOT_ERR_STATUS_MASKS)) {
+ if (!(status & (PCI_ERR_ROOT_UNCOR_RCV|PCI_ERR_ROOT_COR_RCV))) {
spin_unlock_irqrestore(&rpc->e_lock, flags);
return IRQ_NONE;
}
/* Read error source and clear error status */
- pci_read_config_dword(pdev->port, pos + PCI_ERR_ROOT_COR_SRC, &id);
+ pci_read_config_dword(pdev->port, pos + PCI_ERR_ROOT_ERR_SRC, &id);
pci_write_config_dword(pdev->port, pos + PCI_ERR_ROOT_STATUS, status);
/* Store error source for later DPC handler */
@@ -135,7 +242,7 @@ EXPORT_SYMBOL_GPL(aer_irq);
* @dev: pointer to the pcie_dev data structure
*
* Invoked when Root Port's AER service is loaded.
- **/
+ */
static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
{
struct aer_rpc *rpc;
@@ -144,15 +251,11 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
if (!rpc)
return NULL;
- /*
- * Initialize Root lock access, e_lock, to Root Error Status Reg,
- * Root Error ID Reg, and Root error producer/consumer index.
- */
+ /* Initialize Root lock access, e_lock, to Root Error Status Reg */
spin_lock_init(&rpc->e_lock);
rpc->rpd = dev;
INIT_WORK(&rpc->dpc_handler, aer_isr);
- rpc->prod_idx = rpc->cons_idx = 0;
mutex_init(&rpc->rpc_mutex);
init_waitqueue_head(&rpc->wait_release);
@@ -167,7 +270,7 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
* @dev: pointer to the pcie_dev data structure
*
* Invoked when PCI Express bus unloads or AER probe fails.
- **/
+ */
static void aer_remove(struct pcie_device *dev)
{
struct aer_rpc *rpc = get_service_data(dev);
@@ -179,7 +282,8 @@ static void aer_remove(struct pcie_device *dev)
wait_event(rpc->wait_release, rpc->prod_idx == rpc->cons_idx);
- aer_delete_rootport(rpc);
+ aer_disable_rootport(rpc);
+ kfree(rpc);
set_service_data(dev, NULL);
}
}
@@ -190,7 +294,7 @@ static void aer_remove(struct pcie_device *dev)
* @id: pointer to the service id data structure
*
* Invoked when PCI Express bus loads AER service driver.
- **/
+ */
static int __devinit aer_probe(struct pcie_device *dev)
{
int status;
@@ -230,47 +334,30 @@ static int __devinit aer_probe(struct pcie_device *dev)
* @dev: pointer to Root Port's pci_dev data structure
*
* Invoked by Port Bus driver when performing link reset at Root Port.
- **/
+ */
static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
{
- u16 p2p_ctrl;
- u32 status;
+ u32 reg32;
int pos;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
/* Disable Root's interrupt in response to error messages */
- pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, 0);
-
- /* Assert Secondary Bus Reset */
- pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &p2p_ctrl);
- p2p_ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
- pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl);
-
- /*
- * we should send hot reset message for 2ms to allow it time to
- * propogate to all downstream ports
- */
- msleep(2);
-
- /* De-assert Secondary Bus Reset */
- p2p_ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
- pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl);
+ pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
+ reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
+ pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
- /*
- * System software must wait for at least 100ms from the end
- * of a reset of one or more device before it is permitted
- * to issue Configuration Requests to those devices.
- */
- msleep(200);
+ aer_do_secondary_bus_reset(dev);
dev_printk(KERN_DEBUG, &dev->dev, "Root Port link has been reset\n");
+ /* Clear Root Error Status */
+ pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32);
+ pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, reg32);
+
/* Enable Root Port's interrupt in response to error messages */
- pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &status);
- pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, status);
- pci_write_config_dword(dev,
- pos + PCI_ERR_ROOT_COMMAND,
- ROOT_PORT_INTR_ON_MESG_MASK);
+ pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
+ reg32 |= ROOT_PORT_INTR_ON_MESG_MASK;
+ pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
return PCI_ERS_RESULT_RECOVERED;
}
@@ -281,7 +368,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
* @error: error severity being notified by port bus
*
* Invoked by Port Bus driver during error recovery.
- **/
+ */
static pci_ers_result_t aer_error_detected(struct pci_dev *dev,
enum pci_channel_state error)
{
@@ -294,7 +381,7 @@ static pci_ers_result_t aer_error_detected(struct pci_dev *dev,
* @dev: pointer to Root Port's pci_dev data structure
*
* Invoked by Port Bus driver during nonfatal recovery.
- **/
+ */
static void aer_error_resume(struct pci_dev *dev)
{
int pos;
@@ -321,7 +408,7 @@ static void aer_error_resume(struct pci_dev *dev)
* aer_service_init - register AER root service driver
*
* Invoked when AER root service driver is loaded.
- **/
+ */
static int __init aer_service_init(void)
{
if (pcie_aer_disable)
@@ -335,7 +422,7 @@ static int __init aer_service_init(void)
* aer_service_exit - unregister AER root service driver
*
* Invoked when AER root service driver is unloaded.
- **/
+ */
static void __exit aer_service_exit(void)
{
pcie_port_service_unregister(&aerdriver);
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index bd833ea3ba49..80c11d131499 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -17,9 +17,6 @@
#define AER_FATAL 1
#define AER_CORRECTABLE 2
-/* Root Error Status Register Bits */
-#define ROOT_ERR_STATUS_MASKS 0x0f
-
#define SYSTEM_ERROR_INTR_ON_MESG_MASK (PCI_EXP_RTCTL_SECEE| \
PCI_EXP_RTCTL_SENFEE| \
PCI_EXP_RTCTL_SEFEE)
@@ -117,8 +114,7 @@ static inline pci_ers_result_t merge_result(enum pci_ers_result orig,
}
extern struct bus_type pcie_port_bus_type;
-extern void aer_enable_rootport(struct aer_rpc *rpc);
-extern void aer_delete_rootport(struct aer_rpc *rpc);
+extern void aer_do_secondary_bus_reset(struct pci_dev *dev);
extern int aer_init(struct pcie_device *dev);
extern void aer_isr(struct work_struct *work);
extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
@@ -134,4 +130,21 @@ static inline int aer_osc_setup(struct pcie_device *pciedev)
}
#endif
+#ifdef CONFIG_ACPI_APEI
+extern int pcie_aer_get_firmware_first(struct pci_dev *pci_dev);
+#else
+static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev)
+{
+ if (pci_dev->__aer_firmware_first_valid)
+ return pci_dev->__aer_firmware_first;
+ return 0;
+}
+#endif
+
+static inline void pcie_aer_force_firmware_first(struct pci_dev *pci_dev,
+ int enable)
+{
+ pci_dev->__aer_firmware_first = !!enable;
+ pci_dev->__aer_firmware_first_valid = 1;
+}
#endif /* _AERDRV_H_ */
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c
index 04814087658d..f278d7b0d95d 100644
--- a/drivers/pci/pcie/aer/aerdrv_acpi.c
+++ b/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -16,6 +16,7 @@
#include <linux/acpi.h>
#include <linux/pci-acpi.h>
#include <linux/delay.h>
+#include <acpi/apei.h>
#include "aerdrv.h"
/**
@@ -53,3 +54,79 @@ int aer_osc_setup(struct pcie_device *pciedev)
return 0;
}
+
+#ifdef CONFIG_ACPI_APEI
+static inline int hest_match_pci(struct acpi_hest_aer_common *p,
+ struct pci_dev *pci)
+{
+ return (0 == pci_domain_nr(pci->bus) &&
+ p->bus == pci->bus->number &&
+ p->device == PCI_SLOT(pci->devfn) &&
+ p->function == PCI_FUNC(pci->devfn));
+}
+
+struct aer_hest_parse_info {
+ struct pci_dev *pci_dev;
+ int firmware_first;
+};
+
+static int aer_hest_parse(struct acpi_hest_header *hest_hdr, void *data)
+{
+ struct aer_hest_parse_info *info = data;
+ struct acpi_hest_aer_common *p;
+ u8 pcie_type = 0;
+ u8 bridge = 0;
+ int ff = 0;
+
+ switch (hest_hdr->type) {
+ case ACPI_HEST_TYPE_AER_ROOT_PORT:
+ pcie_type = PCI_EXP_TYPE_ROOT_PORT;
+ break;
+ case ACPI_HEST_TYPE_AER_ENDPOINT:
+ pcie_type = PCI_EXP_TYPE_ENDPOINT;
+ break;
+ case ACPI_HEST_TYPE_AER_BRIDGE:
+ if ((info->pci_dev->class >> 16) == PCI_BASE_CLASS_BRIDGE)
+ bridge = 1;
+ break;
+ default:
+ return 0;
+ }
+
+ p = (struct acpi_hest_aer_common *)(hest_hdr + 1);
+ if (p->flags & ACPI_HEST_GLOBAL) {
+ if ((info->pci_dev->is_pcie &&
+ info->pci_dev->pcie_type == pcie_type) || bridge)
+ ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
+ } else
+ if (hest_match_pci(p, info->pci_dev))
+ ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
+ info->firmware_first = ff;
+
+ return 0;
+}
+
+static void aer_set_firmware_first(struct pci_dev *pci_dev)
+{
+ int rc;
+ struct aer_hest_parse_info info = {
+ .pci_dev = pci_dev,
+ .firmware_first = 0,
+ };
+
+ rc = apei_hest_parse(aer_hest_parse, &info);
+
+ if (rc)
+ pci_dev->__aer_firmware_first = 0;
+ else
+ pci_dev->__aer_firmware_first = info.firmware_first;
+ pci_dev->__aer_firmware_first_valid = 1;
+}
+
+int pcie_aer_get_firmware_first(struct pci_dev *dev)
+{
+ if (!dev->__aer_firmware_first_valid)
+ aer_set_firmware_first(dev);
+ return dev->__aer_firmware_first;
+}
+#endif
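A caller that honours the firmware-first policy resolved above would gate its native AER handling on pcie_aer_get_firmware_first(), roughly as in this hypothetical sketch (the aerdrv_core.c changes below use it the same way):

/* Hypothetical caller: back off when platform firmware owns AER for @dev. */
static int example_enable_native_aer(struct pci_dev *dev)
{
	if (pcie_aer_get_firmware_first(dev))
		return -EIO;	/* firmware-first: leave the AER registers alone */

	return pci_enable_pcie_error_reporting(dev);
}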
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index aceb04b67b60..8af4f619bba2 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -36,7 +36,7 @@ int pci_enable_pcie_error_reporting(struct pci_dev *dev)
u16 reg16 = 0;
int pos;
- if (dev->aer_firmware_first)
+ if (pcie_aer_get_firmware_first(dev))
return -EIO;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
@@ -47,13 +47,12 @@ int pci_enable_pcie_error_reporting(struct pci_dev *dev)
if (!pos)
return -EIO;
- pci_read_config_word(dev, pos+PCI_EXP_DEVCTL, &reg16);
- reg16 = reg16 |
- PCI_EXP_DEVCTL_CERE |
+ pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &reg16);
+ reg16 |= (PCI_EXP_DEVCTL_CERE |
PCI_EXP_DEVCTL_NFERE |
PCI_EXP_DEVCTL_FERE |
- PCI_EXP_DEVCTL_URRE;
- pci_write_config_word(dev, pos+PCI_EXP_DEVCTL, reg16);
+ PCI_EXP_DEVCTL_URRE);
+ pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16);
return 0;
}
@@ -64,19 +63,19 @@ int pci_disable_pcie_error_reporting(struct pci_dev *dev)
u16 reg16 = 0;
int pos;
- if (dev->aer_firmware_first)
+ if (pcie_aer_get_firmware_first(dev))
return -EIO;
pos = pci_pcie_cap(dev);
if (!pos)
return -EIO;
- pci_read_config_word(dev, pos+PCI_EXP_DEVCTL, &reg16);
- reg16 = reg16 & ~(PCI_EXP_DEVCTL_CERE |
- PCI_EXP_DEVCTL_NFERE |
- PCI_EXP_DEVCTL_FERE |
- PCI_EXP_DEVCTL_URRE);
- pci_write_config_word(dev, pos+PCI_EXP_DEVCTL, reg16);
+ pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &reg16);
+ reg16 &= ~(PCI_EXP_DEVCTL_CERE |
+ PCI_EXP_DEVCTL_NFERE |
+ PCI_EXP_DEVCTL_FERE |
+ PCI_EXP_DEVCTL_URRE);
+ pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16);
return 0;
}
@@ -99,99 +98,46 @@ int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);
-static int set_device_error_reporting(struct pci_dev *dev, void *data)
-{
- bool enable = *((bool *)data);
-
- if ((dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) ||
- (dev->pcie_type == PCI_EXP_TYPE_UPSTREAM) ||
- (dev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)) {
- if (enable)
- pci_enable_pcie_error_reporting(dev);
- else
- pci_disable_pcie_error_reporting(dev);
- }
-
- if (enable)
- pcie_set_ecrc_checking(dev);
-
- return 0;
-}
-
/**
- * set_downstream_devices_error_reporting - enable/disable the error reporting bits on the root port and its downstream ports.
- * @dev: pointer to root port's pci_dev data structure
- * @enable: true = enable error reporting, false = disable error reporting.
+ * add_error_device - list device to be handled
+ * @e_info: pointer to error info
+ * @dev: pointer to pci_dev to be added
*/
-static void set_downstream_devices_error_reporting(struct pci_dev *dev,
- bool enable)
-{
- set_device_error_reporting(dev, &enable);
-
- if (!dev->subordinate)
- return;
- pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable);
-}
-
-static inline int compare_device_id(struct pci_dev *dev,
- struct aer_err_info *e_info)
-{
- if (e_info->id == ((dev->bus->number << 8) | dev->devfn)) {
- /*
- * Device ID match
- */
- return 1;
- }
-
- return 0;
-}
-
static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
{
if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) {
e_info->dev[e_info->error_dev_num] = dev;
e_info->error_dev_num++;
- return 1;
+ return 0;
}
-
- return 0;
+ return -ENOSPC;
}
-
#define PCI_BUS(x) (((x) >> 8) & 0xff)
-static int find_device_iter(struct pci_dev *dev, void *data)
+/**
+ * is_error_source - check whether the device is the source of the reported error
+ * @dev: pointer to pci_dev to be checked
+ * @e_info: pointer to reported error info
+ */
+static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
{
int pos;
- u32 status;
- u32 mask;
+ u32 status, mask;
u16 reg16;
- int result;
- struct aer_err_info *e_info = (struct aer_err_info *)data;
/*
* When bus id is equal to 0, it might be a bad id
* reported by root port.
*/
if (!nosourceid && (PCI_BUS(e_info->id) != 0)) {
- result = compare_device_id(dev, e_info);
- if (result)
- add_error_device(e_info, dev);
+ /* Device ID match? */
+ if (e_info->id == ((dev->bus->number << 8) | dev->devfn))
+ return true;
- /*
- * If there is no multiple error, we stop
- * or continue based on the id comparing.
- */
+ /* A mismatched ID with a single error means this device is not the source */
if (!e_info->multi_error_valid)
- return result;
-
- /*
- * If there are multiple errors and id does match,
- * We need continue to search other devices under
- * the root port. Return 0 means that.
- */
- if (result)
- return 0;
+ return false;
}
/*
@@ -200,71 +146,94 @@ static int find_device_iter(struct pci_dev *dev, void *data)
* 2) bus id is equal to 0. Some ports might lose the bus
* id of error source id;
* 3) There are multiple errors and prior id comparing fails;
- * We check AER status registers to find the initial reporter.
+ * We check AER status registers to find a possible reporter.
*/
if (atomic_read(&dev->enable_cnt) == 0)
- return 0;
+ return false;
pos = pci_pcie_cap(dev);
if (!pos)
- return 0;
+ return false;
+
/* Check if AER is enabled */
- pci_read_config_word(dev, pos+PCI_EXP_DEVCTL, &reg16);
+ pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &reg16);
if (!(reg16 & (
PCI_EXP_DEVCTL_CERE |
PCI_EXP_DEVCTL_NFERE |
PCI_EXP_DEVCTL_FERE |
PCI_EXP_DEVCTL_URRE)))
- return 0;
+ return false;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
if (!pos)
- return 0;
+ return false;
- status = 0;
- mask = 0;
+ /* Check if error is recorded */
if (e_info->severity == AER_CORRECTABLE) {
pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &mask);
- if (status & ~mask) {
- add_error_device(e_info, dev);
- goto added;
- }
} else {
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask);
- if (status & ~mask) {
- add_error_device(e_info, dev);
- goto added;
- }
}
+ if (status & ~mask)
+ return true;
- return 0;
+ return false;
+}
-added:
- if (e_info->multi_error_valid)
- return 0;
- else
- return 1;
+static int find_device_iter(struct pci_dev *dev, void *data)
+{
+ struct aer_err_info *e_info = (struct aer_err_info *)data;
+
+ if (is_error_source(dev, e_info)) {
+ /* List this device */
+ if (add_error_device(e_info, dev)) {
+ /* We cannot handle more... Stop iteration */
+ /* TODO: Should print error message here? */
+ return 1;
+ }
+
+ /* If there is only a single error, stop iteration */
+ if (!e_info->multi_error_valid)
+ return 1;
+ }
+ return 0;
}
/**
* find_source_device - search through device hierarchy for source device
* @parent: pointer to Root Port pci_dev data structure
- * @err_info: including detailed error information such like id
+ * @e_info: including detailed error information such as the id
*
- * Invoked when error is detected at the Root Port.
+ * Return true if found.
+ *
+ * Invoked by DPC when error is detected at the Root Port.
+ * Caller of this function must set id, severity, and multi_error_valid of
+ * struct aer_err_info pointed to by @e_info properly. This function must fill
+ * e_info->error_dev_num and e_info->dev[], based on the given information.
*/
-static void find_source_device(struct pci_dev *parent,
+static bool find_source_device(struct pci_dev *parent,
struct aer_err_info *e_info)
{
struct pci_dev *dev = parent;
int result;
+ /* Must reset in this function */
+ e_info->error_dev_num = 0;
+
/* Is Root Port an agent that sends error message? */
result = find_device_iter(dev, e_info);
if (result)
- return;
+ return true;
pci_walk_bus(parent->subordinate, find_device_iter, e_info);
+
+ if (!e_info->error_dev_num) {
+ dev_printk(KERN_DEBUG, &parent->dev,
+ "can't find device of ID%04x\n",
+ e_info->id);
+ return false;
+ }
+ return true;
}
static int report_error_detected(struct pci_dev *dev, void *data)
@@ -403,43 +372,77 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
return result_data.result;
}
-struct find_aer_service_data {
- struct pcie_port_service_driver *aer_driver;
- int is_downstream;
-};
-
-static int find_aer_service_iter(struct device *device, void *data)
+/**
+ * aer_do_secondary_bus_reset - perform secondary bus reset
+ * @dev: pointer to bridge's pci_dev data structure
+ *
+ * Invoked when performing link reset at Root Port or Downstream Port.
+ */
+void aer_do_secondary_bus_reset(struct pci_dev *dev)
{
- struct device_driver *driver;
- struct pcie_port_service_driver *service_driver;
- struct find_aer_service_data *result;
+ u16 p2p_ctrl;
+
+ /* Assert Secondary Bus Reset */
+ pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &p2p_ctrl);
+ p2p_ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
+ pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl);
+
+ /*
+ * We should send the hot reset message for 2ms to allow it time to
+ * propagate to all downstream ports.
+ */
+ msleep(2);
+
+ /* De-assert Secondary Bus Reset */
+ p2p_ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
+ pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl);
+
+ /*
+ * System software must wait for at least 100ms from the end
+ * of a reset of one or more devices before it is permitted
+ * to issue Configuration Requests to those devices.
+ */
+ msleep(200);
+}
- result = (struct find_aer_service_data *) data;
+/**
+ * default_downstream_reset_link - default reset function for Downstream Port
+ * @dev: pointer to downstream port's pci_dev data structure
+ *
+ * Invoked when performing link reset at a Downstream Port with no AER driver.
+ */
+static pci_ers_result_t default_downstream_reset_link(struct pci_dev *dev)
+{
+ aer_do_secondary_bus_reset(dev);
+ dev_printk(KERN_DEBUG, &dev->dev,
+ "Downstream Port link has been reset\n");
+ return PCI_ERS_RESULT_RECOVERED;
+}
- if (device->bus == &pcie_port_bus_type) {
- struct pcie_device *pcie = to_pcie_device(device);
+static int find_aer_service_iter(struct device *device, void *data)
+{
+ struct pcie_port_service_driver *service_driver, **drv;
- if (pcie->port->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)
- result->is_downstream = 1;
+ drv = (struct pcie_port_service_driver **) data;
- driver = device->driver;
- if (driver) {
- service_driver = to_service_driver(driver);
- if (service_driver->service == PCIE_PORT_SERVICE_AER) {
- result->aer_driver = service_driver;
- return 1;
- }
+ if (device->bus == &pcie_port_bus_type && device->driver) {
+ service_driver = to_service_driver(device->driver);
+ if (service_driver->service == PCIE_PORT_SERVICE_AER) {
+ *drv = service_driver;
+ return 1;
}
}
return 0;
}
-static void find_aer_service(struct pci_dev *dev,
- struct find_aer_service_data *data)
+static struct pcie_port_service_driver *find_aer_service(struct pci_dev *dev)
{
- int retval;
- retval = device_for_each_child(&dev->dev, data, find_aer_service_iter);
+ struct pcie_port_service_driver *drv = NULL;
+
+ device_for_each_child(&dev->dev, &drv, find_aer_service_iter);
+
+ return drv;
}
static pci_ers_result_t reset_link(struct pcie_device *aerdev,
@@ -447,38 +450,34 @@ static pci_ers_result_t reset_link(struct pcie_device *aerdev,
{
struct pci_dev *udev;
pci_ers_result_t status;
- struct find_aer_service_data data;
+ struct pcie_port_service_driver *driver;
- if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE)
+ if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) {
+ /* Reset this port for all subordinates */
udev = dev;
- else
+ } else {
+ /* Reset the upstream component (likely downstream port) */
udev = dev->bus->self;
+ }
- data.is_downstream = 0;
- data.aer_driver = NULL;
- find_aer_service(udev, &data);
+ /* Use the AER driver of the component first, if it has one */
+ driver = find_aer_service(udev);
- /*
- * Use the aer driver of the error agent firstly.
- * If it hasn't the aer driver, use the root port's
- */
- if (!data.aer_driver || !data.aer_driver->reset_link) {
- if (data.is_downstream &&
- aerdev->device.driver &&
- to_service_driver(aerdev->device.driver)->reset_link) {
- data.aer_driver =
- to_service_driver(aerdev->device.driver);
- } else {
- dev_printk(KERN_DEBUG, &dev->dev, "no link-reset "
- "support\n");
- return PCI_ERS_RESULT_DISCONNECT;
- }
+ if (driver && driver->reset_link) {
+ status = driver->reset_link(udev);
+ } else if (udev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) {
+ status = default_downstream_reset_link(udev);
+ } else {
+ dev_printk(KERN_DEBUG, &dev->dev,
+ "no link-reset support at upstream device %s\n",
+ pci_name(udev));
+ return PCI_ERS_RESULT_DISCONNECT;
}
- status = data.aer_driver->reset_link(udev);
if (status != PCI_ERS_RESULT_RECOVERED) {
- dev_printk(KERN_DEBUG, &dev->dev, "link reset at upstream "
- "device %s failed\n", pci_name(udev));
+ dev_printk(KERN_DEBUG, &dev->dev,
+ "link reset at upstream device %s failed\n",
+ pci_name(udev));
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -495,8 +494,7 @@ static pci_ers_result_t reset_link(struct pcie_device *aerdev,
* error detected message to all downstream drivers within a hierarchy in
* question and return the returned code.
*/
-static pci_ers_result_t do_recovery(struct pcie_device *aerdev,
- struct pci_dev *dev,
+static void do_recovery(struct pcie_device *aerdev, struct pci_dev *dev,
int severity)
{
pci_ers_result_t status, result = PCI_ERS_RESULT_RECOVERED;
@@ -514,10 +512,8 @@ static pci_ers_result_t do_recovery(struct pcie_device *aerdev,
if (severity == AER_FATAL) {
result = reset_link(aerdev, dev);
- if (result != PCI_ERS_RESULT_RECOVERED) {
- /* TODO: Should panic here? */
- return result;
- }
+ if (result != PCI_ERS_RESULT_RECOVERED)
+ goto failed;
}
if (status == PCI_ERS_RESULT_CAN_RECOVER)
@@ -538,13 +534,22 @@ static pci_ers_result_t do_recovery(struct pcie_device *aerdev,
report_slot_reset);
}
- if (status == PCI_ERS_RESULT_RECOVERED)
- broadcast_error_message(dev,
+ if (status != PCI_ERS_RESULT_RECOVERED)
+ goto failed;
+
+ broadcast_error_message(dev,
state,
"resume",
report_resume);
- return status;
+ dev_printk(KERN_DEBUG, &dev->dev,
+ "AER driver successfully recovered\n");
+ return;
+
+failed:
+ /* TODO: Should kernel panic here? */
+ dev_printk(KERN_DEBUG, &dev->dev,
+ "AER driver didn't recover\n");
}
/**
@@ -559,7 +564,6 @@ static void handle_error_source(struct pcie_device *aerdev,
struct pci_dev *dev,
struct aer_err_info *info)
{
- pci_ers_result_t status = 0;
int pos;
if (info->severity == AER_CORRECTABLE) {
@@ -571,114 +575,8 @@ static void handle_error_source(struct pcie_device *aerdev,
if (pos)
pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
info->status);
- } else {
- status = do_recovery(aerdev, dev, info->severity);
- if (status == PCI_ERS_RESULT_RECOVERED) {
- dev_printk(KERN_DEBUG, &dev->dev, "AER driver "
- "successfully recovered\n");
- } else {
- /* TODO: Should kernel panic here? */
- dev_printk(KERN_DEBUG, &dev->dev, "AER driver didn't "
- "recover\n");
- }
- }
-}
-
-/**
- * aer_enable_rootport - enable Root Port's interrupts when receiving messages
- * @rpc: pointer to a Root Port data structure
- *
- * Invoked when PCIe bus loads AER service driver.
- */
-void aer_enable_rootport(struct aer_rpc *rpc)
-{
- struct pci_dev *pdev = rpc->rpd->port;
- int pos, aer_pos;
- u16 reg16;
- u32 reg32;
-
- pos = pci_pcie_cap(pdev);
- /* Clear PCIe Capability's Device Status */
- pci_read_config_word(pdev, pos+PCI_EXP_DEVSTA, &reg16);
- pci_write_config_word(pdev, pos+PCI_EXP_DEVSTA, reg16);
-
- /* Disable system error generation in response to error messages */
- pci_read_config_word(pdev, pos + PCI_EXP_RTCTL, &reg16);
- reg16 &= ~(SYSTEM_ERROR_INTR_ON_MESG_MASK);
- pci_write_config_word(pdev, pos + PCI_EXP_RTCTL, reg16);
-
- aer_pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
- /* Clear error status */
- pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, &reg32);
- pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, reg32);
- pci_read_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, &reg32);
- pci_write_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, reg32);
- pci_read_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, &reg32);
- pci_write_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, reg32);
-
- /*
- * Enable error reporting for the root port device and downstream port
- * devices.
- */
- set_downstream_devices_error_reporting(pdev, true);
-
- /* Enable Root Port's interrupt in response to error messages */
- pci_write_config_dword(pdev,
- aer_pos + PCI_ERR_ROOT_COMMAND,
- ROOT_PORT_INTR_ON_MESG_MASK);
-}
-
-/**
- * disable_root_aer - disable Root Port's interrupts when receiving messages
- * @rpc: pointer to a Root Port data structure
- *
- * Invoked when PCIe bus unloads AER service driver.
- */
-static void disable_root_aer(struct aer_rpc *rpc)
-{
- struct pci_dev *pdev = rpc->rpd->port;
- u32 reg32;
- int pos;
-
- /*
- * Disable error reporting for the root port device and downstream port
- * devices.
- */
- set_downstream_devices_error_reporting(pdev, false);
-
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
- /* Disable Root's interrupt in response to error messages */
- pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, 0);
-
- /* Clear Root's error status reg */
- pci_read_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, &reg32);
- pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, reg32);
-}
-
-/**
- * get_e_source - retrieve an error source
- * @rpc: pointer to the root port which holds an error
- *
- * Invoked by DPC handler to consume an error.
- */
-static struct aer_err_source *get_e_source(struct aer_rpc *rpc)
-{
- struct aer_err_source *e_source;
- unsigned long flags;
-
- /* Lock access to Root error producer/consumer index */
- spin_lock_irqsave(&rpc->e_lock, flags);
- if (rpc->prod_idx == rpc->cons_idx) {
- spin_unlock_irqrestore(&rpc->e_lock, flags);
- return NULL;
- }
- e_source = &rpc->e_sources[rpc->cons_idx];
- rpc->cons_idx++;
- if (rpc->cons_idx == AER_ERROR_SOURCES_MAX)
- rpc->cons_idx = 0;
- spin_unlock_irqrestore(&rpc->e_lock, flags);
-
- return e_source;
+ } else
+ do_recovery(aerdev, dev, info->severity);
}
/**
@@ -687,11 +585,14 @@ static struct aer_err_source *get_e_source(struct aer_rpc *rpc)
* @info: pointer to structure to store the error record
*
* Return 1 on success, 0 on error.
+ *
+ * Note that @info is reused among all error devices. Clear fields properly.
*/
static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
{
int pos, temp;
+ /* Must reset in this function */
info->status = 0;
info->tlp_header_valid = 0;
@@ -744,12 +645,6 @@ static inline void aer_process_err_devices(struct pcie_device *p_device,
{
int i;
- if (!e_info->dev[0]) {
- dev_printk(KERN_DEBUG, &p_device->port->dev,
- "can't find device of ID%04x\n",
- e_info->id);
- }
-
/* Report all before handle them, not to lost records by reset etc. */
for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
if (get_device_error_info(e_info->dev[i], e_info))
@@ -770,11 +665,10 @@ static void aer_isr_one_error(struct pcie_device *p_device,
struct aer_err_source *e_src)
{
struct aer_err_info *e_info;
- int i;
/* struct aer_err_info might be big, so we allocate it with slab */
e_info = kmalloc(sizeof(struct aer_err_info), GFP_KERNEL);
- if (e_info == NULL) {
+ if (!e_info) {
dev_printk(KERN_DEBUG, &p_device->port->dev,
"Can't allocate mem when processing AER errors\n");
return;
@@ -784,37 +678,72 @@ static void aer_isr_one_error(struct pcie_device *p_device,
* There is a possibility that both correctable error and
* uncorrectable error being logged. Report correctable error first.
*/
- for (i = 1; i & ROOT_ERR_STATUS_MASKS ; i <<= 2) {
- if (i > 4)
- break;
- if (!(e_src->status & i))
- continue;
-
- memset(e_info, 0, sizeof(struct aer_err_info));
-
- /* Init comprehensive error information */
- if (i & PCI_ERR_ROOT_COR_RCV) {
- e_info->id = ERR_COR_ID(e_src->id);
- e_info->severity = AER_CORRECTABLE;
- } else {
- e_info->id = ERR_UNCOR_ID(e_src->id);
- e_info->severity = ((e_src->status >> 6) & 1);
- }
- if (e_src->status &
- (PCI_ERR_ROOT_MULTI_COR_RCV |
- PCI_ERR_ROOT_MULTI_UNCOR_RCV))
+ if (e_src->status & PCI_ERR_ROOT_COR_RCV) {
+ e_info->id = ERR_COR_ID(e_src->id);
+ e_info->severity = AER_CORRECTABLE;
+
+ if (e_src->status & PCI_ERR_ROOT_MULTI_COR_RCV)
e_info->multi_error_valid = 1;
+ else
+ e_info->multi_error_valid = 0;
+
+ aer_print_port_info(p_device->port, e_info);
+
+ if (find_source_device(p_device->port, e_info))
+ aer_process_err_devices(p_device, e_info);
+ }
+
+ if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
+ e_info->id = ERR_UNCOR_ID(e_src->id);
+
+ if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
+ e_info->severity = AER_FATAL;
+ else
+ e_info->severity = AER_NONFATAL;
+
+ if (e_src->status & PCI_ERR_ROOT_MULTI_UNCOR_RCV)
+ e_info->multi_error_valid = 1;
+ else
+ e_info->multi_error_valid = 0;
aer_print_port_info(p_device->port, e_info);
- find_source_device(p_device->port, e_info);
- aer_process_err_devices(p_device, e_info);
+ if (find_source_device(p_device->port, e_info))
+ aer_process_err_devices(p_device, e_info);
}
kfree(e_info);
}
/**
+ * get_e_source - retrieve an error source
+ * @rpc: pointer to the root port which holds an error
+ * @e_src: pointer to store retrieved error source
+ *
+ * Return 1 if an error source is retrieved, otherwise 0.
+ *
+ * Invoked by DPC handler to consume an error.
+ */
+static int get_e_source(struct aer_rpc *rpc, struct aer_err_source *e_src)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ /* Lock access to Root error producer/consumer index */
+ spin_lock_irqsave(&rpc->e_lock, flags);
+ if (rpc->prod_idx != rpc->cons_idx) {
+ *e_src = rpc->e_sources[rpc->cons_idx];
+ rpc->cons_idx++;
+ if (rpc->cons_idx == AER_ERROR_SOURCES_MAX)
+ rpc->cons_idx = 0;
+ ret = 1;
+ }
+ spin_unlock_irqrestore(&rpc->e_lock, flags);
+
+ return ret;
+}
+
+/**
* aer_isr - consume errors detected by root port
* @work: definition of this work item
*
@@ -824,34 +753,17 @@ void aer_isr(struct work_struct *work)
{
struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler);
struct pcie_device *p_device = rpc->rpd;
- struct aer_err_source *e_src;
+ struct aer_err_source e_src;
mutex_lock(&rpc->rpc_mutex);
- e_src = get_e_source(rpc);
- while (e_src) {
- aer_isr_one_error(p_device, e_src);
- e_src = get_e_source(rpc);
- }
+ while (get_e_source(rpc, &e_src))
+ aer_isr_one_error(p_device, &e_src);
mutex_unlock(&rpc->rpc_mutex);
wake_up(&rpc->wait_release);
}
/**
- * aer_delete_rootport - disable root port aer and delete service data
- * @rpc: pointer to a root port device being deleted
- *
- * Invoked when AER service unloaded on a specific Root Port
- */
-void aer_delete_rootport(struct aer_rpc *rpc)
-{
- /* Disable root port AER itself */
- disable_root_aer(rpc);
-
- kfree(rpc);
-}
-
-/**
* aer_init - provide AER initialization
* @dev: pointer to AER pcie device
*
@@ -859,7 +771,7 @@ void aer_delete_rootport(struct aer_rpc *rpc)
*/
int aer_init(struct pcie_device *dev)
{
- if (dev->port->aer_firmware_first) {
+ if (pcie_aer_get_firmware_first(dev->port)) {
dev_printk(KERN_DEBUG, &dev->device,
"PCIe errors handled by platform firmware.\n");
goto out;
@@ -873,7 +785,7 @@ out:
if (forceload) {
dev_printk(KERN_DEBUG, &dev->device,
"aerdrv forceload requested.\n");
- dev->port->aer_firmware_first = 0;
+ pcie_aer_force_firmware_first(dev->port, 0);
return 0;
}
return -ENXIO;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index c82548afcd5c..f4adba2d1dd3 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -10,7 +10,6 @@
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/pci-aspm.h>
-#include <acpi/acpi_hest.h>
#include "pci.h"
#define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
@@ -904,12 +903,6 @@ void set_pcie_hotplug_bridge(struct pci_dev *pdev)
pdev->is_hotplug_bridge = 1;
}
-static void set_pci_aer_firmware_first(struct pci_dev *pdev)
-{
- if (acpi_hest_firmware_first_pci(pdev))
- pdev->aer_firmware_first = 1;
-}
-
#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
/**
@@ -939,7 +932,6 @@ int pci_setup_device(struct pci_dev *dev)
dev->multifunction = !!(hdr_type & 0x80);
dev->error_state = pci_channel_io_normal;
set_pcie_port_type(dev);
- set_pci_aer_firmware_first(dev);
list_for_each_entry(slot, &dev->bus->slots, list)
if (PCI_SLOT(dev->devfn) == slot->number)
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 659eaa0fc48f..e0189cf7c558 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -97,6 +97,50 @@ static ssize_t cur_speed_read_file(struct pci_slot *slot, char *buf)
return bus_speed_read(slot->bus->cur_bus_speed, buf);
}
+static void remove_sysfs_files(struct pci_slot *slot)
+{
+ char func[10];
+ struct list_head *tmp;
+
+ list_for_each(tmp, &slot->bus->devices) {
+ struct pci_dev *dev = pci_dev_b(tmp);
+ if (PCI_SLOT(dev->devfn) != slot->number)
+ continue;
+ sysfs_remove_link(&dev->dev.kobj, "slot");
+
+ snprintf(func, 10, "function%d", PCI_FUNC(dev->devfn));
+ sysfs_remove_link(&slot->kobj, func);
+ }
+}
+
+static int create_sysfs_files(struct pci_slot *slot)
+{
+ int result;
+ char func[10];
+ struct list_head *tmp;
+
+ list_for_each(tmp, &slot->bus->devices) {
+ struct pci_dev *dev = pci_dev_b(tmp);
+ if (PCI_SLOT(dev->devfn) != slot->number)
+ continue;
+
+ result = sysfs_create_link(&dev->dev.kobj, &slot->kobj, "slot");
+ if (result)
+ goto fail;
+
+ snprintf(func, 10, "function%d", PCI_FUNC(dev->devfn));
+ result = sysfs_create_link(&slot->kobj, &dev->dev.kobj, func);
+ if (result)
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ remove_sysfs_files(slot);
+ return result;
+}
+
static void pci_slot_release(struct kobject *kobj)
{
struct pci_dev *dev;
@@ -109,6 +153,8 @@ static void pci_slot_release(struct kobject *kobj)
if (PCI_SLOT(dev->devfn) == slot->number)
dev->slot = NULL;
+ remove_sysfs_files(slot);
+
list_del(&slot->list);
kfree(slot);
@@ -300,6 +346,8 @@ placeholder:
INIT_LIST_HEAD(&slot->list);
list_add(&slot->list, &parent->slots);
+ create_sysfs_files(slot);
+
list_for_each_entry(dev, &parent->devices, bus_list)
if (PCI_SLOT(dev->devfn) == slot_nr)
dev->slot = slot;
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index d189e4743e69..caca50eaa0e7 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -49,26 +49,6 @@ config PCMCIA_LOAD_CIS
If unsure, say Y.
-config PCMCIA_IOCTL
- bool "PCMCIA control ioctl (obsolete)"
- depends on PCMCIA && ARM && !SMP && !PREEMPT
- default y
- help
- If you say Y here, the deprecated ioctl interface to the PCMCIA
- subsystem will be built. It is needed by the deprecated pcmcia-cs
- tools (cardmgr, cardctl) to function properly.
-
- You should use the new pcmciautils package instead (see
- <file:Documentation/Changes> for location and details).
-
- This config option will most likely be removed from kernel 2.6.35,
- the associated code from kernel 2.6.36.
-
- As the PCMCIA ioctl is not locking safe, it depends on !SMP and
- !PREEMPT.
-
- If unsure, say N.
-
config CARDBUS
bool "32-bit CardBus support"
depends on PCI
@@ -317,7 +297,7 @@ config ELECTRA_CF
PA Semi Electra eval board.
config PCCARD_NONSTATIC
- tristate
+ bool
config PCCARD_IODYN
bool
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index 381b031d9d75..7031d0a4f4b0 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -2,15 +2,18 @@
# Makefile for the kernel pcmcia subsystem (c/o David Hinds)
#
-pcmcia_core-y += cs.o rsrc_mgr.o socket_sysfs.o
+pcmcia_core-y += cs.o socket_sysfs.o
pcmcia_core-$(CONFIG_CARDBUS) += cardbus.o
obj-$(CONFIG_PCCARD) += pcmcia_core.o
-pcmcia-y += ds.o pcmcia_resource.o cistpl.o
+pcmcia-y += ds.o pcmcia_resource.o cistpl.o pcmcia_cis.o
pcmcia-$(CONFIG_PCMCIA_IOCTL) += pcmcia_ioctl.o
obj-$(CONFIG_PCMCIA) += pcmcia.o
-obj-$(CONFIG_PCCARD_NONSTATIC) += rsrc_nonstatic.o
+pcmcia_rsrc-y += rsrc_mgr.o
+pcmcia_rsrc-$(CONFIG_PCCARD_NONSTATIC) += rsrc_nonstatic.o
+pcmcia_rsrc-$(CONFIG_PCCARD_IODYN) += rsrc_iodyn.o
+obj-$(CONFIG_PCCARD) += pcmcia_rsrc.o
# socket drivers
diff --git a/drivers/pcmcia/bfin_cf_pcmcia.c b/drivers/pcmcia/bfin_cf_pcmcia.c
index 9e84d039de41..eae9cbe37a3e 100644
--- a/drivers/pcmcia/bfin_cf_pcmcia.c
+++ b/drivers/pcmcia/bfin_cf_pcmcia.c
@@ -113,7 +113,7 @@ static int bfin_cf_get_status(struct pcmcia_socket *s, u_int *sp)
if (bfin_cf_present(cf->cd_pfx)) {
*sp = SS_READY | SS_DETECT | SS_POWERON | SS_3VCARD;
- s->irq.AssignedIRQ = 0;
+ s->pcmcia_irq = 0;
s->pci_irq = cf->irq;
} else
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
index e6ab2a47d8cb..9a58862f1401 100644
--- a/drivers/pcmcia/cardbus.c
+++ b/drivers/pcmcia/cardbus.c
@@ -94,7 +94,6 @@ int __ref cb_alloc(struct pcmcia_socket *s)
pci_enable_bridges(bus);
pci_bus_add_devices(bus);
- s->irq.AssignedIRQ = s->pci_irq;
return 0;
}
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c
index 854959cada3a..60d428be0b07 100644
--- a/drivers/pcmcia/cistpl.c
+++ b/drivers/pcmcia/cistpl.c
@@ -129,6 +129,8 @@ static void __iomem *set_cis_map(struct pcmcia_socket *s,
/**
* pcmcia_read_cis_mem() - low-level function to read CIS memory
+ *
+ * Must be called with ops_mutex held.
*/
int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
u_int len, void *ptr)
@@ -138,7 +140,6 @@ int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
dev_dbg(&s->dev, "pcmcia_read_cis_mem(%d, %#x, %u)\n", attr, addr, len);
- mutex_lock(&s->ops_mutex);
if (attr & IS_INDIRECT) {
/* Indirect accesses use a bunch of special registers at fixed
locations in common memory */
@@ -153,7 +154,6 @@ int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
if (!sys) {
dev_dbg(&s->dev, "could not map memory\n");
memset(ptr, 0xff, len);
- mutex_unlock(&s->ops_mutex);
return -1;
}
@@ -184,7 +184,6 @@ int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
if (!sys) {
dev_dbg(&s->dev, "could not map memory\n");
memset(ptr, 0xff, len);
- mutex_unlock(&s->ops_mutex);
return -1;
}
end = sys + s->map_size;
@@ -198,7 +197,6 @@ int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
addr = 0;
}
}
- mutex_unlock(&s->ops_mutex);
dev_dbg(&s->dev, " %#2.2x %#2.2x %#2.2x %#2.2x ...\n",
*(u_char *)(ptr+0), *(u_char *)(ptr+1),
*(u_char *)(ptr+2), *(u_char *)(ptr+3));
@@ -209,7 +207,8 @@ int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
/**
* pcmcia_write_cis_mem() - low-level function to write CIS memory
*
- * Probably only useful for writing one-byte registers.
+ * Probably only useful for writing one-byte registers. Must be called
+ * with ops_mutex held.
*/
void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
u_int len, void *ptr)
@@ -220,7 +219,6 @@ void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
dev_dbg(&s->dev,
"pcmcia_write_cis_mem(%d, %#x, %u)\n", attr, addr, len);
- mutex_lock(&s->ops_mutex);
if (attr & IS_INDIRECT) {
/* Indirect accesses use a bunch of special registers at fixed
locations in common memory */
@@ -234,7 +232,6 @@ void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
((cis_width) ? MAP_16BIT : 0));
if (!sys) {
dev_dbg(&s->dev, "could not map memory\n");
- mutex_unlock(&s->ops_mutex);
return; /* FIXME: Error */
}
@@ -260,7 +257,6 @@ void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
sys = set_cis_map(s, card_offset, flags);
if (!sys) {
dev_dbg(&s->dev, "could not map memory\n");
- mutex_unlock(&s->ops_mutex);
return; /* FIXME: error */
}
@@ -275,7 +271,6 @@ void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
addr = 0;
}
}
- mutex_unlock(&s->ops_mutex);
}
@@ -314,7 +309,6 @@ static int read_cis_cache(struct pcmcia_socket *s, int attr, u_int addr,
return 0;
}
}
- mutex_unlock(&s->ops_mutex);
ret = pcmcia_read_cis_mem(s, attr, addr, len, ptr);
@@ -326,11 +320,11 @@ static int read_cis_cache(struct pcmcia_socket *s, int attr, u_int addr,
cis->len = len;
cis->attr = attr;
memcpy(cis->cache, ptr, len);
- mutex_lock(&s->ops_mutex);
list_add(&cis->node, &s->cis_cache);
- mutex_unlock(&s->ops_mutex);
}
}
+ mutex_unlock(&s->ops_mutex);
+
return ret;
}
@@ -386,6 +380,7 @@ int verify_cis_cache(struct pcmcia_socket *s)
"no memory for verifying CIS\n");
return -ENOMEM;
}
+ mutex_lock(&s->ops_mutex);
list_for_each_entry(cis, &s->cis_cache, node) {
int len = cis->len;
@@ -395,10 +390,12 @@ int verify_cis_cache(struct pcmcia_socket *s)
ret = pcmcia_read_cis_mem(s, cis->attr, cis->addr, len, buf);
if (ret || memcmp(buf, cis->cache, len) != 0) {
kfree(buf);
+ mutex_unlock(&s->ops_mutex);
return -1;
}
}
kfree(buf);
+ mutex_unlock(&s->ops_mutex);
return 0;
}
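With the locking rework above, ops_mutex is now taken by the callers of the low-level CIS accessors rather than inside them; a caller outside cistpl.c would look roughly like this hypothetical sketch:

/* Hypothetical caller: pcmcia_read_cis_mem() now expects ops_mutex held. */
static int example_read_cis(struct pcmcia_socket *s, u_int addr,
			    u_int len, void *buf)
{
	int ret;

	mutex_lock(&s->ops_mutex);
	ret = pcmcia_read_cis_mem(s, 0 /* attr */, addr, len, buf);
	mutex_unlock(&s->ops_mutex);

	return ret;
}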
@@ -1362,106 +1359,6 @@ EXPORT_SYMBOL(pcmcia_parse_tuple);
/**
- * pccard_read_tuple() - internal CIS tuple access
- * @s: the struct pcmcia_socket where the card is inserted
- * @function: the device function we loop for
- * @code: which CIS code shall we look for?
- * @parse: buffer where the tuple shall be parsed (or NULL, if no parse)
- *
- * pccard_read_tuple() reads out one tuple and attempts to parse it
- */
-int pccard_read_tuple(struct pcmcia_socket *s, unsigned int function,
- cisdata_t code, void *parse)
-{
- tuple_t tuple;
- cisdata_t *buf;
- int ret;
-
- buf = kmalloc(256, GFP_KERNEL);
- if (buf == NULL) {
- dev_printk(KERN_WARNING, &s->dev, "no memory to read tuple\n");
- return -ENOMEM;
- }
- tuple.DesiredTuple = code;
- tuple.Attributes = 0;
- if (function == BIND_FN_ALL)
- tuple.Attributes = TUPLE_RETURN_COMMON;
- ret = pccard_get_first_tuple(s, function, &tuple);
- if (ret != 0)
- goto done;
- tuple.TupleData = buf;
- tuple.TupleOffset = 0;
- tuple.TupleDataMax = 255;
- ret = pccard_get_tuple_data(s, &tuple);
- if (ret != 0)
- goto done;
- ret = pcmcia_parse_tuple(&tuple, parse);
-done:
- kfree(buf);
- return ret;
-}
-
-
-/**
- * pccard_loop_tuple() - loop over tuples in the CIS
- * @s: the struct pcmcia_socket where the card is inserted
- * @function: the device function we loop for
- * @code: which CIS code shall we look for?
- * @parse: buffer where the tuple shall be parsed (or NULL, if no parse)
- * @priv_data: private data to be passed to the loop_tuple function.
- * @loop_tuple: function to call for each CIS entry of type @function. IT
- * gets passed the raw tuple, the paresed tuple (if @parse is
- * set) and @priv_data.
- *
- * pccard_loop_tuple() loops over all CIS entries of type @function, and
- * calls the @loop_tuple function for each entry. If the call to @loop_tuple
- * returns 0, the loop exits. Returns 0 on success or errorcode otherwise.
- */
-int pccard_loop_tuple(struct pcmcia_socket *s, unsigned int function,
- cisdata_t code, cisparse_t *parse, void *priv_data,
- int (*loop_tuple) (tuple_t *tuple,
- cisparse_t *parse,
- void *priv_data))
-{
- tuple_t tuple;
- cisdata_t *buf;
- int ret;
-
- buf = kzalloc(256, GFP_KERNEL);
- if (buf == NULL) {
- dev_printk(KERN_WARNING, &s->dev, "no memory to read tuple\n");
- return -ENOMEM;
- }
-
- tuple.TupleData = buf;
- tuple.TupleDataMax = 255;
- tuple.TupleOffset = 0;
- tuple.DesiredTuple = code;
- tuple.Attributes = 0;
-
- ret = pccard_get_first_tuple(s, function, &tuple);
- while (!ret) {
- if (pccard_get_tuple_data(s, &tuple))
- goto next_entry;
-
- if (parse)
- if (pcmcia_parse_tuple(&tuple, parse))
- goto next_entry;
-
- ret = loop_tuple(&tuple, parse, priv_data);
- if (!ret)
- break;
-
-next_entry:
- ret = pccard_get_next_tuple(s, function, &tuple);
- }
-
- kfree(buf);
- return ret;
-}
-
-
-/**
* pccard_validate_cis() - check whether card has a sensible CIS
* @s: the struct pcmcia_socket we are to check
* @info: returns the number of tuples in the (valid) CIS, or 0
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index c3383750e333..976d80706eae 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -337,7 +337,6 @@ static void socket_shutdown(struct pcmcia_socket *s)
s->socket = dead_socket;
s->ops->init(s);
s->ops->set_socket(s, &s->socket);
- s->irq.AssignedIRQ = s->irq.Config = 0;
s->lock_count = 0;
kfree(s->fake_cis);
s->fake_cis = NULL;
diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h
index f95864c2191e..4126a75445ea 100644
--- a/drivers/pcmcia/cs_internal.h
+++ b/drivers/pcmcia/cs_internal.h
@@ -52,13 +52,11 @@ struct cis_cache_entry {
struct pccard_resource_ops {
int (*validate_mem) (struct pcmcia_socket *s);
- int (*adjust_io_region) (struct resource *res,
- unsigned long r_start,
- unsigned long r_end,
- struct pcmcia_socket *s);
- struct resource* (*find_io) (unsigned long base, int num,
- unsigned long align,
- struct pcmcia_socket *s);
+ int (*find_io) (struct pcmcia_socket *s,
+ unsigned int attr,
+ unsigned int *base,
+ unsigned int num,
+ unsigned int align);
struct resource* (*find_mem) (unsigned long base, unsigned long num,
unsigned long align, int low,
struct pcmcia_socket *s);
@@ -89,6 +87,14 @@ struct pccard_resource_ops {
/*
+ * Stuff internal to module "pcmcia_rsrc":
+ */
+extern int static_init(struct pcmcia_socket *s);
+extern struct resource *pcmcia_make_resource(unsigned long start,
+ unsigned long end,
+ int flags, const char *name);
+
+/*
* Stuff internal to module "pcmcia_core":
*/
@@ -149,6 +155,8 @@ extern struct resource *pcmcia_find_mem_region(u_long base,
int low,
struct pcmcia_socket *s);
+void pcmcia_cleanup_irq(struct pcmcia_socket *s);
+int pcmcia_setup_irq(struct pcmcia_device *p_dev);
/* cistpl.c */
extern struct bin_attribute pccard_cis_attr;
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 041eee43fd8d..7ef7adee5e4f 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -371,8 +371,6 @@ static int pcmcia_device_remove(struct device *dev)
if (p_drv->remove)
p_drv->remove(p_dev);
- p_dev->dev_node = NULL;
-
/* check for proper unloading */
if (p_dev->_irq || p_dev->_io || p_dev->_locked)
dev_printk(KERN_INFO, dev,
@@ -479,15 +477,6 @@ static int pcmcia_device_query(struct pcmcia_device *p_dev)
}
-/* device_add_lock is needed to avoid double registration by cardmgr and kernel.
- * Serializes pcmcia_device_add; will most likely be removed in future.
- *
- * While it has the caveat that adding new PCMCIA devices inside(!) device_register()
- * won't work, this doesn't matter much at the moment: the driver core doesn't
- * support it either.
- */
-static DEFINE_MUTEX(device_add_lock);
-
struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s, unsigned int function)
{
struct pcmcia_device *p_dev, *tmp_dev;
@@ -497,8 +486,6 @@ struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s, unsigned int fu
if (!s)
return NULL;
- mutex_lock(&device_add_lock);
-
pr_debug("adding device to %d, function %d\n", s->sock, function);
p_dev = kzalloc(sizeof(struct pcmcia_device), GFP_KERNEL);
@@ -538,8 +525,8 @@ struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s, unsigned int fu
/*
* p_dev->function_config must be the same for all card functions.
- * Note that this is serialized by the device_add_lock, so that
- * only one such struct will be created.
+ * Note that this is serialized by ops_mutex, so that only one
+ * such struct will be created.
*/
list_for_each_entry(tmp_dev, &s->devices_list, socket_device_list)
if (p_dev->func == tmp_dev->func) {
@@ -552,28 +539,31 @@ struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s, unsigned int fu
/* Add to the list in pcmcia_bus_socket */
list_add(&p_dev->socket_device_list, &s->devices_list);
- mutex_unlock(&s->ops_mutex);
+ if (pcmcia_setup_irq(p_dev))
+ dev_warn(&p_dev->dev,
+ "IRQ setup failed -- device might not work\n");
if (!p_dev->function_config) {
dev_dbg(&p_dev->dev, "creating config_t\n");
p_dev->function_config = kzalloc(sizeof(struct config_t),
GFP_KERNEL);
- if (!p_dev->function_config)
+ if (!p_dev->function_config) {
+ mutex_unlock(&s->ops_mutex);
goto err_unreg;
+ }
kref_init(&p_dev->function_config->ref);
}
+ mutex_unlock(&s->ops_mutex);
dev_printk(KERN_NOTICE, &p_dev->dev,
- "pcmcia: registering new device %s\n",
- p_dev->devname);
+ "pcmcia: registering new device %s (IRQ: %d)\n",
+ p_dev->devname, p_dev->irq);
pcmcia_device_query(p_dev);
if (device_register(&p_dev->dev))
goto err_unreg;
- mutex_unlock(&device_add_lock);
-
return p_dev;
err_unreg:
@@ -591,7 +581,6 @@ struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s, unsigned int fu
kfree(p_dev->devname);
kfree(p_dev);
err_put:
- mutex_unlock(&device_add_lock);
pcmcia_put_socket(s);
return NULL;
@@ -1258,6 +1247,7 @@ static int ds_event(struct pcmcia_socket *skt, event_t event, int priority)
handle_event(skt, event);
mutex_lock(&s->ops_mutex);
destroy_cis_cache(s);
+ pcmcia_cleanup_irq(s);
mutex_unlock(&s->ops_mutex);
break;
diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
index a7cfc7964c7c..0ad06a3bd562 100644
--- a/drivers/pcmcia/omap_cf.c
+++ b/drivers/pcmcia/omap_cf.c
@@ -117,7 +117,7 @@ static int omap_cf_get_status(struct pcmcia_socket *s, u_int *sp)
*sp = SS_READY | SS_DETECT | SS_POWERON | SS_3VCARD;
cf = container_of(s, struct omap_cf_socket, socket);
- s->irq.AssignedIRQ = 0;
+ s->pcmcia_irq = 0;
s->pci_irq = cf->irq;
} else
*sp = 0;
diff --git a/drivers/pcmcia/pcmcia_cis.c b/drivers/pcmcia/pcmcia_cis.c
new file mode 100644
index 000000000000..4a65eaf96b0a
--- /dev/null
+++ b/drivers/pcmcia/pcmcia_cis.c
@@ -0,0 +1,356 @@
+/*
+ * PCMCIA high-level CIS access functions
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Copyright (C) 1999 David A. Hinds
+ * Copyright (C) 2004-2009 Dominik Brodowski
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ss.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/ds.h>
+#include "cs_internal.h"
+
+
+/**
+ * pccard_read_tuple() - internal CIS tuple access
+ * @s: the struct pcmcia_socket where the card is inserted
+ * @function: the device function we loop for
+ * @code: which CIS code shall we look for?
+ * @parse: buffer where the tuple shall be parsed (or NULL, if no parse)
+ *
+ * pccard_read_tuple() reads out one tuple and attempts to parse it
+ */
+int pccard_read_tuple(struct pcmcia_socket *s, unsigned int function,
+ cisdata_t code, void *parse)
+{
+ tuple_t tuple;
+ cisdata_t *buf;
+ int ret;
+
+ buf = kmalloc(256, GFP_KERNEL);
+ if (buf == NULL) {
+ dev_printk(KERN_WARNING, &s->dev, "no memory to read tuple\n");
+ return -ENOMEM;
+ }
+ tuple.DesiredTuple = code;
+ tuple.Attributes = 0;
+ if (function == BIND_FN_ALL)
+ tuple.Attributes = TUPLE_RETURN_COMMON;
+ ret = pccard_get_first_tuple(s, function, &tuple);
+ if (ret != 0)
+ goto done;
+ tuple.TupleData = buf;
+ tuple.TupleOffset = 0;
+ tuple.TupleDataMax = 255;
+ ret = pccard_get_tuple_data(s, &tuple);
+ if (ret != 0)
+ goto done;
+ ret = pcmcia_parse_tuple(&tuple, parse);
+done:
+ kfree(buf);
+ return ret;
+}
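
/*
 * Illustrative usage sketch, not part of this patch: reading and parsing the
 * manufacturer-ID tuple via pccard_read_tuple(). The wrapper function and the
 * debug message are hypothetical.
 */
static void example_read_manfid(struct pcmcia_socket *s)
{
	cisparse_t parse;

	if (!pccard_read_tuple(s, BIND_FN_ALL, CISTPL_MANFID, &parse))
		dev_dbg(&s->dev, "manfid 0x%04x, card 0x%04x\n",
			parse.manfid.manf, parse.manfid.card);
}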
+
+
+/**
+ * pccard_loop_tuple() - loop over tuples in the CIS
+ * @s: the struct pcmcia_socket where the card is inserted
+ * @function: the device function we loop for
+ * @code: which CIS code shall we look for?
+ * @parse: buffer where the tuple shall be parsed (or NULL, if no parse)
+ * @priv_data: private data to be passed to the loop_tuple function.
+ * @loop_tuple: function to call for each CIS entry of type @code. It
+ *              gets passed the raw tuple, the parsed tuple (if @parse is
+ * set) and @priv_data.
+ *
+ * pccard_loop_tuple() loops over all CIS entries of type @code, and
+ * calls the @loop_tuple function for each entry. If the call to @loop_tuple
+ * returns 0, the loop exits. Returns 0 on success or an error code otherwise.
+ */
+int pccard_loop_tuple(struct pcmcia_socket *s, unsigned int function,
+ cisdata_t code, cisparse_t *parse, void *priv_data,
+ int (*loop_tuple) (tuple_t *tuple,
+ cisparse_t *parse,
+ void *priv_data))
+{
+ tuple_t tuple;
+ cisdata_t *buf;
+ int ret;
+
+ buf = kzalloc(256, GFP_KERNEL);
+ if (buf == NULL) {
+ dev_printk(KERN_WARNING, &s->dev, "no memory to read tuple\n");
+ return -ENOMEM;
+ }
+
+ tuple.TupleData = buf;
+ tuple.TupleDataMax = 255;
+ tuple.TupleOffset = 0;
+ tuple.DesiredTuple = code;
+ tuple.Attributes = 0;
+
+ ret = pccard_get_first_tuple(s, function, &tuple);
+ while (!ret) {
+ if (pccard_get_tuple_data(s, &tuple))
+ goto next_entry;
+
+ if (parse)
+ if (pcmcia_parse_tuple(&tuple, parse))
+ goto next_entry;
+
+ ret = loop_tuple(&tuple, parse, priv_data);
+ if (!ret)
+ break;
+
+next_entry:
+ ret = pccard_get_next_tuple(s, function, &tuple);
+ }
+
+ kfree(buf);
+ return ret;
+}
+
+struct pcmcia_cfg_mem {
+ struct pcmcia_device *p_dev;
+ void *priv_data;
+ int (*conf_check) (struct pcmcia_device *p_dev,
+ cistpl_cftable_entry_t *cfg,
+ cistpl_cftable_entry_t *dflt,
+ unsigned int vcc,
+ void *priv_data);
+ cisparse_t parse;
+ cistpl_cftable_entry_t dflt;
+};
+
+/**
+ * pcmcia_do_loop_config() - internal helper for pcmcia_loop_config()
+ *
+ * pcmcia_do_loop_config() is the internal callback for the call from
+ * pcmcia_loop_config() to pccard_loop_tuple(). Data is transferred
+ * by a struct pcmcia_cfg_mem.
+ */
+static int pcmcia_do_loop_config(tuple_t *tuple, cisparse_t *parse, void *priv)
+{
+ cistpl_cftable_entry_t *cfg = &parse->cftable_entry;
+ struct pcmcia_cfg_mem *cfg_mem = priv;
+
+ /* default values */
+ cfg_mem->p_dev->conf.ConfigIndex = cfg->index;
+ if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
+ cfg_mem->dflt = *cfg;
+
+ return cfg_mem->conf_check(cfg_mem->p_dev, cfg, &cfg_mem->dflt,
+ cfg_mem->p_dev->socket->socket.Vcc,
+ cfg_mem->priv_data);
+}
+
+/**
+ * pcmcia_loop_config() - loop over configuration options
+ * @p_dev: the struct pcmcia_device which we need to loop for.
+ * @conf_check: function to call for each configuration option.
+ * It gets passed the struct pcmcia_device, the CIS data
+ * describing the configuration option, and private data
+ * being passed to pcmcia_loop_config()
+ * @priv_data: private data to be passed to the conf_check function.
+ *
+ * pcmcia_loop_config() loops over all configuration options, and calls
+ * the driver-specific conf_check() for each one, checking whether
+ * it is a valid one. Returns 0 on success or an error code otherwise.
+ */
+int pcmcia_loop_config(struct pcmcia_device *p_dev,
+ int (*conf_check) (struct pcmcia_device *p_dev,
+ cistpl_cftable_entry_t *cfg,
+ cistpl_cftable_entry_t *dflt,
+ unsigned int vcc,
+ void *priv_data),
+ void *priv_data)
+{
+ struct pcmcia_cfg_mem *cfg_mem;
+ int ret;
+
+ cfg_mem = kzalloc(sizeof(struct pcmcia_cfg_mem), GFP_KERNEL);
+ if (cfg_mem == NULL)
+ return -ENOMEM;
+
+ cfg_mem->p_dev = p_dev;
+ cfg_mem->conf_check = conf_check;
+ cfg_mem->priv_data = priv_data;
+
+ ret = pccard_loop_tuple(p_dev->socket, p_dev->func,
+ CISTPL_CFTABLE_ENTRY, &cfg_mem->parse,
+ cfg_mem, pcmcia_do_loop_config);
+
+ kfree(cfg_mem);
+ return ret;
+}
+EXPORT_SYMBOL(pcmcia_loop_config);
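
/*
 * Illustrative usage sketch, not part of this patch: a minimal, hypothetical
 * driver callback that simply accepts the first configuration entry the core
 * offers. Real drivers usually inspect @cfg and @dflt and set up I/O and IRQ
 * resources before returning 0; all names here are invented.
 */
static int example_conf_check(struct pcmcia_device *p_dev,
			      cistpl_cftable_entry_t *cfg,
			      cistpl_cftable_entry_t *dflt,
			      unsigned int vcc, void *priv_data)
{
	/* returning 0 stops the loop and selects this entry */
	return 0;
}

static int example_config(struct pcmcia_device *p_dev)
{
	/* p_dev->conf.ConfigIndex is set to cfg->index before each callback */
	return pcmcia_loop_config(p_dev, example_conf_check, NULL);
}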
+
+
+struct pcmcia_loop_mem {
+ struct pcmcia_device *p_dev;
+ void *priv_data;
+ int (*loop_tuple) (struct pcmcia_device *p_dev,
+ tuple_t *tuple,
+ void *priv_data);
+};
+
+/**
+ * pcmcia_do_loop_tuple() - internal helper for pcmcia_loop_tuple()
+ *
+ * pcmcia_do_loop_tuple() is the internal callback for the call from
+ * pcmcia_loop_tuple() to pccard_loop_tuple(). Data is transferred
+ * by a struct pcmcia_loop_mem.
+ */
+static int pcmcia_do_loop_tuple(tuple_t *tuple, cisparse_t *parse, void *priv)
+{
+ struct pcmcia_loop_mem *loop = priv;
+
+ return loop->loop_tuple(loop->p_dev, tuple, loop->priv_data);
+};
+
+/**
+ * pcmcia_loop_tuple() - loop over tuples in the CIS
+ * @p_dev: the struct pcmcia_device which we need to loop for.
+ * @code: which CIS code shall we look for?
+ * @priv_data: private data to be passed to the loop_tuple function.
+ * @loop_tuple: function to call for each CIS entry of type @code. It
+ * gets passed the raw tuple and @priv_data.
+ *
+ * pcmcia_loop_tuple() loops over all CIS entries of type @code, and
+ * calls the @loop_tuple function for each entry. If the call to @loop_tuple
+ * returns 0, the loop exits. Returns 0 on success or an error code otherwise.
+ */
+int pcmcia_loop_tuple(struct pcmcia_device *p_dev, cisdata_t code,
+ int (*loop_tuple) (struct pcmcia_device *p_dev,
+ tuple_t *tuple,
+ void *priv_data),
+ void *priv_data)
+{
+ struct pcmcia_loop_mem loop = {
+ .p_dev = p_dev,
+ .loop_tuple = loop_tuple,
+ .priv_data = priv_data};
+
+ return pccard_loop_tuple(p_dev->socket, p_dev->func, code, NULL,
+ &loop, pcmcia_do_loop_tuple);
+}
+EXPORT_SYMBOL(pcmcia_loop_tuple);
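
/*
 * Illustrative usage sketch, not part of this patch: counting the
 * CISTPL_FUNCE entries of a device. A negative return value from the callback
 * keeps the loop going; returning 0 would terminate it, as pcmcia_do_get_mac()
 * below does. All names here are invented.
 */
static int example_funce_cb(struct pcmcia_device *p_dev, tuple_t *tuple,
			    void *priv_data)
{
	unsigned int *count = priv_data;

	(*count)++;
	return -EAGAIN;	/* keep iterating over the remaining entries */
}

static void example_count_funce(struct pcmcia_device *p_dev)
{
	unsigned int count = 0;

	pcmcia_loop_tuple(p_dev, CISTPL_FUNCE, example_funce_cb, &count);
	dev_dbg(&p_dev->dev, "%u CISTPL_FUNCE entries found\n", count);
}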
+
+
+struct pcmcia_loop_get {
+ size_t len;
+ cisdata_t **buf;
+};
+
+/**
+ * pcmcia_do_get_tuple() - internal helper for pcmcia_get_tuple()
+ *
+ * pcmcia_do_get_tuple() is the internal callback for the call from
+ * pcmcia_get_tuple() to pcmcia_loop_tuple(). As we're only interested in
+ * the first tuple, return 0 unconditionally. Create a memory buffer large
+ * enough to hold the content of the tuple, and fill it with the tuple data.
+ * The caller is responsible for freeing the buffer.
+ */
+static int pcmcia_do_get_tuple(struct pcmcia_device *p_dev, tuple_t *tuple,
+ void *priv)
+{
+ struct pcmcia_loop_get *get = priv;
+
+ *get->buf = kzalloc(tuple->TupleDataLen, GFP_KERNEL);
+ if (*get->buf) {
+ get->len = tuple->TupleDataLen;
+ memcpy(*get->buf, tuple->TupleData, tuple->TupleDataLen);
+ } else
+ dev_dbg(&p_dev->dev, "do_get_tuple: out of memory\n");
+ return 0;
+}
+
+/**
+ * pcmcia_get_tuple() - get first tuple from CIS
+ * @p_dev: the struct pcmcia_device which we need to loop for.
+ * @code: which CIS code shall we look for?
+ * @buf: pointer to store the buffer to.
+ *
+ * pcmcia_get_tuple() gets the content of the first CIS entry of type @code.
+ * It returns the buffer length (or zero). The caller is responsible for
+ * freeing the buffer passed in @buf.
+ */
+size_t pcmcia_get_tuple(struct pcmcia_device *p_dev, cisdata_t code,
+ unsigned char **buf)
+{
+ struct pcmcia_loop_get get = {
+ .len = 0,
+ .buf = buf,
+ };
+
+ *get.buf = NULL;
+ pcmcia_loop_tuple(p_dev, code, pcmcia_do_get_tuple, &get);
+
+ return get.len;
+}
+EXPORT_SYMBOL(pcmcia_get_tuple);
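
/*
 * Illustrative usage sketch, not part of this patch: fetching the raw
 * CISTPL_VERS_1 tuple. The buffer is allocated by the core and must be freed
 * by the caller; kfree(NULL) is safe when no tuple was found.
 */
static void example_dump_vers1(struct pcmcia_device *p_dev)
{
	unsigned char *buf;
	size_t len;

	len = pcmcia_get_tuple(p_dev, CISTPL_VERS_1, &buf);
	if (len)
		dev_dbg(&p_dev->dev, "VERS_1 tuple of %zu bytes\n", len);
	kfree(buf);
}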
+
+
+/**
+ * pcmcia_do_get_mac() - internal helper for pcmcia_get_mac_from_cis()
+ *
+ * pcmcia_do_get_mac() is the internal callback for the call from
+ * pcmcia_get_mac_from_cis() to pcmcia_loop_tuple(). We check whether the
+ * tuple contains a proper LAN_NODE_ID of length 6, and copy the data
+ * to struct net_device->dev_addr[i].
+ */
+static int pcmcia_do_get_mac(struct pcmcia_device *p_dev, tuple_t *tuple,
+ void *priv)
+{
+ struct net_device *dev = priv;
+ int i;
+
+ if (tuple->TupleData[0] != CISTPL_FUNCE_LAN_NODE_ID)
+ return -EINVAL;
+ if (tuple->TupleDataLen < ETH_ALEN + 2) {
+ dev_warn(&p_dev->dev, "Invalid CIS tuple length for "
+ "LAN_NODE_ID\n");
+ return -EINVAL;
+ }
+
+ if (tuple->TupleData[1] != ETH_ALEN) {
+ dev_warn(&p_dev->dev, "Invalid header for LAN_NODE_ID\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = tuple->TupleData[i+2];
+ return 0;
+}
+
+/**
+ * pcmcia_get_mac_from_cis() - read out MAC address from CISTPL_FUNCE
+ * @p_dev: the struct pcmcia_device for which we want the address.
+ * @dev: a properly prepared struct net_device to store the info to.
+ *
+ * pcmcia_get_mac_from_cis() reads out the hardware MAC address from
+ * CISTPL_FUNCE and stores it into struct net_device *dev->dev_addr which
+ * must be set up properly by the driver (see examples!).
+ */
+int pcmcia_get_mac_from_cis(struct pcmcia_device *p_dev, struct net_device *dev)
+{
+ return pcmcia_loop_tuple(p_dev, CISTPL_FUNCE, pcmcia_do_get_mac, dev);
+}
+EXPORT_SYMBOL(pcmcia_get_mac_from_cis);
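
/*
 * Illustrative usage sketch, not part of this patch: a hypothetical network
 * driver filling in dev->dev_addr during probe and warning if the card
 * carries no LAN_NODE_ID tuple.
 */
static void example_set_mac(struct pcmcia_device *p_dev, struct net_device *dev)
{
	if (pcmcia_get_mac_from_cis(p_dev, dev))
		dev_warn(&p_dev->dev,
			 "no LAN_NODE_ID tuple, MAC address not set\n");
}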
+
diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
index 7631faa0cadd..ef0c5f133691 100644
--- a/drivers/pcmcia/pcmcia_ioctl.c
+++ b/drivers/pcmcia/pcmcia_ioctl.c
@@ -301,7 +301,9 @@ static int pccard_get_status(struct pcmcia_socket *s,
(c->IntType & (INT_MEMORY_AND_IO | INT_ZOOMED_VIDEO))) {
u_char reg;
if (c->CardValues & PRESENT_PIN_REPLACE) {
+ mutex_lock(&s->ops_mutex);
pcmcia_read_cis_mem(s, 1, (c->ConfigBase+CISREG_PRR)>>1, 1, &reg);
+ mutex_unlock(&s->ops_mutex);
status->CardState |=
(reg & PRR_WP_STATUS) ? CS_EVENT_WRITE_PROTECT : 0;
status->CardState |=
@@ -315,7 +317,9 @@ static int pccard_get_status(struct pcmcia_socket *s,
status->CardState |= CS_EVENT_READY_CHANGE;
}
if (c->CardValues & PRESENT_EXT_STATUS) {
+ mutex_lock(&s->ops_mutex);
pcmcia_read_cis_mem(s, 1, (c->ConfigBase+CISREG_ESR)>>1, 1, &reg);
+ mutex_unlock(&s->ops_mutex);
status->CardState |=
(reg & ESR_REQ_ATTN) ? CS_EVENT_REQUEST_ATTENTION : 0;
}
@@ -351,7 +355,7 @@ static int pccard_get_configuration_info(struct pcmcia_socket *s,
if (s->state & SOCKET_CARDBUS_CONFIG) {
config->Attributes = CONF_VALID_CLIENT;
config->IntType = INT_CARDBUS;
- config->AssignedIRQ = s->irq.AssignedIRQ;
+ config->AssignedIRQ = s->pcmcia_irq;
if (config->AssignedIRQ)
config->Attributes |= CONF_ENABLE_IRQ;
if (s->io[0].res) {
@@ -391,7 +395,7 @@ static int pccard_get_configuration_info(struct pcmcia_socket *s,
config->ExtStatus = c->ExtStatus;
config->Present = config->CardValues = c->CardValues;
config->IRQAttributes = c->irq.Attributes;
- config->AssignedIRQ = s->irq.AssignedIRQ;
+ config->AssignedIRQ = s->pcmcia_irq;
config->BasePort1 = c->io.BasePort1;
config->NumPorts1 = c->io.NumPorts1;
config->Attributes1 = c->io.Attributes1;
@@ -571,7 +575,6 @@ static struct pci_bus *pcmcia_lookup_bus(struct pcmcia_socket *s)
static int get_device_info(struct pcmcia_socket *s, bind_info_t *bind_info, int first)
{
- dev_node_t *node;
struct pcmcia_device *p_dev;
struct pcmcia_driver *p_drv;
int ret = 0;
@@ -633,21 +636,13 @@ static int get_device_info(struct pcmcia_socket *s, bind_info_t *bind_info, int
goto err_put;
}
- if (first)
- node = p_dev->dev_node;
- else
- for (node = p_dev->dev_node; node; node = node->next)
- if (node == bind_info->next)
- break;
- if (!node) {
+ if (!first) {
ret = -ENODEV;
goto err_put;
}
- strlcpy(bind_info->name, node->dev_name, DEV_NAME_LEN);
- bind_info->major = node->major;
- bind_info->minor = node->minor;
- bind_info->next = node->next;
+ strlcpy(bind_info->name, dev_name(&p_dev->dev), DEV_NAME_LEN);
+ bind_info->next = NULL;
err_put:
pcmcia_put_dev(p_dev);
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index 7c3d03bb4f30..29f91fac1dff 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -23,6 +23,8 @@
#include <linux/netdevice.h>
#include <linux/slab.h>
+#include <asm/irq.h>
+
#include <pcmcia/cs_types.h>
#include <pcmcia/ss.h>
#include <pcmcia/cs.h>
@@ -38,29 +40,6 @@ static int io_speed;
module_param(io_speed, int, 0444);
-#ifdef CONFIG_PCMCIA_PROBE
-#include <asm/irq.h>
-/* mask of IRQs already reserved by other cards, we should avoid using them */
-static u8 pcmcia_used_irq[NR_IRQS];
-#endif
-
-static int pcmcia_adjust_io_region(struct resource *res, unsigned long start,
- unsigned long end, struct pcmcia_socket *s)
-{
- if (s->resource_ops->adjust_io_region)
- return s->resource_ops->adjust_io_region(res, start, end, s);
- return -ENOMEM;
-}
-
-static struct resource *pcmcia_find_io_region(unsigned long base, int num,
- unsigned long align,
- struct pcmcia_socket *s)
-{
- if (s->resource_ops->find_io)
- return s->resource_ops->find_io(base, num, align, s);
- return NULL;
-}
-
int pcmcia_validate_mem(struct pcmcia_socket *s)
{
if (s->resource_ops->validate_mem)
@@ -86,8 +65,7 @@ struct resource *pcmcia_find_mem_region(u_long base, u_long num, u_long align,
static int alloc_io_space(struct pcmcia_socket *s, u_int attr,
unsigned int *base, unsigned int num, u_int lines)
{
- int i;
- unsigned int try, align;
+ unsigned int align;
align = (*base) ? (lines ? 1<<lines : 0) : 1;
if (align && (align < num)) {
@@ -104,50 +82,8 @@ static int alloc_io_space(struct pcmcia_socket *s, u_int attr,
*base, align);
align = 0;
}
- if ((s->features & SS_CAP_STATIC_MAP) && s->io_offset) {
- *base = s->io_offset | (*base & 0x0fff);
- return 0;
- }
- /* Check for an already-allocated window that must conflict with
- * what was asked for. It is a hack because it does not catch all
- * potential conflicts, just the most obvious ones.
- */
- for (i = 0; i < MAX_IO_WIN; i++)
- if ((s->io[i].res) && *base &&
- ((s->io[i].res->start & (align-1)) == *base))
- return 1;
- for (i = 0; i < MAX_IO_WIN; i++) {
- if (!s->io[i].res) {
- s->io[i].res = pcmcia_find_io_region(*base, num, align, s);
- if (s->io[i].res) {
- *base = s->io[i].res->start;
- s->io[i].res->flags = (s->io[i].res->flags & ~IORESOURCE_BITS) | (attr & IORESOURCE_BITS);
- s->io[i].InUse = num;
- break;
- } else
- return 1;
- } else if ((s->io[i].res->flags & IORESOURCE_BITS) != (attr & IORESOURCE_BITS))
- continue;
- /* Try to extend top of window */
- try = s->io[i].res->end + 1;
- if ((*base == 0) || (*base == try))
- if (pcmcia_adjust_io_region(s->io[i].res, s->io[i].res->start,
- s->io[i].res->end + num, s) == 0) {
- *base = try;
- s->io[i].InUse += num;
- break;
- }
- /* Try to extend bottom of window */
- try = s->io[i].res->start - num;
- if ((*base == 0) || (*base == try))
- if (pcmcia_adjust_io_region(s->io[i].res, s->io[i].res->start - num,
- s->io[i].res->end, s) == 0) {
- *base = try;
- s->io[i].InUse += num;
- break;
- }
- }
- return (i == MAX_IO_WIN);
+
+ return s->resource_ops->find_io(s, attr, base, num, align);
} /* alloc_io_space */
@@ -187,6 +123,7 @@ int pcmcia_access_configuration_register(struct pcmcia_device *p_dev,
config_t *c;
int addr;
u_char val;
+ int ret = 0;
if (!p_dev || !p_dev->function_config)
return -EINVAL;
@@ -203,11 +140,10 @@ int pcmcia_access_configuration_register(struct pcmcia_device *p_dev,
}
addr = (c->ConfigBase + reg->Offset) >> 1;
- mutex_unlock(&s->ops_mutex);
switch (reg->Action) {
case CS_READ:
- pcmcia_read_cis_mem(s, 1, addr, 1, &val);
+ ret = pcmcia_read_cis_mem(s, 1, addr, 1, &val);
reg->Value = val;
break;
case CS_WRITE:
@@ -216,10 +152,11 @@ int pcmcia_access_configuration_register(struct pcmcia_device *p_dev,
break;
default:
dev_dbg(&s->dev, "Invalid conf register request\n");
- return -EINVAL;
+ ret = -EINVAL;
break;
}
- return 0;
+ mutex_unlock(&s->ops_mutex);
+ return ret;
} /* pcmcia_access_configuration_register */
EXPORT_SYMBOL(pcmcia_access_configuration_register);
@@ -275,19 +212,9 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
goto unlock;
}
- if (mod->Attributes & CONF_IRQ_CHANGE_VALID) {
- if (mod->Attributes & CONF_ENABLE_IRQ) {
- c->Attributes |= CONF_ENABLE_IRQ;
- s->socket.io_irq = s->irq.AssignedIRQ;
- } else {
- c->Attributes &= ~CONF_ENABLE_IRQ;
- s->socket.io_irq = 0;
- }
- s->ops->set_socket(s, &s->socket);
- }
-
- if (mod->Attributes & CONF_VCC_CHANGE_VALID) {
- dev_dbg(&s->dev, "changing Vcc is not allowed at this time\n");
+ if (mod->Attributes & (CONF_IRQ_CHANGE_VALID | CONF_VCC_CHANGE_VALID)) {
+ dev_dbg(&s->dev,
+ "changing Vcc or IRQ is not allowed at this time\n");
ret = -EINVAL;
goto unlock;
}
@@ -422,52 +349,6 @@ out:
} /* pcmcia_release_io */
-static int pcmcia_release_irq(struct pcmcia_device *p_dev, irq_req_t *req)
-{
- struct pcmcia_socket *s = p_dev->socket;
- config_t *c;
- int ret = -EINVAL;
-
- mutex_lock(&s->ops_mutex);
-
- c = p_dev->function_config;
-
- if (!p_dev->_irq)
- goto out;
-
- p_dev->_irq = 0;
-
- if (c->state & CONFIG_LOCKED)
- goto out;
-
- if (c->irq.Attributes != req->Attributes) {
- dev_dbg(&s->dev, "IRQ attributes must match assigned ones\n");
- goto out;
- }
- if (s->irq.AssignedIRQ != req->AssignedIRQ) {
- dev_dbg(&s->dev, "IRQ must match assigned one\n");
- goto out;
- }
- if (--s->irq.Config == 0) {
- c->state &= ~CONFIG_IRQ_REQ;
- s->irq.AssignedIRQ = 0;
- }
-
- if (req->Handler)
- free_irq(req->AssignedIRQ, p_dev->priv);
-
-#ifdef CONFIG_PCMCIA_PROBE
- pcmcia_used_irq[req->AssignedIRQ]--;
-#endif
- ret = 0;
-
-out:
- mutex_unlock(&s->ops_mutex);
-
- return ret;
-} /* pcmcia_release_irq */
-
-
int pcmcia_release_window(struct pcmcia_device *p_dev, window_handle_t wh)
{
struct pcmcia_socket *s = p_dev->socket;
@@ -551,12 +432,11 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
if (req->Attributes & CONF_ENABLE_SPKR)
s->socket.flags |= SS_SPKR_ENA;
if (req->Attributes & CONF_ENABLE_IRQ)
- s->socket.io_irq = s->irq.AssignedIRQ;
+ s->socket.io_irq = s->pcmcia_irq;
else
s->socket.io_irq = 0;
s->ops->set_socket(s, &s->socket);
s->lock_count++;
- mutex_unlock(&s->ops_mutex);
/* Set up CIS configuration registers */
base = c->ConfigBase = req->ConfigBase;
@@ -574,9 +454,9 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
if (req->Present & PRESENT_IOBASE_0)
c->Option |= COR_ADDR_DECODE;
}
- if (c->state & CONFIG_IRQ_REQ)
- if (!(c->irq.Attributes & IRQ_FORCED_PULSE))
- c->Option |= COR_LEVEL_REQ;
+ if ((req->Attributes & CONF_ENABLE_IRQ) &&
+ !(req->Attributes & CONF_ENABLE_PULSE_IRQ))
+ c->Option |= COR_LEVEL_REQ;
pcmcia_write_cis_mem(s, 1, (base + CISREG_COR)>>1, 1, &c->Option);
mdelay(40);
}
@@ -605,7 +485,6 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
/* Configure I/O windows */
if (c->state & CONFIG_IO_REQ) {
- mutex_lock(&s->ops_mutex);
iomap.speed = io_speed;
for (i = 0; i < MAX_IO_WIN; i++)
if (s->io[i].res) {
@@ -624,11 +503,11 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
s->ops->set_io_map(s, &iomap);
s->io[i].Config++;
}
- mutex_unlock(&s->ops_mutex);
}
c->state |= CONFIG_LOCKED;
p_dev->_locked = 1;
+ mutex_unlock(&s->ops_mutex);
return 0;
} /* pcmcia_request_configuration */
EXPORT_SYMBOL(pcmcia_request_configuration);
@@ -706,137 +585,176 @@ out:
EXPORT_SYMBOL(pcmcia_request_io);
-/** pcmcia_request_irq
+/**
+ * pcmcia_request_irq() - attempt to request an IRQ for a PCMCIA device
*
- * Request_irq() reserves an irq for this client.
+ * pcmcia_request_irq() is a wrapper around request_irq which will allow
+ * the PCMCIA core to clean up the registration in pcmcia_disable_device().
+ * Drivers are free to use request_irq() directly, but then they need to
+ * call free_irq() themselves, too. Also, only IRQF_SHARED capable IRQ
+ * handlers are allowed.
+ */
+int __must_check pcmcia_request_irq(struct pcmcia_device *p_dev,
+ irq_handler_t handler)
+{
+ int ret;
+
+ if (!p_dev->irq)
+ return -EINVAL;
+
+ ret = request_irq(p_dev->irq, handler, IRQF_SHARED,
+ p_dev->devname, p_dev->priv);
+ if (!ret)
+ p_dev->_irq = 1;
+
+ return ret;
+}
+EXPORT_SYMBOL(pcmcia_request_irq);
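
/*
 * Illustrative usage sketch, not part of this patch: registering a shared-IRQ
 * handler through pcmcia_request_irq(). The handler receives p_dev->priv as
 * dev_id; since _irq is set on success, pcmcia_disable_device() will issue
 * the matching free_irq() on teardown. All names here are invented.
 */
static irqreturn_t example_isr(int irq, void *dev_id)
{
	/* dev_id is p_dev->priv; check that this (shared) IRQ is ours */
	return IRQ_HANDLED;
}

static int example_enable_irq(struct pcmcia_device *p_dev)
{
	return pcmcia_request_irq(p_dev, example_isr);
}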
+
+
+/**
+ * pcmcia_request_exclusive_irq() - attempt to request an exclusive IRQ first
*
- * Also, since Linux only reserves irq's when they are actually
- * hooked, we don't guarantee that an irq will still be available
- * when the configuration is locked. Now that I think about it,
- * there might be a way to fix this using a dummy handler.
+ * pcmcia_request_exclusive_irq() is a wrapper around request_irq which
+ * attempts first to request an exclusive IRQ. If it fails, it also accepts
+ * a shared IRQ, but prints out a warning. PCMCIA drivers should allow for
+ * IRQ sharing and either use request_irq directly (then they need to call
+ * free_irq themselves, too), or the pcmcia_request_irq() function.
*/
+int __must_check
+__pcmcia_request_exclusive_irq(struct pcmcia_device *p_dev,
+ irq_handler_t handler)
+{
+ int ret;
+
+ if (!p_dev->irq)
+ return -EINVAL;
+
+ ret = request_irq(p_dev->irq, handler, 0, p_dev->devname, p_dev->priv);
+ if (ret) {
+ ret = pcmcia_request_irq(p_dev, handler);
+ dev_printk(KERN_WARNING, &p_dev->dev, "pcmcia: "
+ "request for exclusive IRQ could not be fulfilled.\n");
+ dev_printk(KERN_WARNING, &p_dev->dev, "pcmcia: the driver "
+ "needs updating to supported shared IRQ lines.\n");
+ }
+ if (ret)
+ dev_printk(KERN_INFO, &p_dev->dev, "request_irq() failed\n");
+ else
+ p_dev->_irq = 1;
+
+ return ret;
+} /* pcmcia_request_exclusive_irq */
+EXPORT_SYMBOL(__pcmcia_request_exclusive_irq);
+
#ifdef CONFIG_PCMCIA_PROBE
+
+/* mask of IRQs already reserved by other cards, we should avoid using them */
+static u8 pcmcia_used_irq[NR_IRQS];
+
static irqreturn_t test_action(int cpl, void *dev_id)
{
return IRQ_NONE;
}
-#endif
-int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req)
+/**
+ * pcmcia_setup_isa_irq() - determine whether an ISA IRQ can be used
+ * @p_dev - the associated PCMCIA device
+ *
+ * locking note: must be called with ops_mutex locked.
+ */
+static int pcmcia_setup_isa_irq(struct pcmcia_device *p_dev, int type)
{
struct pcmcia_socket *s = p_dev->socket;
- config_t *c;
- int ret = -EINVAL, irq = 0;
- int type;
+ unsigned int try, irq;
+ u32 mask = s->irq_mask;
+ int ret = -ENODEV;
- mutex_lock(&s->ops_mutex);
+ for (try = 0; try < 64; try++) {
+ irq = try % 32;
- if (!(s->state & SOCKET_PRESENT)) {
- dev_dbg(&s->dev, "No card present\n");
- goto out;
- }
- c = p_dev->function_config;
- if (c->state & CONFIG_LOCKED) {
- dev_dbg(&s->dev, "Configuration is locked\n");
- goto out;
- }
- if (c->state & CONFIG_IRQ_REQ) {
- dev_dbg(&s->dev, "IRQ already configured\n");
- goto out;
+ /* marked as available by driver, not blocked by userspace? */
+ if (!((mask >> irq) & 1))
+ continue;
+
+ /* avoid an IRQ which is already used by another PCMCIA card */
+ if ((try < 32) && pcmcia_used_irq[irq])
+ continue;
+
+ /* register a dummy handler to check whether the IRQ is
+ * available, i.e. not already marked as used by the kernel
+ * resource management core */
+ ret = request_irq(irq, test_action, type, p_dev->devname,
+ p_dev);
+ if (!ret) {
+ free_irq(irq, p_dev);
+ p_dev->irq = s->pcmcia_irq = irq;
+ pcmcia_used_irq[irq]++;
+ break;
+ }
}
- /* Decide what type of interrupt we are registering */
- type = 0;
- if (s->functions > 1) /* All of this ought to be handled higher up */
- type = IRQF_SHARED;
- else if (req->Attributes & IRQ_TYPE_DYNAMIC_SHARING)
- type = IRQF_SHARED;
- else
- printk(KERN_WARNING "pcmcia: Driver needs updating to support IRQ sharing.\n");
+ return ret;
+}
- /* If the interrupt is already assigned, it must be the same */
- if (s->irq.AssignedIRQ != 0)
- irq = s->irq.AssignedIRQ;
+void pcmcia_cleanup_irq(struct pcmcia_socket *s)
+{
+ pcmcia_used_irq[s->pcmcia_irq]--;
+ s->pcmcia_irq = 0;
+}
-#ifdef CONFIG_PCMCIA_PROBE
- if (!irq) {
- int try;
- u32 mask = s->irq_mask;
- void *data = p_dev; /* something unique to this device */
+#else /* CONFIG_PCMCIA_PROBE */
- for (try = 0; try < 64; try++) {
- irq = try % 32;
+static int pcmcia_setup_isa_irq(struct pcmcia_device *p_dev, int type)
+{
+ return -EINVAL;
+}
- /* marked as available by driver, and not blocked by userspace? */
- if (!((mask >> irq) & 1))
- continue;
+void pcmcia_cleanup_irq(struct pcmcia_socket *s)
+{
+ s->pcmcia_irq = 0;
+ return;
+}
- /* avoid an IRQ which is already used by a PCMCIA card */
- if ((try < 32) && pcmcia_used_irq[irq])
- continue;
+#endif /* CONFIG_PCMCIA_PROBE */
- /* register the correct driver, if possible, of check whether
- * registering a dummy handle works, i.e. if the IRQ isn't
- * marked as used by the kernel resource management core */
- ret = request_irq(irq,
- (req->Handler) ? req->Handler : test_action,
- type,
- p_dev->devname,
- (req->Handler) ? p_dev->priv : data);
- if (!ret) {
- if (!req->Handler)
- free_irq(irq, data);
- break;
- }
- }
- }
-#endif
- /* only assign PCI irq if no IRQ already assigned */
- if (ret && !s->irq.AssignedIRQ) {
- if (!s->pci_irq) {
- dev_printk(KERN_INFO, &s->dev, "no IRQ found\n");
- goto out;
- }
- type = IRQF_SHARED;
- irq = s->pci_irq;
- }
- if (ret && req->Handler) {
- ret = request_irq(irq, req->Handler, type,
- p_dev->devname, p_dev->priv);
- if (ret) {
- dev_printk(KERN_INFO, &s->dev,
- "request_irq() failed\n");
- goto out;
- }
- }
+/**
+ * pcmcia_setup_irq() - determine IRQ to be used for device
+ * @p_dev: the associated PCMCIA device
+ *
+ * locking note: must be called with ops_mutex locked.
+ */
+int pcmcia_setup_irq(struct pcmcia_device *p_dev)
+{
+ struct pcmcia_socket *s = p_dev->socket;
- /* Make sure the fact the request type was overridden is passed back */
- if (type == IRQF_SHARED && !(req->Attributes & IRQ_TYPE_DYNAMIC_SHARING)) {
- req->Attributes |= IRQ_TYPE_DYNAMIC_SHARING;
- dev_printk(KERN_WARNING, &p_dev->dev, "pcmcia: "
- "request for exclusive IRQ could not be fulfilled.\n");
- dev_printk(KERN_WARNING, &p_dev->dev, "pcmcia: the driver "
- "needs updating to supported shared IRQ lines.\n");
+ if (p_dev->irq)
+ return 0;
+
+ /* already assigned? */
+ if (s->pcmcia_irq) {
+ p_dev->irq = s->pcmcia_irq;
+ return 0;
}
- c->irq.Attributes = req->Attributes;
- s->irq.AssignedIRQ = req->AssignedIRQ = irq;
- s->irq.Config++;
- c->state |= CONFIG_IRQ_REQ;
- p_dev->_irq = 1;
+ /* prefer an exclusive ISA irq */
+ if (!pcmcia_setup_isa_irq(p_dev, 0))
+ return 0;
-#ifdef CONFIG_PCMCIA_PROBE
- pcmcia_used_irq[irq]++;
-#endif
+ /* but accept a shared ISA irq */
+ if (!pcmcia_setup_isa_irq(p_dev, IRQF_SHARED))
+ return 0;
- ret = 0;
-out:
- mutex_unlock(&s->ops_mutex);
- return ret;
-} /* pcmcia_request_irq */
-EXPORT_SYMBOL(pcmcia_request_irq);
+ /* but use the PCI irq otherwise */
+ if (s->pci_irq) {
+ p_dev->irq = s->pcmcia_irq = s->pci_irq;
+ return 0;
+ }
+
+ return -EINVAL;
+}
/** pcmcia_request_window
@@ -939,237 +857,9 @@ void pcmcia_disable_device(struct pcmcia_device *p_dev)
{
pcmcia_release_configuration(p_dev);
pcmcia_release_io(p_dev, &p_dev->io);
- pcmcia_release_irq(p_dev, &p_dev->irq);
+ if (p_dev->_irq)
+ free_irq(p_dev->irq, p_dev->priv);
if (p_dev->win)
pcmcia_release_window(p_dev, p_dev->win);
}
EXPORT_SYMBOL(pcmcia_disable_device);
-
-
-struct pcmcia_cfg_mem {
- struct pcmcia_device *p_dev;
- void *priv_data;
- int (*conf_check) (struct pcmcia_device *p_dev,
- cistpl_cftable_entry_t *cfg,
- cistpl_cftable_entry_t *dflt,
- unsigned int vcc,
- void *priv_data);
- cisparse_t parse;
- cistpl_cftable_entry_t dflt;
-};
-
-/**
- * pcmcia_do_loop_config() - internal helper for pcmcia_loop_config()
- *
- * pcmcia_do_loop_config() is the internal callback for the call from
- * pcmcia_loop_config() to pccard_loop_tuple(). Data is transferred
- * by a struct pcmcia_cfg_mem.
- */
-static int pcmcia_do_loop_config(tuple_t *tuple, cisparse_t *parse, void *priv)
-{
- cistpl_cftable_entry_t *cfg = &parse->cftable_entry;
- struct pcmcia_cfg_mem *cfg_mem = priv;
-
- /* default values */
- cfg_mem->p_dev->conf.ConfigIndex = cfg->index;
- if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
- cfg_mem->dflt = *cfg;
-
- return cfg_mem->conf_check(cfg_mem->p_dev, cfg, &cfg_mem->dflt,
- cfg_mem->p_dev->socket->socket.Vcc,
- cfg_mem->priv_data);
-}
-
-/**
- * pcmcia_loop_config() - loop over configuration options
- * @p_dev: the struct pcmcia_device which we need to loop for.
- * @conf_check: function to call for each configuration option.
- * It gets passed the struct pcmcia_device, the CIS data
- * describing the configuration option, and private data
- * being passed to pcmcia_loop_config()
- * @priv_data: private data to be passed to the conf_check function.
- *
- * pcmcia_loop_config() loops over all configuration options, and calls
- * the driver-specific conf_check() for each one, checking whether
- * it is a valid one. Returns 0 on success or errorcode otherwise.
- */
-int pcmcia_loop_config(struct pcmcia_device *p_dev,
- int (*conf_check) (struct pcmcia_device *p_dev,
- cistpl_cftable_entry_t *cfg,
- cistpl_cftable_entry_t *dflt,
- unsigned int vcc,
- void *priv_data),
- void *priv_data)
-{
- struct pcmcia_cfg_mem *cfg_mem;
- int ret;
-
- cfg_mem = kzalloc(sizeof(struct pcmcia_cfg_mem), GFP_KERNEL);
- if (cfg_mem == NULL)
- return -ENOMEM;
-
- cfg_mem->p_dev = p_dev;
- cfg_mem->conf_check = conf_check;
- cfg_mem->priv_data = priv_data;
-
- ret = pccard_loop_tuple(p_dev->socket, p_dev->func,
- CISTPL_CFTABLE_ENTRY, &cfg_mem->parse,
- cfg_mem, pcmcia_do_loop_config);
-
- kfree(cfg_mem);
- return ret;
-}
-EXPORT_SYMBOL(pcmcia_loop_config);
-
-
-struct pcmcia_loop_mem {
- struct pcmcia_device *p_dev;
- void *priv_data;
- int (*loop_tuple) (struct pcmcia_device *p_dev,
- tuple_t *tuple,
- void *priv_data);
-};
-
-/**
- * pcmcia_do_loop_tuple() - internal helper for pcmcia_loop_config()
- *
- * pcmcia_do_loop_tuple() is the internal callback for the call from
- * pcmcia_loop_tuple() to pccard_loop_tuple(). Data is transferred
- * by a struct pcmcia_cfg_mem.
- */
-static int pcmcia_do_loop_tuple(tuple_t *tuple, cisparse_t *parse, void *priv)
-{
- struct pcmcia_loop_mem *loop = priv;
-
- return loop->loop_tuple(loop->p_dev, tuple, loop->priv_data);
-};
-
-/**
- * pcmcia_loop_tuple() - loop over tuples in the CIS
- * @p_dev: the struct pcmcia_device which we need to loop for.
- * @code: which CIS code shall we look for?
- * @priv_data: private data to be passed to the loop_tuple function.
- * @loop_tuple: function to call for each CIS entry of type @function. IT
- * gets passed the raw tuple and @priv_data.
- *
- * pcmcia_loop_tuple() loops over all CIS entries of type @function, and
- * calls the @loop_tuple function for each entry. If the call to @loop_tuple
- * returns 0, the loop exits. Returns 0 on success or errorcode otherwise.
- */
-int pcmcia_loop_tuple(struct pcmcia_device *p_dev, cisdata_t code,
- int (*loop_tuple) (struct pcmcia_device *p_dev,
- tuple_t *tuple,
- void *priv_data),
- void *priv_data)
-{
- struct pcmcia_loop_mem loop = {
- .p_dev = p_dev,
- .loop_tuple = loop_tuple,
- .priv_data = priv_data};
-
- return pccard_loop_tuple(p_dev->socket, p_dev->func, code, NULL,
- &loop, pcmcia_do_loop_tuple);
-}
-EXPORT_SYMBOL(pcmcia_loop_tuple);
-
-
-struct pcmcia_loop_get {
- size_t len;
- cisdata_t **buf;
-};
-
-/**
- * pcmcia_do_get_tuple() - internal helper for pcmcia_get_tuple()
- *
- * pcmcia_do_get_tuple() is the internal callback for the call from
- * pcmcia_get_tuple() to pcmcia_loop_tuple(). As we're only interested in
- * the first tuple, return 0 unconditionally. Create a memory buffer large
- * enough to hold the content of the tuple, and fill it with the tuple data.
- * The caller is responsible to free the buffer.
- */
-static int pcmcia_do_get_tuple(struct pcmcia_device *p_dev, tuple_t *tuple,
- void *priv)
-{
- struct pcmcia_loop_get *get = priv;
-
- *get->buf = kzalloc(tuple->TupleDataLen, GFP_KERNEL);
- if (*get->buf) {
- get->len = tuple->TupleDataLen;
- memcpy(*get->buf, tuple->TupleData, tuple->TupleDataLen);
- } else
- dev_dbg(&p_dev->dev, "do_get_tuple: out of memory\n");
- return 0;
-}
-
-/**
- * pcmcia_get_tuple() - get first tuple from CIS
- * @p_dev: the struct pcmcia_device which we need to loop for.
- * @code: which CIS code shall we look for?
- * @buf: pointer to store the buffer to.
- *
- * pcmcia_get_tuple() gets the content of the first CIS entry of type @code.
- * It returns the buffer length (or zero). The caller is responsible to free
- * the buffer passed in @buf.
- */
-size_t pcmcia_get_tuple(struct pcmcia_device *p_dev, cisdata_t code,
- unsigned char **buf)
-{
- struct pcmcia_loop_get get = {
- .len = 0,
- .buf = buf,
- };
-
- *get.buf = NULL;
- pcmcia_loop_tuple(p_dev, code, pcmcia_do_get_tuple, &get);
-
- return get.len;
-}
-EXPORT_SYMBOL(pcmcia_get_tuple);
-
-
-/**
- * pcmcia_do_get_mac() - internal helper for pcmcia_get_mac_from_cis()
- *
- * pcmcia_do_get_mac() is the internal callback for the call from
- * pcmcia_get_mac_from_cis() to pcmcia_loop_tuple(). We check whether the
- * tuple contains a proper LAN_NODE_ID of length 6, and copy the data
- * to struct net_device->dev_addr[i].
- */
-static int pcmcia_do_get_mac(struct pcmcia_device *p_dev, tuple_t *tuple,
- void *priv)
-{
- struct net_device *dev = priv;
- int i;
-
- if (tuple->TupleData[0] != CISTPL_FUNCE_LAN_NODE_ID)
- return -EINVAL;
- if (tuple->TupleDataLen < ETH_ALEN + 2) {
- dev_warn(&p_dev->dev, "Invalid CIS tuple length for "
- "LAN_NODE_ID\n");
- return -EINVAL;
- }
-
- if (tuple->TupleData[1] != ETH_ALEN) {
- dev_warn(&p_dev->dev, "Invalid header for LAN_NODE_ID\n");
- return -EINVAL;
- }
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = tuple->TupleData[i+2];
- return 0;
-}
-
-/**
- * pcmcia_get_mac_from_cis() - read out MAC address from CISTPL_FUNCE
- * @p_dev: the struct pcmcia_device for which we want the address.
- * @dev: a properly prepared struct net_device to store the info to.
- *
- * pcmcia_get_mac_from_cis() reads out the hardware MAC address from
- * CISTPL_FUNCE and stores it into struct net_device *dev->dev_addr which
- * must be set up properly by the driver (see examples!).
- */
-int pcmcia_get_mac_from_cis(struct pcmcia_device *p_dev, struct net_device *dev)
-{
- return pcmcia_loop_tuple(p_dev, CISTPL_FUNCE, pcmcia_do_get_mac, dev);
-}
-EXPORT_SYMBOL(pcmcia_get_mac_from_cis);
-
diff --git a/drivers/pcmcia/rsrc_iodyn.c b/drivers/pcmcia/rsrc_iodyn.c
new file mode 100644
index 000000000000..d0bf35021065
--- /dev/null
+++ b/drivers/pcmcia/rsrc_iodyn.c
@@ -0,0 +1,172 @@
+/*
+ * rsrc_iodyn.c -- Resource management routines for MEM-static sockets.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * (C) 1999 David A. Hinds
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include <pcmcia/cs_types.h>
+#include <pcmcia/ss.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include "cs_internal.h"
+
+
+struct pcmcia_align_data {
+ unsigned long mask;
+ unsigned long offset;
+};
+
+static resource_size_t pcmcia_align(void *align_data,
+ const struct resource *res,
+ resource_size_t size, resource_size_t align)
+{
+ struct pcmcia_align_data *data = align_data;
+ resource_size_t start;
+
+ start = (res->start & ~data->mask) + data->offset;
+ if (start < res->start)
+ start += data->mask + 1;
+
+#ifdef CONFIG_X86
+ if (res->flags & IORESOURCE_IO) {
+ if (start & 0x300)
+ start = (start + 0x3ff) & ~0x3ff;
+ }
+#endif
+
+#ifdef CONFIG_M68K
+ if (res->flags & IORESOURCE_IO) {
+ if ((res->start + size - 1) >= 1024)
+ start = res->end;
+ }
+#endif
+
+ return start;
+}
+
+
+static struct resource *__iodyn_find_io_region(struct pcmcia_socket *s,
+ unsigned long base, int num,
+ unsigned long align)
+{
+ struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_IO,
+ dev_name(&s->dev));
+ struct pcmcia_align_data data;
+ unsigned long min = base;
+ int ret;
+
+ data.mask = align - 1;
+ data.offset = base & data.mask;
+
+#ifdef CONFIG_PCI
+ if (s->cb_dev) {
+ ret = pci_bus_alloc_resource(s->cb_dev->bus, res, num, 1,
+ min, 0, pcmcia_align, &data);
+ } else
+#endif
+ ret = allocate_resource(&ioport_resource, res, num, min, ~0UL,
+ 1, pcmcia_align, &data);
+
+ if (ret != 0) {
+ kfree(res);
+ res = NULL;
+ }
+ return res;
+}
+
+static int iodyn_find_io(struct pcmcia_socket *s, unsigned int attr,
+ unsigned int *base, unsigned int num,
+ unsigned int align)
+{
+ int i, ret = 0;
+
+ /* Check for an already-allocated window that must conflict with
+ * what was asked for. It is a hack because it does not catch all
+ * potential conflicts, just the most obvious ones.
+ */
+ for (i = 0; i < MAX_IO_WIN; i++) {
+ if (!s->io[i].res)
+ continue;
+
+ if (!*base)
+ continue;
+
+ if ((s->io[i].res->start & (align-1)) == *base)
+ return -EBUSY;
+ }
+
+ for (i = 0; i < MAX_IO_WIN; i++) {
+ struct resource *res = s->io[i].res;
+ unsigned int try;
+
+ if (res && (res->flags & IORESOURCE_BITS) !=
+ (attr & IORESOURCE_BITS))
+ continue;
+
+ if (!res) {
+ if (align == 0)
+ align = 0x10000;
+
+ res = s->io[i].res = __iodyn_find_io_region(s, *base,
+ num, align);
+ if (!res)
+ return -EINVAL;
+
+ *base = res->start;
+ s->io[i].res->flags =
+ ((res->flags & ~IORESOURCE_BITS) |
+ (attr & IORESOURCE_BITS));
+ s->io[i].InUse = num;
+ return 0;
+ }
+
+ /* Try to extend top of window */
+ try = res->end + 1;
+ if ((*base == 0) || (*base == try)) {
+ if (adjust_resource(s->io[i].res, res->start,
+ res->end - res->start + num + 1))
+ continue;
+ *base = try;
+ s->io[i].InUse += num;
+ return 0;
+ }
+
+ /* Try to extend bottom of window */
+ try = res->start - num;
+ if ((*base == 0) || (*base == try)) {
+ if (adjust_resource(s->io[i].res,
+ res->start - num,
+ res->end - res->start + num + 1))
+ continue;
+ *base = try;
+ s->io[i].InUse += num;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+
+struct pccard_resource_ops pccard_iodyn_ops = {
+ .validate_mem = NULL,
+ .find_io = iodyn_find_io,
+ .find_mem = NULL,
+ .add_io = NULL,
+ .add_mem = NULL,
+ .init = static_init,
+ .exit = NULL,
+};
+EXPORT_SYMBOL(pccard_iodyn_ops);
diff --git a/drivers/pcmcia/rsrc_mgr.c b/drivers/pcmcia/rsrc_mgr.c
index ffa5f3cae57b..142efac3c387 100644
--- a/drivers/pcmcia/rsrc_mgr.c
+++ b/drivers/pcmcia/rsrc_mgr.c
@@ -22,7 +22,7 @@
#include <pcmcia/cistpl.h>
#include "cs_internal.h"
-static int static_init(struct pcmcia_socket *s)
+int static_init(struct pcmcia_socket *s)
{
/* the good thing about SS_CAP_STATIC_MAP sockets is
* that they don't need a resource database */
@@ -32,118 +32,44 @@ static int static_init(struct pcmcia_socket *s)
return 0;
}
-
-struct pccard_resource_ops pccard_static_ops = {
- .validate_mem = NULL,
- .adjust_io_region = NULL,
- .find_io = NULL,
- .find_mem = NULL,
- .add_io = NULL,
- .add_mem = NULL,
- .init = static_init,
- .exit = NULL,
-};
-EXPORT_SYMBOL(pccard_static_ops);
-
-
-#ifdef CONFIG_PCCARD_IODYN
-
-static struct resource *
-make_resource(unsigned long b, unsigned long n, int flags, char *name)
+struct resource *pcmcia_make_resource(unsigned long start, unsigned long end,
+ int flags, const char *name)
{
struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);
if (res) {
res->name = name;
- res->start = b;
- res->end = b + n - 1;
+ res->start = start;
+ res->end = start + end - 1;
res->flags = flags;
}
return res;
}
-struct pcmcia_align_data {
- unsigned long mask;
- unsigned long offset;
-};
-
-static resource_size_t pcmcia_align(void *align_data,
- const struct resource *res,
- resource_size_t size, resource_size_t align)
+static int static_find_io(struct pcmcia_socket *s, unsigned int attr,
+ unsigned int *base, unsigned int num,
+ unsigned int align)
{
- struct pcmcia_align_data *data = align_data;
- resource_size_t start;
+ if (!s->io_offset)
+ return -EINVAL;
+ *base = s->io_offset | (*base & 0x0fff);
- start = (res->start & ~data->mask) + data->offset;
- if (start < res->start)
- start += data->mask + 1;
-
-#ifdef CONFIG_X86
- if (res->flags & IORESOURCE_IO) {
- if (start & 0x300)
- start = (start + 0x3ff) & ~0x3ff;
- }
-#endif
-
-#ifdef CONFIG_M68K
- if (res->flags & IORESOURCE_IO) {
- if ((res->start + size - 1) >= 1024)
- start = res->end;
- }
-#endif
-
- return start;
-}
-
-
-static int iodyn_adjust_io_region(struct resource *res, unsigned long r_start,
- unsigned long r_end, struct pcmcia_socket *s)
-{
- return adjust_resource(res, r_start, r_end - r_start + 1);
+ return 0;
}
-static struct resource *iodyn_find_io_region(unsigned long base, int num,
- unsigned long align, struct pcmcia_socket *s)
-{
- struct resource *res = make_resource(0, num, IORESOURCE_IO,
- dev_name(&s->dev));
- struct pcmcia_align_data data;
- unsigned long min = base;
- int ret;
-
- if (align == 0)
- align = 0x10000;
-
- data.mask = align - 1;
- data.offset = base & data.mask;
-
-#ifdef CONFIG_PCI
- if (s->cb_dev) {
- ret = pci_bus_alloc_resource(s->cb_dev->bus, res, num, 1,
- min, 0, pcmcia_align, &data);
- } else
-#endif
- ret = allocate_resource(&ioport_resource, res, num, min, ~0UL,
- 1, pcmcia_align, &data);
-
- if (ret != 0) {
- kfree(res);
- res = NULL;
- }
- return res;
-}
-
-struct pccard_resource_ops pccard_iodyn_ops = {
+struct pccard_resource_ops pccard_static_ops = {
.validate_mem = NULL,
- .adjust_io_region = iodyn_adjust_io_region,
- .find_io = iodyn_find_io_region,
+ .find_io = static_find_io,
.find_mem = NULL,
.add_io = NULL,
.add_mem = NULL,
.init = static_init,
.exit = NULL,
};
-EXPORT_SYMBOL(pccard_iodyn_ops);
+EXPORT_SYMBOL(pccard_static_ops);
+
-#endif /* CONFIG_PCCARD_IODYN */
+MODULE_AUTHOR("David A. Hinds, Dominik Brodowski");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("rsrc_nonstatic");
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index a6eb7b59ba9f..dcd1a4ad3d63 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -34,8 +34,10 @@
#include <pcmcia/cistpl.h>
#include "cs_internal.h"
+/* moved to rsrc_mgr.c
MODULE_AUTHOR("David A. Hinds, Dominik Brodowski");
MODULE_LICENSE("GPL");
+*/
/* Parameters that can be set with 'insmod' */
@@ -70,27 +72,13 @@ struct socket_data {
======================================================================*/
static struct resource *
-make_resource(resource_size_t b, resource_size_t n, int flags, const char *name)
-{
- struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);
-
- if (res) {
- res->name = name;
- res->start = b;
- res->end = b + n - 1;
- res->flags = flags;
- }
- return res;
-}
-
-static struct resource *
claim_region(struct pcmcia_socket *s, resource_size_t base,
resource_size_t size, int type, char *name)
{
struct resource *res, *parent;
parent = type & IORESOURCE_MEM ? &iomem_resource : &ioport_resource;
- res = make_resource(base, size, type | IORESOURCE_BUSY, name);
+ res = pcmcia_make_resource(base, size, type | IORESOURCE_BUSY, name);
if (res) {
#ifdef CONFIG_PCI
@@ -661,8 +649,9 @@ pcmcia_align(void *align_data, const struct resource *res,
* Adjust an existing IO region allocation, but making sure that we don't
* encroach outside the resources which the user supplied.
*/
-static int nonstatic_adjust_io_region(struct resource *res, unsigned long r_start,
- unsigned long r_end, struct pcmcia_socket *s)
+static int __nonstatic_adjust_io_region(struct pcmcia_socket *s,
+ unsigned long r_start,
+ unsigned long r_end)
{
struct resource_map *m;
struct socket_data *s_data = s->resource_data;
@@ -675,8 +664,7 @@ static int nonstatic_adjust_io_region(struct resource *res, unsigned long r_star
if (start > r_start || r_end > end)
continue;
- ret = adjust_resource(res, r_start, r_end - r_start + 1);
- break;
+ ret = 0;
}
return ret;
@@ -695,18 +683,17 @@ static int nonstatic_adjust_io_region(struct resource *res, unsigned long r_star
======================================================================*/
-static struct resource *nonstatic_find_io_region(unsigned long base, int num,
- unsigned long align, struct pcmcia_socket *s)
+static struct resource *__nonstatic_find_io_region(struct pcmcia_socket *s,
+ unsigned long base, int num,
+ unsigned long align)
{
- struct resource *res = make_resource(0, num, IORESOURCE_IO, dev_name(&s->dev));
+ struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_IO,
+ dev_name(&s->dev));
struct socket_data *s_data = s->resource_data;
struct pcmcia_align_data data;
unsigned long min = base;
int ret;
- if (align == 0)
- align = 0x10000;
-
data.mask = align - 1;
data.offset = base & data.mask;
data.map = &s_data->io_db;
@@ -727,10 +714,97 @@ static struct resource *nonstatic_find_io_region(unsigned long base, int num,
return res;
}
+static int nonstatic_find_io(struct pcmcia_socket *s, unsigned int attr,
+ unsigned int *base, unsigned int num,
+ unsigned int align)
+{
+ int i, ret = 0;
+
+ /* Check for an already-allocated window that must conflict with
+ * what was asked for. It is a hack because it does not catch all
+ * potential conflicts, just the most obvious ones.
+ */
+ for (i = 0; i < MAX_IO_WIN; i++) {
+ if (!s->io[i].res)
+ continue;
+
+ if (!*base)
+ continue;
+
+ if ((s->io[i].res->start & (align-1)) == *base)
+ return -EBUSY;
+ }
+
+ for (i = 0; i < MAX_IO_WIN; i++) {
+ struct resource *res = s->io[i].res;
+ unsigned int try;
+
+ if (res && (res->flags & IORESOURCE_BITS) !=
+ (attr & IORESOURCE_BITS))
+ continue;
+
+ if (!res) {
+ if (align == 0)
+ align = 0x10000;
+
+ res = s->io[i].res = __nonstatic_find_io_region(s,
+ *base, num,
+ align);
+ if (!res)
+ return -EINVAL;
+
+ *base = res->start;
+ s->io[i].res->flags =
+ ((res->flags & ~IORESOURCE_BITS) |
+ (attr & IORESOURCE_BITS));
+ s->io[i].InUse = num;
+ return 0;
+ }
+
+ /* Try to extend top of window */
+ try = res->end + 1;
+ if ((*base == 0) || (*base == try)) {
+ ret = __nonstatic_adjust_io_region(s, res->start,
+ res->end + num);
+ if (!ret) {
+ ret = adjust_resource(s->io[i].res, res->start,
+ res->end - res->start + num + 1);
+ if (ret)
+ continue;
+ *base = try;
+ s->io[i].InUse += num;
+ return 0;
+ }
+ }
+
+ /* Try to extend bottom of window */
+ try = res->start - num;
+ if ((*base == 0) || (*base == try)) {
+ ret = __nonstatic_adjust_io_region(s,
+ res->start - num,
+ res->end);
+ if (!ret) {
+ ret = adjust_resource(s->io[i].res,
+ res->start - num,
+ res->end - res->start + num + 1);
+ if (ret)
+ continue;
+ *base = try;
+ s->io[i].InUse += num;
+ return 0;
+ }
+ }
+ }
+
+ return -EINVAL;
+}
+
+
static struct resource *nonstatic_find_mem_region(u_long base, u_long num,
u_long align, int low, struct pcmcia_socket *s)
{
- struct resource *res = make_resource(0, num, IORESOURCE_MEM, dev_name(&s->dev));
+ struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_MEM,
+ dev_name(&s->dev));
struct socket_data *s_data = s->resource_data;
struct pcmcia_align_data data;
unsigned long min, max;
@@ -861,23 +935,42 @@ static int nonstatic_autoadd_resources(struct pcmcia_socket *s)
return -ENODEV;
#if defined(CONFIG_X86)
- /* If this is the root bus, the risk of hitting
- * some strange system devices which aren't protected
- * by either ACPI resource tables or properly requested
- * resources is too big. Therefore, don't do auto-adding
- * of resources at the moment.
+ /* If this is the root bus, the risk of hitting some strange
+ * system devices is too high: if a driver isn't loaded, the
+ * resources are not claimed; even if a driver is loaded, it
+ * may not request all of them, or may request the wrong ones. We
+ * can neither trust the rest of the kernel nor ACPI/PNP and
+ * CRS parsing to get it right. Therefore, use several
+ * safeguards:
+ *
+ * - Do not auto-add resources if the CardBus bridge is on
+ * the PCI root bus
+ *
+ * - Avoid any I/O ports < 0x100.
+ *
+ * - On PCI-PCI bridges, only use resources which are set up
+ * exclusively for the secondary PCI bus: the risk of hitting
+ * system devices is quite low, as they usually aren't
+ * connected to the secondary PCI bus.
*/
if (s->cb_dev->bus->number == 0)
return -EINVAL;
-#endif
+ for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
+ res = s->cb_dev->bus->resource[i];
+#else
pci_bus_for_each_resource(s->cb_dev->bus, res, i) {
+#endif
if (!res)
continue;
if (res->flags & IORESOURCE_IO) {
+ /* safeguard against the root resource, where the
+ * risk of hitting any other device would be too
+ * high */
if (res == &ioport_resource)
continue;
+
dev_printk(KERN_INFO, &s->cb_dev->dev,
"pcmcia: parent PCI bridge window: %pR\n",
res);
@@ -887,8 +980,12 @@ static int nonstatic_autoadd_resources(struct pcmcia_socket *s)
}
if (res->flags & IORESOURCE_MEM) {
+ /* safeguard against the root resource, where the
+ * risk of hitting any other device would be too
+ * high */
if (res == &iomem_resource)
continue;
+
dev_printk(KERN_INFO, &s->cb_dev->dev,
"pcmcia: parent PCI bridge window: %pR\n",
res);
@@ -956,8 +1053,7 @@ static void nonstatic_release_resource_db(struct pcmcia_socket *s)
struct pccard_resource_ops pccard_nonstatic_ops = {
.validate_mem = pcmcia_nonstatic_validate_mem,
- .adjust_io_region = nonstatic_adjust_io_region,
- .find_io = nonstatic_find_io_region,
+ .find_io = nonstatic_find_io,
.find_mem = nonstatic_find_mem_region,
.add_io = adjust_io,
.add_mem = adjust_memory,
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 83ace277426c..424e576f3acb 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -1303,13 +1303,6 @@ static int yenta_dev_suspend_noirq(struct device *dev)
pci_read_config_dword(pdev, 17*4, &socket->saved_state[1]);
pci_disable_device(pdev);
- /*
- * Some laptops (IBM T22) do not like us putting the Cardbus
- * bridge into D3. At a guess, some other laptop will
- * probably require this, so leave it commented out for now.
- */
- /* pci_set_power_state(dev, 3); */
-
return 0;
}
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index faaa9b4d0d07..c59bcb7c6eb0 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -65,10 +65,10 @@ config BATTERY_DS2760
Say Y here to enable support for batteries with ds2760 chip.
config BATTERY_DS2782
- tristate "DS2782 standalone gas-gauge"
+ tristate "DS2782/DS2786 standalone gas-gauge"
depends on I2C
help
- Say Y here to enable support for the DS2782 standalone battery
+ Say Y here to enable support for the DS2782/DS2786 standalone battery
gas-gauge.
config BATTERY_PMU
@@ -125,6 +125,12 @@ config BATTERY_MAX17040
in handheld and portable equipment. The MAX17040 is configured
to operate with a single lithium cell
+config BATTERY_Z2
+ tristate "Z2 battery driver"
+ depends on I2C && MACH_ZIPIT2
+ help
+ Say Y to include support for the battery on the Zipit Z2.
+
config CHARGER_PCF50633
tristate "NXP PCF50633 MBC"
depends on MFD_PCF50633
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index a2ba7c85c97a..a82f292e5c94 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -31,4 +31,5 @@ obj-$(CONFIG_BATTERY_WM97XX) += wm97xx_battery.o
obj-$(CONFIG_BATTERY_BQ27x00) += bq27x00_battery.o
obj-$(CONFIG_BATTERY_DA9030) += da9030_battery.o
obj-$(CONFIG_BATTERY_MAX17040) += max17040_battery.o
+obj-$(CONFIG_BATTERY_Z2) += z2_battery.o
obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
diff --git a/drivers/power/ds2782_battery.c b/drivers/power/ds2782_battery.c
index 99c89976a902..d762a0cbc6af 100644
--- a/drivers/power/ds2782_battery.c
+++ b/drivers/power/ds2782_battery.c
@@ -5,6 +5,8 @@
*
* Author: Ryan Mallon <ryan@bluewatersys.com>
*
+ * DS2786 added by Yulia Vilensky <vilensky@compulab.co.il>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -20,12 +22,13 @@
#include <linux/idr.h>
#include <linux/power_supply.h>
#include <linux/slab.h>
+#include <linux/ds2782_battery.h>
#define DS2782_REG_RARC 0x06 /* Remaining active relative capacity */
-#define DS2782_REG_VOLT_MSB 0x0c
-#define DS2782_REG_TEMP_MSB 0x0a
-#define DS2782_REG_CURRENT_MSB 0x0e
+#define DS278x_REG_VOLT_MSB 0x0c
+#define DS278x_REG_TEMP_MSB 0x0a
+#define DS278x_REG_CURRENT_MSB 0x0e
/* EEPROM Block */
#define DS2782_REG_RSNSP 0x69 /* Sense resistor value */
@@ -33,18 +36,33 @@
/* Current unit measurement in uA for a 1 milli-ohm sense resistor */
#define DS2782_CURRENT_UNITS 1563
-#define to_ds2782_info(x) container_of(x, struct ds2782_info, battery)
+#define DS2786_REG_RARC 0x02 /* Remaining active relative capacity */
+
+#define DS2786_CURRENT_UNITS 25
+
+struct ds278x_info;
+
+struct ds278x_battery_ops {
+ int (*get_current)(struct ds278x_info *info, int *current_uA);
+ int (*get_voltage)(struct ds278x_info *info, int *voltage_uA);
+ int (*get_capacity)(struct ds278x_info *info, int *capacity_uA);
+
+};
+
+#define to_ds278x_info(x) container_of(x, struct ds278x_info, battery)
-struct ds2782_info {
+struct ds278x_info {
struct i2c_client *client;
struct power_supply battery;
+ struct ds278x_battery_ops *ops;
int id;
+ int rsns;
};
static DEFINE_IDR(battery_id);
static DEFINE_MUTEX(battery_lock);
-static inline int ds2782_read_reg(struct ds2782_info *info, int reg, u8 *val)
+static inline int ds278x_read_reg(struct ds278x_info *info, int reg, u8 *val)
{
int ret;
@@ -58,7 +76,7 @@ static inline int ds2782_read_reg(struct ds2782_info *info, int reg, u8 *val)
return 0;
}
-static inline int ds2782_read_reg16(struct ds2782_info *info, int reg_msb,
+static inline int ds278x_read_reg16(struct ds278x_info *info, int reg_msb,
s16 *val)
{
int ret;
@@ -73,7 +91,7 @@ static inline int ds2782_read_reg16(struct ds2782_info *info, int reg_msb,
return 0;
}
-static int ds2782_get_temp(struct ds2782_info *info, int *temp)
+static int ds278x_get_temp(struct ds278x_info *info, int *temp)
{
s16 raw;
int err;
@@ -84,14 +102,14 @@ static int ds2782_get_temp(struct ds2782_info *info, int *temp)
* celsius. The temperature value is stored as a 10 bit number, plus
* sign in the upper bits of a 16 bit register.
*/
- err = ds2782_read_reg16(info, DS2782_REG_TEMP_MSB, &raw);
+ err = ds278x_read_reg16(info, DS278x_REG_TEMP_MSB, &raw);
if (err)
return err;
*temp = ((raw / 32) * 125) / 100;
return 0;
}
-static int ds2782_get_current(struct ds2782_info *info, int *current_uA)
+static int ds2782_get_current(struct ds278x_info *info, int *current_uA)
{
int sense_res;
int err;
@@ -102,7 +120,7 @@ static int ds2782_get_current(struct ds2782_info *info, int *current_uA)
* The units of measurement for current are dependent on the value of
* the sense resistor.
*/
- err = ds2782_read_reg(info, DS2782_REG_RSNSP, &sense_res_raw);
+ err = ds278x_read_reg(info, DS2782_REG_RSNSP, &sense_res_raw);
if (err)
return err;
if (sense_res_raw == 0) {
@@ -113,14 +131,14 @@ static int ds2782_get_current(struct ds2782_info *info, int *current_uA)
dev_dbg(&info->client->dev, "sense resistor = %d milli-ohms\n",
sense_res);
- err = ds2782_read_reg16(info, DS2782_REG_CURRENT_MSB, &raw);
+ err = ds278x_read_reg16(info, DS278x_REG_CURRENT_MSB, &raw);
if (err)
return err;
*current_uA = raw * (DS2782_CURRENT_UNITS / sense_res);
return 0;
}
-static int ds2782_get_voltage(struct ds2782_info *info, int *voltage_uA)
+static int ds2782_get_voltage(struct ds278x_info *info, int *voltage_uA)
{
s16 raw;
int err;
@@ -129,36 +147,77 @@ static int ds2782_get_voltage(struct ds2782_info *info, int *voltage_uA)
* Voltage is measured in units of 4.88mV. The voltage is stored as
* a 10-bit number plus sign, in the upper bits of a 16-bit register
*/
- err = ds2782_read_reg16(info, DS2782_REG_VOLT_MSB, &raw);
+ err = ds278x_read_reg16(info, DS278x_REG_VOLT_MSB, &raw);
if (err)
return err;
*voltage_uA = (raw / 32) * 4800;
return 0;
}
-static int ds2782_get_capacity(struct ds2782_info *info, int *capacity)
+static int ds2782_get_capacity(struct ds278x_info *info, int *capacity)
{
int err;
u8 raw;
- err = ds2782_read_reg(info, DS2782_REG_RARC, &raw);
+ err = ds278x_read_reg(info, DS2782_REG_RARC, &raw);
if (err)
return err;
*capacity = raw;
return raw;
}
-static int ds2782_get_status(struct ds2782_info *info, int *status)
+static int ds2786_get_current(struct ds278x_info *info, int *current_uA)
+{
+ int err;
+ s16 raw;
+
+ err = ds278x_read_reg16(info, DS278x_REG_CURRENT_MSB, &raw);
+ if (err)
+ return err;
+ *current_uA = (raw / 16) * (DS2786_CURRENT_UNITS / info->rsns);
+ return 0;
+}
+
+static int ds2786_get_voltage(struct ds278x_info *info, int *voltage_uA)
+{
+ s16 raw;
+ int err;
+
+ /*
+ * Voltage is measured in units of 1.22mV. The voltage is stored as
+ * a 10-bit number plus sign, in the upper bits of a 16-bit register
+ */
+ err = ds278x_read_reg16(info, DS278x_REG_VOLT_MSB, &raw);
+ if (err)
+ return err;
+ *voltage_uA = (raw / 8) * 1220;
+ return 0;
+}
+
+static int ds2786_get_capacity(struct ds278x_info *info, int *capacity)
+{
+ int err;
+ u8 raw;
+
+ err = ds278x_read_reg(info, DS2786_REG_RARC, &raw);
+ if (err)
+ return err;
+ /* Relative capacity is displayed with resolution 0.5% */
+ *capacity = raw / 2;
+ return 0;
+}
+
+static int ds278x_get_status(struct ds278x_info *info, int *status)
{
int err;
int current_uA;
int capacity;
- err = ds2782_get_current(info, &current_uA);
+ err = info->ops->get_current(info, &current_uA);
if (err)
return err;
- err = ds2782_get_capacity(info, &capacity);
+ err = info->ops->get_capacity(info, &capacity);
if (err)
return err;
@@ -174,32 +233,32 @@ static int ds2782_get_status(struct ds2782_info *info, int *status)
return 0;
}
-static int ds2782_battery_get_property(struct power_supply *psy,
+static int ds278x_battery_get_property(struct power_supply *psy,
enum power_supply_property prop,
union power_supply_propval *val)
{
- struct ds2782_info *info = to_ds2782_info(psy);
+ struct ds278x_info *info = to_ds278x_info(psy);
int ret;
switch (prop) {
case POWER_SUPPLY_PROP_STATUS:
- ret = ds2782_get_status(info, &val->intval);
+ ret = ds278x_get_status(info, &val->intval);
break;
case POWER_SUPPLY_PROP_CAPACITY:
- ret = ds2782_get_capacity(info, &val->intval);
+ ret = info->ops->get_capacity(info, &val->intval);
break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
- ret = ds2782_get_voltage(info, &val->intval);
+ ret = info->ops->get_voltage(info, &val->intval);
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
- ret = ds2782_get_current(info, &val->intval);
+ ret = info->ops->get_current(info, &val->intval);
break;
case POWER_SUPPLY_PROP_TEMP:
- ret = ds2782_get_temp(info, &val->intval);
+ ret = ds278x_get_temp(info, &val->intval);
break;
default:
@@ -209,7 +268,7 @@ static int ds2782_battery_get_property(struct power_supply *psy,
return ret;
}
-static enum power_supply_property ds2782_battery_props[] = {
+static enum power_supply_property ds278x_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_CAPACITY,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
@@ -217,18 +276,18 @@ static enum power_supply_property ds2782_battery_props[] = {
POWER_SUPPLY_PROP_TEMP,
};
-static void ds2782_power_supply_init(struct power_supply *battery)
+static void ds278x_power_supply_init(struct power_supply *battery)
{
battery->type = POWER_SUPPLY_TYPE_BATTERY;
- battery->properties = ds2782_battery_props;
- battery->num_properties = ARRAY_SIZE(ds2782_battery_props);
- battery->get_property = ds2782_battery_get_property;
+ battery->properties = ds278x_battery_props;
+ battery->num_properties = ARRAY_SIZE(ds278x_battery_props);
+ battery->get_property = ds278x_battery_get_property;
battery->external_power_changed = NULL;
}
-static int ds2782_battery_remove(struct i2c_client *client)
+static int ds278x_battery_remove(struct i2c_client *client)
{
- struct ds2782_info *info = i2c_get_clientdata(client);
+ struct ds278x_info *info = i2c_get_clientdata(client);
power_supply_unregister(&info->battery);
kfree(info->battery.name);
@@ -237,19 +296,45 @@ static int ds2782_battery_remove(struct i2c_client *client)
idr_remove(&battery_id, info->id);
mutex_unlock(&battery_lock);
- i2c_set_clientdata(client, info);
-
kfree(info);
return 0;
}
-static int ds2782_battery_probe(struct i2c_client *client,
+enum ds278x_num_id {
+ DS2782 = 0,
+ DS2786,
+};
+
+static struct ds278x_battery_ops ds278x_ops[] = {
+ [DS2782] = {
+ .get_current = ds2782_get_current,
+ .get_voltage = ds2782_get_voltage,
+ .get_capacity = ds2782_get_capacity,
+ },
+ [DS2786] = {
+ .get_current = ds2786_get_current,
+ .get_voltage = ds2786_get_voltage,
+ .get_capacity = ds2786_get_capacity,
+ }
+};
+
+static int ds278x_battery_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct ds2782_info *info;
+ struct ds278x_platform_data *pdata = client->dev.platform_data;
+ struct ds278x_info *info;
int ret;
int num;
+ /*
+ * ds2786 should have the sense resistor value set
+ * in the platform data
+ */
+ if (id->driver_data == DS2786 && !pdata) {
+ dev_err(&client->dev, "missing platform data for ds2786\n");
+ return -EINVAL;
+ }
+
/* Get an ID for this battery */
ret = idr_pre_get(&battery_id, GFP_KERNEL);
if (ret == 0) {
@@ -269,15 +354,20 @@ static int ds2782_battery_probe(struct i2c_client *client,
goto fail_info;
}
- info->battery.name = kasprintf(GFP_KERNEL, "ds2782-%d", num);
+ info->battery.name = kasprintf(GFP_KERNEL, "%s-%d", client->name, num);
if (!info->battery.name) {
ret = -ENOMEM;
goto fail_name;
}
+ if (id->driver_data == DS2786)
+ info->rsns = pdata->rsns;
+
i2c_set_clientdata(client, info);
info->client = client;
- ds2782_power_supply_init(&info->battery);
+ info->id = num;
+ info->ops = &ds278x_ops[id->driver_data];
+ ds278x_power_supply_init(&info->battery);
ret = power_supply_register(&client->dev, &info->battery);
if (ret) {
@@ -290,7 +380,6 @@ static int ds2782_battery_probe(struct i2c_client *client,
fail_register:
kfree(info->battery.name);
fail_name:
- i2c_set_clientdata(client, info);
kfree(info);
fail_info:
mutex_lock(&battery_lock);
@@ -300,31 +389,32 @@ fail_id:
return ret;
}
-static const struct i2c_device_id ds2782_id[] = {
- {"ds2782", 0},
+static const struct i2c_device_id ds278x_id[] = {
+ {"ds2782", DS2782},
+ {"ds2786", DS2786},
{},
};
-static struct i2c_driver ds2782_battery_driver = {
+static struct i2c_driver ds278x_battery_driver = {
.driver = {
.name = "ds2782-battery",
},
- .probe = ds2782_battery_probe,
- .remove = ds2782_battery_remove,
- .id_table = ds2782_id,
+ .probe = ds278x_battery_probe,
+ .remove = ds278x_battery_remove,
+ .id_table = ds278x_id,
};
-static int __init ds2782_init(void)
+static int __init ds278x_init(void)
{
- return i2c_add_driver(&ds2782_battery_driver);
+ return i2c_add_driver(&ds278x_battery_driver);
}
-module_init(ds2782_init);
+module_init(ds278x_init);
-static void __exit ds2782_exit(void)
+static void __exit ds278x_exit(void)
{
- i2c_del_driver(&ds2782_battery_driver);
+ i2c_del_driver(&ds278x_battery_driver);
}
-module_exit(ds2782_exit);
+module_exit(ds278x_exit);
MODULE_AUTHOR("Ryan Mallon <ryan@bluewatersys.com>");
MODULE_DESCRIPTION("Maxim/Dallas DS2782 Stand-Alone Fuel Gauge IC driver");
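
The conversion above rests on a small dispatch pattern: each i2c_device_id entry carries a chip index (DS2782 or DS2786) in driver_data, probe uses it to pick one entry of ds278x_ops, and the shared code calls through info->ops without ever testing the chip type again. The stand-alone model below shows that pattern with made-up names and a trivial capacity conversion standing in for the real register reads:

#include <stdio.h>

struct chip_ops {
    int (*get_capacity)(int raw);
};

static int ds2782_capacity(int raw) { return raw; }      /* 1% resolution */
static int ds2786_capacity(int raw) { return raw / 2; }  /* 0.5% resolution */

enum { DS2782_IDX, DS2786_IDX };

static const struct chip_ops ops_table[] = {
    [DS2782_IDX] = { .get_capacity = ds2782_capacity },
    [DS2786_IDX] = { .get_capacity = ds2786_capacity },
};

int main(void)
{
    int driver_data = DS2786_IDX;   /* would come from the matched device id */

    printf("capacity = %d%%\n", ops_table[driver_data].get_capacity(100));
    return 0;
}
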
diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
index a232de6a5703..69f8aa3a6a4b 100644
--- a/drivers/power/pda_power.c
+++ b/drivers/power/pda_power.c
@@ -404,6 +404,13 @@ static int usb_wakeup_enabled;
static int pda_power_suspend(struct platform_device *pdev, pm_message_t state)
{
+ if (pdata->suspend) {
+ int ret = pdata->suspend(state);
+
+ if (ret)
+ return ret;
+ }
+
if (device_may_wakeup(&pdev->dev)) {
if (ac_irq)
ac_wakeup_enabled = !enable_irq_wake(ac_irq->start);
@@ -423,6 +430,9 @@ static int pda_power_resume(struct platform_device *pdev)
disable_irq_wake(ac_irq->start);
}
+ if (pdata->resume)
+ return pdata->resume();
+
return 0;
}
#else
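
The pda_power hunks add optional board hooks: if the platform data provides suspend()/resume() callbacks they are called around the driver's own wakeup handling, and a non-zero return from suspend() vetoes the transition. A small stand-alone sketch of that pattern (the struct and function names here are illustrative, not the pda_power API):

#include <stdio.h>

struct pdata_hooks {
    int (*suspend)(int state);
    int (*resume)(void);
};

static int do_suspend(const struct pdata_hooks *pdata, int state)
{
    if (pdata->suspend) {
        int ret = pdata->suspend(state);

        if (ret)
            return ret;     /* board veto: propagate the error, skip suspend */
    }
    /* ... the driver's own wakeup setup would follow here ... */
    return 0;
}

static int board_suspend(int state)
{
    printf("board suspend, state %d\n", state);
    return 0;
}

int main(void)
{
    struct pdata_hooks pdata = { .suspend = board_suspend, .resume = NULL };

    return do_suspend(&pdata, 3);
}
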
diff --git a/drivers/power/tosa_battery.c b/drivers/power/tosa_battery.c
index 2eab35aab311..ee04936b2db5 100644
--- a/drivers/power/tosa_battery.c
+++ b/drivers/power/tosa_battery.c
@@ -61,7 +61,7 @@ static unsigned long tosa_read_bat(struct tosa_bat *bat)
mutex_lock(&bat_lock);
gpio_set_value(bat->gpio_bat, 1);
msleep(5);
- value = wm97xx_read_aux_adc(bat->psy.dev->parent->driver_data,
+ value = wm97xx_read_aux_adc(dev_get_drvdata(bat->psy.dev->parent),
bat->adc_bat);
gpio_set_value(bat->gpio_bat, 0);
mutex_unlock(&bat_lock);
@@ -81,7 +81,7 @@ static unsigned long tosa_read_temp(struct tosa_bat *bat)
mutex_lock(&bat_lock);
gpio_set_value(bat->gpio_temp, 1);
msleep(5);
- value = wm97xx_read_aux_adc(bat->psy.dev->parent->driver_data,
+ value = wm97xx_read_aux_adc(dev_get_drvdata(bat->psy.dev->parent),
bat->adc_temp);
gpio_set_value(bat->gpio_temp, 0);
mutex_unlock(&bat_lock);
diff --git a/drivers/power/wm831x_power.c b/drivers/power/wm831x_power.c
index 875c4d0f776b..fbcc36dae470 100644
--- a/drivers/power/wm831x_power.c
+++ b/drivers/power/wm831x_power.c
@@ -537,9 +537,9 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
goto err_battery;
irq = platform_get_irq_byname(pdev, "SYSLO");
- ret = wm831x_request_irq(wm831x, irq, wm831x_syslo_irq,
- IRQF_TRIGGER_RISING, "SYSLO",
- power);
+ ret = request_threaded_irq(irq, NULL, wm831x_syslo_irq,
+ IRQF_TRIGGER_RISING, "System power low",
+ power);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request SYSLO IRQ %d: %d\n",
irq, ret);
@@ -547,9 +547,9 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
}
irq = platform_get_irq_byname(pdev, "PWR SRC");
- ret = wm831x_request_irq(wm831x, irq, wm831x_pwr_src_irq,
- IRQF_TRIGGER_RISING, "Power source",
- power);
+ ret = request_threaded_irq(irq, NULL, wm831x_pwr_src_irq,
+ IRQF_TRIGGER_RISING, "Power source",
+ power);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request PWR SRC IRQ %d: %d\n",
irq, ret);
@@ -558,10 +558,10 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(wm831x_bat_irqs); i++) {
irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]);
- ret = wm831x_request_irq(wm831x, irq, wm831x_bat_irq,
- IRQF_TRIGGER_RISING,
- wm831x_bat_irqs[i],
- power);
+ ret = request_threaded_irq(irq, NULL, wm831x_bat_irq,
+ IRQF_TRIGGER_RISING,
+ wm831x_bat_irqs[i],
+ power);
if (ret != 0) {
dev_err(&pdev->dev,
"Failed to request %s IRQ %d: %d\n",
@@ -575,13 +575,13 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
err_bat_irq:
for (; i >= 0; i--) {
irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]);
- wm831x_free_irq(wm831x, irq, power);
+ free_irq(irq, power);
}
irq = platform_get_irq_byname(pdev, "PWR SRC");
- wm831x_free_irq(wm831x, irq, power);
+ free_irq(irq, power);
err_syslo:
irq = platform_get_irq_byname(pdev, "SYSLO");
- wm831x_free_irq(wm831x, irq, power);
+ free_irq(irq, power);
err_usb:
power_supply_unregister(usb);
err_battery:
@@ -596,19 +596,18 @@ err_kmalloc:
static __devexit int wm831x_power_remove(struct platform_device *pdev)
{
struct wm831x_power *wm831x_power = platform_get_drvdata(pdev);
- struct wm831x *wm831x = wm831x_power->wm831x;
int irq, i;
for (i = 0; i < ARRAY_SIZE(wm831x_bat_irqs); i++) {
irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]);
- wm831x_free_irq(wm831x, irq, wm831x_power);
+ free_irq(irq, wm831x_power);
}
irq = platform_get_irq_byname(pdev, "PWR SRC");
- wm831x_free_irq(wm831x, irq, wm831x_power);
+ free_irq(irq, wm831x_power);
irq = platform_get_irq_byname(pdev, "SYSLO");
- wm831x_free_irq(wm831x, irq, wm831x_power);
+ free_irq(irq, wm831x_power);
power_supply_unregister(&wm831x_power->battery);
power_supply_unregister(&wm831x_power->wall);
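
The wm831x conversion drops the chip-private IRQ wrappers in favour of request_threaded_irq()/free_irq(); passing NULL as the primary handler asks the IRQ core to supply a default one that merely wakes the threaded handler, which then runs in process context. The stand-alone model below mimics that split with plain function pointers; simulate_irq() and the other names are invented for illustration and are not the kernel API:

#include <stdio.h>
#include <stddef.h>

typedef int (*handler_t)(int irq, void *dev_id);

static int default_primary(int irq, void *dev_id)
{
    return 1;   /* "wake the thread", as the core's default handler does */
}

static void simulate_irq(int irq, handler_t primary, handler_t thread_fn,
                         void *dev_id)
{
    if (!primary)
        primary = default_primary;  /* what a NULL primary handler means */
    if (primary(irq, dev_id))
        thread_fn(irq, dev_id);     /* runs in process (thread) context */
}

static int syslo_thread(int irq, void *dev_id)
{
    printf("SYSLO handled on irq %d\n", irq);
    return 0;
}

int main(void)
{
    simulate_irq(17, NULL, syslo_thread, NULL);
    return 0;
}
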
diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c
index 94c70650aafc..4e8afce0c818 100644
--- a/drivers/power/wm97xx_battery.c
+++ b/drivers/power/wm97xx_battery.c
@@ -308,6 +308,9 @@ static void __exit wm97xx_bat_exit(void)
platform_driver_unregister(&wm97xx_bat_driver);
}
+/* The interface is deprecated, as well as linux/wm97xx_batt.h */
+void wm97xx_bat_set_pdata(struct wm97xx_batt_info *data);
+
void wm97xx_bat_set_pdata(struct wm97xx_batt_info *data)
{
gpdata = data;

diff --git a/drivers/power/z2_battery.c b/drivers/power/z2_battery.c
new file mode 100644
index 000000000000..9cca465436e3
--- /dev/null
+++ b/drivers/power/z2_battery.c
@@ -0,0 +1,328 @@
+/*
+ * Battery measurement code for Zipit Z2
+ *
+ * Copyright (C) 2009 Peter Edwards <sweetlilmre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/i2c.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <asm/irq.h>
+#include <asm/mach/irq.h>
+#include <linux/z2_battery.h>
+
+#define Z2_DEFAULT_NAME "Z2"
+
+struct z2_charger {
+ struct z2_battery_info *info;
+ int bat_status;
+ struct i2c_client *client;
+ struct power_supply batt_ps;
+ struct mutex work_lock;
+ struct work_struct bat_work;
+};
+
+static unsigned long z2_read_bat(struct z2_charger *charger)
+{
+ int data;
+ data = i2c_smbus_read_byte_data(charger->client,
+ charger->info->batt_I2C_reg);
+ if (data < 0)
+ return 0;
+
+ return data * charger->info->batt_mult / charger->info->batt_div;
+}
+
+static int z2_batt_get_property(struct power_supply *batt_ps,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct z2_charger *charger = container_of(batt_ps, struct z2_charger,
+ batt_ps);
+ struct z2_battery_info *info = charger->info;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = charger->bat_status;
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = info->batt_tech;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ if (info->batt_I2C_reg >= 0)
+ val->intval = z2_read_bat(charger);
+ else
+ return -EINVAL;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ if (info->max_voltage >= 0)
+ val->intval = info->max_voltage;
+ else
+ return -EINVAL;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+ if (info->min_voltage >= 0)
+ val->intval = info->min_voltage;
+ else
+ return -EINVAL;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void z2_batt_ext_power_changed(struct power_supply *batt_ps)
+{
+ struct z2_charger *charger = container_of(batt_ps, struct z2_charger,
+ batt_ps);
+ schedule_work(&charger->bat_work);
+}
+
+static void z2_batt_update(struct z2_charger *charger)
+{
+ int old_status = charger->bat_status;
+ struct z2_battery_info *info;
+
+ info = charger->info;
+
+ mutex_lock(&charger->work_lock);
+
+ charger->bat_status = (info->charge_gpio >= 0) ?
+ (gpio_get_value(info->charge_gpio) ?
+ POWER_SUPPLY_STATUS_CHARGING :
+ POWER_SUPPLY_STATUS_DISCHARGING) :
+ POWER_SUPPLY_STATUS_UNKNOWN;
+
+ if (old_status != charger->bat_status) {
+ pr_debug("%s: %i -> %i\n", charger->batt_ps.name, old_status,
+ charger->bat_status);
+ power_supply_changed(&charger->batt_ps);
+ }
+
+ mutex_unlock(&charger->work_lock);
+}
+
+static void z2_batt_work(struct work_struct *work)
+{
+ struct z2_charger *charger;
+ charger = container_of(work, struct z2_charger, bat_work);
+ z2_batt_update(charger);
+}
+
+static irqreturn_t z2_charge_switch_irq(int irq, void *devid)
+{
+ struct z2_charger *charger = devid;
+ schedule_work(&charger->bat_work);
+ return IRQ_HANDLED;
+}
+
+static int z2_batt_ps_init(struct z2_charger *charger, int props)
+{
+ int i = 0;
+ enum power_supply_property *prop;
+ struct z2_battery_info *info = charger->info;
+
+ if (info->batt_tech >= 0)
+ props++; /* POWER_SUPPLY_PROP_TECHNOLOGY */
+ if (info->batt_I2C_reg >= 0)
+ props++; /* POWER_SUPPLY_PROP_VOLTAGE_NOW */
+ if (info->max_voltage >= 0)
+ props++; /* POWER_SUPPLY_PROP_VOLTAGE_MAX */
+ if (info->min_voltage >= 0)
+ props++; /* POWER_SUPPLY_PROP_VOLTAGE_MIN */
+
+ prop = kzalloc(props * sizeof(*prop), GFP_KERNEL);
+ if (!prop)
+ return -ENOMEM;
+
+ prop[i++] = POWER_SUPPLY_PROP_PRESENT;
+ if (info->charge_gpio >= 0)
+ prop[i++] = POWER_SUPPLY_PROP_STATUS;
+ if (info->batt_tech >= 0)
+ prop[i++] = POWER_SUPPLY_PROP_TECHNOLOGY;
+ if (info->batt_I2C_reg >= 0)
+ prop[i++] = POWER_SUPPLY_PROP_VOLTAGE_NOW;
+ if (info->max_voltage >= 0)
+ prop[i++] = POWER_SUPPLY_PROP_VOLTAGE_MAX;
+ if (info->min_voltage >= 0)
+ prop[i++] = POWER_SUPPLY_PROP_VOLTAGE_MIN;
+
+ if (!info->batt_name) {
+ dev_info(&charger->client->dev,
+ "Please consider setting proper battery "
+ "name in platform definition file, falling "
+ "back to name \" Z2_DEFAULT_NAME \"\n");
+ charger->batt_ps.name = Z2_DEFAULT_NAME;
+ } else
+ charger->batt_ps.name = info->batt_name;
+
+ charger->batt_ps.properties = prop;
+ charger->batt_ps.num_properties = props;
+ charger->batt_ps.type = POWER_SUPPLY_TYPE_BATTERY;
+ charger->batt_ps.get_property = z2_batt_get_property;
+ charger->batt_ps.external_power_changed = z2_batt_ext_power_changed;
+ charger->batt_ps.use_for_apm = 1;
+
+ return 0;
+}
+
+static int __devinit z2_batt_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret = 0;
+ int props = 1; /* POWER_SUPPLY_PROP_PRESENT */
+ struct z2_charger *charger;
+ struct z2_battery_info *info = client->dev.platform_data;
+
+ if (info == NULL) {
+ dev_err(&client->dev,
+ "Please set platform device platform_data"
+ " to a valid z2_battery_info pointer!\n");
+ return -EINVAL;
+ }
+
+ charger = kzalloc(sizeof(*charger), GFP_KERNEL);
+ if (charger == NULL)
+ return -ENOMEM;
+
+ charger->bat_status = POWER_SUPPLY_STATUS_UNKNOWN;
+ charger->info = info;
+ charger->client = client;
+ i2c_set_clientdata(client, charger);
+
+ mutex_init(&charger->work_lock);
+
+ if (info->charge_gpio >= 0 && gpio_is_valid(info->charge_gpio)) {
+ ret = gpio_request(info->charge_gpio, "BATT CHRG");
+ if (ret)
+ goto err;
+
+ ret = gpio_direction_input(info->charge_gpio);
+ if (ret)
+ goto err2;
+
+ set_irq_type(gpio_to_irq(info->charge_gpio),
+ IRQ_TYPE_EDGE_BOTH);
+ ret = request_irq(gpio_to_irq(info->charge_gpio),
+ z2_charge_switch_irq, IRQF_DISABLED,
+ "AC Detect", charger);
+ if (ret)
+ goto err3;
+ }
+
+ ret = z2_batt_ps_init(charger, props);
+ if (ret)
+ goto err3;
+
+ INIT_WORK(&charger->bat_work, z2_batt_work);
+
+ ret = power_supply_register(&client->dev, &charger->batt_ps);
+ if (ret)
+ goto err4;
+
+ schedule_work(&charger->bat_work);
+
+ return 0;
+
+err4:
+ kfree(charger->batt_ps.properties);
+err3:
+ if (info->charge_gpio >= 0 && gpio_is_valid(info->charge_gpio))
+ free_irq(gpio_to_irq(info->charge_gpio), charger);
+err2:
+ if (info->charge_gpio >= 0 && gpio_is_valid(info->charge_gpio))
+ gpio_free(info->charge_gpio);
+err:
+ kfree(charger);
+ return ret;
+}
+
+static int __devexit z2_batt_remove(struct i2c_client *client)
+{
+ struct z2_charger *charger = i2c_get_clientdata(client);
+ struct z2_battery_info *info = charger->info;
+
+ flush_scheduled_work();
+ power_supply_unregister(&charger->batt_ps);
+
+ kfree(charger->batt_ps.properties);
+ if (info->charge_gpio >= 0 && gpio_is_valid(info->charge_gpio)) {
+ free_irq(gpio_to_irq(info->charge_gpio), charger);
+ gpio_free(info->charge_gpio);
+ }
+
+ kfree(charger);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int z2_batt_suspend(struct i2c_client *client, pm_message_t state)
+{
+ flush_scheduled_work();
+ return 0;
+}
+
+static int z2_batt_resume(struct i2c_client *client)
+{
+ struct z2_charger *charger = i2c_get_clientdata(client);
+
+ schedule_work(&charger->bat_work);
+ return 0;
+}
+#else
+#define z2_batt_suspend NULL
+#define z2_batt_resume NULL
+#endif
+
+static const struct i2c_device_id z2_batt_id[] = {
+ { "aer915", 0 },
+ { }
+};
+
+static struct i2c_driver z2_batt_driver = {
+ .driver = {
+ .name = "z2-battery",
+ .owner = THIS_MODULE,
+ },
+ .probe = z2_batt_probe,
+ .remove = z2_batt_remove,
+ .suspend = z2_batt_suspend,
+ .resume = z2_batt_resume,
+ .id_table = z2_batt_id,
+};
+
+static int __init z2_batt_init(void)
+{
+ return i2c_add_driver(&z2_batt_driver);
+}
+
+static void __exit z2_batt_exit(void)
+{
+ i2c_del_driver(&z2_batt_driver);
+}
+
+module_init(z2_batt_init);
+module_exit(z2_batt_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Peter Edwards <sweetlilmre@gmail.com>");
+MODULE_DESCRIPTION("Zipit Z2 battery driver");
diff --git a/drivers/ps3/ps3-sys-manager.c b/drivers/ps3/ps3-sys-manager.c
index 3cbaf1811bd0..d37c445f0eda 100644
--- a/drivers/ps3/ps3-sys-manager.c
+++ b/drivers/ps3/ps3-sys-manager.c
@@ -119,7 +119,7 @@ enum ps3_sys_manager_service_id {
* enum ps3_sys_manager_attr - Notification attribute (bit position mask).
* @PS3_SM_ATTR_POWER: Power button.
* @PS3_SM_ATTR_RESET: Reset button, not available on retail console.
- * @PS3_SM_ATTR_THERMAL: Sytem thermal alert.
+ * @PS3_SM_ATTR_THERMAL: System thermal alert.
* @PS3_SM_ATTR_CONTROLLER: Remote controller event.
* @PS3_SM_ATTR_ALL: Logical OR of all.
*
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index 5fb83e2ced25..7d149a8d8d9b 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -23,9 +23,9 @@ struct pm8607_regulator_info {
struct regulator_dev *regulator;
struct i2c_client *i2c;
- int min_uV;
- int max_uV;
- int step_uV;
+ unsigned int *vol_table;
+ unsigned int *vol_suspend;
+
int vol_reg;
int vol_shift;
int vol_nbits;
@@ -36,83 +36,189 @@ struct pm8607_regulator_info {
int slope_double;
};
-static inline int check_range(struct pm8607_regulator_info *info,
- int min_uV, int max_uV)
-{
- if (max_uV < info->min_uV || min_uV > info->max_uV || min_uV > max_uV)
- return -EINVAL;
+static const unsigned int BUCK1_table[] = {
+ 725000, 750000, 775000, 800000, 825000, 850000, 875000, 900000,
+ 925000, 950000, 975000, 1000000, 1025000, 1050000, 1075000, 1100000,
+ 1125000, 1150000, 1175000, 1200000, 1225000, 1250000, 1275000, 1300000,
+ 1325000, 1350000, 1375000, 1400000, 1425000, 1450000, 1475000, 1500000,
+ 0, 25000, 50000, 75000, 100000, 125000, 150000, 175000,
+ 200000, 225000, 250000, 275000, 300000, 325000, 350000, 375000,
+ 400000, 425000, 450000, 475000, 500000, 525000, 550000, 575000,
+ 600000, 625000, 650000, 675000, 700000, 725000, 750000, 775000,
+};
- return 0;
-}
+static const unsigned int BUCK1_suspend_table[] = {
+ 0, 25000, 50000, 75000, 100000, 125000, 150000, 175000,
+ 200000, 225000, 250000, 275000, 300000, 325000, 350000, 375000,
+ 400000, 425000, 450000, 475000, 500000, 525000, 550000, 575000,
+ 600000, 625000, 650000, 675000, 700000, 725000, 750000, 775000,
+ 800000, 825000, 850000, 875000, 900000, 925000, 950000, 975000,
+ 1000000, 1025000, 1050000, 1075000, 1100000, 1125000, 1150000, 1175000,
+ 1200000, 1225000, 1250000, 1275000, 1300000, 1325000, 1350000, 1375000,
+ 1400000, 1425000, 1450000, 1475000, 1500000, 1500000, 1500000, 1500000,
+};
+
+static const unsigned int BUCK2_table[] = {
+ 0, 50000, 100000, 150000, 200000, 250000, 300000, 350000,
+ 400000, 450000, 500000, 550000, 600000, 650000, 700000, 750000,
+ 800000, 850000, 900000, 950000, 1000000, 1050000, 1100000, 1150000,
+ 1200000, 1250000, 1300000, 1350000, 1400000, 1450000, 1500000, 1550000,
+ 1600000, 1650000, 1700000, 1750000, 1800000, 1850000, 1900000, 1950000,
+ 2000000, 2050000, 2100000, 2150000, 2200000, 2250000, 2300000, 2350000,
+ 2400000, 2450000, 2500000, 2550000, 2600000, 2650000, 2700000, 2750000,
+ 2800000, 2850000, 2900000, 2950000, 3000000, 3000000, 3000000, 3000000,
+};
+
+static const unsigned int BUCK2_suspend_table[] = {
+ 0, 50000, 100000, 150000, 200000, 250000, 300000, 350000,
+ 400000, 450000, 500000, 550000, 600000, 650000, 700000, 750000,
+ 800000, 850000, 900000, 950000, 1000000, 1050000, 1100000, 1150000,
+ 1200000, 1250000, 1300000, 1350000, 1400000, 1450000, 1500000, 1550000,
+ 1600000, 1650000, 1700000, 1750000, 1800000, 1850000, 1900000, 1950000,
+ 2000000, 2050000, 2100000, 2150000, 2200000, 2250000, 2300000, 2350000,
+ 2400000, 2450000, 2500000, 2550000, 2600000, 2650000, 2700000, 2750000,
+ 2800000, 2850000, 2900000, 2950000, 3000000, 3000000, 3000000, 3000000,
+};
+
+static const unsigned int BUCK3_table[] = {
+ 0, 25000, 50000, 75000, 100000, 125000, 150000, 175000,
+ 200000, 225000, 250000, 275000, 300000, 325000, 350000, 375000,
+ 400000, 425000, 450000, 475000, 500000, 525000, 550000, 575000,
+ 600000, 625000, 650000, 675000, 700000, 725000, 750000, 775000,
+ 800000, 825000, 850000, 875000, 900000, 925000, 950000, 975000,
+ 1000000, 1025000, 1050000, 1075000, 1100000, 1125000, 1150000, 1175000,
+ 1200000, 1225000, 1250000, 1275000, 1300000, 1325000, 1350000, 1375000,
+ 1400000, 1425000, 1450000, 1475000, 1500000, 1500000, 1500000, 1500000,
+};
+
+static const unsigned int BUCK3_suspend_table[] = {
+ 0, 25000, 50000, 75000, 100000, 125000, 150000, 175000,
+ 200000, 225000, 250000, 275000, 300000, 325000, 350000, 375000,
+ 400000, 425000, 450000, 475000, 500000, 525000, 550000, 575000,
+ 600000, 625000, 650000, 675000, 700000, 725000, 750000, 775000,
+ 800000, 825000, 850000, 875000, 900000, 925000, 950000, 975000,
+ 1000000, 1025000, 1050000, 1075000, 1100000, 1125000, 1150000, 1175000,
+ 1200000, 1225000, 1250000, 1275000, 1300000, 1325000, 1350000, 1375000,
+ 1400000, 1425000, 1450000, 1475000, 1500000, 1500000, 1500000, 1500000,
+};
+
+static const unsigned int LDO1_table[] = {
+ 1800000, 1200000, 2800000, 0,
+};
+
+static const unsigned int LDO1_suspend_table[] = {
+ 1800000, 1200000, 0, 0,
+};
+
+static const unsigned int LDO2_table[] = {
+ 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 3300000,
+};
+
+static const unsigned int LDO2_suspend_table[] = {
+ 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000,
+};
+
+static const unsigned int LDO3_table[] = {
+ 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 3300000,
+};
+
+static const unsigned int LDO3_suspend_table[] = {
+ 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000,
+};
+
+static const unsigned int LDO4_table[] = {
+ 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2900000, 3300000,
+};
+
+static const unsigned int LDO4_suspend_table[] = {
+ 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2900000, 2900000,
+};
+
+static const unsigned int LDO5_table[] = {
+ 2900000, 3000000, 3100000, 3300000,
+};
+
+static const unsigned int LDO5_suspend_table[] = {
+ 2900000, 0, 0, 0,
+};
+
+static const unsigned int LDO6_table[] = {
+ 1800000, 1850000, 2600000, 2650000, 2700000, 2750000, 2800000, 3300000,
+};
+
+static const unsigned int LDO6_suspend_table[] = {
+ 1800000, 1850000, 2600000, 2650000, 2700000, 2750000, 2800000, 2900000,
+};
+
+static const unsigned int LDO7_table[] = {
+ 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000,
+};
+
+static const unsigned int LDO7_suspend_table[] = {
+ 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000,
+};
+
+static const unsigned int LDO8_table[] = {
+ 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000,
+};
+
+static const unsigned int LDO8_suspend_table[] = {
+ 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000,
+};
+
+static const unsigned int LDO9_table[] = {
+ 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 3300000,
+};
+
+static const unsigned int LDO9_suspend_table[] = {
+ 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000,
+};
+
+static const unsigned int LDO10_table[] = {
+ 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 3300000,
+ 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000,
+};
+
+static const unsigned int LDO10_suspend_table[] = {
+ 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000,
+ 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000,
+};
+
+static const unsigned int LDO12_table[] = {
+ 1800000, 1900000, 2700000, 2800000, 2900000, 3000000, 3100000, 3300000,
+ 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000,
+};
+
+static const unsigned int LDO12_suspend_table[] = {
+ 1800000, 1900000, 2700000, 2800000, 2900000, 2900000, 2900000, 2900000,
+ 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000,
+};
+
+static const unsigned int LDO13_table[] = {
+ 1300000, 1800000, 2000000, 2500000, 2800000, 3000000, 0, 0,
+};
+
+static const unsigned int LDO13_suspend_table[] = {
+ 0,
+};
+
+static const unsigned int LDO14_table[] = {
+ 1800000, 1850000, 2700000, 2750000, 2800000, 2850000, 2900000, 3300000,
+};
+
+static const unsigned int LDO14_suspend_table[] = {
+ 1800000, 1850000, 2700000, 2750000, 2800000, 2850000, 2900000, 2900000,
+};
static int pm8607_list_voltage(struct regulator_dev *rdev, unsigned index)
{
struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
int ret = -EINVAL;
- switch (info->desc.id) {
- case PM8607_ID_BUCK1:
- ret = (index < 0x1d) ? (index * 25000 + 800000) :
- ((index < 0x20) ? 1500000 :
- ((index < 0x40) ? ((index - 0x20) * 25000) :
- -EINVAL));
- break;
- case PM8607_ID_BUCK3:
- ret = (index < 0x3d) ? (index * 25000) :
- ((index < 0x40) ? 1500000 : -EINVAL);
- if (ret < 0)
- break;
+ if (info->vol_table && (index < (2 << info->vol_nbits))) {
+ ret = info->vol_table[index];
if (info->slope_double)
ret <<= 1;
- break;
- case PM8607_ID_LDO1:
- ret = (index == 0) ? 1800000 :
- ((index == 1) ? 1200000 :
- ((index == 2) ? 2800000 : -EINVAL));
- break;
- case PM8607_ID_LDO5:
- ret = (index == 0) ? 2900000 :
- ((index == 1) ? 3000000 :
- ((index == 2) ? 3100000 : 3300000));
- break;
- case PM8607_ID_LDO7:
- case PM8607_ID_LDO8:
- ret = (index < 3) ? (index * 50000 + 1800000) :
- ((index < 8) ? (index * 50000 + 2550000) :
- -EINVAL);
- break;
- case PM8607_ID_LDO12:
- ret = (index < 2) ? (index * 100000 + 1800000) :
- ((index < 7) ? (index * 100000 + 2500000) :
- ((index == 7) ? 3300000 : 1200000));
- break;
- case PM8607_ID_LDO2:
- case PM8607_ID_LDO3:
- case PM8607_ID_LDO9:
- ret = (index < 3) ? (index * 50000 + 1800000) :
- ((index < 7) ? (index * 50000 + 2550000) :
- 3300000);
- break;
- case PM8607_ID_LDO4:
- ret = (index < 3) ? (index * 50000 + 1800000) :
- ((index < 6) ? (index * 50000 + 2550000) :
- ((index == 6) ? 2900000 : 3300000));
- break;
- case PM8607_ID_LDO6:
- ret = (index < 2) ? (index * 50000 + 1800000) :
- ((index < 7) ? (index * 50000 + 2500000) :
- 3300000);
- break;
- case PM8607_ID_LDO10:
- ret = (index < 3) ? (index * 50000 + 1800000) :
- ((index < 7) ? (index * 50000 + 2550000) :
- ((index == 7) ? 3300000 : 1200000));
- break;
- case PM8607_ID_LDO14:
- ret = (index < 2) ? (index * 50000 + 1800000) :
- ((index < 7) ? (index * 50000 + 2600000) :
- 3300000);
- break;
}
return ret;
}
@@ -120,174 +226,26 @@ static int pm8607_list_voltage(struct regulator_dev *rdev, unsigned index)
static int choose_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
{
struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
- int val = -ENOENT;
- int ret;
+ int i, ret = -ENOENT;
- switch (info->desc.id) {
- case PM8607_ID_BUCK1:
- if (min_uV >= 800000) /* 800mV ~ 1500mV / 25mV */
- val = (min_uV - 775001) / 25000;
- else { /* 25mV ~ 775mV / 25mV */
- val = (min_uV + 249999) / 25000;
- val += 32;
- }
- break;
- case PM8607_ID_BUCK3:
- if (info->slope_double)
- min_uV = min_uV >> 1;
- val = (min_uV + 249999) / 25000; /* 0mV ~ 1500mV / 25mV */
-
- break;
- case PM8607_ID_LDO1:
- if (min_uV > 1800000)
- val = 2;
- else if (min_uV > 1200000)
- val = 0;
- else
- val = 1;
- break;
- case PM8607_ID_LDO5:
- if (min_uV > 3100000)
- val = 3;
- else /* 2900mV ~ 3100mV / 100mV */
- val = (min_uV - 2800001) / 100000;
- break;
- case PM8607_ID_LDO7:
- case PM8607_ID_LDO8:
- if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 50mV */
- if (min_uV <= 1800000)
- val = 0; /* 1800mv */
- else if (min_uV <= 1900000)
- val = (min_uV - 1750001) / 50000;
- else
- val = 3; /* 2700mV */
- } else { /* 2700mV ~ 2900mV / 50mV */
- if (min_uV <= 2900000) {
- val = (min_uV - 2650001) / 50000;
- val += 3;
- } else
- val = -EINVAL;
- }
- break;
- case PM8607_ID_LDO10:
- if (min_uV > 2850000)
- val = 7;
- else if (min_uV <= 1200000)
- val = 8;
- else if (min_uV < 2700000) /* 1800mV ~ 1900mV / 50mV */
- val = (min_uV - 1750001) / 50000;
- else { /* 2700mV ~ 2850mV / 50mV */
- val = (min_uV - 2650001) / 50000;
- val += 3;
- }
- break;
- case PM8607_ID_LDO12:
- if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 100mV */
- if (min_uV <= 1200000)
- val = 8; /* 1200mV */
- else if (min_uV <= 1800000)
- val = 0; /* 1800mV */
- else if (min_uV <= 1900000)
- val = (min_uV - 1700001) / 100000;
- else
- val = 2; /* 2700mV */
- } else { /* 2700mV ~ 3100mV / 100mV */
- if (min_uV <= 3100000) {
- val = (min_uV - 2600001) / 100000;
- val += 2;
- } else if (min_uV <= 3300000)
- val = 7;
- else
- val = -EINVAL;
- }
- break;
- case PM8607_ID_LDO2:
- case PM8607_ID_LDO3:
- case PM8607_ID_LDO9:
- if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 50mV */
- if (min_uV <= 1800000)
- val = 0;
- else if (min_uV <= 1900000)
- val = (min_uV - 1750001) / 50000;
- else
- val = 3; /* 2700mV */
- } else { /* 2700mV ~ 2850mV / 50mV */
- if (min_uV <= 2850000) {
- val = (min_uV - 2650001) / 50000;
- val += 3;
- } else if (min_uV <= 3300000)
- val = 7;
- else
- val = -EINVAL;
- }
- break;
- case PM8607_ID_LDO4:
- if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 50mV */
- if (min_uV <= 1800000)
- val = 0;
- else if (min_uV <= 1900000)
- val = (min_uV - 1750001) / 50000;
- else
- val = 3; /* 2700mV */
- } else { /* 2700mV ~ 2800mV / 50mV */
- if (min_uV <= 2850000) {
- val = (min_uV - 2650001) / 50000;
- val += 3;
- } else if (min_uV <= 2900000)
- val = 6;
- else if (min_uV <= 3300000)
- val = 7;
- else
- val = -EINVAL;
- }
- break;
- case PM8607_ID_LDO6:
- if (min_uV < 2600000) { /* 1800mV ~ 1850mV / 50mV */
- if (min_uV <= 1800000)
- val = 0;
- else if (min_uV <= 1850000)
- val = (min_uV - 1750001) / 50000;
- else
- val = 2; /* 2600mV */
- } else { /* 2600mV ~ 2800mV / 50mV */
- if (min_uV <= 2800000) {
- val = (min_uV - 2550001) / 50000;
- val += 2;
- } else if (min_uV <= 3300000)
- val = 7;
- else
- val = -EINVAL;
- }
- break;
- case PM8607_ID_LDO14:
- if (min_uV < 2700000) { /* 1800mV ~ 1850mV / 50mV */
- if (min_uV <= 1800000)
- val = 0;
- else if (min_uV <= 1850000)
- val = (min_uV - 1750001) / 50000;
- else
- val = 2; /* 2700mV */
- } else { /* 2700mV ~ 2900mV / 50mV */
- if (min_uV <= 2900000) {
- val = (min_uV - 2650001) / 50000;
- val += 2;
- } else if (min_uV <= 3300000)
- val = 7;
- else
- val = -EINVAL;
- }
- break;
+ if (info->slope_double) {
+ min_uV = min_uV >> 1;
+ max_uV = max_uV >> 1;
}
- if (val >= 0) {
- ret = pm8607_list_voltage(rdev, val);
- if (ret > max_uV) {
- pr_err("exceed voltage range (%d %d) uV",
- min_uV, max_uV);
- return -EINVAL;
+ if (info->vol_table) {
+ for (i = 0; i < (2 << info->vol_nbits); i++) {
+ if (!info->vol_table[i])
+ break;
+ if ((min_uV <= info->vol_table[i])
+ && (max_uV >= info->vol_table[i])) {
+ ret = i;
+ break;
+ }
}
- } else
- pr_err("invalid voltage range (%d %d) uV", min_uV, max_uV);
- return val;
+ }
+ if (ret < 0)
+ pr_err("invalid voltage range (%d %d) uV\n", min_uV, max_uV);
+ return ret;
}
static int pm8607_set_voltage(struct regulator_dev *rdev,
@@ -297,7 +255,7 @@ static int pm8607_set_voltage(struct regulator_dev *rdev,
uint8_t val, mask;
int ret;
- if (check_range(info, min_uV, max_uV)) {
+ if (min_uV > max_uV) {
pr_err("invalid voltage range (%d, %d) uV\n", min_uV, max_uV);
return -EINVAL;
}
@@ -375,18 +333,15 @@ static struct regulator_ops pm8607_regulator_ops = {
.is_enabled = pm8607_is_enabled,
};
-#define PM8607_DVC(_id, min, max, step, vreg, nbits, ureg, ubit, ereg, ebit) \
+#define PM8607_DVC(vreg, nbits, ureg, ubit, ereg, ebit) \
{ \
.desc = { \
- .name = "BUCK" #_id, \
+ .name = #vreg, \
.ops = &pm8607_regulator_ops, \
.type = REGULATOR_VOLTAGE, \
- .id = PM8607_ID_BUCK##_id, \
+ .id = PM8607_ID_##vreg, \
.owner = THIS_MODULE, \
}, \
- .min_uV = (min) * 1000, \
- .max_uV = (max) * 1000, \
- .step_uV = (step) * 1000, \
.vol_reg = PM8607_##vreg, \
.vol_shift = (0), \
.vol_nbits = (nbits), \
@@ -395,9 +350,11 @@ static struct regulator_ops pm8607_regulator_ops = {
.enable_reg = PM8607_##ereg, \
.enable_bit = (ebit), \
.slope_double = (0), \
+ .vol_table = (unsigned int *)&vreg##_table, \
+ .vol_suspend = (unsigned int *)&vreg##_suspend_table, \
}
-#define PM8607_LDO(_id, min, max, step, vreg, shift, nbits, ereg, ebit) \
+#define PM8607_LDO(_id, vreg, shift, nbits, ereg, ebit) \
{ \
.desc = { \
.name = "LDO" #_id, \
@@ -406,33 +363,34 @@ static struct regulator_ops pm8607_regulator_ops = {
.id = PM8607_ID_LDO##_id, \
.owner = THIS_MODULE, \
}, \
- .min_uV = (min) * 1000, \
- .max_uV = (max) * 1000, \
- .step_uV = (step) * 1000, \
.vol_reg = PM8607_##vreg, \
.vol_shift = (shift), \
.vol_nbits = (nbits), \
.enable_reg = PM8607_##ereg, \
.enable_bit = (ebit), \
.slope_double = (0), \
+ .vol_table = (unsigned int *)&LDO##_id##_table, \
+ .vol_suspend = (unsigned int *)&LDO##_id##_suspend_table, \
}
static struct pm8607_regulator_info pm8607_regulator_info[] = {
- PM8607_DVC(1, 0, 1500, 25, BUCK1, 6, GO, 0, SUPPLIES_EN11, 0),
- PM8607_DVC(3, 0, 1500, 25, BUCK3, 6, GO, 2, SUPPLIES_EN11, 2),
-
- PM8607_LDO(1 , 1200, 2800, 0, LDO1 , 0, 2, SUPPLIES_EN11, 3),
- PM8607_LDO(2 , 1800, 3300, 0, LDO2 , 0, 3, SUPPLIES_EN11, 4),
- PM8607_LDO(3 , 1800, 3300, 0, LDO3 , 0, 3, SUPPLIES_EN11, 5),
- PM8607_LDO(4 , 1800, 3300, 0, LDO4 , 0, 3, SUPPLIES_EN11, 6),
- PM8607_LDO(5 , 2900, 3300, 0, LDO5 , 0, 2, SUPPLIES_EN11, 7),
- PM8607_LDO(6 , 1800, 3300, 0, LDO6 , 0, 3, SUPPLIES_EN12, 0),
- PM8607_LDO(7 , 1800, 2900, 0, LDO7 , 0, 3, SUPPLIES_EN12, 1),
- PM8607_LDO(8 , 1800, 2900, 0, LDO8 , 0, 3, SUPPLIES_EN12, 2),
- PM8607_LDO(9 , 1800, 3300, 0, LDO9 , 0, 3, SUPPLIES_EN12, 3),
- PM8607_LDO(10, 1200, 3300, 0, LDO10, 0, 4, SUPPLIES_EN11, 4),
- PM8607_LDO(12, 1200, 3300, 0, LDO12, 0, 4, SUPPLIES_EN11, 5),
- PM8607_LDO(14, 1800, 3300, 0, LDO14, 0, 3, SUPPLIES_EN11, 6),
+ PM8607_DVC(BUCK1, 6, GO, 0, SUPPLIES_EN11, 0),
+ PM8607_DVC(BUCK2, 6, GO, 1, SUPPLIES_EN11, 1),
+ PM8607_DVC(BUCK3, 6, GO, 2, SUPPLIES_EN11, 2),
+
+ PM8607_LDO( 1, LDO1, 0, 2, SUPPLIES_EN11, 3),
+ PM8607_LDO( 2, LDO2, 0, 3, SUPPLIES_EN11, 4),
+ PM8607_LDO( 3, LDO3, 0, 3, SUPPLIES_EN11, 5),
+ PM8607_LDO( 4, LDO4, 0, 3, SUPPLIES_EN11, 6),
+ PM8607_LDO( 5, LDO5, 0, 2, SUPPLIES_EN11, 7),
+ PM8607_LDO( 6, LDO6, 0, 3, SUPPLIES_EN12, 0),
+ PM8607_LDO( 7, LDO7, 0, 3, SUPPLIES_EN12, 1),
+ PM8607_LDO( 8, LDO8, 0, 3, SUPPLIES_EN12, 2),
+ PM8607_LDO( 9, LDO9, 0, 3, SUPPLIES_EN12, 3),
+ PM8607_LDO(10, LDO10, 0, 3, SUPPLIES_EN12, 4),
+ PM8607_LDO(12, LDO12, 0, 4, SUPPLIES_EN12, 5),
+ PM8607_LDO(13, VIBRATOR_SET, 1, 3, VIBRATOR_SET, 0),
+ PM8607_LDO(14, LDO14, 0, 4, SUPPLIES_EN12, 6),
};
static inline struct pm8607_regulator_info *find_regulator_info(int id)
@@ -484,60 +442,29 @@ static int __devexit pm8607_regulator_remove(struct platform_device *pdev)
{
struct pm8607_regulator_info *info = platform_get_drvdata(pdev);
+ platform_set_drvdata(pdev, NULL);
regulator_unregister(info->regulator);
return 0;
}
-#define PM8607_REGULATOR_DRIVER(_name) \
-{ \
- .driver = { \
- .name = "88pm8607-" #_name, \
- .owner = THIS_MODULE, \
- }, \
- .probe = pm8607_regulator_probe, \
- .remove = __devexit_p(pm8607_regulator_remove), \
-}
-
-static struct platform_driver pm8607_regulator_driver[] = {
- PM8607_REGULATOR_DRIVER(buck1),
- PM8607_REGULATOR_DRIVER(buck2),
- PM8607_REGULATOR_DRIVER(buck3),
- PM8607_REGULATOR_DRIVER(ldo1),
- PM8607_REGULATOR_DRIVER(ldo2),
- PM8607_REGULATOR_DRIVER(ldo3),
- PM8607_REGULATOR_DRIVER(ldo4),
- PM8607_REGULATOR_DRIVER(ldo5),
- PM8607_REGULATOR_DRIVER(ldo6),
- PM8607_REGULATOR_DRIVER(ldo7),
- PM8607_REGULATOR_DRIVER(ldo8),
- PM8607_REGULATOR_DRIVER(ldo9),
- PM8607_REGULATOR_DRIVER(ldo10),
- PM8607_REGULATOR_DRIVER(ldo12),
- PM8607_REGULATOR_DRIVER(ldo14),
+static struct platform_driver pm8607_regulator_driver = {
+ .driver = {
+ .name = "88pm860x-regulator",
+ .owner = THIS_MODULE,
+ },
+ .probe = pm8607_regulator_probe,
+ .remove = __devexit_p(pm8607_regulator_remove),
};
static int __init pm8607_regulator_init(void)
{
- int i, count, ret;
-
- count = ARRAY_SIZE(pm8607_regulator_driver);
- for (i = 0; i < count; i++) {
- ret = platform_driver_register(&pm8607_regulator_driver[i]);
- if (ret != 0)
- pr_err("Failed to register regulator driver: %d\n",
- ret);
- }
- return 0;
+ return platform_driver_register(&pm8607_regulator_driver);
}
subsys_initcall(pm8607_regulator_init);
static void __exit pm8607_regulator_exit(void)
{
- int i, count;
-
- count = ARRAY_SIZE(pm8607_regulator_driver);
- for (i = 0; i < count; i++)
- platform_driver_unregister(&pm8607_regulator_driver[i]);
+ platform_driver_unregister(&pm8607_regulator_driver);
}
module_exit(pm8607_regulator_exit);
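
The 88pm8607 rework replaces per-regulator voltage formulas with lookup tables: list_voltage() becomes a plain table read, and choose_voltage() a linear scan for the first entry inside the requested range, with the range halved first when slope_double is set and a zero entry terminating the usable selectors. A self-contained model of that selection, reusing the LDO5 table values shown above (the helper name and int return convention are illustrative):

#include <stdio.h>

static const unsigned int ldo5_table[] = { 2900000, 3000000, 3100000, 3300000 };

static int choose_voltage(const unsigned int *table, int len,
                          int min_uV, int max_uV, int slope_double)
{
    int i;

    if (slope_double) {
        min_uV >>= 1;
        max_uV >>= 1;
    }
    for (i = 0; i < len; i++) {
        if (!table[i])
            break;                      /* 0 marks an unusable selector */
        if (min_uV <= (int)table[i] && max_uV >= (int)table[i])
            return i;                   /* selector index into the table */
    }
    return -1;
}

int main(void)
{
    int sel = choose_voltage(ldo5_table, 4, 3000000, 3200000, 0);

    printf("selector %d -> %u uV\n", sel, sel >= 0 ? ldo5_table[sel] : 0);
    return 0;
}
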
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index 7de950959ed2..6580e261cd09 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -16,7 +16,7 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
-#include <linux/mfd/ab3100.h>
+#include <linux/mfd/abx500.h>
/* LDO registers and some handy masking definitions for AB3100 */
#define AB3100_LDO_A 0x40
@@ -41,7 +41,7 @@
* struct ab3100_regulator
* A struct passed around the individual regulator functions
* @platform_device: platform device holding this regulator
- * @ab3100: handle to the AB3100 parent chip
+ * @dev: handle to the device
* @plfdata: AB3100 platform data passed in at probe time
* @regreg: regulator register number in the AB3100
* @fixed_voltage: a fixed voltage for this regulator, if this
@@ -52,7 +52,7 @@
*/
struct ab3100_regulator {
struct regulator_dev *rdev;
- struct ab3100 *ab3100;
+ struct device *dev;
struct ab3100_platform_data *plfdata;
u8 regreg;
int fixed_voltage;
@@ -183,7 +183,7 @@ static int ab3100_enable_regulator(struct regulator_dev *reg)
int err;
u8 regval;
- err = ab3100_get_register_interruptible(abreg->ab3100, abreg->regreg,
+ err = abx500_get_register_interruptible(abreg->dev, 0, abreg->regreg,
&regval);
if (err) {
dev_warn(&reg->dev, "failed to get regid %d value\n",
@@ -197,7 +197,7 @@ static int ab3100_enable_regulator(struct regulator_dev *reg)
regval |= AB3100_REG_ON_MASK;
- err = ab3100_set_register_interruptible(abreg->ab3100, abreg->regreg,
+ err = abx500_set_register_interruptible(abreg->dev, 0, abreg->regreg,
regval);
if (err) {
dev_warn(&reg->dev, "failed to set regid %d value\n",
@@ -245,14 +245,14 @@ static int ab3100_disable_regulator(struct regulator_dev *reg)
if (abreg->regreg == AB3100_LDO_D) {
dev_info(&reg->dev, "disabling LDO D - shut down system\n");
/* Setting LDO D to 0x00 cuts the power to the SoC */
- return ab3100_set_register_interruptible(abreg->ab3100,
+ return abx500_set_register_interruptible(abreg->dev, 0,
AB3100_LDO_D, 0x00U);
}
/*
* All other regulators are handled here
*/
- err = ab3100_get_register_interruptible(abreg->ab3100, abreg->regreg,
+ err = abx500_get_register_interruptible(abreg->dev, 0, abreg->regreg,
&regval);
if (err) {
dev_err(&reg->dev, "unable to get register 0x%x\n",
@@ -260,7 +260,7 @@ static int ab3100_disable_regulator(struct regulator_dev *reg)
return err;
}
regval &= ~AB3100_REG_ON_MASK;
- return ab3100_set_register_interruptible(abreg->ab3100, abreg->regreg,
+ return abx500_set_register_interruptible(abreg->dev, 0, abreg->regreg,
regval);
}
@@ -270,7 +270,7 @@ static int ab3100_is_enabled_regulator(struct regulator_dev *reg)
u8 regval;
int err;
- err = ab3100_get_register_interruptible(abreg->ab3100, abreg->regreg,
+ err = abx500_get_register_interruptible(abreg->dev, 0, abreg->regreg,
&regval);
if (err) {
dev_err(&reg->dev, "unable to get register 0x%x\n",
@@ -305,7 +305,7 @@ static int ab3100_get_voltage_regulator(struct regulator_dev *reg)
* For variable types, read out setting and index into
* supplied voltage list.
*/
- err = ab3100_get_register_interruptible(abreg->ab3100,
+ err = abx500_get_register_interruptible(abreg->dev, 0,
abreg->regreg, &regval);
if (err) {
dev_warn(&reg->dev,
@@ -373,7 +373,7 @@ static int ab3100_set_voltage_regulator(struct regulator_dev *reg,
if (bestindex < 0)
return bestindex;
- err = ab3100_get_register_interruptible(abreg->ab3100,
+ err = abx500_get_register_interruptible(abreg->dev, 0,
abreg->regreg, &regval);
if (err) {
dev_warn(&reg->dev,
@@ -386,7 +386,7 @@ static int ab3100_set_voltage_regulator(struct regulator_dev *reg,
regval &= ~0xE0;
regval |= (bestindex << 5);
- err = ab3100_set_register_interruptible(abreg->ab3100,
+ err = abx500_set_register_interruptible(abreg->dev, 0,
abreg->regreg, regval);
if (err)
dev_warn(&reg->dev, "failed to set regulator register %02x\n",
@@ -414,7 +414,7 @@ static int ab3100_set_suspend_voltage_regulator(struct regulator_dev *reg,
/* LDO E and BUCK have special suspend voltages you can set */
bestindex = ab3100_get_best_voltage_index(reg, uV, uV);
- err = ab3100_get_register_interruptible(abreg->ab3100,
+ err = abx500_get_register_interruptible(abreg->dev, 0,
targetreg, &regval);
if (err) {
dev_warn(&reg->dev,
@@ -427,7 +427,7 @@ static int ab3100_set_suspend_voltage_regulator(struct regulator_dev *reg,
regval &= ~0xE0;
regval |= (bestindex << 5);
- err = ab3100_set_register_interruptible(abreg->ab3100,
+ err = abx500_set_register_interruptible(abreg->dev, 0,
targetreg, regval);
if (err)
dev_warn(&reg->dev, "failed to set regulator register %02x\n",
@@ -564,13 +564,12 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
static int __devinit ab3100_regulators_probe(struct platform_device *pdev)
{
struct ab3100_platform_data *plfdata = pdev->dev.platform_data;
- struct ab3100 *ab3100 = platform_get_drvdata(pdev);
int err = 0;
u8 data;
int i;
/* Check chip state */
- err = ab3100_get_register_interruptible(ab3100,
+ err = abx500_get_register_interruptible(&pdev->dev, 0,
AB3100_LDO_D, &data);
if (err) {
dev_err(&pdev->dev, "could not read initial status of LDO_D\n");
@@ -585,7 +584,7 @@ static int __devinit ab3100_regulators_probe(struct platform_device *pdev)
/* Set up regulators */
for (i = 0; i < ARRAY_SIZE(ab3100_reg_init_order); i++) {
- err = ab3100_set_register_interruptible(ab3100,
+ err = abx500_set_register_interruptible(&pdev->dev, 0,
ab3100_reg_init_order[i],
plfdata->reg_initvals[i]);
if (err) {
@@ -607,7 +606,7 @@ static int __devinit ab3100_regulators_probe(struct platform_device *pdev)
* see what it looks like for a certain machine, go
* into the machine I2C setup.
*/
- reg->ab3100 = ab3100;
+ reg->dev = &pdev->dev;
reg->plfdata = plfdata;
/*
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 2b4e40d31190..98e5d14f94f3 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -944,8 +944,13 @@ static int set_consumer_device_supply(struct regulator_dev *rdev,
has_dev = 0;
list_for_each_entry(node, &regulator_map_list, list) {
- if (consumer_dev_name != node->dev_name)
+ if (node->dev_name && consumer_dev_name) {
+ if (strcmp(node->dev_name, consumer_dev_name) != 0)
+ continue;
+ } else if (node->dev_name || consumer_dev_name) {
continue;
+ }
+
if (strcmp(node->supply, supply) != 0)
continue;
@@ -976,29 +981,6 @@ static int set_consumer_device_supply(struct regulator_dev *rdev,
return 0;
}
-static void unset_consumer_device_supply(struct regulator_dev *rdev,
- const char *consumer_dev_name, struct device *consumer_dev)
-{
- struct regulator_map *node, *n;
-
- if (consumer_dev && !consumer_dev_name)
- consumer_dev_name = dev_name(consumer_dev);
-
- list_for_each_entry_safe(node, n, &regulator_map_list, list) {
- if (rdev != node->regulator)
- continue;
-
- if (consumer_dev_name && node->dev_name &&
- strcmp(consumer_dev_name, node->dev_name))
- continue;
-
- list_del(&node->list);
- kfree(node->dev_name);
- kfree(node);
- return;
- }
-}
-
static void unset_regulator_supplies(struct regulator_dev *rdev)
{
struct regulator_map *node, *n;
@@ -1008,7 +990,6 @@ static void unset_regulator_supplies(struct regulator_dev *rdev)
list_del(&node->list);
kfree(node->dev_name);
kfree(node);
- return;
}
}
}
@@ -1540,7 +1521,7 @@ EXPORT_SYMBOL_GPL(regulator_count_voltages);
* Context: can sleep
*
* Returns a voltage that can be passed to @regulator_set_voltage(),
- * zero if this selector code can't be used on this sytem, or a
+ * zero if this selector code can't be used on this system, or a
* negative errno.
*/
int regulator_list_voltage(struct regulator *regulator, unsigned selector)
@@ -2328,7 +2309,37 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
goto scrub;
/* set supply regulator if it exists */
+ if (init_data->supply_regulator && init_data->supply_regulator_dev) {
+ dev_err(dev,
+ "Supply regulator specified by both name and dev\n");
+ goto scrub;
+ }
+
+ if (init_data->supply_regulator) {
+ struct regulator_dev *r;
+ int found = 0;
+
+ list_for_each_entry(r, &regulator_list, list) {
+ if (strcmp(rdev_get_name(r),
+ init_data->supply_regulator) == 0) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ dev_err(dev, "Failed to find supply %s\n",
+ init_data->supply_regulator);
+ goto scrub;
+ }
+
+ ret = set_supply(rdev, r);
+ if (ret < 0)
+ goto scrub;
+ }
+
if (init_data->supply_regulator_dev) {
+ dev_warn(dev, "Uses supply_regulator_dev instead of regulator_supply\n");
ret = set_supply(rdev,
dev_get_drvdata(init_data->supply_regulator_dev));
if (ret < 0)
@@ -2341,13 +2352,8 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
init_data->consumer_supplies[i].dev,
init_data->consumer_supplies[i].dev_name,
init_data->consumer_supplies[i].supply);
- if (ret < 0) {
- for (--i; i >= 0; i--)
- unset_consumer_device_supply(rdev,
- init_data->consumer_supplies[i].dev_name,
- init_data->consumer_supplies[i].dev);
- goto scrub;
- }
+ if (ret < 0)
+ goto unset_supplies;
}
list_add(&rdev->list, &regulator_list);
@@ -2355,6 +2361,9 @@ out:
mutex_unlock(&regulator_list_mutex);
return rdev;
+unset_supplies:
+ unset_regulator_supplies(rdev);
+
scrub:
device_unregister(&rdev->dev);
/* device core frees rdev */
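
The core change lets machine drivers name their supply regulator as a string (supply_regulator) instead of passing a struct device; registration walks the list of already-registered regulators, fails if no name matches, and the new unset_supplies error path unwinds any consumer mappings set up before the failure. A minimal model of the by-name lookup; the list and struct below are stand-ins, not the regulator core's data structures:

#include <stdio.h>
#include <string.h>

struct rdev {
    const char *name;
};

static struct rdev registered[] = { { "BUCK1" }, { "LDO5" } };

static struct rdev *find_supply(const char *supply_regulator)
{
    unsigned int i;

    for (i = 0; i < sizeof(registered) / sizeof(registered[0]); i++)
        if (strcmp(registered[i].name, supply_regulator) == 0)
            return &registered[i];
    return NULL;    /* caller must fail registration, as the core does */
}

int main(void)
{
    struct rdev *r = find_supply("LDO5");

    printf("supply %s\n", r ? r->name : "not found");
    return 0;
}
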
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
index ad036dd8da13..4597d508a229 100644
--- a/drivers/regulator/mc13783-regulator.c
+++ b/drivers/regulator/mc13783-regulator.c
@@ -440,8 +440,8 @@ static int mc13783_fixed_regulator_set_voltage(struct regulator_dev *rdev,
dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
__func__, id, min_uV, max_uV);
- if (min_uV > mc13783_regulators[id].voltages[0] &&
- max_uV < mc13783_regulators[id].voltages[0])
+ if (min_uV >= mc13783_regulators[id].voltages[0] &&
+ max_uV <= mc13783_regulators[id].voltages[0])
return 0;
else
return -EINVAL;
@@ -649,6 +649,6 @@ static void __exit mc13783_regulator_exit(void)
module_exit(mc13783_regulator_exit);
MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de");
+MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("Regulator Driver for Freescale MC13783 PMIC");
MODULE_ALIAS("platform:mc13783-regulator");
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index 74841abcc9cc..14b4576281c5 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -22,68 +22,9 @@
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
-#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/slab.h>
-
-/* Register definitions */
-#define TPS6507X_REG_PPATH1 0X01
-#define TPS6507X_REG_INT 0X02
-#define TPS6507X_REG_CHGCONFIG0 0X03
-#define TPS6507X_REG_CHGCONFIG1 0X04
-#define TPS6507X_REG_CHGCONFIG2 0X05
-#define TPS6507X_REG_CHGCONFIG3 0X06
-#define TPS6507X_REG_REG_ADCONFIG 0X07
-#define TPS6507X_REG_TSCMODE 0X08
-#define TPS6507X_REG_ADRESULT_1 0X09
-#define TPS6507X_REG_ADRESULT_2 0X0A
-#define TPS6507X_REG_PGOOD 0X0B
-#define TPS6507X_REG_PGOODMASK 0X0C
-#define TPS6507X_REG_CON_CTRL1 0X0D
-#define TPS6507X_REG_CON_CTRL2 0X0E
-#define TPS6507X_REG_CON_CTRL3 0X0F
-#define TPS6507X_REG_DEFDCDC1 0X10
-#define TPS6507X_REG_DEFDCDC2_LOW 0X11
-#define TPS6507X_REG_DEFDCDC2_HIGH 0X12
-#define TPS6507X_REG_DEFDCDC3_LOW 0X13
-#define TPS6507X_REG_DEFDCDC3_HIGH 0X14
-#define TPS6507X_REG_DEFSLEW 0X15
-#define TPS6507X_REG_LDO_CTRL1 0X16
-#define TPS6507X_REG_DEFLDO2 0X17
-#define TPS6507X_REG_WLED_CTRL1 0X18
-#define TPS6507X_REG_WLED_CTRL2 0X19
-
-/* CON_CTRL1 bitfields */
-#define TPS6507X_CON_CTRL1_DCDC1_ENABLE BIT(4)
-#define TPS6507X_CON_CTRL1_DCDC2_ENABLE BIT(3)
-#define TPS6507X_CON_CTRL1_DCDC3_ENABLE BIT(2)
-#define TPS6507X_CON_CTRL1_LDO1_ENABLE BIT(1)
-#define TPS6507X_CON_CTRL1_LDO2_ENABLE BIT(0)
-
-/* DEFDCDC1 bitfields */
-#define TPS6507X_DEFDCDC1_DCDC1_EXT_ADJ_EN BIT(7)
-#define TPS6507X_DEFDCDC1_DCDC1_MASK 0X3F
-
-/* DEFDCDC2_LOW bitfields */
-#define TPS6507X_DEFDCDC2_LOW_DCDC2_MASK 0X3F
-
-/* DEFDCDC2_HIGH bitfields */
-#define TPS6507X_DEFDCDC2_HIGH_DCDC2_MASK 0X3F
-
-/* DEFDCDC3_LOW bitfields */
-#define TPS6507X_DEFDCDC3_LOW_DCDC3_MASK 0X3F
-
-/* DEFDCDC3_HIGH bitfields */
-#define TPS6507X_DEFDCDC3_HIGH_DCDC3_MASK 0X3F
-
-/* TPS6507X_REG_LDO_CTRL1 bitfields */
-#define TPS6507X_REG_LDO_CTRL1_LDO1_MASK 0X0F
-
-/* TPS6507X_REG_DEFLDO2 bitfields */
-#define TPS6507X_REG_DEFLDO2_LDO2_MASK 0X3F
-
-/* VDCDC MASK */
-#define TPS6507X_DEFDCDCX_DCDC_MASK 0X3F
+#include <linux/mfd/tps6507x.h>
/* DCDC's */
#define TPS6507X_DCDC_1 0
@@ -162,101 +103,146 @@ struct tps_info {
const u16 *table;
};
-struct tps_pmic {
+static const struct tps_info tps6507x_pmic_regs[] = {
+ {
+ .name = "VDCDC1",
+ .min_uV = 725000,
+ .max_uV = 3300000,
+ .table_len = ARRAY_SIZE(VDCDCx_VSEL_table),
+ .table = VDCDCx_VSEL_table,
+ },
+ {
+ .name = "VDCDC2",
+ .min_uV = 725000,
+ .max_uV = 3300000,
+ .table_len = ARRAY_SIZE(VDCDCx_VSEL_table),
+ .table = VDCDCx_VSEL_table,
+ },
+ {
+ .name = "VDCDC3",
+ .min_uV = 725000,
+ .max_uV = 3300000,
+ .table_len = ARRAY_SIZE(VDCDCx_VSEL_table),
+ .table = VDCDCx_VSEL_table,
+ },
+ {
+ .name = "LDO1",
+ .min_uV = 1000000,
+ .max_uV = 3300000,
+ .table_len = ARRAY_SIZE(LDO1_VSEL_table),
+ .table = LDO1_VSEL_table,
+ },
+ {
+ .name = "LDO2",
+ .min_uV = 725000,
+ .max_uV = 3300000,
+ .table_len = ARRAY_SIZE(LDO2_VSEL_table),
+ .table = LDO2_VSEL_table,
+ },
+};
+
+struct tps6507x_pmic {
struct regulator_desc desc[TPS6507X_NUM_REGULATOR];
- struct i2c_client *client;
+ struct tps6507x_dev *mfd;
struct regulator_dev *rdev[TPS6507X_NUM_REGULATOR];
const struct tps_info *info[TPS6507X_NUM_REGULATOR];
struct mutex io_lock;
};
-
-static inline int tps_6507x_read(struct tps_pmic *tps, u8 reg)
+static inline int tps6507x_pmic_read(struct tps6507x_pmic *tps, u8 reg)
{
- return i2c_smbus_read_byte_data(tps->client, reg);
+ u8 val;
+ int err;
+
+ err = tps->mfd->read_dev(tps->mfd, reg, 1, &val);
+
+ if (err)
+ return err;
+
+ return val;
}
-static inline int tps_6507x_write(struct tps_pmic *tps, u8 reg, u8 val)
+static inline int tps6507x_pmic_write(struct tps6507x_pmic *tps, u8 reg, u8 val)
{
- return i2c_smbus_write_byte_data(tps->client, reg, val);
+ return tps->mfd->write_dev(tps->mfd, reg, 1, &val);
}
-static int tps_6507x_set_bits(struct tps_pmic *tps, u8 reg, u8 mask)
+static int tps6507x_pmic_set_bits(struct tps6507x_pmic *tps, u8 reg, u8 mask)
{
int err, data;
mutex_lock(&tps->io_lock);
- data = tps_6507x_read(tps, reg);
+ data = tps6507x_pmic_read(tps, reg);
if (data < 0) {
- dev_err(&tps->client->dev, "Read from reg 0x%x failed\n", reg);
+ dev_err(tps->mfd->dev, "Read from reg 0x%x failed\n", reg);
err = data;
goto out;
}
data |= mask;
- err = tps_6507x_write(tps, reg, data);
+ err = tps6507x_pmic_write(tps, reg, data);
if (err)
- dev_err(&tps->client->dev, "Write for reg 0x%x failed\n", reg);
+ dev_err(tps->mfd->dev, "Write for reg 0x%x failed\n", reg);
out:
mutex_unlock(&tps->io_lock);
return err;
}
-static int tps_6507x_clear_bits(struct tps_pmic *tps, u8 reg, u8 mask)
+static int tps6507x_pmic_clear_bits(struct tps6507x_pmic *tps, u8 reg, u8 mask)
{
int err, data;
mutex_lock(&tps->io_lock);
- data = tps_6507x_read(tps, reg);
+ data = tps6507x_pmic_read(tps, reg);
if (data < 0) {
- dev_err(&tps->client->dev, "Read from reg 0x%x failed\n", reg);
+ dev_err(tps->mfd->dev, "Read from reg 0x%x failed\n", reg);
err = data;
goto out;
}
data &= ~mask;
- err = tps_6507x_write(tps, reg, data);
+ err = tps6507x_pmic_write(tps, reg, data);
if (err)
- dev_err(&tps->client->dev, "Write for reg 0x%x failed\n", reg);
+ dev_err(tps->mfd->dev, "Write for reg 0x%x failed\n", reg);
out:
mutex_unlock(&tps->io_lock);
return err;
}
-static int tps_6507x_reg_read(struct tps_pmic *tps, u8 reg)
+static int tps6507x_pmic_reg_read(struct tps6507x_pmic *tps, u8 reg)
{
int data;
mutex_lock(&tps->io_lock);
- data = tps_6507x_read(tps, reg);
+ data = tps6507x_pmic_read(tps, reg);
if (data < 0)
- dev_err(&tps->client->dev, "Read from reg 0x%x failed\n", reg);
+ dev_err(tps->mfd->dev, "Read from reg 0x%x failed\n", reg);
mutex_unlock(&tps->io_lock);
return data;
}
-static int tps_6507x_reg_write(struct tps_pmic *tps, u8 reg, u8 val)
+static int tps6507x_pmic_reg_write(struct tps6507x_pmic *tps, u8 reg, u8 val)
{
int err;
mutex_lock(&tps->io_lock);
- err = tps_6507x_write(tps, reg, val);
+ err = tps6507x_pmic_write(tps, reg, val);
if (err < 0)
- dev_err(&tps->client->dev, "Write for reg 0x%x failed\n", reg);
+ dev_err(tps->mfd->dev, "Write for reg 0x%x failed\n", reg);
mutex_unlock(&tps->io_lock);
return err;
}
-static int tps6507x_dcdc_is_enabled(struct regulator_dev *dev)
+static int tps6507x_pmic_dcdc_is_enabled(struct regulator_dev *dev)
{
- struct tps_pmic *tps = rdev_get_drvdata(dev);
+ struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
int data, dcdc = rdev_get_id(dev);
u8 shift;
@@ -264,7 +250,7 @@ static int tps6507x_dcdc_is_enabled(struct regulator_dev *dev)
return -EINVAL;
shift = TPS6507X_MAX_REG_ID - dcdc;
- data = tps_6507x_reg_read(tps, TPS6507X_REG_CON_CTRL1);
+ data = tps6507x_pmic_reg_read(tps, TPS6507X_REG_CON_CTRL1);
if (data < 0)
return data;
@@ -272,9 +258,9 @@ static int tps6507x_dcdc_is_enabled(struct regulator_dev *dev)
return (data & 1<<shift) ? 1 : 0;
}
-static int tps6507x_ldo_is_enabled(struct regulator_dev *dev)
+static int tps6507x_pmic_ldo_is_enabled(struct regulator_dev *dev)
{
- struct tps_pmic *tps = rdev_get_drvdata(dev);
+ struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
int data, ldo = rdev_get_id(dev);
u8 shift;
@@ -282,7 +268,7 @@ static int tps6507x_ldo_is_enabled(struct regulator_dev *dev)
return -EINVAL;
shift = TPS6507X_MAX_REG_ID - ldo;
- data = tps_6507x_reg_read(tps, TPS6507X_REG_CON_CTRL1);
+ data = tps6507x_pmic_reg_read(tps, TPS6507X_REG_CON_CTRL1);
if (data < 0)
return data;
@@ -290,9 +276,9 @@ static int tps6507x_ldo_is_enabled(struct regulator_dev *dev)
return (data & 1<<shift) ? 1 : 0;
}
-static int tps6507x_dcdc_enable(struct regulator_dev *dev)
+static int tps6507x_pmic_dcdc_enable(struct regulator_dev *dev)
{
- struct tps_pmic *tps = rdev_get_drvdata(dev);
+ struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
int dcdc = rdev_get_id(dev);
u8 shift;
@@ -300,12 +286,12 @@ static int tps6507x_dcdc_enable(struct regulator_dev *dev)
return -EINVAL;
shift = TPS6507X_MAX_REG_ID - dcdc;
- return tps_6507x_set_bits(tps, TPS6507X_REG_CON_CTRL1, 1 << shift);
+ return tps6507x_pmic_set_bits(tps, TPS6507X_REG_CON_CTRL1, 1 << shift);
}
-static int tps6507x_dcdc_disable(struct regulator_dev *dev)
+static int tps6507x_pmic_dcdc_disable(struct regulator_dev *dev)
{
- struct tps_pmic *tps = rdev_get_drvdata(dev);
+ struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
int dcdc = rdev_get_id(dev);
u8 shift;
@@ -313,12 +299,13 @@ static int tps6507x_dcdc_disable(struct regulator_dev *dev)
return -EINVAL;
shift = TPS6507X_MAX_REG_ID - dcdc;
- return tps_6507x_clear_bits(tps, TPS6507X_REG_CON_CTRL1, 1 << shift);
+ return tps6507x_pmic_clear_bits(tps, TPS6507X_REG_CON_CTRL1,
+ 1 << shift);
}
-static int tps6507x_ldo_enable(struct regulator_dev *dev)
+static int tps6507x_pmic_ldo_enable(struct regulator_dev *dev)
{
- struct tps_pmic *tps = rdev_get_drvdata(dev);
+ struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
int ldo = rdev_get_id(dev);
u8 shift;
@@ -326,12 +313,12 @@ static int tps6507x_ldo_enable(struct regulator_dev *dev)
return -EINVAL;
shift = TPS6507X_MAX_REG_ID - ldo;
- return tps_6507x_set_bits(tps, TPS6507X_REG_CON_CTRL1, 1 << shift);
+ return tps6507x_pmic_set_bits(tps, TPS6507X_REG_CON_CTRL1, 1 << shift);
}
-static int tps6507x_ldo_disable(struct regulator_dev *dev)
+static int tps6507x_pmic_ldo_disable(struct regulator_dev *dev)
{
- struct tps_pmic *tps = rdev_get_drvdata(dev);
+ struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
int ldo = rdev_get_id(dev);
u8 shift;
@@ -339,12 +326,13 @@ static int tps6507x_ldo_disable(struct regulator_dev *dev)
return -EINVAL;
shift = TPS6507X_MAX_REG_ID - ldo;
- return tps_6507x_clear_bits(tps, TPS6507X_REG_CON_CTRL1, 1 << shift);
+ return tps6507x_pmic_clear_bits(tps, TPS6507X_REG_CON_CTRL1,
+ 1 << shift);
}
-static int tps6507x_dcdc_get_voltage(struct regulator_dev *dev)
+static int tps6507x_pmic_dcdc_get_voltage(struct regulator_dev *dev)
{
- struct tps_pmic *tps = rdev_get_drvdata(dev);
+ struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
int data, dcdc = rdev_get_id(dev);
u8 reg;
@@ -362,7 +350,7 @@ static int tps6507x_dcdc_get_voltage(struct regulator_dev *dev)
return -EINVAL;
}
- data = tps_6507x_reg_read(tps, reg);
+ data = tps6507x_pmic_reg_read(tps, reg);
if (data < 0)
return data;
@@ -370,10 +358,10 @@ static int tps6507x_dcdc_get_voltage(struct regulator_dev *dev)
return tps->info[dcdc]->table[data] * 1000;
}
-static int tps6507x_dcdc_set_voltage(struct regulator_dev *dev,
+static int tps6507x_pmic_dcdc_set_voltage(struct regulator_dev *dev,
int min_uV, int max_uV)
{
- struct tps_pmic *tps = rdev_get_drvdata(dev);
+ struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
int data, vsel, dcdc = rdev_get_id(dev);
u8 reg;
@@ -411,19 +399,19 @@ static int tps6507x_dcdc_set_voltage(struct regulator_dev *dev,
if (vsel == tps->info[dcdc]->table_len)
return -EINVAL;
- data = tps_6507x_reg_read(tps, reg);
+ data = tps6507x_pmic_reg_read(tps, reg);
if (data < 0)
return data;
data &= ~TPS6507X_DEFDCDCX_DCDC_MASK;
data |= vsel;
- return tps_6507x_reg_write(tps, reg, data);
+ return tps6507x_pmic_reg_write(tps, reg, data);
}
-static int tps6507x_ldo_get_voltage(struct regulator_dev *dev)
+static int tps6507x_pmic_ldo_get_voltage(struct regulator_dev *dev)
{
- struct tps_pmic *tps = rdev_get_drvdata(dev);
+ struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
int data, ldo = rdev_get_id(dev);
u8 reg, mask;
@@ -437,7 +425,7 @@ static int tps6507x_ldo_get_voltage(struct regulator_dev *dev)
TPS6507X_REG_DEFLDO2_LDO2_MASK);
}
- data = tps_6507x_reg_read(tps, reg);
+ data = tps6507x_pmic_reg_read(tps, reg);
if (data < 0)
return data;
@@ -445,10 +433,10 @@ static int tps6507x_ldo_get_voltage(struct regulator_dev *dev)
return tps->info[ldo]->table[data] * 1000;
}
-static int tps6507x_ldo_set_voltage(struct regulator_dev *dev,
+static int tps6507x_pmic_ldo_set_voltage(struct regulator_dev *dev,
int min_uV, int max_uV)
{
- struct tps_pmic *tps = rdev_get_drvdata(dev);
+ struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
int data, vsel, ldo = rdev_get_id(dev);
u8 reg, mask;
@@ -479,20 +467,20 @@ static int tps6507x_ldo_set_voltage(struct regulator_dev *dev,
if (vsel == tps->info[ldo]->table_len)
return -EINVAL;
- data = tps_6507x_reg_read(tps, reg);
+ data = tps6507x_pmic_reg_read(tps, reg);
if (data < 0)
return data;
data &= ~mask;
data |= vsel;
- return tps_6507x_reg_write(tps, reg, data);
+ return tps6507x_pmic_reg_write(tps, reg, data);
}
-static int tps6507x_dcdc_list_voltage(struct regulator_dev *dev,
+static int tps6507x_pmic_dcdc_list_voltage(struct regulator_dev *dev,
unsigned selector)
{
- struct tps_pmic *tps = rdev_get_drvdata(dev);
+ struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
int dcdc = rdev_get_id(dev);
if (dcdc < TPS6507X_DCDC_1 || dcdc > TPS6507X_DCDC_3)
@@ -504,10 +492,10 @@ static int tps6507x_dcdc_list_voltage(struct regulator_dev *dev,
return tps->info[dcdc]->table[selector] * 1000;
}
-static int tps6507x_ldo_list_voltage(struct regulator_dev *dev,
+static int tps6507x_pmic_ldo_list_voltage(struct regulator_dev *dev,
unsigned selector)
{
- struct tps_pmic *tps = rdev_get_drvdata(dev);
+ struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
int ldo = rdev_get_id(dev);
if (ldo < TPS6507X_LDO_1 || ldo > TPS6507X_LDO_2)
@@ -520,47 +508,54 @@ static int tps6507x_ldo_list_voltage(struct regulator_dev *dev,
}
/* Operations permitted on VDCDCx */
-static struct regulator_ops tps6507x_dcdc_ops = {
- .is_enabled = tps6507x_dcdc_is_enabled,
- .enable = tps6507x_dcdc_enable,
- .disable = tps6507x_dcdc_disable,
- .get_voltage = tps6507x_dcdc_get_voltage,
- .set_voltage = tps6507x_dcdc_set_voltage,
- .list_voltage = tps6507x_dcdc_list_voltage,
+static struct regulator_ops tps6507x_pmic_dcdc_ops = {
+ .is_enabled = tps6507x_pmic_dcdc_is_enabled,
+ .enable = tps6507x_pmic_dcdc_enable,
+ .disable = tps6507x_pmic_dcdc_disable,
+ .get_voltage = tps6507x_pmic_dcdc_get_voltage,
+ .set_voltage = tps6507x_pmic_dcdc_set_voltage,
+ .list_voltage = tps6507x_pmic_dcdc_list_voltage,
};
/* Operations permitted on LDOx */
-static struct regulator_ops tps6507x_ldo_ops = {
- .is_enabled = tps6507x_ldo_is_enabled,
- .enable = tps6507x_ldo_enable,
- .disable = tps6507x_ldo_disable,
- .get_voltage = tps6507x_ldo_get_voltage,
- .set_voltage = tps6507x_ldo_set_voltage,
- .list_voltage = tps6507x_ldo_list_voltage,
+static struct regulator_ops tps6507x_pmic_ldo_ops = {
+ .is_enabled = tps6507x_pmic_ldo_is_enabled,
+ .enable = tps6507x_pmic_ldo_enable,
+ .disable = tps6507x_pmic_ldo_disable,
+ .get_voltage = tps6507x_pmic_ldo_get_voltage,
+ .set_voltage = tps6507x_pmic_ldo_set_voltage,
+ .list_voltage = tps6507x_pmic_ldo_list_voltage,
};
-static int __devinit tps_6507x_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static __devinit
+int tps6507x_pmic_probe(struct platform_device *pdev)
{
+ struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent);
static int desc_id;
- const struct tps_info *info = (void *)id->driver_data;
+ const struct tps_info *info = &tps6507x_pmic_regs[0];
struct regulator_init_data *init_data;
struct regulator_dev *rdev;
- struct tps_pmic *tps;
+ struct tps6507x_pmic *tps;
+ struct tps6507x_board *tps_board;
int i;
int error;
- if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_BYTE_DATA))
- return -EIO;
+ /**
+ * tps_board points to pmic related constants
+ * coming from the board-evm file.
+ */
+
+ tps_board = dev_get_platdata(tps6507x_dev->dev);
+ if (!tps_board)
+ return -EINVAL;
/**
* init_data points to array of regulator_init structures
* coming from the board-evm file.
*/
- init_data = client->dev.platform_data;
+ init_data = tps_board->tps6507x_pmic_init_data;
if (!init_data)
- return -EIO;
+ return -EINVAL;
tps = kzalloc(sizeof(*tps), GFP_KERNEL);
if (!tps)
@@ -569,7 +564,7 @@ static int __devinit tps_6507x_probe(struct i2c_client *client,
mutex_init(&tps->io_lock);
/* common for all regulators */
- tps->client = client;
+ tps->mfd = tps6507x_dev;
for (i = 0; i < TPS6507X_NUM_REGULATOR; i++, info++, init_data++) {
/* Register the regulators */
@@ -578,15 +573,16 @@ static int __devinit tps_6507x_probe(struct i2c_client *client,
tps->desc[i].id = desc_id++;
tps->desc[i].n_voltages = num_voltages[i];
tps->desc[i].ops = (i > TPS6507X_DCDC_3 ?
- &tps6507x_ldo_ops : &tps6507x_dcdc_ops);
+ &tps6507x_pmic_ldo_ops : &tps6507x_pmic_dcdc_ops);
tps->desc[i].type = REGULATOR_VOLTAGE;
tps->desc[i].owner = THIS_MODULE;
rdev = regulator_register(&tps->desc[i],
- &client->dev, init_data, tps);
+ tps6507x_dev->dev, init_data, tps);
if (IS_ERR(rdev)) {
- dev_err(&client->dev, "failed to register %s\n",
- id->name);
+ dev_err(tps6507x_dev->dev,
+ "failed to register %s regulator\n",
+ pdev->name);
error = PTR_ERR(rdev);
goto fail;
}
@@ -595,7 +591,7 @@ static int __devinit tps_6507x_probe(struct i2c_client *client,
tps->rdev[i] = rdev;
}
- i2c_set_clientdata(client, tps);
+ tps6507x_dev->pmic = tps;
return 0;
@@ -608,19 +604,17 @@ fail:
}
/**
- * tps_6507x_remove - TPS6507x driver i2c remove handler
+ * tps6507x_pmic_remove - TPS6507x driver remove handler
* @client: i2c driver client device structure
*
* Unregister TPS driver as an i2c client device driver
*/
-static int __devexit tps_6507x_remove(struct i2c_client *client)
+static int __devexit tps6507x_pmic_remove(struct platform_device *pdev)
{
- struct tps_pmic *tps = i2c_get_clientdata(client);
+ struct tps6507x_dev *tps6507x_dev = platform_get_drvdata(pdev);
+ struct tps6507x_pmic *tps = tps6507x_dev->pmic;
int i;
- /* clear the client data in i2c */
- i2c_set_clientdata(client, NULL);
-
for (i = 0; i < TPS6507X_NUM_REGULATOR; i++)
regulator_unregister(tps->rdev[i]);
@@ -629,83 +623,38 @@ static int __devexit tps_6507x_remove(struct i2c_client *client)
return 0;
}
-static const struct tps_info tps6507x_regs[] = {
- {
- .name = "VDCDC1",
- .min_uV = 725000,
- .max_uV = 3300000,
- .table_len = ARRAY_SIZE(VDCDCx_VSEL_table),
- .table = VDCDCx_VSEL_table,
- },
- {
- .name = "VDCDC2",
- .min_uV = 725000,
- .max_uV = 3300000,
- .table_len = ARRAY_SIZE(VDCDCx_VSEL_table),
- .table = VDCDCx_VSEL_table,
- },
- {
- .name = "VDCDC3",
- .min_uV = 725000,
- .max_uV = 3300000,
- .table_len = ARRAY_SIZE(VDCDCx_VSEL_table),
- .table = VDCDCx_VSEL_table,
- },
- {
- .name = "LDO1",
- .min_uV = 1000000,
- .max_uV = 3300000,
- .table_len = ARRAY_SIZE(LDO1_VSEL_table),
- .table = LDO1_VSEL_table,
- },
- {
- .name = "LDO2",
- .min_uV = 725000,
- .max_uV = 3300000,
- .table_len = ARRAY_SIZE(LDO2_VSEL_table),
- .table = LDO2_VSEL_table,
- },
-};
-
-static const struct i2c_device_id tps_6507x_id[] = {
- {.name = "tps6507x",
- .driver_data = (unsigned long) tps6507x_regs,},
- { },
-};
-MODULE_DEVICE_TABLE(i2c, tps_6507x_id);
-
-static struct i2c_driver tps_6507x_i2c_driver = {
+static struct platform_driver tps6507x_pmic_driver = {
.driver = {
- .name = "tps6507x",
+ .name = "tps6507x-pmic",
.owner = THIS_MODULE,
},
- .probe = tps_6507x_probe,
- .remove = __devexit_p(tps_6507x_remove),
- .id_table = tps_6507x_id,
+ .probe = tps6507x_pmic_probe,
+ .remove = __devexit_p(tps6507x_pmic_remove),
};
/**
- * tps_6507x_init
+ * tps6507x_pmic_init
*
* Module init function
*/
-static int __init tps_6507x_init(void)
+static int __init tps6507x_pmic_init(void)
{
- return i2c_add_driver(&tps_6507x_i2c_driver);
+ return platform_driver_register(&tps6507x_pmic_driver);
}
-subsys_initcall(tps_6507x_init);
+subsys_initcall(tps6507x_pmic_init);
/**
- * tps_6507x_cleanup
+ * tps6507x_pmic_cleanup
*
* Module exit function
*/
-static void __exit tps_6507x_cleanup(void)
+static void __exit tps6507x_pmic_cleanup(void)
{
- i2c_del_driver(&tps_6507x_i2c_driver);
+ platform_driver_unregister(&tps6507x_pmic_driver);
}
-module_exit(tps_6507x_cleanup);
+module_exit(tps6507x_pmic_cleanup);
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("TPS6507x voltage regulator driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:tps6507x-pmic");
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 6a1303759432..50ac047cd136 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -620,6 +620,16 @@ config RTC_DRV_NUC900
comment "on-CPU RTC drivers"
+config RTC_DRV_DAVINCI
+ tristate "TI DaVinci RTC"
+ depends on ARCH_DAVINCI_DM365
+ help
+ If you say yes here you get support for the RTC on the
+ DaVinci platforms (DM365).
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-davinci.
+
config RTC_DRV_OMAP
tristate "TI OMAP1"
depends on ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_DAVINCI_DA8XX
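On top of ARCH_DAVINCI_DM365, a config fragment along these lines would pull the new driver into a build (module build assumed here):

    CONFIG_RTC_CLASS=y
    CONFIG_RTC_DRV_DAVINCI=m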
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 44ef194a9573..245311a1348f 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_RTC_DRV_BQ32K) += rtc-bq32k.o
obj-$(CONFIG_RTC_DRV_BQ4802) += rtc-bq4802.o
obj-$(CONFIG_RTC_DRV_CMOS) += rtc-cmos.o
obj-$(CONFIG_RTC_DRV_COH901331) += rtc-coh901331.o
+obj-$(CONFIG_RTC_DRV_DAVINCI) += rtc-davinci.o
obj-$(CONFIG_RTC_DRV_DM355EVM) += rtc-dm355evm.o
obj-$(CONFIG_RTC_DRV_DS1216) += rtc-ds1216.o
obj-$(CONFIG_RTC_DRV_DS1286) += rtc-ds1286.o
diff --git a/drivers/rtc/rtc-ab3100.c b/drivers/rtc/rtc-ab3100.c
index 4704aac2b5af..d26780ea254b 100644
--- a/drivers/rtc/rtc-ab3100.c
+++ b/drivers/rtc/rtc-ab3100.c
@@ -9,7 +9,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
-#include <linux/mfd/ab3100.h>
+#include <linux/mfd/abx500.h>
/* Clock rate in Hz */
#define AB3100_RTC_CLOCK_RATE 32768
@@ -45,7 +45,6 @@
*/
static int ab3100_rtc_set_mmss(struct device *dev, unsigned long secs)
{
- struct ab3100 *ab3100_data = dev_get_drvdata(dev);
u8 regs[] = {AB3100_TI0, AB3100_TI1, AB3100_TI2,
AB3100_TI3, AB3100_TI4, AB3100_TI5};
unsigned char buf[6];
@@ -61,27 +60,26 @@ static int ab3100_rtc_set_mmss(struct device *dev, unsigned long secs)
buf[5] = (fat_time >> 40) & 0xFF;
for (i = 0; i < 6; i++) {
- err = ab3100_set_register_interruptible(ab3100_data,
+ err = abx500_set_register_interruptible(dev, 0,
regs[i], buf[i]);
if (err)
return err;
}
/* Set the flag to mark that the clock is now set */
- return ab3100_mask_and_set_register_interruptible(ab3100_data,
+ return abx500_mask_and_set_register_interruptible(dev, 0,
AB3100_RTC,
- 0xFE, 0x01);
+ 0x01, 0x01);
}
static int ab3100_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
- struct ab3100 *ab3100_data = dev_get_drvdata(dev);
unsigned long time;
u8 rtcval;
int err;
- err = ab3100_get_register_interruptible(ab3100_data,
+ err = abx500_get_register_interruptible(dev, 0,
AB3100_RTC, &rtcval);
if (err)
return err;
@@ -94,7 +92,7 @@ static int ab3100_rtc_read_time(struct device *dev, struct rtc_time *tm)
u8 buf[6];
/* Read out time registers */
- err = ab3100_get_register_page_interruptible(ab3100_data,
+ err = abx500_get_register_page_interruptible(dev, 0,
AB3100_TI0,
buf, 6);
if (err != 0)
@@ -114,7 +112,6 @@ static int ab3100_rtc_read_time(struct device *dev, struct rtc_time *tm)
static int ab3100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
- struct ab3100 *ab3100_data = dev_get_drvdata(dev);
unsigned long time;
u64 fat_time;
u8 buf[6];
@@ -122,7 +119,7 @@ static int ab3100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
int err;
/* Figure out if alarm is enabled or not */
- err = ab3100_get_register_interruptible(ab3100_data,
+ err = abx500_get_register_interruptible(dev, 0,
AB3100_RTC, &rtcval);
if (err)
return err;
@@ -133,7 +130,7 @@ static int ab3100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
/* No idea how this could be represented */
alarm->pending = 0;
/* Read out alarm registers, only 4 bytes */
- err = ab3100_get_register_page_interruptible(ab3100_data,
+ err = abx500_get_register_page_interruptible(dev, 0,
AB3100_AL0, buf, 4);
if (err)
return err;
@@ -148,7 +145,6 @@ static int ab3100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
static int ab3100_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
- struct ab3100 *ab3100_data = dev_get_drvdata(dev);
u8 regs[] = {AB3100_AL0, AB3100_AL1, AB3100_AL2, AB3100_AL3};
unsigned char buf[4];
unsigned long secs;
@@ -165,21 +161,19 @@ static int ab3100_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
/* Set the alarm */
for (i = 0; i < 4; i++) {
- err = ab3100_set_register_interruptible(ab3100_data,
+ err = abx500_set_register_interruptible(dev, 0,
regs[i], buf[i]);
if (err)
return err;
}
/* Then enable the alarm */
- return ab3100_mask_and_set_register_interruptible(ab3100_data,
- AB3100_RTC, ~(1 << 2),
+ return abx500_mask_and_set_register_interruptible(dev, 0,
+ AB3100_RTC, (1 << 2),
alarm->enabled << 2);
}
static int ab3100_rtc_irq_enable(struct device *dev, unsigned int enabled)
{
- struct ab3100 *ab3100_data = dev_get_drvdata(dev);
-
/*
* It's not possible to enable/disable the alarm IRQ for this RTC.
* It does not actually trigger any IRQ: instead its only function is
@@ -188,12 +182,12 @@ static int ab3100_rtc_irq_enable(struct device *dev, unsigned int enabled)
* and need to be handled there instead.
*/
if (enabled)
- return ab3100_mask_and_set_register_interruptible(ab3100_data,
- AB3100_RTC, ~(1 << 2),
+ return abx500_mask_and_set_register_interruptible(dev, 0,
+ AB3100_RTC, (1 << 2),
1 << 2);
else
- return ab3100_mask_and_set_register_interruptible(ab3100_data,
- AB3100_RTC, ~(1 << 2),
+ return abx500_mask_and_set_register_interruptible(dev, 0,
+ AB3100_RTC, (1 << 2),
0);
}
@@ -210,10 +204,9 @@ static int __init ab3100_rtc_probe(struct platform_device *pdev)
int err;
u8 regval;
struct rtc_device *rtc;
- struct ab3100 *ab3100_data = platform_get_drvdata(pdev);
/* The first RTC register needs special treatment */
- err = ab3100_get_register_interruptible(ab3100_data,
+ err = abx500_get_register_interruptible(&pdev->dev, 0,
AB3100_RTC, &regval);
if (err) {
dev_err(&pdev->dev, "unable to read RTC register\n");
@@ -231,7 +224,7 @@ static int __init ab3100_rtc_probe(struct platform_device *pdev)
* This bit remains until RTC power is lost.
*/
regval = 1 | RTC_SETTING;
- err = ab3100_set_register_interruptible(ab3100_data,
+ err = abx500_set_register_interruptible(&pdev->dev, 0,
AB3100_RTC, regval);
/* Ignore any error on this write */
}
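The ab3100 RTC now uses the generic abx500 accessors, which take the struct device plus a register bank (0 for the AB3100) instead of a driver-private pointer, and which expect the bit mask in non-inverted form. A small sketch of the enable/disable pattern used in the hunks above (the helper name is illustrative):

    static int ab3100_rtc_toggle_alarm_bit(struct device *dev,
                                           unsigned int enabled)
    {
            /* bit 2 of AB3100_RTC gates the alarm; the mask selects the bit,
             * the last argument supplies its new value */
            return abx500_mask_and_set_register_interruptible(dev, 0,
                            AB3100_RTC, 1 << 2, enabled ? 1 << 2 : 0);
    }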
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index e9aa814ddd23..4a92fc24787f 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -238,31 +238,32 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t)
rtc_control = CMOS_READ(RTC_CONTROL);
spin_unlock_irq(&rtc_lock);
- /* REVISIT this assumes PC style usage: always BCD */
-
- if (((unsigned)t->time.tm_sec) < 0x60)
- t->time.tm_sec = bcd2bin(t->time.tm_sec);
- else
- t->time.tm_sec = -1;
- if (((unsigned)t->time.tm_min) < 0x60)
- t->time.tm_min = bcd2bin(t->time.tm_min);
- else
- t->time.tm_min = -1;
- if (((unsigned)t->time.tm_hour) < 0x24)
- t->time.tm_hour = bcd2bin(t->time.tm_hour);
- else
- t->time.tm_hour = -1;
-
- if (cmos->day_alrm) {
- if (((unsigned)t->time.tm_mday) <= 0x31)
- t->time.tm_mday = bcd2bin(t->time.tm_mday);
+ if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+ if (((unsigned)t->time.tm_sec) < 0x60)
+ t->time.tm_sec = bcd2bin(t->time.tm_sec);
else
- t->time.tm_mday = -1;
- if (cmos->mon_alrm) {
- if (((unsigned)t->time.tm_mon) <= 0x12)
- t->time.tm_mon = bcd2bin(t->time.tm_mon) - 1;
+ t->time.tm_sec = -1;
+ if (((unsigned)t->time.tm_min) < 0x60)
+ t->time.tm_min = bcd2bin(t->time.tm_min);
+ else
+ t->time.tm_min = -1;
+ if (((unsigned)t->time.tm_hour) < 0x24)
+ t->time.tm_hour = bcd2bin(t->time.tm_hour);
+ else
+ t->time.tm_hour = -1;
+
+ if (cmos->day_alrm) {
+ if (((unsigned)t->time.tm_mday) <= 0x31)
+ t->time.tm_mday = bcd2bin(t->time.tm_mday);
else
- t->time.tm_mon = -1;
+ t->time.tm_mday = -1;
+
+ if (cmos->mon_alrm) {
+ if (((unsigned)t->time.tm_mon) <= 0x12)
+ t->time.tm_mon = bcd2bin(t->time.tm_mon)-1;
+ else
+ t->time.tm_mon = -1;
+ }
}
}
t->time.tm_year = -1;
@@ -322,29 +323,26 @@ static void cmos_irq_disable(struct cmos_rtc *cmos, unsigned char mask)
static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
{
struct cmos_rtc *cmos = dev_get_drvdata(dev);
- unsigned char mon, mday, hrs, min, sec;
+ unsigned char mon, mday, hrs, min, sec, rtc_control;
if (!is_valid_irq(cmos->irq))
return -EIO;
- /* REVISIT this assumes PC style usage: always BCD */
-
- /* Writing 0xff means "don't care" or "match all". */
-
mon = t->time.tm_mon + 1;
- mon = (mon <= 12) ? bin2bcd(mon) : 0xff;
-
mday = t->time.tm_mday;
- mday = (mday >= 1 && mday <= 31) ? bin2bcd(mday) : 0xff;
-
hrs = t->time.tm_hour;
- hrs = (hrs < 24) ? bin2bcd(hrs) : 0xff;
-
min = t->time.tm_min;
- min = (min < 60) ? bin2bcd(min) : 0xff;
-
sec = t->time.tm_sec;
- sec = (sec < 60) ? bin2bcd(sec) : 0xff;
+
+ rtc_control = CMOS_READ(RTC_CONTROL);
+ if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+ /* Writing 0xff means "don't care" or "match all". */
+ mon = (mon <= 12) ? bin2bcd(mon) : 0xff;
+ mday = (mday >= 1 && mday <= 31) ? bin2bcd(mday) : 0xff;
+ hrs = (hrs < 24) ? bin2bcd(hrs) : 0xff;
+ min = (min < 60) ? bin2bcd(min) : 0xff;
+ sec = (sec < 60) ? bin2bcd(sec) : 0xff;
+ }
spin_lock_irq(&rtc_lock);
@@ -478,7 +476,7 @@ static int cmos_procfs(struct device *dev, struct seq_file *seq)
"update_IRQ\t: %s\n"
"HPET_emulated\t: %s\n"
// "square_wave\t: %s\n"
- // "BCD\t\t: %s\n"
+ "BCD\t\t: %s\n"
"DST_enable\t: %s\n"
"periodic_freq\t: %d\n"
"batt_status\t: %s\n",
@@ -486,7 +484,7 @@ static int cmos_procfs(struct device *dev, struct seq_file *seq)
(rtc_control & RTC_UIE) ? "yes" : "no",
is_hpet_enabled() ? "yes" : "no",
// (rtc_control & RTC_SQWE) ? "yes" : "no",
- // (rtc_control & RTC_DM_BINARY) ? "no" : "yes",
+ (rtc_control & RTC_DM_BINARY) ? "no" : "yes",
(rtc_control & RTC_DST_EN) ? "yes" : "no",
cmos->rtc->irq_freq,
(valid & RTC_VRT) ? "okay" : "dead");
@@ -749,12 +747,11 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
spin_unlock_irq(&rtc_lock);
- /* FIXME teach the alarm code how to handle binary mode;
+ /* FIXME:
* <asm-generic/rtc.h> doesn't know 12-hour mode either.
*/
- if (is_valid_irq(rtc_irq) &&
- (!(rtc_control & RTC_24H) || (rtc_control & (RTC_DM_BINARY)))) {
- dev_dbg(dev, "only 24-hr BCD mode supported\n");
+ if (is_valid_irq(rtc_irq) && !(rtc_control & RTC_24H)) {
+ dev_warn(dev, "only 24-hr supported\n");
retval = -ENXIO;
goto cleanup1;
}
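The cmos_read_alarm()/cmos_set_alarm() changes make the alarm path honour RTC_DM_BINARY instead of assuming PC-style BCD. A condensed sketch of the rule now applied to the second/minute/hour fields when writing an alarm (the helper itself is illustrative, not part of the driver):

    static unsigned char alarm_to_hw(unsigned char rtc_control,
                                     unsigned char value, unsigned char limit)
    {
            if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
                    /* BCD mode: out-of-range means "don't care" (0xff) */
                    return value < limit ? bin2bcd(value) : 0xff;
            /* binary mode: the raw value is written as-is */
            return value;
    }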
diff --git a/drivers/rtc/rtc-davinci.c b/drivers/rtc/rtc-davinci.c
new file mode 100644
index 000000000000..34647fc1ee98
--- /dev/null
+++ b/drivers/rtc/rtc-davinci.c
@@ -0,0 +1,674 @@
+/*
+ * DaVinci Power Management and Real Time Clock Driver for TI platforms
+ *
+ * Copyright (C) 2009 Texas Instruments, Inc
+ *
+ * Author: Miguel Aguilar <miguel.aguilar@ridgerun.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/rtc.h>
+#include <linux/bcd.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+/*
+ * The DaVinci RTC is a simple RTC with the following counters:
+ * Sec: 0 - 59 : BCD count
+ * Min: 0 - 59 : BCD count
+ * Hour: 0 - 23 : BCD count
+ * Day: 0 - 0x7FFF(32767) : Binary count ( Over 89 years )
+ */
+
+/* PRTC interface registers */
+#define DAVINCI_PRTCIF_PID 0x00
+#define PRTCIF_CTLR 0x04
+#define PRTCIF_LDATA 0x08
+#define PRTCIF_UDATA 0x0C
+#define PRTCIF_INTEN 0x10
+#define PRTCIF_INTFLG 0x14
+
+/* PRTCIF_CTLR bit fields */
+#define PRTCIF_CTLR_BUSY BIT(31)
+#define PRTCIF_CTLR_SIZE BIT(25)
+#define PRTCIF_CTLR_DIR BIT(24)
+#define PRTCIF_CTLR_BENU_MSB BIT(23)
+#define PRTCIF_CTLR_BENU_3RD_BYTE BIT(22)
+#define PRTCIF_CTLR_BENU_2ND_BYTE BIT(21)
+#define PRTCIF_CTLR_BENU_LSB BIT(20)
+#define PRTCIF_CTLR_BENU_MASK (0x00F00000)
+#define PRTCIF_CTLR_BENL_MSB BIT(19)
+#define PRTCIF_CTLR_BENL_3RD_BYTE BIT(18)
+#define PRTCIF_CTLR_BENL_2ND_BYTE BIT(17)
+#define PRTCIF_CTLR_BENL_LSB BIT(16)
+#define PRTCIF_CTLR_BENL_MASK (0x000F0000)
+
+/* PRTCIF_INTEN bit fields */
+#define PRTCIF_INTEN_RTCSS BIT(1)
+#define PRTCIF_INTEN_RTCIF BIT(0)
+#define PRTCIF_INTEN_MASK (PRTCIF_INTEN_RTCSS \
+ | PRTCIF_INTEN_RTCIF)
+
+/* PRTCIF_INTFLG bit fields */
+#define PRTCIF_INTFLG_RTCSS BIT(1)
+#define PRTCIF_INTFLG_RTCIF BIT(0)
+#define PRTCIF_INTFLG_MASK (PRTCIF_INTFLG_RTCSS \
+ | PRTCIF_INTFLG_RTCIF)
+
+/* PRTC subsystem registers */
+#define PRTCSS_RTC_INTC_EXTENA1 (0x0C)
+#define PRTCSS_RTC_CTRL (0x10)
+#define PRTCSS_RTC_WDT (0x11)
+#define PRTCSS_RTC_TMR0 (0x12)
+#define PRTCSS_RTC_TMR1 (0x13)
+#define PRTCSS_RTC_CCTRL (0x14)
+#define PRTCSS_RTC_SEC (0x15)
+#define PRTCSS_RTC_MIN (0x16)
+#define PRTCSS_RTC_HOUR (0x17)
+#define PRTCSS_RTC_DAY0 (0x18)
+#define PRTCSS_RTC_DAY1 (0x19)
+#define PRTCSS_RTC_AMIN (0x1A)
+#define PRTCSS_RTC_AHOUR (0x1B)
+#define PRTCSS_RTC_ADAY0 (0x1C)
+#define PRTCSS_RTC_ADAY1 (0x1D)
+#define PRTCSS_RTC_CLKC_CNT (0x20)
+
+/* PRTCSS_RTC_INTC_EXTENA1 */
+#define PRTCSS_RTC_INTC_EXTENA1_MASK (0x07)
+
+/* PRTCSS_RTC_CTRL bit fields */
+#define PRTCSS_RTC_CTRL_WDTBUS BIT(7)
+#define PRTCSS_RTC_CTRL_WEN BIT(6)
+#define PRTCSS_RTC_CTRL_WDRT BIT(5)
+#define PRTCSS_RTC_CTRL_WDTFLG BIT(4)
+#define PRTCSS_RTC_CTRL_TE BIT(3)
+#define PRTCSS_RTC_CTRL_TIEN BIT(2)
+#define PRTCSS_RTC_CTRL_TMRFLG BIT(1)
+#define PRTCSS_RTC_CTRL_TMMD BIT(0)
+
+/* PRTCSS_RTC_CCTRL bit fields */
+#define PRTCSS_RTC_CCTRL_CALBUSY BIT(7)
+#define PRTCSS_RTC_CCTRL_DAEN BIT(5)
+#define PRTCSS_RTC_CCTRL_HAEN BIT(4)
+#define PRTCSS_RTC_CCTRL_MAEN BIT(3)
+#define PRTCSS_RTC_CCTRL_ALMFLG BIT(2)
+#define PRTCSS_RTC_CCTRL_AIEN BIT(1)
+#define PRTCSS_RTC_CCTRL_CAEN BIT(0)
+
+static DEFINE_SPINLOCK(davinci_rtc_lock);
+
+struct davinci_rtc {
+ struct rtc_device *rtc;
+ void __iomem *base;
+ resource_size_t pbase;
+ size_t base_size;
+ int irq;
+};
+
+static inline void rtcif_write(struct davinci_rtc *davinci_rtc,
+ u32 val, u32 addr)
+{
+ writel(val, davinci_rtc->base + addr);
+}
+
+static inline u32 rtcif_read(struct davinci_rtc *davinci_rtc, u32 addr)
+{
+ return readl(davinci_rtc->base + addr);
+}
+
+static inline void rtcif_wait(struct davinci_rtc *davinci_rtc)
+{
+ while (rtcif_read(davinci_rtc, PRTCIF_CTLR) & PRTCIF_CTLR_BUSY)
+ cpu_relax();
+}
+
+static inline void rtcss_write(struct davinci_rtc *davinci_rtc,
+ unsigned long val, u8 addr)
+{
+ rtcif_wait(davinci_rtc);
+
+ rtcif_write(davinci_rtc, PRTCIF_CTLR_BENL_LSB | addr, PRTCIF_CTLR);
+ rtcif_write(davinci_rtc, val, PRTCIF_LDATA);
+
+ rtcif_wait(davinci_rtc);
+}
+
+static inline u8 rtcss_read(struct davinci_rtc *davinci_rtc, u8 addr)
+{
+ rtcif_wait(davinci_rtc);
+
+ rtcif_write(davinci_rtc, PRTCIF_CTLR_DIR | PRTCIF_CTLR_BENL_LSB | addr,
+ PRTCIF_CTLR);
+
+ rtcif_wait(davinci_rtc);
+
+ return rtcif_read(davinci_rtc, PRTCIF_LDATA);
+}
+
+static inline void davinci_rtcss_calendar_wait(struct davinci_rtc *davinci_rtc)
+{
+ while (rtcss_read(davinci_rtc, PRTCSS_RTC_CCTRL) &
+ PRTCSS_RTC_CCTRL_CALBUSY)
+ cpu_relax();
+}
+
+static irqreturn_t davinci_rtc_interrupt(int irq, void *class_dev)
+{
+ struct davinci_rtc *davinci_rtc = class_dev;
+ unsigned long events = 0;
+ u32 irq_flg;
+ u8 alm_irq, tmr_irq;
+ u8 rtc_ctrl, rtc_cctrl;
+ int ret = IRQ_NONE;
+
+ irq_flg = rtcif_read(davinci_rtc, PRTCIF_INTFLG) &
+ PRTCIF_INTFLG_RTCSS;
+
+ alm_irq = rtcss_read(davinci_rtc, PRTCSS_RTC_CCTRL) &
+ PRTCSS_RTC_CCTRL_ALMFLG;
+
+ tmr_irq = rtcss_read(davinci_rtc, PRTCSS_RTC_CTRL) &
+ PRTCSS_RTC_CTRL_TMRFLG;
+
+ if (irq_flg) {
+ if (alm_irq) {
+ events |= RTC_IRQF | RTC_AF;
+ rtc_cctrl = rtcss_read(davinci_rtc, PRTCSS_RTC_CCTRL);
+ rtc_cctrl |= PRTCSS_RTC_CCTRL_ALMFLG;
+ rtcss_write(davinci_rtc, rtc_cctrl, PRTCSS_RTC_CCTRL);
+ } else if (tmr_irq) {
+ events |= RTC_IRQF | RTC_PF;
+ rtc_ctrl = rtcss_read(davinci_rtc, PRTCSS_RTC_CTRL);
+ rtc_ctrl |= PRTCSS_RTC_CTRL_TMRFLG;
+ rtcss_write(davinci_rtc, rtc_ctrl, PRTCSS_RTC_CTRL);
+ }
+
+ rtcif_write(davinci_rtc, PRTCIF_INTFLG_RTCSS,
+ PRTCIF_INTFLG);
+ rtc_update_irq(davinci_rtc->rtc, 1, events);
+
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static int
+davinci_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+{
+ struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
+ u8 rtc_ctrl;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&davinci_rtc_lock, flags);
+
+ rtc_ctrl = rtcss_read(davinci_rtc, PRTCSS_RTC_CTRL);
+
+ switch (cmd) {
+ case RTC_WIE_ON:
+ rtc_ctrl |= PRTCSS_RTC_CTRL_WEN | PRTCSS_RTC_CTRL_WDTFLG;
+ break;
+ case RTC_WIE_OFF:
+ rtc_ctrl &= ~PRTCSS_RTC_CTRL_WEN;
+ break;
+ case RTC_UIE_OFF:
+ case RTC_UIE_ON:
+ ret = -ENOTTY;
+ break;
+ default:
+ ret = -ENOIOCTLCMD;
+ }
+
+ rtcss_write(davinci_rtc, rtc_ctrl, PRTCSS_RTC_CTRL);
+
+ spin_unlock_irqrestore(&davinci_rtc_lock, flags);
+
+ return ret;
+}
+
+static int convertfromdays(u16 days, struct rtc_time *tm)
+{
+ int tmp_days, year, mon;
+
+ for (year = 2000;; year++) {
+ tmp_days = rtc_year_days(1, 12, year);
+ if (days >= tmp_days)
+ days -= tmp_days;
+ else {
+ for (mon = 0;; mon++) {
+ tmp_days = rtc_month_days(mon, year);
+ if (days >= tmp_days) {
+ days -= tmp_days;
+ } else {
+ tm->tm_year = year - 1900;
+ tm->tm_mon = mon;
+ tm->tm_mday = days + 1;
+ break;
+ }
+ }
+ break;
+ }
+ }
+ return 0;
+}
+
+static int convert2days(u16 *days, struct rtc_time *tm)
+{
+ int i;
+ *days = 0;
+
+ /* epoch == 1900 */
+ if (tm->tm_year < 100 || tm->tm_year > 199)
+ return -EINVAL;
+
+ for (i = 2000; i < 1900 + tm->tm_year; i++)
+ *days += rtc_year_days(1, 12, i);
+
+ *days += rtc_year_days(tm->tm_mday, tm->tm_mon, 1900 + tm->tm_year);
+
+ return 0;
+}
+
+static int davinci_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
+ u16 days = 0;
+ u8 day0, day1;
+ unsigned long flags;
+
+ spin_lock_irqsave(&davinci_rtc_lock, flags);
+
+ davinci_rtcss_calendar_wait(davinci_rtc);
+ tm->tm_sec = bcd2bin(rtcss_read(davinci_rtc, PRTCSS_RTC_SEC));
+
+ davinci_rtcss_calendar_wait(davinci_rtc);
+ tm->tm_min = bcd2bin(rtcss_read(davinci_rtc, PRTCSS_RTC_MIN));
+
+ davinci_rtcss_calendar_wait(davinci_rtc);
+ tm->tm_hour = bcd2bin(rtcss_read(davinci_rtc, PRTCSS_RTC_HOUR));
+
+ davinci_rtcss_calendar_wait(davinci_rtc);
+ day0 = rtcss_read(davinci_rtc, PRTCSS_RTC_DAY0);
+
+ davinci_rtcss_calendar_wait(davinci_rtc);
+ day1 = rtcss_read(davinci_rtc, PRTCSS_RTC_DAY1);
+
+ spin_unlock_irqrestore(&davinci_rtc_lock, flags);
+
+ days |= day1;
+ days <<= 8;
+ days |= day0;
+
+ if (convertfromdays(days, tm) < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int davinci_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
+ u16 days;
+ u8 rtc_cctrl;
+ unsigned long flags;
+
+ if (convert2days(&days, tm) < 0)
+ return -EINVAL;
+
+ spin_lock_irqsave(&davinci_rtc_lock, flags);
+
+ davinci_rtcss_calendar_wait(davinci_rtc);
+ rtcss_write(davinci_rtc, bin2bcd(tm->tm_sec), PRTCSS_RTC_SEC);
+
+ davinci_rtcss_calendar_wait(davinci_rtc);
+ rtcss_write(davinci_rtc, bin2bcd(tm->tm_min), PRTCSS_RTC_MIN);
+
+ davinci_rtcss_calendar_wait(davinci_rtc);
+ rtcss_write(davinci_rtc, bin2bcd(tm->tm_hour), PRTCSS_RTC_HOUR);
+
+ davinci_rtcss_calendar_wait(davinci_rtc);
+ rtcss_write(davinci_rtc, days & 0xFF, PRTCSS_RTC_DAY0);
+
+ davinci_rtcss_calendar_wait(davinci_rtc);
+ rtcss_write(davinci_rtc, (days & 0xFF00) >> 8, PRTCSS_RTC_DAY1);
+
+ rtc_cctrl = rtcss_read(davinci_rtc, PRTCSS_RTC_CCTRL);
+ rtc_cctrl |= PRTCSS_RTC_CCTRL_CAEN;
+ rtcss_write(davinci_rtc, rtc_cctrl, PRTCSS_RTC_CCTRL);
+
+ spin_unlock_irqrestore(&davinci_rtc_lock, flags);
+
+ return 0;
+}
+
+static int davinci_rtc_alarm_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
+ unsigned long flags;
+ u8 rtc_cctrl = rtcss_read(davinci_rtc, PRTCSS_RTC_CCTRL);
+
+ spin_lock_irqsave(&davinci_rtc_lock, flags);
+
+ if (enabled)
+ rtc_cctrl |= PRTCSS_RTC_CCTRL_DAEN |
+ PRTCSS_RTC_CCTRL_HAEN |
+ PRTCSS_RTC_CCTRL_MAEN |
+ PRTCSS_RTC_CCTRL_ALMFLG |
+ PRTCSS_RTC_CCTRL_AIEN;
+ else
+ rtc_cctrl &= ~PRTCSS_RTC_CCTRL_AIEN;
+
+ davinci_rtcss_calendar_wait(davinci_rtc);
+ rtcss_write(davinci_rtc, rtc_cctrl, PRTCSS_RTC_CCTRL);
+
+ spin_unlock_irqrestore(&davinci_rtc_lock, flags);
+
+ return 0;
+}
+
+static int davinci_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+ struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
+ u16 days = 0;
+ u8 day0, day1;
+ unsigned long flags;
+
+ spin_lock_irqsave(&davinci_rtc_lock, flags);
+
+ davinci_rtcss_calendar_wait(davinci_rtc);
+ alm->time.tm_min = bcd2bin(rtcss_read(davinci_rtc, PRTCSS_RTC_AMIN));
+
+ davinci_rtcss_calendar_wait(davinci_rtc);
+ alm->time.tm_hour = bcd2bin(rtcss_read(davinci_rtc, PRTCSS_RTC_AHOUR));
+
+ davinci_rtcss_calendar_wait(davinci_rtc);
+ day0 = rtcss_read(davinci_rtc, PRTCSS_RTC_ADAY0);
+
+ davinci_rtcss_calendar_wait(davinci_rtc);
+ day1 = rtcss_read(davinci_rtc, PRTCSS_RTC_ADAY1);
+
+ spin_unlock_irqrestore(&davinci_rtc_lock, flags);
+ days |= day1;
+ days <<= 8;
+ days |= day0;
+
+ if (convertfromdays(days, &alm->time) < 0)
+ return -EINVAL;
+
+ alm->pending = !!(rtcss_read(davinci_rtc,
+ PRTCSS_RTC_CCTRL) &
+ PRTCSS_RTC_CCTRL_AIEN);
+ alm->enabled = alm->pending && device_may_wakeup(dev);
+
+ return 0;
+}
+
+static int davinci_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+ struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
+ unsigned long flags;
+ u16 days;
+
+ if (alm->time.tm_mday <= 0 && alm->time.tm_mon < 0
+ && alm->time.tm_year < 0) {
+ struct rtc_time tm;
+ unsigned long now, then;
+
+ davinci_rtc_read_time(dev, &tm);
+ rtc_tm_to_time(&tm, &now);
+
+ alm->time.tm_mday = tm.tm_mday;
+ alm->time.tm_mon = tm.tm_mon;
+ alm->time.tm_year = tm.tm_year;
+ rtc_tm_to_time(&alm->time, &then);
+
+ if (then < now) {
+ rtc_time_to_tm(now + 24 * 60 * 60, &tm);
+ alm->time.tm_mday = tm.tm_mday;
+ alm->time.tm_mon = tm.tm_mon;
+ alm->time.tm_year = tm.tm_year;
+ }
+ }
+
+ if (convert2days(&days, &alm->time) < 0)
+ return -EINVAL;
+
+ spin_lock_irqsave(&davinci_rtc_lock, flags);
+
+ davinci_rtcss_calendar_wait(davinci_rtc);
+ rtcss_write(davinci_rtc, bin2bcd(alm->time.tm_min), PRTCSS_RTC_AMIN);
+
+ davinci_rtcss_calendar_wait(davinci_rtc);
+ rtcss_write(davinci_rtc, bin2bcd(alm->time.tm_hour), PRTCSS_RTC_AHOUR);
+
+ davinci_rtcss_calendar_wait(davinci_rtc);
+ rtcss_write(davinci_rtc, days & 0xFF, PRTCSS_RTC_ADAY0);
+
+ davinci_rtcss_calendar_wait(davinci_rtc);
+ rtcss_write(davinci_rtc, (days & 0xFF00) >> 8, PRTCSS_RTC_ADAY1);
+
+ spin_unlock_irqrestore(&davinci_rtc_lock, flags);
+
+ return 0;
+}
+
+static int davinci_rtc_irq_set_state(struct device *dev, int enabled)
+{
+ struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
+ unsigned long flags;
+ u8 rtc_ctrl;
+
+ spin_lock_irqsave(&davinci_rtc_lock, flags);
+
+ rtc_ctrl = rtcss_read(davinci_rtc, PRTCSS_RTC_CTRL);
+
+ if (enabled) {
+ while (rtcss_read(davinci_rtc, PRTCSS_RTC_CTRL)
+ & PRTCSS_RTC_CTRL_WDTBUS)
+ cpu_relax();
+
+ rtc_ctrl |= PRTCSS_RTC_CTRL_TE;
+ rtcss_write(davinci_rtc, rtc_ctrl, PRTCSS_RTC_CTRL);
+
+ rtcss_write(davinci_rtc, 0x0, PRTCSS_RTC_CLKC_CNT);
+
+ rtc_ctrl |= PRTCSS_RTC_CTRL_TIEN |
+ PRTCSS_RTC_CTRL_TMMD |
+ PRTCSS_RTC_CTRL_TMRFLG;
+ } else
+ rtc_ctrl &= ~PRTCSS_RTC_CTRL_TIEN;
+
+ rtcss_write(davinci_rtc, rtc_ctrl, PRTCSS_RTC_CTRL);
+
+ spin_unlock_irqrestore(&davinci_rtc_lock, flags);
+
+ return 0;
+}
+
+static int davinci_rtc_irq_set_freq(struct device *dev, int freq)
+{
+ struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
+ unsigned long flags;
+ u16 tmr_counter = (0x8000 >> (ffs(freq) - 1));
+
+ spin_lock_irqsave(&davinci_rtc_lock, flags);
+
+ rtcss_write(davinci_rtc, tmr_counter & 0xFF, PRTCSS_RTC_TMR0);
+ rtcss_write(davinci_rtc, (tmr_counter & 0xFF00) >> 8, PRTCSS_RTC_TMR1);
+
+ spin_unlock_irqrestore(&davinci_rtc_lock, flags);
+
+ return 0;
+}
+
+static struct rtc_class_ops davinci_rtc_ops = {
+ .ioctl = davinci_rtc_ioctl,
+ .read_time = davinci_rtc_read_time,
+ .set_time = davinci_rtc_set_time,
+ .alarm_irq_enable = davinci_rtc_alarm_irq_enable,
+ .read_alarm = davinci_rtc_read_alarm,
+ .set_alarm = davinci_rtc_set_alarm,
+ .irq_set_state = davinci_rtc_irq_set_state,
+ .irq_set_freq = davinci_rtc_irq_set_freq,
+};
+
+static int __init davinci_rtc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct davinci_rtc *davinci_rtc;
+ struct resource *res, *mem;
+ int ret = 0;
+
+ davinci_rtc = kzalloc(sizeof(struct davinci_rtc), GFP_KERNEL);
+ if (!davinci_rtc) {
+ dev_dbg(dev, "could not allocate memory for private data\n");
+ return -ENOMEM;
+ }
+
+ davinci_rtc->irq = platform_get_irq(pdev, 0);
+ if (davinci_rtc->irq < 0) {
+ dev_err(dev, "no RTC irq\n");
+ ret = davinci_rtc->irq;
+ goto fail1;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "no mem resource\n");
+ ret = -EINVAL;
+ goto fail1;
+ }
+
+ davinci_rtc->pbase = res->start;
+ davinci_rtc->base_size = resource_size(res);
+
+ mem = request_mem_region(davinci_rtc->pbase, davinci_rtc->base_size,
+ pdev->name);
+ if (!mem) {
+ dev_err(dev, "RTC registers at %08x are not free\n",
+ davinci_rtc->pbase);
+ ret = -EBUSY;
+ goto fail1;
+ }
+
+ davinci_rtc->base = ioremap(davinci_rtc->pbase, davinci_rtc->base_size);
+ if (!davinci_rtc->base) {
+ dev_err(dev, "unable to ioremap MEM resource\n");
+ ret = -ENOMEM;
+ goto fail2;
+ }
+
+ davinci_rtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
+ &davinci_rtc_ops, THIS_MODULE);
+ if (IS_ERR(davinci_rtc->rtc)) {
+ dev_err(dev, "unable to register RTC device, err %ld\n",
+ PTR_ERR(davinci_rtc->rtc));
+ ret = PTR_ERR(davinci_rtc->rtc);
+ goto fail3;
+ }
+
+ rtcif_write(davinci_rtc, PRTCIF_INTFLG_RTCSS, PRTCIF_INTFLG);
+ rtcif_write(davinci_rtc, 0, PRTCIF_INTEN);
+ rtcss_write(davinci_rtc, 0, PRTCSS_RTC_INTC_EXTENA1);
+
+ rtcss_write(davinci_rtc, 0, PRTCSS_RTC_CTRL);
+ rtcss_write(davinci_rtc, 0, PRTCSS_RTC_CCTRL);
+
+ ret = request_irq(davinci_rtc->irq, davinci_rtc_interrupt,
+ IRQF_DISABLED, "davinci_rtc", davinci_rtc);
+ if (ret < 0) {
+ dev_err(dev, "unable to register davinci RTC interrupt\n");
+ goto fail4;
+ }
+
+ /* Enable interrupts */
+ rtcif_write(davinci_rtc, PRTCIF_INTEN_RTCSS, PRTCIF_INTEN);
+ rtcss_write(davinci_rtc, PRTCSS_RTC_INTC_EXTENA1_MASK,
+ PRTCSS_RTC_INTC_EXTENA1);
+
+ rtcss_write(davinci_rtc, PRTCSS_RTC_CCTRL_CAEN, PRTCSS_RTC_CCTRL);
+
+ platform_set_drvdata(pdev, davinci_rtc);
+
+ device_init_wakeup(&pdev->dev, 0);
+
+ return 0;
+
+fail4:
+ rtc_device_unregister(davinci_rtc->rtc);
+fail3:
+ iounmap(davinci_rtc->base);
+fail2:
+ release_mem_region(davinci_rtc->pbase, davinci_rtc->base_size);
+fail1:
+ kfree(davinci_rtc);
+
+ return ret;
+}
+
+static int __devexit davinci_rtc_remove(struct platform_device *pdev)
+{
+ struct davinci_rtc *davinci_rtc = platform_get_drvdata(pdev);
+
+ device_init_wakeup(&pdev->dev, 0);
+
+ rtcif_write(davinci_rtc, 0, PRTCIF_INTEN);
+
+ free_irq(davinci_rtc->irq, davinci_rtc);
+
+ rtc_device_unregister(davinci_rtc->rtc);
+
+ iounmap(davinci_rtc->base);
+ release_mem_region(davinci_rtc->pbase, davinci_rtc->base_size);
+
+ platform_set_drvdata(pdev, NULL);
+
+ kfree(davinci_rtc);
+
+ return 0;
+}
+
+static struct platform_driver davinci_rtc_driver = {
+ .probe = davinci_rtc_probe,
+ .remove = __devexit_p(davinci_rtc_remove),
+ .driver = {
+ .name = "rtc_davinci",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init rtc_init(void)
+{
+ return platform_driver_probe(&davinci_rtc_driver, davinci_rtc_probe);
+}
+module_init(rtc_init);
+
+static void __exit rtc_exit(void)
+{
+ platform_driver_unregister(&davinci_rtc_driver);
+}
+module_exit(rtc_exit);
+
+MODULE_AUTHOR("Miguel Aguilar <miguel.aguilar@ridgerun.com>");
+MODULE_DESCRIPTION("Texas Instruments DaVinci PRTC Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 64d9727b7229..73377b0d65da 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -34,7 +34,8 @@
* Board-specific wiring options include using split power mode with
* RTC_OFF_NOFF used as the reset signal (so the RTC won't be reset),
* and wiring RTC_WAKE_INT (so the RTC alarm can wake the system from
- * low power modes). See the BOARD-SPECIFIC CUSTOMIZATION comment.
+ * low power modes) for OMAP1 boards (OMAP-L138 has this built into
+ * the SoC). See the BOARD-SPECIFIC CUSTOMIZATION comment.
*/
#define OMAP_RTC_BASE 0xfffb4800
@@ -401,16 +402,17 @@ static int __init omap_rtc_probe(struct platform_device *pdev)
/* BOARD-SPECIFIC CUSTOMIZATION CAN GO HERE:
*
- * - Boards wired so that RTC_WAKE_INT does something, and muxed
- * right (W13_1610_RTC_WAKE_INT is the default after chip reset),
- * should initialize the device wakeup flag appropriately.
+ * - Device wake-up capability setting should come through chip
+ * init logic. OMAP1 boards should initialize the "wakeup capable"
+ * flag in the platform device if the board is wired right for
+ * being woken up by RTC alarm. For OMAP-L138, this capability
+ * is provided by the SoC's "Deep Sleep" support.
*
* - Boards wired so RTC_ON_nOFF is used as the reset signal,
* rather than nPWRON_RESET, should forcibly enable split
* power mode. (Some chip errata report that RTC_CTRL_SPLIT
* is write-only, and always reads as zero...)
*/
- device_init_wakeup(&pdev->dev, 0);
if (new_ctrl & (u8) OMAP_RTC_CTRL_SPLIT)
pr_info("%s: split power mode\n", pdev->name);
diff --git a/drivers/rtc/rtc-rx8581.c b/drivers/rtc/rtc-rx8581.c
index c9522f3bc21c..9718aaaa8215 100644
--- a/drivers/rtc/rtc-rx8581.c
+++ b/drivers/rtc/rtc-rx8581.c
@@ -1,8 +1,8 @@
/*
* An I2C driver for the Epson RX8581 RTC
*
- * Author: Martyn Welch <martyn.welch@gefanuc.com>
- * Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc.
+ * Author: Martyn Welch <martyn.welch@ge.com>
+ * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -272,7 +272,7 @@ static void __exit rx8581_exit(void)
i2c_del_driver(&rx8581_driver);
}
-MODULE_AUTHOR("Martyn Welch <martyn.welch@gefanuc.com>");
+MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>");
MODULE_DESCRIPTION("Epson RX-8581 RTC driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/rtc/rtc-stk17ta8.c b/drivers/rtc/rtc-stk17ta8.c
index 875ba099e7a5..b53a00198dbe 100644
--- a/drivers/rtc/rtc-stk17ta8.c
+++ b/drivers/rtc/rtc-stk17ta8.c
@@ -1,7 +1,7 @@
/*
* A RTC driver for the Simtek STK17TA8
*
- * By Thomas Hommel <thomas.hommel@gefanuc.com>
+ * By Thomas Hommel <thomas.hommel@ge.com>
*
* Based on the DS1553 driver from
* Atsushi Nemoto <anemo@mba.ocn.ne.jp>
@@ -382,7 +382,7 @@ static __exit void stk17ta8_exit(void)
module_init(stk17ta8_init);
module_exit(stk17ta8_exit);
-MODULE_AUTHOR("Thomas Hommel <thomas.hommel@gefanuc.com>");
+MODULE_AUTHOR("Thomas Hommel <thomas.hommel@ge.com>");
MODULE_DESCRIPTION("Simtek STK17TA8 RTC driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index acf222f91f5a..0e86247d791e 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -37,6 +37,9 @@
*/
#define DASD_CHANQ_MAX_SIZE 4
+#define DASD_SLEEPON_START_TAG (void *) 1
+#define DASD_SLEEPON_END_TAG (void *) 2
+
/*
* SECTION: exported variables of dasd.c
*/
@@ -62,6 +65,7 @@ static void dasd_device_tasklet(struct dasd_device *);
static void dasd_block_tasklet(struct dasd_block *);
static void do_kick_device(struct work_struct *);
static void do_restore_device(struct work_struct *);
+static void do_reload_device(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(unsigned long);
static void dasd_block_timeout(unsigned long);
@@ -112,6 +116,7 @@ struct dasd_device *dasd_alloc_device(void)
device->timer.data = (unsigned long) device;
INIT_WORK(&device->kick_work, do_kick_device);
INIT_WORK(&device->restore_device, do_restore_device);
+ INIT_WORK(&device->reload_device, do_reload_device);
device->state = DASD_STATE_NEW;
device->target = DASD_STATE_NEW;
mutex_init(&device->state_mutex);
@@ -518,6 +523,26 @@ void dasd_kick_device(struct dasd_device *device)
}
/*
+ * dasd_reload_device will schedule a call to do_reload_device to the kernel
+ * event daemon.
+ */
+static void do_reload_device(struct work_struct *work)
+{
+ struct dasd_device *device = container_of(work, struct dasd_device,
+ reload_device);
+ device->discipline->reload(device);
+ dasd_put_device(device);
+}
+
+void dasd_reload_device(struct dasd_device *device)
+{
+ dasd_get_device(device);
+ /* queue call to dasd_reload_device to the kernel event daemon. */
+ schedule_work(&device->reload_device);
+}
+EXPORT_SYMBOL(dasd_reload_device);
+
+/*
* dasd_restore_device will schedule a call do do_restore_device to the kernel
* event daemon.
*/
@@ -1472,7 +1497,10 @@ void dasd_add_request_tail(struct dasd_ccw_req *cqr)
*/
static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
{
- wake_up((wait_queue_head_t *) data);
+ spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
+ cqr->callback_data = DASD_SLEEPON_END_TAG;
+ spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
+ wake_up(&generic_waitq);
}
static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
@@ -1482,10 +1510,7 @@ static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
device = cqr->startdev;
spin_lock_irq(get_ccwdev_lock(device->cdev));
- rc = ((cqr->status == DASD_CQR_DONE ||
- cqr->status == DASD_CQR_NEED_ERP ||
- cqr->status == DASD_CQR_TERMINATED) &&
- list_empty(&cqr->devlist));
+ rc = (cqr->callback_data == DASD_SLEEPON_END_TAG);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
return rc;
}
@@ -1573,7 +1598,7 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
wait_event(generic_waitq, !(device->stopped));
cqr->callback = dasd_wakeup_cb;
- cqr->callback_data = (void *) &generic_waitq;
+ cqr->callback_data = DASD_SLEEPON_START_TAG;
dasd_add_request_tail(cqr);
if (interruptible) {
rc = wait_event_interruptible(
@@ -1652,7 +1677,7 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
}
cqr->callback = dasd_wakeup_cb;
- cqr->callback_data = (void *) &generic_waitq;
+ cqr->callback_data = DASD_SLEEPON_START_TAG;
cqr->status = DASD_CQR_QUEUED;
list_add(&cqr->devlist, &device->ccw_queue);
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 6632649dd6aa..85bfd8794856 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -1418,9 +1418,29 @@ static struct dasd_ccw_req *dasd_3990_erp_inspect_alias(
struct dasd_ccw_req *erp)
{
struct dasd_ccw_req *cqr = erp->refers;
+ char *sense;
if (cqr->block &&
(cqr->block->base != cqr->startdev)) {
+
+ sense = dasd_get_sense(&erp->refers->irb);
+ /*
+ * dynamic pav may have changed base alias mapping
+ */
+ if (!test_bit(DASD_FLAG_OFFLINE, &cqr->startdev->flags) && sense
+ && (sense[0] == 0x10) && (sense[7] == 0x0F)
+ && (sense[8] == 0x67)) {
+ /*
+ * remove device from alias handling to prevent new
+ * requests from being scheduled on the
+ * wrong alias device
+ */
+ dasd_alias_remove_device(cqr->startdev);
+
+ /* schedule worker to reload device */
+ dasd_reload_device(cqr->startdev);
+ }
+
if (cqr->startdev->features & DASD_FEATURE_ERPLOG) {
DBF_DEV_EVENT(DBF_ERR, cqr->startdev,
"ERP on alias device for request %p,"
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 8c4814258e93..cd09aacd258c 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -190,20 +190,21 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
struct alias_server *server, *newserver;
struct alias_lcu *lcu, *newlcu;
int is_lcu_known;
- struct dasd_uid *uid;
+ struct dasd_uid uid;
private = (struct dasd_eckd_private *) device->private;
- uid = &private->uid;
+
+ device->discipline->get_uid(device, &uid);
spin_lock_irqsave(&aliastree.lock, flags);
is_lcu_known = 1;
- server = _find_server(uid);
+ server = _find_server(&uid);
if (!server) {
spin_unlock_irqrestore(&aliastree.lock, flags);
- newserver = _allocate_server(uid);
+ newserver = _allocate_server(&uid);
if (IS_ERR(newserver))
return PTR_ERR(newserver);
spin_lock_irqsave(&aliastree.lock, flags);
- server = _find_server(uid);
+ server = _find_server(&uid);
if (!server) {
list_add(&newserver->server, &aliastree.serverlist);
server = newserver;
@@ -214,14 +215,14 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
}
}
- lcu = _find_lcu(server, uid);
+ lcu = _find_lcu(server, &uid);
if (!lcu) {
spin_unlock_irqrestore(&aliastree.lock, flags);
- newlcu = _allocate_lcu(uid);
+ newlcu = _allocate_lcu(&uid);
if (IS_ERR(newlcu))
return PTR_ERR(newlcu);
spin_lock_irqsave(&aliastree.lock, flags);
- lcu = _find_lcu(server, uid);
+ lcu = _find_lcu(server, &uid);
if (!lcu) {
list_add(&newlcu->lcu, &server->lculist);
lcu = newlcu;
@@ -256,20 +257,20 @@ void dasd_alias_lcu_setup_complete(struct dasd_device *device)
unsigned long flags;
struct alias_server *server;
struct alias_lcu *lcu;
- struct dasd_uid *uid;
+ struct dasd_uid uid;
private = (struct dasd_eckd_private *) device->private;
- uid = &private->uid;
+ device->discipline->get_uid(device, &uid);
lcu = NULL;
spin_lock_irqsave(&aliastree.lock, flags);
- server = _find_server(uid);
+ server = _find_server(&uid);
if (server)
- lcu = _find_lcu(server, uid);
+ lcu = _find_lcu(server, &uid);
spin_unlock_irqrestore(&aliastree.lock, flags);
if (!lcu) {
DBF_EVENT_DEVID(DBF_ERR, device->cdev,
"could not find lcu for %04x %02x",
- uid->ssid, uid->real_unit_addr);
+ uid.ssid, uid.real_unit_addr);
WARN_ON(1);
return;
}
@@ -282,20 +283,20 @@ void dasd_alias_wait_for_lcu_setup(struct dasd_device *device)
unsigned long flags;
struct alias_server *server;
struct alias_lcu *lcu;
- struct dasd_uid *uid;
+ struct dasd_uid uid;
private = (struct dasd_eckd_private *) device->private;
- uid = &private->uid;
+ device->discipline->get_uid(device, &uid);
lcu = NULL;
spin_lock_irqsave(&aliastree.lock, flags);
- server = _find_server(uid);
+ server = _find_server(&uid);
if (server)
- lcu = _find_lcu(server, uid);
+ lcu = _find_lcu(server, &uid);
spin_unlock_irqrestore(&aliastree.lock, flags);
if (!lcu) {
DBF_EVENT_DEVID(DBF_ERR, device->cdev,
"could not find lcu for %04x %02x",
- uid->ssid, uid->real_unit_addr);
+ uid.ssid, uid.real_unit_addr);
WARN_ON(1);
return;
}
@@ -314,9 +315,11 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
struct alias_lcu *lcu;
struct alias_server *server;
int was_pending;
+ struct dasd_uid uid;
private = (struct dasd_eckd_private *) device->private;
lcu = private->lcu;
+ device->discipline->get_uid(device, &uid);
spin_lock_irqsave(&lcu->lock, flags);
list_del_init(&device->alias_list);
/* make sure that the workers don't use this device */
@@ -353,7 +356,7 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
_schedule_lcu_update(lcu, NULL);
spin_unlock(&lcu->lock);
}
- server = _find_server(&private->uid);
+ server = _find_server(&uid);
if (server && list_empty(&server->lculist)) {
list_del(&server->server);
_free_server(server);
@@ -366,19 +369,30 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
* in the lcu is up to date and will update the device uid before
* adding it to a pav group.
*/
+
static int _add_device_to_lcu(struct alias_lcu *lcu,
- struct dasd_device *device)
+ struct dasd_device *device,
+ struct dasd_device *pos)
{
struct dasd_eckd_private *private;
struct alias_pav_group *group;
- struct dasd_uid *uid;
+ struct dasd_uid uid;
+ unsigned long flags;
private = (struct dasd_eckd_private *) device->private;
- uid = &private->uid;
- uid->type = lcu->uac->unit[uid->real_unit_addr].ua_type;
- uid->base_unit_addr = lcu->uac->unit[uid->real_unit_addr].base_ua;
- dasd_set_uid(device->cdev, &private->uid);
+
+ /* only lock if not already locked */
+ if (device != pos)
+ spin_lock_irqsave_nested(get_ccwdev_lock(device->cdev), flags,
+ CDEV_NESTED_SECOND);
+ private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type;
+ private->uid.base_unit_addr =
+ lcu->uac->unit[private->uid.real_unit_addr].base_ua;
+ uid = private->uid;
+
+ if (device != pos)
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
/* if we have no PAV anyway, we don't need to bother with PAV groups */
if (lcu->pav == NO_PAV) {
@@ -386,25 +400,25 @@ static int _add_device_to_lcu(struct alias_lcu *lcu,
return 0;
}
- group = _find_group(lcu, uid);
+ group = _find_group(lcu, &uid);
if (!group) {
group = kzalloc(sizeof(*group), GFP_ATOMIC);
if (!group)
return -ENOMEM;
- memcpy(group->uid.vendor, uid->vendor, sizeof(uid->vendor));
- memcpy(group->uid.serial, uid->serial, sizeof(uid->serial));
- group->uid.ssid = uid->ssid;
- if (uid->type == UA_BASE_DEVICE)
- group->uid.base_unit_addr = uid->real_unit_addr;
+ memcpy(group->uid.vendor, uid.vendor, sizeof(uid.vendor));
+ memcpy(group->uid.serial, uid.serial, sizeof(uid.serial));
+ group->uid.ssid = uid.ssid;
+ if (uid.type == UA_BASE_DEVICE)
+ group->uid.base_unit_addr = uid.real_unit_addr;
else
- group->uid.base_unit_addr = uid->base_unit_addr;
- memcpy(group->uid.vduit, uid->vduit, sizeof(uid->vduit));
+ group->uid.base_unit_addr = uid.base_unit_addr;
+ memcpy(group->uid.vduit, uid.vduit, sizeof(uid.vduit));
INIT_LIST_HEAD(&group->group);
INIT_LIST_HEAD(&group->baselist);
INIT_LIST_HEAD(&group->aliaslist);
list_add(&group->group, &lcu->grouplist);
}
- if (uid->type == UA_BASE_DEVICE)
+ if (uid.type == UA_BASE_DEVICE)
list_move(&device->alias_list, &group->baselist);
else
list_move(&device->alias_list, &group->aliaslist);
@@ -496,13 +510,13 @@ static int read_unit_address_configuration(struct dasd_device *device,
static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
{
- unsigned long flags;
+ unsigned long cdev_flags, lcu_flags;
struct alias_pav_group *pavgroup, *tempgroup;
struct dasd_device *device, *tempdev;
int i, rc;
struct dasd_eckd_private *private;
- spin_lock_irqsave(&lcu->lock, flags);
+ spin_lock_irqsave(&lcu->lock, lcu_flags);
list_for_each_entry_safe(pavgroup, tempgroup, &lcu->grouplist, group) {
list_for_each_entry_safe(device, tempdev, &pavgroup->baselist,
alias_list) {
@@ -519,13 +533,16 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
list_del(&pavgroup->group);
kfree(pavgroup);
}
- spin_unlock_irqrestore(&lcu->lock, flags);
+ spin_unlock_irqrestore(&lcu->lock, lcu_flags);
rc = read_unit_address_configuration(refdev, lcu);
if (rc)
return rc;
- spin_lock_irqsave(&lcu->lock, flags);
+ /* need to take cdev lock before lcu lock */
+ spin_lock_irqsave_nested(get_ccwdev_lock(refdev->cdev), cdev_flags,
+ CDEV_NESTED_FIRST);
+ spin_lock_irqsave(&lcu->lock, lcu_flags);
lcu->pav = NO_PAV;
for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
switch (lcu->uac->unit[i].ua_type) {
@@ -542,9 +559,10 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
alias_list) {
- _add_device_to_lcu(lcu, device);
+ _add_device_to_lcu(lcu, device, refdev);
}
- spin_unlock_irqrestore(&lcu->lock, flags);
+ spin_unlock_irqrestore(&lcu->lock, lcu_flags);
+ spin_unlock_irqrestore(get_ccwdev_lock(refdev->cdev), cdev_flags);
return 0;
}
@@ -622,15 +640,18 @@ int dasd_alias_add_device(struct dasd_device *device)
{
struct dasd_eckd_private *private;
struct alias_lcu *lcu;
- unsigned long flags;
+ unsigned long cdev_flags, lcu_flags;
int rc;
private = (struct dasd_eckd_private *) device->private;
lcu = private->lcu;
rc = 0;
- spin_lock_irqsave(&lcu->lock, flags);
+
+ /* need to take cdev lock before lcu lock */
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), cdev_flags);
+ spin_lock_irqsave(&lcu->lock, lcu_flags);
if (!(lcu->flags & UPDATE_PENDING)) {
- rc = _add_device_to_lcu(lcu, device);
+ rc = _add_device_to_lcu(lcu, device, device);
if (rc)
lcu->flags |= UPDATE_PENDING;
}
@@ -638,10 +659,19 @@ int dasd_alias_add_device(struct dasd_device *device)
list_move(&device->alias_list, &lcu->active_devices);
_schedule_lcu_update(lcu, device);
}
- spin_unlock_irqrestore(&lcu->lock, flags);
+ spin_unlock_irqrestore(&lcu->lock, lcu_flags);
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), cdev_flags);
return rc;
}
+int dasd_alias_update_add_device(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private;
+ private = (struct dasd_eckd_private *) device->private;
+ private->lcu->flags |= UPDATE_PENDING;
+ return dasd_alias_add_device(device);
+}
+
int dasd_alias_remove_device(struct dasd_device *device)
{
struct dasd_eckd_private *private;
@@ -740,19 +770,30 @@ static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
struct alias_pav_group *pavgroup;
struct dasd_device *device;
struct dasd_eckd_private *private;
+ unsigned long flags;
 /* active and inactive lists can contain alias as well as base devices */
list_for_each_entry(device, &lcu->active_devices, alias_list) {
private = (struct dasd_eckd_private *) device->private;
- if (private->uid.type != UA_BASE_DEVICE)
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ if (private->uid.type != UA_BASE_DEVICE) {
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
+ flags);
continue;
+ }
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
dasd_schedule_block_bh(device->block);
dasd_schedule_device_bh(device);
}
list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
private = (struct dasd_eckd_private *) device->private;
- if (private->uid.type != UA_BASE_DEVICE)
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ if (private->uid.type != UA_BASE_DEVICE) {
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
+ flags);
continue;
+ }
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
dasd_schedule_block_bh(device->block);
dasd_schedule_device_bh(device);
}
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index eff9c812c5c2..238965908019 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -49,7 +49,6 @@ struct dasd_devmap {
unsigned int devindex;
unsigned short features;
struct dasd_device *device;
- struct dasd_uid uid;
};
/*
@@ -936,42 +935,47 @@ dasd_device_status_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(status, 0444, dasd_device_status_show, NULL);
-static ssize_t
-dasd_alias_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t dasd_alias_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct dasd_devmap *devmap;
- int alias;
+ struct dasd_device *device;
+ struct dasd_uid uid;
- devmap = dasd_find_busid(dev_name(dev));
- spin_lock(&dasd_devmap_lock);
- if (IS_ERR(devmap) || strlen(devmap->uid.vendor) == 0) {
- spin_unlock(&dasd_devmap_lock);
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
return sprintf(buf, "0\n");
+
+ if (device->discipline && device->discipline->get_uid &&
+ !device->discipline->get_uid(device, &uid)) {
+ if (uid.type == UA_BASE_PAV_ALIAS ||
+ uid.type == UA_HYPER_PAV_ALIAS) {
+ dasd_put_device(device);
+ return sprintf(buf, "1\n");
+ }
}
- if (devmap->uid.type == UA_BASE_PAV_ALIAS ||
- devmap->uid.type == UA_HYPER_PAV_ALIAS)
- alias = 1;
- else
- alias = 0;
- spin_unlock(&dasd_devmap_lock);
- return sprintf(buf, alias ? "1\n" : "0\n");
+ dasd_put_device(device);
+
+ return sprintf(buf, "0\n");
}
static DEVICE_ATTR(alias, 0444, dasd_alias_show, NULL);
-static ssize_t
-dasd_vendor_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t dasd_vendor_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct dasd_devmap *devmap;
+ struct dasd_device *device;
+ struct dasd_uid uid;
char *vendor;
- devmap = dasd_find_busid(dev_name(dev));
- spin_lock(&dasd_devmap_lock);
- if (!IS_ERR(devmap) && strlen(devmap->uid.vendor) > 0)
- vendor = devmap->uid.vendor;
- else
- vendor = "";
- spin_unlock(&dasd_devmap_lock);
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ vendor = "";
+ if (IS_ERR(device))
+ return snprintf(buf, PAGE_SIZE, "%s\n", vendor);
+
+ if (device->discipline && device->discipline->get_uid &&
+ !device->discipline->get_uid(device, &uid))
+ if (strlen(uid.vendor) > 0)
+ vendor = uid.vendor;
+
+ dasd_put_device(device);
return snprintf(buf, PAGE_SIZE, "%s\n", vendor);
}
@@ -985,48 +989,48 @@ static DEVICE_ATTR(vendor, 0444, dasd_vendor_show, NULL);
static ssize_t
dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct dasd_devmap *devmap;
+ struct dasd_device *device;
+ struct dasd_uid uid;
char uid_string[UID_STRLEN];
char ua_string[3];
- struct dasd_uid *uid;
- devmap = dasd_find_busid(dev_name(dev));
- spin_lock(&dasd_devmap_lock);
- if (IS_ERR(devmap) || strlen(devmap->uid.vendor) == 0) {
- spin_unlock(&dasd_devmap_lock);
- return sprintf(buf, "\n");
- }
- uid = &devmap->uid;
- switch (uid->type) {
- case UA_BASE_DEVICE:
- sprintf(ua_string, "%02x", uid->real_unit_addr);
- break;
- case UA_BASE_PAV_ALIAS:
- sprintf(ua_string, "%02x", uid->base_unit_addr);
- break;
- case UA_HYPER_PAV_ALIAS:
- sprintf(ua_string, "xx");
- break;
- default:
- /* should not happen, treat like base device */
- sprintf(ua_string, "%02x", uid->real_unit_addr);
- break;
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ uid_string[0] = 0;
+ if (IS_ERR(device))
+ return snprintf(buf, PAGE_SIZE, "%s\n", uid_string);
+
+ if (device->discipline && device->discipline->get_uid &&
+ !device->discipline->get_uid(device, &uid)) {
+ switch (uid.type) {
+ case UA_BASE_DEVICE:
+ sprintf(ua_string, "%02x", uid.real_unit_addr);
+ break;
+ case UA_BASE_PAV_ALIAS:
+ sprintf(ua_string, "%02x", uid.base_unit_addr);
+ break;
+ case UA_HYPER_PAV_ALIAS:
+ sprintf(ua_string, "xx");
+ break;
+ default:
+ /* should not happen, treat like base device */
+ sprintf(ua_string, "%02x", uid.real_unit_addr);
+ break;
+ }
+
+ if (strlen(uid.vduit) > 0)
+ snprintf(uid_string, sizeof(uid_string),
+ "%s.%s.%04x.%s.%s",
+ uid.vendor, uid.serial, uid.ssid, ua_string,
+ uid.vduit);
+ else
+ snprintf(uid_string, sizeof(uid_string),
+ "%s.%s.%04x.%s",
+ uid.vendor, uid.serial, uid.ssid, ua_string);
}
- if (strlen(uid->vduit) > 0)
- snprintf(uid_string, sizeof(uid_string),
- "%s.%s.%04x.%s.%s",
- uid->vendor, uid->serial,
- uid->ssid, ua_string,
- uid->vduit);
- else
- snprintf(uid_string, sizeof(uid_string),
- "%s.%s.%04x.%s",
- uid->vendor, uid->serial,
- uid->ssid, ua_string);
- spin_unlock(&dasd_devmap_lock);
+ dasd_put_device(device);
+
return snprintf(buf, PAGE_SIZE, "%s\n", uid_string);
}
-
static DEVICE_ATTR(uid, 0444, dasd_uid_show, NULL);
/*
@@ -1094,50 +1098,6 @@ static struct attribute_group dasd_attr_group = {
};
/*
- * Return copy of the device unique identifier.
- */
-int
-dasd_get_uid(struct ccw_device *cdev, struct dasd_uid *uid)
-{
- struct dasd_devmap *devmap;
-
- devmap = dasd_find_busid(dev_name(&cdev->dev));
- if (IS_ERR(devmap))
- return PTR_ERR(devmap);
- spin_lock(&dasd_devmap_lock);
- *uid = devmap->uid;
- spin_unlock(&dasd_devmap_lock);
- return 0;
-}
-EXPORT_SYMBOL_GPL(dasd_get_uid);
-
-/*
- * Register the given device unique identifier into devmap struct.
- * In addition check if the related storage server subsystem ID is already
- * contained in the dasd_server_ssid_list. If subsystem ID is not contained,
- * create new entry.
- * Return 0 if server was already in serverlist,
- * 1 if the server was added successful
- * <0 in case of error.
- */
-int
-dasd_set_uid(struct ccw_device *cdev, struct dasd_uid *uid)
-{
- struct dasd_devmap *devmap;
-
- devmap = dasd_find_busid(dev_name(&cdev->dev));
- if (IS_ERR(devmap))
- return PTR_ERR(devmap);
-
- spin_lock(&dasd_devmap_lock);
- devmap->uid = *uid;
- spin_unlock(&dasd_devmap_lock);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(dasd_set_uid);
-
-/*
* Return value of the specified feature.
*/
int
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 0cb233116855..b2930faa8b78 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -692,18 +692,20 @@ dasd_eckd_cdl_reclen(int recid)
/*
* Generate device unique id that specifies the physical device.
*/
-static int dasd_eckd_generate_uid(struct dasd_device *device,
- struct dasd_uid *uid)
+static int dasd_eckd_generate_uid(struct dasd_device *device)
{
struct dasd_eckd_private *private;
+ struct dasd_uid *uid;
int count;
+ unsigned long flags;
private = (struct dasd_eckd_private *) device->private;
if (!private)
return -ENODEV;
if (!private->ned || !private->gneq)
return -ENODEV;
-
+ uid = &private->uid;
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
memset(uid, 0, sizeof(struct dasd_uid));
memcpy(uid->vendor, private->ned->HDA_manufacturer,
sizeof(uid->vendor) - 1);
@@ -726,9 +728,25 @@ static int dasd_eckd_generate_uid(struct dasd_device *device,
private->vdsneq->uit[count]);
}
}
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
return 0;
}
+static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
+{
+ struct dasd_eckd_private *private;
+ unsigned long flags;
+
+ if (device->private) {
+ private = (struct dasd_eckd_private *)device->private;
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ *uid = private->uid;
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+ return 0;
+ } else
+ return -EINVAL;
+}
+
static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
void *rcd_buffer,
struct ciw *ciw, __u8 lpm)
@@ -1088,6 +1106,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
{
struct dasd_eckd_private *private;
struct dasd_block *block;
+ struct dasd_uid temp_uid;
int is_known, rc;
int readonly;
@@ -1124,13 +1143,13 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
if (rc)
goto out_err1;
- /* Generate device unique id and register in devmap */
- rc = dasd_eckd_generate_uid(device, &private->uid);
+ /* Generate device unique id */
+ rc = dasd_eckd_generate_uid(device);
if (rc)
goto out_err1;
- dasd_set_uid(device->cdev, &private->uid);
- if (private->uid.type == UA_BASE_DEVICE) {
+ dasd_eckd_get_uid(device, &temp_uid);
+ if (temp_uid.type == UA_BASE_DEVICE) {
block = dasd_alloc_block();
if (IS_ERR(block)) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
@@ -1451,6 +1470,7 @@ static int dasd_eckd_ready_to_online(struct dasd_device *device)
static int dasd_eckd_online_to_ready(struct dasd_device *device)
{
+ cancel_work_sync(&device->reload_device);
return dasd_alias_remove_device(device);
};
@@ -1709,10 +1729,27 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
{
char mask;
char *sense = NULL;
+ struct dasd_eckd_private *private;
+ private = (struct dasd_eckd_private *) device->private;
/* first of all check for state change pending interrupt */
mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
if ((scsw_dstat(&irb->scsw) & mask) == mask) {
+ /* for alias devices only and not in offline processing */
+ if (!device->block && private->lcu &&
+ !test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+ /*
+ * the state change could be caused by an alias
+ * reassignment; remove the device from alias handling
+ * to prevent new requests from being scheduled on
+ * the wrong alias device
+ */
+ dasd_alias_remove_device(device);
+
+ /* schedule worker to reload device */
+ dasd_reload_device(device);
+ }
+
dasd_generic_handle_state_change(device);
return;
}
@@ -3259,7 +3296,7 @@ static void dasd_eckd_dump_sense(struct dasd_device *device,
dasd_eckd_dump_sense_ccw(device, req, irb);
}
-int dasd_eckd_pm_freeze(struct dasd_device *device)
+static int dasd_eckd_pm_freeze(struct dasd_device *device)
{
/*
* the device should be disconnected from our LCU structure
@@ -3272,7 +3309,7 @@ int dasd_eckd_pm_freeze(struct dasd_device *device)
return 0;
}
-int dasd_eckd_restore_device(struct dasd_device *device)
+static int dasd_eckd_restore_device(struct dasd_device *device)
{
struct dasd_eckd_private *private;
struct dasd_eckd_characteristics temp_rdc_data;
@@ -3287,15 +3324,16 @@ int dasd_eckd_restore_device(struct dasd_device *device)
if (rc)
goto out_err;
- /* Generate device unique id and register in devmap */
- rc = dasd_eckd_generate_uid(device, &private->uid);
- dasd_get_uid(device->cdev, &temp_uid);
+ dasd_eckd_get_uid(device, &temp_uid);
+ /* Generate device unique id */
+ rc = dasd_eckd_generate_uid(device);
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0)
dev_err(&device->cdev->dev, "The UID of the DASD has "
"changed\n");
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
if (rc)
goto out_err;
- dasd_set_uid(device->cdev, &private->uid);
/* register lcu with alias handling, enable PAV if this is a new lcu */
is_known = dasd_alias_make_device_known_to_lcu(device);
@@ -3336,6 +3374,56 @@ out_err:
return -1;
}
+static int dasd_eckd_reload_device(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private;
+ int rc, old_base;
+ char print_uid[60];
+ struct dasd_uid uid;
+ unsigned long flags;
+
+ private = (struct dasd_eckd_private *) device->private;
+
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ old_base = private->uid.base_unit_addr;
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+
+ /* Read Configuration Data */
+ rc = dasd_eckd_read_conf(device);
+ if (rc)
+ goto out_err;
+
+ rc = dasd_eckd_generate_uid(device);
+ if (rc)
+ goto out_err;
+ /*
+ * update unit address configuration and
+ * add device to alias management
+ */
+ dasd_alias_update_add_device(device);
+
+ dasd_eckd_get_uid(device, &uid);
+
+ if (old_base != uid.base_unit_addr) {
+ if (strlen(uid.vduit) > 0)
+ snprintf(print_uid, 60, "%s.%s.%04x.%02x.%s",
+ uid.vendor, uid.serial, uid.ssid,
+ uid.base_unit_addr, uid.vduit);
+ else
+ snprintf(print_uid, 60, "%s.%s.%04x.%02x",
+ uid.vendor, uid.serial, uid.ssid,
+ uid.base_unit_addr);
+
+ dev_info(&device->cdev->dev,
+ "An Alias device was reassigned to a new base device "
+ "with UID: %s\n", print_uid);
+ }
+ return 0;
+
+out_err:
+ return -1;
+}
+
static struct ccw_driver dasd_eckd_driver = {
.name = "dasd-eckd",
.owner = THIS_MODULE,
@@ -3389,6 +3477,8 @@ static struct dasd_discipline dasd_eckd_discipline = {
.ioctl = dasd_eckd_ioctl,
.freeze = dasd_eckd_pm_freeze,
.restore = dasd_eckd_restore_device,
+ .reload = dasd_eckd_reload_device,
+ .get_uid = dasd_eckd_get_uid,
};
static int __init
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index 864d53c04201..dd6385a5af14 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -426,7 +426,6 @@ struct alias_pav_group {
struct dasd_device *next;
};
-
struct dasd_eckd_private {
struct dasd_eckd_characteristics rdc_data;
u8 *conf_data;
@@ -463,4 +462,5 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *, struct irb *);
void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *);
void dasd_alias_lcu_setup_complete(struct dasd_device *);
void dasd_alias_wait_for_lcu_setup(struct dasd_device *);
+int dasd_alias_update_add_device(struct dasd_device *);
#endif /* DASD_ECKD_H */
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index a91d4a97d4f2..32fac186ba3f 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -81,6 +81,10 @@ struct dasd_block;
#define DASD_SIM_MSG_TO_OP 0x03
#define DASD_SIM_LOG 0x0C
+/* lock class for nested cdev lock */
+#define CDEV_NESTED_FIRST 1
+#define CDEV_NESTED_SECOND 2
+
/*
* SECTION: MACROs for klogd and s390 debug feature (dbf)
*/
@@ -229,6 +233,24 @@ struct dasd_ccw_req {
typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *);
/*
+ * Unique identifier for dasd device.
+ */
+#define UA_NOT_CONFIGURED 0x00
+#define UA_BASE_DEVICE 0x01
+#define UA_BASE_PAV_ALIAS 0x02
+#define UA_HYPER_PAV_ALIAS 0x03
+
+struct dasd_uid {
+ __u8 type;
+ char vendor[4];
+ char serial[15];
+ __u16 ssid;
+ __u8 real_unit_addr;
+ __u8 base_unit_addr;
+ char vduit[33];
+};
+
+/*
* the struct dasd_discipline is
* sth like a table of virtual functions, if you think of dasd_eckd
* inheriting dasd...
@@ -312,28 +334,15 @@ struct dasd_discipline {
/* suspend/resume functions */
int (*freeze) (struct dasd_device *);
int (*restore) (struct dasd_device *);
-};
-extern struct dasd_discipline *dasd_diag_discipline_pointer;
-
-/*
- * Unique identifier for dasd device.
- */
-#define UA_NOT_CONFIGURED 0x00
-#define UA_BASE_DEVICE 0x01
-#define UA_BASE_PAV_ALIAS 0x02
-#define UA_HYPER_PAV_ALIAS 0x03
+ /* reload device after state change */
+ int (*reload) (struct dasd_device *);
-struct dasd_uid {
- __u8 type;
- char vendor[4];
- char serial[15];
- __u16 ssid;
- __u8 real_unit_addr;
- __u8 base_unit_addr;
- char vduit[33];
+ int (*get_uid) (struct dasd_device *, struct dasd_uid *);
};
+extern struct dasd_discipline *dasd_diag_discipline_pointer;
+
/*
* Notification numbers for extended error reporting notifications:
* The DASD_EER_DISABLE notification is sent before a dasd_device (and it's
@@ -386,6 +395,7 @@ struct dasd_device {
struct tasklet_struct tasklet;
struct work_struct kick_work;
struct work_struct restore_device;
+ struct work_struct reload_device;
struct timer_list timer;
debug_info_t *debug_area;
@@ -582,6 +592,7 @@ void dasd_enable_device(struct dasd_device *);
void dasd_set_target_state(struct dasd_device *, int);
void dasd_kick_device(struct dasd_device *);
void dasd_restore_device(struct dasd_device *);
+void dasd_reload_device(struct dasd_device *);
void dasd_add_request_head(struct dasd_ccw_req *);
void dasd_add_request_tail(struct dasd_ccw_req *);
@@ -629,8 +640,6 @@ void dasd_devmap_exit(void);
struct dasd_device *dasd_create_device(struct ccw_device *);
void dasd_delete_device(struct dasd_device *);
-int dasd_get_uid(struct ccw_device *, struct dasd_uid *);
-int dasd_set_uid(struct ccw_device *, struct dasd_uid *);
int dasd_get_feature(struct ccw_device *, int);
int dasd_set_feature(struct ccw_device *, int, int);
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index 4e34d3686c23..40834f18754c 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -148,13 +148,12 @@ config VMLOGRDR
This driver depends on the IUCV support driver.
config VMCP
- tristate "Support for the z/VM CP interface (VM only)"
+ bool "Support for the z/VM CP interface"
depends on S390
help
Select this option if you want to be able to interact with the control
program on z/VM
-
config MONREADER
tristate "API for reading z/VM monitor service records"
depends on IUCV
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 0eabcca3c92d..857dfcb7b359 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -484,6 +484,7 @@ fs3270_open(struct inode *inode, struct file *filp)
raw3270_del_view(&fp->view);
goto out;
}
+ nonseekable_open(inode, filp);
filp->private_data = fp;
out:
mutex_unlock(&fs3270_mutex);
diff --git a/drivers/s390/char/sclp_cpi_sys.c b/drivers/s390/char/sclp_cpi_sys.c
index 62c2647f37f4..4a51e3f09689 100644
--- a/drivers/s390/char/sclp_cpi_sys.c
+++ b/drivers/s390/char/sclp_cpi_sys.c
@@ -102,7 +102,7 @@ static struct sclp_req *cpi_prepare_req(void)
/* set system name */
set_data(evb->system_name, system_name);
- /* set sytem level */
+ /* set system level */
evb->system_level = system_level;
/* set sysplex name */
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index 5bb59d36a6d4..04e532eec032 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -1,24 +1,20 @@
/*
- * Copyright IBM Corp. 2004,2007
+ * Copyright IBM Corp. 2004,2010
* Interface implementation for communication with the z/VM control program
- * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
*
+ * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
*
 * z/VM's CP offers the possibility to issue commands via the diagnose code 8;
* this driver implements a character device that issues these commands and
* returns the answer of CP.
-
+ *
* The idea of this driver is based on cpint from Neale Ferguson and #CP in CMS
*/
-#define KMSG_COMPONENT "vmcp"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
-#include <linux/module.h>
#include <linux/slab.h>
#include <asm/compat.h>
#include <asm/cpcmd.h>
@@ -26,10 +22,6 @@
#include <asm/uaccess.h>
#include "vmcp.h"
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Christian Borntraeger <borntraeger@de.ibm.com>");
-MODULE_DESCRIPTION("z/VM CP interface");
-
static debug_info_t *vmcp_debug;
static int vmcp_open(struct inode *inode, struct file *file)
@@ -197,11 +189,8 @@ static int __init vmcp_init(void)
{
int ret;
- if (!MACHINE_IS_VM) {
- pr_warning("The z/VM CP interface device driver cannot be "
- "loaded without z/VM\n");
- return -ENODEV;
- }
+ if (!MACHINE_IS_VM)
+ return 0;
vmcp_debug = debug_register("vmcp", 1, 1, 240);
if (!vmcp_debug)
@@ -214,19 +203,8 @@ static int __init vmcp_init(void)
}
ret = misc_register(&vmcp_dev);
- if (ret) {
+ if (ret)
debug_unregister(vmcp_debug);
- return ret;
- }
-
- return 0;
-}
-
-static void __exit vmcp_exit(void)
-{
- misc_deregister(&vmcp_dev);
- debug_unregister(vmcp_debug);
+ return ret;
}
-
-module_init(vmcp_init);
-module_exit(vmcp_exit);
+device_initcall(vmcp_init);
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 7217966f7d31..f5ea3384a4b9 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -445,7 +445,7 @@ static int zcore_memmap_open(struct inode *inode, struct file *filp)
}
kfree(chunk_array);
filp->private_data = buf;
- return 0;
+ return nonseekable_open(inode, filp);
}
static int zcore_memmap_release(struct inode *inode, struct file *filp)
@@ -473,7 +473,7 @@ static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf,
static int zcore_reipl_open(struct inode *inode, struct file *filp)
{
- return 0;
+ return nonseekable_open(inode, filp);
}
static int zcore_reipl_release(struct inode *inode, struct file *filp)
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index 3b6f4adc5094..a83877c664a6 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -803,6 +803,7 @@ static long chsc_ioctl(struct file *filp, unsigned int cmd,
static const struct file_operations chsc_fops = {
.owner = THIS_MODULE,
+ .open = nonseekable_open,
.unlocked_ioctl = chsc_ioctl,
.compat_ioctl = chsc_ioctl,
};
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 511649115bd7..ac94ac751459 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -648,6 +648,8 @@ static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
+ struct cpuid cpu_id;
+
if (css_general_characteristics.mcss) {
css->global_pgid.pgid_high.ext_cssid.version = 0x80;
css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
@@ -658,8 +660,9 @@ css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
css->global_pgid.pgid_high.cpu_addr = 0;
#endif
}
- css->global_pgid.cpu_id = S390_lowcore.cpu_id.ident;
- css->global_pgid.cpu_model = S390_lowcore.cpu_id.machine;
+ get_cpu_id(&cpu_id);
+ css->global_pgid.cpu_id = cpu_id.ident;
+ css->global_pgid.cpu_model = cpu_id.machine;
css->global_pgid.tod_high = tod_high;
}
@@ -1062,6 +1065,7 @@ static ssize_t cio_settle_write(struct file *file, const char __user *buf,
}
static const struct file_operations cio_settle_proc_fops = {
+ .open = nonseekable_open,
.write = cio_settle_write,
};
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 304caf549973..41e0aaefafd5 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -302,7 +302,7 @@ static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
static int zcrypt_open(struct inode *inode, struct file *filp)
{
atomic_inc(&zcrypt_open_count);
- return 0;
+ return nonseekable_open(inode, filp);
}
/**
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 6a801dc3bf8e..904b1f3567b4 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -608,7 +608,6 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
static void qeth_l2_set_multicast_list(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
- struct dev_addr_list *dm;
struct netdev_hw_addr *ha;
if (card->info.type == QETH_CARD_TYPE_OSN)
@@ -620,8 +619,8 @@ static void qeth_l2_set_multicast_list(struct net_device *dev)
return;
qeth_l2_del_all_mc(card);
spin_lock_bh(&card->mclock);
- for (dm = dev->mc_list; dm; dm = dm->next)
- qeth_l2_add_mc(card, dm->da_addr, 0);
+ netdev_for_each_mc_addr(ha, dev)
+ qeth_l2_add_mc(card, ha->addr, 0);
netdev_for_each_uc_addr(ha, dev)
qeth_l2_add_mc(card, ha->addr, 1);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index fc6ca1da8b98..35b6d3d2bd73 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1929,7 +1929,7 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
in6_dev = in6_dev_get(vlan_group_get_device(card->vlangrp, vid));
if (!in6_dev)
return;
- for (ifa = in6_dev->addr_list; ifa; ifa = ifa->lst_next) {
+ list_for_each_entry(ifa, &in6_dev->addr_list, if_list) {
addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
if (addr) {
memcpy(&addr->u.a6.addr, &ifa->addr,
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 1e6183a86ce5..e331df2122f7 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -425,7 +425,8 @@ int zfcp_status_read_refill(struct zfcp_adapter *adapter)
{
while (atomic_read(&adapter->stat_miss) > 0)
if (zfcp_fsf_status_read(adapter->qdio)) {
- if (atomic_read(&adapter->stat_miss) >= 16) {
+ if (atomic_read(&adapter->stat_miss) >=
+ adapter->stat_read_buf_num) {
zfcp_erp_adapter_reopen(adapter, 0, "axsref1",
NULL);
return 1;
@@ -545,6 +546,10 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
&zfcp_sysfs_adapter_attrs))
goto failed;
+ /* report size limit per scatter-gather segment */
+ adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN;
+ adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;
+
if (!zfcp_adapter_scsi_register(adapter))
return adapter;
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
index 25d9e0ae9c57..1a2db0a35737 100644
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -254,6 +254,7 @@ static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
}
static const struct file_operations zfcp_cfdc_fops = {
+ .open = nonseekable_open,
.unlocked_ioctl = zfcp_cfdc_dev_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = zfcp_cfdc_dev_ioctl
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 7131c7db1f04..9fa1b064893e 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -44,23 +44,6 @@ struct zfcp_reqlist;
/********************* SCSI SPECIFIC DEFINES *********************************/
#define ZFCP_SCSI_ER_TIMEOUT (10*HZ)
-/********************* CIO/QDIO SPECIFIC DEFINES *****************************/
-
-/* DMQ bug workaround: don't use last SBALE */
-#define ZFCP_MAX_SBALES_PER_SBAL (QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
-
-/* index of last SBALE (with respect to DMQ bug workaround) */
-#define ZFCP_LAST_SBALE_PER_SBAL (ZFCP_MAX_SBALES_PER_SBAL - 1)
-
-/* max. number of (data buffer) SBALEs in largest SBAL chain */
-#define ZFCP_MAX_SBALES_PER_REQ \
- (FSF_MAX_SBALS_PER_REQ * ZFCP_MAX_SBALES_PER_SBAL - 2)
- /* request ID + QTCB in SBALE 0 + 1 of first SBAL in chain */
-
-#define ZFCP_MAX_SECTORS (ZFCP_MAX_SBALES_PER_REQ * 8)
- /* max. number of (data buffer) SBALEs in largest SBAL chain
- multiplied with number of sectors per 4k block */
-
/********************* FSF SPECIFIC DEFINES *********************************/
/* ATTENTION: value must not be used by hardware */
@@ -181,6 +164,7 @@ struct zfcp_adapter {
stack abort/command
completion races */
atomic_t stat_miss; /* # missing status reads*/
+ unsigned int stat_read_buf_num;
struct work_struct stat_work;
atomic_t status; /* status of this adapter */
struct list_head erp_ready_head; /* error recovery for this
@@ -205,6 +189,7 @@ struct zfcp_adapter {
struct work_struct scan_work;
struct service_level service_level;
struct workqueue_struct *work_queue;
+ struct device_dma_parameters dma_parms;
};
struct zfcp_port {
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 0be5e7ea2828..e3dbeda97179 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -714,7 +714,7 @@ static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act)
if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED)
return ZFCP_ERP_FAILED;
- atomic_set(&act->adapter->stat_miss, 16);
+ atomic_set(&act->adapter->stat_miss, act->adapter->stat_read_buf_num);
if (zfcp_status_read_refill(act->adapter))
return ZFCP_ERP_FAILED;
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 8786a79c7f8f..48a8f93b72f5 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -3,7 +3,7 @@
*
* External function declarations.
*
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
*/
#ifndef ZFCP_EXT_H
@@ -143,9 +143,9 @@ extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int);
/* zfcp_qdio.c */
extern int zfcp_qdio_setup(struct zfcp_adapter *);
extern void zfcp_qdio_destroy(struct zfcp_qdio *);
+extern int zfcp_qdio_sbal_get(struct zfcp_qdio *);
extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_qdio_req *);
-extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *,
- struct zfcp_qdio_req *, unsigned long,
+extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, struct zfcp_qdio_req *,
struct scatterlist *, int);
extern int zfcp_qdio_open(struct zfcp_qdio *);
extern void zfcp_qdio_close(struct zfcp_qdio *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 2a1cbb74b99b..6f8ab43a4856 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -400,7 +400,7 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
struct zfcp_adapter *adapter = port->adapter;
int ret;
- adisc = kmem_cache_alloc(zfcp_data.adisc_cache, GFP_ATOMIC);
+ adisc = kmem_cache_zalloc(zfcp_data.adisc_cache, GFP_ATOMIC);
if (!adisc)
return -ENOMEM;
@@ -493,7 +493,7 @@ static struct zfcp_fc_gpn_ft *zfcp_alloc_sg_env(int buf_num)
if (!gpn_ft)
return NULL;
- req = kmem_cache_alloc(zfcp_data.gpn_ft_cache, GFP_KERNEL);
+ req = kmem_cache_zalloc(zfcp_data.gpn_ft_cache, GFP_KERNEL);
if (!req) {
kfree(gpn_ft);
gpn_ft = NULL;
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index b3b1d2f79398..9ac6a6e4a604 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -496,6 +496,7 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
adapter->hydra_version = bottom->adapter_type;
adapter->timer_ticks = bottom->timer_interval;
+ adapter->stat_read_buf_num = max(bottom->status_read_buf_num, (u16)16);
if (fc_host_permanent_port_name(shost) == -1)
fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
@@ -640,37 +641,6 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
}
}
-static int zfcp_fsf_sbal_check(struct zfcp_qdio *qdio)
-{
- struct zfcp_qdio_queue *req_q = &qdio->req_q;
-
- spin_lock_bh(&qdio->req_q_lock);
- if (atomic_read(&req_q->count))
- return 1;
- spin_unlock_bh(&qdio->req_q_lock);
- return 0;
-}
-
-static int zfcp_fsf_req_sbal_get(struct zfcp_qdio *qdio)
-{
- struct zfcp_adapter *adapter = qdio->adapter;
- long ret;
-
- spin_unlock_bh(&qdio->req_q_lock);
- ret = wait_event_interruptible_timeout(qdio->req_q_wq,
- zfcp_fsf_sbal_check(qdio), 5 * HZ);
- if (ret > 0)
- return 0;
- if (!ret) {
- atomic_inc(&qdio->req_q_full);
- /* assume hanging outbound queue, try queue recovery */
- zfcp_erp_adapter_reopen(adapter, 0, "fsrsg_1", NULL);
- }
-
- spin_lock_bh(&qdio->req_q_lock);
- return -EIO;
-}
-
static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
{
struct zfcp_fsf_req *req;
@@ -705,10 +675,9 @@ static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
}
static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
- u32 fsf_cmd, mempool_t *pool)
+ u32 fsf_cmd, u32 sbtype,
+ mempool_t *pool)
{
- struct qdio_buffer_element *sbale;
- struct zfcp_qdio_queue *req_q = &qdio->req_q;
struct zfcp_adapter *adapter = qdio->adapter;
struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);
@@ -725,14 +694,6 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
req->adapter = adapter;
req->fsf_command = fsf_cmd;
req->req_id = adapter->req_no;
- req->qdio_req.sbal_number = 1;
- req->qdio_req.sbal_first = req_q->first;
- req->qdio_req.sbal_last = req_q->first;
- req->qdio_req.sbale_curr = 1;
-
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].addr = (void *) req->req_id;
- sbale[0].flags |= SBAL_FLAGS0_COMMAND;
if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
if (likely(pool))
@@ -753,10 +714,11 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
req->qtcb->header.req_handle = req->req_id;
req->qtcb->header.fsf_command = req->fsf_command;
- sbale[1].addr = (void *) req->qtcb;
- sbale[1].length = sizeof(struct fsf_qtcb);
}
+ zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype,
+ req->qtcb, sizeof(struct fsf_qtcb));
+
if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) {
zfcp_fsf_req_free(req);
return ERR_PTR(-EIO);
@@ -803,24 +765,19 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
struct zfcp_adapter *adapter = qdio->adapter;
struct zfcp_fsf_req *req;
struct fsf_status_read_buffer *sr_buf;
- struct qdio_buffer_element *sbale;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
- req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS,
+ req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS, 0,
adapter->pool.status_read_req);
if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out;
}
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY;
- req->qdio_req.sbale_curr = 2;
-
sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC);
if (!sr_buf) {
retval = -ENOMEM;
@@ -828,9 +785,9 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
}
memset(sr_buf, 0, sizeof(*sr_buf));
req->data = sr_buf;
- sbale = zfcp_qdio_sbale_curr(qdio, &req->qdio_req);
- sbale->addr = (void *) sr_buf;
- sbale->length = sizeof(*sr_buf);
+
+ zfcp_qdio_fill_next(qdio, &req->qdio_req, sr_buf, sizeof(*sr_buf));
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
retval = zfcp_fsf_req_send(req);
if (retval)
@@ -907,14 +864,14 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
struct zfcp_unit *unit)
{
- struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req = NULL;
struct zfcp_qdio *qdio = unit->port->adapter->qdio;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
+ SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.scsi_abort);
if (IS_ERR(req)) {
req = NULL;
@@ -925,9 +882,7 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
ZFCP_STATUS_COMMON_UNBLOCKED)))
goto out_error_free;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->data = unit;
req->handler = zfcp_fsf_abort_fcp_command_handler;
@@ -996,21 +951,14 @@ skip_fsfstatus:
ct->handler(ct->handler_data);
}
-static void zfcp_fsf_setup_ct_els_unchained(struct qdio_buffer_element *sbale,
+static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio,
+ struct zfcp_qdio_req *q_req,
struct scatterlist *sg_req,
struct scatterlist *sg_resp)
{
- sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ;
- sbale[2].addr = sg_virt(sg_req);
- sbale[2].length = sg_req->length;
- sbale[3].addr = sg_virt(sg_resp);
- sbale[3].length = sg_resp->length;
- sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
-}
-
-static int zfcp_fsf_one_sbal(struct scatterlist *sg)
-{
- return sg_is_last(sg) && sg->length <= PAGE_SIZE;
+ zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_req), sg_req->length);
+ zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_resp), sg_resp->length);
+ zfcp_qdio_set_sbale_last(qdio, q_req);
}
static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
@@ -1019,35 +967,34 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
int max_sbals)
{
struct zfcp_adapter *adapter = req->adapter;
- struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(adapter->qdio,
- &req->qdio_req);
u32 feat = adapter->adapter_features;
int bytes;
if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) {
- if (!zfcp_fsf_one_sbal(sg_req) || !zfcp_fsf_one_sbal(sg_resp))
+ if (!zfcp_qdio_sg_one_sbale(sg_req) ||
+ !zfcp_qdio_sg_one_sbale(sg_resp))
return -EOPNOTSUPP;
- zfcp_fsf_setup_ct_els_unchained(sbale, sg_req, sg_resp);
+ zfcp_fsf_setup_ct_els_unchained(adapter->qdio, &req->qdio_req,
+ sg_req, sg_resp);
return 0;
}
/* use single, unchained SBAL if it can hold the request */
- if (zfcp_fsf_one_sbal(sg_req) && zfcp_fsf_one_sbal(sg_resp)) {
- zfcp_fsf_setup_ct_els_unchained(sbale, sg_req, sg_resp);
+ if (zfcp_qdio_sg_one_sbale(sg_req) || zfcp_qdio_sg_one_sbale(sg_resp)) {
+ zfcp_fsf_setup_ct_els_unchained(adapter->qdio, &req->qdio_req,
+ sg_req, sg_resp);
return 0;
}
bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req,
- SBAL_FLAGS0_TYPE_WRITE_READ,
sg_req, max_sbals);
if (bytes <= 0)
return -EIO;
req->qtcb->bottom.support.req_buf_length = bytes;
- req->qdio_req.sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
+ zfcp_qdio_skip_to_last_sbale(&req->qdio_req);
bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req,
- SBAL_FLAGS0_TYPE_WRITE_READ,
sg_resp, max_sbals);
req->qtcb->bottom.support.resp_buf_length = bytes;
if (bytes <= 0)
@@ -1091,10 +1038,11 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
int ret = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
- req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC, pool);
+ req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC,
+ SBAL_FLAGS0_TYPE_WRITE_READ, pool);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
@@ -1103,7 +1051,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp,
- FSF_MAX_SBALS_PER_REQ, timeout);
+ ZFCP_FSF_MAX_SBALS_PER_REQ, timeout);
if (ret)
goto failed_send;
@@ -1187,10 +1135,11 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
int ret = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
- req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS, NULL);
+ req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS,
+ SBAL_FLAGS0_TYPE_WRITE_READ, NULL);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
@@ -1224,16 +1173,16 @@ out:
int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
{
- struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req;
struct zfcp_qdio *qdio = erp_action->adapter->qdio;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
+ SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
@@ -1242,9 +1191,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->qtcb->bottom.config.feature_selection =
FSF_FEATURE_CFDC |
@@ -1269,24 +1216,22 @@ out:
int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
struct fsf_qtcb_bottom_config *data)
{
- struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req = NULL;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out_unlock;
- req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, NULL);
+ req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
+ SBAL_FLAGS0_TYPE_READ, NULL);
if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out_unlock;
}
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_exchange_config_data_handler;
req->qtcb->bottom.config.feature_selection =
@@ -1320,7 +1265,6 @@ out_unlock:
int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
{
struct zfcp_qdio *qdio = erp_action->adapter->qdio;
- struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req;
int retval = -EIO;
@@ -1328,10 +1272,11 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
return -EOPNOTSUPP;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
+ SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
@@ -1340,9 +1285,7 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_exchange_port_data_handler;
req->erp_action = erp_action;
@@ -1368,7 +1311,6 @@ out:
int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
struct fsf_qtcb_bottom_port *data)
{
- struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req = NULL;
int retval = -EIO;
@@ -1376,10 +1318,11 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
return -EOPNOTSUPP;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out_unlock;
- req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, NULL);
+ req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
+ SBAL_FLAGS0_TYPE_READ, NULL);
if (IS_ERR(req)) {
retval = PTR_ERR(req);
@@ -1389,9 +1332,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
if (data)
req->data = data;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_exchange_port_data_handler;
zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
@@ -1485,17 +1426,17 @@ out:
*/
int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
{
- struct qdio_buffer_element *sbale;
struct zfcp_qdio *qdio = erp_action->adapter->qdio;
struct zfcp_port *port = erp_action->port;
struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
+ SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
@@ -1504,9 +1445,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_open_port_handler;
hton24(req->qtcb->bottom.support.d_id, port->d_id);
@@ -1556,16 +1495,16 @@ static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
*/
int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
{
- struct qdio_buffer_element *sbale;
struct zfcp_qdio *qdio = erp_action->adapter->qdio;
struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
+ SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
@@ -1574,9 +1513,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_close_port_handler;
req->data = erp_action->port;
@@ -1633,16 +1570,16 @@ out:
*/
int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
{
- struct qdio_buffer_element *sbale;
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
+ SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (unlikely(IS_ERR(req))) {
@@ -1651,9 +1588,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_open_wka_port_handler;
hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
@@ -1688,16 +1623,16 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
*/
int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
{
- struct qdio_buffer_element *sbale;
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
+ SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (unlikely(IS_ERR(req))) {
@@ -1706,9 +1641,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_close_wka_port_handler;
req->data = wka_port;
@@ -1782,16 +1715,16 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
*/
int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
{
- struct qdio_buffer_element *sbale;
struct zfcp_qdio *qdio = erp_action->adapter->qdio;
struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
+ SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
@@ -1800,9 +1733,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->data = erp_action->port;
req->qtcb->header.port_handle = erp_action->port->handle;
@@ -1954,17 +1885,17 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
*/
int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
{
- struct qdio_buffer_element *sbale;
struct zfcp_adapter *adapter = erp_action->adapter;
struct zfcp_qdio *qdio = adapter->qdio;
struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
+ SBAL_FLAGS0_TYPE_READ,
adapter->pool.erp_req);
if (IS_ERR(req)) {
@@ -1973,9 +1904,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->qtcb->header.port_handle = erp_action->port->handle;
req->qtcb->bottom.support.fcp_lun = erp_action->unit->fcp_lun;
@@ -2041,16 +1970,16 @@ static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req)
*/
int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
{
- struct qdio_buffer_element *sbale;
struct zfcp_qdio *qdio = erp_action->adapter->qdio;
struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
+ SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
@@ -2059,9 +1988,7 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->qtcb->header.port_handle = erp_action->port->handle;
req->qtcb->header.lun_handle = erp_action->unit->handle;
@@ -2289,8 +2216,11 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
goto out;
}
+ if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE)
+ sbtype = SBAL_FLAGS0_TYPE_WRITE;
+
req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
- adapter->pool.scsi_req);
+ sbtype, adapter->pool.scsi_req);
if (IS_ERR(req)) {
retval = PTR_ERR(req);
@@ -2298,7 +2228,6 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- get_device(&unit->dev);
req->unit = unit;
req->data = scsi_cmnd;
req->handler = zfcp_fsf_send_fcp_command_handler;
@@ -2323,20 +2252,21 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
break;
case DMA_TO_DEVICE:
req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE;
- sbtype = SBAL_FLAGS0_TYPE_WRITE;
break;
case DMA_BIDIRECTIONAL:
goto failed_scsi_cmnd;
}
+ get_device(&unit->dev);
+
fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
- real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sbtype,
+ real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
scsi_sglist(scsi_cmnd),
- FSF_MAX_SBALS_PER_REQ);
+ ZFCP_FSF_MAX_SBALS_PER_REQ);
if (unlikely(real_bytes < 0)) {
- if (req->qdio_req.sbal_number >= FSF_MAX_SBALS_PER_REQ) {
+ if (req->qdio_req.sbal_number >= ZFCP_FSF_MAX_SBALS_PER_REQ) {
dev_err(&adapter->ccw_device->dev,
"Oversize data package, unit 0x%016Lx "
"on port 0x%016Lx closed\n",
@@ -2371,7 +2301,6 @@ out:
*/
struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
{
- struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req = NULL;
struct fcp_cmnd *fcp_cmnd;
struct zfcp_qdio *qdio = unit->port->adapter->qdio;
@@ -2381,10 +2310,11 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
return NULL;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
+ SBAL_FLAGS0_TYPE_WRITE,
qdio->adapter->pool.scsi_req);
if (IS_ERR(req)) {
@@ -2401,9 +2331,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
req->qtcb->bottom.io.service_class = FSF_CLASS_3;
req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
zfcp_fc_fcp_tm(fcp_cmnd, unit->device, tm_flags);
@@ -2432,7 +2360,6 @@ static void zfcp_fsf_control_file_handler(struct zfcp_fsf_req *req)
struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
struct zfcp_fsf_cfdc *fsf_cfdc)
{
- struct qdio_buffer_element *sbale;
struct zfcp_qdio *qdio = adapter->qdio;
struct zfcp_fsf_req *req = NULL;
struct fsf_qtcb_bottom_support *bottom;
@@ -2453,10 +2380,10 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
}
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
- req = zfcp_fsf_req_create(qdio, fsf_cfdc->command, NULL);
+ req = zfcp_fsf_req_create(qdio, fsf_cfdc->command, direction, NULL);
if (IS_ERR(req)) {
retval = -EPERM;
goto out;
@@ -2464,16 +2391,13 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
req->handler = zfcp_fsf_control_file_handler;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= direction;
-
bottom = &req->qtcb->bottom.support;
bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
bottom->option = fsf_cfdc->option;
bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
- direction, fsf_cfdc->sg,
- FSF_MAX_SBALS_PER_REQ);
+ fsf_cfdc->sg,
+ ZFCP_FSF_MAX_SBALS_PER_REQ);
if (bytes != ZFCP_CFDC_MAX_SIZE) {
zfcp_fsf_req_free(req);
goto out;
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index b3de682b64cf..519083fd6e89 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -3,7 +3,7 @@
*
* Interface to the FSF support functions.
*
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
*/
#ifndef FSF_H
@@ -152,7 +152,12 @@
#define FSF_CLASS_3 0x00000003
/* SBAL chaining */
-#define FSF_MAX_SBALS_PER_REQ 36
+#define ZFCP_FSF_MAX_SBALS_PER_REQ 36
+
+/* max. number of (data buffer) SBALEs in the largest SBAL chain; the
+ * request ID and QTCB occupy SBALEs 0 and 1 of the first SBAL in the chain */
+#define ZFCP_FSF_MAX_SBALES_PER_REQ \
+ (ZFCP_FSF_MAX_SBALS_PER_REQ * ZFCP_QDIO_MAX_SBALES_PER_SBAL - 2)
/* logging space behind QTCB */
#define FSF_QTCB_LOG_SIZE 1024
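The new ZFCP_FSF_MAX_SBALES_PER_REQ value follows directly from the chaining limit above. A stand-alone sketch of the arithmetic, assuming QDIO_MAX_ELEMENTS_PER_BUFFER is 128 as on s390 (the exact constant lives in <asm/qdio.h> and is not shown in this patch):

#include <stdio.h>

/* assumed: QDIO_MAX_ELEMENTS_PER_BUFFER == 128 (from <asm/qdio.h>) */
#define QDIO_MAX_ELEMENTS_PER_BUFFER	128
#define ZFCP_QDIO_MAX_SBALES_PER_SBAL	(QDIO_MAX_ELEMENTS_PER_BUFFER - 1) /* DMQ workaround */
#define ZFCP_FSF_MAX_SBALS_PER_REQ	36

int main(void)
{
	/* SBALEs 0 and 1 of the first SBAL carry the request ID and the QTCB */
	int data_sbales = ZFCP_FSF_MAX_SBALS_PER_REQ *
			  ZFCP_QDIO_MAX_SBALES_PER_SBAL - 2;

	printf("max data SBALEs per request: %d\n", data_sbales);	/* 4570 */
	return 0;
}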
@@ -361,7 +366,7 @@ struct fsf_qtcb_bottom_config {
u32 adapter_type;
u8 res0;
u8 peer_d_id[3];
- u8 res1[2];
+ u16 status_read_buf_num;
u16 timer_interval;
u8 res2[9];
u8 s_id[3];
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index dbfa312a7f50..28117e130e2c 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -3,7 +3,7 @@
*
* Setup and helper functions to access QDIO.
*
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
*/
#define KMSG_COMPONENT "zfcp"
@@ -151,8 +151,7 @@ static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
}
static struct qdio_buffer_element *
-zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
- unsigned long sbtype)
+zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
struct qdio_buffer_element *sbale;
@@ -180,17 +179,16 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
/* set storage-block type for new SBAL */
sbale = zfcp_qdio_sbale_curr(qdio, q_req);
- sbale->flags |= sbtype;
+ sbale->flags |= q_req->sbtype;
return sbale;
}
static struct qdio_buffer_element *
-zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
- unsigned int sbtype)
+zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
- if (q_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
- return zfcp_qdio_sbal_chain(qdio, q_req, sbtype);
+ if (q_req->sbale_curr == ZFCP_QDIO_LAST_SBALE_PER_SBAL)
+ return zfcp_qdio_sbal_chain(qdio, q_req);
q_req->sbale_curr++;
return zfcp_qdio_sbale_curr(qdio, q_req);
}
@@ -206,62 +204,38 @@ static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio,
zfcp_qdio_zero_sbals(sbal, first, count);
}
-static int zfcp_qdio_fill_sbals(struct zfcp_qdio *qdio,
- struct zfcp_qdio_req *q_req,
- unsigned int sbtype, void *start_addr,
- unsigned int total_length)
-{
- struct qdio_buffer_element *sbale;
- unsigned long remaining, length;
- void *addr;
-
- /* split segment up */
- for (addr = start_addr, remaining = total_length; remaining > 0;
- addr += length, remaining -= length) {
- sbale = zfcp_qdio_sbale_next(qdio, q_req, sbtype);
- if (!sbale) {
- atomic_inc(&qdio->req_q_full);
- zfcp_qdio_undo_sbals(qdio, q_req);
- return -EINVAL;
- }
-
- /* new piece must not exceed next page boundary */
- length = min(remaining,
- (PAGE_SIZE - ((unsigned long)addr &
- (PAGE_SIZE - 1))));
- sbale->addr = addr;
- sbale->length = length;
- }
- return 0;
-}
-
/**
* zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
- * @fsf_req: request to be processed
- * @sbtype: SBALE flags
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_qdio_req
* @sg: scatter-gather list
* @max_sbals: upper bound for number of SBALs to be used
* Returns: number of bytes, or error (negative)
*/
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
- unsigned long sbtype, struct scatterlist *sg,
- int max_sbals)
+ struct scatterlist *sg, int max_sbals)
{
struct qdio_buffer_element *sbale;
- int retval, bytes = 0;
+ int bytes = 0;
/* figure out last allowed SBAL */
zfcp_qdio_sbal_limit(qdio, q_req, max_sbals);
/* set storage-block type for this request */
sbale = zfcp_qdio_sbale_req(qdio, q_req);
- sbale->flags |= sbtype;
+ sbale->flags |= q_req->sbtype;
for (; sg; sg = sg_next(sg)) {
- retval = zfcp_qdio_fill_sbals(qdio, q_req, sbtype,
- sg_virt(sg), sg->length);
- if (retval < 0)
- return retval;
+ sbale = zfcp_qdio_sbale_next(qdio, q_req);
+ if (!sbale) {
+ atomic_inc(&qdio->req_q_full);
+ zfcp_qdio_undo_sbals(qdio, q_req);
+ return -EINVAL;
+ }
+
+ sbale->addr = sg_virt(sg);
+ sbale->length = sg->length;
+
bytes += sg->length;
}
@@ -272,6 +246,46 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
return bytes;
}
+static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
+{
+ struct zfcp_qdio_queue *req_q = &qdio->req_q;
+
+ spin_lock_bh(&qdio->req_q_lock);
+ if (atomic_read(&req_q->count))
+ return 1;
+ spin_unlock_bh(&qdio->req_q_lock);
+ return 0;
+}
+
+/**
+ * zfcp_qdio_sbal_get - get free sbal in request queue, wait if necessary
+ * @qdio: pointer to struct zfcp_qdio
+ *
+ * The req_q_lock must be held by the caller of this function, and
+ * this function may only be called from process context; it will
+ * sleep when waiting for a free sbal.
+ *
+ * Returns: 0 on success, -EIO if there is no free sbal after waiting.
+ */
+int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
+{
+ long ret;
+
+ spin_unlock_bh(&qdio->req_q_lock);
+ ret = wait_event_interruptible_timeout(qdio->req_q_wq,
+ zfcp_qdio_sbal_check(qdio), 5 * HZ);
+ if (ret > 0)
+ return 0;
+ if (!ret) {
+ atomic_inc(&qdio->req_q_full);
+ /* assume hanging outbound queue, try queue recovery */
+ zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1", NULL);
+ }
+
+ spin_lock_bh(&qdio->req_q_lock);
+ return -EIO;
+}
+
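For context, a hedged caller-side sketch of the contract documented above: the req_q_lock is held when zfcp_qdio_sbal_get() is called and when it returns, but the function drops and re-acquires the lock internally while sleeping for up to five seconds, and on -EIO the adapter has already been scheduled for reopen. Only the zfcp symbols are real; the surrounding function is illustrative.

/* illustrative sketch only, assuming the zfcp_qdio declarations above */
static int example_wait_for_sbal(struct zfcp_qdio *qdio)
{
	int ret = -EIO;

	spin_lock_bh(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))	/* may sleep; lock dropped and re-taken inside */
		goto out;
	/* ... build and send the request while the lock is still held ... */
	ret = 0;
out:
	spin_unlock_bh(&qdio->req_q_lock);
	return ret;
}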
/**
* zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO
* @qdio: pointer to struct zfcp_qdio
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
index 8cca54631e1e..138fba577b48 100644
--- a/drivers/s390/scsi/zfcp_qdio.h
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -11,6 +11,14 @@
#include <asm/qdio.h>
+#define ZFCP_QDIO_SBALE_LEN PAGE_SIZE
+
+/* DMQ bug workaround: don't use last SBALE */
+#define ZFCP_QDIO_MAX_SBALES_PER_SBAL (QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
+
+/* index of last SBALE (with respect to DMQ bug workaround) */
+#define ZFCP_QDIO_LAST_SBALE_PER_SBAL (ZFCP_QDIO_MAX_SBALES_PER_SBAL - 1)
+
/**
* struct zfcp_qdio_queue - qdio queue buffer, zfcp index and free count
* @sbal: qdio buffers
@@ -49,6 +57,7 @@ struct zfcp_qdio {
/**
* struct zfcp_qdio_req - qdio queue related values for a request
+ * @sbtype: sbal type flags for sbale 0
* @sbal_number: number of free sbals
* @sbal_first: first sbal for this request
* @sbal_last: last sbal for this request
@@ -59,6 +68,7 @@ struct zfcp_qdio {
* @qdio_inb_usage: usage of inbound queue
*/
struct zfcp_qdio_req {
+ u32 sbtype;
u8 sbal_number;
u8 sbal_first;
u8 sbal_last;
@@ -106,4 +116,98 @@ zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
q_req->sbale_curr);
}
+/**
+ * zfcp_qdio_req_init - initialize qdio request
+ * @qdio: request queue where to start putting the request
+ * @q_req: the qdio request to start
+ * @req_id: The request id
+ * @sbtype: type flags to set for all sbals
+ * @data: First data block
+ * @len: Length of first data block
+ *
+ * This is the start of putting the request into the queue; the last
+ * step is passing the request to zfcp_qdio_send. The request queue
+ * lock must be held during the whole process from init to send.
+ */
+static inline
+void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
+ unsigned long req_id, u32 sbtype, void *data, u32 len)
+{
+ struct qdio_buffer_element *sbale;
+
+ q_req->sbal_first = q_req->sbal_last = qdio->req_q.first;
+ q_req->sbal_number = 1;
+ q_req->sbtype = sbtype;
+
+ sbale = zfcp_qdio_sbale_req(qdio, q_req);
+ sbale->addr = (void *) req_id;
+ sbale->flags |= SBAL_FLAGS0_COMMAND;
+ sbale->flags |= sbtype;
+
+ q_req->sbale_curr = 1;
+ sbale++;
+ sbale->addr = data;
+ if (likely(data))
+ sbale->length = len;
+}
+
+/**
+ * zfcp_qdio_fill_next - Fill next sbale, only for single sbal requests
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_qdio_req
+ *
+ * This is only required for single sbal requests; calling it when
+ * wrapping around to the next sbal is a bug.
+ */
+static inline
+void zfcp_qdio_fill_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
+ void *data, u32 len)
+{
+ struct qdio_buffer_element *sbale;
+
+ BUG_ON(q_req->sbale_curr == ZFCP_QDIO_LAST_SBALE_PER_SBAL);
+ q_req->sbale_curr++;
+ sbale = zfcp_qdio_sbale_curr(qdio, q_req);
+ sbale->addr = data;
+ sbale->length = len;
+}
+
+/**
+ * zfcp_qdio_set_sbale_last - set last entry flag in current sbale
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_qdio_req
+ */
+static inline
+void zfcp_qdio_set_sbale_last(struct zfcp_qdio *qdio,
+ struct zfcp_qdio_req *q_req)
+{
+ struct qdio_buffer_element *sbale;
+
+ sbale = zfcp_qdio_sbale_curr(qdio, q_req);
+ sbale->flags |= SBAL_FLAGS_LAST_ENTRY;
+}
+
+/**
+ * zfcp_qdio_sg_one_sbal - check if one sbale is enough for sg data
+ * @sg: The scatterlist where to check the data size
+ *
+ * Returns: 1 when one sbale is enough for the data in the scatterlist,
+ * 0 if not.
+ */
+static inline
+int zfcp_qdio_sg_one_sbale(struct scatterlist *sg)
+{
+ return sg_is_last(sg) && sg->length <= ZFCP_QDIO_SBALE_LEN;
+}
+
+/**
+ * zfcp_qdio_skip_to_last_sbale - skip to last sbale in sbal
+ * @q_req: The current zfcp_qdio_req
+ */
+static inline
+void zfcp_qdio_skip_to_last_sbale(struct zfcp_qdio_req *q_req)
+{
+ q_req->sbale_curr = ZFCP_QDIO_LAST_SBALE_PER_SBAL;
+}
+
#endif /* ZFCP_QDIO_H */
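The inline helpers above replace the open-coded SBALE setup removed from zfcp_fsf.c. A minimal hedged sketch of how they are meant to compose, assuming the caller holds qdio->req_q_lock from init through zfcp_qdio_send() as the zfcp_qdio_req_init() comment requires (the function and parameter names here are illustrative, not taken from the patch):

/* illustrative only; the real callers are the zfcp_fsf.c request constructors */
static void example_build_request(struct zfcp_qdio *qdio,
				  struct zfcp_qdio_req *q_req,
				  unsigned long req_id, void *qtcb, u32 qtcb_len,
				  void *payload, u32 payload_len)
{
	/* SBALE 0 gets the request ID, SBALE 1 the QTCB */
	zfcp_qdio_req_init(qdio, q_req, req_id, SBAL_FLAGS0_TYPE_WRITE,
			   qtcb, qtcb_len);

	/* single-SBAL requests may append one more buffer in the same SBAL */
	if (payload)
		zfcp_qdio_fill_next(qdio, q_req, payload, payload_len);

	/* close the SBALE chain before handing the request to zfcp_qdio_send() */
	zfcp_qdio_set_sbale_last(qdio, q_req);
}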
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 174b6d57d576..be5d2c60453d 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -175,7 +175,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
struct zfcp_fsf_req *old_req, *abrt_req;
unsigned long flags;
unsigned long old_reqid = (unsigned long) scpnt->host_scribble;
- int retval = SUCCESS;
+ int retval = SUCCESS, ret;
int retry = 3;
char *dbf_tag;
@@ -200,7 +200,9 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
break;
zfcp_erp_wait(adapter);
- fc_block_scsi_eh(scpnt);
+ ret = fc_block_scsi_eh(scpnt);
+ if (ret)
+ return ret;
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING)) {
zfcp_dbf_scsi_abort("nres", adapter->dbf, scpnt, NULL,
@@ -231,7 +233,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
struct zfcp_unit *unit = scpnt->device->hostdata;
struct zfcp_adapter *adapter = unit->port->adapter;
struct zfcp_fsf_req *fsf_req = NULL;
- int retval = SUCCESS;
+ int retval = SUCCESS, ret;
int retry = 3;
while (retry--) {
@@ -240,7 +242,10 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
break;
zfcp_erp_wait(adapter);
- fc_block_scsi_eh(scpnt);
+ ret = fc_block_scsi_eh(scpnt);
+ if (ret)
+ return ret;
+
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING)) {
zfcp_dbf_scsi_devreset("nres", tm_flags, unit, scpnt);
@@ -276,10 +281,13 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
{
struct zfcp_unit *unit = scpnt->device->hostdata;
struct zfcp_adapter *adapter = unit->port->adapter;
+ int ret;
zfcp_erp_adapter_reopen(adapter, 0, "schrh_1", scpnt);
zfcp_erp_wait(adapter);
- fc_block_scsi_eh(scpnt);
+ ret = fc_block_scsi_eh(scpnt);
+ if (ret)
+ return ret;
return SUCCESS;
}
@@ -669,11 +677,12 @@ struct zfcp_data zfcp_data = {
.eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler,
.can_queue = 4096,
.this_id = -1,
- .sg_tablesize = ZFCP_MAX_SBALES_PER_REQ,
+ .sg_tablesize = ZFCP_FSF_MAX_SBALES_PER_REQ,
.cmd_per_lun = 1,
.use_clustering = 1,
.sdev_attrs = zfcp_sysfs_sdev_attrs,
- .max_sectors = (ZFCP_MAX_SBALES_PER_REQ * 8),
+ .max_sectors = (ZFCP_FSF_MAX_SBALES_PER_REQ * 8),
+ .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
.shost_attrs = zfcp_sysfs_shost_attrs,
},
};
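With the renamed limit, the host template sizing works out as follows, assuming 512-byte sectors and a 4 KiB PAGE_SIZE (so ZFCP_QDIO_SBALE_LEN is 4096): sg_tablesize allows up to ZFCP_FSF_MAX_SBALES_PER_REQ scatter-gather entries, max_sectors caps a request at ZFCP_FSF_MAX_SBALES_PER_REQ * 8 sectors (eight 512-byte sectors per 4 KiB SBALE), and dma_boundary = ZFCP_QDIO_SBALE_LEN - 1 keeps every scatter-gather element within one page so it fits a single SBALE. A small check of the arithmetic under those assumptions:

/* assumed: 4 KiB pages, 512-byte sectors, ZFCP_FSF_MAX_SBALES_PER_REQ = 4570 */
#include <stdio.h>

int main(void)
{
	int max_sbales = 36 * (128 - 1) - 2;	/* ZFCP_FSF_MAX_SBALES_PER_REQ */
	int max_sectors = max_sbales * 8;	/* 8 sectors per 4 KiB SBALE */

	printf("%d SBALEs -> %d sectors (%d KiB) per request\n",
	       max_sbales, max_sectors, max_sectors / 2);	/* 4570 -> 36560 sectors */
	return 0;
}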
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index e9788f55ab13..1bb774becf25 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1,10 +1,11 @@
/*
3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux.
- Written By: Adam Radford <linuxraid@amcc.com>
- Modifications By: Tom Couch <linuxraid@amcc.com>
+ Written By: Adam Radford <linuxraid@lsi.com>
+ Modifications By: Tom Couch <linuxraid@lsi.com>
Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
+ Copyright (C) 2010 LSI Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -40,10 +41,10 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Bugs/Comments/Suggestions should be mailed to:
- linuxraid@amcc.com
+ linuxraid@lsi.com
For more information, goto:
- http://www.amcc.com
+ http://www.lsi.com
Note: This version of the driver does not contain a bundled firmware
image.
@@ -77,6 +78,7 @@
Use pci_resource_len() for ioremap().
2.26.02.012 - Add power management support.
2.26.02.013 - Fix bug in twa_load_sgl().
+ 2.26.02.014 - Force 60 second timeout default.
*/
#include <linux/module.h>
@@ -102,14 +104,14 @@
#include "3w-9xxx.h"
/* Globals */
-#define TW_DRIVER_VERSION "2.26.02.013"
+#define TW_DRIVER_VERSION "2.26.02.014"
static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
static unsigned int twa_device_extension_count;
static int twa_major = -1;
extern struct timezone sys_tz;
/* Module parameters */
-MODULE_AUTHOR ("AMCC");
+MODULE_AUTHOR ("LSI");
MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(TW_DRIVER_VERSION);
@@ -1990,6 +1992,15 @@ static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
scsi_dma_unmap(cmd);
} /* End twa_unmap_scsi_data() */
+/* This function gets called when a disk is coming on-line */
+static int twa_slave_configure(struct scsi_device *sdev)
+{
+ /* Force 60 second timeout */
+ blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
+
+ return 0;
+} /* End twa_slave_configure() */
+
/* scsi_host_template initializer */
static struct scsi_host_template driver_template = {
.module = THIS_MODULE,
@@ -1999,6 +2010,7 @@ static struct scsi_host_template driver_template = {
.bios_param = twa_scsi_biosparam,
.change_queue_depth = twa_change_queue_depth,
.can_queue = TW_Q_LENGTH-2,
+ .slave_configure = twa_slave_configure,
.this_id = -1,
.sg_tablesize = TW_APACHE_MAX_SGL_LENGTH,
.max_sectors = TW_MAX_SECTORS,
diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
index 2893eec78ed2..3343824855d0 100644
--- a/drivers/scsi/3w-9xxx.h
+++ b/drivers/scsi/3w-9xxx.h
@@ -1,10 +1,11 @@
/*
3w-9xxx.h -- 3ware 9000 Storage Controller device driver for Linux.
- Written By: Adam Radford <linuxraid@amcc.com>
- Modifications By: Tom Couch <linuxraid@amcc.com>
+ Written By: Adam Radford <linuxraid@lsi.com>
+ Modifications By: Tom Couch <linuxraid@lsi.com>
Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
+ Copyright (C) 2010 LSI Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -40,10 +41,10 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Bugs/Comments/Suggestions should be mailed to:
- linuxraid@amcc.com
+ linuxraid@lsi.com
For more information, goto:
- http://www.amcc.com
+ http://www.lsi.com
*/
#ifndef _3W_9XXX_H
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 5faf903ca8c8..d119a614bf7d 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1,12 +1,12 @@
/*
3w-xxxx.c -- 3ware Storage Controller device driver for Linux.
- Written By: Adam Radford <linuxraid@amcc.com>
+ Written By: Adam Radford <linuxraid@lsi.com>
Modifications By: Joel Jacobson <linux@3ware.com>
Arnaldo Carvalho de Melo <acme@conectiva.com.br>
Brad Strand <linux@3ware.com>
- Copyright (C) 1999-2009 3ware Inc.
+ Copyright (C) 1999-2010 3ware Inc.
Kernel compatibility By: Andre Hedrick <andre@suse.com>
Non-Copyright (C) 2000 Andre Hedrick <andre@suse.com>
@@ -47,10 +47,10 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Bugs/Comments/Suggestions should be mailed to:
- linuxraid@amcc.com
+ linuxraid@lsi.com
For more information, goto:
- http://www.amcc.com
+ http://www.lsi.com
History
-------
@@ -194,6 +194,7 @@
1.26.02.002 - Free irq handler in __tw_shutdown().
Turn on RCD bit for caching mode page.
Serialize reset code.
+ 1.26.02.003 - Force 60 second timeout default.
*/
#include <linux/module.h>
@@ -219,13 +220,13 @@
#include "3w-xxxx.h"
/* Globals */
-#define TW_DRIVER_VERSION "1.26.02.002"
+#define TW_DRIVER_VERSION "1.26.02.003"
static TW_Device_Extension *tw_device_extension_list[TW_MAX_SLOT];
static int tw_device_extension_count = 0;
static int twe_major = -1;
/* Module parameters */
-MODULE_AUTHOR("AMCC");
+MODULE_AUTHOR("LSI");
MODULE_DESCRIPTION("3ware Storage Controller Linux Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(TW_DRIVER_VERSION);
@@ -2245,6 +2246,15 @@ static void tw_shutdown(struct pci_dev *pdev)
__tw_shutdown(tw_dev);
} /* End tw_shutdown() */
+/* This function gets called when a disk is coming online */
+static int tw_slave_configure(struct scsi_device *sdev)
+{
+ /* Force 60 second timeout */
+ blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
+
+ return 0;
+} /* End tw_slave_configure() */
+
static struct scsi_host_template driver_template = {
.module = THIS_MODULE,
.name = "3ware Storage Controller",
@@ -2253,6 +2263,7 @@ static struct scsi_host_template driver_template = {
.bios_param = tw_scsi_biosparam,
.change_queue_depth = tw_change_queue_depth,
.can_queue = TW_Q_LENGTH-2,
+ .slave_configure = tw_slave_configure,
.this_id = -1,
.sg_tablesize = TW_MAX_SGL_LENGTH,
.max_sectors = TW_MAX_SECTORS,
diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
index a5a2ba2561d9..8b9f9d17e7fe 100644
--- a/drivers/scsi/3w-xxxx.h
+++ b/drivers/scsi/3w-xxxx.h
@@ -1,12 +1,12 @@
/*
3w-xxxx.h -- 3ware Storage Controller device driver for Linux.
- Written By: Adam Radford <linuxraid@amcc.com>
+ Written By: Adam Radford <linuxraid@lsi.com>
Modifications By: Joel Jacobson <linux@3ware.com>
Arnaldo Carvalho de Melo <acme@conectiva.com.br>
Brad Strand <linux@3ware.com>
- Copyright (C) 1999-2009 3ware Inc.
+ Copyright (C) 1999-2010 3ware Inc.
Kernel compatibility By: Andre Hedrick <andre@suse.com>
Non-Copyright (C) 2000 Andre Hedrick <andre@suse.com>
@@ -45,10 +45,10 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Bugs/Comments/Suggestions should be mailed to:
- linuxraid@amcc.com
+ linuxraid@lsi.com
For more information, goto:
- http://www.amcc.com
+ http://www.lsi.com
*/
#ifndef _3W_XXXX_H
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 92a8c500b23d..1c7ac49be649 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -162,6 +162,7 @@ scsi_mod-y += scsi_scan.o scsi_sysfs.o scsi_devinfo.o
scsi_mod-$(CONFIG_SCSI_NETLINK) += scsi_netlink.o
scsi_mod-$(CONFIG_SYSCTL) += scsi_sysctl.o
scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o
+scsi_mod-y += scsi_trace.o
scsi_tgt-y += scsi_tgt_lib.o scsi_tgt_if.o
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index d8fe5b76fee0..308541ff85cf 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -19,186 +19,190 @@
#include "wd33c93.h"
#include "a2091.h"
-#include<linux/stat.h>
+#include <linux/stat.h>
-#define DMA(ptr) ((a2091_scsiregs *)((ptr)->base))
-#define HDATA(ptr) ((struct WD33C93_hostdata *)((ptr)->hostdata))
static int a2091_release(struct Scsi_Host *instance);
-static irqreturn_t a2091_intr (int irq, void *_instance)
+static irqreturn_t a2091_intr(int irq, void *data)
{
- unsigned long flags;
- unsigned int status;
- struct Scsi_Host *instance = (struct Scsi_Host *)_instance;
-
- status = DMA(instance)->ISTR;
- if (!(status & (ISTR_INT_F|ISTR_INT_P)) || !(status & ISTR_INTS))
- return IRQ_NONE;
-
- spin_lock_irqsave(instance->host_lock, flags);
- wd33c93_intr(instance);
- spin_unlock_irqrestore(instance->host_lock, flags);
- return IRQ_HANDLED;
+ struct Scsi_Host *instance = data;
+ a2091_scsiregs *regs = (a2091_scsiregs *)(instance->base);
+ unsigned int status = regs->ISTR;
+ unsigned long flags;
+
+ if (!(status & (ISTR_INT_F | ISTR_INT_P)) || !(status & ISTR_INTS))
+ return IRQ_NONE;
+
+ spin_lock_irqsave(instance->host_lock, flags);
+ wd33c93_intr(instance);
+ spin_unlock_irqrestore(instance->host_lock, flags);
+ return IRQ_HANDLED;
}
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
- unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
- unsigned long addr = virt_to_bus(cmd->SCp.ptr);
- struct Scsi_Host *instance = cmd->device->host;
-
- /* don't allow DMA if the physical address is bad */
- if (addr & A2091_XFER_MASK)
- {
- HDATA(instance)->dma_bounce_len = (cmd->SCp.this_residual + 511)
- & ~0x1ff;
- HDATA(instance)->dma_bounce_buffer =
- kmalloc (HDATA(instance)->dma_bounce_len, GFP_KERNEL);
-
- /* can't allocate memory; use PIO */
- if (!HDATA(instance)->dma_bounce_buffer) {
- HDATA(instance)->dma_bounce_len = 0;
- return 1;
- }
-
- /* get the physical address of the bounce buffer */
- addr = virt_to_bus(HDATA(instance)->dma_bounce_buffer);
+ struct Scsi_Host *instance = cmd->device->host;
+ struct WD33C93_hostdata *hdata = shost_priv(instance);
+ a2091_scsiregs *regs = (a2091_scsiregs *)(instance->base);
+ unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
+ unsigned long addr = virt_to_bus(cmd->SCp.ptr);
- /* the bounce buffer may not be in the first 16M of physmem */
+ /* don't allow DMA if the physical address is bad */
if (addr & A2091_XFER_MASK) {
- /* we could use chipmem... maybe later */
- kfree (HDATA(instance)->dma_bounce_buffer);
- HDATA(instance)->dma_bounce_buffer = NULL;
- HDATA(instance)->dma_bounce_len = 0;
- return 1;
+ hdata->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
+ hdata->dma_bounce_buffer = kmalloc(hdata->dma_bounce_len,
+ GFP_KERNEL);
+
+ /* can't allocate memory; use PIO */
+ if (!hdata->dma_bounce_buffer) {
+ hdata->dma_bounce_len = 0;
+ return 1;
+ }
+
+ /* get the physical address of the bounce buffer */
+ addr = virt_to_bus(hdata->dma_bounce_buffer);
+
+ /* the bounce buffer may not be in the first 16M of physmem */
+ if (addr & A2091_XFER_MASK) {
+ /* we could use chipmem... maybe later */
+ kfree(hdata->dma_bounce_buffer);
+ hdata->dma_bounce_buffer = NULL;
+ hdata->dma_bounce_len = 0;
+ return 1;
+ }
+
+ if (!dir_in) {
+ /* copy to bounce buffer for a write */
+ memcpy(hdata->dma_bounce_buffer, cmd->SCp.ptr,
+ cmd->SCp.this_residual);
+ }
}
- if (!dir_in) {
- /* copy to bounce buffer for a write */
- memcpy (HDATA(instance)->dma_bounce_buffer,
- cmd->SCp.ptr, cmd->SCp.this_residual);
- }
- }
+ /* setup dma direction */
+ if (!dir_in)
+ cntr |= CNTR_DDIR;
- /* setup dma direction */
- if (!dir_in)
- cntr |= CNTR_DDIR;
+ /* remember direction */
+ hdata->dma_dir = dir_in;
- /* remember direction */
- HDATA(cmd->device->host)->dma_dir = dir_in;
+ regs->CNTR = cntr;
- DMA(cmd->device->host)->CNTR = cntr;
+ /* setup DMA *physical* address */
+ regs->ACR = addr;
- /* setup DMA *physical* address */
- DMA(cmd->device->host)->ACR = addr;
-
- if (dir_in){
- /* invalidate any cache */
- cache_clear (addr, cmd->SCp.this_residual);
- }else{
- /* push any dirty cache */
- cache_push (addr, cmd->SCp.this_residual);
- }
- /* start DMA */
- DMA(cmd->device->host)->ST_DMA = 1;
+ if (dir_in) {
+ /* invalidate any cache */
+ cache_clear(addr, cmd->SCp.this_residual);
+ } else {
+ /* push any dirty cache */
+ cache_push(addr, cmd->SCp.this_residual);
+ }
+ /* start DMA */
+ regs->ST_DMA = 1;
- /* return success */
- return 0;
+ /* return success */
+ return 0;
}
static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
- int status)
+ int status)
{
- /* disable SCSI interrupts */
- unsigned short cntr = CNTR_PDMD;
-
- if (!HDATA(instance)->dma_dir)
- cntr |= CNTR_DDIR;
-
- /* disable SCSI interrupts */
- DMA(instance)->CNTR = cntr;
-
- /* flush if we were reading */
- if (HDATA(instance)->dma_dir) {
- DMA(instance)->FLUSH = 1;
- while (!(DMA(instance)->ISTR & ISTR_FE_FLG))
- ;
- }
-
- /* clear a possible interrupt */
- DMA(instance)->CINT = 1;
-
- /* stop DMA */
- DMA(instance)->SP_DMA = 1;
-
- /* restore the CONTROL bits (minus the direction flag) */
- DMA(instance)->CNTR = CNTR_PDMD | CNTR_INTEN;
-
- /* copy from a bounce buffer, if necessary */
- if (status && HDATA(instance)->dma_bounce_buffer) {
- if( HDATA(instance)->dma_dir )
- memcpy (SCpnt->SCp.ptr,
- HDATA(instance)->dma_bounce_buffer,
- SCpnt->SCp.this_residual);
- kfree (HDATA(instance)->dma_bounce_buffer);
- HDATA(instance)->dma_bounce_buffer = NULL;
- HDATA(instance)->dma_bounce_len = 0;
- }
+ struct WD33C93_hostdata *hdata = shost_priv(instance);
+ a2091_scsiregs *regs = (a2091_scsiregs *)(instance->base);
+
+ /* disable SCSI interrupts */
+ unsigned short cntr = CNTR_PDMD;
+
+ if (!hdata->dma_dir)
+ cntr |= CNTR_DDIR;
+
+ /* disable SCSI interrupts */
+ regs->CNTR = cntr;
+
+ /* flush if we were reading */
+ if (hdata->dma_dir) {
+ regs->FLUSH = 1;
+ while (!(regs->ISTR & ISTR_FE_FLG))
+ ;
+ }
+
+ /* clear a possible interrupt */
+ regs->CINT = 1;
+
+ /* stop DMA */
+ regs->SP_DMA = 1;
+
+ /* restore the CONTROL bits (minus the direction flag) */
+ regs->CNTR = CNTR_PDMD | CNTR_INTEN;
+
+ /* copy from a bounce buffer, if necessary */
+ if (status && hdata->dma_bounce_buffer) {
+ if (hdata->dma_dir)
+ memcpy(SCpnt->SCp.ptr, hdata->dma_bounce_buffer,
+ SCpnt->SCp.this_residual);
+ kfree(hdata->dma_bounce_buffer);
+ hdata->dma_bounce_buffer = NULL;
+ hdata->dma_bounce_len = 0;
+ }
}
static int __init a2091_detect(struct scsi_host_template *tpnt)
{
- static unsigned char called = 0;
- struct Scsi_Host *instance;
- unsigned long address;
- struct zorro_dev *z = NULL;
- wd33c93_regs regs;
- int num_a2091 = 0;
-
- if (!MACH_IS_AMIGA || called)
- return 0;
- called = 1;
-
- tpnt->proc_name = "A2091";
- tpnt->proc_info = &wd33c93_proc_info;
-
- while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
- if (z->id != ZORRO_PROD_CBM_A590_A2091_1 &&
- z->id != ZORRO_PROD_CBM_A590_A2091_2)
- continue;
- address = z->resource.start;
- if (!request_mem_region(address, 256, "wd33c93"))
- continue;
-
- instance = scsi_register (tpnt, sizeof (struct WD33C93_hostdata));
- if (instance == NULL)
- goto release;
- instance->base = ZTWO_VADDR(address);
- instance->irq = IRQ_AMIGA_PORTS;
- instance->unique_id = z->slotaddr;
- DMA(instance)->DAWR = DAWR_A2091;
- regs.SASR = &(DMA(instance)->SASR);
- regs.SCMD = &(DMA(instance)->SCMD);
- HDATA(instance)->no_sync = 0xff;
- HDATA(instance)->fast = 0;
- HDATA(instance)->dma_mode = CTRL_DMA;
- wd33c93_init(instance, regs, dma_setup, dma_stop, WD33C93_FS_8_10);
- if (request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED, "A2091 SCSI",
- instance))
- goto unregister;
- DMA(instance)->CNTR = CNTR_PDMD | CNTR_INTEN;
- num_a2091++;
- continue;
+ static unsigned char called = 0;
+ struct Scsi_Host *instance;
+ unsigned long address;
+ struct zorro_dev *z = NULL;
+ wd33c93_regs wdregs;
+ a2091_scsiregs *regs;
+ struct WD33C93_hostdata *hdata;
+ int num_a2091 = 0;
+
+ if (!MACH_IS_AMIGA || called)
+ return 0;
+ called = 1;
+
+ tpnt->proc_name = "A2091";
+ tpnt->proc_info = &wd33c93_proc_info;
+
+ while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
+ if (z->id != ZORRO_PROD_CBM_A590_A2091_1 &&
+ z->id != ZORRO_PROD_CBM_A590_A2091_2)
+ continue;
+ address = z->resource.start;
+ if (!request_mem_region(address, 256, "wd33c93"))
+ continue;
+
+ instance = scsi_register(tpnt, sizeof(struct WD33C93_hostdata));
+ if (instance == NULL)
+ goto release;
+ instance->base = ZTWO_VADDR(address);
+ instance->irq = IRQ_AMIGA_PORTS;
+ instance->unique_id = z->slotaddr;
+ regs = (a2091_scsiregs *)(instance->base);
+ regs->DAWR = DAWR_A2091;
+ wdregs.SASR = &regs->SASR;
+ wdregs.SCMD = &regs->SCMD;
+ hdata = shost_priv(instance);
+ hdata->no_sync = 0xff;
+ hdata->fast = 0;
+ hdata->dma_mode = CTRL_DMA;
+ wd33c93_init(instance, wdregs, dma_setup, dma_stop,
+ WD33C93_FS_8_10);
+ if (request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED,
+ "A2091 SCSI", instance))
+ goto unregister;
+ regs->CNTR = CNTR_PDMD | CNTR_INTEN;
+ num_a2091++;
+ continue;
unregister:
- scsi_unregister(instance);
- wd33c93_release();
+ scsi_unregister(instance);
release:
- release_mem_region(address, 256);
- }
+ release_mem_region(address, 256);
+ }
- return num_a2091;
+ return num_a2091;
}
static int a2091_bus_reset(struct scsi_cmnd *cmd)
@@ -239,10 +243,11 @@ static struct scsi_host_template driver_template = {
static int a2091_release(struct Scsi_Host *instance)
{
#ifdef MODULE
- DMA(instance)->CNTR = 0;
+ a2091_scsiregs *regs = (a2091_scsiregs *)(instance->base);
+
+ regs->CNTR = 0;
release_mem_region(ZTWO_PADDR(instance->base), 256);
free_irq(IRQ_AMIGA_PORTS, instance);
- wd33c93_release();
#endif
return 1;
}
diff --git a/drivers/scsi/a2091.h b/drivers/scsi/a2091.h
index 252528f2672e..1c3daa1fd754 100644
--- a/drivers/scsi/a2091.h
+++ b/drivers/scsi/a2091.h
@@ -12,38 +12,38 @@
#include <linux/types.h>
#ifndef CMD_PER_LUN
-#define CMD_PER_LUN 2
+#define CMD_PER_LUN 2
#endif
#ifndef CAN_QUEUE
-#define CAN_QUEUE 16
+#define CAN_QUEUE 16
#endif
/*
* if the transfer address ANDed with this results in a non-zero
* result, then we can't use DMA.
*/
-#define A2091_XFER_MASK (0xff000001)
+#define A2091_XFER_MASK (0xff000001)
typedef struct {
- unsigned char pad1[64];
- volatile unsigned short ISTR;
- volatile unsigned short CNTR;
- unsigned char pad2[60];
- volatile unsigned int WTC;
- volatile unsigned long ACR;
- unsigned char pad3[6];
- volatile unsigned short DAWR;
- unsigned char pad4;
- volatile unsigned char SASR;
- unsigned char pad5;
- volatile unsigned char SCMD;
- unsigned char pad6[76];
- volatile unsigned short ST_DMA;
- volatile unsigned short SP_DMA;
- volatile unsigned short CINT;
- unsigned char pad7[2];
- volatile unsigned short FLUSH;
+ unsigned char pad1[64];
+ volatile unsigned short ISTR;
+ volatile unsigned short CNTR;
+ unsigned char pad2[60];
+ volatile unsigned int WTC;
+ volatile unsigned long ACR;
+ unsigned char pad3[6];
+ volatile unsigned short DAWR;
+ unsigned char pad4;
+ volatile unsigned char SASR;
+ unsigned char pad5;
+ volatile unsigned char SCMD;
+ unsigned char pad6[76];
+ volatile unsigned short ST_DMA;
+ volatile unsigned short SP_DMA;
+ volatile unsigned short CINT;
+ unsigned char pad7[2];
+ volatile unsigned short FLUSH;
} a2091_scsiregs;
#define DAWR_A2091 (3)
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index c35fc55f1c96..bc6eb69f5fd0 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -19,26 +19,25 @@
#include "wd33c93.h"
#include "a3000.h"
-#include<linux/stat.h>
+#include <linux/stat.h>
-#define DMA(ptr) ((a3000_scsiregs *)((ptr)->base))
-#define HDATA(ptr) ((struct WD33C93_hostdata *)((ptr)->hostdata))
+
+#define DMA(ptr) ((a3000_scsiregs *)((ptr)->base))
static struct Scsi_Host *a3000_host = NULL;
static int a3000_release(struct Scsi_Host *instance);
-static irqreturn_t a3000_intr (int irq, void *dummy)
+static irqreturn_t a3000_intr(int irq, void *dummy)
{
unsigned long flags;
unsigned int status = DMA(a3000_host)->ISTR;
if (!(status & ISTR_INT_P))
return IRQ_NONE;
- if (status & ISTR_INTS)
- {
+ if (status & ISTR_INTS) {
spin_lock_irqsave(a3000_host->host_lock, flags);
- wd33c93_intr (a3000_host);
+ wd33c93_intr(a3000_host);
spin_unlock_irqrestore(a3000_host->host_lock, flags);
return IRQ_HANDLED;
}
@@ -48,162 +47,165 @@ static irqreturn_t a3000_intr (int irq, void *dummy)
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
- unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
- unsigned long addr = virt_to_bus(cmd->SCp.ptr);
-
- /*
- * if the physical address has the wrong alignment, or if
- * physical address is bad, or if it is a write and at the
- * end of a physical memory chunk, then allocate a bounce
- * buffer
- */
- if (addr & A3000_XFER_MASK)
- {
- HDATA(a3000_host)->dma_bounce_len = (cmd->SCp.this_residual + 511)
- & ~0x1ff;
- HDATA(a3000_host)->dma_bounce_buffer =
- kmalloc (HDATA(a3000_host)->dma_bounce_len, GFP_KERNEL);
-
- /* can't allocate memory; use PIO */
- if (!HDATA(a3000_host)->dma_bounce_buffer) {
- HDATA(a3000_host)->dma_bounce_len = 0;
- return 1;
- }
-
- if (!dir_in) {
- /* copy to bounce buffer for a write */
- memcpy (HDATA(a3000_host)->dma_bounce_buffer,
- cmd->SCp.ptr, cmd->SCp.this_residual);
+ struct WD33C93_hostdata *hdata = shost_priv(a3000_host);
+ unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
+ unsigned long addr = virt_to_bus(cmd->SCp.ptr);
+
+ /*
+ * if the physical address has the wrong alignment, or if
+ * physical address is bad, or if it is a write and at the
+ * end of a physical memory chunk, then allocate a bounce
+ * buffer
+ */
+ if (addr & A3000_XFER_MASK) {
+ hdata->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
+ hdata->dma_bounce_buffer = kmalloc(hdata->dma_bounce_len,
+ GFP_KERNEL);
+
+ /* can't allocate memory; use PIO */
+ if (!hdata->dma_bounce_buffer) {
+ hdata->dma_bounce_len = 0;
+ return 1;
+ }
+
+ if (!dir_in) {
+ /* copy to bounce buffer for a write */
+ memcpy(hdata->dma_bounce_buffer, cmd->SCp.ptr,
+ cmd->SCp.this_residual);
+ }
+
+ addr = virt_to_bus(hdata->dma_bounce_buffer);
}
- addr = virt_to_bus(HDATA(a3000_host)->dma_bounce_buffer);
- }
+ /* setup dma direction */
+ if (!dir_in)
+ cntr |= CNTR_DDIR;
- /* setup dma direction */
- if (!dir_in)
- cntr |= CNTR_DDIR;
+ /* remember direction */
+ hdata->dma_dir = dir_in;
- /* remember direction */
- HDATA(a3000_host)->dma_dir = dir_in;
+ DMA(a3000_host)->CNTR = cntr;
- DMA(a3000_host)->CNTR = cntr;
+ /* setup DMA *physical* address */
+ DMA(a3000_host)->ACR = addr;
- /* setup DMA *physical* address */
- DMA(a3000_host)->ACR = addr;
-
- if (dir_in)
- /* invalidate any cache */
- cache_clear (addr, cmd->SCp.this_residual);
- else
- /* push any dirty cache */
- cache_push (addr, cmd->SCp.this_residual);
+ if (dir_in) {
+ /* invalidate any cache */
+ cache_clear(addr, cmd->SCp.this_residual);
+ } else {
+ /* push any dirty cache */
+ cache_push(addr, cmd->SCp.this_residual);
+ }
- /* start DMA */
- mb(); /* make sure setup is completed */
- DMA(a3000_host)->ST_DMA = 1;
- mb(); /* make sure DMA has started before next IO */
+ /* start DMA */
+ mb(); /* make sure setup is completed */
+ DMA(a3000_host)->ST_DMA = 1;
+ mb(); /* make sure DMA has started before next IO */
- /* return success */
- return 0;
+ /* return success */
+ return 0;
}
static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
int status)
{
- /* disable SCSI interrupts */
- unsigned short cntr = CNTR_PDMD;
-
- if (!HDATA(instance)->dma_dir)
- cntr |= CNTR_DDIR;
-
- DMA(instance)->CNTR = cntr;
- mb(); /* make sure CNTR is updated before next IO */
-
- /* flush if we were reading */
- if (HDATA(instance)->dma_dir) {
- DMA(instance)->FLUSH = 1;
- mb(); /* don't allow prefetch */
- while (!(DMA(instance)->ISTR & ISTR_FE_FLG))
- barrier();
- mb(); /* no IO until FLUSH is done */
- }
-
- /* clear a possible interrupt */
- /* I think that this CINT is only necessary if you are
- * using the terminal count features. HM 7 Mar 1994
- */
- DMA(instance)->CINT = 1;
-
- /* stop DMA */
- DMA(instance)->SP_DMA = 1;
- mb(); /* make sure DMA is stopped before next IO */
-
- /* restore the CONTROL bits (minus the direction flag) */
- DMA(instance)->CNTR = CNTR_PDMD | CNTR_INTEN;
- mb(); /* make sure CNTR is updated before next IO */
-
- /* copy from a bounce buffer, if necessary */
- if (status && HDATA(instance)->dma_bounce_buffer) {
- if (SCpnt) {
- if (HDATA(instance)->dma_dir && SCpnt)
- memcpy (SCpnt->SCp.ptr,
- HDATA(instance)->dma_bounce_buffer,
- SCpnt->SCp.this_residual);
- kfree (HDATA(instance)->dma_bounce_buffer);
- HDATA(instance)->dma_bounce_buffer = NULL;
- HDATA(instance)->dma_bounce_len = 0;
- } else {
- kfree (HDATA(instance)->dma_bounce_buffer);
- HDATA(instance)->dma_bounce_buffer = NULL;
- HDATA(instance)->dma_bounce_len = 0;
+ struct WD33C93_hostdata *hdata = shost_priv(instance);
+
+ /* disable SCSI interrupts */
+ unsigned short cntr = CNTR_PDMD;
+
+ if (!hdata->dma_dir)
+ cntr |= CNTR_DDIR;
+
+ DMA(instance)->CNTR = cntr;
+ mb(); /* make sure CNTR is updated before next IO */
+
+ /* flush if we were reading */
+ if (hdata->dma_dir) {
+ DMA(instance)->FLUSH = 1;
+ mb(); /* don't allow prefetch */
+ while (!(DMA(instance)->ISTR & ISTR_FE_FLG))
+ barrier();
+ mb(); /* no IO until FLUSH is done */
+ }
+
+ /* clear a possible interrupt */
+ /* I think that this CINT is only necessary if you are
+ * using the terminal count features. HM 7 Mar 1994
+ */
+ DMA(instance)->CINT = 1;
+
+ /* stop DMA */
+ DMA(instance)->SP_DMA = 1;
+ mb(); /* make sure DMA is stopped before next IO */
+
+ /* restore the CONTROL bits (minus the direction flag) */
+ DMA(instance)->CNTR = CNTR_PDMD | CNTR_INTEN;
+ mb(); /* make sure CNTR is updated before next IO */
+
+ /* copy from a bounce buffer, if necessary */
+ if (status && hdata->dma_bounce_buffer) {
+ if (SCpnt) {
+ if (hdata->dma_dir && SCpnt)
+ memcpy(SCpnt->SCp.ptr,
+ hdata->dma_bounce_buffer,
+ SCpnt->SCp.this_residual);
+ kfree(hdata->dma_bounce_buffer);
+ hdata->dma_bounce_buffer = NULL;
+ hdata->dma_bounce_len = 0;
+ } else {
+ kfree(hdata->dma_bounce_buffer);
+ hdata->dma_bounce_buffer = NULL;
+ hdata->dma_bounce_len = 0;
+ }
}
- }
}
static int __init a3000_detect(struct scsi_host_template *tpnt)
{
- wd33c93_regs regs;
-
- if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(A3000_SCSI))
- return 0;
- if (!request_mem_region(0xDD0000, 256, "wd33c93"))
- return 0;
-
- tpnt->proc_name = "A3000";
- tpnt->proc_info = &wd33c93_proc_info;
-
- a3000_host = scsi_register (tpnt, sizeof(struct WD33C93_hostdata));
- if (a3000_host == NULL)
- goto fail_register;
-
- a3000_host->base = ZTWO_VADDR(0xDD0000);
- a3000_host->irq = IRQ_AMIGA_PORTS;
- DMA(a3000_host)->DAWR = DAWR_A3000;
- regs.SASR = &(DMA(a3000_host)->SASR);
- regs.SCMD = &(DMA(a3000_host)->SCMD);
- HDATA(a3000_host)->no_sync = 0xff;
- HDATA(a3000_host)->fast = 0;
- HDATA(a3000_host)->dma_mode = CTRL_DMA;
- wd33c93_init(a3000_host, regs, dma_setup, dma_stop, WD33C93_FS_12_15);
- if (request_irq(IRQ_AMIGA_PORTS, a3000_intr, IRQF_SHARED, "A3000 SCSI",
- a3000_intr))
- goto fail_irq;
- DMA(a3000_host)->CNTR = CNTR_PDMD | CNTR_INTEN;
-
- return 1;
+ wd33c93_regs regs;
+ struct WD33C93_hostdata *hdata;
+
+ if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(A3000_SCSI))
+ return 0;
+ if (!request_mem_region(0xDD0000, 256, "wd33c93"))
+ return 0;
+
+ tpnt->proc_name = "A3000";
+ tpnt->proc_info = &wd33c93_proc_info;
+
+ a3000_host = scsi_register(tpnt, sizeof(struct WD33C93_hostdata));
+ if (a3000_host == NULL)
+ goto fail_register;
+
+ a3000_host->base = ZTWO_VADDR(0xDD0000);
+ a3000_host->irq = IRQ_AMIGA_PORTS;
+ DMA(a3000_host)->DAWR = DAWR_A3000;
+ regs.SASR = &(DMA(a3000_host)->SASR);
+ regs.SCMD = &(DMA(a3000_host)->SCMD);
+ hdata = shost_priv(a3000_host);
+ hdata->no_sync = 0xff;
+ hdata->fast = 0;
+ hdata->dma_mode = CTRL_DMA;
+ wd33c93_init(a3000_host, regs, dma_setup, dma_stop, WD33C93_FS_12_15);
+ if (request_irq(IRQ_AMIGA_PORTS, a3000_intr, IRQF_SHARED, "A3000 SCSI",
+ a3000_intr))
+ goto fail_irq;
+ DMA(a3000_host)->CNTR = CNTR_PDMD | CNTR_INTEN;
+
+ return 1;
fail_irq:
- wd33c93_release();
- scsi_unregister(a3000_host);
+ scsi_unregister(a3000_host);
fail_register:
- release_mem_region(0xDD0000, 256);
- return 0;
+ release_mem_region(0xDD0000, 256);
+ return 0;
}
static int a3000_bus_reset(struct scsi_cmnd *cmd)
{
/* FIXME perform bus-specific reset */
-
+
/* FIXME 2: kill this entire function, which should
cause mid-layer to call wd33c93_host_reset anyway? */
@@ -237,11 +239,10 @@ static struct scsi_host_template driver_template = {
static int a3000_release(struct Scsi_Host *instance)
{
- wd33c93_release();
- DMA(instance)->CNTR = 0;
- release_mem_region(0xDD0000, 256);
- free_irq(IRQ_AMIGA_PORTS, a3000_intr);
- return 1;
+ DMA(instance)->CNTR = 0;
+ release_mem_region(0xDD0000, 256);
+ free_irq(IRQ_AMIGA_PORTS, a3000_intr);
+ return 1;
}
MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/a3000.h b/drivers/scsi/a3000.h
index c7afe16fd6e4..684813ee378c 100644
--- a/drivers/scsi/a3000.h
+++ b/drivers/scsi/a3000.h
@@ -12,40 +12,40 @@
#include <linux/types.h>
#ifndef CMD_PER_LUN
-#define CMD_PER_LUN 2
+#define CMD_PER_LUN 2
#endif
#ifndef CAN_QUEUE
-#define CAN_QUEUE 16
+#define CAN_QUEUE 16
#endif
/*
* if the transfer address ANDed with this results in a non-zero
* result, then we can't use DMA.
*/
-#define A3000_XFER_MASK (0x00000003)
+#define A3000_XFER_MASK (0x00000003)
typedef struct {
- unsigned char pad1[2];
- volatile unsigned short DAWR;
- volatile unsigned int WTC;
- unsigned char pad2[2];
- volatile unsigned short CNTR;
- volatile unsigned long ACR;
- unsigned char pad3[2];
- volatile unsigned short ST_DMA;
- unsigned char pad4[2];
- volatile unsigned short FLUSH;
- unsigned char pad5[2];
- volatile unsigned short CINT;
- unsigned char pad6[2];
- volatile unsigned short ISTR;
- unsigned char pad7[30];
- volatile unsigned short SP_DMA;
- unsigned char pad8;
- volatile unsigned char SASR;
- unsigned char pad9;
- volatile unsigned char SCMD;
+ unsigned char pad1[2];
+ volatile unsigned short DAWR;
+ volatile unsigned int WTC;
+ unsigned char pad2[2];
+ volatile unsigned short CNTR;
+ volatile unsigned long ACR;
+ unsigned char pad3[2];
+ volatile unsigned short ST_DMA;
+ unsigned char pad4[2];
+ volatile unsigned short FLUSH;
+ unsigned char pad5[2];
+ volatile unsigned short CINT;
+ unsigned char pad6[2];
+ volatile unsigned short ISTR;
+ unsigned char pad7[30];
+ volatile unsigned short SP_DMA;
+ unsigned char pad8;
+ volatile unsigned char SASR;
+ unsigned char pad9;
+ volatile unsigned char SCMD;
} a3000_scsiregs;
#define DAWR_A3000 (3)
diff --git a/drivers/scsi/bfa/bfa_cb_ioim_macros.h b/drivers/scsi/bfa/bfa_cb_ioim_macros.h
index 961fe439daad..53a616f5f50d 100644
--- a/drivers/scsi/bfa/bfa_cb_ioim_macros.h
+++ b/drivers/scsi/bfa/bfa_cb_ioim_macros.h
@@ -117,35 +117,6 @@ bfa_cb_ioim_get_timeout(struct bfad_ioim_s *dio)
}
/**
- * Get SG element for the I/O request given the SG element index
- */
-static inline union bfi_addr_u
-bfa_cb_ioim_get_sgaddr(struct bfad_ioim_s *dio, int sgeid)
-{
- struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
- struct scatterlist *sge;
- u64 addr;
-
- sge = (struct scatterlist *)scsi_sglist(cmnd) + sgeid;
- addr = (u64) sg_dma_address(sge);
-
- return *((union bfi_addr_u *) &addr);
-}
-
-static inline u32
-bfa_cb_ioim_get_sglen(struct bfad_ioim_s *dio, int sgeid)
-{
- struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
- struct scatterlist *sge;
- u32 len;
-
- sge = (struct scatterlist *)scsi_sglist(cmnd) + sgeid;
- len = sg_dma_len(sge);
-
- return len;
-}
-
-/**
* Get Command Reference Number for the I/O request. 0 if none.
*/
static inline u8
diff --git a/drivers/scsi/bfa/bfa_ioim.c b/drivers/scsi/bfa/bfa_ioim.c
index 5b107abe46e5..687f3d6e252b 100644
--- a/drivers/scsi/bfa/bfa_ioim.c
+++ b/drivers/scsi/bfa/bfa_ioim.c
@@ -731,6 +731,9 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
static struct fcp_cmnd_s cmnd_z0 = { 0 };
struct bfi_sge_s *sge;
u32 pgdlen = 0;
+ u64 addr;
+ struct scatterlist *sg;
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
/**
* check for room in queue to send request now
@@ -754,8 +757,10 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
*/
sge = &m->sges[0];
if (ioim->nsges) {
- sge->sga = bfa_cb_ioim_get_sgaddr(ioim->dio, 0);
- pgdlen = bfa_cb_ioim_get_sglen(ioim->dio, 0);
+ sg = (struct scatterlist *)scsi_sglist(cmnd);
+ addr = bfa_os_sgaddr(sg_dma_address(sg));
+ sge->sga = *(union bfi_addr_u *) &addr;
+ pgdlen = sg_dma_len(sg);
sge->sg_len = pgdlen;
sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
@@ -868,10 +873,16 @@ bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
struct bfi_sge_s *sge;
struct bfa_sgpg_s *sgpg;
u32 pgcumsz;
+ u64 addr;
+ struct scatterlist *sg;
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
sgeid = BFI_SGE_INLINE;
ioim->sgpg = sgpg = bfa_q_first(&ioim->sgpg_q);
+ sg = scsi_sglist(cmnd);
+ sg = sg_next(sg);
+
do {
sge = sgpg->sgpg->sges;
nsges = ioim->nsges - sgeid;
@@ -879,9 +890,10 @@ bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
nsges = BFI_SGPG_DATA_SGES;
pgcumsz = 0;
- for (i = 0; i < nsges; i++, sge++, sgeid++) {
- sge->sga = bfa_cb_ioim_get_sgaddr(ioim->dio, sgeid);
- sge->sg_len = bfa_cb_ioim_get_sglen(ioim->dio, sgeid);
+ for (i = 0; i < nsges; i++, sge++, sgeid++, sg = sg_next(sg)) {
+ addr = bfa_os_sgaddr(sg_dma_address(sg));
+ sge->sga = *(union bfi_addr_u *) &addr;
+ sge->sg_len = sg_dma_len(sg);
pgcumsz += sge->sg_len;
/**
diff --git a/drivers/scsi/bfa/bfa_os_inc.h b/drivers/scsi/bfa/bfa_os_inc.h
index 10a89f75fa94..bd1cd3ee3022 100644
--- a/drivers/scsi/bfa/bfa_os_inc.h
+++ b/drivers/scsi/bfa/bfa_os_inc.h
@@ -50,6 +50,10 @@
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_transport.h>
+#ifdef __BIG_ENDIAN
+#define __BIGENDIAN
+#endif
+
#define BFA_ERR KERN_ERR
#define BFA_WARNING KERN_WARNING
#define BFA_NOTICE KERN_NOTICE
@@ -123,6 +127,15 @@ int bfa_os_MWB(void *);
(((_x) & 0x00ff0000) >> 8) | \
(((_x) & 0xff000000) >> 24))
+#define bfa_os_swap_sgaddr(_x) ((u64)( \
+ (((u64)(_x) & (u64)0x00000000000000ffull) << 32) | \
+ (((u64)(_x) & (u64)0x000000000000ff00ull) << 32) | \
+ (((u64)(_x) & (u64)0x0000000000ff0000ull) << 32) | \
+ (((u64)(_x) & (u64)0x00000000ff000000ull) << 32) | \
+ (((u64)(_x) & (u64)0x000000ff00000000ull) >> 32) | \
+ (((u64)(_x) & (u64)0x0000ff0000000000ull) >> 32) | \
+ (((u64)(_x) & (u64)0x00ff000000000000ull) >> 32) | \
+ (((u64)(_x) & (u64)0xff00000000000000ull) >> 32)))
#ifndef __BIGENDIAN
#define bfa_os_htons(_x) ((u16)((((_x) & 0xff00) >> 8) | \
@@ -133,6 +146,7 @@ int bfa_os_MWB(void *);
#define bfa_os_hton3b(_x) bfa_swap_3b(_x)
#define bfa_os_wtole(_x) (_x)
+#define bfa_os_sgaddr(_x) (_x)
#else
@@ -141,6 +155,7 @@ int bfa_os_MWB(void *);
#define bfa_os_hton3b(_x) (_x)
#define bfa_os_htonll(_x) (_x)
#define bfa_os_wtole(_x) bfa_os_swap32(_x)
+#define bfa_os_sgaddr(_x) bfa_os_swap_sgaddr(_x)
#endif
@@ -161,12 +176,12 @@ int bfa_os_MWB(void *);
#define bfa_os_addr_t char __iomem *
#define bfa_os_panic()
-#define bfa_os_reg_read(_raddr) bfa_os_wtole(readl(_raddr))
-#define bfa_os_reg_write(_raddr, _val) writel(bfa_os_wtole((_val)), (_raddr))
+#define bfa_os_reg_read(_raddr) readl(_raddr)
+#define bfa_os_reg_write(_raddr, _val) writel((_val), (_raddr))
#define bfa_os_mem_read(_raddr, _off) \
- bfa_os_ntohl(readl(((_raddr) + (_off))))
+ bfa_os_swap32(readl(((_raddr) + (_off))))
#define bfa_os_mem_write(_raddr, _off, _val) \
- writel(bfa_os_htonl((_val)), ((_raddr) + (_off)))
+ writel(bfa_os_swap32((_val)), ((_raddr) + (_off)))
#define BFA_TRC_TS(_trcm) \
({ \
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 13f5feb308c2..24d015241e5b 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -33,7 +33,7 @@
#include <fcb/bfa_fcb.h>
BFA_TRC_FILE(LDRV, BFAD);
-static DEFINE_MUTEX(bfad_mutex);
+DEFINE_MUTEX(bfad_mutex);
LIST_HEAD(bfad_list);
static int bfad_inst;
int bfad_supported_fc4s;
@@ -299,8 +299,6 @@ bfa_fcb_vport_delete(struct bfad_vport_s *vport_drv)
complete(vport_drv->comp_del);
return;
}
-
- kfree(vport_drv);
}
/**
@@ -483,7 +481,7 @@ ext:
*/
bfa_status_t
bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
- struct bfa_port_cfg_s *port_cfg)
+ struct bfa_port_cfg_s *port_cfg, struct device *dev)
{
struct bfad_vport_s *vport;
int rc = BFA_STATUS_OK;
@@ -506,7 +504,8 @@ bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
goto ext_free_vport;
if (port_cfg->roles & BFA_PORT_ROLE_FCP_IM) {
- rc = bfad_im_scsi_host_alloc(bfad, vport->drv_port.im_port);
+ rc = bfad_im_scsi_host_alloc(bfad, vport->drv_port.im_port,
+ dev);
if (rc != BFA_STATUS_OK)
goto ext_free_fcs_vport;
}
@@ -591,7 +590,6 @@ bfad_init_timer(struct bfad_s *bfad)
int
bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
{
- unsigned long bar0_len;
int rc = -ENODEV;
if (pci_enable_device(pdev)) {
@@ -611,9 +609,7 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
goto out_release_region;
}
- bfad->pci_bar0_map = pci_resource_start(pdev, 0);
- bar0_len = pci_resource_len(pdev, 0);
- bfad->pci_bar0_kva = ioremap(bfad->pci_bar0_map, bar0_len);
+ bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
if (bfad->pci_bar0_kva == NULL) {
BFA_PRINTF(BFA_ERR, "Fail to map bar0\n");
@@ -646,11 +642,7 @@ out:
void
bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)
{
-#if defined(__ia64__)
pci_iounmap(pdev, bfad->pci_bar0_kva);
-#else
- iounmap(bfad->pci_bar0_kva);
-#endif
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
@@ -737,6 +729,7 @@ bfad_drv_init(struct bfad_s *bfad)
memset(&driver_info, 0, sizeof(driver_info));
strncpy(driver_info.version, BFAD_DRIVER_VERSION,
sizeof(driver_info.version) - 1);
+ __kernel_param_lock();
if (host_name)
strncpy(driver_info.host_machine_name, host_name,
sizeof(driver_info.host_machine_name) - 1);
@@ -746,6 +739,7 @@ bfad_drv_init(struct bfad_s *bfad)
if (os_patch)
strncpy(driver_info.host_os_patch, os_patch,
sizeof(driver_info.host_os_patch) - 1);
+ __kernel_param_unlock();
strncpy(driver_info.os_device_name, bfad->pci_name,
sizeof(driver_info.os_device_name - 1));
@@ -848,7 +842,8 @@ bfad_cfg_pport(struct bfad_s *bfad, enum bfa_port_role role)
goto out;
}
- rc = bfad_im_scsi_host_alloc(bfad, bfad->pport.im_port);
+ rc = bfad_im_scsi_host_alloc(bfad, bfad->pport.im_port,
+ &bfad->pcidev->dev);
if (rc != BFA_STATUS_OK)
goto out;
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 6a2efdd5ef24..e477bfbfa7d8 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -364,6 +364,152 @@ bfad_im_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
}
+static int
+bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
+{
+ char *vname = fc_vport->symbolic_name;
+ struct Scsi_Host *shost = fc_vport->shost;
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) shost->hostdata[0];
+ struct bfad_s *bfad = im_port->bfad;
+ struct bfa_port_cfg_s port_cfg;
+ int status = 0, rc;
+ unsigned long flags;
+
+ memset(&port_cfg, 0, sizeof(port_cfg));
+
+ port_cfg.pwwn = wwn_to_u64((u8 *) &fc_vport->port_name);
+ port_cfg.nwwn = wwn_to_u64((u8 *) &fc_vport->node_name);
+
+ if (strlen(vname) > 0)
+ strcpy((char *)&port_cfg.sym_name, vname);
+
+ port_cfg.roles = BFA_PORT_ROLE_FCP_IM;
+ rc = bfad_vport_create(bfad, 0, &port_cfg, &fc_vport->dev);
+
+ if (rc == BFA_STATUS_OK) {
+ struct bfad_vport_s *vport;
+ struct bfa_fcs_vport_s *fcs_vport;
+ struct Scsi_Host *vshost;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0,
+ port_cfg.pwwn);
+ if (fcs_vport == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return VPCERR_BAD_WWN;
+ }
+
+ fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
+ if (disable) {
+ bfa_fcs_vport_stop(fcs_vport);
+ fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ vport = fcs_vport->vport_drv;
+ vshost = vport->drv_port.im_port->shost;
+ fc_host_node_name(vshost) = wwn_to_u64((u8 *) &port_cfg.nwwn);
+ fc_host_port_name(vshost) = wwn_to_u64((u8 *) &port_cfg.pwwn);
+ fc_vport->dd_data = vport;
+ vport->drv_port.im_port->fc_vport = fc_vport;
+
+ } else if (rc == BFA_STATUS_INVALID_WWN)
+ return VPCERR_BAD_WWN;
+ else if (rc == BFA_STATUS_VPORT_EXISTS)
+ return VPCERR_BAD_WWN;
+ else if (rc == BFA_STATUS_VPORT_MAX)
+ return VPCERR_NO_FABRIC_SUPP;
+ else if (rc == BFA_STATUS_VPORT_WWN_BP)
+ return VPCERR_BAD_WWN;
+ else
+ return FC_VPORT_FAILED;
+
+ return status;
+}
+
+static int
+bfad_im_vport_delete(struct fc_vport *fc_vport)
+{
+ struct bfad_vport_s *vport = (struct bfad_vport_s *)fc_vport->dd_data;
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) vport->drv_port.im_port;
+ struct bfad_s *bfad = im_port->bfad;
+ struct bfad_port_s *port;
+ struct bfa_fcs_vport_s *fcs_vport;
+ struct Scsi_Host *vshost;
+ wwn_t pwwn;
+ int rc;
+ unsigned long flags;
+ struct completion fcomp;
+
+ if (im_port->flags & BFAD_PORT_DELETE)
+ goto free_scsi_host;
+
+ port = im_port->port;
+
+ vshost = vport->drv_port.im_port->shost;
+ pwwn = wwn_to_u64((u8 *) &fc_host_port_name(vshost));
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (fcs_vport == NULL)
+ return VPCERR_BAD_WWN;
+
+ vport->drv_port.flags |= BFAD_PORT_DELETE;
+
+ vport->comp_del = &fcomp;
+ init_completion(vport->comp_del);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ rc = bfa_fcs_vport_delete(&vport->fcs_vport);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ wait_for_completion(vport->comp_del);
+
+free_scsi_host:
+ bfad_os_scsi_host_free(bfad, im_port);
+
+ kfree(vport);
+
+ return 0;
+}
+
+static int
+bfad_im_vport_disable(struct fc_vport *fc_vport, bool disable)
+{
+ struct bfad_vport_s *vport;
+ struct bfad_s *bfad;
+ struct bfa_fcs_vport_s *fcs_vport;
+ struct Scsi_Host *vshost;
+ wwn_t pwwn;
+ unsigned long flags;
+
+ vport = (struct bfad_vport_s *)fc_vport->dd_data;
+ bfad = vport->drv_port.bfad;
+ vshost = vport->drv_port.im_port->shost;
+ pwwn = wwn_to_u64((u8 *) &fc_vport->port_name);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (fcs_vport == NULL)
+ return VPCERR_BAD_WWN;
+
+ if (disable) {
+ bfa_fcs_vport_stop(fcs_vport);
+ fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
+ } else {
+ bfa_fcs_vport_start(fcs_vport);
+ fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
+ }
+
+ return 0;
+}
+
struct fc_function_template bfad_im_fc_function_template = {
/* Target dynamic attributes */
@@ -413,6 +559,61 @@ struct fc_function_template bfad_im_fc_function_template = {
.show_rport_dev_loss_tmo = 1,
.get_rport_dev_loss_tmo = bfad_im_get_rport_loss_tmo,
.set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo,
+
+ .vport_create = bfad_im_vport_create,
+ .vport_delete = bfad_im_vport_delete,
+ .vport_disable = bfad_im_vport_disable,
+};
+
+struct fc_function_template bfad_im_vport_fc_function_template = {
+
+ /* Target dynamic attributes */
+ .get_starget_port_id = bfad_im_get_starget_port_id,
+ .show_starget_port_id = 1,
+ .get_starget_node_name = bfad_im_get_starget_node_name,
+ .show_starget_node_name = 1,
+ .get_starget_port_name = bfad_im_get_starget_port_name,
+ .show_starget_port_name = 1,
+
+ /* Host dynamic attribute */
+ .get_host_port_id = bfad_im_get_host_port_id,
+ .show_host_port_id = 1,
+
+ /* Host fixed attributes */
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_supported_speeds = 1,
+ .show_host_maxframe_size = 1,
+
+ /* More host dynamic attributes */
+ .show_host_port_type = 1,
+ .get_host_port_type = bfad_im_get_host_port_type,
+ .show_host_port_state = 1,
+ .get_host_port_state = bfad_im_get_host_port_state,
+ .show_host_active_fc4s = 1,
+ .get_host_active_fc4s = bfad_im_get_host_active_fc4s,
+ .show_host_speed = 1,
+ .get_host_speed = bfad_im_get_host_speed,
+ .show_host_fabric_name = 1,
+ .get_host_fabric_name = bfad_im_get_host_fabric_name,
+
+ .show_host_symbolic_name = 1,
+
+ /* Statistics */
+ .get_fc_host_stats = bfad_im_get_stats,
+ .reset_fc_host_stats = bfad_im_reset_stats,
+
+ /* Allocation length for host specific data */
+ .dd_fcrport_size = sizeof(struct bfad_itnim_data_s *),
+
+ /* Remote port fixed attributes */
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+ .show_rport_dev_loss_tmo = 1,
+ .get_rport_dev_loss_tmo = bfad_im_get_rport_loss_tmo,
+ .set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo,
};
/**
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index 107848cd3b6d..6c920c1b53a4 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -162,7 +162,6 @@ struct bfad_s {
const char *pci_name;
struct bfa_pcidev_s hal_pcidev;
struct bfa_ioc_pci_attr_s pci_attr;
- unsigned long pci_bar0_map;
void __iomem *pci_bar0_kva;
struct completion comp;
struct completion suspend;
@@ -254,7 +253,7 @@ do { \
bfa_status_t bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
- struct bfa_port_cfg_s *port_cfg);
+ struct bfa_port_cfg_s *port_cfg, struct device *dev);
bfa_status_t bfad_vf_create(struct bfad_s *bfad, u16 vf_id,
struct bfa_port_cfg_s *port_cfg);
bfa_status_t bfad_cfg_pport(struct bfad_s *bfad, enum bfa_port_role role);
@@ -294,5 +293,6 @@ extern struct list_head bfad_list;
extern int bfa_lun_queue_depth;
extern int bfad_supported_fc4s;
extern int bfa_linkup_delay;
+extern struct mutex bfad_mutex;
#endif /* __BFAD_DRV_H__ */
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 78f42aa57369..5b7cf539e50b 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -30,6 +30,7 @@ BFA_TRC_FILE(LDRV, IM);
DEFINE_IDR(bfad_im_port_index);
struct scsi_transport_template *bfad_im_scsi_transport_template;
+struct scsi_transport_template *bfad_im_scsi_vport_transport_template;
static void bfad_im_itnim_work_handler(struct work_struct *work);
static int bfad_im_queuecommand(struct scsi_cmnd *cmnd,
void (*done)(struct scsi_cmnd *));
@@ -252,7 +253,6 @@ bfad_im_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd,
struct bfa_itnim_s *bfa_itnim;
bfa_status_t rc = BFA_STATUS_OK;
- bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
if (!tskim) {
BFA_DEV_PRINTF(bfad, BFA_ERR,
@@ -513,11 +513,14 @@ void bfa_fcb_itnim_tov(struct bfad_itnim_s *itnim)
* Allocate a Scsi_Host for a port.
*/
int
-bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
+bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
+ struct device *dev)
{
int error = 1;
+ mutex_lock(&bfad_mutex);
if (!idr_pre_get(&bfad_im_port_index, GFP_KERNEL)) {
+ mutex_unlock(&bfad_mutex);
printk(KERN_WARNING "idr_pre_get failure\n");
goto out;
}
@@ -525,10 +528,13 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
error = idr_get_new(&bfad_im_port_index, im_port,
&im_port->idr_id);
if (error) {
+ mutex_unlock(&bfad_mutex);
printk(KERN_WARNING "idr_get_new failure\n");
goto out;
}
+ mutex_unlock(&bfad_mutex);
+
im_port->shost = bfad_os_scsi_host_alloc(im_port, bfad);
if (!im_port->shost) {
error = 1;
@@ -542,12 +548,15 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
im_port->shost->max_lun = MAX_FCP_LUN;
im_port->shost->max_cmd_len = 16;
im_port->shost->can_queue = bfad->cfg_data.ioc_queue_depth;
- im_port->shost->transportt = bfad_im_scsi_transport_template;
+ if (im_port->port->pvb_type == BFAD_PORT_PHYS_BASE)
+ im_port->shost->transportt = bfad_im_scsi_transport_template;
+ else
+ im_port->shost->transportt =
+ bfad_im_scsi_vport_transport_template;
- error = bfad_os_scsi_add_host(im_port->shost, im_port, bfad);
+ error = scsi_add_host(im_port->shost, dev);
if (error) {
- printk(KERN_WARNING "bfad_os_scsi_add_host failure %d\n",
- error);
+ printk(KERN_WARNING "scsi_add_host failure %d\n", error);
goto out_fc_rel;
}
@@ -559,7 +568,9 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
out_fc_rel:
scsi_host_put(im_port->shost);
out_free_idr:
+ mutex_lock(&bfad_mutex);
idr_remove(&bfad_im_port_index, im_port->idr_id);
+ mutex_unlock(&bfad_mutex);
out:
return error;
}
@@ -567,8 +578,6 @@ out:
void
bfad_im_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
{
- unsigned long flags;
-
bfa_trc(bfad, bfad->inst_no);
bfa_log(bfad->logmod, BFA_LOG_LINUX_SCSI_HOST_FREE,
im_port->shost->host_no);
@@ -578,9 +587,9 @@ bfad_im_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
scsi_remove_host(im_port->shost);
scsi_host_put(im_port->shost);
- spin_lock_irqsave(&bfad->bfad_lock, flags);
+ mutex_lock(&bfad_mutex);
idr_remove(&bfad_im_port_index, im_port->idr_id);
- spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ mutex_unlock(&bfad_mutex);
}
static void
@@ -589,9 +598,11 @@ bfad_im_port_delete_handler(struct work_struct *work)
struct bfad_im_port_s *im_port =
container_of(work, struct bfad_im_port_s, port_delete_work);
- bfad_im_scsi_host_free(im_port->bfad, im_port);
- bfad_im_port_clean(im_port);
- kfree(im_port);
+ if (im_port->port->pvb_type != BFAD_PORT_PHYS_BASE) {
+ im_port->flags |= BFAD_PORT_DELETE;
+ fc_vport_terminate(im_port->fc_vport);
+ }
+
}
bfa_status_t
@@ -690,23 +701,6 @@ bfad_im_probe_undo(struct bfad_s *bfad)
}
}
-
-
-
-int
-bfad_os_scsi_add_host(struct Scsi_Host *shost, struct bfad_im_port_s *im_port,
- struct bfad_s *bfad)
-{
- struct device *dev;
-
- if (im_port->port->pvb_type == BFAD_PORT_PHYS_BASE)
- dev = &bfad->pcidev->dev;
- else
- dev = &bfad->pport.im_port->shost->shost_gendev;
-
- return scsi_add_host(shost, dev);
-}
-
struct Scsi_Host *
bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
{
@@ -725,7 +719,8 @@ bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
void
bfad_os_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
{
- flush_workqueue(bfad->im->drv_workq);
+ if (!(im_port->flags & BFAD_PORT_DELETE))
+ flush_workqueue(bfad->im->drv_workq);
bfad_im_scsi_host_free(im_port->bfad, im_port);
bfad_im_port_clean(im_port);
kfree(im_port);
@@ -830,6 +825,13 @@ bfad_im_module_init(void)
if (!bfad_im_scsi_transport_template)
return BFA_STATUS_ENOMEM;
+ bfad_im_scsi_vport_transport_template =
+ fc_attach_transport(&bfad_im_vport_fc_function_template);
+ if (!bfad_im_scsi_vport_transport_template) {
+ fc_release_transport(bfad_im_scsi_transport_template);
+ return BFA_STATUS_ENOMEM;
+ }
+
return BFA_STATUS_OK;
}
@@ -838,6 +840,8 @@ bfad_im_module_exit(void)
{
if (bfad_im_scsi_transport_template)
fc_release_transport(bfad_im_scsi_transport_template);
+ if (bfad_im_scsi_vport_transport_template)
+ fc_release_transport(bfad_im_scsi_vport_transport_template);
}
void
@@ -938,6 +942,7 @@ bfad_os_fc_host_init(struct bfad_im_port_s *im_port)
bfa_os_htonll((bfa_fcs_port_get_nwwn(port->fcs_port)));
fc_host_port_name(host) =
bfa_os_htonll((bfa_fcs_port_get_pwwn(port->fcs_port)));
+ fc_host_max_npiv_vports(host) = bfa_lps_get_max_vport(&bfad->bfa);
fc_host_supported_classes(host) = FC_COS_CLASS3;
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 85ab2da21321..973cab4d09c7 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -34,7 +34,7 @@ void bfad_im_port_online(struct bfad_s *bfad, struct bfad_port_s *port);
void bfad_im_port_offline(struct bfad_s *bfad, struct bfad_port_s *port);
void bfad_im_port_clean(struct bfad_im_port_s *im_port);
int bfad_im_scsi_host_alloc(struct bfad_s *bfad,
- struct bfad_im_port_s *im_port);
+ struct bfad_im_port_s *im_port, struct device *dev);
void bfad_im_scsi_host_free(struct bfad_s *bfad,
struct bfad_im_port_s *im_port);
@@ -64,9 +64,11 @@ struct bfad_im_port_s {
struct work_struct port_delete_work;
int idr_id;
u16 cur_scsi_id;
+ u16 flags;
struct list_head binding_list;
struct Scsi_Host *shost;
struct list_head itnim_mapped_list;
+ struct fc_vport *fc_vport;
};
enum bfad_itnim_state {
@@ -140,6 +142,8 @@ void bfad_im_itnim_unmap(struct bfad_im_port_s *im_port,
extern struct scsi_host_template bfad_im_scsi_host_template;
extern struct scsi_host_template bfad_im_vport_template;
extern struct fc_function_template bfad_im_fc_function_template;
+extern struct fc_function_template bfad_im_vport_fc_function_template;
extern struct scsi_transport_template *bfad_im_scsi_transport_template;
+extern struct scsi_transport_template *bfad_im_scsi_vport_transport_template;
#endif
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_cee.h b/drivers/scsi/bfa/include/defs/bfa_defs_cee.h
index b0ac9ac15c5d..6eaf519eccdc 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_cee.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_cee.h
@@ -50,7 +50,7 @@ struct bfa_cee_lldp_str_s {
};
-/* LLDP paramters */
+/* LLDP parameters */
struct bfa_cee_lldp_cfg_s {
struct bfa_cee_lldp_str_s chassis_id;
struct bfa_cee_lldp_str_s port_id;
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_status.h b/drivers/scsi/bfa/include/defs/bfa_defs_status.h
index 4374494bd566..ec78b4cb121a 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_status.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_status.h
@@ -223,9 +223,9 @@ enum bfa_status {
BFA_STATUS_IM_PVID_NON_ZERO = 140, /* Port VLAN ID (PVID) is Set to
* Non-Zero Value */
BFA_STATUS_IM_INETCFG_LOCK_FAILED = 141, /* Acquiring Network
- * Subsytem Lock Failed.Please
+ * Subsystem Lock Failed. Please
* try after some time */
- BFA_STATUS_IM_GET_INETCFG_FAILED = 142, /* Acquiring Network Subsytem
+ BFA_STATUS_IM_GET_INETCFG_FAILED = 142, /* Acquiring Network Subsystem
* handle Failed. Please try
* after some time */
BFA_STATUS_IM_NOT_BOUND = 143, /* IM driver is not active */
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 18352ff82101..3a66ca24c7bd 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -347,6 +347,7 @@ int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn,
login_wqe->cmd_sn = be32_to_cpu(login_hdr->cmdsn);
login_wqe->exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn);
+ login_wqe->flags = ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN;
login_wqe->resp_bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_bd_dma;
login_wqe->resp_bd_list_addr_hi =
@@ -356,7 +357,6 @@ int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn,
(bnx2i_conn->gen_pdu.resp_buf_size <<
ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT));
login_wqe->resp_buffer = dword;
- login_wqe->flags = 0;
login_wqe->bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.req_bd_dma;
login_wqe->bd_list_addr_hi =
(u32) ((u64) bnx2i_conn->gen_pdu.req_bd_dma >> 32);
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 5d9296c599f6..af6a00a600fb 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -17,8 +17,8 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
static u32 adapter_count;
#define DRV_MODULE_NAME "bnx2i"
-#define DRV_MODULE_VERSION "2.1.0"
-#define DRV_MODULE_RELDATE "Dec 06, 2009"
+#define DRV_MODULE_VERSION "2.1.1"
+#define DRV_MODULE_RELDATE "Mar 24, 2010"
static char version[] __devinitdata =
"Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
@@ -26,7 +26,8 @@ static char version[] __devinitdata =
MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com>");
-MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 iSCSI Driver");
+MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/57710/57711"
+ " iSCSI Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
@@ -289,6 +290,7 @@ static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic)
int rc;
mutex_lock(&bnx2i_dev_lock);
+ hba->cnic = cnic;
rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba);
if (!rc) {
hba->age++;
@@ -335,8 +337,7 @@ void bnx2i_ulp_init(struct cnic_dev *dev)
if (bnx2i_init_one(hba, dev)) {
printk(KERN_ERR "bnx2i - hba %p init failed\n", hba);
bnx2i_free_hba(hba);
- } else
- hba->cnic = dev;
+ }
}
diff --git a/drivers/scsi/cxgb3i/cxgb3i_init.c b/drivers/scsi/cxgb3i/cxgb3i_init.c
index d0ab23a58355..685af3698518 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_init.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_init.c
@@ -104,8 +104,10 @@ static int __init cxgb3i_init_module(void)
return err;
err = cxgb3i_pdu_init();
- if (err < 0)
+ if (err < 0) {
+ cxgb3i_iscsi_cleanup();
return err;
+ }
cxgb3_register_client(&t3c_client);
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index e8a0bc3efd49..6faf472f7537 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -285,13 +285,11 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
switch (cmd) {
case MODE_SELECT:
len = sizeof(short_trespass);
- rq->cmd_flags |= REQ_RW;
rq->cmd[1] = 0x10;
rq->cmd[4] = len;
break;
case MODE_SELECT_10:
len = sizeof(long_trespass);
- rq->cmd_flags |= REQ_RW;
rq->cmd[1] = 0x10;
rq->cmd[8] = len;
break;
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index f01b9b44e8aa..1804bab672ae 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -146,6 +146,7 @@ static int fcoe_vport_destroy(struct fc_vport *);
static int fcoe_vport_create(struct fc_vport *, bool disabled);
static int fcoe_vport_disable(struct fc_vport *, bool disable);
static void fcoe_set_vport_symbolic_name(struct fc_vport *);
+static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
static struct libfc_function_template fcoe_libfc_fcn_templ = {
.frame_send = fcoe_xmit,
@@ -153,6 +154,7 @@ static struct libfc_function_template fcoe_libfc_fcn_templ = {
.ddp_done = fcoe_ddp_done,
.elsct_send = fcoe_elsct_send,
.get_lesb = fcoe_get_lesb,
+ .lport_set_port_id = fcoe_set_port_id,
};
struct fc_function_template fcoe_transport_function = {
@@ -309,10 +311,10 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
* for multiple unicast MACs.
*/
memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
- dev_unicast_add(netdev, flogi_maddr);
+ dev_uc_add(netdev, flogi_maddr);
if (fip->spma)
- dev_unicast_add(netdev, fip->ctl_src_addr);
- dev_mc_add(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
+ dev_uc_add(netdev, fip->ctl_src_addr);
+ dev_mc_add(netdev, FIP_ALL_ENODE_MACS);
/*
* setup the receive function from ethernet driver
@@ -395,10 +397,10 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
/* Delete secondary MAC addresses */
memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
- dev_unicast_delete(netdev, flogi_maddr);
+ dev_uc_del(netdev, flogi_maddr);
if (fip->spma)
- dev_unicast_delete(netdev, fip->ctl_src_addr);
- dev_mc_delete(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
+ dev_uc_del(netdev, fip->ctl_src_addr);
+ dev_mc_del(netdev, FIP_ALL_ENODE_MACS);
/* Tell the LLD we are done w/ FCoE */
ops = netdev->netdev_ops;
@@ -491,9 +493,9 @@ static void fcoe_update_src_mac(struct fc_lport *lport, u8 *addr)
rtnl_lock();
if (!is_zero_ether_addr(port->data_src_addr))
- dev_unicast_delete(fcoe->netdev, port->data_src_addr);
+ dev_uc_del(fcoe->netdev, port->data_src_addr);
if (!is_zero_ether_addr(addr))
- dev_unicast_add(fcoe->netdev, addr);
+ dev_uc_add(fcoe->netdev, addr);
memcpy(port->data_src_addr, addr, ETH_ALEN);
rtnl_unlock();
}
@@ -653,15 +655,13 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
/**
* fcoe_shost_config() - Set up the SCSI host associated with a local port
* @lport: The local port
- * @shost: The SCSI host to associate with the local port
* @dev: The device associated with the SCSI host
*
* Must be called after fcoe_lport_config() and fcoe_netdev_config()
*
* Returns: 0 for success
*/
-static int fcoe_shost_config(struct fc_lport *lport, struct Scsi_Host *shost,
- struct device *dev)
+static int fcoe_shost_config(struct fc_lport *lport, struct device *dev)
{
int rc = 0;
@@ -669,6 +669,8 @@ static int fcoe_shost_config(struct fc_lport *lport, struct Scsi_Host *shost,
lport->host->max_lun = FCOE_MAX_LUN;
lport->host->max_id = FCOE_MAX_FCP_TARGET;
lport->host->max_channel = 0;
+ lport->host->max_cmd_len = FCOE_MAX_CMD_LEN;
+
if (lport->vport)
lport->host->transportt = fcoe_vport_transport_template;
else
@@ -820,7 +822,7 @@ static void fcoe_if_destroy(struct fc_lport *lport)
rtnl_lock();
if (!is_zero_ether_addr(port->data_src_addr))
- dev_unicast_delete(netdev, port->data_src_addr);
+ dev_uc_del(netdev, port->data_src_addr);
rtnl_unlock();
/* receives may not be stopped until after this */
@@ -897,7 +899,6 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
struct net_device *netdev = fcoe->netdev;
struct fc_lport *lport = NULL;
struct fcoe_port *port;
- struct Scsi_Host *shost;
int rc;
/*
* parent is only a vport if npiv is 1,
@@ -919,7 +920,6 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
rc = -ENOMEM;
goto out;
}
- shost = lport->host;
port = lport_priv(lport);
port->lport = lport;
port->fcoe = fcoe;
@@ -934,7 +934,8 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
}
if (npiv) {
- FCOE_NETDEV_DBG(netdev, "Setting vport names, 0x%llX 0x%llX\n",
+ FCOE_NETDEV_DBG(netdev, "Setting vport names, "
+ "%16.16llx %16.16llx\n",
vport->node_name, vport->port_name);
fc_set_wwnn(lport, vport->node_name);
fc_set_wwpn(lport, vport->port_name);
@@ -949,7 +950,7 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
}
/* configure lport scsi host properties */
- rc = fcoe_shost_config(lport, shost, parent);
+ rc = fcoe_shost_config(lport, parent);
if (rc) {
FCOE_NETDEV_DBG(netdev, "Could not configure shost for the "
"interface\n");
@@ -1073,7 +1074,7 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu)
struct sk_buff *skb;
#ifdef CONFIG_SMP
struct fcoe_percpu_s *p0;
- unsigned targ_cpu = smp_processor_id();
+ unsigned targ_cpu = get_cpu();
#endif /* CONFIG_SMP */
FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);
@@ -1129,6 +1130,7 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu)
kfree_skb(skb);
spin_unlock_bh(&p->fcoe_rx_list.lock);
}
+ put_cpu();
#else
/*
* This a non-SMP scenario where the singular Rx thread is
@@ -1297,8 +1299,8 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
return 0;
err:
- fc_lport_get_stats(lport)->ErrorFrames++;
-
+ per_cpu_ptr(lport->dev_stats, get_cpu())->ErrorFrames++;
+ put_cpu();
err2:
kfree_skb(skb);
return -1;
@@ -1444,7 +1446,7 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
return 0;
}
- if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
+ if (unlikely(fh->fh_type == FC_TYPE_ELS) &&
fcoe_ctlr_els_send(&fcoe->ctlr, lport, skb))
return 0;
@@ -1527,9 +1529,10 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
skb_shinfo(skb)->gso_size = 0;
}
/* update tx stats: regardless if LLD fails */
- stats = fc_lport_get_stats(lport);
+ stats = per_cpu_ptr(lport->dev_stats, get_cpu());
stats->TxFrames++;
stats->TxWords += wlen;
+ put_cpu();
/* send down to lld */
fr_dev(fp) = lport;
@@ -1563,7 +1566,6 @@ static void fcoe_recv_frame(struct sk_buff *skb)
struct fc_frame_header *fh;
struct fcoe_crc_eof crc_eof;
struct fc_frame *fp;
- u8 *mac = NULL;
struct fcoe_port *port;
struct fcoe_hdr *hp;
@@ -1583,13 +1585,9 @@ static void fcoe_recv_frame(struct sk_buff *skb)
skb_end_pointer(skb), skb->csum,
skb->dev ? skb->dev->name : "<NULL>");
- /*
- * Save source MAC address before discarding header.
- */
port = lport_priv(lport);
if (skb_is_nonlinear(skb))
skb_linearize(skb); /* not ideal */
- mac = eth_hdr(skb)->h_source;
/*
* Frame length checks and setting up the header pointers
@@ -1598,7 +1596,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
hp = (struct fcoe_hdr *) skb_network_header(skb);
fh = (struct fc_frame_header *) skb_transport_header(skb);
- stats = fc_lport_get_stats(lport);
+ stats = per_cpu_ptr(lport->dev_stats, get_cpu());
if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
if (stats->ErrorFrames < 5)
printk(KERN_WARNING "fcoe: FCoE version "
@@ -1607,9 +1605,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
"initiator supports version "
"%x\n", FC_FCOE_DECAPS_VER(hp),
FC_FCOE_VER);
- stats->ErrorFrames++;
- kfree_skb(skb);
- return;
+ goto drop;
}
skb_pull(skb, sizeof(struct fcoe_hdr));
@@ -1624,16 +1620,12 @@ static void fcoe_recv_frame(struct sk_buff *skb)
fr_sof(fp) = hp->fcoe_sof;
/* Copy out the CRC and EOF trailer for access */
- if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
- kfree_skb(skb);
- return;
- }
+ if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof)))
+ goto drop;
fr_eof(fp) = crc_eof.fcoe_eof;
fr_crc(fp) = crc_eof.fcoe_crc32;
- if (pskb_trim(skb, fr_len)) {
- kfree_skb(skb);
- return;
- }
+ if (pskb_trim(skb, fr_len))
+ goto drop;
/*
* We only check CRC if no offload is available and if it is
@@ -1647,25 +1639,27 @@ static void fcoe_recv_frame(struct sk_buff *skb)
fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
fh = fc_frame_header_get(fp);
- if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
- fh->fh_type == FC_TYPE_FCP) {
- fc_exch_recv(lport, fp);
- return;
- }
- if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
+ if ((fh->fh_r_ctl != FC_RCTL_DD_SOL_DATA ||
+ fh->fh_type != FC_TYPE_FCP) &&
+ (fr_flags(fp) & FCPHF_CRC_UNCHECKED)) {
if (le32_to_cpu(fr_crc(fp)) !=
~crc32(~0, skb->data, fr_len)) {
if (stats->InvalidCRCCount < 5)
printk(KERN_WARNING "fcoe: dropping "
"frame with CRC error\n");
stats->InvalidCRCCount++;
- stats->ErrorFrames++;
- fc_frame_free(fp);
- return;
+ goto drop;
}
fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
}
+ put_cpu();
fc_exch_recv(lport, fp);
+ return;
+
+drop:
+ stats->ErrorFrames++;
+ put_cpu();
+ kfree_skb(skb);
}
/**
@@ -1838,8 +1832,9 @@ static int fcoe_device_notification(struct notifier_block *notifier,
if (link_possible && !fcoe_link_ok(lport))
fcoe_ctlr_link_up(&fcoe->ctlr);
else if (fcoe_ctlr_link_down(&fcoe->ctlr)) {
- stats = fc_lport_get_stats(lport);
+ stats = per_cpu_ptr(lport->dev_stats, get_cpu());
stats->LinkFailureCount++;
+ put_cpu();
fcoe_clean_pending_queue(lport);
}
out:
@@ -1905,9 +1900,10 @@ static int fcoe_disable(const char *buffer, struct kernel_param *kp)
fcoe = fcoe_hostlist_lookup_port(netdev);
rtnl_unlock();
- if (fcoe)
+ if (fcoe) {
fc_fabric_logoff(fcoe->ctlr.lp);
- else
+ fcoe_ctlr_link_down(&fcoe->ctlr);
+ } else
rc = -ENODEV;
dev_put(netdev);
@@ -1954,9 +1950,11 @@ static int fcoe_enable(const char *buffer, struct kernel_param *kp)
fcoe = fcoe_hostlist_lookup_port(netdev);
rtnl_unlock();
- if (fcoe)
+ if (fcoe) {
+ if (!fcoe_link_ok(fcoe->ctlr.lp))
+ fcoe_ctlr_link_up(&fcoe->ctlr);
rc = fc_fabric_login(fcoe->ctlr.lp);
- else
+ } else
rc = -ENODEV;
dev_put(netdev);
@@ -2149,8 +2147,7 @@ int fcoe_link_ok(struct fc_lport *lport)
struct net_device *netdev = port->fcoe->netdev;
struct ethtool_cmd ecmd = { ETHTOOL_GSET };
- if ((netdev->flags & IFF_UP) && netif_carrier_ok(netdev) &&
- (!dev_ethtool_get_settings(netdev, &ecmd))) {
+ if (netif_oper_up(netdev) && !dev_ethtool_get_settings(netdev, &ecmd)) {
lport->link_supported_speeds &=
~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
if (ecmd.supported & (SUPPORTED_1000baseT_Half |
@@ -2631,3 +2628,25 @@ static void fcoe_get_lesb(struct fc_lport *lport,
lesb->lesb_miss_fka = htonl(mdac);
lesb->lesb_fcs_error = htonl(dev_get_stats(netdev)->rx_crc_errors);
}
+
+/**
+ * fcoe_set_port_id() - Callback from libfc when Port_ID is set.
+ * @lport: the local port
+ * @port_id: the port ID
+ * @fp: the received frame, if any, that caused the port_id to be set.
+ *
+ * This routine handles the case where we received a FLOGI and are
+ * entering point-to-point mode. We need to call fcoe_ctlr_recv_flogi()
+ * so it can set the non-mapped mode and gateway address.
+ *
+ * The FLOGI LS_ACC is handled by fcoe_flogi_resp().
+ */
+static void fcoe_set_port_id(struct fc_lport *lport,
+ u32 port_id, struct fc_frame *fp)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct fcoe_interface *fcoe = port->fcoe;
+
+ if (fp && fc_frame_payload_op(fp) == ELS_FLOGI)
+ fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp);
+}
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
index 3440da48d169..aadd24962e92 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -51,7 +51,7 @@ MODULE_LICENSE("GPL v2");
#define FCOE_CTLR_DEF_FKA FIP_DEF_FKA /* default keep alive (mS) */
static void fcoe_ctlr_timeout(unsigned long);
-static void fcoe_ctlr_link_work(struct work_struct *);
+static void fcoe_ctlr_timer_work(struct work_struct *);
static void fcoe_ctlr_recv_work(struct work_struct *);
static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
@@ -116,7 +116,7 @@ void fcoe_ctlr_init(struct fcoe_ctlr *fip)
spin_lock_init(&fip->lock);
fip->flogi_oxid = FC_XID_UNKNOWN;
setup_timer(&fip->timer, fcoe_ctlr_timeout, (unsigned long)fip);
- INIT_WORK(&fip->link_work, fcoe_ctlr_link_work);
+ INIT_WORK(&fip->timer_work, fcoe_ctlr_timer_work);
INIT_WORK(&fip->recv_work, fcoe_ctlr_recv_work);
skb_queue_head_init(&fip->fip_recv_list);
}
@@ -164,7 +164,7 @@ void fcoe_ctlr_destroy(struct fcoe_ctlr *fip)
fcoe_ctlr_reset_fcfs(fip);
spin_unlock_bh(&fip->lock);
del_timer_sync(&fip->timer);
- cancel_work_sync(&fip->link_work);
+ cancel_work_sync(&fip->timer_work);
}
EXPORT_SYMBOL(fcoe_ctlr_destroy);
@@ -257,14 +257,10 @@ void fcoe_ctlr_link_up(struct fcoe_ctlr *fip)
{
spin_lock_bh(&fip->lock);
if (fip->state == FIP_ST_NON_FIP || fip->state == FIP_ST_AUTO) {
- fip->last_link = 1;
- fip->link = 1;
spin_unlock_bh(&fip->lock);
fc_linkup(fip->lp);
} else if (fip->state == FIP_ST_LINK_WAIT) {
fip->state = fip->mode;
- fip->last_link = 1;
- fip->link = 1;
spin_unlock_bh(&fip->lock);
if (fip->state == FIP_ST_AUTO)
LIBFCOE_FIP_DBG(fip, "%s", "setting AUTO mode.\n");
@@ -306,9 +302,7 @@ int fcoe_ctlr_link_down(struct fcoe_ctlr *fip)
LIBFCOE_FIP_DBG(fip, "link down.\n");
spin_lock_bh(&fip->lock);
fcoe_ctlr_reset(fip);
- link_dropped = fip->link;
- fip->link = 0;
- fip->last_link = 0;
+ link_dropped = fip->state != FIP_ST_LINK_WAIT;
fip->state = FIP_ST_LINK_WAIT;
spin_unlock_bh(&fip->lock);
@@ -448,10 +442,15 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport,
memset(mac, 0, sizeof(mac));
mac->fd_desc.fip_dtype = FIP_DT_MAC;
mac->fd_desc.fip_dlen = sizeof(*mac) / FIP_BPW;
- if (dtype != FIP_DT_FLOGI && dtype != FIP_DT_FDISC)
+ if (dtype != FIP_DT_FLOGI && dtype != FIP_DT_FDISC) {
memcpy(mac->fd_mac, fip->get_src_addr(lport), ETH_ALEN);
- else if (fip->spma)
+ } else if (fip_flags & FIP_FL_SPMA) {
+ LIBFCOE_FIP_DBG(fip, "FLOGI/FDISC sent with SPMA\n");
memcpy(mac->fd_mac, fip->ctl_src_addr, ETH_ALEN);
+ } else {
+ LIBFCOE_FIP_DBG(fip, "FLOGI/FDISC sent with FPMA\n");
+ /* FPMA only FLOGI must leave the MAC desc set to all 0s */
+ }
skb->protocol = htons(ETH_P_FIP);
skb_reset_mac_header(skb);
@@ -556,7 +555,7 @@ EXPORT_SYMBOL(fcoe_ctlr_els_send);
* fcoe_ctlr_age_fcfs() - Reset and free all old FCFs for a controller
* @fip: The FCoE controller to free FCFs on
*
- * Called with lock held.
+ * Called with lock held and preemption disabled.
*
* An FCF is considered old if we have missed three advertisements.
* That is, there have been no valid advertisement from it for three
@@ -573,17 +572,20 @@ static void fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
struct fcoe_fcf *next;
unsigned long sel_time = 0;
unsigned long mda_time = 0;
+ struct fcoe_dev_stats *stats;
list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
mda_time = fcf->fka_period + (fcf->fka_period >> 1);
if ((fip->sel_fcf == fcf) &&
(time_after(jiffies, fcf->time + mda_time))) {
mod_timer(&fip->timer, jiffies + mda_time);
- fc_lport_get_stats(fip->lp)->MissDiscAdvCount++;
+ stats = per_cpu_ptr(fip->lp->dev_stats,
+ smp_processor_id());
+ stats->MissDiscAdvCount++;
printk(KERN_INFO "libfcoe: host%d: Missing Discovery "
- "Advertisement for fab %llx count %lld\n",
+ "Advertisement for fab %16.16llx count %lld\n",
fip->lp->host->host_no, fcf->fabric_name,
- fc_lport_get_stats(fip->lp)->MissDiscAdvCount);
+ stats->MissDiscAdvCount);
}
if (time_after(jiffies, fcf->time + fcf->fka_period * 3 +
msecs_to_jiffies(FIP_FCF_FUZZ * 3))) {
@@ -593,7 +595,9 @@ static void fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
WARN_ON(!fip->fcf_count);
fip->fcf_count--;
kfree(fcf);
- fc_lport_get_stats(fip->lp)->VLinkFailureCount++;
+ stats = per_cpu_ptr(fip->lp->dev_stats,
+ smp_processor_id());
+ stats->VLinkFailureCount++;
} else if (fcoe_ctlr_mtu_valid(fcf) &&
(!sel_time || time_before(sel_time, fcf->time))) {
sel_time = fcf->time;
@@ -776,7 +780,8 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
mtu_valid = fcoe_ctlr_mtu_valid(fcf);
fcf->time = jiffies;
if (!found) {
- LIBFCOE_FIP_DBG(fip, "New FCF for fab %llx map %x val %d\n",
+ LIBFCOE_FIP_DBG(fip, "New FCF for fab %16.16llx "
+ "map %x val %d\n",
fcf->fabric_name, fcf->fc_map, mtu_valid);
}
@@ -906,9 +911,10 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
fr_eof(fp) = FC_EOF_T;
fr_dev(fp) = lport;
- stats = fc_lport_get_stats(lport);
+ stats = per_cpu_ptr(lport->dev_stats, get_cpu());
stats->RxFrames++;
stats->RxWords += skb->len / FIP_BPW;
+ put_cpu();
fc_exch_recv(lport, fp);
return;
@@ -1006,7 +1012,8 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
LIBFCOE_FIP_DBG(fip, "performing Clear Virtual Link\n");
spin_lock_bh(&fip->lock);
- fc_lport_get_stats(lport)->VLinkFailureCount++;
+ per_cpu_ptr(lport->dev_stats,
+ smp_processor_id())->VLinkFailureCount++;
fcoe_ctlr_reset(fip);
spin_unlock_bh(&fip->lock);
@@ -1102,15 +1109,17 @@ static void fcoe_ctlr_select(struct fcoe_ctlr *fip)
struct fcoe_fcf *best = NULL;
list_for_each_entry(fcf, &fip->fcfs, list) {
- LIBFCOE_FIP_DBG(fip, "consider FCF for fab %llx VFID %d map %x "
- "val %d\n", fcf->fabric_name, fcf->vfid,
+ LIBFCOE_FIP_DBG(fip, "consider FCF for fab %16.16llx "
+ "VFID %d map %x val %d\n",
+ fcf->fabric_name, fcf->vfid,
fcf->fc_map, fcoe_ctlr_mtu_valid(fcf));
if (!fcoe_ctlr_fcf_usable(fcf)) {
- LIBFCOE_FIP_DBG(fip, "FCF for fab %llx map %x %svalid "
- "%savailable\n", fcf->fabric_name,
- fcf->fc_map, (fcf->flags & FIP_FL_SOL)
- ? "" : "in", (fcf->flags & FIP_FL_AVAIL)
- ? "" : "un");
+ LIBFCOE_FIP_DBG(fip, "FCF for fab %16.16llx "
+ "map %x %svalid %savailable\n",
+ fcf->fabric_name, fcf->fc_map,
+ (fcf->flags & FIP_FL_SOL) ? "" : "in",
+ (fcf->flags & FIP_FL_AVAIL) ?
+ "" : "un");
continue;
}
if (!best) {
@@ -1175,7 +1184,7 @@ static void fcoe_ctlr_timeout(unsigned long arg)
"Starting FCF discovery.\n",
fip->lp->host->host_no);
fip->reset_req = 1;
- schedule_work(&fip->link_work);
+ schedule_work(&fip->timer_work);
}
}
@@ -1201,43 +1210,31 @@ static void fcoe_ctlr_timeout(unsigned long arg)
mod_timer(&fip->timer, next_timer);
}
if (fip->send_ctlr_ka || fip->send_port_ka)
- schedule_work(&fip->link_work);
+ schedule_work(&fip->timer_work);
spin_unlock_bh(&fip->lock);
}
/**
- * fcoe_ctlr_link_work() - Worker thread function for link changes
+ * fcoe_ctlr_timer_work() - Worker thread function for timer work
* @work: Handle to a FCoE controller
*
- * See if the link status has changed and if so, report it.
- *
- * This is here because fc_linkup() and fc_linkdown() must not
+ * Sends keep-alives and resets which must not
* be called from the timer directly, since they use a mutex.
*/
-static void fcoe_ctlr_link_work(struct work_struct *work)
+static void fcoe_ctlr_timer_work(struct work_struct *work)
{
struct fcoe_ctlr *fip;
struct fc_lport *vport;
u8 *mac;
- int link;
- int last_link;
int reset;
- fip = container_of(work, struct fcoe_ctlr, link_work);
+ fip = container_of(work, struct fcoe_ctlr, timer_work);
spin_lock_bh(&fip->lock);
- last_link = fip->last_link;
- link = fip->link;
- fip->last_link = link;
reset = fip->reset_req;
fip->reset_req = 0;
spin_unlock_bh(&fip->lock);
- if (last_link != link) {
- if (link)
- fc_linkup(fip->lp);
- else
- fc_linkdown(fip->lp);
- } else if (reset && link)
+ if (reset)
fc_lport_reset(fip->lp);
if (fip->send_ctlr_ka) {
@@ -1334,9 +1331,9 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_lport *lport,
if (fip->state == FIP_ST_AUTO || fip->state == FIP_ST_NON_FIP) {
memcpy(fip->dest_addr, sa, ETH_ALEN);
fip->map_dest = 0;
- if (fip->state == FIP_ST_NON_FIP)
- LIBFCOE_FIP_DBG(fip, "received FLOGI REQ, "
- "using non-FIP mode\n");
+ if (fip->state == FIP_ST_AUTO)
+ LIBFCOE_FIP_DBG(fip, "received non-FIP FLOGI. "
+ "Setting non-FIP mode\n");
fip->state = FIP_ST_NON_FIP;
}
spin_unlock_bh(&fip->lock);
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 3966c71d0095..19338e0ba2c5 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -36,7 +36,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.4.0.98"
+#define DRV_VERSION "1.4.0.145"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
@@ -45,7 +45,7 @@
#define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */
#define FNIC_DFLT_QUEUE_DEPTH 32
#define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */
-#define FNIC_MAX_CMD_LEN 16 /* Supported CDB length */
+
/*
* Tag bits used for special requests.
*/
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 5259888fbfb1..2b48d79bad94 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -617,7 +617,7 @@ void fnic_flush_tx(struct fnic *fnic)
struct sk_buff *skb;
struct fc_frame *fp;
- while ((skb = skb_dequeue(&fnic->frame_queue))) {
+ while ((skb = skb_dequeue(&fnic->tx_queue))) {
fp = (struct fc_frame *)skb;
fnic_send_frame(fnic, fp);
}
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 97b212570bcc..265e73d9cd6f 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -556,7 +556,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
}
host->max_lun = fnic->config.luns_per_tgt;
host->max_id = FNIC_MAX_FCP_TARGET;
- host->max_cmd_len = FNIC_MAX_CMD_LEN;
+ host->max_cmd_len = FCOE_MAX_CMD_LEN;
fnic_get_res_counts(fnic);
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 35a4b3073ec3..a765fe7a55c3 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -3842,7 +3842,7 @@ int __init option_setup(char *str)
TRACE2(("option_setup() str %s\n", str ? str:"NULL"));
- while (cur && isdigit(*cur) && i <= MAXHA) {
+ while (cur && isdigit(*cur) && i < MAXHA) {
ints[i++] = simple_strtoul(cur, NULL, 0);
if ((cur = strchr(cur, ',')) != NULL) cur++;
}
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index 48f406850c65..18b7102bb80e 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -19,332 +19,334 @@
#include "wd33c93.h"
#include "gvp11.h"
-#include<linux/stat.h>
+#include <linux/stat.h>
-#define DMA(ptr) ((gvp11_scsiregs *)((ptr)->base))
-#define HDATA(ptr) ((struct WD33C93_hostdata *)((ptr)->hostdata))
-static irqreturn_t gvp11_intr (int irq, void *_instance)
+#define DMA(ptr) ((gvp11_scsiregs *)((ptr)->base))
+
+static irqreturn_t gvp11_intr(int irq, void *_instance)
{
- unsigned long flags;
- unsigned int status;
- struct Scsi_Host *instance = (struct Scsi_Host *)_instance;
-
- status = DMA(instance)->CNTR;
- if (!(status & GVP11_DMAC_INT_PENDING))
- return IRQ_NONE;
-
- spin_lock_irqsave(instance->host_lock, flags);
- wd33c93_intr(instance);
- spin_unlock_irqrestore(instance->host_lock, flags);
- return IRQ_HANDLED;
+ unsigned long flags;
+ unsigned int status;
+ struct Scsi_Host *instance = (struct Scsi_Host *)_instance;
+
+ status = DMA(instance)->CNTR;
+ if (!(status & GVP11_DMAC_INT_PENDING))
+ return IRQ_NONE;
+
+ spin_lock_irqsave(instance->host_lock, flags);
+ wd33c93_intr(instance);
+ spin_unlock_irqrestore(instance->host_lock, flags);
+ return IRQ_HANDLED;
}
static int gvp11_xfer_mask = 0;
-void gvp11_setup (char *str, int *ints)
+void gvp11_setup(char *str, int *ints)
{
- gvp11_xfer_mask = ints[1];
+ gvp11_xfer_mask = ints[1];
}
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
- unsigned short cntr = GVP11_DMAC_INT_ENABLE;
- unsigned long addr = virt_to_bus(cmd->SCp.ptr);
- int bank_mask;
- static int scsi_alloc_out_of_range = 0;
-
- /* use bounce buffer if the physical address is bad */
- if (addr & HDATA(cmd->device->host)->dma_xfer_mask)
- {
- HDATA(cmd->device->host)->dma_bounce_len = (cmd->SCp.this_residual + 511)
- & ~0x1ff;
-
- if( !scsi_alloc_out_of_range ) {
- HDATA(cmd->device->host)->dma_bounce_buffer =
- kmalloc (HDATA(cmd->device->host)->dma_bounce_len, GFP_KERNEL);
- HDATA(cmd->device->host)->dma_buffer_pool = BUF_SCSI_ALLOCED;
- }
+ struct Scsi_Host *instance = cmd->device->host;
+ struct WD33C93_hostdata *hdata = shost_priv(instance);
+ unsigned short cntr = GVP11_DMAC_INT_ENABLE;
+ unsigned long addr = virt_to_bus(cmd->SCp.ptr);
+ int bank_mask;
+ static int scsi_alloc_out_of_range = 0;
+
+ /* use bounce buffer if the physical address is bad */
+ if (addr & hdata->dma_xfer_mask) {
+ hdata->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
+
+ if (!scsi_alloc_out_of_range) {
+ hdata->dma_bounce_buffer =
+ kmalloc(hdata->dma_bounce_len, GFP_KERNEL);
+ hdata->dma_buffer_pool = BUF_SCSI_ALLOCED;
+ }
- if (scsi_alloc_out_of_range ||
- !HDATA(cmd->device->host)->dma_bounce_buffer) {
- HDATA(cmd->device->host)->dma_bounce_buffer =
- amiga_chip_alloc(HDATA(cmd->device->host)->dma_bounce_len,
- "GVP II SCSI Bounce Buffer");
+ if (scsi_alloc_out_of_range ||
+ !hdata->dma_bounce_buffer) {
+ hdata->dma_bounce_buffer =
+ amiga_chip_alloc(hdata->dma_bounce_len,
+ "GVP II SCSI Bounce Buffer");
- if(!HDATA(cmd->device->host)->dma_bounce_buffer)
- {
- HDATA(cmd->device->host)->dma_bounce_len = 0;
- return 1;
- }
+ if (!hdata->dma_bounce_buffer) {
+ hdata->dma_bounce_len = 0;
+ return 1;
+ }
- HDATA(cmd->device->host)->dma_buffer_pool = BUF_CHIP_ALLOCED;
- }
+ hdata->dma_buffer_pool = BUF_CHIP_ALLOCED;
+ }
- /* check if the address of the bounce buffer is OK */
- addr = virt_to_bus(HDATA(cmd->device->host)->dma_bounce_buffer);
-
- if (addr & HDATA(cmd->device->host)->dma_xfer_mask) {
- /* fall back to Chip RAM if address out of range */
- if( HDATA(cmd->device->host)->dma_buffer_pool == BUF_SCSI_ALLOCED) {
- kfree (HDATA(cmd->device->host)->dma_bounce_buffer);
- scsi_alloc_out_of_range = 1;
- } else {
- amiga_chip_free (HDATA(cmd->device->host)->dma_bounce_buffer);
- }
-
- HDATA(cmd->device->host)->dma_bounce_buffer =
- amiga_chip_alloc(HDATA(cmd->device->host)->dma_bounce_len,
- "GVP II SCSI Bounce Buffer");
-
- if(!HDATA(cmd->device->host)->dma_bounce_buffer)
- {
- HDATA(cmd->device->host)->dma_bounce_len = 0;
- return 1;
- }
-
- addr = virt_to_bus(HDATA(cmd->device->host)->dma_bounce_buffer);
- HDATA(cmd->device->host)->dma_buffer_pool = BUF_CHIP_ALLOCED;
- }
-
- if (!dir_in) {
- /* copy to bounce buffer for a write */
- memcpy (HDATA(cmd->device->host)->dma_bounce_buffer,
- cmd->SCp.ptr, cmd->SCp.this_residual);
+ /* check if the address of the bounce buffer is OK */
+ addr = virt_to_bus(hdata->dma_bounce_buffer);
+
+ if (addr & hdata->dma_xfer_mask) {
+ /* fall back to Chip RAM if address out of range */
+ if (hdata->dma_buffer_pool == BUF_SCSI_ALLOCED) {
+ kfree(hdata->dma_bounce_buffer);
+ scsi_alloc_out_of_range = 1;
+ } else {
+ amiga_chip_free(hdata->dma_bounce_buffer);
+ }
+
+ hdata->dma_bounce_buffer =
+ amiga_chip_alloc(hdata->dma_bounce_len,
+ "GVP II SCSI Bounce Buffer");
+
+ if (!hdata->dma_bounce_buffer) {
+ hdata->dma_bounce_len = 0;
+ return 1;
+ }
+
+ addr = virt_to_bus(hdata->dma_bounce_buffer);
+ hdata->dma_buffer_pool = BUF_CHIP_ALLOCED;
+ }
+
+ if (!dir_in) {
+ /* copy to bounce buffer for a write */
+ memcpy(hdata->dma_bounce_buffer, cmd->SCp.ptr,
+ cmd->SCp.this_residual);
+ }
}
- }
- /* setup dma direction */
- if (!dir_in)
- cntr |= GVP11_DMAC_DIR_WRITE;
+ /* setup dma direction */
+ if (!dir_in)
+ cntr |= GVP11_DMAC_DIR_WRITE;
- HDATA(cmd->device->host)->dma_dir = dir_in;
- DMA(cmd->device->host)->CNTR = cntr;
+ hdata->dma_dir = dir_in;
+ DMA(cmd->device->host)->CNTR = cntr;
- /* setup DMA *physical* address */
- DMA(cmd->device->host)->ACR = addr;
+ /* setup DMA *physical* address */
+ DMA(cmd->device->host)->ACR = addr;
- if (dir_in)
- /* invalidate any cache */
- cache_clear (addr, cmd->SCp.this_residual);
- else
- /* push any dirty cache */
- cache_push (addr, cmd->SCp.this_residual);
+ if (dir_in) {
+ /* invalidate any cache */
+ cache_clear(addr, cmd->SCp.this_residual);
+ } else {
+ /* push any dirty cache */
+ cache_push(addr, cmd->SCp.this_residual);
+ }
- if ((bank_mask = (~HDATA(cmd->device->host)->dma_xfer_mask >> 18) & 0x01c0))
- DMA(cmd->device->host)->BANK = bank_mask & (addr >> 18);
+ bank_mask = (~hdata->dma_xfer_mask >> 18) & 0x01c0;
+ if (bank_mask)
+ DMA(cmd->device->host)->BANK = bank_mask & (addr >> 18);
- /* start DMA */
- DMA(cmd->device->host)->ST_DMA = 1;
+ /* start DMA */
+ DMA(cmd->device->host)->ST_DMA = 1;
- /* return success */
- return 0;
+ /* return success */
+ return 0;
}
static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
int status)
{
- /* stop DMA */
- DMA(instance)->SP_DMA = 1;
- /* remove write bit from CONTROL bits */
- DMA(instance)->CNTR = GVP11_DMAC_INT_ENABLE;
-
- /* copy from a bounce buffer, if necessary */
- if (status && HDATA(instance)->dma_bounce_buffer) {
- if (HDATA(instance)->dma_dir && SCpnt)
- memcpy (SCpnt->SCp.ptr,
- HDATA(instance)->dma_bounce_buffer,
- SCpnt->SCp.this_residual);
-
- if (HDATA(instance)->dma_buffer_pool == BUF_SCSI_ALLOCED)
- kfree (HDATA(instance)->dma_bounce_buffer);
- else
- amiga_chip_free(HDATA(instance)->dma_bounce_buffer);
-
- HDATA(instance)->dma_bounce_buffer = NULL;
- HDATA(instance)->dma_bounce_len = 0;
- }
+ struct WD33C93_hostdata *hdata = shost_priv(instance);
+
+ /* stop DMA */
+ DMA(instance)->SP_DMA = 1;
+ /* remove write bit from CONTROL bits */
+ DMA(instance)->CNTR = GVP11_DMAC_INT_ENABLE;
+
+ /* copy from a bounce buffer, if necessary */
+ if (status && hdata->dma_bounce_buffer) {
+ if (hdata->dma_dir && SCpnt)
+ memcpy(SCpnt->SCp.ptr, hdata->dma_bounce_buffer,
+ SCpnt->SCp.this_residual);
+
+ if (hdata->dma_buffer_pool == BUF_SCSI_ALLOCED)
+ kfree(hdata->dma_bounce_buffer);
+ else
+ amiga_chip_free(hdata->dma_bounce_buffer);
+
+ hdata->dma_bounce_buffer = NULL;
+ hdata->dma_bounce_len = 0;
+ }
}
#define CHECK_WD33C93
int __init gvp11_detect(struct scsi_host_template *tpnt)
{
- static unsigned char called = 0;
- struct Scsi_Host *instance;
- unsigned long address;
- unsigned int epc;
- struct zorro_dev *z = NULL;
- unsigned int default_dma_xfer_mask;
- wd33c93_regs regs;
- int num_gvp11 = 0;
+ static unsigned char called = 0;
+ struct Scsi_Host *instance;
+ unsigned long address;
+ unsigned int epc;
+ struct zorro_dev *z = NULL;
+ unsigned int default_dma_xfer_mask;
+ struct WD33C93_hostdata *hdata;
+ wd33c93_regs regs;
+ int num_gvp11 = 0;
#ifdef CHECK_WD33C93
- volatile unsigned char *sasr_3393, *scmd_3393;
- unsigned char save_sasr;
- unsigned char q, qq;
+ volatile unsigned char *sasr_3393, *scmd_3393;
+ unsigned char save_sasr;
+ unsigned char q, qq;
#endif
- if (!MACH_IS_AMIGA || called)
- return 0;
- called = 1;
-
- tpnt->proc_name = "GVP11";
- tpnt->proc_info = &wd33c93_proc_info;
-
- while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
- /*
- * This should (hopefully) be the correct way to identify
- * all the different GVP SCSI controllers (except for the
- * SERIES I though).
- */
-
- if (z->id == ZORRO_PROD_GVP_COMBO_030_R3_SCSI ||
- z->id == ZORRO_PROD_GVP_SERIES_II)
- default_dma_xfer_mask = ~0x00ffffff;
- else if (z->id == ZORRO_PROD_GVP_GFORCE_030_SCSI ||
- z->id == ZORRO_PROD_GVP_A530_SCSI ||
- z->id == ZORRO_PROD_GVP_COMBO_030_R4_SCSI)
- default_dma_xfer_mask = ~0x01ffffff;
- else if (z->id == ZORRO_PROD_GVP_A1291 ||
- z->id == ZORRO_PROD_GVP_GFORCE_040_SCSI_1)
- default_dma_xfer_mask = ~0x07ffffff;
- else
- continue;
-
- /*
- * Rumors state that some GVP ram boards use the same product
- * code as the SCSI controllers. Therefore if the board-size
- * is not 64KB we asume it is a ram board and bail out.
- */
- if (z->resource.end-z->resource.start != 0xffff)
- continue;
-
- address = z->resource.start;
- if (!request_mem_region(address, 256, "wd33c93"))
- continue;
+ if (!MACH_IS_AMIGA || called)
+ return 0;
+ called = 1;
+
+ tpnt->proc_name = "GVP11";
+ tpnt->proc_info = &wd33c93_proc_info;
+
+ while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
+ /*
+ * This should (hopefully) be the correct way to identify
+ * all the different GVP SCSI controllers (except for the
+ * SERIES I though).
+ */
+
+ if (z->id == ZORRO_PROD_GVP_COMBO_030_R3_SCSI ||
+ z->id == ZORRO_PROD_GVP_SERIES_II)
+ default_dma_xfer_mask = ~0x00ffffff;
+ else if (z->id == ZORRO_PROD_GVP_GFORCE_030_SCSI ||
+ z->id == ZORRO_PROD_GVP_A530_SCSI ||
+ z->id == ZORRO_PROD_GVP_COMBO_030_R4_SCSI)
+ default_dma_xfer_mask = ~0x01ffffff;
+ else if (z->id == ZORRO_PROD_GVP_A1291 ||
+ z->id == ZORRO_PROD_GVP_GFORCE_040_SCSI_1)
+ default_dma_xfer_mask = ~0x07ffffff;
+ else
+ continue;
+
+ /*
+ * Rumors state that some GVP ram boards use the same product
+ * code as the SCSI controllers. Therefore if the board-size
+ * is not 64KB we assume it is a ram board and bail out.
+ */
+ if (z->resource.end - z->resource.start != 0xffff)
+ continue;
+
+ address = z->resource.start;
+ if (!request_mem_region(address, 256, "wd33c93"))
+ continue;
#ifdef CHECK_WD33C93
- /*
- * These darn GVP boards are a problem - it can be tough to tell
- * whether or not they include a SCSI controller. This is the
- * ultimate Yet-Another-GVP-Detection-Hack in that it actually
- * probes for a WD33c93 chip: If we find one, it's extremely
- * likely that this card supports SCSI, regardless of Product_
- * Code, Board_Size, etc.
- */
-
- /* Get pointers to the presumed register locations and save contents */
-
- sasr_3393 = &(((gvp11_scsiregs *)(ZTWO_VADDR(address)))->SASR);
- scmd_3393 = &(((gvp11_scsiregs *)(ZTWO_VADDR(address)))->SCMD);
- save_sasr = *sasr_3393;
-
- /* First test the AuxStatus Reg */
-
- q = *sasr_3393; /* read it */
- if (q & 0x08) /* bit 3 should always be clear */
- goto release;
- *sasr_3393 = WD_AUXILIARY_STATUS; /* setup indirect address */
- if (*sasr_3393 == WD_AUXILIARY_STATUS) { /* shouldn't retain the write */
- *sasr_3393 = save_sasr; /* Oops - restore this byte */
- goto release;
+ /*
+ * These darn GVP boards are a problem - it can be tough to tell
+ * whether or not they include a SCSI controller. This is the
+ * ultimate Yet-Another-GVP-Detection-Hack in that it actually
+ * probes for a WD33c93 chip: If we find one, it's extremely
+ * likely that this card supports SCSI, regardless of Product_
+ * Code, Board_Size, etc.
+ */
+
+ /* Get pointers to the presumed register locations and save contents */
+
+ sasr_3393 = &(((gvp11_scsiregs *)(ZTWO_VADDR(address)))->SASR);
+ scmd_3393 = &(((gvp11_scsiregs *)(ZTWO_VADDR(address)))->SCMD);
+ save_sasr = *sasr_3393;
+
+ /* First test the AuxStatus Reg */
+
+ q = *sasr_3393; /* read it */
+ if (q & 0x08) /* bit 3 should always be clear */
+ goto release;
+ *sasr_3393 = WD_AUXILIARY_STATUS; /* setup indirect address */
+ if (*sasr_3393 == WD_AUXILIARY_STATUS) { /* shouldn't retain the write */
+ *sasr_3393 = save_sasr; /* Oops - restore this byte */
+ goto release;
}
- if (*sasr_3393 != q) { /* should still read the same */
- *sasr_3393 = save_sasr; /* Oops - restore this byte */
- goto release;
+ if (*sasr_3393 != q) { /* should still read the same */
+ *sasr_3393 = save_sasr; /* Oops - restore this byte */
+ goto release;
}
- if (*scmd_3393 != q) /* and so should the image at 0x1f */
- goto release;
-
-
- /* Ok, we probably have a wd33c93, but let's check a few other places
- * for good measure. Make sure that this works for both 'A and 'B
- * chip versions.
- */
-
- *sasr_3393 = WD_SCSI_STATUS;
- q = *scmd_3393;
- *sasr_3393 = WD_SCSI_STATUS;
- *scmd_3393 = ~q;
- *sasr_3393 = WD_SCSI_STATUS;
- qq = *scmd_3393;
- *sasr_3393 = WD_SCSI_STATUS;
- *scmd_3393 = q;
- if (qq != q) /* should be read only */
- goto release;
- *sasr_3393 = 0x1e; /* this register is unimplemented */
- q = *scmd_3393;
- *sasr_3393 = 0x1e;
- *scmd_3393 = ~q;
- *sasr_3393 = 0x1e;
- qq = *scmd_3393;
- *sasr_3393 = 0x1e;
- *scmd_3393 = q;
- if (qq != q || qq != 0xff) /* should be read only, all 1's */
- goto release;
- *sasr_3393 = WD_TIMEOUT_PERIOD;
- q = *scmd_3393;
- *sasr_3393 = WD_TIMEOUT_PERIOD;
- *scmd_3393 = ~q;
- *sasr_3393 = WD_TIMEOUT_PERIOD;
- qq = *scmd_3393;
- *sasr_3393 = WD_TIMEOUT_PERIOD;
- *scmd_3393 = q;
- if (qq != (~q & 0xff)) /* should be read/write */
- goto release;
+ if (*scmd_3393 != q) /* and so should the image at 0x1f */
+ goto release;
+
+ /*
+ * Ok, we probably have a wd33c93, but let's check a few other places
+ * for good measure. Make sure that this works for both 'A and 'B
+ * chip versions.
+ */
+
+ *sasr_3393 = WD_SCSI_STATUS;
+ q = *scmd_3393;
+ *sasr_3393 = WD_SCSI_STATUS;
+ *scmd_3393 = ~q;
+ *sasr_3393 = WD_SCSI_STATUS;
+ qq = *scmd_3393;
+ *sasr_3393 = WD_SCSI_STATUS;
+ *scmd_3393 = q;
+ if (qq != q) /* should be read only */
+ goto release;
+ *sasr_3393 = 0x1e; /* this register is unimplemented */
+ q = *scmd_3393;
+ *sasr_3393 = 0x1e;
+ *scmd_3393 = ~q;
+ *sasr_3393 = 0x1e;
+ qq = *scmd_3393;
+ *sasr_3393 = 0x1e;
+ *scmd_3393 = q;
+ if (qq != q || qq != 0xff) /* should be read only, all 1's */
+ goto release;
+ *sasr_3393 = WD_TIMEOUT_PERIOD;
+ q = *scmd_3393;
+ *sasr_3393 = WD_TIMEOUT_PERIOD;
+ *scmd_3393 = ~q;
+ *sasr_3393 = WD_TIMEOUT_PERIOD;
+ qq = *scmd_3393;
+ *sasr_3393 = WD_TIMEOUT_PERIOD;
+ *scmd_3393 = q;
+ if (qq != (~q & 0xff)) /* should be read/write */
+ goto release;
#endif
- instance = scsi_register (tpnt, sizeof (struct WD33C93_hostdata));
- if(instance == NULL)
- goto release;
- instance->base = ZTWO_VADDR(address);
- instance->irq = IRQ_AMIGA_PORTS;
- instance->unique_id = z->slotaddr;
-
- if (gvp11_xfer_mask)
- HDATA(instance)->dma_xfer_mask = gvp11_xfer_mask;
- else
- HDATA(instance)->dma_xfer_mask = default_dma_xfer_mask;
-
-
- DMA(instance)->secret2 = 1;
- DMA(instance)->secret1 = 0;
- DMA(instance)->secret3 = 15;
- while (DMA(instance)->CNTR & GVP11_DMAC_BUSY) ;
- DMA(instance)->CNTR = 0;
-
- DMA(instance)->BANK = 0;
-
- epc = *(unsigned short *)(ZTWO_VADDR(address) + 0x8000);
-
- /*
- * Check for 14MHz SCSI clock
- */
- regs.SASR = &(DMA(instance)->SASR);
- regs.SCMD = &(DMA(instance)->SCMD);
- HDATA(instance)->no_sync = 0xff;
- HDATA(instance)->fast = 0;
- HDATA(instance)->dma_mode = CTRL_DMA;
- wd33c93_init(instance, regs, dma_setup, dma_stop,
- (epc & GVP_SCSICLKMASK) ? WD33C93_FS_8_10
- : WD33C93_FS_12_15);
-
- if (request_irq(IRQ_AMIGA_PORTS, gvp11_intr, IRQF_SHARED, "GVP11 SCSI",
- instance))
- goto unregister;
- DMA(instance)->CNTR = GVP11_DMAC_INT_ENABLE;
- num_gvp11++;
- continue;
+ instance = scsi_register(tpnt, sizeof(struct WD33C93_hostdata));
+ if (instance == NULL)
+ goto release;
+ instance->base = ZTWO_VADDR(address);
+ instance->irq = IRQ_AMIGA_PORTS;
+ instance->unique_id = z->slotaddr;
+
+ hdata = shost_priv(instance);
+ if (gvp11_xfer_mask)
+ hdata->dma_xfer_mask = gvp11_xfer_mask;
+ else
+ hdata->dma_xfer_mask = default_dma_xfer_mask;
+
+ DMA(instance)->secret2 = 1;
+ DMA(instance)->secret1 = 0;
+ DMA(instance)->secret3 = 15;
+ while (DMA(instance)->CNTR & GVP11_DMAC_BUSY)
+ ;
+ DMA(instance)->CNTR = 0;
+
+ DMA(instance)->BANK = 0;
+
+ epc = *(unsigned short *)(ZTWO_VADDR(address) + 0x8000);
+
+ /*
+ * Check for 14MHz SCSI clock
+ */
+ regs.SASR = &(DMA(instance)->SASR);
+ regs.SCMD = &(DMA(instance)->SCMD);
+ hdata->no_sync = 0xff;
+ hdata->fast = 0;
+ hdata->dma_mode = CTRL_DMA;
+ wd33c93_init(instance, regs, dma_setup, dma_stop,
+ (epc & GVP_SCSICLKMASK) ? WD33C93_FS_8_10
+ : WD33C93_FS_12_15);
+
+ if (request_irq(IRQ_AMIGA_PORTS, gvp11_intr, IRQF_SHARED,
+ "GVP11 SCSI", instance))
+ goto unregister;
+ DMA(instance)->CNTR = GVP11_DMAC_INT_ENABLE;
+ num_gvp11++;
+ continue;
unregister:
- scsi_unregister(instance);
- wd33c93_release();
+ scsi_unregister(instance);
release:
- release_mem_region(address, 256);
- }
+ release_mem_region(address, 256);
+ }
- return num_gvp11;
+ return num_gvp11;
}
static int gvp11_bus_reset(struct scsi_cmnd *cmd)
@@ -389,12 +391,11 @@ static struct scsi_host_template driver_template = {
int gvp11_release(struct Scsi_Host *instance)
{
#ifdef MODULE
- DMA(instance)->CNTR = 0;
- release_mem_region(ZTWO_PADDR(instance->base), 256);
- free_irq(IRQ_AMIGA_PORTS, instance);
- wd33c93_release();
+ DMA(instance)->CNTR = 0;
+ release_mem_region(ZTWO_PADDR(instance->base), 256);
+ free_irq(IRQ_AMIGA_PORTS, instance);
#endif
- return 1;
+ return 1;
}
MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/gvp11.h b/drivers/scsi/gvp11.h
index bf22859a5035..e2efdf9601ef 100644
--- a/drivers/scsi/gvp11.h
+++ b/drivers/scsi/gvp11.h
@@ -15,11 +15,11 @@ int gvp11_detect(struct scsi_host_template *);
int gvp11_release(struct Scsi_Host *);
#ifndef CMD_PER_LUN
-#define CMD_PER_LUN 2
+#define CMD_PER_LUN 2
#endif
#ifndef CAN_QUEUE
-#define CAN_QUEUE 16
+#define CAN_QUEUE 16
#endif
#ifndef HOSTS_C
@@ -28,24 +28,24 @@ int gvp11_release(struct Scsi_Host *);
* if the transfer address ANDed with this results in a non-zero
* result, then we can't use DMA.
*/
-#define GVP11_XFER_MASK (0xff000001)
+#define GVP11_XFER_MASK (0xff000001)
typedef struct {
- unsigned char pad1[64];
- volatile unsigned short CNTR;
- unsigned char pad2[31];
- volatile unsigned char SASR;
- unsigned char pad3;
- volatile unsigned char SCMD;
- unsigned char pad4[4];
- volatile unsigned short BANK;
- unsigned char pad5[6];
- volatile unsigned long ACR;
- volatile unsigned short secret1; /* store 0 here */
- volatile unsigned short ST_DMA;
- volatile unsigned short SP_DMA;
- volatile unsigned short secret2; /* store 1 here */
- volatile unsigned short secret3; /* store 15 here */
+ unsigned char pad1[64];
+ volatile unsigned short CNTR;
+ unsigned char pad2[31];
+ volatile unsigned char SASR;
+ unsigned char pad3;
+ volatile unsigned char SCMD;
+ unsigned char pad4[4];
+ volatile unsigned short BANK;
+ unsigned char pad5[6];
+ volatile unsigned long ACR;
+ volatile unsigned short secret1; /* store 0 here */
+ volatile unsigned short ST_DMA;
+ volatile unsigned short SP_DMA;
+ volatile unsigned short secret2; /* store 1 here */
+ volatile unsigned short secret3; /* store 15 here */
} gvp11_scsiregs;
/* bits in CNTR */
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 183d3a43c280..c016426b31b2 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -2708,14 +2708,6 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
c->Request.CDB[8] = (size >> 8) & 0xFF;
c->Request.CDB[9] = size & 0xFF;
break;
-
- case HPSA_READ_CAPACITY:
- c->Request.CDBLen = 10;
- c->Request.Type.Attribute = ATTR_SIMPLE;
- c->Request.Type.Direction = XFER_READ;
- c->Request.Timeout = 0;
- c->Request.CDB[0] = cmd;
- break;
case HPSA_CACHE_FLUSH:
c->Request.CDBLen = 12;
c->Request.Type.Attribute = ATTR_SIMPLE;
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 56fb9827681e..78de9b6d1e0b 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -152,21 +152,6 @@ struct SenseSubsystem_info {
u8 reserved1[1108];
};
-#define HPSA_READ_CAPACITY 0x25 /* Read Capacity */
-struct ReadCapdata {
- u8 total_size[4]; /* Total size in blocks */
- u8 block_size[4]; /* Size of blocks in bytes */
-};
-
-#if 0
-/* 12 byte commands not implemented in firmware yet. */
-#define HPSA_READ 0xa8
-#define HPSA_WRITE 0xaa
-#endif
-
-#define HPSA_READ 0x28 /* Read(10) */
-#define HPSA_WRITE 0x2a /* Write(10) */
-
/* BMIC commands */
#define BMIC_READ 0x26
#define BMIC_WRITE 0x27
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index c2eea711a5ce..d18f45c95639 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -2245,7 +2245,7 @@ static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
DECLARE_COMPLETION_ONSTACK(comp);
int wait;
unsigned long flags;
- signed long timeout = init_timeout * HZ;
+ signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ;
ENTER;
do {
@@ -3013,6 +3013,7 @@ static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
if (crq->valid & 0x80) {
if (++async_crq->cur == async_crq->size)
async_crq->cur = 0;
+ rmb();
} else
crq = NULL;
@@ -3035,6 +3036,7 @@ static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost)
if (crq->valid & 0x80) {
if (++queue->cur == queue->size)
queue->cur = 0;
+ rmb();
} else
crq = NULL;
@@ -3083,12 +3085,14 @@ static void ibmvfc_tasklet(void *data)
while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
ibmvfc_handle_async(async, vhost);
async->valid = 0;
+ wmb();
}
/* Pull all the valid messages off the CRQ */
while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
ibmvfc_handle_crq(crq, vhost);
crq->valid = 0;
+ wmb();
}
vio_enable_interrupts(vdev);
@@ -3096,10 +3100,12 @@ static void ibmvfc_tasklet(void *data)
vio_disable_interrupts(vdev);
ibmvfc_handle_async(async, vhost);
async->valid = 0;
+ wmb();
} else if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
vio_disable_interrupts(vdev);
ibmvfc_handle_crq(crq, vhost);
crq->valid = 0;
+ wmb();
} else
done = 1;
}
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index d25106a958d7..7e9742764e4b 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -38,6 +38,7 @@
#define IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT \
(IBMVFC_ADISC_TIMEOUT + IBMVFC_ADISC_CANCEL_TIMEOUT)
#define IBMVFC_INIT_TIMEOUT 120
+#define IBMVFC_ABORT_WAIT_TIMEOUT 40
#define IBMVFC_MAX_REQUESTS_DEFAULT 100
#define IBMVFC_DEBUG 0
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 02143af7c1af..bf55d3057413 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -206,8 +206,10 @@ static void iscsi_sw_tcp_conn_set_callbacks(struct iscsi_conn *conn)
}
static void
-iscsi_sw_tcp_conn_restore_callbacks(struct iscsi_sw_tcp_conn *tcp_sw_conn)
+iscsi_sw_tcp_conn_restore_callbacks(struct iscsi_conn *conn)
{
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
struct sock *sk = tcp_sw_conn->sock->sk;
/* restore socket callbacks, see also: iscsi_conn_set_callbacks() */
@@ -555,7 +557,7 @@ static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
return;
sock_hold(sock->sk);
- iscsi_sw_tcp_conn_restore_callbacks(tcp_sw_conn);
+ iscsi_sw_tcp_conn_restore_callbacks(conn);
sock_put(sock->sk);
spin_lock_bh(&session->lock);
@@ -599,9 +601,9 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock);
- if (sock->sk->sk_sleep) {
+ if (sk_sleep(sock->sk)) {
sock->sk->sk_err = EIO;
- wake_up_interruptible(sock->sk->sk_sleep);
+ wake_up_interruptible(sk_sleep(sock->sk));
}
iscsi_conn_stop(cls_conn, flag);
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index ca6b7bc64de0..94644bad0ed7 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -36,7 +36,6 @@ struct iscsi_sw_tcp_send {
};
struct iscsi_sw_tcp_conn {
- struct iscsi_conn *iscsi_conn;
struct socket *sock;
struct iscsi_sw_tcp_send out;
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 1087a7f18e84..b292272d296f 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -132,7 +132,7 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
switch (fmt) {
case ELS_ADDR_FMT_PORT:
FC_DISC_DBG(disc, "Port address format for port "
- "(%6x)\n", ntoh24(pp->rscn_fid));
+ "(%6.6x)\n", ntoh24(pp->rscn_fid));
dp = kzalloc(sizeof(*dp), GFP_KERNEL);
if (!dp) {
redisc = 1;
@@ -449,7 +449,7 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
} else {
printk(KERN_WARNING "libfc: Failed to allocate "
"memory for the newly discovered port "
- "(%6x)\n", ids.port_id);
+ "(%6.6x)\n", ids.port_id);
error = -ENOMEM;
}
}
@@ -607,7 +607,7 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
rdata->ids.port_name = port_name;
else if (rdata->ids.port_name != port_name) {
FC_DISC_DBG(disc, "GPN_ID accepted. WWPN changed. "
- "Port-id %x wwpn %llx\n",
+ "Port-id %6.6x wwpn %16.16llx\n",
rdata->ids.port_id, port_name);
lport->tt.rport_logoff(rdata);
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index e5df0d4db67e..daff880e426e 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -488,7 +488,7 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
*/
spin_lock_bh(&ep->ex_lock);
ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ; /* not first seq */
- if (f_ctl & (FC_FC_END_SEQ | FC_FC_SEQ_INIT))
+ if (f_ctl & FC_FC_SEQ_INIT)
ep->esb_stat &= ~ESB_ST_SEQ_INIT;
spin_unlock_bh(&ep->ex_lock);
return error;
@@ -676,9 +676,10 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
}
memset(ep, 0, sizeof(*ep));
- cpu = smp_processor_id();
+ cpu = get_cpu();
pool = per_cpu_ptr(mp->pool, cpu);
spin_lock_bh(&pool->lock);
+ put_cpu();
index = pool->next_index;
/* allocate new exch from pool */
while (fc_exch_ptr_get(pool, index)) {
@@ -734,19 +735,14 @@ err:
* EM is selected when a NULL match function pointer is encountered
* or when a call to a match function returns true.
*/
-static struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
- struct fc_frame *fp)
+static inline struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
+ struct fc_frame *fp)
{
struct fc_exch_mgr_anchor *ema;
- struct fc_exch *ep;
- list_for_each_entry(ema, &lport->ema_list, ema_list) {
- if (!ema->match || ema->match(fp)) {
- ep = fc_exch_em_alloc(lport, ema->mp);
- if (ep)
- return ep;
- }
- }
+ list_for_each_entry(ema, &lport->ema_list, ema_list)
+ if (!ema->match || ema->match(fp))
+ return fc_exch_em_alloc(lport, ema->mp);
return NULL;
}
@@ -920,12 +916,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
* Find or create the sequence.
*/
if (fc_sof_is_init(fr_sof(fp))) {
- sp = fc_seq_start_next(&ep->seq);
- if (!sp) {
- reject = FC_RJT_SEQ_XS; /* exchange shortage */
- goto rel;
- }
- sp->id = fh->fh_seq_id;
+ sp = &ep->seq;
sp->ssb_stat |= SSB_ST_RESP;
} else {
sp = &ep->seq;
@@ -1250,9 +1241,6 @@ static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
struct fc_frame_header *fh = fc_frame_header_get(fp);
struct fc_seq *sp = NULL;
struct fc_exch *ep = NULL;
- enum fc_sof sof;
- enum fc_eof eof;
- u32 f_ctl;
enum fc_pf_rjt_reason reject;
/* We can have the wrong fc_lport at this point with NPIV, which is a
@@ -1269,9 +1257,6 @@ static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
if (reject == FC_RJT_NONE) {
sp = fr_seq(fp); /* sequence will be held */
ep = fc_seq_exch(sp);
- sof = fr_sof(fp);
- eof = fr_eof(fp);
- f_ctl = ntoh24(fh->fh_f_ctl);
fc_seq_send_ack(sp, fp);
/*
@@ -1336,17 +1321,14 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
goto rel;
}
sof = fr_sof(fp);
- if (fc_sof_is_init(sof)) {
- sp = fc_seq_start_next(&ep->seq);
- sp->id = fh->fh_seq_id;
+ sp = &ep->seq;
+ if (fc_sof_is_init(sof))
sp->ssb_stat |= SSB_ST_RESP;
- } else {
- sp = &ep->seq;
- if (sp->id != fh->fh_seq_id) {
+ else if (sp->id != fh->fh_seq_id) {
atomic_inc(&mp->stats.seq_not_found);
goto rel;
- }
}
+
f_ctl = ntoh24(fh->fh_f_ctl);
fr_seq(fp) = sp;
if (f_ctl & FC_FC_SEQ_INIT)
@@ -1763,7 +1745,6 @@ static void fc_exch_els_rec(struct fc_seq *sp, struct fc_frame *rfp)
fc_exch_done(sp);
goto out;
}
- sp = fc_seq_start_next(sp);
acc = fc_frame_payload_get(fp, sizeof(*acc));
memset(acc, 0, sizeof(*acc));
acc->reca_cmd = ELS_LS_ACC;
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 17396c708b08..81a7c976b373 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -97,7 +97,7 @@ static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *);
static void fc_fcp_complete_locked(struct fc_fcp_pkt *);
static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *);
static void fc_fcp_error(struct fc_fcp_pkt *, struct fc_frame *);
-static void fc_timeout_error(struct fc_fcp_pkt *);
+static void fc_fcp_recovery(struct fc_fcp_pkt *);
static void fc_fcp_timeout(unsigned long);
static void fc_fcp_rec(struct fc_fcp_pkt *);
static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *);
@@ -121,7 +121,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
#define FC_DATA_UNDRUN 7
#define FC_ERROR 8
#define FC_HRD_ERROR 9
-#define FC_CMD_TIME_OUT 10
+#define FC_CMD_RECOVERY 10
/*
* Error recovery timeout values.
@@ -446,9 +446,16 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
len = fr_len(fp) - sizeof(*fh);
buf = fc_frame_payload_get(fp, 0);
- /* if this I/O is ddped, update xfer len */
- fc_fcp_ddp_done(fsp);
-
+ /*
+	 * If this I/O is ddped then clear it and initiate
+	 * recovery, since data frames are expected to be
+	 * placed directly in that case.
+ */
+ if (fsp->xfer_ddp != FC_XID_UNKNOWN) {
+ fc_fcp_ddp_done(fsp);
+ goto err;
+ }
if (offset + len > fsp->data_len) {
/* this should never happen */
if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
@@ -456,8 +463,7 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
goto crc_err;
FC_FCP_DBG(fsp, "data received past end. len %zx offset %zx "
"data_len %x\n", len, offset, fsp->data_len);
- fc_fcp_retry_cmd(fsp);
- return;
+ goto err;
}
if (offset != fsp->xfer_len)
fsp->state |= FC_SRB_DISCONTIG;
@@ -478,13 +484,14 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
if (~crc != le32_to_cpu(fr_crc(fp))) {
crc_err:
- stats = fc_lport_get_stats(lport);
+ stats = per_cpu_ptr(lport->dev_stats, get_cpu());
stats->ErrorFrames++;
- /* FIXME - per cpu count, not total count! */
+ /* per cpu count, not total count, but OK for limit */
if (stats->InvalidCRCCount++ < 5)
printk(KERN_WARNING "libfc: CRC error on data "
- "frame for port (%6x)\n",
+ "frame for port (%6.6x)\n",
fc_host_port_id(lport->host));
+ put_cpu();
/*
* Assume the frame is total garbage.
* We may have copied it over the good part
@@ -493,7 +500,7 @@ crc_err:
* Otherwise, ignore it.
*/
if (fsp->state & FC_SRB_DISCONTIG)
- fc_fcp_retry_cmd(fsp);
+ goto err;
return;
}
}
@@ -509,6 +516,9 @@ crc_err:
if (unlikely(fsp->state & FC_SRB_RCV_STATUS) &&
fsp->xfer_len == fsp->data_len - fsp->scsi_resid)
fc_fcp_complete_locked(fsp);
+ return;
+err:
+ fc_fcp_recovery(fsp);
}
/**
@@ -834,8 +844,7 @@ static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
* exit here
*/
return;
- } else
- goto err;
+ }
}
if (flags & FCP_SNS_LEN_VAL) {
snsl = ntohl(rp_ex->fr_sns_len);
@@ -885,7 +894,7 @@ static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
return;
}
fsp->status_code = FC_DATA_OVRRUN;
- FC_FCP_DBG(fsp, "tgt %6x xfer len %zx greater than expected, "
+ FC_FCP_DBG(fsp, "tgt %6.6x xfer len %zx greater than expected, "
"len %x, data len %x\n",
fsp->rport->port_id,
fsp->xfer_len, expected_len, fsp->data_len);
@@ -1341,7 +1350,7 @@ static void fc_fcp_timeout(unsigned long data)
else if (fsp->state & FC_SRB_RCV_STATUS)
fc_fcp_complete_locked(fsp);
else
- fc_timeout_error(fsp);
+ fc_fcp_recovery(fsp);
fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO;
unlock:
fc_fcp_unlock_pkt(fsp);
@@ -1385,7 +1394,7 @@ retry:
if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
else
- fc_timeout_error(fsp);
+ fc_fcp_recovery(fsp);
}
/**
@@ -1454,7 +1463,7 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
fc_fcp_retry_cmd(fsp);
break;
}
- fc_timeout_error(fsp);
+ fc_fcp_recovery(fsp);
break;
}
} else if (opcode == ELS_LS_ACC) {
@@ -1553,7 +1562,7 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
break;
default:
- FC_FCP_DBG(fsp, "REC %p fid %x error unexpected error %d\n",
+ FC_FCP_DBG(fsp, "REC %p fid %6.6x error unexpected error %d\n",
fsp, fsp->rport->port_id, error);
fsp->status_code = FC_CMD_PLOGO;
/* fall through */
@@ -1563,13 +1572,13 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
* Assume REC or LS_ACC was lost.
* The exchange manager will have aborted REC, so retry.
*/
- FC_FCP_DBG(fsp, "REC fid %x error error %d retry %d/%d\n",
+ FC_FCP_DBG(fsp, "REC fid %6.6x error error %d retry %d/%d\n",
fsp->rport->port_id, error, fsp->recov_retry,
FC_MAX_RECOV_RETRY);
if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
fc_fcp_rec(fsp);
else
- fc_timeout_error(fsp);
+ fc_fcp_recovery(fsp);
break;
}
fc_fcp_unlock_pkt(fsp);
@@ -1578,12 +1587,12 @@ out:
}
/**
- * fc_timeout_error() - Handler for fcp_pkt timeouts
- * @fsp: The FCP packt that has timed out
+ * fc_fcp_recovery() - Handler for fcp_pkt recovery
+ * @fsp: The FCP pkt that needs to be aborted
*/
-static void fc_timeout_error(struct fc_fcp_pkt *fsp)
+static void fc_fcp_recovery(struct fc_fcp_pkt *fsp)
{
- fsp->status_code = FC_CMD_TIME_OUT;
+ fsp->status_code = FC_CMD_RECOVERY;
fsp->cdb_status = 0;
fsp->io_status = 0;
/*
@@ -1689,7 +1698,7 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
break;
case ELS_LS_RJT:
default:
- fc_timeout_error(fsp);
+ fc_fcp_recovery(fsp);
break;
}
fc_fcp_unlock_pkt(fsp);
@@ -1715,7 +1724,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
fc_fcp_rec(fsp);
else
- fc_timeout_error(fsp);
+ fc_fcp_recovery(fsp);
break;
case -FC_EX_CLOSED: /* e.g., link failure */
/* fall through */
@@ -1810,7 +1819,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
/*
* setup the data direction
*/
- stats = fc_lport_get_stats(lport);
+ stats = per_cpu_ptr(lport->dev_stats, get_cpu());
if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
fsp->req_flags = FC_SRB_READ;
stats->InputRequests++;
@@ -1823,6 +1832,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
fsp->req_flags = 0;
stats->ControlRequests++;
}
+ put_cpu();
fsp->tgt_flags = rpriv->flags;
@@ -1907,6 +1917,8 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
}
break;
case FC_ERROR:
+ FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
+ "due to FC_ERROR\n");
sc_cmd->result = DID_ERROR << 16;
break;
case FC_DATA_UNDRUN:
@@ -1915,12 +1927,19 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
* scsi status is good but transport level
* underrun.
*/
- sc_cmd->result = (fsp->state & FC_SRB_RCV_STATUS ?
- DID_OK : DID_ERROR) << 16;
+ if (fsp->state & FC_SRB_RCV_STATUS) {
+ sc_cmd->result = DID_OK << 16;
+ } else {
+ FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml"
+ " due to FC_DATA_UNDRUN (trans)\n");
+ sc_cmd->result = DID_ERROR << 16;
+ }
} else {
/*
* scsi got underrun, this is an error
*/
+ FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
+ "due to FC_DATA_UNDRUN (scsi)\n");
CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
}
@@ -1929,12 +1948,16 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
/*
* overrun is an error
*/
+ FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
+ "due to FC_DATA_OVRRUN\n");
sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
break;
case FC_CMD_ABORTED:
+ FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
+ "due to FC_CMD_ABORTED\n");
sc_cmd->result = (DID_ERROR << 16) | fsp->io_status;
break;
- case FC_CMD_TIME_OUT:
+ case FC_CMD_RECOVERY:
sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;
break;
case FC_CMD_RESET:
@@ -1944,6 +1967,8 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
sc_cmd->result = (DID_NO_CONNECT << 16);
break;
default:
+ FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
+ "due to unknown error\n");
sc_cmd->result = (DID_ERROR << 16);
break;
}
@@ -2028,7 +2053,7 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
if (lport->state != LPORT_ST_READY)
return rc;
- FC_SCSI_DBG(lport, "Resetting rport (%6x)\n", rport->port_id);
+ FC_SCSI_DBG(lport, "Resetting rport (%6.6x)\n", rport->port_id);
fsp = fc_fcp_pkt_alloc(lport, GFP_NOIO);
if (fsp == NULL) {
@@ -2076,11 +2101,11 @@ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
if (fc_fcp_lport_queue_ready(lport)) {
shost_printk(KERN_INFO, shost, "libfc: Host reset succeeded "
- "on port (%6x)\n", fc_host_port_id(lport->host));
+ "on port (%6.6x)\n", fc_host_port_id(lport->host));
return SUCCESS;
} else {
shost_printk(KERN_INFO, shost, "libfc: Host reset failed, "
- "port (%6x) is not ready.\n",
+ "port (%6.6x) is not ready.\n",
fc_host_port_id(lport->host));
return FAILED;
}
@@ -2166,7 +2191,7 @@ void fc_fcp_destroy(struct fc_lport *lport)
if (!list_empty(&si->scsi_pkt_queue))
printk(KERN_ERR "libfc: Leaked SCSI packets when destroying "
- "port (%6x)\n", fc_host_port_id(lport->host));
+ "port (%6.6x)\n", fc_host_port_id(lport->host));
mempool_destroy(si->scsi_pkt_pool);
kfree(si);
diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h
index 741fd5c72e13..efc6b3fe6f35 100644
--- a/drivers/scsi/libfc/fc_libfc.h
+++ b/drivers/scsi/libfc/fc_libfc.h
@@ -45,7 +45,7 @@ extern unsigned int fc_debug_logging;
#define FC_LPORT_DBG(lport, fmt, args...) \
FC_CHECK_LOGGING(FC_LPORT_LOGGING, \
- printk(KERN_INFO "host%u: lport %6x: " fmt, \
+ printk(KERN_INFO "host%u: lport %6.6x: " fmt, \
(lport)->host->host_no, \
fc_host_port_id((lport)->host), ##args))
@@ -57,7 +57,7 @@ extern unsigned int fc_debug_logging;
#define FC_RPORT_ID_DBG(lport, port_id, fmt, args...) \
FC_CHECK_LOGGING(FC_RPORT_LOGGING, \
- printk(KERN_INFO "host%u: rport %6x: " fmt, \
+ printk(KERN_INFO "host%u: rport %6.6x: " fmt, \
(lport)->host->host_no, \
(port_id), ##args))
@@ -66,7 +66,7 @@ extern unsigned int fc_debug_logging;
#define FC_FCP_DBG(pkt, fmt, args...) \
FC_CHECK_LOGGING(FC_FCP_LOGGING, \
- printk(KERN_INFO "host%u: fcp: %6x: " fmt, \
+ printk(KERN_INFO "host%u: fcp: %6.6x: " fmt, \
(pkt)->lp->host->host_no, \
pkt->rport->port_id, ##args))
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index d126ecfff704..ef25e11abd33 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -172,7 +172,7 @@ static void fc_lport_rport_callback(struct fc_lport *lport,
struct fc_rport_priv *rdata,
enum fc_rport_event event)
{
- FC_LPORT_DBG(lport, "Received a %d event for port (%6x)\n", event,
+ FC_LPORT_DBG(lport, "Received a %d event for port (%6.6x)\n", event,
rdata->ids.port_id);
mutex_lock(&lport->lp_mutex);
@@ -183,7 +183,7 @@ static void fc_lport_rport_callback(struct fc_lport *lport,
fc_lport_enter_ns(lport, LPORT_ST_RNN_ID);
} else {
FC_LPORT_DBG(lport, "Received an READY event "
- "on port (%6x) for the directory "
+ "on port (%6.6x) for the directory "
"server, but the lport is not "
"in the DNS state, it's in the "
"%d state", rdata->ids.port_id,
@@ -228,9 +228,12 @@ static void fc_lport_ptp_setup(struct fc_lport *lport,
u64 remote_wwnn)
{
mutex_lock(&lport->disc.disc_mutex);
- if (lport->ptp_rdata)
+ if (lport->ptp_rdata) {
lport->tt.rport_logoff(lport->ptp_rdata);
+ kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
+ }
lport->ptp_rdata = lport->tt.rport_create(lport, remote_fid);
+ kref_get(&lport->ptp_rdata->kref);
lport->ptp_rdata->ids.port_name = remote_wwpn;
lport->ptp_rdata->ids.node_name = remote_wwnn;
mutex_unlock(&lport->disc.disc_mutex);
@@ -572,7 +575,7 @@ void __fc_linkup(struct fc_lport *lport)
*/
void fc_linkup(struct fc_lport *lport)
{
- printk(KERN_INFO "host%d: libfc: Link up on port (%6x)\n",
+ printk(KERN_INFO "host%d: libfc: Link up on port (%6.6x)\n",
lport->host->host_no, fc_host_port_id(lport->host));
mutex_lock(&lport->lp_mutex);
@@ -602,7 +605,7 @@ void __fc_linkdown(struct fc_lport *lport)
*/
void fc_linkdown(struct fc_lport *lport)
{
- printk(KERN_INFO "host%d: libfc: Link down on port (%6x)\n",
+ printk(KERN_INFO "host%d: libfc: Link down on port (%6.6x)\n",
lport->host->host_no, fc_host_port_id(lport->host));
mutex_lock(&lport->lp_mutex);
@@ -704,7 +707,7 @@ void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
break;
case DISC_EV_FAILED:
printk(KERN_ERR "host%d: libfc: "
- "Discovery failed for port (%6x)\n",
+ "Discovery failed for port (%6.6x)\n",
lport->host->host_no, fc_host_port_id(lport->host));
mutex_lock(&lport->lp_mutex);
fc_lport_enter_reset(lport);
@@ -750,7 +753,7 @@ static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id,
struct fc_frame *fp)
{
if (port_id)
- printk(KERN_INFO "host%d: Assigned Port ID %6x\n",
+ printk(KERN_INFO "host%d: Assigned Port ID %6.6x\n",
lport->host->host_no, port_id);
fc_host_port_id(lport->host) = port_id;
@@ -797,11 +800,11 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
if (remote_wwpn == lport->wwpn) {
printk(KERN_WARNING "host%d: libfc: Received FLOGI from port "
- "with same WWPN %llx\n",
+ "with same WWPN %16.16llx\n",
lport->host->host_no, remote_wwpn);
goto out;
}
- FC_LPORT_DBG(lport, "FLOGI from port WWPN %llx\n", remote_wwpn);
+ FC_LPORT_DBG(lport, "FLOGI from port WWPN %16.16llx\n", remote_wwpn);
/*
* XXX what is the right thing to do for FIDs?
@@ -832,7 +835,7 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
*/
f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
ep = fc_seq_exch(sp);
- fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
+ fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, remote_fid, local_fid,
FC_TYPE_ELS, f_ctl, 0);
lport->tt.seq_send(lport, sp, fp);
@@ -947,7 +950,11 @@ static void fc_lport_reset_locked(struct fc_lport *lport)
if (lport->dns_rdata)
lport->tt.rport_logoff(lport->dns_rdata);
- lport->ptp_rdata = NULL;
+ if (lport->ptp_rdata) {
+ lport->tt.rport_logoff(lport->ptp_rdata);
+ kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
+ lport->ptp_rdata = NULL;
+ }
lport->tt.disc_stop(lport);
@@ -1492,7 +1499,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
lport->r_a_tov = 2 * e_d_tov;
fc_lport_set_port_id(lport, did, fp);
printk(KERN_INFO "host%d: libfc: "
- "Port (%6x) entered "
+ "Port (%6.6x) entered "
"point-to-point mode\n",
lport->host->host_no, did);
fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id),
diff --git a/drivers/scsi/libfc/fc_npiv.c b/drivers/scsi/libfc/fc_npiv.c
index c68f6c7341c2..45b6f1e2df92 100644
--- a/drivers/scsi/libfc/fc_npiv.c
+++ b/drivers/scsi/libfc/fc_npiv.c
@@ -72,6 +72,9 @@ struct fc_lport *fc_vport_id_lookup(struct fc_lport *n_port, u32 port_id)
if (fc_host_port_id(n_port->host) == port_id)
return n_port;
+ if (port_id == FC_FID_FLOGI)
+ return n_port; /* for point-to-point */
+
mutex_lock(&n_port->lp_mutex);
list_for_each_entry(vn_port, &n_port->vports, list) {
if (fc_host_port_id(vn_port->host) == port_id) {
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index b37d0ff28b35..39e440f0f54a 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -1442,136 +1442,115 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
struct fc_els_spp *spp; /* response spp */
unsigned int len;
unsigned int plen;
- enum fc_els_rjt_reason reason = ELS_RJT_UNAB;
- enum fc_els_rjt_explan explan = ELS_EXPL_NONE;
enum fc_els_spp_resp resp;
struct fc_seq_els_data rjt_data;
u32 f_ctl;
u32 fcp_parm;
u32 roles = FC_RPORT_ROLE_UNKNOWN;
- rjt_data.fp = NULL;
+ rjt_data.fp = NULL;
fh = fc_frame_header_get(rx_fp);
FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n",
fc_rport_state(rdata));
- switch (rdata->rp_state) {
- case RPORT_ST_PRLI:
- case RPORT_ST_RTV:
- case RPORT_ST_READY:
- case RPORT_ST_ADISC:
- reason = ELS_RJT_NONE;
- break;
- default:
- fc_frame_free(rx_fp);
- return;
- break;
- }
len = fr_len(rx_fp) - sizeof(*fh);
pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
- if (pp == NULL) {
- reason = ELS_RJT_PROT;
- explan = ELS_EXPL_INV_LEN;
- } else {
- plen = ntohs(pp->prli.prli_len);
- if ((plen % 4) != 0 || plen > len) {
- reason = ELS_RJT_PROT;
- explan = ELS_EXPL_INV_LEN;
- } else if (plen < len) {
- len = plen;
- }
- plen = pp->prli.prli_spp_len;
- if ((plen % 4) != 0 || plen < sizeof(*spp) ||
- plen > len || len < sizeof(*pp)) {
- reason = ELS_RJT_PROT;
- explan = ELS_EXPL_INV_LEN;
- }
- rspp = &pp->spp;
+ if (!pp)
+ goto reject_len;
+ plen = ntohs(pp->prli.prli_len);
+ if ((plen % 4) != 0 || plen > len || plen < 16)
+ goto reject_len;
+ if (plen < len)
+ len = plen;
+ plen = pp->prli.prli_spp_len;
+ if ((plen % 4) != 0 || plen < sizeof(*spp) ||
+ plen > len || len < sizeof(*pp) || plen < 12)
+ goto reject_len;
+ rspp = &pp->spp;
+
+ fp = fc_frame_alloc(lport, len);
+ if (!fp) {
+ rjt_data.reason = ELS_RJT_UNAB;
+ rjt_data.explan = ELS_EXPL_INSUF_RES;
+ goto reject;
}
- if (reason != ELS_RJT_NONE ||
- (fp = fc_frame_alloc(lport, len)) == NULL) {
- rjt_data.reason = reason;
- rjt_data.explan = explan;
- lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
- } else {
- sp = lport->tt.seq_start_next(sp);
- WARN_ON(!sp);
- pp = fc_frame_payload_get(fp, len);
- WARN_ON(!pp);
- memset(pp, 0, len);
- pp->prli.prli_cmd = ELS_LS_ACC;
- pp->prli.prli_spp_len = plen;
- pp->prli.prli_len = htons(len);
- len -= sizeof(struct fc_els_prli);
-
- /* reinitialize remote port roles */
- rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
-
- /*
- * Go through all the service parameter pages and build
- * response. If plen indicates longer SPP than standard,
- * use that. The entire response has been pre-cleared above.
- */
- spp = &pp->spp;
- while (len >= plen) {
- spp->spp_type = rspp->spp_type;
- spp->spp_type_ext = rspp->spp_type_ext;
- spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
- resp = FC_SPP_RESP_ACK;
- if (rspp->spp_flags & FC_SPP_RPA_VAL)
- resp = FC_SPP_RESP_NO_PA;
- switch (rspp->spp_type) {
- case 0: /* common to all FC-4 types */
- break;
- case FC_TYPE_FCP:
- fcp_parm = ntohl(rspp->spp_params);
- if (fcp_parm & FCP_SPPF_RETRY)
- rdata->flags |= FC_RP_FLAGS_RETRY;
- rdata->supported_classes = FC_COS_CLASS3;
- if (fcp_parm & FCP_SPPF_INIT_FCN)
- roles |= FC_RPORT_ROLE_FCP_INITIATOR;
- if (fcp_parm & FCP_SPPF_TARG_FCN)
- roles |= FC_RPORT_ROLE_FCP_TARGET;
- rdata->ids.roles = roles;
-
- spp->spp_params =
- htonl(lport->service_params);
- break;
- default:
- resp = FC_SPP_RESP_INVL;
- break;
- }
- spp->spp_flags |= resp;
- len -= plen;
- rspp = (struct fc_els_spp *)((char *)rspp + plen);
- spp = (struct fc_els_spp *)((char *)spp + plen);
- }
+ sp = lport->tt.seq_start_next(sp);
+ WARN_ON(!sp);
+ pp = fc_frame_payload_get(fp, len);
+ WARN_ON(!pp);
+ memset(pp, 0, len);
+ pp->prli.prli_cmd = ELS_LS_ACC;
+ pp->prli.prli_spp_len = plen;
+ pp->prli.prli_len = htons(len);
+ len -= sizeof(struct fc_els_prli);
- /*
- * Send LS_ACC. If this fails, the originator should retry.
- */
- f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
- f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
- ep = fc_seq_exch(sp);
- fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
- FC_TYPE_ELS, f_ctl, 0);
- lport->tt.seq_send(lport, sp, fp);
+ /* reinitialize remote port roles */
+ rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
- /*
- * Get lock and re-check state.
- */
- switch (rdata->rp_state) {
- case RPORT_ST_PRLI:
- fc_rport_enter_ready(rdata);
+ /*
+ * Go through all the service parameter pages and build
+ * response. If plen indicates longer SPP than standard,
+ * use that. The entire response has been pre-cleared above.
+ */
+ spp = &pp->spp;
+ while (len >= plen) {
+ spp->spp_type = rspp->spp_type;
+ spp->spp_type_ext = rspp->spp_type_ext;
+ spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
+ resp = FC_SPP_RESP_ACK;
+
+ switch (rspp->spp_type) {
+ case 0: /* common to all FC-4 types */
break;
- case RPORT_ST_READY:
- case RPORT_ST_ADISC:
+ case FC_TYPE_FCP:
+ fcp_parm = ntohl(rspp->spp_params);
+ if (fcp_parm & FCP_SPPF_RETRY)
+ rdata->flags |= FC_RP_FLAGS_RETRY;
+ rdata->supported_classes = FC_COS_CLASS3;
+ if (fcp_parm & FCP_SPPF_INIT_FCN)
+ roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+ if (fcp_parm & FCP_SPPF_TARG_FCN)
+ roles |= FC_RPORT_ROLE_FCP_TARGET;
+ rdata->ids.roles = roles;
+
+ spp->spp_params = htonl(lport->service_params);
break;
default:
+ resp = FC_SPP_RESP_INVL;
break;
}
+ spp->spp_flags |= resp;
+ len -= plen;
+ rspp = (struct fc_els_spp *)((char *)rspp + plen);
+ spp = (struct fc_els_spp *)((char *)spp + plen);
+ }
+
+ /*
+ * Send LS_ACC. If this fails, the originator should retry.
+ */
+ f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
+ f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
+ ep = fc_seq_exch(sp);
+ fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
+ FC_TYPE_ELS, f_ctl, 0);
+ lport->tt.seq_send(lport, sp, fp);
+
+ switch (rdata->rp_state) {
+ case RPORT_ST_PRLI:
+ fc_rport_enter_ready(rdata);
+ break;
+ default:
+ break;
}
+ goto drop;
+
+reject_len:
+ rjt_data.reason = ELS_RJT_PROT;
+ rjt_data.explan = ELS_EXPL_INV_LEN;
+reject:
+ lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
+drop:
fc_frame_free(rx_fp);
}
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 5c92620292fb..8eeb39ffa37f 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -421,7 +421,7 @@ iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
struct iscsi_conn *conn = tcp_conn->iscsi_conn;
struct hash_desc *rx_hash = NULL;
- if (conn->datadgst_en &
+ if (conn->datadgst_en &&
!(conn->session->tt->caps & CAP_DIGEST_OFFLOAD))
rx_hash = tcp_conn->rx_hash;
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 565e16dd74fc..e35a4c71eb9a 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -310,7 +310,9 @@ struct lpfc_vport {
#define FC_NLP_MORE 0x40 /* More node to process in node tbl */
#define FC_OFFLINE_MODE 0x80 /* Interface is offline for diag */
#define FC_FABRIC 0x100 /* We are fabric attached */
+#define FC_VPORT_LOGO_RCVD 0x200 /* LOGO received on vport */
#define FC_RSCN_DISCOVERY 0x400 /* Auth all devices after RSCN */
+#define FC_LOGO_RCVD_DID_CHNG 0x800 /* FDISC on phys port detect DID chng*/
#define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */
#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */
#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */
@@ -554,6 +556,7 @@ struct lpfc_hba {
struct lpfc_dmabuf slim2p;
MAILBOX_t *mbox;
+ uint32_t *mbox_ext;
uint32_t *inb_ha_copy;
uint32_t *inb_counter;
uint32_t inb_last_counter;
@@ -622,6 +625,7 @@ struct lpfc_hba {
uint32_t cfg_enable_hba_reset;
uint32_t cfg_enable_hba_heartbeat;
uint32_t cfg_enable_bg;
+ uint32_t cfg_hostmem_hgp;
uint32_t cfg_log_verbose;
uint32_t cfg_aer_support;
uint32_t cfg_suppress_link_up;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 1849e33e68f9..2e5f376d9ccc 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -869,6 +869,7 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
LPFC_MBOXQ_t *pmboxq;
MAILBOX_t *pmb;
int rc = 0;
+ uint32_t max_vpi;
/*
* prevent udev from issuing mailbox commands until the port is
@@ -916,11 +917,17 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
if (axri)
*axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) -
phba->sli4_hba.max_cfg_param.xri_used;
+
+ /* Account for differences with SLI-3. Get vpi count from
+ * mailbox data and subtract one for max vpi value.
+ */
+ max_vpi = (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) > 0) ?
+ (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - 1) : 0;
+
if (mvpi)
- *mvpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
+ *mvpi = max_vpi;
if (avpi)
- *avpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) -
- phba->sli4_hba.max_cfg_param.vpi_used;
+ *avpi = max_vpi - phba->sli4_hba.max_cfg_param.vpi_used;
} else {
if (mrpi)
*mrpi = pmb->un.varRdConfig.max_rpi;
@@ -1925,13 +1932,12 @@ MODULE_PARM_DESC(lpfc_sli_mode, "SLI mode selector:"
" 2 - select SLI-2 even on SLI-3 capable HBAs,"
" 3 - select SLI-3");
-int lpfc_enable_npiv = 0;
+int lpfc_enable_npiv = 1;
module_param(lpfc_enable_npiv, int, 0);
MODULE_PARM_DESC(lpfc_enable_npiv, "Enable NPIV functionality");
lpfc_param_show(enable_npiv);
-lpfc_param_init(enable_npiv, 0, 0, 1);
-static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO,
- lpfc_enable_npiv_show, NULL);
+lpfc_param_init(enable_npiv, 1, 0, 1);
+static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL);
/*
# lpfc_suppress_link_up: Bring link up at initialization
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index d62b3e467926..dcf088262b20 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -79,6 +79,12 @@ struct lpfc_bsg_iocb {
struct lpfc_bsg_mbox {
LPFC_MBOXQ_t *pmboxq;
MAILBOX_t *mb;
+ struct lpfc_dmabuf *rxbmp; /* for BIU diags */
+ struct lpfc_dmabufext *dmp; /* for BIU diags */
+ uint8_t *ext; /* extended mailbox data */
+ uint32_t mbOffset; /* from app */
+ uint32_t inExtWLen; /* from app */
+ uint32_t outExtWLen; /* from app */
/* job waiting for this mbox command to finish */
struct fc_bsg_job *set_job;
@@ -1708,21 +1714,26 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (dmabuf) {
dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
- INIT_LIST_HEAD(&dmabuf->list);
- bpl = (struct ulp_bde64 *) dmabuf->virt;
- memset(bpl, 0, sizeof(*bpl));
- ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
- bpl->addrHigh =
- le32_to_cpu(putPaddrHigh(dmabuf->phys + sizeof(*bpl)));
- bpl->addrLow =
- le32_to_cpu(putPaddrLow(dmabuf->phys + sizeof(*bpl)));
- bpl->tus.f.bdeFlags = 0;
- bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
- bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ if (dmabuf->virt) {
+ INIT_LIST_HEAD(&dmabuf->list);
+ bpl = (struct ulp_bde64 *) dmabuf->virt;
+ memset(bpl, 0, sizeof(*bpl));
+ ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
+ bpl->addrHigh =
+ le32_to_cpu(putPaddrHigh(dmabuf->phys +
+ sizeof(*bpl)));
+ bpl->addrLow =
+ le32_to_cpu(putPaddrLow(dmabuf->phys +
+ sizeof(*bpl)));
+ bpl->tus.f.bdeFlags = 0;
+ bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ }
}
if (cmdiocbq == NULL || rspiocbq == NULL ||
- dmabuf == NULL || bpl == NULL || ctreq == NULL) {
+ dmabuf == NULL || bpl == NULL || ctreq == NULL ||
+ dmabuf->virt == NULL) {
ret_val = ENOMEM;
goto err_get_xri_exit;
}
@@ -1918,9 +1929,11 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (rxbmp != NULL) {
rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
- INIT_LIST_HEAD(&rxbmp->list);
- rxbpl = (struct ulp_bde64 *) rxbmp->virt;
- rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
+ if (rxbmp->virt) {
+ INIT_LIST_HEAD(&rxbmp->list);
+ rxbpl = (struct ulp_bde64 *) rxbmp->virt;
+ rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
+ }
}
if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) {
@@ -2174,14 +2187,16 @@ lpfc_bsg_diag_test(struct fc_bsg_job *job)
if (txbmp) {
txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
- INIT_LIST_HEAD(&txbmp->list);
- txbpl = (struct ulp_bde64 *) txbmp->virt;
- if (txbpl)
+ if (txbmp->virt) {
+ INIT_LIST_HEAD(&txbmp->list);
+ txbpl = (struct ulp_bde64 *) txbmp->virt;
txbuffer = diag_cmd_data_alloc(phba,
txbpl, full_size, 0);
+ }
}
- if (!cmdiocbq || !rspiocbq || !txbmp || !txbpl || !txbuffer) {
+ if (!cmdiocbq || !rspiocbq || !txbmp || !txbpl || !txbuffer ||
+ !txbmp->virt) {
rc = -ENOMEM;
goto err_loopback_test_exit;
}
@@ -2377,35 +2392,90 @@ void
lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
struct bsg_job_data *dd_data;
- MAILBOX_t *pmb;
- MAILBOX_t *mb;
struct fc_bsg_job *job;
uint32_t size;
unsigned long flags;
+ uint8_t *to;
+ uint8_t *from;
spin_lock_irqsave(&phba->ct_ev_lock, flags);
dd_data = pmboxq->context1;
+ /* job already timed out? */
if (!dd_data) {
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
return;
}
- pmb = &dd_data->context_un.mbox.pmboxq->u.mb;
- mb = dd_data->context_un.mbox.mb;
+ /* build the outgoing buffer to do an sg copy
+ * the format is the response mailbox followed by any extended
+ * mailbox data
+ */
+ from = (uint8_t *)&pmboxq->u.mb;
+ to = (uint8_t *)dd_data->context_un.mbox.mb;
+ memcpy(to, from, sizeof(MAILBOX_t));
+ if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) {
+ /* copy the extended data if any, count is in words */
+ if (dd_data->context_un.mbox.outExtWLen) {
+ from = (uint8_t *)dd_data->context_un.mbox.ext;
+ to += sizeof(MAILBOX_t);
+ size = dd_data->context_un.mbox.outExtWLen *
+ sizeof(uint32_t);
+ memcpy(to, from, size);
+ } else if (pmboxq->u.mb.mbxCommand == MBX_RUN_BIU_DIAG64) {
+ from = (uint8_t *)dd_data->context_un.mbox.
+ dmp->dma.virt;
+ to += sizeof(MAILBOX_t);
+ size = dd_data->context_un.mbox.dmp->size;
+ memcpy(to, from, size);
+ } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ (pmboxq->u.mb.mbxCommand == MBX_DUMP_MEMORY)) {
+ from = (uint8_t *)dd_data->context_un.mbox.dmp->dma.
+ virt;
+ to += sizeof(MAILBOX_t);
+ size = pmboxq->u.mb.un.varWords[5];
+ memcpy(to, from, size);
+ } else if (pmboxq->u.mb.mbxCommand == MBX_READ_EVENT_LOG) {
+ from = (uint8_t *)dd_data->context_un.
+ mbox.dmp->dma.virt;
+ to += sizeof(MAILBOX_t);
+ size = dd_data->context_un.mbox.dmp->size;
+ memcpy(to, from, size);
+ }
+ }
+
+ from = (uint8_t *)dd_data->context_un.mbox.mb;
job = dd_data->context_un.mbox.set_job;
- memcpy(mb, pmb, sizeof(*pmb));
- size = job->request_payload.payload_len;
+ size = job->reply_payload.payload_len;
job->reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
- mb, size);
+ from, size);
job->reply->result = 0;
+
dd_data->context_un.mbox.set_job = NULL;
job->dd_data = NULL;
job->job_done(job);
+ /* need to hold the lock until we call job done to hold off
+ * the timeout handler returning to the midlayer while
+	 * we are still processing the job
+ */
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+ kfree(dd_data->context_un.mbox.mb);
mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
- kfree(mb);
+ kfree(dd_data->context_un.mbox.ext);
+ if (dd_data->context_un.mbox.dmp) {
+ dma_free_coherent(&phba->pcidev->dev,
+ dd_data->context_un.mbox.dmp->size,
+ dd_data->context_un.mbox.dmp->dma.virt,
+ dd_data->context_un.mbox.dmp->dma.phys);
+ kfree(dd_data->context_un.mbox.dmp);
+ }
+ if (dd_data->context_un.mbox.rxbmp) {
+ lpfc_mbuf_free(phba, dd_data->context_un.mbox.rxbmp->virt,
+ dd_data->context_un.mbox.rxbmp->phys);
+ kfree(dd_data->context_un.mbox.rxbmp);
+ }
kfree(dd_data);
return;
}
@@ -2464,10 +2534,12 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
case MBX_SET_DEBUG:
case MBX_WRITE_WWN:
case MBX_SLI4_CONFIG:
+ case MBX_READ_EVENT_LOG:
case MBX_READ_EVENT_LOG_STATUS:
case MBX_WRITE_EVENT_LOG:
case MBX_PORT_CAPABILITIES:
case MBX_PORT_IOV_CONTROL:
+ case MBX_RUN_BIU_DIAG64:
break;
case MBX_SET_VARIABLE:
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -2482,8 +2554,6 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
phba->fc_topology = TOPOLOGY_PT_PT;
}
break;
- case MBX_RUN_BIU_DIAG64:
- case MBX_READ_EVENT_LOG:
case MBX_READ_SPARM64:
case MBX_READ_LA:
case MBX_READ_LA64:
@@ -2518,97 +2588,365 @@ static uint32_t
lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
struct lpfc_vport *vport)
{
- LPFC_MBOXQ_t *pmboxq;
- MAILBOX_t *pmb;
- MAILBOX_t *mb;
- struct bsg_job_data *dd_data;
+ LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
+ MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
+ /* a 4k buffer to hold the mb and extended data from/to the bsg */
+ MAILBOX_t *mb = NULL;
+ struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
uint32_t size;
+ struct lpfc_dmabuf *rxbmp = NULL; /* for biu diag */
+ struct lpfc_dmabufext *dmp = NULL; /* for biu diag */
+ struct ulp_bde64 *rxbpl = NULL;
+ struct dfc_mbox_req *mbox_req = (struct dfc_mbox_req *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+ uint8_t *ext = NULL;
int rc = 0;
+ uint8_t *from;
+
+ /* in case no data is transferred */
+ job->reply->reply_payload_rcv_len = 0;
+
+ /* check if requested extended data lengths are valid */
+ if ((mbox_req->inExtWLen > MAILBOX_EXT_SIZE) ||
+ (mbox_req->outExtWLen > MAILBOX_EXT_SIZE)) {
+ rc = -ERANGE;
+ goto job_done;
+ }
/* allocate our bsg tracking structure */
dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
if (!dd_data) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2727 Failed allocation of dd_data\n");
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto job_done;
}
- mb = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ mb = kzalloc(BSG_MBOX_SIZE, GFP_KERNEL);
if (!mb) {
- kfree(dd_data);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto job_done;
}
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmboxq) {
- kfree(dd_data);
- kfree(mb);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto job_done;
}
+ memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
size = job->request_payload.payload_len;
- job->reply->reply_payload_rcv_len =
- sg_copy_to_buffer(job->request_payload.sg_list,
- job->request_payload.sg_cnt,
- mb, size);
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ mb, size);
rc = lpfc_bsg_check_cmd_access(phba, mb, vport);
- if (rc != 0) {
- kfree(dd_data);
- kfree(mb);
- mempool_free(pmboxq, phba->mbox_mem_pool);
- return rc; /* must be negative */
- }
+ if (rc != 0)
+ goto job_done; /* must be negative */
- memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
pmb = &pmboxq->u.mb;
memcpy(pmb, mb, sizeof(*pmb));
pmb->mbxOwner = OWN_HOST;
- pmboxq->context1 = NULL;
pmboxq->vport = vport;
- if ((vport->fc_flag & FC_OFFLINE_MODE) ||
- (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
- rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
- if (rc != MBX_SUCCESS) {
- if (rc != MBX_TIMEOUT) {
- kfree(dd_data);
- kfree(mb);
- mempool_free(pmboxq, phba->mbox_mem_pool);
+ /* If HBA encountered an error attention, allow only DUMP
+ * or RESTART mailbox commands until the HBA is restarted.
+ */
+ if (phba->pport->stopped &&
+ pmb->mbxCommand != MBX_DUMP_MEMORY &&
+ pmb->mbxCommand != MBX_RESTART &&
+ pmb->mbxCommand != MBX_WRITE_VPARMS &&
+ pmb->mbxCommand != MBX_WRITE_WWN)
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+ "2797 mbox: Issued mailbox cmd "
+ "0x%x while in stopped state.\n",
+ pmb->mbxCommand);
+
+ /* Don't allow mailbox commands to be sent when blocked
+ * or when in the middle of discovery
+ */
+ if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
+ rc = -EAGAIN;
+ goto job_done;
+ }
+
+ /* extended mailbox commands will need an extended buffer */
+ if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
+ ext = kzalloc(MAILBOX_EXT_SIZE, GFP_KERNEL);
+ if (!ext) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ /* any data for the device? */
+ if (mbox_req->inExtWLen) {
+ from = (uint8_t *)mb;
+ from += sizeof(MAILBOX_t);
+ memcpy((uint8_t *)ext, from,
+ mbox_req->inExtWLen * sizeof(uint32_t));
+ }
+
+ pmboxq->context2 = ext;
+ pmboxq->in_ext_byte_len =
+ mbox_req->inExtWLen * sizeof(uint32_t);
+ pmboxq->out_ext_byte_len =
+ mbox_req->outExtWLen * sizeof(uint32_t);
+ pmboxq->mbox_offset_word = mbox_req->mbOffset;
+ }
+
+ /* biu diag will need a kernel buffer to transfer the data
+ * allocate our own buffer and setup the mailbox command to
+ * use ours
+ */
+ if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
+ uint32_t transmit_length = pmb->un.varWords[1];
+ uint32_t receive_length = pmb->un.varWords[4];
+ /* transmit length cannot be greater than receive length or
+ * mailbox extension size
+ */
+ if ((transmit_length > receive_length) ||
+ (transmit_length > MAILBOX_EXT_SIZE)) {
+ rc = -ERANGE;
+ goto job_done;
+ }
+
+ rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!rxbmp) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
+ if (!rxbmp->virt) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ INIT_LIST_HEAD(&rxbmp->list);
+ rxbpl = (struct ulp_bde64 *) rxbmp->virt;
+ dmp = diag_cmd_data_alloc(phba, rxbpl, transmit_length, 0);
+ if (!dmp) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ INIT_LIST_HEAD(&dmp->dma.list);
+ pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
+ putPaddrHigh(dmp->dma.phys);
+ pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
+ putPaddrLow(dmp->dma.phys);
+
+ pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
+ putPaddrHigh(dmp->dma.phys +
+ pmb->un.varBIUdiag.un.s2.
+ xmit_bde64.tus.f.bdeSize);
+ pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
+ putPaddrLow(dmp->dma.phys +
+ pmb->un.varBIUdiag.un.s2.
+ xmit_bde64.tus.f.bdeSize);
+
+ /* copy the transmit data found in the mailbox extension area */
+ from = (uint8_t *)mb;
+ from += sizeof(MAILBOX_t);
+ memcpy((uint8_t *)dmp->dma.virt, from, transmit_length);
+ } else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
+ struct READ_EVENT_LOG_VAR *rdEventLog =
+			&pmb->un.varRdEventLog;
+ uint32_t receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
+ uint32_t mode = bf_get(lpfc_event_log, rdEventLog);
+
+ /* receive length cannot be greater than mailbox
+ * extension size
+ */
+ if (receive_length > MAILBOX_EXT_SIZE) {
+ rc = -ERANGE;
+ goto job_done;
+ }
+
+ /* mode zero uses a bde like biu diags command */
+ if (mode == 0) {
+
+ /* rebuild the command for sli4 using our own buffers
+ * like we do for biu diags
+ */
+
+ rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!rxbmp) {
+ rc = -ENOMEM;
+ goto job_done;
}
- return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
+
+ rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
+ rxbpl = (struct ulp_bde64 *) rxbmp->virt;
+ if (rxbpl) {
+ INIT_LIST_HEAD(&rxbmp->list);
+ dmp = diag_cmd_data_alloc(phba, rxbpl,
+ receive_length, 0);
+ }
+
+ if (!dmp) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ INIT_LIST_HEAD(&dmp->dma.list);
+ pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
+ pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
}
+ } else if (phba->sli_rev == LPFC_SLI_REV4) {
+ if (pmb->mbxCommand == MBX_DUMP_MEMORY) {
+ /* rebuild the command for sli4 using our own buffers
+ * like we do for biu diags
+ */
+ uint32_t receive_length = pmb->un.varWords[2];
+ /* receive length cannot be greater than mailbox
+ * extension size
+ */
+ if ((receive_length == 0) ||
+ (receive_length > MAILBOX_EXT_SIZE)) {
+ rc = -ERANGE;
+ goto job_done;
+ }
- memcpy(mb, pmb, sizeof(*pmb));
- job->reply->reply_payload_rcv_len =
- sg_copy_from_buffer(job->reply_payload.sg_list,
- job->reply_payload.sg_cnt,
- mb, size);
- kfree(dd_data);
- kfree(mb);
- mempool_free(pmboxq, phba->mbox_mem_pool);
- /* not waiting mbox already done */
- return 0;
+ rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!rxbmp) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
+ if (!rxbmp->virt) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ INIT_LIST_HEAD(&rxbmp->list);
+ rxbpl = (struct ulp_bde64 *) rxbmp->virt;
+ dmp = diag_cmd_data_alloc(phba, rxbpl, receive_length,
+ 0);
+ if (!dmp) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ INIT_LIST_HEAD(&dmp->dma.list);
+ pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
+ pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
+ } else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
+ pmb->un.varUpdateCfg.co) {
+ struct ulp_bde64 *bde =
+ (struct ulp_bde64 *)&pmb->un.varWords[4];
+
+ /* bde size cannot be greater than mailbox ext size */
+ if (bde->tus.f.bdeSize > MAILBOX_EXT_SIZE) {
+ rc = -ERANGE;
+ goto job_done;
+ }
+
+ rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!rxbmp) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
+ if (!rxbmp->virt) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ INIT_LIST_HEAD(&rxbmp->list);
+ rxbpl = (struct ulp_bde64 *) rxbmp->virt;
+ dmp = diag_cmd_data_alloc(phba, rxbpl,
+ bde->tus.f.bdeSize, 0);
+ if (!dmp) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ INIT_LIST_HEAD(&dmp->dma.list);
+ bde->addrHigh = putPaddrHigh(dmp->dma.phys);
+ bde->addrLow = putPaddrLow(dmp->dma.phys);
+
+ /* copy the transmit data found in the mailbox
+ * extension area
+ */
+ from = (uint8_t *)mb;
+ from += sizeof(MAILBOX_t);
+ memcpy((uint8_t *)dmp->dma.virt, from,
+ bde->tus.f.bdeSize);
+ }
}
+ dd_data->context_un.mbox.rxbmp = rxbmp;
+ dd_data->context_un.mbox.dmp = dmp;
+
/* setup wake call as IOCB callback */
pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait;
+
/* setup context field to pass wait_queue pointer to wake function */
pmboxq->context1 = dd_data;
dd_data->type = TYPE_MBOX;
dd_data->context_un.mbox.pmboxq = pmboxq;
dd_data->context_un.mbox.mb = mb;
dd_data->context_un.mbox.set_job = job;
+ dd_data->context_un.mbox.ext = ext;
+ dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
+ dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
+ dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
job->dd_data = dd_data;
+
+ if ((vport->fc_flag & FC_OFFLINE_MODE) ||
+ (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
+ rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+ if (rc != MBX_SUCCESS) {
+ rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
+ goto job_done;
+ }
+
+ /* job finished, copy the data */
+ memcpy(mb, pmb, sizeof(*pmb));
+ job->reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ mb, size);
+ /* not waiting mbox already done */
+ rc = 0;
+ goto job_done;
+ }
+
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
- if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
- kfree(dd_data);
- kfree(mb);
+ if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
+ return 1; /* job started */
+
+job_done:
+ /* common exit for error or job completed inline */
+ kfree(mb);
+ if (pmboxq)
mempool_free(pmboxq, phba->mbox_mem_pool);
- return -EIO;
+ kfree(ext);
+ if (dmp) {
+ dma_free_coherent(&phba->pcidev->dev,
+ dmp->size, dmp->dma.virt,
+ dmp->dma.phys);
+ kfree(dmp);
}
+ if (rxbmp) {
+ lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
+ kfree(rxbmp);
+ }
+ kfree(dd_data);
- return 1;
+ return rc;
}
/**
@@ -2633,7 +2971,12 @@ lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
goto job_error;
}
- if (job->request_payload.payload_len != PAGE_SIZE) {
+ if (job->request_payload.payload_len != BSG_MBOX_SIZE) {
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ if (job->reply_payload.payload_len != BSG_MBOX_SIZE) {
rc = -EINVAL;
goto job_error;
}
@@ -3094,6 +3437,7 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
job->dd_data = NULL;
job->reply->reply_payload_rcv_len = 0;
job->reply->result = -EAGAIN;
+ /* the mbox completion handler can now be run */
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
job->job_done(job);
break;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index 5bc630819b9e..a2c33e7c9152 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -91,11 +91,12 @@ struct get_mgmt_rev_reply {
struct MgmtRevInfo info;
};
+#define BSG_MBOX_SIZE 4096 /* mailbox command plus extended data */
struct dfc_mbox_req {
uint32_t command;
+ uint32_t mbOffset;
uint32_t inExtWLen;
uint32_t outExtWLen;
- uint8_t mbOffset;
};
/* Used for menlo command or menlo data. The xri is only used for menlo data */
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 5087c4211b43..fbc9baeb6048 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -65,6 +65,7 @@ void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *);
void lpfc_retry_pport_discovery(struct lpfc_hba *);
+void lpfc_release_rpi(struct lpfc_hba *, struct lpfc_vport *, uint16_t);
void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 2851d75ffc6f..36257a685509 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -38,6 +38,7 @@ enum lpfc_work_type {
LPFC_EVT_ELS_RETRY,
LPFC_EVT_DEV_LOSS,
LPFC_EVT_FASTPATH_MGMT_EVT,
+ LPFC_EVT_RESET_HBA,
};
/* structure used to queue event to the discovery tasklet */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 5fbdb22c1899..c4c7f0ad7468 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -584,6 +584,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_unlock_irq(shost->host_lock);
lpfc_unreg_rpi(vport, np);
}
+ lpfc_cleanup_pending_mbox(vport);
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
lpfc_mbx_unreg_vpi(vport);
spin_lock_irq(shost->host_lock);
@@ -864,6 +865,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
+ vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
spin_unlock_irq(shost->host_lock);
/*
@@ -893,11 +895,14 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (!rc) {
/* Mark the FCF discovery process done */
- lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | LOG_ELS,
- "2769 FLOGI successful on FCF record: "
- "current_fcf_index:x%x, terminate FCF "
- "round robin failover process\n",
- phba->fcf.current_rec.fcf_indx);
+ if (phba->hba_flag & HBA_FIP_SUPPORT)
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
+ LOG_ELS,
+ "2769 FLOGI successful on FCF "
+ "record: current_fcf_index:"
+ "x%x, terminate FCF round "
+ "robin failover process\n",
+ phba->fcf.current_rec.fcf_indx);
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
spin_unlock_irq(&phba->hbalock);
@@ -5366,7 +5371,7 @@ lpfc_send_els_failure_event(struct lpfc_hba *phba,
sizeof(struct lpfc_name));
pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
cmdiocbp->context2)->virt);
- lsrjt_event.command = *pcmd;
+ lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0;
stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]);
lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode;
lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp;
@@ -6050,7 +6055,8 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
spin_unlock_irq(shost->host_lock);
- if (vport->port_type == LPFC_PHYSICAL_PORT)
+ if (vport->port_type == LPFC_PHYSICAL_PORT
+ && !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
lpfc_initial_flogi(vport);
else
lpfc_initial_fdisc(vport);
@@ -6286,6 +6292,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
+ vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
vport->fc_flag |= FC_FABRIC;
if (vport->phba->fc_topology == TOPOLOGY_LOOP)
vport->fc_flag |= FC_PUBLIC_LOOP;
@@ -6310,11 +6317,14 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
spin_unlock_irq(shost->host_lock);
lpfc_unreg_rpi(vport, np);
}
+ lpfc_cleanup_pending_mbox(vport);
lpfc_mbx_unreg_vpi(vport);
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
if (phba->sli_rev == LPFC_SLI_REV4)
vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+ else
+ vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG;
spin_unlock_irq(shost->host_lock);
}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index e1466eec56b7..1f87b4fb8b50 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -475,6 +475,10 @@ lpfc_work_list_done(struct lpfc_hba *phba)
lpfc_send_fastpath_evt(phba, evtp);
free_evt = 0;
break;
+ case LPFC_EVT_RESET_HBA:
+ if (!(phba->pport->load_flag & FC_UNLOADING))
+ lpfc_reset_hba(phba);
+ break;
}
if (free_evt)
kfree(evtp);
@@ -1531,7 +1535,37 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
}
/**
- * lpfc_sli4_fcf_rec_mbox_parse - parse non-embedded fcf record mailbox command
+ * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_cnt: number of eligible fcf record seen so far.
+ *
+ * This function makes a running random selection decision on the FCF record
+ * to use through a sequence of @fcf_cnt eligible FCF records with equal
+ * probability. To perform integer manipulation of random numbers with
+ * size uint32_t, the lower 16 bits of the 32-bit random number returned
+ * from random32() are taken as the generated random number.
+ *
+ * Returns true when the outcome is that the newly read FCF record should be
+ * chosen; otherwise, returns false to keep the previously chosen FCF
+ * record.
+ **/
+static bool
+lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
+{
+ uint32_t rand_num;
+
+ /* Get 16-bit uniform random number */
+ rand_num = (0xFFFF & random32());
+
+ /* Decision with probability 1/fcf_cnt */
+ if ((fcf_cnt * rand_num) < 0xFFFF)
+ return true;
+ else
+ return false;
+}
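
The selection above is a reservoir-sampling style choice: replacing the current pick with probability 1/fcf_cnt at each step leaves every eligible record equally likely to end up selected. Below is a minimal standalone sketch of that argument, not driver code; rand() stands in for random32(), and the candidate and trial counts are invented for illustration.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* keep the newest of cnt equally-good candidates with probability ~1/cnt */
static bool select_new_candidate(unsigned int cnt)
{
	unsigned int rand16 = rand() & 0xFFFF;	/* 16-bit uniform random */
	return (cnt * rand16) < 0xFFFF;		/* same test as the driver */
}

int main(void)
{
	unsigned int hits[4] = { 0, 0, 0, 0 };

	srand(time(NULL));
	for (int trial = 0; trial < 100000; trial++) {
		unsigned int chosen = 0;

		for (unsigned int i = 1; i <= 4; i++)
			if (select_new_candidate(i))
				chosen = i - 1;	/* replace the previous pick */
		hits[chosen]++;
	}
	for (int i = 0; i < 4; i++)	/* each count should land near 25000 */
		printf("candidate %d chosen %u times\n", i, hits[i]);
	return 0;
}

With the 16-bit test the per-step probability is only approximately 1/cnt, which is adequate for spreading the FCF choice across records of equal priority.
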
+
+/**
+ * lpfc_sli4_fcf_rec_mbox_parse - Parse non-embedded fcf record mailbox command.
* @phba: pointer to lpfc hba data structure.
* @mboxq: pointer to mailbox object.
* @next_fcf_index: pointer to holder of next fcf index.
@@ -1592,7 +1626,9 @@ lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
new_fcf_record = (struct fcf_record *)(virt_addr +
sizeof(struct lpfc_mbx_read_fcf_tbl));
lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
- sizeof(struct fcf_record));
+ offsetof(struct fcf_record, vlan_bitmap));
+ new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137);
+ new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138);
return new_fcf_record;
}
@@ -1679,6 +1715,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
uint16_t fcf_index, next_fcf_index;
struct lpfc_fcf_rec *fcf_rec = NULL;
uint16_t vlan_id;
+ uint32_t seed;
+ bool select_new_fcf;
int rc;
/* If there is pending FCoE event restart FCF table scan */
@@ -1809,9 +1847,21 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
* than the driver FCF record, use the new record.
*/
if (new_fcf_record->fip_priority < fcf_rec->priority) {
- /* Choose this FCF record */
+ /* Choose the new FCF record with lower priority */
__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
addr_mode, vlan_id, 0);
+ /* Reset running random FCF selection count */
+ phba->fcf.eligible_fcf_cnt = 1;
+ } else if (new_fcf_record->fip_priority == fcf_rec->priority) {
+ /* Update running random FCF selection count */
+ phba->fcf.eligible_fcf_cnt++;
+ select_new_fcf = lpfc_sli4_new_fcf_random_select(phba,
+ phba->fcf.eligible_fcf_cnt);
+ if (select_new_fcf)
+ /* Choose the new FCF by random selection */
+ __lpfc_update_fcf_record(phba, fcf_rec,
+ new_fcf_record,
+ addr_mode, vlan_id, 0);
}
spin_unlock_irq(&phba->hbalock);
goto read_next_fcf;
@@ -1825,6 +1875,11 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
addr_mode, vlan_id, (boot_flag ?
BOOT_ENABLE : 0));
phba->fcf.fcf_flag |= FCF_AVAILABLE;
+ /* Setup initial running random FCF selection count */
+ phba->fcf.eligible_fcf_cnt = 1;
+ /* Seeding the random number generator for random selection */
+ seed = (uint32_t)(0xFFFFFFFF & jiffies);
+ srandom32(seed);
}
spin_unlock_irq(&phba->hbalock);
goto read_next_fcf;
@@ -2686,11 +2741,18 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
switch (mb->mbxStatus) {
case 0x0011:
case 0x0020:
- case 0x9700:
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0911 cmpl_unreg_vpi, mb status = 0x%x\n",
mb->mbxStatus);
break;
+ /* If VPI is busy, reset the HBA */
+ case 0x9700:
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+ "2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
+ vport->vpi, mb->mbxStatus);
+ if (!(phba->pport->load_flag & FC_UNLOADING))
+ lpfc_workq_post_event(phba, NULL, NULL,
+ LPFC_EVT_RESET_HBA);
}
spin_lock_irq(shost->host_lock);
vport->vpi_state &= ~LPFC_VPI_REGISTERED;
@@ -2965,7 +3027,12 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
- lpfc_start_fdiscs(phba);
+ /* when the physical port receives a logo, do not start
+ * vport discovery */
+ if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
+ lpfc_start_fdiscs(phba);
+ else
+ vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
lpfc_do_scr_ns_plogi(phba, vport);
}
@@ -3177,7 +3244,6 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
if (new_state == NLP_STE_UNMAPPED_NODE) {
- ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
ndlp->nlp_type |= NLP_FC_NODE;
}
@@ -4935,6 +5001,7 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
if (ndlp)
lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
+ lpfc_cleanup_pending_mbox(vports[i]);
lpfc_mbx_unreg_vpi(vports[i]);
shost = lpfc_shost_from_vport(vports[i]);
spin_lock_irq(shost->host_lock);
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 89ff7c09e298..e654d01dad24 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1565,95 +1565,83 @@ enum lpfc_protgrp_type {
};
/* PDE Descriptors */
-#define LPFC_PDE1_DESCRIPTOR 0x81
-#define LPFC_PDE2_DESCRIPTOR 0x82
-#define LPFC_PDE3_DESCRIPTOR 0x83
-
-/* BlockGuard Profiles */
-enum lpfc_bg_prof_codes {
- LPFC_PROF_INVALID,
- LPFC_PROF_A1 = 128, /* Full Protection */
- LPFC_PROF_A2, /* Disabled Protection Checks:A2~A4 */
- LPFC_PROF_A3,
- LPFC_PROF_A4,
- LPFC_PROF_B1, /* Embedded DIFs: B1~B3 */
- LPFC_PROF_B2,
- LPFC_PROF_B3,
- LPFC_PROF_C1, /* Separate DIFs: C1~C3 */
- LPFC_PROF_C2,
- LPFC_PROF_C3,
- LPFC_PROF_D1, /* Full Protection */
- LPFC_PROF_D2, /* Partial Protection & Check Disabling */
- LPFC_PROF_D3,
- LPFC_PROF_E1, /* E1~E4:out - check-only, in - update apptag */
- LPFC_PROF_E2,
- LPFC_PROF_E3,
- LPFC_PROF_E4,
- LPFC_PROF_F1, /* Full Translation - F1 Prot Descriptor */
- /* F1 Translation BDE */
- LPFC_PROF_ANT1, /* TCP checksum, DIF inline with data buffers */
- LPFC_PROF_AST1, /* TCP checksum, DIF split from data buffer */
- LPFC_PROF_ANT2,
- LPFC_PROF_AST2
+#define LPFC_PDE5_DESCRIPTOR 0x85
+#define LPFC_PDE6_DESCRIPTOR 0x86
+#define LPFC_PDE7_DESCRIPTOR 0x87
+
+/* BlockGuard Opcodes */
+#define BG_OP_IN_NODIF_OUT_CRC 0x0
+#define BG_OP_IN_CRC_OUT_NODIF 0x1
+#define BG_OP_IN_NODIF_OUT_CSUM 0x2
+#define BG_OP_IN_CSUM_OUT_NODIF 0x3
+#define BG_OP_IN_CRC_OUT_CRC 0x4
+#define BG_OP_IN_CSUM_OUT_CSUM 0x5
+#define BG_OP_IN_CRC_OUT_CSUM 0x6
+#define BG_OP_IN_CSUM_OUT_CRC 0x7
+
+struct lpfc_pde5 {
+ uint32_t word0;
+#define pde5_type_SHIFT 24
+#define pde5_type_MASK 0x000000ff
+#define pde5_type_WORD word0
+#define pde5_rsvd0_SHIFT 0
+#define pde5_rsvd0_MASK 0x00ffffff
+#define pde5_rsvd0_WORD word0
+ uint32_t reftag; /* Reference Tag Value */
+ uint32_t reftagtr; /* Reference Tag Translation Value */
};
-/* BlockGuard error-control defines */
-#define BG_EC_STOP_ERR 0x00
-#define BG_EC_CONT_ERR 0x01
-#define BG_EC_IGN_UNINIT_STOP_ERR 0x10
-#define BG_EC_IGN_UNINIT_CONT_ERR 0x11
-
-/* PDE (Protection Descriptor Entry) word 0 bit masks and shifts */
-#define PDE_DESC_TYPE_MASK 0xff000000
-#define PDE_DESC_TYPE_SHIFT 24
-#define PDE_BG_PROFILE_MASK 0x00ff0000
-#define PDE_BG_PROFILE_SHIFT 16
-#define PDE_BLOCK_LEN_MASK 0x0000fffc
-#define PDE_BLOCK_LEN_SHIFT 2
-#define PDE_ERR_CTRL_MASK 0x00000003
-#define PDE_ERR_CTRL_SHIFT 0
-/* PDE word 1 bit masks and shifts */
-#define PDE_APPTAG_MASK_MASK 0xffff0000
-#define PDE_APPTAG_MASK_SHIFT 16
-#define PDE_APPTAG_VAL_MASK 0x0000ffff
-#define PDE_APPTAG_VAL_SHIFT 0
-struct lpfc_pde {
- uint32_t parms; /* bitfields of descriptor, prof, len, and ec */
- uint32_t apptag; /* bitfields of app tag maskand app tag value */
- uint32_t reftag; /* reference tag occupying all 32 bits */
+struct lpfc_pde6 {
+ uint32_t word0;
+#define pde6_type_SHIFT 24
+#define pde6_type_MASK 0x000000ff
+#define pde6_type_WORD word0
+#define pde6_rsvd0_SHIFT 0
+#define pde6_rsvd0_MASK 0x00ffffff
+#define pde6_rsvd0_WORD word0
+ uint32_t word1;
+#define pde6_rsvd1_SHIFT 26
+#define pde6_rsvd1_MASK 0x0000003f
+#define pde6_rsvd1_WORD word1
+#define pde6_na_SHIFT 25
+#define pde6_na_MASK 0x00000001
+#define pde6_na_WORD word1
+#define pde6_rsvd2_SHIFT 16
+#define pde6_rsvd2_MASK 0x000001FF
+#define pde6_rsvd2_WORD word1
+#define pde6_apptagtr_SHIFT 0
+#define pde6_apptagtr_MASK 0x0000ffff
+#define pde6_apptagtr_WORD word1
+ uint32_t word2;
+#define pde6_optx_SHIFT 28
+#define pde6_optx_MASK 0x0000000f
+#define pde6_optx_WORD word2
+#define pde6_oprx_SHIFT 24
+#define pde6_oprx_MASK 0x0000000f
+#define pde6_oprx_WORD word2
+#define pde6_nr_SHIFT 23
+#define pde6_nr_MASK 0x00000001
+#define pde6_nr_WORD word2
+#define pde6_ce_SHIFT 22
+#define pde6_ce_MASK 0x00000001
+#define pde6_ce_WORD word2
+#define pde6_re_SHIFT 21
+#define pde6_re_MASK 0x00000001
+#define pde6_re_WORD word2
+#define pde6_ae_SHIFT 20
+#define pde6_ae_MASK 0x00000001
+#define pde6_ae_WORD word2
+#define pde6_ai_SHIFT 19
+#define pde6_ai_MASK 0x00000001
+#define pde6_ai_WORD word2
+#define pde6_bs_SHIFT 16
+#define pde6_bs_MASK 0x00000007
+#define pde6_bs_WORD word2
+#define pde6_apptagval_SHIFT 0
+#define pde6_apptagval_MASK 0x0000ffff
+#define pde6_apptagval_WORD word2
};
-/* inline function to set fields in parms of PDE */
-static inline void
-lpfc_pde_set_bg_parms(struct lpfc_pde *p, u8 desc, u8 prof, u16 len, u8 ec)
-{
- uint32_t *wp = &p->parms;
-
- /* spec indicates that adapter appends two 0's to length field */
- len = len >> 2;
-
- *wp &= 0;
- *wp |= ((desc << PDE_DESC_TYPE_SHIFT) & PDE_DESC_TYPE_MASK);
- *wp |= ((prof << PDE_BG_PROFILE_SHIFT) & PDE_BG_PROFILE_MASK);
- *wp |= ((len << PDE_BLOCK_LEN_SHIFT) & PDE_BLOCK_LEN_MASK);
- *wp |= ((ec << PDE_ERR_CTRL_SHIFT) & PDE_ERR_CTRL_MASK);
- *wp = le32_to_cpu(*wp);
-}
-
-/* inline function to set apptag and reftag fields of PDE */
-static inline void
-lpfc_pde_set_dif_parms(struct lpfc_pde *p, u16 apptagmask, u16 apptagval,
- u32 reftag)
-{
- uint32_t *wp = &p->apptag;
- *wp &= 0;
- *wp |= ((apptagmask << PDE_APPTAG_MASK_SHIFT) & PDE_APPTAG_MASK_MASK);
- *wp |= ((apptagval << PDE_APPTAG_VAL_SHIFT) & PDE_APPTAG_VAL_MASK);
- *wp = le32_to_cpu(*wp);
- wp = &p->reftag;
- *wp = le32_to_cpu(reftag);
-}
-
/* Structure for MB Command LOAD_SM and DOWN_LOAD */
@@ -1744,6 +1732,17 @@ typedef struct {
} un;
} BIU_DIAG_VAR;
+/* Structure for MB command READ_EVENT_LOG (0x38) */
+struct READ_EVENT_LOG_VAR {
+ uint32_t word1;
+#define lpfc_event_log_SHIFT 29
+#define lpfc_event_log_MASK 0x00000001
+#define lpfc_event_log_WORD word1
+#define USE_MAILBOX_RESPONSE 1
+ uint32_t offset;
+ struct ulp_bde64 rcv_bde64;
+};
+
/* Structure for MB Command INIT_LINK (05) */
typedef struct {
@@ -2487,8 +2486,8 @@ typedef struct {
#define DMP_VPORT_REGION_SIZE 0x200
#define DMP_MBOX_OFFSET_WORD 0x5
-#define DMP_REGION_23 0x17 /* fcoe param and port state region */
-#define DMP_RGN23_SIZE 0x400
+#define DMP_REGION_23 0x17 /* fcoe param and port state region */
+#define DMP_RGN23_SIZE 0x400
#define WAKE_UP_PARMS_REGION_ID 4
#define WAKE_UP_PARMS_WORD_SIZE 15
@@ -2503,9 +2502,9 @@ struct vport_rec {
#define VPORT_INFO_REV 0x1
#define MAX_STATIC_VPORT_COUNT 16
struct static_vport_info {
- uint32_t signature;
+ uint32_t signature;
uint32_t rev;
- struct vport_rec vport_list[MAX_STATIC_VPORT_COUNT];
+ struct vport_rec vport_list[MAX_STATIC_VPORT_COUNT];
uint32_t resvd[66];
};
@@ -2934,6 +2933,12 @@ typedef struct {
/* Union of all Mailbox Command types */
#define MAILBOX_CMD_WSIZE 32
#define MAILBOX_CMD_SIZE (MAILBOX_CMD_WSIZE * sizeof(uint32_t))
+/* ext_wsize times 4 bytes should not be greater than max xmit size */
+#define MAILBOX_EXT_WSIZE 512
+#define MAILBOX_EXT_SIZE (MAILBOX_EXT_WSIZE * sizeof(uint32_t))
+#define MAILBOX_HBA_EXT_OFFSET 0x100
+/* max mbox xmit size is a page size for sysfs IO operations */
+#define MAILBOX_MAX_XMIT_SIZE PAGE_SIZE
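
As a quick check of the sizing comment above, 512 extension words of 4 bytes occupy 2048 bytes, which fits within the one-page sysfs transmit limit. The standalone snippet below is illustrative only, not driver code; the 4096-byte page size is an assumption.

#include <stdint.h>

#define EXT_WSIZE	512
#define EXT_SIZE	(EXT_WSIZE * sizeof(uint32_t))	/* 2048 bytes */

/* fails to compile if the extension area would overflow a 4 KiB xmit page */
typedef char mbox_ext_fits_one_page[(EXT_SIZE <= 4096) ? 1 : -1];
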
typedef union {
uint32_t varWords[MAILBOX_CMD_WSIZE - 1]; /* first word is type/
@@ -2972,6 +2977,9 @@ typedef union {
REG_VPI_VAR varRegVpi; /* cmd = 0x96 (REG_VPI) */
UNREG_VPI_VAR varUnregVpi; /* cmd = 0x97 (UNREG_VPI) */
ASYNCEVT_ENABLE_VAR varCfgAsyncEvent; /*cmd = x33 (CONFIG_ASYNC) */
+ struct READ_EVENT_LOG_VAR varRdEventLog; /* cmd = 0x38
+ * (READ_EVENT_LOG)
+ */
struct config_msi_var varCfgMSI;/* cmd = x30 (CONFIG_MSI) */
} MAILVARIANTS;
@@ -3652,7 +3660,8 @@ typedef struct _IOCB { /* IOCB structure */
/* Maximum IOCBs that will fit in SLI2 slim */
#define MAX_SLI2_IOCB 498
#define MAX_SLIM_IOCB_SIZE (SLI2_SLIM_SIZE - \
- (sizeof(MAILBOX_t) + sizeof(PCB_t)))
+ (sizeof(MAILBOX_t) + sizeof(PCB_t) + \
+ sizeof(uint32_t) * MAILBOX_EXT_WSIZE))
/* HBQ entries are 4 words each = 4k */
#define LPFC_TOTAL_HBQ_SIZE (sizeof(struct lpfc_hbq_entry) * \
@@ -3660,6 +3669,7 @@ typedef struct _IOCB { /* IOCB structure */
struct lpfc_sli2_slim {
MAILBOX_t mbx;
+ uint32_t mbx_ext_words[MAILBOX_EXT_WSIZE];
PCB_t pcb;
IOCB_t IOCBs[MAX_SLIM_IOCB_SIZE];
};
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 820015fbc4d6..bbdcf96800f6 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -41,8 +41,14 @@
* Or clear that bit field:
* bf_set(example_bit_field, &t1, 0);
*/
+#define bf_get_le32(name, ptr) \
+ ((le32_to_cpu((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)
#define bf_get(name, ptr) \
(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
+#define bf_set_le32(name, ptr, value) \
+ ((ptr)->name##_WORD = cpu_to_le32(((((value) & \
+ name##_MASK) << name##_SHIFT) | (le32_to_cpu((ptr)->name##_WORD) & \
+ ~(name##_MASK << name##_SHIFT)))))
#define bf_set(name, ptr, value) \
((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
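
A standalone sketch (not driver code) of how these token-pasting accessors work for a field declared with the _SHIFT/_MASK/_WORD triplet; the example struct and field names are invented, modeled on lpfc_pde5 from this patch.

#include <stdio.h>
#include <stdint.h>

#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))

/* a field is described by a _SHIFT/_MASK/_WORD triplet, as in lpfc_pde5 */
struct example_pde5 {
	uint32_t word0;
#define ex_pde5_type_SHIFT	24
#define ex_pde5_type_MASK	0x000000ff
#define ex_pde5_type_WORD	word0
};

int main(void)
{
	struct example_pde5 pde5 = { .word0 = 0 };

	bf_set(ex_pde5_type, &pde5, 0x85);	/* LPFC_PDE5_DESCRIPTOR */
	printf("word0 = 0x%08x, type = 0x%02x\n",
	       pde5.word0, (unsigned)bf_get(ex_pde5_type, &pde5));
	return 0;
}

The _le32 variants differ only in wrapping the word access in le32_to_cpu()/cpu_to_le32(), so entries that live in little-endian DMA memory read and update correctly on big-endian hosts, which is what the EQ/CQ changes later in this patch rely on.
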
@@ -781,6 +787,7 @@ struct mbox_header {
#define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37
#define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A
#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D
+#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A
/* FCoE Opcodes */
#define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE 0x01
@@ -1102,6 +1109,39 @@ struct lpfc_mbx_mq_create {
} u;
};
+struct lpfc_mbx_mq_create_ext {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_mq_create_ext_num_pages_SHIFT 0
+#define lpfc_mbx_mq_create_ext_num_pages_MASK 0x0000FFFF
+#define lpfc_mbx_mq_create_ext_num_pages_WORD word0
+ uint32_t async_evt_bmap;
+#define lpfc_mbx_mq_create_ext_async_evt_link_SHIFT LPFC_TRAILER_CODE_LINK
+#define lpfc_mbx_mq_create_ext_async_evt_link_MASK 0x00000001
+#define lpfc_mbx_mq_create_ext_async_evt_link_WORD async_evt_bmap
+#define lpfc_mbx_mq_create_ext_async_evt_fcfste_SHIFT LPFC_TRAILER_CODE_FCOE
+#define lpfc_mbx_mq_create_ext_async_evt_fcfste_MASK 0x00000001
+#define lpfc_mbx_mq_create_ext_async_evt_fcfste_WORD async_evt_bmap
+#define lpfc_mbx_mq_create_ext_async_evt_group5_SHIFT LPFC_TRAILER_CODE_GRP5
+#define lpfc_mbx_mq_create_ext_async_evt_group5_MASK 0x00000001
+#define lpfc_mbx_mq_create_ext_async_evt_group5_WORD async_evt_bmap
+ struct mq_context context;
+ struct dma_address page[LPFC_MAX_MQ_PAGE];
+ } request;
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_mq_create_q_id_SHIFT 0
+#define lpfc_mbx_mq_create_q_id_MASK 0x0000FFFF
+#define lpfc_mbx_mq_create_q_id_WORD word0
+ } response;
+ } u;
+#define LPFC_ASYNC_EVENT_LINK_STATE 0x2
+#define LPFC_ASYNC_EVENT_FCF_STATE 0x4
+#define LPFC_ASYNC_EVENT_GROUP5 0x20
+};
+
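The async_evt_bmap bit positions reuse the completion-trailer codes, which is why the LPFC_ASYNC_EVENT_* masks defined above are simply one bit per code. A standalone check of that relationship, illustrative only, with values copied from this header:

#include <assert.h>

#define LPFC_TRAILER_CODE_LINK	0x1
#define LPFC_TRAILER_CODE_FCOE	0x2
#define LPFC_TRAILER_CODE_GRP5	0x5

int main(void)
{
	assert((1u << LPFC_TRAILER_CODE_LINK) == 0x2);  /* LPFC_ASYNC_EVENT_LINK_STATE */
	assert((1u << LPFC_TRAILER_CODE_FCOE) == 0x4);  /* LPFC_ASYNC_EVENT_FCF_STATE */
	assert((1u << LPFC_TRAILER_CODE_GRP5) == 0x20); /* LPFC_ASYNC_EVENT_GROUP5 */
	return 0;
}
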
struct lpfc_mbx_mq_destroy {
struct mbox_header header;
union {
@@ -1428,8 +1468,8 @@ struct lpfc_mbx_reg_vfi {
#define lpfc_reg_vfi_fcfi_WORD word2
uint32_t wwn[2];
struct ulp_bde64 bde;
- uint32_t word8_rsvd;
- uint32_t word9_rsvd;
+ uint32_t e_d_tov;
+ uint32_t r_a_tov;
uint32_t word10;
#define lpfc_reg_vfi_nport_id_SHIFT 0
#define lpfc_reg_vfi_nport_id_MASK 0x00FFFFFF
@@ -1940,6 +1980,7 @@ struct lpfc_mbx_sli4_params {
#define rdma_MASK 0x00000001
#define rdma_WORD word3
uint32_t sge_supp_len;
+#define SLI4_PAGE_SIZE 4096
uint32_t word5;
#define if_page_sz_SHIFT 0
#define if_page_sz_MASK 0x0000ffff
@@ -2041,6 +2082,7 @@ struct lpfc_mqe {
struct lpfc_mbx_reg_fcfi reg_fcfi;
struct lpfc_mbx_unreg_fcfi unreg_fcfi;
struct lpfc_mbx_mq_create mq_create;
+ struct lpfc_mbx_mq_create_ext mq_create_ext;
struct lpfc_mbx_eq_create eq_create;
struct lpfc_mbx_cq_create cq_create;
struct lpfc_mbx_wq_create wq_create;
@@ -2099,6 +2141,7 @@ struct lpfc_mcqe {
#define LPFC_TRAILER_CODE_LINK 0x1
#define LPFC_TRAILER_CODE_FCOE 0x2
#define LPFC_TRAILER_CODE_DCBX 0x3
+#define LPFC_TRAILER_CODE_GRP5 0x5
};
struct lpfc_acqe_link {
@@ -2168,6 +2211,19 @@ struct lpfc_acqe_dcbx {
uint32_t trailer;
};
+struct lpfc_acqe_grp5 {
+ uint32_t word0;
+#define lpfc_acqe_grp5_pport_SHIFT 0
+#define lpfc_acqe_grp5_pport_MASK 0x000000FF
+#define lpfc_acqe_grp5_pport_WORD word0
+ uint32_t word1;
+#define lpfc_acqe_grp5_llink_spd_SHIFT 16
+#define lpfc_acqe_grp5_llink_spd_MASK 0x0000FFFF
+#define lpfc_acqe_grp5_llink_spd_WORD word1
+ uint32_t event_tag;
+ uint32_t trailer;
+};
+
/*
* Define the bootstrap mailbox (bmbx) region used to communicate
* mailbox command between the host and port. The mailbox consists
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 774663e8e1fe..cd9697edf860 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -2566,7 +2566,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
shost->max_cmd_len = 16;
if (phba->sli_rev == LPFC_SLI_REV4) {
shost->dma_boundary =
- phba->sli4_hba.pc_sli4_params.sge_supp_len;
+ phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
shost->sg_tablesize = phba->cfg_sg_seg_cnt;
}
@@ -2600,15 +2600,6 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
init_timer(&vport->els_tmofunc);
vport->els_tmofunc.function = lpfc_els_timeout;
vport->els_tmofunc.data = (unsigned long)vport;
- if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
- phba->menlo_flag |= HBA_MENLO_SUPPORT;
- /* check for menlo minimum sg count */
- if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) {
- phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
- shost->sg_tablesize = phba->cfg_sg_seg_cnt;
- }
- }
-
error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
if (error)
goto out_put_shost;
@@ -3236,12 +3227,26 @@ lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
if (!vport)
return NULL;
- ndlp = lpfc_findnode_did(vport, Fabric_DID);
- if (!ndlp)
- return NULL;
phba = vport->phba;
if (!phba)
return NULL;
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (!ndlp) {
+ /* Cannot find existing Fabric ndlp, so allocate a new one */
+ ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ if (!ndlp)
+ return 0;
+ lpfc_nlp_init(vport, ndlp, Fabric_DID);
+ /* Set the node type */
+ ndlp->nlp_type |= NLP_FABRIC;
+ /* Put ndlp onto node list */
+ lpfc_enqueue_node(vport, ndlp);
+ } else if (!NLP_CHK_NODE_ACT(ndlp)) {
+ /* re-setup ndlp without removing from node list */
+ ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
+ if (!ndlp)
+ return 0;
+ }
if (phba->pport->port_state <= LPFC_FLOGI)
return NULL;
/* If virtual link is not yet instantiated ignore CVL */
@@ -3304,11 +3309,20 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
switch (event_type) {
case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD:
- lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
- "2546 New FCF found/FCF parameter modified event: "
- "evt_tag:x%x, fcf_index:x%x\n",
- acqe_fcoe->event_tag, acqe_fcoe->index);
-
+ if (event_type == LPFC_FCOE_EVENT_TYPE_NEW_FCF)
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
+ LOG_DISCOVERY,
+ "2546 New FCF found event: "
+ "evt_tag:x%x, fcf_index:x%x\n",
+ acqe_fcoe->event_tag,
+ acqe_fcoe->index);
+ else
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
+ LOG_DISCOVERY,
+ "2788 FCF parameter modified event: "
+ "evt_tag:x%x, fcf_index:x%x\n",
+ acqe_fcoe->event_tag,
+ acqe_fcoe->index);
spin_lock_irq(&phba->hbalock);
if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) ||
(phba->hba_flag & FCF_DISC_INPROGRESS)) {
@@ -3517,6 +3531,32 @@ lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
}
/**
+ * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_link: pointer to the async grp5 completion queue entry.
+ *
+ * This routine handles the SLI4 asynchronous grp5 event. A grp5 event is
+ * an asynchronous notification of a logical link speed change. The port
+ * reports the logical link speed in units of 10Mbps.
+ **/
+static void
+lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
+ struct lpfc_acqe_grp5 *acqe_grp5)
+{
+ uint16_t prev_ll_spd;
+
+ phba->fc_eventTag = acqe_grp5->event_tag;
+ phba->fcoe_eventtag = acqe_grp5->event_tag;
+ prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
+ phba->sli4_hba.link_state.logical_speed =
+ (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5));
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2789 GRP5 Async Event: Updating logical link speed "
+ "from %dMbps to %dMbps\n", (prev_ll_spd * 10),
+ (phba->sli4_hba.link_state.logical_speed*10));
+}
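
Since the port reports logical link speed in 10 Mbps units, the handler multiplies the raw value by 10 before logging. A trivial standalone illustration (not driver code; the raw value is made up):

#include <stdio.h>

int main(void)
{
	unsigned int raw = 100;	/* value as carried in the GRP5 ACQE */

	printf("logical link speed: %u Mbps\n", raw * 10);	/* 1000 Mbps */
	return 0;
}
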
+
+/**
* lpfc_sli4_async_event_proc - Process all the pending asynchronous event
* @phba: pointer to lpfc hba data structure.
*
@@ -3552,6 +3592,10 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
lpfc_sli4_async_dcbx_evt(phba,
&cq_event->cqe.acqe_dcbx);
break;
+ case LPFC_TRAILER_CODE_GRP5:
+ lpfc_sli4_async_grp5_evt(phba,
+ &cq_event->cqe.acqe_grp5);
+ break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"1804 Invalid asynchrous event code: "
@@ -3813,6 +3857,13 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
/* Get all the module params for configuring this host */
lpfc_get_cfgparam(phba);
+ if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
+ phba->menlo_flag |= HBA_MENLO_SUPPORT;
+ /* check for menlo minimum sg count */
+ if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
+ phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
+ }
+
/*
* Since the sg_tablesize is module parameter, the sg_dma_buf_size
* used to create the sg_dma_buf_pool must be dynamically calculated.
@@ -4030,6 +4081,43 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
if (unlikely(rc))
goto out_free_bsmbx;
+ mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
+ GFP_KERNEL);
+ if (!mboxq) {
+ rc = -ENOMEM;
+ goto out_free_bsmbx;
+ }
+
+ /* Get the Supported Pages. It is always available. */
+ lpfc_supported_pages(mboxq);
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (unlikely(rc)) {
+ rc = -EIO;
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ goto out_free_bsmbx;
+ }
+
+ mqe = &mboxq->u.mqe;
+ memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
+ LPFC_MAX_SUPPORTED_PAGES);
+ for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
+ switch (pn_page[i]) {
+ case LPFC_SLI4_PARAMETERS:
+ phba->sli4_hba.pc_sli4_params.supported = 1;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Read the port's SLI4 Parameters capabilities if supported. */
+ if (phba->sli4_hba.pc_sli4_params.supported)
+ rc = lpfc_pc_sli4_params_get(phba, mboxq);
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ if (rc) {
+ rc = -EIO;
+ goto out_free_bsmbx;
+ }
/* Create all the SLI4 queues */
rc = lpfc_sli4_queue_create(phba);
if (rc)
@@ -4090,43 +4178,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
goto out_free_fcp_eq_hdl;
}
- mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
- GFP_KERNEL);
- if (!mboxq) {
- rc = -ENOMEM;
- goto out_free_fcp_eq_hdl;
- }
-
- /* Get the Supported Pages. It is always available. */
- lpfc_supported_pages(mboxq);
- rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
- if (unlikely(rc)) {
- rc = -EIO;
- mempool_free(mboxq, phba->mbox_mem_pool);
- goto out_free_fcp_eq_hdl;
- }
-
- mqe = &mboxq->u.mqe;
- memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
- LPFC_MAX_SUPPORTED_PAGES);
- for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
- switch (pn_page[i]) {
- case LPFC_SLI4_PARAMETERS:
- phba->sli4_hba.pc_sli4_params.supported = 1;
- break;
- default:
- break;
- }
- }
-
- /* Read the port's SLI4 Parameters capabilities if supported. */
- if (phba->sli4_hba.pc_sli4_params.supported)
- rc = lpfc_pc_sli4_params_get(phba, mboxq);
- mempool_free(mboxq, phba->mbox_mem_pool);
- if (rc) {
- rc = -EIO;
- goto out_free_fcp_eq_hdl;
- }
return rc;
out_free_fcp_eq_hdl:
@@ -5050,6 +5101,8 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
+ phba->mbox_ext = (phba->slim2p.virt +
+ offsetof(struct lpfc_sli2_slim, mbx_ext_words));
phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
phba->IOCBs = (phba->slim2p.virt +
offsetof(struct lpfc_sli2_slim, IOCBs));
@@ -7753,21 +7806,23 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
* @phba: pointer to lpfc hba data structure.
*
* This routine is called to prepare the SLI3 device for PCI slot recover. It
- * aborts and stops all the on-going I/Os on the pci device.
+ * aborts all the outstanding SCSI I/Os to the pci device.
**/
static void
lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
{
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring;
+
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2723 PCI channel I/O abort preparing for recovery\n");
- /* Prepare for bringing HBA offline */
- lpfc_offline_prep(phba);
- /* Clear sli active flag to prevent sysfs access to HBA */
- spin_lock_irq(&phba->hbalock);
- phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
- spin_unlock_irq(&phba->hbalock);
- /* Stop and flush all I/Os and bring HBA offline */
- lpfc_offline(phba);
+
+ /*
+ * There may be errored I/Os through the HBA; abort all I/Os on the
+ * txcmplq and let the SCSI mid-layer retry them to recover.
+ */
+ pring = &psli->ring[psli->fcp_ring];
+ lpfc_sli_abort_iocb_ring(phba, pring);
}
/**
@@ -7781,21 +7836,20 @@ lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
static void
lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
{
- struct lpfc_sli *psli = &phba->sli;
- struct lpfc_sli_ring *pring;
-
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2710 PCI channel disable preparing for reset\n");
+
+ /* Block all SCSI devices' I/Os on the host */
+ lpfc_scsi_dev_block(phba);
+
+ /* stop all timers */
+ lpfc_stop_hba_timers(phba);
+
/* Disable interrupt and pci device */
lpfc_sli_disable_intr(phba);
pci_disable_device(phba->pcidev);
- /*
- * There may be I/Os dropped by the firmware.
- * Error iocb (I/O) on txcmplq and let the SCSI layer
- * retry it after re-establishing link.
- */
- pring = &psli->ring[psli->fcp_ring];
- lpfc_sli_abort_iocb_ring(phba, pring);
+ /* Flush all driver's outstanding SCSI I/Os as we are to reset */
+ lpfc_sli_flush_fcp_rings(phba);
}
/**
@@ -7811,6 +7865,12 @@ lpfc_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2711 PCI channel permanent disable for failure\n");
+ /* Block all SCSI devices' I/Os on the host */
+ lpfc_scsi_dev_block(phba);
+
+ /* stop all timers */
+ lpfc_stop_hba_timers(phba);
+
/* Clean up all driver's outstanding SCSI I/Os */
lpfc_sli_flush_fcp_rings(phba);
}
@@ -7839,9 +7899,6 @@ lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
- /* Block all SCSI devices' I/Os on the host */
- lpfc_scsi_dev_block(phba);
-
switch (state) {
case pci_channel_io_normal:
/* Non-fatal error, prepare for recovery */
@@ -7948,7 +8005,7 @@ lpfc_io_resume_s3(struct pci_dev *pdev)
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
- /* Bring the device online */
+ /* Bring device online, it will be no-op for non-fatal error resume */
lpfc_online(phba);
/* Clean up Advanced Error Reporting (AER) if needed */
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 72e6adb0643e..e84dc33ca201 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1216,7 +1216,7 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
phba->pcb->feature = FEATURE_INITIAL_SLI2;
/* Setup Mailbox pointers */
- phba->pcb->mailBoxSize = sizeof(MAILBOX_t);
+ phba->pcb->mailBoxSize = sizeof(MAILBOX_t) + MAILBOX_EXT_SIZE;
offset = (uint8_t *)phba->mbox - (uint8_t *)phba->slim2p.virt;
pdma_addr = phba->slim2p.phys + offset;
phba->pcb->mbAddrHigh = putPaddrHigh(pdma_addr);
@@ -1272,28 +1272,41 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
*
*/
- if (phba->sli_rev == 3) {
- phba->host_gp = &mb_slim->us.s3.host[0];
- phba->hbq_put = &mb_slim->us.s3.hbq_put[0];
- } else {
- phba->host_gp = &mb_slim->us.s2.host[0];
+ if (phba->cfg_hostmem_hgp && phba->sli_rev != 3) {
+ phba->host_gp = &phba->mbox->us.s2.host[0];
phba->hbq_put = NULL;
- }
+ offset = (uint8_t *)&phba->mbox->us.s2.host -
+ (uint8_t *)phba->slim2p.virt;
+ pdma_addr = phba->slim2p.phys + offset;
+ phba->pcb->hgpAddrHigh = putPaddrHigh(pdma_addr);
+ phba->pcb->hgpAddrLow = putPaddrLow(pdma_addr);
+ } else {
+ /* Always Host Group Pointer is in SLIM */
+ mb->un.varCfgPort.hps = 1;
- /* mask off BAR0's flag bits 0 - 3 */
- phba->pcb->hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
- (void __iomem *)phba->host_gp -
- (void __iomem *)phba->MBslimaddr;
- if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64)
- phba->pcb->hgpAddrHigh = bar_high;
- else
- phba->pcb->hgpAddrHigh = 0;
- /* write HGP data to SLIM at the required longword offset */
- memset(&hgp, 0, sizeof(struct lpfc_hgp));
+ if (phba->sli_rev == 3) {
+ phba->host_gp = &mb_slim->us.s3.host[0];
+ phba->hbq_put = &mb_slim->us.s3.hbq_put[0];
+ } else {
+ phba->host_gp = &mb_slim->us.s2.host[0];
+ phba->hbq_put = NULL;
+ }
- for (i=0; i < phba->sli.num_rings; i++) {
- lpfc_memcpy_to_slim(phba->host_gp + i, &hgp,
+ /* mask off BAR0's flag bits 0 - 3 */
+ phba->pcb->hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
+ (void __iomem *)phba->host_gp -
+ (void __iomem *)phba->MBslimaddr;
+ if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64)
+ phba->pcb->hgpAddrHigh = bar_high;
+ else
+ phba->pcb->hgpAddrHigh = 0;
+ /* write HGP data to SLIM at the required longword offset */
+ memset(&hgp, 0, sizeof(struct lpfc_hgp));
+
+ for (i = 0; i < phba->sli.num_rings; i++) {
+ lpfc_memcpy_to_slim(phba->host_gp + i, &hgp,
sizeof(*phba->host_gp));
+ }
}
/* Setup Port Group offset */
@@ -1598,7 +1611,7 @@ lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
for (sgentry = 0; sgentry < sgecount; sgentry++) {
lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge);
phyaddr = getPaddr(sge.pa_hi, sge.pa_lo);
- dma_free_coherent(&phba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
mbox->sge_array->addr[sgentry], phyaddr);
}
/* Free the sge address array memory */
@@ -1656,7 +1669,7 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
}
/* Setup for the none-embedded mbox command */
- pcount = (PAGE_ALIGN(length))/PAGE_SIZE;
+ pcount = (PAGE_ALIGN(length))/SLI4_PAGE_SIZE;
pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
/* Allocate record for keeping SGE virtual addresses */
@@ -1671,24 +1684,24 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) {
/* The DMA memory is always allocated in the length of a
* page even though the last SGE might not fill up to a
- * page, this is used as a priori size of PAGE_SIZE for
+ * page; this is used as the a priori size of SLI4_PAGE_SIZE for
* the later DMA memory free.
*/
- viraddr = dma_alloc_coherent(&phba->pcidev->dev, PAGE_SIZE,
+ viraddr = dma_alloc_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
&phyaddr, GFP_KERNEL);
/* In case of malloc fails, proceed with whatever we have */
if (!viraddr)
break;
- memset(viraddr, 0, PAGE_SIZE);
+ memset(viraddr, 0, SLI4_PAGE_SIZE);
mbox->sge_array->addr[pagen] = viraddr;
/* Keep the first page for later sub-header construction */
if (pagen == 0)
cfg_shdr = (union lpfc_sli4_cfg_shdr *)viraddr;
resid_len = length - alloc_len;
- if (resid_len > PAGE_SIZE) {
+ if (resid_len > SLI4_PAGE_SIZE) {
lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
- PAGE_SIZE);
- alloc_len += PAGE_SIZE;
+ SLI4_PAGE_SIZE);
+ alloc_len += SLI4_PAGE_SIZE;
} else {
lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
resid_len);
@@ -1886,6 +1899,8 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
+ reg_vfi->e_d_tov = vport->phba->fc_edtov;
+ reg_vfi->r_a_tov = vport->phba->fc_ratov;
reg_vfi->bde.addrHigh = putPaddrHigh(phys);
reg_vfi->bde.addrLow = putPaddrLow(phys);
reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index e331204a4d56..b90820a699fd 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -493,6 +493,9 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb, uint32_t els_cmd)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_vport **vports;
+ int i, active_vlink_present = 0;
/* Put ndlp in NPR state with 1 sec timeout for plogi, ACC logo */
/* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
@@ -505,15 +508,44 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
else
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
- if ((ndlp->nlp_DID == Fabric_DID) &&
- vport->port_type == LPFC_NPIV_PORT) {
+ if (ndlp->nlp_DID == Fabric_DID) {
+ if (vport->port_state <= LPFC_FDISC)
+ goto out;
lpfc_linkdown_port(vport);
- mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag |= NLP_DELAY_TMO;
+ vport->fc_flag |= FC_VPORT_LOGO_RCVD;
spin_unlock_irq(shost->host_lock);
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports) {
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL;
+ i++) {
+ if ((!(vports[i]->fc_flag &
+ FC_VPORT_LOGO_RCVD)) &&
+ (vports[i]->port_state > LPFC_FDISC)) {
+ active_vlink_present = 1;
+ break;
+ }
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+ }
- ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
+ if (active_vlink_present) {
+ /*
+ * If there are other active VLinks present,
+ * re-instantiate the Vlink using FDISC.
+ */
+ mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_DELAY_TMO;
+ spin_unlock_irq(shost->host_lock);
+ ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
+ vport->port_state = LPFC_FDISC;
+ } else {
+ spin_lock_irq(shost->host_lock);
+ phba->pport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_retry_pport_discovery(phba);
+ }
} else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
((ndlp->nlp_type & NLP_FCP_TARGET) ||
!(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
@@ -526,6 +558,7 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
}
+out:
ndlp->nlp_prev_state = ndlp->nlp_state;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
@@ -604,11 +637,55 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_unreg_rpi(vport, ndlp);
return 0;
}
+/**
+ * lpfc_release_rpi - Release an RPI by issuing an unreg_login mailbox cmd.
+ * @phba: Pointer to lpfc_hba structure.
+ * @vport: Pointer to lpfc_vport structure.
+ * @rpi: rpi to be released.
+ *
+ * This function sends an unreg_login mailbox command to the firmware
+ * to release an rpi.
+ **/
+void
+lpfc_release_rpi(struct lpfc_hba *phba,
+ struct lpfc_vport *vport,
+ uint16_t rpi)
+{
+ LPFC_MBOXQ_t *pmb;
+ int rc;
+
+ pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
+ GFP_KERNEL);
+ if (!pmb)
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+ "2796 mailbox memory allocation failed \n");
+ else {
+ lpfc_unreg_login(phba, vport->vpi, rpi, pmb);
+ pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED)
+ mempool_free(pmb, phba->mbox_mem_pool);
+ }
+}
static uint32_t
lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
+ struct lpfc_hba *phba;
+ LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
+ MAILBOX_t *mb;
+ uint16_t rpi;
+
+ phba = vport->phba;
+ /* Release the RPI if reglogin completing */
+ if (!(phba->pport->load_flag & FC_UNLOADING) &&
+ (evt == NLP_EVT_CMPL_REG_LOGIN) &&
+ (!pmb->u.mb.mbxStatus)) {
+ mb = &pmb->u.mb;
+ rpi = pmb->u.mb.un.varWords[0];
+ lpfc_release_rpi(phba, vport, rpi);
+ }
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
"0271 Illegal State Transition: node x%x "
"event x%x, state x%x Data: x%x x%x\n",
@@ -944,6 +1021,18 @@ static uint32_t
lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp, void *arg, uint32_t evt)
{
+ struct lpfc_hba *phba;
+ LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
+ MAILBOX_t *mb = &pmb->u.mb;
+ uint16_t rpi;
+
+ phba = vport->phba;
+ /* Release the RPI */
+ if (!(phba->pport->load_flag & FC_UNLOADING) &&
+ !mb->mbxStatus) {
+ rpi = pmb->u.mb.un.varWords[0];
+ lpfc_release_rpi(phba, vport, rpi);
+ }
return ndlp->nlp_state;
}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index dccdb822328c..f4a3b2e79eea 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1141,37 +1141,47 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
}
/*
- * Given a scsi cmnd, determine the BlockGuard profile to be used
- * with the cmd
+ * Given a scsi cmnd, determine the BlockGuard opcodes to be used with it
+ * @sc: The SCSI command to examine
+ * @txop: (out) BlockGuard operation for transmitted data
+ * @rxop: (out) BlockGuard operation for received data
+ *
+ * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
+ *
*/
static int
-lpfc_sc_to_sli_prof(struct lpfc_hba *phba, struct scsi_cmnd *sc)
+lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ uint8_t *txop, uint8_t *rxop)
{
uint8_t guard_type = scsi_host_get_guard(sc->device->host);
- uint8_t ret_prof = LPFC_PROF_INVALID;
+ uint8_t ret = 0;
if (guard_type == SHOST_DIX_GUARD_IP) {
switch (scsi_get_prot_op(sc)) {
case SCSI_PROT_READ_INSERT:
case SCSI_PROT_WRITE_STRIP:
- ret_prof = LPFC_PROF_AST2;
+ *txop = BG_OP_IN_CSUM_OUT_NODIF;
+ *rxop = BG_OP_IN_NODIF_OUT_CSUM;
break;
case SCSI_PROT_READ_STRIP:
case SCSI_PROT_WRITE_INSERT:
- ret_prof = LPFC_PROF_A1;
+ *txop = BG_OP_IN_NODIF_OUT_CRC;
+ *rxop = BG_OP_IN_CRC_OUT_NODIF;
break;
case SCSI_PROT_READ_PASS:
case SCSI_PROT_WRITE_PASS:
- ret_prof = LPFC_PROF_AST1;
+ *txop = BG_OP_IN_CSUM_OUT_CRC;
+ *rxop = BG_OP_IN_CRC_OUT_CSUM;
break;
case SCSI_PROT_NORMAL:
default:
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9063 BLKGRD:Bad op/guard:%d/%d combination\n",
+ "9063 BLKGRD: Bad op/guard:%d/%d combination\n",
scsi_get_prot_op(sc), guard_type);
+ ret = 1;
break;
}
@@ -1179,12 +1189,14 @@ lpfc_sc_to_sli_prof(struct lpfc_hba *phba, struct scsi_cmnd *sc)
switch (scsi_get_prot_op(sc)) {
case SCSI_PROT_READ_STRIP:
case SCSI_PROT_WRITE_INSERT:
- ret_prof = LPFC_PROF_A1;
+ *txop = BG_OP_IN_NODIF_OUT_CRC;
+ *rxop = BG_OP_IN_CRC_OUT_NODIF;
break;
case SCSI_PROT_READ_PASS:
case SCSI_PROT_WRITE_PASS:
- ret_prof = LPFC_PROF_C1;
+ *txop = BG_OP_IN_CRC_OUT_CRC;
+ *rxop = BG_OP_IN_CRC_OUT_CRC;
break;
case SCSI_PROT_READ_INSERT:
@@ -1194,6 +1206,7 @@ lpfc_sc_to_sli_prof(struct lpfc_hba *phba, struct scsi_cmnd *sc)
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
"9075 BLKGRD: Bad op/guard:%d/%d combination\n",
scsi_get_prot_op(sc), guard_type);
+ ret = 1;
break;
}
} else {
@@ -1201,7 +1214,7 @@ lpfc_sc_to_sli_prof(struct lpfc_hba *phba, struct scsi_cmnd *sc)
BUG();
}
- return ret_prof;
+ return ret;
}
struct scsi_dif_tuple {
@@ -1266,7 +1279,9 @@ lpfc_get_cmd_dif_parms(struct scsi_cmnd *sc, uint16_t *apptagmask,
* The buffer list consists of just one protection group described
* below:
* +-------------------------+
- * start of prot group --> | PDE_1 |
+ * start of prot group --> | PDE_5 |
+ * +-------------------------+
+ * | PDE_6 |
* +-------------------------+
* | Data BDE |
* +-------------------------+
@@ -1284,30 +1299,49 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
struct ulp_bde64 *bpl, int datasegcnt)
{
struct scatterlist *sgde = NULL; /* s/g data entry */
- struct lpfc_pde *pde1 = NULL;
+ struct lpfc_pde5 *pde5 = NULL;
+ struct lpfc_pde6 *pde6 = NULL;
dma_addr_t physaddr;
- int i = 0, num_bde = 0;
+ int i = 0, num_bde = 0, status;
int datadir = sc->sc_data_direction;
- int prof = LPFC_PROF_INVALID;
unsigned blksize;
uint32_t reftag;
uint16_t apptagmask, apptagval;
+ uint8_t txop, rxop;
- pde1 = (struct lpfc_pde *) bpl;
- prof = lpfc_sc_to_sli_prof(phba, sc);
-
- if (prof == LPFC_PROF_INVALID)
+ status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
+ if (status)
goto out;
- /* extract some info from the scsi command for PDE1*/
+ /* extract some info from the scsi command for pde*/
blksize = lpfc_cmd_blksize(sc);
lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);
- /* setup PDE1 with what we have */
- lpfc_pde_set_bg_parms(pde1, LPFC_PDE1_DESCRIPTOR, prof, blksize,
- BG_EC_STOP_ERR);
- lpfc_pde_set_dif_parms(pde1, apptagmask, apptagval, reftag);
+ /* setup PDE5 with what we have */
+ pde5 = (struct lpfc_pde5 *) bpl;
+ memset(pde5, 0, sizeof(struct lpfc_pde5));
+ bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
+ pde5->reftag = reftag;
+ /* advance bpl and increment bde count */
+ num_bde++;
+ bpl++;
+ pde6 = (struct lpfc_pde6 *) bpl;
+
+ /* setup PDE6 with the rest of the info */
+ memset(pde6, 0, sizeof(struct lpfc_pde6));
+ bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
+ bf_set(pde6_optx, pde6, txop);
+ bf_set(pde6_oprx, pde6, rxop);
+ if (datadir == DMA_FROM_DEVICE) {
+ bf_set(pde6_ce, pde6, 1);
+ bf_set(pde6_re, pde6, 1);
+ bf_set(pde6_ae, pde6, 1);
+ }
+ bf_set(pde6_ai, pde6, 1);
+ bf_set(pde6_apptagval, pde6, apptagval);
+
+ /* advance bpl and increment bde count */
num_bde++;
bpl++;
@@ -1342,15 +1376,17 @@ out:
* The buffer list for this type consists of one or more of the
* protection groups described below:
* +-------------------------+
- * start of first prot group --> | PDE_1 |
+ * start of first prot group --> | PDE_5 |
+ * +-------------------------+
+ * | PDE_6 |
* +-------------------------+
- * | PDE_3 (Prot BDE) |
+ * | PDE_7 (Prot BDE) |
* +-------------------------+
* | Data BDE |
* +-------------------------+
* |more Data BDE's ... (opt)|
* +-------------------------+
- * start of new prot group --> | PDE_1 |
+ * start of new prot group --> | PDE_5 |
* +-------------------------+
* | ... |
* +-------------------------+
@@ -1369,19 +1405,21 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
{
struct scatterlist *sgde = NULL; /* s/g data entry */
struct scatterlist *sgpe = NULL; /* s/g prot entry */
- struct lpfc_pde *pde1 = NULL;
+ struct lpfc_pde5 *pde5 = NULL;
+ struct lpfc_pde6 *pde6 = NULL;
struct ulp_bde64 *prot_bde = NULL;
dma_addr_t dataphysaddr, protphysaddr;
unsigned short curr_data = 0, curr_prot = 0;
unsigned int split_offset, protgroup_len;
unsigned int protgrp_blks, protgrp_bytes;
unsigned int remainder, subtotal;
- int prof = LPFC_PROF_INVALID;
+ int status;
int datadir = sc->sc_data_direction;
unsigned char pgdone = 0, alldone = 0;
unsigned blksize;
uint32_t reftag;
uint16_t apptagmask, apptagval;
+ uint8_t txop, rxop;
int num_bde = 0;
sgpe = scsi_prot_sglist(sc);
@@ -1394,31 +1432,47 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
return 0;
}
- prof = lpfc_sc_to_sli_prof(phba, sc);
- if (prof == LPFC_PROF_INVALID)
+ status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
+ if (status)
goto out;
- /* extract some info from the scsi command for PDE1*/
+ /* extract some info from the scsi command */
blksize = lpfc_cmd_blksize(sc);
lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);
split_offset = 0;
do {
- /* setup the first PDE_1 */
- pde1 = (struct lpfc_pde *) bpl;
-
- lpfc_pde_set_bg_parms(pde1, LPFC_PDE1_DESCRIPTOR, prof, blksize,
- BG_EC_STOP_ERR);
- lpfc_pde_set_dif_parms(pde1, apptagmask, apptagval, reftag);
+ /* setup PDE5 with what we have */
+ pde5 = (struct lpfc_pde5 *) bpl;
+ memset(pde5, 0, sizeof(struct lpfc_pde5));
+ bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
+ pde5->reftag = reftag;
+ /* advance bpl and increment bde count */
+ num_bde++;
+ bpl++;
+ pde6 = (struct lpfc_pde6 *) bpl;
+
+ /* setup PDE6 with the rest of the info */
+ memset(pde6, 0, sizeof(struct lpfc_pde6));
+ bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
+ bf_set(pde6_optx, pde6, txop);
+ bf_set(pde6_oprx, pde6, rxop);
+ bf_set(pde6_ce, pde6, 1);
+ bf_set(pde6_re, pde6, 1);
+ bf_set(pde6_ae, pde6, 1);
+ bf_set(pde6_ai, pde6, 1);
+ bf_set(pde6_apptagval, pde6, apptagval);
+
+ /* advance bpl and increment bde count */
num_bde++;
bpl++;
/* setup the first BDE that points to protection buffer */
prot_bde = (struct ulp_bde64 *) bpl;
protphysaddr = sg_dma_address(sgpe);
- prot_bde->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
- prot_bde->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
+ prot_bde->addrHigh = le32_to_cpu(putPaddrLow(protphysaddr));
+ prot_bde->addrLow = le32_to_cpu(putPaddrHigh(protphysaddr));
protgroup_len = sg_dma_len(sgpe);
@@ -1429,10 +1483,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
protgrp_bytes = protgrp_blks * blksize;
prot_bde->tus.f.bdeSize = protgroup_len;
- if (datadir == DMA_TO_DEVICE)
- prot_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
- else
- prot_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+ prot_bde->tus.f.bdeFlags = LPFC_PDE7_DESCRIPTOR;
prot_bde->tus.w = le32_to_cpu(bpl->tus.w);
curr_prot++;
@@ -1484,6 +1535,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
/* Move to the next s/g segment if possible */
sgde = sg_next(sgde);
+
}
/* are we done ? */
@@ -1506,7 +1558,6 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
out:
-
return num_bde;
}
/*
@@ -1828,8 +1879,8 @@ out:
* field of @lpfc_cmd for device with SLI-4 interface spec.
*
* Return codes:
- * 1 - Error
- * 0 - Success
+ * 1 - Error
+ * 0 - Success
**/
static int
lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
@@ -1937,8 +1988,8 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
* lpfc_hba struct.
*
* Return codes:
- * 1 - Error
- * 0 - Success
+ * 1 - Error
+ * 0 - Success
**/
static inline int
lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 049fb9a17b3f..7a61455140b6 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -212,7 +212,7 @@ lpfc_sli4_eq_get(struct lpfc_queue *q)
struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;
/* If the next EQE is not valid then we are done */
- if (!bf_get(lpfc_eqe_valid, eqe))
+ if (!bf_get_le32(lpfc_eqe_valid, eqe))
return NULL;
/* If the host has not yet processed the next entry then we are done */
if (((q->hba_index + 1) % q->entry_count) == q->host_index)
@@ -247,7 +247,7 @@ lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
/* while there are valid entries */
while (q->hba_index != q->host_index) {
temp_eqe = q->qe[q->host_index].eqe;
- bf_set(lpfc_eqe_valid, temp_eqe, 0);
+ bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
released++;
q->host_index = ((q->host_index + 1) % q->entry_count);
}
@@ -285,7 +285,7 @@ lpfc_sli4_cq_get(struct lpfc_queue *q)
struct lpfc_cqe *cqe;
/* If the next CQE is not valid then we are done */
- if (!bf_get(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
+ if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
return NULL;
/* If the host has not yet processed the next entry then we are done */
if (((q->hba_index + 1) % q->entry_count) == q->host_index)
@@ -321,7 +321,7 @@ lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
/* while there are valid entries */
while (q->hba_index != q->host_index) {
temp_qe = q->qe[q->host_index].cqe;
- bf_set(lpfc_cqe_valid, temp_qe, 0);
+ bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
released++;
q->host_index = ((q->host_index + 1) % q->entry_count);
}
@@ -1659,6 +1659,8 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
case MBX_INIT_VPI:
case MBX_INIT_VFI:
case MBX_RESUME_RPI:
+ case MBX_READ_EVENT_LOG_STATUS:
+ case MBX_READ_EVENT_LOG:
ret = mbxCommand;
break;
default:
@@ -4296,7 +4298,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
"2570 Failed to read FCoE parameters\n");
/* Issue READ_REV to collect vpd and FW information. */
- vpd_size = PAGE_SIZE;
+ vpd_size = SLI4_PAGE_SIZE;
vpd = kzalloc(vpd_size, GFP_KERNEL);
if (!vpd) {
rc = -ENOMEM;
@@ -4891,9 +4893,34 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
mb->mbxOwner = OWN_CHIP;
if (psli->sli_flag & LPFC_SLI_ACTIVE) {
- /* First copy command data to host SLIM area */
+ /* Populate mbox extension offset word. */
+ if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
+ *(((uint32_t *)mb) + pmbox->mbox_offset_word)
+ = (uint8_t *)phba->mbox_ext
+ - (uint8_t *)phba->mbox;
+ }
+
+ /* Copy the mailbox extension data */
+ if (pmbox->in_ext_byte_len && pmbox->context2) {
+ lpfc_sli_pcimem_bcopy(pmbox->context2,
+ (uint8_t *)phba->mbox_ext,
+ pmbox->in_ext_byte_len);
+ }
+ /* Copy command data to host SLIM area */
lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
} else {
+ /* Populate mbox extension offset word. */
+ if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
+ *(((uint32_t *)mb) + pmbox->mbox_offset_word)
+ = MAILBOX_HBA_EXT_OFFSET;
+
+ /* Copy the mailbox extension data */
+ if (pmbox->in_ext_byte_len && pmbox->context2) {
+ lpfc_memcpy_to_slim(phba->MBslimaddr +
+ MAILBOX_HBA_EXT_OFFSET,
+ pmbox->context2, pmbox->in_ext_byte_len);
+
+ }
if (mb->mbxCommand == MBX_CONFIG_PORT) {
/* copy command data into host mbox for cmpl */
lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
@@ -5003,15 +5030,22 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
if (psli->sli_flag & LPFC_SLI_ACTIVE) {
/* copy results back to user */
lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
+ /* Copy the mailbox extension data */
+ if (pmbox->out_ext_byte_len && pmbox->context2) {
+ lpfc_sli_pcimem_bcopy(phba->mbox_ext,
+ pmbox->context2,
+ pmbox->out_ext_byte_len);
+ }
} else {
/* First copy command data */
lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
MAILBOX_CMD_SIZE);
- if ((mb->mbxCommand == MBX_DUMP_MEMORY) &&
- pmbox->context2) {
- lpfc_memcpy_from_slim((void *)pmbox->context2,
- phba->MBslimaddr + DMP_RSP_OFFSET,
- mb->un.varDmp.word_cnt);
+ /* Copy the mailbox extension data */
+ if (pmbox->out_ext_byte_len && pmbox->context2) {
+ lpfc_memcpy_from_slim(pmbox->context2,
+ phba->MBslimaddr +
+ MAILBOX_HBA_EXT_OFFSET,
+ pmbox->out_ext_byte_len);
}
}
@@ -7104,13 +7138,11 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
*/
list_del_init(&abort_iocb->list);
pring->txcmplq_cnt--;
- spin_unlock_irq(&phba->hbalock);
/* Firmware could still be in progress of DMAing
* payload, so don't free data buffer till after
* a hbeat.
*/
- spin_lock_irq(&phba->hbalock);
abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE;
abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
spin_unlock_irq(&phba->hbalock);
@@ -7118,7 +7150,8 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
abort_iocb->iocb.un.ulpWord[4] = IOERR_ABORT_REQUESTED;
(abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
- }
+ } else
+ spin_unlock_irq(&phba->hbalock);
}
lpfc_sli_release_iocbq(phba, cmdiocb);
@@ -8133,6 +8166,12 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
if (pmb->mbox_cmpl) {
lpfc_sli_pcimem_bcopy(mbox, pmbox,
MAILBOX_CMD_SIZE);
+ if (pmb->out_ext_byte_len &&
+ pmb->context2)
+ lpfc_sli_pcimem_bcopy(
+ phba->mbox_ext,
+ pmb->context2,
+ pmb->out_ext_byte_len);
}
if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
@@ -8983,17 +9022,17 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
int ecount = 0;
uint16_t cqid;
- if (bf_get(lpfc_eqe_major_code, eqe) != 0) {
+ if (bf_get_le32(lpfc_eqe_major_code, eqe) != 0) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0359 Not a valid slow-path completion "
"event: majorcode=x%x, minorcode=x%x\n",
- bf_get(lpfc_eqe_major_code, eqe),
- bf_get(lpfc_eqe_minor_code, eqe));
+ bf_get_le32(lpfc_eqe_major_code, eqe),
+ bf_get_le32(lpfc_eqe_minor_code, eqe));
return;
}
/* Get the reference to the corresponding CQ */
- cqid = bf_get(lpfc_eqe_resource_id, eqe);
+ cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
/* Search for completion queue pointer matching this cqid */
speq = phba->sli4_hba.sp_eq;
@@ -9221,12 +9260,12 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
uint16_t cqid;
int ecount = 0;
- if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0)) {
+ if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0366 Not a valid fast-path completion "
"event: majorcode=x%x, minorcode=x%x\n",
- bf_get(lpfc_eqe_major_code, eqe),
- bf_get(lpfc_eqe_minor_code, eqe));
+ bf_get_le32(lpfc_eqe_major_code, eqe),
+ bf_get_le32(lpfc_eqe_minor_code, eqe));
return;
}
@@ -9239,7 +9278,7 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
}
/* Get the reference to the corresponding CQ */
- cqid = bf_get(lpfc_eqe_resource_id, eqe);
+ cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
if (unlikely(cqid != cq->queue_id)) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0368 Miss-matched fast-path completion "
@@ -9506,7 +9545,7 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
while (!list_empty(&queue->page_list)) {
list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
list);
- dma_free_coherent(&queue->phba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
dmabuf->virt, dmabuf->phys);
kfree(dmabuf);
}
@@ -9532,13 +9571,17 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
struct lpfc_dmabuf *dmabuf;
int x, total_qe_count;
void *dma_pointer;
+ uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+ if (!phba->sli4_hba.pc_sli4_params.supported)
+ hw_page_size = SLI4_PAGE_SIZE;
queue = kzalloc(sizeof(struct lpfc_queue) +
(sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
if (!queue)
return NULL;
- queue->page_count = (PAGE_ALIGN(entry_size * entry_count))/PAGE_SIZE;
+ queue->page_count = (ALIGN(entry_size * entry_count,
+ hw_page_size))/hw_page_size;
INIT_LIST_HEAD(&queue->list);
INIT_LIST_HEAD(&queue->page_list);
INIT_LIST_HEAD(&queue->child_list);
@@ -9547,19 +9590,19 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
if (!dmabuf)
goto out_fail;
dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
- PAGE_SIZE, &dmabuf->phys,
+ hw_page_size, &dmabuf->phys,
GFP_KERNEL);
if (!dmabuf->virt) {
kfree(dmabuf);
goto out_fail;
}
- memset(dmabuf->virt, 0, PAGE_SIZE);
+ memset(dmabuf->virt, 0, hw_page_size);
dmabuf->buffer_tag = x;
list_add_tail(&dmabuf->list, &queue->page_list);
/* initialize queue's entry array */
dma_pointer = dmabuf->virt;
for (; total_qe_count < entry_count &&
- dma_pointer < (PAGE_SIZE + dmabuf->virt);
+ dma_pointer < (hw_page_size + dmabuf->virt);
total_qe_count++, dma_pointer += entry_size) {
queue->qe[total_qe_count].address = dma_pointer;
}
@@ -9604,6 +9647,10 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
uint16_t dmult;
+ uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+ if (!phba->sli4_hba.pc_sli4_params.supported)
+ hw_page_size = SLI4_PAGE_SIZE;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
@@ -9653,6 +9700,7 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
break;
}
list_for_each_entry(dmabuf, &eq->page_list, list) {
+ memset(dmabuf->virt, 0, hw_page_size);
eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
putPaddrLow(dmabuf->phys);
eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
@@ -9715,6 +9763,11 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
int rc, length, status = 0;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+ if (!phba->sli4_hba.pc_sli4_params.supported)
+ hw_page_size = SLI4_PAGE_SIZE;
+
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
@@ -9752,6 +9805,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
break;
}
list_for_each_entry(dmabuf, &cq->page_list, list) {
+ memset(dmabuf->virt, 0, hw_page_size);
cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
putPaddrLow(dmabuf->phys);
cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
@@ -9791,9 +9845,70 @@ out:
}
/**
+ * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @mq: The queue structure to use to create the mailbox queue.
+ * @mbox: An allocated pointer to type LPFC_MBOXQ_t
+ * @cq: The completion queue to associate with this mq.
+ *
+ * This function provides failback (fb) functionality when the
+ * mq_create_ext fails on older FW generations. Its purpose is identical
+ * to mq_create_ext otherwise.
+ *
+ * This routine cannot fail as all attributes were previously accessed and
+ * initialized in mq_create_ext.
+ **/
+static void
+lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
+ LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
+{
+ struct lpfc_mbx_mq_create *mq_create;
+ struct lpfc_dmabuf *dmabuf;
+ int length;
+
+ length = (sizeof(struct lpfc_mbx_mq_create) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_MQ_CREATE,
+ length, LPFC_SLI4_MBX_EMBED);
+ mq_create = &mbox->u.mqe.un.mq_create;
+ bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
+ mq->page_count);
+ bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
+ cq->queue_id);
+ bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
+ switch (mq->entry_count) {
+ case 16:
+ bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+ LPFC_MQ_CNT_16);
+ break;
+ case 32:
+ bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+ LPFC_MQ_CNT_32);
+ break;
+ case 64:
+ bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+ LPFC_MQ_CNT_64);
+ break;
+ case 128:
+ bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+ LPFC_MQ_CNT_128);
+ break;
+ }
+ list_for_each_entry(dmabuf, &mq->page_list, list) {
+ mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+ putPaddrLow(dmabuf->phys);
+ mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+ putPaddrHigh(dmabuf->phys);
+ }
+}
+
+/**
* lpfc_mq_create - Create a mailbox Queue on the HBA
* @phba: HBA structure that indicates port to create a queue on.
* @mq: The queue structure to use to create the mailbox queue.
+ * @cq: The completion queue to associate with this mq.
+ * @subtype: The queue's subtype.
*
* This function creates a mailbox queue, as detailed in @mq, on a port,
* described by @phba by sending a MQ_CREATE mailbox command to the HBA.
@@ -9809,31 +9924,43 @@ out:
* memory this function will return ENOMEM. If the queue create mailbox command
* fails this function will return ENXIO.
**/
-uint32_t
+int32_t
lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
struct lpfc_queue *cq, uint32_t subtype)
{
struct lpfc_mbx_mq_create *mq_create;
+ struct lpfc_mbx_mq_create_ext *mq_create_ext;
struct lpfc_dmabuf *dmabuf;
LPFC_MBOXQ_t *mbox;
int rc, length, status = 0;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+ if (!phba->sli4_hba.pc_sli4_params.supported)
+ hw_page_size = SLI4_PAGE_SIZE;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return -ENOMEM;
- length = (sizeof(struct lpfc_mbx_mq_create) -
+ length = (sizeof(struct lpfc_mbx_mq_create_ext) -
sizeof(struct lpfc_sli4_cfg_mhdr));
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
- LPFC_MBOX_OPCODE_MQ_CREATE,
+ LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
length, LPFC_SLI4_MBX_EMBED);
- mq_create = &mbox->u.mqe.un.mq_create;
- bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
+
+ mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
+ bf_set(lpfc_mbx_mq_create_ext_num_pages, &mq_create_ext->u.request,
mq->page_count);
- bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
- cq->queue_id);
- bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
+ bf_set(lpfc_mbx_mq_create_ext_async_evt_link, &mq_create_ext->u.request,
+ 1);
+ bf_set(lpfc_mbx_mq_create_ext_async_evt_fcfste,
+ &mq_create_ext->u.request, 1);
+ bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
+ &mq_create_ext->u.request, 1);
+ bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
+ cq->queue_id);
+ bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
switch (mq->entry_count) {
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -9843,31 +9970,47 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
return -EINVAL;
/* otherwise default to smallest count (drop through) */
case 16:
- bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+ bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
LPFC_MQ_CNT_16);
break;
case 32:
- bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+ bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
LPFC_MQ_CNT_32);
break;
case 64:
- bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+ bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
LPFC_MQ_CNT_64);
break;
case 128:
- bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+ bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
LPFC_MQ_CNT_128);
break;
}
list_for_each_entry(dmabuf, &mq->page_list, list) {
- mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+ memset(dmabuf->virt, 0, hw_page_size);
+ mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
putPaddrLow(dmabuf->phys);
- mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+ mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
putPaddrHigh(dmabuf->phys);
}
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
+ mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
+ &mq_create_ext->u.response);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2795 MQ_CREATE_EXT failed with "
+ "status x%x. Failback to MQ_CREATE.\n",
+ rc);
+ lpfc_mq_create_fb_init(phba, mq, mbox, cq);
+ mq_create = &mbox->u.mqe.un.mq_create;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
+ mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
+ &mq_create->u.response);
+ }
+
/* The IOCTL status is embedded in the mailbox subheader. */
- shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || rc) {
@@ -9878,7 +10021,6 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
status = -ENXIO;
goto out;
}
- mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, &mq_create->u.response);
if (mq->queue_id == 0xFFFF) {
status = -ENXIO;
goto out;
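
[Editorial note, not part of the patch] The hunk above changes lpfc_mq_create so that it first issues the newer MQ_CREATE_EXT mailbox command (which also registers for link, FCF-state and group-5 async events) and, if the firmware rejects it, rebuilds the same mailbox as a plain MQ_CREATE via lpfc_mq_create_fb_init and retries. A minimal sketch of that try-then-fall-back control flow is below, under the assumption of hypothetical stand-in names (issue_mq_create_ext, issue_mq_create, struct mbq) rather than the real lpfc mailbox API.

    #include <errno.h>
    #include <stdio.h>

    struct mbq { int queue_id; };                     /* hypothetical stand-in */

    /* Stubs standing in for the two mailbox commands; the real driver sends
     * MQ_CREATE_EXT and MQ_CREATE through lpfc_sli_issue_mbox(). */
    static int issue_mq_create_ext(struct mbq *mq) { (void)mq; return -1; }
    static int issue_mq_create(struct mbq *mq)     { mq->queue_id = 7; return 0; }

    /* Try the extended create first; on failure retry with the base command,
     * then validate the queue id the firmware handed back. */
    static int create_mailbox_queue(struct mbq *mq)
    {
        int rc = issue_mq_create_ext(mq);             /* newer firmware path   */
        if (rc != 0)
            rc = issue_mq_create(mq);                 /* fallback for older FW */
        if (rc != 0 || mq->queue_id == 0xFFFF)
            return -ENXIO;                            /* no usable queue id    */
        return 0;
    }

    int main(void)
    {
        struct mbq mq = { .queue_id = 0xFFFF };
        printf("rc=%d queue_id=%d\n", create_mailbox_queue(&mq), mq.queue_id);
        return 0;
    }
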
@@ -9927,6 +10069,10 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
int rc, length, status = 0;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+ if (!phba->sli4_hba.pc_sli4_params.supported)
+ hw_page_size = SLI4_PAGE_SIZE;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
@@ -9942,6 +10088,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
cq->queue_id);
list_for_each_entry(dmabuf, &wq->page_list, list) {
+ memset(dmabuf->virt, 0, hw_page_size);
wq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
putPaddrLow(dmabuf->phys);
wq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
@@ -10010,6 +10157,10 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
int rc, length, status = 0;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+ if (!phba->sli4_hba.pc_sli4_params.supported)
+ hw_page_size = SLI4_PAGE_SIZE;
if (hrq->entry_count != drq->entry_count)
return -EINVAL;
@@ -10054,6 +10205,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
LPFC_HDR_BUF_SIZE);
list_for_each_entry(dmabuf, &hrq->page_list, list) {
+ memset(dmabuf->virt, 0, hw_page_size);
rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
putPaddrLow(dmabuf->phys);
rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
@@ -10626,7 +10778,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
- if (reqlen > PAGE_SIZE) {
+ if (reqlen > SLI4_PAGE_SIZE) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"2559 Block sgl registration required DMA "
"size (%d) great than a page\n", reqlen);
@@ -10732,7 +10884,7 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
/* Calculate the requested length of the dma memory */
reqlen = cnt * sizeof(struct sgl_page_pairs) +
sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
- if (reqlen > PAGE_SIZE) {
+ if (reqlen > SLI4_PAGE_SIZE) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0217 Block sgl registration required DMA "
"size (%d) great than a page\n", reqlen);
@@ -11568,8 +11720,8 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
*
* This routine is invoked to post rpi header templates to the
* HBA consistent with the SLI-4 interface spec. This routine
- * posts a PAGE_SIZE memory region to the port to hold up to
- * PAGE_SIZE modulo 64 rpi context headers.
+ * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
+ * SLI4_PAGE_SIZE modulo 64 rpi context headers.
*
* This routine does not require any locks. It's usage is expected
* to be driver load or reset recovery when the driver is
@@ -11672,8 +11824,8 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
*
* This routine is invoked to post rpi header templates to the
* HBA consistent with the SLI-4 interface spec. This routine
- * posts a PAGE_SIZE memory region to the port to hold up to
- * PAGE_SIZE modulo 64 rpi context headers.
+ * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
+ * SLI4_PAGE_SIZE modulo 64 rpi context headers.
*
* Returns
* A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
@@ -12040,9 +12192,11 @@ lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
phba->hba_flag |= FCF_DISC_INPROGRESS;
spin_unlock_irq(&phba->hbalock);
/* Reset FCF round robin index bmask for new scan */
- if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
+ if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) {
memset(phba->fcf.fcf_rr_bmask, 0,
sizeof(*phba->fcf.fcf_rr_bmask));
+ phba->fcf.eligible_fcf_cnt = 0;
+ }
error = 0;
}
fail_fcf_scan:
@@ -12507,6 +12661,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mb, *nextmb;
struct lpfc_dmabuf *mp;
+ struct lpfc_nodelist *ndlp;
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
@@ -12523,6 +12678,11 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
__lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
}
+ ndlp = (struct lpfc_nodelist *) mb->context2;
+ if (ndlp) {
+ lpfc_nlp_put(ndlp);
+ mb->context2 = NULL;
+ }
}
list_del(&mb->list);
mempool_free(mb, phba->mbox_mem_pool);
@@ -12532,6 +12692,15 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
(mb->u.mb.mbxCommand == MBX_REG_VPI))
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
+ ndlp = (struct lpfc_nodelist *) mb->context2;
+ if (ndlp) {
+ lpfc_nlp_put(ndlp);
+ mb->context2 = NULL;
+ }
+ /* Unregister the RPI when mailbox complete */
+ mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
+ }
}
spin_unlock_irq(&phba->hbalock);
}
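
[Editorial note, not part of the patch] Several hunks in lpfc_sli.c above repeat the same preparation: take the DMA page size from pc_sli4_params.if_page_sz when the port reports SLI-4 parameters, otherwise fall back to the fixed SLI4_PAGE_SIZE, then size and zero each queue from that value instead of the host PAGE_SIZE. A small stand-alone arithmetic sketch of the resulting page-count computation is below; the sample values are hypothetical, and treating if_page_sz == 0 as "parameters unsupported" is a simplification of the driver's supported flag.

    #include <stdint.h>
    #include <stdio.h>

    #define SLI4_PAGE_SIZE 4096u                      /* fixed fallback size   */
    #define ALIGN_UP(x, a) ((((x) + (a) - 1) / (a)) * (a))

    int main(void)
    {
        uint32_t entry_size   = 64;                   /* hypothetical EQ entry */
        uint32_t entry_count  = 256;
        uint32_t if_page_sz   = 0;                    /* 0: params unsupported */
        uint32_t hw_page_size = if_page_sz ? if_page_sz : SLI4_PAGE_SIZE;

        uint32_t page_count = ALIGN_UP(entry_size * entry_count, hw_page_size)
                              / hw_page_size;
        printf("page_count = %u\n", page_count);      /* 16384 / 4096 -> 4     */
        return 0;
    }
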
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index b4a639c47616..e3792151ca06 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -36,6 +36,7 @@ struct lpfc_cq_event {
struct lpfc_acqe_link acqe_link;
struct lpfc_acqe_fcoe acqe_fcoe;
struct lpfc_acqe_dcbx acqe_dcbx;
+ struct lpfc_acqe_grp5 acqe_grp5;
struct lpfc_rcqe rcqe_cmpl;
struct sli4_wcqe_xri_aborted wcqe_axri;
struct lpfc_wcqe_complete wcqe_cmpl;
@@ -110,6 +111,9 @@ typedef struct lpfcMboxq {
void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
uint8_t mbox_flag;
+ uint16_t in_ext_byte_len;
+ uint16_t out_ext_byte_len;
+ uint8_t mbox_offset_word;
struct lpfc_mcqe mcqe;
struct lpfc_mbx_nembed_sge_virt *sge_array;
} LPFC_MBOXQ_t;
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 4a35e7b9bc5b..58bb4c81b54e 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -162,6 +162,7 @@ struct lpfc_fcf {
#define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */
uint32_t addr_mode;
uint16_t fcf_rr_init_indx;
+ uint32_t eligible_fcf_cnt;
struct lpfc_fcf_rec current_rec;
struct lpfc_fcf_rec failover_rec;
struct timer_list redisc_wait;
@@ -492,8 +493,8 @@ void lpfc_sli4_queue_free(struct lpfc_queue *);
uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint16_t);
uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, uint32_t, uint32_t);
-uint32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
- struct lpfc_queue *, uint32_t);
+int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
+ struct lpfc_queue *, uint32_t);
uint32_t lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, uint32_t);
uint32_t lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 013deec5dae8..5294c3a515a1 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.10"
+#define LPFC_DRIVER_VERSION "8.3.12"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index ffd575c379f3..ab91359bde20 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -763,7 +763,9 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
spin_lock_irq(&phba->hbalock);
list_for_each_entry(port_iterator, &phba->port_list, listentry) {
if (!scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
- lpfc_printf_vlog(port_iterator, KERN_WARNING, LOG_VPORT,
+ if (!(port_iterator->load_flag & FC_UNLOADING))
+ lpfc_printf_vlog(port_iterator, KERN_ERR,
+ LOG_VPORT,
"1801 Create vport work array FAILED: "
"cannot do scsi_host_get\n");
continue;
diff --git a/drivers/scsi/mpt2sas/Kconfig b/drivers/scsi/mpt2sas/Kconfig
index ba8e128de238..bbb7e4bf30a3 100644
--- a/drivers/scsi/mpt2sas/Kconfig
+++ b/drivers/scsi/mpt2sas/Kconfig
@@ -2,7 +2,7 @@
# Kernel configuration file for the MPT2SAS
#
# This code is based on drivers/scsi/mpt2sas/Kconfig
-# Copyright (C) 2007-2009 LSI Corporation
+# Copyright (C) 2007-2010 LSI Corporation
# (mailto:DL-MPTFusionLinux@lsi.com)
# This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
index 9958d847a88d..dada0a13223f 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2009 LSI Corporation.
+ * Copyright (c) 2000-2010 LSI Corporation.
*
*
* Name: mpi2.h
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
index cf0ac9f40c97..d4e9d6f8452e 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2009 LSI Corporation.
+ * Copyright (c) 2000-2010 LSI Corporation.
*
*
* Name: mpi2_cnfg.h
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_history.txt b/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
index c4adf76b49d9..bd6c92b5fae5 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
@@ -2,7 +2,7 @@
Fusion-MPT MPI 2.0 Header File Change History
==============================
- Copyright (c) 2000-2009 LSI Corporation.
+ Copyright (c) 2000-2010 LSI Corporation.
---------------------------------------
Header Set Release Version: 02.00.14
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_init.h b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
index 6541945e97c3..220bf65a9216 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_init.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2009 LSI Corporation.
+ * Copyright (c) 2000-2010 LSI Corporation.
*
*
* Name: mpi2_init.h
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
index 754938422f6a..f18f114922ba 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2009 LSI Corporation.
+ * Copyright (c) 2000-2010 LSI Corporation.
*
*
* Name: mpi2_ioc.h
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
index 73fcdbf92632..686b09b81219 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2009 LSI Corporation.
+ * Copyright (c) 2000-2010 LSI Corporation.
*
*
* Name: mpi2_tool.h
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 88e6eebc3159..b830d61684dd 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -3,7 +3,7 @@
* for access to MPT (Message Passing Technology) firmware.
*
* This code is based on drivers/scsi/mpt2sas/mpt2_base.c
- * Copyright (C) 2007-2009 LSI Corporation
+ * Copyright (C) 2007-2010 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -58,6 +58,7 @@
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/time.h>
+#include <linux/aer.h>
#include "mpt2sas_base.h"
@@ -285,6 +286,9 @@ _base_sas_ioc_info(struct MPT2SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
return;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ return;
+
switch (ioc_status) {
/****************************************************************************
@@ -517,8 +521,18 @@ _base_display_event_data(struct MPT2SAS_ADAPTER *ioc,
desc = "IR Operation Status";
break;
case MPI2_EVENT_SAS_DISCOVERY:
- desc = "Discovery";
- break;
+ {
+ Mpi2EventDataSasDiscovery_t *event_data =
+ (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
+ printk(MPT2SAS_INFO_FMT "Discovery: (%s)", ioc->name,
+ (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
+ "start" : "stop");
+ if (event_data->DiscoveryStatus)
+ printk("discovery_status(0x%08x)",
+ le32_to_cpu(event_data->DiscoveryStatus));
+ printk("\n");
+ return;
+ }
case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
desc = "SAS Broadcast Primitive";
break;
@@ -1243,6 +1257,9 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
goto out_fail;
}
+ /* AER (Advanced Error Reporting) hooks */
+ pci_enable_pcie_error_reporting(pdev);
+
pci_set_master(pdev);
if (_base_config_dma_addressing(ioc, pdev) != 0) {
@@ -1253,7 +1270,7 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
}
for (i = 0, memap_sz = 0, pio_sz = 0 ; i < DEVICE_COUNT_RESOURCE; i++) {
- if (pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO) {
+ if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
if (pio_sz)
continue;
pio_chip = (u64)pci_resource_start(pdev, i);
@@ -1261,15 +1278,18 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
} else {
if (memap_sz)
continue;
- ioc->chip_phys = pci_resource_start(pdev, i);
- chip_phys = (u64)ioc->chip_phys;
- memap_sz = pci_resource_len(pdev, i);
- ioc->chip = ioremap(ioc->chip_phys, memap_sz);
- if (ioc->chip == NULL) {
- printk(MPT2SAS_ERR_FMT "unable to map adapter "
- "memory!\n", ioc->name);
- r = -EINVAL;
- goto out_fail;
+ /* verify memory resource is valid before using */
+ if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
+ ioc->chip_phys = pci_resource_start(pdev, i);
+ chip_phys = (u64)ioc->chip_phys;
+ memap_sz = pci_resource_len(pdev, i);
+ ioc->chip = ioremap(ioc->chip_phys, memap_sz);
+ if (ioc->chip == NULL) {
+ printk(MPT2SAS_ERR_FMT "unable to map "
+ "adapter memory!\n", ioc->name);
+ r = -EINVAL;
+ goto out_fail;
+ }
}
}
}
@@ -1295,6 +1315,7 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
ioc->chip_phys = 0;
ioc->pci_irq = -1;
pci_release_selected_regions(ioc->pdev, ioc->bars);
+ pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
return r;
}
@@ -1898,7 +1919,10 @@ _base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
ioc->config_page, ioc->config_page_dma);
}
- kfree(ioc->scsi_lookup);
+ if (ioc->scsi_lookup) {
+ free_pages((ulong)ioc->scsi_lookup, ioc->scsi_lookup_pages);
+ ioc->scsi_lookup = NULL;
+ }
kfree(ioc->hpr_lookup);
kfree(ioc->internal_lookup);
}
@@ -2110,11 +2134,13 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
ioc->name, (unsigned long long) ioc->request_dma));
total_sz += sz;
- ioc->scsi_lookup = kcalloc(ioc->scsiio_depth,
- sizeof(struct request_tracker), GFP_KERNEL);
+ sz = ioc->scsiio_depth * sizeof(struct request_tracker);
+ ioc->scsi_lookup_pages = get_order(sz);
+ ioc->scsi_lookup = (struct request_tracker *)__get_free_pages(
+ GFP_KERNEL, ioc->scsi_lookup_pages);
if (!ioc->scsi_lookup) {
- printk(MPT2SAS_ERR_FMT "scsi_lookup: kcalloc failed\n",
- ioc->name);
+ printk(MPT2SAS_ERR_FMT "scsi_lookup: get_free_pages failed, "
+ "sz(%d)\n", ioc->name, (int)sz);
goto out;
}
@@ -3006,8 +3032,8 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
* since epoch ~ midnight January 1, 1970.
*/
do_gettimeofday(&current_time);
- mpi_request.TimeStamp = (current_time.tv_sec * 1000) +
- (current_time.tv_usec >> 3);
+ mpi_request.TimeStamp = cpu_to_le64((u64)current_time.tv_sec * 1000 +
+ (current_time.tv_usec / 1000));
if (ioc->logging_level & MPT_DEBUG_INIT) {
u32 *mfp;
@@ -3179,7 +3205,7 @@ _base_event_notification(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
mpi_request->VP_ID = 0;
for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
mpi_request->EventMasks[i] =
- le32_to_cpu(ioc->event_masks[i]);
+ cpu_to_le32(ioc->event_masks[i]);
mpt2sas_base_put_smid_default(ioc, smid);
init_completion(&ioc->base_cmds.done);
timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
@@ -3516,7 +3542,9 @@ mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc)
__func__));
_base_mask_interrupts(ioc);
+ ioc->shost_recovery = 1;
_base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
+ ioc->shost_recovery = 0;
if (ioc->pci_irq) {
synchronize_irq(pdev->irq);
free_irq(ioc->pci_irq, ioc);
@@ -3527,6 +3555,7 @@ mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc)
ioc->pci_irq = -1;
ioc->chip_phys = 0;
pci_release_selected_regions(ioc->pdev, ioc->bars);
+ pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
return;
}
@@ -3560,8 +3589,10 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
sizeof(Mpi2PortFactsReply_t), GFP_KERNEL);
- if (!ioc->pfacts)
+ if (!ioc->pfacts) {
+ r = -ENOMEM;
goto out_free_resources;
+ }
for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
r = _base_get_port_facts(ioc, i, CAN_SLEEP);
@@ -3607,6 +3638,15 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
mutex_init(&ioc->ctl_cmds.mutex);
+ if (!ioc->base_cmds.reply || !ioc->transport_cmds.reply ||
+ !ioc->scsih_cmds.reply || !ioc->tm_cmds.reply ||
+ !ioc->config_cmds.reply || !ioc->ctl_cmds.reply) {
+ r = -ENOMEM;
+ goto out_free_resources;
+ }
+
+ init_completion(&ioc->shost_recovery_done);
+
for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
ioc->event_masks[i] = -1;
@@ -3639,6 +3679,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
pci_set_drvdata(ioc->pdev, NULL);
kfree(ioc->tm_cmds.reply);
kfree(ioc->transport_cmds.reply);
+ kfree(ioc->scsih_cmds.reply);
kfree(ioc->config_cmds.reply);
kfree(ioc->base_cmds.reply);
kfree(ioc->ctl_cmds.reply);
@@ -3646,6 +3687,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
ioc->ctl_cmds.reply = NULL;
ioc->base_cmds.reply = NULL;
ioc->tm_cmds.reply = NULL;
+ ioc->scsih_cmds.reply = NULL;
ioc->transport_cmds.reply = NULL;
ioc->config_cmds.reply = NULL;
ioc->pfacts = NULL;
@@ -3675,6 +3717,7 @@ mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc)
kfree(ioc->base_cmds.reply);
kfree(ioc->tm_cmds.reply);
kfree(ioc->transport_cmds.reply);
+ kfree(ioc->scsih_cmds.reply);
kfree(ioc->config_cmds.reply);
}
@@ -3811,9 +3854,8 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
ioc->shost_recovery = 0;
+ complete(&ioc->shost_recovery_done);
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
- if (!r)
- _base_reset_handler(ioc, MPT2_IOC_RUNNING);
return r;
}
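
[Editorial note, not part of the patch] Among the mpt2sas_base.c changes above, the _base_send_ioc_init hunk replaces a millisecond timestamp computed as (tv_sec * 1000) + (tv_usec >> 3) with cpu_to_le64((u64)tv_sec * 1000 + tv_usec / 1000): the shift divided microseconds by 8 instead of 1000, the multiplication was not widened to 64 bits, and the value was never converted to the little-endian format the firmware expects. A tiny sketch of the corrected arithmetic (sample values hypothetical, endian conversion omitted) is:

    #include <stdint.h>
    #include <stdio.h>

    /* Milliseconds since the epoch, as the IOCInit TimeStamp expects: widen to
     * 64 bits before scaling, and convert microseconds by dividing by 1000.
     * The kernel patch additionally wraps the result in cpu_to_le64(). */
    static uint64_t msecs_since_epoch(uint64_t tv_sec, uint64_t tv_usec)
    {
        return tv_sec * 1000 + tv_usec / 1000;
    }

    int main(void)
    {
        /* hypothetical sample: 1 s + 250000 us -> 1250 ms */
        printf("%llu\n", (unsigned long long)msecs_since_epoch(1, 250000));
        return 0;
    }
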
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index e18b0544c38f..b4afe431ac1e 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -3,7 +3,7 @@
* for access to MPT (Message Passing Technology) firmware.
*
* This code is based on drivers/scsi/mpt2sas/mpt2_base.h
- * Copyright (C) 2007-2009 LSI Corporation
+ * Copyright (C) 2007-2010 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -69,11 +69,11 @@
#define MPT2SAS_DRIVER_NAME "mpt2sas"
#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION "04.100.01.00"
-#define MPT2SAS_MAJOR_VERSION 04
+#define MPT2SAS_DRIVER_VERSION "05.100.00.02"
+#define MPT2SAS_MAJOR_VERSION 05
#define MPT2SAS_MINOR_VERSION 100
-#define MPT2SAS_BUILD_VERSION 01
-#define MPT2SAS_RELEASE_VERSION 00
+#define MPT2SAS_BUILD_VERSION 00
+#define MPT2SAS_RELEASE_VERSION 02
/*
* Set MPT2SAS_SG_DEPTH value based on user input.
@@ -119,7 +119,6 @@
#define MPT2_IOC_PRE_RESET 1 /* prior to host reset */
#define MPT2_IOC_AFTER_RESET 2 /* just after host reset */
#define MPT2_IOC_DONE_RESET 3 /* links re-initialized */
-#define MPT2_IOC_RUNNING 4 /* shost running */
/*
* logging format
@@ -260,16 +259,6 @@ struct _internal_cmd {
u16 smid;
};
-/*
- * SAS Topology Structures
- */
-
-#define MPTSAS_STATE_TR_SEND 0x0001
-#define MPTSAS_STATE_TR_COMPLETE 0x0002
-#define MPTSAS_STATE_CNTRL_SEND 0x0004
-#define MPTSAS_STATE_CNTRL_COMPLETE 0x0008
-
-#define MPT2SAS_REQ_SAS_CNTRL 0x0010
/**
* struct _sas_device - attached device information
@@ -307,7 +296,6 @@ struct _sas_device {
u16 slot;
u8 hidden_raid_component;
u8 responding;
- u16 state;
};
/**
@@ -378,6 +366,7 @@ struct _sas_port {
* @phy_id: unique phy id
* @handle: device handle for this phy
* @attached_handle: device handle for attached device
+ * @phy_belongs_to_port: port has been created for this phy
*/
struct _sas_phy {
struct list_head port_siblings;
@@ -387,6 +376,7 @@ struct _sas_phy {
u8 phy_id;
u16 handle;
u16 attached_handle;
+ u8 phy_belongs_to_port;
};
/**
@@ -603,7 +593,6 @@ struct MPT2SAS_ADAPTER {
/* fw event handler */
char firmware_event_name[20];
struct workqueue_struct *firmware_event_thread;
- u8 fw_events_off;
spinlock_t fw_event_lock;
struct list_head fw_event_list;
@@ -611,6 +600,7 @@ struct MPT2SAS_ADAPTER {
int aen_event_read_flag;
u8 broadcast_aen_busy;
u8 shost_recovery;
+ struct completion shost_recovery_done;
spinlock_t ioc_reset_in_progress_lock;
u8 ioc_link_reset_in_progress;
u8 ignore_loginfos;
@@ -688,7 +678,8 @@ struct MPT2SAS_ADAPTER {
dma_addr_t request_dma;
u32 request_dma_sz;
struct request_tracker *scsi_lookup;
- spinlock_t scsi_lookup_lock;
+ ulong scsi_lookup_pages;
+ spinlock_t scsi_lookup_lock;
struct list_head free_list;
int pending_io_count;
wait_queue_head_t reset_wq;
@@ -700,7 +691,7 @@ struct MPT2SAS_ADAPTER {
u16 max_sges_in_chain_message;
u16 chains_needed_per_io;
u16 chain_offset_value_for_main_message;
- u16 chain_depth;
+ u32 chain_depth;
/* hi-priority queue */
u16 hi_priority_smid;
@@ -814,8 +805,9 @@ void mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc);
/* scsih shared API */
u8 mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
u32 reply);
-void mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun,
- u8 type, u16 smid_task, ulong timeout);
+int mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle,
+ uint channel, uint id, uint lun, u8 type, u16 smid_task,
+ ulong timeout, struct scsi_cmnd *scmd);
void mpt2sas_scsih_set_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);
void mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);
struct _sas_node *mpt2sas_scsih_expander_find_by_handle(struct MPT2SAS_ADAPTER *ioc,
diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c
index cf44b355bc97..e762dd3e2fcb 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_config.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_config.c
@@ -2,7 +2,7 @@
* This module provides common API for accessing firmware configuration pages
*
* This code is based on drivers/scsi/mpt2sas/mpt2_base.c
- * Copyright (C) 2007-2009 LSI Corporation
+ * Copyright (C) 2007-2010 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -1390,12 +1390,12 @@ mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle,
if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
goto out;
for (i = 0; i < config_page->NumElements; i++) {
- if ((config_page->ConfigElement[i].ElementFlags &
+ if ((le16_to_cpu(config_page->ConfigElement[i].ElementFlags) &
MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE) !=
MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT)
continue;
- if (config_page->ConfigElement[i].PhysDiskDevHandle ==
- pd_handle) {
+ if (le16_to_cpu(config_page->ConfigElement[i].
+ PhysDiskDevHandle) == pd_handle) {
*volume_handle = le16_to_cpu(config_page->
ConfigElement[i].VolDevHandle);
r = 0;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index fa9bf83819d5..d88e9756d8f5 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -3,7 +3,7 @@
* controllers
*
* This code is based on drivers/scsi/mpt2sas/mpt2_ctl.c
- * Copyright (C) 2007-2009 LSI Corporation
+ * Copyright (C) 2007-2010 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -533,7 +533,7 @@ _ctl_set_task_mid(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
if (!found) {
dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
"handle(0x%04x), lun(%d), no active mid!!\n", ioc->name,
- desc, tm_request->DevHandle, lun));
+ desc, le16_to_cpu(tm_request->DevHandle), lun));
tm_reply = ioc->ctl_cmds.reply;
tm_reply->DevHandle = tm_request->DevHandle;
tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
@@ -551,7 +551,8 @@ _ctl_set_task_mid(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
"handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name,
- desc, tm_request->DevHandle, lun, tm_request->TaskMID));
+ desc, le16_to_cpu(tm_request->DevHandle), lun,
+ le16_to_cpu(tm_request->TaskMID)));
return 0;
}
@@ -647,9 +648,9 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
- if (!mpi_request->FunctionDependent1 ||
- mpi_request->FunctionDependent1 >
- cpu_to_le16(ioc->facts.MaxDevHandle)) {
+ if (!le16_to_cpu(mpi_request->FunctionDependent1) ||
+ le16_to_cpu(mpi_request->FunctionDependent1) >
+ ioc->facts.MaxDevHandle) {
ret = -EINVAL;
mpt2sas_base_free_smid(ioc, smid);
goto out;
@@ -743,8 +744,11 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
mpt2sas_base_get_sense_buffer_dma(ioc, smid);
priv_sense = mpt2sas_base_get_sense_buffer(ioc, smid);
memset(priv_sense, 0, SCSI_SENSE_BUFFERSIZE);
- mpt2sas_base_put_smid_scsi_io(ioc, smid,
- le16_to_cpu(mpi_request->FunctionDependent1));
+ if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)
+ mpt2sas_base_put_smid_scsi_io(ioc, smid,
+ le16_to_cpu(mpi_request->FunctionDependent1));
+ else
+ mpt2sas_base_put_smid_default(ioc, smid);
break;
}
case MPI2_FUNCTION_SCSI_TASK_MGMT:
@@ -752,6 +756,10 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
Mpi2SCSITaskManagementRequest_t *tm_request =
(Mpi2SCSITaskManagementRequest_t *)mpi_request;
+ dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "TASK_MGMT: "
+ "handle(0x%04x), task_type(0x%02x)\n", ioc->name,
+ le16_to_cpu(tm_request->DevHandle), tm_request->TaskType));
+
if (tm_request->TaskType ==
MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
tm_request->TaskType ==
@@ -762,7 +770,6 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
}
}
- mutex_lock(&ioc->tm_cmds.mutex);
mpt2sas_scsih_set_tm_flag(ioc, le16_to_cpu(
tm_request->DevHandle));
mpt2sas_base_put_smid_hi_priority(ioc, smid);
@@ -818,7 +825,6 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
Mpi2SCSITaskManagementRequest_t *tm_request =
(Mpi2SCSITaskManagementRequest_t *)mpi_request;
- mutex_unlock(&ioc->tm_cmds.mutex);
mpt2sas_scsih_clear_tm_flag(ioc, le16_to_cpu(
tm_request->DevHandle));
} else if ((mpi_request->Function == MPI2_FUNCTION_SMP_PASSTHROUGH ||
@@ -897,14 +903,13 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
printk(MPT2SAS_INFO_FMT "issue target reset: handle "
"= (0x%04x)\n", ioc->name,
- mpi_request->FunctionDependent1);
+ le16_to_cpu(mpi_request->FunctionDependent1));
mpt2sas_halt_firmware(ioc);
- mutex_lock(&ioc->tm_cmds.mutex);
mpt2sas_scsih_issue_tm(ioc,
- mpi_request->FunctionDependent1, 0,
- MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 10);
+ le16_to_cpu(mpi_request->FunctionDependent1), 0, 0,
+ 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 10,
+ NULL);
ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
- mutex_unlock(&ioc->tm_cmds.mutex);
} else
mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
FORCE_BIG_HAMMER);
@@ -1373,7 +1378,8 @@ _ctl_diag_register_2(struct MPT2SAS_ADAPTER *ioc,
dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: diag_buffer(0x%p), "
"dma(0x%llx), sz(%d)\n", ioc->name, __func__, request_data,
- (unsigned long long)request_data_dma, mpi_request->BufferLength));
+ (unsigned long long)request_data_dma,
+ le32_to_cpu(mpi_request->BufferLength)));
for (i = 0; i < MPT2_PRODUCT_SPECIFIC_DWORDS; i++)
mpi_request->ProductSpecific[i] =
@@ -2334,8 +2340,8 @@ _ctl_version_nvdata_persistent_show(struct device *cdev,
struct Scsi_Host *shost = class_to_shost(cdev);
struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
- return snprintf(buf, PAGE_SIZE, "%02xh\n",
- le16_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word));
+ return snprintf(buf, PAGE_SIZE, "%08xh\n",
+ le32_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word));
}
static DEVICE_ATTR(version_nvdata_persistent, S_IRUGO,
_ctl_version_nvdata_persistent_show, NULL);
@@ -2354,8 +2360,8 @@ _ctl_version_nvdata_default_show(struct device *cdev,
struct Scsi_Host *shost = class_to_shost(cdev);
struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
- return snprintf(buf, PAGE_SIZE, "%02xh\n",
- le16_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word));
+ return snprintf(buf, PAGE_SIZE, "%08xh\n",
+ le32_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word));
}
static DEVICE_ATTR(version_nvdata_default, S_IRUGO,
_ctl_version_nvdata_default_show, NULL);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.h b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
index 8a5eeb1a5c84..69916e46e04f 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
@@ -3,7 +3,7 @@
* controllers
*
* This code is based on drivers/scsi/mpt2sas/mpt2_ctl.h
- * Copyright (C) 2007-2009 LSI Corporation
+ * Copyright (C) 2007-2010 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpt2sas_debug.h b/drivers/scsi/mpt2sas/mpt2sas_debug.h
index 5308a25cb307..3dcddfeb6f4c 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_debug.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_debug.h
@@ -2,7 +2,7 @@
* Logging Support for MPT (Message Passing Technology) based controllers
*
* This code is based on drivers/scsi/mpt2sas/mpt2_debug.c
- * Copyright (C) 2007-2009 LSI Corporation
+ * Copyright (C) 2007-2010 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index be171ed682e0..c5ff26a2a51d 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -2,7 +2,7 @@
* Scsi Host Layer for MPT (Message Passing Technology) based controllers
*
* This code is based on drivers/scsi/mpt2sas/mpt2_scsih.c
- * Copyright (C) 2007-2009 LSI Corporation
+ * Copyright (C) 2007-2010 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -52,6 +52,7 @@
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
+#include <linux/aer.h>
#include <linux/raid_class.h>
#include <linux/slab.h>
@@ -109,14 +110,16 @@ struct sense_info {
};
+#define MPT2SAS_RESCAN_AFTER_HOST_RESET (0xFFFF)
+
/**
* struct fw_event_work - firmware event struct
* @list: link list framework
* @work: work object (ioc->fault_reset_work_q)
+ * @cancel_pending_work: flag set during reset handling
* @ioc: per adapter object
* @VF_ID: virtual function id
* @VP_ID: virtual port id
- * @host_reset_handling: handling events during host reset
* @ignore: flag meaning this event has been marked to ignore
* @event: firmware event MPI2_EVENT_XXX defined in mpt2_ioc.h
* @event_data: reply event data payload follows
@@ -125,11 +128,11 @@ struct sense_info {
*/
struct fw_event_work {
struct list_head list;
- struct work_struct work;
+ u8 cancel_pending_work;
+ struct delayed_work delayed_work;
struct MPT2SAS_ADAPTER *ioc;
u8 VF_ID;
u8 VP_ID;
- u8 host_reset_handling;
u8 ignore;
u16 event;
void *event_data;
@@ -482,27 +485,17 @@ struct _sas_device *
mpt2sas_scsih_sas_device_find_by_sas_address(struct MPT2SAS_ADAPTER *ioc,
u64 sas_address)
{
- struct _sas_device *sas_device, *r;
+ struct _sas_device *sas_device;
- r = NULL;
- /* check the sas_device_init_list */
- list_for_each_entry(sas_device, &ioc->sas_device_init_list,
- list) {
- if (sas_device->sas_address != sas_address)
- continue;
- r = sas_device;
- goto out;
- }
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list)
+ if (sas_device->sas_address == sas_address)
+ return sas_device;
- /* then check the sas_device_list */
- list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
- if (sas_device->sas_address != sas_address)
- continue;
- r = sas_device;
- goto out;
- }
- out:
- return r;
+ list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
+ if (sas_device->sas_address == sas_address)
+ return sas_device;
+
+ return NULL;
}
/**
@@ -517,28 +510,17 @@ mpt2sas_scsih_sas_device_find_by_sas_address(struct MPT2SAS_ADAPTER *ioc,
static struct _sas_device *
_scsih_sas_device_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
{
- struct _sas_device *sas_device, *r;
+ struct _sas_device *sas_device;
- r = NULL;
- if (ioc->wait_for_port_enable_to_complete) {
- list_for_each_entry(sas_device, &ioc->sas_device_init_list,
- list) {
- if (sas_device->handle != handle)
- continue;
- r = sas_device;
- goto out;
- }
- } else {
- list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
- if (sas_device->handle != handle)
- continue;
- r = sas_device;
- goto out;
- }
- }
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list)
+ if (sas_device->handle == handle)
+ return sas_device;
- out:
- return r;
+ list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
+ if (sas_device->handle == handle)
+ return sas_device;
+
+ return NULL;
}
/**
@@ -555,10 +537,15 @@ _scsih_sas_device_remove(struct MPT2SAS_ADAPTER *ioc,
{
unsigned long flags;
+ if (!sas_device)
+ return;
+
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- list_del(&sas_device->list);
- memset(sas_device, 0, sizeof(struct _sas_device));
- kfree(sas_device);
+ if (mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_device->sas_address)) {
+ list_del(&sas_device->list);
+ kfree(sas_device);
+ }
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
@@ -988,7 +975,7 @@ _scsih_build_scatter_gather(struct MPT2SAS_ADAPTER *ioc,
u32 chain_offset;
u32 chain_length;
u32 chain_flags;
- u32 sges_left;
+ int sges_left;
u32 sges_in_segment;
u32 sgl_flags;
u32 sgl_flags_last_element;
@@ -1009,7 +996,7 @@ _scsih_build_scatter_gather(struct MPT2SAS_ADAPTER *ioc,
sg_scmd = scsi_sglist(scmd);
sges_left = scsi_dma_map(scmd);
- if (!sges_left) {
+ if (sges_left < 0) {
sdev_printk(KERN_ERR, scmd->device, "pci_map_sg"
" failed: request for %d bytes!\n", scsi_bufflen(scmd));
return -ENOMEM;
@@ -1395,7 +1382,7 @@ _scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc,
}
flags = le16_to_cpu(sas_device_pg0.Flags);
- device_info = le16_to_cpu(sas_device_pg0.DeviceInfo);
+ device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
sdev_printk(KERN_INFO, sdev,
"atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
@@ -1963,65 +1950,78 @@ mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle)
}
}
+
/**
* mpt2sas_scsih_issue_tm - main routine for sending tm requests
* @ioc: per adapter struct
* @device_handle: device handle
+ * @channel: the channel assigned by the OS
+ * @id: the id assigned by the OS
* @lun: lun number
* @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
* @smid_task: smid assigned to the task
* @timeout: timeout in seconds
- * Context: The calling function needs to acquire the tm_cmds.mutex
+ * Context: user
*
* A generic API for sending task management requests to firmware.
*
- * The ioc->tm_cmds.status flag should be MPT2_CMD_NOT_USED before calling
- * this API.
- *
* The callback index is set inside `ioc->tm_cb_idx`.
*
- * Return nothing.
+ * Return SUCCESS or FAILED.
*/
-void
-mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun,
- u8 type, u16 smid_task, ulong timeout)
+int
+mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel,
+ uint id, uint lun, u8 type, u16 smid_task, ulong timeout,
+ struct scsi_cmnd *scmd)
{
Mpi2SCSITaskManagementRequest_t *mpi_request;
Mpi2SCSITaskManagementReply_t *mpi_reply;
u16 smid = 0;
u32 ioc_state;
unsigned long timeleft;
+ struct scsi_cmnd *scmd_lookup;
+ int rc;
+ mutex_lock(&ioc->tm_cmds.mutex);
if (ioc->tm_cmds.status != MPT2_CMD_NOT_USED) {
printk(MPT2SAS_INFO_FMT "%s: tm_cmd busy!!!\n",
__func__, ioc->name);
- return;
+ rc = FAILED;
+ goto err_out;
}
- if (ioc->shost_recovery) {
+ if (ioc->shost_recovery || ioc->remove_host) {
printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
__func__, ioc->name);
- return;
+ rc = FAILED;
+ goto err_out;
}
ioc_state = mpt2sas_base_get_iocstate(ioc, 0);
if (ioc_state & MPI2_DOORBELL_USED) {
dhsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "unexpected doorbell "
"active!\n", ioc->name));
- goto issue_host_reset;
+ mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ rc = SUCCESS;
+ goto err_out;
}
if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
mpt2sas_base_fault_info(ioc, ioc_state &
MPI2_DOORBELL_DATA_MASK);
- goto issue_host_reset;
+ mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ rc = SUCCESS;
+ goto err_out;
}
smid = mpt2sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
if (!smid) {
printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
ioc->name, __func__);
- return;
+ rc = FAILED;
+ goto err_out;
}
dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "sending tm: handle(0x%04x),"
@@ -2035,21 +2035,24 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun,
mpi_request->DevHandle = cpu_to_le16(handle);
mpi_request->TaskType = type;
mpi_request->TaskMID = cpu_to_le16(smid_task);
- mpi_request->VP_ID = 0; /* TODO */
- mpi_request->VF_ID = 0;
int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
mpt2sas_scsih_set_tm_flag(ioc, handle);
init_completion(&ioc->tm_cmds.done);
mpt2sas_base_put_smid_hi_priority(ioc, smid);
timeleft = wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
- mpt2sas_scsih_clear_tm_flag(ioc, handle);
if (!(ioc->tm_cmds.status & MPT2_CMD_COMPLETE)) {
printk(MPT2SAS_ERR_FMT "%s: timeout\n",
ioc->name, __func__);
_debug_dump_mf(mpi_request,
sizeof(Mpi2SCSITaskManagementRequest_t)/4);
- if (!(ioc->tm_cmds.status & MPT2_CMD_RESET))
- goto issue_host_reset;
+ if (!(ioc->tm_cmds.status & MPT2_CMD_RESET)) {
+ mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ rc = SUCCESS;
+ ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
+ mpt2sas_scsih_clear_tm_flag(ioc, handle);
+ goto err_out;
+ }
}
if (ioc->tm_cmds.status & MPT2_CMD_REPLY_VALID) {
@@ -2059,12 +2062,57 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun,
ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
le32_to_cpu(mpi_reply->IOCLogInfo),
le32_to_cpu(mpi_reply->TerminationCount)));
- if (ioc->logging_level & MPT_DEBUG_TM)
+ if (ioc->logging_level & MPT_DEBUG_TM) {
_scsih_response_code(ioc, mpi_reply->ResponseCode);
+ if (mpi_reply->IOCStatus)
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SCSITaskManagementRequest_t)/4);
+ }
}
- return;
- issue_host_reset:
- mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, FORCE_BIG_HAMMER);
+
+ /* sanity check:
+ * Check to see the commands were terminated.
+ * This is only needed for eh callbacks, hence the scmd check.
+ */
+ rc = FAILED;
+ if (scmd == NULL)
+ goto bypass_sanity_checks;
+ switch (type) {
+ case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
+ scmd_lookup = _scsih_scsi_lookup_get(ioc, smid_task);
+ if (scmd_lookup && (scmd_lookup->serial_number ==
+ scmd->serial_number))
+ rc = FAILED;
+ else
+ rc = SUCCESS;
+ break;
+
+ case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+ if (_scsih_scsi_lookup_find_by_target(ioc, id, channel))
+ rc = FAILED;
+ else
+ rc = SUCCESS;
+ break;
+
+ case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
+ if (_scsih_scsi_lookup_find_by_lun(ioc, id, lun, channel))
+ rc = FAILED;
+ else
+ rc = SUCCESS;
+ break;
+ }
+
+ bypass_sanity_checks:
+
+ mpt2sas_scsih_clear_tm_flag(ioc, handle);
+ ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
+ mutex_unlock(&ioc->tm_cmds.mutex);
+
+ return rc;
+
+ err_out:
+ mutex_unlock(&ioc->tm_cmds.mutex);
+ return rc;
}
/**
@@ -2081,7 +2129,6 @@ _scsih_abort(struct scsi_cmnd *scmd)
u16 smid;
u16 handle;
int r;
- struct scsi_cmnd *scmd_lookup;
printk(MPT2SAS_INFO_FMT "attempting task abort! scmd(%p)\n",
ioc->name, scmd);
@@ -2116,19 +2163,10 @@ _scsih_abort(struct scsi_cmnd *scmd)
mpt2sas_halt_firmware(ioc);
- mutex_lock(&ioc->tm_cmds.mutex);
handle = sas_device_priv_data->sas_target->handle;
- mpt2sas_scsih_issue_tm(ioc, handle, sas_device_priv_data->lun,
- MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30);
-
- /* sanity check - see whether command actually completed */
- scmd_lookup = _scsih_scsi_lookup_get(ioc, smid);
- if (scmd_lookup && (scmd_lookup->serial_number == scmd->serial_number))
- r = FAILED;
- else
- r = SUCCESS;
- ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
- mutex_unlock(&ioc->tm_cmds.mutex);
+ r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
+ scmd->device->id, scmd->device->lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, scmd);
out:
printk(MPT2SAS_INFO_FMT "task abort: %s scmd(%p)\n",
@@ -2185,22 +2223,9 @@ _scsih_dev_reset(struct scsi_cmnd *scmd)
goto out;
}
- mutex_lock(&ioc->tm_cmds.mutex);
- mpt2sas_scsih_issue_tm(ioc, handle, 0,
- MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, scmd->device->lun,
- 30);
-
- /*
- * sanity check see whether all commands to this device been
- * completed
- */
- if (_scsih_scsi_lookup_find_by_lun(ioc, scmd->device->id,
- scmd->device->lun, scmd->device->channel))
- r = FAILED;
- else
- r = SUCCESS;
- ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
- mutex_unlock(&ioc->tm_cmds.mutex);
+ r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
+ scmd->device->id, scmd->device->lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, scmd);
out:
printk(MPT2SAS_INFO_FMT "device reset: %s scmd(%p)\n",
@@ -2257,21 +2282,9 @@ _scsih_target_reset(struct scsi_cmnd *scmd)
goto out;
}
- mutex_lock(&ioc->tm_cmds.mutex);
- mpt2sas_scsih_issue_tm(ioc, handle, 0,
- MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30);
-
- /*
- * sanity check see whether all commands to this target been
- * completed
- */
- if (_scsih_scsi_lookup_find_by_target(ioc, scmd->device->id,
- scmd->device->channel))
- r = FAILED;
- else
- r = SUCCESS;
- ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
- mutex_unlock(&ioc->tm_cmds.mutex);
+ r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
+ scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
+ 30, scmd);
out:
printk(MPT2SAS_INFO_FMT "target reset: %s scmd(%p)\n",
@@ -2325,8 +2338,9 @@ _scsih_fw_event_add(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
spin_lock_irqsave(&ioc->fw_event_lock, flags);
list_add_tail(&fw_event->list, &ioc->fw_event_list);
- INIT_WORK(&fw_event->work, _firmware_event_work);
- queue_work(ioc->firmware_event_thread, &fw_event->work);
+ INIT_DELAYED_WORK(&fw_event->delayed_work, _firmware_event_work);
+ queue_delayed_work(ioc->firmware_event_thread,
+ &fw_event->delayed_work, 0);
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
@@ -2353,61 +2367,53 @@ _scsih_fw_event_free(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
+
/**
- * _scsih_fw_event_add - requeue an event
+ * _scsih_queue_rescan - queue a topology rescan from user context
* @ioc: per adapter object
- * @fw_event: object describing the event
- * Context: This function will acquire ioc->fw_event_lock.
*
* Return nothing.
*/
static void
-_scsih_fw_event_requeue(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work
- *fw_event, unsigned long delay)
+_scsih_queue_rescan(struct MPT2SAS_ADAPTER *ioc)
{
- unsigned long flags;
- if (ioc->firmware_event_thread == NULL)
- return;
+ struct fw_event_work *fw_event;
- spin_lock_irqsave(&ioc->fw_event_lock, flags);
- queue_work(ioc->firmware_event_thread, &fw_event->work);
- spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+ if (ioc->wait_for_port_enable_to_complete)
+ return;
+ fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+ if (!fw_event)
+ return;
+ fw_event->event = MPT2SAS_RESCAN_AFTER_HOST_RESET;
+ fw_event->ioc = ioc;
+ _scsih_fw_event_add(ioc, fw_event);
}
/**
- * _scsih_fw_event_off - turn flag off preventing event handling
+ * _scsih_fw_event_cleanup_queue - cleanup event queue
* @ioc: per adapter object
*
- * Used to prevent handling of firmware events during adapter reset
- * driver unload.
+ * Walk the firmware event queue, either killing timers, or waiting
+ * for outstanding events to complete
*
* Return nothing.
*/
static void
-_scsih_fw_event_off(struct MPT2SAS_ADAPTER *ioc)
+_scsih_fw_event_cleanup_queue(struct MPT2SAS_ADAPTER *ioc)
{
- unsigned long flags;
+ struct fw_event_work *fw_event, *next;
- spin_lock_irqsave(&ioc->fw_event_lock, flags);
- ioc->fw_events_off = 1;
- spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
-
-}
-
-/**
- * _scsih_fw_event_on - turn flag on allowing firmware event handling
- * @ioc: per adapter object
- *
- * Returns nothing.
- */
-static void
-_scsih_fw_event_on(struct MPT2SAS_ADAPTER *ioc)
-{
- unsigned long flags;
+ if (list_empty(&ioc->fw_event_list) ||
+ !ioc->firmware_event_thread || in_interrupt())
+ return;
- spin_lock_irqsave(&ioc->fw_event_lock, flags);
- ioc->fw_events_off = 0;
- spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+ list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) {
+ if (cancel_delayed_work(&fw_event->delayed_work)) {
+ _scsih_fw_event_free(ioc, fw_event);
+ continue;
+ }
+ fw_event->cancel_pending_work = 1;
+ }
}
/**
@@ -2571,25 +2577,24 @@ static void
_scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle)
{
Mpi2SCSITaskManagementRequest_t *mpi_request;
- struct MPT2SAS_TARGET *sas_target_priv_data;
u16 smid;
struct _sas_device *sas_device;
unsigned long flags;
struct _tr_list *delayed_tr;
- if (ioc->shost_recovery) {
- printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
- __func__, ioc->name);
+ if (ioc->shost_recovery || ioc->remove_host) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host reset in "
+ "progress!\n", __func__, ioc->name));
return;
}
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-
- /* skip is hidden raid component */
- if (sas_device && sas_device->hidden_raid_component)
+ if (sas_device && sas_device->hidden_raid_component) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
return;
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
smid = mpt2sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
if (!smid) {
@@ -2598,36 +2603,16 @@ _scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle)
return;
INIT_LIST_HEAD(&delayed_tr->list);
delayed_tr->handle = handle;
- delayed_tr->state = MPT2SAS_REQ_SAS_CNTRL;
- list_add_tail(&delayed_tr->list,
- &ioc->delayed_tr_list);
- if (sas_device && sas_device->starget) {
- dewtprintk(ioc, starget_printk(KERN_INFO,
- sas_device->starget, "DELAYED:tr:handle(0x%04x), "
- "(open)\n", handle));
- } else {
- dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
- "DELAYED:tr:handle(0x%04x), (open)\n",
- ioc->name, handle));
- }
- return;
- }
-
- if (sas_device) {
- sas_device->state |= MPTSAS_STATE_TR_SEND;
- sas_device->state |= MPT2SAS_REQ_SAS_CNTRL;
- if (sas_device->starget && sas_device->starget->hostdata) {
- sas_target_priv_data = sas_device->starget->hostdata;
- sas_target_priv_data->tm_busy = 1;
- dewtprintk(ioc, starget_printk(KERN_INFO,
- sas_device->starget, "tr:handle(0x%04x), (open)\n",
- handle));
- }
- } else {
+ list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
- "tr:handle(0x%04x), (open)\n", ioc->name, handle));
+ "DELAYED:tr:handle(0x%04x), (open)\n",
+ ioc->name, handle));
+ return;
}
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "tr_send:handle(0x%04x), "
+ "(open), smid(%d), cb(%d)\n", ioc->name, handle, smid,
+ ioc->tm_tr_cb_idx));
mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
@@ -2657,35 +2642,15 @@ static u8
_scsih_sas_control_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid,
u8 msix_index, u32 reply)
{
- unsigned long flags;
- u16 handle;
- struct _sas_device *sas_device;
Mpi2SasIoUnitControlReply_t *mpi_reply =
mpt2sas_base_get_reply_virt_addr(ioc, reply);
- handle = le16_to_cpu(mpi_reply->DevHandle);
-
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-
- if (sas_device) {
- sas_device->state |= MPTSAS_STATE_CNTRL_COMPLETE;
- if (sas_device->starget)
- dewtprintk(ioc, starget_printk(KERN_INFO,
- sas_device->starget,
- "sc_complete:handle(0x%04x), "
- "ioc_status(0x%04x), loginfo(0x%08x)\n",
- handle, le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo)));
- } else {
- dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
- "sc_complete:handle(0x%04x), "
- "ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, handle, le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo)));
- }
-
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "sc_complete:handle(0x%04x), (open) "
+ "smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply->DevHandle), smid,
+ le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo)));
return 1;
}
@@ -2709,87 +2674,63 @@ static u8
_scsih_tm_tr_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
u32 reply)
{
- unsigned long flags;
u16 handle;
- struct _sas_device *sas_device;
+ Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
Mpi2SCSITaskManagementReply_t *mpi_reply =
mpt2sas_base_get_reply_virt_addr(ioc, reply);
Mpi2SasIoUnitControlRequest_t *mpi_request;
u16 smid_sas_ctrl;
- struct MPT2SAS_TARGET *sas_target_priv_data;
struct _tr_list *delayed_tr;
- u8 rc;
- handle = le16_to_cpu(mpi_reply->DevHandle);
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-
- if (sas_device) {
- sas_device->state |= MPTSAS_STATE_TR_COMPLETE;
- if (sas_device->starget) {
- dewtprintk(ioc, starget_printk(KERN_INFO,
- sas_device->starget, "tr_complete:handle(0x%04x), "
- "(%s) ioc_status(0x%04x), loginfo(0x%08x), "
- "completed(%d)\n", sas_device->handle,
- (sas_device->state & MPT2SAS_REQ_SAS_CNTRL) ?
- "open" : "active",
- le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo),
- le32_to_cpu(mpi_reply->TerminationCount)));
- if (sas_device->starget->hostdata) {
- sas_target_priv_data =
- sas_device->starget->hostdata;
- sas_target_priv_data->tm_busy = 0;
- }
- }
- } else {
- dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
- "tr_complete:handle(0x%04x), (open) ioc_status(0x%04x), "
- "loginfo(0x%08x), completed(%d)\n", ioc->name,
- handle, le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo),
- le32_to_cpu(mpi_reply->TerminationCount)));
+ if (ioc->shost_recovery || ioc->remove_host) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host reset in "
+ "progress!\n", __func__, ioc->name));
+ return 1;
}
- if (!list_empty(&ioc->delayed_tr_list)) {
- delayed_tr = list_entry(ioc->delayed_tr_list.next,
- struct _tr_list, list);
- mpt2sas_base_free_smid(ioc, smid);
- if (delayed_tr->state & MPT2SAS_REQ_SAS_CNTRL)
- _scsih_tm_tr_send(ioc, delayed_tr->handle);
- list_del(&delayed_tr->list);
- kfree(delayed_tr);
- rc = 0; /* tells base_interrupt not to free mf */
- } else
- rc = 1;
-
- if (sas_device && !(sas_device->state & MPT2SAS_REQ_SAS_CNTRL))
- return rc;
-
- if (ioc->shost_recovery) {
- printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
- __func__, ioc->name);
- return rc;
+ mpi_request_tm = mpt2sas_base_get_msg_frame(ioc, smid);
+ handle = le16_to_cpu(mpi_request_tm->DevHandle);
+ if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "spurious interrupt: "
+ "handle(0x%04x:0x%04x), smid(%d)!!!\n", ioc->name, handle,
+ le16_to_cpu(mpi_reply->DevHandle), smid));
+ return 0;
}
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), "
+ "loginfo(0x%08x), completed(%d)\n", ioc->name,
+ handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo),
+ le32_to_cpu(mpi_reply->TerminationCount)));
+
smid_sas_ctrl = mpt2sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
if (!smid_sas_ctrl) {
printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
ioc->name, __func__);
- return rc;
+ return 1;
}
- if (sas_device)
- sas_device->state |= MPTSAS_STATE_CNTRL_SEND;
-
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "sc_send:handle(0x%04x), "
+ "(open), smid(%d), cb(%d)\n", ioc->name, handle, smid_sas_ctrl,
+ ioc->tm_sas_control_cb_idx));
mpi_request = mpt2sas_base_get_msg_frame(ioc, smid_sas_ctrl);
memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
- mpi_request->DevHandle = mpi_reply->DevHandle;
+ mpi_request->DevHandle = mpi_request_tm->DevHandle;
mpt2sas_base_put_smid_default(ioc, smid_sas_ctrl);
- return rc;
+
+ if (!list_empty(&ioc->delayed_tr_list)) {
+ delayed_tr = list_entry(ioc->delayed_tr_list.next,
+ struct _tr_list, list);
+ mpt2sas_base_free_smid(ioc, smid);
+ _scsih_tm_tr_send(ioc, delayed_tr->handle);
+ list_del(&delayed_tr->list);
+ kfree(delayed_tr);
+ return 0; /* tells base_interrupt not to free mf */
+ }
+ return 1;
}
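A hedged sketch of the deferred target-reset bookkeeping used by the two paths above: when no high-priority smid is free, _scsih_tm_tr_send() parks the handle on ioc->delayed_tr_list, and the completion path replays one entry per freed smid. The struct layout is inferred from the fields used here and is illustrative only:

/* struct _tr_list is assumed (from the fields used above) to carry a
 * list_head named "list" and a u16 "handle"; helper is illustrative */
static void example_replay_one_delayed_tr(struct MPT2SAS_ADAPTER *ioc)
{
	struct _tr_list *delayed_tr;

	if (list_empty(&ioc->delayed_tr_list))
		return;
	delayed_tr = list_entry(ioc->delayed_tr_list.next,
	    struct _tr_list, list);
	_scsih_tm_tr_send(ioc, delayed_tr->handle);
	list_del(&delayed_tr->list);
	kfree(delayed_tr);
}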
/**
@@ -3021,25 +2962,32 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
scmd->scsi_done = done;
sas_device_priv_data = scmd->device->hostdata;
- if (!sas_device_priv_data) {
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
scmd->result = DID_NO_CONNECT << 16;
scmd->scsi_done(scmd);
return 0;
}
sas_target_priv_data = sas_device_priv_data->sas_target;
- if (!sas_target_priv_data || sas_target_priv_data->handle ==
- MPT2SAS_INVALID_DEVICE_HANDLE || sas_target_priv_data->deleted) {
+ /* invalid device handle */
+ if (sas_target_priv_data->handle == MPT2SAS_INVALID_DEVICE_HANDLE) {
scmd->result = DID_NO_CONNECT << 16;
scmd->scsi_done(scmd);
return 0;
}
- /* see if we are busy with task managment stuff */
- if (sas_device_priv_data->block || sas_target_priv_data->tm_busy)
- return SCSI_MLQUEUE_DEVICE_BUSY;
- else if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress)
+ /* host recovery or link resets sent via IOCTLs */
+ if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress)
return SCSI_MLQUEUE_HOST_BUSY;
+ /* device busy with task management */
+ else if (sas_device_priv_data->block || sas_target_priv_data->tm_busy)
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+ /* device has been deleted */
+ else if (sas_target_priv_data->deleted) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
if (scmd->sc_data_direction == DMA_FROM_DEVICE)
mpi_control = MPI2_SCSIIO_CONTROL_READ;
@@ -3110,8 +3058,11 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
}
}
- mpt2sas_base_put_smid_scsi_io(ioc, smid,
- sas_device_priv_data->sas_target->handle);
+ if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST))
+ mpt2sas_base_put_smid_scsi_io(ioc, smid,
+ sas_device_priv_data->sas_target->handle);
+ else
+ mpt2sas_base_put_smid_default(ioc, smid);
return 0;
out:
@@ -3301,8 +3252,8 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
struct sense_info data;
_scsih_normalize_sense(scmd->sense_buffer, &data);
printk(MPT2SAS_WARN_FMT "\t[sense_key,asc,ascq]: "
- "[0x%02x,0x%02x,0x%02x]\n", ioc->name, data.skey,
- data.asc, data.ascq);
+ "[0x%02x,0x%02x,0x%02x], count(%d)\n", ioc->name, data.skey,
+ data.asc, data.ascq, le32_to_cpu(mpi_reply->SenseCount));
}
if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
@@ -3356,7 +3307,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
mpi_request.SlotStatus =
- MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT;
+ cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
mpi_request.DevHandle = cpu_to_le16(handle);
mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
if ((mpt2sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
@@ -4008,6 +3959,134 @@ _scsih_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address)
}
/**
+ * _scsih_check_access_status - check the device access status
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * @handle: sas device handle
+ * @access_status: errors returned during discovery of the device
+ *
+ * Return 0 for success, else failure
+ */
+static u8
+_scsih_check_access_status(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
+ u16 handle, u8 access_status)
+{
+ u8 rc = 1;
+ char *desc = NULL;
+
+ switch (access_status) {
+ case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
+ case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
+ rc = 0;
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
+ desc = "sata capability failed";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
+ desc = "sata affiliation conflict";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
+ desc = "route not addressable";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
+ desc = "smp error not addressable";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
+ desc = "device blocked";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
+ desc = "sata initialization failed";
+ break;
+ default:
+ desc = "unknown";
+ break;
+ }
+
+ if (!rc)
+ return 0;
+
+ printk(MPT2SAS_ERR_FMT "discovery errors(%s): sas_address(0x%016llx), "
+ "handle(0x%04x)\n", ioc->name, desc,
+ (unsigned long long)sas_address, handle);
+ return rc;
+}
+
+static void
+_scsih_check_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ struct _sas_device *sas_device;
+ u32 ioc_status;
+ unsigned long flags;
+ u64 sas_address;
+ struct scsi_target *starget;
+ struct MPT2SAS_TARGET *sas_target_priv_data;
+ u32 device_info;
+
+ if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
+ return;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ return;
+
+ /* check if this is end device */
+ device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
+ if (!(_scsih_is_end_device(device_info)))
+ return;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+ sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_address);
+
+ if (!sas_device) {
+ printk(MPT2SAS_ERR_FMT "device is not present "
+ "handle(0x%04x), no sas_device!!!\n", ioc->name, handle);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return;
+ }
+
+ if (unlikely(sas_device->handle != handle)) {
+ starget = sas_device->starget;
+ sas_target_priv_data = starget->hostdata;
+ starget_printk(KERN_INFO, starget, "handle changed from(0x%04x)"
+ " to (0x%04x)!!!\n", sas_device->handle, handle);
+ sas_target_priv_data->handle = handle;
+ sas_device->handle = handle;
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ /* check if device is present */
+ if (!(le16_to_cpu(sas_device_pg0.Flags) &
+ MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
+ printk(MPT2SAS_ERR_FMT "device is not present "
+ "handle(0x%04x), flags!!!\n", ioc->name, handle);
+ return;
+ }
+
+ /* check if there were any issues with discovery */
+ if (_scsih_check_access_status(ioc, sas_address, handle,
+ sas_device_pg0.AccessStatus))
+ return;
+ _scsih_ublock_io_device(ioc, handle);
+
+}
+
+/**
* _scsih_add_device - creating sas device object
* @ioc: per adapter object
* @handle: sas device handle
@@ -4045,6 +4124,8 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
return -1;
}
+ sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+
/* check if device is present */
if (!(le16_to_cpu(sas_device_pg0.Flags) &
MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
@@ -4055,15 +4136,10 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
return -1;
}
- /* check if there were any issus with discovery */
- if (sas_device_pg0.AccessStatus ==
- MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED) {
- printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- printk(MPT2SAS_ERR_FMT "AccessStatus = 0x%02x\n",
- ioc->name, sas_device_pg0.AccessStatus);
+ /* check if there were any issues with discovery */
+ if (_scsih_check_access_status(ioc, sas_address, handle,
+ sas_device_pg0.AccessStatus))
return -1;
- }
/* check if this is end device */
device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
@@ -4073,17 +4149,14 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
return -1;
}
- sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
sas_address);
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- if (sas_device) {
- _scsih_ublock_io_device(ioc, handle);
+ if (sas_device)
return 0;
- }
sas_device = kzalloc(sizeof(struct _sas_device),
GFP_KERNEL);
@@ -4126,67 +4199,38 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
}
/**
- * _scsih_remove_device - removing sas device object
+ * _scsih_remove_pd_device - removing sas device pd object
* @ioc: per adapter object
- * @sas_device: the sas_device object
+ * @sas_device: the sas_device object (passed by value)
*
+ * For hidden raid components, the driver-to-firmware handshake is
+ * done from the hotplug work threads.
* Return nothing.
*/
static void
-_scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, struct _sas_device
- *sas_device)
+_scsih_remove_pd_device(struct MPT2SAS_ADAPTER *ioc, struct _sas_device
+ sas_device)
{
- struct MPT2SAS_TARGET *sas_target_priv_data;
Mpi2SasIoUnitControlReply_t mpi_reply;
Mpi2SasIoUnitControlRequest_t mpi_request;
- u16 device_handle, handle;
-
- if (!sas_device)
- return;
+ u16 vol_handle, handle;
- handle = sas_device->handle;
+ handle = sas_device.handle;
dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter: handle(0x%04x),"
" sas_addr(0x%016llx)\n", ioc->name, __func__, handle,
- (unsigned long long) sas_device->sas_address));
-
- if (sas_device->starget && sas_device->starget->hostdata) {
- sas_target_priv_data = sas_device->starget->hostdata;
- sas_target_priv_data->deleted = 1;
- }
-
- if (ioc->remove_host || ioc->shost_recovery || !handle)
- goto out;
+ (unsigned long long) sas_device.sas_address));
- if ((sas_device->state & MPTSAS_STATE_TR_COMPLETE)) {
- dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "\tskip "
- "target_reset handle(0x%04x)\n", ioc->name,
- handle));
- goto skip_tr;
- }
-
- /* Target Reset to flush out all the outstanding IO */
- device_handle = (sas_device->hidden_raid_component) ?
- sas_device->volume_handle : handle;
- if (device_handle) {
- dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "issue target reset: "
- "handle(0x%04x)\n", ioc->name, device_handle));
- mutex_lock(&ioc->tm_cmds.mutex);
- mpt2sas_scsih_issue_tm(ioc, device_handle, 0,
- MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 10);
- ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
- mutex_unlock(&ioc->tm_cmds.mutex);
- dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "issue target reset "
- "done: handle(0x%04x)\n", ioc->name, device_handle));
- if (ioc->shost_recovery)
- goto out;
- }
- skip_tr:
-
- if ((sas_device->state & MPTSAS_STATE_CNTRL_COMPLETE)) {
- dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "\tskip "
- "sas_cntrl handle(0x%04x)\n", ioc->name, handle));
- goto out;
- }
+ vol_handle = sas_device.volume_handle;
+ if (!vol_handle)
+ return;
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "issue target reset: "
+ "handle(0x%04x)\n", ioc->name, vol_handle));
+ mpt2sas_scsih_issue_tm(ioc, vol_handle, 0, 0, 0,
+ MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30, NULL);
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "issue target reset "
+ "done: handle(0x%04x)\n", ioc->name, vol_handle));
+ if (ioc->shost_recovery)
+ return;
/* SAS_IO_UNIT_CNTR - send REMOVE_DEVICE */
dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "sas_iounit: handle"
@@ -4194,34 +4238,68 @@ _scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, struct _sas_device
memset(&mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
mpi_request.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
mpi_request.Operation = MPI2_SAS_OP_REMOVE_DEVICE;
- mpi_request.DevHandle = handle;
- mpi_request.VF_ID = 0; /* TODO */
- mpi_request.VP_ID = 0;
+ mpi_request.DevHandle = cpu_to_le16(handle);
if ((mpt2sas_base_sas_iounit_control(ioc, &mpi_reply,
- &mpi_request)) != 0) {
+ &mpi_request)) != 0)
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
- }
dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "sas_iounit: ioc_status"
"(0x%04x), loginfo(0x%08x)\n", ioc->name,
le16_to_cpu(mpi_reply.IOCStatus),
le32_to_cpu(mpi_reply.IOCLogInfo)));
- out:
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit: handle(0x%04x),"
+ " sas_addr(0x%016llx)\n", ioc->name, __func__, handle,
+ (unsigned long long) sas_device.sas_address));
+}
- _scsih_ublock_io_device(ioc, handle);
+/**
+ * _scsih_remove_device - removing sas device object
+ * @ioc: per adapter object
+ * @sas_device: the sas_device object
+ *
+ * Return nothing.
+ */
+static void
+_scsih_remove_device(struct MPT2SAS_ADAPTER *ioc,
+ struct _sas_device *sas_device)
+{
+ struct _sas_device sas_device_backup;
+ struct MPT2SAS_TARGET *sas_target_priv_data;
- mpt2sas_transport_port_remove(ioc, sas_device->sas_address,
- sas_device->sas_address_parent);
+ if (!sas_device)
+ return;
- printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), sas_addr"
- "(0x%016llx)\n", ioc->name, handle,
- (unsigned long long) sas_device->sas_address);
+ memcpy(&sas_device_backup, sas_device, sizeof(struct _sas_device));
_scsih_sas_device_remove(ioc, sas_device);
- dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit: handle"
- "(0x%04x)\n", ioc->name, __func__, handle));
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter: "
+ "handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__,
+ sas_device_backup.handle, (unsigned long long)
+ sas_device_backup.sas_address));
+
+ if (sas_device_backup.starget && sas_device_backup.starget->hostdata) {
+ sas_target_priv_data = sas_device_backup.starget->hostdata;
+ sas_target_priv_data->deleted = 1;
+ }
+
+ if (sas_device_backup.hidden_raid_component)
+ _scsih_remove_pd_device(ioc, sas_device_backup);
+
+ _scsih_ublock_io_device(ioc, sas_device_backup.handle);
+
+ mpt2sas_transport_port_remove(ioc, sas_device_backup.sas_address,
+ sas_device_backup.sas_address_parent);
+
+ printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), sas_addr"
+ "(0x%016llx)\n", ioc->name, sas_device_backup.handle,
+ (unsigned long long) sas_device_backup.sas_address);
+
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit: "
+ "handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__,
+ sas_device_backup.handle, (unsigned long long)
+ sas_device_backup.sas_address));
}
#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
@@ -4331,7 +4409,7 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
_scsih_sas_topology_change_event_debug(ioc, event_data);
#endif
- if (ioc->shost_recovery)
+ if (ioc->shost_recovery || ioc->remove_host)
return;
if (!ioc->sas_hba.num_phys)
@@ -4370,7 +4448,7 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
"expander event\n", ioc->name));
return;
}
- if (ioc->shost_recovery)
+ if (ioc->shost_recovery || ioc->remove_host)
return;
phy_number = event_data->StartPhyNum + i;
reason_code = event_data->PHY[i].PhyStatus &
@@ -4393,8 +4471,10 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
mpt2sas_transport_update_links(ioc, sas_address,
handle, phy_number, link_rate);
- if (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)
- _scsih_ublock_io_device(ioc, handle);
+ if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
+ break;
+
+ _scsih_check_device(ioc, handle);
break;
case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
@@ -4520,10 +4600,10 @@ _scsih_sas_device_status_change_event(struct MPT2SAS_ADAPTER *ioc,
event_data);
#endif
- if (!(event_data->ReasonCode ==
+ if (event_data->ReasonCode !=
MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
- event_data->ReasonCode ==
- MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET))
+ event_data->ReasonCode !=
+ MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
return;
spin_lock_irqsave(&ioc->sas_device_lock, flags);
@@ -4630,7 +4710,6 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: enter\n", ioc->name,
__func__));
- mutex_lock(&ioc->tm_cmds.mutex);
termination_count = 0;
query_count = 0;
mpi_reply = ioc->tm_cmds.reply;
@@ -4654,8 +4733,8 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
lun = sas_device_priv_data->lun;
query_count++;
- mpt2sas_scsih_issue_tm(ioc, handle, lun,
- MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30);
+ mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, NULL);
ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
& MPI2_IOCSTATUS_MASK;
@@ -4666,13 +4745,11 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC))
continue;
- mpt2sas_scsih_issue_tm(ioc, handle, lun,
- MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, 0, 30);
- ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
+ mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, 0, 30, NULL);
termination_count += le32_to_cpu(mpi_reply->TerminationCount);
}
ioc->broadcast_aen_busy = 0;
- mutex_unlock(&ioc->tm_cmds.mutex);
dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT
"%s - exit, query_count = %d termination_count = %d\n",
@@ -5442,6 +5519,26 @@ _scsih_task_set_full(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work
}
/**
+ * _scsih_prep_device_scan - initialize parameters prior to device scan
+ * @ioc: per adapter object
+ *
+ * Set the deleted flag prior to device scan. If the device is found during
+ * the scan, then we clear the deleted flag.
+ */
+static void
+_scsih_prep_device_scan(struct MPT2SAS_ADAPTER *ioc)
+{
+ struct MPT2SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ sas_device_priv_data = sdev->hostdata;
+ if (sas_device_priv_data && sas_device_priv_data->sas_target)
+ sas_device_priv_data->sas_target->deleted = 1;
+ }
+}
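A sketch of how the deleted flag set here is meant to bracket the post-reset rescan, assembled from the hunks in this patch; the ordering is illustrative only, not a verbatim call chain from the driver:

static void example_post_reset_rescan(struct MPT2SAS_ADAPTER *ioc)
{
	/* MPT2_IOC_DONE_RESET: mark every known target deleted ... */
	_scsih_prep_device_scan(ioc);
	/* ... then clear the flag again on everything that responds */
	_scsih_search_responding_sas_devices(ioc);
	_scsih_search_responding_raid_devices(ioc);
	_scsih_search_responding_expanders(ioc);
	/* the MPT2SAS_RESCAN_AFTER_HOST_RESET work item later removes
	 * whatever is still flagged deleted */
	_scsih_remove_unresponding_sas_devices(ioc);
}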
+
+/**
* _scsih_mark_responding_sas_device - mark a sas_devices as responding
* @ioc: per adapter object
* @sas_address: sas address
@@ -5467,10 +5564,13 @@ _scsih_mark_responding_sas_device(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
if (sas_device->sas_address == sas_address &&
sas_device->slot == slot && sas_device->starget) {
sas_device->responding = 1;
- sas_device->state = 0;
starget = sas_device->starget;
- sas_target_priv_data = starget->hostdata;
- sas_target_priv_data->tm_busy = 0;
+ if (starget && starget->hostdata) {
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->tm_busy = 0;
+ sas_target_priv_data->deleted = 0;
+ } else
+ sas_target_priv_data = NULL;
starget_printk(KERN_INFO, sas_device->starget,
"handle(0x%04x), sas_addr(0x%016llx), enclosure "
"logical id(0x%016llx), slot(%d)\n", handle,
@@ -5483,7 +5583,8 @@ _scsih_mark_responding_sas_device(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n",
sas_device->handle);
sas_device->handle = handle;
- sas_target_priv_data->handle = handle;
+ if (sas_target_priv_data)
+ sas_target_priv_data->handle = handle;
goto out;
}
}
@@ -5558,6 +5659,12 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
spin_lock_irqsave(&ioc->raid_device_lock, flags);
list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
if (raid_device->wwid == wwid && raid_device->starget) {
+ starget = raid_device->starget;
+ if (starget && starget->hostdata) {
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->deleted = 0;
+ } else
+ sas_target_priv_data = NULL;
raid_device->responding = 1;
starget_printk(KERN_INFO, raid_device->starget,
"handle(0x%04x), wwid(0x%016llx)\n", handle,
@@ -5567,9 +5674,8 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n",
raid_device->handle);
raid_device->handle = handle;
- starget = raid_device->starget;
- sas_target_priv_data = starget->hostdata;
- sas_target_priv_data->handle = handle;
+ if (sas_target_priv_data)
+ sas_target_priv_data->handle = handle;
goto out;
}
}
@@ -5694,13 +5800,13 @@ _scsih_search_responding_expanders(struct MPT2SAS_ADAPTER *ioc)
}
/**
- * _scsih_remove_unresponding_devices - removing unresponding devices
+ * _scsih_remove_unresponding_sas_devices - removing unresponsive devices
* @ioc: per adapter object
*
* Return nothing.
*/
static void
-_scsih_remove_unresponding_devices(struct MPT2SAS_ADAPTER *ioc)
+_scsih_remove_unresponding_sas_devices(struct MPT2SAS_ADAPTER *ioc)
{
struct _sas_device *sas_device, *sas_device_next;
struct _sas_node *sas_expander;
@@ -5722,8 +5828,6 @@ _scsih_remove_unresponding_devices(struct MPT2SAS_ADAPTER *ioc)
(unsigned long long)
sas_device->enclosure_logical_id,
sas_device->slot);
- /* invalidate the device handle */
- sas_device->handle = 0;
_scsih_remove_device(ioc, sas_device);
}
@@ -5774,32 +5878,33 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
case MPT2_IOC_PRE_RESET:
dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
"MPT2_IOC_PRE_RESET\n", ioc->name, __func__));
- _scsih_fw_event_off(ioc);
break;
case MPT2_IOC_AFTER_RESET:
dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
"MPT2_IOC_AFTER_RESET\n", ioc->name, __func__));
+ if (ioc->scsih_cmds.status & MPT2_CMD_PENDING) {
+ ioc->scsih_cmds.status |= MPT2_CMD_RESET;
+ mpt2sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
+ complete(&ioc->scsih_cmds.done);
+ }
if (ioc->tm_cmds.status & MPT2_CMD_PENDING) {
ioc->tm_cmds.status |= MPT2_CMD_RESET;
mpt2sas_base_free_smid(ioc, ioc->tm_cmds.smid);
complete(&ioc->tm_cmds.done);
}
- _scsih_fw_event_on(ioc);
+ _scsih_fw_event_cleanup_queue(ioc);
_scsih_flush_running_cmds(ioc);
+ _scsih_queue_rescan(ioc);
break;
case MPT2_IOC_DONE_RESET:
dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
"MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
_scsih_sas_host_refresh(ioc);
+ _scsih_prep_device_scan(ioc);
_scsih_search_responding_sas_devices(ioc);
_scsih_search_responding_raid_devices(ioc);
_scsih_search_responding_expanders(ioc);
break;
- case MPT2_IOC_RUNNING:
- dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
- "MPT2_IOC_RUNNING\n", ioc->name, __func__));
- _scsih_remove_unresponding_devices(ioc);
- break;
}
}
@@ -5815,21 +5920,28 @@ static void
_firmware_event_work(struct work_struct *work)
{
struct fw_event_work *fw_event = container_of(work,
- struct fw_event_work, work);
+ struct fw_event_work, delayed_work.work);
unsigned long flags;
struct MPT2SAS_ADAPTER *ioc = fw_event->ioc;
/* the queue is being flushed so ignore this event */
- spin_lock_irqsave(&ioc->fw_event_lock, flags);
- if (ioc->fw_events_off || ioc->remove_host) {
- spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+ if (ioc->remove_host || fw_event->cancel_pending_work) {
_scsih_fw_event_free(ioc, fw_event);
return;
}
- spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
- if (ioc->shost_recovery) {
- _scsih_fw_event_requeue(ioc, fw_event, 1000);
+ if (fw_event->event == MPT2SAS_RESCAN_AFTER_HOST_RESET) {
+ _scsih_fw_event_free(ioc, fw_event);
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+ if (ioc->shost_recovery) {
+ init_completion(&ioc->shost_recovery_done);
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock,
+ flags);
+ wait_for_completion(&ioc->shost_recovery_done);
+ } else
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock,
+ flags);
+ _scsih_remove_unresponding_sas_devices(ioc);
return;
}
@@ -5891,16 +6003,12 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
{
struct fw_event_work *fw_event;
Mpi2EventNotificationReply_t *mpi_reply;
- unsigned long flags;
u16 event;
+ u16 sz;
/* events turned off due to host reset or driver unloading */
- spin_lock_irqsave(&ioc->fw_event_lock, flags);
- if (ioc->fw_events_off || ioc->remove_host) {
- spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+ if (ioc->remove_host)
return 1;
- }
- spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
event = le16_to_cpu(mpi_reply->Event);
@@ -5947,8 +6055,8 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
ioc->name, __FILE__, __LINE__, __func__);
return 1;
}
- fw_event->event_data =
- kzalloc(mpi_reply->EventDataLength*4, GFP_ATOMIC);
+ sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
+ fw_event->event_data = kzalloc(sz, GFP_ATOMIC);
if (!fw_event->event_data) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
@@ -5957,7 +6065,7 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
}
memcpy(fw_event->event_data, mpi_reply->EventData,
- mpi_reply->EventDataLength*4);
+ sz);
fw_event->ioc = ioc;
fw_event->VF_ID = mpi_reply->VF_ID;
fw_event->VP_ID = mpi_reply->VP_ID;
@@ -6158,6 +6266,18 @@ _scsih_shutdown(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+ struct workqueue_struct *wq;
+ unsigned long flags;
+
+ ioc->remove_host = 1;
+ _scsih_fw_event_cleanup_queue(ioc);
+
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ wq = ioc->firmware_event_thread;
+ ioc->firmware_event_thread = NULL;
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+ if (wq)
+ destroy_workqueue(wq);
_scsih_ir_shutdown(ioc);
mpt2sas_base_detach(ioc);
@@ -6184,7 +6304,7 @@ _scsih_remove(struct pci_dev *pdev)
unsigned long flags;
ioc->remove_host = 1;
- _scsih_fw_event_off(ioc);
+ _scsih_fw_event_cleanup_queue(ioc);
spin_lock_irqsave(&ioc->fw_event_lock, flags);
wq = ioc->firmware_event_thread;
@@ -6557,6 +6677,122 @@ _scsih_resume(struct pci_dev *pdev)
}
#endif /* CONFIG_PM */
+/**
+ * _scsih_pci_error_detected - Called when a PCI error is detected.
+ * @pdev: PCI device struct
+ * @state: PCI channel state
+ *
+ * Description: Called when a PCI error is detected.
+ *
+ * Return value:
+ * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
+ */
+static pci_ers_result_t
+_scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+ printk(MPT2SAS_INFO_FMT "PCI error: detected callback, state(%d)!!\n",
+ ioc->name, state);
+
+ switch (state) {
+ case pci_channel_io_normal:
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ case pci_channel_io_frozen:
+ scsi_block_requests(ioc->shost);
+ mpt2sas_base_stop_watchdog(ioc);
+ mpt2sas_base_free_resources(ioc);
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ _scsih_remove(pdev);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * _scsih_pci_slot_reset - Called when PCI slot has been reset.
+ * @pdev: PCI device struct
+ *
+ * Description: This routine is called by the pci error recovery
+ * code after the PCI slot has been reset, just before we
+ * should resume normal operations.
+ */
+static pci_ers_result_t
+_scsih_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+ int rc;
+
+ printk(MPT2SAS_INFO_FMT "PCI error: slot reset callback!!\n",
+ ioc->name);
+
+ ioc->pdev = pdev;
+ rc = mpt2sas_base_map_resources(ioc);
+ if (rc)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+
+ rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+
+ printk(MPT2SAS_WARN_FMT "hard reset: %s\n", ioc->name,
+ (rc == 0) ? "success" : "failed");
+
+ if (!rc)
+ return PCI_ERS_RESULT_RECOVERED;
+ else
+ return PCI_ERS_RESULT_DISCONNECT;
+}
+
+/**
+ * _scsih_pci_resume() - resume normal ops after PCI reset
+ * @pdev: pointer to PCI device
+ *
+ * Called when the error recovery driver tells us that it is
+ * OK to resume normal operation. Use completion to allow
+ * halted scsi ops to resume.
+ */
+static void
+_scsih_pci_resume(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+ printk(MPT2SAS_INFO_FMT "PCI error: resume callback!!\n", ioc->name);
+
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+ mpt2sas_base_start_watchdog(ioc);
+ scsi_unblock_requests(ioc->shost);
+}
+
+/**
+ * _scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
+ * @pdev: pointer to PCI device
+ */
+static pci_ers_result_t
+_scsih_pci_mmio_enabled(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+ printk(MPT2SAS_INFO_FMT "PCI error: mmio enabled callback!!\n",
+ ioc->name);
+
+ /* TODO - dump whatever for debugging purposes */
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static struct pci_error_handlers _scsih_err_handler = {
+ .error_detected = _scsih_pci_error_detected,
+ .mmio_enabled = _scsih_pci_mmio_enabled,
+ .slot_reset = _scsih_pci_slot_reset,
+ .resume = _scsih_pci_resume,
+};
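For reference, a hedged sketch of the order in which the PCI AER core is generally expected to invoke the handlers registered above; this sequence is inferred from standard AER recovery and is not spelled out in this patch:

/*
 * 1. error_detected(pdev, state)
 *      pci_channel_io_frozen  -> block I/O, free resources,
 *                                return PCI_ERS_RESULT_NEED_RESET
 * 2. mmio_enabled(pdev)         only after PCI_ERS_RESULT_CAN_RECOVER
 * 3. slot_reset(pdev)           remap resources, hard reset the IOC
 * 4. resume(pdev)               clear AER status, unblock I/O
 */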
static struct pci_driver scsih_driver = {
.name = MPT2SAS_DRIVER_NAME,
@@ -6564,6 +6800,7 @@ static struct pci_driver scsih_driver = {
.probe = _scsih_probe,
.remove = __devexit_p(_scsih_remove),
.shutdown = _scsih_shutdown,
+ .err_handler = &_scsih_err_handler,
#ifdef CONFIG_PM
.suspend = _scsih_suspend,
.resume = _scsih_resume,
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index bd7ca2b49f81..2727c3b65104 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -2,7 +2,7 @@
* SAS Transport Layer for MPT (Message Passing Technology) based controllers
*
* This code is based on drivers/scsi/mpt2sas/mpt2_transport.c
- * Copyright (C) 2007-2009 LSI Corporation
+ * Copyright (C) 2007-2010 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -465,6 +465,85 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
return rc;
}
+
+/**
+ * _transport_delete_duplicate_port - (see description below)
+ * @ioc: per adapter object
+ * @sas_node: sas node object (either expander or sas host)
+ * @sas_address: sas address of device being added
+ * @phy_num: phy number
+ *
+ * This function is called when attempting to add a new port that is claiming
+ * the same phy resources already in use by another port. If we don't release
+ * the claimed phy resources, the sas transport layer will hang from the BUG
+ * in sas_port_add_phy.
+ *
+ * The reason we would hit this issue is because someone is changing the
+ * sas address of a device on the fly, while controller firmware sends
+ * EVENTs out of order when removing the previous instance of the device.
+ */
+static void
+_transport_delete_duplicate_port(struct MPT2SAS_ADAPTER *ioc,
+ struct _sas_node *sas_node, u64 sas_address, int phy_num)
+{
+ struct _sas_port *mpt2sas_port, *mpt2sas_port_duplicate;
+ struct _sas_phy *mpt2sas_phy;
+
+ printk(MPT2SAS_ERR_FMT "new device located at sas_addr(0x%016llx), "
+ "phy_id(%d)\n", ioc->name, (unsigned long long)sas_address,
+ phy_num);
+
+ mpt2sas_port_duplicate = NULL;
+ list_for_each_entry(mpt2sas_port, &sas_node->sas_port_list, port_list) {
+ dev_printk(KERN_ERR, &mpt2sas_port->port->dev,
+ "existing device at sas_addr(0x%016llx), num_phys(%d)\n",
+ (unsigned long long)
+ mpt2sas_port->remote_identify.sas_address,
+ mpt2sas_port->num_phys);
+ list_for_each_entry(mpt2sas_phy, &mpt2sas_port->phy_list,
+ port_siblings) {
+ dev_printk(KERN_ERR, &mpt2sas_phy->phy->dev,
+ "phy_number(%d)\n", mpt2sas_phy->phy_id);
+ if (mpt2sas_phy->phy_id == phy_num)
+ mpt2sas_port_duplicate = mpt2sas_port;
+ }
+ }
+
+ if (!mpt2sas_port_duplicate)
+ return;
+
+ dev_printk(KERN_ERR, &mpt2sas_port_duplicate->port->dev,
+ "deleting duplicate device at sas_addr(0x%016llx), phy(%d)!!!!\n",
+ (unsigned long long)
+ mpt2sas_port_duplicate->remote_identify.sas_address, phy_num);
+ ioc->logging_level |= MPT_DEBUG_TRANSPORT;
+ mpt2sas_transport_port_remove(ioc,
+ mpt2sas_port_duplicate->remote_identify.sas_address,
+ sas_node->sas_address);
+ ioc->logging_level &= ~MPT_DEBUG_TRANSPORT;
+}
+
+/**
+ * _transport_sanity_check - sanity check when adding a new port
+ * @ioc: per adapter object
+ * @sas_node: sas node object (either expander or sas host)
+ * @sas_address: sas address of device being added
+ *
+ * See the explanation above in _transport_delete_duplicate_port()
+ */
+static void
+_transport_sanity_check(struct MPT2SAS_ADAPTER *ioc, struct _sas_node *sas_node,
+ u64 sas_address)
+{
+ int i;
+
+ for (i = 0; i < sas_node->num_phys; i++)
+ if (sas_node->phy[i].remote_identify.sas_address == sas_address)
+ if (sas_node->phy[i].phy_belongs_to_port)
+ _transport_delete_duplicate_port(ioc, sas_node,
+ sas_address, i);
+}
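A hedged sketch of the phy_belongs_to_port bookkeeping that makes this check possible: the flag is set when a phy is added to a transport port and cleared on removal, as done further down in this patch. The helpers below are illustrative only:

static void example_attach_phy(struct sas_port *port,
	struct _sas_phy *mpt2sas_phy)
{
	sas_port_add_phy(port, mpt2sas_phy->phy);
	mpt2sas_phy->phy_belongs_to_port = 1;
}

static void example_detach_phy(struct sas_port *port,
	struct _sas_phy *mpt2sas_phy)
{
	mpt2sas_phy->phy_belongs_to_port = 0;
	sas_port_delete_phy(port, mpt2sas_phy->phy);
}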
+
/**
* mpt2sas_transport_port_add - insert port to the list
* @ioc: per adapter object
@@ -522,6 +601,9 @@ mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle,
goto out_fail;
}
+ _transport_sanity_check(ioc, sas_node,
+ mpt2sas_port->remote_identify.sas_address);
+
for (i = 0; i < sas_node->num_phys; i++) {
if (sas_node->phy[i].remote_identify.sas_address !=
mpt2sas_port->remote_identify.sas_address)
@@ -553,6 +635,7 @@ mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle,
mpt2sas_port->remote_identify.sas_address,
mpt2sas_phy->phy_id);
sas_port_add_phy(port, mpt2sas_phy->phy);
+ mpt2sas_phy->phy_belongs_to_port = 1;
}
mpt2sas_port->port = port;
@@ -651,6 +734,7 @@ mpt2sas_transport_port_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
(unsigned long long)
mpt2sas_port->remote_identify.sas_address,
mpt2sas_phy->phy_id);
+ mpt2sas_phy->phy_belongs_to_port = 0;
sas_port_delete_phy(mpt2sas_port->port, mpt2sas_phy->phy);
list_del(&mpt2sas_phy->port_siblings);
}
@@ -1341,7 +1425,8 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
memcpy(req->sense, mpi_reply, sizeof(*mpi_reply));
req->sense_len = sizeof(*mpi_reply);
req->resid_len = 0;
- rsp->resid_len -= mpi_reply->ResponseDataLength;
+ rsp->resid_len -=
+ le16_to_cpu(mpi_reply->ResponseDataLength);
} else {
dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT
"%s - no reply\n", ioc->name, __func__));
diff --git a/drivers/scsi/mvme147.c b/drivers/scsi/mvme147.c
index d722235111a8..716d1785cda7 100644
--- a/drivers/scsi/mvme147.c
+++ b/drivers/scsi/mvme147.c
@@ -13,112 +13,116 @@
#include "wd33c93.h"
#include "mvme147.h"
-#include<linux/stat.h>
+#include <linux/stat.h>
-#define HDATA(ptr) ((struct WD33C93_hostdata *)((ptr)->hostdata))
static struct Scsi_Host *mvme147_host = NULL;
-static irqreturn_t mvme147_intr (int irq, void *dummy)
+static irqreturn_t mvme147_intr(int irq, void *dummy)
{
- if (irq == MVME147_IRQ_SCSI_PORT)
- wd33c93_intr (mvme147_host);
- else
- m147_pcc->dma_intr = 0x89; /* Ack and enable ints */
- return IRQ_HANDLED;
+ if (irq == MVME147_IRQ_SCSI_PORT)
+ wd33c93_intr(mvme147_host);
+ else
+ m147_pcc->dma_intr = 0x89; /* Ack and enable ints */
+ return IRQ_HANDLED;
}
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
- unsigned char flags = 0x01;
- unsigned long addr = virt_to_bus(cmd->SCp.ptr);
-
- /* setup dma direction */
- if (!dir_in)
- flags |= 0x04;
-
- /* remember direction */
- HDATA(mvme147_host)->dma_dir = dir_in;
-
- if (dir_in)
- /* invalidate any cache */
- cache_clear (addr, cmd->SCp.this_residual);
- else
- /* push any dirty cache */
- cache_push (addr, cmd->SCp.this_residual);
-
- /* start DMA */
- m147_pcc->dma_bcr = cmd->SCp.this_residual | (1<<24);
- m147_pcc->dma_dadr = addr;
- m147_pcc->dma_cntrl = flags;
-
- /* return success */
- return 0;
+ struct WD33C93_hostdata *hdata = shost_priv(mvme147_host);
+ unsigned char flags = 0x01;
+ unsigned long addr = virt_to_bus(cmd->SCp.ptr);
+
+ /* setup dma direction */
+ if (!dir_in)
+ flags |= 0x04;
+
+ /* remember direction */
+ hdata->dma_dir = dir_in;
+
+ if (dir_in) {
+ /* invalidate any cache */
+ cache_clear(addr, cmd->SCp.this_residual);
+ } else {
+ /* push any dirty cache */
+ cache_push(addr, cmd->SCp.this_residual);
+ }
+
+ /* start DMA */
+ m147_pcc->dma_bcr = cmd->SCp.this_residual | (1 << 24);
+ m147_pcc->dma_dadr = addr;
+ m147_pcc->dma_cntrl = flags;
+
+ /* return success */
+ return 0;
}
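shost_priv() is the generic accessor this conversion relies on; a minimal self-contained sketch of the pattern, with hypothetical names (assumes <scsi/scsi_host.h>):

struct example_hostdata {
	int dma_dir;			/* driver-private state */
};

static int example_init(struct Scsi_Host *host)
{
	/* scsi_register()/scsi_host_alloc() sized the hostdata area;
	 * shost_priv() hands back a pointer to it */
	struct example_hostdata *hd = shost_priv(host);

	hd->dma_dir = 0;
	return 0;
}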
static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
- int status)
+ int status)
{
- m147_pcc->dma_cntrl = 0;
+ m147_pcc->dma_cntrl = 0;
}
int mvme147_detect(struct scsi_host_template *tpnt)
{
- static unsigned char called = 0;
- wd33c93_regs regs;
-
- if (!MACH_IS_MVME147 || called)
- return 0;
- called++;
-
- tpnt->proc_name = "MVME147";
- tpnt->proc_info = &wd33c93_proc_info;
-
- mvme147_host = scsi_register (tpnt, sizeof(struct WD33C93_hostdata));
- if (!mvme147_host)
- goto err_out;
-
- mvme147_host->base = 0xfffe4000;
- mvme147_host->irq = MVME147_IRQ_SCSI_PORT;
- regs.SASR = (volatile unsigned char *)0xfffe4000;
- regs.SCMD = (volatile unsigned char *)0xfffe4001;
- HDATA(mvme147_host)->no_sync = 0xff;
- HDATA(mvme147_host)->fast = 0;
- HDATA(mvme147_host)->dma_mode = CTRL_DMA;
- wd33c93_init(mvme147_host, regs, dma_setup, dma_stop, WD33C93_FS_8_10);
-
- if (request_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr, 0, "MVME147 SCSI PORT", mvme147_intr))
- goto err_unregister;
- if (request_irq(MVME147_IRQ_SCSI_DMA, mvme147_intr, 0, "MVME147 SCSI DMA", mvme147_intr))
- goto err_free_irq;
+ static unsigned char called = 0;
+ wd33c93_regs regs;
+ struct WD33C93_hostdata *hdata;
+
+ if (!MACH_IS_MVME147 || called)
+ return 0;
+ called++;
+
+ tpnt->proc_name = "MVME147";
+ tpnt->proc_info = &wd33c93_proc_info;
+
+ mvme147_host = scsi_register(tpnt, sizeof(struct WD33C93_hostdata));
+ if (!mvme147_host)
+ goto err_out;
+
+ mvme147_host->base = 0xfffe4000;
+ mvme147_host->irq = MVME147_IRQ_SCSI_PORT;
+ regs.SASR = (volatile unsigned char *)0xfffe4000;
+ regs.SCMD = (volatile unsigned char *)0xfffe4001;
+ hdata = shost_priv(mvme147_host);
+ hdata->no_sync = 0xff;
+ hdata->fast = 0;
+ hdata->dma_mode = CTRL_DMA;
+ wd33c93_init(mvme147_host, regs, dma_setup, dma_stop, WD33C93_FS_8_10);
+
+ if (request_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr, 0,
+ "MVME147 SCSI PORT", mvme147_intr))
+ goto err_unregister;
+ if (request_irq(MVME147_IRQ_SCSI_DMA, mvme147_intr, 0,
+ "MVME147 SCSI DMA", mvme147_intr))
+ goto err_free_irq;
#if 0 /* Disabled; causes problems booting */
- m147_pcc->scsi_interrupt = 0x10; /* Assert SCSI bus reset */
- udelay(100);
- m147_pcc->scsi_interrupt = 0x00; /* Negate SCSI bus reset */
- udelay(2000);
- m147_pcc->scsi_interrupt = 0x40; /* Clear bus reset interrupt */
+ m147_pcc->scsi_interrupt = 0x10; /* Assert SCSI bus reset */
+ udelay(100);
+ m147_pcc->scsi_interrupt = 0x00; /* Negate SCSI bus reset */
+ udelay(2000);
+ m147_pcc->scsi_interrupt = 0x40; /* Clear bus reset interrupt */
#endif
- m147_pcc->scsi_interrupt = 0x09; /* Enable interrupt */
+ m147_pcc->scsi_interrupt = 0x09; /* Enable interrupt */
- m147_pcc->dma_cntrl = 0x00; /* ensure DMA is stopped */
- m147_pcc->dma_intr = 0x89; /* Ack and enable ints */
+ m147_pcc->dma_cntrl = 0x00; /* ensure DMA is stopped */
+ m147_pcc->dma_intr = 0x89; /* Ack and enable ints */
- return 1;
+ return 1;
- err_free_irq:
- free_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr);
- err_unregister:
- wd33c93_release();
- scsi_unregister(mvme147_host);
- err_out:
- return 0;
+err_free_irq:
+ free_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr);
+err_unregister:
+ scsi_unregister(mvme147_host);
+err_out:
+ return 0;
}
static int mvme147_bus_reset(struct scsi_cmnd *cmd)
{
/* FIXME perform bus-specific reset */
- /* FIXME 2: kill this function, and let midlayer fallback to
+ /* FIXME 2: kill this function, and let midlayer fallback to
the same result, calling wd33c93_host_reset() */
spin_lock_irq(cmd->device->host->host_lock);
@@ -154,10 +158,9 @@ static struct scsi_host_template driver_template = {
int mvme147_release(struct Scsi_Host *instance)
{
#ifdef MODULE
- /* XXX Make sure DMA is stopped! */
- wd33c93_release();
- free_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr);
- free_irq(MVME147_IRQ_SCSI_DMA, mvme147_intr);
+ /* XXX Make sure DMA is stopped! */
+ free_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr);
+ free_irq(MVME147_IRQ_SCSI_DMA, mvme147_intr);
#endif
- return 1;
+ return 1;
}
diff --git a/drivers/scsi/mvme147.h b/drivers/scsi/mvme147.h
index 32aee85434d8..bfd4566ef050 100644
--- a/drivers/scsi/mvme147.h
+++ b/drivers/scsi/mvme147.h
@@ -14,11 +14,11 @@ int mvme147_detect(struct scsi_host_template *);
int mvme147_release(struct Scsi_Host *);
#ifndef CMD_PER_LUN
-#define CMD_PER_LUN 2
+#define CMD_PER_LUN 2
#endif
#ifndef CAN_QUEUE
-#define CAN_QUEUE 16
+#define CAN_QUEUE 16
#endif
#endif /* MVME147_H */
diff --git a/drivers/scsi/mvsas/mv_64xx.c b/drivers/scsi/mvsas/mv_64xx.c
index 10a5077b6aed..afc7f6f3a13e 100644
--- a/drivers/scsi/mvsas/mv_64xx.c
+++ b/drivers/scsi/mvsas/mv_64xx.c
@@ -132,9 +132,9 @@ static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
tmp &= ~PHYEV_RDY_CH;
mvs_write_port_irq_stat(mvi, phy_id, tmp);
tmp = mvs_read_phy_ctl(mvi, phy_id);
- if (hard)
+ if (hard == 1)
tmp |= PHY_RST_HARD;
- else
+ else if (hard == 0)
tmp |= PHY_RST;
mvs_write_phy_ctl(mvi, phy_id, tmp);
if (hard) {
@@ -144,6 +144,26 @@ static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
}
}
+void mvs_64xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all)
+{
+ void __iomem *regs = mvi->regs;
+ u32 tmp;
+ if (clear_all) {
+ tmp = mr32(MVS_INT_STAT_SRS_0);
+ if (tmp) {
+ printk(KERN_DEBUG "check SRS 0 %08X.\n", tmp);
+ mw32(MVS_INT_STAT_SRS_0, tmp);
+ }
+ } else {
+ tmp = mr32(MVS_INT_STAT_SRS_0);
+ if (tmp & (1 << (reg_set % 32))) {
+ printk(KERN_DEBUG "register set 0x%x was stopped.\n",
+ reg_set);
+ mw32(MVS_INT_STAT_SRS_0, 1 << (reg_set % 32));
+ }
+ }
+}
+
static int __devinit mvs_64xx_chip_reset(struct mvs_info *mvi)
{
void __iomem *regs = mvi->regs;
@@ -761,6 +781,7 @@ const struct mvs_dispatch mvs_64xx_dispatch = {
mvs_write_port_irq_mask,
mvs_get_sas_addr,
mvs_64xx_command_active,
+ mvs_64xx_clear_srs_irq,
mvs_64xx_issue_stop,
mvs_start_delivery,
mvs_rx_update,
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
index 0940fae19d20..eed4c5c72013 100644
--- a/drivers/scsi/mvsas/mv_94xx.c
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -616,6 +616,15 @@ void mvs_94xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
}
#endif
+/*
+ * FIXME JEJB: temporary nop clear_srs_irq to make 94xx still work
+ * with 64xx fixes
+ */
+static void mvs_94xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set,
+ u8 clear_all)
+{
+}
+
const struct mvs_dispatch mvs_94xx_dispatch = {
"mv94xx",
mvs_94xx_init,
@@ -640,6 +649,7 @@ const struct mvs_dispatch mvs_94xx_dispatch = {
mvs_write_port_irq_mask,
mvs_get_sas_addr,
mvs_94xx_command_active,
+ mvs_94xx_clear_srs_irq,
mvs_94xx_issue_stop,
mvs_start_delivery,
mvs_rx_update,
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index cae6b2cf492f..19ad34f381a5 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -37,6 +37,7 @@ static const struct mvs_chip_info mvs_chips[] = {
};
#define SOC_SAS_NUM 2
+#define SG_MX 64
static struct scsi_host_template mvs_sht = {
.module = THIS_MODULE,
@@ -53,10 +54,10 @@ static struct scsi_host_template mvs_sht = {
.can_queue = 1,
.cmd_per_lun = 1,
.this_id = -1,
- .sg_tablesize = SG_ALL,
+ .sg_tablesize = SG_MX,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.use_clustering = ENABLE_CLUSTERING,
- .eh_device_reset_handler = sas_eh_device_reset_handler,
+ .eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_bus_reset_handler = sas_eh_bus_reset_handler,
.slave_alloc = mvs_slave_alloc,
.target_destroy = sas_target_destroy,
@@ -65,19 +66,17 @@ static struct scsi_host_template mvs_sht = {
static struct sas_domain_function_template mvs_transport_ops = {
.lldd_dev_found = mvs_dev_found,
- .lldd_dev_gone = mvs_dev_gone,
-
+ .lldd_dev_gone = mvs_dev_gone,
.lldd_execute_task = mvs_queue_command,
.lldd_control_phy = mvs_phy_control,
.lldd_abort_task = mvs_abort_task,
.lldd_abort_task_set = mvs_abort_task_set,
.lldd_clear_aca = mvs_clear_aca,
- .lldd_clear_task_set = mvs_clear_task_set,
+ .lldd_clear_task_set = mvs_clear_task_set,
.lldd_I_T_nexus_reset = mvs_I_T_nexus_reset,
.lldd_lu_reset = mvs_lu_reset,
.lldd_query_task = mvs_query_task,
-
.lldd_port_formed = mvs_port_formed,
.lldd_port_deformed = mvs_port_deformed,
@@ -213,7 +212,7 @@ static irqreturn_t mvs_interrupt(int irq, void *opaque)
static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
{
- int i, slot_nr;
+ int i = 0, slot_nr;
if (mvi->flags & MVF_FLAG_SOC)
slot_nr = MVS_SOC_SLOTS;
@@ -232,6 +231,7 @@ static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
mvi->devices[i].dev_type = NO_DEVICE;
mvi->devices[i].device_id = i;
mvi->devices[i].dev_status = MVS_DEV_NORMAL;
+ init_timer(&mvi->devices[i].timer);
}
/*
@@ -437,6 +437,7 @@ static int __devinit mvs_prep_sas_ha_init(struct Scsi_Host *shost,
sha->sas_phy = arr_phy;
sha->sas_port = arr_port;
+ sha->core.shost = shost;
sha->lldd_ha = kzalloc(sizeof(struct mvs_prv_info), GFP_KERNEL);
if (!sha->lldd_ha)
@@ -574,6 +575,10 @@ static int __devinit mvs_pci_init(struct pci_dev *pdev,
}
nhost++;
} while (nhost < chip->n_host);
+#ifdef MVS_USE_TASKLET
+ tasklet_init(&mv_tasklet, mvs_tasklet,
+ (unsigned long)SHOST_TO_SAS_HA(shost));
+#endif
mvs_post_sas_ha_init(shost, chip);
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 0d2138641214..f5e321791903 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -259,8 +259,6 @@ static inline void mvs_free_reg_set(struct mvs_info *mvi,
mv_printk("device has been free.\n");
return;
}
- if (dev->runing_req != 0)
- return;
if (dev->taskfileset == MVS_ID_NOT_MAPPED)
return;
MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset);
@@ -762,8 +760,6 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
}
if (is_tmf)
flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT);
- else
- flags |= (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT);
hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT));
hdr->tags = cpu_to_le32(tag);
hdr->data_len = cpu_to_le32(task->total_xfer_len);
@@ -878,14 +874,15 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
struct mvs_slot_info *slot;
u32 tag = 0xdeadbeef, rc, n_elem = 0;
u32 n = num, pass = 0;
- unsigned long flags = 0;
+ unsigned long flags = 0, flags_libsas = 0;
if (!dev->port) {
struct task_status_struct *tsm = &t->task_status;
tsm->resp = SAS_TASK_UNDELIVERED;
tsm->stat = SAS_PHY_DOWN;
- t->task_done(t);
+ if (dev->dev_type != SATA_DEV)
+ t->task_done(t);
return 0;
}
@@ -910,12 +907,25 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
else
tei.port = &mvi->port[dev->port->id];
- if (!tei.port->port_attached) {
+ if (tei.port && !tei.port->port_attached) {
if (sas_protocol_ata(t->task_proto)) {
+ struct task_status_struct *ts = &t->task_status;
+
mv_dprintk("port %d does not"
"attached device.\n", dev->port->id);
- rc = SAS_PHY_DOWN;
- goto out_done;
+ ts->stat = SAS_PROTO_RESPONSE;
+ ts->stat = SAS_PHY_DOWN;
+ spin_unlock_irqrestore(dev->sata_dev.ap->lock,
+ flags_libsas);
+ spin_unlock_irqrestore(&mvi->lock, flags);
+ t->task_done(t);
+ spin_lock_irqsave(&mvi->lock, flags);
+ spin_lock_irqsave(dev->sata_dev.ap->lock,
+ flags_libsas);
+ if (n > 1)
+ t = list_entry(t->list.next,
+ struct sas_task, list);
+ continue;
} else {
struct task_status_struct *ts = &t->task_status;
ts->resp = SAS_TASK_UNDELIVERED;
@@ -973,8 +983,8 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
break;
default:
dev_printk(KERN_ERR, mvi->dev,
- "unknown sas_task proto: 0x%x\n",
- t->task_proto);
+ "unknown sas_task proto: 0x%x\n",
+ t->task_proto);
rc = -EINVAL;
break;
}
@@ -993,11 +1003,15 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
spin_unlock(&t->task_state_lock);
mvs_hba_memory_dump(mvi, tag, t->task_proto);
- mvi_dev->runing_req++;
+ mvi_dev->running_req++;
++pass;
mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
if (n > 1)
t = list_entry(t->list.next, struct sas_task, list);
+ if (likely(pass))
+ MVS_CHIP_DISP->start_delivery(mvi, (mvi->tx_prod - 1) &
+ (MVS_CHIP_SLOT_SZ - 1));
+
} while (--n);
rc = 0;
goto out_done;
@@ -1012,10 +1026,6 @@ err_out:
dma_unmap_sg(mvi->dev, t->scatter, n_elem,
t->data_dir);
out_done:
- if (likely(pass)) {
- MVS_CHIP_DISP->start_delivery(mvi,
- (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
- }
spin_unlock_irqrestore(&mvi->lock, flags);
return rc;
}
@@ -1187,7 +1197,7 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
MVS_CHIP_DISP->phy_reset(mvi, i, 0);
goto out_done;
}
- } else if (phy->phy_type & PORT_TYPE_SAS
+ } else if (phy->phy_type & PORT_TYPE_SAS
|| phy->att_dev_info & PORT_SSP_INIT_MASK) {
phy->phy_attached = 1;
phy->identify.device_type =
@@ -1256,7 +1266,20 @@ static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock)
{
- /*Nothing*/
+ struct domain_device *dev;
+ struct mvs_phy *phy = sas_phy->lldd_phy;
+ struct mvs_info *mvi = phy->mvi;
+ struct asd_sas_port *port = sas_phy->port;
+ int phy_no = 0;
+
+ while (phy != &mvi->phy[phy_no]) {
+ phy_no++;
+ if (phy_no >= MVS_MAX_PHYS)
+ return;
+ }
+ list_for_each_entry(dev, &port->dev_list, dev_list_node)
+ mvs_do_release_task(phy->mvi, phy_no, NULL);
+
}
@@ -1316,6 +1339,7 @@ int mvs_dev_found_notify(struct domain_device *dev, int lock)
goto found_out;
}
dev->lldd_dev = mvi_device;
+ mvi_device->dev_status = MVS_DEV_NORMAL;
mvi_device->dev_type = dev->dev_type;
mvi_device->mvi_info = mvi;
if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
@@ -1351,18 +1375,18 @@ int mvs_dev_found(struct domain_device *dev)
return mvs_dev_found_notify(dev, 1);
}
-void mvs_dev_gone_notify(struct domain_device *dev, int lock)
+void mvs_dev_gone_notify(struct domain_device *dev)
{
unsigned long flags = 0;
struct mvs_device *mvi_dev = dev->lldd_dev;
struct mvs_info *mvi = mvi_dev->mvi_info;
- if (lock)
- spin_lock_irqsave(&mvi->lock, flags);
+ spin_lock_irqsave(&mvi->lock, flags);
if (mvi_dev) {
mv_dprintk("found dev[%d:%x] is gone.\n",
mvi_dev->device_id, mvi_dev->dev_type);
+ mvs_release_task(mvi, dev);
mvs_free_reg_set(mvi, mvi_dev);
mvs_free_dev(mvi_dev);
} else {
@@ -1370,14 +1394,13 @@ void mvs_dev_gone_notify(struct domain_device *dev, int lock)
}
dev->lldd_dev = NULL;
- if (lock)
- spin_unlock_irqrestore(&mvi->lock, flags);
+ spin_unlock_irqrestore(&mvi->lock, flags);
}
void mvs_dev_gone(struct domain_device *dev)
{
- mvs_dev_gone_notify(dev, 1);
+ mvs_dev_gone_notify(dev);
}
static struct sas_task *mvs_alloc_task(void)
@@ -1540,7 +1563,7 @@ int mvs_lu_reset(struct domain_device *dev, u8 *lun)
num = mvs_find_dev_phyno(dev, phyno);
spin_lock_irqsave(&mvi->lock, flags);
for (i = 0; i < num; i++)
- mvs_release_task(mvi, phyno[i], dev);
+ mvs_release_task(mvi, dev);
spin_unlock_irqrestore(&mvi->lock, flags);
}
/* If failed, fall-through I_T_Nexus reset */
@@ -1552,8 +1575,8 @@ int mvs_lu_reset(struct domain_device *dev, u8 *lun)
int mvs_I_T_nexus_reset(struct domain_device *dev)
{
unsigned long flags;
- int i, phyno[WIDE_PORT_MAX_PHY], num , rc = TMF_RESP_FUNC_FAILED;
- struct mvs_device * mvi_dev = (struct mvs_device *)dev->lldd_dev;
+ int rc = TMF_RESP_FUNC_FAILED;
+ struct mvs_device * mvi_dev = (struct mvs_device *)dev->lldd_dev;
struct mvs_info *mvi = mvi_dev->mvi_info;
if (mvi_dev->dev_status != MVS_DEV_EH)
@@ -1563,10 +1586,8 @@ int mvs_I_T_nexus_reset(struct domain_device *dev)
__func__, mvi_dev->device_id, rc);
/* housekeeper */
- num = mvs_find_dev_phyno(dev, phyno);
spin_lock_irqsave(&mvi->lock, flags);
- for (i = 0; i < num; i++)
- mvs_release_task(mvi, phyno[i], dev);
+ mvs_release_task(mvi, dev);
spin_unlock_irqrestore(&mvi->lock, flags);
return rc;
@@ -1603,6 +1624,9 @@ int mvs_query_task(struct sas_task *task)
case TMF_RESP_FUNC_FAILED:
case TMF_RESP_FUNC_COMPLETE:
break;
+ default:
+ rc = TMF_RESP_FUNC_COMPLETE;
+ break;
}
}
mv_printk("%s:rc= %d\n", __func__, rc);
@@ -1621,8 +1645,11 @@ int mvs_abort_task(struct sas_task *task)
unsigned long flags;
u32 tag;
- if (mvi->exp_req)
- mvi->exp_req--;
+ if (!mvi_dev) {
+ mv_printk("%s:%d TMF_RESP_FUNC_FAILED\n", __func__, __LINE__);
+ rc = TMF_RESP_FUNC_FAILED;
+ }
+
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -1630,6 +1657,7 @@ int mvs_abort_task(struct sas_task *task)
goto out;
}
spin_unlock_irqrestore(&task->task_state_lock, flags);
+ mvi_dev->dev_status = MVS_DEV_EH;
if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;
@@ -1654,12 +1682,31 @@ int mvs_abort_task(struct sas_task *task)
if (task->lldd_task) {
slot = task->lldd_task;
slot_no = (u32) (slot - mvi->slot_info);
+ spin_lock_irqsave(&mvi->lock, flags);
mvs_slot_complete(mvi, slot_no, 1);
+ spin_unlock_irqrestore(&mvi->lock, flags);
}
}
+
} else if (task->task_proto & SAS_PROTOCOL_SATA ||
task->task_proto & SAS_PROTOCOL_STP) {
/* to do free register_set */
+ if (SATA_DEV == dev->dev_type) {
+ struct mvs_slot_info *slot = task->lldd_task;
+ struct task_status_struct *tstat;
+ u32 slot_idx = (u32)(slot - mvi->slot_info);
+ tstat = &task->task_status;
+ mv_dprintk(KERN_DEBUG "mv_abort_task() mvi=%p task=%p "
+ "slot=%p slot_idx=x%x\n",
+ mvi, task, slot, slot_idx);
+ tstat->stat = SAS_ABORTED_TASK;
+ if (mvi_dev && mvi_dev->running_req)
+ mvi_dev->running_req--;
+ if (sas_protocol_ata(task->task_proto))
+ mvs_free_reg_set(mvi, mvi_dev);
+ mvs_slot_task_free(mvi, task, slot, slot_idx);
+ return -1;
+ }
} else {
/* SMP */
@@ -1717,8 +1764,13 @@ static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset),
sizeof(struct dev_to_host_fis));
tstat->buf_valid_size = sizeof(*resp);
- if (unlikely(err))
- stat = SAS_PROTO_RESPONSE;
+ if (unlikely(err)) {
+ if (unlikely(err & CMD_ISS_STPD))
+ stat = SAS_OPEN_REJECT;
+ else
+ stat = SAS_PROTO_RESPONSE;
+ }
+
return stat;
}
@@ -1753,9 +1805,7 @@ static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
mv_printk("find reserved error, why?\n");
task->ata_task.use_ncq = 0;
- stat = SAS_PROTO_RESPONSE;
- mvs_sata_done(mvi, task, slot_idx, 1);
-
+ mvs_sata_done(mvi, task, slot_idx, err_dw0);
}
break;
default:
@@ -1772,18 +1822,20 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
struct sas_task *task = slot->task;
struct mvs_device *mvi_dev = NULL;
struct task_status_struct *tstat;
+ struct domain_device *dev;
+ u32 aborted;
- bool aborted;
void *to;
enum exec_status sts;
if (mvi->exp_req)
mvi->exp_req--;
- if (unlikely(!task || !task->lldd_task))
+ if (unlikely(!task || !task->lldd_task || !task->dev))
return -1;
tstat = &task->task_status;
- mvi_dev = task->dev->lldd_dev;
+ dev = task->dev;
+ mvi_dev = dev->lldd_dev;
mvs_hba_cq_dump(mvi);
@@ -1800,8 +1852,8 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
if (unlikely(aborted)) {
tstat->stat = SAS_ABORTED_TASK;
- if (mvi_dev)
- mvi_dev->runing_req--;
+ if (mvi_dev && mvi_dev->running_req)
+ mvi_dev->running_req--;
if (sas_protocol_ata(task->task_proto))
mvs_free_reg_set(mvi, mvi_dev);
@@ -1809,24 +1861,17 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
return -1;
}
- if (unlikely(!mvi_dev || !slot->port->port_attached || flags)) {
- mv_dprintk("port has not device.\n");
+ if (unlikely(!mvi_dev || flags)) {
+ if (!mvi_dev)
+ mv_dprintk("port has not device.\n");
tstat->stat = SAS_PHY_DOWN;
goto out;
}
- /*
- if (unlikely((rx_desc & RXQ_ERR) || (*(u64 *) slot->response))) {
- mv_dprintk("Find device[%016llx] RXQ_ERR %X,
- err info:%016llx\n",
- SAS_ADDR(task->dev->sas_addr),
- rx_desc, (u64)(*(u64 *) slot->response));
- }
- */
-
/* error info record present */
if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
tstat->stat = mvs_slot_err(mvi, task, slot_idx);
+ tstat->resp = SAS_TASK_COMPLETE;
goto out;
}
@@ -1868,11 +1913,16 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
tstat->stat = SAM_CHECK_COND;
break;
}
+ if (!slot->port->port_attached) {
+ mv_dprintk("port %d has removed.\n", slot->port->sas_port.id);
+ tstat->stat = SAS_PHY_DOWN;
+ }
+
out:
- if (mvi_dev) {
- mvi_dev->runing_req--;
- if (sas_protocol_ata(task->task_proto))
+ if (mvi_dev && mvi_dev->running_req) {
+ mvi_dev->running_req--;
+ if (sas_protocol_ata(task->task_proto) && !mvi_dev->running_req)
mvs_free_reg_set(mvi, mvi_dev);
}
mvs_slot_task_free(mvi, task, slot, slot_idx);
@@ -1888,10 +1938,10 @@ out:
return sts;
}
-void mvs_release_task(struct mvs_info *mvi,
+void mvs_do_release_task(struct mvs_info *mvi,
int phy_no, struct domain_device *dev)
{
- int i = 0; u32 slot_idx;
+ u32 slot_idx;
struct mvs_phy *phy;
struct mvs_port *port;
struct mvs_slot_info *slot, *slot2;
@@ -1900,6 +1950,10 @@ void mvs_release_task(struct mvs_info *mvi,
port = phy->port;
if (!port)
return;
+ /* clean cmpl queue in case request is already finished */
+ mvs_int_rx(mvi, false);
+
+
list_for_each_entry_safe(slot, slot2, &port->list, entry) {
struct sas_task *task;
@@ -1911,18 +1965,22 @@ void mvs_release_task(struct mvs_info *mvi,
mv_printk("Release slot [%x] tag[%x], task [%p]:\n",
slot_idx, slot->slot_tag, task);
-
- if (task->task_proto & SAS_PROTOCOL_SSP) {
- mv_printk("attached with SSP task CDB[");
- for (i = 0; i < 16; i++)
- mv_printk(" %02x", task->ssp_task.cdb[i]);
- mv_printk(" ]\n");
- }
+ MVS_CHIP_DISP->command_active(mvi, slot_idx);
mvs_slot_complete(mvi, slot_idx, 1);
}
}
+void mvs_release_task(struct mvs_info *mvi,
+ struct domain_device *dev)
+{
+ int i, phyno[WIDE_PORT_MAX_PHY], num;
+ /* housekeeper */
+ num = mvs_find_dev_phyno(dev, phyno);
+ for (i = 0; i < num; i++)
+ mvs_do_release_task(mvi, phyno[i], dev);
+}
+
static void mvs_phy_disconnected(struct mvs_phy *phy)
{
phy->phy_attached = 0;
@@ -2029,16 +2087,18 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
* we need check the interrupt status which belongs to per port.
*/
- if (phy->irq_status & PHYEV_DCDR_ERR)
+ if (phy->irq_status & PHYEV_DCDR_ERR) {
mv_dprintk("port %d STP decoding error.\n",
- phy_no+mvi->id*mvi->chip->n_phy);
+ phy_no + mvi->id*mvi->chip->n_phy);
+ }
if (phy->irq_status & PHYEV_POOF) {
if (!(phy->phy_event & PHY_PLUG_OUT)) {
int dev_sata = phy->phy_type & PORT_TYPE_SATA;
int ready;
- mvs_release_task(mvi, phy_no, NULL);
+ mvs_do_release_task(mvi, phy_no, NULL);
phy->phy_event |= PHY_PLUG_OUT;
+ MVS_CHIP_DISP->clear_srs_irq(mvi, 0, 1);
mvs_handle_event(mvi,
(void *)(unsigned long)phy_no,
PHY_PLUG_EVENT);
@@ -2085,6 +2145,11 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
phy_no, tmp);
}
mvs_update_phyinfo(mvi, phy_no, 0);
+ if (phy->phy_type & PORT_TYPE_SAS) {
+ MVS_CHIP_DISP->phy_reset(mvi, phy_no, 2);
+ mdelay(10);
+ }
+
mvs_bytes_dmaed(mvi, phy_no);
/* whether driver is going to handle hot plug */
if (phy->phy_event & PHY_PLUG_OUT) {
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index 885858bcc403..77ddc7c1e5f2 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -39,6 +39,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <scsi/libsas.h>
+#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/sas_ata.h>
#include <linux/version.h>
@@ -49,7 +50,7 @@
#define _MV_DUMP 0
#define MVS_ID_NOT_MAPPED 0x7f
/* #define DISABLE_HOTPLUG_DMA_FIX */
-#define MAX_EXP_RUNNING_REQ 2
+// #define MAX_EXP_RUNNING_REQ 2
#define WIDE_PORT_MAX_PHY 4
#define MV_DISABLE_NCQ 0
#define mv_printk(fmt, arg ...) \
@@ -129,6 +130,7 @@ struct mvs_dispatch {
void (*get_sas_addr)(void *buf, u32 buflen);
void (*command_active)(struct mvs_info *mvi, u32 slot_idx);
+ void (*clear_srs_irq)(struct mvs_info *mvi, u8 reg_set, u8 clear_all);
void (*issue_stop)(struct mvs_info *mvi, enum mvs_port_type type,
u32 tfs);
void (*start_delivery)(struct mvs_info *mvi, u32 tx);
@@ -236,9 +238,10 @@ struct mvs_device {
enum sas_dev_type dev_type;
struct mvs_info *mvi_info;
struct domain_device *sas_device;
+ struct timer_list timer;
u32 attached_phy;
u32 device_id;
- u32 runing_req;
+ u32 running_req;
u8 taskfileset;
u8 dev_status;
u16 reserved;
@@ -397,7 +400,9 @@ int mvs_lu_reset(struct domain_device *dev, u8 *lun);
int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags);
int mvs_I_T_nexus_reset(struct domain_device *dev);
int mvs_query_task(struct sas_task *task);
-void mvs_release_task(struct mvs_info *mvi, int phy_no,
+void mvs_release_task(struct mvs_info *mvi,
+ struct domain_device *dev);
+void mvs_do_release_task(struct mvs_info *mvi, int phy_no,
struct domain_device *dev);
void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events);
void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
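The mvsas hunks above rename runing_req to running_req and tighten its accounting: decrements are now guarded by a non-zero check, and the SATA/STP register set is released only once the last outstanding request for the device has completed. A minimal sketch of that pattern follows; it is not part of the patch, the helper name mvs_dec_running_req is invented, and it assumes the mvs_info/mvs_device definitions from mv_sas.h:

    /* Invented helper illustrating the guarded accounting shown above;
     * it is not added by this patch. */
    static void mvs_dec_running_req(struct mvs_info *mvi,
                                    struct mvs_device *mvi_dev,
                                    struct sas_task *task)
    {
            if (!mvi_dev || !mvi_dev->running_req)
                    return;                 /* never underflow the counter */
            mvi_dev->running_req--;
            /* free the SATA/STP register set only once the device is idle */
            if (sas_protocol_ata(task->task_proto) && !mvi_dev->running_req)
                    mvs_free_reg_set(mvi, mvi_dev);
    }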
diff --git a/drivers/scsi/pcmcia/aha152x_stub.c b/drivers/scsi/pcmcia/aha152x_stub.c
index 528733b4a392..9d70aef99227 100644
--- a/drivers/scsi/pcmcia/aha152x_stub.c
+++ b/drivers/scsi/pcmcia/aha152x_stub.c
@@ -80,7 +80,6 @@ MODULE_LICENSE("Dual MPL/GPL");
typedef struct scsi_info_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
struct Scsi_Host *host;
} scsi_info_t;
@@ -105,7 +104,6 @@ static int aha152x_probe(struct pcmcia_device *link)
link->io.NumPorts1 = 0x20;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
link->io.IOAddrLines = 10;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.Present = PRESENT_OPTION;
@@ -160,8 +158,7 @@ static int aha152x_config_cs(struct pcmcia_device *link)
if (ret)
goto failed;
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
+ if (!link->irq)
goto failed;
ret = pcmcia_request_configuration(link, &link->conf);
@@ -172,7 +169,7 @@ static int aha152x_config_cs(struct pcmcia_device *link)
memset(&s, 0, sizeof(s));
s.conf = "PCMCIA setup";
s.io_port = link->io.BasePort1;
- s.irq = link->irq.AssignedIRQ;
+ s.irq = link->irq;
s.scsiid = host_id;
s.reconnect = reconnect;
s.parity = parity;
@@ -187,8 +184,6 @@ static int aha152x_config_cs(struct pcmcia_device *link)
goto failed;
}
- sprintf(info->node.dev_name, "scsi%d", host->host_no);
- link->dev_node = &info->node;
info->host = host;
return 0;
diff --git a/drivers/scsi/pcmcia/fdomain_stub.c b/drivers/scsi/pcmcia/fdomain_stub.c
index 914040684079..21b141151dfc 100644
--- a/drivers/scsi/pcmcia/fdomain_stub.c
+++ b/drivers/scsi/pcmcia/fdomain_stub.c
@@ -63,7 +63,6 @@ MODULE_LICENSE("Dual MPL/GPL");
typedef struct scsi_info_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
struct Scsi_Host *host;
} scsi_info_t;
@@ -88,7 +87,6 @@ static int fdomain_probe(struct pcmcia_device *link)
link->io.NumPorts1 = 0x10;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
link->io.IOAddrLines = 10;
- link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.Present = PRESENT_OPTION;
@@ -133,8 +131,7 @@ static int fdomain_config(struct pcmcia_device *link)
if (ret)
goto failed;
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
+ if (!link->irq)
goto failed;
ret = pcmcia_request_configuration(link, &link->conf);
if (ret)
@@ -144,7 +141,7 @@ static int fdomain_config(struct pcmcia_device *link)
release_region(link->io.BasePort1, link->io.NumPorts1);
/* Set configuration options for the fdomain driver */
- sprintf(str, "%d,%d", link->io.BasePort1, link->irq.AssignedIRQ);
+ sprintf(str, "%d,%d", link->io.BasePort1, link->irq);
fdomain_setup(str);
host = __fdomain_16x0_detect(&fdomain_driver_template);
@@ -157,8 +154,6 @@ static int fdomain_config(struct pcmcia_device *link)
goto failed;
scsi_scan_host(host);
- sprintf(info->node.dev_name, "scsi%d", host->host_no);
- link->dev_node = &info->node;
info->host = host;
return 0;
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 021246454872..0f0e112c3f8e 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -1563,13 +1563,6 @@ static int nsp_cs_probe(struct pcmcia_device *link)
link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
link->io.IOAddrLines = 10; /* not used */
- /* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
-
- /* Interrupt handler */
- link->irq.Handler = &nspintr;
- link->irq.Attributes |= IRQF_SHARED;
-
/* General socket configuration */
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -1646,8 +1639,7 @@ static int nsp_cs_config_check(struct pcmcia_device *p_dev,
}
/* Do we need to allocate an interrupt? */
- if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
- p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
+ p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
@@ -1720,10 +1712,8 @@ static int nsp_cs_config(struct pcmcia_device *link)
if (ret)
goto cs_failed;
- if (link->conf.Attributes & CONF_ENABLE_IRQ) {
- if (pcmcia_request_irq(link, &link->irq))
- goto cs_failed;
- }
+ if (pcmcia_request_irq(link, nspintr))
+ goto cs_failed;
ret = pcmcia_request_configuration(link, &link->conf);
if (ret)
@@ -1741,7 +1731,7 @@ static int nsp_cs_config(struct pcmcia_device *link)
/* Set port and IRQ */
data->BaseAddress = link->io.BasePort1;
data->NumAddress = link->io.NumPorts1;
- data->IrqNumber = link->irq.AssignedIRQ;
+ data->IrqNumber = link->irq;
nsp_dbg(NSP_DEBUG_INIT, "I/O[0x%x+0x%x] IRQ %d",
data->BaseAddress, data->NumAddress, data->IrqNumber);
@@ -1764,8 +1754,6 @@ static int nsp_cs_config(struct pcmcia_device *link)
scsi_scan_host(host);
- snprintf(info->node.dev_name, sizeof(info->node.dev_name), "scsi%d", host->host_no);
- link->dev_node = &info->node;
info->host = host;
/* Finally, report what we've done */
@@ -1775,7 +1763,7 @@ static int nsp_cs_config(struct pcmcia_device *link)
printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10);
}
if (link->conf.Attributes & CONF_ENABLE_IRQ) {
- printk(", irq %d", link->irq.AssignedIRQ);
+ printk(", irq %d", link->irq);
}
if (link->io.NumPorts1) {
printk(", io 0x%04x-0x%04x", link->io.BasePort1,
@@ -1823,7 +1811,6 @@ static void nsp_cs_release(struct pcmcia_device *link)
if (info->host != NULL) {
scsi_remove_host(info->host);
}
- link->dev_node = NULL;
if (link->win) {
if (data != NULL) {
diff --git a/drivers/scsi/pcmcia/nsp_cs.h b/drivers/scsi/pcmcia/nsp_cs.h
index 8c61a4fe1db9..d68c9f267c5e 100644
--- a/drivers/scsi/pcmcia/nsp_cs.h
+++ b/drivers/scsi/pcmcia/nsp_cs.h
@@ -224,7 +224,6 @@
typedef struct scsi_info_t {
struct pcmcia_device *p_dev;
struct Scsi_Host *host;
- dev_node_t node;
int stop;
} scsi_info_t;
diff --git a/drivers/scsi/pcmcia/qlogic_stub.c b/drivers/scsi/pcmcia/qlogic_stub.c
index f85f094870b4..f0fc6baed9fc 100644
--- a/drivers/scsi/pcmcia/qlogic_stub.c
+++ b/drivers/scsi/pcmcia/qlogic_stub.c
@@ -82,7 +82,6 @@ static struct scsi_host_template qlogicfas_driver_template = {
typedef struct scsi_info_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
struct Scsi_Host *host;
unsigned short manf_id;
} scsi_info_t;
@@ -161,7 +160,6 @@ static int qlogic_probe(struct pcmcia_device *link)
link->io.NumPorts1 = 16;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
link->io.IOAddrLines = 10;
- link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.Present = PRESENT_OPTION;
@@ -209,8 +207,7 @@ static int qlogic_config(struct pcmcia_device * link)
if (ret)
goto failed;
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
+ if (!link->irq)
goto failed;
ret = pcmcia_request_configuration(link, &link->conf);
@@ -227,18 +224,16 @@ static int qlogic_config(struct pcmcia_device * link)
/* The KXL-810AN has a bigger IO port window */
if (link->io.NumPorts1 == 32)
host = qlogic_detect(&qlogicfas_driver_template, link,
- link->io.BasePort1 + 16, link->irq.AssignedIRQ);
+ link->io.BasePort1 + 16, link->irq);
else
host = qlogic_detect(&qlogicfas_driver_template, link,
- link->io.BasePort1, link->irq.AssignedIRQ);
+ link->io.BasePort1, link->irq);
if (!host) {
printk(KERN_INFO "%s: no SCSI devices found\n", qlogic_name);
goto failed;
}
- sprintf(info->node.dev_name, "scsi%d", host->host_no);
- link->dev_node = &info->node;
info->host = host;
return 0;
@@ -258,7 +253,7 @@ static void qlogic_release(struct pcmcia_device *link)
scsi_remove_host(info->host);
- free_irq(link->irq.AssignedIRQ, info->host);
+ free_irq(link->irq, info->host);
pcmcia_disable_device(link);
scsi_host_put(info->host);
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index e7564d8f0cbf..a51164171179 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -191,7 +191,6 @@
struct scsi_info_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
struct Scsi_Host *host;
unsigned short manf_id;
};
@@ -719,8 +718,7 @@ SYM53C500_config(struct pcmcia_device *link)
if (ret)
goto failed;
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
+ if (!link->irq)
goto failed;
ret = pcmcia_request_configuration(link, &link->conf);
@@ -752,7 +750,7 @@ SYM53C500_config(struct pcmcia_device *link)
* 0x320, 0x330, 0x340, 0x350
*/
port_base = link->io.BasePort1;
- irq_level = link->irq.AssignedIRQ;
+ irq_level = link->irq;
DEB(printk("SYM53C500: port_base=0x%x, irq=%d, fast_pio=%d\n",
port_base, irq_level, USE_FAST_PIO);)
@@ -793,8 +791,6 @@ SYM53C500_config(struct pcmcia_device *link)
*/
data->fast_pio = USE_FAST_PIO;
- sprintf(info->node.dev_name, "scsi%d", host->host_no);
- link->dev_node = &info->node;
info->host = host;
if (scsi_add_host(host, NULL))
@@ -866,7 +862,6 @@ SYM53C500_probe(struct pcmcia_device *link)
link->io.NumPorts1 = 16;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
link->io.IOAddrLines = 10;
- link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
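All five PCMCIA SCSI stubs above are converted to the same IRQ scheme: the per-driver link->irq.Attributes setup and the old pcmcia_request_irq(link, &link->irq) call go away, the IRQ number is read from the plain integer link->irq instead of link->irq.AssignedIRQ, and nsp_cs, which owns its own handler, switches to the two-argument pcmcia_request_irq(link, handler). A condensed sketch of a converted config path, assuming the 2.6.35-era PCMCIA interfaces used in these hunks; my_intr and my_config are placeholder names, not from the patch:

    /* placeholder interrupt handler, not from the patch */
    static irqreturn_t my_intr(int irq, void *dev_id)
    {
            return IRQ_HANDLED;
    }

    static int my_config(struct pcmcia_device *link)
    {
            int ret;

            /* nsp_cs style: register our own handler with the 2-arg API;
             * the simpler stubs instead only check the pre-assigned IRQ:
             *     if (!link->irq) goto failed;
             */
            ret = pcmcia_request_irq(link, my_intr);
            if (ret)
                    return ret;

            ret = pcmcia_request_configuration(link, &link->conf);
            if (ret)
                    return ret;

            /* the IRQ number is now simply link->irq */
            printk(KERN_INFO "configured at io 0x%x, irq %d\n",
                   link->io.BasePort1, link->irq);
            return 0;
    }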
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 909c00ec044f..5ff8261c5d67 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -4390,7 +4390,6 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
return -ENOMEM;
}
}
- memset(buffer, 0, fw_control->len);
memcpy(buffer, fw_control->buffer, fw_control->len);
flash_update_info.sgl.addr = cpu_to_le64(phys_addr);
flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len);
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index bff4f5139b9c..cd02ceaf67ff 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -885,11 +885,13 @@ static void pm8001_dev_gone_notify(struct domain_device *dev)
u32 tag;
struct pm8001_hba_info *pm8001_ha;
struct pm8001_device *pm8001_dev = dev->lldd_dev;
- u32 device_id = pm8001_dev->device_id;
+
pm8001_ha = pm8001_find_ha_by_dev(dev);
spin_lock_irqsave(&pm8001_ha->lock, flags);
pm8001_tag_alloc(pm8001_ha, &tag);
if (pm8001_dev) {
+ u32 device_id = pm8001_dev->device_id;
+
PM8001_DISC_DBG(pm8001_ha,
pm8001_printk("found dev[%d:%x] is gone.\n",
pm8001_dev->device_id, pm8001_dev->dev_type));
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 53aefffbaead..c44e4ab4e938 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -3751,12 +3751,6 @@ static int pmcraid_check_ioctl_buffer(
return -EINVAL;
}
- /* buffer length can't be negetive */
- if (hdr->buffer_length < 0) {
- pmcraid_err("ioctl: invalid buffer length specified\n");
- return -EINVAL;
- }
-
/* check for appropriate buffer access */
if ((_IOC_DIR(cmd) & _IOC_READ) == _IOC_READ)
access = VERIFY_WRITE;
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index c51fd1f86639..5df782f4a097 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,4 +1,5 @@
qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
- qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o
+ qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
+ qla_nx.o
obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 1c7ef55966fb..c272af4a76e4 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -12,9 +12,7 @@
#include <linux/delay.h>
static int qla24xx_vport_disable(struct fc_vport *, bool);
-static int qla84xx_reset(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *);
-int qla84xx_reset_chip(scsi_qla_host_t *, uint16_t, uint16_t *);
-static int qla84xx_mgmt_cmd(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *);
+
/* SYSFS attributes --------------------------------------------------------- */
static ssize_t
@@ -43,6 +41,12 @@ qla2x00_sysfs_write_fw_dump(struct kobject *kobj,
struct qla_hw_data *ha = vha->hw;
int reading;
+ if (IS_QLA82XX(ha)) {
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "Firmware dump not supported for ISP82xx\n"));
+ return count;
+ }
+
if (off != 0)
return (0);
@@ -315,8 +319,8 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
else if (start == (ha->flt_region_boot * 4) ||
start == (ha->flt_region_fw * 4))
valid = 1;
- else if (IS_QLA25XX(ha) || IS_QLA81XX(ha))
- valid = 1;
+ else if (IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha))
+ valid = 1;
if (!valid) {
qla_printk(KERN_WARNING, ha,
"Invalid start region 0x%x/0x%x.\n", start, size);
@@ -519,6 +523,7 @@ qla2x00_sysfs_write_reset(struct kobject *kobj,
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
int type;
if (off != 0)
@@ -553,6 +558,20 @@ qla2x00_sysfs_write_reset(struct kobject *kobj,
"MPI reset failed on (%ld).\n", vha->host_no);
scsi_unblock_requests(vha->host);
break;
+ case 0x2025e:
+ if (!IS_QLA82XX(ha) || vha != base_vha) {
+ qla_printk(KERN_INFO, ha,
+ "FCoE ctx reset not supported for host%ld.\n",
+ vha->host_no);
+ return count;
+ }
+
+ qla_printk(KERN_INFO, ha,
+ "Issuing FCoE CTX reset on host%ld.\n", vha->host_no);
+ set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ qla2x00_wait_for_fcoe_ctx_reset(vha);
+ break;
}
return count;
}
@@ -838,7 +857,7 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
continue;
if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
continue;
- if (iter->is4GBp_only == 3 && !IS_QLA81XX(vha->hw))
+ if (iter->is4GBp_only == 3 && !(IS_QLA8XXX_TYPE(vha->hw)))
continue;
ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
@@ -862,7 +881,7 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
continue;
if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
continue;
- if (iter->is4GBp_only == 3 && !IS_QLA81XX(ha))
+ if (iter->is4GBp_only == 3 && !!(IS_QLA8XXX_TYPE(vha->hw)))
continue;
sysfs_remove_bin_file(&host->shost_gendev.kobj,
@@ -1179,15 +1198,13 @@ qla24xx_84xx_fw_version_show(struct device *dev,
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
- if (IS_QLA84XX(ha) && ha->cs84xx) {
- if (ha->cs84xx->op_fw_version == 0) {
+ if (IS_QLA84XX(ha) && ha->cs84xx)
+ if (ha->cs84xx->op_fw_version == 0)
rval = qla84xx_verify_chip(vha, status);
- }
if ((rval == QLA_SUCCESS) && (status[0] == 0))
return snprintf(buf, PAGE_SIZE, "%u\n",
(uint32_t)ha->cs84xx->op_fw_version);
- }
return snprintf(buf, PAGE_SIZE, "\n");
}
@@ -1237,7 +1254,7 @@ qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
- if (!IS_QLA81XX(vha->hw))
+ if (!IS_QLA8XXX_TYPE(vha->hw))
return snprintf(buf, PAGE_SIZE, "\n");
return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
@@ -1249,7 +1266,7 @@ qla2x00_vn_port_mac_address_show(struct device *dev,
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
- if (!IS_QLA81XX(vha->hw))
+ if (!IS_QLA8XXX_TYPE(vha->hw))
return snprintf(buf, PAGE_SIZE, "\n");
return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
@@ -1825,582 +1842,6 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
return 0;
}
-/* BSG support for ELS/CT pass through */
-inline srb_t *
-qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
-{
- srb_t *sp;
- struct qla_hw_data *ha = vha->hw;
- struct srb_bsg_ctx *ctx;
-
- sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
- if (!sp)
- goto done;
- ctx = kzalloc(size, GFP_KERNEL);
- if (!ctx) {
- mempool_free(sp, ha->srb_mempool);
- goto done;
- }
-
- memset(sp, 0, sizeof(*sp));
- sp->fcport = fcport;
- sp->ctx = ctx;
-done:
- return sp;
-}
-
-static int
-qla2x00_process_els(struct fc_bsg_job *bsg_job)
-{
- struct fc_rport *rport;
- fc_port_t *fcport;
- struct Scsi_Host *host;
- scsi_qla_host_t *vha;
- struct qla_hw_data *ha;
- srb_t *sp;
- const char *type;
- int req_sg_cnt, rsp_sg_cnt;
- int rval = (DRIVER_ERROR << 16);
- uint16_t nextlid = 0;
- struct srb_bsg *els;
-
- /* Multiple SG's are not supported for ELS requests */
- if (bsg_job->request_payload.sg_cnt > 1 ||
- bsg_job->reply_payload.sg_cnt > 1) {
- DEBUG2(printk(KERN_INFO
- "multiple SG's are not supported for ELS requests"
- " [request_sg_cnt: %x reply_sg_cnt: %x]\n",
- bsg_job->request_payload.sg_cnt,
- bsg_job->reply_payload.sg_cnt));
- rval = -EPERM;
- goto done;
- }
-
- /* ELS request for rport */
- if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
- rport = bsg_job->rport;
- fcport = *(fc_port_t **) rport->dd_data;
- host = rport_to_shost(rport);
- vha = shost_priv(host);
- ha = vha->hw;
- type = "FC_BSG_RPT_ELS";
-
- /* make sure the rport is logged in,
- * if not perform fabric login
- */
- if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "failed to login port %06X for ELS passthru\n",
- fcport->d_id.b24));
- rval = -EIO;
- goto done;
- }
- } else {
- host = bsg_job->shost;
- vha = shost_priv(host);
- ha = vha->hw;
- type = "FC_BSG_HST_ELS_NOLOGIN";
-
- /* Allocate a dummy fcport structure, since functions
- * preparing the IOCB and mailbox command retrieves port
- * specific information from fcport structure. For Host based
- * ELS commands there will be no fcport structure allocated
- */
- fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
- if (!fcport) {
- rval = -ENOMEM;
- goto done;
- }
-
- /* Initialize all required fields of fcport */
- fcport->vha = vha;
- fcport->vp_idx = vha->vp_idx;
- fcport->d_id.b.al_pa =
- bsg_job->request->rqst_data.h_els.port_id[0];
- fcport->d_id.b.area =
- bsg_job->request->rqst_data.h_els.port_id[1];
- fcport->d_id.b.domain =
- bsg_job->request->rqst_data.h_els.port_id[2];
- fcport->loop_id =
- (fcport->d_id.b.al_pa == 0xFD) ?
- NPH_FABRIC_CONTROLLER : NPH_F_PORT;
- }
-
- if (!vha->flags.online) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "host not online\n"));
- rval = -EIO;
- goto done;
- }
-
- req_sg_cnt =
- dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
- bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
- if (!req_sg_cnt) {
- rval = -ENOMEM;
- goto done_free_fcport;
- }
- rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
- bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
- if (!rsp_sg_cnt) {
- rval = -ENOMEM;
- goto done_free_fcport;
- }
-
- if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
- (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
- {
- DEBUG2(printk(KERN_INFO
- "dma mapping resulted in different sg counts \
- [request_sg_cnt: %x dma_request_sg_cnt: %x\
- reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
- bsg_job->request_payload.sg_cnt, req_sg_cnt,
- bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
- rval = -EAGAIN;
- goto done_unmap_sg;
- }
-
- /* Alloc SRB structure */
- sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
- if (!sp) {
- rval = -ENOMEM;
- goto done_unmap_sg;
- }
-
- els = sp->ctx;
- els->ctx.type =
- (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
- SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
- els->bsg_job = bsg_job;
-
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
- "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
- bsg_job->request->rqst_data.h_els.command_code,
- fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa));
-
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS) {
- kfree(sp->ctx);
- mempool_free(sp, ha->srb_mempool);
- rval = -EIO;
- goto done_unmap_sg;
- }
- return rval;
-
-done_unmap_sg:
- dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
- bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
- dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
- bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
- goto done_free_fcport;
-
-done_free_fcport:
- if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
- kfree(fcport);
-done:
- return rval;
-}
-
-static int
-qla2x00_process_ct(struct fc_bsg_job *bsg_job)
-{
- srb_t *sp;
- struct Scsi_Host *host = bsg_job->shost;
- scsi_qla_host_t *vha = shost_priv(host);
- struct qla_hw_data *ha = vha->hw;
- int rval = (DRIVER_ERROR << 16);
- int req_sg_cnt, rsp_sg_cnt;
- uint16_t loop_id;
- struct fc_port *fcport;
- char *type = "FC_BSG_HST_CT";
- struct srb_bsg *ct;
-
- /* pass through is supported only for ISP 4Gb or higher */
- if (!IS_FWI2_CAPABLE(ha)) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld):Firmware is not capable to support FC "
- "CT pass thru\n", vha->host_no));
- rval = -EPERM;
- goto done;
- }
-
- req_sg_cnt =
- dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
- bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
- if (!req_sg_cnt) {
- rval = -ENOMEM;
- goto done;
- }
-
- rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
- bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
- if (!rsp_sg_cnt) {
- rval = -ENOMEM;
- goto done;
- }
-
- if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
- (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
- {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "dma mapping resulted in different sg counts \
- [request_sg_cnt: %x dma_request_sg_cnt: %x\
- reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
- bsg_job->request_payload.sg_cnt, req_sg_cnt,
- bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
- rval = -EAGAIN;
- goto done_unmap_sg;
- }
-
- if (!vha->flags.online) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "host not online\n"));
- rval = -EIO;
- goto done_unmap_sg;
- }
-
- loop_id =
- (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
- >> 24;
- switch (loop_id) {
- case 0xFC:
- loop_id = cpu_to_le16(NPH_SNS);
- break;
- case 0xFA:
- loop_id = vha->mgmt_svr_loop_id;
- break;
- default:
- DEBUG2(qla_printk(KERN_INFO, ha,
- "Unknown loop id: %x\n", loop_id));
- rval = -EINVAL;
- goto done_unmap_sg;
- }
-
- /* Allocate a dummy fcport structure, since functions preparing the
- * IOCB and mailbox command retrieves port specific information
- * from fcport structure. For Host based ELS commands there will be
- * no fcport structure allocated
- */
- fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
- if (!fcport)
- {
- rval = -ENOMEM;
- goto done_unmap_sg;
- }
-
- /* Initialize all required fields of fcport */
- fcport->vha = vha;
- fcport->vp_idx = vha->vp_idx;
- fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
- fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
- fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
- fcport->loop_id = loop_id;
-
- /* Alloc SRB structure */
- sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
- if (!sp) {
- rval = -ENOMEM;
- goto done_free_fcport;
- }
-
- ct = sp->ctx;
- ct->ctx.type = SRB_CT_CMD;
- ct->bsg_job = bsg_job;
-
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
- "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
- (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
- fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa));
-
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS) {
- kfree(sp->ctx);
- mempool_free(sp, ha->srb_mempool);
- rval = -EIO;
- goto done_free_fcport;
- }
- return rval;
-
-done_free_fcport:
- kfree(fcport);
-done_unmap_sg:
- dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
- bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
- dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
- bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
-done:
- return rval;
-}
-
-static int
-qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
-{
- struct Scsi_Host *host = bsg_job->shost;
- scsi_qla_host_t *vha = shost_priv(host);
- struct qla_hw_data *ha = vha->hw;
- int rval;
- uint8_t command_sent;
- uint32_t vendor_cmd;
- char *type;
- struct msg_echo_lb elreq;
- uint16_t response[MAILBOX_REGISTER_COUNT];
- uint8_t* fw_sts_ptr;
- uint8_t *req_data;
- dma_addr_t req_data_dma;
- uint32_t req_data_len;
- uint8_t *rsp_data;
- dma_addr_t rsp_data_dma;
- uint32_t rsp_data_len;
-
- if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
- test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
- test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
- rval = -EBUSY;
- goto done;
- }
-
- if (!vha->flags.online) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "host not online\n"));
- rval = -EIO;
- goto done;
- }
-
- elreq.req_sg_cnt =
- dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
- bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
- if (!elreq.req_sg_cnt) {
- rval = -ENOMEM;
- goto done;
- }
- elreq.rsp_sg_cnt =
- dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
- bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
- if (!elreq.rsp_sg_cnt) {
- rval = -ENOMEM;
- goto done;
- }
-
- if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
- (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
- {
- DEBUG2(printk(KERN_INFO
- "dma mapping resulted in different sg counts \
- [request_sg_cnt: %x dma_request_sg_cnt: %x\
- reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
- bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
- bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt));
- rval = -EAGAIN;
- goto done_unmap_sg;
- }
- req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
- req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
- &req_data_dma, GFP_KERNEL);
-
- rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
- &rsp_data_dma, GFP_KERNEL);
-
- /* Copy the request buffer in req_data now */
- sg_copy_to_buffer(bsg_job->request_payload.sg_list,
- bsg_job->request_payload.sg_cnt, req_data,
- req_data_len);
-
- elreq.send_dma = req_data_dma;
- elreq.rcv_dma = rsp_data_dma;
- elreq.transfer_size = req_data_len;
-
- /* Vendor cmd : loopback or ECHO diagnostic
- * Options:
- * Loopback : Either internal or external loopback
- * ECHO: ECHO ELS or Vendor specific FC4 link data
- */
- vendor_cmd = bsg_job->request->rqst_data.h_vendor.vendor_cmd[0];
- elreq.options =
- *(((uint32_t *)bsg_job->request->rqst_data.h_vendor.vendor_cmd)
- + 1);
-
- switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
- case QL_VND_LOOPBACK:
- if (ha->current_topology != ISP_CFG_F) {
- type = "FC_BSG_HST_VENDOR_LOOPBACK";
-
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
- vha->host_no, type, vendor_cmd, elreq.options));
-
- command_sent = INT_DEF_LB_LOOPBACK_CMD;
- rval = qla2x00_loopback_test(vha, &elreq, response);
- if (IS_QLA81XX(ha)) {
- if (response[0] == MBS_COMMAND_ERROR && response[1] == MBS_LB_RESET) {
- DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing "
- "ISP\n", __func__, vha->host_no));
- set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
- qla2xxx_wake_dpc(vha);
- }
- }
- } else {
- type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
- vha->host_no, type, vendor_cmd, elreq.options));
-
- command_sent = INT_DEF_LB_ECHO_CMD;
- rval = qla2x00_echo_test(vha, &elreq, response);
- }
- break;
- case QLA84_RESET:
- if (!IS_QLA84XX(vha->hw)) {
- rval = -EINVAL;
- DEBUG16(printk(
- "%s(%ld): 8xxx exiting.\n",
- __func__, vha->host_no));
- return rval;
- }
- rval = qla84xx_reset(vha, &elreq, bsg_job);
- break;
- case QLA84_MGMT_CMD:
- if (!IS_QLA84XX(vha->hw)) {
- rval = -EINVAL;
- DEBUG16(printk(
- "%s(%ld): 8xxx exiting.\n",
- __func__, vha->host_no));
- return rval;
- }
- rval = qla84xx_mgmt_cmd(vha, &elreq, bsg_job);
- break;
- default:
- rval = -ENOSYS;
- }
-
- if (rval != QLA_SUCCESS) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "scsi(%ld) Vendor request %s failed\n", vha->host_no, type));
- rval = 0;
- bsg_job->reply->result = (DID_ERROR << 16);
- bsg_job->reply->reply_payload_rcv_len = 0;
- fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
- memcpy( fw_sts_ptr, response, sizeof(response));
- fw_sts_ptr += sizeof(response);
- *fw_sts_ptr = command_sent;
- } else {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "scsi(%ld) Vendor request %s completed\n", vha->host_no, type));
- rval = bsg_job->reply->result = 0;
- bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(response) + sizeof(uint8_t);
- bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
- fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
- memcpy(fw_sts_ptr, response, sizeof(response));
- fw_sts_ptr += sizeof(response);
- *fw_sts_ptr = command_sent;
- sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
- bsg_job->reply_payload.sg_cnt, rsp_data,
- rsp_data_len);
- }
- bsg_job->job_done(bsg_job);
-
-done_unmap_sg:
-
- if(req_data)
- dma_free_coherent(&ha->pdev->dev, req_data_len,
- req_data, req_data_dma);
- dma_unmap_sg(&ha->pdev->dev,
- bsg_job->request_payload.sg_list,
- bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
- dma_unmap_sg(&ha->pdev->dev,
- bsg_job->reply_payload.sg_list,
- bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
-
-done:
- return rval;
-}
-
-static int
-qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
-{
- int ret = -EINVAL;
-
- switch (bsg_job->request->msgcode) {
- case FC_BSG_RPT_ELS:
- case FC_BSG_HST_ELS_NOLOGIN:
- ret = qla2x00_process_els(bsg_job);
- break;
- case FC_BSG_HST_CT:
- ret = qla2x00_process_ct(bsg_job);
- break;
- case FC_BSG_HST_VENDOR:
- ret = qla2x00_process_vendor_specific(bsg_job);
- break;
- case FC_BSG_HST_ADD_RPORT:
- case FC_BSG_HST_DEL_RPORT:
- case FC_BSG_RPT_CT:
- default:
- DEBUG2(printk("qla2xxx: unsupported BSG request\n"));
- break;
- }
- return ret;
-}
-
-static int
-qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
-{
- scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
- struct qla_hw_data *ha = vha->hw;
- srb_t *sp;
- int cnt, que;
- unsigned long flags;
- struct req_que *req;
- struct srb_bsg *sp_bsg;
-
- /* find the bsg job from the active list of commands */
- spin_lock_irqsave(&ha->hardware_lock, flags);
- for (que = 0; que < ha->max_req_queues; que++) {
- req = ha->req_q_map[que];
- if (!req)
- continue;
-
- for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++ ) {
- sp = req->outstanding_cmds[cnt];
-
- if (sp) {
- sp_bsg = (struct srb_bsg*)sp->ctx;
-
- if (((sp_bsg->ctx.type == SRB_CT_CMD) ||
- (sp_bsg->ctx.type == SRB_ELS_CMD_RPT)
- || ( sp_bsg->ctx.type == SRB_ELS_CMD_HST)) &&
- (sp_bsg->bsg_job == bsg_job)) {
- if (ha->isp_ops->abort_command(sp)) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld): mbx abort_command failed\n", vha->host_no));
- bsg_job->req->errors = bsg_job->reply->result = -EIO;
- } else {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld): mbx abort_command success\n", vha->host_no));
- bsg_job->req->errors = bsg_job->reply->result = 0;
- }
- goto done;
- }
- }
- }
- }
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld) SRB not found to abort\n", vha->host_no));
- bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
- return 0;
-
-done:
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (bsg_job->request->msgcode == FC_BSG_HST_CT)
- kfree(sp->fcport);
- kfree(sp->ctx);
- mempool_free(sp, ha->srb_mempool);
- return 0;
-}
-
struct fc_function_template qla2xxx_transport_functions = {
.show_host_node_name = 1,
@@ -2502,7 +1943,7 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
- if (IS_QLA81XX(ha))
+ if (IS_QLA8XXX_TYPE(ha))
speed = FC_PORTSPEED_10GBIT;
else if (IS_QLA25XX(ha))
speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
@@ -2516,125 +1957,3 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
speed = FC_PORTSPEED_1GBIT;
fc_host_supported_speeds(vha->host) = speed;
}
-static int
-qla84xx_reset(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
-{
- int ret = 0;
- int cmd;
- uint16_t cmd_status;
-
- DEBUG16(printk("%s(%ld): entered.\n", __func__, ha->host_no));
-
- cmd = (*((bsg_job->request->rqst_data.h_vendor.vendor_cmd) + 2))
- == A84_RESET_FLAG_ENABLE_DIAG_FW ?
- A84_ISSUE_RESET_DIAG_FW : A84_ISSUE_RESET_OP_FW;
- ret = qla84xx_reset_chip(ha, cmd == A84_ISSUE_RESET_DIAG_FW,
- &cmd_status);
- return ret;
-}
-
-static int
-qla84xx_mgmt_cmd(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
-{
- struct access_chip_84xx *mn;
- dma_addr_t mn_dma, mgmt_dma;
- void *mgmt_b = NULL;
- int ret = 0;
- int rsp_hdr_len, len = 0;
- struct qla84_msg_mgmt *ql84_mgmt;
-
- ql84_mgmt = (struct qla84_msg_mgmt *) vmalloc(sizeof(struct qla84_msg_mgmt));
- ql84_mgmt->cmd =
- *((uint16_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 2));
- ql84_mgmt->mgmtp.u.mem.start_addr =
- *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 3));
- ql84_mgmt->len =
- *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 4));
- ql84_mgmt->mgmtp.u.config.id =
- *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 5));
- ql84_mgmt->mgmtp.u.config.param0 =
- *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 6));
- ql84_mgmt->mgmtp.u.config.param1 =
- *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 7));
- ql84_mgmt->mgmtp.u.info.type =
- *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 8));
- ql84_mgmt->mgmtp.u.info.context =
- *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 9));
-
- rsp_hdr_len = bsg_job->request_payload.payload_len;
-
- mn = dma_pool_alloc(ha->hw->s_dma_pool, GFP_KERNEL, &mn_dma);
- if (mn == NULL) {
- DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
- "failed%lu\n", __func__, ha->host_no));
- return -ENOMEM;
- }
-
- memset(mn, 0, sizeof (struct access_chip_84xx));
-
- mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
- mn->entry_count = 1;
-
- switch (ql84_mgmt->cmd) {
- case QLA84_MGMT_READ_MEM:
- mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
- mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
- break;
- case QLA84_MGMT_WRITE_MEM:
- mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
- mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
- break;
- case QLA84_MGMT_CHNG_CONFIG:
- mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
- mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.id);
- mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param0);
- mn->parameter3 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param1);
- break;
- case QLA84_MGMT_GET_INFO:
- mn->options = cpu_to_le16(ACO_REQUEST_INFO);
- mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.type);
- mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.context);
- break;
- default:
- ret = -EIO;
- goto exit_mgmt0;
- }
-
- if ((len == ql84_mgmt->len) &&
- ql84_mgmt->cmd != QLA84_MGMT_CHNG_CONFIG) {
- mgmt_b = dma_alloc_coherent(&ha->hw->pdev->dev, len,
- &mgmt_dma, GFP_KERNEL);
- if (mgmt_b == NULL) {
- DEBUG2(printk(KERN_ERR "%s: dma alloc mgmt_b "
- "failed%lu\n", __func__, ha->host_no));
- ret = -ENOMEM;
- goto exit_mgmt0;
- }
- mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->len);
- mn->dseg_count = cpu_to_le16(1);
- mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
- mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
- mn->dseg_length = cpu_to_le32(len);
-
- if (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM) {
- memcpy(mgmt_b, ql84_mgmt->payload, len);
- }
- }
-
- ret = qla2x00_issue_iocb(ha, mn, mn_dma, 0);
- if ((ret != QLA_SUCCESS) || (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM)
- || (ql84_mgmt->cmd == QLA84_MGMT_CHNG_CONFIG)) {
- if (ret != QLA_SUCCESS)
- DEBUG2(printk(KERN_ERR "%s(%lu): failed\n",
- __func__, ha->host_no));
- } else if ((ql84_mgmt->cmd == QLA84_MGMT_READ_MEM) ||
- (ql84_mgmt->cmd == QLA84_MGMT_GET_INFO)) {
- }
-
- if (mgmt_b)
- dma_free_coherent(&ha->hw->pdev->dev, len, mgmt_b, mgmt_dma);
-
-exit_mgmt0:
- dma_pool_free(ha->hw->s_dma_pool, mn, mn_dma);
- return ret;
-}
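The ELS/CT/vendor pass-through code deleted from qla_attr.c above reappears, largely verbatim, in the new qla_bsg.c that follows. One behavioural difference visible between the two copies of qla2x00_get_ctx_bsg_sp(): the new version resets sp to NULL when the context allocation fails, so a caller that tests the return value can no longer be handed an srb that was already returned to the mempool. A reduced sketch of that corrected pattern (the function name bsg_sp_alloc_sketch is invented; the types come from qla_def.h):

    static srb_t *bsg_sp_alloc_sketch(scsi_qla_host_t *vha, fc_port_t *fcport,
                                      size_t size)
    {
            struct qla_hw_data *ha = vha->hw;
            struct srb_bsg_ctx *ctx;
            srb_t *sp;

            sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
            if (!sp)
                    return NULL;
            ctx = kzalloc(size, GFP_KERNEL);
            if (!ctx) {
                    mempool_free(sp, ha->srb_mempool);
                    return NULL;    /* the old qla_attr.c copy still returned sp here */
            }
            memset(sp, 0, sizeof(*sp));
            sp->fcport = fcport;
            sp->ctx = ctx;
            return sp;
    }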
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
new file mode 100644
index 000000000000..21e5bcd4bb51
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -0,0 +1,1208 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2008 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+
+#include <linux/kthread.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+
+/* BSG support for ELS/CT pass through */
+inline srb_t *
+qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
+{
+ srb_t *sp;
+ struct qla_hw_data *ha = vha->hw;
+ struct srb_bsg_ctx *ctx;
+
+ sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
+ if (!sp)
+ goto done;
+ ctx = kzalloc(size, GFP_KERNEL);
+ if (!ctx) {
+ mempool_free(sp, ha->srb_mempool);
+ sp = NULL;
+ goto done;
+ }
+
+ memset(sp, 0, sizeof(*sp));
+ sp->fcport = fcport;
+ sp->ctx = ctx;
+done:
+ return sp;
+}
+
+int
+qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
+{
+ int i, ret, num_valid;
+ uint8_t *bcode;
+ struct qla_fcp_prio_entry *pri_entry;
+
+ ret = 1;
+ num_valid = 0;
+ bcode = (uint8_t *)pri_cfg;
+
+ if (bcode[0x0] != 'H' || bcode[0x1] != 'Q' || bcode[0x2] != 'O' ||
+ bcode[0x3] != 'S') {
+ return 0;
+ }
+ if (flag != 1)
+ return ret;
+
+ pri_entry = &pri_cfg->entry[0];
+ for (i = 0; i < pri_cfg->num_entries; i++) {
+ if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
+ num_valid++;
+ pri_entry++;
+ }
+
+ if (num_valid == 0)
+ ret = 0;
+
+ return ret;
+}
+
+static int
+qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int ret = 0;
+ uint32_t len;
+ uint32_t oper;
+
+ bsg_job->reply->reply_payload_rcv_len = 0;
+
+ if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+ test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+ ret = -EBUSY;
+ goto exit_fcp_prio_cfg;
+ }
+
+ /* Get the sub command */
+ oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+
+ /* Only set config is allowed if config memory is not allocated */
+ if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
+ ret = -EINVAL;
+ goto exit_fcp_prio_cfg;
+ }
+ switch (oper) {
+ case QLFC_FCP_PRIO_DISABLE:
+ if (ha->flags.fcp_prio_enabled) {
+ ha->flags.fcp_prio_enabled = 0;
+ ha->fcp_prio_cfg->attributes &=
+ ~FCP_PRIO_ATTR_ENABLE;
+ qla24xx_update_all_fcp_prio(vha);
+ bsg_job->reply->result = DID_OK;
+ } else {
+ ret = -EINVAL;
+ bsg_job->reply->result = (DID_ERROR << 16);
+ goto exit_fcp_prio_cfg;
+ }
+ break;
+
+ case QLFC_FCP_PRIO_ENABLE:
+ if (!ha->flags.fcp_prio_enabled) {
+ if (ha->fcp_prio_cfg) {
+ ha->flags.fcp_prio_enabled = 1;
+ ha->fcp_prio_cfg->attributes |=
+ FCP_PRIO_ATTR_ENABLE;
+ qla24xx_update_all_fcp_prio(vha);
+ bsg_job->reply->result = DID_OK;
+ } else {
+ ret = -EINVAL;
+ bsg_job->reply->result = (DID_ERROR << 16);
+ goto exit_fcp_prio_cfg;
+ }
+ }
+ break;
+
+ case QLFC_FCP_PRIO_GET_CONFIG:
+ len = bsg_job->reply_payload.payload_len;
+ if (!len || len > FCP_PRIO_CFG_SIZE) {
+ ret = -EINVAL;
+ bsg_job->reply->result = (DID_ERROR << 16);
+ goto exit_fcp_prio_cfg;
+ }
+
+ bsg_job->reply->result = DID_OK;
+ bsg_job->reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(
+ bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
+ len);
+
+ break;
+
+ case QLFC_FCP_PRIO_SET_CONFIG:
+ len = bsg_job->request_payload.payload_len;
+ if (!len || len > FCP_PRIO_CFG_SIZE) {
+ bsg_job->reply->result = (DID_ERROR << 16);
+ ret = -EINVAL;
+ goto exit_fcp_prio_cfg;
+ }
+
+ if (!ha->fcp_prio_cfg) {
+ ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
+ if (!ha->fcp_prio_cfg) {
+ qla_printk(KERN_WARNING, ha,
+ "Unable to allocate memory "
+ "for fcp prio config data (%x).\n",
+ FCP_PRIO_CFG_SIZE);
+ bsg_job->reply->result = (DID_ERROR << 16);
+ ret = -ENOMEM;
+ goto exit_fcp_prio_cfg;
+ }
+ }
+
+ memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
+ FCP_PRIO_CFG_SIZE);
+
+ /* validate fcp priority data */
+ if (!qla24xx_fcp_prio_cfg_valid(
+ (struct qla_fcp_prio_cfg *)
+ ha->fcp_prio_cfg, 1)) {
+ bsg_job->reply->result = (DID_ERROR << 16);
+ ret = -EINVAL;
+ /* If the buffer was invalid, the
+ * fcp_prio_cfg data is of no use
+ */
+ vfree(ha->fcp_prio_cfg);
+ ha->fcp_prio_cfg = NULL;
+ goto exit_fcp_prio_cfg;
+ }
+
+ ha->flags.fcp_prio_enabled = 0;
+ if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
+ ha->flags.fcp_prio_enabled = 1;
+ qla24xx_update_all_fcp_prio(vha);
+ bsg_job->reply->result = DID_OK;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+exit_fcp_prio_cfg:
+ bsg_job->job_done(bsg_job);
+ return ret;
+}
+static int
+qla2x00_process_els(struct fc_bsg_job *bsg_job)
+{
+ struct fc_rport *rport;
+ fc_port_t *fcport;
+ struct Scsi_Host *host;
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ srb_t *sp;
+ const char *type;
+ int req_sg_cnt, rsp_sg_cnt;
+ int rval = (DRIVER_ERROR << 16);
+ uint16_t nextlid = 0;
+ struct srb_bsg *els;
+
+ /* Multiple SG's are not supported for ELS requests */
+ if (bsg_job->request_payload.sg_cnt > 1 ||
+ bsg_job->reply_payload.sg_cnt > 1) {
+ DEBUG2(printk(KERN_INFO
+ "multiple SG's are not supported for ELS requests"
+ " [request_sg_cnt: %x reply_sg_cnt: %x]\n",
+ bsg_job->request_payload.sg_cnt,
+ bsg_job->reply_payload.sg_cnt));
+ rval = -EPERM;
+ goto done;
+ }
+
+ /* ELS request for rport */
+ if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
+ rport = bsg_job->rport;
+ fcport = *(fc_port_t **) rport->dd_data;
+ host = rport_to_shost(rport);
+ vha = shost_priv(host);
+ ha = vha->hw;
+ type = "FC_BSG_RPT_ELS";
+
+ /* make sure the rport is logged in,
+ * if not perform fabric login
+ */
+ if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "failed to login port %06X for ELS passthru\n",
+ fcport->d_id.b24));
+ rval = -EIO;
+ goto done;
+ }
+ } else {
+ host = bsg_job->shost;
+ vha = shost_priv(host);
+ ha = vha->hw;
+ type = "FC_BSG_HST_ELS_NOLOGIN";
+
+ /* Allocate a dummy fcport structure, since functions
+ * preparing the IOCB and mailbox command retrieves port
+ * specific information from fcport structure. For Host based
+ * ELS commands there will be no fcport structure allocated
+ */
+ fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+ if (!fcport) {
+ rval = -ENOMEM;
+ goto done;
+ }
+
+ /* Initialize all required fields of fcport */
+ fcport->vha = vha;
+ fcport->vp_idx = vha->vp_idx;
+ fcport->d_id.b.al_pa =
+ bsg_job->request->rqst_data.h_els.port_id[0];
+ fcport->d_id.b.area =
+ bsg_job->request->rqst_data.h_els.port_id[1];
+ fcport->d_id.b.domain =
+ bsg_job->request->rqst_data.h_els.port_id[2];
+ fcport->loop_id =
+ (fcport->d_id.b.al_pa == 0xFD) ?
+ NPH_FABRIC_CONTROLLER : NPH_F_PORT;
+ }
+
+ if (!vha->flags.online) {
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "host not online\n"));
+ rval = -EIO;
+ goto done;
+ }
+
+ req_sg_cnt =
+ dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ if (!req_sg_cnt) {
+ rval = -ENOMEM;
+ goto done_free_fcport;
+ }
+
+ rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ if (!rsp_sg_cnt) {
+ rval = -ENOMEM;
+ goto done_free_fcport;
+ }
+
+ if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
+ (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
+ DEBUG2(printk(KERN_INFO
+ "dma mapping resulted in different sg counts \
+ [request_sg_cnt: %x dma_request_sg_cnt: %x\
+ reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
+ bsg_job->request_payload.sg_cnt, req_sg_cnt,
+ bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
+ rval = -EAGAIN;
+ goto done_unmap_sg;
+ }
+
+ /* Alloc SRB structure */
+ sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
+ if (!sp) {
+ rval = -ENOMEM;
+ goto done_unmap_sg;
+ }
+
+ els = sp->ctx;
+ els->ctx.type =
+ (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
+ SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
+ els->bsg_job = bsg_job;
+
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
+ "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
+ bsg_job->request->rqst_data.h_els.command_code,
+ fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa));
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ kfree(sp->ctx);
+ mempool_free(sp, ha->srb_mempool);
+ rval = -EIO;
+ goto done_unmap_sg;
+ }
+ return rval;
+
+done_unmap_sg:
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ goto done_free_fcport;
+
+done_free_fcport:
+ if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
+ kfree(fcport);
+done:
+ return rval;
+}
+
+static int
+qla2x00_process_ct(struct fc_bsg_job *bsg_job)
+{
+ srb_t *sp;
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int rval = (DRIVER_ERROR << 16);
+ int req_sg_cnt, rsp_sg_cnt;
+ uint16_t loop_id;
+ struct fc_port *fcport;
+ char *type = "FC_BSG_HST_CT";
+ struct srb_bsg *ct;
+
+ /* pass through is supported only for ISP 4Gb or higher */
+ if (!IS_FWI2_CAPABLE(ha)) {
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld):Firmware is not capable to support FC "
+ "CT pass thru\n", vha->host_no));
+ rval = -EPERM;
+ goto done;
+ }
+
+ req_sg_cnt =
+ dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ if (!req_sg_cnt) {
+ rval = -ENOMEM;
+ goto done;
+ }
+
+ rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ if (!rsp_sg_cnt) {
+ rval = -ENOMEM;
+ goto done;
+ }
+
+ if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
+ (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "[request_sg_cnt: %x dma_request_sg_cnt: %x\
+ reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
+ bsg_job->request_payload.sg_cnt, req_sg_cnt,
+ bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
+ rval = -EAGAIN;
+ goto done_unmap_sg;
+ }
+
+ if (!vha->flags.online) {
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "host not online\n"));
+ rval = -EIO;
+ goto done_unmap_sg;
+ }
+
+ loop_id =
+ (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
+ >> 24;
+ switch (loop_id) {
+ case 0xFC:
+ loop_id = cpu_to_le16(NPH_SNS);
+ break;
+ case 0xFA:
+ loop_id = vha->mgmt_svr_loop_id;
+ break;
+ default:
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "Unknown loop id: %x\n", loop_id));
+ rval = -EINVAL;
+ goto done_unmap_sg;
+ }
+
+ /* Allocate a dummy fcport structure, since the functions preparing the
+ * IOCB and mailbox command retrieve port-specific information from the
+ * fcport structure. For host-based CT commands no fcport structure is
+ * allocated up front.
+ */
+ fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+ if (!fcport) {
+ rval = -ENOMEM;
+ goto done_unmap_sg;
+ }
+
+ /* Initialize all required fields of fcport */
+ fcport->vha = vha;
+ fcport->vp_idx = vha->vp_idx;
+ fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
+ fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
+ fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
+ fcport->loop_id = loop_id;
+
+ /* Alloc SRB structure */
+ sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
+ if (!sp) {
+ rval = -ENOMEM;
+ goto done_free_fcport;
+ }
+
+ ct = sp->ctx;
+ ct->ctx.type = SRB_CT_CMD;
+ ct->bsg_job = bsg_job;
+
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
+ "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
+ (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
+ fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa));
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ kfree(sp->ctx);
+ mempool_free(sp, ha->srb_mempool);
+ rval = -EIO;
+ goto done_free_fcport;
+ }
+ return rval;
+
+done_free_fcport:
+ kfree(fcport);
+done_unmap_sg:
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+done:
+ return rval;
+}
+
+static int
+qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int rval;
+ uint8_t command_sent;
+ char *type;
+ struct msg_echo_lb elreq;
+ uint16_t response[MAILBOX_REGISTER_COUNT];
+ uint8_t *fw_sts_ptr;
+ uint8_t *req_data = NULL;
+ dma_addr_t req_data_dma;
+ uint32_t req_data_len;
+ uint8_t *rsp_data = NULL;
+ dma_addr_t rsp_data_dma;
+ uint32_t rsp_data_len;
+
+ if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+ test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
+ return -EBUSY;
+
+ if (!vha->flags.online) {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "host not online\n"));
+ return -EIO;
+ }
+
+ elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
+ bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
+ DMA_TO_DEVICE);
+
+ if (!elreq.req_sg_cnt)
+ return -ENOMEM;
+
+ elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
+ bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
+ DMA_FROM_DEVICE);
+
+ if (!elreq.rsp_sg_cnt) {
+ rval = -ENOMEM;
+ goto done_unmap_req_sg;
+ }
+
+ if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
+ (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
+ DEBUG2(printk(KERN_INFO
+ "dma mapping resulted in different sg counts "
+ "[request_sg_cnt: %x dma_request_sg_cnt: %x "
+ "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
+ bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
+ bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt));
+ rval = -EAGAIN;
+ goto done_unmap_sg;
+ }
+ req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
+ req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
+ &req_data_dma, GFP_KERNEL);
+ if (!req_data) {
+ DEBUG2(printk(KERN_ERR "%s: dma alloc for req_data "
+ "failed for host=%lu\n", __func__, vha->host_no));
+ rval = -ENOMEM;
+ goto done_unmap_sg;
+ }
+
+ rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
+ &rsp_data_dma, GFP_KERNEL);
+ if (!rsp_data) {
+ DEBUG2(printk(KERN_ERR "%s: dma alloc for rsp_data "
+ "failed for host=%lu\n", __func__, vha->host_no));
+ rval = -ENOMEM;
+ goto done_free_dma_req;
+ }
+
+ /* Copy the request buffer in req_data now */
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, req_data, req_data_len);
+
+ elreq.send_dma = req_data_dma;
+ elreq.rcv_dma = rsp_data_dma;
+ elreq.transfer_size = req_data_len;
+
+ elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+
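+ /* Run the internal loopback test unless attached to a switched
+ * fabric, in which case an ECHO diagnostic is issued instead.
+ */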
+ if (ha->current_topology != ISP_CFG_F) {
+ type = "FC_BSG_HST_VENDOR_LOOPBACK";
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld) bsg rqst type: %s\n",
+ vha->host_no, type));
+
+ command_sent = INT_DEF_LB_LOOPBACK_CMD;
+ rval = qla2x00_loopback_test(vha, &elreq, response);
+ if (IS_QLA81XX(ha)) {
+ if (response[0] == MBS_COMMAND_ERROR &&
+ response[1] == MBS_LB_RESET) {
+ DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing "
+ "ISP\n", __func__, vha->host_no));
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+ }
+ } else {
+ type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld) bsg rqst type: %s\n", vha->host_no, type));
+ command_sent = INT_DEF_LB_ECHO_CMD;
+ rval = qla2x00_echo_test(vha, &elreq, response);
+ }
+
+ if (rval) {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
+ "request %s failed\n", vha->host_no, type));
+
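+ /* Return the mailbox status registers and the command type sent,
+ * placed just past the fc_bsg_reply in the job's sense buffer.
+ */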
+ fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
+ sizeof(struct fc_bsg_reply);
+
+ memcpy(fw_sts_ptr, response, sizeof(response));
+ fw_sts_ptr += sizeof(response);
+ *fw_sts_ptr = command_sent;
+ rval = 0;
+ bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_job->reply->result = (DID_ERROR << 16);
+ } else {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
+ "request %s completed\n", vha->host_no, type));
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
+ sizeof(response) + sizeof(uint8_t);
+ bsg_job->reply->reply_payload_rcv_len =
+ bsg_job->reply_payload.payload_len;
+ fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
+ sizeof(struct fc_bsg_reply);
+ memcpy(fw_sts_ptr, response, sizeof(response));
+ fw_sts_ptr += sizeof(response);
+ *fw_sts_ptr = command_sent;
+ bsg_job->reply->result = DID_OK;
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, rsp_data,
+ rsp_data_len);
+ }
+ bsg_job->job_done(bsg_job);
+
+ dma_free_coherent(&ha->pdev->dev, rsp_data_len,
+ rsp_data, rsp_data_dma);
+done_free_dma_req:
+ dma_free_coherent(&ha->pdev->dev, req_data_len,
+ req_data, req_data_dma);
+done_unmap_sg:
+ dma_unmap_sg(&ha->pdev->dev,
+ bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+done_unmap_req_sg:
+ dma_unmap_sg(&ha->pdev->dev,
+ bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ return rval;
+}
+
+static int
+qla84xx_reset(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int rval = 0;
+ uint32_t flag;
+
+ if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+ test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
+ return -EBUSY;
+
+ if (!IS_QLA84XX(ha)) {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
+ "exiting.\n", vha->host_no));
+ return -EINVAL;
+ }
+
+ flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+
+ rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
+
+ if (rval) {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
+ "request 84xx reset failed\n", vha->host_no));
+ rval = bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_job->reply->result = (DID_ERROR << 16);
+
+ } else {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
+ "request 84xx reset completed\n", vha->host_no));
+ bsg_job->reply->result = DID_OK;
+ }
+
+ bsg_job->job_done(bsg_job);
+ return rval;
+}
+
+static int
+qla84xx_updatefw(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ struct verify_chip_entry_84xx *mn = NULL;
+ dma_addr_t mn_dma, fw_dma;
+ void *fw_buf = NULL;
+ int rval = 0;
+ uint32_t sg_cnt;
+ uint32_t data_len;
+ uint16_t options;
+ uint32_t flag;
+ uint32_t fw_ver;
+
+ if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+ test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
+ return -EBUSY;
+
+ if (!IS_QLA84XX(ha)) {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
+ "exiting.\n", vha->host_no));
+ return -EINVAL;
+ }
+
+ sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ if (!sg_cnt)
+ return -ENOMEM;
+
+ if (sg_cnt != bsg_job->request_payload.sg_cnt) {
+ DEBUG2(printk(KERN_INFO
+ "dma mapping resulted in different sg counts "
+ "request_sg_cnt: %x dma_request_sg_cnt: %x ",
+ bsg_job->request_payload.sg_cnt, sg_cnt));
+ rval = -EAGAIN;
+ goto done_unmap_sg;
+ }
+
+ data_len = bsg_job->request_payload.payload_len;
+ fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
+ &fw_dma, GFP_KERNEL);
+ if (!fw_buf) {
+ DEBUG2(printk(KERN_ERR "%s: dma alloc for fw_buf "
+ "failed for host=%lu\n", __func__, vha->host_no));
+ rval = -ENOMEM;
+ goto done_unmap_sg;
+ }
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, fw_buf, data_len);
+
+ mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
+ if (!mn) {
+ DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
+ "failed for host=%lu\n", __func__, vha->host_no));
+ rval = -ENOMEM;
+ goto done_free_fw_buf;
+ }
+
+ flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
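+ /* The firmware version is read from the third dword of the image. */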
+ fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));
+
+ memset(mn, 0, sizeof(struct verify_chip_entry_84xx));
+ mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
+ mn->entry_count = 1;
+
+ options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
+ if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
+ options |= VCO_DIAG_FW;
+
+ mn->options = cpu_to_le16(options);
+ mn->fw_ver = cpu_to_le32(fw_ver);
+ mn->fw_size = cpu_to_le32(data_len);
+ mn->fw_seq_size = cpu_to_le32(data_len);
+ mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
+ mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
+ mn->dseg_length = cpu_to_le32(data_len);
+ mn->data_seg_cnt = cpu_to_le16(1);
+
+ rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
+
+ if (rval) {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
+ "request 84xx updatefw failed\n", vha->host_no));
+
+ rval = bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_job->reply->result = (DID_ERROR << 16);
+
+ } else {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
+ "request 84xx updatefw completed\n", vha->host_no));
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_job->reply->result = DID_OK;
+ }
+
+ bsg_job->job_done(bsg_job);
+ dma_pool_free(ha->s_dma_pool, mn, mn_dma);
+
+done_free_fw_buf:
+ dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);
+
+done_unmap_sg:
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+ return rval;
+}
+
+static int
+qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ struct access_chip_84xx *mn = NULL;
+ dma_addr_t mn_dma, mgmt_dma;
+ void *mgmt_b = NULL;
+ int rval = 0;
+ struct qla_bsg_a84_mgmt *ql84_mgmt;
+ uint32_t sg_cnt;
+ uint32_t data_len = 0;
+ uint32_t dma_direction = DMA_NONE;
+
+ if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+ test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
+ return -EBUSY;
+
+ if (!IS_QLA84XX(ha)) {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
+ "exiting.\n", vha->host_no));
+ return -EINVAL;
+ }
+
+ ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request +
+ sizeof(struct fc_bsg_request));
+ if (!ql84_mgmt) {
+ DEBUG2(printk("%s(%ld): mgmt header not provided, exiting.\n",
+ __func__, vha->host_no));
+ return -EINVAL;
+ }
+
+ mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
+ if (!mn) {
+ DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
+ "failed for host=%lu\n", __func__, vha->host_no));
+ return -ENOMEM;
+ }
+
+ memset(mn, 0, sizeof(struct access_chip_84xx));
+ mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
+ mn->entry_count = 1;
+
+ switch (ql84_mgmt->mgmt.cmd) {
+ case QLA84_MGMT_READ_MEM:
+ case QLA84_MGMT_GET_INFO:
+ sg_cnt = dma_map_sg(&ha->pdev->dev,
+ bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ if (!sg_cnt) {
+ rval = -ENOMEM;
+ goto exit_mgmt;
+ }
+
+ dma_direction = DMA_FROM_DEVICE;
+
+ if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
+ DEBUG2(printk(KERN_INFO
+ "dma mapping resulted in different sg counts "
+ "reply_sg_cnt: %x dma_reply_sg_cnt: %x\n",
+ bsg_job->reply_payload.sg_cnt, sg_cnt));
+ rval = -EAGAIN;
+ goto done_unmap_sg;
+ }
+
+ data_len = bsg_job->reply_payload.payload_len;
+
+ mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
+ &mgmt_dma, GFP_KERNEL);
+ if (!mgmt_b) {
+ DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b "
+ "failed for host=%lu\n",
+ __func__, vha->host_no));
+ rval = -ENOMEM;
+ goto done_unmap_sg;
+ }
+
+ if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
+ mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
+ mn->parameter1 =
+ cpu_to_le32(
+ ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
+
+ } else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
+ mn->options = cpu_to_le16(ACO_REQUEST_INFO);
+ mn->parameter1 =
+ cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);
+
+ mn->parameter2 =
+ cpu_to_le32(
+ ql84_mgmt->mgmt.mgmtp.u.info.context);
+ }
+ break;
+
+ case QLA84_MGMT_WRITE_MEM:
+ sg_cnt = dma_map_sg(&ha->pdev->dev,
+ bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+ if (!sg_cnt) {
+ rval = -ENOMEM;
+ goto exit_mgmt;
+ }
+
+ dma_direction = DMA_TO_DEVICE;
+
+ if (sg_cnt != bsg_job->request_payload.sg_cnt) {
+ DEBUG2(printk(KERN_INFO
+ "dma mapping resulted in different sg counts "
+ "request_sg_cnt: %x dma_request_sg_cnt: %x ",
+ bsg_job->request_payload.sg_cnt, sg_cnt));
+ rval = -EAGAIN;
+ goto done_unmap_sg;
+ }
+
+ data_len = bsg_job->request_payload.payload_len;
+ mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
+ &mgmt_dma, GFP_KERNEL);
+ if (!mgmt_b) {
+ DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b "
+ "failed for host=%lu\n",
+ __func__, vha->host_no));
+ rval = -ENOMEM;
+ goto done_unmap_sg;
+ }
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, mgmt_b, data_len);
+
+ mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
+ mn->parameter1 =
+ cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
+ break;
+
+ case QLA84_MGMT_CHNG_CONFIG:
+ mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
+ mn->parameter1 =
+ cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);
+
+ mn->parameter2 =
+ cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);
+
+ mn->parameter3 =
+ cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
+ break;
+
+ default:
+ rval = -EIO;
+ goto exit_mgmt;
+ }
+
+ if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
+ mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
+ mn->dseg_count = cpu_to_le16(1);
+ mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
+ mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
+ mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
+ }
+
+ rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
+
+ if (rval) {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
+ "request 84xx mgmt failed\n", vha->host_no));
+
+ rval = bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_job->reply->result = (DID_ERROR << 16);
+
+ } else {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
+ "request 84xx mgmt completed\n", vha->host_no));
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_job->reply->result = DID_OK;
+
+ if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
+ (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
+ bsg_job->reply->reply_payload_rcv_len =
+ bsg_job->reply_payload.payload_len;
+
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, mgmt_b,
+ data_len);
+ }
+ }
+
+ bsg_job->job_done(bsg_job);
+
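+ /* Unmap whichever payload, if any, was DMA-mapped above. */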
+done_unmap_sg:
+ if (mgmt_b)
+ dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);
+
+ if (dma_direction == DMA_TO_DEVICE)
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ else if (dma_direction == DMA_FROM_DEVICE)
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+
+exit_mgmt:
+ dma_pool_free(ha->s_dma_pool, mn, mn_dma);
+
+ return rval;
+}
+
+static int
+qla24xx_iidma(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int rval = 0;
+ struct qla_port_param *port_param = NULL;
+ fc_port_t *fcport = NULL;
+ uint16_t mb[MAILBOX_REGISTER_COUNT];
+ uint8_t *rsp_ptr = NULL;
+
+ bsg_job->reply->reply_payload_rcv_len = 0;
+
+ if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+ test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
+ return -EBUSY;
+
+ if (!IS_IIDMA_CAPABLE(vha->hw)) {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "%s(%lu): iiDMA not "
+ "supported\n", __func__, vha->host_no));
+ return -EINVAL;
+ }
+
+ port_param = (struct qla_port_param *)((char *)bsg_job->request +
+ sizeof(struct fc_bsg_request));
+ if (!port_param) {
+ DEBUG2(printk("%s(%ld): port_param header not provided, "
+ "exiting.\n", __func__, vha->host_no));
+ return -EINVAL;
+ }
+
+ if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
+ DEBUG2(printk(KERN_ERR "%s(%ld): Invalid destination type\n",
+ __func__, vha->host_no));
+ return -EINVAL;
+ }
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->port_type != FCT_TARGET)
+ continue;
+
+ if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
+ fcport->port_name, sizeof(fcport->port_name)))
+ continue;
+ break;
+ }
+
+ if (!fcport) {
+ DEBUG2(printk(KERN_ERR "%s(%ld): Failed to find port\n",
+ __func__, vha->host_no));
+ return -EINVAL;
+ }
+
+ if (port_param->mode)
+ rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
+ port_param->speed, mb);
+ else
+ rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
+ &port_param->speed, mb);
+
+ if (rval) {
+ DEBUG16(printk(KERN_ERR "scsi(%ld): iIDMA cmd failed for "
+ "%02x%02x%02x%02x%02x%02x%02x%02x -- "
+ "%04x %x %04x %04x.\n",
+ vha->host_no, fcport->port_name[0],
+ fcport->port_name[1],
+ fcport->port_name[2], fcport->port_name[3],
+ fcport->port_name[4], fcport->port_name[5],
+ fcport->port_name[6], fcport->port_name[7], rval,
+ fcport->fp_speed, mb[0], mb[1]));
+ rval = 0;
+ bsg_job->reply->result = (DID_ERROR << 16);
+
+ } else {
+ if (!port_param->mode) {
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
+ sizeof(struct qla_port_param);
+
+ rsp_ptr = ((uint8_t *)bsg_job->reply) +
+ sizeof(struct fc_bsg_reply);
+
+ memcpy(rsp_ptr, port_param,
+ sizeof(struct qla_port_param));
+ }
+
+ bsg_job->reply->result = DID_OK;
+ }
+
+ bsg_job->job_done(bsg_job);
+ return rval;
+}
+
+static int
+qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
+{
+ switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
+ case QL_VND_LOOPBACK:
+ return qla2x00_process_loopback(bsg_job);
+
+ case QL_VND_A84_RESET:
+ return qla84xx_reset(bsg_job);
+
+ case QL_VND_A84_UPDATE_FW:
+ return qla84xx_updatefw(bsg_job);
+
+ case QL_VND_A84_MGMT_CMD:
+ return qla84xx_mgmt_cmd(bsg_job);
+
+ case QL_VND_IIDMA:
+ return qla24xx_iidma(bsg_job);
+
+ case QL_VND_FCP_PRIO_CFG_CMD:
+ return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
+
+ default:
+ bsg_job->reply->result = (DID_ERROR << 16);
+ bsg_job->job_done(bsg_job);
+ return -ENOSYS;
+ }
+}
+
+int
+qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
+{
+ int ret = -EINVAL;
+
+ switch (bsg_job->request->msgcode) {
+ case FC_BSG_RPT_ELS:
+ case FC_BSG_HST_ELS_NOLOGIN:
+ ret = qla2x00_process_els(bsg_job);
+ break;
+ case FC_BSG_HST_CT:
+ ret = qla2x00_process_ct(bsg_job);
+ break;
+ case FC_BSG_HST_VENDOR:
+ ret = qla2x00_process_vendor_specific(bsg_job);
+ break;
+ case FC_BSG_HST_ADD_RPORT:
+ case FC_BSG_HST_DEL_RPORT:
+ case FC_BSG_RPT_CT:
+ default:
+ DEBUG2(printk("qla2xxx: unsupported BSG request\n"));
+ break;
+ }
+ return ret;
+}
+
+int
+qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
+{
+ scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
+ struct qla_hw_data *ha = vha->hw;
+ srb_t *sp;
+ int cnt, que;
+ unsigned long flags;
+ struct req_que *req;
+ struct srb_bsg *sp_bsg;
+
+ /* find the bsg job from the active list of commands */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ for (que = 0; que < ha->max_req_queues; que++) {
+ req = ha->req_q_map[que];
+ if (!req)
+ continue;
+
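+ /* Handle 0 is never assigned to a command, so scan from index 1. */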
+ for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ sp = req->outstanding_cmds[cnt];
+ if (sp) {
+ sp_bsg = (struct srb_bsg *)sp->ctx;
+
+ if (((sp_bsg->ctx.type == SRB_CT_CMD) ||
+ (sp_bsg->ctx.type == SRB_ELS_CMD_HST))
+ && (sp_bsg->bsg_job == bsg_job)) {
+ if (ha->isp_ops->abort_command(sp)) {
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld): mbx "
+ "abort_command failed\n",
+ vha->host_no));
+ bsg_job->req->errors =
+ bsg_job->reply->result = -EIO;
+ } else {
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld): mbx "
+ "abort_command success\n",
+ vha->host_no));
+ bsg_job->req->errors =
+ bsg_job->reply->result = 0;
+ }
+ goto done;
+ }
+ }
+ }
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld) SRB not found to abort\n", vha->host_no));
+ bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
+ return 0;
+
+done:
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (bsg_job->request->msgcode == FC_BSG_HST_CT)
+ kfree(sp->fcport);
+ kfree(sp->ctx);
+ mempool_free(sp, ha->srb_mempool);
+ return 0;
+}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
new file mode 100644
index 000000000000..76ed92dd2ef2
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -0,0 +1,135 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2008 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#ifndef __QLA_BSG_H
+#define __QLA_BSG_H
+
+/* BSG Vendor specific commands */
+#define QL_VND_LOOPBACK 0x01
+#define QL_VND_A84_RESET 0x02
+#define QL_VND_A84_UPDATE_FW 0x03
+#define QL_VND_A84_MGMT_CMD 0x04
+#define QL_VND_IIDMA 0x05
+#define QL_VND_FCP_PRIO_CFG_CMD 0x06
+
+/* BSG definitions for interpreting CommandSent field */
+#define INT_DEF_LB_LOOPBACK_CMD 0
+#define INT_DEF_LB_ECHO_CMD 1
+
+/* BSG Vendor specific definitions */
+#define A84_ISSUE_WRITE_TYPE_CMD 0
+#define A84_ISSUE_READ_TYPE_CMD 1
+#define A84_CLEANUP_CMD 2
+#define A84_ISSUE_RESET_OP_FW 3
+#define A84_ISSUE_RESET_DIAG_FW 4
+#define A84_ISSUE_UPDATE_OPFW_CMD 5
+#define A84_ISSUE_UPDATE_DIAGFW_CMD 6
+
+struct qla84_mgmt_param {
+ union {
+ struct {
+ uint32_t start_addr;
+ } mem; /* for QLA84_MGMT_READ/WRITE_MEM */
+ struct {
+ uint32_t id;
+#define QLA84_MGMT_CONFIG_ID_UIF 1
+#define QLA84_MGMT_CONFIG_ID_FCOE_COS 2
+#define QLA84_MGMT_CONFIG_ID_PAUSE 3
+#define QLA84_MGMT_CONFIG_ID_TIMEOUTS 4
+
+ uint32_t param0;
+ uint32_t param1;
+ } config; /* for QLA84_MGMT_CHNG_CONFIG */
+
+ struct {
+ uint32_t type;
+#define QLA84_MGMT_INFO_CONFIG_LOG_DATA 1 /* Get Config Log Data */
+#define QLA84_MGMT_INFO_LOG_DATA 2 /* Get Log Data */
+#define QLA84_MGMT_INFO_PORT_STAT 3 /* Get Port Statistics */
+#define QLA84_MGMT_INFO_LIF_STAT 4 /* Get LIF Statistics */
+#define QLA84_MGMT_INFO_ASIC_STAT 5 /* Get ASIC Statistics */
+#define QLA84_MGMT_INFO_CONFIG_PARAMS 6 /* Get Config Parameters */
+#define QLA84_MGMT_INFO_PANIC_LOG 7 /* Get Panic Log */
+
+ uint32_t context;
+/*
+* context definitions for QLA84_MGMT_INFO_CONFIG_LOG_DATA
+*/
+#define IC_LOG_DATA_LOG_ID_DEBUG_LOG 0
+#define IC_LOG_DATA_LOG_ID_LEARN_LOG 1
+#define IC_LOG_DATA_LOG_ID_FC_ACL_INGRESS_LOG 2
+#define IC_LOG_DATA_LOG_ID_FC_ACL_EGRESS_LOG 3
+#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_INGRESS_LOG 4
+#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_EGRESS_LOG 5
+#define IC_LOG_DATA_LOG_ID_MESSAGE_TRANSMIT_LOG 6
+#define IC_LOG_DATA_LOG_ID_MESSAGE_RECEIVE_LOG 7
+#define IC_LOG_DATA_LOG_ID_LINK_EVENT_LOG 8
+#define IC_LOG_DATA_LOG_ID_DCX_LOG 9
+
+/*
+* context definitions for QLA84_MGMT_INFO_PORT_STAT
+*/
+#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT0 0
+#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT1 1
+#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT0 2
+#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT1 3
+#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT0 4
+#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT1 5
+
+
+/*
+* context definitions for QLA84_MGMT_INFO_LIF_STAT
+*/
+#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT0 0
+#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT1 1
+#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT0 2
+#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT1 3
+#define IC_LIF_STATISTICS_LIF_NUMBER_CPU 6
+
+ } info; /* for QLA84_MGMT_GET_INFO */
+ } u;
+};
+
+struct qla84_msg_mgmt {
+ uint16_t cmd;
+#define QLA84_MGMT_READ_MEM 0x00
+#define QLA84_MGMT_WRITE_MEM 0x01
+#define QLA84_MGMT_CHNG_CONFIG 0x02
+#define QLA84_MGMT_GET_INFO 0x03
+ uint16_t rsrvd;
+ struct qla84_mgmt_param mgmtp;/* parameters for cmd */
+ uint32_t len; /* bytes in payload following this struct */
+ uint8_t payload[0]; /* payload for cmd */
+};
+
+struct qla_bsg_a84_mgmt {
+ struct qla84_msg_mgmt mgmt;
+} __attribute__ ((packed));
+
+struct qla_scsi_addr {
+ uint16_t bus;
+ uint16_t target;
+} __attribute__ ((packed));
+
+struct qla_ext_dest_addr {
+ union {
+ uint8_t wwnn[8];
+ uint8_t wwpn[8];
+ uint8_t id[4];
+ struct qla_scsi_addr scsi_addr;
+ } dest_addr;
+ uint16_t dest_type;
+#define EXT_DEF_TYPE_WWPN 2
+ uint16_t lun;
+ uint16_t padding[2];
+} __attribute__ ((packed));
+
+struct qla_port_param {
+ struct qla_ext_dest_addr fc_scsi_addr;
+ uint16_t mode;
+ uint16_t speed;
+} __attribute__ ((packed));
+#endif
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index cb2eca4c26d8..89bfc119010b 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -769,6 +769,9 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
void *nxt;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ if (IS_QLA82XX(ha))
+ return;
+
risc_address = ext_mem_cnt = 0;
flags = 0;
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index afa95614aaf8..edb7a704ed77 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -33,6 +33,8 @@
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
+#include "qla_bsg.h"
+#include "qla_nx.h"
#define QLA2XXX_DRIVER_NAME "qla2xxx"
/*
@@ -206,6 +208,7 @@ typedef struct srb {
* SRB flag definitions
*/
#define SRB_DMA_VALID BIT_0 /* Command sent to ISP */
+#define SRB_FCP_CMND_DMA_VALID BIT_12 /* FCP command in IOCB */
/*
* SRB extensions.
@@ -416,6 +419,7 @@ typedef union {
struct device_reg_2xxx isp;
struct device_reg_24xx isp24;
struct device_reg_25xxmq isp25mq;
+ struct device_reg_82xx isp82;
} device_reg_t;
#define ISP_REQ_Q_IN(ha, reg) \
@@ -1579,6 +1583,8 @@ typedef struct fc_port {
uint16_t loop_id;
uint16_t old_loop_id;
+ uint8_t fcp_prio;
+
uint8_t fabric_port_name[WWN_SIZE];
uint16_t fp_speed;
@@ -2109,6 +2115,7 @@ struct isp_operations {
int (*get_flash_version) (struct scsi_qla_host *, void *);
int (*start_scsi) (srb_t *);
+ int (*abort_isp) (struct scsi_qla_host *);
};
/* MSI-X Support *************************************************************/
@@ -2295,6 +2302,7 @@ struct qla_hw_data {
uint32_t eeh_busy :1;
uint32_t cpu_affinity_enabled :1;
uint32_t disable_msix_handshake :1;
+ uint32_t fcp_prio_enabled :1;
} flags;
/* This spinlock is used to protect "io transactions", you must
@@ -2382,7 +2390,8 @@ struct qla_hw_data {
#define DT_ISP2532 BIT_11
#define DT_ISP8432 BIT_12
#define DT_ISP8001 BIT_13
-#define DT_ISP_LAST (DT_ISP8001 << 1)
+#define DT_ISP8021 BIT_14
+#define DT_ISP_LAST (DT_ISP8021 << 1)
#define DT_IIDMA BIT_26
#define DT_FWI2 BIT_27
@@ -2405,6 +2414,7 @@ struct qla_hw_data {
#define IS_QLA2532(ha) (DT_MASK(ha) & DT_ISP2532)
#define IS_QLA8432(ha) (DT_MASK(ha) & DT_ISP8432)
#define IS_QLA8001(ha) (DT_MASK(ha) & DT_ISP8001)
+#define IS_QLA82XX(ha) (DT_MASK(ha) & DT_ISP8021)
#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
IS_QLA6312(ha) || IS_QLA6322(ha))
@@ -2415,8 +2425,10 @@ struct qla_hw_data {
#define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
IS_QLA84XX(ha))
#define IS_QLA81XX(ha) (IS_QLA8001(ha))
+#define IS_QLA8XXX_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA82XX(ha))
#define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
- IS_QLA25XX(ha) || IS_QLA81XX(ha))
+ IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
+ IS_QLA82XX(ha))
#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha))
#define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && \
(ha)->flags.msix_enabled)
@@ -2598,6 +2610,8 @@ struct qla_hw_data {
uint32_t flt_region_nvram;
uint32_t flt_region_npiv_conf;
uint32_t flt_region_gold_fw;
+ uint32_t flt_region_fcp_prio;
+ uint32_t flt_region_bootload;
/* Needed for BEACON */
uint16_t beacon_blink_led;
@@ -2626,6 +2640,41 @@ struct qla_hw_data {
struct isp_operations *isp_ops;
struct workqueue_struct *wq;
struct qlfc_fw fw_buf;
+
+ /* FCP_CMND priority support */
+ struct qla_fcp_prio_cfg *fcp_prio_cfg;
+
+ struct dma_pool *dl_dma_pool;
+#define DSD_LIST_DMA_POOL_SIZE 512
+
+ struct dma_pool *fcp_cmnd_dma_pool;
+ mempool_t *ctx_mempool;
+#define FCP_CMND_DMA_POOL_SIZE 512
+
+ unsigned long nx_pcibase; /* Base I/O address */
+ uint8_t *nxdb_rd_ptr; /* Doorbell read pointer */
+ unsigned long nxdb_wr_ptr; /* Door bell write pointer */
+ unsigned long first_page_group_start;
+ unsigned long first_page_group_end;
+
+ uint32_t crb_win;
+ uint32_t curr_window;
+ uint32_t ddr_mn_window;
+ unsigned long mn_win_crb;
+ unsigned long ms_win_crb;
+ int qdr_sn_window;
+ uint32_t nx_dev_init_timeout;
+ uint32_t nx_reset_timeout;
+ rwlock_t hw_lock;
+ uint16_t portnum; /* port number */
+ int link_width;
+ struct fw_blob *hablob;
+ struct qla82xx_legacy_intr_set nx_legacy_intr;
+
+ uint16_t gbl_dsd_inuse;
+ uint16_t gbl_dsd_avail;
+ struct list_head gbl_dsd_list;
+#define NUM_DSD_CHAIN 4096
};
/*
@@ -2678,10 +2727,13 @@ typedef struct scsi_qla_host {
#define VP_DPC_NEEDED 14 /* wake up for VP dpc handling */
#define UNLOADING 15
#define NPIV_CONFIG_NEEDED 16
+#define ISP_UNRECOVERABLE 17
+#define FCOE_CTX_RESET_NEEDED 18 /* Initiate FCoE context reset */
uint32_t device_flags;
#define SWITCH_FOUND BIT_0
#define DFLG_NO_CABLE BIT_1
+#define DFLG_DEV_FAILED BIT_5
/* ISP configuration data. */
uint16_t loop_id; /* Host adapter loop id */
@@ -2739,6 +2791,8 @@ typedef struct scsi_qla_host {
#define VP_ERR_ADAP_NORESOURCES 5
struct qla_hw_data *hw;
struct req_que *req;
+ int fw_heartbeat_counter;
+ int seconds_since_last_heartbeat;
} scsi_qla_host_t;
/*
@@ -2791,134 +2845,14 @@ typedef struct scsi_qla_host {
#define OPTROM_SIZE_24XX 0x100000
#define OPTROM_SIZE_25XX 0x200000
#define OPTROM_SIZE_81XX 0x400000
+#define OPTROM_SIZE_82XX 0x800000
+
+#define OPTROM_BURST_SIZE 0x1000
+#define OPTROM_BURST_DWORDS (OPTROM_BURST_SIZE / 4)
#include "qla_gbl.h"
#include "qla_dbg.h"
#include "qla_inline.h"
#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
-
-/*
- * BSG Vendor specific commands
- */
-
-#define QL_VND_LOOPBACK 0x01
-#define QLA84_RESET 0x02
-#define QLA84_UPDATE_FW 0x03
-#define QLA84_MGMT_CMD 0x04
-
-/* BSG definations for interpreting CommandSent field */
-#define INT_DEF_LB_LOOPBACK_CMD 0
-#define INT_DEF_LB_ECHO_CMD 1
-
-/* BSG Vendor specific definations */
-typedef struct _A84_RESET {
- uint16_t Flags;
- uint16_t Reserved;
-#define A84_RESET_FLAG_ENABLE_DIAG_FW 1
-} __attribute__((packed)) A84_RESET, *PA84_RESET;
-
-#define A84_ISSUE_WRITE_TYPE_CMD 0
-#define A84_ISSUE_READ_TYPE_CMD 1
-#define A84_CLEANUP_CMD 2
-#define A84_ISSUE_RESET_OP_FW 3
-#define A84_ISSUE_RESET_DIAG_FW 4
-#define A84_ISSUE_UPDATE_OPFW_CMD 5
-#define A84_ISSUE_UPDATE_DIAGFW_CMD 6
-
-struct qla84_mgmt_param {
- union {
- struct {
- uint32_t start_addr;
- } mem; /* for QLA84_MGMT_READ/WRITE_MEM */
- struct {
- uint32_t id;
-#define QLA84_MGMT_CONFIG_ID_UIF 1
-#define QLA84_MGMT_CONFIG_ID_FCOE_COS 2
-#define QLA84_MGMT_CONFIG_ID_PAUSE 3
-#define QLA84_MGMT_CONFIG_ID_TIMEOUTS 4
-
- uint32_t param0;
- uint32_t param1;
- } config; /* for QLA84_MGMT_CHNG_CONFIG */
-
- struct {
- uint32_t type;
-#define QLA84_MGMT_INFO_CONFIG_LOG_DATA 1 /* Get Config Log Data */
-#define QLA84_MGMT_INFO_LOG_DATA 2 /* Get Log Data */
-#define QLA84_MGMT_INFO_PORT_STAT 3 /* Get Port Statistics */
-#define QLA84_MGMT_INFO_LIF_STAT 4 /* Get LIF Statistics */
-#define QLA84_MGMT_INFO_ASIC_STAT 5 /* Get ASIC Statistics */
-#define QLA84_MGMT_INFO_CONFIG_PARAMS 6 /* Get Config Parameters */
-#define QLA84_MGMT_INFO_PANIC_LOG 7 /* Get Panic Log */
-
- uint32_t context;
-/*
-* context definitions for QLA84_MGMT_INFO_CONFIG_LOG_DATA
-*/
-#define IC_LOG_DATA_LOG_ID_DEBUG_LOG 0
-#define IC_LOG_DATA_LOG_ID_LEARN_LOG 1
-#define IC_LOG_DATA_LOG_ID_FC_ACL_INGRESS_LOG 2
-#define IC_LOG_DATA_LOG_ID_FC_ACL_EGRESS_LOG 3
-#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_INGRESS_LOG 4
-#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_EGRESS_LOG 5
-#define IC_LOG_DATA_LOG_ID_MESSAGE_TRANSMIT_LOG 6
-#define IC_LOG_DATA_LOG_ID_MESSAGE_RECEIVE_LOG 7
-#define IC_LOG_DATA_LOG_ID_LINK_EVENT_LOG 8
-#define IC_LOG_DATA_LOG_ID_DCX_LOG 9
-
-/*
-* context definitions for QLA84_MGMT_INFO_PORT_STAT
-*/
-#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT0 0
-#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT1 1
-#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT0 2
-#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT1 3
-#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT0 4
-#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT1 5
-
-
-/*
-* context definitions for QLA84_MGMT_INFO_LIF_STAT
-*/
-#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT0 0
-#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT1 1
-#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT0 2
-#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT1 3
-#define IC_LIF_STATISTICS_LIF_NUMBER_CPU 6
-
- } info; /* for QLA84_MGMT_GET_INFO */
- } u;
-};
-
-struct qla84_msg_mgmt {
- uint16_t cmd;
-#define QLA84_MGMT_READ_MEM 0x00
-#define QLA84_MGMT_WRITE_MEM 0x01
-#define QLA84_MGMT_CHNG_CONFIG 0x02
-#define QLA84_MGMT_GET_INFO 0x03
- uint16_t rsrvd;
- struct qla84_mgmt_param mgmtp;/* parameters for cmd */
- uint32_t len; /* bytes in payload following this struct */
- uint8_t payload[0]; /* payload for cmd */
-};
-
-struct msg_update_fw {
- /*
- * diag_fw = 0 operational fw
- * otherwise diagnostic fw
- * offset, len, fw_len are present to overcome the current limitation
- * of 128Kb xfer size. The fw is sent in smaller chunks. Each chunk
- * specifies the byte "offset" where it fits in the fw buffer. The
- * number of bytes in each chunk is specified in "len". "fw_len"
- * is the total size of fw. The first chunk should start at offset = 0.
- * When offset+len == fw_len, the fw is written to the HBA.
- */
- uint32_t diag_fw;
- uint32_t offset;/* start offset */
- uint32_t len; /* num bytes in cur xfer */
- uint32_t fw_len; /* size of fw in bytes */
- uint8_t fw_bytes[0];
-};
-
#endif
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 42c5587cc50c..a77a2471eaff 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -841,6 +841,8 @@ struct device_reg_24xx {
#define FA_HW_EVENT_ENTRY_SIZE 4
#define FA_NPIV_CONF0_ADDR 0x5C000
#define FA_NPIV_CONF1_ADDR 0x5D000
+#define FA_FCP_PRIO0_ADDR 0x10000
+#define FA_FCP_PRIO1_ADDR 0x12000
/*
* Flash Error Log Event Codes.
@@ -1274,6 +1276,8 @@ struct qla_flt_header {
#define FLT_REG_NPIV_CONF_0 0x29
#define FLT_REG_NPIV_CONF_1 0x2a
#define FLT_REG_GOLD_FW 0x2f
+#define FLT_REG_FCP_PRIO_0 0x87
+#define FLT_REG_FCP_PRIO_1 0x88
struct qla_flt_region {
uint32_t code;
@@ -1750,6 +1754,61 @@ struct ex_init_cb_81xx {
#define FARX_ACCESS_FLASH_CONF_81XX 0x7FFD0000
#define FARX_ACCESS_FLASH_DATA_81XX 0x7F800000
+/* FCP priority config defines *************************************/
+/* operations */
+#define QLFC_FCP_PRIO_DISABLE 0x0
+#define QLFC_FCP_PRIO_ENABLE 0x1
+#define QLFC_FCP_PRIO_GET_CONFIG 0x2
+#define QLFC_FCP_PRIO_SET_CONFIG 0x3
+
+struct qla_fcp_prio_entry {
+ uint16_t flags; /* Describes parameter(s) in FCP */
+ /* priority entry that are valid */
+#define FCP_PRIO_ENTRY_VALID 0x1
+#define FCP_PRIO_ENTRY_TAG_VALID 0x2
+#define FCP_PRIO_ENTRY_SPID_VALID 0x4
+#define FCP_PRIO_ENTRY_DPID_VALID 0x8
+#define FCP_PRIO_ENTRY_LUNB_VALID 0x10
+#define FCP_PRIO_ENTRY_LUNE_VALID 0x20
+#define FCP_PRIO_ENTRY_SWWN_VALID 0x40
+#define FCP_PRIO_ENTRY_DWWN_VALID 0x80
+ uint8_t tag; /* Priority value */
+ uint8_t reserved; /* Reserved for future use */
+ uint32_t src_pid; /* Src port id. high order byte */
+ /* unused; -1 (wild card) */
+ uint32_t dst_pid; /* Dst port id. high order byte */
+ /* unused; -1 (wild card) */
+ uint16_t lun_beg; /* 1st lun num of lun range. */
+ /* -1 (wild card) */
+ uint16_t lun_end; /* 2nd lun num of lun range. */
+ /* -1 (wild card) */
+ uint8_t src_wwpn[8]; /* Source WWPN: -1 (wild card) */
+ uint8_t dst_wwpn[8]; /* Destination WWPN: -1 (wild card) */
+};
+
+struct qla_fcp_prio_cfg {
+ uint8_t signature[4]; /* "HQOS" signature of config data */
+ uint16_t version; /* 1: Initial version */
+ uint16_t length; /* config data size in num bytes */
+ uint16_t checksum; /* config data bytes checksum */
+ uint16_t num_entries; /* Number of entries */
+ uint16_t size_of_entry; /* Size of each entry in num bytes */
+ uint8_t attributes; /* enable/disable, persistence */
+#define FCP_PRIO_ATTR_DISABLE 0x0
+#define FCP_PRIO_ATTR_ENABLE 0x1
+#define FCP_PRIO_ATTR_PERSIST 0x2
+ uint8_t reserved; /* Reserved for future use */
+#define FCP_PRIO_CFG_HDR_SIZE 0x10
+ struct qla_fcp_prio_entry entry[1]; /* fcp priority entries */
+#define FCP_PRIO_CFG_ENTRY_SIZE 0x20
+};
+
+#define FCP_PRIO_CFG_SIZE (32*1024) /* fcp prio data per port*/
+
+/* 25XX Support ****************************************************/
+#define FA_FCP_PRIO0_ADDR_25 0x3C000
+#define FA_FCP_PRIO1_ADDR_25 0x3E000
+
/* 81XX Flash locations -- occupies second 2MB region. */
#define FA_BOOT_CODE_ADDR_81 0x80000
#define FA_RISC_CODE_ADDR_81 0xA0000
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 3a89bc514e2b..cff5a4ae7626 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -44,6 +44,7 @@ extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *);
extern void qla2x00_update_fcports(scsi_qla_host_t *);
extern int qla2x00_abort_isp(scsi_qla_host_t *);
+extern void qla2x00_abort_isp_cleanup(scsi_qla_host_t *);
extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *);
@@ -79,6 +80,9 @@ extern int ql2xmaxqueues;
extern int ql2xmultique_tag;
extern int ql2xfwloadbin;
extern int ql2xetsenable;
+extern int ql2xshiftctondsd;
+extern int ql2xdbwr;
+extern int ql2xdontresethba;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -135,6 +139,7 @@ extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *);
extern int qla2x00_wait_for_hba_online(scsi_qla_host_t *);
extern int qla2x00_wait_for_chip_reset(scsi_qla_host_t *);
+extern int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *);
extern void qla2xxx_wake_dpc(struct scsi_qla_host *);
extern void qla2x00_alert_all_vps(struct rsp_que *, uint16_t *);
@@ -157,6 +162,9 @@ int __qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
uint16_t, uint16_t, uint8_t);
extern int qla2x00_start_sp(srb_t *);
extern void qla2x00_ctx_sp_free(srb_t *);
+extern uint16_t qla24xx_calc_iocbs(uint16_t);
+extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t);
+
/*
* Global Function Prototypes in qla_mbx.c source file.
@@ -328,6 +336,9 @@ extern int
qla2x00_write_ram_word(scsi_qla_host_t *, uint32_t, uint32_t);
extern int qla2x00_get_data_rate(scsi_qla_host_t *);
+extern int qla24xx_set_fcp_prio(scsi_qla_host_t *, uint16_t, uint16_t,
+ uint16_t *);
+
/*
* Global Function Prototypes in qla_isr.c source file.
*/
@@ -340,6 +351,7 @@ qla24xx_process_response_queue(struct scsi_qla_host *, struct rsp_que *);
extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *);
extern void qla2x00_free_irqs(scsi_qla_host_t *);
+extern int qla2x00_get_data_rate(scsi_qla_host_t *);
/*
* Global Function Prototypes in qla_sup.c source file.
*/
@@ -384,6 +396,7 @@ extern int qla2xxx_get_flash_info(scsi_qla_host_t *);
extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t);
extern void qla2xxx_flash_npiv_conf(scsi_qla_host_t *);
+extern int qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *);
/*
* Global Function Prototypes in qla_dbg.c source file.
@@ -430,7 +443,10 @@ extern void qla2x00_init_host_attr(scsi_qla_host_t *);
extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
extern int qla2x00_loopback_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16_t *);
-extern int qla2x00_echo_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16_t *);
+extern int qla2x00_echo_test(scsi_qla_host_t *,
+ struct msg_echo_lb *, uint16_t *);
+extern int qla24xx_update_all_fcp_prio(scsi_qla_host_t *);
+extern int qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *, uint8_t);
/*
* Global Function Prototypes in qla_dfs.c source file.
@@ -459,4 +475,88 @@ extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
+/* qla82xx related functions */
+
+/* PCI related functions */
+extern int qla82xx_pci_config(struct scsi_qla_host *);
+extern int qla82xx_pci_mem_read_2M(struct qla_hw_data *, u64, void *, int);
+extern int qla82xx_pci_mem_write_2M(struct qla_hw_data *, u64, void *, int);
+extern char *qla82xx_pci_info_str(struct scsi_qla_host *, char *);
+extern int qla82xx_pci_region_offset(struct pci_dev *, int);
+extern int qla82xx_pci_region_len(struct pci_dev *, int);
+extern int qla82xx_iospace_config(struct qla_hw_data *);
+
+/* Initialization related functions */
+extern void qla82xx_reset_chip(struct scsi_qla_host *);
+extern void qla82xx_config_rings(struct scsi_qla_host *);
+extern int qla82xx_nvram_config(struct scsi_qla_host *);
+extern int qla82xx_pinit_from_rom(scsi_qla_host_t *);
+extern int qla82xx_load_firmware(scsi_qla_host_t *);
+extern int qla82xx_reset_hw(scsi_qla_host_t *);
+extern int qla82xx_load_risc_blob(scsi_qla_host_t *, uint32_t *);
+extern void qla82xx_watchdog(scsi_qla_host_t *);
+
+/* Firmware and flash related functions */
+extern int qla82xx_load_risc(scsi_qla_host_t *, uint32_t *);
+extern uint8_t *qla82xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
+ uint32_t, uint32_t);
+extern int qla82xx_write_optrom_data(struct scsi_qla_host *, uint8_t *,
+ uint32_t, uint32_t);
+
+/* Mailbox related functions */
+extern int qla82xx_abort_isp(scsi_qla_host_t *);
+extern int qla82xx_restart_isp(scsi_qla_host_t *);
+
+/* IOCB related functions */
+extern int qla82xx_start_scsi(srb_t *);
+
+/* Interrupt related */
+extern irqreturn_t qla82xx_intr_handler(int, void *);
+extern irqreturn_t qla82xx_msi_handler(int, void *);
+extern irqreturn_t qla82xx_msix_default(int, void *);
+extern irqreturn_t qla82xx_msix_rsp_q(int, void *);
+extern void qla82xx_enable_intrs(struct qla_hw_data *);
+extern void qla82xx_disable_intrs(struct qla_hw_data *);
+extern void qla82xx_mbx_completion(scsi_qla_host_t *, uint16_t);
+extern void qla82xx_poll(int, void *);
+extern void qla82xx_init_flags(struct qla_hw_data *);
+
+/* ISP 8021 hardware related */
+extern int qla82xx_crb_win_lock(struct qla_hw_data *);
+extern void qla82xx_crb_win_unlock(struct qla_hw_data *);
+extern int qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *, ulong *);
+extern int qla82xx_wr_32(struct qla_hw_data *, ulong, u32);
+extern int qla82xx_rd_32(struct qla_hw_data *, ulong);
+extern int qla82xx_rdmem(struct qla_hw_data *, u64, void *, int);
+extern int qla82xx_wrmem(struct qla_hw_data *, u64, void *, int);
+extern int qla82xx_check_for_bad_spd(struct qla_hw_data *);
+extern int qla82xx_load_fw(scsi_qla_host_t *);
+extern int qla82xx_rom_lock(struct qla_hw_data *);
+extern void qla82xx_rom_unlock(struct qla_hw_data *);
+extern int qla82xx_rom_fast_read(struct qla_hw_data *, int , int *);
+extern int qla82xx_do_rom_fast_read(struct qla_hw_data *, int, int *);
+extern unsigned long qla82xx_decode_crb_addr(unsigned long);
+
+/* ISP 8021 IDC */
+extern void qla82xx_clear_drv_active(struct qla_hw_data *);
+extern int qla82xx_idc_lock(struct qla_hw_data *);
+extern void qla82xx_idc_unlock(struct qla_hw_data *);
+extern int qla82xx_device_state_handler(scsi_qla_host_t *);
+
+extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *,
+ size_t, char *);
+extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *);
+extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *);
+extern void qla82xx_start_iocbs(srb_t *);
+extern int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *);
+extern void qla82xx_wait_for_pending_commands(scsi_qla_host_t *);
+
+/* BSG related functions */
+extern int qla24xx_bsg_request(struct fc_bsg_job *);
+extern int qla24xx_bsg_timeout(struct fc_bsg_job *);
+extern int qla84xx_reset_chip(scsi_qla_host_t *, uint16_t);
+extern int qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *,
+ dma_addr_t, size_t, uint32_t);
+extern int qla2x00_get_idma_speed(scsi_qla_host_t *, uint16_t,
+ uint16_t *, uint16_t *);
#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 4647015eba63..872c55f049a5 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1535,7 +1535,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
eiter = (struct ct_fdmi_port_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
eiter->len = __constant_cpu_to_be16(4 + 4);
- if (IS_QLA81XX(ha))
+ if (IS_QLA8XXX_TYPE(ha))
eiter->a.sup_speed = __constant_cpu_to_be32(
FDMI_PORT_SPEED_10GB);
else if (IS_QLA25XX(ha))
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 4229bb483c5e..55540d2d4e38 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -328,6 +328,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
if (rval)
return (rval);
}
+
if (IS_QLA84XX(ha)) {
ha->cs84xx = qla84xx_get_chip(vha);
if (!ha->cs84xx) {
@@ -340,7 +341,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
ha->flags.chip_reset_done = 1;
if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
- /* Issue verify 84xx FW IOCB to complete 84xx initialization */
+ /* Issue verify 84xx FW IOCB to complete 84xx initialization */
rval = qla84xx_init_chip(vha);
if (rval != QLA_SUCCESS) {
qla_printk(KERN_ERR, ha,
@@ -349,6 +350,12 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
}
}
+ if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) {
+ if (qla24xx_read_fcp_prio_cfg(vha))
+ qla_printk(KERN_ERR, ha,
+ "Unable to read FCP priority data.\n");
+ }
+
return (rval);
}
@@ -955,6 +962,9 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
+ if (IS_QLA82XX(ha))
+ return QLA_SUCCESS;
+
ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
rval = qla2x00_mbx_reg_test(vha);
@@ -1177,6 +1187,12 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
unsigned long flags;
uint16_t fw_major_version;
+ if (IS_QLA82XX(ha)) {
+ rval = ha->isp_ops->load_risc(vha, &srisc_address);
+ if (rval == QLA_SUCCESS)
+ goto enable_82xx_npiv;
+ }
+
if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
/* Disable SRAM, Instruction RAM and GP RAM parity. */
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1202,6 +1218,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
rval = qla2x00_execute_fw(vha, srisc_address);
/* Retrieve firmware information. */
if (rval == QLA_SUCCESS) {
+enable_82xx_npiv:
fw_major_version = ha->fw_major_version;
rval = qla2x00_get_fw_version(vha,
&ha->fw_major_version,
@@ -1226,8 +1243,10 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
&ha->fw_xcb_count, NULL, NULL,
&ha->max_npiv_vports, NULL);
- if (!fw_major_version && ql2xallocfwdump)
- qla2x00_alloc_fw_dump(vha);
+ if (!fw_major_version && ql2xallocfwdump) {
+ if (!IS_QLA82XX(ha))
+ qla2x00_alloc_fw_dump(vha);
+ }
}
} else {
DEBUG2(printk(KERN_INFO
@@ -1384,6 +1403,9 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
int rval;
struct qla_hw_data *ha = vha->hw;
+ if (IS_QLA82XX(ha))
+ return;
+
/* Update Serial Link options. */
if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
return;
@@ -1818,7 +1840,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
return(rval);
}
-static inline void
+inline void
qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
char *def)
{
@@ -1826,7 +1848,7 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
uint16_t index;
struct qla_hw_data *ha = vha->hw;
int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
- !IS_QLA81XX(ha);
+ !IS_QLA8XXX_TYPE(ha);
if (memcmp(model, BINZERO, len) != 0) {
strncpy(ha->model_number, model, len);
@@ -3546,6 +3568,45 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
qla2x00_rport_del(fcport);
}
+void
+qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
+ struct scsi_qla_host *tvp;
+
+ vha->flags.online = 0;
+ ha->flags.chip_reset_done = 0;
+ clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ ha->qla_stats.total_isp_aborts++;
+
+ qla_printk(KERN_INFO, ha,
+ "Performing ISP error recovery - ha= %p.\n", ha);
+
+ /* Chip reset does not apply to 82XX */
+ if (!IS_QLA82XX(ha))
+ ha->isp_ops->reset_chip(vha);
+
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+ if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ qla2x00_mark_all_devices_lost(vha, 0);
+ list_for_each_entry_safe(vp, tvp, &base_vha->hw->vp_list, list)
+ qla2x00_mark_all_devices_lost(vp, 0);
+ } else {
+ if (!atomic_read(&vha->loop_down_timer))
+ atomic_set(&vha->loop_down_timer,
+ LOOP_DOWN_TIME);
+ }
+
+ /* Make sure for ISP 82XX IO DMA is complete */
+ if (IS_QLA82XX(ha))
+ qla82xx_wait_for_pending_commands(vha);
+
+ /* Requeue all commands in outstanding command list. */
+ qla2x00_abort_all_cmds(vha, DID_RESET << 16);
+}
+
/*
* qla2x00_abort_isp
* Resets ISP and aborts all outstanding commands.
@@ -3567,27 +3628,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
struct req_que *req = ha->req_q_map[0];
if (vha->flags.online) {
- vha->flags.online = 0;
- ha->flags.chip_reset_done = 0;
- clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
- ha->qla_stats.total_isp_aborts++;
-
- qla_printk(KERN_INFO, ha,
- "Performing ISP error recovery - ha= %p.\n", ha);
- ha->isp_ops->reset_chip(vha);
-
- atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
- if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
- atomic_set(&vha->loop_state, LOOP_DOWN);
- qla2x00_mark_all_devices_lost(vha, 0);
- } else {
- if (!atomic_read(&vha->loop_down_timer))
- atomic_set(&vha->loop_down_timer,
- LOOP_DOWN_TIME);
- }
-
- /* Requeue all commands in outstanding command list. */
- qla2x00_abort_all_cmds(vha, DID_RESET << 16);
+ qla2x00_abort_isp_cleanup(vha);
if (unlikely(pci_channel_offline(ha->pdev) &&
ha->flags.pci_channel_io_perm_failure)) {
@@ -3843,6 +3884,9 @@ qla24xx_reset_adapter(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ if (IS_QLA82XX(ha))
+ return;
+
vha->flags.online = 0;
ha->isp_ops->disable_intrs(ha);
@@ -3906,6 +3950,8 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
}
ha->nvram_size = sizeof(struct nvram_24xx);
ha->vpd_size = FA_NVRAM_VPD_SIZE;
+ if (IS_QLA82XX(ha))
+ ha->vpd_size = FA_VPD_SIZE_82XX;
/* Get VPD data into cache */
ha->vpd = ha->nvram + VPD_OFFSET;
@@ -4769,7 +4815,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
* Setup driver NVRAM options.
*/
qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
- "QLE81XX");
+ "QLE8XXX");
/* Use alternate WWN? */
if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
@@ -4892,6 +4938,147 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
return (rval);
}
+int
+qla82xx_restart_isp(scsi_qla_host_t *vha)
+{
+ int status, rval;
+ uint32_t wait_time;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+ struct rsp_que *rsp = ha->rsp_q_map[0];
+ struct scsi_qla_host *vp;
+ struct scsi_qla_host *tvp;
+
+ status = qla2x00_init_rings(vha);
+ if (!status) {
+ clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
+ ha->flags.chip_reset_done = 1;
+
+ status = qla2x00_fw_ready(vha);
+ if (!status) {
+ qla_printk(KERN_INFO, ha,
+ "%s(): Start configure loop, "
+ "status = %d\n", __func__, status);
+
+ /* Issue a marker after FW becomes ready. */
+ qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
+
+ vha->flags.online = 1;
+ /* Wait at most MAX_TARGET RSCNs for a stable link. */
+ wait_time = 256;
+ do {
+ clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ qla2x00_configure_loop(vha);
+ wait_time--;
+ } while (!atomic_read(&vha->loop_down_timer) &&
+ !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) &&
+ wait_time &&
+ (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)));
+ }
+
+ /* if no cable then assume it's good */
+ if ((vha->device_flags & DFLG_NO_CABLE))
+ status = 0;
+
+ qla_printk(KERN_INFO, ha,
+ "%s(): Configure loop done, status = 0x%x\n",
+ __func__, status);
+ }
+
+ if (!status) {
+ clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
+
+ if (!atomic_read(&vha->loop_down_timer)) {
+ /*
+ * Issue marker command only when we are going
+ * to start the I/O .
+ */
+ vha->marker_needed = 1;
+ }
+
+ vha->flags.online = 1;
+
+ ha->isp_ops->enable_intrs(ha);
+
+ ha->isp_abort_cnt = 0;
+ clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
+
+ if (ha->fce) {
+ ha->flags.fce_enabled = 1;
+ memset(ha->fce, 0,
+ fce_calc_size(ha->fce_bufs));
+ rval = qla2x00_enable_fce_trace(vha,
+ ha->fce_dma, ha->fce_bufs, ha->fce_mb,
+ &ha->fce_bufs);
+ if (rval) {
+ qla_printk(KERN_WARNING, ha,
+ "Unable to reinitialize FCE "
+ "(%d).\n", rval);
+ ha->flags.fce_enabled = 0;
+ }
+ }
+
+ if (ha->eft) {
+ memset(ha->eft, 0, EFT_SIZE);
+ rval = qla2x00_enable_eft_trace(vha,
+ ha->eft_dma, EFT_NUM_BUFFERS);
+ if (rval) {
+ qla_printk(KERN_WARNING, ha,
+ "Unable to reinitialize EFT "
+ "(%d).\n", rval);
+ }
+ }
+ } else { /* failed the ISP abort */
+ vha->flags.online = 1;
+ if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+ if (ha->isp_abort_cnt == 0) {
+ qla_printk(KERN_WARNING, ha,
+ "ISP error recovery failed - "
+ "board disabled\n");
+ /*
+ * The next call disables the board
+ * completely.
+ */
+ ha->isp_ops->reset_adapter(vha);
+ vha->flags.online = 0;
+ clear_bit(ISP_ABORT_RETRY,
+ &vha->dpc_flags);
+ status = 0;
+ } else { /* schedule another ISP abort */
+ ha->isp_abort_cnt--;
+ qla_printk(KERN_INFO, ha,
+ "qla%ld: ISP abort - "
+ "retry remaining %d\n",
+ vha->host_no, ha->isp_abort_cnt);
+ status = 1;
+ }
+ } else {
+ ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
+ qla_printk(KERN_INFO, ha,
+ "(%ld): ISP error recovery "
+ "- retrying (%d) more times\n",
+ vha->host_no, ha->isp_abort_cnt);
+ set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
+ status = 1;
+ }
+ }
+
+ if (!status) {
+ DEBUG(printk(KERN_INFO
+ "qla82xx_restart_isp(%ld): succeeded.\n",
+ vha->host_no));
+ list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
+ if (vp->vp_idx)
+ qla2x00_vp_abort_isp(vp);
+ }
+ } else {
+ qla_printk(KERN_INFO, ha,
+ "qla82xx_restart_isp: **** FAILED ****\n");
+ }
+
+ return status;
+}
+
void
qla81xx_update_fw_options(scsi_qla_host_t *vha)
{
@@ -4905,3 +5092,165 @@ qla81xx_update_fw_options(scsi_qla_host_t *vha)
ha->fw_options[2] |= BIT_9;
qla2x00_set_fw_options(vha, ha->fw_options);
}
+
+/*
+ * qla24xx_get_fcp_prio
+ * Gets the fcp cmd priority value for the logged in port.
+ * Looks for a match of the port descriptors within
+ * each of the fcp prio config entries. If a match is found,
+ * the tag (priority) value is returned.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * fcport = port structure pointer.
+ *
+ * Return:
+ * non-zero (if found)
+ * 0 (if not found)
+ *
+ * Context:
+ * Kernel context
+ */
+uint8_t
+qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+ int i, entries;
+ uint8_t pid_match, wwn_match;
+ uint8_t priority;
+ uint32_t pid1, pid2;
+ uint64_t wwn1, wwn2;
+ struct qla_fcp_prio_entry *pri_entry;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
+ return 0;
+
+ priority = 0;
+ entries = ha->fcp_prio_cfg->num_entries;
+ pri_entry = &ha->fcp_prio_cfg->entry[0];
+
+ for (i = 0; i < entries; i++) {
+ pid_match = wwn_match = 0;
+
+ if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
+ pri_entry++;
+ continue;
+ }
+
+ /* check source pid for a match */
+ if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
+ pid1 = pri_entry->src_pid & INVALID_PORT_ID;
+ pid2 = vha->d_id.b24 & INVALID_PORT_ID;
+ if (pid1 == INVALID_PORT_ID)
+ pid_match++;
+ else if (pid1 == pid2)
+ pid_match++;
+ }
+
+ /* check destination pid for a match */
+ if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
+ pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
+ pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
+ if (pid1 == INVALID_PORT_ID)
+ pid_match++;
+ else if (pid1 == pid2)
+ pid_match++;
+ }
+
+ /* check source WWN for a match */
+ if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
+ wwn1 = wwn_to_u64(vha->port_name);
+ wwn2 = wwn_to_u64(pri_entry->src_wwpn);
+ if (wwn2 == (uint64_t)-1)
+ wwn_match++;
+ else if (wwn1 == wwn2)
+ wwn_match++;
+ }
+
+ /* check destination WWN for a match */
+ if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
+ wwn1 = wwn_to_u64(fcport->port_name);
+ wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
+ if (wwn2 == (uint64_t)-1)
+ wwn_match++;
+ else if (wwn1 == wwn2)
+ wwn_match++;
+ }
+
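+ /*
+ * An entry applies only when both of its port-id
+ * descriptors or both of its WWN descriptors matched
+ * above; an all-ones pid/wwpn in the entry acts as a
+ * wildcard and always counts as a match.
+ */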
+ if (pid_match == 2 || wwn_match == 2) {
+ /* Found a matching entry */
+ if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
+ priority = pri_entry->tag;
+ break;
+ }
+
+ pri_entry++;
+ }
+
+ return priority;
+}
+
+/*
+ * qla24xx_update_fcport_fcp_prio
+ * Activates fcp priority for the logged in fc port
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * fcport = port structure pointer.
+ *
+ * Return:
+ * QLA_SUCCESS or QLA_FUNCTION_FAILED
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *ha, fc_port_t *fcport)
+{
+ int ret;
+ uint8_t priority;
+ uint16_t mb[5];
+
+ if (atomic_read(&fcport->state) == FCS_UNCONFIGURED ||
+ fcport->port_type != FCT_TARGET ||
+ fcport->loop_id == FC_NO_LOOP_ID)
+ return QLA_FUNCTION_FAILED;
+
+ priority = qla24xx_get_fcp_prio(ha, fcport);
+ ret = qla24xx_set_fcp_prio(ha, fcport->loop_id, priority, mb);
+ if (ret == QLA_SUCCESS)
+ fcport->fcp_prio = priority;
+ else
+ DEBUG2(printk(KERN_WARNING
+ "scsi(%ld): Unable to activate fcp priority, "
+ " ret=0x%x\n", ha->host_no, ret));
+
+ return ret;
+}
+
+/*
+ * qla24xx_update_all_fcp_prio
+ * Activates fcp priority for all the logged in ports
+ *
+ * Input:
+ * ha = adapter block pointer.
+ *
+ * Return:
+ * QLA_SUCCESS or QLA_FUNCTION_FAILED
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
+{
+ int ret;
+ fc_port_t *fcport;
+
+ ret = QLA_FUNCTION_FAILED;
+ /* We need to set priority for all logged in ports */
+ list_for_each_entry(fcport, &vha->vp_fcports, list)
+ ret = qla24xx_update_fcport_fcp_prio(vha, fcport);
+
+ return ret;
+}
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 5e0a7095c9f2..ad53c6455556 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -37,7 +37,10 @@ qla2x00_poll(struct rsp_que *rsp)
unsigned long flags;
struct qla_hw_data *ha = rsp->hw;
local_irq_save(flags);
- ha->isp_ops->intr_handler(0, rsp);
+ if (IS_QLA82XX(ha))
+ qla82xx_poll(0, rsp);
+ else
+ ha->isp_ops->intr_handler(0, rsp);
local_irq_restore(flags);
}
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 8299a9891bfe..d792ae32ed69 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -506,7 +506,10 @@ qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
cnt = (uint16_t)
RD_REG_DWORD(&reg->isp25mq.req_q_out);
else {
- if (IS_FWI2_CAPABLE(ha))
+ if (IS_QLA82XX(ha))
+ cnt = (uint16_t)RD_REG_DWORD(
+ &reg->isp82.req_q_out);
+ else if (IS_FWI2_CAPABLE(ha))
cnt = (uint16_t)RD_REG_DWORD(
&reg->isp24.req_q_out);
else
@@ -579,11 +582,29 @@ qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
req->ring_ptr++;
/* Set chip new ring index. */
- if (ha->mqenable) {
+ if (IS_QLA82XX(ha)) {
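+ /*
+ * 82xx request-queue doorbell: dbval packs the port number,
+ * the request queue id and the new ring index. When the
+ * normal (non-ql2xdbwr) path is used, the value is written
+ * and then read back until the adapter reflects it.
+ */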
+ uint32_t dbval = 0x04 | (ha->portnum << 5);
+
+ /* write, read and verify logic */
+ dbval = dbval | (req->id << 8) | (req->ring_index << 16);
+ if (ql2xdbwr)
+ qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
+ else {
+ WRT_REG_DWORD(
+ (unsigned long __iomem *)ha->nxdb_wr_ptr,
+ dbval);
+ wmb();
+ while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
+ WRT_REG_DWORD((unsigned long __iomem *)
+ ha->nxdb_wr_ptr, dbval);
+ wmb();
+ }
+ }
+ } else if (ha->mqenable) {
+ /* Set chip new ring index. */
WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
RD_REG_DWORD(&ioreg->hccr);
- }
- else {
+ } else {
if (IS_FWI2_CAPABLE(ha)) {
WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
@@ -604,7 +625,7 @@ qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
*
* Returns the number of IOCB entries needed to store @dsds.
*/
-static inline uint16_t
+inline uint16_t
qla24xx_calc_iocbs(uint16_t dsds)
{
uint16_t iocbs;
@@ -626,7 +647,7 @@ qla24xx_calc_iocbs(uint16_t dsds)
* @cmd_pkt: Command type 3 IOCB
* @tot_dsds: Total number of segments to transfer
*/
-static inline void
+inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
uint16_t tot_dsds)
{
@@ -931,24 +952,31 @@ qla2x00_start_iocbs(srb_t *sp)
device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
- /* Adjust ring index. */
- req->ring_index++;
- if (req->ring_index == req->length) {
- req->ring_index = 0;
- req->ring_ptr = req->ring;
- } else
- req->ring_ptr++;
-
- /* Set chip new ring index. */
- if (ha->mqenable) {
- WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
- RD_REG_DWORD(&ioreg->hccr);
- } else if (IS_FWI2_CAPABLE(ha)) {
- WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
- RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
+ if (IS_QLA82XX(ha)) {
+ qla82xx_start_iocbs(sp);
} else {
- WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), req->ring_index);
- RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else
+ req->ring_ptr++;
+
+ /* Set chip new ring index. */
+ if (ha->mqenable) {
+ WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
+ RD_REG_DWORD(&ioreg->hccr);
+ } else if (IS_QLA82XX(ha)) {
+ qla82xx_start_iocbs(sp);
+ } else if (IS_FWI2_CAPABLE(ha)) {
+ WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
+ RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
+ } else {
+ WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
+ req->ring_index);
+ RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
+ }
}
}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index db539b0c3dae..f1ac62d505d1 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -326,7 +326,7 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
/* Setup to process RIO completion. */
handle_cnt = 0;
- if (IS_QLA81XX(ha))
+ if (IS_QLA8XXX_TYPE(ha))
goto skip_rio;
switch (mb[0]) {
case MBA_SCSI_COMPLETION:
@@ -544,7 +544,7 @@ skip_rio:
if (IS_QLA2100(ha))
break;
- if (IS_QLA81XX(ha))
+ if (IS_QLA8XXX_TYPE(ha))
DEBUG2(printk("scsi(%ld): DCBX Completed -- %04x %04x "
"%04x\n", vha->host_no, mb[1], mb[2], mb[3]));
else
@@ -845,7 +845,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
qla2x00_sp_compl(ha, sp);
} else {
DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion"
- " handle(%d)\n", vha->host_no, req->id, index));
+ " handle(0x%x)\n", vha->host_no, req->id, index));
qla_printk(KERN_WARNING, ha,
"Invalid ISP SCSI completion handle\n");
@@ -1337,6 +1337,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
handle = (uint32_t) LSW(sts->handle);
que = MSW(sts->handle);
req = ha->req_q_map[que];
+
/* Fast path completion. */
if (comp_status == CS_COMPLETE && scsi_status == 0) {
qla2x00_process_completed_request(vha, req, handle);
@@ -1806,6 +1807,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
struct rsp_que *rsp)
{
struct sts_entry_24xx *pkt;
+ struct qla_hw_data *ha = vha->hw;
if (!vha->flags.online)
return;
@@ -1866,7 +1868,11 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
}
/* Adjust ring index */
- WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
+ if (IS_QLA82XX(ha)) {
+ struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
+ WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
+ } else
+ WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
}
static void
@@ -2169,6 +2175,11 @@ static struct qla_init_msix_entry msix_entries[3] = {
{ "qla2xxx (multiq)", qla25xx_msix_rsp_q },
};
+static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
+ { "qla2xxx (default)", qla82xx_msix_default },
+ { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
+};
+
static void
qla24xx_disable_msix(struct qla_hw_data *ha)
{
@@ -2195,7 +2206,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
struct qla_msix_entry *qentry;
entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
- GFP_KERNEL);
+ GFP_KERNEL);
if (!entries)
return -ENOMEM;
@@ -2240,8 +2251,15 @@ msix_failed:
/* Enable MSI-X vectors for the base queue */
for (i = 0; i < 2; i++) {
qentry = &ha->msix_entries[i];
- ret = request_irq(qentry->vector, msix_entries[i].handler,
- 0, msix_entries[i].name, rsp);
+ if (IS_QLA82XX(ha)) {
+ ret = request_irq(qentry->vector,
+ qla82xx_msix_entries[i].handler,
+ 0, qla82xx_msix_entries[i].name, rsp);
+ } else {
+ ret = request_irq(qentry->vector,
+ msix_entries[i].handler,
+ 0, msix_entries[i].name, rsp);
+ }
if (ret) {
qla_printk(KERN_WARNING, ha,
"MSI-X: Unable to register handler -- %x/%d.\n",
@@ -2272,7 +2290,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
/* If possible, enable MSI-X. */
if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
- !IS_QLA8432(ha) && !IS_QLA8001(ha))
+ !IS_QLA8432(ha) && !IS_QLA8XXX_TYPE(ha))
goto skip_msi;
if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
@@ -2302,7 +2320,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
goto clear_risc_ints;
}
qla_printk(KERN_WARNING, ha,
- "MSI-X: Falling back-to INTa mode -- %d.\n", ret);
+ "MSI-X: Falling back-to MSI mode -- %d.\n", ret);
skip_msix:
if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
@@ -2313,7 +2331,9 @@ skip_msix:
if (!ret) {
DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
ha->flags.msi_enabled = 1;
- }
+ } else
+ qla_printk(KERN_WARNING, ha,
+ "MSI-X: Falling back-to INTa mode -- %d.\n", ret);
skip_msi:
ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
@@ -2331,7 +2351,7 @@ clear_risc_ints:
* FIXME: Noted that 8014s were being dropped during NK testing.
* Timing deltas during MSI-X/INTa transitions?
*/
- if (IS_QLA81XX(ha))
+ if (IS_QLA81XX(ha) || IS_QLA82XX(ha))
goto fail;
spin_lock_irq(&ha->hardware_lock);
if (IS_FWI2_CAPABLE(ha)) {
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 42eb7ffd5942..2f3033228061 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -49,6 +49,14 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (ha->pdev->error_state > pci_channel_io_frozen)
return QLA_FUNCTION_TIMEOUT;
+ if (vha->device_flags & DFLG_DEV_FAILED) {
+ DEBUG2_3_11(qla_printk(KERN_WARNING, ha,
+ "%s(%ld): Device in failed state, "
+ "timeout MBX Exiting.\n",
+ __func__, base_vha->host_no));
+ return QLA_FUNCTION_TIMEOUT;
+ }
+
reg = ha->iobase;
io_lock_on = base_vha->flags.init_done;
@@ -85,7 +93,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Load mailbox registers. */
- if (IS_FWI2_CAPABLE(ha))
+ if (IS_QLA82XX(ha))
+ optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0];
+ else if (IS_FWI2_CAPABLE(ha) && !IS_QLA82XX(ha))
optr = (uint16_t __iomem *)&reg->isp24.mailbox0;
else
optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0);
@@ -133,7 +143,18 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
- if (IS_FWI2_CAPABLE(ha))
+ if (IS_QLA82XX(ha)) {
+ if (RD_REG_DWORD(&reg->isp82.hint) &
+ HINT_MBX_INT_PENDING) {
+ spin_unlock_irqrestore(&ha->hardware_lock,
+ flags);
+ DEBUG2_3_11(printk(KERN_INFO
+ "%s(%ld): Pending Mailbox timeout. "
+ "Exiting.\n", __func__, base_vha->host_no));
+ return QLA_FUNCTION_TIMEOUT;
+ }
+ WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
+ } else if (IS_FWI2_CAPABLE(ha))
WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
else
WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
@@ -147,7 +168,18 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
DEBUG3_11(printk("%s(%ld): cmd=%x POLLING MODE.\n", __func__,
base_vha->host_no, command));
- if (IS_FWI2_CAPABLE(ha))
+ if (IS_QLA82XX(ha)) {
+ if (RD_REG_DWORD(&reg->isp82.hint) &
+ HINT_MBX_INT_PENDING) {
+ spin_unlock_irqrestore(&ha->hardware_lock,
+ flags);
+ DEBUG2_3_11(printk(KERN_INFO
+ "%s(%ld): Pending Mailbox timeout. "
+ "Exiting.\n", __func__, base_vha->host_no));
+ return QLA_FUNCTION_TIMEOUT;
+ }
+ WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
+ } else if (IS_FWI2_CAPABLE(ha))
WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
else
WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
@@ -264,7 +296,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
clear_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
- if (qla2x00_abort_isp(base_vha)) {
+ if (ha->isp_ops->abort_isp(base_vha)) {
/* Failed. retry later. */
set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
}
@@ -711,7 +743,7 @@ qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
* Context:
* Kernel context.
*/
-static int
+int
qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
dma_addr_t phys_addr, size_t size, uint32_t tov)
{
@@ -952,7 +984,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
mcp->mb[9] = vha->vp_idx;
mcp->out_mb = MBX_9|MBX_0;
mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
- if (IS_QLA81XX(vha->hw))
+ if (IS_QLA8XXX_TYPE(vha->hw))
mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
@@ -978,7 +1010,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n",
vha->host_no));
- if (IS_QLA81XX(vha->hw)) {
+ if (IS_QLA8XXX_TYPE(vha->hw)) {
vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
vha->fcoe_fcf_idx = mcp->mb[10];
vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
@@ -1076,6 +1108,10 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n",
vha->host_no));
+ if (IS_QLA82XX(ha) && ql2xdbwr)
+ qla82xx_wr_32(ha, ha->nxdb_wr_ptr,
+ (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
+
if (ha->flags.npiv_supported)
mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
else
@@ -1408,7 +1444,7 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
- if (IS_QLA81XX(vha->hw)) {
+ if (IS_QLA8XXX_TYPE(vha->hw)) {
/* Logout across all FCFs. */
mcp->mb[0] = MBC_LIP_FULL_LOGIN;
mcp->mb[1] = BIT_1;
@@ -2740,6 +2776,48 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint16_t addr,
}
int
+qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
+ uint16_t *port_speed, uint16_t *mb)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_IIDMA_CAPABLE(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+
+ mcp->mb[0] = MBC_PORT_PARAMS;
+ mcp->mb[1] = loop_id;
+ mcp->mb[2] = mcp->mb[3] = 0;
+ mcp->mb[9] = vha->vp_idx;
+ mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_3|MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ /* Return mailbox statuses. */
+ if (mb != NULL) {
+ mb[0] = mcp->mb[0];
+ mb[1] = mcp->mb[1];
+ mb[3] = mcp->mb[3];
+ }
+
+ if (rval != QLA_SUCCESS) {
+ DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
+ vha->host_no, rval));
+ } else {
+ DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ if (port_speed)
+ *port_speed = mcp->mb[3];
+ }
+
+ return rval;
+}
+
+int
qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
uint16_t port_speed, uint16_t *mb)
{
@@ -2755,7 +2833,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
mcp->mb[0] = MBC_PORT_PARAMS;
mcp->mb[1] = loop_id;
mcp->mb[2] = BIT_0;
- if (IS_QLA81XX(vha->hw))
+ if (IS_QLA8XXX_TYPE(vha->hw))
mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
else
mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
@@ -3544,7 +3622,7 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_QLA81XX(vha->hw))
+ if (!IS_QLA8XXX_TYPE(vha->hw))
return QLA_FUNCTION_FAILED;
DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
@@ -3582,7 +3660,7 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_QLA81XX(vha->hw))
+ if (!IS_QLA8XXX_TYPE(vha->hw))
return QLA_FUNCTION_FAILED;
DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
@@ -3643,7 +3721,8 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
}
int
-qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mresp)
+qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
+ uint16_t *mresp)
{
int rval;
mbx_cmd_t mc;
@@ -3678,7 +3757,7 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *
mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
- if (IS_QLA81XX(vha->hw))
+ if (IS_QLA8XXX_TYPE(vha->hw))
mcp->out_mb |= MBX_2;
mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
@@ -3690,9 +3769,11 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *
if (rval != QLA_SUCCESS) {
DEBUG2(printk(KERN_WARNING
- "(%ld): failed=%x mb[0]=0x%x "
- "mb[1]=0x%x mb[2]=0x%x mb[3]=0x%x mb[18]=0x%x mb[19]=0x%x. \n", vha->host_no, rval,
- mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], mcp->mb[18], mcp->mb[19]));
+ "(%ld): failed=%x mb[0]=0x%x "
+ "mb[1]=0x%x mb[2]=0x%x mb[3]=0x%x mb[18]=0x%x "
+ "mb[19]=0x%x.\n",
+ vha->host_no, rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
+ mcp->mb[3], mcp->mb[18], mcp->mb[19]));
} else {
DEBUG2(printk(KERN_WARNING
"scsi(%ld): done.\n", vha->host_no));
@@ -3706,7 +3787,8 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *
}
int
-qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mresp)
+qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
+ uint16_t *mresp)
{
int rval;
mbx_cmd_t mc;
@@ -3718,9 +3800,10 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mres
memset(mcp->mb, 0 , sizeof(mcp->mb));
mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64bit address */
- if (IS_QLA81XX(ha))
+ if (IS_QLA8XXX_TYPE(ha)) {
mcp->mb[1] |= BIT_15;
- mcp->mb[2] = IS_QLA81XX(ha) ? vha->fcoe_fcf_idx : 0;
+ mcp->mb[2] = vha->fcoe_fcf_idx;
+ }
mcp->mb[16] = LSW(mreq->rcv_dma);
mcp->mb[17] = MSW(mreq->rcv_dma);
mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
@@ -3735,13 +3818,13 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mres
mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
- if (IS_QLA81XX(ha))
+ if (IS_QLA8XXX_TYPE(ha))
mcp->out_mb |= MBX_2;
mcp->in_mb = MBX_0;
- if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha))
+ if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha))
mcp->in_mb |= MBX_1;
- if (IS_QLA81XX(ha))
+ if (IS_QLA8XXX_TYPE(ha))
mcp->in_mb |= MBX_3;
mcp->tov = MBX_TOV_SECONDS;
@@ -3764,8 +3847,7 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mres
return rval;
}
int
-qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic,
- uint16_t *cmd_status)
+qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic)
{
int rval;
mbx_cmd_t mc;
@@ -3782,8 +3864,6 @@ qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic,
mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
rval = qla2x00_mailbox_command(ha, mcp);
- /* Return mailbox statuses. */
- *cmd_status = mcp->mb[0];
if (rval != QLA_SUCCESS)
DEBUG16(printk("%s(%ld): failed=%x.\n", __func__, ha->host_no,
rval));
@@ -3801,7 +3881,7 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
mbx_cmd_t *mcp = &mc;
if (!IS_FWI2_CAPABLE(vha->hw))
- return QLA_FUNCTION_FAILED;
+ return QLA_FUNCTION_FAILED;
DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
@@ -3836,7 +3916,8 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
if (!IS_FWI2_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk(KERN_INFO "%s(%ld): entered.\n", __func__, vha->host_no));
+ DEBUG11(qla_printk(KERN_INFO, ha,
+ "%s(%ld): entered.\n", __func__, vha->host_no));
mcp->mb[0] = MBC_DATA_RATE;
mcp->mb[1] = 0;
@@ -3857,3 +3938,122 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
return rval;
}
+
+int
+qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
+ uint16_t *mb)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
+ return QLA_FUNCTION_FAILED;
+
+ DEBUG11(printk(KERN_INFO
+ "%s(%ld): entered.\n", __func__, ha->host_no));
+
+ mcp->mb[0] = MBC_PORT_PARAMS;
+ mcp->mb[1] = loop_id;
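+ /* mb[2]: BIT_1 when FCP priority is enabled on the adapter,
+ * BIT_2 otherwise; mb[4] carries the 4-bit priority tag.
+ */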
+ if (ha->flags.fcp_prio_enabled)
+ mcp->mb[2] = BIT_1;
+ else
+ mcp->mb[2] = BIT_2;
+ mcp->mb[4] = priority & 0xf;
+ mcp->mb[9] = vha->vp_idx;
+ mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
+ mcp->tov = 30;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (mb != NULL) {
+ mb[0] = mcp->mb[0];
+ mb[1] = mcp->mb[1];
+ mb[3] = mcp->mb[3];
+ mb[4] = mcp->mb[4];
+ }
+
+ if (rval != QLA_SUCCESS) {
+ DEBUG2_3_11(printk(KERN_WARNING
+ "%s(%ld): failed=%x.\n", __func__,
+ vha->host_no, rval));
+ } else {
+ DEBUG11(printk(KERN_INFO
+ "%s(%ld): done.\n", __func__, vha->host_no));
+ }
+
+ return rval;
+}
+
+int
+qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_FWI2_CAPABLE(ha))
+ return QLA_FUNCTION_FAILED;
+
+ DEBUG11(qla_printk(KERN_INFO, ha,
+ "%s(%ld): entered.\n", __func__, vha->host_no));
+
+ memset(mcp, 0, sizeof(mbx_cmd_t));
+ mcp->mb[0] = MBC_TOGGLE_INTR;
+ mcp->mb[1] = 1;
+
+ mcp->out_mb = MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = 30;
+ mcp->flags = 0;
+
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS) {
+ DEBUG2_3_11(qla_printk(KERN_WARNING, ha,
+ "%s(%ld): failed=%x mb[0]=%x.\n", __func__,
+ vha->host_no, rval, mcp->mb[0]));
+ } else {
+ DEBUG11(qla_printk(KERN_INFO, ha,
+ "%s(%ld): done.\n", __func__, vha->host_no));
+ }
+
+ return rval;
+}
+
+int
+qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_QLA82XX(ha))
+ return QLA_FUNCTION_FAILED;
+
+ DEBUG11(qla_printk(KERN_INFO, ha,
+ "%s(%ld): entered.\n", __func__, vha->host_no));
+
+ memset(mcp, 0, sizeof(mbx_cmd_t));
+ mcp->mb[0] = MBC_TOGGLE_INTR;
+ mcp->mb[1] = 0;
+
+ mcp->out_mb = MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = 30;
+ mcp->flags = 0;
+
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS) {
+ DEBUG2_3_11(qla_printk(KERN_WARNING, ha,
+ "%s(%ld): failed=%x mb[0]=%x.\n", __func__,
+ vha->host_no, rval, mcp->mb[0]));
+ } else {
+ DEBUG11(qla_printk(KERN_INFO, ha,
+ "%s(%ld): done.\n", __func__, vha->host_no));
+ }
+
+ return rval;
+}
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
new file mode 100644
index 000000000000..495ff1ade36e
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -0,0 +1,3635 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2008 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+#include <linux/delay.h>
+#include <linux/pci.h>
+
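+/*
+ * Window helpers for the 2M PCI map: MN_WIN, OCM_WIN and MS_WIN
+ * derive the window register value used to map a DDR-net, OCM or
+ * QDR-net address (respectively) into the 2M BAR, and
+ * GET_MEM_OFFS_2M returns the offset within the selected window.
+ */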
+#define MASK(n) ((1ULL<<(n))-1)
+#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | \
+ ((addr >> 25) & 0x3ff))
+#define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | \
+ ((addr >> 25) & 0x3ff))
+#define MS_WIN(addr) (addr & 0x0ffc0000)
+#define QLA82XX_PCI_MN_2M (0)
+#define QLA82XX_PCI_MS_2M (0x80000)
+#define QLA82XX_PCI_OCM0_2M (0xc0000)
+#define VALID_OCM_ADDR(addr) (((addr) & 0x3f800) != 0x3f800)
+#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
+
+/* CRB window related */
+#define CRB_BLK(off) ((off >> 20) & 0x3f)
+#define CRB_SUBBLK(off) ((off >> 16) & 0xf)
+#define CRB_WINDOW_2M (0x130060)
+#define QLA82XX_PCI_CAMQM_2M_END (0x04800800UL)
+#define CRB_HI(off) ((qla82xx_crb_hub_agt[CRB_BLK(off)] << 20) | \
+ ((off) & 0xf0000))
+#define QLA82XX_PCI_CAMQM_2M_BASE (0x000ff800UL)
+#define CRB_INDIRECT_2M (0x1e0000UL)
+
+static inline void *qla82xx_pci_base_offsetfset(struct qla_hw_data *ha,
+ unsigned long off)
+{
+ if ((off < ha->first_page_group_end) &&
+ (off >= ha->first_page_group_start))
+ return (void *)(ha->nx_pcibase + off);
+
+ return NULL;
+}
+
+#define MAX_CRB_XFORM 60
+static unsigned long crb_addr_xform[MAX_CRB_XFORM];
+int qla82xx_crb_table_initialized;
+
+#define qla82xx_crb_addr_transform(name) \
+ (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
+ QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20)
+
+static void qla82xx_crb_addr_transform_setup(void)
+{
+ qla82xx_crb_addr_transform(XDMA);
+ qla82xx_crb_addr_transform(TIMR);
+ qla82xx_crb_addr_transform(SRE);
+ qla82xx_crb_addr_transform(SQN3);
+ qla82xx_crb_addr_transform(SQN2);
+ qla82xx_crb_addr_transform(SQN1);
+ qla82xx_crb_addr_transform(SQN0);
+ qla82xx_crb_addr_transform(SQS3);
+ qla82xx_crb_addr_transform(SQS2);
+ qla82xx_crb_addr_transform(SQS1);
+ qla82xx_crb_addr_transform(SQS0);
+ qla82xx_crb_addr_transform(RPMX7);
+ qla82xx_crb_addr_transform(RPMX6);
+ qla82xx_crb_addr_transform(RPMX5);
+ qla82xx_crb_addr_transform(RPMX4);
+ qla82xx_crb_addr_transform(RPMX3);
+ qla82xx_crb_addr_transform(RPMX2);
+ qla82xx_crb_addr_transform(RPMX1);
+ qla82xx_crb_addr_transform(RPMX0);
+ qla82xx_crb_addr_transform(ROMUSB);
+ qla82xx_crb_addr_transform(SN);
+ qla82xx_crb_addr_transform(QMN);
+ qla82xx_crb_addr_transform(QMS);
+ qla82xx_crb_addr_transform(PGNI);
+ qla82xx_crb_addr_transform(PGND);
+ qla82xx_crb_addr_transform(PGN3);
+ qla82xx_crb_addr_transform(PGN2);
+ qla82xx_crb_addr_transform(PGN1);
+ qla82xx_crb_addr_transform(PGN0);
+ qla82xx_crb_addr_transform(PGSI);
+ qla82xx_crb_addr_transform(PGSD);
+ qla82xx_crb_addr_transform(PGS3);
+ qla82xx_crb_addr_transform(PGS2);
+ qla82xx_crb_addr_transform(PGS1);
+ qla82xx_crb_addr_transform(PGS0);
+ qla82xx_crb_addr_transform(PS);
+ qla82xx_crb_addr_transform(PH);
+ qla82xx_crb_addr_transform(NIU);
+ qla82xx_crb_addr_transform(I2Q);
+ qla82xx_crb_addr_transform(EG);
+ qla82xx_crb_addr_transform(MN);
+ qla82xx_crb_addr_transform(MS);
+ qla82xx_crb_addr_transform(CAS2);
+ qla82xx_crb_addr_transform(CAS1);
+ qla82xx_crb_addr_transform(CAS0);
+ qla82xx_crb_addr_transform(CAM);
+ qla82xx_crb_addr_transform(C2C1);
+ qla82xx_crb_addr_transform(C2C0);
+ qla82xx_crb_addr_transform(SMB);
+ qla82xx_crb_addr_transform(OCM0);
+ /*
+ * Used only in P3; define it for P2 also.
+ */
+ qla82xx_crb_addr_transform(I2C0);
+
+ qla82xx_crb_table_initialized = 1;
+}
+
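+/*
+ * Map from the legacy 128M CRB layout to the 2M layout: each
+ * sub-block entry is {valid, start_128M, end_128M, start_2M} and is
+ * consulted by qla82xx_pci_get_crb_addr_2M() for direct mapping.
+ */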
+struct crb_128M_2M_block_map crb_128M_2M_map[64] = {
+ {{{0, 0, 0, 0} } },
+ {{{1, 0x0100000, 0x0102000, 0x120000},
+ {1, 0x0110000, 0x0120000, 0x130000},
+ {1, 0x0120000, 0x0122000, 0x124000},
+ {1, 0x0130000, 0x0132000, 0x126000},
+ {1, 0x0140000, 0x0142000, 0x128000},
+ {1, 0x0150000, 0x0152000, 0x12a000},
+ {1, 0x0160000, 0x0170000, 0x110000},
+ {1, 0x0170000, 0x0172000, 0x12e000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x01e0000, 0x01e0800, 0x122000},
+ {0, 0x0000000, 0x0000000, 0x000000} } } ,
+ {{{1, 0x0200000, 0x0210000, 0x180000} } },
+ {{{0, 0, 0, 0} } },
+ {{{1, 0x0400000, 0x0401000, 0x169000} } },
+ {{{1, 0x0500000, 0x0510000, 0x140000} } },
+ {{{1, 0x0600000, 0x0610000, 0x1c0000} } },
+ {{{1, 0x0700000, 0x0704000, 0x1b8000} } },
+ {{{1, 0x0800000, 0x0802000, 0x170000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x08f0000, 0x08f2000, 0x172000} } },
+ {{{1, 0x0900000, 0x0902000, 0x174000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x09f0000, 0x09f2000, 0x176000} } },
+ {{{0, 0x0a00000, 0x0a02000, 0x178000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x0af0000, 0x0af2000, 0x17a000} } },
+ {{{0, 0x0b00000, 0x0b02000, 0x17c000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
+ {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },
+ {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },
+ {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },
+ {{{1, 0x0f00000, 0x0f01000, 0x164000} } },
+ {{{0, 0x1000000, 0x1004000, 0x1a8000} } },
+ {{{1, 0x1100000, 0x1101000, 0x160000} } },
+ {{{1, 0x1200000, 0x1201000, 0x161000} } },
+ {{{1, 0x1300000, 0x1301000, 0x162000} } },
+ {{{1, 0x1400000, 0x1401000, 0x163000} } },
+ {{{1, 0x1500000, 0x1501000, 0x165000} } },
+ {{{1, 0x1600000, 0x1601000, 0x166000} } },
+ {{{0, 0, 0, 0} } },
+ {{{0, 0, 0, 0} } },
+ {{{0, 0, 0, 0} } },
+ {{{0, 0, 0, 0} } },
+ {{{0, 0, 0, 0} } },
+ {{{0, 0, 0, 0} } },
+ {{{1, 0x1d00000, 0x1d10000, 0x190000} } },
+ {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },
+ {{{1, 0x1f00000, 0x1f10000, 0x150000} } },
+ {{{0} } },
+ {{{1, 0x2100000, 0x2102000, 0x120000},
+ {1, 0x2110000, 0x2120000, 0x130000},
+ {1, 0x2120000, 0x2122000, 0x124000},
+ {1, 0x2130000, 0x2132000, 0x126000},
+ {1, 0x2140000, 0x2142000, 0x128000},
+ {1, 0x2150000, 0x2152000, 0x12a000},
+ {1, 0x2160000, 0x2170000, 0x110000},
+ {1, 0x2170000, 0x2172000, 0x12e000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000} } },
+ {{{1, 0x2200000, 0x2204000, 0x1b0000} } },
+ {{{0} } },
+ {{{0} } },
+ {{{0} } },
+ {{{0} } },
+ {{{0} } },
+ {{{1, 0x2800000, 0x2804000, 0x1a4000} } },
+ {{{1, 0x2900000, 0x2901000, 0x16b000} } },
+ {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },
+ {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },
+ {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },
+ {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },
+ {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },
+ {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },
+ {{{1, 0x3000000, 0x3000400, 0x1adc00} } },
+ {{{0, 0x3100000, 0x3104000, 0x1a8000} } },
+ {{{1, 0x3200000, 0x3204000, 0x1d4000} } },
+ {{{1, 0x3300000, 0x3304000, 0x1a0000} } },
+ {{{0} } },
+ {{{1, 0x3500000, 0x3500400, 0x1ac000} } },
+ {{{1, 0x3600000, 0x3600400, 0x1ae000} } },
+ {{{1, 0x3700000, 0x3700400, 0x1ae400} } },
+ {{{1, 0x3800000, 0x3804000, 0x1d0000} } },
+ {{{1, 0x3900000, 0x3904000, 0x1b4000} } },
+ {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },
+ {{{0} } },
+ {{{0} } },
+ {{{1, 0x3d00000, 0x3d04000, 0x1dc000} } },
+ {{{1, 0x3e00000, 0x3e01000, 0x167000} } },
+ {{{1, 0x3f00000, 0x3f01000, 0x168000} } }
+};
+
+/*
+ * top 12 bits of crb internal address (hub, agent)
+ */
+unsigned qla82xx_crb_hub_agt[64] = {
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_MN,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_MS,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_SRE,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_NIU,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_QMN,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGND,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_SN,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_EG,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_CAM,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_SMB,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC,
+ 0,
+};
+
+/*
+ * In: 'off' is offset from CRB space in 128M pci map
+ * Out: 'off' is 2M pci map addr
+ * side effect: lock crb window
+ */
+static void
+qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
+{
+ u32 win_read;
+
+ ha->crb_win = CRB_HI(*off);
+ writel(ha->crb_win,
+ (void *)(CRB_WINDOW_2M + ha->nx_pcibase));
+
+ /* Read back value to make sure write has gone through before trying
+ * to use it.
+ */
+ win_read = RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
+ if (win_read != ha->crb_win) {
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "%s: Written crbwin (0x%x) != Read crbwin (0x%x), "
+ "off=0x%lx\n", __func__, ha->crb_win, win_read, *off));
+ }
+ *off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
+}
+
+static inline unsigned long
+qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
+{
+ /* See if we are currently pointing to the region we want to use next */
+ if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_DDR_NET)) {
+ /* No need to change window. PCIX and PCIE regs are present
+ * in both windows.
+ */
+ return off;
+ }
+
+ if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_PCIX_HOST2)) {
+ /* We are in first CRB window */
+ if (ha->curr_window != 0)
+ WARN_ON(1);
+ return off;
+ }
+
+ if ((off > QLA82XX_CRB_PCIX_HOST2) && (off < QLA82XX_CRB_MAX)) {
+ /* We are in second CRB window */
+ off = off - QLA82XX_CRB_PCIX_HOST2 + QLA82XX_CRB_PCIX_HOST;
+
+ if (ha->curr_window != 1)
+ return off;
+
+ /* We are in the QM or direct access
+ * register region - do nothing
+ */
+ if ((off >= QLA82XX_PCI_DIRECT_CRB) &&
+ (off < QLA82XX_PCI_CAMQM_MAX))
+ return off;
+ }
+ /* strange address given */
+ qla_printk(KERN_WARNING, ha,
+ "%s: Warning: unm_nic_pci_set_crbwindow called with"
+ " an unknown address(%llx)\n", QLA2XXX_DRIVER_NAME, off);
+ return off;
+}
+
+int
+qla82xx_wr_32(struct qla_hw_data *ha, ulong off, u32 data)
+{
+ unsigned long flags = 0;
+ int rv;
+
+ rv = qla82xx_pci_get_crb_addr_2M(ha, &off);
+
+ BUG_ON(rv == -1);
+
+ if (rv == 1) {
+ write_lock_irqsave(&ha->hw_lock, flags);
+ qla82xx_crb_win_lock(ha);
+ qla82xx_pci_set_crbwindow_2M(ha, &off);
+ }
+
+ writel(data, (void __iomem *)off);
+
+ if (rv == 1) {
+ qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ }
+ return 0;
+}
+
+int
+qla82xx_rd_32(struct qla_hw_data *ha, ulong off)
+{
+ unsigned long flags = 0;
+ int rv;
+ u32 data;
+
+ rv = qla82xx_pci_get_crb_addr_2M(ha, &off);
+
+ BUG_ON(rv == -1);
+
+ if (rv == 1) {
+ write_lock_irqsave(&ha->hw_lock, flags);
+ qla82xx_crb_win_lock(ha);
+ qla82xx_pci_set_crbwindow_2M(ha, &off);
+ }
+ data = RD_REG_DWORD((void __iomem *)off);
+
+ if (rv == 1) {
+ qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ }
+ return data;
+}
+
+#define CRB_WIN_LOCK_TIMEOUT 100000000
+int qla82xx_crb_win_lock(struct qla_hw_data *ha)
+{
+ int done = 0, timeout = 0;
+
+ while (!done) {
+ /* acquire semaphore7 (CRB window lock) from PCI HW block */
+ done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK));
+ if (done == 1)
+ break;
+ if (timeout >= CRB_WIN_LOCK_TIMEOUT)
+ return -1;
+ timeout++;
+ }
+ qla82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->portnum);
+ return 0;
+}
+
+#define IDC_LOCK_TIMEOUT 100000000
+int qla82xx_idc_lock(struct qla_hw_data *ha)
+{
+ int i;
+ int done = 0, timeout = 0;
+
+ while (!done) {
+ /* acquire semaphore5 from PCI HW block */
+ done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK));
+ if (done == 1)
+ break;
+ if (timeout >= IDC_LOCK_TIMEOUT)
+ return -1;
+
+ timeout++;
+
+ /* Yield CPU */
+ if (!in_interrupt())
+ schedule();
+ else {
+ for (i = 0; i < 20; i++)
+ cpu_relax();
+ }
+ }
+
+ return 0;
+}
+
+void qla82xx_idc_unlock(struct qla_hw_data *ha)
+{
+ qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK));
+}
+
+int
+qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong *off)
+{
+ struct crb_128M_2M_sub_block_map *m;
+
+ if (*off >= QLA82XX_CRB_MAX)
+ return -1;
+
+ if (*off >= QLA82XX_PCI_CAMQM && (*off < QLA82XX_PCI_CAMQM_2M_END)) {
+ *off = (*off - QLA82XX_PCI_CAMQM) +
+ QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase;
+ return 0;
+ }
+
+ if (*off < QLA82XX_PCI_CRBSPACE)
+ return -1;
+
+ *off -= QLA82XX_PCI_CRBSPACE;
+
+ /* Try direct map */
+ m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)];
+
+ if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) {
+ *off = *off + m->start_2M - m->start_128M + ha->nx_pcibase;
+ return 0;
+ }
+ /* Not in direct map, use crb window */
+ return 1;
+}
+
+/* PCI Windowing for DDR regions. */
+#define QLA82XX_ADDR_IN_RANGE(addr, low, high) \
+ (((addr) <= (high)) && ((addr) >= (low)))
+/*
+ * check memory access boundary.
+ * used by test agent. support ddr access only for now
+ */
+static unsigned long
+qla82xx_pci_mem_bound_check(struct qla_hw_data *ha,
+ unsigned long long addr, int size)
+{
+ if (!QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
+ QLA82XX_ADDR_DDR_NET_MAX) ||
+ !QLA82XX_ADDR_IN_RANGE(addr + size - 1, QLA82XX_ADDR_DDR_NET,
+ QLA82XX_ADDR_DDR_NET_MAX) ||
+ ((size != 1) && (size != 2) && (size != 4) && (size != 8)))
+ return 0;
+ else
+ return 1;
+}
+
+int qla82xx_pci_set_window_warning_count;
+
+unsigned long
+qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
+{
+ int window;
+ u32 win_read;
+
+ if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
+ QLA82XX_ADDR_DDR_NET_MAX)) {
+ /* DDR network side */
+ window = MN_WIN(addr);
+ ha->ddr_mn_window = window;
+ qla82xx_wr_32(ha,
+ ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window);
+ win_read = qla82xx_rd_32(ha,
+ ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
+ if ((win_read << 17) != window) {
+ qla_printk(KERN_WARNING, ha,
+ "%s: Written MNwin (0x%x) != Read MNwin (0x%x)\n",
+ __func__, window, win_read);
+ }
+ addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET;
+ } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
+ QLA82XX_ADDR_OCM0_MAX)) {
+ unsigned int temp1;
+ if ((addr & 0x00ff800) == 0xff800) {
+ qla_printk(KERN_WARNING, ha,
+ "%s: QM access not handled.\n", __func__);
+ addr = -1UL;
+ }
+ window = OCM_WIN(addr);
+ ha->ddr_mn_window = window;
+ qla82xx_wr_32(ha,
+ ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window);
+ win_read = qla82xx_rd_32(ha,
+ ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
+ temp1 = ((window & 0x1FF) << 7) |
+ ((window & 0x0FFFE0000) >> 17);
+ if (win_read != temp1) {
+ qla_printk(KERN_WARNING, ha,
+ "%s: Written OCMwin (0x%x) != Read OCMwin (0x%x)\n",
+ __func__, temp1, win_read);
+ }
+ addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M;
+
+ } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET,
+ QLA82XX_P3_ADDR_QDR_NET_MAX)) {
+ /* QDR network side */
+ window = MS_WIN(addr);
+ ha->qdr_sn_window = window;
+ qla82xx_wr_32(ha,
+ ha->ms_win_crb | QLA82XX_PCI_CRBSPACE, window);
+ win_read = qla82xx_rd_32(ha,
+ ha->ms_win_crb | QLA82XX_PCI_CRBSPACE);
+ if (win_read != window) {
+ qla_printk(KERN_WARNING, ha,
+ "%s: Written MSwin (0x%x) != Read MSwin (0x%x)\n",
+ __func__, window, win_read);
+ }
+ addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET;
+ } else {
+ /*
+ * peg gdb frequently accesses memory that doesn't exist;
+ * this limits the chatter so debugging isn't slowed down.
+ */
+ if ((qla82xx_pci_set_window_warning_count++ < 8) ||
+ (qla82xx_pci_set_window_warning_count%64 == 0)) {
+ qla_printk(KERN_WARNING, ha,
+ "%s: Warning:%s Unknown address range!\n", __func__,
+ QLA2XXX_DRIVER_NAME);
+ }
+ addr = -1UL;
+ }
+ return addr;
+}
+
+/* check if address is in the same windows as the previous access */
+static int qla82xx_pci_is_same_window(struct qla_hw_data *ha,
+ unsigned long long addr)
+{
+ int window;
+ unsigned long long qdr_max;
+
+ qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX;
+
+ /* DDR network side */
+ if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
+ QLA82XX_ADDR_DDR_NET_MAX))
+ BUG();
+ else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
+ QLA82XX_ADDR_OCM0_MAX))
+ return 1;
+ else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM1,
+ QLA82XX_ADDR_OCM1_MAX))
+ return 1;
+ else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET, qdr_max)) {
+ /* QDR network side */
+ window = ((addr - QLA82XX_ADDR_QDR_NET) >> 22) & 0x3f;
+ if (ha->qdr_sn_window == window)
+ return 1;
+ }
+ return 0;
+}
+
+static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha,
+ u64 off, void *data, int size)
+{
+ unsigned long flags;
+ void *addr;
+ int ret = 0;
+ u64 start;
+ uint8_t *mem_ptr = NULL;
+ unsigned long mem_base;
+ unsigned long mem_page;
+
+ write_lock_irqsave(&ha->hw_lock, flags);
+
+ /*
+ * If attempting to access unknown address or straddle hw windows,
+ * do not access.
+ */
+ start = qla82xx_pci_set_window(ha, off);
+ if ((start == -1UL) ||
+ (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ qla_printk(KERN_ERR, ha,
+ "%s out of bound pci memory access. "
+ "offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off);
+ return -1;
+ }
+
+ addr = qla82xx_pci_base_offsetfset(ha, start);
+ if (!addr) {
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ mem_base = pci_resource_start(ha->pdev, 0);
+ mem_page = start & PAGE_MASK;
+ /* Map two pages whenever user tries to access addresses in two
+ * consecutive pages.
+ */
+ if (mem_page != ((start + size - 1) & PAGE_MASK))
+ mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2);
+ else
+ mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
+ if (mem_ptr == 0UL) {
+ *(u8 *)data = 0;
+ return -1;
+ }
+ addr = mem_ptr;
+ addr += start & (PAGE_SIZE - 1);
+ write_lock_irqsave(&ha->hw_lock, flags);
+ }
+
+ switch (size) {
+ case 1:
+ *(u8 *)data = readb(addr);
+ break;
+ case 2:
+ *(u16 *)data = readw(addr);
+ break;
+ case 4:
+ *(u32 *)data = readl(addr);
+ break;
+ case 8:
+ *(u64 *)data = readq(addr);
+ break;
+ default:
+ ret = -1;
+ break;
+ }
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+
+ if (mem_ptr)
+ iounmap(mem_ptr);
+ return ret;
+}
+
+static int
+qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
+ u64 off, void *data, int size)
+{
+ unsigned long flags;
+ void *addr;
+ int ret = 0;
+ u64 start;
+ uint8_t *mem_ptr = NULL;
+ unsigned long mem_base;
+ unsigned long mem_page;
+
+ write_lock_irqsave(&ha->hw_lock, flags);
+
+ /*
+ * If attempting to access unknown address or straddle hw windows,
+ * do not access.
+ */
+ start = qla82xx_pci_set_window(ha, off);
+ if ((start == -1UL) ||
+ (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ qla_printk(KERN_ERR, ha,
+ "%s out of bound pci memory access. "
+ "offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off);
+ return -1;
+ }
+
+ addr = qla82xx_pci_base_offsetfset(ha, start);
+ if (!addr) {
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ mem_base = pci_resource_start(ha->pdev, 0);
+ mem_page = start & PAGE_MASK;
+ /* Map two pages whenever user tries to access addresses in two
+ * consecutive pages.
+ */
+ if (mem_page != ((start + size - 1) & PAGE_MASK))
+ mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2);
+ else
+ mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
+ if (mem_ptr == 0UL)
+ return -1;
+
+ addr = mem_ptr;
+ addr += start & (PAGE_SIZE - 1);
+ write_lock_irqsave(&ha->hw_lock, flags);
+ }
+
+ switch (size) {
+ case 1:
+ writeb(*(u8 *)data, addr);
+ break;
+ case 2:
+ writew(*(u16 *)data, addr);
+ break;
+ case 4:
+ writel(*(u32 *)data, addr);
+ break;
+ case 8:
+ writeq(*(u64 *)data, addr);
+ break;
+ default:
+ ret = -1;
+ break;
+ }
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ if (mem_ptr)
+ iounmap(mem_ptr);
+ return ret;
+}
+
+int
+qla82xx_wrmem(struct qla_hw_data *ha, u64 off, void *data, int size)
+{
+ int i, j, ret = 0, loop, sz[2], off0;
+ u32 temp;
+ u64 off8, mem_crb, tmpw, word[2] = {0, 0};
+#define MAX_CTL_CHECK 1000
+ /*
+ * If not MN, go check for MS or invalid.
+ */
+ if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX) {
+ mem_crb = QLA82XX_CRB_QDR_NET;
+ } else {
+ mem_crb = QLA82XX_CRB_DDR_NET;
+ if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
+ return qla82xx_pci_mem_write_direct(ha, off,
+ data, size);
+ }
+
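+ /*
+ * Split the access at an 8-byte boundary: off8 is the
+ * qword-aligned address, off0 the byte offset within that
+ * qword, and sz[0]/sz[1] the byte counts falling in the first
+ * and (possibly) second qword. Partial qwords are read first
+ * so the update below is a read-modify-write.
+ */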
+ off8 = off & 0xfffffff8;
+ off0 = off & 0x7;
+ sz[0] = (size < (8 - off0)) ? size : (8 - off0);
+ sz[1] = size - sz[0];
+ loop = ((off0 + size - 1) >> 3) + 1;
+
+ if ((size != 8) || (off0 != 0)) {
+ for (i = 0; i < loop; i++) {
+ if (qla82xx_rdmem(ha, off8 + (i << 3), &word[i], 8))
+ return -1;
+ }
+ }
+
+ switch (size) {
+ case 1:
+ tmpw = *((u8 *)data);
+ break;
+ case 2:
+ tmpw = *((u16 *)data);
+ break;
+ case 4:
+ tmpw = *((u32 *)data);
+ break;
+ case 8:
+ default:
+ tmpw = *((u64 *)data);
+ break;
+ }
+
+ word[0] &= ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
+ word[0] |= tmpw << (off0 * 8);
+
+ if (loop == 2) {
+ word[1] &= ~(~0ULL << (sz[1] * 8));
+ word[1] |= tmpw >> (sz[0] * 8);
+ }
+
+ for (i = 0; i < loop; i++) {
+ temp = off8 + (i << 3);
+ qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
+ temp = 0;
+ qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
+ temp = word[i] & 0xffffffff;
+ qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
+ temp = (word[i] >> 32) & 0xffffffff;
+ qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
+ temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
+ qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp);
+ temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
+ qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
+ if ((temp & MIU_TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ qla_printk(KERN_WARNING, ha,
+ "%s: Fail to write through agent\n",
+ QLA2XXX_DRIVER_NAME);
+ ret = -1;
+ break;
+ }
+ }
+ return ret;
+}
+
+int
+qla82xx_rdmem(struct qla_hw_data *ha, u64 off, void *data, int size)
+{
+ int i, j = 0, k, start, end, loop, sz[2], off0[2];
+ u32 temp;
+ u64 off8, val, mem_crb, word[2] = {0, 0};
+#define MAX_CTL_CHECK 1000
+
+ /*
+ * If not MN, go check for MS or invalid.
+ */
+ if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
+ mem_crb = QLA82XX_CRB_QDR_NET;
+ else {
+ mem_crb = QLA82XX_CRB_DDR_NET;
+ if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
+ return qla82xx_pci_mem_read_direct(ha, off,
+ data, size);
+ }
+
+ off8 = off & 0xfffffff8;
+ off0[0] = off & 0x7;
+ off0[1] = 0;
+ sz[0] = (size < (8 - off0[0])) ? size : (8 - off0[0]);
+ sz[1] = size - sz[0];
+ loop = ((off0[0] + size - 1) >> 3) + 1;
+
+ for (i = 0; i < loop; i++) {
+ temp = off8 + (i << 3);
+ qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
+ temp = 0;
+ qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
+ temp = MIU_TA_CTL_ENABLE;
+ qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
+ temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
+ qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
+ if ((temp & MIU_TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ qla_printk(KERN_INFO, ha,
+ "%s: Fail to read through agent\n",
+ QLA2XXX_DRIVER_NAME);
+ break;
+ }
+
+ start = off0[i] >> 2;
+ end = (off0[i] + sz[i] - 1) >> 2;
+ for (k = start; k <= end; k++) {
+ temp = qla82xx_rd_32(ha,
+ mem_crb + MIU_TEST_AGT_RDDATA(k));
+ word[i] |= ((u64)temp << (32 * k));
+ }
+ }
+
+ if (j >= MAX_CTL_CHECK)
+ return -1;
+
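+ /*
+ * Reassemble the caller's value from the qword(s) returned by
+ * the test agent: drop the leading off0 bytes of the first
+ * qword and, when the access straddled a qword boundary,
+ * splice in the low bytes of the second qword.
+ */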
+ if (sz[0] == 8) {
+ val = word[0];
+ } else {
+ val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) |
+ ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8));
+ }
+
+ switch (size) {
+ case 1:
+ *(u8 *)data = val;
+ break;
+ case 2:
+ *(u16 *)data = val;
+ break;
+ case 4:
+ *(u32 *)data = val;
+ break;
+ case 8:
+ *(u64 *)data = val;
+ break;
+ }
+ return 0;
+}
+
+#define MTU_FUDGE_FACTOR 100
+unsigned long qla82xx_decode_crb_addr(unsigned long addr)
+{
+ int i;
+ unsigned long base_addr, offset, pci_base;
+
+ if (!qla82xx_crb_table_initialized)
+ qla82xx_crb_addr_transform_setup();
+
+ pci_base = ADDR_ERROR;
+ base_addr = addr & 0xfff00000;
+ offset = addr & 0x000fffff;
+
+ for (i = 0; i < MAX_CRB_XFORM; i++) {
+ if (crb_addr_xform[i] == base_addr) {
+ pci_base = i << 20;
+ break;
+ }
+ }
+ if (pci_base == ADDR_ERROR)
+ return pci_base;
+ return pci_base + offset;
+}
+
+static long rom_max_timeout = 100;
+static long qla82xx_rom_lock_timeout = 100;
+
+int
+qla82xx_rom_lock(struct qla_hw_data *ha)
+{
+ int done = 0, timeout = 0;
+
+ while (!done) {
+ /* acquire semaphore2 from PCI HW block */
+ done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
+ if (done == 1)
+ break;
+ if (timeout >= qla82xx_rom_lock_timeout)
+ return -1;
+ timeout++;
+ }
+ qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER);
+ return 0;
+}
+
+int
+qla82xx_wait_rom_busy(struct qla_hw_data *ha)
+{
+ long timeout = 0;
+ long done = 0;
+
+ while (done == 0) {
+ done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
+ done &= 4;
+ timeout++;
+ if (timeout >= rom_max_timeout) {
+ DEBUG(qla_printk(KERN_INFO, ha,
+ "%s: Timeout reached waiting for rom busy",
+ QLA2XXX_DRIVER_NAME));
+ return -1;
+ }
+ }
+ return 0;
+}
+
+int
+qla82xx_wait_rom_done(struct qla_hw_data *ha)
+{
+ long timeout = 0;
+ long done = 0;
+
+ while (done == 0) {
+ done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
+ done &= 2;
+ timeout++;
+ if (timeout >= rom_max_timeout) {
+ DEBUG(qla_printk(KERN_INFO, ha,
+ "%s: Timeout reached waiting for rom done",
+ QLA2XXX_DRIVER_NAME));
+ return -1;
+ }
+ }
+ return 0;
+}
+
+int
+qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
+{
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb);
+ qla82xx_wait_rom_busy(ha);
+ if (qla82xx_wait_rom_done(ha)) {
+ qla_printk(KERN_WARNING, ha,
+ "%s: Error waiting for rom done\n",
+ QLA2XXX_DRIVER_NAME);
+ return -1;
+ }
+ /* Reset abyte_cnt and dummy_byte_cnt */
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+ udelay(10);
+ cond_resched();
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
+ *valp = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
+ return 0;
+}
+
+int
+qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
+{
+ int ret, loops = 0;
+
+ while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
+ udelay(100);
+ schedule();
+ loops++;
+ }
+ if (loops >= 50000) {
+ qla_printk(KERN_INFO, ha,
+ "%s: qla82xx_rom_lock failed\n",
+ QLA2XXX_DRIVER_NAME);
+ return -1;
+ }
+ ret = qla82xx_do_rom_fast_read(ha, addr, valp);
+ qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
+ return ret;
+}
+
+int
+qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val)
+{
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR);
+ qla82xx_wait_rom_busy(ha);
+ if (qla82xx_wait_rom_done(ha)) {
+ qla_printk(KERN_WARNING, ha,
+ "Error waiting for rom done\n");
+ return -1;
+ }
+ *val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
+ return 0;
+}
+
+int
+qla82xx_flash_wait_write_finish(struct qla_hw_data *ha)
+{
+ long timeout = 0;
+ uint32_t done = 1;
+ uint32_t val;
+ int ret = 0;
+
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
+ while ((done != 0) && (ret == 0)) {
+ ret = qla82xx_read_status_reg(ha, &val);
+ done = val & 1;
+ timeout++;
+ udelay(10);
+ cond_resched();
+ if (timeout >= 50000) {
+ qla_printk(KERN_WARNING, ha,
+ "Timeout reached waiting for write finish");
+ return -1;
+ }
+ }
+ return ret;
+}
+
+int
+qla82xx_flash_set_write_enable(struct qla_hw_data *ha)
+{
+ uint32_t val;
+ qla82xx_wait_rom_busy(ha);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WREN);
+ qla82xx_wait_rom_busy(ha);
+ if (qla82xx_wait_rom_done(ha))
+ return -1;
+ if (qla82xx_read_status_reg(ha, &val) != 0)
+ return -1;
+ if ((val & 2) != 2)
+ return -1;
+ return 0;
+}
+
+int
+qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val)
+{
+ if (qla82xx_flash_set_write_enable(ha))
+ return -1;
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, val);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0x1);
+ if (qla82xx_wait_rom_done(ha)) {
+ qla_printk(KERN_WARNING, ha,
+ "Error waiting for rom done\n");
+ return -1;
+ }
+ return qla82xx_flash_wait_write_finish(ha);
+}
+
+int
+qla82xx_write_disable_flash(struct qla_hw_data *ha)
+{
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI);
+ if (qla82xx_wait_rom_done(ha)) {
+ qla_printk(KERN_WARNING, ha,
+ "Error waiting for rom done\n");
+ return -1;
+ }
+ return 0;
+}
+
+int
+ql82xx_rom_lock_d(struct qla_hw_data *ha)
+{
+ int loops = 0;
+ while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
+ udelay(100);
+ cond_resched();
+ loops++;
+ }
+ if (loops >= 50000) {
+ qla_printk(KERN_WARNING, ha, "ROM lock failed\n");
+ return -1;
+ }
+ return 0;
+}
+
+int
+qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
+ uint32_t data)
+{
+ int ret = 0;
+
+ ret = ql82xx_rom_lock_d(ha);
+ if (ret < 0) {
+ qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
+ return ret;
+ }
+
+ if (qla82xx_flash_set_write_enable(ha))
+ goto done_write;
+
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, data);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, flashaddr);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_PP);
+ qla82xx_wait_rom_busy(ha);
+ if (qla82xx_wait_rom_done(ha)) {
+ qla_printk(KERN_WARNING, ha,
+ "Error waiting for rom done\n");
+ ret = -1;
+ goto done_write;
+ }
+
+ ret = qla82xx_flash_wait_write_finish(ha);
+
+done_write:
+ qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
+ return ret;
+}
+
+/* This routine does CRB initialize sequence
+ * to put the ISP into operational state
+ */
+int qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
+{
+ int addr, val;
+ int i;
+ struct crb_addr_pair *buf;
+ unsigned long off;
+ unsigned offset, n;
+ struct qla_hw_data *ha = vha->hw;
+
+ struct crb_addr_pair {
+ long addr;
+ long data;
+ };
+
+ /* Halt all the individual PEGs and other blocks of the ISP */
+ qla82xx_rom_lock(ha);
+ if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
+ /* don't reset CAM block on reset */
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
+ else
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff);
+ qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
+
+ /* Read the signature value from the flash.
+ * Offset 0: Contains the signature (0xcafecafe)
+ * Offset 4: Offset and number of addr/value pairs
+ * present in the CRB initialization sequence
+ */
+ if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL ||
+ qla82xx_rom_fast_read(ha, 4, &n) != 0) {
+ qla_printk(KERN_WARNING, ha,
+ "[ERROR] Reading crb_init area: n: %08x\n", n);
+ return -1;
+ }
+
+ /* Offset in flash = lower 16 bits
+ * Number of entries = upper 16 bits
+ */
+ offset = n & 0xffffU;
+ n = (n >> 16) & 0xffffU;
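+ /* For example, n == 0x00400100 yields offset = 0x0100 and n = 0x0040 entries. */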
+
+ /* The number of addr/value pairs should not exceed 1024 entries */
+ if (n >= 1024) {
+ qla_printk(KERN_WARNING, ha,
+ "%s: %s:n=0x%x [ERROR] Card flash not initialized.\n",
+ QLA2XXX_DRIVER_NAME, __func__, n);
+ return -1;
+ }
+
+ qla_printk(KERN_INFO, ha,
+ "%s: %d CRB init values found in ROM.\n", QLA2XXX_DRIVER_NAME, n);
+
+ buf = kmalloc(n * sizeof(struct crb_addr_pair), GFP_KERNEL);
+ if (buf == NULL) {
+ qla_printk(KERN_WARNING, ha,
+ "%s: [ERROR] Unable to malloc memory.\n",
+ QLA2XXX_DRIVER_NAME);
+ return -1;
+ }
+
+ for (i = 0; i < n; i++) {
+ if (qla82xx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 ||
+ qla82xx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) != 0) {
+ kfree(buf);
+ return -1;
+ }
+
+ buf[i].addr = addr;
+ buf[i].data = val;
+ }
+
+ for (i = 0; i < n; i++) {
+ /* Translate internal CRB initialization
+ * address to PCI bus address
+ */
+ off = qla82xx_decode_crb_addr((unsigned long)buf[i].addr) +
+ QLA82XX_PCI_CRBSPACE;
+ /* Not all CRB addr/value pairs are written;
+ * some of them are skipped
+ */
+
+ /* skipping cold reboot MAGIC */
+ if (off == QLA82XX_CAM_RAM(0x1fc))
+ continue;
+
+ /* do not reset PCI */
+ if (off == (ROMUSB_GLB + 0xbc))
+ continue;
+
+ /* skip core clock, so that firmware can increase the clock */
+ if (off == (ROMUSB_GLB + 0xc8))
+ continue;
+
+ /* skip the function enable register */
+ if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION))
+ continue;
+
+ if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION2))
+ continue;
+
+ if ((off & 0x0ff00000) == QLA82XX_CRB_SMB)
+ continue;
+
+ if ((off & 0x0ff00000) == QLA82XX_CRB_DDR_NET)
+ continue;
+
+ if (off == ADDR_ERROR) {
+ qla_printk(KERN_WARNING, ha,
+ "%s: [ERROR] Unknown addr: 0x%08lx\n",
+ QLA2XXX_DRIVER_NAME, buf[i].addr);
+ continue;
+ }
+
+ if (off == (QLA82XX_CRB_PEG_NET_1 + 0x18)) {
+ if (!QLA82XX_IS_REVISION_P3PLUS(ha->chip_revision))
+ buf[i].data = 0x1020;
+ }
+
+ qla82xx_wr_32(ha, off, buf[i].data);
+
+ /* The ISP requires a much longer delay to settle down,
+ * else crb_window returns 0xffffffff
+ */
+ if (off == QLA82XX_ROMUSB_GLB_SW_RESET)
+ msleep(1000);
+
+ /* The ISP requires a millisecond delay between
+ * successive CRB register updates
+ */
+ msleep(1);
+ }
+
+ kfree(buf);
+
+ /* Resetting the data and instruction cache */
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8);
+
+ /* Clear all protocol processing engines */
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0x8, 0);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0xc, 0);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0x8, 0);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0xc, 0);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0x8, 0);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0xc, 0);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0);
+ return 0;
+}
+
+int qla82xx_check_for_bad_spd(struct qla_hw_data *ha)
+{
+ u32 val = 0;
+ val = qla82xx_rd_32(ha, BOOT_LOADER_DIMM_STATUS);
+ val &= QLA82XX_BOOT_LOADER_MN_ISSUE;
+ if (val & QLA82XX_PEG_TUNE_MN_SPD_ZEROED) {
+ qla_printk(KERN_INFO, ha,
+ "Memory DIMM SPD not programmed. "
+ " Assumed valid.\n");
+ return 1;
+ } else if (val) {
+ qla_printk(KERN_INFO, ha,
+ "Memory DIMM type incorrect.Info:%08X.\n", val);
+ return 2;
+ }
+ return 0;
+}
+
+int
+qla82xx_fw_load_from_flash(struct qla_hw_data *ha)
+{
+ int i;
+ long size = 0;
+ long flashaddr = BOOTLD_START, memaddr = BOOTLD_START;
+ u64 data;
+ u32 high, low;
+ size = (IMAGE_START - BOOTLD_START) / 8;
+
+ for (i = 0; i < size; i++) {
+ if ((qla82xx_rom_fast_read(ha, flashaddr, (int *)&low)) ||
+ (qla82xx_rom_fast_read(ha, flashaddr + 4, (int *)&high))) {
+ return -1;
+ }
+ data = ((u64)high << 32) | low;
+ qla82xx_pci_mem_write_2M(ha, memaddr, &data, 8);
+ flashaddr += 8;
+ memaddr += 8;
+
+ if (i % 0x1000 == 0)
+ msleep(1);
+ }
+ udelay(100);
+ read_lock(&ha->hw_lock);
+ if (QLA82XX_IS_REVISION_P3PLUS(ha->chip_revision)) {
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
+ } else {
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001d);
+ }
+ read_unlock(&ha->hw_lock);
+ return 0;
+}
+
+int
+qla82xx_pci_mem_read_2M(struct qla_hw_data *ha,
+ u64 off, void *data, int size)
+{
+ int i, j = 0, k, start, end, loop, sz[2], off0[2];
+ int shift_amount;
+ uint32_t temp;
+ uint64_t off8, val, mem_crb, word[2] = {0, 0};
+
+ /*
+ * If not MN, go check for MS or invalid.
+ */
+
+ if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
+ mem_crb = QLA82XX_CRB_QDR_NET;
+ else {
+ mem_crb = QLA82XX_CRB_DDR_NET;
+ if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
+ return qla82xx_pci_mem_read_direct(ha,
+ off, data, size);
+ }
+
+ if (QLA82XX_IS_REVISION_P3PLUS(ha->chip_revision)) {
+ off8 = off & 0xfffffff0;
+ off0[0] = off & 0xf;
+ sz[0] = (size < (16 - off0[0])) ? size : (16 - off0[0]);
+ shift_amount = 4;
+ } else {
+ off8 = off & 0xfffffff8;
+ off0[0] = off & 0x7;
+ sz[0] = (size < (8 - off0[0])) ? size : (8 - off0[0]);
+ shift_amount = 4;
+ }
+ loop = ((off0[0] + size - 1) >> shift_amount) + 1;
+ off0[1] = 0;
+ sz[1] = size - sz[0];
+
+ /*
+ * don't lock here - write_wx gets the lock itself each time:
+ * write_lock_irqsave(&adapter->adapter_lock, flags);
+ * netxen_nic_pci_change_crbwindow_128M(adapter, 0);
+ */
+
+ for (i = 0; i < loop; i++) {
+ temp = off8 + (i << shift_amount);
+ qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
+ temp = 0;
+ qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
+ temp = MIU_TA_CTL_ENABLE;
+ qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
+ temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
+ qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
+ if ((temp & MIU_TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ if (printk_ratelimit())
+ dev_err(&ha->pdev->dev,
+ "failed to read through agent\n");
+ break;
+ }
+
+ start = off0[i] >> 2;
+ end = (off0[i] + sz[i] - 1) >> 2;
+ for (k = start; k <= end; k++) {
+ temp = qla82xx_rd_32(ha,
+ mem_crb + MIU_TEST_AGT_RDDATA(k));
+ word[i] |= ((uint64_t)temp << (32 * (k & 1)));
+ }
+ }
+
+ /*
+ * netxen_nic_pci_change_crbwindow_128M(adapter, 1);
+ * write_unlock_irqrestore(&adapter->adapter_lock, flags);
+ */
+
+ if (j >= MAX_CTL_CHECK)
+ return -1;
+
+ if ((off0[0] & 7) == 0) {
+ val = word[0];
+ } else {
+ val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) |
+ ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8));
+ }
+
+ switch (size) {
+ case 1:
+ *(uint8_t *)data = val;
+ break;
+ case 2:
+ *(uint16_t *)data = val;
+ break;
+ case 4:
+ *(uint32_t *)data = val;
+ break;
+ case 8:
+ *(uint64_t *)data = val;
+ break;
+ }
+ return 0;
+}
+
+int
+qla82xx_pci_mem_write_2M(struct qla_hw_data *ha,
+ u64 off, void *data, int size)
+{
+ int i, j, ret = 0, loop, sz[2], off0;
+ int scale, shift_amount, p3p, startword;
+ uint32_t temp;
+ uint64_t off8, mem_crb, tmpw, word[2] = {0, 0};
+
+ /*
+ * If not MN, go check for MS or invalid.
+ */
+ if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
+ mem_crb = QLA82XX_CRB_QDR_NET;
+ else {
+ mem_crb = QLA82XX_CRB_DDR_NET;
+ if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
+ return qla82xx_pci_mem_write_direct(ha,
+ off, data, size);
+ }
+
+ off0 = off & 0x7;
+ sz[0] = (size < (8 - off0)) ? size : (8 - off0);
+ sz[1] = size - sz[0];
+
+ if (QLA82XX_IS_REVISION_P3PLUS(ha->chip_revision)) {
+ off8 = off & 0xfffffff0;
+ loop = (((off & 0xf) + size - 1) >> 4) + 1;
+ shift_amount = 4;
+ scale = 2;
+ p3p = 1;
+ startword = (off & 0xf)/8;
+ } else {
+ off8 = off & 0xfffffff8;
+ loop = ((off0 + size - 1) >> 3) + 1;
+ shift_amount = 3;
+ scale = 1;
+ p3p = 0;
+ startword = 0;
+ }
+
+ if (p3p || (size != 8) || (off0 != 0)) {
+ for (i = 0; i < loop; i++) {
+ if (qla82xx_pci_mem_read_2M(ha, off8 +
+ (i << shift_amount), &word[i * scale], 8))
+ return -1;
+ }
+ }
+
+ switch (size) {
+ case 1:
+ tmpw = *((uint8_t *)data);
+ break;
+ case 2:
+ tmpw = *((uint16_t *)data);
+ break;
+ case 4:
+ tmpw = *((uint32_t *)data);
+ break;
+ case 8:
+ default:
+ tmpw = *((uint64_t *)data);
+ break;
+ }
+
+ if (QLA82XX_IS_REVISION_P3PLUS(ha->chip_revision)) {
+ if (sz[0] == 8) {
+ word[startword] = tmpw;
+ } else {
+ word[startword] &=
+ ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
+ word[startword] |= tmpw << (off0 * 8);
+ }
+ if (sz[1] != 0) {
+ word[startword+1] &= ~(~0ULL << (sz[1] * 8));
+ word[startword+1] |= tmpw >> (sz[0] * 8);
+ }
+ } else {
+ word[startword] &= ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
+ word[startword] |= tmpw << (off0 * 8);
+
+ if (loop == 2) {
+ word[1] &= ~(~0ULL << (sz[1] * 8));
+ word[1] |= tmpw >> (sz[0] * 8);
+ }
+ }
+
+ /*
+ * don't lock here - write_wx gets the lock itself each time:
+ * write_lock_irqsave(&adapter->adapter_lock, flags);
+ * netxen_nic_pci_change_crbwindow_128M(adapter, 0);
+ */
+ for (i = 0; i < loop; i++) {
+ temp = off8 + (i << shift_amount);
+ qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
+ temp = 0;
+ qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
+ temp = word[i * scale] & 0xffffffff;
+ qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
+ temp = (word[i * scale] >> 32) & 0xffffffff;
+ qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
+ if (QLA82XX_IS_REVISION_P3PLUS(ha->chip_revision)) {
+ temp = word[i*scale + 1] & 0xffffffff;
+ qla82xx_wr_32(ha, mem_crb +
+ MIU_TEST_AGT_WRDATA_UPPER_LO, temp);
+ temp = (word[i*scale + 1] >> 32) & 0xffffffff;
+ qla82xx_wr_32(ha, mem_crb +
+ MIU_TEST_AGT_WRDATA_UPPER_HI, temp);
+ }
+
+ temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
+ qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
+ temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
+ qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
+ if ((temp & MIU_TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ if (printk_ratelimit())
+ dev_err(&ha->pdev->dev,
+ "failed to write through agent\n");
+ ret = -1;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/* PCI related functions */
+char *
+qla82xx_pci_info_str(struct scsi_qla_host *vha, char *str)
+{
+ int pcie_reg;
+ struct qla_hw_data *ha = vha->hw;
+ char lwstr[6];
+ uint16_t lnk;
+
+ pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
+ pci_read_config_word(ha->pdev, pcie_reg + PCI_EXP_LNKSTA, &lnk);
+ ha->link_width = (lnk >> 4) & 0x3f;
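+ /* For example, a LNKSTA value of 0x0041 gives link_width = 4 (x4 link). */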
+
+ strcpy(str, "PCIe (");
+ strcat(str, "2.5Gb/s ");
+ snprintf(lwstr, sizeof(lwstr), "x%d)", ha->link_width);
+ strcat(str, lwstr);
+ return str;
+}
+
+int qla82xx_pci_region_offset(struct pci_dev *pdev, int region)
+{
+ unsigned long val = 0;
+ u32 control;
+
+ switch (region) {
+ case 0:
+ val = 0;
+ break;
+ case 1:
+ pci_read_config_dword(pdev, QLA82XX_PCI_REG_MSIX_TBL, &control);
+ val = control + QLA82XX_MSIX_TBL_SPACE;
+ break;
+ }
+ return val;
+}
+
+int qla82xx_pci_region_len(struct pci_dev *pdev, int region)
+{
+ unsigned long val = 0;
+ u32 control;
+ switch (region) {
+ case 0:
+ pci_read_config_dword(pdev, QLA82XX_PCI_REG_MSIX_TBL, &control);
+ val = control;
+ break;
+ case 1:
+ val = pci_resource_len(pdev, 0) -
+ qla82xx_pci_region_offset(pdev, 1);
+ break;
+ }
+ return val;
+}
+
+int
+qla82xx_iospace_config(struct qla_hw_data *ha)
+{
+ uint32_t len = 0;
+
+ if (pci_request_regions(ha->pdev, QLA2XXX_DRIVER_NAME)) {
+ qla_printk(KERN_WARNING, ha,
+ "Failed to reserve selected regions (%s)\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+
+ /* Use MMIO operations for all accesses. */
+ if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
+ qla_printk(KERN_ERR, ha,
+ "region #0 not an MMIO resource (%s), aborting\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+
+ len = pci_resource_len(ha->pdev, 0);
+ ha->nx_pcibase =
+ (unsigned long)ioremap(pci_resource_start(ha->pdev, 0), len);
+ if (!ha->nx_pcibase) {
+ qla_printk(KERN_ERR, ha,
+ "cannot remap pcibase MMIO (%s), aborting\n",
+ pci_name(ha->pdev));
+ pci_release_regions(ha->pdev);
+ goto iospace_error_exit;
+ }
+
+ /* Mapping of IO base pointer */
+ ha->iobase = (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase +
+ 0xbc000 + (ha->pdev->devfn << 11));
+
+ if (!ql2xdbwr) {
+ ha->nxdb_wr_ptr =
+ (unsigned long)ioremap((pci_resource_start(ha->pdev, 4) +
+ (ha->pdev->devfn << 12)), 4);
+ if (!ha->nxdb_wr_ptr) {
+ qla_printk(KERN_ERR, ha,
+ "cannot remap MMIO (%s), aborting\n",
+ pci_name(ha->pdev));
+ pci_release_regions(ha->pdev);
+ goto iospace_error_exit;
+ }
+
+ /* Mapping of IO base pointer,
+ * doorbell read and write pointer
+ */
+ ha->nxdb_rd_ptr = (uint8_t *) ha->nx_pcibase + (512 * 1024) +
+ (ha->pdev->devfn * 8);
+ } else {
+ ha->nxdb_wr_ptr = (ha->pdev->devfn == 6 ?
+ QLA82XX_CAMRAM_DB1 :
+ QLA82XX_CAMRAM_DB2);
+ }
+
+ ha->max_req_queues = ha->max_rsp_queues = 1;
+ ha->msix_count = ha->max_rsp_queues + 1;
+ return 0;
+
+iospace_error_exit:
+ return -ENOMEM;
+}
+
+/* GS related functions */
+
+/* Initialization related functions */
+
+/**
+ * qla82xx_pci_config() - Setup ISP82xx PCI configuration registers.
+ * @vha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qla82xx_pci_config(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ int ret;
+
+ pci_set_master(ha->pdev);
+ ret = pci_set_mwi(ha->pdev);
+ ha->chip_revision = ha->pdev->revision;
+ return 0;
+}
+
+/**
+ * qla82xx_reset_chip() - Prepare the ISP82xx for reset by disabling interrupts.
+ * @vha: HA context
+ *
+ */
+void
+qla82xx_reset_chip(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ ha->isp_ops->disable_intrs(ha);
+}
+
+void qla82xx_config_rings(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
+ struct init_cb_81xx *icb;
+ struct req_que *req = ha->req_q_map[0];
+ struct rsp_que *rsp = ha->rsp_q_map[0];
+
+ /* Setup ring parameters in initialization control block. */
+ icb = (struct init_cb_81xx *)ha->init_cb;
+ icb->request_q_outpointer = __constant_cpu_to_le16(0);
+ icb->response_q_inpointer = __constant_cpu_to_le16(0);
+ icb->request_q_length = cpu_to_le16(req->length);
+ icb->response_q_length = cpu_to_le16(rsp->length);
+ icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
+ icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
+ icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
+ icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
+
+ icb->version = 1;
+ icb->frame_payload_size = 2112;
+ icb->execution_throttle = 8;
+ icb->exchange_count = 128;
+ icb->login_retry_count = 8;
+
+ WRT_REG_DWORD((unsigned long __iomem *)&reg->req_q_out[0], 0);
+ WRT_REG_DWORD((unsigned long __iomem *)&reg->rsp_q_in[0], 0);
+ WRT_REG_DWORD((unsigned long __iomem *)&reg->rsp_q_out[0], 0);
+}
+
+int qla82xx_fw_load_from_blob(struct qla_hw_data *ha)
+{
+ u64 *ptr64;
+ u32 i, flashaddr, size;
+ __le64 data;
+
+ size = (IMAGE_START - BOOTLD_START) / 8;
+
+ ptr64 = (u64 *)&ha->hablob->fw->data[BOOTLD_START];
+ flashaddr = BOOTLD_START;
+
+ for (i = 0; i < size; i++) {
+ data = cpu_to_le64(ptr64[i]);
+ qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8);
+ flashaddr += 8;
+ }
+
+ size = *(u32 *)&ha->hablob->fw->data[FW_SIZE_OFFSET];
+ size = (__force u32)cpu_to_le32(size) / 8;
+ ptr64 = (u64 *)&ha->hablob->fw->data[IMAGE_START];
+ flashaddr = FLASH_ADDR_START;
+
+ for (i = 0; i < size; i++) {
+ data = cpu_to_le64(ptr64[i]);
+
+ if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8))
+ return -EIO;
+ flashaddr += 8;
+ }
+
+ /* Write a magic value to CAMRAM register
+ * at a specified offset to indicate
+ * that all data is written and
+ * ready for firmware to initialize.
+ */
+ qla82xx_wr_32(ha, QLA82XX_CAM_RAM(0x1fc), 0x12345678);
+
+ if (QLA82XX_IS_REVISION_P3PLUS(ha->chip_revision)) {
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
+ } else
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001d);
+ return 0;
+}
+
+int qla82xx_check_cmdpeg_state(struct qla_hw_data *ha)
+{
+ u32 val = 0;
+ int retries = 60;
+
+ do {
+ read_lock(&ha->hw_lock);
+ val = qla82xx_rd_32(ha, CRB_CMDPEG_STATE);
+ read_unlock(&ha->hw_lock);
+
+ switch (val) {
+ case PHAN_INITIALIZE_COMPLETE:
+ case PHAN_INITIALIZE_ACK:
+ return QLA_SUCCESS;
+ case PHAN_INITIALIZE_FAILED:
+ break;
+ default:
+ break;
+ }
+ qla_printk(KERN_WARNING, ha,
+ "CRB_CMDPEG_STATE: 0x%x and retries: 0x%x\n",
+ val, retries);
+
+ msleep(500);
+
+ } while (--retries);
+
+ qla_printk(KERN_INFO, ha,
+ "Cmd Peg initialization failed: 0x%x.\n", val);
+
+ qla82xx_check_for_bad_spd(ha);
+ val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
+ read_lock(&ha->hw_lock);
+ qla82xx_wr_32(ha, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
+ read_unlock(&ha->hw_lock);
+ return QLA_FUNCTION_FAILED;
+}
+
+int qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
+{
+ u32 val = 0;
+ int retries = 60;
+
+ do {
+ read_lock(&ha->hw_lock);
+ val = qla82xx_rd_32(ha, CRB_RCVPEG_STATE);
+ read_unlock(&ha->hw_lock);
+
+ switch (val) {
+ case PHAN_INITIALIZE_COMPLETE:
+ case PHAN_INITIALIZE_ACK:
+ return QLA_SUCCESS;
+ case PHAN_INITIALIZE_FAILED:
+ break;
+ default:
+ break;
+ }
+
+ qla_printk(KERN_WARNING, ha,
+ "CRB_RCVPEG_STATE: 0x%x and retries: 0x%x\n",
+ val, retries);
+
+ msleep(500);
+
+ } while (--retries);
+
+ qla_printk(KERN_INFO, ha,
+ "Rcv Peg initialization failed: 0x%x.\n", val);
+ read_lock(&ha->hw_lock);
+ qla82xx_wr_32(ha, CRB_RCVPEG_STATE, PHAN_INITIALIZE_FAILED);
+ read_unlock(&ha->hw_lock);
+ return QLA_FUNCTION_FAILED;
+}
+
+/* ISR related functions */
+uint32_t qla82xx_isr_int_target_mask_enable[8] = {
+ ISR_INT_TARGET_MASK, ISR_INT_TARGET_MASK_F1,
+ ISR_INT_TARGET_MASK_F2, ISR_INT_TARGET_MASK_F3,
+ ISR_INT_TARGET_MASK_F4, ISR_INT_TARGET_MASK_F5,
+ ISR_INT_TARGET_MASK_F7, ISR_INT_TARGET_MASK_F7
+};
+
+uint32_t qla82xx_isr_int_target_status[8] = {
+ ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
+ ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
+ ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
+ ISR_INT_TARGET_STATUS_F7, ISR_INT_TARGET_STATUS_F7
+};
+
+static struct qla82xx_legacy_intr_set legacy_intr[] = \
+ QLA82XX_LEGACY_INTR_CONFIG;
+
+/*
+ * qla82xx_mbx_completion() - Process mailbox command completions.
+ * @ha: SCSI driver HA context
+ * @mb0: Mailbox0 register
+ */
+void
+qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
+{
+ uint16_t cnt;
+ uint16_t __iomem *wptr;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
+ wptr = (uint16_t __iomem *)&reg->mailbox_out[1];
+
+ /* Load return mailbox registers. */
+ ha->flags.mbox_int = 1;
+ ha->mailbox_out[0] = mb0;
+
+ for (cnt = 1; cnt < ha->mbx_count; cnt++) {
+ ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
+ wptr++;
+ }
+
+ if (ha->mcp) {
+ DEBUG3_11(printk(KERN_INFO "%s(%ld): "
+ "Got mailbox completion. cmd=%x.\n",
+ __func__, vha->host_no, ha->mcp->mb[0]));
+ } else {
+ qla_printk(KERN_INFO, ha,
+ "%s(%ld): MBX pointer ERROR!\n",
+ __func__, vha->host_no);
+ }
+}
+
+/*
+ * qla82xx_intr_handler() - Process interrupts for the ISP82xx.
+ * @irq:
+ * @dev_id: SCSI driver HA context
+ * @regs:
+ *
+ * Called by system whenever the host adapter generates an interrupt.
+ *
+ * Returns handled flag.
+ */
+irqreturn_t
+qla82xx_intr_handler(int irq, void *dev_id)
+{
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ struct rsp_que *rsp;
+ struct device_reg_82xx __iomem *reg;
+ int status = 0, status1 = 0;
+ unsigned long flags;
+ unsigned long iter;
+ uint32_t stat;
+ uint16_t mb[4];
+
+ rsp = (struct rsp_que *) dev_id;
+ if (!rsp) {
+ printk(KERN_INFO
+ "%s(): NULL response queue pointer\n", __func__);
+ return IRQ_NONE;
+ }
+ ha = rsp->hw;
+
+ if (!ha->flags.msi_enabled) {
+ status = qla82xx_rd_32(ha, ISR_INT_VECTOR);
+ if (!(status & ha->nx_legacy_intr.int_vec_bit))
+ return IRQ_NONE;
+
+ status1 = qla82xx_rd_32(ha, ISR_INT_STATE_REG);
+ if (!ISR_IS_LEGACY_INTR_TRIGGERED(status1))
+ return IRQ_NONE;
+ }
+
+ /* clear the interrupt */
+ qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
+
+ /* read twice to ensure write is flushed */
+ qla82xx_rd_32(ha, ISR_INT_VECTOR);
+ qla82xx_rd_32(ha, ISR_INT_VECTOR);
+
+ reg = &ha->iobase->isp82;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ vha = pci_get_drvdata(ha->pdev);
+ for (iter = 1; iter--; ) {
+
+ if (RD_REG_DWORD(&reg->host_int)) {
+ stat = RD_REG_DWORD(&reg->host_status);
+ if (stat & HSRX_RISC_PAUSED) {
+ if (pci_channel_offline(ha->pdev))
+ break;
+
+ qla_printk(KERN_INFO, ha, "RISC paused\n");
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ break;
+ } else if ((stat & HSRX_RISC_INT) == 0)
+ break;
+
+ switch (stat & 0xff) {
+ case 0x1:
+ case 0x2:
+ case 0x10:
+ case 0x11:
+ qla82xx_mbx_completion(vha, MSW(stat));
+ status |= MBX_INTERRUPT;
+ break;
+ case 0x12:
+ mb[0] = MSW(stat);
+ mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
+ mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
+ mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
+ qla2x00_async_event(vha, rsp, mb);
+ break;
+ case 0x13:
+ qla24xx_process_response_queue(vha, rsp);
+ break;
+ default:
+ DEBUG2(printk("scsi(%ld): "
+ " Unrecognized interrupt type (%d).\n",
+ vha->host_no, stat & 0xff));
+ break;
+ }
+ }
+ WRT_REG_DWORD(&reg->host_int, 0);
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (!ha->flags.msi_enabled)
+ qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
+
+#ifdef QL_DEBUG_LEVEL_17
+ if (!irq && ha->flags.eeh_busy)
+ qla_printk(KERN_WARNING, ha,
+ "isr: status %x, cmd_flags %lx, mbox_int %x, stat %x\n",
+ status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
+#endif
+
+ if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
+ (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
+ set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+ complete(&ha->mbx_intr_comp);
+ }
+ return IRQ_HANDLED;
+}
+
+irqreturn_t
+qla82xx_msix_default(int irq, void *dev_id)
+{
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ struct rsp_que *rsp;
+ struct device_reg_82xx __iomem *reg;
+ int status = 0;
+ unsigned long flags;
+ uint32_t stat;
+ uint16_t mb[4];
+
+ rsp = (struct rsp_que *) dev_id;
+ if (!rsp) {
+ printk(KERN_INFO
+ "%s(): NULL response queue pointer\n", __func__);
+ return IRQ_NONE;
+ }
+ ha = rsp->hw;
+
+ reg = &ha->iobase->isp82;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ vha = pci_get_drvdata(ha->pdev);
+ do {
+ if (RD_REG_DWORD(&reg->host_int)) {
+ stat = RD_REG_DWORD(&reg->host_status);
+ if (stat & HSRX_RISC_PAUSED) {
+ if (pci_channel_offline(ha->pdev))
+ break;
+
+ qla_printk(KERN_INFO, ha, "RISC paused\n");
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ break;
+ } else if ((stat & HSRX_RISC_INT) == 0)
+ break;
+
+ switch (stat & 0xff) {
+ case 0x1:
+ case 0x2:
+ case 0x10:
+ case 0x11:
+ qla82xx_mbx_completion(vha, MSW(stat));
+ status |= MBX_INTERRUPT;
+ break;
+ case 0x12:
+ mb[0] = MSW(stat);
+ mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
+ mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
+ mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
+ qla2x00_async_event(vha, rsp, mb);
+ break;
+ case 0x13:
+ qla24xx_process_response_queue(vha, rsp);
+ break;
+ default:
+ DEBUG2(printk("scsi(%ld): "
+ " Unrecognized interrupt type (%d).\n",
+ vha->host_no, stat & 0xff));
+ break;
+ }
+ }
+ WRT_REG_DWORD(&reg->host_int, 0);
+ } while (0);
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+#ifdef QL_DEBUG_LEVEL_17
+ if (!irq && ha->flags.eeh_busy)
+ qla_printk(KERN_WARNING, ha,
+ "isr: status %x, cmd_flags %lx, mbox_int %x, stat %x\n",
+ status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
+#endif
+
+ if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
+ (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
+ set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+ complete(&ha->mbx_intr_comp);
+ }
+ return IRQ_HANDLED;
+}
+
+irqreturn_t
+qla82xx_msix_rsp_q(int irq, void *dev_id)
+{
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ struct rsp_que *rsp;
+ struct device_reg_82xx __iomem *reg;
+
+ rsp = (struct rsp_que *) dev_id;
+ if (!rsp) {
+ printk(KERN_INFO
+ "%s(): NULL response queue pointer\n", __func__);
+ return IRQ_NONE;
+ }
+
+ ha = rsp->hw;
+ reg = &ha->iobase->isp82;
+ spin_lock_irq(&ha->hardware_lock);
+ vha = pci_get_drvdata(ha->pdev);
+ qla24xx_process_response_queue(vha, rsp);
+ WRT_REG_DWORD(&reg->host_int, 0);
+ spin_unlock_irq(&ha->hardware_lock);
+ return IRQ_HANDLED;
+}
+
+void
+qla82xx_poll(int irq, void *dev_id)
+{
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ struct rsp_que *rsp;
+ struct device_reg_82xx __iomem *reg;
+ int status = 0;
+ uint32_t stat;
+ uint16_t mb[4];
+ unsigned long flags;
+
+ rsp = (struct rsp_que *) dev_id;
+ if (!rsp) {
+ printk(KERN_INFO
+ "%s(): NULL response queue pointer\n", __func__);
+ return;
+ }
+ ha = rsp->hw;
+
+ reg = &ha->iobase->isp82;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ vha = pci_get_drvdata(ha->pdev);
+
+ if (RD_REG_DWORD(&reg->host_int)) {
+ stat = RD_REG_DWORD(&reg->host_status);
+ switch (stat & 0xff) {
+ case 0x1:
+ case 0x2:
+ case 0x10:
+ case 0x11:
+ qla82xx_mbx_completion(vha, MSW(stat));
+ status |= MBX_INTERRUPT;
+ break;
+ case 0x12:
+ mb[0] = MSW(stat);
+ mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
+ mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
+ mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
+ qla2x00_async_event(vha, rsp, mb);
+ break;
+ case 0x13:
+ qla24xx_process_response_queue(vha, rsp);
+ break;
+ default:
+ DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
+ "(%d).\n",
+ vha->host_no, stat & 0xff));
+ break;
+ }
+ }
+ WRT_REG_DWORD(&reg->host_int, 0);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+void
+qla82xx_enable_intrs(struct qla_hw_data *ha)
+{
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+ qla82xx_mbx_intr_enable(vha);
+ spin_lock_irq(&ha->hardware_lock);
+ qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
+ spin_unlock_irq(&ha->hardware_lock);
+ ha->interrupts_on = 1;
+}
+
+void
+qla82xx_disable_intrs(struct qla_hw_data *ha)
+{
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+ qla82xx_mbx_intr_disable(vha);
+ spin_lock_irq(&ha->hardware_lock);
+ qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
+ spin_unlock_irq(&ha->hardware_lock);
+ ha->interrupts_on = 0;
+}
+
+void qla82xx_init_flags(struct qla_hw_data *ha)
+{
+ struct qla82xx_legacy_intr_set *nx_legacy_intr;
+
+ /* ISP 8021 initializations */
+ rwlock_init(&ha->hw_lock);
+ ha->qdr_sn_window = -1;
+ ha->ddr_mn_window = -1;
+ ha->curr_window = 255;
+ ha->portnum = PCI_FUNC(ha->pdev->devfn);
+ nx_legacy_intr = &legacy_intr[ha->portnum];
+ ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
+ ha->nx_legacy_intr.tgt_status_reg = nx_legacy_intr->tgt_status_reg;
+ ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
+ ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
+}
+
+static inline void
+qla82xx_set_drv_active(scsi_qla_host_t *vha)
+{
+ uint32_t drv_active;
+ struct qla_hw_data *ha = vha->hw;
+
+ drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+
+ /* If reset value is all FF's, initialize DRV_ACTIVE */
+ if (drv_active == 0xffffffff) {
+ qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, 0);
+ drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+ }
+ drv_active |= (1 << (ha->portnum * 4));
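+ /* For example, portnum == 1 sets bit 4 of DRV_ACTIVE. */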
+ qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
+}
+
+inline void
+qla82xx_clear_drv_active(struct qla_hw_data *ha)
+{
+ uint32_t drv_active;
+
+ drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+ drv_active &= ~(1 << (ha->portnum * 4));
+ qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
+}
+
+static inline int
+qla82xx_need_reset(struct qla_hw_data *ha)
+{
+ uint32_t drv_state;
+ int rval;
+
+ drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+ rval = drv_state & (1 << (ha->portnum * 4));
+ return rval;
+}
+
+static inline void
+qla82xx_set_rst_ready(struct qla_hw_data *ha)
+{
+ uint32_t drv_state;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+ drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+
+ /* If reset value is all FF's, initialize DRV_STATE */
+ if (drv_state == 0xffffffff) {
+ qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
+ drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+ }
+ drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
+ qla_printk(KERN_INFO, ha,
+ "%s(%ld):drv_state = 0x%x\n",
+ __func__, vha->host_no, drv_state);
+ qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
+}
+
+static inline void
+qla82xx_clear_rst_ready(struct qla_hw_data *ha)
+{
+ uint32_t drv_state;
+
+ drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+ drv_state &= ~(QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
+ qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
+}
+
+static inline void
+qla82xx_set_qsnt_ready(struct qla_hw_data *ha)
+{
+ uint32_t qsnt_state;
+
+ qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+ qsnt_state |= (QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4));
+ qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
+}
+
+int qla82xx_load_fw(scsi_qla_host_t *vha)
+{
+ int rst;
+ struct fw_blob *blob;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Put both the PEG CMD and RCV PEG to default state
+ * of 0 before resetting the hardware
+ */
+ qla82xx_wr_32(ha, CRB_CMDPEG_STATE, 0);
+ qla82xx_wr_32(ha, CRB_RCVPEG_STATE, 0);
+
+ if (qla82xx_pinit_from_rom(vha) != QLA_SUCCESS) {
+ qla_printk(KERN_ERR, ha,
+ "%s: Error during CRB Initialization\n", __func__);
+ return QLA_FUNCTION_FAILED;
+ }
+ udelay(500);
+
+ /* Bring QM and CAMRAM out of reset */
+ rst = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET);
+ rst &= ~((1 << 28) | (1 << 24));
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, rst);
+
+ /*
+ * FW Load priority:
+ * 1) Operational firmware residing in flash.
+ * 2) Firmware via request-firmware interface (.bin file).
+ */
+ if (ql2xfwloadbin == 2)
+ goto try_blob_fw;
+
+ qla_printk(KERN_INFO, ha,
+ "Attempting to load firmware from flash\n");
+
+ if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) {
+ qla_printk(KERN_INFO, ha,
+ "Firmware loaded successfully from flash\n");
+ return QLA_SUCCESS;
+ }
+try_blob_fw:
+ qla_printk(KERN_INFO, ha,
+ "Attempting to load firmware from blob\n");
+
+ /* Load firmware blob. */
+ blob = ha->hablob = qla2x00_request_firmware(vha);
+ if (!blob) {
+ qla_printk(KERN_ERR, ha,
+ "Firmware image not present.\n");
+ goto fw_load_failed;
+ }
+
+ if (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) {
+ qla_printk(KERN_INFO, ha,
+ "%s: Firmware loaded successfully "
+ "from binary blob\n", __func__);
+ return QLA_SUCCESS;
+ } else {
+ qla_printk(KERN_ERR, ha,
+ "Firmware load failed from binary blob\n");
+ blob->fw = NULL;
+ blob = NULL;
+ goto fw_load_failed;
+ }
+ return QLA_SUCCESS;
+
+fw_load_failed:
+ return QLA_FUNCTION_FAILED;
+}
+
+static int
+qla82xx_start_firmware(scsi_qla_host_t *vha)
+{
+ int pcie_cap;
+ uint16_t lnk;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* scrub dma mask expansion register */
+ qla82xx_wr_32(ha, CRB_DMA_SHIFT, 0x55555555);
+
+ /* Overwrite stale initialization register values */
+ qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0);
+ qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0);
+
+ if (qla82xx_load_fw(vha) != QLA_SUCCESS) {
+ qla_printk(KERN_INFO, ha,
+ "%s: Error trying to start fw!\n", __func__);
+ return QLA_FUNCTION_FAILED;
+ }
+
+ /* Handshake with the card before we register the devices. */
+ if (qla82xx_check_cmdpeg_state(ha) != QLA_SUCCESS) {
+ qla_printk(KERN_INFO, ha,
+ "%s: Error during card handshake!\n", __func__);
+ return QLA_FUNCTION_FAILED;
+ }
+
+ /* Negotiated Link width */
+ pcie_cap = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
+ pci_read_config_word(ha->pdev, pcie_cap + PCI_EXP_LNKSTA, &lnk);
+ ha->link_width = (lnk >> 4) & 0x3f;
+
+ /* Synchronize with Receive peg */
+ return qla82xx_check_rcvpeg_state(ha);
+}
+
+static inline int
+qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
+ uint16_t tot_dsds)
+{
+ uint32_t *cur_dsd = NULL;
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ struct scsi_cmnd *cmd;
+ struct scatterlist *cur_seg;
+ uint32_t *dsd_seg;
+ void *next_dsd;
+ uint8_t avail_dsds;
+ uint8_t first_iocb = 1;
+ uint32_t dsd_list_len;
+ struct dsd_dma *dsd_ptr;
+ struct ct6_dsd *ctx;
+
+ cmd = sp->cmd;
+
+ /* Update entry type to indicate Command Type 6 IOCB */
+ *((uint32_t *)(&cmd_pkt->entry_type)) =
+ __constant_cpu_to_le32(COMMAND_TYPE_6);
+
+ /* No data transfer */
+ if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
+ cmd_pkt->byte_count = __constant_cpu_to_le32(0);
+ return 0;
+ }
+
+ vha = sp->fcport->vha;
+ ha = vha->hw;
+
+ /* Set transfer direction */
+ if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+ cmd_pkt->control_flags =
+ __constant_cpu_to_le16(CF_WRITE_DATA);
+ ha->qla_stats.output_bytes += scsi_bufflen(cmd);
+ } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ cmd_pkt->control_flags =
+ __constant_cpu_to_le16(CF_READ_DATA);
+ ha->qla_stats.input_bytes += scsi_bufflen(cmd);
+ }
+
+ cur_seg = scsi_sglist(cmd);
+ ctx = sp->ctx;
+
+ while (tot_dsds) {
+ avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
+ QLA_DSDS_PER_IOCB : tot_dsds;
+ tot_dsds -= avail_dsds;
+ dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
+
+ dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
+ struct dsd_dma, list);
+ next_dsd = dsd_ptr->dsd_addr;
+ list_del(&dsd_ptr->list);
+ ha->gbl_dsd_avail--;
+ list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
+ ctx->dsd_use_cnt++;
+ ha->gbl_dsd_inuse++;
+
+ if (first_iocb) {
+ first_iocb = 0;
+ dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
+ *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+ *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+ *dsd_seg++ = dsd_list_len;
+ } else {
+ *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+ *cur_dsd++ = dsd_list_len;
+ }
+ cur_dsd = (uint32_t *)next_dsd;
+ while (avail_dsds) {
+ dma_addr_t sle_dma;
+
+ sle_dma = sg_dma_address(cur_seg);
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
+ cur_seg++;
+ avail_dsds--;
+ }
+ }
+
+ /* Null termination */
+ *cur_dsd++ = 0;
+ *cur_dsd++ = 0;
+ *cur_dsd++ = 0;
+ cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
+ return 0;
+}
+
+/*
+ * qla82xx_calc_dsd_lists() - Determine the number of DSD lists required
+ * for Command Type 6.
+ *
+ * @dsds: number of data segment descriptors needed
+ *
+ * Returns the number of DSD lists needed to store @dsds.
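+ *
+ * For example, dsds == 2 * QLA_DSDS_PER_IOCB + 1 yields 3 DSD lists.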
+ */
+inline uint16_t
+qla82xx_calc_dsd_lists(uint16_t dsds)
+{
+ uint16_t dsd_lists = 0;
+
+ dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
+ if (dsds % QLA_DSDS_PER_IOCB)
+ dsd_lists++;
+ return dsd_lists;
+}
+
+/*
+ * qla82xx_start_scsi() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+int
+qla82xx_start_scsi(srb_t *sp)
+{
+ int ret, nseg;
+ unsigned long flags;
+ struct scsi_cmnd *cmd;
+ uint32_t *clr_ptr;
+ uint32_t index;
+ uint32_t handle;
+ uint16_t cnt;
+ uint16_t req_cnt;
+ uint16_t tot_dsds;
+ struct device_reg_82xx __iomem *reg;
+ uint32_t dbval;
+ uint32_t *fcp_dl;
+ uint8_t additional_cdb_len;
+ struct ct6_dsd *ctx;
+ struct scsi_qla_host *vha = sp->fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = NULL;
+ struct rsp_que *rsp = NULL;
+
+ /* Setup device pointers. */
+ ret = 0;
+ reg = &ha->iobase->isp82;
+ cmd = sp->cmd;
+ req = vha->req;
+ rsp = ha->rsp_q_map[0];
+
+ /* So we know we haven't pci_map'ed anything yet */
+ tot_dsds = 0;
+
+ dbval = 0x04 | (ha->portnum << 5);
+
+ /* Send marker if required */
+ if (vha->marker_needed != 0) {
+ if (qla2x00_marker(vha, req,
+ rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
+ return QLA_FUNCTION_FAILED;
+ vha->marker_needed = 0;
+ }
+
+ /* Acquire ring specific lock */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ /* Check for room in outstanding command list. */
+ handle = req->current_outstanding_cmd;
+ for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+ handle++;
+ if (handle == MAX_OUTSTANDING_COMMANDS)
+ handle = 1;
+ if (!req->outstanding_cmds[handle])
+ break;
+ }
+ if (index == MAX_OUTSTANDING_COMMANDS)
+ goto queuing_error;
+
+ /* Map the sg table so we have an accurate count of sg entries needed */
+ if (scsi_sg_count(cmd)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+ scsi_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ goto queuing_error;
+ } else
+ nseg = 0;
+
+ tot_dsds = nseg;
+
+ if (tot_dsds > ql2xshiftctondsd) {
+ struct cmd_type_6 *cmd_pkt;
+ uint16_t more_dsd_lists = 0;
+ struct dsd_dma *dsd_ptr;
+ uint16_t i;
+
+ more_dsd_lists = qla82xx_calc_dsd_lists(tot_dsds);
+ if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN)
+ goto queuing_error;
+
+ if (more_dsd_lists <= ha->gbl_dsd_avail)
+ goto sufficient_dsds;
+ else
+ more_dsd_lists -= ha->gbl_dsd_avail;
+
+ for (i = 0; i < more_dsd_lists; i++) {
+ dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
+ if (!dsd_ptr)
+ goto queuing_error;
+
+ dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
+ GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
+ if (!dsd_ptr->dsd_addr) {
+ kfree(dsd_ptr);
+ goto queuing_error;
+ }
+ list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
+ ha->gbl_dsd_avail++;
+ }
+
+sufficient_dsds:
+ req_cnt = 1;
+
+ ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
+ if (!sp->ctx) {
+ DEBUG(printk(KERN_INFO
+ "%s(%ld): failed to allocate"
+ " ctx.\n", __func__, vha->host_no));
+ goto queuing_error;
+ }
+ memset(ctx, 0, sizeof(struct ct6_dsd));
+ ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
+ GFP_ATOMIC, &ctx->fcp_cmnd_dma);
+ if (!ctx->fcp_cmnd) {
+ DEBUG2_3(printk("%s(%ld): failed to allocate"
+ " fcp_cmnd.\n", __func__, vha->host_no));
+ goto queuing_error_fcp_cmnd;
+ }
+
+ /* Initialize the DSD list and dma handle */
+ INIT_LIST_HEAD(&ctx->dsd_list);
+ ctx->dsd_use_cnt = 0;
+
+ if (cmd->cmd_len > 16) {
+ additional_cdb_len = cmd->cmd_len - 16;
+ if ((cmd->cmd_len % 4) != 0) {
+ /* A SCSI command longer than 16 bytes must be
+ * a multiple of 4 bytes
+ */
+ goto queuing_error_fcp_cmnd;
+ }
+ ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
+ } else {
+ additional_cdb_len = 0;
+ ctx->fcp_cmnd_len = 12 + 16 + 4;
+ }
+
+ cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
+ cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+ /* Zero out remaining portion of packet. */
+ /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+ cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+ /* Set NPORT-ID and LUN number*/
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+ cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+ cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+ cmd_pkt->vp_index = sp->fcport->vp_idx;
+
+ /* Build IOCB segments */
+ if (qla2xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
+ goto queuing_error_fcp_cmnd;
+
+ int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
+
+ /* build FCP_CMND IU */
+ memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
+ int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
+ ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
+
+ if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ ctx->fcp_cmnd->additional_cdb_len |= 1;
+ else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+ ctx->fcp_cmnd->additional_cdb_len |= 2;
+
+ memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
+
+ fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
+ additional_cdb_len);
+ *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
+
+ cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
+ cmd_pkt->fcp_cmnd_dseg_address[0] =
+ cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
+ cmd_pkt->fcp_cmnd_dseg_address[1] =
+ cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
+
+ sp->flags |= SRB_FCP_CMND_DMA_VALID;
+ cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+ /* Set total data segment count. */
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+ /* Specify response queue number where
+ * completion should happen
+ */
+ cmd_pkt->entry_status = (uint8_t) rsp->id;
+ } else {
+ struct cmd_type_7 *cmd_pkt;
+ req_cnt = qla24xx_calc_iocbs(tot_dsds);
+ if (req->cnt < (req_cnt + 2)) {
+ cnt = (uint16_t)RD_REG_DWORD_RELAXED(
+ &reg->req_q_out[0]);
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length -
+ (req->ring_index - cnt);
+ }
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
+
+ cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
+ cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+ /* Zero out remaining portion of packet. */
+ /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+ cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+ /* Set NPORT-ID and LUN number*/
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+ cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+ cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+ cmd_pkt->vp_index = sp->fcport->vp_idx;
+
+ int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
+ host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
+ sizeof(cmd_pkt->lun));
+
+ /* Load SCSI command packet. */
+ memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
+ host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
+
+ cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+
+ /* Build IOCB segments */
+ qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
+
+ /* Set total data segment count. */
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+ /* Specify response queue number where
+ * completion should happen.
+ */
+ cmd_pkt->entry_status = (uint8_t) rsp->id;
+
+ }
+ /* Build command packet. */
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->handle = handle;
+ sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+ req->cnt -= req_cnt;
+ wmb();
+
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else
+ req->ring_ptr++;
+
+ sp->flags |= SRB_DMA_VALID;
+
+ /* Set chip new ring index. */
+ /* write, read and verify logic */
+ dbval = dbval | (req->id << 8) | (req->ring_index << 16);
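+ /* For example, with portnum 0, req->id 1 and ring_index 2, dbval becomes 0x00020104. */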
+ if (ql2xdbwr)
+ qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
+ else {
+ WRT_REG_DWORD(
+ (unsigned long __iomem *)ha->nxdb_wr_ptr,
+ dbval);
+ wmb();
+ while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
+ WRT_REG_DWORD(
+ (unsigned long __iomem *)ha->nxdb_wr_ptr,
+ dbval);
+ wmb();
+ }
+ }
+
+ /* Manage unprocessed RIO/ZIO commands in response queue. */
+ if (vha->flags.process_response_queue &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+ qla24xx_process_response_queue(vha, rsp);
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return QLA_SUCCESS;
+
+queuing_error_fcp_cmnd:
+ dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
+queuing_error:
+ if (tot_dsds)
+ scsi_dma_unmap(cmd);
+
+ if (sp->ctx) {
+ mempool_free(sp->ctx, ha->ctx_mempool);
+ sp->ctx = NULL;
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return QLA_FUNCTION_FAILED;
+}
+
+uint32_t *
+qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
+ uint32_t length)
+{
+ uint32_t i;
+ uint32_t val;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Dword reads from flash. */
+ for (i = 0; i < length/4; i++, faddr += 4) {
+ if (qla82xx_rom_fast_read(ha, faddr, &val)) {
+ qla_printk(KERN_WARNING, ha,
+ "Do ROM fast read failed\n");
+ goto done_read;
+ }
+ dwptr[i] = __constant_cpu_to_le32(val);
+ }
+done_read:
+ return dwptr;
+}
+
+int
+qla82xx_unprotect_flash(struct qla_hw_data *ha)
+{
+ int ret;
+ uint32_t val;
+
+ ret = ql82xx_rom_lock_d(ha);
+ if (ret < 0) {
+ qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
+ return ret;
+ }
+
+ ret = qla82xx_read_status_reg(ha, &val);
+ if (ret < 0)
+ goto done_unprotect;
+
+ val &= ~(0x7 << 2);
+ ret = qla82xx_write_status_reg(ha, val);
+ if (ret < 0) {
+ val |= (0x7 << 2);
+ qla82xx_write_status_reg(ha, val);
+ }
+
+ if (qla82xx_write_disable_flash(ha) != 0)
+ qla_printk(KERN_WARNING, ha, "Write disable failed\n");
+
+done_unprotect:
+ qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
+ return ret;
+}
+
+int
+qla82xx_protect_flash(struct qla_hw_data *ha)
+{
+ int ret;
+ uint32_t val;
+
+ ret = ql82xx_rom_lock_d(ha);
+ if (ret < 0) {
+ qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
+ return ret;
+ }
+
+ ret = qla82xx_read_status_reg(ha, &val);
+ if (ret < 0)
+ goto done_protect;
+
+ val |= (0x7 << 2);
+ /* LOCK all sectors */
+ ret = qla82xx_write_status_reg(ha, val);
+ if (ret < 0)
+ qla_printk(KERN_WARNING, ha, "Write status register failed\n");
+
+ if (qla82xx_write_disable_flash(ha) != 0)
+ qla_printk(KERN_WARNING, ha, "Write disable failed\n");
+done_protect:
+ qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
+ return ret;
+}
+
+int
+qla82xx_erase_sector(struct qla_hw_data *ha, int addr)
+{
+ int ret = 0;
+
+ ret = ql82xx_rom_lock_d(ha);
+ if (ret < 0) {
+ qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
+ return ret;
+ }
+
+ qla82xx_flash_set_write_enable(ha);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_SE);
+
+ if (qla82xx_wait_rom_done(ha)) {
+ qla_printk(KERN_WARNING, ha,
+ "Error waiting for rom done\n");
+ ret = -1;
+ goto done;
+ }
+ ret = qla82xx_flash_wait_write_finish(ha);
+done:
+ qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
+ return ret;
+}
+
+/*
+ * Address and length are in bytes
+ */
+uint8_t *
+qla82xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+ uint32_t offset, uint32_t length)
+{
+ scsi_block_requests(vha->host);
+ qla82xx_read_flash_data(vha, (uint32_t *)buf, offset, length);
+ scsi_unblock_requests(vha->host);
+ return buf;
+}
+
+static int
+qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
+ uint32_t faddr, uint32_t dwords)
+{
+ int ret;
+ uint32_t liter;
+ uint32_t sec_mask, rest_addr;
+ dma_addr_t optrom_dma;
+ void *optrom = NULL;
+ int page_mode = 0;
+ struct qla_hw_data *ha = vha->hw;
+
+ ret = -1;
+
+ /* Prepare burst-capable write on supported ISPs. */
+ if (page_mode && !(faddr & 0xfff) &&
+ dwords > OPTROM_BURST_DWORDS) {
+ optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
+ &optrom_dma, GFP_KERNEL);
+ if (!optrom) {
+ qla_printk(KERN_DEBUG, ha,
+ "Unable to allocate memory for optrom "
+ "burst write (%x KB).\n",
+ OPTROM_BURST_SIZE / 1024);
+ }
+ }
+
+ rest_addr = ha->fdt_block_size - 1;
+ sec_mask = ~rest_addr;
+
+ ret = qla82xx_unprotect_flash(ha);
+ if (ret) {
+ qla_printk(KERN_WARNING, ha,
+ "Unable to unprotect flash for update.\n");
+ goto write_done;
+ }
+
+ for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) {
+ /* Are we at the beginning of a sector? */
+ if ((faddr & rest_addr) == 0) {
+
+ ret = qla82xx_erase_sector(ha, faddr);
+ if (ret) {
+ DEBUG9(qla_printk(KERN_ERR, ha,
+ "Unable to erase sector: "
+ "address=%x.\n", faddr));
+ break;
+ }
+ }
+
+ /* Go with burst-write. */
+ if (optrom && (liter + OPTROM_BURST_DWORDS) <= dwords) {
+ /* Copy data to DMA'ble buffer. */
+ memcpy(optrom, dwptr, OPTROM_BURST_SIZE);
+
+ ret = qla2x00_load_ram(vha, optrom_dma,
+ (ha->flash_data_off | faddr),
+ OPTROM_BURST_DWORDS);
+ if (ret != QLA_SUCCESS) {
+ qla_printk(KERN_WARNING, ha,
+ "Unable to burst-write optrom segment "
+ "(%x/%x/%llx).\n", ret,
+ (ha->flash_data_off | faddr),
+ (unsigned long long)optrom_dma);
+ qla_printk(KERN_WARNING, ha,
+ "Reverting to slow-write.\n");
+
+ dma_free_coherent(&ha->pdev->dev,
+ OPTROM_BURST_SIZE, optrom, optrom_dma);
+ optrom = NULL;
+ } else {
+ liter += OPTROM_BURST_DWORDS - 1;
+ faddr += OPTROM_BURST_DWORDS - 1;
+ dwptr += OPTROM_BURST_DWORDS - 1;
+ continue;
+ }
+ }
+
+ ret = qla82xx_write_flash_dword(ha, faddr,
+ cpu_to_le32(*dwptr));
+ if (ret) {
+ DEBUG9(printk(KERN_DEBUG "%s(%ld) Unable to program"
+ "flash address=%x data=%x.\n", __func__,
+ ha->host_no, faddr, *dwptr));
+ break;
+ }
+ }
+
+ ret = qla82xx_protect_flash(ha);
+ if (ret)
+ qla_printk(KERN_WARNING, ha,
+ "Unable to protect flash after update.\n");
+write_done:
+ if (optrom)
+ dma_free_coherent(&ha->pdev->dev,
+ OPTROM_BURST_SIZE, optrom, optrom_dma);
+ return ret;
+}
+
+int
+qla82xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+ uint32_t offset, uint32_t length)
+{
+ int rval;
+
+ /* Suspend HBA. */
+ scsi_block_requests(vha->host);
+ rval = qla82xx_write_flash_data(vha, (uint32_t *)buf, offset,
+ length >> 2);
+ scsi_unblock_requests(vha->host);
+
+ /* Convert ISP82xx return code to generic */
+ if (rval)
+ rval = QLA_FUNCTION_FAILED;
+ else
+ rval = QLA_SUCCESS;
+ return rval;
+}
+
+void
+qla82xx_start_iocbs(srb_t *sp)
+{
+ struct qla_hw_data *ha = sp->fcport->vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+ struct device_reg_82xx __iomem *reg;
+ uint32_t dbval;
+
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else
+ req->ring_ptr++;
+
+ reg = &ha->iobase->isp82;
+ dbval = 0x04 | (ha->portnum << 5);
+
+ dbval = dbval | (req->id << 8) | (req->ring_index << 16);
+ WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval);
+ wmb();
+ while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
+ WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval);
+ wmb();
+ }
+}
+
+/*
+ * qla82xx_device_bootstrap
+ * Initialize device, set DEV_READY, start fw
+ *
+ * Note:
+ * IDC lock must be held upon entry
+ *
+ * Return:
+ * Success : 0
+ * Failed : 1
+ */
+static int
+qla82xx_device_bootstrap(scsi_qla_host_t *vha)
+{
+ int rval, i, timeout;
+ uint32_t old_count, count;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (qla82xx_need_reset(ha))
+ goto dev_initialize;
+
+ old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
+
+ for (i = 0; i < 10; i++) {
+ timeout = msleep_interruptible(200);
+ if (timeout) {
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+ QLA82XX_DEV_FAILED);
+ return QLA_FUNCTION_FAILED;
+ }
+
+ count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
+ if (count != old_count)
+ goto dev_ready;
+ }
+
+dev_initialize:
+ /* set to DEV_INITIALIZING */
+ qla_printk(KERN_INFO, ha, "HW State: INITIALIZING\n");
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_INITIALIZING);
+
+ /* The driver that sets the device state to initializing sets the IDC version */
+ qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION);
+
+ qla82xx_idc_unlock(ha);
+ rval = qla82xx_start_firmware(vha);
+ qla82xx_idc_lock(ha);
+
+ if (rval != QLA_SUCCESS) {
+ qla_printk(KERN_INFO, ha, "HW State: FAILED\n");
+ qla82xx_clear_drv_active(ha);
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED);
+ return rval;
+ }
+
+dev_ready:
+ qla_printk(KERN_INFO, ha, "HW State: READY\n");
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY);
+
+ return QLA_SUCCESS;
+}
+
+static void
+qla82xx_dev_failed_handler(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Disable the board */
+ qla_printk(KERN_INFO, ha, "Disabling the board\n");
+
+ /* Set DEV_FAILED flag to disable timer */
+ vha->device_flags |= DFLG_DEV_FAILED;
+ qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
+ qla2x00_mark_all_devices_lost(vha, 0);
+ vha->flags.online = 0;
+ vha->flags.init_done = 0;
+}
+
+/*
+ * qla82xx_need_reset_handler
+ * Code to start reset sequence
+ *
+ * Note:
+ * IDC lock must be held upon entry
+ *
+ */
+static void
+qla82xx_need_reset_handler(scsi_qla_host_t *vha)
+{
+ uint32_t dev_state, drv_state, drv_active;
+ unsigned long reset_timeout;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+
+ if (vha->flags.online) {
+ qla82xx_idc_unlock(ha);
+ qla2x00_abort_isp_cleanup(vha);
+ ha->isp_ops->get_flash_version(vha, req->ring);
+ ha->isp_ops->nvram_config(vha);
+ qla82xx_idc_lock(ha);
+ }
+
+ qla82xx_set_rst_ready(ha);
+
+ /* wait for 10 seconds for reset ack from all functions */
+ reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
+
+ drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+ drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+
+ while (drv_state != drv_active) {
+ if (time_after_eq(jiffies, reset_timeout)) {
+ qla_printk(KERN_INFO, ha,
+ "%s: RESET TIMEOUT!\n", QLA2XXX_DRIVER_NAME);
+ break;
+ }
+ qla82xx_idc_unlock(ha);
+ msleep(1000);
+ qla82xx_idc_lock(ha);
+ drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+ drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+ }
+
+ dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ /* Force to DEV_COLD unless someone else is starting a reset */
+ if (dev_state != QLA82XX_DEV_INITIALIZING) {
+ qla_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n");
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
+ }
+}
+
+static void
+qla82xx_check_fw_alive(scsi_qla_host_t *vha)
+{
+ uint32_t fw_heartbeat_counter, halt_status;
+ struct qla_hw_data *ha = vha->hw;
+
+ fw_heartbeat_counter = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
+ if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
+ vha->seconds_since_last_heartbeat++;
+ /* FW not alive after 2 seconds */
+ if (vha->seconds_since_last_heartbeat == 2) {
+ vha->seconds_since_last_heartbeat = 0;
+ halt_status = qla82xx_rd_32(ha,
+ QLA82XX_PEG_HALT_STATUS1);
+ if (halt_status & HALT_STATUS_UNRECOVERABLE) {
+ set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
+ } else {
+ qla_printk(KERN_INFO, ha,
+ "scsi(%ld): %s - detect abort needed\n",
+ vha->host_no, __func__);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ }
+ qla2xxx_wake_dpc(vha);
+ }
+ }
+ vha->fw_heartbeat_counter = fw_heartbeat_counter;
+}
+
+/*
+ * qla82xx_device_state_handler
+ * Main state handler
+ *
+ * Note:
+ * IDC lock must be held upon entry
+ *
+ * Return:
+ * Success : 0
+ * Failed : 1
+ */
+int
+qla82xx_device_state_handler(scsi_qla_host_t *vha)
+{
+ uint32_t dev_state;
+ uint32_t drv_active;
+ int rval = QLA_SUCCESS;
+ unsigned long dev_init_timeout;
+ struct qla_hw_data *ha = vha->hw;
+
+ qla82xx_idc_lock(ha);
+ if (!vha->flags.init_done)
+ qla82xx_set_drv_active(vha);
+
+ /* Set cold state */
+ if (!PCI_FUNC(ha->pdev->devfn & 1)) {
+
+ /* Check if any other function is alive; if not, set the
+ * dev state to cold
+ */
+ drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+ drv_active &= ~(1 << (ha->portnum * 4));
+ drv_active &= ~(1 << ((ha->portnum + 1) * 4));
+
+ dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ if (!drv_active) {
+
+ switch (dev_state) {
+ case QLA82XX_DEV_COLD:
+ case QLA82XX_DEV_READY:
+ case QLA82XX_DEV_INITIALIZING:
+ case QLA82XX_DEV_NEED_RESET:
+ case QLA82XX_DEV_NEED_QUIESCENT:
+ case QLA82XX_DEV_QUIESCENT:
+ case QLA82XX_DEV_FAILED:
+ break;
+ default:
+ qla_printk(KERN_INFO, ha,
+ "No other function exist,"
+ " resetting dev state to COLD\n");
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+ QLA82XX_DEV_COLD);
+ break;
+ }
+ }
+ }
+
+ /* wait for 30 seconds for device to go ready */
+ dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
+
+ while (1) {
+
+ if (time_after_eq(jiffies, dev_init_timeout)) {
+ DEBUG(qla_printk(KERN_INFO, ha,
+ "%s: device init failed!\n",
+ QLA2XXX_DRIVER_NAME));
+ rval = QLA_FUNCTION_FAILED;
+ break;
+ }
+ dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ switch (dev_state) {
+ case QLA82XX_DEV_READY:
+ goto exit;
+ case QLA82XX_DEV_COLD:
+ rval = qla82xx_device_bootstrap(vha);
+ goto exit;
+ case QLA82XX_DEV_INITIALIZING:
+ qla82xx_idc_unlock(ha);
+ msleep(1000);
+ qla82xx_idc_lock(ha);
+ break;
+ case QLA82XX_DEV_NEED_RESET:
+ if (!ql2xdontresethba)
+ qla82xx_need_reset_handler(vha);
+ break;
+ case QLA82XX_DEV_NEED_QUIESCENT:
+ qla82xx_set_qsnt_ready(ha);
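+ /* Fall through */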
+ case QLA82XX_DEV_QUIESCENT:
+ qla82xx_idc_unlock(ha);
+ msleep(1000);
+ qla82xx_idc_lock(ha);
+ break;
+ case QLA82XX_DEV_FAILED:
+ qla82xx_dev_failed_handler(vha);
+ rval = QLA_FUNCTION_FAILED;
+ goto exit;
+ default:
+ qla82xx_idc_unlock(ha);
+ msleep(1000);
+ qla82xx_idc_lock(ha);
+ }
+ }
+exit:
+ qla82xx_idc_unlock(ha);
+ return rval;
+}
+
+void qla82xx_watchdog(scsi_qla_host_t *vha)
+{
+ uint32_t dev_state;
+ struct qla_hw_data *ha = vha->hw;
+
+ dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+
+ /* don't poll if reset is going on */
+ if (!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+ test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))) {
+ if (dev_state == QLA82XX_DEV_NEED_RESET) {
+ qla_printk(KERN_WARNING, ha,
+ "%s(): Adapter reset needed!\n", __func__);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ } else {
+ qla82xx_check_fw_alive(vha);
+ }
+ }
+}
+
+int qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
+{
+ int rval;
+ rval = qla82xx_device_state_handler(vha);
+ return rval;
+}
+
+/*
+ * qla82xx_abort_isp
+ * Resets ISP and aborts all outstanding commands.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ *
+ * Returns:
+ * 0 = success
+ */
+int
+qla82xx_abort_isp(scsi_qla_host_t *vha)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t dev_state;
+
+ if (vha->device_flags & DFLG_DEV_FAILED) {
+ qla_printk(KERN_WARNING, ha,
+ "%s(%ld): Device in failed state, "
+ "Exiting.\n", __func__, vha->host_no);
+ return QLA_SUCCESS;
+ }
+
+ qla82xx_idc_lock(ha);
+ dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ if (dev_state != QLA82XX_DEV_INITIALIZING) {
+ qla_printk(KERN_INFO, ha, "HW State: NEED RESET\n");
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+ QLA82XX_DEV_NEED_RESET);
+ } else
+ qla_printk(KERN_INFO, ha, "HW State: DEVICE INITIALIZING\n");
+ qla82xx_idc_unlock(ha);
+
+ rval = qla82xx_device_state_handler(vha);
+
+ qla82xx_idc_lock(ha);
+ qla82xx_clear_rst_ready(ha);
+ qla82xx_idc_unlock(ha);
+
+ if (rval == QLA_SUCCESS)
+ qla82xx_restart_isp(vha);
+ return rval;
+}
+
+/*
+ * qla82xx_fcoe_ctx_reset
+ * Perform a quick reset and aborts all outstanding commands.
+ * This will only perform an FCoE context reset and avoids a full blown
+ * chip reset.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * is_reset_path = flag for identifying the reset path.
+ *
+ * Returns:
+ * 0 = success
+ */
+int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *vha)
+{
+ int rval = QLA_FUNCTION_FAILED;
+
+ if (vha->flags.online) {
+ /* Abort all outstanding commands, so as to be requeued later */
+ qla2x00_abort_isp_cleanup(vha);
+ }
+
+ /* Stop currently executing firmware.
+ * This will destroy existing FCoE context at the F/W end.
+ */
+ qla2x00_try_to_stop_firmware(vha);
+
+ /* Restart. Creates a new FCoE context on INIT_FIRMWARE. */
+ rval = qla82xx_restart_isp(vha);
+
+ return rval;
+}
+
+/*
+ * qla2x00_wait_for_fcoe_ctx_reset
+ * Wait till the FCoE context is reset.
+ *
+ * Note:
+ * Does context switching here.
+ * Release SPIN_LOCK (if any) before calling this routine.
+ *
+ * Return:
+ * Success (fcoe_ctx reset is done) : 0
+ * Failed (fcoe_ctx reset not completed within max loop timeout) : 1
+ */
+int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha)
+{
+ int status = QLA_FUNCTION_FAILED;
+ unsigned long wait_reset;
+
+ wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
+ while ((test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
+ test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
+ && time_before(jiffies, wait_reset)) {
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ);
+
+ if (!test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) &&
+ !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
+ status = QLA_SUCCESS;
+ break;
+ }
+ }
+ DEBUG2(printk(KERN_INFO
+ "%s status=%d\n", __func__, status));
+
+ return status;
+}
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
new file mode 100644
index 000000000000..7804bbbb3db3
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -0,0 +1,888 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2008 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#ifndef __QLA_NX_H
+#define __QLA_NX_H
+
+/*
+ * The following are the states of the Phantom. The Phantom sets them and
+ * the host reads them to check that the fields are correct.
+ */
+#define PHAN_INITIALIZE_FAILED 0xffff
+#define PHAN_INITIALIZE_COMPLETE 0xff01
+
+/* Host writes the following to notify that it has done the init-handshake */
+#define PHAN_INITIALIZE_ACK 0xf00f
+#define PHAN_PEG_RCV_INITIALIZED 0xff01
+
+/*CRB_RELATED*/
+#define QLA82XX_CRB_BASE QLA82XX_CAM_RAM(0x200)
+#define QLA82XX_REG(X) (QLA82XX_CRB_BASE+(X))
+
+#define CRB_CMDPEG_STATE QLA82XX_REG(0x50)
+#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c)
+#define BOOT_LOADER_DIMM_STATUS QLA82XX_REG(0x54)
+#define CRB_DMA_SHIFT QLA82XX_REG(0xcc)
+
+#define QLA82XX_HW_H0_CH_HUB_ADR 0x05
+#define QLA82XX_HW_H1_CH_HUB_ADR 0x0E
+#define QLA82XX_HW_H2_CH_HUB_ADR 0x03
+#define QLA82XX_HW_H3_CH_HUB_ADR 0x01
+#define QLA82XX_HW_H4_CH_HUB_ADR 0x06
+#define QLA82XX_HW_H5_CH_HUB_ADR 0x07
+#define QLA82XX_HW_H6_CH_HUB_ADR 0x08
+
+/* Hub 0 */
+#define QLA82XX_HW_MN_CRB_AGT_ADR 0x15
+#define QLA82XX_HW_MS_CRB_AGT_ADR 0x25
+
+/* Hub 1 */
+#define QLA82XX_HW_PS_CRB_AGT_ADR 0x73
+#define QLA82XX_HW_QMS_CRB_AGT_ADR 0x00
+#define QLA82XX_HW_RPMX3_CRB_AGT_ADR 0x0b
+#define QLA82XX_HW_SQGS0_CRB_AGT_ADR 0x01
+#define QLA82XX_HW_SQGS1_CRB_AGT_ADR 0x02
+#define QLA82XX_HW_SQGS2_CRB_AGT_ADR 0x03
+#define QLA82XX_HW_SQGS3_CRB_AGT_ADR 0x04
+#define QLA82XX_HW_C2C0_CRB_AGT_ADR 0x58
+#define QLA82XX_HW_C2C1_CRB_AGT_ADR 0x59
+#define QLA82XX_HW_C2C2_CRB_AGT_ADR 0x5a
+#define QLA82XX_HW_RPMX2_CRB_AGT_ADR 0x0a
+#define QLA82XX_HW_RPMX4_CRB_AGT_ADR 0x0c
+#define QLA82XX_HW_RPMX7_CRB_AGT_ADR 0x0f
+#define QLA82XX_HW_RPMX9_CRB_AGT_ADR 0x12
+#define QLA82XX_HW_SMB_CRB_AGT_ADR 0x18
+
+/* Hub 2 */
+#define QLA82XX_HW_NIU_CRB_AGT_ADR 0x31
+#define QLA82XX_HW_I2C0_CRB_AGT_ADR 0x19
+#define QLA82XX_HW_I2C1_CRB_AGT_ADR 0x29
+
+#define QLA82XX_HW_SN_CRB_AGT_ADR 0x10
+#define QLA82XX_HW_I2Q_CRB_AGT_ADR 0x20
+#define QLA82XX_HW_LPC_CRB_AGT_ADR 0x22
+#define QLA82XX_HW_ROMUSB_CRB_AGT_ADR 0x21
+#define QLA82XX_HW_QM_CRB_AGT_ADR 0x66
+#define QLA82XX_HW_SQG0_CRB_AGT_ADR 0x60
+#define QLA82XX_HW_SQG1_CRB_AGT_ADR 0x61
+#define QLA82XX_HW_SQG2_CRB_AGT_ADR 0x62
+#define QLA82XX_HW_SQG3_CRB_AGT_ADR 0x63
+#define QLA82XX_HW_RPMX1_CRB_AGT_ADR 0x09
+#define QLA82XX_HW_RPMX5_CRB_AGT_ADR 0x0d
+#define QLA82XX_HW_RPMX6_CRB_AGT_ADR 0x0e
+#define QLA82XX_HW_RPMX8_CRB_AGT_ADR 0x11
+
+/* Hub 3 */
+#define QLA82XX_HW_PH_CRB_AGT_ADR 0x1A
+#define QLA82XX_HW_SRE_CRB_AGT_ADR 0x50
+#define QLA82XX_HW_EG_CRB_AGT_ADR 0x51
+#define QLA82XX_HW_RPMX0_CRB_AGT_ADR 0x08
+
+/* Hub 4 */
+#define QLA82XX_HW_PEGN0_CRB_AGT_ADR 0x40
+#define QLA82XX_HW_PEGN1_CRB_AGT_ADR 0x41
+#define QLA82XX_HW_PEGN2_CRB_AGT_ADR 0x42
+#define QLA82XX_HW_PEGN3_CRB_AGT_ADR 0x43
+#define QLA82XX_HW_PEGNI_CRB_AGT_ADR 0x44
+#define QLA82XX_HW_PEGND_CRB_AGT_ADR 0x45
+#define QLA82XX_HW_PEGNC_CRB_AGT_ADR 0x46
+#define QLA82XX_HW_PEGR0_CRB_AGT_ADR 0x47
+#define QLA82XX_HW_PEGR1_CRB_AGT_ADR 0x48
+#define QLA82XX_HW_PEGR2_CRB_AGT_ADR 0x49
+#define QLA82XX_HW_PEGR3_CRB_AGT_ADR 0x4a
+#define QLA82XX_HW_PEGN4_CRB_AGT_ADR 0x4b
+
+/* Hub 5 */
+#define QLA82XX_HW_PEGS0_CRB_AGT_ADR 0x40
+#define QLA82XX_HW_PEGS1_CRB_AGT_ADR 0x41
+#define QLA82XX_HW_PEGS2_CRB_AGT_ADR 0x42
+#define QLA82XX_HW_PEGS3_CRB_AGT_ADR 0x43
+#define QLA82XX_HW_PEGSI_CRB_AGT_ADR 0x44
+#define QLA82XX_HW_PEGSD_CRB_AGT_ADR 0x45
+#define QLA82XX_HW_PEGSC_CRB_AGT_ADR 0x46
+
+/* Hub 6 */
+#define QLA82XX_HW_CAS0_CRB_AGT_ADR 0x46
+#define QLA82XX_HW_CAS1_CRB_AGT_ADR 0x47
+#define QLA82XX_HW_CAS2_CRB_AGT_ADR 0x48
+#define QLA82XX_HW_CAS3_CRB_AGT_ADR 0x49
+#define QLA82XX_HW_NCM_CRB_AGT_ADR 0x16
+#define QLA82XX_HW_TMR_CRB_AGT_ADR 0x17
+#define QLA82XX_HW_XDMA_CRB_AGT_ADR 0x05
+#define QLA82XX_HW_OCM0_CRB_AGT_ADR 0x06
+#define QLA82XX_HW_OCM1_CRB_AGT_ADR 0x07
+
+/* This field defines PCI/X adr [25:20] of agents on the CRB */
+/* */
+#define QLA82XX_HW_PX_MAP_CRB_PH 0
+#define QLA82XX_HW_PX_MAP_CRB_PS 1
+#define QLA82XX_HW_PX_MAP_CRB_MN 2
+#define QLA82XX_HW_PX_MAP_CRB_MS 3
+#define QLA82XX_HW_PX_MAP_CRB_SRE 5
+#define QLA82XX_HW_PX_MAP_CRB_NIU 6
+#define QLA82XX_HW_PX_MAP_CRB_QMN 7
+#define QLA82XX_HW_PX_MAP_CRB_SQN0 8
+#define QLA82XX_HW_PX_MAP_CRB_SQN1 9
+#define QLA82XX_HW_PX_MAP_CRB_SQN2 10
+#define QLA82XX_HW_PX_MAP_CRB_SQN3 11
+#define QLA82XX_HW_PX_MAP_CRB_QMS 12
+#define QLA82XX_HW_PX_MAP_CRB_SQS0 13
+#define QLA82XX_HW_PX_MAP_CRB_SQS1 14
+#define QLA82XX_HW_PX_MAP_CRB_SQS2 15
+#define QLA82XX_HW_PX_MAP_CRB_SQS3 16
+#define QLA82XX_HW_PX_MAP_CRB_PGN0 17
+#define QLA82XX_HW_PX_MAP_CRB_PGN1 18
+#define QLA82XX_HW_PX_MAP_CRB_PGN2 19
+#define QLA82XX_HW_PX_MAP_CRB_PGN3 20
+#define QLA82XX_HW_PX_MAP_CRB_PGN4 QLA82XX_HW_PX_MAP_CRB_SQS2
+#define QLA82XX_HW_PX_MAP_CRB_PGND 21
+#define QLA82XX_HW_PX_MAP_CRB_PGNI 22
+#define QLA82XX_HW_PX_MAP_CRB_PGS0 23
+#define QLA82XX_HW_PX_MAP_CRB_PGS1 24
+#define QLA82XX_HW_PX_MAP_CRB_PGS2 25
+#define QLA82XX_HW_PX_MAP_CRB_PGS3 26
+#define QLA82XX_HW_PX_MAP_CRB_PGSD 27
+#define QLA82XX_HW_PX_MAP_CRB_PGSI 28
+#define QLA82XX_HW_PX_MAP_CRB_SN 29
+#define QLA82XX_HW_PX_MAP_CRB_EG 31
+#define QLA82XX_HW_PX_MAP_CRB_PH2 32
+#define QLA82XX_HW_PX_MAP_CRB_PS2 33
+#define QLA82XX_HW_PX_MAP_CRB_CAM 34
+#define QLA82XX_HW_PX_MAP_CRB_CAS0 35
+#define QLA82XX_HW_PX_MAP_CRB_CAS1 36
+#define QLA82XX_HW_PX_MAP_CRB_CAS2 37
+#define QLA82XX_HW_PX_MAP_CRB_C2C0 38
+#define QLA82XX_HW_PX_MAP_CRB_C2C1 39
+#define QLA82XX_HW_PX_MAP_CRB_TIMR 40
+#define QLA82XX_HW_PX_MAP_CRB_RPMX1 42
+#define QLA82XX_HW_PX_MAP_CRB_RPMX2 43
+#define QLA82XX_HW_PX_MAP_CRB_RPMX3 44
+#define QLA82XX_HW_PX_MAP_CRB_RPMX4 45
+#define QLA82XX_HW_PX_MAP_CRB_RPMX5 46
+#define QLA82XX_HW_PX_MAP_CRB_RPMX6 47
+#define QLA82XX_HW_PX_MAP_CRB_RPMX7 48
+#define QLA82XX_HW_PX_MAP_CRB_XDMA 49
+#define QLA82XX_HW_PX_MAP_CRB_I2Q 50
+#define QLA82XX_HW_PX_MAP_CRB_ROMUSB 51
+#define QLA82XX_HW_PX_MAP_CRB_CAS3 52
+#define QLA82XX_HW_PX_MAP_CRB_RPMX0 53
+#define QLA82XX_HW_PX_MAP_CRB_RPMX8 54
+#define QLA82XX_HW_PX_MAP_CRB_RPMX9 55
+#define QLA82XX_HW_PX_MAP_CRB_OCM0 56
+#define QLA82XX_HW_PX_MAP_CRB_OCM1 57
+#define QLA82XX_HW_PX_MAP_CRB_SMB 58
+#define QLA82XX_HW_PX_MAP_CRB_I2C0 59
+#define QLA82XX_HW_PX_MAP_CRB_I2C1 60
+#define QLA82XX_HW_PX_MAP_CRB_LPC 61
+#define QLA82XX_HW_PX_MAP_CRB_PGNC 62
+#define QLA82XX_HW_PX_MAP_CRB_PGR0 63
+#define QLA82XX_HW_PX_MAP_CRB_PGR1 4
+#define QLA82XX_HW_PX_MAP_CRB_PGR2 30
+#define QLA82XX_HW_PX_MAP_CRB_PGR3 41
+
+/* This field defines CRB adr [31:20] of the agents */
+/* */
+
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_MN ((QLA82XX_HW_H0_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_MN_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PH ((QLA82XX_HW_H0_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PH_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_MS ((QLA82XX_HW_H0_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_MS_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PS ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PS_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SS ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SS_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_QMS ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_QMS_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS0 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQGS0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS1 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQGS1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS2 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQGS2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS3 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQGS3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_C2C0 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_C2C0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_C2C1 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_C2C1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX4_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX7_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX9_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SMB ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SMB_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_NIU ((QLA82XX_HW_H2_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_NIU_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0 ((QLA82XX_HW_H2_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_I2C0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1 ((QLA82XX_HW_H2_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_I2C1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SRE ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SRE_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_EG ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_EG_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_QMN ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_QM_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQG0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQG1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQG2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQG3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX5_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX6_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX8_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS0 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_CAS0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS1 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_CAS1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS2 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_CAS2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS3 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_CAS3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGNI_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGND ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGND_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGN0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGN1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGN2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGN3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGN4_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGNC_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR0 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGR0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR1 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGR1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR2 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGR2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR3 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGR3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGSI_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGSD ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGSD_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0 ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGS0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1 ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGS1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2 ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGS2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3 ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGS3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGSC ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGSC_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAM ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_NCM_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_TMR_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_XDMA_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SN ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SN_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_I2Q_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_ROMUSB_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0 ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_OCM0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_OCM1 ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_OCM1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_LPC ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_LPC_CRB_AGT_ADR)
+
+#define ROMUSB_GLB (QLA82XX_CRB_ROMUSB + 0x00000)
+#define QLA82XX_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c)
+#define QLA82XX_ROMUSB_GLB_STATUS (ROMUSB_GLB + 0x0004)
+#define QLA82XX_ROMUSB_GLB_SW_RESET (ROMUSB_GLB + 0x0008)
+#define QLA82XX_ROMUSB_ROM_ADDRESS (ROMUSB_ROM + 0x0008)
+#define QLA82XX_ROMUSB_ROM_WDATA (ROMUSB_ROM + 0x000c)
+#define QLA82XX_ROMUSB_ROM_ABYTE_CNT (ROMUSB_ROM + 0x0010)
+#define QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014)
+#define QLA82XX_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018)
+
+#define ROMUSB_ROM (QLA82XX_CRB_ROMUSB + 0x10000)
+#define QLA82XX_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004)
+#define QLA82XX_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038)
+
+/* Lock IDs for ROM lock */
+#define ROM_LOCK_DRIVER 0x0d417340
+
+#define QLA82XX_PCI_CRB_WINDOWSIZE 0x00100000 /* all are 1MB windows */
+#define QLA82XX_PCI_CRB_WINDOW(A) \
+ (QLA82XX_PCI_CRBSPACE + (A)*QLA82XX_PCI_CRB_WINDOWSIZE)
+#define QLA82XX_CRB_C2C_0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_C2C0)
+#define QLA82XX_CRB_C2C_1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_C2C1)
+#define QLA82XX_CRB_C2C_2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_C2C2)
+#define QLA82XX_CRB_CAM \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAM)
+#define QLA82XX_CRB_CASPER \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS)
+#define QLA82XX_CRB_CASPER_0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS0)
+#define QLA82XX_CRB_CASPER_1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS1)
+#define QLA82XX_CRB_CASPER_2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS2)
+#define QLA82XX_CRB_DDR_MD \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_MS)
+#define QLA82XX_CRB_DDR_NET \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_MN)
+#define QLA82XX_CRB_EPG \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_EG)
+#define QLA82XX_CRB_I2Q \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_I2Q)
+#define QLA82XX_CRB_NIU \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_NIU)
+
+#define QLA82XX_CRB_PCIX_HOST \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PH)
+#define QLA82XX_CRB_PCIX_HOST2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PH2)
+#define QLA82XX_CRB_PCIX_MD \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PS)
+#define QLA82XX_CRB_PCIE \
+ QLA82XX_CRB_PCIX_MD
+
+/* window 1 pcie slot */
+#define QLA82XX_CRB_PCIE2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PS2)
+#define QLA82XX_CRB_PEG_MD_0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS0)
+#define QLA82XX_CRB_PEG_MD_1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS1)
+#define QLA82XX_CRB_PEG_MD_2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS2)
+#define QLA82XX_CRB_PEG_MD_3 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS3)
+#define QLA82XX_CRB_PEG_MD_D \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGSD)
+#define QLA82XX_CRB_PEG_MD_I \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGSI)
+#define QLA82XX_CRB_PEG_NET_0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN0)
+#define QLA82XX_CRB_PEG_NET_1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN1)
+#define QLA82XX_CRB_PEG_NET_2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN2)
+#define QLA82XX_CRB_PEG_NET_3 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN3)
+#define QLA82XX_CRB_PEG_NET_4 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN4)
+#define QLA82XX_CRB_PEG_NET_D \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGND)
+#define QLA82XX_CRB_PEG_NET_I \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGNI)
+#define QLA82XX_CRB_PQM_MD \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_QMS)
+#define QLA82XX_CRB_PQM_NET \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_QMN)
+#define QLA82XX_CRB_QDR_MD \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SS)
+#define QLA82XX_CRB_QDR_NET \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SN)
+#define QLA82XX_CRB_ROMUSB \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_ROMUSB)
+#define QLA82XX_CRB_RPMX_0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX0)
+#define QLA82XX_CRB_RPMX_1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX1)
+#define QLA82XX_CRB_RPMX_2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX2)
+#define QLA82XX_CRB_RPMX_3 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX3)
+#define QLA82XX_CRB_RPMX_4 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX4)
+#define QLA82XX_CRB_RPMX_5 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX5)
+#define QLA82XX_CRB_RPMX_6 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX6)
+#define QLA82XX_CRB_RPMX_7 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX7)
+#define QLA82XX_CRB_SQM_MD_0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS0)
+#define QLA82XX_CRB_SQM_MD_1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS1)
+#define QLA82XX_CRB_SQM_MD_2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS2)
+#define QLA82XX_CRB_SQM_MD_3 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS3)
+#define QLA82XX_CRB_SQM_NET_0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN0)
+#define QLA82XX_CRB_SQM_NET_1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN1)
+#define QLA82XX_CRB_SQM_NET_2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN2)
+#define QLA82XX_CRB_SQM_NET_3 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN3)
+#define QLA82XX_CRB_SRE \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SRE)
+#define QLA82XX_CRB_TIMER \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_TIMR)
+#define QLA82XX_CRB_XDMA \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_XDMA)
+#define QLA82XX_CRB_I2C0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_I2C0)
+#define QLA82XX_CRB_I2C1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_I2C1)
+#define QLA82XX_CRB_OCM0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_OCM0)
+#define QLA82XX_CRB_SMB \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SMB)
+#define QLA82XX_CRB_MAX \
+ QLA82XX_PCI_CRB_WINDOW(64)
+
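/*
 * Illustrative sketch, not part of the patch: each window above is a fixed
 * 1 MB slice of the CRB aperture (QLA82XX_PCI_CRBSPACE, defined further down
 * in this header), so the window macros reduce to simple arithmetic, e.g.
 * QLA82XX_CRB_CAM = 0x06000000 + 34 * 0x00100000 = 0x08200000.  The helper
 * name below is hypothetical.
 */
static inline unsigned long qla82xx_crb_window_base_sketch(unsigned int px_map_idx)
{
	return QLA82XX_PCI_CRBSPACE +
	    (unsigned long)px_map_idx * QLA82XX_PCI_CRB_WINDOWSIZE;
}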
+/*
+ * ====================== BASE ADDRESSES ON-CHIP ======================
+ * Base addresses of major components on-chip.
+ * ====================== BASE ADDRESSES ON-CHIP ======================
+ */
+#define QLA82XX_ADDR_DDR_NET (0x0000000000000000ULL)
+#define QLA82XX_ADDR_DDR_NET_MAX (0x000000000fffffffULL)
+
+/* Imbus address bit used to indicate a host address. This bit is
+ * eliminated by the pcie bar and bar select before presentation
+ * over pcie. */
+/* host memory via IMBUS */
+#define QLA82XX_P2_ADDR_PCIE (0x0000000800000000ULL)
+#define QLA82XX_P3_ADDR_PCIE (0x0000008000000000ULL)
+#define QLA82XX_ADDR_PCIE_MAX (0x0000000FFFFFFFFFULL)
+#define QLA82XX_ADDR_OCM0 (0x0000000200000000ULL)
+#define QLA82XX_ADDR_OCM0_MAX (0x00000002000fffffULL)
+#define QLA82XX_ADDR_OCM1 (0x0000000200400000ULL)
+#define QLA82XX_ADDR_OCM1_MAX (0x00000002004fffffULL)
+#define QLA82XX_ADDR_QDR_NET (0x0000000300000000ULL)
+
+#define QLA82XX_P2_ADDR_QDR_NET_MAX (0x00000003001fffffULL)
+#define QLA82XX_P3_ADDR_QDR_NET_MAX (0x0000000303ffffffULL)
+
+#define QLA82XX_PCI_CRBSPACE (unsigned long)0x06000000
+#define QLA82XX_PCI_DIRECT_CRB (unsigned long)0x04400000
+#define QLA82XX_PCI_CAMQM (unsigned long)0x04800000
+#define QLA82XX_PCI_CAMQM_MAX (unsigned long)0x04ffffff
+#define QLA82XX_PCI_DDR_NET (unsigned long)0x00000000
+#define QLA82XX_PCI_QDR_NET (unsigned long)0x04000000
+#define QLA82XX_PCI_QDR_NET_MAX (unsigned long)0x043fffff
+
+/*
+ * Register offsets for MN
+ */
+#define MIU_CONTROL (0x000)
+#define MIU_TAG (0x004)
+#define MIU_TEST_AGT_CTRL (0x090)
+#define MIU_TEST_AGT_ADDR_LO (0x094)
+#define MIU_TEST_AGT_ADDR_HI (0x098)
+#define MIU_TEST_AGT_WRDATA_LO (0x0a0)
+#define MIU_TEST_AGT_WRDATA_HI (0x0a4)
+#define MIU_TEST_AGT_WRDATA(i) (0x0a0+(4*(i)))
+#define MIU_TEST_AGT_RDDATA_LO (0x0a8)
+#define MIU_TEST_AGT_RDDATA_HI (0x0ac)
+#define MIU_TEST_AGT_RDDATA(i) (0x0a8+(4*(i)))
+#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8
+#define MIU_TEST_AGT_UPPER_ADDR(off) (0)
+
+/* MIU_TEST_AGT_CTRL flags. work for SIU as well */
+#define MIU_TA_CTL_START 1
+#define MIU_TA_CTL_ENABLE 2
+#define MIU_TA_CTL_WRITE 4
+#define MIU_TA_CTL_BUSY 8
+
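/*
 * Illustrative sketch, not part of the patch: the MIU_TA_CTL_* flags above
 * are typically used as a small handshake - program the address, set
 * START | ENABLE, poll until BUSY clears, then read RDDATA.  The helper
 * name, the 'mn_base' register-base argument and the qla82xx_rd_32()/
 * qla82xx_wr_32() accessors are assumptions here; MAX_CTL_CHECK is defined
 * later in this header.
 */
static int qla82xx_miu_read64_sketch(struct qla_hw_data *ha,
    uint32_t mn_base, uint32_t addr, uint64_t *data)
{
	uint32_t lo, hi;
	int i;

	qla82xx_wr_32(ha, mn_base + MIU_TEST_AGT_ADDR_LO,
	    addr & MIU_TEST_AGT_ADDR_MASK);
	qla82xx_wr_32(ha, mn_base + MIU_TEST_AGT_ADDR_HI, 0);
	qla82xx_wr_32(ha, mn_base + MIU_TEST_AGT_CTRL, MIU_TA_CTL_ENABLE);
	qla82xx_wr_32(ha, mn_base + MIU_TEST_AGT_CTRL,
	    MIU_TA_CTL_START | MIU_TA_CTL_ENABLE);

	for (i = 0; i < MAX_CTL_CHECK; i++) {
		if (!(qla82xx_rd_32(ha, mn_base + MIU_TEST_AGT_CTRL) &
		    MIU_TA_CTL_BUSY))
			break;
	}
	if (i == MAX_CTL_CHECK)
		return -EIO;

	lo = qla82xx_rd_32(ha, mn_base + MIU_TEST_AGT_RDDATA_LO);
	hi = qla82xx_rd_32(ha, mn_base + MIU_TEST_AGT_RDDATA_HI);
	*data = ((uint64_t)hi << 32) | lo;
	return 0;
}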
+/*CAM RAM */
+# define QLA82XX_CAM_RAM_BASE (QLA82XX_CRB_CAM + 0x02000)
+# define QLA82XX_CAM_RAM(reg) (QLA82XX_CAM_RAM_BASE + (reg))
+
+#define QLA82XX_PEG_TUNE_MN_SPD_ZEROED 0x80000000
+#define QLA82XX_BOOT_LOADER_MN_ISSUE 0xff00ffff
+#define QLA82XX_PORT_MODE_ADDR (QLA82XX_CAM_RAM(0x24))
+#define QLA82XX_PEG_HALT_STATUS1 (QLA82XX_CAM_RAM(0xa8))
+#define QLA82XX_PEG_HALT_STATUS2 (QLA82XX_CAM_RAM(0xac))
+#define QLA82XX_PEG_ALIVE_COUNTER (QLA82XX_CAM_RAM(0xb0))
+
+#define QLA82XX_CAMRAM_DB1 (QLA82XX_CAM_RAM(0x1b8))
+#define QLA82XX_CAMRAM_DB2 (QLA82XX_CAM_RAM(0x1bc))
+
+#define HALT_STATUS_UNRECOVERABLE 0x80000000
+#define HALT_STATUS_RECOVERABLE 0x40000000
+
+/* Driver Coexistence Defines */
+#define QLA82XX_CRB_DRV_ACTIVE (QLA82XX_CAM_RAM(0x138))
+#define QLA82XX_CRB_DEV_STATE (QLA82XX_CAM_RAM(0x140))
+#define QLA82XX_CRB_DEV_PART_INFO (QLA82XX_CAM_RAM(0x14c))
+#define QLA82XX_CRB_DRV_IDC_VERSION (QLA82XX_CAM_RAM(0x174))
+#define QLA82XX_CRB_DRV_STATE (QLA82XX_CAM_RAM(0x144))
+#define QLA82XX_CRB_DRV_SCRATCH (QLA82XX_CAM_RAM(0x148))
+
+/* Every driver should use these Device States */
+#define QLA82XX_DEV_COLD 1
+#define QLA82XX_DEV_INITIALIZING 2
+#define QLA82XX_DEV_READY 3
+#define QLA82XX_DEV_NEED_RESET 4
+#define QLA82XX_DEV_NEED_QUIESCENT 5
+#define QLA82XX_DEV_FAILED 6
+#define QLA82XX_DEV_QUIESCENT 7
+
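/*
 * Illustrative sketch, not part of the patch: a hypothetical helper giving
 * human-readable names to the IDC device states above, handy when reading
 * the "HW State:" log messages elsewhere in this patch.
 */
static inline const char *qla82xx_dev_state_str_sketch(uint32_t dev_state)
{
	switch (dev_state) {
	case QLA82XX_DEV_COLD:		return "COLD";
	case QLA82XX_DEV_INITIALIZING:	return "INITIALIZING";
	case QLA82XX_DEV_READY:		return "READY";
	case QLA82XX_DEV_NEED_RESET:	return "NEED RESET";
	case QLA82XX_DEV_NEED_QUIESCENT: return "NEED QUIESCENT";
	case QLA82XX_DEV_QUIESCENT:	return "QUIESCENT";
	case QLA82XX_DEV_FAILED:	return "FAILED";
	default:			return "UNKNOWN";
	}
}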
+#define QLA82XX_IDC_VERSION 1
+#define QLA82XX_ROM_DEV_INIT_TIMEOUT 30
+#define QLA82XX_ROM_DRV_RESET_ACK_TIMEOUT 10
+
+#define QLA82XX_ROM_LOCK_ID (QLA82XX_CAM_RAM(0x100))
+#define QLA82XX_CRB_WIN_LOCK_ID (QLA82XX_CAM_RAM(0x124))
+#define QLA82XX_FW_VERSION_MAJOR (QLA82XX_CAM_RAM(0x150))
+#define QLA82XX_FW_VERSION_MINOR (QLA82XX_CAM_RAM(0x154))
+#define QLA82XX_FW_VERSION_SUB (QLA82XX_CAM_RAM(0x158))
+#define QLA82XX_PCIE_REG(reg) (QLA82XX_CRB_PCIE + (reg))
+
+#define PCIE_CHICKEN3 (0x120c8)
+#define PCIE_SETUP_FUNCTION (0x12040)
+#define PCIE_SETUP_FUNCTION2 (0x12048)
+
+#define QLA82XX_PCIX_PS_REG(reg) (QLA82XX_CRB_PCIX_MD + (reg))
+#define QLA82XX_PCIX_PS2_REG(reg) (QLA82XX_CRB_PCIE2 + (reg))
+
+#define PCIE_SEM2_LOCK (0x1c010) /* Flash lock */
+#define PCIE_SEM2_UNLOCK (0x1c014) /* Flash unlock */
+#define PCIE_SEM5_LOCK (0x1c028) /* Coexistence lock */
+#define PCIE_SEM5_UNLOCK (0x1c02c) /* Coexistence unlock */
+#define PCIE_SEM7_LOCK (0x1c038) /* crb win lock */
+#define PCIE_SEM7_UNLOCK (0x1c03c) /* crb win unlock */
+
+/* Different driver states */
+#define QLA82XX_DRVST_NOT_RDY 0
+#define QLA82XX_DRVST_RST_RDY 1
+#define QLA82XX_DRVST_QSNT_RDY 2
+
+/*
+ * The PCI VendorID and DeviceID for our board.
+ */
+#define PCI_DEVICE_ID_QLOGIC_ISP8021 0x8021
+
+#define QLA82XX_MSIX_TBL_SPACE 8192
+#define QLA82XX_PCI_REG_MSIX_TBL 0x44
+#define QLA82XX_PCI_MSIX_CONTROL 0x40
+
+struct crb_128M_2M_sub_block_map {
+ unsigned valid;
+ unsigned start_128M;
+ unsigned end_128M;
+ unsigned start_2M;
+};
+
+struct crb_128M_2M_block_map {
+ struct crb_128M_2M_sub_block_map sub_block[16];
+};
+
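/*
 * Illustrative sketch, not part of the patch: crb_128M_2M_block_map describes
 * how legacy 128 MB CRB offsets fall inside the 2 MB PCI window.  A
 * hypothetical lookup against a single sub-block entry might look like the
 * helper below (ADDR_ERROR is defined just after these structs); the driver's
 * actual translation is more involved.
 */
static inline unsigned long qla82xx_128M_to_2M_sketch(
    const struct crb_128M_2M_sub_block_map *m, unsigned long off_128M)
{
	if (!m->valid || off_128M < m->start_128M || off_128M > m->end_128M)
		return ADDR_ERROR;
	return m->start_2M + (off_128M - m->start_128M);
}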
+struct crb_addr_pair {
+ long addr;
+ long data;
+};
+
+#define ADDR_ERROR ((unsigned long) 0xffffffff)
+#define MAX_CTL_CHECK 1000
+
+/***************************************************************************
+ * PCI related defines.
+ **************************************************************************/
+
+/*
+ * Interrupt related defines.
+ */
+#define PCIX_TARGET_STATUS (0x10118)
+#define PCIX_TARGET_STATUS_F1 (0x10160)
+#define PCIX_TARGET_STATUS_F2 (0x10164)
+#define PCIX_TARGET_STATUS_F3 (0x10168)
+#define PCIX_TARGET_STATUS_F4 (0x10360)
+#define PCIX_TARGET_STATUS_F5 (0x10364)
+#define PCIX_TARGET_STATUS_F6 (0x10368)
+#define PCIX_TARGET_STATUS_F7 (0x1036c)
+
+#define PCIX_TARGET_MASK (0x10128)
+#define PCIX_TARGET_MASK_F1 (0x10170)
+#define PCIX_TARGET_MASK_F2 (0x10174)
+#define PCIX_TARGET_MASK_F3 (0x10178)
+#define PCIX_TARGET_MASK_F4 (0x10370)
+#define PCIX_TARGET_MASK_F5 (0x10374)
+#define PCIX_TARGET_MASK_F6 (0x10378)
+#define PCIX_TARGET_MASK_F7 (0x1037c)
+
+/*
+ * Message Signaled Interrupts
+ */
+#define PCIX_MSI_F0 (0x13000)
+#define PCIX_MSI_F1 (0x13004)
+#define PCIX_MSI_F2 (0x13008)
+#define PCIX_MSI_F3 (0x1300c)
+#define PCIX_MSI_F4 (0x13010)
+#define PCIX_MSI_F5 (0x13014)
+#define PCIX_MSI_F6 (0x13018)
+#define PCIX_MSI_F7 (0x1301c)
+#define PCIX_MSI_F(FUNC) (0x13000 + ((FUNC) * 4))
+#define PCIX_INT_VECTOR (0x10100)
+#define PCIX_INT_MASK (0x10104)
+
+/*
+ * Interrupt state machine and other bits.
+ */
+#define PCIE_MISCCFG_RC (0x1206c)
+
+#define ISR_INT_TARGET_STATUS \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS))
+#define ISR_INT_TARGET_STATUS_F1 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F1))
+#define ISR_INT_TARGET_STATUS_F2 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F2))
+#define ISR_INT_TARGET_STATUS_F3 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F3))
+#define ISR_INT_TARGET_STATUS_F4 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F4))
+#define ISR_INT_TARGET_STATUS_F5 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F5))
+#define ISR_INT_TARGET_STATUS_F6 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F6))
+#define ISR_INT_TARGET_STATUS_F7 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F7))
+
+#define ISR_INT_TARGET_MASK \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK))
+#define ISR_INT_TARGET_MASK_F1 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F1))
+#define ISR_INT_TARGET_MASK_F2 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F2))
+#define ISR_INT_TARGET_MASK_F3 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F3))
+#define ISR_INT_TARGET_MASK_F4 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F4))
+#define ISR_INT_TARGET_MASK_F5 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F5))
+#define ISR_INT_TARGET_MASK_F6 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F6))
+#define ISR_INT_TARGET_MASK_F7 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F7))
+
+#define ISR_INT_VECTOR \
+ (QLA82XX_PCIX_PS_REG(PCIX_INT_VECTOR))
+#define ISR_INT_MASK \
+ (QLA82XX_PCIX_PS_REG(PCIX_INT_MASK))
+#define ISR_INT_STATE_REG \
+ (QLA82XX_PCIX_PS_REG(PCIE_MISCCFG_RC))
+
+#define ISR_MSI_INT_TRIGGER(FUNC) \
+ (QLA82XX_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
+
+#define ISR_IS_LEGACY_INTR_IDLE(VAL) (((VAL) & 0x300) == 0)
+#define ISR_IS_LEGACY_INTR_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
+
+/*
+ * PCI Interrupt Vector Values.
+ */
+#define PCIX_INT_VECTOR_BIT_F0 0x0080
+#define PCIX_INT_VECTOR_BIT_F1 0x0100
+#define PCIX_INT_VECTOR_BIT_F2 0x0200
+#define PCIX_INT_VECTOR_BIT_F3 0x0400
+#define PCIX_INT_VECTOR_BIT_F4 0x0800
+#define PCIX_INT_VECTOR_BIT_F5 0x1000
+#define PCIX_INT_VECTOR_BIT_F6 0x2000
+#define PCIX_INT_VECTOR_BIT_F7 0x4000
+
+struct qla82xx_legacy_intr_set {
+ uint32_t int_vec_bit;
+ uint32_t tgt_status_reg;
+ uint32_t tgt_mask_reg;
+ uint32_t pci_int_reg;
+};
+
+#define QLA82XX_LEGACY_INTR_CONFIG \
+{ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(0) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(1) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(2) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(3) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(4) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(5) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F6, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(6) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(7) }, \
+}
+
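/*
 * Illustrative sketch, not part of the patch: how the per-function table
 * built by QLA82XX_LEGACY_INTR_CONFIG might be consulted from an INTx
 * handler.  Indexing by PCI function number and the qla82xx_rd_32() helper
 * are assumptions here, not necessarily the driver's actual ISR logic.
 */
static int qla82xx_legacy_intr_pending_sketch(struct qla_hw_data *ha,
    unsigned int pci_func)
{
	static const struct qla82xx_legacy_intr_set legacy_intr[] =
	    QLA82XX_LEGACY_INTR_CONFIG;
	const struct qla82xx_legacy_intr_set *intr = &legacy_intr[pci_func];
	uint32_t stat;

	stat = qla82xx_rd_32(ha, ISR_INT_VECTOR);
	if (!(stat & intr->int_vec_bit))
		return 0;		/* vector bit for this function not set */

	stat = qla82xx_rd_32(ha, ISR_INT_STATE_REG);
	return ISR_IS_LEGACY_INTR_TRIGGERED(stat);
}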
+#define BOOTLD_START 0x10000
+#define IMAGE_START 0x100000
+#define FLASH_ADDR_START 0x43000
+
+/* Magic number to let user know flash is programmed */
+#define QLA82XX_BDINFO_MAGIC 0x12345678
+#define FW_SIZE_OFFSET (0x3e840c)
+
+#define QLA82XX_IS_REVISION_P3PLUS(_rev_) ((_rev_) >= 0x50)
+#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x0b0)
+#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x0b4)
+
+#ifndef readq
+static inline u64 readq(void __iomem *addr)
+{
+ return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
+}
+#endif
+
+#ifndef writeq
+static inline void writeq(u64 val, void __iomem *addr)
+{
+ writel(((u32) (val)), (addr));
+ writel(((u32) (val >> 32)), (addr + 4));
+}
+#endif
+
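/*
 * Illustrative note, not part of the patch: on 32-bit hosts the fallbacks
 * above split each 64-bit MMIO access into two 32-bit accesses (low dword at
 * addr, high dword at addr + 4), so a readq()/writeq() through them is not
 * atomic and callers must serialize access to the register themselves.
 * Hypothetical usage:
 */
static inline u64 qla82xx_read_reg64_sketch(void __iomem *addr)
{
	return readq(addr);	/* may issue two readl()s under the hood */
}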
+/* Request and response queue size */
+#define REQUEST_ENTRY_CNT_82XX 128 /* Number of request entries. */
+#define RESPONSE_ENTRY_CNT_82XX 128 /* Number of response entries.*/
+
+/*
+ * ISP 8021 I/O Register Set structure definitions.
+ */
+struct device_reg_82xx {
+ uint32_t req_q_out[64]; /* Request Queue out-Pointer (64 * 4) */
+ uint32_t rsp_q_in[64]; /* Response Queue In-Pointer. */
+ uint32_t rsp_q_out[64]; /* Response Queue Out-Pointer. */
+
+ uint16_t mailbox_in[32]; /* Mail box In registers */
+ uint16_t unused_1[32];
+ uint32_t hint; /* Host interrupt register */
+#define HINT_MBX_INT_PENDING BIT_0
+ uint16_t unused_2[62];
+ uint16_t mailbox_out[32]; /* Mail box Out registers */
+ uint32_t unused_3[48];
+
+ uint32_t host_status; /* host status */
+#define HSRX_RISC_INT BIT_15 /* RISC to Host interrupt. */
+#define HSRX_RISC_PAUSED BIT_8 /* RISC Paused. */
+ uint32_t host_int; /* Interrupt status. */
+#define ISRX_NX_RISC_INT BIT_0 /* RISC interrupt. */
+};
+
+struct fcp_cmnd {
+ struct scsi_lun lun;
+ uint8_t crn;
+ uint8_t task_attribute;
+ uint8_t task_managment;
+ uint8_t additional_cdb_len;
+ uint8_t cdb[260]; /* 256 for CDB len and 4 for FCP_DL */
+};
+
+struct dsd_dma {
+ struct list_head list;
+ dma_addr_t dsd_list_dma;
+ void *dsd_addr;
+};
+
+#define QLA_DSDS_PER_IOCB 37
+#define QLA_DSD_SIZE 12
+struct ct6_dsd {
+ uint16_t fcp_cmnd_len;
+ dma_addr_t fcp_cmnd_dma;
+ struct fcp_cmnd *fcp_cmnd;
+ int dsd_use_cnt;
+ struct list_head dsd_list;
+};
+
+#define MBC_TOGGLE_INTR 0x10
+
+/* Flash offset */
+#define FLT_REG_BOOTLOAD_82XX 0x72
+#define FLT_REG_BOOT_CODE_82XX 0x78
+#define FLT_REG_FW_82XX 0x74
+#define FLT_REG_GOLD_FW_82XX 0x75
+#define FLT_REG_VPD_82XX 0x81
+
+#define FA_VPD_SIZE_82XX 0x400
+
+#define FA_FLASH_LAYOUT_ADDR_82 0xFC400
+
+/******************************************************************************
+*
+* Definitions specific to M25P flash
+*
+*******************************************************************************
+* Instructions
+*/
+#define M25P_INSTR_WREN 0x06
+#define M25P_INSTR_WRDI 0x04
+#define M25P_INSTR_RDID 0x9f
+#define M25P_INSTR_RDSR 0x05
+#define M25P_INSTR_WRSR 0x01
+#define M25P_INSTR_READ 0x03
+#define M25P_INSTR_FAST_READ 0x0b
+#define M25P_INSTR_PP 0x02
+#define M25P_INSTR_SE 0xd8
+#define M25P_INSTR_BE 0xc7
+#define M25P_INSTR_DP 0xb9
+#define M25P_INSTR_RES 0xab
+
+#endif
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 48c37e38ed01..b1adeb71a8b0 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -29,6 +29,11 @@ char qla2x00_version_str[40];
*/
static struct kmem_cache *srb_cachep;
+/*
+ * CT6 CTX allocation cache
+ */
+static struct kmem_cache *ctx_cachep;
+
int ql2xlogintimeout = 20;
module_param(ql2xlogintimeout, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xlogintimeout,
@@ -65,13 +70,19 @@ MODULE_PARM_DESC(ql2xextended_error_logging,
"Option to enable extended error logging, "
"Default is 0 - no logging. 1 - log errors.");
+int ql2xshiftctondsd = 6;
+module_param(ql2xshiftctondsd, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xshiftctondsd,
+ "Set to control shifting of command type processing "
+ "based on total number of SG elements.");
+
static void qla2x00_free_device(scsi_qla_host_t *);
int ql2xfdmienable=1;
module_param(ql2xfdmienable, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xfdmienable,
- "Enables FDMI registratons "
- "Default is 0 - no FDMI. 1 - perfom FDMI.");
+ "Enables FDMI registrations. "
+ "0 - no FDMI. Default is 1 - perform FDMI.");
#define MAX_Q_DEPTH 32
static int ql2xmaxqdepth = MAX_Q_DEPTH;
@@ -114,6 +125,21 @@ MODULE_PARM_DESC(ql2xetsenable,
"Enables firmware ETS burst."
"Default is 0 - skip ETS enablement.");
+int ql2xdbwr;
+module_param(ql2xdbwr, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xdbwr,
+ "Option to specify scheme for request queue posting\n"
+ " 0 -- Regular doorbell.\n"
+ " 1 -- CAMRAM doorbell (faster).\n");
+
+int ql2xdontresethba;
+module_param(ql2xdontresethba, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xdontresethba,
+ "Option to specify reset behaviour\n"
+ " 0 (Default) -- Reset on failure.\n"
+ " 1 -- Do not reset on failure.\n");
+
+
/*
* SCSI host template entry points
*/
@@ -183,6 +209,10 @@ qla2x00_start_timer(scsi_qla_host_t *vha, void *func, unsigned long interval)
static inline void
qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
{
+ /* Currently used for 82XX only. */
+ if (vha->device_flags & DFLG_DEV_FAILED)
+ return;
+
mod_timer(&vha->timer, jiffies + interval * HZ);
}
@@ -739,7 +769,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
if (sp == NULL)
continue;
- if (sp->ctx)
+ if ((sp->ctx) && !(sp->flags & SRB_FCP_CMND_DMA_VALID))
continue;
if (sp->cmd != cmd)
continue;
@@ -834,6 +864,24 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
return status;
}
+void qla82xx_wait_for_pending_commands(scsi_qla_host_t *vha)
+{
+ int cnt;
+ srb_t *sp;
+ struct req_que *req = vha->req;
+
+ DEBUG2(qla_printk(KERN_INFO, vha->hw,
+ "Waiting for pending commands\n"));
+ for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ sp = req->outstanding_cmds[cnt];
+ if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
+ sp, WAIT_HOST) == QLA_SUCCESS) {
+ DEBUG2(qla_printk(KERN_INFO, vha->hw,
+ "Done wait for pending commands\n"));
+ }
+ }
+}
+
static char *reset_errors[] = {
"HBA not online",
"HBA not ready",
@@ -1020,11 +1068,19 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
if (qla2x00_vp_abort_isp(vha))
goto eh_host_reset_lock;
} else {
+ if (IS_QLA82XX(vha->hw)) {
+ if (!qla82xx_fcoe_ctx_reset(vha)) {
+ /* Ctx reset success */
+ ret = SUCCESS;
+ goto eh_host_reset_lock;
+ }
+ /* fall thru if ctx reset failed */
+ }
if (ha->wq)
flush_workqueue(ha->wq);
set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
- if (qla2x00_abort_isp(base_vha)) {
+ if (ha->isp_ops->abort_isp(base_vha)) {
clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
/* failed. schedule dpc to try */
set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
@@ -1078,7 +1134,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
}
}
- if (ha->flags.enable_lip_full_login && !IS_QLA81XX(ha)) {
+ if (ha->flags.enable_lip_full_login && !IS_QLA8XXX_TYPE(ha)) {
ret = qla2x00_full_login_lip(vha);
if (ret != QLA_SUCCESS) {
DEBUG2_3(printk("%s(%ld): failed: "
@@ -1125,23 +1181,32 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
sp = req->outstanding_cmds[cnt];
if (sp) {
req->outstanding_cmds[cnt] = NULL;
- if (!sp->ctx) {
+ if (!sp->ctx ||
+ (sp->flags & SRB_FCP_CMND_DMA_VALID)) {
sp->cmd->result = res;
qla2x00_sp_compl(ha, sp);
} else {
ctx = sp->ctx;
- if (ctx->type == SRB_LOGIN_CMD || ctx->type == SRB_LOGOUT_CMD) {
+ if (ctx->type == SRB_LOGIN_CMD ||
+ ctx->type == SRB_LOGOUT_CMD) {
del_timer_sync(&ctx->timer);
ctx->free(sp);
} else {
- struct srb_bsg* sp_bsg = (struct srb_bsg*)sp->ctx;
- if (sp_bsg->bsg_job->request->msgcode == FC_BSG_HST_CT)
+ struct srb_bsg *sp_bsg =
+ (struct srb_bsg *)sp->ctx;
+ struct fc_bsg_job *bsg_job =
+ sp_bsg->bsg_job;
+
+ if (bsg_job->request->msgcode
+ == FC_BSG_HST_CT)
kfree(sp->fcport);
- sp_bsg->bsg_job->req->errors = 0;
- sp_bsg->bsg_job->reply->result = res;
- sp_bsg->bsg_job->job_done(sp_bsg->bsg_job);
+ bsg_job->req->errors = 0;
+ bsg_job->reply->result = res;
+ bsg_job->job_done(
+ sp_bsg->bsg_job);
kfree(sp->ctx);
- mempool_free(sp, ha->srb_mempool);
+ mempool_free(sp,
+ ha->srb_mempool);
}
}
}
@@ -1379,6 +1444,7 @@ static struct isp_operations qla2100_isp_ops = {
.write_optrom = qla2x00_write_optrom_data,
.get_flash_version = qla2x00_get_flash_version,
.start_scsi = qla2x00_start_scsi,
+ .abort_isp = qla2x00_abort_isp,
};
static struct isp_operations qla2300_isp_ops = {
@@ -1414,6 +1480,7 @@ static struct isp_operations qla2300_isp_ops = {
.write_optrom = qla2x00_write_optrom_data,
.get_flash_version = qla2x00_get_flash_version,
.start_scsi = qla2x00_start_scsi,
+ .abort_isp = qla2x00_abort_isp,
};
static struct isp_operations qla24xx_isp_ops = {
@@ -1449,6 +1516,7 @@ static struct isp_operations qla24xx_isp_ops = {
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_start_scsi,
+ .abort_isp = qla2x00_abort_isp,
};
static struct isp_operations qla25xx_isp_ops = {
@@ -1484,6 +1552,7 @@ static struct isp_operations qla25xx_isp_ops = {
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_start_scsi,
+ .abort_isp = qla2x00_abort_isp,
};
static struct isp_operations qla81xx_isp_ops = {
@@ -1519,6 +1588,43 @@ static struct isp_operations qla81xx_isp_ops = {
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_start_scsi,
+ .abort_isp = qla2x00_abort_isp,
+};
+
+static struct isp_operations qla82xx_isp_ops = {
+ .pci_config = qla82xx_pci_config,
+ .reset_chip = qla82xx_reset_chip,
+ .chip_diag = qla24xx_chip_diag,
+ .config_rings = qla82xx_config_rings,
+ .reset_adapter = qla24xx_reset_adapter,
+ .nvram_config = qla81xx_nvram_config,
+ .update_fw_options = qla24xx_update_fw_options,
+ .load_risc = qla82xx_load_risc,
+ .pci_info_str = qla82xx_pci_info_str,
+ .fw_version_str = qla24xx_fw_version_str,
+ .intr_handler = qla82xx_intr_handler,
+ .enable_intrs = qla82xx_enable_intrs,
+ .disable_intrs = qla82xx_disable_intrs,
+ .abort_command = qla24xx_abort_command,
+ .target_reset = qla24xx_abort_target,
+ .lun_reset = qla24xx_lun_reset,
+ .fabric_login = qla24xx_login_fabric,
+ .fabric_logout = qla24xx_fabric_logout,
+ .calc_req_entries = NULL,
+ .build_iocbs = NULL,
+ .prep_ms_iocb = qla24xx_prep_ms_iocb,
+ .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
+ .read_nvram = qla24xx_read_nvram_data,
+ .write_nvram = qla24xx_write_nvram_data,
+ .fw_dump = qla24xx_fw_dump,
+ .beacon_on = qla24xx_beacon_on,
+ .beacon_off = qla24xx_beacon_off,
+ .beacon_blink = qla24xx_beacon_blink,
+ .read_optrom = qla82xx_read_optrom_data,
+ .write_optrom = qla82xx_write_optrom_data,
+ .get_flash_version = qla24xx_get_flash_version,
+ .start_scsi = qla82xx_start_scsi,
+ .abort_isp = qla82xx_abort_isp,
};
static inline void
@@ -1607,10 +1713,22 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
ha->device_type |= DT_IIDMA;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
+ case PCI_DEVICE_ID_QLOGIC_ISP8021:
+ ha->device_type |= DT_ISP8021;
+ ha->device_type |= DT_ZIO_SUPPORTED;
+ ha->device_type |= DT_FWI2;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+ /* Initialize 82XX ISP flags */
+ qla82xx_init_flags(ha);
+ break;
}
- /* Get adapter physical port no from interrupt pin register. */
- pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
+ if (IS_QLA82XX(ha))
+ ha->port_no = !(ha->portnum & 1);
+ else
+ /* Get adapter physical port no from interrupt pin register. */
+ pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
+
if (ha->port_no & 1)
ha->flags.port0 = 1;
else
@@ -1624,6 +1742,9 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
uint16_t msix;
int cpus;
+ if (IS_QLA82XX(ha))
+ return qla82xx_iospace_config(ha);
+
if (pci_request_selected_regions(ha->pdev, ha->bars,
QLA2XXX_DRIVER_NAME)) {
qla_printk(KERN_WARNING, ha,
@@ -1767,7 +1888,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 ||
- pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001) {
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021) {
bars = pci_select_bars(pdev, IORESOURCE_MEM);
mem_only = 1;
}
@@ -1897,6 +2019,19 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
ha->nvram_conf_off = ~0;
ha->nvram_data_off = ~0;
+ } else if (IS_QLA82XX(ha)) {
+ ha->mbx_count = MAILBOX_REGISTER_COUNT;
+ req_length = REQUEST_ENTRY_CNT_82XX;
+ rsp_length = RESPONSE_ENTRY_CNT_82XX;
+ ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
+ ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
+ ha->gid_list_info_size = 8;
+ ha->optrom_size = OPTROM_SIZE_82XX;
+ ha->isp_ops = &qla82xx_isp_ops;
+ ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
+ ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
+ ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
+ ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
}
mutex_init(&ha->vport_lock);
@@ -1969,6 +2104,7 @@ que_init:
" pointers\n");
goto probe_init_failed;
}
+
ha->rsp_q_map[0] = rsp;
ha->req_q_map[0] = req;
rsp->req = req;
@@ -1987,6 +2123,12 @@ que_init:
rsp->rsp_q_out = &ha->mqiobase->isp25mq.rsp_q_out;
}
+ if (IS_QLA82XX(ha)) {
+ req->req_q_out = &ha->iobase->isp82.req_q_out[0];
+ rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0];
+ rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0];
+ }
+
if (qla2x00_initialize_adapter(base_vha)) {
qla_printk(KERN_WARNING, ha,
"Failed to initialize adapter\n");
@@ -1995,6 +2137,14 @@ que_init:
"Adapter flags %x.\n",
base_vha->host_no, base_vha->device_flags));
+ if (IS_QLA82XX(ha)) {
+ qla82xx_idc_lock(ha);
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+ QLA82XX_DEV_FAILED);
+ qla82xx_idc_unlock(ha);
+ qla_printk(KERN_INFO, ha, "HW State: FAILED\n");
+ }
+
ret = -ENODEV;
goto probe_failed;
}
@@ -2033,6 +2183,8 @@ skip_dpc:
DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
base_vha->host_no, ha));
+ ha->isp_ops->enable_intrs(ha);
+
ret = scsi_add_host(host, &pdev->dev);
if (ret)
goto probe_failed;
@@ -2040,8 +2192,6 @@ skip_dpc:
base_vha->flags.init_done = 1;
base_vha->flags.online = 1;
- ha->isp_ops->enable_intrs(ha);
-
scsi_scan_host(host);
qla2x00_alloc_sysfs_attr(base_vha);
@@ -2083,9 +2233,17 @@ probe_failed:
scsi_host_put(base_vha->host);
probe_hw_failed:
- if (ha->iobase)
- iounmap(ha->iobase);
-
+ if (IS_QLA82XX(ha)) {
+ qla82xx_idc_lock(ha);
+ qla82xx_clear_drv_active(ha);
+ qla82xx_idc_unlock(ha);
+ iounmap((device_reg_t __iomem *)ha->nx_pcibase);
+ if (!ql2xdbwr)
+ iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr);
+ } else {
+ if (ha->iobase)
+ iounmap(ha->iobase);
+ }
pci_release_selected_regions(ha->pdev, ha->bars);
kfree(ha);
ha = NULL;
@@ -2152,11 +2310,17 @@ qla2x00_remove_one(struct pci_dev *pdev)
scsi_host_put(base_vha->host);
- if (ha->iobase)
- iounmap(ha->iobase);
+ if (IS_QLA82XX(ha)) {
+ iounmap((device_reg_t __iomem *)ha->nx_pcibase);
+ if (!ql2xdbwr)
+ iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr);
+ } else {
+ if (ha->iobase)
+ iounmap(ha->iobase);
- if (ha->mqiobase)
- iounmap(ha->mqiobase);
+ if (ha->mqiobase)
+ iounmap(ha->mqiobase);
+ }
pci_release_selected_regions(ha->pdev, ha->bars);
kfree(ha);
@@ -2205,8 +2369,10 @@ qla2x00_free_device(scsi_qla_host_t *vha)
vha->flags.online = 0;
/* turn-off interrupts on the card */
- if (ha->interrupts_on)
+ if (ha->interrupts_on) {
+ vha->flags.init_done = 0;
ha->isp_ops->disable_intrs(ha);
+ }
qla2x00_free_irqs(vha);
@@ -2351,10 +2517,25 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
if (!ha->srb_mempool)
goto fail_free_gid_list;
+ if (IS_QLA82XX(ha)) {
+ /* Allocate cache for CT6 Ctx. */
+ if (!ctx_cachep) {
+ ctx_cachep = kmem_cache_create("qla2xxx_ctx",
+ sizeof(struct ct6_dsd), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!ctx_cachep)
+ goto fail_free_gid_list;
+ }
+ ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
+ ctx_cachep);
+ if (!ha->ctx_mempool)
+ goto fail_free_srb_mempool;
+ }
+
/* Get memory for cached NVRAM */
ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
if (!ha->nvram)
- goto fail_free_srb_mempool;
+ goto fail_free_ctx_mempool;
snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME,
ha->pdev->device);
@@ -2363,6 +2544,24 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
if (!ha->s_dma_pool)
goto fail_free_nvram;
+ if (IS_QLA82XX(ha)) {
+ ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev,
+ DSD_LIST_DMA_POOL_SIZE, 8, 0);
+ if (!ha->dl_dma_pool) {
+ qla_printk(KERN_WARNING, ha,
+ "Memory Allocation failed - dl_dma_pool\n");
+ goto fail_s_dma_pool;
+ }
+
+ ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev,
+ FCP_CMND_DMA_POOL_SIZE, 8, 0);
+ if (!ha->fcp_cmnd_dma_pool) {
+ qla_printk(KERN_WARNING, ha,
+ "Memory Allocation failed - fcp_cmnd_dma_pool\n");
+ goto fail_dl_dma_pool;
+ }
+ }
+
/* Allocate memory for SNS commands */
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
/* Get consistent memory allocated for SNS commands */
@@ -2429,13 +2628,15 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
ha->npiv_info = NULL;
/* Get consistent memory allocated for EX-INIT-CB. */
- if (IS_QLA81XX(ha)) {
+ if (IS_QLA8XXX_TYPE(ha)) {
ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
&ha->ex_init_cb_dma);
if (!ha->ex_init_cb)
goto fail_ex_init_cb;
}
+ INIT_LIST_HEAD(&ha->gbl_dsd_list);
+
INIT_LIST_HEAD(&ha->vp_list);
return 1;
@@ -2465,11 +2666,24 @@ fail_free_ms_iocb:
ha->ms_iocb = NULL;
ha->ms_iocb_dma = 0;
fail_dma_pool:
+ if (IS_QLA82XX(ha)) {
+ dma_pool_destroy(ha->fcp_cmnd_dma_pool);
+ ha->fcp_cmnd_dma_pool = NULL;
+ }
+fail_dl_dma_pool:
+ if (IS_QLA82XX(ha)) {
+ dma_pool_destroy(ha->dl_dma_pool);
+ ha->dl_dma_pool = NULL;
+ }
+fail_s_dma_pool:
dma_pool_destroy(ha->s_dma_pool);
ha->s_dma_pool = NULL;
fail_free_nvram:
kfree(ha->nvram);
ha->nvram = NULL;
+fail_free_ctx_mempool:
+ mempool_destroy(ha->ctx_mempool);
+ ha->ctx_mempool = NULL;
fail_free_srb_mempool:
mempool_destroy(ha->srb_mempool);
ha->srb_mempool = NULL;
@@ -2538,7 +2752,8 @@ qla2x00_mem_free(struct qla_hw_data *ha)
dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
if (ha->ex_init_cb)
- dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
+ dma_pool_free(ha->s_dma_pool,
+ ha->ex_init_cb, ha->ex_init_cb_dma);
if (ha->s_dma_pool)
dma_pool_destroy(ha->s_dma_pool);
@@ -2547,14 +2762,39 @@ qla2x00_mem_free(struct qla_hw_data *ha)
dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
ha->gid_list_dma);
+ if (IS_QLA82XX(ha)) {
+ if (!list_empty(&ha->gbl_dsd_list)) {
+ struct dsd_dma *dsd_ptr, *tdsd_ptr;
+
+ /* clean up allocated prev pool */
+ list_for_each_entry_safe(dsd_ptr,
+ tdsd_ptr, &ha->gbl_dsd_list, list) {
+ dma_pool_free(ha->dl_dma_pool,
+ dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma);
+ list_del(&dsd_ptr->list);
+ kfree(dsd_ptr);
+ }
+ }
+ }
+
+ if (ha->dl_dma_pool)
+ dma_pool_destroy(ha->dl_dma_pool);
+
+ if (ha->fcp_cmnd_dma_pool)
+ dma_pool_destroy(ha->fcp_cmnd_dma_pool);
+
+ if (ha->ctx_mempool)
+ mempool_destroy(ha->ctx_mempool);
+
if (ha->init_cb)
dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
- ha->init_cb, ha->init_cb_dma);
+ ha->init_cb, ha->init_cb_dma);
vfree(ha->optrom_buffer);
kfree(ha->nvram);
kfree(ha->npiv_info);
ha->srb_mempool = NULL;
+ ha->ctx_mempool = NULL;
ha->eft = NULL;
ha->eft_dma = 0;
ha->sns_cmd = NULL;
@@ -2569,6 +2809,8 @@ qla2x00_mem_free(struct qla_hw_data *ha)
ha->ex_init_cb_dma = 0;
ha->s_dma_pool = NULL;
+ ha->dl_dma_pool = NULL;
+ ha->fcp_cmnd_dma_pool = NULL;
ha->gid_list = NULL;
ha->gid_list_dma = 0;
@@ -2896,6 +3138,45 @@ qla2x00_do_dpc(void *data)
qla2x00_do_work(base_vha);
+ if (IS_QLA82XX(ha)) {
+ if (test_and_clear_bit(ISP_UNRECOVERABLE,
+ &base_vha->dpc_flags)) {
+ qla82xx_idc_lock(ha);
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+ QLA82XX_DEV_FAILED);
+ qla82xx_idc_unlock(ha);
+ qla_printk(KERN_INFO, ha,
+ "HW State: FAILED\n");
+ qla82xx_device_state_handler(base_vha);
+ continue;
+ }
+
+ if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED,
+ &base_vha->dpc_flags)) {
+
+ DEBUG(printk(KERN_INFO
+ "scsi(%ld): dpc: sched "
+ "qla82xx_fcoe_ctx_reset ha = %p\n",
+ base_vha->host_no, ha));
+ if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
+ &base_vha->dpc_flags))) {
+ if (qla82xx_fcoe_ctx_reset(base_vha)) {
+ /* FCoE-ctx reset failed.
+ * Escalate to chip-reset
+ */
+ set_bit(ISP_ABORT_NEEDED,
+ &base_vha->dpc_flags);
+ }
+ clear_bit(ABORT_ISP_ACTIVE,
+ &base_vha->dpc_flags);
+ }
+
+ DEBUG(printk("scsi(%ld): dpc:"
+ " qla82xx_fcoe_ctx_reset end\n",
+ base_vha->host_no));
+ }
+ }
+
if (test_and_clear_bit(ISP_ABORT_NEEDED,
&base_vha->dpc_flags)) {
@@ -2905,7 +3186,7 @@ qla2x00_do_dpc(void *data)
if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
&base_vha->dpc_flags))) {
- if (qla2x00_abort_isp(base_vha)) {
+ if (ha->isp_ops->abort_isp(base_vha)) {
/* failed. retry later */
set_bit(ISP_ABORT_NEEDED,
&base_vha->dpc_flags);
@@ -3053,8 +3334,18 @@ qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp)
qla2x00_sp_free_dma(sp);
- mempool_free(sp, ha->srb_mempool);
+ if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
+ struct ct6_dsd *ctx = sp->ctx;
+ dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd,
+ ctx->fcp_cmnd_dma);
+ list_splice(&ctx->dsd_list, &ha->gbl_dsd_list);
+ ha->gbl_dsd_inuse -= ctx->dsd_use_cnt;
+ ha->gbl_dsd_avail += ctx->dsd_use_cnt;
+ mempool_free(sp->ctx, ha->ctx_mempool);
+ sp->ctx = NULL;
+ }
+ mempool_free(sp, ha->srb_mempool);
cmd->scsi_done(cmd);
}
@@ -3079,6 +3370,9 @@ qla2x00_timer(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct req_que *req;
+ if (IS_QLA82XX(ha))
+ qla82xx_watchdog(vha);
+
/* Hardware read to raise pending EEH errors during mailbox waits. */
if (!pci_channel_offline(ha->pdev))
pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
@@ -3193,6 +3487,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
start_dpc ||
test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) ||
test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) ||
+ test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
+ test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
test_bit(RELOGIN_NEEDED, &vha->dpc_flags)))
qla2xxx_wake_dpc(vha);
@@ -3202,7 +3498,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
/* Firmware interface routines. */
-#define FW_BLOBS 7
+#define FW_BLOBS 8
#define FW_ISP21XX 0
#define FW_ISP22XX 1
#define FW_ISP2300 2
@@ -3210,6 +3506,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
#define FW_ISP24XX 4
#define FW_ISP25XX 5
#define FW_ISP81XX 6
+#define FW_ISP82XX 7
#define FW_FILE_ISP21XX "ql2100_fw.bin"
#define FW_FILE_ISP22XX "ql2200_fw.bin"
@@ -3218,6 +3515,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
#define FW_FILE_ISP24XX "ql2400_fw.bin"
#define FW_FILE_ISP25XX "ql2500_fw.bin"
#define FW_FILE_ISP81XX "ql8100_fw.bin"
+#define FW_FILE_ISP82XX "ql8200_fw.bin"
static DEFINE_MUTEX(qla_fw_lock);
@@ -3229,6 +3527,7 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
{ .name = FW_FILE_ISP24XX, },
{ .name = FW_FILE_ISP25XX, },
{ .name = FW_FILE_ISP81XX, },
+ { .name = FW_FILE_ISP82XX, },
};
struct fw_blob *
@@ -3252,6 +3551,8 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
blob = &qla_fw_blobs[FW_ISP25XX];
} else if (IS_QLA81XX(ha)) {
blob = &qla_fw_blobs[FW_ISP81XX];
+ } else if (IS_QLA82XX(ha)) {
+ blob = &qla_fw_blobs[FW_ISP82XX];
}
mutex_lock(&qla_fw_lock);
@@ -3392,7 +3693,7 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
msleep(1000);
set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
- if (qla2x00_abort_isp(base_vha) == QLA_SUCCESS)
+ if (ha->isp_ops->abort_isp(base_vha) == QLA_SUCCESS)
ret = PCI_ERS_RESULT_RECOVERED;
clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
@@ -3445,6 +3746,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
{ 0 },
};
MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
@@ -3516,6 +3818,8 @@ qla2x00_module_exit(void)
pci_unregister_driver(&qla2xxx_pci_driver);
qla2x00_release_firmware();
kmem_cache_destroy(srb_cachep);
+ if (ctx_cachep)
+ kmem_cache_destroy(ctx_cachep);
fc_release_transport(qla2xxx_transport_template);
fc_release_transport(qla2xxx_transport_vport_template);
}
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 8b3de4e54c28..de92504d7585 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -423,9 +423,6 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
/* Flash Manipulation Routines */
/*****************************************************************************/
-#define OPTROM_BURST_SIZE 0x1000
-#define OPTROM_BURST_DWORDS (OPTROM_BURST_SIZE / 4)
-
static inline uint32_t
flash_conf_addr(struct qla_hw_data *ha, uint32_t faddr)
{
@@ -565,6 +562,10 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
*start = FA_FLASH_LAYOUT_ADDR;
else if (IS_QLA81XX(ha))
*start = FA_FLASH_LAYOUT_ADDR_81;
+ else if (IS_QLA82XX(ha)) {
+ *start = FA_FLASH_LAYOUT_ADDR_82;
+ goto end;
+ }
/* Begin with first PCI expansion ROM header. */
buf = (uint8_t *)req->ring;
dcode = (uint32_t *)req->ring;
@@ -648,6 +649,12 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
const uint32_t def_npiv_conf1[] =
{ FA_NPIV_CONF1_ADDR_24, FA_NPIV_CONF1_ADDR,
FA_NPIV_CONF1_ADDR_81 };
+ const uint32_t fcp_prio_cfg0[] =
+ { FA_FCP_PRIO0_ADDR, FA_FCP_PRIO0_ADDR_25,
+ 0 };
+ const uint32_t fcp_prio_cfg1[] =
+ { FA_FCP_PRIO1_ADDR, FA_FCP_PRIO1_ADDR_25,
+ 0 };
uint32_t def;
uint16_t *wptr;
uint16_t cnt, chksum;
@@ -703,10 +710,14 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
break;
case FLT_REG_VPD_0:
ha->flt_region_vpd_nvram = start;
+ if (IS_QLA82XX(ha))
+ break;
if (ha->flags.port0)
ha->flt_region_vpd = start;
break;
case FLT_REG_VPD_1:
+ if (IS_QLA82XX(ha))
+ break;
if (!ha->flags.port0)
ha->flt_region_vpd = start;
break;
@@ -732,6 +743,29 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
case FLT_REG_GOLD_FW:
ha->flt_region_gold_fw = start;
break;
+ case FLT_REG_FCP_PRIO_0:
+ if (ha->flags.port0)
+ ha->flt_region_fcp_prio = start;
+ break;
+ case FLT_REG_FCP_PRIO_1:
+ if (!ha->flags.port0)
+ ha->flt_region_fcp_prio = start;
+ break;
+ case FLT_REG_BOOT_CODE_82XX:
+ ha->flt_region_boot = start;
+ break;
+ case FLT_REG_FW_82XX:
+ ha->flt_region_fw = start;
+ break;
+ case FLT_REG_GOLD_FW_82XX:
+ ha->flt_region_gold_fw = start;
+ break;
+ case FLT_REG_BOOTLOAD_82XX:
+ ha->flt_region_bootload = start;
+ break;
+ case FLT_REG_VPD_82XX:
+ ha->flt_region_vpd = start;
+ break;
}
}
goto done;
@@ -750,12 +784,14 @@ no_flash_data:
ha->flt_region_boot = def_boot[def];
ha->flt_region_vpd_nvram = def_vpd_nvram[def];
ha->flt_region_vpd = ha->flags.port0 ?
- def_vpd0[def]: def_vpd1[def];
+ def_vpd0[def] : def_vpd1[def];
ha->flt_region_nvram = ha->flags.port0 ?
- def_nvram0[def]: def_nvram1[def];
+ def_nvram0[def] : def_nvram1[def];
ha->flt_region_fdt = def_fdt[def];
ha->flt_region_npiv_conf = ha->flags.port0 ?
- def_npiv_conf0[def]: def_npiv_conf1[def];
+ def_npiv_conf0[def] : def_npiv_conf1[def];
+ ha->flt_region_fcp_prio = ha->flags.port0 ?
+ fcp_prio_cfg0[def] : fcp_prio_cfg1[def];
done:
DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x "
"vpd_nvram=0x%x vpd=0x%x nvram=0x%x fdt=0x%x flt=0x%x "
@@ -775,7 +811,7 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
uint16_t *wptr;
struct qla_fdt_layout *fdt;
uint8_t man_id, flash_id;
- uint16_t mid, fid;
+ uint16_t mid = 0, fid = 0;
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
@@ -816,6 +852,10 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
goto done;
no_flash_data:
loc = locations[0];
+ if (IS_QLA82XX(ha)) {
+ ha->fdt_block_size = FLASH_BLK_SIZE_64K;
+ goto done;
+ }
qla24xx_get_flash_manufacturer(ha, &man_id, &flash_id);
mid = man_id;
fid = flash_id;
@@ -853,6 +893,31 @@ done:
ha->fdt_block_size));
}
+static void
+qla2xxx_get_idc_param(scsi_qla_host_t *vha)
+{
+#define QLA82XX_IDC_PARAM_ADDR 0x003e885c
+ uint32_t *wptr;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+
+ if (!IS_QLA82XX(ha))
+ return;
+
+ wptr = (uint32_t *)req->ring;
+ ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
+	    QLA82XX_IDC_PARAM_ADDR, 8);
+
+ if (*wptr == __constant_cpu_to_le32(0xffffffff)) {
+ ha->nx_dev_init_timeout = QLA82XX_ROM_DEV_INIT_TIMEOUT;
+ ha->nx_reset_timeout = QLA82XX_ROM_DRV_RESET_ACK_TIMEOUT;
+ } else {
+ ha->nx_dev_init_timeout = le32_to_cpu(*wptr++);
+ ha->nx_reset_timeout = le32_to_cpu(*wptr);
+ }
+ return;
+}
+
int
qla2xxx_get_flash_info(scsi_qla_host_t *vha)
{
@@ -860,7 +925,7 @@ qla2xxx_get_flash_info(scsi_qla_host_t *vha)
uint32_t flt_addr;
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_QLA81XX(ha))
+ if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_QLA8XXX_TYPE(ha))
return QLA_SUCCESS;
ret = qla2xxx_find_flt_start(vha, &flt_addr);
@@ -869,6 +934,7 @@ qla2xxx_get_flash_info(scsi_qla_host_t *vha)
qla2xxx_get_flt_info(vha, flt_addr);
qla2xxx_get_fdt_info(vha);
+ qla2xxx_get_idc_param(vha);
return QLA_SUCCESS;
}
@@ -885,7 +951,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
struct qla_npiv_entry *entry;
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_QLA81XX(ha))
+ if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_QLA8XXX_TYPE(ha))
return;
ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr,
@@ -1178,6 +1244,9 @@ qla24xx_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
uint32_t *dwptr;
struct qla_hw_data *ha = vha->hw;
+ if (IS_QLA82XX(ha))
+ return buf;
+
/* Dword reads to flash. */
dwptr = (uint32_t *)buf;
for (i = 0; i < bytes >> 2; i++, naddr++)
@@ -1233,6 +1302,9 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
ret = QLA_SUCCESS;
+ if (IS_QLA82XX(ha))
+ return ret;
+
/* Enable flash write. */
WRT_REG_DWORD(&reg->ctrl_status,
RD_REG_DWORD(&reg->ctrl_status) | CSRX_FLASH_ENABLE);
@@ -1344,6 +1416,9 @@ qla2x00_beacon_blink(struct scsi_qla_host *vha)
struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+ if (IS_QLA82XX(ha))
+ return;
+
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Save the Original GPIOE. */
@@ -1525,6 +1600,9 @@ qla24xx_beacon_on(struct scsi_qla_host *vha)
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ if (IS_QLA82XX(ha))
+ return QLA_SUCCESS;
+
if (ha->beacon_blink_led == 0) {
/* Enable firmware for update */
ha->fw_options[1] |= ADD_FO1_DISABLE_GPIO_LED_CTRL;
@@ -1567,6 +1645,9 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ if (IS_QLA82XX(ha))
+ return QLA_SUCCESS;
+
ha->beacon_blink_led = 0;
ha->beacon_color_state = QLA_LED_ALL_ON;
@@ -2576,6 +2657,9 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
int i;
struct qla_hw_data *ha = vha->hw;
+ if (IS_QLA82XX(ha))
+ return ret;
+
if (!mbuf)
return QLA_FUNCTION_FAILED;
@@ -2722,3 +2806,50 @@ qla2xxx_get_vpd_field(scsi_qla_host_t *vha, char *key, char *str, size_t size)
return 0;
}
+
+int
+qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
+{
+ int len, max_len;
+ uint32_t fcp_prio_addr;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!ha->fcp_prio_cfg) {
+ ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
+ if (!ha->fcp_prio_cfg) {
+ qla_printk(KERN_WARNING, ha,
+ "Unable to allocate memory for fcp priority data "
+ "(%x).\n", FCP_PRIO_CFG_SIZE);
+ return QLA_FUNCTION_FAILED;
+ }
+ }
+ memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
+
+ fcp_prio_addr = ha->flt_region_fcp_prio;
+
+ /* first read the fcp priority data header from flash */
+ ha->isp_ops->read_optrom(vha, (uint8_t *)ha->fcp_prio_cfg,
+ fcp_prio_addr << 2, FCP_PRIO_CFG_HDR_SIZE);
+
+ if (!qla24xx_fcp_prio_cfg_valid(ha->fcp_prio_cfg, 0))
+ goto fail;
+
+ /* read remaining FCP CMD config data from flash */
+ fcp_prio_addr += (FCP_PRIO_CFG_HDR_SIZE >> 2);
+ len = ha->fcp_prio_cfg->num_entries * FCP_PRIO_CFG_ENTRY_SIZE;
+ max_len = FCP_PRIO_CFG_SIZE - FCP_PRIO_CFG_HDR_SIZE;
+
+ ha->isp_ops->read_optrom(vha, (uint8_t *)&ha->fcp_prio_cfg->entry[0],
+ fcp_prio_addr << 2, (len < max_len ? len : max_len));
+
+ /* revalidate the entire FCP priority config data, including entries */
+ if (!qla24xx_fcp_prio_cfg_valid(ha->fcp_prio_cfg, 1))
+ goto fail;
+
+ ha->flags.fcp_prio_enabled = 1;
+ return QLA_SUCCESS;
+fail:
+ vfree(ha->fcp_prio_cfg);
+ ha->fcp_prio_cfg = NULL;
+ return QLA_FUNCTION_FAILED;
+}
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 81b5f29254e2..428802616e33 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -114,6 +114,7 @@
*/
#define MAC_ADDR_LEN 6 /* in bytes */
#define IP_ADDR_LEN 4 /* in bytes */
+#define IPv6_ADDR_LEN 16 /* IPv6 address size */
#define DRIVER_NAME "qla4xxx"
#define MAX_LINKED_CMDS_PER_LUN 3
@@ -147,6 +148,8 @@
#define MAX_RESET_HA_RETRIES 2
+#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
+
/*
* SCSI Request Block structure (srb) that is placed
* on cmd->SCp location of every I/O [We have 22 bytes available]
@@ -169,7 +172,7 @@ struct srb {
struct scsi_cmnd *cmd; /* (4) SCSI command block */
dma_addr_t dma_handle; /* (4) for unmap of single transfers */
- atomic_t ref_count; /* reference count for this srb */
+ struct kref srb_ref; /* reference count for this srb */
uint32_t fw_ddb_index;
uint8_t err_id; /* error id */
#define SRB_ERR_PORT 1 /* Request failed because "port down" */
@@ -220,7 +223,7 @@ struct ddb_entry {
uint16_t os_target_id; /* Target ID */
uint16_t fw_ddb_index; /* DDB firmware index */
- uint8_t reserved[2];
+ uint16_t options;
uint32_t fw_ddb_device_state; /* F/W Device State -- see ql4_fw.h */
uint32_t CmdSn;
@@ -245,10 +248,18 @@ struct ddb_entry {
uint16_t port;
uint32_t tpgt;
- uint8_t ip_addr[ISCSI_IPADDR_SIZE];
+ uint8_t ip_addr[IP_ADDR_LEN];
uint8_t iscsi_name[ISCSI_NAME_SIZE]; /* 72 x48 */
uint8_t iscsi_alias[0x20];
uint8_t isid[6];
+ uint16_t iscsi_max_burst_len;
+ uint16_t iscsi_max_outsnd_r2t;
+ uint16_t iscsi_first_burst_len;
+ uint16_t iscsi_max_rcv_data_seg_len;
+ uint16_t iscsi_max_snd_data_seg_len;
+
+ struct in6_addr remote_ipv6_addr;
+ struct in6_addr link_local_ipv6_addr;
};
/*
@@ -301,6 +312,7 @@ struct scsi_qla_host {
#define DPC_ISNS_RESTART 7 /* 0x00000080 */
#define DPC_AEN 9 /* 0x00000200 */
#define DPC_GET_DHCP_IP_ADDR 15 /* 0x00008000 */
+#define DPC_LINK_CHANGED 18 /* 0x00040000 */
struct Scsi_Host *host; /* pointer to host data */
uint32_t tot_ddbs;
@@ -320,8 +332,7 @@ struct scsi_qla_host {
#define MIN_IOBASE_LEN 0x100
uint16_t req_q_count;
- uint8_t marker_needed;
- uint8_t rsvd1;
+ uint8_t rsvd1[2];
unsigned long host_no;
@@ -441,8 +452,35 @@ struct scsi_qla_host {
/* Saved srb for status continuation entry processing */
struct srb *status_srb;
+
+ /* IPv6 support info from InitFW */
+ uint8_t acb_version;
+ uint8_t ipv4_addr_state;
+ uint16_t ipv4_options;
+
+ uint32_t resvd2;
+ uint32_t ipv6_options;
+ uint32_t ipv6_addl_options;
+ uint8_t ipv6_link_local_state;
+ uint8_t ipv6_addr0_state;
+ uint8_t ipv6_addr1_state;
+ uint8_t ipv6_default_router_state;
+ struct in6_addr ipv6_link_local_addr;
+ struct in6_addr ipv6_addr0;
+ struct in6_addr ipv6_addr1;
+ struct in6_addr ipv6_default_router_addr;
};
+static inline int is_ipv4_enabled(struct scsi_qla_host *ha)
+{
+ return ((ha->ipv4_options & IPOPT_IPv4_PROTOCOL_ENABLE) != 0);
+}
+
+static inline int is_ipv6_enabled(struct scsi_qla_host *ha)
+{
+ return ((ha->ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE) != 0);
+}
+
static inline int is_qla4010(struct scsi_qla_host *ha)
{
return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4010;
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 9cd7a608df38..855226e08665 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -215,6 +215,7 @@ union external_hw_config_reg {
/* Mailbox command definitions */
#define MBOX_CMD_ABOUT_FW 0x0009
#define MBOX_CMD_PING 0x000B
+#define MBOX_CMD_ABORT_TASK 0x0015
#define MBOX_CMD_LUN_RESET 0x0016
#define MBOX_CMD_TARGET_WARM_RESET 0x0017
#define MBOX_CMD_GET_MANAGEMENT_DATA 0x001E
@@ -258,13 +259,15 @@ union external_hw_config_reg {
/* Mailbox 1 */
#define FW_STATE_READY 0x0000
#define FW_STATE_CONFIG_WAIT 0x0001
-#define FW_STATE_WAIT_LOGIN 0x0002
+#define FW_STATE_WAIT_AUTOCONNECT 0x0002
#define FW_STATE_ERROR 0x0004
-#define FW_STATE_DHCP_IN_PROGRESS 0x0008
+#define FW_STATE_CONFIGURING_IP 0x0008
/* Mailbox 3 */
#define FW_ADDSTATE_OPTICAL_MEDIA 0x0001
-#define FW_ADDSTATE_DHCP_ENABLED 0x0002
+#define FW_ADDSTATE_DHCPv4_ENABLED 0x0002
+#define FW_ADDSTATE_DHCPv4_LEASE_ACQUIRED 0x0004
+#define FW_ADDSTATE_DHCPv4_LEASE_EXPIRED 0x0008
#define FW_ADDSTATE_LINK_UP 0x0010
#define FW_ADDSTATE_ISNS_SVC_ENABLED 0x0020
#define MBOX_CMD_GET_DATABASE_ENTRY_DEFAULTS 0x006B
@@ -320,6 +323,8 @@ union external_hw_config_reg {
/* Host Adapter Initialization Control Block (from host) */
struct addr_ctrl_blk {
uint8_t version; /* 00 */
+#define IFCB_VER_MIN 0x01
+#define IFCB_VER_MAX 0x02
uint8_t control; /* 01 */
uint16_t fw_options; /* 02-03 */
@@ -351,11 +356,16 @@ struct addr_ctrl_blk {
uint16_t iscsi_opts; /* 30-31 */
uint16_t ipv4_tcp_opts; /* 32-33 */
uint16_t ipv4_ip_opts; /* 34-35 */
+#define IPOPT_IPv4_PROTOCOL_ENABLE 0x8000
uint16_t iscsi_max_pdu_size; /* 36-37 */
uint8_t ipv4_tos; /* 38 */
uint8_t ipv4_ttl; /* 39 */
uint8_t acb_version; /* 3A */
+#define ACB_NOT_SUPPORTED 0x00
+#define ACB_SUPPORTED 0x02 /* Capable of ACB Version 2
+ Features */
+
uint8_t res2; /* 3B */
uint16_t def_timeout; /* 3C-3D */
uint16_t iscsi_fburst_len; /* 3E-3F */
@@ -397,16 +407,35 @@ struct addr_ctrl_blk {
uint32_t cookie; /* 200-203 */
uint16_t ipv6_port; /* 204-205 */
uint16_t ipv6_opts; /* 206-207 */
+#define IPV6_OPT_IPV6_PROTOCOL_ENABLE 0x8000
+
uint16_t ipv6_addtl_opts; /* 208-209 */
+#define IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE 0x0002 /* Pri ACB
+ Only */
+#define IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR 0x0001
+
uint16_t ipv6_tcp_opts; /* 20A-20B */
uint8_t ipv6_tcp_wsf; /* 20C */
uint16_t ipv6_flow_lbl; /* 20D-20F */
- uint8_t ipv6_gw_addr[16]; /* 210-21F */
+ uint8_t ipv6_dflt_rtr_addr[16]; /* 210-21F */
uint16_t ipv6_vlan_tag; /* 220-221 */
uint8_t ipv6_lnk_lcl_addr_state;/* 222 */
uint8_t ipv6_addr0_state; /* 223 */
uint8_t ipv6_addr1_state; /* 224 */
- uint8_t ipv6_gw_state; /* 225 */
+#define IP_ADDRSTATE_UNCONFIGURED 0
+#define IP_ADDRSTATE_INVALID 1
+#define IP_ADDRSTATE_ACQUIRING 2
+#define IP_ADDRSTATE_TENTATIVE 3
+#define IP_ADDRSTATE_DEPRICATED 4
+#define IP_ADDRSTATE_PREFERRED 5
+#define IP_ADDRSTATE_DISABLING 6
+
+ uint8_t ipv6_dflt_rtr_state; /* 225 */
+#define IPV6_RTRSTATE_UNKNOWN 0
+#define IPV6_RTRSTATE_MANUAL 1
+#define IPV6_RTRSTATE_ADVERTISED 3
+#define IPV6_RTRSTATE_STALE 4
+
uint8_t ipv6_traffic_class; /* 226 */
uint8_t ipv6_hop_limit; /* 227 */
uint8_t ipv6_if_id[8]; /* 228-22F */
@@ -424,7 +453,7 @@ struct addr_ctrl_blk {
struct init_fw_ctrl_blk {
struct addr_ctrl_blk pri;
- struct addr_ctrl_blk sec;
+/* struct addr_ctrl_blk sec;*/
};
/*************************************************************************/
@@ -433,6 +462,9 @@ struct dev_db_entry {
uint16_t options; /* 00-01 */
#define DDB_OPT_DISC_SESSION 0x10
#define DDB_OPT_TARGET 0x02 /* device is a target */
+#define DDB_OPT_IPV6_DEVICE 0x100
+#define DDB_OPT_IPV6_NULL_LINK_LOCAL 0x800 /* post connection */
+#define DDB_OPT_IPV6_FW_DEFINED_LINK_LOCAL 0x800 /* pre connection */
uint16_t exec_throttle; /* 02-03 */
uint16_t exec_count; /* 04-05 */
@@ -468,7 +500,7 @@ struct dev_db_entry {
* pointer to a string so we
* don't have to reserve soooo
* much RAM */
- uint8_t ipv6_addr[0x10];/* 1A0-1AF */
+ uint8_t link_local_ipv6_addr[0x10]; /* 1A0-1AF */
uint8_t res5[0x10]; /* 1B0-1BF */
uint16_t ddb_link; /* 1C0-1C1 */
uint16_t chap_tbl_idx; /* 1C2-1C3 */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 96ebfb021f6c..c4636f6cb3cb 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -25,6 +25,7 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen);
int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha);
int qla4xxx_relogin_device(struct scsi_qla_host * ha,
struct ddb_entry * ddb_entry);
+int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb);
int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry,
int lun);
int qla4xxx_reset_target(struct scsi_qla_host * ha,
@@ -65,13 +66,14 @@ void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
int qla4xxx_init_rings(struct scsi_qla_host * ha);
struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
uint32_t index);
-void qla4xxx_srb_compl(struct scsi_qla_host *ha, struct srb *srb);
+void qla4xxx_srb_compl(struct kref *ref);
int qla4xxx_reinitialize_ddb_list(struct scsi_qla_host * ha);
-int qla4xxx_process_ddb_changed(struct scsi_qla_host * ha,
- uint32_t fw_ddb_index, uint32_t state);
+int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ uint32_t state, uint32_t conn_error);
void qla4xxx_dump_buffer(void *b, uint32_t size);
int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry, int lun, uint16_t mrkr_mod);
+int qla4_is_relogin_allowed(struct scsi_qla_host *ha, uint32_t conn_err);
extern int ql4xextended_error_logging;
extern int ql4xdiscoverywait;
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 92329a461c68..5510df8a7fa6 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -189,6 +189,78 @@ static int qla4xxx_init_local_data(struct scsi_qla_host *ha)
return qla4xxx_get_firmware_status(ha);
}
+static uint8_t
+qla4xxx_wait_for_ip_config(struct scsi_qla_host *ha)
+{
+ uint8_t ipv4_wait = 0;
+ uint8_t ipv6_wait = 0;
+	int8_t ip_address[IPv6_ADDR_LEN] = {0};
+
+ /* If both IPv4 & IPv6 are enabled, possibly only one
+ * IP address may be acquired, so check to see if we
+ * need to wait for another */
+ if (is_ipv4_enabled(ha) && is_ipv6_enabled(ha)) {
+ if (((ha->addl_fw_state & FW_ADDSTATE_DHCPv4_ENABLED) != 0) &&
+ ((ha->addl_fw_state &
+ FW_ADDSTATE_DHCPv4_LEASE_ACQUIRED) == 0)) {
+ ipv4_wait = 1;
+ }
+ if (((ha->ipv6_addl_options &
+ IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) != 0) &&
+ ((ha->ipv6_link_local_state == IP_ADDRSTATE_ACQUIRING) ||
+ (ha->ipv6_addr0_state == IP_ADDRSTATE_ACQUIRING) ||
+ (ha->ipv6_addr1_state == IP_ADDRSTATE_ACQUIRING))) {
+
+ ipv6_wait = 1;
+
+ if ((ha->ipv6_link_local_state ==
+ IP_ADDRSTATE_PREFERRED) ||
+ (ha->ipv6_addr0_state == IP_ADDRSTATE_PREFERRED) ||
+ (ha->ipv6_addr1_state == IP_ADDRSTATE_PREFERRED)) {
+ DEBUG2(printk(KERN_INFO "scsi%ld: %s: "
+ "Preferred IP configured."
+ " Don't wait!\n", ha->host_no,
+ __func__));
+ ipv6_wait = 0;
+ }
+ if (memcmp(&ha->ipv6_default_router_addr, ip_address,
+ IPv6_ADDR_LEN) == 0) {
+ DEBUG2(printk(KERN_INFO "scsi%ld: %s: "
+ "No Router configured. "
+ "Don't wait!\n", ha->host_no,
+ __func__));
+ ipv6_wait = 0;
+ }
+ if ((ha->ipv6_default_router_state ==
+ IPV6_RTRSTATE_MANUAL) &&
+ (ha->ipv6_link_local_state ==
+ IP_ADDRSTATE_TENTATIVE) &&
+ (memcmp(&ha->ipv6_link_local_addr,
+ &ha->ipv6_default_router_addr, 4) == 0)) {
+ DEBUG2(printk("scsi%ld: %s: LinkLocal Router & "
+ "IP configured. Don't wait!\n",
+ ha->host_no, __func__));
+ ipv6_wait = 0;
+ }
+ }
+ if (ipv4_wait || ipv6_wait) {
+ DEBUG2(printk("scsi%ld: %s: Wait for additional "
+ "IP(s) \"", ha->host_no, __func__));
+ if (ipv4_wait)
+ DEBUG2(printk("IPv4 "));
+ if (ha->ipv6_link_local_state == IP_ADDRSTATE_ACQUIRING)
+ DEBUG2(printk("IPv6LinkLocal "));
+ if (ha->ipv6_addr0_state == IP_ADDRSTATE_ACQUIRING)
+ DEBUG2(printk("IPv6Addr0 "));
+ if (ha->ipv6_addr1_state == IP_ADDRSTATE_ACQUIRING)
+ DEBUG2(printk("IPv6Addr1 "));
+ DEBUG2(printk("\"\n"));
+ }
+ }
+
+	return ipv4_wait | ipv6_wait;
+}
+
static int qla4xxx_fw_ready(struct scsi_qla_host *ha)
{
uint32_t timeout_count;
@@ -226,38 +298,80 @@ static int qla4xxx_fw_ready(struct scsi_qla_host *ha)
continue;
}
+ if (ha->firmware_state & FW_STATE_WAIT_AUTOCONNECT) {
+ DEBUG2(printk(KERN_INFO "scsi%ld: %s: fwstate:"
+ "AUTOCONNECT in progress\n",
+ ha->host_no, __func__));
+ }
+
+ if (ha->firmware_state & FW_STATE_CONFIGURING_IP) {
+ DEBUG2(printk(KERN_INFO "scsi%ld: %s: fwstate:"
+ " CONFIGURING IP\n",
+ ha->host_no, __func__));
+ /*
+			 * Check the link state after 15 seconds; if the link is
+			 * still DOWN, the cable is unplugged. Ignore the "DHCP
+			 * in Progress/CONFIGURING IP" bit when checking whether
+			 * the firmware is in the ready state after 15 seconds.
+			 * This is applicable to both 2.x and 3.x firmware.
+ */
+ if (timeout_count <= (ADAPTER_INIT_TOV - 15)) {
+ if (ha->addl_fw_state & FW_ADDSTATE_LINK_UP) {
+ DEBUG2(printk(KERN_INFO "scsi%ld: %s:"
+ " LINK UP (Cable plugged)\n",
+ ha->host_no, __func__));
+ } else if (ha->firmware_state &
+ (FW_STATE_CONFIGURING_IP |
+ FW_STATE_READY)) {
+ DEBUG2(printk(KERN_INFO "scsi%ld: %s: "
+ "LINK DOWN (Cable unplugged)\n",
+ ha->host_no, __func__));
+ ha->firmware_state = FW_STATE_READY;
+ }
+ }
+ }
+
if (ha->firmware_state == FW_STATE_READY) {
- DEBUG2(dev_info(&ha->pdev->dev, "Firmware Ready..\n"));
- /* The firmware is ready to process SCSI commands. */
- DEBUG2(dev_info(&ha->pdev->dev,
- "scsi%ld: %s: MEDIA TYPE - %s\n",
- ha->host_no,
- __func__, (ha->addl_fw_state &
- FW_ADDSTATE_OPTICAL_MEDIA)
- != 0 ? "OPTICAL" : "COPPER"));
- DEBUG2(dev_info(&ha->pdev->dev,
- "scsi%ld: %s: DHCP STATE Enabled "
- "%s\n",
- ha->host_no, __func__,
- (ha->addl_fw_state &
- FW_ADDSTATE_DHCP_ENABLED) != 0 ?
- "YES" : "NO"));
- DEBUG2(dev_info(&ha->pdev->dev,
- "scsi%ld: %s: LINK %s\n",
- ha->host_no, __func__,
- (ha->addl_fw_state &
- FW_ADDSTATE_LINK_UP) != 0 ?
- "UP" : "DOWN"));
- DEBUG2(dev_info(&ha->pdev->dev,
- "scsi%ld: %s: iSNS Service "
- "Started %s\n",
- ha->host_no, __func__,
- (ha->addl_fw_state &
- FW_ADDSTATE_ISNS_SVC_ENABLED) != 0 ?
- "YES" : "NO"));
-
- ready = 1;
- break;
+ /* If DHCP IP Addr is available, retrieve it now. */
+ if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR,
+ &ha->dpc_flags))
+ qla4xxx_get_dhcp_ip_address(ha);
+
+ if (!qla4xxx_wait_for_ip_config(ha) ||
+ timeout_count == 1) {
+ DEBUG2(dev_info(&ha->pdev->dev,
+ "Firmware Ready..\n"));
+ /* The firmware is ready to process SCSI
+ commands. */
+ DEBUG2(dev_info(&ha->pdev->dev,
+ "scsi%ld: %s: MEDIA TYPE"
+ " - %s\n", ha->host_no,
+ __func__, (ha->addl_fw_state &
+ FW_ADDSTATE_OPTICAL_MEDIA)
+ != 0 ? "OPTICAL" : "COPPER"));
+ DEBUG2(dev_info(&ha->pdev->dev,
+ "scsi%ld: %s: DHCPv4 STATE"
+ " Enabled %s\n", ha->host_no,
+ __func__, (ha->addl_fw_state &
+ FW_ADDSTATE_DHCPv4_ENABLED) != 0 ?
+ "YES" : "NO"));
+ DEBUG2(dev_info(&ha->pdev->dev,
+ "scsi%ld: %s: LINK %s\n",
+ ha->host_no, __func__,
+ (ha->addl_fw_state &
+ FW_ADDSTATE_LINK_UP) != 0 ?
+ "UP" : "DOWN"));
+ DEBUG2(dev_info(&ha->pdev->dev,
+ "scsi%ld: %s: iSNS Service "
+ "Started %s\n",
+ ha->host_no, __func__,
+ (ha->addl_fw_state &
+ FW_ADDSTATE_ISNS_SVC_ENABLED) != 0 ?
+ "YES" : "NO"));
+
+ ready = 1;
+ break;
+ }
}
DEBUG2(printk("scsi%ld: %s: waiting on fw, state=%x:%x - "
"seconds expired= %d\n", ha->host_no, __func__,
@@ -272,15 +386,19 @@ static int qla4xxx_fw_ready(struct scsi_qla_host *ha)
msleep(1000);
} /* end of for */
- if (timeout_count == 0)
+ if (timeout_count <= 0)
DEBUG2(printk("scsi%ld: %s: FW Initialization timed out!\n",
ha->host_no, __func__));
- if (ha->firmware_state & FW_STATE_DHCP_IN_PROGRESS) {
- DEBUG2(printk("scsi%ld: %s: FW is reporting its waiting to"
- " grab an IP address from DHCP server\n",
- ha->host_no, __func__));
+ if (ha->firmware_state & FW_STATE_CONFIGURING_IP) {
+ DEBUG2(printk("scsi%ld: %s: FW initialized, but is reporting "
+ "it's waiting to configure an IP address\n",
+ ha->host_no, __func__));
ready = 1;
+ } else if (ha->firmware_state & FW_STATE_WAIT_AUTOCONNECT) {
+ DEBUG2(printk("scsi%ld: %s: FW initialized, but "
+ "auto-discovery still in process\n",
+ ha->host_no, __func__));
}
return ready;
@@ -387,6 +505,7 @@ static int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha,
struct dev_db_entry *fw_ddb_entry = NULL;
dma_addr_t fw_ddb_entry_dma;
int status = QLA_ERROR;
+ uint32_t conn_err;
if (ddb_entry == NULL) {
DEBUG2(printk("scsi%ld: %s: ddb_entry is NULL\n", ha->host_no,
@@ -407,7 +526,7 @@ static int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha,
if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, fw_ddb_entry,
fw_ddb_entry_dma, NULL, NULL,
- &ddb_entry->fw_ddb_device_state, NULL,
+ &ddb_entry->fw_ddb_device_state, &conn_err,
&ddb_entry->tcp_source_port_num,
&ddb_entry->connection_id) ==
QLA_ERROR) {
@@ -419,6 +538,7 @@ static int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha,
}
status = QLA_SUCCESS;
+ ddb_entry->options = le16_to_cpu(fw_ddb_entry->options);
ddb_entry->target_session_id = le16_to_cpu(fw_ddb_entry->tsid);
ddb_entry->task_mgmt_timeout =
le16_to_cpu(fw_ddb_entry->def_timeout);
@@ -442,11 +562,44 @@ static int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha,
memcpy(&ddb_entry->ip_addr[0], &fw_ddb_entry->ip_addr[0],
min(sizeof(ddb_entry->ip_addr), sizeof(fw_ddb_entry->ip_addr)));
- DEBUG2(printk("scsi%ld: %s: ddb[%d] - State= %x status= %d.\n",
- ha->host_no, __func__, fw_ddb_index,
- ddb_entry->fw_ddb_device_state, status));
-
- exit_update_ddb:
+ ddb_entry->iscsi_max_burst_len = fw_ddb_entry->iscsi_max_burst_len;
+ ddb_entry->iscsi_max_outsnd_r2t = fw_ddb_entry->iscsi_max_outsnd_r2t;
+ ddb_entry->iscsi_first_burst_len = fw_ddb_entry->iscsi_first_burst_len;
+ ddb_entry->iscsi_max_rcv_data_seg_len =
+ fw_ddb_entry->iscsi_max_rcv_data_seg_len;
+ ddb_entry->iscsi_max_snd_data_seg_len =
+ fw_ddb_entry->iscsi_max_snd_data_seg_len;
+
+ if (ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
+ memcpy(&ddb_entry->remote_ipv6_addr,
+ fw_ddb_entry->ip_addr,
+ min(sizeof(ddb_entry->remote_ipv6_addr),
+ sizeof(fw_ddb_entry->ip_addr)));
+ memcpy(&ddb_entry->link_local_ipv6_addr,
+ fw_ddb_entry->link_local_ipv6_addr,
+ min(sizeof(ddb_entry->link_local_ipv6_addr),
+ sizeof(fw_ddb_entry->link_local_ipv6_addr)));
+
+ DEBUG2(dev_info(&ha->pdev->dev, "%s: DDB[%d] osIdx = %d "
+ "State %04x ConnErr %08x IP %pI6 "
+ ":%04d \"%s\"\n",
+ __func__, fw_ddb_index,
+ ddb_entry->os_target_id,
+ ddb_entry->fw_ddb_device_state,
+ conn_err, fw_ddb_entry->ip_addr,
+ le16_to_cpu(fw_ddb_entry->port),
+ fw_ddb_entry->iscsi_name));
+ } else
+ DEBUG2(dev_info(&ha->pdev->dev, "%s: DDB[%d] osIdx = %d "
+ "State %04x ConnErr %08x IP %pI4 "
+ ":%04d \"%s\"\n",
+ __func__, fw_ddb_index,
+ ddb_entry->os_target_id,
+ ddb_entry->fw_ddb_device_state,
+ conn_err, fw_ddb_entry->ip_addr,
+ le16_to_cpu(fw_ddb_entry->port),
+ fw_ddb_entry->iscsi_name));
+exit_update_ddb:
if (fw_ddb_entry)
dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
fw_ddb_entry, fw_ddb_entry_dma);
@@ -492,6 +645,40 @@ static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
}
/**
+ * qla4_is_relogin_allowed - Are we allowed to login?
+ * @ha: Pointer to host adapter structure.
+ * @conn_err: Last connection error associated with the ddb
+ *
+ * This routine tests the given connection error to determine if
+ * we are allowed to login.
+ **/
+int qla4_is_relogin_allowed(struct scsi_qla_host *ha, uint32_t conn_err)
+{
+ uint32_t err_code, login_rsp_sts_class;
+ int relogin = 1;
+
+ err_code = ((conn_err & 0x00ff0000) >> 16);
+ login_rsp_sts_class = ((conn_err & 0x0000ff00) >> 8);
+ if (err_code == 0x1c || err_code == 0x06) {
+ DEBUG2(dev_info(&ha->pdev->dev,
+ ": conn_err=0x%08x, send target completed"
+ " or access denied failure\n", conn_err));
+ relogin = 0;
+ }
+ if ((err_code == 0x08) && (login_rsp_sts_class == 0x02)) {
+		/* Login Response PDU returned an error.
+		 * Login Response Status in Error Code Detail
+		 * indicates login should not be retried. */
+ DEBUG2(dev_info(&ha->pdev->dev,
+ ": conn_err=0x%08x, do not retry relogin\n",
+ conn_err));
+ relogin = 0;
+ }
+
+ return relogin;
+}
+
+/**
* qla4xxx_configure_ddbs - builds driver ddb list
* @ha: Pointer to host adapter structure.
*
@@ -505,18 +692,30 @@ static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha)
uint32_t fw_ddb_index = 0;
uint32_t next_fw_ddb_index = 0;
uint32_t ddb_state;
- uint32_t conn_err, err_code;
+ uint32_t conn_err;
struct ddb_entry *ddb_entry;
+ struct dev_db_entry *fw_ddb_entry = NULL;
+ dma_addr_t fw_ddb_entry_dma;
+ uint32_t ipv6_device;
uint32_t new_tgt;
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (fw_ddb_entry == NULL) {
+ DEBUG2(dev_info(&ha->pdev->dev, "%s: DMA alloc failed\n",
+ __func__));
+ return QLA_ERROR;
+ }
+
dev_info(&ha->pdev->dev, "Initializing DDBs ...\n");
for (fw_ddb_index = 0; fw_ddb_index < MAX_DDB_ENTRIES;
fw_ddb_index = next_fw_ddb_index) {
/* First, let's see if a device exists here */
- if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, NULL, 0, NULL,
- &next_fw_ddb_index, &ddb_state,
- &conn_err, NULL, NULL) ==
- QLA_ERROR) {
+ if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, fw_ddb_entry,
+ 0, NULL, &next_fw_ddb_index,
+ &ddb_state, &conn_err,
+ NULL, NULL) ==
+ QLA_ERROR) {
DEBUG2(printk("scsi%ld: %s: get_ddb_entry, "
"fw_ddb_index %d failed", ha->host_no,
__func__, fw_ddb_index));
@@ -533,18 +732,19 @@ static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha)
/* Try and login to device */
DEBUG2(printk("scsi%ld: %s: Login to DDB[%d]\n",
ha->host_no, __func__, fw_ddb_index));
- err_code = ((conn_err & 0x00ff0000) >> 16);
- if (err_code == 0x1c || err_code == 0x06) {
- DEBUG2(printk("scsi%ld: %s send target "
- "completed "
- "or access denied failure\n",
- ha->host_no, __func__));
- } else {
+ ipv6_device = le16_to_cpu(fw_ddb_entry->options) &
+ DDB_OPT_IPV6_DEVICE;
+ if (qla4_is_relogin_allowed(ha, conn_err) &&
+ ((!ipv6_device &&
+ *((uint32_t *)fw_ddb_entry->ip_addr))
+ || ipv6_device)) {
qla4xxx_set_ddb_entry(ha, fw_ddb_index, 0);
if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index,
- NULL, 0, NULL, &next_fw_ddb_index,
- &ddb_state, &conn_err, NULL, NULL)
- == QLA_ERROR) {
+ NULL, 0, NULL,
+ &next_fw_ddb_index,
+ &ddb_state, &conn_err,
+ NULL, NULL)
+ == QLA_ERROR) {
DEBUG2(printk("scsi%ld: %s:"
"get_ddb_entry %d failed\n",
ha->host_no,
@@ -599,7 +799,6 @@ next_one:
struct qla4_relog_scan {
int halt_wait;
uint32_t conn_err;
- uint32_t err_code;
uint32_t fw_ddb_index;
uint32_t next_fw_ddb_index;
uint32_t fw_ddb_device_state;
@@ -609,18 +808,7 @@ static int qla4_test_rdy(struct scsi_qla_host *ha, struct qla4_relog_scan *rs)
{
struct ddb_entry *ddb_entry;
- /*
- * Don't want to do a relogin if connection
- * error is 0x1c.
- */
- rs->err_code = ((rs->conn_err & 0x00ff0000) >> 16);
- if (rs->err_code == 0x1c || rs->err_code == 0x06) {
- DEBUG2(printk(
- "scsi%ld: %s send target"
- " completed or "
- "access denied failure\n",
- ha->host_no, __func__));
- } else {
+ if (qla4_is_relogin_allowed(ha, rs->conn_err)) {
/* We either have a device that is in
* the process of relogging in or a
* device that is waiting to be
@@ -908,7 +1096,7 @@ static void qla4x00_pci_config(struct scsi_qla_host *ha)
static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
{
int status = QLA_ERROR;
- uint32_t max_wait_time;
+ unsigned long max_wait_time;
unsigned long flags;
uint32_t mbox_status;
@@ -940,7 +1128,10 @@ static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* Wait for firmware to come UP. */
- max_wait_time = FIRMWARE_UP_TOV * 4;
+ DEBUG2(printk(KERN_INFO "scsi%ld: %s: Wait up to %d seconds for "
+ "boot firmware to complete...\n",
+ ha->host_no, __func__, FIRMWARE_UP_TOV));
+ max_wait_time = jiffies + (FIRMWARE_UP_TOV * HZ);
do {
uint32_t ctrl_status;
@@ -954,16 +1145,15 @@ static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
if (mbox_status == MBOX_STS_COMMAND_COMPLETE)
break;
- DEBUG2(printk("scsi%ld: %s: Waiting for boot firmware to "
- "complete... ctrl_sts=0x%x, remaining=%d\n",
- ha->host_no, __func__, ctrl_status,
- max_wait_time));
+ DEBUG2(printk(KERN_INFO "scsi%ld: %s: Waiting for boot "
+ "firmware to complete... ctrl_sts=0x%x\n",
+ ha->host_no, __func__, ctrl_status));
- msleep(250);
- } while ((max_wait_time--));
+ msleep_interruptible(250);
+ } while (!time_after_eq(jiffies, max_wait_time));
if (mbox_status == MBOX_STS_COMMAND_COMPLETE) {
- DEBUG(printk("scsi%ld: %s: Firmware has started\n",
+ DEBUG(printk(KERN_INFO "scsi%ld: %s: Firmware has started\n",
ha->host_no, __func__));
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1141,6 +1331,7 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
int status = QLA_ERROR;
int8_t ip_address[IP_ADDR_LEN] = {0} ;
+ clear_bit(AF_ONLINE, &ha->flags);
ha->eeprom_cmd_data = 0;
qla4x00_pci_config(ha);
@@ -1166,7 +1357,7 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
* the ddb_list and wait for DHCP lease acquired aen to come in
* followed by 0x8014 aen" to trigger the tgt discovery process.
*/
- if (ha->firmware_state & FW_STATE_DHCP_IN_PROGRESS)
+ if (ha->firmware_state & FW_STATE_CONFIGURING_IP)
goto exit_init_online;
/* Skip device discovery if ip and subnet is zero */
@@ -1270,8 +1461,8 @@ static void qla4xxx_add_device_dynamically(struct scsi_qla_host *ha,
*
 * This routine processes a Device Database Changed AEN Event.
**/
-int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
- uint32_t fw_ddb_index, uint32_t state)
+int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ uint32_t state, uint32_t conn_err)
{
struct ddb_entry * ddb_entry;
uint32_t old_fw_ddb_device_state;
@@ -1318,19 +1509,24 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
* the device came back.
*/
} else {
- /* Device went away, try to relogin. */
- /* Mark device missing */
- if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
+ /* Device went away, mark device missing */
+ if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE) {
+ DEBUG2(dev_info(&ha->pdev->dev, "%s mark missing "
+ "ddb_entry 0x%p sess 0x%p conn 0x%p\n",
+ __func__, ddb_entry,
+ ddb_entry->sess, ddb_entry->conn));
qla4xxx_mark_device_missing(ha, ddb_entry);
+ }
+
/*
* Relogin if device state changed to a not active state.
- * However, do not relogin if this aen is a result of an IOCTL
- * logout (DF_NO_RELOGIN) or if this is a discovered device.
+ * However, do not relogin if a RELOGIN is in process, or
+ * we are not allowed to relogin to this DDB.
*/
if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_FAILED &&
!test_bit(DF_RELOGIN, &ddb_entry->flags) &&
!test_bit(DF_NO_RELOGIN, &ddb_entry->flags) &&
- !test_bit(DF_ISNS_DISCOVERED, &ddb_entry->flags)) {
+ qla4_is_relogin_allowed(ha, conn_err)) {
/*
* This triggers a relogin. After the relogin_timer
* expires, the relogin gets scheduled. We must wait a
@@ -1338,7 +1534,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
* with failed device_state or a logout response before
* we can issue another relogin.
*/
- /* Firmware padds this timeout: (time2wait +1).
+ /* Firmware pads this timeout: (time2wait +1).
* Driver retry to login should be longer than F/W.
* Otherwise F/W will fail
* set_ddb() mbx cmd with 0x4005 since it still
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index e0c32159749c..e66f3f263f49 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -299,7 +299,7 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
qla4xxx_build_scsi_iocbs(srb, cmd_entry, tot_dsds);
wmb();
- srb->cmd->host_scribble = (unsigned char *)srb;
+ srb->cmd->host_scribble = (unsigned char *)(unsigned long)index;
/* update counters */
srb->state = SRB_ACTIVE_STATE;
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index c196d55eae39..596c3031483c 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -97,7 +97,7 @@ qla4xxx_status_cont_entry(struct scsi_qla_host *ha,
/* Place command on done queue. */
if (srb->req_sense_len == 0) {
- qla4xxx_srb_compl(ha, srb);
+ kref_put(&srb->srb_ref, qla4xxx_srb_compl);
ha->status_srb = NULL;
}
}
@@ -329,7 +329,7 @@ status_entry_exit:
/* complete the request, if not waiting for status_continuation pkt */
srb->cc_stat = sts_entry->completionStatus;
if (ha->status_srb == NULL)
- qla4xxx_srb_compl(ha, srb);
+ kref_put(&srb->srb_ref, qla4xxx_srb_compl);
}
/**
@@ -393,7 +393,7 @@ static void qla4xxx_process_response_queue(struct scsi_qla_host * ha)
/* ETRY normally by sending it back with
* DID_BUS_BUSY */
srb->cmd->result = DID_BUS_BUSY << 16;
- qla4xxx_srb_compl(ha, srb);
+ kref_put(&srb->srb_ref, qla4xxx_srb_compl);
break;
case ET_CONTINUE:
@@ -498,15 +498,22 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
break;
case MBOX_ASTS_LINK_UP:
- DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK UP\n",
- ha->host_no, mbox_status));
set_bit(AF_LINK_UP, &ha->flags);
+ if (test_bit(AF_INIT_DONE, &ha->flags))
+ set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);
+
+ DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x Adapter"
+ " LINK UP\n", ha->host_no,
+ mbox_status));
break;
case MBOX_ASTS_LINK_DOWN:
- DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK DOWN\n",
- ha->host_no, mbox_status));
clear_bit(AF_LINK_UP, &ha->flags);
+ set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);
+
+ DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x Adapter"
+ " LINK DOWN\n", ha->host_no,
+ mbox_status));
break;
case MBOX_ASTS_HEARTBEAT:
@@ -831,7 +838,7 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
qla4xxx_reinitialize_ddb_list(ha);
} else if (mbox_sts[1] == 1) { /* Specific device. */
qla4xxx_process_ddb_changed(ha, mbox_sts[2],
- mbox_sts[3]);
+ mbox_sts[3], mbox_sts[4]);
}
break;
}
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index caeb7d10ae04..75496fb0ae75 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -172,108 +172,207 @@ mbox_exit:
return status;
}
+uint8_t
+qla4xxx_set_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
+ uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma)
+{
+ memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
+ memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
+ mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE;
+ mbox_cmd[1] = 0;
+ mbox_cmd[2] = LSDW(init_fw_cb_dma);
+ mbox_cmd[3] = MSDW(init_fw_cb_dma);
+ mbox_cmd[4] = sizeof(struct addr_ctrl_blk);
+ mbox_cmd[5] = (IFCB_VER_MAX << 8) | IFCB_VER_MIN;
+
+ if (qla4xxx_mailbox_command(ha, 6, 6, mbox_cmd, mbox_sts) !=
+ QLA_SUCCESS) {
+ DEBUG2(printk(KERN_WARNING "scsi%ld: %s: "
+ "MBOX_CMD_INITIALIZE_FIRMWARE"
+ " failed w/ status %04X\n",
+ ha->host_no, __func__, mbox_sts[0]));
+ return QLA_ERROR;
+ }
+ return QLA_SUCCESS;
+}
+
+uint8_t
+qla4xxx_get_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
+ uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma)
+{
+ memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
+ memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
+ mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK;
+ mbox_cmd[2] = LSDW(init_fw_cb_dma);
+ mbox_cmd[3] = MSDW(init_fw_cb_dma);
+ mbox_cmd[4] = sizeof(struct addr_ctrl_blk);
+
+ if (qla4xxx_mailbox_command(ha, 5, 5, mbox_cmd, mbox_sts) !=
+ QLA_SUCCESS) {
+ DEBUG2(printk(KERN_WARNING "scsi%ld: %s: "
+ "MBOX_CMD_GET_INIT_FW_CTRL_BLOCK"
+ " failed w/ status %04X\n",
+ ha->host_no, __func__, mbox_sts[0]));
+ return QLA_ERROR;
+ }
+ return QLA_SUCCESS;
+}
+
+void
+qla4xxx_update_local_ip(struct scsi_qla_host *ha,
+ struct addr_ctrl_blk *init_fw_cb)
+{
+ /* Save IPv4 Address Info */
+ memcpy(ha->ip_address, init_fw_cb->ipv4_addr,
+ min(sizeof(ha->ip_address), sizeof(init_fw_cb->ipv4_addr)));
+ memcpy(ha->subnet_mask, init_fw_cb->ipv4_subnet,
+ min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->ipv4_subnet)));
+ memcpy(ha->gateway, init_fw_cb->ipv4_gw_addr,
+ min(sizeof(ha->gateway), sizeof(init_fw_cb->ipv4_gw_addr)));
+
+ if (is_ipv6_enabled(ha)) {
+ /* Save IPv6 Address */
+ ha->ipv6_link_local_state = init_fw_cb->ipv6_lnk_lcl_addr_state;
+ ha->ipv6_addr0_state = init_fw_cb->ipv6_addr0_state;
+ ha->ipv6_addr1_state = init_fw_cb->ipv6_addr1_state;
+ ha->ipv6_default_router_state = init_fw_cb->ipv6_dflt_rtr_state;
+ ha->ipv6_link_local_addr.in6_u.u6_addr8[0] = 0xFE;
+ ha->ipv6_link_local_addr.in6_u.u6_addr8[1] = 0x80;
+
+ memcpy(&ha->ipv6_link_local_addr.in6_u.u6_addr8[8],
+ init_fw_cb->ipv6_if_id,
+ min(sizeof(ha->ipv6_link_local_addr)/2,
+ sizeof(init_fw_cb->ipv6_if_id)));
+ memcpy(&ha->ipv6_addr0, init_fw_cb->ipv6_addr0,
+ min(sizeof(ha->ipv6_addr0),
+ sizeof(init_fw_cb->ipv6_addr0)));
+ memcpy(&ha->ipv6_addr1, init_fw_cb->ipv6_addr1,
+ min(sizeof(ha->ipv6_addr1),
+ sizeof(init_fw_cb->ipv6_addr1)));
+ memcpy(&ha->ipv6_default_router_addr,
+ init_fw_cb->ipv6_dflt_rtr_addr,
+ min(sizeof(ha->ipv6_default_router_addr),
+ sizeof(init_fw_cb->ipv6_dflt_rtr_addr)));
+ }
+}
+
+uint8_t
+qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
+ uint32_t *mbox_cmd,
+ uint32_t *mbox_sts,
+ struct addr_ctrl_blk *init_fw_cb,
+ dma_addr_t init_fw_cb_dma)
+{
+ if (qla4xxx_get_ifcb(ha, mbox_cmd, mbox_sts, init_fw_cb_dma)
+ != QLA_SUCCESS) {
+ DEBUG2(printk(KERN_WARNING
+ "scsi%ld: %s: Failed to get init_fw_ctrl_blk\n",
+ ha->host_no, __func__));
+ return QLA_ERROR;
+ }
+
+ DEBUG2(qla4xxx_dump_buffer(init_fw_cb, sizeof(struct addr_ctrl_blk)));
+
+ /* Save some info in adapter structure. */
+ ha->acb_version = init_fw_cb->acb_version;
+ ha->firmware_options = le16_to_cpu(init_fw_cb->fw_options);
+ ha->tcp_options = le16_to_cpu(init_fw_cb->ipv4_tcp_opts);
+ ha->ipv4_options = le16_to_cpu(init_fw_cb->ipv4_ip_opts);
+ ha->ipv4_addr_state = le16_to_cpu(init_fw_cb->ipv4_addr_state);
+ ha->heartbeat_interval = init_fw_cb->hb_interval;
+ memcpy(ha->name_string, init_fw_cb->iscsi_name,
+ min(sizeof(ha->name_string),
+ sizeof(init_fw_cb->iscsi_name)));
+ /*memcpy(ha->alias, init_fw_cb->Alias,
+ min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/
+
+	/* Save Command Line Parameter info */
+ ha->port_down_retry_count = le16_to_cpu(init_fw_cb->conn_ka_timeout);
+ ha->discovery_wait = ql4xdiscoverywait;
+
+ if (ha->acb_version == ACB_SUPPORTED) {
+ ha->ipv6_options = init_fw_cb->ipv6_opts;
+ ha->ipv6_addl_options = init_fw_cb->ipv6_addtl_opts;
+ }
+ qla4xxx_update_local_ip(ha, init_fw_cb);
+
+ return QLA_SUCCESS;
+}
+
/**
* qla4xxx_initialize_fw_cb - initializes firmware control block.
* @ha: Pointer to host adapter structure.
**/
int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
{
- struct init_fw_ctrl_blk *init_fw_cb;
+ struct addr_ctrl_blk *init_fw_cb;
dma_addr_t init_fw_cb_dma;
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
int status = QLA_ERROR;
init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(struct init_fw_ctrl_blk),
+ sizeof(struct addr_ctrl_blk),
&init_fw_cb_dma, GFP_KERNEL);
if (init_fw_cb == NULL) {
DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n",
ha->host_no, __func__));
return 10;
}
- memset(init_fw_cb, 0, sizeof(struct init_fw_ctrl_blk));
+ memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
/* Get Initialize Firmware Control Block. */
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
- mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK;
- mbox_cmd[2] = LSDW(init_fw_cb_dma);
- mbox_cmd[3] = MSDW(init_fw_cb_dma);
- mbox_cmd[4] = sizeof(struct init_fw_ctrl_blk);
-
- if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) !=
+ if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) !=
QLA_SUCCESS) {
dma_free_coherent(&ha->pdev->dev,
- sizeof(struct init_fw_ctrl_blk),
+ sizeof(struct addr_ctrl_blk),
init_fw_cb, init_fw_cb_dma);
- return status;
+ goto exit_init_fw_cb;
}
/* Initialize request and response queues. */
qla4xxx_init_rings(ha);
/* Fill in the request and response queue information. */
- init_fw_cb->pri.rqq_consumer_idx = cpu_to_le16(ha->request_out);
- init_fw_cb->pri.compq_producer_idx = cpu_to_le16(ha->response_in);
- init_fw_cb->pri.rqq_len = __constant_cpu_to_le16(REQUEST_QUEUE_DEPTH);
- init_fw_cb->pri.compq_len = __constant_cpu_to_le16(RESPONSE_QUEUE_DEPTH);
- init_fw_cb->pri.rqq_addr_lo = cpu_to_le32(LSDW(ha->request_dma));
- init_fw_cb->pri.rqq_addr_hi = cpu_to_le32(MSDW(ha->request_dma));
- init_fw_cb->pri.compq_addr_lo = cpu_to_le32(LSDW(ha->response_dma));
- init_fw_cb->pri.compq_addr_hi = cpu_to_le32(MSDW(ha->response_dma));
- init_fw_cb->pri.shdwreg_addr_lo =
- cpu_to_le32(LSDW(ha->shadow_regs_dma));
- init_fw_cb->pri.shdwreg_addr_hi =
- cpu_to_le32(MSDW(ha->shadow_regs_dma));
+ init_fw_cb->rqq_consumer_idx = cpu_to_le16(ha->request_out);
+ init_fw_cb->compq_producer_idx = cpu_to_le16(ha->response_in);
+ init_fw_cb->rqq_len = __constant_cpu_to_le16(REQUEST_QUEUE_DEPTH);
+ init_fw_cb->compq_len = __constant_cpu_to_le16(RESPONSE_QUEUE_DEPTH);
+ init_fw_cb->rqq_addr_lo = cpu_to_le32(LSDW(ha->request_dma));
+ init_fw_cb->rqq_addr_hi = cpu_to_le32(MSDW(ha->request_dma));
+ init_fw_cb->compq_addr_lo = cpu_to_le32(LSDW(ha->response_dma));
+ init_fw_cb->compq_addr_hi = cpu_to_le32(MSDW(ha->response_dma));
+ init_fw_cb->shdwreg_addr_lo = cpu_to_le32(LSDW(ha->shadow_regs_dma));
+ init_fw_cb->shdwreg_addr_hi = cpu_to_le32(MSDW(ha->shadow_regs_dma));
/* Set up required options. */
- init_fw_cb->pri.fw_options |=
+ init_fw_cb->fw_options |=
__constant_cpu_to_le16(FWOPT_SESSION_MODE |
FWOPT_INITIATOR_MODE);
- init_fw_cb->pri.fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE);
-
- /* Save some info in adapter structure. */
- ha->firmware_options = le16_to_cpu(init_fw_cb->pri.fw_options);
- ha->tcp_options = le16_to_cpu(init_fw_cb->pri.ipv4_tcp_opts);
- ha->heartbeat_interval = init_fw_cb->pri.hb_interval;
- memcpy(ha->ip_address, init_fw_cb->pri.ipv4_addr,
- min(sizeof(ha->ip_address), sizeof(init_fw_cb->pri.ipv4_addr)));
- memcpy(ha->subnet_mask, init_fw_cb->pri.ipv4_subnet,
- min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->pri.ipv4_subnet)));
- memcpy(ha->gateway, init_fw_cb->pri.ipv4_gw_addr,
- min(sizeof(ha->gateway), sizeof(init_fw_cb->pri.ipv4_gw_addr)));
- memcpy(ha->name_string, init_fw_cb->pri.iscsi_name,
- min(sizeof(ha->name_string),
- sizeof(init_fw_cb->pri.iscsi_name)));
- /*memcpy(ha->alias, init_fw_cb->Alias,
- min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/
-
- /* Save Command Line Paramater info */
- ha->port_down_retry_count = le16_to_cpu(init_fw_cb->pri.conn_ka_timeout);
- ha->discovery_wait = ql4xdiscoverywait;
+ init_fw_cb->fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE);
- /* Send Initialize Firmware Control Block. */
- memset(&mbox_cmd, 0, sizeof(mbox_cmd));
- memset(&mbox_sts, 0, sizeof(mbox_sts));
-
- mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE;
- mbox_cmd[1] = 0;
- mbox_cmd[2] = LSDW(init_fw_cb_dma);
- mbox_cmd[3] = MSDW(init_fw_cb_dma);
- mbox_cmd[4] = sizeof(struct init_fw_ctrl_blk);
+ if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)
+ != QLA_SUCCESS) {
+ DEBUG2(printk(KERN_WARNING
+ "scsi%ld: %s: Failed to set init_fw_ctrl_blk\n",
+ ha->host_no, __func__));
+ goto exit_init_fw_cb;
+ }
- if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) ==
- QLA_SUCCESS)
- status = QLA_SUCCESS;
- else {
- DEBUG2(printk("scsi%ld: %s: MBOX_CMD_INITIALIZE_FIRMWARE "
- "failed w/ status %04X\n", ha->host_no, __func__,
- mbox_sts[0]));
+ if (qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0],
+ init_fw_cb, init_fw_cb_dma) != QLA_SUCCESS) {
+ DEBUG2(printk("scsi%ld: %s: Failed to update local ifcb\n",
+ ha->host_no, __func__));
+ goto exit_init_fw_cb;
}
- dma_free_coherent(&ha->pdev->dev, sizeof(struct init_fw_ctrl_blk),
- init_fw_cb, init_fw_cb_dma);
+ status = QLA_SUCCESS;
+
+exit_init_fw_cb:
+ dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
+ init_fw_cb, init_fw_cb_dma);
return status;
}
@@ -284,13 +383,13 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
**/
int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
{
- struct init_fw_ctrl_blk *init_fw_cb;
+ struct addr_ctrl_blk *init_fw_cb;
dma_addr_t init_fw_cb_dma;
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(struct init_fw_ctrl_blk),
+ sizeof(struct addr_ctrl_blk),
&init_fw_cb_dma, GFP_KERNEL);
if (init_fw_cb == NULL) {
printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no,
@@ -299,35 +398,21 @@ int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
}
/* Get Initialize Firmware Control Block. */
- memset(&mbox_cmd, 0, sizeof(mbox_cmd));
- memset(&mbox_sts, 0, sizeof(mbox_sts));
-
- memset(init_fw_cb, 0, sizeof(struct init_fw_ctrl_blk));
- mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK;
- mbox_cmd[2] = LSDW(init_fw_cb_dma);
- mbox_cmd[3] = MSDW(init_fw_cb_dma);
- mbox_cmd[4] = sizeof(struct init_fw_ctrl_blk);
-
- if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) !=
+ memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
+ if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) !=
QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: Failed to get init_fw_ctrl_blk\n",
ha->host_no, __func__));
dma_free_coherent(&ha->pdev->dev,
- sizeof(struct init_fw_ctrl_blk),
+ sizeof(struct addr_ctrl_blk),
init_fw_cb, init_fw_cb_dma);
return QLA_ERROR;
}
/* Save IP Address. */
- memcpy(ha->ip_address, init_fw_cb->pri.ipv4_addr,
- min(sizeof(ha->ip_address), sizeof(init_fw_cb->pri.ipv4_addr)));
- memcpy(ha->subnet_mask, init_fw_cb->pri.ipv4_subnet,
- min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->pri.ipv4_subnet)));
- memcpy(ha->gateway, init_fw_cb->pri.ipv4_gw_addr,
- min(sizeof(ha->gateway), sizeof(init_fw_cb->pri.ipv4_gw_addr)));
-
- dma_free_coherent(&ha->pdev->dev, sizeof(struct init_fw_ctrl_blk),
- init_fw_cb, init_fw_cb_dma);
+ qla4xxx_update_local_ip(ha, init_fw_cb);
+ dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
+ init_fw_cb, init_fw_cb_dma);
return QLA_SUCCESS;
}
@@ -409,6 +494,7 @@ int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
uint16_t *connection_id)
{
int status = QLA_ERROR;
+ uint16_t options;
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
@@ -441,14 +527,26 @@ int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
goto exit_get_fwddb;
}
if (fw_ddb_entry) {
- dev_info(&ha->pdev->dev, "DDB[%d] MB0 %04x Tot %d Next %d "
- "State %04x ConnErr %08x %d.%d.%d.%d:%04d \"%s\"\n",
- fw_ddb_index, mbox_sts[0], mbox_sts[2], mbox_sts[3],
- mbox_sts[4], mbox_sts[5], fw_ddb_entry->ip_addr[0],
- fw_ddb_entry->ip_addr[1], fw_ddb_entry->ip_addr[2],
- fw_ddb_entry->ip_addr[3],
- le16_to_cpu(fw_ddb_entry->port),
- fw_ddb_entry->iscsi_name);
+ options = le16_to_cpu(fw_ddb_entry->options);
+ if (options & DDB_OPT_IPV6_DEVICE) {
+ dev_info(&ha->pdev->dev, "%s: DDB[%d] MB0 %04x Tot %d "
+ "Next %d State %04x ConnErr %08x %pI6 "
+ ":%04d \"%s\"\n", __func__, fw_ddb_index,
+ mbox_sts[0], mbox_sts[2], mbox_sts[3],
+ mbox_sts[4], mbox_sts[5],
+ fw_ddb_entry->ip_addr,
+ le16_to_cpu(fw_ddb_entry->port),
+ fw_ddb_entry->iscsi_name);
+ } else {
+ dev_info(&ha->pdev->dev, "%s: DDB[%d] MB0 %04x Tot %d "
+ "Next %d State %04x ConnErr %08x %pI4 "
+ ":%04d \"%s\"\n", __func__, fw_ddb_index,
+ mbox_sts[0], mbox_sts[2], mbox_sts[3],
+ mbox_sts[4], mbox_sts[5],
+ fw_ddb_entry->ip_addr,
+ le16_to_cpu(fw_ddb_entry->port),
+ fw_ddb_entry->iscsi_name);
+ }
}
if (num_valid_ddb_entries)
*num_valid_ddb_entries = mbox_sts[2];
@@ -664,6 +762,59 @@ exit_get_event_log:
}
/**
+ * qla4xxx_abort_task - issues Abort Task
+ * @ha: Pointer to host adapter structure.
+ * @srb: Pointer to srb entry
+ *
+ * This routine issues an Abort Task request to the firmware for the
+ * command associated with the given srb. The caller must ensure that
+ * the srb and its command pointers are valid before calling this routine.
+ **/
+int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ struct scsi_cmnd *cmd = srb->cmd;
+ int status = QLA_SUCCESS;
+ unsigned long flags = 0;
+ uint32_t index;
+
+ /*
+ * Send abort task command to ISP, so that the ISP will return
+ * request with ABORT status
+ */
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ index = (unsigned long)(unsigned char *)cmd->host_scribble;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ /* Firmware already posted completion on response queue */
+ if (index == MAX_SRBS)
+ return status;
+
+ mbox_cmd[0] = MBOX_CMD_ABORT_TASK;
+ mbox_cmd[1] = srb->fw_ddb_index;
+ mbox_cmd[2] = index;
+ /* Immediate Command Enable */
+ mbox_cmd[5] = 0x01;
+
+ qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0],
+ &mbox_sts[0]);
+ if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE) {
+ status = QLA_ERROR;
+
+ DEBUG2(printk(KERN_WARNING "scsi%ld:%d:%d: abort task FAILED: "
+ "mbx0=%04X, mb1=%04X, mb2=%04X, mb3=%04X, mb4=%04X\n",
+ ha->host_no, cmd->device->id, cmd->device->lun, mbox_sts[0],
+ mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4]));
+ }
+
+ return status;
+}
+
+/**
* qla4xxx_reset_lun - issues LUN Reset
* @ha: Pointer to host adapter structure.
* @db_entry: Pointer to device database entry
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 2ccad36bee9f..38b1d38afca5 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -74,6 +74,7 @@ static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc);
*/
static int qla4xxx_queuecommand(struct scsi_cmnd *cmd,
void (*done) (struct scsi_cmnd *));
+static int qla4xxx_eh_abort(struct scsi_cmnd *cmd);
static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
@@ -88,6 +89,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
.proc_name = DRIVER_NAME,
.queuecommand = qla4xxx_queuecommand,
+ .eh_abort_handler = qla4xxx_eh_abort,
.eh_device_reset_handler = qla4xxx_eh_device_reset,
.eh_target_reset_handler = qla4xxx_eh_target_reset,
.eh_host_reset_handler = qla4xxx_eh_host_reset,
@@ -384,12 +386,12 @@ static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
if (!srb)
return srb;
- atomic_set(&srb->ref_count, 1);
+ kref_init(&srb->srb_ref);
srb->ha = ha;
srb->ddb = ddb_entry;
srb->cmd = cmd;
srb->flags = 0;
- cmd->SCp.ptr = (void *)srb;
+ CMD_SP(cmd) = (void *)srb;
cmd->scsi_done = done;
return srb;
@@ -403,12 +405,14 @@ static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
scsi_dma_unmap(cmd);
srb->flags &= ~SRB_DMA_VALID;
}
- cmd->SCp.ptr = NULL;
+ CMD_SP(cmd) = NULL;
}
-void qla4xxx_srb_compl(struct scsi_qla_host *ha, struct srb *srb)
+void qla4xxx_srb_compl(struct kref *ref)
{
+ struct srb *srb = container_of(ref, struct srb, srb_ref);
struct scsi_cmnd *cmd = srb->cmd;
+ struct scsi_qla_host *ha = srb->ha;
qla4xxx_srb_free_dma(ha, srb);
@@ -685,6 +689,7 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags) ||
test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) ||
+ test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
test_bit(DPC_AEN, &ha->dpc_flags)) &&
ha->dpc_thread) {
DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
@@ -886,11 +891,10 @@ static void qla4xxx_flush_active_srbs(struct scsi_qla_host *ha)
srb = qla4xxx_del_from_active_array(ha, i);
if (srb != NULL) {
srb->cmd->result = DID_RESET << 16;
- qla4xxx_srb_compl(ha, srb);
+ kref_put(&srb->srb_ref, qla4xxx_srb_compl);
}
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
}
/**
@@ -1069,6 +1073,54 @@ static void qla4xxx_do_dpc(struct work_struct *work)
if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
qla4xxx_get_dhcp_ip_address(ha);
+ /* ---- link change? --- */
+ if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
+ if (!test_bit(AF_LINK_UP, &ha->flags)) {
+ /* ---- link down? --- */
+ list_for_each_entry_safe(ddb_entry, dtemp,
+ &ha->ddb_list, list) {
+ if (atomic_read(&ddb_entry->state) ==
+ DDB_STATE_ONLINE)
+ qla4xxx_mark_device_missing(ha,
+ ddb_entry);
+ }
+ } else {
+ /* ---- link up? --- *
+ * F/W will auto login to all devices ONLY ONCE after
+ * link up during driver initialization and runtime
+ * fatal error recovery. Therefore, the driver must
+ * manually relogin to devices when recovering from
+ * connection failures, logouts, expired KATO, etc. */
+
+ list_for_each_entry_safe(ddb_entry, dtemp,
+ &ha->ddb_list, list) {
+ if ((atomic_read(&ddb_entry->state) ==
+ DDB_STATE_MISSING) ||
+ (atomic_read(&ddb_entry->state) ==
+ DDB_STATE_DEAD)) {
+ if (ddb_entry->fw_ddb_device_state ==
+ DDB_DS_SESSION_ACTIVE) {
+ atomic_set(&ddb_entry->state,
+ DDB_STATE_ONLINE);
+ dev_info(&ha->pdev->dev,
+ "scsi%ld: %s: ddb[%d]"
+ " os[%d] marked"
+ " ONLINE\n",
+ ha->host_no, __func__,
+ ddb_entry->fw_ddb_index,
+ ddb_entry->os_target_id);
+
+ iscsi_unblock_session(
+ ddb_entry->sess);
+ } else
+ qla4xxx_relogin_device(
+ ha, ddb_entry);
+ }
+
+ }
+ }
+ }
+
/* ---- relogin device? --- */
if (adapter_up(ha) &&
test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
@@ -1430,12 +1482,14 @@ static void qla4xxx_slave_destroy(struct scsi_device *sdev)
struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha, uint32_t index)
{
struct srb *srb = NULL;
- struct scsi_cmnd *cmd;
+ struct scsi_cmnd *cmd = NULL;
- if (!(cmd = scsi_host_find_tag(ha->host, index)))
+ cmd = scsi_host_find_tag(ha->host, index);
+ if (!cmd)
return srb;
- if (!(srb = (struct srb *)cmd->host_scribble))
+ srb = (struct srb *)CMD_SP(cmd);
+ if (!srb)
return srb;
/* update counters */
@@ -1443,14 +1497,15 @@ struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha, uint32_t in
ha->req_q_count += srb->iocb_cnt;
ha->iocb_cnt -= srb->iocb_cnt;
if (srb->cmd)
- srb->cmd->host_scribble = NULL;
+ srb->cmd->host_scribble =
+ (unsigned char *)(unsigned long) MAX_SRBS;
}
return srb;
}
/**
* qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
- * @ha: actual ha whose done queue will contain the comd returned by firmware.
+ * @ha: Pointer to host adapter structure.
* @cmd: Scsi Command to wait on.
*
* This routine waits for the command to be returned by the Firmware
@@ -1465,7 +1520,7 @@ static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
do {
/* Checking to see if its returned to OS */
- rp = (struct srb *) cmd->SCp.ptr;
+ rp = (struct srb *) CMD_SP(cmd);
if (rp == NULL) {
done++;
break;
@@ -1534,6 +1589,62 @@ static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha,
}
/**
+ * qla4xxx_eh_abort - callback for abort task.
+ * @cmd: Pointer to Linux's SCSI command structure
+ *
+ * This routine is called by the Linux OS to abort the specified
+ * command.
+ **/
+static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
+{
+ struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
+ unsigned int id = cmd->device->id;
+ unsigned int lun = cmd->device->lun;
+ unsigned long serial = cmd->serial_number;
+ struct srb *srb = NULL;
+ int ret = SUCCESS;
+ int wait = 0;
+
+ dev_info(&ha->pdev->dev,
+ "scsi%ld:%d:%d: Abort command issued cmd=%p, pid=%ld\n",
+ ha->host_no, id, lun, cmd, serial);
+
+ srb = (struct srb *) CMD_SP(cmd);
+
+ if (!srb)
+ return SUCCESS;
+
+ kref_get(&srb->srb_ref);
+
+ if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
+ DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx failed.\n",
+ ha->host_no, id, lun));
+ ret = FAILED;
+ } else {
+ DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx success.\n",
+ ha->host_no, id, lun));
+ wait = 1;
+ }
+
+ kref_put(&srb->srb_ref, qla4xxx_srb_compl);
+
+ /* Wait for command to complete */
+ if (wait) {
+ if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
+ DEBUG2(printk("scsi%ld:%d:%d: Abort handler timed out\n",
+ ha->host_no, id, lun));
+ ret = FAILED;
+ }
+ }
+
+ dev_info(&ha->pdev->dev,
+ "scsi%ld:%d:%d: Abort command - %s\n",
+ ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed");
+
+ return ret;
+}
+
+/**
* qla4xxx_eh_device_reset - callback for target reset.
* @cmd: Pointer to Linux's SCSI command structure
*
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 6980cb279c81..28a6c494a2e8 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,5 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.01.00-k9"
-
+#define QLA4XXX_DRIVER_VERSION "5.02.00-k1"
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 1c08f6164658..ad0ed212db4a 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -67,6 +67,9 @@
#include "scsi_priv.h"
#include "scsi_logging.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/scsi.h>
+
static void scsi_done(struct scsi_cmnd *cmd);
/*
@@ -747,10 +750,12 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
cmd->result = (DID_NO_CONNECT << 16);
scsi_done(cmd);
} else {
+ trace_scsi_dispatch_cmd_start(cmd);
rtn = host->hostt->queuecommand(cmd, scsi_done);
}
spin_unlock_irqrestore(host->host_lock, flags);
if (rtn) {
+ trace_scsi_dispatch_cmd_error(cmd, rtn);
if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
rtn != SCSI_MLQUEUE_TARGET_BUSY)
rtn = SCSI_MLQUEUE_HOST_BUSY;
@@ -781,6 +786,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
*/
static void scsi_done(struct scsi_cmnd *cmd)
{
+ trace_scsi_dispatch_cmd_done(cmd);
blk_complete_request(cmd->request);
}
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 3a5bfd10b2cb..136329b4027b 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -12,7 +12,7 @@
* SAS disks.
*
*
- * For documentation see http://www.torque.net/sg/sdebug26.html
+ * For documentation see http://sg.danny.cz/sg/sdebug26.html
*
* D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
* dpg: work for devfs large number of disks [20010809]
@@ -58,8 +58,8 @@
#include "sd.h"
#include "scsi_logging.h"
-#define SCSI_DEBUG_VERSION "1.81"
-static const char * scsi_debug_version_date = "20070104";
+#define SCSI_DEBUG_VERSION "1.82"
+static const char * scsi_debug_version_date = "20100324";
/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
@@ -108,6 +108,7 @@ static const char * scsi_debug_version_date = "20070104";
#define DEF_ATO 1
#define DEF_PHYSBLK_EXP 0
#define DEF_LOWEST_ALIGNED 0
+#define DEF_OPT_BLKS 64
#define DEF_UNMAP_MAX_BLOCKS 0
#define DEF_UNMAP_MAX_DESC 0
#define DEF_UNMAP_GRANULARITY 0
@@ -147,12 +148,18 @@ static const char * scsi_debug_version_date = "20070104";
#define SAM2_LUN_ADDRESS_METHOD 0
#define SAM2_WLUN_REPORT_LUNS 0xc101
+/* Can queue up to this number of commands. Typically commands
+ * that have a non-zero delay are queued. */
+#define SCSI_DEBUG_CANQUEUE 255
+
static int scsi_debug_add_host = DEF_NUM_HOST;
static int scsi_debug_delay = DEF_DELAY;
static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
static int scsi_debug_every_nth = DEF_EVERY_NTH;
static int scsi_debug_max_luns = DEF_MAX_LUNS;
+static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
static int scsi_debug_num_parts = DEF_NUM_PARTS;
+static int scsi_debug_no_uld = 0;
static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int scsi_debug_opts = DEF_OPTS;
static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
@@ -169,6 +176,7 @@ static int scsi_debug_guard = DEF_GUARD;
static int scsi_debug_ato = DEF_ATO;
static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
+static int scsi_debug_opt_blks = DEF_OPT_BLKS;
static int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
@@ -192,7 +200,6 @@ static int sdebug_sectors_per; /* sectors per cylinder */
#define SDEBUG_SENSE_LEN 32
-#define SCSI_DEBUG_CANQUEUE 255
#define SCSI_DEBUG_MAX_CMD_LEN 32
struct sdebug_dev_info {
@@ -699,9 +706,13 @@ static int inquiry_evpd_b0(unsigned char * arr)
unsigned int gran;
memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
+
+ /* Optimal transfer length granularity */
gran = 1 << scsi_debug_physblk_exp;
arr[2] = (gran >> 8) & 0xff;
arr[3] = gran & 0xff;
+
+ /* Maximum Transfer Length */
if (sdebug_store_sectors > 0x400) {
arr[4] = (sdebug_store_sectors >> 24) & 0xff;
arr[5] = (sdebug_store_sectors >> 16) & 0xff;
@@ -709,6 +720,9 @@ static int inquiry_evpd_b0(unsigned char * arr)
arr[7] = sdebug_store_sectors & 0xff;
}
+ /* Optimal Transfer Length */
+ put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
+
if (scsi_debug_unmap_max_desc) {
unsigned int blocks;
@@ -717,15 +731,20 @@ static int inquiry_evpd_b0(unsigned char * arr)
else
blocks = 0xffffffff;
+ /* Maximum Unmap LBA Count */
put_unaligned_be32(blocks, &arr[16]);
+
+ /* Maximum Unmap Block Descriptor Count */
put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
}
+ /* Unmap Granularity Alignment */
if (scsi_debug_unmap_alignment) {
put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
arr[28] |= 0x80; /* UGAVALID */
}
+ /* Optimal Unmap Granularity */
if (scsi_debug_unmap_granularity) {
put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
return 0x3c; /* Mandatory page length for thin provisioning */
@@ -2266,7 +2285,7 @@ static void timer_intr_handler(unsigned long indx)
struct sdebug_queued_cmd * sqcp;
unsigned long iflags;
- if (indx >= SCSI_DEBUG_CANQUEUE) {
+ if (indx >= scsi_debug_max_queue) {
printk(KERN_ERR "scsi_debug:timer_intr_handler: indx too "
"large\n");
return;
@@ -2380,6 +2399,8 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp)
scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING,
sdp->host->cmd_per_lun);
blk_queue_max_segment_size(sdp->request_queue, 256 * 1024);
+ if (scsi_debug_no_uld)
+ sdp->no_uld_attach = 1;
return 0;
}
@@ -2406,7 +2427,7 @@ static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
struct sdebug_queued_cmd *sqcp;
spin_lock_irqsave(&queued_arr_lock, iflags);
- for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
+ for (k = 0; k < scsi_debug_max_queue; ++k) {
sqcp = &queued_arr[k];
if (sqcp->in_use && (cmnd == sqcp->a_cmnd)) {
del_timer_sync(&sqcp->cmnd_timer);
@@ -2416,7 +2437,7 @@ static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
}
}
spin_unlock_irqrestore(&queued_arr_lock, iflags);
- return (k < SCSI_DEBUG_CANQUEUE) ? 1 : 0;
+ return (k < scsi_debug_max_queue) ? 1 : 0;
}
/* Deletes (stops) timers of all queued commands */
@@ -2427,7 +2448,7 @@ static void stop_all_queued(void)
struct sdebug_queued_cmd *sqcp;
spin_lock_irqsave(&queued_arr_lock, iflags);
- for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
+ for (k = 0; k < scsi_debug_max_queue; ++k) {
sqcp = &queued_arr[k];
if (sqcp->in_use && sqcp->a_cmnd) {
del_timer_sync(&sqcp->cmnd_timer);
@@ -2533,7 +2554,7 @@ static void __init init_all_queued(void)
struct sdebug_queued_cmd * sqcp;
spin_lock_irqsave(&queued_arr_lock, iflags);
- for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
+ for (k = 0; k < scsi_debug_max_queue; ++k) {
sqcp = &queued_arr[k];
init_timer(&sqcp->cmnd_timer);
sqcp->in_use = 0;
@@ -2625,12 +2646,12 @@ static int schedule_resp(struct scsi_cmnd * cmnd,
struct sdebug_queued_cmd * sqcp = NULL;
spin_lock_irqsave(&queued_arr_lock, iflags);
- for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
+ for (k = 0; k < scsi_debug_max_queue; ++k) {
sqcp = &queued_arr[k];
if (! sqcp->in_use)
break;
}
- if (k >= SCSI_DEBUG_CANQUEUE) {
+ if (k >= scsi_debug_max_queue) {
spin_unlock_irqrestore(&queued_arr_lock, iflags);
printk(KERN_WARNING "scsi_debug: can_queue exceeded\n");
return 1; /* report busy to mid level */
@@ -2662,7 +2683,9 @@ module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
+module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
+module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
@@ -2677,6 +2700,7 @@ module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
+module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
@@ -2695,7 +2719,9 @@ MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
+MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
@@ -2705,6 +2731,7 @@ MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)")
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
+MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=64)");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
@@ -2970,6 +2997,31 @@ static ssize_t sdebug_max_luns_store(struct device_driver * ddp,
DRIVER_ATTR(max_luns, S_IRUGO | S_IWUSR, sdebug_max_luns_show,
sdebug_max_luns_store);
+static ssize_t sdebug_max_queue_show(struct device_driver * ddp, char * buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
+}
+static ssize_t sdebug_max_queue_store(struct device_driver * ddp,
+ const char * buf, size_t count)
+{
+ int n;
+
+ if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
+ (n <= SCSI_DEBUG_CANQUEUE)) {
+ scsi_debug_max_queue = n;
+ return count;
+ }
+ return -EINVAL;
+}
+DRIVER_ATTR(max_queue, S_IRUGO | S_IWUSR, sdebug_max_queue_show,
+ sdebug_max_queue_store);
+
+static ssize_t sdebug_no_uld_show(struct device_driver * ddp, char * buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
+}
+DRIVER_ATTR(no_uld, S_IRUGO, sdebug_no_uld_show, NULL);
+
static ssize_t sdebug_scsi_level_show(struct device_driver * ddp, char * buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
@@ -3107,7 +3159,9 @@ static int do_create_driverfs_files(void)
ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
+ ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
+ ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype);
@@ -3139,7 +3193,9 @@ static void do_remove_driverfs_files(void)
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ptype);
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
+ driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
+ driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
@@ -3830,12 +3886,13 @@ static int sdebug_driver_probe(struct device * dev)
sdbg_host = to_sdebug_host(dev);
- hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
- if (NULL == hpnt) {
- printk(KERN_ERR "%s: scsi_register failed\n", __func__);
- error = -ENODEV;
+ sdebug_driver_template.can_queue = scsi_debug_max_queue;
+ hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
+ if (NULL == hpnt) {
+ printk(KERN_ERR "%s: scsi_register failed\n", __func__);
+ error = -ENODEV;
return error;
- }
+ }
sdbg_host->shost = hpnt;
*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 7ad53fa42766..a5d630f5f519 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -39,6 +39,8 @@
#include "scsi_logging.h"
#include "scsi_transport_api.h"
+#include <trace/events/scsi.h>
+
#define SENSE_TIMEOUT (10*HZ)
/*
@@ -52,6 +54,7 @@
void scsi_eh_wakeup(struct Scsi_Host *shost)
{
if (shost->host_busy == shost->host_failed) {
+ trace_scsi_eh_wakeup(shost);
wake_up_process(shost->ehandler);
SCSI_LOG_ERROR_RECOVERY(5,
printk("Waking error handler thread\n"));
@@ -127,6 +130,7 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
struct scsi_cmnd *scmd = req->special;
enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED;
+ trace_scsi_dispatch_cmd_timeout(scmd);
scsi_log_completion(scmd, TIMEOUT_ERROR);
if (scmd->device->host->transportt->eh_timed_out)
@@ -970,9 +974,10 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
"0x%p\n", current->comm,
scmd));
rtn = scsi_try_to_abort_cmd(scmd);
- if (rtn == SUCCESS) {
+ if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD;
if (!scsi_device_online(scmd->device) ||
+ rtn == FAST_IO_FAIL ||
!scsi_eh_tur(scmd)) {
scsi_eh_finish_cmd(scmd, done_q);
}
@@ -1099,8 +1104,9 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
" 0x%p\n", current->comm,
sdev));
rtn = scsi_try_bus_device_reset(bdr_scmd);
- if (rtn == SUCCESS) {
+ if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
if (!scsi_device_online(sdev) ||
+ rtn == FAST_IO_FAIL ||
!scsi_eh_tur(bdr_scmd)) {
list_for_each_entry_safe(scmd, next,
work_q, eh_entry) {
@@ -1163,10 +1169,11 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
"to target %d\n",
current->comm, id));
rtn = scsi_try_target_reset(tgtr_scmd);
- if (rtn == SUCCESS) {
+ if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
if (id == scmd_id(scmd))
if (!scsi_device_online(scmd->device) ||
+ rtn == FAST_IO_FAIL ||
!scsi_eh_tur(tgtr_scmd))
scsi_eh_finish_cmd(scmd,
done_q);
@@ -1222,10 +1229,11 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
" %d\n", current->comm,
channel));
rtn = scsi_try_bus_reset(chan_scmd);
- if (rtn == SUCCESS) {
+ if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
if (channel == scmd_channel(scmd))
if (!scsi_device_online(scmd->device) ||
+ rtn == FAST_IO_FAIL ||
!scsi_eh_tur(scmd))
scsi_eh_finish_cmd(scmd,
done_q);
@@ -1259,9 +1267,10 @@ static int scsi_eh_host_reset(struct list_head *work_q,
, current->comm));
rtn = scsi_try_host_reset(scmd);
- if (rtn == SUCCESS) {
+ if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
if (!scsi_device_online(scmd->device) ||
+ rtn == FAST_IO_FAIL ||
(!scsi_eh_try_stu(scmd) && !scsi_eh_tur(scmd)) ||
!scsi_eh_tur(scmd))
scsi_eh_finish_cmd(scmd, done_q);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 38518b088073..c992ecf4e372 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -459,8 +459,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
found_target->reap_ref++;
spin_unlock_irqrestore(shost->host_lock, flags);
if (found_target->state != STARGET_DEL) {
- put_device(parent);
- kfree(starget);
+ put_device(dev);
return found_target;
}
/* Unfortunately, we found a dying target; need to
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 429c9b73e3e4..c23ab978c3ba 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -474,7 +474,7 @@ static DEVICE_ATTR(field, S_IRUGO, sdev_show_##field, NULL);
/*
- * sdev_rd_attr: create a function and attribute variable for a
+ * sdev_rw_attr: create a function and attribute variable for a
* read/write field.
*/
#define sdev_rw_attr(field, format_string) \
@@ -486,7 +486,7 @@ sdev_store_##field (struct device *dev, struct device_attribute *attr, \
{ \
struct scsi_device *sdev; \
sdev = to_scsi_device(dev); \
- snscanf (buf, 20, format_string, &sdev->field); \
+ sscanf (buf, format_string, &sdev->field); \
return count; \
} \
static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field);
@@ -853,9 +853,6 @@ static int scsi_target_add(struct scsi_target *starget)
error = device_add(&starget->dev);
if (error) {
dev_err(&starget->dev, "target device_add failed, error %d\n", error);
- get_device(&starget->dev);
- scsi_target_reap(starget);
- put_device(&starget->dev);
return error;
}
transport_add_device(&starget->dev);
diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c
new file mode 100644
index 000000000000..b587289cfacb
--- /dev/null
+++ b/drivers/scsi/scsi_trace.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright (C) 2010 FUJITSU LIMITED
+ * Copyright (C) 2010 Tomohiro Kusumi <kusumi.tomohiro@jp.fujitsu.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/kernel.h>
+#include <linux/trace_seq.h>
+#include <trace/events/scsi.h>
+
+#define SERVICE_ACTION16(cdb) (cdb[1] & 0x1f)
+#define SERVICE_ACTION32(cdb) ((cdb[8] << 8) | cdb[9])
+
+static const char *
+scsi_trace_misc(struct trace_seq *, unsigned char *, int);
+
+static const char *
+scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = p->buffer + p->len;
+ sector_t lba = 0, txlen = 0;
+
+ lba |= ((cdb[1] & 0x1F) << 16);
+ lba |= (cdb[2] << 8);
+ lba |= cdb[3];
+ txlen = cdb[4];
+
+ trace_seq_printf(p, "lba=%llu txlen=%llu",
+ (unsigned long long)lba, (unsigned long long)txlen);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *
+scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = p->buffer + p->len;
+ sector_t lba = 0, txlen = 0;
+
+ lba |= (cdb[2] << 24);
+ lba |= (cdb[3] << 16);
+ lba |= (cdb[4] << 8);
+ lba |= cdb[5];
+ txlen |= (cdb[7] << 8);
+ txlen |= cdb[8];
+
+ trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
+ (unsigned long long)lba, (unsigned long long)txlen,
+ cdb[1] >> 5);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *
+scsi_trace_rw12(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = p->buffer + p->len;
+ sector_t lba = 0, txlen = 0;
+
+ lba |= (cdb[2] << 24);
+ lba |= (cdb[3] << 16);
+ lba |= (cdb[4] << 8);
+ lba |= cdb[5];
+ txlen |= (cdb[6] << 24);
+ txlen |= (cdb[7] << 16);
+ txlen |= (cdb[8] << 8);
+ txlen |= cdb[9];
+
+ trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
+ (unsigned long long)lba, (unsigned long long)txlen,
+ cdb[1] >> 5);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *
+scsi_trace_rw16(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = p->buffer + p->len;
+ sector_t lba = 0, txlen = 0;
+
+ lba |= ((u64)cdb[2] << 56);
+ lba |= ((u64)cdb[3] << 48);
+ lba |= ((u64)cdb[4] << 40);
+ lba |= ((u64)cdb[5] << 32);
+ lba |= (cdb[6] << 24);
+ lba |= (cdb[7] << 16);
+ lba |= (cdb[8] << 8);
+ lba |= cdb[9];
+ txlen |= (cdb[10] << 24);
+ txlen |= (cdb[11] << 16);
+ txlen |= (cdb[12] << 8);
+ txlen |= cdb[13];
+
+ trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
+ (unsigned long long)lba, (unsigned long long)txlen,
+ cdb[1] >> 5);
+
+ if (cdb[0] == WRITE_SAME_16)
+ trace_seq_printf(p, " unmap=%u", cdb[1] >> 3 & 1);
+
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *
+scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = p->buffer + p->len, *cmd;
+ sector_t lba = 0, txlen = 0;
+ u32 ei_lbrt = 0;
+
+ switch (SERVICE_ACTION32(cdb)) {
+ case READ_32:
+ cmd = "READ";
+ break;
+ case VERIFY_32:
+ cmd = "VERIFY";
+ break;
+ case WRITE_32:
+ cmd = "WRITE";
+ break;
+ case WRITE_SAME_32:
+ cmd = "WRITE_SAME";
+ break;
+ default:
+ trace_seq_printf(p, "UNKNOWN");
+ goto out;
+ }
+
+ lba |= ((u64)cdb[12] << 56);
+ lba |= ((u64)cdb[13] << 48);
+ lba |= ((u64)cdb[14] << 40);
+ lba |= ((u64)cdb[15] << 32);
+ lba |= (cdb[16] << 24);
+ lba |= (cdb[17] << 16);
+ lba |= (cdb[18] << 8);
+ lba |= cdb[19];
+ ei_lbrt |= (cdb[20] << 24);
+ ei_lbrt |= (cdb[21] << 16);
+ ei_lbrt |= (cdb[22] << 8);
+ ei_lbrt |= cdb[23];
+ txlen |= (cdb[28] << 24);
+ txlen |= (cdb[29] << 16);
+ txlen |= (cdb[30] << 8);
+ txlen |= cdb[31];
+
+ trace_seq_printf(p, "%s_32 lba=%llu txlen=%llu protect=%u ei_lbrt=%u",
+ cmd, (unsigned long long)lba,
+ (unsigned long long)txlen, cdb[10] >> 5, ei_lbrt);
+
+ if (SERVICE_ACTION32(cdb) == WRITE_SAME_32)
+ trace_seq_printf(p, " unmap=%u", cdb[10] >> 3 & 1);
+
+out:
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *
+scsi_trace_unmap(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = p->buffer + p->len;
+ unsigned int regions = cdb[7] << 8 | cdb[8];
+
+ trace_seq_printf(p, "regions=%u", (regions - 8) / 16);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *
+scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = p->buffer + p->len, *cmd;
+ sector_t lba = 0;
+ u32 alloc_len = 0;
+
+ switch (SERVICE_ACTION16(cdb)) {
+ case SAI_READ_CAPACITY_16:
+ cmd = "READ_CAPACITY_16";
+ break;
+ case SAI_GET_LBA_STATUS:
+ cmd = "GET_LBA_STATUS";
+ break;
+ default:
+ trace_seq_printf(p, "UNKNOWN");
+ goto out;
+ }
+
+ lba |= ((u64)cdb[2] << 56);
+ lba |= ((u64)cdb[3] << 48);
+ lba |= ((u64)cdb[4] << 40);
+ lba |= ((u64)cdb[5] << 32);
+ lba |= (cdb[6] << 24);
+ lba |= (cdb[7] << 16);
+ lba |= (cdb[8] << 8);
+ lba |= cdb[9];
+ alloc_len |= (cdb[10] << 24);
+ alloc_len |= (cdb[11] << 16);
+ alloc_len |= (cdb[12] << 8);
+ alloc_len |= cdb[13];
+
+ trace_seq_printf(p, "%s lba=%llu alloc_len=%u", cmd,
+ (unsigned long long)lba, alloc_len);
+
+out:
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *
+scsi_trace_varlen(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ switch (SERVICE_ACTION32(cdb)) {
+ case READ_32:
+ case VERIFY_32:
+ case WRITE_32:
+ case WRITE_SAME_32:
+ return scsi_trace_rw32(p, cdb, len);
+ default:
+ return scsi_trace_misc(p, cdb, len);
+ }
+}
+
+static const char *
+scsi_trace_misc(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = p->buffer + p->len;
+
+ trace_seq_printf(p, "-");
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+const char *
+scsi_trace_parse_cdb(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ switch (cdb[0]) {
+ case READ_6:
+ case WRITE_6:
+ return scsi_trace_rw6(p, cdb, len);
+ case READ_10:
+ case VERIFY:
+ case WRITE_10:
+ case WRITE_SAME:
+ return scsi_trace_rw10(p, cdb, len);
+ case READ_12:
+ case VERIFY_12:
+ case WRITE_12:
+ return scsi_trace_rw12(p, cdb, len);
+ case READ_16:
+ case VERIFY_16:
+ case WRITE_16:
+ case WRITE_SAME_16:
+ return scsi_trace_rw16(p, cdb, len);
+ case UNMAP:
+ return scsi_trace_unmap(p, cdb, len);
+ case SERVICE_ACTION_IN:
+ return scsi_trace_service_action_in(p, cdb, len);
+ case VARIABLE_LENGTH_CMD:
+ return scsi_trace_varlen(p, cdb, len);
+ default:
+ return scsi_trace_misc(p, cdb, len);
+ }
+}
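
The decoders in scsi_trace.c simply reassemble the big-endian LBA and transfer-length fields from the CDB bytes. A minimal standalone sketch of the READ(10) case, using made-up CDB bytes that are not taken from this patch:

#include <stdio.h>

/* Mirrors the arithmetic in scsi_trace_rw10() for a hypothetical READ(10). */
int main(void)
{
	unsigned char cdb[10] = { 0x28, 0x00, 0x00, 0x00,
				  0x10, 0x00, 0x00, 0x00, 0x08, 0x00 };
	unsigned long long lba, txlen;

	lba   = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
	txlen = (cdb[7] << 8) | cdb[8];

	/* prints "lba=4096 txlen=8 protect=0" for the bytes above */
	printf("lba=%llu txlen=%llu protect=%u\n",
	       lba, txlen, (unsigned int)(cdb[1] >> 5));
	return 0;
}
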
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 6cfffc88022a..06813789145c 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -834,7 +834,7 @@ static ssize_t
store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- int val;
+ unsigned long val;
struct fc_rport *rport = transport_class_to_rport(dev);
struct Scsi_Host *shost = rport_to_shost(rport);
struct fc_internal *i = to_fc_internal(shost->transportt);
@@ -848,6 +848,12 @@ store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
return -EINVAL;
/*
+ * Check for overflow; dev_loss_tmo is u32
+ */
+ if (val > UINT_MAX)
+ return -EINVAL;
+
+ /*
* If fast_io_fail is off we have to cap
* dev_loss_tmo at SCSI_DEVICE_BLOCK_MAX_TIMEOUT
*/
@@ -2865,7 +2871,7 @@ void
fc_remote_port_delete(struct fc_rport *rport)
{
struct Scsi_Host *shost = rport_to_shost(rport);
- int timeout = rport->dev_loss_tmo;
+ unsigned long timeout = rport->dev_loss_tmo;
unsigned long flags;
/*
@@ -3191,23 +3197,33 @@ fc_scsi_scan_rport(struct work_struct *work)
*
* This routine can be called from a FC LLD scsi_eh callback. It
* blocks the scsi_eh thread until the fc_rport leaves the
- * FC_PORTSTATE_BLOCKED. This is necessary to avoid the scsi_eh
- * failing recovery actions for blocked rports which would lead to
- * offlined SCSI devices.
+ * FC_PORTSTATE_BLOCKED, or the fast_io_fail_tmo fires. This is
+ * necessary to avoid the scsi_eh failing recovery actions for blocked
+ * rports which would lead to offlined SCSI devices.
+ *
+ * Returns: 0 if the fc_rport left the state FC_PORTSTATE_BLOCKED.
+ * FAST_IO_FAIL if the fast_io_fail_tmo fired; this should be
+ * passed back to scsi_eh.
*/
-void fc_block_scsi_eh(struct scsi_cmnd *cmnd)
+int fc_block_scsi_eh(struct scsi_cmnd *cmnd)
{
struct Scsi_Host *shost = cmnd->device->host;
struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
unsigned long flags;
spin_lock_irqsave(shost->host_lock, flags);
- while (rport->port_state == FC_PORTSTATE_BLOCKED) {
+ while (rport->port_state == FC_PORTSTATE_BLOCKED &&
+ !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)) {
spin_unlock_irqrestore(shost->host_lock, flags);
msleep(1000);
spin_lock_irqsave(shost->host_lock, flags);
}
spin_unlock_irqrestore(shost->host_lock, flags);
+
+ if (rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)
+ return FAST_IO_FAIL;
+
+ return 0;
}
EXPORT_SYMBOL(fc_block_scsi_eh);
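
fc_block_scsi_eh() now tells the caller whether the wait ended because fast_io_fail_tmo fired. A minimal sketch of how an FC LLD error-handler callback might use the new return value; the reset helper named below is hypothetical and only stands in for the LLD's own code:

static int example_lld_eh_device_reset(struct scsi_cmnd *cmnd)
{
	int ret;

	/* Block until the rport leaves FC_PORTSTATE_BLOCKED, or give up
	 * with FAST_IO_FAIL once fast_io_fail_tmo has fired. */
	ret = fc_block_scsi_eh(cmnd);
	if (ret == FAST_IO_FAIL)
		return ret;

	/* rport is usable again; perform the actual reset
	 * (example_lld_reset_lun() is a placeholder, not a real symbol) */
	return example_lld_reset_lun(cmnd) ? FAILED : SUCCESS;
}
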
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index de6c60320f6f..829cc37abc41 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1434,6 +1434,8 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
#error RC16_LEN must not be more than SD_BUF_SIZE
#endif
+#define READ_CAPACITY_RETRIES_ON_RESET 10
+
static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
unsigned char *buffer)
{
@@ -1441,7 +1443,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
struct scsi_sense_hdr sshdr;
int sense_valid = 0;
int the_result;
- int retries = 3;
+ int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
unsigned int alignment;
unsigned long long lba;
unsigned sector_size;
@@ -1470,6 +1472,13 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
* Invalid Field in CDB, just retry
* silently with RC10 */
return -EINVAL;
+ if (sense_valid &&
+ sshdr.sense_key == UNIT_ATTENTION &&
+ sshdr.asc == 0x29 && sshdr.ascq == 0x00)
+ /* Device reset might occur several times,
+ * give it one more chance */
+ if (--reset_retries > 0)
+ continue;
}
retries--;
@@ -1528,7 +1537,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
struct scsi_sense_hdr sshdr;
int sense_valid = 0;
int the_result;
- int retries = 3;
+ int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
sector_t lba;
unsigned sector_size;
@@ -1544,8 +1553,16 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
if (media_not_present(sdkp, &sshdr))
return -ENODEV;
- if (the_result)
+ if (the_result) {
sense_valid = scsi_sense_valid(&sshdr);
+ if (sense_valid &&
+ sshdr.sense_key == UNIT_ATTENTION &&
+ sshdr.asc == 0x29 && sshdr.ascq == 0x00)
+ /* Device reset might occur several times,
+ * give it one more chance */
+ if (--reset_retries > 0)
+ continue;
+ }
retries--;
} while (the_result && retries);
@@ -1574,6 +1591,8 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
static int sd_try_rc16_first(struct scsi_device *sdp)
{
+ if (sdp->host->max_cmd_len < 16)
+ return 0;
if (sdp->scsi_level > SCSI_SPC_2)
return 1;
if (scsi_device_protection(sdp))
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index 5fda881c2470..b701bf2cc187 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -2224,14 +2224,8 @@ wd33c93_proc_info(struct Scsi_Host *instance, char *buf, char **start, off_t off
}
-void
-wd33c93_release(void)
-{
-}
-
EXPORT_SYMBOL(wd33c93_host_reset);
EXPORT_SYMBOL(wd33c93_init);
-EXPORT_SYMBOL(wd33c93_release);
EXPORT_SYMBOL(wd33c93_abort);
EXPORT_SYMBOL(wd33c93_queuecommand);
EXPORT_SYMBOL(wd33c93_intr);
diff --git a/drivers/scsi/wd33c93.h b/drivers/scsi/wd33c93.h
index 00123f2383d7..1ed5f3bf388e 100644
--- a/drivers/scsi/wd33c93.h
+++ b/drivers/scsi/wd33c93.h
@@ -348,6 +348,5 @@ int wd33c93_queuecommand (struct scsi_cmnd *cmd,
void wd33c93_intr (struct Scsi_Host *instance);
int wd33c93_proc_info(struct Scsi_Host *, char *, char **, off_t, int, int);
int wd33c93_host_reset (struct scsi_cmnd *);
-void wd33c93_release(void);
#endif /* WD33C93_H */
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index 2b1ea3d4c4f4..891e1dd65f24 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -1891,8 +1891,8 @@ static int serial8250_get_poll_char(struct uart_port *port)
struct uart_8250_port *up = (struct uart_8250_port *)port;
unsigned char lsr = serial_inp(up, UART_LSR);
- while (!(lsr & UART_LSR_DR))
- lsr = serial_inp(up, UART_LSR);
+ if (!(lsr & UART_LSR_DR))
+ return NO_POLL_CHAR;
return serial_inp(up, UART_RX);
}
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index f55c49475a8c..5f3f03df92e3 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -518,12 +518,13 @@ config SERIAL_S3C2412
Serial port support for the Samsung S3C2412 and S3C2413 SoC
config SERIAL_S3C2440
- tristate "Samsung S3C2440/S3C2442 Serial port support"
- depends on SERIAL_SAMSUNG && (CPU_S3C2440 || CPU_S3C2442)
+ tristate "Samsung S3C2440/S3C2442/S3C2416 Serial port support"
+ depends on SERIAL_SAMSUNG && (CPU_S3C2440 || CPU_S3C2442 || CPU_S3C2416)
default y if CPU_S3C2440
default y if CPU_S3C2442
+ select SERIAL_SAMSUNG_UARTS_4 if CPU_S3C2416
help
- Serial port support for the Samsung S3C2440 and S3C2442 SoC
+ Serial port support for the Samsung S3C2440, S3C2416 and S3C2442 SoC
config SERIAL_S3C24A0
tristate "Samsung S3C24A0 Serial port support"
diff --git a/drivers/serial/amba-pl011.c b/drivers/serial/amba-pl011.c
index 743ebf5f16da..eb4cb480b93e 100644
--- a/drivers/serial/amba-pl011.c
+++ b/drivers/serial/amba-pl011.c
@@ -342,9 +342,9 @@ static int pl010_get_poll_char(struct uart_port *port)
struct uart_amba_port *uap = (struct uart_amba_port *)port;
unsigned int status;
- do {
- status = readw(uap->port.membase + UART01x_FR);
- } while (status & UART01x_FR_RXFE);
+ status = readw(uap->port.membase + UART01x_FR);
+ if (status & UART01x_FR_RXFE)
+ return NO_POLL_CHAR;
return readw(uap->port.membase + UART01x_DR);
}
diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c
index 2c9bf9b68327..eed3c2d8dd1c 100644
--- a/drivers/serial/atmel_serial.c
+++ b/drivers/serial/atmel_serial.c
@@ -38,6 +38,7 @@
#include <linux/dma-mapping.h>
#include <linux/atmel_pdc.h>
#include <linux/atmel_serial.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
@@ -59,6 +60,9 @@
#include <linux/serial_core.h>
+static void atmel_start_rx(struct uart_port *port);
+static void atmel_stop_rx(struct uart_port *port);
+
#ifdef CONFIG_SERIAL_ATMEL_TTYAT
/* Use device name ttyAT, major 204 and minor 154-169. This is necessary if we
@@ -93,6 +97,7 @@
#define UART_GET_BRGR(port) __raw_readl((port)->membase + ATMEL_US_BRGR)
#define UART_PUT_BRGR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_BRGR)
#define UART_PUT_RTOR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_RTOR)
+#define UART_PUT_TTGR(port, v) __raw_writel(v, (port)->membase + ATMEL_US_TTGR)
/* PDC registers */
#define UART_PUT_PTCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_PTCR)
@@ -147,6 +152,9 @@ struct atmel_uart_port {
unsigned int irq_status_prev;
struct circ_buf rx_ring;
+
+ struct serial_rs485 rs485; /* rs485 settings */
+ unsigned int tx_done_mask;
};
static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
@@ -187,6 +195,46 @@ static bool atmel_use_dma_tx(struct uart_port *port)
}
#endif
+/* Enable or disable the rs485 support */
+void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
+{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ unsigned int mode;
+
+ spin_lock(&port->lock);
+
+ /* Disable interrupts */
+ UART_PUT_IDR(port, atmel_port->tx_done_mask);
+
+ mode = UART_GET_MR(port);
+
+ /* Resetting serial mode to RS232 (0x0) */
+ mode &= ~ATMEL_US_USMODE;
+
+ atmel_port->rs485 = *rs485conf;
+
+ if (rs485conf->flags & SER_RS485_ENABLED) {
+ dev_dbg(port->dev, "Setting UART to RS485\n");
+ atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
+ UART_PUT_TTGR(port, rs485conf->delay_rts_before_send);
+ mode |= ATMEL_US_USMODE_RS485;
+ } else {
+ dev_dbg(port->dev, "Setting UART to RS232\n");
+ if (atmel_use_dma_tx(port))
+ atmel_port->tx_done_mask = ATMEL_US_ENDTX |
+ ATMEL_US_TXBUFE;
+ else
+ atmel_port->tx_done_mask = ATMEL_US_TXRDY;
+ }
+ UART_PUT_MR(port, mode);
+
+ /* Enable interrupts */
+ UART_PUT_IER(port, atmel_port->tx_done_mask);
+
+ spin_unlock(&port->lock);
+
+}
+
/*
* Return TIOCSER_TEMT when transmitter FIFO and Shift register is empty.
*/
@@ -202,6 +250,7 @@ static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
{
unsigned int control = 0;
unsigned int mode;
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
#ifdef CONFIG_ARCH_AT91RM9200
if (cpu_is_at91rm9200()) {
@@ -236,6 +285,17 @@ static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
mode |= ATMEL_US_CHMODE_LOC_LOOP;
else
mode |= ATMEL_US_CHMODE_NORMAL;
+
+ /* Resetting serial mode to RS232 (0x0) */
+ mode &= ~ATMEL_US_USMODE;
+
+ if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
+ dev_dbg(port->dev, "Setting UART to RS485\n");
+ UART_PUT_TTGR(port, atmel_port->rs485.delay_rts_before_send);
+ mode |= ATMEL_US_USMODE_RS485;
+ } else {
+ dev_dbg(port->dev, "Setting UART to RS232\n");
+ }
UART_PUT_MR(port, mode);
}
@@ -268,12 +328,17 @@ static u_int atmel_get_mctrl(struct uart_port *port)
*/
static void atmel_stop_tx(struct uart_port *port)
{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+
if (atmel_use_dma_tx(port)) {
/* disable PDC transmit */
UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS);
- UART_PUT_IDR(port, ATMEL_US_ENDTX | ATMEL_US_TXBUFE);
- } else
- UART_PUT_IDR(port, ATMEL_US_TXRDY);
+ }
+ /* Disable interrupts */
+ UART_PUT_IDR(port, atmel_port->tx_done_mask);
+
+ if (atmel_port->rs485.flags & SER_RS485_ENABLED)
+ atmel_start_rx(port);
}
/*
@@ -281,17 +346,39 @@ static void atmel_stop_tx(struct uart_port *port)
*/
static void atmel_start_tx(struct uart_port *port)
{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+
if (atmel_use_dma_tx(port)) {
if (UART_GET_PTSR(port) & ATMEL_PDC_TXTEN)
/* The transmitter is already running. Yes, we
really need this.*/
return;
- UART_PUT_IER(port, ATMEL_US_ENDTX | ATMEL_US_TXBUFE);
+ if (atmel_port->rs485.flags & SER_RS485_ENABLED)
+ atmel_stop_rx(port);
+
/* re-enable PDC transmit */
UART_PUT_PTCR(port, ATMEL_PDC_TXTEN);
- } else
- UART_PUT_IER(port, ATMEL_US_TXRDY);
+ }
+ /* Enable interrupts */
+ UART_PUT_IER(port, atmel_port->tx_done_mask);
+}
+
+/*
+ * start receiving - port is in process of being opened.
+ */
+static void atmel_start_rx(struct uart_port *port)
+{
+ UART_PUT_CR(port, ATMEL_US_RSTSTA); /* reset status and receiver */
+
+ if (atmel_use_dma_rx(port)) {
+ /* enable PDC controller */
+ UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
+ port->read_status_mask);
+ UART_PUT_PTCR(port, ATMEL_PDC_RXTEN);
+ } else {
+ UART_PUT_IER(port, ATMEL_US_RXRDY);
+ }
}
/*
@@ -302,9 +389,11 @@ static void atmel_stop_rx(struct uart_port *port)
if (atmel_use_dma_rx(port)) {
/* disable PDC receive */
UART_PUT_PTCR(port, ATMEL_PDC_RXTDIS);
- UART_PUT_IDR(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
- } else
+ UART_PUT_IDR(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
+ port->read_status_mask);
+ } else {
UART_PUT_IDR(port, ATMEL_US_RXRDY);
+ }
}
/*
@@ -428,8 +517,9 @@ static void atmel_rx_chars(struct uart_port *port)
static void atmel_tx_chars(struct uart_port *port)
{
struct circ_buf *xmit = &port->state->xmit;
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
- if (port->x_char && UART_GET_CSR(port) & ATMEL_US_TXRDY) {
+ if (port->x_char && UART_GET_CSR(port) & atmel_port->tx_done_mask) {
UART_PUT_CHAR(port, port->x_char);
port->icount.tx++;
port->x_char = 0;
@@ -437,7 +527,7 @@ static void atmel_tx_chars(struct uart_port *port)
if (uart_circ_empty(xmit) || uart_tx_stopped(port))
return;
- while (UART_GET_CSR(port) & ATMEL_US_TXRDY) {
+ while (UART_GET_CSR(port) & atmel_port->tx_done_mask) {
UART_PUT_CHAR(port, xmit->buf[xmit->tail]);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
@@ -449,7 +539,8 @@ static void atmel_tx_chars(struct uart_port *port)
uart_write_wakeup(port);
if (!uart_circ_empty(xmit))
- UART_PUT_IER(port, ATMEL_US_TXRDY);
+ /* Enable interrupts */
+ UART_PUT_IER(port, atmel_port->tx_done_mask);
}
/*
@@ -501,18 +592,10 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
- if (atmel_use_dma_tx(port)) {
- /* PDC transmit */
- if (pending & (ATMEL_US_ENDTX | ATMEL_US_TXBUFE)) {
- UART_PUT_IDR(port, ATMEL_US_ENDTX | ATMEL_US_TXBUFE);
- tasklet_schedule(&atmel_port->tasklet);
- }
- } else {
- /* Interrupt transmit */
- if (pending & ATMEL_US_TXRDY) {
- UART_PUT_IDR(port, ATMEL_US_TXRDY);
- tasklet_schedule(&atmel_port->tasklet);
- }
+ if (pending & atmel_port->tx_done_mask) {
+ /* Either PDC or interrupt transmission */
+ UART_PUT_IDR(port, atmel_port->tx_done_mask);
+ tasklet_schedule(&atmel_port->tasklet);
}
}
@@ -590,9 +673,15 @@ static void atmel_tx_dma(struct uart_port *port)
UART_PUT_TPR(port, pdc->dma_addr + xmit->tail);
UART_PUT_TCR(port, count);
- /* re-enable PDC transmit and interrupts */
+ /* re-enable PDC transmit */
UART_PUT_PTCR(port, ATMEL_PDC_TXTEN);
- UART_PUT_IER(port, ATMEL_US_ENDTX | ATMEL_US_TXBUFE);
+ /* Enable interrupts */
+ UART_PUT_IER(port, atmel_port->tx_done_mask);
+ } else {
+ if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
+ /* DMA done, stop TX, start RX for RS485 */
+ atmel_start_rx(port);
+ }
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
@@ -1017,6 +1106,7 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
{
unsigned long flags;
unsigned int mode, imr, quot, baud;
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
/* Get current mode register */
mode = UART_GET_MR(port) & ~(ATMEL_US_USCLKS | ATMEL_US_CHRL
@@ -1115,6 +1205,17 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
/* disable receiver and transmitter */
UART_PUT_CR(port, ATMEL_US_TXDIS | ATMEL_US_RXDIS);
+ /* Resetting serial mode to RS232 (0x0) */
+ mode &= ~ATMEL_US_USMODE;
+
+ if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
+ dev_dbg(port->dev, "Setting UART to RS485\n");
+ UART_PUT_TTGR(port, atmel_port->rs485.delay_rts_before_send);
+ mode |= ATMEL_US_USMODE_RS485;
+ } else {
+ dev_dbg(port->dev, "Setting UART to RS232\n");
+ }
+
/* set the parity, stop bits and data size */
UART_PUT_MR(port, mode);
@@ -1231,6 +1332,35 @@ static void atmel_poll_put_char(struct uart_port *port, unsigned char ch)
}
#endif
+static int
+atmel_ioctl(struct uart_port *port, unsigned int cmd, unsigned long arg)
+{
+ struct serial_rs485 rs485conf;
+
+ switch (cmd) {
+ case TIOCSRS485:
+ if (copy_from_user(&rs485conf, (struct serial_rs485 *) arg,
+ sizeof(rs485conf)))
+ return -EFAULT;
+
+ atmel_config_rs485(port, &rs485conf);
+ break;
+
+ case TIOCGRS485:
+ if (copy_to_user((struct serial_rs485 *) arg,
+ &(to_atmel_uart_port(port)->rs485),
+ sizeof(rs485conf)))
+ return -EFAULT;
+ break;
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
+
+
+
static struct uart_ops atmel_pops = {
.tx_empty = atmel_tx_empty,
.set_mctrl = atmel_set_mctrl,
@@ -1250,6 +1380,7 @@ static struct uart_ops atmel_pops = {
.config_port = atmel_config_port,
.verify_port = atmel_verify_port,
.pm = atmel_serial_pm,
+ .ioctl = atmel_ioctl,
#ifdef CONFIG_CONSOLE_POLL
.poll_get_char = atmel_poll_get_char,
.poll_put_char = atmel_poll_put_char,
@@ -1265,13 +1396,12 @@ static void __devinit atmel_init_port(struct atmel_uart_port *atmel_port,
struct uart_port *port = &atmel_port->uart;
struct atmel_uart_data *data = pdev->dev.platform_data;
- port->iotype = UPIO_MEM;
- port->flags = UPF_BOOT_AUTOCONF;
- port->ops = &atmel_pops;
- port->fifosize = 1;
- port->line = pdev->id;
- port->dev = &pdev->dev;
-
+ port->iotype = UPIO_MEM;
+ port->flags = UPF_BOOT_AUTOCONF;
+ port->ops = &atmel_pops;
+ port->fifosize = 1;
+ port->line = pdev->id;
+ port->dev = &pdev->dev;
port->mapbase = pdev->resource[0].start;
port->irq = pdev->resource[1].start;
@@ -1299,8 +1429,16 @@ static void __devinit atmel_init_port(struct atmel_uart_port *atmel_port,
atmel_port->use_dma_rx = data->use_dma_rx;
atmel_port->use_dma_tx = data->use_dma_tx;
- if (atmel_use_dma_tx(port))
+ atmel_port->rs485 = data->rs485;
+ /* Use TXEMPTY for interrupt when rs485 else TXRDY or ENDTX|TXBUFE */
+ if (atmel_port->rs485.flags & SER_RS485_ENABLED)
+ atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
+ else if (atmel_use_dma_tx(port)) {
port->fifosize = PDC_BUFFER_SIZE;
+ atmel_port->tx_done_mask = ATMEL_US_ENDTX | ATMEL_US_TXBUFE;
+ } else {
+ atmel_port->tx_done_mask = ATMEL_US_TXRDY;
+ }
}
/*
@@ -1334,6 +1472,7 @@ static void atmel_console_putchar(struct uart_port *port, int ch)
static void atmel_console_write(struct console *co, const char *s, u_int count)
{
struct uart_port *port = &atmel_ports[co->index].uart;
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
unsigned int status, imr;
unsigned int pdc_tx;
@@ -1341,7 +1480,7 @@ static void atmel_console_write(struct console *co, const char *s, u_int count)
* First, save IMR and then disable interrupts
*/
imr = UART_GET_IMR(port);
- UART_PUT_IDR(port, ATMEL_US_RXRDY | ATMEL_US_TXRDY);
+ UART_PUT_IDR(port, ATMEL_US_RXRDY | atmel_port->tx_done_mask);
/* Store PDC transmit status and disable it */
pdc_tx = UART_GET_PTSR(port) & ATMEL_PDC_TXTEN;
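
The new atmel_ioctl() hook exposes the RS485 configuration through TIOCSRS485 and TIOCGRS485. A small userspace sketch, assuming /dev/ttyS1 is one of these USARTs and that a delay_rts_before_send of 1 is wanted; both values are illustrative assumptions, not taken from the patch:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/serial.h>

/* Enable RS485 mode on an Atmel USART; returns the open fd or -1. */
int enable_rs485(const char *dev)
{
	struct serial_rs485 rs485conf;
	int fd = open(dev, O_RDWR | O_NOCTTY);

	if (fd < 0)
		return -1;

	memset(&rs485conf, 0, sizeof(rs485conf));
	rs485conf.flags = SER_RS485_ENABLED;
	rs485conf.delay_rts_before_send = 1;

	if (ioctl(fd, TIOCSRS485, &rs485conf) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* e.g. enable_rs485("/dev/ttyS1") */
}
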
diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c
index 4315b23590bd..eacb588a9345 100644
--- a/drivers/serial/imx.c
+++ b/drivers/serial/imx.c
@@ -120,7 +120,8 @@
#define MX2_UCR3_RXDMUXSEL (1<<2) /* RXD Muxed Input Select, on mx2/mx3 */
#define UCR3_INVT (1<<1) /* Inverted Infrared transmission */
#define UCR3_BPEN (1<<0) /* Preset registers enable */
-#define UCR4_CTSTL_32 (32<<10) /* CTS trigger level (32 chars) */
+#define UCR4_CTSTL_SHF 10 /* CTS trigger level shift */
+#define UCR4_CTSTL_MASK 0x3F /* CTS trigger is 6 bits wide */
#define UCR4_INVR (1<<9) /* Inverted infrared reception */
#define UCR4_ENIRI (1<<8) /* Serial infrared interrupt enable */
#define UCR4_WKEN (1<<7) /* Wake interrupt enable */
@@ -591,6 +592,9 @@ static int imx_setup_ufcr(struct imx_port *sport, unsigned int mode)
return 0;
}
+/* half the RX buffer size */
+#define CTSTL 16
+
static int imx_startup(struct uart_port *port)
{
struct imx_port *sport = (struct imx_port *)port;
@@ -607,6 +611,10 @@ static int imx_startup(struct uart_port *port)
if (USE_IRDA(sport))
temp |= UCR4_IRSC;
+ /* set the trigger level for CTS */
+ temp &= ~(UCR4_CTSTL_MASK << UCR4_CTSTL_SHF);
+ temp |= CTSTL << UCR4_CTSTL_SHF;
+
writel(temp & ~UCR4_DREN, sport->port.membase + UCR4);
if (USE_IRDA(sport)) {
diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c
index eadc1ab6bbce..a9a94ae72349 100644
--- a/drivers/serial/kgdboc.c
+++ b/drivers/serial/kgdboc.c
@@ -14,7 +14,9 @@
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/kgdb.h>
+#include <linux/kdb.h>
#include <linux/tty.h>
+#include <linux/console.h>
#define MAX_CONFIG_LEN 40
@@ -32,6 +34,40 @@ static struct kparam_string kps = {
static struct tty_driver *kgdb_tty_driver;
static int kgdb_tty_line;
+#ifdef CONFIG_KDB_KEYBOARD
+static int kgdboc_register_kbd(char **cptr)
+{
+ if (strncmp(*cptr, "kbd", 3) == 0) {
+ if (kdb_poll_idx < KDB_POLL_FUNC_MAX) {
+ kdb_poll_funcs[kdb_poll_idx] = kdb_get_kbd_char;
+ kdb_poll_idx++;
+ if (cptr[0][3] == ',')
+ *cptr += 4;
+ else
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static void kgdboc_unregister_kbd(void)
+{
+ int i;
+
+ for (i = 0; i < kdb_poll_idx; i++) {
+ if (kdb_poll_funcs[i] == kdb_get_kbd_char) {
+ kdb_poll_idx--;
+ kdb_poll_funcs[i] = kdb_poll_funcs[kdb_poll_idx];
+ kdb_poll_funcs[kdb_poll_idx] = NULL;
+ i--;
+ }
+ }
+}
+#else /* ! CONFIG_KDB_KEYBOARD */
+#define kgdboc_register_kbd(x) 0
+#define kgdboc_unregister_kbd()
+#endif /* ! CONFIG_KDB_KEYBOARD */
+
static int kgdboc_option_setup(char *opt)
{
if (strlen(opt) > MAX_CONFIG_LEN) {
@@ -45,25 +81,51 @@ static int kgdboc_option_setup(char *opt)
__setup("kgdboc=", kgdboc_option_setup);
+static void cleanup_kgdboc(void)
+{
+ kgdboc_unregister_kbd();
+ if (configured == 1)
+ kgdb_unregister_io_module(&kgdboc_io_ops);
+}
+
static int configure_kgdboc(void)
{
struct tty_driver *p;
int tty_line = 0;
int err;
+ char *cptr = config;
+ struct console *cons;
err = kgdboc_option_setup(config);
if (err || !strlen(config) || isspace(config[0]))
goto noconfig;
err = -ENODEV;
+ kgdboc_io_ops.is_console = 0;
+ kgdb_tty_driver = NULL;
- p = tty_find_polling_driver(config, &tty_line);
+ if (kgdboc_register_kbd(&cptr))
+ goto do_register;
+
+ p = tty_find_polling_driver(cptr, &tty_line);
if (!p)
goto noconfig;
+ cons = console_drivers;
+ while (cons) {
+ int idx;
+ if (cons->device && cons->device(cons, &idx) == p &&
+ idx == tty_line) {
+ kgdboc_io_ops.is_console = 1;
+ break;
+ }
+ cons = cons->next;
+ }
+
kgdb_tty_driver = p;
kgdb_tty_line = tty_line;
+do_register:
err = kgdb_register_io_module(&kgdboc_io_ops);
if (err)
goto noconfig;
@@ -75,6 +137,7 @@ static int configure_kgdboc(void)
noconfig:
config[0] = 0;
configured = 0;
+ cleanup_kgdboc();
return err;
}
@@ -88,20 +151,18 @@ static int __init init_kgdboc(void)
return configure_kgdboc();
}
-static void cleanup_kgdboc(void)
-{
- if (configured == 1)
- kgdb_unregister_io_module(&kgdboc_io_ops);
-}
-
static int kgdboc_get_char(void)
{
+ if (!kgdb_tty_driver)
+ return -1;
return kgdb_tty_driver->ops->poll_get_char(kgdb_tty_driver,
kgdb_tty_line);
}
static void kgdboc_put_char(u8 chr)
{
+ if (!kgdb_tty_driver)
+ return;
kgdb_tty_driver->ops->poll_put_char(kgdb_tty_driver,
kgdb_tty_line, chr);
}
@@ -162,6 +223,25 @@ static struct kgdb_io kgdboc_io_ops = {
.post_exception = kgdboc_post_exp_handler,
};
+#ifdef CONFIG_KGDB_SERIAL_CONSOLE
+/* This is only available if kgdboc is a built in for early debugging */
+int __init kgdboc_early_init(char *opt)
+{
+ /* save the first character of the config string because the
+ * init routine can destroy it.
+ */
+ char save_ch;
+
+ kgdboc_option_setup(opt);
+ save_ch = config[0];
+ init_kgdboc();
+ config[0] = save_ch;
+ return 0;
+}
+
+early_param("ekgdboc", kgdboc_early_init);
+#endif /* CONFIG_KGDB_SERIAL_CONSOLE */
+
module_init(init_kgdboc);
module_exit(cleanup_kgdboc);
module_param_call(kgdboc, param_set_kgdboc_var, param_get_string, &kps, 0644);
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index a176ab4bd65b..a5d525af8962 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -113,7 +113,9 @@ struct psc_ops {
unsigned char (*read_char)(struct uart_port *port);
void (*cw_disable_ints)(struct uart_port *port);
void (*cw_restore_ints)(struct uart_port *port);
- unsigned long (*getuartclk)(void *p);
+ unsigned int (*set_baudrate)(struct uart_port *port,
+ struct ktermios *new,
+ struct ktermios *old);
int (*clock)(struct uart_port *port, int enable);
int (*fifoc_init)(void);
void (*fifoc_uninit)(void);
@@ -121,6 +123,16 @@ struct psc_ops {
irqreturn_t (*handle_irq)(struct uart_port *port);
};
+/* setting the prescaler and divisor reg is common for all chips */
+static inline void mpc52xx_set_divisor(struct mpc52xx_psc __iomem *psc,
+ u16 prescaler, unsigned int divisor)
+{
+ /* select prescaler */
+ out_be16(&psc->mpc52xx_psc_clock_select, prescaler);
+ out_8(&psc->ctur, divisor >> 8);
+ out_8(&psc->ctlr, divisor & 0xff);
+}
+
#ifdef CONFIG_PPC_MPC52xx
#define FIFO_52xx(port) ((struct mpc52xx_psc_fifo __iomem *)(PSC(port)+1))
static void mpc52xx_psc_fifo_init(struct uart_port *port)
@@ -128,9 +140,6 @@ static void mpc52xx_psc_fifo_init(struct uart_port *port)
struct mpc52xx_psc __iomem *psc = PSC(port);
struct mpc52xx_psc_fifo __iomem *fifo = FIFO_52xx(port);
- /* /32 prescaler */
- out_be16(&psc->mpc52xx_psc_clock_select, 0xdd00);
-
out_8(&fifo->rfcntl, 0x00);
out_be16(&fifo->rfalarm, 0x1ff);
out_8(&fifo->tfcntl, 0x07);
@@ -219,15 +228,47 @@ static void mpc52xx_psc_cw_restore_ints(struct uart_port *port)
out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask);
}
-/* Search for bus-frequency property in this node or a parent */
-static unsigned long mpc52xx_getuartclk(void *p)
+static unsigned int mpc5200_psc_set_baudrate(struct uart_port *port,
+ struct ktermios *new,
+ struct ktermios *old)
{
- /*
- * 5200 UARTs have a / 32 prescaler
- * but the generic serial code assumes 16
- * so return ipb freq / 2
- */
- return mpc5xxx_get_bus_frequency(p) / 2;
+ unsigned int baud;
+ unsigned int divisor;
+
+ /* The 5200 has a fixed /32 prescaler, uartclk contains the ipb freq */
+ baud = uart_get_baud_rate(port, new, old,
+ port->uartclk / (32 * 0xffff) + 1,
+ port->uartclk / 32);
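+	/* adding 16 * baud (half the divider) before dividing rounds to the nearest divisor */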
+ divisor = (port->uartclk + 16 * baud) / (32 * baud);
+
+ /* enable the /32 prescaler and set the divisor */
+ mpc52xx_set_divisor(PSC(port), 0xdd00, divisor);
+ return baud;
+}
+
+static unsigned int mpc5200b_psc_set_baudrate(struct uart_port *port,
+ struct ktermios *new,
+ struct ktermios *old)
+{
+ unsigned int baud;
+ unsigned int divisor;
+ u16 prescaler;
+
+ /* The 5200B has a selectable /4 or /32 prescaler, uartclk contains the
+ * ipb freq */
+ baud = uart_get_baud_rate(port, new, old,
+ port->uartclk / (32 * 0xffff) + 1,
+ port->uartclk / 4);
+ divisor = (port->uartclk + 2 * baud) / (4 * baud);
+
+ /* select the proper prescaler and set the divisor */
+ if (divisor > 0xffff) {
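+		/* /32 divides 8 times more than /4: scale the divisor down by 8 (rounded) so it fits in 16 bits */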
+ divisor = (divisor + 4) / 8;
+ prescaler = 0xdd00; /* /32 */
+ } else
+ prescaler = 0xff00; /* /4 */
+ mpc52xx_set_divisor(PSC(port), prescaler, divisor);
+ return baud;
}
static void mpc52xx_psc_get_irq(struct uart_port *port, struct device_node *np)
@@ -258,7 +299,28 @@ static struct psc_ops mpc52xx_psc_ops = {
.read_char = mpc52xx_psc_read_char,
.cw_disable_ints = mpc52xx_psc_cw_disable_ints,
.cw_restore_ints = mpc52xx_psc_cw_restore_ints,
- .getuartclk = mpc52xx_getuartclk,
+ .set_baudrate = mpc5200_psc_set_baudrate,
+ .get_irq = mpc52xx_psc_get_irq,
+ .handle_irq = mpc52xx_psc_handle_irq,
+};
+
+static struct psc_ops mpc5200b_psc_ops = {
+ .fifo_init = mpc52xx_psc_fifo_init,
+ .raw_rx_rdy = mpc52xx_psc_raw_rx_rdy,
+ .raw_tx_rdy = mpc52xx_psc_raw_tx_rdy,
+ .rx_rdy = mpc52xx_psc_rx_rdy,
+ .tx_rdy = mpc52xx_psc_tx_rdy,
+ .tx_empty = mpc52xx_psc_tx_empty,
+ .stop_rx = mpc52xx_psc_stop_rx,
+ .start_tx = mpc52xx_psc_start_tx,
+ .stop_tx = mpc52xx_psc_stop_tx,
+ .rx_clr_irq = mpc52xx_psc_rx_clr_irq,
+ .tx_clr_irq = mpc52xx_psc_tx_clr_irq,
+ .write_char = mpc52xx_psc_write_char,
+ .read_char = mpc52xx_psc_read_char,
+ .cw_disable_ints = mpc52xx_psc_cw_disable_ints,
+ .cw_restore_ints = mpc52xx_psc_cw_restore_ints,
+ .set_baudrate = mpc5200b_psc_set_baudrate,
.get_irq = mpc52xx_psc_get_irq,
.handle_irq = mpc52xx_psc_handle_irq,
};
@@ -392,9 +454,35 @@ static void mpc512x_psc_cw_restore_ints(struct uart_port *port)
out_be32(&FIFO_512x(port)->rximr, port->read_status_mask & 0x7f);
}
-static unsigned long mpc512x_getuartclk(void *p)
+static unsigned int mpc512x_psc_set_baudrate(struct uart_port *port,
+ struct ktermios *new,
+ struct ktermios *old)
{
- return mpc5xxx_get_bus_frequency(p);
+ unsigned int baud;
+ unsigned int divisor;
+
+ /*
+ * The "MPC5121e Microcontroller Reference Manual, Rev. 3" says on
+ * pg. 30-10 that the chip supports a /32 and a /10 prescaler.
+ * Furthermore, it states that "After reset, the prescaler by 10
+ * for the UART mode is selected", but the reset register value is
+ * 0x0000 which means a /32 prescaler. This is wrong.
+ *
+ * In reality, using the /32 prescaler doesn't work, as it is not supported!
+ * Use the /16 or /10 prescaler instead; see "MPC5121e Hardware Design Guide",
+ * Chapter 4.1 PSC in UART Mode.
+ * Calculate with a /16 prescaler here.
+ */
+
+ /* uartclk contains the ips freq */
+ baud = uart_get_baud_rate(port, new, old,
+ port->uartclk / (16 * 0xffff) + 1,
+ port->uartclk / 16);
+ divisor = (port->uartclk + 8 * baud) / (16 * baud);
+
+ /* enable the /16 prescaler and set the divisor */
+ mpc52xx_set_divisor(PSC(port), 0xdd00, divisor);
+ return baud;
}
#define DEFAULT_FIFO_SIZE 16
@@ -567,7 +655,7 @@ static struct psc_ops mpc512x_psc_ops = {
.read_char = mpc512x_psc_read_char,
.cw_disable_ints = mpc512x_psc_cw_disable_ints,
.cw_restore_ints = mpc512x_psc_cw_restore_ints,
- .getuartclk = mpc512x_getuartclk,
+ .set_baudrate = mpc512x_psc_set_baudrate,
.clock = mpc512x_psc_clock,
.fifoc_init = mpc512x_psc_fifoc_init,
.fifoc_uninit = mpc512x_psc_fifoc_uninit,
@@ -735,8 +823,8 @@ mpc52xx_uart_set_termios(struct uart_port *port, struct ktermios *new,
struct mpc52xx_psc __iomem *psc = PSC(port);
unsigned long flags;
unsigned char mr1, mr2;
- unsigned short ctr;
- unsigned int j, baud, quot;
+ unsigned int j;
+ unsigned int baud;
/* Prepare what we're gonna write */
mr1 = 0;
@@ -773,16 +861,9 @@ mpc52xx_uart_set_termios(struct uart_port *port, struct ktermios *new,
mr2 |= MPC52xx_PSC_MODE_TXCTS;
}
- baud = uart_get_baud_rate(port, new, old, 0, port->uartclk/16);
- quot = uart_get_divisor(port, baud);
- ctr = quot & 0xffff;
-
/* Get the lock */
spin_lock_irqsave(&port->lock, flags);
- /* Update the per-port timeout */
- uart_update_timeout(port, new->c_cflag, baud);
-
/* Do our best to flush TX & RX, so we don't lose anything */
/* But we don't wait indefinitely ! */
j = 5000000; /* Maximum wait */
@@ -806,8 +887,10 @@ mpc52xx_uart_set_termios(struct uart_port *port, struct ktermios *new,
out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1);
out_8(&psc->mode, mr1);
out_8(&psc->mode, mr2);
- out_8(&psc->ctur, ctr >> 8);
- out_8(&psc->ctlr, ctr & 0xff);
+ baud = psc_ops->set_baudrate(port, new, old);
+
+ /* Update the per-port timeout */
+ uart_update_timeout(port, new->c_cflag, baud);
if (UART_ENABLE_MS(port, new->c_cflag))
mpc52xx_uart_enable_ms(port);
@@ -1187,7 +1270,7 @@ mpc52xx_console_setup(struct console *co, char *options)
return ret;
}
- uartclk = psc_ops->getuartclk(np);
+ uartclk = mpc5xxx_get_bus_frequency(np);
if (uartclk == 0) {
pr_debug("Could not find uart clock frequency!\n");
return -EINVAL;
@@ -1270,6 +1353,7 @@ static struct uart_driver mpc52xx_uart_driver = {
static struct of_device_id mpc52xx_uart_of_match[] = {
#ifdef CONFIG_PPC_MPC52xx
+ { .compatible = "fsl,mpc5200b-psc-uart", .data = &mpc5200b_psc_ops, },
{ .compatible = "fsl,mpc5200-psc-uart", .data = &mpc52xx_psc_ops, },
/* binding used by old lite5200 device trees: */
{ .compatible = "mpc5200-psc-uart", .data = &mpc52xx_psc_ops, },
@@ -1302,7 +1386,10 @@ mpc52xx_uart_of_probe(struct of_device *op, const struct of_device_id *match)
pr_debug("Found %s assigned to ttyPSC%x\n",
mpc52xx_uart_nodes[idx]->full_name, idx);
- uartclk = psc_ops->getuartclk(op->node);
+ /* set the uart clock to the input clock of the PSC; the different
+ * prescalers are taken into account in the set_baudrate() methods
+ * of the respective chips */
+ uartclk = mpc5xxx_get_bus_frequency(op->node);
if (uartclk == 0) {
dev_dbg(&op->dev, "Could not find uart clock frequency!\n");
return -EINVAL;
@@ -1467,7 +1554,7 @@ mpc52xx_uart_init(void)
/*
* Map the PSC FIFO Controller and init if on MPC512x.
*/
- if (psc_ops->fifoc_init) {
+ if (psc_ops && psc_ops->fifoc_init) {
ret = psc_ops->fifoc_init();
if (ret)
return ret;
diff --git a/drivers/serial/mpsc.c b/drivers/serial/mpsc.c
index 55e113a0be03..6a9c6605666a 100644
--- a/drivers/serial/mpsc.c
+++ b/drivers/serial/mpsc.c
@@ -2071,6 +2071,7 @@ static int mpsc_drv_probe(struct platform_device *dev)
if (!(rc = mpsc_drv_map_regs(pi, dev))) {
mpsc_drv_get_platform_data(pi, dev, dev->id);
+ pi->port.dev = &dev->dev;
if (!(rc = mpsc_make_ready(pi))) {
spin_lock_init(&pi->tx_lock);
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index 8cfa5b12ea7a..dadd686c9801 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -89,7 +89,6 @@ struct serial_info {
int manfid;
int prodid;
int c950ctrl;
- dev_node_t node[4];
int line[4];
const struct serial_quirk *quirk;
};
@@ -289,8 +288,6 @@ static void serial_remove(struct pcmcia_device *link)
for (i = 0; i < info->ndev; i++)
serial8250_unregister_port(info->line[i]);
- info->p_dev->dev_node = NULL;
-
if (!info->slave)
pcmcia_disable_device(link);
}
@@ -343,7 +340,6 @@ static int serial_probe(struct pcmcia_device *link)
link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
link->io.NumPorts1 = 8;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
link->conf.Attributes = CONF_ENABLE_IRQ;
if (do_sound) {
link->conf.Attributes |= CONF_ENABLE_SPKR;
@@ -411,11 +407,6 @@ static int setup_serial(struct pcmcia_device *handle, struct serial_info * info,
}
info->line[info->ndev] = line;
- sprintf(info->node[info->ndev].dev_name, "ttyS%d", line);
- info->node[info->ndev].major = TTY_MAJOR;
- info->node[info->ndev].minor = 0x40 + line;
- if (info->ndev > 0)
- info->node[info->ndev - 1].next = &info->node[info->ndev];
info->ndev++;
return 0;
@@ -486,7 +477,7 @@ static int simple_config(struct pcmcia_device *link)
}
if (info->slave) {
return setup_serial(link, info, port,
- link->irq.AssignedIRQ);
+ link->irq);
}
}
@@ -507,10 +498,6 @@ static int simple_config(struct pcmcia_device *link)
return -1;
found_port:
- i = pcmcia_request_irq(link, &link->irq);
- if (i != 0)
- link->irq.AssignedIRQ = 0;
-
if (info->multi && (info->manfid == MANFID_3COM))
link->conf.ConfigIndex &= ~(0x08);
@@ -523,7 +510,7 @@ found_port:
i = pcmcia_request_configuration(link, &link->conf);
if (i != 0)
return -1;
- return setup_serial(link, info, link->io.BasePort1, link->irq.AssignedIRQ);
+ return setup_serial(link, info, link->io.BasePort1, link->irq);
}
static int multi_config_check(struct pcmcia_device *p_dev,
@@ -586,13 +573,9 @@ static int multi_config(struct pcmcia_device *link)
}
}
- i = pcmcia_request_irq(link, &link->irq);
- if (i != 0) {
- /* FIXME: comment does not fit, error handling does not fit */
- printk(KERN_NOTICE
- "serial_cs: no usable port range found, giving up\n");
- link->irq.AssignedIRQ = 0;
- }
+ if (!link->irq)
+ dev_warn(&link->dev,
+ "serial_cs: no usable IRQ found, continuing...\n");
/*
* Apply any configuration quirks.
@@ -615,11 +598,11 @@ static int multi_config(struct pcmcia_device *link)
if (link->conf.ConfigIndex == 1 ||
link->conf.ConfigIndex == 3) {
err = setup_serial(link, info, base2,
- link->irq.AssignedIRQ);
+ link->irq);
base2 = link->io.BasePort1;
} else {
err = setup_serial(link, info, link->io.BasePort1,
- link->irq.AssignedIRQ);
+ link->irq);
}
info->c950ctrl = base2;
@@ -633,10 +616,10 @@ static int multi_config(struct pcmcia_device *link)
return 0;
}
- setup_serial(link, info, link->io.BasePort1, link->irq.AssignedIRQ);
+ setup_serial(link, info, link->io.BasePort1, link->irq);
for (i = 0; i < info->multi - 1; i++)
setup_serial(link, info, base2 + (8 * i),
- link->irq.AssignedIRQ);
+ link->irq);
return 0;
}
@@ -720,7 +703,6 @@ static int serial_config(struct pcmcia_device * link)
if (info->quirk->post(link))
goto failed;
- link->dev_node = &info->node[0];
return 0;
failed:
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index 8eb094c1f61b..892573164cd4 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -83,16 +83,16 @@ struct sci_port {
/* Interface clock */
struct clk *iclk;
- /* Data clock */
- struct clk *dclk;
+ /* Function clock */
+ struct clk *fclk;
struct list_head node;
struct dma_chan *chan_tx;
struct dma_chan *chan_rx;
#ifdef CONFIG_SERIAL_SH_SCI_DMA
struct device *dma_dev;
- enum sh_dmae_slave_chan_id slave_tx;
- enum sh_dmae_slave_chan_id slave_rx;
+ unsigned int slave_tx;
+ unsigned int slave_rx;
struct dma_async_tx_descriptor *desc_tx;
struct dma_async_tx_descriptor *desc_rx[2];
dma_cookie_t cookie_tx;
@@ -107,6 +107,7 @@ struct sci_port {
struct work_struct work_tx;
struct work_struct work_rx;
struct timer_list rx_timer;
+ unsigned int rx_timeout;
#endif
};
@@ -150,7 +151,11 @@ static int sci_poll_get_char(struct uart_port *port)
handle_error(port);
continue;
}
- } while (!(status & SCxSR_RDxF(port)));
+ break;
+ } while (1);
+
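+	/* nothing received yet: return NO_POLL_CHAR so the polling caller (kgdb) can retry */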
+ if (!(status & SCxSR_RDxF(port)))
+ return NO_POLL_CHAR;
c = sci_in(port, SCxRDR);
@@ -674,22 +679,22 @@ static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
struct sci_port *s = to_sci_port(port);
if (s->chan_rx) {
- unsigned long tout;
u16 scr = sci_in(port, SCSCR);
u16 ssr = sci_in(port, SCxSR);
/* Disable future Rx interrupts */
- sci_out(port, SCSCR, scr & ~SCI_CTRL_FLAGS_RIE);
+ if (port->type == PORT_SCIFA) {
+ disable_irq_nosync(irq);
+ scr |= 0x4000;
+ } else {
+ scr &= ~SCI_CTRL_FLAGS_RIE;
+ }
+ sci_out(port, SCSCR, scr);
/* Clear current interrupt */
sci_out(port, SCxSR, ssr & ~(1 | SCxSR_RDxF(port)));
- /* Calculate delay for 1.5 DMA buffers */
- tout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 /
- port->fifosize / 2;
- dev_dbg(port->dev, "Rx IRQ: setup timeout in %lu ms\n",
- tout * 1000 / HZ);
- if (tout < 2)
- tout = 2;
- mod_timer(&s->rx_timer, jiffies + tout);
+ dev_dbg(port->dev, "Rx IRQ %lu: setup t-out in %u jiffies\n",
+ jiffies, s->rx_timeout);
+ mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
return IRQ_HANDLED;
}
@@ -799,7 +804,7 @@ static int sci_notifier(struct notifier_block *self,
(phase == CPUFREQ_RESUMECHANGE)) {
spin_lock_irqsave(&priv->lock, flags);
list_for_each_entry(sci_port, &priv->ports, node)
- sci_port->port.uartclk = clk_get_rate(sci_port->dclk);
+ sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
spin_unlock_irqrestore(&priv->lock, flags);
}
@@ -810,21 +815,17 @@ static void sci_clk_enable(struct uart_port *port)
{
struct sci_port *sci_port = to_sci_port(port);
- clk_enable(sci_port->dclk);
- sci_port->port.uartclk = clk_get_rate(sci_port->dclk);
-
- if (sci_port->iclk)
- clk_enable(sci_port->iclk);
+ clk_enable(sci_port->iclk);
+ sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
+ clk_enable(sci_port->fclk);
}
static void sci_clk_disable(struct uart_port *port)
{
struct sci_port *sci_port = to_sci_port(port);
- if (sci_port->iclk)
- clk_disable(sci_port->iclk);
-
- clk_disable(sci_port->dclk);
+ clk_disable(sci_port->fclk);
+ clk_disable(sci_port->iclk);
}
static int sci_request_irq(struct sci_port *port)
@@ -913,22 +914,26 @@ static void sci_dma_tx_complete(void *arg)
spin_lock_irqsave(&port->lock, flags);
- xmit->tail += s->sg_tx.length;
+ xmit->tail += sg_dma_len(&s->sg_tx);
xmit->tail &= UART_XMIT_SIZE - 1;
- port->icount.tx += s->sg_tx.length;
+ port->icount.tx += sg_dma_len(&s->sg_tx);
async_tx_ack(s->desc_tx);
s->cookie_tx = -EINVAL;
s->desc_tx = NULL;
- spin_unlock_irqrestore(&port->lock, flags);
-
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
- if (uart_circ_chars_pending(xmit))
+ if (!uart_circ_empty(xmit)) {
schedule_work(&s->work_tx);
+ } else if (port->type == PORT_SCIFA) {
+ u16 ctrl = sci_in(port, SCSCR);
+ sci_out(port, SCSCR, ctrl & ~SCI_CTRL_FLAGS_TIE);
+ }
+
+ spin_unlock_irqrestore(&port->lock, flags);
}
/* Locking: called with port lock held */
@@ -972,13 +977,13 @@ static void sci_dma_rx_complete(void *arg)
unsigned long flags;
int count;
- dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
+ dev_dbg(port->dev, "%s(%d) active #%d\n", __func__, port->line, s->active_rx);
spin_lock_irqsave(&port->lock, flags);
count = sci_dma_rx_push(s, tty, s->buf_len_rx);
- mod_timer(&s->rx_timer, jiffies + msecs_to_jiffies(5));
+ mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
spin_unlock_irqrestore(&port->lock, flags);
@@ -1050,6 +1055,8 @@ static void sci_submit_rx(struct sci_port *s)
sci_rx_dma_release(s, true);
return;
}
+ dev_dbg(s->port.dev, "%s(): cookie %d to #%d\n", __func__,
+ s->cookie_rx[i], i);
}
s->active_rx = s->cookie_rx[0];
@@ -1084,7 +1091,7 @@ static void work_fn_rx(struct work_struct *work)
unsigned long flags;
int count;
- chan->device->device_terminate_all(chan);
+ chan->device->device_control(chan, DMA_TERMINATE_ALL);
dev_dbg(port->dev, "Read %u bytes with cookie %d\n",
sh_desc->partial, sh_desc->cookie);
@@ -1107,10 +1114,10 @@ static void work_fn_rx(struct work_struct *work)
return;
}
- dev_dbg(port->dev, "%s: cookie %d #%d\n", __func__,
- s->cookie_rx[new], new);
-
s->active_rx = s->cookie_rx[!new];
+
+ dev_dbg(port->dev, "%s: cookie %d #%d, new active #%d\n", __func__,
+ s->cookie_rx[new], new, s->active_rx);
}
static void work_fn_tx(struct work_struct *work)
@@ -1131,14 +1138,13 @@ static void work_fn_tx(struct work_struct *work)
*/
spin_lock_irq(&port->lock);
sg->offset = xmit->tail & (UART_XMIT_SIZE - 1);
- sg->dma_address = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) +
+ sg_dma_address(sg) = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) +
sg->offset;
- sg->length = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
+ sg_dma_len(sg) = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE));
- sg->dma_length = sg->length;
spin_unlock_irq(&port->lock);
- BUG_ON(!sg->length);
+ BUG_ON(!sg_dma_len(sg));
desc = chan->device->device_prep_slave_sg(chan,
sg, s->sg_len_tx, DMA_TO_DEVICE,
@@ -1173,23 +1179,28 @@ static void work_fn_tx(struct work_struct *work)
static void sci_start_tx(struct uart_port *port)
{
+ struct sci_port *s = to_sci_port(port);
unsigned short ctrl;
#ifdef CONFIG_SERIAL_SH_SCI_DMA
- struct sci_port *s = to_sci_port(port);
-
- if (s->chan_tx) {
- if (!uart_circ_empty(&s->port.state->xmit) && s->cookie_tx < 0)
- schedule_work(&s->work_tx);
-
- return;
+ if (port->type == PORT_SCIFA) {
+ u16 new, scr = sci_in(port, SCSCR);
+ if (s->chan_tx)
+ new = scr | 0x8000;
+ else
+ new = scr & ~0x8000;
+ if (new != scr)
+ sci_out(port, SCSCR, new);
}
+ if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
+ s->cookie_tx < 0)
+ schedule_work(&s->work_tx);
#endif
-
- /* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
- ctrl = sci_in(port, SCSCR);
- ctrl |= SCI_CTRL_FLAGS_TIE;
- sci_out(port, SCSCR, ctrl);
+ if (!s->chan_tx || port->type == PORT_SCIFA) {
+ /* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
+ ctrl = sci_in(port, SCSCR);
+ sci_out(port, SCSCR, ctrl | SCI_CTRL_FLAGS_TIE);
+ }
}
static void sci_stop_tx(struct uart_port *port)
@@ -1198,6 +1209,8 @@ static void sci_stop_tx(struct uart_port *port)
/* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */
ctrl = sci_in(port, SCSCR);
+ if (port->type == PORT_SCIFA)
+ ctrl &= ~0x8000;
ctrl &= ~SCI_CTRL_FLAGS_TIE;
sci_out(port, SCSCR, ctrl);
}
@@ -1208,6 +1221,8 @@ static void sci_start_rx(struct uart_port *port)
/* Set RIE (Receive Interrupt Enable) bit in SCSCR */
ctrl |= sci_in(port, SCSCR);
+ if (port->type == PORT_SCIFA)
+ ctrl &= ~0x4000;
sci_out(port, SCSCR, ctrl);
}
@@ -1217,6 +1232,8 @@ static void sci_stop_rx(struct uart_port *port)
/* Clear RIE (Receive Interrupt Enable) bit in SCSCR */
ctrl = sci_in(port, SCSCR);
+ if (port->type == PORT_SCIFA)
+ ctrl &= ~0x4000;
ctrl &= ~(SCI_CTRL_FLAGS_RIE | SCI_CTRL_FLAGS_REIE);
sci_out(port, SCSCR, ctrl);
}
@@ -1251,8 +1268,12 @@ static void rx_timer_fn(unsigned long arg)
{
struct sci_port *s = (struct sci_port *)arg;
struct uart_port *port = &s->port;
-
u16 scr = sci_in(port, SCSCR);
+
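+	/* revert the SCIFA-specific changes made in sci_rx_interrupt(): clear the 0x4000 bit and re-enable the Rx IRQ */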
+ if (port->type == PORT_SCIFA) {
+ scr &= ~0x4000;
+ enable_irq(s->irqs[1]);
+ }
sci_out(port, SCSCR, scr | SCI_CTRL_FLAGS_RIE);
dev_dbg(port->dev, "DMA Rx timed out\n");
schedule_work(&s->work_rx);
@@ -1339,8 +1360,7 @@ static void sci_request_dma(struct uart_port *port)
sg_init_table(sg, 1);
sg_set_page(sg, virt_to_page(buf[i]), s->buf_len_rx,
(int)buf[i] & ~PAGE_MASK);
- sg->dma_address = dma[i];
- sg->dma_length = sg->length;
+ sg_dma_address(sg) = dma[i];
}
INIT_WORK(&s->work_rx, work_fn_rx);
@@ -1403,8 +1423,12 @@ static void sci_shutdown(struct uart_port *port)
static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
+#ifdef CONFIG_SERIAL_SH_SCI_DMA
+ struct sci_port *s = to_sci_port(port);
+#endif
unsigned int status, baud, smr_val, max_baud;
int t = -1;
+ u16 scfcr = 0;
/*
* earlyprintk comes here early on with port->uartclk set to zero.
@@ -1427,7 +1451,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
sci_out(port, SCSCR, 0x00); /* TE=0, RE=0, CKE1=0 */
if (port->type != PORT_SCI)
- sci_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);
+ sci_out(port, SCFCR, scfcr | SCFCR_RFRST | SCFCR_TFRST);
smr_val = sci_in(port, SCSMR) & 3;
if ((termios->c_cflag & CSIZE) == CS7)
@@ -1458,10 +1482,32 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
}
sci_init_pins(port, termios->c_cflag);
- sci_out(port, SCFCR, (termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0);
+ sci_out(port, SCFCR, scfcr | ((termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0));
sci_out(port, SCSCR, SCSCR_INIT(port));
+#ifdef CONFIG_SERIAL_SH_SCI_DMA
+ /*
+ * Calculate delay for 1.5 DMA buffers: see
+ * drivers/serial/serial_core.c::uart_update_timeout(). With 10 bits
+ * (CS8), 250Hz, 115200 baud and 64 bytes FIFO, the above function
+ * calculates 1 jiffy for the data plus 5 jiffies for the "slop(e)."
+ * Then below we calculate 3 jiffies (12ms) for 1.5 DMA buffers (3 FIFO
+ * sizes), but it has been found experimentally that this is not
+ * enough: the driver too often needlessly runs on a DMA timeout. 20ms
+ * as a minimum seems to work perfectly.
+ */
+ if (s->chan_rx) {
+ s->rx_timeout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 /
+ port->fifosize / 2;
+ dev_dbg(port->dev,
+ "DMA Rx t-out %ums, tty t-out %u jiffies\n",
+ s->rx_timeout * 1000 / HZ, port->timeout);
+ if (s->rx_timeout < msecs_to_jiffies(20))
+ s->rx_timeout = msecs_to_jiffies(20);
+ }
+#endif
+
if ((termios->c_cflag & CREAD) != 0)
sci_start_rx(port);
}
@@ -1553,10 +1599,10 @@ static struct uart_ops sci_uart_ops = {
#endif
};
-static void __devinit sci_init_single(struct platform_device *dev,
- struct sci_port *sci_port,
- unsigned int index,
- struct plat_sci_port *p)
+static int __devinit sci_init_single(struct platform_device *dev,
+ struct sci_port *sci_port,
+ unsigned int index,
+ struct plat_sci_port *p)
{
struct uart_port *port = &sci_port->port;
@@ -1577,8 +1623,23 @@ static void __devinit sci_init_single(struct platform_device *dev,
}
if (dev) {
- sci_port->iclk = p->clk ? clk_get(&dev->dev, p->clk) : NULL;
- sci_port->dclk = clk_get(&dev->dev, "peripheral_clk");
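+		/* interface clock: try the port-specific "sci_ick" first, then fall back to the previously used "peripheral_clk" */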
+ sci_port->iclk = clk_get(&dev->dev, "sci_ick");
+ if (IS_ERR(sci_port->iclk)) {
+ sci_port->iclk = clk_get(&dev->dev, "peripheral_clk");
+ if (IS_ERR(sci_port->iclk)) {
+ dev_err(&dev->dev, "can't get iclk\n");
+ return PTR_ERR(sci_port->iclk);
+ }
+ }
+
+ /*
+ * The function clock is optional; ignore it if we can't
+ * find it.
+ */
+ sci_port->fclk = clk_get(&dev->dev, "sci_fck");
+ if (IS_ERR(sci_port->fclk))
+ sci_port->fclk = NULL;
+
sci_port->enable = sci_clk_enable;
sci_port->disable = sci_clk_disable;
port->dev = &dev->dev;
@@ -1605,6 +1666,7 @@ static void __devinit sci_init_single(struct platform_device *dev,
#endif
memcpy(&sci_port->irqs, &p->irqs, sizeof(p->irqs));
+ return 0;
}
#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
@@ -1754,8 +1816,11 @@ static int sci_remove(struct platform_device *dev)
cpufreq_unregister_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER);
spin_lock_irqsave(&priv->lock, flags);
- list_for_each_entry(p, &priv->ports, node)
+ list_for_each_entry(p, &priv->ports, node) {
uart_remove_one_port(&sci_uart_driver, &p->port);
+ clk_put(p->iclk);
+ clk_put(p->fclk);
+ }
spin_unlock_irqrestore(&priv->lock, flags);
kfree(priv);
@@ -1781,7 +1846,9 @@ static int __devinit sci_probe_single(struct platform_device *dev,
return 0;
}
- sci_init_single(dev, sciport, index, p);
+ ret = sci_init_single(dev, sciport, index, p);
+ if (ret)
+ return ret;
ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
if (ret)
diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c
index 2c7a66af4f52..978b3cee02d7 100644
--- a/drivers/serial/sunzilog.c
+++ b/drivers/serial/sunzilog.c
@@ -102,6 +102,8 @@ struct uart_sunzilog_port {
#endif
};
+static void sunzilog_putchar(struct uart_port *port, int ch);
+
#define ZILOG_CHANNEL_FROM_PORT(PORT) ((struct zilog_channel __iomem *)((PORT)->membase))
#define UART_ZILOG(PORT) ((struct uart_sunzilog_port *)(PORT))
@@ -996,6 +998,50 @@ static int sunzilog_verify_port(struct uart_port *port, struct serial_struct *se
return -EINVAL;
}
+#ifdef CONFIG_CONSOLE_POLL
+static int sunzilog_get_poll_char(struct uart_port *port)
+{
+ unsigned char ch, r1;
+ struct uart_sunzilog_port *up = (struct uart_sunzilog_port *) port;
+ struct zilog_channel __iomem *channel
+ = ZILOG_CHANNEL_FROM_PORT(&up->port);
+
+
+ r1 = read_zsreg(channel, R1);
+ if (r1 & (PAR_ERR | Rx_OVR | CRC_ERR)) {
+ writeb(ERR_RES, &channel->control);
+ ZSDELAY();
+ ZS_WSYNC(channel);
+ }
+
+ ch = readb(&channel->control);
+ ZSDELAY();
+
+ /* This funny hack depends upon BRK_ABRT not interfering
+ * with the other bits we care about in R1.
+ */
+ if (ch & BRK_ABRT)
+ r1 |= BRK_ABRT;
+
+ if (!(ch & Rx_CH_AV))
+ return NO_POLL_CHAR;
+
+ ch = readb(&channel->data);
+ ZSDELAY();
+
+ ch &= up->parity_mask;
+ return ch;
+}
+
+static void sunzilog_put_poll_char(struct uart_port *port,
+ unsigned char ch)
+{
+ struct uart_sunzilog_port *up = (struct uart_sunzilog_port *)port;
+
+ sunzilog_putchar(&up->port, ch);
+}
+#endif /* CONFIG_CONSOLE_POLL */
+
static struct uart_ops sunzilog_pops = {
.tx_empty = sunzilog_tx_empty,
.set_mctrl = sunzilog_set_mctrl,
@@ -1013,6 +1059,10 @@ static struct uart_ops sunzilog_pops = {
.request_port = sunzilog_request_port,
.config_port = sunzilog_config_port,
.verify_port = sunzilog_verify_port,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_get_char = sunzilog_get_poll_char,
+ .poll_put_char = sunzilog_put_poll_char,
+#endif
};
static int uart_chip_count;
diff --git a/drivers/sh/Kconfig b/drivers/sh/Kconfig
new file mode 100644
index 000000000000..a54de0b9b3df
--- /dev/null
+++ b/drivers/sh/Kconfig
@@ -0,0 +1,24 @@
+config INTC_USERIMASK
+ bool "Userspace interrupt masking support"
+ depends on ARCH_SHMOBILE || (SUPERH && CPU_SH4A)
+ help
+ This enables support for hardware-assisted userspace hardirq
+ masking.
+
+ SH-4A and newer interrupt blocks all support a special shadowed
+ page with all non-masking registers obscured when mapped into
+ userspace. This is primarily for use by userspace device
+ drivers that use special priority levels.
+
+ If in doubt, say N.
+
+config INTC_BALANCING
+ bool "Hardware IRQ balancing support"
+ depends on SMP && SUPERH && CPU_SUBTYPE_SH7786
+ help
+ This enables support for IRQ auto-distribution mode on SH-X3
+ SMP parts. All of the balancing and CPU wakeup decisions are
+ taken care of automatically by hardware for distributed
+ vectors.
+
+ If in doubt, say N.
diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c
index 94ad6bd86a00..c585574b9aed 100644
--- a/drivers/sh/intc.c
+++ b/drivers/sh/intc.c
@@ -28,6 +28,7 @@
#include <linux/topology.h>
#include <linux/bitmap.h>
#include <linux/cpumask.h>
+#include <asm/sizes.h>
#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
@@ -45,6 +46,12 @@ struct intc_handle_int {
unsigned long handle;
};
+struct intc_window {
+ phys_addr_t phys;
+ void __iomem *virt;
+ unsigned long size;
+};
+
struct intc_desc_int {
struct list_head list;
struct sys_device sysdev;
@@ -58,6 +65,8 @@ struct intc_desc_int {
unsigned int nr_prio;
struct intc_handle_int *sense;
unsigned int nr_sense;
+ struct intc_window *window;
+ unsigned int nr_windows;
struct irq_chip chip;
};
@@ -87,8 +96,12 @@ static DEFINE_SPINLOCK(vector_lock);
#define SMP_NR(d, x) 1
#endif
-static unsigned int intc_prio_level[NR_IRQS]; /* for now */
+static unsigned int intc_prio_level[NR_IRQS]; /* for now */
+static unsigned int default_prio_level = 2; /* 2 - 16 */
static unsigned long ack_handle[NR_IRQS];
+#ifdef CONFIG_INTC_BALANCING
+static unsigned long dist_handle[NR_IRQS];
+#endif
static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
{
@@ -96,6 +109,47 @@ static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
return container_of(chip, struct intc_desc_int, chip);
}
+static unsigned long intc_phys_to_virt(struct intc_desc_int *d,
+ unsigned long address)
+{
+ struct intc_window *window;
+ int k;
+
+ /* scan through physical windows and convert address */
+ for (k = 0; k < d->nr_windows; k++) {
+ window = d->window + k;
+
+ if (address < window->phys)
+ continue;
+
+ if (address >= (window->phys + window->size))
+ continue;
+
+ address -= window->phys;
+ address += (unsigned long)window->virt;
+
+ return address;
+ }
+
+ /* no window matched; assume the register is 1:1 mapped virt:phys */
+ return address;
+}
+
+static unsigned int intc_get_reg(struct intc_desc_int *d, unsigned long address)
+{
+ unsigned int k;
+
+ address = intc_phys_to_virt(d, address);
+
+ for (k = 0; k < d->nr_reg; k++) {
+ if (d->reg[k] == address)
+ return k;
+ }
+
+ BUG();
+ return 0;
+}
+
static inline unsigned int set_field(unsigned int value,
unsigned int field_value,
unsigned int handle)
@@ -229,6 +283,85 @@ static void (*intc_disable_fns[])(unsigned long addr,
[MODE_PCLR_REG] = intc_mode_field,
};
+#ifdef CONFIG_INTC_BALANCING
+static inline void intc_balancing_enable(unsigned int irq)
+{
+ struct intc_desc_int *d = get_intc_desc(irq);
+ unsigned long handle = dist_handle[irq];
+ unsigned long addr;
+
+ if (irq_balancing_disabled(irq) || !handle)
+ return;
+
+ addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
+ intc_reg_fns[_INTC_FN(handle)](addr, handle, 1);
+}
+
+static inline void intc_balancing_disable(unsigned int irq)
+{
+ struct intc_desc_int *d = get_intc_desc(irq);
+ unsigned long handle = dist_handle[irq];
+ unsigned long addr;
+
+ if (irq_balancing_disabled(irq) || !handle)
+ return;
+
+ addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
+ intc_reg_fns[_INTC_FN(handle)](addr, handle, 0);
+}
+
+static unsigned int intc_dist_data(struct intc_desc *desc,
+ struct intc_desc_int *d,
+ intc_enum enum_id)
+{
+ struct intc_mask_reg *mr = desc->hw.mask_regs;
+ unsigned int i, j, fn, mode;
+ unsigned long reg_e, reg_d;
+
+ for (i = 0; mr && enum_id && i < desc->hw.nr_mask_regs; i++) {
+ mr = desc->hw.mask_regs + i;
+
+ /*
+ * Skip this entry if there's no auto-distribution
+ * register associated with it.
+ */
+ if (!mr->dist_reg)
+ continue;
+
+ for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
+ if (mr->enum_ids[j] != enum_id)
+ continue;
+
+ fn = REG_FN_MODIFY_BASE;
+ mode = MODE_ENABLE_REG;
+ reg_e = mr->dist_reg;
+ reg_d = mr->dist_reg;
+
+ fn += (mr->reg_width >> 3) - 1;
+ return _INTC_MK(fn, mode,
+ intc_get_reg(d, reg_e),
+ intc_get_reg(d, reg_d),
+ 1,
+ (mr->reg_width - 1) - j);
+ }
+ }
+
+ /*
+ * It's possible we've gotten here with no distribution options
+ * available for the IRQ in question, so we just skip over those.
+ */
+ return 0;
+}
+#else
+static inline void intc_balancing_enable(unsigned int irq)
+{
+}
+
+static inline void intc_balancing_disable(unsigned int irq)
+{
+}
+#endif
+
static inline void _intc_enable(unsigned int irq, unsigned long handle)
{
struct intc_desc_int *d = get_intc_desc(irq);
@@ -244,6 +377,8 @@ static inline void _intc_enable(unsigned int irq, unsigned long handle)
intc_enable_fns[_INTC_MODE(handle)](addr, handle, intc_reg_fns\
[_INTC_FN(handle)], irq);
}
+
+ intc_balancing_enable(irq);
}
static void intc_enable(unsigned int irq)
@@ -254,10 +389,12 @@ static void intc_enable(unsigned int irq)
static void intc_disable(unsigned int irq)
{
struct intc_desc_int *d = get_intc_desc(irq);
- unsigned long handle = (unsigned long) get_irq_chip_data(irq);
+ unsigned long handle = (unsigned long)get_irq_chip_data(irq);
unsigned long addr;
unsigned int cpu;
+ intc_balancing_disable(irq);
+
for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
#ifdef CONFIG_SMP
if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
@@ -336,8 +473,7 @@ static void intc_mask_ack(unsigned int irq)
intc_disable(irq);
- /* read register and write zero only to the assocaited bit */
-
+ /* read register and write zero only to the associated bit */
if (handle) {
addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
switch (_INTC_FN(handle)) {
@@ -366,7 +502,8 @@ static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
{
int i;
- /* this doesn't scale well, but...
+ /*
+ * this doesn't scale well, but...
*
* this function should only be used for cerain uncommon
* operations such as intc_set_priority() and intc_set_sense()
@@ -377,7 +514,6 @@ static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
* memory footprint down is to make sure the array is sorted
* and then perform a bisect to lookup the irq.
*/
-
for (i = 0; i < nr_hp; i++) {
if ((hp + i)->irq != irq)
continue;
@@ -408,7 +544,6 @@ int intc_set_priority(unsigned int irq, unsigned int prio)
* primary masking method is using intc_prio_level[irq]
* priority level will be set during next enable()
*/
-
if (_INTC_FN(ihp->handle) != REG_FN_ERR)
_intc_enable(irq, ihp->handle);
}
@@ -447,20 +582,6 @@ static int intc_set_sense(unsigned int irq, unsigned int type)
return 0;
}
-static unsigned int __init intc_get_reg(struct intc_desc_int *d,
- unsigned long address)
-{
- unsigned int k;
-
- for (k = 0; k < d->nr_reg; k++) {
- if (d->reg[k] == address)
- return k;
- }
-
- BUG();
- return 0;
-}
-
static intc_enum __init intc_grp_id(struct intc_desc *desc,
intc_enum enum_id)
{
@@ -718,13 +839,14 @@ static void __init intc_register_irq(struct intc_desc *desc,
*/
set_bit(irq, intc_irq_map);
- /* Prefer single interrupt source bitmap over other combinations:
+ /*
+ * Prefer single interrupt source bitmap over other combinations:
+ *
* 1. bitmap, single interrupt source
* 2. priority, single interrupt source
* 3. bitmap, multiple interrupt sources (groups)
* 4. priority, multiple interrupt sources (groups)
*/
-
data[0] = intc_mask_data(desc, d, enum_id, 0);
data[1] = intc_prio_data(desc, d, enum_id, 0);
@@ -749,10 +871,11 @@ static void __init intc_register_irq(struct intc_desc *desc,
handle_level_irq, "level");
set_irq_chip_data(irq, (void *)data[primary]);
- /* set priority level
+ /*
+ * set priority level
* - this needs to be at least 2 for 5-bit priorities on 7780
*/
- intc_prio_level[irq] = 2;
+ intc_prio_level[irq] = default_prio_level;
/* enable secondary masking method if present */
if (data[!primary])
@@ -769,7 +892,6 @@ static void __init intc_register_irq(struct intc_desc *desc,
* only secondary priority should access registers, so
* set _INTC_FN(h) = REG_FN_ERR for intc_set_priority()
*/
-
hp->handle &= ~_INTC_MK(0x0f, 0, 0, 0, 0, 0);
hp->handle |= _INTC_MK(REG_FN_ERR, 0, 0, 0, 0, 0);
}
@@ -790,6 +912,11 @@ static void __init intc_register_irq(struct intc_desc *desc,
if (desc->hw.ack_regs)
ack_handle[irq] = intc_ack_data(desc, d, enum_id);
+#ifdef CONFIG_INTC_BALANCING
+ if (desc->hw.mask_regs)
+ dist_handle[irq] = intc_dist_data(desc, d, enum_id);
+#endif
+
#ifdef CONFIG_ARM
set_irq_flags(irq, IRQF_VALID); /* Enable IRQ on ARM systems */
#endif
@@ -801,6 +928,8 @@ static unsigned int __init save_reg(struct intc_desc_int *d,
unsigned int smp)
{
if (value) {
+ value = intc_phys_to_virt(d, value);
+
d->reg[cnt] = value;
#ifdef CONFIG_SMP
d->smp[cnt] = smp;
@@ -816,25 +945,59 @@ static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc)
generic_handle_irq((unsigned int)get_irq_data(irq));
}
-void __init register_intc_controller(struct intc_desc *desc)
+int __init register_intc_controller(struct intc_desc *desc)
{
unsigned int i, k, smp;
struct intc_hw_desc *hw = &desc->hw;
struct intc_desc_int *d;
+ struct resource *res;
+
+ pr_info("intc: Registered controller '%s' with %u IRQs\n",
+ desc->name, hw->nr_vectors);
d = kzalloc(sizeof(*d), GFP_NOWAIT);
+ if (!d)
+ goto err0;
INIT_LIST_HEAD(&d->list);
list_add(&d->list, &intc_list);
+ if (desc->num_resources) {
+ d->nr_windows = desc->num_resources;
+ d->window = kzalloc(d->nr_windows * sizeof(*d->window),
+ GFP_NOWAIT);
+ if (!d->window)
+ goto err1;
+
+ for (k = 0; k < d->nr_windows; k++) {
+ res = desc->resource + k;
+ WARN_ON(resource_type(res) != IORESOURCE_MEM);
+ d->window[k].phys = res->start;
+ d->window[k].size = resource_size(res);
+ d->window[k].virt = ioremap_nocache(res->start,
+ resource_size(res));
+ if (!d->window[k].virt)
+ goto err2;
+ }
+ }
+
d->nr_reg = hw->mask_regs ? hw->nr_mask_regs * 2 : 0;
+#ifdef CONFIG_INTC_BALANCING
+ if (d->nr_reg)
+ d->nr_reg += hw->nr_mask_regs;
+#endif
d->nr_reg += hw->prio_regs ? hw->nr_prio_regs * 2 : 0;
d->nr_reg += hw->sense_regs ? hw->nr_sense_regs : 0;
d->nr_reg += hw->ack_regs ? hw->nr_ack_regs : 0;
d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT);
+ if (!d->reg)
+ goto err2;
+
#ifdef CONFIG_SMP
d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT);
+ if (!d->smp)
+ goto err3;
#endif
k = 0;
@@ -843,12 +1006,17 @@ void __init register_intc_controller(struct intc_desc *desc)
smp = IS_SMP(hw->mask_regs[i]);
k += save_reg(d, k, hw->mask_regs[i].set_reg, smp);
k += save_reg(d, k, hw->mask_regs[i].clr_reg, smp);
+#ifdef CONFIG_INTC_BALANCING
+ k += save_reg(d, k, hw->mask_regs[i].dist_reg, 0);
+#endif
}
}
if (hw->prio_regs) {
d->prio = kzalloc(hw->nr_vectors * sizeof(*d->prio),
GFP_NOWAIT);
+ if (!d->prio)
+ goto err4;
for (i = 0; i < hw->nr_prio_regs; i++) {
smp = IS_SMP(hw->prio_regs[i]);
@@ -860,6 +1028,8 @@ void __init register_intc_controller(struct intc_desc *desc)
if (hw->sense_regs) {
d->sense = kzalloc(hw->nr_vectors * sizeof(*d->sense),
GFP_NOWAIT);
+ if (!d->sense)
+ goto err5;
for (i = 0; i < hw->nr_sense_regs; i++)
k += save_reg(d, k, hw->sense_regs[i].reg, 0);
@@ -906,7 +1076,7 @@ void __init register_intc_controller(struct intc_desc *desc)
irq_desc = irq_to_desc_alloc_node(irq, numa_node_id());
if (unlikely(!irq_desc)) {
- pr_info("can't get irq_desc for %d\n", irq);
+ pr_err("can't get irq_desc for %d\n", irq);
continue;
}
@@ -926,7 +1096,7 @@ void __init register_intc_controller(struct intc_desc *desc)
*/
irq_desc = irq_to_desc_alloc_node(irq2, numa_node_id());
if (unlikely(!irq_desc)) {
- pr_info("can't get irq_desc for %d\n", irq2);
+ pr_err("can't get irq_desc for %d\n", irq2);
continue;
}
@@ -942,8 +1112,100 @@ void __init register_intc_controller(struct intc_desc *desc)
/* enable bits matching force_enable after registering irqs */
if (desc->force_enable)
intc_enable_disable_enum(desc, d, desc->force_enable, 1);
+
+ return 0;
+err5:
+ kfree(d->prio);
+err4:
+#ifdef CONFIG_SMP
+ kfree(d->smp);
+err3:
+#endif
+ kfree(d->reg);
+err2:
+ for (k = 0; k < d->nr_windows; k++)
+ if (d->window[k].virt)
+ iounmap(d->window[k].virt);
+
+ kfree(d->window);
+err1:
+ kfree(d);
+err0:
+ pr_err("unable to allocate INTC memory\n");
+
+ return -ENOMEM;
+}
+
+#ifdef CONFIG_INTC_USERIMASK
+static void __iomem *uimask;
+
+int register_intc_userimask(unsigned long addr)
+{
+ if (unlikely(uimask))
+ return -EBUSY;
+
+ uimask = ioremap_nocache(addr, SZ_4K);
+ if (unlikely(!uimask))
+ return -ENOMEM;
+
+ pr_info("intc: userimask support registered for levels 0 -> %d\n",
+ default_prio_level - 1);
+
+ return 0;
+}
+
+static ssize_t
+show_intc_userimask(struct sysdev_class *cls,
+ struct sysdev_class_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", (__raw_readl(uimask) >> 4) & 0xf);
+}
+
+static ssize_t
+store_intc_userimask(struct sysdev_class *cls,
+ struct sysdev_class_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long level;
+
+ level = simple_strtoul(buf, NULL, 10);
+
+ /*
+ * Minimal acceptable IRQ levels are in the 2 - 16 range, but
+ * these are chomped so as not to interfere with normal IRQs.
+ *
+ * Level 1 is a special case on some CPUs in that it's not
+ * directly settable, but given that USERIMASK cuts off below a
+ * certain level, we don't care about this limitation here.
+ * Level 0 on the other hand equates to user masking disabled.
+ *
+ * We use default_prio_level as a cut off so that only special
+ * case opt-in IRQs can be mangled.
+ */
+ if (level >= default_prio_level)
+ return -EINVAL;
+
+ __raw_writel(0xa5 << 24 | level << 4, uimask);
+
+ return count;
}
+static SYSDEV_CLASS_ATTR(userimask, S_IRUSR | S_IWUSR,
+ show_intc_userimask, store_intc_userimask);
+#endif
+
+static ssize_t
+show_intc_name(struct sys_device *dev, struct sysdev_attribute *attr, char *buf)
+{
+ struct intc_desc_int *d;
+
+ d = container_of(dev, struct intc_desc_int, sysdev);
+
+ return sprintf(buf, "%s\n", d->chip.name);
+}
+
+static SYSDEV_ATTR(name, S_IRUGO, show_intc_name, NULL);
+
static int intc_suspend(struct sys_device *dev, pm_message_t state)
{
struct intc_desc_int *d;
@@ -1003,19 +1265,28 @@ static int __init register_intc_sysdevs(void)
int id = 0;
error = sysdev_class_register(&intc_sysdev_class);
+#ifdef CONFIG_INTC_USERIMASK
+ if (!error && uimask)
+ error = sysdev_class_create_file(&intc_sysdev_class,
+ &attr_userimask);
+#endif
if (!error) {
list_for_each_entry(d, &intc_list, list) {
d->sysdev.id = id;
d->sysdev.cls = &intc_sysdev_class;
error = sysdev_register(&d->sysdev);
+ if (error == 0)
+ error = sysdev_create_file(&d->sysdev,
+ &attr_name);
if (error)
break;
+
id++;
}
}
if (error)
- pr_warning("intc: sysdev registration error\n");
+ pr_err("intc: sysdev registration error\n");
return error;
}
@@ -1048,7 +1319,7 @@ unsigned int create_irq_nr(unsigned int irq_want, int node)
desc = irq_to_desc_alloc_node(new, node);
if (unlikely(!desc)) {
- pr_info("can't get irq_desc for %d\n", new);
+ pr_err("can't get irq_desc for %d\n", new);
goto out_unlock;
}
diff --git a/drivers/spi/spi_mpc8xxx.c b/drivers/spi/spi_mpc8xxx.c
index 14d052316502..e324627d97a2 100644
--- a/drivers/spi/spi_mpc8xxx.c
+++ b/drivers/spi/spi_mpc8xxx.c
@@ -640,7 +640,7 @@ static int mpc8xxx_spi_setup(struct spi_device *spi)
}
mpc8xxx_spi = spi_master_get_devdata(spi->master);
- hw_mode = cs->hw_mode; /* Save orginal settings */
+ hw_mode = cs->hw_mode; /* Save original settings */
cs->hw_mode = mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->mode);
/* mask out bits we are going to set */
cs->hw_mode &= ~(SPMODE_CP_BEGIN_EDGECLK | SPMODE_CI_INACTIVEHIGH
diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c
index 9681536163ca..59ae76bace14 100644
--- a/drivers/ssb/driver_chipcommon.c
+++ b/drivers/ssb/driver_chipcommon.c
@@ -233,6 +233,8 @@ void ssb_chipcommon_init(struct ssb_chipcommon *cc)
{
if (!cc->dev)
return; /* We don't have a ChipCommon */
+ if (cc->dev->id.revision >= 11)
+ cc->status = chipco_read32(cc, SSB_CHIPCO_CHIPSTAT);
ssb_pmu_init(cc);
chipco_powercontrol_init(cc);
ssb_chipco_set_clockmode(cc, SSB_CLKMODE_FAST);
@@ -370,6 +372,7 @@ u32 ssb_chipco_gpio_control(struct ssb_chipcommon *cc, u32 mask, u32 value)
{
return chipco_write32_masked(cc, SSB_CHIPCO_GPIOCTL, mask, value);
}
+EXPORT_SYMBOL(ssb_chipco_gpio_control);
u32 ssb_chipco_gpio_intmask(struct ssb_chipcommon *cc, u32 mask, u32 value)
{
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 80ff7d9e60de..51275aac5b34 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -490,7 +490,7 @@ static int ssb_devices_register(struct ssb_bus *bus)
break;
case SSB_BUSTYPE_PCMCIA:
#ifdef CONFIG_SSB_PCMCIAHOST
- sdev->irq = bus->host_pcmcia->irq.AssignedIRQ;
+ sdev->irq = bus->host_pcmcia->irq;
dev->parent = &bus->host_pcmcia->dev;
#endif
break;
@@ -834,6 +834,9 @@ int ssb_bus_pcibus_register(struct ssb_bus *bus,
if (!err) {
ssb_printk(KERN_INFO PFX "Sonics Silicon Backplane found on "
"PCI device %s\n", dev_name(&host_pci->dev));
+ } else {
+ ssb_printk(KERN_ERR PFX "Failed to register PCI version"
+ " of SSB with error %d\n", err);
}
return err;
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index a8dbb06623c9..989e2752cc36 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -168,7 +168,7 @@ err_pci:
}
/* Get the word-offset for a SSB_SPROM_XXX define. */
-#define SPOFF(offset) (((offset) - SSB_SPROM_BASE) / sizeof(u16))
+#define SPOFF(offset) ((offset) / sizeof(u16))
/* Helper to extract some _offset, which is one of the SSB_SPROM_XXX defines. */
#define SPEX16(_outvar, _offset, _mask, _shift) \
out->_outvar = ((in[SPOFF(_offset)] & (_mask)) >> (_shift))
@@ -254,7 +254,7 @@ static int sprom_do_read(struct ssb_bus *bus, u16 *sprom)
int i;
for (i = 0; i < bus->sprom_size; i++)
- sprom[i] = ioread16(bus->mmio + SSB_SPROM_BASE + (i * 2));
+ sprom[i] = ioread16(bus->mmio + bus->sprom_offset + (i * 2));
return 0;
}
@@ -285,7 +285,7 @@ static int sprom_do_write(struct ssb_bus *bus, const u16 *sprom)
ssb_printk("75%%");
else if (i % 2)
ssb_printk(".");
- writew(sprom[i], bus->mmio + SSB_SPROM_BASE + (i * 2));
+ writew(sprom[i], bus->mmio + bus->sprom_offset + (i * 2));
mmiowb();
msleep(20);
}
@@ -621,6 +621,14 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
int err = -ENOMEM;
u16 *buf;
+ if (!ssb_is_sprom_available(bus)) {
+ ssb_printk(KERN_ERR PFX "No SPROM available!\n");
+ return -ENODEV;
+ }
+
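+	/* the SPROM base moved on chipcommon rev >= 31, so select the offset by revision */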
+ bus->sprom_offset = (bus->chipco.dev->id.revision < 31) ?
+ SSB_SPROM_BASE1 : SSB_SPROM_BASE31;
+
buf = kcalloc(SSB_SPROMSIZE_WORDS_R123, sizeof(u16), GFP_KERNEL);
if (!buf)
goto out;
diff --git a/drivers/ssb/sprom.c b/drivers/ssb/sprom.c
index f2f920fef10d..007bc3a03486 100644
--- a/drivers/ssb/sprom.c
+++ b/drivers/ssb/sprom.c
@@ -176,3 +176,17 @@ const struct ssb_sprom *ssb_get_fallback_sprom(void)
{
return fallback_sprom;
}
+
+/* http://bcm-v4.sipsolutions.net/802.11/IsSpromAvailable */
+bool ssb_is_sprom_available(struct ssb_bus *bus)
+{
+	/* the status register only exists on chipcommon rev >= 11, and we only
+	   need to check for rev >= 31 here */
+	/* this routine differs from the specs as we do not access the SPROM
+	   directly on PCMCIA */
+ if (bus->bustype == SSB_BUSTYPE_PCI &&
+ bus->chipco.dev->id.revision >= 31)
+ return bus->chipco.capabilities & SSB_CHIPCO_CAP_SPROM;
+
+ return true;
+}
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 7696a664f8a5..5589616082e7 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -49,6 +49,8 @@ source "drivers/staging/go7007/Kconfig"
source "drivers/staging/cx25821/Kconfig"
+source "drivers/staging/tm6000/Kconfig"
+
source "drivers/staging/usbip/Kconfig"
source "drivers/staging/winbond/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index ea2e70e2fed4..ec45d4bb8c11 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_ET131X) += et131x/
obj-$(CONFIG_SLICOSS) += slicoss/
obj-$(CONFIG_VIDEO_GO7007) += go7007/
obj-$(CONFIG_VIDEO_CX25821) += cx25821/
+obj-$(CONFIG_VIDEO_TM6000) += tm6000/
obj-$(CONFIG_USB_IP_COMMON) += usbip/
obj-$(CONFIG_W35UND) += winbond/
obj-$(CONFIG_PRISM2_USB) += wlan-ng/
diff --git a/drivers/staging/arlan/arlan-main.c b/drivers/staging/arlan/arlan-main.c
index 88fdd53cf5d3..80284522c42b 100644
--- a/drivers/staging/arlan/arlan-main.c
+++ b/drivers/staging/arlan/arlan-main.c
@@ -1458,7 +1458,7 @@ static void arlan_rx_interrupt(struct net_device *dev, u_char rxStatus, u_short
!netdev_mc_empty(dev))
{
char hw_dst_addr[6];
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int i;
memcpy_fromio(hw_dst_addr, arlan->ultimateDestAddress, 6);
@@ -1469,12 +1469,13 @@ static void arlan_rx_interrupt(struct net_device *dev, u_char rxStatus, u_short
printk(KERN_ERR "%s mcast 0x0100 \n", dev->name);
else if (hw_dst_addr[1] == 0x40)
printk(KERN_ERR "%s m/bcast 0x0140 \n", dev->name);
- netdev_for_each_mc_entry(dmi, dev) {
+ netdev_for_each_mc_entry(ha, dev) {
if (arlan_debug & ARLAN_DEBUG_HEADER_DUMP)
printk(KERN_ERR "%s mcl %pM\n",
- dev->name, dmi->dmi_addr);
+ dev->name,
+ ha->addr);
for (i = 0; i < 6; i++)
- if (dmi->dmi_addr[i] != hw_dst_addr[i])
+ if (ha->addr[i] != hw_dst_addr[i])
break;
if (i == 6)
break;
diff --git a/drivers/staging/comedi/drivers/cb_das16_cs.c b/drivers/staging/comedi/drivers/cb_das16_cs.c
index 5632991760af..30b522c0bf2c 100644
--- a/drivers/staging/comedi/drivers/cb_das16_cs.c
+++ b/drivers/staging/comedi/drivers/cb_das16_cs.c
@@ -180,12 +180,12 @@ static int das16cs_attach(struct comedi_device *dev,
}
printk("\n");
- ret = request_irq(link->irq.AssignedIRQ, das16cs_interrupt,
+ ret = request_irq(link->irq, das16cs_interrupt,
IRQF_SHARED, "cb_das16_cs", dev);
if (ret < 0) {
return ret;
}
- dev->irq = link->irq.AssignedIRQ;
+ dev->irq = link->irq;
printk("irq=%u ", dev->irq);
dev->board_ptr = das16cs_probe(dev, link);
@@ -671,7 +671,6 @@ static dev_info_t dev_info = "cb_das16_cs";
struct local_info_t {
struct pcmcia_device *link;
- dev_node_t node;
int stop;
struct bus_operations *bus;
};
@@ -702,10 +701,6 @@ static int das16cs_pcmcia_attach(struct pcmcia_device *link)
link->priv = local;
/* Initialize the pcmcia_device structure */
- /* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = NULL;
-
link->conf.Attributes = 0;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -720,10 +715,8 @@ static void das16cs_pcmcia_detach(struct pcmcia_device *link)
{
dev_dbg(&link->dev, "das16cs_pcmcia_detach\n");
- if (link->dev_node) {
- ((struct local_info_t *)link->priv)->stop = 1;
- das16cs_pcmcia_release(link);
- }
+ ((struct local_info_t *)link->priv)->stop = 1;
+ das16cs_pcmcia_release(link);
/* This points to the parent struct local_info_t struct */
if (link->priv)
kfree(link->priv);
@@ -740,8 +733,7 @@ static int das16cs_pcmcia_config_loop(struct pcmcia_device *p_dev,
return -EINVAL;
/* Do we need to allocate an interrupt? */
- if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
- p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
+ p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
@@ -769,7 +761,6 @@ static int das16cs_pcmcia_config_loop(struct pcmcia_device *p_dev,
static void das16cs_pcmcia_config(struct pcmcia_device *link)
{
- struct local_info_t *dev = link->priv;
int ret;
dev_dbg(&link->dev, "das16cs_pcmcia_config\n");
@@ -780,16 +771,9 @@ static void das16cs_pcmcia_config(struct pcmcia_device *link)
goto failed;
}
- /*
- Allocate an interrupt line. Note that this does not assign a
- handler to the interrupt, unless the 'Handler' member of the
- irq structure is initialized.
- */
- if (link->conf.Attributes & CONF_ENABLE_IRQ) {
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
- goto failed;
- }
+ if (!link->irq)
+ goto failed;
+
/*
This actually configures the PCMCIA socket -- setting up
the I/O windows and the interrupt mapping, and putting the
@@ -799,19 +783,10 @@ static void das16cs_pcmcia_config(struct pcmcia_device *link)
if (ret)
goto failed;
- /*
- At this point, the dev_node_t structure(s) need to be
- initialized and arranged in a linked list at link->dev.
- */
- sprintf(dev->node.dev_name, "cb_das16_cs");
- dev->node.major = dev->node.minor = 0;
- link->dev_node = &dev->node;
-
/* Finally, report what we've done */
- printk(KERN_INFO "%s: index 0x%02x",
- dev->node.dev_name, link->conf.ConfigIndex);
+ dev_info(&link->dev, "index 0x%02x", link->conf.ConfigIndex);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
- printk(", irq %u", link->irq.AssignedIRQ);
+ printk(", irq %u", link->irq);
if (link->io.NumPorts1)
printk(", io 0x%04x-0x%04x", link->io.BasePort1,
link->io.BasePort1 + link->io.NumPorts1 - 1);
diff --git a/drivers/staging/comedi/drivers/das08_cs.c b/drivers/staging/comedi/drivers/das08_cs.c
index 9164ce158dcd..896d25bc85b5 100644
--- a/drivers/staging/comedi/drivers/das08_cs.c
+++ b/drivers/staging/comedi/drivers/das08_cs.c
@@ -142,7 +142,6 @@ static const dev_info_t dev_info = "pcm-das08";
struct local_info_t {
struct pcmcia_device *link;
- dev_node_t node;
int stop;
struct bus_operations *bus;
};
@@ -172,10 +171,6 @@ static int das08_pcmcia_attach(struct pcmcia_device *link)
local->link = link;
link->priv = local;
- /* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
- link->irq.Handler = NULL;
-
/*
General socket configuration defaults can go here. In this
client, we assume very little, and rely on the CIS for almost
@@ -207,10 +202,8 @@ static void das08_pcmcia_detach(struct pcmcia_device *link)
dev_dbg(&link->dev, "das08_pcmcia_detach\n");
- if (link->dev_node) {
- ((struct local_info_t *)link->priv)->stop = 1;
- das08_pcmcia_release(link);
- }
+ ((struct local_info_t *)link->priv)->stop = 1;
+ das08_pcmcia_release(link);
/* This points to the parent struct local_info_t struct */
if (link->priv)
@@ -229,8 +222,7 @@ static int das08_pcmcia_config_loop(struct pcmcia_device *p_dev,
return -ENODEV;
/* Do we need to allocate an interrupt? */
- if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
- p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
+ p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
@@ -266,7 +258,6 @@ static int das08_pcmcia_config_loop(struct pcmcia_device *p_dev,
static void das08_pcmcia_config(struct pcmcia_device *link)
{
- struct local_info_t *dev = link->priv;
int ret;
dev_dbg(&link->dev, "das08_pcmcia_config\n");
@@ -277,11 +268,8 @@ static void das08_pcmcia_config(struct pcmcia_device *link)
goto failed;
}
- if (link->conf.Attributes & CONF_ENABLE_IRQ) {
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
- goto failed;
- }
+ if (!link->irq)
+ goto failed;
/*
This actually configures the PCMCIA socket -- setting up
@@ -292,19 +280,10 @@ static void das08_pcmcia_config(struct pcmcia_device *link)
if (ret)
goto failed;
- /*
- At this point, the dev_node_t structure(s) need to be
- initialized and arranged in a linked list at link->dev.
- */
- sprintf(dev->node.dev_name, "pcm-das08");
- dev->node.major = dev->node.minor = 0;
- link->dev_node = &dev->node;
-
/* Finally, report what we've done */
- printk(KERN_INFO "%s: index 0x%02x",
- dev->node.dev_name, link->conf.ConfigIndex);
+ dev_info(&link->dev, "index 0x%02x", link->conf.ConfigIndex);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
- printk(", irq %u", link->irq.AssignedIRQ);
+ printk(", irq %u", link->irq);
if (link->io.NumPorts1)
printk(", io 0x%04x-0x%04x", link->io.BasePort1,
link->io.BasePort1 + link->io.NumPorts1 - 1);
diff --git a/drivers/staging/comedi/drivers/ni_daq_700.c b/drivers/staging/comedi/drivers/ni_daq_700.c
index 7ea64538e055..06dd44ff1a95 100644
--- a/drivers/staging/comedi/drivers/ni_daq_700.c
+++ b/drivers/staging/comedi/drivers/ni_daq_700.c
@@ -380,7 +380,7 @@ static int dio700_attach(struct comedi_device *dev, struct comedi_devconfig *it)
return -EIO;
iobase = link->io.BasePort1;
#ifdef incomplete
- irq = link->irq.AssignedIRQ;
+ irq = link->irq;
#endif
break;
default:
@@ -470,7 +470,6 @@ static const dev_info_t dev_info = "ni_daq_700";
struct local_info_t {
struct pcmcia_device *link;
- dev_node_t node;
int stop;
struct bus_operations *bus;
};
@@ -502,10 +501,6 @@ static int dio700_cs_attach(struct pcmcia_device *link)
local->link = link;
link->priv = local;
- /* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = NULL;
-
/*
General socket configuration defaults can go here. In this
client, we assume very little, and rely on the CIS for almost
@@ -539,10 +534,8 @@ static void dio700_cs_detach(struct pcmcia_device *link)
dev_dbg(&link->dev, "dio700_cs_detach\n");
- if (link->dev_node) {
- ((struct local_info_t *)link->priv)->stop = 1;
- dio700_release(link);
- }
+ ((struct local_info_t *)link->priv)->stop = 1;
+ dio700_release(link);
/* This points to the parent struct local_info_t struct */
if (link->priv)
@@ -577,8 +570,7 @@ static int dio700_pcmcia_config_loop(struct pcmcia_device *p_dev,
}
/* Do we need to allocate an interrupt? */
- if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
- p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
+ p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
@@ -625,7 +617,6 @@ static int dio700_pcmcia_config_loop(struct pcmcia_device *p_dev,
static void dio700_config(struct pcmcia_device *link)
{
- struct local_info_t *dev = link->priv;
win_req_t req;
int ret;
@@ -639,16 +630,8 @@ static void dio700_config(struct pcmcia_device *link)
goto failed;
}
- /*
- Allocate an interrupt line. Note that this does not assign a
- handler to the interrupt, unless the 'Handler' member of the
- irq structure is initialized.
- */
- if (link->conf.Attributes & CONF_ENABLE_IRQ) {
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
- goto failed;
- }
+ if (!link->irq)
+ goto failed;
/*
This actually configures the PCMCIA socket -- setting up
@@ -659,19 +642,10 @@ static void dio700_config(struct pcmcia_device *link)
if (ret != 0)
goto failed;
- /*
- At this point, the dev_node_t structure(s) need to be
- initialized and arranged in a linked list at link->dev.
- */
- sprintf(dev->node.dev_name, "ni_daq_700");
- dev->node.major = dev->node.minor = 0;
- link->dev_node = &dev->node;
-
/* Finally, report what we've done */
- printk(KERN_INFO "%s: index 0x%02x",
- dev->node.dev_name, link->conf.ConfigIndex);
+ dev_info(&link->dev, "index 0x%02x", link->conf.ConfigIndex);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
- printk(", irq %d", link->irq.AssignedIRQ);
+ printk(", irq %d", link->irq);
if (link->io.NumPorts1)
printk(", io 0x%04x-0x%04x", link->io.BasePort1,
link->io.BasePort1 + link->io.NumPorts1 - 1);
diff --git a/drivers/staging/comedi/drivers/ni_daq_dio24.c b/drivers/staging/comedi/drivers/ni_daq_dio24.c
index ddc312b5d20d..7bfe08b01fe9 100644
--- a/drivers/staging/comedi/drivers/ni_daq_dio24.c
+++ b/drivers/staging/comedi/drivers/ni_daq_dio24.c
@@ -131,7 +131,7 @@ static int dio24_attach(struct comedi_device *dev, struct comedi_devconfig *it)
return -EIO;
iobase = link->io.BasePort1;
#ifdef incomplete
- irq = link->irq.AssignedIRQ;
+ irq = link->irq;
#endif
break;
default:
@@ -221,7 +221,6 @@ static const dev_info_t dev_info = "ni_daq_dio24";
struct local_info_t {
struct pcmcia_device *link;
- dev_node_t node;
int stop;
struct bus_operations *bus;
};
@@ -253,10 +252,6 @@ static int dio24_cs_attach(struct pcmcia_device *link)
local->link = link;
link->priv = local;
- /* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = NULL;
-
/*
General socket configuration defaults can go here. In this
client, we assume very little, and rely on the CIS for almost
@@ -290,10 +285,8 @@ static void dio24_cs_detach(struct pcmcia_device *link)
dev_dbg(&link->dev, "dio24_cs_detach\n");
- if (link->dev_node) {
- ((struct local_info_t *)link->priv)->stop = 1;
- dio24_release(link);
- }
+ ((struct local_info_t *)link->priv)->stop = 1;
+ dio24_release(link);
/* This points to the parent local_info_t struct */
if (link->priv)
@@ -328,8 +321,7 @@ static int dio24_pcmcia_config_loop(struct pcmcia_device *p_dev,
}
/* Do we need to allocate an interrupt? */
- if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
- p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
+ p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
@@ -376,7 +368,6 @@ static int dio24_pcmcia_config_loop(struct pcmcia_device *p_dev,
static void dio24_config(struct pcmcia_device *link)
{
- struct local_info_t *dev = link->priv;
int ret;
win_req_t req;
@@ -390,16 +381,8 @@ static void dio24_config(struct pcmcia_device *link)
goto failed;
}
- /*
- Allocate an interrupt line. Note that this does not assign a
- handler to the interrupt, unless the 'Handler' member of the
- irq structure is initialized.
- */
- if (link->conf.Attributes & CONF_ENABLE_IRQ) {
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
- goto failed;
- }
+ if (!link->irq)
+ goto failed;
/*
This actually configures the PCMCIA socket -- setting up
@@ -410,19 +393,10 @@ static void dio24_config(struct pcmcia_device *link)
if (ret)
goto failed;
- /*
- At this point, the dev_node_t structure(s) need to be
- initialized and arranged in a linked list at link->dev.
- */
- sprintf(dev->node.dev_name, "ni_daq_dio24");
- dev->node.major = dev->node.minor = 0;
- link->dev_node = &dev->node;
-
/* Finally, report what we've done */
- printk(KERN_INFO "%s: index 0x%02x",
- dev->node.dev_name, link->conf.ConfigIndex);
+ dev_info(&link->dev, "index 0x%02x", link->conf.ConfigIndex);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
- printk(", irq %d", link->irq.AssignedIRQ);
+ printk(", irq %d", link->irq);
if (link->io.NumPorts1)
printk(", io 0x%04x-0x%04x", link->io.BasePort1,
link->io.BasePort1 + link->io.NumPorts1 - 1);
diff --git a/drivers/staging/comedi/drivers/ni_labpc_cs.c b/drivers/staging/comedi/drivers/ni_labpc_cs.c
index 8ad1055a5cc1..fd8d3e9520a0 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_cs.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_cs.c
@@ -144,7 +144,7 @@ static int labpc_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (!link)
return -EIO;
iobase = link->io.BasePort1;
- irq = link->irq.AssignedIRQ;
+ irq = link->irq;
break;
default:
printk("bug! couldn't determine board type\n");
@@ -199,7 +199,6 @@ static const dev_info_t dev_info = "daqcard-1200";
struct local_info_t {
struct pcmcia_device *link;
- dev_node_t node;
int stop;
struct bus_operations *bus;
};
@@ -229,10 +228,6 @@ static int labpc_cs_attach(struct pcmcia_device *link)
local->link = link;
link->priv = local;
- /* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_FORCED_PULSE;
- link->irq.Handler = NULL;
-
/*
General socket configuration defaults can go here. In this
client, we assume very little, and rely on the CIS for almost
@@ -269,10 +264,8 @@ static void labpc_cs_detach(struct pcmcia_device *link)
the release() function is called, that will trigger a proper
detach().
*/
- if (link->dev_node) {
- ((struct local_info_t *)link->priv)->stop = 1;
- labpc_release(link);
- }
+ ((struct local_info_t *)link->priv)->stop = 1;
+ labpc_release(link);
/* This points to the parent local_info_t struct (may be null) */
kfree(link->priv);
@@ -306,8 +299,7 @@ static int labpc_pcmcia_config_loop(struct pcmcia_device *p_dev,
}
/* Do we need to allocate an interrupt? */
- if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
- p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
+ p_dev->conf.Attributes |= CONF_ENABLE_IRQ | CONF_ENABLE_PULSE_IRQ;
/* IO window settings */
p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
@@ -355,7 +347,6 @@ static int labpc_pcmcia_config_loop(struct pcmcia_device *p_dev,
static void labpc_config(struct pcmcia_device *link)
{
- struct local_info_t *dev = link->priv;
int ret;
win_req_t req;
@@ -367,16 +358,8 @@ static void labpc_config(struct pcmcia_device *link)
goto failed;
}
- /*
- Allocate an interrupt line. Note that this does not assign a
- handler to the interrupt, unless the 'Handler' member of the
- irq structure is initialized.
- */
- if (link->conf.Attributes & CONF_ENABLE_IRQ) {
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
- goto failed;
- }
+ if (!link->irq)
+ goto failed;
/*
This actually configures the PCMCIA socket -- setting up
@@ -387,19 +370,10 @@ static void labpc_config(struct pcmcia_device *link)
if (ret)
goto failed;
- /*
- At this point, the dev_node_t structure(s) need to be
- initialized and arranged in a linked list at link->dev.
- */
- sprintf(dev->node.dev_name, "daqcard-1200");
- dev->node.major = dev->node.minor = 0;
- link->dev_node = &dev->node;
-
/* Finally, report what we've done */
- printk(KERN_INFO "%s: index 0x%02x",
- dev->node.dev_name, link->conf.ConfigIndex);
+ dev_info(&link->dev, "index 0x%02x", link->conf.ConfigIndex);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
- printk(", irq %d", link->irq.AssignedIRQ);
+ printk(", irq %d", link->irq);
if (link->io.NumPorts1)
printk(", io 0x%04x-0x%04x", link->io.BasePort1,
link->io.BasePort1 + link->io.NumPorts1 - 1);
diff --git a/drivers/staging/comedi/drivers/ni_mio_cs.c b/drivers/staging/comedi/drivers/ni_mio_cs.c
index dc4849a40c97..1e8aebae8ae8 100644
--- a/drivers/staging/comedi/drivers/ni_mio_cs.c
+++ b/drivers/staging/comedi/drivers/ni_mio_cs.c
@@ -262,17 +262,11 @@ static void cs_detach(struct pcmcia_device *);
static struct pcmcia_device *cur_dev = NULL;
static const dev_info_t dev_info = "ni_mio_cs";
-static dev_node_t dev_node = {
- "ni_mio_cs",
- COMEDI_MAJOR, 0,
- NULL
-};
static int cs_attach(struct pcmcia_device *link)
{
link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
link->io.NumPorts1 = 16;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -292,8 +286,7 @@ static void cs_detach(struct pcmcia_device *link)
{
DPRINTK("cs_detach(link=%p)\n", link);
- if (link->dev_node)
- cs_release(link);
+ cs_release(link);
}
static int mio_cs_suspend(struct pcmcia_device *link)
@@ -344,14 +337,10 @@ static void mio_cs_config(struct pcmcia_device *link)
return;
}
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret) {
- printk("pcmcia_request_irq() returned error: %i\n", ret);
- }
+ if (!link->irq)
+ dev_info(&link->dev, "no IRQ available\n");
ret = pcmcia_request_configuration(link, &link->conf);
-
- link->dev_node = &dev_node;
}
static int mio_cs_attach(struct comedi_device *dev, struct comedi_devconfig *it)
@@ -369,7 +358,7 @@ static int mio_cs_attach(struct comedi_device *dev, struct comedi_devconfig *it)
dev->driver = &driver_ni_mio_cs;
dev->iobase = link->io.BasePort1;
- irq = link->irq.AssignedIRQ;
+ irq = link->irq;
printk("comedi%d: %s: DAQCard: io 0x%04lx, irq %u, ",
dev->minor, dev->driver->driver_name, dev->iobase, irq);
diff --git a/drivers/staging/comedi/drivers/quatech_daqp_cs.c b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
index 3325f24448b5..1786db2f3378 100644
--- a/drivers/staging/comedi/drivers/quatech_daqp_cs.c
+++ b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
@@ -60,7 +60,6 @@ Devices: [Quatech] DAQP-208 (daqp), DAQP-308
struct local_info_t {
struct pcmcia_device *link;
- dev_node_t node;
int stop;
int table_index;
char board_name[32];
@@ -1040,10 +1039,6 @@ static int daqp_cs_attach(struct pcmcia_device *link)
local->link = link;
link->priv = local;
- /* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = daqp_interrupt;
-
/*
General socket configuration defaults can go here. In this
client, we assume very little, and rely on the CIS for almost
@@ -1074,10 +1069,8 @@ static void daqp_cs_detach(struct pcmcia_device *link)
dev_dbg(&link->dev, "daqp_cs_detach\n");
- if (link->dev_node) {
- dev->stop = 1;
- daqp_cs_release(link);
- }
+ dev->stop = 1;
+ daqp_cs_release(link);
/* Unlink device structure, and free it */
dev_table[dev->table_index] = NULL;
@@ -1105,8 +1098,7 @@ static int daqp_pcmcia_config_loop(struct pcmcia_device *p_dev,
return -ENODEV;
/* Do we need to allocate an interrupt? */
- if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
- p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
+ p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
@@ -1133,7 +1125,6 @@ static int daqp_pcmcia_config_loop(struct pcmcia_device *p_dev,
static void daqp_cs_config(struct pcmcia_device *link)
{
- struct local_info_t *dev = link->priv;
int ret;
dev_dbg(&link->dev, "daqp_cs_config\n");
@@ -1144,16 +1135,9 @@ static void daqp_cs_config(struct pcmcia_device *link)
goto failed;
}
- /*
- Allocate an interrupt line. Note that this does not assign a
- handler to the interrupt, unless the 'Handler' member of the
- irq structure is initialized.
- */
- if (link->conf.Attributes & CONF_ENABLE_IRQ) {
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
- goto failed;
- }
+ ret = pcmcia_request_irq(link, daqp_interrupt);
+ if (ret)
+ goto failed;
/*
This actually configures the PCMCIA socket -- setting up
@@ -1164,23 +1148,10 @@ static void daqp_cs_config(struct pcmcia_device *link)
if (ret)
goto failed;
- /*
- At this point, the dev_node_t structure(s) need to be
- initialized and arranged in a linked list at link->dev.
- */
- /* Comedi's PCMCIA script uses this device name (extracted
- * from /var/lib/pcmcia/stab) to pass to comedi_config
- */
- /* sprintf(dev->node.dev_name, "daqp%d", dev->table_index); */
- sprintf(dev->node.dev_name, "quatech_daqp_cs");
- dev->node.major = dev->node.minor = 0;
- link->dev_node = &dev->node;
-
/* Finally, report what we've done */
- printk(KERN_INFO "%s: index 0x%02x",
- dev->node.dev_name, link->conf.ConfigIndex);
+ dev_info(&link->dev, "index 0x%02x", link->conf.ConfigIndex);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
- printk(", irq %u", link->irq.AssignedIRQ);
+ printk(", irq %u", link->irq);
if (link->io.NumPorts1)
printk(", io 0x%04x-0x%04x", link->io.BasePort1,
link->io.BasePort1 + link->io.NumPorts1 - 1);
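The comedi PCMCIA hunks above all apply the same conversion: the driver no longer fills in an irq structure, the core pre-assigns link->irq before the config callback runs, pcmcia_request_irq() takes the handler directly, and the dev_node_t bookkeeping disappears. Below is a minimal sketch of the config flow these hunks converge on, assuming the 2.6.35-era PCMCIA API; my_cs_config(), my_interrupt() and the header list are invented for illustration and are not part of the patch.

#include <linux/interrupt.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cs.h>
#include <pcmcia/ds.h>

static irqreturn_t my_interrupt(int irq, void *dev_id)
{
	/* a real driver would acknowledge the card here */
	return IRQ_HANDLED;
}

static void my_cs_config(struct pcmcia_device *link)
{
	int ret;

	/* Ask the core to route an interrupt when the socket is configured. */
	link->conf.Attributes |= CONF_ENABLE_IRQ;

	/* The core has already picked an IRQ; the driver only checks link->irq. */
	if (!link->irq)
		goto failed;

	/* Two-argument form: the handler is passed directly, no irq struct. */
	ret = pcmcia_request_irq(link, my_interrupt);
	if (ret)
		goto failed;

	ret = pcmcia_request_configuration(link, &link->conf);
	if (ret)
		goto failed;

	dev_info(&link->dev, "index 0x%02x, irq %u\n",
		 link->conf.ConfigIndex, link->irq);
	return;

failed:
	dev_dbg(&link->dev, "configuration failed\n");
}

Drivers that never register a handler of their own (the ni_daq_* boards above) drop the pcmcia_request_irq() call entirely and simply read link->irq, as the hunks show.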
diff --git a/drivers/staging/cx25821/cx25821-alsa.c b/drivers/staging/cx25821/cx25821-alsa.c
index 061add30ba8a..1798975a69bd 100644
--- a/drivers/staging/cx25821/cx25821-alsa.c
+++ b/drivers/staging/cx25821/cx25821-alsa.c
@@ -29,7 +29,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
-#include <asm/delay.h>
+#include <linux/delay.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -42,10 +42,10 @@
#define AUDIO_SRAM_CHANNEL SRAM_CH08
-#define dprintk(level,fmt, arg...) if (debug >= level) \
+#define dprintk(level, fmt, arg...) if (debug >= level) \
printk(KERN_INFO "%s/1: " fmt, chip->dev->name , ## arg)
-#define dprintk_core(level,fmt, arg...) if (debug >= level) \
+#define dprintk_core(level, fmt, arg...) if (debug >= level) \
printk(KERN_DEBUG "%s/1: " fmt, chip->dev->name , ## arg)
/****************************************************************************
@@ -81,7 +81,6 @@ struct cx25821_audio_dev {
struct snd_pcm_substream *substream;
};
-typedef struct cx25821_audio_dev snd_cx25821_card_t;
/****************************************************************************
@@ -90,7 +89,7 @@ typedef struct cx25821_audio_dev snd_cx25821_card_t;
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-static int enable[SNDRV_CARDS] = { 1,[1 ... (SNDRV_CARDS - 1)] = 1 };
+static int enable[SNDRV_CARDS] = { 1, [1 ... (SNDRV_CARDS - 1)] = 1 };
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable cx25821 soundcard. default enabled.");
@@ -105,7 +104,7 @@ MODULE_PARM_DESC(index, "Index value for cx25821 capture interface(s).");
MODULE_DESCRIPTION("ALSA driver module for cx25821 based capture cards");
MODULE_AUTHOR("Hiep Huynh");
MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Conexant,25821}"); //"{{Conexant,23881},"
+MODULE_SUPPORTED_DEVICE("{{Conexant,25821}"); /* "{{Conexant,23881}," */
static unsigned int debug;
module_param(debug, int, 0644);
@@ -135,7 +134,7 @@ MODULE_PARM_DESC(debug, "enable debug messages");
* BOARD Specific: Sets audio DMA
*/
-static int _cx25821_start_audio_dma(snd_cx25821_card_t * chip)
+static int _cx25821_start_audio_dma(struct cx25821_audio_dev *chip)
{
struct cx25821_buffer *buf = chip->buf;
struct cx25821_dev *dev = chip->dev;
@@ -143,7 +142,7 @@ static int _cx25821_start_audio_dma(snd_cx25821_card_t * chip)
&cx25821_sram_channels[AUDIO_SRAM_CHANNEL];
u32 tmp = 0;
- // enable output on the GPIO 0 for the MCLK ADC (Audio)
+ /* enable output on the GPIO 0 for the MCLK ADC (Audio) */
cx25821_set_gpiopin_direction(chip->dev, 0, 0);
/* Make sure RISC/FIFO are off before changing FIFO/RISC settings */
@@ -158,18 +157,23 @@ static int _cx25821_start_audio_dma(snd_cx25821_card_t * chip)
cx_write(AUD_A_LNGTH, buf->bpl);
/* reset counter */
- cx_write(AUD_A_GPCNT_CTL, GP_COUNT_CONTROL_RESET); //GP_COUNT_CONTROL_RESET = 0x3
+ /* GP_COUNT_CONTROL_RESET = 0x3 */
+ cx_write(AUD_A_GPCNT_CTL, GP_COUNT_CONTROL_RESET);
atomic_set(&chip->count, 0);
- //Set the input mode to 16-bit
+ /* Set the input mode to 16-bit */
tmp = cx_read(AUD_A_CFG);
cx_write(AUD_A_CFG,
tmp | FLD_AUD_DST_PK_MODE | FLD_AUD_DST_ENABLE |
FLD_AUD_CLK_ENABLE);
- //printk(KERN_INFO "DEBUG: Start audio DMA, %d B/line, cmds_start(0x%x)= %d lines/FIFO, %d periods, %d "
- // "byte buffer\n", buf->bpl, audio_ch->cmds_start, cx_read(audio_ch->cmds_start + 12)>>1,
- // chip->num_periods, buf->bpl * chip->num_periods);
+ /* printk(KERN_INFO "DEBUG: Start audio DMA, %d B/line,"
+ "cmds_start(0x%x)= %d lines/FIFO, %d periods, "
+ "%d byte buffer\n", buf->bpl,
+ audio_ch->cmds_start,
+ cx_read(audio_ch->cmds_start + 12)>>1,
+ chip->num_periods, buf->bpl *chip->num_periods);
+ */
/* Enables corresponding bits at AUD_INT_STAT */
cx_write(AUD_A_INT_MSK,
@@ -182,7 +186,7 @@ static int _cx25821_start_audio_dma(snd_cx25821_card_t * chip)
/* enable audio irqs */
cx_set(PCI_INT_MSK, chip->dev->pci_irqmask | PCI_MSK_AUD_INT);
- // Turn on audio downstream fifo and risc enable 0x101
+ /* Turn on audio downstream fifo and risc enable 0x101 */
tmp = cx_read(AUD_INT_DMA_CTL);
cx_set(AUD_INT_DMA_CTL,
tmp | (FLD_AUD_DST_A_RISC_EN | FLD_AUD_DST_A_FIFO_EN));
@@ -194,7 +198,7 @@ static int _cx25821_start_audio_dma(snd_cx25821_card_t * chip)
/*
* BOARD Specific: Resets audio DMA
*/
-static int _cx25821_stop_audio_dma(snd_cx25821_card_t * chip)
+static int _cx25821_stop_audio_dma(struct cx25821_audio_dev *chip)
{
struct cx25821_dev *dev = chip->dev;
@@ -232,13 +236,13 @@ static char *cx25821_aud_irqs[32] = {
/*
* BOARD Specific: Threats IRQ audio specific calls
*/
-static void cx25821_aud_irq(snd_cx25821_card_t * chip, u32 status, u32 mask)
+static void cx25821_aud_irq(struct cx25821_audio_dev *chip, u32 status,
+ u32 mask)
{
struct cx25821_dev *dev = chip->dev;
- if (0 == (status & mask)) {
+ if (0 == (status & mask))
return;
- }
cx_write(AUD_A_INT_STAT, status);
if (debug > 1 || (status & mask & ~0xff))
@@ -277,7 +281,7 @@ static void cx25821_aud_irq(snd_cx25821_card_t * chip, u32 status, u32 mask)
*/
static irqreturn_t cx25821_irq(int irq, void *dev_id)
{
- snd_cx25821_card_t *chip = dev_id;
+ struct cx25821_audio_dev *chip = dev_id;
struct cx25821_dev *dev = chip->dev;
u32 status, pci_status;
u32 audint_status, audint_mask;
@@ -318,11 +322,11 @@ static irqreturn_t cx25821_irq(int irq, void *dev_id)
if (handled)
cx_write(PCI_INT_STAT, pci_status);
- out:
+out:
return IRQ_RETVAL(handled);
}
-static int dsp_buffer_free(snd_cx25821_card_t * chip)
+static int dsp_buffer_free(struct cx25821_audio_dev *chip)
{
BUG_ON(!chip->dma_size);
@@ -363,7 +367,8 @@ static struct snd_pcm_hardware snd_cx25821_digital_hw = {
.period_bytes_max = DEFAULT_FIFO_SIZE / 3,
.periods_min = 1,
.periods_max = AUDIO_LINE_SIZE,
- .buffer_bytes_max = (AUDIO_LINE_SIZE * AUDIO_LINE_SIZE), //128*128 = 16384 = 1024 * 16
+ /* 128 * 128 = 16384 = 1024 * 16 */
+ .buffer_bytes_max = (AUDIO_LINE_SIZE * AUDIO_LINE_SIZE),
};
/*
@@ -371,7 +376,7 @@ static struct snd_pcm_hardware snd_cx25821_digital_hw = {
*/
static int snd_cx25821_pcm_open(struct snd_pcm_substream *substream)
{
- snd_cx25821_card_t *chip = snd_pcm_substream_chip(substream);
+ struct cx25821_audio_dev *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
int err;
unsigned int bpl = 0;
@@ -393,18 +398,19 @@ static int snd_cx25821_pcm_open(struct snd_pcm_substream *substream)
if (cx25821_sram_channels[AUDIO_SRAM_CHANNEL].fifo_size !=
DEFAULT_FIFO_SIZE) {
- bpl = cx25821_sram_channels[AUDIO_SRAM_CHANNEL].fifo_size / 3; //since there are 3 audio Clusters
+ /* since there are 3 audio Clusters */
+ bpl = cx25821_sram_channels[AUDIO_SRAM_CHANNEL].fifo_size / 3;
bpl &= ~7; /* must be multiple of 8 */
- if (bpl > AUDIO_LINE_SIZE) {
+ if (bpl > AUDIO_LINE_SIZE)
bpl = AUDIO_LINE_SIZE;
- }
+
runtime->hw.period_bytes_min = bpl;
runtime->hw.period_bytes_max = bpl;
}
return 0;
- _error:
+_error:
dprintk(1, "Error opening PCM!\n");
return err;
}
@@ -423,7 +429,7 @@ static int snd_cx25821_close(struct snd_pcm_substream *substream)
static int snd_cx25821_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
- snd_cx25821_card_t *chip = snd_pcm_substream_chip(substream);
+ struct cx25821_audio_dev *chip = snd_pcm_substream_chip(substream);
struct videobuf_dmabuf *dma;
struct cx25821_buffer *buf;
@@ -445,9 +451,8 @@ static int snd_cx25821_hw_params(struct snd_pcm_substream *substream,
if (NULL == buf)
return -ENOMEM;
- if (chip->period_size > AUDIO_LINE_SIZE) {
+ if (chip->period_size > AUDIO_LINE_SIZE)
chip->period_size = AUDIO_LINE_SIZE;
- }
buf->vb.memory = V4L2_MEMORY_MMAP;
buf->vb.field = V4L2_FIELD_NONE;
@@ -474,7 +479,7 @@ static int snd_cx25821_hw_params(struct snd_pcm_substream *substream,
buf->vb.width, buf->vb.height, 1);
if (ret < 0) {
printk(KERN_INFO
- "DEBUG: ERROR after cx25821_risc_databuffer_audio() \n");
+ "DEBUG: ERROR after cx25821_risc_databuffer_audio()\n");
goto error;
}
@@ -494,7 +499,7 @@ static int snd_cx25821_hw_params(struct snd_pcm_substream *substream,
return 0;
- error:
+error:
kfree(buf);
return ret;
}
@@ -504,7 +509,7 @@ static int snd_cx25821_hw_params(struct snd_pcm_substream *substream,
*/
static int snd_cx25821_hw_free(struct snd_pcm_substream *substream)
{
- snd_cx25821_card_t *chip = snd_pcm_substream_chip(substream);
+ struct cx25821_audio_dev *chip = snd_pcm_substream_chip(substream);
if (substream->runtime->dma_area) {
dsp_buffer_free(chip);
@@ -528,7 +533,7 @@ static int snd_cx25821_prepare(struct snd_pcm_substream *substream)
static int snd_cx25821_card_trigger(struct snd_pcm_substream *substream,
int cmd)
{
- snd_cx25821_card_t *chip = snd_pcm_substream_chip(substream);
+ struct cx25821_audio_dev *chip = snd_pcm_substream_chip(substream);
int err = 0;
/* Local interrupts are already disabled by ALSA */
@@ -557,7 +562,7 @@ static int snd_cx25821_card_trigger(struct snd_pcm_substream *substream,
static snd_pcm_uframes_t snd_cx25821_pointer(struct snd_pcm_substream
*substream)
{
- snd_cx25821_card_t *chip = snd_pcm_substream_chip(substream);
+ struct cx25821_audio_dev *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
u16 count;
@@ -593,10 +598,11 @@ static struct snd_pcm_ops snd_cx25821_pcm_ops = {
};
/*
- * ALSA create a PCM device: Called when initializing the board. Sets up the name and hooks up
- * the callbacks
+ * ALSA create a PCM device: Called when initializing the board.
+ * Sets up the name and hooks up the callbacks
*/
-static int snd_cx25821_pcm(snd_cx25821_card_t * chip, int device, char *name)
+static int snd_cx25821_pcm(struct cx25821_audio_dev *chip, int device,
+ char *name)
{
struct snd_pcm *pcm;
int err;
@@ -636,7 +642,7 @@ MODULE_DEVICE_TABLE(pci, cx25821_audio_pci_tbl);
* from the file.
*/
/*
-static int snd_cx25821_free(snd_cx25821_card_t *chip)
+static int snd_cx25821_free(struct cx25821_audio_dev *chip)
{
if (chip->irq >= 0)
free_irq(chip->irq, chip);
@@ -653,9 +659,9 @@ static int snd_cx25821_free(snd_cx25821_card_t *chip)
*/
static void snd_cx25821_dev_free(struct snd_card *card)
{
- snd_cx25821_card_t *chip = card->private_data;
+ struct cx25821_audio_dev *chip = card->private_data;
- //snd_cx25821_free(chip);
+ /* snd_cx25821_free(chip); */
snd_card_free(chip->card);
}
@@ -665,23 +671,23 @@ static void snd_cx25821_dev_free(struct snd_card *card)
static int cx25821_audio_initdev(struct cx25821_dev *dev)
{
struct snd_card *card;
- snd_cx25821_card_t *chip;
+ struct cx25821_audio_dev *chip;
int err;
if (devno >= SNDRV_CARDS) {
printk(KERN_INFO "DEBUG ERROR: devno >= SNDRV_CARDS %s\n",
__func__);
- return (-ENODEV);
+ return -ENODEV;
}
if (!enable[devno]) {
++devno;
printk(KERN_INFO "DEBUG ERROR: !enable[devno] %s\n", __func__);
- return (-ENOENT);
+ return -ENOENT;
}
err = snd_card_create(index[devno], id[devno], THIS_MODULE,
- sizeof(snd_cx25821_card_t), &card);
+ sizeof(struct cx25821_audio_dev), &card);
if (err < 0) {
printk(KERN_INFO
"DEBUG ERROR: cannot create snd_card_new in %s\n",
@@ -693,7 +699,7 @@ static int cx25821_audio_initdev(struct cx25821_dev *dev)
/* Card "creation" */
card->private_free = snd_cx25821_dev_free;
- chip = (snd_cx25821_card_t *) card->private_data;
+ chip = (struct cx25821_audio_dev *) card->private_data;
spin_lock_init(&chip->reg_lock);
chip->dev = dev;
@@ -712,7 +718,8 @@ static int cx25821_audio_initdev(struct cx25821_dev *dev)
goto error;
}
- if ((err = snd_cx25821_pcm(chip, 0, "cx25821 Digital")) < 0) {
+ err = snd_cx25821_pcm(chip, 0, "cx25821 Digital");
+ if (err < 0) {
printk(KERN_INFO
"DEBUG ERROR: cannot create snd_cx25821_pcm %s\n",
__func__);
@@ -741,7 +748,7 @@ static int cx25821_audio_initdev(struct cx25821_dev *dev)
devno++;
return 0;
- error:
+error:
snd_card_free(card);
return err;
}
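The cx25821-alsa.c hunks are pure checkpatch-style cleanups: the snd_cx25821_card_t typedef gives way to the plain struct name, C99 // comments become block comments, goto labels move to column zero, return (-ENODEV) loses its parentheses, and assignments are pulled out of if conditions. A small sketch of that last idiom, using invented names (mycard, setup_pcm) rather than anything from this driver:

/* invented example types and names; not taken from the driver */
struct mycard {
	int irq;
};

static int setup_pcm(struct mycard *chip, int device, const char *name)
{
	return 0;	/* stub for illustration */
}

static int mycard_init(struct mycard *chip)
{
	int err;

	/* was: if ((err = setup_pcm(chip, 0, "Digital")) < 0) ... */
	err = setup_pcm(chip, 0, "Digital");
	if (err < 0)
		return err;

	return 0;
}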
diff --git a/drivers/staging/cx25821/cx25821-audio-upstream.c b/drivers/staging/cx25821/cx25821-audio-upstream.c
index 11c56bdb0ceb..ac13ba925fcb 100644
--- a/drivers/staging/cx25821/cx25821-audio-upstream.c
+++ b/drivers/staging/cx25821/cx25821-audio-upstream.c
@@ -32,8 +32,8 @@
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/delay.h>
+#include <linux/uaccess.h>
#include <linux/slab.h>
-#include <asm/uaccess.h>
MODULE_DESCRIPTION("v4l2 driver module for cx25821 based TV cards");
MODULE_AUTHOR("Hiep Huynh <hiep.huynh@conexant.com>");
@@ -62,9 +62,8 @@ int cx25821_sram_channel_setup_upstream_audio(struct cx25821_dev *dev,
cdt = ch->cdt;
lines = ch->fifo_size / bpl;
- if (lines > 3) {
+ if (lines > 3)
lines = 3;
- }
BUG_ON(lines < 2);
@@ -84,7 +83,7 @@ int cx25821_sram_channel_setup_upstream_audio(struct cx25821_dev *dev,
cx_write(ch->cmds_start + 12, AUDIO_CDT_SIZE_QW);
cx_write(ch->cmds_start + 16, ch->ctrl_start);
- //IQ size
+ /* IQ size */
cx_write(ch->cmds_start + 20, AUDIO_IQ_SIZE_DW);
for (i = 24; i < 80; i += 4)
@@ -100,7 +99,7 @@ int cx25821_sram_channel_setup_upstream_audio(struct cx25821_dev *dev,
}
static __le32 *cx25821_risc_field_upstream_audio(struct cx25821_dev *dev,
- __le32 * rp,
+ __le32 *rp,
dma_addr_t databuf_phys_addr,
unsigned int bpl,
int fifo_enable)
@@ -116,8 +115,10 @@ static __le32 *cx25821_risc_field_upstream_audio(struct cx25821_dev *dev,
*(rp++) = cpu_to_le32(databuf_phys_addr + offset);
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
- // Check if we need to enable the FIFO after the first 3 lines
- // For the upstream audio channel, the risc engine will enable the FIFO.
+ /* Check if we need to enable the FIFO
+ * after the first 3 lines.
+ * For the upstream audio channel,
+ * the risc engine will enable the FIFO */
if (fifo_enable && line == 2) {
*(rp++) = RISC_WRITECR;
*(rp++) = sram_ch->dma_ctl;
@@ -160,7 +161,7 @@ int cx25821_risc_buffer_upstream_audio(struct cx25821_dev *dev,
risc_flag = RISC_CNT_INC;
}
- //Calculate physical jump address
+ /* Calculate physical jump address */
if ((frame + 1) == NUM_AUDIO_FRAMES) {
risc_phys_jump_addr =
dev->_risc_phys_start_addr +
@@ -179,17 +180,17 @@ int cx25821_risc_buffer_upstream_audio(struct cx25821_dev *dev,
fifo_enable);
if (USE_RISC_NOOP_AUDIO) {
- for (i = 0; i < NUM_NO_OPS; i++) {
+ for (i = 0; i < NUM_NO_OPS; i++)
*(rp++) = cpu_to_le32(RISC_NOOP);
- }
}
- // Loop to (Nth)FrameRISC or to Start of Risc program & generate IRQ
+ /* Loop to (Nth)FrameRISC or to Start of Risc program &
+ * generate IRQ */
*(rp++) = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | risc_flag);
*(rp++) = cpu_to_le32(risc_phys_jump_addr);
*(rp++) = cpu_to_le32(0);
- //Recalculate virtual address based on frame index
+ /* Recalculate virtual address based on frame index */
rp = dev->_risc_virt_addr + RISC_SYNC_INSTRUCTION_SIZE / 4 +
(AUDIO_RISC_DMA_BUF_SIZE * (frame + 1) / 4);
}
@@ -220,19 +221,19 @@ void cx25821_stop_upstream_audio(struct cx25821_dev *dev)
u32 tmp = 0;
if (!dev->_audio_is_running) {
- printk
- ("cx25821: No audio file is currently running so return!\n");
+ printk(KERN_DEBUG
+ "cx25821: No audio file is currently running so return!\n");
return;
}
- //Disable RISC interrupts
+ /* Disable RISC interrupts */
cx_write(sram_ch->int_msk, 0);
- //Turn OFF risc and fifo enable in AUD_DMA_CNTRL
+ /* Turn OFF risc and fifo enable in AUD_DMA_CNTRL */
tmp = cx_read(sram_ch->dma_ctl);
cx_write(sram_ch->dma_ctl,
tmp & ~(sram_ch->fld_aud_fifo_en | sram_ch->fld_aud_risc_en));
- //Clear data buffer memory
+ /* Clear data buffer memory */
if (dev->_audiodata_buf_virt_addr)
memset(dev->_audiodata_buf_virt_addr, 0,
dev->_audiodata_buf_size);
@@ -253,9 +254,8 @@ void cx25821_stop_upstream_audio(struct cx25821_dev *dev)
void cx25821_free_mem_upstream_audio(struct cx25821_dev *dev)
{
- if (dev->_audio_is_running) {
+ if (dev->_audio_is_running)
cx25821_stop_upstream_audio(dev);
- }
cx25821_free_memory_audio(dev);
}
@@ -282,7 +282,7 @@ int cx25821_get_audio_data(struct cx25821_dev *dev,
if (IS_ERR(myfile)) {
const int open_errno = -PTR_ERR(myfile);
- printk("%s(): ERROR opening file(%s) with errno = %d! \n",
+ printk(KERN_ERR "%s(): ERROR opening file(%s) with errno = %d!\n",
__func__, dev->_audiofilename, open_errno);
return PTR_ERR(myfile);
} else {
@@ -294,7 +294,7 @@ int cx25821_get_audio_data(struct cx25821_dev *dev,
}
if (!myfile->f_op->read) {
- printk("%s: File has no READ operations registered! \n",
+ printk("%s: File has no READ operations registered!\n",
__func__);
filp_close(myfile, NULL);
return -EIO;
@@ -347,7 +347,7 @@ static void cx25821_audioups_handler(struct work_struct *work)
container_of(work, struct cx25821_dev, _audio_work_entry);
if (!dev) {
- printk("ERROR %s(): since container_of(work_struct) FAILED! \n",
+ printk(KERN_ERR "ERROR %s(): since container_of(work_struct) FAILED!\n",
__func__);
return;
}
@@ -373,19 +373,19 @@ int cx25821_openfile_audio(struct cx25821_dev *dev,
if (IS_ERR(myfile)) {
const int open_errno = -PTR_ERR(myfile);
- printk("%s(): ERROR opening file(%s) with errno = %d! \n",
+ printk(KERN_ERR "%s(): ERROR opening file(%s) with errno = %d!\n",
__func__, dev->_audiofilename, open_errno);
return PTR_ERR(myfile);
} else {
if (!(myfile->f_op)) {
- printk("%s: File has no file operations registered! \n",
+ printk("%s: File has no file operations registered!\n",
__func__);
filp_close(myfile, NULL);
return -EIO;
}
if (!myfile->f_op->read) {
- printk("%s: File has no READ operations registered! \n",
+ printk("%s: File has no READ operations registered!\n",
__func__);
filp_close(myfile, NULL);
return -EIO;
@@ -421,13 +421,11 @@ int cx25821_openfile_audio(struct cx25821_dev *dev,
}
}
- if (i > 0) {
+ if (i > 0)
dev->_audioframe_count++;
- }
- if (vfs_read_retval < line_size) {
+ if (vfs_read_retval < line_size)
break;
- }
}
dev->_audiofile_status =
@@ -460,14 +458,14 @@ static int cx25821_audio_upstream_buffer_prepare(struct cx25821_dev *dev,
dev->_audiorisc_size = dev->audio_upstream_riscbuf_size;
if (!dev->_risc_virt_addr) {
- printk
- ("cx25821 ERROR: pci_alloc_consistent() FAILED to allocate memory for RISC program! Returning.\n");
+ printk(KERN_DEBUG
+ "cx25821 ERROR: pci_alloc_consistent() FAILED to allocate memory for RISC program! Returning.\n");
return -ENOMEM;
}
- //Clear out memory at address
+ /* Clear out memory at address */
memset(dev->_risc_virt_addr, 0, dev->_audiorisc_size);
- //For Audio Data buffer allocation
+ /* For Audio Data buffer allocation */
dev->_audiodata_buf_virt_addr =
pci_alloc_consistent(dev->pci, dev->audio_upstream_databuf_size,
&data_dma_addr);
@@ -475,30 +473,30 @@ static int cx25821_audio_upstream_buffer_prepare(struct cx25821_dev *dev,
dev->_audiodata_buf_size = dev->audio_upstream_databuf_size;
if (!dev->_audiodata_buf_virt_addr) {
- printk
- ("cx25821 ERROR: pci_alloc_consistent() FAILED to allocate memory for data buffer! Returning. \n");
+ printk(KERN_DEBUG
+ "cx25821 ERROR: pci_alloc_consistent() FAILED to allocate memory for data buffer! Returning.\n");
return -ENOMEM;
}
- //Clear out memory at address
+ /* Clear out memory at address */
memset(dev->_audiodata_buf_virt_addr, 0, dev->_audiodata_buf_size);
ret = cx25821_openfile_audio(dev, sram_ch);
if (ret < 0)
return ret;
- //Creating RISC programs
+ /* Creating RISC programs */
ret =
cx25821_risc_buffer_upstream_audio(dev, dev->pci, bpl,
dev->_audio_lines_count);
if (ret < 0) {
printk(KERN_DEBUG
- "cx25821 ERROR creating audio upstream RISC programs! \n");
+ "cx25821 ERROR creating audio upstream RISC programs!\n");
goto error;
}
return 0;
- error:
+error:
return ret;
}
@@ -512,22 +510,22 @@ int cx25821_audio_upstream_irq(struct cx25821_dev *dev, int chan_num,
__le32 *rp;
if (status & FLD_AUD_SRC_RISCI1) {
- //Get interrupt_index of the program that interrupted
+ /* Get interrupt_index of the program that interrupted */
u32 prog_cnt = cx_read(channel->gpcnt);
- //Since we've identified our IRQ, clear our bits from the interrupt mask and interrupt status registers
+ /* Since we've identified our IRQ, clear our bits from the
+ * interrupt mask and interrupt status registers */
cx_write(channel->int_msk, 0);
cx_write(channel->int_stat, cx_read(channel->int_stat));
spin_lock(&dev->slock);
while (prog_cnt != dev->_last_index_irq) {
- //Update _last_index_irq
- if (dev->_last_index_irq < (NUMBER_OF_PROGRAMS - 1)) {
+ /* Update _last_index_irq */
+ if (dev->_last_index_irq < (NUMBER_OF_PROGRAMS - 1))
dev->_last_index_irq++;
- } else {
+ else
dev->_last_index_irq = 0;
- }
dev->_audioframe_index = dev->_last_index_irq;
@@ -559,7 +557,7 @@ int cx25821_audio_upstream_irq(struct cx25821_dev *dev, int chan_num,
cpu_to_le32(RISC_NOOP);
}
}
- // Jump to 2nd Audio Frame
+ /* Jump to 2nd Audio Frame */
*(rp++) =
cpu_to_le32(RISC_JUMP | RISC_IRQ1 |
RISC_CNT_RESET);
@@ -582,7 +580,8 @@ int cx25821_audio_upstream_irq(struct cx25821_dev *dev, int chan_num,
printk("%s: Audio Received OpCode Error Interrupt!\n",
__func__);
- // Read and write back the interrupt status register to clear our bits
+ /* Read and write back the interrupt status register to clear
+ * our bits */
cx_write(channel->int_stat, cx_read(channel->int_stat));
}
@@ -591,7 +590,7 @@ int cx25821_audio_upstream_irq(struct cx25821_dev *dev, int chan_num,
dev->_audioframe_count);
return -1;
}
- //ElSE, set the interrupt mask register, re-enable irq.
+ /* ElSE, set the interrupt mask register, re-enable irq. */
int_msk_tmp = cx_read(channel->int_msk);
cx_write(channel->int_msk, int_msk_tmp |= _intr_msk);
@@ -613,7 +612,7 @@ static irqreturn_t cx25821_upstream_irq_audio(int irq, void *dev_id)
msk_stat = cx_read(sram_ch->int_mstat);
audio_status = cx_read(sram_ch->int_stat);
- // Only deal with our interrupt
+ /* Only deal with our interrupt */
if (audio_status) {
handled =
cx25821_audio_upstream_irq(dev,
@@ -622,11 +621,10 @@ static irqreturn_t cx25821_upstream_irq_audio(int irq, void *dev_id)
audio_status);
}
- if (handled < 0) {
+ if (handled < 0)
cx25821_stop_upstream_audio(dev);
- } else {
+ else
handled += handled;
- }
return IRQ_RETVAL(handled);
}
@@ -638,13 +636,14 @@ static void cx25821_wait_fifo_enable(struct cx25821_dev *dev,
u32 tmp;
do {
- //Wait 10 microsecond before checking to see if the FIFO is turned ON.
+ /* Wait 10 microsecond before checking to see if the FIFO is
+ * turned ON. */
udelay(10);
tmp = cx_read(sram_ch->dma_ctl);
- if (count++ > 1000) //10 millisecond timeout
- {
+ /* 10 millisecond timeout */
+ if (count++ > 1000) {
printk
("cx25821 ERROR: %s() fifo is NOT turned on. Timeout!\n",
__func__);
@@ -661,31 +660,34 @@ int cx25821_start_audio_dma_upstream(struct cx25821_dev *dev,
u32 tmp = 0;
int err = 0;
- // Set the physical start address of the RISC program in the initial program counter(IPC) member of the CMDS.
+ /* Set the physical start address of the RISC program in the initial
+ * program counter(IPC) member of the CMDS. */
cx_write(sram_ch->cmds_start + 0, dev->_risc_phys_addr);
- cx_write(sram_ch->cmds_start + 4, 0); /* Risc IPC High 64 bits 63-32 */
+ /* Risc IPC High 64 bits 63-32 */
+ cx_write(sram_ch->cmds_start + 4, 0);
/* reset counter */
cx_write(sram_ch->gpcnt_ctl, 3);
- //Set the line length (It looks like we do not need to set the line length)
+ /* Set the line length (It looks like we do not need to set the
+ * line length) */
cx_write(sram_ch->aud_length, AUDIO_LINE_SIZE & FLD_AUD_DST_LN_LNGTH);
- //Set the input mode to 16-bit
+ /* Set the input mode to 16-bit */
tmp = cx_read(sram_ch->aud_cfg);
tmp |=
FLD_AUD_SRC_ENABLE | FLD_AUD_DST_PK_MODE | FLD_AUD_CLK_ENABLE |
FLD_AUD_MASTER_MODE | FLD_AUD_CLK_SELECT_PLL_D | FLD_AUD_SONY_MODE;
cx_write(sram_ch->aud_cfg, tmp);
- // Read and write back the interrupt status register to clear it
+ /* Read and write back the interrupt status register to clear it */
tmp = cx_read(sram_ch->int_stat);
cx_write(sram_ch->int_stat, tmp);
- // Clear our bits from the interrupt status register.
+ /* Clear our bits from the interrupt status register. */
cx_write(sram_ch->int_stat, _intr_msk);
- //Set the interrupt mask register, enable irq.
+ /* Set the interrupt mask register, enable irq. */
cx_set(PCI_INT_MSK, cx_read(PCI_INT_MSK) | (1 << sram_ch->irq_bit));
tmp = cx_read(sram_ch->int_msk);
cx_write(sram_ch->int_msk, tmp |= _intr_msk);
@@ -699,19 +701,19 @@ int cx25821_start_audio_dma_upstream(struct cx25821_dev *dev,
goto fail_irq;
}
- // Start the DMA engine
+ /* Start the DMA engine */
tmp = cx_read(sram_ch->dma_ctl);
cx_set(sram_ch->dma_ctl, tmp | sram_ch->fld_aud_risc_en);
dev->_audio_is_running = 1;
dev->_is_first_audio_frame = 1;
- // The fifo_en bit turns on by the first Risc program
+ /* The fifo_en bit turns on by the first Risc program */
cx25821_wait_fifo_enable(dev, sram_ch);
return 0;
- fail_irq:
+fail_irq:
cx25821_dev_unregister(dev);
return err;
}
@@ -731,14 +733,14 @@ int cx25821_audio_upstream_init(struct cx25821_dev *dev, int channel_select)
dev->_audio_upstream_channel_select = channel_select;
sram_ch = &dev->sram_channels[channel_select];
- //Work queue
+ /* Work queue */
INIT_WORK(&dev->_audio_work_entry, cx25821_audioups_handler);
dev->_irq_audio_queues =
create_singlethread_workqueue("cx25821_audioworkqueue");
if (!dev->_irq_audio_queues) {
- printk
- ("cx25821 ERROR: create_singlethread_workqueue() for Audio FAILED!\n");
+ printk(KERN_DEBUG
+ "cx25821 ERROR: create_singlethread_workqueue() for Audio FAILED!\n");
return -ENOMEM;
}
@@ -760,7 +762,7 @@ int cx25821_audio_upstream_init(struct cx25821_dev *dev, int channel_select)
memcpy(dev->_audiofilename, dev->input_audiofilename,
str_length + 1);
- //Default if filename is empty string
+ /* Default if filename is empty string */
if (strcmp(dev->input_audiofilename, "") == 0) {
dev->_audiofilename = "/root/audioGOOD.wav";
}
@@ -784,7 +786,7 @@ int cx25821_audio_upstream_init(struct cx25821_dev *dev, int channel_select)
RISC_SYNC_INSTRUCTION_SIZE;
dev->audio_upstream_databuf_size = AUDIO_DATA_BUF_SZ * NUM_AUDIO_PROGS;
- //Allocating buffers and prepare RISC program
+ /* Allocating buffers and prepare RISC program */
retval =
cx25821_audio_upstream_buffer_prepare(dev, sram_ch, _line_size);
if (retval < 0) {
@@ -793,12 +795,12 @@ int cx25821_audio_upstream_init(struct cx25821_dev *dev, int channel_select)
dev->name);
goto error;
}
- //Start RISC engine
+ /* Start RISC engine */
cx25821_start_audio_dma_upstream(dev, sram_ch);
return 0;
- error:
+error:
cx25821_dev_unregister(dev);
return err;
diff --git a/drivers/staging/cx25821/cx25821-audups11.c b/drivers/staging/cx25821/cx25821-audups11.c
index e76451c309f1..9193a6eb7cf2 100644
--- a/drivers/staging/cx25821/cx25821-audups11.c
+++ b/drivers/staging/cx25821/cx25821-audups11.c
@@ -88,10 +88,10 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
}
static struct videobuf_queue_ops cx25821_video_qops = {
- .buf_setup = buffer_setup,
- .buf_prepare = buffer_prepare,
+ .buf_setup = cx25821_buffer_setup,
+ .buf_prepare = cx25821_buffer_prepare,
.buf_queue = buffer_queue,
- .buf_release = buffer_release,
+ .buf_release = cx25821_buffer_release,
};
static int video_open(struct file *file)
@@ -145,7 +145,7 @@ static ssize_t video_read(struct file *file, char __user * data, size_t count,
switch (fh->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (res_locked(fh->dev, RESOURCE_VIDEO11))
+ if (cx25821_res_locked(fh->dev, RESOURCE_VIDEO11))
return -EBUSY;
return videobuf_read_one(&fh->vidq, data, count, ppos,
@@ -163,7 +163,7 @@ static unsigned int video_poll(struct file *file,
struct cx25821_fh *fh = file->private_data;
struct cx25821_buffer *buf;
- if (res_check(fh, RESOURCE_VIDEO11)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO11)) {
/* streaming capture */
if (list_empty(&fh->vidq.stream))
return POLLERR;
@@ -191,13 +191,13 @@ static int video_release(struct file *file)
//cx_write(channel11->dma_ctl, 0);
/* stop video capture */
- if (res_check(fh, RESOURCE_VIDEO11)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO11)) {
videobuf_queue_cancel(&fh->vidq);
- res_free(dev, fh, RESOURCE_VIDEO11);
+ cx25821_res_free(dev, fh, RESOURCE_VIDEO11);
}
if (fh->vidq.read_buf) {
- buffer_release(&fh->vidq, fh->vidq.read_buf);
+ cx25821_buffer_release(&fh->vidq, fh->vidq.read_buf);
kfree(fh->vidq.read_buf);
}
@@ -224,7 +224,7 @@ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
return -EINVAL;
}
- if (unlikely(!res_get(dev, fh, get_resource(fh, RESOURCE_VIDEO11)))) {
+ if (unlikely(!cx25821_res_get(dev, fh, cx25821_get_resource(fh, RESOURCE_VIDEO11)))) {
return -EBUSY;
}
@@ -242,11 +242,11 @@ static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
if (i != fh->type)
return -EINVAL;
- res = get_resource(fh, RESOURCE_VIDEO11);
+ res = cx25821_get_resource(fh, RESOURCE_VIDEO11);
err = videobuf_streamoff(get_queue(fh));
if (err < 0)
return err;
- res_free(dev, fh, res);
+ cx25821_res_free(dev, fh, res);
return 0;
}
@@ -264,7 +264,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
}
dprintk(2, "%s()\n", __func__);
- err = vidioc_try_fmt_vid_cap(file, priv, f);
+ err = cx25821_vidioc_try_fmt_vid_cap(file, priv, f);
if (0 != err)
return err;
@@ -364,50 +364,50 @@ static const struct v4l2_file_operations video_fops = {
.release = video_release,
.read = video_read,
.poll = video_poll,
- .mmap = video_mmap,
+ .mmap = cx25821_video_mmap,
.ioctl = video_ioctl_upstream11,
};
static const struct v4l2_ioctl_ops video_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_querycap = cx25821_vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = cx25821_vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = cx25821_vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = cx25821_vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_reqbufs = cx25821_vidioc_reqbufs,
+ .vidioc_querybuf = cx25821_vidioc_querybuf,
+ .vidioc_qbuf = cx25821_vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
#ifdef TUNER_FLAG
- .vidioc_s_std = vidioc_s_std,
- .vidioc_querystd = vidioc_querystd,
+ .vidioc_s_std = cx25821_vidioc_s_std,
+ .vidioc_querystd = cx25821_vidioc_querystd,
#endif
- .vidioc_cropcap = vidioc_cropcap,
- .vidioc_s_crop = vidioc_s_crop,
- .vidioc_g_crop = vidioc_g_crop,
- .vidioc_enum_input = vidioc_enum_input,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
- .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_cropcap = cx25821_vidioc_cropcap,
+ .vidioc_s_crop = cx25821_vidioc_s_crop,
+ .vidioc_g_crop = cx25821_vidioc_g_crop,
+ .vidioc_enum_input = cx25821_vidioc_enum_input,
+ .vidioc_g_input = cx25821_vidioc_g_input,
+ .vidioc_s_input = cx25821_vidioc_s_input,
+ .vidioc_g_ctrl = cx25821_vidioc_g_ctrl,
.vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_queryctrl = cx25821_vidioc_queryctrl,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_log_status = vidioc_log_status,
- .vidioc_g_priority = vidioc_g_priority,
- .vidioc_s_priority = vidioc_s_priority,
+ .vidioc_g_priority = cx25821_vidioc_g_priority,
+ .vidioc_s_priority = cx25821_vidioc_s_priority,
#ifdef CONFIG_VIDEO_V4L1_COMPAT
- .vidiocgmbuf = vidiocgmbuf,
+ .vidiocgmbuf = cx25821_vidiocgmbuf,
#endif
#ifdef TUNER_FLAG
- .vidioc_g_tuner = vidioc_g_tuner,
- .vidioc_s_tuner = vidioc_s_tuner,
- .vidioc_g_frequency = vidioc_g_frequency,
- .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_g_tuner = cx25821_vidioc_g_tuner,
+ .vidioc_s_tuner = cx25821_vidioc_s_tuner,
+ .vidioc_g_frequency = cx25821_vidioc_g_frequency,
+ .vidioc_s_frequency = cx25821_vidioc_s_frequency,
#endif
#ifdef CONFIG_VIDEO_ADV_DEBUG
- .vidioc_g_register = vidioc_g_register,
- .vidioc_s_register = vidioc_s_register,
+ .vidioc_g_register = cx25821_vidioc_g_register,
+ .vidioc_s_register = cx25821_vidioc_s_register,
#endif
};
diff --git a/drivers/staging/cx25821/cx25821-cards.c b/drivers/staging/cx25821/cx25821-cards.c
index 4d0b9eac3e49..da0f56d50e27 100644
--- a/drivers/staging/cx25821/cx25821-cards.c
+++ b/drivers/staging/cx25821/cx25821-cards.c
@@ -30,12 +30,12 @@
#include "cx25821.h"
#include "tuner-xc2028.h"
-// board config info
+/* board config info */
struct cx25821_board cx25821_boards[] = {
[UNKNOWN_BOARD] = {
.name = "UNKNOWN/GENERIC",
- // Ensure safe default for unknown boards
+ /* Ensure safe default for unknown boards */
.clk_freq = 0,
},
diff --git a/drivers/staging/cx25821/cx25821-core.c b/drivers/staging/cx25821/cx25821-core.c
index 9e9b8c3c9311..d90abb383fc8 100644
--- a/drivers/staging/cx25821/cx25821-core.c
+++ b/drivers/staging/cx25821/cx25821-core.c
@@ -32,6 +32,7 @@ MODULE_AUTHOR("Shu Lin - Hiep Huynh");
MODULE_LICENSE("GPL");
struct list_head cx25821_devlist;
+EXPORT_SYMBOL(cx25821_devlist);
static unsigned int debug;
module_param(debug, int, 0644);
@@ -313,6 +314,7 @@ struct sram_channel cx25821_sram_channels[] = {
.irq_bit = 11,
},
};
+EXPORT_SYMBOL(cx25821_sram_channels);
struct sram_channel *channel0 = &cx25821_sram_channels[SRAM_CH00];
struct sram_channel *channel1 = &cx25821_sram_channels[SRAM_CH01];
@@ -388,70 +390,74 @@ static void cx25821_registers_init(struct cx25821_dev *dev)
{
u32 tmp;
- // enable RUN_RISC in Pecos
+ /* enable RUN_RISC in Pecos */
cx_write(DEV_CNTRL2, 0x20);
- // Set the master PCI interrupt masks to enable video, audio, MBIF, and GPIO interrupts
- // I2C interrupt masking is handled by the I2C objects themselves.
+ /* Set the master PCI interrupt masks to enable video, audio, MBIF,
+ * and GPIO interrupts
+ * I2C interrupt masking is handled by the I2C objects themselves. */
cx_write(PCI_INT_MSK, 0x2001FFFF);
tmp = cx_read(RDR_TLCTL0);
- tmp &= ~FLD_CFG_RCB_CK_EN; // Clear the RCB_CK_EN bit
+ tmp &= ~FLD_CFG_RCB_CK_EN; /* Clear the RCB_CK_EN bit */
cx_write(RDR_TLCTL0, tmp);
- // PLL-A setting for the Audio Master Clock
+ /* PLL-A setting for the Audio Master Clock */
cx_write(PLL_A_INT_FRAC, 0x9807A58B);
- // PLL_A_POST = 0x1C, PLL_A_OUT_TO_PIN = 0x1
+ /* PLL_A_POST = 0x1C, PLL_A_OUT_TO_PIN = 0x1 */
cx_write(PLL_A_POST_STAT_BIST, 0x8000019C);
- // clear reset bit [31]
+ /* clear reset bit [31] */
tmp = cx_read(PLL_A_INT_FRAC);
cx_write(PLL_A_INT_FRAC, tmp & 0x7FFFFFFF);
- // PLL-B setting for Mobilygen Host Bus Interface
+ /* PLL-B setting for Mobilygen Host Bus Interface */
cx_write(PLL_B_INT_FRAC, 0x9883A86F);
- // PLL_B_POST = 0xD, PLL_B_OUT_TO_PIN = 0x0
+ /* PLL_B_POST = 0xD, PLL_B_OUT_TO_PIN = 0x0 */
cx_write(PLL_B_POST_STAT_BIST, 0x8000018D);
- // clear reset bit [31]
+ /* clear reset bit [31] */
tmp = cx_read(PLL_B_INT_FRAC);
cx_write(PLL_B_INT_FRAC, tmp & 0x7FFFFFFF);
- // PLL-C setting for video upstream channel
+ /* PLL-C setting for video upstream channel */
cx_write(PLL_C_INT_FRAC, 0x96A0EA3F);
- // PLL_C_POST = 0x3, PLL_C_OUT_TO_PIN = 0x0
+ /* PLL_C_POST = 0x3, PLL_C_OUT_TO_PIN = 0x0 */
cx_write(PLL_C_POST_STAT_BIST, 0x80000103);
- // clear reset bit [31]
+ /* clear reset bit [31] */
tmp = cx_read(PLL_C_INT_FRAC);
cx_write(PLL_C_INT_FRAC, tmp & 0x7FFFFFFF);
- // PLL-D setting for audio upstream channel
+ /* PLL-D setting for audio upstream channel */
cx_write(PLL_D_INT_FRAC, 0x98757F5B);
- // PLL_D_POST = 0x13, PLL_D_OUT_TO_PIN = 0x0
+ /* PLL_D_POST = 0x13, PLL_D_OUT_TO_PIN = 0x0 */
cx_write(PLL_D_POST_STAT_BIST, 0x80000113);
- // clear reset bit [31]
+ /* clear reset bit [31] */
tmp = cx_read(PLL_D_INT_FRAC);
cx_write(PLL_D_INT_FRAC, tmp & 0x7FFFFFFF);
- // This selects the PLL C clock source for the video upstream channel I and J
+ /* This selects the PLL C clock source for the video upstream channel
+ * I and J */
tmp = cx_read(VID_CH_CLK_SEL);
cx_write(VID_CH_CLK_SEL, (tmp & 0x00FFFFFF) | 0x24000000);
- // 656/VIP SRC Upstream Channel I & J and 7 - Host Bus Interface for channel A-C
- //select 656/VIP DST for downstream Channel A - C
+ /* 656/VIP SRC Upstream Channel I & J and 7 - Host Bus Interface for
+ * channel A-C
+ * select 656/VIP DST for downstream Channel A - C */
tmp = cx_read(VID_CH_MODE_SEL);
- //cx_write( VID_CH_MODE_SEL, tmp | 0x1B0001FF);
+ /* cx_write( VID_CH_MODE_SEL, tmp | 0x1B0001FF); */
cx_write(VID_CH_MODE_SEL, tmp & 0xFFFFFE00);
- // enables 656 port I and J as output
+ /* enables 656 port I and J as output */
tmp = cx_read(CLK_RST);
- tmp |= FLD_USE_ALT_PLL_REF; // use external ALT_PLL_REF pin as its reference clock instead
+ /* use external ALT_PLL_REF pin as its reference clock instead */
+ tmp |= FLD_USE_ALT_PLL_REF;
cx_write(CLK_RST, tmp & ~(FLD_VID_I_CLK_NOE | FLD_VID_J_CLK_NOE));
mdelay(100);
@@ -476,9 +482,8 @@ int cx25821_sram_channel_setup(struct cx25821_dev *dev,
cdt = ch->cdt;
lines = ch->fifo_size / bpl;
- if (lines > 4) {
+ if (lines > 4)
lines = 4;
- }
BUG_ON(lines < 2);
@@ -494,16 +499,15 @@ int cx25821_sram_channel_setup(struct cx25821_dev *dev,
cx_write(cdt + 16 * i + 12, 0);
}
- //init the first cdt buffer
+ /* init the first cdt buffer */
for (i = 0; i < 128; i++)
cx_write(ch->fifo_start + 4 * i, i);
/* write CMDS */
- if (ch->jumponly) {
+ if (ch->jumponly)
cx_write(ch->cmds_start + 0, 8);
- } else {
+ else
cx_write(ch->cmds_start + 0, risc);
- }
cx_write(ch->cmds_start + 4, 0); /* 64 bits 63-32 */
cx_write(ch->cmds_start + 8, cdt);
@@ -526,6 +530,7 @@ int cx25821_sram_channel_setup(struct cx25821_dev *dev,
return 0;
}
+EXPORT_SYMBOL(cx25821_sram_channel_setup);
int cx25821_sram_channel_setup_audio(struct cx25821_dev *dev,
struct sram_channel *ch,
@@ -546,9 +551,8 @@ int cx25821_sram_channel_setup_audio(struct cx25821_dev *dev,
cdt = ch->cdt;
lines = ch->fifo_size / bpl;
- if (lines > 3) {
- lines = 3; //for AUDIO
- }
+ if (lines > 3)
+ lines = 3; /* for AUDIO */
BUG_ON(lines < 2);
@@ -565,25 +569,23 @@ int cx25821_sram_channel_setup_audio(struct cx25821_dev *dev,
}
/* write CMDS */
- if (ch->jumponly) {
+ if (ch->jumponly)
cx_write(ch->cmds_start + 0, 8);
- } else {
+ else
cx_write(ch->cmds_start + 0, risc);
- }
cx_write(ch->cmds_start + 4, 0); /* 64 bits 63-32 */
cx_write(ch->cmds_start + 8, cdt);
cx_write(ch->cmds_start + 12, (lines * 16) >> 3);
cx_write(ch->cmds_start + 16, ch->ctrl_start);
- //IQ size
- if (ch->jumponly) {
+ /* IQ size */
+ if (ch->jumponly)
cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
- } else {
+ else
cx_write(ch->cmds_start + 20, 64 >> 2);
- }
- //zero out
+ /* zero out */
for (i = 24; i < 80; i += 4)
cx_write(ch->cmds_start + i, 0);
@@ -595,6 +597,7 @@ int cx25821_sram_channel_setup_audio(struct cx25821_dev *dev,
return 0;
}
+EXPORT_SYMBOL(cx25821_sram_channel_setup_audio);
void cx25821_sram_channel_dump(struct cx25821_dev *dev, struct sram_channel *ch)
{
@@ -658,6 +661,7 @@ void cx25821_sram_channel_dump(struct cx25821_dev *dev, struct sram_channel *ch)
printk(KERN_WARNING " : cnt2_reg: 0x%08x\n",
cx_read(ch->cnt2_reg));
}
+EXPORT_SYMBOL(cx25821_sram_channel_dump);
void cx25821_sram_channel_dump_audio(struct cx25821_dev *dev,
struct sram_channel *ch)
@@ -731,7 +735,7 @@ void cx25821_sram_channel_dump_audio(struct cx25821_dev *dev,
printk(KERN_WARNING "instruction %d = 0x%x\n", i, risc);
}
- //read data from the first cdt buffer
+ /* read data from the first cdt buffer */
risc = cx_read(AUD_A_CDT);
printk(KERN_WARNING "\nread cdt loc=0x%x\n", risc);
for (i = 0; i < 8; i++) {
@@ -741,31 +745,32 @@ void cx25821_sram_channel_dump_audio(struct cx25821_dev *dev,
printk(KERN_WARNING "\n\n");
value = cx_read(CLK_RST);
- CX25821_INFO(" CLK_RST = 0x%x \n\n", value);
+ CX25821_INFO(" CLK_RST = 0x%x\n\n", value);
value = cx_read(PLL_A_POST_STAT_BIST);
- CX25821_INFO(" PLL_A_POST_STAT_BIST = 0x%x \n\n", value);
+ CX25821_INFO(" PLL_A_POST_STAT_BIST = 0x%x\n\n", value);
value = cx_read(PLL_A_INT_FRAC);
- CX25821_INFO(" PLL_A_INT_FRAC = 0x%x \n\n", value);
+ CX25821_INFO(" PLL_A_INT_FRAC = 0x%x\n\n", value);
value = cx_read(PLL_B_POST_STAT_BIST);
- CX25821_INFO(" PLL_B_POST_STAT_BIST = 0x%x \n\n", value);
+ CX25821_INFO(" PLL_B_POST_STAT_BIST = 0x%x\n\n", value);
value = cx_read(PLL_B_INT_FRAC);
- CX25821_INFO(" PLL_B_INT_FRAC = 0x%x \n\n", value);
+ CX25821_INFO(" PLL_B_INT_FRAC = 0x%x\n\n", value);
value = cx_read(PLL_C_POST_STAT_BIST);
- CX25821_INFO(" PLL_C_POST_STAT_BIST = 0x%x \n\n", value);
+ CX25821_INFO(" PLL_C_POST_STAT_BIST = 0x%x\n\n", value);
value = cx_read(PLL_C_INT_FRAC);
- CX25821_INFO(" PLL_C_INT_FRAC = 0x%x \n\n", value);
+ CX25821_INFO(" PLL_C_INT_FRAC = 0x%x\n\n", value);
value = cx_read(PLL_D_POST_STAT_BIST);
- CX25821_INFO(" PLL_D_POST_STAT_BIST = 0x%x \n\n", value);
+ CX25821_INFO(" PLL_D_POST_STAT_BIST = 0x%x\n\n", value);
value = cx_read(PLL_D_INT_FRAC);
- CX25821_INFO(" PLL_D_INT_FRAC = 0x%x \n\n", value);
+ CX25821_INFO(" PLL_D_INT_FRAC = 0x%x\n\n", value);
value = cx25821_i2c_read(&dev->i2c_bus[0], AFE_AB_DIAG_CTRL, &tmp);
- CX25821_INFO(" AFE_AB_DIAG_CTRL (0x10900090) = 0x%x \n\n", value);
+ CX25821_INFO(" AFE_AB_DIAG_CTRL (0x10900090) = 0x%x\n\n", value);
}
+EXPORT_SYMBOL(cx25821_sram_channel_dump_audio);
static void cx25821_shutdown(struct cx25821_dev *dev)
{
@@ -835,8 +840,8 @@ static void cx25821_initialize(struct cx25821_dev *dev)
cx_write(AUD_E_INT_STAT, 0xffffffff);
cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
- cx_write(PAD_CTRL, 0x12); //for I2C
- cx25821_registers_init(dev); //init Pecos registers
+ cx_write(PAD_CTRL, 0x12); /* for I2C */
+ cx25821_registers_init(dev); /* init Pecos registers */
mdelay(100);
for (i = 0; i < VID_CHANNEL_NUM; i++) {
@@ -847,7 +852,7 @@ static void cx25821_initialize(struct cx25821_dev *dev)
dev->use_cif_resolution[i] = FALSE;
}
- //Probably only affect Downstream
+ /* Probably only affect Downstream */
for (i = VID_UPSTREAM_SRAM_CHANNEL_I; i <= VID_UPSTREAM_SRAM_CHANNEL_J;
i++) {
cx25821_set_vip_mode(dev, &dev->sram_channels[i]);
@@ -859,7 +864,7 @@ static void cx25821_initialize(struct cx25821_dev *dev)
cx25821_gpio_init(dev);
}
-static int get_resources(struct cx25821_dev *dev)
+static int cx25821_get_resources(struct cx25821_dev *dev)
{
if (request_mem_region
(pci_resource_start(dev->pci, 0), pci_resource_len(dev->pci, 0),
@@ -944,12 +949,11 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
dev->clk_freq = 28000000;
dev->sram_channels = cx25821_sram_channels;
- if (dev->nr > 1) {
+ if (dev->nr > 1)
CX25821_INFO("dev->nr > 1!");
- }
/* board config */
- dev->board = 1; //card[dev->nr];
+ dev->board = 1; /* card[dev->nr]; */
dev->_max_num_decoders = MAX_DECODERS;
dev->pci_bus = dev->pci->bus->number;
@@ -967,7 +971,7 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
dev->i2c_bus[0].i2c_period = (0x07 << 24); /* 1.95MHz */
- if (get_resources(dev) < 0) {
+ if (cx25821_get_resources(dev) < 0) {
printk(KERN_ERR "%s No more PCIe resources for "
"subsystem: %04x:%04x\n",
dev->name, dev->pci->subsystem_vendor,
@@ -1007,8 +1011,8 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
cx25821_initialize(dev);
cx25821_i2c_register(&dev->i2c_bus[0]);
-// cx25821_i2c_register(&dev->i2c_bus[1]);
-// cx25821_i2c_register(&dev->i2c_bus[2]);
+/* cx25821_i2c_register(&dev->i2c_bus[1]);
+ * cx25821_i2c_register(&dev->i2c_bus[2]); */
CX25821_INFO("i2c register! bus->i2c_rc = %d\n",
dev->i2c_bus[0].i2c_rc);
@@ -1026,7 +1030,7 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
for (i = VID_UPSTREAM_SRAM_CHANNEL_I;
i <= AUDIO_UPSTREAM_SRAM_CHANNEL_B; i++) {
- //Since we don't have template8 for Audio Downstream
+ /* Since we don't have template8 for Audio Downstream */
if (cx25821_video_register(dev, i, video_template[i - 1]) < 0) {
printk(KERN_ERR
"%s() Failed to register analog video adapters for Upstream channel %d.\n",
@@ -1034,7 +1038,7 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
}
}
- // register IOCTL device
+ /* register IOCTL device */
dev->ioctl_dev =
cx25821_vdev_init(dev, dev->pci, video_template[VIDEO_IOCTL_CH],
"video");
@@ -1112,6 +1116,7 @@ void cx25821_dev_unregister(struct cx25821_dev *dev)
cx25821_i2c_unregister(&dev->i2c_bus[0]);
cx25821_iounmap(dev);
}
+EXPORT_SYMBOL(cx25821_dev_unregister);
static __le32 *cx25821_risc_field(__le32 * rp, struct scatterlist *sglist,
unsigned int offset, u32 sync_line,
@@ -1122,9 +1127,8 @@ static __le32 *cx25821_risc_field(__le32 * rp, struct scatterlist *sglist,
unsigned int line, todo;
/* sync instruction */
- if (sync_line != NO_SYNC_LINE) {
+ if (sync_line != NO_SYNC_LINE)
*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
- }
/* scan lines */
sg = sglist;
@@ -1299,7 +1303,8 @@ int cx25821_risc_databuffer_audio(struct pci_dev *pci,
instructions = 1 + (bpl * lines) / PAGE_SIZE + lines;
instructions += 1;
- if ((rc = btcx_riscmem_alloc(pci, risc, instructions * 12)) < 0)
+ rc = btcx_riscmem_alloc(pci, risc, instructions * 12);
+ if (rc < 0)
return rc;
/* write risc instructions */
@@ -1312,6 +1317,7 @@ int cx25821_risc_databuffer_audio(struct pci_dev *pci,
BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
return 0;
}
+EXPORT_SYMBOL(cx25821_risc_databuffer_audio);
int cx25821_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
u32 reg, u32 mask, u32 value)
@@ -1375,7 +1381,7 @@ static irqreturn_t cx25821_irq(int irq, void *dev_id)
}
}
- out:
+out:
return IRQ_RETVAL(handled);
}
@@ -1399,12 +1405,14 @@ void cx25821_print_irqbits(char *name, char *tag, char **strings,
}
printk("\n");
}
+EXPORT_SYMBOL(cx25821_print_irqbits);
struct cx25821_dev *cx25821_dev_get(struct pci_dev *pci)
{
struct cx25821_dev *dev = pci_get_drvdata(pci);
return dev;
}
+EXPORT_SYMBOL(cx25821_dev_get);
static int __devinit cx25821_initdev(struct pci_dev *pci_dev,
const struct pci_device_id *pci_id)
@@ -1430,7 +1438,7 @@ static int __devinit cx25821_initdev(struct pci_dev *pci_dev,
goto fail_unregister_device;
}
- printk(KERN_INFO "cx25821 Athena pci enable ! \n");
+ printk(KERN_INFO "cx25821 Athena pci enable !\n");
if (cx25821_dev_setup(dev) < 0) {
err = -EINVAL;
@@ -1464,14 +1472,14 @@ static int __devinit cx25821_initdev(struct pci_dev *pci_dev,
return 0;
- fail_irq:
- printk(KERN_INFO "cx25821 cx25821_initdev() can't get IRQ ! \n");
+fail_irq:
+ printk(KERN_INFO "cx25821 cx25821_initdev() can't get IRQ !\n");
cx25821_dev_unregister(dev);
- fail_unregister_device:
+fail_unregister_device:
v4l2_device_unregister(&dev->v4l2_dev);
- fail_free:
+fail_free:
kfree(dev);
return err;
}
@@ -1536,16 +1544,6 @@ static void __exit cx25821_fini(void)
pci_unregister_driver(&cx25821_pci_driver);
}
-EXPORT_SYMBOL(cx25821_devlist);
-EXPORT_SYMBOL(cx25821_sram_channels);
-EXPORT_SYMBOL(cx25821_print_irqbits);
-EXPORT_SYMBOL(cx25821_dev_get);
-EXPORT_SYMBOL(cx25821_dev_unregister);
-EXPORT_SYMBOL(cx25821_sram_channel_setup);
-EXPORT_SYMBOL(cx25821_sram_channel_dump);
-EXPORT_SYMBOL(cx25821_sram_channel_setup_audio);
-EXPORT_SYMBOL(cx25821_sram_channel_dump_audio);
-EXPORT_SYMBOL(cx25821_risc_databuffer_audio);
EXPORT_SYMBOL(cx25821_set_gpiopin_direction);
module_init(cx25821_init);
diff --git a/drivers/staging/cx25821/cx25821-gpio.c b/drivers/staging/cx25821/cx25821-gpio.c
index e8a37b47e437..2f154b3658a1 100644
--- a/drivers/staging/cx25821/cx25821-gpio.c
+++ b/drivers/staging/cx25821/cx25821-gpio.c
@@ -31,7 +31,7 @@ void cx25821_set_gpiopin_direction(struct cx25821_dev *dev,
u32 gpio_register = 0;
u32 value = 0;
- // Check for valid pinNumber
+ /* Check for valid pinNumber */
if (pin_number >= 47)
return;
@@ -39,14 +39,14 @@ void cx25821_set_gpiopin_direction(struct cx25821_dev *dev,
bit = pin_number - 31;
gpio_oe_reg = GPIO_HI_OE;
}
- // Here we will make sure that the GPIOs 0 and 1 are output. keep the rest as is
+	/* Here we will make sure that the GPIOs 0 and 1 are output. Keep the
+	 * rest as is. */
gpio_register = cx_read(gpio_oe_reg);
- if (pin_logic_value == 1) {
+ if (pin_logic_value == 1)
value = gpio_register | Set_GPIO_Bit(bit);
- } else {
+ else
value = gpio_register & Clear_GPIO_Bit(bit);
- }
cx_write(gpio_oe_reg, value);
}
@@ -58,11 +58,12 @@ static void cx25821_set_gpiopin_logicvalue(struct cx25821_dev *dev,
u32 gpio_reg = GPIO_LO;
u32 value = 0;
- // Check for valid pinNumber
+ /* Check for valid pinNumber */
if (pin_number >= 47)
return;
- cx25821_set_gpiopin_direction(dev, pin_number, 0); // change to output direction
+ /* change to output direction */
+ cx25821_set_gpiopin_direction(dev, pin_number, 0);
if (pin_number > 31) {
bit = pin_number - 31;
@@ -71,25 +72,23 @@ static void cx25821_set_gpiopin_logicvalue(struct cx25821_dev *dev,
value = cx_read(gpio_reg);
- if (pin_logic_value == 0) {
+ if (pin_logic_value == 0)
value &= Clear_GPIO_Bit(bit);
- } else {
+ else
value |= Set_GPIO_Bit(bit);
- }
cx_write(gpio_reg, value);
}
void cx25821_gpio_init(struct cx25821_dev *dev)
{
- if (dev == NULL) {
+ if (dev == NULL)
return;
- }
switch (dev->board) {
case CX25821_BOARD_CONEXANT_ATHENA10:
default:
- //set GPIO 5 to select the path for Medusa/Athena
+ /* set GPIO 5 to select the path for Medusa/Athena */
cx25821_set_gpiopin_logicvalue(dev, 5, 1);
mdelay(20);
break;
diff --git a/drivers/staging/cx25821/cx25821-i2c.c b/drivers/staging/cx25821/cx25821-i2c.c
index f4f2681d8f1c..08f45b52df6a 100644
--- a/drivers/staging/cx25821/cx25821-i2c.c
+++ b/drivers/staging/cx25821/cx25821-i2c.c
@@ -28,7 +28,7 @@ static unsigned int i2c_debug;
module_param(i2c_debug, int, 0644);
MODULE_PARM_DESC(i2c_debug, "enable debug messages [i2c]");
-static unsigned int i2c_scan = 0;
+static unsigned int i2c_scan;
module_param(i2c_scan, int, 0444);
MODULE_PARM_DESC(i2c_scan, "scan i2c bus at insmod time");
@@ -159,9 +159,9 @@ static int i2c_sendbytes(struct i2c_adapter *i2c_adap,
return msg->len;
- eio:
+eio:
retval = -EIO;
- err:
+err:
if (i2c_debug)
printk(KERN_ERR " ERR: %d\n", retval);
return retval;
@@ -223,9 +223,9 @@ static int i2c_readbytes(struct i2c_adapter *i2c_adap,
}
return msg->len;
- eio:
+eio:
retval = -EIO;
- err:
+err:
if (i2c_debug)
printk(KERN_ERR " ERR: %d\n", retval);
return retval;
@@ -266,7 +266,7 @@ static int i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int num)
}
return num;
- err:
+err:
return retval;
}
@@ -319,7 +319,7 @@ int cx25821_i2c_register(struct cx25821_i2c *bus)
bus->i2c_client.adapter = &bus->i2c_adap;
- //set up the I2c
+ /* set up the I2c */
bus->i2c_client.addr = (0x88 >> 1);
return bus->i2c_rc;
diff --git a/drivers/staging/cx25821/cx25821-medusa-video.c b/drivers/staging/cx25821/cx25821-medusa-video.c
index d6016200d699..0bb33ba7e99d 100644
--- a/drivers/staging/cx25821/cx25821-medusa-video.c
+++ b/drivers/staging/cx25821/cx25821-medusa-video.c
@@ -24,11 +24,12 @@
#include "cx25821-medusa-video.h"
#include "cx25821-biffuncs.h"
-/////////////////////////////////////////////////////////////////////////////////////////
-//medusa_enable_bluefield_output()
-//
-// Enable the generation of blue filed output if no video
-//
+/*
+ * medusa_enable_bluefield_output()
+ *
+ * Enable the generation of blue field output if no video
+ *
+ */
static void medusa_enable_bluefield_output(struct cx25821_dev *dev, int channel,
int enable)
{
@@ -73,15 +74,15 @@ static void medusa_enable_bluefield_output(struct cx25821_dev *dev, int channel,
}
value = cx25821_i2c_read(&dev->i2c_bus[0], out_ctrl, &tmp);
- value &= 0xFFFFFF7F; // clear BLUE_FIELD_EN
+ value &= 0xFFFFFF7F; /* clear BLUE_FIELD_EN */
if (enable)
- value |= 0x00000080; // set BLUE_FIELD_EN
+ value |= 0x00000080; /* set BLUE_FIELD_EN */
ret_val = cx25821_i2c_write(&dev->i2c_bus[0], out_ctrl, value);
value = cx25821_i2c_read(&dev->i2c_bus[0], out_ctrl_ns, &tmp);
value &= 0xFFFFFF7F;
if (enable)
- value |= 0x00000080; // set BLUE_FIELD_EN
+ value |= 0x00000080; /* set BLUE_FIELD_EN */
ret_val = cx25821_i2c_write(&dev->i2c_bus[0], out_ctrl_ns, value);
}
@@ -95,17 +96,18 @@ static int medusa_initialize_ntsc(struct cx25821_dev *dev)
mutex_lock(&dev->lock);
for (i = 0; i < MAX_DECODERS; i++) {
- // set video format NTSC-M
+ /* set video format NTSC-M */
value =
cx25821_i2c_read(&dev->i2c_bus[0], MODE_CTRL + (0x200 * i),
&tmp);
value &= 0xFFFFFFF0;
- value |= 0x10001; // enable the fast locking mode bit[16]
+ /* enable the fast locking mode bit[16] */
+ value |= 0x10001;
ret_val =
cx25821_i2c_write(&dev->i2c_bus[0], MODE_CTRL + (0x200 * i),
value);
- // resolution NTSC 720x480
+ /* resolution NTSC 720x480 */
value =
cx25821_i2c_read(&dev->i2c_bus[0],
HORIZ_TIM_CTRL + (0x200 * i), &tmp);
@@ -119,17 +121,17 @@ static int medusa_initialize_ntsc(struct cx25821_dev *dev)
cx25821_i2c_read(&dev->i2c_bus[0],
VERT_TIM_CTRL + (0x200 * i), &tmp);
value &= 0x00C00C00;
- value |= 0x1C1E001A; // vblank_cnt + 2 to get camera ID
+ value |= 0x1C1E001A; /* vblank_cnt + 2 to get camera ID */
ret_val =
cx25821_i2c_write(&dev->i2c_bus[0],
VERT_TIM_CTRL + (0x200 * i), value);
- // chroma subcarrier step size
+ /* chroma subcarrier step size */
ret_val =
cx25821_i2c_write(&dev->i2c_bus[0],
SC_STEP_SIZE + (0x200 * i), 0x43E00000);
- // enable VIP optional active
+ /* enable VIP optional active */
value =
cx25821_i2c_read(&dev->i2c_bus[0],
OUT_CTRL_NS + (0x200 * i), &tmp);
@@ -139,7 +141,7 @@ static int medusa_initialize_ntsc(struct cx25821_dev *dev)
cx25821_i2c_write(&dev->i2c_bus[0],
OUT_CTRL_NS + (0x200 * i), value);
- // enable VIP optional active (VIP_OPT_AL) for direct output.
+ /* enable VIP optional active (VIP_OPT_AL) for direct output. */
value =
cx25821_i2c_read(&dev->i2c_bus[0], OUT_CTRL1 + (0x200 * i),
&tmp);
@@ -149,19 +151,21 @@ static int medusa_initialize_ntsc(struct cx25821_dev *dev)
cx25821_i2c_write(&dev->i2c_bus[0], OUT_CTRL1 + (0x200 * i),
value);
- // clear VPRES_VERT_EN bit, fixes the chroma run away problem
- // when the input switching rate < 16 fields
- //
+ /*
+		 * clear VPRES_VERT_EN bit, fixes the chroma runaway problem
+ * when the input switching rate < 16 fields
+ */
value =
cx25821_i2c_read(&dev->i2c_bus[0],
MISC_TIM_CTRL + (0x200 * i), &tmp);
- value = setBitAtPos(value, 14); // disable special play detection
+ /* disable special play detection */
+ value = setBitAtPos(value, 14);
value = clearBitAtPos(value, 15);
ret_val =
cx25821_i2c_write(&dev->i2c_bus[0],
MISC_TIM_CTRL + (0x200 * i), value);
- // set vbi_gate_en to 0
+ /* set vbi_gate_en to 0 */
value =
cx25821_i2c_read(&dev->i2c_bus[0], DFE_CTRL1 + (0x200 * i),
&tmp);
@@ -170,12 +174,12 @@ static int medusa_initialize_ntsc(struct cx25821_dev *dev)
cx25821_i2c_write(&dev->i2c_bus[0], DFE_CTRL1 + (0x200 * i),
value);
- // Enable the generation of blue field output if no video
+ /* Enable the generation of blue field output if no video */
medusa_enable_bluefield_output(dev, i, 1);
}
for (i = 0; i < MAX_ENCODERS; i++) {
- // NTSC hclock
+ /* NTSC hclock */
value =
cx25821_i2c_read(&dev->i2c_bus[0],
DENC_A_REG_1 + (0x100 * i), &tmp);
@@ -185,7 +189,7 @@ static int medusa_initialize_ntsc(struct cx25821_dev *dev)
cx25821_i2c_write(&dev->i2c_bus[0],
DENC_A_REG_1 + (0x100 * i), value);
- // burst begin and burst end
+ /* burst begin and burst end */
value =
cx25821_i2c_read(&dev->i2c_bus[0],
DENC_A_REG_2 + (0x100 * i), &tmp);
@@ -204,7 +208,7 @@ static int medusa_initialize_ntsc(struct cx25821_dev *dev)
cx25821_i2c_write(&dev->i2c_bus[0],
DENC_A_REG_3 + (0x100 * i), value);
- // set NTSC vblank, no phase alternation, 7.5 IRE pedestal
+ /* set NTSC vblank, no phase alternation, 7.5 IRE pedestal */
value =
cx25821_i2c_read(&dev->i2c_bus[0],
DENC_A_REG_4 + (0x100 * i), &tmp);
@@ -227,17 +231,19 @@ static int medusa_initialize_ntsc(struct cx25821_dev *dev)
cx25821_i2c_write(&dev->i2c_bus[0],
DENC_A_REG_6 + (0x100 * i), 0x009A89C1);
- // Subcarrier Increment
+ /* Subcarrier Increment */
ret_val =
cx25821_i2c_write(&dev->i2c_bus[0],
DENC_A_REG_7 + (0x100 * i), 0x21F07C1F);
}
- //set picture resolutions
- ret_val = cx25821_i2c_write(&dev->i2c_bus[0], HSCALE_CTRL, 0x0); //0 - 720
- ret_val = cx25821_i2c_write(&dev->i2c_bus[0], VSCALE_CTRL, 0x0); //0 - 480
+ /* set picture resolutions */
+ /* 0 - 720 */
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0], HSCALE_CTRL, 0x0);
+ /* 0 - 480 */
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0], VSCALE_CTRL, 0x0);
- // set Bypass input format to NTSC 525 lines
+ /* set Bypass input format to NTSC 525 lines */
value = cx25821_i2c_read(&dev->i2c_bus[0], BYP_AB_CTRL, &tmp);
value |= 0x00080200;
ret_val = cx25821_i2c_write(&dev->i2c_bus[0], BYP_AB_CTRL, value);
@@ -252,7 +258,7 @@ static int medusa_PALCombInit(struct cx25821_dev *dev, int dec)
int ret_val = -1;
u32 value = 0, tmp = 0;
- // Setup for 2D threshold
+ /* Setup for 2D threshold */
ret_val =
cx25821_i2c_write(&dev->i2c_bus[0], COMB_2D_HFS_CFG + (0x200 * dec),
0x20002861);
@@ -263,7 +269,7 @@ static int medusa_PALCombInit(struct cx25821_dev *dev, int dec)
cx25821_i2c_write(&dev->i2c_bus[0], COMB_2D_LF_CFG + (0x200 * dec),
0x200A1023);
- // Setup flat chroma and luma thresholds
+ /* Setup flat chroma and luma thresholds */
value =
cx25821_i2c_read(&dev->i2c_bus[0],
COMB_FLAT_THRESH_CTRL + (0x200 * dec), &tmp);
@@ -272,12 +278,12 @@ static int medusa_PALCombInit(struct cx25821_dev *dev, int dec)
cx25821_i2c_write(&dev->i2c_bus[0],
COMB_FLAT_THRESH_CTRL + (0x200 * dec), value);
- // set comb 2D blend
+ /* set comb 2D blend */
ret_val =
cx25821_i2c_write(&dev->i2c_bus[0], COMB_2D_BLEND + (0x200 * dec),
0x210F0F0F);
- // COMB MISC CONTROL
+ /* COMB MISC CONTROL */
ret_val =
cx25821_i2c_write(&dev->i2c_bus[0], COMB_MISC_CTRL + (0x200 * dec),
0x41120A7F);
@@ -295,17 +301,18 @@ static int medusa_initialize_pal(struct cx25821_dev *dev)
mutex_lock(&dev->lock);
for (i = 0; i < MAX_DECODERS; i++) {
- // set video format PAL-BDGHI
+ /* set video format PAL-BDGHI */
value =
cx25821_i2c_read(&dev->i2c_bus[0], MODE_CTRL + (0x200 * i),
&tmp);
value &= 0xFFFFFFF0;
- value |= 0x10004; // enable the fast locking mode bit[16]
+ /* enable the fast locking mode bit[16] */
+ value |= 0x10004;
ret_val =
cx25821_i2c_write(&dev->i2c_bus[0], MODE_CTRL + (0x200 * i),
value);
- // resolution PAL 720x576
+ /* resolution PAL 720x576 */
value =
cx25821_i2c_read(&dev->i2c_bus[0],
HORIZ_TIM_CTRL + (0x200 * i), &tmp);
@@ -315,22 +322,22 @@ static int medusa_initialize_pal(struct cx25821_dev *dev)
cx25821_i2c_write(&dev->i2c_bus[0],
HORIZ_TIM_CTRL + (0x200 * i), value);
- // vblank656_cnt=x26, vactive_cnt=240h, vblank_cnt=x24
+ /* vblank656_cnt=x26, vactive_cnt=240h, vblank_cnt=x24 */
value =
cx25821_i2c_read(&dev->i2c_bus[0],
VERT_TIM_CTRL + (0x200 * i), &tmp);
value &= 0x00C00C00;
- value |= 0x28240026; // vblank_cnt + 2 to get camera ID
+ value |= 0x28240026; /* vblank_cnt + 2 to get camera ID */
ret_val =
cx25821_i2c_write(&dev->i2c_bus[0],
VERT_TIM_CTRL + (0x200 * i), value);
- // chroma subcarrier step size
+ /* chroma subcarrier step size */
ret_val =
cx25821_i2c_write(&dev->i2c_bus[0],
SC_STEP_SIZE + (0x200 * i), 0x5411E2D0);
- // enable VIP optional active
+ /* enable VIP optional active */
value =
cx25821_i2c_read(&dev->i2c_bus[0],
OUT_CTRL_NS + (0x200 * i), &tmp);
@@ -340,7 +347,7 @@ static int medusa_initialize_pal(struct cx25821_dev *dev)
cx25821_i2c_write(&dev->i2c_bus[0],
OUT_CTRL_NS + (0x200 * i), value);
- // enable VIP optional active (VIP_OPT_AL) for direct output.
+ /* enable VIP optional active (VIP_OPT_AL) for direct output. */
value =
cx25821_i2c_read(&dev->i2c_bus[0], OUT_CTRL1 + (0x200 * i),
&tmp);
@@ -350,18 +357,21 @@ static int medusa_initialize_pal(struct cx25821_dev *dev)
cx25821_i2c_write(&dev->i2c_bus[0], OUT_CTRL1 + (0x200 * i),
value);
- // clear VPRES_VERT_EN bit, fixes the chroma run away problem
- // when the input switching rate < 16 fields
+ /*
+		 * clear VPRES_VERT_EN bit, fixes the chroma runaway problem
+ * when the input switching rate < 16 fields
+ */
value =
cx25821_i2c_read(&dev->i2c_bus[0],
MISC_TIM_CTRL + (0x200 * i), &tmp);
- value = setBitAtPos(value, 14); // disable special play detection
+ /* disable special play detection */
+ value = setBitAtPos(value, 14);
value = clearBitAtPos(value, 15);
ret_val =
cx25821_i2c_write(&dev->i2c_bus[0],
MISC_TIM_CTRL + (0x200 * i), value);
- // set vbi_gate_en to 0
+ /* set vbi_gate_en to 0 */
value =
cx25821_i2c_read(&dev->i2c_bus[0], DFE_CTRL1 + (0x200 * i),
&tmp);
@@ -372,12 +382,12 @@ static int medusa_initialize_pal(struct cx25821_dev *dev)
medusa_PALCombInit(dev, i);
- // Enable the generation of blue field output if no video
+ /* Enable the generation of blue field output if no video */
medusa_enable_bluefield_output(dev, i, 1);
}
for (i = 0; i < MAX_ENCODERS; i++) {
- // PAL hclock
+ /* PAL hclock */
value =
cx25821_i2c_read(&dev->i2c_bus[0],
DENC_A_REG_1 + (0x100 * i), &tmp);
@@ -387,7 +397,7 @@ static int medusa_initialize_pal(struct cx25821_dev *dev)
cx25821_i2c_write(&dev->i2c_bus[0],
DENC_A_REG_1 + (0x100 * i), value);
- // burst begin and burst end
+ /* burst begin and burst end */
value =
cx25821_i2c_read(&dev->i2c_bus[0],
DENC_A_REG_2 + (0x100 * i), &tmp);
@@ -397,7 +407,7 @@ static int medusa_initialize_pal(struct cx25821_dev *dev)
cx25821_i2c_write(&dev->i2c_bus[0],
DENC_A_REG_2 + (0x100 * i), value);
- // hblank and vactive
+ /* hblank and vactive */
value =
cx25821_i2c_read(&dev->i2c_bus[0],
DENC_A_REG_3 + (0x100 * i), &tmp);
@@ -407,7 +417,7 @@ static int medusa_initialize_pal(struct cx25821_dev *dev)
cx25821_i2c_write(&dev->i2c_bus[0],
DENC_A_REG_3 + (0x100 * i), value);
- // set PAL vblank, phase alternation, 0 IRE pedestal
+ /* set PAL vblank, phase alternation, 0 IRE pedestal */
value =
cx25821_i2c_read(&dev->i2c_bus[0],
DENC_A_REG_4 + (0x100 * i), &tmp);
@@ -430,17 +440,19 @@ static int medusa_initialize_pal(struct cx25821_dev *dev)
cx25821_i2c_write(&dev->i2c_bus[0],
DENC_A_REG_6 + (0x100 * i), 0x00A493CF);
- // Subcarrier Increment
+ /* Subcarrier Increment */
ret_val =
cx25821_i2c_write(&dev->i2c_bus[0],
DENC_A_REG_7 + (0x100 * i), 0x2A098ACB);
}
- //set picture resolutions
- ret_val = cx25821_i2c_write(&dev->i2c_bus[0], HSCALE_CTRL, 0x0); //0 - 720
- ret_val = cx25821_i2c_write(&dev->i2c_bus[0], VSCALE_CTRL, 0x0); //0 - 576
+ /* set picture resolutions */
+ /* 0 - 720 */
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0], HSCALE_CTRL, 0x0);
+ /* 0 - 576 */
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0], VSCALE_CTRL, 0x0);
- // set Bypass input format to PAL 625 lines
+ /* set Bypass input format to PAL 625 lines */
value = cx25821_i2c_read(&dev->i2c_bus[0], BYP_AB_CTRL, &tmp);
value &= 0xFFF7FDFF;
ret_val = cx25821_i2c_write(&dev->i2c_bus[0], BYP_AB_CTRL, value);
@@ -455,18 +467,17 @@ int medusa_set_videostandard(struct cx25821_dev *dev)
int status = STATUS_SUCCESS;
u32 value = 0, tmp = 0;
- if (dev->tvnorm & V4L2_STD_PAL_BG || dev->tvnorm & V4L2_STD_PAL_DK) {
+ if (dev->tvnorm & V4L2_STD_PAL_BG || dev->tvnorm & V4L2_STD_PAL_DK)
status = medusa_initialize_pal(dev);
- } else {
+ else
status = medusa_initialize_ntsc(dev);
- }
- // Enable DENC_A output
+ /* Enable DENC_A output */
value = cx25821_i2c_read(&dev->i2c_bus[0], DENC_A_REG_4, &tmp);
value = setBitAtPos(value, 4);
status = cx25821_i2c_write(&dev->i2c_bus[0], DENC_A_REG_4, value);
- // Enable DENC_B output
+ /* Enable DENC_B output */
value = cx25821_i2c_read(&dev->i2c_bus[0], DENC_B_REG_4, &tmp);
value = setBitAtPos(value, 4);
status = cx25821_i2c_write(&dev->i2c_bus[0], DENC_B_REG_4, value);
@@ -486,10 +497,10 @@ void medusa_set_resolution(struct cx25821_dev *dev, int width,
mutex_lock(&dev->lock);
- // validate the width - cannot be negative
+ /* validate the width - cannot be negative */
if (width > MAX_WIDTH) {
printk
- ("cx25821 %s() : width %d > MAX_WIDTH %d ! resetting to MAX_WIDTH \n",
+ ("cx25821 %s() : width %d > MAX_WIDTH %d ! resetting to MAX_WIDTH\n",
__func__, width, MAX_WIDTH);
width = MAX_WIDTH;
}
@@ -523,14 +534,14 @@ void medusa_set_resolution(struct cx25821_dev *dev, int width,
vscale = 0x1E00;
break;
- default: //720
+ default: /* 720 */
hscale = 0x0;
vscale = 0x0;
break;
}
for (; decoder < decoder_count; decoder++) {
- // write scaling values for each decoder
+ /* write scaling values for each decoder */
ret_val =
cx25821_i2c_write(&dev->i2c_bus[0],
HSCALE_CTRL + (0x200 * decoder), hscale);
@@ -552,7 +563,7 @@ static void medusa_set_decoderduration(struct cx25821_dev *dev, int decoder,
mutex_lock(&dev->lock);
- // no support
+ /* no support */
if (decoder < VDEC_A && decoder > VDEC_H) {
mutex_unlock(&dev->lock);
return;
@@ -577,11 +588,10 @@ static void medusa_set_decoderduration(struct cx25821_dev *dev, int decoder,
_display_field_cnt[decoder] = duration;
- // update hardware
+ /* update hardware */
fld_cnt = cx25821_i2c_read(&dev->i2c_bus[0], disp_cnt_reg, &tmp);
- if (!(decoder % 2)) // EVEN decoder
- {
+ if (!(decoder % 2)) { /* EVEN decoder */
fld_cnt &= 0xFFFF0000;
fld_cnt |= duration;
} else {
@@ -594,8 +604,7 @@ static void medusa_set_decoderduration(struct cx25821_dev *dev, int decoder,
mutex_unlock(&dev->lock);
}
-/////////////////////////////////////////////////////////////////////////////////////////
-// Map to Medusa register setting
+/* Map to Medusa register setting */
static int mapM(int srcMin,
int srcMax, int srcVal, int dstMin, int dstMax, int *dstVal)
{
@@ -603,20 +612,21 @@ static int mapM(int srcMin,
int denominator;
int quotient;
- if ((srcMin == srcMax) || (srcVal < srcMin) || (srcVal > srcMax)) {
+ if ((srcMin == srcMax) || (srcVal < srcMin) || (srcVal > srcMax))
return -1;
- }
- // This is the overall expression used:
- // *dstVal = (srcVal - srcMin)*(dstMax - dstMin) / (srcMax - srcMin) + dstMin;
- // but we need to account for rounding so below we use the modulus
- // operator to find the remainder and increment if necessary.
+ /*
+ * This is the overall expression used:
+ * *dstVal =
+ * (srcVal - srcMin)*(dstMax - dstMin) / (srcMax - srcMin) + dstMin;
+ * but we need to account for rounding so below we use the modulus
+ * operator to find the remainder and increment if necessary.
+ */
numerator = (srcVal - srcMin) * (dstMax - dstMin);
denominator = srcMax - srcMin;
quotient = numerator / denominator;
- if (2 * (numerator % denominator) >= denominator) {
+ if (2 * (numerator % denominator) >= denominator)
quotient++;
- }
*dstVal = quotient + dstMin;
@@ -636,7 +646,6 @@ static unsigned long convert_to_twos(long numeric, unsigned long bits_len)
}
}
-/////////////////////////////////////////////////////////////////////////////////////////
int medusa_set_brightness(struct cx25821_dev *dev, int brightness, int decoder)
{
int ret_val = 0;
@@ -665,7 +674,6 @@ int medusa_set_brightness(struct cx25821_dev *dev, int brightness, int decoder)
return ret_val;
}
-/////////////////////////////////////////////////////////////////////////////////////////
int medusa_set_contrast(struct cx25821_dev *dev, int contrast, int decoder)
{
int ret_val = 0;
@@ -695,7 +703,6 @@ int medusa_set_contrast(struct cx25821_dev *dev, int contrast, int decoder)
return ret_val;
}
-/////////////////////////////////////////////////////////////////////////////////////////
int medusa_set_hue(struct cx25821_dev *dev, int hue, int decoder)
{
int ret_val = 0;
@@ -727,7 +734,6 @@ int medusa_set_hue(struct cx25821_dev *dev, int hue, int decoder)
return ret_val;
}
-/////////////////////////////////////////////////////////////////////////////////////////
int medusa_set_saturation(struct cx25821_dev *dev, int saturation, int decoder)
{
int ret_val = 0;
@@ -768,9 +774,8 @@ int medusa_set_saturation(struct cx25821_dev *dev, int saturation, int decoder)
return ret_val;
}
-/////////////////////////////////////////////////////////////////////////////////////////
-// Program the display sequence and monitor output.
-//
+/* Program the display sequence and monitor output. */
+
int medusa_video_init(struct cx25821_dev *dev)
{
u32 value = 0, tmp = 0;
@@ -781,7 +786,7 @@ int medusa_video_init(struct cx25821_dev *dev)
_num_decoders = dev->_max_num_decoders;
- // disable Auto source selection on all video decoders
+ /* disable Auto source selection on all video decoders */
value = cx25821_i2c_read(&dev->i2c_bus[0], MON_A_CTRL, &tmp);
value &= 0xFFFFF0FF;
ret_val = cx25821_i2c_write(&dev->i2c_bus[0], MON_A_CTRL, value);
@@ -790,7 +795,7 @@ int medusa_video_init(struct cx25821_dev *dev)
mutex_unlock(&dev->lock);
return -EINVAL;
}
- // Turn off Master source switch enable
+ /* Turn off Master source switch enable */
value = cx25821_i2c_read(&dev->i2c_bus[0], MON_A_CTRL, &tmp);
value &= 0xFFFFFFDF;
ret_val = cx25821_i2c_write(&dev->i2c_bus[0], MON_A_CTRL, value);
@@ -800,32 +805,31 @@ int medusa_video_init(struct cx25821_dev *dev)
mutex_unlock(&dev->lock);
- for (i = 0; i < _num_decoders; i++) {
+ for (i = 0; i < _num_decoders; i++)
medusa_set_decoderduration(dev, i, _display_field_cnt[i]);
- }
mutex_lock(&dev->lock);
- // Select monitor as DENC A input, power up the DAC
+ /* Select monitor as DENC A input, power up the DAC */
value = cx25821_i2c_read(&dev->i2c_bus[0], DENC_AB_CTRL, &tmp);
value &= 0xFF70FF70;
- value |= 0x00090008; // set en_active
+ value |= 0x00090008; /* set en_active */
ret_val = cx25821_i2c_write(&dev->i2c_bus[0], DENC_AB_CTRL, value);
if (ret_val < 0) {
mutex_unlock(&dev->lock);
return -EINVAL;
}
- // enable input is VIP/656
+ /* enable input is VIP/656 */
value = cx25821_i2c_read(&dev->i2c_bus[0], BYP_AB_CTRL, &tmp);
- value |= 0x00040100; // enable VIP
+ value |= 0x00040100; /* enable VIP */
ret_val = cx25821_i2c_write(&dev->i2c_bus[0], BYP_AB_CTRL, value);
if (ret_val < 0) {
mutex_unlock(&dev->lock);
return -EINVAL;
}
- // select AFE clock to output mode
+ /* select AFE clock to output mode */
value = cx25821_i2c_read(&dev->i2c_bus[0], AFE_AB_DIAG_CTRL, &tmp);
value &= 0x83FFFFFF;
ret_val =
@@ -836,15 +840,20 @@ int medusa_video_init(struct cx25821_dev *dev)
mutex_unlock(&dev->lock);
return -EINVAL;
}
- // Turn on all of the data out and control output pins.
+ /* Turn on all of the data out and control output pins. */
value = cx25821_i2c_read(&dev->i2c_bus[0], PIN_OE_CTRL, &tmp);
value &= 0xFEF0FE00;
if (_num_decoders == MAX_DECODERS) {
- // Note: The octal board does not support control pins(bit16-19).
- // These bits are ignored in the octal board.
- value |= 0x010001F8; // disable VDEC A-C port, default to Mobilygen Interface
+ /*
+		 * Note: The octal board does not support control pins (bit16-19).
+ * These bits are ignored in the octal board.
+ *
+ * disable VDEC A-C port, default to Mobilygen Interface
+ */
+ value |= 0x010001F8;
} else {
- value |= 0x010F0108; // disable VDEC A-C port, default to Mobilygen Interface
+ /* disable VDEC A-C port, default to Mobilygen Interface */
+ value |= 0x010F0108;
}
value |= 7;
diff --git a/drivers/staging/cx25821/cx25821-video-upstream.c b/drivers/staging/cx25821/cx25821-video-upstream.c
index 6d48a1e26d1b..a22a5696a289 100644
--- a/drivers/staging/cx25821/cx25821-video-upstream.c
+++ b/drivers/staging/cx25821/cx25821-video-upstream.c
@@ -31,6 +31,7 @@
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/fcntl.h>
+#include <linux/uaccess.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
@@ -60,9 +61,8 @@ int cx25821_sram_channel_setup_upstream(struct cx25821_dev *dev,
cdt = ch->cdt;
lines = ch->fifo_size / bpl;
- if (lines > 4) {
+ if (lines > 4)
lines = 4;
- }
BUG_ON(lines < 2);
@@ -97,7 +97,7 @@ int cx25821_sram_channel_setup_upstream(struct cx25821_dev *dev,
}
static __le32 *cx25821_update_riscprogram(struct cx25821_dev *dev,
- __le32 * rp, unsigned int offset,
+ __le32 *rp, unsigned int offset,
unsigned int bpl, u32 sync_line,
unsigned int lines, int fifo_enable,
int field_type)
@@ -108,9 +108,8 @@ static __le32 *cx25821_update_riscprogram(struct cx25821_dev *dev,
*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
if (USE_RISC_NOOP_VIDEO) {
- for (i = 0; i < NUM_NO_OPS; i++) {
+ for (i = 0; i < NUM_NO_OPS; i++)
*(rp++) = cpu_to_le32(RISC_NOOP);
- }
}
/* scan lines */
@@ -140,14 +139,12 @@ static __le32 *cx25821_risc_field_upstream(struct cx25821_dev *dev, __le32 * rp,
int dist_betwn_starts = bpl * 2;
/* sync instruction */
- if (sync_line != NO_SYNC_LINE) {
+ if (sync_line != NO_SYNC_LINE)
*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
- }
if (USE_RISC_NOOP_VIDEO) {
- for (i = 0; i < NUM_NO_OPS; i++) {
+ for (i = 0; i < NUM_NO_OPS; i++)
*(rp++) = cpu_to_le32(RISC_NOOP);
- }
}
/* scan lines */
@@ -157,12 +154,13 @@ static __le32 *cx25821_risc_field_upstream(struct cx25821_dev *dev, __le32 * rp,
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
if ((lines <= NTSC_FIELD_HEIGHT)
- || (line < (NTSC_FIELD_HEIGHT - 1)) || !(dev->_isNTSC)) {
- offset += dist_betwn_starts; //to skip the other field line
- }
+ || (line < (NTSC_FIELD_HEIGHT - 1)) || !(dev->_isNTSC))
+ /* to skip the other field line */
+ offset += dist_betwn_starts;
- // check if we need to enable the FIFO after the first 4 lines
- // For the upstream video channel, the risc engine will enable the FIFO.
+		/* Check if we need to enable the FIFO after the first 4 lines.
+ * For the upstream video channel, the risc engine will enable
+ * the FIFO. */
if (fifo_enable && line == 3) {
*(rp++) = RISC_WRITECR;
*(rp++) = sram_ch->dma_ctl;
@@ -181,7 +179,8 @@ int cx25821_risc_buffer_upstream(struct cx25821_dev *dev,
{
__le32 *rp;
int fifo_enable = 0;
- int singlefield_lines = lines >> 1; //get line count for single field
+ /* get line count for single field */
+ int singlefield_lines = lines >> 1;
int odd_num_lines = singlefield_lines;
int frame = 0;
int frame_size = 0;
@@ -225,7 +224,7 @@ int cx25821_risc_buffer_upstream(struct cx25821_dev *dev,
fifo_enable = FIFO_DISABLE;
- //Even Field
+ /* Even Field */
rp = cx25821_risc_field_upstream(dev, rp,
dev->_data_buf_phys_addr +
databuf_offset, bottom_offset,
@@ -241,7 +240,9 @@ int cx25821_risc_buffer_upstream(struct cx25821_dev *dev,
risc_flag = RISC_CNT_INC;
}
- // Loop to 2ndFrameRISC or to Start of Risc program & generate IRQ
+ /* Loop to 2ndFrameRISC or to Start of Risc
+ * program & generate IRQ
+ */
*(rp++) = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | risc_flag);
*(rp++) = cpu_to_le32(risc_phys_jump_addr);
*(rp++) = cpu_to_le32(0);
@@ -258,18 +259,18 @@ void cx25821_stop_upstream_video_ch1(struct cx25821_dev *dev)
if (!dev->_is_running) {
printk
- ("cx25821: No video file is currently running so return!\n");
+ (KERN_INFO "cx25821: No video file is currently running so return!\n");
return;
}
- //Disable RISC interrupts
+ /* Disable RISC interrupts */
tmp = cx_read(sram_ch->int_msk);
cx_write(sram_ch->int_msk, tmp & ~_intr_msk);
- //Turn OFF risc and fifo enable
+ /* Turn OFF risc and fifo enable */
tmp = cx_read(sram_ch->dma_ctl);
cx_write(sram_ch->dma_ctl, tmp & ~(FLD_VID_FIFO_EN | FLD_VID_RISC_EN));
- //Clear data buffer memory
+ /* Clear data buffer memory */
if (dev->_data_buf_virt_addr)
memset(dev->_data_buf_virt_addr, 0, dev->_data_buf_size);
@@ -292,9 +293,8 @@ void cx25821_stop_upstream_video_ch1(struct cx25821_dev *dev)
void cx25821_free_mem_upstream_ch1(struct cx25821_dev *dev)
{
- if (dev->_is_running) {
+ if (dev->_is_running)
cx25821_stop_upstream_video_ch1(dev);
- }
if (dev->_dma_virt_addr) {
pci_free_consistent(dev->pci, dev->_risc_size,
@@ -347,19 +347,19 @@ int cx25821_get_frame(struct cx25821_dev *dev, struct sram_channel *sram_ch)
if (IS_ERR(myfile)) {
const int open_errno = -PTR_ERR(myfile);
- printk("%s(): ERROR opening file(%s) with errno = %d! \n",
+ printk(KERN_ERR "%s(): ERROR opening file(%s) with errno = %d!\n",
__func__, dev->_filename, open_errno);
return PTR_ERR(myfile);
} else {
if (!(myfile->f_op)) {
- printk("%s: File has no file operations registered!",
+ printk(KERN_ERR "%s: File has no file operations registered!",
__func__);
filp_close(myfile, NULL);
return -EIO;
}
if (!myfile->f_op->read) {
- printk("%s: File has no READ operations registered!",
+ printk(KERN_ERR "%s: File has no READ operations registered!",
__func__);
filp_close(myfile, NULL);
return -EIO;
@@ -412,7 +412,7 @@ static void cx25821_vidups_handler(struct work_struct *work)
container_of(work, struct cx25821_dev, _irq_work_entry);
if (!dev) {
- printk("ERROR %s(): since container_of(work_struct) FAILED! \n",
+ printk(KERN_ERR "ERROR %s(): since container_of(work_struct) FAILED!\n",
__func__);
return;
}
@@ -438,12 +438,12 @@ int cx25821_openfile(struct cx25821_dev *dev, struct sram_channel *sram_ch)
if (IS_ERR(myfile)) {
const int open_errno = -PTR_ERR(myfile);
- printk("%s(): ERROR opening file(%s) with errno = %d! \n",
+ printk(KERN_ERR "%s(): ERROR opening file(%s) with errno = %d!\n",
__func__, dev->_filename, open_errno);
return PTR_ERR(myfile);
} else {
if (!(myfile->f_op)) {
- printk("%s: File has no file operations registered!",
+ printk(KERN_ERR "%s: File has no file operations registered!",
__func__);
filp_close(myfile, NULL);
return -EIO;
@@ -451,7 +451,7 @@ int cx25821_openfile(struct cx25821_dev *dev, struct sram_channel *sram_ch)
if (!myfile->f_op->read) {
printk
- ("%s: File has no READ operations registered! Returning.",
+ (KERN_ERR "%s: File has no READ operations registered! Returning.",
__func__);
filp_close(myfile, NULL);
return -EIO;
@@ -490,9 +490,8 @@ int cx25821_openfile(struct cx25821_dev *dev, struct sram_channel *sram_ch)
if (i > 0)
dev->_frame_count++;
- if (vfs_read_retval < line_size) {
+ if (vfs_read_retval < line_size)
break;
- }
}
dev->_file_status =
@@ -528,11 +527,11 @@ int cx25821_upstream_buffer_prepare(struct cx25821_dev *dev,
if (!dev->_dma_virt_addr) {
printk
- ("cx25821: FAILED to allocate memory for Risc buffer! Returning.\n");
+ (KERN_ERR "cx25821: FAILED to allocate memory for Risc buffer! Returning.\n");
return -ENOMEM;
}
- //Clear memory at address
+ /* Clear memory at address */
memset(dev->_dma_virt_addr, 0, dev->_risc_size);
if (dev->_data_buf_virt_addr != NULL) {
@@ -540,7 +539,7 @@ int cx25821_upstream_buffer_prepare(struct cx25821_dev *dev,
dev->_data_buf_virt_addr,
dev->_data_buf_phys_addr);
}
- //For Video Data buffer allocation
+ /* For Video Data buffer allocation */
dev->_data_buf_virt_addr =
pci_alloc_consistent(dev->pci, dev->upstream_databuf_size,
&data_dma_addr);
@@ -549,30 +548,30 @@ int cx25821_upstream_buffer_prepare(struct cx25821_dev *dev,
if (!dev->_data_buf_virt_addr) {
printk
- ("cx25821: FAILED to allocate memory for data buffer! Returning.\n");
+ (KERN_ERR "cx25821: FAILED to allocate memory for data buffer! Returning.\n");
return -ENOMEM;
}
- //Clear memory at address
+ /* Clear memory at address */
memset(dev->_data_buf_virt_addr, 0, dev->_data_buf_size);
ret = cx25821_openfile(dev, sram_ch);
if (ret < 0)
return ret;
- //Create RISC programs
+ /* Create RISC programs */
ret =
cx25821_risc_buffer_upstream(dev, dev->pci, 0, bpl,
dev->_lines_count);
if (ret < 0) {
printk(KERN_INFO
- "cx25821: Failed creating Video Upstream Risc programs! \n");
+ "cx25821: Failed creating Video Upstream Risc programs!\n");
goto error;
}
return 0;
- error:
+error:
return ret;
}
@@ -588,10 +587,11 @@ int cx25821_video_upstream_irq(struct cx25821_dev *dev, int chan_num,
__le32 *rp;
if (status & FLD_VID_SRC_RISC1) {
- // We should only process one program per call
+ /* We should only process one program per call */
u32 prog_cnt = cx_read(channel->gpcnt);
- //Since we've identified our IRQ, clear our bits from the interrupt mask and interrupt status registers
+ /* Since we've identified our IRQ, clear our bits from the
+ * interrupt mask and interrupt status registers */
int_msk_tmp = cx_read(channel->int_msk);
cx_write(channel->int_msk, int_msk_tmp & ~_intr_msk);
cx_write(channel->int_stat, _intr_msk);
@@ -632,7 +632,7 @@ int cx25821_video_upstream_irq(struct cx25821_dev *dev, int chan_num,
FIFO_DISABLE,
ODD_FIELD);
- // Jump to Even Risc program of 1st Frame
+ /* Jump to Even Risc program of 1st Frame */
*(rp++) = cpu_to_le32(RISC_JUMP);
*(rp++) = cpu_to_le32(risc_phys_jump_addr);
*(rp++) = cpu_to_le32(0);
@@ -643,24 +643,24 @@ int cx25821_video_upstream_irq(struct cx25821_dev *dev, int chan_num,
} else {
if (status & FLD_VID_SRC_UF)
printk
- ("%s: Video Received Underflow Error Interrupt!\n",
+ (KERN_ERR "%s: Video Received Underflow Error Interrupt!\n",
__func__);
if (status & FLD_VID_SRC_SYNC)
- printk("%s: Video Received Sync Error Interrupt!\n",
+ printk(KERN_ERR "%s: Video Received Sync Error Interrupt!\n",
__func__);
if (status & FLD_VID_SRC_OPC_ERR)
- printk("%s: Video Received OpCode Error Interrupt!\n",
+ printk(KERN_ERR "%s: Video Received OpCode Error Interrupt!\n",
__func__);
}
if (dev->_file_status == END_OF_FILE) {
- printk("cx25821: EOF Channel 1 Framecount = %d\n",
+ printk(KERN_ERR "cx25821: EOF Channel 1 Framecount = %d\n",
dev->_frame_count);
return -1;
}
- //ElSE, set the interrupt mask register, re-enable irq.
+	/* Else, set the interrupt mask register, re-enable irq. */
int_msk_tmp = cx_read(channel->int_msk);
cx_write(channel->int_msk, int_msk_tmp |= _intr_msk);
@@ -685,17 +685,16 @@ static irqreturn_t cx25821_upstream_irq(int irq, void *dev_id)
msk_stat = cx_read(sram_ch->int_mstat);
vid_status = cx_read(sram_ch->int_stat);
- // Only deal with our interrupt
+ /* Only deal with our interrupt */
if (vid_status) {
handled =
cx25821_video_upstream_irq(dev, channel_num, vid_status);
}
- if (handled < 0) {
+ if (handled < 0)
cx25821_stop_upstream_video_ch1(dev);
- } else {
+ else
handled += handled;
- }
return IRQ_RETVAL(handled);
}
@@ -714,19 +713,19 @@ void cx25821_set_pixelengine(struct cx25821_dev *dev, struct sram_channel *ch,
value |= dev->_isNTSC ? 0 : 0x10;
cx_write(ch->vid_fmt_ctl, value);
- // set number of active pixels in each line. Default is 720 pixels in both NTSC and PAL format
+ /* set number of active pixels in each line.
+ * Default is 720 pixels in both NTSC and PAL format */
cx_write(ch->vid_active_ctl1, width);
num_lines = (height / 2) & 0x3FF;
odd_num_lines = num_lines;
- if (dev->_isNTSC) {
+ if (dev->_isNTSC)
odd_num_lines += 1;
- }
value = (num_lines << 16) | odd_num_lines;
- // set number of active lines in field 0 (top) and field 1 (bottom)
+ /* set number of active lines in field 0 (top) and field 1 (bottom) */
cx_write(ch->vid_active_ctl2, value);
cx_write(ch->vid_cdt_size, VID_CDT_SIZE >> 3);
@@ -738,21 +737,26 @@ int cx25821_start_video_dma_upstream(struct cx25821_dev *dev,
u32 tmp = 0;
int err = 0;
- // 656/VIP SRC Upstream Channel I & J and 7 - Host Bus Interface for channel A-C
+ /* 656/VIP SRC Upstream Channel I & J and 7 - Host Bus Interface for
+ * channel A-C
+ */
tmp = cx_read(VID_CH_MODE_SEL);
cx_write(VID_CH_MODE_SEL, tmp | 0x1B0001FF);
- // Set the physical start address of the RISC program in the initial program counter(IPC) member of the cmds.
+ /* Set the physical start address of the RISC program in the initial
+ * program counter(IPC) member of the cmds.
+ */
cx_write(sram_ch->cmds_start + 0, dev->_dma_phys_addr);
- cx_write(sram_ch->cmds_start + 4, 0); /* Risc IPC High 64 bits 63-32 */
+ /* Risc IPC High 64 bits 63-32 */
+ cx_write(sram_ch->cmds_start + 4, 0);
/* reset counter */
cx_write(sram_ch->gpcnt_ctl, 3);
- // Clear our bits from the interrupt status register.
+ /* Clear our bits from the interrupt status register. */
cx_write(sram_ch->int_stat, _intr_msk);
- //Set the interrupt mask register, enable irq.
+ /* Set the interrupt mask register, enable irq. */
cx_set(PCI_INT_MSK, cx_read(PCI_INT_MSK) | (1 << sram_ch->irq_bit));
tmp = cx_read(sram_ch->int_msk);
cx_write(sram_ch->int_msk, tmp |= _intr_msk);
@@ -766,7 +770,7 @@ int cx25821_start_video_dma_upstream(struct cx25821_dev *dev,
goto fail_irq;
}
- // Start the DMA engine
+ /* Start the DMA engine */
tmp = cx_read(sram_ch->dma_ctl);
cx_set(sram_ch->dma_ctl, tmp | FLD_VID_RISC_EN);
@@ -775,7 +779,7 @@ int cx25821_start_video_dma_upstream(struct cx25821_dev *dev,
return 0;
- fail_irq:
+fail_irq:
cx25821_dev_unregister(dev);
return err;
}
@@ -792,7 +796,7 @@ int cx25821_vidupstream_init_ch1(struct cx25821_dev *dev, int channel_select,
int str_length = 0;
if (dev->_is_running) {
- printk("Video Channel is still running so return!\n");
+ printk(KERN_INFO "Video Channel is still running so return!\n");
return 0;
}
@@ -804,10 +808,12 @@ int cx25821_vidupstream_init_ch1(struct cx25821_dev *dev, int channel_select,
if (!dev->_irq_queues) {
printk
- ("cx25821: create_singlethread_workqueue() for Video FAILED!\n");
+ (KERN_ERR "cx25821: create_singlethread_workqueue() for Video FAILED!\n");
return -ENOMEM;
}
- // 656/VIP SRC Upstream Channel I & J and 7 - Host Bus Interface for channel A-C
+ /* 656/VIP SRC Upstream Channel I & J and 7 - Host Bus Interface for
+ * channel A-C
+ */
tmp = cx_read(VID_CH_MODE_SEL);
cx_write(VID_CH_MODE_SEL, tmp | 0x1B0001FF);
@@ -841,7 +847,7 @@ int cx25821_vidupstream_init_ch1(struct cx25821_dev *dev, int channel_select,
memcpy(dev->_filename, dev->_defaultname, str_length + 1);
}
- //Default if filename is empty string
+ /* Default if filename is empty string */
if (strcmp(dev->input_filename, "") == 0) {
if (dev->_isNTSC) {
dev->_filename =
@@ -875,7 +881,7 @@ int cx25821_vidupstream_init_ch1(struct cx25821_dev *dev, int channel_select,
dev->upstream_riscbuf_size = risc_buffer_size * 2;
dev->upstream_databuf_size = data_frame_size * 2;
- //Allocating buffers and prepare RISC program
+ /* Allocating buffers and prepare RISC program */
retval = cx25821_upstream_buffer_prepare(dev, sram_ch, dev->_line_size);
if (retval < 0) {
printk(KERN_ERR
@@ -888,7 +894,7 @@ int cx25821_vidupstream_init_ch1(struct cx25821_dev *dev, int channel_select,
return 0;
- error:
+error:
cx25821_dev_unregister(dev);
return err;
diff --git a/drivers/staging/cx25821/cx25821-video.c b/drivers/staging/cx25821/cx25821-video.c
index 8cd3986d2e5c..a3bea2739cef 100644
--- a/drivers/staging/cx25821/cx25821-video.c
+++ b/drivers/staging/cx25821/cx25821-video.c
@@ -54,34 +54,34 @@ static void init_controls(struct cx25821_dev *dev, int chan_num);
struct cx25821_fmt formats[] = {
{
- .name = "8 bpp, gray",
- .fourcc = V4L2_PIX_FMT_GREY,
- .depth = 8,
- .flags = FORMAT_FLAGS_PACKED,
+ .name = "8 bpp, gray",
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .depth = 8,
+ .flags = FORMAT_FLAGS_PACKED,
}, {
- .name = "4:1:1, packed, Y41P",
- .fourcc = V4L2_PIX_FMT_Y41P,
- .depth = 12,
- .flags = FORMAT_FLAGS_PACKED,
- }, {
- .name = "4:2:2, packed, YUYV",
- .fourcc = V4L2_PIX_FMT_YUYV,
- .depth = 16,
- .flags = FORMAT_FLAGS_PACKED,
- }, {
- .name = "4:2:2, packed, UYVY",
- .fourcc = V4L2_PIX_FMT_UYVY,
- .depth = 16,
- .flags = FORMAT_FLAGS_PACKED,
- }, {
- .name = "4:2:0, YUV",
- .fourcc = V4L2_PIX_FMT_YUV420,
- .depth = 12,
- .flags = FORMAT_FLAGS_PACKED,
- },
+ .name = "4:1:1, packed, Y41P",
+ .fourcc = V4L2_PIX_FMT_Y41P,
+ .depth = 12,
+ .flags = FORMAT_FLAGS_PACKED,
+ }, {
+ .name = "4:2:2, packed, YUYV",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .depth = 16,
+ .flags = FORMAT_FLAGS_PACKED,
+ }, {
+ .name = "4:2:2, packed, UYVY",
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .depth = 16,
+ .flags = FORMAT_FLAGS_PACKED,
+ }, {
+ .name = "4:2:0, YUV",
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .depth = 12,
+ .flags = FORMAT_FLAGS_PACKED,
+ },
};
-int get_format_size(void)
+int cx25821_get_format_size(void)
{
return ARRAY_SIZE(formats);
}
@@ -102,7 +102,7 @@ struct cx25821_fmt *format_by_fourcc(unsigned int fourcc)
return NULL;
}
-void dump_video_queue(struct cx25821_dev *dev, struct cx25821_dmaqueue *q)
+void cx25821_dump_video_queue(struct cx25821_dev *dev, struct cx25821_dmaqueue *q)
{
struct cx25821_buffer *buf;
struct list_head *item;
@@ -212,7 +212,7 @@ static int cx25821_ctrl_query(struct v4l2_queryctrl *qctrl)
*/
// resource management
-int res_get(struct cx25821_dev *dev, struct cx25821_fh *fh, unsigned int bit)
+int cx25821_res_get(struct cx25821_dev *dev, struct cx25821_fh *fh, unsigned int bit)
{
dprintk(1, "%s()\n", __func__);
if (fh->resources & bit)
@@ -234,17 +234,17 @@ int res_get(struct cx25821_dev *dev, struct cx25821_fh *fh, unsigned int bit)
return 1;
}
-int res_check(struct cx25821_fh *fh, unsigned int bit)
+int cx25821_res_check(struct cx25821_fh *fh, unsigned int bit)
{
return fh->resources & bit;
}
-int res_locked(struct cx25821_dev *dev, unsigned int bit)
+int cx25821_res_locked(struct cx25821_dev *dev, unsigned int bit)
{
return dev->resources & bit;
}
-void res_free(struct cx25821_dev *dev, struct cx25821_fh *fh, unsigned int bits)
+void cx25821_res_free(struct cx25821_dev *dev, struct cx25821_fh *fh, unsigned int bits)
{
BUG_ON((fh->resources & bits) != bits);
dprintk(1, "%s()\n", __func__);
@@ -506,7 +506,7 @@ int cx25821_video_register(struct cx25821_dev *dev, int chan_num,
return err;
}
-int buffer_setup(struct videobuf_queue *q, unsigned int *count,
+int cx25821_buffer_setup(struct videobuf_queue *q, unsigned int *count,
unsigned int *size)
{
struct cx25821_fh *fh = q->priv_data;
@@ -516,13 +516,13 @@ int buffer_setup(struct videobuf_queue *q, unsigned int *count,
if (0 == *count)
*count = 32;
- while (*size * *count > vid_limit * 1024 * 1024)
- (*count)--;
+ if (*size * *count > vid_limit * 1024 * 1024)
+ *count = (vid_limit * 1024 * 1024) / *size;
return 0;
}
-int buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
+int cx25821_buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
enum v4l2_field field)
{
struct cx25821_fh *fh = q->priv_data;
@@ -648,7 +648,7 @@ int buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
return rc;
}
-void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
+void cx25821_buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
{
struct cx25821_buffer *buf =
container_of(vb, struct cx25821_buffer, vb);
@@ -667,7 +667,7 @@ struct videobuf_queue *get_queue(struct cx25821_fh *fh)
}
}
-int get_resource(struct cx25821_fh *fh, int resource)
+int cx25821_get_resource(struct cx25821_fh *fh, int resource)
{
switch (fh->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
@@ -678,7 +678,7 @@ int get_resource(struct cx25821_fh *fh, int resource)
}
}
-int video_mmap(struct file *file, struct vm_area_struct *vma)
+int cx25821_video_mmap(struct file *file, struct vm_area_struct *vma)
{
struct cx25821_fh *fh = file->private_data;
@@ -686,7 +686,7 @@ int video_mmap(struct file *file, struct vm_area_struct *vma)
}
/* VIDEO IOCTLS */
-int vidioc_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f)
+int cx25821_vidioc_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f)
{
struct cx25821_fh *fh = priv;
@@ -700,7 +700,7 @@ int vidioc_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f)
return 0;
}
-int vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f)
+int cx25821_vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f)
{
struct cx25821_fmt *fmt;
enum v4l2_field field;
@@ -746,7 +746,7 @@ int vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f)
return 0;
}
-int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap)
+int cx25821_vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap)
{
struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev;
@@ -761,7 +761,7 @@ int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap)
return 0;
}
-int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+int cx25821_vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
if (unlikely(f->index >= ARRAY_SIZE(formats)))
@@ -774,7 +774,7 @@ int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
}
#ifdef CONFIG_VIDEO_V4L1_COMPAT
-int vidiocgmbuf(struct file *file, void *priv, struct video_mbuf *mbuf)
+int cx25821_vidiocgmbuf(struct file *file, void *priv, struct video_mbuf *mbuf)
{
struct cx25821_fh *fh = priv;
struct videobuf_queue *q;
@@ -801,25 +801,25 @@ int vidiocgmbuf(struct file *file, void *priv, struct video_mbuf *mbuf)
}
#endif
-int vidioc_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *p)
+int cx25821_vidioc_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *p)
{
struct cx25821_fh *fh = priv;
return videobuf_reqbufs(get_queue(fh), p);
}
-int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
+int cx25821_vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
struct cx25821_fh *fh = priv;
return videobuf_querybuf(get_queue(fh), p);
}
-int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
+int cx25821_vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
struct cx25821_fh *fh = priv;
return videobuf_qbuf(get_queue(fh), p);
}
-int vidioc_g_priority(struct file *file, void *f, enum v4l2_priority *p)
+int cx25821_vidioc_g_priority(struct file *file, void *f, enum v4l2_priority *p)
{
struct cx25821_dev *dev = ((struct cx25821_fh *)f)->dev;
@@ -828,7 +828,7 @@ int vidioc_g_priority(struct file *file, void *f, enum v4l2_priority *p)
return 0;
}
-int vidioc_s_priority(struct file *file, void *f, enum v4l2_priority prio)
+int cx25821_vidioc_s_priority(struct file *file, void *f, enum v4l2_priority prio)
{
struct cx25821_fh *fh = f;
struct cx25821_dev *dev = ((struct cx25821_fh *)f)->dev;
@@ -837,7 +837,7 @@ int vidioc_s_priority(struct file *file, void *f, enum v4l2_priority prio)
}
#ifdef TUNER_FLAG
-int vidioc_s_std(struct file *file, void *priv, v4l2_std_id * tvnorms)
+int cx25821_vidioc_s_std(struct file *file, void *priv, v4l2_std_id * tvnorms)
{
struct cx25821_fh *fh = priv;
struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev;
@@ -891,14 +891,14 @@ int cx25821_enum_input(struct cx25821_dev *dev, struct v4l2_input *i)
return 0;
}
-int vidioc_enum_input(struct file *file, void *priv, struct v4l2_input *i)
+int cx25821_vidioc_enum_input(struct file *file, void *priv, struct v4l2_input *i)
{
struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev;
dprintk(1, "%s()\n", __func__);
return cx25821_enum_input(dev, i);
}
-int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
+int cx25821_vidioc_g_input(struct file *file, void *priv, unsigned int *i)
{
struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev;
@@ -907,7 +907,7 @@ int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
return 0;
}
-int vidioc_s_input(struct file *file, void *priv, unsigned int i)
+int cx25821_vidioc_s_input(struct file *file, void *priv, unsigned int i)
{
struct cx25821_fh *fh = priv;
struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev;
@@ -933,7 +933,7 @@ int vidioc_s_input(struct file *file, void *priv, unsigned int i)
}
#ifdef TUNER_FLAG
-int vidioc_g_frequency(struct file *file, void *priv, struct v4l2_frequency *f)
+int cx25821_vidioc_g_frequency(struct file *file, void *priv, struct v4l2_frequency *f)
{
struct cx25821_fh *fh = priv;
struct cx25821_dev *dev = fh->dev;
@@ -960,7 +960,7 @@ int cx25821_set_freq(struct cx25821_dev *dev, struct v4l2_frequency *f)
return 0;
}
-int vidioc_s_frequency(struct file *file, void *priv, struct v4l2_frequency *f)
+int cx25821_vidioc_s_frequency(struct file *file, void *priv, struct v4l2_frequency *f)
{
struct cx25821_fh *fh = priv;
struct cx25821_dev *dev;
@@ -978,7 +978,7 @@ int vidioc_s_frequency(struct file *file, void *priv, struct v4l2_frequency *f)
#endif
#ifdef CONFIG_VIDEO_ADV_DEBUG
-int vidioc_g_register(struct file *file, void *fh,
+int cx25821_vidioc_g_register(struct file *file, void *fh,
struct v4l2_dbg_register *reg)
{
struct cx25821_dev *dev = ((struct cx25821_fh *)fh)->dev;
@@ -991,7 +991,7 @@ int vidioc_g_register(struct file *file, void *fh,
return 0;
}
-int vidioc_s_register(struct file *file, void *fh,
+int cx25821_vidioc_s_register(struct file *file, void *fh,
struct v4l2_dbg_register *reg)
{
struct cx25821_dev *dev = ((struct cx25821_fh *)fh)->dev;
@@ -1007,7 +1007,7 @@ int vidioc_s_register(struct file *file, void *fh,
#endif
#ifdef TUNER_FLAG
-int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
+int cx25821_vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
{
struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev;
@@ -1025,7 +1025,7 @@ int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
return 0;
}
-int vidioc_s_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
+int cx25821_vidioc_s_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
{
struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev;
struct cx25821_fh *fh = priv;
@@ -1108,7 +1108,7 @@ static int cx25821_ctrl_query(struct v4l2_queryctrl *qctrl)
return 0;
}
-int vidioc_queryctrl(struct file *file, void *priv,
+int cx25821_vidioc_queryctrl(struct file *file, void *priv,
struct v4l2_queryctrl *qctrl)
{
return cx25821_ctrl_query(qctrl);
@@ -1127,7 +1127,7 @@ static const struct v4l2_queryctrl *ctrl_by_id(unsigned int id)
return NULL;
}
-int vidioc_g_ctrl(struct file *file, void *priv, struct v4l2_control *ctl)
+int cx25821_vidioc_g_ctrl(struct file *file, void *priv, struct v4l2_control *ctl)
{
struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev;
@@ -1216,7 +1216,7 @@ static void init_controls(struct cx25821_dev *dev, int chan_num)
}
}
-int vidioc_cropcap(struct file *file, void *priv, struct v4l2_cropcap *cropcap)
+int cx25821_vidioc_cropcap(struct file *file, void *priv, struct v4l2_cropcap *cropcap)
{
struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev;
@@ -1233,7 +1233,7 @@ int vidioc_cropcap(struct file *file, void *priv, struct v4l2_cropcap *cropcap)
return 0;
}
-int vidioc_s_crop(struct file *file, void *priv, struct v4l2_crop *crop)
+int cx25821_vidioc_s_crop(struct file *file, void *priv, struct v4l2_crop *crop)
{
struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev;
struct cx25821_fh *fh = priv;
@@ -1244,17 +1244,17 @@ int vidioc_s_crop(struct file *file, void *priv, struct v4l2_crop *crop)
if (0 != err)
return err;
}
- // vidioc_s_crop not supported
+	/* cx25821_vidioc_s_crop not supported */
return -EINVAL;
}
-int vidioc_g_crop(struct file *file, void *priv, struct v4l2_crop *crop)
+int cx25821_vidioc_g_crop(struct file *file, void *priv, struct v4l2_crop *crop)
{
- // vidioc_g_crop not supported
+	/* cx25821_vidioc_g_crop not supported */
return -EINVAL;
}
-int vidioc_querystd(struct file *file, void *priv, v4l2_std_id * norm)
+int cx25821_vidioc_querystd(struct file *file, void *priv, v4l2_std_id * norm)
{
// medusa does not support video standard sensing of current input
*norm = CX25821_NORMS;
@@ -1262,7 +1262,7 @@ int vidioc_querystd(struct file *file, void *priv, v4l2_std_id * norm)
return 0;
}
-int is_valid_width(u32 width, v4l2_std_id tvnorm)
+int cx25821_is_valid_width(u32 width, v4l2_std_id tvnorm)
{
if (tvnorm == V4L2_STD_PAL_BG) {
if (width == 352 || width == 720)
@@ -1280,7 +1280,7 @@ int is_valid_width(u32 width, v4l2_std_id tvnorm)
return 0;
}
-int is_valid_height(u32 height, v4l2_std_id tvnorm)
+int cx25821_is_valid_height(u32 height, v4l2_std_id tvnorm)
{
if (tvnorm == V4L2_STD_PAL_BG) {
if (height == 576 || height == 288)
diff --git a/drivers/staging/cx25821/cx25821-video.h b/drivers/staging/cx25821/cx25821-video.h
index 4417ff5d90d4..0bddc02be57d 100644
--- a/drivers/staging/cx25821/cx25821-video.h
+++ b/drivers/staging/cx25821/cx25821-video.h
@@ -101,7 +101,7 @@ extern struct cx25821_fmt formats[];
extern struct cx25821_fmt *format_by_fourcc(unsigned int fourcc);
extern struct cx25821_data timeout_data[MAX_VID_CHANNEL_NUM];
-extern void dump_video_queue(struct cx25821_dev *dev,
+extern void cx25821_dump_video_queue(struct cx25821_dev *dev,
struct cx25821_dmaqueue *q);
extern void cx25821_video_wakeup(struct cx25821_dev *dev,
struct cx25821_dmaqueue *q, u32 count);
@@ -110,11 +110,11 @@ extern void cx25821_video_wakeup(struct cx25821_dev *dev,
extern int cx25821_set_tvnorm(struct cx25821_dev *dev, v4l2_std_id norm);
#endif
-extern int res_get(struct cx25821_dev *dev, struct cx25821_fh *fh,
+extern int cx25821_res_get(struct cx25821_dev *dev, struct cx25821_fh *fh,
unsigned int bit);
-extern int res_check(struct cx25821_fh *fh, unsigned int bit);
-extern int res_locked(struct cx25821_dev *dev, unsigned int bit);
-extern void res_free(struct cx25821_dev *dev, struct cx25821_fh *fh,
+extern int cx25821_res_check(struct cx25821_fh *fh, unsigned int bit);
+extern int cx25821_res_locked(struct cx25821_dev *dev, unsigned int bit);
+extern void cx25821_res_free(struct cx25821_dev *dev, struct cx25821_fh *fh,
unsigned int bits);
extern int cx25821_video_mux(struct cx25821_dev *dev, unsigned int input);
extern int cx25821_start_video_dma(struct cx25821_dev *dev,
@@ -128,67 +128,67 @@ extern int cx25821_video_irq(struct cx25821_dev *dev, int chan_num, u32 status);
extern void cx25821_video_unregister(struct cx25821_dev *dev, int chan_num);
extern int cx25821_video_register(struct cx25821_dev *dev, int chan_num,
struct video_device *video_template);
-extern int get_format_size(void);
+extern int cx25821_get_format_size(void);
-extern int buffer_setup(struct videobuf_queue *q, unsigned int *count,
+extern int cx25821_buffer_setup(struct videobuf_queue *q, unsigned int *count,
unsigned int *size);
-extern int buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
+extern int cx25821_buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
enum v4l2_field field);
-extern void buffer_release(struct videobuf_queue *q,
+extern void cx25821_buffer_release(struct videobuf_queue *q,
struct videobuf_buffer *vb);
extern struct videobuf_queue *get_queue(struct cx25821_fh *fh);
-extern int get_resource(struct cx25821_fh *fh, int resource);
-extern int video_mmap(struct file *file, struct vm_area_struct *vma);
-extern int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+extern int cx25821_get_resource(struct cx25821_fh *fh, int resource);
+extern int cx25821_video_mmap(struct file *file, struct vm_area_struct *vma);
+extern int cx25821_vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f);
-extern int vidioc_querycap(struct file *file, void *priv,
+extern int cx25821_vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap);
-extern int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+extern int cx25821_vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f);
-extern int vidiocgmbuf(struct file *file, void *priv, struct video_mbuf *mbuf);
-extern int vidioc_reqbufs(struct file *file, void *priv,
+extern int cx25821_vidiocgmbuf(struct file *file, void *priv, struct video_mbuf *mbuf);
+extern int cx25821_vidioc_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *p);
-extern int vidioc_querybuf(struct file *file, void *priv,
+extern int cx25821_vidioc_querybuf(struct file *file, void *priv,
struct v4l2_buffer *p);
-extern int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p);
-extern int vidioc_s_std(struct file *file, void *priv, v4l2_std_id * tvnorms);
+extern int cx25821_vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p);
+extern int cx25821_vidioc_s_std(struct file *file, void *priv, v4l2_std_id * tvnorms);
extern int cx25821_enum_input(struct cx25821_dev *dev, struct v4l2_input *i);
-extern int vidioc_enum_input(struct file *file, void *priv,
+extern int cx25821_vidioc_enum_input(struct file *file, void *priv,
struct v4l2_input *i);
-extern int vidioc_g_input(struct file *file, void *priv, unsigned int *i);
-extern int vidioc_s_input(struct file *file, void *priv, unsigned int i);
-extern int vidioc_g_ctrl(struct file *file, void *priv,
+extern int cx25821_vidioc_g_input(struct file *file, void *priv, unsigned int *i);
+extern int cx25821_vidioc_s_input(struct file *file, void *priv, unsigned int i);
+extern int cx25821_vidioc_g_ctrl(struct file *file, void *priv,
struct v4l2_control *ctl);
-extern int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+extern int cx25821_vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f);
-extern int vidioc_g_frequency(struct file *file, void *priv,
+extern int cx25821_vidioc_g_frequency(struct file *file, void *priv,
struct v4l2_frequency *f);
extern int cx25821_set_freq(struct cx25821_dev *dev, struct v4l2_frequency *f);
-extern int vidioc_s_frequency(struct file *file, void *priv,
+extern int cx25821_vidioc_s_frequency(struct file *file, void *priv,
struct v4l2_frequency *f);
-extern int vidioc_g_register(struct file *file, void *fh,
+extern int cx25821_vidioc_g_register(struct file *file, void *fh,
struct v4l2_dbg_register *reg);
-extern int vidioc_s_register(struct file *file, void *fh,
+extern int cx25821_vidioc_s_register(struct file *file, void *fh,
struct v4l2_dbg_register *reg);
-extern int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t);
-extern int vidioc_s_tuner(struct file *file, void *priv, struct v4l2_tuner *t);
+extern int cx25821_vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t);
+extern int cx25821_vidioc_s_tuner(struct file *file, void *priv, struct v4l2_tuner *t);
-extern int is_valid_width(u32 width, v4l2_std_id tvnorm);
-extern int is_valid_height(u32 height, v4l2_std_id tvnorm);
+extern int cx25821_is_valid_width(u32 width, v4l2_std_id tvnorm);
+extern int cx25821_is_valid_height(u32 height, v4l2_std_id tvnorm);
-extern int vidioc_g_priority(struct file *file, void *f, enum v4l2_priority *p);
-extern int vidioc_s_priority(struct file *file, void *f,
+extern int cx25821_vidioc_g_priority(struct file *file, void *f, enum v4l2_priority *p);
+extern int cx25821_vidioc_s_priority(struct file *file, void *f,
enum v4l2_priority prio);
-extern int vidioc_queryctrl(struct file *file, void *priv,
+extern int cx25821_vidioc_queryctrl(struct file *file, void *priv,
struct v4l2_queryctrl *qctrl);
extern int cx25821_set_control(struct cx25821_dev *dev,
struct v4l2_control *ctrl, int chan_num);
-extern int vidioc_cropcap(struct file *file, void *fh,
+extern int cx25821_vidioc_cropcap(struct file *file, void *fh,
struct v4l2_cropcap *cropcap);
-extern int vidioc_s_crop(struct file *file, void *priv, struct v4l2_crop *crop);
-extern int vidioc_g_crop(struct file *file, void *priv, struct v4l2_crop *crop);
+extern int cx25821_vidioc_s_crop(struct file *file, void *priv, struct v4l2_crop *crop);
+extern int cx25821_vidioc_g_crop(struct file *file, void *priv, struct v4l2_crop *crop);
-extern int vidioc_querystd(struct file *file, void *priv, v4l2_std_id * norm);
+extern int cx25821_vidioc_querystd(struct file *file, void *priv, v4l2_std_id * norm);
#endif
diff --git a/drivers/staging/cx25821/cx25821-video0.c b/drivers/staging/cx25821/cx25821-video0.c
index ad7a69129118..1f95ddba8499 100644
--- a/drivers/staging/cx25821/cx25821-video0.c
+++ b/drivers/staging/cx25821/cx25821-video0.c
@@ -86,10 +86,10 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
}
static struct videobuf_queue_ops cx25821_video_qops = {
- .buf_setup = buffer_setup,
- .buf_prepare = buffer_prepare,
+ .buf_setup = cx25821_buffer_setup,
+ .buf_prepare = cx25821_buffer_prepare,
.buf_queue = buffer_queue,
- .buf_release = buffer_release,
+ .buf_release = cx25821_buffer_release,
};
static int video_open(struct file *file)
@@ -147,7 +147,7 @@ static ssize_t video_read(struct file *file, char __user * data, size_t count,
switch (fh->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (res_locked(fh->dev, RESOURCE_VIDEO0))
+ if (cx25821_res_locked(fh->dev, RESOURCE_VIDEO0))
return -EBUSY;
return videobuf_read_one(&fh->vidq, data, count, ppos,
@@ -165,7 +165,7 @@ static unsigned int video_poll(struct file *file,
struct cx25821_fh *fh = file->private_data;
struct cx25821_buffer *buf;
- if (res_check(fh, RESOURCE_VIDEO0)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO0)) {
/* streaming capture */
if (list_empty(&fh->vidq.stream))
return POLLERR;
@@ -207,13 +207,13 @@ static int video_release(struct file *file)
cx_write(channel0->dma_ctl, 0); /* FIFO and RISC disable */
/* stop video capture */
- if (res_check(fh, RESOURCE_VIDEO0)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO0)) {
videobuf_queue_cancel(&fh->vidq);
- res_free(dev, fh, RESOURCE_VIDEO0);
+ cx25821_res_free(dev, fh, RESOURCE_VIDEO0);
}
if (fh->vidq.read_buf) {
- buffer_release(&fh->vidq, fh->vidq.read_buf);
+ cx25821_buffer_release(&fh->vidq, fh->vidq.read_buf);
kfree(fh->vidq.read_buf);
}
@@ -239,7 +239,7 @@ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
return -EINVAL;
}
- if (unlikely(!res_get(dev, fh, get_resource(fh, RESOURCE_VIDEO0)))) {
+ if (unlikely(!cx25821_res_get(dev, fh, cx25821_get_resource(fh, RESOURCE_VIDEO0)))) {
return -EBUSY;
}
@@ -257,11 +257,11 @@ static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
if (i != fh->type)
return -EINVAL;
- res = get_resource(fh, RESOURCE_VIDEO0);
+ res = cx25821_get_resource(fh, RESOURCE_VIDEO0);
err = videobuf_streamoff(get_queue(fh));
if (err < 0)
return err;
- res_free(dev, fh, res);
+ cx25821_res_free(dev, fh, res);
return 0;
}
@@ -280,7 +280,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
}
dprintk(2, "%s()\n", __func__);
- err = vidioc_try_fmt_vid_cap(file, priv, f);
+ err = cx25821_vidioc_try_fmt_vid_cap(file, priv, f);
if (0 != err)
return err;
@@ -289,11 +289,11 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
fh->vidq.field = f->fmt.pix.field;
// check if width and height are valid based on the set standard
- if (is_valid_width(f->fmt.pix.width, dev->tvnorm)) {
+ if (cx25821_is_valid_width(f->fmt.pix.width, dev->tvnorm)) {
fh->width = f->fmt.pix.width;
}
- if (is_valid_height(f->fmt.pix.height, dev->tvnorm)) {
+ if (cx25821_is_valid_height(f->fmt.pix.height, dev->tvnorm)) {
fh->height = f->fmt.pix.height;
}
@@ -378,50 +378,50 @@ static const struct v4l2_file_operations video_fops = {
.release = video_release,
.read = video_read,
.poll = video_poll,
- .mmap = video_mmap,
+ .mmap = cx25821_video_mmap,
.ioctl = video_ioctl2,
};
static const struct v4l2_ioctl_ops video_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_querycap = cx25821_vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = cx25821_vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = cx25821_vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = cx25821_vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_reqbufs = cx25821_vidioc_reqbufs,
+ .vidioc_querybuf = cx25821_vidioc_querybuf,
+ .vidioc_qbuf = cx25821_vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
#ifdef TUNER_FLAG
- .vidioc_s_std = vidioc_s_std,
- .vidioc_querystd = vidioc_querystd,
+ .vidioc_s_std = cx25821_vidioc_s_std,
+ .vidioc_querystd = cx25821_vidioc_querystd,
#endif
- .vidioc_cropcap = vidioc_cropcap,
- .vidioc_s_crop = vidioc_s_crop,
- .vidioc_g_crop = vidioc_g_crop,
- .vidioc_enum_input = vidioc_enum_input,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
- .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_cropcap = cx25821_vidioc_cropcap,
+ .vidioc_s_crop = cx25821_vidioc_s_crop,
+ .vidioc_g_crop = cx25821_vidioc_g_crop,
+ .vidioc_enum_input = cx25821_vidioc_enum_input,
+ .vidioc_g_input = cx25821_vidioc_g_input,
+ .vidioc_s_input = cx25821_vidioc_s_input,
+ .vidioc_g_ctrl = cx25821_vidioc_g_ctrl,
.vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_queryctrl = cx25821_vidioc_queryctrl,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_log_status = vidioc_log_status,
- .vidioc_g_priority = vidioc_g_priority,
- .vidioc_s_priority = vidioc_s_priority,
+ .vidioc_g_priority = cx25821_vidioc_g_priority,
+ .vidioc_s_priority = cx25821_vidioc_s_priority,
#ifdef CONFIG_VIDEO_V4L1_COMPAT
- .vidiocgmbuf = vidiocgmbuf,
+ .vidiocgmbuf = cx25821_vidiocgmbuf,
#endif
#ifdef TUNER_FLAG
- .vidioc_g_tuner = vidioc_g_tuner,
- .vidioc_s_tuner = vidioc_s_tuner,
- .vidioc_g_frequency = vidioc_g_frequency,
- .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_g_tuner = cx25821_vidioc_g_tuner,
+ .vidioc_s_tuner = cx25821_vidioc_s_tuner,
+ .vidioc_g_frequency = cx25821_vidioc_g_frequency,
+ .vidioc_s_frequency = cx25821_vidioc_s_frequency,
#endif
#ifdef CONFIG_VIDEO_ADV_DEBUG
- .vidioc_g_register = vidioc_g_register,
- .vidioc_s_register = vidioc_s_register,
+ .vidioc_g_register = cx25821_vidioc_g_register,
+ .vidioc_s_register = cx25821_vidioc_s_register,
#endif
};
diff --git a/drivers/staging/cx25821/cx25821-video1.c b/drivers/staging/cx25821/cx25821-video1.c
index e3f3c4ac7908..9b94462a62cf 100644
--- a/drivers/staging/cx25821/cx25821-video1.c
+++ b/drivers/staging/cx25821/cx25821-video1.c
@@ -86,10 +86,10 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
}
static struct videobuf_queue_ops cx25821_video_qops = {
- .buf_setup = buffer_setup,
- .buf_prepare = buffer_prepare,
+ .buf_setup = cx25821_buffer_setup,
+ .buf_prepare = cx25821_buffer_prepare,
.buf_queue = buffer_queue,
- .buf_release = buffer_release,
+ .buf_release = cx25821_buffer_release,
};
static int video_open(struct file *file)
@@ -147,7 +147,7 @@ static ssize_t video_read(struct file *file, char __user * data, size_t count,
switch (fh->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (res_locked(fh->dev, RESOURCE_VIDEO1))
+ if (cx25821_res_locked(fh->dev, RESOURCE_VIDEO1))
return -EBUSY;
return videobuf_read_one(&fh->vidq, data, count, ppos,
@@ -165,7 +165,7 @@ static unsigned int video_poll(struct file *file,
struct cx25821_fh *fh = file->private_data;
struct cx25821_buffer *buf;
- if (res_check(fh, RESOURCE_VIDEO1)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO1)) {
/* streaming capture */
if (list_empty(&fh->vidq.stream))
return POLLERR;
@@ -207,13 +207,13 @@ static int video_release(struct file *file)
cx_write(channel1->dma_ctl, 0); /* FIFO and RISC disable */
/* stop video capture */
- if (res_check(fh, RESOURCE_VIDEO1)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO1)) {
videobuf_queue_cancel(&fh->vidq);
- res_free(dev, fh, RESOURCE_VIDEO1);
+ cx25821_res_free(dev, fh, RESOURCE_VIDEO1);
}
if (fh->vidq.read_buf) {
- buffer_release(&fh->vidq, fh->vidq.read_buf);
+ cx25821_buffer_release(&fh->vidq, fh->vidq.read_buf);
kfree(fh->vidq.read_buf);
}
@@ -239,7 +239,7 @@ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
return -EINVAL;
}
- if (unlikely(!res_get(dev, fh, get_resource(fh, RESOURCE_VIDEO1)))) {
+ if (unlikely(!cx25821_res_get(dev, fh, cx25821_get_resource(fh, RESOURCE_VIDEO1)))) {
return -EBUSY;
}
@@ -257,11 +257,11 @@ static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
if (i != fh->type)
return -EINVAL;
- res = get_resource(fh, RESOURCE_VIDEO1);
+ res = cx25821_get_resource(fh, RESOURCE_VIDEO1);
err = videobuf_streamoff(get_queue(fh));
if (err < 0)
return err;
- res_free(dev, fh, res);
+ cx25821_res_free(dev, fh, res);
return 0;
}
@@ -280,7 +280,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
}
dprintk(2, "%s()\n", __func__);
- err = vidioc_try_fmt_vid_cap(file, priv, f);
+ err = cx25821_vidioc_try_fmt_vid_cap(file, priv, f);
if (0 != err)
return err;
@@ -289,11 +289,11 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
fh->vidq.field = f->fmt.pix.field;
// check if width and height are valid based on the set standard
- if (is_valid_width(f->fmt.pix.width, dev->tvnorm)) {
+ if (cx25821_is_valid_width(f->fmt.pix.width, dev->tvnorm)) {
fh->width = f->fmt.pix.width;
}
- if (is_valid_height(f->fmt.pix.height, dev->tvnorm)) {
+ if (cx25821_is_valid_height(f->fmt.pix.height, dev->tvnorm)) {
fh->height = f->fmt.pix.height;
}
@@ -378,50 +378,50 @@ static const struct v4l2_file_operations video_fops = {
.release = video_release,
.read = video_read,
.poll = video_poll,
- .mmap = video_mmap,
+ .mmap = cx25821_video_mmap,
.ioctl = video_ioctl2,
};
static const struct v4l2_ioctl_ops video_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_querycap = cx25821_vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = cx25821_vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = cx25821_vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = cx25821_vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_reqbufs = cx25821_vidioc_reqbufs,
+ .vidioc_querybuf = cx25821_vidioc_querybuf,
+ .vidioc_qbuf = cx25821_vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
#ifdef TUNER_FLAG
- .vidioc_s_std = vidioc_s_std,
- .vidioc_querystd = vidioc_querystd,
+ .vidioc_s_std = cx25821_vidioc_s_std,
+ .vidioc_querystd = cx25821_vidioc_querystd,
#endif
- .vidioc_cropcap = vidioc_cropcap,
- .vidioc_s_crop = vidioc_s_crop,
- .vidioc_g_crop = vidioc_g_crop,
- .vidioc_enum_input = vidioc_enum_input,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
- .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_cropcap = cx25821_vidioc_cropcap,
+ .vidioc_s_crop = cx25821_vidioc_s_crop,
+ .vidioc_g_crop = cx25821_vidioc_g_crop,
+ .vidioc_enum_input = cx25821_vidioc_enum_input,
+ .vidioc_g_input = cx25821_vidioc_g_input,
+ .vidioc_s_input = cx25821_vidioc_s_input,
+ .vidioc_g_ctrl = cx25821_vidioc_g_ctrl,
.vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_queryctrl = cx25821_vidioc_queryctrl,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_log_status = vidioc_log_status,
- .vidioc_g_priority = vidioc_g_priority,
- .vidioc_s_priority = vidioc_s_priority,
+ .vidioc_g_priority = cx25821_vidioc_g_priority,
+ .vidioc_s_priority = cx25821_vidioc_s_priority,
#ifdef CONFIG_VIDEO_V4L1_COMPAT
- .vidiocgmbuf = vidiocgmbuf,
+ .vidiocgmbuf = cx25821_vidiocgmbuf,
#endif
#ifdef TUNER_FLAG
- .vidioc_g_tuner = vidioc_g_tuner,
- .vidioc_s_tuner = vidioc_s_tuner,
- .vidioc_g_frequency = vidioc_g_frequency,
- .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_g_tuner = cx25821_vidioc_g_tuner,
+ .vidioc_s_tuner = cx25821_vidioc_s_tuner,
+ .vidioc_g_frequency = cx25821_vidioc_g_frequency,
+ .vidioc_s_frequency = cx25821_vidioc_s_frequency,
#endif
#ifdef CONFIG_VIDEO_ADV_DEBUG
- .vidioc_g_register = vidioc_g_register,
- .vidioc_s_register = vidioc_s_register,
+ .vidioc_g_register = cx25821_vidioc_g_register,
+ .vidioc_s_register = cx25821_vidioc_s_register,
#endif
};
diff --git a/drivers/staging/cx25821/cx25821-video2.c b/drivers/staging/cx25821/cx25821-video2.c
index 36fb855a497e..31c46aa2e563 100644
--- a/drivers/staging/cx25821/cx25821-video2.c
+++ b/drivers/staging/cx25821/cx25821-video2.c
@@ -86,10 +86,10 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
}
static struct videobuf_queue_ops cx25821_video_qops = {
- .buf_setup = buffer_setup,
- .buf_prepare = buffer_prepare,
+ .buf_setup = cx25821_buffer_setup,
+ .buf_prepare = cx25821_buffer_prepare,
.buf_queue = buffer_queue,
- .buf_release = buffer_release,
+ .buf_release = cx25821_buffer_release,
};
static int video_open(struct file *file)
@@ -147,7 +147,7 @@ static ssize_t video_read(struct file *file, char __user * data, size_t count,
switch (fh->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (res_locked(fh->dev, RESOURCE_VIDEO2))
+ if (cx25821_res_locked(fh->dev, RESOURCE_VIDEO2))
return -EBUSY;
return videobuf_read_one(&fh->vidq, data, count, ppos,
@@ -165,7 +165,7 @@ static unsigned int video_poll(struct file *file,
struct cx25821_fh *fh = file->private_data;
struct cx25821_buffer *buf;
- if (res_check(fh, RESOURCE_VIDEO2)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO2)) {
/* streaming capture */
if (list_empty(&fh->vidq.stream))
return POLLERR;
@@ -207,13 +207,13 @@ static int video_release(struct file *file)
cx_write(channel2->dma_ctl, 0); /* FIFO and RISC disable */
/* stop video capture */
- if (res_check(fh, RESOURCE_VIDEO2)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO2)) {
videobuf_queue_cancel(&fh->vidq);
- res_free(dev, fh, RESOURCE_VIDEO2);
+ cx25821_res_free(dev, fh, RESOURCE_VIDEO2);
}
if (fh->vidq.read_buf) {
- buffer_release(&fh->vidq, fh->vidq.read_buf);
+ cx25821_buffer_release(&fh->vidq, fh->vidq.read_buf);
kfree(fh->vidq.read_buf);
}
@@ -239,7 +239,7 @@ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
return -EINVAL;
}
- if (unlikely(!res_get(dev, fh, get_resource(fh, RESOURCE_VIDEO2)))) {
+ if (unlikely(!cx25821_res_get(dev, fh, cx25821_get_resource(fh, RESOURCE_VIDEO2)))) {
return -EBUSY;
}
@@ -257,11 +257,11 @@ static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
if (i != fh->type)
return -EINVAL;
- res = get_resource(fh, RESOURCE_VIDEO2);
+ res = cx25821_get_resource(fh, RESOURCE_VIDEO2);
err = videobuf_streamoff(get_queue(fh));
if (err < 0)
return err;
- res_free(dev, fh, res);
+ cx25821_res_free(dev, fh, res);
return 0;
}
@@ -280,7 +280,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
}
dprintk(2, "%s()\n", __func__);
- err = vidioc_try_fmt_vid_cap(file, priv, f);
+ err = cx25821_vidioc_try_fmt_vid_cap(file, priv, f);
if (0 != err)
return err;
@@ -289,11 +289,11 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
fh->vidq.field = f->fmt.pix.field;
// check if width and height are valid based on the set standard
- if (is_valid_width(f->fmt.pix.width, dev->tvnorm)) {
+ if (cx25821_is_valid_width(f->fmt.pix.width, dev->tvnorm)) {
fh->width = f->fmt.pix.width;
}
- if (is_valid_height(f->fmt.pix.height, dev->tvnorm)) {
+ if (cx25821_is_valid_height(f->fmt.pix.height, dev->tvnorm)) {
fh->height = f->fmt.pix.height;
}
@@ -380,50 +380,50 @@ static const struct v4l2_file_operations video_fops = {
.release = video_release,
.read = video_read,
.poll = video_poll,
- .mmap = video_mmap,
+ .mmap = cx25821_video_mmap,
.ioctl = video_ioctl2,
};
static const struct v4l2_ioctl_ops video_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_querycap = cx25821_vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = cx25821_vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = cx25821_vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = cx25821_vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_reqbufs = cx25821_vidioc_reqbufs,
+ .vidioc_querybuf = cx25821_vidioc_querybuf,
+ .vidioc_qbuf = cx25821_vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
#ifdef TUNER_FLAG
- .vidioc_s_std = vidioc_s_std,
- .vidioc_querystd = vidioc_querystd,
+ .vidioc_s_std = cx25821_vidioc_s_std,
+ .vidioc_querystd = cx25821_vidioc_querystd,
#endif
- .vidioc_cropcap = vidioc_cropcap,
- .vidioc_s_crop = vidioc_s_crop,
- .vidioc_g_crop = vidioc_g_crop,
- .vidioc_enum_input = vidioc_enum_input,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
- .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_cropcap = cx25821_vidioc_cropcap,
+ .vidioc_s_crop = cx25821_vidioc_s_crop,
+ .vidioc_g_crop = cx25821_vidioc_g_crop,
+ .vidioc_enum_input = cx25821_vidioc_enum_input,
+ .vidioc_g_input = cx25821_vidioc_g_input,
+ .vidioc_s_input = cx25821_vidioc_s_input,
+ .vidioc_g_ctrl = cx25821_vidioc_g_ctrl,
.vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_queryctrl = cx25821_vidioc_queryctrl,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_log_status = vidioc_log_status,
- .vidioc_g_priority = vidioc_g_priority,
- .vidioc_s_priority = vidioc_s_priority,
+ .vidioc_g_priority = cx25821_vidioc_g_priority,
+ .vidioc_s_priority = cx25821_vidioc_s_priority,
#ifdef CONFIG_VIDEO_V4L1_COMPAT
- .vidiocgmbuf = vidiocgmbuf,
+ .vidiocgmbuf = cx25821_vidiocgmbuf,
#endif
#ifdef TUNER_FLAG
- .vidioc_g_tuner = vidioc_g_tuner,
- .vidioc_s_tuner = vidioc_s_tuner,
- .vidioc_g_frequency = vidioc_g_frequency,
- .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_g_tuner = cx25821_vidioc_g_tuner,
+ .vidioc_s_tuner = cx25821_vidioc_s_tuner,
+ .vidioc_g_frequency = cx25821_vidioc_g_frequency,
+ .vidioc_s_frequency = cx25821_vidioc_s_frequency,
#endif
#ifdef CONFIG_VIDEO_ADV_DEBUG
- .vidioc_g_register = vidioc_g_register,
- .vidioc_s_register = vidioc_s_register,
+ .vidioc_g_register = cx25821_vidioc_g_register,
+ .vidioc_s_register = cx25821_vidioc_s_register,
#endif
};
diff --git a/drivers/staging/cx25821/cx25821-video3.c b/drivers/staging/cx25821/cx25821-video3.c
index 1e0f10abdbcd..cbc5cad24a08 100644
--- a/drivers/staging/cx25821/cx25821-video3.c
+++ b/drivers/staging/cx25821/cx25821-video3.c
@@ -86,10 +86,10 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
}
static struct videobuf_queue_ops cx25821_video_qops = {
- .buf_setup = buffer_setup,
- .buf_prepare = buffer_prepare,
+ .buf_setup = cx25821_buffer_setup,
+ .buf_prepare = cx25821_buffer_prepare,
.buf_queue = buffer_queue,
- .buf_release = buffer_release,
+ .buf_release = cx25821_buffer_release,
};
static int video_open(struct file *file)
@@ -147,7 +147,7 @@ static ssize_t video_read(struct file *file, char __user * data, size_t count,
switch (fh->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (res_locked(fh->dev, RESOURCE_VIDEO3))
+ if (cx25821_res_locked(fh->dev, RESOURCE_VIDEO3))
return -EBUSY;
return videobuf_read_one(&fh->vidq, data, count, ppos,
@@ -165,7 +165,7 @@ static unsigned int video_poll(struct file *file,
struct cx25821_fh *fh = file->private_data;
struct cx25821_buffer *buf;
- if (res_check(fh, RESOURCE_VIDEO3)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO3)) {
/* streaming capture */
if (list_empty(&fh->vidq.stream))
return POLLERR;
@@ -207,13 +207,13 @@ static int video_release(struct file *file)
cx_write(channel3->dma_ctl, 0); /* FIFO and RISC disable */
/* stop video capture */
- if (res_check(fh, RESOURCE_VIDEO3)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO3)) {
videobuf_queue_cancel(&fh->vidq);
- res_free(dev, fh, RESOURCE_VIDEO3);
+ cx25821_res_free(dev, fh, RESOURCE_VIDEO3);
}
if (fh->vidq.read_buf) {
- buffer_release(&fh->vidq, fh->vidq.read_buf);
+ cx25821_buffer_release(&fh->vidq, fh->vidq.read_buf);
kfree(fh->vidq.read_buf);
}
@@ -239,7 +239,7 @@ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
return -EINVAL;
}
- if (unlikely(!res_get(dev, fh, get_resource(fh, RESOURCE_VIDEO3)))) {
+ if (unlikely(!cx25821_res_get(dev, fh, cx25821_get_resource(fh, RESOURCE_VIDEO3)))) {
return -EBUSY;
}
@@ -257,11 +257,11 @@ static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
if (i != fh->type)
return -EINVAL;
- res = get_resource(fh, RESOURCE_VIDEO3);
+ res = cx25821_get_resource(fh, RESOURCE_VIDEO3);
err = videobuf_streamoff(get_queue(fh));
if (err < 0)
return err;
- res_free(dev, fh, res);
+ cx25821_res_free(dev, fh, res);
return 0;
}
@@ -280,7 +280,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
}
dprintk(2, "%s()\n", __func__);
- err = vidioc_try_fmt_vid_cap(file, priv, f);
+ err = cx25821_vidioc_try_fmt_vid_cap(file, priv, f);
if (0 != err)
return err;
@@ -289,11 +289,11 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
fh->vidq.field = f->fmt.pix.field;
// check if width and height are valid based on the set standard
- if (is_valid_width(f->fmt.pix.width, dev->tvnorm)) {
+ if (cx25821_is_valid_width(f->fmt.pix.width, dev->tvnorm)) {
fh->width = f->fmt.pix.width;
}
- if (is_valid_height(f->fmt.pix.height, dev->tvnorm)) {
+ if (cx25821_is_valid_height(f->fmt.pix.height, dev->tvnorm)) {
fh->height = f->fmt.pix.height;
}
@@ -379,50 +379,50 @@ static const struct v4l2_file_operations video_fops = {
.release = video_release,
.read = video_read,
.poll = video_poll,
- .mmap = video_mmap,
+ .mmap = cx25821_video_mmap,
.ioctl = video_ioctl2,
};
static const struct v4l2_ioctl_ops video_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_querycap = cx25821_vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = cx25821_vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = cx25821_vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = cx25821_vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_reqbufs = cx25821_vidioc_reqbufs,
+ .vidioc_querybuf = cx25821_vidioc_querybuf,
+ .vidioc_qbuf = cx25821_vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
#ifdef TUNER_FLAG
- .vidioc_s_std = vidioc_s_std,
- .vidioc_querystd = vidioc_querystd,
+ .vidioc_s_std = cx25821_vidioc_s_std,
+ .vidioc_querystd = cx25821_vidioc_querystd,
#endif
- .vidioc_cropcap = vidioc_cropcap,
- .vidioc_s_crop = vidioc_s_crop,
- .vidioc_g_crop = vidioc_g_crop,
- .vidioc_enum_input = vidioc_enum_input,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
- .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_cropcap = cx25821_vidioc_cropcap,
+ .vidioc_s_crop = cx25821_vidioc_s_crop,
+ .vidioc_g_crop = cx25821_vidioc_g_crop,
+ .vidioc_enum_input = cx25821_vidioc_enum_input,
+ .vidioc_g_input = cx25821_vidioc_g_input,
+ .vidioc_s_input = cx25821_vidioc_s_input,
+ .vidioc_g_ctrl = cx25821_vidioc_g_ctrl,
.vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_queryctrl = cx25821_vidioc_queryctrl,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_log_status = vidioc_log_status,
- .vidioc_g_priority = vidioc_g_priority,
- .vidioc_s_priority = vidioc_s_priority,
+ .vidioc_g_priority = cx25821_vidioc_g_priority,
+ .vidioc_s_priority = cx25821_vidioc_s_priority,
#ifdef CONFIG_VIDEO_V4L1_COMPAT
- .vidiocgmbuf = vidiocgmbuf,
+ .vidiocgmbuf = cx25821_vidiocgmbuf,
#endif
#ifdef TUNER_FLAG
- .vidioc_g_tuner = vidioc_g_tuner,
- .vidioc_s_tuner = vidioc_s_tuner,
- .vidioc_g_frequency = vidioc_g_frequency,
- .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_g_tuner = cx25821_vidioc_g_tuner,
+ .vidioc_s_tuner = cx25821_vidioc_s_tuner,
+ .vidioc_g_frequency = cx25821_vidioc_g_frequency,
+ .vidioc_s_frequency = cx25821_vidioc_s_frequency,
#endif
#ifdef CONFIG_VIDEO_ADV_DEBUG
- .vidioc_g_register = vidioc_g_register,
- .vidioc_s_register = vidioc_s_register,
+ .vidioc_g_register = cx25821_vidioc_g_register,
+ .vidioc_s_register = cx25821_vidioc_s_register,
#endif
};
diff --git a/drivers/staging/cx25821/cx25821-video4.c b/drivers/staging/cx25821/cx25821-video4.c
index 0cbe7a79d8c0..101074ad742c 100644
--- a/drivers/staging/cx25821/cx25821-video4.c
+++ b/drivers/staging/cx25821/cx25821-video4.c
@@ -86,10 +86,10 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
}
static struct videobuf_queue_ops cx25821_video_qops = {
- .buf_setup = buffer_setup,
- .buf_prepare = buffer_prepare,
+ .buf_setup = cx25821_buffer_setup,
+ .buf_prepare = cx25821_buffer_prepare,
.buf_queue = buffer_queue,
- .buf_release = buffer_release,
+ .buf_release = cx25821_buffer_release,
};
static int video_open(struct file *file)
@@ -146,7 +146,7 @@ static ssize_t video_read(struct file *file, char __user * data, size_t count,
switch (fh->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (res_locked(fh->dev, RESOURCE_VIDEO4))
+ if (cx25821_res_locked(fh->dev, RESOURCE_VIDEO4))
return -EBUSY;
return videobuf_read_one(&fh->vidq, data, count, ppos,
@@ -164,7 +164,7 @@ static unsigned int video_poll(struct file *file,
struct cx25821_fh *fh = file->private_data;
struct cx25821_buffer *buf;
- if (res_check(fh, RESOURCE_VIDEO4)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO4)) {
/* streaming capture */
if (list_empty(&fh->vidq.stream))
return POLLERR;
@@ -206,13 +206,13 @@ static int video_release(struct file *file)
cx_write(channel4->dma_ctl, 0); /* FIFO and RISC disable */
/* stop video capture */
- if (res_check(fh, RESOURCE_VIDEO4)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO4)) {
videobuf_queue_cancel(&fh->vidq);
- res_free(dev, fh, RESOURCE_VIDEO4);
+ cx25821_res_free(dev, fh, RESOURCE_VIDEO4);
}
if (fh->vidq.read_buf) {
- buffer_release(&fh->vidq, fh->vidq.read_buf);
+ cx25821_buffer_release(&fh->vidq, fh->vidq.read_buf);
kfree(fh->vidq.read_buf);
}
@@ -238,7 +238,7 @@ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
return -EINVAL;
}
- if (unlikely(!res_get(dev, fh, get_resource(fh, RESOURCE_VIDEO4)))) {
+ if (unlikely(!cx25821_res_get(dev, fh, cx25821_get_resource(fh, RESOURCE_VIDEO4)))) {
return -EBUSY;
}
@@ -256,11 +256,11 @@ static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
if (i != fh->type)
return -EINVAL;
- res = get_resource(fh, RESOURCE_VIDEO4);
+ res = cx25821_get_resource(fh, RESOURCE_VIDEO4);
err = videobuf_streamoff(get_queue(fh));
if (err < 0)
return err;
- res_free(dev, fh, res);
+ cx25821_res_free(dev, fh, res);
return 0;
}
@@ -279,7 +279,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
return err;
}
dprintk(2, "%s()\n", __func__);
- err = vidioc_try_fmt_vid_cap(file, priv, f);
+ err = cx25821_vidioc_try_fmt_vid_cap(file, priv, f);
if (0 != err)
return err;
@@ -288,11 +288,11 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
fh->vidq.field = f->fmt.pix.field;
// check if width and height are valid based on the set standard
- if (is_valid_width(f->fmt.pix.width, dev->tvnorm)) {
+ if (cx25821_is_valid_width(f->fmt.pix.width, dev->tvnorm)) {
fh->width = f->fmt.pix.width;
}
- if (is_valid_height(f->fmt.pix.height, dev->tvnorm)) {
+ if (cx25821_is_valid_height(f->fmt.pix.height, dev->tvnorm)) {
fh->height = f->fmt.pix.height;
}
@@ -378,50 +378,50 @@ static const struct v4l2_file_operations video_fops = {
.release = video_release,
.read = video_read,
.poll = video_poll,
- .mmap = video_mmap,
+ .mmap = cx25821_video_mmap,
.ioctl = video_ioctl2,
};
static const struct v4l2_ioctl_ops video_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_querycap = cx25821_vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = cx25821_vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = cx25821_vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = cx25821_vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_reqbufs = cx25821_vidioc_reqbufs,
+ .vidioc_querybuf = cx25821_vidioc_querybuf,
+ .vidioc_qbuf = cx25821_vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
#ifdef TUNER_FLAG
- .vidioc_s_std = vidioc_s_std,
- .vidioc_querystd = vidioc_querystd,
+ .vidioc_s_std = cx25821_vidioc_s_std,
+ .vidioc_querystd = cx25821_vidioc_querystd,
#endif
- .vidioc_cropcap = vidioc_cropcap,
- .vidioc_s_crop = vidioc_s_crop,
- .vidioc_g_crop = vidioc_g_crop,
- .vidioc_enum_input = vidioc_enum_input,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
- .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_cropcap = cx25821_vidioc_cropcap,
+ .vidioc_s_crop = cx25821_vidioc_s_crop,
+ .vidioc_g_crop = cx25821_vidioc_g_crop,
+ .vidioc_enum_input = cx25821_vidioc_enum_input,
+ .vidioc_g_input = cx25821_vidioc_g_input,
+ .vidioc_s_input = cx25821_vidioc_s_input,
+ .vidioc_g_ctrl = cx25821_vidioc_g_ctrl,
.vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_queryctrl = cx25821_vidioc_queryctrl,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_log_status = vidioc_log_status,
- .vidioc_g_priority = vidioc_g_priority,
- .vidioc_s_priority = vidioc_s_priority,
+ .vidioc_g_priority = cx25821_vidioc_g_priority,
+ .vidioc_s_priority = cx25821_vidioc_s_priority,
#ifdef CONFIG_VIDEO_V4L1_COMPAT
- .vidiocgmbuf = vidiocgmbuf,
+ .vidiocgmbuf = cx25821_vidiocgmbuf,
#endif
#ifdef TUNER_FLAG
- .vidioc_g_tuner = vidioc_g_tuner,
- .vidioc_s_tuner = vidioc_s_tuner,
- .vidioc_g_frequency = vidioc_g_frequency,
- .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_g_tuner = cx25821_vidioc_g_tuner,
+ .vidioc_s_tuner = cx25821_vidioc_s_tuner,
+ .vidioc_g_frequency = cx25821_vidioc_g_frequency,
+ .vidioc_s_frequency = cx25821_vidioc_s_frequency,
#endif
#ifdef CONFIG_VIDEO_ADV_DEBUG
- .vidioc_g_register = vidioc_g_register,
- .vidioc_s_register = vidioc_s_register,
+ .vidioc_g_register = cx25821_vidioc_g_register,
+ .vidioc_s_register = cx25821_vidioc_s_register,
#endif
};
diff --git a/drivers/staging/cx25821/cx25821-video5.c b/drivers/staging/cx25821/cx25821-video5.c
index 5dc08adc12e8..2019c5e3ea14 100644
--- a/drivers/staging/cx25821/cx25821-video5.c
+++ b/drivers/staging/cx25821/cx25821-video5.c
@@ -86,10 +86,10 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
}
static struct videobuf_queue_ops cx25821_video_qops = {
- .buf_setup = buffer_setup,
- .buf_prepare = buffer_prepare,
+ .buf_setup = cx25821_buffer_setup,
+ .buf_prepare = cx25821_buffer_prepare,
.buf_queue = buffer_queue,
- .buf_release = buffer_release,
+ .buf_release = cx25821_buffer_release,
};
static int video_open(struct file *file)
@@ -147,7 +147,7 @@ static ssize_t video_read(struct file *file, char __user * data, size_t count,
switch (fh->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (res_locked(fh->dev, RESOURCE_VIDEO5))
+ if (cx25821_res_locked(fh->dev, RESOURCE_VIDEO5))
return -EBUSY;
return videobuf_read_one(&fh->vidq, data, count, ppos,
@@ -165,7 +165,7 @@ static unsigned int video_poll(struct file *file,
struct cx25821_fh *fh = file->private_data;
struct cx25821_buffer *buf;
- if (res_check(fh, RESOURCE_VIDEO5)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO5)) {
/* streaming capture */
if (list_empty(&fh->vidq.stream))
return POLLERR;
@@ -207,13 +207,13 @@ static int video_release(struct file *file)
cx_write(channel5->dma_ctl, 0); /* FIFO and RISC disable */
/* stop video capture */
- if (res_check(fh, RESOURCE_VIDEO5)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO5)) {
videobuf_queue_cancel(&fh->vidq);
- res_free(dev, fh, RESOURCE_VIDEO5);
+ cx25821_res_free(dev, fh, RESOURCE_VIDEO5);
}
if (fh->vidq.read_buf) {
- buffer_release(&fh->vidq, fh->vidq.read_buf);
+ cx25821_buffer_release(&fh->vidq, fh->vidq.read_buf);
kfree(fh->vidq.read_buf);
}
@@ -239,7 +239,7 @@ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
return -EINVAL;
}
- if (unlikely(!res_get(dev, fh, get_resource(fh, RESOURCE_VIDEO5)))) {
+ if (unlikely(!cx25821_res_get(dev, fh, cx25821_get_resource(fh, RESOURCE_VIDEO5)))) {
return -EBUSY;
}
@@ -257,11 +257,11 @@ static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
if (i != fh->type)
return -EINVAL;
- res = get_resource(fh, RESOURCE_VIDEO5);
+ res = cx25821_get_resource(fh, RESOURCE_VIDEO5);
err = videobuf_streamoff(get_queue(fh));
if (err < 0)
return err;
- res_free(dev, fh, res);
+ cx25821_res_free(dev, fh, res);
return 0;
}
@@ -280,7 +280,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
}
dprintk(2, "%s()\n", __func__);
- err = vidioc_try_fmt_vid_cap(file, priv, f);
+ err = cx25821_vidioc_try_fmt_vid_cap(file, priv, f);
if (0 != err)
return err;
@@ -289,11 +289,11 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
fh->vidq.field = f->fmt.pix.field;
// check if width and height are valid based on the set standard
- if (is_valid_width(f->fmt.pix.width, dev->tvnorm)) {
+ if (cx25821_is_valid_width(f->fmt.pix.width, dev->tvnorm)) {
fh->width = f->fmt.pix.width;
}
- if (is_valid_height(f->fmt.pix.height, dev->tvnorm)) {
+ if (cx25821_is_valid_height(f->fmt.pix.height, dev->tvnorm)) {
fh->height = f->fmt.pix.height;
}
@@ -378,50 +378,50 @@ static const struct v4l2_file_operations video_fops = {
.release = video_release,
.read = video_read,
.poll = video_poll,
- .mmap = video_mmap,
+ .mmap = cx25821_video_mmap,
.ioctl = video_ioctl2,
};
static const struct v4l2_ioctl_ops video_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_querycap = cx25821_vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = cx25821_vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = cx25821_vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = cx25821_vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_reqbufs = cx25821_vidioc_reqbufs,
+ .vidioc_querybuf = cx25821_vidioc_querybuf,
+ .vidioc_qbuf = cx25821_vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
#ifdef TUNER_FLAG
- .vidioc_s_std = vidioc_s_std,
- .vidioc_querystd = vidioc_querystd,
+ .vidioc_s_std = cx25821_vidioc_s_std,
+ .vidioc_querystd = cx25821_vidioc_querystd,
#endif
- .vidioc_cropcap = vidioc_cropcap,
- .vidioc_s_crop = vidioc_s_crop,
- .vidioc_g_crop = vidioc_g_crop,
- .vidioc_enum_input = vidioc_enum_input,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
- .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_cropcap = cx25821_vidioc_cropcap,
+ .vidioc_s_crop = cx25821_vidioc_s_crop,
+ .vidioc_g_crop = cx25821_vidioc_g_crop,
+ .vidioc_enum_input = cx25821_vidioc_enum_input,
+ .vidioc_g_input = cx25821_vidioc_g_input,
+ .vidioc_s_input = cx25821_vidioc_s_input,
+ .vidioc_g_ctrl = cx25821_vidioc_g_ctrl,
.vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_queryctrl = cx25821_vidioc_queryctrl,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_log_status = vidioc_log_status,
- .vidioc_g_priority = vidioc_g_priority,
- .vidioc_s_priority = vidioc_s_priority,
+ .vidioc_g_priority = cx25821_vidioc_g_priority,
+ .vidioc_s_priority = cx25821_vidioc_s_priority,
#ifdef CONFIG_VIDEO_V4L1_COMPAT
- .vidiocgmbuf = vidiocgmbuf,
+ .vidiocgmbuf = cx25821_vidiocgmbuf,
#endif
#ifdef TUNER_FLAG
- .vidioc_g_tuner = vidioc_g_tuner,
- .vidioc_s_tuner = vidioc_s_tuner,
- .vidioc_g_frequency = vidioc_g_frequency,
- .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_g_tuner = cx25821_vidioc_g_tuner,
+ .vidioc_s_tuner = cx25821_vidioc_s_tuner,
+ .vidioc_g_frequency = cx25821_vidioc_g_frequency,
+ .vidioc_s_frequency = cx25821_vidioc_s_frequency,
#endif
#ifdef CONFIG_VIDEO_ADV_DEBUG
- .vidioc_g_register = vidioc_g_register,
- .vidioc_s_register = vidioc_s_register,
+ .vidioc_g_register = cx25821_vidioc_g_register,
+ .vidioc_s_register = cx25821_vidioc_s_register,
#endif
};
diff --git a/drivers/staging/cx25821/cx25821-video6.c b/drivers/staging/cx25821/cx25821-video6.c
index 2938ad3ad3c5..d19c786e5a5f 100644
--- a/drivers/staging/cx25821/cx25821-video6.c
+++ b/drivers/staging/cx25821/cx25821-video6.c
@@ -86,10 +86,10 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
}
static struct videobuf_queue_ops cx25821_video_qops = {
- .buf_setup = buffer_setup,
- .buf_prepare = buffer_prepare,
+ .buf_setup = cx25821_buffer_setup,
+ .buf_prepare = cx25821_buffer_prepare,
.buf_queue = buffer_queue,
- .buf_release = buffer_release,
+ .buf_release = cx25821_buffer_release,
};
static int video_open(struct file *file)
@@ -147,7 +147,7 @@ static ssize_t video_read(struct file *file, char __user * data, size_t count,
switch (fh->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (res_locked(fh->dev, RESOURCE_VIDEO6))
+ if (cx25821_res_locked(fh->dev, RESOURCE_VIDEO6))
return -EBUSY;
return videobuf_read_one(&fh->vidq, data, count, ppos,
@@ -165,7 +165,7 @@ static unsigned int video_poll(struct file *file,
struct cx25821_fh *fh = file->private_data;
struct cx25821_buffer *buf;
- if (res_check(fh, RESOURCE_VIDEO6)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO6)) {
/* streaming capture */
if (list_empty(&fh->vidq.stream))
return POLLERR;
@@ -207,12 +207,12 @@ static int video_release(struct file *file)
cx_write(channel6->dma_ctl, 0); /* FIFO and RISC disable */
/* stop video capture */
- if (res_check(fh, RESOURCE_VIDEO6)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO6)) {
videobuf_queue_cancel(&fh->vidq);
- res_free(dev, fh, RESOURCE_VIDEO6);
+ cx25821_res_free(dev, fh, RESOURCE_VIDEO6);
}
if (fh->vidq.read_buf) {
- buffer_release(&fh->vidq, fh->vidq.read_buf);
+ cx25821_buffer_release(&fh->vidq, fh->vidq.read_buf);
kfree(fh->vidq.read_buf);
}
@@ -238,7 +238,7 @@ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
return -EINVAL;
}
- if (unlikely(!res_get(dev, fh, get_resource(fh, RESOURCE_VIDEO6)))) {
+ if (unlikely(!cx25821_res_get(dev, fh, cx25821_get_resource(fh, RESOURCE_VIDEO6)))) {
return -EBUSY;
}
@@ -256,11 +256,11 @@ static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
if (i != fh->type)
return -EINVAL;
- res = get_resource(fh, RESOURCE_VIDEO6);
+ res = cx25821_get_resource(fh, RESOURCE_VIDEO6);
err = videobuf_streamoff(get_queue(fh));
if (err < 0)
return err;
- res_free(dev, fh, res);
+ cx25821_res_free(dev, fh, res);
return 0;
}
@@ -279,7 +279,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
}
dprintk(2, "%s()\n", __func__);
- err = vidioc_try_fmt_vid_cap(file, priv, f);
+ err = cx25821_vidioc_try_fmt_vid_cap(file, priv, f);
if (0 != err)
return err;
@@ -288,11 +288,11 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
fh->vidq.field = f->fmt.pix.field;
// check if width and height are valid based on the set standard
- if (is_valid_width(f->fmt.pix.width, dev->tvnorm)) {
+ if (cx25821_is_valid_width(f->fmt.pix.width, dev->tvnorm)) {
fh->width = f->fmt.pix.width;
}
- if (is_valid_height(f->fmt.pix.height, dev->tvnorm)) {
+ if (cx25821_is_valid_height(f->fmt.pix.height, dev->tvnorm)) {
fh->height = f->fmt.pix.height;
}
@@ -378,50 +378,50 @@ static const struct v4l2_file_operations video_fops = {
.release = video_release,
.read = video_read,
.poll = video_poll,
- .mmap = video_mmap,
+ .mmap = cx25821_video_mmap,
.ioctl = video_ioctl2,
};
static const struct v4l2_ioctl_ops video_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_querycap = cx25821_vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = cx25821_vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = cx25821_vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = cx25821_vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_reqbufs = cx25821_vidioc_reqbufs,
+ .vidioc_querybuf = cx25821_vidioc_querybuf,
+ .vidioc_qbuf = cx25821_vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
#ifdef TUNER_FLAG
- .vidioc_s_std = vidioc_s_std,
- .vidioc_querystd = vidioc_querystd,
+ .vidioc_s_std = cx25821_vidioc_s_std,
+ .vidioc_querystd = cx25821_vidioc_querystd,
#endif
- .vidioc_cropcap = vidioc_cropcap,
- .vidioc_s_crop = vidioc_s_crop,
- .vidioc_g_crop = vidioc_g_crop,
- .vidioc_enum_input = vidioc_enum_input,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
- .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_cropcap = cx25821_vidioc_cropcap,
+ .vidioc_s_crop = cx25821_vidioc_s_crop,
+ .vidioc_g_crop = cx25821_vidioc_g_crop,
+ .vidioc_enum_input = cx25821_vidioc_enum_input,
+ .vidioc_g_input = cx25821_vidioc_g_input,
+ .vidioc_s_input = cx25821_vidioc_s_input,
+ .vidioc_g_ctrl = cx25821_vidioc_g_ctrl,
.vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_queryctrl = cx25821_vidioc_queryctrl,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_log_status = vidioc_log_status,
- .vidioc_g_priority = vidioc_g_priority,
- .vidioc_s_priority = vidioc_s_priority,
+ .vidioc_g_priority = cx25821_vidioc_g_priority,
+ .vidioc_s_priority = cx25821_vidioc_s_priority,
#ifdef CONFIG_VIDEO_V4L1_COMPAT
- .vidiocgmbuf = vidiocgmbuf,
+ .vidiocgmbuf = cx25821_vidiocgmbuf,
#endif
#ifdef TUNER_FLAG
- .vidioc_g_tuner = vidioc_g_tuner,
- .vidioc_s_tuner = vidioc_s_tuner,
- .vidioc_g_frequency = vidioc_g_frequency,
- .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_g_tuner = cx25821_vidioc_g_tuner,
+ .vidioc_s_tuner = cx25821_vidioc_s_tuner,
+ .vidioc_g_frequency = cx25821_vidioc_g_frequency,
+ .vidioc_s_frequency = cx25821_vidioc_s_frequency,
#endif
#ifdef CONFIG_VIDEO_ADV_DEBUG
- .vidioc_g_register = vidioc_g_register,
- .vidioc_s_register = vidioc_s_register,
+ .vidioc_g_register = cx25821_vidioc_g_register,
+ .vidioc_s_register = cx25821_vidioc_s_register,
#endif
};
diff --git a/drivers/staging/cx25821/cx25821-video7.c b/drivers/staging/cx25821/cx25821-video7.c
index 458e525d72af..8a7c854a0fc8 100644
--- a/drivers/staging/cx25821/cx25821-video7.c
+++ b/drivers/staging/cx25821/cx25821-video7.c
@@ -85,10 +85,10 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
}
static struct videobuf_queue_ops cx25821_video_qops = {
- .buf_setup = buffer_setup,
- .buf_prepare = buffer_prepare,
+ .buf_setup = cx25821_buffer_setup,
+ .buf_prepare = cx25821_buffer_prepare,
.buf_queue = buffer_queue,
- .buf_release = buffer_release,
+ .buf_release = cx25821_buffer_release,
};
static int video_open(struct file *file)
@@ -146,7 +146,7 @@ static ssize_t video_read(struct file *file, char __user * data, size_t count,
switch (fh->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (res_locked(fh->dev, RESOURCE_VIDEO7))
+ if (cx25821_res_locked(fh->dev, RESOURCE_VIDEO7))
return -EBUSY;
return videobuf_read_one(&fh->vidq, data, count, ppos,
@@ -164,7 +164,7 @@ static unsigned int video_poll(struct file *file,
struct cx25821_fh *fh = file->private_data;
struct cx25821_buffer *buf;
- if (res_check(fh, RESOURCE_VIDEO7)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO7)) {
/* streaming capture */
if (list_empty(&fh->vidq.stream))
return POLLERR;
@@ -206,13 +206,13 @@ static int video_release(struct file *file)
cx_write(channel7->dma_ctl, 0); /* FIFO and RISC disable */
/* stop video capture */
- if (res_check(fh, RESOURCE_VIDEO7)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO7)) {
videobuf_queue_cancel(&fh->vidq);
- res_free(dev, fh, RESOURCE_VIDEO7);
+ cx25821_res_free(dev, fh, RESOURCE_VIDEO7);
}
if (fh->vidq.read_buf) {
- buffer_release(&fh->vidq, fh->vidq.read_buf);
+ cx25821_buffer_release(&fh->vidq, fh->vidq.read_buf);
kfree(fh->vidq.read_buf);
}
@@ -238,7 +238,7 @@ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
return -EINVAL;
}
- if (unlikely(!res_get(dev, fh, get_resource(fh, RESOURCE_VIDEO7)))) {
+ if (unlikely(!cx25821_res_get(dev, fh, cx25821_get_resource(fh, RESOURCE_VIDEO7)))) {
return -EBUSY;
}
@@ -256,11 +256,11 @@ static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
if (i != fh->type)
return -EINVAL;
- res = get_resource(fh, RESOURCE_VIDEO7);
+ res = cx25821_get_resource(fh, RESOURCE_VIDEO7);
err = videobuf_streamoff(get_queue(fh));
if (err < 0)
return err;
- res_free(dev, fh, res);
+ cx25821_res_free(dev, fh, res);
return 0;
}
@@ -279,7 +279,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
}
dprintk(2, "%s()\n", __func__);
- err = vidioc_try_fmt_vid_cap(file, priv, f);
+ err = cx25821_vidioc_try_fmt_vid_cap(file, priv, f);
if (0 != err)
return err;
@@ -288,11 +288,11 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
fh->vidq.field = f->fmt.pix.field;
// check if width and height are valid based on the set standard
- if (is_valid_width(f->fmt.pix.width, dev->tvnorm)) {
+ if (cx25821_is_valid_width(f->fmt.pix.width, dev->tvnorm)) {
fh->width = f->fmt.pix.width;
}
- if (is_valid_height(f->fmt.pix.height, dev->tvnorm)) {
+ if (cx25821_is_valid_height(f->fmt.pix.height, dev->tvnorm)) {
fh->height = f->fmt.pix.height;
}
@@ -377,50 +377,50 @@ static const struct v4l2_file_operations video_fops = {
.release = video_release,
.read = video_read,
.poll = video_poll,
- .mmap = video_mmap,
+ .mmap = cx25821_video_mmap,
.ioctl = video_ioctl2,
};
static const struct v4l2_ioctl_ops video_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_querycap = cx25821_vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = cx25821_vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = cx25821_vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = cx25821_vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_reqbufs = cx25821_vidioc_reqbufs,
+ .vidioc_querybuf = cx25821_vidioc_querybuf,
+ .vidioc_qbuf = cx25821_vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
#ifdef TUNER_FLAG
- .vidioc_s_std = vidioc_s_std,
- .vidioc_querystd = vidioc_querystd,
+ .vidioc_s_std = cx25821_vidioc_s_std,
+ .vidioc_querystd = cx25821_vidioc_querystd,
#endif
- .vidioc_cropcap = vidioc_cropcap,
- .vidioc_s_crop = vidioc_s_crop,
- .vidioc_g_crop = vidioc_g_crop,
- .vidioc_enum_input = vidioc_enum_input,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
- .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_cropcap = cx25821_vidioc_cropcap,
+ .vidioc_s_crop = cx25821_vidioc_s_crop,
+ .vidioc_g_crop = cx25821_vidioc_g_crop,
+ .vidioc_enum_input = cx25821_vidioc_enum_input,
+ .vidioc_g_input = cx25821_vidioc_g_input,
+ .vidioc_s_input = cx25821_vidioc_s_input,
+ .vidioc_g_ctrl = cx25821_vidioc_g_ctrl,
.vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_queryctrl = cx25821_vidioc_queryctrl,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_log_status = vidioc_log_status,
- .vidioc_g_priority = vidioc_g_priority,
- .vidioc_s_priority = vidioc_s_priority,
+ .vidioc_g_priority = cx25821_vidioc_g_priority,
+ .vidioc_s_priority = cx25821_vidioc_s_priority,
#ifdef CONFIG_VIDEO_V4L1_COMPAT
- .vidiocgmbuf = vidiocgmbuf,
+ .vidiocgmbuf = cx25821_vidiocgmbuf,
#endif
#ifdef TUNER_FLAG
- .vidioc_g_tuner = vidioc_g_tuner,
- .vidioc_s_tuner = vidioc_s_tuner,
- .vidioc_g_frequency = vidioc_g_frequency,
- .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_g_tuner = cx25821_vidioc_g_tuner,
+ .vidioc_s_tuner = cx25821_vidioc_s_tuner,
+ .vidioc_g_frequency = cx25821_vidioc_g_frequency,
+ .vidioc_s_frequency = cx25821_vidioc_s_frequency,
#endif
#ifdef CONFIG_VIDEO_ADV_DEBUG
- .vidioc_g_register = vidioc_g_register,
- .vidioc_s_register = vidioc_s_register,
+ .vidioc_g_register = cx25821_vidioc_g_register,
+ .vidioc_s_register = cx25821_vidioc_s_register,
#endif
};
diff --git a/drivers/staging/cx25821/cx25821-videoioctl.c b/drivers/staging/cx25821/cx25821-videoioctl.c
index 1da52b54a454..840714a9415f 100644
--- a/drivers/staging/cx25821/cx25821-videoioctl.c
+++ b/drivers/staging/cx25821/cx25821-videoioctl.c
@@ -86,10 +86,10 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
}
static struct videobuf_queue_ops cx25821_video_qops = {
- .buf_setup = buffer_setup,
- .buf_prepare = buffer_prepare,
+ .buf_setup = cx25821_buffer_setup,
+ .buf_prepare = cx25821_buffer_prepare,
.buf_queue = buffer_queue,
- .buf_release = buffer_release,
+ .buf_release = cx25821_buffer_release,
};
static int video_open(struct file *file)
@@ -145,7 +145,7 @@ static ssize_t video_read(struct file *file, char __user * data, size_t count,
switch (fh->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (res_locked(fh->dev, RESOURCE_VIDEO_IOCTL))
+ if (cx25821_res_locked(fh->dev, RESOURCE_VIDEO_IOCTL))
return -EBUSY;
return videobuf_read_one(&fh->vidq, data, count, ppos,
@@ -163,7 +163,7 @@ static unsigned int video_poll(struct file *file,
struct cx25821_fh *fh = file->private_data;
struct cx25821_buffer *buf;
- if (res_check(fh, RESOURCE_VIDEO_IOCTL)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO_IOCTL)) {
/* streaming capture */
if (list_empty(&fh->vidq.stream))
return POLLERR;
@@ -189,13 +189,13 @@ static int video_release(struct file *file)
struct cx25821_dev *dev = fh->dev;
/* stop video capture */
- if (res_check(fh, RESOURCE_VIDEO_IOCTL)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO_IOCTL)) {
videobuf_queue_cancel(&fh->vidq);
- res_free(dev, fh, RESOURCE_VIDEO_IOCTL);
+ cx25821_res_free(dev, fh, RESOURCE_VIDEO_IOCTL);
}
if (fh->vidq.read_buf) {
- buffer_release(&fh->vidq, fh->vidq.read_buf);
+ cx25821_buffer_release(&fh->vidq, fh->vidq.read_buf);
kfree(fh->vidq.read_buf);
}
@@ -222,7 +222,7 @@ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
return -EINVAL;
}
- if (unlikely(!res_get(dev, fh, get_resource(fh, RESOURCE_VIDEO_IOCTL)))) {
+ if (unlikely(!cx25821_res_get(dev, fh, cx25821_get_resource(fh, RESOURCE_VIDEO_IOCTL)))) {
return -EBUSY;
}
@@ -240,11 +240,11 @@ static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
if (i != fh->type)
return -EINVAL;
- res = get_resource(fh, RESOURCE_VIDEO_IOCTL);
+ res = cx25821_get_resource(fh, RESOURCE_VIDEO_IOCTL);
err = videobuf_streamoff(get_queue(fh));
if (err < 0)
return err;
- res_free(dev, fh, res);
+ cx25821_res_free(dev, fh, res);
return 0;
}
@@ -262,7 +262,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
}
dprintk(2, "%s()\n", __func__);
- err = vidioc_try_fmt_vid_cap(file, priv, f);
+ err = cx25821_vidioc_try_fmt_vid_cap(file, priv, f);
if (0 != err)
return err;
@@ -424,50 +424,50 @@ static const struct v4l2_file_operations video_fops = {
.release = video_release,
.read = video_read,
.poll = video_poll,
- .mmap = video_mmap,
+ .mmap = cx25821_video_mmap,
.ioctl = video_ioctl_set,
};
static const struct v4l2_ioctl_ops video_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_querycap = cx25821_vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = cx25821_vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = cx25821_vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = cx25821_vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_reqbufs = cx25821_vidioc_reqbufs,
+ .vidioc_querybuf = cx25821_vidioc_querybuf,
+ .vidioc_qbuf = cx25821_vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
#ifdef TUNER_FLAG
- .vidioc_s_std = vidioc_s_std,
- .vidioc_querystd = vidioc_querystd,
+ .vidioc_s_std = cx25821_vidioc_s_std,
+ .vidioc_querystd = cx25821_vidioc_querystd,
#endif
- .vidioc_cropcap = vidioc_cropcap,
- .vidioc_s_crop = vidioc_s_crop,
- .vidioc_g_crop = vidioc_g_crop,
- .vidioc_enum_input = vidioc_enum_input,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
- .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_cropcap = cx25821_vidioc_cropcap,
+ .vidioc_s_crop = cx25821_vidioc_s_crop,
+ .vidioc_g_crop = cx25821_vidioc_g_crop,
+ .vidioc_enum_input = cx25821_vidioc_enum_input,
+ .vidioc_g_input = cx25821_vidioc_g_input,
+ .vidioc_s_input = cx25821_vidioc_s_input,
+ .vidioc_g_ctrl = cx25821_vidioc_g_ctrl,
.vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_queryctrl = cx25821_vidioc_queryctrl,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_log_status = vidioc_log_status,
- .vidioc_g_priority = vidioc_g_priority,
- .vidioc_s_priority = vidioc_s_priority,
+ .vidioc_g_priority = cx25821_vidioc_g_priority,
+ .vidioc_s_priority = cx25821_vidioc_s_priority,
#ifdef CONFIG_VIDEO_V4L1_COMPAT
- .vidiocgmbuf = vidiocgmbuf,
+ .vidiocgmbuf = cx25821_vidiocgmbuf,
#endif
#ifdef TUNER_FLAG
- .vidioc_g_tuner = vidioc_g_tuner,
- .vidioc_s_tuner = vidioc_s_tuner,
- .vidioc_g_frequency = vidioc_g_frequency,
- .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_g_tuner = cx25821_vidioc_g_tuner,
+ .vidioc_s_tuner = cx25821_vidioc_s_tuner,
+ .vidioc_g_frequency = cx25821_vidioc_g_frequency,
+ .vidioc_s_frequency = cx25821_vidioc_s_frequency,
#endif
#ifdef CONFIG_VIDEO_ADV_DEBUG
- .vidioc_g_register = vidioc_g_register,
- .vidioc_s_register = vidioc_s_register,
+ .vidioc_g_register = cx25821_vidioc_g_register,
+ .vidioc_s_register = cx25821_vidioc_s_register,
#endif
};
diff --git a/drivers/staging/cx25821/cx25821-vidups10.c b/drivers/staging/cx25821/cx25821-vidups10.c
index b76d9f62c3d1..89c8592e5f02 100644
--- a/drivers/staging/cx25821/cx25821-vidups10.c
+++ b/drivers/staging/cx25821/cx25821-vidups10.c
@@ -86,10 +86,10 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
}
static struct videobuf_queue_ops cx25821_video_qops = {
- .buf_setup = buffer_setup,
- .buf_prepare = buffer_prepare,
+ .buf_setup = cx25821_buffer_setup,
+ .buf_prepare = cx25821_buffer_prepare,
.buf_queue = buffer_queue,
- .buf_release = buffer_release,
+ .buf_release = cx25821_buffer_release,
};
static int video_open(struct file *file)
@@ -143,7 +143,7 @@ static ssize_t video_read(struct file *file, char __user * data, size_t count,
switch (fh->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (res_locked(fh->dev, RESOURCE_VIDEO10))
+ if (cx25821_res_locked(fh->dev, RESOURCE_VIDEO10))
return -EBUSY;
return videobuf_read_one(&fh->vidq, data, count, ppos,
@@ -161,7 +161,7 @@ static unsigned int video_poll(struct file *file,
struct cx25821_fh *fh = file->private_data;
struct cx25821_buffer *buf;
- if (res_check(fh, RESOURCE_VIDEO10)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO10)) {
/* streaming capture */
if (list_empty(&fh->vidq.stream))
return POLLERR;
@@ -189,13 +189,13 @@ static int video_release(struct file *file)
//cx_write(channel10->dma_ctl, 0);
/* stop video capture */
- if (res_check(fh, RESOURCE_VIDEO10)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO10)) {
videobuf_queue_cancel(&fh->vidq);
- res_free(dev, fh, RESOURCE_VIDEO10);
+ cx25821_res_free(dev, fh, RESOURCE_VIDEO10);
}
if (fh->vidq.read_buf) {
- buffer_release(&fh->vidq, fh->vidq.read_buf);
+ cx25821_buffer_release(&fh->vidq, fh->vidq.read_buf);
kfree(fh->vidq.read_buf);
}
@@ -222,7 +222,7 @@ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
return -EINVAL;
}
- if (unlikely(!res_get(dev, fh, get_resource(fh, RESOURCE_VIDEO10)))) {
+ if (unlikely(!cx25821_res_get(dev, fh, cx25821_get_resource(fh, RESOURCE_VIDEO10)))) {
return -EBUSY;
}
@@ -240,11 +240,11 @@ static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
if (i != fh->type)
return -EINVAL;
- res = get_resource(fh, RESOURCE_VIDEO10);
+ res = cx25821_get_resource(fh, RESOURCE_VIDEO10);
err = videobuf_streamoff(get_queue(fh));
if (err < 0)
return err;
- res_free(dev, fh, res);
+ cx25821_res_free(dev, fh, res);
return 0;
}
@@ -305,7 +305,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
}
dprintk(2, "%s()\n", __func__);
- err = vidioc_try_fmt_vid_cap(file, priv, f);
+ err = cx25821_vidioc_try_fmt_vid_cap(file, priv, f);
if (0 != err)
return err;
@@ -362,50 +362,50 @@ static const struct v4l2_file_operations video_fops = {
.release = video_release,
.read = video_read,
.poll = video_poll,
- .mmap = video_mmap,
+ .mmap = cx25821_video_mmap,
.ioctl = video_ioctl_upstream10,
};
static const struct v4l2_ioctl_ops video_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_querycap = cx25821_vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = cx25821_vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = cx25821_vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = cx25821_vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_reqbufs = cx25821_vidioc_reqbufs,
+ .vidioc_querybuf = cx25821_vidioc_querybuf,
+ .vidioc_qbuf = cx25821_vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
#ifdef TUNER_FLAG
- .vidioc_s_std = vidioc_s_std,
- .vidioc_querystd = vidioc_querystd,
+ .vidioc_s_std = cx25821_vidioc_s_std,
+ .vidioc_querystd = cx25821_vidioc_querystd,
#endif
- .vidioc_cropcap = vidioc_cropcap,
- .vidioc_s_crop = vidioc_s_crop,
- .vidioc_g_crop = vidioc_g_crop,
- .vidioc_enum_input = vidioc_enum_input,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
- .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_cropcap = cx25821_vidioc_cropcap,
+ .vidioc_s_crop = cx25821_vidioc_s_crop,
+ .vidioc_g_crop = cx25821_vidioc_g_crop,
+ .vidioc_enum_input = cx25821_vidioc_enum_input,
+ .vidioc_g_input = cx25821_vidioc_g_input,
+ .vidioc_s_input = cx25821_vidioc_s_input,
+ .vidioc_g_ctrl = cx25821_vidioc_g_ctrl,
.vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_queryctrl = cx25821_vidioc_queryctrl,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_log_status = vidioc_log_status,
- .vidioc_g_priority = vidioc_g_priority,
- .vidioc_s_priority = vidioc_s_priority,
+ .vidioc_g_priority = cx25821_vidioc_g_priority,
+ .vidioc_s_priority = cx25821_vidioc_s_priority,
#ifdef CONFIG_VIDEO_V4L1_COMPAT
- .vidiocgmbuf = vidiocgmbuf,
+ .vidiocgmbuf = cx25821_vidiocgmbuf,
#endif
#ifdef TUNER_FLAG
- .vidioc_g_tuner = vidioc_g_tuner,
- .vidioc_s_tuner = vidioc_s_tuner,
- .vidioc_g_frequency = vidioc_g_frequency,
- .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_g_tuner = cx25821_vidioc_g_tuner,
+ .vidioc_s_tuner = cx25821_vidioc_s_tuner,
+ .vidioc_g_frequency = cx25821_vidioc_g_frequency,
+ .vidioc_s_frequency = cx25821_vidioc_s_frequency,
#endif
#ifdef CONFIG_VIDEO_ADV_DEBUG
- .vidioc_g_register = vidioc_g_register,
- .vidioc_s_register = vidioc_s_register,
+ .vidioc_g_register = cx25821_vidioc_g_register,
+ .vidioc_s_register = cx25821_vidioc_s_register,
#endif
};
diff --git a/drivers/staging/cx25821/cx25821-vidups9.c b/drivers/staging/cx25821/cx25821-vidups9.c
index 1580da3b29aa..c8e8083c7934 100644
--- a/drivers/staging/cx25821/cx25821-vidups9.c
+++ b/drivers/staging/cx25821/cx25821-vidups9.c
@@ -86,10 +86,10 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
}
static struct videobuf_queue_ops cx25821_video_qops = {
- .buf_setup = buffer_setup,
- .buf_prepare = buffer_prepare,
+ .buf_setup = cx25821_buffer_setup,
+ .buf_prepare = cx25821_buffer_prepare,
.buf_queue = buffer_queue,
- .buf_release = buffer_release,
+ .buf_release = cx25821_buffer_release,
};
static int video_open(struct file *file)
@@ -143,7 +143,7 @@ static ssize_t video_read(struct file *file, char __user * data, size_t count,
switch (fh->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (res_locked(fh->dev, RESOURCE_VIDEO9))
+ if (cx25821_res_locked(fh->dev, RESOURCE_VIDEO9))
return -EBUSY;
return videobuf_read_one(&fh->vidq, data, count, ppos,
@@ -161,7 +161,7 @@ static unsigned int video_poll(struct file *file,
struct cx25821_fh *fh = file->private_data;
struct cx25821_buffer *buf;
- if (res_check(fh, RESOURCE_VIDEO9)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO9)) {
/* streaming capture */
if (list_empty(&fh->vidq.stream))
return POLLERR;
@@ -189,13 +189,13 @@ static int video_release(struct file *file)
//cx_write(channel9->dma_ctl, 0);
/* stop video capture */
- if (res_check(fh, RESOURCE_VIDEO9)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO9)) {
videobuf_queue_cancel(&fh->vidq);
- res_free(dev, fh, RESOURCE_VIDEO9);
+ cx25821_res_free(dev, fh, RESOURCE_VIDEO9);
}
if (fh->vidq.read_buf) {
- buffer_release(&fh->vidq, fh->vidq.read_buf);
+ cx25821_buffer_release(&fh->vidq, fh->vidq.read_buf);
kfree(fh->vidq.read_buf);
}
@@ -222,7 +222,7 @@ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
return -EINVAL;
}
- if (unlikely(!res_get(dev, fh, get_resource(fh, RESOURCE_VIDEO9)))) {
+ if (unlikely(!cx25821_res_get(dev, fh, cx25821_get_resource(fh, RESOURCE_VIDEO9)))) {
return -EBUSY;
}
@@ -240,11 +240,11 @@ static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
if (i != fh->type)
return -EINVAL;
- res = get_resource(fh, RESOURCE_VIDEO9);
+ res = cx25821_get_resource(fh, RESOURCE_VIDEO9);
err = videobuf_streamoff(get_queue(fh));
if (err < 0)
return err;
- res_free(dev, fh, res);
+ cx25821_res_free(dev, fh, res);
return 0;
}
@@ -305,7 +305,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
}
dprintk(2, "%s()\n", __func__);
- err = vidioc_try_fmt_vid_cap(file, priv, f);
+ err = cx25821_vidioc_try_fmt_vid_cap(file, priv, f);
if (0 != err)
return err;
@@ -360,50 +360,50 @@ static const struct v4l2_file_operations video_fops = {
.release = video_release,
.read = video_read,
.poll = video_poll,
- .mmap = video_mmap,
+ .mmap = cx25821_video_mmap,
.ioctl = video_ioctl_upstream9,
};
static const struct v4l2_ioctl_ops video_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_querycap = cx25821_vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = cx25821_vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = cx25821_vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = cx25821_vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_reqbufs = cx25821_vidioc_reqbufs,
+ .vidioc_querybuf = cx25821_vidioc_querybuf,
+ .vidioc_qbuf = cx25821_vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
#ifdef TUNER_FLAG
- .vidioc_s_std = vidioc_s_std,
- .vidioc_querystd = vidioc_querystd,
+ .vidioc_s_std = cx25821_vidioc_s_std,
+ .vidioc_querystd = cx25821_vidioc_querystd,
#endif
- .vidioc_cropcap = vidioc_cropcap,
- .vidioc_s_crop = vidioc_s_crop,
- .vidioc_g_crop = vidioc_g_crop,
- .vidioc_enum_input = vidioc_enum_input,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
- .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_cropcap = cx25821_vidioc_cropcap,
+ .vidioc_s_crop = cx25821_vidioc_s_crop,
+ .vidioc_g_crop = cx25821_vidioc_g_crop,
+ .vidioc_enum_input = cx25821_vidioc_enum_input,
+ .vidioc_g_input = cx25821_vidioc_g_input,
+ .vidioc_s_input = cx25821_vidioc_s_input,
+ .vidioc_g_ctrl = cx25821_vidioc_g_ctrl,
.vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_queryctrl = cx25821_vidioc_queryctrl,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_log_status = vidioc_log_status,
- .vidioc_g_priority = vidioc_g_priority,
- .vidioc_s_priority = vidioc_s_priority,
+ .vidioc_g_priority = cx25821_vidioc_g_priority,
+ .vidioc_s_priority = cx25821_vidioc_s_priority,
#ifdef CONFIG_VIDEO_V4L1_COMPAT
- .vidiocgmbuf = vidiocgmbuf,
+ .vidiocgmbuf = cx25821_vidiocgmbuf,
#endif
#ifdef TUNER_FLAG
- .vidioc_g_tuner = vidioc_g_tuner,
- .vidioc_s_tuner = vidioc_s_tuner,
- .vidioc_g_frequency = vidioc_g_frequency,
- .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_g_tuner = cx25821_vidioc_g_tuner,
+ .vidioc_s_tuner = cx25821_vidioc_s_tuner,
+ .vidioc_g_frequency = cx25821_vidioc_g_frequency,
+ .vidioc_s_frequency = cx25821_vidioc_s_frequency,
#endif
#ifdef CONFIG_VIDEO_ADV_DEBUG
- .vidioc_g_register = vidioc_g_register,
- .vidioc_s_register = vidioc_s_register,
+ .vidioc_g_register = cx25821_vidioc_g_register,
+ .vidioc_s_register = cx25821_vidioc_s_register,
#endif
};
diff --git a/drivers/staging/et131x/et131x_netdev.c b/drivers/staging/et131x/et131x_netdev.c
index ab047f2ff72c..abc82c3dad21 100644
--- a/drivers/staging/et131x/et131x_netdev.c
+++ b/drivers/staging/et131x/et131x_netdev.c
@@ -404,7 +404,7 @@ void et131x_multicast(struct net_device *netdev)
struct et131x_adapter *adapter = netdev_priv(netdev);
uint32_t PacketFilter = 0;
unsigned long flags;
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
int i;
spin_lock_irqsave(&adapter->Lock, flags);
@@ -449,10 +449,10 @@ void et131x_multicast(struct net_device *netdev)
/* Set values in the private adapter struct */
i = 0;
- netdev_for_each_mc_addr(mclist, netdev) {
+ netdev_for_each_mc_addr(ha, netdev) {
if (i == NIC_MAX_MCAST_LIST)
break;
- memcpy(adapter->MCList[i++], mclist->dmi_addr, ETH_ALEN);
+ memcpy(adapter->MCList[i++], ha->addr, ETH_ALEN);
}
adapter->MCAddressCount = i;
diff --git a/drivers/staging/netwave/netwave_cs.c b/drivers/staging/netwave/netwave_cs.c
index 3875a722d12b..f1ee2cbc8407 100644
--- a/drivers/staging/netwave/netwave_cs.c
+++ b/drivers/staging/netwave/netwave_cs.c
@@ -61,7 +61,6 @@
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/ds.h>
-#include <pcmcia/mem_op.h>
#include <asm/system.h>
#include <asm/io.h>
@@ -382,10 +381,6 @@ static int netwave_probe(struct pcmcia_device *link)
link->io.Attributes2 = IO_DATA_PATH_WIDTH_16; */
link->io.IOAddrLines = 5;
- /* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = &netwave_interrupt;
-
/* General socket configuration */
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -732,7 +727,7 @@ static int netwave_pcmcia_config(struct pcmcia_device *link) {
* Now allocate an interrupt line. Note that this does not
* actually assign a handler to the interrupt.
*/
- ret = pcmcia_request_irq(link, &link->irq);
+ ret = pcmcia_request_irq(link, netwave_interrupt);
if (ret)
goto failed;
@@ -767,7 +762,7 @@ static int netwave_pcmcia_config(struct pcmcia_device *link) {
ramBase = ioremap(req.Base, 0x8000);
priv->ramBase = ramBase;
- dev->irq = link->irq.AssignedIRQ;
+ dev->irq = link->irq;
dev->base_addr = link->io.BasePort1;
SET_NETDEV_DEV(dev, &link->dev);
diff --git a/drivers/staging/rtl8187se/r8180_core.c b/drivers/staging/rtl8187se/r8180_core.c
index 55d12e3271de..b09ce4b517a4 100644
--- a/drivers/staging/rtl8187se/r8180_core.c
+++ b/drivers/staging/rtl8187se/r8180_core.c
@@ -61,7 +61,7 @@ static struct pci_device_id rtl8180_pci_id_tbl[] __devinitdata = {
};
-static char* ifname = "wlan%d";
+static char ifname[IFNAMSIZ] = "wlan%d";
static int hwseqnum = 0;
static int hwwep = 0;
static int channels = 0x3fff;
@@ -74,7 +74,7 @@ MODULE_AUTHOR("Andrea Merello <andreamrl@tiscali.it>");
MODULE_DESCRIPTION("Linux driver for Realtek RTL8180 / RTL8185 WiFi cards");
-module_param(ifname, charp, S_IRUGO|S_IWUSR );
+module_param_string(ifname, ifname, sizeof(ifname), S_IRUGO|S_IWUSR);
module_param(hwseqnum,int, S_IRUGO|S_IWUSR);
module_param(hwwep,int, S_IRUGO|S_IWUSR);
module_param(channels,int, S_IRUGO|S_IWUSR);
@@ -3613,7 +3613,7 @@ static int __devinit rtl8180_pci_probe(struct pci_dev *pdev,
if (dev_alloc_name(dev, ifname) < 0){
DMESG("Oops: devname already taken! Trying wlan%%d...\n");
- ifname = "wlan%d";
+ strcpy(ifname, "wlan%d");
dev_alloc_name(dev, ifname);
}
diff --git a/drivers/staging/rtl8192e/r8192E_core.c b/drivers/staging/rtl8192e/r8192E_core.c
index bb7e1ef28d3b..dbb6fe3c9ddd 100644
--- a/drivers/staging/rtl8192e/r8192E_core.c
+++ b/drivers/staging/rtl8192e/r8192E_core.c
@@ -116,7 +116,7 @@ static struct pci_device_id rtl8192_pci_id_tbl[] __devinitdata = {
{}
};
-static char* ifname = "wlan%d";
+static char ifname[IFNAMSIZ] = "wlan%d";
static int hwwep = 1; //default use hw. set 0 to use software security
static int channels = 0x3fff;
@@ -127,7 +127,7 @@ MODULE_DEVICE_TABLE(pci, rtl8192_pci_id_tbl);
MODULE_DESCRIPTION("Linux driver for Realtek RTL819x WiFi cards");
-module_param(ifname, charp, S_IRUGO|S_IWUSR );
+module_param_string(ifname, ifname, sizeof(ifname), S_IRUGO|S_IWUSR);
//module_param(hwseqnum,int, S_IRUGO|S_IWUSR);
module_param(hwwep,int, S_IRUGO|S_IWUSR);
module_param(channels,int, S_IRUGO|S_IWUSR);
@@ -6453,7 +6453,7 @@ static int __devinit rtl8192_pci_probe(struct pci_dev *pdev,
if (dev_alloc_name(dev, ifname) < 0){
RT_TRACE(COMP_INIT, "Oops: devname already taken! Trying wlan%%d...\n");
- ifname = "wlan%d";
+ strcpy(ifname, "wlan%d");
dev_alloc_name(dev, ifname);
}
diff --git a/drivers/staging/rtl8192su/r8192U_core.c b/drivers/staging/rtl8192su/r8192U_core.c
index 04d9b85f3d4c..fcad2cdfeae7 100644
--- a/drivers/staging/rtl8192su/r8192U_core.c
+++ b/drivers/staging/rtl8192su/r8192U_core.c
@@ -142,13 +142,13 @@ MODULE_VERSION("V 1.1");
MODULE_DEVICE_TABLE(usb, rtl8192_usb_id_tbl);
MODULE_DESCRIPTION("Linux driver for Realtek RTL8192 USB WiFi cards");
-static char* ifname = "wlan%d";
+static char ifname[IFNAMSIZ] = "wlan%d";
static int hwwep = 1; //default use hw. set 0 to use software security
static int channels = 0x3fff;
-module_param(ifname, charp, S_IRUGO|S_IWUSR );
+module_param_string(ifname, ifname, sizeof(ifname), S_IRUGO|S_IWUSR);
//module_param(hwseqnum,int, S_IRUGO|S_IWUSR);
module_param(hwwep,int, S_IRUGO|S_IWUSR);
module_param(channels,int, S_IRUGO|S_IWUSR);
@@ -7432,7 +7432,7 @@ static int __devinit rtl8192_usb_probe(struct usb_interface *intf,
if (dev_alloc_name(dev, ifname) < 0){
RT_TRACE(COMP_INIT, "Oops: devname already taken! Trying wlan%%d...\n");
- ifname = "wlan%d";
+ strcpy(ifname, "wlan%d");
dev_alloc_name(dev, ifname);
}
diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c
index 7daeced317c4..bebf0fd2af85 100644
--- a/drivers/staging/slicoss/slicoss.c
+++ b/drivers/staging/slicoss/slicoss.c
@@ -1367,12 +1367,12 @@ static void slic_mcast_set_list(struct net_device *dev)
struct adapter *adapter = netdev_priv(dev);
int status = STATUS_SUCCESS;
char *addresses;
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
ASSERT(adapter);
- netdev_for_each_mc_addr(mc_list, dev) {
- addresses = (char *) &mc_list->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addresses = (char *) &ha->addr;
status = slic_mcast_add_list(adapter, addresses);
if (status != STATUS_SUCCESS)
break;
diff --git a/drivers/staging/tm6000/Kconfig b/drivers/staging/tm6000/Kconfig
new file mode 100644
index 000000000000..5fe759cc2ee9
--- /dev/null
+++ b/drivers/staging/tm6000/Kconfig
@@ -0,0 +1,32 @@
+config VIDEO_TM6000
+ tristate "TV Master TM5600/6000/6010 driver"
+ depends on VIDEO_DEV && I2C && INPUT && USB && EXPERIMENTAL
+ select VIDEO_TUNER
+ select TUNER_XC2028
+ select VIDEOBUF_VMALLOC
+ help
+ Support for TM5600/TM6000/TM6010 USB devices
+
+ Since these cards have no MPEG decoder onboard, they transmit
+ only compressed MPEG data over the USB bus, so you need
+ an external software decoder to watch TV on your computer.
+
+ Say Y if you own such a device and want to use it.
+
+config VIDEO_TM6000_ALSA
+ tristate "TV Master TM5600/6000/6010 audio support"
+ depends on VIDEO_TM6000 && SND && EXPERIMENTAL
+ select SND_PCM
+ ---help---
+ This is a video4linux driver for direct (DMA) audio for
+ TM5600/TM6000/TM6010 USB Devices.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tm6000-alsa.
+
+config VIDEO_TM6000_DVB
+ bool "DVB Support for tm6000 based TV cards"
+ depends on VIDEO_TM6000 && DVB_CORE && EXPERIMENTAL
+ select DVB_ZL10353
+ ---help---
+ This adds support for DVB cards based on the tm5600/tm6000 chip.
diff --git a/drivers/staging/tm6000/Makefile b/drivers/staging/tm6000/Makefile
new file mode 100644
index 000000000000..93370fccc073
--- /dev/null
+++ b/drivers/staging/tm6000/Makefile
@@ -0,0 +1,17 @@
+tm6000-objs := tm6000-cards.o \
+ tm6000-core.o \
+ tm6000-i2c.o \
+ tm6000-video.o \
+ tm6000-stds.o
+
+ifeq ($(CONFIG_VIDEO_TM6000_DVB),y)
+tm6000-objs += tm6000-dvb.o
+endif
+
+obj-$(CONFIG_VIDEO_TM6000) += tm6000.o
+obj-$(CONFIG_VIDEO_TM6000_ALSA) += tm6000-alsa.o
+
+EXTRA_CFLAGS = -Idrivers/media/video
+EXTRA_CFLAGS += -Idrivers/media/common/tuners
+EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
+EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
diff --git a/drivers/staging/tm6000/README b/drivers/staging/tm6000/README
new file mode 100644
index 000000000000..078e803e47b1
--- /dev/null
+++ b/drivers/staging/tm6000/README
@@ -0,0 +1,21 @@
+Todo:
+ - Fix the loss of some blocks when receiving the video URBs
+ - Add a lock at tm6000_read_write_usb() to prevent two simultaneous accesses to the
+   URB control transfers
+ - Properly add the locks at tm6000-video
+ - Add audio support
+ - Add IR support
+ - Do several cleanups
+ - I think that frame1/frame0 are inverted. This causes a funny effect on the image.
+   The fix is trivial, but requires some tests
+ - My tm6010 devices sometimes stop working. I need to turn them off, remove them
+   from my machine and wait for a while before they work again. I'm starting to think that
+   it is an overheating issue - is there a workaround that we could do?
+ - Sometimes the tm6010 doesn't read the eeprom at the proper time (hardware bug), so the device
+   gets mis-detected as a "generic" tm6000. This can be really bad if the tuner is the
+   low-power one, as it may result in loading the high-power firmware, which could damage
+   the device. Maybe we should read the eeprom to double-check when the device is marked as "generic"
+ - Coding Style fixes
+ - sparse cleanups
+
+Please send patches to linux-media@vger.kernel.org
diff --git a/drivers/staging/tm6000/tm6000-alsa.c b/drivers/staging/tm6000/tm6000-alsa.c
new file mode 100644
index 000000000000..34045162bddf
--- /dev/null
+++ b/drivers/staging/tm6000/tm6000-alsa.c
@@ -0,0 +1,415 @@
+/*
+ *
+ * Support for audio capture for tm5600/6000/6010
+ * (c) 2007-2008 Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * Based on cx88-alsa.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/usb.h>
+#include <linux/slab.h>
+
+#include <asm/delay.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/control.h>
+#include <sound/initval.h>
+
+
+#include "tm6000.h"
+#include "tm6000-regs.h"
+
+#undef dprintk
+
+#define dprintk(level, fmt, arg...) do { \
+ if (debug >= level) \
+ printk(KERN_INFO "%s/1: " fmt, chip->core->name , ## arg); \
+ } while (0)
+
+/****************************************************************************
+ Data type declarations - Can be moved to a header file later
+ ****************************************************************************/
+
+struct snd_tm6000_card {
+ struct snd_card *card;
+
+ spinlock_t reg_lock;
+
+ atomic_t count;
+
+ unsigned int period_size;
+ unsigned int num_periods;
+
+ struct tm6000_core *core;
+ struct tm6000_buffer *buf;
+
+ int bufsize;
+
+ struct snd_pcm_substream *substream;
+};
+
+
+/****************************************************************************
+ Module global static vars
+ ****************************************************************************/
+
+static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
+static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
+static int enable[SNDRV_CARDS] = {1, [1 ... (SNDRV_CARDS - 1)] = 1};
+
+module_param_array(enable, bool, NULL, 0444);
+MODULE_PARM_DESC(enable, "Enable tm6000x soundcard (default: enabled).");
+
+module_param_array(index, int, NULL, 0444);
+MODULE_PARM_DESC(index, "Index value for tm6000x capture interface(s).");
+
+
+/****************************************************************************
+ Module macros
+ ****************************************************************************/
+
+MODULE_DESCRIPTION("ALSA driver module for tm5600/tm6000/tm6010 based TV cards");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_LICENSE("GPL");
+MODULE_SUPPORTED_DEVICE("{{Trident,tm5600},"
+ "{{Trident,tm6000},"
+ "{{Trident,tm6010}");
+static unsigned int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "enable debug messages");
+
+/****************************************************************************
+ Module-specific functions
+ ****************************************************************************/
+
+/*
+ * BOARD Specific: Sets audio DMA
+ */
+
+static int _tm6000_start_audio_dma(struct snd_tm6000_card *chip)
+{
+ struct tm6000_core *core = chip->core;
+ int val;
+
+ /* Enables audio */
+ val = tm6000_get_reg(core, TM6010_REQ07_RCC_ACTIVE_VIDEO_IF, 0x0);
+ val |= 0x20;
+ tm6000_set_reg(core, TM6010_REQ07_RCC_ACTIVE_VIDEO_IF, val);
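+ /* bit 0x20 of this register appears to gate the audio stream;
+ * the stop path below clears it again */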
+
+ tm6000_set_reg(core, TM6010_REQ08_R01_A_INIT, 0x80);
+
+ return 0;
+}
+
+/*
+ * BOARD Specific: Stops audio DMA
+ */
+static int _tm6000_stop_audio_dma(struct snd_tm6000_card *chip)
+{
+ struct tm6000_core *core = chip->core;
+ int val;
+ dprintk(1, "Stopping audio DMA\n");
+
+ /* Disables audio */
+ val = tm6000_get_reg(core, TM6010_REQ07_RCC_ACTIVE_VIDEO_IF, 0x0);
+ val &= ~0x20;
+ tm6000_set_reg(core, TM6010_REQ07_RCC_ACTIVE_VIDEO_IF, val);
+
+ tm6000_set_reg(core, TM6010_REQ08_R01_A_INIT, 0);
+
+ return 0;
+}
+
+static int dsp_buffer_free(struct snd_tm6000_card *chip)
+{
+ BUG_ON(!chip->bufsize);
+
+ dprintk(2, "Freeing buffer\n");
+
+ /* FIXME: Frees buffer */
+
+ chip->bufsize = 0;
+
+ return 0;
+}
+
+/****************************************************************************
+ ALSA PCM Interface
+ ****************************************************************************/
+
+/*
+ * Digital hardware definition
+ */
+#define DEFAULT_FIFO_SIZE 4096
+
+static struct snd_pcm_hardware snd_tm6000_digital_hw = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP_VALID,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+
+ .rates = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000,
+ .rate_min = 44100,
+ .rate_max = 48000,
+ .channels_min = 2,
+ .channels_max = 2,
+ .period_bytes_min = DEFAULT_FIFO_SIZE/4,
+ .period_bytes_max = DEFAULT_FIFO_SIZE/4,
+ .periods_min = 1,
+ .periods_max = 1024,
+ .buffer_bytes_max = (1024*1024),
+};
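+/* The period size is pinned to DEFAULT_FIFO_SIZE/4 = 1024 bytes; only the number
+ * of periods (1..1024, constrained to a power of two at open time) is negotiable. */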
+
+/*
+ * audio pcm capture open callback
+ */
+static int snd_tm6000_pcm_open(struct snd_pcm_substream *substream)
+{
+ struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ int err;
+
+ err = snd_pcm_hw_constraint_pow2(runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (err < 0)
+ goto _error;
+
+ chip->substream = substream;
+
+ runtime->hw = snd_tm6000_digital_hw;
+
+ return 0;
+_error:
+ dprintk(1, "Error opening PCM!\n");
+ return err;
+}
+
+/*
+ * audio close callback
+ */
+static int snd_tm6000_close(struct snd_pcm_substream *substream)
+{
+ return 0;
+}
+
+/*
+ * hw_params callback
+ */
+static int snd_tm6000_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
+{
+ struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream);
+
+ if (substream->runtime->dma_area) {
+ dsp_buffer_free(chip);
+ substream->runtime->dma_area = NULL;
+ }
+
+ chip->period_size = params_period_bytes(hw_params);
+ chip->num_periods = params_periods(hw_params);
+ chip->bufsize = chip->period_size * params_periods(hw_params);
+
+ BUG_ON(!chip->bufsize);
+
+ dprintk(1, "Setting buffer\n");
+
+ /* FIXME: Allocate buffer for audio */
+
+
+ return 0;
+}
+
+/*
+ * hw free callback
+ */
+static int snd_tm6000_hw_free(struct snd_pcm_substream *substream)
+{
+
+ struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream);
+
+ if (substream->runtime->dma_area) {
+ dsp_buffer_free(chip);
+ substream->runtime->dma_area = NULL;
+ }
+
+ return 0;
+}
+
+/*
+ * prepare callback
+ */
+static int snd_tm6000_prepare(struct snd_pcm_substream *substream)
+{
+ return 0;
+}
+
+
+/*
+ * trigger callback
+ */
+static int snd_tm6000_card_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream);
+ int err;
+
+ spin_lock(&chip->reg_lock);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ err = _tm6000_start_audio_dma(chip);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ err = _tm6000_stop_audio_dma(chip);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ spin_unlock(&chip->reg_lock);
+
+ return err;
+}
+
+/*
+ * pointer callback
+ */
+static snd_pcm_uframes_t snd_tm6000_pointer(struct snd_pcm_substream *substream)
+{
+ struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ u16 count;
+
+ count = atomic_read(&chip->count);
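+ /* chip->count is meant to count completed periods (nothing increments it yet);
+ * reduce it modulo runtime->periods and scale by the period size to get the
+ * current position in frames */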
+
+ return runtime->period_size * (count & (runtime->periods-1));
+}
+
+/*
+ * operators
+ */
+static struct snd_pcm_ops snd_tm6000_pcm_ops = {
+ .open = snd_tm6000_pcm_open,
+ .close = snd_tm6000_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = snd_tm6000_hw_params,
+ .hw_free = snd_tm6000_hw_free,
+ .prepare = snd_tm6000_prepare,
+ .trigger = snd_tm6000_card_trigger,
+ .pointer = snd_tm6000_pointer,
+};
+
+/*
+ * create a PCM device
+ */
+static int __devinit snd_tm6000_pcm(struct snd_tm6000_card *chip,
+ int device, char *name)
+{
+ int err;
+ struct snd_pcm *pcm;
+
+ err = snd_pcm_new(chip->card, name, device, 0, 1, &pcm);
+ if (err < 0)
+ return err;
+ pcm->private_data = chip;
+ strcpy(pcm->name, name);
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_tm6000_pcm_ops);
+
+ return 0;
+}
+
+/* FIXME: Control interface - How to control volume/mute? */
+
+/****************************************************************************
+ Basic Flow for Sound Devices
+ ****************************************************************************/
+
+/*
+ * Alsa Constructor - Component probe
+ */
+
+int tm6000_audio_init(struct tm6000_core *dev, int idx)
+{
+ struct snd_card *card;
+ struct snd_tm6000_card *chip;
+ int rc, len;
+ char component[14];
+
+ if (idx >= SNDRV_CARDS)
+ return -ENODEV;
+
+ if (!enable[idx])
+ return -ENOENT;
+
+ rc = snd_card_create(index[idx], id[idx], THIS_MODULE, 0, &card);
+ if (rc < 0) {
+ snd_printk(KERN_ERR "cannot create card instance %d\n", idx);
+ return rc;
+ }
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip) {
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ chip->core = dev;
+ chip->card = card;
+
+ strcpy(card->driver, "tm6000-alsa");
+ sprintf(component, "USB%04x:%04x",
+ le16_to_cpu(dev->udev->descriptor.idVendor),
+ le16_to_cpu(dev->udev->descriptor.idProduct));
+ snd_component_add(card, component);
+
+ if (dev->udev->descriptor.iManufacturer)
+ len = usb_string(dev->udev,
+ dev->udev->descriptor.iManufacturer,
+ card->longname, sizeof(card->longname));
+ else
+ len = 0;
+
+ if (len > 0)
+ strlcat(card->longname, " ", sizeof(card->longname));
+
+ strlcat(card->longname, card->shortname, sizeof(card->longname));
+
+ len = strlcat(card->longname, " at ", sizeof(card->longname));
+
+ if (len < sizeof(card->longname))
+ usb_make_path(dev->udev, card->longname + len,
+ sizeof(card->longname) - len);
+
+ strlcat(card->longname,
+ dev->udev->speed == USB_SPEED_LOW ? ", low speed" :
+ dev->udev->speed == USB_SPEED_FULL ? ", full speed" :
+ ", high speed",
+ sizeof(card->longname));
+
+ rc = snd_tm6000_pcm(chip, 0, "tm6000 Digital");
+ if (rc < 0)
+ goto error;
+
+ rc = snd_card_register(card);
+ if (rc < 0)
+ goto error;
+
+
+ return 0;
+
+error:
+ snd_card_free(card);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tm6000_audio_init);
+
diff --git a/drivers/staging/tm6000/tm6000-cards.c b/drivers/staging/tm6000/tm6000-cards.c
new file mode 100644
index 000000000000..4ae92b17b503
--- /dev/null
+++ b/drivers/staging/tm6000/tm6000-cards.c
@@ -0,0 +1,974 @@
+/*
+ tm6000-cards.c - driver for TM5600/TM6000/TM6010 USB video capture devices
+
+ Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation version 2
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/usb.h>
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <media/v4l2-common.h>
+#include <media/tuner.h>
+#include <media/tvaudio.h>
+#include <media/i2c-addr.h>
+
+#include "tm6000.h"
+#include "tm6000-regs.h"
+#include "tuner-xc2028.h"
+#include "xc5000.h"
+
+#define TM6000_BOARD_UNKNOWN 0
+#define TM5600_BOARD_GENERIC 1
+#define TM6000_BOARD_GENERIC 2
+#define TM6010_BOARD_GENERIC 3
+#define TM5600_BOARD_10MOONS_UT821 4
+#define TM5600_BOARD_10MOONS_UT330 5
+#define TM6000_BOARD_ADSTECH_DUAL_TV 6
+#define TM6000_BOARD_FREECOM_AND_SIMILAR 7
+#define TM6000_BOARD_ADSTECH_MINI_DUAL_TV 8
+#define TM6010_BOARD_HAUPPAUGE_900H 9
+#define TM6010_BOARD_BEHOLD_WANDER 10
+#define TM6010_BOARD_BEHOLD_VOYAGER 11
+#define TM6010_BOARD_TERRATEC_CINERGY_HYBRID_XE 12
+#define TM6010_BOARD_TWINHAN_TU501 13
+
+#define TM6000_MAXBOARDS 16
+static unsigned int card[] = {[0 ... (TM6000_MAXBOARDS - 1)] = UNSET };
+
+module_param_array(card, int, NULL, 0444);
+
+static unsigned long tm6000_devused;
+
+
+struct tm6000_board {
+ char *name;
+
+ struct tm6000_capabilities caps;
+
+ enum tm6000_devtype type; /* variant of the chipset */
+ int tuner_type; /* type of the tuner */
+ int tuner_addr; /* tuner address */
+ int demod_addr; /* demodulator address */
+
+ struct tm6000_gpio gpio;
+};
+
+struct tm6000_board tm6000_boards[] = {
+ [TM6000_BOARD_UNKNOWN] = {
+ .name = "Unknown tm6000 video grabber",
+ .caps = {
+ .has_tuner = 1,
+ },
+ .gpio = {
+ .tuner_reset = TM6000_GPIO_1,
+ },
+ },
+ [TM5600_BOARD_GENERIC] = {
+ .name = "Generic tm5600 board",
+ .type = TM5600,
+ .tuner_type = TUNER_XC2028,
+ .tuner_addr = 0xc2 >> 1,
+ .caps = {
+ .has_tuner = 1,
+ },
+ .gpio = {
+ .tuner_reset = TM6000_GPIO_1,
+ },
+ },
+ [TM6000_BOARD_GENERIC] = {
+ .name = "Generic tm6000 board",
+ .tuner_type = TUNER_XC2028,
+ .tuner_addr = 0xc2 >> 1,
+ .caps = {
+ .has_tuner = 1,
+ .has_dvb = 1,
+ },
+ .gpio = {
+ .tuner_reset = TM6000_GPIO_1,
+ },
+ },
+ [TM6010_BOARD_GENERIC] = {
+ .name = "Generic tm6010 board",
+ .type = TM6010,
+ .tuner_type = TUNER_XC2028,
+ .tuner_addr = 0xc2 >> 1,
+ .demod_addr = 0x1e >> 1,
+ .caps = {
+ .has_tuner = 1,
+ .has_dvb = 1,
+ .has_zl10353 = 1,
+ .has_eeprom = 1,
+ .has_remote = 1,
+ },
+ .gpio = {
+ .tuner_reset = TM6010_GPIO_2,
+ .tuner_on = TM6010_GPIO_3,
+ .demod_reset = TM6010_GPIO_1,
+ .demod_on = TM6010_GPIO_4,
+ .power_led = TM6010_GPIO_7,
+ .dvb_led = TM6010_GPIO_5,
+ .ir = TM6010_GPIO_0,
+ },
+ },
+ [TM5600_BOARD_10MOONS_UT821] = {
+ .name = "10Moons UT 821",
+ .tuner_type = TUNER_XC2028,
+ .type = TM5600,
+ .tuner_addr = 0xc2 >> 1,
+ .caps = {
+ .has_tuner = 1,
+ .has_eeprom = 1,
+ },
+ .gpio = {
+ .tuner_reset = TM6000_GPIO_1,
+ },
+ },
+ [TM5600_BOARD_10MOONS_UT330] = {
+ .name = "10Moons UT 330",
+ .tuner_type = TUNER_PHILIPS_FQ1216AME_MK4,
+ .tuner_addr = 0xc8 >> 1,
+ .caps = {
+ .has_tuner = 1,
+ .has_dvb = 0,
+ .has_zl10353 = 0,
+ .has_eeprom = 1,
+ },
+ },
+ [TM6000_BOARD_ADSTECH_DUAL_TV] = {
+ .name = "ADSTECH Dual TV USB",
+ .tuner_type = TUNER_XC2028,
+ .tuner_addr = 0xc8 >> 1,
+ .caps = {
+ .has_tuner = 1,
+ .has_tda9874 = 1,
+ .has_dvb = 1,
+ .has_zl10353 = 1,
+ .has_eeprom = 1,
+ },
+ },
+ [TM6000_BOARD_FREECOM_AND_SIMILAR] = {
+ .name = "Freecom Hybrid Stick / Moka DVB-T Receiver Dual",
+ .tuner_type = TUNER_XC2028, /* has a XC3028 */
+ .tuner_addr = 0xc2 >> 1,
+ .demod_addr = 0x1e >> 1,
+ .caps = {
+ .has_tuner = 1,
+ .has_dvb = 1,
+ .has_zl10353 = 1,
+ .has_eeprom = 0,
+ .has_remote = 1,
+ },
+ .gpio = {
+ .tuner_reset = TM6000_GPIO_4,
+ },
+ },
+ [TM6000_BOARD_ADSTECH_MINI_DUAL_TV] = {
+ .name = "ADSTECH Mini Dual TV USB",
+ .tuner_type = TUNER_XC2028, /* has a XC3028 */
+ .tuner_addr = 0xc8 >> 1,
+ .demod_addr = 0x1e >> 1,
+ .caps = {
+ .has_tuner = 1,
+ .has_dvb = 1,
+ .has_zl10353 = 1,
+ .has_eeprom = 0,
+ },
+ .gpio = {
+ .tuner_reset = TM6000_GPIO_4,
+ },
+ },
+ [TM6010_BOARD_HAUPPAUGE_900H] = {
+ .name = "Hauppauge WinTV HVR-900H / WinTV USB2-Stick",
+ .tuner_type = TUNER_XC2028, /* has a XC3028 */
+ .tuner_addr = 0xc2 >> 1,
+ .demod_addr = 0x1e >> 1,
+ .type = TM6010,
+ .caps = {
+ .has_tuner = 1,
+ .has_dvb = 1,
+ .has_zl10353 = 1,
+ .has_eeprom = 1,
+ .has_remote = 1,
+ },
+ .gpio = {
+ .tuner_reset = TM6010_GPIO_2,
+ .tuner_on = TM6010_GPIO_3,
+ .demod_reset = TM6010_GPIO_1,
+ .demod_on = TM6010_GPIO_4,
+ .power_led = TM6010_GPIO_7,
+ .dvb_led = TM6010_GPIO_5,
+ .ir = TM6010_GPIO_0,
+ },
+ },
+ [TM6010_BOARD_BEHOLD_WANDER] = {
+ .name = "Beholder Wander DVB-T/TV/FM USB2.0",
+ .tuner_type = TUNER_XC5000,
+ .tuner_addr = 0xc2 >> 1,
+ .demod_addr = 0x1e >> 1,
+ .type = TM6010,
+ .caps = {
+ .has_tuner = 1,
+ .has_dvb = 1,
+ .has_zl10353 = 1,
+ .has_eeprom = 1,
+ .has_remote = 1,
+ },
+ .gpio = {
+ .tuner_reset = TM6010_GPIO_0,
+ .demod_reset = TM6010_GPIO_1,
+ .power_led = TM6010_GPIO_6,
+ },
+ },
+ [TM6010_BOARD_BEHOLD_VOYAGER] = {
+ .name = "Beholder Voyager TV/FM USB2.0",
+ .tuner_type = TUNER_XC5000,
+ .tuner_addr = 0xc2 >> 1,
+ .type = TM6010,
+ .caps = {
+ .has_tuner = 1,
+ .has_dvb = 0,
+ .has_zl10353 = 0,
+ .has_eeprom = 1,
+ .has_remote = 1,
+ },
+ .gpio = {
+ .tuner_reset = TM6010_GPIO_0,
+ .power_led = TM6010_GPIO_6,
+ },
+ },
+ [TM6010_BOARD_TERRATEC_CINERGY_HYBRID_XE] = {
+ .name = "Terratec Cinergy Hybrid XE / Cinergy Hybrid-Stick",
+ .tuner_type = TUNER_XC2028, /* has a XC3028 */
+ .tuner_addr = 0xc2 >> 1,
+ .demod_addr = 0x1e >> 1,
+ .type = TM6010,
+ .caps = {
+ .has_tuner = 1,
+ .has_dvb = 1,
+ .has_zl10353 = 1,
+ .has_eeprom = 1,
+ .has_remote = 1,
+ },
+ .gpio = {
+ .tuner_reset = TM6010_GPIO_2,
+ .tuner_on = TM6010_GPIO_3,
+ .demod_reset = TM6010_GPIO_1,
+ .demod_on = TM6010_GPIO_4,
+ .power_led = TM6010_GPIO_7,
+ .dvb_led = TM6010_GPIO_5,
+ .ir = TM6010_GPIO_0,
+ },
+ },
+ [TM6010_BOARD_TWINHAN_TU501] = {
+ .name = "Twinhan TU501(704D1)",
+ .tuner_type = TUNER_XC2028, /* has a XC3028 */
+ .tuner_addr = 0xc2 >> 1,
+ .demod_addr = 0x1e >> 1,
+ .type = TM6010,
+ .caps = {
+ .has_tuner = 1,
+ .has_dvb = 1,
+ .has_zl10353 = 1,
+ .has_eeprom = 1,
+ .has_remote = 1,
+ },
+ .gpio = {
+ .tuner_reset = TM6010_GPIO_2,
+ .tuner_on = TM6010_GPIO_3,
+ .demod_reset = TM6010_GPIO_1,
+ .demod_on = TM6010_GPIO_4,
+ .power_led = TM6010_GPIO_7,
+ .dvb_led = TM6010_GPIO_5,
+ .ir = TM6010_GPIO_0,
+ },
+ }
+};
+
+/* table of devices that work with this driver */
+struct usb_device_id tm6000_id_table[] = {
+ { USB_DEVICE(0x6000, 0x0001), .driver_info = TM5600_BOARD_10MOONS_UT821 },
+ { USB_DEVICE(0x6000, 0x0002), .driver_info = TM6010_BOARD_GENERIC },
+ { USB_DEVICE(0x06e1, 0xf332), .driver_info = TM6000_BOARD_ADSTECH_DUAL_TV },
+ { USB_DEVICE(0x14aa, 0x0620), .driver_info = TM6000_BOARD_FREECOM_AND_SIMILAR },
+ { USB_DEVICE(0x06e1, 0xb339), .driver_info = TM6000_BOARD_ADSTECH_MINI_DUAL_TV },
+ { USB_DEVICE(0x2040, 0x6600), .driver_info = TM6010_BOARD_HAUPPAUGE_900H },
+ { USB_DEVICE(0x2040, 0x6601), .driver_info = TM6010_BOARD_HAUPPAUGE_900H },
+ { USB_DEVICE(0x2040, 0x6610), .driver_info = TM6010_BOARD_HAUPPAUGE_900H },
+ { USB_DEVICE(0x2040, 0x6611), .driver_info = TM6010_BOARD_HAUPPAUGE_900H },
+ { USB_DEVICE(0x6000, 0xdec0), .driver_info = TM6010_BOARD_BEHOLD_WANDER },
+ { USB_DEVICE(0x6000, 0xdec1), .driver_info = TM6010_BOARD_BEHOLD_VOYAGER },
+ { USB_DEVICE(0x0ccd, 0x0086), .driver_info = TM6010_BOARD_TERRATEC_CINERGY_HYBRID_XE },
+ { USB_DEVICE(0x0ccd, 0x00A5), .driver_info = TM6010_BOARD_TERRATEC_CINERGY_HYBRID_XE },
+ { USB_DEVICE(0x13d3, 0x3240), .driver_info = TM6010_BOARD_TWINHAN_TU501 },
+ { USB_DEVICE(0x13d3, 0x3241), .driver_info = TM6010_BOARD_TWINHAN_TU501 },
+ { USB_DEVICE(0x13d3, 0x3243), .driver_info = TM6010_BOARD_TWINHAN_TU501 },
+ { USB_DEVICE(0x13d3, 0x3264), .driver_info = TM6010_BOARD_TWINHAN_TU501 },
+ { },
+};
+
+/* Tuner callback to provide the proper gpio changes needed for xc5000 */
+int tm6000_xc5000_callback(void *ptr, int component, int command, int arg)
+{
+ int rc = 0;
+ struct tm6000_core *dev = ptr;
+
+ if (dev->tuner_type != TUNER_XC5000)
+ return 0;
+
+ switch (command) {
+ case XC5000_TUNER_RESET:
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ dev->gpio.tuner_reset, 0x01);
+ msleep(15);
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ dev->gpio.tuner_reset, 0x00);
+ msleep(15);
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ dev->gpio.tuner_reset, 0x01);
+ break;
+ }
+ return (rc);
+}
+
+
+/* Tuner callback to provide the proper gpio changes needed for xc2028 */
+
+int tm6000_tuner_callback(void *ptr, int component, int command, int arg)
+{
+ int rc = 0;
+ struct tm6000_core *dev = ptr;
+
+ if (dev->tuner_type != TUNER_XC2028)
+ return 0;
+
+ switch (command) {
+ case XC2028_RESET_CLK:
+ tm6000_set_reg(dev, REQ_04_EN_DISABLE_MCU_INT,
+ 0x02, arg);
+ msleep(10);
+ rc = tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ TM6000_GPIO_CLK, 0);
+ if (rc < 0)
+ return rc;
+ msleep(10);
+ rc = tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ TM6000_GPIO_CLK, 1);
+ break;
+ case XC2028_TUNER_RESET:
+ /* Reset codes during load firmware */
+ switch (arg) {
+ case 0:
+ /* newer tuner can faster reset */
+ switch (dev->model) {
+ case TM5600_BOARD_10MOONS_UT821:
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ dev->gpio.tuner_reset, 0x01);
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ 0x300, 0x01);
+ msleep(10);
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ dev->gpio.tuner_reset, 0x00);
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ 0x300, 0x00);
+ msleep(10);
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ dev->gpio.tuner_reset, 0x01);
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ 0x300, 0x01);
+ break;
+ case TM6010_BOARD_HAUPPAUGE_900H:
+ case TM6010_BOARD_TERRATEC_CINERGY_HYBRID_XE:
+ case TM6010_BOARD_TWINHAN_TU501:
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ dev->gpio.tuner_reset, 0x01);
+ msleep(60);
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ dev->gpio.tuner_reset, 0x00);
+ msleep(75);
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ dev->gpio.tuner_reset, 0x01);
+ msleep(60);
+ break;
+ default:
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ dev->gpio.tuner_reset, 0x00);
+ msleep(130);
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ dev->gpio.tuner_reset, 0x01);
+ msleep(130);
+ break;
+ }
+ break;
+ case 1:
+ tm6000_set_reg(dev, REQ_04_EN_DISABLE_MCU_INT,
+ 0x02, 0x01);
+ msleep(10);
+ break;
+
+ case 2:
+ rc = tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ TM6000_GPIO_CLK, 0);
+ if (rc < 0)
+ return rc;
+ msleep(100);
+ rc = tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ TM6000_GPIO_CLK, 1);
+ msleep(100);
+ break;
+ }
+ }
+ return rc;
+}
+
+int tm6000_cards_setup(struct tm6000_core *dev)
+{
+ int i, rc;
+
+ /*
+ * Board-specific initialization sequence. Handles all GPIO
+ * initialization sequences that are board-specific.
+ * Up to now, all known devices use GPIO1 and GPIO4 in the same way.
+ * Probably, they're all based on some reference device. Due to that,
+ * there's a common routine at the end to handle those GPIOs. Devices
+ * that use different pinouts or init sequences can simply return from
+ * the board-specific section.
+ */
+ switch (dev->model) {
+ case TM6010_BOARD_HAUPPAUGE_900H:
+ case TM6010_BOARD_TERRATEC_CINERGY_HYBRID_XE:
+ case TM6010_BOARD_TWINHAN_TU501:
+ case TM6010_BOARD_GENERIC:
+ /* Turn xceive 3028 on */
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.tuner_on, 0x01);
+ msleep(15);
+ /* Turn zarlink zl10353 on */
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.demod_on, 0x00);
+ msleep(15);
+ /* Reset zarlink zl10353 */
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.demod_reset, 0x00);
+ msleep(50);
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.demod_reset, 0x01);
+ msleep(15);
+ /* Turn zarlink zl10353 off */
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.demod_on, 0x01);
+ msleep(15);
+ /* ir ? */
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.ir, 0x01);
+ msleep(15);
+ /* Power led on (blue) */
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.power_led, 0x00);
+ msleep(15);
+ /* DVB led off (orange) */
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.dvb_led, 0x01);
+ msleep(15);
+ /* Turn zarlink zl10353 on */
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.demod_on, 0x00);
+ msleep(15);
+ break;
+ case TM6010_BOARD_BEHOLD_WANDER:
+ /* Power led on (blue) */
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.power_led, 0x01);
+ msleep(15);
+ /* Reset zarlink zl10353 */
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.demod_reset, 0x00);
+ msleep(50);
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.demod_reset, 0x01);
+ msleep(15);
+ break;
+ case TM6010_BOARD_BEHOLD_VOYAGER:
+ /* Power led on (blue) */
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.power_led, 0x01);
+ msleep(15);
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Default initialization. Most of the devices seem to use GPIO1
+ * and GPIO4 in the same way, so this handles the common sequence
+ * used by most devices.
+ * If a device uses a different sequence or different GPIO pins for
+ * reset, just add the code to the board-specific part above.
+ */
+
+ if (dev->gpio.tuner_reset) {
+ for (i = 0; i < 2; i++) {
+ rc = tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ dev->gpio.tuner_reset, 0x00);
+ if (rc < 0) {
+ printk(KERN_ERR "Error %i doing tuner reset\n", rc);
+ return rc;
+ }
+
+ msleep(10); /* Just to be conservative */
+ rc = tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ dev->gpio.tuner_reset, 0x01);
+ if (rc < 0) {
+ printk(KERN_ERR "Error %i doing tuner reset\n", rc);
+ return rc;
+ }
+ msleep(10);
+
+ if (!i) {
+ rc = tm6000_get_reg32(dev, REQ_40_GET_VERSION, 0, 0);
+ if (rc >= 0)
+ printk(KERN_DEBUG "board=0x%08x\n", rc);
+ }
+ }
+ } else {
+ printk(KERN_ERR "Tuner reset is not configured\n");
+ return -1;
+ }
+
+ msleep(50);
+
+ return 0;
+};
+
+static void tm6000_config_tuner(struct tm6000_core *dev)
+{
+ struct tuner_setup tun_setup;
+
+ /* Load tuner module */
+ v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
+ "tuner", "tuner", dev->tuner_addr, NULL);
+
+ memset(&tun_setup, 0, sizeof(tun_setup));
+ tun_setup.type = dev->tuner_type;
+ tun_setup.addr = dev->tuner_addr;
+
+ tun_setup.mode_mask = 0;
+ if (dev->caps.has_tuner)
+ tun_setup.mode_mask |= (T_ANALOG_TV | T_RADIO);
+ if (dev->caps.has_dvb)
+ tun_setup.mode_mask |= T_DIGITAL_TV;
+
+ switch (dev->tuner_type) {
+ case TUNER_XC2028:
+ tun_setup.tuner_callback = tm6000_tuner_callback;
+ break;
+ case TUNER_XC5000:
+ tun_setup.tuner_callback = tm6000_xc5000_callback;
+ break;
+ }
+
+ v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_type_addr, &tun_setup);
+
+ switch (dev->tuner_type) {
+ case TUNER_XC2028: {
+ struct v4l2_priv_tun_config xc2028_cfg;
+ struct xc2028_ctrl ctl;
+
+ memset(&xc2028_cfg, 0, sizeof(xc2028_cfg));
+ memset(&ctl, 0, sizeof(ctl));
+
+ ctl.input1 = 1;
+ ctl.read_not_reliable = 0;
+ ctl.msleep = 10;
+ ctl.demod = XC3028_FE_ZARLINK456;
+ ctl.vhfbw7 = 1;
+ ctl.uhfbw8 = 1;
+ xc2028_cfg.tuner = TUNER_XC2028;
+ xc2028_cfg.priv = &ctl;
+
+ switch (dev->model) {
+ case TM6010_BOARD_HAUPPAUGE_900H:
+ case TM6010_BOARD_TERRATEC_CINERGY_HYBRID_XE:
+ case TM6010_BOARD_TWINHAN_TU501:
+ ctl.fname = "xc3028L-v36.fw";
+ break;
+ default:
+ if (dev->dev_type == TM6010)
+ ctl.fname = "xc3028-v27.fw";
+ else
+ ctl.fname = "xc3028-v24.fw";
+ }
+
+ printk(KERN_INFO "Setting firmware parameters for xc2028\n");
+ v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_config,
+ &xc2028_cfg);
+
+ }
+ break;
+ case TUNER_XC5000:
+ {
+ struct v4l2_priv_tun_config xc5000_cfg;
+ struct xc5000_config ctl = {
+ .i2c_address = dev->tuner_addr,
+ .if_khz = 4570,
+ .radio_input = XC5000_RADIO_FM1,
+ };
+
+ xc5000_cfg.tuner = TUNER_XC5000;
+ xc5000_cfg.priv = &ctl;
+
+
+ v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_config,
+ &xc5000_cfg);
+ }
+ break;
+ default:
+ printk(KERN_INFO "Unknown tuner type. Tuner is not configured.\n");
+ break;
+ }
+}
+
+static int tm6000_init_dev(struct tm6000_core *dev)
+{
+ struct v4l2_frequency f;
+ int rc = 0;
+
+ mutex_init(&dev->lock);
+
+ mutex_lock(&dev->lock);
+
+ /* Initialize board-specific data */
+ dev->dev_type = tm6000_boards[dev->model].type;
+ dev->tuner_type = tm6000_boards[dev->model].tuner_type;
+ dev->tuner_addr = tm6000_boards[dev->model].tuner_addr;
+
+ dev->gpio = tm6000_boards[dev->model].gpio;
+
+ dev->demod_addr = tm6000_boards[dev->model].demod_addr;
+
+ dev->caps = tm6000_boards[dev->model].caps;
+
+ /* initialize hardware */
+ rc = tm6000_init(dev);
+ if (rc < 0)
+ goto err;
+
+ rc = v4l2_device_register(&dev->udev->dev, &dev->v4l2_dev);
+ if (rc < 0)
+ goto err;
+
+ /* register i2c bus */
+ rc = tm6000_i2c_register(dev);
+ if (rc < 0)
+ goto err;
+
+ /* Default values for STD and resolutions */
+ dev->width = 720;
+ dev->height = 480;
+ dev->norm = V4L2_STD_PAL_M;
+
+ /* Configure tuner */
+ tm6000_config_tuner(dev);
+
+ /* Set video standard */
+ v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_std, dev->norm);
+
+ /* Set tuner frequency - also loads firmware on xc2028/xc3028 */
+ f.tuner = 0;
+ f.type = V4L2_TUNER_ANALOG_TV;
+ f.frequency = 3092; /* 193.25 MHz */
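+ /* analog TV tuner frequencies are expressed in units of 62.5 kHz,
+ * so 3092 * 62.5 kHz = 193.25 MHz */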
+ dev->freq = f.frequency;
+ v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_frequency, &f);
+
+ if (dev->caps.has_tda9874)
+ v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
+ "tvaudio", "tvaudio", I2C_ADDR_TDA9874, NULL);
+
+ /* register and initialize V4L2 */
+ rc = tm6000_v4l2_register(dev);
+ if (rc < 0)
+ goto err;
+
+ if (dev->caps.has_dvb) {
+ dev->dvb = kzalloc(sizeof(*(dev->dvb)), GFP_KERNEL);
+ if (!dev->dvb) {
+ rc = -ENOMEM;
+ goto err2;
+ }
+
+#ifdef CONFIG_VIDEO_TM6000_DVB
+ rc = tm6000_dvb_register(dev);
+ if (rc < 0) {
+ kfree(dev->dvb);
+ dev->dvb = NULL;
+ goto err2;
+ }
+#endif
+ }
+ mutex_unlock(&dev->lock);
+ return 0;
+
+err2:
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+err:
+ mutex_unlock(&dev->lock);
+ return rc;
+}
+
+/* high bandwidth multiplier, as encoded in highspeed endpoint descriptors */
+#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
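+/* bits 10:0 of wMaxPacketSize hold the base packet size; bits 12:11 encode the
+ * number of additional transactions per microframe for high-bandwidth endpoints */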
+
+static void get_max_endpoint(struct usb_device *udev,
+ struct usb_host_interface *alt,
+ char *msgtype,
+ struct usb_host_endpoint *curr_e,
+ struct tm6000_endpoint *tm_ep)
+{
+ u16 tmp = le16_to_cpu(curr_e->desc.wMaxPacketSize);
+ unsigned int size = tmp & 0x7ff;
+
+ if (udev->speed == USB_SPEED_HIGH)
+ size = size * hb_mult (tmp);
+
+ if (size > tm_ep->maxsize) {
+ tm_ep->endp = curr_e;
+ tm_ep->maxsize = size;
+ tm_ep->bInterfaceNumber = alt->desc.bInterfaceNumber;
+ tm_ep->bAlternateSetting = alt->desc.bAlternateSetting;
+
+ printk(KERN_INFO "tm6000: %s endpoint: 0x%02x (max size=%u bytes)\n",
+ msgtype, curr_e->desc.bEndpointAddress,
+ size);
+ }
+}
+
+/*
+ * tm6000_usb_probe()
+ * checks for supported devices
+ */
+static int tm6000_usb_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct usb_device *usbdev;
+ struct tm6000_core *dev = NULL;
+ int i, rc = 0;
+ int nr = 0;
+ char *speed;
+
+ usbdev = usb_get_dev(interface_to_usbdev(interface));
+
+ /* Selects the proper interface */
+ rc = usb_set_interface(usbdev, 0, 1);
+ if (rc < 0)
+ goto err;
+
+ /* Check to see next free device and mark as used */
+ nr = find_first_zero_bit(&tm6000_devused, TM6000_MAXBOARDS);
+ if (nr >= TM6000_MAXBOARDS) {
+ printk(KERN_ERR "tm6000: Supports only %i tm60xx boards.\n", TM6000_MAXBOARDS);
+ usb_put_dev(usbdev);
+ return -ENOMEM;
+ }
+
+ /* Create and initialize dev struct */
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (dev == NULL) {
+ printk(KERN_ERR "tm6000" ": out of memory!\n");
+ usb_put_dev(usbdev);
+ return -ENOMEM;
+ }
+ spin_lock_init(&dev->slock);
+
+ /* Mark this device slot as used */
+ tm6000_devused |= 1<<nr;
+ snprintf(dev->name, 29, "tm6000 #%d", nr);
+
+ dev->model = id->driver_info;
+ if ((card[nr] >= 0) && (card[nr] < ARRAY_SIZE(tm6000_boards)))
+ dev->model = card[nr];
+
+ dev->udev = usbdev;
+ dev->devno = nr;
+
+ switch (usbdev->speed) {
+ case USB_SPEED_LOW:
+ speed = "1.5";
+ break;
+ case USB_SPEED_UNKNOWN:
+ case USB_SPEED_FULL:
+ speed = "12";
+ break;
+ case USB_SPEED_HIGH:
+ speed = "480";
+ break;
+ default:
+ speed = "unknown";
+ }
+
+
+
+ /* Get endpoints */
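+ /* Walk every altsetting and, for each transfer type and direction, remember
+ * the endpoint with the largest max packet size together with the
+ * interface/altsetting it belongs to (see get_max_endpoint() above) */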
+ for (i = 0; i < interface->num_altsetting; i++) {
+ int ep;
+
+ for (ep = 0; ep < interface->altsetting[i].desc.bNumEndpoints; ep++) {
+ struct usb_host_endpoint *e;
+ int dir_out;
+
+ e = &interface->altsetting[i].endpoint[ep];
+
+ dir_out = ((e->desc.bEndpointAddress &
+ USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT);
+
+ printk(KERN_INFO "tm6000: alt %d, interface %i, class %i\n",
+ i,
+ interface->altsetting[i].desc.bInterfaceNumber,
+ interface->altsetting[i].desc.bInterfaceClass);
+
+ switch (e->desc.bmAttributes) {
+ case USB_ENDPOINT_XFER_BULK:
+ if (!dir_out) {
+ get_max_endpoint(usbdev,
+ &interface->altsetting[i],
+ "Bulk IN", e,
+ &dev->bulk_in);
+ } else {
+ get_max_endpoint(usbdev,
+ &interface->altsetting[i],
+ "Bulk OUT", e,
+ &dev->bulk_out);
+ }
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ if (!dir_out) {
+ get_max_endpoint(usbdev,
+ &interface->altsetting[i],
+ "ISOC IN", e,
+ &dev->isoc_in);
+ } else {
+ get_max_endpoint(usbdev,
+ &interface->altsetting[i],
+ "ISOC OUT", e,
+ &dev->isoc_out);
+ }
+ break;
+ }
+ }
+ }
+
+
+ printk(KERN_INFO "tm6000: New video device @ %s Mbps (%04x:%04x, ifnum %d)\n",
+ speed,
+ le16_to_cpu(dev->udev->descriptor.idVendor),
+ le16_to_cpu(dev->udev->descriptor.idProduct),
+ interface->altsetting->desc.bInterfaceNumber);
+
+/* check if the device has the ISO IN endpoint in the correct place */
+ if (!dev->isoc_in.endp) {
+ printk(KERN_ERR "tm6000: probing error: no IN ISOC endpoint!\n");
+ rc = -ENODEV;
+
+ goto err;
+ }
+
+ /* save our data pointer in this interface device */
+ usb_set_intfdata(interface, dev);
+
+ printk(KERN_INFO "tm6000: Found %s\n", tm6000_boards[dev->model].name);
+
+ rc = tm6000_init_dev(dev);
+
+ if (rc < 0)
+ goto err;
+
+ return 0;
+
+err:
+ printk(KERN_ERR "tm6000: Error %d while registering\n", rc);
+
+ tm6000_devused &= ~(1<<nr);
+ usb_put_dev(usbdev);
+
+ kfree(dev);
+ return rc;
+}
+
+/*
+ * tm6000_usb_disconnect()
+ * called when the device gets disconnected
+ * video device will be unregistered on v4l2_close in case it is still open
+ */
+static void tm6000_usb_disconnect(struct usb_interface *interface)
+{
+ struct tm6000_core *dev = usb_get_intfdata(interface);
+ usb_set_intfdata(interface, NULL);
+
+ if (!dev)
+ return;
+
+ printk(KERN_INFO "tm6000: disconnecting %s\n", dev->name);
+
+ mutex_lock(&dev->lock);
+
+#ifdef CONFIG_VIDEO_TM6000_DVB
+ if (dev->dvb) {
+ tm6000_dvb_unregister(dev);
+ kfree(dev->dvb);
+ }
+#endif
+
+ tm6000_v4l2_unregister(dev);
+
+ tm6000_i2c_unregister(dev);
+
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ dev->state |= DEV_DISCONNECTED;
+
+ usb_put_dev(dev->udev);
+
+ mutex_unlock(&dev->lock);
+ kfree(dev);
+}
+
+static struct usb_driver tm6000_usb_driver = {
+ .name = "tm6000",
+ .probe = tm6000_usb_probe,
+ .disconnect = tm6000_usb_disconnect,
+ .id_table = tm6000_id_table,
+};
+
+static int __init tm6000_module_init(void)
+{
+ int result;
+
+ printk(KERN_INFO "tm6000" " v4l2 driver version %d.%d.%d loaded\n",
+ (TM6000_VERSION >> 16) & 0xff,
+ (TM6000_VERSION >> 8) & 0xff, TM6000_VERSION & 0xff);
+
+ /* register this driver with the USB subsystem */
+ result = usb_register(&tm6000_usb_driver);
+ if (result)
+ printk(KERN_ERR "tm6000"
+ " usb_register failed. Error number %d.\n", result);
+
+ return result;
+}
+
+static void __exit tm6000_module_exit(void)
+{
+ /* deregister at USB subsystem */
+ usb_deregister(&tm6000_usb_driver);
+}
+
+module_init(tm6000_module_init);
+module_exit(tm6000_module_exit);
+
+MODULE_DESCRIPTION("Trident TVMaster TM5600/TM6000/TM6010 USB2 adapter");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/tm6000/tm6000-core.c b/drivers/staging/tm6000/tm6000-core.c
new file mode 100644
index 000000000000..a597fbcc6f3f
--- /dev/null
+++ b/drivers/staging/tm6000/tm6000-core.c
@@ -0,0 +1,603 @@
+/*
+ tm6000-core.c - driver for TM5600/TM6000/TM6010 USB video capture devices
+
+ Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+
+ Copyright (C) 2007 Michel Ludwig <michel.ludwig@gmail.com>
+ - DVB-T support
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation version 2
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/i2c.h>
+#include "tm6000.h"
+#include "tm6000-regs.h"
+#include <media/v4l2-common.h>
+#include <media/tuner.h>
+
+#define USB_TIMEOUT 5*HZ /* ms */
+
+int tm6000_read_write_usb (struct tm6000_core *dev, u8 req_type, u8 req,
+ u16 value, u16 index, u8 *buf, u16 len)
+{
+ int ret, i;
+ unsigned int pipe;
+ static int ini=0, last=0, n=0;
+ u8 *data=NULL;
+
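+	/* USB transfer buffers must be heap-allocated (DMA-able), so bounce the caller's data through a kzalloc'ed buffer */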
+ if (len)
+ data = kzalloc(len, GFP_KERNEL);
+
+
+ if (req_type & USB_DIR_IN)
+ pipe=usb_rcvctrlpipe(dev->udev, 0);
+ else {
+ pipe=usb_sndctrlpipe(dev->udev, 0);
+ memcpy(data, buf, len);
+ }
+
+ if (tm6000_debug & V4L2_DEBUG_I2C) {
+ if (!ini)
+ last=ini=jiffies;
+
+ printk("%06i (dev %p, pipe %08x): ", n, dev->udev, pipe);
+
+ printk( "%s: %06u ms %06u ms %02x %02x %02x %02x %02x %02x %02x %02x ",
+ (req_type & USB_DIR_IN)?" IN":"OUT",
+ jiffies_to_msecs(jiffies-last),
+ jiffies_to_msecs(jiffies-ini),
+ req_type, req,value&0xff,value>>8, index&0xff, index>>8,
+ len&0xff, len>>8);
+ last=jiffies;
+ n++;
+
+ if ( !(req_type & USB_DIR_IN) ) {
+ printk(">>> ");
+ for (i=0;i<len;i++) {
+ printk(" %02x",buf[i]);
+ }
+ printk("\n");
+ }
+ }
+
+ ret = usb_control_msg(dev->udev, pipe, req, req_type, value, index, data,
+ len, USB_TIMEOUT);
+
+ if (req_type & USB_DIR_IN)
+ memcpy(buf, data, len);
+
+ if (tm6000_debug & V4L2_DEBUG_I2C) {
+ if (ret<0) {
+ if (req_type & USB_DIR_IN)
+ printk("<<< (len=%d)\n",len);
+
+ printk("%s: Error #%d\n", __FUNCTION__, ret);
+ } else if (req_type & USB_DIR_IN) {
+ printk("<<< ");
+ for (i=0;i<len;i++) {
+ printk(" %02x",buf[i]);
+ }
+ printk("\n");
+ }
+ }
+
+ kfree(data);
+
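+	/* small settling delay between consecutive control transfers */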
+ msleep(5);
+
+ return ret;
+}
+
+int tm6000_set_reg (struct tm6000_core *dev, u8 req, u16 value, u16 index)
+{
+ return
+ tm6000_read_write_usb (dev, USB_DIR_OUT | USB_TYPE_VENDOR,
+ req, value, index, NULL, 0);
+}
+EXPORT_SYMBOL_GPL(tm6000_set_reg);
+
+int tm6000_get_reg (struct tm6000_core *dev, u8 req, u16 value, u16 index)
+{
+ int rc;
+ u8 buf[1];
+
+ rc=tm6000_read_write_usb (dev, USB_DIR_IN | USB_TYPE_VENDOR, req,
+ value, index, buf, 1);
+
+ if (rc<0)
+ return rc;
+
+ return *buf;
+}
+EXPORT_SYMBOL_GPL(tm6000_get_reg);
+
+int tm6000_get_reg16 (struct tm6000_core *dev, u8 req, u16 value, u16 index)
+{
+ int rc;
+ u8 buf[2];
+
+ rc=tm6000_read_write_usb (dev, USB_DIR_IN | USB_TYPE_VENDOR, req,
+ value, index, buf, 2);
+
+ if (rc<0)
+ return rc;
+
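+	/* the device returns the most significant byte first */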
+ return buf[1]|buf[0]<<8;
+}
+
+int tm6000_get_reg32 (struct tm6000_core *dev, u8 req, u16 value, u16 index)
+{
+ int rc;
+ u8 buf[4];
+
+ rc=tm6000_read_write_usb (dev, USB_DIR_IN | USB_TYPE_VENDOR, req,
+ value, index, buf, 4);
+
+ if (rc<0)
+ return rc;
+
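+	/* assemble the 32-bit value, most significant byte first */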
+ return buf[3] | buf[2] << 8 | buf[1] << 16 | buf[0] << 24;
+}
+
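+/*
+ * Program the YUV byte order according to dev->fourcc: on the tm6010 this
+ * is bit 0 of the active video interface register, while on the
+ * tm5600/tm6000 a different threshold register value is written instead.
+ */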
+void tm6000_set_fourcc_format(struct tm6000_core *dev)
+{
+ if (dev->dev_type == TM6010) {
+ int val;
+
+ val = tm6000_get_reg(dev, TM6010_REQ07_RCC_ACTIVE_VIDEO_IF, 0) & 0xfc;
+ if (dev->fourcc == V4L2_PIX_FMT_UYVY)
+ tm6000_set_reg(dev, TM6010_REQ07_RCC_ACTIVE_VIDEO_IF, val);
+ else
+ tm6000_set_reg(dev, TM6010_REQ07_RCC_ACTIVE_VIDEO_IF, val | 1);
+ } else {
+ if (dev->fourcc == V4L2_PIX_FMT_UYVY)
+ tm6000_set_reg(dev, TM6010_REQ07_RC1_TRESHOLD, 0xd0);
+ else
+ tm6000_set_reg(dev, TM6010_REQ07_RC1_TRESHOLD, 0x90);
+ }
+}
+
+int tm6000_init_analog_mode (struct tm6000_core *dev)
+{
+ if (dev->dev_type == TM6010) {
+ int val;
+
+ /* Enable video */
+ val = tm6000_get_reg(dev, TM6010_REQ07_RCC_ACTIVE_VIDEO_IF, 0);
+ val |= 0x60;
+ tm6000_set_reg(dev, TM6010_REQ07_RCC_ACTIVE_VIDEO_IF, val);
+ val = tm6000_get_reg(dev,
+ TM6010_REQ07_RC0_ACTIVE_VIDEO_SOURCE, 0);
+ val &= ~0x40;
+ tm6000_set_reg(dev, TM6010_REQ07_RC0_ACTIVE_VIDEO_SOURCE, val);
+
+ /* Init teletext */
+ tm6000_set_reg(dev, TM6010_REQ07_R3F_RESET, 0x01);
+ tm6000_set_reg(dev, TM6010_REQ07_R41_TELETEXT_VBI_CODE1, 0x27);
+ tm6000_set_reg(dev, TM6010_REQ07_R42_VBI_DATA_HIGH_LEVEL, 0x55);
+ tm6000_set_reg(dev, TM6010_REQ07_R43_VBI_DATA_TYPE_LINE7, 0x66);
+ tm6000_set_reg(dev, TM6010_REQ07_R44_VBI_DATA_TYPE_LINE8, 0x66);
+ tm6000_set_reg(dev, TM6010_REQ07_R45_VBI_DATA_TYPE_LINE9, 0x66);
+ tm6000_set_reg(dev,
+ TM6010_REQ07_R46_VBI_DATA_TYPE_LINE10, 0x66);
+ tm6000_set_reg(dev,
+ TM6010_REQ07_R47_VBI_DATA_TYPE_LINE11, 0x66);
+ tm6000_set_reg(dev,
+ TM6010_REQ07_R48_VBI_DATA_TYPE_LINE12, 0x66);
+ tm6000_set_reg(dev,
+ TM6010_REQ07_R49_VBI_DATA_TYPE_LINE13, 0x66);
+ tm6000_set_reg(dev,
+ TM6010_REQ07_R4A_VBI_DATA_TYPE_LINE14, 0x66);
+ tm6000_set_reg(dev,
+ TM6010_REQ07_R4B_VBI_DATA_TYPE_LINE15, 0x66);
+ tm6000_set_reg(dev,
+ TM6010_REQ07_R4C_VBI_DATA_TYPE_LINE16, 0x66);
+ tm6000_set_reg(dev,
+ TM6010_REQ07_R4D_VBI_DATA_TYPE_LINE17, 0x66);
+ tm6000_set_reg(dev,
+ TM6010_REQ07_R4E_VBI_DATA_TYPE_LINE18, 0x66);
+ tm6000_set_reg(dev,
+ TM6010_REQ07_R4F_VBI_DATA_TYPE_LINE19, 0x66);
+ tm6000_set_reg(dev,
+ TM6010_REQ07_R50_VBI_DATA_TYPE_LINE20, 0x66);
+ tm6000_set_reg(dev,
+ TM6010_REQ07_R51_VBI_DATA_TYPE_LINE21, 0x66);
+ tm6000_set_reg(dev,
+ TM6010_REQ07_R52_VBI_DATA_TYPE_LINE22, 0x66);
+ tm6000_set_reg(dev,
+ TM6010_REQ07_R53_VBI_DATA_TYPE_LINE23, 0x00);
+ tm6000_set_reg(dev,
+ TM6010_REQ07_R54_VBI_DATA_TYPE_RLINES, 0x00);
+ tm6000_set_reg(dev,
+ TM6010_REQ07_R55_VBI_LOOP_FILTER_GAIN, 0x01);
+ tm6000_set_reg(dev,
+ TM6010_REQ07_R56_VBI_LOOP_FILTER_I_GAIN, 0x00);
+ tm6000_set_reg(dev,
+ TM6010_REQ07_R57_VBI_LOOP_FILTER_P_GAIN, 0x02);
+ tm6000_set_reg(dev, TM6010_REQ07_R58_VBI_CAPTION_DTO1, 0x35);
+ tm6000_set_reg(dev, TM6010_REQ07_R59_VBI_CAPTION_DTO0, 0xa0);
+ tm6000_set_reg(dev, TM6010_REQ07_R5A_VBI_TELETEXT_DTO1, 0x11);
+ tm6000_set_reg(dev, TM6010_REQ07_R5B_VBI_TELETEXT_DTO0, 0x4c);
+ tm6000_set_reg(dev, TM6010_REQ07_R40_TELETEXT_VBI_CODE0, 0x01);
+ tm6000_set_reg(dev, TM6010_REQ07_R3F_RESET, 0x00);
+
+
+ /* Init audio */
+ tm6000_set_reg(dev, TM6010_REQ08_R01_A_INIT, 0x00);
+ tm6000_set_reg(dev, TM6010_REQ08_R02_A_FIX_GAIN_CTRL, 0x04);
+ tm6000_set_reg(dev, TM6010_REQ08_R03_A_AUTO_GAIN_CTRL, 0x00);
+ tm6000_set_reg(dev, TM6010_REQ08_R04_A_SIF_AMP_CTRL, 0xa0);
+ tm6000_set_reg(dev, TM6010_REQ08_R05_A_STANDARD_MOD, 0x05);
+ tm6000_set_reg(dev, TM6010_REQ08_R06_A_SOUND_MOD, 0x06);
+ tm6000_set_reg(dev, TM6010_REQ08_R07_A_LEFT_VOL, 0x00);
+ tm6000_set_reg(dev, TM6010_REQ08_R08_A_RIGHT_VOL, 0x00);
+ tm6000_set_reg(dev, TM6010_REQ08_R09_A_MAIN_VOL, 0x08);
+ tm6000_set_reg(dev, TM6010_REQ08_R0A_A_I2S_MOD, 0x91);
+ tm6000_set_reg(dev, TM6010_REQ08_R0B_A_ASD_THRES1, 0x20);
+ tm6000_set_reg(dev, TM6010_REQ08_R0C_A_ASD_THRES2, 0x12);
+ tm6000_set_reg(dev, TM6010_REQ08_R0D_A_AMD_THRES, 0x20);
+ tm6000_set_reg(dev, TM6010_REQ08_R0E_A_MONO_THRES1, 0xf0);
+ tm6000_set_reg(dev, TM6010_REQ08_R0F_A_MONO_THRES2, 0x80);
+ tm6000_set_reg(dev, TM6010_REQ08_R10_A_MUTE_THRES1, 0xc0);
+ tm6000_set_reg(dev, TM6010_REQ08_R11_A_MUTE_THRES2, 0x80);
+ tm6000_set_reg(dev, TM6010_REQ08_R12_A_AGC_U, 0x12);
+ tm6000_set_reg(dev, TM6010_REQ08_R13_A_AGC_ERR_T, 0xfe);
+ tm6000_set_reg(dev, TM6010_REQ08_R14_A_AGC_GAIN_INIT, 0x20);
+ tm6000_set_reg(dev, TM6010_REQ08_R15_A_AGC_STEP_THR, 0x14);
+ tm6000_set_reg(dev, TM6010_REQ08_R16_A_AGC_GAIN_MAX, 0xfe);
+ tm6000_set_reg(dev, TM6010_REQ08_R17_A_AGC_GAIN_MIN, 0x01);
+ tm6000_set_reg(dev, TM6010_REQ08_R18_A_TR_CTRL, 0xa0);
+ tm6000_set_reg(dev, TM6010_REQ08_R19_A_FH_2FH_GAIN, 0x32);
+ tm6000_set_reg(dev, TM6010_REQ08_R1A_A_NICAM_SER_MAX, 0x64);
+ tm6000_set_reg(dev, TM6010_REQ08_R1B_A_NICAM_SER_MIN, 0x20);
+ tm6000_set_reg(dev, REQ_08_SET_GET_AVREG_BIT, 0x1c, 0x00);
+ tm6000_set_reg(dev, REQ_08_SET_GET_AVREG_BIT, 0x1d, 0x00);
+ tm6000_set_reg(dev, TM6010_REQ08_R1E_A_GAIN_DEEMPH_OUT, 0x13);
+ tm6000_set_reg(dev, TM6010_REQ08_R1F_A_TEST_INTF_SEL, 0x00);
+ tm6000_set_reg(dev, TM6010_REQ08_R20_A_TEST_PIN_SEL, 0x00);
+ tm6000_set_reg(dev, TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3);
+ tm6000_set_reg(dev, TM6010_REQ08_R06_A_SOUND_MOD, 0x00);
+ tm6000_set_reg(dev, TM6010_REQ08_R01_A_INIT, 0x80);
+
+ } else {
+ /* Enables soft reset */
+ tm6000_set_reg(dev, TM6010_REQ07_R3F_RESET, 0x01);
+
+ if (dev->scaler) {
+ tm6000_set_reg(dev, TM6010_REQ07_RC0_ACTIVE_VIDEO_SOURCE, 0x20);
+ } else {
+ /* Enable Hfilter and disable TS Drop err */
+ tm6000_set_reg(dev, TM6010_REQ07_RC0_ACTIVE_VIDEO_SOURCE, 0x80);
+ }
+
+ tm6000_set_reg(dev, TM6010_REQ07_RC3_HSTART1, 0x88);
+ tm6000_set_reg(dev, TM6010_REQ07_RD8_IR_WAKEUP_SEL, 0x23);
+ tm6000_set_reg(dev, TM6010_REQ07_RD1_ADDR_FOR_REQ1, 0xc0);
+ tm6000_set_reg(dev, TM6010_REQ07_RD2_ADDR_FOR_REQ2, 0xd8);
+ tm6000_set_reg(dev, TM6010_REQ07_RD6_ENDP_REQ1_REQ2, 0x06);
+ tm6000_set_reg(dev, TM6010_REQ07_RD8_IR_PULSE_CNT0, 0x1f);
+
+ /* AP Software reset */
+ tm6000_set_reg(dev, TM6010_REQ07_RFF_SOFT_RESET, 0x08);
+ tm6000_set_reg(dev, TM6010_REQ07_RFF_SOFT_RESET, 0x00);
+
+ tm6000_set_fourcc_format(dev);
+
+ /* Disables soft reset */
+ tm6000_set_reg(dev, TM6010_REQ07_R3F_RESET, 0x00);
+
+ /* E3: Select input 0 - TV tuner */
+ tm6000_set_reg(dev, TM6010_REQ07_RE3_OUT_SEL1, 0x00);
+ tm6000_set_reg(dev, REQ_07_SET_GET_AVREG, 0xeb, 0x60);
+
+ /* This controls input */
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, TM6000_GPIO_2, 0x0);
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, TM6000_GPIO_3, 0x01);
+ }
+ msleep(20);
+
+ /* Tuner firmware can now be loaded */
+
+ /*FIXME: Hack!!! */
+ struct v4l2_frequency f;
+ mutex_lock(&dev->lock);
+ f.frequency=dev->freq;
+ v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_frequency, &f);
+ mutex_unlock(&dev->lock);
+
+ msleep(100);
+ tm6000_set_standard (dev, &dev->norm);
+ tm6000_set_audio_bitrate (dev,48000);
+
+ return 0;
+}
+
+int tm6000_init_digital_mode (struct tm6000_core *dev)
+{
+ if (dev->dev_type == TM6010) {
+ int val;
+ u8 buf[2];
+
+ /* digital init */
+ val = tm6000_get_reg(dev, TM6010_REQ07_RCC_ACTIVE_VIDEO_IF, 0);
+ val &= ~0x60;
+ tm6000_set_reg(dev, TM6010_REQ07_RCC_ACTIVE_VIDEO_IF, val);
+ val = tm6000_get_reg(dev, TM6010_REQ07_RC0_ACTIVE_VIDEO_SOURCE, 0);
+ val |= 0x40;
+ tm6000_set_reg(dev, TM6010_REQ07_RC0_ACTIVE_VIDEO_SOURCE, val);
+ tm6000_set_reg(dev, TM6010_REQ07_RFE_POWER_DOWN, 0x28);
+ tm6000_set_reg(dev, TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xfc);
+ tm6000_set_reg(dev, TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0xff);
+ tm6000_set_reg(dev, TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfe);
+ tm6000_read_write_usb (dev, 0xc0, 0x0e, 0x00c2, 0x0008, buf, 2);
+		printk(KERN_INFO "buf %#x %#x\n", buf[0], buf[1]);
+
+
+ } else {
+ tm6000_set_reg(dev, TM6010_REQ07_RFF_SOFT_RESET, 0x08);
+ tm6000_set_reg(dev, TM6010_REQ07_RFF_SOFT_RESET, 0x00);
+ tm6000_set_reg(dev, TM6010_REQ07_R3F_RESET, 0x01);
+ tm6000_set_reg(dev, TM6010_REQ07_RD8_IR_PULSE_CNT0, 0x08);
+ tm6000_set_reg(dev, TM6010_REQ07_RE2_OUT_SEL2, 0x0c);
+ tm6000_set_reg(dev, TM6010_REQ07_RE8_TYPESEL_MOS_I2S, 0xff);
+ tm6000_set_reg (dev, REQ_07_SET_GET_AVREG, 0x00eb, 0xd8);
+ tm6000_set_reg(dev, TM6010_REQ07_RC0_ACTIVE_VIDEO_SOURCE, 0x40);
+ tm6000_set_reg(dev, TM6010_REQ07_RC1_TRESHOLD, 0xd0);
+ tm6000_set_reg(dev, TM6010_REQ07_RC3_HSTART1, 0x09);
+ tm6000_set_reg(dev, TM6010_REQ07_RD8_IR_WAKEUP_SEL, 0x37);
+ tm6000_set_reg(dev, TM6010_REQ07_RD1_ADDR_FOR_REQ1, 0xd8);
+ tm6000_set_reg(dev, TM6010_REQ07_RD2_ADDR_FOR_REQ2, 0xc0);
+ tm6000_set_reg(dev, TM6010_REQ07_RD6_ENDP_REQ1_REQ2, 0x60);
+
+ tm6000_set_reg(dev, TM6010_REQ07_RE2_OUT_SEL2, 0x0c);
+ tm6000_set_reg(dev, TM6010_REQ07_RE8_TYPESEL_MOS_I2S, 0xff);
+ tm6000_set_reg (dev, REQ_07_SET_GET_AVREG, 0x00eb, 0x08);
+ msleep(50);
+
+ tm6000_set_reg (dev, REQ_04_EN_DISABLE_MCU_INT, 0x0020, 0x00);
+ msleep(50);
+ tm6000_set_reg (dev, REQ_04_EN_DISABLE_MCU_INT, 0x0020, 0x01);
+ msleep(50);
+ tm6000_set_reg (dev, REQ_04_EN_DISABLE_MCU_INT, 0x0020, 0x00);
+ msleep(100);
+ }
+ return 0;
+}
+
+struct reg_init {
+ u8 req;
+ u8 reg;
+ u8 val;
+};
+
+/* The meaning of these initializations is unknown */
+struct reg_init tm6000_init_tab[] = {
+ /* REG VALUE */
+ { TM6010_REQ07_RD8_IR_PULSE_CNT0, 0x1f },
+ { TM6010_REQ07_RFF_SOFT_RESET, 0x08 },
+ { TM6010_REQ07_RFF_SOFT_RESET, 0x00 },
+ { TM6010_REQ07_RD5_POWERSAVE, 0x4f },
+ { TM6010_REQ07_RD8_IR_WAKEUP_SEL, 0x23 },
+ { TM6010_REQ07_RD8_IR_WAKEUP_ADD, 0x08 },
+ { TM6010_REQ07_RE2_OUT_SEL2, 0x00 },
+ { TM6010_REQ07_RE3_OUT_SEL1, 0x10 },
+ { TM6010_REQ07_RE5_REMOTE_WAKEUP, 0x00 },
+ { TM6010_REQ07_RE8_TYPESEL_MOS_I2S, 0x00 },
+	{ REQ_07_SET_GET_AVREG,  0xeb, 0x64 }, /* 48000 Hz sample rate, external input */
+ { REQ_07_SET_GET_AVREG, 0xee, 0xc2 },
+ { TM6010_REQ07_R3F_RESET, 0x01 }, /* Start of soft reset */
+ { TM6010_REQ07_R00_VIDEO_CONTROL0, 0x00 },
+ { TM6010_REQ07_R01_VIDEO_CONTROL1, 0x07 },
+ { TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f },
+ { TM6010_REQ07_R03_YC_SEP_CONTROL, 0x00 },
+ { TM6010_REQ07_R05_NOISE_THRESHOLD, 0x64 },
+ { TM6010_REQ07_R07_OUTPUT_CONTROL, 0x01 },
+ { TM6010_REQ07_R08_LUMA_CONTRAST_ADJ, 0x82 },
+ { TM6010_REQ07_R09_LUMA_BRIGHTNESS_ADJ, 0x36 },
+ { TM6010_REQ07_R0A_CHROMA_SATURATION_ADJ, 0x50 },
+ { TM6010_REQ07_R0C_CHROMA_AGC_CONTROL, 0x6a },
+ { TM6010_REQ07_R11_AGC_PEAK_CONTROL, 0xc9 },
+ { TM6010_REQ07_R12_AGC_GATE_STARTH, 0x07 },
+ { TM6010_REQ07_R13_AGC_GATE_STARTL, 0x3b },
+ { TM6010_REQ07_R14_AGC_GATE_WIDTH, 0x47 },
+ { TM6010_REQ07_R15_AGC_BP_DELAY, 0x6f },
+ { TM6010_REQ07_R17_HLOOP_MAXSTATE, 0xcd },
+ { TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e },
+ { TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x8b },
+ { TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0xa2 },
+ { TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xe9 },
+ { TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c },
+ { TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc },
+ { TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc },
+ { TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd },
+ { TM6010_REQ07_R20_HSYNC_RISING_EDGE_TIME, 0x3c },
+ { TM6010_REQ07_R21_HSYNC_PHASE_OFFSET, 0x3c },
+ { TM6010_REQ07_R2D_CHROMA_BURST_END, 0x48 },
+ { TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x88 },
+ { TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x22 },
+ { TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0x61 },
+ { TM6010_REQ07_R32_VSYNC_HLOCK_MIN, 0x74 },
+ { TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x1c },
+ { TM6010_REQ07_R34_VSYNC_AGC_MIN, 0x74 },
+ { TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c },
+ { TM6010_REQ07_R36_VSYNC_VBI_MIN, 0x7a },
+ { TM6010_REQ07_R37_VSYNC_VBI_MAX, 0x26 },
+ { TM6010_REQ07_R38_VSYNC_THRESHOLD, 0x40 },
+ { TM6010_REQ07_R39_VSYNC_TIME_CONSTANT, 0x0a },
+ { TM6010_REQ07_R42_VBI_DATA_HIGH_LEVEL, 0x55 },
+ { TM6010_REQ07_R51_VBI_DATA_TYPE_LINE21, 0x11 },
+ { TM6010_REQ07_R55_VBI_LOOP_FILTER_GAIN, 0x01 },
+ { TM6010_REQ07_R57_VBI_LOOP_FILTER_P_GAIN, 0x02 },
+ { TM6010_REQ07_R58_VBI_CAPTION_DTO1, 0x35 },
+ { TM6010_REQ07_R59_VBI_CAPTION_DTO0, 0xa0 },
+ { TM6010_REQ07_R80_COMB_FILTER_TRESHOLD, 0x15 },
+ { TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x42 },
+ { TM6010_REQ07_RC1_TRESHOLD, 0xd0 },
+ { TM6010_REQ07_RC3_HSTART1, 0x88 },
+ { TM6010_REQ07_R3F_RESET, 0x00 }, /* End of the soft reset */
+ { TM6010_REQ05_R18_IMASK7, 0x00 },
+};
+
+struct reg_init tm6010_init_tab[] = {
+ { TM6010_REQ07_RC0_ACTIVE_VIDEO_SOURCE, 0x00 },
+ { TM6010_REQ07_RC4_HSTART0, 0xa0 },
+ { TM6010_REQ07_RC6_HEND0, 0x40 },
+ { TM6010_REQ07_RCA_VEND0, 0x31 },
+ { TM6010_REQ07_RCC_ACTIVE_VIDEO_IF, 0xe1 },
+ { TM6010_REQ07_RE0_DVIDEO_SOURCE, 0x03 },
+ { TM6010_REQ07_RFE_POWER_DOWN, 0x7f },
+
+ { TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0 },
+ { TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf4 },
+ { TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf8 },
+ { TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x00 },
+ { TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf2 },
+ { TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xf0 },
+ { TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2 },
+ { TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x60 },
+ { TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfc },
+
+ { TM6010_REQ07_R3F_RESET, 0x01 },
+ { TM6010_REQ07_R00_VIDEO_CONTROL0, 0x00 },
+ { TM6010_REQ07_R01_VIDEO_CONTROL1, 0x07 },
+ { TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f },
+ { TM6010_REQ07_R03_YC_SEP_CONTROL, 0x00 },
+ { TM6010_REQ07_R05_NOISE_THRESHOLD, 0x64 },
+ { TM6010_REQ07_R07_OUTPUT_CONTROL, 0x01 },
+ { TM6010_REQ07_R08_LUMA_CONTRAST_ADJ, 0x82 },
+ { TM6010_REQ07_R09_LUMA_BRIGHTNESS_ADJ, 0x36 },
+ { TM6010_REQ07_R0A_CHROMA_SATURATION_ADJ, 0x50 },
+ { TM6010_REQ07_R0C_CHROMA_AGC_CONTROL, 0x6a },
+ { TM6010_REQ07_R11_AGC_PEAK_CONTROL, 0xc9 },
+ { TM6010_REQ07_R12_AGC_GATE_STARTH, 0x07 },
+ { TM6010_REQ07_R13_AGC_GATE_STARTL, 0x3b },
+ { TM6010_REQ07_R14_AGC_GATE_WIDTH, 0x47 },
+ { TM6010_REQ07_R15_AGC_BP_DELAY, 0x6f },
+ { TM6010_REQ07_R17_HLOOP_MAXSTATE, 0xcd },
+ { TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e },
+ { TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x8b },
+ { TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0xa2 },
+ { TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xe9 },
+ { TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c },
+ { TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc },
+ { TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc },
+ { TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd },
+ { TM6010_REQ07_R20_HSYNC_RISING_EDGE_TIME, 0x3c },
+ { TM6010_REQ07_R21_HSYNC_PHASE_OFFSET, 0x3c },
+ { TM6010_REQ07_R2D_CHROMA_BURST_END, 0x48 },
+ { TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x88 },
+ { TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x22 },
+ { TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0x61 },
+ { TM6010_REQ07_R32_VSYNC_HLOCK_MIN, 0x74 },
+ { TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x1c },
+ { TM6010_REQ07_R34_VSYNC_AGC_MIN, 0x74 },
+ { TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c },
+ { TM6010_REQ07_R36_VSYNC_VBI_MIN, 0x7a },
+ { TM6010_REQ07_R37_VSYNC_VBI_MAX, 0x26 },
+ { TM6010_REQ07_R38_VSYNC_THRESHOLD, 0x40 },
+ { TM6010_REQ07_R39_VSYNC_TIME_CONSTANT, 0x0a },
+ { TM6010_REQ07_R42_VBI_DATA_HIGH_LEVEL, 0x55 },
+ { TM6010_REQ07_R51_VBI_DATA_TYPE_LINE21, 0x11 },
+ { TM6010_REQ07_R55_VBI_LOOP_FILTER_GAIN, 0x01 },
+ { TM6010_REQ07_R57_VBI_LOOP_FILTER_P_GAIN, 0x02 },
+ { TM6010_REQ07_R58_VBI_CAPTION_DTO1, 0x35 },
+ { TM6010_REQ07_R59_VBI_CAPTION_DTO0, 0xa0 },
+ { TM6010_REQ07_R80_COMB_FILTER_TRESHOLD, 0x15 },
+ { TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x42 },
+ { TM6010_REQ07_RC1_TRESHOLD, 0xd0 },
+ { TM6010_REQ07_RC3_HSTART1, 0x88 },
+ { TM6010_REQ07_R3F_RESET, 0x00 },
+
+ { TM6010_REQ05_R18_IMASK7, 0x00 },
+
+ { TM6010_REQ07_RD8_IR_LEADER1, 0xaa },
+ { TM6010_REQ07_RD8_IR_LEADER0, 0x30 },
+ { TM6010_REQ07_RD8_IR_PULSE_CNT1, 0x20 },
+ { TM6010_REQ07_RD8_IR_PULSE_CNT0, 0xd0 },
+ { REQ_04_EN_DISABLE_MCU_INT, 0x02, 0x00 },
+ { TM6010_REQ07_RD8_IR, 0x2f },
+
+	/* set remote wakeup key: any key wakes the device */
+ { TM6010_REQ07_RE5_REMOTE_WAKEUP, 0xfe },
+ { TM6010_REQ07_RD8_IR_WAKEUP_SEL, 0xff },
+};
+
+int tm6000_init (struct tm6000_core *dev)
+{
+ int board, rc=0, i, size;
+ struct reg_init *tab;
+
+ if (dev->dev_type == TM6010) {
+ tab = tm6010_init_tab;
+ size = ARRAY_SIZE(tm6010_init_tab);
+ } else {
+ tab = tm6000_init_tab;
+ size = ARRAY_SIZE(tm6000_init_tab);
+ }
+
+ /* Load board's initialization table */
+ for (i=0; i< size; i++) {
+ rc= tm6000_set_reg (dev, tab[i].req, tab[i].reg, tab[i].val);
+ if (rc<0) {
+ printk (KERN_ERR "Error %i while setting req %d, "
+ "reg %d to value %d\n", rc,
+ tab[i].req,tab[i].reg, tab[i].val);
+ return rc;
+ }
+ }
+
+ msleep(5); /* Just to be conservative */
+
+ /* Check board version - maybe 10Moons specific */
+ board=tm6000_get_reg32 (dev, REQ_40_GET_VERSION, 0, 0);
+ if (board >=0) {
+ printk (KERN_INFO "Board version = 0x%08x\n",board);
+ } else {
+ printk (KERN_ERR "Error %i while retrieving board version\n",board);
+ }
+
+ rc = tm6000_cards_setup(dev);
+
+ return rc;
+}
+
+int tm6000_set_audio_bitrate(struct tm6000_core *dev, int bitrate)
+{
+ int val;
+
+ val=tm6000_get_reg (dev, REQ_07_SET_GET_AVREG, 0xeb, 0x0);
+	printk("Original value=%d\n", val);
+ if (val<0)
+ return val;
+
+ val &= 0x0f; /* Preserve the audio input control bits */
+ switch (bitrate) {
+ case 44100:
+ val|=0xd0;
+ dev->audio_bitrate=bitrate;
+ break;
+ case 48000:
+ val|=0x60;
+ dev->audio_bitrate=bitrate;
+ break;
+ }
+ val=tm6000_set_reg (dev, REQ_07_SET_GET_AVREG, 0xeb, val);
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(tm6000_set_audio_bitrate);
diff --git a/drivers/staging/tm6000/tm6000-dvb.c b/drivers/staging/tm6000/tm6000-dvb.c
new file mode 100644
index 000000000000..f87c8d89ba62
--- /dev/null
+++ b/drivers/staging/tm6000/tm6000-dvb.c
@@ -0,0 +1,347 @@
+/*
+ tm6000-dvb.c - dvb-t support for TM5600/TM6000/TM6010 USB video capture devices
+
+ Copyright (C) 2007 Michel Ludwig <michel.ludwig@gmail.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation version 2
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+#include "tm6000.h"
+#include "tm6000-regs.h"
+
+#include "zl10353.h"
+
+#include <media/tuner.h>
+
+#include "tuner-xc2028.h"
+
+static inline void print_err_status(struct tm6000_core *dev,
+ int packet, int status)
+{
+ char *errmsg = "Unknown";
+
+ switch(status) {
+ case -ENOENT:
+		errmsg = "unlinked synchronously";
+ break;
+ case -ECONNRESET:
+		errmsg = "unlinked asynchronously";
+ break;
+ case -ENOSR:
+ errmsg = "Buffer error (overrun)";
+ break;
+ case -EPIPE:
+ errmsg = "Stalled (device not responding)";
+ break;
+ case -EOVERFLOW:
+ errmsg = "Babble (bad cable?)";
+ break;
+ case -EPROTO:
+ errmsg = "Bit-stuff error (bad cable?)";
+ break;
+ case -EILSEQ:
+ errmsg = "CRC/Timeout (could be anything)";
+ break;
+ case -ETIME:
+ errmsg = "Device does not respond";
+ break;
+ }
+ if (packet<0) {
+ dprintk(dev, 1, "URB status %d [%s].\n",
+ status, errmsg);
+ } else {
+ dprintk(dev, 1, "URB packet %d, status %d [%s].\n",
+ packet, status, errmsg);
+ }
+}
+
+static void tm6000_urb_received(struct urb *urb)
+{
+ int ret;
+ struct tm6000_core* dev = urb->context;
+
+ if(urb->status != 0) {
+ print_err_status (dev,0,urb->status);
+ }
+ else if(urb->actual_length>0){
+ dvb_dmx_swfilter(&dev->dvb->demux, urb->transfer_buffer,
+ urb->actual_length);
+ }
+
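+	/* keep the bulk transfer running while at least one feed is still active */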
+ if(dev->dvb->streams > 0) {
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if(ret < 0) {
+			printk(KERN_ERR "tm6000: error resubmitting urb in %s\n", __FUNCTION__);
+ kfree(urb->transfer_buffer);
+ usb_free_urb(urb);
+ }
+ }
+}
+
+int tm6000_start_stream(struct tm6000_core *dev)
+{
+ int ret;
+ unsigned int pipe, size;
+ struct tm6000_dvb *dvb = dev->dvb;
+
+ printk(KERN_INFO "tm6000: got start stream request %s\n",__FUNCTION__);
+
+ tm6000_init_digital_mode(dev);
+
+ dvb->bulk_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if(dvb->bulk_urb == NULL) {
+ printk(KERN_ERR "tm6000: couldn't allocate urb\n");
+ return -ENOMEM;
+ }
+
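+	/* the transport stream is read from the bulk IN endpoint found during probe */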
+ pipe = usb_rcvbulkpipe(dev->udev, dev->bulk_in.endp->desc.bEndpointAddress
+ & USB_ENDPOINT_NUMBER_MASK);
+
+ size = usb_maxpacket(dev->udev, pipe, usb_pipeout(pipe));
+ size = size * 15; /* 512 x 8 or 12 or 15 */
+
+ dvb->bulk_urb->transfer_buffer = kzalloc(size, GFP_KERNEL);
+ if(dvb->bulk_urb->transfer_buffer == NULL) {
+ usb_free_urb(dvb->bulk_urb);
+ printk(KERN_ERR "tm6000: couldn't allocate transfer buffer!\n");
+ return -ENOMEM;
+ }
+
+ usb_fill_bulk_urb(dvb->bulk_urb, dev->udev, pipe,
+ dvb->bulk_urb->transfer_buffer,
+ size,
+ tm6000_urb_received, dev);
+
+ ret = usb_clear_halt(dev->udev, pipe);
+ if(ret < 0) {
+ printk(KERN_ERR "tm6000: error %i in %s during pipe reset\n",ret,__FUNCTION__);
+ return ret;
+ }
+ else {
+		printk(KERN_ERR "tm6000: pipe reset\n");
+ }
+
+/* mutex_lock(&tm6000_driver.open_close_mutex); */
+ ret = usb_submit_urb(dvb->bulk_urb, GFP_KERNEL);
+
+/* mutex_unlock(&tm6000_driver.open_close_mutex); */
+ if (ret) {
+ printk(KERN_ERR "tm6000: submit of urb failed (error=%i)\n",ret);
+
+ kfree(dvb->bulk_urb->transfer_buffer);
+ usb_free_urb(dvb->bulk_urb);
+ return ret;
+ }
+
+ return 0;
+}
+
+void tm6000_stop_stream(struct tm6000_core *dev)
+{
+ struct tm6000_dvb *dvb = dev->dvb;
+
+ if(dvb->bulk_urb) {
+ printk (KERN_INFO "urb killing\n");
+ usb_kill_urb(dvb->bulk_urb);
+ printk (KERN_INFO "urb buffer free\n");
+ kfree(dvb->bulk_urb->transfer_buffer);
+ usb_free_urb(dvb->bulk_urb);
+ dvb->bulk_urb = NULL;
+ }
+}
+
+int tm6000_start_feed(struct dvb_demux_feed *feed)
+{
+ struct dvb_demux *demux = feed->demux;
+ struct tm6000_core *dev = demux->priv;
+ struct tm6000_dvb *dvb = dev->dvb;
+ printk(KERN_INFO "tm6000: got start feed request %s\n",__FUNCTION__);
+
+ mutex_lock(&dvb->mutex);
+ if(dvb->streams == 0) {
+ dvb->streams = 1;
+/* mutex_init(&tm6000_dev->streming_mutex); */
+ tm6000_start_stream(dev);
+ }
+ else {
+ ++(dvb->streams);
+ }
+ mutex_unlock(&dvb->mutex);
+
+ return 0;
+}
+
+int tm6000_stop_feed(struct dvb_demux_feed *feed) {
+ struct dvb_demux *demux = feed->demux;
+ struct tm6000_core *dev = demux->priv;
+ struct tm6000_dvb *dvb = dev->dvb;
+
+ printk(KERN_INFO "tm6000: got stop feed request %s\n",__FUNCTION__);
+
+ mutex_lock(&dvb->mutex);
+
+ printk (KERN_INFO "stream %#x\n", dvb->streams);
+ --(dvb->streams);
+ if(dvb->streams == 0) {
+ printk (KERN_INFO "stop stream\n");
+ tm6000_stop_stream(dev);
+/* mutex_destroy(&tm6000_dev->streaming_mutex); */
+ }
+ mutex_unlock(&dvb->mutex);
+/* mutex_destroy(&tm6000_dev->streaming_mutex); */
+
+ return 0;
+}
+
+int tm6000_dvb_attach_frontend(struct tm6000_core *dev)
+{
+ struct tm6000_dvb *dvb = dev->dvb;
+
+ if(dev->caps.has_zl10353) {
+ struct zl10353_config config =
+ {.demod_address = dev->demod_addr,
+ .no_tuner = 1,
+ .parallel_ts = 1,
+ .if2 = 45700,
+ .disable_i2c_gate_ctrl = 1,
+ };
+
+ dvb->frontend = dvb_attach(zl10353_attach, &config,
+ &dev->i2c_adap);
+ }
+ else {
+ printk(KERN_ERR "tm6000: no frontend defined for the device!\n");
+ return -1;
+ }
+
+ return (!dvb->frontend) ? -1 : 0;
+}
+
+DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+
+int tm6000_dvb_register(struct tm6000_core *dev)
+{
+ int ret = -1;
+ struct tm6000_dvb *dvb = dev->dvb;
+
+ mutex_init(&dvb->mutex);
+
+ dvb->streams = 0;
+
+ /* attach the frontend */
+ ret = tm6000_dvb_attach_frontend(dev);
+ if(ret < 0) {
+ printk(KERN_ERR "tm6000: couldn't attach the frontend!\n");
+ goto err;
+ }
+
+ ret = dvb_register_adapter(&dvb->adapter, "Trident TVMaster 6000 DVB-T",
+ THIS_MODULE, &dev->udev->dev, adapter_nr);
+ dvb->adapter.priv = dev;
+
+ if (dvb->frontend) {
+ struct xc2028_config cfg = {
+ .i2c_adap = &dev->i2c_adap,
+ .i2c_addr = dev->tuner_addr,
+ };
+
+ dvb->frontend->callback = tm6000_tuner_callback;
+ ret = dvb_register_frontend(&dvb->adapter, dvb->frontend);
+ if (ret < 0) {
+ printk(KERN_ERR
+ "tm6000: couldn't register frontend\n");
+ goto adapter_err;
+ }
+
+ if (!dvb_attach(xc2028_attach, dvb->frontend, &cfg)) {
+ printk(KERN_ERR "tm6000: couldn't register "
+ "frontend (xc3028)\n");
+ ret = -EINVAL;
+ goto frontend_err;
+ }
+ printk(KERN_INFO "tm6000: XC2028/3028 asked to be "
+ "attached to frontend!\n");
+ } else {
+ printk(KERN_ERR "tm6000: no frontend found\n");
+ }
+
+ dvb->demux.dmx.capabilities = DMX_TS_FILTERING | DMX_SECTION_FILTERING
+ | DMX_MEMORY_BASED_FILTERING;
+ dvb->demux.priv = dev;
+ dvb->demux.filternum = 8;
+ dvb->demux.feednum = 8;
+ dvb->demux.start_feed = tm6000_start_feed;
+ dvb->demux.stop_feed = tm6000_stop_feed;
+ dvb->demux.write_to_decoder = NULL;
+ ret = dvb_dmx_init(&dvb->demux);
+ if(ret < 0) {
+ printk("tm6000: dvb_dmx_init failed (errno = %d)\n", ret);
+ goto frontend_err;
+ }
+
+ dvb->dmxdev.filternum = dev->dvb->demux.filternum;
+ dvb->dmxdev.demux = &dev->dvb->demux.dmx;
+ dvb->dmxdev.capabilities = 0;
+
+ ret = dvb_dmxdev_init(&dvb->dmxdev, &dvb->adapter);
+ if(ret < 0) {
+ printk("tm6000: dvb_dmxdev_init failed (errno = %d)\n", ret);
+ goto dvb_dmx_err;
+ }
+
+ return 0;
+
+dvb_dmx_err:
+ dvb_dmx_release(&dvb->demux);
+frontend_err:
+ if(dvb->frontend) {
+ dvb_frontend_detach(dvb->frontend);
+ dvb_unregister_frontend(dvb->frontend);
+ }
+adapter_err:
+ dvb_unregister_adapter(&dvb->adapter);
+err:
+ return ret;
+}
+
+void tm6000_dvb_unregister(struct tm6000_core *dev)
+{
+ struct tm6000_dvb *dvb = dev->dvb;
+
+ if(dvb->bulk_urb != NULL) {
+ struct urb *bulk_urb = dvb->bulk_urb;
+
+ kfree(bulk_urb->transfer_buffer);
+ bulk_urb->transfer_buffer = NULL;
+ usb_unlink_urb(bulk_urb);
+ usb_free_urb(bulk_urb);
+ }
+
+/* mutex_lock(&tm6000_driver.open_close_mutex); */
+ if(dvb->frontend) {
+ dvb_frontend_detach(dvb->frontend);
+ dvb_unregister_frontend(dvb->frontend);
+ }
+
+ dvb_dmxdev_release(&dvb->dmxdev);
+ dvb_dmx_release(&dvb->demux);
+ dvb_unregister_adapter(&dvb->adapter);
+ mutex_destroy(&dvb->mutex);
+/* mutex_unlock(&tm6000_driver.open_close_mutex); */
+
+}
diff --git a/drivers/staging/tm6000/tm6000-i2c.c b/drivers/staging/tm6000/tm6000-i2c.c
new file mode 100644
index 000000000000..94ff489a1bbb
--- /dev/null
+++ b/drivers/staging/tm6000/tm6000-i2c.c
@@ -0,0 +1,370 @@
+/*
+ tm6000-i2c.c - driver for TM5600/TM6000/TM6010 USB video capture devices
+
+ Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+
+ Copyright (C) 2007 Michel Ludwig <michel.ludwig@gmail.com>
+ - Fix SMBus Read Byte command
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation version 2
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/usb.h>
+#include <linux/i2c.h>
+
+#include "tm6000.h"
+#include "tm6000-regs.h"
+#include <media/v4l2-common.h>
+#include <media/tuner.h>
+#include "tuner-xc2028.h"
+
+
+/*FIXME: Hack to avoid needing to patch i2c-id.h */
+#define I2C_HW_B_TM6000 I2C_HW_B_EM28XX
+/* ----------------------------------------------------------- */
+
+static unsigned int i2c_debug = 0;
+module_param(i2c_debug, int, 0644);
+MODULE_PARM_DESC(i2c_debug, "enable debug messages [i2c]");
+
+#define i2c_dprintk(lvl,fmt, args...) if (i2c_debug>=lvl) do{ \
+ printk(KERN_DEBUG "%s at %s: " fmt, \
+ dev->name, __FUNCTION__ , ##args); } while (0)
+
+static int tm6000_i2c_send_regs(struct tm6000_core *dev, unsigned char addr,
+ __u8 reg, char *buf, int len)
+{
+ int rc;
+ unsigned int tsleep;
+ unsigned int i2c_packet_limit = 16;
+
+ if (dev->dev_type == TM6010)
+ i2c_packet_limit = 64;
+
+ if (!buf)
+ return -1;
+
+ if (len < 1 || len > i2c_packet_limit) {
+ printk(KERN_ERR "Incorrect length of i2c packet = %d, limit set to %d\n",
+ len, i2c_packet_limit);
+ return -1;
+ }
+
+ /* capture mutex */
+ rc = tm6000_read_write_usb(dev, USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE, REQ_16_SET_GET_I2C_WR1_RDN,
+ addr | reg << 8, 0, buf, len);
+
+ if (rc < 0) {
+ /* release mutex */
+ return rc;
+ }
+
+ /* Calculate delay time, 14000us for 64 bytes */
+ tsleep = ((len * 200) + 200 + 1000) / 1000;
+ msleep(tsleep);
+
+ /* release mutex */
+ return rc;
+}
+
+/* Generic read - does not work well with 16-bit registers */
+static int tm6000_i2c_recv_regs(struct tm6000_core *dev, unsigned char addr,
+ __u8 reg, char *buf, int len)
+{
+ int rc;
+ u8 b[2];
+ unsigned int i2c_packet_limit = 16;
+
+ if (dev->dev_type == TM6010)
+ i2c_packet_limit = 64;
+
+ if (!buf)
+ return -1;
+
+ if (len < 1 || len > i2c_packet_limit) {
+ printk(KERN_ERR "Incorrect length of i2c packet = %d, limit set to %d\n",
+ len, i2c_packet_limit);
+ return -1;
+ }
+
+ /* capture mutex */
+ if ((dev->caps.has_zl10353) && (dev->demod_addr << 1 == addr) && (reg % 2 == 0)) {
+ /*
+ * Workaround an I2C bug when reading from zl10353
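+		 * (read the preceding register as well and hand back only the second byte)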
+ */
+ reg -= 1;
+ len += 1;
+
+ rc = tm6000_read_write_usb(dev, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ REQ_16_SET_GET_I2C_WR1_RDN, addr | reg << 8, 0, b, len);
+
+ *buf = b[1];
+ } else {
+ rc = tm6000_read_write_usb(dev, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ REQ_16_SET_GET_I2C_WR1_RDN, addr | reg << 8, 0, buf, len);
+ }
+
+ /* release mutex */
+ return rc;
+}
+
+/*
+ * read from a 16bit register
+ * for example xc2028, xc3028 or xc3028L
+ */
+static int tm6000_i2c_recv_regs16(struct tm6000_core *dev, unsigned char addr,
+ __u16 reg, char *buf, int len)
+{
+ int rc;
+ unsigned char ureg;
+
+ if (!buf || len != 2)
+ return -1;
+
+ /* capture mutex */
+ if (dev->dev_type == TM6010) {
+ ureg = reg & 0xFF;
+ rc = tm6000_read_write_usb(dev, USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE, REQ_16_SET_GET_I2C_WR1_RDN,
+ addr | (reg & 0xFF00), 0, &ureg, 1);
+
+ if (rc < 0) {
+ /* release mutex */
+ return rc;
+ }
+
+ msleep(1400 / 1000);
+ rc = tm6000_read_write_usb(dev, USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE, REQ_35_AFTEK_TUNER_READ,
+ reg, 0, buf, len);
+ } else {
+ rc = tm6000_read_write_usb(dev, USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE, REQ_14_SET_GET_I2C_WR2_RDN,
+ addr, reg, buf, len);
+ }
+
+ /* release mutex */
+ return rc;
+}
+
+static int tm6000_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg msgs[], int num)
+{
+ struct tm6000_core *dev = i2c_adap->algo_data;
+ int addr, rc, i, byte;
+
+ if (num <= 0)
+ return 0;
+ for (i = 0; i < num; i++) {
+ addr = (msgs[i].addr << 1) & 0xff;
+ i2c_dprintk(2,"%s %s addr=0x%x len=%d:",
+ (msgs[i].flags & I2C_M_RD) ? "read" : "write",
+ i == num - 1 ? "stop" : "nonstop", addr, msgs[i].len);
+ if (msgs[i].flags & I2C_M_RD) {
+ /* read request without preceding register selection */
+ /*
+ * The TM6000 only supports a read transaction
+ * immediately after a 1 or 2 byte write to select
+ * a register. We cannot fulfil this request.
+ */
+ i2c_dprintk(2, " read without preceding write not"
+ " supported");
+ rc = -EOPNOTSUPP;
+ goto err;
+ } else if (i + 1 < num && msgs[i].len <= 2 &&
+ (msgs[i + 1].flags & I2C_M_RD) &&
+ msgs[i].addr == msgs[i + 1].addr) {
+ /* 1 or 2 byte write followed by a read */
+ if (i2c_debug >= 2)
+ for (byte = 0; byte < msgs[i].len; byte++)
+ printk(" %02x", msgs[i].buf[byte]);
+ i2c_dprintk(2, "; joined to read %s len=%d:",
+ i == num - 2 ? "stop" : "nonstop",
+ msgs[i + 1].len);
+
+ if (msgs[i].len == 2) {
+ rc = tm6000_i2c_recv_regs16(dev, addr,
+ msgs[i].buf[0] << 8 | msgs[i].buf[1],
+ msgs[i + 1].buf, msgs[i + 1].len);
+ } else {
+ rc = tm6000_i2c_recv_regs(dev, addr, msgs[i].buf[0],
+ msgs[i + 1].buf, msgs[i + 1].len);
+ }
+
+ i++;
+
+ if (addr == dev->tuner_addr << 1) {
+ tm6000_set_reg(dev, REQ_50_SET_START, 0, 0);
+ tm6000_set_reg(dev, REQ_51_SET_STOP, 0, 0);
+ }
+ if (i2c_debug >= 2)
+ for (byte = 0; byte < msgs[i].len; byte++)
+ printk(" %02x", msgs[i].buf[byte]);
+ } else {
+ /* write bytes */
+ if (i2c_debug >= 2)
+ for (byte = 0; byte < msgs[i].len; byte++)
+ printk(" %02x", msgs[i].buf[byte]);
+ rc = tm6000_i2c_send_regs(dev, addr, msgs[i].buf[0],
+ msgs[i].buf + 1, msgs[i].len - 1);
+
+ if (addr == dev->tuner_addr << 1) {
+ tm6000_set_reg(dev, REQ_50_SET_START, 0, 0);
+ tm6000_set_reg(dev, REQ_51_SET_STOP, 0, 0);
+ }
+ }
+ if (i2c_debug >= 2)
+ printk("\n");
+ if (rc < 0)
+ goto err;
+ }
+
+ return num;
+err:
+ i2c_dprintk(2," ERROR: %i\n", rc);
+ return rc;
+}
+
+static int tm6000_i2c_eeprom(struct tm6000_core *dev,
+ unsigned char *eedata, int len)
+{
+ int i, rc;
+ unsigned char *p = eedata;
+ unsigned char bytes[17];
+
+ dev->i2c_client.addr = 0xa0 >> 1;
+
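+	/* read the eeprom one byte at a time and dump it, 16 bytes per line with an ASCII column */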
+ bytes[16] = '\0';
+ for (i = 0; i < len; ) {
+ *p = i;
+ rc = tm6000_i2c_recv_regs(dev, 0xa0, i, p, 1);
+ if (rc < 1) {
+ if (p == eedata)
+ goto noeeprom;
+ else {
+ printk(KERN_WARNING
+ "%s: i2c eeprom read error (err=%d)\n",
+ dev->name, rc);
+ }
+ return -1;
+ }
+ p++;
+ if (0 == (i % 16))
+ printk(KERN_INFO "%s: i2c eeprom %02x:", dev->name, i);
+ printk(" %02x", eedata[i]);
+ if ((eedata[i] >= ' ') && (eedata[i] <= 'z')) {
+ bytes[i%16] = eedata[i];
+ } else {
+ bytes[i%16]='.';
+ }
+
+ i++;
+
+ if (0 == (i % 16)) {
+ bytes[16] = '\0';
+ printk(" %s\n", bytes);
+ }
+ }
+	/* pad and flush the last, partial line (complete lines were already flushed inside the loop) */
+	if (0 != (i % 16)) {
+		bytes[i % 16] = '\0';
+		for (i %= 16; i < 16; i++)
+			printk(" ");
+		printk(" %s\n", bytes);
+	}
+
+ return 0;
+
+noeeprom:
+ printk(KERN_INFO "%s: Huh, no eeprom present (err=%d)?\n",
+ dev->name, rc);
+ return rc;
+}
+
+/* ----------------------------------------------------------- */
+
+/*
+ * functionality()
+ */
+static u32 functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_SMBUS_EMUL;
+}
+
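+/* helper: writes a fixed block of bytes to an i2c address with a single vendor request; on error it logs and returns from the calling function */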
+#define mass_write(addr, reg, data...) \
+ { const static u8 _val[] = data; \
+ rc=tm6000_read_write_usb(dev,USB_DIR_OUT | USB_TYPE_VENDOR, \
+ REQ_16_SET_GET_I2C_WR1_RDN,(reg<<8)+addr, 0x00, (u8 *) _val, \
+ ARRAY_SIZE(_val)); \
+ if (rc<0) { \
+ printk(KERN_ERR "Error on line %d: %d\n",__LINE__,rc); \
+ return rc; \
+ } \
+ msleep (10); \
+ }
+
+static struct i2c_algorithm tm6000_algo = {
+ .master_xfer = tm6000_i2c_xfer,
+ .functionality = functionality,
+};
+
+static struct i2c_adapter tm6000_adap_template = {
+ .owner = THIS_MODULE,
+ .class = I2C_CLASS_TV_ANALOG | I2C_CLASS_TV_DIGITAL,
+ .name = "tm6000",
+ .id = I2C_HW_B_TM6000,
+ .algo = &tm6000_algo,
+};
+
+static struct i2c_client tm6000_client_template = {
+ .name = "tm6000 internal",
+};
+
+/* ----------------------------------------------------------- */
+
+/*
+ * tm6000_i2c_register()
+ * register i2c bus
+ */
+int tm6000_i2c_register(struct tm6000_core *dev)
+{
+ unsigned char eedata[256];
+
+ dev->i2c_adap = tm6000_adap_template;
+ dev->i2c_adap.dev.parent = &dev->udev->dev;
+ strcpy(dev->i2c_adap.name, dev->name);
+ dev->i2c_adap.algo_data = dev;
+ i2c_add_adapter(&dev->i2c_adap);
+
+ dev->i2c_client = tm6000_client_template;
+ dev->i2c_client.adapter = &dev->i2c_adap;
+
+ i2c_set_adapdata(&dev->i2c_adap, &dev->v4l2_dev);
+
+ tm6000_i2c_eeprom(dev, eedata, sizeof(eedata));
+
+ return 0;
+}
+
+/*
+ * tm6000_i2c_unregister()
+ * unregister i2c_bus
+ */
+int tm6000_i2c_unregister(struct tm6000_core *dev)
+{
+ i2c_del_adapter(&dev->i2c_adap);
+ return 0;
+}
diff --git a/drivers/staging/tm6000/tm6000-regs.h b/drivers/staging/tm6000/tm6000-regs.h
new file mode 100644
index 000000000000..1c5289c971fa
--- /dev/null
+++ b/drivers/staging/tm6000/tm6000-regs.h
@@ -0,0 +1,541 @@
+/*
+ tm6000-regs.h - driver for TM5600/TM6000/TM6010 USB video capture devices
+
+ Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation version 2
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * Define TV Master TM5600/TM6000/TM6010 Request codes
+ */
+#define REQ_00_SET_IR_VALUE 0
+#define REQ_01_SET_WAKEUP_IRCODE 1
+#define REQ_02_GET_IR_CODE 2
+#define REQ_03_SET_GET_MCU_PIN 3
+#define REQ_04_EN_DISABLE_MCU_INT 4
+#define REQ_05_SET_GET_USBREG 5
+ /* Write: RegNum, Value, 0 */
+ /* Read : RegNum, Value, 1, RegStatus */
+#define REQ_06_SET_GET_USBREG_BIT 6
+#define REQ_07_SET_GET_AVREG 7
+ /* Write: RegNum, Value, 0 */
+ /* Read : RegNum, Value, 1, RegStatus */
+#define REQ_08_SET_GET_AVREG_BIT 8
+#define REQ_09_SET_GET_TUNER_FQ 9
+#define REQ_10_SET_TUNER_SYSTEM 10
+#define REQ_11_SET_EEPROM_ADDR 11
+#define REQ_12_SET_GET_EEPROMBYTE 12
+#define REQ_13_GET_EEPROM_SEQREAD 13
+#define REQ_14_SET_GET_I2C_WR2_RDN 14
+#define REQ_15_SET_GET_I2CBYTE 15
+ /* Write: Subaddr, Slave Addr, value, 0 */
+ /* Read : Subaddr, Slave Addr, value, 1 */
+#define REQ_16_SET_GET_I2C_WR1_RDN 16
+ /* Subaddr, Slave Addr, 0, length */
+#define REQ_17_SET_GET_I2CFP 17
+ /* Write: Slave Addr, register, value */
+ /* Read : Slave Addr, register, 2, data */
+#define REQ_20_DATA_TRANSFER 20
+#define REQ_30_I2C_WRITE 30
+#define REQ_31_I2C_READ 31
+#define REQ_35_AFTEK_TUNER_READ 35
+#define REQ_40_GET_VERSION 40
+#define REQ_50_SET_START 50
+#define REQ_51_SET_STOP 51
+#define REQ_52_TRANSMIT_DATA 52
+#define REQ_53_SPI_INITIAL 53
+#define REQ_54_SPI_SETSTART 54
+#define REQ_55_SPI_INOUTDATA 55
+#define REQ_56_SPI_SETSTOP 56
+
+/*
+ * Define TV Master TM5600/TM6000/TM6010 GPIO lines
+ */
+
+#define TM6000_GPIO_CLK 0x101
+#define TM6000_GPIO_DATA 0x100
+
+#define TM6000_GPIO_1 0x102
+#define TM6000_GPIO_2 0x103
+#define TM6000_GPIO_3 0x104
+#define TM6000_GPIO_4 0x300
+#define TM6000_GPIO_5 0x301
+#define TM6000_GPIO_6 0x304
+#define TM6000_GPIO_7 0x305
+
+/* tm6010 defines GPIO with different values */
+#define TM6010_GPIO_0 0x0102
+#define TM6010_GPIO_1 0x0103
+#define TM6010_GPIO_2 0x0104
+#define TM6010_GPIO_3 0x0105
+#define TM6010_GPIO_4 0x0106
+#define TM6010_GPIO_5 0x0107
+#define TM6010_GPIO_6 0x0300
+#define TM6010_GPIO_7 0x0301
+#define TM6010_GPIO_9 0x0305
+/*
+ * Define TV Master TM5600/TM6000/TM6010 URB message codes and length
+ */
+
+enum {
+ TM6000_URB_MSG_VIDEO=1,
+ TM6000_URB_MSG_AUDIO,
+ TM6000_URB_MSG_VBI,
+ TM6000_URB_MSG_PTS,
+ TM6000_URB_MSG_ERR,
+};
+
+/* Define TM6000/TM6010 Video decoder registers */
+#define TM6010_REQ07_R00_VIDEO_CONTROL0 0x07, 0x00
+#define TM6010_REQ07_R01_VIDEO_CONTROL1 0x07, 0x01
+#define TM6010_REQ07_R02_VIDEO_CONTROL2 0x07, 0x02
+#define TM6010_REQ07_R03_YC_SEP_CONTROL 0x07, 0x03
+#define TM6010_REQ07_R04_LUMA_HAGC_CONTROL 0x07, 0x04
+#define TM6010_REQ07_R05_NOISE_THRESHOLD 0x07, 0x05
+#define TM6010_REQ07_R06_AGC_GATE_THRESHOLD 0x07, 0x06
+#define TM6010_REQ07_R07_OUTPUT_CONTROL 0x07, 0x07
+#define TM6010_REQ07_R08_LUMA_CONTRAST_ADJ 0x07, 0x08
+#define TM6010_REQ07_R09_LUMA_BRIGHTNESS_ADJ 0x07, 0x09
+#define TM6010_REQ07_R0A_CHROMA_SATURATION_ADJ 0x07, 0x0a
+#define TM6010_REQ07_R0B_CHROMA_HUE_PHASE_ADJ 0x07, 0x0b
+#define TM6010_REQ07_R0C_CHROMA_AGC_CONTROL 0x07, 0x0c
+#define TM6010_REQ07_R0D_CHROMA_KILL_LEVEL 0x07, 0x0d
+#define TM6010_REQ07_R0F_CHROMA_AUTO_POSITION 0x07, 0x0f
+#define TM6010_REQ07_R10_AGC_PEAK_NOMINAL 0x07, 0x10
+#define TM6010_REQ07_R11_AGC_PEAK_CONTROL 0x07, 0x11
+#define TM6010_REQ07_R12_AGC_GATE_STARTH 0x07, 0x12
+#define TM6010_REQ07_R13_AGC_GATE_STARTL 0x07, 0x13
+#define TM6010_REQ07_R14_AGC_GATE_WIDTH 0x07, 0x14
+#define TM6010_REQ07_R15_AGC_BP_DELAY 0x07, 0x15
+#define TM6010_REQ07_R16_LOCK_COUNT 0x07, 0x16
+#define TM6010_REQ07_R17_HLOOP_MAXSTATE 0x07, 0x17
+#define TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3 0x07, 0x18
+#define TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2 0x07, 0x19
+#define TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1 0x07, 0x1a
+#define TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0 0x07, 0x1b
+#define TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3 0x07, 0x1c
+#define TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2 0x07, 0x1d
+#define TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1 0x07, 0x1e
+#define TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0 0x07, 0x1f
+#define TM6010_REQ07_R20_HSYNC_RISING_EDGE_TIME 0x07, 0x20
+#define TM6010_REQ07_R21_HSYNC_PHASE_OFFSET 0x07, 0x21
+#define TM6010_REQ07_R22_HSYNC_PLL_START_TIME 0x07, 0x22
+#define TM6010_REQ07_R23_HSYNC_PLL_END_TIME 0x07, 0x23
+#define TM6010_REQ07_R24_HSYNC_TIP_START_TIME 0x07, 0x24
+#define TM6010_REQ07_R25_HSYNC_TIP_END_TIME 0x07, 0x25
+#define TM6010_REQ07_R26_HSYNC_RISING_EDGE_START 0x07, 0x26
+#define TM6010_REQ07_R27_HSYNC_RISING_EDGE_END 0x07, 0x27
+#define TM6010_REQ07_R28_BACKPORCH_START 0x07, 0x28
+#define TM6010_REQ07_R29_BACKPORCH_END 0x07, 0x29
+#define TM6010_REQ07_R2A_HSYNC_FILTER_START 0x07, 0x2a
+#define TM6010_REQ07_R2B_HSYNC_FILTER_END 0x07, 0x2b
+#define TM6010_REQ07_R2C_CHROMA_BURST_START 0x07, 0x2c
+#define TM6010_REQ07_R2D_CHROMA_BURST_END 0x07, 0x2d
+#define TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART 0x07, 0x2e
+#define TM6010_REQ07_R2F_ACTIVE_VIDEO_HWIDTH 0x07, 0x2f
+#define TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART 0x07, 0x30
+#define TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT 0x07, 0x31
+#define TM6010_REQ07_R32_VSYNC_HLOCK_MIN 0x07, 0x32
+#define TM6010_REQ07_R33_VSYNC_HLOCK_MAX 0x07, 0x33
+#define TM6010_REQ07_R34_VSYNC_AGC_MIN 0x07, 0x34
+#define TM6010_REQ07_R35_VSYNC_AGC_MAX 0x07, 0x35
+#define TM6010_REQ07_R36_VSYNC_VBI_MIN 0x07, 0x36
+#define TM6010_REQ07_R37_VSYNC_VBI_MAX 0x07, 0x37
+#define TM6010_REQ07_R38_VSYNC_THRESHOLD 0x07, 0x38
+#define TM6010_REQ07_R39_VSYNC_TIME_CONSTANT 0x07, 0x39
+#define TM6010_REQ07_R3A_STATUS1 0x07, 0x3a
+#define TM6010_REQ07_R3B_STATUS2 0x07, 0x3b
+#define TM6010_REQ07_R3C_STATUS3 0x07, 0x3c
+#define TM6010_REQ07_R3F_RESET 0x07, 0x3f
+#define TM6010_REQ07_R40_TELETEXT_VBI_CODE0 0x07, 0x40
+#define TM6010_REQ07_R41_TELETEXT_VBI_CODE1 0x07, 0x41
+#define TM6010_REQ07_R42_VBI_DATA_HIGH_LEVEL 0x07, 0x42
+#define TM6010_REQ07_R43_VBI_DATA_TYPE_LINE7 0x07, 0x43
+#define TM6010_REQ07_R44_VBI_DATA_TYPE_LINE8 0x07, 0x44
+#define TM6010_REQ07_R45_VBI_DATA_TYPE_LINE9 0x07, 0x45
+#define TM6010_REQ07_R46_VBI_DATA_TYPE_LINE10 0x07, 0x46
+#define TM6010_REQ07_R47_VBI_DATA_TYPE_LINE11 0x07, 0x47
+#define TM6010_REQ07_R48_VBI_DATA_TYPE_LINE12 0x07, 0x48
+#define TM6010_REQ07_R49_VBI_DATA_TYPE_LINE13 0x07, 0x49
+#define TM6010_REQ07_R4A_VBI_DATA_TYPE_LINE14 0x07, 0x4a
+#define TM6010_REQ07_R4B_VBI_DATA_TYPE_LINE15 0x07, 0x4b
+#define TM6010_REQ07_R4C_VBI_DATA_TYPE_LINE16 0x07, 0x4c
+#define TM6010_REQ07_R4D_VBI_DATA_TYPE_LINE17 0x07, 0x4d
+#define TM6010_REQ07_R4E_VBI_DATA_TYPE_LINE18 0x07, 0x4e
+#define TM6010_REQ07_R4F_VBI_DATA_TYPE_LINE19 0x07, 0x4f
+#define TM6010_REQ07_R50_VBI_DATA_TYPE_LINE20 0x07, 0x50
+#define TM6010_REQ07_R51_VBI_DATA_TYPE_LINE21 0x07, 0x51
+#define TM6010_REQ07_R52_VBI_DATA_TYPE_LINE22 0x07, 0x52
+#define TM6010_REQ07_R53_VBI_DATA_TYPE_LINE23 0x07, 0x53
+#define TM6010_REQ07_R54_VBI_DATA_TYPE_RLINES 0x07, 0x54
+#define TM6010_REQ07_R55_VBI_LOOP_FILTER_GAIN 0x07, 0x55
+#define TM6010_REQ07_R56_VBI_LOOP_FILTER_I_GAIN 0x07, 0x56
+#define TM6010_REQ07_R57_VBI_LOOP_FILTER_P_GAIN 0x07, 0x57
+#define TM6010_REQ07_R58_VBI_CAPTION_DTO1 0x07, 0x58
+#define TM6010_REQ07_R59_VBI_CAPTION_DTO0 0x07, 0x59
+#define TM6010_REQ07_R5A_VBI_TELETEXT_DTO1 0x07, 0x5a
+#define TM6010_REQ07_R5B_VBI_TELETEXT_DTO0 0x07, 0x5b
+#define TM6010_REQ07_R5C_VBI_WSS625_DTO1 0x07, 0x5c
+#define TM6010_REQ07_R5D_VBI_WSS625_DTO0 0x07, 0x5d
+#define TM6010_REQ07_R5E_VBI_CAPTION_FRAME_START 0x07, 0x5e
+#define TM6010_REQ07_R5F_VBI_WSS625_FRAME_START 0x07, 0x5f
+#define TM6010_REQ07_R60_TELETEXT_FRAME_START 0x07, 0x60
+#define TM6010_REQ07_R61_VBI_CCDATA1 0x07, 0x61
+#define TM6010_REQ07_R62_VBI_CCDATA2 0x07, 0x62
+#define TM6010_REQ07_R63_VBI_WSS625_DATA1 0x07, 0x63
+#define TM6010_REQ07_R64_VBI_WSS625_DATA2 0x07, 0x64
+#define TM6010_REQ07_R65_VBI_DATA_STATUS 0x07, 0x65
+#define TM6010_REQ07_R66_VBI_CAPTION_START 0x07, 0x66
+#define TM6010_REQ07_R67_VBI_WSS625_START 0x07, 0x67
+#define TM6010_REQ07_R68_VBI_TELETEXT_START 0x07, 0x68
+#define TM6010_REQ07_R70_HSYNC_DTO_INC_STATUS3 0x07, 0x70
+#define TM6010_REQ07_R71_HSYNC_DTO_INC_STATUS2 0x07, 0x71
+#define TM6010_REQ07_R72_HSYNC_DTO_INC_STATUS1 0x07, 0x72
+#define TM6010_REQ07_R73_HSYNC_DTO_INC_STATUS0 0x07, 0x73
+#define TM6010_REQ07_R74_CHROMA_DTO_INC_STATUS3 0x07, 0x74
+#define TM6010_REQ07_R75_CHROMA_DTO_INC_STATUS2 0x07, 0x75
+#define TM6010_REQ07_R76_CHROMA_DTO_INC_STATUS1 0x07, 0x76
+#define TM6010_REQ07_R77_CHROMA_DTO_INC_STATUS0 0x07, 0x77
+#define TM6010_REQ07_R78_AGC_AGAIN_STATUS 0x07, 0x78
+#define TM6010_REQ07_R79_AGC_DGAIN_STATUS 0x07, 0x79
+#define TM6010_REQ07_R7A_CHROMA_MAG_STATUS 0x07, 0x7a
+#define TM6010_REQ07_R7B_CHROMA_GAIN_STATUS1 0x07, 0x7b
+#define TM6010_REQ07_R7C_CHROMA_GAIN_STATUS0 0x07, 0x7c
+#define TM6010_REQ07_R7D_CORDIC_FREQ_STATUS 0x07, 0x7d
+#define TM6010_REQ07_R7F_STATUS_NOISE 0x07, 0x7f
+#define TM6010_REQ07_R80_COMB_FILTER_TRESHOLD 0x07, 0x80
+#define TM6010_REQ07_R82_COMB_FILTER_CONFIG 0x07, 0x82
+#define TM6010_REQ07_R83_CHROMA_LOCK_CONFIG 0x07, 0x83
+#define TM6010_REQ07_R84_NOISE_NTSC_C 0x07, 0x84
+#define TM6010_REQ07_R85_NOISE_PAL_C 0x07, 0x85
+#define TM6010_REQ07_R86_NOISE_PHASE_C 0x07, 0x86
+#define TM6010_REQ07_R87_NOISE_PHASE_Y 0x07, 0x87
+#define TM6010_REQ07_R8A_CHROMA_LOOPFILTER_STATE 0x07, 0x8a
+#define TM6010_REQ07_R8B_CHROMA_HRESAMPLER 0x07, 0x8b
+#define TM6010_REQ07_R8D_CPUMP_DELAY_ADJ 0x07, 0x8d
+#define TM6010_REQ07_R8E_CPUMP_ADJ 0x07, 0x8e
+#define TM6010_REQ07_R8F_CPUMP_DELAY 0x07, 0x8f
+
+/* Define TM6000/TM6010 Miscellaneous registers */
+#define TM6010_REQ07_RC0_ACTIVE_VIDEO_SOURCE 0x07, 0xc0
+#define TM6010_REQ07_RC1_TRESHOLD 0x07, 0xc1
+#define TM6010_REQ07_RC2_HSYNC_WIDTH 0x07, 0xc2
+#define TM6010_REQ07_RC3_HSTART1 0x07, 0xc3
+#define TM6010_REQ07_RC4_HSTART0 0x07, 0xc4
+#define TM6010_REQ07_RC5_HEND1 0x07, 0xc5
+#define TM6010_REQ07_RC6_HEND0 0x07, 0xc6
+#define TM6010_REQ07_RC7_VSTART1 0x07, 0xc7
+#define TM6010_REQ07_RC8_VSTART0 0x07, 0xc8
+#define TM6010_REQ07_RC9_VEND1 0x07, 0xc9
+#define TM6010_REQ07_RCA_VEND0 0x07, 0xca
+#define TM6010_REQ07_RCB_DELAY 0x07, 0xcb
+#define TM6010_REQ07_RCC_ACTIVE_VIDEO_IF 0x07, 0xcc
+#define TM6010_REQ07_RD0_USB_PERIPHERY_CONTROL 0x07, 0xd0
+#define TM6010_REQ07_RD1_ADDR_FOR_REQ1 0x07, 0xd1
+#define TM6010_REQ07_RD2_ADDR_FOR_REQ2 0x07, 0xd2
+#define TM6010_REQ07_RD3_ADDR_FOR_REQ3 0x07, 0xd3
+#define TM6010_REQ07_RD4_ADDR_FOR_REQ4 0x07, 0xd4
+#define TM6010_REQ07_RD5_POWERSAVE 0x07, 0xd5
+#define TM6010_REQ07_RD6_ENDP_REQ1_REQ2 0x07, 0xd6
+#define TM6010_REQ07_RD7_ENDP_REQ3_REQ4 0x07, 0xd7
+#define TM6010_REQ07_RD8_IR 0x07, 0xd8
+#define TM6010_REQ07_RD8_IR_BSIZE 0x07, 0xd9
+#define TM6010_REQ07_RD8_IR_WAKEUP_SEL 0x07, 0xda
+#define TM6010_REQ07_RD8_IR_WAKEUP_ADD 0x07, 0xdb
+#define TM6010_REQ07_RD8_IR_LEADER1 0x07, 0xdc
+#define TM6010_REQ07_RD8_IR_LEADER0 0x07, 0xdd
+#define TM6010_REQ07_RD8_IR_PULSE_CNT1 0x07, 0xde
+#define TM6010_REQ07_RD8_IR_PULSE_CNT0 0x07, 0xdf
+#define TM6010_REQ07_RE0_DVIDEO_SOURCE 0x07, 0xe0
+#define TM6010_REQ07_RE0_DVIDEO_SOURCE_IF 0x07, 0xe1
+#define TM6010_REQ07_RE2_OUT_SEL2 0x07, 0xe2
+#define TM6010_REQ07_RE3_OUT_SEL1 0x07, 0xe3
+#define TM6010_REQ07_RE4_OUT_SEL0 0x07, 0xe4
+#define TM6010_REQ07_RE5_REMOTE_WAKEUP 0x07, 0xe5
+#define TM6010_REQ07_RE7_PUB_GPIO 0x07, 0xe7
+#define TM6010_REQ07_RE8_TYPESEL_MOS_I2S 0x07, 0xe8
+#define TM6010_REQ07_RE9_TYPESEL_MOS_TS 0x07, 0xe9
+#define TM6010_REQ07_REA_TYPESEL_MOS_CCIR 0x07, 0xea
+#define TM6010_REQ07_RF0_BIST_CRC_RESULT0 0x07, 0xf0
+#define TM6010_REQ07_RF1_BIST_CRC_RESULT1 0x07, 0xf1
+#define TM6010_REQ07_RF2_BIST_CRC_RESULT2 0x07, 0xf2
+#define TM6010_REQ07_RF3_BIST_CRC_RESULT3 0x07, 0xf3
+#define TM6010_REQ07_RF4_BIST_ERR_VST2 0x07, 0xf4
+#define TM6010_REQ07_RF5_BIST_ERR_VST1 0x07, 0xf5
+#define TM6010_REQ07_RF6_BIST_ERR_VST0 0x07, 0xf6
+#define TM6010_REQ07_RF7_BIST 0x07, 0xf7
+#define TM6010_REQ07_RFE_POWER_DOWN 0x07, 0xfe
+#define TM6010_REQ07_RFF_SOFT_RESET 0x07, 0xff
+
+/* Define TM6000/TM6010 USB registers */
+#define TM6010_REQ05_R00_MAIN_CTRL 0x05, 0x00
+#define TM6010_REQ05_R01_DEVADDR 0x05, 0x01
+#define TM6010_REQ05_R02_TEST 0x05, 0x02
+#define TM6010_REQ05_R04_SOFN0 0x05, 0x04
+#define TM6010_REQ05_R05_SOFN1 0x05, 0x05
+#define TM6010_REQ05_R06_SOFTM0 0x05, 0x06
+#define TM6010_REQ05_R07_SOFTM1 0x05, 0x07
+#define TM6010_REQ05_R08_PHY_TEST 0x05, 0x08
+#define TM6010_REQ05_R09_VCTL 0x05, 0x09
+#define TM6010_REQ05_R0A_VSTA 0x05, 0x0a
+#define TM6010_REQ05_R0B_CX_CFG 0x05, 0x0b
+#define TM6010_REQ05_R0C_ENDP0_REG0 0x05, 0x0c
+#define TM6010_REQ05_R10_GMASK 0x05, 0x10
+#define TM6010_REQ05_R11_IMASK0 0x05, 0x11
+#define TM6010_REQ05_R12_IMASK1 0x05, 0x12
+#define TM6010_REQ05_R13_IMASK2 0x05, 0x13
+#define TM6010_REQ05_R14_IMASK3 0x05, 0x14
+#define TM6010_REQ05_R15_IMASK4 0x05, 0x15
+#define TM6010_REQ05_R16_IMASK5 0x05, 0x16
+#define TM6010_REQ05_R17_IMASK6 0x05, 0x17
+#define TM6010_REQ05_R18_IMASK7 0x05, 0x18
+#define TM6010_REQ05_R19_ZEROP0 0x05, 0x19
+#define TM6010_REQ05_R1A_ZEROP1 0x05, 0x1a
+#define TM6010_REQ05_R1C_FIFO_EMP0 0x05, 0x1c
+#define TM6010_REQ05_R1D_FIFO_EMP1 0x05, 0x1d
+#define TM6010_REQ05_R20_IRQ_GROUP 0x05, 0x20
+#define TM6010_REQ05_R21_IRQ_SOURCE0 0x05, 0x21
+#define TM6010_REQ05_R22_IRQ_SOURCE1 0x05, 0x22
+#define TM6010_REQ05_R23_IRQ_SOURCE2 0x05, 0x23
+#define TM6010_REQ05_R24_IRQ_SOURCE3 0x05, 0x24
+#define TM6010_REQ05_R25_IRQ_SOURCE4 0x05, 0x25
+#define TM6010_REQ05_R26_IRQ_SOURCE5 0x05, 0x26
+#define TM6010_REQ05_R27_IRQ_SOURCE6 0x05, 0x27
+#define TM6010_REQ05_R28_IRQ_SOURCE7 0x05, 0x28
+#define TM6010_REQ05_R29_SEQ_ERR0 0x05, 0x29
+#define TM6010_REQ05_R2A_SEQ_ERR1 0x05, 0x2a
+#define TM6010_REQ05_R2B_SEQ_ABORT0 0x05, 0x2b
+#define TM6010_REQ05_R2C_SEQ_ABORT1 0x05, 0x2c
+#define TM6010_REQ05_R2D_TX_ZERO0 0x05, 0x2d
+#define TM6010_REQ05_R2E_TX_ZERO1 0x05, 0x2e
+#define TM6010_REQ05_R2F_IDLE_CNT 0x05, 0x2f
+#define TM6010_REQ05_R30_FNO_P1 0x05, 0x30
+#define TM6010_REQ05_R31_FNO_P2 0x05, 0x31
+#define TM6010_REQ05_R32_FNO_P3 0x05, 0x32
+#define TM6010_REQ05_R33_FNO_P4 0x05, 0x33
+#define TM6010_REQ05_R34_FNO_P5 0x05, 0x34
+#define TM6010_REQ05_R35_FNO_P6 0x05, 0x35
+#define TM6010_REQ05_R36_FNO_P7 0x05, 0x36
+#define TM6010_REQ05_R37_FNO_P8 0x05, 0x37
+#define TM6010_REQ05_R38_FNO_P9 0x05, 0x38
+#define TM6010_REQ05_R30_FNO_P10 0x05, 0x39
+#define TM6010_REQ05_R30_FNO_P11 0x05, 0x3a
+#define TM6010_REQ05_R30_FNO_P12 0x05, 0x3b
+#define TM6010_REQ05_R30_FNO_P13 0x05, 0x3c
+#define TM6010_REQ05_R30_FNO_P14 0x05, 0x3d
+#define TM6010_REQ05_R30_FNO_P15 0x05, 0x3e
+#define TM6010_REQ05_R40_IN_MAXPS_LOW1 0x05, 0x40
+#define TM6010_REQ05_R41_IN_MAXPS_HIGH1 0x05, 0x41
+#define TM6010_REQ05_R42_IN_MAXPS_LOW2 0x05, 0x42
+#define TM6010_REQ05_R43_IN_MAXPS_HIGH2 0x05, 0x43
+#define TM6010_REQ05_R44_IN_MAXPS_LOW3 0x05, 0x44
+#define TM6010_REQ05_R45_IN_MAXPS_HIGH3 0x05, 0x45
+#define TM6010_REQ05_R46_IN_MAXPS_LOW4 0x05, 0x46
+#define TM6010_REQ05_R47_IN_MAXPS_HIGH4 0x05, 0x47
+#define TM6010_REQ05_R48_IN_MAXPS_LOW5 0x05, 0x48
+#define TM6010_REQ05_R49_IN_MAXPS_HIGH5 0x05, 0x49
+#define TM6010_REQ05_R4A_IN_MAXPS_LOW6 0x05, 0x4a
+#define TM6010_REQ05_R4B_IN_MAXPS_HIGH6 0x05, 0x4b
+#define TM6010_REQ05_R4C_IN_MAXPS_LOW7 0x05, 0x4c
+#define TM6010_REQ05_R4D_IN_MAXPS_HIGH7 0x05, 0x4d
+#define TM6010_REQ05_R4E_IN_MAXPS_LOW8 0x05, 0x4e
+#define TM6010_REQ05_R4F_IN_MAXPS_HIGH8 0x05, 0x4f
+#define TM6010_REQ05_R50_IN_MAXPS_LOW9 0x05, 0x50
+#define TM6010_REQ05_R51_IN_MAXPS_HIGH9 0x05, 0x51
+#define TM6010_REQ05_R40_IN_MAXPS_LOW10 0x05, 0x52
+#define TM6010_REQ05_R41_IN_MAXPS_HIGH10 0x05, 0x53
+#define TM6010_REQ05_R40_IN_MAXPS_LOW11 0x05, 0x54
+#define TM6010_REQ05_R41_IN_MAXPS_HIGH11 0x05, 0x55
+#define TM6010_REQ05_R40_IN_MAXPS_LOW12 0x05, 0x56
+#define TM6010_REQ05_R41_IN_MAXPS_HIGH12 0x05, 0x57
+#define TM6010_REQ05_R40_IN_MAXPS_LOW13 0x05, 0x58
+#define TM6010_REQ05_R41_IN_MAXPS_HIGH13 0x05, 0x59
+#define TM6010_REQ05_R40_IN_MAXPS_LOW14 0x05, 0x5a
+#define TM6010_REQ05_R41_IN_MAXPS_HIGH14 0x05, 0x5b
+#define TM6010_REQ05_R40_IN_MAXPS_LOW15 0x05, 0x5c
+#define TM6010_REQ05_R41_IN_MAXPS_HIGH15 0x05, 0x5d
+#define TM6010_REQ05_R60_OUT_MAXPS_LOW1 0x05, 0x60
+#define TM6010_REQ05_R61_OUT_MAXPS_HIGH1 0x05, 0x61
+#define TM6010_REQ05_R62_OUT_MAXPS_LOW2 0x05, 0x62
+#define TM6010_REQ05_R63_OUT_MAXPS_HIGH2 0x05, 0x63
+#define TM6010_REQ05_R64_OUT_MAXPS_LOW3 0x05, 0x64
+#define TM6010_REQ05_R65_OUT_MAXPS_HIGH3 0x05, 0x65
+#define TM6010_REQ05_R66_OUT_MAXPS_LOW4 0x05, 0x66
+#define TM6010_REQ05_R67_OUT_MAXPS_HIGH4 0x05, 0x67
+#define TM6010_REQ05_R68_OUT_MAXPS_LOW5 0x05, 0x68
+#define TM6010_REQ05_R69_OUT_MAXPS_HIGH5 0x05, 0x69
+#define TM6010_REQ05_R6A_OUT_MAXPS_LOW6 0x05, 0x6a
+#define TM6010_REQ05_R6B_OUT_MAXPS_HIGH6 0x05, 0x6b
+#define TM6010_REQ05_R6C_OUT_MAXPS_LOW7 0x05, 0x6c
+#define TM6010_REQ05_R6D_OUT_MAXPS_HIGH7 0x05, 0x6d
+#define TM6010_REQ05_R6E_OUT_MAXPS_LOW8 0x05, 0x6e
+#define TM6010_REQ05_R6F_OUT_MAXPS_HIGH8 0x05, 0x6f
+#define TM6010_REQ05_R70_OUT_MAXPS_LOW9 0x05, 0x70
+#define TM6010_REQ05_R71_OUT_MAXPS_HIGH9 0x05, 0x71
+#define TM6010_REQ05_R72_OUT_MAXPS_LOW10 0x05, 0x72
+#define TM6010_REQ05_R73_OUT_MAXPS_HIGH10 0x05, 0x73
+#define TM6010_REQ05_R74_OUT_MAXPS_LOW11 0x05, 0x74
+#define TM6010_REQ05_R75_OUT_MAXPS_HIGH11 0x05, 0x75
+#define TM6010_REQ05_R76_OUT_MAXPS_LOW12 0x05, 0x76
+#define TM6010_REQ05_R77_OUT_MAXPS_HIGH12 0x05, 0x77
+#define TM6010_REQ05_R78_OUT_MAXPS_LOW13 0x05, 0x78
+#define TM6010_REQ05_R79_OUT_MAXPS_HIGH13 0x05, 0x79
+#define TM6010_REQ05_R7A_OUT_MAXPS_LOW14 0x05, 0x7a
+#define TM6010_REQ05_R7B_OUT_MAXPS_HIGH14 0x05, 0x7b
+#define TM6010_REQ05_R7C_OUT_MAXPS_LOW15 0x05, 0x7c
+#define TM6010_REQ05_R7D_OUT_MAXPS_HIGH15 0x05, 0x7d
+#define TM6010_REQ05_R80_FIFO0 0x05, 0x80
+#define TM6010_REQ05_R81_FIFO1 0x05, 0x81
+#define TM6010_REQ05_R82_FIFO2 0x05, 0x82
+#define TM6010_REQ05_R83_FIFO3 0x05, 0x83
+#define TM6010_REQ05_R84_FIFO4 0x05, 0x84
+#define TM6010_REQ05_R85_FIFO5 0x05, 0x85
+#define TM6010_REQ05_R86_FIFO6 0x05, 0x86
+#define TM6010_REQ05_R87_FIFO7 0x05, 0x87
+#define TM6010_REQ05_R88_FIFO8 0x05, 0x88
+#define TM6010_REQ05_R89_FIFO9 0x05, 0x89
+#define TM6010_REQ05_R8A_FIFO10 0x05, 0x8a
+#define TM6010_REQ05_R8B_FIFO11 0x05, 0x8b
+#define TM6010_REQ05_R8C_FIFO12 0x05, 0x8c
+#define TM6010_REQ05_R8D_FIFO13 0x05, 0x8d
+#define TM6010_REQ05_R8E_FIFO14 0x05, 0x8e
+#define TM6010_REQ05_R8F_FIFO15 0x05, 0x8f
+#define TM6010_REQ05_R90_CFG_FIFO0 0x05, 0x90
+#define TM6010_REQ05_R91_CFG_FIFO1 0x05, 0x91
+#define TM6010_REQ05_R92_CFG_FIFO2 0x05, 0x92
+#define TM6010_REQ05_R93_CFG_FIFO3 0x05, 0x93
+#define TM6010_REQ05_R94_CFG_FIFO4 0x05, 0x94
+#define TM6010_REQ05_R95_CFG_FIFO5 0x05, 0x95
+#define TM6010_REQ05_R96_CFG_FIFO6 0x05, 0x96
+#define TM6010_REQ05_R97_CFG_FIFO7 0x05, 0x97
+#define TM6010_REQ05_R98_CFG_FIFO8 0x05, 0x98
+#define TM6010_REQ05_R99_CFG_FIFO9 0x05, 0x99
+#define TM6010_REQ05_R9A_CFG_FIFO10 0x05, 0x9a
+#define TM6010_REQ05_R9B_CFG_FIFO11 0x05, 0x9b
+#define TM6010_REQ05_R9C_CFG_FIFO12 0x05, 0x9c
+#define TM6010_REQ05_R9D_CFG_FIFO13 0x05, 0x9d
+#define TM6010_REQ05_R9E_CFG_FIFO14 0x05, 0x9e
+#define TM6010_REQ05_R9F_CFG_FIFO15 0x05, 0x9f
+#define TM6010_REQ05_RA0_CTL_FIFO0 0x05, 0xa0
+#define TM6010_REQ05_RA1_CTL_FIFO1 0x05, 0xa1
+#define TM6010_REQ05_RA2_CTL_FIFO2 0x05, 0xa2
+#define TM6010_REQ05_RA3_CTL_FIFO3 0x05, 0xa3
+#define TM6010_REQ05_RA4_CTL_FIFO4 0x05, 0xa4
+#define TM6010_REQ05_RA5_CTL_FIFO5 0x05, 0xa5
+#define TM6010_REQ05_RA6_CTL_FIFO6 0x05, 0xa6
+#define TM6010_REQ05_RA7_CTL_FIFO7 0x05, 0xa7
+#define TM6010_REQ05_RA8_CTL_FIFO8 0x05, 0xa8
+#define TM6010_REQ05_RA9_CTL_FIFO9 0x05, 0xa9
+#define TM6010_REQ05_RAA_CTL_FIFO10 0x05, 0xaa
+#define TM6010_REQ05_RAB_CTL_FIFO11 0x05, 0xab
+#define TM6010_REQ05_RAC_CTL_FIFO12 0x05, 0xac
+#define TM6010_REQ05_RAD_CTL_FIFO13 0x05, 0xad
+#define TM6010_REQ05_RAE_CTL_FIFO14 0x05, 0xae
+#define TM6010_REQ05_RAF_CTL_FIFO15 0x05, 0xaf
+#define TM6010_REQ05_RB0_BC_LOW_FIFO0 0x05, 0xb0
+#define TM6010_REQ05_RB1_BC_LOW_FIFO1 0x05, 0xb1
+#define TM6010_REQ05_RB2_BC_LOW_FIFO2 0x05, 0xb2
+#define TM6010_REQ05_RB3_BC_LOW_FIFO3 0x05, 0xb3
+#define TM6010_REQ05_RB4_BC_LOW_FIFO4 0x05, 0xb4
+#define TM6010_REQ05_RB5_BC_LOW_FIFO5 0x05, 0xb5
+#define TM6010_REQ05_RB6_BC_LOW_FIFO6 0x05, 0xb6
+#define TM6010_REQ05_RB7_BC_LOW_FIFO7 0x05, 0xb7
+#define TM6010_REQ05_RB8_BC_LOW_FIFO8 0x05, 0xb8
+#define TM6010_REQ05_RB9_BC_LOW_FIFO9 0x05, 0xb9
+#define TM6010_REQ05_RBA_BC_LOW_FIFO10 0x05, 0xba
+#define TM6010_REQ05_RBB_BC_LOW_FIFO11 0x05, 0xbb
+#define TM6010_REQ05_RBC_BC_LOW_FIFO12 0x05, 0xbc
+#define TM6010_REQ05_RBD_BC_LOW_FIFO13 0x05, 0xbd
+#define TM6010_REQ05_RBE_BC_LOW_FIFO14 0x05, 0xbe
+#define TM6010_REQ05_RBF_BC_LOW_FIFO15 0x05, 0xbf
+#define TM6010_REQ05_RC0_DATA_FIFO0 0x05, 0xc0
+#define TM6010_REQ05_RC4_DATA_FIFO1 0x05, 0xc4
+#define TM6010_REQ05_RC8_DATA_FIFO2 0x05, 0xc8
+#define TM6010_REQ05_RCC_DATA_FIFO3 0x05, 0xcc
+#define TM6010_REQ05_RD0_DATA_FIFO4 0x05, 0xd0
+#define TM6010_REQ05_RD4_DATA_FIFO5 0x05, 0xd4
+#define TM6010_REQ05_RD8_DATA_FIFO6 0x05, 0xd8
+#define TM6010_REQ05_RDC_DATA_FIFO7 0x05, 0xdc
+#define TM6010_REQ05_RE0_DATA_FIFO8 0x05, 0xe0
+#define TM6010_REQ05_RE4_DATA_FIFO9 0x05, 0xe4
+#define TM6010_REQ05_RE8_DATA_FIFO10 0x05, 0xe8
+#define TM6010_REQ05_REC_DATA_FIFO11 0x05, 0xec
+#define TM6010_REQ05_RF0_DATA_FIFO12 0x05, 0xf0
+#define TM6010_REQ05_RF4_DATA_FIFO13 0x05, 0xf4
+#define TM6010_REQ05_RF8_DATA_FIFO14 0x05, 0xf8
+#define TM6010_REQ05_RFC_DATA_FIFO15 0x05, 0xfc
+
+/* Define TM6000/TM6010 Audio decoder registers */
+#define TM6010_REQ08_R00_A_VERSION 0x08, 0x00
+#define TM6010_REQ08_R01_A_INIT 0x08, 0x01
+#define TM6010_REQ08_R02_A_FIX_GAIN_CTRL 0x08, 0x02
+#define TM6010_REQ08_R03_A_AUTO_GAIN_CTRL 0x08, 0x03
+#define TM6010_REQ08_R04_A_SIF_AMP_CTRL 0x08, 0x04
+#define TM6010_REQ08_R05_A_STANDARD_MOD 0x08, 0x05
+#define TM6010_REQ08_R06_A_SOUND_MOD 0x08, 0x06
+#define TM6010_REQ08_R07_A_LEFT_VOL 0x08, 0x07
+#define TM6010_REQ08_R08_A_RIGHT_VOL 0x08, 0x08
+#define TM6010_REQ08_R09_A_MAIN_VOL 0x08, 0x09
+#define TM6010_REQ08_R0A_A_I2S_MOD 0x08, 0x0a
+#define TM6010_REQ08_R0B_A_ASD_THRES1 0x08, 0x0b
+#define TM6010_REQ08_R0C_A_ASD_THRES2 0x08, 0x0c
+#define TM6010_REQ08_R0D_A_AMD_THRES 0x08, 0x0d
+#define TM6010_REQ08_R0E_A_MONO_THRES1 0x08, 0x0e
+#define TM6010_REQ08_R0F_A_MONO_THRES2 0x08, 0x0f
+#define TM6010_REQ08_R10_A_MUTE_THRES1 0x08, 0x10
+#define TM6010_REQ08_R11_A_MUTE_THRES2 0x08, 0x11
+#define TM6010_REQ08_R12_A_AGC_U 0x08, 0x12
+#define TM6010_REQ08_R13_A_AGC_ERR_T 0x08, 0x13
+#define TM6010_REQ08_R14_A_AGC_GAIN_INIT 0x08, 0x14
+#define TM6010_REQ08_R15_A_AGC_STEP_THR 0x08, 0x15
+#define TM6010_REQ08_R16_A_AGC_GAIN_MAX 0x08, 0x16
+#define TM6010_REQ08_R17_A_AGC_GAIN_MIN 0x08, 0x17
+#define TM6010_REQ08_R18_A_TR_CTRL 0x08, 0x18
+#define TM6010_REQ08_R19_A_FH_2FH_GAIN 0x08, 0x19
+#define TM6010_REQ08_R1A_A_NICAM_SER_MAX 0x08, 0x1a
+#define TM6010_REQ08_R1B_A_NICAM_SER_MIN 0x08, 0x1b
+#define TM6010_REQ08_R1E_A_GAIN_DEEMPH_OUT 0x08, 0x1e
+#define TM6010_REQ08_R1F_A_TEST_INTF_SEL 0x08, 0x1f
+#define TM6010_REQ08_R20_A_TEST_PIN_SEL 0x08, 0x20
+#define TM6010_REQ08_R21_A_AGC_ERR 0x08, 0x21
+#define TM6010_REQ08_R22_A_AGC_GAIN 0x08, 0x22
+#define TM6010_REQ08_R23_A_NICAM_INFO 0x08, 0x23
+#define TM6010_REQ08_R24_A_SER 0x08, 0x24
+#define TM6010_REQ08_R25_A_C1_AMP 0x08, 0x25
+#define TM6010_REQ08_R26_A_C2_AMP 0x08, 0x26
+#define TM6010_REQ08_R27_A_NOISE_AMP 0x08, 0x27
+#define TM6010_REQ08_R28_A_AUDIO_MODE_RES 0x08, 0x28
+
+/* Define TM6000/TM6010 Video ADC registers */
+#define TM6010_REQ08_RE0_ADC_REF 0x08, 0xe0
+#define TM6010_REQ08_RE1_DAC_CLMP 0x08, 0xe1
+#define TM6010_REQ08_RE2_POWER_DOWN_CTRL1 0x08, 0xe2
+#define TM6010_REQ08_RE3_ADC_IN1_SEL 0x08, 0xe3
+#define TM6010_REQ08_RE4_ADC_IN2_SEL 0x08, 0xe4
+#define TM6010_REQ08_RE5_GAIN_PARAM 0x08, 0xe5
+#define TM6010_REQ08_RE6_POWER_DOWN_CTRL2 0x08, 0xe6
+#define TM6010_REQ08_RE7_REG_GAIN_Y 0x08, 0xe7
+#define TM6010_REQ08_RE8_REG_GAIN_C 0x08, 0xe8
+#define TM6010_REQ08_RE9_BIAS_CTRL 0x08, 0xe9
+#define TM6010_REQ08_REA_BUFF_DRV_CTRL 0x08, 0xea
+#define TM6010_REQ08_REB_SIF_GAIN_CTRL 0x08, 0xeb
+#define TM6010_REQ08_REC_REVERSE_YC_CTRL 0x08, 0xec
+#define TM6010_REQ08_RED_GAIN_SEL 0x08, 0xed
+
+/* Define TM6000/TM6010 Audio ADC registers */
+#define TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG 0x08, 0xf0
+#define TM6010_REQ08_RF1_AADC_POWER_DOWN 0x08, 0xf1
+#define TM6010_REQ08_RF2_LEFT_CHANNEL_VOL 0x08, 0xf2
+#define TM6010_REQ08_RF3_RIGHT_CHANNEL_VOL 0x08, 0xf3
diff --git a/drivers/staging/tm6000/tm6000-stds.c b/drivers/staging/tm6000/tm6000-stds.c
new file mode 100644
index 000000000000..b3564f611e5e
--- /dev/null
+++ b/drivers/staging/tm6000/tm6000-stds.c
@@ -0,0 +1,873 @@
+/*
+ tm6000-stds.c - driver for TM5600/TM6000/TM6010 USB video capture devices
+
+ Copyright (C) 2007 Mauro Carvalho Chehab <mchehab@redhat.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation version 2
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include "tm6000.h"
+#include "tm6000-regs.h"
+
+struct tm6000_reg_settings {
+ unsigned char req;
+ unsigned char reg;
+ unsigned char value;
+};
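+
+/*
+ * Note: the TM6010_REQxx_Ryy_* macros from tm6000-regs.h encode the USB
+ * request number (xx) and the register offset (yy) and expand to two
+ * values ("req, reg"), so a table entry such as
+ * {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf2} fills all three fields of
+ * this structure. Every register table below ends with a {0, 0, 0}
+ * sentinel, which tm6000_load_std() uses as its stop condition.
+ */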
+
+struct tm6000_std_tv_settings {
+ v4l2_std_id id;
+ struct tm6000_reg_settings sif[12];
+ struct tm6000_reg_settings nosif[12];
+ struct tm6000_reg_settings common[25];
+};
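+
+/*
+ * For the TV input the audio path depends on the tuner: the .sif tables
+ * are presumably meant for tuners that deliver sound on a SIF (sound IF)
+ * carrier, while the .nosif tables keep that path powered down and take
+ * baseband audio instead (see the FIXME in tm6000_set_tv()).
+ */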
+
+struct tm6000_std_settings {
+ v4l2_std_id id;
+ struct tm6000_reg_settings common[37];
+};
+
+static struct tm6000_std_tv_settings tv_stds[] = {
+ {
+ .id = V4L2_STD_PAL_M,
+ .sif = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf2},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf8},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x08},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe8},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x62},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfe},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0xcb},
+ {0, 0, 0},
+ },
+ .nosif = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf8},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x0f},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe8},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x60},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfc},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0x8b},
+ {0, 0, 0},
+ },
+ .common = {
+ {TM6010_REQ07_R3F_RESET, 0x01},
+ {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x04},
+ {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e},
+ {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
+ {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x00},
+ {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x01},
+ {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e},
+ {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x83},
+ {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0x0a},
+ {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xe0},
+ {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
+ {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
+ {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
+ {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
+ {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x88},
+ {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x20},
+ {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0x61},
+ {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x0c},
+ {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c},
+ {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x52},
+ {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6F},
+
+ {TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc},
+ {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
+ {TM6010_REQ07_R3F_RESET, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_PAL_Nc,
+ .sif = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf2},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf8},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x08},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe8},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x62},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfe},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0xcb},
+ {0, 0, 0},
+ },
+ .nosif = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf8},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x0f},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe8},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x60},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfc},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0x8b},
+ {0, 0, 0},
+ },
+ .common = {
+ {TM6010_REQ07_R3F_RESET, 0x01},
+ {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x36},
+ {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e},
+ {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
+ {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x02},
+ {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x01},
+ {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e},
+ {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x91},
+ {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0x1f},
+ {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0x0c},
+ {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
+ {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
+ {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
+ {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
+ {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x8c},
+ {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x2c},
+ {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1},
+ {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x0c},
+ {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c},
+ {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x52},
+ {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6F},
+
+ {TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc},
+ {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
+ {TM6010_REQ07_R3F_RESET, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_PAL,
+ .sif = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf2},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf8},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x08},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe8},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x62},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfe},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0xcb},
+ {0, 0, 0}
+ },
+ .nosif = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf8},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x0f},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe8},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x60},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfc},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0x8b},
+ {0, 0, 0},
+ },
+ .common = {
+ {TM6010_REQ07_R3F_RESET, 0x01},
+ {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x32},
+ {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e},
+ {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
+ {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x02},
+ {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x01},
+ {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x25},
+ {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0xd5},
+ {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0x63},
+ {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0x50},
+ {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
+ {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
+ {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
+ {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
+ {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x8c},
+ {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x2c},
+ {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1},
+ {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x0c},
+ {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c},
+ {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x52},
+ {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6F},
+
+ {TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc},
+ {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
+ {TM6010_REQ07_R3F_RESET, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_SECAM,
+ .sif = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf2},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf8},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x08},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe8},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x62},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfe},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0xcb},
+ {0, 0, 0},
+ },
+ .nosif = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf8},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x0f},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe8},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x60},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfc},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0x8b},
+ {0, 0, 0},
+ },
+ .common = {
+ {TM6010_REQ07_R3F_RESET, 0x01},
+ {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x38},
+ {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e},
+ {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
+ {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x02},
+ {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x01},
+ {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x24},
+ {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x92},
+ {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0xe8},
+ {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xed},
+ {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
+ {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
+ {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
+ {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
+ {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x8c},
+ {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x2c},
+ {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1},
+ {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x2c},
+ {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x18},
+ {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x42},
+ {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0xFF},
+
+ {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
+ {TM6010_REQ07_R3F_RESET, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_NTSC,
+ .sif = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf2},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf8},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x08},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe8},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x62},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfe},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0xcb},
+ {0, 0, 0},
+ },
+ .nosif = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf8},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x0f},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe8},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x60},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfc},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0x8b},
+ {0, 0, 0},
+ },
+ .common = {
+ {TM6010_REQ07_R3F_RESET, 0x01},
+ {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x00},
+ {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0f},
+ {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
+ {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x00},
+ {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x01},
+ {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e},
+ {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x8b},
+ {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0xa2},
+ {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xe9},
+ {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
+ {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
+ {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
+ {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
+ {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x88},
+ {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x22},
+ {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0x61},
+ {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x1c},
+ {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c},
+ {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x42},
+ {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6F},
+
+ {TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdd},
+ {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
+ {TM6010_REQ07_R3F_RESET, 0x00},
+ {0, 0, 0},
+ },
+ },
+};
+
+static struct tm6000_std_settings composite_stds[] = {
+ {
+ .id = V4L2_STD_PAL_M,
+ .common = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf4},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x0f},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe8},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x68},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfc},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0x8b},
+
+ {TM6010_REQ07_R3F_RESET, 0x01},
+ {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x04},
+ {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e},
+ {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
+ {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x00},
+ {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x01},
+ {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e},
+ {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x83},
+ {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0x0a},
+ {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xe0},
+ {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
+ {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
+ {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
+ {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
+ {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x88},
+ {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x20},
+ {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0x61},
+ {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x0c},
+ {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c},
+ {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x52},
+ {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6F},
+
+ {TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc},
+ {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
+ {TM6010_REQ07_R3F_RESET, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_PAL_Nc,
+ .common = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf4},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x0f},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe8},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x68},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfc},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0x8b},
+
+ {TM6010_REQ07_R3F_RESET, 0x01},
+ {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x36},
+ {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e},
+ {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
+ {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x02},
+ {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x01},
+ {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e},
+ {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x91},
+ {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0x1f},
+ {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0x0c},
+ {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
+ {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
+ {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
+ {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
+ {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x8c},
+ {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x2c},
+ {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1},
+ {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x0c},
+ {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c},
+ {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x52},
+ {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6F},
+
+ {TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc},
+ {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
+ {TM6010_REQ07_R3F_RESET, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_PAL,
+ .common = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf4},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x0f},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe8},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x68},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfc},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0x8b},
+
+ {TM6010_REQ07_R3F_RESET, 0x01},
+ {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x32},
+ {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e},
+ {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
+ {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x02},
+ {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x01},
+ {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x25},
+ {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0xd5},
+ {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0x63},
+ {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0x50},
+ {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
+ {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
+ {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
+ {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
+ {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x8c},
+ {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x2c},
+ {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1},
+ {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x0c},
+ {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c},
+ {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x52},
+ {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6F},
+
+ {TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc},
+ {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
+ {TM6010_REQ07_R3F_RESET, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_SECAM,
+ .common = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf4},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x0f},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe8},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x68},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfc},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0x8b},
+
+ {TM6010_REQ07_R3F_RESET, 0x01},
+ {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x38},
+ {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e},
+ {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
+ {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x02},
+ {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x01},
+ {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x24},
+ {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x92},
+ {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0xe8},
+ {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xed},
+ {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
+ {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
+ {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
+ {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
+ {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x8c},
+ {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x2c},
+ {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1},
+ {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x2c},
+ {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x18},
+ {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x42},
+ {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0xFF},
+
+ {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
+ {TM6010_REQ07_R3F_RESET, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_NTSC,
+ .common = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf4},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x0f},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe8},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x68},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfc},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0x8b},
+
+ {TM6010_REQ07_R3F_RESET, 0x01},
+ {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x00},
+ {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0f},
+ {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
+ {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x00},
+ {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x01},
+ {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e},
+ {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x8b},
+ {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0xa2},
+ {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xe9},
+ {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
+ {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
+ {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
+ {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
+ {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x88},
+ {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x22},
+ {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0x61},
+ {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x1c},
+ {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c},
+ {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x42},
+ {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6F},
+
+ {TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdd},
+ {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
+ {TM6010_REQ07_R3F_RESET, 0x00},
+ {0, 0, 0},
+ },
+ },
+};
+
+static struct tm6000_std_settings svideo_stds[] = {
+ {
+ .id = V4L2_STD_PAL_M,
+ .common = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xfc},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf8},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x00},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf2},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xf0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe0},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x68},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfc},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0x8a},
+
+ {TM6010_REQ07_R3F_RESET, 0x01},
+ {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x05},
+ {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e},
+ {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
+ {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x04},
+ {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x01},
+ {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e},
+ {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x83},
+ {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0x0a},
+ {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xe0},
+ {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
+ {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
+ {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
+ {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
+ {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x88},
+ {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x22},
+ {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0x61},
+ {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x0c},
+ {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c},
+ {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x52},
+ {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6F},
+
+ {TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc},
+ {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
+ {TM6010_REQ07_R3F_RESET, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_PAL_Nc,
+ .common = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xfc},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf8},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x00},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf2},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xf0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe0},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x68},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfc},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0x8a},
+
+ {TM6010_REQ07_R3F_RESET, 0x01},
+ {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x37},
+ {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e},
+ {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
+ {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x04},
+ {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x01},
+ {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e},
+ {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x91},
+ {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0x1f},
+ {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0x0c},
+ {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
+ {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
+ {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
+ {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
+ {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x88},
+ {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x22},
+ {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1},
+ {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x0c},
+ {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c},
+ {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x52},
+ {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6F},
+
+ {TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc},
+ {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
+ {TM6010_REQ07_R3F_RESET, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_PAL,
+ .common = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xfc},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf8},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x00},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf2},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xf0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe0},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x68},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfc},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0x8a},
+
+ {TM6010_REQ07_R3F_RESET, 0x01},
+ {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x33},
+ {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e},
+ {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
+ {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x04},
+ {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x00},
+ {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x25},
+ {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0xd5},
+ {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0x63},
+ {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0x50},
+ {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
+ {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
+ {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
+ {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
+ {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x8c},
+ {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x2a},
+ {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1},
+ {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x0c},
+ {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c},
+ {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x52},
+ {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6F},
+
+ {TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc},
+ {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
+ {TM6010_REQ07_R3F_RESET, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_SECAM,
+ .common = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xfc},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf8},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x00},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf2},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xf0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe0},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x68},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfc},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0x8a},
+
+ {TM6010_REQ07_R3F_RESET, 0x01},
+ {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x39},
+ {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e},
+ {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
+ {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x03},
+ {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x01},
+ {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x24},
+ {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x92},
+ {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0xe8},
+ {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xed},
+ {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
+ {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
+ {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
+ {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
+ {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x8c},
+ {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x2a},
+ {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1},
+ {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x2c},
+ {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x18},
+ {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x42},
+ {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0xFF},
+
+ {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
+ {TM6010_REQ07_R3F_RESET, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_NTSC,
+ .common = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xfc},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf8},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x00},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf2},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xf0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe0},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x68},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfc},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0x8a},
+
+ {TM6010_REQ07_R3F_RESET, 0x01},
+ {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x01},
+ {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0f},
+ {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
+ {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x03},
+ {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x00},
+ {TM6010_REQ07_R17_HLOOP_MAXSTATE, 0x8b},
+ {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e},
+ {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x8b},
+ {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0xa2},
+ {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xe9},
+ {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
+ {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
+ {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
+ {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
+ {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x88},
+ {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x22},
+ {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0x61},
+ {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x1c},
+ {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c},
+ {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x42},
+ {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6F},
+
+ {TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdd},
+ {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
+ {TM6010_REQ07_R3F_RESET, 0x00},
+ {0, 0, 0},
+ },
+ },
+};
+
+void tm6000_get_std_res(struct tm6000_core *dev)
+{
+ /* Currently, these are the only supported resolutions */
+ if (dev->norm & V4L2_STD_525_60) {
+ dev->height = 480;
+ } else {
+ dev->height = 576;
+ }
+ dev->width = 720;
+}
+
+static int tm6000_load_std(struct tm6000_core *dev,
+ struct tm6000_reg_settings *set, int max_size)
+{
+ int i, rc;
+
+ /* Load the register table for the selected standard */
+ for (i = 0; i < max_size / sizeof(*set); i++) {
+ if (!set[i].req)
+ return 0;
+
+ if ((dev->dev_type != TM6010) &&
+ (set[i].req == REQ_08_SET_GET_AVREG_BIT))
+ continue;
+
+ rc = tm6000_set_reg(dev, set[i].req, set[i].reg, set[i].value);
+ if (rc < 0) {
+ printk(KERN_ERR "Error %i while setting "
+ "req %d, reg %d to value %d\n",
+ rc, set[i].req, set[i].reg, set[i].value);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int tm6000_set_tv(struct tm6000_core *dev, int pos)
+{
+ int rc;
+
+ /* FIXME: This code is for tm6010 - not tested yet - doesn't work with
+ tm5600
+ */
+
+ /* FIXME: This is tuner-dependent */
+ int nosif = 0;
+
+ if (nosif) {
+ rc = tm6000_load_std(dev, tv_stds[pos].nosif,
+ sizeof(tv_stds[pos].nosif));
+ } else {
+ rc = tm6000_load_std(dev, tv_stds[pos].sif,
+ sizeof(tv_stds[pos].sif));
+ }
+ if (rc < 0)
+ return rc;
+ rc = tm6000_load_std(dev, tv_stds[pos].common,
+ sizeof(tv_stds[pos].common));
+
+ return rc;
+}
+
+int tm6000_set_standard(struct tm6000_core *dev, v4l2_std_id *norm)
+{
+ int i, rc = 0;
+
+ dev->norm = *norm;
+ tm6000_get_std_res(dev);
+
+ switch (dev->input) {
+ case TM6000_INPUT_TV:
+ for (i = 0; i < ARRAY_SIZE(tv_stds); i++) {
+ if (*norm & tv_stds[i].id) {
+ rc = tm6000_set_tv(dev, i);
+ goto ret;
+ }
+ }
+ return -EINVAL;
+ case TM6000_INPUT_SVIDEO:
+ for (i = 0; i < ARRAY_SIZE(svideo_stds); i++) {
+ if (*norm & svideo_stds[i].id) {
+ rc = tm6000_load_std(dev, svideo_stds[i].common,
+ sizeof(svideo_stds[i].common));
+ goto ret;
+ }
+ }
+ return -EINVAL;
+ case TM6000_INPUT_COMPOSITE:
+ for (i = 0; i < ARRAY_SIZE(composite_stds); i++) {
+ if (*norm & composite_stds[i].id) {
+ rc = tm6000_load_std(dev,
+ composite_stds[i].common,
+ sizeof(composite_stds[i].common));
+ goto ret;
+ }
+ }
+ return -EINVAL;
+ }
+
+ret:
+ if (rc < 0)
+ return rc;
+
+ msleep(40);
+
+ return 0;
+}
diff --git a/drivers/staging/tm6000/tm6000-usb-isoc.h b/drivers/staging/tm6000/tm6000-usb-isoc.h
new file mode 100644
index 000000000000..5a5049acd4ec
--- /dev/null
+++ b/drivers/staging/tm6000/tm6000-usb-isoc.h
@@ -0,0 +1,53 @@
+/*
+ tm6000-buf.c - driver for TM5600/TM6000/TM6010 USB video capture devices
+
+ Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation version 2
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/videodev2.h>
+
+#define TM6000_URB_MSG_LEN 180
+
+struct usb_isoc_ctl {
+ /* max packet size of isoc transaction */
+ int max_pkt_size;
+
+ /* number of allocated urbs */
+ int num_bufs;
+
+ /* urb for isoc transfers */
+ struct urb **urb;
+
+ /* transfer buffers for isoc transfer */
+ char **transfer_buffer;
+
+ /* Last buffer command and region */
+ u8 cmd;
+ int pos, size, pktsize;
+
+ /* Last field: ODD or EVEN? */
+ int field;
+
+ /* Stores incomplete commands */
+ u32 tmp_buf;
+ int tmp_buf_len;
+
+ /* Stores already requested buffers */
+ struct tm6000_buffer *buf;
+
+ /* Stores the number of received fields */
+ int nfields;
+};
diff --git a/drivers/staging/tm6000/tm6000-video.c b/drivers/staging/tm6000/tm6000-video.c
new file mode 100644
index 000000000000..da1dc07dff12
--- /dev/null
+++ b/drivers/staging/tm6000/tm6000-video.c
@@ -0,0 +1,1557 @@
+/*
+ tm6000-video.c - driver for TM5600/TM6000/TM6010 USB video capture devices
+
+ Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+
+ Copyright (C) 2007 Michel Ludwig <michel.ludwig@gmail.com>
+ - Fixed module load/unload
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation version 2
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/random.h>
+#include <linux/version.h>
+#include <linux/usb.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-ioctl.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/highmem.h>
+#include <linux/freezer.h>
+
+#include "tm6000-regs.h"
+#include "tm6000.h"
+
+#define BUFFER_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
+
+/* Limits minimum and default number of buffers */
+#define TM6000_MIN_BUF 4
+#define TM6000_DEF_BUF 8
+
+#define TM6000_MAX_ISO_PACKETS 40 /* Max number of ISO packets */
+
+/* Declare static vars that will be used as parameters */
+static unsigned int vid_limit = 16; /* Video memory limit, in Mb */
+static int video_nr = -1; /* /dev/videoN, -1 for autodetect */
+
+/* Debug level */
+int tm6000_debug;
+
+/* supported controls */
+static struct v4l2_queryctrl tm6000_qctrl[] = {
+ {
+ .id = V4L2_CID_BRIGHTNESS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Brightness",
+ .minimum = 0,
+ .maximum = 255,
+ .step = 1,
+ .default_value = 54,
+ .flags = 0,
+ }, {
+ .id = V4L2_CID_CONTRAST,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Contrast",
+ .minimum = 0,
+ .maximum = 255,
+ .step = 0x1,
+ .default_value = 119,
+ .flags = 0,
+ }, {
+ .id = V4L2_CID_SATURATION,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Saturation",
+ .minimum = 0,
+ .maximum = 255,
+ .step = 0x1,
+ .default_value = 112,
+ .flags = 0,
+ }, {
+ .id = V4L2_CID_HUE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Hue",
+ .minimum = -128,
+ .maximum = 127,
+ .step = 0x1,
+ .default_value = 0,
+ .flags = 0,
+ }
+};
+
+static int qctl_regs[ARRAY_SIZE(tm6000_qctrl)];
+
+static struct tm6000_fmt format[] = {
+ {
+ .name = "4:2:2, packed, YUY2",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .depth = 16,
+ }, {
+ .name = "4:2:2, packed, UYVY",
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .depth = 16,
+ }, {
+ .name = "A/V + VBI mux packet",
+ .fourcc = V4L2_PIX_FMT_TM6000,
+ .depth = 16,
+ }
+};
+
+/* ------------------------------------------------------------------
+ DMA and thread functions
+ ------------------------------------------------------------------*/
+
+#define norm_maxw(a) 720
+#define norm_maxh(a) 576
+
+#define norm_minw(a) norm_maxw(a)
+#define norm_minh(a) norm_maxh(a)
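+
+/*
+ * The minimum and maximum sizes are the same here, so the negotiated
+ * frame size is effectively fixed; the per-standard height is set
+ * separately by tm6000_get_std_res().
+ */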
+
+/*
+ * video-buf generic routine to get the next available buffer
+ */
+static inline void get_next_buf(struct tm6000_dmaqueue *dma_q,
+ struct tm6000_buffer **buf)
+{
+ struct tm6000_core *dev = container_of(dma_q, struct tm6000_core, vidq);
+ char *outp;
+
+ if (list_empty(&dma_q->active)) {
+ dprintk(dev, V4L2_DEBUG_QUEUE, "No active queue to serve\n");
+ *buf = NULL;
+ return;
+ }
+
+ *buf = list_entry(dma_q->active.next,
+ struct tm6000_buffer, vb.queue);
+
+ if (!*buf)
+ return;
+
+ /* Clean up the buffer - useful for testing for frame/URB loss */
+ outp = videobuf_to_vmalloc(&(*buf)->vb);
+// if (outp)
+// memset(outp, 0, (*buf)->vb.size);
+
+ return;
+}
+
+/*
+ * Announce that a buffer was filled and request the next one
+ */
+static inline void buffer_filled(struct tm6000_core *dev,
+ struct tm6000_dmaqueue *dma_q,
+ struct tm6000_buffer *buf)
+{
+ /* Advise that the buffer was filled */
+ dprintk(dev, V4L2_DEBUG_ISOC, "[%p/%d] wakeup\n", buf, buf->vb.i);
+ buf->vb.state = VIDEOBUF_DONE;
+ buf->vb.field_count++;
+ do_gettimeofday(&buf->vb.ts);
+
+ list_del(&buf->vb.queue);
+ wake_up(&buf->vb.done);
+}
+
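+/* Human-readable message names, indexed by the 3-bit cmd field of each chunk header */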
+const char *tm6000_msg_type[] = {
+ "unknown(0)", /* 0 */
+ "video", /* 1 */
+ "audio", /* 2 */
+ "vbi", /* 3 */
+ "pts", /* 4 */
+ "err", /* 5 */
+ "unknown(6)", /* 6 */
+ "unknown(7)", /* 7 */
+};
+
+/*
+ * Identify the tm5600/6000 buffer header type and handle it properly
+ */
+static int copy_packet(struct urb *urb, u32 header, u8 **ptr, u8 *endp,
+ u8 *out_p, struct tm6000_buffer **buf)
+{
+ struct tm6000_dmaqueue *dma_q = urb->context;
+ struct tm6000_core *dev = container_of(dma_q, struct tm6000_core, vidq);
+ u8 c;
+ unsigned int cmd, cpysize, pktsize, size, field, block, line, pos = 0;
+ int rc = 0;
+ /* FIXME: move to tm6000-isoc */
+ static int last_line = -2, start_line = -2, last_field = -2;
+
+ /* FIXME: this is the hardcoded window size
+ */
+ unsigned int linewidth = (*buf)->vb.width << 1;
+
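+ /*
+ * Each data chunk starts with a 32-bit header. As parsed below, the
+ * layout appears to be (on little-endian hosts):
+ * bits 31..24 sync byte (0x47)
+ * bits 23..21 cmd - message type, see tm6000_msg_type[]
+ * bits 20..12 line number
+ * bit 11 field (odd/even)
+ * bits 10..7 block number within the line
+ * bits 6..1 encoded payload size (converted to bytes below)
+ */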
+ if (!dev->isoc_ctl.cmd) {
+ c = (header >> 24) & 0xff;
+
+ /* split the header fields */
+ size = (((header & 0x7e) << 1) - 1) * 4;
+ block = (header >> 7) & 0xf;
+ field = (header >> 11) & 0x1;
+ line = (header >> 12) & 0x1ff;
+ cmd = (header >> 21) & 0x7;
+
+ /* Validates header fields */
+ if (size > TM6000_URB_MSG_LEN)
+ size = TM6000_URB_MSG_LEN;
+
+ if (cmd == TM6000_URB_MSG_VIDEO) {
+ if ((block + 1) * TM6000_URB_MSG_LEN > linewidth)
+ cmd = TM6000_URB_MSG_ERR;
+
+ /* FIXME: Builds the image as field0+field1.
+ * It should, instead, check whether the user selected
+ * interlaced or non-interlaced mode
+ */
+ pos = ((line << 1) + field) * linewidth +
+ block * TM6000_URB_MSG_LEN;
+
+ /* Don't allow writes outside the buffer */
+ if (pos + TM6000_URB_MSG_LEN > (*buf)->vb.size) {
+ dprintk(dev, V4L2_DEBUG_ISOC,
+ "ERR: size=%d, num=%d, line=%d, "
+ "field=%d\n",
+ size, block, line, field);
+
+ cmd = TM6000_URB_MSG_ERR;
+ }
+ } else {
+ pos=0;
+ }
+
+ /* Prints debug info */
+ dprintk(dev, V4L2_DEBUG_ISOC, "size=%d, num=%d, "
+ " line=%d, field=%d\n",
+ size, block, line, field);
+
+ if ((last_line != line) && (last_line + 1 != line) &&
+ (cmd != TM6000_URB_MSG_ERR)) {
+ if (cmd != TM6000_URB_MSG_VIDEO) {
+ dprintk(dev, V4L2_DEBUG_ISOC, "cmd=%d, "
+ "size=%d, num=%d, line=%d, field=%d\n",
+ cmd, size, block, line, field);
+ }
+ if (start_line<0)
+ start_line=last_line;
+ /* Prints debug info */
+ dprintk(dev, V4L2_DEBUG_ISOC, "lines= %d-%d, "
+ "field=%d\n",
+ start_line, last_line, field);
+
+ if ((start_line < 6 && last_line > 200) &&
+ (last_field != field)) {
+
+ dev->isoc_ctl.nfields++;
+ if (dev->isoc_ctl.nfields >= 2) {
+ dev->isoc_ctl.nfields = 0;
+
+ /* Announce that a new buffer was filled */
+ buffer_filled(dev, dma_q, *buf);
+ dprintk(dev, V4L2_DEBUG_ISOC,
+ "new buffer filled\n");
+ get_next_buf(dma_q, buf);
+ if (!*buf)
+ return rc;
+ out_p = videobuf_to_vmalloc(&((*buf)->vb));
+ if (!out_p)
+ return rc;
+
+ pos = dev->isoc_ctl.pos = 0;
+ }
+ }
+
+ start_line=line;
+ last_field=field;
+ }
+ if (cmd == TM6000_URB_MSG_VIDEO)
+ last_line = line;
+
+ pktsize = TM6000_URB_MSG_LEN;
+ } else {
+ /* Continue the last copy */
+ cmd = dev->isoc_ctl.cmd;
+ size= dev->isoc_ctl.size;
+ pos = dev->isoc_ctl.pos;
+ pktsize = dev->isoc_ctl.pktsize;
+ }
+
+ cpysize = (endp - (*ptr) > size) ? size : endp - *ptr;
+
+ if (cpysize) {
+ /* handle each different URB message */
+ switch (cmd) {
+ case TM6000_URB_MSG_VIDEO:
+ /* Fills video buffer */
+ memcpy(&out_p[pos], *ptr, cpysize);
+ break;
+ case TM6000_URB_MSG_PTS:
+ break;
+ case TM6000_URB_MSG_AUDIO:
+ /* FIXME: need some code to process audio */
+ printk(KERN_DEBUG "%ld: cmd=%s, size=%d\n", jiffies,
+ tm6000_msg_type[cmd], size);
+ break;
+ default:
+ dprintk(dev, V4L2_DEBUG_ISOC, "cmd=%s, size=%d\n",
+ tm6000_msg_type[cmd], size);
+ }
+ }
+ if (cpysize < size) {
+ /* End of URB packet, but cmd processing is not
+ * complete. Preserve the state for the next packet
+ */
+ dev->isoc_ctl.pos = pos + cpysize;
+ dev->isoc_ctl.size = size - cpysize;
+ dev->isoc_ctl.cmd = cmd;
+ dev->isoc_ctl.pktsize = pktsize - cpysize;
+ (*ptr) += cpysize;
+ } else {
+ dev->isoc_ctl.cmd = 0;
+ (*ptr) += pktsize;
+ }
+
+ return rc;
+}
+
+static int copy_streams(u8 *data, u8 *out_p, unsigned long len,
+ struct urb *urb, struct tm6000_buffer **buf)
+{
+ struct tm6000_dmaqueue *dma_q = urb->context;
+ struct tm6000_core *dev = container_of(dma_q, struct tm6000_core, vidq);
+ u8 *ptr = data, *endp = data + len;
+ u32 header = 0;
+ int rc = 0;
+
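+ /*
+ * Walk the URB payload. Each chunk is preceded by a 4-byte header
+ * whose last byte is the 0x47 sync marker; since a header may be
+ * split across two packets, up to 3 bytes are saved in tmp_buf and
+ * recovered here.
+ */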
+ for (ptr=data; ptr<endp;) {
+ if (!dev->isoc_ctl.cmd) {
+ u8 *p=(u8 *)&dev->isoc_ctl.tmp_buf;
+ /* FIXME: This seems overly complex.
+ * It just recovers up to 3 bytes of a header that
+ * may have been left at the end of the previous packet
+ */
+ if (dev->isoc_ctl.tmp_buf_len) {
+ while (dev->isoc_ctl.tmp_buf_len) {
+ if ( *(ptr+3-dev->isoc_ctl.tmp_buf_len) == 0x47) {
+ break;
+ }
+ p++;
+ dev->isoc_ctl.tmp_buf_len--;
+ }
+ if (dev->isoc_ctl.tmp_buf_len) {
+ memcpy(&header, p,
+ dev->isoc_ctl.tmp_buf_len);
+ memcpy((u8 *)&header +
+ dev->isoc_ctl.tmp_buf_len,
+ ptr,
+ 4 - dev->isoc_ctl.tmp_buf_len);
+ ptr += 4 - dev->isoc_ctl.tmp_buf_len;
+ goto HEADER;
+ }
+ }
+ /* Seek for sync */
+ for (; ptr < endp - 3; ptr++) {
+ if (*(ptr + 3) == 0x47)
+ break;
+ }
+
+ if (ptr + 3 >= endp) {
+ dev->isoc_ctl.tmp_buf_len = endp - ptr;
+ memcpy(&dev->isoc_ctl.tmp_buf, ptr,
+ dev->isoc_ctl.tmp_buf_len);
+ dev->isoc_ctl.cmd = 0;
+ return rc;
+ }
+
+ /* Get message header */
+ header = *(u32 *)ptr;
+ ptr += 4;
+ }
+HEADER:
+ /* Copy or continue last copy */
+ rc=copy_packet(urb,header,&ptr,endp,out_p,buf);
+ if (rc<0) {
+ buf=NULL;
+ printk(KERN_ERR "tm6000: buffer underrun at %ld\n",
+ jiffies);
+ return rc;
+ }
+ if (!*buf)
+ return 0;
+ }
+
+ return 0;
+}
+/*
+ * Identify the tm5600/6000 buffer header type and handle it properly
+ */
+static int copy_multiplexed(u8 *ptr, u8 *out_p, unsigned long len,
+ struct urb *urb, struct tm6000_buffer **buf)
+{
+ struct tm6000_dmaqueue *dma_q = urb->context;
+ struct tm6000_core *dev= container_of(dma_q,struct tm6000_core,vidq);
+ unsigned int pos=dev->isoc_ctl.pos,cpysize;
+ int rc=1;
+
+ while (len>0) {
+ cpysize=min(len,(*buf)->vb.size-pos);
+//printk("Copying %d bytes (max=%lu) from %p to %p[%u]\n",cpysize,(*buf)->vb.size,ptr,out_p,pos);
+ memcpy(&out_p[pos], ptr, cpysize);
+ pos+=cpysize;
+ ptr+=cpysize;
+ len-=cpysize;
+ if (pos >= (*buf)->vb.size) {
+ pos=0;
+ /* Announce that a new buffer was filled */
+ buffer_filled(dev, dma_q, *buf);
+ dprintk(dev, V4L2_DEBUG_ISOC, "new buffer filled\n");
+ get_next_buf(dma_q, buf);
+ if (!*buf)
+ break;
+ out_p = videobuf_to_vmalloc(&((*buf)->vb));
+ if (!out_p)
+ return rc;
+ pos = 0;
+ }
+ }
+
+ dev->isoc_ctl.pos=pos;
+ return rc;
+}
+
+static inline void print_err_status(struct tm6000_core *dev,
+ int packet, int status)
+{
+ char *errmsg = "Unknown";
+
+ switch(status) {
+ case -ENOENT:
+ errmsg = "unlinked synchronously";
+ break;
+ case -ECONNRESET:
+ errmsg = "unlinked asynchronously";
+ break;
+ case -ENOSR:
+ errmsg = "Buffer error (overrun)";
+ break;
+ case -EPIPE:
+ errmsg = "Stalled (device not responding)";
+ break;
+ case -EOVERFLOW:
+ errmsg = "Babble (bad cable?)";
+ break;
+ case -EPROTO:
+ errmsg = "Bit-stuff error (bad cable?)";
+ break;
+ case -EILSEQ:
+ errmsg = "CRC/Timeout (could be anything)";
+ break;
+ case -ETIME:
+ errmsg = "Device does not respond";
+ break;
+ }
+ if (packet<0) {
+ dprintk(dev, V4L2_DEBUG_QUEUE, "URB status %d [%s].\n",
+ status, errmsg);
+ } else {
+ dprintk(dev, V4L2_DEBUG_QUEUE, "URB packet %d, status %d [%s].\n",
+ packet, status, errmsg);
+ }
+}
+
+
+/*
+ * Controls the isoc copy of each urb packet
+ */
+static inline int tm6000_isoc_copy(struct urb *urb)
+{
+ struct tm6000_dmaqueue *dma_q = urb->context;
+ struct tm6000_core *dev= container_of(dma_q,struct tm6000_core,vidq);
+ struct tm6000_buffer *buf;
+ int i, len=0, rc=1;
+ int size;
+ char *outp = NULL, *p;
+ unsigned long copied;
+
+ get_next_buf(dma_q, &buf);
+ if (buf)
+ outp = videobuf_to_vmalloc(&buf->vb);
+
+ if (!outp)
+ return 0;
+
+ size = buf->vb.size;
+
+ copied=0;
+
+ if (urb->status<0) {
+ print_err_status (dev,-1,urb->status);
+ return 0;
+ }
+
+ for (i = 0; i < urb->number_of_packets; i++) {
+ int status = urb->iso_frame_desc[i].status;
+
+ if (status<0) {
+ print_err_status (dev,i,status);
+ continue;
+ }
+
+ len=urb->iso_frame_desc[i].actual_length;
+
+// if (len>=TM6000_URB_MSG_LEN) {
+ p=urb->transfer_buffer + urb->iso_frame_desc[i].offset;
+ if (!urb->iso_frame_desc[i].status) {
+ if ((buf->fmt->fourcc)==V4L2_PIX_FMT_TM6000) {
+ rc=copy_multiplexed(p, outp, len, urb, &buf);
+ if (rc<=0)
+ return rc;
+ } else {
+ copy_streams(p, outp, len, urb, &buf);
+ }
+ }
+ copied += len;
+ if (copied >= size || !buf)
+ break;
+// }
+ }
+ return rc;
+}
+
+/* ------------------------------------------------------------------
+ URB control
+ ------------------------------------------------------------------*/
+
+/*
+ * IRQ callback, called by URB callback
+ */
+static void tm6000_irq_callback(struct urb *urb)
+{
+ struct tm6000_dmaqueue *dma_q = urb->context;
+ struct tm6000_core *dev = container_of(dma_q, struct tm6000_core, vidq);
+ int i;
+
+ if (!dev)
+ return;
+
+ spin_lock(&dev->slock);
+ tm6000_isoc_copy(urb);
+ spin_unlock(&dev->slock);
+
+ /* Reset urb buffers */
+ for (i = 0; i < urb->number_of_packets; i++) {
+ urb->iso_frame_desc[i].status = 0;
+ urb->iso_frame_desc[i].actual_length = 0;
+ }
+
+ urb->status = usb_submit_urb(urb, GFP_ATOMIC);
+ if (urb->status)
+ tm6000_err("urb resubmit failed (error=%i)\n",
+ urb->status);
+}
+
+/*
+ * Stop and Deallocate URBs
+ */
+static void tm6000_uninit_isoc(struct tm6000_core *dev)
+{
+ struct urb *urb;
+ int i;
+
+ dev->isoc_ctl.nfields = -1;
+ dev->isoc_ctl.buf = NULL;
+ for (i = 0; i < dev->isoc_ctl.num_bufs; i++) {
+ urb=dev->isoc_ctl.urb[i];
+ if (urb) {
+ usb_kill_urb(urb);
+ usb_unlink_urb(urb);
+ if (dev->isoc_ctl.transfer_buffer[i]) {
+ usb_free_coherent(dev->udev,
+ urb->transfer_buffer_length,
+ dev->isoc_ctl.transfer_buffer[i],
+ urb->transfer_dma);
+ }
+ usb_free_urb(urb);
+ dev->isoc_ctl.urb[i] = NULL;
+ }
+ dev->isoc_ctl.transfer_buffer[i] = NULL;
+ }
+
+ kfree(dev->isoc_ctl.urb);
+ kfree(dev->isoc_ctl.transfer_buffer);
+
+ dev->isoc_ctl.urb = NULL;
+ dev->isoc_ctl.transfer_buffer = NULL;
+ dev->isoc_ctl.num_bufs = 0;
+}
+
+/*
+ * Allocate URBs and start IRQ
+ */
+static int tm6000_prepare_isoc(struct tm6000_core *dev, unsigned int framesize)
+{
+ struct tm6000_dmaqueue *dma_q = &dev->vidq;
+ int i, j, sb_size, pipe, size, max_packets, num_bufs = 5;
+ struct urb *urb;
+
+ /* De-allocate all pending URBs and transfer buffers */
+ tm6000_uninit_isoc(dev);
+
+ usb_set_interface(dev->udev,
+ dev->isoc_in.bInterfaceNumber,
+ dev->isoc_in.bAlternateSetting);
+
+ pipe = usb_rcvisocpipe(dev->udev,
+ dev->isoc_in.endp->desc.bEndpointAddress &
+ USB_ENDPOINT_NUMBER_MASK);
+
+ size = usb_maxpacket(dev->udev, pipe, usb_pipeout(pipe));
+
+ if (size > dev->isoc_in.maxsize)
+ size = dev->isoc_in.maxsize;
+
+ dev->isoc_ctl.max_pkt_size = size;
+
+ max_packets = (framesize + size - 1) / size;
+
+ if (max_packets > TM6000_MAX_ISO_PACKETS)
+ max_packets = TM6000_MAX_ISO_PACKETS;
+
+ sb_size = max_packets * size;
+
+ dev->isoc_ctl.num_bufs = num_bufs;
+
+ dev->isoc_ctl.urb = kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL);
+ if (!dev->isoc_ctl.urb) {
+ tm6000_err("cannot alloc memory for usb buffers\n");
+ return -ENOMEM;
+ }
+
+ dev->isoc_ctl.transfer_buffer = kzalloc(sizeof(void *) * num_bufs,
+ GFP_KERNEL);
+ if (!dev->isoc_ctl.transfer_buffer) {
+ tm6000_err("cannot allocate memory for usb transfer buffers\n");
+ kfree(dev->isoc_ctl.urb);
+ return -ENOMEM;
+ }
+
+ dprintk(dev, V4L2_DEBUG_QUEUE, "Allocating %d packets for each of"
+ " %d URBs (%d bytes per URB); endpoint max packet %d,"
+ " using %u bytes per isoc packet\n",
+ max_packets, num_bufs, sb_size,
+ dev->isoc_in.maxsize, size);
+
+ /* allocate urbs and transfer buffers */
+ for (i = 0; i < dev->isoc_ctl.num_bufs; i++) {
+ urb = usb_alloc_urb(max_packets, GFP_KERNEL);
+ if (!urb) {
+ tm6000_err("cannot alloc isoc_ctl.urb %i\n", i);
+ tm6000_uninit_isoc(dev);
+ usb_free_urb(urb);
+ return -ENOMEM;
+ }
+ dev->isoc_ctl.urb[i] = urb;
+
+ dev->isoc_ctl.transfer_buffer[i] = usb_alloc_coherent(dev->udev,
+ sb_size, GFP_KERNEL, &urb->transfer_dma);
+ if (!dev->isoc_ctl.transfer_buffer[i]) {
+ tm6000_err("unable to allocate %i bytes for transfer"
+ " buffer %i%s\n",
+ sb_size, i,
+ in_interrupt() ? " while in int" : "");
+ tm6000_uninit_isoc(dev);
+ return -ENOMEM;
+ }
+ memset(dev->isoc_ctl.transfer_buffer[i], 0, sb_size);
+
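+ /*
+ * usb_fill_bulk_urb() is only used to fill in the fields common to
+ * all URBs; the isochronous-specific parts (interval, number of
+ * packets and the per-packet descriptors) are set up by hand below.
+ */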
+ usb_fill_bulk_urb(urb, dev->udev, pipe,
+ dev->isoc_ctl.transfer_buffer[i], sb_size,
+ tm6000_irq_callback, dma_q);
+ urb->interval = dev->isoc_in.endp->desc.bInterval;
+ urb->number_of_packets = max_packets;
+ urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
+
+ for (j = 0; j < max_packets; j++) {
+ urb->iso_frame_desc[j].offset = size * j;
+ urb->iso_frame_desc[j].length = size;
+ }
+ }
+
+ return 0;
+}
+
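+/*
+ * Submit all previously allocated isoc URBs; from then on the completion
+ * handler (tm6000_irq_callback) keeps the chain alive by resubmitting.
+ */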
+static int tm6000_start_thread( struct tm6000_core *dev)
+{
+ struct tm6000_dmaqueue *dma_q = &dev->vidq;
+ int i;
+
+ dma_q->frame=0;
+ dma_q->ini_jiffies=jiffies;
+
+ init_waitqueue_head(&dma_q->wq);
+
+ /* submit urbs and enables IRQ */
+ for (i = 0; i < dev->isoc_ctl.num_bufs; i++) {
+ int rc = usb_submit_urb(dev->isoc_ctl.urb[i], GFP_ATOMIC);
+ if (rc) {
+ tm6000_err("submit of urb %i failed (error=%i)\n", i,
+ rc);
+ tm6000_uninit_isoc(dev);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+/* ------------------------------------------------------------------
+ Videobuf operations
+ ------------------------------------------------------------------*/
+
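+/*
+ * buffer_setup() is called by videobuf to size the capture queue: each
+ * buffer holds one frame (width * height * depth / 8 bytes) and the
+ * buffer count is trimmed so the whole queue stays within the vid_limit
+ * module parameter (in megabytes).
+ */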
+static int
+buffer_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size)
+{
+ struct tm6000_fh *fh = vq->priv_data;
+
+ *size = fh->fmt->depth * fh->width * fh->height >> 3;
+ if (0 == *count)
+ *count = TM6000_DEF_BUF;
+
+ if (*count < TM6000_MIN_BUF) {
+ *count=TM6000_MIN_BUF;
+ }
+
+ while (*size * *count > vid_limit * 1024 * 1024)
+ (*count)--;
+
+ return 0;
+}
+
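+/*
+ * Release a buffer: make sure the isoc handler no longer points at it
+ * before freeing its vmalloc'ed memory.
+ */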
+static void free_buffer(struct videobuf_queue *vq, struct tm6000_buffer *buf)
+{
+ struct tm6000_fh *fh = vq->priv_data;
+ struct tm6000_core *dev = fh->dev;
+ unsigned long flags;
+
+ BUG_ON(in_interrupt());
+
+ /* We used to wait for the buffer to finish here, but this didn't work
+ because, as we were keeping the state as VIDEOBUF_QUEUED,
+ videobuf_queue_cancel marked it as finished for us.
+ (Also, it could wedge forever if the hardware was misconfigured.)
+
+ This should be safe; by the time we get here, the buffer isn't
+ queued anymore. If we ever start marking the buffers as
+ VIDEOBUF_ACTIVE, it won't be, though.
+ */
+ spin_lock_irqsave(&dev->slock, flags);
+ if (dev->isoc_ctl.buf == buf)
+ dev->isoc_ctl.buf = NULL;
+ spin_unlock_irqrestore(&dev->slock, flags);
+
+ videobuf_vmalloc_free(&buf->vb);
+ buf->vb.state = VIDEOBUF_NEEDS_INIT;
+}
+
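+/*
+ * Prepare a buffer for capture: (re)allocate its memory if needed and,
+ * when no URBs exist yet, set up and start the isoc transfers.
+ */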
+static int
+buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
+ enum v4l2_field field)
+{
+ struct tm6000_fh *fh = vq->priv_data;
+ struct tm6000_buffer *buf = container_of(vb,struct tm6000_buffer,vb);
+ struct tm6000_core *dev = fh->dev;
+ int rc = 0, urb_init = 0;
+
+ BUG_ON(NULL == fh->fmt);
+
+
+ /* FIXME: It assumes depth=2 */
+ /* The only currently supported format is 16 bits/pixel */
+ buf->vb.size = fh->fmt->depth*fh->width*fh->height >> 3;
+ if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
+ return -EINVAL;
+
+ if (buf->fmt != fh->fmt ||
+ buf->vb.width != fh->width ||
+ buf->vb.height != fh->height ||
+ buf->vb.field != field) {
+ buf->fmt = fh->fmt;
+ buf->vb.width = fh->width;
+ buf->vb.height = fh->height;
+ buf->vb.field = field;
+ buf->vb.state = VIDEOBUF_NEEDS_INIT;
+ }
+
+ if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
+ if (0 != (rc = videobuf_iolock(vq, &buf->vb, NULL)))
+ goto fail;
+ urb_init = 1;
+ }
+
+ if (!dev->isoc_ctl.num_bufs)
+ urb_init = 1;
+
+ if (urb_init) {
+ rc = tm6000_prepare_isoc(dev, buf->vb.size);
+ if (rc < 0)
+ goto fail;
+
+ rc = tm6000_start_thread(dev);
+ if (rc < 0)
+ goto fail;
+
+ }
+
+ buf->vb.state = VIDEOBUF_PREPARED;
+ return 0;
+
+fail:
+ free_buffer(vq, buf);
+ return rc;
+}
+
+static void
+buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+{
+ struct tm6000_buffer *buf = container_of(vb,struct tm6000_buffer,vb);
+ struct tm6000_fh *fh = vq->priv_data;
+ struct tm6000_core *dev = fh->dev;
+ struct tm6000_dmaqueue *vidq = &dev->vidq;
+
+ buf->vb.state = VIDEOBUF_QUEUED;
+ list_add_tail(&buf->vb.queue, &vidq->active);
+}
+
+static void buffer_release(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+{
+ struct tm6000_buffer *buf = container_of(vb,struct tm6000_buffer,vb);
+
+ free_buffer(vq,buf);
+}
+
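+/* videobuf callbacks wired into the per-filehandle queue by tm6000_open() */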
+static struct videobuf_queue_ops tm6000_video_qops = {
+ .buf_setup = buffer_setup,
+ .buf_prepare = buffer_prepare,
+ .buf_queue = buffer_queue,
+ .buf_release = buffer_release,
+};
+
+/* ------------------------------------------------------------------
+ IOCTL handling
+ ------------------------------------------------------------------*/
+
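+/*
+ * Exclusive ownership of the capture hardware: dev->resources is a single
+ * flag protected by dev->lock, so only one file handle can stream (or
+ * read) at a time; res_get() claims it, res_free() drops it.
+ */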
+static int res_get(struct tm6000_core *dev, struct tm6000_fh *fh)
+{
+ /* is it free? */
+ mutex_lock(&dev->lock);
+ if (dev->resources) {
+ /* no, someone else uses it */
+ mutex_unlock(&dev->lock);
+ return 0;
+ }
+ /* it's free, grab it */
+ dev->resources =1;
+ dprintk(dev, V4L2_DEBUG_RES_LOCK, "res: get\n");
+ mutex_unlock(&dev->lock);
+ return 1;
+}
+
+static int res_locked(struct tm6000_core *dev)
+{
+ return (dev->resources);
+}
+
+static void res_free(struct tm6000_core *dev, struct tm6000_fh *fh)
+{
+ mutex_lock(&dev->lock);
+ dev->resources = 0;
+ dprintk(dev, V4L2_DEBUG_RES_LOCK, "res: put\n");
+ mutex_unlock(&dev->lock);
+}
+
+/* ------------------------------------------------------------------
+ IOCTL vidioc handling
+ ------------------------------------------------------------------*/
+static int vidioc_querycap (struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ // struct tm6000_core *dev = ((struct tm6000_fh *)priv)->dev;
+
+ strlcpy(cap->driver, "tm6000", sizeof(cap->driver));
+ strlcpy(cap->card,"Trident TVMaster TM5600/6000/6010", sizeof(cap->card));
+ // strlcpy(cap->bus_info, dev->udev->dev.bus_id, sizeof(cap->bus_info));
+ cap->version = TM6000_VERSION;
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_STREAMING |
+ V4L2_CAP_TUNER |
+ V4L2_CAP_READWRITE;
+ return 0;
+}
+
+static int vidioc_enum_fmt_vid_cap (struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (unlikely(f->index >= ARRAY_SIZE(format)))
+ return -EINVAL;
+
+ strlcpy(f->description,format[f->index].name,sizeof(f->description));
+ f->pixelformat = format[f->index].fourcc;
+ return 0;
+}
+
+static int vidioc_g_fmt_vid_cap (struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct tm6000_fh *fh=priv;
+
+ f->fmt.pix.width = fh->width;
+ f->fmt.pix.height = fh->height;
+ f->fmt.pix.field = fh->vb_vidq.field;
+ f->fmt.pix.pixelformat = fh->fmt->fourcc;
+ f->fmt.pix.bytesperline =
+ (f->fmt.pix.width * fh->fmt->depth) >> 3;
+ f->fmt.pix.sizeimage =
+ f->fmt.pix.height * f->fmt.pix.bytesperline;
+
+ return (0);
+}
+
+static struct tm6000_fmt* format_by_fourcc(unsigned int fourcc)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(format); i++)
+ if (format[i].fourcc == fourcc)
+ return format+i;
+ return NULL;
+}
+
+static int vidioc_try_fmt_vid_cap (struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct tm6000_core *dev = ((struct tm6000_fh *)priv)->dev;
+ struct tm6000_fmt *fmt;
+ enum v4l2_field field;
+
+ fmt = format_by_fourcc(f->fmt.pix.pixelformat);
+ if (NULL == fmt) {
+ dprintk(dev, V4L2_DEBUG_IOCTL_ARG, "Fourcc format (0x%08x)"
+ " invalid.\n", f->fmt.pix.pixelformat);
+ return -EINVAL;
+ }
+
+ field = f->fmt.pix.field;
+
+ if (field == V4L2_FIELD_ANY) {
+// field=V4L2_FIELD_INTERLACED;
+ field=V4L2_FIELD_SEQ_TB;
+ } else if (V4L2_FIELD_INTERLACED != field) {
+ dprintk(dev, V4L2_DEBUG_IOCTL_ARG, "Field type invalid.\n");
+ return -EINVAL;
+ }
+
+ tm6000_get_std_res (dev);
+
+ f->fmt.pix.width = dev->width;
+ f->fmt.pix.height = dev->height;
+
+ f->fmt.pix.width &= ~0x01;
+
+ f->fmt.pix.field = field;
+
+ f->fmt.pix.bytesperline =
+ (f->fmt.pix.width * fmt->depth) >> 3;
+ f->fmt.pix.sizeimage =
+ f->fmt.pix.height * f->fmt.pix.bytesperline;
+
+ return 0;
+}
+
+/*FIXME: This seems to be generic enough to be at videodev2 */
+static int vidioc_s_fmt_vid_cap (struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct tm6000_fh *fh=priv;
+ struct tm6000_core *dev = fh->dev;
+ int ret = vidioc_try_fmt_vid_cap(file,fh,f);
+ if (ret < 0)
+ return (ret);
+
+ fh->fmt = format_by_fourcc(f->fmt.pix.pixelformat);
+ fh->width = f->fmt.pix.width;
+ fh->height = f->fmt.pix.height;
+ fh->vb_vidq.field = f->fmt.pix.field;
+ fh->type = f->type;
+
+ dev->fourcc = f->fmt.pix.pixelformat;
+
+ tm6000_set_fourcc_format(dev);
+
+ return (0);
+}
+
+static int vidioc_reqbufs (struct file *file, void *priv,
+ struct v4l2_requestbuffers *p)
+{
+ struct tm6000_fh *fh=priv;
+
+ return (videobuf_reqbufs(&fh->vb_vidq, p));
+}
+
+static int vidioc_querybuf (struct file *file, void *priv,
+ struct v4l2_buffer *p)
+{
+ struct tm6000_fh *fh=priv;
+
+ return (videobuf_querybuf(&fh->vb_vidq, p));
+}
+
+static int vidioc_qbuf (struct file *file, void *priv, struct v4l2_buffer *p)
+{
+ struct tm6000_fh *fh=priv;
+
+ return (videobuf_qbuf(&fh->vb_vidq, p));
+}
+
+static int vidioc_dqbuf (struct file *file, void *priv, struct v4l2_buffer *p)
+{
+ struct tm6000_fh *fh=priv;
+
+ return (videobuf_dqbuf(&fh->vb_vidq, p,
+ file->f_flags & O_NONBLOCK));
+}
+
+#ifdef CONFIG_VIDEO_V4L1_COMPAT
+static int vidiocgmbuf (struct file *file, void *priv, struct video_mbuf *mbuf)
+{
+ struct tm6000_fh *fh=priv;
+
+ return videobuf_cgmbuf (&fh->vb_vidq, mbuf, 8);
+}
+#endif
+
+static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+ struct tm6000_fh *fh=priv;
+ struct tm6000_core *dev = fh->dev;
+
+ if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ if (i != fh->type)
+ return -EINVAL;
+
+ if (!res_get(dev,fh))
+ return -EBUSY;
+ return (videobuf_streamon(&fh->vb_vidq));
+}
+
+static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+ struct tm6000_fh *fh=priv;
+ struct tm6000_core *dev = fh->dev;
+
+ if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ if (i != fh->type)
+ return -EINVAL;
+
+ videobuf_streamoff(&fh->vb_vidq);
+ res_free(dev,fh);
+
+ return (0);
+}
+
+static int vidioc_s_std (struct file *file, void *priv, v4l2_std_id *norm)
+{
+ int rc=0;
+ struct tm6000_fh *fh=priv;
+ struct tm6000_core *dev = fh->dev;
+
+ rc=tm6000_set_standard (dev, norm);
+
+ fh->width = dev->width;
+ fh->height = dev->height;
+
+ if (rc<0)
+ return rc;
+
+ v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_std, dev->norm);
+
+ return 0;
+}
+
+static int vidioc_enum_input (struct file *file, void *priv,
+ struct v4l2_input *inp)
+{
+ switch (inp->index) {
+ case TM6000_INPUT_TV:
+ inp->type = V4L2_INPUT_TYPE_TUNER;
+ strcpy(inp->name,"Television");
+ break;
+ case TM6000_INPUT_COMPOSITE:
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+ strcpy(inp->name,"Composite");
+ break;
+ case TM6000_INPUT_SVIDEO:
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+ strcpy(inp->name,"S-Video");
+ break;
+ default:
+ return -EINVAL;
+ }
+ inp->std = TM6000_STD;
+
+ return 0;
+}
+
+static int vidioc_g_input (struct file *file, void *priv, unsigned int *i)
+{
+ struct tm6000_fh *fh=priv;
+ struct tm6000_core *dev = fh->dev;
+
+ *i=dev->input;
+
+ return 0;
+}
+static int vidioc_s_input (struct file *file, void *priv, unsigned int i)
+{
+ struct tm6000_fh *fh=priv;
+ struct tm6000_core *dev = fh->dev;
+ int rc=0;
+ char buf[1];
+
+ switch (i) {
+ case TM6000_INPUT_TV:
+ dev->input=i;
+ *buf=0;
+ break;
+ case TM6000_INPUT_COMPOSITE:
+ case TM6000_INPUT_SVIDEO:
+ dev->input=i;
+ *buf=1;
+ break;
+ default:
+ return -EINVAL;
+ }
+ rc=tm6000_read_write_usb (dev, USB_DIR_OUT | USB_TYPE_VENDOR,
+ REQ_03_SET_GET_MCU_PIN, 0x03, 1, buf, 1);
+
+ if (!rc) {
+ dev->input=i;
+ rc=vidioc_s_std (file, priv, &dev->vfd->current_norm);
+ }
+
+ return (rc);
+}
+
+ /* --- controls ---------------------------------------------- */
+static int vidioc_queryctrl (struct file *file, void *priv,
+ struct v4l2_queryctrl *qc)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tm6000_qctrl); i++)
+ if (qc->id && qc->id == tm6000_qctrl[i].id) {
+ memcpy(qc, &(tm6000_qctrl[i]),
+ sizeof(*qc));
+ return (0);
+ }
+
+ return -EINVAL;
+}
+
+static int vidioc_g_ctrl (struct file *file, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct tm6000_fh *fh=priv;
+ struct tm6000_core *dev = fh->dev;
+ int val;
+
+ /* FIXME: Probably, those won't work! Maybe we need shadow regs */
+ switch (ctrl->id) {
+ case V4L2_CID_CONTRAST:
+ val = tm6000_get_reg(dev, TM6010_REQ07_R08_LUMA_CONTRAST_ADJ, 0);
+ break;
+ case V4L2_CID_BRIGHTNESS:
+ val = tm6000_get_reg(dev, TM6010_REQ07_R09_LUMA_BRIGHTNESS_ADJ, 0);
+ break;
+ case V4L2_CID_SATURATION:
+ val = tm6000_get_reg(dev, TM6010_REQ07_R0A_CHROMA_SATURATION_ADJ, 0);
+ break;
+ case V4L2_CID_HUE:
+ val = tm6000_get_reg(dev, TM6010_REQ07_R0B_CHROMA_HUE_PHASE_ADJ, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (val<0)
+ return val;
+
+ ctrl->value=val;
+
+ return 0;
+}
+static int vidioc_s_ctrl (struct file *file, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct tm6000_fh *fh =priv;
+ struct tm6000_core *dev = fh->dev;
+ u8 val=ctrl->value;
+
+ switch (ctrl->id) {
+ case V4L2_CID_CONTRAST:
+ tm6000_set_reg(dev, TM6010_REQ07_R08_LUMA_CONTRAST_ADJ, val);
+ return 0;
+ case V4L2_CID_BRIGHTNESS:
+ tm6000_set_reg(dev, TM6010_REQ07_R09_LUMA_BRIGHTNESS_ADJ, val);
+ return 0;
+ case V4L2_CID_SATURATION:
+ tm6000_set_reg(dev, TM6010_REQ07_R0A_CHROMA_SATURATION_ADJ, val);
+ return 0;
+ case V4L2_CID_HUE:
+ tm6000_set_reg(dev, TM6010_REQ07_R0B_CHROMA_HUE_PHASE_ADJ, val);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int vidioc_g_tuner (struct file *file, void *priv,
+ struct v4l2_tuner *t)
+{
+ struct tm6000_fh *fh =priv;
+ struct tm6000_core *dev = fh->dev;
+
+ if (unlikely(UNSET == dev->tuner_type))
+ return -EINVAL;
+ if (0 != t->index)
+ return -EINVAL;
+
+ strcpy(t->name, "Television");
+ t->type = V4L2_TUNER_ANALOG_TV;
+ t->capability = V4L2_TUNER_CAP_NORM;
+ t->rangehigh = 0xffffffffUL;
+ t->rxsubchans = V4L2_TUNER_SUB_MONO;
+
+ return 0;
+}
+
+static int vidioc_s_tuner (struct file *file, void *priv,
+ struct v4l2_tuner *t)
+{
+ struct tm6000_fh *fh =priv;
+ struct tm6000_core *dev = fh->dev;
+
+ if (UNSET == dev->tuner_type)
+ return -EINVAL;
+ if (0 != t->index)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int vidioc_g_frequency (struct file *file, void *priv,
+ struct v4l2_frequency *f)
+{
+ struct tm6000_fh *fh =priv;
+ struct tm6000_core *dev = fh->dev;
+
+ if (unlikely(UNSET == dev->tuner_type))
+ return -EINVAL;
+
+ f->type = V4L2_TUNER_ANALOG_TV;
+ f->frequency = dev->freq;
+
+ v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, g_frequency, f);
+
+ return 0;
+}
+
+static int vidioc_s_frequency (struct file *file, void *priv,
+ struct v4l2_frequency *f)
+{
+ struct tm6000_fh *fh =priv;
+ struct tm6000_core *dev = fh->dev;
+
+ if (unlikely(f->type != V4L2_TUNER_ANALOG_TV))
+ return -EINVAL;
+
+ if (unlikely(UNSET == dev->tuner_type))
+ return -EINVAL;
+ if (unlikely(f->tuner != 0))
+ return -EINVAL;
+
+// mutex_lock(&dev->lock);
+ dev->freq = f->frequency;
+ v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_frequency, f);
+// mutex_unlock(&dev->lock);
+
+ return 0;
+}
+
+/* ------------------------------------------------------------------
+ File operations for the device
+ ------------------------------------------------------------------*/
+
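+/*
+ * open() allocates a per-filehandle structure, switches the device to
+ * analog mode if needed and initializes a vmalloc-backed videobuf queue;
+ * the isoc URBs themselves are only allocated later, in buffer_prepare().
+ */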
+static int tm6000_open(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct tm6000_core *dev = video_drvdata(file);
+ struct tm6000_fh *fh;
+ enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ int i,rc;
+
+ printk(KERN_INFO "tm6000: open called (dev=%s)\n",
+ video_device_node_name(vdev));
+
+ dprintk(dev, V4L2_DEBUG_OPEN, "tm6000: open called (dev=%s)\n",
+ video_device_node_name(vdev));
+
+
+ /* If more than one user, mutex should be added */
+ dev->users++;
+
+ dprintk(dev, V4L2_DEBUG_OPEN, "open dev=%s type=%s users=%d\n",
+ video_device_node_name(vdev), v4l2_type_names[type],
+ dev->users);
+
+ /* allocate + initialize per filehandle data */
+ fh = kzalloc(sizeof(*fh),GFP_KERNEL);
+ if (NULL == fh) {
+ dev->users--;
+ return -ENOMEM;
+ }
+
+ file->private_data = fh;
+ fh->dev = dev;
+
+ fh->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dev->fourcc = format[0].fourcc;
+
+ fh->fmt = format_by_fourcc(dev->fourcc);
+
+ tm6000_get_std_res (dev);
+
+ fh->width = dev->width;
+ fh->height = dev->height;
+
+ dprintk(dev, V4L2_DEBUG_OPEN, "Open: fh=0x%08lx, dev=0x%08lx, "
+ "dev->vidq=0x%08lx\n",
+ (unsigned long)fh,(unsigned long)dev,(unsigned long)&dev->vidq);
+ dprintk(dev, V4L2_DEBUG_OPEN, "Open: list_empty "
+ "queued=%d\n",list_empty(&dev->vidq.queued));
+ dprintk(dev, V4L2_DEBUG_OPEN, "Open: list_empty "
+ "active=%d\n",list_empty(&dev->vidq.active));
+
+ /* initialize hardware on analog mode */
+ if (dev->mode!=TM6000_MODE_ANALOG) {
+ rc=tm6000_init_analog_mode (dev);
+ if (rc<0)
+ return rc;
+
+ /* Put all controls at a sane state */
+ for (i = 0; i < ARRAY_SIZE(tm6000_qctrl); i++)
+ qctl_regs[i] =tm6000_qctrl[i].default_value;
+
+ dev->mode=TM6000_MODE_ANALOG;
+ }
+
+ videobuf_queue_vmalloc_init(&fh->vb_vidq, &tm6000_video_qops,
+ NULL, &dev->slock,
+ fh->type,
+ V4L2_FIELD_INTERLACED,
+ sizeof(struct tm6000_buffer),fh);
+
+ return 0;
+}
+
+static ssize_t
+tm6000_read(struct file *file, char __user *data, size_t count, loff_t *pos)
+{
+ struct tm6000_fh *fh = file->private_data;
+
+ if (fh->type==V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ if (res_locked(fh->dev))
+ return -EBUSY;
+
+ return videobuf_read_stream(&fh->vb_vidq, data, count, pos, 0,
+ file->f_flags & O_NONBLOCK);
+ }
+ return 0;
+}
+
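+/*
+ * poll(): wait either on the first queued streaming buffer or, for the
+ * read() path, on videobuf's generic poll helper.
+ */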
+static unsigned int
+tm6000_poll(struct file *file, struct poll_table_struct *wait)
+{
+ struct tm6000_fh *fh = file->private_data;
+ struct tm6000_buffer *buf;
+
+ if (V4L2_BUF_TYPE_VIDEO_CAPTURE != fh->type)
+ return POLLERR;
+
+ if (res_get(fh->dev,fh)) {
+ /* streaming capture */
+ if (list_empty(&fh->vb_vidq.stream))
+ return POLLERR;
+ buf = list_entry(fh->vb_vidq.stream.next,struct tm6000_buffer,vb.stream);
+ } else {
+ /* read() capture */
+ return videobuf_poll_stream(file, &fh->vb_vidq,
+ wait);
+ }
+ poll_wait(file, &buf->vb.done, wait);
+ if (buf->vb.state == VIDEOBUF_DONE ||
+ buf->vb.state == VIDEOBUF_ERROR)
+ return POLLIN|POLLRDNORM;
+ return 0;
+}
+
+static int tm6000_release(struct file *file)
+{
+ struct tm6000_fh *fh = file->private_data;
+ struct tm6000_core *dev = fh->dev;
+ struct video_device *vdev = video_devdata(file);
+
+ dprintk(dev, V4L2_DEBUG_OPEN, "tm6000: close called (dev=%s, users=%d)\n",
+ video_device_node_name(vdev), dev->users);
+
+ dev->users--;
+
+ if (!dev->users) {
+ tm6000_uninit_isoc(dev);
+ videobuf_mmap_free(&fh->vb_vidq);
+ }
+
+ kfree (fh);
+
+ return 0;
+}
+
+static int tm6000_mmap(struct file *file, struct vm_area_struct * vma)
+{
+ struct tm6000_fh *fh = file->private_data;
+ int ret;
+
+ ret=videobuf_mmap_mapper(&fh->vb_vidq, vma);
+
+ return ret;
+}
+
+static struct v4l2_file_operations tm6000_fops = {
+ .owner = THIS_MODULE,
+ .open = tm6000_open,
+ .release = tm6000_release,
+ .ioctl = video_ioctl2, /* V4L2 ioctl handler */
+ .read = tm6000_read,
+ .poll = tm6000_poll,
+ .mmap = tm6000_mmap,
+};
+
+static const struct v4l2_ioctl_ops video_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+ .vidioc_s_std = vidioc_s_std,
+ .vidioc_enum_input = vidioc_enum_input,
+ .vidioc_g_input = vidioc_g_input,
+ .vidioc_s_input = vidioc_s_input,
+ .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_s_ctrl = vidioc_s_ctrl,
+ .vidioc_g_tuner = vidioc_g_tuner,
+ .vidioc_s_tuner = vidioc_s_tuner,
+ .vidioc_g_frequency = vidioc_g_frequency,
+ .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_reqbufs = vidioc_reqbufs,
+ .vidioc_querybuf = vidioc_querybuf,
+ .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_dqbuf = vidioc_dqbuf,
+#ifdef CONFIG_VIDEO_V4L1_COMPAT
+ .vidiocgmbuf = vidiocgmbuf,
+#endif
+};
+
+static struct video_device tm6000_template = {
+ .name = "tm6000",
+ .fops = &tm6000_fops,
+ .ioctl_ops = &video_ioctl_ops,
+ .release = video_device_release,
+ .tvnorms = TM6000_STD,
+ .current_norm = V4L2_STD_NTSC_M,
+};
+
+/* -----------------------------------------------------------------
+ Initialization and module stuff
+ ------------------------------------------------------------------*/
+
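+/*
+ * Clone the template video_device, attach it to the v4l2_device and
+ * register /dev/videoN; the video_nr module parameter can force a
+ * specific node number.
+ */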
+int tm6000_v4l2_register(struct tm6000_core *dev)
+{
+ int ret = -1;
+ struct video_device *vfd;
+
+ vfd = video_device_alloc();
+ if(!vfd) {
+ return -ENOMEM;
+ }
+ dev->vfd = vfd;
+
+ /* init video dma queues */
+ INIT_LIST_HEAD(&dev->vidq.active);
+ INIT_LIST_HEAD(&dev->vidq.queued);
+
+ memcpy (dev->vfd, &tm6000_template, sizeof(*(dev->vfd)));
+ dev->vfd->debug=tm6000_debug;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ video_set_drvdata(vfd, dev);
+
+ ret = video_register_device(dev->vfd, VFL_TYPE_GRABBER, video_nr);
+ printk(KERN_INFO "Trident TVMaster TM5600/TM6000/TM6010 USB2 board (Load status: %d)\n", ret);
+ return ret;
+}
+
+int tm6000_v4l2_unregister(struct tm6000_core *dev)
+{
+ video_unregister_device(dev->vfd);
+
+ return 0;
+}
+
+int tm6000_v4l2_exit(void)
+{
+ return 0;
+}
+
+module_param(video_nr, int, 0);
+MODULE_PARM_DESC(video_nr,"Allow changing video device number");
+
+module_param_named (debug, tm6000_debug, int, 0444);
+MODULE_PARM_DESC(debug,"activates debug info");
+
+module_param(vid_limit,int,0644);
+MODULE_PARM_DESC(vid_limit,"capture memory limit in megabytes");
+
diff --git a/drivers/staging/tm6000/tm6000.h b/drivers/staging/tm6000/tm6000.h
new file mode 100644
index 000000000000..6812d6867d57
--- /dev/null
+++ b/drivers/staging/tm6000/tm6000.h
@@ -0,0 +1,301 @@
+/*
+ tm6000.h - driver for TM5600/TM6000/TM6010 USB video capture devices
+
+ Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+
+ Copyright (C) 2007 Michel Ludwig <michel.ludwig@gmail.com>
+ - DVB-T support
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation version 2
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+// Use the tm6000-hack, instead of the proper initialization code
+//#define HACK 1
+
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+#include <media/videobuf-vmalloc.h>
+#include "tm6000-usb-isoc.h"
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <media/v4l2-device.h>
+
+
+#include <linux/dvb/frontend.h>
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dmxdev.h"
+
+#define TM6000_VERSION KERNEL_VERSION(0, 0, 2)
+
+/* Inputs */
+
+enum tm6000_itype {
+ TM6000_INPUT_TV = 0,
+ TM6000_INPUT_COMPOSITE,
+ TM6000_INPUT_SVIDEO,
+};
+
+enum tm6000_devtype {
+ TM6000 = 0,
+ TM5600,
+ TM6010,
+};
+
+/* ------------------------------------------------------------------
+ Basic structures
+ ------------------------------------------------------------------*/
+
+struct tm6000_fmt {
+ char *name;
+ u32 fourcc; /* v4l2 format id */
+ int depth;
+};
+
+/* buffer for one video frame */
+struct tm6000_buffer {
+ /* common v4l buffer stuff -- must be first */
+ struct videobuf_buffer vb;
+
+ struct tm6000_fmt *fmt;
+};
+
+struct tm6000_dmaqueue {
+ struct list_head active;
+ struct list_head queued;
+
+ /* thread for generating video stream*/
+ struct task_struct *kthread;
+ wait_queue_head_t wq;
+ /* Counters to control fps rate */
+ int frame;
+ int ini_jiffies;
+};
+
+/* device states */
+enum tm6000_core_state {
+ DEV_INITIALIZED = 0x01,
+ DEV_DISCONNECTED = 0x02,
+ DEV_MISCONFIGURED = 0x04,
+};
+
+/* io methods */
+enum tm6000_io_method {
+ IO_NONE,
+ IO_READ,
+ IO_MMAP,
+};
+
+enum tm6000_mode {
+ TM6000_MODE_UNKNOWN=0,
+ TM6000_MODE_ANALOG,
+ TM6000_MODE_DIGITAL,
+};
+
+struct tm6000_gpio {
+ int tuner_reset;
+ int tuner_on;
+ int demod_reset;
+ int demod_on;
+ int power_led;
+ int dvb_led;
+ int ir;
+};
+
+struct tm6000_capabilities {
+ unsigned int has_tuner:1;
+ unsigned int has_tda9874:1;
+ unsigned int has_dvb:1;
+ unsigned int has_zl10353:1;
+ unsigned int has_eeprom:1;
+ unsigned int has_remote:1;
+};
+
+struct tm6000_dvb {
+ struct dvb_adapter adapter;
+ struct dvb_demux demux;
+ struct dvb_frontend *frontend;
+ struct dmxdev dmxdev;
+ unsigned int streams;
+ struct urb *bulk_urb;
+ struct mutex mutex;
+};
+
+struct tm6000_endpoint {
+ struct usb_host_endpoint *endp;
+ __u8 bInterfaceNumber;
+ __u8 bAlternateSetting;
+ unsigned maxsize;
+};
+
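+/*
+ * Main per-device state, shared by the analog (V4L2), DVB and i2c
+ * sides of the driver.
+ */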
+struct tm6000_core {
+ /* generic device properties */
+ char name[30]; /* name (including minor) of the device */
+ int model; /* index in the device_data struct */
+ int devno; /* marks the number of this device */
+ enum tm6000_devtype dev_type; /* type of device */
+
+ v4l2_std_id norm; /* Current norm */
+ int width,height; /* Selected resolution */
+
+ enum tm6000_core_state state;
+
+ /* Device Capabilities*/
+ struct tm6000_capabilities caps;
+
+ /* Tuner configuration */
+ int tuner_type; /* type of the tuner */
+ int tuner_addr; /* tuner address */
+
+ struct tm6000_gpio gpio;
+
+ /* Demodulator configuration */
+ int demod_addr; /* demodulator address */
+
+ int audio_bitrate;
+ /* i2c i/o */
+ struct i2c_adapter i2c_adap;
+ struct i2c_client i2c_client;
+
+ /* video for linux */
+ int users;
+
+ /* various device info */
+ unsigned int resources;
+ struct video_device *vfd;
+ struct tm6000_dmaqueue vidq;
+ struct v4l2_device v4l2_dev;
+
+ int input;
+ int freq;
+ unsigned int fourcc;
+
+ enum tm6000_mode mode;
+
+ /* DVB-T support */
+ struct tm6000_dvb *dvb;
+
+ /* locks */
+ struct mutex lock;
+
+ /* usb transfer */
+ struct usb_device *udev; /* the usb device */
+
+ struct tm6000_endpoint bulk_in, bulk_out, isoc_in, isoc_out;
+
+ /* scaler!=0 if scaler is active*/
+ int scaler;
+
+ /* Isoc control struct */
+ struct usb_isoc_ctl isoc_ctl;
+
+ spinlock_t slock;
+};
+
+struct tm6000_fh {
+ struct tm6000_core *dev;
+
+ /* video capture */
+ struct tm6000_fmt *fmt;
+ unsigned int width,height;
+ struct videobuf_queue vb_vidq;
+
+ enum v4l2_buf_type type;
+};
+
+#define TM6000_STD (V4L2_STD_PAL|V4L2_STD_PAL_N|V4L2_STD_PAL_Nc| \
+ V4L2_STD_PAL_M|V4L2_STD_PAL_60|V4L2_STD_NTSC_M| \
+ V4L2_STD_NTSC_M_JP|V4L2_STD_SECAM)
+
+/* In tm6000-cards.c */
+
+int tm6000_tuner_callback (void *ptr, int component, int command, int arg);
+int tm6000_xc5000_callback (void *ptr, int component, int command, int arg);
+int tm6000_cards_setup(struct tm6000_core *dev);
+
+/* In tm6000-core.c */
+
+int tm6000_read_write_usb (struct tm6000_core *dev, u8 reqtype, u8 req,
+ u16 value, u16 index, u8 *buf, u16 len);
+int tm6000_get_reg (struct tm6000_core *dev, u8 req, u16 value, u16 index);
+int tm6000_get_reg16(struct tm6000_core *dev, u8 req, u16 value, u16 index);
+int tm6000_get_reg32(struct tm6000_core *dev, u8 req, u16 value, u16 index);
+int tm6000_set_reg (struct tm6000_core *dev, u8 req, u16 value, u16 index);
+int tm6000_init (struct tm6000_core *dev);
+
+int tm6000_init_analog_mode (struct tm6000_core *dev);
+int tm6000_init_digital_mode (struct tm6000_core *dev);
+int tm6000_set_audio_bitrate (struct tm6000_core *dev, int bitrate);
+
+int tm6000_dvb_register(struct tm6000_core *dev);
+void tm6000_dvb_unregister(struct tm6000_core *dev);
+
+int tm6000_v4l2_register(struct tm6000_core *dev);
+int tm6000_v4l2_unregister(struct tm6000_core *dev);
+int tm6000_v4l2_exit(void);
+void tm6000_set_fourcc_format(struct tm6000_core *dev);
+
+/* In tm6000-stds.c */
+void tm6000_get_std_res(struct tm6000_core *dev);
+int tm6000_set_standard (struct tm6000_core *dev, v4l2_std_id *norm);
+
+/* In tm6000-i2c.c */
+int tm6000_i2c_register(struct tm6000_core *dev);
+int tm6000_i2c_unregister(struct tm6000_core *dev);
+
+/* In tm6000-queue.c */
+
+int tm6000_v4l2_mmap(struct file *filp, struct vm_area_struct *vma);
+
+int tm6000_vidioc_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type i);
+int tm6000_vidioc_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type i);
+int tm6000_vidioc_reqbufs (struct file *file, void *priv,
+ struct v4l2_requestbuffers *rb);
+int tm6000_vidioc_querybuf (struct file *file, void *priv,
+ struct v4l2_buffer *b);
+int tm6000_vidioc_qbuf (struct file *file, void *priv, struct v4l2_buffer *b);
+int tm6000_vidioc_dqbuf (struct file *file, void *priv, struct v4l2_buffer *b);
+ssize_t tm6000_v4l2_read(struct file *filp, char __user * buf, size_t count,
+ loff_t * f_pos);
+unsigned int tm6000_v4l2_poll(struct file *file,
+ struct poll_table_struct *wait);
+int tm6000_queue_init(struct tm6000_core *dev);
+
+/* In tm6000-alsa.c */
+int tm6000_audio_init(struct tm6000_core *dev, int idx);
+
+
+/* Debug stuff */
+
+extern int tm6000_debug;
+
+#define dprintk(dev, level, fmt, arg...) do {\
+ if (tm6000_debug & level) \
+ printk(KERN_INFO "(%lu) %s %s :"fmt, jiffies, \
+ dev->name, __FUNCTION__ , ##arg); } while (0)
+
+#define V4L2_DEBUG_REG 0x0004
+#define V4L2_DEBUG_I2C 0x0008
+#define V4L2_DEBUG_QUEUE 0x0010
+#define V4L2_DEBUG_ISOC 0x0020
+#define V4L2_DEBUG_RES_LOCK 0x0040 /* Resource locking */
+#define V4L2_DEBUG_OPEN 0x0080 /* video open/close debug */
+
+#define tm6000_err(fmt, arg...) do {\
+ printk(KERN_ERR "tm6000 %s :"fmt, \
+ __FUNCTION__ , ##arg); } while (0)
+
+
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index e40a2e990f4f..18f4dfed997f 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -3080,7 +3080,7 @@ static void device_set_multi(struct net_device *dev) {
PSMgmtObject pMgmt = pDevice->pMgmt;
u32 mc_filter[2];
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
VNSvInPortB(pDevice->PortOffset + MAC_REG_RCR, &(pDevice->byRxMode));
@@ -3100,8 +3100,8 @@ static void device_set_multi(struct net_device *dev) {
}
else {
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
- int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31));
}
MACvSelectPage1(pDevice->PortOffset);
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index ed3070edcac1..4fcc4351e73f 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -25,19 +25,19 @@
* Date: May 20, 2003
*
* Functions:
- * s_vGenerateTxParameter - Generate tx dma requried parameter.
+ * s_vGenerateTxParameter - Generate tx dma required parameter.
* vGenerateMACHeader - Translate 802.3 to 802.11 header
- * cbGetFragCount - Caculate fragement number count
+ * cbGetFragCount - Calculate fragment number count
* csBeacon_xmit - beacon tx function
* csMgmt_xmit - management tx function
* s_cbFillTxBufHead - fulfill tx dma buffer header
* s_uGetDataDuration - get tx data required duration
* s_uFillDataHead- fulfill tx data duration header
- * s_uGetRTSCTSDuration- get rtx/cts requried duration
+ * s_uGetRTSCTSDuration- get rtx/cts required duration
* s_uGetRTSCTSRsvTime- get rts/cts reserved time
* s_uGetTxRsvTime- get frame reserved time
* s_vFillCTSHead- fulfill CTS ctl header
- * s_vFillFragParameter- Set fragement ctl parameter.
+ * s_vFillFragParameter- Set fragment ctl parameter.
* s_vFillRTSHead- fulfill RTS ctl header
* s_vFillTxKey- fulfill tx encrypt key
* s_vSWencryption- Software encrypt header
@@ -877,7 +877,7 @@ s_vFillRTSHead (
}
// Note: So far RTSHead dosen't appear in ATIM & Beacom DMA, so we don't need to take them into account.
- // Otherwise, we need to modified codes for them.
+ // Otherwise, we need to modify codes for them.
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
if (byFBOption == AUTO_FB_NONE) {
PSRTS_g pBuf = (PSRTS_g)pvRTS;
@@ -1133,7 +1133,7 @@ s_vFillCTSHead (
*
* Parameters:
* In:
- * pDevice - Pointer to adpater
+ * pDevice - Pointer to adapter
* pTxDataHead - Transmit Data Buffer
* pTxBufHead - pTxBufHead
* pvRrvTime - pvRrvTime
@@ -2252,7 +2252,7 @@ vGenerateFIFOHeader (
*
* Parameters:
* In:
- * pDevice - Pointer to adpater
+ * pDevice - Pointer to adapter
* dwTxBufferAddr - Transmit Buffer
* pPacket - Packet from upper layer
* cbPacketSize - Transmit Data Length
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index a8e1adbc9592..49270db98fbb 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -1596,7 +1596,7 @@ static void device_set_multi(struct net_device *dev) {
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
u32 mc_filter[2];
int ii;
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
BYTE pbyData[8] = {0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff};
BYTE byTmpMode = 0;
int rc;
@@ -1632,8 +1632,8 @@ static void device_set_multi(struct net_device *dev) {
}
else {
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
- int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31));
}
for (ii = 0; ii < 4; ii++) {
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index d9fa36c95230..a2ce6fad8ee5 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -25,17 +25,17 @@
* Date: May 20, 2003
*
* Functions:
- * s_vGenerateTxParameter - Generate tx dma requried parameter.
+ * s_vGenerateTxParameter - Generate tx dma required parameter.
* s_vGenerateMACHeader - Translate 802.3 to 802.11 header
* csBeacon_xmit - beacon tx function
* csMgmt_xmit - management tx function
* s_uGetDataDuration - get tx data required duration
* s_uFillDataHead- fulfill tx data duration header
- * s_uGetRTSCTSDuration- get rtx/cts requried duration
+ * s_uGetRTSCTSDuration- get rtx/cts required duration
* s_uGetRTSCTSRsvTime- get rts/cts reserved time
* s_uGetTxRsvTime- get frame reserved time
* s_vFillCTSHead- fulfill CTS ctl header
- * s_vFillFragParameter- Set fragement ctl parameter.
+ * s_vFillFragParameter- Set fragment ctl parameter.
* s_vFillRTSHead- fulfill RTS ctl header
* s_vFillTxKey- fulfill tx encrypt key
* s_vSWencryption- Software encrypt header
diff --git a/drivers/staging/wavelan/wavelan.c b/drivers/staging/wavelan/wavelan.c
index 54ca63196fdd..f44ef351647b 100644
--- a/drivers/staging/wavelan/wavelan.c
+++ b/drivers/staging/wavelan/wavelan.c
@@ -3419,7 +3419,7 @@ static void wv_82586_config(struct net_device * dev)
ac_cfg_t cfg; /* Configure action */
ac_ias_t ias; /* IA-setup action */
ac_mcs_t mcs; /* Multicast setup */
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
#ifdef DEBUG_CONFIG_TRACE
printk(KERN_DEBUG "%s: ->wv_82586_config()\n", dev->name);
@@ -3531,16 +3531,16 @@ static void wv_82586_config(struct net_device * dev)
/* Any address to set? */
if (lp->mc_count) {
- netdev_for_each_mc_addr(dmi, dev)
- outsw(PIOP1(ioaddr), (u16 *) dmi->dmi_addr,
+ netdev_for_each_mc_addr(ha, dev)
+ outsw(PIOP1(ioaddr), (u16 *) ha->addr,
WAVELAN_ADDR_SIZE >> 1);
#ifdef DEBUG_CONFIG_INFO
printk(KERN_DEBUG
"%s: wv_82586_config(): set %d multicast addresses:\n",
dev->name, lp->mc_count);
- netdev_for_each_mc_addr(dmi, dev)
- printk(KERN_DEBUG " %pM\n", dmi->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev)
+ printk(KERN_DEBUG " %pM\n", ha->addr);
#endif
}
diff --git a/drivers/staging/wavelan/wavelan_cs.c b/drivers/staging/wavelan/wavelan_cs.c
index 04f691d127b4..e3bb40be4306 100644
--- a/drivers/staging/wavelan/wavelan_cs.c
+++ b/drivers/staging/wavelan/wavelan_cs.c
@@ -3591,20 +3591,20 @@ wv_82593_config(struct net_device * dev)
/* If roaming is enabled, join the "Beacon Request" multicast group... */
/* But only if it's not in there already! */
if(do_roaming)
- dev_mc_add(dev,WAVELAN_BEACON_ADDRESS, WAVELAN_ADDR_SIZE, 1);
+ dev_mc_add(dev, WAVELAN_BEACON_ADDRESS);
#endif /* WAVELAN_ROAMING */
/* If any multicast address to set */
if(lp->mc_count)
{
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int addrs_len = WAVELAN_ADDR_SIZE * lp->mc_count;
#ifdef DEBUG_CONFIG_INFO
printk(KERN_DEBUG "%s: wv_hw_config(): set %d multicast addresses:\n",
dev->name, lp->mc_count);
- netdev_for_each_mc_addr(dmi, dev)
- printk(KERN_DEBUG " %pM\n", dmi->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev)
+ printk(KERN_DEBUG " %pM\n", ha->addr);
#endif
/* Initialize adapter's ethernet multicast addresses */
@@ -3612,8 +3612,8 @@ wv_82593_config(struct net_device * dev)
outb(((TX_BASE >> 8) & PIORH_MASK) | PIORH_SEL_TX, PIORH(base));
outb(addrs_len & 0xff, PIOP(base)); /* byte count lsb */
outb((addrs_len >> 8), PIOP(base)); /* byte count msb */
- netdev_for_each_mc_addr(dmi, dev)
- outsb(PIOP(base), dmi->dmi_addr, dmi->dmi_addrlen);
+ netdev_for_each_mc_addr(ha, dev)
+ outsb(PIOP(base), ha->addr, dev->addr_len);
/* reset transmit DMA pointer */
hacr_write_slow(base, HACR_PWR_STAT | HACR_TX_DMA_RESET);
@@ -3850,12 +3850,8 @@ wv_pcmcia_config(struct pcmcia_device * link)
if (i != 0)
break;
- /*
- * Now allocate an interrupt line. Note that this does not
- * actually assign a handler to the interrupt.
- */
- i = pcmcia_request_irq(link, &link->irq);
- if (i != 0)
+ i = pcmcia_request_interrupt(link, wavelan_interrupt);
+ if (!i)
break;
/*
@@ -3890,7 +3886,7 @@ wv_pcmcia_config(struct pcmcia_device * link)
break;
/* Feed device with this info... */
- dev->irq = link->irq.AssignedIRQ;
+ dev->irq = link->irq;
dev->base_addr = link->io.BasePort1;
netif_start_queue(dev);
@@ -4437,10 +4433,6 @@ wavelan_probe(struct pcmcia_device *p_dev)
p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
p_dev->io.IOAddrLines = 3;
- /* Interrupt setup */
- p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- p_dev->irq.Handler = wavelan_interrupt;
-
/* General socket configuration */
p_dev->conf.Attributes = CONF_ENABLE_IRQ;
p_dev->conf.IntType = INT_MEMORY_AND_IO;
@@ -4487,7 +4479,6 @@ wavelan_probe(struct pcmcia_device *p_dev)
ret = wv_hw_config(dev);
if (ret) {
- dev->irq = 0;
pcmcia_disable_device(p_dev);
return ret;
}
diff --git a/drivers/staging/winbond/wbusb.c b/drivers/staging/winbond/wbusb.c
index 3482eec18651..5d9499bba9cc 100644
--- a/drivers/staging/winbond/wbusb.c
+++ b/drivers/staging/winbond/wbusb.c
@@ -92,10 +92,10 @@ static int wbsoft_get_stats(struct ieee80211_hw *hw,
return 0;
}
-static u64 wbsoft_prepare_multicast(struct ieee80211_hw *hw, int mc_count,
- struct dev_addr_list *mc_list)
+static u64 wbsoft_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list)
{
- return mc_count;
+ return netdev_hw_addr_list_count(mc_list);
}
static void wbsoft_configure_filter(struct ieee80211_hw *dev,
diff --git a/drivers/staging/wlags49_h2/wl_cs.c b/drivers/staging/wlags49_h2/wl_cs.c
index 9da42e66085e..c9d99d88b786 100644
--- a/drivers/staging/wlags49_h2/wl_cs.c
+++ b/drivers/staging/wlags49_h2/wl_cs.c
@@ -156,15 +156,12 @@ static int wl_adapter_attach(struct pcmcia_device *link)
link->io.NumPorts1 = HCF_NUM_IO_PORTS;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
link->io.IOAddrLines = 6;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT;
- link->irq.IRQInfo1 = IRQ_INFO2_VALID | IRQ_LEVEL_ID;
- link->irq.Handler = &wl_isr;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.ConfigIndex = 5;
link->conf.Present = PRESENT_OPTION;
- link->priv = link->irq.Instance = dev;
+ link->priv = dev;
lp = wl_priv(dev);
lp->link = link;
@@ -318,11 +315,11 @@ void wl_adapter_insert( struct pcmcia_device *link )
link->conf.Attributes |= CONF_ENABLE_IRQ;
CS_CHECK(RequestIO, pcmcia_request_io(link, &link->io));
- CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(link, wl_isr));
CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
- dev->irq = link->irq.AssignedIRQ;
+ dev->irq = link->irq;
dev->base_addr = link->io.BasePort1;
SET_NETDEV_DEV(dev, &handle_to_dev(link));
diff --git a/drivers/staging/wlags49_h2/wl_netdev.c b/drivers/staging/wlags49_h2/wl_netdev.c
index 1db73ebcae28..ca8c8b134c4e 100644
--- a/drivers/staging/wlags49_h2/wl_netdev.c
+++ b/drivers/staging/wlags49_h2/wl_netdev.c
@@ -1050,7 +1050,7 @@ void wl_multicast( struct net_device *dev )
//;?seems reasonable that even an AP-only driver could afford this small additional footprint
int x;
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
/*------------------------------------------------------------------------*/
@@ -1073,9 +1073,9 @@ void wl_multicast( struct net_device *dev )
DBG_PRINT( " mc_count: %d\n", netdev_mc_count(dev));
- netdev_for_each_mc_addr(mclist, dev)
- DBG_PRINT( " %s (%d)\n", DbgHwAddr(mclist->dmi_addr),
- mclist->dmi_addrlen );
+ netdev_for_each_mc_addr(ha, dev)
+ DBG_PRINT(" %s (%d)\n", DbgHwAddr(ha->addr),
+ dev->addr_len);
}
#endif /* DBG */
@@ -1120,9 +1120,9 @@ void wl_multicast( struct net_device *dev )
lp->ltvRecord.typ = CFG_GROUP_ADDR;
x = 0;
- netdev_for_each_mc_addr(mclist, dev)
+ netdev_for_each_mc_addr(ha, dev)
memcpy(&(lp->ltvRecord.u.u8[x++ * ETH_ALEN]),
- mclist->dmi_addr, ETH_ALEN);
+ ha->addr, ETH_ALEN);
DBG_PRINT( "Setting multicast list\n" );
hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
} else {
diff --git a/drivers/telephony/ixj_pcmcia.c b/drivers/telephony/ixj_pcmcia.c
index d442fd35620a..99cb2246ac72 100644
--- a/drivers/telephony/ixj_pcmcia.c
+++ b/drivers/telephony/ixj_pcmcia.c
@@ -22,7 +22,6 @@
typedef struct ixj_info_t {
int ndev;
- dev_node_t node;
struct ixj *port;
} ixj_info_t;
@@ -155,8 +154,6 @@ static int ixj_config(struct pcmcia_device * link)
j = ixj_pcmcia_probe(link->io.BasePort1, link->io.BasePort1 + 0x10);
info->ndev = 1;
- info->node.major = PHONE_MAJOR;
- link->dev_node = &info->node;
ixj_get_serial(link, j);
return 0;
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index 25f01b536f67..bc62a2019b0d 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -1552,6 +1552,7 @@ static void cmvs_file_name(struct uea_softc *sc, char *const cmv_name, int ver)
char file_arr[] = "CMVxy.bin";
char *file;
+ kparam_block_sysfs_write(cmv_file);
/* set proper name corresponding modem version and line type */
if (cmv_file[sc->modem_index] == NULL) {
if (UEA_CHIP_VERSION(sc) == ADI930)
@@ -1570,6 +1571,7 @@ static void cmvs_file_name(struct uea_softc *sc, char *const cmv_name, int ver)
strlcat(cmv_name, file, UEA_FW_NAME_MAX);
if (ver == 2)
strlcat(cmv_name, ".v2", UEA_FW_NAME_MAX);
+ kparam_unblock_sysfs_write(cmv_file);
}
static int request_cmvs_old(struct uea_softc *sc,
diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
index 6e98a3697844..94ecdbc758ce 100644
--- a/drivers/usb/early/ehci-dbgp.c
+++ b/drivers/usb/early/ehci-dbgp.c
@@ -19,6 +19,9 @@
#include <linux/usb/ch9.h>
#include <linux/usb/ehci_def.h>
#include <linux/delay.h>
+#include <linux/serial_core.h>
+#include <linux/kgdb.h>
+#include <linux/kthread.h>
#include <asm/io.h>
#include <asm/pci-direct.h>
#include <asm/fixmap.h>
@@ -55,6 +58,7 @@ static struct ehci_regs __iomem *ehci_regs;
static struct ehci_dbg_port __iomem *ehci_debug;
static int dbgp_not_safe; /* Cannot use debug device during ehci reset */
static unsigned int dbgp_endpoint_out;
+static unsigned int dbgp_endpoint_in;
struct ehci_dev {
u32 bus;
@@ -91,6 +95,13 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
return (x & ~0x0f) | (len & 0x0f);
}
+#ifdef CONFIG_KGDB
+static struct kgdb_io kgdbdbgp_io_ops;
+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
+#else
+#define dbgp_kgdb_mode (0)
+#endif
+
/*
* USB Packet IDs (PIDs)
*/
@@ -182,11 +193,10 @@ static void dbgp_breath(void)
/* Sleep to give the debug port a chance to breathe */
}
-static int dbgp_wait_until_done(unsigned ctrl)
+static int dbgp_wait_until_done(unsigned ctrl, int loop)
{
u32 pids, lpid;
int ret;
- int loop = DBGP_LOOPS;
retry:
writel(ctrl | DBGP_GO, &ehci_debug->control);
@@ -276,13 +286,13 @@ static int dbgp_bulk_write(unsigned devnum, unsigned endpoint,
dbgp_set_data(bytes, size);
writel(addr, &ehci_debug->address);
writel(pids, &ehci_debug->pids);
- ret = dbgp_wait_until_done(ctrl);
+ ret = dbgp_wait_until_done(ctrl, DBGP_LOOPS);
return ret;
}
static int dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data,
- int size)
+ int size, int loops)
{
u32 pids, addr, ctrl;
int ret;
@@ -302,7 +312,7 @@ static int dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data,
writel(addr, &ehci_debug->address);
writel(pids, &ehci_debug->pids);
- ret = dbgp_wait_until_done(ctrl);
+ ret = dbgp_wait_until_done(ctrl, loops);
if (ret < 0)
return ret;
@@ -343,12 +353,12 @@ static int dbgp_control_msg(unsigned devnum, int requesttype,
dbgp_set_data(&req, sizeof(req));
writel(addr, &ehci_debug->address);
writel(pids, &ehci_debug->pids);
- ret = dbgp_wait_until_done(ctrl);
+ ret = dbgp_wait_until_done(ctrl, DBGP_LOOPS);
if (ret < 0)
return ret;
/* Read the result */
- return dbgp_bulk_read(devnum, 0, data, size);
+ return dbgp_bulk_read(devnum, 0, data, size, DBGP_LOOPS);
}
/* Find a PCI capability */
@@ -559,6 +569,7 @@ try_again:
goto err;
}
dbgp_endpoint_out = dbgp_desc.bDebugOutEndpoint;
+ dbgp_endpoint_in = dbgp_desc.bDebugInEndpoint;
/* Move the device to 127 if it isn't already there */
if (devnum != USB_DEBUG_DEVNUM) {
@@ -968,8 +979,9 @@ int dbgp_reset_prep(void)
if (!ehci_debug)
return 0;
- if (early_dbgp_console.index != -1 &&
- !(early_dbgp_console.flags & CON_BOOT))
+ if ((early_dbgp_console.index != -1 &&
+ !(early_dbgp_console.flags & CON_BOOT)) ||
+ dbgp_kgdb_mode)
return 1;
/* This means the console is not initialized, or should get
* shutdown so as to allow for reuse of the usb device, which
@@ -982,3 +994,93 @@ int dbgp_reset_prep(void)
return 0;
}
EXPORT_SYMBOL_GPL(dbgp_reset_prep);
+
+#ifdef CONFIG_KGDB
+
+static char kgdbdbgp_buf[DBGP_MAX_PACKET];
+static int kgdbdbgp_buf_sz;
+static int kgdbdbgp_buf_idx;
+static int kgdbdbgp_loop_cnt = DBGP_LOOPS;
+
+static int kgdbdbgp_read_char(void)
+{
+ int ret;
+
+ if (kgdbdbgp_buf_idx < kgdbdbgp_buf_sz) {
+ char ch = kgdbdbgp_buf[kgdbdbgp_buf_idx++];
+ return ch;
+ }
+
+ ret = dbgp_bulk_read(USB_DEBUG_DEVNUM, dbgp_endpoint_in,
+ &kgdbdbgp_buf, DBGP_MAX_PACKET,
+ kgdbdbgp_loop_cnt);
+ if (ret <= 0)
+ return NO_POLL_CHAR;
+ kgdbdbgp_buf_sz = ret;
+ kgdbdbgp_buf_idx = 1;
+ return kgdbdbgp_buf[0];
+}
+
+static void kgdbdbgp_write_char(u8 chr)
+{
+ early_dbgp_write(NULL, &chr, 1);
+}
+
+static struct kgdb_io kgdbdbgp_io_ops = {
+ .name = "kgdbdbgp",
+ .read_char = kgdbdbgp_read_char,
+ .write_char = kgdbdbgp_write_char,
+};
+
+static int kgdbdbgp_wait_time;
+
+static int __init kgdbdbgp_parse_config(char *str)
+{
+ char *ptr;
+
+ if (!ehci_debug) {
+ if (early_dbgp_init(str))
+ return -1;
+ }
+ ptr = strchr(str, ',');
+ if (ptr) {
+ ptr++;
+ kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
+ }
+ kgdb_register_io_module(&kgdbdbgp_io_ops);
+ kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
+
+ return 0;
+}
+early_param("kgdbdbgp", kgdbdbgp_parse_config);
+
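+/*
+ * Optional polling thread: while the debug port is enabled it polls for
+ * input and drops into the debugger on a break (0x3) or a gdb '$' packet;
+ * it is only started when a wait time was given on the kgdbdbgp= parameter.
+ */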
+static int kgdbdbgp_reader_thread(void *ptr)
+{
+ int ret;
+
+ while (readl(&ehci_debug->control) & DBGP_ENABLED) {
+ kgdbdbgp_loop_cnt = 1;
+ ret = kgdbdbgp_read_char();
+ kgdbdbgp_loop_cnt = DBGP_LOOPS;
+ if (ret != NO_POLL_CHAR) {
+ if (ret == 0x3 || ret == '$') {
+ if (ret == '$')
+ kgdbdbgp_buf_idx--;
+ kgdb_breakpoint();
+ }
+ continue;
+ }
+ schedule_timeout_interruptible(kgdbdbgp_wait_time * HZ);
+ }
+ return 0;
+}
+
+static int __init kgdbdbgp_start_thread(void)
+{
+ if (dbgp_kgdb_mode && kgdbdbgp_wait_time)
+ kthread_run(kgdbdbgp_reader_thread, NULL, "%s", "dbgp");
+
+ return 0;
+}
+module_init(kgdbdbgp_start_thread);
+#endif /* CONFIG_KGDB */
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index df1bae9b048e..eaa79c8a9b8c 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -366,6 +366,13 @@ rescan:
if (is_done)
done(ep, req, 0);
else if (ep->is_pingpong) {
+ /*
+ * One dummy read to delay the code because of a HW glitch:
+ * CSR returns bad RXCOUNT when read too soon after updating
+ * RX_DATA_BK flags.
+ */
+ csr = __raw_readl(creg);
+
bufferspace -= count;
buf += count;
goto rescan;
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index ead59f42e69b..544ccfd7056e 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -199,8 +199,8 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
writel(pdata->portsc, hcd->regs + PORTSC_OFFSET);
mdelay(10);
- /* setup USBCONTROL. */
- ret = mxc_set_usbcontrol(pdev->id, pdata->flags);
+ /* setup specific usb hw */
+ ret = mxc_initialize_usb_hw(pdev->id, pdata->flags);
if (ret < 0)
goto err_init;
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 40a858335035..0cd6c7795d90 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -181,7 +181,7 @@ struct ehci_hcd_omap {
void __iomem *ehci_base;
/* Regulators for USB PHYs.
- * Each PHY can have a seperate regulator.
+ * Each PHY can have a separate regulator.
*/
struct regulator *regulator[OMAP3_HS_USB_PORTS];
};
diff --git a/drivers/usb/host/sl811_cs.c b/drivers/usb/host/sl811_cs.c
index 39d253e841f6..58cb73c8420a 100644
--- a/drivers/usb/host/sl811_cs.c
+++ b/drivers/usb/host/sl811_cs.c
@@ -47,7 +47,6 @@ static const char driver_name[DEV_NAME_LEN] = "sl811_cs";
typedef struct local_info_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
} local_info_t;
static void sl811_cs_release(struct pcmcia_device * link);
@@ -163,8 +162,7 @@ static int sl811_cs_config_check(struct pcmcia_device *p_dev,
dflt->vpp1.param[CISTPL_POWER_VNOM]/10000;
/* we need an interrupt */
- if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
- p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
+ p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
@@ -186,7 +184,6 @@ static int sl811_cs_config_check(struct pcmcia_device *p_dev,
static int sl811_cs_config(struct pcmcia_device *link)
{
struct device *parent = &link->dev;
- local_info_t *dev = link->priv;
int ret;
dev_dbg(&link->dev, "sl811_cs_config\n");
@@ -197,31 +194,24 @@ static int sl811_cs_config(struct pcmcia_device *link)
/* require an IRQ and two registers */
if (!link->io.NumPorts1 || link->io.NumPorts1 < 2)
goto failed;
- if (link->conf.Attributes & CONF_ENABLE_IRQ) {
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
- goto failed;
- } else
+
+ if (!link->irq)
goto failed;
ret = pcmcia_request_configuration(link, &link->conf);
if (ret)
goto failed;
- sprintf(dev->node.dev_name, driver_name);
- dev->node.major = dev->node.minor = 0;
- link->dev_node = &dev->node;
-
- printk(KERN_INFO "%s: index 0x%02x: ",
- dev->node.dev_name, link->conf.ConfigIndex);
+ dev_info(&link->dev, "index 0x%02x: ",
+ link->conf.ConfigIndex);
if (link->conf.Vpp)
printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10);
- printk(", irq %d", link->irq.AssignedIRQ);
+ printk(", irq %d", link->irq);
printk(", io 0x%04x-0x%04x", link->io.BasePort1,
link->io.BasePort1+link->io.NumPorts1-1);
printk("\n");
- if (sl811_hc_init(parent, link->io.BasePort1, link->irq.AssignedIRQ)
+ if (sl811_hc_init(parent, link->io.BasePort1, link->irq)
< 0) {
failed:
printk(KERN_WARNING "sl811_cs_config failed\n");
@@ -241,10 +231,6 @@ static int sl811_cs_probe(struct pcmcia_device *link)
local->p_dev = link;
link->priv = local;
- /* Initialize */
- link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
- link->irq.Handler = NULL;
-
link->conf.Attributes = 0;
link->conf.IntType = INT_MEMORY_AND_IO;
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index 112ef7e26f6b..608d61a105f6 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -76,7 +76,7 @@
* xfers-per-ripe, blocks-per-rpipe, rpipes-per-host), at the end
* we are going to have to rebuild all this based on an scheduler,
* to where we have a list of transactions to do and based on the
- * availability of the different requried components (blocks,
+ * availability of the different required components (blocks,
* rpipes, segment slots, etc), we go scheduling them. Painful.
*/
#include <linux/init.h>
diff --git a/drivers/uwb/i1480/i1480u-wlp/lc.c b/drivers/uwb/i1480/i1480u-wlp/lc.c
index def778cf2216..daf793559bcf 100644
--- a/drivers/uwb/i1480/i1480u-wlp/lc.c
+++ b/drivers/uwb/i1480/i1480u-wlp/lc.c
@@ -93,28 +93,28 @@ void i1480u_init(struct i1480u *i1480u)
* information elements have intuitive mappings, other not.
*/
static
-void i1480u_fill_device_info(struct wlp *wlp, struct wlp_device_info *dev_info)
+void i1480u_fill_device_info(struct wlp *wlp, struct wlp_device_info *wdi)
{
struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp);
struct usb_device *usb_dev = i1480u->usb_dev;
/* Treat device name and model name the same */
if (usb_dev->descriptor.iProduct) {
usb_string(usb_dev, usb_dev->descriptor.iProduct,
- dev_info->name, sizeof(dev_info->name));
+ wdi->name, sizeof(wdi->name));
usb_string(usb_dev, usb_dev->descriptor.iProduct,
- dev_info->model_name, sizeof(dev_info->model_name));
+ wdi->model_name, sizeof(wdi->model_name));
}
if (usb_dev->descriptor.iManufacturer)
usb_string(usb_dev, usb_dev->descriptor.iManufacturer,
- dev_info->manufacturer,
- sizeof(dev_info->manufacturer));
- scnprintf(dev_info->model_nr, sizeof(dev_info->model_nr), "%04x",
+ wdi->manufacturer,
+ sizeof(wdi->manufacturer));
+ scnprintf(wdi->model_nr, sizeof(wdi->model_nr), "%04x",
__le16_to_cpu(usb_dev->descriptor.bcdDevice));
if (usb_dev->descriptor.iSerialNumber)
usb_string(usb_dev, usb_dev->descriptor.iSerialNumber,
- dev_info->serial, sizeof(dev_info->serial));
+ wdi->serial, sizeof(wdi->serial));
/* FIXME: where should we obtain category? */
- dev_info->prim_dev_type.category = cpu_to_le16(WLP_DEV_CAT_OTHER);
+ wdi->prim_dev_type.category = cpu_to_le16(WLP_DEV_CAT_OTHER);
/* FIXME: Complete OUI and OUIsubdiv attributes */
}
diff --git a/drivers/uwb/wlp/messages.c b/drivers/uwb/wlp/messages.c
index 3a8e033dce21..6daa464bed24 100644
--- a/drivers/uwb/wlp/messages.c
+++ b/drivers/uwb/wlp/messages.c
@@ -713,7 +713,7 @@ static int wlp_build_assoc_d1(struct wlp *wlp, struct wlp_wss *wss,
struct sk_buff *_skb;
void *d1_itr;
- if (wlp->dev_info == NULL) {
+ if (wlp->wdi == NULL) {
result = __wlp_setup_device_info(wlp);
if (result < 0) {
dev_err(dev, "WLP: Unable to setup device "
@@ -721,7 +721,7 @@ static int wlp_build_assoc_d1(struct wlp *wlp, struct wlp_wss *wss,
goto error;
}
}
- info = wlp->dev_info;
+ info = wlp->wdi;
_skb = dev_alloc_skb(sizeof(*_d1)
+ sizeof(struct wlp_attr_uuid_e)
+ sizeof(struct wlp_attr_wss_sel_mthd)
@@ -795,7 +795,7 @@ int wlp_build_assoc_d2(struct wlp *wlp, struct wlp_wss *wss,
void *d2_itr;
size_t mem_needed;
- if (wlp->dev_info == NULL) {
+ if (wlp->wdi == NULL) {
result = __wlp_setup_device_info(wlp);
if (result < 0) {
dev_err(dev, "WLP: Unable to setup device "
@@ -803,7 +803,7 @@ int wlp_build_assoc_d2(struct wlp *wlp, struct wlp_wss *wss,
goto error;
}
}
- info = wlp->dev_info;
+ info = wlp->wdi;
mem_needed = sizeof(*_d2)
+ sizeof(struct wlp_attr_uuid_e)
+ sizeof(struct wlp_attr_uuid_r)
@@ -971,7 +971,7 @@ error_parse:
*/
static
int wlp_get_variable_info(struct wlp *wlp, void *data,
- struct wlp_device_info *dev_info, ssize_t len)
+ struct wlp_device_info *wdi, ssize_t len)
{
struct device *dev = &wlp->rc->uwb_dev.dev;
size_t used = 0;
@@ -994,7 +994,7 @@ int wlp_get_variable_info(struct wlp *wlp, void *data,
goto error_parse;
}
result = wlp_get_manufacturer(wlp, data + used,
- dev_info->manufacturer,
+ wdi->manufacturer,
len - used);
if (result < 0) {
dev_err(dev, "WLP: Unable to obtain "
@@ -1012,7 +1012,7 @@ int wlp_get_variable_info(struct wlp *wlp, void *data,
goto error_parse;
}
result = wlp_get_model_name(wlp, data + used,
- dev_info->model_name,
+ wdi->model_name,
len - used);
if (result < 0) {
dev_err(dev, "WLP: Unable to obtain Model "
@@ -1029,7 +1029,7 @@ int wlp_get_variable_info(struct wlp *wlp, void *data,
goto error_parse;
}
result = wlp_get_model_nr(wlp, data + used,
- dev_info->model_nr,
+ wdi->model_nr,
len - used);
if (result < 0) {
dev_err(dev, "WLP: Unable to obtain Model "
@@ -1046,7 +1046,7 @@ int wlp_get_variable_info(struct wlp *wlp, void *data,
goto error_parse;
}
result = wlp_get_serial(wlp, data + used,
- dev_info->serial, len - used);
+ wdi->serial, len - used);
if (result < 0) {
dev_err(dev, "WLP: Unable to obtain Serial "
"number attribute from D1 message.\n");
@@ -1062,7 +1062,7 @@ int wlp_get_variable_info(struct wlp *wlp, void *data,
goto error_parse;
}
result = wlp_get_prim_dev_type(wlp, data + used,
- &dev_info->prim_dev_type,
+ &wdi->prim_dev_type,
len - used);
if (result < 0) {
dev_err(dev, "WLP: Unable to obtain Primary "
@@ -1070,10 +1070,10 @@ int wlp_get_variable_info(struct wlp *wlp, void *data,
"message.\n");
goto error_parse;
}
- dev_info->prim_dev_type.category =
- le16_to_cpu(dev_info->prim_dev_type.category);
- dev_info->prim_dev_type.subID =
- le16_to_cpu(dev_info->prim_dev_type.subID);
+ wdi->prim_dev_type.category =
+ le16_to_cpu(wdi->prim_dev_type.category);
+ wdi->prim_dev_type.subID =
+ le16_to_cpu(wdi->prim_dev_type.subID);
last = WLP_ATTR_PRI_DEV_TYPE;
used += result;
break;
@@ -1099,7 +1099,7 @@ static
int wlp_parse_d1_frame(struct wlp *wlp, struct sk_buff *skb,
struct wlp_uuid *uuid_e,
enum wlp_wss_sel_mthd *sel_mthd,
- struct wlp_device_info *dev_info,
+ struct wlp_device_info *wdi,
enum wlp_assc_error *assc_err)
{
struct device *dev = &wlp->rc->uwb_dev.dev;
@@ -1124,7 +1124,7 @@ int wlp_parse_d1_frame(struct wlp *wlp, struct sk_buff *skb,
goto error_parse;
}
used += result;
- result = wlp_get_dev_name(wlp, ptr + used, dev_info->name,
+ result = wlp_get_dev_name(wlp, ptr + used, wdi->name,
len - used);
if (result < 0) {
dev_err(dev, "WLP: unable to obtain Device Name from D1 "
@@ -1132,7 +1132,7 @@ int wlp_parse_d1_frame(struct wlp *wlp, struct sk_buff *skb,
goto error_parse;
}
used += result;
- result = wlp_get_variable_info(wlp, ptr + used, dev_info, len - used);
+ result = wlp_get_variable_info(wlp, ptr + used, wdi, len - used);
if (result < 0) {
dev_err(dev, "WLP: unable to obtain Device Information from "
"D1 message.\n");
@@ -1172,15 +1172,15 @@ void wlp_handle_d1_frame(struct work_struct *ws)
struct device *dev = &wlp->rc->uwb_dev.dev;
struct wlp_uuid uuid_e;
enum wlp_wss_sel_mthd sel_mthd = 0;
- struct wlp_device_info dev_info;
+ struct wlp_device_info wdi;
enum wlp_assc_error assc_err;
struct sk_buff *resp = NULL;
/* Parse D1 frame */
mutex_lock(&wss->mutex);
mutex_lock(&wlp->mutex); /* to access wlp->uuid */
- memset(&dev_info, 0, sizeof(dev_info));
- result = wlp_parse_d1_frame(wlp, skb, &uuid_e, &sel_mthd, &dev_info,
+ memset(&wdi, 0, sizeof(wdi));
+ result = wlp_parse_d1_frame(wlp, skb, &uuid_e, &sel_mthd, &wdi,
&assc_err);
if (result < 0) {
dev_err(dev, "WLP: Unable to parse incoming D1 frame.\n");
diff --git a/drivers/uwb/wlp/sysfs.c b/drivers/uwb/wlp/sysfs.c
index 6627c94cc854..b24751c3430b 100644
--- a/drivers/uwb/wlp/sysfs.c
+++ b/drivers/uwb/wlp/sysfs.c
@@ -333,12 +333,12 @@ ssize_t wlp_dev_##type##_show(struct wlp *wlp, char *buf) \
{ \
ssize_t result = 0; \
mutex_lock(&wlp->mutex); \
- if (wlp->dev_info == NULL) { \
+ if (wlp->wdi == NULL) { \
result = __wlp_setup_device_info(wlp); \
if (result < 0) \
goto out; \
} \
- result = scnprintf(buf, PAGE_SIZE, "%s\n", wlp->dev_info->type);\
+ result = scnprintf(buf, PAGE_SIZE, "%s\n", wlp->wdi->type); \
out: \
mutex_unlock(&wlp->mutex); \
return result; \
@@ -360,14 +360,14 @@ ssize_t wlp_dev_##type##_store(struct wlp *wlp, const char *buf, size_t size)\
ssize_t result; \
char format[10]; \
mutex_lock(&wlp->mutex); \
- if (wlp->dev_info == NULL) { \
+ if (wlp->wdi == NULL) { \
result = __wlp_alloc_device_info(wlp); \
if (result < 0) \
goto out; \
} \
- memset(wlp->dev_info->type, 0, sizeof(wlp->dev_info->type)); \
+ memset(wlp->wdi->type, 0, sizeof(wlp->wdi->type)); \
sprintf(format, "%%%uc", len); \
- result = sscanf(buf, format, wlp->dev_info->type); \
+ result = sscanf(buf, format, wlp->wdi->type); \
out: \
mutex_unlock(&wlp->mutex); \
return result < 0 ? result : size; \
@@ -409,13 +409,13 @@ ssize_t wlp_dev_prim_category_show(struct wlp *wlp, char *buf)
{
ssize_t result = 0;
mutex_lock(&wlp->mutex);
- if (wlp->dev_info == NULL) {
+ if (wlp->wdi == NULL) {
result = __wlp_setup_device_info(wlp);
if (result < 0)
goto out;
}
result = scnprintf(buf, PAGE_SIZE, "%s\n",
- wlp_dev_category_str(wlp->dev_info->prim_dev_type.category));
+ wlp_dev_category_str(wlp->wdi->prim_dev_type.category));
out:
mutex_unlock(&wlp->mutex);
return result;
@@ -428,7 +428,7 @@ ssize_t wlp_dev_prim_category_store(struct wlp *wlp, const char *buf,
ssize_t result;
u16 cat;
mutex_lock(&wlp->mutex);
- if (wlp->dev_info == NULL) {
+ if (wlp->wdi == NULL) {
result = __wlp_alloc_device_info(wlp);
if (result < 0)
goto out;
@@ -436,7 +436,7 @@ ssize_t wlp_dev_prim_category_store(struct wlp *wlp, const char *buf,
result = sscanf(buf, "%hu", &cat);
if ((cat >= WLP_DEV_CAT_COMPUTER && cat <= WLP_DEV_CAT_TELEPHONE)
|| cat == WLP_DEV_CAT_OTHER)
- wlp->dev_info->prim_dev_type.category = cat;
+ wlp->wdi->prim_dev_type.category = cat;
else
result = -EINVAL;
out:
@@ -449,15 +449,15 @@ ssize_t wlp_dev_prim_OUI_show(struct wlp *wlp, char *buf)
{
ssize_t result = 0;
mutex_lock(&wlp->mutex);
- if (wlp->dev_info == NULL) {
+ if (wlp->wdi == NULL) {
result = __wlp_setup_device_info(wlp);
if (result < 0)
goto out;
}
result = scnprintf(buf, PAGE_SIZE, "%02x:%02x:%02x\n",
- wlp->dev_info->prim_dev_type.OUI[0],
- wlp->dev_info->prim_dev_type.OUI[1],
- wlp->dev_info->prim_dev_type.OUI[2]);
+ wlp->wdi->prim_dev_type.OUI[0],
+ wlp->wdi->prim_dev_type.OUI[1],
+ wlp->wdi->prim_dev_type.OUI[2]);
out:
mutex_unlock(&wlp->mutex);
return result;
@@ -469,7 +469,7 @@ ssize_t wlp_dev_prim_OUI_store(struct wlp *wlp, const char *buf, size_t size)
ssize_t result;
u8 OUI[3];
mutex_lock(&wlp->mutex);
- if (wlp->dev_info == NULL) {
+ if (wlp->wdi == NULL) {
result = __wlp_alloc_device_info(wlp);
if (result < 0)
goto out;
@@ -480,7 +480,7 @@ ssize_t wlp_dev_prim_OUI_store(struct wlp *wlp, const char *buf, size_t size)
result = -EINVAL;
goto out;
} else
- memcpy(wlp->dev_info->prim_dev_type.OUI, OUI, sizeof(OUI));
+ memcpy(wlp->wdi->prim_dev_type.OUI, OUI, sizeof(OUI));
out:
mutex_unlock(&wlp->mutex);
return result < 0 ? result : size;
@@ -492,13 +492,13 @@ ssize_t wlp_dev_prim_OUI_sub_show(struct wlp *wlp, char *buf)
{
ssize_t result = 0;
mutex_lock(&wlp->mutex);
- if (wlp->dev_info == NULL) {
+ if (wlp->wdi == NULL) {
result = __wlp_setup_device_info(wlp);
if (result < 0)
goto out;
}
result = scnprintf(buf, PAGE_SIZE, "%u\n",
- wlp->dev_info->prim_dev_type.OUIsubdiv);
+ wlp->wdi->prim_dev_type.OUIsubdiv);
out:
mutex_unlock(&wlp->mutex);
return result;
@@ -512,14 +512,14 @@ ssize_t wlp_dev_prim_OUI_sub_store(struct wlp *wlp, const char *buf,
unsigned sub;
u8 max_sub = ~0;
mutex_lock(&wlp->mutex);
- if (wlp->dev_info == NULL) {
+ if (wlp->wdi == NULL) {
result = __wlp_alloc_device_info(wlp);
if (result < 0)
goto out;
}
result = sscanf(buf, "%u", &sub);
if (sub <= max_sub)
- wlp->dev_info->prim_dev_type.OUIsubdiv = sub;
+ wlp->wdi->prim_dev_type.OUIsubdiv = sub;
else
result = -EINVAL;
out:
@@ -532,13 +532,13 @@ ssize_t wlp_dev_prim_subcat_show(struct wlp *wlp, char *buf)
{
ssize_t result = 0;
mutex_lock(&wlp->mutex);
- if (wlp->dev_info == NULL) {
+ if (wlp->wdi == NULL) {
result = __wlp_setup_device_info(wlp);
if (result < 0)
goto out;
}
result = scnprintf(buf, PAGE_SIZE, "%u\n",
- wlp->dev_info->prim_dev_type.subID);
+ wlp->wdi->prim_dev_type.subID);
out:
mutex_unlock(&wlp->mutex);
return result;
@@ -552,14 +552,14 @@ ssize_t wlp_dev_prim_subcat_store(struct wlp *wlp, const char *buf,
unsigned sub;
__le16 max_sub = ~0;
mutex_lock(&wlp->mutex);
- if (wlp->dev_info == NULL) {
+ if (wlp->wdi == NULL) {
result = __wlp_alloc_device_info(wlp);
if (result < 0)
goto out;
}
result = sscanf(buf, "%u", &sub);
if (sub <= max_sub)
- wlp->dev_info->prim_dev_type.subID = sub;
+ wlp->wdi->prim_dev_type.subID = sub;
else
result = -EINVAL;
out:
diff --git a/drivers/uwb/wlp/wlp-lc.c b/drivers/uwb/wlp/wlp-lc.c
index 7f6a630bf26c..202655d4fc20 100644
--- a/drivers/uwb/wlp/wlp-lc.c
+++ b/drivers/uwb/wlp/wlp-lc.c
@@ -40,9 +40,9 @@ void wlp_neighbor_init(struct wlp_neighbor_e *neighbor)
int __wlp_alloc_device_info(struct wlp *wlp)
{
struct device *dev = &wlp->rc->uwb_dev.dev;
- BUG_ON(wlp->dev_info != NULL);
- wlp->dev_info = kzalloc(sizeof(struct wlp_device_info), GFP_KERNEL);
- if (wlp->dev_info == NULL) {
+ BUG_ON(wlp->wdi != NULL);
+ wlp->wdi = kzalloc(sizeof(struct wlp_device_info), GFP_KERNEL);
+ if (wlp->wdi == NULL) {
dev_err(dev, "WLP: Unable to allocate memory for "
"device information.\n");
return -ENOMEM;
@@ -59,7 +59,7 @@ int __wlp_alloc_device_info(struct wlp *wlp)
static
void __wlp_fill_device_info(struct wlp *wlp)
{
- wlp->fill_device_info(wlp, wlp->dev_info);
+ wlp->fill_device_info(wlp, wlp->wdi);
}
/**
@@ -539,8 +539,8 @@ void wlp_remove(struct wlp *wlp)
uwb_notifs_deregister(wlp->rc, &wlp->uwb_notifs_handler);
wlp_eda_release(&wlp->eda);
mutex_lock(&wlp->mutex);
- if (wlp->dev_info != NULL)
- kfree(wlp->dev_info);
+ if (wlp->wdi != NULL)
+ kfree(wlp->wdi);
mutex_unlock(&wlp->mutex);
wlp->rc = NULL;
}
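
The rename above is purely mechanical: the wlp->dev_info pointer becomes wlp->wdi while struct wlp_device_info and the fill_device_info() callback keep their shape, and wlp-lc.c still invokes the callback as wlp->fill_device_info(wlp, wlp->wdi). A minimal sketch of such a callback under the new member name (the string values here are made up; the USB-based hunk at the top of this series fills them from string descriptors instead):

    static void example_fill_device_info(struct wlp *wlp,
                                         struct wlp_device_info *wdi)
    {
            /* Illustrative values only; a real driver queries its hardware */
            snprintf(wdi->name, sizeof(wdi->name), "example-wlp");
            snprintf(wdi->model_name, sizeof(wdi->model_name), "model-x");
            wdi->prim_dev_type.category = cpu_to_le16(WLP_DEV_CAT_OTHER);
    }
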
diff --git a/drivers/vbus/Kconfig b/drivers/vbus/Kconfig
new file mode 100644
index 000000000000..f51cba10913e
--- /dev/null
+++ b/drivers/vbus/Kconfig
@@ -0,0 +1,25 @@
+#
+# Virtual-Bus (VBus) driver configuration
+#
+
+config VBUS_PROXY
+ bool "Virtual-Bus support"
+ select SHM_SIGNAL
+ select IOQ
+ default n
+ help
+ Adds support for virtual-bus model drivers in a guest to connect
+ to host side virtual-bus resources. If you are using this kernel
+ in a virtualization solution which implements virtual-bus devices
+ on the backend, say Y. If unsure, say N.
+
+config VBUS_PCIBRIDGE
+ bool "PCI to Virtual-Bus bridge"
+ depends on PCI
+ depends on VBUS_PROXY
+ select IOQ
+ default n
+ help
+ Provides a way to bridge host side vbus devices via a PCI-BRIDGE
+ object. If you are running virtualization with vbus devices on the
+ host, and the vbus is exposed via PCI, say Y. Otherwise, say N.
diff --git a/drivers/vbus/Makefile b/drivers/vbus/Makefile
new file mode 100644
index 000000000000..944b7f1fec90
--- /dev/null
+++ b/drivers/vbus/Makefile
@@ -0,0 +1,6 @@
+
+vbus-proxy-objs += bus-proxy.o
+obj-$(CONFIG_VBUS_PROXY) += vbus-proxy.o
+
+vbus-pcibridge-objs += pci-bridge.o
+obj-$(CONFIG_VBUS_PCIBRIDGE) += vbus-pcibridge.o
diff --git a/drivers/vbus/bus-proxy.c b/drivers/vbus/bus-proxy.c
new file mode 100644
index 000000000000..ae11f679d34e
--- /dev/null
+++ b/drivers/vbus/bus-proxy.c
@@ -0,0 +1,248 @@
+/*
+ * Copyright 2009 Novell. All Rights Reserved.
+ *
+ * Author:
+ * Gregory Haskins <ghaskins@novell.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/vbus_driver.h>
+
+MODULE_AUTHOR("Gregory Haskins");
+MODULE_LICENSE("GPL");
+
+#define VBUS_PROXY_NAME "vbus-proxy"
+
+static struct vbus_device_proxy *to_dev(struct device *_dev)
+{
+ return _dev ? container_of(_dev, struct vbus_device_proxy, dev) : NULL;
+}
+
+static struct vbus_driver *to_drv(struct device_driver *_drv)
+{
+ return container_of(_drv, struct vbus_driver, drv);
+}
+
+/*
+ * This function is invoked whenever a new driver and/or device is added
+ * to check if there is a match
+ */
+static int vbus_dev_proxy_match(struct device *_dev, struct device_driver *_drv)
+{
+ struct vbus_device_proxy *dev = to_dev(_dev);
+ struct vbus_driver *drv = to_drv(_drv);
+
+ return !strcmp(dev->type, drv->type);
+}
+
+static int vbus_dev_proxy_uevent(struct device *_dev, struct kobj_uevent_env *env)
+{
+ struct vbus_device_proxy *dev = to_dev(_dev);
+
+ if (add_uevent_var(env, "MODALIAS=vbus-proxy:%s", dev->type))
+ return -ENOMEM;
+
+ return 0;
+}
+
+/*
+ * This function is invoked after the bus infrastructure has already made a
+ * match. The device will contain a reference to the paired driver which
+ * we will extract.
+ */
+static int vbus_dev_proxy_probe(struct device *_dev)
+{
+ int ret = 0;
+ struct vbus_device_proxy *dev = to_dev(_dev);
+ struct vbus_driver *drv = to_drv(_dev->driver);
+
+ if (drv->ops->probe)
+ ret = drv->ops->probe(dev);
+
+ return ret;
+}
+
+static struct bus_type vbus_proxy = {
+ .name = VBUS_PROXY_NAME,
+ .match = vbus_dev_proxy_match,
+ .uevent = vbus_dev_proxy_uevent,
+};
+
+static struct device vbus_proxy_rootdev = {
+ .parent = NULL,
+ .init_name = VBUS_PROXY_NAME,
+};
+
+static int __init vbus_init(void)
+{
+ int ret;
+
+ ret = bus_register(&vbus_proxy);
+ BUG_ON(ret < 0);
+
+ ret = device_register(&vbus_proxy_rootdev);
+ BUG_ON(ret < 0);
+
+ return 0;
+}
+
+postcore_initcall(vbus_init);
+
+static void device_release(struct device *dev)
+{
+ struct vbus_device_proxy *_dev;
+
+ _dev = container_of(dev, struct vbus_device_proxy, dev);
+
+ _dev->ops->release(_dev);
+}
+
+static ssize_t _show_modalias(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "vbus-proxy:%s\n", to_dev(dev)->type);
+}
+static DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, _show_modalias, NULL);
+
+int vbus_device_proxy_register(struct vbus_device_proxy *new)
+{
+ int ret;
+
+ new->dev.parent = &vbus_proxy_rootdev;
+ new->dev.bus = &vbus_proxy;
+ new->dev.release = &device_release;
+
+ ret = device_register(&new->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = device_create_file(&new->dev, &dev_attr_modalias);
+ if (ret < 0) {
+ device_unregister(&new->dev);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vbus_device_proxy_register);
+
+void vbus_device_proxy_unregister(struct vbus_device_proxy *dev)
+{
+ device_remove_file(&dev->dev, &dev_attr_modalias);
+ device_unregister(&dev->dev);
+}
+EXPORT_SYMBOL_GPL(vbus_device_proxy_unregister);
+
+static int match_device_id(struct device *_dev, void *data)
+{
+ struct vbus_device_proxy *dev = to_dev(_dev);
+ u64 id = *(u64 *)data;
+
+ return dev->id == id;
+}
+
+struct vbus_device_proxy *vbus_device_proxy_find(u64 id)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&vbus_proxy, NULL, &id, &match_device_id);
+
+ return to_dev(dev);
+}
+EXPORT_SYMBOL_GPL(vbus_device_proxy_find);
+
+int vbus_driver_register(struct vbus_driver *new)
+{
+ new->drv.bus = &vbus_proxy;
+ new->drv.name = new->type;
+ new->drv.owner = new->owner;
+ new->drv.probe = vbus_dev_proxy_probe;
+
+ return driver_register(&new->drv);
+}
+EXPORT_SYMBOL_GPL(vbus_driver_register);
+
+void vbus_driver_unregister(struct vbus_driver *drv)
+{
+ driver_unregister(&drv->drv);
+}
+EXPORT_SYMBOL_GPL(vbus_driver_unregister);
+
+/*
+ *---------------------------------
+ * driver-side IOQ helper
+ *---------------------------------
+ */
+static void
+vbus_driver_ioq_release(struct ioq *ioq)
+{
+ kfree(ioq->head_desc);
+ kfree(ioq);
+}
+
+static struct ioq_ops vbus_driver_ioq_ops = {
+ .release = vbus_driver_ioq_release,
+};
+
+
+int vbus_driver_ioq_alloc(struct vbus_device_proxy *dev, const char *name,
+ int id, int prio, size_t count, struct ioq **ioq)
+{
+ struct ioq *_ioq;
+ struct ioq_ring_head *head = NULL;
+ struct shm_signal *signal = NULL;
+ size_t len = IOQ_HEAD_DESC_SIZE(count);
+ int ret = -ENOMEM;
+
+ _ioq = kzalloc(sizeof(*_ioq), GFP_KERNEL);
+ if (!_ioq)
+ goto error;
+
+ head = kzalloc(len, GFP_KERNEL | GFP_DMA);
+ if (!head)
+ goto error;
+
+ head->magic = IOQ_RING_MAGIC;
+ head->ver = IOQ_RING_VER;
+ head->count = cpu_to_le32(count);
+
+ ret = dev->ops->shm(dev, name, id, prio, head, len,
+ &head->signal, &signal, 0);
+ if (ret < 0)
+ goto error;
+
+ ioq_init(_ioq,
+ &vbus_driver_ioq_ops,
+ ioq_locality_north,
+ head,
+ signal,
+ count);
+
+ *ioq = _ioq;
+
+ return 0;
+
+ error:
+ kfree(_ioq);
+ kfree(head);
+
+ if (signal)
+ shm_signal_put(signal);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vbus_driver_ioq_alloc);
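
To make the exported API above concrete, here is a rough sketch of a client driver registering against the new bus. Names are hypothetical, including the ops structure name, which lives in vbus_driver.h and is not shown in this hunk; only vbus_driver_register(), the type-string match and the ops->probe hook come from the code above:

    static int example_probe(struct vbus_device_proxy *dev)
    {
            /* reached only when dev->type matched the driver's type string */
            dev_info(&dev->dev, "bound vbus device %lld\n", dev->id);
            return 0;
    }

    static struct vbus_driver_ops example_ops = {   /* struct name assumed */
            .probe = example_probe,
    };

    static struct vbus_driver example_driver = {
            .type  = "example",
            .owner = THIS_MODULE,
            .ops   = &example_ops,
    };

    static int __init example_init(void)
    {
            return vbus_driver_register(&example_driver);
    }
    module_init(example_init);
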
diff --git a/drivers/vbus/pci-bridge.c b/drivers/vbus/pci-bridge.c
new file mode 100644
index 000000000000..36de7c48891c
--- /dev/null
+++ b/drivers/vbus/pci-bridge.c
@@ -0,0 +1,1016 @@
+/*
+ * Copyright (C) 2009 Novell. All Rights Reserved.
+ *
+ * Author:
+ * Gregory Haskins <ghaskins@novell.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/mm.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/ioq.h>
+#include <linux/interrupt.h>
+#include <linux/vbus_driver.h>
+#include <linux/vbus_pci.h>
+
+MODULE_AUTHOR("Gregory Haskins");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1");
+
+#define VBUS_PCI_NAME "pci-to-vbus-bridge"
+
+struct vbus_pci {
+ spinlock_t lock;
+ struct pci_dev *dev;
+ struct ioq eventq;
+ struct vbus_pci_event *ring;
+ struct vbus_pci_regs *regs;
+ struct vbus_pci_signals *signals;
+ int irq;
+ bool enabled;
+ struct {
+ struct dentry *fs;
+ int events;
+ int qnotify;
+ int qinject;
+ int notify;
+ int inject;
+ int bridgecalls;
+ int buscalls;
+ } stats;
+};
+
+static struct vbus_pci vbus_pci;
+
+struct vbus_pci_device {
+ char type[VBUS_MAX_DEVTYPE_LEN];
+ u64 handle;
+ struct list_head shms;
+ struct vbus_device_proxy vdev;
+ struct work_struct drop;
+};
+
+static DEFINE_PER_CPU(struct vbus_pci_fastcall_desc, vbus_pci_percpu_fastcall)
+____cacheline_aligned;
+
+/*
+ * -------------------
+ * common routines
+ * -------------------
+ */
+
+static int
+vbus_pci_bridgecall(unsigned long nr, void *data, unsigned long len)
+{
+ struct vbus_pci_call_desc params = {
+ .vector = nr,
+ .len = len,
+ .datap = __pa(data),
+ };
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&vbus_pci.lock, flags);
+
+ memcpy_toio(&vbus_pci.regs->bridgecall, &params, sizeof(params));
+ ret = ioread32(&vbus_pci.regs->bridgecall);
+
+ spin_unlock_irqrestore(&vbus_pci.lock, flags);
+
+ vbus_pci.stats.bridgecalls++;
+
+ return ret;
+}
+
+static int
+vbus_pci_buscall(unsigned long nr, void *data, unsigned long len)
+{
+ struct vbus_pci_fastcall_desc *params;
+ int ret;
+
+ preempt_disable();
+
+ params = &get_cpu_var(vbus_pci_percpu_fastcall);
+
+ params->call.vector = nr;
+ params->call.len = len;
+ params->call.datap = __pa(data);
+
+ iowrite32(smp_processor_id(), &vbus_pci.signals->fastcall);
+
+ ret = params->result;
+
+ preempt_enable();
+
+ vbus_pci.stats.buscalls++;
+
+ return ret;
+}
+
+static struct vbus_pci_device *
+to_dev(struct vbus_device_proxy *vdev)
+{
+ return container_of(vdev, struct vbus_pci_device, vdev);
+}
+
+static void
+_signal_init(struct shm_signal *signal, struct shm_signal_desc *desc,
+ struct shm_signal_ops *ops)
+{
+ desc->magic = SHM_SIGNAL_MAGIC;
+ desc->ver = SHM_SIGNAL_VER;
+
+ shm_signal_init(signal, shm_locality_north, ops, desc);
+}
+
+/*
+ * -------------------
+ * _signal
+ * -------------------
+ */
+
+struct _signal {
+ char name[64];
+ struct vbus_pci *pcivbus;
+ struct shm_signal signal;
+ u32 handle;
+ struct rb_node node;
+ struct list_head list;
+ int irq;
+ struct irq_desc *desc;
+};
+
+static struct _signal *
+to_signal(struct shm_signal *signal)
+{
+ return container_of(signal, struct _signal, signal);
+}
+
+static int
+_signal_inject(struct shm_signal *signal)
+{
+ struct _signal *_signal = to_signal(signal);
+
+ vbus_pci.stats.inject++;
+ iowrite32(_signal->handle, &vbus_pci.signals->shmsignal);
+
+ return 0;
+}
+
+static void
+_signal_release(struct shm_signal *signal)
+{
+ struct _signal *_signal = to_signal(signal);
+
+ kfree(_signal);
+}
+
+static struct shm_signal_ops _signal_ops = {
+ .inject = _signal_inject,
+ .release = _signal_release,
+};
+
+static void shmsignal_disconnect(struct _signal *_signal);
+
+/*
+ * -------------------
+ * vbus_device_proxy routines
+ * -------------------
+ */
+
+static int
+vbus_pci_device_open(struct vbus_device_proxy *vdev, int version, int flags)
+{
+ struct vbus_pci_device *dev = to_dev(vdev);
+ struct vbus_pci_deviceopen params;
+ int ret;
+
+ if (dev->handle)
+ return -EINVAL;
+
+ params.devid = vdev->id;
+ params.version = version;
+
+ ret = vbus_pci_buscall(VBUS_PCI_HC_DEVOPEN,
+ &params, sizeof(params));
+ if (ret < 0)
+ return ret;
+
+ dev->handle = params.handle;
+
+ return 0;
+}
+
+static int
+vbus_pci_device_close(struct vbus_device_proxy *vdev, int flags)
+{
+ struct vbus_pci_device *dev = to_dev(vdev);
+ unsigned long iflags;
+ int ret;
+
+ if (!dev->handle)
+ return -EINVAL;
+
+ spin_lock_irqsave(&vbus_pci.lock, iflags);
+
+ while (!list_empty(&dev->shms)) {
+ struct _signal *_signal;
+
+ _signal = list_first_entry(&dev->shms, struct _signal, list);
+
+ list_del(&_signal->list);
+ shmsignal_disconnect(_signal);
+
+ spin_unlock_irqrestore(&vbus_pci.lock, iflags);
+ shm_signal_put(&_signal->signal);
+ spin_lock_irqsave(&vbus_pci.lock, iflags);
+ }
+
+ spin_unlock_irqrestore(&vbus_pci.lock, iflags);
+
+ /*
+ * The DEVICECLOSE will implicitly close all of the shm on the
+ * host-side, so there is no need to do an explicit per-shm
+ * hypercall
+ */
+ ret = vbus_pci_buscall(VBUS_PCI_HC_DEVCLOSE,
+ &dev->handle, sizeof(dev->handle));
+
+ if (ret < 0)
+ printk(KERN_ERR "VBUS-PCI: Error closing device %s/%lld: %d\n",
+ vdev->type, vdev->id, ret);
+
+ dev->handle = 0;
+
+ return 0;
+}
+
+/*
+ * -------------------
+ * shmsignal interrupt routines
+ * -------------------
+ */
+
+/* We abstract these routines so that we can drop in irqchip later */
+
+static void
+shmsignal_wakeup(struct _signal *_signal)
+{
+ _shm_signal_wakeup(&_signal->signal);
+}
+
+static int
+shmsignal_connect(struct _signal *_signal)
+{
+ return 0;
+}
+
+static void
+shmsignal_disconnect(struct _signal *_signal)
+{
+
+}
+
+static int
+vbus_pci_device_shm(struct vbus_device_proxy *vdev, const char *name,
+ int id, int prio,
+ void *ptr, size_t len,
+ struct shm_signal_desc *sdesc, struct shm_signal **signal,
+ int flags)
+{
+ struct vbus_pci_device *dev = to_dev(vdev);
+ struct _signal *_signal = NULL;
+ struct vbus_pci_deviceshm params;
+ unsigned long iflags;
+ int ret;
+
+ if (!dev->handle)
+ return -EINVAL;
+
+ params.devh = dev->handle;
+ params.id = id;
+ params.flags = flags;
+ params.datap = (u64)__pa(ptr);
+ params.len = len;
+
+ if (signal) {
+ /*
+ * The signal descriptor must be embedded within the
+ * provided ptr
+ */
+ if (!sdesc
+ || (len < sizeof(*sdesc))
+ || ((void *)sdesc < ptr)
+ || ((void *)sdesc > (ptr + len - sizeof(*sdesc))))
+ return -EINVAL;
+
+ _signal = kzalloc(sizeof(*_signal), GFP_KERNEL);
+ if (!_signal)
+ return -ENOMEM;
+
+ _signal_init(&_signal->signal, sdesc, &_signal_ops);
+
+ /*
+ * take another reference for the host. This is dropped
+ * by a SHMCLOSE event
+ */
+ shm_signal_get(&_signal->signal);
+
+ params.signal.offset = (u64)(unsigned long)sdesc -
+ (u64)(unsigned long)ptr;
+ params.signal.prio = prio;
+ params.signal.cookie = (u64)(unsigned long)_signal;
+
+ } else
+ params.signal.offset = -1; /* yes, this is a u32, but it's ok */
+
+ ret = vbus_pci_buscall(VBUS_PCI_HC_DEVSHM,
+ &params, sizeof(params));
+ if (ret < 0)
+ goto fail;
+
+ if (signal) {
+
+ BUG_ON(ret < 0);
+
+ _signal->handle = ret;
+
+ if (!name)
+ snprintf(_signal->name, sizeof(_signal->name),
+ "dev%lld-id%d", vdev->id, id);
+ else
+ snprintf(_signal->name, sizeof(_signal->name),
+ "%s", name);
+
+ shmsignal_connect(_signal);
+
+ spin_lock_irqsave(&vbus_pci.lock, iflags);
+ list_add_tail(&_signal->list, &dev->shms);
+ spin_unlock_irqrestore(&vbus_pci.lock, iflags);
+
+ shm_signal_get(&_signal->signal);
+ *signal = &_signal->signal;
+ }
+
+ return 0;
+
+fail:
+ if (_signal) {
+ /*
+ * We held two references above, so we need to drop
+ * both of them
+ */
+ shm_signal_put(&_signal->signal);
+ shm_signal_put(&_signal->signal);
+ }
+
+ return ret;
+}
+
+static int
+vbus_pci_device_call(struct vbus_device_proxy *vdev, u32 func, void *data,
+ size_t len, int flags)
+{
+ struct vbus_pci_device *dev = to_dev(vdev);
+ struct vbus_pci_devicecall params = {
+ .devh = dev->handle,
+ .func = func,
+ .datap = (u64)__pa(data),
+ .len = len,
+ .flags = flags,
+ };
+
+ if (!dev->handle)
+ return -EINVAL;
+
+ return vbus_pci_buscall(VBUS_PCI_HC_DEVCALL, &params, sizeof(params));
+}
+
+static void
+vbus_pci_device_release(struct vbus_device_proxy *vdev)
+{
+ struct vbus_pci_device *_dev = to_dev(vdev);
+
+ vbus_pci_device_close(vdev, 0);
+
+ kfree(_dev);
+}
+
+static struct vbus_device_proxy_ops vbus_pci_device_ops = {
+ .open = vbus_pci_device_open,
+ .close = vbus_pci_device_close,
+ .shm = vbus_pci_device_shm,
+ .call = vbus_pci_device_call,
+ .release = vbus_pci_device_release,
+};
+
+/*
+ * -------------------
+ * vbus events
+ * -------------------
+ */
+
+struct deferred_devadd_event {
+ struct work_struct work;
+ struct vbus_pci_add_event event;
+};
+
+static void deferred_devdrop(struct work_struct *work);
+
+static void
+deferred_devadd(struct work_struct *work)
+{
+ struct deferred_devadd_event *_event;
+ struct vbus_pci_device *new;
+ int ret;
+
+ _event = container_of(work, struct deferred_devadd_event, work);
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new) {
+ printk(KERN_ERR "VBUS_PCI: Out of memory on add_event\n");
+ return;
+ }
+
+ INIT_LIST_HEAD(&new->shms);
+
+ memcpy(new->type, _event->event.type, VBUS_MAX_DEVTYPE_LEN);
+ new->vdev.type = new->type;
+ new->vdev.id = _event->event.id;
+ new->vdev.ops = &vbus_pci_device_ops;
+
+ dev_set_name(&new->vdev.dev, "%lld", _event->event.id);
+
+ INIT_WORK(&new->drop, deferred_devdrop);
+
+ ret = vbus_device_proxy_register(&new->vdev);
+ if (ret < 0)
+ panic("failed to register device %lld(%s): %d\n",
+ new->vdev.id, new->type, ret);
+
+ kfree(_event);
+}
+
+static void
+deferred_devdrop(struct work_struct *work)
+{
+ struct vbus_pci_device *dev;
+
+ dev = container_of(work, struct vbus_pci_device, drop);
+ vbus_device_proxy_unregister(&dev->vdev);
+}
+
+static void
+event_devadd(struct vbus_pci_add_event *event)
+{
+ struct deferred_devadd_event *_event;
+
+ _event = kzalloc(sizeof(*_event), GFP_ATOMIC);
+ if (!_event) {
+ printk(KERN_ERR \
+ "VBUS_PCI: Out of ATOMIC memory on add_event\n");
+ return;
+ }
+
+ INIT_WORK(&_event->work, deferred_devadd);
+ memcpy(&_event->event, event, sizeof(*event));
+
+ schedule_work(&_event->work);
+}
+
+static void
+event_devdrop(struct vbus_pci_handle_event *event)
+{
+ struct vbus_device_proxy *dev = vbus_device_proxy_find(event->handle);
+
+ if (!dev) {
+ printk(KERN_WARNING "VBUS-PCI: devdrop failed: %lld\n",
+ event->handle);
+ return;
+ }
+
+ schedule_work(&to_dev(dev)->drop);
+}
+
+static void
+event_shmsignal(struct vbus_pci_handle_event *event)
+{
+ struct _signal *_signal = (struct _signal *)(unsigned long)event->handle;
+
+ vbus_pci.stats.notify++;
+
+ shmsignal_wakeup(_signal);
+}
+
+static void
+event_shmclose(struct vbus_pci_handle_event *event)
+{
+ struct _signal *_signal = (struct _signal *)(unsigned long)event->handle;
+
+ /*
+ * This reference was taken during the DEVICESHM call
+ */
+ shm_signal_put(&_signal->signal);
+}
+
+/*
+ * -------------------
+ * eventq routines
+ * -------------------
+ */
+
+static struct ioq_notifier eventq_notifier;
+
+static int __devinit
+eventq_init(int qlen)
+{
+ struct ioq_iterator iter;
+ int ret;
+ int i;
+
+ vbus_pci.ring = kzalloc(sizeof(struct vbus_pci_event) * qlen,
+ GFP_KERNEL);
+ if (!vbus_pci.ring)
+ return -ENOMEM;
+
+ /*
+ * We want to iterate on the "valid" index. By default the iterator
+ * will not "autoupdate" which means it will not hypercall the host
+ * with our changes. This is good, because we are really just
+ * initializing stuff here anyway. Note that you can always manually
+ * signal the host with ioq_signal() if the autoupdate feature is not
+ * used.
+ */
+ ret = ioq_iter_init(&vbus_pci.eventq, &iter, ioq_idxtype_valid, 0);
+ BUG_ON(ret < 0);
+
+ /*
+ * Seek to the tail of the valid index (which should be our first
+ * item since the queue is brand-new)
+ */
+ ret = ioq_iter_seek(&iter, ioq_seek_tail, 0, 0);
+ BUG_ON(ret < 0);
+
+ /*
+ * Now populate each descriptor with an empty vbus_event and mark it
+ * valid
+ */
+ for (i = 0; i < qlen; i++) {
+ struct vbus_pci_event *event = &vbus_pci.ring[i];
+ size_t len = sizeof(*event);
+ struct ioq_ring_desc *desc = iter.desc;
+
+ BUG_ON(iter.desc->valid);
+
+ desc->cookie = (u64)(unsigned long)event;
+ desc->ptr = cpu_to_le64(__pa(event));
+ desc->len = cpu_to_le64(len); /* total length */
+ desc->valid = 1;
+
+ /*
+ * This push operation will simultaneously advance the
+ * valid-tail index and increment our position in the queue
+ * by one.
+ */
+ ret = ioq_iter_push(&iter, 0);
+ BUG_ON(ret < 0);
+ }
+
+ vbus_pci.eventq.notifier = &eventq_notifier;
+
+ /*
+ * And finally, ensure that we can receive notification
+ */
+ ioq_notify_enable(&vbus_pci.eventq, 0);
+
+ return 0;
+}
+
+/* Invoked whenever the hypervisor ioq_signal()s our eventq */
+static void
+eventq_wakeup(struct ioq_notifier *notifier)
+{
+ struct ioq_iterator iter;
+ int ret;
+
+ /* We want to iterate on the head of the in-use index */
+ ret = ioq_iter_init(&vbus_pci.eventq, &iter, ioq_idxtype_inuse, 0);
+ BUG_ON(ret < 0);
+
+ ret = ioq_iter_seek(&iter, ioq_seek_head, 0, 0);
+ BUG_ON(ret < 0);
+
+ /*
+ * The EOM is indicated by finding a packet that is still owned by
+ * the south side.
+ *
+ * FIXME: This in theory could run indefinitely if the host keeps
+ * feeding us events since there is nothing like a NAPI budget. We
+ * might need to address that
+ */
+ while (!iter.desc->sown) {
+ struct ioq_ring_desc *desc = iter.desc;
+ struct vbus_pci_event *event;
+
+ event = (struct vbus_pci_event *)(unsigned long)desc->cookie;
+
+ switch (event->eventid) {
+ case VBUS_PCI_EVENT_DEVADD:
+ event_devadd(&event->data.add);
+ break;
+ case VBUS_PCI_EVENT_DEVDROP:
+ event_devdrop(&event->data.handle);
+ break;
+ case VBUS_PCI_EVENT_SHMSIGNAL:
+ event_shmsignal(&event->data.handle);
+ break;
+ case VBUS_PCI_EVENT_SHMCLOSE:
+ event_shmclose(&event->data.handle);
+ break;
+ default:
+ printk(KERN_WARNING "VBUS_PCI: Unexpected event %d\n",
+ event->eventid);
+ break;
+ }
+
+ memset(event, 0, sizeof(*event));
+
+ /* Advance the in-use head */
+ ret = ioq_iter_pop(&iter, 0);
+ BUG_ON(ret < 0);
+
+ vbus_pci.stats.events++;
+ }
+
+ /* And let the south side know that we changed the queue */
+ ioq_signal(&vbus_pci.eventq, 0);
+}
+
+static struct ioq_notifier eventq_notifier = {
+ .signal = &eventq_wakeup,
+};
+
+/* Injected whenever the host issues an ioq_signal() on the eventq */
+static irqreturn_t
+eventq_intr(int irq, void *dev)
+{
+ vbus_pci.stats.qnotify++;
+ _shm_signal_wakeup(vbus_pci.eventq.signal);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * -------------------
+ */
+
+static int
+eventq_signal_inject(struct shm_signal *signal)
+{
+ vbus_pci.stats.qinject++;
+
+ /* The eventq uses the special-case handle=0 */
+ iowrite32(0, &vbus_pci.signals->eventq);
+
+ return 0;
+}
+
+static void
+eventq_signal_release(struct shm_signal *signal)
+{
+ kfree(signal);
+}
+
+static struct shm_signal_ops eventq_signal_ops = {
+ .inject = eventq_signal_inject,
+ .release = eventq_signal_release,
+};
+
+/*
+ * -------------------
+ */
+
+static void
+eventq_ioq_release(struct ioq *ioq)
+{
+ /* released as part of the vbus_pci object */
+}
+
+static struct ioq_ops eventq_ioq_ops = {
+ .release = eventq_ioq_release,
+};
+
+/*
+ * -------------------
+ */
+
+static void
+vbus_pci_release(void)
+{
+#ifdef CONFIG_DEBUG_FS
+ if (vbus_pci.stats.fs)
+ debugfs_remove(vbus_pci.stats.fs);
+#endif
+
+ if (vbus_pci.irq > 0)
+ free_irq(vbus_pci.irq, NULL);
+
+ if (vbus_pci.signals)
+ pci_iounmap(vbus_pci.dev, (void *)vbus_pci.signals);
+
+ if (vbus_pci.regs)
+ pci_iounmap(vbus_pci.dev, (void *)vbus_pci.regs);
+
+ pci_release_regions(vbus_pci.dev);
+ pci_disable_device(vbus_pci.dev);
+
+ kfree(vbus_pci.eventq.head_desc);
+ kfree(vbus_pci.ring);
+
+ vbus_pci.enabled = false;
+}
+
+static int __devinit
+vbus_pci_open(void)
+{
+ struct vbus_pci_bridge_negotiate params = {
+ .magic = VBUS_PCI_ABI_MAGIC,
+ .version = VBUS_PCI_HC_VERSION,
+ .capabilities = 0,
+ };
+
+ return vbus_pci_bridgecall(VBUS_PCI_BRIDGE_NEGOTIATE,
+ &params, sizeof(params));
+}
+
+#define QLEN 1024
+
+static int __devinit
+vbus_pci_eventq_register(void)
+{
+ struct vbus_pci_busreg params = {
+ .count = 1,
+ .eventq = {
+ {
+ .count = QLEN,
+ .ring = (u64)__pa(vbus_pci.eventq.head_desc),
+ .data = (u64)__pa(vbus_pci.ring),
+ },
+ },
+ };
+
+ return vbus_pci_bridgecall(VBUS_PCI_BRIDGE_QREG,
+ &params, sizeof(params));
+}
+
+static int __devinit
+_ioq_init(size_t ringsize, struct ioq *ioq, struct ioq_ops *ops)
+{
+ struct shm_signal *signal = NULL;
+ struct ioq_ring_head *head = NULL;
+ size_t len = IOQ_HEAD_DESC_SIZE(ringsize);
+
+ head = kzalloc(len, GFP_KERNEL | GFP_DMA);
+ if (!head)
+ return -ENOMEM;
+
+ signal = kzalloc(sizeof(*signal), GFP_KERNEL);
+ if (!signal) {
+ kfree(head);
+ return -ENOMEM;
+ }
+
+ head->magic = IOQ_RING_MAGIC;
+ head->ver = IOQ_RING_VER;
+ head->count = cpu_to_le32(ringsize);
+
+ _signal_init(signal, &head->signal, &eventq_signal_ops);
+
+ ioq_init(ioq, ops, ioq_locality_north, head, signal, ringsize);
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int _debugfs_seq_show(struct seq_file *m, void *p)
+{
+#define P(F) \
+ seq_printf(m, " .%-30s: %d\n", #F, (int)vbus_pci.stats.F)
+
+ P(events);
+ P(qnotify);
+ P(qinject);
+ P(notify);
+ P(inject);
+ P(bridgecalls);
+ P(buscalls);
+
+#undef P
+
+ return 0;
+}
+
+static int _debugfs_fops_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, _debugfs_seq_show, inode->i_private);
+}
+
+static const struct file_operations stat_fops = {
+ .open = _debugfs_fops_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+#endif
+
+static int __devinit
+vbus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int ret;
+ int cpu;
+
+ if (vbus_pci.enabled)
+ return -EEXIST; /* we only support one bridge per kernel */
+
+ if (pdev->revision != VBUS_PCI_ABI_VERSION) {
+ printk(KERN_DEBUG "VBUS_PCI: expected ABI version %d, got %d\n",
+ VBUS_PCI_ABI_VERSION,
+ pdev->revision);
+ return -ENODEV;
+ }
+
+ vbus_pci.dev = pdev;
+
+ ret = pci_enable_device(pdev);
+ if (ret < 0)
+ return ret;
+
+ pci_set_master(pdev);
+
+ ret = pci_request_regions(pdev, VBUS_PCI_NAME);
+ if (ret < 0) {
+ printk(KERN_ERR "VBUS_PCI: Could not init BARs: %d\n", ret);
+ goto out_fail;
+ }
+
+ vbus_pci.regs = pci_iomap(pdev, 0, sizeof(struct vbus_pci_regs));
+ if (!vbus_pci.regs) {
+ printk(KERN_ERR "VBUS_PCI: Could not map BARs\n");
+ goto out_fail;
+ }
+
+ vbus_pci.signals = pci_iomap(pdev, 1, sizeof(struct vbus_pci_signals));
+ if (!vbus_pci.signals) {
+ printk(KERN_ERR "VBUS_PCI: Could not map BARs\n");
+ goto out_fail;
+ }
+
+ ret = vbus_pci_open();
+ if (ret < 0) {
+ printk(KERN_DEBUG "VBUS_PCI: Could not register with host: %d\n",
+ ret);
+ goto out_fail;
+ }
+
+ /*
+ * Allocate an IOQ to use for host-2-guest event notification
+ */
+ ret = _ioq_init(QLEN, &vbus_pci.eventq, &eventq_ioq_ops);
+ if (ret < 0) {
+ printk(KERN_ERR "VBUS_PCI: Cound not init eventq: %d\n", ret);
+ goto out_fail;
+ }
+
+ ret = eventq_init(QLEN);
+ if (ret < 0) {
+ printk(KERN_ERR "VBUS_PCI: Cound not setup ring: %d\n", ret);
+ goto out_fail;
+ }
+
+ ret = pci_enable_msi(pdev);
+ if (ret < 0) {
+ printk(KERN_ERR "VBUS_PCI: Cound not enable MSI: %d\n", ret);
+ goto out_fail;
+ }
+
+ vbus_pci.irq = pdev->irq;
+
+ ret = request_irq(pdev->irq, eventq_intr, 0, "vbus", NULL);
+ if (ret < 0) {
+ printk(KERN_ERR "VBUS_PCI: Failed to register IRQ %d\n: %d",
+ pdev->irq, ret);
+ goto out_fail;
+ }
+
+ /*
+ * Add one fastcall vector per cpu so that we can do lockless
+ * hypercalls
+ */
+ for_each_possible_cpu(cpu) {
+ struct vbus_pci_fastcall_desc *desc =
+ &per_cpu(vbus_pci_percpu_fastcall, cpu);
+ struct vbus_pci_call_desc params = {
+ .vector = cpu,
+ .len = sizeof(*desc),
+ .datap = __pa(desc),
+ };
+
+ ret = vbus_pci_bridgecall(VBUS_PCI_BRIDGE_FASTCALL_ADD,
+ &params, sizeof(params));
+ if (ret < 0) {
+ printk(KERN_ERR \
+ "VBUS_PCI: Failed to register cpu:%d\n: %d",
+ cpu, ret);
+ goto out_fail;
+ }
+ }
+
+ /*
+ * Finally register our queue on the host to start receiving events
+ */
+ ret = vbus_pci_eventq_register();
+ if (ret < 0) {
+ printk(KERN_ERR "VBUS_PCI: Could not register with host: %d\n",
+ ret);
+ goto out_fail;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ vbus_pci.stats.fs = debugfs_create_file(VBUS_PCI_NAME, S_IRUGO,
+ NULL, NULL, &stat_fops);
+ if (IS_ERR(vbus_pci.stats.fs)) {
+ ret = PTR_ERR(vbus_pci.stats.fs);
+ printk(KERN_ERR "VBUS_PCI: error creating stats-fs: %d\n", ret);
+ goto out_fail;
+ }
+#endif
+
+ vbus_pci.enabled = true;
+
+ printk(KERN_INFO "Virtual-Bus: Copyright (c) 2009, " \
+ "Gregory Haskins <ghaskins@novell.com>\n");
+
+ return 0;
+
+ out_fail:
+ vbus_pci_release();
+
+ return ret;
+}
+
+static void __devexit
+vbus_pci_remove(struct pci_dev *pdev)
+{
+ vbus_pci_release();
+}
+
+static DEFINE_PCI_DEVICE_TABLE(vbus_pci_tbl) = {
+ { PCI_DEVICE(0x11da, 0x2000) },
+ { 0 },
+};
+
+MODULE_DEVICE_TABLE(pci, vbus_pci_tbl);
+
+static struct pci_driver vbus_pci_driver = {
+ .name = VBUS_PCI_NAME,
+ .id_table = vbus_pci_tbl,
+ .probe = vbus_pci_probe,
+ .remove = vbus_pci_remove,
+};
+
+static int __init
+vbus_pci_init(void)
+{
+ memset(&vbus_pci, 0, sizeof(vbus_pci));
+ spin_lock_init(&vbus_pci.lock);
+
+ return pci_register_driver(&vbus_pci_driver);
+}
+
+static void __exit
+vbus_pci_exit(void)
+{
+ pci_unregister_driver(&vbus_pci_driver);
+}
+
+module_init(vbus_pci_init);
+module_exit(vbus_pci_exit);
+
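
Continuing the hypothetical client sketched after bus-proxy.c: once the bridge above announces a device through a DEVADD event, the client's probe would typically negotiate a version and allocate its rings via the proxy ops, which land in vbus_pci_device_open() and vbus_pci_device_shm() respectively. The version number, queue name and ring size below are illustrative only:

    static int example_probe(struct vbus_device_proxy *dev)
    {
            struct ioq *rxq;
            int ret;

            ret = dev->ops->open(dev, 1 /* assumed ABI version */, 0);
            if (ret < 0)
                    return ret;

            /* sets up the shared ring via VBUS_PCI_HC_DEVSHM on the host */
            ret = vbus_driver_ioq_alloc(dev, "rx", 0, 0, 128, &rxq);
            if (ret < 0) {
                    dev->ops->close(dev, 0);
                    return ret;
            }

            return 0;
    }
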
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 9777583218ff..aa88911c9504 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -642,7 +642,7 @@ static struct miscdevice vhost_net_misc = {
&vhost_net_fops,
};
-int vhost_net_init(void)
+static int vhost_net_init(void)
{
int r = vhost_init();
if (r)
@@ -659,7 +659,7 @@ err_init:
}
module_init(vhost_net_init);
-void vhost_net_exit(void)
+static void vhost_net_exit(void)
{
misc_deregister(&vhost_net_misc);
vhost_cleanup();
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index e69d238c5af0..5c9c657ab753 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -715,8 +715,8 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
return 0;
}
-int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
- struct iovec iov[], int iov_size)
+static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
+ struct iovec iov[], int iov_size)
{
const struct vhost_memory_region *reg;
struct vhost_memory *mem;
@@ -741,7 +741,7 @@ int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
_iov = iov + ret;
size = reg->memory_size - addr + reg->guest_phys_addr;
_iov->iov_len = min((u64)len, size);
- _iov->iov_base = (void *)(unsigned long)
+ _iov->iov_base = (void __user *)(unsigned long)
(reg->userspace_addr + addr - reg->guest_phys_addr);
s += size;
addr += size;
@@ -995,7 +995,7 @@ void vhost_discard_vq_desc(struct vhost_virtqueue *vq)
* want to notify the guest, using eventfd. */
int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
{
- struct vring_used_elem *used;
+ struct vring_used_elem __user *used;
/* The virtqueue contains a ring of used buffers. Get a pointer to the
* next entry in that used ring. */
@@ -1019,7 +1019,8 @@ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
smp_wmb();
/* Log used ring entry write. */
log_write(vq->log_base,
- vq->log_addr + ((void *)used - (void *)vq->used),
+ vq->log_addr +
+ ((void __user *)used - (void __user *)vq->used),
sizeof *used);
/* Log used index update. */
log_write(vq->log_base,
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 6e16244f3ed1..1e6fec487973 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1511,6 +1511,7 @@ config FB_VIA
select FB_CFB_IMAGEBLIT
select I2C_ALGOBIT
select I2C
+ select GPIOLIB
help
This is the frame buffer device driver for Graphics chips of VIA
UniChrome (Pro) Family (CLE266,PM800/CN400,P4M800CE/P4M800Pro/
@@ -1520,6 +1521,21 @@ config FB_VIA
To compile this driver as a module, choose M here: the
module will be called viafb.
+
+if FB_VIA
+
+config FB_VIA_DIRECT_PROCFS
+ bool "direct hardware access via procfs (DEPRECATED)(DANGEROUS)"
+ depends on FB_VIA
+ default n
+ help
+ Allow direct hardware access to some output registers via procfs.
+ This is dangerous but may provide the only chance to get the
+ correct output device configuration.
+ Its use is strongly discouraged.
+
+endif
+
config FB_NEOMAGIC
tristate "NeoMagic display support"
depends on FB && PCI
@@ -2186,7 +2202,6 @@ config FB_MSM
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
- default y
config FB_MX3
tristate "MX3 Framebuffer support"
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index c025c84601b0..4dfb5f44dffd 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -8,12 +8,13 @@ menuconfig BACKLIGHT_LCD_SUPPORT
Enable this to be able to choose the drivers for controlling the
backlight and the LCD panel on some platforms, for example on PDAs.
+if BACKLIGHT_LCD_SUPPORT
+
#
# LCD
#
config LCD_CLASS_DEVICE
tristate "Lowlevel LCD controls"
- depends on BACKLIGHT_LCD_SUPPORT
default m
help
This framework adds support for low-level control of LCD.
@@ -24,31 +25,32 @@ config LCD_CLASS_DEVICE
To have support for your specific LCD panel you will have to
select the proper drivers which depend on this option.
+if LCD_CLASS_DEVICE
+
config LCD_CORGI
tristate "LCD Panel support for SHARP corgi/spitz model"
- depends on LCD_CLASS_DEVICE && SPI_MASTER && PXA_SHARPSL
+ depends on SPI_MASTER && PXA_SHARPSL
help
Say y here to support the LCD panels usually found on SHARP
corgi (C7x0) and spitz (Cxx00) models.
config LCD_L4F00242T03
tristate "Epson L4F00242T03 LCD"
- depends on LCD_CLASS_DEVICE && SPI_MASTER && GENERIC_GPIO
+ depends on SPI_MASTER && GENERIC_GPIO
help
SPI driver for Epson L4F00242T03. This provides basic support
for init and powering the LCD up/down through a sysfs interface.
config LCD_LMS283GF05
tristate "Samsung LMS283GF05 LCD"
- depends on LCD_CLASS_DEVICE && SPI_MASTER && GENERIC_GPIO
+ depends on SPI_MASTER && GENERIC_GPIO
help
SPI driver for Samsung LMS283GF05. This provides basic support
for powering the LCD up/down through a sysfs interface.
config LCD_LTV350QV
tristate "Samsung LTV350QV LCD Panel"
- depends on LCD_CLASS_DEVICE && SPI_MASTER
- default n
+ depends on SPI_MASTER
help
If you have a Samsung LTV350QV LCD panel, say y to include a
power control driver for it. The panel starts up in power
@@ -59,60 +61,53 @@ config LCD_LTV350QV
config LCD_ILI9320
tristate
- depends on LCD_CLASS_DEVICE && BACKLIGHT_LCD_SUPPORT
- default n
help
If you have a panel based on the ILI9320 controller chip
then say y to include a power driver for it.
config LCD_TDO24M
tristate "Toppoly TDO24M and TDO35S LCD Panels support"
- depends on LCD_CLASS_DEVICE && SPI_MASTER
- default n
+ depends on SPI_MASTER
help
If you have a Toppoly TDO24M/TDO35S series LCD panel, say y here to
include the support for it.
config LCD_VGG2432A4
tristate "VGG2432A4 LCM device support"
- depends on BACKLIGHT_LCD_SUPPORT && LCD_CLASS_DEVICE && SPI_MASTER
+ depends on SPI_MASTER
select LCD_ILI9320
- default n
help
If you have a VGG2432A4 panel based on the ILI9320 controller chip
then say y to include a power driver for it.
config LCD_PLATFORM
tristate "Platform LCD controls"
- depends on LCD_CLASS_DEVICE
help
This driver provides a platform-device registered LCD power
control interface.
config LCD_TOSA
tristate "Sharp SL-6000 LCD Driver"
- depends on LCD_CLASS_DEVICE && SPI
- depends on MACH_TOSA
- default n
+ depends on SPI && MACH_TOSA
help
If you have a Sharp SL-6000 Zaurus say Y to enable a driver
for its LCD.
config LCD_HP700
tristate "HP Jornada 700 series LCD Driver"
- depends on LCD_CLASS_DEVICE
depends on SA1100_JORNADA720_SSP && !PREEMPT
default y
help
If you have an HP Jornada 700 series handheld (710/720/728)
say Y to enable LCD control driver.
+endif # LCD_CLASS_DEVICE
+
#
# Backlight
#
config BACKLIGHT_CLASS_DEVICE
tristate "Lowlevel Backlight controls"
- depends on BACKLIGHT_LCD_SUPPORT
default m
help
This framework adds support for low-level control of the LCD
@@ -121,9 +116,11 @@ config BACKLIGHT_CLASS_DEVICE
To have support for your specific LCD panel you will have to
select the proper drivers which depend on this option.
+if BACKLIGHT_CLASS_DEVICE
+
config BACKLIGHT_ATMEL_LCDC
bool "Atmel LCDC Contrast-as-Backlight control"
- depends on BACKLIGHT_CLASS_DEVICE && FB_ATMEL
+ depends on FB_ATMEL
default y if MACH_SAM9261EK || MACH_SAM9G10EK || MACH_SAM9263EK
help
This provides a backlight control internal to the Atmel LCDC
@@ -136,8 +133,7 @@ config BACKLIGHT_ATMEL_LCDC
config BACKLIGHT_ATMEL_PWM
tristate "Atmel PWM backlight control"
- depends on BACKLIGHT_CLASS_DEVICE && ATMEL_PWM
- default n
+ depends on ATMEL_PWM
help
Say Y here if you want to use the PWM peripheral in Atmel AT91 and
AVR32 devices. This driver will need additional platform data to know
@@ -148,7 +144,6 @@ config BACKLIGHT_ATMEL_PWM
config BACKLIGHT_GENERIC
tristate "Generic (aka Sharp Corgi) Backlight Driver"
- depends on BACKLIGHT_CLASS_DEVICE
default y
help
Say y to enable the generic platform backlight driver previously
@@ -157,7 +152,7 @@ config BACKLIGHT_GENERIC
config BACKLIGHT_LOCOMO
tristate "Sharp LOCOMO LCD/Backlight Driver"
- depends on BACKLIGHT_CLASS_DEVICE && SHARP_LOCOMO
+ depends on SHARP_LOCOMO
default y
help
If you have a Sharp Zaurus SL-5500 (Collie) or SL-5600 (Poodle) say y to
@@ -165,7 +160,7 @@ config BACKLIGHT_LOCOMO
config BACKLIGHT_OMAP1
tristate "OMAP1 PWL-based LCD Backlight"
- depends on BACKLIGHT_CLASS_DEVICE && ARCH_OMAP1
+ depends on ARCH_OMAP1
default y
help
This driver controls the LCD backlight level and power for
@@ -174,7 +169,7 @@ config BACKLIGHT_OMAP1
config BACKLIGHT_HP680
tristate "HP Jornada 680 Backlight Driver"
- depends on BACKLIGHT_CLASS_DEVICE && SH_HP6XX
+ depends on SH_HP6XX
default y
help
If you have a HP Jornada 680, say y to enable the
@@ -182,7 +177,6 @@ config BACKLIGHT_HP680
config BACKLIGHT_HP700
tristate "HP Jornada 700 series Backlight Driver"
- depends on BACKLIGHT_CLASS_DEVICE
depends on SA1100_JORNADA720_SSP && !PREEMPT
default y
help
@@ -191,76 +185,70 @@ config BACKLIGHT_HP700
config BACKLIGHT_PROGEAR
tristate "Frontpath ProGear Backlight Driver"
- depends on BACKLIGHT_CLASS_DEVICE && PCI && X86
- default n
+ depends on PCI && X86
help
If you have a Frontpath ProGear say Y to enable the
backlight driver.
config BACKLIGHT_CARILLO_RANCH
tristate "Intel Carillo Ranch Backlight Driver"
- depends on BACKLIGHT_CLASS_DEVICE && LCD_CLASS_DEVICE && PCI && X86 && FB_LE80578
- default n
+ depends on LCD_CLASS_DEVICE && PCI && X86 && FB_LE80578
help
If you have an Intel LE80578 (Carillo Ranch) say Y to enable the
backlight driver.
config BACKLIGHT_PWM
tristate "Generic PWM based Backlight Driver"
- depends on BACKLIGHT_CLASS_DEVICE && HAVE_PWM
+ depends on HAVE_PWM
help
If you have a LCD backlight adjustable by PWM, say Y to enable
this driver.
config BACKLIGHT_DA903X
tristate "Backlight Driver for DA9030/DA9034 using WLED"
- depends on BACKLIGHT_CLASS_DEVICE && PMIC_DA903X
+ depends on PMIC_DA903X
help
If you have a LCD backlight connected to the WLED output of DA9030
or DA9034 WLED output, say Y here to enable this driver.
config BACKLIGHT_MAX8925
tristate "Backlight driver for MAX8925"
- depends on BACKLIGHT_CLASS_DEVICE && MFD_MAX8925
+ depends on MFD_MAX8925
help
If you have a LCD backlight connected to the WLED output of MAX8925
WLED output, say Y here to enable this driver.
config BACKLIGHT_MBP_NVIDIA
tristate "MacBook Pro Nvidia Backlight Driver"
- depends on BACKLIGHT_CLASS_DEVICE && X86
- default n
+ depends on X86
help
If you have an Apple Macbook Pro with Nvidia graphics hardware say Y
to enable a driver for its backlight
config BACKLIGHT_TOSA
tristate "Sharp SL-6000 Backlight Driver"
- depends on BACKLIGHT_CLASS_DEVICE && I2C
- depends on MACH_TOSA && LCD_TOSA
- default n
+ depends on I2C && MACH_TOSA && LCD_TOSA
help
If you have a Sharp SL-6000 Zaurus say Y to enable a driver
for its backlight
config BACKLIGHT_SAHARA
tristate "Tabletkiosk Sahara Touch-iT Backlight Driver"
- depends on BACKLIGHT_CLASS_DEVICE && X86
- default n
+ depends on X86
help
If you have a Tabletkiosk Sahara Touch-iT, say y to enable the
backlight driver.
config BACKLIGHT_WM831X
tristate "WM831x PMIC Backlight Driver"
- depends on BACKLIGHT_CLASS_DEVICE && MFD_WM831X
+ depends on MFD_WM831X
help
If you have a backlight driven by the ISINK and DCDC of a
WM831x PMIC say y to enable the backlight driver for it.
config BACKLIGHT_ADX
tristate "Avionic Design Xanthos Backlight Driver"
- depends on BACKLIGHT_CLASS_DEVICE && ARCH_PXA_ADX
+ depends on ARCH_PXA_ADX
default y
help
Say Y to enable the backlight driver on Avionic Design Xanthos-based
@@ -268,7 +256,7 @@ config BACKLIGHT_ADX
config BACKLIGHT_ADP5520
tristate "Backlight Driver for ADP5520/ADP5501 using WLED"
- depends on BACKLIGHT_CLASS_DEVICE && PMIC_ADP5520
+ depends on PMIC_ADP5520
help
If you have a LCD backlight connected to the BST/BL_SNK output of
ADP5520 or ADP5501, say Y here to enable this driver.
@@ -278,7 +266,10 @@ config BACKLIGHT_ADP5520
config BACKLIGHT_88PM860X
tristate "Backlight Driver for 88PM8606 using WLED"
- depends on BACKLIGHT_CLASS_DEVICE && MFD_88PM860X
+ depends on MFD_88PM860X
help
Say Y to enable the backlight driver for Marvell 88PM8606.
+endif # BACKLIGHT_CLASS_DEVICE
+
+endif # BACKLIGHT_LCD_SUPPORT
diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
index 7f4a7c30a98b..fe9af129c5dd 100644
--- a/drivers/video/backlight/adx_bl.c
+++ b/drivers/video/backlight/adx_bl.c
@@ -107,8 +107,8 @@ static int __devinit adx_backlight_probe(struct platform_device *pdev)
props.max_brightness = 0xff;
bldev = backlight_device_register(dev_name(&pdev->dev), &pdev->dev,
bl, &adx_backlight_ops, &props);
- if (!bldev) {
- ret = -ENOMEM;
+ if (IS_ERR(bldev)) {
+ ret = PTR_ERR(bldev);
goto out;
}
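
The fix above matters because backlight_device_register() reports failure with an ERR_PTR()-encoded pointer rather than NULL, so a NULL check silently accepts an error value. The usual pattern from linux/err.h, mirroring the call in this probe:

    bldev = backlight_device_register(dev_name(&pdev->dev), &pdev->dev,
                                      bl, &adx_backlight_ops, &props);
    if (IS_ERR(bldev))
            return PTR_ERR(bldev);  /* decode the negative errno */
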
diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
index 1b5d3fe6bbbc..52c780f5db1b 100644
--- a/drivers/video/backlight/mbp_nvidia_bl.c
+++ b/drivers/video/backlight/mbp_nvidia_bl.c
@@ -184,6 +184,42 @@ static const struct dmi_system_id __initdata mbp_device_table[] = {
},
{
.callback = mbp_dmi_match,
+ .ident = "MacBookPro 1,1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro1,1"),
+ },
+ .driver_data = (void *)&intel_chipset_data,
+ },
+ {
+ .callback = mbp_dmi_match,
+ .ident = "MacBookPro 1,2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro1,2"),
+ },
+ .driver_data = (void *)&intel_chipset_data,
+ },
+ {
+ .callback = mbp_dmi_match,
+ .ident = "MacBookPro 2,1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro2,1"),
+ },
+ .driver_data = (void *)&intel_chipset_data,
+ },
+ {
+ .callback = mbp_dmi_match,
+ .ident = "MacBookPro 2,2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro2,2"),
+ },
+ .driver_data = (void *)&intel_chipset_data,
+ },
+ {
+ .callback = mbp_dmi_match,
.ident = "MacBookPro 3,1",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
@@ -238,6 +274,15 @@ static const struct dmi_system_id __initdata mbp_device_table[] = {
},
{
.callback = mbp_dmi_match,
+ .ident = "MacBook 6,1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook6,1"),
+ },
+ .driver_data = (void *)&nvidia_chipset_data,
+ },
+ {
+ .callback = mbp_dmi_match,
.ident = "MacBookAir 2,1",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
diff --git a/drivers/video/cobalt_lcdfb.c b/drivers/video/cobalt_lcdfb.c
index 5eb61b5adfe8..42fe155aba0e 100644
--- a/drivers/video/cobalt_lcdfb.c
+++ b/drivers/video/cobalt_lcdfb.c
@@ -123,7 +123,7 @@ static void lcd_clear(struct fb_info *info)
lcd_write_control(info, LCD_RESET);
}
-static struct fb_fix_screeninfo cobalt_lcdfb_fix __initdata = {
+static struct fb_fix_screeninfo cobalt_lcdfb_fix __devinitdata = {
.id = "cobalt-lcd",
.type = FB_TYPE_TEXT,
.type_aux = FB_AUX_TEXT_MDA,
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
index 772ba3f45e6f..3aa50bc276eb 100644
--- a/drivers/video/mx3fb.c
+++ b/drivers/video/mx3fb.c
@@ -387,7 +387,8 @@ static void sdc_disable_channel(struct mx3fb_info *mx3_fbi)
spin_unlock_irqrestore(&mx3fb->lock, flags);
- mx3_fbi->txd->chan->device->device_terminate_all(mx3_fbi->txd->chan);
+ mx3_fbi->txd->chan->device->device_control(mx3_fbi->txd->chan,
+ DMA_TERMINATE_ALL);
mx3_fbi->txd = NULL;
mx3_fbi->cookie = -EINVAL;
}
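
The mx3fb hunk tracks the dmaengine API change that folds per-operation hooks such as device_terminate_all() into a single device_control() entry point taking a command code. A sketch matching the two-argument call shape this tree uses, with a local channel variable for readability:

    struct dma_chan *chan = mx3_fbi->txd->chan;

    /* ask the DMA engine driver to abort all descriptors queued on chan */
    chan->device->device_control(chan, DMA_TERMINATE_ALL);
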
diff --git a/drivers/video/omap2/displays/Kconfig b/drivers/video/omap2/displays/Kconfig
index dfb57ee50861..5f568a2d3cfa 100644
--- a/drivers/video/omap2/displays/Kconfig
+++ b/drivers/video/omap2/displays/Kconfig
@@ -33,8 +33,14 @@ config PANEL_TOPPOLY_TDO35S
config PANEL_TPO_TD043MTEA1
tristate "TPO TD043MTEA1 LCD Panel"
- depends on OMAP2_DSS && I2C
+ depends on OMAP2_DSS && SPI
help
LCD Panel used in OMAP3 Pandora
+config PANEL_ACX565AKM
+ tristate "ACX565AKM Panel"
+ depends on OMAP2_DSS_SDI
+ select BACKLIGHT_CLASS_DEVICE
+ help
+ This is the LCD panel used on Nokia N900
endmenu
diff --git a/drivers/video/omap2/displays/Makefile b/drivers/video/omap2/displays/Makefile
index e2bb32168dee..aa386095d7c4 100644
--- a/drivers/video/omap2/displays/Makefile
+++ b/drivers/video/omap2/displays/Makefile
@@ -5,3 +5,4 @@ obj-$(CONFIG_PANEL_SHARP_LQ043T1DG01) += panel-sharp-lq043t1dg01.o
obj-$(CONFIG_PANEL_TAAL) += panel-taal.o
obj-$(CONFIG_PANEL_TOPPOLY_TDO35S) += panel-toppoly-tdo35s.o
obj-$(CONFIG_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
+obj-$(CONFIG_PANEL_ACX565AKM) += panel-acx565akm.o
diff --git a/drivers/video/omap2/displays/panel-acx565akm.c b/drivers/video/omap2/displays/panel-acx565akm.c
new file mode 100644
index 000000000000..1f8eb70e2937
--- /dev/null
+++ b/drivers/video/omap2/displays/panel-acx565akm.c
@@ -0,0 +1,819 @@
+/*
+ * Support for ACX565AKM LCD Panel used on Nokia N900
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * Original Driver Author: Imre Deak <imre.deak@nokia.com>
+ * Based on panel-generic.c by Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Adapted to new DSS2 framework: Roger Quadros <roger.quadros@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/spi/spi.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/backlight.h>
+#include <linux/fb.h>
+
+#include <plat/display.h>
+
+#define MIPID_CMD_READ_DISP_ID 0x04
+#define MIPID_CMD_READ_RED 0x06
+#define MIPID_CMD_READ_GREEN 0x07
+#define MIPID_CMD_READ_BLUE 0x08
+#define MIPID_CMD_READ_DISP_STATUS 0x09
+#define MIPID_CMD_RDDSDR 0x0F
+#define MIPID_CMD_SLEEP_IN 0x10
+#define MIPID_CMD_SLEEP_OUT 0x11
+#define MIPID_CMD_DISP_OFF 0x28
+#define MIPID_CMD_DISP_ON 0x29
+#define MIPID_CMD_WRITE_DISP_BRIGHTNESS 0x51
+#define MIPID_CMD_READ_DISP_BRIGHTNESS 0x52
+#define MIPID_CMD_WRITE_CTRL_DISP 0x53
+
+#define CTRL_DISP_BRIGHTNESS_CTRL_ON (1 << 5)
+#define CTRL_DISP_AMBIENT_LIGHT_CTRL_ON (1 << 4)
+#define CTRL_DISP_BACKLIGHT_ON (1 << 2)
+#define CTRL_DISP_AUTO_BRIGHTNESS_ON (1 << 1)
+
+#define MIPID_CMD_READ_CTRL_DISP 0x54
+#define MIPID_CMD_WRITE_CABC 0x55
+#define MIPID_CMD_READ_CABC 0x56
+
+#define MIPID_VER_LPH8923 3
+#define MIPID_VER_LS041Y3 4
+#define MIPID_VER_L4F00311 8
+#define MIPID_VER_ACX565AKM 9
+
+struct acx565akm_device {
+ char *name;
+ int enabled;
+ int model;
+ int revision;
+ u8 display_id[3];
+ unsigned has_bc:1;
+ unsigned has_cabc:1;
+ unsigned cabc_mode;
+ unsigned long hw_guard_end; /* next value of jiffies
+ when we can issue the
+ next sleep in/out command */
+ unsigned long hw_guard_wait; /* max guard time in jiffies */
+
+ struct spi_device *spi;
+ struct mutex mutex;
+
+ struct omap_dss_device *dssdev;
+ struct backlight_device *bl_dev;
+};
+
+static struct acx565akm_device acx_dev;
+static int acx565akm_bl_update_status(struct backlight_device *dev);
+
+/*--------------------MIPID interface-----------------------------*/
+
+static void acx565akm_transfer(struct acx565akm_device *md, int cmd,
+ const u8 *wbuf, int wlen, u8 *rbuf, int rlen)
+{
+ struct spi_message m;
+ struct spi_transfer *x, xfer[5];
+ int r;
+
+ BUG_ON(md->spi == NULL);
+
+ spi_message_init(&m);
+
+ memset(xfer, 0, sizeof(xfer));
+ x = &xfer[0];
+
+ cmd &= 0xff;
+ x->tx_buf = &cmd;
+ x->bits_per_word = 9;
+ x->len = 2;
+
+ if (rlen > 1 && wlen == 0) {
+ /*
+ * Between the command and the response data there is a
+ * dummy clock cycle. Add an extra bit after the command
+ * word to account for this.
+ */
+ x->bits_per_word = 10;
+ cmd <<= 1;
+ }
+ spi_message_add_tail(x, &m);
+
+ if (wlen) {
+ x++;
+ x->tx_buf = wbuf;
+ x->len = wlen;
+ x->bits_per_word = 9;
+ spi_message_add_tail(x, &m);
+ }
+
+ if (rlen) {
+ x++;
+ x->rx_buf = rbuf;
+ x->len = rlen;
+ spi_message_add_tail(x, &m);
+ }
+
+ r = spi_sync(md->spi, &m);
+ if (r < 0)
+ dev_dbg(&md->spi->dev, "spi_sync %d\n", r);
+}
+
+static inline void acx565akm_cmd(struct acx565akm_device *md, int cmd)
+{
+ acx565akm_transfer(md, cmd, NULL, 0, NULL, 0);
+}
+
+static inline void acx565akm_write(struct acx565akm_device *md,
+ int reg, const u8 *buf, int len)
+{
+ acx565akm_transfer(md, reg, buf, len, NULL, 0);
+}
+
+static inline void acx565akm_read(struct acx565akm_device *md,
+ int reg, u8 *buf, int len)
+{
+ acx565akm_transfer(md, reg, NULL, 0, buf, len);
+}
+
+static void hw_guard_start(struct acx565akm_device *md, int guard_msec)
+{
+ md->hw_guard_wait = msecs_to_jiffies(guard_msec);
+ md->hw_guard_end = jiffies + md->hw_guard_wait;
+}
+
+static void hw_guard_wait(struct acx565akm_device *md)
+{
+ unsigned long wait = md->hw_guard_end - jiffies;
+
+ if ((long)wait > 0 && wait <= md->hw_guard_wait) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(wait);
+ }
+}
+
+/*----------------------MIPID wrappers----------------------------*/
+
+static void set_sleep_mode(struct acx565akm_device *md, int on)
+{
+ int cmd;
+
+ if (on)
+ cmd = MIPID_CMD_SLEEP_IN;
+ else
+ cmd = MIPID_CMD_SLEEP_OUT;
+ /*
+ * We have to keep 120msec between sleep in/out commands.
+ * (8.2.15, 8.2.16).
+ */
+ hw_guard_wait(md);
+ acx565akm_cmd(md, cmd);
+ hw_guard_start(md, 120);
+}
+
+static void set_display_state(struct acx565akm_device *md, int enabled)
+{
+ int cmd = enabled ? MIPID_CMD_DISP_ON : MIPID_CMD_DISP_OFF;
+
+ acx565akm_cmd(md, cmd);
+}
+
+static int panel_enabled(struct acx565akm_device *md)
+{
+ u32 disp_status;
+ int enabled;
+
+ acx565akm_read(md, MIPID_CMD_READ_DISP_STATUS, (u8 *)&disp_status, 4);
+ disp_status = __be32_to_cpu(disp_status);
+ enabled = (disp_status & (1 << 17)) && (disp_status & (1 << 10));
+ dev_dbg(&md->spi->dev,
+ "LCD panel %senabled by bootloader (status 0x%04x)\n",
+ enabled ? "" : "not ", disp_status);
+ return enabled;
+}
+
+static int panel_detect(struct acx565akm_device *md)
+{
+ acx565akm_read(md, MIPID_CMD_READ_DISP_ID, md->display_id, 3);
+ dev_dbg(&md->spi->dev, "MIPI display ID: %02x%02x%02x\n",
+ md->display_id[0], md->display_id[1], md->display_id[2]);
+
+ switch (md->display_id[0]) {
+ case 0x10:
+ md->model = MIPID_VER_ACX565AKM;
+ md->name = "acx565akm";
+ md->has_bc = 1;
+ md->has_cabc = 1;
+ break;
+ case 0x29:
+ md->model = MIPID_VER_L4F00311;
+ md->name = "l4f00311";
+ break;
+ case 0x45:
+ md->model = MIPID_VER_LPH8923;
+ md->name = "lph8923";
+ break;
+ case 0x83:
+ md->model = MIPID_VER_LS041Y3;
+ md->name = "ls041y3";
+ break;
+ default:
+ md->name = "unknown";
+ dev_err(&md->spi->dev, "invalid display ID\n");
+ return -ENODEV;
+ }
+
+ md->revision = md->display_id[1];
+
+ dev_info(&md->spi->dev, "omapfb: %s rev %02x LCD detected\n",
+ md->name, md->revision);
+
+ return 0;
+}
+
+/*----------------------Backlight Control-------------------------*/
+
+static void enable_backlight_ctrl(struct acx565akm_device *md, int enable)
+{
+ u16 ctrl;
+
+ acx565akm_read(md, MIPID_CMD_READ_CTRL_DISP, (u8 *)&ctrl, 1);
+ if (enable) {
+ ctrl |= CTRL_DISP_BRIGHTNESS_CTRL_ON |
+ CTRL_DISP_BACKLIGHT_ON;
+ } else {
+ ctrl &= ~(CTRL_DISP_BRIGHTNESS_CTRL_ON |
+ CTRL_DISP_BACKLIGHT_ON);
+ }
+
+ ctrl |= 1 << 8;
+ acx565akm_write(md, MIPID_CMD_WRITE_CTRL_DISP, (u8 *)&ctrl, 2);
+}
+
+static void set_cabc_mode(struct acx565akm_device *md, unsigned mode)
+{
+ u16 cabc_ctrl;
+
+ md->cabc_mode = mode;
+ if (!md->enabled)
+ return;
+ cabc_ctrl = 0;
+ acx565akm_read(md, MIPID_CMD_READ_CABC, (u8 *)&cabc_ctrl, 1);
+ cabc_ctrl &= ~3;
+ cabc_ctrl |= (1 << 8) | (mode & 3);
+ acx565akm_write(md, MIPID_CMD_WRITE_CABC, (u8 *)&cabc_ctrl, 2);
+}
+
+static unsigned get_cabc_mode(struct acx565akm_device *md)
+{
+ return md->cabc_mode;
+}
+
+static unsigned get_hw_cabc_mode(struct acx565akm_device *md)
+{
+ u8 cabc_ctrl;
+
+ acx565akm_read(md, MIPID_CMD_READ_CABC, &cabc_ctrl, 1);
+ return cabc_ctrl & 3;
+}
+
+static void acx565akm_set_brightness(struct acx565akm_device *md, int level)
+{
+ int bv;
+
+ bv = level | (1 << 8);
+ acx565akm_write(md, MIPID_CMD_WRITE_DISP_BRIGHTNESS, (u8 *)&bv, 2);
+
+ if (level)
+ enable_backlight_ctrl(md, 1);
+ else
+ enable_backlight_ctrl(md, 0);
+}
+
+static int acx565akm_get_actual_brightness(struct acx565akm_device *md)
+{
+ u8 bv;
+
+ acx565akm_read(md, MIPID_CMD_READ_DISP_BRIGHTNESS, &bv, 1);
+
+ return bv;
+}
+
+
+static int acx565akm_bl_update_status(struct backlight_device *dev)
+{
+ struct acx565akm_device *md = dev_get_drvdata(&dev->dev);
+ int r;
+ int level;
+
+ dev_dbg(&md->spi->dev, "%s\n", __func__);
+
+ mutex_lock(&md->mutex);
+
+ if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
+ dev->props.power == FB_BLANK_UNBLANK)
+ level = dev->props.brightness;
+ else
+ level = 0;
+
+ r = 0;
+ if (md->has_bc)
+ acx565akm_set_brightness(md, level);
+ else if (md->dssdev->set_backlight)
+ r = md->dssdev->set_backlight(md->dssdev, level);
+ else
+ r = -ENODEV;
+
+ mutex_unlock(&md->mutex);
+
+ return r;
+}
+
+static int acx565akm_bl_get_intensity(struct backlight_device *dev)
+{
+ struct acx565akm_device *md = dev_get_drvdata(&dev->dev);
+
+ dev_dbg(&dev->dev, "%s\n", __func__);
+
+ if (!md->has_bc && md->dssdev->set_backlight == NULL)
+ return -ENODEV;
+
+ if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
+ dev->props.power == FB_BLANK_UNBLANK) {
+ if (md->has_bc)
+ return acx565akm_get_actual_brightness(md);
+ else
+ return dev->props.brightness;
+ }
+
+ return 0;
+}
+
+static const struct backlight_ops acx565akm_bl_ops = {
+ .get_brightness = acx565akm_bl_get_intensity,
+ .update_status = acx565akm_bl_update_status,
+};
+
+/*--------------------Auto Brightness control via Sysfs---------------------*/
+
+static const char *cabc_modes[] = {
+ "off", /* always used when CABC is not supported */
+ "ui",
+ "still-image",
+ "moving-image",
+};
+
+static ssize_t show_cabc_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct acx565akm_device *md = dev_get_drvdata(dev);
+ const char *mode_str;
+ int mode;
+ int len;
+
+ if (!md->has_cabc)
+ mode = 0;
+ else
+ mode = get_cabc_mode(md);
+ mode_str = "unknown";
+ if (mode >= 0 && mode < ARRAY_SIZE(cabc_modes))
+ mode_str = cabc_modes[mode];
+ len = snprintf(buf, PAGE_SIZE, "%s\n", mode_str);
+
+ return len < PAGE_SIZE - 1 ? len : PAGE_SIZE - 1;
+}
+
+static ssize_t store_cabc_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct acx565akm_device *md = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cabc_modes); i++) {
+ const char *mode_str = cabc_modes[i];
+ int cmp_len = strlen(mode_str);
+
+ if (count > 0 && buf[count - 1] == '\n')
+ count--;
+ if (count != cmp_len)
+ continue;
+
+ if (strncmp(buf, mode_str, cmp_len) == 0)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(cabc_modes))
+ return -EINVAL;
+
+ if (!md->has_cabc && i != 0)
+ return -EINVAL;
+
+ mutex_lock(&md->mutex);
+ set_cabc_mode(md, i);
+ mutex_unlock(&md->mutex);
+
+ return count;
+}
+
+static ssize_t show_cabc_available_modes(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct acx565akm_device *md = dev_get_drvdata(dev);
+ int len;
+ int i;
+
+ if (!md->has_cabc)
+ return snprintf(buf, PAGE_SIZE, "%s\n", cabc_modes[0]);
+
+ for (i = 0, len = 0;
+ len < PAGE_SIZE && i < ARRAY_SIZE(cabc_modes); i++)
+ len += snprintf(&buf[len], PAGE_SIZE - len, "%s%s%s",
+ i ? " " : "", cabc_modes[i],
+ i == ARRAY_SIZE(cabc_modes) - 1 ? "\n" : "");
+
+ return len < PAGE_SIZE ? len : PAGE_SIZE - 1;
+}
+
+static DEVICE_ATTR(cabc_mode, S_IRUGO | S_IWUSR,
+ show_cabc_mode, store_cabc_mode);
+static DEVICE_ATTR(cabc_available_modes, S_IRUGO,
+ show_cabc_available_modes, NULL);
+
+static struct attribute *bldev_attrs[] = {
+ &dev_attr_cabc_mode.attr,
+ &dev_attr_cabc_available_modes.attr,
+ NULL,
+};
+
+static struct attribute_group bldev_attr_group = {
+ .attrs = bldev_attrs,
+};
+
+
+/*---------------------------ACX Panel----------------------------*/
+
+static int acx_get_recommended_bpp(struct omap_dss_device *dssdev)
+{
+ return 16;
+}
+
+static struct omap_video_timings acx_panel_timings = {
+ .x_res = 800,
+ .y_res = 480,
+ .pixel_clock = 24000,
+ .hfp = 28,
+ .hsw = 4,
+ .hbp = 24,
+ .vfp = 3,
+ .vsw = 3,
+ .vbp = 4,
+};
+
+static int acx_panel_probe(struct omap_dss_device *dssdev)
+{
+ int r;
+ struct acx565akm_device *md = &acx_dev;
+ struct backlight_device *bldev;
+ int max_brightness, brightness;
+ struct backlight_properties props;
+
+ dev_dbg(&dssdev->dev, "%s\n", __func__);
+ dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+ OMAP_DSS_LCD_IHS;
+ /* FIXME AC bias ? */
+ dssdev->panel.timings = acx_panel_timings;
+
+ if (dssdev->platform_enable)
+ dssdev->platform_enable(dssdev);
+ /*
+ * After reset we have to wait 5 msec before the first
+ * command can be sent.
+ */
+ msleep(5);
+
+ md->enabled = panel_enabled(md);
+
+ r = panel_detect(md);
+ if (r) {
+ dev_err(&dssdev->dev, "%s panel detect error\n", __func__);
+ if (!md->enabled && dssdev->platform_disable)
+ dssdev->platform_disable(dssdev);
+ return r;
+ }
+
+ mutex_lock(&acx_dev.mutex);
+ acx_dev.dssdev = dssdev;
+ mutex_unlock(&acx_dev.mutex);
+
+ if (!md->enabled) {
+ if (dssdev->platform_disable)
+ dssdev->platform_disable(dssdev);
+ }
+
+ /*------- Backlight control --------*/
+
+ props.fb_blank = FB_BLANK_UNBLANK;
+ props.power = FB_BLANK_UNBLANK;
+
+ bldev = backlight_device_register("acx565akm", &md->spi->dev,
+ md, &acx565akm_bl_ops, &props);
+ md->bl_dev = bldev;
+ if (md->has_cabc) {
+ r = sysfs_create_group(&bldev->dev.kobj, &bldev_attr_group);
+ if (r) {
+ dev_err(&bldev->dev,
+ "%s failed to create sysfs files\n", __func__);
+ backlight_device_unregister(bldev);
+ return r;
+ }
+ md->cabc_mode = get_hw_cabc_mode(md);
+ }
+
+ if (md->has_bc)
+ max_brightness = 255;
+ else
+ max_brightness = dssdev->max_backlight_level;
+
+ if (md->has_bc)
+ brightness = acx565akm_get_actual_brightness(md);
+ else if (dssdev->get_backlight)
+ brightness = dssdev->get_backlight(dssdev);
+ else
+ brightness = 0;
+
+ bldev->props.max_brightness = max_brightness;
+ bldev->props.brightness = brightness;
+
+ acx565akm_bl_update_status(bldev);
+ return 0;
+}
+
+static void acx_panel_remove(struct omap_dss_device *dssdev)
+{
+ struct acx565akm_device *md = &acx_dev;
+
+ dev_dbg(&dssdev->dev, "%s\n", __func__);
+ sysfs_remove_group(&md->bl_dev->dev.kobj, &bldev_attr_group);
+ backlight_device_unregister(md->bl_dev);
+ mutex_lock(&acx_dev.mutex);
+ acx_dev.dssdev = NULL;
+ mutex_unlock(&acx_dev.mutex);
+}
+
+static int acx_panel_power_on(struct omap_dss_device *dssdev)
+{
+ struct acx565akm_device *md = &acx_dev;
+ int r;
+
+ dev_dbg(&dssdev->dev, "%s\n", __func__);
+
+ mutex_lock(&md->mutex);
+
+ r = omapdss_sdi_display_enable(dssdev);
+ if (r) {
+ pr_err("%s sdi enable failed\n", __func__);
+ return r;
+ }
+
+ /*FIXME tweak me */
+ msleep(50);
+
+ if (dssdev->platform_enable) {
+ r = dssdev->platform_enable(dssdev);
+ if (r)
+ goto fail;
+ }
+
+ if (md->enabled) {
+ dev_dbg(&md->spi->dev, "panel already enabled\n");
+ mutex_unlock(&md->mutex);
+ return 0;
+ }
+
+ /*
+ * We have to meet all the following delay requirements:
+ * 1. tRW: reset pulse width 10usec (7.12.1)
+ * 2. tRT: reset cancel time 5msec (7.12.1)
+ * 3. Providing PCLK,HS,VS signals for 2 frames = ~50msec worst
+ * case (7.6.2)
+ * 4. 120msec before the sleep out command (7.12.1)
+ */
+ msleep(120);
+
+ set_sleep_mode(md, 0);
+ md->enabled = 1;
+
+ /* 5msec between sleep out and the next command. (8.2.16) */
+ msleep(5);
+ set_display_state(md, 1);
+ set_cabc_mode(md, md->cabc_mode);
+
+ mutex_unlock(&md->mutex);
+
+ return acx565akm_bl_update_status(md->bl_dev);
+fail:
+ omapdss_sdi_display_disable(dssdev);
+ return r;
+}
+
+static void acx_panel_power_off(struct omap_dss_device *dssdev)
+{
+ struct acx565akm_device *md = &acx_dev;
+
+ dev_dbg(&dssdev->dev, "%s\n", __func__);
+
+ mutex_lock(&md->mutex);
+
+ if (!md->enabled) {
+ mutex_unlock(&md->mutex);
+ return;
+ }
+ set_display_state(md, 0);
+ set_sleep_mode(md, 1);
+ md->enabled = 0;
+ /*
+ * We have to provide PCLK,HS,VS signals for 2 frames (worst case
+ * ~50msec) after sending the sleep in command and asserting the
+ * reset signal. We probably could assert the reset w/o the delay
+ * but we still delay to avoid possible artifacts. (7.6.1)
+ */
+ msleep(50);
+
+ if (dssdev->platform_disable)
+ dssdev->platform_disable(dssdev);
+
+ /* FIXME need to tweak this delay */
+ msleep(100);
+
+ omapdss_sdi_display_disable(dssdev);
+
+ mutex_unlock(&md->mutex);
+}
+
+static int acx_panel_enable(struct omap_dss_device *dssdev)
+{
+ int r;
+
+ dev_dbg(&dssdev->dev, "%s\n", __func__);
+ r = acx_panel_power_on(dssdev);
+
+ if (r)
+ return r;
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+ return 0;
+}
+
+static void acx_panel_disable(struct omap_dss_device *dssdev)
+{
+ dev_dbg(&dssdev->dev, "%s\n", __func__);
+ acx_panel_power_off(dssdev);
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+}
+
+static int acx_panel_suspend(struct omap_dss_device *dssdev)
+{
+ dev_dbg(&dssdev->dev, "%s\n", __func__);
+ acx_panel_power_off(dssdev);
+ dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
+ return 0;
+}
+
+static int acx_panel_resume(struct omap_dss_device *dssdev)
+{
+ int r;
+
+ dev_dbg(&dssdev->dev, "%s\n", __func__);
+ r = acx_panel_power_on(dssdev);
+ if (r)
+ return r;
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+ return 0;
+}
+
+static void acx_panel_set_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ int r;
+
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+ omapdss_sdi_display_disable(dssdev);
+
+ dssdev->panel.timings = *timings;
+
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
+ r = omapdss_sdi_display_enable(dssdev);
+ if (r)
+ dev_err(&dssdev->dev, "%s enable failed\n", __func__);
+ }
+}
+
+static void acx_panel_get_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ *timings = dssdev->panel.timings;
+}
+
+static int acx_panel_check_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ return 0;
+}
+
+
+static struct omap_dss_driver acx_panel_driver = {
+ .probe = acx_panel_probe,
+ .remove = acx_panel_remove,
+
+ .enable = acx_panel_enable,
+ .disable = acx_panel_disable,
+ .suspend = acx_panel_suspend,
+ .resume = acx_panel_resume,
+
+ .set_timings = acx_panel_set_timings,
+ .get_timings = acx_panel_get_timings,
+ .check_timings = acx_panel_check_timings,
+
+ .get_recommended_bpp = acx_get_recommended_bpp,
+
+ .driver = {
+ .name = "panel-acx565akm",
+ .owner = THIS_MODULE,
+ },
+};
+
+/*--------------------SPI probe-------------------------*/
+
+static int acx565akm_spi_probe(struct spi_device *spi)
+{
+ struct acx565akm_device *md = &acx_dev;
+
+ dev_dbg(&spi->dev, "%s\n", __func__);
+
+ spi->mode = SPI_MODE_3;
+ md->spi = spi;
+ mutex_init(&md->mutex);
+ dev_set_drvdata(&spi->dev, md);
+
+ omap_dss_register_driver(&acx_panel_driver);
+
+ return 0;
+}
+
+static int acx565akm_spi_remove(struct spi_device *spi)
+{
+ struct acx565akm_device *md = dev_get_drvdata(&spi->dev);
+
+ dev_dbg(&md->spi->dev, "%s\n", __func__);
+ omap_dss_unregister_driver(&acx_panel_driver);
+
+ return 0;
+}
+
+static struct spi_driver acx565akm_spi_driver = {
+ .driver = {
+ .name = "acx565akm",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = acx565akm_spi_probe,
+ .remove = __devexit_p(acx565akm_spi_remove),
+};
+
+static int __init acx565akm_init(void)
+{
+ return spi_register_driver(&acx565akm_spi_driver);
+}
+
+static void __exit acx565akm_exit(void)
+{
+ spi_unregister_driver(&acx565akm_spi_driver);
+}
+
+module_init(acx565akm_init);
+module_exit(acx565akm_exit);
+
+MODULE_AUTHOR("Nokia Corporation");
+MODULE_DESCRIPTION("acx565akm LCD Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
index 8d51a5e6341c..7d9eb2b1f5af 100644
--- a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
@@ -20,10 +20,17 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/backlight.h>
+#include <linux/fb.h>
#include <linux/err.h>
+#include <linux/slab.h>
#include <plat/display.h>
+struct sharp_data {
+ struct backlight_device *bl;
+};
+
static struct omap_video_timings sharp_ls_timings = {
.x_res = 480,
.y_res = 640,
@@ -39,18 +46,89 @@ static struct omap_video_timings sharp_ls_timings = {
.vbp = 1,
};
+static int sharp_ls_bl_update_status(struct backlight_device *bl)
+{
+ struct omap_dss_device *dssdev = dev_get_drvdata(&bl->dev);
+ int level;
+
+ if (!dssdev->set_backlight)
+ return -EINVAL;
+
+ if (bl->props.fb_blank == FB_BLANK_UNBLANK &&
+ bl->props.power == FB_BLANK_UNBLANK)
+ level = bl->props.brightness;
+ else
+ level = 0;
+
+ return dssdev->set_backlight(dssdev, level);
+}
+
+static int sharp_ls_bl_get_brightness(struct backlight_device *bl)
+{
+ if (bl->props.fb_blank == FB_BLANK_UNBLANK &&
+ bl->props.power == FB_BLANK_UNBLANK)
+ return bl->props.brightness;
+
+ return 0;
+}
+
+static const struct backlight_ops sharp_ls_bl_ops = {
+ .get_brightness = sharp_ls_bl_get_brightness,
+ .update_status = sharp_ls_bl_update_status,
+};
+
+
+
static int sharp_ls_panel_probe(struct omap_dss_device *dssdev)
{
+ struct backlight_properties props;
+ struct backlight_device *bl;
+ struct sharp_data *sd;
+ int r;
+
dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
OMAP_DSS_LCD_IHS;
dssdev->panel.acb = 0x28;
dssdev->panel.timings = sharp_ls_timings;
+ sd = kzalloc(sizeof(*sd), GFP_KERNEL);
+ if (!sd)
+ return -ENOMEM;
+
+ dev_set_drvdata(&dssdev->dev, sd);
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.max_brightness = dssdev->max_backlight_level;
+
+ bl = backlight_device_register("sharp-ls", &dssdev->dev, dssdev,
+ &sharp_ls_bl_ops, &props);
+ if (IS_ERR(bl)) {
+ r = PTR_ERR(bl);
+ kfree(sd);
+ return r;
+ }
+ sd->bl = bl;
+
+ bl->props.fb_blank = FB_BLANK_UNBLANK;
+ bl->props.power = FB_BLANK_UNBLANK;
+ bl->props.brightness = dssdev->max_backlight_level;
+ r = sharp_ls_bl_update_status(bl);
+ if (r < 0)
+ dev_err(&dssdev->dev, "failed to set lcd brightness\n");
+
return 0;
}
static void sharp_ls_panel_remove(struct omap_dss_device *dssdev)
{
+ struct sharp_data *sd = dev_get_drvdata(&dssdev->dev);
+ struct backlight_device *bl = sd->bl;
+
+ bl->props.power = FB_BLANK_POWERDOWN;
+ sharp_ls_bl_update_status(bl);
+ backlight_device_unregister(bl);
+
+ kfree(sd);
}
static int sharp_ls_power_on(struct omap_dss_device *dssdev)
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index 4f3988a41082..aaf5d308a046 100644
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -31,6 +31,7 @@
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
+#include <linux/mutex.h>
#include <plat/display.h>
@@ -67,6 +68,8 @@
static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable);
struct taal_data {
+ struct mutex lock;
+
struct backlight_device *bldev;
unsigned long hw_guard_end; /* next value of jiffies when we can
@@ -510,6 +513,8 @@ static int taal_probe(struct omap_dss_device *dssdev)
}
td->dssdev = dssdev;
+ mutex_init(&td->lock);
+
td->esd_wq = create_singlethread_workqueue("taal_esd");
if (td->esd_wq == NULL) {
dev_err(&dssdev->dev, "can't create ESD workqueue\n");
@@ -697,10 +702,9 @@ static int taal_power_on(struct omap_dss_device *dssdev)
return 0;
err:
- dsi_bus_unlock();
-
omapdss_dsi_display_disable(dssdev);
err0:
+ dsi_bus_unlock();
if (dssdev->platform_disable)
dssdev->platform_disable(dssdev);
@@ -733,54 +737,96 @@ static void taal_power_off(struct omap_dss_device *dssdev)
static int taal_enable(struct omap_dss_device *dssdev)
{
+ struct taal_data *td = dev_get_drvdata(&dssdev->dev);
int r;
+
dev_dbg(&dssdev->dev, "enable\n");
- if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED)
- return -EINVAL;
+ mutex_lock(&td->lock);
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
+ r = -EINVAL;
+ goto err;
+ }
r = taal_power_on(dssdev);
if (r)
- return r;
+ goto err;
dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+ mutex_unlock(&td->lock);
+
+ return 0;
+err:
+ dev_dbg(&dssdev->dev, "enable failed\n");
+ mutex_unlock(&td->lock);
return r;
}
static void taal_disable(struct omap_dss_device *dssdev)
{
+ struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+
dev_dbg(&dssdev->dev, "disable\n");
+ mutex_lock(&td->lock);
+
if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
taal_power_off(dssdev);
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+
+ mutex_unlock(&td->lock);
}
static int taal_suspend(struct omap_dss_device *dssdev)
{
+ struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ int r;
+
dev_dbg(&dssdev->dev, "suspend\n");
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
- return -EINVAL;
+ mutex_lock(&td->lock);
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
+ r = -EINVAL;
+ goto err;
+ }
taal_power_off(dssdev);
dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
+ mutex_unlock(&td->lock);
+
return 0;
+err:
+ mutex_unlock(&td->lock);
+ return r;
}
static int taal_resume(struct omap_dss_device *dssdev)
{
+ struct taal_data *td = dev_get_drvdata(&dssdev->dev);
int r;
+
dev_dbg(&dssdev->dev, "resume\n");
- if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED)
- return -EINVAL;
+ mutex_lock(&td->lock);
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) {
+ r = -EINVAL;
+ goto err;
+ }
r = taal_power_on(dssdev);
dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ mutex_unlock(&td->lock);
+
+ return r;
+err:
+ mutex_unlock(&td->lock);
return r;
}
@@ -799,6 +845,7 @@ static int taal_update(struct omap_dss_device *dssdev,
dev_dbg(&dssdev->dev, "update %d, %d, %d x %d\n", x, y, w, h);
+ mutex_lock(&td->lock);
dsi_bus_lock();
if (!td->enabled) {
@@ -820,18 +867,24 @@ static int taal_update(struct omap_dss_device *dssdev,
goto err;
/* note: no bus_unlock here. unlock is in framedone_cb */
+ mutex_unlock(&td->lock);
return 0;
err:
dsi_bus_unlock();
+ mutex_unlock(&td->lock);
return r;
}
static int taal_sync(struct omap_dss_device *dssdev)
{
+ struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+
dev_dbg(&dssdev->dev, "sync\n");
+ mutex_lock(&td->lock);
dsi_bus_lock();
dsi_bus_unlock();
+ mutex_unlock(&td->lock);
dev_dbg(&dssdev->dev, "sync done\n");
@@ -861,13 +914,16 @@ static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable)
static int taal_enable_te(struct omap_dss_device *dssdev, bool enable)
{
+ struct taal_data *td = dev_get_drvdata(&dssdev->dev);
int r;
+ mutex_lock(&td->lock);
dsi_bus_lock();
r = _taal_enable_te(dssdev, enable);
dsi_bus_unlock();
+ mutex_unlock(&td->lock);
return r;
}
@@ -875,7 +931,13 @@ static int taal_enable_te(struct omap_dss_device *dssdev, bool enable)
static int taal_get_te(struct omap_dss_device *dssdev)
{
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
- return td->te_enabled;
+ int r;
+
+ mutex_lock(&td->lock);
+ r = td->te_enabled;
+ mutex_unlock(&td->lock);
+
+ return r;
}
static int taal_rotate(struct omap_dss_device *dssdev, u8 rotate)
@@ -885,6 +947,7 @@ static int taal_rotate(struct omap_dss_device *dssdev, u8 rotate)
dev_dbg(&dssdev->dev, "rotate %d\n", rotate);
+ mutex_lock(&td->lock);
dsi_bus_lock();
if (td->enabled) {
@@ -896,16 +959,24 @@ static int taal_rotate(struct omap_dss_device *dssdev, u8 rotate)
td->rotate = rotate;
dsi_bus_unlock();
+ mutex_unlock(&td->lock);
return 0;
err:
dsi_bus_unlock();
+ mutex_unlock(&td->lock);
return r;
}
static u8 taal_get_rotate(struct omap_dss_device *dssdev)
{
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
- return td->rotate;
+ int r;
+
+ mutex_lock(&td->lock);
+ r = td->rotate;
+ mutex_unlock(&td->lock);
+
+ return r;
}
static int taal_mirror(struct omap_dss_device *dssdev, bool enable)
@@ -915,6 +986,7 @@ static int taal_mirror(struct omap_dss_device *dssdev, bool enable)
dev_dbg(&dssdev->dev, "mirror %d\n", enable);
+ mutex_lock(&td->lock);
dsi_bus_lock();
if (td->enabled) {
r = taal_set_addr_mode(td->rotate, enable);
@@ -925,23 +997,33 @@ static int taal_mirror(struct omap_dss_device *dssdev, bool enable)
td->mirror = enable;
dsi_bus_unlock();
+ mutex_unlock(&td->lock);
return 0;
err:
dsi_bus_unlock();
+ mutex_unlock(&td->lock);
return r;
}
static bool taal_get_mirror(struct omap_dss_device *dssdev)
{
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
- return td->mirror;
+ int r;
+
+ mutex_lock(&td->lock);
+ r = td->mirror;
+ mutex_unlock(&td->lock);
+
+ return r;
}
static int taal_run_test(struct omap_dss_device *dssdev, int test_num)
{
+ struct taal_data *td = dev_get_drvdata(&dssdev->dev);
u8 id1, id2, id3;
int r;
+ mutex_lock(&td->lock);
dsi_bus_lock();
r = taal_dcs_read_1(DCS_GET_ID1, &id1);
@@ -955,9 +1037,11 @@ static int taal_run_test(struct omap_dss_device *dssdev, int test_num)
goto err;
dsi_bus_unlock();
+ mutex_unlock(&td->lock);
return 0;
err:
dsi_bus_unlock();
+ mutex_unlock(&td->lock);
return r;
}
@@ -971,12 +1055,16 @@ static int taal_memory_read(struct omap_dss_device *dssdev,
unsigned buf_used = 0;
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
- if (!td->enabled)
- return -ENODEV;
-
if (size < w * h * 3)
return -ENOMEM;
+ mutex_lock(&td->lock);
+
+ if (!td->enabled) {
+ r = -ENODEV;
+ goto err1;
+ }
+
size = min(w * h * 3,
dssdev->panel.timings.x_res *
dssdev->panel.timings.y_res * 3);
@@ -995,7 +1083,7 @@ static int taal_memory_read(struct omap_dss_device *dssdev,
r = dsi_vc_set_max_rx_packet_size(TCH, plen);
if (r)
- goto err0;
+ goto err2;
while (buf_used < size) {
u8 dcs_cmd = first ? 0x2e : 0x3e;
@@ -1006,7 +1094,7 @@ static int taal_memory_read(struct omap_dss_device *dssdev,
if (r < 0) {
dev_err(&dssdev->dev, "read error\n");
- goto err;
+ goto err3;
}
buf_used += r;
@@ -1020,16 +1108,18 @@ static int taal_memory_read(struct omap_dss_device *dssdev,
dev_err(&dssdev->dev, "signal pending, "
"aborting memory read\n");
r = -ERESTARTSYS;
- goto err;
+ goto err3;
}
}
r = buf_used;
-err:
+err3:
dsi_vc_set_max_rx_packet_size(TCH, 1);
-err0:
+err2:
dsi_bus_unlock();
+err1:
+ mutex_unlock(&td->lock);
return r;
}
@@ -1041,8 +1131,12 @@ static void taal_esd_work(struct work_struct *work)
u8 state1, state2;
int r;
- if (!td->enabled)
+ mutex_lock(&td->lock);
+
+ if (!td->enabled) {
+ mutex_unlock(&td->lock);
return;
+ }
dsi_bus_lock();
@@ -1084,16 +1178,19 @@ static void taal_esd_work(struct work_struct *work)
queue_delayed_work(td->esd_wq, &td->esd_work, TAAL_ESD_CHECK_PERIOD);
+ mutex_unlock(&td->lock);
return;
err:
dev_err(&dssdev->dev, "performing LCD reset\n");
- taal_disable(dssdev);
- taal_enable(dssdev);
+ taal_power_off(dssdev);
+ taal_power_on(dssdev);
dsi_bus_unlock();
queue_delayed_work(td->esd_wq, &td->esd_work, TAAL_ESD_CHECK_PERIOD);
+
+ mutex_unlock(&td->lock);
}
static int taal_set_update_mode(struct omap_dss_device *dssdev,
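Editorial note: the taal.c hunks above all follow one pattern: each externally callable entry point now takes the new td->lock mutex before touching device state, and every error path funnels through a label that releases the locks in reverse order of acquisition. A condensed sketch of that pattern, with a placeholder body (do_hw_work() is hypothetical, standing in for whatever each entry point already did):

/* Editorial sketch of the locking pattern the taal hunks introduce. */
static int taal_op_sketch(struct omap_dss_device *dssdev)
{
	struct taal_data *td = dev_get_drvdata(&dssdev->dev);
	int r;

	mutex_lock(&td->lock);		/* serialize against other entry points */
	dsi_bus_lock();			/* then take the DSI bus, as before */

	r = do_hw_work(dssdev);		/* placeholder for the existing body */
	if (r)
		goto err;

	dsi_bus_unlock();
	mutex_unlock(&td->lock);
	return 0;
err:
	dsi_bus_unlock();		/* unwind in reverse order of acquisition */
	mutex_unlock(&td->lock);
	return r;
}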
diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig
index 87afb81b2c44..43b64403eaa4 100644
--- a/drivers/video/omap2/dss/Kconfig
+++ b/drivers/video/omap2/dss/Kconfig
@@ -36,6 +36,12 @@ config OMAP2_DSS_COLLECT_IRQ_STATS
<debugfs>/omapdss/dispc_irq for DISPC interrupts, and
<debugfs>/omapdss/dsi_irq for DSI interrupts.
+config OMAP2_DSS_DPI
+ bool "DPI support"
+ default y
+ help
+ DPI Interface. This is the Parallel Display Interface.
+
config OMAP2_DSS_RFBI
bool "RFBI support"
default n
diff --git a/drivers/video/omap2/dss/Makefile b/drivers/video/omap2/dss/Makefile
index 980c72c2db98..d71b5d9d71b1 100644
--- a/drivers/video/omap2/dss/Makefile
+++ b/drivers/video/omap2/dss/Makefile
@@ -1,5 +1,6 @@
obj-$(CONFIG_OMAP2_DSS) += omapdss.o
-omapdss-y := core.o dss.o dispc.o dpi.o display.o manager.o overlay.o
+omapdss-y := core.o dss.o dispc.o display.o manager.o overlay.o
+omapdss-$(CONFIG_OMAP2_DSS_DPI) += dpi.o
omapdss-$(CONFIG_OMAP2_DSS_RFBI) += rfbi.o
omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o
omapdss-$(CONFIG_OMAP2_DSS_SDI) += sdi.o
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
index 7ebe50b335ed..6d54467e5e20 100644
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/omap2/dss/core.c
@@ -526,11 +526,13 @@ static int omap_dss_probe(struct platform_device *pdev)
}
#endif
+#ifdef CONFIG_OMAP2_DSS_DPI
r = dpi_init(pdev);
if (r) {
DSSERR("Failed to initialize dpi\n");
goto fail0;
}
+#endif
r = dispc_init();
if (r) {
@@ -601,7 +603,9 @@ static int omap_dss_remove(struct platform_device *pdev)
venc_exit();
#endif
dispc_exit();
+#ifdef CONFIG_OMAP2_DSS_DPI
dpi_exit();
+#endif
#ifdef CONFIG_OMAP2_DSS_RFBI
rfbi_exit();
#endif
diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
index 6a74ea116d29..ef8c8529dda2 100644
--- a/drivers/video/omap2/dss/display.c
+++ b/drivers/video/omap2/dss/display.c
@@ -392,7 +392,9 @@ void dss_init_device(struct platform_device *pdev,
int r;
switch (dssdev->type) {
+#ifdef CONFIG_OMAP2_DSS_DPI
case OMAP_DISPLAY_TYPE_DPI:
+#endif
#ifdef CONFIG_OMAP2_DSS_RFBI
case OMAP_DISPLAY_TYPE_DBI:
#endif
@@ -413,9 +415,11 @@ void dss_init_device(struct platform_device *pdev,
}
switch (dssdev->type) {
+#ifdef CONFIG_OMAP2_DSS_DPI
case OMAP_DISPLAY_TYPE_DPI:
r = dpi_init_display(dssdev);
break;
+#endif
#ifdef CONFIG_OMAP2_DSS_RFBI
case OMAP_DISPLAY_TYPE_DBI:
r = rfbi_init_display(dssdev);
@@ -541,7 +545,10 @@ int dss_resume_all_devices(void)
static int dss_disable_device(struct device *dev, void *data)
{
struct omap_dss_device *dssdev = to_dss_device(dev);
- dssdev->driver->disable(dssdev);
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED)
+ dssdev->driver->disable(dssdev);
+
return 0;
}
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index 54344184dd73..24b18258654f 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -223,7 +223,13 @@ void dss_dump_clocks(struct seq_file *s)
seq_printf(s, "dpll4_ck %lu\n", dpll4_ck_rate);
- seq_printf(s, "dss1_alwon_fclk = %lu / %lu * 2 = %lu\n",
+ if (cpu_is_omap3630())
+ seq_printf(s, "dss1_alwon_fclk = %lu / %lu = %lu\n",
+ dpll4_ck_rate,
+ dpll4_ck_rate / dpll4_m4_ck_rate,
+ dss_clk_get_rate(DSS_CLK_FCK1));
+ else
+ seq_printf(s, "dss1_alwon_fclk = %lu / %lu * 2 = %lu\n",
dpll4_ck_rate,
dpll4_ck_rate / dpll4_m4_ck_rate,
dss_clk_get_rate(DSS_CLK_FCK1));
@@ -293,7 +299,8 @@ int dss_calc_clock_rates(struct dss_clock_info *cinfo)
{
unsigned long prate;
- if (cinfo->fck_div > 16 || cinfo->fck_div == 0)
+ if (cinfo->fck_div > (cpu_is_omap3630() ? 32 : 16) ||
+ cinfo->fck_div == 0)
return -EINVAL;
prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
@@ -329,7 +336,10 @@ int dss_get_clock_div(struct dss_clock_info *cinfo)
if (cpu_is_omap34xx()) {
unsigned long prate;
prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
- cinfo->fck_div = prate / (cinfo->fck / 2);
+ if (cpu_is_omap3630())
+ cinfo->fck_div = prate / (cinfo->fck);
+ else
+ cinfo->fck_div = prate / (cinfo->fck / 2);
} else {
cinfo->fck_div = 0;
}
@@ -402,10 +412,14 @@ retry:
goto found;
} else if (cpu_is_omap34xx()) {
- for (fck_div = 16; fck_div > 0; --fck_div) {
+ for (fck_div = (cpu_is_omap3630() ? 32 : 16);
+ fck_div > 0; --fck_div) {
struct dispc_clock_info cur_dispc;
- fck = prate / fck_div * 2;
+ if (cpu_is_omap3630())
+ fck = prate / fck_div;
+ else
+ fck = prate / fck_div * 2;
if (fck > DISPC_MAX_FCK)
continue;
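Editorial note on the arithmetic behind this change: on earlier OMAP34xx parts dss1_alwon_fclk is derived as the DPLL4 rate divided by the M4 divider and then doubled, with the divider limited to 16; on OMAP3630 the doubling is gone and the divider may go up to 32. For illustration, assuming a DPLL4 rate of 864 MHz (an example value, not stated in the patch):

/* Editorial sketch of the divider math, not part of the patch. */
#include <stdio.h>

int main(void)
{
	unsigned long prate = 864000000UL;	/* assumed DPLL4 rate */
	unsigned long div = 8;

	printf("omap34xx: fck = %lu\n", prate / div * 2);	/* 216 MHz */
	printf("omap3630: fck = %lu\n", prate / div);		/* 108 MHz */
	return 0;
}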
diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c
index 0820986d4a68..9e1fbe531bf0 100644
--- a/drivers/video/omap2/dss/manager.c
+++ b/drivers/video/omap2/dss/manager.c
@@ -843,6 +843,7 @@ static void configure_manager(enum omap_channel channel)
c = &dss_cache.manager_cache[channel];
+ dispc_set_default_color(channel, c->default_color);
dispc_set_trans_key(channel, c->trans_key_type, c->trans_key);
dispc_enable_trans_key(channel, c->trans_enabled);
dispc_enable_alpha_blending(channel, c->alpha_enabled);
@@ -940,6 +941,22 @@ static int configure_dispc(void)
return r;
}
+/* Make the coordinates even. There are some strange problems with OMAP and
+ * partial DSI update when the update widths are odd. */
+static void make_even(u16 *x, u16 *w)
+{
+ u16 x1, x2;
+
+ x1 = *x;
+ x2 = *x + *w;
+
+ x1 &= ~1;
+ x2 = ALIGN(x2, 2);
+
+ *x = x1;
+ *w = x2 - x1;
+}
+
/* Configure dispc for partial update. Return possibly modified update
* area */
void dss_setup_partial_planes(struct omap_dss_device *dssdev,
@@ -968,6 +985,8 @@ void dss_setup_partial_planes(struct omap_dss_device *dssdev,
return;
}
+ make_even(&x, &w);
+
spin_lock_irqsave(&dss_cache.lock, flags);
/* We need to show the whole overlay if it is scaled. So look for
@@ -1029,6 +1048,8 @@ void dss_setup_partial_planes(struct omap_dss_device *dssdev,
w = x2 - x1;
h = y2 - y1;
+ make_even(&x, &w);
+
DSSDBG("changing upd area due to ovl(%d) scaling %d,%d %dx%d\n",
i, x, y, w, h);
}
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index 12eb4042dd82..ee07a3cc22ef 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -23,13 +23,16 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/regulator/consumer.h>
#include <plat/display.h>
+#include <plat/cpu.h>
#include "dss.h"
static struct {
bool skip_init;
bool update_enabled;
+ struct regulator *vdds_sdi_reg;
} sdi;
static void sdi_basic_init(void)
@@ -57,6 +60,10 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
goto err0;
}
+ r = regulator_enable(sdi.vdds_sdi_reg);
+ if (r)
+ goto err1;
+
/* In case of skip_init sdi_init has already enabled the clocks */
if (!sdi.skip_init)
dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
@@ -115,19 +122,12 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
dssdev->manager->enable(dssdev->manager);
- if (dssdev->driver->enable) {
- r = dssdev->driver->enable(dssdev);
- if (r)
- goto err3;
- }
-
sdi.skip_init = 0;
return 0;
-err3:
- dssdev->manager->disable(dssdev->manager);
err2:
dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ regulator_disable(sdi.vdds_sdi_reg);
err1:
omap_dss_stop_device(dssdev);
err0:
@@ -137,15 +137,14 @@ EXPORT_SYMBOL(omapdss_sdi_display_enable);
void omapdss_sdi_display_disable(struct omap_dss_device *dssdev)
{
- if (dssdev->driver->disable)
- dssdev->driver->disable(dssdev);
-
dssdev->manager->disable(dssdev->manager);
dss_sdi_disable();
dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ regulator_disable(sdi.vdds_sdi_reg);
+
omap_dss_stop_device(dssdev);
}
EXPORT_SYMBOL(omapdss_sdi_display_disable);
@@ -162,6 +161,11 @@ int sdi_init(bool skip_init)
/* we store this for first display enable, then clear it */
sdi.skip_init = skip_init;
+ sdi.vdds_sdi_reg = dss_get_vdds_sdi();
+ if (IS_ERR(sdi.vdds_sdi_reg)) {
+ DSSERR("can't get VDDS_SDI regulator\n");
+ return PTR_ERR(sdi.vdds_sdi_reg);
+ }
/*
* Enable clocks already here, otherwise there would be a toggle
* of them until sdi_display_enable is called.
diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c
index f0ba5732d84a..eff35050e28a 100644
--- a/drivers/video/omap2/dss/venc.c
+++ b/drivers/video/omap2/dss/venc.c
@@ -479,12 +479,6 @@ static int venc_panel_enable(struct omap_dss_device *dssdev)
goto err1;
}
- if (dssdev->platform_enable) {
- r = dssdev->platform_enable(dssdev);
- if (r)
- goto err2;
- }
-
venc_power_on(dssdev);
venc.wss_data = 0;
@@ -494,13 +488,9 @@ static int venc_panel_enable(struct omap_dss_device *dssdev)
/* wait couple of vsyncs until enabling the LCD */
msleep(50);
- mutex_unlock(&venc.venc_lock);
-
- return r;
-err2:
- venc_power_off(dssdev);
err1:
mutex_unlock(&venc.venc_lock);
+
return r;
}
@@ -524,9 +514,6 @@ static void venc_panel_disable(struct omap_dss_device *dssdev)
/* wait at least 5 vsyncs after disabling the LCD */
msleep(100);
- if (dssdev->platform_disable)
- dssdev->platform_disable(dssdev);
-
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
end:
mutex_unlock(&venc.venc_lock);
diff --git a/drivers/video/omap2/omapfb/omapfb-ioctl.c b/drivers/video/omap2/omapfb/omapfb-ioctl.c
index 1ffa760b8545..9c7361871d78 100644
--- a/drivers/video/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/omap2/omapfb/omapfb-ioctl.c
@@ -183,13 +183,14 @@ int omapfb_update_window(struct fb_info *fbi,
struct omapfb2_device *fbdev = ofbi->fbdev;
int r;
+ if (!lock_fb_info(fbi))
+ return -ENODEV;
omapfb_lock(fbdev);
- lock_fb_info(fbi);
r = omapfb_update_window_nolock(fbi, x, y, w, h);
- unlock_fb_info(fbi);
omapfb_unlock(fbdev);
+ unlock_fb_info(fbi);
return r;
}
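Editorial note: this hunk, and the omapfb-sysfs.c hunks that follow, make the same two changes: the return value of lock_fb_info() is now checked (it can report failure, for instance while the framebuffer device is being unregistered), and where both locks are needed, lock_fb_info() is taken before omapfb_lock() and released after omapfb_unlock(), so every path acquires the two locks in the same order. The shape of the corrected critical section, sketched generically (do_update() is a hypothetical stand-in for the existing handler body):

/* Editorial sketch of the corrected lock ordering. */
static int omapfb_locked_op_sketch(struct fb_info *fbi,
				   struct omapfb2_device *fbdev)
{
	int r;

	if (!lock_fb_info(fbi))		/* outer lock; may fail during teardown */
		return -ENODEV;
	omapfb_lock(fbdev);		/* inner lock, always taken second */

	r = do_update(fbi);		/* placeholder for the real work */

	omapfb_unlock(fbdev);		/* release in reverse order */
	unlock_fb_info(fbi);
	return r;
}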
diff --git a/drivers/video/omap2/omapfb/omapfb-sysfs.c b/drivers/video/omap2/omapfb/omapfb-sysfs.c
index 62bb88f5c192..5179219128bd 100644
--- a/drivers/video/omap2/omapfb/omapfb-sysfs.c
+++ b/drivers/video/omap2/omapfb/omapfb-sysfs.c
@@ -57,7 +57,8 @@ static ssize_t store_rotate_type(struct device *dev,
if (rot_type != OMAP_DSS_ROT_DMA && rot_type != OMAP_DSS_ROT_VRFB)
return -EINVAL;
- lock_fb_info(fbi);
+ if (!lock_fb_info(fbi))
+ return -ENODEV;
r = 0;
if (rot_type == ofbi->rotation_type)
@@ -105,7 +106,8 @@ static ssize_t store_mirror(struct device *dev,
if (mirror != 0 && mirror != 1)
return -EINVAL;
- lock_fb_info(fbi);
+ if (!lock_fb_info(fbi))
+ return -ENODEV;
ofbi->mirror = mirror;
@@ -137,8 +139,9 @@ static ssize_t show_overlays(struct device *dev,
ssize_t l = 0;
int t;
+ if (!lock_fb_info(fbi))
+ return -ENODEV;
omapfb_lock(fbdev);
- lock_fb_info(fbi);
for (t = 0; t < ofbi->num_overlays; t++) {
struct omap_overlay *ovl = ofbi->overlays[t];
@@ -154,8 +157,8 @@ static ssize_t show_overlays(struct device *dev,
l += snprintf(buf + l, PAGE_SIZE - l, "\n");
- unlock_fb_info(fbi);
omapfb_unlock(fbdev);
+ unlock_fb_info(fbi);
return l;
}
@@ -195,8 +198,9 @@ static ssize_t store_overlays(struct device *dev, struct device_attribute *attr,
if (buf[len - 1] == '\n')
len = len - 1;
+ if (!lock_fb_info(fbi))
+ return -ENODEV;
omapfb_lock(fbdev);
- lock_fb_info(fbi);
if (len > 0) {
char *p = (char *)buf;
@@ -303,8 +307,8 @@ static ssize_t store_overlays(struct device *dev, struct device_attribute *attr,
r = count;
out:
- unlock_fb_info(fbi);
omapfb_unlock(fbdev);
+ unlock_fb_info(fbi);
return r;
}
@@ -317,7 +321,8 @@ static ssize_t show_overlays_rotate(struct device *dev,
ssize_t l = 0;
int t;
- lock_fb_info(fbi);
+ if (!lock_fb_info(fbi))
+ return -ENODEV;
for (t = 0; t < ofbi->num_overlays; t++) {
l += snprintf(buf + l, PAGE_SIZE - l, "%s%d",
@@ -345,7 +350,8 @@ static ssize_t store_overlays_rotate(struct device *dev,
if (buf[len - 1] == '\n')
len = len - 1;
- lock_fb_info(fbi);
+ if (!lock_fb_info(fbi))
+ return -ENODEV;
if (len > 0) {
char *p = (char *)buf;
@@ -416,7 +422,8 @@ static ssize_t store_size(struct device *dev, struct device_attribute *attr,
size = PAGE_ALIGN(simple_strtoul(buf, NULL, 0));
- lock_fb_info(fbi);
+ if (!lock_fb_info(fbi))
+ return -ENODEV;
for (i = 0; i < ofbi->num_overlays; i++) {
if (ofbi->overlays[i]->info.enabled) {
diff --git a/drivers/video/tdfxfb.c b/drivers/video/tdfxfb.c
index 980548390048..3ee5e63cfa4f 100644
--- a/drivers/video/tdfxfb.c
+++ b/drivers/video/tdfxfb.c
@@ -1571,8 +1571,8 @@ out_err_iobase:
if (default_par->mtrr_handle >= 0)
mtrr_del(default_par->mtrr_handle, info->fix.smem_start,
info->fix.smem_len);
- release_mem_region(pci_resource_start(pdev, 2),
- pci_resource_len(pdev, 2));
+ release_region(pci_resource_start(pdev, 2),
+ pci_resource_len(pdev, 2));
out_err_screenbase:
if (info->screen_base)
iounmap(info->screen_base);
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index 7b8839ebf3c4..52ec0959d462 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -1977,8 +1977,7 @@ static void __devexit uvesafb_exit(void)
module_exit(uvesafb_exit);
-#define param_get_scroll NULL
-static int param_set_scroll(const char *val, struct kernel_param *kp)
+static int param_set_scroll(const char *val, const struct kernel_param *kp)
{
ypan = 0;
@@ -1993,7 +1992,9 @@ static int param_set_scroll(const char *val, struct kernel_param *kp)
return 0;
}
-
+static struct kernel_param_ops param_ops_scroll = {
+ .set = param_set_scroll,
+};
#define param_check_scroll(name, p) __param_check(name, p, void)
module_param_named(scroll, ypan, scroll, 0);
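Editorial note: this hunk is part of the conversion of custom module parameters to the kernel_param_ops interface. Instead of a bare setter plus a NULL getter, the parameter supplies a struct kernel_param_ops named param_ops_<type>, which module_param_named() picks up via its type argument, as the retained module_param_named(scroll, ypan, scroll, 0) line shows. A minimal sketch of the general shape, using a hypothetical parameter rather than anything from this driver:

/* Editorial sketch of a kernel_param_ops-style parameter; "mymode" and
 * everything around it are hypothetical. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static int mymode;

static int param_set_mymode(const char *val, const struct kernel_param *kp)
{
	if (!val)
		return -EINVAL;
	/* parse the string written via the module parameter interface */
	return sscanf(val, "%d", &mymode) == 1 ? 0 : -EINVAL;
}

static struct kernel_param_ops param_ops_mymode = {
	.set = param_set_mymode,	/* no .get: parameter is write-only */
};
#define param_check_mymode(name, p) __param_check(name, p, int)

module_param_named(mode, mymode, mymode, 0);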
diff --git a/drivers/video/via/Makefile b/drivers/video/via/Makefile
index eeed238ad6a2..d496adb0f832 100644
--- a/drivers/video/via/Makefile
+++ b/drivers/video/via/Makefile
@@ -4,4 +4,6 @@
obj-$(CONFIG_FB_VIA) += viafb.o
-viafb-y :=viafbdev.o hw.o via_i2c.o dvi.o lcd.o ioctl.o accel.o via_utility.o vt1636.o global.o tblDPASetting.o viamode.o tbl1636.o
+viafb-y :=viafbdev.o hw.o via_i2c.o dvi.o lcd.o ioctl.o accel.o \
+ via_utility.o vt1636.o global.o tblDPASetting.o viamode.o tbl1636.o \
+ via-core.o via-gpio.o via_modesetting.o
diff --git a/drivers/video/via/accel.c b/drivers/video/via/accel.c
index d5077dfa9e00..e44893ea590d 100644
--- a/drivers/video/via/accel.c
+++ b/drivers/video/via/accel.c
@@ -18,14 +18,45 @@
* Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/via-core.h>
#include "global.h"
+/*
+ * Figure out an appropriate bytes-per-pixel setting.
+ */
+static int viafb_set_bpp(void __iomem *engine, u8 bpp)
+{
+ u32 gemode;
+
+ /* Preserve the reserved bits */
+ /* Lowest 2 bits to zero gives us no rotation */
+ gemode = readl(engine + VIA_REG_GEMODE) & 0xfffffcfc;
+ switch (bpp) {
+ case 8:
+ gemode |= VIA_GEM_8bpp;
+ break;
+ case 16:
+ gemode |= VIA_GEM_16bpp;
+ break;
+ case 32:
+ gemode |= VIA_GEM_32bpp;
+ break;
+ default:
+ printk(KERN_WARNING "viafb_set_bpp: Unsupported bpp %d\n", bpp);
+ return -EINVAL;
+ }
+ writel(gemode, engine + VIA_REG_GEMODE);
+ return 0;
+}
+
+
static int hw_bitblt_1(void __iomem *engine, u8 op, u32 width, u32 height,
u8 dst_bpp, u32 dst_addr, u32 dst_pitch, u32 dst_x, u32 dst_y,
u32 *src_mem, u32 src_addr, u32 src_pitch, u32 src_x, u32 src_y,
u32 fg_color, u32 bg_color, u8 fill_rop)
{
u32 ge_cmd = 0, tmp, i;
+ int ret;
if (!op || op > 3) {
printk(KERN_WARNING "hw_bitblt_1: Invalid operation: %d\n", op);
@@ -59,22 +90,9 @@ static int hw_bitblt_1(void __iomem *engine, u8 op, u32 width, u32 height,
}
}
- switch (dst_bpp) {
- case 8:
- tmp = 0x00000000;
- break;
- case 16:
- tmp = 0x00000100;
- break;
- case 32:
- tmp = 0x00000300;
- break;
- default:
- printk(KERN_WARNING "hw_bitblt_1: Unsupported bpp %d\n",
- dst_bpp);
- return -EINVAL;
- }
- writel(tmp, engine + 0x04);
+ ret = viafb_set_bpp(engine, dst_bpp);
+ if (ret)
+ return ret;
if (op != VIA_BITBLT_FILL) {
if (src_x & (op == VIA_BITBLT_MONO ? 0xFFFF8000 : 0xFFFFF000)
@@ -171,6 +189,7 @@ static int hw_bitblt_2(void __iomem *engine, u8 op, u32 width, u32 height,
u32 fg_color, u32 bg_color, u8 fill_rop)
{
u32 ge_cmd = 0, tmp, i;
+ int ret;
if (!op || op > 3) {
printk(KERN_WARNING "hw_bitblt_2: Invalid operation: %d\n", op);
@@ -204,22 +223,9 @@ static int hw_bitblt_2(void __iomem *engine, u8 op, u32 width, u32 height,
}
}
- switch (dst_bpp) {
- case 8:
- tmp = 0x00000000;
- break;
- case 16:
- tmp = 0x00000100;
- break;
- case 32:
- tmp = 0x00000300;
- break;
- default:
- printk(KERN_WARNING "hw_bitblt_2: Unsupported bpp %d\n",
- dst_bpp);
- return -EINVAL;
- }
- writel(tmp, engine + 0x04);
+ ret = viafb_set_bpp(engine, dst_bpp);
+ if (ret)
+ return ret;
if (op == VIA_BITBLT_FILL)
tmp = 0;
@@ -312,17 +318,29 @@ int viafb_init_engine(struct fb_info *info)
{
struct viafb_par *viapar = info->par;
void __iomem *engine;
+ int highest_reg, i;
u32 vq_start_addr, vq_end_addr, vq_start_low, vq_end_low, vq_high,
vq_len, chip_name = viapar->shared->chip_info.gfx_chip_name;
- engine = ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len);
- viapar->shared->engine_mmio = engine;
+ engine = viapar->shared->vdev->engine_mmio;
if (!engine) {
printk(KERN_WARNING "viafb_init_accel: ioremap failed, "
"hardware acceleration disabled\n");
return -ENOMEM;
}
+ /* Initialize registers to reset the 2D engine */
+ switch (viapar->shared->chip_info.twod_engine) {
+ case VIA_2D_ENG_M1:
+ highest_reg = 0x5c;
+ break;
+ default:
+ highest_reg = 0x40;
+ break;
+ }
+ for (i = 0; i <= highest_reg; i += 4)
+ writel(0x0, engine + i);
+
switch (chip_name) {
case UNICHROME_CLE266:
case UNICHROME_K400:
@@ -352,13 +370,28 @@ int viafb_init_engine(struct fb_info *info)
viapar->shared->vq_vram_addr = viapar->fbmem_free;
viapar->fbmem_used += VQ_SIZE;
- /* Init 2D engine reg to reset 2D engine */
- writel(0x0, engine + VIA_REG_KEYCONTROL);
+#if defined(CONFIG_FB_VIA_CAMERA) || defined(CONFIG_FB_VIA_CAMERA_MODULE)
+ /*
+ * Set aside a chunk of framebuffer memory for the camera
+ * driver. Someday this driver probably needs a proper allocator
+ * for fbmem; for now, we just have to do this before the
+ * framebuffer initializes itself.
+ *
+ * As for the size: the engine can handle three frames,
+ * 16 bits deep, up to VGA resolution.
+ */
+ viapar->shared->vdev->camera_fbmem_size = 3*VGA_HEIGHT*VGA_WIDTH*2;
+ viapar->fbmem_free -= viapar->shared->vdev->camera_fbmem_size;
+ viapar->fbmem_used += viapar->shared->vdev->camera_fbmem_size;
+ viapar->shared->vdev->camera_fbmem_offset = viapar->fbmem_free;
+#endif
/* Init AGP and VQ regs */
switch (chip_name) {
case UNICHROME_K8M890:
case UNICHROME_P4M900:
+ case UNICHROME_VX800:
+ case UNICHROME_VX855:
writel(0x00100000, engine + VIA_REG_CR_TRANSET);
writel(0x680A0000, engine + VIA_REG_CR_TRANSPACE);
writel(0x02000000, engine + VIA_REG_CR_TRANSPACE);
@@ -393,6 +426,8 @@ int viafb_init_engine(struct fb_info *info)
switch (chip_name) {
case UNICHROME_K8M890:
case UNICHROME_P4M900:
+ case UNICHROME_VX800:
+ case UNICHROME_VX855:
vq_start_low |= 0x20000000;
vq_end_low |= 0x20000000;
vq_high |= 0x20000000;
@@ -446,7 +481,7 @@ void viafb_show_hw_cursor(struct fb_info *info, int Status)
struct viafb_par *viapar = info->par;
u32 temp, iga_path = viapar->iga_path;
- temp = readl(viapar->shared->engine_mmio + VIA_REG_CURSOR_MODE);
+ temp = readl(viapar->shared->vdev->engine_mmio + VIA_REG_CURSOR_MODE);
switch (Status) {
case HW_Cursor_ON:
temp |= 0x1;
@@ -463,23 +498,33 @@ void viafb_show_hw_cursor(struct fb_info *info, int Status)
default:
temp &= 0x7FFFFFFF;
}
- writel(temp, viapar->shared->engine_mmio + VIA_REG_CURSOR_MODE);
+ writel(temp, viapar->shared->vdev->engine_mmio + VIA_REG_CURSOR_MODE);
}
void viafb_wait_engine_idle(struct fb_info *info)
{
struct viafb_par *viapar = info->par;
int loop = 0;
-
- while (!(readl(viapar->shared->engine_mmio + VIA_REG_STATUS) &
- VIA_VR_QUEUE_BUSY) && (loop < MAXLOOP)) {
- loop++;
- cpu_relax();
+ u32 mask;
+ void __iomem *engine = viapar->shared->vdev->engine_mmio;
+
+ switch (viapar->shared->chip_info.twod_engine) {
+ case VIA_2D_ENG_H5:
+ case VIA_2D_ENG_M1:
+ mask = VIA_CMD_RGTR_BUSY_M1 | VIA_2D_ENG_BUSY_M1 |
+ VIA_3D_ENG_BUSY_M1;
+ break;
+ default:
+ while (!(readl(engine + VIA_REG_STATUS) &
+ VIA_VR_QUEUE_BUSY) && (loop < MAXLOOP)) {
+ loop++;
+ cpu_relax();
+ }
+ mask = VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY | VIA_3D_ENG_BUSY;
+ break;
}
- while ((readl(viapar->shared->engine_mmio + VIA_REG_STATUS) &
- (VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY | VIA_3D_ENG_BUSY)) &&
- (loop < MAXLOOP)) {
+ while ((readl(engine + VIA_REG_STATUS) & mask) && (loop < MAXLOOP)) {
loop++;
cpu_relax();
}
diff --git a/drivers/video/via/accel.h b/drivers/video/via/accel.h
index 615c84ad0f01..2c122d292365 100644
--- a/drivers/video/via/accel.h
+++ b/drivers/video/via/accel.h
@@ -67,6 +67,34 @@
/* from 0x100 to 0x1ff */
#define VIA_REG_COLORPAT 0x100
+/* defines for VIA 2D registers for vt3353/3409 (M1 engine)*/
+#define VIA_REG_GECMD_M1 0x000
+#define VIA_REG_GEMODE_M1 0x004
+#define VIA_REG_GESTATUS_M1 0x004 /* same as VIA_REG_GEMODE */
+#define VIA_REG_PITCH_M1 0x008 /* pitch of src and dst */
+#define VIA_REG_DIMENSION_M1 0x00C /* width and height */
+#define VIA_REG_DSTPOS_M1 0x010
+#define VIA_REG_LINE_XY_M1 0x010
+#define VIA_REG_DSTBASE_M1 0x014
+#define VIA_REG_SRCPOS_M1 0x018
+#define VIA_REG_LINE_K1K2_M1 0x018
+#define VIA_REG_SRCBASE_M1 0x01C
+#define VIA_REG_PATADDR_M1 0x020
+#define VIA_REG_MONOPAT0_M1 0x024
+#define VIA_REG_MONOPAT1_M1 0x028
+#define VIA_REG_OFFSET_M1 0x02C
+#define VIA_REG_LINE_ERROR_M1 0x02C
+#define VIA_REG_CLIPTL_M1 0x040 /* top and left of clipping */
+#define VIA_REG_CLIPBR_M1 0x044 /* bottom and right of clipping */
+#define VIA_REG_KEYCONTROL_M1 0x048 /* color key control */
+#define VIA_REG_FGCOLOR_M1 0x04C
+#define VIA_REG_DSTCOLORKEY_M1 0x04C /* same as VIA_REG_FG */
+#define VIA_REG_BGCOLOR_M1 0x050
+#define VIA_REG_SRCCOLORKEY_M1 0x050 /* same as VIA_REG_BG */
+#define VIA_REG_MONOPATFGC_M1 0x058 /* Add BG color of Pattern. */
+#define VIA_REG_MONOPATBGC_M1 0x05C /* Add FG color of Pattern. */
+#define VIA_REG_COLORPAT_M1 0x100 /* from 0x100 to 0x1ff */
+
/* VIA_REG_PITCH(0x38): Pitch Setting */
#define VIA_PITCH_ENABLE 0x80000000
@@ -157,6 +185,18 @@
/* Virtual Queue is busy */
#define VIA_VR_QUEUE_BUSY 0x00020000
+/* VIA_REG_STATUS(0x400): Engine Status for H5 */
+#define VIA_CMD_RGTR_BUSY_H5 0x00000010 /* Command Regulator is busy */
+#define VIA_2D_ENG_BUSY_H5 0x00000002 /* 2D Engine is busy */
+#define VIA_3D_ENG_BUSY_H5 0x00001FE1 /* 3D Engine is busy */
+#define VIA_VR_QUEUE_BUSY_H5 0x00000004 /* Virtual Queue is busy */
+
+/* VIA_REG_STATUS(0x400): Engine Status for VT3353/3409 */
+#define VIA_CMD_RGTR_BUSY_M1 0x00000010 /* Command Regulator is busy */
+#define VIA_2D_ENG_BUSY_M1 0x00000002 /* 2D Engine is busy */
+#define VIA_3D_ENG_BUSY_M1 0x00001FE1 /* 3D Engine is busy */
+#define VIA_VR_QUEUE_BUSY_M1 0x00000004 /* Virtual Queue is busy */
+
#define MAXLOOP 0xFFFFFF
#define VIA_BITBLT_COLOR 1
diff --git a/drivers/video/via/chip.h b/drivers/video/via/chip.h
index 8c06bd3c0b4d..d9b6e06e0700 100644
--- a/drivers/video/via/chip.h
+++ b/drivers/video/via/chip.h
@@ -121,9 +121,17 @@ struct lvds_chip_information {
int i2c_port;
};
+/* The type of 2D engine */
+enum via_2d_engine {
+ VIA_2D_ENG_H2,
+ VIA_2D_ENG_H5,
+ VIA_2D_ENG_M1,
+};
+
struct chip_information {
int gfx_chip_name;
int gfx_chip_revision;
+ enum via_2d_engine twod_engine;
struct tmds_chip_information tmds_chip_info;
struct lvds_chip_information lvds_chip_info;
struct lvds_chip_information lvds_chip_info2;
diff --git a/drivers/video/via/dvi.c b/drivers/video/via/dvi.c
index abe59b8c7a05..39b040bb3817 100644
--- a/drivers/video/via/dvi.c
+++ b/drivers/video/via/dvi.c
@@ -18,6 +18,8 @@
* Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/via-core.h>
+#include <linux/via_i2c.h>
#include "global.h"
static void tmds_register_write(int index, u8 data);
@@ -96,7 +98,7 @@ int viafb_tmds_trasmitter_identify(void)
viaparinfo->chip_info->tmds_chip_info.tmds_chip_name = VT1632_TMDS;
viaparinfo->chip_info->
tmds_chip_info.tmds_chip_slave_addr = VT1632_TMDS_I2C_ADDR;
- viaparinfo->chip_info->tmds_chip_info.i2c_port = I2CPORTINDEX;
+ viaparinfo->chip_info->tmds_chip_info.i2c_port = VIA_PORT_31;
if (check_tmds_chip(VT1632_DEVICE_ID_REG, VT1632_DEVICE_ID) != FAIL) {
/*
* Currently only support 12bits,dual edge,add 24bits mode later
@@ -110,7 +112,7 @@ int viafb_tmds_trasmitter_identify(void)
viaparinfo->chip_info->tmds_chip_info.i2c_port);
return OK;
} else {
- viaparinfo->chip_info->tmds_chip_info.i2c_port = GPIOPORTINDEX;
+ viaparinfo->chip_info->tmds_chip_info.i2c_port = VIA_PORT_2C;
if (check_tmds_chip(VT1632_DEVICE_ID_REG, VT1632_DEVICE_ID)
!= FAIL) {
tmds_register_write(0x08, 0x3b);
@@ -160,32 +162,26 @@ int viafb_tmds_trasmitter_identify(void)
static void tmds_register_write(int index, u8 data)
{
- viaparinfo->shared->i2c_stuff.i2c_port =
- viaparinfo->chip_info->tmds_chip_info.i2c_port;
-
- viafb_i2c_writebyte(viaparinfo->chip_info->tmds_chip_info.
- tmds_chip_slave_addr, index,
- data);
+ viafb_i2c_writebyte(viaparinfo->chip_info->tmds_chip_info.i2c_port,
+ viaparinfo->chip_info->tmds_chip_info.tmds_chip_slave_addr,
+ index, data);
}
static int tmds_register_read(int index)
{
u8 data;
- viaparinfo->shared->i2c_stuff.i2c_port =
- viaparinfo->chip_info->tmds_chip_info.i2c_port;
- viafb_i2c_readbyte((u8) viaparinfo->chip_info->
- tmds_chip_info.tmds_chip_slave_addr,
- (u8) index, &data);
+ viafb_i2c_readbyte(viaparinfo->chip_info->tmds_chip_info.i2c_port,
+ (u8) viaparinfo->chip_info->tmds_chip_info.tmds_chip_slave_addr,
+ (u8) index, &data);
return data;
}
static int tmds_register_read_bytes(int index, u8 *buff, int buff_len)
{
- viaparinfo->shared->i2c_stuff.i2c_port =
- viaparinfo->chip_info->tmds_chip_info.i2c_port;
- viafb_i2c_readbytes((u8) viaparinfo->chip_info->tmds_chip_info.
- tmds_chip_slave_addr, (u8) index, buff, buff_len);
+ viafb_i2c_readbytes(viaparinfo->chip_info->tmds_chip_info.i2c_port,
+ (u8) viaparinfo->chip_info->tmds_chip_info.tmds_chip_slave_addr,
+ (u8) index, buff, buff_len);
return 0;
}
@@ -541,9 +537,10 @@ void viafb_dvi_enable(void)
else
data = 0x37;
viafb_i2c_writebyte(viaparinfo->chip_info->
- tmds_chip_info.
- tmds_chip_slave_addr,
- 0x08, data);
+ tmds_chip_info.i2c_port,
+ viaparinfo->chip_info->
+ tmds_chip_info.tmds_chip_slave_addr,
+ 0x08, data);
}
}
}
diff --git a/drivers/video/via/global.h b/drivers/video/via/global.h
index 8d95d5fd1388..28221a062dda 100644
--- a/drivers/video/via/global.h
+++ b/drivers/video/via/global.h
@@ -41,7 +41,6 @@
#include "share.h"
#include "dvi.h"
#include "viamode.h"
-#include "via_i2c.h"
#include "hw.h"
#include "lcd.h"
diff --git a/drivers/video/via/hw.c b/drivers/video/via/hw.c
index f2583b1b527f..b996803ae2c1 100644
--- a/drivers/video/via/hw.c
+++ b/drivers/video/via/hw.c
@@ -19,6 +19,7 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/via-core.h>
#include "global.h"
static struct pll_map pll_value[] = {
@@ -62,6 +63,7 @@ static struct pll_map pll_value[] = {
CX700_52_977M, VX855_52_977M},
{CLK_56_250M, CLE266_PLL_56_250M, K800_PLL_56_250M,
CX700_56_250M, VX855_56_250M},
+ {CLK_57_275M, 0, 0, 0, VX855_57_275M},
{CLK_60_466M, CLE266_PLL_60_466M, K800_PLL_60_466M,
CX700_60_466M, VX855_60_466M},
{CLK_61_500M, CLE266_PLL_61_500M, K800_PLL_61_500M,
@@ -525,8 +527,7 @@ static void dvi_patch_skew_dvp_low(void);
static void set_dvi_output_path(int set_iga, int output_interface);
static void set_lcd_output_path(int set_iga, int output_interface);
static void load_fix_bit_crtc_reg(void);
-static void init_gfx_chip_info(struct pci_dev *pdev,
- const struct pci_device_id *pdi);
+static void init_gfx_chip_info(int chip_type);
static void init_tmds_chip_info(void);
static void init_lvds_chip_info(void);
static void device_screen_off(void);
@@ -537,18 +538,6 @@ static void device_on(void);
static void enable_second_display_channel(void);
static void disable_second_display_channel(void);
-void viafb_write_reg(u8 index, u16 io_port, u8 data)
-{
- outb(index, io_port);
- outb(data, io_port + 1);
- /*DEBUG_MSG(KERN_INFO "\nIndex=%2d Value=%2d", index, data); */
-}
-u8 viafb_read_reg(int io_port, u8 index)
-{
- outb(index, io_port);
- return inb(io_port + 1);
-}
-
void viafb_lock_crt(void)
{
viafb_write_reg_mask(CR11, VIACR, BIT7, BIT7);
@@ -560,16 +549,6 @@ void viafb_unlock_crt(void)
viafb_write_reg_mask(CR47, VIACR, 0, BIT0);
}
-void viafb_write_reg_mask(u8 index, int io_port, u8 data, u8 mask)
-{
- u8 tmp;
-
- outb(index, io_port);
- tmp = inb(io_port + 1);
- outb((data & mask) | (tmp & (~mask)), io_port + 1);
- /*DEBUG_MSG(KERN_INFO "\nIndex=%2d Value=%2d", index, tmp); */
-}
-
void write_dac_reg(u8 index, u8 r, u8 g, u8 b)
{
outb(index, LUT_INDEX_WRITE);
@@ -646,102 +625,6 @@ void viafb_set_iga_path(void)
}
}
-void viafb_set_primary_address(u32 addr)
-{
- DEBUG_MSG(KERN_DEBUG "viafb_set_primary_address(0x%08X)\n", addr);
- viafb_write_reg(CR0D, VIACR, addr & 0xFF);
- viafb_write_reg(CR0C, VIACR, (addr >> 8) & 0xFF);
- viafb_write_reg(CR34, VIACR, (addr >> 16) & 0xFF);
- viafb_write_reg_mask(CR48, VIACR, (addr >> 24) & 0x1F, 0x1F);
-}
-
-void viafb_set_secondary_address(u32 addr)
-{
- DEBUG_MSG(KERN_DEBUG "viafb_set_secondary_address(0x%08X)\n", addr);
- /* secondary display supports only quadword aligned memory */
- viafb_write_reg_mask(CR62, VIACR, (addr >> 2) & 0xFE, 0xFE);
- viafb_write_reg(CR63, VIACR, (addr >> 10) & 0xFF);
- viafb_write_reg(CR64, VIACR, (addr >> 18) & 0xFF);
- viafb_write_reg_mask(CRA3, VIACR, (addr >> 26) & 0x07, 0x07);
-}
-
-void viafb_set_primary_pitch(u32 pitch)
-{
- DEBUG_MSG(KERN_DEBUG "viafb_set_primary_pitch(0x%08X)\n", pitch);
- /* spec does not say that first adapter skips 3 bits but old
- * code did it and seems to be reasonable in analogy to 2nd adapter
- */
- pitch = pitch >> 3;
- viafb_write_reg(0x13, VIACR, pitch & 0xFF);
- viafb_write_reg_mask(0x35, VIACR, (pitch >> (8 - 5)) & 0xE0, 0xE0);
-}
-
-void viafb_set_secondary_pitch(u32 pitch)
-{
- DEBUG_MSG(KERN_DEBUG "viafb_set_secondary_pitch(0x%08X)\n", pitch);
- pitch = pitch >> 3;
- viafb_write_reg(0x66, VIACR, pitch & 0xFF);
- viafb_write_reg_mask(0x67, VIACR, (pitch >> 8) & 0x03, 0x03);
- viafb_write_reg_mask(0x71, VIACR, (pitch >> (10 - 7)) & 0x80, 0x80);
-}
-
-void viafb_set_primary_color_depth(u8 depth)
-{
- u8 value;
-
- DEBUG_MSG(KERN_DEBUG "viafb_set_primary_color_depth(%d)\n", depth);
- switch (depth) {
- case 8:
- value = 0x00;
- break;
- case 15:
- value = 0x04;
- break;
- case 16:
- value = 0x14;
- break;
- case 24:
- value = 0x0C;
- break;
- case 30:
- value = 0x08;
- break;
- default:
- printk(KERN_WARNING "viafb_set_primary_color_depth: "
- "Unsupported depth: %d\n", depth);
- return;
- }
-
- viafb_write_reg_mask(0x15, VIASR, value, 0x1C);
-}
-
-void viafb_set_secondary_color_depth(u8 depth)
-{
- u8 value;
-
- DEBUG_MSG(KERN_DEBUG "viafb_set_secondary_color_depth(%d)\n", depth);
- switch (depth) {
- case 8:
- value = 0x00;
- break;
- case 16:
- value = 0x40;
- break;
- case 24:
- value = 0xC0;
- break;
- case 30:
- value = 0x80;
- break;
- default:
- printk(KERN_WARNING "viafb_set_secondary_color_depth: "
- "Unsupported depth: %d\n", depth);
- return;
- }
-
- viafb_write_reg_mask(0x67, VIACR, value, 0xC0);
-}
-
static void set_color_register(u8 index, u8 red, u8 green, u8 blue)
{
outb(0xFF, 0x3C6); /* bit mask of palette */
@@ -1126,16 +1009,12 @@ void viafb_load_reg(int timing_value, int viafb_load_reg_num,
void viafb_write_regx(struct io_reg RegTable[], int ItemNum)
{
int i;
- unsigned char RegTemp;
/*DEBUG_MSG(KERN_INFO "Table Size : %x!!\n",ItemNum ); */
- for (i = 0; i < ItemNum; i++) {
- outb(RegTable[i].index, RegTable[i].port);
- RegTemp = inb(RegTable[i].port + 1);
- RegTemp = (RegTemp & (~RegTable[i].mask)) | RegTable[i].value;
- outb(RegTemp, RegTable[i].port + 1);
- }
+ for (i = 0; i < ItemNum; i++)
+ via_write_reg_mask(RegTable[i].port, RegTable[i].index,
+ RegTable[i].value, RegTable[i].mask);
}
void viafb_load_fetch_count_reg(int h_addr, int bpp_byte, int set_iga)
@@ -1516,8 +1395,6 @@ u32 viafb_get_clk_value(int clk)
/* Set VCLK*/
void viafb_set_vclock(u32 CLK, int set_iga)
{
- unsigned char RegTemp;
-
/* H.W. Reset : ON */
viafb_write_reg_mask(CR17, VIACR, 0x00, BIT7);
@@ -1590,8 +1467,7 @@ void viafb_set_vclock(u32 CLK, int set_iga)
}
/* Fire! */
- RegTemp = inb(VIARMisc);
- outb(RegTemp | (BIT2 + BIT3), VIAWMisc);
+ via_write_misc_reg_mask(0x0C, 0x0C); /* select external clock */
}
void viafb_load_crtc_timing(struct display_timing device_timing,
@@ -1835,6 +1711,7 @@ void viafb_fill_crtc_timing(struct crt_mode_table *crt_table,
int index = 0;
int h_addr, v_addr;
u32 pll_D_N;
+ u8 polarity = 0;
for (i = 0; i < video_mode->mode_array; i++) {
index = i;
@@ -1863,20 +1740,11 @@ void viafb_fill_crtc_timing(struct crt_mode_table *crt_table,
v_addr = crt_reg.ver_addr;
/* update polarity for CRT timing */
- if (crt_table[index].h_sync_polarity == NEGATIVE) {
- if (crt_table[index].v_sync_polarity == NEGATIVE)
- outb((inb(VIARMisc) & (~(BIT6 + BIT7))) |
- (BIT6 + BIT7), VIAWMisc);
- else
- outb((inb(VIARMisc) & (~(BIT6 + BIT7))) | (BIT6),
- VIAWMisc);
- } else {
- if (crt_table[index].v_sync_polarity == NEGATIVE)
- outb((inb(VIARMisc) & (~(BIT6 + BIT7))) | (BIT7),
- VIAWMisc);
- else
- outb((inb(VIARMisc) & (~(BIT6 + BIT7))), VIAWMisc);
- }
+ if (crt_table[index].h_sync_polarity == NEGATIVE)
+ polarity |= BIT6;
+ if (crt_table[index].v_sync_polarity == NEGATIVE)
+ polarity |= BIT7;
+ via_write_misc_reg_mask(polarity, BIT6 | BIT7);
if (set_iga == IGA1) {
viafb_unlock_crt();
@@ -1910,10 +1778,9 @@ void viafb_fill_crtc_timing(struct crt_mode_table *crt_table,
}
-void viafb_init_chip_info(struct pci_dev *pdev,
- const struct pci_device_id *pdi)
+void viafb_init_chip_info(int chip_type)
{
- init_gfx_chip_info(pdev, pdi);
+ init_gfx_chip_info(chip_type);
init_tmds_chip_info();
init_lvds_chip_info();
@@ -1980,12 +1847,11 @@ void viafb_update_device_setting(int hres, int vres,
}
}
-static void init_gfx_chip_info(struct pci_dev *pdev,
- const struct pci_device_id *pdi)
+static void init_gfx_chip_info(int chip_type)
{
u8 tmp;
- viaparinfo->chip_info->gfx_chip_name = pdi->driver_data;
+ viaparinfo->chip_info->gfx_chip_name = chip_type;
/* Check revision of CLE266 Chip */
if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266) {
@@ -2016,6 +1882,21 @@ static void init_gfx_chip_info(struct pci_dev *pdev,
CX700_REVISION_700;
}
}
+
+ /* Determine which 2D engine we have */
+ switch (viaparinfo->chip_info->gfx_chip_name) {
+ case UNICHROME_VX800:
+ case UNICHROME_VX855:
+ viaparinfo->chip_info->twod_engine = VIA_2D_ENG_M1;
+ break;
+ case UNICHROME_K8M890:
+ case UNICHROME_P4M900:
+ viaparinfo->chip_info->twod_engine = VIA_2D_ENG_H5;
+ break;
+ default:
+ viaparinfo->chip_info->twod_engine = VIA_2D_ENG_H2;
+ break;
+ }
}
static void init_tmds_chip_info(void)
@@ -2232,13 +2113,11 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
/* Fill VPIT Parameters */
/* Write Misc Register */
- outb(VPIT.Misc, VIAWMisc);
+ outb(VPIT.Misc, VIA_MISC_REG_WRITE);
/* Write Sequencer */
- for (i = 1; i <= StdSR; i++) {
- outb(i, VIASR);
- outb(VPIT.SR[i - 1], VIASR + 1);
- }
+ for (i = 1; i <= StdSR; i++)
+ via_write_reg(VIASR, i, VPIT.SR[i - 1]);
viafb_write_reg_mask(0x15, VIASR, 0xA2, 0xA2);
viafb_set_iga_path();
@@ -2247,10 +2126,8 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
viafb_fill_crtc_timing(crt_timing, vmode_tbl, video_bpp / 8, IGA1);
/* Write Graphic Controller */
- for (i = 0; i < StdGR; i++) {
- outb(i, VIAGR);
- outb(VPIT.GR[i], VIAGR + 1);
- }
+ for (i = 0; i < StdGR; i++)
+ via_write_reg(VIAGR, i, VPIT.GR[i]);
/* Write Attribute Controller */
for (i = 0; i < StdAR; i++) {
@@ -2277,11 +2154,11 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
}
}
- viafb_set_primary_pitch(viafbinfo->fix.line_length);
- viafb_set_secondary_pitch(viafb_dual_fb ? viafbinfo1->fix.line_length
+ via_set_primary_pitch(viafbinfo->fix.line_length);
+ via_set_secondary_pitch(viafb_dual_fb ? viafbinfo1->fix.line_length
: viafbinfo->fix.line_length);
- viafb_set_primary_color_depth(viaparinfo->depth);
- viafb_set_secondary_color_depth(viafb_dual_fb ? viaparinfo1->depth
+ via_set_primary_color_depth(viaparinfo->depth);
+ via_set_secondary_color_depth(viafb_dual_fb ? viaparinfo1->depth
: viaparinfo->depth);
/* Update Refresh Rate Setting */
@@ -2473,108 +2350,6 @@ static void disable_second_display_channel(void)
viafb_write_reg_mask(CR6A, VIACR, BIT6, BIT6);
}
-int viafb_get_fb_size_from_pci(void)
-{
- unsigned long configid, deviceid, FBSize = 0;
- int VideoMemSize;
- int DeviceFound = false;
-
- for (configid = 0x80000000; configid < 0x80010800; configid += 0x100) {
- outl(configid, (unsigned long)0xCF8);
- deviceid = (inl((unsigned long)0xCFC) >> 16) & 0xffff;
-
- switch (deviceid) {
- case CLE266:
- case KM400:
- outl(configid + 0xE0, (unsigned long)0xCF8);
- FBSize = inl((unsigned long)0xCFC);
- DeviceFound = true; /* Found device id */
- break;
-
- case CN400_FUNCTION3:
- case CN700_FUNCTION3:
- case CX700_FUNCTION3:
- case KM800_FUNCTION3:
- case KM890_FUNCTION3:
- case P4M890_FUNCTION3:
- case P4M900_FUNCTION3:
- case VX800_FUNCTION3:
- case VX855_FUNCTION3:
- /*case CN750_FUNCTION3: */
- outl(configid + 0xA0, (unsigned long)0xCF8);
- FBSize = inl((unsigned long)0xCFC);
- DeviceFound = true; /* Found device id */
- break;
-
- default:
- break;
- }
-
- if (DeviceFound)
- break;
- }
-
- DEBUG_MSG(KERN_INFO "Device ID = %lx\n", deviceid);
-
- FBSize = FBSize & 0x00007000;
- DEBUG_MSG(KERN_INFO "FB Size = %x\n", FBSize);
-
- if (viaparinfo->chip_info->gfx_chip_name < UNICHROME_CX700) {
- switch (FBSize) {
- case 0x00004000:
- VideoMemSize = (16 << 20); /*16M */
- break;
-
- case 0x00005000:
- VideoMemSize = (32 << 20); /*32M */
- break;
-
- case 0x00006000:
- VideoMemSize = (64 << 20); /*64M */
- break;
-
- default:
- VideoMemSize = (32 << 20); /*32M */
- break;
- }
- } else {
- switch (FBSize) {
- case 0x00001000:
- VideoMemSize = (8 << 20); /*8M */
- break;
-
- case 0x00002000:
- VideoMemSize = (16 << 20); /*16M */
- break;
-
- case 0x00003000:
- VideoMemSize = (32 << 20); /*32M */
- break;
-
- case 0x00004000:
- VideoMemSize = (64 << 20); /*64M */
- break;
-
- case 0x00005000:
- VideoMemSize = (128 << 20); /*128M */
- break;
-
- case 0x00006000:
- VideoMemSize = (256 << 20); /*256M */
- break;
-
- case 0x00007000: /* Only on VX855/875 */
- VideoMemSize = (512 << 20); /*512M */
- break;
-
- default:
- VideoMemSize = (32 << 20); /*32M */
- break;
- }
- }
-
- return VideoMemSize;
-}
void viafb_set_dpa_gfx(int output_interface, struct GFX_DPA_SETTING\
*p_gfx_dpa_setting)
diff --git a/drivers/video/via/hw.h b/drivers/video/via/hw.h
index 12ef32d334cb..a109de379816 100644
--- a/drivers/video/via/hw.h
+++ b/drivers/video/via/hw.h
@@ -24,6 +24,11 @@
#include "viamode.h"
#include "global.h"
+#include "via_modesetting.h"
+
+#define viafb_read_reg(p, i) via_read_reg(p, i)
+#define viafb_write_reg(i, p, d) via_write_reg(p, i, d)
+#define viafb_write_reg_mask(i, p, d, m) via_write_reg_mask(p, i, d, m)
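A minimal illustration of what the compatibility wrappers above do (not part of this patch): an existing call such as

	viafb_write_reg_mask(CR17, VIACR, 0x00, BIT7);

expands to the new core accessor

	via_write_reg_mask(VIACR, CR17, 0x00, BIT7);

so the I/O port is now passed first and the register index second, while all existing callers keep compiling unchanged.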
/***************************************************
* Definition IGA1 Design Method of CRTC Registers *
@@ -823,8 +828,8 @@ struct iga2_crtc_timing {
};
/* device ID */
-#define CLE266 0x3123
-#define KM400 0x3205
+#define CLE266_FUNCTION3 0x3123
+#define KM400_FUNCTION3 0x3205
#define CN400_FUNCTION2 0x2259
#define CN400_FUNCTION3 0x3259
/* support VT3314 chipset */
@@ -870,7 +875,6 @@ extern int viafb_LCD_ON;
extern int viafb_DVI_ON;
extern int viafb_hotplug;
-void viafb_write_reg_mask(u8 index, int io_port, u8 data, u8 mask);
void viafb_set_output_path(int device, int set_iga,
int output_interface);
@@ -885,8 +889,6 @@ void viafb_crt_disable(void);
void viafb_crt_enable(void);
void init_ad9389(void);
/* Access I/O Function */
-void viafb_write_reg(u8 index, u16 io_port, u8 data);
-u8 viafb_read_reg(int io_port, u8 index);
void viafb_lock_crt(void);
void viafb_unlock_crt(void);
void viafb_load_fetch_count_reg(int h_addr, int bpp_byte, int set_iga);
@@ -900,20 +902,14 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
struct VideoModeTable *vmode_tbl1, int video_bpp1);
void viafb_fill_var_timing_info(struct fb_var_screeninfo *var, int refresh,
struct VideoModeTable *vmode_tbl);
-void viafb_init_chip_info(struct pci_dev *pdev,
- const struct pci_device_id *pdi);
+void viafb_init_chip_info(int chip_type);
void viafb_init_dac(int set_iga);
int viafb_get_pixclock(int hres, int vres, int vmode_refresh);
int viafb_get_refresh(int hres, int vres, u32 float_refresh);
void viafb_update_device_setting(int hres, int vres, int bpp,
int vmode_refresh, int flag);
-int viafb_get_fb_size_from_pci(void);
void viafb_set_iga_path(void);
-void viafb_set_primary_address(u32 addr);
-void viafb_set_secondary_address(u32 addr);
-void viafb_set_primary_pitch(u32 pitch);
-void viafb_set_secondary_pitch(u32 pitch);
void viafb_set_primary_color_register(u8 index, u8 red, u8 green, u8 blue);
void viafb_set_secondary_color_register(u8 index, u8 red, u8 green, u8 blue);
void viafb_get_fb_info(unsigned int *fb_base, unsigned int *fb_len);
diff --git a/drivers/video/via/ioctl.h b/drivers/video/via/ioctl.h
index de899807eade..c430fa23008a 100644
--- a/drivers/video/via/ioctl.h
+++ b/drivers/video/via/ioctl.h
@@ -75,7 +75,7 @@
/*SAMM operation flag*/
#define OP_SAMM 0x80
-#define LCD_PANEL_ID_MAXIMUM 22
+#define LCD_PANEL_ID_MAXIMUM 23
#define STATE_ON 0x1
#define STATE_OFF 0x0
diff --git a/drivers/video/via/lcd.c b/drivers/video/via/lcd.c
index 1b1ccdc2d83d..2ab0f156439a 100644
--- a/drivers/video/via/lcd.c
+++ b/drivers/video/via/lcd.c
@@ -18,7 +18,8 @@
* Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
-
+#include <linux/via-core.h>
+#include <linux/via_i2c.h>
#include "global.h"
#include "lcdtbl.h"
@@ -172,18 +173,16 @@ static bool lvds_identify_integratedlvds(void)
int viafb_lvds_trasmitter_identify(void)
{
- viaparinfo->shared->i2c_stuff.i2c_port = I2CPORTINDEX;
- if (viafb_lvds_identify_vt1636()) {
- viaparinfo->chip_info->lvds_chip_info.i2c_port = I2CPORTINDEX;
+ if (viafb_lvds_identify_vt1636(VIA_PORT_31)) {
+ viaparinfo->chip_info->lvds_chip_info.i2c_port = VIA_PORT_31;
DEBUG_MSG(KERN_INFO
- "Found VIA VT1636 LVDS on port i2c 0x31 \n");
+ "Found VIA VT1636 LVDS on port i2c 0x31\n");
} else {
- viaparinfo->shared->i2c_stuff.i2c_port = GPIOPORTINDEX;
- if (viafb_lvds_identify_vt1636()) {
+ if (viafb_lvds_identify_vt1636(VIA_PORT_2C)) {
viaparinfo->chip_info->lvds_chip_info.i2c_port =
- GPIOPORTINDEX;
+ VIA_PORT_2C;
DEBUG_MSG(KERN_INFO
- "Found VIA VT1636 LVDS on port gpio 0x2c \n");
+ "Found VIA VT1636 LVDS on port gpio 0x2c\n");
}
}
@@ -398,6 +397,15 @@ static void fp_id_to_vindex(int panel_id)
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
+ case 0x17:
+ /* OLPC XO-1.5 panel */
+ viaparinfo->lvds_setting_info->lcd_panel_hres = 1200;
+ viaparinfo->lvds_setting_info->lcd_panel_vres = 900;
+ viaparinfo->lvds_setting_info->lcd_panel_id =
+ LCD_PANEL_IDD_1200X900;
+ viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
+ viaparinfo->lvds_setting_info->LCDDithering = 0;
+ break;
default:
viaparinfo->lvds_setting_info->lcd_panel_hres = 800;
viaparinfo->lvds_setting_info->lcd_panel_vres = 600;
@@ -412,9 +420,8 @@ static int lvds_register_read(int index)
{
u8 data;
- viaparinfo->shared->i2c_stuff.i2c_port = GPIOPORTINDEX;
- viafb_i2c_readbyte((u8) viaparinfo->chip_info->
- lvds_chip_info.lvds_chip_slave_addr,
+ viafb_i2c_readbyte(VIA_PORT_2C,
+ (u8) viaparinfo->chip_info->lvds_chip_info.lvds_chip_slave_addr,
(u8) index, &data);
return data;
}
diff --git a/drivers/video/via/lcd.h b/drivers/video/via/lcd.h
index 071f47cf5be1..9762ec62b495 100644
--- a/drivers/video/via/lcd.h
+++ b/drivers/video/via/lcd.h
@@ -60,6 +60,8 @@
#define LCD_PANEL_IDB_1360X768 0x0B
/* Resolution: 480x640, Channel: single, Dithering: Enable */
#define LCD_PANEL_IDC_480X640 0x0C
+/* Resolution: 1200x900, Channel: single, Dithering: Disable */
+#define LCD_PANEL_IDD_1200X900 0x0D
extern int viafb_LCD2_ON;
diff --git a/drivers/video/via/share.h b/drivers/video/via/share.h
index d55aaa7b912c..7f0de7f006ad 100644
--- a/drivers/video/via/share.h
+++ b/drivers/video/via/share.h
@@ -43,16 +43,9 @@
/* Video Memory Size */
#define VIDEO_MEMORY_SIZE_16M 0x1000000
-/* standard VGA IO port
-*/
-#define VIARMisc 0x3CC
-#define VIAWMisc 0x3C2
-#define VIAStatus 0x3DA
-#define VIACR 0x3D4
-#define VIASR 0x3C4
-#define VIAGR 0x3CE
-#define VIAAR 0x3C0
-
+/*
+ * Lengths of the VPIT structure arrays.
+ */
#define StdCR 0x19
#define StdSR 0x04
#define StdGR 0x09
@@ -570,6 +563,10 @@
#define M1200X720_R60_HSP NEGATIVE
#define M1200X720_R60_VSP POSITIVE
+/* 1200x900@60 Sync Polarity (DCON) */
+#define M1200X900_R60_HSP NEGATIVE
+#define M1200X900_R60_VSP NEGATIVE
+
/* 1280x600@60 Sync Polarity (GTF Mode) */
#define M1280x600_R60_HSP NEGATIVE
#define M1280x600_R60_VSP POSITIVE
@@ -651,6 +648,7 @@
#define CLK_52_406M 52406000
#define CLK_52_977M 52977000
#define CLK_56_250M 56250000
+#define CLK_57_275M 57275000
#define CLK_60_466M 60466000
#define CLK_61_500M 61500000
#define CLK_65_000M 65000000
@@ -939,6 +937,7 @@
#define VX855_52_406M 0x00580C03
#define VX855_52_977M 0x00940C05
#define VX855_56_250M 0x009D0C05
+#define VX855_57_275M 0x009D8C85 /* Used by XO panel */
#define VX855_60_466M 0x00A90C05
#define VX855_61_500M 0x00AC0C05
#define VX855_65_000M 0x006D0C03
@@ -1065,6 +1064,7 @@
#define RES_1600X1200_60HZ_PIXCLOCK 6172
#define RES_1600X1200_75HZ_PIXCLOCK 4938
#define RES_1280X720_60HZ_PIXCLOCK 13426
+#define RES_1200X900_60HZ_PIXCLOCK 17459
#define RES_1920X1080_60HZ_PIXCLOCK 5787
#define RES_1400X1050_60HZ_PIXCLOCK 8214
#define RES_1400X1050_75HZ_PIXCLOCK 6410
diff --git a/drivers/video/via/via-core.c b/drivers/video/via/via-core.c
new file mode 100644
index 000000000000..e8cfe8392110
--- /dev/null
+++ b/drivers/video/via/via-core.c
@@ -0,0 +1,668 @@
+/*
+ * Copyright 1998-2009 VIA Technologies, Inc. All Rights Reserved.
+ * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
+ * Copyright 2009 Jonathan Corbet <corbet@lwn.net>
+ */
+
+/*
+ * Core code for the Via multifunction framebuffer device.
+ */
+#include <linux/via-core.h>
+#include <linux/via_i2c.h>
+#include <linux/via-gpio.h>
+#include "global.h"
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+/*
+ * The default port config.
+ */
+static struct via_port_cfg adap_configs[] = {
+ [VIA_PORT_26] = { VIA_PORT_I2C, VIA_MODE_OFF, VIASR, 0x26 },
+ [VIA_PORT_31] = { VIA_PORT_I2C, VIA_MODE_I2C, VIASR, 0x31 },
+ [VIA_PORT_25] = { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x25 },
+ [VIA_PORT_2C] = { VIA_PORT_GPIO, VIA_MODE_I2C, VIASR, 0x2c },
+ [VIA_PORT_3D] = { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x3d },
+ { 0, 0, 0, 0 }
+};
+
+/*
+ * We currently only support one viafb device (will there ever be
+ * more than one?), so just declare it globally here.
+ */
+static struct viafb_dev global_dev;
+
+
+/*
+ * Basic register access; spinlock required.
+ */
+static inline void viafb_mmio_write(int reg, u32 v)
+{
+ iowrite32(v, global_dev.engine_mmio + reg);
+}
+
+static inline int viafb_mmio_read(int reg)
+{
+ return ioread32(global_dev.engine_mmio + reg);
+}
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Interrupt management. We have a single IRQ line for a lot of
+ * different functions, so we need to share it. The design here
+ * is that we don't want to reimplement the shared IRQ code here;
+ * we also want to avoid having contention for a single handler thread.
+ * So each subdev driver which needs interrupts just requests
+ * them directly from the kernel. We just have what's needed for
+ * overall access to the interrupt control register.
+ */
+
+/*
+ * Which interrupts are enabled now?
+ */
+static u32 viafb_enabled_ints;
+
+static void viafb_int_init(void)
+{
+ viafb_enabled_ints = 0;
+
+ viafb_mmio_write(VDE_INTERRUPT, 0);
+}
+
+/*
+ * Allow subdevs to ask for specific interrupts to be enabled. These
+ * functions must be called with reg_lock held
+ */
+void viafb_irq_enable(u32 mask)
+{
+ viafb_enabled_ints |= mask;
+ viafb_mmio_write(VDE_INTERRUPT, viafb_enabled_ints | VDE_I_ENABLE);
+}
+EXPORT_SYMBOL_GPL(viafb_irq_enable);
+
+void viafb_irq_disable(u32 mask)
+{
+ viafb_enabled_ints &= ~mask;
+ if (viafb_enabled_ints == 0)
+ viafb_mmio_write(VDE_INTERRUPT, 0); /* Disable entirely */
+ else
+ viafb_mmio_write(VDE_INTERRUPT,
+ viafb_enabled_ints | VDE_I_ENABLE);
+}
+EXPORT_SYMBOL_GPL(viafb_irq_disable);
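A sketch of how a subdevice driver might use these helpers, given that they must be called with reg_lock held; the vdev pointer is assumed to come from the platform device's platform_data and the function itself is illustrative, not code from this patch:

	/* illustrative only: enable the DMA-done interrupt from a subdev */
	static void example_enable_dma_irq(struct viafb_dev *vdev)
	{
		unsigned long flags;

		spin_lock_irqsave(&vdev->reg_lock, flags);
		viafb_irq_enable(VDE_I_DMA0TDEN);
		spin_unlock_irqrestore(&vdev->reg_lock, flags);
	}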
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Access to the DMA engine. This currently provides what the camera
+ * driver needs (i.e. outgoing only) but is easily expandable if need
+ * be.
+ */
+
+/*
+ * There are four DMA channels in the vx855. For now, we only
+ * use one of them, though. Most of the time, the DMA channel
+ * will be idle, so we keep the IRQ handler unregistered except
+ * when some subsystem has indicated an interest.
+ */
+static int viafb_dma_users;
+static DECLARE_COMPLETION(viafb_dma_completion);
+/*
+ * This mutex protects viafb_dma_users and our global interrupt
+ * registration state; it also serializes access to the DMA
+ * engine.
+ */
+static DEFINE_MUTEX(viafb_dma_lock);
+
+/*
+ * The VX855 DMA descriptor (used for s/g transfers) looks
+ * like this.
+ */
+struct viafb_vx855_dma_descr {
+ u32 addr_low; /* Low part of phys addr */
+ u32 addr_high; /* High 12 bits of addr */
+ u32 fb_offset; /* Offset into FB memory */
+ u32 seg_size; /* Size, 16-byte units */
+ u32 tile_mode; /* "tile mode" setting */
+ u32 next_desc_low; /* Next descriptor addr */
+ u32 next_desc_high;
+ u32 pad; /* Fill out to 64 bytes */
+};
+
+/*
+ * Flags added to the "next descriptor low" pointers
+ */
+#define VIAFB_DMA_MAGIC 0x01 /* ??? Just has to be there */
+#define VIAFB_DMA_FINAL_SEGMENT 0x02 /* Final segment */
+
+/*
+ * The completion IRQ handler.
+ */
+static irqreturn_t viafb_dma_irq(int irq, void *data)
+{
+ int csr;
+ irqreturn_t ret = IRQ_NONE;
+
+ spin_lock(&global_dev.reg_lock);
+ csr = viafb_mmio_read(VDMA_CSR0);
+ if (csr & VDMA_C_DONE) {
+ viafb_mmio_write(VDMA_CSR0, VDMA_C_DONE);
+ complete(&viafb_dma_completion);
+ ret = IRQ_HANDLED;
+ }
+ spin_unlock(&global_dev.reg_lock);
+ return ret;
+}
+
+/*
+ * Indicate a need for DMA functionality.
+ */
+int viafb_request_dma(void)
+{
+ int ret = 0;
+
+ /*
+ * Only VX855 is supported currently.
+ */
+ if (global_dev.chip_type != UNICHROME_VX855)
+ return -ENODEV;
+ /*
+ * Note the new user and set up our interrupt handler
+ * if need be.
+ */
+ mutex_lock(&viafb_dma_lock);
+ viafb_dma_users++;
+ if (viafb_dma_users == 1) {
+ ret = request_irq(global_dev.pdev->irq, viafb_dma_irq,
+ IRQF_SHARED, "via-dma", &viafb_dma_users);
+ if (ret)
+ viafb_dma_users--;
+ else
+ viafb_irq_enable(VDE_I_DMA0TDEN);
+ }
+ mutex_unlock(&viafb_dma_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(viafb_request_dma);
+
+void viafb_release_dma(void)
+{
+ mutex_lock(&viafb_dma_lock);
+ viafb_dma_users--;
+ if (viafb_dma_users == 0) {
+ viafb_irq_disable(VDE_I_DMA0TDEN);
+ free_irq(global_dev.pdev->irq, &viafb_dma_users);
+ }
+ mutex_unlock(&viafb_dma_lock);
+}
+EXPORT_SYMBOL_GPL(viafb_release_dma);
+
+
+#if 0
+/*
+ * Copy a single buffer from FB memory, synchronously. This code works
+ * but is not currently used.
+ */
+void viafb_dma_copy_out(unsigned int offset, dma_addr_t paddr, int len)
+{
+ unsigned long flags;
+ int csr;
+
+ mutex_lock(&viafb_dma_lock);
+ init_completion(&viafb_dma_completion);
+ /*
+ * Program the controller.
+ */
+ spin_lock_irqsave(&global_dev.reg_lock, flags);
+ viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_DONE);
+ /* Enable ints; must happen after CSR0 write! */
+ viafb_mmio_write(VDMA_MR0, VDMA_MR_TDIE);
+ viafb_mmio_write(VDMA_MARL0, (int) (paddr & 0xfffffff0));
+ viafb_mmio_write(VDMA_MARH0, (int) ((paddr >> 28) & 0xfff));
+ /* Data sheet suggests DAR0 should be <<4, but it lies */
+ viafb_mmio_write(VDMA_DAR0, offset);
+ viafb_mmio_write(VDMA_DQWCR0, len >> 4);
+ viafb_mmio_write(VDMA_TMR0, 0);
+ viafb_mmio_write(VDMA_DPRL0, 0);
+ viafb_mmio_write(VDMA_DPRH0, 0);
+ viafb_mmio_write(VDMA_PMR0, 0);
+ csr = viafb_mmio_read(VDMA_CSR0);
+ viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_START);
+ spin_unlock_irqrestore(&global_dev.reg_lock, flags);
+ /*
+ * Now we just wait until the interrupt handler says
+ * we're done.
+ */
+ wait_for_completion_interruptible(&viafb_dma_completion);
+ viafb_mmio_write(VDMA_MR0, 0); /* Reset int enable */
+ mutex_unlock(&viafb_dma_lock);
+}
+EXPORT_SYMBOL_GPL(viafb_dma_copy_out);
+#endif
+
+/*
+ * Do a scatter/gather DMA copy from FB memory. You must have done
+ * a successful call to viafb_request_dma() first.
+ */
+int viafb_dma_copy_out_sg(unsigned int offset, struct scatterlist *sg, int nsg)
+{
+ struct viafb_vx855_dma_descr *descr;
+ void *descrpages;
+ dma_addr_t descr_handle;
+ unsigned long flags;
+ int i;
+ struct scatterlist *sgentry;
+ dma_addr_t nextdesc;
+
+ /*
+ * Get a place to put the descriptors.
+ */
+ descrpages = dma_alloc_coherent(&global_dev.pdev->dev,
+ nsg*sizeof(struct viafb_vx855_dma_descr),
+ &descr_handle, GFP_KERNEL);
+ if (descrpages == NULL) {
+ dev_err(&global_dev.pdev->dev, "Unable to get descr page.\n");
+ return -ENOMEM;
+ }
+ mutex_lock(&viafb_dma_lock);
+ /*
+ * Fill them in.
+ */
+ descr = descrpages;
+ nextdesc = descr_handle + sizeof(struct viafb_vx855_dma_descr);
+ for_each_sg(sg, sgentry, nsg, i) {
+ dma_addr_t paddr = sg_dma_address(sgentry);
+ descr->addr_low = paddr & 0xfffffff0;
+ descr->addr_high = ((u64) paddr >> 32) & 0x0fff;
+ descr->fb_offset = offset;
+ descr->seg_size = sg_dma_len(sgentry) >> 4;
+ descr->tile_mode = 0;
+ descr->next_desc_low = (nextdesc&0xfffffff0) | VIAFB_DMA_MAGIC;
+ descr->next_desc_high = ((u64) nextdesc >> 32) & 0x0fff;
+ descr->pad = 0xffffffff; /* VIA driver does this */
+ offset += sg_dma_len(sgentry);
+ nextdesc += sizeof(struct viafb_vx855_dma_descr);
+ descr++;
+ }
+ descr[-1].next_desc_low = VIAFB_DMA_FINAL_SEGMENT|VIAFB_DMA_MAGIC;
+ /*
+ * Program the engine.
+ */
+ spin_lock_irqsave(&global_dev.reg_lock, flags);
+ init_completion(&viafb_dma_completion);
+ viafb_mmio_write(VDMA_DQWCR0, 0);
+ viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_DONE);
+ viafb_mmio_write(VDMA_MR0, VDMA_MR_TDIE | VDMA_MR_CHAIN);
+ viafb_mmio_write(VDMA_DPRL0, descr_handle | VIAFB_DMA_MAGIC);
+ viafb_mmio_write(VDMA_DPRH0,
+ (((u64)descr_handle >> 32) & 0x0fff) | 0xf0000);
+ (void) viafb_mmio_read(VDMA_CSR0);
+ viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_START);
+ spin_unlock_irqrestore(&global_dev.reg_lock, flags);
+ /*
+ * Now we just wait until the interrupt handler says
+ * we're done. Except that, actually, we need to wait a little
+ * longer: the interrupts seem to jump the gun a little and we
+ * get corrupted frames sometimes.
+ */
+ wait_for_completion_timeout(&viafb_dma_completion, 1);
+ msleep(1);
+ if ((viafb_mmio_read(VDMA_CSR0)&VDMA_C_DONE) == 0)
+ printk(KERN_ERR "VIA DMA timeout!\n");
+ /*
+ * Clean up and we're done.
+ */
+ viafb_mmio_write(VDMA_CSR0, VDMA_C_DONE);
+ viafb_mmio_write(VDMA_MR0, 0); /* Reset int enable */
+ mutex_unlock(&viafb_dma_lock);
+ dma_free_coherent(&global_dev.pdev->dev,
+ nsg*sizeof(struct viafb_vx855_dma_descr), descrpages,
+ descr_handle);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(viafb_dma_copy_out_sg);
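A hedged sketch of the call sequence a subdevice (such as the camera driver this was written for) would follow; everything except the viafb_* calls is a placeholder, and the scatterlist is assumed to be DMA-mapped by the caller:

	/* illustrative only: copy nsg mapped segments out of FB memory */
	static int example_copy_from_fb(unsigned int fb_offset,
					struct scatterlist *sg, int nsg)
	{
		int ret = viafb_request_dma();	/* -ENODEV except on VX855 */

		if (ret)
			return ret;
		ret = viafb_dma_copy_out_sg(fb_offset, sg, nsg);
		viafb_release_dma();
		return ret;
	}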
+
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Figure out how big our framebuffer memory is. Kind of ugly,
+ * but evidently we can't trust the information found in the
+ * fbdev configuration area.
+ */
+static u16 via_function3[] = {
+ CLE266_FUNCTION3, KM400_FUNCTION3, CN400_FUNCTION3, CN700_FUNCTION3,
+ CX700_FUNCTION3, KM800_FUNCTION3, KM890_FUNCTION3, P4M890_FUNCTION3,
+ P4M900_FUNCTION3, VX800_FUNCTION3, VX855_FUNCTION3,
+};
+
+/* Get the BIOS-configured framebuffer size from PCI configuration space
+ * of function 3 in the respective chipset */
+static int viafb_get_fb_size_from_pci(int chip_type)
+{
+ int i;
+ u8 offset = 0;
+ u32 FBSize;
+ u32 VideoMemSize;
+
+ /* search for the "FUNCTION3" device in this chipset */
+ for (i = 0; i < ARRAY_SIZE(via_function3); i++) {
+ struct pci_dev *pdev;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_VIA, via_function3[i],
+ NULL);
+ if (!pdev)
+ continue;
+
+ DEBUG_MSG(KERN_INFO "Device ID = %x\n", pdev->device);
+
+ switch (pdev->device) {
+ case CLE266_FUNCTION3:
+ case KM400_FUNCTION3:
+ offset = 0xE0;
+ break;
+ case CN400_FUNCTION3:
+ case CN700_FUNCTION3:
+ case CX700_FUNCTION3:
+ case KM800_FUNCTION3:
+ case KM890_FUNCTION3:
+ case P4M890_FUNCTION3:
+ case P4M900_FUNCTION3:
+ case VX800_FUNCTION3:
+ case VX855_FUNCTION3:
+ /*case CN750_FUNCTION3: */
+ offset = 0xA0;
+ break;
+ }
+
+ if (!offset)
+ break;
+
+ pci_read_config_dword(pdev, offset, &FBSize);
+ pci_dev_put(pdev);
+ }
+
+ if (!offset) {
+ printk(KERN_ERR "cannot determine framebuffer size\n");
+ return -EIO;
+ }
+
+ FBSize = FBSize & 0x00007000;
+ DEBUG_MSG(KERN_INFO "FB Size = %x\n", FBSize);
+
+ if (chip_type < UNICHROME_CX700) {
+ switch (FBSize) {
+ case 0x00004000:
+ VideoMemSize = (16 << 20); /*16M */
+ break;
+
+ case 0x00005000:
+ VideoMemSize = (32 << 20); /*32M */
+ break;
+
+ case 0x00006000:
+ VideoMemSize = (64 << 20); /*64M */
+ break;
+
+ default:
+ VideoMemSize = (32 << 20); /*32M */
+ break;
+ }
+ } else {
+ switch (FBSize) {
+ case 0x00001000:
+ VideoMemSize = (8 << 20); /*8M */
+ break;
+
+ case 0x00002000:
+ VideoMemSize = (16 << 20); /*16M */
+ break;
+
+ case 0x00003000:
+ VideoMemSize = (32 << 20); /*32M */
+ break;
+
+ case 0x00004000:
+ VideoMemSize = (64 << 20); /*64M */
+ break;
+
+ case 0x00005000:
+ VideoMemSize = (128 << 20); /*128M */
+ break;
+
+ case 0x00006000:
+ VideoMemSize = (256 << 20); /*256M */
+ break;
+
+ case 0x00007000: /* Only on VX855/875 */
+ VideoMemSize = (512 << 20); /*512M */
+ break;
+
+ default:
+ VideoMemSize = (32 << 20); /*32M */
+ break;
+ }
+ }
+
+ return VideoMemSize;
+}
+
+
+/*
+ * Figure out and map our MMIO regions.
+ */
+static int __devinit via_pci_setup_mmio(struct viafb_dev *vdev)
+{
+ int ret;
+ /*
+ * Hook up to the device registers. Note that we soldier
+ * on if it fails; the framebuffer can operate (without
+ * acceleration) without this region.
+ */
+ vdev->engine_start = pci_resource_start(vdev->pdev, 1);
+ vdev->engine_len = pci_resource_len(vdev->pdev, 1);
+ vdev->engine_mmio = ioremap_nocache(vdev->engine_start,
+ vdev->engine_len);
+ if (vdev->engine_mmio == NULL)
+ dev_err(&vdev->pdev->dev,
+ "Unable to map engine MMIO; operation will be "
+ "slow and crippled.\n");
+ /*
+ * Map in framebuffer memory. For now, failure here is
+ * fatal. Unfortunately, in the absence of significant
+ * vmalloc space, failure here is also entirely plausible.
+ * Eventually we want to move away from mapping this
+ * entire region.
+ */
+ vdev->fbmem_start = pci_resource_start(vdev->pdev, 0);
+ ret = vdev->fbmem_len = viafb_get_fb_size_from_pci(vdev->chip_type);
+ if (ret < 0)
+ goto out_unmap;
+ vdev->fbmem = ioremap_nocache(vdev->fbmem_start, vdev->fbmem_len);
+ if (vdev->fbmem == NULL) {
+ ret = -ENOMEM;
+ goto out_unmap;
+ }
+ return 0;
+out_unmap:
+ iounmap(vdev->engine_mmio);
+ return ret;
+}
+
+static void __devexit via_pci_teardown_mmio(struct viafb_dev *vdev)
+{
+ iounmap(vdev->fbmem);
+ iounmap(vdev->engine_mmio);
+}
+
+/*
+ * Create our subsidiary devices.
+ */
+static struct viafb_subdev_info {
+ char *name;
+ struct platform_device *platdev;
+} viafb_subdevs[] = {
+ {
+ .name = "viafb-gpio",
+ },
+ {
+ .name = "viafb-i2c",
+ }
+};
+#define N_SUBDEVS ARRAY_SIZE(viafb_subdevs)
+
+static int __devinit via_create_subdev(struct viafb_dev *vdev,
+ struct viafb_subdev_info *info)
+{
+ int ret;
+
+ info->platdev = platform_device_alloc(info->name, -1);
+ if (!info->platdev) {
+ dev_err(&vdev->pdev->dev, "Unable to allocate pdev %s\n",
+ info->name);
+ return -ENOMEM;
+ }
+ info->platdev->dev.parent = &vdev->pdev->dev;
+ info->platdev->dev.platform_data = vdev;
+ ret = platform_device_add(info->platdev);
+ if (ret) {
+ dev_err(&vdev->pdev->dev, "Unable to add pdev %s\n",
+ info->name);
+ platform_device_put(info->platdev);
+ info->platdev = NULL;
+ }
+ return ret;
+}
+
+static int __devinit via_setup_subdevs(struct viafb_dev *vdev)
+{
+ int i;
+
+ /*
+ * Ignore return values. Even if some of the devices
+ * fail to be created, we'll still be able to use some
+ * of the rest.
+ */
+ for (i = 0; i < N_SUBDEVS; i++)
+ via_create_subdev(vdev, viafb_subdevs + i);
+ return 0;
+}
+
+static void __devexit via_teardown_subdevs(void)
+{
+ int i;
+
+ for (i = 0; i < N_SUBDEVS; i++)
+ if (viafb_subdevs[i].platdev) {
+ viafb_subdevs[i].platdev->dev.platform_data = NULL;
+ platform_device_unregister(viafb_subdevs[i].platdev);
+ }
+}
+
+
+static int __devinit via_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+ /*
+ * Global device initialization.
+ */
+ memset(&global_dev, 0, sizeof(global_dev));
+ global_dev.pdev = pdev;
+ global_dev.chip_type = ent->driver_data;
+ global_dev.port_cfg = adap_configs;
+ spin_lock_init(&global_dev.reg_lock);
+ ret = via_pci_setup_mmio(&global_dev);
+ if (ret)
+ goto out_disable;
+ /*
+ * Set up interrupts and create our subdevices. Continue even if
+ * some things fail.
+ */
+ viafb_int_init();
+ via_setup_subdevs(&global_dev);
+ /*
+ * Set up the framebuffer device
+ */
+ ret = via_fb_pci_probe(&global_dev);
+ if (ret)
+ goto out_subdevs;
+ return 0;
+
+out_subdevs:
+ via_teardown_subdevs();
+ via_pci_teardown_mmio(&global_dev);
+out_disable:
+ pci_disable_device(pdev);
+ return ret;
+}
+
+static void __devexit via_pci_remove(struct pci_dev *pdev)
+{
+ via_teardown_subdevs();
+ via_fb_pci_remove(pdev);
+ via_pci_teardown_mmio(&global_dev);
+ pci_disable_device(pdev);
+}
+
+
+static struct pci_device_id via_pci_table[] __devinitdata = {
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CLE266_DID),
+ .driver_data = UNICHROME_CLE266 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_PM800_DID),
+ .driver_data = UNICHROME_PM800 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K400_DID),
+ .driver_data = UNICHROME_K400 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K800_DID),
+ .driver_data = UNICHROME_K800 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_P4M890_DID),
+ .driver_data = UNICHROME_CN700 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K8M890_DID),
+ .driver_data = UNICHROME_K8M890 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CX700_DID),
+ .driver_data = UNICHROME_CX700 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_P4M900_DID),
+ .driver_data = UNICHROME_P4M900 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CN750_DID),
+ .driver_data = UNICHROME_CN750 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX800_DID),
+ .driver_data = UNICHROME_VX800 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX855_DID),
+ .driver_data = UNICHROME_VX855 },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, via_pci_table);
+
+static struct pci_driver via_driver = {
+ .name = "viafb",
+ .id_table = via_pci_table,
+ .probe = via_pci_probe,
+ .remove = __devexit_p(via_pci_remove),
+};
+
+static int __init via_core_init(void)
+{
+ int ret;
+
+ ret = viafb_init();
+ if (ret)
+ return ret;
+ viafb_i2c_init();
+ viafb_gpio_init();
+ return pci_register_driver(&via_driver);
+}
+
+static void __exit via_core_exit(void)
+{
+ pci_unregister_driver(&via_driver);
+ viafb_gpio_exit();
+ viafb_i2c_exit();
+ viafb_exit();
+}
+
+module_init(via_core_init);
+module_exit(via_core_exit);
diff --git a/drivers/video/via/via-gpio.c b/drivers/video/via/via-gpio.c
new file mode 100644
index 000000000000..595516aea691
--- /dev/null
+++ b/drivers/video/via/via-gpio.c
@@ -0,0 +1,285 @@
+/*
+ * Support for viafb GPIO ports.
+ *
+ * Copyright 2009 Jonathan Corbet <corbet@lwn.net>
+ * Distributable under version 2 of the GNU General Public License.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/via-core.h>
+#include <linux/via-gpio.h>
+
+/*
+ * The ports we know about. Note that the port-25 gpios are not
+ * mentioned in the datasheet.
+ */
+
+struct viafb_gpio {
+ char *vg_name; /* Data sheet name */
+ u16 vg_io_port;
+ u8 vg_port_index;
+ int vg_mask_shift;
+};
+
+static struct viafb_gpio viafb_all_gpios[] = {
+ {
+ .vg_name = "VGPIO0", /* Guess - not in datasheet */
+ .vg_io_port = VIASR,
+ .vg_port_index = 0x25,
+ .vg_mask_shift = 1
+ },
+ {
+ .vg_name = "VGPIO1",
+ .vg_io_port = VIASR,
+ .vg_port_index = 0x25,
+ .vg_mask_shift = 0
+ },
+ {
+ .vg_name = "VGPIO2", /* aka DISPCLKI0 */
+ .vg_io_port = VIASR,
+ .vg_port_index = 0x2c,
+ .vg_mask_shift = 1
+ },
+ {
+ .vg_name = "VGPIO3", /* aka DISPCLKO0 */
+ .vg_io_port = VIASR,
+ .vg_port_index = 0x2c,
+ .vg_mask_shift = 0
+ },
+ {
+ .vg_name = "VGPIO4", /* DISPCLKI1 */
+ .vg_io_port = VIASR,
+ .vg_port_index = 0x3d,
+ .vg_mask_shift = 1
+ },
+ {
+ .vg_name = "VGPIO5", /* DISPCLKO1 */
+ .vg_io_port = VIASR,
+ .vg_port_index = 0x3d,
+ .vg_mask_shift = 0
+ },
+};
+
+#define VIAFB_NUM_GPIOS ARRAY_SIZE(viafb_all_gpios)
+
+/*
+ * This structure controls the active GPIOs, which may be a subset
+ * of those which are known.
+ */
+
+struct viafb_gpio_cfg {
+ struct gpio_chip gpio_chip;
+ struct viafb_dev *vdev;
+ struct viafb_gpio *active_gpios[VIAFB_NUM_GPIOS];
+ char *gpio_names[VIAFB_NUM_GPIOS];
+};
+
+/*
+ * GPIO access functions
+ */
+static void via_gpio_set(struct gpio_chip *chip, unsigned int nr,
+ int value)
+{
+ struct viafb_gpio_cfg *cfg = container_of(chip,
+ struct viafb_gpio_cfg,
+ gpio_chip);
+ u8 reg;
+ struct viafb_gpio *gpio;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cfg->vdev->reg_lock, flags);
+ gpio = cfg->active_gpios[nr];
+ reg = via_read_reg(VIASR, gpio->vg_port_index);
+ reg |= 0x40 << gpio->vg_mask_shift; /* output enable */
+ if (value)
+ reg |= 0x10 << gpio->vg_mask_shift;
+ else
+ reg &= ~(0x10 << gpio->vg_mask_shift);
+ via_write_reg(VIASR, gpio->vg_port_index, reg);
+ spin_unlock_irqrestore(&cfg->vdev->reg_lock, flags);
+}
+
+static int via_gpio_dir_out(struct gpio_chip *chip, unsigned int nr,
+ int value)
+{
+ via_gpio_set(chip, nr, value);
+ return 0;
+}
+
+/*
+ * Set the input direction. I'm not sure this is right; we should
+ * be able to do input without disabling output.
+ */
+static int via_gpio_dir_input(struct gpio_chip *chip, unsigned int nr)
+{
+ struct viafb_gpio_cfg *cfg = container_of(chip,
+ struct viafb_gpio_cfg,
+ gpio_chip);
+ struct viafb_gpio *gpio;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cfg->vdev->reg_lock, flags);
+ gpio = cfg->active_gpios[nr];
+ via_write_reg_mask(VIASR, gpio->vg_port_index, 0,
+ 0x40 << gpio->vg_mask_shift);
+ spin_unlock_irqrestore(&cfg->vdev->reg_lock, flags);
+ return 0;
+}
+
+static int via_gpio_get(struct gpio_chip *chip, unsigned int nr)
+{
+ struct viafb_gpio_cfg *cfg = container_of(chip,
+ struct viafb_gpio_cfg,
+ gpio_chip);
+ u8 reg;
+ struct viafb_gpio *gpio;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cfg->vdev->reg_lock, flags);
+ gpio = cfg->active_gpios[nr];
+ reg = via_read_reg(VIASR, gpio->vg_port_index);
+ spin_unlock_irqrestore(&cfg->vdev->reg_lock, flags);
+ return reg & (0x04 << gpio->vg_mask_shift);
+}
+
+
+static struct viafb_gpio_cfg gpio_config = {
+ .gpio_chip = {
+ .label = "VIAFB onboard GPIO",
+ .owner = THIS_MODULE,
+ .direction_output = via_gpio_dir_out,
+ .set = via_gpio_set,
+ .direction_input = via_gpio_dir_input,
+ .get = via_gpio_get,
+ .base = -1,
+ .ngpio = 0,
+ .can_sleep = 0
+ }
+};
+
+/*
+ * Manage the software enable bit.
+ */
+static void viafb_gpio_enable(struct viafb_gpio *gpio)
+{
+ via_write_reg_mask(VIASR, gpio->vg_port_index, 0x02, 0x02);
+}
+
+static void viafb_gpio_disable(struct viafb_gpio *gpio)
+{
+ via_write_reg_mask(VIASR, gpio->vg_port_index, 0, 0x02);
+}
+
+/*
+ * Look up a specific gpio and return the number it was assigned.
+ */
+int viafb_gpio_lookup(const char *name)
+{
+ int i;
+
+ for (i = 0; i < gpio_config.gpio_chip.ngpio; i++)
+ if (!strcmp(name, gpio_config.active_gpios[i]->vg_name))
+ return gpio_config.gpio_chip.base + i;
+ return -1;
+}
+EXPORT_SYMBOL_GPL(viafb_gpio_lookup);
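A brief sketch of a consumer of this lookup; the pin name "VGPIO3" comes from the table above, while the request label and the wrapper function are hypothetical:

	/* illustrative only: drive DISPCLKO0 high through gpiolib */
	static void example_use_gpio(void)
	{
		int gpio = viafb_gpio_lookup("VGPIO3");

		if (gpio >= 0 && gpio_request(gpio, "viafb-example") == 0)
			gpio_direction_output(gpio, 1);
	}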
+
+/*
+ * Platform device stuff.
+ */
+static __devinit int viafb_gpio_probe(struct platform_device *platdev)
+{
+ struct viafb_dev *vdev = platdev->dev.platform_data;
+ struct via_port_cfg *port_cfg = vdev->port_cfg;
+ int i, ngpio = 0, ret;
+ struct viafb_gpio *gpio;
+ unsigned long flags;
+
+ /*
+ * Set up entries for all GPIOs which have been configured to
+ * operate as such (as opposed to as i2c ports).
+ */
+ for (i = 0; i < VIAFB_NUM_PORTS; i++) {
+ if (port_cfg[i].mode != VIA_MODE_GPIO)
+ continue;
+ for (gpio = viafb_all_gpios;
+ gpio < viafb_all_gpios + VIAFB_NUM_GPIOS; gpio++)
+ if (gpio->vg_port_index == port_cfg[i].ioport_index) {
+ gpio_config.active_gpios[ngpio] = gpio;
+ gpio_config.gpio_names[ngpio] = gpio->vg_name;
+ ngpio++;
+ }
+ }
+ gpio_config.gpio_chip.ngpio = ngpio;
+ gpio_config.gpio_chip.names = gpio_config.gpio_names;
+ gpio_config.vdev = vdev;
+ if (ngpio == 0) {
+ printk(KERN_INFO "viafb: no GPIOs configured\n");
+ return 0;
+ }
+ /*
+ * Enable the ports. They come in pairs, with a single
+ * enable bit for both.
+ */
+ spin_lock_irqsave(&gpio_config.vdev->reg_lock, flags);
+ for (i = 0; i < ngpio; i += 2)
+ viafb_gpio_enable(gpio_config.active_gpios[i]);
+ spin_unlock_irqrestore(&gpio_config.vdev->reg_lock, flags);
+ /*
+ * Get registered.
+ */
+ gpio_config.gpio_chip.base = -1; /* Dynamic */
+ ret = gpiochip_add(&gpio_config.gpio_chip);
+ if (ret) {
+ printk(KERN_ERR "viafb: failed to add gpios (%d)\n", ret);
+ gpio_config.gpio_chip.ngpio = 0;
+ }
+ return ret;
+}
+
+
+static int viafb_gpio_remove(struct platform_device *platdev)
+{
+ unsigned long flags;
+ int ret = 0, i;
+
+ /*
+ * Get unregistered.
+ */
+ if (gpio_config.gpio_chip.ngpio > 0) {
+ ret = gpiochip_remove(&gpio_config.gpio_chip);
+ if (ret) { /* Somebody still using it? */
+ printk(KERN_ERR "Viafb: GPIO remove failed\n");
+ return ret;
+ }
+ }
+ /*
+ * Disable the ports.
+ */
+ spin_lock_irqsave(&gpio_config.vdev->reg_lock, flags);
+ for (i = 0; i < gpio_config.gpio_chip.ngpio; i += 2)
+ viafb_gpio_disable(gpio_config.active_gpios[i]);
+ gpio_config.gpio_chip.ngpio = 0;
+ spin_unlock_irqrestore(&gpio_config.vdev->reg_lock, flags);
+ return ret;
+}
+
+static struct platform_driver via_gpio_driver = {
+ .driver = {
+ .name = "viafb-gpio",
+ },
+ .probe = viafb_gpio_probe,
+ .remove = viafb_gpio_remove,
+};
+
+int viafb_gpio_init(void)
+{
+ return platform_driver_register(&via_gpio_driver);
+}
+
+void viafb_gpio_exit(void)
+{
+ platform_driver_unregister(&via_gpio_driver);
+}
diff --git a/drivers/video/via/via_i2c.c b/drivers/video/via/via_i2c.c
index 15543e968248..da9e4ca94b17 100644
--- a/drivers/video/via/via_i2c.c
+++ b/drivers/video/via/via_i2c.c
@@ -1,5 +1,5 @@
/*
- * Copyright 1998-2008 VIA Technologies, Inc. All Rights Reserved.
+ * Copyright 1998-2009 VIA Technologies, Inc. All Rights Reserved.
* Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
* This program is free software; you can redistribute it and/or
@@ -19,77 +19,106 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
-#include "global.h"
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/via-core.h>
+#include <linux/via_i2c.h>
+
+/*
+ * There can only be one set of these, so there's no point in having
+ * them be dynamically allocated...
+ */
+#define VIAFB_NUM_I2C 5
+static struct via_i2c_stuff via_i2c_par[VIAFB_NUM_I2C];
+struct viafb_dev *i2c_vdev; /* Passed in from core */
static void via_i2c_setscl(void *data, int state)
{
u8 val;
- struct via_i2c_stuff *via_i2c_chan = (struct via_i2c_stuff *)data;
+ struct via_port_cfg *adap_data = data;
+ unsigned long flags;
- val = viafb_read_reg(VIASR, via_i2c_chan->i2c_port) & 0xF0;
+ spin_lock_irqsave(&i2c_vdev->reg_lock, flags);
+ val = via_read_reg(adap_data->io_port, adap_data->ioport_index) & 0xF0;
if (state)
val |= 0x20;
else
val &= ~0x20;
- switch (via_i2c_chan->i2c_port) {
- case I2CPORTINDEX:
+ switch (adap_data->type) {
+ case VIA_PORT_I2C:
val |= 0x01;
break;
- case GPIOPORTINDEX:
+ case VIA_PORT_GPIO:
val |= 0x80;
break;
default:
- DEBUG_MSG("via_i2c: specify wrong i2c port.\n");
+ printk(KERN_ERR "viafb_i2c: specify wrong i2c type.\n");
}
- viafb_write_reg(via_i2c_chan->i2c_port, VIASR, val);
+ via_write_reg(adap_data->io_port, adap_data->ioport_index, val);
+ spin_unlock_irqrestore(&i2c_vdev->reg_lock, flags);
}
static int via_i2c_getscl(void *data)
{
- struct via_i2c_stuff *via_i2c_chan = (struct via_i2c_stuff *)data;
+ struct via_port_cfg *adap_data = data;
+ unsigned long flags;
+ int ret = 0;
- if (viafb_read_reg(VIASR, via_i2c_chan->i2c_port) & 0x08)
- return 1;
- return 0;
+ spin_lock_irqsave(&i2c_vdev->reg_lock, flags);
+ if (via_read_reg(adap_data->io_port, adap_data->ioport_index) & 0x08)
+ ret = 1;
+ spin_unlock_irqrestore(&i2c_vdev->reg_lock, flags);
+ return ret;
}
static int via_i2c_getsda(void *data)
{
- struct via_i2c_stuff *via_i2c_chan = (struct via_i2c_stuff *)data;
+ struct via_port_cfg *adap_data = data;
+ unsigned long flags;
+ int ret = 0;
- if (viafb_read_reg(VIASR, via_i2c_chan->i2c_port) & 0x04)
- return 1;
- return 0;
+ spin_lock_irqsave(&i2c_vdev->reg_lock, flags);
+ if (via_read_reg(adap_data->io_port, adap_data->ioport_index) & 0x04)
+ ret = 1;
+ spin_unlock_irqrestore(&i2c_vdev->reg_lock, flags);
+ return ret;
}
static void via_i2c_setsda(void *data, int state)
{
u8 val;
- struct via_i2c_stuff *via_i2c_chan = (struct via_i2c_stuff *)data;
+ struct via_port_cfg *adap_data = data;
+ unsigned long flags;
- val = viafb_read_reg(VIASR, via_i2c_chan->i2c_port) & 0xF0;
+ spin_lock_irqsave(&i2c_vdev->reg_lock, flags);
+ val = via_read_reg(adap_data->io_port, adap_data->ioport_index) & 0xF0;
if (state)
val |= 0x10;
else
val &= ~0x10;
- switch (via_i2c_chan->i2c_port) {
- case I2CPORTINDEX:
+ switch (adap_data->type) {
+ case VIA_PORT_I2C:
val |= 0x01;
break;
- case GPIOPORTINDEX:
+ case VIA_PORT_GPIO:
val |= 0x40;
break;
default:
- DEBUG_MSG("via_i2c: specify wrong i2c port.\n");
+ printk(KERN_ERR "viafb_i2c: specify wrong i2c type.\n");
}
- viafb_write_reg(via_i2c_chan->i2c_port, VIASR, val);
+ via_write_reg(adap_data->io_port, adap_data->ioport_index, val);
+ spin_unlock_irqrestore(&i2c_vdev->reg_lock, flags);
}
-int viafb_i2c_readbyte(u8 slave_addr, u8 index, u8 *pdata)
+int viafb_i2c_readbyte(u8 adap, u8 slave_addr, u8 index, u8 *pdata)
{
u8 mm1[] = {0x00};
struct i2c_msg msgs[2];
+ if (!via_i2c_par[adap].is_active)
+ return -ENODEV;
*pdata = 0;
msgs[0].flags = 0;
msgs[1].flags = I2C_M_RD;
@@ -97,81 +126,144 @@ int viafb_i2c_readbyte(u8 slave_addr, u8 index, u8 *pdata)
mm1[0] = index;
msgs[0].len = 1; msgs[1].len = 1;
msgs[0].buf = mm1; msgs[1].buf = pdata;
- i2c_transfer(&viaparinfo->shared->i2c_stuff.adapter, msgs, 2);
-
- return 0;
+ return i2c_transfer(&via_i2c_par[adap].adapter, msgs, 2);
}
-int viafb_i2c_writebyte(u8 slave_addr, u8 index, u8 data)
+int viafb_i2c_writebyte(u8 adap, u8 slave_addr, u8 index, u8 data)
{
u8 msg[2] = { index, data };
struct i2c_msg msgs;
+ if (!via_i2c_par[adap].is_active)
+ return -ENODEV;
msgs.flags = 0;
msgs.addr = slave_addr / 2;
msgs.len = 2;
msgs.buf = msg;
- return i2c_transfer(&viaparinfo->shared->i2c_stuff.adapter, &msgs, 1);
+ return i2c_transfer(&via_i2c_par[adap].adapter, &msgs, 1);
}
-int viafb_i2c_readbytes(u8 slave_addr, u8 index, u8 *buff, int buff_len)
+int viafb_i2c_readbytes(u8 adap, u8 slave_addr, u8 index, u8 *buff, int buff_len)
{
u8 mm1[] = {0x00};
struct i2c_msg msgs[2];
+ if (!via_i2c_par[adap].is_active)
+ return -ENODEV;
msgs[0].flags = 0;
msgs[1].flags = I2C_M_RD;
msgs[0].addr = msgs[1].addr = slave_addr / 2;
mm1[0] = index;
msgs[0].len = 1; msgs[1].len = buff_len;
msgs[0].buf = mm1; msgs[1].buf = buff;
- i2c_transfer(&viaparinfo->shared->i2c_stuff.adapter, msgs, 2);
- return 0;
+ return i2c_transfer(&via_i2c_par[adap].adapter, msgs, 2);
}
-int viafb_create_i2c_bus(void *viapar)
+/*
+ * Allow other viafb subdevices to look up a specific adapter
+ * by port name.
+ */
+struct i2c_adapter *viafb_find_i2c_adapter(enum viafb_i2c_adap which)
{
- int ret;
- struct via_i2c_stuff *i2c_stuff =
- &((struct viafb_par *)viapar)->shared->i2c_stuff;
-
- strcpy(i2c_stuff->adapter.name, "via_i2c");
- i2c_stuff->i2c_port = 0x0;
- i2c_stuff->adapter.owner = THIS_MODULE;
- i2c_stuff->adapter.id = 0x01FFFF;
- i2c_stuff->adapter.class = 0;
- i2c_stuff->adapter.algo_data = &i2c_stuff->algo;
- i2c_stuff->adapter.dev.parent = NULL;
- i2c_stuff->algo.setsda = via_i2c_setsda;
- i2c_stuff->algo.setscl = via_i2c_setscl;
- i2c_stuff->algo.getsda = via_i2c_getsda;
- i2c_stuff->algo.getscl = via_i2c_getscl;
- i2c_stuff->algo.udelay = 40;
- i2c_stuff->algo.timeout = 20;
- i2c_stuff->algo.data = i2c_stuff;
-
- i2c_set_adapdata(&i2c_stuff->adapter, i2c_stuff);
+ struct via_i2c_stuff *stuff = &via_i2c_par[which];
- /* Raise SCL and SDA */
- i2c_stuff->i2c_port = I2CPORTINDEX;
- via_i2c_setsda(i2c_stuff, 1);
- via_i2c_setscl(i2c_stuff, 1);
+ return &stuff->adapter;
+}
+EXPORT_SYMBOL_GPL(viafb_find_i2c_adapter);
- i2c_stuff->i2c_port = GPIOPORTINDEX;
- via_i2c_setsda(i2c_stuff, 1);
- via_i2c_setscl(i2c_stuff, 1);
- udelay(20);
- ret = i2c_bit_add_bus(&i2c_stuff->adapter);
- if (ret == 0)
- DEBUG_MSG("I2C bus %s registered.\n", i2c_stuff->adapter.name);
+static int create_i2c_bus(struct i2c_adapter *adapter,
+ struct i2c_algo_bit_data *algo,
+ struct via_port_cfg *adap_cfg,
+ struct pci_dev *pdev)
+{
+ algo->setsda = via_i2c_setsda;
+ algo->setscl = via_i2c_setscl;
+ algo->getsda = via_i2c_getsda;
+ algo->getscl = via_i2c_getscl;
+ algo->udelay = 40;
+ algo->timeout = 20;
+ algo->data = adap_cfg;
+
+ sprintf(adapter->name, "viafb i2c io_port idx 0x%02x",
+ adap_cfg->ioport_index);
+ adapter->owner = THIS_MODULE;
+ adapter->id = 0x01FFFF;
+ adapter->class = I2C_CLASS_DDC;
+ adapter->algo_data = algo;
+ if (pdev)
+ adapter->dev.parent = &pdev->dev;
else
- DEBUG_MSG("Failed to register I2C bus %s.\n",
- i2c_stuff->adapter.name);
- return ret;
+ adapter->dev.parent = NULL;
+ /* i2c_set_adapdata(adapter, adap_cfg); */
+
+ /* Raise SCL and SDA */
+ via_i2c_setsda(adap_cfg, 1);
+ via_i2c_setscl(adap_cfg, 1);
+ udelay(20);
+
+ return i2c_bit_add_bus(adapter);
+}
+
+static int viafb_i2c_probe(struct platform_device *platdev)
+{
+ int i, ret;
+ struct via_port_cfg *configs;
+
+ i2c_vdev = platdev->dev.platform_data;
+ configs = i2c_vdev->port_cfg;
+
+ for (i = 0; i < VIAFB_NUM_PORTS; i++) {
+ struct via_port_cfg *adap_cfg = configs++;
+ struct via_i2c_stuff *i2c_stuff = &via_i2c_par[i];
+
+ i2c_stuff->is_active = 0;
+ if (adap_cfg->type == 0 || adap_cfg->mode != VIA_MODE_I2C)
+ continue;
+ ret = create_i2c_bus(&i2c_stuff->adapter,
+ &i2c_stuff->algo, adap_cfg,
+ NULL); /* FIXME: PCIDEV */
+ if (ret < 0) {
+ printk(KERN_ERR "viafb: cannot create i2c bus %u:%d\n",
+ i, ret);
+ continue; /* Still try to make the rest */
+ }
+ i2c_stuff->is_active = 1;
+ }
+
+ return 0;
+}
+
+static int viafb_i2c_remove(struct platform_device *platdev)
+{
+ int i;
+
+ for (i = 0; i < VIAFB_NUM_PORTS; i++) {
+ struct via_i2c_stuff *i2c_stuff = &via_i2c_par[i];
+ /*
+ * Only remove those entries in the array that we've
+ * actually used (and thus initialized algo_data)
+ */
+ if (i2c_stuff->is_active)
+ i2c_del_adapter(&i2c_stuff->adapter);
+ }
+ return 0;
+}
+
+static struct platform_driver via_i2c_driver = {
+ .driver = {
+ .name = "viafb-i2c",
+ },
+ .probe = viafb_i2c_probe,
+ .remove = viafb_i2c_remove,
+};
+
+int viafb_i2c_init(void)
+{
+ return platform_driver_register(&via_i2c_driver);
}
-void viafb_delete_i2c_buss(void *par)
+void viafb_i2c_exit(void)
{
- i2c_del_adapter(&((struct viafb_par *)par)->shared->i2c_stuff.adapter);
+ platform_driver_unregister(&via_i2c_driver);
}
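With the adapter parameter added above, callers now name the bus explicitly. A condensed sketch of such a caller follows; the slave address 0xA0 is a placeholder, whereas the real callers in dvi.c and lcd.c pass addresses taken from chip_info:

	/* illustrative only: read one register on the 0x31 I2C bus */
	static int example_probe_port_31(void)
	{
		u8 val;

		return viafb_i2c_readbyte(VIA_PORT_31, 0xA0, 0x00, &val);
	}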
diff --git a/drivers/video/via/via_modesetting.c b/drivers/video/via/via_modesetting.c
new file mode 100644
index 000000000000..3cddcff88ab9
--- /dev/null
+++ b/drivers/video/via/via_modesetting.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright 1998-2008 VIA Technologies, Inc. All Rights Reserved.
+ * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
+ * Copyright 2010 Florian Tobias Schandinat <FlorianSchandinat@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation;
+ * either version 2, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even
+ * the implied warranty of MERCHANTABILITY or FITNESS FOR
+ * A PARTICULAR PURPOSE.See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+/*
+ * basic modesetting functions
+ */
+
+#include <linux/kernel.h>
+#include <linux/via-core.h>
+#include "via_modesetting.h"
+#include "share.h"
+#include "debug.h"
+
+void via_set_primary_address(u32 addr)
+{
+ DEBUG_MSG(KERN_DEBUG "via_set_primary_address(0x%08X)\n", addr);
+ via_write_reg(VIACR, 0x0D, addr & 0xFF);
+ via_write_reg(VIACR, 0x0C, (addr >> 8) & 0xFF);
+ via_write_reg(VIACR, 0x34, (addr >> 16) & 0xFF);
+ via_write_reg_mask(VIACR, 0x48, (addr >> 24) & 0x1F, 0x1F);
+}
+
+void via_set_secondary_address(u32 addr)
+{
+ DEBUG_MSG(KERN_DEBUG "via_set_secondary_address(0x%08X)\n", addr);
+ /* secondary display supports only quadword aligned memory */
+ via_write_reg_mask(VIACR, 0x62, (addr >> 2) & 0xFE, 0xFE);
+ via_write_reg(VIACR, 0x63, (addr >> 10) & 0xFF);
+ via_write_reg(VIACR, 0x64, (addr >> 18) & 0xFF);
+ via_write_reg_mask(VIACR, 0xA3, (addr >> 26) & 0x07, 0x07);
+}
+
+void via_set_primary_pitch(u32 pitch)
+{
+ DEBUG_MSG(KERN_DEBUG "via_set_primary_pitch(0x%08X)\n", pitch);
+ /* spec does not say that first adapter skips 3 bits but old
+ * code did it and seems to be reasonable in analogy to 2nd adapter
+ */
+ pitch = pitch >> 3;
+ via_write_reg(VIACR, 0x13, pitch & 0xFF);
+ via_write_reg_mask(VIACR, 0x35, (pitch >> (8 - 5)) & 0xE0, 0xE0);
+}
+
+void via_set_secondary_pitch(u32 pitch)
+{
+ DEBUG_MSG(KERN_DEBUG "via_set_secondary_pitch(0x%08X)\n", pitch);
+ pitch = pitch >> 3;
+ via_write_reg(VIACR, 0x66, pitch & 0xFF);
+ via_write_reg_mask(VIACR, 0x67, (pitch >> 8) & 0x03, 0x03);
+ via_write_reg_mask(VIACR, 0x71, (pitch >> (10 - 7)) & 0x80, 0x80);
+}
+
+void via_set_primary_color_depth(u8 depth)
+{
+ u8 value;
+
+ DEBUG_MSG(KERN_DEBUG "via_set_primary_color_depth(%d)\n", depth);
+ switch (depth) {
+ case 8:
+ value = 0x00;
+ break;
+ case 15:
+ value = 0x04;
+ break;
+ case 16:
+ value = 0x14;
+ break;
+ case 24:
+ value = 0x0C;
+ break;
+ case 30:
+ value = 0x08;
+ break;
+ default:
+ printk(KERN_WARNING "via_set_primary_color_depth: "
+ "Unsupported depth: %d\n", depth);
+ return;
+ }
+
+ via_write_reg_mask(VIASR, 0x15, value, 0x1C);
+}
+
+void via_set_secondary_color_depth(u8 depth)
+{
+ u8 value;
+
+ DEBUG_MSG(KERN_DEBUG "via_set_secondary_color_depth(%d)\n", depth);
+ switch (depth) {
+ case 8:
+ value = 0x00;
+ break;
+ case 16:
+ value = 0x40;
+ break;
+ case 24:
+ value = 0xC0;
+ break;
+ case 30:
+ value = 0x80;
+ break;
+ default:
+ printk(KERN_WARNING "via_set_secondary_color_depth: "
+ "Unsupported depth: %d\n", depth);
+ return;
+ }
+
+ via_write_reg_mask(VIACR, 0x67, value, 0xC0);
+}
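Taken together with the hw.c and viafbdev.c changes in this patch, a mode change now reduces to calls of the following shape (condensed from the actual call sites, not new code):

	via_set_primary_address(0);
	via_set_primary_pitch(viafbinfo->fix.line_length);
	via_set_primary_color_depth(viaparinfo->depth);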
diff --git a/drivers/video/via/via_modesetting.h b/drivers/video/via/via_modesetting.h
new file mode 100644
index 000000000000..ae35cfdeb37c
--- /dev/null
+++ b/drivers/video/via/via_modesetting.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 1998-2008 VIA Technologies, Inc. All Rights Reserved.
+ * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
+ * Copyright 2010 Florian Tobias Schandinat <FlorianSchandinat@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation;
+ * either version 2, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even
+ * the implied warranty of MERCHANTABILITY or FITNESS FOR
+ * A PARTICULAR PURPOSE.See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+/*
+ * basic modesetting functions
+ */
+
+#ifndef __VIA_MODESETTING_H__
+#define __VIA_MODESETTING_H__
+
+#include <linux/types.h>
+
+void via_set_primary_address(u32 addr);
+void via_set_secondary_address(u32 addr);
+void via_set_primary_pitch(u32 pitch);
+void via_set_secondary_pitch(u32 pitch);
+void via_set_primary_color_depth(u8 depth);
+void via_set_secondary_color_depth(u8 depth);
+
+#endif /* __VIA_MODESETTING_H__ */
diff --git a/drivers/video/via/via_utility.c b/drivers/video/via/via_utility.c
index aefdeeec89b1..d05ccb62b55f 100644
--- a/drivers/video/via/via_utility.c
+++ b/drivers/video/via/via_utility.c
@@ -19,6 +19,7 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/via-core.h>
#include "global.h"
void viafb_get_device_support_state(u32 *support_state)
diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c
index 777b38a06d40..2bc40e682f95 100644
--- a/drivers/video/via/viafbdev.c
+++ b/drivers/video/via/viafbdev.c
@@ -1,5 +1,5 @@
/*
- * Copyright 1998-2008 VIA Technologies, Inc. All Rights Reserved.
+ * Copyright 1998-2009 VIA Technologies, Inc. All Rights Reserved.
* Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
* This program is free software; you can redistribute it and/or
@@ -23,8 +23,9 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
-#define _MASTER_FILE
+#include <linux/via-core.h>
+#define _MASTER_FILE
#include "global.h"
static char *viafb_name = "Via";
@@ -221,7 +222,7 @@ static int viafb_check_var(struct fb_var_screeninfo *var,
/* Adjust var according to our driver's own table */
viafb_fill_var_timing_info(var, viafb_refresh, vmode_entry);
if (info->var.accel_flags & FB_ACCELF_TEXT &&
- !ppar->shared->engine_mmio)
+ !ppar->shared->vdev->engine_mmio)
info->var.accel_flags = 0;
return 0;
@@ -317,12 +318,12 @@ static int viafb_pan_display(struct fb_var_screeninfo *var,
DEBUG_MSG(KERN_DEBUG "viafb_pan_display, address = %d\n", vram_addr);
if (!viafb_dual_fb) {
- viafb_set_primary_address(vram_addr);
- viafb_set_secondary_address(vram_addr);
+ via_set_primary_address(vram_addr);
+ via_set_secondary_address(vram_addr);
} else if (viapar->iga_path == IGA1)
- viafb_set_primary_address(vram_addr);
+ via_set_primary_address(vram_addr);
else
- viafb_set_secondary_address(vram_addr);
+ via_set_secondary_address(vram_addr);
return 0;
}
@@ -696,7 +697,7 @@ static void viafb_fillrect(struct fb_info *info,
rop = 0xF0;
DEBUG_MSG(KERN_DEBUG "viafb 2D engine: fillrect\n");
- if (shared->hw_bitblt(shared->engine_mmio, VIA_BITBLT_FILL,
+ if (shared->hw_bitblt(shared->vdev->engine_mmio, VIA_BITBLT_FILL,
rect->width, rect->height, info->var.bits_per_pixel,
viapar->vram_addr, info->fix.line_length, rect->dx, rect->dy,
NULL, 0, 0, 0, 0, fg_color, 0, rop))
@@ -718,7 +719,7 @@ static void viafb_copyarea(struct fb_info *info,
return;
DEBUG_MSG(KERN_DEBUG "viafb 2D engine: copyarea\n");
- if (shared->hw_bitblt(shared->engine_mmio, VIA_BITBLT_COLOR,
+ if (shared->hw_bitblt(shared->vdev->engine_mmio, VIA_BITBLT_COLOR,
area->width, area->height, info->var.bits_per_pixel,
viapar->vram_addr, info->fix.line_length, area->dx, area->dy,
NULL, viapar->vram_addr, info->fix.line_length,
@@ -755,7 +756,7 @@ static void viafb_imageblit(struct fb_info *info,
op = VIA_BITBLT_COLOR;
DEBUG_MSG(KERN_DEBUG "viafb 2D engine: imageblit\n");
- if (shared->hw_bitblt(shared->engine_mmio, op,
+ if (shared->hw_bitblt(shared->vdev->engine_mmio, op,
image->width, image->height, info->var.bits_per_pixel,
viapar->vram_addr, info->fix.line_length, image->dx, image->dy,
(u32 *)image->data, 0, 0, 0, 0, fg_color, bg_color, 0))
@@ -765,7 +766,7 @@ static void viafb_imageblit(struct fb_info *info,
static int viafb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
struct viafb_par *viapar = info->par;
- void __iomem *engine = viapar->shared->engine_mmio;
+ void __iomem *engine = viapar->shared->vdev->engine_mmio;
u32 temp, xx, yy, bg_color = 0, fg_color = 0,
chip_name = viapar->shared->chip_info.gfx_chip_name;
int i, j = 0, cur_size = 64;
@@ -1018,8 +1019,8 @@ static void viafb_set_device(struct device_t active_dev)
viafb_SAMM_ON = active_dev.samm;
viafb_primary_dev = active_dev.primary_dev;
- viafb_set_primary_address(0);
- viafb_set_secondary_address(viafb_SAMM_ON ? viafb_second_offset : 0);
+ via_set_primary_address(0);
+ via_set_secondary_address(viafb_SAMM_ON ? viafb_second_offset : 0);
viafb_set_iga_path();
}
@@ -1165,8 +1166,9 @@ static int apply_device_setting(struct viafb_ioctl_setting setting_info,
if (viafb_SAMM_ON)
viafb_primary_dev = setting_info.primary_device;
- viafb_set_primary_address(0);
- viafb_set_secondary_address(viafb_SAMM_ON ? viafb_second_offset : 0);
+ via_set_primary_address(0);
+ via_set_secondary_address(viafb_SAMM_ON ?
+ viafb_second_offset : 0);
viafb_set_iga_path();
}
need_set_mode = 1;
@@ -1325,6 +1327,8 @@ static void parse_dvi_port(void)
output_interface);
}
+#ifdef CONFIG_FB_VIA_DIRECT_PROCFS
+
/*
* The proc filesystem read/write function, a simple proc implementation to
* get/set the value of DPA DVP0, DVP0DataDriving, DVP0ClockDriving, DVP1,
@@ -1701,16 +1705,21 @@ static void viafb_init_proc(struct proc_dir_entry **viafb_entry)
}
static void viafb_remove_proc(struct proc_dir_entry *viafb_entry)
{
- /* no problem if it was not registered */
+ struct chip_information *chip_info = &viaparinfo->shared->chip_info;
+
remove_proc_entry("dvp0", viafb_entry);/* parent dir */
remove_proc_entry("dvp1", viafb_entry);
remove_proc_entry("dfph", viafb_entry);
remove_proc_entry("dfpl", viafb_entry);
- remove_proc_entry("vt1636", viafb_entry);
- remove_proc_entry("vt1625", viafb_entry);
+ if (chip_info->lvds_chip_info.lvds_chip_name == VT1636_LVDS
+ || chip_info->lvds_chip_info2.lvds_chip_name == VT1636_LVDS)
+ remove_proc_entry("vt1636", viafb_entry);
+
remove_proc_entry("viafb", NULL);
}
+#endif /* CONFIG_FB_VIA_DIRECT_PROCFS */
+
static int parse_mode(const char *str, u32 *xres, u32 *yres)
{
char *ptr;
@@ -1732,12 +1741,13 @@ static int parse_mode(const char *str, u32 *xres, u32 *yres)
return 0;
}
-static int __devinit via_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+
+int __devinit via_fb_pci_probe(struct viafb_dev *vdev)
{
u32 default_xres, default_yres;
struct VideoModeTable *vmode_entry;
struct fb_var_screeninfo default_var;
+ int rc;
u32 viafb_par_length;
DEBUG_MSG(KERN_INFO "VIAFB PCI Probe!!\n");
@@ -1749,14 +1759,15 @@ static int __devinit via_pci_probe(struct pci_dev *pdev,
*/
viafbinfo = framebuffer_alloc(viafb_par_length +
ALIGN(sizeof(struct viafb_shared), BITS_PER_LONG/8),
- &pdev->dev);
+ &vdev->pdev->dev);
if (!viafbinfo) {
printk(KERN_ERR"Could not allocate memory for viafb_info.\n");
- return -ENODEV;
+ return -ENOMEM;
}
viaparinfo = (struct viafb_par *)viafbinfo->par;
viaparinfo->shared = viafbinfo->par + viafb_par_length;
+ viaparinfo->shared->vdev = vdev;
viaparinfo->vram_addr = 0;
viaparinfo->tmds_setting_info = &viaparinfo->shared->tmds_setting_info;
viaparinfo->lvds_setting_info = &viaparinfo->shared->lvds_setting_info;
@@ -1774,23 +1785,20 @@ static int __devinit via_pci_probe(struct pci_dev *pdev,
if (!viafb_SAMM_ON)
viafb_dual_fb = 0;
- /* Set up I2C bus stuff */
- viafb_create_i2c_bus(viaparinfo);
-
- viafb_init_chip_info(pdev, ent);
- viaparinfo->fbmem = pci_resource_start(pdev, 0);
- viaparinfo->memsize = viafb_get_fb_size_from_pci();
+ viafb_init_chip_info(vdev->chip_type);
+ /*
+ * The framebuffer will have been successfully mapped by
+ * the core (or we'd not be here), but we still need to
+ * set up our own accounting.
+ */
+ viaparinfo->fbmem = vdev->fbmem_start;
+ viaparinfo->memsize = vdev->fbmem_len;
viaparinfo->fbmem_free = viaparinfo->memsize;
viaparinfo->fbmem_used = 0;
- viafbinfo->screen_base = ioremap_nocache(viaparinfo->fbmem,
- viaparinfo->memsize);
- if (!viafbinfo->screen_base) {
- printk(KERN_INFO "ioremap failed\n");
- return -ENOMEM;
- }
+ viafbinfo->screen_base = vdev->fbmem;
- viafbinfo->fix.mmio_start = pci_resource_start(pdev, 1);
- viafbinfo->fix.mmio_len = pci_resource_len(pdev, 1);
+ viafbinfo->fix.mmio_start = vdev->engine_start;
+ viafbinfo->fix.mmio_len = vdev->engine_len;
viafbinfo->node = 0;
viafbinfo->fbops = &viafb_ops;
viafbinfo->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
@@ -1858,12 +1866,13 @@ static int __devinit via_pci_probe(struct pci_dev *pdev,
viafbinfo->var = default_var;
if (viafb_dual_fb) {
- viafbinfo1 = framebuffer_alloc(viafb_par_length, &pdev->dev);
+ viafbinfo1 = framebuffer_alloc(viafb_par_length,
+ &vdev->pdev->dev);
if (!viafbinfo1) {
printk(KERN_ERR
"allocate the second framebuffer struct error\n");
- framebuffer_release(viafbinfo);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto out_fb_release;
}
viaparinfo1 = viafbinfo1->par;
memcpy(viaparinfo1, viaparinfo, viafb_par_length);
@@ -1914,48 +1923,66 @@ static int __devinit via_pci_probe(struct pci_dev *pdev,
viaparinfo->depth = fb_get_color_depth(&viafbinfo->var,
&viafbinfo->fix);
default_var.activate = FB_ACTIVATE_NOW;
- fb_alloc_cmap(&viafbinfo->cmap, 256, 0);
+ rc = fb_alloc_cmap(&viafbinfo->cmap, 256, 0);
+ if (rc)
+ goto out_fb1_release;
if (viafb_dual_fb && (viafb_primary_dev == LCD_Device)
&& (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266)) {
- if (register_framebuffer(viafbinfo1) < 0)
- return -EINVAL;
+ rc = register_framebuffer(viafbinfo1);
+ if (rc)
+ goto out_dealloc_cmap;
}
- if (register_framebuffer(viafbinfo) < 0)
- return -EINVAL;
+ rc = register_framebuffer(viafbinfo);
+ if (rc)
+ goto out_fb1_unreg_lcd_cle266;
if (viafb_dual_fb && ((viafb_primary_dev != LCD_Device)
|| (viaparinfo->chip_info->gfx_chip_name !=
UNICHROME_CLE266))) {
- if (register_framebuffer(viafbinfo1) < 0)
- return -EINVAL;
+ rc = register_framebuffer(viafbinfo1);
+ if (rc)
+ goto out_fb_unreg;
}
DEBUG_MSG(KERN_INFO "fb%d: %s frame buffer device %dx%d-%dbpp\n",
viafbinfo->node, viafbinfo->fix.id, default_var.xres,
default_var.yres, default_var.bits_per_pixel);
+#ifdef CONFIG_FB_VIA_DIRECT_PROCFS
viafb_init_proc(&viaparinfo->shared->proc_entry);
+#endif
viafb_init_dac(IGA2);
return 0;
+
+out_fb_unreg:
+ unregister_framebuffer(viafbinfo);
+out_fb1_unreg_lcd_cle266:
+ if (viafb_dual_fb && (viafb_primary_dev == LCD_Device)
+ && (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266))
+ unregister_framebuffer(viafbinfo1);
+out_dealloc_cmap:
+ fb_dealloc_cmap(&viafbinfo->cmap);
+out_fb1_release:
+ if (viafbinfo1)
+ framebuffer_release(viafbinfo1);
+out_fb_release:
+ framebuffer_release(viafbinfo);
+ return rc;
}
-static void __devexit via_pci_remove(struct pci_dev *pdev)
+void __devexit via_fb_pci_remove(struct pci_dev *pdev)
{
DEBUG_MSG(KERN_INFO "via_pci_remove!\n");
fb_dealloc_cmap(&viafbinfo->cmap);
unregister_framebuffer(viafbinfo);
if (viafb_dual_fb)
unregister_framebuffer(viafbinfo1);
- iounmap((void *)viafbinfo->screen_base);
- iounmap(viaparinfo->shared->engine_mmio);
-
- viafb_delete_i2c_buss(viaparinfo);
-
+#ifdef CONFIG_FB_VIA_DIRECT_PROCFS
+ viafb_remove_proc(viaparinfo->shared->proc_entry);
+#endif
framebuffer_release(viafbinfo);
if (viafb_dual_fb)
framebuffer_release(viafbinfo1);
-
- viafb_remove_proc(viaparinfo->shared->proc_entry);
}
#ifndef MODULE
@@ -2031,41 +2058,10 @@ static int __init viafb_setup(char *options)
}
#endif
-static struct pci_device_id viafb_pci_table[] __devinitdata = {
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CLE266_DID),
- .driver_data = UNICHROME_CLE266 },
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_PM800_DID),
- .driver_data = UNICHROME_PM800 },
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K400_DID),
- .driver_data = UNICHROME_K400 },
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K800_DID),
- .driver_data = UNICHROME_K800 },
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_P4M890_DID),
- .driver_data = UNICHROME_CN700 },
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K8M890_DID),
- .driver_data = UNICHROME_K8M890 },
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CX700_DID),
- .driver_data = UNICHROME_CX700 },
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_P4M900_DID),
- .driver_data = UNICHROME_P4M900 },
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CN750_DID),
- .driver_data = UNICHROME_CN750 },
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX800_DID),
- .driver_data = UNICHROME_VX800 },
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX855_DID),
- .driver_data = UNICHROME_VX855 },
- { }
-};
-MODULE_DEVICE_TABLE(pci, viafb_pci_table);
-
-static struct pci_driver viafb_driver = {
- .name = "viafb",
- .id_table = viafb_pci_table,
- .probe = via_pci_probe,
- .remove = __devexit_p(via_pci_remove),
-};
-
-static int __init viafb_init(void)
+/*
+ * These are called out of via-core for now.
+ */
+int __init viafb_init(void)
{
u32 dummy;
#ifndef MODULE
@@ -2084,13 +2080,12 @@ static int __init viafb_init(void)
printk(KERN_INFO
"VIA Graphics Intergration Chipset framebuffer %d.%d initializing\n",
VERSION_MAJOR, VERSION_MINOR);
- return pci_register_driver(&viafb_driver);
+ return 0;
}
-static void __exit viafb_exit(void)
+void __exit viafb_exit(void)
{
DEBUG_MSG(KERN_INFO "viafb_exit!\n");
- pci_unregister_driver(&viafb_driver);
}
static struct fb_ops viafb_ops = {
@@ -2110,8 +2105,6 @@ static struct fb_ops viafb_ops = {
.fb_sync = viafb_sync,
};
-module_init(viafb_init);
-module_exit(viafb_exit);
#ifdef MODULE
module_param(viafb_mode, charp, S_IRUSR);
diff --git a/drivers/video/via/viafbdev.h b/drivers/video/via/viafbdev.h
index 61b5953cd159..52a35fabba91 100644
--- a/drivers/video/via/viafbdev.h
+++ b/drivers/video/via/viafbdev.h
@@ -24,12 +24,12 @@
#include <linux/proc_fs.h>
#include <linux/fb.h>
+#include <linux/spinlock.h>
#include "ioctl.h"
#include "share.h"
#include "chip.h"
#include "hw.h"
-#include "via_i2c.h"
#define VERSION_MAJOR 2
#define VERSION_KERNEL 6 /* For kernel 2.6 */
@@ -37,11 +37,11 @@
#define VERSION_OS 0 /* 0: for 32 bits OS, 1: for 64 bits OS */
#define VERSION_MINOR 4
+#define VIAFB_NUM_I2C 5
+
struct viafb_shared {
struct proc_dir_entry *proc_entry; /*viafb proc entry */
-
- /* I2C stuff */
- struct via_i2c_stuff i2c_stuff;
+ struct viafb_dev *vdev; /* Global dev info */
/* All the information will be needed to set engine */
struct tmds_setting_information tmds_setting_info;
@@ -51,7 +51,6 @@ struct viafb_shared {
struct chip_information chip_info;
/* hardware acceleration stuff */
- void __iomem *engine_mmio;
u32 cursor_vram_addr;
u32 vq_vram_addr; /* virtual queue address in video ram */
int (*hw_bitblt)(void __iomem *engine, u8 op, u32 width, u32 height,
@@ -99,4 +98,9 @@ u8 viafb_gpio_i2c_read_lvds(struct lvds_setting_information
void viafb_gpio_i2c_write_mask_lvds(struct lvds_setting_information
*plvds_setting_info, struct lvds_chip_information
*plvds_chip_info, struct IODATA io_data);
+int via_fb_pci_probe(struct viafb_dev *vdev);
+void via_fb_pci_remove(struct pci_dev *pdev);
+/* Temporary */
+int viafb_init(void);
+void viafb_exit(void);
#endif /* __VIAFBDEV_H__ */
diff --git a/drivers/video/via/viamode.c b/drivers/video/via/viamode.c
index af50e244016c..2dbad3c0f679 100644
--- a/drivers/video/via/viamode.c
+++ b/drivers/video/via/viamode.c
@@ -19,6 +19,7 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/via-core.h>
#include "global.h"
struct res_map_refresh res_map_refresh_tbl[] = {
/*hres, vres, vclock, vmode_refresh*/
@@ -66,6 +67,7 @@ struct res_map_refresh res_map_refresh_tbl[] = {
{1088, 612, RES_1088X612_60HZ_PIXCLOCK, 60},
{1152, 720, RES_1152X720_60HZ_PIXCLOCK, 60},
{1200, 720, RES_1200X720_60HZ_PIXCLOCK, 60},
+ {1200, 900, RES_1200X900_60HZ_PIXCLOCK, 60},
{1280, 600, RES_1280X600_60HZ_PIXCLOCK, 60},
{1280, 720, RES_1280X720_50HZ_PIXCLOCK, 50},
{1280, 768, RES_1280X768_50HZ_PIXCLOCK, 50},
@@ -759,6 +761,16 @@ struct crt_mode_table CRTM1200x720[] = {
{1568, 1200, 1200, 368, 1256, 128, 746, 720, 720, 26, 721, 3} }
};
+/* 1200x900 (DCON) */
+struct crt_mode_table DCON1200x900[] = {
+ /* r_rate, vclk, hsp, vsp */
+ {REFRESH_60, CLK_57_275M, M1200X900_R60_HSP, M1200X900_R60_VSP,
+ /* The correct htotal is 1240, but this doesn't raster on VX855. */
+ /* Via suggested changing to a multiple of 16, hence 1264. */
+ /* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
+ {1264, 1200, 1200, 64, 1211, 32, 912, 900, 900, 12, 901, 10} }
+};
+
/* 1280x600 (GTF) */
struct crt_mode_table CRTM1280x600[] = {
/* r_rate, vclk, hsp, vsp */
@@ -937,6 +949,9 @@ struct VideoModeTable viafb_modes[] = {
/* Display : 1200x720 (GTF) */
{CRTM1200x720, ARRAY_SIZE(CRTM1200x720)},
+ /* Display : 1200x900 (DCON) */
+ {DCON1200x900, ARRAY_SIZE(DCON1200x900)},
+
/* Display : 1280x600 (GTF) */
{CRTM1280x600, ARRAY_SIZE(CRTM1280x600)},
diff --git a/drivers/video/via/vt1636.c b/drivers/video/via/vt1636.c
index a6b37494e79a..d65bf1aee87c 100644
--- a/drivers/video/via/vt1636.c
+++ b/drivers/video/via/vt1636.c
@@ -19,6 +19,8 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/via-core.h>
+#include <linux/via_i2c.h>
#include "global.h"
u8 viafb_gpio_i2c_read_lvds(struct lvds_setting_information
@@ -27,9 +29,8 @@ u8 viafb_gpio_i2c_read_lvds(struct lvds_setting_information
{
u8 data;
- viaparinfo->shared->i2c_stuff.i2c_port = plvds_chip_info->i2c_port;
- viafb_i2c_readbyte(plvds_chip_info->lvds_chip_slave_addr, index, &data);
-
+ viafb_i2c_readbyte(plvds_chip_info->i2c_port,
+ plvds_chip_info->lvds_chip_slave_addr, index, &data);
return data;
}
@@ -39,14 +40,13 @@ void viafb_gpio_i2c_write_mask_lvds(struct lvds_setting_information
{
int index, data;
- viaparinfo->shared->i2c_stuff.i2c_port = plvds_chip_info->i2c_port;
-
index = io_data.Index;
data = viafb_gpio_i2c_read_lvds(plvds_setting_info, plvds_chip_info,
index);
data = (data & (~io_data.Mask)) | io_data.Data;
- viafb_i2c_writebyte(plvds_chip_info->lvds_chip_slave_addr, index, data);
+ viafb_i2c_writebyte(plvds_chip_info->i2c_port,
+ plvds_chip_info->lvds_chip_slave_addr, index, data);
}
void viafb_init_lvds_vt1636(struct lvds_setting_information
@@ -159,7 +159,7 @@ void viafb_disable_lvds_vt1636(struct lvds_setting_information
}
}
-bool viafb_lvds_identify_vt1636(void)
+bool viafb_lvds_identify_vt1636(u8 i2c_adapter)
{
u8 Buffer[2];
@@ -167,26 +167,20 @@ bool viafb_lvds_identify_vt1636(void)
/* Sense VT1636 LVDS Transmitter */
viaparinfo->chip_info->lvds_chip_info.lvds_chip_slave_addr =
- VT1636_LVDS_I2C_ADDR;
+ VT1636_LVDS_I2C_ADDR;
/* Check vendor ID first: */
- viafb_i2c_readbyte((u8) viaparinfo->chip_info->lvds_chip_info.
- lvds_chip_slave_addr,
- 0x00, &Buffer[0]);
- viafb_i2c_readbyte((u8) viaparinfo->chip_info->lvds_chip_info.
- lvds_chip_slave_addr,
- 0x01, &Buffer[1]);
+ if (viafb_i2c_readbyte(i2c_adapter, VT1636_LVDS_I2C_ADDR,
+ 0x00, &Buffer[0]))
+ return false;
+ viafb_i2c_readbyte(i2c_adapter, VT1636_LVDS_I2C_ADDR, 0x01, &Buffer[1]);
if (!((Buffer[0] == 0x06) && (Buffer[1] == 0x11)))
return false;
/* Check Chip ID: */
- viafb_i2c_readbyte((u8) viaparinfo->chip_info->lvds_chip_info.
- lvds_chip_slave_addr,
- 0x02, &Buffer[0]);
- viafb_i2c_readbyte((u8) viaparinfo->chip_info->lvds_chip_info.
- lvds_chip_slave_addr,
- 0x03, &Buffer[1]);
+ viafb_i2c_readbyte(i2c_adapter, VT1636_LVDS_I2C_ADDR, 0x02, &Buffer[0]);
+ viafb_i2c_readbyte(i2c_adapter, VT1636_LVDS_I2C_ADDR, 0x03, &Buffer[1]);
if ((Buffer[0] == 0x45) && (Buffer[1] == 0x33)) {
viaparinfo->chip_info->lvds_chip_info.lvds_chip_name =
VT1636_LVDS;
diff --git a/drivers/video/via/vt1636.h b/drivers/video/via/vt1636.h
index 2a150c58c7ed..4c1314e57468 100644
--- a/drivers/video/via/vt1636.h
+++ b/drivers/video/via/vt1636.h
@@ -22,7 +22,7 @@
#ifndef _VT1636_H_
#define _VT1636_H_
#include "chip.h"
-bool viafb_lvds_identify_vt1636(void);
+bool viafb_lvds_identify_vt1636(u8 i2c_adapter);
void viafb_init_lvds_vt1636(struct lvds_setting_information
*plvds_setting_info, struct lvds_chip_information *plvds_chip_info);
void viafb_enable_lvds_vt1636(struct lvds_setting_information
diff --git a/drivers/video/vt8623fb.c b/drivers/video/vt8623fb.c
index d31dc96f838a..85d76ec4c63e 100644
--- a/drivers/video/vt8623fb.c
+++ b/drivers/video/vt8623fb.c
@@ -726,7 +726,9 @@ static int __devinit vt8623_pci_probe(struct pci_dev *dev, const struct pci_devi
/* Prepare startup mode */
+ kparam_block_sysfs_write(mode_option);
rc = fb_find_mode(&(info->var), info, mode_option, NULL, 0, NULL, 8);
+ kparam_unblock_sysfs_write(mode_option);
if (! ((rc == 1) || (rc == 2))) {
rc = -EINVAL;
dev_err(info->device, "mode %s not found\n", mode_option);
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index bfec7c29486d..0f1da45ba47d 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -75,7 +75,7 @@ static void balloon_ack(struct virtqueue *vq)
struct virtio_balloon *vb;
unsigned int len;
- vb = vq->vq_ops->get_buf(vq, &len);
+ vb = virtqueue_get_buf(vq, &len);
if (vb)
complete(&vb->acked);
}
@@ -89,9 +89,9 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
init_completion(&vb->acked);
/* We should always be able to add one buffer to an empty queue. */
- if (vq->vq_ops->add_buf(vq, &sg, 1, 0, vb) < 0)
+ if (virtqueue_add_buf(vq, &sg, 1, 0, vb) < 0)
BUG();
- vq->vq_ops->kick(vq);
+ virtqueue_kick(vq);
/* When host has read buffer, this completes via balloon_ack */
wait_for_completion(&vb->acked);
@@ -204,7 +204,7 @@ static void stats_request(struct virtqueue *vq)
struct virtio_balloon *vb;
unsigned int len;
- vb = vq->vq_ops->get_buf(vq, &len);
+ vb = virtqueue_get_buf(vq, &len);
if (!vb)
return;
vb->need_stats_update = 1;
@@ -221,9 +221,9 @@ static void stats_handle_request(struct virtio_balloon *vb)
vq = vb->stats_vq;
sg_init_one(&sg, vb->stats, sizeof(vb->stats));
- if (vq->vq_ops->add_buf(vq, &sg, 1, 0, vb) < 0)
+ if (virtqueue_add_buf(vq, &sg, 1, 0, vb) < 0)
BUG();
- vq->vq_ops->kick(vq);
+ virtqueue_kick(vq);
}
static void virtballoon_changed(struct virtio_device *vdev)
@@ -314,10 +314,9 @@ static int virtballoon_probe(struct virtio_device *vdev)
* use it to signal us later.
*/
sg_init_one(&sg, vb->stats, sizeof vb->stats);
- if (vb->stats_vq->vq_ops->add_buf(vb->stats_vq,
- &sg, 1, 0, vb) < 0)
+ if (virtqueue_add_buf(vb->stats_vq, &sg, 1, 0, vb) < 0)
BUG();
- vb->stats_vq->vq_ops->kick(vb->stats_vq);
+ virtqueue_kick(vb->stats_vq);
}
vb->thread = kthread_run(balloon, vb, "vballoon");
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 24747aef1952..95896f387927 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -655,7 +655,7 @@ static int __devinit virtio_pci_probe(struct pci_dev *pci_dev,
/* we use the subsystem vendor/device id as the virtio vendor/device
* id. this allows us to use the same PCI vendor/device id for all
* virtio devices and to identify the particular virtio driver by
- * the subsytem ids */
+ * the subsystem ids */
vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
vp_dev->vdev.id.device = pci_dev->subsystem_device;
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 0f90634bcb85..1ca88908723b 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -110,13 +110,14 @@ struct vring_virtqueue
static int vring_add_indirect(struct vring_virtqueue *vq,
struct scatterlist sg[],
unsigned int out,
- unsigned int in)
+ unsigned int in,
+ gfp_t gfp)
{
struct vring_desc *desc;
unsigned head;
int i;
- desc = kmalloc((out + in) * sizeof(struct vring_desc), GFP_ATOMIC);
+ desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp);
if (!desc)
return vq->vring.num;
@@ -155,11 +156,12 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
return head;
}
-static int vring_add_buf(struct virtqueue *_vq,
- struct scatterlist sg[],
- unsigned int out,
- unsigned int in,
- void *data)
+int virtqueue_add_buf_gfp(struct virtqueue *_vq,
+ struct scatterlist sg[],
+ unsigned int out,
+ unsigned int in,
+ void *data,
+ gfp_t gfp)
{
struct vring_virtqueue *vq = to_vvq(_vq);
unsigned int i, avail, head, uninitialized_var(prev);
@@ -171,7 +173,7 @@ static int vring_add_buf(struct virtqueue *_vq,
/* If the host supports indirect descriptor tables, and we have multiple
* buffers, then go indirect. FIXME: tune this threshold */
if (vq->indirect && (out + in) > 1 && vq->num_free) {
- head = vring_add_indirect(vq, sg, out, in);
+ head = vring_add_indirect(vq, sg, out, in, gfp);
if (head != vq->vring.num)
goto add_head;
}
@@ -232,8 +234,9 @@ add_head:
return vq->num_free ? vq->vring.num : 0;
return vq->num_free;
}
+EXPORT_SYMBOL_GPL(virtqueue_add_buf_gfp);
-static void vring_kick(struct virtqueue *_vq)
+void virtqueue_kick(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
START_USE(vq);
@@ -253,6 +256,7 @@ static void vring_kick(struct virtqueue *_vq)
END_USE(vq);
}
+EXPORT_SYMBOL_GPL(virtqueue_kick);
static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
@@ -284,7 +288,7 @@ static inline bool more_used(const struct vring_virtqueue *vq)
return vq->last_used_idx != vq->vring.used->idx;
}
-static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len)
+void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
struct vring_virtqueue *vq = to_vvq(_vq);
void *ret;
@@ -325,15 +329,17 @@ static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len)
END_USE(vq);
return ret;
}
+EXPORT_SYMBOL_GPL(virtqueue_get_buf);
-static void vring_disable_cb(struct virtqueue *_vq)
+void virtqueue_disable_cb(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
+EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
-static bool vring_enable_cb(struct virtqueue *_vq)
+bool virtqueue_enable_cb(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
@@ -351,8 +357,9 @@ static bool vring_enable_cb(struct virtqueue *_vq)
END_USE(vq);
return true;
}
+EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
-static void *vring_detach_unused_buf(struct virtqueue *_vq)
+void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
unsigned int i;
@@ -375,6 +382,7 @@ static void *vring_detach_unused_buf(struct virtqueue *_vq)
END_USE(vq);
return NULL;
}
+EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
irqreturn_t vring_interrupt(int irq, void *_vq)
{
@@ -396,15 +404,6 @@ irqreturn_t vring_interrupt(int irq, void *_vq)
}
EXPORT_SYMBOL_GPL(vring_interrupt);
-static struct virtqueue_ops vring_vq_ops = {
- .add_buf = vring_add_buf,
- .get_buf = vring_get_buf,
- .kick = vring_kick,
- .disable_cb = vring_disable_cb,
- .enable_cb = vring_enable_cb,
- .detach_unused_buf = vring_detach_unused_buf,
-};
-
struct virtqueue *vring_new_virtqueue(unsigned int num,
unsigned int vring_align,
struct virtio_device *vdev,
@@ -429,7 +428,6 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
vring_init(&vq->vring, num, pages, vring_align);
vq->vq.callback = callback;
vq->vq.vdev = vdev;
- vq->vq.vq_ops = &vring_vq_ops;
vq->vq.name = name;
vq->notify = notify;
vq->broken = false;
diff --git a/drivers/watchdog/bfin_wdt.c b/drivers/watchdog/bfin_wdt.c
index 9c7ccd1e9088..9042a95fc98c 100644
--- a/drivers/watchdog/bfin_wdt.c
+++ b/drivers/watchdog/bfin_wdt.c
@@ -23,6 +23,7 @@
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <asm/blackfin.h>
+#include <asm/bfin_watchdog.h>
#define stamp(fmt, args...) \
pr_debug("%s:%i: " fmt "\n", __func__, __LINE__, ## args)
@@ -49,24 +50,6 @@
# define bfin_write_WDOG_STAT(x) bfin_write_WDOGA_STAT(x)
#endif
-/* Bit in SWRST that indicates boot caused by watchdog */
-#define SWRST_RESET_WDOG 0x4000
-
-/* Bit in WDOG_CTL that indicates watchdog has expired (WDR0) */
-#define WDOG_EXPIRED 0x8000
-
-/* Masks for WDEV field in WDOG_CTL register */
-#define ICTL_RESET 0x0
-#define ICTL_NMI 0x2
-#define ICTL_GPI 0x4
-#define ICTL_NONE 0x6
-#define ICTL_MASK 0x6
-
-/* Masks for WDEN field in WDOG_CTL register */
-#define WDEN_MASK 0x0FF0
-#define WDEN_ENABLE 0x0000
-#define WDEN_DISABLE 0x0AD0
-
/* some defaults */
#define WATCHDOG_TIMEOUT 20
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index 801ead191499..3d49671cdf5a 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -137,12 +137,12 @@ static long booke_wdt_ioctl(struct file *file,
if (copy_to_user((void *)arg, &ident, sizeof(ident)))
return -EFAULT;
case WDIOC_GETSTATUS:
- return put_user(ident.options, p);
+ return put_user(0, p);
case WDIOC_GETBOOTSTATUS:
/* XXX: something is clearing TSR */
tmp = mfspr(SPRN_TSR) & TSR_WRS(3);
- /* returns 1 if last reset was caused by the WDT */
- return (tmp ? 1 : 0);
+ /* returns CARDRESET if last reset was caused by the WDT */
+ return (tmp ? WDIOF_CARDRESET : 0);
case WDIOC_SETOPTIONS:
if (get_user(tmp, p))
return -EINVAL;
diff --git a/drivers/watchdog/eurotechwdt.c b/drivers/watchdog/eurotechwdt.c
index d1c4e55b1db0..3f3dc093ad68 100644
--- a/drivers/watchdog/eurotechwdt.c
+++ b/drivers/watchdog/eurotechwdt.c
@@ -68,7 +68,6 @@ static spinlock_t eurwdt_lock;
/*
* You must set these - there is no sane way to probe for this board.
- * You can use eurwdt=x,y to set these now.
*/
static int io = 0x3f0;
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
index 4e3941c5e293..83e791885073 100644
--- a/drivers/watchdog/mpc8xxx_wdt.c
+++ b/drivers/watchdog/mpc8xxx_wdt.c
@@ -53,7 +53,7 @@ static int mpc8xxx_wdt_init_late(void);
static u16 timeout = 0xffff;
module_param(timeout, ushort, 0);
MODULE_PARM_DESC(timeout,
- "Watchdog timeout in ticks. (0<timeout<65536, default=65535");
+ "Watchdog timeout in ticks. (0<timeout<65536, default=65535)");
static int reset = 1;
module_param(reset, bool, 0);
diff --git a/drivers/watchdog/pc87413_wdt.c b/drivers/watchdog/pc87413_wdt.c
index d3aa2f1fe61d..3a56bc360924 100644
--- a/drivers/watchdog/pc87413_wdt.c
+++ b/drivers/watchdog/pc87413_wdt.c
@@ -53,7 +53,9 @@
#define WDTO 0x11 /* Watchdog timeout register */
#define WDCFG 0x12 /* Watchdog config register */
-static int io = 0x2E; /* Address used on Portwell Boards */
+#define IO_DEFAULT 0x2E /* Address used on Portwell Boards */
+
+static int io = IO_DEFAULT;
static int timeout = DEFAULT_TIMEOUT; /* timeout value */
static unsigned long timer_enabled; /* is the timer enabled? */
@@ -583,12 +585,13 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
module_param(io, int, 0);
-MODULE_PARM_DESC(io, MODNAME " I/O port (default: " __MODULE_STRING(io) ").");
+MODULE_PARM_DESC(io, MODNAME " I/O port (default: "
+ __MODULE_STRING(IO_DEFAULT) ").");
module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout,
"Watchdog timeout in minutes (default="
- __MODULE_STRING(timeout) ").");
+ __MODULE_STRING(DEFAULT_TIMEOUT) ").");
module_param(nowayout, int, 0);
MODULE_PARM_DESC(nowayout,
diff --git a/drivers/watchdog/pnx833x_wdt.c b/drivers/watchdog/pnx833x_wdt.c
index 09102f09e681..a7b5ad2a98bd 100644
--- a/drivers/watchdog/pnx833x_wdt.c
+++ b/drivers/watchdog/pnx833x_wdt.c
@@ -33,6 +33,8 @@
#define PFX "pnx833x: "
#define WATCHDOG_TIMEOUT 30 /* 30 sec Maximum timeout */
#define WATCHDOG_COUNT_FREQUENCY 68000000U /* Watchdog counts at 68MHZ. */
+#define PNX_WATCHDOG_TIMEOUT (WATCHDOG_TIMEOUT * WATCHDOG_COUNT_FREQUENCY)
+#define PNX_TIMEOUT_VALUE 2040000000U
/** CONFIG block */
#define PNX833X_CONFIG (0x07000U)
@@ -47,20 +49,21 @@
static int pnx833x_wdt_alive;
/* Set default timeout in MHZ.*/
-static int pnx833x_wdt_timeout = (WATCHDOG_TIMEOUT * WATCHDOG_COUNT_FREQUENCY);
+static int pnx833x_wdt_timeout = PNX_WATCHDOG_TIMEOUT;
module_param(pnx833x_wdt_timeout, int, 0);
MODULE_PARM_DESC(timeout, "Watchdog timeout in MHz. (68MHz clock), default="
- __MODULE_STRING(pnx833x_wdt_timeout) "(30 seconds).");
+ __MODULE_STRING(PNX_TIMEOUT_VALUE) "(30 seconds).");
static int nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, int, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-static int start_enabled = 1;
+#define START_DEFAULT 1
+static int start_enabled = START_DEFAULT;
module_param(start_enabled, int, 0);
MODULE_PARM_DESC(start_enabled, "Watchdog is started on module insertion "
- "(default=" __MODULE_STRING(start_enabled) ")");
+ "(default=" __MODULE_STRING(START_DEFAULT) ")");
static void pnx833x_wdt_start(void)
{
diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c
index 69c6adbd8205..4733a99955dd 100644
--- a/drivers/watchdog/rdc321x_wdt.c
+++ b/drivers/watchdog/rdc321x_wdt.c
@@ -1,7 +1,7 @@
/*
* RDC321x watchdog driver
*
- * Copyright (C) 2007 Florian Fainelli <florian@openwrt.org>
+ * Copyright (C) 2007-2010 Florian Fainelli <florian@openwrt.org>
*
* This driver is highly inspired from the cpu5_wdt driver
*
@@ -36,8 +36,7 @@
#include <linux/watchdog.h>
#include <linux/io.h>
#include <linux/uaccess.h>
-
-#include <asm/rdc321x_defs.h>
+#include <linux/mfd/rdc321x.h>
#define RDC_WDT_MASK 0x80000000 /* Mask */
#define RDC_WDT_EN 0x00800000 /* Enable bit */
@@ -63,6 +62,8 @@ static struct {
int default_ticks;
unsigned long inuse;
spinlock_t lock;
+ struct pci_dev *sb_pdev;
+ int base_reg;
} rdc321x_wdt_device;
/* generic helper functions */
@@ -70,14 +71,18 @@ static struct {
static void rdc321x_wdt_trigger(unsigned long unused)
{
unsigned long flags;
+ u32 val;
if (rdc321x_wdt_device.running)
ticks--;
/* keep watchdog alive */
spin_lock_irqsave(&rdc321x_wdt_device.lock, flags);
- outl(RDC_WDT_EN | inl(RDC3210_CFGREG_DATA),
- RDC3210_CFGREG_DATA);
+ pci_read_config_dword(rdc321x_wdt_device.sb_pdev,
+ rdc321x_wdt_device.base_reg, &val);
+ val |= RDC_WDT_EN;
+ pci_write_config_dword(rdc321x_wdt_device.sb_pdev,
+ rdc321x_wdt_device.base_reg, val);
spin_unlock_irqrestore(&rdc321x_wdt_device.lock, flags);
/* requeue?? */
@@ -105,10 +110,13 @@ static void rdc321x_wdt_start(void)
/* Clear the timer */
spin_lock_irqsave(&rdc321x_wdt_device.lock, flags);
- outl(RDC_CLS_TMR, RDC3210_CFGREG_ADDR);
+ pci_write_config_dword(rdc321x_wdt_device.sb_pdev,
+ rdc321x_wdt_device.base_reg, RDC_CLS_TMR);
/* Enable watchdog and set the timeout to 81.92 us */
- outl(RDC_WDT_EN | RDC_WDT_CNT, RDC3210_CFGREG_DATA);
+ pci_write_config_dword(rdc321x_wdt_device.sb_pdev,
+ rdc321x_wdt_device.base_reg,
+ RDC_WDT_EN | RDC_WDT_CNT);
spin_unlock_irqrestore(&rdc321x_wdt_device.lock, flags);
mod_timer(&rdc321x_wdt_device.timer,
@@ -148,7 +156,7 @@ static long rdc321x_wdt_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
void __user *argp = (void __user *)arg;
- unsigned int value;
+ u32 value;
static const struct watchdog_info ident = {
.options = WDIOF_CARDRESET,
.identity = "RDC321x WDT",
@@ -162,9 +170,10 @@ static long rdc321x_wdt_ioctl(struct file *file, unsigned int cmd,
case WDIOC_GETSTATUS:
/* Read the value from the DATA register */
spin_lock_irqsave(&rdc321x_wdt_device.lock, flags);
- value = inl(RDC3210_CFGREG_DATA);
+ pci_read_config_dword(rdc321x_wdt_device.sb_pdev,
+ rdc321x_wdt_device.base_reg, &value);
spin_unlock_irqrestore(&rdc321x_wdt_device.lock, flags);
- if (copy_to_user(argp, &value, sizeof(int)))
+ if (copy_to_user(argp, &value, sizeof(u32)))
return -EFAULT;
break;
case WDIOC_GETSUPPORT:
@@ -219,17 +228,35 @@ static struct miscdevice rdc321x_wdt_misc = {
static int __devinit rdc321x_wdt_probe(struct platform_device *pdev)
{
int err;
+ struct resource *r;
+ struct rdc321x_wdt_pdata *pdata;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data supplied\n");
+ return -ENODEV;
+ }
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "wdt-reg");
+ if (!r) {
+ dev_err(&pdev->dev, "failed to get wdt-reg resource\n");
+ return -ENODEV;
+ }
+
+ rdc321x_wdt_device.sb_pdev = pdata->sb_pdev;
+ rdc321x_wdt_device.base_reg = r->start;
err = misc_register(&rdc321x_wdt_misc);
if (err < 0) {
- printk(KERN_ERR PFX "watchdog misc_register failed\n");
+ dev_err(&pdev->dev, "misc_register failed\n");
return err;
}
spin_lock_init(&rdc321x_wdt_device.lock);
/* Reset the watchdog */
- outl(RDC_WDT_RST, RDC3210_CFGREG_DATA);
+ pci_write_config_dword(rdc321x_wdt_device.sb_pdev,
+ rdc321x_wdt_device.base_reg, RDC_WDT_RST);
init_completion(&rdc321x_wdt_device.stop);
rdc321x_wdt_device.queue = 0;
@@ -240,7 +267,7 @@ static int __devinit rdc321x_wdt_probe(struct platform_device *pdev)
rdc321x_wdt_device.default_ticks = ticks;
- printk(KERN_INFO PFX "watchdog init success\n");
+ dev_info(&pdev->dev, "watchdog init success\n");
return 0;
}
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index e4cebef55177..095cbf38eb5d 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -63,7 +63,7 @@ module_param(nowayout, int, 0);
module_param(soft_noboot, int, 0);
module_param(debug, int, 0);
-MODULE_PARM_DESC(tmr_margin, "Watchdog tmr_margin in seconds. default="
+MODULE_PARM_DESC(tmr_margin, "Watchdog tmr_margin in seconds. (default="
__MODULE_STRING(CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME) ")");
MODULE_PARM_DESC(tmr_atboot,
"Watchdog is started at boot time if set to 1, default="
@@ -71,8 +71,8 @@ MODULE_PARM_DESC(tmr_atboot,
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
MODULE_PARM_DESC(soft_noboot, "Watchdog action, set to 1 to ignore reboots, "
- "0 to reboot (default depends on ONLY_TESTING)");
-MODULE_PARM_DESC(debug, "Watchdog debug, set to >1 for debug, (default 0)");
+ "0 to reboot (default 0)");
+MODULE_PARM_DESC(debug, "Watchdog debug, set to >1 for debug (default 0)");
static unsigned long open_lock;
static struct device *wdt_dev; /* platform device attached to */
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index a03f84e5ee1f..6fc74065abee 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -496,7 +496,7 @@ MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
module_param(clock_division_ratio, int, 0);
MODULE_PARM_DESC(clock_division_ratio,
"Clock division ratio. Valid ranges are from 0x5 (1.31ms) "
- "to 0x7 (5.25ms). (default=" __MODULE_STRING(clock_division_ratio) ")");
+ "to 0x7 (5.25ms). (default=" __MODULE_STRING(WTCSR_CKS_4096) ")");
module_param(heartbeat, int, 0);
MODULE_PARM_DESC(heartbeat,
diff --git a/drivers/watchdog/wdt.c b/drivers/watchdog/wdt.c
index bfda2e99dd89..552a4381e78f 100644
--- a/drivers/watchdog/wdt.c
+++ b/drivers/watchdog/wdt.c
@@ -91,7 +91,7 @@ MODULE_PARM_DESC(tachometer,
static int type = 500;
module_param(type, int, 0);
MODULE_PARM_DESC(type,
- "WDT501-P Card type (500 or 501 , default=500)");
+ "WDT501-P Card type (500 or 501, default=500)");
/*
* Programming support
diff --git a/drivers/watchdog/wdt977.c b/drivers/watchdog/wdt977.c
index 90ef70eb47d7..5c2521fc836c 100644
--- a/drivers/watchdog/wdt977.c
+++ b/drivers/watchdog/wdt977.c
@@ -63,7 +63,7 @@ static char expect_close;
static DEFINE_SPINLOCK(spinlock);
module_param(timeout, int, 0);
-MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds (60..15300), default="
+MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds (60..15300, default="
__MODULE_STRING(DEFAULT_TIMEOUT) ")");
module_param(testmode, int, 0);
MODULE_PARM_DESC(testmode, "Watchdog testmode (1 = no reboot), default=0");
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 2ac4440e7b08..8943b8ccee1a 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -80,12 +80,6 @@ static void do_suspend(void)
shutting_down = SHUTDOWN_SUSPEND;
- err = stop_machine_create();
- if (err) {
- printk(KERN_ERR "xen suspend: failed to setup stop_machine %d\n", err);
- goto out;
- }
-
#ifdef CONFIG_PREEMPT
/* If the kernel is preemptible, we need to freeze all the processes
to prevent them from being in the middle of a pagetable update
@@ -93,7 +87,7 @@ static void do_suspend(void)
err = freeze_processes();
if (err) {
printk(KERN_ERR "xen suspend: freeze failed %d\n", err);
- goto out_destroy_sm;
+ goto out;
}
#endif
@@ -136,12 +130,8 @@ out_resume:
out_thaw:
#ifdef CONFIG_PREEMPT
thaw_processes();
-
-out_destroy_sm:
-#endif
- stop_machine_destroy();
-
out:
+#endif
shutting_down = SHUTDOWN_INVALID;
}
#endif /* CONFIG_PM_SLEEP */
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index ed835836e0dc..32ef4009d030 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -40,7 +40,9 @@
extern struct file_system_type v9fs_fs_type;
extern const struct address_space_operations v9fs_addr_operations;
extern const struct file_operations v9fs_file_operations;
+extern const struct file_operations v9fs_file_operations_dotl;
extern const struct file_operations v9fs_dir_operations;
+extern const struct file_operations v9fs_dir_operations_dotl;
extern const struct dentry_operations v9fs_dentry_operations;
extern const struct dentry_operations v9fs_cached_dentry_operations;
diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
index 0adfd64dfcee..d61e3b28ce37 100644
--- a/fs/9p/vfs_dir.c
+++ b/fs/9p/vfs_dir.c
@@ -203,3 +203,11 @@ const struct file_operations v9fs_dir_operations = {
.open = v9fs_file_open,
.release = v9fs_dir_release,
};
+
+const struct file_operations v9fs_dir_operations_dotl = {
+ .read = generic_read_dir,
+ .llseek = generic_file_llseek,
+ .readdir = v9fs_dir_readdir,
+ .open = v9fs_file_open,
+ .release = v9fs_dir_release,
+};
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index df52d488d2a6..25b300e1c9d7 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -296,3 +296,14 @@ const struct file_operations v9fs_file_operations = {
.mmap = generic_file_readonly_mmap,
.fsync = v9fs_file_fsync,
};
+
+const struct file_operations v9fs_file_operations_dotl = {
+ .llseek = generic_file_llseek,
+ .read = v9fs_file_read,
+ .write = v9fs_file_write,
+ .open = v9fs_file_open,
+ .release = v9fs_dir_release,
+ .lock = v9fs_file_lock,
+ .mmap = generic_file_readonly_mmap,
+ .fsync = v9fs_file_fsync,
+};
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index f2434fc9d2c4..de9a39590b70 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -44,9 +44,12 @@
#include "cache.h"
static const struct inode_operations v9fs_dir_inode_operations;
-static const struct inode_operations v9fs_dir_inode_operations_ext;
+static const struct inode_operations v9fs_dir_inode_operations_dotu;
+static const struct inode_operations v9fs_dir_inode_operations_dotl;
static const struct inode_operations v9fs_file_inode_operations;
+static const struct inode_operations v9fs_file_inode_operations_dotl;
static const struct inode_operations v9fs_symlink_inode_operations;
+static const struct inode_operations v9fs_symlink_inode_operations_dotl;
/**
* unixmode2p9mode - convert unix mode bits to plan 9
@@ -275,25 +278,44 @@ struct inode *v9fs_get_inode(struct super_block *sb, int mode)
init_special_inode(inode, inode->i_mode, inode->i_rdev);
break;
case S_IFREG:
- inode->i_op = &v9fs_file_inode_operations;
- inode->i_fop = &v9fs_file_operations;
+ if (v9fs_proto_dotl(v9ses)) {
+ inode->i_op = &v9fs_file_inode_operations_dotl;
+ inode->i_fop = &v9fs_file_operations_dotl;
+ } else {
+ inode->i_op = &v9fs_file_inode_operations;
+ inode->i_fop = &v9fs_file_operations;
+ }
+
break;
+
case S_IFLNK:
- if (!v9fs_proto_dotu(v9ses)) {
- P9_DPRINTK(P9_DEBUG_ERROR,
- "extended modes used w/o 9P2000.u\n");
+ if (!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses)) {
+ P9_DPRINTK(P9_DEBUG_ERROR, "extended modes used with "
+ "legacy protocol.\n");
err = -EINVAL;
goto error;
}
- inode->i_op = &v9fs_symlink_inode_operations;
+
+ if (v9fs_proto_dotl(v9ses))
+ inode->i_op = &v9fs_symlink_inode_operations_dotl;
+ else
+ inode->i_op = &v9fs_symlink_inode_operations;
+
break;
case S_IFDIR:
inc_nlink(inode);
- if (v9fs_proto_dotu(v9ses))
- inode->i_op = &v9fs_dir_inode_operations_ext;
+ if (v9fs_proto_dotl(v9ses))
+ inode->i_op = &v9fs_dir_inode_operations_dotl;
+ else if (v9fs_proto_dotu(v9ses))
+ inode->i_op = &v9fs_dir_inode_operations_dotu;
else
inode->i_op = &v9fs_dir_inode_operations;
- inode->i_fop = &v9fs_dir_operations;
+
+ if (v9fs_proto_dotl(v9ses))
+ inode->i_fop = &v9fs_dir_operations_dotl;
+ else
+ inode->i_fop = &v9fs_dir_operations;
+
break;
default:
P9_DPRINTK(P9_DEBUG_ERROR, "BAD mode 0x%x S_IFMT 0x%x\n",
@@ -772,6 +794,13 @@ v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
goto clunk_olddir;
}
+ if (v9fs_proto_dotl(v9ses)) {
+ retval = p9_client_rename(oldfid, newdirfid,
+ (char *) new_dentry->d_name.name);
+ if (retval != -ENOSYS)
+ goto clunk_newdir;
+ }
+
/* 9P can only handle file rename in the same directory */
if (memcmp(&olddirfid->qid, &newdirfid->qid, sizeof(newdirfid->qid))) {
P9_DPRINTK(P9_DEBUG_ERROR,
@@ -1208,7 +1237,21 @@ v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
return retval;
}
-static const struct inode_operations v9fs_dir_inode_operations_ext = {
+static const struct inode_operations v9fs_dir_inode_operations_dotu = {
+ .create = v9fs_vfs_create,
+ .lookup = v9fs_vfs_lookup,
+ .symlink = v9fs_vfs_symlink,
+ .link = v9fs_vfs_link,
+ .unlink = v9fs_vfs_unlink,
+ .mkdir = v9fs_vfs_mkdir,
+ .rmdir = v9fs_vfs_rmdir,
+ .mknod = v9fs_vfs_mknod,
+ .rename = v9fs_vfs_rename,
+ .getattr = v9fs_vfs_getattr,
+ .setattr = v9fs_vfs_setattr,
+};
+
+static const struct inode_operations v9fs_dir_inode_operations_dotl = {
.create = v9fs_vfs_create,
.lookup = v9fs_vfs_lookup,
.symlink = v9fs_vfs_symlink,
@@ -1239,6 +1282,11 @@ static const struct inode_operations v9fs_file_inode_operations = {
.setattr = v9fs_vfs_setattr,
};
+static const struct inode_operations v9fs_file_inode_operations_dotl = {
+ .getattr = v9fs_vfs_getattr,
+ .setattr = v9fs_vfs_setattr,
+};
+
static const struct inode_operations v9fs_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = v9fs_vfs_follow_link,
@@ -1246,3 +1294,11 @@ static const struct inode_operations v9fs_symlink_inode_operations = {
.getattr = v9fs_vfs_getattr,
.setattr = v9fs_vfs_setattr,
};
+
+static const struct inode_operations v9fs_symlink_inode_operations_dotl = {
+ .readlink = generic_readlink,
+ .follow_link = v9fs_vfs_follow_link,
+ .put_link = v9fs_vfs_put_link,
+ .getattr = v9fs_vfs_getattr,
+ .setattr = v9fs_vfs_setattr,
+};
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 806da5d3b3a0..be74d020436e 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -38,6 +38,7 @@
#include <linux/idr.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/statfs.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
@@ -45,7 +46,7 @@
#include "v9fs_vfs.h"
#include "fid.h"
-static const struct super_operations v9fs_super_ops;
+static const struct super_operations v9fs_super_ops, v9fs_super_ops_dotl;
/**
* v9fs_set_super - set the superblock
@@ -76,7 +77,10 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
sb->s_blocksize_bits = fls(v9ses->maxdata - 1);
sb->s_blocksize = 1 << sb->s_blocksize_bits;
sb->s_magic = V9FS_MAGIC;
- sb->s_op = &v9fs_super_ops;
+ if (v9fs_proto_dotl(v9ses))
+ sb->s_op = &v9fs_super_ops_dotl;
+ else
+ sb->s_op = &v9fs_super_ops;
sb->s_bdi = &v9ses->bdi;
sb->s_flags = flags | MS_ACTIVE | MS_SYNCHRONOUS | MS_DIRSYNC |
@@ -211,6 +215,42 @@ v9fs_umount_begin(struct super_block *sb)
v9fs_session_begin_cancel(v9ses);
}
+static int v9fs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+ struct v9fs_session_info *v9ses;
+ struct p9_fid *fid;
+ struct p9_rstatfs rs;
+ int res;
+
+ fid = v9fs_fid_lookup(dentry);
+ if (IS_ERR(fid)) {
+ res = PTR_ERR(fid);
+ goto done;
+ }
+
+ v9ses = v9fs_inode2v9ses(dentry->d_inode);
+ if (v9fs_proto_dotl(v9ses)) {
+ res = p9_client_statfs(fid, &rs);
+ if (res == 0) {
+ buf->f_type = rs.type;
+ buf->f_bsize = rs.bsize;
+ buf->f_blocks = rs.blocks;
+ buf->f_bfree = rs.bfree;
+ buf->f_bavail = rs.bavail;
+ buf->f_files = rs.files;
+ buf->f_ffree = rs.ffree;
+ buf->f_fsid.val[0] = rs.fsid & 0xFFFFFFFFUL;
+ buf->f_fsid.val[1] = (rs.fsid >> 32) & 0xFFFFFFFFUL;
+ buf->f_namelen = rs.namelen;
+ }
+ if (res != -ENOSYS)
+ goto done;
+ }
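+ /* fall back to a synthetic statfs for legacy protocols or when the
+ * server does not implement statfs (-ENOSYS) */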
+ res = simple_statfs(dentry, buf);
+done:
+ return res;
+}
+
static const struct super_operations v9fs_super_ops = {
#ifdef CONFIG_9P_FSCACHE
.alloc_inode = v9fs_alloc_inode,
@@ -222,6 +262,17 @@ static const struct super_operations v9fs_super_ops = {
.umount_begin = v9fs_umount_begin,
};
+static const struct super_operations v9fs_super_ops_dotl = {
+#ifdef CONFIG_9P_FSCACHE
+ .alloc_inode = v9fs_alloc_inode,
+ .destroy_inode = v9fs_destroy_inode,
+#endif
+ .statfs = v9fs_statfs,
+ .clear_inode = v9fs_clear_inode,
+ .show_options = generic_show_options,
+ .umount_begin = v9fs_umount_begin,
+};
+
struct file_system_type v9fs_fs_type = {
.name = "9p",
.get_sb = v9fs_get_sb,
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 109a6c606d92..e8e5e63ac950 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -177,8 +177,7 @@ static int try_to_fill_dentry(struct dentry *dentry, int flags)
}
/* Trigger mount for path component or follow link */
} else if (ino->flags & AUTOFS_INF_PENDING ||
- autofs4_need_mount(flags) ||
- current->link_count) {
+ autofs4_need_mount(flags)) {
DPRINTK("waiting for mount name=%.*s",
dentry->d_name.len, dentry->d_name.name);
@@ -262,7 +261,7 @@ static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
spin_unlock(&dcache_lock);
spin_unlock(&sbi->fs_lock);
- status = try_to_fill_dentry(dentry, 0);
+ status = try_to_fill_dentry(dentry, nd->flags);
if (status)
goto out_error;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 6dcee88c2e5d..55dcb7884f4d 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -417,7 +417,7 @@ int blkdev_fsync(struct file *filp, struct dentry *dentry, int datasync)
*/
mutex_unlock(&bd_inode->i_mutex);
- error = blkdev_issue_flush(bdev, NULL);
+ error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL, BLKDEV_IFL_WAIT);
if (error == -EOPNOTSUPP)
error = 0;
@@ -668,41 +668,209 @@ void bd_forget(struct inode *inode)
iput(bdev->bd_inode);
}
-int bd_claim(struct block_device *bdev, void *holder)
+/**
+ * bd_may_claim - test whether a block device can be claimed
+ * @bdev: block device of interest
+ * @whole: whole block device containing @bdev, may equal @bdev
+ * @holder: holder trying to claim @bdev
+ *
+ * Test whether @bdev can be claimed by @holder.
+ *
+ * CONTEXT:
+ * spin_lock(&bdev_lock).
+ *
+ * RETURNS:
+ * %true if @bdev can be claimed, %false otherwise.
+ */
+static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
+ void *holder)
{
- int res;
- spin_lock(&bdev_lock);
-
- /* first decide result */
if (bdev->bd_holder == holder)
- res = 0; /* already a holder */
+ return true; /* already a holder */
else if (bdev->bd_holder != NULL)
- res = -EBUSY; /* held by someone else */
+ return false; /* held by someone else */
else if (bdev->bd_contains == bdev)
- res = 0; /* is a whole device which isn't held */
+ return true; /* is a whole device which isn't held */
- else if (bdev->bd_contains->bd_holder == bd_claim)
- res = 0; /* is a partition of a device that is being partitioned */
- else if (bdev->bd_contains->bd_holder != NULL)
- res = -EBUSY; /* is a partition of a held device */
+ else if (whole->bd_holder == bd_claim)
+ return true; /* is a partition of a device that is being partitioned */
+ else if (whole->bd_holder != NULL)
+ return false; /* is a partition of a held device */
else
- res = 0; /* is a partition of an un-held device */
+ return true; /* is a partition of an un-held device */
+}
+
+/**
+ * bd_prepare_to_claim - prepare to claim a block device
+ * @bdev: block device of interest
+ * @whole: the whole device containing @bdev, may equal @bdev
+ * @holder: holder trying to claim @bdev
+ *
+ * Prepare to claim @bdev. This function fails if @bdev is already
+ * claimed by another holder and waits if another claiming is in
+ * progress. This function doesn't actually claim. On successful
+ * return, the caller has ownership of bd_claiming and bd_holder[s].
+ *
+ * CONTEXT:
+ * spin_lock(&bdev_lock). Might release bdev_lock, sleep and regrab
+ * it multiple times.
+ *
+ * RETURNS:
+ * 0 if @bdev can be claimed, -EBUSY otherwise.
+ */
+static int bd_prepare_to_claim(struct block_device *bdev,
+ struct block_device *whole, void *holder)
+{
+retry:
+ /* if someone else claimed, fail */
+ if (!bd_may_claim(bdev, whole, holder))
+ return -EBUSY;
+
+ /* if someone else is claiming, wait for it to finish */
+ if (whole->bd_claiming && whole->bd_claiming != holder) {
+ wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
+ DEFINE_WAIT(wait);
+
+ prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
+ spin_unlock(&bdev_lock);
+ schedule();
+ finish_wait(wq, &wait);
+ spin_lock(&bdev_lock);
+ goto retry;
+ }
+
+ /* yay, all mine */
+ return 0;
+}
+
+/**
+ * bd_start_claiming - start claiming a block device
+ * @bdev: block device of interest
+ * @holder: holder trying to claim @bdev
+ *
+ * @bdev is about to be opened exclusively. Check @bdev can be opened
+ * exclusively and mark that an exclusive open is in progress. Each
+ * successful call to this function must be matched with a call to
+ * either bd_claim() or bd_abort_claiming(). If this function
+ * succeeds, the matching bd_claim() is guaranteed to succeed.
+ *
+ * CONTEXT:
+ * Might sleep.
+ *
+ * RETURNS:
+ * Pointer to the block device containing @bdev on success, ERR_PTR()
+ * value on failure.
+ */
+static struct block_device *bd_start_claiming(struct block_device *bdev,
+ void *holder)
+{
+ struct gendisk *disk;
+ struct block_device *whole;
+ int partno, err;
+
+ might_sleep();
+
+ /*
+ * @bdev might not have been initialized properly yet, look up
+ * and grab the outer block device the hard way.
+ */
+ disk = get_gendisk(bdev->bd_dev, &partno);
+ if (!disk)
+ return ERR_PTR(-ENXIO);
+
+ whole = bdget_disk(disk, 0);
+ put_disk(disk);
+ if (!whole)
+ return ERR_PTR(-ENOMEM);
+
+ /* prepare to claim, if successful, mark claiming in progress */
+ spin_lock(&bdev_lock);
+
+ err = bd_prepare_to_claim(bdev, whole, holder);
+ if (err == 0) {
+ whole->bd_claiming = holder;
+ spin_unlock(&bdev_lock);
+ return whole;
+ } else {
+ spin_unlock(&bdev_lock);
+ bdput(whole);
+ return ERR_PTR(err);
+ }
+}
- /* now impose change */
- if (res==0) {
+/* releases bdev_lock */
+static void __bd_abort_claiming(struct block_device *whole, void *holder)
+{
+ BUG_ON(whole->bd_claiming != holder);
+ whole->bd_claiming = NULL;
+ wake_up_bit(&whole->bd_claiming, 0);
+
+ spin_unlock(&bdev_lock);
+ bdput(whole);
+}
+
+/**
+ * bd_abort_claiming - abort claiming a block device
+ * @whole: whole block device returned by bd_start_claiming()
+ * @holder: holder trying to claim @bdev
+ *
+ * Abort a claiming block started by bd_start_claiming(). Note that
+ * @whole is not the block device to be claimed but the whole device
+ * returned by bd_start_claiming().
+ *
+ * CONTEXT:
+ * Grabs and releases bdev_lock.
+ */
+static void bd_abort_claiming(struct block_device *whole, void *holder)
+{
+ spin_lock(&bdev_lock);
+ __bd_abort_claiming(whole, holder); /* releases bdev_lock */
+}
+
+/**
+ * bd_claim - claim a block device
+ * @bdev: block device to claim
+ * @holder: holder trying to claim @bdev
+ *
+ * Try to claim @bdev which must have been opened successfully. This
+ * function may be called with or without preceding
+ * bd_start_claiming(). In the former case, this function is always
+ * successful and terminates the claiming block.
+ *
+ * CONTEXT:
+ * Might sleep.
+ *
+ * RETURNS:
+ * 0 if successful, -EBUSY if @bdev is already claimed.
+ */
+int bd_claim(struct block_device *bdev, void *holder)
+{
+ struct block_device *whole = bdev->bd_contains;
+ int res;
+
+ might_sleep();
+
+ spin_lock(&bdev_lock);
+
+ res = bd_prepare_to_claim(bdev, whole, holder);
+ if (res == 0) {
/* note that for a whole device bd_holders
* will be incremented twice, and bd_holder will
* be set to bd_claim before being set to holder
*/
- bdev->bd_contains->bd_holders ++;
- bdev->bd_contains->bd_holder = bd_claim;
+ whole->bd_holders++;
+ whole->bd_holder = bd_claim;
bdev->bd_holders++;
bdev->bd_holder = holder;
}
- spin_unlock(&bdev_lock);
+
+ if (whole->bd_claiming)
+ __bd_abort_claiming(whole, holder); /* releases bdev_lock */
+ else
+ spin_unlock(&bdev_lock);
+
return res;
}
-
EXPORT_SYMBOL(bd_claim);
void bd_release(struct block_device *bdev)
@@ -1316,6 +1484,7 @@ EXPORT_SYMBOL(blkdev_get);
static int blkdev_open(struct inode * inode, struct file * filp)
{
+ struct block_device *whole = NULL;
struct block_device *bdev;
int res;
@@ -1338,22 +1507,25 @@ static int blkdev_open(struct inode * inode, struct file * filp)
if (bdev == NULL)
return -ENOMEM;
+ if (filp->f_mode & FMODE_EXCL) {
+ whole = bd_start_claiming(bdev, filp);
+ if (IS_ERR(whole)) {
+ bdput(bdev);
+ return PTR_ERR(whole);
+ }
+ }
+
filp->f_mapping = bdev->bd_inode->i_mapping;
res = blkdev_get(bdev, filp->f_mode);
- if (res)
- return res;
- if (filp->f_mode & FMODE_EXCL) {
- res = bd_claim(bdev, filp);
- if (res)
- goto out_blkdev_put;
+ if (whole) {
+ if (res == 0)
+ BUG_ON(bd_claim(bdev, filp) != 0);
+ else
+ bd_abort_claiming(whole, filp);
}
- return 0;
-
- out_blkdev_put:
- blkdev_put(bdev, filp->f_mode);
return res;
}
@@ -1564,27 +1736,34 @@ EXPORT_SYMBOL(lookup_bdev);
*/
struct block_device *open_bdev_exclusive(const char *path, fmode_t mode, void *holder)
{
- struct block_device *bdev;
- int error = 0;
+ struct block_device *bdev, *whole;
+ int error;
bdev = lookup_bdev(path);
if (IS_ERR(bdev))
return bdev;
+ whole = bd_start_claiming(bdev, holder);
+ if (IS_ERR(whole)) {
+ bdput(bdev);
+ return whole;
+ }
+
error = blkdev_get(bdev, mode);
if (error)
- return ERR_PTR(error);
+ goto out_abort_claiming;
+
error = -EACCES;
if ((mode & FMODE_WRITE) && bdev_read_only(bdev))
- goto blkdev_put;
- error = bd_claim(bdev, holder);
- if (error)
- goto blkdev_put;
+ goto out_blkdev_put;
+ BUG_ON(bd_claim(bdev, holder) != 0);
return bdev;
-
-blkdev_put:
+
+out_blkdev_put:
blkdev_put(bdev, mode);
+out_abort_claiming:
+ bd_abort_claiming(whole, holder);
return ERR_PTR(error);
}
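
The block_dev.c hunks above replace the old open-then-claim sequence with a three-step protocol: bd_start_claiming() reserves the claim, blkdev_get() opens the device, and bd_claim() or bd_abort_claiming() finishes or cancels the claiming block. A minimal sketch of that sequence, mirroring open_bdev_exclusive() above (the wrapper name is illustrative and not part of the patch):

/* Sketch only: the start/get/claim-or-abort sequence introduced above. */
static struct block_device *open_exclusively(struct block_device *bdev,
					     fmode_t mode, void *holder)
{
	struct block_device *whole;
	int err;

	whole = bd_start_claiming(bdev, holder);	/* may sleep, may fail */
	if (IS_ERR(whole))
		return ERR_CAST(whole);

	err = blkdev_get(bdev, mode);
	if (err) {
		bd_abort_claiming(whole, holder);	/* cancel the claiming block */
		return ERR_PTR(err);
	}

	/* cannot fail once bd_start_claiming() has succeeded */
	BUG_ON(bd_claim(bdev, holder) != 0);
	return bdev;
}
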
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index b34d32fdaaec..c6a4f459ad76 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -1589,7 +1589,7 @@ static void btrfs_issue_discard(struct block_device *bdev,
u64 start, u64 len)
{
blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL,
- DISCARD_FL_BARRIER);
+ BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
}
static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
index f7c255f9c624..a8cd821226da 100644
--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -34,6 +34,7 @@ struct cachefiles_object {
loff_t i_size; /* object size */
unsigned long flags;
#define CACHEFILES_OBJECT_ACTIVE 0 /* T if marked active */
+#define CACHEFILES_OBJECT_BURIED 1 /* T if preemptively buried */
atomic_t usage; /* object usage count */
uint8_t type; /* object type */
uint8_t new; /* T if object new */
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index d5db84a1ee0d..f4a7840bf42c 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -93,6 +93,59 @@ static noinline void cachefiles_printk_object(struct cachefiles_object *object,
}
/*
+ * mark the owner of a dentry, if there is one, to indicate that that dentry
+ * has been preemptively deleted
+ * - the caller must hold the i_mutex on the dentry's parent as required to
+ * call vfs_unlink(), vfs_rmdir() or vfs_rename()
+ */
+static void cachefiles_mark_object_buried(struct cachefiles_cache *cache,
+ struct dentry *dentry)
+{
+ struct cachefiles_object *object;
+ struct rb_node *p;
+
+ _enter(",'%*.*s'",
+ dentry->d_name.len, dentry->d_name.len, dentry->d_name.name);
+
+ write_lock(&cache->active_lock);
+
+ p = cache->active_nodes.rb_node;
+ while (p) {
+ object = rb_entry(p, struct cachefiles_object, active_node);
+ if (object->dentry > dentry)
+ p = p->rb_left;
+ else if (object->dentry < dentry)
+ p = p->rb_right;
+ else
+ goto found_dentry;
+ }
+
+ write_unlock(&cache->active_lock);
+ _leave(" [no owner]");
+ return;
+
+ /* found the object that owns the dentry */
+found_dentry:
+ kdebug("preemptive burial: OBJ%x [%s] %p",
+ object->fscache.debug_id,
+ fscache_object_states[object->fscache.state],
+ dentry);
+
+ if (object->fscache.state < FSCACHE_OBJECT_DYING) {
+ printk(KERN_ERR "\n");
+ printk(KERN_ERR "CacheFiles: Error:"
+ " Can't preemptively bury live object\n");
+ cachefiles_printk_object(object, NULL);
+ } else if (test_and_set_bit(CACHEFILES_OBJECT_BURIED, &object->flags)) {
+ printk(KERN_ERR "CacheFiles: Error:"
+ " Object already preemptively buried\n");
+ }
+
+ write_unlock(&cache->active_lock);
+ _leave(" [owner marked]");
+}
+
+/*
* record the fact that an object is now active
*/
static int cachefiles_mark_object_active(struct cachefiles_cache *cache,
@@ -219,7 +272,8 @@ requeue:
*/
static int cachefiles_bury_object(struct cachefiles_cache *cache,
struct dentry *dir,
- struct dentry *rep)
+ struct dentry *rep,
+ bool preemptive)
{
struct dentry *grave, *trap;
char nbuffer[8 + 8 + 1];
@@ -229,11 +283,16 @@ static int cachefiles_bury_object(struct cachefiles_cache *cache,
dir->d_name.len, dir->d_name.len, dir->d_name.name,
rep->d_name.len, rep->d_name.len, rep->d_name.name);
+ _debug("remove %p from %p", rep, dir);
+
/* non-directories can just be unlinked */
if (!S_ISDIR(rep->d_inode->i_mode)) {
_debug("unlink stale object");
ret = vfs_unlink(dir->d_inode, rep);
+ if (preemptive)
+ cachefiles_mark_object_buried(cache, rep);
+
mutex_unlock(&dir->d_inode->i_mutex);
if (ret == -EIO)
@@ -325,6 +384,9 @@ try_again:
if (ret != 0 && ret != -ENOMEM)
cachefiles_io_error(cache, "Rename failed with error %d", ret);
+ if (preemptive)
+ cachefiles_mark_object_buried(cache, rep);
+
unlock_rename(cache->graveyard, dir);
dput(grave);
_leave(" = 0");
@@ -340,7 +402,7 @@ int cachefiles_delete_object(struct cachefiles_cache *cache,
struct dentry *dir;
int ret;
- _enter(",{%p}", object->dentry);
+ _enter(",OBJ%x{%p}", object->fscache.debug_id, object->dentry);
ASSERT(object->dentry);
ASSERT(object->dentry->d_inode);
@@ -350,15 +412,25 @@ int cachefiles_delete_object(struct cachefiles_cache *cache,
mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
- /* we need to check that our parent is _still_ our parent - it may have
- * been renamed */
- if (dir == object->dentry->d_parent) {
- ret = cachefiles_bury_object(cache, dir, object->dentry);
- } else {
- /* it got moved, presumably by cachefilesd culling it, so it's
- * no longer in the key path and we can ignore it */
+ if (test_bit(CACHEFILES_OBJECT_BURIED, &object->flags)) {
+ /* object allocation for the same key preemptively deleted this
+ * object's file so that it could create its own file */
+ _debug("object preemptively buried");
mutex_unlock(&dir->d_inode->i_mutex);
ret = 0;
+ } else {
+ /* we need to check that our parent is _still_ our parent - it
+ * may have been renamed */
+ if (dir == object->dentry->d_parent) {
+ ret = cachefiles_bury_object(cache, dir,
+ object->dentry, false);
+ } else {
+ /* it got moved, presumably by cachefilesd culling it,
+ * so it's no longer in the key path and we can ignore
+ * it */
+ mutex_unlock(&dir->d_inode->i_mutex);
+ ret = 0;
+ }
}
dput(dir);
@@ -381,7 +453,9 @@ int cachefiles_walk_to_object(struct cachefiles_object *parent,
const char *name;
int ret, nlen;
- _enter("{%p},,%s,", parent->dentry, key);
+ _enter("OBJ%x{%p},OBJ%x,%s,",
+ parent->fscache.debug_id, parent->dentry,
+ object->fscache.debug_id, key);
cache = container_of(parent->fscache.cache,
struct cachefiles_cache, cache);
@@ -509,7 +583,7 @@ lookup_again:
* mutex) */
object->dentry = NULL;
- ret = cachefiles_bury_object(cache, dir, next);
+ ret = cachefiles_bury_object(cache, dir, next, true);
dput(next);
next = NULL;
@@ -828,7 +902,7 @@ int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
/* actually remove the victim (drops the dir mutex) */
_debug("bury");
- ret = cachefiles_bury_object(cache, dir, victim);
+ ret = cachefiles_bury_object(cache, dir, victim, false);
if (ret < 0)
goto error;
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 4b42c2bb603f..19f93da844ab 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -274,7 +274,6 @@ static int ceph_readpages(struct file *file, struct address_space *mapping,
struct ceph_osd_client *osdc = &ceph_inode_to_client(inode)->osdc;
int rc = 0;
struct page **pages;
- struct pagevec pvec;
loff_t offset;
u64 len;
@@ -297,8 +296,6 @@ static int ceph_readpages(struct file *file, struct address_space *mapping,
if (rc < 0)
goto out;
- /* set uptodate and add to lru in pagevec-sized chunks */
- pagevec_init(&pvec, 0);
for (; !list_empty(page_list) && len > 0;
rc -= PAGE_CACHE_SIZE, len -= PAGE_CACHE_SIZE) {
struct page *page =
@@ -312,7 +309,7 @@ static int ceph_readpages(struct file *file, struct address_space *mapping,
zero_user_segment(page, s, PAGE_CACHE_SIZE);
}
- if (add_to_page_cache(page, mapping, page->index, GFP_NOFS)) {
+ if (add_to_page_cache_lru(page, mapping, page->index, GFP_NOFS)) {
page_cache_release(page);
dout("readpages %p add_to_page_cache failed %p\n",
inode, page);
@@ -323,10 +320,8 @@ static int ceph_readpages(struct file *file, struct address_space *mapping,
flush_dcache_page(page);
SetPageUptodate(page);
unlock_page(page);
- if (pagevec_add(&pvec, page) == 0)
- pagevec_lru_add_file(&pvec); /* add to lru */
+ page_cache_release(page);
}
- pagevec_lru_add_file(&pvec);
rc = 0;
out:
@@ -573,7 +568,7 @@ static void writepages_finish(struct ceph_osd_request *req,
ceph_release_pages(req->r_pages, req->r_num_pages);
if (req->r_pages_from_pool)
mempool_free(req->r_pages,
- ceph_client(inode->i_sb)->wb_pagevec_pool);
+ ceph_sb_to_client(inode->i_sb)->wb_pagevec_pool);
else
kfree(req->r_pages);
ceph_osdc_put_request(req);
diff --git a/fs/ceph/auth.c b/fs/ceph/auth.c
index 818afe72e6c7..9f46de2ba7a7 100644
--- a/fs/ceph/auth.c
+++ b/fs/ceph/auth.c
@@ -150,7 +150,8 @@ int ceph_build_auth_request(struct ceph_auth_client *ac,
ret = ac->ops->build_request(ac, p + sizeof(u32), end);
if (ret < 0) {
- pr_err("error %d building request\n", ret);
+ pr_err("error %d building auth method %s request\n", ret,
+ ac->ops->name);
return ret;
}
dout(" built request %d bytes\n", ret);
@@ -216,8 +217,8 @@ int ceph_handle_auth_reply(struct ceph_auth_client *ac,
if (ac->protocol != protocol) {
ret = ceph_auth_init_protocol(ac, protocol);
if (ret) {
- pr_err("error %d on auth protocol %d init\n",
- ret, protocol);
+ pr_err("error %d on auth method %s init\n",
+ ret, ac->ops->name);
goto out;
}
}
@@ -229,7 +230,7 @@ int ceph_handle_auth_reply(struct ceph_auth_client *ac,
if (ret == -EAGAIN) {
return ceph_build_auth_request(ac, reply_buf, reply_len);
} else if (ret) {
- pr_err("authentication error %d\n", ret);
+ pr_err("auth method '%s' error %d\n", ac->ops->name, ret);
return ret;
}
return 0;
diff --git a/fs/ceph/auth.h b/fs/ceph/auth.h
index ca4f57cfb267..4429a707c021 100644
--- a/fs/ceph/auth.h
+++ b/fs/ceph/auth.h
@@ -15,6 +15,8 @@ struct ceph_auth_client;
struct ceph_authorizer;
struct ceph_auth_client_ops {
+ const char *name;
+
/*
* true if we are authenticated and can connect to
* services.
diff --git a/fs/ceph/auth_none.c b/fs/ceph/auth_none.c
index 8cd9e3af07f7..24407c119291 100644
--- a/fs/ceph/auth_none.c
+++ b/fs/ceph/auth_none.c
@@ -94,6 +94,7 @@ static void ceph_auth_none_destroy_authorizer(struct ceph_auth_client *ac,
}
static const struct ceph_auth_client_ops ceph_auth_none_ops = {
+ .name = "none",
.reset = reset,
.destroy = destroy,
.is_authenticated = is_authenticated,
diff --git a/fs/ceph/auth_x.c b/fs/ceph/auth_x.c
index fee5a08da881..7b206231566d 100644
--- a/fs/ceph/auth_x.c
+++ b/fs/ceph/auth_x.c
@@ -127,7 +127,7 @@ static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
int ret;
char *dbuf;
char *ticket_buf;
- u8 struct_v;
+ u8 reply_struct_v;
dbuf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
if (!dbuf)
@@ -139,14 +139,14 @@ static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
goto out_dbuf;
ceph_decode_need(&p, end, 1 + sizeof(u32), bad);
- struct_v = ceph_decode_8(&p);
- if (struct_v != 1)
+ reply_struct_v = ceph_decode_8(&p);
+ if (reply_struct_v != 1)
goto bad;
num = ceph_decode_32(&p);
dout("%d tickets\n", num);
while (num--) {
int type;
- u8 struct_v;
+ u8 tkt_struct_v, blob_struct_v;
struct ceph_x_ticket_handler *th;
void *dp, *dend;
int dlen;
@@ -165,8 +165,8 @@ static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
type = ceph_decode_32(&p);
dout(" ticket type %d %s\n", type, ceph_entity_type_name(type));
- struct_v = ceph_decode_8(&p);
- if (struct_v != 1)
+ tkt_struct_v = ceph_decode_8(&p);
+ if (tkt_struct_v != 1)
goto bad;
th = get_ticket_handler(ac, type);
@@ -186,8 +186,8 @@ static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
dend = dbuf + dlen;
dp = dbuf;
- struct_v = ceph_decode_8(&dp);
- if (struct_v != 1)
+ tkt_struct_v = ceph_decode_8(&dp);
+ if (tkt_struct_v != 1)
goto bad;
memcpy(&old_key, &th->session_key, sizeof(old_key));
@@ -224,7 +224,7 @@ static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
tpend = tp + dlen;
dout(" ticket blob is %d bytes\n", dlen);
ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
- struct_v = ceph_decode_8(&tp);
+ blob_struct_v = ceph_decode_8(&tp);
new_secret_id = ceph_decode_64(&tp);
ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend);
if (ret)
@@ -618,6 +618,7 @@ static void ceph_x_invalidate_authorizer(struct ceph_auth_client *ac,
static const struct ceph_auth_client_ops ceph_x_ops = {
+ .name = "x",
.is_authenticated = ceph_x_is_authenticated,
.build_request = ceph_x_build_request,
.handle_reply = ceph_x_handle_reply,
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 0c1681806867..410539aa3c08 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -865,7 +865,8 @@ void __ceph_remove_cap(struct ceph_cap *cap)
{
struct ceph_mds_session *session = cap->session;
struct ceph_inode_info *ci = cap->ci;
- struct ceph_mds_client *mdsc = &ceph_client(ci->vfs_inode.i_sb)->mdsc;
+ struct ceph_mds_client *mdsc =
+ &ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);
@@ -932,9 +933,9 @@ static int send_cap_msg(struct ceph_mds_session *session,
seq, issue_seq, mseq, follows, size, max_size,
xattr_version, xattrs_buf ? (int)xattrs_buf->vec.iov_len : 0);
- msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc), 0, 0, NULL);
- if (IS_ERR(msg))
- return PTR_ERR(msg);
+ msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc));
+ if (!msg)
+ return -ENOMEM;
msg->hdr.tid = cpu_to_le64(flush_tid);
@@ -1293,7 +1294,8 @@ static void ceph_flush_snaps(struct ceph_inode_info *ci)
*/
void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
{
- struct ceph_mds_client *mdsc = &ceph_client(ci->vfs_inode.i_sb)->mdsc;
+ struct ceph_mds_client *mdsc =
+ &ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
struct inode *inode = &ci->vfs_inode;
int was = ci->i_dirty_caps;
int dirty = 0;
@@ -1331,7 +1333,7 @@ void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
static int __mark_caps_flushing(struct inode *inode,
struct ceph_mds_session *session)
{
- struct ceph_mds_client *mdsc = &ceph_client(inode->i_sb)->mdsc;
+ struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc;
struct ceph_inode_info *ci = ceph_inode(inode);
int flushing;
@@ -1658,7 +1660,7 @@ ack:
static int try_flush_caps(struct inode *inode, struct ceph_mds_session *session,
unsigned *flush_tid)
{
- struct ceph_mds_client *mdsc = &ceph_client(inode->i_sb)->mdsc;
+ struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc;
struct ceph_inode_info *ci = ceph_inode(inode);
int unlock_session = session ? 0 : 1;
int flushing = 0;
@@ -1824,7 +1826,8 @@ int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
err = wait_event_interruptible(ci->i_cap_wq,
caps_are_flushed(inode, flush_tid));
} else {
- struct ceph_mds_client *mdsc = &ceph_client(inode->i_sb)->mdsc;
+ struct ceph_mds_client *mdsc =
+ &ceph_sb_to_client(inode->i_sb)->mdsc;
spin_lock(&inode->i_lock);
if (__ceph_caps_dirty(ci))
@@ -2406,7 +2409,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
__releases(inode->i_lock)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_mds_client *mdsc = &ceph_client(inode->i_sb)->mdsc;
+ struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc;
unsigned seq = le32_to_cpu(m->seq);
int dirty = le32_to_cpu(m->dirty);
int cleaned = 0;
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 650d2db5ed26..d2fea5164559 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -51,8 +51,11 @@ int ceph_init_dentry(struct dentry *dentry)
return -ENOMEM; /* oh well */
spin_lock(&dentry->d_lock);
- if (dentry->d_fsdata) /* lost a race */
+ if (dentry->d_fsdata) {
+ /* lost a race */
+ kmem_cache_free(ceph_dentry_cachep, di);
goto out_unlock;
+ }
di->dentry = dentry;
di->lease_session = NULL;
dentry->d_fsdata = di;
@@ -125,7 +128,8 @@ more:
dentry = list_entry(p, struct dentry, d_u.d_child);
di = ceph_dentry(dentry);
while (1) {
- dout(" p %p/%p d_subdirs %p/%p\n", p->prev, p->next,
+ dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
+ d_unhashed(dentry) ? "!hashed" : "hashed",
parent->d_subdirs.prev, parent->d_subdirs.next);
if (p == &parent->d_subdirs) {
fi->at_end = 1;
@@ -335,7 +339,7 @@ more:
if (req->r_reply_info.dir_end) {
kfree(fi->last_name);
fi->last_name = NULL;
- fi->next_offset = 0;
+ fi->next_offset = 2;
} else {
rinfo = &req->r_reply_info;
err = note_last_dentry(fi,
@@ -478,7 +482,7 @@ static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int origin)
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
struct dentry *dentry, int err)
{
- struct ceph_client *client = ceph_client(dentry->d_sb);
+ struct ceph_client *client = ceph_sb_to_client(dentry->d_sb);
struct inode *parent = dentry->d_parent->d_inode;
/* .snap dir? */
@@ -568,7 +572,6 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
!is_root_ceph_dentry(dir, dentry) &&
(ci->i_ceph_flags & CEPH_I_COMPLETE) &&
(__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
- di->offset = ci->i_max_offset++;
spin_unlock(&dir->i_lock);
dout(" dir %p complete, -ENOENT\n", dir);
d_add(dentry, NULL);
@@ -972,8 +975,9 @@ static int ceph_d_revalidate(struct dentry *dentry, struct nameidata *nd)
{
struct inode *dir = dentry->d_parent->d_inode;
- dout("d_revalidate %p '%.*s' inode %p\n", dentry,
- dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
+ dout("d_revalidate %p '%.*s' inode %p offset %lld\n", dentry,
+ dentry->d_name.len, dentry->d_name.name, dentry->d_inode,
+ ceph_dentry(dentry)->offset);
/* always trust cached snapped dentries, snapdir dentry */
if (ceph_snap(dir) != CEPH_NOSNAP) {
@@ -1050,7 +1054,7 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
struct ceph_inode_info *ci = ceph_inode(inode);
int left;
- if (!ceph_test_opt(ceph_client(inode->i_sb), DIRSTAT))
+ if (!ceph_test_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
return -EISDIR;
if (!cf->dir_info) {
@@ -1152,7 +1156,7 @@ void ceph_dentry_lru_add(struct dentry *dn)
dout("dentry_lru_add %p %p '%.*s'\n", di, dn,
dn->d_name.len, dn->d_name.name);
if (di) {
- mdsc = &ceph_client(dn->d_sb)->mdsc;
+ mdsc = &ceph_sb_to_client(dn->d_sb)->mdsc;
spin_lock(&mdsc->dentry_lru_lock);
list_add_tail(&di->lru, &mdsc->dentry_lru);
mdsc->num_dentry++;
@@ -1165,10 +1169,10 @@ void ceph_dentry_lru_touch(struct dentry *dn)
struct ceph_dentry_info *di = ceph_dentry(dn);
struct ceph_mds_client *mdsc;
- dout("dentry_lru_touch %p %p '%.*s'\n", di, dn,
- dn->d_name.len, dn->d_name.name);
+ dout("dentry_lru_touch %p %p '%.*s' (offset %lld)\n", di, dn,
+ dn->d_name.len, dn->d_name.name, di->offset);
if (di) {
- mdsc = &ceph_client(dn->d_sb)->mdsc;
+ mdsc = &ceph_sb_to_client(dn->d_sb)->mdsc;
spin_lock(&mdsc->dentry_lru_lock);
list_move_tail(&di->lru, &mdsc->dentry_lru);
spin_unlock(&mdsc->dentry_lru_lock);
@@ -1183,7 +1187,7 @@ void ceph_dentry_lru_del(struct dentry *dn)
dout("dentry_lru_del %p %p '%.*s'\n", di, dn,
dn->d_name.len, dn->d_name.name);
if (di) {
- mdsc = &ceph_client(dn->d_sb)->mdsc;
+ mdsc = &ceph_sb_to_client(dn->d_sb)->mdsc;
spin_lock(&mdsc->dentry_lru_lock);
list_del_init(&di->lru);
mdsc->num_dentry--;
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index 9d67572fb328..17447644d675 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -93,11 +93,11 @@ static struct dentry *__fh_to_dentry(struct super_block *sb,
return ERR_PTR(-ESTALE);
dentry = d_obtain_alias(inode);
- if (!dentry) {
+ if (IS_ERR(dentry)) {
pr_err("fh_to_dentry %llx -- inode %p but ENOMEM\n",
fh->ino, inode);
iput(inode);
- return ERR_PTR(-ENOMEM);
+ return dentry;
}
err = ceph_init_dentry(dentry);
@@ -115,7 +115,7 @@ static struct dentry *__fh_to_dentry(struct super_block *sb,
static struct dentry *__cfh_to_dentry(struct super_block *sb,
struct ceph_nfs_confh *cfh)
{
- struct ceph_mds_client *mdsc = &ceph_client(sb)->mdsc;
+ struct ceph_mds_client *mdsc = &ceph_sb_to_client(sb)->mdsc;
struct inode *inode;
struct dentry *dentry;
struct ceph_vino vino;
@@ -149,11 +149,11 @@ static struct dentry *__cfh_to_dentry(struct super_block *sb,
}
dentry = d_obtain_alias(inode);
- if (!dentry) {
+ if (IS_ERR(dentry)) {
pr_err("cfh_to_dentry %llx -- inode %p but ENOMEM\n",
cfh->ino, inode);
iput(inode);
- return ERR_PTR(-ENOMEM);
+ return dentry;
}
err = ceph_init_dentry(dentry);
if (err < 0) {
@@ -202,11 +202,11 @@ static struct dentry *ceph_fh_to_parent(struct super_block *sb,
return ERR_PTR(-ESTALE);
dentry = d_obtain_alias(inode);
- if (!dentry) {
+ if (IS_ERR(dentry)) {
pr_err("fh_to_parent %llx -- inode %p but ENOMEM\n",
cfh->ino, inode);
iput(inode);
- return ERR_PTR(-ENOMEM);
+ return dentry;
}
err = ceph_init_dentry(dentry);
if (err < 0) {
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index ed6f19721d6e..b0426090e8c3 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -326,7 +326,7 @@ static struct page **alloc_page_vector(int num_pages)
if (!pages)
return ERR_PTR(-ENOMEM);
for (i = 0; i < num_pages; i++) {
- pages[i] = alloc_page(GFP_NOFS);
+ pages[i] = __page_cache_alloc(GFP_NOFS);
if (pages[i] == NULL) {
ceph_release_page_vector(pages, i);
return ERR_PTR(-ENOMEM);
@@ -649,8 +649,8 @@ more:
do_sync,
ci->i_truncate_seq, ci->i_truncate_size,
&mtime, false, 2);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ if (!req)
+ return -ENOMEM;
num_pages = calc_pages_for(pos, len);
@@ -809,7 +809,7 @@ static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_dentry->d_inode;
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_osd_client *osdc = &ceph_client(inode->i_sb)->osdc;
+ struct ceph_osd_client *osdc = &ceph_sb_to_client(inode->i_sb)->osdc;
loff_t endoff = pos + iov->iov_len;
int got = 0;
int ret, err;
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 261f3e6c0bcf..ac5b31bfb5d9 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -384,7 +384,7 @@ void ceph_destroy_inode(struct inode *inode)
*/
if (ci->i_snap_realm) {
struct ceph_mds_client *mdsc =
- &ceph_client(ci->vfs_inode.i_sb)->mdsc;
+ &ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
struct ceph_snap_realm *realm = ci->i_snap_realm;
dout(" dropping residual ref to snap realm %p\n", realm);
@@ -619,11 +619,12 @@ static int fill_inode(struct inode *inode,
memcpy(ci->i_xattrs.blob->vec.iov_base,
iinfo->xattr_data, iinfo->xattr_len);
ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
+ xattr_blob = NULL;
}
inode->i_mapping->a_ops = &ceph_aops;
inode->i_mapping->backing_dev_info =
- &ceph_client(inode->i_sb)->backing_dev_info;
+ &ceph_sb_to_client(inode->i_sb)->backing_dev_info;
switch (inode->i_mode & S_IFMT) {
case S_IFIFO:
@@ -674,14 +675,15 @@ static int fill_inode(struct inode *inode,
/* set dir completion flag? */
if (ci->i_files == 0 && ci->i_subdirs == 0 &&
ceph_snap(inode) == CEPH_NOSNAP &&
- (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED)) {
+ (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) &&
+ (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
dout(" marking %p complete (empty)\n", inode);
ci->i_ceph_flags |= CEPH_I_COMPLETE;
ci->i_max_offset = 2;
}
/* it may be better to set st_size in getattr instead? */
- if (ceph_test_opt(ceph_client(inode->i_sb), RBYTES))
+ if (ceph_test_opt(ceph_sb_to_client(inode->i_sb), RBYTES))
inode->i_size = ci->i_rbytes;
break;
default:
@@ -798,6 +800,37 @@ out_unlock:
}
/*
+ * Set dentry's directory position based on the current dir's max, and
+ * order it in d_subdirs, so that dcache_readdir behaves.
+ */
+static void ceph_set_dentry_offset(struct dentry *dn)
+{
+ struct dentry *dir = dn->d_parent;
+ struct inode *inode = dn->d_parent->d_inode;
+ struct ceph_dentry_info *di;
+
+ BUG_ON(!inode);
+
+ di = ceph_dentry(dn);
+
+ spin_lock(&inode->i_lock);
+ if ((ceph_inode(inode)->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
+ spin_unlock(&inode->i_lock);
+ return;
+ }
+ di->offset = ceph_inode(inode)->i_max_offset++;
+ spin_unlock(&inode->i_lock);
+
+ spin_lock(&dcache_lock);
+ spin_lock(&dn->d_lock);
+ list_move_tail(&dir->d_subdirs, &dn->d_u.d_child);
+ dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
+ dn->d_u.d_child.prev, dn->d_u.d_child.next);
+ spin_unlock(&dn->d_lock);
+ spin_unlock(&dcache_lock);
+}
+
+/*
* splice a dentry to an inode.
* caller must hold directory i_mutex for this to be safe.
*
@@ -810,6 +843,8 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
{
struct dentry *realdn;
+ BUG_ON(dn->d_inode);
+
/* dn must be unhashed */
if (!d_unhashed(dn))
d_drop(dn);
@@ -831,44 +866,17 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
dn = realdn;
} else {
BUG_ON(!ceph_dentry(dn));
-
dout("dn %p attached to %p ino %llx.%llx\n",
dn, dn->d_inode, ceph_vinop(dn->d_inode));
}
if ((!prehash || *prehash) && d_unhashed(dn))
d_rehash(dn);
+ ceph_set_dentry_offset(dn);
out:
return dn;
}
/*
- * Set dentry's directory position based on the current dir's max, and
- * order it in d_subdirs, so that dcache_readdir behaves.
- */
-static void ceph_set_dentry_offset(struct dentry *dn)
-{
- struct dentry *dir = dn->d_parent;
- struct inode *inode = dn->d_parent->d_inode;
- struct ceph_dentry_info *di;
-
- BUG_ON(!inode);
-
- di = ceph_dentry(dn);
-
- spin_lock(&inode->i_lock);
- di->offset = ceph_inode(inode)->i_max_offset++;
- spin_unlock(&inode->i_lock);
-
- spin_lock(&dcache_lock);
- spin_lock(&dn->d_lock);
- list_move_tail(&dir->d_subdirs, &dn->d_u.d_child);
- dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
- dn->d_u.d_child.prev, dn->d_u.d_child.next);
- spin_unlock(&dn->d_lock);
- spin_unlock(&dcache_lock);
-}
-
-/*
* Incorporate results into the local cache. This is either just
* one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
* after a lookup).
@@ -1012,6 +1020,9 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
dn->d_time = jiffies;
ceph_dentry(dn)->lease_shared_gen = 0;
/* take overwritten dentry's readdir offset */
+ dout("dn %p gets %p offset %lld (old offset %lld)\n",
+ req->r_old_dentry, dn, ceph_dentry(dn)->offset,
+ ceph_dentry(req->r_old_dentry)->offset);
ceph_dentry(req->r_old_dentry)->offset =
ceph_dentry(dn)->offset;
dn = req->r_old_dentry; /* use old_dentry */
@@ -1055,7 +1066,6 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
goto done;
}
req->r_dentry = dn; /* may have spliced */
- ceph_set_dentry_offset(dn);
igrab(in);
} else if (ceph_ino(in) == vino.ino &&
ceph_snap(in) == vino.snap) {
@@ -1098,7 +1108,6 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
err = PTR_ERR(dn);
goto done;
}
- ceph_set_dentry_offset(dn);
req->r_dentry = dn; /* may have spliced */
igrab(in);
rinfo->head->is_dentry = 1; /* fool notrace handlers */
@@ -1425,7 +1434,7 @@ void ceph_queue_vmtruncate(struct inode *inode)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- if (queue_work(ceph_client(inode->i_sb)->trunc_wq,
+ if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
&ci->i_vmtruncate_work)) {
dout("ceph_queue_vmtruncate %p\n", inode);
igrab(inode);
@@ -1514,7 +1523,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
struct inode *parent_inode = dentry->d_parent->d_inode;
const unsigned int ia_valid = attr->ia_valid;
struct ceph_mds_request *req;
- struct ceph_mds_client *mdsc = &ceph_client(dentry->d_sb)->mdsc;
+ struct ceph_mds_client *mdsc = &ceph_sb_to_client(dentry->d_sb)->mdsc;
int issued;
int release = 0, dirtied = 0;
int mask = 0;
diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
index 8a5bcae62846..d085f07756b4 100644
--- a/fs/ceph/ioctl.c
+++ b/fs/ceph/ioctl.c
@@ -98,7 +98,7 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
struct ceph_ioctl_dataloc dl;
struct inode *inode = file->f_dentry->d_inode;
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_osd_client *osdc = &ceph_client(inode->i_sb)->osdc;
+ struct ceph_osd_client *osdc = &ceph_sb_to_client(inode->i_sb)->osdc;
u64 len = 1, olen;
u64 tmp;
struct ceph_object_layout ol;
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 60a9a4ae47be..651fa521b58f 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -665,10 +665,10 @@ static struct ceph_msg *create_session_msg(u32 op, u64 seq)
struct ceph_msg *msg;
struct ceph_mds_session_head *h;
- msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), 0, 0, NULL);
- if (IS_ERR(msg)) {
+ msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h));
+ if (!msg) {
pr_err("create_session_msg ENOMEM creating msg\n");
- return ERR_PTR(PTR_ERR(msg));
+ return NULL;
}
h = msg->front.iov_base;
h->op = cpu_to_le32(op);
@@ -687,7 +687,6 @@ static int __open_session(struct ceph_mds_client *mdsc,
struct ceph_msg *msg;
int mstate;
int mds = session->s_mds;
- int err = 0;
/* wait for mds to go active? */
mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
@@ -698,13 +697,9 @@ static int __open_session(struct ceph_mds_client *mdsc,
/* send connect message */
msg = create_session_msg(CEPH_SESSION_REQUEST_OPEN, session->s_seq);
- if (IS_ERR(msg)) {
- err = PTR_ERR(msg);
- goto out;
- }
+ if (!msg)
+ return -ENOMEM;
ceph_con_send(&session->s_con, msg);
-
-out:
return 0;
}
@@ -882,8 +877,8 @@ static int send_renew_caps(struct ceph_mds_client *mdsc,
ceph_mds_state_name(state));
msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
++session->s_renew_seq);
- if (IS_ERR(msg))
- return PTR_ERR(msg);
+ if (!msg)
+ return -ENOMEM;
ceph_con_send(&session->s_con, msg);
return 0;
}
@@ -930,17 +925,15 @@ static int request_close_session(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session)
{
struct ceph_msg *msg;
- int err = 0;
dout("request_close_session mds%d state %s seq %lld\n",
session->s_mds, session_state_name(session->s_state),
session->s_seq);
msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
- if (IS_ERR(msg))
- err = PTR_ERR(msg);
- else
- ceph_con_send(&session->s_con, msg);
- return err;
+ if (!msg)
+ return -ENOMEM;
+ ceph_con_send(&session->s_con, msg);
+ return 0;
}
/*
@@ -1057,8 +1050,7 @@ static int add_cap_releases(struct ceph_mds_client *mdsc,
while (session->s_num_cap_releases < session->s_nr_caps + extra) {
spin_unlock(&session->s_cap_lock);
- msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, PAGE_CACHE_SIZE,
- 0, 0, NULL);
+ msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, PAGE_CACHE_SIZE);
if (!msg)
goto out_unlocked;
dout("add_cap_releases %p msg %p now %d\n", session, msg,
@@ -1150,10 +1142,8 @@ static void send_cap_releases(struct ceph_mds_client *mdsc,
struct ceph_msg *msg;
dout("send_cap_releases mds%d\n", session->s_mds);
- while (1) {
- spin_lock(&session->s_cap_lock);
- if (list_empty(&session->s_cap_releases_done))
- break;
+ spin_lock(&session->s_cap_lock);
+ while (!list_empty(&session->s_cap_releases_done)) {
msg = list_first_entry(&session->s_cap_releases_done,
struct ceph_msg, list_head);
list_del_init(&msg->list_head);
@@ -1161,6 +1151,7 @@ static void send_cap_releases(struct ceph_mds_client *mdsc,
msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
ceph_con_send(&session->s_con, msg);
+ spin_lock(&session->s_cap_lock);
}
spin_unlock(&session->s_cap_lock);
}
@@ -1266,7 +1257,7 @@ retry:
struct inode *inode = temp->d_inode;
if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
- dout("build_path_dentry path+%d: %p SNAPDIR\n",
+ dout("build_path path+%d: %p SNAPDIR\n",
pos, temp);
} else if (stop_on_nosnap && inode &&
ceph_snap(inode) == CEPH_NOSNAP) {
@@ -1277,20 +1268,18 @@ retry:
break;
strncpy(path + pos, temp->d_name.name,
temp->d_name.len);
- dout("build_path_dentry path+%d: %p '%.*s'\n",
- pos, temp, temp->d_name.len, path + pos);
}
if (pos)
path[--pos] = '/';
temp = temp->d_parent;
if (temp == NULL) {
- pr_err("build_path_dentry corrupt dentry\n");
+ pr_err("build_path corrupt dentry\n");
kfree(path);
return ERR_PTR(-EINVAL);
}
}
if (pos != 0) {
- pr_err("build_path_dentry did not end path lookup where "
+ pr_err("build_path did not end path lookup where "
"expected, namelen is %d, pos is %d\n", len, pos);
/* presumably this is only possible if racing with a
rename of one of the parent directories (we can not
@@ -1302,7 +1291,7 @@ retry:
*base = ceph_ino(temp->d_inode);
*plen = len;
- dout("build_path_dentry on %p %d built %llx '%.*s'\n",
+ dout("build_path on %p %d built %llx '%.*s'\n",
dentry, atomic_read(&dentry->d_count), *base, len, path);
return path;
}
@@ -1425,9 +1414,11 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
if (req->r_old_dentry_drop)
len += req->r_old_dentry->d_name.len;
- msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, 0, 0, NULL);
- if (IS_ERR(msg))
+ msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len);
+ if (!msg) {
+ msg = ERR_PTR(-ENOMEM);
goto out_free2;
+ }
msg->hdr.tid = cpu_to_le64(req->r_tid);
@@ -1518,7 +1509,7 @@ static int __prepare_send_request(struct ceph_mds_client *mdsc,
if (IS_ERR(msg)) {
req->r_reply = ERR_PTR(PTR_ERR(msg));
complete_request(mdsc, req);
- return -PTR_ERR(msg);
+ return PTR_ERR(msg);
}
req->r_request = msg;
@@ -2146,11 +2137,10 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc, int mds)
goto fail_nopagelist;
ceph_pagelist_init(pagelist);
- reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, 0, 0, NULL);
- if (IS_ERR(reply)) {
- err = PTR_ERR(reply);
+ err = -ENOMEM;
+ reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0);
+ if (!reply)
goto fail_nomsg;
- }
/* find session */
session = __ceph_lookup_mds_session(mdsc, mds);
@@ -2453,8 +2443,8 @@ void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
dnamelen = dentry->d_name.len;
len += dnamelen;
- msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, 0, 0, NULL);
- if (IS_ERR(msg))
+ msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len);
+ if (!msg)
return;
lease = msg->front.iov_base;
lease->action = action;
@@ -2616,6 +2606,9 @@ int ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client)
mdsc->client = client;
mutex_init(&mdsc->mutex);
mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
+ if (mdsc->mdsmap == NULL)
+ return -ENOMEM;
+
init_completion(&mdsc->safe_umount_waiters);
init_completion(&mdsc->session_close_waiters);
INIT_LIST_HEAD(&mdsc->waiting_for_map);
@@ -2641,6 +2634,7 @@ int ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client)
init_waitqueue_head(&mdsc->cap_flushing_wq);
spin_lock_init(&mdsc->dentry_lru_lock);
INIT_LIST_HEAD(&mdsc->dentry_lru);
+
return 0;
}
@@ -2736,6 +2730,9 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
{
u64 want_tid, want_flush;
+ if (mdsc->client->mount_state == CEPH_MOUNT_SHUTDOWN)
+ return;
+
dout("sync\n");
mutex_lock(&mdsc->mutex);
want_tid = mdsc->last_tid;
diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c
index 509f57d9ccb3..27fc523597e4 100644
--- a/fs/ceph/messenger.c
+++ b/fs/ceph/messenger.c
@@ -340,6 +340,7 @@ static void reset_connection(struct ceph_connection *con)
ceph_msg_put(con->out_msg);
con->out_msg = NULL;
}
+ con->out_keepalive_pending = false;
con->in_seq = 0;
con->in_seq_acked = 0;
}
@@ -357,6 +358,7 @@ void ceph_con_close(struct ceph_connection *con)
clear_bit(WRITE_PENDING, &con->state);
mutex_lock(&con->mutex);
reset_connection(con);
+ con->peer_global_seq = 0;
cancel_delayed_work(&con->work);
mutex_unlock(&con->mutex);
queue_con(con);
@@ -1395,19 +1397,17 @@ static int read_partial_message(struct ceph_connection *con)
con->in_msg = ceph_alloc_msg(con, &con->in_hdr, &skip);
if (skip) {
/* skip this message */
- dout("alloc_msg returned NULL, skipping message\n");
+ dout("alloc_msg said skip message\n");
con->in_base_pos = -front_len - middle_len - data_len -
sizeof(m->footer);
con->in_tag = CEPH_MSGR_TAG_READY;
con->in_seq++;
return 0;
}
- if (IS_ERR(con->in_msg)) {
- ret = PTR_ERR(con->in_msg);
- con->in_msg = NULL;
+ if (!con->in_msg) {
con->error_msg =
"error allocating memory for incoming message";
- return ret;
+ return -ENOMEM;
}
m = con->in_msg;
m->front.iov_len = 0; /* haven't read it yet */
@@ -1539,7 +1539,6 @@ static int try_write(struct ceph_connection *con)
dout("try_write start %p state %lu nref %d\n", con, con->state,
atomic_read(&con->nref));
- mutex_lock(&con->mutex);
more:
dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);
@@ -1632,7 +1631,6 @@ do_next:
done:
ret = 0;
out:
- mutex_unlock(&con->mutex);
dout("try_write done on %p\n", con);
return ret;
}
@@ -1644,7 +1642,6 @@ out:
*/
static int try_read(struct ceph_connection *con)
{
- struct ceph_messenger *msgr;
int ret = -1;
if (!con->sock)
@@ -1654,9 +1651,6 @@ static int try_read(struct ceph_connection *con)
return 0;
dout("try_read start on %p\n", con);
- msgr = con->msgr;
-
- mutex_lock(&con->mutex);
more:
dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
@@ -1751,7 +1745,6 @@ more:
done:
ret = 0;
out:
- mutex_unlock(&con->mutex);
dout("try_read done on %p\n", con);
return ret;
@@ -1823,6 +1816,8 @@ more:
dout("con_work %p start, clearing QUEUED\n", con);
clear_bit(QUEUED, &con->state);
+ mutex_lock(&con->mutex);
+
if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */
dout("con_work CLOSED\n");
con_close_socket(con);
@@ -1837,11 +1832,16 @@ more:
if (test_and_clear_bit(SOCK_CLOSED, &con->state) ||
try_read(con) < 0 ||
try_write(con) < 0) {
+ mutex_unlock(&con->mutex);
backoff = 1;
ceph_fault(con); /* error/fault path */
+ goto done_unlocked;
}
done:
+ mutex_unlock(&con->mutex);
+
+done_unlocked:
clear_bit(BUSY, &con->state);
dout("con->state=%lu\n", con->state);
if (test_bit(QUEUED, &con->state)) {
@@ -1940,7 +1940,7 @@ struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr)
/* the zero page is needed if a request is "canceled" while the message
* is being written over the socket */
- msgr->zero_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ msgr->zero_page = __page_cache_alloc(GFP_KERNEL | __GFP_ZERO);
if (!msgr->zero_page) {
kfree(msgr);
return ERR_PTR(-ENOMEM);
@@ -2074,8 +2074,7 @@ void ceph_con_keepalive(struct ceph_connection *con)
* construct a new message with given type, size
* the new msg has a ref count of 1.
*/
-struct ceph_msg *ceph_msg_new(int type, int front_len,
- int page_len, int page_off, struct page **pages)
+struct ceph_msg *ceph_msg_new(int type, int front_len)
{
struct ceph_msg *m;
@@ -2088,8 +2087,8 @@ struct ceph_msg *ceph_msg_new(int type, int front_len,
m->hdr.type = cpu_to_le16(type);
m->hdr.front_len = cpu_to_le32(front_len);
m->hdr.middle_len = 0;
- m->hdr.data_len = cpu_to_le32(page_len);
- m->hdr.data_off = cpu_to_le16(page_off);
+ m->hdr.data_len = 0;
+ m->hdr.data_off = 0;
m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
m->footer.front_crc = 0;
m->footer.middle_crc = 0;
@@ -2122,19 +2121,18 @@ struct ceph_msg *ceph_msg_new(int type, int front_len,
m->middle = NULL;
/* data */
- m->nr_pages = calc_pages_for(page_off, page_len);
- m->pages = pages;
+ m->nr_pages = 0;
+ m->pages = NULL;
m->pagelist = NULL;
- dout("ceph_msg_new %p page %d~%d -> %d\n", m, page_off, page_len,
- m->nr_pages);
+ dout("ceph_msg_new %p front %d\n", m, front_len);
return m;
out2:
ceph_msg_put(m);
out:
- pr_err("msg_new can't create type %d len %d\n", type, front_len);
- return ERR_PTR(-ENOMEM);
+ pr_err("msg_new can't create type %d front %d\n", type, front_len);
+ return NULL;
}
/*
@@ -2177,29 +2175,25 @@ static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
mutex_unlock(&con->mutex);
msg = con->ops->alloc_msg(con, hdr, skip);
mutex_lock(&con->mutex);
- if (IS_ERR(msg))
- return msg;
-
- if (*skip)
+ if (!msg || *skip)
return NULL;
}
if (!msg) {
*skip = 0;
- msg = ceph_msg_new(type, front_len, 0, 0, NULL);
+ msg = ceph_msg_new(type, front_len);
if (!msg) {
pr_err("unable to allocate msg type %d len %d\n",
type, front_len);
- return ERR_PTR(-ENOMEM);
+ return NULL;
}
}
memcpy(&msg->hdr, &con->in_hdr, sizeof(con->in_hdr));
- if (middle_len) {
+ if (middle_len && !msg->middle) {
ret = ceph_alloc_middle(con, msg);
-
if (ret < 0) {
ceph_msg_put(msg);
- return msg;
+ return NULL;
}
}
diff --git a/fs/ceph/messenger.h b/fs/ceph/messenger.h
index a343dae73cdc..9f0a540be2d6 100644
--- a/fs/ceph/messenger.h
+++ b/fs/ceph/messenger.h
@@ -157,7 +157,6 @@ struct ceph_connection {
struct list_head out_queue;
struct list_head out_sent; /* sending or sent but unacked */
u64 out_seq; /* last message queued for send */
- u64 out_seq_sent; /* last message sent */
bool out_keepalive_pending;
u64 in_seq, in_seq_acked; /* last message received, acked */
@@ -233,9 +232,7 @@ extern void ceph_con_keepalive(struct ceph_connection *con);
extern struct ceph_connection *ceph_con_get(struct ceph_connection *con);
extern void ceph_con_put(struct ceph_connection *con);
-extern struct ceph_msg *ceph_msg_new(int type, int front_len,
- int page_len, int page_off,
- struct page **pages);
+extern struct ceph_msg *ceph_msg_new(int type, int front_len);
extern void ceph_msg_kfree(struct ceph_msg *m);
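
The messenger changes above reduce ceph_msg_new() to (type, front_len) and make it return NULL rather than an ERR_PTR on allocation failure, which is the convention the many call-site conversions in this series rely on. A hedged sketch of the resulting pattern (the surrounding function is illustrative only):

/* Sketch only: allocating and sending a front-only message with the new API. */
static int send_statfs_sketch(struct ceph_connection *con)
{
	struct ceph_msg *msg;

	msg = ceph_msg_new(CEPH_MSG_STATFS, sizeof(struct ceph_mon_statfs));
	if (!msg)
		return -ENOMEM;		/* NULL now, no IS_ERR()/PTR_ERR() */

	ceph_con_send(con, msg);	/* the connection consumes this reference */
	return 0;
}
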
diff --git a/fs/ceph/mon_client.c b/fs/ceph/mon_client.c
index 8fdc011ca956..9900706fee75 100644
--- a/fs/ceph/mon_client.c
+++ b/fs/ceph/mon_client.c
@@ -191,7 +191,7 @@ static void __send_subscribe(struct ceph_mon_client *monc)
struct ceph_mon_subscribe_item *i;
void *p, *end;
- msg = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 96, 0, 0, NULL);
+ msg = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 96);
if (!msg)
return;
@@ -393,16 +393,64 @@ static void __insert_statfs(struct ceph_mon_client *monc,
rb_insert_color(&new->node, &monc->statfs_request_tree);
}
+static void release_statfs_request(struct kref *kref)
+{
+ struct ceph_mon_statfs_request *req =
+ container_of(kref, struct ceph_mon_statfs_request, kref);
+
+ if (req->reply)
+ ceph_msg_put(req->reply);
+ if (req->request)
+ ceph_msg_put(req->request);
+}
+
+static void put_statfs_request(struct ceph_mon_statfs_request *req)
+{
+ kref_put(&req->kref, release_statfs_request);
+}
+
+static void get_statfs_request(struct ceph_mon_statfs_request *req)
+{
+ kref_get(&req->kref);
+}
+
+static struct ceph_msg *get_statfs_reply(struct ceph_connection *con,
+ struct ceph_msg_header *hdr,
+ int *skip)
+{
+ struct ceph_mon_client *monc = con->private;
+ struct ceph_mon_statfs_request *req;
+ u64 tid = le64_to_cpu(hdr->tid);
+ struct ceph_msg *m;
+
+ mutex_lock(&monc->mutex);
+ req = __lookup_statfs(monc, tid);
+ if (!req) {
+ dout("get_statfs_reply %lld dne\n", tid);
+ *skip = 1;
+ m = NULL;
+ } else {
+ dout("get_statfs_reply %lld got %p\n", tid, req->reply);
+ m = ceph_msg_get(req->reply);
+ /*
+ * we don't need to track the connection reading into
+ * this reply because we only have one open connection
+ * at a time, ever.
+ */
+ }
+ mutex_unlock(&monc->mutex);
+ return m;
+}
+
static void handle_statfs_reply(struct ceph_mon_client *monc,
struct ceph_msg *msg)
{
struct ceph_mon_statfs_request *req;
struct ceph_mon_statfs_reply *reply = msg->front.iov_base;
- u64 tid;
+ u64 tid = le64_to_cpu(msg->hdr.tid);
if (msg->front.iov_len != sizeof(*reply))
goto bad;
- tid = le64_to_cpu(msg->hdr.tid);
dout("handle_statfs_reply %p tid %llu\n", msg, tid);
mutex_lock(&monc->mutex);
@@ -410,10 +458,13 @@ static void handle_statfs_reply(struct ceph_mon_client *monc,
if (req) {
*req->buf = reply->st;
req->result = 0;
+ get_statfs_request(req);
}
mutex_unlock(&monc->mutex);
- if (req)
+ if (req) {
complete(&req->completion);
+ put_statfs_request(req);
+ }
return;
bad:
@@ -422,67 +473,60 @@ bad:
}
/*
- * (re)send a statfs request
+ * Do a synchronous statfs().
*/
-static int send_statfs(struct ceph_mon_client *monc,
- struct ceph_mon_statfs_request *req)
+int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf)
{
- struct ceph_msg *msg;
+ struct ceph_mon_statfs_request *req;
struct ceph_mon_statfs *h;
+ int err;
+
+ req = kmalloc(sizeof(*req), GFP_NOFS);
+ if (!req)
+ return -ENOMEM;
+
+ memset(req, 0, sizeof(*req));
+ kref_init(&req->kref);
+ req->buf = buf;
+ init_completion(&req->completion);
+
+ err = -ENOMEM;
+ req->request = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h));
+ if (!req->request)
+ goto out;
+ req->reply = ceph_msg_new(CEPH_MSG_STATFS_REPLY, 1024);
+ if (!req->reply)
+ goto out;
- dout("send_statfs tid %llu\n", req->tid);
- msg = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), 0, 0, NULL);
- if (IS_ERR(msg))
- return PTR_ERR(msg);
- req->request = msg;
- msg->hdr.tid = cpu_to_le64(req->tid);
- h = msg->front.iov_base;
+ /* fill out request */
+ h = req->request->front.iov_base;
h->monhdr.have_version = 0;
h->monhdr.session_mon = cpu_to_le16(-1);
h->monhdr.session_mon_tid = 0;
h->fsid = monc->monmap->fsid;
- ceph_con_send(monc->con, msg);
- return 0;
-}
-
-/*
- * Do a synchronous statfs().
- */
-int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf)
-{
- struct ceph_mon_statfs_request req;
- int err;
-
- req.buf = buf;
- init_completion(&req.completion);
-
- /* allocate memory for reply */
- err = ceph_msgpool_resv(&monc->msgpool_statfs_reply, 1);
- if (err)
- return err;
/* register request */
mutex_lock(&monc->mutex);
- req.tid = ++monc->last_tid;
- req.last_attempt = jiffies;
- req.delay = BASE_DELAY_INTERVAL;
- __insert_statfs(monc, &req);
+ req->tid = ++monc->last_tid;
+ req->request->hdr.tid = cpu_to_le64(req->tid);
+ __insert_statfs(monc, req);
monc->num_statfs_requests++;
mutex_unlock(&monc->mutex);
/* send request and wait */
- err = send_statfs(monc, &req);
- if (!err)
- err = wait_for_completion_interruptible(&req.completion);
+ ceph_con_send(monc->con, ceph_msg_get(req->request));
+ err = wait_for_completion_interruptible(&req->completion);
mutex_lock(&monc->mutex);
- rb_erase(&req.node, &monc->statfs_request_tree);
+ rb_erase(&req->node, &monc->statfs_request_tree);
monc->num_statfs_requests--;
- ceph_msgpool_resv(&monc->msgpool_statfs_reply, -1);
mutex_unlock(&monc->mutex);
if (!err)
- err = req.result;
+ err = req->result;
+
+out:
+ kref_put(&req->kref, release_statfs_request);
return err;
}
@@ -496,7 +540,7 @@ static void __resend_statfs(struct ceph_mon_client *monc)
for (p = rb_first(&monc->statfs_request_tree); p; p = rb_next(p)) {
req = rb_entry(p, struct ceph_mon_statfs_request, node);
- send_statfs(monc, req);
+ ceph_con_send(monc->con, ceph_msg_get(req->request));
}
}
@@ -587,25 +631,20 @@ int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
CEPH_ENTITY_TYPE_OSD | CEPH_ENTITY_TYPE_MDS;
/* msg pools */
- err = ceph_msgpool_init(&monc->msgpool_subscribe_ack,
- sizeof(struct ceph_mon_subscribe_ack), 1, false);
- if (err < 0)
+ err = -ENOMEM;
+ monc->m_subscribe_ack = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE_ACK,
+ sizeof(struct ceph_mon_subscribe_ack));
+ if (!monc->m_subscribe_ack)
goto out_monmap;
- err = ceph_msgpool_init(&monc->msgpool_statfs_reply,
- sizeof(struct ceph_mon_statfs_reply), 0, false);
- if (err < 0)
- goto out_pool1;
- err = ceph_msgpool_init(&monc->msgpool_auth_reply, 4096, 1, false);
- if (err < 0)
- goto out_pool2;
-
- monc->m_auth = ceph_msg_new(CEPH_MSG_AUTH, 4096, 0, 0, NULL);
+
+ monc->m_auth_reply = ceph_msg_new(CEPH_MSG_AUTH_REPLY, 4096);
+ if (!monc->m_auth_reply)
+ goto out_subscribe_ack;
+
+ monc->m_auth = ceph_msg_new(CEPH_MSG_AUTH, 4096);
monc->pending_auth = 0;
- if (IS_ERR(monc->m_auth)) {
- err = PTR_ERR(monc->m_auth);
- monc->m_auth = NULL;
- goto out_pool3;
- }
+ if (!monc->m_auth)
+ goto out_auth_reply;
monc->cur_mon = -1;
monc->hunting = true;
@@ -622,12 +661,10 @@ int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
monc->want_next_osdmap = 1;
return 0;
-out_pool3:
- ceph_msgpool_destroy(&monc->msgpool_auth_reply);
-out_pool2:
- ceph_msgpool_destroy(&monc->msgpool_subscribe_ack);
-out_pool1:
- ceph_msgpool_destroy(&monc->msgpool_statfs_reply);
+out_auth_reply:
+ ceph_msg_put(monc->m_auth_reply);
+out_subscribe_ack:
+ ceph_msg_put(monc->m_subscribe_ack);
out_monmap:
kfree(monc->monmap);
out:
@@ -651,9 +688,8 @@ void ceph_monc_stop(struct ceph_mon_client *monc)
ceph_auth_destroy(monc->auth);
ceph_msg_put(monc->m_auth);
- ceph_msgpool_destroy(&monc->msgpool_subscribe_ack);
- ceph_msgpool_destroy(&monc->msgpool_statfs_reply);
- ceph_msgpool_destroy(&monc->msgpool_auth_reply);
+ ceph_msg_put(monc->m_auth_reply);
+ ceph_msg_put(monc->m_subscribe_ack);
kfree(monc->monmap);
}
@@ -770,18 +806,17 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
switch (type) {
case CEPH_MSG_MON_SUBSCRIBE_ACK:
- m = ceph_msgpool_get(&monc->msgpool_subscribe_ack, front_len);
+ m = ceph_msg_get(monc->m_subscribe_ack);
break;
case CEPH_MSG_STATFS_REPLY:
- m = ceph_msgpool_get(&monc->msgpool_statfs_reply, front_len);
- break;
+ return get_statfs_reply(con, hdr, skip);
case CEPH_MSG_AUTH_REPLY:
- m = ceph_msgpool_get(&monc->msgpool_auth_reply, front_len);
+ m = ceph_msg_get(monc->m_auth_reply);
break;
case CEPH_MSG_MON_MAP:
case CEPH_MSG_MDS_MAP:
case CEPH_MSG_OSD_MAP:
- m = ceph_msg_new(type, front_len, 0, 0, NULL);
+ m = ceph_msg_new(type, front_len);
break;
}
diff --git a/fs/ceph/mon_client.h b/fs/ceph/mon_client.h
index b958ad5afa06..0046deed0740 100644
--- a/fs/ceph/mon_client.h
+++ b/fs/ceph/mon_client.h
@@ -2,10 +2,10 @@
#define _FS_CEPH_MON_CLIENT_H
#include <linux/completion.h>
+#include <linux/kref.h>
#include <linux/rbtree.h>
#include "messenger.h"
-#include "msgpool.h"
struct ceph_client;
struct ceph_mount_args;
@@ -44,13 +44,14 @@ struct ceph_mon_request {
* to the caller
*/
struct ceph_mon_statfs_request {
+ struct kref kref;
u64 tid;
struct rb_node node;
int result;
struct ceph_statfs *buf;
struct completion completion;
- unsigned long last_attempt, delay; /* jiffies */
struct ceph_msg *request; /* original request */
+ struct ceph_msg *reply; /* and reply */
};
struct ceph_mon_client {
@@ -61,7 +62,7 @@ struct ceph_mon_client {
struct delayed_work delayed_work;
struct ceph_auth_client *auth;
- struct ceph_msg *m_auth;
+ struct ceph_msg *m_auth, *m_auth_reply, *m_subscribe_ack;
int pending_auth;
bool hunting;
@@ -70,11 +71,6 @@ struct ceph_mon_client {
struct ceph_connection *con;
bool have_fsid;
- /* msg pools */
- struct ceph_msgpool msgpool_subscribe_ack;
- struct ceph_msgpool msgpool_statfs_reply;
- struct ceph_msgpool msgpool_auth_reply;
-
/* pending statfs requests */
struct rb_root statfs_request_tree;
int num_statfs_requests;
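
The statfs rework above preallocates both the request and the reply message and protects the pair with a kref, so get_statfs_reply() can hand back the reply buffer and handle_statfs_reply() can complete the waiter without racing with teardown. A hedged sketch of the reply-side handoff, distilled from handle_statfs_reply() above and not itself part of the patch:

/* Sketch only: pin the request across the mutex drop before completing it. */
static void statfs_reply_sketch(struct ceph_mon_client *monc, u64 tid,
				struct ceph_statfs *st)
{
	struct ceph_mon_statfs_request *req;

	mutex_lock(&monc->mutex);
	req = __lookup_statfs(monc, tid);
	if (req) {
		*req->buf = *st;		/* result for the waiter */
		req->result = 0;
		get_statfs_request(req);	/* keep req alive past the unlock */
	}
	mutex_unlock(&monc->mutex);

	if (req) {
		complete(&req->completion);
		put_statfs_request(req);	/* waiter drops its own ref later */
	}
}
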
diff --git a/fs/ceph/msgpool.c b/fs/ceph/msgpool.c
index ca3b44a89f2d..50fe5cc5de76 100644
--- a/fs/ceph/msgpool.c
+++ b/fs/ceph/msgpool.c
@@ -7,180 +7,58 @@
#include "msgpool.h"
-/*
- * We use msg pools to preallocate memory for messages we expect to
- * receive over the wire, to avoid getting ourselves into OOM
- * conditions at unexpected times. We take use a few different
- * strategies:
- *
- * - for request/response type interactions, we preallocate the
- * memory needed for the response when we generate the request.
- *
- * - for messages we can receive at any time from the MDS, we preallocate
- * a pool of messages we can re-use.
- *
- * - for writeback, we preallocate some number of messages to use for
- * requests and their replies, so that we always make forward
- * progress.
- *
- * The msgpool behaves like a mempool_t, but keeps preallocated
- * ceph_msgs strung together on a list_head instead of using a pointer
- * vector. This avoids vector reallocation when we adjust the number
- * of preallocated items (which happens frequently).
- */
+static void *alloc_fn(gfp_t gfp_mask, void *arg)
+{
+ struct ceph_msgpool *pool = arg;
+ void *p;
+ p = ceph_msg_new(0, pool->front_len);
+ if (!p)
+ pr_err("msgpool %s alloc failed\n", pool->name);
+ return p;
+}
-/*
- * Allocate or release as necessary to meet our target pool size.
- */
-static int __fill_msgpool(struct ceph_msgpool *pool)
+static void free_fn(void *element, void *arg)
{
- struct ceph_msg *msg;
-
- while (pool->num < pool->min) {
- dout("fill_msgpool %p %d/%d allocating\n", pool, pool->num,
- pool->min);
- spin_unlock(&pool->lock);
- msg = ceph_msg_new(0, pool->front_len, 0, 0, NULL);
- spin_lock(&pool->lock);
- if (IS_ERR(msg))
- return PTR_ERR(msg);
- msg->pool = pool;
- list_add(&msg->list_head, &pool->msgs);
- pool->num++;
- }
- while (pool->num > pool->min) {
- msg = list_first_entry(&pool->msgs, struct ceph_msg, list_head);
- dout("fill_msgpool %p %d/%d releasing %p\n", pool, pool->num,
- pool->min, msg);
- list_del_init(&msg->list_head);
- pool->num--;
- ceph_msg_kfree(msg);
- }
- return 0;
+ ceph_msg_put(element);
}
int ceph_msgpool_init(struct ceph_msgpool *pool,
- int front_len, int min, bool blocking)
+ int front_len, int size, bool blocking, const char *name)
{
- int ret;
-
- dout("msgpool_init %p front_len %d min %d\n", pool, front_len, min);
- spin_lock_init(&pool->lock);
pool->front_len = front_len;
- INIT_LIST_HEAD(&pool->msgs);
- pool->num = 0;
- pool->min = min;
- pool->blocking = blocking;
- init_waitqueue_head(&pool->wait);
-
- spin_lock(&pool->lock);
- ret = __fill_msgpool(pool);
- spin_unlock(&pool->lock);
- return ret;
+ pool->pool = mempool_create(size, alloc_fn, free_fn, pool);
+ if (!pool->pool)
+ return -ENOMEM;
+ pool->name = name;
+ return 0;
}
void ceph_msgpool_destroy(struct ceph_msgpool *pool)
{
- dout("msgpool_destroy %p\n", pool);
- spin_lock(&pool->lock);
- pool->min = 0;
- __fill_msgpool(pool);
- spin_unlock(&pool->lock);
+ mempool_destroy(pool->pool);
}
-int ceph_msgpool_resv(struct ceph_msgpool *pool, int delta)
+struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool,
+ int front_len)
{
- int ret;
-
- spin_lock(&pool->lock);
- dout("msgpool_resv %p delta %d\n", pool, delta);
- pool->min += delta;
- ret = __fill_msgpool(pool);
- spin_unlock(&pool->lock);
- return ret;
-}
-
-struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool, int front_len)
-{
- wait_queue_t wait;
- struct ceph_msg *msg;
-
- if (front_len && front_len > pool->front_len) {
- pr_err("msgpool_get pool %p need front %d, pool size is %d\n",
- pool, front_len, pool->front_len);
+ if (front_len > pool->front_len) {
+ pr_err("msgpool_get pool %s need front %d, pool size is %d\n",
+ pool->name, front_len, pool->front_len);
WARN_ON(1);
/* try to alloc a fresh message */
- msg = ceph_msg_new(0, front_len, 0, 0, NULL);
- if (!IS_ERR(msg))
- return msg;
- }
-
- if (!front_len)
- front_len = pool->front_len;
-
- if (pool->blocking) {
- /* mempool_t behavior; first try to alloc */
- msg = ceph_msg_new(0, front_len, 0, 0, NULL);
- if (!IS_ERR(msg))
- return msg;
+ return ceph_msg_new(0, front_len);
}
- while (1) {
- spin_lock(&pool->lock);
- if (likely(pool->num)) {
- msg = list_entry(pool->msgs.next, struct ceph_msg,
- list_head);
- list_del_init(&msg->list_head);
- pool->num--;
- dout("msgpool_get %p got %p, now %d/%d\n", pool, msg,
- pool->num, pool->min);
- spin_unlock(&pool->lock);
- return msg;
- }
- pr_err("msgpool_get %p now %d/%d, %s\n", pool, pool->num,
- pool->min, pool->blocking ? "waiting" : "may fail");
- spin_unlock(&pool->lock);
-
- if (!pool->blocking) {
- WARN_ON(1);
-
- /* maybe we can allocate it now? */
- msg = ceph_msg_new(0, front_len, 0, 0, NULL);
- if (!IS_ERR(msg))
- return msg;
-
- pr_err("msgpool_get %p empty + alloc failed\n", pool);
- return ERR_PTR(-ENOMEM);
- }
-
- init_wait(&wait);
- prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
- schedule();
- finish_wait(&pool->wait, &wait);
- }
+ return mempool_alloc(pool->pool, GFP_NOFS);
}
void ceph_msgpool_put(struct ceph_msgpool *pool, struct ceph_msg *msg)
{
- spin_lock(&pool->lock);
- if (pool->num < pool->min) {
- /* reset msg front_len; user may have changed it */
- msg->front.iov_len = pool->front_len;
- msg->hdr.front_len = cpu_to_le32(pool->front_len);
+ /* reset msg front_len; user may have changed it */
+ msg->front.iov_len = pool->front_len;
+ msg->hdr.front_len = cpu_to_le32(pool->front_len);
- kref_set(&msg->kref, 1); /* retake a single ref */
- list_add(&msg->list_head, &pool->msgs);
- pool->num++;
- dout("msgpool_put %p reclaim %p, now %d/%d\n", pool, msg,
- pool->num, pool->min);
- spin_unlock(&pool->lock);
- wake_up(&pool->wait);
- } else {
- dout("msgpool_put %p drop %p, at %d/%d\n", pool, msg,
- pool->num, pool->min);
- spin_unlock(&pool->lock);
- ceph_msg_kfree(msg);
- }
+ kref_init(&msg->kref); /* retake single ref */
}
diff --git a/fs/ceph/msgpool.h b/fs/ceph/msgpool.h
index bc834bfcd720..a362605f9368 100644
--- a/fs/ceph/msgpool.h
+++ b/fs/ceph/msgpool.h
@@ -1,6 +1,7 @@
#ifndef _FS_CEPH_MSGPOOL
#define _FS_CEPH_MSGPOOL
+#include <linux/mempool.h>
#include "messenger.h"
/*
@@ -8,18 +9,15 @@
* avoid unexpected OOM conditions.
*/
struct ceph_msgpool {
- spinlock_t lock;
+ const char *name;
+ mempool_t *pool;
int front_len; /* preallocated payload size */
- struct list_head msgs; /* msgs in the pool; each has 1 ref */
- int num, min; /* cur, min # msgs in the pool */
- bool blocking;
- wait_queue_head_t wait;
};
extern int ceph_msgpool_init(struct ceph_msgpool *pool,
- int front_len, int size, bool blocking);
+ int front_len, int size, bool blocking,
+ const char *name);
extern void ceph_msgpool_destroy(struct ceph_msgpool *pool);
-extern int ceph_msgpool_resv(struct ceph_msgpool *, int delta);
extern struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *,
int front_len);
extern void ceph_msgpool_put(struct ceph_msgpool *, struct ceph_msg *);
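The msgpool hunks above replace the hand-rolled blocking pool (spinlock, free list, waitqueue) with a standard mempool_t. The patch's new ceph_msgpool_init() body is not part of these hunks; as a rough, illustrative sketch of how such a mempool is typically wired up (helper names assumed, not taken from the patch):

static void *msgpool_alloc(gfp_t gfp_mask, void *arg)
{
	struct ceph_msgpool *pool = arg;

	/* preallocate a front_len-sized message; two-argument ceph_msg_new as used above */
	return ceph_msg_new(0, pool->front_len);
}

static void msgpool_free(void *element, void *arg)
{
	ceph_msg_kfree(element);	/* as the old pool did when dropping a surplus msg */
}

int ceph_msgpool_init(struct ceph_msgpool *pool, int front_len,
		      int size, bool blocking, const char *name)
{
	pool->name = name;
	pool->front_len = front_len;
	pool->pool = mempool_create(size, msgpool_alloc, msgpool_free, pool);
	return pool->pool ? 0 : -ENOMEM;
}

With a mempool underneath, ceph_msgpool_get() can simply call mempool_alloc(pool->pool, GFP_NOFS), which blocks until an element is available instead of reimplementing the waitqueue dance.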
diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c
index c7b4dedaace6..642fe57f84bb 100644
--- a/fs/ceph/osd_client.c
+++ b/fs/ceph/osd_client.c
@@ -147,7 +147,7 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
req = kzalloc(sizeof(*req), GFP_NOFS);
}
if (req == NULL)
- return ERR_PTR(-ENOMEM);
+ return NULL;
req->r_osdc = osdc;
req->r_mempool = use_mempool;
@@ -164,10 +164,10 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
else
msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY,
- OSD_OPREPLY_FRONT_LEN, 0, 0, NULL);
- if (IS_ERR(msg)) {
+ OSD_OPREPLY_FRONT_LEN);
+ if (!msg) {
ceph_osdc_put_request(req);
- return ERR_PTR(PTR_ERR(msg));
+ return NULL;
}
req->r_reply = msg;
@@ -178,10 +178,10 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
if (use_mempool)
msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
else
- msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, 0, 0, NULL);
- if (IS_ERR(msg)) {
+ msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size);
+ if (!msg) {
ceph_osdc_put_request(req);
- return ERR_PTR(PTR_ERR(msg));
+ return NULL;
}
msg->hdr.type = cpu_to_le16(CEPH_MSG_OSD_OP);
memset(msg->front.iov_base, 0, msg->front.iov_len);
@@ -704,7 +704,7 @@ static void handle_timeout(struct work_struct *work)
* should mark the osd as failed and we should find out about
* it from an updated osd map.
*/
- while (!list_empty(&osdc->req_lru)) {
+ while (timeout && !list_empty(&osdc->req_lru)) {
req = list_entry(osdc->req_lru.next, struct ceph_osd_request,
r_req_lru_item);
@@ -1064,6 +1064,7 @@ done:
if (newmap)
kick_requests(osdc, NULL);
up_read(&osdc->map_sem);
+ wake_up(&osdc->client->auth_wq);
return;
bad:
@@ -1073,45 +1074,6 @@ bad:
return;
}
-
-/*
- * A read request prepares specific pages that data is to be read into.
- * When a message is being read off the wire, we call prepare_pages to
- * find those pages.
- * 0 = success, -1 failure.
- */
-static int __prepare_pages(struct ceph_connection *con,
- struct ceph_msg_header *hdr,
- struct ceph_osd_request *req,
- u64 tid,
- struct ceph_msg *m)
-{
- struct ceph_osd *osd = con->private;
- struct ceph_osd_client *osdc;
- int ret = -1;
- int data_len = le32_to_cpu(hdr->data_len);
- unsigned data_off = le16_to_cpu(hdr->data_off);
-
- int want = calc_pages_for(data_off & ~PAGE_MASK, data_len);
-
- if (!osd)
- return -1;
-
- osdc = osd->o_osdc;
-
- dout("__prepare_pages on msg %p tid %llu, has %d pages, want %d\n", m,
- tid, req->r_num_pages, want);
- if (unlikely(req->r_num_pages < want))
- goto out;
- m->pages = req->r_pages;
- m->nr_pages = req->r_num_pages;
- ret = 0; /* success */
-out:
- BUG_ON(ret < 0 || m->nr_pages < want);
-
- return ret;
-}
-
/*
* Register request, send initial attempt.
*/
@@ -1238,11 +1200,13 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
if (!osdc->req_mempool)
goto out;
- err = ceph_msgpool_init(&osdc->msgpool_op, OSD_OP_FRONT_LEN, 10, true);
+ err = ceph_msgpool_init(&osdc->msgpool_op, OSD_OP_FRONT_LEN, 10, true,
+ "osd_op");
if (err < 0)
goto out_mempool;
err = ceph_msgpool_init(&osdc->msgpool_op_reply,
- OSD_OPREPLY_FRONT_LEN, 10, true);
+ OSD_OPREPLY_FRONT_LEN, 10, true,
+ "osd_op_reply");
if (err < 0)
goto out_msgpool;
return 0;
@@ -1288,8 +1252,8 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc,
CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
NULL, 0, truncate_seq, truncate_size, NULL,
false, 1);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ if (!req)
+ return -ENOMEM;
/* it may be a short read due to an object boundary */
req->r_pages = pages;
@@ -1331,8 +1295,8 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
snapc, do_sync,
truncate_seq, truncate_size, mtime,
nofail, 1);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ if (!req)
+ return -ENOMEM;
/* it may be a short write due to an object boundary */
req->r_pages = pages;
@@ -1380,7 +1344,8 @@ static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
}
/*
- * lookup and return message for incoming reply
+ * lookup and return message for incoming reply. set up reply message
+ * pages.
*/
static struct ceph_msg *get_reply(struct ceph_connection *con,
struct ceph_msg_header *hdr,
@@ -1393,7 +1358,6 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
int front = le32_to_cpu(hdr->front_len);
int data_len = le32_to_cpu(hdr->data_len);
u64 tid;
- int err;
tid = le64_to_cpu(hdr->tid);
mutex_lock(&osdc->request_mutex);
@@ -1411,13 +1375,14 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
req->r_reply, req->r_con_filling_msg);
ceph_con_revoke_message(req->r_con_filling_msg, req->r_reply);
ceph_con_put(req->r_con_filling_msg);
+ req->r_con_filling_msg = NULL;
}
if (front > req->r_reply->front.iov_len) {
pr_warning("get_reply front %d > preallocated %d\n",
front, (int)req->r_reply->front.iov_len);
- m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front, 0, 0, NULL);
- if (IS_ERR(m))
+ m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front);
+ if (!m)
goto out;
ceph_msg_put(req->r_reply);
req->r_reply = m;
@@ -1425,12 +1390,19 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
m = ceph_msg_get(req->r_reply);
if (data_len > 0) {
- err = __prepare_pages(con, hdr, req, tid, m);
- if (err < 0) {
+ unsigned data_off = le16_to_cpu(hdr->data_off);
+ int want = calc_pages_for(data_off & ~PAGE_MASK, data_len);
+
+ if (unlikely(req->r_num_pages < want)) {
+ pr_warning("tid %lld reply %d > expected %d pages\n",
+ tid, want, m->nr_pages);
*skip = 1;
ceph_msg_put(m);
- m = ERR_PTR(err);
+ m = NULL;
+ goto out;
}
+ m->pages = req->r_pages;
+ m->nr_pages = req->r_num_pages;
}
*skip = 0;
req->r_con_filling_msg = ceph_con_get(con);
@@ -1452,7 +1424,7 @@ static struct ceph_msg *alloc_msg(struct ceph_connection *con,
switch (type) {
case CEPH_MSG_OSD_MAP:
- return ceph_msg_new(type, front, 0, 0, NULL);
+ return ceph_msg_new(type, front);
case CEPH_MSG_OSD_OPREPLY:
return get_reply(con, hdr, skip);
default:
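With __prepare_pages() removed, get_reply() now computes the expected page count inline via calc_pages_for(data_off & ~PAGE_MASK, data_len). For reference, that ceph helper just rounds the byte range out to whole pages, roughly:

static inline int calc_pages_for(u64 off, u64 len)
{
	return ((off + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
	       (off >> PAGE_CACHE_SHIFT);
}

So a 10 KiB reply starting 1 KiB into a page needs 3 pages, and the reply is skipped (*skip = 1) when fewer pages were preallocated on the request.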
diff --git a/fs/ceph/pagelist.c b/fs/ceph/pagelist.c
index 5f8dbf7c745a..b6859f47d364 100644
--- a/fs/ceph/pagelist.c
+++ b/fs/ceph/pagelist.c
@@ -20,7 +20,7 @@ int ceph_pagelist_release(struct ceph_pagelist *pl)
static int ceph_pagelist_addpage(struct ceph_pagelist *pl)
{
- struct page *page = alloc_page(GFP_NOFS);
+ struct page *page = __page_cache_alloc(GFP_NOFS);
if (!page)
return -ENOMEM;
pl->room += PAGE_SIZE;
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index d5114db70453..c0b26b6badba 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -512,7 +512,7 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
struct ceph_cap_snap *capsnap)
{
struct inode *inode = &ci->vfs_inode;
- struct ceph_mds_client *mdsc = &ceph_client(inode->i_sb)->mdsc;
+ struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc;
BUG_ON(capsnap->writing);
capsnap->size = inode->i_size;
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index f888cf487b7c..956f8ffa5eb8 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -8,14 +8,11 @@
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/parser.h>
-#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/statfs.h>
#include <linux/string.h>
-#include <linux/version.h>
-#include <linux/vmalloc.h>
#include "decode.h"
#include "super.h"
@@ -47,7 +44,7 @@ const char *ceph_file_part(const char *s, int len)
*/
static void ceph_put_super(struct super_block *s)
{
- struct ceph_client *cl = ceph_client(s);
+ struct ceph_client *cl = ceph_sb_to_client(s);
dout("put_super\n");
ceph_mdsc_close_sessions(&cl->mdsc);
@@ -97,12 +94,40 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
static int ceph_syncfs(struct super_block *sb, int wait)
{
dout("sync_fs %d\n", wait);
- ceph_osdc_sync(&ceph_client(sb)->osdc);
- ceph_mdsc_sync(&ceph_client(sb)->mdsc);
+ ceph_osdc_sync(&ceph_sb_to_client(sb)->osdc);
+ ceph_mdsc_sync(&ceph_sb_to_client(sb)->mdsc);
dout("sync_fs %d done\n", wait);
return 0;
}
+static int default_congestion_kb(void)
+{
+ int congestion_kb;
+
+ /*
+ * Copied from NFS
+ *
+ * congestion size, scale with available memory.
+ *
+ * 64MB: 8192k
+ * 128MB: 11585k
+ * 256MB: 16384k
+ * 512MB: 23170k
+ * 1GB: 32768k
+ * 2GB: 46340k
+ * 4GB: 65536k
+ * 8GB: 92681k
+ * 16GB: 131072k
+ *
+ * This allows larger machines to have larger/more transfers.
+ * Limit the default to 256M
+ */
+ congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
+ if (congestion_kb > 256*1024)
+ congestion_kb = 256*1024;
+
+ return congestion_kb;
+}
/**
* ceph_show_options - Show mount options in /proc/mounts
@@ -128,6 +153,33 @@ static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt)
seq_puts(m, ",nocrc");
if (args->flags & CEPH_OPT_NOASYNCREADDIR)
seq_puts(m, ",noasyncreaddir");
+
+ if (args->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT)
+ seq_printf(m, ",mount_timeout=%d", args->mount_timeout);
+ if (args->osd_idle_ttl != CEPH_OSD_IDLE_TTL_DEFAULT)
+ seq_printf(m, ",osd_idle_ttl=%d", args->osd_idle_ttl);
+ if (args->osd_timeout != CEPH_OSD_TIMEOUT_DEFAULT)
+ seq_printf(m, ",osdtimeout=%d", args->osd_timeout);
+ if (args->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT)
+ seq_printf(m, ",osdkeepalivetimeout=%d",
+ args->osd_keepalive_timeout);
+ if (args->wsize)
+ seq_printf(m, ",wsize=%d", args->wsize);
+ if (args->rsize != CEPH_MOUNT_RSIZE_DEFAULT)
+ seq_printf(m, ",rsize=%d", args->rsize);
+ if (args->congestion_kb != default_congestion_kb())
+ seq_printf(m, ",write_congestion_kb=%d", args->congestion_kb);
+ if (args->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
+ seq_printf(m, ",caps_wanted_delay_min=%d",
+ args->caps_wanted_delay_min);
+ if (args->caps_wanted_delay_max != CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT)
+ seq_printf(m, ",caps_wanted_delay_max=%d",
+ args->caps_wanted_delay_max);
+ if (args->cap_release_safety != CEPH_CAP_RELEASE_SAFETY_DEFAULT)
+ seq_printf(m, ",cap_release_safety=%d",
+ args->cap_release_safety);
+ if (args->max_readdir != CEPH_MAX_READDIR_DEFAULT)
+ seq_printf(m, ",readdir_max_entries=%d", args->max_readdir);
if (strcmp(args->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
seq_printf(m, ",snapdirname=%s", args->snapdir_name);
if (args->name)
@@ -151,35 +203,6 @@ static void ceph_inode_init_once(void *foo)
inode_init_once(&ci->vfs_inode);
}
-static int default_congestion_kb(void)
-{
- int congestion_kb;
-
- /*
- * Copied from NFS
- *
- * congestion size, scale with available memory.
- *
- * 64MB: 8192k
- * 128MB: 11585k
- * 256MB: 16384k
- * 512MB: 23170k
- * 1GB: 32768k
- * 2GB: 46340k
- * 4GB: 65536k
- * 8GB: 92681k
- * 16GB: 131072k
- *
- * This allows larger machines to have larger/more transfers.
- * Limit the default to 256M
- */
- congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
- if (congestion_kb > 256*1024)
- congestion_kb = 256*1024;
-
- return congestion_kb;
-}
-
static int __init init_caches(void)
{
ceph_inode_cachep = kmem_cache_create("ceph_inode_info",
@@ -298,6 +321,7 @@ enum {
Opt_osd_idle_ttl,
Opt_caps_wanted_delay_min,
Opt_caps_wanted_delay_max,
+ Opt_cap_release_safety,
Opt_readdir_max_entries,
Opt_congestion_kb,
Opt_last_int,
@@ -329,6 +353,7 @@ static match_table_t arg_tokens = {
{Opt_osd_idle_ttl, "osd_idle_ttl=%d"},
{Opt_caps_wanted_delay_min, "caps_wanted_delay_min=%d"},
{Opt_caps_wanted_delay_max, "caps_wanted_delay_max=%d"},
+ {Opt_cap_release_safety, "cap_release_safety=%d"},
{Opt_readdir_max_entries, "readdir_max_entries=%d"},
{Opt_congestion_kb, "write_congestion_kb=%d"},
/* int args above */
@@ -378,8 +403,8 @@ static struct ceph_mount_args *parse_mount_args(int flags, char *options,
args->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT;
args->rsize = CEPH_MOUNT_RSIZE_DEFAULT;
args->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
- args->cap_release_safety = CEPH_CAPS_PER_RELEASE * 4;
- args->max_readdir = 1024;
+ args->cap_release_safety = CEPH_CAP_RELEASE_SAFETY_DEFAULT;
+ args->max_readdir = CEPH_MAX_READDIR_DEFAULT;
args->congestion_kb = default_congestion_kb();
/* ip1[:port1][,ip2[:port2]...]:/subdir/in/fs */
@@ -670,9 +695,10 @@ int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid)
/*
* true if we have the mon map (and have thus joined the cluster)
*/
-static int have_mon_map(struct ceph_client *client)
+static int have_mon_and_osd_map(struct ceph_client *client)
{
- return client->monc.monmap && client->monc.monmap->epoch;
+ return client->monc.monmap && client->monc.monmap->epoch &&
+ client->osdc.osdmap && client->osdc.osdmap->epoch;
}
/*
@@ -750,7 +776,7 @@ static int ceph_mount(struct ceph_client *client, struct vfsmount *mnt,
if (err < 0)
goto out;
- while (!have_mon_map(client)) {
+ while (!have_mon_and_osd_map(client)) {
err = -EIO;
if (timeout && time_after_eq(jiffies, started + timeout))
goto out;
@@ -758,8 +784,8 @@ static int ceph_mount(struct ceph_client *client, struct vfsmount *mnt,
/* wait */
dout("mount waiting for mon_map\n");
err = wait_event_interruptible_timeout(client->auth_wq,
- have_mon_map(client) || (client->auth_err < 0),
- timeout);
+ have_mon_and_osd_map(client) || (client->auth_err < 0),
+ timeout);
if (err == -EINTR || err == -ERESTARTSYS)
goto out;
if (client->auth_err < 0) {
@@ -872,6 +898,8 @@ static int ceph_compare_super(struct super_block *sb, void *data)
/*
* construct our own bdi so we can control readahead, etc.
*/
+static atomic_long_t bdi_seq = ATOMIC_INIT(0);
+
static int ceph_register_bdi(struct super_block *sb, struct ceph_client *client)
{
int err;
@@ -883,7 +911,8 @@ static int ceph_register_bdi(struct super_block *sb, struct ceph_client *client)
client->backing_dev_info.ra_pages =
(client->mount_args->rsize + PAGE_CACHE_SIZE - 1)
>> PAGE_SHIFT;
- err = bdi_register_dev(&client->backing_dev_info, sb->s_dev);
+ err = bdi_register(&client->backing_dev_info, NULL, "ceph-%d",
+ atomic_long_inc_return(&bdi_seq));
return err;
}
@@ -920,9 +949,9 @@ static int ceph_get_sb(struct file_system_type *fs_type,
goto out;
}
- if (ceph_client(sb) != client) {
+ if (ceph_sb_to_client(sb) != client) {
ceph_destroy_client(client);
- client = ceph_client(sb);
+ client = ceph_sb_to_client(sb);
dout("get_sb got existing client %p\n", client);
} else {
dout("get_sb using new client %p\n", client);
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 13513b80d87f..b580e43ae983 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -52,24 +52,24 @@
struct ceph_mount_args {
int sb_flags;
+ int flags;
+ struct ceph_fsid fsid;
+ struct ceph_entity_addr my_addr;
int num_mon;
struct ceph_entity_addr *mon_addr;
- int flags;
int mount_timeout;
int osd_idle_ttl;
- int caps_wanted_delay_min, caps_wanted_delay_max;
- struct ceph_fsid fsid;
- struct ceph_entity_addr my_addr;
+ int osd_timeout;
+ int osd_keepalive_timeout;
int wsize;
int rsize; /* max readahead */
+ int congestion_kb; /* max writeback in flight */
+ int caps_wanted_delay_min, caps_wanted_delay_max;
+ int cap_release_safety;
int max_readdir; /* max readdir size */
- int congestion_kb; /* max readdir size */
- int osd_timeout;
- int osd_keepalive_timeout;
char *snapdir_name; /* default ".snap" */
char *name;
char *secret;
- int cap_release_safety;
};
/*
@@ -80,13 +80,13 @@ struct ceph_mount_args {
#define CEPH_OSD_KEEPALIVE_DEFAULT 5
#define CEPH_OSD_IDLE_TTL_DEFAULT 60
#define CEPH_MOUNT_RSIZE_DEFAULT (512*1024) /* readahead */
+#define CEPH_MAX_READDIR_DEFAULT 1024
#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024)
#define CEPH_MSG_MAX_DATA_LEN (16*1024*1024)
#define CEPH_SNAPDIRNAME_DEFAULT ".snap"
#define CEPH_AUTH_NAME_DEFAULT "guest"
-
/*
* Delay telling the MDS we no longer want caps, in case we reopen
* the file. Delay a minimum amount of time, even if we send a cap
@@ -96,6 +96,7 @@ struct ceph_mount_args {
#define CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT 5 /* cap release delay */
#define CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT 60 /* cap release delay */
+#define CEPH_CAP_RELEASE_SAFETY_DEFAULT (CEPH_CAPS_PER_RELEASE * 4)
/* mount state */
enum {
@@ -160,12 +161,6 @@ struct ceph_client {
#endif
};
-static inline struct ceph_client *ceph_client(struct super_block *sb)
-{
- return sb->s_fs_info;
-}
-
-
/*
* File i/o capability. This tracks shared state with the metadata
* server that allows us to cache or writeback attributes or to read
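The ceph_client() accessor removed above is replaced throughout this patch by ceph_sb_to_client(). Its definition is not part of these hunks, but it presumably remains the same trivial s_fs_info lookup:

static inline struct ceph_client *ceph_sb_to_client(struct super_block *sb)
{
	return sb->s_fs_info;	/* assumed: mirrors the removed ceph_client() helper */
}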
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 2845422907fc..7185d0794df3 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -186,12 +186,6 @@ static int __set_xattr(struct ceph_inode_info *ci,
ci->i_xattrs.names_size -= xattr->name_len;
ci->i_xattrs.vals_size -= xattr->val_len;
}
- if (!xattr) {
- pr_err("__set_xattr ENOMEM on %p %llx.%llx xattr %s=%s\n",
- &ci->vfs_inode, ceph_vinop(&ci->vfs_inode), name,
- xattr->val);
- return -ENOMEM;
- }
ci->i_xattrs.names_size += name_len;
ci->i_xattrs.vals_size += val_len;
if (val)
@@ -574,7 +568,7 @@ ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
ci->i_xattrs.version, ci->i_xattrs.index_version);
if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
- (ci->i_xattrs.index_version > ci->i_xattrs.version)) {
+ (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
goto list_xattr;
} else {
spin_unlock(&inode->i_lock);
@@ -622,7 +616,7 @@ out:
static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
const char *value, size_t size, int flags)
{
- struct ceph_client *client = ceph_client(dentry->d_sb);
+ struct ceph_client *client = ceph_sb_to_client(dentry->d_sb);
struct inode *inode = dentry->d_inode;
struct ceph_inode_info *ci = ceph_inode(inode);
struct inode *parent_inode = dentry->d_parent->d_inode;
@@ -641,7 +635,7 @@ static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
return -ENOMEM;
err = -ENOMEM;
for (i = 0; i < nr_pages; i++) {
- pages[i] = alloc_page(GFP_NOFS);
+ pages[i] = __page_cache_alloc(GFP_NOFS);
if (!pages[i]) {
nr_pages = i;
goto out;
@@ -779,7 +773,7 @@ out:
static int ceph_send_removexattr(struct dentry *dentry, const char *name)
{
- struct ceph_client *client = ceph_client(dentry->d_sb);
+ struct ceph_client *client = ceph_sb_to_client(dentry->d_sb);
struct ceph_mds_client *mdsc = &client->mdsc;
struct inode *inode = dentry->d_inode;
struct inode *parent_inode = dentry->d_parent->d_inode;
diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
index a20bea598933..cfd1ce34e0bc 100644
--- a/fs/cifs/asn1.c
+++ b/fs/cifs/asn1.c
@@ -492,17 +492,13 @@ compare_oid(unsigned long *oid1, unsigned int oid1len,
int
decode_negTokenInit(unsigned char *security_blob, int length,
- enum securityEnum *secType)
+ struct TCP_Server_Info *server)
{
struct asn1_ctx ctx;
unsigned char *end;
unsigned char *sequence_end;
unsigned long *oid = NULL;
unsigned int cls, con, tag, oidlen, rc;
- bool use_ntlmssp = false;
- bool use_kerberos = false;
- bool use_kerberosu2u = false;
- bool use_mskerberos = false;
/* cifs_dump_mem(" Received SecBlob ", security_blob, length); */
@@ -510,11 +506,11 @@ decode_negTokenInit(unsigned char *security_blob, int length,
/* GSSAPI header */
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
- cFYI(1, ("Error decoding negTokenInit header"));
+ cFYI(1, "Error decoding negTokenInit header");
return 0;
} else if ((cls != ASN1_APL) || (con != ASN1_CON)
|| (tag != ASN1_EOC)) {
- cFYI(1, ("cls = %d con = %d tag = %d", cls, con, tag));
+ cFYI(1, "cls = %d con = %d tag = %d", cls, con, tag);
return 0;
}
@@ -535,56 +531,52 @@ decode_negTokenInit(unsigned char *security_blob, int length,
/* SPNEGO OID not present or garbled -- bail out */
if (!rc) {
- cFYI(1, ("Error decoding negTokenInit header"));
+ cFYI(1, "Error decoding negTokenInit header");
return 0;
}
/* SPNEGO */
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
- cFYI(1, ("Error decoding negTokenInit"));
+ cFYI(1, "Error decoding negTokenInit");
return 0;
} else if ((cls != ASN1_CTX) || (con != ASN1_CON)
|| (tag != ASN1_EOC)) {
- cFYI(1,
- ("cls = %d con = %d tag = %d end = %p (%d) exit 0",
- cls, con, tag, end, *end));
+ cFYI(1, "cls = %d con = %d tag = %d end = %p (%d) exit 0",
+ cls, con, tag, end, *end);
return 0;
}
/* negTokenInit */
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
- cFYI(1, ("Error decoding negTokenInit"));
+ cFYI(1, "Error decoding negTokenInit");
return 0;
} else if ((cls != ASN1_UNI) || (con != ASN1_CON)
|| (tag != ASN1_SEQ)) {
- cFYI(1,
- ("cls = %d con = %d tag = %d end = %p (%d) exit 1",
- cls, con, tag, end, *end));
+ cFYI(1, "cls = %d con = %d tag = %d end = %p (%d) exit 1",
+ cls, con, tag, end, *end);
return 0;
}
/* sequence */
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
- cFYI(1, ("Error decoding 2nd part of negTokenInit"));
+ cFYI(1, "Error decoding 2nd part of negTokenInit");
return 0;
} else if ((cls != ASN1_CTX) || (con != ASN1_CON)
|| (tag != ASN1_EOC)) {
- cFYI(1,
- ("cls = %d con = %d tag = %d end = %p (%d) exit 0",
- cls, con, tag, end, *end));
+ cFYI(1, "cls = %d con = %d tag = %d end = %p (%d) exit 0",
+ cls, con, tag, end, *end);
return 0;
}
/* sequence of */
if (asn1_header_decode
(&ctx, &sequence_end, &cls, &con, &tag) == 0) {
- cFYI(1, ("Error decoding 2nd part of negTokenInit"));
+ cFYI(1, "Error decoding 2nd part of negTokenInit");
return 0;
} else if ((cls != ASN1_UNI) || (con != ASN1_CON)
|| (tag != ASN1_SEQ)) {
- cFYI(1,
- ("cls = %d con = %d tag = %d end = %p (%d) exit 1",
- cls, con, tag, end, *end));
+ cFYI(1, "cls = %d con = %d tag = %d end = %p (%d) exit 1",
+ cls, con, tag, end, *end);
return 0;
}
@@ -592,37 +584,33 @@ decode_negTokenInit(unsigned char *security_blob, int length,
while (!asn1_eoc_decode(&ctx, sequence_end)) {
rc = asn1_header_decode(&ctx, &end, &cls, &con, &tag);
if (!rc) {
- cFYI(1,
- ("Error decoding negTokenInit hdr exit2"));
+ cFYI(1, "Error decoding negTokenInit hdr exit2");
return 0;
}
if ((tag == ASN1_OJI) && (con == ASN1_PRI)) {
if (asn1_oid_decode(&ctx, end, &oid, &oidlen)) {
- cFYI(1, ("OID len = %d oid = 0x%lx 0x%lx "
- "0x%lx 0x%lx", oidlen, *oid,
- *(oid + 1), *(oid + 2), *(oid + 3)));
+ cFYI(1, "OID len = %d oid = 0x%lx 0x%lx "
+ "0x%lx 0x%lx", oidlen, *oid,
+ *(oid + 1), *(oid + 2), *(oid + 3));
if (compare_oid(oid, oidlen, MSKRB5_OID,
- MSKRB5_OID_LEN) &&
- !use_mskerberos)
- use_mskerberos = true;
+ MSKRB5_OID_LEN))
+ server->sec_mskerberos = true;
else if (compare_oid(oid, oidlen, KRB5U2U_OID,
- KRB5U2U_OID_LEN) &&
- !use_kerberosu2u)
- use_kerberosu2u = true;
+ KRB5U2U_OID_LEN))
+ server->sec_kerberosu2u = true;
else if (compare_oid(oid, oidlen, KRB5_OID,
- KRB5_OID_LEN) &&
- !use_kerberos)
- use_kerberos = true;
+ KRB5_OID_LEN))
+ server->sec_kerberos = true;
else if (compare_oid(oid, oidlen, NTLMSSP_OID,
NTLMSSP_OID_LEN))
- use_ntlmssp = true;
+ server->sec_ntlmssp = true;
kfree(oid);
}
} else {
- cFYI(1, ("Should be an oid what is going on?"));
+ cFYI(1, "Should be an oid what is going on?");
}
}
@@ -632,54 +620,47 @@ decode_negTokenInit(unsigned char *security_blob, int length,
no mechListMic (e.g. NTLMSSP instead of KRB5) */
if (ctx.error == ASN1_ERR_DEC_EMPTY)
goto decode_negtoken_exit;
- cFYI(1, ("Error decoding last part negTokenInit exit3"));
+ cFYI(1, "Error decoding last part negTokenInit exit3");
return 0;
} else if ((cls != ASN1_CTX) || (con != ASN1_CON)) {
/* tag = 3 indicating mechListMIC */
- cFYI(1, ("Exit 4 cls = %d con = %d tag = %d end = %p (%d)",
- cls, con, tag, end, *end));
+ cFYI(1, "Exit 4 cls = %d con = %d tag = %d end = %p (%d)",
+ cls, con, tag, end, *end);
return 0;
}
/* sequence */
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
- cFYI(1, ("Error decoding last part negTokenInit exit5"));
+ cFYI(1, "Error decoding last part negTokenInit exit5");
return 0;
} else if ((cls != ASN1_UNI) || (con != ASN1_CON)
|| (tag != ASN1_SEQ)) {
- cFYI(1, ("cls = %d con = %d tag = %d end = %p (%d)",
- cls, con, tag, end, *end));
+ cFYI(1, "cls = %d con = %d tag = %d end = %p (%d)",
+ cls, con, tag, end, *end);
}
/* sequence of */
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
- cFYI(1, ("Error decoding last part negTokenInit exit 7"));
+ cFYI(1, "Error decoding last part negTokenInit exit 7");
return 0;
} else if ((cls != ASN1_CTX) || (con != ASN1_CON)) {
- cFYI(1, ("Exit 8 cls = %d con = %d tag = %d end = %p (%d)",
- cls, con, tag, end, *end));
+ cFYI(1, "Exit 8 cls = %d con = %d tag = %d end = %p (%d)",
+ cls, con, tag, end, *end);
return 0;
}
/* general string */
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
- cFYI(1, ("Error decoding last part negTokenInit exit9"));
+ cFYI(1, "Error decoding last part negTokenInit exit9");
return 0;
} else if ((cls != ASN1_UNI) || (con != ASN1_PRI)
|| (tag != ASN1_GENSTR)) {
- cFYI(1, ("Exit10 cls = %d con = %d tag = %d end = %p (%d)",
- cls, con, tag, end, *end));
+ cFYI(1, "Exit10 cls = %d con = %d tag = %d end = %p (%d)",
+ cls, con, tag, end, *end);
return 0;
}
- cFYI(1, ("Need to call asn1_octets_decode() function for %s",
- ctx.pointer)); /* is this UTF-8 or ASCII? */
+ cFYI(1, "Need to call asn1_octets_decode() function for %s",
+ ctx.pointer); /* is this UTF-8 or ASCII? */
decode_negtoken_exit:
- if (use_kerberos)
- *secType = Kerberos;
- else if (use_mskerberos)
- *secType = MSKerberos;
- else if (use_ntlmssp)
- *secType = RawNTLMSSP;
-
return 1;
}
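Rather than collapsing the advertised OIDs into a single secType result, decode_negTokenInit() now records every mechanism the server offers in the new TCP_Server_Info booleans (sec_kerberos, sec_mskerberos, sec_kerberosu2u, sec_ntlmssp; see the cifsglob.h hunk below). A caller in the session-setup path can then choose among them, along these lines (hypothetical helper, not code from this patch):

static int select_sec_mech(struct TCP_Server_Info *server)
{
	if (server->sec_kerberos || server->sec_mskerberos)
		server->secType = Kerberos;
	else if (server->sec_ntlmssp)
		server->secType = RawNTLMSSP;
	else
		return -EOPNOTSUPP;	/* nothing we support was offered */
	return 0;
}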
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 42cec2a7c0cf..4fce6e61b34e 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -60,10 +60,10 @@ cifs_dump_mem(char *label, void *data, int length)
#ifdef CONFIG_CIFS_DEBUG2
void cifs_dump_detail(struct smb_hdr *smb)
{
- cERROR(1, ("Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d",
+ cERROR(1, "Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d",
smb->Command, smb->Status.CifsError,
- smb->Flags, smb->Flags2, smb->Mid, smb->Pid));
- cERROR(1, ("smb buf %p len %d", smb, smbCalcSize_LE(smb)));
+ smb->Flags, smb->Flags2, smb->Mid, smb->Pid);
+ cERROR(1, "smb buf %p len %d", smb, smbCalcSize_LE(smb));
}
@@ -75,25 +75,25 @@ void cifs_dump_mids(struct TCP_Server_Info *server)
if (server == NULL)
return;
- cERROR(1, ("Dump pending requests:"));
+ cERROR(1, "Dump pending requests:");
spin_lock(&GlobalMid_Lock);
list_for_each(tmp, &server->pending_mid_q) {
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
- cERROR(1, ("State: %d Cmd: %d Pid: %d Tsk: %p Mid %d",
+ cERROR(1, "State: %d Cmd: %d Pid: %d Tsk: %p Mid %d",
mid_entry->midState,
(int)mid_entry->command,
mid_entry->pid,
mid_entry->tsk,
- mid_entry->mid));
+ mid_entry->mid);
#ifdef CONFIG_CIFS_STATS2
- cERROR(1, ("IsLarge: %d buf: %p time rcv: %ld now: %ld",
+ cERROR(1, "IsLarge: %d buf: %p time rcv: %ld now: %ld",
mid_entry->largeBuf,
mid_entry->resp_buf,
mid_entry->when_received,
- jiffies));
+ jiffies);
#endif /* STATS2 */
- cERROR(1, ("IsMult: %d IsEnd: %d", mid_entry->multiRsp,
- mid_entry->multiEnd));
+ cERROR(1, "IsMult: %d IsEnd: %d", mid_entry->multiRsp,
+ mid_entry->multiEnd);
if (mid_entry->resp_buf) {
cifs_dump_detail(mid_entry->resp_buf);
cifs_dump_mem("existing buf: ",
@@ -716,7 +716,7 @@ static const struct file_operations cifs_multiuser_mount_proc_fops = {
static int cifs_security_flags_proc_show(struct seq_file *m, void *v)
{
- seq_printf(m, "0x%x\n", extended_security);
+ seq_printf(m, "0x%x\n", global_secflags);
return 0;
}
@@ -744,13 +744,13 @@ static ssize_t cifs_security_flags_proc_write(struct file *file,
/* single char or single char followed by null */
c = flags_string[0];
if (c == '0' || c == 'n' || c == 'N') {
- extended_security = CIFSSEC_DEF; /* default */
+ global_secflags = CIFSSEC_DEF; /* default */
return count;
} else if (c == '1' || c == 'y' || c == 'Y') {
- extended_security = CIFSSEC_MAX;
+ global_secflags = CIFSSEC_MAX;
return count;
} else if (!isdigit(c)) {
- cERROR(1, ("invalid flag %c", c));
+ cERROR(1, "invalid flag %c", c);
return -EINVAL;
}
}
@@ -758,26 +758,26 @@ static ssize_t cifs_security_flags_proc_write(struct file *file,
flags = simple_strtoul(flags_string, NULL, 0);
- cFYI(1, ("sec flags 0x%x", flags));
+ cFYI(1, "sec flags 0x%x", flags);
if (flags <= 0) {
- cERROR(1, ("invalid security flags %s", flags_string));
+ cERROR(1, "invalid security flags %s", flags_string);
return -EINVAL;
}
if (flags & ~CIFSSEC_MASK) {
- cERROR(1, ("attempt to set unsupported security flags 0x%x",
- flags & ~CIFSSEC_MASK));
+ cERROR(1, "attempt to set unsupported security flags 0x%x",
+ flags & ~CIFSSEC_MASK);
return -EINVAL;
}
/* flags look ok - update the global security flags for cifs module */
- extended_security = flags;
- if (extended_security & CIFSSEC_MUST_SIGN) {
+ global_secflags = flags;
+ if (global_secflags & CIFSSEC_MUST_SIGN) {
/* requiring signing implies signing is allowed */
- extended_security |= CIFSSEC_MAY_SIGN;
- cFYI(1, ("packet signing now required"));
- } else if ((extended_security & CIFSSEC_MAY_SIGN) == 0) {
- cFYI(1, ("packet signing disabled"));
+ global_secflags |= CIFSSEC_MAY_SIGN;
+ cFYI(1, "packet signing now required");
+ } else if ((global_secflags & CIFSSEC_MAY_SIGN) == 0) {
+ cFYI(1, "packet signing disabled");
}
/* BB should we turn on MAY flags for other MUST options? */
return count;
diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h
index 5eb3b83bbfa7..aa316891ac0c 100644
--- a/fs/cifs/cifs_debug.h
+++ b/fs/cifs/cifs_debug.h
@@ -43,34 +43,54 @@ void dump_smb(struct smb_hdr *, int);
*/
#ifdef CIFS_DEBUG
-
/* information message: e.g., configuration, major event */
extern int cifsFYI;
-#define cifsfyi(format,arg...) if (cifsFYI & CIFS_INFO) printk(KERN_DEBUG " " __FILE__ ": " format "\n" "" , ## arg)
+#define cifsfyi(fmt, arg...) \
+do { \
+ if (cifsFYI & CIFS_INFO) \
+ printk(KERN_DEBUG "%s: " fmt "\n", __FILE__, ##arg); \
+} while (0)
-#define cFYI(button,prspec) if (button) cifsfyi prspec
+#define cFYI(set, fmt, arg...) \
+do { \
+ if (set) \
+ cifsfyi(fmt, ##arg); \
+} while (0)
-#define cifswarn(format, arg...) printk(KERN_WARNING ": " format "\n" , ## arg)
+#define cifswarn(fmt, arg...) \
+ printk(KERN_WARNING fmt "\n", ##arg)
/* debug event message: */
extern int cifsERROR;
-#define cEVENT(format,arg...) if (cifsERROR) printk(KERN_EVENT __FILE__ ": " format "\n" , ## arg)
+#define cEVENT(fmt, arg...) \
+do { \
+ if (cifsERROR) \
+ printk(KERN_EVENT "%s: " fmt "\n", __FILE__, ##arg); \
+} while (0)
/* error event message: e.g., i/o error */
-#define cifserror(format,arg...) if (cifsERROR) printk(KERN_ERR " CIFS VFS: " format "\n" "" , ## arg)
+#define cifserror(fmt, arg...) \
+do { \
+ if (cifsERROR) \
+ printk(KERN_ERR "CIFS VFS: " fmt "\n", ##arg); \
+} while (0)
-#define cERROR(button, prspec) if (button) cifserror prspec
+#define cERROR(set, fmt, arg...) \
+do { \
+ if (set) \
+ cifserror(fmt, ##arg); \
+} while (0)
/*
* debug OFF
* ---------
*/
#else /* _CIFS_DEBUG */
-#define cERROR(button, prspec)
-#define cEVENT(format, arg...)
-#define cFYI(button, prspec)
-#define cifserror(format, arg...)
+#define cERROR(set, fmt, arg...)
+#define cEVENT(fmt, arg...)
+#define cFYI(set, fmt, arg...)
+#define cifserror(fmt, arg...)
#endif /* _CIFS_DEBUG */
#endif /* _H_CIFS_DEBUG */
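The cifs_debug.h change above turns cFYI()/cERROR() into ordinary variadic macros, which is what drives the mechanical churn through the rest of the cifs hunks: every call site drops the extra inner parentheses. Using a call from cifsfs.c further down as the example:

	/* old form: format and arguments wrapped in their own parentheses */
	cFYI(1, ("Devname: %s flags: %d ", dev_name, flags));

	/* new form: plain variadic arguments */
	cFYI(1, "Devname: %s flags: %d ", dev_name, flags);

The do { ... } while (0) wrappers also make the macros safe as a single statement in unbraced if/else bodies, which the old "if (button) cifsfyi prspec" form was not.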
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 78e4d2a3a68b..ac19a6f3dae0 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -85,8 +85,8 @@ static char *cifs_get_share_name(const char *node_name)
/* find server name end */
pSep = memchr(UNC+2, '\\', len-2);
if (!pSep) {
- cERROR(1, ("%s: no server name end in node name: %s",
- __func__, node_name));
+ cERROR(1, "%s: no server name end in node name: %s",
+ __func__, node_name);
kfree(UNC);
return ERR_PTR(-EINVAL);
}
@@ -142,8 +142,8 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
rc = dns_resolve_server_name_to_ip(*devname, &srvIP);
if (rc != 0) {
- cERROR(1, ("%s: Failed to resolve server part of %s to IP: %d",
- __func__, *devname, rc));
+ cERROR(1, "%s: Failed to resolve server part of %s to IP: %d",
+ __func__, *devname, rc);
goto compose_mount_options_err;
}
/* md_len = strlen(...) + 12 for 'sep+prefixpath='
@@ -217,8 +217,8 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
strcat(mountdata, fullpath + ref->path_consumed);
}
- /*cFYI(1,("%s: parent mountdata: %s", __func__,sb_mountdata));*/
- /*cFYI(1, ("%s: submount mountdata: %s", __func__, mountdata ));*/
+ /*cFYI(1, "%s: parent mountdata: %s", __func__,sb_mountdata);*/
+ /*cFYI(1, "%s: submount mountdata: %s", __func__, mountdata );*/
compose_mount_options_out:
kfree(srvIP);
@@ -294,11 +294,11 @@ static int add_mount_helper(struct vfsmount *newmnt, struct nameidata *nd,
static void dump_referral(const struct dfs_info3_param *ref)
{
- cFYI(1, ("DFS: ref path: %s", ref->path_name));
- cFYI(1, ("DFS: node path: %s", ref->node_name));
- cFYI(1, ("DFS: fl: %hd, srv_type: %hd", ref->flags, ref->server_type));
- cFYI(1, ("DFS: ref_flags: %hd, path_consumed: %hd", ref->ref_flag,
- ref->path_consumed));
+ cFYI(1, "DFS: ref path: %s", ref->path_name);
+ cFYI(1, "DFS: node path: %s", ref->node_name);
+ cFYI(1, "DFS: fl: %hd, srv_type: %hd", ref->flags, ref->server_type);
+ cFYI(1, "DFS: ref_flags: %hd, path_consumed: %hd", ref->ref_flag,
+ ref->path_consumed);
}
@@ -314,7 +314,7 @@ cifs_dfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
int rc = 0;
struct vfsmount *mnt = ERR_PTR(-ENOENT);
- cFYI(1, ("in %s", __func__));
+ cFYI(1, "in %s", __func__);
BUG_ON(IS_ROOT(dentry));
xid = GetXid();
@@ -352,15 +352,15 @@ cifs_dfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
/* connect to a node */
len = strlen(referrals[i].node_name);
if (len < 2) {
- cERROR(1, ("%s: Net Address path too short: %s",
- __func__, referrals[i].node_name));
+ cERROR(1, "%s: Net Address path too short: %s",
+ __func__, referrals[i].node_name);
rc = -EINVAL;
goto out_err;
}
mnt = cifs_dfs_do_refmount(nd->path.mnt,
nd->path.dentry, referrals + i);
- cFYI(1, ("%s: cifs_dfs_do_refmount:%s , mnt:%p", __func__,
- referrals[i].node_name, mnt));
+ cFYI(1, "%s: cifs_dfs_do_refmount:%s , mnt:%p", __func__,
+ referrals[i].node_name, mnt);
/* complete mount procedure if we acquired submount */
if (!IS_ERR(mnt))
@@ -378,7 +378,7 @@ out:
FreeXid(xid);
free_dfs_info_array(referrals, num_referrals);
kfree(full_path);
- cFYI(1, ("leaving %s" , __func__));
+ cFYI(1, "leaving %s" , __func__);
return ERR_PTR(rc);
out_err:
path_put(&nd->path);
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
index 310d12f69a92..379bd7d9c05f 100644
--- a/fs/cifs/cifs_spnego.c
+++ b/fs/cifs/cifs_spnego.c
@@ -133,9 +133,9 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo)
dp = description + strlen(description);
/* for now, only sec=krb5 and sec=mskrb5 are valid */
- if (server->secType == Kerberos)
+ if (server->sec_kerberos)
sprintf(dp, ";sec=krb5");
- else if (server->secType == MSKerberos)
+ else if (server->sec_mskerberos)
sprintf(dp, ";sec=mskrb5");
else
goto out;
@@ -149,7 +149,7 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo)
dp = description + strlen(description);
sprintf(dp, ";pid=0x%x", current->pid);
- cFYI(1, ("key description = %s", description));
+ cFYI(1, "key description = %s", description);
spnego_key = request_key(&cifs_spnego_key_type, description, "");
#ifdef CONFIG_CIFS_DEBUG2
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index d07676bd76d2..430f510a1720 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -200,9 +200,8 @@ cifs_strtoUCS(__le16 *to, const char *from, int len,
/* works for 2.4.0 kernel or later */
charlen = codepage->char2uni(from, len, &wchar_to[i]);
if (charlen < 1) {
- cERROR(1,
- ("strtoUCS: char2uni of %d returned %d",
- (int)*from, charlen));
+ cERROR(1, "strtoUCS: char2uni of %d returned %d",
+ (int)*from, charlen);
/* A question mark */
to[i] = cpu_to_le16(0x003f);
charlen = 1;
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 9b716d044bbd..85d7cf7ff2c8 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -87,11 +87,11 @@ int match_sid(struct cifs_sid *ctsid)
continue; /* all sub_auth values do not match */
}
- cFYI(1, ("matching sid: %s\n", wksidarr[i].sidname));
+ cFYI(1, "matching sid: %s\n", wksidarr[i].sidname);
return 0; /* sids compare/match */
}
- cFYI(1, ("No matching sid"));
+ cFYI(1, "No matching sid");
return -1;
}
@@ -208,14 +208,14 @@ static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode,
*pbits_to_set &= ~S_IXUGO;
return;
} else if (type != ACCESS_ALLOWED) {
- cERROR(1, ("unknown access control type %d", type));
+ cERROR(1, "unknown access control type %d", type);
return;
}
/* else ACCESS_ALLOWED type */
if (flags & GENERIC_ALL) {
*pmode |= (S_IRWXUGO & (*pbits_to_set));
- cFYI(DBG2, ("all perms"));
+ cFYI(DBG2, "all perms");
return;
}
if ((flags & GENERIC_WRITE) ||
@@ -228,7 +228,7 @@ static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode,
((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
*pmode |= (S_IXUGO & (*pbits_to_set));
- cFYI(DBG2, ("access flags 0x%x mode now 0x%x", flags, *pmode));
+ cFYI(DBG2, "access flags 0x%x mode now 0x%x", flags, *pmode);
return;
}
@@ -257,7 +257,7 @@ static void mode_to_access_flags(umode_t mode, umode_t bits_to_use,
if (mode & S_IXUGO)
*pace_flags |= SET_FILE_EXEC_RIGHTS;
- cFYI(DBG2, ("mode: 0x%x, access flags now 0x%x", mode, *pace_flags));
+ cFYI(DBG2, "mode: 0x%x, access flags now 0x%x", mode, *pace_flags);
return;
}
@@ -297,24 +297,24 @@ static void dump_ace(struct cifs_ace *pace, char *end_of_acl)
/* validate that we do not go past end of acl */
if (le16_to_cpu(pace->size) < 16) {
- cERROR(1, ("ACE too small, %d", le16_to_cpu(pace->size)));
+ cERROR(1, "ACE too small %d", le16_to_cpu(pace->size));
return;
}
if (end_of_acl < (char *)pace + le16_to_cpu(pace->size)) {
- cERROR(1, ("ACL too small to parse ACE"));
+ cERROR(1, "ACL too small to parse ACE");
return;
}
num_subauth = pace->sid.num_subauth;
if (num_subauth) {
int i;
- cFYI(1, ("ACE revision %d num_auth %d type %d flags %d size %d",
+ cFYI(1, "ACE revision %d num_auth %d type %d flags %d size %d",
pace->sid.revision, pace->sid.num_subauth, pace->type,
- pace->flags, le16_to_cpu(pace->size)));
+ pace->flags, le16_to_cpu(pace->size));
for (i = 0; i < num_subauth; ++i) {
- cFYI(1, ("ACE sub_auth[%d]: 0x%x", i,
- le32_to_cpu(pace->sid.sub_auth[i])));
+ cFYI(1, "ACE sub_auth[%d]: 0x%x", i,
+ le32_to_cpu(pace->sid.sub_auth[i]));
}
/* BB add length check to make sure that we do not have huge
@@ -347,13 +347,13 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
/* validate that we do not go past end of acl */
if (end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) {
- cERROR(1, ("ACL too small to parse DACL"));
+ cERROR(1, "ACL too small to parse DACL");
return;
}
- cFYI(DBG2, ("DACL revision %d size %d num aces %d",
+ cFYI(DBG2, "DACL revision %d size %d num aces %d",
le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size),
- le32_to_cpu(pdacl->num_aces)));
+ le32_to_cpu(pdacl->num_aces));
/* reset rwx permissions for user/group/other.
Also, if num_aces is 0 i.e. DACL has no ACEs,
@@ -437,25 +437,25 @@ static int parse_sid(struct cifs_sid *psid, char *end_of_acl)
/* validate that we do not go past end of ACL - sid must be at least 8
bytes long (assuming no sub-auths - e.g. the null SID */
if (end_of_acl < (char *)psid + 8) {
- cERROR(1, ("ACL too small to parse SID %p", psid));
+ cERROR(1, "ACL too small to parse SID %p", psid);
return -EINVAL;
}
if (psid->num_subauth) {
#ifdef CONFIG_CIFS_DEBUG2
int i;
- cFYI(1, ("SID revision %d num_auth %d",
- psid->revision, psid->num_subauth));
+ cFYI(1, "SID revision %d num_auth %d",
+ psid->revision, psid->num_subauth);
for (i = 0; i < psid->num_subauth; i++) {
- cFYI(1, ("SID sub_auth[%d]: 0x%x ", i,
- le32_to_cpu(psid->sub_auth[i])));
+ cFYI(1, "SID sub_auth[%d]: 0x%x ", i,
+ le32_to_cpu(psid->sub_auth[i]));
}
/* BB add length check to make sure that we do not have huge
num auths and therefore go off the end */
- cFYI(1, ("RID 0x%x",
- le32_to_cpu(psid->sub_auth[psid->num_subauth-1])));
+ cFYI(1, "RID 0x%x",
+ le32_to_cpu(psid->sub_auth[psid->num_subauth-1]));
#endif
}
@@ -482,11 +482,11 @@ static int parse_sec_desc(struct cifs_ntsd *pntsd, int acl_len,
le32_to_cpu(pntsd->gsidoffset));
dacloffset = le32_to_cpu(pntsd->dacloffset);
dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
- cFYI(DBG2, ("revision %d type 0x%x ooffset 0x%x goffset 0x%x "
+ cFYI(DBG2, "revision %d type 0x%x ooffset 0x%x goffset 0x%x "
"sacloffset 0x%x dacloffset 0x%x",
pntsd->revision, pntsd->type, le32_to_cpu(pntsd->osidoffset),
le32_to_cpu(pntsd->gsidoffset),
- le32_to_cpu(pntsd->sacloffset), dacloffset));
+ le32_to_cpu(pntsd->sacloffset), dacloffset);
/* cifs_dump_mem("owner_sid: ", owner_sid_ptr, 64); */
rc = parse_sid(owner_sid_ptr, end_of_acl);
if (rc)
@@ -500,7 +500,7 @@ static int parse_sec_desc(struct cifs_ntsd *pntsd, int acl_len,
parse_dacl(dacl_ptr, end_of_acl, owner_sid_ptr,
group_sid_ptr, fattr);
else
- cFYI(1, ("no ACL")); /* BB grant all or default perms? */
+ cFYI(1, "no ACL"); /* BB grant all or default perms? */
/* cifscred->uid = owner_sid_ptr->rid;
cifscred->gid = group_sid_ptr->rid;
@@ -563,7 +563,7 @@ static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
FreeXid(xid);
- cFYI(1, ("GetCIFSACL rc = %d ACL len %d", rc, *pacllen));
+ cFYI(1, "GetCIFSACL rc = %d ACL len %d", rc, *pacllen);
return pntsd;
}
@@ -581,12 +581,12 @@ static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
&fid, &oplock, NULL, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc) {
- cERROR(1, ("Unable to open file to get ACL"));
+ cERROR(1, "Unable to open file to get ACL");
goto out;
}
rc = CIFSSMBGetCIFSACL(xid, cifs_sb->tcon, fid, &pntsd, pacllen);
- cFYI(1, ("GetCIFSACL rc = %d ACL len %d", rc, *pacllen));
+ cFYI(1, "GetCIFSACL rc = %d ACL len %d", rc, *pacllen);
CIFSSMBClose(xid, cifs_sb->tcon, fid);
out:
@@ -621,7 +621,7 @@ static int set_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb, __u16 fid,
rc = CIFSSMBSetCIFSACL(xid, cifs_sb->tcon, fid, pnntsd, acllen);
FreeXid(xid);
- cFYI(DBG2, ("SetCIFSACL rc = %d", rc));
+ cFYI(DBG2, "SetCIFSACL rc = %d", rc);
return rc;
}
@@ -638,12 +638,12 @@ static int set_cifs_acl_by_path(struct cifs_sb_info *cifs_sb, const char *path,
&fid, &oplock, NULL, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc) {
- cERROR(1, ("Unable to open file to set ACL"));
+ cERROR(1, "Unable to open file to set ACL");
goto out;
}
rc = CIFSSMBSetCIFSACL(xid, cifs_sb->tcon, fid, pnntsd, acllen);
- cFYI(DBG2, ("SetCIFSACL rc = %d", rc));
+ cFYI(DBG2, "SetCIFSACL rc = %d", rc);
CIFSSMBClose(xid, cifs_sb->tcon, fid);
out:
@@ -659,7 +659,7 @@ static int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
struct cifsFileInfo *open_file;
int rc;
- cFYI(DBG2, ("set ACL for %s from mode 0x%x", path, inode->i_mode));
+ cFYI(DBG2, "set ACL for %s from mode 0x%x", path, inode->i_mode);
open_file = find_readable_file(CIFS_I(inode));
if (!open_file)
@@ -679,7 +679,7 @@ cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
u32 acllen = 0;
int rc = 0;
- cFYI(DBG2, ("converting ACL to mode for %s", path));
+ cFYI(DBG2, "converting ACL to mode for %s", path);
if (pfid)
pntsd = get_cifs_acl_by_fid(cifs_sb, *pfid, &acllen);
@@ -690,7 +690,7 @@ cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
if (pntsd)
rc = parse_sec_desc(pntsd, acllen, fattr);
if (rc)
- cFYI(1, ("parse sec desc failed rc = %d", rc));
+ cFYI(1, "parse sec desc failed rc = %d", rc);
kfree(pntsd);
return;
@@ -704,7 +704,7 @@ int mode_to_acl(struct inode *inode, const char *path, __u64 nmode)
struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */
struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */
- cFYI(DBG2, ("set ACL from mode for %s", path));
+ cFYI(DBG2, "set ACL from mode for %s", path);
/* Get the security descriptor */
pntsd = get_cifs_acl(CIFS_SB(inode->i_sb), inode, path, &secdesclen);
@@ -721,19 +721,19 @@ int mode_to_acl(struct inode *inode, const char *path, __u64 nmode)
DEFSECDESCLEN : secdesclen;
pnntsd = kmalloc(secdesclen, GFP_KERNEL);
if (!pnntsd) {
- cERROR(1, ("Unable to allocate security descriptor"));
+ cERROR(1, "Unable to allocate security descriptor");
kfree(pntsd);
return -ENOMEM;
}
rc = build_sec_desc(pntsd, pnntsd, inode, nmode);
- cFYI(DBG2, ("build_sec_desc rc: %d", rc));
+ cFYI(DBG2, "build_sec_desc rc: %d", rc);
if (!rc) {
/* Set the security descriptor */
rc = set_cifs_acl(pnntsd, secdesclen, inode, path);
- cFYI(DBG2, ("set_cifs_acl rc: %d", rc));
+ cFYI(DBG2, "set_cifs_acl rc: %d", rc);
}
kfree(pnntsd);
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index fbe986430d0c..847628dfdc44 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -103,7 +103,7 @@ static int cifs_calc_signature2(const struct kvec *iov, int n_vec,
if (iov[i].iov_len == 0)
continue;
if (iov[i].iov_base == NULL) {
- cERROR(1, ("null iovec entry"));
+ cERROR(1, "null iovec entry");
return -EIO;
}
/* The first entry includes a length field (which does not get
@@ -181,8 +181,8 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu,
/* Do not need to verify session setups with signature "BSRSPYL " */
if (memcmp(cifs_pdu->Signature.SecuritySignature, "BSRSPYL ", 8) == 0)
- cFYI(1, ("dummy signature received for smb command 0x%x",
- cifs_pdu->Command));
+ cFYI(1, "dummy signature received for smb command 0x%x",
+ cifs_pdu->Command);
/* save off the original signature so we can modify the smb and check
its signature against what the server sent */
@@ -291,7 +291,7 @@ void calc_lanman_hash(const char *password, const char *cryptkey, bool encrypt,
if (password)
strncpy(password_with_pad, password, CIFS_ENCPWD_SIZE);
- if (!encrypt && extended_security & CIFSSEC_MAY_PLNTXT) {
+ if (!encrypt && global_secflags & CIFSSEC_MAY_PLNTXT) {
memset(lnm_session_key, 0, CIFS_SESS_KEY_SIZE);
memcpy(lnm_session_key, password_with_pad,
CIFS_ENCPWD_SIZE);
@@ -398,7 +398,7 @@ void setup_ntlmv2_rsp(struct cifsSesInfo *ses, char *resp_buf,
/* calculate buf->ntlmv2_hash */
rc = calc_ntlmv2_hash(ses, nls_cp);
if (rc)
- cERROR(1, ("could not get v2 hash rc %d", rc));
+ cERROR(1, "could not get v2 hash rc %d", rc);
CalcNTLMv2_response(ses, resp_buf);
/* now calculate the MAC key for NTLMv2 */
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index ad235d604a0b..78c02eb4cb1f 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -49,10 +49,6 @@
#include "cifs_spnego.h"
#define CIFS_MAGIC_NUMBER 0xFF534D42 /* the first four bytes of SMB PDUs */
-#ifdef CONFIG_CIFS_QUOTA
-static const struct quotactl_ops cifs_quotactl_ops;
-#endif /* QUOTA */
-
int cifsFYI = 0;
int cifsERROR = 1;
int traceSMB = 0;
@@ -61,7 +57,7 @@ unsigned int experimEnabled = 0;
unsigned int linuxExtEnabled = 1;
unsigned int lookupCacheEnabled = 1;
unsigned int multiuser_mount = 0;
-unsigned int extended_security = CIFSSEC_DEF;
+unsigned int global_secflags = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;
static const struct super_operations cifs_super_ops;
@@ -86,8 +82,6 @@ extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;
-extern struct kmem_cache *cifs_oplock_cachep;
-
static int
cifs_read_super(struct super_block *sb, void *data,
const char *devname, int silent)
@@ -135,8 +129,7 @@ cifs_read_super(struct super_block *sb, void *data,
if (rc) {
if (!silent)
- cERROR(1,
- ("cifs_mount failed w/return code = %d", rc));
+ cERROR(1, "cifs_mount failed w/return code = %d", rc);
goto out_mount_failed;
}
@@ -146,9 +139,6 @@ cifs_read_super(struct super_block *sb, void *data,
/* if (cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
sb->s_blocksize =
cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
-#ifdef CONFIG_CIFS_QUOTA
- sb->s_qcop = &cifs_quotactl_ops;
-#endif
sb->s_blocksize = CIFS_MAX_MSGSIZE;
sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
inode = cifs_root_iget(sb, ROOT_I);
@@ -168,7 +158,7 @@ cifs_read_super(struct super_block *sb, void *data,
#ifdef CONFIG_CIFS_EXPERIMENTAL
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
- cFYI(1, ("export ops supported"));
+ cFYI(1, "export ops supported");
sb->s_export_op = &cifs_export_ops;
}
#endif /* EXPERIMENTAL */
@@ -176,7 +166,7 @@ cifs_read_super(struct super_block *sb, void *data,
return 0;
out_no_root:
- cERROR(1, ("cifs_read_super: get root inode failed"));
+ cERROR(1, "cifs_read_super: get root inode failed");
if (inode)
iput(inode);
@@ -203,10 +193,10 @@ cifs_put_super(struct super_block *sb)
int rc = 0;
struct cifs_sb_info *cifs_sb;
- cFYI(1, ("In cifs_put_super"));
+ cFYI(1, "In cifs_put_super");
cifs_sb = CIFS_SB(sb);
if (cifs_sb == NULL) {
- cFYI(1, ("Empty cifs superblock info passed to unmount"));
+ cFYI(1, "Empty cifs superblock info passed to unmount");
return;
}
@@ -214,7 +204,7 @@ cifs_put_super(struct super_block *sb)
rc = cifs_umount(sb, cifs_sb);
if (rc)
- cERROR(1, ("cifs_umount failed with return code %d", rc));
+ cERROR(1, "cifs_umount failed with return code %d", rc);
#ifdef CONFIG_CIFS_DFS_UPCALL
if (cifs_sb->mountdata) {
kfree(cifs_sb->mountdata);
@@ -300,7 +290,6 @@ static int cifs_permission(struct inode *inode, int mask)
static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
-struct kmem_cache *cifs_oplock_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
@@ -432,106 +421,6 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
return 0;
}
-#ifdef CONFIG_CIFS_QUOTA
-int cifs_xquota_set(struct super_block *sb, int quota_type, qid_t qid,
- struct fs_disk_quota *pdquota)
-{
- int xid;
- int rc = 0;
- struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
- struct cifsTconInfo *pTcon;
-
- if (cifs_sb)
- pTcon = cifs_sb->tcon;
- else
- return -EIO;
-
-
- xid = GetXid();
- if (pTcon) {
- cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));
- } else
- rc = -EIO;
-
- FreeXid(xid);
- return rc;
-}
-
-int cifs_xquota_get(struct super_block *sb, int quota_type, qid_t qid,
- struct fs_disk_quota *pdquota)
-{
- int xid;
- int rc = 0;
- struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
- struct cifsTconInfo *pTcon;
-
- if (cifs_sb)
- pTcon = cifs_sb->tcon;
- else
- return -EIO;
-
- xid = GetXid();
- if (pTcon) {
- cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));
- } else
- rc = -EIO;
-
- FreeXid(xid);
- return rc;
-}
-
-int cifs_xstate_set(struct super_block *sb, unsigned int flags, int operation)
-{
- int xid;
- int rc = 0;
- struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
- struct cifsTconInfo *pTcon;
-
- if (cifs_sb)
- pTcon = cifs_sb->tcon;
- else
- return -EIO;
-
- xid = GetXid();
- if (pTcon) {
- cFYI(1, ("flags: 0x%x operation: 0x%x", flags, operation));
- } else
- rc = -EIO;
-
- FreeXid(xid);
- return rc;
-}
-
-int cifs_xstate_get(struct super_block *sb, struct fs_quota_stat *qstats)
-{
- int xid;
- int rc = 0;
- struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
- struct cifsTconInfo *pTcon;
-
- if (cifs_sb)
- pTcon = cifs_sb->tcon;
- else
- return -EIO;
-
- xid = GetXid();
- if (pTcon) {
- cFYI(1, ("pqstats %p", qstats));
- } else
- rc = -EIO;
-
- FreeXid(xid);
- return rc;
-}
-
-static const struct quotactl_ops cifs_quotactl_ops = {
- .set_xquota = cifs_xquota_set,
- .get_xquota = cifs_xquota_get,
- .set_xstate = cifs_xstate_set,
- .get_xstate = cifs_xstate_get,
-};
-#endif
-
static void cifs_umount_begin(struct super_block *sb)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
@@ -558,7 +447,7 @@ static void cifs_umount_begin(struct super_block *sb)
/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
/* cancel_notify_requests(tcon); */
if (tcon->ses && tcon->ses->server) {
- cFYI(1, ("wake up tasks now - umount begin not complete"));
+ cFYI(1, "wake up tasks now - umount begin not complete");
wake_up_all(&tcon->ses->server->request_q);
wake_up_all(&tcon->ses->server->response_q);
msleep(1); /* yield */
@@ -609,7 +498,7 @@ cifs_get_sb(struct file_system_type *fs_type,
int rc;
struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);
- cFYI(1, ("Devname: %s flags: %d ", dev_name, flags));
+ cFYI(1, "Devname: %s flags: %d ", dev_name, flags);
if (IS_ERR(sb))
return PTR_ERR(sb);
@@ -656,7 +545,6 @@ static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
return generic_file_llseek_unlocked(file, offset, origin);
}
-#ifdef CONFIG_CIFS_EXPERIMENTAL
static int cifs_setlease(struct file *file, long arg, struct file_lock **lease)
{
/* note that this is called by vfs setlease with the BKL held
@@ -685,7 +573,6 @@ static int cifs_setlease(struct file *file, long arg, struct file_lock **lease)
else
return -EAGAIN;
}
-#endif
struct file_system_type cifs_fs_type = {
.owner = THIS_MODULE,
@@ -762,10 +649,7 @@ const struct file_operations cifs_file_ops = {
#ifdef CONFIG_CIFS_POSIX
.unlocked_ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
-
-#ifdef CONFIG_CIFS_EXPERIMENTAL
.setlease = cifs_setlease,
-#endif /* CONFIG_CIFS_EXPERIMENTAL */
};
const struct file_operations cifs_file_direct_ops = {
@@ -784,9 +668,7 @@ const struct file_operations cifs_file_direct_ops = {
.unlocked_ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
.llseek = cifs_llseek,
-#ifdef CONFIG_CIFS_EXPERIMENTAL
.setlease = cifs_setlease,
-#endif /* CONFIG_CIFS_EXPERIMENTAL */
};
const struct file_operations cifs_file_nobrl_ops = {
.read = do_sync_read,
@@ -803,10 +685,7 @@ const struct file_operations cifs_file_nobrl_ops = {
#ifdef CONFIG_CIFS_POSIX
.unlocked_ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
-
-#ifdef CONFIG_CIFS_EXPERIMENTAL
.setlease = cifs_setlease,
-#endif /* CONFIG_CIFS_EXPERIMENTAL */
};
const struct file_operations cifs_file_direct_nobrl_ops = {
@@ -824,9 +703,7 @@ const struct file_operations cifs_file_direct_nobrl_ops = {
.unlocked_ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
.llseek = cifs_llseek,
-#ifdef CONFIG_CIFS_EXPERIMENTAL
.setlease = cifs_setlease,
-#endif /* CONFIG_CIFS_EXPERIMENTAL */
};
const struct file_operations cifs_dir_ops = {
@@ -878,7 +755,7 @@ cifs_init_request_bufs(void)
} else {
CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
}
-/* cERROR(1,("CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize)); */
+/* cERROR(1, "CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize); */
cifs_req_cachep = kmem_cache_create("cifs_request",
CIFSMaxBufSize +
MAX_CIFS_HDR_SIZE, 0,
@@ -890,7 +767,7 @@ cifs_init_request_bufs(void)
cifs_min_rcv = 1;
else if (cifs_min_rcv > 64) {
cifs_min_rcv = 64;
- cERROR(1, ("cifs_min_rcv set to maximum (64)"));
+ cERROR(1, "cifs_min_rcv set to maximum (64)");
}
cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
@@ -921,7 +798,7 @@ cifs_init_request_bufs(void)
cifs_min_small = 2;
else if (cifs_min_small > 256) {
cifs_min_small = 256;
- cFYI(1, ("cifs_min_small set to maximum (256)"));
+ cFYI(1, "cifs_min_small set to maximum (256)");
}
cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
@@ -962,15 +839,6 @@ cifs_init_mids(void)
return -ENOMEM;
}
- cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs",
- sizeof(struct oplock_q_entry), 0,
- SLAB_HWCACHE_ALIGN, NULL);
- if (cifs_oplock_cachep == NULL) {
- mempool_destroy(cifs_mid_poolp);
- kmem_cache_destroy(cifs_mid_cachep);
- return -ENOMEM;
- }
-
return 0;
}
@@ -979,7 +847,6 @@ cifs_destroy_mids(void)
{
mempool_destroy(cifs_mid_poolp);
kmem_cache_destroy(cifs_mid_cachep);
- kmem_cache_destroy(cifs_oplock_cachep);
}
static int __init
@@ -1019,10 +886,10 @@ init_cifs(void)
if (cifs_max_pending < 2) {
cifs_max_pending = 2;
- cFYI(1, ("cifs_max_pending set to min of 2"));
+ cFYI(1, "cifs_max_pending set to min of 2");
} else if (cifs_max_pending > 256) {
cifs_max_pending = 256;
- cFYI(1, ("cifs_max_pending set to max of 256"));
+ cFYI(1, "cifs_max_pending set to max of 256");
}
rc = cifs_init_inodecache();
@@ -1080,7 +947,7 @@ init_cifs(void)
static void __exit
exit_cifs(void)
{
- cFYI(DBG2, ("exit_cifs"));
+ cFYI(DBG2, "exit_cifs");
cifs_proc_clean();
#ifdef CONFIG_CIFS_DFS_UPCALL
cifs_dfs_release_automount_timer();
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 7aa57ecdc437..0242ff9cbf41 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -114,5 +114,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
extern const struct export_operations cifs_export_ops;
#endif /* EXPERIMENTAL */
-#define CIFS_VERSION "1.62"
+#define CIFS_VERSION "1.64"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index ecf0ffbe2b64..4a99487400f3 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -87,7 +87,6 @@ enum securityEnum {
RawNTLMSSP, /* NTLMSSP without SPNEGO, NTLMv2 hash */
/* NTLMSSP, */ /* can use rawNTLMSSP instead of NTLMSSP via SPNEGO */
Kerberos, /* Kerberos via SPNEGO */
- MSKerberos, /* MS Kerberos via SPNEGO */
};
enum protocolEnum {
@@ -185,6 +184,12 @@ struct TCP_Server_Info {
struct mac_key mac_signing_key;
char ntlmv2_hash[16];
unsigned long lstrp; /* when we got last response from this server */
+ u16 dialect; /* dialect index that server chose */
+ /* extended security flavors that server supports */
+ bool sec_kerberos; /* supports plain Kerberos */
+ bool sec_mskerberos; /* supports legacy MS Kerberos */
+ bool sec_kerberosu2u; /* supports U2U Kerberos */
+ bool sec_ntlmssp; /* supports NTLMSSP */
};
/*
@@ -717,7 +722,7 @@ GLOBAL_EXTERN unsigned int multiuser_mount; /* if enabled allows new sessions
GLOBAL_EXTERN unsigned int oplockEnabled;
GLOBAL_EXTERN unsigned int experimEnabled;
GLOBAL_EXTERN unsigned int lookupCacheEnabled;
-GLOBAL_EXTERN unsigned int extended_security; /* if on, session setup sent
+GLOBAL_EXTERN unsigned int global_secflags; /* if on, session setup sent
with more secure ntlmssp2 challenge/resp */
GLOBAL_EXTERN unsigned int sign_CIFS_PDUs; /* enable smb packet signing */
GLOBAL_EXTERN unsigned int linuxExtEnabled;/*enable Linux/Unix CIFS extensions*/
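
The new TCP_Server_Info members record which security mechanisms the server advertised in its negotiate response, replacing the single MSKerberos enum value: decode_negTokenInit() now fills in one boolean per flavor, and the negotiate path picks the session type afterwards. A stand-alone sketch of that selection, using hypothetical types rather than the kernel structs, is:

/*
 * Hypothetical stand-alone types -- a sketch of the selection the
 * negotiate code performs once the negTokenInit parser has filled in
 * the per-flavor booleans: any Kerberos flavor wins over NTLMSSP, and
 * a server offering neither is rejected.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum sec_type { SEC_NONE, SEC_KERBEROS, SEC_RAW_NTLMSSP };

struct srv_sec {
	bool sec_kerberos;	/* plain Kerberos offered        */
	bool sec_mskerberos;	/* legacy MS Kerberos offered    */
	bool sec_kerberosu2u;	/* user-to-user Kerberos offered */
	bool sec_ntlmssp;	/* NTLMSSP offered               */
};

static int pick_sec_type(const struct srv_sec *s, enum sec_type *out)
{
	if (s->sec_kerberos || s->sec_mskerberos)
		*out = SEC_KERBEROS;
	else if (s->sec_ntlmssp)
		*out = SEC_RAW_NTLMSSP;
	else
		return -EOPNOTSUPP;
	return 0;
}

int main(void)
{
	struct srv_sec s = { .sec_mskerberos = true };
	enum sec_type t = SEC_NONE;

	printf("rc = %d, type = %d\n", pick_sec_type(&s, &t), t);
	return 0;
}
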
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 39e47f46dea5..2e07da9a46fa 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -39,8 +39,20 @@ extern int smb_send(struct TCP_Server_Info *, struct smb_hdr *,
unsigned int /* length */);
extern unsigned int _GetXid(void);
extern void _FreeXid(unsigned int);
-#define GetXid() (int)_GetXid(); cFYI(1,("CIFS VFS: in %s as Xid: %d with uid: %d",__func__, xid,current_fsuid()));
-#define FreeXid(curr_xid) {_FreeXid(curr_xid); cFYI(1,("CIFS VFS: leaving %s (xid = %d) rc = %d",__func__,curr_xid,(int)rc));}
+#define GetXid() \
+({ \
+ int __xid = (int)_GetXid(); \
+ cFYI(1, "CIFS VFS: in %s as Xid: %d with uid: %d", \
+ __func__, __xid, current_fsuid()); \
+ __xid; \
+})
+
+#define FreeXid(curr_xid) \
+do { \
+ _FreeXid(curr_xid); \
+ cFYI(1, "CIFS VFS: leaving %s (xid = %d) rc = %d", \
+ __func__, curr_xid, (int)rc); \
+} while (0)
extern char *build_path_from_dentry(struct dentry *);
extern char *cifs_build_path_to_root(struct cifs_sb_info *cifs_sb);
extern char *build_wildcard_path_from_dentry(struct dentry *direntry);
@@ -73,7 +85,7 @@ extern struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *);
extern unsigned int smbCalcSize(struct smb_hdr *ptr);
extern unsigned int smbCalcSize_LE(struct smb_hdr *ptr);
extern int decode_negTokenInit(unsigned char *security_blob, int length,
- enum securityEnum *secType);
+ struct TCP_Server_Info *server);
extern int cifs_convert_address(char *src, void *dst);
extern int map_smb_to_linux_error(struct smb_hdr *smb, int logErr);
extern void header_assemble(struct smb_hdr *, char /* command */ ,
@@ -83,7 +95,6 @@ extern int small_smb_init_no_tc(const int smb_cmd, const int wct,
struct cifsSesInfo *ses,
void **request_buf);
extern int CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses,
- const int stage,
const struct nls_table *nls_cp);
extern __u16 GetNextMid(struct TCP_Server_Info *server);
extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
@@ -95,8 +106,10 @@ extern struct cifsFileInfo *cifs_new_fileinfo(struct inode *newinode,
__u16 fileHandle, struct file *file,
struct vfsmount *mnt, unsigned int oflags);
extern int cifs_posix_open(char *full_path, struct inode **pinode,
- struct vfsmount *mnt, int mode, int oflags,
- __u32 *poplock, __u16 *pnetfid, int xid);
+ struct vfsmount *mnt,
+ struct super_block *sb,
+ int mode, int oflags,
+ __u32 *poplock, __u16 *pnetfid, int xid);
extern void cifs_unix_basic_to_fattr(struct cifs_fattr *fattr,
FILE_UNIX_BASIC_INFO *info,
struct cifs_sb_info *cifs_sb);
@@ -125,7 +138,9 @@ extern void cifs_dfs_release_automount_timer(void);
void cifs_proc_init(void);
void cifs_proc_clean(void);
-extern int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo,
+extern int cifs_negotiate_protocol(unsigned int xid,
+ struct cifsSesInfo *ses);
+extern int cifs_setup_session(unsigned int xid, struct cifsSesInfo *ses,
struct nls_table *nls_info);
extern int CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses);
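
The GetXid()/FreeXid() rework above replaces single-line macros that expanded to two statements with a GNU statement expression and a do { } while (0) block. A small user-space sketch, with stub _GetXid()/_FreeXid() in place of the kernel helpers, shows the difference at a call site:

/*
 * Sketch with stub helpers, not the kernel ones.  The old GetXid()
 * expanded to an expression followed by a second statement, so it only
 * behaved when used exactly as "xid = GetXid();" at statement level;
 * the statement-expression form is a single expression that yields the
 * xid, and the do/while(0) form keeps FreeXid() a single statement.
 */
#include <stdio.h>

static unsigned int next_xid;
static unsigned int _GetXid(void) { return ++next_xid; }
static void _FreeXid(unsigned int xid) { (void)xid; }

#define GetXid()				\
({						\
	int __xid = (int)_GetXid();		\
	printf("enter, xid %d\n", __xid);	\
	__xid;					\
})

#define FreeXid(curr_xid)			\
do {						\
	_FreeXid(curr_xid);			\
	printf("leave, xid %d\n", (curr_xid));	\
} while (0)

int main(void)
{
	int xid = GetXid();

	FreeXid(xid);
	return 0;
}
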
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 5d3f29fef532..c65c3419dd37 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1,7 +1,7 @@
/*
* fs/cifs/cifssmb.c
*
- * Copyright (C) International Business Machines Corp., 2002,2009
+ * Copyright (C) International Business Machines Corp., 2002,2010
* Author(s): Steve French (sfrench@us.ibm.com)
*
* Contains the routines for constructing the SMB PDUs themselves
@@ -130,8 +130,8 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command)
if (smb_command != SMB_COM_WRITE_ANDX &&
smb_command != SMB_COM_OPEN_ANDX &&
smb_command != SMB_COM_TREE_DISCONNECT) {
- cFYI(1, ("can not send cmd %d while umounting",
- smb_command));
+ cFYI(1, "can not send cmd %d while umounting",
+ smb_command);
return -ENODEV;
}
}
@@ -157,7 +157,7 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command)
* back on-line
*/
if (!tcon->retry || ses->status == CifsExiting) {
- cFYI(1, ("gave up waiting on reconnect in smb_init"));
+ cFYI(1, "gave up waiting on reconnect in smb_init");
return -EHOSTDOWN;
}
}
@@ -172,7 +172,8 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command)
* reconnect the same SMB session
*/
mutex_lock(&ses->session_mutex);
- if (ses->need_reconnect)
+ rc = cifs_negotiate_protocol(0, ses);
+ if (rc == 0 && ses->need_reconnect)
rc = cifs_setup_session(0, ses, nls_codepage);
/* do we need to reconnect tcon? */
@@ -184,7 +185,7 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command)
mark_open_files_invalid(tcon);
rc = CIFSTCon(0, ses, tcon->treeName, tcon, nls_codepage);
mutex_unlock(&ses->session_mutex);
- cFYI(1, ("reconnect tcon rc = %d", rc));
+ cFYI(1, "reconnect tcon rc = %d", rc);
if (rc)
goto out;
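
The cifs_reconnect_tcon() hunk above reflects the split of protocol negotiation out of session setup: on reconnect the client first re-runs the SMB negotiate, and only performs session setup if that succeeded and the session still needs re-establishing. A sketch of that ordering, with hypothetical stubs rather than the kernel functions:

/*
 * Hypothetical stubs, not the kernel functions -- just the reconnect
 * ordering introduced here: negotiate first, then (re)do session setup
 * only if negotiation succeeded and the session still needs it.
 */
#include <stdbool.h>
#include <stdio.h>

static int cifs_negotiate_protocol_stub(void) { return 0; }
static int cifs_setup_session_stub(void)      { return 0; }

static int reconnect_ses(bool need_reconnect)
{
	int rc = cifs_negotiate_protocol_stub();

	if (rc == 0 && need_reconnect)
		rc = cifs_setup_session_stub();
	return rc;
}

int main(void)
{
	printf("reconnect rc = %d\n", reconnect_ses(true));
	return 0;
}
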
@@ -355,7 +356,6 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
struct TCP_Server_Info *server;
u16 count;
unsigned int secFlags;
- u16 dialect;
if (ses->server)
server = ses->server;
@@ -372,9 +372,9 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
if (ses->overrideSecFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL)))
secFlags = ses->overrideSecFlg; /* BB FIXME fix sign flags? */
else /* if override flags set only sign/seal OR them with global auth */
- secFlags = extended_security | ses->overrideSecFlg;
+ secFlags = global_secflags | ses->overrideSecFlg;
- cFYI(1, ("secFlags 0x%x", secFlags));
+ cFYI(1, "secFlags 0x%x", secFlags);
pSMB->hdr.Mid = GetNextMid(server);
pSMB->hdr.Flags2 |= (SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS);
@@ -382,14 +382,14 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
if ((secFlags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5)
pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC;
else if ((secFlags & CIFSSEC_AUTH_MASK) == CIFSSEC_MAY_KRB5) {
- cFYI(1, ("Kerberos only mechanism, enable extended security"));
+ cFYI(1, "Kerberos only mechanism, enable extended security");
pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC;
}
#ifdef CONFIG_CIFS_EXPERIMENTAL
else if ((secFlags & CIFSSEC_MUST_NTLMSSP) == CIFSSEC_MUST_NTLMSSP)
pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC;
else if ((secFlags & CIFSSEC_AUTH_MASK) == CIFSSEC_MAY_NTLMSSP) {
- cFYI(1, ("NTLMSSP only mechanism, enable extended security"));
+ cFYI(1, "NTLMSSP only mechanism, enable extended security");
pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC;
}
#endif
@@ -408,10 +408,10 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
if (rc != 0)
goto neg_err_exit;
- dialect = le16_to_cpu(pSMBr->DialectIndex);
- cFYI(1, ("Dialect: %d", dialect));
+ server->dialect = le16_to_cpu(pSMBr->DialectIndex);
+ cFYI(1, "Dialect: %d", server->dialect);
/* Check wct = 1 error case */
- if ((pSMBr->hdr.WordCount < 13) || (dialect == BAD_PROT)) {
+ if ((pSMBr->hdr.WordCount < 13) || (server->dialect == BAD_PROT)) {
/* core returns wct = 1, but we do not ask for core - otherwise
small wct just comes when dialect index is -1 indicating we
could not negotiate a common dialect */
@@ -419,8 +419,8 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
goto neg_err_exit;
#ifdef CONFIG_CIFS_WEAK_PW_HASH
} else if ((pSMBr->hdr.WordCount == 13)
- && ((dialect == LANMAN_PROT)
- || (dialect == LANMAN2_PROT))) {
+ && ((server->dialect == LANMAN_PROT)
+ || (server->dialect == LANMAN2_PROT))) {
__s16 tmp;
struct lanman_neg_rsp *rsp = (struct lanman_neg_rsp *)pSMBr;
@@ -428,8 +428,8 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
(secFlags & CIFSSEC_MAY_PLNTXT))
server->secType = LANMAN;
else {
- cERROR(1, ("mount failed weak security disabled"
- " in /proc/fs/cifs/SecurityFlags"));
+ cERROR(1, "mount failed weak security disabled"
+ " in /proc/fs/cifs/SecurityFlags");
rc = -EOPNOTSUPP;
goto neg_err_exit;
}
@@ -462,9 +462,9 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
utc = CURRENT_TIME;
ts = cnvrtDosUnixTm(rsp->SrvTime.Date,
rsp->SrvTime.Time, 0);
- cFYI(1, ("SrvTime %d sec since 1970 (utc: %d) diff: %d",
+ cFYI(1, "SrvTime %d sec since 1970 (utc: %d) diff: %d",
(int)ts.tv_sec, (int)utc.tv_sec,
- (int)(utc.tv_sec - ts.tv_sec)));
+ (int)(utc.tv_sec - ts.tv_sec));
val = (int)(utc.tv_sec - ts.tv_sec);
seconds = abs(val);
result = (seconds / MIN_TZ_ADJ) * MIN_TZ_ADJ;
@@ -478,7 +478,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
server->timeAdj = (int)tmp;
server->timeAdj *= 60; /* also in seconds */
}
- cFYI(1, ("server->timeAdj: %d seconds", server->timeAdj));
+ cFYI(1, "server->timeAdj: %d seconds", server->timeAdj);
/* BB get server time for time conversions and add
@@ -493,14 +493,14 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
goto neg_err_exit;
}
- cFYI(1, ("LANMAN negotiated"));
+ cFYI(1, "LANMAN negotiated");
/* we will not end up setting signing flags - as no signing
was in LANMAN and server did not return the flags on */
goto signing_check;
#else /* weak security disabled */
} else if (pSMBr->hdr.WordCount == 13) {
- cERROR(1, ("mount failed, cifs module not built "
- "with CIFS_WEAK_PW_HASH support"));
+ cERROR(1, "mount failed, cifs module not built "
+ "with CIFS_WEAK_PW_HASH support");
rc = -EOPNOTSUPP;
#endif /* WEAK_PW_HASH */
goto neg_err_exit;
@@ -512,14 +512,14 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
/* else wct == 17 NTLM */
server->secMode = pSMBr->SecurityMode;
if ((server->secMode & SECMODE_USER) == 0)
- cFYI(1, ("share mode security"));
+ cFYI(1, "share mode security");
if ((server->secMode & SECMODE_PW_ENCRYPT) == 0)
#ifdef CONFIG_CIFS_WEAK_PW_HASH
if ((secFlags & CIFSSEC_MAY_PLNTXT) == 0)
#endif /* CIFS_WEAK_PW_HASH */
- cERROR(1, ("Server requests plain text password"
- " but client support disabled"));
+ cERROR(1, "Server requests plain text password"
+ " but client support disabled");
if ((secFlags & CIFSSEC_MUST_NTLMV2) == CIFSSEC_MUST_NTLMV2)
server->secType = NTLMv2;
@@ -539,7 +539,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
#endif */
else {
rc = -EOPNOTSUPP;
- cERROR(1, ("Invalid security type"));
+ cERROR(1, "Invalid security type");
goto neg_err_exit;
}
/* else ... any others ...? */
@@ -551,7 +551,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
server->maxBuf = min(le32_to_cpu(pSMBr->MaxBufferSize),
(__u32) CIFSMaxBufSize + MAX_CIFS_HDR_SIZE);
server->max_rw = le32_to_cpu(pSMBr->MaxRawSize);
- cFYI(DBG2, ("Max buf = %d", ses->server->maxBuf));
+ cFYI(DBG2, "Max buf = %d", ses->server->maxBuf);
GETU32(ses->server->sessid) = le32_to_cpu(pSMBr->SessionKey);
server->capabilities = le32_to_cpu(pSMBr->Capabilities);
server->timeAdj = (int)(__s16)le16_to_cpu(pSMBr->ServerTimeZone);
@@ -582,7 +582,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
if (memcmp(server->server_GUID,
pSMBr->u.extended_response.
GUID, 16) != 0) {
- cFYI(1, ("server UID changed"));
+ cFYI(1, "server UID changed");
memcpy(server->server_GUID,
pSMBr->u.extended_response.GUID,
16);
@@ -597,13 +597,19 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
server->secType = RawNTLMSSP;
} else {
rc = decode_negTokenInit(pSMBr->u.extended_response.
- SecurityBlob,
- count - 16,
- &server->secType);
+ SecurityBlob, count - 16,
+ server);
if (rc == 1)
rc = 0;
else
rc = -EINVAL;
+
+ if (server->sec_kerberos || server->sec_mskerberos)
+ server->secType = Kerberos;
+ else if (server->sec_ntlmssp)
+ server->secType = RawNTLMSSP;
+ else
+ rc = -EOPNOTSUPP;
}
} else
server->capabilities &= ~CAP_EXTENDED_SECURITY;
@@ -614,22 +620,21 @@ signing_check:
if ((secFlags & CIFSSEC_MAY_SIGN) == 0) {
/* MUST_SIGN already includes the MAY_SIGN FLAG
so if this is zero it means that signing is disabled */
- cFYI(1, ("Signing disabled"));
+ cFYI(1, "Signing disabled");
if (server->secMode & SECMODE_SIGN_REQUIRED) {
- cERROR(1, ("Server requires "
+ cERROR(1, "Server requires "
"packet signing to be enabled in "
- "/proc/fs/cifs/SecurityFlags."));
+ "/proc/fs/cifs/SecurityFlags.");
rc = -EOPNOTSUPP;
}
server->secMode &=
~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
} else if ((secFlags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) {
/* signing required */
- cFYI(1, ("Must sign - secFlags 0x%x", secFlags));
+ cFYI(1, "Must sign - secFlags 0x%x", secFlags);
if ((server->secMode &
(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED)) == 0) {
- cERROR(1,
- ("signing required but server lacks support"));
+ cERROR(1, "signing required but server lacks support");
rc = -EOPNOTSUPP;
} else
server->secMode |= SECMODE_SIGN_REQUIRED;
@@ -643,7 +648,7 @@ signing_check:
neg_err_exit:
cifs_buf_release(pSMB);
- cFYI(1, ("negprot rc %d", rc));
+ cFYI(1, "negprot rc %d", rc);
return rc;
}
@@ -653,7 +658,7 @@ CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon)
struct smb_hdr *smb_buffer;
int rc = 0;
- cFYI(1, ("In tree disconnect"));
+ cFYI(1, "In tree disconnect");
/* BB: do we need to check this? These should never be NULL. */
if ((tcon->ses == NULL) || (tcon->ses->server == NULL))
@@ -675,7 +680,7 @@ CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon)
rc = SendReceiveNoRsp(xid, tcon->ses, smb_buffer, 0);
if (rc)
- cFYI(1, ("Tree disconnect failed %d", rc));
+ cFYI(1, "Tree disconnect failed %d", rc);
/* No need to return error on this operation if tid invalidated and
closed on server already e.g. due to tcp session crashing */
@@ -691,7 +696,7 @@ CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses)
LOGOFF_ANDX_REQ *pSMB;
int rc = 0;
- cFYI(1, ("In SMBLogoff for session disconnect"));
+ cFYI(1, "In SMBLogoff for session disconnect");
/*
* BB: do we need to check validity of ses and server? They should
@@ -744,7 +749,7 @@ CIFSPOSIXDelFile(const int xid, struct cifsTconInfo *tcon, const char *fileName,
int bytes_returned = 0;
__u16 params, param_offset, offset, byte_count;
- cFYI(1, ("In POSIX delete"));
+ cFYI(1, "In POSIX delete");
PsxDelete:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -796,7 +801,7 @@ PsxDelete:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc)
- cFYI(1, ("Posix delete returned %d", rc));
+ cFYI(1, "Posix delete returned %d", rc);
cifs_buf_release(pSMB);
cifs_stats_inc(&tcon->num_deletes);
@@ -843,7 +848,7 @@ DelFileRetry:
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_deletes);
if (rc)
- cFYI(1, ("Error in RMFile = %d", rc));
+ cFYI(1, "Error in RMFile = %d", rc);
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
@@ -862,7 +867,7 @@ CIFSSMBRmDir(const int xid, struct cifsTconInfo *tcon, const char *dirName,
int bytes_returned;
int name_len;
- cFYI(1, ("In CIFSSMBRmDir"));
+ cFYI(1, "In CIFSSMBRmDir");
RmDirRetry:
rc = smb_init(SMB_COM_DELETE_DIRECTORY, 0, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -887,7 +892,7 @@ RmDirRetry:
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_rmdirs);
if (rc)
- cFYI(1, ("Error in RMDir = %d", rc));
+ cFYI(1, "Error in RMDir = %d", rc);
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
@@ -905,7 +910,7 @@ CIFSSMBMkDir(const int xid, struct cifsTconInfo *tcon,
int bytes_returned;
int name_len;
- cFYI(1, ("In CIFSSMBMkDir"));
+ cFYI(1, "In CIFSSMBMkDir");
MkDirRetry:
rc = smb_init(SMB_COM_CREATE_DIRECTORY, 0, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -930,7 +935,7 @@ MkDirRetry:
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_mkdirs);
if (rc)
- cFYI(1, ("Error in Mkdir = %d", rc));
+ cFYI(1, "Error in Mkdir = %d", rc);
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
@@ -953,7 +958,7 @@ CIFSPOSIXCreate(const int xid, struct cifsTconInfo *tcon, __u32 posix_flags,
OPEN_PSX_REQ *pdata;
OPEN_PSX_RSP *psx_rsp;
- cFYI(1, ("In POSIX Create"));
+ cFYI(1, "In POSIX Create");
PsxCreat:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -1007,11 +1012,11 @@ PsxCreat:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cFYI(1, ("Posix create returned %d", rc));
+ cFYI(1, "Posix create returned %d", rc);
goto psx_create_err;
}
- cFYI(1, ("copying inode info"));
+ cFYI(1, "copying inode info");
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
if (rc || (pSMBr->ByteCount < sizeof(OPEN_PSX_RSP))) {
@@ -1033,11 +1038,11 @@ PsxCreat:
/* check to make sure response data is there */
if (psx_rsp->ReturnedLevel != cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC)) {
pRetData->Type = cpu_to_le32(-1); /* unknown */
- cFYI(DBG2, ("unknown type"));
+ cFYI(DBG2, "unknown type");
} else {
if (pSMBr->ByteCount < sizeof(OPEN_PSX_RSP)
+ sizeof(FILE_UNIX_BASIC_INFO)) {
- cERROR(1, ("Open response data too small"));
+ cERROR(1, "Open response data too small");
pRetData->Type = cpu_to_le32(-1);
goto psx_create_err;
}
@@ -1084,7 +1089,7 @@ static __u16 convert_disposition(int disposition)
ofun = SMBOPEN_OCREATE | SMBOPEN_OTRUNC;
break;
default:
- cFYI(1, ("unknown disposition %d", disposition));
+ cFYI(1, "unknown disposition %d", disposition);
ofun = SMBOPEN_OAPPEND; /* regular open */
}
return ofun;
@@ -1175,7 +1180,7 @@ OldOpenRetry:
(struct smb_hdr *)pSMBr, &bytes_returned, CIFS_LONG_OP);
cifs_stats_inc(&tcon->num_opens);
if (rc) {
- cFYI(1, ("Error in Open = %d", rc));
+ cFYI(1, "Error in Open = %d", rc);
} else {
/* BB verify if wct == 15 */
@@ -1288,7 +1293,7 @@ openRetry:
(struct smb_hdr *)pSMBr, &bytes_returned, CIFS_LONG_OP);
cifs_stats_inc(&tcon->num_opens);
if (rc) {
- cFYI(1, ("Error in Open = %d", rc));
+ cFYI(1, "Error in Open = %d", rc);
} else {
*pOplock = pSMBr->OplockLevel; /* 1 byte no need to le_to_cpu */
*netfid = pSMBr->Fid; /* cifs fid stays in le */
@@ -1326,7 +1331,7 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, const int netfid,
int resp_buf_type = 0;
struct kvec iov[1];
- cFYI(1, ("Reading %d bytes on fid %d", count, netfid));
+ cFYI(1, "Reading %d bytes on fid %d", count, netfid);
if (tcon->ses->capabilities & CAP_LARGE_FILES)
wct = 12;
else {
@@ -1371,7 +1376,7 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, const int netfid,
cifs_stats_inc(&tcon->num_reads);
pSMBr = (READ_RSP *)iov[0].iov_base;
if (rc) {
- cERROR(1, ("Send error in read = %d", rc));
+ cERROR(1, "Send error in read = %d", rc);
} else {
int data_length = le16_to_cpu(pSMBr->DataLengthHigh);
data_length = data_length << 16;
@@ -1381,15 +1386,15 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, const int netfid,
/*check that DataLength would not go beyond end of SMB */
if ((data_length > CIFSMaxBufSize)
|| (data_length > count)) {
- cFYI(1, ("bad length %d for count %d",
- data_length, count));
+ cFYI(1, "bad length %d for count %d",
+ data_length, count);
rc = -EIO;
*nbytes = 0;
} else {
pReadData = (char *) (&pSMBr->hdr.Protocol) +
le16_to_cpu(pSMBr->DataOffset);
/* if (rc = copy_to_user(buf, pReadData, data_length)) {
- cERROR(1,("Faulting on read rc = %d",rc));
+ cERROR(1, "Faulting on read rc = %d",rc);
rc = -EFAULT;
}*/ /* can not use copy_to_user when using page cache*/
if (*buf)
@@ -1433,7 +1438,7 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
*nbytes = 0;
- /* cFYI(1, ("write at %lld %d bytes", offset, count));*/
+ /* cFYI(1, "write at %lld %d bytes", offset, count);*/
if (tcon->ses == NULL)
return -ECONNABORTED;
@@ -1514,7 +1519,7 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
(struct smb_hdr *) pSMBr, &bytes_returned, long_op);
cifs_stats_inc(&tcon->num_writes);
if (rc) {
- cFYI(1, ("Send error in write = %d", rc));
+ cFYI(1, "Send error in write = %d", rc);
} else {
*nbytes = le16_to_cpu(pSMBr->CountHigh);
*nbytes = (*nbytes) << 16;
@@ -1551,7 +1556,7 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
*nbytes = 0;
- cFYI(1, ("write2 at %lld %d bytes", (long long)offset, count));
+ cFYI(1, "write2 at %lld %d bytes", (long long)offset, count);
if (tcon->ses->capabilities & CAP_LARGE_FILES) {
wct = 14;
@@ -1606,7 +1611,7 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
long_op);
cifs_stats_inc(&tcon->num_writes);
if (rc) {
- cFYI(1, ("Send error Write2 = %d", rc));
+ cFYI(1, "Send error Write2 = %d", rc);
} else if (resp_buf_type == 0) {
/* presumably this can not happen, but best to be safe */
rc = -EIO;
@@ -1651,7 +1656,7 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
int timeout = 0;
__u16 count;
- cFYI(1, ("CIFSSMBLock timeout %d numLock %d", (int)waitFlag, numLock));
+ cFYI(1, "CIFSSMBLock timeout %d numLock %d", (int)waitFlag, numLock);
rc = small_smb_init(SMB_COM_LOCKING_ANDX, 8, tcon, (void **) &pSMB);
if (rc)
@@ -1699,7 +1704,7 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
}
cifs_stats_inc(&tcon->num_locks);
if (rc)
- cFYI(1, ("Send error in Lock = %d", rc));
+ cFYI(1, "Send error in Lock = %d", rc);
/* Note: On -EAGAIN error only caller can retry on handle based calls
since file handle passed in no longer valid */
@@ -1722,7 +1727,7 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
__u16 params, param_offset, offset, byte_count, count;
struct kvec iov[1];
- cFYI(1, ("Posix Lock"));
+ cFYI(1, "Posix Lock");
if (pLockData == NULL)
return -EINVAL;
@@ -1792,7 +1797,7 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
}
if (rc) {
- cFYI(1, ("Send error in Posix Lock = %d", rc));
+ cFYI(1, "Send error in Posix Lock = %d", rc);
} else if (get_flag) {
/* lock structure can be returned on get */
__u16 data_offset;
@@ -1849,7 +1854,7 @@ CIFSSMBClose(const int xid, struct cifsTconInfo *tcon, int smb_file_id)
{
int rc = 0;
CLOSE_REQ *pSMB = NULL;
- cFYI(1, ("In CIFSSMBClose"));
+ cFYI(1, "In CIFSSMBClose");
/* do not retry on dead session on close */
rc = small_smb_init(SMB_COM_CLOSE, 3, tcon, (void **) &pSMB);
@@ -1866,7 +1871,7 @@ CIFSSMBClose(const int xid, struct cifsTconInfo *tcon, int smb_file_id)
if (rc) {
if (rc != -EINTR) {
/* EINTR is expected when user ctl-c to kill app */
- cERROR(1, ("Send error in Close = %d", rc));
+ cERROR(1, "Send error in Close = %d", rc);
}
}
@@ -1882,7 +1887,7 @@ CIFSSMBFlush(const int xid, struct cifsTconInfo *tcon, int smb_file_id)
{
int rc = 0;
FLUSH_REQ *pSMB = NULL;
- cFYI(1, ("In CIFSSMBFlush"));
+ cFYI(1, "In CIFSSMBFlush");
rc = small_smb_init(SMB_COM_FLUSH, 1, tcon, (void **) &pSMB);
if (rc)
@@ -1893,7 +1898,7 @@ CIFSSMBFlush(const int xid, struct cifsTconInfo *tcon, int smb_file_id)
rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
cifs_stats_inc(&tcon->num_flushes);
if (rc)
- cERROR(1, ("Send error in Flush = %d", rc));
+ cERROR(1, "Send error in Flush = %d", rc);
return rc;
}
@@ -1910,7 +1915,7 @@ CIFSSMBRename(const int xid, struct cifsTconInfo *tcon,
int name_len, name_len2;
__u16 count;
- cFYI(1, ("In CIFSSMBRename"));
+ cFYI(1, "In CIFSSMBRename");
renameRetry:
rc = smb_init(SMB_COM_RENAME, 1, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -1956,7 +1961,7 @@ renameRetry:
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_renames);
if (rc)
- cFYI(1, ("Send error in rename = %d", rc));
+ cFYI(1, "Send error in rename = %d", rc);
cifs_buf_release(pSMB);
@@ -1980,7 +1985,7 @@ int CIFSSMBRenameOpenFile(const int xid, struct cifsTconInfo *pTcon,
int len_of_str;
__u16 params, param_offset, offset, count, byte_count;
- cFYI(1, ("Rename to File by handle"));
+ cFYI(1, "Rename to File by handle");
rc = smb_init(SMB_COM_TRANSACTION2, 15, pTcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
@@ -2035,7 +2040,7 @@ int CIFSSMBRenameOpenFile(const int xid, struct cifsTconInfo *pTcon,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&pTcon->num_t2renames);
if (rc)
- cFYI(1, ("Send error in Rename (by file handle) = %d", rc));
+ cFYI(1, "Send error in Rename (by file handle) = %d", rc);
cifs_buf_release(pSMB);
@@ -2057,7 +2062,7 @@ CIFSSMBCopy(const int xid, struct cifsTconInfo *tcon, const char *fromName,
int name_len, name_len2;
__u16 count;
- cFYI(1, ("In CIFSSMBCopy"));
+ cFYI(1, "In CIFSSMBCopy");
copyRetry:
rc = smb_init(SMB_COM_COPY, 1, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -2102,8 +2107,8 @@ copyRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cFYI(1, ("Send error in copy = %d with %d files copied",
- rc, le16_to_cpu(pSMBr->CopyCount)));
+ cFYI(1, "Send error in copy = %d with %d files copied",
+ rc, le16_to_cpu(pSMBr->CopyCount));
}
cifs_buf_release(pSMB);
@@ -2127,7 +2132,7 @@ CIFSUnixCreateSymLink(const int xid, struct cifsTconInfo *tcon,
int bytes_returned = 0;
__u16 params, param_offset, offset, byte_count;
- cFYI(1, ("In Symlink Unix style"));
+ cFYI(1, "In Symlink Unix style");
createSymLinkRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -2192,7 +2197,7 @@ createSymLinkRetry:
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_symlinks);
if (rc)
- cFYI(1, ("Send error in SetPathInfo create symlink = %d", rc));
+ cFYI(1, "Send error in SetPathInfo create symlink = %d", rc);
cifs_buf_release(pSMB);
@@ -2216,7 +2221,7 @@ CIFSUnixCreateHardLink(const int xid, struct cifsTconInfo *tcon,
int bytes_returned = 0;
__u16 params, param_offset, offset, byte_count;
- cFYI(1, ("In Create Hard link Unix style"));
+ cFYI(1, "In Create Hard link Unix style");
createHardLinkRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -2278,7 +2283,7 @@ createHardLinkRetry:
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_hardlinks);
if (rc)
- cFYI(1, ("Send error in SetPathInfo (hard link) = %d", rc));
+ cFYI(1, "Send error in SetPathInfo (hard link) = %d", rc);
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
@@ -2299,7 +2304,7 @@ CIFSCreateHardLink(const int xid, struct cifsTconInfo *tcon,
int name_len, name_len2;
__u16 count;
- cFYI(1, ("In CIFSCreateHardLink"));
+ cFYI(1, "In CIFSCreateHardLink");
winCreateHardLinkRetry:
rc = smb_init(SMB_COM_NT_RENAME, 4, tcon, (void **) &pSMB,
@@ -2350,7 +2355,7 @@ winCreateHardLinkRetry:
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_hardlinks);
if (rc)
- cFYI(1, ("Send error in hard link (NT rename) = %d", rc));
+ cFYI(1, "Send error in hard link (NT rename) = %d", rc);
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
@@ -2373,7 +2378,7 @@ CIFSSMBUnixQuerySymLink(const int xid, struct cifsTconInfo *tcon,
__u16 params, byte_count;
char *data_start;
- cFYI(1, ("In QPathSymLinkInfo (Unix) for path %s", searchName));
+ cFYI(1, "In QPathSymLinkInfo (Unix) for path %s", searchName);
querySymLinkRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
@@ -2420,7 +2425,7 @@ querySymLinkRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cFYI(1, ("Send error in QuerySymLinkInfo = %d", rc));
+ cFYI(1, "Send error in QuerySymLinkInfo = %d", rc);
} else {
/* decode response */
@@ -2521,21 +2526,21 @@ validate_ntransact(char *buf, char **ppparm, char **ppdata,
/* should we also check that parm and data areas do not overlap? */
if (*ppparm > end_of_smb) {
- cFYI(1, ("parms start after end of smb"));
+ cFYI(1, "parms start after end of smb");
return -EINVAL;
} else if (parm_count + *ppparm > end_of_smb) {
- cFYI(1, ("parm end after end of smb"));
+ cFYI(1, "parm end after end of smb");
return -EINVAL;
} else if (*ppdata > end_of_smb) {
- cFYI(1, ("data starts after end of smb"));
+ cFYI(1, "data starts after end of smb");
return -EINVAL;
} else if (data_count + *ppdata > end_of_smb) {
- cFYI(1, ("data %p + count %d (%p) ends after end of smb %p start %p",
+ cFYI(1, "data %p + count %d (%p) past smb end %p start %p",
*ppdata, data_count, (data_count + *ppdata),
- end_of_smb, pSMBr));
+ end_of_smb, pSMBr);
return -EINVAL;
} else if (parm_count + data_count > pSMBr->ByteCount) {
- cFYI(1, ("parm count and data count larger than SMB"));
+ cFYI(1, "parm count and data count larger than SMB");
return -EINVAL;
}
*pdatalen = data_count;
@@ -2554,7 +2559,7 @@ CIFSSMBQueryReparseLinkInfo(const int xid, struct cifsTconInfo *tcon,
struct smb_com_transaction_ioctl_req *pSMB;
struct smb_com_transaction_ioctl_rsp *pSMBr;
- cFYI(1, ("In Windows reparse style QueryLink for path %s", searchName));
+ cFYI(1, "In Windows reparse style QueryLink for path %s", searchName);
rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
@@ -2583,7 +2588,7 @@ CIFSSMBQueryReparseLinkInfo(const int xid, struct cifsTconInfo *tcon,
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cFYI(1, ("Send error in QueryReparseLinkInfo = %d", rc));
+ cFYI(1, "Send error in QueryReparseLinkInfo = %d", rc);
} else { /* decode response */
__u32 data_offset = le32_to_cpu(pSMBr->DataOffset);
__u32 data_count = le32_to_cpu(pSMBr->DataCount);
@@ -2607,7 +2612,7 @@ CIFSSMBQueryReparseLinkInfo(const int xid, struct cifsTconInfo *tcon,
if ((reparse_buf->LinkNamesBuf +
reparse_buf->TargetNameOffset +
reparse_buf->TargetNameLen) > end_of_smb) {
- cFYI(1, ("reparse buf beyond SMB"));
+ cFYI(1, "reparse buf beyond SMB");
rc = -EIO;
goto qreparse_out;
}
@@ -2628,12 +2633,12 @@ CIFSSMBQueryReparseLinkInfo(const int xid, struct cifsTconInfo *tcon,
}
} else {
rc = -EIO;
- cFYI(1, ("Invalid return data count on "
- "get reparse info ioctl"));
+ cFYI(1, "Invalid return data count on "
+ "get reparse info ioctl");
}
symlinkinfo[buflen] = 0; /* just in case so the caller
does not go off the end of the buffer */
- cFYI(1, ("readlink result - %s", symlinkinfo));
+ cFYI(1, "readlink result - %s", symlinkinfo);
}
qreparse_out:
@@ -2656,7 +2661,7 @@ static void cifs_convert_ace(posix_acl_xattr_entry *ace,
ace->e_perm = cpu_to_le16(cifs_ace->cifs_e_perm);
ace->e_tag = cpu_to_le16(cifs_ace->cifs_e_tag);
ace->e_id = cpu_to_le32(le64_to_cpu(cifs_ace->cifs_uid));
- /* cFYI(1,("perm %d tag %d id %d",ace->e_perm,ace->e_tag,ace->e_id)); */
+ /* cFYI(1, "perm %d tag %d id %d",ace->e_perm,ace->e_tag,ace->e_id); */
return;
}
@@ -2682,8 +2687,8 @@ static int cifs_copy_posix_acl(char *trgt, char *src, const int buflen,
size += sizeof(struct cifs_posix_ace) * count;
/* check if we would go beyond end of SMB */
if (size_of_data_area < size) {
- cFYI(1, ("bad CIFS POSIX ACL size %d vs. %d",
- size_of_data_area, size));
+ cFYI(1, "bad CIFS POSIX ACL size %d vs. %d",
+ size_of_data_area, size);
return -EINVAL;
}
} else if (acl_type & ACL_TYPE_DEFAULT) {
@@ -2730,7 +2735,7 @@ static __u16 convert_ace_to_cifs_ace(struct cifs_posix_ace *cifs_ace,
cifs_ace->cifs_uid = cpu_to_le64(-1);
} else
cifs_ace->cifs_uid = cpu_to_le64(le32_to_cpu(local_ace->e_id));
- /*cFYI(1,("perm %d tag %d id %d",ace->e_perm,ace->e_tag,ace->e_id));*/
+ /*cFYI(1, "perm %d tag %d id %d",ace->e_perm,ace->e_tag,ace->e_id);*/
return rc;
}
@@ -2748,12 +2753,12 @@ static __u16 ACL_to_cifs_posix(char *parm_data, const char *pACL,
return 0;
count = posix_acl_xattr_count((size_t)buflen);
- cFYI(1, ("setting acl with %d entries from buf of length %d and "
+ cFYI(1, "setting acl with %d entries from buf of length %d and "
"version of %d",
- count, buflen, le32_to_cpu(local_acl->a_version)));
+ count, buflen, le32_to_cpu(local_acl->a_version));
if (le32_to_cpu(local_acl->a_version) != 2) {
- cFYI(1, ("unknown POSIX ACL version %d",
- le32_to_cpu(local_acl->a_version)));
+ cFYI(1, "unknown POSIX ACL version %d",
+ le32_to_cpu(local_acl->a_version));
return 0;
}
cifs_acl->version = cpu_to_le16(1);
@@ -2762,7 +2767,7 @@ static __u16 ACL_to_cifs_posix(char *parm_data, const char *pACL,
else if (acl_type == ACL_TYPE_DEFAULT)
cifs_acl->default_entry_count = cpu_to_le16(count);
else {
- cFYI(1, ("unknown ACL type %d", acl_type));
+ cFYI(1, "unknown ACL type %d", acl_type);
return 0;
}
for (i = 0; i < count; i++) {
@@ -2795,7 +2800,7 @@ CIFSSMBGetPosixACL(const int xid, struct cifsTconInfo *tcon,
int name_len;
__u16 params, byte_count;
- cFYI(1, ("In GetPosixACL (Unix) for path %s", searchName));
+ cFYI(1, "In GetPosixACL (Unix) for path %s", searchName);
queryAclRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
@@ -2847,7 +2852,7 @@ queryAclRetry:
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_acl_get);
if (rc) {
- cFYI(1, ("Send error in Query POSIX ACL = %d", rc));
+ cFYI(1, "Send error in Query POSIX ACL = %d", rc);
} else {
/* decode response */
@@ -2884,7 +2889,7 @@ CIFSSMBSetPosixACL(const int xid, struct cifsTconInfo *tcon,
int bytes_returned = 0;
__u16 params, byte_count, data_count, param_offset, offset;
- cFYI(1, ("In SetPosixACL (Unix) for path %s", fileName));
+ cFYI(1, "In SetPosixACL (Unix) for path %s", fileName);
setAclRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -2939,7 +2944,7 @@ setAclRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc)
- cFYI(1, ("Set POSIX ACL returned %d", rc));
+ cFYI(1, "Set POSIX ACL returned %d", rc);
setACLerrorExit:
cifs_buf_release(pSMB);
@@ -2959,7 +2964,7 @@ CIFSGetExtAttr(const int xid, struct cifsTconInfo *tcon,
int bytes_returned;
__u16 params, byte_count;
- cFYI(1, ("In GetExtAttr"));
+ cFYI(1, "In GetExtAttr");
if (tcon == NULL)
return -ENODEV;
@@ -2998,7 +3003,7 @@ GetExtAttrRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cFYI(1, ("error %d in GetExtAttr", rc));
+ cFYI(1, "error %d in GetExtAttr", rc);
} else {
/* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
@@ -3013,7 +3018,7 @@ GetExtAttrRetry:
struct file_chattr_info *pfinfo;
/* BB Do we need a cast or hash here ? */
if (count != 16) {
- cFYI(1, ("Illegal size ret in GetExtAttr"));
+ cFYI(1, "Illegal size ret in GetExtAttr");
rc = -EIO;
goto GetExtAttrOut;
}
@@ -3043,7 +3048,7 @@ CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid,
QUERY_SEC_DESC_REQ *pSMB;
struct kvec iov[1];
- cFYI(1, ("GetCifsACL"));
+ cFYI(1, "GetCifsACL");
*pbuflen = 0;
*acl_inf = NULL;
@@ -3068,7 +3073,7 @@ CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid,
CIFS_STD_OP);
cifs_stats_inc(&tcon->num_acl_get);
if (rc) {
- cFYI(1, ("Send error in QuerySecDesc = %d", rc));
+ cFYI(1, "Send error in QuerySecDesc = %d", rc);
} else { /* decode response */
__le32 *parm;
__u32 parm_len;
@@ -3083,7 +3088,7 @@ CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid,
goto qsec_out;
pSMBr = (struct smb_com_ntransact_rsp *)iov[0].iov_base;
- cFYI(1, ("smb %p parm %p data %p", pSMBr, parm, *acl_inf));
+ cFYI(1, "smb %p parm %p data %p", pSMBr, parm, *acl_inf);
if (le32_to_cpu(pSMBr->ParameterCount) != 4) {
rc = -EIO; /* bad smb */
@@ -3095,8 +3100,8 @@ CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid,
acl_len = le32_to_cpu(*parm);
if (acl_len != *pbuflen) {
- cERROR(1, ("acl length %d does not match %d",
- acl_len, *pbuflen));
+ cERROR(1, "acl length %d does not match %d",
+ acl_len, *pbuflen);
if (*pbuflen > acl_len)
*pbuflen = acl_len;
}
@@ -3105,7 +3110,7 @@ CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid,
header followed by the smallest SID */
if ((*pbuflen < sizeof(struct cifs_ntsd) + 8) ||
(*pbuflen >= 64 * 1024)) {
- cERROR(1, ("bad acl length %d", *pbuflen));
+ cERROR(1, "bad acl length %d", *pbuflen);
rc = -EINVAL;
*pbuflen = 0;
} else {
@@ -3179,9 +3184,9 @@ setCifsAclRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
- cFYI(1, ("SetCIFSACL bytes_returned: %d, rc: %d", bytes_returned, rc));
+ cFYI(1, "SetCIFSACL bytes_returned: %d, rc: %d", bytes_returned, rc);
if (rc)
- cFYI(1, ("Set CIFS ACL returned %d", rc));
+ cFYI(1, "Set CIFS ACL returned %d", rc);
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
@@ -3205,7 +3210,7 @@ int SMBQueryInformation(const int xid, struct cifsTconInfo *tcon,
int bytes_returned;
int name_len;
- cFYI(1, ("In SMBQPath path %s", searchName));
+ cFYI(1, "In SMBQPath path %s", searchName);
QInfRetry:
rc = smb_init(SMB_COM_QUERY_INFORMATION, 0, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -3231,7 +3236,7 @@ QInfRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cFYI(1, ("Send error in QueryInfo = %d", rc));
+ cFYI(1, "Send error in QueryInfo = %d", rc);
} else if (pFinfo) {
struct timespec ts;
__u32 time = le32_to_cpu(pSMBr->last_write_time);
@@ -3305,7 +3310,7 @@ QFileInfoRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cFYI(1, ("Send error in QPathInfo = %d", rc));
+ cFYI(1, "Send error in QPathInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
@@ -3343,7 +3348,7 @@ CIFSSMBQPathInfo(const int xid, struct cifsTconInfo *tcon,
int name_len;
__u16 params, byte_count;
-/* cFYI(1, ("In QPathInfo path %s", searchName)); */
+/* cFYI(1, "In QPathInfo path %s", searchName); */
QPathInfoRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -3393,7 +3398,7 @@ QPathInfoRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cFYI(1, ("Send error in QPathInfo = %d", rc));
+ cFYI(1, "Send error in QPathInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
@@ -3473,14 +3478,14 @@ UnixQFileInfoRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cFYI(1, ("Send error in QPathInfo = %d", rc));
+ cFYI(1, "Send error in QPathInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
if (rc || (pSMBr->ByteCount < sizeof(FILE_UNIX_BASIC_INFO))) {
- cERROR(1, ("Malformed FILE_UNIX_BASIC_INFO response.\n"
+ cERROR(1, "Malformed FILE_UNIX_BASIC_INFO response.\n"
"Unix Extensions can be disabled on mount "
- "by specifying the nosfu mount option."));
+ "by specifying the nosfu mount option.");
rc = -EIO; /* bad smb */
} else {
__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
@@ -3512,7 +3517,7 @@ CIFSSMBUnixQPathInfo(const int xid, struct cifsTconInfo *tcon,
int name_len;
__u16 params, byte_count;
- cFYI(1, ("In QPathInfo (Unix) the path %s", searchName));
+ cFYI(1, "In QPathInfo (Unix) the path %s", searchName);
UnixQPathInfoRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -3559,14 +3564,14 @@ UnixQPathInfoRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cFYI(1, ("Send error in QPathInfo = %d", rc));
+ cFYI(1, "Send error in QPathInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
if (rc || (pSMBr->ByteCount < sizeof(FILE_UNIX_BASIC_INFO))) {
- cERROR(1, ("Malformed FILE_UNIX_BASIC_INFO response.\n"
+ cERROR(1, "Malformed FILE_UNIX_BASIC_INFO response.\n"
"Unix Extensions can be disabled on mount "
- "by specifying the nosfu mount option."));
+ "by specifying the nosfu mount option.");
rc = -EIO; /* bad smb */
} else {
__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
@@ -3600,7 +3605,7 @@ CIFSFindFirst(const int xid, struct cifsTconInfo *tcon,
int name_len;
__u16 params, byte_count;
- cFYI(1, ("In FindFirst for %s", searchName));
+ cFYI(1, "In FindFirst for %s", searchName);
findFirstRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
@@ -3677,7 +3682,7 @@ findFirstRetry:
if (rc) {/* BB add logic to retry regular search if Unix search
rejected unexpectedly by server */
/* BB Add code to handle unsupported level rc */
- cFYI(1, ("Error in FindFirst = %d", rc));
+ cFYI(1, "Error in FindFirst = %d", rc);
cifs_buf_release(pSMB);
@@ -3716,7 +3721,7 @@ findFirstRetry:
lnoff = le16_to_cpu(parms->LastNameOffset);
if (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE <
lnoff) {
- cERROR(1, ("ignoring corrupt resume name"));
+ cERROR(1, "ignoring corrupt resume name");
psrch_inf->last_entry = NULL;
return rc;
}
@@ -3744,7 +3749,7 @@ int CIFSFindNext(const int xid, struct cifsTconInfo *tcon,
int bytes_returned, name_len;
__u16 params, byte_count;
- cFYI(1, ("In FindNext"));
+ cFYI(1, "In FindNext");
if (psrch_inf->endOfSearch)
return -ENOENT;
@@ -3808,7 +3813,7 @@ int CIFSFindNext(const int xid, struct cifsTconInfo *tcon,
cifs_buf_release(pSMB);
rc = 0; /* search probably was closed at end of search*/
} else
- cFYI(1, ("FindNext returned = %d", rc));
+ cFYI(1, "FindNext returned = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
@@ -3844,15 +3849,15 @@ int CIFSFindNext(const int xid, struct cifsTconInfo *tcon,
lnoff = le16_to_cpu(parms->LastNameOffset);
if (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE <
lnoff) {
- cERROR(1, ("ignoring corrupt resume name"));
+ cERROR(1, "ignoring corrupt resume name");
psrch_inf->last_entry = NULL;
return rc;
} else
psrch_inf->last_entry =
psrch_inf->srch_entries_start + lnoff;
-/* cFYI(1,("fnxt2 entries in buf %d index_of_last %d",
- psrch_inf->entries_in_buffer, psrch_inf->index_of_last_entry)); */
+/* cFYI(1, "fnxt2 entries in buf %d index_of_last %d",
+ psrch_inf->entries_in_buffer, psrch_inf->index_of_last_entry); */
/* BB fixme add unlock here */
}
@@ -3877,7 +3882,7 @@ CIFSFindClose(const int xid, struct cifsTconInfo *tcon,
int rc = 0;
FINDCLOSE_REQ *pSMB = NULL;
- cFYI(1, ("In CIFSSMBFindClose"));
+ cFYI(1, "In CIFSSMBFindClose");
rc = small_smb_init(SMB_COM_FIND_CLOSE2, 1, tcon, (void **)&pSMB);
/* no sense returning error if session restarted
@@ -3891,7 +3896,7 @@ CIFSFindClose(const int xid, struct cifsTconInfo *tcon,
pSMB->ByteCount = 0;
rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
if (rc)
- cERROR(1, ("Send error in FindClose = %d", rc));
+ cERROR(1, "Send error in FindClose = %d", rc);
cifs_stats_inc(&tcon->num_fclose);
@@ -3914,7 +3919,7 @@ CIFSGetSrvInodeNumber(const int xid, struct cifsTconInfo *tcon,
int name_len, bytes_returned;
__u16 params, byte_count;
- cFYI(1, ("In GetSrvInodeNum for %s", searchName));
+ cFYI(1, "In GetSrvInodeNum for %s", searchName);
if (tcon == NULL)
return -ENODEV;
@@ -3964,7 +3969,7 @@ GetInodeNumberRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cFYI(1, ("error %d in QueryInternalInfo", rc));
+ cFYI(1, "error %d in QueryInternalInfo", rc);
} else {
/* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
@@ -3979,7 +3984,7 @@ GetInodeNumberRetry:
struct file_internal_info *pfinfo;
/* BB Do we need a cast or hash here ? */
if (count < 8) {
- cFYI(1, ("Illegal size ret in QryIntrnlInf"));
+ cFYI(1, "Illegal size ret in QryIntrnlInf");
rc = -EIO;
goto GetInodeNumOut;
}
@@ -4020,16 +4025,16 @@ parse_DFS_referrals(TRANSACTION2_GET_DFS_REFER_RSP *pSMBr,
*num_of_nodes = le16_to_cpu(pSMBr->NumberOfReferrals);
if (*num_of_nodes < 1) {
- cERROR(1, ("num_referrals: must be at least > 0,"
- "but we get num_referrals = %d\n", *num_of_nodes));
+ cERROR(1, "num_referrals: must be at least > 0,"
+ "but we get num_referrals = %d\n", *num_of_nodes);
rc = -EINVAL;
goto parse_DFS_referrals_exit;
}
ref = (struct dfs_referral_level_3 *) &(pSMBr->referrals);
if (ref->VersionNumber != cpu_to_le16(3)) {
- cERROR(1, ("Referrals of V%d version are not supported,"
- "should be V3", le16_to_cpu(ref->VersionNumber)));
+ cERROR(1, "Referrals of V%d version are not supported,"
+ "should be V3", le16_to_cpu(ref->VersionNumber));
rc = -EINVAL;
goto parse_DFS_referrals_exit;
}
@@ -4038,14 +4043,14 @@ parse_DFS_referrals(TRANSACTION2_GET_DFS_REFER_RSP *pSMBr,
data_end = (char *)(&(pSMBr->PathConsumed)) +
le16_to_cpu(pSMBr->t2.DataCount);
- cFYI(1, ("num_referrals: %d dfs flags: 0x%x ... \n",
+ cFYI(1, "num_referrals: %d dfs flags: 0x%x ...\n",
*num_of_nodes,
- le32_to_cpu(pSMBr->DFSFlags)));
+ le32_to_cpu(pSMBr->DFSFlags));
*target_nodes = kzalloc(sizeof(struct dfs_info3_param) *
*num_of_nodes, GFP_KERNEL);
if (*target_nodes == NULL) {
- cERROR(1, ("Failed to allocate buffer for target_nodes\n"));
+ cERROR(1, "Failed to allocate buffer for target_nodes\n");
rc = -ENOMEM;
goto parse_DFS_referrals_exit;
}
@@ -4121,7 +4126,7 @@ CIFSGetDFSRefer(const int xid, struct cifsSesInfo *ses,
*num_of_nodes = 0;
*target_nodes = NULL;
- cFYI(1, ("In GetDFSRefer the path %s", searchName));
+ cFYI(1, "In GetDFSRefer the path %s", searchName);
if (ses == NULL)
return -ENODEV;
getDFSRetry:
@@ -4188,7 +4193,7 @@ getDFSRetry:
rc = SendReceive(xid, ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cFYI(1, ("Send error in GetDFSRefer = %d", rc));
+ cFYI(1, "Send error in GetDFSRefer = %d", rc);
goto GetDFSRefExit;
}
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
@@ -4199,9 +4204,9 @@ getDFSRetry:
goto GetDFSRefExit;
}
- cFYI(1, ("Decoding GetDFSRefer response BCC: %d Offset %d",
+ cFYI(1, "Decoding GetDFSRefer response BCC: %d Offset %d",
pSMBr->ByteCount,
- le16_to_cpu(pSMBr->t2.DataOffset)));
+ le16_to_cpu(pSMBr->t2.DataOffset));
/* parse returned result into more usable form */
rc = parse_DFS_referrals(pSMBr, num_of_nodes,
@@ -4229,7 +4234,7 @@ SMBOldQFSInfo(const int xid, struct cifsTconInfo *tcon, struct kstatfs *FSData)
int bytes_returned = 0;
__u16 params, byte_count;
- cFYI(1, ("OldQFSInfo"));
+ cFYI(1, "OldQFSInfo");
oldQFSInfoRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -4262,7 +4267,7 @@ oldQFSInfoRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cFYI(1, ("Send error in QFSInfo = %d", rc));
+ cFYI(1, "Send error in QFSInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
@@ -4270,8 +4275,8 @@ oldQFSInfoRetry:
rc = -EIO; /* bad smb */
else {
__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
- cFYI(1, ("qfsinf resp BCC: %d Offset %d",
- pSMBr->ByteCount, data_offset));
+ cFYI(1, "qfsinf resp BCC: %d Offset %d",
+ pSMBr->ByteCount, data_offset);
response_data = (FILE_SYSTEM_ALLOC_INFO *)
(((char *) &pSMBr->hdr.Protocol) + data_offset);
@@ -4283,11 +4288,10 @@ oldQFSInfoRetry:
le32_to_cpu(response_data->TotalAllocationUnits);
FSData->f_bfree = FSData->f_bavail =
le32_to_cpu(response_data->FreeAllocationUnits);
- cFYI(1,
- ("Blocks: %lld Free: %lld Block size %ld",
- (unsigned long long)FSData->f_blocks,
- (unsigned long long)FSData->f_bfree,
- FSData->f_bsize));
+ cFYI(1, "Blocks: %lld Free: %lld Block size %ld",
+ (unsigned long long)FSData->f_blocks,
+ (unsigned long long)FSData->f_bfree,
+ FSData->f_bsize);
}
}
cifs_buf_release(pSMB);
@@ -4309,7 +4313,7 @@ CIFSSMBQFSInfo(const int xid, struct cifsTconInfo *tcon, struct kstatfs *FSData)
int bytes_returned = 0;
__u16 params, byte_count;
- cFYI(1, ("In QFSInfo"));
+ cFYI(1, "In QFSInfo");
QFSInfoRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -4342,7 +4346,7 @@ QFSInfoRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cFYI(1, ("Send error in QFSInfo = %d", rc));
+ cFYI(1, "Send error in QFSInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
@@ -4363,11 +4367,10 @@ QFSInfoRetry:
le64_to_cpu(response_data->TotalAllocationUnits);
FSData->f_bfree = FSData->f_bavail =
le64_to_cpu(response_data->FreeAllocationUnits);
- cFYI(1,
- ("Blocks: %lld Free: %lld Block size %ld",
- (unsigned long long)FSData->f_blocks,
- (unsigned long long)FSData->f_bfree,
- FSData->f_bsize));
+ cFYI(1, "Blocks: %lld Free: %lld Block size %ld",
+ (unsigned long long)FSData->f_blocks,
+ (unsigned long long)FSData->f_bfree,
+ FSData->f_bsize);
}
}
cifs_buf_release(pSMB);
@@ -4389,7 +4392,7 @@ CIFSSMBQFSAttributeInfo(const int xid, struct cifsTconInfo *tcon)
int bytes_returned = 0;
__u16 params, byte_count;
- cFYI(1, ("In QFSAttributeInfo"));
+ cFYI(1, "In QFSAttributeInfo");
QFSAttributeRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -4423,7 +4426,7 @@ QFSAttributeRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cERROR(1, ("Send error in QFSAttributeInfo = %d", rc));
+ cERROR(1, "Send error in QFSAttributeInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
@@ -4459,7 +4462,7 @@ CIFSSMBQFSDeviceInfo(const int xid, struct cifsTconInfo *tcon)
int bytes_returned = 0;
__u16 params, byte_count;
- cFYI(1, ("In QFSDeviceInfo"));
+ cFYI(1, "In QFSDeviceInfo");
QFSDeviceRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -4494,7 +4497,7 @@ QFSDeviceRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cFYI(1, ("Send error in QFSDeviceInfo = %d", rc));
+ cFYI(1, "Send error in QFSDeviceInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
@@ -4529,7 +4532,7 @@ CIFSSMBQFSUnixInfo(const int xid, struct cifsTconInfo *tcon)
int bytes_returned = 0;
__u16 params, byte_count;
- cFYI(1, ("In QFSUnixInfo"));
+ cFYI(1, "In QFSUnixInfo");
QFSUnixRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -4563,7 +4566,7 @@ QFSUnixRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cERROR(1, ("Send error in QFSUnixInfo = %d", rc));
+ cERROR(1, "Send error in QFSUnixInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
@@ -4598,7 +4601,7 @@ CIFSSMBSetFSUnixInfo(const int xid, struct cifsTconInfo *tcon, __u64 cap)
int bytes_returned = 0;
__u16 params, param_offset, offset, byte_count;
- cFYI(1, ("In SETFSUnixInfo"));
+ cFYI(1, "In SETFSUnixInfo");
SETFSUnixRetry:
/* BB switch to small buf init to save memory */
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
@@ -4646,7 +4649,7 @@ SETFSUnixRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cERROR(1, ("Send error in SETFSUnixInfo = %d", rc));
+ cERROR(1, "Send error in SETFSUnixInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
if (rc)
@@ -4674,7 +4677,7 @@ CIFSSMBQFSPosixInfo(const int xid, struct cifsTconInfo *tcon,
int bytes_returned = 0;
__u16 params, byte_count;
- cFYI(1, ("In QFSPosixInfo"));
+ cFYI(1, "In QFSPosixInfo");
QFSPosixRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -4708,7 +4711,7 @@ QFSPosixRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cFYI(1, ("Send error in QFSUnixInfo = %d", rc));
+ cFYI(1, "Send error in QFSUnixInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
@@ -4768,7 +4771,7 @@ CIFSSMBSetEOF(const int xid, struct cifsTconInfo *tcon, const char *fileName,
int bytes_returned = 0;
__u16 params, byte_count, data_count, param_offset, offset;
- cFYI(1, ("In SetEOF"));
+ cFYI(1, "In SetEOF");
SetEOFRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -4834,7 +4837,7 @@ SetEOFRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc)
- cFYI(1, ("SetPathInfo (file size) returned %d", rc));
+ cFYI(1, "SetPathInfo (file size) returned %d", rc);
cifs_buf_release(pSMB);
@@ -4854,8 +4857,8 @@ CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size,
int rc = 0;
__u16 params, param_offset, offset, byte_count, count;
- cFYI(1, ("SetFileSize (via SetFileInfo) %lld",
- (long long)size));
+ cFYI(1, "SetFileSize (via SetFileInfo) %lld",
+ (long long)size);
rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB);
if (rc)
@@ -4914,9 +4917,7 @@ CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size,
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
if (rc) {
- cFYI(1,
- ("Send error in SetFileInfo (SetFileSize) = %d",
- rc));
+ cFYI(1, "Send error in SetFileInfo (SetFileSize) = %d", rc);
}
/* Note: On -EAGAIN error only caller can retry on handle based calls
@@ -4940,7 +4941,7 @@ CIFSSMBSetFileInfo(const int xid, struct cifsTconInfo *tcon,
int rc = 0;
__u16 params, param_offset, offset, byte_count, count;
- cFYI(1, ("Set Times (via SetFileInfo)"));
+ cFYI(1, "Set Times (via SetFileInfo)");
rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB);
if (rc)
@@ -4985,7 +4986,7 @@ CIFSSMBSetFileInfo(const int xid, struct cifsTconInfo *tcon,
memcpy(data_offset, data, sizeof(FILE_BASIC_INFO));
rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
if (rc)
- cFYI(1, ("Send error in Set Time (SetFileInfo) = %d", rc));
+ cFYI(1, "Send error in Set Time (SetFileInfo) = %d", rc);
/* Note: On -EAGAIN error only caller can retry on handle based calls
since file handle passed in no longer valid */
@@ -5002,7 +5003,7 @@ CIFSSMBSetFileDisposition(const int xid, struct cifsTconInfo *tcon,
int rc = 0;
__u16 params, param_offset, offset, byte_count, count;
- cFYI(1, ("Set File Disposition (via SetFileInfo)"));
+ cFYI(1, "Set File Disposition (via SetFileInfo)");
rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB);
if (rc)
@@ -5044,7 +5045,7 @@ CIFSSMBSetFileDisposition(const int xid, struct cifsTconInfo *tcon,
*data_offset = delete_file ? 1 : 0;
rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
if (rc)
- cFYI(1, ("Send error in SetFileDisposition = %d", rc));
+ cFYI(1, "Send error in SetFileDisposition = %d", rc);
return rc;
}
@@ -5062,7 +5063,7 @@ CIFSSMBSetPathInfo(const int xid, struct cifsTconInfo *tcon,
char *data_offset;
__u16 params, param_offset, offset, byte_count, count;
- cFYI(1, ("In SetTimes"));
+ cFYI(1, "In SetTimes");
SetTimesRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
@@ -5118,7 +5119,7 @@ SetTimesRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc)
- cFYI(1, ("SetPathInfo (times) returned %d", rc));
+ cFYI(1, "SetPathInfo (times) returned %d", rc);
cifs_buf_release(pSMB);
@@ -5143,7 +5144,7 @@ CIFSSMBSetAttrLegacy(int xid, struct cifsTconInfo *tcon, char *fileName,
int bytes_returned;
int name_len;
- cFYI(1, ("In SetAttrLegacy"));
+ cFYI(1, "In SetAttrLegacy");
SetAttrLgcyRetry:
rc = smb_init(SMB_COM_SETATTR, 8, tcon, (void **) &pSMB,
@@ -5169,7 +5170,7 @@ SetAttrLgcyRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc)
- cFYI(1, ("Error in LegacySetAttr = %d", rc));
+ cFYI(1, "Error in LegacySetAttr = %d", rc);
cifs_buf_release(pSMB);
@@ -5231,7 +5232,7 @@ CIFSSMBUnixSetFileInfo(const int xid, struct cifsTconInfo *tcon,
int rc = 0;
u16 params, param_offset, offset, byte_count, count;
- cFYI(1, ("Set Unix Info (via SetFileInfo)"));
+ cFYI(1, "Set Unix Info (via SetFileInfo)");
rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB);
if (rc)
@@ -5276,7 +5277,7 @@ CIFSSMBUnixSetFileInfo(const int xid, struct cifsTconInfo *tcon,
rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
if (rc)
- cFYI(1, ("Send error in Set Time (SetFileInfo) = %d", rc));
+ cFYI(1, "Send error in Set Time (SetFileInfo) = %d", rc);
/* Note: On -EAGAIN error only caller can retry on handle based calls
since file handle passed in no longer valid */
@@ -5297,7 +5298,7 @@ CIFSSMBUnixSetPathInfo(const int xid, struct cifsTconInfo *tcon, char *fileName,
FILE_UNIX_BASIC_INFO *data_offset;
__u16 params, param_offset, offset, count, byte_count;
- cFYI(1, ("In SetUID/GID/Mode"));
+ cFYI(1, "In SetUID/GID/Mode");
setPermsRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -5353,7 +5354,7 @@ setPermsRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc)
- cFYI(1, ("SetPathInfo (perms) returned %d", rc));
+ cFYI(1, "SetPathInfo (perms) returned %d", rc);
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
@@ -5372,7 +5373,7 @@ int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon,
struct dir_notify_req *dnotify_req;
int bytes_returned;
- cFYI(1, ("In CIFSSMBNotify for file handle %d", (int)netfid));
+ cFYI(1, "In CIFSSMBNotify for file handle %d", (int)netfid);
rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
@@ -5406,7 +5407,7 @@ int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon,
(struct smb_hdr *)pSMBr, &bytes_returned,
CIFS_ASYNC_OP);
if (rc) {
- cFYI(1, ("Error in Notify = %d", rc));
+ cFYI(1, "Error in Notify = %d", rc);
} else {
/* Add file to outstanding requests */
/* BB change to kmem cache alloc */
@@ -5462,7 +5463,7 @@ CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon,
char *end_of_smb;
__u16 params, byte_count, data_offset;
- cFYI(1, ("In Query All EAs path %s", searchName));
+ cFYI(1, "In Query All EAs path %s", searchName);
QAllEAsRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -5509,7 +5510,7 @@ QAllEAsRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cFYI(1, ("Send error in QueryAllEAs = %d", rc));
+ cFYI(1, "Send error in QueryAllEAs = %d", rc);
goto QAllEAsOut;
}
@@ -5537,16 +5538,16 @@ QAllEAsRetry:
(((char *) &pSMBr->hdr.Protocol) + data_offset);
list_len = le32_to_cpu(ea_response_data->list_len);
- cFYI(1, ("ea length %d", list_len));
+ cFYI(1, "ea length %d", list_len);
if (list_len <= 8) {
- cFYI(1, ("empty EA list returned from server"));
+ cFYI(1, "empty EA list returned from server");
goto QAllEAsOut;
}
/* make sure list_len doesn't go past end of SMB */
end_of_smb = (char *)pByteArea(&pSMBr->hdr) + BCC(&pSMBr->hdr);
if ((char *)ea_response_data + list_len > end_of_smb) {
- cFYI(1, ("EA list appears to go beyond SMB"));
+ cFYI(1, "EA list appears to go beyond SMB");
rc = -EIO;
goto QAllEAsOut;
}
@@ -5563,7 +5564,7 @@ QAllEAsRetry:
temp_ptr += 4;
/* make sure we can read name_len and value_len */
if (list_len < 0) {
- cFYI(1, ("EA entry goes beyond length of list"));
+ cFYI(1, "EA entry goes beyond length of list");
rc = -EIO;
goto QAllEAsOut;
}
@@ -5572,7 +5573,7 @@ QAllEAsRetry:
value_len = le16_to_cpu(temp_fea->value_len);
list_len -= name_len + 1 + value_len;
if (list_len < 0) {
- cFYI(1, ("EA entry goes beyond length of list"));
+ cFYI(1, "EA entry goes beyond length of list");
rc = -EIO;
goto QAllEAsOut;
}
@@ -5639,7 +5640,7 @@ CIFSSMBSetEA(const int xid, struct cifsTconInfo *tcon, const char *fileName,
int bytes_returned = 0;
__u16 params, param_offset, byte_count, offset, count;
- cFYI(1, ("In SetEA"));
+ cFYI(1, "In SetEA");
SetEARetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -5721,7 +5722,7 @@ SetEARetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc)
- cFYI(1, ("SetPathInfo (EA) returned %d", rc));
+ cFYI(1, "SetPathInfo (EA) returned %d", rc);
cifs_buf_release(pSMB);
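
All of the cifssmb.c hunks above make the same mechanical change: the old cFYI()/cERROR() calls wrapped their arguments in an extra pair of parentheses, while the new form passes a printf-style format string and its arguments directly. The following is a minimal, self-contained sketch of the two calling conventions; the macro bodies are simplified stand-ins for illustration only, not the real definitions from fs/cifs/cifs_debug.h.

/* Illustrative sketch only -- simplified stand-ins for the kernel's
 * cFYI()/cERROR() macros, to show the call-style change in this patch.
 */
#include <stdio.h>

/* Old style: the whole argument list was wrapped in an extra pair of
 * parentheses, so the macro forwarded one parenthesized token to printf.
 */
#define cFYI_old(set, fmt_and_args) \
	do { if (set) printf fmt_and_args; } while (0)

/* New style: a variadic macro takes the format string directly. */
#define cFYI_new(set, fmt, ...) \
	do { if (set) printf(fmt "\n", ##__VA_ARGS__); } while (0)

int main(void)
{
	int rc = -22;

	cFYI_old(1, ("Send error in SetFileInfo = %d\n", rc)); /* before */
	cFYI_new(1, "Send error in SetFileInfo = %d", rc);      /* after  */
	return 0;
}

With the variadic form the message no longer needs the double parentheses, which is what every paired -/+ line in these hunks reflects.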
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index d9566bf8f917..2208f06e4c45 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -102,6 +102,7 @@ struct smb_vol {
bool sockopt_tcp_nodelay:1;
unsigned short int port;
char *prepath;
+ struct nls_table *local_nls;
};
static int ipv4_connect(struct TCP_Server_Info *server);
@@ -135,7 +136,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
spin_unlock(&GlobalMid_Lock);
server->maxBuf = 0;
- cFYI(1, ("Reconnecting tcp session"));
+ cFYI(1, "Reconnecting tcp session");
/* before reconnecting the tcp session, mark the smb session (uid)
and the tid bad so they are not used until reconnected */
@@ -153,12 +154,12 @@ cifs_reconnect(struct TCP_Server_Info *server)
/* do not want to be sending data on a socket we are freeing */
mutex_lock(&server->srv_mutex);
if (server->ssocket) {
- cFYI(1, ("State: 0x%x Flags: 0x%lx", server->ssocket->state,
- server->ssocket->flags));
+ cFYI(1, "State: 0x%x Flags: 0x%lx", server->ssocket->state,
+ server->ssocket->flags);
kernel_sock_shutdown(server->ssocket, SHUT_WR);
- cFYI(1, ("Post shutdown state: 0x%x Flags: 0x%lx",
+ cFYI(1, "Post shutdown state: 0x%x Flags: 0x%lx",
server->ssocket->state,
- server->ssocket->flags));
+ server->ssocket->flags);
sock_release(server->ssocket);
server->ssocket = NULL;
}
@@ -187,7 +188,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
else
rc = ipv4_connect(server);
if (rc) {
- cFYI(1, ("reconnect error %d", rc));
+ cFYI(1, "reconnect error %d", rc);
msleep(3000);
} else {
atomic_inc(&tcpSesReconnectCount);
@@ -223,7 +224,7 @@ static int check2ndT2(struct smb_hdr *pSMB, unsigned int maxBufSize)
/* check for plausible wct, bcc and t2 data and parm sizes */
/* check for parm and data offset going beyond end of smb */
if (pSMB->WordCount != 10) { /* coalesce_t2 depends on this */
- cFYI(1, ("invalid transact2 word count"));
+ cFYI(1, "invalid transact2 word count");
return -EINVAL;
}
@@ -237,15 +238,15 @@ static int check2ndT2(struct smb_hdr *pSMB, unsigned int maxBufSize)
if (remaining == 0)
return 0;
else if (remaining < 0) {
- cFYI(1, ("total data %d smaller than data in frame %d",
- total_data_size, data_in_this_rsp));
+ cFYI(1, "total data %d smaller than data in frame %d",
+ total_data_size, data_in_this_rsp);
return -EINVAL;
} else {
- cFYI(1, ("missing %d bytes from transact2, check next response",
- remaining));
+ cFYI(1, "missing %d bytes from transact2, check next response",
+ remaining);
if (total_data_size > maxBufSize) {
- cERROR(1, ("TotalDataSize %d is over maximum buffer %d",
- total_data_size, maxBufSize));
+ cERROR(1, "TotalDataSize %d is over maximum buffer %d",
+ total_data_size, maxBufSize);
return -EINVAL;
}
return remaining;
@@ -267,7 +268,7 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
total_data_size = le16_to_cpu(pSMBt->t2_rsp.TotalDataCount);
if (total_data_size != le16_to_cpu(pSMB2->t2_rsp.TotalDataCount)) {
- cFYI(1, ("total data size of primary and secondary t2 differ"));
+ cFYI(1, "total data size of primary and secondary t2 differ");
}
total_in_buf = le16_to_cpu(pSMBt->t2_rsp.DataCount);
@@ -282,7 +283,7 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
total_in_buf2 = le16_to_cpu(pSMB2->t2_rsp.DataCount);
if (remaining < total_in_buf2) {
- cFYI(1, ("transact2 2nd response contains too much data"));
+ cFYI(1, "transact2 2nd response contains too much data");
}
/* find end of first SMB data area */
@@ -311,7 +312,7 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
pTargetSMB->smb_buf_length = byte_count;
if (remaining == total_in_buf2) {
- cFYI(1, ("found the last secondary response"));
+ cFYI(1, "found the last secondary response");
return 0; /* we are done */
} else /* more responses to go */
return 1;
@@ -339,7 +340,7 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
int reconnect;
current->flags |= PF_MEMALLOC;
- cFYI(1, ("Demultiplex PID: %d", task_pid_nr(current)));
+ cFYI(1, "Demultiplex PID: %d", task_pid_nr(current));
length = atomic_inc_return(&tcpSesAllocCount);
if (length > 1)
@@ -353,7 +354,7 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
if (bigbuf == NULL) {
bigbuf = cifs_buf_get();
if (!bigbuf) {
- cERROR(1, ("No memory for large SMB response"));
+ cERROR(1, "No memory for large SMB response");
msleep(3000);
/* retry will check if exiting */
continue;
@@ -366,7 +367,7 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
if (smallbuf == NULL) {
smallbuf = cifs_small_buf_get();
if (!smallbuf) {
- cERROR(1, ("No memory for SMB response"));
+ cERROR(1, "No memory for SMB response");
msleep(1000);
/* retry will check if exiting */
continue;
@@ -391,9 +392,9 @@ incomplete_rcv:
if (server->tcpStatus == CifsExiting) {
break;
} else if (server->tcpStatus == CifsNeedReconnect) {
- cFYI(1, ("Reconnect after server stopped responding"));
+ cFYI(1, "Reconnect after server stopped responding");
cifs_reconnect(server);
- cFYI(1, ("call to reconnect done"));
+ cFYI(1, "call to reconnect done");
csocket = server->ssocket;
continue;
} else if ((length == -ERESTARTSYS) || (length == -EAGAIN)) {
@@ -411,7 +412,7 @@ incomplete_rcv:
continue;
} else if (length <= 0) {
if (server->tcpStatus == CifsNew) {
- cFYI(1, ("tcp session abend after SMBnegprot"));
+ cFYI(1, "tcp session abend after SMBnegprot");
/* some servers kill the TCP session rather than
returning an SMB negprot error, in which
case reconnecting here is not going to help,
@@ -419,18 +420,18 @@ incomplete_rcv:
break;
}
if (!try_to_freeze() && (length == -EINTR)) {
- cFYI(1, ("cifsd thread killed"));
+ cFYI(1, "cifsd thread killed");
break;
}
- cFYI(1, ("Reconnect after unexpected peek error %d",
- length));
+ cFYI(1, "Reconnect after unexpected peek error %d",
+ length);
cifs_reconnect(server);
csocket = server->ssocket;
wake_up(&server->response_q);
continue;
} else if (length < pdu_length) {
- cFYI(1, ("requested %d bytes but only got %d bytes",
- pdu_length, length));
+ cFYI(1, "requested %d bytes but only got %d bytes",
+ pdu_length, length);
pdu_length -= length;
msleep(1);
goto incomplete_rcv;
@@ -450,18 +451,18 @@ incomplete_rcv:
pdu_length = be32_to_cpu((__force __be32)smb_buffer->smb_buf_length);
smb_buffer->smb_buf_length = pdu_length;
- cFYI(1, ("rfc1002 length 0x%x", pdu_length+4));
+ cFYI(1, "rfc1002 length 0x%x", pdu_length+4);
if (temp == (char) RFC1002_SESSION_KEEP_ALIVE) {
continue;
} else if (temp == (char)RFC1002_POSITIVE_SESSION_RESPONSE) {
- cFYI(1, ("Good RFC 1002 session rsp"));
+ cFYI(1, "Good RFC 1002 session rsp");
continue;
} else if (temp == (char)RFC1002_NEGATIVE_SESSION_RESPONSE) {
/* we get this from Windows 98 instead of
an error on SMB negprot response */
- cFYI(1, ("Negative RFC1002 Session Response Error 0x%x)",
- pdu_length));
+ cFYI(1, "Negative RFC1002 Session Response Error 0x%x)",
+ pdu_length);
if (server->tcpStatus == CifsNew) {
/* if nack on negprot (rather than
ret of smb negprot error) reconnecting
@@ -484,7 +485,7 @@ incomplete_rcv:
continue;
}
} else if (temp != (char) 0) {
- cERROR(1, ("Unknown RFC 1002 frame"));
+ cERROR(1, "Unknown RFC 1002 frame");
cifs_dump_mem(" Received Data: ", (char *)smb_buffer,
length);
cifs_reconnect(server);
@@ -495,8 +496,8 @@ incomplete_rcv:
/* else we have an SMB response */
if ((pdu_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) ||
(pdu_length < sizeof(struct smb_hdr) - 1 - 4)) {
- cERROR(1, ("Invalid size SMB length %d pdu_length %d",
- length, pdu_length+4));
+ cERROR(1, "Invalid size SMB length %d pdu_length %d",
+ length, pdu_length+4);
cifs_reconnect(server);
csocket = server->ssocket;
wake_up(&server->response_q);
@@ -539,8 +540,8 @@ incomplete_rcv:
length = 0;
continue;
} else if (length <= 0) {
- cERROR(1, ("Received no data, expecting %d",
- pdu_length - total_read));
+ cERROR(1, "Received no data, expecting %d",
+ pdu_length - total_read);
cifs_reconnect(server);
csocket = server->ssocket;
reconnect = 1;
@@ -588,7 +589,7 @@ incomplete_rcv:
}
} else {
if (!isLargeBuf) {
- cERROR(1,("1st trans2 resp needs bigbuf"));
+ cERROR(1, "1st trans2 resp needs bigbuf");
/* BB maybe we can fix this up, switch
to already allocated large buffer? */
} else {
@@ -630,8 +631,8 @@ multi_t2_fnd:
wake_up_process(task_to_wake);
} else if (!is_valid_oplock_break(smb_buffer, server) &&
!isMultiRsp) {
- cERROR(1, ("No task to wake, unknown frame received! "
- "NumMids %d", midCount.counter));
+ cERROR(1, "No task to wake, unknown frame received! "
+ "NumMids %d", midCount.counter);
cifs_dump_mem("Received Data is: ", (char *)smb_buffer,
sizeof(struct smb_hdr));
#ifdef CONFIG_CIFS_DEBUG2
@@ -708,8 +709,8 @@ multi_t2_fnd:
list_for_each(tmp, &server->pending_mid_q) {
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
if (mid_entry->midState == MID_REQUEST_SUBMITTED) {
- cFYI(1, ("Clearing Mid 0x%x - waking up ",
- mid_entry->mid));
+ cFYI(1, "Clearing Mid 0x%x - waking up ",
+ mid_entry->mid);
task_to_wake = mid_entry->tsk;
if (task_to_wake)
wake_up_process(task_to_wake);
@@ -728,7 +729,7 @@ multi_t2_fnd:
to wait at least 45 seconds before giving up
on a request getting a response and going ahead
and killing cifsd */
- cFYI(1, ("Wait for exit from demultiplex thread"));
+ cFYI(1, "Wait for exit from demultiplex thread");
msleep(46000);
/* if threads still have not exited they are probably never
coming home not much else we can do but free the memory */
@@ -849,7 +850,7 @@ cifs_parse_mount_options(char *options, const char *devname,
separator[0] = options[4];
options += 5;
} else {
- cFYI(1, ("Null separator not allowed"));
+ cFYI(1, "Null separator not allowed");
}
}
@@ -974,7 +975,7 @@ cifs_parse_mount_options(char *options, const char *devname,
}
} else if (strnicmp(data, "sec", 3) == 0) {
if (!value || !*value) {
- cERROR(1, ("no security value specified"));
+ cERROR(1, "no security value specified");
continue;
} else if (strnicmp(value, "krb5i", 5) == 0) {
vol->secFlg |= CIFSSEC_MAY_KRB5 |
@@ -982,7 +983,7 @@ cifs_parse_mount_options(char *options, const char *devname,
} else if (strnicmp(value, "krb5p", 5) == 0) {
/* vol->secFlg |= CIFSSEC_MUST_SEAL |
CIFSSEC_MAY_KRB5; */
- cERROR(1, ("Krb5 cifs privacy not supported"));
+ cERROR(1, "Krb5 cifs privacy not supported");
return 1;
} else if (strnicmp(value, "krb5", 4) == 0) {
vol->secFlg |= CIFSSEC_MAY_KRB5;
@@ -1014,7 +1015,7 @@ cifs_parse_mount_options(char *options, const char *devname,
} else if (strnicmp(value, "none", 4) == 0) {
vol->nullauth = 1;
} else {
- cERROR(1, ("bad security option: %s", value));
+ cERROR(1, "bad security option: %s", value);
return 1;
}
} else if ((strnicmp(data, "unc", 3) == 0)
@@ -1053,7 +1054,7 @@ cifs_parse_mount_options(char *options, const char *devname,
a domain name and need special handling? */
if (strnlen(value, 256) < 256) {
vol->domainname = value;
- cFYI(1, ("Domain name set"));
+ cFYI(1, "Domain name set");
} else {
printk(KERN_WARNING "CIFS: domain name too "
"long\n");
@@ -1076,7 +1077,7 @@ cifs_parse_mount_options(char *options, const char *devname,
strcpy(vol->prepath+1, value);
} else
strcpy(vol->prepath, value);
- cFYI(1, ("prefix path %s", vol->prepath));
+ cFYI(1, "prefix path %s", vol->prepath);
} else {
printk(KERN_WARNING "CIFS: prefix too long\n");
return 1;
@@ -1092,7 +1093,7 @@ cifs_parse_mount_options(char *options, const char *devname,
vol->iocharset = value;
/* if iocharset not set then load_nls_default
is used by caller */
- cFYI(1, ("iocharset set to %s", value));
+ cFYI(1, "iocharset set to %s", value);
} else {
printk(KERN_WARNING "CIFS: iocharset name "
"too long.\n");
@@ -1144,14 +1145,14 @@ cifs_parse_mount_options(char *options, const char *devname,
}
} else if (strnicmp(data, "sockopt", 5) == 0) {
if (!value || !*value) {
- cERROR(1, ("no socket option specified"));
+ cERROR(1, "no socket option specified");
continue;
} else if (strnicmp(value, "TCP_NODELAY", 11) == 0) {
vol->sockopt_tcp_nodelay = 1;
}
} else if (strnicmp(data, "netbiosname", 4) == 0) {
if (!value || !*value || (*value == ' ')) {
- cFYI(1, ("invalid (empty) netbiosname"));
+ cFYI(1, "invalid (empty) netbiosname");
} else {
memset(vol->source_rfc1001_name, 0x20, 15);
for (i = 0; i < 15; i++) {
@@ -1175,7 +1176,7 @@ cifs_parse_mount_options(char *options, const char *devname,
} else if (strnicmp(data, "servern", 7) == 0) {
/* servernetbiosname specified override *SMBSERVER */
if (!value || !*value || (*value == ' ')) {
- cFYI(1, ("empty server netbiosname specified"));
+ cFYI(1, "empty server netbiosname specified");
} else {
/* last byte, type, is 0x20 for server type */
memset(vol->target_rfc1001_name, 0x20, 16);
@@ -1434,7 +1435,7 @@ cifs_find_tcp_session(struct sockaddr_storage *addr, unsigned short int port)
++server->srv_count;
write_unlock(&cifs_tcp_ses_lock);
- cFYI(1, ("Existing tcp session with server found"));
+ cFYI(1, "Existing tcp session with server found");
return server;
}
write_unlock(&cifs_tcp_ses_lock);
@@ -1475,7 +1476,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
memset(&addr, 0, sizeof(struct sockaddr_storage));
- cFYI(1, ("UNC: %s ip: %s", volume_info->UNC, volume_info->UNCip));
+ cFYI(1, "UNC: %s ip: %s", volume_info->UNC, volume_info->UNCip);
if (volume_info->UNCip && volume_info->UNC) {
rc = cifs_convert_address(volume_info->UNCip, &addr);
@@ -1487,13 +1488,12 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
} else if (volume_info->UNCip) {
/* BB using ip addr as tcp_ses name to connect to the
DFS root below */
- cERROR(1, ("Connecting to DFS root not implemented yet"));
+ cERROR(1, "Connecting to DFS root not implemented yet");
rc = -EINVAL;
goto out_err;
} else /* which tcp_sess DFS root would we connect to */ {
- cERROR(1,
- ("CIFS mount error: No UNC path (e.g. -o "
- "unc=//192.168.1.100/public) specified"));
+ cERROR(1, "CIFS mount error: No UNC path (e.g. -o "
+ "unc=//192.168.1.100/public) specified");
rc = -EINVAL;
goto out_err;
}
@@ -1540,7 +1540,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
++tcp_ses->srv_count;
if (addr.ss_family == AF_INET6) {
- cFYI(1, ("attempting ipv6 connect"));
+ cFYI(1, "attempting ipv6 connect");
/* BB should we allow ipv6 on port 139? */
/* other OS never observed in Wild doing 139 with v6 */
sin_server6->sin6_port = htons(volume_info->port);
@@ -1554,7 +1554,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
rc = ipv4_connect(tcp_ses);
}
if (rc < 0) {
- cERROR(1, ("Error connecting to socket. Aborting operation"));
+ cERROR(1, "Error connecting to socket. Aborting operation");
goto out_err;
}
@@ -1567,7 +1567,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
tcp_ses, "cifsd");
if (IS_ERR(tcp_ses->tsk)) {
rc = PTR_ERR(tcp_ses->tsk);
- cERROR(1, ("error %d create cifsd thread", rc));
+ cERROR(1, "error %d create cifsd thread", rc);
module_put(THIS_MODULE);
goto out_err;
}
@@ -1616,6 +1616,7 @@ cifs_put_smb_ses(struct cifsSesInfo *ses)
int xid;
struct TCP_Server_Info *server = ses->server;
+ cFYI(1, "%s: ses_count=%d\n", __func__, ses->ses_count);
write_lock(&cifs_tcp_ses_lock);
if (--ses->ses_count > 0) {
write_unlock(&cifs_tcp_ses_lock);
@@ -1634,6 +1635,102 @@ cifs_put_smb_ses(struct cifsSesInfo *ses)
cifs_put_tcp_session(server);
}
+static struct cifsSesInfo *
+cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
+{
+ int rc = -ENOMEM, xid;
+ struct cifsSesInfo *ses;
+
+ xid = GetXid();
+
+ ses = cifs_find_smb_ses(server, volume_info->username);
+ if (ses) {
+ cFYI(1, "Existing smb sess found (status=%d)", ses->status);
+
+ /* existing SMB ses has a server reference already */
+ cifs_put_tcp_session(server);
+
+ mutex_lock(&ses->session_mutex);
+ rc = cifs_negotiate_protocol(xid, ses);
+ if (rc) {
+ mutex_unlock(&ses->session_mutex);
+ /* problem -- put our ses reference */
+ cifs_put_smb_ses(ses);
+ FreeXid(xid);
+ return ERR_PTR(rc);
+ }
+ if (ses->need_reconnect) {
+ cFYI(1, "Session needs reconnect");
+ rc = cifs_setup_session(xid, ses,
+ volume_info->local_nls);
+ if (rc) {
+ mutex_unlock(&ses->session_mutex);
+ /* problem -- put our reference */
+ cifs_put_smb_ses(ses);
+ FreeXid(xid);
+ return ERR_PTR(rc);
+ }
+ }
+ mutex_unlock(&ses->session_mutex);
+ FreeXid(xid);
+ return ses;
+ }
+
+ cFYI(1, "Existing smb sess not found");
+ ses = sesInfoAlloc();
+ if (ses == NULL)
+ goto get_ses_fail;
+
+ /* new SMB session uses our server ref */
+ ses->server = server;
+ if (server->addr.sockAddr6.sin6_family == AF_INET6)
+ sprintf(ses->serverName, "%pI6",
+ &server->addr.sockAddr6.sin6_addr);
+ else
+ sprintf(ses->serverName, "%pI4",
+ &server->addr.sockAddr.sin_addr.s_addr);
+
+ if (volume_info->username)
+ strncpy(ses->userName, volume_info->username,
+ MAX_USERNAME_SIZE);
+
+ /* volume_info->password freed at unmount */
+ if (volume_info->password) {
+ ses->password = kstrdup(volume_info->password, GFP_KERNEL);
+ if (!ses->password)
+ goto get_ses_fail;
+ }
+ if (volume_info->domainname) {
+ int len = strlen(volume_info->domainname);
+ ses->domainName = kmalloc(len + 1, GFP_KERNEL);
+ if (ses->domainName)
+ strcpy(ses->domainName, volume_info->domainname);
+ }
+ ses->linux_uid = volume_info->linux_uid;
+ ses->overrideSecFlg = volume_info->secFlg;
+
+ mutex_lock(&ses->session_mutex);
+ rc = cifs_negotiate_protocol(xid, ses);
+ if (!rc)
+ rc = cifs_setup_session(xid, ses, volume_info->local_nls);
+ mutex_unlock(&ses->session_mutex);
+ if (rc)
+ goto get_ses_fail;
+
+ /* success, put it on the list */
+ write_lock(&cifs_tcp_ses_lock);
+ list_add(&ses->smb_ses_list, &server->smb_ses_list);
+ write_unlock(&cifs_tcp_ses_lock);
+
+ FreeXid(xid);
+ return ses;
+
+get_ses_fail:
+ sesInfoFree(ses);
+ FreeXid(xid);
+ return ERR_PTR(rc);
+}
+
static struct cifsTconInfo *
cifs_find_tcon(struct cifsSesInfo *ses, const char *unc)
{
@@ -1662,6 +1759,7 @@ cifs_put_tcon(struct cifsTconInfo *tcon)
int xid;
struct cifsSesInfo *ses = tcon->ses;
+ cFYI(1, "%s: tc_count=%d\n", __func__, tcon->tc_count);
write_lock(&cifs_tcp_ses_lock);
if (--tcon->tc_count > 0) {
write_unlock(&cifs_tcp_ses_lock);
@@ -1679,6 +1777,80 @@ cifs_put_tcon(struct cifsTconInfo *tcon)
cifs_put_smb_ses(ses);
}
+static struct cifsTconInfo *
+cifs_get_tcon(struct cifsSesInfo *ses, struct smb_vol *volume_info)
+{
+ int rc, xid;
+ struct cifsTconInfo *tcon;
+
+ tcon = cifs_find_tcon(ses, volume_info->UNC);
+ if (tcon) {
+ cFYI(1, "Found match on UNC path");
+ /* existing tcon already has a reference */
+ cifs_put_smb_ses(ses);
+ if (tcon->seal != volume_info->seal)
+ cERROR(1, "transport encryption setting "
+ "conflicts with existing tid");
+ return tcon;
+ }
+
+ tcon = tconInfoAlloc();
+ if (tcon == NULL) {
+ rc = -ENOMEM;
+ goto out_fail;
+ }
+
+ tcon->ses = ses;
+ if (volume_info->password) {
+ tcon->password = kstrdup(volume_info->password, GFP_KERNEL);
+ if (!tcon->password) {
+ rc = -ENOMEM;
+ goto out_fail;
+ }
+ }
+
+ if (strchr(volume_info->UNC + 3, '\\') == NULL
+ && strchr(volume_info->UNC + 3, '/') == NULL) {
+ cERROR(1, "Missing share name");
+ rc = -ENODEV;
+ goto out_fail;
+ }
+
+ /* BB Do we need to wrap session_mutex around
+ * this TCon call and Unix SetFS as
+ * we do on SessSetup and reconnect? */
+ xid = GetXid();
+ rc = CIFSTCon(xid, ses, volume_info->UNC, tcon, volume_info->local_nls);
+ FreeXid(xid);
+ cFYI(1, "CIFS Tcon rc = %d", rc);
+ if (rc)
+ goto out_fail;
+
+ if (volume_info->nodfs) {
+ tcon->Flags &= ~SMB_SHARE_IS_IN_DFS;
+ cFYI(1, "DFS disabled (%d)", tcon->Flags);
+ }
+ tcon->seal = volume_info->seal;
+ /* we can have only one retry value for a connection
+ to a share so for resources mounted more than once
+ to the same server share the last value passed in
+ for the retry flag is used */
+ tcon->retry = volume_info->retry;
+ tcon->nocase = volume_info->nocase;
+ tcon->local_lease = volume_info->local_lease;
+
+ write_lock(&cifs_tcp_ses_lock);
+ list_add(&tcon->tcon_list, &ses->tcon_list);
+ write_unlock(&cifs_tcp_ses_lock);
+
+ return tcon;
+
+out_fail:
+ tconInfoFree(tcon);
+ return ERR_PTR(rc);
+}
+
+
int
get_dfs_path(int xid, struct cifsSesInfo *pSesInfo, const char *old_path,
const struct nls_table *nls_codepage, unsigned int *pnum_referrals,
@@ -1703,8 +1875,7 @@ get_dfs_path(int xid, struct cifsSesInfo *pSesInfo, const char *old_path,
strcpy(temp_unc + 2, pSesInfo->serverName);
strcpy(temp_unc + 2 + strlen(pSesInfo->serverName), "\\IPC$");
rc = CIFSTCon(xid, pSesInfo, temp_unc, NULL, nls_codepage);
- cFYI(1,
- ("CIFS Tcon rc = %d ipc_tid = %d", rc, pSesInfo->ipc_tid));
+ cFYI(1, "CIFS Tcon rc = %d ipc_tid = %d", rc, pSesInfo->ipc_tid);
kfree(temp_unc);
}
if (rc == 0)
@@ -1777,12 +1948,12 @@ ipv4_connect(struct TCP_Server_Info *server)
rc = sock_create_kern(PF_INET, SOCK_STREAM,
IPPROTO_TCP, &socket);
if (rc < 0) {
- cERROR(1, ("Error %d creating socket", rc));
+ cERROR(1, "Error %d creating socket", rc);
return rc;
}
/* BB other socket options to set KEEPALIVE, NODELAY? */
- cFYI(1, ("Socket created"));
+ cFYI(1, "Socket created");
server->ssocket = socket;
socket->sk->sk_allocation = GFP_NOFS;
cifs_reclassify_socket4(socket);
@@ -1827,7 +1998,7 @@ ipv4_connect(struct TCP_Server_Info *server)
if (!connected) {
if (orig_port)
server->addr.sockAddr.sin_port = orig_port;
- cFYI(1, ("Error %d connecting to server via ipv4", rc));
+ cFYI(1, "Error %d connecting to server via ipv4", rc);
sock_release(socket);
server->ssocket = NULL;
return rc;
@@ -1855,12 +2026,12 @@ ipv4_connect(struct TCP_Server_Info *server)
rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY,
(char *)&val, sizeof(val));
if (rc)
- cFYI(1, ("set TCP_NODELAY socket option error %d", rc));
+ cFYI(1, "set TCP_NODELAY socket option error %d", rc);
}
- cFYI(1, ("sndbuf %d rcvbuf %d rcvtimeo 0x%lx",
+ cFYI(1, "sndbuf %d rcvbuf %d rcvtimeo 0x%lx",
socket->sk->sk_sndbuf,
- socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo));
+ socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo);
/* send RFC1001 sessinit */
if (server->addr.sockAddr.sin_port == htons(RFC1001_PORT)) {
@@ -1938,13 +2109,13 @@ ipv6_connect(struct TCP_Server_Info *server)
rc = sock_create_kern(PF_INET6, SOCK_STREAM,
IPPROTO_TCP, &socket);
if (rc < 0) {
- cERROR(1, ("Error %d creating ipv6 socket", rc));
+ cERROR(1, "Error %d creating ipv6 socket", rc);
socket = NULL;
return rc;
}
/* BB other socket options to set KEEPALIVE, NODELAY? */
- cFYI(1, ("ipv6 Socket created"));
+ cFYI(1, "ipv6 Socket created");
server->ssocket = socket;
socket->sk->sk_allocation = GFP_NOFS;
cifs_reclassify_socket6(socket);
@@ -1988,7 +2159,7 @@ ipv6_connect(struct TCP_Server_Info *server)
if (!connected) {
if (orig_port)
server->addr.sockAddr6.sin6_port = orig_port;
- cFYI(1, ("Error %d connecting to server via ipv6", rc));
+ cFYI(1, "Error %d connecting to server via ipv6", rc);
sock_release(socket);
server->ssocket = NULL;
return rc;
@@ -2007,7 +2178,7 @@ ipv6_connect(struct TCP_Server_Info *server)
rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY,
(char *)&val, sizeof(val));
if (rc)
- cFYI(1, ("set TCP_NODELAY socket option error %d", rc));
+ cFYI(1, "set TCP_NODELAY socket option error %d", rc);
}
server->ssocket = socket;
@@ -2032,13 +2203,13 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
if (vol_info && vol_info->no_linux_ext) {
tcon->fsUnixInfo.Capability = 0;
tcon->unix_ext = 0; /* Unix Extensions disabled */
- cFYI(1, ("Linux protocol extensions disabled"));
+ cFYI(1, "Linux protocol extensions disabled");
return;
} else if (vol_info)
tcon->unix_ext = 1; /* Unix Extensions supported */
if (tcon->unix_ext == 0) {
- cFYI(1, ("Unix extensions disabled so not set on reconnect"));
+ cFYI(1, "Unix extensions disabled so not set on reconnect");
return;
}
@@ -2054,12 +2225,11 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
if ((saved_cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) {
if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP)
- cERROR(1, ("POSIXPATH support change"));
+ cERROR(1, "POSIXPATH support change");
cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
} else if ((cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) {
- cERROR(1, ("possible reconnect error"));
- cERROR(1,
- ("server disabled POSIX path support"));
+ cERROR(1, "possible reconnect error");
+ cERROR(1, "server disabled POSIX path support");
}
}
@@ -2067,7 +2237,7 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
if (vol_info && vol_info->no_psx_acl)
cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
else if (CIFS_UNIX_POSIX_ACL_CAP & cap) {
- cFYI(1, ("negotiated posix acl support"));
+ cFYI(1, "negotiated posix acl support");
if (sb)
sb->s_flags |= MS_POSIXACL;
}
@@ -2075,7 +2245,7 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
if (vol_info && vol_info->posix_paths == 0)
cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
else if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) {
- cFYI(1, ("negotiate posix pathnames"));
+ cFYI(1, "negotiate posix pathnames");
if (sb)
CIFS_SB(sb)->mnt_cifs_flags |=
CIFS_MOUNT_POSIX_PATHS;
@@ -2090,39 +2260,38 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
if (sb && (CIFS_SB(sb)->rsize > 127 * 1024)) {
if ((cap & CIFS_UNIX_LARGE_READ_CAP) == 0) {
CIFS_SB(sb)->rsize = 127 * 1024;
- cFYI(DBG2,
- ("larger reads not supported by srv"));
+ cFYI(DBG2, "larger reads not supported by srv");
}
}
- cFYI(1, ("Negotiate caps 0x%x", (int)cap));
+ cFYI(1, "Negotiate caps 0x%x", (int)cap);
#ifdef CONFIG_CIFS_DEBUG2
if (cap & CIFS_UNIX_FCNTL_CAP)
- cFYI(1, ("FCNTL cap"));
+ cFYI(1, "FCNTL cap");
if (cap & CIFS_UNIX_EXTATTR_CAP)
- cFYI(1, ("EXTATTR cap"));
+ cFYI(1, "EXTATTR cap");
if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP)
- cFYI(1, ("POSIX path cap"));
+ cFYI(1, "POSIX path cap");
if (cap & CIFS_UNIX_XATTR_CAP)
- cFYI(1, ("XATTR cap"));
+ cFYI(1, "XATTR cap");
if (cap & CIFS_UNIX_POSIX_ACL_CAP)
- cFYI(1, ("POSIX ACL cap"));
+ cFYI(1, "POSIX ACL cap");
if (cap & CIFS_UNIX_LARGE_READ_CAP)
- cFYI(1, ("very large read cap"));
+ cFYI(1, "very large read cap");
if (cap & CIFS_UNIX_LARGE_WRITE_CAP)
- cFYI(1, ("very large write cap"));
+ cFYI(1, "very large write cap");
#endif /* CIFS_DEBUG2 */
if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) {
if (vol_info == NULL) {
- cFYI(1, ("resetting capabilities failed"));
+ cFYI(1, "resetting capabilities failed");
} else
- cERROR(1, ("Negotiating Unix capabilities "
+ cERROR(1, "Negotiating Unix capabilities "
"with the server failed. Consider "
"mounting with the Unix Extensions\n"
"disabled, if problems are found, "
"by specifying the nounix mount "
- "option."));
+ "option.");
}
}
@@ -2152,8 +2321,8 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info,
struct cifs_sb_info *cifs_sb)
{
if (pvolume_info->rsize > CIFSMaxBufSize) {
- cERROR(1, ("rsize %d too large, using MaxBufSize",
- pvolume_info->rsize));
+ cERROR(1, "rsize %d too large, using MaxBufSize",
+ pvolume_info->rsize);
cifs_sb->rsize = CIFSMaxBufSize;
} else if ((pvolume_info->rsize) &&
(pvolume_info->rsize <= CIFSMaxBufSize))
@@ -2162,8 +2331,8 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info,
cifs_sb->rsize = CIFSMaxBufSize;
if (pvolume_info->wsize > PAGEVEC_SIZE * PAGE_CACHE_SIZE) {
- cERROR(1, ("wsize %d too large, using 4096 instead",
- pvolume_info->wsize));
+ cERROR(1, "wsize %d too large, using 4096 instead",
+ pvolume_info->wsize);
cifs_sb->wsize = 4096;
} else if (pvolume_info->wsize)
cifs_sb->wsize = pvolume_info->wsize;
@@ -2181,7 +2350,7 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info,
if (cifs_sb->rsize < 2048) {
cifs_sb->rsize = 2048;
/* Windows ME may prefer this */
- cFYI(1, ("readsize set to minimum: 2048"));
+ cFYI(1, "readsize set to minimum: 2048");
}
/* calculate prepath */
cifs_sb->prepath = pvolume_info->prepath;
@@ -2199,8 +2368,8 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info,
cifs_sb->mnt_gid = pvolume_info->linux_gid;
cifs_sb->mnt_file_mode = pvolume_info->file_mode;
cifs_sb->mnt_dir_mode = pvolume_info->dir_mode;
- cFYI(1, ("file mode: 0x%x dir mode: 0x%x",
- cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode));
+ cFYI(1, "file mode: 0x%x dir mode: 0x%x",
+ cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode);
if (pvolume_info->noperm)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM;
@@ -2229,13 +2398,13 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info,
if (pvolume_info->dynperm)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DYNPERM;
if (pvolume_info->direct_io) {
- cFYI(1, ("mounting share using direct i/o"));
+ cFYI(1, "mounting share using direct i/o");
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DIRECT_IO;
}
if ((pvolume_info->cifs_acl) && (pvolume_info->dynperm))
- cERROR(1, ("mount option dynperm ignored if cifsacl "
- "mount option supported"));
+ cERROR(1, "mount option dynperm ignored if cifsacl "
+ "mount option supported");
}
static int
@@ -2262,7 +2431,7 @@ cleanup_volume_info(struct smb_vol **pvolume_info)
{
struct smb_vol *volume_info;
- if (!pvolume_info && !*pvolume_info)
+ if (!pvolume_info || !*pvolume_info)
return;
volume_info = *pvolume_info;
@@ -2344,11 +2513,11 @@ try_mount_again:
}
if (volume_info->nullauth) {
- cFYI(1, ("null user"));
+ cFYI(1, "null user");
volume_info->username = "";
} else if (volume_info->username) {
/* BB fixme parse for domain name here */
- cFYI(1, ("Username: %s", volume_info->username));
+ cFYI(1, "Username: %s", volume_info->username);
} else {
cifserror("No username specified");
/* In userspace mount helper we can get user name from alternate
@@ -2357,20 +2526,20 @@ try_mount_again:
goto out;
}
-
/* this is needed for ASCII cp to Unicode converts */
if (volume_info->iocharset == NULL) {
- cifs_sb->local_nls = load_nls_default();
- /* load_nls_default can not return null */
+ /* load_nls_default cannot return null */
+ volume_info->local_nls = load_nls_default();
} else {
- cifs_sb->local_nls = load_nls(volume_info->iocharset);
- if (cifs_sb->local_nls == NULL) {
- cERROR(1, ("CIFS mount error: iocharset %s not found",
- volume_info->iocharset));
+ volume_info->local_nls = load_nls(volume_info->iocharset);
+ if (volume_info->local_nls == NULL) {
+ cERROR(1, "CIFS mount error: iocharset %s not found",
+ volume_info->iocharset);
rc = -ELIBACC;
goto out;
}
}
+ cifs_sb->local_nls = volume_info->local_nls;
/* get a reference to a tcp session */
srvTcp = cifs_get_tcp_session(volume_info);
@@ -2379,148 +2548,30 @@ try_mount_again:
goto out;
}
- pSesInfo = cifs_find_smb_ses(srvTcp, volume_info->username);
- if (pSesInfo) {
- cFYI(1, ("Existing smb sess found (status=%d)",
- pSesInfo->status));
- /*
- * The existing SMB session already has a reference to srvTcp,
- * so we can put back the extra one we got before
- */
- cifs_put_tcp_session(srvTcp);
-
- mutex_lock(&pSesInfo->session_mutex);
- if (pSesInfo->need_reconnect) {
- cFYI(1, ("Session needs reconnect"));
- rc = cifs_setup_session(xid, pSesInfo,
- cifs_sb->local_nls);
- }
- mutex_unlock(&pSesInfo->session_mutex);
- } else if (!rc) {
- cFYI(1, ("Existing smb sess not found"));
- pSesInfo = sesInfoAlloc();
- if (pSesInfo == NULL) {
- rc = -ENOMEM;
- goto mount_fail_check;
- }
-
- /* new SMB session uses our srvTcp ref */
- pSesInfo->server = srvTcp;
- if (srvTcp->addr.sockAddr6.sin6_family == AF_INET6)
- sprintf(pSesInfo->serverName, "%pI6",
- &srvTcp->addr.sockAddr6.sin6_addr);
- else
- sprintf(pSesInfo->serverName, "%pI4",
- &srvTcp->addr.sockAddr.sin_addr.s_addr);
-
- write_lock(&cifs_tcp_ses_lock);
- list_add(&pSesInfo->smb_ses_list, &srvTcp->smb_ses_list);
- write_unlock(&cifs_tcp_ses_lock);
-
- /* volume_info->password freed at unmount */
- if (volume_info->password) {
- pSesInfo->password = kstrdup(volume_info->password,
- GFP_KERNEL);
- if (!pSesInfo->password) {
- rc = -ENOMEM;
- goto mount_fail_check;
- }
- }
- if (volume_info->username)
- strncpy(pSesInfo->userName, volume_info->username,
- MAX_USERNAME_SIZE);
- if (volume_info->domainname) {
- int len = strlen(volume_info->domainname);
- pSesInfo->domainName = kmalloc(len + 1, GFP_KERNEL);
- if (pSesInfo->domainName)
- strcpy(pSesInfo->domainName,
- volume_info->domainname);
- }
- pSesInfo->linux_uid = volume_info->linux_uid;
- pSesInfo->overrideSecFlg = volume_info->secFlg;
- mutex_lock(&pSesInfo->session_mutex);
-
- /* BB FIXME need to pass vol->secFlgs BB */
- rc = cifs_setup_session(xid, pSesInfo,
- cifs_sb->local_nls);
- mutex_unlock(&pSesInfo->session_mutex);
+ /* get a reference to a SMB session */
+ pSesInfo = cifs_get_smb_ses(srvTcp, volume_info);
+ if (IS_ERR(pSesInfo)) {
+ rc = PTR_ERR(pSesInfo);
+ pSesInfo = NULL;
+ goto mount_fail_check;
}
- /* search for existing tcon to this server share */
- if (!rc) {
- setup_cifs_sb(volume_info, cifs_sb);
-
- tcon = cifs_find_tcon(pSesInfo, volume_info->UNC);
- if (tcon) {
- cFYI(1, ("Found match on UNC path"));
- /* existing tcon already has a reference */
- cifs_put_smb_ses(pSesInfo);
- if (tcon->seal != volume_info->seal)
- cERROR(1, ("transport encryption setting "
- "conflicts with existing tid"));
- } else {
- tcon = tconInfoAlloc();
- if (tcon == NULL) {
- rc = -ENOMEM;
- goto mount_fail_check;
- }
-
- tcon->ses = pSesInfo;
- if (volume_info->password) {
- tcon->password = kstrdup(volume_info->password,
- GFP_KERNEL);
- if (!tcon->password) {
- rc = -ENOMEM;
- goto mount_fail_check;
- }
- }
-
- if ((strchr(volume_info->UNC + 3, '\\') == NULL)
- && (strchr(volume_info->UNC + 3, '/') == NULL)) {
- cERROR(1, ("Missing share name"));
- rc = -ENODEV;
- goto mount_fail_check;
- } else {
- /* BB Do we need to wrap sesSem around
- * this TCon call and Unix SetFS as
- * we do on SessSetup and reconnect? */
- rc = CIFSTCon(xid, pSesInfo, volume_info->UNC,
- tcon, cifs_sb->local_nls);
- cFYI(1, ("CIFS Tcon rc = %d", rc));
- if (volume_info->nodfs) {
- tcon->Flags &= ~SMB_SHARE_IS_IN_DFS;
- cFYI(1, ("DFS disabled (%d)",
- tcon->Flags));
- }
- }
- if (rc)
- goto remote_path_check;
- tcon->seal = volume_info->seal;
- write_lock(&cifs_tcp_ses_lock);
- list_add(&tcon->tcon_list, &pSesInfo->tcon_list);
- write_unlock(&cifs_tcp_ses_lock);
- }
-
- /* we can have only one retry value for a connection
- to a share so for resources mounted more than once
- to the same server share the last value passed in
- for the retry flag is used */
- tcon->retry = volume_info->retry;
- tcon->nocase = volume_info->nocase;
- tcon->local_lease = volume_info->local_lease;
- }
- if (pSesInfo) {
- if (pSesInfo->capabilities & CAP_LARGE_FILES)
- sb->s_maxbytes = MAX_LFS_FILESIZE;
- else
- sb->s_maxbytes = MAX_NON_LFS;
- }
+ setup_cifs_sb(volume_info, cifs_sb);
+ if (pSesInfo->capabilities & CAP_LARGE_FILES)
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
+ else
+ sb->s_maxbytes = MAX_NON_LFS;
/* BB FIXME fix time_gran to be larger for LANMAN sessions */
sb->s_time_gran = 100;
- if (rc)
+ /* search for existing tcon to this server share */
+ tcon = cifs_get_tcon(pSesInfo, volume_info);
+ if (IS_ERR(tcon)) {
+ rc = PTR_ERR(tcon);
+ tcon = NULL;
goto remote_path_check;
+ }
cifs_sb->tcon = tcon;
@@ -2544,7 +2595,7 @@ try_mount_again:
if ((tcon->unix_ext == 0) && (cifs_sb->rsize > (1024 * 127))) {
cifs_sb->rsize = 1024 * 127;
- cFYI(DBG2, ("no very large read support, rsize now 127K"));
+ cFYI(DBG2, "no very large read support, rsize now 127K");
}
if (!(tcon->ses->capabilities & CAP_LARGE_WRITE_X))
cifs_sb->wsize = min(cifs_sb->wsize,
@@ -2593,7 +2644,7 @@ remote_path_check:
goto mount_fail_check;
}
- cFYI(1, ("Getting referral for: %s", full_path));
+ cFYI(1, "Getting referral for: %s", full_path);
rc = get_dfs_path(xid, pSesInfo , full_path + 1,
cifs_sb->local_nls, &num_referrals, &referrals,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
@@ -2707,7 +2758,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
by Samba (not sure whether other servers allow
NTLMv2 password here) */
#ifdef CONFIG_CIFS_WEAK_PW_HASH
- if ((extended_security & CIFSSEC_MAY_LANMAN) &&
+ if ((global_secflags & CIFSSEC_MAY_LANMAN) &&
(ses->server->secType == LANMAN))
calc_lanman_hash(tcon->password, ses->server->cryptKey,
ses->server->secMode &
@@ -2778,13 +2829,13 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
if (length == 3) {
if ((bcc_ptr[0] == 'I') && (bcc_ptr[1] == 'P') &&
(bcc_ptr[2] == 'C')) {
- cFYI(1, ("IPC connection"));
+ cFYI(1, "IPC connection");
tcon->ipc = 1;
}
} else if (length == 2) {
if ((bcc_ptr[0] == 'A') && (bcc_ptr[1] == ':')) {
/* the most common case */
- cFYI(1, ("disk share connection"));
+ cFYI(1, "disk share connection");
}
}
bcc_ptr += length + 1;
@@ -2797,7 +2848,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
bytes_left, is_unicode,
nls_codepage);
- cFYI(1, ("nativeFileSystem=%s", tcon->nativeFileSystem));
+ cFYI(1, "nativeFileSystem=%s", tcon->nativeFileSystem);
if ((smb_buffer_response->WordCount == 3) ||
(smb_buffer_response->WordCount == 7))
@@ -2805,7 +2856,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
tcon->Flags = le16_to_cpu(pSMBr->OptionalSupport);
else
tcon->Flags = 0;
- cFYI(1, ("Tcon flags: 0x%x ", tcon->Flags));
+ cFYI(1, "Tcon flags: 0x%x ", tcon->Flags);
} else if ((rc == 0) && tcon == NULL) {
/* all we need to save for IPC$ connection */
ses->ipc_tid = smb_buffer_response->Tid;
@@ -2833,57 +2884,61 @@ cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
return rc;
}
-int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo,
- struct nls_table *nls_info)
+int cifs_negotiate_protocol(unsigned int xid, struct cifsSesInfo *ses)
{
int rc = 0;
- int first_time = 0;
- struct TCP_Server_Info *server = pSesInfo->server;
-
- /* what if server changes its buffer size after dropping the session? */
- if (server->maxBuf == 0) /* no need to send on reconnect */ {
- rc = CIFSSMBNegotiate(xid, pSesInfo);
- if (rc == -EAGAIN) {
- /* retry only once on 1st time connection */
- rc = CIFSSMBNegotiate(xid, pSesInfo);
- if (rc == -EAGAIN)
- rc = -EHOSTDOWN;
- }
- if (rc == 0) {
- spin_lock(&GlobalMid_Lock);
- if (server->tcpStatus != CifsExiting)
- server->tcpStatus = CifsGood;
- else
- rc = -EHOSTDOWN;
- spin_unlock(&GlobalMid_Lock);
+ struct TCP_Server_Info *server = ses->server;
+
+ /* only send once per connect */
+ if (server->maxBuf != 0)
+ return 0;
+
+ rc = CIFSSMBNegotiate(xid, ses);
+ if (rc == -EAGAIN) {
+ /* retry only once on 1st time connection */
+ rc = CIFSSMBNegotiate(xid, ses);
+ if (rc == -EAGAIN)
+ rc = -EHOSTDOWN;
+ }
+ if (rc == 0) {
+ spin_lock(&GlobalMid_Lock);
+ if (server->tcpStatus != CifsExiting)
+ server->tcpStatus = CifsGood;
+ else
+ rc = -EHOSTDOWN;
+ spin_unlock(&GlobalMid_Lock);
- }
- first_time = 1;
}
- if (rc)
- goto ss_err_exit;
+ return rc;
+}
+
+
+int cifs_setup_session(unsigned int xid, struct cifsSesInfo *ses,
+ struct nls_table *nls_info)
+{
+ int rc = 0;
+ struct TCP_Server_Info *server = ses->server;
- pSesInfo->flags = 0;
- pSesInfo->capabilities = server->capabilities;
+ ses->flags = 0;
+ ses->capabilities = server->capabilities;
if (linuxExtEnabled == 0)
- pSesInfo->capabilities &= (~CAP_UNIX);
+ ses->capabilities &= (~CAP_UNIX);
- cFYI(1, ("Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d",
- server->secMode, server->capabilities, server->timeAdj));
+ cFYI(1, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d",
+ server->secMode, server->capabilities, server->timeAdj);
- rc = CIFS_SessSetup(xid, pSesInfo, first_time, nls_info);
+ rc = CIFS_SessSetup(xid, ses, nls_info);
if (rc) {
- cERROR(1, ("Send error in SessSetup = %d", rc));
+ cERROR(1, "Send error in SessSetup = %d", rc);
} else {
- cFYI(1, ("CIFS Session Established successfully"));
+ cFYI(1, "CIFS Session Established successfully");
spin_lock(&GlobalMid_Lock);
- pSesInfo->status = CifsGood;
- pSesInfo->need_reconnect = false;
+ ses->status = CifsGood;
+ ses->need_reconnect = false;
spin_unlock(&GlobalMid_Lock);
}
-ss_err_exit:
return rc;
}
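
The connect.c changes above restructure the mount path: session setup moves into cifs_get_smb_ses(), tree connection into cifs_get_tcon(), protocol negotiation is split out as cifs_negotiate_protocol(), and the new helpers report failure through ERR_PTR() values that callers test with IS_ERR()/PTR_ERR(). The sketch below illustrates that error-pointer convention with simplified userspace stand-ins; get_session() is a hypothetical placeholder for cifs_get_smb_ses(), and the helper definitions only mimic include/linux/err.h, they do not reproduce it.

/* Sketch of the ERR_PTR/IS_ERR convention the new helpers rely on. */
#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct session { int id; };

static struct session good_ses = { 42 };

/* Returns a valid session or an ERR_PTR-encoded errno, never NULL. */
static struct session *get_session(int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM);
	return &good_ses;
}

int main(void)
{
	struct session *ses = get_session(0);

	if (IS_ERR(ses)) {
		int rc = (int)PTR_ERR(ses);
		fprintf(stderr, "session setup failed: %d\n", rc);
		return 1;
	}
	printf("got session %d\n", ses->id);
	return 0;
}

Returning an encoded errno instead of NULL lets the rewritten mount path propagate the exact failure code (rc = PTR_ERR(...)) without a separate out-parameter, which is the pattern visible in the cifs_get_smb_ses() and cifs_get_tcon() call sites above.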
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index e9f7ecc2714b..86d3c0c82f25 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -73,7 +73,7 @@ cifs_bp_rename_retry:
namelen += (1 + temp->d_name.len);
temp = temp->d_parent;
if (temp == NULL) {
- cERROR(1, ("corrupt dentry"));
+ cERROR(1, "corrupt dentry");
return NULL;
}
}
@@ -90,19 +90,18 @@ cifs_bp_rename_retry:
full_path[namelen] = dirsep;
strncpy(full_path + namelen + 1, temp->d_name.name,
temp->d_name.len);
- cFYI(0, ("name: %s", full_path + namelen));
+ cFYI(0, "name: %s", full_path + namelen);
}
temp = temp->d_parent;
if (temp == NULL) {
- cERROR(1, ("corrupt dentry"));
+ cERROR(1, "corrupt dentry");
kfree(full_path);
return NULL;
}
}
if (namelen != pplen + dfsplen) {
- cERROR(1,
- ("did not end path lookup where expected namelen is %d",
- namelen));
+ cERROR(1, "did not end path lookup where expected namelen is %d",
+ namelen);
/* presumably this is only possible if racing with a rename
of one of the parent directories (we can not lock the dentries
above us to prevent this, but retrying should be harmless) */
@@ -130,6 +129,12 @@ cifs_bp_rename_retry:
return full_path;
}
+/*
+ * When called with struct file pointer set to NULL, there is no way we could
+ * update file->private_data, but getting it stuck on openFileList provides a
+ * way to access it from cifs_fill_filedata and thereby set file->private_data
+ * from cifs_open.
+ */
struct cifsFileInfo *
cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle,
struct file *file, struct vfsmount *mnt, unsigned int oflags)
@@ -173,7 +178,7 @@ cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle,
if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
pCifsInode->clientCanCacheAll = true;
pCifsInode->clientCanCacheRead = true;
- cFYI(1, ("Exclusive Oplock inode %p", newinode));
+ cFYI(1, "Exclusive Oplock inode %p", newinode);
} else if ((oplock & 0xF) == OPLOCK_READ)
pCifsInode->clientCanCacheRead = true;
}
@@ -183,16 +188,17 @@ cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle,
}
int cifs_posix_open(char *full_path, struct inode **pinode,
- struct vfsmount *mnt, int mode, int oflags,
- __u32 *poplock, __u16 *pnetfid, int xid)
+ struct vfsmount *mnt, struct super_block *sb,
+ int mode, int oflags,
+ __u32 *poplock, __u16 *pnetfid, int xid)
{
int rc;
FILE_UNIX_BASIC_INFO *presp_data;
__u32 posix_flags = 0;
- struct cifs_sb_info *cifs_sb = CIFS_SB(mnt->mnt_sb);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct cifs_fattr fattr;
- cFYI(1, ("posix open %s", full_path));
+ cFYI(1, "posix open %s", full_path);
presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
if (presp_data == NULL)
@@ -242,7 +248,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
/* get new inode and set it up */
if (*pinode == NULL) {
- *pinode = cifs_iget(mnt->mnt_sb, &fattr);
+ *pinode = cifs_iget(sb, &fattr);
if (!*pinode) {
rc = -ENOMEM;
goto posix_open_ret;
@@ -251,7 +257,18 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
cifs_fattr_to_inode(*pinode, &fattr);
}
- cifs_new_fileinfo(*pinode, *pnetfid, NULL, mnt, oflags);
+ /*
+ * cifs_fill_filedata() takes care of setting cifsFileInfo pointer to
+ * file->private_data.
+ */
+ if (mnt) {
+ struct cifsFileInfo *pfile_info;
+
+ pfile_info = cifs_new_fileinfo(*pinode, *pnetfid, NULL, mnt,
+ oflags);
+ if (pfile_info == NULL)
+ rc = -ENOMEM;
+ }
posix_open_ret:
kfree(presp_data);
@@ -315,13 +332,14 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
if (nd && (nd->flags & LOOKUP_OPEN))
oflags = nd->intent.open.flags;
else
- oflags = FMODE_READ;
+ oflags = FMODE_READ | SMB_O_CREAT;
if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
(CIFS_UNIX_POSIX_PATH_OPS_CAP &
le64_to_cpu(tcon->fsUnixInfo.Capability))) {
- rc = cifs_posix_open(full_path, &newinode, nd->path.mnt,
- mode, oflags, &oplock, &fileHandle, xid);
+ rc = cifs_posix_open(full_path, &newinode,
+ nd ? nd->path.mnt : NULL,
+ inode->i_sb, mode, oflags, &oplock, &fileHandle, xid);
/* EIO could indicate that (posix open) operation is not
supported, despite what server claimed in capability
negotiation. EREMOTE indicates DFS junction, which is not
@@ -358,7 +376,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
else if ((oflags & O_CREAT) == O_CREAT)
disposition = FILE_OPEN_IF;
else
- cFYI(1, ("Create flag not set in create function"));
+ cFYI(1, "Create flag not set in create function");
}
/* BB add processing to set equivalent of mode - e.g. via CreateX with
@@ -394,7 +412,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
}
if (rc) {
- cFYI(1, ("cifs_create returned 0x%x", rc));
+ cFYI(1, "cifs_create returned 0x%x", rc);
goto cifs_create_out;
}
@@ -457,15 +475,22 @@ cifs_create_set_dentry:
if (rc == 0)
setup_cifs_dentry(tcon, direntry, newinode);
else
- cFYI(1, ("Create worked, get_inode_info failed rc = %d", rc));
+ cFYI(1, "Create worked, get_inode_info failed rc = %d", rc);
/* nfsd case - nfs srv does not set nd */
if ((nd == NULL) || (!(nd->flags & LOOKUP_OPEN))) {
/* mknod case - do not leave file open */
CIFSSMBClose(xid, tcon, fileHandle);
} else if (!(posix_create) && (newinode)) {
- cifs_new_fileinfo(newinode, fileHandle, NULL,
- nd->path.mnt, oflags);
+ struct cifsFileInfo *pfile_info;
+ /*
+ * cifs_fill_filedata() takes care of setting cifsFileInfo
+ * pointer to file->private_data.
+ */
+ pfile_info = cifs_new_fileinfo(newinode, fileHandle, NULL,
+ nd->path.mnt, oflags);
+ if (pfile_info == NULL)
+ rc = -ENOMEM;
}
cifs_create_out:
kfree(buf);
@@ -531,7 +556,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
u16 fileHandle;
FILE_ALL_INFO *buf;
- cFYI(1, ("sfu compat create special file"));
+ cFYI(1, "sfu compat create special file");
buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
if (buf == NULL) {
@@ -616,8 +641,8 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
xid = GetXid();
- cFYI(1, ("parent inode = 0x%p name is: %s and dentry = 0x%p",
- parent_dir_inode, direntry->d_name.name, direntry));
+ cFYI(1, "parent inode = 0x%p name is: %s and dentry = 0x%p",
+ parent_dir_inode, direntry->d_name.name, direntry);
/* check whether path exists */
@@ -632,7 +657,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
int i;
for (i = 0; i < direntry->d_name.len; i++)
if (direntry->d_name.name[i] == '\\') {
- cFYI(1, ("Invalid file name"));
+ cFYI(1, "Invalid file name");
FreeXid(xid);
return ERR_PTR(-EINVAL);
}
@@ -657,11 +682,11 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
}
if (direntry->d_inode != NULL) {
- cFYI(1, ("non-NULL inode in lookup"));
+ cFYI(1, "non-NULL inode in lookup");
} else {
- cFYI(1, ("NULL inode in lookup"));
+ cFYI(1, "NULL inode in lookup");
}
- cFYI(1, ("Full path: %s inode = 0x%p", full_path, direntry->d_inode));
+ cFYI(1, "Full path: %s inode = 0x%p", full_path, direntry->d_inode);
/* Posix open is only called (at lookup time) for file create now.
* For opens (rather than creates), because we do not know if it
@@ -678,6 +703,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
(nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open &&
(nd->intent.open.flags & O_CREAT)) {
rc = cifs_posix_open(full_path, &newInode, nd->path.mnt,
+ parent_dir_inode->i_sb,
nd->intent.open.create_mode,
nd->intent.open.flags, &oplock,
&fileHandle, xid);
@@ -723,7 +749,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
/* if it was once a directory (but how can we tell?) we could do
shrink_dcache_parent(direntry); */
} else if (rc != -EACCES) {
- cERROR(1, ("Unexpected lookup error %d", rc));
+ cERROR(1, "Unexpected lookup error %d", rc);
/* We special case check for Access Denied - since that
is a common return code */
}
@@ -742,8 +768,8 @@ cifs_d_revalidate(struct dentry *direntry, struct nameidata *nd)
if (cifs_revalidate_dentry(direntry))
return 0;
} else {
- cFYI(1, ("neg dentry 0x%p name = %s",
- direntry, direntry->d_name.name));
+ cFYI(1, "neg dentry 0x%p name = %s",
+ direntry, direntry->d_name.name);
if (time_after(jiffies, direntry->d_time + HZ) ||
!lookupCacheEnabled) {
d_drop(direntry);
@@ -758,7 +784,7 @@ cifs_d_revalidate(struct dentry *direntry, struct nameidata *nd)
{
int rc = 0;
- cFYI(1, ("In cifs d_delete, name = %s", direntry->d_name.name));
+ cFYI(1, "In cifs d_delete, name = %s", direntry->d_name.name);
return rc;
} */
diff --git a/fs/cifs/dns_resolve.c b/fs/cifs/dns_resolve.c
index 6f8a0e3fb25b..4db2c5e7283f 100644
--- a/fs/cifs/dns_resolve.c
+++ b/fs/cifs/dns_resolve.c
@@ -106,14 +106,14 @@ dns_resolve_server_name_to_ip(const char *unc, char **ip_addr)
/* search for server name delimiter */
len = strlen(unc);
if (len < 3) {
- cFYI(1, ("%s: unc is too short: %s", __func__, unc));
+ cFYI(1, "%s: unc is too short: %s", __func__, unc);
return -EINVAL;
}
len -= 2;
name = memchr(unc+2, '\\', len);
if (!name) {
- cFYI(1, ("%s: probably server name is whole unc: %s",
- __func__, unc));
+ cFYI(1, "%s: probably server name is whole unc: %s",
+ __func__, unc);
} else {
len = (name - unc) - 2/* leading // */;
}
@@ -127,8 +127,8 @@ dns_resolve_server_name_to_ip(const char *unc, char **ip_addr)
name[len] = 0;
if (is_ip(name)) {
- cFYI(1, ("%s: it is IP, skipping dns upcall: %s",
- __func__, name));
+ cFYI(1, "%s: it is IP, skipping dns upcall: %s",
+ __func__, name);
data = name;
goto skip_upcall;
}
@@ -138,7 +138,7 @@ dns_resolve_server_name_to_ip(const char *unc, char **ip_addr)
len = rkey->type_data.x[0];
data = rkey->payload.data;
} else {
- cERROR(1, ("%s: unable to resolve: %s", __func__, name));
+ cERROR(1, "%s: unable to resolve: %s", __func__, name);
goto out;
}
@@ -148,10 +148,10 @@ skip_upcall:
if (*ip_addr) {
memcpy(*ip_addr, data, len + 1);
if (!IS_ERR(rkey))
- cFYI(1, ("%s: resolved: %s to %s", __func__,
+ cFYI(1, "%s: resolved: %s to %s", __func__,
name,
*ip_addr
- ));
+ );
rc = 0;
} else {
rc = -ENOMEM;
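
The dns_resolve.c hunks only restyle the debug calls, but the surrounding logic -- skipping the leading slashes of the UNC and taking everything up to the next '\' as the host name -- is easy to misread in diff form. Below is a small standalone sketch of that extraction using a made-up UNC path; it illustrates the logic shown above and is not the kernel function itself.

/* Simplified illustration of the server-name extraction in
 * dns_resolve_server_name_to_ip(): skip the leading "\\" and take
 * everything up to the next '\' as the host name.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *unc = "\\\\server.example.com\\share\\dir"; /* example value */
	size_t len = strlen(unc);
	const char *name;

	if (len < 3)
		return 1;                      /* too short to hold "\\\\x" */

	len -= 2;                              /* skip the two leading slashes */
	name = memchr(unc + 2, '\\', len);
	if (!name)
		len = strlen(unc + 2);         /* whole UNC is the server name */
	else
		len = (size_t)(name - unc) - 2;

	printf("host: %.*s\n", (int)len, unc + 2);
	return 0;
}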
diff --git a/fs/cifs/export.c b/fs/cifs/export.c
index 6177f7cca16a..993f82045bf6 100644
--- a/fs/cifs/export.c
+++ b/fs/cifs/export.c
@@ -49,7 +49,7 @@
static struct dentry *cifs_get_parent(struct dentry *dentry)
{
/* BB need to add code here eventually to enable export via NFSD */
- cFYI(1, ("get parent for %p", dentry));
+ cFYI(1, "get parent for %p", dentry);
return ERR_PTR(-EACCES);
}
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 9b11a8f56f3a..a83541ec9713 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -3,7 +3,7 @@
*
* vfs operations that deal with files
*
- * Copyright (C) International Business Machines Corp., 2002,2007
+ * Copyright (C) International Business Machines Corp., 2002,2010
* Author(s): Steve French (sfrench@us.ibm.com)
* Jeremy Allison (jra@samba.org)
*
@@ -108,8 +108,7 @@ static inline int cifs_get_disposition(unsigned int flags)
/* all arguments to this function must be checked for validity in caller */
static inline int
cifs_posix_open_inode_helper(struct inode *inode, struct file *file,
- struct cifsInodeInfo *pCifsInode,
- struct cifsFileInfo *pCifsFile, __u32 oplock,
+ struct cifsInodeInfo *pCifsInode, __u32 oplock,
u16 netfid)
{
@@ -136,15 +135,15 @@ cifs_posix_open_inode_helper(struct inode *inode, struct file *file,
if (timespec_equal(&file->f_path.dentry->d_inode->i_mtime, &temp) &&
(file->f_path.dentry->d_inode->i_size ==
(loff_t)le64_to_cpu(buf->EndOfFile))) {
- cFYI(1, ("inode unchanged on server"));
+ cFYI(1, "inode unchanged on server");
} else {
if (file->f_path.dentry->d_inode->i_mapping) {
rc = filemap_write_and_wait(file->f_path.dentry->d_inode->i_mapping);
if (rc != 0)
CIFS_I(file->f_path.dentry->d_inode)->write_behind_rc = rc;
}
- cFYI(1, ("invalidating remote inode since open detected it "
- "changed"));
+ cFYI(1, "invalidating remote inode since open detected it "
+ "changed");
invalidate_remote_inode(file->f_path.dentry->d_inode);
} */
@@ -152,8 +151,8 @@ psx_client_can_cache:
if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
pCifsInode->clientCanCacheAll = true;
pCifsInode->clientCanCacheRead = true;
- cFYI(1, ("Exclusive Oplock granted on inode %p",
- file->f_path.dentry->d_inode));
+ cFYI(1, "Exclusive Oplock granted on inode %p",
+ file->f_path.dentry->d_inode);
} else if ((oplock & 0xF) == OPLOCK_READ)
pCifsInode->clientCanCacheRead = true;
@@ -190,8 +189,8 @@ cifs_fill_filedata(struct file *file)
if (file->private_data != NULL) {
return pCifsFile;
} else if ((file->f_flags & O_CREAT) && (file->f_flags & O_EXCL))
- cERROR(1, ("could not find file instance for "
- "new file %p", file));
+ cERROR(1, "could not find file instance for "
+ "new file %p", file);
return NULL;
}
@@ -217,7 +216,7 @@ static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
if (timespec_equal(&file->f_path.dentry->d_inode->i_mtime, &temp) &&
(file->f_path.dentry->d_inode->i_size ==
(loff_t)le64_to_cpu(buf->EndOfFile))) {
- cFYI(1, ("inode unchanged on server"));
+ cFYI(1, "inode unchanged on server");
} else {
if (file->f_path.dentry->d_inode->i_mapping) {
/* BB no need to lock inode until after invalidate
@@ -226,8 +225,8 @@ static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
if (rc != 0)
CIFS_I(file->f_path.dentry->d_inode)->write_behind_rc = rc;
}
- cFYI(1, ("invalidating remote inode since open detected it "
- "changed"));
+ cFYI(1, "invalidating remote inode since open detected it "
+ "changed");
invalidate_remote_inode(file->f_path.dentry->d_inode);
}
@@ -242,8 +241,8 @@ client_can_cache:
if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) {
pCifsInode->clientCanCacheAll = true;
pCifsInode->clientCanCacheRead = true;
- cFYI(1, ("Exclusive Oplock granted on inode %p",
- file->f_path.dentry->d_inode));
+ cFYI(1, "Exclusive Oplock granted on inode %p",
+ file->f_path.dentry->d_inode);
} else if ((*oplock & 0xF) == OPLOCK_READ)
pCifsInode->clientCanCacheRead = true;
@@ -285,8 +284,8 @@ int cifs_open(struct inode *inode, struct file *file)
return rc;
}
- cFYI(1, ("inode = 0x%p file flags are 0x%x for %s",
- inode, file->f_flags, full_path));
+ cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
+ inode, file->f_flags, full_path);
if (oplockEnabled)
oplock = REQ_OPLOCK;
@@ -298,27 +297,29 @@ int cifs_open(struct inode *inode, struct file *file)
(CIFS_UNIX_POSIX_PATH_OPS_CAP &
le64_to_cpu(tcon->fsUnixInfo.Capability))) {
int oflags = (int) cifs_posix_convert_flags(file->f_flags);
+ oflags |= SMB_O_CREAT;
/* can not refresh inode info since size could be stale */
rc = cifs_posix_open(full_path, &inode, file->f_path.mnt,
- cifs_sb->mnt_file_mode /* ignored */,
- oflags, &oplock, &netfid, xid);
+ inode->i_sb,
+ cifs_sb->mnt_file_mode /* ignored */,
+ oflags, &oplock, &netfid, xid);
if (rc == 0) {
- cFYI(1, ("posix open succeeded"));
+ cFYI(1, "posix open succeeded");
/* no need for special case handling of setting mode
on read only files needed here */
pCifsFile = cifs_fill_filedata(file);
cifs_posix_open_inode_helper(inode, file, pCifsInode,
- pCifsFile, oplock, netfid);
+ oplock, netfid);
goto out;
} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
if (tcon->ses->serverNOS)
- cERROR(1, ("server %s of type %s returned"
+ cERROR(1, "server %s of type %s returned"
" unexpected error on SMB posix open"
", disabling posix open support."
" Check if server update available.",
tcon->ses->serverName,
- tcon->ses->serverNOS));
+ tcon->ses->serverNOS);
tcon->broken_posix_open = true;
} else if ((rc != -EIO) && (rc != -EREMOTE) &&
(rc != -EOPNOTSUPP)) /* path not found or net err */
@@ -386,7 +387,7 @@ int cifs_open(struct inode *inode, struct file *file)
& CIFS_MOUNT_MAP_SPECIAL_CHR);
}
if (rc) {
- cFYI(1, ("cifs_open returned 0x%x", rc));
+ cFYI(1, "cifs_open returned 0x%x", rc);
goto out;
}
@@ -469,7 +470,7 @@ static int cifs_reopen_file(struct file *file, bool can_flush)
}
if (file->f_path.dentry == NULL) {
- cERROR(1, ("no valid name if dentry freed"));
+ cERROR(1, "no valid name if dentry freed");
dump_stack();
rc = -EBADF;
goto reopen_error_exit;
@@ -477,7 +478,7 @@ static int cifs_reopen_file(struct file *file, bool can_flush)
inode = file->f_path.dentry->d_inode;
if (inode == NULL) {
- cERROR(1, ("inode not valid"));
+ cERROR(1, "inode not valid");
dump_stack();
rc = -EBADF;
goto reopen_error_exit;
@@ -499,8 +500,8 @@ reopen_error_exit:
return rc;
}
- cFYI(1, ("inode = 0x%p file flags 0x%x for %s",
- inode, file->f_flags, full_path));
+ cFYI(1, "inode = 0x%p file flags 0x%x for %s",
+ inode, file->f_flags, full_path);
if (oplockEnabled)
oplock = REQ_OPLOCK;
@@ -513,10 +514,11 @@ reopen_error_exit:
int oflags = (int) cifs_posix_convert_flags(file->f_flags);
/* can not refresh inode info since size could be stale */
rc = cifs_posix_open(full_path, NULL, file->f_path.mnt,
- cifs_sb->mnt_file_mode /* ignored */,
- oflags, &oplock, &netfid, xid);
+ inode->i_sb,
+ cifs_sb->mnt_file_mode /* ignored */,
+ oflags, &oplock, &netfid, xid);
if (rc == 0) {
- cFYI(1, ("posix reopen succeeded"));
+ cFYI(1, "posix reopen succeeded");
goto reopen_success;
}
/* fallthrough to retry open the old way on errors, especially
@@ -537,8 +539,8 @@ reopen_error_exit:
CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc) {
mutex_unlock(&pCifsFile->fh_mutex);
- cFYI(1, ("cifs_open returned 0x%x", rc));
- cFYI(1, ("oplock: %d", oplock));
+ cFYI(1, "cifs_open returned 0x%x", rc);
+ cFYI(1, "oplock: %d", oplock);
} else {
reopen_success:
pCifsFile->netfid = netfid;
@@ -570,8 +572,8 @@ reopen_success:
if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
pCifsInode->clientCanCacheAll = true;
pCifsInode->clientCanCacheRead = true;
- cFYI(1, ("Exclusive Oplock granted on inode %p",
- file->f_path.dentry->d_inode));
+ cFYI(1, "Exclusive Oplock granted on inode %p",
+ file->f_path.dentry->d_inode);
} else if ((oplock & 0xF) == OPLOCK_READ) {
pCifsInode->clientCanCacheRead = true;
pCifsInode->clientCanCacheAll = false;
@@ -619,8 +621,7 @@ int cifs_close(struct inode *inode, struct file *file)
the struct would be in each open file,
but this should give enough time to
clear the socket */
- cFYI(DBG2,
- ("close delay, write pending"));
+ cFYI(DBG2, "close delay, write pending");
msleep(timeout);
timeout *= 4;
}
@@ -653,7 +654,7 @@ int cifs_close(struct inode *inode, struct file *file)
read_lock(&GlobalSMBSeslock);
if (list_empty(&(CIFS_I(inode)->openFileList))) {
- cFYI(1, ("closing last open instance for inode %p", inode));
+ cFYI(1, "closing last open instance for inode %p", inode);
/* if the file is not open we do not know if we can cache info
on this inode, much less write behind and read ahead */
CIFS_I(inode)->clientCanCacheRead = false;
@@ -674,7 +675,7 @@ int cifs_closedir(struct inode *inode, struct file *file)
(struct cifsFileInfo *)file->private_data;
char *ptmp;
- cFYI(1, ("Closedir inode = 0x%p", inode));
+ cFYI(1, "Closedir inode = 0x%p", inode);
xid = GetXid();
@@ -685,22 +686,22 @@ int cifs_closedir(struct inode *inode, struct file *file)
pTcon = cifs_sb->tcon;
- cFYI(1, ("Freeing private data in close dir"));
+ cFYI(1, "Freeing private data in close dir");
write_lock(&GlobalSMBSeslock);
if (!pCFileStruct->srch_inf.endOfSearch &&
!pCFileStruct->invalidHandle) {
pCFileStruct->invalidHandle = true;
write_unlock(&GlobalSMBSeslock);
rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
- cFYI(1, ("Closing uncompleted readdir with rc %d",
- rc));
+ cFYI(1, "Closing uncompleted readdir with rc %d",
+ rc);
/* not much we can do if it fails anyway, ignore rc */
rc = 0;
} else
write_unlock(&GlobalSMBSeslock);
ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
if (ptmp) {
- cFYI(1, ("closedir free smb buf in srch struct"));
+ cFYI(1, "closedir free smb buf in srch struct");
pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
if (pCFileStruct->srch_inf.smallBuf)
cifs_small_buf_release(ptmp);
@@ -748,49 +749,49 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
rc = -EACCES;
xid = GetXid();
- cFYI(1, ("Lock parm: 0x%x flockflags: "
+ cFYI(1, "Lock parm: 0x%x flockflags: "
"0x%x flocktype: 0x%x start: %lld end: %lld",
cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
- pfLock->fl_end));
+ pfLock->fl_end);
if (pfLock->fl_flags & FL_POSIX)
- cFYI(1, ("Posix"));
+ cFYI(1, "Posix");
if (pfLock->fl_flags & FL_FLOCK)
- cFYI(1, ("Flock"));
+ cFYI(1, "Flock");
if (pfLock->fl_flags & FL_SLEEP) {
- cFYI(1, ("Blocking lock"));
+ cFYI(1, "Blocking lock");
wait_flag = true;
}
if (pfLock->fl_flags & FL_ACCESS)
- cFYI(1, ("Process suspended by mandatory locking - "
- "not implemented yet"));
+ cFYI(1, "Process suspended by mandatory locking - "
+ "not implemented yet");
if (pfLock->fl_flags & FL_LEASE)
- cFYI(1, ("Lease on file - not implemented yet"));
+ cFYI(1, "Lease on file - not implemented yet");
if (pfLock->fl_flags &
(~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
- cFYI(1, ("Unknown lock flags 0x%x", pfLock->fl_flags));
+ cFYI(1, "Unknown lock flags 0x%x", pfLock->fl_flags);
if (pfLock->fl_type == F_WRLCK) {
- cFYI(1, ("F_WRLCK "));
+ cFYI(1, "F_WRLCK ");
numLock = 1;
} else if (pfLock->fl_type == F_UNLCK) {
- cFYI(1, ("F_UNLCK"));
+ cFYI(1, "F_UNLCK");
numUnlock = 1;
/* Check if unlock includes more than
one lock range */
} else if (pfLock->fl_type == F_RDLCK) {
- cFYI(1, ("F_RDLCK"));
+ cFYI(1, "F_RDLCK");
lockType |= LOCKING_ANDX_SHARED_LOCK;
numLock = 1;
} else if (pfLock->fl_type == F_EXLCK) {
- cFYI(1, ("F_EXLCK"));
+ cFYI(1, "F_EXLCK");
numLock = 1;
} else if (pfLock->fl_type == F_SHLCK) {
- cFYI(1, ("F_SHLCK"));
+ cFYI(1, "F_SHLCK");
lockType |= LOCKING_ANDX_SHARED_LOCK;
numLock = 1;
} else
- cFYI(1, ("Unknown type of lock"));
+ cFYI(1, "Unknown type of lock");
cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
tcon = cifs_sb->tcon;
@@ -833,8 +834,8 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
0 /* wait flag */ );
pfLock->fl_type = F_UNLCK;
if (rc != 0)
- cERROR(1, ("Error unlocking previously locked "
- "range %d during test of lock", rc));
+ cERROR(1, "Error unlocking previously locked "
+ "range %d during test of lock", rc);
rc = 0;
} else {
@@ -856,9 +857,9 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
0 /* wait flag */);
pfLock->fl_type = F_RDLCK;
if (rc != 0)
- cERROR(1, ("Error unlocking "
+ cERROR(1, "Error unlocking "
"previously locked range %d "
- "during test of lock", rc));
+ "during test of lock", rc);
rc = 0;
} else {
pfLock->fl_type = F_WRLCK;
@@ -923,9 +924,10 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
1, 0, li->type, false);
if (stored_rc)
rc = stored_rc;
-
- list_del(&li->llist);
- kfree(li);
+ else {
+ list_del(&li->llist);
+ kfree(li);
+ }
}
}
mutex_unlock(&fid->lock_mutex);
@@ -988,9 +990,8 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
pTcon = cifs_sb->tcon;
- /* cFYI(1,
- (" write %d bytes to offset %lld of %s", write_size,
- *poffset, file->f_path.dentry->d_name.name)); */
+ /* cFYI(1, " write %d bytes to offset %lld of %s", write_size,
+ *poffset, file->f_path.dentry->d_name.name); */
if (file->private_data == NULL)
return -EBADF;
@@ -1091,8 +1092,8 @@ static ssize_t cifs_write(struct file *file, const char *write_data,
pTcon = cifs_sb->tcon;
- cFYI(1, ("write %zd bytes to offset %lld of %s", write_size,
- *poffset, file->f_path.dentry->d_name.name));
+ cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
+ *poffset, file->f_path.dentry->d_name.name);
if (file->private_data == NULL)
return -EBADF;
@@ -1233,7 +1234,7 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode)
it being zero) during stress testcases so we need to check for it */
if (cifs_inode == NULL) {
- cERROR(1, ("Null inode passed to cifs_writeable_file"));
+ cERROR(1, "Null inode passed to cifs_writeable_file");
dump_stack();
return NULL;
}
@@ -1277,7 +1278,7 @@ refind_writable:
again. Note that it would be bad
to hold up writepages here (rather than
in caller) with continuous retries */
- cFYI(1, ("wp failed on reopen file"));
+ cFYI(1, "wp failed on reopen file");
read_lock(&GlobalSMBSeslock);
/* can not use this handle, no write
pending on this one after all */
@@ -1353,7 +1354,7 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
else if (bytes_written < 0)
rc = bytes_written;
} else {
- cFYI(1, ("No writeable filehandles for inode"));
+ cFYI(1, "No writeable filehandles for inode");
rc = -EIO;
}
@@ -1525,7 +1526,7 @@ retry:
*/
open_file = find_writable_file(CIFS_I(mapping->host));
if (!open_file) {
- cERROR(1, ("No writable handles for inode"));
+ cERROR(1, "No writable handles for inode");
rc = -EBADF;
} else {
long_op = cifs_write_timeout(cifsi, offset);
@@ -1538,8 +1539,8 @@ retry:
cifs_update_eof(cifsi, offset, bytes_written);
if (rc || bytes_written < bytes_to_write) {
- cERROR(1, ("Write2 ret %d, wrote %d",
- rc, bytes_written));
+ cERROR(1, "Write2 ret %d, wrote %d",
+ rc, bytes_written);
/* BB what if continued retry is
requested via mount flags? */
if (rc == -ENOSPC)
@@ -1600,7 +1601,7 @@ static int cifs_writepage(struct page *page, struct writeback_control *wbc)
/* BB add check for wbc flags */
page_cache_get(page);
if (!PageUptodate(page))
- cFYI(1, ("ppw - page not up to date"));
+ cFYI(1, "ppw - page not up to date");
/*
* Set the "writeback" flag, and clear "dirty" in the radix tree.
@@ -1629,8 +1630,8 @@ static int cifs_write_end(struct file *file, struct address_space *mapping,
int rc;
struct inode *inode = mapping->host;
- cFYI(1, ("write_end for page %p from pos %lld with %d bytes",
- page, pos, copied));
+ cFYI(1, "write_end for page %p from pos %lld with %d bytes",
+ page, pos, copied);
if (PageChecked(page)) {
if (copied == len)
@@ -1686,8 +1687,8 @@ int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
xid = GetXid();
- cFYI(1, ("Sync file - name: %s datasync: 0x%x",
- dentry->d_name.name, datasync));
+ cFYI(1, "Sync file - name: %s datasync: 0x%x",
+ dentry->d_name.name, datasync);
rc = filemap_write_and_wait(inode->i_mapping);
if (rc == 0) {
@@ -1711,7 +1712,7 @@ int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
unsigned int rpages = 0;
int rc = 0;
- cFYI(1, ("sync page %p",page));
+ cFYI(1, "sync page %p", page);
mapping = page->mapping;
if (!mapping)
return 0;
@@ -1722,7 +1723,7 @@ int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
/* fill in rpages then
result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
-/* cFYI(1, ("rpages is %d for sync page of Index %ld", rpages, index));
+/* cFYI(1, "rpages is %d for sync page of Index %ld", rpages, index);
#if 0
if (rc < 0)
@@ -1756,7 +1757,7 @@ int cifs_flush(struct file *file, fl_owner_t id)
CIFS_I(inode)->write_behind_rc = 0;
}
- cFYI(1, ("Flush inode %p file %p rc %d", inode, file, rc));
+ cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
return rc;
}
@@ -1788,7 +1789,7 @@ ssize_t cifs_user_read(struct file *file, char __user *read_data,
open_file = (struct cifsFileInfo *)file->private_data;
if ((file->f_flags & O_ACCMODE) == O_WRONLY)
- cFYI(1, ("attempting read on write only file instance"));
+ cFYI(1, "attempting read on write only file instance");
for (total_read = 0, current_offset = read_data;
read_size > total_read;
@@ -1869,7 +1870,7 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
open_file = (struct cifsFileInfo *)file->private_data;
if ((file->f_flags & O_ACCMODE) == O_WRONLY)
- cFYI(1, ("attempting read on write only file instance"));
+ cFYI(1, "attempting read on write only file instance");
for (total_read = 0, current_offset = read_data;
read_size > total_read;
@@ -1920,7 +1921,7 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
xid = GetXid();
rc = cifs_revalidate_file(file);
if (rc) {
- cFYI(1, ("Validation prior to mmap failed, error=%d", rc));
+ cFYI(1, "Validation prior to mmap failed, error=%d", rc);
FreeXid(xid);
return rc;
}
@@ -1931,8 +1932,7 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
static void cifs_copy_cache_pages(struct address_space *mapping,
- struct list_head *pages, int bytes_read, char *data,
- struct pagevec *plru_pvec)
+ struct list_head *pages, int bytes_read, char *data)
{
struct page *page;
char *target;
@@ -1944,10 +1944,10 @@ static void cifs_copy_cache_pages(struct address_space *mapping,
page = list_entry(pages->prev, struct page, lru);
list_del(&page->lru);
- if (add_to_page_cache(page, mapping, page->index,
+ if (add_to_page_cache_lru(page, mapping, page->index,
GFP_KERNEL)) {
page_cache_release(page);
- cFYI(1, ("Add page cache failed"));
+ cFYI(1, "Add page cache failed");
data += PAGE_CACHE_SIZE;
bytes_read -= PAGE_CACHE_SIZE;
continue;
@@ -1970,8 +1970,6 @@ static void cifs_copy_cache_pages(struct address_space *mapping,
flush_dcache_page(page);
SetPageUptodate(page);
unlock_page(page);
- if (!pagevec_add(plru_pvec, page))
- __pagevec_lru_add_file(plru_pvec);
data += PAGE_CACHE_SIZE;
}
return;
@@ -1990,7 +1988,6 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
unsigned int read_size, i;
char *smb_read_data = NULL;
struct smb_com_read_rsp *pSMBr;
- struct pagevec lru_pvec;
struct cifsFileInfo *open_file;
int buf_type = CIFS_NO_BUFFER;
@@ -2004,8 +2001,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
pTcon = cifs_sb->tcon;
- pagevec_init(&lru_pvec, 0);
- cFYI(DBG2, ("rpages: num pages %d", num_pages));
+ cFYI(DBG2, "rpages: num pages %d", num_pages);
for (i = 0; i < num_pages; ) {
unsigned contig_pages;
struct page *tmp_page;
@@ -2038,8 +2034,8 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
/* Read size needs to be in multiples of one page */
read_size = min_t(const unsigned int, read_size,
cifs_sb->rsize & PAGE_CACHE_MASK);
- cFYI(DBG2, ("rpages: read size 0x%x contiguous pages %d",
- read_size, contig_pages));
+ cFYI(DBG2, "rpages: read size 0x%x contiguous pages %d",
+ read_size, contig_pages);
rc = -EAGAIN;
while (rc == -EAGAIN) {
if ((open_file->invalidHandle) &&
@@ -2066,14 +2062,14 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
}
}
if ((rc < 0) || (smb_read_data == NULL)) {
- cFYI(1, ("Read error in readpages: %d", rc));
+ cFYI(1, "Read error in readpages: %d", rc);
break;
} else if (bytes_read > 0) {
task_io_account_read(bytes_read);
pSMBr = (struct smb_com_read_rsp *)smb_read_data;
cifs_copy_cache_pages(mapping, page_list, bytes_read,
smb_read_data + 4 /* RFC1001 hdr */ +
- le16_to_cpu(pSMBr->DataOffset), &lru_pvec);
+ le16_to_cpu(pSMBr->DataOffset));
i += bytes_read >> PAGE_CACHE_SHIFT;
cifs_stats_bytes_read(pTcon, bytes_read);
@@ -2089,9 +2085,9 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
/* break; */
}
} else {
- cFYI(1, ("No bytes read (%d) at offset %lld . "
- "Cleaning remaining pages from readahead list",
- bytes_read, offset));
+ cFYI(1, "No bytes read (%d) at offset %lld . "
+ "Cleaning remaining pages from readahead list",
+ bytes_read, offset);
/* BB turn off caching and do new lookup on
file size at server? */
break;
@@ -2106,8 +2102,6 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
bytes_read = 0;
}
- pagevec_lru_add_file(&lru_pvec);
-
/* need to free smb_read_data buf before exit */
if (smb_read_data) {
if (buf_type == CIFS_SMALL_BUFFER)
@@ -2136,7 +2130,7 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
if (rc < 0)
goto io_error;
else
- cFYI(1, ("Bytes read %d", rc));
+ cFYI(1, "Bytes read %d", rc);
file->f_path.dentry->d_inode->i_atime =
current_fs_time(file->f_path.dentry->d_inode->i_sb);
@@ -2168,8 +2162,8 @@ static int cifs_readpage(struct file *file, struct page *page)
return rc;
}
- cFYI(1, ("readpage %p at offset %d 0x%x\n",
- page, (int)offset, (int)offset));
+ cFYI(1, "readpage %p at offset %d 0x%x\n",
+ page, (int)offset, (int)offset);
rc = cifs_readpage_worker(file, page, &offset);
@@ -2239,7 +2233,7 @@ static int cifs_write_begin(struct file *file, struct address_space *mapping,
struct page *page;
int rc = 0;
- cFYI(1, ("write_begin from %lld len %d", (long long)pos, len));
+ cFYI(1, "write_begin from %lld len %d", (long long)pos, len);
page = grab_cache_page_write_begin(mapping, index, flags);
if (!page) {
@@ -2311,12 +2305,10 @@ cifs_oplock_break(struct slow_work *work)
int rc, waitrc = 0;
if (inode && S_ISREG(inode->i_mode)) {
-#ifdef CONFIG_CIFS_EXPERIMENTAL
- if (cinode->clientCanCacheAll == 0)
+ if (cinode->clientCanCacheRead)
break_lease(inode, O_RDONLY);
- else if (cinode->clientCanCacheRead == 0)
+ else
break_lease(inode, O_WRONLY);
-#endif
rc = filemap_fdatawrite(inode->i_mapping);
if (cinode->clientCanCacheRead == 0) {
waitrc = filemap_fdatawait(inode->i_mapping);
@@ -2326,7 +2318,7 @@ cifs_oplock_break(struct slow_work *work)
rc = waitrc;
if (rc)
cinode->write_behind_rc = rc;
- cFYI(1, ("Oplock flush inode %p rc %d", inode, rc));
+ cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
}
/*
@@ -2338,7 +2330,7 @@ cifs_oplock_break(struct slow_work *work)
if (!cfile->closePend && !cfile->oplock_break_cancelled) {
rc = CIFSSMBLock(0, cifs_sb->tcon, cfile->netfid, 0, 0, 0, 0,
LOCKING_ANDX_OPLOCK_RELEASE, false);
- cFYI(1, ("Oplock release rc = %d", rc));
+ cFYI(1, "Oplock release rc = %d", rc);
}
}
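
Among the file.c hunks above, cifs_readpages()/cifs_copy_cache_pages() drop the local struct pagevec and switch from add_to_page_cache() plus manual pagevec_add()/__pagevec_lru_add_file() batching to add_to_page_cache_lru(), which inserts each page into the mapping and onto the LRU in one step. Below is a simplified sketch of the resulting per-page copy loop, reusing the local names visible in the hunks (page, target, data, bytes_read); error handling and partial-page zeroing from the original function are omitted.

/* Simplified sketch of the per-page copy loop after the conversion. */
while (bytes_read > 0 && !list_empty(pages)) {
	struct page *page = list_entry(pages->prev, struct page, lru);

	list_del(&page->lru);

	/* adds the page to the page cache and to the LRU in one call */
	if (add_to_page_cache_lru(page, mapping, page->index, GFP_KERNEL)) {
		page_cache_release(page);
		data += PAGE_CACHE_SIZE;
		bytes_read -= PAGE_CACHE_SIZE;
		continue;
	}

	target = kmap_atomic(page, KM_USER0);
	memcpy(target, data, min_t(int, bytes_read, PAGE_CACHE_SIZE));
	kunmap_atomic(target, KM_USER0);

	flush_dcache_page(page);
	SetPageUptodate(page);
	unlock_page(page);

	data += PAGE_CACHE_SIZE;
	bytes_read -= PAGE_CACHE_SIZE;
}
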
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 35ec11716213..b35cb031c20c 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1,7 +1,7 @@
/*
* fs/cifs/inode.c
*
- * Copyright (C) International Business Machines Corp., 2002,2008
+ * Copyright (C) International Business Machines Corp., 2002,2010
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
@@ -86,30 +86,30 @@ cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
{
struct cifsInodeInfo *cifs_i = CIFS_I(inode);
- cFYI(1, ("%s: revalidating inode %llu", __func__, cifs_i->uniqueid));
+ cFYI(1, "%s: revalidating inode %llu", __func__, cifs_i->uniqueid);
if (inode->i_state & I_NEW) {
- cFYI(1, ("%s: inode %llu is new", __func__, cifs_i->uniqueid));
+ cFYI(1, "%s: inode %llu is new", __func__, cifs_i->uniqueid);
return;
}
/* don't bother with revalidation if we have an oplock */
if (cifs_i->clientCanCacheRead) {
- cFYI(1, ("%s: inode %llu is oplocked", __func__,
- cifs_i->uniqueid));
+ cFYI(1, "%s: inode %llu is oplocked", __func__,
+ cifs_i->uniqueid);
return;
}
/* revalidate if mtime or size have changed */
if (timespec_equal(&inode->i_mtime, &fattr->cf_mtime) &&
cifs_i->server_eof == fattr->cf_eof) {
- cFYI(1, ("%s: inode %llu is unchanged", __func__,
- cifs_i->uniqueid));
+ cFYI(1, "%s: inode %llu is unchanged", __func__,
+ cifs_i->uniqueid);
return;
}
- cFYI(1, ("%s: invalidating inode %llu mapping", __func__,
- cifs_i->uniqueid));
+ cFYI(1, "%s: invalidating inode %llu mapping", __func__,
+ cifs_i->uniqueid);
cifs_i->invalid_mapping = true;
}
@@ -144,8 +144,8 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
else
cifs_i->time = jiffies;
- cFYI(1, ("inode 0x%p old_time=%ld new_time=%ld", inode,
- oldtime, cifs_i->time));
+ cFYI(1, "inode 0x%p old_time=%ld new_time=%ld", inode,
+ oldtime, cifs_i->time);
cifs_i->delete_pending = fattr->cf_flags & CIFS_FATTR_DELETE_PENDING;
@@ -227,7 +227,7 @@ cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, FILE_UNIX_BASIC_INFO *info,
/* safest to call it a file if we do not know */
fattr->cf_mode |= S_IFREG;
fattr->cf_dtype = DT_REG;
- cFYI(1, ("unknown type %d", le32_to_cpu(info->Type)));
+ cFYI(1, "unknown type %d", le32_to_cpu(info->Type));
break;
}
@@ -256,7 +256,7 @@ cifs_create_dfs_fattr(struct cifs_fattr *fattr, struct super_block *sb)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
- cFYI(1, ("creating fake fattr for DFS referral"));
+ cFYI(1, "creating fake fattr for DFS referral");
memset(fattr, 0, sizeof(*fattr));
fattr->cf_mode = S_IFDIR | S_IXUGO | S_IRWXU;
@@ -305,7 +305,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
tcon = cifs_sb->tcon;
- cFYI(1, ("Getting info on %s", full_path));
+ cFYI(1, "Getting info on %s", full_path);
/* could have done a find first instead but this returns more info */
rc = CIFSSMBUnixQPathInfo(xid, tcon, full_path, &find_data,
@@ -373,7 +373,7 @@ cifs_sfu_type(struct cifs_fattr *fattr, const unsigned char *path,
&bytes_read, &pbuf, &buf_type);
if ((rc == 0) && (bytes_read >= 8)) {
if (memcmp("IntxBLK", pbuf, 8) == 0) {
- cFYI(1, ("Block device"));
+ cFYI(1, "Block device");
fattr->cf_mode |= S_IFBLK;
fattr->cf_dtype = DT_BLK;
if (bytes_read == 24) {
@@ -385,7 +385,7 @@ cifs_sfu_type(struct cifs_fattr *fattr, const unsigned char *path,
fattr->cf_rdev = MKDEV(mjr, mnr);
}
} else if (memcmp("IntxCHR", pbuf, 8) == 0) {
- cFYI(1, ("Char device"));
+ cFYI(1, "Char device");
fattr->cf_mode |= S_IFCHR;
fattr->cf_dtype = DT_CHR;
if (bytes_read == 24) {
@@ -397,7 +397,7 @@ cifs_sfu_type(struct cifs_fattr *fattr, const unsigned char *path,
fattr->cf_rdev = MKDEV(mjr, mnr);
}
} else if (memcmp("IntxLNK", pbuf, 7) == 0) {
- cFYI(1, ("Symlink"));
+ cFYI(1, "Symlink");
fattr->cf_mode |= S_IFLNK;
fattr->cf_dtype = DT_LNK;
} else {
@@ -439,10 +439,10 @@ static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path,
else if (rc > 3) {
mode = le32_to_cpu(*((__le32 *)ea_value));
fattr->cf_mode &= ~SFBITS_MASK;
- cFYI(1, ("special bits 0%o org mode 0%o", mode,
- fattr->cf_mode));
+ cFYI(1, "special bits 0%o org mode 0%o", mode,
+ fattr->cf_mode);
fattr->cf_mode = (mode & SFBITS_MASK) | fattr->cf_mode;
- cFYI(1, ("special mode bits 0%o", mode));
+ cFYI(1, "special mode bits 0%o", mode);
}
return 0;
@@ -548,11 +548,11 @@ int cifs_get_inode_info(struct inode **pinode,
struct cifs_fattr fattr;
pTcon = cifs_sb->tcon;
- cFYI(1, ("Getting info on %s", full_path));
+ cFYI(1, "Getting info on %s", full_path);
if ((pfindData == NULL) && (*pinode != NULL)) {
if (CIFS_I(*pinode)->clientCanCacheRead) {
- cFYI(1, ("No need to revalidate cached inode sizes"));
+ cFYI(1, "No need to revalidate cached inode sizes");
return rc;
}
}
@@ -618,7 +618,7 @@ int cifs_get_inode_info(struct inode **pinode,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc1 || !fattr.cf_uniqueid) {
- cFYI(1, ("GetSrvInodeNum rc %d", rc1));
+ cFYI(1, "GetSrvInodeNum rc %d", rc1);
fattr.cf_uniqueid = iunique(sb, ROOT_I);
cifs_autodisable_serverino(cifs_sb);
}
@@ -634,13 +634,13 @@ int cifs_get_inode_info(struct inode **pinode,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
tmprc = cifs_sfu_type(&fattr, full_path, cifs_sb, xid);
if (tmprc)
- cFYI(1, ("cifs_sfu_type failed: %d", tmprc));
+ cFYI(1, "cifs_sfu_type failed: %d", tmprc);
}
#ifdef CONFIG_CIFS_EXPERIMENTAL
/* fill in 0777 bits from ACL */
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
- cFYI(1, ("Getting mode bits from ACL"));
+ cFYI(1, "Getting mode bits from ACL");
cifs_acl_to_fattr(cifs_sb, &fattr, *pinode, full_path, pfid);
}
#endif
@@ -734,7 +734,7 @@ cifs_iget(struct super_block *sb, struct cifs_fattr *fattr)
unsigned long hash;
struct inode *inode;
- cFYI(1, ("looking for uniqueid=%llu", fattr->cf_uniqueid));
+ cFYI(1, "looking for uniqueid=%llu", fattr->cf_uniqueid);
/* hash down to 32-bits on 32-bit arch */
hash = cifs_uniqueid_to_ino_t(fattr->cf_uniqueid);
@@ -780,7 +780,7 @@ struct inode *cifs_root_iget(struct super_block *sb, unsigned long ino)
return ERR_PTR(-ENOMEM);
if (rc && cifs_sb->tcon->ipc) {
- cFYI(1, ("ipc connection - fake read inode"));
+ cFYI(1, "ipc connection - fake read inode");
inode->i_mode |= S_IFDIR;
inode->i_nlink = 2;
inode->i_op = &cifs_ipc_inode_ops;
@@ -842,7 +842,7 @@ cifs_set_file_info(struct inode *inode, struct iattr *attrs, int xid,
* server times.
*/
if (set_time && (attrs->ia_valid & ATTR_CTIME)) {
- cFYI(1, ("CIFS - CTIME changed"));
+ cFYI(1, "CIFS - CTIME changed");
info_buf.ChangeTime =
cpu_to_le64(cifs_UnixTimeToNT(attrs->ia_ctime));
} else
@@ -877,8 +877,8 @@ cifs_set_file_info(struct inode *inode, struct iattr *attrs, int xid,
goto out;
}
- cFYI(1, ("calling SetFileInfo since SetPathInfo for "
- "times not supported by this server"));
+ cFYI(1, "calling SetFileInfo since SetPathInfo for "
+ "times not supported by this server");
rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_OPEN,
SYNCHRONIZE | FILE_WRITE_ATTRIBUTES,
CREATE_NOT_DIR, &netfid, &oplock,
@@ -1036,7 +1036,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
struct iattr *attrs = NULL;
__u32 dosattr = 0, origattr = 0;
- cFYI(1, ("cifs_unlink, dir=0x%p, dentry=0x%p", dir, dentry));
+ cFYI(1, "cifs_unlink, dir=0x%p, dentry=0x%p", dir, dentry);
xid = GetXid();
@@ -1055,7 +1055,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
rc = CIFSPOSIXDelFile(xid, tcon, full_path,
SMB_POSIX_UNLINK_FILE_TARGET, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
- cFYI(1, ("posix del rc %d", rc));
+ cFYI(1, "posix del rc %d", rc);
if ((rc == 0) || (rc == -ENOENT))
goto psx_del_no_retry;
}
@@ -1129,7 +1129,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
struct inode *newinode = NULL;
struct cifs_fattr fattr;
- cFYI(1, ("In cifs_mkdir, mode = 0x%x inode = 0x%p", mode, inode));
+ cFYI(1, "In cifs_mkdir, mode = 0x%x inode = 0x%p", mode, inode);
xid = GetXid();
@@ -1164,7 +1164,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
kfree(pInfo);
goto mkdir_retry_old;
} else if (rc) {
- cFYI(1, ("posix mkdir returned 0x%x", rc));
+ cFYI(1, "posix mkdir returned 0x%x", rc);
d_drop(direntry);
} else {
if (pInfo->Type == cpu_to_le32(-1)) {
@@ -1190,12 +1190,12 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
d_instantiate(direntry, newinode);
#ifdef CONFIG_CIFS_DEBUG2
- cFYI(1, ("instantiated dentry %p %s to inode %p",
- direntry, direntry->d_name.name, newinode));
+ cFYI(1, "instantiated dentry %p %s to inode %p",
+ direntry, direntry->d_name.name, newinode);
if (newinode->i_nlink != 2)
- cFYI(1, ("unexpected number of links %d",
- newinode->i_nlink));
+ cFYI(1, "unexpected number of links %d",
+ newinode->i_nlink);
#endif
}
kfree(pInfo);
@@ -1206,7 +1206,7 @@ mkdir_retry_old:
rc = CIFSSMBMkDir(xid, pTcon, full_path, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc) {
- cFYI(1, ("cifs_mkdir returned 0x%x", rc));
+ cFYI(1, "cifs_mkdir returned 0x%x", rc);
d_drop(direntry);
} else {
mkdir_get_info:
@@ -1309,7 +1309,7 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
char *full_path = NULL;
struct cifsInodeInfo *cifsInode;
- cFYI(1, ("cifs_rmdir, inode = 0x%p", inode));
+ cFYI(1, "cifs_rmdir, inode = 0x%p", inode);
xid = GetXid();
@@ -1577,9 +1577,9 @@ int cifs_revalidate_dentry(struct dentry *dentry)
goto check_inval;
}
- cFYI(1, ("Revalidate: %s inode 0x%p count %d dentry: 0x%p d_time %ld "
+ cFYI(1, "Revalidate: %s inode 0x%p count %d dentry: 0x%p d_time %ld "
"jiffies %ld", full_path, inode, inode->i_count.counter,
- dentry, dentry->d_time, jiffies));
+ dentry, dentry->d_time, jiffies);
if (CIFS_SB(sb)->tcon->unix_ext)
rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid);
@@ -1673,12 +1673,12 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
rc = CIFSSMBSetFileSize(xid, pTcon, attrs->ia_size, nfid,
npid, false);
cifsFileInfo_put(open_file);
- cFYI(1, ("SetFSize for attrs rc = %d", rc));
+ cFYI(1, "SetFSize for attrs rc = %d", rc);
if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
unsigned int bytes_written;
rc = CIFSSMBWrite(xid, pTcon, nfid, 0, attrs->ia_size,
&bytes_written, NULL, NULL, 1);
- cFYI(1, ("Wrt seteof rc %d", rc));
+ cFYI(1, "Wrt seteof rc %d", rc);
}
} else
rc = -EINVAL;
@@ -1692,7 +1692,7 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
false, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
- cFYI(1, ("SetEOF by path (setattrs) rc = %d", rc));
+ cFYI(1, "SetEOF by path (setattrs) rc = %d", rc);
if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
__u16 netfid;
int oplock = 0;
@@ -1709,7 +1709,7 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
attrs->ia_size,
&bytes_written, NULL,
NULL, 1);
- cFYI(1, ("wrt seteof rc %d", rc));
+ cFYI(1, "wrt seteof rc %d", rc);
CIFSSMBClose(xid, pTcon, netfid);
}
}
@@ -1737,8 +1737,8 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
struct cifs_unix_set_info_args *args = NULL;
struct cifsFileInfo *open_file;
- cFYI(1, ("setattr_unix on file %s attrs->ia_valid=0x%x",
- direntry->d_name.name, attrs->ia_valid));
+ cFYI(1, "setattr_unix on file %s attrs->ia_valid=0x%x",
+ direntry->d_name.name, attrs->ia_valid);
xid = GetXid();
@@ -1868,8 +1868,8 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
xid = GetXid();
- cFYI(1, ("setattr on file %s attrs->iavalid 0x%x",
- direntry->d_name.name, attrs->ia_valid));
+ cFYI(1, "setattr on file %s attrs->iavalid 0x%x",
+ direntry->d_name.name, attrs->ia_valid);
if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) == 0) {
/* check if we have permission to change attrs */
@@ -1926,7 +1926,7 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
attrs->ia_valid &= ~ATTR_MODE;
if (attrs->ia_valid & ATTR_MODE) {
- cFYI(1, ("Mode changed to 0%o", attrs->ia_mode));
+ cFYI(1, "Mode changed to 0%o", attrs->ia_mode);
mode = attrs->ia_mode;
}
@@ -2012,7 +2012,7 @@ cifs_setattr(struct dentry *direntry, struct iattr *attrs)
#if 0
void cifs_delete_inode(struct inode *inode)
{
- cFYI(1, ("In cifs_delete_inode, inode = 0x%p", inode));
+ cFYI(1, "In cifs_delete_inode, inode = 0x%p", inode);
/* may have to add back in if and when safe distributed caching of
directories added e.g. via FindNotify */
}
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index f94650683a00..505926f1ee6b 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -47,7 +47,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
xid = GetXid();
- cFYI(1, ("ioctl file %p cmd %u arg %lu", filep, command, arg));
+ cFYI(1, "ioctl file %p cmd %u arg %lu", filep, command, arg);
cifs_sb = CIFS_SB(inode->i_sb);
@@ -64,12 +64,12 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
switch (command) {
case CIFS_IOC_CHECKUMOUNT:
- cFYI(1, ("User unmount attempted"));
+ cFYI(1, "User unmount attempted");
if (cifs_sb->mnt_uid == current_uid())
rc = 0;
else {
rc = -EACCES;
- cFYI(1, ("uids do not match"));
+ cFYI(1, "uids do not match");
}
break;
#ifdef CONFIG_CIFS_POSIX
@@ -97,11 +97,11 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
/* rc= CIFSGetExtAttr(xid,tcon,pSMBFile->netfid,
extAttrBits, &ExtAttrMask);*/
}
- cFYI(1, ("set flags not implemented yet"));
+ cFYI(1, "set flags not implemented yet");
break;
#endif /* CONFIG_CIFS_POSIX */
default:
- cFYI(1, ("unsupported ioctl"));
+ cFYI(1, "unsupported ioctl");
break;
}
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index c1a9d4236a8c..473ca8033656 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -139,7 +139,7 @@ cifs_follow_link(struct dentry *direntry, struct nameidata *nd)
if (!full_path)
goto out;
- cFYI(1, ("Full path: %s inode = 0x%p", full_path, inode));
+ cFYI(1, "Full path: %s inode = 0x%p", full_path, inode);
rc = CIFSSMBUnixQuerySymLink(xid, tcon, full_path, &target_path,
cifs_sb->local_nls);
@@ -178,8 +178,8 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
return rc;
}
- cFYI(1, ("Full path: %s", full_path));
- cFYI(1, ("symname is %s", symname));
+ cFYI(1, "Full path: %s", full_path);
+ cFYI(1, "symname is %s", symname);
/* BB what if DFS and this volume is on different share? BB */
if (pTcon->unix_ext)
@@ -198,8 +198,8 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
inode->i_sb, xid, NULL);
if (rc != 0) {
- cFYI(1, ("Create symlink ok, getinodeinfo fail rc = %d",
- rc));
+ cFYI(1, "Create symlink ok, getinodeinfo fail rc = %d",
+ rc);
} else {
if (pTcon->nocase)
direntry->d_op = &cifs_ci_dentry_ops;
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index d1474996a812..1394aa37f26c 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -51,7 +51,7 @@ _GetXid(void)
if (GlobalTotalActiveXid > GlobalMaxActiveXid)
GlobalMaxActiveXid = GlobalTotalActiveXid;
if (GlobalTotalActiveXid > 65000)
- cFYI(1, ("warning: more than 65000 requests active"));
+ cFYI(1, "warning: more than 65000 requests active");
xid = GlobalCurrentXid++;
spin_unlock(&GlobalMid_Lock);
return xid;
@@ -88,7 +88,7 @@ void
sesInfoFree(struct cifsSesInfo *buf_to_free)
{
if (buf_to_free == NULL) {
- cFYI(1, ("Null buffer passed to sesInfoFree"));
+ cFYI(1, "Null buffer passed to sesInfoFree");
return;
}
@@ -126,7 +126,7 @@ void
tconInfoFree(struct cifsTconInfo *buf_to_free)
{
if (buf_to_free == NULL) {
- cFYI(1, ("Null buffer passed to tconInfoFree"));
+ cFYI(1, "Null buffer passed to tconInfoFree");
return;
}
atomic_dec(&tconInfoAllocCount);
@@ -166,7 +166,7 @@ void
cifs_buf_release(void *buf_to_free)
{
if (buf_to_free == NULL) {
- /* cFYI(1, ("Null buffer passed to cifs_buf_release"));*/
+ /* cFYI(1, "Null buffer passed to cifs_buf_release");*/
return;
}
mempool_free(buf_to_free, cifs_req_poolp);
@@ -202,7 +202,7 @@ cifs_small_buf_release(void *buf_to_free)
{
if (buf_to_free == NULL) {
- cFYI(1, ("Null buffer passed to cifs_small_buf_release"));
+ cFYI(1, "Null buffer passed to cifs_small_buf_release");
return;
}
mempool_free(buf_to_free, cifs_sm_req_poolp);
@@ -345,19 +345,19 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
/* with userid/password pairs found on the smb session */
/* for other target tcp/ip addresses BB */
if (current_fsuid() != treeCon->ses->linux_uid) {
- cFYI(1, ("Multiuser mode and UID "
- "did not match tcon uid"));
+ cFYI(1, "Multiuser mode and UID "
+ "did not match tcon uid");
read_lock(&cifs_tcp_ses_lock);
list_for_each(temp_item, &treeCon->ses->server->smb_ses_list) {
ses = list_entry(temp_item, struct cifsSesInfo, smb_ses_list);
if (ses->linux_uid == current_fsuid()) {
if (ses->server == treeCon->ses->server) {
- cFYI(1, ("found matching uid substitute right smb_uid"));
+ cFYI(1, "found matching uid substitute right smb_uid");
buffer->Uid = ses->Suid;
break;
} else {
/* BB eventually call cifs_setup_session here */
- cFYI(1, ("local UID found but no smb sess with this server exists"));
+ cFYI(1, "local UID found but no smb sess with this server exists");
}
}
}
@@ -394,17 +394,16 @@ checkSMBhdr(struct smb_hdr *smb, __u16 mid)
if (smb->Command == SMB_COM_LOCKING_ANDX)
return 0;
else
- cERROR(1, ("Received Request not response"));
+ cERROR(1, "Received Request not response");
}
} else { /* bad signature or mid */
if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff))
- cERROR(1,
- ("Bad protocol string signature header %x",
- *(unsigned int *) smb->Protocol));
+ cERROR(1, "Bad protocol string signature header %x",
+ *(unsigned int *) smb->Protocol);
if (mid != smb->Mid)
- cERROR(1, ("Mids do not match"));
+ cERROR(1, "Mids do not match");
}
- cERROR(1, ("bad smb detected. The Mid=%d", smb->Mid));
+ cERROR(1, "bad smb detected. The Mid=%d", smb->Mid);
return 1;
}
@@ -413,7 +412,7 @@ checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length)
{
__u32 len = smb->smb_buf_length;
__u32 clc_len; /* calculated length */
- cFYI(0, ("checkSMB Length: 0x%x, smb_buf_length: 0x%x", length, len));
+ cFYI(0, "checkSMB Length: 0x%x, smb_buf_length: 0x%x", length, len);
if (length < 2 + sizeof(struct smb_hdr)) {
if ((length >= sizeof(struct smb_hdr) - 1)
@@ -437,15 +436,15 @@ checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length)
tmp[sizeof(struct smb_hdr)+1] = 0;
return 0;
}
- cERROR(1, ("rcvd invalid byte count (bcc)"));
+ cERROR(1, "rcvd invalid byte count (bcc)");
} else {
- cERROR(1, ("Length less than smb header size"));
+ cERROR(1, "Length less than smb header size");
}
return 1;
}
if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
- cERROR(1, ("smb length greater than MaxBufSize, mid=%d",
- smb->Mid));
+ cERROR(1, "smb length greater than MaxBufSize, mid=%d",
+ smb->Mid);
return 1;
}
@@ -454,8 +453,8 @@ checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length)
clc_len = smbCalcSize_LE(smb);
if (4 + len != length) {
- cERROR(1, ("Length read does not match RFC1001 length %d",
- len));
+ cERROR(1, "Length read does not match RFC1001 length %d",
+ len);
return 1;
}
@@ -466,8 +465,8 @@ checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length)
if (((4 + len) & 0xFFFF) == (clc_len & 0xFFFF))
return 0; /* bcc wrapped */
}
- cFYI(1, ("Calculated size %d vs length %d mismatch for mid %d",
- clc_len, 4 + len, smb->Mid));
+ cFYI(1, "Calculated size %d vs length %d mismatch for mid %d",
+ clc_len, 4 + len, smb->Mid);
/* Windows XP can return a few bytes too much, presumably
an illegal pad, at the end of byte range lock responses
so we allow for that three byte pad, as long as actual
@@ -482,8 +481,8 @@ checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length)
if ((4+len > clc_len) && (len <= clc_len + 512))
return 0;
else {
- cERROR(1, ("RFC1001 size %d bigger than SMB for Mid=%d",
- len, smb->Mid));
+ cERROR(1, "RFC1001 size %d bigger than SMB for Mid=%d",
+ len, smb->Mid);
return 1;
}
}
@@ -501,7 +500,7 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
struct cifsFileInfo *netfile;
int rc;
- cFYI(1, ("Checking for oplock break or dnotify response"));
+ cFYI(1, "Checking for oplock break or dnotify response");
if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
(pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
struct smb_com_transaction_change_notify_rsp *pSMBr =
@@ -513,15 +512,15 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
pnotify = (struct file_notify_information *)
((char *)&pSMBr->hdr.Protocol + data_offset);
- cFYI(1, ("dnotify on %s Action: 0x%x",
- pnotify->FileName, pnotify->Action));
+ cFYI(1, "dnotify on %s Action: 0x%x",
+ pnotify->FileName, pnotify->Action);
/* cifs_dump_mem("Rcvd notify Data: ",buf,
sizeof(struct smb_hdr)+60); */
return true;
}
if (pSMBr->hdr.Status.CifsError) {
- cFYI(1, ("notify err 0x%d",
- pSMBr->hdr.Status.CifsError));
+ cFYI(1, "notify err 0x%d",
+ pSMBr->hdr.Status.CifsError);
return true;
}
return false;
@@ -535,7 +534,7 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
large dirty files cached on the client */
if ((NT_STATUS_INVALID_HANDLE) ==
le32_to_cpu(pSMB->hdr.Status.CifsError)) {
- cFYI(1, ("invalid handle on oplock break"));
+ cFYI(1, "invalid handle on oplock break");
return true;
} else if (ERRbadfid ==
le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
@@ -547,8 +546,8 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
if (pSMB->hdr.WordCount != 8)
return false;
- cFYI(1, ("oplock type 0x%d level 0x%d",
- pSMB->LockType, pSMB->OplockLevel));
+ cFYI(1, "oplock type 0x%d level 0x%d",
+ pSMB->LockType, pSMB->OplockLevel);
if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
return false;
@@ -579,15 +578,15 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
return true;
}
- cFYI(1, ("file id match, oplock break"));
+ cFYI(1, "file id match, oplock break");
pCifsInode = CIFS_I(netfile->pInode);
pCifsInode->clientCanCacheAll = false;
if (pSMB->OplockLevel == 0)
pCifsInode->clientCanCacheRead = false;
rc = slow_work_enqueue(&netfile->oplock_break);
if (rc) {
- cERROR(1, ("failed to enqueue oplock "
- "break: %d\n", rc));
+ cERROR(1, "failed to enqueue oplock "
+ "break: %d\n", rc);
} else {
netfile->oplock_break_cancelled = false;
}
@@ -597,12 +596,12 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
}
read_unlock(&GlobalSMBSeslock);
read_unlock(&cifs_tcp_ses_lock);
- cFYI(1, ("No matching file for oplock break"));
+ cFYI(1, "No matching file for oplock break");
return true;
}
}
read_unlock(&cifs_tcp_ses_lock);
- cFYI(1, ("Can not process oplock break for non-existent connection"));
+ cFYI(1, "Can not process oplock break for non-existent connection");
return true;
}
@@ -721,11 +720,11 @@ cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
{
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
- cERROR(1, ("Autodisabling the use of server inode numbers on "
+ cERROR(1, "Autodisabling the use of server inode numbers on "
"%s. This server doesn't seem to support them "
"properly. Hardlinks will not be recognized on this "
"mount. Consider mounting with the \"noserverino\" "
"option to silence this message.",
- cifs_sb->tcon->treeName));
+ cifs_sb->tcon->treeName);
}
}
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index bd6d6895730d..d35d52889cb5 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -149,7 +149,7 @@ cifs_inet_pton(const int address_family, const char *cp, void *dst)
else if (address_family == AF_INET6)
ret = in6_pton(cp, -1 /* len */, dst , '\\', NULL);
- cFYI(DBG2, ("address conversion returned %d for %s", ret, cp));
+ cFYI(DBG2, "address conversion returned %d for %s", ret, cp);
if (ret > 0)
ret = 1;
return ret;
@@ -870,8 +870,8 @@ map_smb_to_linux_error(struct smb_hdr *smb, int logErr)
}
/* else ERRHRD class errors or junk - return EIO */
- cFYI(1, ("Mapping smb error code %d to POSIX err %d",
- smberrcode, rc));
+ cFYI(1, "Mapping smb error code %d to POSIX err %d",
+ smberrcode, rc);
/* generic corrective action e.g. reconnect SMB session on
* ERRbaduid could be added */
@@ -940,20 +940,20 @@ struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time, int offset)
SMB_TIME *st = (SMB_TIME *)&time;
SMB_DATE *sd = (SMB_DATE *)&date;
- cFYI(1, ("date %d time %d", date, time));
+ cFYI(1, "date %d time %d", date, time);
sec = 2 * st->TwoSeconds;
min = st->Minutes;
if ((sec > 59) || (min > 59))
- cERROR(1, ("illegal time min %d sec %d", min, sec));
+ cERROR(1, "illegal time min %d sec %d", min, sec);
sec += (min * 60);
sec += 60 * 60 * st->Hours;
if (st->Hours > 24)
- cERROR(1, ("illegal hours %d", st->Hours));
+ cERROR(1, "illegal hours %d", st->Hours);
days = sd->Day;
month = sd->Month;
if ((days > 31) || (month > 12)) {
- cERROR(1, ("illegal date, month %d day: %d", month, days));
+ cERROR(1, "illegal date, month %d day: %d", month, days);
if (month > 12)
month = 12;
}
@@ -979,7 +979,7 @@ struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time, int offset)
ts.tv_sec = sec + offset;
- /* cFYI(1,("sec after cnvrt dos to unix time %d",sec)); */
+ /* cFYI(1, "sec after cnvrt dos to unix time %d",sec); */
ts.tv_nsec = 0;
return ts;
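
The cnvrtDosUnixTm() hunks above decode the packed SMB (DOS-style) date and time words field by field: seconds are stored in two-second units in the low bits of the time word, and the year is stored as an offset from 1980 in the high bits of the date word. A small stand-alone sketch of that layout follows; this is a hypothetical helper for reference, not part of the patch, which reads the same fields through the SMB_TIME/SMB_DATE bitfield structs instead.

/* Hypothetical helper showing the packed DOS date/time layout:
 * time: bits 15-11 hours, 10-5 minutes, 4-0 seconds/2
 * date: bits 15-9  years since 1980, 8-5 month, 4-0 day
 */
static void decode_dos_datetime(u16 dos_date, u16 dos_time,
				int *year, int *month, int *day,
				int *hour, int *min, int *sec)
{
	*sec   = (dos_time & 0x1f) * 2;
	*min   = (dos_time >> 5) & 0x3f;
	*hour  = (dos_time >> 11) & 0x1f;
	*day   =  dos_date & 0x1f;
	*month = (dos_date >> 5) & 0x0f;
	*year  = ((dos_date >> 9) & 0x7f) + 1980;
}
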
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 18e0bc1fb593..daf1753af674 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -47,15 +47,15 @@ static void dump_cifs_file_struct(struct file *file, char *label)
if (file) {
cf = file->private_data;
if (cf == NULL) {
- cFYI(1, ("empty cifs private file data"));
+ cFYI(1, "empty cifs private file data");
return;
}
if (cf->invalidHandle)
- cFYI(1, ("invalid handle"));
+ cFYI(1, "invalid handle");
if (cf->srch_inf.endOfSearch)
- cFYI(1, ("end of search"));
+ cFYI(1, "end of search");
if (cf->srch_inf.emptyDir)
- cFYI(1, ("empty dir"));
+ cFYI(1, "empty dir");
}
}
#else
@@ -76,7 +76,7 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name,
struct inode *inode;
struct super_block *sb = parent->d_inode->i_sb;
- cFYI(1, ("For %s", name->name));
+ cFYI(1, "For %s", name->name);
if (parent->d_op && parent->d_op->d_hash)
parent->d_op->d_hash(parent, name);
@@ -214,7 +214,7 @@ int get_symlink_reparse_path(char *full_path, struct cifs_sb_info *cifs_sb,
fid,
cifs_sb->local_nls);
if (CIFSSMBClose(xid, ptcon, fid)) {
- cFYI(1, ("Error closing temporary reparsepoint open)"));
+ cFYI(1, "Error closing temporary reparsepoint open");
}
}
}
@@ -252,7 +252,7 @@ static int initiate_cifs_search(const int xid, struct file *file)
if (full_path == NULL)
return -ENOMEM;
- cFYI(1, ("Full path: %s start at: %lld", full_path, file->f_pos));
+ cFYI(1, "Full path: %s start at: %lld", full_path, file->f_pos);
ffirst_retry:
/* test for Unix extensions */
@@ -297,7 +297,7 @@ static int cifs_unicode_bytelen(char *str)
if (ustr[len] == 0)
return len << 1;
}
- cFYI(1, ("Unicode string longer than PATH_MAX found"));
+ cFYI(1, "Unicode string longer than PATH_MAX found");
return len << 1;
}
@@ -314,19 +314,18 @@ static char *nxt_dir_entry(char *old_entry, char *end_of_smb, int level)
pfData->FileNameLength;
} else
new_entry = old_entry + le32_to_cpu(pDirInfo->NextEntryOffset);
- cFYI(1, ("new entry %p old entry %p", new_entry, old_entry));
+ cFYI(1, "new entry %p old entry %p", new_entry, old_entry);
/* validate that new_entry is not past end of SMB */
if (new_entry >= end_of_smb) {
- cERROR(1,
- ("search entry %p began after end of SMB %p old entry %p",
- new_entry, end_of_smb, old_entry));
+ cERROR(1, "search entry %p began after end of SMB %p old entry %p",
+ new_entry, end_of_smb, old_entry);
return NULL;
} else if (((level == SMB_FIND_FILE_INFO_STANDARD) &&
(new_entry + sizeof(FIND_FILE_STANDARD_INFO) > end_of_smb))
|| ((level != SMB_FIND_FILE_INFO_STANDARD) &&
(new_entry + sizeof(FILE_DIRECTORY_INFO) > end_of_smb))) {
- cERROR(1, ("search entry %p extends after end of SMB %p",
- new_entry, end_of_smb));
+ cERROR(1, "search entry %p extends after end of SMB %p",
+ new_entry, end_of_smb);
return NULL;
} else
return new_entry;
@@ -380,8 +379,8 @@ static int cifs_entry_is_dot(char *current_entry, struct cifsFileInfo *cfile)
filename = &pFindData->FileName[0];
len = pFindData->FileNameLength;
} else {
- cFYI(1, ("Unknown findfirst level %d",
- cfile->srch_inf.info_level));
+ cFYI(1, "Unknown findfirst level %d",
+ cfile->srch_inf.info_level);
}
if (filename) {
@@ -481,7 +480,7 @@ static int cifs_save_resume_key(const char *current_entry,
len = (unsigned int)pFindData->FileNameLength;
cifsFile->srch_inf.resume_key = pFindData->ResumeKey;
} else {
- cFYI(1, ("Unknown findfirst level %d", level));
+ cFYI(1, "Unknown findfirst level %d", level);
return -EINVAL;
}
cifsFile->srch_inf.resume_name_len = len;
@@ -525,7 +524,7 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
is_dir_changed(file)) ||
(index_to_find < first_entry_in_buffer)) {
/* close and restart search */
- cFYI(1, ("search backing up - close and restart search"));
+ cFYI(1, "search backing up - close and restart search");
write_lock(&GlobalSMBSeslock);
if (!cifsFile->srch_inf.endOfSearch &&
!cifsFile->invalidHandle) {
@@ -535,7 +534,7 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
} else
write_unlock(&GlobalSMBSeslock);
if (cifsFile->srch_inf.ntwrk_buf_start) {
- cFYI(1, ("freeing SMB ff cache buf on search rewind"));
+ cFYI(1, "freeing SMB ff cache buf on search rewind");
if (cifsFile->srch_inf.smallBuf)
cifs_small_buf_release(cifsFile->srch_inf.
ntwrk_buf_start);
@@ -546,8 +545,8 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
}
rc = initiate_cifs_search(xid, file);
if (rc) {
- cFYI(1, ("error %d reinitiating a search on rewind",
- rc));
+ cFYI(1, "error %d reinitiating a search on rewind",
+ rc);
return rc;
}
cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile);
@@ -555,7 +554,7 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
while ((index_to_find >= cifsFile->srch_inf.index_of_last_entry) &&
(rc == 0) && !cifsFile->srch_inf.endOfSearch) {
- cFYI(1, ("calling findnext2"));
+ cFYI(1, "calling findnext2");
rc = CIFSFindNext(xid, pTcon, cifsFile->netfid,
&cifsFile->srch_inf);
cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile);
@@ -575,7 +574,7 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
first_entry_in_buffer = cifsFile->srch_inf.index_of_last_entry
- cifsFile->srch_inf.entries_in_buffer;
pos_in_buf = index_to_find - first_entry_in_buffer;
- cFYI(1, ("found entry - pos_in_buf %d", pos_in_buf));
+ cFYI(1, "found entry - pos_in_buf %d", pos_in_buf);
for (i = 0; (i < (pos_in_buf)) && (current_entry != NULL); i++) {
/* go entry by entry figuring out which is first */
@@ -584,19 +583,19 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
}
if ((current_entry == NULL) && (i < pos_in_buf)) {
/* BB fixme - check if we should flag this error */
- cERROR(1, ("reached end of buf searching for pos in buf"
+ cERROR(1, "reached end of buf searching for pos in buf"
" %d index to find %lld rc %d",
- pos_in_buf, index_to_find, rc));
+ pos_in_buf, index_to_find, rc);
}
rc = 0;
*ppCurrentEntry = current_entry;
} else {
- cFYI(1, ("index not in buffer - could not findnext into it"));
+ cFYI(1, "index not in buffer - could not findnext into it");
return 0;
}
if (pos_in_buf >= cifsFile->srch_inf.entries_in_buffer) {
- cFYI(1, ("can not return entries pos_in_buf beyond last"));
+ cFYI(1, "can not return entries pos_in_buf beyond last");
*num_to_ret = 0;
} else
*num_to_ret = cifsFile->srch_inf.entries_in_buffer - pos_in_buf;
@@ -656,12 +655,12 @@ static int cifs_get_name_from_search_buf(struct qstr *pqst,
/* one byte length, no name conversion */
len = (unsigned int)pFindData->FileNameLength;
} else {
- cFYI(1, ("Unknown findfirst level %d", level));
+ cFYI(1, "Unknown findfirst level %d", level);
return -EINVAL;
}
if (len > max_len) {
- cERROR(1, ("bad search response length %d past smb end", len));
+ cERROR(1, "bad search response length %d past smb end", len);
return -EINVAL;
}
@@ -754,7 +753,7 @@ static int cifs_filldir(char *pfindEntry, struct file *file, filldir_t filldir,
* case already. Why should we be clobbering other errors from it?
*/
if (rc) {
- cFYI(1, ("filldir rc = %d", rc));
+ cFYI(1, "filldir rc = %d", rc);
rc = -EOVERFLOW;
}
dput(tmp_dentry);
@@ -786,7 +785,7 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
case 0:
if (filldir(direntry, ".", 1, file->f_pos,
file->f_path.dentry->d_inode->i_ino, DT_DIR) < 0) {
- cERROR(1, ("Filldir for current dir failed"));
+ cERROR(1, "Filldir for current dir failed");
rc = -ENOMEM;
break;
}
@@ -794,7 +793,7 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
case 1:
if (filldir(direntry, "..", 2, file->f_pos,
file->f_path.dentry->d_parent->d_inode->i_ino, DT_DIR) < 0) {
- cERROR(1, ("Filldir for parent dir failed"));
+ cERROR(1, "Filldir for parent dir failed");
rc = -ENOMEM;
break;
}
@@ -807,7 +806,7 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
if (file->private_data == NULL) {
rc = initiate_cifs_search(xid, file);
- cFYI(1, ("initiate cifs search rc %d", rc));
+ cFYI(1, "initiate cifs search rc %d", rc);
if (rc) {
FreeXid(xid);
return rc;
@@ -821,7 +820,7 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
cifsFile = file->private_data;
if (cifsFile->srch_inf.endOfSearch) {
if (cifsFile->srch_inf.emptyDir) {
- cFYI(1, ("End of search, empty dir"));
+ cFYI(1, "End of search, empty dir");
rc = 0;
break;
}
@@ -833,16 +832,16 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
rc = find_cifs_entry(xid, pTcon, file,
&current_entry, &num_to_fill);
if (rc) {
- cFYI(1, ("fce error %d", rc));
+ cFYI(1, "fce error %d", rc);
goto rddir2_exit;
} else if (current_entry != NULL) {
- cFYI(1, ("entry %lld found", file->f_pos));
+ cFYI(1, "entry %lld found", file->f_pos);
} else {
- cFYI(1, ("could not find entry"));
+ cFYI(1, "could not find entry");
goto rddir2_exit;
}
- cFYI(1, ("loop through %d times filling dir for net buf %p",
- num_to_fill, cifsFile->srch_inf.ntwrk_buf_start));
+ cFYI(1, "loop through %d times filling dir for net buf %p",
+ num_to_fill, cifsFile->srch_inf.ntwrk_buf_start);
max_len = smbCalcSize((struct smb_hdr *)
cifsFile->srch_inf.ntwrk_buf_start);
end_of_smb = cifsFile->srch_inf.ntwrk_buf_start + max_len;
@@ -851,8 +850,8 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
for (i = 0; (i < num_to_fill) && (rc == 0); i++) {
if (current_entry == NULL) {
/* evaluate whether this case is an error */
- cERROR(1, ("past SMB end, num to fill %d i %d",
- num_to_fill, i));
+ cERROR(1, "past SMB end, num to fill %d i %d",
+ num_to_fill, i);
break;
}
/* if buggy server returns . and .. late do
@@ -867,8 +866,8 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
file->f_pos++;
if (file->f_pos ==
cifsFile->srch_inf.index_of_last_entry) {
- cFYI(1, ("last entry in buf at pos %lld %s",
- file->f_pos, tmp_buf));
+ cFYI(1, "last entry in buf at pos %lld %s",
+ file->f_pos, tmp_buf);
cifs_save_resume_key(current_entry, cifsFile);
break;
} else
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 7c3fd7463f44..7707389bdf2c 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -35,9 +35,11 @@
extern void SMBNTencrypt(unsigned char *passwd, unsigned char *c8,
unsigned char *p24);
-/* Checks if this is the first smb session to be reconnected after
- the socket has been reestablished (so we know whether to use vc 0).
- Called while holding the cifs_tcp_ses_lock, so do not block */
+/*
+ * Checks if this is the first smb session to be reconnected after
+ * the socket has been reestablished (so we know whether to use vc 0).
+ * Called while holding the cifs_tcp_ses_lock, so do not block
+ */
static bool is_first_ses_reconnect(struct cifsSesInfo *ses)
{
struct list_head *tmp;
@@ -284,7 +286,7 @@ decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifsSesInfo *ses,
int len;
char *data = *pbcc_area;
- cFYI(1, ("bleft %d", bleft));
+ cFYI(1, "bleft %d", bleft);
/*
* Windows servers do not always double null terminate their final
@@ -301,7 +303,7 @@ decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifsSesInfo *ses,
kfree(ses->serverOS);
ses->serverOS = cifs_strndup_from_ucs(data, bleft, true, nls_cp);
- cFYI(1, ("serverOS=%s", ses->serverOS));
+ cFYI(1, "serverOS=%s", ses->serverOS);
len = (UniStrnlen((wchar_t *) data, bleft / 2) * 2) + 2;
data += len;
bleft -= len;
@@ -310,7 +312,7 @@ decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifsSesInfo *ses,
kfree(ses->serverNOS);
ses->serverNOS = cifs_strndup_from_ucs(data, bleft, true, nls_cp);
- cFYI(1, ("serverNOS=%s", ses->serverNOS));
+ cFYI(1, "serverNOS=%s", ses->serverNOS);
len = (UniStrnlen((wchar_t *) data, bleft / 2) * 2) + 2;
data += len;
bleft -= len;
@@ -319,7 +321,7 @@ decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifsSesInfo *ses,
kfree(ses->serverDomain);
ses->serverDomain = cifs_strndup_from_ucs(data, bleft, true, nls_cp);
- cFYI(1, ("serverDomain=%s", ses->serverDomain));
+ cFYI(1, "serverDomain=%s", ses->serverDomain);
return;
}
@@ -332,7 +334,7 @@ static int decode_ascii_ssetup(char **pbcc_area, int bleft,
int len;
char *bcc_ptr = *pbcc_area;
- cFYI(1, ("decode sessetup ascii. bleft %d", bleft));
+ cFYI(1, "decode sessetup ascii. bleft %d", bleft);
len = strnlen(bcc_ptr, bleft);
if (len >= bleft)
@@ -344,7 +346,7 @@ static int decode_ascii_ssetup(char **pbcc_area, int bleft,
if (ses->serverOS)
strncpy(ses->serverOS, bcc_ptr, len);
if (strncmp(ses->serverOS, "OS/2", 4) == 0) {
- cFYI(1, ("OS/2 server"));
+ cFYI(1, "OS/2 server");
ses->flags |= CIFS_SES_OS2;
}
@@ -373,7 +375,7 @@ static int decode_ascii_ssetup(char **pbcc_area, int bleft,
/* BB For newer servers which do not support Unicode
but thus do return domain here, we could add parsing
for it later, but it is not very important */
- cFYI(1, ("ascii: bytes left %d", bleft));
+ cFYI(1, "ascii: bytes left %d", bleft);
return rc;
}
@@ -384,16 +386,16 @@ static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
CHALLENGE_MESSAGE *pblob = (CHALLENGE_MESSAGE *)bcc_ptr;
if (blob_len < sizeof(CHALLENGE_MESSAGE)) {
- cERROR(1, ("challenge blob len %d too small", blob_len));
+ cERROR(1, "challenge blob len %d too small", blob_len);
return -EINVAL;
}
if (memcmp(pblob->Signature, "NTLMSSP", 8)) {
- cERROR(1, ("blob signature incorrect %s", pblob->Signature));
+ cERROR(1, "blob signature incorrect %s", pblob->Signature);
return -EINVAL;
}
if (pblob->MessageType != NtLmChallenge) {
- cERROR(1, ("Incorrect message type %d", pblob->MessageType));
+ cERROR(1, "Incorrect message type %d", pblob->MessageType);
return -EINVAL;
}
@@ -447,7 +449,7 @@ static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
This function returns the length of the data in the blob */
static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
struct cifsSesInfo *ses,
- const struct nls_table *nls_cp, int first)
+ const struct nls_table *nls_cp, bool first)
{
AUTHENTICATE_MESSAGE *sec_blob = (AUTHENTICATE_MESSAGE *)pbuffer;
__u32 flags;
@@ -546,7 +548,7 @@ static void setup_ntlmssp_neg_req(SESSION_SETUP_ANDX *pSMB,
static int setup_ntlmssp_auth_req(SESSION_SETUP_ANDX *pSMB,
struct cifsSesInfo *ses,
- const struct nls_table *nls, int first_time)
+ const struct nls_table *nls, bool first_time)
{
int bloblen;
@@ -559,8 +561,8 @@ static int setup_ntlmssp_auth_req(SESSION_SETUP_ANDX *pSMB,
#endif
int
-CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, int first_time,
- const struct nls_table *nls_cp)
+CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses,
+ const struct nls_table *nls_cp)
{
int rc = 0;
int wct;
@@ -577,13 +579,18 @@ CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, int first_time,
int bytes_remaining;
struct key *spnego_key = NULL;
__le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */
+ bool first_time;
if (ses == NULL)
return -EINVAL;
+ read_lock(&cifs_tcp_ses_lock);
+ first_time = is_first_ses_reconnect(ses);
+ read_unlock(&cifs_tcp_ses_lock);
+
type = ses->server->secType;
- cFYI(1, ("sess setup type %d", type));
+ cFYI(1, "sess setup type %d", type);
ssetup_ntlmssp_authenticate:
if (phase == NtLmChallenge)
phase = NtLmAuthenticate; /* if ntlmssp, now final phase */
@@ -664,7 +671,7 @@ ssetup_ntlmssp_authenticate:
changed to do higher than lanman dialect and
we reconnected would we ever calc signing_key? */
- cFYI(1, ("Negotiating LANMAN setting up strings"));
+ cFYI(1, "Negotiating LANMAN setting up strings");
/* Unicode not allowed for LANMAN dialects */
ascii_ssetup_strings(&bcc_ptr, ses, nls_cp);
#endif
@@ -744,7 +751,7 @@ ssetup_ntlmssp_authenticate:
unicode_ssetup_strings(&bcc_ptr, ses, nls_cp);
} else
ascii_ssetup_strings(&bcc_ptr, ses, nls_cp);
- } else if (type == Kerberos || type == MSKerberos) {
+ } else if (type == Kerberos) {
#ifdef CONFIG_CIFS_UPCALL
struct cifs_spnego_msg *msg;
spnego_key = cifs_get_spnego_key(ses);
@@ -758,17 +765,17 @@ ssetup_ntlmssp_authenticate:
/* check version field to make sure that cifs.upcall is
sending us a response in an expected form */
if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
- cERROR(1, ("incorrect version of cifs.upcall (expected"
+ cERROR(1, "incorrect version of cifs.upcall (expected"
" %d but got %d)",
- CIFS_SPNEGO_UPCALL_VERSION, msg->version));
+ CIFS_SPNEGO_UPCALL_VERSION, msg->version);
rc = -EKEYREJECTED;
goto ssetup_exit;
}
/* bail out if key is too long */
if (msg->sesskey_len >
sizeof(ses->server->mac_signing_key.data.krb5)) {
- cERROR(1, ("Kerberos signing key too long (%u bytes)",
- msg->sesskey_len));
+ cERROR(1, "Kerberos signing key too long (%u bytes)",
+ msg->sesskey_len);
rc = -EOVERFLOW;
goto ssetup_exit;
}
@@ -796,7 +803,7 @@ ssetup_ntlmssp_authenticate:
/* BB: is this right? */
ascii_ssetup_strings(&bcc_ptr, ses, nls_cp);
#else /* ! CONFIG_CIFS_UPCALL */
- cERROR(1, ("Kerberos negotiated but upcall support disabled!"));
+ cERROR(1, "Kerberos negotiated but upcall support disabled!");
rc = -ENOSYS;
goto ssetup_exit;
#endif /* CONFIG_CIFS_UPCALL */
@@ -804,12 +811,12 @@ ssetup_ntlmssp_authenticate:
#ifdef CONFIG_CIFS_EXPERIMENTAL
if (type == RawNTLMSSP) {
if ((pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) == 0) {
- cERROR(1, ("NTLMSSP requires Unicode support"));
+ cERROR(1, "NTLMSSP requires Unicode support");
rc = -ENOSYS;
goto ssetup_exit;
}
- cFYI(1, ("ntlmssp session setup phase %d", phase));
+ cFYI(1, "ntlmssp session setup phase %d", phase);
pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
capabilities |= CAP_EXTENDED_SECURITY;
pSMB->req.Capabilities |= cpu_to_le32(capabilities);
@@ -827,7 +834,7 @@ ssetup_ntlmssp_authenticate:
on the response (challenge) */
smb_buf->Uid = ses->Suid;
} else {
- cERROR(1, ("invalid phase %d", phase));
+ cERROR(1, "invalid phase %d", phase);
rc = -ENOSYS;
goto ssetup_exit;
}
@@ -839,12 +846,12 @@ ssetup_ntlmssp_authenticate:
}
unicode_oslm_strings(&bcc_ptr, nls_cp);
} else {
- cERROR(1, ("secType %d not supported!", type));
+ cERROR(1, "secType %d not supported!", type);
rc = -ENOSYS;
goto ssetup_exit;
}
#else
- cERROR(1, ("secType %d not supported!", type));
+ cERROR(1, "secType %d not supported!", type);
rc = -ENOSYS;
goto ssetup_exit;
#endif
@@ -862,7 +869,7 @@ ssetup_ntlmssp_authenticate:
CIFS_STD_OP /* not long */ | CIFS_LOG_ERROR);
/* SMB request buf freed in SendReceive2 */
- cFYI(1, ("ssetup rc from sendrecv2 is %d", rc));
+ cFYI(1, "ssetup rc from sendrecv2 is %d", rc);
pSMB = (SESSION_SETUP_ANDX *)iov[0].iov_base;
smb_buf = (struct smb_hdr *)iov[0].iov_base;
@@ -870,7 +877,7 @@ ssetup_ntlmssp_authenticate:
if ((type == RawNTLMSSP) && (smb_buf->Status.CifsError ==
cpu_to_le32(NT_STATUS_MORE_PROCESSING_REQUIRED))) {
if (phase != NtLmNegotiate) {
- cERROR(1, ("Unexpected more processing error"));
+ cERROR(1, "Unexpected more processing error");
goto ssetup_exit;
}
/* NTLMSSP Negotiate sent now processing challenge (response) */
@@ -882,14 +889,14 @@ ssetup_ntlmssp_authenticate:
if ((smb_buf->WordCount != 3) && (smb_buf->WordCount != 4)) {
rc = -EIO;
- cERROR(1, ("bad word count %d", smb_buf->WordCount));
+ cERROR(1, "bad word count %d", smb_buf->WordCount);
goto ssetup_exit;
}
action = le16_to_cpu(pSMB->resp.Action);
if (action & GUEST_LOGIN)
- cFYI(1, ("Guest login")); /* BB mark SesInfo struct? */
+ cFYI(1, "Guest login"); /* BB mark SesInfo struct? */
ses->Suid = smb_buf->Uid; /* UID left in wire format (le) */
- cFYI(1, ("UID = %d ", ses->Suid));
+ cFYI(1, "UID = %d ", ses->Suid);
/* response can have either 3 or 4 word count - Samba sends 3 */
/* and lanman response is 3 */
bytes_remaining = BCC(smb_buf);
@@ -899,7 +906,7 @@ ssetup_ntlmssp_authenticate:
__u16 blob_len;
blob_len = le16_to_cpu(pSMB->resp.SecurityBlobLength);
if (blob_len > bytes_remaining) {
- cERROR(1, ("bad security blob length %d", blob_len));
+ cERROR(1, "bad security blob length %d", blob_len);
rc = -EINVAL;
goto ssetup_exit;
}
@@ -933,7 +940,7 @@ ssetup_exit:
}
kfree(str_area);
if (resp_buf_type == CIFS_SMALL_BUFFER) {
- cFYI(1, ("ssetup freeing small buf %p", iov[0].iov_base));
+ cFYI(1, "ssetup freeing small buf %p", iov[0].iov_base);
cifs_small_buf_release(iov[0].iov_base);
} else if (resp_buf_type == CIFS_LARGE_BUFFER)
cifs_buf_release(iov[0].iov_base);
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index ad081fe7eb18..82f78c4d6978 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -35,7 +35,6 @@
#include "cifs_debug.h"
extern mempool_t *cifs_mid_poolp;
-extern struct kmem_cache *cifs_oplock_cachep;
static struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
@@ -43,7 +42,7 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
struct mid_q_entry *temp;
if (server == NULL) {
- cERROR(1, ("Null TCP session in AllocMidQEntry"));
+ cERROR(1, "Null TCP session in AllocMidQEntry");
return NULL;
}
@@ -55,7 +54,7 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
temp->mid = smb_buffer->Mid; /* always LE */
temp->pid = current->pid;
temp->command = smb_buffer->Command;
- cFYI(1, ("For smb_command %d", temp->command));
+ cFYI(1, "For smb_command %d", temp->command);
/* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
/* when mid allocated can be before when sent */
temp->when_alloc = jiffies;
@@ -140,7 +139,7 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
total_len += iov[i].iov_len;
smb_buffer->smb_buf_length = cpu_to_be32(smb_buffer->smb_buf_length);
- cFYI(1, ("Sending smb: total_len %d", total_len));
+ cFYI(1, "Sending smb: total_len %d", total_len);
dump_smb(smb_buffer, len);
i = 0;
@@ -168,9 +167,8 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
reconnect which may clear the network problem.
*/
if ((i >= 14) || (!server->noblocksnd && (i > 2))) {
- cERROR(1,
- ("sends on sock %p stuck for 15 seconds",
- ssocket));
+ cERROR(1, "sends on sock %p stuck for 15 seconds",
+ ssocket);
rc = -EAGAIN;
break;
}
@@ -184,13 +182,13 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
total_len = 0;
break;
} else if (rc > total_len) {
- cERROR(1, ("sent %d requested %d", rc, total_len));
+ cERROR(1, "sent %d requested %d", rc, total_len);
break;
}
if (rc == 0) {
/* should never happen, letting socket clear before
retrying is our only obvious option here */
- cERROR(1, ("tcp sent no data"));
+ cERROR(1, "tcp sent no data");
msleep(500);
continue;
}
@@ -213,8 +211,8 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
}
if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
- cFYI(1, ("partial send (%d remaining), terminating session",
- total_len));
+ cFYI(1, "partial send (%d remaining), terminating session",
+ total_len);
/* If we have only sent part of an SMB then the next SMB
could be taken as the remainder of this one. We need
to kill the socket so the server throws away the partial
@@ -223,7 +221,7 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
}
if (rc < 0) {
- cERROR(1, ("Error %d sending data on socket to server", rc));
+ cERROR(1, "Error %d sending data on socket to server", rc);
} else
rc = 0;
@@ -296,7 +294,7 @@ static int allocate_mid(struct cifsSesInfo *ses, struct smb_hdr *in_buf,
}
if (ses->server->tcpStatus == CifsNeedReconnect) {
- cFYI(1, ("tcp session dead - return to caller to retry"));
+ cFYI(1, "tcp session dead - return to caller to retry");
return -EAGAIN;
}
@@ -348,7 +346,7 @@ static int wait_for_response(struct cifsSesInfo *ses,
lrt += time_to_wait;
if (time_after(jiffies, lrt)) {
/* No replies for time_to_wait. */
- cERROR(1, ("server not responding"));
+ cERROR(1, "server not responding");
return -1;
}
} else {
@@ -379,7 +377,7 @@ SendReceiveNoRsp(const unsigned int xid, struct cifsSesInfo *ses,
iov[0].iov_len = in_buf->smb_buf_length + 4;
flags |= CIFS_NO_RESP;
rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags);
- cFYI(DBG2, ("SendRcvNoRsp flags %d rc %d", flags, rc));
+ cFYI(DBG2, "SendRcvNoRsp flags %d rc %d", flags, rc);
return rc;
}
@@ -402,7 +400,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
if ((ses == NULL) || (ses->server == NULL)) {
cifs_small_buf_release(in_buf);
- cERROR(1, ("Null session"));
+ cERROR(1, "Null session");
return -EIO;
}
@@ -471,7 +469,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
else if (long_op == CIFS_BLOCKING_OP)
timeout = 0x7FFFFFFF; /* large, but not so large as to wrap */
else {
- cERROR(1, ("unknown timeout flag %d", long_op));
+ cERROR(1, "unknown timeout flag %d", long_op);
rc = -EIO;
goto out;
}
@@ -490,8 +488,8 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
spin_lock(&GlobalMid_Lock);
if (midQ->resp_buf == NULL) {
- cERROR(1, ("No response to cmd %d mid %d",
- midQ->command, midQ->mid));
+ cERROR(1, "No response to cmd %d mid %d",
+ midQ->command, midQ->mid);
if (midQ->midState == MID_REQUEST_SUBMITTED) {
if (ses->server->tcpStatus == CifsExiting)
rc = -EHOSTDOWN;
@@ -504,7 +502,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
if (rc != -EHOSTDOWN) {
if (midQ->midState == MID_RETRY_NEEDED) {
rc = -EAGAIN;
- cFYI(1, ("marking request for retry"));
+ cFYI(1, "marking request for retry");
} else {
rc = -EIO;
}
@@ -521,8 +519,8 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
receive_len = midQ->resp_buf->smb_buf_length;
if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
- cERROR(1, ("Frame too large received. Length: %d Xid: %d",
- receive_len, xid));
+ cERROR(1, "Frame too large received. Length: %d Xid: %d",
+ receive_len, xid);
rc = -EIO;
goto out;
}
@@ -548,7 +546,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
&ses->server->mac_signing_key,
midQ->sequence_number+1);
if (rc) {
- cERROR(1, ("Unexpected SMB signature"));
+ cERROR(1, "Unexpected SMB signature");
/* BB FIXME add code to kill session */
}
}
@@ -569,7 +567,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
DeleteMidQEntry */
} else {
rc = -EIO;
- cFYI(1, ("Bad MID state?"));
+ cFYI(1, "Bad MID state?");
}
out:
@@ -591,11 +589,11 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
struct mid_q_entry *midQ;
if (ses == NULL) {
- cERROR(1, ("Null smb session"));
+ cERROR(1, "Null smb session");
return -EIO;
}
if (ses->server == NULL) {
- cERROR(1, ("Null tcp session"));
+ cERROR(1, "Null tcp session");
return -EIO;
}
@@ -607,8 +605,8 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
use ses->maxReq */
if (in_buf->smb_buf_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
- cERROR(1, ("Illegal length, greater than maximum frame, %d",
- in_buf->smb_buf_length));
+ cERROR(1, "Illegal length, greater than maximum frame, %d",
+ in_buf->smb_buf_length);
return -EIO;
}
@@ -665,7 +663,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
else if (long_op == CIFS_BLOCKING_OP)
timeout = 0x7FFFFFFF; /* large but not so large as to wrap */
else {
- cERROR(1, ("unknown timeout flag %d", long_op));
+ cERROR(1, "unknown timeout flag %d", long_op);
rc = -EIO;
goto out;
}
@@ -681,8 +679,8 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
spin_lock(&GlobalMid_Lock);
if (midQ->resp_buf == NULL) {
- cERROR(1, ("No response for cmd %d mid %d",
- midQ->command, midQ->mid));
+ cERROR(1, "No response for cmd %d mid %d",
+ midQ->command, midQ->mid);
if (midQ->midState == MID_REQUEST_SUBMITTED) {
if (ses->server->tcpStatus == CifsExiting)
rc = -EHOSTDOWN;
@@ -695,7 +693,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
if (rc != -EHOSTDOWN) {
if (midQ->midState == MID_RETRY_NEEDED) {
rc = -EAGAIN;
- cFYI(1, ("marking request for retry"));
+ cFYI(1, "marking request for retry");
} else {
rc = -EIO;
}
@@ -712,8 +710,8 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
receive_len = midQ->resp_buf->smb_buf_length;
if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
- cERROR(1, ("Frame too large received. Length: %d Xid: %d",
- receive_len, xid));
+ cERROR(1, "Frame too large received. Length: %d Xid: %d",
+ receive_len, xid);
rc = -EIO;
goto out;
}
@@ -736,7 +734,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
&ses->server->mac_signing_key,
midQ->sequence_number+1);
if (rc) {
- cERROR(1, ("Unexpected SMB signature"));
+ cERROR(1, "Unexpected SMB signature");
/* BB FIXME add code to kill session */
}
}
@@ -753,7 +751,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
BCC(out_buf) = le16_to_cpu(BCC_LE(out_buf));
} else {
rc = -EIO;
- cERROR(1, ("Bad MID state?"));
+ cERROR(1, "Bad MID state?");
}
out:
@@ -824,13 +822,13 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
struct cifsSesInfo *ses;
if (tcon == NULL || tcon->ses == NULL) {
- cERROR(1, ("Null smb session"));
+ cERROR(1, "Null smb session");
return -EIO;
}
ses = tcon->ses;
if (ses->server == NULL) {
- cERROR(1, ("Null tcp session"));
+ cERROR(1, "Null tcp session");
return -EIO;
}
@@ -842,8 +840,8 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
use ses->maxReq */
if (in_buf->smb_buf_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
- cERROR(1, ("Illegal length, greater than maximum frame, %d",
- in_buf->smb_buf_length));
+ cERROR(1, "Illegal length, greater than maximum frame, %d",
+ in_buf->smb_buf_length);
return -EIO;
}
@@ -933,8 +931,8 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
spin_unlock(&GlobalMid_Lock);
receive_len = midQ->resp_buf->smb_buf_length;
} else {
- cERROR(1, ("No response for cmd %d mid %d",
- midQ->command, midQ->mid));
+ cERROR(1, "No response for cmd %d mid %d",
+ midQ->command, midQ->mid);
if (midQ->midState == MID_REQUEST_SUBMITTED) {
if (ses->server->tcpStatus == CifsExiting)
rc = -EHOSTDOWN;
@@ -947,7 +945,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
if (rc != -EHOSTDOWN) {
if (midQ->midState == MID_RETRY_NEEDED) {
rc = -EAGAIN;
- cFYI(1, ("marking request for retry"));
+ cFYI(1, "marking request for retry");
} else {
rc = -EIO;
}
@@ -958,8 +956,8 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
}
if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
- cERROR(1, ("Frame too large received. Length: %d Xid: %d",
- receive_len, xid));
+ cERROR(1, "Frame too large received. Length: %d Xid: %d",
+ receive_len, xid);
rc = -EIO;
goto out;
}
@@ -968,7 +966,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
if ((out_buf == NULL) || (midQ->midState != MID_RESPONSE_RECEIVED)) {
rc = -EIO;
- cERROR(1, ("Bad MID state?"));
+ cERROR(1, "Bad MID state?");
goto out;
}
@@ -986,7 +984,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
&ses->server->mac_signing_key,
midQ->sequence_number+1);
if (rc) {
- cERROR(1, ("Unexpected SMB signature"));
+ cERROR(1, "Unexpected SMB signature");
/* BB FIXME add code to kill session */
}
}
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index f555ce077d4f..a1509207bfa6 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -70,12 +70,12 @@ int cifs_removexattr(struct dentry *direntry, const char *ea_name)
return rc;
}
if (ea_name == NULL) {
- cFYI(1, ("Null xattr names not supported"));
+ cFYI(1, "Null xattr names not supported");
} else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5)
&& (strncmp(ea_name, CIFS_XATTR_OS2_PREFIX, 4))) {
cFYI(1,
- ("illegal xattr request %s (only user namespace supported)",
- ea_name));
+ "illegal xattr request %s (only user namespace supported)",
+ ea_name);
/* BB what if no namespace prefix? */
/* Should we just pass them to server, except for
system and perhaps security prefixes? */
@@ -131,19 +131,19 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
search server for EAs or streams to
return as xattrs */
if (value_size > MAX_EA_VALUE_SIZE) {
- cFYI(1, ("size of EA value too large"));
+ cFYI(1, "size of EA value too large");
kfree(full_path);
FreeXid(xid);
return -EOPNOTSUPP;
}
if (ea_name == NULL) {
- cFYI(1, ("Null xattr names not supported"));
+ cFYI(1, "Null xattr names not supported");
} else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5) == 0) {
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
goto set_ea_exit;
if (strncmp(ea_name, CIFS_XATTR_DOS_ATTRIB, 14) == 0)
- cFYI(1, ("attempt to set cifs inode metadata"));
+ cFYI(1, "attempt to set cifs inode metadata");
ea_name += 5; /* skip past user. prefix */
rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, ea_value,
@@ -169,9 +169,9 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
ACL_TYPE_ACCESS, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
- cFYI(1, ("set POSIX ACL rc %d", rc));
+ cFYI(1, "set POSIX ACL rc %d", rc);
#else
- cFYI(1, ("set POSIX ACL not supported"));
+ cFYI(1, "set POSIX ACL not supported");
#endif
} else if (strncmp(ea_name, POSIX_ACL_XATTR_DEFAULT,
strlen(POSIX_ACL_XATTR_DEFAULT)) == 0) {
@@ -182,13 +182,13 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
ACL_TYPE_DEFAULT, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
- cFYI(1, ("set POSIX default ACL rc %d", rc));
+ cFYI(1, "set POSIX default ACL rc %d", rc);
#else
- cFYI(1, ("set default POSIX ACL not supported"));
+ cFYI(1, "set default POSIX ACL not supported");
#endif
} else {
- cFYI(1, ("illegal xattr request %s (only user namespace"
- " supported)", ea_name));
+ cFYI(1, "illegal xattr request %s (only user namespace"
+ " supported)", ea_name);
/* BB what if no namespace prefix? */
/* Should we just pass them to server, except for
system and perhaps security prefixes? */
@@ -235,13 +235,13 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
/* return dos attributes as pseudo xattr */
/* return alt name if available as pseudo attr */
if (ea_name == NULL) {
- cFYI(1, ("Null xattr names not supported"));
+ cFYI(1, "Null xattr names not supported");
} else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5) == 0) {
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
goto get_ea_exit;
if (strncmp(ea_name, CIFS_XATTR_DOS_ATTRIB, 14) == 0) {
- cFYI(1, ("attempt to query cifs inode metadata"));
+ cFYI(1, "attempt to query cifs inode metadata");
/* revalidate/getattr then populate from inode */
} /* BB add else when above is implemented */
ea_name += 5; /* skip past user. prefix */
@@ -287,7 +287,7 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
}
#endif /* EXPERIMENTAL */
#else
- cFYI(1, ("query POSIX ACL not supported yet"));
+ cFYI(1, "query POSIX ACL not supported yet");
#endif /* CONFIG_CIFS_POSIX */
} else if (strncmp(ea_name, POSIX_ACL_XATTR_DEFAULT,
strlen(POSIX_ACL_XATTR_DEFAULT)) == 0) {
@@ -299,18 +299,18 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
#else
- cFYI(1, ("query POSIX default ACL not supported yet"));
+ cFYI(1, "query POSIX default ACL not supported yet");
#endif
} else if (strncmp(ea_name,
CIFS_XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) == 0) {
- cFYI(1, ("Trusted xattr namespace not supported yet"));
+ cFYI(1, "Trusted xattr namespace not supported yet");
} else if (strncmp(ea_name,
CIFS_XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) == 0) {
- cFYI(1, ("Security xattr namespace not supported yet"));
+ cFYI(1, "Security xattr namespace not supported yet");
} else
cFYI(1,
- ("illegal xattr request %s (only user namespace supported)",
- ea_name));
+ "illegal xattr request %s (only user namespace supported)",
+ ea_name);
/* We could add an additional check for streams ie
if proc/fs/cifs/streamstoxattr is set then
diff --git a/fs/compat.c b/fs/compat.c
index 4b6ed03cc478..a0b3375ed497 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1169,11 +1169,10 @@ out:
if (iov != iovstack)
kfree(iov);
if ((ret + (type == READ)) > 0) {
- struct dentry *dentry = file->f_path.dentry;
if (type == READ)
- fsnotify_access(dentry);
+ fsnotify_access(file);
else
- fsnotify_modify(dentry);
+ fsnotify_modify(file);
}
return ret;
}
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 17903b491298..031dbe3a15ca 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -733,10 +733,7 @@ static void lkb_add_ordered(struct list_head *new, struct list_head *head,
if (lkb->lkb_rqmode < mode)
break;
- if (!lkb)
- list_add_tail(new, head);
- else
- __list_add(new, lkb->lkb_statequeue.prev, &lkb->lkb_statequeue);
+ __list_add(new, lkb->lkb_statequeue.prev, &lkb->lkb_statequeue);
}
/* add/remove lkb to rsb's grant/convert/wait queue */
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 8b6e73c47435..b6272853130c 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -215,6 +215,7 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, int type, int mode)
if (!ast_type) {
kref_get(&lkb->lkb_ref);
list_add_tail(&lkb->lkb_astqueue, &proc->asts);
+ lkb->lkb_ast_first = type;
wake_up_interruptible(&proc->wait);
}
if (type == AST_COMP && (ast_type & AST_COMP))
@@ -223,7 +224,6 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, int type, int mode)
eol = lkb_is_endoflife(lkb, ua->lksb.sb_status, type);
if (eol) {
- lkb->lkb_ast_type &= ~AST_BAST;
lkb->lkb_flags |= DLM_IFL_ENDOFLIFE;
}
@@ -706,7 +706,7 @@ static int device_close(struct inode *inode, struct file *file)
}
static int copy_result_to_user(struct dlm_user_args *ua, int compat, int type,
- int bmode, char __user *buf, size_t count)
+ int mode, char __user *buf, size_t count)
{
#ifdef CONFIG_COMPAT
struct dlm_lock_result32 result32;
@@ -733,7 +733,7 @@ static int copy_result_to_user(struct dlm_user_args *ua, int compat, int type,
if (type == AST_BAST) {
result.user_astaddr = ua->bastaddr;
result.user_astparam = ua->bastparam;
- result.bast_mode = bmode;
+ result.bast_mode = mode;
} else {
result.user_astaddr = ua->castaddr;
result.user_astparam = ua->castparam;
@@ -801,7 +801,9 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
struct dlm_user_proc *proc = file->private_data;
struct dlm_lkb *lkb;
DECLARE_WAITQUEUE(wait, current);
- int error, type=0, bmode=0, removed = 0;
+ int error = 0, removed;
+ int ret_type, ret_mode;
+ int bastmode, castmode, do_bast, do_cast;
if (count == sizeof(struct dlm_device_version)) {
error = copy_version_to_user(buf, count);
@@ -820,6 +822,8 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
#endif
return -EINVAL;
+ try_another:
+
/* do we really need this? can a read happen after a close? */
if (test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))
return -EINVAL;
@@ -855,13 +859,55 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
lkb = list_entry(proc->asts.next, struct dlm_lkb, lkb_astqueue);
- if (lkb->lkb_ast_type & AST_COMP) {
- lkb->lkb_ast_type &= ~AST_COMP;
- type = AST_COMP;
- } else if (lkb->lkb_ast_type & AST_BAST) {
- lkb->lkb_ast_type &= ~AST_BAST;
- type = AST_BAST;
- bmode = lkb->lkb_bastmode;
+ removed = 0;
+ ret_type = 0;
+ ret_mode = 0;
+ do_bast = lkb->lkb_ast_type & AST_BAST;
+ do_cast = lkb->lkb_ast_type & AST_COMP;
+ bastmode = lkb->lkb_bastmode;
+ castmode = lkb->lkb_castmode;
+
+ /* when both are queued, figure out which to do first and
+ switch first so the other goes in the next read */
+
+ if (do_cast && do_bast) {
+ if (lkb->lkb_ast_first == AST_COMP) {
+ ret_type = AST_COMP;
+ ret_mode = castmode;
+ lkb->lkb_ast_type &= ~AST_COMP;
+ lkb->lkb_ast_first = AST_BAST;
+ } else {
+ ret_type = AST_BAST;
+ ret_mode = bastmode;
+ lkb->lkb_ast_type &= ~AST_BAST;
+ lkb->lkb_ast_first = AST_COMP;
+ }
+ } else {
+ ret_type = lkb->lkb_ast_first;
+ ret_mode = (ret_type == AST_COMP) ? castmode : bastmode;
+ lkb->lkb_ast_type &= ~ret_type;
+ lkb->lkb_ast_first = 0;
+ }
+
+ /* if we're doing a bast but the bast is unnecessary, then
+ switch to do nothing or do a cast if that was needed next */
+
+ if ((ret_type == AST_BAST) &&
+ dlm_modes_compat(bastmode, lkb->lkb_castmode_done)) {
+ ret_type = 0;
+ ret_mode = 0;
+
+ if (do_cast) {
+ ret_type = AST_COMP;
+ ret_mode = castmode;
+ lkb->lkb_ast_type &= ~AST_COMP;
+ lkb->lkb_ast_first = 0;
+ }
+ }
+
+ if (lkb->lkb_ast_first != lkb->lkb_ast_type) {
+ log_print("device_read %x ast_first %x ast_type %x",
+ lkb->lkb_id, lkb->lkb_ast_first, lkb->lkb_ast_type);
}
if (!lkb->lkb_ast_type) {
@@ -870,15 +916,29 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
}
spin_unlock(&proc->asts_spin);
- error = copy_result_to_user(lkb->lkb_ua,
- test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags),
- type, bmode, buf, count);
+ if (ret_type) {
+ error = copy_result_to_user(lkb->lkb_ua,
+ test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags),
+ ret_type, ret_mode, buf, count);
+
+ if (ret_type == AST_COMP)
+ lkb->lkb_castmode_done = castmode;
+ if (ret_type == AST_BAST)
+ lkb->lkb_bastmode_done = bastmode;
+ }
/* removes reference for the proc->asts lists added by
dlm_user_add_ast() and may result in the lkb being freed */
+
if (removed)
dlm_put_lkb(lkb);
+ /* the bast that was queued was eliminated (see unnecessary above),
+ leaving nothing to return */
+
+ if (!ret_type)
+ goto try_another;
+
return error;
}
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index 760983d0f25e..9b516597e8f5 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -602,28 +602,46 @@ static int ecryptfs_get_sb(struct file_system_type *fs_type, int flags,
struct vfsmount *mnt)
{
int rc;
- struct super_block *sb;
+ struct super_block *sb, *lower_sb;
+ struct nameidata nd;
+
+ rc = path_lookup(dev_name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &nd);
+ if (rc) {
+ printk(KERN_WARNING
+ "path_lookup() failed on dev_name = [%s]\n", dev_name);
+ goto out;
+ }
+ lower_sb = nd.path.dentry->d_sb;
+ if (strcmp(lower_sb->s_type->name, "ecryptfs") == 0) {
+ rc = -EINVAL;
+ printk(KERN_ERR "Mount on filesystem of type "
+ "eCryptfs explicitly disallowed due to "
+ "known incompatibilities\n");
+ goto out_pathput;
+ }
rc = get_sb_nodev(fs_type, flags, raw_data, ecryptfs_fill_super, mnt);
if (rc < 0) {
printk(KERN_ERR "Getting sb failed; rc = [%d]\n", rc);
- goto out;
+ goto out_pathput;
}
sb = mnt->mnt_sb;
rc = ecryptfs_parse_options(sb, raw_data);
if (rc) {
printk(KERN_ERR "Error parsing options; rc = [%d]\n", rc);
- goto out_abort;
+ goto out_dput;
}
rc = ecryptfs_read_super(sb, dev_name);
if (rc) {
printk(KERN_ERR "Reading sb failed; rc = [%d]\n", rc);
- goto out_abort;
+ goto out_dput;
}
goto out;
-out_abort:
+out_dput:
dput(sb->s_root); /* aka mnt->mnt_root, as set by get_sb_nodev() */
deactivate_locked_super(sb);
+out_pathput:
+ path_put(&nd.path);
out:
return rc;
}
diff --git a/fs/exec.c b/fs/exec.c
index 49cdaa19e5b9..f351cdb2ea1d 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -129,7 +129,7 @@ SYSCALL_DEFINE1(uselib, const char __user *, library)
if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
goto exit;
- fsnotify_open(file->f_path.dentry);
+ fsnotify_open(file);
error = -ENOEXEC;
if(file->f_op) {
@@ -678,7 +678,7 @@ struct file *open_exec(const char *name)
if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
goto exit;
- fsnotify_open(file->f_path.dentry);
+ fsnotify_open(file);
err = deny_write_access(file);
if (err)
diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c
index 4cfab1cc75c0..d91e9d829bc1 100644
--- a/fs/exofs/dir.c
+++ b/fs/exofs/dir.c
@@ -608,7 +608,7 @@ int exofs_make_empty(struct inode *inode, struct inode *parent)
de->inode_no = cpu_to_le64(parent->i_ino);
memcpy(de->name, PARENT_DIR, sizeof(PARENT_DIR));
exofs_set_de_type(de, inode);
- kunmap_atomic(page, KM_USER0);
+ kunmap_atomic(kaddr, KM_USER0);
err = exofs_commit_chunk(page, 0, chunk_size);
fail:
page_cache_release(page);
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index 76d2a79ef93e..9507283be3ad 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -755,6 +755,21 @@ static int exofs_write_end(struct file *file, struct address_space *mapping,
return ret;
}
+static int exofs_releasepage(struct page *page, gfp_t gfp)
+{
+ EXOFS_DBGMSG("page 0x%lx\n", page->index);
+ WARN_ON(1);
+ return try_to_free_buffers(page);
+}
+
+static void exofs_invalidatepage(struct page *page, unsigned long offset)
+{
+ EXOFS_DBGMSG("page_has_buffers=>%d\n", page_has_buffers(page));
+ WARN_ON(1);
+
+ block_invalidatepage(page, offset);
+}
+
const struct address_space_operations exofs_aops = {
.readpage = exofs_readpage,
.readpages = exofs_readpages,
@@ -762,6 +777,21 @@ const struct address_space_operations exofs_aops = {
.writepages = exofs_writepages,
.write_begin = exofs_write_begin_export,
.write_end = exofs_write_end,
+ .releasepage = exofs_releasepage,
+ .set_page_dirty = __set_page_dirty_nobuffers,
+ .invalidatepage = exofs_invalidatepage,
+
+ /* Not implemented Yet */
+ .bmap = NULL, /* TODO: use osd's OSD_ACT_READ_MAP */
+ .direct_IO = NULL, /* TODO: Should be trivial to do */
+
+ /* For these, NULL has a special meaning or the default is not exported */
+ .sync_page = NULL,
+ .get_xip_mem = NULL,
+ .migratepage = NULL,
+ .launder_page = NULL,
+ .is_partially_uptodate = NULL,
+ .error_remove_page = NULL,
};
/******************************************************************************
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index 3cf038c055d7..e8766a396776 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -1332,6 +1332,12 @@ retry_alloc:
free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
/*
+ * skip this group (and avoid loading bitmap) if there
+ * are no free blocks
+ */
+ if (!free_blocks)
+ continue;
+ /*
* skip this group if the number of
* free blocks is less than half of the reservation
* window size.
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index ad7d572ee8dc..f0c5286f9342 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -106,7 +106,7 @@ void ext2_free_inode (struct inode * inode)
struct super_block * sb = inode->i_sb;
int is_directory;
unsigned long ino;
- struct buffer_head *bitmap_bh = NULL;
+ struct buffer_head *bitmap_bh;
unsigned long block_group;
unsigned long bit;
struct ext2_super_block * es;
@@ -135,14 +135,13 @@ void ext2_free_inode (struct inode * inode)
ino > le32_to_cpu(es->s_inodes_count)) {
ext2_error (sb, "ext2_free_inode",
"reserved or nonexistent inode %lu", ino);
- goto error_return;
+ return;
}
block_group = (ino - 1) / EXT2_INODES_PER_GROUP(sb);
bit = (ino - 1) % EXT2_INODES_PER_GROUP(sb);
- brelse(bitmap_bh);
bitmap_bh = read_inode_bitmap(sb, block_group);
if (!bitmap_bh)
- goto error_return;
+ return;
/* Ok, now we can actually update the inode bitmaps.. */
if (!ext2_clear_bit_atomic(sb_bgl_lock(EXT2_SB(sb), block_group),
@@ -154,7 +153,7 @@ void ext2_free_inode (struct inode * inode)
mark_buffer_dirty(bitmap_bh);
if (sb->s_flags & MS_SYNCHRONOUS)
sync_dirty_buffer(bitmap_bh);
-error_return:
+
brelse(bitmap_bh);
}
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index fc13cc119aad..b90c3bf6e9ba 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -22,7 +22,6 @@
* Assorted race fixes, rewrite of ext2_get_block() by Al Viro, 2000
*/
-#include <linux/smp_lock.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
@@ -1406,11 +1405,11 @@ static int __ext2_write_inode(struct inode *inode, int do_sync)
/* If this is the first large file
* created, add a flag to the superblock.
*/
- lock_kernel();
+ spin_lock(&EXT2_SB(sb)->s_lock);
ext2_update_dynamic_rev(sb);
EXT2_SET_RO_COMPAT_FEATURE(sb,
EXT2_FEATURE_RO_COMPAT_LARGE_FILE);
- unlock_kernel();
+ spin_unlock(&EXT2_SB(sb)->s_lock);
ext2_write_super(sb);
}
}
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 42e4a303b675..71e9eb1fa696 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -26,7 +26,6 @@
#include <linux/random.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
-#include <linux/smp_lock.h>
#include <linux/vfs.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
@@ -39,7 +38,7 @@
#include "xip.h"
static void ext2_sync_super(struct super_block *sb,
- struct ext2_super_block *es);
+ struct ext2_super_block *es, int wait);
static int ext2_remount (struct super_block * sb, int * flags, char * data);
static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf);
static int ext2_sync_fs(struct super_block *sb, int wait);
@@ -52,9 +51,11 @@ void ext2_error (struct super_block * sb, const char * function,
struct ext2_super_block *es = sbi->s_es;
if (!(sb->s_flags & MS_RDONLY)) {
+ spin_lock(&sbi->s_lock);
sbi->s_mount_state |= EXT2_ERROR_FS;
es->s_state |= cpu_to_le16(EXT2_ERROR_FS);
- ext2_sync_super(sb, es);
+ spin_unlock(&sbi->s_lock);
+ ext2_sync_super(sb, es, 1);
}
va_start(args, fmt);
@@ -84,6 +85,9 @@ void ext2_msg(struct super_block *sb, const char *prefix,
va_end(args);
}
+/*
+ * This must be called with sbi->s_lock held.
+ */
void ext2_update_dynamic_rev(struct super_block *sb)
{
struct ext2_super_block *es = EXT2_SB(sb)->s_es;
@@ -115,8 +119,6 @@ static void ext2_put_super (struct super_block * sb)
int i;
struct ext2_sb_info *sbi = EXT2_SB(sb);
- lock_kernel();
-
if (sb->s_dirt)
ext2_write_super(sb);
@@ -124,8 +126,10 @@ static void ext2_put_super (struct super_block * sb)
if (!(sb->s_flags & MS_RDONLY)) {
struct ext2_super_block *es = sbi->s_es;
+ spin_lock(&sbi->s_lock);
es->s_state = cpu_to_le16(sbi->s_mount_state);
- ext2_sync_super(sb, es);
+ spin_unlock(&sbi->s_lock);
+ ext2_sync_super(sb, es, 1);
}
db_count = sbi->s_gdb_count;
for (i = 0; i < db_count; i++)
@@ -140,8 +144,6 @@ static void ext2_put_super (struct super_block * sb)
sb->s_fs_info = NULL;
kfree(sbi->s_blockgroup_lock);
kfree(sbi);
-
- unlock_kernel();
}
static struct kmem_cache * ext2_inode_cachep;
@@ -209,6 +211,7 @@ static int ext2_show_options(struct seq_file *seq, struct vfsmount *vfs)
struct ext2_super_block *es = sbi->s_es;
unsigned long def_mount_opts;
+ spin_lock(&sbi->s_lock);
def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
if (sbi->s_sb_block != 1)
@@ -281,6 +284,7 @@ static int ext2_show_options(struct seq_file *seq, struct vfsmount *vfs)
if (!test_opt(sb, RESERVATION))
seq_puts(seq, ",noreservation");
+ spin_unlock(&sbi->s_lock);
return 0;
}
@@ -606,7 +610,6 @@ static int ext2_setup_super (struct super_block * sb,
if (!le16_to_cpu(es->s_max_mnt_count))
es->s_max_mnt_count = cpu_to_le16(EXT2_DFL_MAX_MNT_COUNT);
le16_add_cpu(&es->s_mnt_count, 1);
- ext2_write_super(sb);
if (test_opt (sb, DEBUG))
ext2_msg(sb, KERN_INFO, "%s, %s, bs=%lu, fs=%lu, gc=%lu, "
"bpg=%lu, ipg=%lu, mo=%04lx]",
@@ -767,6 +770,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
sb->s_fs_info = sbi;
sbi->s_sb_block = sb_block;
+ spin_lock_init(&sbi->s_lock);
+
/*
* See what the current blocksize for the device is, and
* use that as the blocksize. Otherwise (or if the blocksize
@@ -1079,7 +1084,9 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL))
ext2_msg(sb, KERN_WARNING,
"warning: mounting ext3 filesystem as ext2");
- ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY);
+ if (ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY))
+ sb->s_flags |= MS_RDONLY;
+ ext2_write_super(sb);
return 0;
cantfind_ext2:
@@ -1120,30 +1127,26 @@ static void ext2_clear_super_error(struct super_block *sb)
* be remapped. Nothing we can do but to retry the
* write and hope for the best.
*/
- printk(KERN_ERR "EXT2-fs: %s previous I/O error to "
- "superblock detected", sb->s_id);
+ ext2_msg(sb, KERN_ERR,
+ "previous I/O error to superblock detected\n");
clear_buffer_write_io_error(sbh);
set_buffer_uptodate(sbh);
}
}
-static void ext2_commit_super (struct super_block * sb,
- struct ext2_super_block * es)
-{
- ext2_clear_super_error(sb);
- es->s_wtime = cpu_to_le32(get_seconds());
- mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
- sb->s_dirt = 0;
-}
-
-static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es)
+static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es,
+ int wait)
{
ext2_clear_super_error(sb);
+ spin_lock(&EXT2_SB(sb)->s_lock);
es->s_free_blocks_count = cpu_to_le32(ext2_count_free_blocks(sb));
es->s_free_inodes_count = cpu_to_le32(ext2_count_free_inodes(sb));
es->s_wtime = cpu_to_le32(get_seconds());
+ /* unlock before we do IO */
+ spin_unlock(&EXT2_SB(sb)->s_lock);
mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
- sync_dirty_buffer(EXT2_SB(sb)->s_sbh);
+ if (wait)
+ sync_dirty_buffer(EXT2_SB(sb)->s_sbh);
sb->s_dirt = 0;
}
@@ -1157,43 +1160,18 @@ static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es)
* may have been checked while mounted and e2fsck may have
* set s_state to EXT2_VALID_FS after some corrections.
*/
-
static int ext2_sync_fs(struct super_block *sb, int wait)
{
+ struct ext2_sb_info *sbi = EXT2_SB(sb);
struct ext2_super_block *es = EXT2_SB(sb)->s_es;
- struct buffer_head *sbh = EXT2_SB(sb)->s_sbh;
-
- lock_kernel();
- if (buffer_write_io_error(sbh)) {
- /*
- * Oh, dear. A previous attempt to write the
- * superblock failed. This could happen because the
- * USB device was yanked out. Or it could happen to
- * be a transient write error and maybe the block will
- * be remapped. Nothing we can do but to retry the
- * write and hope for the best.
- */
- ext2_msg(sb, KERN_ERR,
- "previous I/O error to superblock detected\n");
- clear_buffer_write_io_error(sbh);
- set_buffer_uptodate(sbh);
- }
+ spin_lock(&sbi->s_lock);
if (es->s_state & cpu_to_le16(EXT2_VALID_FS)) {
ext2_debug("setting valid to 0\n");
es->s_state &= cpu_to_le16(~EXT2_VALID_FS);
- es->s_free_blocks_count =
- cpu_to_le32(ext2_count_free_blocks(sb));
- es->s_free_inodes_count =
- cpu_to_le32(ext2_count_free_inodes(sb));
- es->s_mtime = cpu_to_le32(get_seconds());
- ext2_sync_super(sb, es);
- } else {
- ext2_commit_super(sb, es);
}
- sb->s_dirt = 0;
- unlock_kernel();
-
+ spin_unlock(&sbi->s_lock);
+ ext2_sync_super(sb, es, wait);
return 0;
}
@@ -1215,7 +1193,7 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
unsigned long old_sb_flags;
int err;
- lock_kernel();
+ spin_lock(&sbi->s_lock);
/* Store the old options */
old_sb_flags = sb->s_flags;
@@ -1254,13 +1232,13 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
sbi->s_mount_opt |= old_mount_opt & EXT2_MOUNT_XIP;
}
if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) {
- unlock_kernel();
+ spin_unlock(&sbi->s_lock);
return 0;
}
if (*flags & MS_RDONLY) {
if (le16_to_cpu(es->s_state) & EXT2_VALID_FS ||
!(sbi->s_mount_state & EXT2_VALID_FS)) {
- unlock_kernel();
+ spin_unlock(&sbi->s_lock);
return 0;
}
/*
@@ -1269,6 +1247,8 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
*/
es->s_state = cpu_to_le16(sbi->s_mount_state);
es->s_mtime = cpu_to_le32(get_seconds());
+ spin_unlock(&sbi->s_lock);
+ ext2_sync_super(sb, es, 1);
} else {
__le32 ret = EXT2_HAS_RO_COMPAT_FEATURE(sb,
~EXT2_FEATURE_RO_COMPAT_SUPP);
@@ -1288,16 +1268,16 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
sbi->s_mount_state = le16_to_cpu(es->s_state);
if (!ext2_setup_super (sb, es, 0))
sb->s_flags &= ~MS_RDONLY;
+ spin_unlock(&sbi->s_lock);
+ ext2_write_super(sb);
}
- ext2_sync_super(sb, es);
- unlock_kernel();
return 0;
restore_opts:
sbi->s_mount_opt = old_opts.s_mount_opt;
sbi->s_resuid = old_opts.s_resuid;
sbi->s_resgid = old_opts.s_resgid;
sb->s_flags = old_sb_flags;
- unlock_kernel();
+ spin_unlock(&sbi->s_lock);
return err;
}
@@ -1308,6 +1288,8 @@ static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf)
struct ext2_super_block *es = sbi->s_es;
u64 fsid;
+ spin_lock(&sbi->s_lock);
+
if (test_opt (sb, MINIX_DF))
sbi->s_overhead_last = 0;
else if (sbi->s_blocks_last != le32_to_cpu(es->s_blocks_count)) {
@@ -1362,6 +1344,7 @@ static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf)
le64_to_cpup((void *)es->s_uuid + sizeof(u64));
buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
+ spin_unlock(&sbi->s_lock);
return 0;
}
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index e44dc92609be..3b96045a00ce 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -345,7 +345,9 @@ static void ext2_xattr_update_super_block(struct super_block *sb)
if (EXT2_HAS_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR))
return;
+ spin_lock(&EXT2_SB(sb)->s_lock);
EXT2_SET_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR);
+ spin_unlock(&EXT2_SB(sb)->s_lock);
sb->s_dirt = 1;
mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
}
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index a177122a1b25..4a32511f4ded 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -1584,6 +1584,12 @@ retry_alloc:
goto io_error;
free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
/*
+ * skip this group (and avoid loading bitmap) if there
+ * are no free blocks
+ */
+ if (!free_blocks)
+ continue;
+ /*
* skip this group if the number of
* free blocks is less than half of the reservation
* window size.
diff --git a/fs/ext3/fsync.c b/fs/ext3/fsync.c
index 8209f266e9ad..fcf7487734b6 100644
--- a/fs/ext3/fsync.c
+++ b/fs/ext3/fsync.c
@@ -48,7 +48,7 @@ int ext3_sync_file(struct file * file, struct dentry *dentry, int datasync)
struct inode *inode = dentry->d_inode;
struct ext3_inode_info *ei = EXT3_I(inode);
journal_t *journal = EXT3_SB(inode->i_sb)->s_journal;
- int ret = 0;
+ int ret, needs_barrier = 0;
tid_t commit_tid;
if (inode->i_sb->s_flags & MS_RDONLY)
@@ -70,28 +70,27 @@ int ext3_sync_file(struct file * file, struct dentry *dentry, int datasync)
* (they were dirtied by commit). But that's OK - the blocks are
* safe in-journal, which is all fsync() needs to ensure.
*/
- if (ext3_should_journal_data(inode)) {
- ret = ext3_force_commit(inode->i_sb);
- goto out;
- }
+ if (ext3_should_journal_data(inode))
+ return ext3_force_commit(inode->i_sb);
if (datasync)
commit_tid = atomic_read(&ei->i_datasync_tid);
else
commit_tid = atomic_read(&ei->i_sync_tid);
- if (log_start_commit(journal, commit_tid)) {
- log_wait_commit(journal, commit_tid);
- goto out;
- }
+ if (test_opt(inode->i_sb, BARRIER) &&
+ !journal_trans_will_send_data_barrier(journal, commit_tid))
+ needs_barrier = 1;
+ log_start_commit(journal, commit_tid);
+ ret = log_wait_commit(journal, commit_tid);
/*
* In case we didn't commit a transaction, we have to flush
* disk caches manually so that data really is on persistent
* storage
*/
- if (test_opt(inode->i_sb, BARRIER))
- blkdev_issue_flush(inode->i_sb->s_bdev, NULL);
-out:
+ if (needs_barrier)
+ blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL,
+ BLKDEV_IFL_WAIT);
return ret;
}
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 1bee604cc6cd..0fc1293d0e96 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -653,8 +653,12 @@ static int ext3_show_options(struct seq_file *seq, struct vfsmount *vfs)
seq_printf(seq, ",commit=%u",
(unsigned) (sbi->s_commit_interval / HZ));
}
- if (test_opt(sb, BARRIER))
- seq_puts(seq, ",barrier=1");
+
+ /*
+ * Always display barrier state so it's clear what the status is.
+ */
+ seq_puts(seq, ",barrier=");
+ seq_puts(seq, test_opt(sb, BARRIER) ? "1" : "0");
if (test_opt(sb, NOBH))
seq_puts(seq, ",nobh");
@@ -810,8 +814,8 @@ enum {
Opt_data_err_abort, Opt_data_err_ignore,
Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
- Opt_noquota, Opt_ignore, Opt_barrier, Opt_err, Opt_resize,
- Opt_usrquota, Opt_grpquota
+ Opt_noquota, Opt_ignore, Opt_barrier, Opt_nobarrier, Opt_err,
+ Opt_resize, Opt_usrquota, Opt_grpquota
};
static const match_table_t tokens = {
@@ -865,6 +869,8 @@ static const match_table_t tokens = {
{Opt_quota, "quota"},
{Opt_usrquota, "usrquota"},
{Opt_barrier, "barrier=%u"},
+ {Opt_barrier, "barrier"},
+ {Opt_nobarrier, "nobarrier"},
{Opt_resize, "resize"},
{Opt_err, NULL},
};
@@ -967,7 +973,11 @@ static int parse_options (char *options, struct super_block *sb,
int token;
if (!*p)
continue;
-
+ /*
+ * Initialize args struct so we know whether arg was
+ * found; some options take optional arguments.
+ */
+ args[0].to = args[0].from = 0;
token = match_token(p, tokens, args);
switch (token) {
case Opt_bsd_df:
@@ -1215,9 +1225,15 @@ set_qf_format:
case Opt_abort:
set_opt(sbi->s_mount_opt, ABORT);
break;
+ case Opt_nobarrier:
+ clear_opt(sbi->s_mount_opt, BARRIER);
+ break;
case Opt_barrier:
- if (match_int(&args[0], &option))
- return 0;
+ if (args[0].from) {
+ if (match_int(&args[0], &option))
+ return 0;
+ } else
+ option = 1; /* No argument, default to 1 */
if (option)
set_opt(sbi->s_mount_opt, BARRIER);
else
@@ -1890,21 +1906,6 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
get_random_bytes(&sbi->s_next_generation, sizeof(u32));
spin_lock_init(&sbi->s_next_gen_lock);
- err = percpu_counter_init(&sbi->s_freeblocks_counter,
- ext3_count_free_blocks(sb));
- if (!err) {
- err = percpu_counter_init(&sbi->s_freeinodes_counter,
- ext3_count_free_inodes(sb));
- }
- if (!err) {
- err = percpu_counter_init(&sbi->s_dirs_counter,
- ext3_count_dirs(sb));
- }
- if (err) {
- ext3_msg(sb, KERN_ERR, "error: insufficient memory");
- goto failed_mount3;
- }
-
/* per filesystem reservation list head & lock */
spin_lock_init(&sbi->s_rsv_window_lock);
sbi->s_rsv_window_root = RB_ROOT;
@@ -1945,15 +1946,29 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
if (!test_opt(sb, NOLOAD) &&
EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL)) {
if (ext3_load_journal(sb, es, journal_devnum))
- goto failed_mount3;
+ goto failed_mount2;
} else if (journal_inum) {
if (ext3_create_journal(sb, es, journal_inum))
- goto failed_mount3;
+ goto failed_mount2;
} else {
if (!silent)
ext3_msg(sb, KERN_ERR,
"error: no journal found. "
"mounting ext3 over ext2?");
+ goto failed_mount2;
+ }
+ err = percpu_counter_init(&sbi->s_freeblocks_counter,
+ ext3_count_free_blocks(sb));
+ if (!err) {
+ err = percpu_counter_init(&sbi->s_freeinodes_counter,
+ ext3_count_free_inodes(sb));
+ }
+ if (!err) {
+ err = percpu_counter_init(&sbi->s_dirs_counter,
+ ext3_count_dirs(sb));
+ }
+ if (err) {
+ ext3_msg(sb, KERN_ERR, "error: insufficient memory");
goto failed_mount3;
}
@@ -1978,7 +1993,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
ext3_msg(sb, KERN_ERR,
"error: journal does not support "
"requested data journaling mode");
- goto failed_mount4;
+ goto failed_mount3;
}
default:
break;
@@ -2001,19 +2016,19 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
if (IS_ERR(root)) {
ext3_msg(sb, KERN_ERR, "error: get root inode failed");
ret = PTR_ERR(root);
- goto failed_mount4;
+ goto failed_mount3;
}
if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
iput(root);
ext3_msg(sb, KERN_ERR, "error: corrupt root inode, run e2fsck");
- goto failed_mount4;
+ goto failed_mount3;
}
sb->s_root = d_alloc_root(root);
if (!sb->s_root) {
ext3_msg(sb, KERN_ERR, "error: get root dentry failed");
iput(root);
ret = -ENOMEM;
- goto failed_mount4;
+ goto failed_mount3;
}
ext3_setup_super (sb, es, sb->s_flags & MS_RDONLY);
@@ -2039,12 +2054,11 @@ cantfind_ext3:
sb->s_id);
goto failed_mount;
-failed_mount4:
- journal_destroy(sbi->s_journal);
failed_mount3:
percpu_counter_destroy(&sbi->s_freeblocks_counter);
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
+ journal_destroy(sbi->s_journal);
failed_mount2:
for (i = 0; i < db_count; i++)
brelse(sbi->s_group_desc[i]);
@@ -2278,6 +2292,9 @@ static int ext3_load_journal(struct super_block *sb,
return -EINVAL;
}
+ if (!(journal->j_flags & JFS_BARRIER))
+ printk(KERN_INFO "EXT3-fs: barriers not enabled\n");
+
if (!really_read_only && test_opt(sb, UPDATE_JOURNAL)) {
err = journal_update_format(journal);
if (err) {
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 236b834b4ca8..ee611daf0748 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -2544,7 +2544,7 @@ static void bi_complete(struct bio *bio, int error)
/* FIXME!! we need to try to merge to left or right after zero-out */
static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
{
- int ret = -EIO;
+ int ret;
struct bio *bio;
int blkbits, blocksize;
sector_t ee_pblock;
@@ -2568,6 +2568,9 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
len = ee_len;
bio = bio_alloc(GFP_NOIO, len);
+ if (!bio)
+ return -ENOMEM;
+
bio->bi_sector = ee_pblock;
bio->bi_bdev = inode->i_sb->s_bdev;
@@ -2595,17 +2598,15 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
submit_bio(WRITE, bio);
wait_for_completion(&event);
- if (test_bit(BIO_UPTODATE, &bio->bi_flags))
- ret = 0;
- else {
- ret = -EIO;
- break;
+ if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
+ bio_put(bio);
+ return -EIO;
}
bio_put(bio);
ee_len -= done;
ee_pblock += done << (blkbits - 9);
}
- return ret;
+ return 0;
}
#define EXT4_EXT_ZERO_LEN 7
@@ -2630,11 +2631,21 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
struct ext4_extent *ex2 = NULL;
struct ext4_extent *ex3 = NULL;
struct ext4_extent_header *eh;
- ext4_lblk_t ee_block;
+ ext4_lblk_t ee_block, eof_block;
unsigned int allocated, ee_len, depth;
ext4_fsblk_t newblock;
int err = 0;
int ret = 0;
+ int may_zeroout;
+
+ ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
+ "block %llu, max_blocks %u\n", inode->i_ino,
+ (unsigned long long)iblock, max_blocks);
+
+ eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
+ inode->i_sb->s_blocksize_bits;
+ if (eof_block < iblock + max_blocks)
+ eof_block = iblock + max_blocks;
depth = ext_depth(inode);
eh = path[depth].p_hdr;
@@ -2643,16 +2654,23 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
ee_len = ext4_ext_get_actual_len(ex);
allocated = ee_len - (iblock - ee_block);
newblock = iblock - ee_block + ext_pblock(ex);
+
ex2 = ex;
orig_ex.ee_block = ex->ee_block;
orig_ex.ee_len = cpu_to_le16(ee_len);
ext4_ext_store_pblock(&orig_ex, ext_pblock(ex));
+ /*
+ * It is safe to convert extent to initialized via explicit
+ * zeroout only if extent is fully inside i_size or new_size.
+ */
+ may_zeroout = ee_block + ee_len <= eof_block;
+
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
goto out;
/* If extent has less than 2*EXT4_EXT_ZERO_LEN zerout directly */
- if (ee_len <= 2*EXT4_EXT_ZERO_LEN) {
+ if (ee_len <= 2*EXT4_EXT_ZERO_LEN && may_zeroout) {
err = ext4_ext_zeroout(inode, &orig_ex);
if (err)
goto fix_extent_len;
@@ -2683,7 +2701,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
if (allocated > max_blocks) {
unsigned int newdepth;
/* If extent has less than EXT4_EXT_ZERO_LEN zerout directly */
- if (allocated <= EXT4_EXT_ZERO_LEN) {
+ if (allocated <= EXT4_EXT_ZERO_LEN && may_zeroout) {
/*
* iblock == ee_block is handled by the zerouout
* at the beginning.
@@ -2759,7 +2777,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
ex3->ee_len = cpu_to_le16(allocated - max_blocks);
ext4_ext_mark_uninitialized(ex3);
err = ext4_ext_insert_extent(handle, inode, path, ex3, 0);
- if (err == -ENOSPC) {
+ if (err == -ENOSPC && may_zeroout) {
err = ext4_ext_zeroout(inode, &orig_ex);
if (err)
goto fix_extent_len;
@@ -2783,8 +2801,10 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
* update the extent length after successful insert of the
* split extent
*/
- orig_ex.ee_len = cpu_to_le16(ee_len -
- ext4_ext_get_actual_len(ex3));
+ ee_len -= ext4_ext_get_actual_len(ex3);
+ orig_ex.ee_len = cpu_to_le16(ee_len);
+ may_zeroout = ee_block + ee_len <= eof_block;
+
depth = newdepth;
ext4_ext_drop_refs(path);
path = ext4_ext_find_extent(inode, iblock, path);
@@ -2808,7 +2828,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
* otherwise give the extent a chance to merge to left
*/
if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN &&
- iblock != ee_block) {
+ iblock != ee_block && may_zeroout) {
err = ext4_ext_zeroout(inode, &orig_ex);
if (err)
goto fix_extent_len;
@@ -2877,7 +2897,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
goto out;
insert:
err = ext4_ext_insert_extent(handle, inode, path, &newex, 0);
- if (err == -ENOSPC) {
+ if (err == -ENOSPC && may_zeroout) {
err = ext4_ext_zeroout(inode, &orig_ex);
if (err)
goto fix_extent_len;
@@ -2937,14 +2957,21 @@ static int ext4_split_unwritten_extents(handle_t *handle,
struct ext4_extent *ex2 = NULL;
struct ext4_extent *ex3 = NULL;
struct ext4_extent_header *eh;
- ext4_lblk_t ee_block;
+ ext4_lblk_t ee_block, eof_block;
unsigned int allocated, ee_len, depth;
ext4_fsblk_t newblock;
int err = 0;
+ int may_zeroout;
+
+ ext_debug("ext4_split_unwritten_extents: inode %lu, logical"
+ "block %llu, max_blocks %u\n", inode->i_ino,
+ (unsigned long long)iblock, max_blocks);
+
+ eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
+ inode->i_sb->s_blocksize_bits;
+ if (eof_block < iblock + max_blocks)
+ eof_block = iblock + max_blocks;
- ext_debug("ext4_split_unwritten_extents: inode %lu,"
- "iblock %llu, max_blocks %u\n", inode->i_ino,
- (unsigned long long)iblock, max_blocks);
depth = ext_depth(inode);
eh = path[depth].p_hdr;
ex = path[depth].p_ext;
@@ -2952,12 +2979,19 @@ static int ext4_split_unwritten_extents(handle_t *handle,
ee_len = ext4_ext_get_actual_len(ex);
allocated = ee_len - (iblock - ee_block);
newblock = iblock - ee_block + ext_pblock(ex);
+
ex2 = ex;
orig_ex.ee_block = ex->ee_block;
orig_ex.ee_len = cpu_to_le16(ee_len);
ext4_ext_store_pblock(&orig_ex, ext_pblock(ex));
/*
+ * It is safe to convert extent to initialized via explicit
+ * zeroout only if extent is fully inside i_size or new_size.
+ */
+ may_zeroout = ee_block + ee_len <= eof_block;
+
+ /*
* If the uninitialized extent begins at the same logical
* block where the write begins, and the write completely
* covers the extent, then we don't need to split it.
@@ -2991,7 +3025,7 @@ static int ext4_split_unwritten_extents(handle_t *handle,
ex3->ee_len = cpu_to_le16(allocated - max_blocks);
ext4_ext_mark_uninitialized(ex3);
err = ext4_ext_insert_extent(handle, inode, path, ex3, flags);
- if (err == -ENOSPC) {
+ if (err == -ENOSPC && may_zeroout) {
err = ext4_ext_zeroout(inode, &orig_ex);
if (err)
goto fix_extent_len;
@@ -3015,8 +3049,10 @@ static int ext4_split_unwritten_extents(handle_t *handle,
* update the extent length after successful insert of the
* split extent
*/
- orig_ex.ee_len = cpu_to_le16(ee_len -
- ext4_ext_get_actual_len(ex3));
+ ee_len -= ext4_ext_get_actual_len(ex3);
+ orig_ex.ee_len = cpu_to_le16(ee_len);
+ may_zeroout = ee_block + ee_len <= eof_block;
+
depth = newdepth;
ext4_ext_drop_refs(path);
path = ext4_ext_find_extent(inode, iblock, path);
@@ -3062,7 +3098,7 @@ static int ext4_split_unwritten_extents(handle_t *handle,
goto out;
insert:
err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
- if (err == -ENOSPC) {
+ if (err == -ENOSPC && may_zeroout) {
err = ext4_ext_zeroout(inode, &orig_ex);
if (err)
goto fix_extent_len;
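
The guard introduced in both conversion paths above is the same test: an uninitialized extent may only be converted by zeroing it out when it ends at or before eof_block, the block-rounded i_size extended to cover the current write. A minimal user-space sketch of that predicate (the function and variable names below are illustrative, not the kernel's):

/* Hypothetical stand-alone model of the may_zeroout test added above. */
#include <stdio.h>

typedef unsigned long long u64;

/* eof_block is the larger of the block-aligned i_size and the end of the
 * current write (iblock + max_blocks), mirroring the patch. */
static u64 eof_block(u64 i_size, unsigned blocksize_bits,
                     u64 iblock, unsigned max_blocks)
{
        u64 eof = (i_size + (1ULL << blocksize_bits) - 1) >> blocksize_bits;

        if (eof < iblock + max_blocks)
                eof = iblock + max_blocks;
        return eof;
}

/* Zeroing out is only safe when the whole extent lies inside eof_block. */
static int may_zeroout(u64 ee_block, unsigned ee_len, u64 eof)
{
        return ee_block + ee_len <= eof;
}

int main(void)
{
        u64 eof = eof_block(1 << 20, 12, 200, 16);     /* 1 MiB file, 4 KiB blocks */

        printf("extent [100,+32)  -> %d\n", may_zeroout(100, 32, eof));
        printf("extent [250,+128) -> %d\n", may_zeroout(250, 128, eof));
        return 0;
}
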
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index 0d0c3239c1cd..868252915355 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -100,9 +100,11 @@ int ext4_sync_file(struct file *file, struct dentry *dentry, int datasync)
if (ext4_should_writeback_data(inode) &&
(journal->j_fs_dev != journal->j_dev) &&
(journal->j_flags & JBD2_BARRIER))
- blkdev_issue_flush(inode->i_sb->s_bdev, NULL);
- jbd2_log_wait_commit(journal, commit_tid);
+ blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL,
+ NULL, BLKDEV_IFL_WAIT);
+ ret = jbd2_log_wait_commit(journal, commit_tid);
} else if (journal->j_flags & JBD2_BARRIER)
- blkdev_issue_flush(inode->i_sb->s_bdev, NULL);
+ blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL,
+ BLKDEV_IFL_WAIT);
return ret;
}
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 57f6eef6ccd6..52618d5a1773 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -240,56 +240,49 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
if (fatal)
goto error_return;
- /* Ok, now we can actually update the inode bitmaps.. */
- cleared = ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
- bit, bitmap_bh->b_data);
- if (!cleared)
- ext4_error(sb, "bit already cleared for inode %lu", ino);
- else {
- gdp = ext4_get_group_desc(sb, block_group, &bh2);
-
+ fatal = -ESRCH;
+ gdp = ext4_get_group_desc(sb, block_group, &bh2);
+ if (gdp) {
BUFFER_TRACE(bh2, "get_write_access");
fatal = ext4_journal_get_write_access(handle, bh2);
- if (fatal) goto error_return;
-
- if (gdp) {
- ext4_lock_group(sb, block_group);
- count = ext4_free_inodes_count(sb, gdp) + 1;
- ext4_free_inodes_set(sb, gdp, count);
- if (is_directory) {
- count = ext4_used_dirs_count(sb, gdp) - 1;
- ext4_used_dirs_set(sb, gdp, count);
- if (sbi->s_log_groups_per_flex) {
- ext4_group_t f;
-
- f = ext4_flex_group(sbi, block_group);
- atomic_dec(&sbi->s_flex_groups[f].used_dirs);
- }
+ }
+ ext4_lock_group(sb, block_group);
+ cleared = ext4_clear_bit(bit, bitmap_bh->b_data);
+ if (fatal || !cleared) {
+ ext4_unlock_group(sb, block_group);
+ goto out;
+ }
- }
- gdp->bg_checksum = ext4_group_desc_csum(sbi,
- block_group, gdp);
- ext4_unlock_group(sb, block_group);
- percpu_counter_inc(&sbi->s_freeinodes_counter);
- if (is_directory)
- percpu_counter_dec(&sbi->s_dirs_counter);
-
- if (sbi->s_log_groups_per_flex) {
- ext4_group_t f;
-
- f = ext4_flex_group(sbi, block_group);
- atomic_inc(&sbi->s_flex_groups[f].free_inodes);
- }
- }
- BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
- err = ext4_handle_dirty_metadata(handle, NULL, bh2);
- if (!fatal) fatal = err;
+ count = ext4_free_inodes_count(sb, gdp) + 1;
+ ext4_free_inodes_set(sb, gdp, count);
+ if (is_directory) {
+ count = ext4_used_dirs_count(sb, gdp) - 1;
+ ext4_used_dirs_set(sb, gdp, count);
+ percpu_counter_dec(&sbi->s_dirs_counter);
}
- BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
- err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
- if (!fatal)
- fatal = err;
- sb->s_dirt = 1;
+ gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
+ ext4_unlock_group(sb, block_group);
+
+ percpu_counter_inc(&sbi->s_freeinodes_counter);
+ if (sbi->s_log_groups_per_flex) {
+ ext4_group_t f = ext4_flex_group(sbi, block_group);
+
+ atomic_inc(&sbi->s_flex_groups[f].free_inodes);
+ if (is_directory)
+ atomic_dec(&sbi->s_flex_groups[f].used_dirs);
+ }
+ BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
+ fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
+out:
+ if (cleared) {
+ BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
+ err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
+ if (!fatal)
+ fatal = err;
+ sb->s_dirt = 1;
+ } else
+ ext4_error(sb, "bit already cleared for inode %lu", ino);
+
error_return:
brelse(bitmap_bh);
ext4_std_error(sb, fatal);
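
The restructured ext4_free_inode() above clears the bitmap bit under the group lock and only adjusts the group counters and dirties the bitmap when the bit was actually set, so a double free is reported without corrupting the counts. A rough user-space model of that ordering, with a mutex standing in for ext4_lock_group() (all names here are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t group_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long bitmap;        /* one group's inode bitmap (toy size) */
static unsigned int free_inodes;    /* group descriptor counter */

/* Returns 1 if the bit was set and we freed it, 0 on a double free. */
static int toy_free_inode(unsigned int bit)
{
        int cleared;

        pthread_mutex_lock(&group_lock);
        cleared = (bitmap >> bit) & 1;
        bitmap &= ~(1UL << bit);
        if (!cleared) {
                /* bit already cleared: do not touch the counters */
                pthread_mutex_unlock(&group_lock);
                return 0;
        }
        free_inodes++;              /* counter update stays under the lock */
        pthread_mutex_unlock(&group_lock);
        return 1;
}

int main(void)
{
        bitmap = 1UL << 3;          /* inode 3 is in use */
        printf("first free:  %d\n", toy_free_inode(3));
        printf("double free: %d\n", toy_free_inode(3));
        printf("free_inodes = %u\n", free_inodes);
        return 0;
}
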
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 81d605412844..9ded78c29a07 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1126,7 +1126,8 @@ void ext4_da_update_reserve_space(struct inode *inode,
*/
if (allocated_meta_blocks)
dquot_claim_block(inode, allocated_meta_blocks);
- dquot_release_reservation_block(inode, mdb_free + used);
+ dquot_release_reservation_block(inode, mdb_free + used -
+ allocated_meta_blocks);
}
/*
@@ -2349,6 +2350,15 @@ static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
sector_t next;
int nrblocks = mpd->b_size >> mpd->inode->i_blkbits;
+ /*
+ * XXX Don't go larger than mballoc is willing to allocate
+ * This is a stopgap solution. We eventually need to fold
+ * mpage_da_submit_io() into this function and then call
+ * ext4_get_blocks() multiple times in a loop
+ */
+ if (nrblocks >= 8*1024*1024/mpd->inode->i_sb->s_blocksize)
+ goto flush_it;
+
/* check if the reserved journal credits might overflow */
if (!(EXT4_I(mpd->inode)->i_flags & EXT4_EXTENTS_FL)) {
if (nrblocks >= EXT4_MAX_TRANS_DATA) {
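
The stopgap check added to mpage_add_bh_to_extent() flushes the accumulated mapping once it reaches 8 MiB worth of blocks, the most mballoc will hand out in one allocation, so the flush threshold in blocks depends on the filesystem block size. A quick stand-alone calculation of that cut-off:

#include <stdio.h>

int main(void)
{
        unsigned int blocksizes[] = { 1024, 2048, 4096 };
        unsigned int i;

        for (i = 0; i < 3; i++) {
                unsigned int bs = blocksizes[i];
                /* same expression as the patch: 8*1024*1024 / s_blocksize */
                unsigned int limit = 8 * 1024 * 1024 / bs;

                printf("blocksize %4u -> flush at %u blocks\n", bs, limit);
        }
        return 0;
}
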
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 016d0249294f..66fa0b030e37 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -375,6 +375,8 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
case EXT4_IOC_GROUP_ADD:
break;
+ case EXT4_IOC_MOVE_EXT:
+ break;
default:
return -ENOIOCTLCMD;
}
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index b423a364dca3..4f2d3a9d4e21 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -1150,7 +1150,7 @@ err:
return ret;
}
-static void ext4_mb_release_desc(struct ext4_buddy *e4b)
+static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
{
if (e4b->bd_bitmap_page)
page_cache_release(e4b->bd_bitmap_page);
@@ -1617,7 +1617,7 @@ int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
}
ext4_unlock_group(ac->ac_sb, group);
- ext4_mb_release_desc(e4b);
+ ext4_mb_unload_buddy(e4b);
return 0;
}
@@ -1672,7 +1672,7 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
ext4_mb_use_best_found(ac, e4b);
}
ext4_unlock_group(ac->ac_sb, group);
- ext4_mb_release_desc(e4b);
+ ext4_mb_unload_buddy(e4b);
return 0;
}
@@ -2025,7 +2025,6 @@ repeat:
for (i = 0; i < ngroups; group++, i++) {
struct ext4_group_info *grp;
- struct ext4_group_desc *desc;
if (group == ngroups)
group = 0;
@@ -2043,12 +2042,11 @@ repeat:
if (!ext4_mb_good_group(ac, group, cr)) {
/* someone did allocation from this group */
ext4_unlock_group(sb, group);
- ext4_mb_release_desc(&e4b);
+ ext4_mb_unload_buddy(&e4b);
continue;
}
ac->ac_groups_scanned++;
- desc = ext4_get_group_desc(sb, group, NULL);
if (cr == 0)
ext4_mb_simple_scan_group(ac, &e4b);
else if (cr == 1 &&
@@ -2058,7 +2056,7 @@ repeat:
ext4_mb_complex_scan_group(ac, &e4b);
ext4_unlock_group(sb, group);
- ext4_mb_release_desc(&e4b);
+ ext4_mb_unload_buddy(&e4b);
if (ac->ac_status != AC_STATUS_CONTINUE)
break;
@@ -2148,7 +2146,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
ext4_lock_group(sb, group);
memcpy(&sg, ext4_get_group_info(sb, group), i);
ext4_unlock_group(sb, group);
- ext4_mb_release_desc(&e4b);
+ ext4_mb_unload_buddy(&e4b);
seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
sg.info.bb_fragments, sg.info.bb_first_free);
@@ -2536,6 +2534,7 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
entry->count, entry->group, entry);
if (test_opt(sb, DISCARD)) {
+ int ret;
ext4_fsblk_t discard_block;
discard_block = entry->start_blk +
@@ -2543,7 +2542,12 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
trace_ext4_discard_blocks(sb,
(unsigned long long)discard_block,
entry->count);
- sb_issue_discard(sb, discard_block, entry->count);
+ ret = sb_issue_discard(sb, discard_block, entry->count);
+ if (ret == -EOPNOTSUPP) {
+ ext4_warning(sb,
+ "discard not supported, disabling");
+ clear_opt(EXT4_SB(sb)->s_mount_opt, DISCARD);
+ }
}
err = ext4_mb_load_buddy(sb, entry->group, &e4b);
@@ -2568,7 +2572,7 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
}
ext4_unlock_group(sb, entry->group);
kmem_cache_free(ext4_free_ext_cachep, entry);
- ext4_mb_release_desc(&e4b);
+ ext4_mb_unload_buddy(&e4b);
}
mb_debug(1, "freed %u blocks in %u structures\n", count, count2);
@@ -3697,7 +3701,7 @@ out:
ext4_unlock_group(sb, group);
if (ac)
kmem_cache_free(ext4_ac_cachep, ac);
- ext4_mb_release_desc(&e4b);
+ ext4_mb_unload_buddy(&e4b);
put_bh(bitmap_bh);
return free;
}
@@ -3801,7 +3805,7 @@ repeat:
if (bitmap_bh == NULL) {
ext4_error(sb, "Error reading block bitmap for %u",
group);
- ext4_mb_release_desc(&e4b);
+ ext4_mb_unload_buddy(&e4b);
continue;
}
@@ -3810,7 +3814,7 @@ repeat:
ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac);
ext4_unlock_group(sb, group);
- ext4_mb_release_desc(&e4b);
+ ext4_mb_unload_buddy(&e4b);
put_bh(bitmap_bh);
list_del(&pa->u.pa_tmp_list);
@@ -4074,7 +4078,7 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
ext4_mb_release_group_pa(&e4b, pa, ac);
ext4_unlock_group(sb, group);
- ext4_mb_release_desc(&e4b);
+ ext4_mb_unload_buddy(&e4b);
list_del(&pa->u.pa_tmp_list);
call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
}
@@ -4610,7 +4614,7 @@ do_more:
atomic_add(count, &sbi->s_flex_groups[flex_group].free_blocks);
}
- ext4_mb_release_desc(&e4b);
+ ext4_mb_unload_buddy(&e4b);
freed += count;
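
release_blocks_on_commit() now checks the result of sb_issue_discard() and clears the DISCARD mount option for the rest of the mount when the device cannot discard, so the warning appears once instead of on every commit. A hedged user-space sketch of that latch pattern, with the discard call stubbed out:

#include <errno.h>
#include <stdio.h>

static int discard_supported;          /* stand-in for the DISCARD mount flag */

/* Stub for sb_issue_discard(): pretend the device lacks TRIM support. */
static int toy_issue_discard(unsigned long long block, unsigned long count)
{
        (void)block;
        (void)count;
        return -EOPNOTSUPP;
}

static void toy_release_blocks(unsigned long long block, unsigned long count)
{
        if (discard_supported) {
                int ret = toy_issue_discard(block, count);

                if (ret == -EOPNOTSUPP) {
                        fprintf(stderr, "discard not supported, disabling\n");
                        discard_supported = 0; /* never try again this mount */
                }
        }
}

int main(void)
{
        discard_supported = 1;
        toy_release_blocks(1000, 8);   /* prints the warning once */
        toy_release_blocks(2000, 8);   /* silent: discard already disabled */
        return 0;
}
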
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index d1fc662cc311..82621a360932 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -482,6 +482,7 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode,
int depth = ext_depth(orig_inode);
int ret;
+ start_ext.ee_block = end_ext.ee_block = 0;
o_start = o_end = oext = orig_path[depth].p_ext;
oext_alen = ext4_ext_get_actual_len(oext);
start_ext.ee_len = end_ext.ee_len = 0;
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 5692c48754a0..6df797eb9aeb 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -911,7 +911,8 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
percpu_counter_add(&sbi->s_freeinodes_counter,
EXT4_INODES_PER_GROUP(sb));
- if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
+ if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG) &&
+ sbi->s_log_groups_per_flex) {
ext4_group_t flex_group;
flex_group = ext4_flex_group(sbi, input->group);
atomic_add(input->free_blocks_count,
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index e14d22c170d5..00d09f58f188 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -241,6 +241,7 @@ handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
if (sb->s_flags & MS_RDONLY)
return ERR_PTR(-EROFS);
+ vfs_check_frozen(sb, SB_FREEZE_WRITE);
/* Special case here: if the journal has aborted behind our
* backs (eg. EIO in the commit thread), then we still need to
* take the FS itself readonly cleanly. */
@@ -3485,8 +3486,10 @@ int ext4_force_commit(struct super_block *sb)
return 0;
journal = EXT4_SB(sb)->s_journal;
- if (journal)
+ if (journal) {
+ vfs_check_frozen(sb, SB_FREEZE_WRITE);
ret = ext4_journal_force_commit(journal);
+ }
return ret;
}
@@ -3535,18 +3538,16 @@ static int ext4_freeze(struct super_block *sb)
* the journal.
*/
error = jbd2_journal_flush(journal);
- if (error < 0) {
- out:
- jbd2_journal_unlock_updates(journal);
- return error;
- }
+ if (error < 0)
+ goto out;
/* Journal blocked and flushed, clear needs_recovery flag. */
EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
error = ext4_commit_super(sb, 1);
- if (error)
- goto out;
- return 0;
+out:
+ /* we rely on s_frozen to stop further updates */
+ jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
+ return error;
}
/*
@@ -3563,7 +3564,6 @@ static int ext4_unfreeze(struct super_block *sb)
EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
ext4_commit_super(sb, 1);
unlock_super(sb);
- jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
return 0;
}
diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c
index 00740cb32be3..ed9354aff279 100644
--- a/fs/ext4/symlink.c
+++ b/fs/ext4/symlink.c
@@ -34,6 +34,7 @@ const struct inode_operations ext4_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = page_follow_link_light,
.put_link = page_put_link,
+ .setattr = ext4_setattr,
#ifdef CONFIG_EXT4_FS_XATTR
.setxattr = generic_setxattr,
.getxattr = generic_getxattr,
@@ -45,6 +46,7 @@ const struct inode_operations ext4_symlink_inode_operations = {
const struct inode_operations ext4_fast_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = ext4_follow_link,
+ .setattr = ext4_setattr,
#ifdef CONFIG_EXT4_FS_XATTR
.setxattr = generic_setxattr,
.getxattr = generic_getxattr,
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index 530b4ca01510..20a1b92e035e 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -19,6 +19,7 @@
#include <linux/buffer_head.h>
#include <linux/compat.h>
#include <asm/uaccess.h>
+#include <linux/kernel.h>
#include "fat.h"
/*
@@ -140,28 +141,22 @@ static int uni16_to_x8(unsigned char *ascii, const wchar_t *uni, int len,
{
const wchar_t *ip;
wchar_t ec;
- unsigned char *op, nc;
+ unsigned char *op;
int charlen;
- int k;
ip = uni;
op = ascii;
while (*ip && ((len - NLS_MAX_CHARSET_SIZE) > 0)) {
ec = *ip++;
- if ( (charlen = nls->uni2char(ec, op, NLS_MAX_CHARSET_SIZE)) > 0) {
+ if ((charlen = nls->uni2char(ec, op, NLS_MAX_CHARSET_SIZE)) > 0) {
op += charlen;
len -= charlen;
} else {
if (uni_xlate == 1) {
- *op = ':';
- for (k = 4; k > 0; k--) {
- nc = ec & 0xF;
- op[k] = nc > 9 ? nc + ('a' - 10)
- : nc + '0';
- ec >>= 4;
- }
- op += 5;
+ *op++ = ':';
+ op = pack_hex_byte(op, ec >> 8);
+ op = pack_hex_byte(op, ec);
len -= 5;
} else {
*op++ = '?';
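
The fat change replaces the open-coded hex loop with pack_hex_byte() (hence the new linux/kernel.h include): an untranslatable 16-bit character is emitted as ':' followed by its four hex digits, still five bytes in total, which is why len is still reduced by 5. A rough user-space equivalent, for illustration only:

#include <stdio.h>
#include <wchar.h>

/* Rough user-space equivalent of the kernel's pack_hex_byte(): writes two
 * lowercase hex digits for one byte and returns the advanced pointer. */
static char *toy_pack_hex_byte(char *buf, unsigned char byte)
{
        static const char hex[] = "0123456789abcdef";

        *buf++ = hex[byte >> 4];
        *buf++ = hex[byte & 0x0f];
        return buf;
}

int main(void)
{
        wchar_t ec = 0x263a;           /* a character the NLS table can't map */
        char out[8];
        char *op = out;

        *op++ = ':';
        op = toy_pack_hex_byte(op, ec >> 8);
        op = toy_pack_hex_byte(op, ec);
        *op = '\0';

        printf("escaped as \"%s\" (5 bytes)\n", out);
        return 0;
}
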
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 0ce143bd7d56..c611818893b2 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -1497,10 +1497,8 @@ out_fail:
iput(fat_inode);
if (root_inode)
iput(root_inode);
- if (sbi->nls_io)
- unload_nls(sbi->nls_io);
- if (sbi->nls_disk)
- unload_nls(sbi->nls_disk);
+ unload_nls(sbi->nls_io);
+ unload_nls(sbi->nls_disk);
if (sbi->options.iocharset != fat_default_iocharset)
kfree(sbi->options.iocharset);
sb->s_fs_info = NULL;
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 452d02f9075e..0a140741b39e 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -614,9 +614,15 @@ int send_sigurg(struct fown_struct *fown)
return ret;
}
-static DEFINE_RWLOCK(fasync_lock);
+static DEFINE_SPINLOCK(fasync_lock);
static struct kmem_cache *fasync_cache __read_mostly;
+static void fasync_free_rcu(struct rcu_head *head)
+{
+ kmem_cache_free(fasync_cache,
+ container_of(head, struct fasync_struct, fa_rcu));
+}
+
/*
* Remove a fasync entry. If successfully removed, return
* positive and clear the FASYNC flag. If no entry exists,
@@ -625,8 +631,6 @@ static struct kmem_cache *fasync_cache __read_mostly;
* NOTE! It is very important that the FASYNC flag always
* match the state "is the filp on a fasync list".
*
- * We always take the 'filp->f_lock', in since fasync_lock
- * needs to be irq-safe.
*/
static int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
{
@@ -634,17 +638,22 @@ static int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
int result = 0;
spin_lock(&filp->f_lock);
- write_lock_irq(&fasync_lock);
+ spin_lock(&fasync_lock);
for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
if (fa->fa_file != filp)
continue;
+
+ spin_lock_irq(&fa->fa_lock);
+ fa->fa_file = NULL;
+ spin_unlock_irq(&fa->fa_lock);
+
*fp = fa->fa_next;
- kmem_cache_free(fasync_cache, fa);
+ call_rcu(&fa->fa_rcu, fasync_free_rcu);
filp->f_flags &= ~FASYNC;
result = 1;
break;
}
- write_unlock_irq(&fasync_lock);
+ spin_unlock(&fasync_lock);
spin_unlock(&filp->f_lock);
return result;
}
@@ -666,25 +675,30 @@ static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fa
return -ENOMEM;
spin_lock(&filp->f_lock);
- write_lock_irq(&fasync_lock);
+ spin_lock(&fasync_lock);
for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
if (fa->fa_file != filp)
continue;
+
+ spin_lock_irq(&fa->fa_lock);
fa->fa_fd = fd;
+ spin_unlock_irq(&fa->fa_lock);
+
kmem_cache_free(fasync_cache, new);
goto out;
}
+ spin_lock_init(&new->fa_lock);
new->magic = FASYNC_MAGIC;
new->fa_file = filp;
new->fa_fd = fd;
new->fa_next = *fapp;
- *fapp = new;
+ rcu_assign_pointer(*fapp, new);
result = 1;
filp->f_flags |= FASYNC;
out:
- write_unlock_irq(&fasync_lock);
+ spin_unlock(&fasync_lock);
spin_unlock(&filp->f_lock);
return result;
}
@@ -704,37 +718,41 @@ int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fap
EXPORT_SYMBOL(fasync_helper);
-void __kill_fasync(struct fasync_struct *fa, int sig, int band)
+/*
+ * rcu_read_lock() is held
+ */
+static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
{
while (fa) {
- struct fown_struct * fown;
+ struct fown_struct *fown;
if (fa->magic != FASYNC_MAGIC) {
printk(KERN_ERR "kill_fasync: bad magic number in "
"fasync_struct!\n");
return;
}
- fown = &fa->fa_file->f_owner;
- /* Don't send SIGURG to processes which have not set a
- queued signum: SIGURG has its own default signalling
- mechanism. */
- if (!(sig == SIGURG && fown->signum == 0))
- send_sigio(fown, fa->fa_fd, band);
- fa = fa->fa_next;
+ spin_lock(&fa->fa_lock);
+ if (fa->fa_file) {
+ fown = &fa->fa_file->f_owner;
+ /* Don't send SIGURG to processes which have not set a
+ queued signum: SIGURG has its own default signalling
+ mechanism. */
+ if (!(sig == SIGURG && fown->signum == 0))
+ send_sigio(fown, fa->fa_fd, band);
+ }
+ spin_unlock(&fa->fa_lock);
+ fa = rcu_dereference(fa->fa_next);
}
}
-EXPORT_SYMBOL(__kill_fasync);
-
void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
/* First a quick test without locking: usually
* the list is empty.
*/
if (*fp) {
- read_lock(&fasync_lock);
- /* reread *fp after obtaining the lock */
- __kill_fasync(*fp, sig, band);
- read_unlock(&fasync_lock);
+ rcu_read_lock();
+ kill_fasync_rcu(rcu_dereference(*fp), sig, band);
+ rcu_read_unlock();
}
}
EXPORT_SYMBOL(kill_fasync);
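
The fasync rework above replaces the global rwlock with a plain spinlock for list updates, a per-entry fa_lock, and RCU for signal delivery: kill_fasync() walks the list under rcu_read_lock() and skips entries whose fa_file was already cleared by fasync_remove_entry(). A single-threaded user-space model of that read side, with release/acquire atomics standing in for rcu_assign_pointer()/rcu_dereference() (names are illustrative):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_fasync {
        int fd;
        void *file;                      /* NULL once the entry is detached */
        struct toy_fasync *_Atomic next;
};

static struct toy_fasync *_Atomic list_head;

static void toy_add(int fd, void *file)
{
        struct toy_fasync *fa = calloc(1, sizeof(*fa));

        fa->fd = fd;
        fa->file = file;
        atomic_store_explicit(&fa->next,
                              atomic_load_explicit(&list_head, memory_order_relaxed),
                              memory_order_relaxed);
        /* publish: models rcu_assign_pointer(*fapp, new) */
        atomic_store_explicit(&list_head, fa, memory_order_release);
}

/* Models kill_fasync_rcu(): deliver only to entries still backed by a file. */
static void toy_kill_fasync(void)
{
        struct toy_fasync *fa =
                atomic_load_explicit(&list_head, memory_order_acquire);

        while (fa) {
                if (fa->file)  /* fa_file is checked under fa_lock in the kernel */
                        printf("send SIGIO to fd %d\n", fa->fd);
                fa = atomic_load_explicit(&fa->next, memory_order_acquire);
        }
}

int main(void)
{
        int file_a, file_b;

        toy_add(3, &file_a);
        toy_add(4, &file_b);
        toy_kill_fasync();               /* both entries get a signal */
        return 0;
}
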
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 0c1d0b82dcf1..a739a0a48067 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -418,6 +418,7 @@ static int gfs2_jdata_writepages(struct address_space *mapping,
static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
struct buffer_head *dibh;
+ u64 dsize = i_size_read(&ip->i_inode);
void *kaddr;
int error;
@@ -437,9 +438,10 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
return error;
kaddr = kmap_atomic(page, KM_USER0);
- memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode),
- ip->i_disksize);
- memset(kaddr + ip->i_disksize, 0, PAGE_CACHE_SIZE - ip->i_disksize);
+ if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
+ dsize = (dibh->b_size - sizeof(struct gfs2_dinode));
+ memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
+ memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize);
kunmap_atomic(kaddr, KM_USER0);
flush_dcache_page(page);
brelse(dibh);
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 5e411d5f4697..4a48c0f4b402 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -71,11 +71,13 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
if (!PageUptodate(page)) {
void *kaddr = kmap(page);
+ u64 dsize = i_size_read(inode);
+
+ if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
+ dsize = dibh->b_size - sizeof(struct gfs2_dinode);
- memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode),
- ip->i_disksize);
- memset(kaddr + ip->i_disksize, 0,
- PAGE_CACHE_SIZE - ip->i_disksize);
+ memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
+ memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize);
kunmap(page);
SetPageUptodate(page);
@@ -1038,13 +1040,14 @@ static int trunc_start(struct gfs2_inode *ip, u64 size)
goto out;
if (gfs2_is_stuffed(ip)) {
- ip->i_disksize = size;
+ u64 dsize = size + sizeof(struct gfs2_inode);
ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
gfs2_dinode_out(ip, dibh->b_data);
- gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + size);
+ if (dsize > dibh->b_size)
+ dsize = dibh->b_size;
+ gfs2_buffer_clear_tail(dibh, dsize);
error = 1;
-
} else {
if (size & (u64)(sdp->sd_sb.sb_bsize - 1))
error = gfs2_block_truncate_page(ip->i_inode.i_mapping);
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 25fddc100f18..8295c5b5d4a9 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -1475,7 +1475,7 @@ struct inode *gfs2_dir_search(struct inode *dir, const struct qstr *name)
inode = gfs2_inode_lookup(dir->i_sb,
be16_to_cpu(dent->de_type),
be64_to_cpu(dent->de_inum.no_addr),
- be64_to_cpu(dent->de_inum.no_formal_ino), 0);
+ be64_to_cpu(dent->de_inum.no_formal_ino));
brelse(bh);
return inode;
}
diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c
index c22c21174833..dfe237a3f8ad 100644
--- a/fs/gfs2/export.c
+++ b/fs/gfs2/export.c
@@ -168,7 +168,7 @@ static struct dentry *gfs2_get_dentry(struct super_block *sb,
if (error)
goto fail;
- inode = gfs2_inode_lookup(sb, DT_UNKNOWN, inum->no_addr, 0, 0);
+ inode = gfs2_inode_lookup(sb, DT_UNKNOWN, inum->no_addr, 0);
if (IS_ERR(inode)) {
error = PTR_ERR(inode);
goto fail;
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 454d4b4eb36b..ddcdbf493536 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -855,6 +855,9 @@ void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *
gh->gh_flags = flags;
gh->gh_iflags = 0;
gh->gh_ip = (unsigned long)__builtin_return_address(0);
+ if (gh->gh_owner_pid)
+ put_pid(gh->gh_owner_pid);
+ gh->gh_owner_pid = get_pid(task_pid(current));
}
/**
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 3aac46f6853e..b5d7363b22da 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -439,9 +439,6 @@ struct gfs2_args {
struct gfs2_tune {
spinlock_t gt_spin;
- unsigned int gt_incore_log_blocks;
- unsigned int gt_log_flush_secs;
-
unsigned int gt_logd_secs;
unsigned int gt_quota_simul_sync; /* Max quotavals to sync at once */
@@ -462,6 +459,7 @@ enum {
SDF_SHUTDOWN = 2,
SDF_NOBARRIERS = 3,
SDF_NORECOVERY = 4,
+ SDF_DEMOTE = 5,
};
#define GFS2_FSNAME_LEN 256
@@ -618,6 +616,7 @@ struct gfs2_sbd {
unsigned int sd_log_commited_databuf;
int sd_log_commited_revoke;
+ atomic_t sd_log_pinned;
unsigned int sd_log_num_buf;
unsigned int sd_log_num_revoke;
unsigned int sd_log_num_rg;
@@ -629,15 +628,17 @@ struct gfs2_sbd {
struct list_head sd_log_le_databuf;
struct list_head sd_log_le_ordered;
+ atomic_t sd_log_thresh1;
+ atomic_t sd_log_thresh2;
atomic_t sd_log_blks_free;
- struct mutex sd_log_reserve_mutex;
+ wait_queue_head_t sd_log_waitq;
+ wait_queue_head_t sd_logd_waitq;
u64 sd_log_sequence;
unsigned int sd_log_head;
unsigned int sd_log_tail;
int sd_log_idle;
- unsigned long sd_log_flush_time;
struct rw_semaphore sd_log_flush_lock;
atomic_t sd_log_in_flight;
wait_queue_head_t sd_log_flush_wait;
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index b1bf2694fb2b..51d8061fa07a 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -158,7 +158,6 @@ void gfs2_set_iop(struct inode *inode)
* @sb: The super block
* @no_addr: The inode number
* @type: The type of the inode
- * @skip_freeing: set this not return an inode if it is currently being freed.
*
* Returns: A VFS inode, or an error
*/
@@ -166,17 +165,14 @@ void gfs2_set_iop(struct inode *inode)
struct inode *gfs2_inode_lookup(struct super_block *sb,
unsigned int type,
u64 no_addr,
- u64 no_formal_ino, int skip_freeing)
+ u64 no_formal_ino)
{
struct inode *inode;
struct gfs2_inode *ip;
struct gfs2_glock *io_gl;
int error;
- if (skip_freeing)
- inode = gfs2_iget_skip(sb, no_addr);
- else
- inode = gfs2_iget(sb, no_addr);
+ inode = gfs2_iget(sb, no_addr);
ip = GFS2_I(inode);
if (!inode)
@@ -234,13 +230,100 @@ fail_glock:
fail_iopen:
gfs2_glock_put(io_gl);
fail_put:
- ip->i_gl->gl_object = NULL;
+ if (inode->i_state & I_NEW)
+ ip->i_gl->gl_object = NULL;
gfs2_glock_put(ip->i_gl);
fail:
- iget_failed(inode);
+ if (inode->i_state & I_NEW)
+ iget_failed(inode);
+ else
+ iput(inode);
return ERR_PTR(error);
}
+/**
+ * gfs2_unlinked_inode_lookup - Lookup an unlinked inode for reclamation
+ * @sb: The super block
+ * @no_addr: The inode number
+ * @inode: A pointer to the inode found, if any
+ *
+ * Returns: 0 and *inode if no errors occurred. If an error occurs,
+ * the resulting *inode may or may not be NULL.
+ */
+
+int gfs2_unlinked_inode_lookup(struct super_block *sb, u64 no_addr,
+ struct inode **inode)
+{
+ struct gfs2_sbd *sdp;
+ struct gfs2_inode *ip;
+ struct gfs2_glock *io_gl;
+ int error;
+ struct gfs2_holder gh;
+
+ *inode = gfs2_iget_skip(sb, no_addr);
+
+ if (!(*inode))
+ return -ENOBUFS;
+
+ if (!((*inode)->i_state & I_NEW))
+ return -ENOBUFS;
+
+ ip = GFS2_I(*inode);
+ sdp = GFS2_SB(*inode);
+ ip->i_no_formal_ino = -1;
+
+ error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
+ if (unlikely(error))
+ goto fail;
+ ip->i_gl->gl_object = ip;
+
+ error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
+ if (unlikely(error))
+ goto fail_put;
+
+ set_bit(GIF_INVALID, &ip->i_flags);
+ error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, LM_FLAG_TRY | GL_EXACT,
+ &ip->i_iopen_gh);
+ if (unlikely(error)) {
+ if (error == GLR_TRYFAILED)
+ error = 0;
+ goto fail_iopen;
+ }
+ ip->i_iopen_gh.gh_gl->gl_object = ip;
+ gfs2_glock_put(io_gl);
+
+ (*inode)->i_mode = DT2IF(DT_UNKNOWN);
+
+ /*
+ * We must read the inode in order to work out its type in
+ * this case. Note that this doesn't happen often as we normally
+ * know the type beforehand. This code path only occurs during
+ * unlinked inode recovery (where it is safe to do this glock,
+ * which is not true in the general case).
+ */
+ error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, LM_FLAG_TRY,
+ &gh);
+ if (unlikely(error)) {
+ if (error == GLR_TRYFAILED)
+ error = 0;
+ goto fail_glock;
+ }
+ /* Inode is now uptodate */
+ gfs2_glock_dq_uninit(&gh);
+ gfs2_set_iop(*inode);
+
+ return 0;
+fail_glock:
+ gfs2_glock_dq(&ip->i_iopen_gh);
+fail_iopen:
+ gfs2_glock_put(io_gl);
+fail_put:
+ ip->i_gl->gl_object = NULL;
+ gfs2_glock_put(ip->i_gl);
+fail:
+ return error;
+}
+
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
const struct gfs2_dinode *str = buf;
@@ -862,7 +945,7 @@ struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
goto fail_gunlock2;
inode = gfs2_inode_lookup(dir->i_sb, IF2DT(mode), inum.no_addr,
- inum.no_formal_ino, 0);
+ inum.no_formal_ino);
if (IS_ERR(inode))
goto fail_gunlock2;
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index c341aaf67adb..e161461d4c57 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -83,8 +83,9 @@ static inline void gfs2_inum_out(const struct gfs2_inode *ip,
extern void gfs2_set_iop(struct inode *inode);
extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type,
- u64 no_addr, u64 no_formal_ino,
- int skip_freeing);
+ u64 no_addr, u64 no_formal_ino);
+extern int gfs2_unlinked_inode_lookup(struct super_block *sb, u64 no_addr,
+ struct inode **inode);
extern struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr);
extern int gfs2_inode_refresh(struct gfs2_inode *ip);
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index e5bf4b59d46e..b593f0e28f25 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -168,12 +168,11 @@ static int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai, int fl
return list_empty(&ai->ai_ail1_list);
}
-static void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags)
+static void gfs2_ail1_start(struct gfs2_sbd *sdp)
{
struct list_head *head;
u64 sync_gen;
- struct list_head *first;
- struct gfs2_ail *first_ai, *ai, *tmp;
+ struct gfs2_ail *ai;
int done = 0;
gfs2_log_lock(sdp);
@@ -184,21 +183,9 @@ static void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags)
}
sync_gen = sdp->sd_ail_sync_gen++;
- first = head->prev;
- first_ai = list_entry(first, struct gfs2_ail, ai_list);
- first_ai->ai_sync_gen = sync_gen;
- gfs2_ail1_start_one(sdp, first_ai); /* This may drop log lock */
-
- if (flags & DIO_ALL)
- first = NULL;
-
while(!done) {
- if (first && (head->prev != first ||
- gfs2_ail1_empty_one(sdp, first_ai, 0)))
- break;
-
done = 1;
- list_for_each_entry_safe_reverse(ai, tmp, head, ai_list) {
+ list_for_each_entry_reverse(ai, head, ai_list) {
if (ai->ai_sync_gen >= sync_gen)
continue;
ai->ai_sync_gen = sync_gen;
@@ -290,58 +277,57 @@ static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
* flush time, so we ensure that we have just enough free blocks at all
* times to avoid running out during a log flush.
*
+ * We no longer flush the log here, instead we wake up logd to do that
+ * for us. To avoid the thundering herd and to ensure that we deal fairly
+ * with queued waiters, we use an exclusive wait. This means that when we
+ * get woken with enough journal space to get our reservation, we need to
+ * wake the next waiter on the list.
+ *
* Returns: errno
*/
int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{
- unsigned int try = 0;
unsigned reserved_blks = 6 * (4096 / sdp->sd_vfs->s_blocksize);
+ unsigned wanted = blks + reserved_blks;
+ DEFINE_WAIT(wait);
+ int did_wait = 0;
+ unsigned int free_blocks;
if (gfs2_assert_warn(sdp, blks) ||
gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
return -EINVAL;
-
- mutex_lock(&sdp->sd_log_reserve_mutex);
- gfs2_log_lock(sdp);
- while(atomic_read(&sdp->sd_log_blks_free) <= (blks + reserved_blks)) {
- gfs2_log_unlock(sdp);
- gfs2_ail1_empty(sdp, 0);
- gfs2_log_flush(sdp, NULL);
-
- if (try++)
- gfs2_ail1_start(sdp, 0);
- gfs2_log_lock(sdp);
+retry:
+ free_blocks = atomic_read(&sdp->sd_log_blks_free);
+ if (unlikely(free_blocks <= wanted)) {
+ do {
+ prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait,
+ TASK_UNINTERRUPTIBLE);
+ wake_up(&sdp->sd_logd_waitq);
+ did_wait = 1;
+ if (atomic_read(&sdp->sd_log_blks_free) <= wanted)
+ io_schedule();
+ free_blocks = atomic_read(&sdp->sd_log_blks_free);
+ } while(free_blocks <= wanted);
+ finish_wait(&sdp->sd_log_waitq, &wait);
}
- atomic_sub(blks, &sdp->sd_log_blks_free);
+ if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
+ free_blocks - blks) != free_blocks)
+ goto retry;
trace_gfs2_log_blocks(sdp, -blks);
- gfs2_log_unlock(sdp);
- mutex_unlock(&sdp->sd_log_reserve_mutex);
+
+ /*
+ * If we waited, then so might others, wake them up _after_ we get
+ * our share of the log.
+ */
+ if (unlikely(did_wait))
+ wake_up(&sdp->sd_log_waitq);
down_read(&sdp->sd_log_flush_lock);
return 0;
}
-/**
- * gfs2_log_release - Release a given number of log blocks
- * @sdp: The GFS2 superblock
- * @blks: The number of blocks
- *
- */
-
-void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
-{
-
- gfs2_log_lock(sdp);
- atomic_add(blks, &sdp->sd_log_blks_free);
- trace_gfs2_log_blocks(sdp, blks);
- gfs2_assert_withdraw(sdp,
- atomic_read(&sdp->sd_log_blks_free) <= sdp->sd_jdesc->jd_blocks);
- gfs2_log_unlock(sdp);
- up_read(&sdp->sd_log_flush_lock);
-}
-
static u64 log_bmap(struct gfs2_sbd *sdp, unsigned int lbn)
{
struct gfs2_journal_extent *je;
@@ -559,11 +545,10 @@ static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
ail2_empty(sdp, new_tail);
- gfs2_log_lock(sdp);
atomic_add(dist, &sdp->sd_log_blks_free);
trace_gfs2_log_blocks(sdp, dist);
- gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <= sdp->sd_jdesc->jd_blocks);
- gfs2_log_unlock(sdp);
+ gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
+ sdp->sd_jdesc->jd_blocks);
sdp->sd_log_tail = new_tail;
}
@@ -615,6 +600,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
if (buffer_eopnotsupp(bh)) {
clear_buffer_eopnotsupp(bh);
set_buffer_uptodate(bh);
+ fs_info(sdp, "barrier sync failed - disabling barriers\n");
set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
lock_buffer(bh);
skip_barrier:
@@ -822,6 +808,13 @@ static void buf_lo_incore_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
* @sdp: the filesystem
* @tr: the transaction
*
+ * We wake up gfs2_logd if the number of pinned blocks exceed thresh1
+ * or the total number of used blocks (pinned blocks plus AIL blocks)
+ * is greater than thresh2.
+ *
+ * At mount time thresh1 is 2/5ths of journal size, thresh2 is 4/5ths of
+ * journal size.
+ *
* Returns: errno
*/
@@ -832,10 +825,10 @@ void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
up_read(&sdp->sd_log_flush_lock);
- gfs2_log_lock(sdp);
- if (sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks))
- wake_up_process(sdp->sd_logd_process);
- gfs2_log_unlock(sdp);
+ if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
+ ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) >
+ atomic_read(&sdp->sd_log_thresh2)))
+ wake_up(&sdp->sd_logd_waitq);
}
/**
@@ -882,13 +875,23 @@ void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
{
gfs2_log_flush(sdp, NULL);
for (;;) {
- gfs2_ail1_start(sdp, DIO_ALL);
+ gfs2_ail1_start(sdp);
if (gfs2_ail1_empty(sdp, DIO_ALL))
break;
msleep(10);
}
}
+static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
+{
+ return (atomic_read(&sdp->sd_log_pinned) >= atomic_read(&sdp->sd_log_thresh1));
+}
+
+static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
+{
+ unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);
+ return used_blocks >= atomic_read(&sdp->sd_log_thresh2);
+}
/**
* gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
@@ -901,28 +904,43 @@ void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
int gfs2_logd(void *data)
{
struct gfs2_sbd *sdp = data;
- unsigned long t;
- int need_flush;
+ unsigned long t = 1;
+ DEFINE_WAIT(wait);
+ unsigned preflush;
while (!kthread_should_stop()) {
- /* Advance the log tail */
- t = sdp->sd_log_flush_time +
- gfs2_tune_get(sdp, gt_log_flush_secs) * HZ;
+ preflush = atomic_read(&sdp->sd_log_pinned);
+ if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
+ gfs2_ail1_empty(sdp, DIO_ALL);
+ gfs2_log_flush(sdp, NULL);
+ gfs2_ail1_empty(sdp, DIO_ALL);
+ }
- gfs2_ail1_empty(sdp, DIO_ALL);
- gfs2_log_lock(sdp);
- need_flush = sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks);
- gfs2_log_unlock(sdp);
- if (need_flush || time_after_eq(jiffies, t)) {
+ if (gfs2_ail_flush_reqd(sdp)) {
+ gfs2_ail1_start(sdp);
+ io_schedule();
+ gfs2_ail1_empty(sdp, 0);
gfs2_log_flush(sdp, NULL);
- sdp->sd_log_flush_time = jiffies;
+ gfs2_ail1_empty(sdp, DIO_ALL);
}
+ wake_up(&sdp->sd_log_waitq);
t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;
if (freezing(current))
refrigerator();
- schedule_timeout_interruptible(t);
+
+ do {
+ prepare_to_wait(&sdp->sd_logd_waitq, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if (!gfs2_ail_flush_reqd(sdp) &&
+ !gfs2_jrnl_flush_reqd(sdp) &&
+ !kthread_should_stop())
+ t = schedule_timeout(t);
+ } while(t && !gfs2_ail_flush_reqd(sdp) &&
+ !gfs2_jrnl_flush_reqd(sdp) &&
+ !kthread_should_stop());
+ finish_wait(&sdp->sd_logd_waitq, &wait);
}
return 0;
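
gfs2_log_reserve() above drops sd_log_reserve_mutex in favour of an exclusive wait on sd_log_waitq plus a cmpxchg retry loop on sd_log_blks_free, and a waiter that got its share wakes the next one. The lock-free reservation step can be modelled in user space with C11 atomics (a sketch under those assumptions, not the kernel code):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint log_blks_free;

/* Claim 'blks' out of log_blks_free, retrying if another thread raced us.
 * Returns 0 on success, -1 if there is not enough space (the kernel would
 * sleep on sd_log_waitq here instead of failing). */
static int toy_log_reserve(unsigned int blks, unsigned int reserved)
{
        unsigned int wanted = blks + reserved;
        unsigned int free;

        for (;;) {
                free = atomic_load(&log_blks_free);
                if (free <= wanted)
                        return -1;   /* would wait for gfs2_logd to free space */
                /* same idea as atomic_cmpxchg(&sd_log_blks_free, free, free - blks) */
                if (atomic_compare_exchange_weak(&log_blks_free, &free, free - blks))
                        return 0;
                /* lost the race: loop and re-check the updated count */
        }
}

int main(void)
{
        atomic_store(&log_blks_free, 100);
        printf("reserve 10: %d, left %u\n", toy_log_reserve(10, 6),
               atomic_load(&log_blks_free));
        printf("reserve 90: %d, left %u\n", toy_log_reserve(90, 6),
               atomic_load(&log_blks_free));
        return 0;
}
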
diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h
index 7c64510ccfd2..eb570b4ad443 100644
--- a/fs/gfs2/log.h
+++ b/fs/gfs2/log.h
@@ -51,7 +51,6 @@ unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
unsigned int ssize);
int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks);
-void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks);
void gfs2_log_incr_head(struct gfs2_sbd *sdp);
struct buffer_head *gfs2_log_get_buf(struct gfs2_sbd *sdp);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index adc260fbea90..bf33f822058d 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -54,6 +54,7 @@ static void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
if (bd->bd_ail)
list_move(&bd->bd_ail_st_list, &bd->bd_ail->ai_ail2_list);
get_bh(bh);
+ atomic_inc(&sdp->sd_log_pinned);
trace_gfs2_pin(bd, 1);
}
@@ -94,6 +95,7 @@ static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
trace_gfs2_pin(bd, 0);
gfs2_log_unlock(sdp);
unlock_buffer(bh);
+ atomic_dec(&sdp->sd_log_pinned);
}
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index a88fadc704bb..fb2a5f93b7c3 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -94,7 +94,7 @@ static int __init init_gfs2_fs(void)
if (!gfs2_glock_cachep)
goto fail;
- gfs2_glock_aspace_cachep = kmem_cache_create("gfs2_glock (aspace)",
+ gfs2_glock_aspace_cachep = kmem_cache_create("gfs2_glock(aspace)",
sizeof(struct gfs2_glock) +
sizeof(struct address_space),
0, 0, gfs2_init_gl_aspace_once);
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 0bb12c80937a..abafda1f637b 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -313,6 +313,7 @@ void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int
struct gfs2_bufdata *bd = bh->b_private;
if (test_clear_buffer_pinned(bh)) {
+ atomic_dec(&sdp->sd_log_pinned);
list_del_init(&bd->bd_le.le_list);
if (meta) {
gfs2_assert_warn(sdp, sdp->sd_log_num_buf);
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index c1309ed1c496..3593b3a7290e 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -57,8 +57,6 @@ static void gfs2_tune_init(struct gfs2_tune *gt)
{
spin_lock_init(&gt->gt_spin);
- gt->gt_incore_log_blocks = 1024;
- gt->gt_logd_secs = 1;
gt->gt_quota_simul_sync = 64;
gt->gt_quota_warn_period = 10;
gt->gt_quota_scale_num = 1;
@@ -101,14 +99,15 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
spin_lock_init(&sdp->sd_trunc_lock);
spin_lock_init(&sdp->sd_log_lock);
-
+ atomic_set(&sdp->sd_log_pinned, 0);
INIT_LIST_HEAD(&sdp->sd_log_le_buf);
INIT_LIST_HEAD(&sdp->sd_log_le_revoke);
INIT_LIST_HEAD(&sdp->sd_log_le_rg);
INIT_LIST_HEAD(&sdp->sd_log_le_databuf);
INIT_LIST_HEAD(&sdp->sd_log_le_ordered);
- mutex_init(&sdp->sd_log_reserve_mutex);
+ init_waitqueue_head(&sdp->sd_log_waitq);
+ init_waitqueue_head(&sdp->sd_logd_waitq);
INIT_LIST_HEAD(&sdp->sd_ail1_list);
INIT_LIST_HEAD(&sdp->sd_ail2_list);
@@ -487,7 +486,7 @@ static int gfs2_lookup_root(struct super_block *sb, struct dentry **dptr,
struct dentry *dentry;
struct inode *inode;
- inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0, 0);
+ inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0);
if (IS_ERR(inode)) {
fs_err(sdp, "can't read in %s inode: %ld\n", name, PTR_ERR(inode));
return PTR_ERR(inode);
@@ -733,6 +732,8 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
if (sdp->sd_args.ar_spectator) {
sdp->sd_jdesc = gfs2_jdesc_find(sdp, 0);
atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks);
+ atomic_set(&sdp->sd_log_thresh1, 2*sdp->sd_jdesc->jd_blocks/5);
+ atomic_set(&sdp->sd_log_thresh2, 4*sdp->sd_jdesc->jd_blocks/5);
} else {
if (sdp->sd_lockstruct.ls_jid >= gfs2_jindex_size(sdp)) {
fs_err(sdp, "can't mount journal #%u\n",
@@ -770,6 +771,8 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
goto fail_jinode_gh;
}
atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks);
+ atomic_set(&sdp->sd_log_thresh1, 2*sdp->sd_jdesc->jd_blocks/5);
+ atomic_set(&sdp->sd_log_thresh2, 4*sdp->sd_jdesc->jd_blocks/5);
/* Map the extents for this journal's blocks */
map_journal_extents(sdp);
@@ -951,8 +954,6 @@ static int init_threads(struct gfs2_sbd *sdp, int undo)
if (undo)
goto fail_quotad;
- sdp->sd_log_flush_time = jiffies;
-
p = kthread_run(gfs2_logd, sdp, "gfs2_logd");
error = IS_ERR(p);
if (error) {
@@ -1160,7 +1161,7 @@ static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent
GFS2_BASIC_BLOCK_SHIFT;
sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift;
- sdp->sd_tune.gt_log_flush_secs = sdp->sd_args.ar_commit;
+ sdp->sd_tune.gt_logd_secs = sdp->sd_args.ar_commit;
sdp->sd_tune.gt_quota_quantum = sdp->sd_args.ar_quota_quantum;
if (sdp->sd_args.ar_statfs_quantum) {
sdp->sd_tune.gt_statfs_slow = 0;
@@ -1323,7 +1324,7 @@ static int gfs2_get_sb(struct file_system_type *fs_type, int flags,
memset(&args, 0, sizeof(args));
args.ar_quota = GFS2_QUOTA_DEFAULT;
args.ar_data = GFS2_DATA_DEFAULT;
- args.ar_commit = 60;
+ args.ar_commit = 30;
args.ar_statfs_quantum = 30;
args.ar_quota_quantum = 60;
args.ar_errors = GFS2_ERRORS_DEFAULT;
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 6dbcbad6ab17..49667d68769e 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -637,15 +637,40 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
unsigned blocksize, iblock, pos;
struct buffer_head *bh, *dibh;
struct page *page;
- void *kaddr;
- struct gfs2_quota *qp;
- s64 value;
- int err = -EIO;
+ void *kaddr, *ptr;
+ struct gfs2_quota q, *qp;
+ int err, nbytes;
u64 size;
if (gfs2_is_stuffed(ip))
gfs2_unstuff_dinode(ip, NULL);
-
+
+ memset(&q, 0, sizeof(struct gfs2_quota));
+ err = gfs2_internal_read(ip, NULL, (char *)&q, &loc, sizeof(q));
+ if (err < 0)
+ return err;
+
+ err = -EIO;
+ qp = &q;
+ qp->qu_value = be64_to_cpu(qp->qu_value);
+ qp->qu_value += change;
+ qp->qu_value = cpu_to_be64(qp->qu_value);
+ qd->qd_qb.qb_value = qp->qu_value;
+ if (fdq) {
+ if (fdq->d_fieldmask & FS_DQ_BSOFT) {
+ qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit);
+ qd->qd_qb.qb_warn = qp->qu_warn;
+ }
+ if (fdq->d_fieldmask & FS_DQ_BHARD) {
+ qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit);
+ qd->qd_qb.qb_limit = qp->qu_limit;
+ }
+ }
+
+ /* Write the quota into the quota file on disk */
+ ptr = qp;
+ nbytes = sizeof(struct gfs2_quota);
+get_a_page:
page = grab_cache_page(mapping, index);
if (!page)
return -ENOMEM;
@@ -667,7 +692,12 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
if (!buffer_mapped(bh)) {
gfs2_block_map(inode, iblock, bh, 1);
if (!buffer_mapped(bh))
- goto unlock;
+ goto unlock_out;
+ /* If it's a newly allocated disk block for quota, zero it */
+ if (buffer_new(bh)) {
+ memset(bh->b_data, 0, bh->b_size);
+ set_buffer_uptodate(bh);
+ }
}
if (PageUptodate(page))
@@ -677,32 +707,34 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
ll_rw_block(READ_META, 1, &bh);
wait_on_buffer(bh);
if (!buffer_uptodate(bh))
- goto unlock;
+ goto unlock_out;
}
gfs2_trans_add_bh(ip->i_gl, bh, 0);
kaddr = kmap_atomic(page, KM_USER0);
- qp = kaddr + offset;
- value = (s64)be64_to_cpu(qp->qu_value) + change;
- qp->qu_value = cpu_to_be64(value);
- qd->qd_qb.qb_value = qp->qu_value;
- if (fdq) {
- if (fdq->d_fieldmask & FS_DQ_BSOFT) {
- qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit);
- qd->qd_qb.qb_warn = qp->qu_warn;
- }
- if (fdq->d_fieldmask & FS_DQ_BHARD) {
- qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit);
- qd->qd_qb.qb_limit = qp->qu_limit;
- }
- }
+ if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE)
+ nbytes = PAGE_CACHE_SIZE - offset;
+ memcpy(kaddr + offset, ptr, nbytes);
flush_dcache_page(page);
kunmap_atomic(kaddr, KM_USER0);
+ unlock_page(page);
+ page_cache_release(page);
+ /* If quota straddles page boundary, we need to update the rest of the
+ * quota at the beginning of the next page */
+ if (offset != 0) { /* first page, offset is closer to PAGE_CACHE_SIZE */
+ ptr = ptr + nbytes;
+ nbytes = sizeof(struct gfs2_quota) - nbytes;
+ offset = 0;
+ index++;
+ goto get_a_page;
+ }
+
+ /* Update the disk inode timestamp and size (if extended) */
err = gfs2_meta_inode_buffer(ip, &dibh);
if (err)
- goto unlock;
+ goto out;
size = loc + sizeof(struct gfs2_quota);
if (size > inode->i_size) {
@@ -715,7 +747,9 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
brelse(dibh);
mark_inode_dirty(inode);
-unlock:
+out:
+ return err;
+unlock_out:
unlock_page(page);
page_cache_release(page);
return err;
@@ -779,8 +813,10 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
* rgrp since it won't be allocated during the transaction
*/
al->al_requested = 1;
- /* +1 in the end for block requested above for unstuffing */
- blocks = num_qd * data_blocks + RES_DINODE + num_qd + 1;
+ /* +3 in the end for unstuffing block, inode size update block
+ * and another block in case quota straddles page boundary and
+ * two blocks need to be updated instead of 1 */
+ blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
if (nalloc)
al->al_requested += nalloc * (data_blocks + ind_blocks);
@@ -1418,10 +1454,18 @@ static int gfs2_quota_get_xstate(struct super_block *sb,
memset(fqs, 0, sizeof(struct fs_quota_stat));
fqs->qs_version = FS_QSTAT_VERSION;
- if (sdp->sd_args.ar_quota == GFS2_QUOTA_ON)
- fqs->qs_flags = (XFS_QUOTA_UDQ_ENFD | XFS_QUOTA_GDQ_ENFD);
- else if (sdp->sd_args.ar_quota == GFS2_QUOTA_ACCOUNT)
- fqs->qs_flags = (XFS_QUOTA_UDQ_ACCT | XFS_QUOTA_GDQ_ACCT);
+
+ switch (sdp->sd_args.ar_quota) {
+ case GFS2_QUOTA_ON:
+ fqs->qs_flags |= (XFS_QUOTA_UDQ_ENFD | XFS_QUOTA_GDQ_ENFD);
+ /*FALLTHRU*/
+ case GFS2_QUOTA_ACCOUNT:
+ fqs->qs_flags |= (XFS_QUOTA_UDQ_ACCT | XFS_QUOTA_GDQ_ACCT);
+ break;
+ case GFS2_QUOTA_OFF:
+ break;
+ }
+
if (sdp->sd_quota_inode) {
fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr;
fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks;
@@ -1432,8 +1476,8 @@ static int gfs2_quota_get_xstate(struct super_block *sb,
return 0;
}
-static int gfs2_xquota_get(struct super_block *sb, int type, qid_t id,
- struct fs_disk_quota *fdq)
+static int gfs2_get_dqblk(struct super_block *sb, int type, qid_t id,
+ struct fs_disk_quota *fdq)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
struct gfs2_quota_lvb *qlvb;
@@ -1477,8 +1521,8 @@ out:
/* GFS2 only supports a subset of the XFS fields */
#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD)
-static int gfs2_xquota_set(struct super_block *sb, int type, qid_t id,
- struct fs_disk_quota *fdq)
+static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
+ struct fs_disk_quota *fdq)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
@@ -1585,7 +1629,7 @@ out_put:
const struct quotactl_ops gfs2_quotactl_ops = {
.quota_sync = gfs2_quota_sync,
.get_xstate = gfs2_quota_get_xstate,
- .get_xquota = gfs2_xquota_get,
- .set_xquota = gfs2_xquota_set,
+ .get_dqblk = gfs2_get_dqblk,
+ .set_dqblk = gfs2_set_dqblk,
};
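
gfs2_adjust_quota() now assembles the whole struct gfs2_quota in memory and copies it into the page cache in at most two chunks: whatever fits in the first page from the record's offset, then the remainder at the start of the next page, which is also why do_sync() reserves two extra blocks. The split arithmetic, checked stand-alone (the page and structure sizes below are illustrative):

#include <stdio.h>

#define TOY_PAGE_SIZE  4096u
#define TOY_QUOTA_SIZE 88u             /* illustrative sizeof(struct gfs2_quota) */

int main(void)
{
        unsigned int offsets[] = { 100, 4040 };
        unsigned int i;

        for (i = 0; i < 2; i++) {
                unsigned int offset = offsets[i];
                unsigned int first = TOY_QUOTA_SIZE;

                /* first chunk is clamped to the end of the page, as in the patch */
                if (offset + TOY_QUOTA_SIZE > TOY_PAGE_SIZE)
                        first = TOY_PAGE_SIZE - offset;

                printf("offset %4u: %2u bytes in page 0, %2u bytes in page 1\n",
                       offset, first, TOY_QUOTA_SIZE - first);
        }
        return 0;
}
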
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 503b842f3ba2..8e6316bd6c51 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -854,7 +854,8 @@ static void gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
if ((start + nr_sects) != blk) {
rv = blkdev_issue_discard(bdev, start,
nr_sects, GFP_NOFS,
- DISCARD_FL_BARRIER);
+ BLKDEV_IFL_WAIT |
+ BLKDEV_IFL_BARRIER);
if (rv)
goto fail;
nr_sects = 0;
@@ -869,7 +870,7 @@ start_new_extent:
}
if (nr_sects) {
rv = blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS,
- DISCARD_FL_BARRIER);
+ BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
if (rv)
goto fail;
}
@@ -948,18 +949,20 @@ static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_alloc *al)
* try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
* @rgd: The rgrp
*
- * Returns: The inode, if one has been found
+ * Returns: 0 if no error
+ * The inode, if one has been found, is returned in *inode.
*/
-static struct inode *try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked,
- u64 skip)
+static int try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked,
+ u64 skip, struct inode **inode)
{
- struct inode *inode;
u32 goal = 0, block;
u64 no_addr;
struct gfs2_sbd *sdp = rgd->rd_sbd;
unsigned int n;
+ int error = 0;
+ *inode = NULL;
for(;;) {
if (goal >= rgd->rd_data)
break;
@@ -979,14 +982,14 @@ static struct inode *try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked,
if (no_addr == skip)
continue;
*last_unlinked = no_addr;
- inode = gfs2_inode_lookup(rgd->rd_sbd->sd_vfs, DT_UNKNOWN,
- no_addr, -1, 1);
- if (!IS_ERR(inode))
- return inode;
+ error = gfs2_unlinked_inode_lookup(rgd->rd_sbd->sd_vfs,
+ no_addr, inode);
+ if (*inode || error)
+ return error;
}
rgd->rd_flags &= ~GFS2_RDF_CHECK;
- return NULL;
+ return 0;
}
/**
@@ -1096,12 +1099,27 @@ static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
case 0:
if (try_rgrp_fit(rgd, al))
goto out;
- if (rgd->rd_flags & GFS2_RDF_CHECK)
- inode = try_rgrp_unlink(rgd, last_unlinked, ip->i_no_addr);
+ /* If the rg came in already locked, there's no
+ way we can recover from a failed try_rgrp_unlink
+ because that would require an iput which can only
+ happen after the rgrp is unlocked. */
+ if (!rg_locked && rgd->rd_flags & GFS2_RDF_CHECK)
+ error = try_rgrp_unlink(rgd, last_unlinked,
+ ip->i_no_addr, &inode);
if (!rg_locked)
gfs2_glock_dq_uninit(&al->al_rgd_gh);
- if (inode)
+ if (inode) {
+ if (error) {
+ if (inode->i_state & I_NEW)
+ iget_failed(inode);
+ else
+ iput(inode);
+ return ERR_PTR(error);
+ }
return inode;
+ }
+ if (error)
+ return ERR_PTR(error);
/* fall through */
case GLR_TRYFAILED:
rgd = recent_rgrp_next(rgd);
@@ -1130,12 +1148,23 @@ static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
case 0:
if (try_rgrp_fit(rgd, al))
goto out;
- if (rgd->rd_flags & GFS2_RDF_CHECK)
- inode = try_rgrp_unlink(rgd, last_unlinked, ip->i_no_addr);
+ if (!rg_locked && rgd->rd_flags & GFS2_RDF_CHECK)
+ error = try_rgrp_unlink(rgd, last_unlinked,
+ ip->i_no_addr, &inode);
if (!rg_locked)
gfs2_glock_dq_uninit(&al->al_rgd_gh);
- if (inode)
+ if (inode) {
+ if (error) {
+ if (inode->i_state & I_NEW)
+ iget_failed(inode);
+ else
+ iput(inode);
+ return ERR_PTR(error);
+ }
return inode;
+ }
+ if (error)
+ return ERR_PTR(error);
break;
case GLR_TRYFAILED:
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 50aac606b990..4d1aad38f1b1 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1113,7 +1113,7 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
int error;
spin_lock(&gt->gt_spin);
- args.ar_commit = gt->gt_log_flush_secs;
+ args.ar_commit = gt->gt_logd_secs;
args.ar_quota_quantum = gt->gt_quota_quantum;
if (gt->gt_statfs_slow)
args.ar_statfs_quantum = 0;
@@ -1160,7 +1160,7 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
else
clear_bit(SDF_NOBARRIERS, &sdp->sd_flags);
spin_lock(&gt->gt_spin);
- gt->gt_log_flush_secs = args.ar_commit;
+ gt->gt_logd_secs = args.ar_commit;
gt->gt_quota_quantum = args.ar_quota_quantum;
if (args.ar_statfs_quantum) {
gt->gt_statfs_slow = 0;
@@ -1305,8 +1305,8 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
}
if (args->ar_discard)
seq_printf(s, ",discard");
- val = sdp->sd_tune.gt_log_flush_secs;
- if (val != 60)
+ val = sdp->sd_tune.gt_logd_secs;
+ if (val != 30)
seq_printf(s, ",commit=%d", val);
val = sdp->sd_tune.gt_statfs_quantum;
if (val != 30)
@@ -1334,7 +1334,8 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
}
if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
seq_printf(s, ",nobarrier");
-
+ if (test_bit(SDF_DEMOTE, &sdp->sd_flags))
+ seq_printf(s, ",demote_interface_used");
return 0;
}
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 54fd98425991..946922e4b454 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -232,6 +232,8 @@ static ssize_t demote_rq_store(struct gfs2_sbd *sdp, const char *buf, size_t len
glops = gfs2_glops_list[gltype];
if (glops == NULL)
return -EINVAL;
+ if (test_and_set_bit(SDF_DEMOTE, &sdp->sd_flags))
+ fs_info(sdp, "demote interface used\n");
rv = gfs2_glock_get(sdp, glnum, glops, 0, &gl);
if (rv)
return rv;
@@ -468,8 +470,6 @@ static ssize_t name##_store(struct gfs2_sbd *sdp, const char *buf, size_t len)\
} \
TUNE_ATTR_2(name, name##_store)
-TUNE_ATTR(incore_log_blocks, 0);
-TUNE_ATTR(log_flush_secs, 0);
TUNE_ATTR(quota_warn_period, 0);
TUNE_ATTR(quota_quantum, 0);
TUNE_ATTR(max_readahead, 0);
@@ -481,8 +481,6 @@ TUNE_ATTR(statfs_quantum, 1);
TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store);
static struct attribute *tune_attrs[] = {
- &tune_attr_incore_log_blocks.attr,
- &tune_attr_log_flush_secs.attr,
&tune_attr_quota_warn_period.attr,
&tune_attr_quota_quantum.attr,
&tune_attr_max_readahead.attr,
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
index 4ef0e9fa3549..9ec73a854111 100644
--- a/fs/gfs2/trans.c
+++ b/fs/gfs2/trans.c
@@ -23,6 +23,7 @@
#include "meta_io.h"
#include "trans.h"
#include "util.h"
+#include "trace_gfs2.h"
int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
unsigned int revokes)
@@ -75,6 +76,23 @@ fail_holder_uninit:
return error;
}
+/**
+ * gfs2_log_release - Release a given number of log blocks
+ * @sdp: The GFS2 superblock
+ * @blks: The number of blocks
+ *
+ */
+
+static void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
+{
+
+ atomic_add(blks, &sdp->sd_log_blks_free);
+ trace_gfs2_log_blocks(sdp, blks);
+ gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
+ sdp->sd_jdesc->jd_blocks);
+ up_read(&sdp->sd_log_flush_lock);
+}
+
void gfs2_trans_end(struct gfs2_sbd *sdp)
{
struct gfs2_trans *tr = current->journal_info;
diff --git a/fs/inode.c b/fs/inode.c
index 407bf392e20a..87dc2e990ead 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -20,7 +20,6 @@
#include <linux/pagemap.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
-#include <linux/inotify.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/async.h>
@@ -264,12 +263,8 @@ void inode_init_once(struct inode *inode)
INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear);
i_size_ordered_init(inode);
-#ifdef CONFIG_INOTIFY
- INIT_LIST_HEAD(&inode->inotify_watches);
- mutex_init(&inode->inotify_mutex);
-#endif
#ifdef CONFIG_FSNOTIFY
- INIT_HLIST_HEAD(&inode->i_fsnotify_mark_entries);
+ INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
#endif
}
EXPORT_SYMBOL(inode_init_once);
@@ -415,7 +410,6 @@ int invalidate_inodes(struct super_block *sb)
down_write(&iprune_sem);
spin_lock(&inode_lock);
- inotify_unmount_inodes(&sb->s_inodes);
fsnotify_unmount_inodes(&sb->s_inodes);
busy = invalidate_list(&sb->s_inodes, &throw_away);
spin_unlock(&inode_lock);
@@ -1205,8 +1199,6 @@ void generic_delete_inode(struct inode *inode)
inodes_stat.nr_inodes--;
spin_unlock(&inode_lock);
- security_inode_delete(inode);
-
if (op->delete_inode) {
void (*delete)(struct inode *) = op->delete_inode;
/* Filesystems implementing their own
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index ecb44c94ba8d..28a9ddaa0c49 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -786,6 +786,12 @@ wait_for_iobuf:
jbd_debug(3, "JBD: commit phase 6\n");
+ /* All metadata is written, now write commit record and do cleanup */
+ spin_lock(&journal->j_state_lock);
+ J_ASSERT(commit_transaction->t_state == T_COMMIT);
+ commit_transaction->t_state = T_COMMIT_RECORD;
+ spin_unlock(&journal->j_state_lock);
+
if (journal_write_commit_record(journal, commit_transaction))
err = -EIO;
@@ -923,7 +929,7 @@ restart_loop:
jbd_debug(3, "JBD: commit phase 8\n");
- J_ASSERT(commit_transaction->t_state == T_COMMIT);
+ J_ASSERT(commit_transaction->t_state == T_COMMIT_RECORD);
commit_transaction->t_state = T_FINISHED;
J_ASSERT(commit_transaction == journal->j_committing_transaction);
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index bd224eec9b07..93d1e47647bd 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -565,6 +565,38 @@ int log_wait_commit(journal_t *journal, tid_t tid)
}
/*
+ * Return 1 if a given transaction has not yet sent barrier request
+ * connected with a transaction commit. If 0 is returned, transaction
+ * may or may not have sent the barrier. Used to avoid sending barrier
+ * twice in common cases.
+ */
+int journal_trans_will_send_data_barrier(journal_t *journal, tid_t tid)
+{
+ int ret = 0;
+ transaction_t *commit_trans;
+
+ if (!(journal->j_flags & JFS_BARRIER))
+ return 0;
+ spin_lock(&journal->j_state_lock);
+ /* Transaction already committed? */
+ if (tid_geq(journal->j_commit_sequence, tid))
+ goto out;
+ /*
+ * Transaction is being committed and we already proceeded to
+ * writing commit record?
+ */
+ commit_trans = journal->j_committing_transaction;
+ if (commit_trans && commit_trans->t_tid == tid &&
+ commit_trans->t_state >= T_COMMIT_RECORD)
+ goto out;
+ ret = 1;
+out:
+ spin_unlock(&journal->j_state_lock);
+ return ret;
+}
+EXPORT_SYMBOL(journal_trans_will_send_data_barrier);
+
+/*
* Log buffer allocation routines:
*/
@@ -1157,6 +1189,7 @@ int journal_destroy(journal_t *journal)
{
int err = 0;
+
/* Wait for the commit thread to wake up and die. */
journal_kill_thread(journal);
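
The T_COMMIT_RECORD state added in fs/jbd/commit.c above marks the window after all metadata has been written but before the commit record goes out; journal_trans_will_send_data_barrier() exposes that window so a filesystem can skip issuing its own cache flush when the journal commit will still send a barrier for it. Below is a minimal sketch of such a caller, assuming an fsync-style path on the data device; example_fsync_flush() and its wiring are illustrative and not part of this patch.

#include <linux/jbd.h>
#include <linux/blkdev.h>

/*
 * Sketch only: skip a redundant device flush when the commit of
 * @commit_tid has not yet issued its barrier (and therefore still will).
 */
static int example_fsync_flush(journal_t *journal, tid_t commit_tid,
			       struct block_device *bdev)
{
	if (journal_trans_will_send_data_barrier(journal, commit_tid))
		return 0;	/* the journal's barrier will cover our data */

	/* The barrier may already have gone out; flush the device ourselves. */
	return blkdev_issue_flush(bdev, GFP_KERNEL, NULL, BLKDEV_IFL_WAIT);
}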
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 30beb11ef928..076d1cc44f95 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -530,7 +530,8 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
*/
if ((journal->j_fs_dev != journal->j_dev) &&
(journal->j_flags & JBD2_BARRIER))
- blkdev_issue_flush(journal->j_fs_dev, NULL);
+ blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL,
+ BLKDEV_IFL_WAIT);
if (!(journal->j_flags & JBD2_ABORT))
jbd2_journal_update_superblock(journal, 1);
return 0;
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 671da7fb7ffd..75716d3d2be0 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -717,7 +717,8 @@ start_journal_io:
if (commit_transaction->t_flushed_data_blocks &&
(journal->j_fs_dev != journal->j_dev) &&
(journal->j_flags & JBD2_BARRIER))
- blkdev_issue_flush(journal->j_fs_dev, NULL);
+ blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL,
+ BLKDEV_IFL_WAIT);
/* Done it all: now write the commit record asynchronously. */
if (JBD2_HAS_INCOMPAT_FEATURE(journal,
@@ -727,7 +728,8 @@ start_journal_io:
if (err)
__jbd2_journal_abort_hard(journal);
if (journal->j_flags & JBD2_BARRIER)
- blkdev_issue_flush(journal->j_dev, NULL);
+ blkdev_issue_flush(journal->j_dev, GFP_KERNEL, NULL,
+ BLKDEV_IFL_WAIT);
}
err = journal_finish_inode_data_buffers(journal, commit_transaction);
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index c03d4dce4d76..bc2ff5932769 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1889,7 +1889,7 @@ static struct kmem_cache *get_slab(size_t size)
BUG_ON(i >= JBD2_MAX_SLABS);
if (unlikely(i < 0))
i = 0;
- BUG_ON(jbd2_slab[i] == 0);
+ BUG_ON(jbd2_slab[i] == NULL);
return jbd2_slab[i];
}
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index bfc70f57900f..e214d68620ac 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -1311,7 +1311,6 @@ int jbd2_journal_stop(handle_t *handle)
if (handle->h_sync)
transaction->t_synchronous_commit = 1;
current->journal_info = NULL;
- spin_lock(&journal->j_state_lock);
spin_lock(&transaction->t_handle_lock);
transaction->t_outstanding_credits -= handle->h_buffer_credits;
transaction->t_updates--;
@@ -1340,8 +1339,7 @@ int jbd2_journal_stop(handle_t *handle)
jbd_debug(2, "transaction too old, requesting commit for "
"handle %p\n", handle);
/* This is non-blocking */
- __jbd2_log_start_commit(journal, transaction->t_tid);
- spin_unlock(&journal->j_state_lock);
+ jbd2_log_start_commit(journal, transaction->t_tid);
/*
* Special case: JBD2_SYNC synchronous updates require us
@@ -1351,7 +1349,6 @@ int jbd2_journal_stop(handle_t *handle)
err = jbd2_log_wait_commit(journal, tid);
} else {
spin_unlock(&transaction->t_handle_lock);
- spin_unlock(&journal->j_state_lock);
}
lock_map_release(&handle->h_lockdep_map);
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 3451a81b2142..86e0821fc989 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -313,8 +313,8 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
case S_IFBLK:
case S_IFCHR:
/* Read the device numbers from the media */
- if (f->metadata->size != sizeof(jdev.old) &&
- f->metadata->size != sizeof(jdev.new)) {
+ if (f->metadata->size != sizeof(jdev.old_id) &&
+ f->metadata->size != sizeof(jdev.new_id)) {
printk(KERN_NOTICE "Device node has strange size %d\n", f->metadata->size);
goto error_io;
}
@@ -325,10 +325,10 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
printk(KERN_NOTICE "Read device numbers for inode %lu failed\n", (unsigned long)inode->i_ino);
goto error;
}
- if (f->metadata->size == sizeof(jdev.old))
- rdev = old_decode_dev(je16_to_cpu(jdev.old));
+ if (f->metadata->size == sizeof(jdev.old_id))
+ rdev = old_decode_dev(je16_to_cpu(jdev.old_id));
else
- rdev = new_decode_dev(je32_to_cpu(jdev.new));
+ rdev = new_decode_dev(je32_to_cpu(jdev.new_id));
case S_IFSOCK:
case S_IFIFO:
diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h
index 507ed6ec1847..36d7a849ee2c 100644
--- a/fs/jffs2/nodelist.h
+++ b/fs/jffs2/nodelist.h
@@ -312,11 +312,11 @@ static inline int jffs2_blocks_use_vmalloc(struct jffs2_sb_info *c)
static inline int jffs2_encode_dev(union jffs2_device_node *jdev, dev_t rdev)
{
if (old_valid_dev(rdev)) {
- jdev->old = cpu_to_je16(old_encode_dev(rdev));
- return sizeof(jdev->old);
+ jdev->old_id = cpu_to_je16(old_encode_dev(rdev));
+ return sizeof(jdev->old_id);
} else {
- jdev->new = cpu_to_je32(new_encode_dev(rdev));
- return sizeof(jdev->new);
+ jdev->new_id = cpu_to_je32(new_encode_dev(rdev));
+ return sizeof(jdev->new_id);
}
}
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index 9e2f6a721668..c92ea3b3ea5e 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -2438,7 +2438,7 @@ dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc, int level)
/* check if this is a control page update for an allocation.
* if so, update the leaf to reflect the new leaf value using
- * dbSplit(); otherwise (deallocation), use dbJoin() to udpate
+ * dbSplit(); otherwise (deallocation), use dbJoin() to update
* the leaf with the new value. in addition to updating the
* leaf, dbSplit() will also split the binary buddy system of
* the leaves, if required, and bubble new values within the
diff --git a/fs/libfs.c b/fs/libfs.c
index ea9a6cc9b35c..232bea425b09 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -547,6 +547,40 @@ ssize_t simple_read_from_buffer(void __user *to, size_t count, loff_t *ppos,
}
/**
+ * simple_write_to_buffer - copy data from user space to the buffer
+ * @to: the buffer to write to
+ * @available: the size of the buffer
+ * @ppos: the current position in the buffer
+ * @from: the user space buffer to read from
+ * @count: the maximum number of bytes to read
+ *
+ * The simple_write_to_buffer() function reads up to @count bytes from the user
+ * space address starting at @from into the buffer @to at offset @ppos.
+ *
+ * On success, the number of bytes written is returned and the offset @ppos is
+ * advanced by this number, or a negative value is returned on error.
+ **/
+ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
+ const void __user *from, size_t count)
+{
+ loff_t pos = *ppos;
+ size_t res;
+
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= available || !count)
+ return 0;
+ if (count > available - pos)
+ count = available - pos;
+ res = copy_from_user(to + pos, from, count);
+ if (res == count)
+ return -EFAULT;
+ count -= res;
+ *ppos = pos + count;
+ return count;
+}
+
+/**
* memory_read_from_buffer - copy data from the buffer
* @to: the kernel space buffer to read to
* @count: the maximum number of bytes to read
@@ -864,6 +898,7 @@ EXPORT_SYMBOL(simple_statfs);
EXPORT_SYMBOL(simple_sync_file);
EXPORT_SYMBOL(simple_unlink);
EXPORT_SYMBOL(simple_read_from_buffer);
+EXPORT_SYMBOL(simple_write_to_buffer);
EXPORT_SYMBOL(memory_read_from_buffer);
EXPORT_SYMBOL(simple_transaction_set);
EXPORT_SYMBOL(simple_transaction_get);
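
simple_write_to_buffer() is the write-side counterpart of simple_read_from_buffer(): it bounds the copy by the buffer size, advances *ppos, and returns how many bytes were actually consumed. A minimal sketch of a debugfs-style write handler built on it follows; the buffer, handler, and file_operations names are illustrative and not part of this patch.

#include <linux/fs.h>
#include <linux/module.h>

/* Sketch: a ->write() handler that fills a fixed driver buffer. */
static char example_buf[64];

static ssize_t example_write(struct file *file, const char __user *ubuf,
			     size_t count, loff_t *ppos)
{
	/* Copies at most sizeof(example_buf) - *ppos bytes and advances *ppos. */
	return simple_write_to_buffer(example_buf, sizeof(example_buf), ppos,
				      ubuf, count);
}

static const struct file_operations example_fops = {
	.owner = THIS_MODULE,
	.write = example_write,
};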
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
index 243c00071f76..9bd2ce2a3040 100644
--- a/fs/logfs/dev_bdev.c
+++ b/fs/logfs/dev_bdev.c
@@ -303,6 +303,11 @@ static void bdev_put_device(struct super_block *sb)
close_bdev_exclusive(logfs_super(sb)->s_bdev, FMODE_READ|FMODE_WRITE);
}
+static int bdev_can_write_buf(struct super_block *sb, u64 ofs)
+{
+ return 0;
+}
+
static const struct logfs_device_ops bd_devops = {
.find_first_sb = bdev_find_first_sb,
.find_last_sb = bdev_find_last_sb,
@@ -310,6 +315,7 @@ static const struct logfs_device_ops bd_devops = {
.readpage = bdev_readpage,
.writeseg = bdev_writeseg,
.erase = bdev_erase,
+ .can_write_buf = bdev_can_write_buf,
.sync = bdev_sync,
.put_device = bdev_put_device,
};
diff --git a/fs/logfs/dev_mtd.c b/fs/logfs/dev_mtd.c
index cafb6ef2e05b..a85d47d13e4b 100644
--- a/fs/logfs/dev_mtd.c
+++ b/fs/logfs/dev_mtd.c
@@ -9,6 +9,7 @@
#include <linux/completion.h>
#include <linux/mount.h>
#include <linux/sched.h>
+#include <linux/slab.h>
#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))
@@ -126,7 +127,8 @@ static int mtd_readpage(void *_sb, struct page *page)
err = mtd_read(sb, page->index << PAGE_SHIFT, PAGE_SIZE,
page_address(page));
- if (err == -EUCLEAN) {
+ if (err == -EUCLEAN || err == -EBADMSG) {
+ /* -EBADMSG happens regularly on power failures */
err = 0;
/* FIXME: force GC this segment */
}
@@ -233,12 +235,32 @@ static void mtd_put_device(struct super_block *sb)
put_mtd_device(logfs_super(sb)->s_mtd);
}
+static int mtd_can_write_buf(struct super_block *sb, u64 ofs)
+{
+ struct logfs_super *super = logfs_super(sb);
+ void *buf;
+ int err;
+
+ buf = kmalloc(super->s_writesize, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ err = mtd_read(sb, ofs, super->s_writesize, buf);
+ if (err)
+ goto out;
+ if (memchr_inv(buf, 0xff, super->s_writesize))
+ err = -EIO;
+ kfree(buf);
+out:
+ return err;
+}
+
static const struct logfs_device_ops mtd_devops = {
.find_first_sb = mtd_find_first_sb,
.find_last_sb = mtd_find_last_sb,
.readpage = mtd_readpage,
.writeseg = mtd_writeseg,
.erase = mtd_erase,
+ .can_write_buf = mtd_can_write_buf,
.sync = mtd_sync,
.put_device = mtd_put_device,
};
@@ -250,5 +272,7 @@ int logfs_get_sb_mtd(struct file_system_type *type, int flags,
const struct logfs_device_ops *devops = &mtd_devops;
mtd = get_mtd_device(NULL, mtdnr);
+ if (IS_ERR(mtd))
+ return PTR_ERR(mtd);
return logfs_get_sb_device(type, flags, mtd, NULL, devops, mnt);
}
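
The new ->can_write_buf() hook (declared in logfs.h further down) asks whether the pending write buffer can still be written at a given offset; for MTD that reduces to checking that the region is still erased, which is what the memchr_inv() test in mtd_can_write_buf() encodes. A standalone sketch of that check, assuming the candidate region has already been read back from the device:

#include <linux/string.h>
#include <linux/types.h>

/*
 * Sketch: erased NOR/NAND flash reads back as 0xff, so a region can still
 * take a buffered write only if no byte deviates from 0xff.  memchr_inv()
 * returns NULL when every byte matches the given pattern.
 */
static bool example_region_is_erased(const void *buf, size_t len)
{
	return memchr_inv(buf, 0xff, len) == NULL;
}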
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index 2396a85c0f55..72d1893ddd36 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -12,7 +12,7 @@
* Atomic dir operations
*
* Directory operations are by default not atomic. Dentries and Inodes are
- * created/removed/altered in seperate operations. Therefore we need to do
+ * created/removed/altered in separate operations. Therefore we need to do
* a small amount of journaling.
*
* Create, link, mkdir, mknod and symlink all share the same function to do
diff --git a/fs/logfs/file.c b/fs/logfs/file.c
index 370f367a933e..0de524071870 100644
--- a/fs/logfs/file.c
+++ b/fs/logfs/file.c
@@ -161,7 +161,17 @@ static int logfs_writepage(struct page *page, struct writeback_control *wbc)
static void logfs_invalidatepage(struct page *page, unsigned long offset)
{
- move_page_to_btree(page);
+ struct logfs_block *block = logfs_block(page);
+
+ if (block->reserved_bytes) {
+ struct super_block *sb = page->mapping->host->i_sb;
+ struct logfs_super *super = logfs_super(sb);
+
+ super->s_dirty_pages -= block->reserved_bytes;
+ block->ops->free_block(sb, block);
+ BUG_ON(bitmap_weight(block->alias_map, LOGFS_BLOCK_FACTOR));
+ } else
+ move_page_to_btree(page);
BUG_ON(PagePrivate(page) || page->private);
}
@@ -212,10 +222,8 @@ int logfs_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
int logfs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
struct super_block *sb = dentry->d_inode->i_sb;
- struct logfs_super *super = logfs_super(sb);
- /* FIXME: write anchor */
- super->s_devops->sync(sb);
+ logfs_write_anchor(sb);
return 0;
}
diff --git a/fs/logfs/gc.c b/fs/logfs/gc.c
index 76c242fbe1b0..caa4419285dc 100644
--- a/fs/logfs/gc.c
+++ b/fs/logfs/gc.c
@@ -122,7 +122,7 @@ static void logfs_cleanse_block(struct super_block *sb, u64 ofs, u64 ino,
logfs_safe_iput(inode, cookie);
}
-static u32 logfs_gc_segment(struct super_block *sb, u32 segno, u8 dist)
+static u32 logfs_gc_segment(struct super_block *sb, u32 segno)
{
struct logfs_super *super = logfs_super(sb);
struct logfs_segment_header sh;
@@ -401,7 +401,7 @@ static int __logfs_gc_once(struct super_block *sb, struct gc_candidate *cand)
segno, (u64)segno << super->s_segshift,
dist, no_free_segments(sb), valid,
super->s_free_bytes);
- cleaned = logfs_gc_segment(sb, segno, dist);
+ cleaned = logfs_gc_segment(sb, segno);
log_gc("GC segment #%02x complete - now %x valid\n", segno,
valid - cleaned);
BUG_ON(cleaned != valid);
@@ -632,38 +632,31 @@ static int check_area(struct super_block *sb, int i)
{
struct logfs_super *super = logfs_super(sb);
struct logfs_area *area = super->s_area[i];
- struct logfs_object_header oh;
+ gc_level_t gc_level;
+ u32 cleaned, valid, ec;
u32 segno = area->a_segno;
- u32 ofs = area->a_used_bytes;
- __be32 crc;
- int err;
+ u64 ofs = dev_ofs(sb, area->a_segno, area->a_written_bytes);
if (!area->a_is_open)
return 0;
- for (ofs = area->a_used_bytes;
- ofs <= super->s_segsize - sizeof(oh);
- ofs += (u32)be16_to_cpu(oh.len) + sizeof(oh)) {
- err = wbuf_read(sb, dev_ofs(sb, segno, ofs), sizeof(oh), &oh);
- if (err)
- return err;
-
- if (!memchr_inv(&oh, 0xff, sizeof(oh)))
- break;
+ if (super->s_devops->can_write_buf(sb, ofs) == 0)
+ return 0;
- crc = logfs_crc32(&oh, sizeof(oh) - 4, 4);
- if (crc != oh.crc) {
- printk(KERN_INFO "interrupted header at %llx\n",
- dev_ofs(sb, segno, ofs));
- return 0;
- }
- }
- if (ofs != area->a_used_bytes) {
- printk(KERN_INFO "%x bytes unaccounted data found at %llx\n",
- ofs - area->a_used_bytes,
- dev_ofs(sb, segno, area->a_used_bytes));
- area->a_used_bytes = ofs;
- }
+ printk(KERN_INFO"LogFS: Possibly incomplete write at %llx\n", ofs);
+ /*
+ * The device cannot write back the write buffer. Most likely the
+ * wbuf was already written out and the system crashed at some point
+ * before the journal commit happened. In that case we wouldn't have
+ * to do anything. But if the crash happened before the wbuf was
+ * written out correctly, we must GC this segment. So assume the
+ * worst and always do the GC run.
+ */
+ area->a_is_open = 0;
+ valid = logfs_valid_bytes(sb, segno, &ec, &gc_level);
+ cleaned = logfs_gc_segment(sb, segno);
+ if (cleaned != valid)
+ return -EIO;
return 0;
}
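
The comment in the rewritten check_area() states the recovery policy: if the device can no longer write the buffer back, assume the crash tore the last write and reclaim the whole open segment. A condensed restatement of that decision follows, for orientation only; the check_area() body above is authoritative and the helper names are taken from it.

#include "logfs.h"	/* assumed: logfs-internal types and helpers */

/* Sketch: recovery decision for an open area after an unclean shutdown. */
static int example_check_open_area(struct super_block *sb, struct logfs_area *area)
{
	u64 ofs = dev_ofs(sb, area->a_segno, area->a_written_bytes);
	gc_level_t gc_level;
	u32 valid, ec;

	if (!area->a_is_open)
		return 0;		/* nothing buffered for this area */
	if (logfs_super(sb)->s_devops->can_write_buf(sb, ofs) == 0)
		return 0;		/* the wbuf can still land where it belongs */

	/* Possibly torn write: close the area and GC the segment. */
	area->a_is_open = 0;
	valid = logfs_valid_bytes(sb, area->a_segno, &ec, &gc_level);
	return logfs_gc_segment(sb, area->a_segno) == valid ? 0 : -EIO;
}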
diff --git a/fs/logfs/inode.c b/fs/logfs/inode.c
index 14ed27274da2..755a92e8daa7 100644
--- a/fs/logfs/inode.c
+++ b/fs/logfs/inode.c
@@ -193,6 +193,7 @@ static void logfs_init_inode(struct super_block *sb, struct inode *inode)
inode->i_ctime = CURRENT_TIME;
inode->i_mtime = CURRENT_TIME;
inode->i_nlink = 1;
+ li->li_refcount = 1;
INIT_LIST_HEAD(&li->li_freeing_list);
for (i = 0; i < LOGFS_EMBEDDED_FIELDS; i++)
@@ -326,7 +327,7 @@ static void logfs_set_ino_generation(struct super_block *sb,
u64 ino;
mutex_lock(&super->s_journal_mutex);
- ino = logfs_seek_hole(super->s_master_inode, super->s_last_ino);
+ ino = logfs_seek_hole(super->s_master_inode, super->s_last_ino + 1);
super->s_last_ino = ino;
super->s_inos_till_wrap--;
if (super->s_inos_till_wrap < 0) {
@@ -386,8 +387,7 @@ static void logfs_init_once(void *_li)
static int logfs_sync_fs(struct super_block *sb, int wait)
{
- /* FIXME: write anchor */
- logfs_super(sb)->s_devops->sync(sb);
+ logfs_write_anchor(sb);
return 0;
}
diff --git a/fs/logfs/journal.c b/fs/logfs/journal.c
index fb0a613f885b..4b0e0616b357 100644
--- a/fs/logfs/journal.c
+++ b/fs/logfs/journal.c
@@ -132,10 +132,9 @@ static int read_area(struct super_block *sb, struct logfs_je_area *a)
ofs = dev_ofs(sb, area->a_segno, area->a_written_bytes);
if (super->s_writesize > 1)
- logfs_buf_recover(area, ofs, a + 1, super->s_writesize);
+ return logfs_buf_recover(area, ofs, a + 1, super->s_writesize);
else
- logfs_buf_recover(area, ofs, NULL, 0);
- return 0;
+ return logfs_buf_recover(area, ofs, NULL, 0);
}
static void *unpack(void *from, void *to)
@@ -245,7 +244,7 @@ static int read_je(struct super_block *sb, u64 ofs)
read_erasecount(sb, unpack(jh, scratch));
break;
case JE_AREA:
- read_area(sb, unpack(jh, scratch));
+ err = read_area(sb, unpack(jh, scratch));
break;
case JE_OBJ_ALIAS:
err = logfs_load_object_aliases(sb, unpack(jh, scratch),
diff --git a/fs/logfs/logfs.h b/fs/logfs/logfs.h
index 0a3df1a0c936..1a9db84f8d8f 100644
--- a/fs/logfs/logfs.h
+++ b/fs/logfs/logfs.h
@@ -144,6 +144,7 @@ struct logfs_area_ops {
* @erase: erase one segment
* @read: read from the device
* @erase: erase part of the device
+ * @can_write_buf: decide whether wbuf can be written to ofs
*/
struct logfs_device_ops {
struct page *(*find_first_sb)(struct super_block *sb, u64 *ofs);
@@ -153,6 +154,7 @@ struct logfs_device_ops {
void (*writeseg)(struct super_block *sb, u64 ofs, size_t len);
int (*erase)(struct super_block *sb, loff_t ofs, size_t len,
int ensure_write);
+ int (*can_write_buf)(struct super_block *sb, u64 ofs);
void (*sync)(struct super_block *sb);
void (*put_device)(struct super_block *sb);
};
@@ -394,6 +396,7 @@ struct logfs_super {
int s_lock_count;
mempool_t *s_block_pool; /* struct logfs_block pool */
mempool_t *s_shadow_pool; /* struct logfs_shadow pool */
+ struct list_head s_writeback_list; /* writeback pages */
/*
* Space accounting:
* - s_used_bytes specifies space used to store valid data objects.
@@ -598,19 +601,19 @@ void freeseg(struct super_block *sb, u32 segno);
int logfs_init_areas(struct super_block *sb);
void logfs_cleanup_areas(struct super_block *sb);
int logfs_open_area(struct logfs_area *area, size_t bytes);
-void __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len,
+int __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len,
int use_filler);
-static inline void logfs_buf_write(struct logfs_area *area, u64 ofs,
+static inline int logfs_buf_write(struct logfs_area *area, u64 ofs,
void *buf, size_t len)
{
- __logfs_buf_write(area, ofs, buf, len, 0);
+ return __logfs_buf_write(area, ofs, buf, len, 0);
}
-static inline void logfs_buf_recover(struct logfs_area *area, u64 ofs,
+static inline int logfs_buf_recover(struct logfs_area *area, u64 ofs,
void *buf, size_t len)
{
- __logfs_buf_write(area, ofs, buf, len, 1);
+ return __logfs_buf_write(area, ofs, buf, len, 1);
}
/* super.c */
@@ -704,7 +707,7 @@ static inline gc_level_t expand_level(u64 ino, level_t __level)
u8 level = (__force u8)__level;
if (ino == LOGFS_INO_MASTER) {
- /* ifile has seperate areas */
+ /* ifile has separate areas */
level += LOGFS_MAX_LEVELS;
}
return (__force gc_level_t)level;
diff --git a/fs/logfs/logfs_abi.h b/fs/logfs/logfs_abi.h
index f674725663fe..ae960519c54a 100644
--- a/fs/logfs/logfs_abi.h
+++ b/fs/logfs/logfs_abi.h
@@ -50,9 +50,9 @@ static inline void check_##type(void) \
* 12 - gc recycled blocks, long-lived data
* 13 - replacement blocks, short-lived data
*
- * Levels 1-11 are necessary for robust gc operations and help seperate
+ * Levels 1-11 are necessary for robust gc operations and help separate
* short-lived metadata from longer-lived file data. In the future,
- * file data should get seperated into several segments based on simple
+ * file data should get separated into several segments based on simple
* heuristics. Old data recycled during gc operation is expected to be
* long-lived. New data is of uncertain life expectancy. New data
* used to replace older blocks in existing files is expected to be
@@ -117,7 +117,7 @@ static inline void check_##type(void) \
#define pure_ofs(ofs) (ofs & ~LOGFS_FULLY_POPULATED)
/*
- * LogFS needs to seperate data into levels. Each level is defined as the
+ * LogFS needs to separate data into levels. Each level is defined as the
* maximal possible distance from the master inode (inode of the inode file).
* Data blocks reside on level 0, 1x indirect block on level 1, etc.
* Inodes reside on level 6, indirect blocks for the inode file on levels 7-11.
@@ -204,7 +204,7 @@ SIZE_CHECK(logfs_segment_header, LOGFS_SEGMENT_HEADERSIZE);
* @ds_crc: crc32 of structure starting with the next field
* @ds_ifile_levels: maximum number of levels for ifile
* @ds_iblock_levels: maximum number of levels for regular files
- * @ds_data_levels: number of seperate levels for data
+ * @ds_data_levels: number of separate levels for data
* @pad0: reserved, must be 0
* @ds_feature_incompat: incompatible filesystem features
* @ds_feature_ro_compat: read-only compatible filesystem features
@@ -456,7 +456,7 @@ enum logfs_vim {
* @vim: life expectancy of data
*
* "Areas" are segments currently being used for writing. There is at least
- * one area per GC level. Several may be used to seperate long-living from
+ * one area per GC level. Several may be used to separate long-living from
* short-living data. If an area with unknown vim is encountered, it can
* simply be closed.
* The write buffer immediately follow this header.
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
index 3159db6958e5..0718d112a1a5 100644
--- a/fs/logfs/readwrite.c
+++ b/fs/logfs/readwrite.c
@@ -892,6 +892,8 @@ u64 logfs_seek_hole(struct inode *inode, u64 bix)
return bix;
else if (li->li_data[INDIRECT_INDEX] & LOGFS_FULLY_POPULATED)
bix = maxbix(li->li_height);
+ else if (bix >= maxbix(li->li_height))
+ return bix;
else {
bix = seek_holedata_loop(inode, bix, 0);
if (bix < maxbix(li->li_height))
@@ -1093,17 +1095,25 @@ static int logfs_reserve_bytes(struct inode *inode, int bytes)
int get_page_reserve(struct inode *inode, struct page *page)
{
struct logfs_super *super = logfs_super(inode->i_sb);
+ struct logfs_block *block = logfs_block(page);
int ret;
- if (logfs_block(page) && logfs_block(page)->reserved_bytes)
+ if (block && block->reserved_bytes)
return 0;
logfs_get_wblocks(inode->i_sb, page, WF_LOCK);
- ret = logfs_reserve_bytes(inode, 6 * LOGFS_MAX_OBJECTSIZE);
+ while ((ret = logfs_reserve_bytes(inode, 6 * LOGFS_MAX_OBJECTSIZE)) &&
+ !list_empty(&super->s_writeback_list)) {
+ block = list_entry(super->s_writeback_list.next,
+ struct logfs_block, alias_list);
+ block->ops->write_block(block);
+ }
if (!ret) {
alloc_data_block(inode, page);
- logfs_block(page)->reserved_bytes += 6 * LOGFS_MAX_OBJECTSIZE;
+ block = logfs_block(page);
+ block->reserved_bytes += 6 * LOGFS_MAX_OBJECTSIZE;
super->s_dirty_pages += 6 * LOGFS_MAX_OBJECTSIZE;
+ list_move_tail(&block->alias_list, &super->s_writeback_list);
}
logfs_put_wblocks(inode->i_sb, page, WF_LOCK);
return ret;
@@ -1861,7 +1871,7 @@ int logfs_truncate(struct inode *inode, u64 target)
size = target;
logfs_get_wblocks(sb, NULL, 1);
- err = __logfs_truncate(inode, target);
+ err = __logfs_truncate(inode, size);
if (!err)
err = __logfs_write_inode(inode, 0);
logfs_put_wblocks(sb, NULL, 1);
@@ -2249,6 +2259,7 @@ int logfs_init_rw(struct super_block *sb)
int min_fill = 3 * super->s_no_blocks;
INIT_LIST_HEAD(&super->s_object_alias);
+ INIT_LIST_HEAD(&super->s_writeback_list);
mutex_init(&super->s_write_mutex);
super->s_block_pool = mempool_create_kmalloc_pool(min_fill,
sizeof(struct logfs_block));
diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c
index f77ce2b470ba..a9657afb70ad 100644
--- a/fs/logfs/segment.c
+++ b/fs/logfs/segment.c
@@ -67,7 +67,7 @@ static struct page *get_mapping_page(struct super_block *sb, pgoff_t index,
return page;
}
-void __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len,
+int __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len,
int use_filler)
{
pgoff_t index = ofs >> PAGE_SHIFT;
@@ -81,8 +81,10 @@ void __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len,
copylen = min((ulong)len, PAGE_SIZE - offset);
page = get_mapping_page(area->a_sb, index, use_filler);
- SetPageUptodate(page);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
BUG_ON(!page); /* FIXME: reserve a pool */
+ SetPageUptodate(page);
memcpy(page_address(page) + offset, buf, copylen);
SetPagePrivate(page);
page_cache_release(page);
@@ -92,6 +94,7 @@ void __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len,
offset = 0;
index++;
} while (len);
+ return 0;
}
static void pad_partial_page(struct logfs_area *area)
diff --git a/fs/logfs/super.c b/fs/logfs/super.c
index 5866ee6e1327..2b2693851969 100644
--- a/fs/logfs/super.c
+++ b/fs/logfs/super.c
@@ -138,10 +138,14 @@ static int logfs_sb_set(struct super_block *sb, void *_super)
sb->s_fs_info = super;
sb->s_mtd = super->s_mtd;
sb->s_bdev = super->s_bdev;
+#ifdef CONFIG_BLOCK
if (sb->s_bdev)
sb->s_bdi = &bdev_get_queue(sb->s_bdev)->backing_dev_info;
+#endif
+#ifdef CONFIG_MTD
if (sb->s_mtd)
sb->s_bdi = sb->s_mtd->backing_dev_info;
+#endif
return 0;
}
@@ -382,7 +386,7 @@ static struct page *find_super_block(struct super_block *sb)
if (!first || IS_ERR(first))
return NULL;
last = super->s_devops->find_last_sb(sb, &super->s_sb_ofs[1]);
- if (!last || IS_ERR(first)) {
+ if (!last || IS_ERR(last)) {
page_cache_release(first);
return NULL;
}
@@ -413,7 +417,7 @@ static int __logfs_read_sb(struct super_block *sb)
page = find_super_block(sb);
if (!page)
- return -EIO;
+ return -EINVAL;
ds = page_address(page);
super->s_size = be64_to_cpu(ds->ds_filesystem_size);
diff --git a/fs/namei.c b/fs/namei.c
index a7dce91a7e42..f068192e314d 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -699,7 +699,7 @@ static int do_lookup(struct nameidata *nd, struct qstr *name,
struct path *path)
{
struct vfsmount *mnt = nd->path.mnt;
- struct dentry *dentry, *parent;
+ struct dentry *dentry, *parent, *new;
struct inode *dir;
/*
* See if the low-level filesystem might want
@@ -742,40 +742,41 @@ need_lookup:
* so doing d_lookup() (with seqlock), instead of lockfree __d_lookup
*/
dentry = d_lookup(parent, name);
- if (!dentry) {
- struct dentry *new;
-
- /* Don't create child dentry for a dead directory. */
- dentry = ERR_PTR(-ENOENT);
- if (IS_DEADDIR(dir))
+ if (dentry) {
+ /*
+ * The cache was re-populated while we waited on the
+ * mutex. We need to revalidate, this time while
+ * holding i_mutex (to avoid another race).
+ */
+ if (dentry->d_op && dentry->d_op->d_revalidate) {
+ dentry = do_revalidate(dentry, nd);
+ if (dentry)
+ goto out_unlock;
+ /*
+ * The dentry was left behind invalid. Just
+ * do the lookup.
+ */
+ } else {
goto out_unlock;
-
- new = d_alloc(parent, name);
- dentry = ERR_PTR(-ENOMEM);
- if (new) {
- dentry = dir->i_op->lookup(dir, new, nd);
- if (dentry)
- dput(new);
- else
- dentry = new;
- }
-out_unlock:
- mutex_unlock(&dir->i_mutex);
- if (IS_ERR(dentry))
- goto fail;
- goto done;
+ }
}
- /*
- * Uhhuh! Nasty case: the cache was re-populated while
- * we waited on the semaphore. Need to revalidate.
- */
- mutex_unlock(&dir->i_mutex);
- if (dentry->d_op && dentry->d_op->d_revalidate) {
- dentry = do_revalidate(dentry, nd);
- if (!dentry)
- dentry = ERR_PTR(-ENOENT);
+ /* Don't create child dentry for a dead directory. */
+ dentry = ERR_PTR(-ENOENT);
+ if (IS_DEADDIR(dir))
+ goto out_unlock;
+
+ new = d_alloc(parent, name);
+ dentry = ERR_PTR(-ENOMEM);
+ if (new) {
+ dentry = dir->i_op->lookup(dir, new, nd);
+ if (dentry)
+ dput(new);
+ else
+ dentry = new;
}
+out_unlock:
+ mutex_unlock(&dir->i_mutex);
if (IS_ERR(dentry))
goto fail;
goto done;
@@ -2627,7 +2628,7 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
{
int error;
int is_dir = S_ISDIR(old_dentry->d_inode->i_mode);
- const char *old_name;
+ const unsigned char *old_name;
if (old_dentry->d_inode == new_dentry->d_inode)
return 0;
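
The do_lookup() rework above inverts the old structure: a dcache hit is handled first and revalidated while i_mutex is held, and the slow path that allocates a dentry and calls ->lookup() is now shared by the cache-miss case and the case where revalidation left the cached dentry invalid. Below is a compressed sketch of the resulting control flow, for orientation only; it is not the kernel function, and unlocking plus the fail/done labels are elided.

#include <linux/fs.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/err.h>

/* Sketch: shape of the reworked slow path; dir->i_mutex is held throughout. */
static struct dentry *example_lookup_slow(struct inode *dir, struct dentry *parent,
					  struct qstr *name, struct nameidata *nd)
{
	struct dentry *dentry, *new, *found;

	dentry = d_lookup(parent, name);
	if (dentry) {
		/* Cache repopulated while we slept: revalidate under i_mutex. */
		if (!dentry->d_op || !dentry->d_op->d_revalidate)
			return dentry;
		dentry = do_revalidate(dentry, nd);	/* namei.c-local helper */
		if (dentry)
			return dentry;		/* still valid, or an ERR_PTR */
		/* Left behind invalid: fall through and do the real lookup. */
	}

	if (IS_DEADDIR(dir))			/* never populate a dead directory */
		return ERR_PTR(-ENOENT);

	new = d_alloc(parent, name);
	if (!new)
		return ERR_PTR(-ENOMEM);
	found = dir->i_op->lookup(dir, new, nd);
	if (found) {
		dput(new);	/* the fs supplied its own dentry, or an error */
		return found;
	}
	return new;		/* the fs instantiated the dentry we allocated */
}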
diff --git a/fs/namespace.c b/fs/namespace.c
index 8174c8ab5c70..0602ad7fc479 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -29,6 +29,7 @@
#include <linux/log2.h>
#include <linux/idr.h>
#include <linux/fs_struct.h>
+#include <linux/fsnotify.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include "pnode.h"
@@ -150,6 +151,9 @@ struct vfsmount *alloc_vfsmnt(const char *name)
INIT_LIST_HEAD(&mnt->mnt_share);
INIT_LIST_HEAD(&mnt->mnt_slave_list);
INIT_LIST_HEAD(&mnt->mnt_slave);
+#ifdef CONFIG_FSNOTIFY
+ INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
+#endif
#ifdef CONFIG_SMP
mnt->mnt_writers = alloc_percpu(int);
if (!mnt->mnt_writers)
@@ -610,6 +614,7 @@ static inline void __mntput(struct vfsmount *mnt)
* provides barriers, so count_mnt_writers() below is safe. AV
*/
WARN_ON(count_mnt_writers(mnt));
+ fsnotify_vfsmount_delete(mnt);
dput(mnt->mnt_root);
free_vfsmnt(mnt);
deactivate_super(sb);
@@ -628,7 +633,6 @@ repeat:
mnt->mnt_pinned = 0;
spin_unlock(&vfsmount_lock);
acct_auto_close_mnt(mnt);
- security_sb_umount_close(mnt);
goto repeat;
}
}
@@ -1117,8 +1121,6 @@ static int do_umount(struct vfsmount *mnt, int flags)
retval = 0;
}
spin_unlock(&vfsmount_lock);
- if (retval)
- security_sb_umount_busy(mnt);
up_write(&namespace_sem);
release_mounts(&umount_list);
return retval;
@@ -1435,17 +1437,10 @@ static int graft_tree(struct vfsmount *mnt, struct path *path)
if (IS_DEADDIR(path->dentry->d_inode))
goto out_unlock;
- err = security_sb_check_sb(mnt, path);
- if (err)
- goto out_unlock;
-
- err = -ENOENT;
if (!d_unlinked(path->dentry))
err = attach_recursive_mnt(mnt, path, NULL);
out_unlock:
mutex_unlock(&path->dentry->d_inode->i_mutex);
- if (!err)
- security_sb_post_addmount(mnt, path);
return err;
}
@@ -1581,8 +1576,6 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
}
up_write(&sb->s_umount);
if (!err) {
- security_sb_post_remount(path->mnt, flags, data);
-
spin_lock(&vfsmount_lock);
touch_mnt_namespace(path->mnt->mnt_ns);
spin_unlock(&vfsmount_lock);
@@ -2277,7 +2270,6 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
touch_mnt_namespace(current->nsproxy->mnt_ns);
spin_unlock(&vfsmount_lock);
chroot_fs_refs(&root, &new);
- security_sb_post_pivotroot(&root, &new);
error = 0;
path_put(&root_parent);
path_put(&parent_path);
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 36dfdae95123..e17b49e2eabd 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -45,7 +45,7 @@ unsigned short nfs_callback_tcpport;
unsigned short nfs_callback_tcpport6;
#define NFS_CALLBACK_MAXPORTNR (65535U)
-static int param_set_portnr(const char *val, struct kernel_param *kp)
+static int param_set_portnr(const char *val, const struct kernel_param *kp)
{
unsigned long num;
int ret;
@@ -58,11 +58,10 @@ static int param_set_portnr(const char *val, struct kernel_param *kp)
*((unsigned int *)kp->arg) = num;
return 0;
}
-
-static int param_get_portnr(char *buffer, struct kernel_param *kp)
-{
- return param_get_uint(buffer, kp);
-}
+static struct kernel_param_ops param_ops_portnr = {
+ .set = param_set_portnr,
+ .get = param_get_uint,
+};
#define param_check_portnr(name, p) __param_check(name, p, unsigned int);
module_param_named(callback_tcpport, nfs_callback_set_tcpport, portnr, 0644);
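
The callback.c hunk converts the open-coded get/set pair into a struct kernel_param_ops, which module_param_named() picks up through the param_ops_<type>/param_check_<type> naming convention. Below is a minimal sketch of the same pattern for a hypothetical bounded integer parameter; every name in it is illustrative and not part of NFS.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

/* Sketch: a percentage parameter clamped to 0..100, mirroring portnr above. */
static int param_set_percent(const char *val, const struct kernel_param *kp)
{
	unsigned long num;
	int ret;

	if (!val)
		return -EINVAL;
	ret = strict_strtoul(val, 0, &num);
	if (ret || num > 100)
		return -EINVAL;
	*((unsigned int *)kp->arg) = num;
	return 0;
}

static struct kernel_param_ops param_ops_percent = {
	.set = param_set_percent,
	.get = param_get_uint,		/* the stock unsigned int getter */
};
#define param_check_percent(name, p) __param_check(name, p, unsigned int)

static unsigned int example_fill = 50;
module_param_named(fill_percent, example_fill, percent, 0644);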
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index acc9c4943b84..7ec9b34a59f8 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -934,7 +934,6 @@ static int nfs_probe_fsinfo(struct nfs_server *server, struct nfs_fh *mntfh, str
}
fsinfo.fattr = fattr;
- nfs_fattr_init(fattr);
error = clp->rpc_ops->fsinfo(server, mntfh, &fsinfo);
if (error < 0)
goto out_error;
@@ -1047,13 +1046,18 @@ struct nfs_server *nfs_create_server(const struct nfs_parsed_mount_data *data,
struct nfs_fh *mntfh)
{
struct nfs_server *server;
- struct nfs_fattr fattr;
+ struct nfs_fattr *fattr;
int error;
server = nfs_alloc_server();
if (!server)
return ERR_PTR(-ENOMEM);
+ error = -ENOMEM;
+ fattr = nfs_alloc_fattr();
+ if (fattr == NULL)
+ goto error;
+
/* Get a client representation */
error = nfs_init_server(server, data);
if (error < 0)
@@ -1064,7 +1068,7 @@ struct nfs_server *nfs_create_server(const struct nfs_parsed_mount_data *data,
BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
/* Probe the root fh to retrieve its FSID */
- error = nfs_probe_fsinfo(server, mntfh, &fattr);
+ error = nfs_probe_fsinfo(server, mntfh, fattr);
if (error < 0)
goto error;
if (server->nfs_client->rpc_ops->version == 3) {
@@ -1077,14 +1081,14 @@ struct nfs_server *nfs_create_server(const struct nfs_parsed_mount_data *data,
server->namelen = NFS2_MAXNAMLEN;
}
- if (!(fattr.valid & NFS_ATTR_FATTR)) {
- error = server->nfs_client->rpc_ops->getattr(server, mntfh, &fattr);
+ if (!(fattr->valid & NFS_ATTR_FATTR)) {
+ error = server->nfs_client->rpc_ops->getattr(server, mntfh, fattr);
if (error < 0) {
dprintk("nfs_create_server: getattr error = %d\n", -error);
goto error;
}
}
- memcpy(&server->fsid, &fattr.fsid, sizeof(server->fsid));
+ memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
dprintk("Server FSID: %llx:%llx\n",
(unsigned long long) server->fsid.major,
@@ -1096,9 +1100,11 @@ struct nfs_server *nfs_create_server(const struct nfs_parsed_mount_data *data,
spin_unlock(&nfs_client_lock);
server->mount_time = jiffies;
+ nfs_free_fattr(fattr);
return server;
error:
+ nfs_free_fattr(fattr);
nfs_free_server(server);
return ERR_PTR(error);
}
@@ -1340,7 +1346,7 @@ error:
struct nfs_server *nfs4_create_server(const struct nfs_parsed_mount_data *data,
struct nfs_fh *mntfh)
{
- struct nfs_fattr fattr;
+ struct nfs_fattr *fattr;
struct nfs_server *server;
int error;
@@ -1350,6 +1356,11 @@ struct nfs_server *nfs4_create_server(const struct nfs_parsed_mount_data *data,
if (!server)
return ERR_PTR(-ENOMEM);
+ error = -ENOMEM;
+ fattr = nfs_alloc_fattr();
+ if (fattr == NULL)
+ goto error;
+
/* set up the general RPC client */
error = nfs4_init_server(server, data);
if (error < 0)
@@ -1364,7 +1375,7 @@ struct nfs_server *nfs4_create_server(const struct nfs_parsed_mount_data *data,
goto error;
/* Probe the root fh to retrieve its FSID */
- error = nfs4_path_walk(server, mntfh, data->nfs_server.export_path);
+ error = nfs4_get_rootfh(server, mntfh);
if (error < 0)
goto error;
@@ -1375,7 +1386,7 @@ struct nfs_server *nfs4_create_server(const struct nfs_parsed_mount_data *data,
nfs4_session_set_rwsize(server);
- error = nfs_probe_fsinfo(server, mntfh, &fattr);
+ error = nfs_probe_fsinfo(server, mntfh, fattr);
if (error < 0)
goto error;
@@ -1389,9 +1400,11 @@ struct nfs_server *nfs4_create_server(const struct nfs_parsed_mount_data *data,
server->mount_time = jiffies;
dprintk("<-- nfs4_create_server() = %p\n", server);
+ nfs_free_fattr(fattr);
return server;
error:
+ nfs_free_fattr(fattr);
nfs_free_server(server);
dprintk("<-- nfs4_create_server() = error %d\n", error);
return ERR_PTR(error);
@@ -1405,7 +1418,7 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
{
struct nfs_client *parent_client;
struct nfs_server *server, *parent_server;
- struct nfs_fattr fattr;
+ struct nfs_fattr *fattr;
int error;
dprintk("--> nfs4_create_referral_server()\n");
@@ -1414,6 +1427,11 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
if (!server)
return ERR_PTR(-ENOMEM);
+ error = -ENOMEM;
+ fattr = nfs_alloc_fattr();
+ if (fattr == NULL)
+ goto error;
+
parent_server = NFS_SB(data->sb);
parent_client = parent_server->nfs_client;
@@ -1443,12 +1461,12 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
/* Probe the root fh to retrieve its FSID and filehandle */
- error = nfs4_path_walk(server, mntfh, data->mnt_path);
+ error = nfs4_get_rootfh(server, mntfh);
if (error < 0)
goto error;
/* probe the filesystem info for this server filesystem */
- error = nfs_probe_fsinfo(server, mntfh, &fattr);
+ error = nfs_probe_fsinfo(server, mntfh, fattr);
if (error < 0)
goto error;
@@ -1466,10 +1484,12 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
server->mount_time = jiffies;
+ nfs_free_fattr(fattr);
dprintk("<-- nfs_create_referral_server() = %p\n", server);
return server;
error:
+ nfs_free_fattr(fattr);
nfs_free_server(server);
dprintk("<-- nfs4_create_referral_server() = error %d\n", error);
return ERR_PTR(error);
@@ -1485,7 +1505,7 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source,
struct nfs_fattr *fattr)
{
struct nfs_server *server;
- struct nfs_fattr fattr_fsinfo;
+ struct nfs_fattr *fattr_fsinfo;
int error;
dprintk("--> nfs_clone_server(,%llx:%llx,)\n",
@@ -1496,6 +1516,11 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source,
if (!server)
return ERR_PTR(-ENOMEM);
+ error = -ENOMEM;
+ fattr_fsinfo = nfs_alloc_fattr();
+ if (fattr_fsinfo == NULL)
+ goto out_free_server;
+
/* Copy data from the source */
server->nfs_client = source->nfs_client;
atomic_inc(&server->nfs_client->cl_count);
@@ -1512,7 +1537,7 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source,
nfs_init_server_aclclient(server);
/* probe the filesystem info for this server filesystem */
- error = nfs_probe_fsinfo(server, fh, &fattr_fsinfo);
+ error = nfs_probe_fsinfo(server, fh, fattr_fsinfo);
if (error < 0)
goto out_free_server;
@@ -1534,10 +1559,12 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source,
server->mount_time = jiffies;
+ nfs_free_fattr(fattr_fsinfo);
dprintk("<-- nfs_clone_server() = %p\n", server);
return server;
out_free_server:
+ nfs_free_fattr(fattr_fsinfo);
nfs_free_server(server);
dprintk("<-- nfs_clone_server() = error %d\n", error);
return ERR_PTR(error);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index a7bb5c694aa3..18da8abbece8 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -530,9 +530,7 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
nfs_readdir_descriptor_t my_desc,
*desc = &my_desc;
struct nfs_entry my_entry;
- struct nfs_fh fh;
- struct nfs_fattr fattr;
- long res;
+ int res = -ENOMEM;
dfprintk(FILE, "NFS: readdir(%s/%s) starting at cookie %llu\n",
dentry->d_parent->d_name.name, dentry->d_name.name,
@@ -554,9 +552,11 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
my_entry.cookie = my_entry.prev_cookie = 0;
my_entry.eof = 0;
- my_entry.fh = &fh;
- my_entry.fattr = &fattr;
- nfs_fattr_init(&fattr);
+ my_entry.fh = nfs_alloc_fhandle();
+ my_entry.fattr = nfs_alloc_fattr();
+ if (my_entry.fh == NULL || my_entry.fattr == NULL)
+ goto out_alloc_failed;
+
desc->entry = &my_entry;
nfs_block_sillyrename(dentry);
@@ -598,7 +598,10 @@ out:
nfs_unblock_sillyrename(dentry);
if (res > 0)
res = 0;
- dfprintk(FILE, "NFS: readdir(%s/%s) returns %ld\n",
+out_alloc_failed:
+ nfs_free_fattr(my_entry.fattr);
+ nfs_free_fhandle(my_entry.fh);
+ dfprintk(FILE, "NFS: readdir(%s/%s) returns %d\n",
dentry->d_parent->d_name.name, dentry->d_name.name,
res);
return res;
@@ -776,9 +779,9 @@ static int nfs_lookup_revalidate(struct dentry * dentry, struct nameidata *nd)
struct inode *dir;
struct inode *inode;
struct dentry *parent;
+ struct nfs_fh *fhandle = NULL;
+ struct nfs_fattr *fattr = NULL;
int error;
- struct nfs_fh fhandle;
- struct nfs_fattr fattr;
parent = dget_parent(dentry);
dir = parent->d_inode;
@@ -811,14 +814,22 @@ static int nfs_lookup_revalidate(struct dentry * dentry, struct nameidata *nd)
if (NFS_STALE(inode))
goto out_bad;
- error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, &fhandle, &fattr);
+ error = -ENOMEM;
+ fhandle = nfs_alloc_fhandle();
+ fattr = nfs_alloc_fattr();
+ if (fhandle == NULL || fattr == NULL)
+ goto out_error;
+
+ error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr);
if (error)
goto out_bad;
- if (nfs_compare_fh(NFS_FH(inode), &fhandle))
+ if (nfs_compare_fh(NFS_FH(inode), fhandle))
goto out_bad;
- if ((error = nfs_refresh_inode(inode, &fattr)) != 0)
+ if ((error = nfs_refresh_inode(inode, fattr)) != 0)
goto out_bad;
+ nfs_free_fattr(fattr);
+ nfs_free_fhandle(fhandle);
out_set_verifier:
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
out_valid:
@@ -842,11 +853,21 @@ out_zap_parent:
shrink_dcache_parent(dentry);
}
d_drop(dentry);
+ nfs_free_fattr(fattr);
+ nfs_free_fhandle(fhandle);
dput(parent);
dfprintk(LOOKUPCACHE, "NFS: %s(%s/%s) is invalid\n",
__func__, dentry->d_parent->d_name.name,
dentry->d_name.name);
return 0;
+out_error:
+ nfs_free_fattr(fattr);
+ nfs_free_fhandle(fhandle);
+ dput(parent);
+ dfprintk(LOOKUPCACHE, "NFS: %s(%s/%s) lookup returned error %d\n",
+ __func__, dentry->d_parent->d_name.name,
+ dentry->d_name.name, error);
+ return error;
}
/*
@@ -911,9 +932,9 @@ static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, stru
struct dentry *res;
struct dentry *parent;
struct inode *inode = NULL;
+ struct nfs_fh *fhandle = NULL;
+ struct nfs_fattr *fattr = NULL;
int error;
- struct nfs_fh fhandle;
- struct nfs_fattr fattr;
dfprintk(VFS, "NFS: lookup(%s/%s)\n",
dentry->d_parent->d_name.name, dentry->d_name.name);
@@ -923,7 +944,6 @@ static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, stru
if (dentry->d_name.len > NFS_SERVER(dir)->namelen)
goto out;
- res = ERR_PTR(-ENOMEM);
dentry->d_op = NFS_PROTO(dir)->dentry_ops;
/*
@@ -936,17 +956,23 @@ static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, stru
goto out;
}
+ res = ERR_PTR(-ENOMEM);
+ fhandle = nfs_alloc_fhandle();
+ fattr = nfs_alloc_fattr();
+ if (fhandle == NULL || fattr == NULL)
+ goto out;
+
parent = dentry->d_parent;
/* Protect against concurrent sillydeletes */
nfs_block_sillyrename(parent);
- error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, &fhandle, &fattr);
+ error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr);
if (error == -ENOENT)
goto no_entry;
if (error < 0) {
res = ERR_PTR(error);
goto out_unblock_sillyrename;
}
- inode = nfs_fhget(dentry->d_sb, &fhandle, &fattr);
+ inode = nfs_fhget(dentry->d_sb, fhandle, fattr);
res = (struct dentry *)inode;
if (IS_ERR(res))
goto out_unblock_sillyrename;
@@ -962,6 +988,8 @@ no_entry:
out_unblock_sillyrename:
nfs_unblock_sillyrename(parent);
out:
+ nfs_free_fattr(fattr);
+ nfs_free_fhandle(fhandle);
return res;
}
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 8d965bddb87e..cac96bcc91e4 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -161,14 +161,17 @@ static int nfs_revalidate_file_size(struct inode *inode, struct file *filp)
struct nfs_server *server = NFS_SERVER(inode);
struct nfs_inode *nfsi = NFS_I(inode);
- if (server->flags & NFS_MOUNT_NOAC)
- goto force_reval;
+ if (nfs_have_delegated_attributes(inode))
+ goto out_noreval;
+
if (filp->f_flags & O_DIRECT)
goto force_reval;
- if (nfsi->npages != 0)
- return 0;
- if (!(nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE) && !nfs_attribute_timeout(inode))
- return 0;
+ if (nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
+ goto force_reval;
+ if (nfs_attribute_timeout(inode))
+ goto force_reval;
+out_noreval:
+ return 0;
force_reval:
return __nfs_revalidate_inode(server, inode);
}
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index b35d2a616066..7428f7d6273b 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -78,159 +78,94 @@ struct dentry *nfs_get_root(struct super_block *sb, struct nfs_fh *mntfh)
{
struct nfs_server *server = NFS_SB(sb);
struct nfs_fsinfo fsinfo;
- struct nfs_fattr fattr;
- struct dentry *mntroot;
+ struct dentry *ret;
struct inode *inode;
int error;
/* get the actual root for this mount */
- fsinfo.fattr = &fattr;
+ fsinfo.fattr = nfs_alloc_fattr();
+ if (fsinfo.fattr == NULL)
+ return ERR_PTR(-ENOMEM);
error = server->nfs_client->rpc_ops->getroot(server, mntfh, &fsinfo);
if (error < 0) {
dprintk("nfs_get_root: getattr error = %d\n", -error);
- return ERR_PTR(error);
+ ret = ERR_PTR(error);
+ goto out;
}
inode = nfs_fhget(sb, mntfh, fsinfo.fattr);
if (IS_ERR(inode)) {
dprintk("nfs_get_root: get root inode failed\n");
- return ERR_CAST(inode);
+ ret = ERR_CAST(inode);
+ goto out;
}
error = nfs_superblock_set_dummy_root(sb, inode);
- if (error != 0)
- return ERR_PTR(error);
+ if (error != 0) {
+ ret = ERR_PTR(error);
+ goto out;
+ }
/* root dentries normally start off anonymous and get spliced in later
* if the dentry tree reaches them; however if the dentry already
* exists, we'll pick it up at this point and use it as the root
*/
- mntroot = d_obtain_alias(inode);
- if (IS_ERR(mntroot)) {
+ ret = d_obtain_alias(inode);
+ if (IS_ERR(ret)) {
dprintk("nfs_get_root: get root dentry failed\n");
- return mntroot;
+ goto out;
}
- security_d_instantiate(mntroot, inode);
-
- if (!mntroot->d_op)
- mntroot->d_op = server->nfs_client->rpc_ops->dentry_ops;
+ security_d_instantiate(ret, inode);
- return mntroot;
+ if (ret->d_op == NULL)
+ ret->d_op = server->nfs_client->rpc_ops->dentry_ops;
+out:
+ nfs_free_fattr(fsinfo.fattr);
+ return ret;
}
#ifdef CONFIG_NFS_V4
-/*
- * Do a simple pathwalk from the root FH of the server to the nominated target
- * of the mountpoint
- * - give error on symlinks
- * - give error on ".." occurring in the path
- * - follow traversals
- */
-int nfs4_path_walk(struct nfs_server *server,
- struct nfs_fh *mntfh,
- const char *path)
+int nfs4_get_rootfh(struct nfs_server *server, struct nfs_fh *mntfh)
{
struct nfs_fsinfo fsinfo;
- struct nfs_fattr fattr;
- struct nfs_fh lastfh;
- struct qstr name;
- int ret;
+ int ret = -ENOMEM;
- dprintk("--> nfs4_path_walk(,,%s)\n", path);
+ dprintk("--> nfs4_get_rootfh()\n");
- fsinfo.fattr = &fattr;
- nfs_fattr_init(&fattr);
-
- /* Eat leading slashes */
- while (*path == '/')
- path++;
+ fsinfo.fattr = nfs_alloc_fattr();
+ if (fsinfo.fattr == NULL)
+ goto out;
/* Start by getting the root filehandle from the server */
ret = server->nfs_client->rpc_ops->getroot(server, mntfh, &fsinfo);
if (ret < 0) {
- dprintk("nfs4_get_root: getroot error = %d\n", -ret);
- return ret;
+ dprintk("nfs4_get_rootfh: getroot error = %d\n", -ret);
+ goto out;
}
- if (!S_ISDIR(fattr.mode)) {
- printk(KERN_ERR "nfs4_get_root:"
+ if (!(fsinfo.fattr->valid & NFS_ATTR_FATTR_MODE)
+ || !S_ISDIR(fsinfo.fattr->mode)) {
+ printk(KERN_ERR "nfs4_get_rootfh:"
" getroot encountered non-directory\n");
- return -ENOTDIR;
+ ret = -ENOTDIR;
+ goto out;
}
- /* FIXME: It is quite valid for the server to return a referral here */
- if (fattr.valid & NFS_ATTR_FATTR_V4_REFERRAL) {
- printk(KERN_ERR "nfs4_get_root:"
+ if (fsinfo.fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) {
+ printk(KERN_ERR "nfs4_get_rootfh:"
" getroot obtained referral\n");
- return -EREMOTE;
- }
-
-next_component:
- dprintk("Next: %s\n", path);
-
- /* extract the next bit of the path */
- if (!*path)
- goto path_walk_complete;
-
- name.name = path;
- while (*path && *path != '/')
- path++;
- name.len = path - (const char *) name.name;
-
- if (name.len > NFS4_MAXNAMLEN)
- return -ENAMETOOLONG;
-
-eat_dot_dir:
- while (*path == '/')
- path++;
-
- if (path[0] == '.' && (path[1] == '/' || !path[1])) {
- path += 2;
- goto eat_dot_dir;
- }
-
- /* FIXME: Why shouldn't the user be able to use ".." in the path? */
- if (path[0] == '.' && path[1] == '.' && (path[2] == '/' || !path[2])
- ) {
- printk(KERN_ERR "nfs4_get_root:"
- " Mount path contains reference to \"..\"\n");
- return -EINVAL;
+ ret = -EREMOTE;
+ goto out;
}
- /* lookup the next FH in the sequence */
- memcpy(&lastfh, mntfh, sizeof(lastfh));
-
- dprintk("LookupFH: %*.*s [%s]\n", name.len, name.len, name.name, path);
-
- ret = server->nfs_client->rpc_ops->lookupfh(server, &lastfh, &name,
- mntfh, &fattr);
- if (ret < 0) {
- dprintk("nfs4_get_root: getroot error = %d\n", -ret);
- return ret;
- }
-
- if (!S_ISDIR(fattr.mode)) {
- printk(KERN_ERR "nfs4_get_root:"
- " lookupfh encountered non-directory\n");
- return -ENOTDIR;
- }
-
- /* FIXME: Referrals are quite valid here too */
- if (fattr.valid & NFS_ATTR_FATTR_V4_REFERRAL) {
- printk(KERN_ERR "nfs4_get_root:"
- " lookupfh obtained referral\n");
- return -EREMOTE;
- }
-
- goto next_component;
-
-path_walk_complete:
- memcpy(&server->fsid, &fattr.fsid, sizeof(server->fsid));
- dprintk("<-- nfs4_path_walk() = 0\n");
- return 0;
+ memcpy(&server->fsid, &fsinfo.fattr->fsid, sizeof(server->fsid));
+out:
+ nfs_free_fattr(fsinfo.fattr);
+ dprintk("<-- nfs4_get_rootfh() = %d\n", ret);
+ return ret;
}
/*
@@ -239,8 +174,8 @@ path_walk_complete:
struct dentry *nfs4_get_root(struct super_block *sb, struct nfs_fh *mntfh)
{
struct nfs_server *server = NFS_SB(sb);
- struct nfs_fattr fattr;
- struct dentry *mntroot;
+ struct nfs_fattr *fattr = NULL;
+ struct dentry *ret;
struct inode *inode;
int error;
@@ -254,40 +189,50 @@ struct dentry *nfs4_get_root(struct super_block *sb, struct nfs_fh *mntfh)
return ERR_PTR(error);
}
+ fattr = nfs_alloc_fattr();
+ if (fattr == NULL)
+ return ERR_PTR(-ENOMEM);
+
/* get the actual root for this mount */
- error = server->nfs_client->rpc_ops->getattr(server, mntfh, &fattr);
+ error = server->nfs_client->rpc_ops->getattr(server, mntfh, fattr);
if (error < 0) {
dprintk("nfs_get_root: getattr error = %d\n", -error);
- return ERR_PTR(error);
+ ret = ERR_PTR(error);
+ goto out;
}
- inode = nfs_fhget(sb, mntfh, &fattr);
+ inode = nfs_fhget(sb, mntfh, fattr);
if (IS_ERR(inode)) {
dprintk("nfs_get_root: get root inode failed\n");
- return ERR_CAST(inode);
+ ret = ERR_CAST(inode);
+ goto out;
}
error = nfs_superblock_set_dummy_root(sb, inode);
- if (error != 0)
- return ERR_PTR(error);
+ if (error != 0) {
+ ret = ERR_PTR(error);
+ goto out;
+ }
/* root dentries normally start off anonymous and get spliced in later
* if the dentry tree reaches them; however if the dentry already
* exists, we'll pick it up at this point and use it as the root
*/
- mntroot = d_obtain_alias(inode);
- if (IS_ERR(mntroot)) {
+ ret = d_obtain_alias(inode);
+ if (IS_ERR(ret)) {
dprintk("nfs_get_root: get root dentry failed\n");
- return mntroot;
+ goto out;
}
- security_d_instantiate(mntroot, inode);
+ security_d_instantiate(ret, inode);
- if (!mntroot->d_op)
- mntroot->d_op = server->nfs_client->rpc_ops->dentry_ops;
+ if (ret->d_op == NULL)
+ ret->d_op = server->nfs_client->rpc_ops->dentry_ops;
+out:
+ nfs_free_fattr(fattr);
dprintk("<-- nfs4_get_root()\n");
- return mntroot;
+ return ret;
}
#endif /* CONFIG_NFS_V4 */
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 50a56edca0b5..099b3518feea 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -393,8 +393,8 @@ int
nfs_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = dentry->d_inode;
- struct nfs_fattr fattr;
- int error;
+ struct nfs_fattr *fattr;
+ int error = -ENOMEM;
nfs_inc_stats(inode, NFSIOS_VFSSETATTR);
@@ -417,14 +417,20 @@ nfs_setattr(struct dentry *dentry, struct iattr *attr)
filemap_write_and_wait(inode->i_mapping);
nfs_wb_all(inode);
}
+
+ fattr = nfs_alloc_fattr();
+ if (fattr == NULL)
+ goto out;
/*
* Return any delegations if we're going to change ACLs
*/
if ((attr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0)
nfs_inode_return_delegation(inode);
- error = NFS_PROTO(inode)->setattr(dentry, &fattr, attr);
+ error = NFS_PROTO(inode)->setattr(dentry, fattr, attr);
if (error == 0)
- nfs_refresh_inode(inode, &fattr);
+ nfs_refresh_inode(inode, fattr);
+ nfs_free_fattr(fattr);
+out:
return error;
}
@@ -682,7 +688,7 @@ int
__nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
{
int status = -ESTALE;
- struct nfs_fattr fattr;
+ struct nfs_fattr *fattr = NULL;
struct nfs_inode *nfsi = NFS_I(inode);
dfprintk(PAGECACHE, "NFS: revalidating (%s/%Ld)\n",
@@ -693,8 +699,13 @@ __nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
if (NFS_STALE(inode))
goto out;
+ status = -ENOMEM;
+ fattr = nfs_alloc_fattr();
+ if (fattr == NULL)
+ goto out;
+
nfs_inc_stats(inode, NFSIOS_INODEREVALIDATE);
- status = NFS_PROTO(inode)->getattr(server, NFS_FH(inode), &fattr);
+ status = NFS_PROTO(inode)->getattr(server, NFS_FH(inode), fattr);
if (status != 0) {
dfprintk(PAGECACHE, "nfs_revalidate_inode: (%s/%Ld) getattr failed, error=%d\n",
inode->i_sb->s_id,
@@ -707,7 +718,7 @@ __nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
goto out;
}
- status = nfs_refresh_inode(inode, &fattr);
+ status = nfs_refresh_inode(inode, fattr);
if (status) {
dfprintk(PAGECACHE, "nfs_revalidate_inode: (%s/%Ld) refresh failed, error=%d\n",
inode->i_sb->s_id,
@@ -723,6 +734,7 @@ __nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
(long long)NFS_FILEID(inode));
out:
+ nfs_free_fattr(fattr);
return status;
}
@@ -730,9 +742,14 @@ int nfs_attribute_timeout(struct inode *inode)
{
struct nfs_inode *nfsi = NFS_I(inode);
+ return !time_in_range_open(jiffies, nfsi->read_cache_jiffies, nfsi->read_cache_jiffies + nfsi->attrtimeo);
+}
+
+static int nfs_attribute_cache_expired(struct inode *inode)
+{
if (nfs_have_delegated_attributes(inode))
return 0;
- return !time_in_range_open(jiffies, nfsi->read_cache_jiffies, nfsi->read_cache_jiffies + nfsi->attrtimeo);
+ return nfs_attribute_timeout(inode);
}
/**
@@ -745,7 +762,7 @@ int nfs_attribute_timeout(struct inode *inode)
int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
{
if (!(NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATTR)
- && !nfs_attribute_timeout(inode))
+ && !nfs_attribute_cache_expired(inode))
return NFS_STALE(inode) ? -ESTALE : 0;
return __nfs_revalidate_inode(server, inode);
}
@@ -782,7 +799,8 @@ int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
int ret = 0;
if ((nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
- || nfs_attribute_timeout(inode) || NFS_STALE(inode)) {
+ || nfs_attribute_cache_expired(inode)
+ || NFS_STALE(inode)) {
ret = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
if (ret < 0)
goto out;
@@ -916,6 +934,26 @@ void nfs_fattr_init(struct nfs_fattr *fattr)
fattr->gencount = nfs_inc_attr_generation_counter();
}
+struct nfs_fattr *nfs_alloc_fattr(void)
+{
+ struct nfs_fattr *fattr;
+
+ fattr = kmalloc(sizeof(*fattr), GFP_NOFS);
+ if (fattr != NULL)
+ nfs_fattr_init(fattr);
+ return fattr;
+}
+
+struct nfs_fh *nfs_alloc_fhandle(void)
+{
+ struct nfs_fh *fh;
+
+ fh = kmalloc(sizeof(struct nfs_fh), GFP_NOFS);
+ if (fh != NULL)
+ fh->size = 0;
+ return fh;
+}
+
/**
* nfs_inode_attrs_need_update - check if the inode attributes need updating
* @inode - pointer to inode
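
nfs_alloc_fattr() and nfs_alloc_fhandle() exist so callers stop keeping struct nfs_fattr and struct nfs_fh on the stack; both structures are large, and the rest of this series converts call sites to an allocate/check/free pattern. A minimal sketch of that pattern, assuming a hypothetical caller that needs both objects; as in the converted call sites above, the free helpers tolerate a NULL pointer.

#include <linux/nfs_fs.h>

/* Sketch: heap-allocate the fattr/fhandle pair, use them, free them. */
static int example_probe(struct inode *dir, struct qstr *name)
{
	struct nfs_fh *fh = nfs_alloc_fhandle();
	struct nfs_fattr *fattr = nfs_alloc_fattr();
	int error = -ENOMEM;

	if (fh == NULL || fattr == NULL)
		goto out;

	error = NFS_PROTO(dir)->lookup(dir, name, fh, fattr);
out:
	nfs_free_fattr(fattr);
	nfs_free_fhandle(fh);
	return error;
}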
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 11f82f03c5de..d8bd619e386c 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -244,9 +244,7 @@ extern struct dentry *nfs_get_root(struct super_block *, struct nfs_fh *);
#ifdef CONFIG_NFS_V4
extern struct dentry *nfs4_get_root(struct super_block *, struct nfs_fh *);
-extern int nfs4_path_walk(struct nfs_server *server,
- struct nfs_fh *mntfh,
- const char *path);
+extern int nfs4_get_rootfh(struct nfs_server *server, struct nfs_fh *mntfh);
#endif
/* read.c */
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index 7888cf36022d..db6aa3673cf3 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -105,8 +105,8 @@ static void * nfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
struct vfsmount *mnt;
struct nfs_server *server = NFS_SERVER(dentry->d_inode);
struct dentry *parent;
- struct nfs_fh fh;
- struct nfs_fattr fattr;
+ struct nfs_fh *fh = NULL;
+ struct nfs_fattr *fattr = NULL;
int err;
dprintk("--> nfs_follow_mountpoint()\n");
@@ -115,6 +115,12 @@ static void * nfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
if (IS_ROOT(dentry))
goto out_err;
+ err = -ENOMEM;
+ fh = nfs_alloc_fhandle();
+ fattr = nfs_alloc_fattr();
+ if (fh == NULL || fattr == NULL)
+ goto out_err;
+
dprintk("%s: enter\n", __func__);
dput(nd->path.dentry);
nd->path.dentry = dget(dentry);
@@ -123,16 +129,16 @@ static void * nfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
parent = dget_parent(nd->path.dentry);
err = server->nfs_client->rpc_ops->lookup(parent->d_inode,
&nd->path.dentry->d_name,
- &fh, &fattr);
+ fh, fattr);
dput(parent);
if (err != 0)
goto out_err;
- if (fattr.valid & NFS_ATTR_FATTR_V4_REFERRAL)
+ if (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)
mnt = nfs_do_refmount(nd->path.mnt, nd->path.dentry);
else
- mnt = nfs_do_submount(nd->path.mnt, nd->path.dentry, &fh,
- &fattr);
+ mnt = nfs_do_submount(nd->path.mnt, nd->path.dentry, fh,
+ fattr);
err = PTR_ERR(mnt);
if (IS_ERR(mnt))
goto out_err;
@@ -151,6 +157,8 @@ static void * nfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
nd->path.dentry = dget(mnt->mnt_root);
schedule_delayed_work(&nfs_automount_task, nfs_mountpoint_expiry_timeout);
out:
+ nfs_free_fattr(fattr);
+ nfs_free_fhandle(fh);
dprintk("%s: done, returned %d\n", __func__, err);
dprintk("<-- nfs_follow_mountpoint() = %d\n", err);
diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
index d150ae0c5ecd..9f88c5f4c7e2 100644
--- a/fs/nfs/nfs3acl.c
+++ b/fs/nfs/nfs3acl.c
@@ -185,7 +185,6 @@ static void nfs3_cache_acls(struct inode *inode, struct posix_acl *acl,
struct posix_acl *nfs3_proc_getacl(struct inode *inode, int type)
{
struct nfs_server *server = NFS_SERVER(inode);
- struct nfs_fattr fattr;
struct page *pages[NFSACL_MAXPAGES] = { };
struct nfs3_getaclargs args = {
.fh = NFS_FH(inode),
@@ -193,7 +192,7 @@ struct posix_acl *nfs3_proc_getacl(struct inode *inode, int type)
.pages = pages,
};
struct nfs3_getaclres res = {
- .fattr = &fattr,
+ 0
};
struct rpc_message msg = {
.rpc_argp = &args,
@@ -228,7 +227,10 @@ struct posix_acl *nfs3_proc_getacl(struct inode *inode, int type)
dprintk("NFS call getacl\n");
msg.rpc_proc = &server->client_acl->cl_procinfo[ACLPROC3_GETACL];
- nfs_fattr_init(&fattr);
+ res.fattr = nfs_alloc_fattr();
+ if (res.fattr == NULL)
+ return ERR_PTR(-ENOMEM);
+
status = rpc_call_sync(server->client_acl, &msg, 0);
dprintk("NFS reply getacl: %d\n", status);
@@ -238,7 +240,7 @@ struct posix_acl *nfs3_proc_getacl(struct inode *inode, int type)
switch (status) {
case 0:
- status = nfs_refresh_inode(inode, &fattr);
+ status = nfs_refresh_inode(inode, res.fattr);
break;
case -EPFNOSUPPORT:
case -EPROTONOSUPPORT:
@@ -278,6 +280,7 @@ struct posix_acl *nfs3_proc_getacl(struct inode *inode, int type)
getout:
posix_acl_release(res.acl_access);
posix_acl_release(res.acl_default);
+ nfs_free_fattr(res.fattr);
if (status != 0) {
posix_acl_release(acl);
@@ -290,7 +293,7 @@ static int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
struct posix_acl *dfacl)
{
struct nfs_server *server = NFS_SERVER(inode);
- struct nfs_fattr fattr;
+ struct nfs_fattr *fattr;
struct page *pages[NFSACL_MAXPAGES];
struct nfs3_setaclargs args = {
.inode = inode,
@@ -335,8 +338,13 @@ static int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
}
dprintk("NFS call setacl\n");
+ status = -ENOMEM;
+ fattr = nfs_alloc_fattr();
+ if (fattr == NULL)
+ goto out_freepages;
+
msg.rpc_proc = &server->client_acl->cl_procinfo[ACLPROC3_SETACL];
- nfs_fattr_init(&fattr);
+ msg.rpc_resp = fattr;
status = rpc_call_sync(server->client_acl, &msg, 0);
nfs_access_zap_cache(inode);
nfs_zap_acl_cache(inode);
@@ -344,7 +352,7 @@ static int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
switch (status) {
case 0:
- status = nfs_refresh_inode(inode, &fattr);
+ status = nfs_refresh_inode(inode, fattr);
nfs3_cache_acls(inode, acl, dfacl);
break;
case -EPFNOSUPPORT:
@@ -355,6 +363,7 @@ static int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
case -ENOTSUPP:
status = -EOPNOTSUPP;
}
+ nfs_free_fattr(fattr);
out_freepages:
while (args.npages != 0) {
args.npages--;
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index e701002694e5..fabb4f2849a1 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -144,14 +144,12 @@ static int
nfs3_proc_lookup(struct inode *dir, struct qstr *name,
struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
- struct nfs_fattr dir_attr;
struct nfs3_diropargs arg = {
.fh = NFS_FH(dir),
.name = name->name,
.len = name->len
};
struct nfs3_diropres res = {
- .dir_attr = &dir_attr,
.fh = fhandle,
.fattr = fattr
};
@@ -163,29 +161,30 @@ nfs3_proc_lookup(struct inode *dir, struct qstr *name,
int status;
dprintk("NFS call lookup %s\n", name->name);
- nfs_fattr_init(&dir_attr);
+ res.dir_attr = nfs_alloc_fattr();
+ if (res.dir_attr == NULL)
+ return -ENOMEM;
+
nfs_fattr_init(fattr);
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
- nfs_refresh_inode(dir, &dir_attr);
+ nfs_refresh_inode(dir, res.dir_attr);
if (status >= 0 && !(fattr->valid & NFS_ATTR_FATTR)) {
msg.rpc_proc = &nfs3_procedures[NFS3PROC_GETATTR];
msg.rpc_argp = fhandle;
msg.rpc_resp = fattr;
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
}
+ nfs_free_fattr(res.dir_attr);
dprintk("NFS reply lookup: %d\n", status);
return status;
}
static int nfs3_proc_access(struct inode *inode, struct nfs_access_entry *entry)
{
- struct nfs_fattr fattr;
struct nfs3_accessargs arg = {
.fh = NFS_FH(inode),
};
- struct nfs3_accessres res = {
- .fattr = &fattr,
- };
+ struct nfs3_accessres res;
struct rpc_message msg = {
.rpc_proc = &nfs3_procedures[NFS3PROC_ACCESS],
.rpc_argp = &arg,
@@ -193,7 +192,7 @@ static int nfs3_proc_access(struct inode *inode, struct nfs_access_entry *entry)
.rpc_cred = entry->cred,
};
int mode = entry->mask;
- int status;
+ int status = -ENOMEM;
dprintk("NFS call access\n");
@@ -210,9 +209,13 @@ static int nfs3_proc_access(struct inode *inode, struct nfs_access_entry *entry)
if (mode & MAY_EXEC)
arg.access |= NFS3_ACCESS_EXECUTE;
}
- nfs_fattr_init(&fattr);
+
+ res.fattr = nfs_alloc_fattr();
+ if (res.fattr == NULL)
+ goto out;
+
status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
- nfs_refresh_inode(inode, &fattr);
+ nfs_refresh_inode(inode, res.fattr);
if (status == 0) {
entry->mask = 0;
if (res.access & NFS3_ACCESS_READ)
@@ -222,6 +225,8 @@ static int nfs3_proc_access(struct inode *inode, struct nfs_access_entry *entry)
if (res.access & (NFS3_ACCESS_LOOKUP|NFS3_ACCESS_EXECUTE))
entry->mask |= MAY_EXEC;
}
+ nfs_free_fattr(res.fattr);
+out:
dprintk("NFS reply access: %d\n", status);
return status;
}
@@ -229,7 +234,7 @@ static int nfs3_proc_access(struct inode *inode, struct nfs_access_entry *entry)
static int nfs3_proc_readlink(struct inode *inode, struct page *page,
unsigned int pgbase, unsigned int pglen)
{
- struct nfs_fattr fattr;
+ struct nfs_fattr *fattr;
struct nfs3_readlinkargs args = {
.fh = NFS_FH(inode),
.pgbase = pgbase,
@@ -239,14 +244,19 @@ static int nfs3_proc_readlink(struct inode *inode, struct page *page,
struct rpc_message msg = {
.rpc_proc = &nfs3_procedures[NFS3PROC_READLINK],
.rpc_argp = &args,
- .rpc_resp = &fattr,
};
- int status;
+ int status = -ENOMEM;
dprintk("NFS call readlink\n");
- nfs_fattr_init(&fattr);
+ fattr = nfs_alloc_fattr();
+ if (fattr == NULL)
+ goto out;
+ msg.rpc_resp = fattr;
+
status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
- nfs_refresh_inode(inode, &fattr);
+ nfs_refresh_inode(inode, fattr);
+ nfs_free_fattr(fattr);
+out:
dprintk("NFS reply readlink: %d\n", status);
return status;
}
@@ -396,12 +406,17 @@ nfs3_proc_remove(struct inode *dir, struct qstr *name)
.rpc_argp = &arg,
.rpc_resp = &res,
};
- int status;
+ int status = -ENOMEM;
dprintk("NFS call remove %s\n", name->name);
- nfs_fattr_init(&res.dir_attr);
+ res.dir_attr = nfs_alloc_fattr();
+ if (res.dir_attr == NULL)
+ goto out;
+
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
- nfs_post_op_update_inode(dir, &res.dir_attr);
+ nfs_post_op_update_inode(dir, res.dir_attr);
+ nfs_free_fattr(res.dir_attr);
+out:
dprintk("NFS reply remove: %d\n", status);
return status;
}
@@ -419,7 +434,7 @@ nfs3_proc_unlink_done(struct rpc_task *task, struct inode *dir)
if (nfs3_async_handle_jukebox(task, dir))
return 0;
res = task->tk_msg.rpc_resp;
- nfs_post_op_update_inode(dir, &res->dir_attr);
+ nfs_post_op_update_inode(dir, res->dir_attr);
return 1;
}
@@ -427,7 +442,6 @@ static int
nfs3_proc_rename(struct inode *old_dir, struct qstr *old_name,
struct inode *new_dir, struct qstr *new_name)
{
- struct nfs_fattr old_dir_attr, new_dir_attr;
struct nfs3_renameargs arg = {
.fromfh = NFS_FH(old_dir),
.fromname = old_name->name,
@@ -436,23 +450,27 @@ nfs3_proc_rename(struct inode *old_dir, struct qstr *old_name,
.toname = new_name->name,
.tolen = new_name->len
};
- struct nfs3_renameres res = {
- .fromattr = &old_dir_attr,
- .toattr = &new_dir_attr
- };
+ struct nfs3_renameres res;
struct rpc_message msg = {
.rpc_proc = &nfs3_procedures[NFS3PROC_RENAME],
.rpc_argp = &arg,
.rpc_resp = &res,
};
- int status;
+ int status = -ENOMEM;
dprintk("NFS call rename %s -> %s\n", old_name->name, new_name->name);
- nfs_fattr_init(&old_dir_attr);
- nfs_fattr_init(&new_dir_attr);
+
+ res.fromattr = nfs_alloc_fattr();
+ res.toattr = nfs_alloc_fattr();
+ if (res.fromattr == NULL || res.toattr == NULL)
+ goto out;
+
status = rpc_call_sync(NFS_CLIENT(old_dir), &msg, 0);
- nfs_post_op_update_inode(old_dir, &old_dir_attr);
- nfs_post_op_update_inode(new_dir, &new_dir_attr);
+ nfs_post_op_update_inode(old_dir, res.fromattr);
+ nfs_post_op_update_inode(new_dir, res.toattr);
+out:
+ nfs_free_fattr(res.toattr);
+ nfs_free_fattr(res.fromattr);
dprintk("NFS reply rename: %d\n", status);
return status;
}
@@ -460,30 +478,32 @@ nfs3_proc_rename(struct inode *old_dir, struct qstr *old_name,
static int
nfs3_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
{
- struct nfs_fattr dir_attr, fattr;
struct nfs3_linkargs arg = {
.fromfh = NFS_FH(inode),
.tofh = NFS_FH(dir),
.toname = name->name,
.tolen = name->len
};
- struct nfs3_linkres res = {
- .dir_attr = &dir_attr,
- .fattr = &fattr
- };
+ struct nfs3_linkres res;
struct rpc_message msg = {
.rpc_proc = &nfs3_procedures[NFS3PROC_LINK],
.rpc_argp = &arg,
.rpc_resp = &res,
};
- int status;
+ int status = -ENOMEM;
dprintk("NFS call link %s\n", name->name);
- nfs_fattr_init(&dir_attr);
- nfs_fattr_init(&fattr);
+ res.fattr = nfs_alloc_fattr();
+ res.dir_attr = nfs_alloc_fattr();
+ if (res.fattr == NULL || res.dir_attr == NULL)
+ goto out;
+
status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
- nfs_post_op_update_inode(dir, &dir_attr);
- nfs_post_op_update_inode(inode, &fattr);
+ nfs_post_op_update_inode(dir, res.dir_attr);
+ nfs_post_op_update_inode(inode, res.fattr);
+out:
+ nfs_free_fattr(res.dir_attr);
+ nfs_free_fattr(res.fattr);
dprintk("NFS reply link: %d\n", status);
return status;
}
@@ -554,7 +574,7 @@ out:
static int
nfs3_proc_rmdir(struct inode *dir, struct qstr *name)
{
- struct nfs_fattr dir_attr;
+ struct nfs_fattr *dir_attr;
struct nfs3_diropargs arg = {
.fh = NFS_FH(dir),
.name = name->name,
@@ -563,14 +583,19 @@ nfs3_proc_rmdir(struct inode *dir, struct qstr *name)
struct rpc_message msg = {
.rpc_proc = &nfs3_procedures[NFS3PROC_RMDIR],
.rpc_argp = &arg,
- .rpc_resp = &dir_attr,
};
- int status;
+ int status = -ENOMEM;
dprintk("NFS call rmdir %s\n", name->name);
- nfs_fattr_init(&dir_attr);
+ dir_attr = nfs_alloc_fattr();
+ if (dir_attr == NULL)
+ goto out;
+
+ msg.rpc_resp = dir_attr;
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
- nfs_post_op_update_inode(dir, &dir_attr);
+ nfs_post_op_update_inode(dir, dir_attr);
+ nfs_free_fattr(dir_attr);
+out:
dprintk("NFS reply rmdir: %d\n", status);
return status;
}
@@ -589,7 +614,6 @@ nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
u64 cookie, struct page *page, unsigned int count, int plus)
{
struct inode *dir = dentry->d_inode;
- struct nfs_fattr dir_attr;
__be32 *verf = NFS_COOKIEVERF(dir);
struct nfs3_readdirargs arg = {
.fh = NFS_FH(dir),
@@ -600,7 +624,6 @@ nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
.pages = &page
};
struct nfs3_readdirres res = {
- .dir_attr = &dir_attr,
.verf = verf,
.plus = plus
};
@@ -610,7 +633,7 @@ nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
.rpc_resp = &res,
.rpc_cred = cred
};
- int status;
+ int status = -ENOMEM;
if (plus)
msg.rpc_proc = &nfs3_procedures[NFS3PROC_READDIRPLUS];
@@ -618,12 +641,17 @@ nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
dprintk("NFS call readdir%s %d\n",
plus? "plus" : "", (unsigned int) cookie);
- nfs_fattr_init(&dir_attr);
+ res.dir_attr = nfs_alloc_fattr();
+ if (res.dir_attr == NULL)
+ goto out;
+
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
nfs_invalidate_atime(dir);
+ nfs_refresh_inode(dir, res.dir_attr);
- nfs_refresh_inode(dir, &dir_attr);
+ nfs_free_fattr(res.dir_attr);
+out:
dprintk("NFS reply readdir: %d\n", status);
return status;
}
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index 56a86f6ac8b5..75dcfc7da365 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -762,7 +762,7 @@ nfs3_xdr_wccstat(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
static int
nfs3_xdr_removeres(struct rpc_rqst *req, __be32 *p, struct nfs_removeres *res)
{
- return nfs3_xdr_wccstat(req, p, &res->dir_attr);
+ return nfs3_xdr_wccstat(req, p, res->dir_attr);
}
/*
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index a187200a7aac..509930664d74 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -206,8 +206,8 @@ extern ssize_t nfs4_listxattr(struct dentry *, char *, size_t);
/* nfs4proc.c */
-extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *);
-extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct rpc_cred *);
+extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *, struct nfs4_setclientid_res *);
+extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, struct rpc_cred *);
extern int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred);
extern int nfs4_proc_async_renew(struct nfs_client *, struct rpc_cred *);
extern int nfs4_proc_renew(struct nfs_client *, struct rpc_cred *);
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
index f071d12c613b..3c2a1724fbd2 100644
--- a/fs/nfs/nfs4namespace.c
+++ b/fs/nfs/nfs4namespace.c
@@ -115,6 +115,7 @@ static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
char *page, char *page2,
const struct nfs4_fs_location *location)
{
+ const size_t addr_bufsize = sizeof(struct sockaddr_storage);
struct vfsmount *mnt = ERR_PTR(-ENOENT);
char *mnt_path;
unsigned int maxbuflen;
@@ -126,9 +127,12 @@ static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
mountdata->mnt_path = mnt_path;
maxbuflen = mnt_path - 1 - page2;
+ mountdata->addr = kmalloc(addr_bufsize, GFP_KERNEL);
+ if (mountdata->addr == NULL)
+ return ERR_PTR(-ENOMEM);
+
for (s = 0; s < location->nservers; s++) {
const struct nfs4_string *buf = &location->servers[s];
- struct sockaddr_storage addr;
if (buf->len <= 0 || buf->len >= maxbuflen)
continue;
@@ -137,11 +141,10 @@ static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
continue;
mountdata->addrlen = nfs_parse_server_name(buf->data, buf->len,
- (struct sockaddr *)&addr, sizeof(addr));
+ mountdata->addr, addr_bufsize);
if (mountdata->addrlen == 0)
continue;
- mountdata->addr = (struct sockaddr *)&addr;
rpc_set_port(mountdata->addr, NFS_PORT);
memcpy(page2, buf->data, buf->len);
@@ -156,6 +159,7 @@ static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
if (!IS_ERR(mnt))
break;
}
+ kfree(mountdata->addr);
return mnt;
}
@@ -221,8 +225,8 @@ out:
/*
* nfs_do_refmount - handle crossing a referral on server
+ * @mnt_parent - mountpoint of referral
* @dentry - dentry of referral
- * @nd - nameidata info
*
*/
struct vfsmount *nfs_do_refmount(const struct vfsmount *mnt_parent, struct dentry *dentry)
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 071fcedd517c..04f4b2b2506b 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -70,6 +70,9 @@ static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinf
static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
static int _nfs4_proc_lookup(struct inode *dir, const struct qstr *name, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
+static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
+ struct nfs_fattr *fattr, struct iattr *sattr,
+ struct nfs4_state *state);
/* Prevent leaks of NFSv4 errors into userland */
static int nfs4_map_errors(int err)
@@ -1659,15 +1662,24 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, in
if (status != 0)
goto err_opendata_put;
- if (opendata->o_arg.open_flags & O_EXCL)
- nfs4_exclusive_attrset(opendata, sattr);
-
state = nfs4_opendata_to_nfs4_state(opendata);
status = PTR_ERR(state);
if (IS_ERR(state))
goto err_opendata_put;
if (server->caps & NFS_CAP_POSIX_LOCK)
set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
+
+ if (opendata->o_arg.open_flags & O_EXCL) {
+ nfs4_exclusive_attrset(opendata, sattr);
+
+ nfs_fattr_init(opendata->o_res.f_attr);
+ status = nfs4_do_setattr(state->inode, cred,
+ opendata->o_res.f_attr, sattr,
+ state);
+ if (status == 0)
+ nfs_setattr_update_inode(state->inode, sattr);
+ nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr);
+ }
nfs4_opendata_put(opendata);
nfs4_put_state_owner(sp);
*res = state;
@@ -2404,14 +2416,12 @@ static int nfs4_proc_lookup(struct inode *dir, struct qstr *name, struct nfs_fh
static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
{
struct nfs_server *server = NFS_SERVER(inode);
- struct nfs_fattr fattr;
struct nfs4_accessargs args = {
.fh = NFS_FH(inode),
.bitmask = server->attr_bitmask,
};
struct nfs4_accessres res = {
.server = server,
- .fattr = &fattr,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
@@ -2438,7 +2448,11 @@ static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry
if (mode & MAY_EXEC)
args.access |= NFS4_ACCESS_EXECUTE;
}
- nfs_fattr_init(&fattr);
+
+ res.fattr = nfs_alloc_fattr();
+ if (res.fattr == NULL)
+ return -ENOMEM;
+
status = nfs4_call_sync(server, &msg, &args, &res, 0);
if (!status) {
entry->mask = 0;
@@ -2448,8 +2462,9 @@ static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry
entry->mask |= MAY_WRITE;
if (res.access & (NFS4_ACCESS_LOOKUP|NFS4_ACCESS_EXECUTE))
entry->mask |= MAY_EXEC;
- nfs_refresh_inode(inode, &fattr);
+ nfs_refresh_inode(inode, res.fattr);
}
+ nfs_free_fattr(res.fattr);
return status;
}
@@ -2562,13 +2577,6 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
}
d_add(dentry, igrab(state->inode));
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
- if (flags & O_EXCL) {
- struct nfs_fattr fattr;
- status = nfs4_do_setattr(state->inode, cred, &fattr, sattr, state);
- if (status == 0)
- nfs_setattr_update_inode(state->inode, sattr);
- nfs_post_op_update_inode(state->inode, &fattr);
- }
if (status == 0 && (nd->flags & LOOKUP_OPEN) != 0)
status = nfs4_intent_set_file(nd, &path, state, fmode);
else
@@ -2596,14 +2604,19 @@ static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
.rpc_argp = &args,
.rpc_resp = &res,
};
- int status;
+ int status = -ENOMEM;
+
+ res.dir_attr = nfs_alloc_fattr();
+ if (res.dir_attr == NULL)
+ goto out;
- nfs_fattr_init(&res.dir_attr);
status = nfs4_call_sync(server, &msg, &args, &res, 1);
if (status == 0) {
update_changeattr(dir, &res.cinfo);
- nfs_post_op_update_inode(dir, &res.dir_attr);
+ nfs_post_op_update_inode(dir, res.dir_attr);
}
+ nfs_free_fattr(res.dir_attr);
+out:
return status;
}
@@ -2638,7 +2651,7 @@ static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
return 0;
update_changeattr(dir, &res->cinfo);
- nfs_post_op_update_inode(dir, &res->dir_attr);
+ nfs_post_op_update_inode(dir, res->dir_attr);
return 1;
}
@@ -2653,29 +2666,31 @@ static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
.new_name = new_name,
.bitmask = server->attr_bitmask,
};
- struct nfs_fattr old_fattr, new_fattr;
struct nfs4_rename_res res = {
.server = server,
- .old_fattr = &old_fattr,
- .new_fattr = &new_fattr,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME],
.rpc_argp = &arg,
.rpc_resp = &res,
};
- int status;
+ int status = -ENOMEM;
- nfs_fattr_init(res.old_fattr);
- nfs_fattr_init(res.new_fattr);
- status = nfs4_call_sync(server, &msg, &arg, &res, 1);
+ res.old_fattr = nfs_alloc_fattr();
+ res.new_fattr = nfs_alloc_fattr();
+ if (res.old_fattr == NULL || res.new_fattr == NULL)
+ goto out;
+ status = nfs4_call_sync(server, &msg, &arg, &res, 1);
if (!status) {
update_changeattr(old_dir, &res.old_cinfo);
nfs_post_op_update_inode(old_dir, res.old_fattr);
update_changeattr(new_dir, &res.new_cinfo);
nfs_post_op_update_inode(new_dir, res.new_fattr);
}
+out:
+ nfs_free_fattr(res.new_fattr);
+ nfs_free_fattr(res.old_fattr);
return status;
}
@@ -2702,28 +2717,30 @@ static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *
.name = name,
.bitmask = server->attr_bitmask,
};
- struct nfs_fattr fattr, dir_attr;
struct nfs4_link_res res = {
.server = server,
- .fattr = &fattr,
- .dir_attr = &dir_attr,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
.rpc_argp = &arg,
.rpc_resp = &res,
};
- int status;
+ int status = -ENOMEM;
+
+ res.fattr = nfs_alloc_fattr();
+ res.dir_attr = nfs_alloc_fattr();
+ if (res.fattr == NULL || res.dir_attr == NULL)
+ goto out;
- nfs_fattr_init(res.fattr);
- nfs_fattr_init(res.dir_attr);
status = nfs4_call_sync(server, &msg, &arg, &res, 1);
if (!status) {
update_changeattr(dir, &res.cinfo);
nfs_post_op_update_inode(dir, res.dir_attr);
nfs_post_op_update_inode(inode, res.fattr);
}
-
+out:
+ nfs_free_fattr(res.dir_attr);
+ nfs_free_fattr(res.fattr);
return status;
}
@@ -3494,7 +3511,9 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
return _nfs4_async_handle_error(task, server, server->nfs_client, state);
}
-int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, unsigned short port, struct rpc_cred *cred)
+int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
+ unsigned short port, struct rpc_cred *cred,
+ struct nfs4_setclientid_res *res)
{
nfs4_verifier sc_verifier;
struct nfs4_setclientid setclientid = {
@@ -3504,7 +3523,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, unsigned short po
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
.rpc_argp = &setclientid,
- .rpc_resp = clp,
+ .rpc_resp = res,
.rpc_cred = cred,
};
__be32 *p;
@@ -3547,12 +3566,14 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, unsigned short po
return status;
}
-static int _nfs4_proc_setclientid_confirm(struct nfs_client *clp, struct rpc_cred *cred)
+static int _nfs4_proc_setclientid_confirm(struct nfs_client *clp,
+ struct nfs4_setclientid_res *arg,
+ struct rpc_cred *cred)
{
struct nfs_fsinfo fsinfo;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
- .rpc_argp = clp,
+ .rpc_argp = arg,
.rpc_resp = &fsinfo,
.rpc_cred = cred,
};
@@ -3570,12 +3591,14 @@ static int _nfs4_proc_setclientid_confirm(struct nfs_client *clp, struct rpc_cre
return status;
}
-int nfs4_proc_setclientid_confirm(struct nfs_client *clp, struct rpc_cred *cred)
+int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
+ struct nfs4_setclientid_res *arg,
+ struct rpc_cred *cred)
{
long timeout = 0;
int err;
do {
- err = _nfs4_proc_setclientid_confirm(clp, cred);
+ err = _nfs4_proc_setclientid_confirm(clp, arg, cred);
switch (err) {
case 0:
return err;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 6c5ed51f105e..cd2d90400d46 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -62,6 +62,7 @@ static LIST_HEAD(nfs4_clientid_list);
int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
{
+ struct nfs4_setclientid_res clid;
unsigned short port;
int status;
@@ -69,11 +70,15 @@ int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
if (clp->cl_addr.ss_family == AF_INET6)
port = nfs_callback_tcpport6;
- status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred);
- if (status == 0)
- status = nfs4_proc_setclientid_confirm(clp, cred);
- if (status == 0)
- nfs4_schedule_state_renewal(clp);
+ status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred, &clid);
+ if (status != 0)
+ goto out;
+ status = nfs4_proc_setclientid_confirm(clp, &clid, cred);
+ if (status != 0)
+ goto out;
+ clp->cl_clientid = clid.clientid;
+ nfs4_schedule_state_renewal(clp);
+out:
return status;
}
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 38f3b582e7c2..6bdef28efa33 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -1504,14 +1504,14 @@ static void encode_setclientid(struct xdr_stream *xdr, const struct nfs4_setclie
hdr->replen += decode_setclientid_maxsz;
}
-static void encode_setclientid_confirm(struct xdr_stream *xdr, const struct nfs_client *client_state, struct compound_hdr *hdr)
+static void encode_setclientid_confirm(struct xdr_stream *xdr, const struct nfs4_setclientid_res *arg, struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 12 + NFS4_VERIFIER_SIZE);
*p++ = cpu_to_be32(OP_SETCLIENTID_CONFIRM);
- p = xdr_encode_hyper(p, client_state->cl_clientid);
- xdr_encode_opaque_fixed(p, client_state->cl_confirm.data, NFS4_VERIFIER_SIZE);
+ p = xdr_encode_hyper(p, arg->clientid);
+ xdr_encode_opaque_fixed(p, arg->confirm.data, NFS4_VERIFIER_SIZE);
hdr->nops++;
hdr->replen += decode_setclientid_confirm_maxsz;
}
@@ -2324,7 +2324,7 @@ static int nfs4_xdr_enc_setclientid(struct rpc_rqst *req, __be32 *p, struct nfs4
/*
* a SETCLIENTID_CONFIRM request
*/
-static int nfs4_xdr_enc_setclientid_confirm(struct rpc_rqst *req, __be32 *p, struct nfs_client *clp)
+static int nfs4_xdr_enc_setclientid_confirm(struct rpc_rqst *req, __be32 *p, struct nfs4_setclientid_res *arg)
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
@@ -2334,7 +2334,7 @@ static int nfs4_xdr_enc_setclientid_confirm(struct rpc_rqst *req, __be32 *p, str
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
encode_compound_hdr(&xdr, req, &hdr);
- encode_setclientid_confirm(&xdr, clp, &hdr);
+ encode_setclientid_confirm(&xdr, arg, &hdr);
encode_putrootfh(&xdr, &hdr);
encode_fsinfo(&xdr, lease_bitmap, &hdr);
encode_nops(&hdr);
@@ -4397,7 +4397,7 @@ out_overflow:
return -EIO;
}
-static int decode_setclientid(struct xdr_stream *xdr, struct nfs_client *clp)
+static int decode_setclientid(struct xdr_stream *xdr, struct nfs4_setclientid_res *res)
{
__be32 *p;
uint32_t opnum;
@@ -4417,8 +4417,8 @@ static int decode_setclientid(struct xdr_stream *xdr, struct nfs_client *clp)
p = xdr_inline_decode(xdr, 8 + NFS4_VERIFIER_SIZE);
if (unlikely(!p))
goto out_overflow;
- p = xdr_decode_hyper(p, &clp->cl_clientid);
- memcpy(clp->cl_confirm.data, p, NFS4_VERIFIER_SIZE);
+ p = xdr_decode_hyper(p, &res->clientid);
+ memcpy(res->confirm.data, p, NFS4_VERIFIER_SIZE);
} else if (nfserr == NFSERR_CLID_INUSE) {
uint32_t len;
@@ -4815,7 +4815,7 @@ static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, __be32 *p, struct nfs_rem
goto out;
if ((status = decode_remove(&xdr, &res->cinfo)) != 0)
goto out;
- decode_getfattr(&xdr, &res->dir_attr, res->server,
+ decode_getfattr(&xdr, res->dir_attr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
@@ -5498,7 +5498,7 @@ static int nfs4_xdr_dec_renew(struct rpc_rqst *rqstp, __be32 *p, void *dummy)
* Decode SETCLIENTID response
*/
static int nfs4_xdr_dec_setclientid(struct rpc_rqst *req, __be32 *p,
- struct nfs_client *clp)
+ struct nfs4_setclientid_res *res)
{
struct xdr_stream xdr;
struct compound_hdr hdr;
@@ -5507,7 +5507,7 @@ static int nfs4_xdr_dec_setclientid(struct rpc_rqst *req, __be32 *p,
xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
status = decode_compound_hdr(&xdr, &hdr);
if (!status)
- status = decode_setclientid(&xdr, clp);
+ status = decode_setclientid(&xdr, res);
return status;
}
diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c
index 8c55b27c0de4..6bd19d843af7 100644
--- a/fs/nfs/nfsroot.c
+++ b/fs/nfs/nfsroot.c
@@ -488,7 +488,6 @@ static int __init root_nfs_ports(void)
*/
static int __init root_nfs_get_handle(void)
{
- struct nfs_fh fh;
struct sockaddr_in sin;
unsigned int auth_flav_len = 0;
struct nfs_mount_request request = {
@@ -499,21 +498,24 @@ static int __init root_nfs_get_handle(void)
NFS_MNT3_VERSION : NFS_MNT_VERSION,
.protocol = (nfs_data.flags & NFS_MOUNT_TCP) ?
XPRT_TRANSPORT_TCP : XPRT_TRANSPORT_UDP,
- .fh = &fh,
.auth_flav_len = &auth_flav_len,
};
- int status;
+ int status = -ENOMEM;
+ request.fh = nfs_alloc_fhandle();
+ if (!request.fh)
+ goto out;
set_sockaddr(&sin, servaddr, htons(mount_port));
status = nfs_mount(&request);
if (status < 0)
printk(KERN_ERR "Root-NFS: Server returned error %d "
"while mounting %s\n", status, nfs_export_path);
else {
- nfs_data.root.size = fh.size;
- memcpy(nfs_data.root.data, fh.data, fh.size);
+ nfs_data.root.size = request.fh->size;
+ memcpy(&nfs_data.root.data, request.fh->data, request.fh->size);
}
-
+ nfs_free_fhandle(request.fh);
+out:
return status;
}
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index 0288be80444f..611bec22f552 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -224,35 +224,60 @@ static int nfs_proc_readlink(struct inode *inode, struct page *page,
return status;
}
+struct nfs_createdata {
+ struct nfs_createargs arg;
+ struct nfs_diropok res;
+ struct nfs_fh fhandle;
+ struct nfs_fattr fattr;
+};
+
+static struct nfs_createdata *nfs_alloc_createdata(struct inode *dir,
+ struct dentry *dentry, struct iattr *sattr)
+{
+ struct nfs_createdata *data;
+
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+
+ if (data != NULL) {
+ data->arg.fh = NFS_FH(dir);
+ data->arg.name = dentry->d_name.name;
+ data->arg.len = dentry->d_name.len;
+ data->arg.sattr = sattr;
+ nfs_fattr_init(&data->fattr);
+ data->fhandle.size = 0;
+ data->res.fh = &data->fhandle;
+ data->res.fattr = &data->fattr;
+ }
+ return data;
+};
+
+static void nfs_free_createdata(const struct nfs_createdata *data)
+{
+ kfree(data);
+}
+
static int
nfs_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
int flags, struct nameidata *nd)
{
- struct nfs_fh fhandle;
- struct nfs_fattr fattr;
- struct nfs_createargs arg = {
- .fh = NFS_FH(dir),
- .name = dentry->d_name.name,
- .len = dentry->d_name.len,
- .sattr = sattr
- };
- struct nfs_diropok res = {
- .fh = &fhandle,
- .fattr = &fattr
- };
+ struct nfs_createdata *data;
struct rpc_message msg = {
.rpc_proc = &nfs_procedures[NFSPROC_CREATE],
- .rpc_argp = &arg,
- .rpc_resp = &res,
};
- int status;
+ int status = -ENOMEM;
- nfs_fattr_init(&fattr);
dprintk("NFS call create %s\n", dentry->d_name.name);
+ data = nfs_alloc_createdata(dir, dentry, sattr);
+ if (data == NULL)
+ goto out;
+ msg.rpc_argp = &data->arg;
+ msg.rpc_resp = &data->res;
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
nfs_mark_for_revalidate(dir);
if (status == 0)
- status = nfs_instantiate(dentry, &fhandle, &fattr);
+ status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
+ nfs_free_createdata(data);
+out:
dprintk("NFS reply create: %d\n", status);
return status;
}
@@ -264,24 +289,12 @@ static int
nfs_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
dev_t rdev)
{
- struct nfs_fh fhandle;
- struct nfs_fattr fattr;
- struct nfs_createargs arg = {
- .fh = NFS_FH(dir),
- .name = dentry->d_name.name,
- .len = dentry->d_name.len,
- .sattr = sattr
- };
- struct nfs_diropok res = {
- .fh = &fhandle,
- .fattr = &fattr
- };
+ struct nfs_createdata *data;
struct rpc_message msg = {
.rpc_proc = &nfs_procedures[NFSPROC_CREATE],
- .rpc_argp = &arg,
- .rpc_resp = &res,
};
- int status, mode;
+ umode_t mode;
+ int status = -ENOMEM;
dprintk("NFS call mknod %s\n", dentry->d_name.name);
@@ -294,17 +307,24 @@ nfs_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
sattr->ia_size = new_encode_dev(rdev);/* get out your barf bag */
}
- nfs_fattr_init(&fattr);
+ data = nfs_alloc_createdata(dir, dentry, sattr);
+ if (data == NULL)
+ goto out;
+ msg.rpc_argp = &data->arg;
+ msg.rpc_resp = &data->res;
+
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
nfs_mark_for_revalidate(dir);
if (status == -EINVAL && S_ISFIFO(mode)) {
sattr->ia_mode = mode;
- nfs_fattr_init(&fattr);
+ nfs_fattr_init(data->res.fattr);
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
}
if (status == 0)
- status = nfs_instantiate(dentry, &fhandle, &fattr);
+ status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
+ nfs_free_createdata(data);
+out:
dprintk("NFS reply mknod: %d\n", status);
return status;
}
@@ -398,8 +418,8 @@ static int
nfs_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
unsigned int len, struct iattr *sattr)
{
- struct nfs_fh fhandle;
- struct nfs_fattr fattr;
+ struct nfs_fh *fh;
+ struct nfs_fattr *fattr;
struct nfs_symlinkargs arg = {
.fromfh = NFS_FH(dir),
.fromname = dentry->d_name.name,
@@ -412,12 +432,18 @@ nfs_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
.rpc_proc = &nfs_procedures[NFSPROC_SYMLINK],
.rpc_argp = &arg,
};
- int status;
+ int status = -ENAMETOOLONG;
+
+ dprintk("NFS call symlink %s\n", dentry->d_name.name);
if (len > NFS2_MAXPATHLEN)
- return -ENAMETOOLONG;
+ goto out;
- dprintk("NFS call symlink %s\n", dentry->d_name.name);
+ fh = nfs_alloc_fhandle();
+ fattr = nfs_alloc_fattr();
+ status = -ENOMEM;
+ if (fh == NULL || fattr == NULL)
+ goto out;
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
nfs_mark_for_revalidate(dir);
@@ -427,12 +453,12 @@ nfs_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
* filehandle size to zero indicates to nfs_instantiate that it
* should fill in the data with a LOOKUP call on the wire.
*/
- if (status == 0) {
- nfs_fattr_init(&fattr);
- fhandle.size = 0;
- status = nfs_instantiate(dentry, &fhandle, &fattr);
- }
+ if (status == 0)
+ status = nfs_instantiate(dentry, fh, fattr);
+ nfs_free_fattr(fattr);
+ nfs_free_fhandle(fh);
+out:
dprintk("NFS reply symlink: %d\n", status);
return status;
}
@@ -440,31 +466,25 @@ nfs_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
static int
nfs_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
{
- struct nfs_fh fhandle;
- struct nfs_fattr fattr;
- struct nfs_createargs arg = {
- .fh = NFS_FH(dir),
- .name = dentry->d_name.name,
- .len = dentry->d_name.len,
- .sattr = sattr
- };
- struct nfs_diropok res = {
- .fh = &fhandle,
- .fattr = &fattr
- };
+ struct nfs_createdata *data;
struct rpc_message msg = {
.rpc_proc = &nfs_procedures[NFSPROC_MKDIR],
- .rpc_argp = &arg,
- .rpc_resp = &res,
};
- int status;
+ int status = -ENOMEM;
dprintk("NFS call mkdir %s\n", dentry->d_name.name);
- nfs_fattr_init(&fattr);
+ data = nfs_alloc_createdata(dir, dentry, sattr);
+ if (data == NULL)
+ goto out;
+ msg.rpc_argp = &data->arg;
+ msg.rpc_resp = &data->res;
+
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
nfs_mark_for_revalidate(dir);
if (status == 0)
- status = nfs_instantiate(dentry, &fhandle, &fattr);
+ status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
+ nfs_free_createdata(data);
+out:
dprintk("NFS reply mkdir: %d\n", status);
return status;
}
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index b4148fc00f9f..ee051a40fac8 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -423,15 +423,19 @@ static int nfs_statfs(struct dentry *dentry, struct kstatfs *buf)
unsigned char blockbits;
unsigned long blockres;
struct nfs_fh *fh = NFS_FH(dentry->d_inode);
- struct nfs_fattr fattr;
- struct nfs_fsstat res = {
- .fattr = &fattr,
- };
- int error;
+ struct nfs_fsstat res;
+ int error = -ENOMEM;
+
+ res.fattr = nfs_alloc_fattr();
+ if (res.fattr == NULL)
+ goto out_err;
error = server->nfs_client->rpc_ops->statfs(server, fh, &res);
+
+ nfs_free_fattr(res.fattr);
if (error < 0)
goto out_err;
+
buf->f_type = NFS_SUPER_MAGIC;
/*
@@ -2172,7 +2176,7 @@ static int nfs_get_sb(struct file_system_type *fs_type,
int error = -ENOMEM;
data = nfs_alloc_parsed_mount_data(3);
- mntfh = kzalloc(sizeof(*mntfh), GFP_KERNEL);
+ mntfh = nfs_alloc_fhandle();
if (data == NULL || mntfh == NULL)
goto out_free_fh;
@@ -2247,7 +2251,7 @@ out:
kfree(data->fscache_uniq);
security_free_mnt_opts(&data->lsm_opts);
out_free_fh:
- kfree(mntfh);
+ nfs_free_fhandle(mntfh);
kfree(data);
return error;
@@ -2556,7 +2560,7 @@ static int nfs4_remote_get_sb(struct file_system_type *fs_type,
};
int error = -ENOMEM;
- mntfh = kzalloc(sizeof(*mntfh), GFP_KERNEL);
+ mntfh = nfs_alloc_fhandle();
if (data == NULL || mntfh == NULL)
goto out_free_fh;
@@ -2614,7 +2618,7 @@ static int nfs4_remote_get_sb(struct file_system_type *fs_type,
out:
security_free_mnt_opts(&data->lsm_opts);
out_free_fh:
- kfree(mntfh);
+ nfs_free_fhandle(mntfh);
return error;
out_free:
@@ -2669,41 +2673,120 @@ out_freepage:
free_page((unsigned long)page);
}
+struct nfs_referral_count {
+ struct list_head list;
+ const struct task_struct *task;
+ unsigned int referral_count;
+};
+
+static LIST_HEAD(nfs_referral_count_list);
+static DEFINE_SPINLOCK(nfs_referral_count_list_lock);
+
+static struct nfs_referral_count *nfs_find_referral_count(void)
+{
+ struct nfs_referral_count *p;
+
+ list_for_each_entry(p, &nfs_referral_count_list, list) {
+ if (p->task == current)
+ return p;
+ }
+ return NULL;
+}
+
+#define NFS_MAX_NESTED_REFERRALS 2
+
+static int nfs_referral_loop_protect(void)
+{
+ struct nfs_referral_count *p, *new;
+ int ret = -ENOMEM;
+
+ new = kmalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ goto out;
+ new->task = current;
+ new->referral_count = 1;
+
+ ret = 0;
+ spin_lock(&nfs_referral_count_list_lock);
+ p = nfs_find_referral_count();
+ if (p != NULL) {
+ if (p->referral_count >= NFS_MAX_NESTED_REFERRALS)
+ ret = -ELOOP;
+ else
+ p->referral_count++;
+ } else {
+ list_add(&new->list, &nfs_referral_count_list);
+ new = NULL;
+ }
+ spin_unlock(&nfs_referral_count_list_lock);
+ kfree(new);
+out:
+ return ret;
+}
+
+static void nfs_referral_loop_unprotect(void)
+{
+ struct nfs_referral_count *p;
+
+ spin_lock(&nfs_referral_count_list_lock);
+ p = nfs_find_referral_count();
+ p->referral_count--;
+ if (p->referral_count == 0)
+ list_del(&p->list);
+ else
+ p = NULL;
+ spin_unlock(&nfs_referral_count_list_lock);
+ kfree(p);
+}
+
static int nfs_follow_remote_path(struct vfsmount *root_mnt,
const char *export_path, struct vfsmount *mnt_target)
{
+ struct nameidata *nd = NULL;
struct mnt_namespace *ns_private;
- struct nameidata nd;
struct super_block *s;
int ret;
+ nd = kmalloc(sizeof(*nd), GFP_KERNEL);
+ if (nd == NULL)
+ return -ENOMEM;
+
ns_private = create_mnt_ns(root_mnt);
ret = PTR_ERR(ns_private);
if (IS_ERR(ns_private))
goto out_mntput;
+ ret = nfs_referral_loop_protect();
+ if (ret != 0)
+ goto out_put_mnt_ns;
+
ret = vfs_path_lookup(root_mnt->mnt_root, root_mnt,
- export_path, LOOKUP_FOLLOW, &nd);
+ export_path, LOOKUP_FOLLOW, nd);
+ nfs_referral_loop_unprotect();
put_mnt_ns(ns_private);
if (ret != 0)
goto out_err;
- s = nd.path.mnt->mnt_sb;
+ s = nd->path.mnt->mnt_sb;
atomic_inc(&s->s_active);
mnt_target->mnt_sb = s;
- mnt_target->mnt_root = dget(nd.path.dentry);
+ mnt_target->mnt_root = dget(nd->path.dentry);
/* Correct the device pathname */
- nfs_fix_devname(&nd.path, mnt_target);
+ nfs_fix_devname(&nd->path, mnt_target);
- path_put(&nd.path);
+ path_put(&nd->path);
+ kfree(nd);
down_write(&s->s_umount);
return 0;
+out_put_mnt_ns:
+ put_mnt_ns(ns_private);
out_mntput:
mntput(root_mnt);
out_err:
+ kfree(nd);
return ret;
}
@@ -2874,17 +2957,21 @@ static int nfs4_remote_referral_get_sb(struct file_system_type *fs_type,
struct super_block *s;
struct nfs_server *server;
struct dentry *mntroot;
- struct nfs_fh mntfh;
+ struct nfs_fh *mntfh;
int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
struct nfs_sb_mountdata sb_mntdata = {
.mntflags = flags,
};
- int error;
+ int error = -ENOMEM;
dprintk("--> nfs4_referral_get_sb()\n");
+ mntfh = nfs_alloc_fhandle();
+ if (mntfh == NULL)
+ goto out_err_nofh;
+
/* create a new volume representation */
- server = nfs4_create_referral_server(data, &mntfh);
+ server = nfs4_create_referral_server(data, mntfh);
if (IS_ERR(server)) {
error = PTR_ERR(server);
goto out_err_noserver;
@@ -2916,7 +3003,7 @@ static int nfs4_remote_referral_get_sb(struct file_system_type *fs_type,
nfs_fscache_get_super_cookie(s, NULL, data);
}
- mntroot = nfs4_get_root(s, &mntfh);
+ mntroot = nfs4_get_root(s, mntfh);
if (IS_ERR(mntroot)) {
error = PTR_ERR(mntroot);
goto error_splat_super;
@@ -2933,12 +3020,15 @@ static int nfs4_remote_referral_get_sb(struct file_system_type *fs_type,
security_sb_clone_mnt_opts(data->sb, s);
+ nfs_free_fhandle(mntfh);
dprintk("<-- nfs4_referral_get_sb() = 0\n");
return 0;
out_err_nosb:
nfs_free_server(server);
out_err_noserver:
+ nfs_free_fhandle(mntfh);
+out_err_nofh:
dprintk("<-- nfs4_referral_get_sb() = %d [error]\n", error);
return error;
@@ -2947,6 +3037,7 @@ error_splat_super:
bdi_unregister(&server->backing_dev_info);
error_splat_bdi:
deactivate_locked_super(s);
+ nfs_free_fhandle(mntfh);
dprintk("<-- nfs4_referral_get_sb() = %d [splat]\n", error);
return error;
}
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index 6da3d3ff6edd..a2242af6a17d 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -23,6 +23,7 @@ struct nfs_unlinkdata {
struct nfs_removeres res;
struct inode *dir;
struct rpc_cred *cred;
+ struct nfs_fattr dir_attr;
};
/**
@@ -169,7 +170,7 @@ static int nfs_do_call_unlink(struct dentry *parent, struct inode *dir, struct n
}
nfs_sb_active(dir->i_sb);
data->args.fh = NFS_FH(dir);
- nfs_fattr_init(&data->res.dir_attr);
+ nfs_fattr_init(data->res.dir_attr);
NFS_PROTO(dir)->unlink_setup(&msg, dir);
@@ -259,6 +260,7 @@ nfs_async_unlink(struct inode *dir, struct dentry *dentry)
goto out_free;
}
data->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
+ data->res.dir_attr = &data->dir_attr;
status = -EBUSY;
spin_lock(&dentry->d_lock);
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 872a5ef550c7..c2a4f71d87dd 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -259,10 +259,9 @@ static struct cache_detail svc_expkey_cache = {
.alloc = expkey_alloc,
};
-static struct svc_expkey *
-svc_expkey_lookup(struct svc_expkey *item)
+static int
+svc_expkey_hash(struct svc_expkey *item)
{
- struct cache_head *ch;
int hash = item->ek_fsidtype;
char * cp = (char*)item->ek_fsid;
int len = key_len(item->ek_fsidtype);
@@ -270,6 +269,14 @@ svc_expkey_lookup(struct svc_expkey *item)
hash ^= hash_mem(cp, len, EXPKEY_HASHBITS);
hash ^= hash_ptr(item->ek_client, EXPKEY_HASHBITS);
hash &= EXPKEY_HASHMASK;
+ return hash;
+}
+
+static struct svc_expkey *
+svc_expkey_lookup(struct svc_expkey *item)
+{
+ struct cache_head *ch;
+ int hash = svc_expkey_hash(item);
ch = sunrpc_cache_lookup(&svc_expkey_cache, &item->h,
hash);
@@ -283,13 +290,7 @@ static struct svc_expkey *
svc_expkey_update(struct svc_expkey *new, struct svc_expkey *old)
{
struct cache_head *ch;
- int hash = new->ek_fsidtype;
- char * cp = (char*)new->ek_fsid;
- int len = key_len(new->ek_fsidtype);
-
- hash ^= hash_mem(cp, len, EXPKEY_HASHBITS);
- hash ^= hash_ptr(new->ek_client, EXPKEY_HASHBITS);
- hash &= EXPKEY_HASHMASK;
+ int hash = svc_expkey_hash(new);
ch = sunrpc_cache_update(&svc_expkey_cache, &new->h,
&old->h, hash);
@@ -738,14 +739,22 @@ struct cache_detail svc_export_cache = {
.alloc = svc_export_alloc,
};
-static struct svc_export *
-svc_export_lookup(struct svc_export *exp)
+static int
+svc_export_hash(struct svc_export *exp)
{
- struct cache_head *ch;
int hash;
+
hash = hash_ptr(exp->ex_client, EXPORT_HASHBITS);
hash ^= hash_ptr(exp->ex_path.dentry, EXPORT_HASHBITS);
hash ^= hash_ptr(exp->ex_path.mnt, EXPORT_HASHBITS);
+ return hash;
+}
+
+static struct svc_export *
+svc_export_lookup(struct svc_export *exp)
+{
+ struct cache_head *ch;
+ int hash = svc_export_hash(exp);
ch = sunrpc_cache_lookup(&svc_export_cache, &exp->h,
hash);
@@ -759,10 +768,7 @@ static struct svc_export *
svc_export_update(struct svc_export *new, struct svc_export *old)
{
struct cache_head *ch;
- int hash;
- hash = hash_ptr(old->ex_client, EXPORT_HASHBITS);
- hash ^= hash_ptr(old->ex_path.dentry, EXPORT_HASHBITS);
- hash ^= hash_ptr(old->ex_path.mnt, EXPORT_HASHBITS);
+ int hash = svc_export_hash(old);
ch = sunrpc_cache_update(&svc_export_cache, &new->h,
&old->h,
@@ -1071,9 +1077,9 @@ exp_export(struct nfsctl_export *nxp)
err = 0;
finish:
kfree(new.ex_pathname);
- if (exp)
+ if (!IS_ERR_OR_NULL(exp))
exp_put(exp);
- if (fsid_key && !IS_ERR(fsid_key))
+ if (!IS_ERR_OR_NULL(fsid_key))
cache_put(&fsid_key->h, &svc_expkey_cache);
path_put(&path);
out_put_clp:
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 7e32bd394e86..1d5051d46b46 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -32,6 +32,7 @@
*/
#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/svc_xprt.h>
#include <linux/slab.h>
#include "nfsd.h"
#include "state.h"
@@ -79,11 +80,6 @@ enum nfs_cb_opnum4 {
cb_sequence_dec_sz + \
op_dec_sz)
-struct nfs4_rpc_args {
- void *args_op;
- struct nfsd4_cb_sequence args_seq;
-};
-
/*
* Generic encode routines from fs/nfs/nfs4xdr.c
*/
@@ -456,15 +452,14 @@ static struct rpc_program cb_program = {
static int max_cb_time(void)
{
- return max(NFSD_LEASE_TIME/10, (time_t)1) * HZ;
+ return max(nfsd4_lease/10, (time_t)1) * HZ;
}
/* Reference counting, callback cleanup, etc., all look racy as heck.
- * And why is cb_set an atomic? */
+ * And why is cl_cb_set an atomic? */
-int setup_callback_client(struct nfs4_client *clp)
+int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *cb)
{
- struct nfs4_cb_conn *cb = &clp->cl_cb_conn;
struct rpc_timeout timeparms = {
.to_initval = max_cb_time(),
.to_retries = 0,
@@ -486,7 +481,7 @@ int setup_callback_client(struct nfs4_client *clp)
if (!clp->cl_principal && (clp->cl_flavor >= RPC_AUTH_GSS_KRB5))
return -EINVAL;
if (cb->cb_minorversion) {
- args.bc_xprt = clp->cl_cb_xprt;
+ args.bc_xprt = cb->cb_xprt;
args.protocol = XPRT_TRANSPORT_BC_TCP;
}
/* Create RPC client */
@@ -496,7 +491,7 @@ int setup_callback_client(struct nfs4_client *clp)
PTR_ERR(client));
return PTR_ERR(client);
}
- cb->cb_client = client;
+ nfsd4_set_callback_client(clp, client);
return 0;
}
@@ -514,8 +509,7 @@ static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
if (task->tk_status)
warn_no_callback_path(clp, task->tk_status);
else
- atomic_set(&clp->cl_cb_conn.cb_set, 1);
- put_nfs4_client(clp);
+ atomic_set(&clp->cl_cb_set, 1);
}
static const struct rpc_call_ops nfsd4_cb_probe_ops = {
@@ -537,7 +531,6 @@ int set_callback_cred(void)
void do_probe_callback(struct nfs4_client *clp)
{
- struct nfs4_cb_conn *cb = &clp->cl_cb_conn;
struct rpc_message msg = {
.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_NULL],
.rpc_argp = clp,
@@ -545,34 +538,27 @@ void do_probe_callback(struct nfs4_client *clp)
};
int status;
- status = rpc_call_async(cb->cb_client, &msg,
+ status = rpc_call_async(clp->cl_cb_client, &msg,
RPC_TASK_SOFT | RPC_TASK_SOFTCONN,
&nfsd4_cb_probe_ops, (void *)clp);
- if (status) {
+ if (status)
warn_no_callback_path(clp, status);
- put_nfs4_client(clp);
- }
}
/*
* Set up the callback client and put a NFSPROC4_CB_NULL on the wire...
*/
-void
-nfsd4_probe_callback(struct nfs4_client *clp)
+void nfsd4_probe_callback(struct nfs4_client *clp, struct nfs4_cb_conn *cb)
{
int status;
- BUG_ON(atomic_read(&clp->cl_cb_conn.cb_set));
+ BUG_ON(atomic_read(&clp->cl_cb_set));
- status = setup_callback_client(clp);
+ status = setup_callback_client(clp, cb);
if (status) {
warn_no_callback_path(clp, status);
return;
}
-
- /* the task holds a reference to the nfs4_client struct */
- atomic_inc(&clp->cl_count);
-
do_probe_callback(clp);
}
@@ -658,18 +644,32 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
}
}
+
static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
{
struct nfs4_delegation *dp = calldata;
struct nfs4_client *clp = dp->dl_client;
+ struct rpc_clnt *current_rpc_client = clp->cl_cb_client;
nfsd4_cb_done(task, calldata);
+ if (current_rpc_client == NULL) {
+ /* We're shutting down; give up. */
+ /* XXX: err, or is it ok just to fall through
+ * and rpc_restart_call? */
+ return;
+ }
+
switch (task->tk_status) {
case -EIO:
/* Network partition? */
- atomic_set(&clp->cl_cb_conn.cb_set, 0);
+ atomic_set(&clp->cl_cb_set, 0);
warn_no_callback_path(clp, task->tk_status);
+ if (current_rpc_client != task->tk_client) {
+ /* queue a callback on the new connection: */
+ nfsd4_cb_recall(dp);
+ return;
+ }
case -EBADHANDLE:
case -NFS4ERR_BAD_STATEID:
/* Race: client probably got cb_recall
@@ -677,7 +677,7 @@ static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
break;
default:
/* success, or error we can't handle */
- goto done;
+ return;
}
if (dp->dl_retries--) {
rpc_delay(task, 2*HZ);
@@ -685,20 +685,16 @@ static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
rpc_restart_call(task);
return;
} else {
- atomic_set(&clp->cl_cb_conn.cb_set, 0);
+ atomic_set(&clp->cl_cb_set, 0);
warn_no_callback_path(clp, task->tk_status);
}
-done:
- kfree(task->tk_msg.rpc_argp);
}
static void nfsd4_cb_recall_release(void *calldata)
{
struct nfs4_delegation *dp = calldata;
- struct nfs4_client *clp = dp->dl_client;
nfs4_put_delegation(dp);
- put_nfs4_client(clp);
}
static const struct rpc_call_ops nfsd4_cb_recall_ops = {
@@ -707,33 +703,74 @@ static const struct rpc_call_ops nfsd4_cb_recall_ops = {
.rpc_release = nfsd4_cb_recall_release,
};
+static struct workqueue_struct *callback_wq;
+
+int nfsd4_create_callback_queue(void)
+{
+ callback_wq = create_singlethread_workqueue("nfsd4_callbacks");
+ if (!callback_wq)
+ return -ENOMEM;
+ return 0;
+}
+
+void nfsd4_destroy_callback_queue(void)
+{
+ destroy_workqueue(callback_wq);
+}
+
+void nfsd4_set_callback_client(struct nfs4_client *clp, struct rpc_clnt *new)
+{
+ struct rpc_clnt *old = clp->cl_cb_client;
+
+ clp->cl_cb_client = new;
+ /*
+ * After this, any work that saw the old value of cl_cb_client will
+ * be gone:
+ */
+ flush_workqueue(callback_wq);
+ /* So we can safely shut it down: */
+ if (old)
+ rpc_shutdown_client(old);
+}
+
/*
* called with dp->dl_count inc'ed.
*/
-void
-nfsd4_cb_recall(struct nfs4_delegation *dp)
+static void _nfsd4_cb_recall(struct nfs4_delegation *dp)
{
struct nfs4_client *clp = dp->dl_client;
- struct rpc_clnt *clnt = clp->cl_cb_conn.cb_client;
- struct nfs4_rpc_args *args;
+ struct rpc_clnt *clnt = clp->cl_cb_client;
+ struct nfs4_rpc_args *args = &dp->dl_recall.cb_args;
struct rpc_message msg = {
.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL],
.rpc_cred = callback_cred
};
- int status = -ENOMEM;
+ int status;
+
+ if (clnt == NULL)
+ return; /* Client is shutting down; give up. */
- args = kzalloc(sizeof(*args), GFP_KERNEL);
- if (!args)
- goto out;
args->args_op = dp;
msg.rpc_argp = args;
dp->dl_retries = 1;
status = rpc_call_async(clnt, &msg, RPC_TASK_SOFT,
&nfsd4_cb_recall_ops, dp);
-out:
- if (status) {
- kfree(args);
- put_nfs4_client(clp);
+ if (status)
nfs4_put_delegation(dp);
- }
+}
+
+void nfsd4_do_callback_rpc(struct work_struct *w)
+{
+ /* XXX: for now, just send off delegation recall. */
+ /* In future, generalize to handle any sort of callback. */
+ struct nfsd4_callback *c = container_of(w, struct nfsd4_callback, cb_work);
+ struct nfs4_delegation *dp = container_of(c, struct nfs4_delegation, dl_recall);
+
+ _nfsd4_cb_recall(dp);
+}
+
+
+void nfsd4_cb_recall(struct nfs4_delegation *dp)
+{
+ queue_work(callback_wq, &dp->dl_recall.cb_work);
}
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 2ab9e8501bfe..e2dc9608281b 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -969,20 +969,36 @@ static struct nfsd4_operation nfsd4_ops[];
static const char *nfsd4_op_name(unsigned opnum);
/*
- * Enforce NFSv4.1 COMPOUND ordering rules.
+ * Enforce NFSv4.1 COMPOUND ordering rules:
*
- * TODO:
- * - enforce NFS4ERR_NOT_ONLY_OP,
- * - DESTROY_SESSION MUST be the final operation in the COMPOUND request.
+ * Also note, enforced elsewhere:
+ * - SEQUENCE other than as first op results in
+ * NFS4ERR_SEQUENCE_POS. (Enforced in nfsd4_sequence().)
+ * - BIND_CONN_TO_SESSION must be the only op in its compound
+ * (Will be enforced in nfsd4_bind_conn_to_session().)
+ * - DESTROY_SESSION must be the final operation in a compound, if
+ * sessionid's in SEQUENCE and DESTROY_SESSION are the same.
+ * (Enforced in nfsd4_destroy_session().)
*/
-static bool nfs41_op_ordering_ok(struct nfsd4_compoundargs *args)
+static __be32 nfs41_check_op_ordering(struct nfsd4_compoundargs *args)
{
- if (args->minorversion && args->opcnt > 0) {
- struct nfsd4_op *op = &args->ops[0];
- return (op->status == nfserr_op_illegal) ||
- (nfsd4_ops[op->opnum].op_flags & ALLOWED_AS_FIRST_OP);
- }
- return true;
+ struct nfsd4_op *op = &args->ops[0];
+
+ /* These ordering requirements don't apply to NFSv4.0: */
+ if (args->minorversion == 0)
+ return nfs_ok;
+ /* This is weird, but OK, not our problem: */
+ if (args->opcnt == 0)
+ return nfs_ok;
+ if (op->status == nfserr_op_illegal)
+ return nfs_ok;
+ if (!(nfsd4_ops[op->opnum].op_flags & ALLOWED_AS_FIRST_OP))
+ return nfserr_op_not_in_session;
+ if (op->opnum == OP_SEQUENCE)
+ return nfs_ok;
+ if (args->opcnt != 1)
+ return nfserr_not_only_op;
+ return nfs_ok;
}
/*
@@ -1012,6 +1028,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
resp->rqstp = rqstp;
resp->cstate.minorversion = args->minorversion;
resp->cstate.replay_owner = NULL;
+ resp->cstate.session = NULL;
fh_init(&resp->cstate.current_fh, NFS4_FHSIZE);
fh_init(&resp->cstate.save_fh, NFS4_FHSIZE);
/* Use the deferral mechanism only for NFSv4.0 compounds */
@@ -1024,13 +1041,13 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
if (args->minorversion > nfsd_supported_minorversion)
goto out;
- if (!nfs41_op_ordering_ok(args)) {
+ status = nfs41_check_op_ordering(args);
+ if (status) {
op = &args->ops[0];
- op->status = nfserr_sequence_pos;
+ op->status = status;
goto encode_op;
}
- status = nfs_ok;
while (!status && resp->opcnt < args->opcnt) {
op = &args->ops[resp->opcnt++];
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 6a8fedaa4f55..835d6cef9ae9 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -45,8 +45,8 @@
#define NFSDDBG_FACILITY NFSDDBG_PROC
/* Globals */
-static time_t lease_time = 90; /* default lease time */
-static time_t user_lease_time = 90;
+time_t nfsd4_lease = 90; /* default lease time */
+time_t nfsd4_grace = 90;
static time_t boot_time;
static u32 current_ownerid = 1;
static u32 current_fileid = 1;
@@ -199,6 +199,7 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_f
atomic_set(&dp->dl_count, 1);
list_add(&dp->dl_perfile, &fp->fi_delegations);
list_add(&dp->dl_perclnt, &clp->cl_delegations);
+ INIT_WORK(&dp->dl_recall.cb_work, nfsd4_do_callback_rpc);
return dp;
}
@@ -680,27 +681,9 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
return clp;
}
-static void
-shutdown_callback_client(struct nfs4_client *clp)
-{
- struct rpc_clnt *clnt = clp->cl_cb_conn.cb_client;
-
- if (clnt) {
- /*
- * Callback threads take a reference on the client, so there
- * should be no outstanding callbacks at this point.
- */
- clp->cl_cb_conn.cb_client = NULL;
- rpc_shutdown_client(clnt);
- }
-}
-
static inline void
free_client(struct nfs4_client *clp)
{
- shutdown_callback_client(clp);
- if (clp->cl_cb_xprt)
- svc_xprt_put(clp->cl_cb_xprt);
if (clp->cl_cred.cr_group_info)
put_group_info(clp->cl_cred.cr_group_info);
kfree(clp->cl_principal);
@@ -708,13 +691,6 @@ free_client(struct nfs4_client *clp)
kfree(clp);
}
-void
-put_nfs4_client(struct nfs4_client *clp)
-{
- if (atomic_dec_and_test(&clp->cl_count))
- free_client(clp);
-}
-
static void
expire_client(struct nfs4_client *clp)
{
@@ -722,9 +698,6 @@ expire_client(struct nfs4_client *clp)
struct nfs4_delegation *dp;
struct list_head reaplist;
- dprintk("NFSD: expire_client cl_count %d\n",
- atomic_read(&clp->cl_count));
-
INIT_LIST_HEAD(&reaplist);
spin_lock(&recall_lock);
while (!list_empty(&clp->cl_delegations)) {
@@ -753,7 +726,10 @@ expire_client(struct nfs4_client *clp)
se_perclnt);
release_session(ses);
}
- put_nfs4_client(clp);
+ nfsd4_set_callback_client(clp, NULL);
+ if (clp->cl_cb_conn.cb_xprt)
+ svc_xprt_put(clp->cl_cb_conn.cb_xprt);
+ free_client(clp);
}
static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
@@ -839,8 +815,7 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
}
memcpy(clp->cl_recdir, recdir, HEXDIR_LEN);
- atomic_set(&clp->cl_count, 1);
- atomic_set(&clp->cl_cb_conn.cb_set, 0);
+ atomic_set(&clp->cl_cb_set, 0);
INIT_LIST_HEAD(&clp->cl_idhash);
INIT_LIST_HEAD(&clp->cl_strhash);
INIT_LIST_HEAD(&clp->cl_openowners);
@@ -1327,15 +1302,9 @@ nfsd4_create_session(struct svc_rqst *rqstp,
cs_slot->sl_seqid++; /* from 0 to 1 */
move_to_confirmed(unconf);
- /*
- * We do not support RDMA or persistent sessions
- */
- cr_ses->flags &= ~SESSION4_PERSIST;
- cr_ses->flags &= ~SESSION4_RDMA;
-
if (cr_ses->flags & SESSION4_BACK_CHAN) {
- unconf->cl_cb_xprt = rqstp->rq_xprt;
- svc_xprt_get(unconf->cl_cb_xprt);
+ unconf->cl_cb_conn.cb_xprt = rqstp->rq_xprt;
+ svc_xprt_get(rqstp->rq_xprt);
rpc_copy_addr(
(struct sockaddr *)&unconf->cl_cb_conn.cb_addr,
sa);
@@ -1344,7 +1313,7 @@ nfsd4_create_session(struct svc_rqst *rqstp,
cstate->minorversion;
unconf->cl_cb_conn.cb_prog = cr_ses->callback_prog;
unconf->cl_cb_seq_nr = 1;
- nfsd4_probe_callback(unconf);
+ nfsd4_probe_callback(unconf, &unconf->cl_cb_conn);
}
conf = unconf;
} else {
@@ -1352,6 +1321,12 @@ nfsd4_create_session(struct svc_rqst *rqstp,
goto out;
}
+ /*
+ * We do not support RDMA or persistent sessions
+ */
+ cr_ses->flags &= ~SESSION4_PERSIST;
+ cr_ses->flags &= ~SESSION4_RDMA;
+
status = alloc_init_session(rqstp, conf, cr_ses);
if (status)
goto out;
@@ -1369,6 +1344,21 @@ out:
return status;
}
+static bool nfsd4_last_compound_op(struct svc_rqst *rqstp)
+{
+ struct nfsd4_compoundres *resp = rqstp->rq_resp;
+ struct nfsd4_compoundargs *argp = rqstp->rq_argp;
+
+ return argp->opcnt == resp->opcnt;
+}
+
+static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
+{
+ if (!session)
+ return 0;
+ return !memcmp(sid, &session->se_sessionid, sizeof(*sid));
+}
+
__be32
nfsd4_destroy_session(struct svc_rqst *r,
struct nfsd4_compound_state *cstate,
@@ -1384,6 +1374,10 @@ nfsd4_destroy_session(struct svc_rqst *r,
* - Do we need to clear any callback info from previous session?
*/
+ if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
+ if (!nfsd4_last_compound_op(r))
+ return nfserr_not_only_op;
+ }
dump_sessionid(__func__, &sessionid->sessionid);
spin_lock(&sessionid_lock);
ses = find_in_sessionid_hashtbl(&sessionid->sessionid);
@@ -1396,7 +1390,7 @@ nfsd4_destroy_session(struct svc_rqst *r,
spin_unlock(&sessionid_lock);
/* wait for callbacks */
- shutdown_callback_client(ses->se_client);
+ nfsd4_set_callback_client(ses->se_client, NULL);
nfsd4_put_session(ses);
status = nfs_ok;
out:
@@ -1456,11 +1450,10 @@ nfsd4_sequence(struct svc_rqst *rqstp,
cstate->slot = slot;
cstate->session = session;
- /* Hold a session reference until done processing the compound:
- * nfsd4_put_session called only if the cstate slot is set.
- */
- nfsd4_get_session(session);
out:
+ /* Hold a session reference until done processing the compound. */
+ if (cstate->session)
+ nfsd4_get_session(cstate->session);
spin_unlock(&sessionid_lock);
/* Renew the clientid on success and on replay */
if (cstate->session) {
@@ -1631,9 +1624,8 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
if (!same_creds(&conf->cl_cred, &unconf->cl_cred))
status = nfserr_clid_inuse;
else {
- /* XXX: We just turn off callbacks until we can handle
- * change request correctly. */
- atomic_set(&conf->cl_cb_conn.cb_set, 0);
+ atomic_set(&conf->cl_cb_set, 0);
+ nfsd4_probe_callback(conf, &unconf->cl_cb_conn);
expire_client(unconf);
status = nfs_ok;
@@ -1667,7 +1659,7 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
}
move_to_confirmed(unconf);
conf = unconf;
- nfsd4_probe_callback(conf);
+ nfsd4_probe_callback(conf, &conf->cl_cb_conn);
status = nfs_ok;
}
} else if ((!conf || (conf && !same_verf(&conf->cl_confirm, &confirm)))
@@ -2028,7 +2020,6 @@ void nfsd_break_deleg_cb(struct file_lock *fl)
* lock) we know the server hasn't removed the lease yet, we know
* it's safe to take a reference: */
atomic_inc(&dp->dl_count);
- atomic_inc(&dp->dl_client->cl_count);
spin_lock(&recall_lock);
list_add_tail(&dp->dl_recall_lru, &del_recall_lru);
@@ -2347,7 +2338,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta
{
struct nfs4_delegation *dp;
struct nfs4_stateowner *sop = stp->st_stateowner;
- struct nfs4_cb_conn *cb = &sop->so_client->cl_cb_conn;
+ int cb_up = atomic_read(&sop->so_client->cl_cb_set);
struct file_lock fl, *flp = &fl;
int status, flag = 0;
@@ -2355,7 +2346,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta
open->op_recall = 0;
switch (open->op_claim_type) {
case NFS4_OPEN_CLAIM_PREVIOUS:
- if (!atomic_read(&cb->cb_set))
+ if (!cb_up)
open->op_recall = 1;
flag = open->op_delegate_type;
if (flag == NFS4_OPEN_DELEGATE_NONE)
@@ -2366,7 +2357,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta
* had the chance to reclaim theirs.... */
if (locks_in_grace())
goto out;
- if (!atomic_read(&cb->cb_set) || !sop->so_confirmed)
+ if (!cb_up || !sop->so_confirmed)
goto out;
if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
flag = NFS4_OPEN_DELEGATE_WRITE;
@@ -2537,7 +2528,7 @@ nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
renew_client(clp);
status = nfserr_cb_path_down;
if (!list_empty(&clp->cl_delegations)
- && !atomic_read(&clp->cl_cb_conn.cb_set))
+ && !atomic_read(&clp->cl_cb_set))
goto out;
status = nfs_ok;
out:
@@ -2554,6 +2545,12 @@ nfsd4_end_grace(void)
dprintk("NFSD: end of grace period\n");
nfsd4_recdir_purge_old();
locks_end_grace(&nfsd4_manager);
+ /*
+ * Now that every NFSv4 client has had the chance to recover and
+ * to see the (possibly new, possibly shorter) lease time, we
+ * can safely set the next grace time to the current lease time:
+ */
+ nfsd4_grace = nfsd4_lease;
}
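
A minimal user-space sketch of the timing relationship established above, assuming the stock 90-second defaults for nfsd4_lease and nfsd4_grace (both values here are hypothetical): once the first grace period ends, the next one is clamped to whatever lease time is currently configured.

#include <stdio.h>

int main(void)
{
	long lease = 90;	/* hypothetical nfsd4_lease, seconds */
	long grace = 90;	/* hypothetical nfsd4_grace, seconds */

	printf("this boot: reclaim window is %ld s\n", grace);
	grace = lease;		/* what nfsd4_end_grace() now does */
	printf("a later restart would use a %ld s grace period\n", grace);
	return 0;
}
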
static time_t
@@ -2563,9 +2560,9 @@ nfs4_laundromat(void)
struct nfs4_stateowner *sop;
struct nfs4_delegation *dp;
struct list_head *pos, *next, reaplist;
- time_t cutoff = get_seconds() - NFSD_LEASE_TIME;
- time_t t, clientid_val = NFSD_LEASE_TIME;
- time_t u, test_val = NFSD_LEASE_TIME;
+ time_t cutoff = get_seconds() - nfsd4_lease;
+ time_t t, clientid_val = nfsd4_lease;
+ time_t u, test_val = nfsd4_lease;
nfs4_lock_state();
@@ -2605,7 +2602,7 @@ nfs4_laundromat(void)
list_del_init(&dp->dl_recall_lru);
unhash_delegation(dp);
}
- test_val = NFSD_LEASE_TIME;
+ test_val = nfsd4_lease;
list_for_each_safe(pos, next, &close_lru) {
sop = list_entry(pos, struct nfs4_stateowner, so_close_lru);
if (time_after((unsigned long)sop->so_time, (unsigned long)cutoff)) {
@@ -2675,7 +2672,7 @@ EXPIRED_STATEID(stateid_t *stateid)
{
if (time_before((unsigned long)boot_time,
((unsigned long)stateid->si_boot)) &&
- time_before((unsigned long)(stateid->si_boot + lease_time), get_seconds())) {
+ time_before((unsigned long)(stateid->si_boot + nfsd4_lease), get_seconds())) {
dprintk("NFSD: expired stateid " STATEID_FMT "!\n",
STATEID_VAL(stateid));
return 1;
@@ -3976,12 +3973,6 @@ nfsd4_load_reboot_recovery_data(void)
printk("NFSD: Failure reading reboot recovery data\n");
}
-unsigned long
-get_nfs4_grace_period(void)
-{
- return max(user_lease_time, lease_time) * HZ;
-}
-
/*
* Since the lifetime of a delegation isn't limited to that of an open, a
* client may quite reasonably hang on to a delegation as long as it has
@@ -4008,20 +3999,27 @@ set_max_delegations(void)
static int
__nfs4_state_start(void)
{
- unsigned long grace_time;
+ int ret;
boot_time = get_seconds();
- grace_time = get_nfs4_grace_period();
- lease_time = user_lease_time;
locks_start_grace(&nfsd4_manager);
printk(KERN_INFO "NFSD: starting %ld-second grace period\n",
- grace_time/HZ);
+ nfsd4_grace);
+ ret = set_callback_cred();
+ if (ret)
+ return -ENOMEM;
laundry_wq = create_singlethread_workqueue("nfsd4");
if (laundry_wq == NULL)
return -ENOMEM;
- queue_delayed_work(laundry_wq, &laundromat_work, grace_time);
+ ret = nfsd4_create_callback_queue();
+ if (ret)
+ goto out_free_laundry;
+ queue_delayed_work(laundry_wq, &laundromat_work, nfsd4_grace * HZ);
set_max_delegations();
- return set_callback_cred();
+ return 0;
+out_free_laundry:
+ destroy_workqueue(laundry_wq);
+ return ret;
}
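
The reworked startup path now has two resources to unwind (the laundromat workqueue and the new callback queue) and uses the usual goto-label pattern so a failure in the second releases the first. A hedged, self-contained sketch of that pattern, with malloc() standing in for the real constructors:

#include <errno.h>
#include <stdlib.h>

static int start_state(void)
{
	void *laundry, *callbacks;

	laundry = malloc(16);		/* stands in for create_singlethread_workqueue() */
	if (!laundry)
		return -ENOMEM;
	callbacks = malloc(16);		/* stands in for nfsd4_create_callback_queue() */
	if (!callbacks) {
		free(laundry);		/* the out_free_laundry label above */
		return -ENOMEM;
	}
	/* both live; shutdown later releases them in the reverse order */
	free(callbacks);
	free(laundry);
	return 0;
}

int main(void)
{
	return start_state() ? 1 : 0;
}
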
int
@@ -4039,12 +4037,6 @@ nfs4_state_start(void)
return 0;
}
-time_t
-nfs4_lease_time(void)
-{
- return lease_time;
-}
-
static void
__nfs4_state_shutdown(void)
{
@@ -4089,6 +4081,7 @@ nfs4_state_shutdown(void)
nfs4_lock_state();
nfs4_release_reclaim();
__nfs4_state_shutdown();
+ nfsd4_destroy_callback_queue();
nfs4_unlock_state();
}
@@ -4128,21 +4121,3 @@ nfs4_recoverydir(void)
{
return user_recovery_dirname;
}
-
-/*
- * Called when leasetime is changed.
- *
- * The only way the protocol gives us to handle on-the-fly lease changes is to
- * simulate a reboot. Instead of doing that, we just wait till the next time
- * we start to register any changes in lease time. If the administrator
- * really wants to change the lease time *now*, they can go ahead and bring
- * nfsd down and then back up again after changing the lease time.
- *
- * user_lease_time is protected by nfsd_mutex since it's only really accessed
- * when nfsd is starting
- */
-void
-nfs4_reset_lease(time_t leasetime)
-{
- user_lease_time = leasetime;
-}
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 34ccf815ea8a..5c2de471329a 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1900,7 +1900,7 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
if (bmval0 & FATTR4_WORD0_LEASE_TIME) {
if ((buflen -= 4) < 0)
goto out_resource;
- WRITE32(NFSD_LEASE_TIME);
+ WRITE32(nfsd4_lease);
}
if (bmval0 & FATTR4_WORD0_RDATTR_ERROR) {
if ((buflen -= 4) < 0)
@@ -3307,11 +3307,13 @@ nfs4svc_encode_compoundres(struct svc_rqst *rqstp, __be32 *p, struct nfsd4_compo
iov = &rqstp->rq_res.head[0];
iov->iov_len = ((char*)resp->p) - (char*)iov->iov_base;
BUG_ON(iov->iov_len > PAGE_SIZE);
- if (nfsd4_has_session(cs) && cs->status != nfserr_replay_cache) {
- nfsd4_store_cache_entry(resp);
- dprintk("%s: SET SLOT STATE TO AVAILABLE\n", __func__);
- resp->cstate.slot->sl_inuse = false;
- nfsd4_put_session(resp->cstate.session);
+ if (nfsd4_has_session(cs)) {
+ if (cs->status != nfserr_replay_cache) {
+ nfsd4_store_cache_entry(resp);
+ dprintk("%s: SET SLOT STATE TO AVAILABLE\n", __func__);
+ cs->slot->sl_inuse = false;
+ }
+ nfsd4_put_session(cs->session);
}
return 1;
}
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index e3591073098f..bc3194ea01f5 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -46,6 +46,7 @@ enum {
*/
#ifdef CONFIG_NFSD_V4
NFSD_Leasetime,
+ NFSD_Gracetime,
NFSD_RecoveryDir,
#endif
};
@@ -70,6 +71,7 @@ static ssize_t write_ports(struct file *file, char *buf, size_t size);
static ssize_t write_maxblksize(struct file *file, char *buf, size_t size);
#ifdef CONFIG_NFSD_V4
static ssize_t write_leasetime(struct file *file, char *buf, size_t size);
+static ssize_t write_gracetime(struct file *file, char *buf, size_t size);
static ssize_t write_recoverydir(struct file *file, char *buf, size_t size);
#endif
@@ -91,6 +93,7 @@ static ssize_t (*write_op[])(struct file *, char *, size_t) = {
[NFSD_MaxBlkSize] = write_maxblksize,
#ifdef CONFIG_NFSD_V4
[NFSD_Leasetime] = write_leasetime,
+ [NFSD_Gracetime] = write_gracetime,
[NFSD_RecoveryDir] = write_recoverydir,
#endif
};
@@ -1204,29 +1207,45 @@ static ssize_t write_maxblksize(struct file *file, char *buf, size_t size)
}
#ifdef CONFIG_NFSD_V4
-extern time_t nfs4_leasetime(void);
-
-static ssize_t __write_leasetime(struct file *file, char *buf, size_t size)
+static ssize_t __nfsd4_write_time(struct file *file, char *buf, size_t size, time_t *time)
{
- /* if size > 10 seconds, call
- * nfs4_reset_lease() then write out the new lease (seconds) as reply
- */
char *mesg = buf;
- int rv, lease;
+ int rv, i;
if (size > 0) {
if (nfsd_serv)
return -EBUSY;
- rv = get_int(&mesg, &lease);
+ rv = get_int(&mesg, &i);
if (rv)
return rv;
- if (lease < 10 || lease > 3600)
+ /*
+ * Some sanity checking. We don't have a reason for
+ * these particular numbers, but problems with the
+ * extremes are:
+ * - Too short: the briefest network outage may
+ * cause clients to lose all their locks. Also,
+ * the frequent polling may be wasteful.
+ * - Too long: do you really want reboot recovery
+ * to take more than an hour? Or to make other
+ * clients wait an hour before being able to
+ * revoke a dead client's locks?
+ */
+ if (i < 10 || i > 3600)
return -EINVAL;
- nfs4_reset_lease(lease);
+ *time = i;
}
- return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%ld\n",
- nfs4_lease_time());
+ return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%ld\n", *time);
+}
+
+static ssize_t nfsd4_write_time(struct file *file, char *buf, size_t size, time_t *time)
+{
+ ssize_t rv;
+
+ mutex_lock(&nfsd_mutex);
+ rv = __nfsd4_write_time(file, buf, size, time);
+ mutex_unlock(&nfsd_mutex);
+ return rv;
}
/**
@@ -1252,12 +1271,22 @@ static ssize_t __write_leasetime(struct file *file, char *buf, size_t size)
*/
static ssize_t write_leasetime(struct file *file, char *buf, size_t size)
{
- ssize_t rv;
+ return nfsd4_write_time(file, buf, size, &nfsd4_lease);
+}
- mutex_lock(&nfsd_mutex);
- rv = __write_leasetime(file, buf, size);
- mutex_unlock(&nfsd_mutex);
- return rv;
+/**
+ * write_gracetime - Set or report current NFSv4 grace period time
+ *
+ * As above, but sets the time of the NFSv4 grace period.
+ *
+ * Note this should never be set to less than the *previous*
+ * lease-period time, but we don't try to enforce this.  (In the
+ * common case of a new boot, we don't know what the previous lease
+ * time was anyway.)
+ */
+static ssize_t write_gracetime(struct file *file, char *buf, size_t size)
+{
+ return nfsd4_write_time(file, buf, size, &nfsd4_grace);
}
extern char *nfs4_recoverydir(void);
@@ -1351,6 +1380,7 @@ static int nfsd_fill_super(struct super_block * sb, void * data, int silent)
[NFSD_MaxBlkSize] = {"max_block_size", &transaction_ops, S_IWUSR|S_IRUGO},
#ifdef CONFIG_NFSD_V4
[NFSD_Leasetime] = {"nfsv4leasetime", &transaction_ops, S_IWUSR|S_IRUSR},
+ [NFSD_Gracetime] = {"nfsv4gracetime", &transaction_ops, S_IWUSR|S_IRUSR},
[NFSD_RecoveryDir] = {"nfsv4recoverydir", &transaction_ops, S_IWUSR|S_IRUSR},
#endif
/* last one */ {""}
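
With the table entry above, the grace period becomes tunable through a new nfsv4gracetime file alongside nfsv4leasetime. A hedged user-space sketch of setting it, assuming the nfsd filesystem is mounted at its usual /proc/fs/nfsd location and that nfsd is not yet running (the handler returns -EBUSY once nfsd_serv is set); the 10..3600 second bounds come from __nfsd4_write_time() above.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/proc/fs/nfsd/nfsv4gracetime";
	const char *val = "45\n";		/* seconds; must be within 10..3600 */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return 1;
	}
	if (write(fd, val, strlen(val)) < 0)
		perror("write");
	close(fd);
	return 0;
}
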
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index e942a1aaac92..72377761270e 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -82,7 +82,6 @@ int nfs4_state_init(void);
void nfsd4_free_slabs(void);
int nfs4_state_start(void);
void nfs4_state_shutdown(void);
-time_t nfs4_lease_time(void);
void nfs4_reset_lease(time_t leasetime);
int nfs4_reset_recoverydir(char *recdir);
#else
@@ -90,7 +89,6 @@ static inline int nfs4_state_init(void) { return 0; }
static inline void nfsd4_free_slabs(void) { }
static inline int nfs4_state_start(void) { return 0; }
static inline void nfs4_state_shutdown(void) { }
-static inline time_t nfs4_lease_time(void) { return 0; }
static inline void nfs4_reset_lease(time_t leasetime) { }
static inline int nfs4_reset_recoverydir(char *recdir) { return 0; }
#endif
@@ -229,6 +227,9 @@ extern struct timeval nfssvc_boot;
#ifdef CONFIG_NFSD_V4
+extern time_t nfsd4_lease;
+extern time_t nfsd4_grace;
+
/* before processing a COMPOUND operation, we have to check that there
* is enough space in the buffer for XDR encode to succeed. otherwise,
* we might process an operation with side effects, and be unable to
@@ -247,7 +248,6 @@ extern struct timeval nfssvc_boot;
#define COMPOUND_SLACK_SPACE 140 /* OP_GETFH */
#define COMPOUND_ERR_SLACK_SPACE 12 /* OP_SETATTR */
-#define NFSD_LEASE_TIME (nfs4_lease_time())
#define NFSD_LAUNDROMAT_MINTIMEOUT 10 /* seconds */
/*
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index fefeae27f25e..98836fd87f69 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -70,6 +70,16 @@ struct nfsd4_cb_sequence {
struct nfs4_client *cbs_clp;
};
+struct nfs4_rpc_args {
+ void *args_op;
+ struct nfsd4_cb_sequence args_seq;
+};
+
+struct nfsd4_callback {
+ struct nfs4_rpc_args cb_args;
+ struct work_struct cb_work;
+};
+
struct nfs4_delegation {
struct list_head dl_perfile;
struct list_head dl_perclnt;
@@ -86,6 +96,7 @@ struct nfs4_delegation {
stateid_t dl_stateid;
struct knfsd_fh dl_fh;
int dl_retries;
+ struct nfsd4_callback dl_recall;
};
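
The new struct nfsd4_callback is embedded in the delegation so the work handler can recover the owning nfs4_delegation with container_of(). A hedged, self-contained sketch of that embedding; the names mirror the kernel ones, but the work_struct is replaced by a plain function pointer so the example compiles on its own.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_callback {
	void (*fn)(struct fake_callback *);	/* stands in for cb_work */
};

struct fake_delegation {
	int dl_retries;
	struct fake_callback dl_recall;
};

static void recall_work(struct fake_callback *cb)
{
	struct fake_delegation *dp =
		container_of(cb, struct fake_delegation, dl_recall);

	printf("recalling delegation, %d retries left\n", dp->dl_retries);
}

int main(void)
{
	struct fake_delegation dp = {
		.dl_retries = 1,
		.dl_recall = { .fn = recall_work },
	};

	dp.dl_recall.fn(&dp.dl_recall);	/* what queue_work() would do asynchronously */
	return 0;
}
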
/* client delegation callback info */
@@ -96,9 +107,7 @@ struct nfs4_cb_conn {
u32 cb_prog;
u32 cb_minorversion;
u32 cb_ident; /* minorversion 0 only */
- /* RPC client info */
- atomic_t cb_set; /* successful CB_NULL call */
- struct rpc_clnt * cb_client;
+ struct svc_xprt *cb_xprt; /* minorversion 1 only */
};
/* Maximum number of slots per session. 160 is useful for long haul TCP */
@@ -212,10 +221,13 @@ struct nfs4_client {
struct svc_cred cl_cred; /* setclientid principal */
clientid_t cl_clientid; /* generated by server */
nfs4_verifier cl_confirm; /* generated by server */
- struct nfs4_cb_conn cl_cb_conn; /* callback info */
- atomic_t cl_count; /* ref count */
u32 cl_firststate; /* recovery dir creation */
+ /* for v4.0 and v4.1 callbacks: */
+ struct nfs4_cb_conn cl_cb_conn;
+ struct rpc_clnt *cl_cb_client;
+ atomic_t cl_cb_set;
+
/* for nfs41 */
struct list_head cl_sessions;
struct nfsd4_clid_slot cl_cs_slot; /* create_session slot */
@@ -226,7 +238,6 @@ struct nfs4_client {
/* We currently support a single back channel with a single slot */
unsigned long cl_cb_slot_busy;
u32 cl_cb_seq_nr;
- struct svc_xprt *cl_cb_xprt; /* 4.1 callback transport */
struct rpc_wait_queue cl_cb_waitq; /* backchannel callers may */
/* wait here for slots */
};
@@ -377,11 +388,14 @@ extern void nfs4_lock_state(void);
extern void nfs4_unlock_state(void);
extern int nfs4_in_grace(void);
extern __be32 nfs4_check_open_reclaim(clientid_t *clid);
-extern void put_nfs4_client(struct nfs4_client *clp);
extern void nfs4_free_stateowner(struct kref *kref);
extern int set_callback_cred(void);
-extern void nfsd4_probe_callback(struct nfs4_client *clp);
+extern void nfsd4_probe_callback(struct nfs4_client *clp, struct nfs4_cb_conn *);
+extern void nfsd4_do_callback_rpc(struct work_struct *);
extern void nfsd4_cb_recall(struct nfs4_delegation *dp);
+extern int nfsd4_create_callback_queue(void);
+extern void nfsd4_destroy_callback_queue(void);
+extern void nfsd4_set_callback_client(struct nfs4_client *, struct rpc_clnt *);
extern void nfs4_put_delegation(struct nfs4_delegation *dp);
extern __be32 nfs4_make_rec_clidname(char *clidname, struct xdr_netobj *clname);
extern void nfsd4_init_recdir(char *recdir_name);
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 6dd5f1970e01..27d61139e53a 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -724,7 +724,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
struct inode *inode;
int flags = O_RDONLY|O_LARGEFILE;
__be32 err;
- int host_err;
+ int host_err = 0;
validate_process_creds();
@@ -761,7 +761,8 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
* Check to see if there are any leases on this file.
* This may block while leases are broken.
*/
- host_err = break_lease(inode, O_NONBLOCK | ((access & NFSD_MAY_WRITE) ? O_WRONLY : 0));
+ if (!(access & NFSD_MAY_NOT_BREAK_LEASE))
+ host_err = break_lease(inode, O_NONBLOCK | ((access & NFSD_MAY_WRITE) ? O_WRONLY : 0));
if (host_err == -EWOULDBLOCK)
host_err = -ETIMEDOUT;
if (host_err) /* NOMEM or WOULDBLOCK */
@@ -951,7 +952,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
nfsdstats.io_read += host_err;
*count = host_err;
err = 0;
- fsnotify_access(file->f_path.dentry);
+ fsnotify_access(file);
} else
err = nfserrno(host_err);
out:
@@ -1062,7 +1063,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
goto out_nfserr;
*cnt = host_err;
nfsdstats.io_write += host_err;
- fsnotify_modify(file->f_path.dentry);
+ fsnotify_modify(file);
/* clear setuid/setgid flag after write */
if (inode->i_mode & (S_ISUID | S_ISGID))
@@ -1169,7 +1170,8 @@ nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp,
goto out;
}
- err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_WRITE, &file);
+ err = nfsd_open(rqstp, fhp, S_IFREG,
+ NFSD_MAY_WRITE|NFSD_MAY_NOT_BREAK_LEASE, &file);
if (err)
goto out;
if (EX_ISSYNC(fhp->fh_export)) {
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
index 4b1de0a9ea75..217a62c2a357 100644
--- a/fs/nfsd/vfs.h
+++ b/fs/nfsd/vfs.h
@@ -20,6 +20,7 @@
#define NFSD_MAY_OWNER_OVERRIDE 64
#define NFSD_MAY_LOCAL_ACCESS 128 /* IRIX doing local access check on device special file*/
#define NFSD_MAY_BYPASS_GSS_ON_ROOT 256
+#define NFSD_MAY_NOT_BREAK_LEASE 512
#define NFSD_MAY_CREATE (NFSD_MAY_EXEC|NFSD_MAY_WRITE)
#define NFSD_MAY_REMOVE (NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC)
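
NFSD_MAY_NOT_BREAK_LEASE composes bitwise with the other access flags, so callers such as nfsd_commit() above can request a write open while telling nfsd_open() to skip break_lease(). A hedged sketch of the flag test; the NFSD_MAY_WRITE value is an assumption here, only NFSD_MAY_NOT_BREAK_LEASE (512) is taken from the hunk above.

#include <stdio.h>

#define NFSD_MAY_WRITE			2	/* assumed value, not shown in this hunk */
#define NFSD_MAY_NOT_BREAK_LEASE	512	/* from the change above */

int main(void)
{
	int access = NFSD_MAY_WRITE | NFSD_MAY_NOT_BREAK_LEASE;

	if (access & NFSD_MAY_NOT_BREAK_LEASE)
		printf("skip break_lease(), as nfsd_commit() now requests\n");
	else
		printf("would call break_lease() before opening\n");
	return 0;
}
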
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index efa337739534..c28958ec216c 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -513,9 +513,8 @@ extern void nfsd4_store_cache_entry(struct nfsd4_compoundres *resp);
extern __be32 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
struct nfsd4_sequence *seq);
extern __be32 nfsd4_exchange_id(struct svc_rqst *rqstp,
- struct nfsd4_compound_state *,
-struct nfsd4_exchange_id *);
- extern __be32 nfsd4_create_session(struct svc_rqst *,
+ struct nfsd4_compound_state *, struct nfsd4_exchange_id *);
+extern __be32 nfsd4_create_session(struct svc_rqst *,
struct nfsd4_compound_state *,
struct nfsd4_create_session *);
extern __be32 nfsd4_sequence(struct svc_rqst *,
diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c
index 7cfb87e692da..d7fd696e595c 100644
--- a/fs/nilfs2/alloc.c
+++ b/fs/nilfs2/alloc.c
@@ -31,6 +31,11 @@
#include "alloc.h"
+/**
+ * nilfs_palloc_groups_per_desc_block - get the number of groups that a group
+ * descriptor block can maintain
+ * @inode: inode of metadata file using this allocator
+ */
static inline unsigned long
nilfs_palloc_groups_per_desc_block(const struct inode *inode)
{
@@ -38,12 +43,21 @@ nilfs_palloc_groups_per_desc_block(const struct inode *inode)
sizeof(struct nilfs_palloc_group_desc);
}
+/**
+ * nilfs_palloc_groups_count - get maximum number of groups
+ * @inode: inode of metadata file using this allocator
+ */
static inline unsigned long
nilfs_palloc_groups_count(const struct inode *inode)
{
return 1UL << (BITS_PER_LONG - (inode->i_blkbits + 3 /* log2(8) */));
}
+/**
+ * nilfs_palloc_init_blockgroup - initialize private variables for allocator
+ * @inode: inode of metadata file using this allocator
+ * @entry_size: size of the persistent object
+ */
int nilfs_palloc_init_blockgroup(struct inode *inode, unsigned entry_size)
{
struct nilfs_mdt_info *mi = NILFS_MDT(inode);
@@ -69,6 +83,12 @@ int nilfs_palloc_init_blockgroup(struct inode *inode, unsigned entry_size)
return 0;
}
+/**
+ * nilfs_palloc_group - get group number and offset from an entry number
+ * @inode: inode of metadata file using this allocator
+ * @nr: serial number of the entry (e.g. inode number)
+ * @offset: pointer to store offset number in the group
+ */
static unsigned long nilfs_palloc_group(const struct inode *inode, __u64 nr,
unsigned long *offset)
{
@@ -78,6 +98,14 @@ static unsigned long nilfs_palloc_group(const struct inode *inode, __u64 nr,
return group;
}
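
The group/offset split documented above is a plain divide/modulo by the number of entries a single bitmap block can track. A self-contained sketch of the arithmetic; the 4 KiB block size and the sample entry number are hypothetical, and in the kernel the divisor comes from nilfs_palloc_entries_per_group().

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t nr = 100000;				/* e.g. an inode number */
	unsigned long entries_per_group = 4096 * 8;	/* bits in a 4 KiB bitmap block */
	unsigned long group  = nr / entries_per_group;
	unsigned long offset = nr % entries_per_group;

	printf("entry %" PRIu64 " -> group %lu, offset %lu\n", nr, group, offset);
	return 0;
}
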
+/**
+ * nilfs_palloc_desc_blkoff - get block offset of a group descriptor block
+ * @inode: inode of metadata file using this allocator
+ * @group: group number
+ *
+ * nilfs_palloc_desc_blkoff() returns block offset of the descriptor
+ * block which contains a descriptor of the specified group.
+ */
static unsigned long
nilfs_palloc_desc_blkoff(const struct inode *inode, unsigned long group)
{
@@ -86,6 +114,14 @@ nilfs_palloc_desc_blkoff(const struct inode *inode, unsigned long group)
return desc_block * NILFS_MDT(inode)->mi_blocks_per_desc_block;
}
+/**
+ * nilfs_palloc_bitmap_blkoff - get block offset of a bitmap block
+ * @inode: inode of metadata file using this allocator
+ * @group: group number
+ *
+ * nilfs_palloc_bitmap_blkoff() returns block offset of the bitmap
+ * block used to allocate/deallocate entries in the specified group.
+ */
static unsigned long
nilfs_palloc_bitmap_blkoff(const struct inode *inode, unsigned long group)
{
@@ -95,6 +131,12 @@ nilfs_palloc_bitmap_blkoff(const struct inode *inode, unsigned long group)
desc_offset * NILFS_MDT(inode)->mi_blocks_per_group;
}
+/**
+ * nilfs_palloc_group_desc_nfrees - get the number of free entries in a group
+ * @inode: inode of metadata file using this allocator
+ * @group: group number
+ * @desc: pointer to descriptor structure for the group
+ */
static unsigned long
nilfs_palloc_group_desc_nfrees(struct inode *inode, unsigned long group,
const struct nilfs_palloc_group_desc *desc)
@@ -107,6 +149,13 @@ nilfs_palloc_group_desc_nfrees(struct inode *inode, unsigned long group,
return nfree;
}
+/**
+ * nilfs_palloc_group_desc_add_entries - adjust count of free entries
+ * @inode: inode of metadata file using this allocator
+ * @group: group number
+ * @desc: pointer to descriptor structure for the group
+ * @n: delta to be added
+ */
static void
nilfs_palloc_group_desc_add_entries(struct inode *inode,
unsigned long group,
@@ -118,6 +167,11 @@ nilfs_palloc_group_desc_add_entries(struct inode *inode,
spin_unlock(nilfs_mdt_bgl_lock(inode, group));
}
+/**
+ * nilfs_palloc_entry_blkoff - get block offset of an entry block
+ * @inode: inode of metadata file using this allocator
+ * @nr: serial number of the entry (e.g. inode number)
+ */
static unsigned long
nilfs_palloc_entry_blkoff(const struct inode *inode, __u64 nr)
{
@@ -129,6 +183,12 @@ nilfs_palloc_entry_blkoff(const struct inode *inode, __u64 nr)
group_offset / NILFS_MDT(inode)->mi_entries_per_block;
}
+/**
+ * nilfs_palloc_desc_block_init - initialize buffer of a group descriptor block
+ * @inode: inode of metadata file
+ * @bh: buffer head of the buffer to be initialized
+ * @kaddr: kernel address mapped for the page including the buffer
+ */
static void nilfs_palloc_desc_block_init(struct inode *inode,
struct buffer_head *bh, void *kaddr)
{
@@ -179,6 +239,13 @@ static int nilfs_palloc_get_block(struct inode *inode, unsigned long blkoff,
return ret;
}
+/**
+ * nilfs_palloc_get_desc_block - get buffer head of a group descriptor block
+ * @inode: inode of metadata file using this allocator
+ * @group: group number
+ * @create: create flag
+ * @bhp: pointer to store the resultant buffer head
+ */
static int nilfs_palloc_get_desc_block(struct inode *inode,
unsigned long group,
int create, struct buffer_head **bhp)
@@ -191,6 +258,13 @@ static int nilfs_palloc_get_desc_block(struct inode *inode,
bhp, &cache->prev_desc, &cache->lock);
}
+/**
+ * nilfs_palloc_get_bitmap_block - get buffer head of a bitmap block
+ * @inode: inode of metadata file using this allocator
+ * @group: group number
+ * @create: create flag
+ * @bhp: pointer to store the resultant buffer head
+ */
static int nilfs_palloc_get_bitmap_block(struct inode *inode,
unsigned long group,
int create, struct buffer_head **bhp)
@@ -203,6 +277,13 @@ static int nilfs_palloc_get_bitmap_block(struct inode *inode,
&cache->prev_bitmap, &cache->lock);
}
+/**
+ * nilfs_palloc_get_entry_block - get buffer head of an entry block
+ * @inode: inode of metadata file using this allocator
+ * @nr: serial number of the entry (e.g. inode number)
+ * @create: create flag
+ * @bhp: pointer to store the resultant buffer head
+ */
int nilfs_palloc_get_entry_block(struct inode *inode, __u64 nr,
int create, struct buffer_head **bhp)
{
@@ -214,6 +295,13 @@ int nilfs_palloc_get_entry_block(struct inode *inode, __u64 nr,
&cache->prev_entry, &cache->lock);
}
+/**
+ * nilfs_palloc_block_get_group_desc - get kernel address of a group descriptor
+ * @inode: inode of metadata file using this allocator
+ * @group: group number
+ * @bh: buffer head of the buffer storing the group descriptor block
+ * @kaddr: kernel address mapped for the page including the buffer
+ */
static struct nilfs_palloc_group_desc *
nilfs_palloc_block_get_group_desc(const struct inode *inode,
unsigned long group,
@@ -223,6 +311,13 @@ nilfs_palloc_block_get_group_desc(const struct inode *inode,
group % nilfs_palloc_groups_per_desc_block(inode);
}
+/**
+ * nilfs_palloc_block_get_entry - get kernel address of an entry
+ * @inode: inode of metadata file using this allocator
+ * @nr: serial number of the entry (e.g. inode number)
+ * @bh: buffer head of the buffer storing the entry block
+ * @kaddr: kernel address mapped for the page including the buffer
+ */
void *nilfs_palloc_block_get_entry(const struct inode *inode, __u64 nr,
const struct buffer_head *bh, void *kaddr)
{
@@ -235,11 +330,19 @@ void *nilfs_palloc_block_get_entry(const struct inode *inode, __u64 nr,
entry_offset * NILFS_MDT(inode)->mi_entry_size;
}
+/**
+ * nilfs_palloc_find_available_slot - find available slot in a group
+ * @inode: inode of metadata file using this allocator
+ * @group: group number
+ * @target: offset number of an entry in the group (start point)
+ * @bitmap: bitmap of the group
+ * @bsize: size in bits
+ */
static int nilfs_palloc_find_available_slot(struct inode *inode,
unsigned long group,
unsigned long target,
unsigned char *bitmap,
- int bsize) /* size in bits */
+ int bsize)
{
int curr, pos, end, i;
@@ -277,6 +380,13 @@ static int nilfs_palloc_find_available_slot(struct inode *inode,
return -ENOSPC;
}
+/**
+ * nilfs_palloc_rest_groups_in_desc_block - get the remaining number of groups
+ * in a group descriptor block
+ * @inode: inode of metadata file using this allocator
+ * @curr: current group number
+ * @max: maximum number of groups
+ */
static unsigned long
nilfs_palloc_rest_groups_in_desc_block(const struct inode *inode,
unsigned long curr, unsigned long max)
@@ -287,6 +397,11 @@ nilfs_palloc_rest_groups_in_desc_block(const struct inode *inode,
max - curr + 1);
}
+/**
+ * nilfs_palloc_prepare_alloc_entry - prepare to allocate a persistent object
+ * @inode: inode of metadata file using this allocator
+ * @req: nilfs_palloc_req structure exchanged for the allocation
+ */
int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
struct nilfs_palloc_req *req)
{
@@ -366,6 +481,11 @@ int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
return ret;
}
+/**
+ * nilfs_palloc_commit_alloc_entry - finish allocation of a persistent object
+ * @inode: inode of metadata file using this allocator
+ * @req: nilfs_palloc_req structure exchanged for the allocation
+ */
void nilfs_palloc_commit_alloc_entry(struct inode *inode,
struct nilfs_palloc_req *req)
{
@@ -377,6 +497,11 @@ void nilfs_palloc_commit_alloc_entry(struct inode *inode,
brelse(req->pr_desc_bh);
}
+/**
+ * nilfs_palloc_commit_free_entry - finish deallocating a persistent object
+ * @inode: inode of metadata file using this allocator
+ * @req: nilfs_palloc_req structure exchanged for the removal
+ */
void nilfs_palloc_commit_free_entry(struct inode *inode,
struct nilfs_palloc_req *req)
{
@@ -410,6 +535,11 @@ void nilfs_palloc_commit_free_entry(struct inode *inode,
brelse(req->pr_desc_bh);
}
+/**
+ * nilfs_palloc_abort_alloc_entry - cancel allocation of a persistent object
+ * @inode: inode of metadata file using this allocator
+ * @req: nilfs_palloc_req structure exchanged for the allocation
+ */
void nilfs_palloc_abort_alloc_entry(struct inode *inode,
struct nilfs_palloc_req *req)
{
@@ -442,6 +572,11 @@ void nilfs_palloc_abort_alloc_entry(struct inode *inode,
req->pr_desc_bh = NULL;
}
+/**
+ * nilfs_palloc_prepare_free_entry - prepare to deallocate a persistent object
+ * @inode: inode of metadata file using this allocator
+ * @req: nilfs_palloc_req structure exchanged for the removal
+ */
int nilfs_palloc_prepare_free_entry(struct inode *inode,
struct nilfs_palloc_req *req)
{
@@ -464,6 +599,11 @@ int nilfs_palloc_prepare_free_entry(struct inode *inode,
return 0;
}
+/**
+ * nilfs_palloc_abort_free_entry - cancel deallocating a persistent object
+ * @inode: inode of metadata file using this allocator
+ * @req: nilfs_palloc_req structure exchanged for the removal
+ */
void nilfs_palloc_abort_free_entry(struct inode *inode,
struct nilfs_palloc_req *req)
{
@@ -475,6 +615,12 @@ void nilfs_palloc_abort_free_entry(struct inode *inode,
req->pr_desc_bh = NULL;
}
+/**
+ * nilfs_palloc_group_is_in - test if an entry is in the specified group
+ * @inode: inode of metadata file using this allocator
+ * @group: group number
+ * @nr: serial number of the entry (e.g. inode number)
+ */
static int
nilfs_palloc_group_is_in(struct inode *inode, unsigned long group, __u64 nr)
{
@@ -485,6 +631,12 @@ nilfs_palloc_group_is_in(struct inode *inode, unsigned long group, __u64 nr)
return (nr >= first) && (nr <= last);
}
+/**
+ * nilfs_palloc_freev - deallocate a set of persistent objects
+ * @inode: inode of metadata file using this allocator
+ * @entry_nrs: array of entry numbers to be deallocated
+ * @nitems: number of entries stored in @entry_nrs
+ */
int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
{
struct buffer_head *desc_bh, *bitmap_bh;
diff --git a/fs/nilfs2/alloc.h b/fs/nilfs2/alloc.h
index 5cccf874d692..9af34a7e6e13 100644
--- a/fs/nilfs2/alloc.h
+++ b/fs/nilfs2/alloc.h
@@ -29,6 +29,13 @@
#include <linux/buffer_head.h>
#include <linux/fs.h>
+/**
+ * nilfs_palloc_entries_per_group - get the number of entries per group
+ * @inode: inode of metadata file using this allocator
+ *
+ * The number of entries per group is defined by the number of bits
+ * that a bitmap block can maintain.
+ */
static inline unsigned long
nilfs_palloc_entries_per_group(const struct inode *inode)
{
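
As the comment above says, the per-group entry count is the bit capacity of one bitmap block, i.e. 8 * 2^i_blkbits, or 1UL << (i_blkbits + 3). A small arithmetic check under the assumption of a 4 KiB block:

#include <stdio.h>

int main(void)
{
	unsigned int blkbits = 12;			/* hypothetical: 4 KiB blocks */
	unsigned long entries = 1UL << (blkbits + 3);	/* 8 bits per byte */

	printf("blkbits=%u (block size %lu bytes) -> %lu entries per group\n",
	       blkbits, 1UL << blkbits, entries);
	return 0;
}
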
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index 76c38e3e19d2..b27a342c5af6 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -31,63 +31,16 @@
#include "alloc.h"
#include "dat.h"
-/**
- * struct nilfs_btree_path - A path on which B-tree operations are executed
- * @bp_bh: buffer head of node block
- * @bp_sib_bh: buffer head of sibling node block
- * @bp_index: index of child node
- * @bp_oldreq: ptr end request for old ptr
- * @bp_newreq: ptr alloc request for new ptr
- * @bp_op: rebalance operation
- */
-struct nilfs_btree_path {
- struct buffer_head *bp_bh;
- struct buffer_head *bp_sib_bh;
- int bp_index;
- union nilfs_bmap_ptr_req bp_oldreq;
- union nilfs_bmap_ptr_req bp_newreq;
- struct nilfs_btnode_chkey_ctxt bp_ctxt;
- void (*bp_op)(struct nilfs_btree *, struct nilfs_btree_path *,
- int, __u64 *, __u64 *);
-};
-
-/*
- * B-tree path operations
- */
-
-static struct kmem_cache *nilfs_btree_path_cache;
-
-int __init nilfs_btree_path_cache_init(void)
-{
- nilfs_btree_path_cache =
- kmem_cache_create("nilfs2_btree_path_cache",
- sizeof(struct nilfs_btree_path) *
- NILFS_BTREE_LEVEL_MAX, 0, 0, NULL);
- return (nilfs_btree_path_cache != NULL) ? 0 : -ENOMEM;
-}
-
-void nilfs_btree_path_cache_destroy(void)
-{
- kmem_cache_destroy(nilfs_btree_path_cache);
-}
-
-static inline struct nilfs_btree_path *nilfs_btree_alloc_path(void)
-{
- return kmem_cache_alloc(nilfs_btree_path_cache, GFP_NOFS);
-}
-
-static inline void nilfs_btree_free_path(struct nilfs_btree_path *path)
+static struct nilfs_btree_path *nilfs_btree_alloc_path(void)
{
- kmem_cache_free(nilfs_btree_path_cache, path);
-}
+ struct nilfs_btree_path *path;
+ int level = NILFS_BTREE_LEVEL_DATA;
-static void nilfs_btree_init_path(struct nilfs_btree_path *path)
-{
- int level;
+ path = kmem_cache_alloc(nilfs_btree_path_cache, GFP_NOFS);
+ if (path == NULL)
+ goto out;
- for (level = NILFS_BTREE_LEVEL_DATA;
- level < NILFS_BTREE_LEVEL_MAX;
- level++) {
+ for (; level < NILFS_BTREE_LEVEL_MAX; level++) {
path[level].bp_bh = NULL;
path[level].bp_sib_bh = NULL;
path[level].bp_index = 0;
@@ -95,15 +48,19 @@ static void nilfs_btree_init_path(struct nilfs_btree_path *path)
path[level].bp_newreq.bpr_ptr = NILFS_BMAP_INVALID_PTR;
path[level].bp_op = NULL;
}
+
+out:
+ return path;
}
-static void nilfs_btree_release_path(struct nilfs_btree_path *path)
+static void nilfs_btree_free_path(struct nilfs_btree_path *path)
{
- int level;
+ int level = NILFS_BTREE_LEVEL_DATA;
- for (level = NILFS_BTREE_LEVEL_DATA; level < NILFS_BTREE_LEVEL_MAX;
- level++)
+ for (; level < NILFS_BTREE_LEVEL_MAX; level++)
brelse(path[level].bp_bh);
+
+ kmem_cache_free(nilfs_btree_path_cache, path);
}
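
With the init/release helpers folded into alloc/free above, callers get a fully initialized path back from the allocator and a single free that both drops any held buffers and returns the memory. A hedged, self-contained sketch of that contract, with calloc()/free() standing in for the slab cache and brelse():

#include <stdlib.h>

#define LEVEL_MAX 14		/* stand-in for NILFS_BTREE_LEVEL_MAX */

struct fake_path {
	void *bp_bh;		/* stand-in for a pinned buffer_head */
	int bp_index;
};

static struct fake_path *alloc_path(void)
{
	/* calloc leaves every level NULL/0, matching the loop above */
	return calloc(LEVEL_MAX, sizeof(struct fake_path));
}

static void free_path(struct fake_path *path)
{
	int level;

	for (level = 0; level < LEVEL_MAX; level++)
		free(path[level].bp_bh);	/* stand-in for brelse() */
	free(path);
}

int main(void)
{
	struct fake_path *path = alloc_path();

	if (!path)
		return 1;
	free_path(path);
	return 0;
}
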
/*
@@ -566,14 +523,12 @@ static int nilfs_btree_lookup(const struct nilfs_bmap *bmap,
path = nilfs_btree_alloc_path();
if (path == NULL)
return -ENOMEM;
- nilfs_btree_init_path(path);
ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level);
if (ptrp != NULL)
*ptrp = ptr;
- nilfs_btree_release_path(path);
nilfs_btree_free_path(path);
return ret;
@@ -594,7 +549,7 @@ static int nilfs_btree_lookup_contig(const struct nilfs_bmap *bmap,
path = nilfs_btree_alloc_path();
if (path == NULL)
return -ENOMEM;
- nilfs_btree_init_path(path);
+
ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level);
if (ret < 0)
goto out;
@@ -655,7 +610,6 @@ static int nilfs_btree_lookup_contig(const struct nilfs_bmap *bmap,
*ptrp = ptr;
ret = cnt;
out:
- nilfs_btree_release_path(path);
nilfs_btree_free_path(path);
return ret;
}
@@ -1123,7 +1077,6 @@ static int nilfs_btree_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
path = nilfs_btree_alloc_path();
if (path == NULL)
return -ENOMEM;
- nilfs_btree_init_path(path);
ret = nilfs_btree_do_lookup(btree, path, key, NULL,
NILFS_BTREE_LEVEL_NODE_MIN);
@@ -1140,7 +1093,6 @@ static int nilfs_btree_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
nilfs_bmap_add_blocks(bmap, stats.bs_nblocks);
out:
- nilfs_btree_release_path(path);
nilfs_btree_free_path(path);
return ret;
}
@@ -1456,7 +1408,7 @@ static int nilfs_btree_delete(struct nilfs_bmap *bmap, __u64 key)
path = nilfs_btree_alloc_path();
if (path == NULL)
return -ENOMEM;
- nilfs_btree_init_path(path);
+
ret = nilfs_btree_do_lookup(btree, path, key, NULL,
NILFS_BTREE_LEVEL_NODE_MIN);
if (ret < 0)
@@ -1473,7 +1425,6 @@ static int nilfs_btree_delete(struct nilfs_bmap *bmap, __u64 key)
nilfs_bmap_sub_blocks(bmap, stats.bs_nblocks);
out:
- nilfs_btree_release_path(path);
nilfs_btree_free_path(path);
return ret;
}
@@ -1488,11 +1439,9 @@ static int nilfs_btree_last_key(const struct nilfs_bmap *bmap, __u64 *keyp)
path = nilfs_btree_alloc_path();
if (path == NULL)
return -ENOMEM;
- nilfs_btree_init_path(path);
ret = nilfs_btree_do_lookup_last(btree, path, keyp, NULL);
- nilfs_btree_release_path(path);
nilfs_btree_free_path(path);
return ret;
@@ -1923,7 +1872,6 @@ static int nilfs_btree_propagate(const struct nilfs_bmap *bmap,
path = nilfs_btree_alloc_path();
if (path == NULL)
return -ENOMEM;
- nilfs_btree_init_path(path);
if (buffer_nilfs_node(bh)) {
node = (struct nilfs_btree_node *)bh->b_data;
@@ -1947,7 +1895,6 @@ static int nilfs_btree_propagate(const struct nilfs_bmap *bmap,
nilfs_btree_propagate_p(btree, path, level, bh);
out:
- nilfs_btree_release_path(path);
nilfs_btree_free_path(path);
return ret;
@@ -2108,7 +2055,6 @@ static int nilfs_btree_assign(struct nilfs_bmap *bmap,
path = nilfs_btree_alloc_path();
if (path == NULL)
return -ENOMEM;
- nilfs_btree_init_path(path);
if (buffer_nilfs_node(*bh)) {
node = (struct nilfs_btree_node *)(*bh)->b_data;
@@ -2130,7 +2076,6 @@ static int nilfs_btree_assign(struct nilfs_bmap *bmap,
nilfs_btree_assign_p(btree, path, level, bh, blocknr, binfo);
out:
- nilfs_btree_release_path(path);
nilfs_btree_free_path(path);
return ret;
@@ -2175,7 +2120,6 @@ static int nilfs_btree_mark(struct nilfs_bmap *bmap, __u64 key, int level)
path = nilfs_btree_alloc_path();
if (path == NULL)
return -ENOMEM;
- nilfs_btree_init_path(path);
ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level + 1);
if (ret < 0) {
@@ -2195,7 +2139,6 @@ static int nilfs_btree_mark(struct nilfs_bmap *bmap, __u64 key, int level)
nilfs_bmap_set_dirty(&btree->bt_bmap);
out:
- nilfs_btree_release_path(path);
nilfs_btree_free_path(path);
return ret;
}
diff --git a/fs/nilfs2/btree.h b/fs/nilfs2/btree.h
index 4b82d84ade75..af638d59e3bf 100644
--- a/fs/nilfs2/btree.h
+++ b/fs/nilfs2/btree.h
@@ -30,9 +30,6 @@
#include "btnode.h"
#include "bmap.h"
-struct nilfs_btree;
-struct nilfs_btree_path;
-
/**
* struct nilfs_btree - B-tree structure
* @bt_bmap: bmap base structure
@@ -41,6 +38,25 @@ struct nilfs_btree {
struct nilfs_bmap bt_bmap;
};
+/**
+ * struct nilfs_btree_path - A path on which B-tree operations are executed
+ * @bp_bh: buffer head of node block
+ * @bp_sib_bh: buffer head of sibling node block
+ * @bp_index: index of child node
+ * @bp_oldreq: ptr end request for old ptr
+ * @bp_newreq: ptr alloc request for new ptr
+ * @bp_op: rebalance operation
+ */
+struct nilfs_btree_path {
+ struct buffer_head *bp_bh;
+ struct buffer_head *bp_sib_bh;
+ int bp_index;
+ union nilfs_bmap_ptr_req bp_oldreq;
+ union nilfs_bmap_ptr_req bp_newreq;
+ struct nilfs_btnode_chkey_ctxt bp_ctxt;
+ void (*bp_op)(struct nilfs_btree *, struct nilfs_btree_path *,
+ int, __u64 *, __u64 *);
+};
#define NILFS_BTREE_ROOT_SIZE NILFS_BMAP_SIZE
#define NILFS_BTREE_ROOT_NCHILDREN_MAX \
@@ -57,6 +73,7 @@ struct nilfs_btree {
#define NILFS_BTREE_KEY_MIN ((__u64)0)
#define NILFS_BTREE_KEY_MAX (~(__u64)0)
+extern struct kmem_cache *nilfs_btree_path_cache;
int nilfs_btree_path_cache_init(void);
void nilfs_btree_path_cache_destroy(void);
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 0957b58f909d..5e226d4b41d3 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -451,7 +451,7 @@ static int __nilfs_read_inode(struct super_block *sb, unsigned long ino,
inode->i_op = &nilfs_special_inode_operations;
init_special_inode(
inode, inode->i_mode,
- new_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
+ huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
}
nilfs_ifile_unmap_inode(sbi->s_ifile, ino, bh);
brelse(bh);
@@ -511,7 +511,7 @@ void nilfs_write_inode_common(struct inode *inode,
nilfs_bmap_write(ii->i_bmap, raw_inode);
else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
raw_inode->i_device_code =
- cpu_to_le64(new_encode_dev(inode->i_rdev));
+ cpu_to_le64(huge_encode_dev(inode->i_rdev));
/* When extending inode, nilfs->ns_inode_size should be checked
for substitutions of appended fields */
}
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index ba43146f3c30..bae2a516b4ee 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -105,6 +105,8 @@ static void store_segsum_info(struct nilfs_segsum_info *ssi,
ssi->nsumblk = DIV_ROUND_UP(ssi->sumbytes, blocksize);
ssi->nfileblk = ssi->nblocks - ssi->nsumblk - !!NILFS_SEG_HAS_SR(ssi);
+
+ /* need to verify the ->ss_bytes field if ->ss_cno is read */
}
/**
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index 17851f77f739..2e6a2723b8fa 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -40,35 +40,10 @@ struct nilfs_write_info {
sector_t blocknr;
};
-
static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
struct the_nilfs *nilfs);
static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf);
-
-static struct kmem_cache *nilfs_segbuf_cachep;
-
-static void nilfs_segbuf_init_once(void *obj)
-{
- memset(obj, 0, sizeof(struct nilfs_segment_buffer));
-}
-
-int __init nilfs_init_segbuf_cache(void)
-{
- nilfs_segbuf_cachep =
- kmem_cache_create("nilfs2_segbuf_cache",
- sizeof(struct nilfs_segment_buffer),
- 0, SLAB_RECLAIM_ACCOUNT,
- nilfs_segbuf_init_once);
-
- return (nilfs_segbuf_cachep == NULL) ? -ENOMEM : 0;
-}
-
-void nilfs_destroy_segbuf_cache(void)
-{
- kmem_cache_destroy(nilfs_segbuf_cachep);
-}
-
struct nilfs_segment_buffer *nilfs_segbuf_new(struct super_block *sb)
{
struct nilfs_segment_buffer *segbuf;
@@ -81,6 +56,7 @@ struct nilfs_segment_buffer *nilfs_segbuf_new(struct super_block *sb)
INIT_LIST_HEAD(&segbuf->sb_list);
INIT_LIST_HEAD(&segbuf->sb_segsum_buffers);
INIT_LIST_HEAD(&segbuf->sb_payload_buffers);
+ segbuf->sb_super_root = NULL;
init_completion(&segbuf->sb_bio_event);
atomic_set(&segbuf->sb_err, 0);
@@ -158,7 +134,7 @@ int nilfs_segbuf_extend_payload(struct nilfs_segment_buffer *segbuf,
}
int nilfs_segbuf_reset(struct nilfs_segment_buffer *segbuf, unsigned flags,
- time_t ctime)
+ time_t ctime, __u64 cno)
{
int err;
@@ -171,6 +147,7 @@ int nilfs_segbuf_reset(struct nilfs_segment_buffer *segbuf, unsigned flags,
segbuf->sb_sum.sumbytes = sizeof(struct nilfs_segment_summary);
segbuf->sb_sum.nfinfo = segbuf->sb_sum.nfileblk = 0;
segbuf->sb_sum.ctime = ctime;
+ segbuf->sb_sum.cno = cno;
return 0;
}
@@ -196,13 +173,14 @@ void nilfs_segbuf_fill_in_segsum(struct nilfs_segment_buffer *segbuf)
raw_sum->ss_nfinfo = cpu_to_le32(segbuf->sb_sum.nfinfo);
raw_sum->ss_sumbytes = cpu_to_le32(segbuf->sb_sum.sumbytes);
raw_sum->ss_pad = 0;
+ raw_sum->ss_cno = cpu_to_le64(segbuf->sb_sum.cno);
}
/*
* CRC calculation routines
*/
-void nilfs_segbuf_fill_in_segsum_crc(struct nilfs_segment_buffer *segbuf,
- u32 seed)
+static void
+nilfs_segbuf_fill_in_segsum_crc(struct nilfs_segment_buffer *segbuf, u32 seed)
{
struct buffer_head *bh;
struct nilfs_segment_summary *raw_sum;
@@ -229,8 +207,8 @@ void nilfs_segbuf_fill_in_segsum_crc(struct nilfs_segment_buffer *segbuf,
raw_sum->ss_sumsum = cpu_to_le32(crc);
}
-void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *segbuf,
- u32 seed)
+static void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *segbuf,
+ u32 seed)
{
struct buffer_head *bh;
struct nilfs_segment_summary *raw_sum;
@@ -256,6 +234,20 @@ void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *segbuf,
raw_sum->ss_datasum = cpu_to_le32(crc);
}
+static void
+nilfs_segbuf_fill_in_super_root_crc(struct nilfs_segment_buffer *segbuf,
+ u32 seed)
+{
+ struct nilfs_super_root *raw_sr;
+ u32 crc;
+
+ raw_sr = (struct nilfs_super_root *)segbuf->sb_super_root->b_data;
+ crc = crc32_le(seed,
+ (unsigned char *)raw_sr + sizeof(raw_sr->sr_sum),
+ NILFS_SR_BYTES - sizeof(raw_sr->sr_sum));
+ raw_sr->sr_sum = cpu_to_le32(crc);
+}
+
static void nilfs_release_buffers(struct list_head *list)
{
struct buffer_head *bh, *n;
@@ -282,6 +274,7 @@ static void nilfs_segbuf_clear(struct nilfs_segment_buffer *segbuf)
{
nilfs_release_buffers(&segbuf->sb_segsum_buffers);
nilfs_release_buffers(&segbuf->sb_payload_buffers);
+ segbuf->sb_super_root = NULL;
}
/*
@@ -334,6 +327,23 @@ int nilfs_wait_on_logs(struct list_head *logs)
return ret;
}
+/**
+ * nilfs_add_checksums_on_logs - add checksums on the logs
+ * @logs: list of segment buffers storing target logs
+ * @seed: checksum seed value
+ */
+void nilfs_add_checksums_on_logs(struct list_head *logs, u32 seed)
+{
+ struct nilfs_segment_buffer *segbuf;
+
+ list_for_each_entry(segbuf, logs, sb_list) {
+ if (segbuf->sb_super_root)
+ nilfs_segbuf_fill_in_super_root_crc(segbuf, seed);
+ nilfs_segbuf_fill_in_segsum_crc(segbuf, seed);
+ nilfs_segbuf_fill_in_data_crc(segbuf, seed);
+ }
+}
+
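
A hedged user-space sketch of the seeded-checksum idea above, with zlib's crc32() standing in for the kernel's crc32_le() (the two differ in convention, so the numbers will not match the kernel's); as in nilfs_segbuf_fill_in_super_root_crc(), the checksum field itself is excluded from the digest. The seed value is hypothetical. Build with -lz.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <zlib.h>

struct fake_super_root {
	uint32_t sr_sum;		/* excluded from the CRC, filled in afterwards */
	unsigned char payload[60];
};

int main(void)
{
	struct fake_super_root sr;
	uint32_t seed = 0x11dbf8e2;	/* hypothetical ns_crc_seed */
	uint32_t crc;

	memset(&sr, 0xab, sizeof(sr));
	crc = crc32(seed, (unsigned char *)&sr + sizeof(sr.sr_sum),
		    sizeof(sr) - sizeof(sr.sr_sum));
	sr.sr_sum = crc;
	printf("sr_sum = 0x%08" PRIx32 "\n", sr.sr_sum);
	return 0;
}
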
/*
* BIO operations
*/
diff --git a/fs/nilfs2/segbuf.h b/fs/nilfs2/segbuf.h
index 94dfd3517bc0..fdf1c3b6d673 100644
--- a/fs/nilfs2/segbuf.h
+++ b/fs/nilfs2/segbuf.h
@@ -37,6 +37,7 @@
* @sumbytes: Byte count of segment summary
* @nfileblk: Total number of file blocks
* @seg_seq: Segment sequence number
+ * @cno: Checkpoint number
* @ctime: Creation time
* @next: Block number of the next full segment
*/
@@ -48,6 +49,7 @@ struct nilfs_segsum_info {
unsigned long sumbytes;
unsigned long nfileblk;
u64 seg_seq;
+ __u64 cno;
time_t ctime;
sector_t next;
};
@@ -76,6 +78,7 @@ struct nilfs_segsum_info {
* @sb_rest_blocks: Number of residual blocks in the current segment
* @sb_segsum_buffers: List of buffers for segment summaries
* @sb_payload_buffers: List of buffers for segment payload
+ * @sb_super_root: Pointer to buffer storing a super root block (if exists)
* @sb_nbio: Number of flying bio requests
* @sb_err: I/O error status
* @sb_bio_event: Completion event of log writing
@@ -95,6 +98,7 @@ struct nilfs_segment_buffer {
/* Buffers */
struct list_head sb_segsum_buffers;
struct list_head sb_payload_buffers; /* including super root */
+ struct buffer_head *sb_super_root;
/* io status */
int sb_nbio;
@@ -121,6 +125,7 @@ struct nilfs_segment_buffer {
b_assoc_buffers))
#define NILFS_SEGBUF_BH_IS_LAST(bh, head) ((bh)->b_assoc_buffers.next == head)
+extern struct kmem_cache *nilfs_segbuf_cachep;
int __init nilfs_init_segbuf_cache(void);
void nilfs_destroy_segbuf_cache(void);
@@ -132,13 +137,11 @@ void nilfs_segbuf_map_cont(struct nilfs_segment_buffer *segbuf,
struct nilfs_segment_buffer *prev);
void nilfs_segbuf_set_next_segnum(struct nilfs_segment_buffer *, __u64,
struct the_nilfs *);
-int nilfs_segbuf_reset(struct nilfs_segment_buffer *, unsigned, time_t);
+int nilfs_segbuf_reset(struct nilfs_segment_buffer *, unsigned, time_t, __u64);
int nilfs_segbuf_extend_segsum(struct nilfs_segment_buffer *);
int nilfs_segbuf_extend_payload(struct nilfs_segment_buffer *,
struct buffer_head **);
void nilfs_segbuf_fill_in_segsum(struct nilfs_segment_buffer *);
-void nilfs_segbuf_fill_in_segsum_crc(struct nilfs_segment_buffer *, u32);
-void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *, u32);
static inline void
nilfs_segbuf_add_segsum_buffer(struct nilfs_segment_buffer *segbuf,
@@ -171,6 +174,7 @@ void nilfs_truncate_logs(struct list_head *logs,
struct nilfs_segment_buffer *last);
int nilfs_write_logs(struct list_head *logs, struct the_nilfs *nilfs);
int nilfs_wait_on_logs(struct list_head *logs);
+void nilfs_add_checksums_on_logs(struct list_head *logs, u32 seed);
static inline void nilfs_destroy_logs(struct list_head *logs)
{
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 6a7dbd8451db..c9201649cc49 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -116,42 +116,6 @@ static void nilfs_dispose_list(struct nilfs_sb_info *, struct list_head *,
#define nilfs_cnt32_lt(a, b) nilfs_cnt32_gt(b, a)
#define nilfs_cnt32_le(a, b) nilfs_cnt32_ge(b, a)
-/*
- * Transaction
- */
-static struct kmem_cache *nilfs_transaction_cachep;
-
-/**
- * nilfs_init_transaction_cache - create a cache for nilfs_transaction_info
- *
- * nilfs_init_transaction_cache() creates a slab cache for the struct
- * nilfs_transaction_info.
- *
- * Return Value: On success, it returns 0. On error, one of the following
- * negative error code is returned.
- *
- * %-ENOMEM - Insufficient memory available.
- */
-int nilfs_init_transaction_cache(void)
-{
- nilfs_transaction_cachep =
- kmem_cache_create("nilfs2_transaction_cache",
- sizeof(struct nilfs_transaction_info),
- 0, SLAB_RECLAIM_ACCOUNT, NULL);
- return (nilfs_transaction_cachep == NULL) ? -ENOMEM : 0;
-}
-
-/**
- * nilfs_destroy_transaction_cache - destroy the cache for transaction info
- *
- * nilfs_destroy_transaction_cache() frees the slab cache for the struct
- * nilfs_transaction_info.
- */
-void nilfs_destroy_transaction_cache(void)
-{
- kmem_cache_destroy(nilfs_transaction_cachep);
-}
-
static int nilfs_prepare_segment_lock(struct nilfs_transaction_info *ti)
{
struct nilfs_transaction_info *cur_ti = current->journal_info;
@@ -402,7 +366,8 @@ static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
if (nilfs_doing_gc())
flags = NILFS_SS_GC;
- err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime);
+ err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime,
+ sci->sc_sbi->s_nilfs->ns_cno);
if (unlikely(err))
return err;
@@ -435,7 +400,7 @@ static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci)
return err;
segbuf = sci->sc_curseg;
}
- err = nilfs_segbuf_extend_payload(segbuf, &sci->sc_super_root);
+ err = nilfs_segbuf_extend_payload(segbuf, &segbuf->sb_super_root);
if (likely(!err))
segbuf->sb_sum.flags |= NILFS_SS_SR;
return err;
@@ -599,7 +564,7 @@ static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci,
*vblocknr = binfo->bi_v.bi_vblocknr;
}
-struct nilfs_sc_operations nilfs_sc_file_ops = {
+static struct nilfs_sc_operations nilfs_sc_file_ops = {
.collect_data = nilfs_collect_file_data,
.collect_node = nilfs_collect_file_node,
.collect_bmap = nilfs_collect_file_bmap,
@@ -649,7 +614,7 @@ static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci,
*binfo_dat = binfo->bi_dat;
}
-struct nilfs_sc_operations nilfs_sc_dat_ops = {
+static struct nilfs_sc_operations nilfs_sc_dat_ops = {
.collect_data = nilfs_collect_dat_data,
.collect_node = nilfs_collect_file_node,
.collect_bmap = nilfs_collect_dat_bmap,
@@ -657,7 +622,7 @@ struct nilfs_sc_operations nilfs_sc_dat_ops = {
.write_node_binfo = nilfs_write_dat_node_binfo,
};
-struct nilfs_sc_operations nilfs_sc_dsync_ops = {
+static struct nilfs_sc_operations nilfs_sc_dsync_ops = {
.collect_data = nilfs_collect_file_data,
.collect_node = NULL,
.collect_bmap = NULL,
@@ -932,43 +897,16 @@ static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci,
}
}
-/*
- * CRC calculation routines
- */
-static void nilfs_fill_in_super_root_crc(struct buffer_head *bh_sr, u32 seed)
-{
- struct nilfs_super_root *raw_sr =
- (struct nilfs_super_root *)bh_sr->b_data;
- u32 crc;
-
- crc = crc32_le(seed,
- (unsigned char *)raw_sr + sizeof(raw_sr->sr_sum),
- NILFS_SR_BYTES - sizeof(raw_sr->sr_sum));
- raw_sr->sr_sum = cpu_to_le32(crc);
-}
-
-static void nilfs_segctor_fill_in_checksums(struct nilfs_sc_info *sci,
- u32 seed)
-{
- struct nilfs_segment_buffer *segbuf;
-
- if (sci->sc_super_root)
- nilfs_fill_in_super_root_crc(sci->sc_super_root, seed);
-
- list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
- nilfs_segbuf_fill_in_segsum_crc(segbuf, seed);
- nilfs_segbuf_fill_in_data_crc(segbuf, seed);
- }
-}
-
static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
struct the_nilfs *nilfs)
{
- struct buffer_head *bh_sr = sci->sc_super_root;
- struct nilfs_super_root *raw_sr =
- (struct nilfs_super_root *)bh_sr->b_data;
+ struct buffer_head *bh_sr;
+ struct nilfs_super_root *raw_sr;
unsigned isz = nilfs->ns_inode_size;
+ bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;
+ raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
+
raw_sr->sr_bytes = cpu_to_le16(NILFS_SR_BYTES);
raw_sr->sr_nongc_ctime
= cpu_to_le64(nilfs_doing_gc() ?
@@ -1491,7 +1429,6 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
/* Collection retry loop */
for (;;) {
- sci->sc_super_root = NULL;
sci->sc_nblk_this_inc = 0;
sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
@@ -1568,7 +1505,7 @@ nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci,
ssp.offset = sizeof(struct nilfs_segment_summary);
list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
- if (bh == sci->sc_super_root)
+ if (bh == segbuf->sb_super_root)
break;
if (!finfo) {
finfo = nilfs_segctor_map_segsum_entry(
@@ -1729,7 +1666,7 @@ static int nilfs_segctor_prepare_write(struct nilfs_sc_info *sci,
list_for_each_entry(bh, &segbuf->sb_payload_buffers,
b_assoc_buffers) {
- if (bh == sci->sc_super_root) {
+ if (bh == segbuf->sb_super_root) {
if (bh->b_page != bd_page) {
lock_page(bd_page);
clear_page_dirty_for_io(bd_page);
@@ -1848,7 +1785,7 @@ static void nilfs_clear_copied_buffers(struct list_head *list, int err)
}
static void nilfs_abort_logs(struct list_head *logs, struct page *failed_page,
- struct buffer_head *bh_sr, int err)
+ int err)
{
struct nilfs_segment_buffer *segbuf;
struct page *bd_page = NULL, *fs_page = NULL;
@@ -1869,7 +1806,7 @@ static void nilfs_abort_logs(struct list_head *logs, struct page *failed_page,
list_for_each_entry(bh, &segbuf->sb_payload_buffers,
b_assoc_buffers) {
- if (bh == bh_sr) {
+ if (bh == segbuf->sb_super_root) {
if (bh->b_page != bd_page) {
end_page_writeback(bd_page);
bd_page = bh->b_page;
@@ -1898,7 +1835,7 @@ static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
list_splice_tail_init(&sci->sc_write_logs, &logs);
ret = nilfs_wait_on_logs(&logs);
- nilfs_abort_logs(&logs, NULL, sci->sc_super_root, ret ? : err);
+ nilfs_abort_logs(&logs, NULL, ret ? : err);
list_splice_tail_init(&sci->sc_segbufs, &logs);
nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
@@ -1914,7 +1851,6 @@ static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
}
nilfs_destroy_logs(&logs);
- sci->sc_super_root = NULL;
}
static void nilfs_set_next_segment(struct the_nilfs *nilfs,
@@ -1933,7 +1869,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
struct nilfs_segment_buffer *segbuf;
struct page *bd_page = NULL, *fs_page = NULL;
struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
- int update_sr = (sci->sc_super_root != NULL);
+ int update_sr = false;
list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
struct buffer_head *bh;
@@ -1964,11 +1900,12 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
set_buffer_uptodate(bh);
clear_buffer_dirty(bh);
clear_buffer_nilfs_volatile(bh);
- if (bh == sci->sc_super_root) {
+ if (bh == segbuf->sb_super_root) {
if (bh->b_page != bd_page) {
end_page_writeback(bd_page);
bd_page = bh->b_page;
}
+ update_sr = true;
break;
}
if (bh->b_page != fs_page) {
@@ -2115,7 +2052,7 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
struct nilfs_sb_info *sbi = sci->sc_sbi;
struct the_nilfs *nilfs = sbi->s_nilfs;
struct page *failed_page;
- int err, has_sr = 0;
+ int err;
sci->sc_stage.scnt = NILFS_ST_INIT;
@@ -2143,8 +2080,6 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
if (unlikely(err))
goto failed;
- has_sr = (sci->sc_super_root != NULL);
-
/* Avoid empty segment */
if (sci->sc_stage.scnt == NILFS_ST_DONE &&
NILFS_SEG_EMPTY(&sci->sc_curseg->sb_sum)) {
@@ -2159,7 +2094,8 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
nilfs_segctor_fill_in_file_bmap(sci, sbi->s_ifile);
- if (has_sr) {
+ if (mode == SC_LSEG_SR &&
+ sci->sc_stage.scnt >= NILFS_ST_CPFILE) {
err = nilfs_segctor_fill_in_checkpoint(sci);
if (unlikely(err))
goto failed_to_write;
@@ -2171,11 +2107,12 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
/* Write partial segments */
err = nilfs_segctor_prepare_write(sci, &failed_page);
if (err) {
- nilfs_abort_logs(&sci->sc_segbufs, failed_page,
- sci->sc_super_root, err);
+ nilfs_abort_logs(&sci->sc_segbufs, failed_page, err);
goto failed_to_write;
}
- nilfs_segctor_fill_in_checksums(sci, nilfs->ns_crc_seed);
+
+ nilfs_add_checksums_on_logs(&sci->sc_segbufs,
+ nilfs->ns_crc_seed);
err = nilfs_segctor_write(sci, nilfs);
if (unlikely(err))
@@ -2196,8 +2133,6 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
}
} while (sci->sc_stage.scnt != NILFS_ST_DONE);
- sci->sc_super_root = NULL;
-
out:
nilfs_segctor_check_out_files(sci, sbi);
return err;
@@ -2224,9 +2159,9 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
{
spin_lock(&sci->sc_state_lock);
- if (sci->sc_timer && !(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
- sci->sc_timer->expires = jiffies + sci->sc_interval;
- add_timer(sci->sc_timer);
+ if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
+ sci->sc_timer.expires = jiffies + sci->sc_interval;
+ add_timer(&sci->sc_timer);
sci->sc_state |= NILFS_SEGCTOR_COMMIT;
}
spin_unlock(&sci->sc_state_lock);
@@ -2431,9 +2366,7 @@ static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
spin_lock(&sci->sc_state_lock);
sci->sc_seq_accepted = sci->sc_seq_request;
spin_unlock(&sci->sc_state_lock);
-
- if (sci->sc_timer)
- del_timer_sync(sci->sc_timer);
+ del_timer_sync(&sci->sc_timer);
}
/**
@@ -2459,9 +2392,9 @@ static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
sci->sc_flush_request &= ~FLUSH_DAT_BIT;
/* re-enable timer if checkpoint creation was not done */
- if (sci->sc_timer && (sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
- time_before(jiffies, sci->sc_timer->expires))
- add_timer(sci->sc_timer);
+ if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
+ time_before(jiffies, sci->sc_timer.expires))
+ add_timer(&sci->sc_timer);
}
spin_unlock(&sci->sc_state_lock);
}
@@ -2640,13 +2573,10 @@ static int nilfs_segctor_thread(void *arg)
{
struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
- struct timer_list timer;
int timeout = 0;
- init_timer(&timer);
- timer.data = (unsigned long)current;
- timer.function = nilfs_construction_timeout;
- sci->sc_timer = &timer;
+ sci->sc_timer.data = (unsigned long)current;
+ sci->sc_timer.function = nilfs_construction_timeout;
/* start sync. */
sci->sc_task = current;
@@ -2695,7 +2625,7 @@ static int nilfs_segctor_thread(void *arg)
should_sleep = 0;
else if (sci->sc_state & NILFS_SEGCTOR_COMMIT)
should_sleep = time_before(jiffies,
- sci->sc_timer->expires);
+ sci->sc_timer.expires);
if (should_sleep) {
spin_unlock(&sci->sc_state_lock);
@@ -2704,7 +2634,7 @@ static int nilfs_segctor_thread(void *arg)
}
finish_wait(&sci->sc_wait_daemon, &wait);
timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
- time_after_eq(jiffies, sci->sc_timer->expires));
+ time_after_eq(jiffies, sci->sc_timer.expires));
if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs))
set_nilfs_discontinued(nilfs);
@@ -2713,8 +2643,6 @@ static int nilfs_segctor_thread(void *arg)
end_thread:
spin_unlock(&sci->sc_state_lock);
- del_timer_sync(sci->sc_timer);
- sci->sc_timer = NULL;
/* end sync. */
sci->sc_task = NULL;
@@ -2750,13 +2678,6 @@ static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci)
}
}
-static int nilfs_segctor_init(struct nilfs_sc_info *sci)
-{
- sci->sc_seq_done = sci->sc_seq_request;
-
- return nilfs_segctor_start_thread(sci);
-}
-
/*
* Setup & clean-up functions
*/
@@ -2780,6 +2701,7 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct nilfs_sb_info *sbi)
INIT_LIST_HEAD(&sci->sc_write_logs);
INIT_LIST_HEAD(&sci->sc_gc_inodes);
INIT_LIST_HEAD(&sci->sc_copied_buffers);
+ init_timer(&sci->sc_timer);
sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
@@ -2846,6 +2768,7 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
down_write(&sbi->s_nilfs->ns_segctor_sem);
+ del_timer_sync(&sci->sc_timer);
kfree(sci);
}
@@ -2880,7 +2803,7 @@ int nilfs_attach_segment_constructor(struct nilfs_sb_info *sbi)
return -ENOMEM;
nilfs_attach_writer(nilfs, sbi);
- err = nilfs_segctor_init(NILFS_SC(sbi));
+ err = nilfs_segctor_start_thread(NILFS_SC(sbi));
if (err) {
nilfs_detach_writer(nilfs, sbi);
kfree(sbi->s_sc_info);
diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h
index 82dfd6a686b9..dca142361ccf 100644
--- a/fs/nilfs2/segment.h
+++ b/fs/nilfs2/segment.h
@@ -100,7 +100,6 @@ struct nilfs_segsum_pointer {
* @sc_write_logs: List of segment buffers to hold logs under writing
* @sc_segbuf_nblocks: Number of available blocks in segment buffers.
* @sc_curseg: Current segment buffer
- * @sc_super_root: Pointer to the super root buffer
* @sc_stage: Collection stage
* @sc_finfo_ptr: pointer to the current finfo struct in the segment summary
* @sc_binfo_ptr: pointer to the current binfo struct in the segment summary
@@ -148,7 +147,6 @@ struct nilfs_sc_info {
struct list_head sc_write_logs;
unsigned long sc_segbuf_nblocks;
struct nilfs_segment_buffer *sc_curseg;
- struct buffer_head *sc_super_root;
struct nilfs_cstage sc_stage;
@@ -179,7 +177,7 @@ struct nilfs_sc_info {
unsigned long sc_lseg_stime; /* in 1/HZ seconds */
unsigned long sc_watermark;
- struct timer_list *sc_timer;
+ struct timer_list sc_timer;
struct task_struct *sc_task;
};
@@ -219,6 +217,8 @@ enum {
*/
#define NILFS_SC_DEFAULT_WATERMARK 3600
+/* super.c */
+extern struct kmem_cache *nilfs_transaction_cachep;
/* segment.c */
extern int nilfs_init_transaction_cache(void);
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 48145f505a6a..03b34b738993 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -67,6 +67,11 @@ MODULE_DESCRIPTION("A New Implementation of the Log-structured Filesystem "
"(NILFS)");
MODULE_LICENSE("GPL");
+struct kmem_cache *nilfs_inode_cachep;
+struct kmem_cache *nilfs_transaction_cachep;
+struct kmem_cache *nilfs_segbuf_cachep;
+struct kmem_cache *nilfs_btree_path_cache;
+
static int nilfs_remount(struct super_block *sb, int *flags, char *data);
/**
@@ -129,7 +134,6 @@ void nilfs_warning(struct super_block *sb, const char *function,
va_end(args);
}
-static struct kmem_cache *nilfs_inode_cachep;
struct inode *nilfs_alloc_inode_common(struct the_nilfs *nilfs)
{
@@ -155,34 +159,6 @@ void nilfs_destroy_inode(struct inode *inode)
kmem_cache_free(nilfs_inode_cachep, NILFS_I(inode));
}
-static void init_once(void *obj)
-{
- struct nilfs_inode_info *ii = obj;
-
- INIT_LIST_HEAD(&ii->i_dirty);
-#ifdef CONFIG_NILFS_XATTR
- init_rwsem(&ii->xattr_sem);
-#endif
- nilfs_btnode_cache_init_once(&ii->i_btnode_cache);
- ii->i_bmap = (struct nilfs_bmap *)&ii->i_bmap_union;
- inode_init_once(&ii->vfs_inode);
-}
-
-static int nilfs_init_inode_cache(void)
-{
- nilfs_inode_cachep = kmem_cache_create("nilfs2_inode_cache",
- sizeof(struct nilfs_inode_info),
- 0, SLAB_RECLAIM_ACCOUNT,
- init_once);
-
- return (nilfs_inode_cachep == NULL) ? -ENOMEM : 0;
-}
-
-static inline void nilfs_destroy_inode_cache(void)
-{
- kmem_cache_destroy(nilfs_inode_cachep);
-}
-
static void nilfs_clear_inode(struct inode *inode)
{
struct nilfs_inode_info *ii = NILFS_I(inode);
@@ -266,8 +242,8 @@ int nilfs_commit_super(struct nilfs_sb_info *sbi, int dupsb)
int err;
/* nilfs->sem must be locked by the caller. */
- if (sbp[0]->s_magic != NILFS_SUPER_MAGIC) {
- if (sbp[1] && sbp[1]->s_magic == NILFS_SUPER_MAGIC)
+ if (sbp[0]->s_magic != cpu_to_le16(NILFS_SUPER_MAGIC)) {
+ if (sbp[1] && sbp[1]->s_magic == cpu_to_le16(NILFS_SUPER_MAGIC))
nilfs_swap_super_block(nilfs);
else {
printk(KERN_CRIT "NILFS: superblock broke on dev %s\n",
@@ -470,10 +446,10 @@ static int nilfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
if (nilfs_test_opt(sbi, SNAPSHOT))
seq_printf(seq, ",cp=%llu",
(unsigned long long int)sbi->s_snapshot_cno);
- if (nilfs_test_opt(sbi, ERRORS_RO))
- seq_printf(seq, ",errors=remount-ro");
if (nilfs_test_opt(sbi, ERRORS_PANIC))
seq_printf(seq, ",errors=panic");
+ if (nilfs_test_opt(sbi, ERRORS_CONT))
+ seq_printf(seq, ",errors=continue");
if (nilfs_test_opt(sbi, STRICT_ORDER))
seq_printf(seq, ",order=strict");
if (nilfs_test_opt(sbi, NORECOVERY))
@@ -631,7 +607,7 @@ nilfs_set_default_options(struct nilfs_sb_info *sbi,
struct nilfs_super_block *sbp)
{
sbi->s_mount_opt =
- NILFS_MOUNT_ERRORS_CONT | NILFS_MOUNT_BARRIER;
+ NILFS_MOUNT_ERRORS_RO | NILFS_MOUNT_BARRIER;
}
static int nilfs_setup_super(struct nilfs_sb_info *sbi)
@@ -778,9 +754,7 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent,
goto failed_sbi;
}
cno = sbi->s_snapshot_cno;
- } else
- /* Read-only mount */
- sbi->s_snapshot_cno = cno;
+ }
}
err = nilfs_attach_checkpoint(sbi, cno);
@@ -849,7 +823,7 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
struct the_nilfs *nilfs = sbi->s_nilfs;
unsigned long old_sb_flags;
struct nilfs_mount_options old_opts;
- int err;
+ int was_snapshot, err;
lock_kernel();
@@ -857,6 +831,7 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
old_sb_flags = sb->s_flags;
old_opts.mount_opt = sbi->s_mount_opt;
old_opts.snapshot_cno = sbi->s_snapshot_cno;
+ was_snapshot = nilfs_test_opt(sbi, SNAPSHOT);
if (!parse_options(data, sb)) {
err = -EINVAL;
@@ -864,20 +839,32 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
}
sb->s_flags = (sb->s_flags & ~MS_POSIXACL);
- if ((*flags & MS_RDONLY) &&
- sbi->s_snapshot_cno != old_opts.snapshot_cno) {
- printk(KERN_WARNING "NILFS (device %s): couldn't "
- "remount to a different snapshot.\n",
- sb->s_id);
- err = -EINVAL;
- goto restore_opts;
+ err = -EINVAL;
+ if (was_snapshot) {
+ if (!(*flags & MS_RDONLY)) {
+ printk(KERN_ERR "NILFS (device %s): cannot remount "
+ "snapshot read/write.\n",
+ sb->s_id);
+ goto restore_opts;
+ } else if (sbi->s_snapshot_cno != old_opts.snapshot_cno) {
+ printk(KERN_ERR "NILFS (device %s): cannot "
+ "remount to a different snapshot.\n",
+ sb->s_id);
+ goto restore_opts;
+ }
+ } else {
+ if (nilfs_test_opt(sbi, SNAPSHOT)) {
+ printk(KERN_ERR "NILFS (device %s): cannot change "
+ "a regular mount to a snapshot.\n",
+ sb->s_id);
+ goto restore_opts;
+ }
}
if (!nilfs_valid_fs(nilfs)) {
printk(KERN_WARNING "NILFS (device %s): couldn't "
"remount because the filesystem is in an "
"incomplete recovery state.\n", sb->s_id);
- err = -EINVAL;
goto restore_opts;
}
@@ -888,9 +875,6 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
nilfs_detach_segment_constructor(sbi);
sb->s_flags |= MS_RDONLY;
- sbi->s_snapshot_cno = nilfs_last_cno(nilfs);
- /* nilfs_set_opt(sbi, SNAPSHOT); */
-
/*
* Remounting a valid RW partition RDONLY, so set
* the RDONLY flag and then mark the partition as valid again.
@@ -909,24 +893,7 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
* store the current valid flag. (It may have been changed
* by fsck since we originally mounted the partition.)
*/
- if (nilfs->ns_current && nilfs->ns_current != sbi) {
- printk(KERN_WARNING "NILFS (device %s): couldn't "
- "remount because an RW-mount exists.\n",
- sb->s_id);
- err = -EBUSY;
- goto restore_opts;
- }
- if (sbi->s_snapshot_cno != nilfs_last_cno(nilfs)) {
- printk(KERN_WARNING "NILFS (device %s): couldn't "
- "remount because the current RO-mount is not "
- "the latest one.\n",
- sb->s_id);
- err = -EINVAL;
- goto restore_opts;
- }
sb->s_flags &= ~MS_RDONLY;
- nilfs_clear_opt(sbi, SNAPSHOT);
- sbi->s_snapshot_cno = 0;
err = nilfs_attach_segment_constructor(sbi);
if (err)
@@ -935,8 +902,6 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
down_write(&nilfs->ns_sem);
nilfs_setup_super(sbi);
up_write(&nilfs->ns_sem);
-
- nilfs->ns_current = sbi;
}
out:
up_write(&nilfs->ns_super_sem);
@@ -1022,10 +987,14 @@ nilfs_get_sb(struct file_system_type *fs_type, int flags,
{
struct nilfs_super_data sd;
struct super_block *s;
+ fmode_t mode = FMODE_READ;
struct the_nilfs *nilfs;
int err, need_to_close = 1;
- sd.bdev = open_bdev_exclusive(dev_name, flags, fs_type);
+ if (!(flags & MS_RDONLY))
+ mode |= FMODE_WRITE;
+
+ sd.bdev = open_bdev_exclusive(dev_name, mode, fs_type);
if (IS_ERR(sd.bdev))
return PTR_ERR(sd.bdev);
@@ -1092,10 +1061,12 @@ nilfs_get_sb(struct file_system_type *fs_type, int flags,
/* New superblock instance created */
s->s_flags = flags;
+ s->s_mode = mode;
strlcpy(s->s_id, bdevname(sd.bdev, b), sizeof(s->s_id));
sb_set_blocksize(s, block_size(sd.bdev));
- err = nilfs_fill_super(s, data, flags & MS_VERBOSE, nilfs);
+ err = nilfs_fill_super(s, data, flags & MS_SILENT ? 1 : 0,
+ nilfs);
if (err)
goto cancel_new;
@@ -1106,7 +1077,7 @@ nilfs_get_sb(struct file_system_type *fs_type, int flags,
mutex_unlock(&nilfs->ns_mount_mutex);
put_nilfs(nilfs);
if (need_to_close)
- close_bdev_exclusive(sd.bdev, flags);
+ close_bdev_exclusive(sd.bdev, mode);
simple_set_mnt(mnt, s);
return 0;
@@ -1114,7 +1085,7 @@ nilfs_get_sb(struct file_system_type *fs_type, int flags,
mutex_unlock(&nilfs->ns_mount_mutex);
put_nilfs(nilfs);
failed:
- close_bdev_exclusive(sd.bdev, flags);
+ close_bdev_exclusive(sd.bdev, mode);
return err;
@@ -1124,7 +1095,7 @@ nilfs_get_sb(struct file_system_type *fs_type, int flags,
put_nilfs(nilfs);
deactivate_locked_super(s);
/*
- * deactivate_super() invokes close_bdev_exclusive().
+ * deactivate_locked_super() invokes close_bdev_exclusive().
* We must finish all post-cleaning before this call;
* put_nilfs() needs the block device.
*/
@@ -1139,54 +1110,93 @@ struct file_system_type nilfs_fs_type = {
.fs_flags = FS_REQUIRES_DEV,
};
-static int __init init_nilfs_fs(void)
+static void nilfs_inode_init_once(void *obj)
{
- int err;
-
- err = nilfs_init_inode_cache();
- if (err)
- goto failed;
+ struct nilfs_inode_info *ii = obj;
- err = nilfs_init_transaction_cache();
- if (err)
- goto failed_inode_cache;
+ INIT_LIST_HEAD(&ii->i_dirty);
+#ifdef CONFIG_NILFS_XATTR
+ init_rwsem(&ii->xattr_sem);
+#endif
+ nilfs_btnode_cache_init_once(&ii->i_btnode_cache);
+ ii->i_bmap = (struct nilfs_bmap *)&ii->i_bmap_union;
+ inode_init_once(&ii->vfs_inode);
+}
- err = nilfs_init_segbuf_cache();
- if (err)
- goto failed_transaction_cache;
+static void nilfs_segbuf_init_once(void *obj)
+{
+ memset(obj, 0, sizeof(struct nilfs_segment_buffer));
+}
- err = nilfs_btree_path_cache_init();
- if (err)
- goto failed_segbuf_cache;
+static void nilfs_destroy_cachep(void)
+{
+ if (nilfs_inode_cachep)
+ kmem_cache_destroy(nilfs_inode_cachep);
+ if (nilfs_transaction_cachep)
+ kmem_cache_destroy(nilfs_transaction_cachep);
+ if (nilfs_segbuf_cachep)
+ kmem_cache_destroy(nilfs_segbuf_cachep);
+ if (nilfs_btree_path_cache)
+ kmem_cache_destroy(nilfs_btree_path_cache);
+}
- err = register_filesystem(&nilfs_fs_type);
- if (err)
- goto failed_btree_path_cache;
+static int __init nilfs_init_cachep(void)
+{
+ nilfs_inode_cachep = kmem_cache_create("nilfs2_inode_cache",
+ sizeof(struct nilfs_inode_info), 0,
+ SLAB_RECLAIM_ACCOUNT, nilfs_inode_init_once);
+ if (!nilfs_inode_cachep)
+ goto fail;
+
+ nilfs_transaction_cachep = kmem_cache_create("nilfs2_transaction_cache",
+ sizeof(struct nilfs_transaction_info), 0,
+ SLAB_RECLAIM_ACCOUNT, NULL);
+ if (!nilfs_transaction_cachep)
+ goto fail;
+
+ nilfs_segbuf_cachep = kmem_cache_create("nilfs2_segbuf_cache",
+ sizeof(struct nilfs_segment_buffer), 0,
+ SLAB_RECLAIM_ACCOUNT, nilfs_segbuf_init_once);
+ if (!nilfs_segbuf_cachep)
+ goto fail;
+
+ nilfs_btree_path_cache = kmem_cache_create("nilfs2_btree_path_cache",
+ sizeof(struct nilfs_btree_path) * NILFS_BTREE_LEVEL_MAX,
+ 0, 0, NULL);
+ if (!nilfs_btree_path_cache)
+ goto fail;
return 0;
- failed_btree_path_cache:
- nilfs_btree_path_cache_destroy();
+fail:
+ nilfs_destroy_cachep();
+ return -ENOMEM;
+}
+
+static int __init init_nilfs_fs(void)
+{
+ int err;
- failed_segbuf_cache:
- nilfs_destroy_segbuf_cache();
+ err = nilfs_init_cachep();
+ if (err)
+ goto fail;
- failed_transaction_cache:
- nilfs_destroy_transaction_cache();
+ err = register_filesystem(&nilfs_fs_type);
+ if (err)
+ goto free_cachep;
- failed_inode_cache:
- nilfs_destroy_inode_cache();
+ printk(KERN_INFO "NILFS version 2 loaded\n");
+ return 0;
- failed:
+free_cachep:
+ nilfs_destroy_cachep();
+fail:
return err;
}
static void __exit exit_nilfs_fs(void)
{
- nilfs_destroy_segbuf_cache();
- nilfs_destroy_transaction_cache();
- nilfs_destroy_inode_cache();
- nilfs_btree_path_cache_destroy();
+ nilfs_destroy_cachep();
unregister_filesystem(&nilfs_fs_type);
}
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index 33871f7e4f01..8c1097327abc 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -486,11 +486,15 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
printk(KERN_WARNING
"NILFS warning: unable to read secondary superblock\n");
+ /*
+ * Compare two super blocks and set 1 in swp if the secondary
+ * super block is valid and newer. Otherwise, set 0 in swp.
+ */
valid[0] = nilfs_valid_sb(sbp[0]);
valid[1] = nilfs_valid_sb(sbp[1]);
- swp = valid[1] &&
- (!valid[0] ||
- le64_to_cpu(sbp[1]->s_wtime) > le64_to_cpu(sbp[0]->s_wtime));
+ swp = valid[1] && (!valid[0] ||
+ le64_to_cpu(sbp[1]->s_last_cno) >
+ le64_to_cpu(sbp[0]->s_last_cno));
if (valid[swp] && nilfs_sb2_bad_offset(sbp[swp], sb2off)) {
brelse(sbh[1]);
@@ -670,7 +674,7 @@ int nilfs_discard_segments(struct the_nilfs *nilfs, __u64 *segnump,
start * sects_per_block,
nblocks * sects_per_block,
GFP_NOFS,
- DISCARD_FL_BARRIER);
+ BLKDEV_IFL_BARRIER);
if (ret < 0)
return ret;
nblocks = 0;
@@ -680,7 +684,7 @@ int nilfs_discard_segments(struct the_nilfs *nilfs, __u64 *segnump,
ret = blkdev_issue_discard(nilfs->ns_bdev,
start * sects_per_block,
nblocks * sects_per_block,
- GFP_NOFS, DISCARD_FL_BARRIER);
+ GFP_NOFS, BLKDEV_IFL_BARRIER);
return ret;
}
diff --git a/fs/notify/Kconfig b/fs/notify/Kconfig
index dffbb0911d02..22c629eedd82 100644
--- a/fs/notify/Kconfig
+++ b/fs/notify/Kconfig
@@ -3,3 +3,4 @@ config FSNOTIFY
source "fs/notify/dnotify/Kconfig"
source "fs/notify/inotify/Kconfig"
+source "fs/notify/fanotify/Kconfig"
diff --git a/fs/notify/Makefile b/fs/notify/Makefile
index 0922cc826c46..ae5f33a6d868 100644
--- a/fs/notify/Makefile
+++ b/fs/notify/Makefile
@@ -1,4 +1,6 @@
-obj-$(CONFIG_FSNOTIFY) += fsnotify.o notification.o group.o inode_mark.o
+obj-$(CONFIG_FSNOTIFY) += fsnotify.o notification.o group.o inode_mark.o \
+ mark.o vfsmount_mark.o
obj-y += dnotify/
obj-y += inotify/
+obj-y += fanotify/
diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
index 7e54e52964dd..6624c2ee8786 100644
--- a/fs/notify/dnotify/dnotify.c
+++ b/fs/notify/dnotify/dnotify.c
@@ -29,17 +29,17 @@
int dir_notify_enable __read_mostly = 1;
static struct kmem_cache *dnotify_struct_cache __read_mostly;
-static struct kmem_cache *dnotify_mark_entry_cache __read_mostly;
+static struct kmem_cache *dnotify_mark_cache __read_mostly;
static struct fsnotify_group *dnotify_group __read_mostly;
static DEFINE_MUTEX(dnotify_mark_mutex);
/*
- * dnotify will attach one of these to each inode (i_fsnotify_mark_entries) which
+ * dnotify will attach one of these to each inode (i_fsnotify_marks) which
* is being watched by dnotify. If multiple userspace applications are watching
* the same directory with dnotify their information is chained in dn
*/
-struct dnotify_mark_entry {
- struct fsnotify_mark_entry fsn_entry;
+struct dnotify_mark {
+ struct fsnotify_mark fsn_mark;
struct dnotify_struct *dn;
};
@@ -51,27 +51,27 @@ struct dnotify_mark_entry {
* it calls the fsnotify function so it can update the set of all events relevant
* to this inode.
*/
-static void dnotify_recalc_inode_mask(struct fsnotify_mark_entry *entry)
+static void dnotify_recalc_inode_mask(struct fsnotify_mark *fsn_mark)
{
__u32 new_mask, old_mask;
struct dnotify_struct *dn;
- struct dnotify_mark_entry *dnentry = container_of(entry,
- struct dnotify_mark_entry,
- fsn_entry);
+ struct dnotify_mark *dn_mark = container_of(fsn_mark,
+ struct dnotify_mark,
+ fsn_mark);
- assert_spin_locked(&entry->lock);
+ assert_spin_locked(&fsn_mark->lock);
- old_mask = entry->mask;
+ old_mask = fsn_mark->mask;
new_mask = 0;
- for (dn = dnentry->dn; dn != NULL; dn = dn->dn_next)
+ for (dn = dn_mark->dn; dn != NULL; dn = dn->dn_next)
new_mask |= (dn->dn_mask & ~FS_DN_MULTISHOT);
- entry->mask = new_mask;
+ fsnotify_set_mark_mask_locked(fsn_mark, new_mask);
if (old_mask == new_mask)
return;
- if (entry->inode)
- fsnotify_recalc_inode_mask(entry->inode);
+ if (fsn_mark->i.inode)
+ fsnotify_recalc_inode_mask(fsn_mark->i.inode);
}
/*
@@ -85,8 +85,8 @@ static void dnotify_recalc_inode_mask(struct fsnotify_mark_entry *entry)
static int dnotify_handle_event(struct fsnotify_group *group,
struct fsnotify_event *event)
{
- struct fsnotify_mark_entry *entry = NULL;
- struct dnotify_mark_entry *dnentry;
+ struct fsnotify_mark *fsn_mark = NULL;
+ struct dnotify_mark *dn_mark;
struct inode *to_tell;
struct dnotify_struct *dn;
struct dnotify_struct **prev;
@@ -95,17 +95,13 @@ static int dnotify_handle_event(struct fsnotify_group *group,
to_tell = event->to_tell;
- spin_lock(&to_tell->i_lock);
- entry = fsnotify_find_mark_entry(group, to_tell);
- spin_unlock(&to_tell->i_lock);
-
- /* unlikely since we alreay passed dnotify_should_send_event() */
- if (unlikely(!entry))
+ fsn_mark = fsnotify_find_inode_mark(group, to_tell);
+ if (unlikely(!fsn_mark))
return 0;
- dnentry = container_of(entry, struct dnotify_mark_entry, fsn_entry);
+ dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark);
- spin_lock(&entry->lock);
- prev = &dnentry->dn;
+ spin_lock(&fsn_mark->lock);
+ prev = &dn_mark->dn;
while ((dn = *prev) != NULL) {
if ((dn->dn_mask & test_mask) == 0) {
prev = &dn->dn_next;
@@ -118,12 +114,12 @@ static int dnotify_handle_event(struct fsnotify_group *group,
else {
*prev = dn->dn_next;
kmem_cache_free(dnotify_struct_cache, dn);
- dnotify_recalc_inode_mask(entry);
+ dnotify_recalc_inode_mask(fsn_mark);
}
}
- spin_unlock(&entry->lock);
- fsnotify_put_mark(entry);
+ spin_unlock(&fsn_mark->lock);
+ fsnotify_put_mark(fsn_mark);
return 0;
}
@@ -133,9 +129,10 @@ static int dnotify_handle_event(struct fsnotify_group *group,
* userspace notification for that pair.
*/
static bool dnotify_should_send_event(struct fsnotify_group *group,
- struct inode *inode, __u32 mask)
+ struct inode *inode, struct vfsmount *mnt,
+ __u32 mask, void *data, int data_type)
{
- struct fsnotify_mark_entry *entry;
+ struct fsnotify_mark *fsn_mark;
bool send;
/* !dir_notify_enable should never get here, don't waste time checking
@@ -146,31 +143,27 @@ static bool dnotify_should_send_event(struct fsnotify_group *group,
if (!S_ISDIR(inode->i_mode))
return false;
- spin_lock(&inode->i_lock);
- entry = fsnotify_find_mark_entry(group, inode);
- spin_unlock(&inode->i_lock);
-
- /* no mark means no dnotify watch */
- if (!entry)
+ fsn_mark = fsnotify_find_inode_mark(group, inode);
+ if (!fsn_mark)
return false;
mask = (mask & ~FS_EVENT_ON_CHILD);
- send = (mask & entry->mask);
+ send = (mask & fsn_mark->mask);
- fsnotify_put_mark(entry); /* matches fsnotify_find_mark_entry */
+ fsnotify_put_mark(fsn_mark); /* matches fsnotify_find_inode_mark */
return send;
}
-static void dnotify_free_mark(struct fsnotify_mark_entry *entry)
+static void dnotify_free_mark(struct fsnotify_mark *fsn_mark)
{
- struct dnotify_mark_entry *dnentry = container_of(entry,
- struct dnotify_mark_entry,
- fsn_entry);
+ struct dnotify_mark *dn_mark = container_of(fsn_mark,
+ struct dnotify_mark,
+ fsn_mark);
- BUG_ON(dnentry->dn);
+ BUG_ON(dn_mark->dn);
- kmem_cache_free(dnotify_mark_entry_cache, dnentry);
+ kmem_cache_free(dnotify_mark_cache, dn_mark);
}
static struct fsnotify_ops dnotify_fsnotify_ops = {
@@ -183,15 +176,15 @@ static struct fsnotify_ops dnotify_fsnotify_ops = {
/*
* Called every time a file is closed. Looks first for a dnotify mark on the
- * inode. If one is found run all of the ->dn entries attached to that
+ * inode. If one is found run all of the ->dn structures attached to that
* mark for one relevant to this process closing the file and remove that
* dnotify_struct. If that was the last dnotify_struct also remove the
- * fsnotify_mark_entry.
+ * fsnotify_mark.
*/
void dnotify_flush(struct file *filp, fl_owner_t id)
{
- struct fsnotify_mark_entry *entry;
- struct dnotify_mark_entry *dnentry;
+ struct fsnotify_mark *fsn_mark;
+ struct dnotify_mark *dn_mark;
struct dnotify_struct *dn;
struct dnotify_struct **prev;
struct inode *inode;
@@ -200,38 +193,36 @@ void dnotify_flush(struct file *filp, fl_owner_t id)
if (!S_ISDIR(inode->i_mode))
return;
- spin_lock(&inode->i_lock);
- entry = fsnotify_find_mark_entry(dnotify_group, inode);
- spin_unlock(&inode->i_lock);
- if (!entry)
+ fsn_mark = fsnotify_find_inode_mark(dnotify_group, inode);
+ if (!fsn_mark)
return;
- dnentry = container_of(entry, struct dnotify_mark_entry, fsn_entry);
+ dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark);
mutex_lock(&dnotify_mark_mutex);
- spin_lock(&entry->lock);
- prev = &dnentry->dn;
+ spin_lock(&fsn_mark->lock);
+ prev = &dn_mark->dn;
while ((dn = *prev) != NULL) {
if ((dn->dn_owner == id) && (dn->dn_filp == filp)) {
*prev = dn->dn_next;
kmem_cache_free(dnotify_struct_cache, dn);
- dnotify_recalc_inode_mask(entry);
+ dnotify_recalc_inode_mask(fsn_mark);
break;
}
prev = &dn->dn_next;
}
- spin_unlock(&entry->lock);
+ spin_unlock(&fsn_mark->lock);
/* nothing else could have found us thanks to the dnotify_mark_mutex */
- if (dnentry->dn == NULL)
- fsnotify_destroy_mark_by_entry(entry);
+ if (dn_mark->dn == NULL)
+ fsnotify_destroy_mark(fsn_mark);
fsnotify_recalc_group_mask(dnotify_group);
mutex_unlock(&dnotify_mark_mutex);
- fsnotify_put_mark(entry);
+ fsnotify_put_mark(fsn_mark);
}
/* this conversion is done only at watch creation */
@@ -259,16 +250,16 @@ static __u32 convert_arg(unsigned long arg)
/*
* If multiple processes watch the same inode with dnotify there is only one
- * dnotify mark in inode->i_fsnotify_mark_entries but we chain a dnotify_struct
+ * dnotify mark in inode->i_fsnotify_marks but we chain a dnotify_struct
* onto that mark. This function either attaches the new dnotify_struct onto
* that list, or it |= the mask onto an existing dnofiy_struct.
*/
-static int attach_dn(struct dnotify_struct *dn, struct dnotify_mark_entry *dnentry,
+static int attach_dn(struct dnotify_struct *dn, struct dnotify_mark *dn_mark,
fl_owner_t id, int fd, struct file *filp, __u32 mask)
{
struct dnotify_struct *odn;
- odn = dnentry->dn;
+ odn = dn_mark->dn;
while (odn != NULL) {
/* adding more events to existing dnotify_struct? */
if ((odn->dn_owner == id) && (odn->dn_filp == filp)) {
@@ -283,8 +274,8 @@ static int attach_dn(struct dnotify_struct *dn, struct dnotify_mark_entry *dnent
dn->dn_fd = fd;
dn->dn_filp = filp;
dn->dn_owner = id;
- dn->dn_next = dnentry->dn;
- dnentry->dn = dn;
+ dn->dn_next = dn_mark->dn;
+ dn_mark->dn = dn;
return 0;
}
@@ -296,8 +287,8 @@ static int attach_dn(struct dnotify_struct *dn, struct dnotify_mark_entry *dnent
*/
int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
{
- struct dnotify_mark_entry *new_dnentry, *dnentry;
- struct fsnotify_mark_entry *new_entry, *entry;
+ struct dnotify_mark *new_dn_mark, *dn_mark;
+ struct fsnotify_mark *new_fsn_mark, *fsn_mark;
struct dnotify_struct *dn;
struct inode *inode;
fl_owner_t id = current->files;
@@ -306,7 +297,7 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
__u32 mask;
/* we use these to tell if we need to kfree */
- new_entry = NULL;
+ new_fsn_mark = NULL;
dn = NULL;
if (!dir_notify_enable) {
@@ -336,8 +327,8 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
}
/* new fsnotify mark, we expect most fcntl calls to add a new mark */
- new_dnentry = kmem_cache_alloc(dnotify_mark_entry_cache, GFP_KERNEL);
- if (!new_dnentry) {
+ new_dn_mark = kmem_cache_alloc(dnotify_mark_cache, GFP_KERNEL);
+ if (!new_dn_mark) {
error = -ENOMEM;
goto out_err;
}
@@ -345,29 +336,27 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
/* convert the userspace DN_* "arg" to the internal FS_* defines in fsnotify */
mask = convert_arg(arg);
- /* set up the new_entry and new_dnentry */
- new_entry = &new_dnentry->fsn_entry;
- fsnotify_init_mark(new_entry, dnotify_free_mark);
- new_entry->mask = mask;
- new_dnentry->dn = NULL;
+ /* set up the new_fsn_mark and new_dn_mark */
+ new_fsn_mark = &new_dn_mark->fsn_mark;
+ fsnotify_init_mark(new_fsn_mark, dnotify_free_mark);
+ new_fsn_mark->mask = mask;
+ new_dn_mark->dn = NULL;
/* this is needed to prevent the fcntl/close race described below */
mutex_lock(&dnotify_mark_mutex);
- /* add the new_entry or find an old one. */
- spin_lock(&inode->i_lock);
- entry = fsnotify_find_mark_entry(dnotify_group, inode);
- spin_unlock(&inode->i_lock);
- if (entry) {
- dnentry = container_of(entry, struct dnotify_mark_entry, fsn_entry);
- spin_lock(&entry->lock);
+ /* add the new_fsn_mark or find an old one. */
+ fsn_mark = fsnotify_find_inode_mark(dnotify_group, inode);
+ if (fsn_mark) {
+ dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark);
+ spin_lock(&fsn_mark->lock);
} else {
- fsnotify_add_mark(new_entry, dnotify_group, inode);
- spin_lock(&new_entry->lock);
- entry = new_entry;
- dnentry = new_dnentry;
- /* we used new_entry, so don't free it */
- new_entry = NULL;
+ fsnotify_add_mark(new_fsn_mark, dnotify_group, inode, NULL, 0);
+ spin_lock(&new_fsn_mark->lock);
+ fsn_mark = new_fsn_mark;
+ dn_mark = new_dn_mark;
+ /* we used new_fsn_mark, so don't free it */
+ new_fsn_mark = NULL;
}
rcu_read_lock();
@@ -376,17 +365,17 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
/* if (f != filp) means that we lost a race and another task/thread
* actually closed the fd we are still playing with before we grabbed
- * the dnotify_mark_mutex and entry->lock. Since closing the fd is the
- * only time we clean up the mark entries we need to get our mark off
+ * the dnotify_mark_mutex and fsn_mark->lock. Since closing the fd is the
+ * only time we clean up the marks we need to get our mark off
* the list. */
if (f != filp) {
/* if we added ourselves, shoot ourselves, it's possible that
- * the flush actually did shoot this entry. That's fine too
+ * the flush actually did shoot this fsn_mark. That's fine too
* since multiple calls to destroy_mark is perfectly safe, if
- * we found a dnentry already attached to the inode, just sod
+ * we found a dn_mark already attached to the inode, just sod
* off silently as the flush at close time dealt with it.
*/
- if (dnentry == new_dnentry)
+ if (dn_mark == new_dn_mark)
destroy = 1;
goto out;
}
@@ -394,13 +383,13 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
if (error) {
/* if we added, we must shoot */
- if (dnentry == new_dnentry)
+ if (dn_mark == new_dn_mark)
destroy = 1;
goto out;
}
- error = attach_dn(dn, dnentry, id, fd, filp, mask);
- /* !error means that we attached the dn to the dnentry, so don't free it */
+ error = attach_dn(dn, dn_mark, id, fd, filp, mask);
+ /* !error means that we attached the dn to the dn_mark, so don't free it */
if (!error)
dn = NULL;
/* -EEXIST means that we didn't add this new dn and used an old one.
@@ -408,20 +397,20 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
else if (error == -EEXIST)
error = 0;
- dnotify_recalc_inode_mask(entry);
+ dnotify_recalc_inode_mask(fsn_mark);
out:
- spin_unlock(&entry->lock);
+ spin_unlock(&fsn_mark->lock);
if (destroy)
- fsnotify_destroy_mark_by_entry(entry);
+ fsnotify_destroy_mark(fsn_mark);
fsnotify_recalc_group_mask(dnotify_group);
mutex_unlock(&dnotify_mark_mutex);
- fsnotify_put_mark(entry);
+ fsnotify_put_mark(fsn_mark);
out_err:
- if (new_entry)
- fsnotify_put_mark(new_entry);
+ if (new_fsn_mark)
+ fsnotify_put_mark(new_fsn_mark);
if (dn)
kmem_cache_free(dnotify_struct_cache, dn);
return error;
@@ -430,10 +419,9 @@ out_err:
static int __init dnotify_init(void)
{
dnotify_struct_cache = KMEM_CACHE(dnotify_struct, SLAB_PANIC);
- dnotify_mark_entry_cache = KMEM_CACHE(dnotify_mark_entry, SLAB_PANIC);
+ dnotify_mark_cache = KMEM_CACHE(dnotify_mark, SLAB_PANIC);
- dnotify_group = fsnotify_obtain_group(DNOTIFY_GROUP_NUM,
- 0, &dnotify_fsnotify_ops);
+ dnotify_group = fsnotify_alloc_group(&dnotify_fsnotify_ops);
if (IS_ERR(dnotify_group))
panic("unable to allocate fsnotify group for dnotify\n");
return 0;
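
Editor's note: the dnotify changes above only rename the kernel-internal mark structures; the userspace interface is untouched. For reference, here is a minimal, hypothetical sketch of that interface using the long-standing fcntl(F_NOTIFY) call. The directory, event mask and signal handling are placeholders; fcntl_dirnotify() above already sets the SIGIO owner via __f_setown(), so no F_SETOWN is needed.

/* Minimal dnotify consumer sketch (hypothetical): watch the current
 * directory for creations and renames.  DN_MULTISHOT keeps the watch
 * armed after the first event; dnotify signals with SIGIO by default. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_event;

static void on_sigio(int sig)
{
    (void)sig;
    got_event = 1;
}

int main(void)
{
    int dirfd = open(".", O_RDONLY | O_DIRECTORY);

    if (dirfd < 0)
        return 1;
    signal(SIGIO, on_sigio);
    if (fcntl(dirfd, F_NOTIFY, DN_CREATE | DN_RENAME | DN_MULTISHOT) < 0)
        return 1;

    for (;;) {
        pause();
        if (got_event) {
            got_event = 0;
            printf("directory changed\n");
        }
    }
}
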
diff --git a/fs/notify/fanotify/Kconfig b/fs/notify/fanotify/Kconfig
new file mode 100644
index 000000000000..566de30395c2
--- /dev/null
+++ b/fs/notify/fanotify/Kconfig
@@ -0,0 +1,26 @@
+config FANOTIFY
+ bool "Filesystem wide access notification"
+ select FSNOTIFY
+ select ANON_INODES
+ default y
+ ---help---
+ Say Y here to enable fanotify support. fanotify is a file access
+ notification system which differs from inotify in that it sends
+ an open file descriptor to the userspace listener along with
+ the event.
+
+ If unsure, say Y.
+
+config FANOTIFY_ACCESS_PERMISSIONS
+ bool "fanotify permissions checking"
+ depends on FANOTIFY
+ depends on SECURITY
+ default n
+ ---help---
+ Say Y here if you want fanotify listeners to be able to make permission
+ decisions concerning filesystem events. This is used by fanotify
+ listeners which need to scan files before allowing the system to
+ access them, for example some anti-malware vendors and some
+ hierarchical storage management systems.
+
+ If unsure, say N.
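
Editor's note: the help text above describes fanotify only in prose. As a rough illustration, here is a hedged userspace sketch of the event loop it implies. It assumes the fanotify_init()/fanotify_mark() wrappers and the FAN_EVENT_OK/FAN_EVENT_NEXT helpers from later <sys/fanotify.h> headers, which are not part of this patch; the raw fanotify_init syscall added here still takes a third priority argument and rejects non-zero event_f_flags, and the caller needs CAP_SYS_ADMIN. Names and signatures are illustrative only.

/* Hedged sketch of a fanotify listener (names taken from later userspace
 * headers; the syscall added by this patch differs slightly, see above). */
#include <sys/fanotify.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    struct fanotify_event_metadata buf[64], *ev;
    ssize_t len;
    int fd;

    fd = fanotify_init(FAN_CLOEXEC, O_RDONLY | O_LARGEFILE);
    if (fd < 0)
        return 1;
    /* watch every open anywhere on the mount containing "/" */
    if (fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_MOUNT, FAN_OPEN,
                      AT_FDCWD, "/") < 0)
        return 1;

    while ((len = read(fd, buf, sizeof(buf))) > 0) {
        for (ev = buf; FAN_EVENT_OK(ev, len); ev = FAN_EVENT_NEXT(ev, len)) {
            printf("pid %d, mask 0x%llx, fd %d\n", (int)ev->pid,
                   (unsigned long long)ev->mask, ev->fd);
            if (ev->fd >= 0)
                close(ev->fd); /* each event carries an open fd */
        }
    }
    return 0;
}
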
diff --git a/fs/notify/fanotify/Makefile b/fs/notify/fanotify/Makefile
new file mode 100644
index 000000000000..0999213e7e6e
--- /dev/null
+++ b/fs/notify/fanotify/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_FANOTIFY) += fanotify.o fanotify_user.o
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
new file mode 100644
index 000000000000..bbcfccd4a8ea
--- /dev/null
+++ b/fs/notify/fanotify/fanotify.c
@@ -0,0 +1,255 @@
+#include <linux/fanotify.h>
+#include <linux/fdtable.h>
+#include <linux/fsnotify_backend.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h> /* UINT_MAX */
+#include <linux/mount.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+static bool should_merge(struct fsnotify_event *old, struct fsnotify_event *new)
+{
+ pr_debug("%s: old=%p new=%p\n", __func__, old, new);
+
+ if (old->to_tell == new->to_tell &&
+ old->data_type == new->data_type &&
+ old->tgid == new->tgid) {
+ switch (old->data_type) {
+ case (FSNOTIFY_EVENT_PATH):
+ if ((old->path.mnt == new->path.mnt) &&
+ (old->path.dentry == new->path.dentry))
+ return true;
+ case (FSNOTIFY_EVENT_NONE):
+ return true;
+ default:
+ BUG();
+ };
+ }
+ return false;
+}
+
+/* Note, if we return an event in *arg that a reference is being held... */
+static int fanotify_merge(struct list_head *list,
+ struct fsnotify_event *event,
+ void **arg)
+{
+ struct fsnotify_event_holder *test_holder;
+ struct fsnotify_event *test_event;
+ struct fsnotify_event *new_event;
+ struct fsnotify_event **return_event = (struct fsnotify_event **)arg;
+ int ret = 0;
+
+ pr_debug("%s: list=%p event=%p\n", __func__, list, event);
+
+ *return_event = NULL;
+
+ /* and the list better be locked by something too! */
+
+ list_for_each_entry_reverse(test_holder, list, event_list) {
+ test_event = test_holder->event;
+ if (should_merge(test_event, event)) {
+ fsnotify_get_event(test_event);
+ *return_event = test_event;
+
+ ret = -EEXIST;
+ /* if they are exactly the same we are done */
+ if (test_event->mask == event->mask)
+ goto out;
+
+ /*
+ * if the refcnt == 1 this is the only queue
+ * for this event and so we can update the mask
+ * in place.
+ */
+ if (atomic_read(&test_event->refcnt) == 1) {
+ test_event->mask |= event->mask;
+ goto out;
+ }
+
+ /* can't allocate memory, merge was not possible */
+ new_event = fsnotify_clone_event(test_event);
+ if (unlikely(!new_event)) {
+ ret = 0;
+ goto out;
+ }
+
+ /* we didn't return the test_event, so drop that ref */
+ fsnotify_put_event(test_event);
+ /* the reference we return on new_event is from clone */
+ *return_event = new_event;
+
+ /* build new event and replace it on the list */
+ new_event->mask = (test_event->mask | event->mask);
+ fsnotify_replace_event(test_holder, new_event);
+
+ break;
+ }
+ }
+out:
+ return ret;
+}
+
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+static int fanotify_get_response_from_access(struct fsnotify_group *group,
+ struct fsnotify_event *event)
+{
+ int ret;
+
+ pr_debug("%s: group=%p event=%p\n", __func__, group, event);
+
+ wait_event(group->fanotify_data.access_waitq, event->response);
+
+ /* userspace responded, convert to something usable */
+ spin_lock(&event->lock);
+ switch (event->response) {
+ case FAN_ALLOW:
+ ret = 0;
+ break;
+ case FAN_DENY:
+ default:
+ ret = -EPERM;
+ }
+ event->response = 0;
+ spin_unlock(&event->lock);
+
+ pr_debug("%s: group=%p event=%p about to return ret=%d\n", __func__,
+ group, event, ret);
+
+ return ret;
+}
+#endif
+
+static int fanotify_handle_event(struct fsnotify_group *group, struct fsnotify_event *event)
+{
+ int ret;
+ struct fsnotify_event *notify_event = NULL;
+
+ BUILD_BUG_ON(FAN_ACCESS != FS_ACCESS);
+ BUILD_BUG_ON(FAN_MODIFY != FS_MODIFY);
+ BUILD_BUG_ON(FAN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
+ BUILD_BUG_ON(FAN_CLOSE_WRITE != FS_CLOSE_WRITE);
+ BUILD_BUG_ON(FAN_OPEN != FS_OPEN);
+ BUILD_BUG_ON(FAN_EVENT_ON_CHILD != FS_EVENT_ON_CHILD);
+ BUILD_BUG_ON(FAN_Q_OVERFLOW != FS_Q_OVERFLOW);
+ BUILD_BUG_ON(FAN_OPEN_PERM != FS_OPEN_PERM);
+ BUILD_BUG_ON(FAN_ACCESS_PERM != FS_ACCESS_PERM);
+
+ pr_debug("%s: group=%p event=%p\n", __func__, group, event);
+
+ ret = fsnotify_add_notify_event(group, event, NULL, fanotify_merge,
+ (void **)&notify_event);
+ /* -EEXIST means this event was merged with another, not that it was an error */
+ if (ret == -EEXIST)
+ ret = 0;
+ if (ret)
+ goto out;
+
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+ if (event->mask & FAN_ALL_PERM_EVENTS) {
+ /* if we merged we need to wait on the new event */
+ if (notify_event)
+ event = notify_event;
+ ret = fanotify_get_response_from_access(group, event);
+ }
+#endif
+
+out:
+ if (notify_event)
+ fsnotify_put_event(notify_event);
+ return ret;
+}
+
+static bool should_send_vfsmount_event(struct fsnotify_group *group, struct vfsmount *mnt,
+ struct inode *inode, __u32 mask)
+{
+ struct fsnotify_mark *mnt_mark;
+ struct fsnotify_mark *inode_mark;
+
+ pr_debug("%s: group=%p vfsmount=%p mask=%x\n",
+ __func__, group, mnt, mask);
+
+ mnt_mark = fsnotify_find_vfsmount_mark(group, mnt);
+ if (!mnt_mark)
+ return false;
+
+ mask &= mnt_mark->mask;
+ mask &= ~mnt_mark->ignored_mask;
+
+ if (mask) {
+ inode_mark = fsnotify_find_inode_mark(group, inode);
+ if (inode_mark) {
+ mask &= ~inode_mark->ignored_mask;
+ fsnotify_put_mark(inode_mark);
+ }
+ }
+
+ /* find took a reference */
+ fsnotify_put_mark(mnt_mark);
+
+ return mask;
+}
+
+static bool should_send_inode_event(struct fsnotify_group *group, struct inode *inode,
+ __u32 mask)
+{
+ struct fsnotify_mark *fsn_mark;
+
+ pr_debug("%s: group=%p inode=%p mask=%x\n",
+ __func__, group, inode, mask);
+
+ fsn_mark = fsnotify_find_inode_mark(group, inode);
+ if (!fsn_mark)
+ return false;
+
+ /* if the event is for a child and this inode doesn't care about
+ * events on the child, don't send it! */
+ if ((mask & FS_EVENT_ON_CHILD) &&
+ !(fsn_mark->mask & FS_EVENT_ON_CHILD)) {
+ mask = 0;
+ } else {
+ /*
+ * We care about children, but do we care about this particular
+ * type of event?
+ */
+ mask &= ~FS_EVENT_ON_CHILD;
+ mask &= fsn_mark->mask;
+ mask &= ~fsn_mark->ignored_mask;
+ }
+
+ /* find took a reference */
+ fsnotify_put_mark(fsn_mark);
+
+ return mask;
+}
+
+static bool fanotify_should_send_event(struct fsnotify_group *group, struct inode *to_tell,
+ struct vfsmount *mnt, __u32 mask, void *data,
+ int data_type)
+{
+ pr_debug("%s: group=%p to_tell=%p mnt=%p mask=%x data=%p data_type=%d\n",
+ __func__, group, to_tell, mnt, mask, data, data_type);
+
+ /* sorry, fanotify only gives a damn about files and dirs */
+ if (!S_ISREG(to_tell->i_mode) &&
+ !S_ISDIR(to_tell->i_mode))
+ return false;
+
+ /* if we don't have enough info to send an event to userspace say no */
+ if (data_type != FSNOTIFY_EVENT_PATH)
+ return false;
+
+ if (mnt)
+ return should_send_vfsmount_event(group, mnt, to_tell, mask);
+ else
+ return should_send_inode_event(group, to_tell, mask);
+}
+
+const struct fsnotify_ops fanotify_fsnotify_ops = {
+ .handle_event = fanotify_handle_event,
+ .should_send_event = fanotify_should_send_event,
+ .free_group_priv = NULL,
+ .free_event_priv = NULL,
+ .freeing_mark = NULL,
+};
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
new file mode 100644
index 000000000000..c437bd436e33
--- /dev/null
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -0,0 +1,777 @@
+#include <linux/fanotify.h>
+#include <linux/fcntl.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/fsnotify_backend.h>
+#include <linux/init.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/poll.h>
+#include <linux/security.h>
+#include <linux/syscalls.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+#include <asm/ioctls.h>
+
+extern const struct fsnotify_ops fanotify_fsnotify_ops;
+
+static struct kmem_cache *fanotify_mark_cache __read_mostly;
+static struct kmem_cache *fanotify_response_event_cache __read_mostly;
+
+struct fanotify_response_event {
+ struct list_head list;
+ __s32 fd;
+ struct fsnotify_event *event;
+};
+
+/*
+ * Get an fsnotify notification event if one exists and is small
+ * enough to fit in "count". Return an error pointer if the count
+ * is not large enough.
+ *
+ * Called with the group->notification_mutex held.
+ */
+static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
+ size_t count)
+{
+ BUG_ON(!mutex_is_locked(&group->notification_mutex));
+
+ pr_debug("%s: group=%p count=%zd\n", __func__, group, count);
+
+ if (fsnotify_notify_queue_is_empty(group))
+ return NULL;
+
+ if (FAN_EVENT_METADATA_LEN > count)
+ return ERR_PTR(-EINVAL);
+
+ /* held the notification_mutex the whole time, so this is the
+ * same event we peeked above */
+ return fsnotify_remove_notify_event(group);
+}
+
+static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event)
+{
+ int client_fd;
+ struct dentry *dentry;
+ struct vfsmount *mnt;
+ struct file *new_file;
+
+ pr_debug("%s: group=%p event=%p\n", __func__, group, event);
+
+ client_fd = get_unused_fd();
+ if (client_fd < 0)
+ return client_fd;
+
+ if (event->data_type != FSNOTIFY_EVENT_PATH) {
+ WARN_ON(1);
+ put_unused_fd(client_fd);
+ return -EINVAL;
+ }
+
+ /*
+ * we need a new file handle for the userspace program so it can read even if it was
+ * originally opened O_WRONLY.
+ */
+ dentry = dget(event->path.dentry);
+ mnt = mntget(event->path.mnt);
+ /* it's possible this event was an overflow event. In that case dentry and mnt
+ * are NULL; that's fine, just don't call dentry_open */
+ if (dentry && mnt)
+ new_file = dentry_open(dentry, mnt,
+ O_RDONLY | O_LARGEFILE | FMODE_NONOTIFY,
+ current_cred());
+ else
+ new_file = ERR_PTR(-EOVERFLOW);
+ if (IS_ERR(new_file)) {
+ /*
+ * we still send an event even if we can't open the file. this
+ * can happen when say tasks are gone and we try to open their
+ * /proc files or we try to open a WRONLY file like in sysfs
+ * we just send the errno to userspace since there isn't much
+ * else we can do.
+ */
+ put_unused_fd(client_fd);
+ client_fd = PTR_ERR(new_file);
+ } else {
+ fd_install(client_fd, new_file);
+ }
+
+ return client_fd;
+}
+
+static ssize_t fill_event_metadata(struct fsnotify_group *group,
+ struct fanotify_event_metadata *metadata,
+ struct fsnotify_event *event)
+{
+ pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
+ group, metadata, event);
+
+ metadata->event_len = FAN_EVENT_METADATA_LEN;
+ metadata->vers = FANOTIFY_METADATA_VERSION;
+ metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS;
+ metadata->pid = pid_vnr(event->tgid);
+ metadata->fd = create_fd(group, event);
+
+ return metadata->fd;
+}
+
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+static struct fanotify_response_event *dequeue_re(struct fsnotify_group *group,
+ __s32 fd)
+{
+ struct fanotify_response_event *re, *return_re = NULL;
+
+ mutex_lock(&group->fanotify_data.access_mutex);
+ list_for_each_entry(re, &group->fanotify_data.access_list, list) {
+ if (re->fd != fd)
+ continue;
+
+ list_del_init(&re->list);
+ return_re = re;
+ break;
+ }
+ mutex_unlock(&group->fanotify_data.access_mutex);
+
+ pr_debug("%s: found return_re=%p\n", __func__, return_re);
+
+ return return_re;
+}
+
+static int process_access_response(struct fsnotify_group *group,
+ struct fanotify_response *response_struct)
+{
+ struct fanotify_response_event *re;
+ __s32 fd = response_struct->fd;
+ __u32 response = response_struct->response;
+
+ pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
+ fd, response);
+ /*
+ * make sure the response is valid, if invalid we do nothing and either
+ * userspace can send a valid response or we will clean it up after the
+ * timeout
+ */
+ switch (response) {
+ case FAN_ALLOW:
+ case FAN_DENY:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (fd < 0)
+ return -EINVAL;
+
+ re = dequeue_re(group, fd);
+ if (!re)
+ return -ENOENT;
+
+ re->event->response = response;
+
+ wake_up(&group->fanotify_data.access_waitq);
+
+ kmem_cache_free(fanotify_response_event_cache, re);
+
+ return 0;
+}
+
+static int prepare_for_access_response(struct fsnotify_group *group,
+ struct fsnotify_event *event,
+ __s32 fd)
+{
+ struct fanotify_response_event *re;
+
+ if (!(event->mask & FAN_ALL_PERM_EVENTS))
+ return 0;
+
+ re = kmem_cache_alloc(fanotify_response_event_cache, GFP_KERNEL);
+ if (!re)
+ return -ENOMEM;
+
+ re->event = event;
+ re->fd = fd;
+
+ mutex_lock(&group->fanotify_data.access_mutex);
+ list_add_tail(&re->list, &group->fanotify_data.access_list);
+ mutex_unlock(&group->fanotify_data.access_mutex);
+
+ return 0;
+}
+
+static void remove_access_response(struct fsnotify_group *group,
+ struct fsnotify_event *event,
+ __s32 fd)
+{
+ struct fanotify_response_event *re;
+
+ if (!(event->mask & FAN_ALL_PERM_EVENTS))
+ return;
+
+ re = dequeue_re(group, fd);
+ if (!re)
+ return;
+
+ BUG_ON(re->event != event);
+
+ kmem_cache_free(fanotify_response_event_cache, re);
+
+ return;
+}
+#else
+static int prepare_for_access_response(struct fsnotify_group *group,
+ struct fsnotify_event *event,
+ __s32 fd)
+{
+ return 0;
+}
+
+static void remove_access_response(struct fsnotify_group *group,
+ struct fsnotify_event *event,
+ __s32 fd)
+{
+ return;
+}
+#endif
+
+static ssize_t copy_event_to_user(struct fsnotify_group *group,
+ struct fsnotify_event *event,
+ char __user *buf)
+{
+ struct fanotify_event_metadata fanotify_event_metadata;
+ int fd, ret;
+
+ pr_debug("%s: group=%p event=%p\n", __func__, group, event);
+
+ fd = fill_event_metadata(group, &fanotify_event_metadata, event);
+ if (fd < 0)
+ return fd;
+
+ ret = prepare_for_access_response(group, event, fd);
+ if (ret)
+ goto out_close_fd;
+
+ ret = -EFAULT;
+ if (copy_to_user(buf, &fanotify_event_metadata, FAN_EVENT_METADATA_LEN))
+ goto out_kill_access_response;
+
+ return FAN_EVENT_METADATA_LEN;
+
+out_kill_access_response:
+ remove_access_response(group, event, fd);
+out_close_fd:
+ sys_close(fd);
+ return ret;
+}
+
+/* fanotify userspace file descriptor functions */
+static unsigned int fanotify_poll(struct file *file, poll_table *wait)
+{
+ struct fsnotify_group *group = file->private_data;
+ int ret = 0;
+
+ poll_wait(file, &group->notification_waitq, wait);
+ mutex_lock(&group->notification_mutex);
+ if (!fsnotify_notify_queue_is_empty(group))
+ ret = POLLIN | POLLRDNORM;
+ mutex_unlock(&group->notification_mutex);
+
+ return ret;
+}
+
+static ssize_t fanotify_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct fsnotify_group *group;
+ struct fsnotify_event *kevent;
+ char __user *start;
+ int ret;
+ DEFINE_WAIT(wait);
+
+ start = buf;
+ group = file->private_data;
+
+ pr_debug("%s: group=%p\n", __func__, group);
+
+ while (1) {
+ prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);
+
+ mutex_lock(&group->notification_mutex);
+ kevent = get_one_event(group, count);
+ mutex_unlock(&group->notification_mutex);
+
+ if (kevent) {
+ ret = PTR_ERR(kevent);
+ if (IS_ERR(kevent))
+ break;
+ ret = copy_event_to_user(group, kevent, buf);
+ fsnotify_put_event(kevent);
+ if (ret < 0)
+ break;
+ buf += ret;
+ count -= ret;
+ continue;
+ }
+
+ ret = -EAGAIN;
+ if (file->f_flags & O_NONBLOCK)
+ break;
+ ret = -EINTR;
+ if (signal_pending(current))
+ break;
+
+ if (start != buf)
+ break;
+
+ schedule();
+ }
+
+ finish_wait(&group->notification_waitq, &wait);
+ if (start != buf && ret != -EFAULT)
+ ret = buf - start;
+ return ret;
+}
+
+static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
+{
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+ struct fanotify_response response = { .fd = -1, .response = -1 };
+ struct fsnotify_group *group;
+ int ret;
+
+ group = file->private_data;
+
+ if (count > sizeof(response))
+ count = sizeof(response);
+
+ pr_debug("%s: group=%p count=%zu\n", __func__, group, count);
+
+ if (copy_from_user(&response, buf, count))
+ return -EFAULT;
+
+ ret = process_access_response(group, &response);
+ if (ret < 0)
+ count = ret;
+
+ return count;
+#else
+ return -EINVAL;
+#endif
+}
+
+static int fanotify_release(struct inode *ignored, struct file *file)
+{
+ struct fsnotify_group *group = file->private_data;
+
+ pr_debug("%s: file=%p group=%p\n", __func__, file, group);
+
+ /* matches the fanotify_init->fsnotify_alloc_group */
+ fsnotify_put_group(group);
+
+ return 0;
+}
+
+static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct fsnotify_group *group;
+ struct fsnotify_event_holder *holder;
+ void __user *p;
+ int ret = -ENOTTY;
+ size_t send_len = 0;
+
+ group = file->private_data;
+
+ p = (void __user *) arg;
+
+ switch (cmd) {
+ case FIONREAD:
+ mutex_lock(&group->notification_mutex);
+ list_for_each_entry(holder, &group->notification_list, event_list)
+ send_len += FAN_EVENT_METADATA_LEN;
+ mutex_unlock(&group->notification_mutex);
+ ret = put_user(send_len, (int __user *) p);
+ break;
+ }
+
+ return ret;
+}
+
+static const struct file_operations fanotify_fops = {
+ .poll = fanotify_poll,
+ .read = fanotify_read,
+ .write = fanotify_write,
+ .fasync = NULL,
+ .release = fanotify_release,
+ .unlocked_ioctl = fanotify_ioctl,
+ .compat_ioctl = fanotify_ioctl,
+};
+
+static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
+{
+ kmem_cache_free(fanotify_mark_cache, fsn_mark);
+}
+
+static int fanotify_find_path(int dfd, const char __user *filename,
+ struct path *path, unsigned int flags)
+{
+ int ret;
+
+ pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
+ dfd, filename, flags);
+
+ if (filename == NULL) {
+ struct file *file;
+ int fput_needed;
+
+ ret = -EBADF;
+ file = fget_light(dfd, &fput_needed);
+ if (!file)
+ goto out;
+
+ ret = -ENOTDIR;
+ if ((flags & FAN_MARK_ONLYDIR) &&
+ !(S_ISDIR(file->f_path.dentry->d_inode->i_mode))) {
+ fput_light(file, fput_needed);
+ goto out;
+ }
+
+ *path = file->f_path;
+ path_get(path);
+ fput_light(file, fput_needed);
+ } else {
+ unsigned int lookup_flags = 0;
+
+ if (!(flags & FAN_MARK_DONT_FOLLOW))
+ lookup_flags |= LOOKUP_FOLLOW;
+ if (flags & FAN_MARK_ONLYDIR)
+ lookup_flags |= LOOKUP_DIRECTORY;
+
+ ret = user_path_at(dfd, filename, lookup_flags, path);
+ if (ret)
+ goto out;
+ }
+
+ /* you can only watch an inode if you have read permissions on it */
+ ret = inode_permission(path->dentry->d_inode, MAY_READ);
+ if (ret)
+ path_put(path);
+out:
+ return ret;
+}
+
+static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
+ __u32 mask,
+ unsigned int flags)
+{
+ __u32 oldmask;
+
+ spin_lock(&fsn_mark->lock);
+ if (!(flags & FAN_MARK_IGNORED_MASK)) {
+ oldmask = fsn_mark->mask;
+ fsnotify_set_mark_mask_locked(fsn_mark, (oldmask & ~mask));
+ } else {
+ oldmask = fsn_mark->ignored_mask;
+ fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask & ~mask));
+ }
+ spin_unlock(&fsn_mark->lock);
+
+ if (!(oldmask & ~mask))
+ fsnotify_destroy_mark(fsn_mark);
+
+ return mask & oldmask;
+}
+
+static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
+ struct vfsmount *mnt, __u32 mask,
+ unsigned int flags)
+{
+ struct fsnotify_mark *fsn_mark = NULL;
+ __u32 removed;
+
+ fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
+ if (!fsn_mark)
+ return -ENOENT;
+
+ removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
+ fsnotify_put_mark(fsn_mark);
+ if (removed & group->mask)
+ fsnotify_recalc_group_mask(group);
+ if (removed & mnt->mnt_fsnotify_mask)
+ fsnotify_recalc_vfsmount_mask(mnt);
+
+ return 0;
+}
+
+static int fanotify_remove_inode_mark(struct fsnotify_group *group,
+ struct inode *inode, __u32 mask,
+ unsigned int flags)
+{
+ struct fsnotify_mark *fsn_mark = NULL;
+ __u32 removed;
+
+ fsn_mark = fsnotify_find_inode_mark(group, inode);
+ if (!fsn_mark)
+ return -ENOENT;
+
+ removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
+ /* matches the fsnotify_find_inode_mark() */
+ fsnotify_put_mark(fsn_mark);
+
+ if (removed & group->mask)
+ fsnotify_recalc_group_mask(group);
+ if (removed & inode->i_fsnotify_mask)
+ fsnotify_recalc_inode_mask(inode);
+
+ return 0;
+}
+
+static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
+ __u32 mask,
+ unsigned int flags)
+{
+ __u32 oldmask;
+
+ spin_lock(&fsn_mark->lock);
+ if (!(flags & FAN_MARK_IGNORED_MASK)) {
+ oldmask = fsn_mark->mask;
+ fsnotify_set_mark_mask_locked(fsn_mark, (oldmask | mask));
+ } else {
+ oldmask = fsn_mark->ignored_mask;
+ fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask | mask));
+ if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
+ fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
+ }
+ spin_unlock(&fsn_mark->lock);
+
+ return mask & ~oldmask;
+}
+
+static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
+ struct vfsmount *mnt, __u32 mask,
+ unsigned int flags)
+{
+ struct fsnotify_mark *fsn_mark;
+ __u32 added;
+
+ fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
+ if (!fsn_mark) {
+ int ret;
+
+ fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
+ if (!fsn_mark)
+ return -ENOMEM;
+
+ fsnotify_init_mark(fsn_mark, fanotify_free_mark);
+ ret = fsnotify_add_mark(fsn_mark, group, NULL, mnt, 0);
+ if (ret) {
+ fanotify_free_mark(fsn_mark);
+ return ret;
+ }
+ }
+ added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
+ fsnotify_put_mark(fsn_mark);
+ if (added) {
+ if (added & ~group->mask)
+ fsnotify_recalc_group_mask(group);
+ if (added & ~mnt->mnt_fsnotify_mask)
+ fsnotify_recalc_vfsmount_mask(mnt);
+ }
+ return 0;
+}
+
+static int fanotify_add_inode_mark(struct fsnotify_group *group,
+ struct inode *inode, __u32 mask,
+ unsigned int flags)
+{
+ struct fsnotify_mark *fsn_mark;
+ __u32 added;
+
+ pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);
+
+ fsn_mark = fsnotify_find_inode_mark(group, inode);
+ if (!fsn_mark) {
+ int ret;
+
+ fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
+ if (!fsn_mark)
+ return -ENOMEM;
+
+ fsnotify_init_mark(fsn_mark, fanotify_free_mark);
+ ret = fsnotify_add_mark(fsn_mark, group, inode, NULL, 0);
+ if (ret) {
+ fanotify_free_mark(fsn_mark);
+ return ret;
+ }
+ }
+ added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
+ fsnotify_put_mark(fsn_mark);
+ if (added) {
+ if (added & ~group->mask)
+ fsnotify_recalc_group_mask(group);
+ if (added & ~inode->i_fsnotify_mask)
+ fsnotify_recalc_inode_mask(inode);
+ }
+ return 0;
+}
+
+/* fanotify syscalls */
+SYSCALL_DEFINE3(fanotify_init, unsigned int, flags, unsigned int, event_f_flags,
+ unsigned int, priority)
+{
+ struct fsnotify_group *group;
+ int f_flags, fd;
+
+ pr_debug("%s: flags=%d event_f_flags=%d priority=%d\n",
+ __func__, flags, event_f_flags, priority);
+
+ if (event_f_flags)
+ return -EINVAL;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (flags & ~FAN_ALL_INIT_FLAGS)
+ return -EINVAL;
+
+ f_flags = O_RDWR | FMODE_NONOTIFY;
+ if (flags & FAN_CLOEXEC)
+ f_flags |= O_CLOEXEC;
+ if (flags & FAN_NONBLOCK)
+ f_flags |= O_NONBLOCK;
+
+ /* fsnotify_alloc_group takes a ref. Dropped in fanotify_release */
+ group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
+ if (IS_ERR(group))
+ return PTR_ERR(group);
+
+ group->priority = priority;
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+ mutex_init(&group->fanotify_data.access_mutex);
+ init_waitqueue_head(&group->fanotify_data.access_waitq);
+ INIT_LIST_HEAD(&group->fanotify_data.access_list);
+#endif
+
+ fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
+ if (fd < 0)
+ goto out_put_group;
+
+ return fd;
+
+out_put_group:
+ fsnotify_put_group(group);
+ return fd;
+}
+
+SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags,
+ __u64 mask, int dfd,
+ const char __user * pathname)
+{
+ struct inode *inode = NULL;
+ struct vfsmount *mnt = NULL;
+ struct fsnotify_group *group;
+ struct file *filp;
+ struct path path;
+ int ret, fput_needed;
+
+ pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
+ __func__, fanotify_fd, flags, dfd, pathname, mask);
+
+ /* we only use the lower 32 bits as of right now. */
+ if (mask & ((__u64)0xffffffff << 32))
+ return -EINVAL;
+
+ if (flags & ~FAN_ALL_MARK_FLAGS)
+ return -EINVAL;
+ switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
+ case FAN_MARK_ADD:
+ case FAN_MARK_REMOVE:
+ case FAN_MARK_FLUSH:
+ break;
+ default:
+ return -EINVAL;
+ }
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+ if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD))
+#else
+ if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD))
+#endif
+ return -EINVAL;
+
+ filp = fget_light(fanotify_fd, &fput_needed);
+ if (unlikely(!filp))
+ return -EBADF;
+
+ /* verify that this is indeed an fanotify instance */
+ ret = -EINVAL;
+ if (unlikely(filp->f_op != &fanotify_fops))
+ goto fput_and_out;
+
+ ret = fanotify_find_path(dfd, pathname, &path, flags);
+ if (ret)
+ goto fput_and_out;
+
+ /* inode held in place by reference to path; group by fget on fd */
+ if (!(flags & FAN_MARK_MOUNT))
+ inode = path.dentry->d_inode;
+ else
+ mnt = path.mnt;
+ group = filp->private_data;
+
+ /* create/update/remove an inode or vfsmount mark */
+ switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
+ case FAN_MARK_ADD:
+ if (flags & FAN_MARK_MOUNT)
+ ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
+ else
+ ret = fanotify_add_inode_mark(group, inode, mask, flags);
+ break;
+ case FAN_MARK_REMOVE:
+ if (flags & FAN_MARK_MOUNT)
+ ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
+ else
+ ret = fanotify_remove_inode_mark(group, inode, mask, flags);
+ break;
+ case FAN_MARK_FLUSH:
+ if (flags & FAN_MARK_MOUNT)
+ fsnotify_clear_vfsmount_marks_by_group(group);
+ else
+ fsnotify_clear_inode_marks_by_group(group);
+ fsnotify_recalc_group_mask(group);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ path_put(&path);
+fput_and_out:
+ fput_light(filp, fput_needed);
+ return ret;
+}
+
+#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
+asmlinkage long SyS_fanotify_mark(long fanotify_fd, long flags, __u64 mask,
+ long dfd, long pathname)
+{
+ return SYSC_fanotify_mark((int) fanotify_fd, (unsigned int) flags,
+ mask, (int) dfd,
+ (const char __user *) pathname);
+}
+SYSCALL_ALIAS(sys_fanotify_mark, SyS_fanotify_mark);
+#endif
+
+/*
+ * fanotify_user_setup - Our initialization function. Note that we cannot return
+ * error because we have compiled-in VFS hooks. So an (unlikely) failure here
+ * must result in panic().
+ */
+static int __init fanotify_user_setup(void)
+{
+ fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
+ fanotify_response_event_cache = KMEM_CACHE(fanotify_response_event,
+ SLAB_PANIC);
+
+ return 0;
+}
+device_initcall(fanotify_user_setup);
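
(For orientation only, not part of the patch: a minimal userspace sketch of the two syscalls added above. It assumes the __NR_fanotify_init/__NR_fanotify_mark numbers are wired up for the build architecture and that the FAN_* constants come from this series' <linux/fanotify.h>; there is no libc wrapper at this point, the caller needs CAP_SYS_ADMIN, and event_f_flags must be 0 or fanotify_init() returns -EINVAL.)

#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/fanotify.h>

int main(void)
{
        /* flags, event_f_flags (must be 0 here), priority */
        int fd = syscall(__NR_fanotify_init, FAN_CLOEXEC, 0, 0);
        if (fd < 0) {
                perror("fanotify_init");
                return 1;
        }

        /*
         * Watch for opens anywhere on the mount containing /tmp.  The mask
         * is a __u64; passing it through syscall(2) like this is only
         * correct on 64-bit, 32-bit callers need the arch-specific layout.
         */
        if (syscall(__NR_fanotify_mark, fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
                    (uint64_t)FAN_OPEN, AT_FDCWD, "/tmp") < 0) {
                perror("fanotify_mark");
                close(fd);
                return 1;
        }

        /* events would then be read() from fd; omitted here */
        close(fd);
        return 0;
}
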
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index fcc2f064af83..9810babb1a3b 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -21,6 +21,7 @@
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/mount.h>
#include <linux/srcu.h>
#include <linux/fsnotify_backend.h>
@@ -35,6 +36,11 @@ void __fsnotify_inode_delete(struct inode *inode)
}
EXPORT_SYMBOL_GPL(__fsnotify_inode_delete);
+void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
+{
+ fsnotify_clear_marks_by_mount(mnt);
+}
+
/*
* Given an inode, first check if we care what happens to our children. Inotify
* and dnotify both tell their parents about events. If we care about any event
@@ -78,13 +84,16 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode)
}
/* Notify this dentry's parent about a child's events. */
-void __fsnotify_parent(struct dentry *dentry, __u32 mask)
+void __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
{
struct dentry *parent;
struct inode *p_inode;
bool send = false;
bool should_update_children = false;
+ if (!dentry)
+ dentry = path->dentry;
+
if (!(dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED))
return;
@@ -115,8 +124,12 @@ void __fsnotify_parent(struct dentry *dentry, __u32 mask)
* specifies these are events which came from a child. */
mask |= FS_EVENT_ON_CHILD;
- fsnotify(p_inode, mask, dentry->d_inode, FSNOTIFY_EVENT_INODE,
- dentry->d_name.name, 0);
+ if (path)
+ fsnotify(p_inode, mask, path, FSNOTIFY_EVENT_PATH,
+ dentry->d_name.name, 0);
+ else
+ fsnotify(p_inode, mask, dentry->d_inode, FSNOTIFY_EVENT_INODE,
+ dentry->d_name.name, 0);
dput(parent);
}
@@ -127,51 +140,127 @@ void __fsnotify_parent(struct dentry *dentry, __u32 mask)
}
EXPORT_SYMBOL_GPL(__fsnotify_parent);
+void __fsnotify_flush_ignored_mask(struct inode *inode, void *data, int data_is)
+{
+ struct fsnotify_mark *mark;
+ struct hlist_node *node;
+
+ if (!hlist_empty(&inode->i_fsnotify_marks)) {
+ spin_lock(&inode->i_lock);
+ hlist_for_each_entry(mark, node, &inode->i_fsnotify_marks, i.i_list) {
+ if (!(mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY))
+ mark->ignored_mask = 0;
+ }
+ spin_unlock(&inode->i_lock);
+ }
+
+ if (data_is == FSNOTIFY_EVENT_PATH) {
+ struct vfsmount *mnt;
+
+ mnt = ((struct path *)data)->mnt;
+ if (mnt && !hlist_empty(&mnt->mnt_fsnotify_marks)) {
+ spin_lock(&mnt->mnt_root->d_lock);
+ hlist_for_each_entry(mark, node, &mnt->mnt_fsnotify_marks, m.m_list) {
+ if (!(mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY))
+ mark->ignored_mask = 0;
+ }
+ spin_unlock(&mnt->mnt_root->d_lock);
+ }
+ }
+}
+
+static int send_to_group(struct fsnotify_group *group, struct inode *to_tell,
+ struct vfsmount *mnt, __u32 mask, void *data,
+ int data_is, u32 cookie, const unsigned char *file_name,
+ struct fsnotify_event **event)
+{
+ if (!group->ops->should_send_event(group, to_tell, mnt, mask,
+ data, data_is))
+ return 0;
+ if (!*event) {
+ *event = fsnotify_create_event(to_tell, mask, data,
+ data_is, file_name,
+ cookie, GFP_KERNEL);
+ if (!*event)
+ return -ENOMEM;
+ }
+ return group->ops->handle_event(group, *event);
+}
+
+static bool needed_by_vfsmount(__u32 test_mask, struct vfsmount *mnt)
+{
+ if (!mnt)
+ return false;
+
+ return (test_mask & mnt->mnt_fsnotify_mask);
+}
+
/*
* This is the main call to fsnotify. The VFS calls into hook specific functions
 * in linux/fsnotify.h. Those functions then in turn call here. From here we call
 * out to all of the registered fsnotify_groups. Those groups can then use the
* notification event in whatever means they feel necessary.
*/
-void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is, const char *file_name, u32 cookie)
+int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
+ const unsigned char *file_name, u32 cookie)
{
struct fsnotify_group *group;
struct fsnotify_event *event = NULL;
- int idx;
+ struct vfsmount *mnt = NULL;
+ int idx, ret = 0;
 /* global tests shouldn't care about events on a child, only the specific event */
__u32 test_mask = (mask & ~FS_EVENT_ON_CHILD);
- if (list_empty(&fsnotify_groups))
- return;
+ /* if no fsnotify listeners, nothing to do */
+ if (list_empty(&fsnotify_inode_groups) &&
+ list_empty(&fsnotify_vfsmount_groups))
+ return 0;
+
+ if (mask & FS_MODIFY)
+ __fsnotify_flush_ignored_mask(to_tell, data, data_is);
- if (!(test_mask & fsnotify_mask))
- return;
+ /* if none of the directed listeners or vfsmount listeners care */
+ if (!(test_mask & fsnotify_inode_mask) &&
+ !(test_mask & fsnotify_vfsmount_mask))
+ return 0;
+
+ if (data_is == FSNOTIFY_EVENT_PATH)
+ mnt = ((struct path *)data)->mnt;
+
+ /* if this inode's directed listeners don't care and nothing on the vfsmount
+ * listeners list cares, nothing to do */
+ if (!(test_mask & to_tell->i_fsnotify_mask) &&
+ !needed_by_vfsmount(test_mask, mnt))
+ return 0;
- if (!(test_mask & to_tell->i_fsnotify_mask))
- return;
/*
* SRCU!! the groups list is very very much read only and the path is
* very hot. The VAST majority of events are not going to need to do
* anything other than walk the list so it's crazy to pre-allocate.
*/
idx = srcu_read_lock(&fsnotify_grp_srcu);
- list_for_each_entry_rcu(group, &fsnotify_groups, group_list) {
- if (test_mask & group->mask) {
- if (!group->ops->should_send_event(group, to_tell, mask))
- continue;
- if (!event) {
- event = fsnotify_create_event(to_tell, mask, data,
- data_is, file_name, cookie,
- GFP_KERNEL);
- /* shit, we OOM'd and now we can't tell, maybe
- * someday someone else will want to do something
- * here */
- if (!event)
- break;
+
+ if (test_mask & to_tell->i_fsnotify_mask) {
+ list_for_each_entry_rcu(group, &fsnotify_inode_groups, inode_group_list) {
+ if (test_mask & group->mask) {
+ ret = send_to_group(group, to_tell, NULL, mask, data, data_is,
+ cookie, file_name, &event);
+ if (ret)
+ goto out;
}
- group->ops->handle_event(group, event);
}
}
+ if (needed_by_vfsmount(test_mask, mnt)) {
+ list_for_each_entry_rcu(group, &fsnotify_vfsmount_groups, vfsmount_group_list) {
+ if (test_mask & group->mask) {
+ ret = send_to_group(group, to_tell, mnt, mask, data, data_is,
+ cookie, file_name, &event);
+ if (ret)
+ goto out;
+ }
+ }
+ }
+out:
srcu_read_unlock(&fsnotify_grp_srcu, idx);
/*
* fsnotify_create_event() took a reference so the event can't be cleaned
@@ -179,6 +268,8 @@ void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is, const
*/
if (event)
fsnotify_put_event(event);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(fsnotify);
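
(Not part of the patch: to make the contract send_to_group() relies on concrete, here is a hedged sketch of the two group callbacks it invokes. Only the argument lists are taken from the calls above; the callback names, the bool return type of should_send_event and the trivial bodies are illustrative assumptions.)

#include <linux/fsnotify_backend.h>

static bool example_should_send_event(struct fsnotify_group *group,
                                      struct inode *to_tell,
                                      struct vfsmount *mnt, __u32 mask,
                                      void *data, int data_is)
{
        /* cheap filter: returning false skips event allocation entirely */
        return !!(mask & group->mask);
}

static int example_handle_event(struct fsnotify_group *group,
                                struct fsnotify_event *event)
{
        /* queue or act on the event; a non-zero return aborts the walk */
        return 0;
}

static const struct fsnotify_ops example_ops = {
        .should_send_event = example_should_send_event,
        .handle_event      = example_handle_event,
};
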
diff --git a/fs/notify/fsnotify.h b/fs/notify/fsnotify.h
index 4dc240824b2d..1be54f6f9e7d 100644
--- a/fs/notify/fsnotify.h
+++ b/fs/notify/fsnotify.h
@@ -8,19 +8,44 @@
/* protects reads of fsnotify_groups */
extern struct srcu_struct fsnotify_grp_srcu;
-/* all groups which receive fsnotify events */
-extern struct list_head fsnotify_groups;
-/* all bitwise OR of all event types (FS_*) for all fsnotify_groups */
-extern __u32 fsnotify_mask;
+/* all groups which receive inode fsnotify events */
+extern struct list_head fsnotify_inode_groups;
+/* all groups which receive vfsmount fsnotify events */
+extern struct list_head fsnotify_vfsmount_groups;
+/* all bitwise OR of all event types (FS_*) for all fsnotify_inode_groups */
+extern __u32 fsnotify_inode_mask;
+/* all bitwise OR of all event types (FS_*) for all fsnotify_vfsmount_groups */
+extern __u32 fsnotify_vfsmount_mask;
/* destroy all events sitting in this groups notification queue */
extern void fsnotify_flush_notify(struct fsnotify_group *group);
+extern void fsnotify_set_inode_mark_mask_locked(struct fsnotify_mark *fsn_mark,
+ __u32 mask);
+/* add a mark to an inode */
+extern int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
+ struct fsnotify_group *group, struct inode *inode,
+ int allow_dups);
+/* add a mark to a vfsmount */
+extern int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
+ struct fsnotify_group *group, struct vfsmount *mnt,
+ int allow_dups);
+
+/* add a group to the inode group list */
+extern void fsnotify_add_inode_group(struct fsnotify_group *group);
+/* add a group to the vfsmount group list */
+extern void fsnotify_add_vfsmount_group(struct fsnotify_group *group);
/* final kfree of a group */
extern void fsnotify_final_destroy_group(struct fsnotify_group *group);
+/* vfsmount specific destruction of a mark */
+extern void fsnotify_destroy_vfsmount_mark(struct fsnotify_mark *mark);
+/* inode specific destruction of a mark */
+extern void fsnotify_destroy_inode_mark(struct fsnotify_mark *mark);
/* run the list of all marks associated with inode and flag them to be freed */
extern void fsnotify_clear_marks_by_inode(struct inode *inode);
+/* run the list of all marks associated with vfsmount and flag them to be freed */
+extern void fsnotify_clear_marks_by_mount(struct vfsmount *mnt);
/*
* update the dentry->d_flags of all of inode's children to indicate if inode cares
* about events that happen to its children.
diff --git a/fs/notify/group.c b/fs/notify/group.c
index 0e1677144bc5..ada913fd4f7f 100644
--- a/fs/notify/group.c
+++ b/fs/notify/group.c
@@ -32,10 +32,14 @@
static DEFINE_MUTEX(fsnotify_grp_mutex);
/* protects reads while running the fsnotify_groups list */
struct srcu_struct fsnotify_grp_srcu;
-/* all groups registered to receive filesystem notifications */
-LIST_HEAD(fsnotify_groups);
+/* all groups registered to receive inode filesystem notifications */
+LIST_HEAD(fsnotify_inode_groups);
+/* all groups registered to receive mount point filesystem notifications */
+LIST_HEAD(fsnotify_vfsmount_groups);
/* bitwise OR of all events (FS_*) interesting to some group on this system */
-__u32 fsnotify_mask;
+__u32 fsnotify_inode_mask;
+/* bitwise OR of all events (FS_*) interesting to some group on this system */
+__u32 fsnotify_vfsmount_mask;
/*
 * When a new group registers or changes its set of interesting events
@@ -44,14 +48,20 @@ __u32 fsnotify_mask;
void fsnotify_recalc_global_mask(void)
{
struct fsnotify_group *group;
- __u32 mask = 0;
+ __u32 inode_mask = 0;
+ __u32 vfsmount_mask = 0;
int idx;
idx = srcu_read_lock(&fsnotify_grp_srcu);
- list_for_each_entry_rcu(group, &fsnotify_groups, group_list)
- mask |= group->mask;
+ list_for_each_entry_rcu(group, &fsnotify_inode_groups, inode_group_list)
+ inode_mask |= group->mask;
+ list_for_each_entry_rcu(group, &fsnotify_vfsmount_groups, vfsmount_group_list)
+ vfsmount_mask |= group->mask;
+
srcu_read_unlock(&fsnotify_grp_srcu, idx);
- fsnotify_mask = mask;
+
+ fsnotify_inode_mask = inode_mask;
+ fsnotify_vfsmount_mask = vfsmount_mask;
}
/*
@@ -64,11 +74,11 @@ void fsnotify_recalc_group_mask(struct fsnotify_group *group)
{
__u32 mask = 0;
__u32 old_mask = group->mask;
- struct fsnotify_mark_entry *entry;
+ struct fsnotify_mark *mark;
spin_lock(&group->mark_lock);
- list_for_each_entry(entry, &group->mark_entries, g_list)
- mask |= entry->mask;
+ list_for_each_entry(mark, &group->marks_list, g_list)
+ mask |= mark->mask;
spin_unlock(&group->mark_lock);
group->mask = mask;
@@ -77,13 +87,60 @@ void fsnotify_recalc_group_mask(struct fsnotify_group *group)
fsnotify_recalc_global_mask();
}
-/*
- * Take a reference to a group so things found under the fsnotify_grp_mutex
- * can't get freed under us
- */
-static void fsnotify_get_group(struct fsnotify_group *group)
+void fsnotify_add_vfsmount_group(struct fsnotify_group *group)
{
- atomic_inc(&group->refcnt);
+ struct fsnotify_group *group_iter;
+ unsigned int priority = group->priority;
+
+ mutex_lock(&fsnotify_grp_mutex);
+
+ if (!group->on_vfsmount_group_list) {
+ list_for_each_entry(group_iter, &fsnotify_vfsmount_groups,
+ vfsmount_group_list) {
+ /* insert in front of this one? */
+ if (priority < group_iter->priority) {
+ /* list_add_tail() insert in front of group_iter */
+ list_add_tail_rcu(&group->vfsmount_group_list,
+ &group_iter->vfsmount_group_list);
+ goto out;
+ }
+ }
+
+ /* apparently we need to be the last entry */
+ list_add_tail_rcu(&group->vfsmount_group_list, &fsnotify_vfsmount_groups);
+ }
+out:
+ group->on_vfsmount_group_list = 1;
+
+ mutex_unlock(&fsnotify_grp_mutex);
+}
+
+void fsnotify_add_inode_group(struct fsnotify_group *group)
+{
+ struct fsnotify_group *group_iter;
+ unsigned int priority = group->priority;
+
+ mutex_lock(&fsnotify_grp_mutex);
+
+ /* add to global group list, priority 0 first, UINT_MAX last */
+ if (!group->on_inode_group_list) {
+ list_for_each_entry(group_iter, &fsnotify_inode_groups,
+ inode_group_list) {
+ if (priority < group_iter->priority) {
+ /* list_add_tail() insert in front of group_iter */
+ list_add_tail_rcu(&group->inode_group_list,
+ &group_iter->inode_group_list);
+ goto out;
+ }
+ }
+
+ /* apparently we need to be the last entry */
+ list_add_tail_rcu(&group->inode_group_list, &fsnotify_inode_groups);
+ }
+out:
+ group->on_inode_group_list = 1;
+
+ mutex_unlock(&fsnotify_grp_mutex);
}
/*
@@ -110,7 +167,7 @@ void fsnotify_final_destroy_group(struct fsnotify_group *group)
*/
static void fsnotify_destroy_group(struct fsnotify_group *group)
{
- /* clear all inode mark entries for this group */
+ /* clear all inode marks for this group */
fsnotify_clear_marks_by_group(group);
/* past the point of no return, matches the initial value of 1 */
@@ -127,9 +184,12 @@ static void __fsnotify_evict_group(struct fsnotify_group *group)
{
BUG_ON(!mutex_is_locked(&fsnotify_grp_mutex));
- if (group->on_group_list)
- list_del_rcu(&group->group_list);
- group->on_group_list = 0;
+ if (group->on_inode_group_list)
+ list_del_rcu(&group->inode_group_list);
+ group->on_inode_group_list = 0;
+ if (group->on_vfsmount_group_list)
+ list_del_rcu(&group->vfsmount_group_list);
+ group->on_vfsmount_group_list = 0;
}
/*
@@ -171,84 +231,38 @@ void fsnotify_put_group(struct fsnotify_group *group)
}
/*
- * Simply run the fsnotify_groups list and find a group which matches
- * the given parameters. If a group is found we take a reference to that
- * group.
+ * Create a new fsnotify_group and hold a reference for the group returned.
*/
-static struct fsnotify_group *fsnotify_find_group(unsigned int group_num, __u32 mask,
- const struct fsnotify_ops *ops)
+struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops)
{
- struct fsnotify_group *group_iter;
- struct fsnotify_group *group = NULL;
-
- BUG_ON(!mutex_is_locked(&fsnotify_grp_mutex));
-
- list_for_each_entry_rcu(group_iter, &fsnotify_groups, group_list) {
- if (group_iter->group_num == group_num) {
- if ((group_iter->mask == mask) &&
- (group_iter->ops == ops)) {
- fsnotify_get_group(group_iter);
- group = group_iter;
- } else
- group = ERR_PTR(-EEXIST);
- }
- }
- return group;
-}
-
-/*
- * Either finds an existing group which matches the group_num, mask, and ops or
- * creates a new group and adds it to the global group list. In either case we
- * take a reference for the group returned.
- */
-struct fsnotify_group *fsnotify_obtain_group(unsigned int group_num, __u32 mask,
- const struct fsnotify_ops *ops)
-{
- struct fsnotify_group *group, *tgroup;
+ struct fsnotify_group *group;
- /* very low use, simpler locking if we just always alloc */
- group = kmalloc(sizeof(struct fsnotify_group), GFP_KERNEL);
+ group = kzalloc(sizeof(struct fsnotify_group), GFP_KERNEL);
if (!group)
return ERR_PTR(-ENOMEM);
+ /* set to 0 when there are no external references to this group */
atomic_set(&group->refcnt, 1);
-
- group->on_group_list = 0;
- group->group_num = group_num;
- group->mask = mask;
+ /*
+ * hits 0 when there are no external references AND no marks for
+ * this group
+ */
+ atomic_set(&group->num_marks, 1);
mutex_init(&group->notification_mutex);
INIT_LIST_HEAD(&group->notification_list);
init_waitqueue_head(&group->notification_waitq);
- group->q_len = 0;
group->max_events = UINT_MAX;
- spin_lock_init(&group->mark_lock);
- atomic_set(&group->num_marks, 0);
- INIT_LIST_HEAD(&group->mark_entries);
+ INIT_LIST_HEAD(&group->inode_group_list);
+ INIT_LIST_HEAD(&group->vfsmount_group_list);
- group->ops = ops;
-
- mutex_lock(&fsnotify_grp_mutex);
- tgroup = fsnotify_find_group(group_num, mask, ops);
- if (tgroup) {
- /* group already exists */
- mutex_unlock(&fsnotify_grp_mutex);
- /* destroy the new one we made */
- fsnotify_put_group(group);
- return tgroup;
- }
-
- /* group not found, add a new one */
- list_add_rcu(&group->group_list, &fsnotify_groups);
- group->on_group_list = 1;
- /* being on the fsnotify_groups list holds one num_marks */
- atomic_inc(&group->num_marks);
+ spin_lock_init(&group->mark_lock);
+ INIT_LIST_HEAD(&group->marks_list);
- mutex_unlock(&fsnotify_grp_mutex);
+ group->priority = UINT_MAX;
- if (mask)
- fsnotify_recalc_global_mask();
+ group->ops = ops;
return group;
}
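
(Not part of the patch: a minimal sketch of how a backend inside fs/notify/ would obtain and register a group under the reworked API, assuming the private fsnotify.h declarations above are visible and reusing the hypothetical example_ops table from the previous sketch. Whether a backend calls fsnotify_add_inode_group() directly or reaches it through the mark-attach path is outside this hunk; the sketch only shows the call order assumed here.)

static struct fsnotify_group *example_setup_group(unsigned int priority)
{
        struct fsnotify_group *group;

        /* takes a reference; dropped later with fsnotify_put_group() */
        group = fsnotify_alloc_group(&example_ops);
        if (IS_ERR(group))
                return group;

        /* fsnotify_alloc_group() defaults this to UINT_MAX (lowest) */
        group->priority = priority;

        /* place the group on the priority-ordered inode listener list */
        fsnotify_add_inode_group(group);

        return group;
}
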
diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c
index 0399bcbe09c8..0c0a48b1659f 100644
--- a/fs/notify/inode_mark.c
+++ b/fs/notify/inode_mark.c
@@ -16,72 +16,6 @@
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-/*
- * fsnotify inode mark locking/lifetime/and refcnting
- *
- * REFCNT:
- * The mark->refcnt tells how many "things" in the kernel currently are
- * referencing this object. The object typically will live inside the kernel
- * with a refcnt of 2, one for each list it is on (i_list, g_list). Any task
- * which can find this object holding the appropriete locks, can take a reference
- * and the object itself is guarenteed to survive until the reference is dropped.
- *
- * LOCKING:
- * There are 3 spinlocks involved with fsnotify inode marks and they MUST
- * be taken in order as follows:
- *
- * entry->lock
- * group->mark_lock
- * inode->i_lock
- *
- * entry->lock protects 2 things, entry->group and entry->inode. You must hold
- * that lock to dereference either of these things (they could be NULL even with
- * the lock)
- *
- * group->mark_lock protects the mark_entries list anchored inside a given group
- * and each entry is hooked via the g_list. It also sorta protects the
- * free_g_list, which when used is anchored by a private list on the stack of the
- * task which held the group->mark_lock.
- *
- * inode->i_lock protects the i_fsnotify_mark_entries list anchored inside a
- * given inode and each entry is hooked via the i_list. (and sorta the
- * free_i_list)
- *
- *
- * LIFETIME:
- * Inode marks survive between when they are added to an inode and when their
- * refcnt==0.
- *
- * The inode mark can be cleared for a number of different reasons including:
- * - The inode is unlinked for the last time. (fsnotify_inode_remove)
- * - The inode is being evicted from cache. (fsnotify_inode_delete)
- * - The fs the inode is on is unmounted. (fsnotify_inode_delete/fsnotify_unmount_inodes)
- * - Something explicitly requests that it be removed. (fsnotify_destroy_mark_by_entry)
- * - The fsnotify_group associated with the mark is going away and all such marks
- * need to be cleaned up. (fsnotify_clear_marks_by_group)
- *
- * Worst case we are given an inode and need to clean up all the marks on that
- * inode. We take i_lock and walk the i_fsnotify_mark_entries safely. For each
- * mark on the list we take a reference (so the mark can't disappear under us).
- * We remove that mark form the inode's list of marks and we add this mark to a
- * private list anchored on the stack using i_free_list; At this point we no
- * longer fear anything finding the mark using the inode's list of marks.
- *
- * We can safely and locklessly run the private list on the stack of everything
- * we just unattached from the original inode. For each mark on the private list
- * we grab the mark-> and can thus dereference mark->group and mark->inode. If
- * we see the group and inode are not NULL we take those locks. Now holding all
- * 3 locks we can completely remove the mark from other tasks finding it in the
- * future. Remember, 10 things might already be referencing this mark, but they
- * better be holding a ref. We drop our reference we took before we unhooked it
- * from the inode. When the ref hits 0 we can free the mark.
- *
- * Very similarly for freeing by group, except we use free_g_list.
- *
- * This has the very interesting property of being able to run concurrently with
- * any (or all) other directions.
- */
-
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -95,30 +29,19 @@
#include <linux/fsnotify_backend.h>
#include "fsnotify.h"
-void fsnotify_get_mark(struct fsnotify_mark_entry *entry)
-{
- atomic_inc(&entry->refcnt);
-}
-
-void fsnotify_put_mark(struct fsnotify_mark_entry *entry)
-{
- if (atomic_dec_and_test(&entry->refcnt))
- entry->free_mark(entry);
-}
-
/*
 * Recalculate the mask of events relevant to a given inode, with i_lock held.
*/
static void fsnotify_recalc_inode_mask_locked(struct inode *inode)
{
- struct fsnotify_mark_entry *entry;
+ struct fsnotify_mark *mark;
struct hlist_node *pos;
__u32 new_mask = 0;
assert_spin_locked(&inode->i_lock);
- hlist_for_each_entry(entry, pos, &inode->i_fsnotify_mark_entries, i_list)
- new_mask |= entry->mask;
+ hlist_for_each_entry(mark, pos, &inode->i_fsnotify_marks, i.i_list)
+ new_mask |= mark->mask;
inode->i_fsnotify_mask = new_mask;
}
@@ -135,107 +58,26 @@ void fsnotify_recalc_inode_mask(struct inode *inode)
__fsnotify_update_child_dentry_flags(inode);
}
-/*
- * Any time a mark is getting freed we end up here.
- * The caller had better be holding a reference to this mark so we don't actually
- * do the final put under the entry->lock
- */
-void fsnotify_destroy_mark_by_entry(struct fsnotify_mark_entry *entry)
+void fsnotify_destroy_inode_mark(struct fsnotify_mark *mark)
{
- struct fsnotify_group *group;
- struct inode *inode;
-
- spin_lock(&entry->lock);
+ struct inode *inode = mark->i.inode;
- group = entry->group;
- inode = entry->inode;
+ assert_spin_locked(&mark->lock);
+ assert_spin_locked(&mark->group->mark_lock);
- BUG_ON(group && !inode);
- BUG_ON(!group && inode);
-
- /* if !group something else already marked this to die */
- if (!group) {
- spin_unlock(&entry->lock);
- return;
- }
-
- /* 1 from caller and 1 for being on i_list/g_list */
- BUG_ON(atomic_read(&entry->refcnt) < 2);
-
- spin_lock(&group->mark_lock);
spin_lock(&inode->i_lock);
- hlist_del_init(&entry->i_list);
- entry->inode = NULL;
-
- list_del_init(&entry->g_list);
- entry->group = NULL;
-
- fsnotify_put_mark(entry); /* for i_list and g_list */
+ hlist_del_init(&mark->i.i_list);
+ mark->i.inode = NULL;
/*
- * this mark is now off the inode->i_fsnotify_mark_entries list and we
+ * this mark is now off the inode->i_fsnotify_marks list and we
* hold the inode->i_lock, so this is the perfect time to update the
* inode->i_fsnotify_mask
*/
fsnotify_recalc_inode_mask_locked(inode);
spin_unlock(&inode->i_lock);
- spin_unlock(&group->mark_lock);
- spin_unlock(&entry->lock);
-
- /*
- * Some groups like to know that marks are being freed. This is a
- * callback to the group function to let it know that this entry
- * is being freed.
- */
- if (group->ops->freeing_mark)
- group->ops->freeing_mark(entry, group);
-
- /*
- * __fsnotify_update_child_dentry_flags(inode);
- *
- * I really want to call that, but we can't, we have no idea if the inode
- * still exists the second we drop the entry->lock.
- *
- * The next time an event arrive to this inode from one of it's children
- * __fsnotify_parent will see that the inode doesn't care about it's
- * children and will update all of these flags then. So really this
- * is just a lazy update (and could be a perf win...)
- */
-
-
- iput(inode);
-
- /*
- * it's possible that this group tried to destroy itself, but this
- * this mark was simultaneously being freed by inode. If that's the
- * case, we finish freeing the group here.
- */
- if (unlikely(atomic_dec_and_test(&group->num_marks)))
- fsnotify_final_destroy_group(group);
-}
-
-/*
- * Given a group, destroy all of the marks associated with that group.
- */
-void fsnotify_clear_marks_by_group(struct fsnotify_group *group)
-{
- struct fsnotify_mark_entry *lentry, *entry;
- LIST_HEAD(free_list);
-
- spin_lock(&group->mark_lock);
- list_for_each_entry_safe(entry, lentry, &group->mark_entries, g_list) {
- list_add(&entry->free_g_list, &free_list);
- list_del_init(&entry->g_list);
- fsnotify_get_mark(entry);
- }
- spin_unlock(&group->mark_lock);
-
- list_for_each_entry_safe(entry, lentry, &free_list, free_g_list) {
- fsnotify_destroy_mark_by_entry(entry);
- fsnotify_put_mark(entry);
- }
}
/*
@@ -243,112 +85,127 @@ void fsnotify_clear_marks_by_group(struct fsnotify_group *group)
*/
void fsnotify_clear_marks_by_inode(struct inode *inode)
{
- struct fsnotify_mark_entry *entry, *lentry;
+ struct fsnotify_mark *mark, *lmark;
struct hlist_node *pos, *n;
LIST_HEAD(free_list);
spin_lock(&inode->i_lock);
- hlist_for_each_entry_safe(entry, pos, n, &inode->i_fsnotify_mark_entries, i_list) {
- list_add(&entry->free_i_list, &free_list);
- hlist_del_init(&entry->i_list);
- fsnotify_get_mark(entry);
+ hlist_for_each_entry_safe(mark, pos, n, &inode->i_fsnotify_marks, i.i_list) {
+ list_add(&mark->i.free_i_list, &free_list);
+ hlist_del_init(&mark->i.i_list);
+ fsnotify_get_mark(mark);
}
spin_unlock(&inode->i_lock);
- list_for_each_entry_safe(entry, lentry, &free_list, free_i_list) {
- fsnotify_destroy_mark_by_entry(entry);
- fsnotify_put_mark(entry);
+ list_for_each_entry_safe(mark, lmark, &free_list, i.free_i_list) {
+ fsnotify_destroy_mark(mark);
+ fsnotify_put_mark(mark);
}
}
/*
+ * Given a group clear all of the inode marks associated with that group.
+ */
+void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group)
+{
+ fsnotify_clear_marks_by_group_flags(group, FSNOTIFY_MARK_FLAG_INODE);
+}
+
+/*
* given a group and inode, find the mark associated with that combination.
* if found take a reference to that mark and return it, else return NULL
*/
-struct fsnotify_mark_entry *fsnotify_find_mark_entry(struct fsnotify_group *group,
- struct inode *inode)
+struct fsnotify_mark *fsnotify_find_inode_mark_locked(struct fsnotify_group *group,
+ struct inode *inode)
{
- struct fsnotify_mark_entry *entry;
+ struct fsnotify_mark *mark;
struct hlist_node *pos;
assert_spin_locked(&inode->i_lock);
- hlist_for_each_entry(entry, pos, &inode->i_fsnotify_mark_entries, i_list) {
- if (entry->group == group) {
- fsnotify_get_mark(entry);
- return entry;
+ hlist_for_each_entry(mark, pos, &inode->i_fsnotify_marks, i.i_list) {
+ if (mark->group == group) {
+ fsnotify_get_mark(mark);
+ return mark;
}
}
return NULL;
}
/*
- * Nothing fancy, just initialize lists and locks and counters.
+ * given a group and inode, find the mark associated with that combination.
+ * if found take a reference to that mark and return it, else return NULL
*/
-void fsnotify_init_mark(struct fsnotify_mark_entry *entry,
- void (*free_mark)(struct fsnotify_mark_entry *entry))
+struct fsnotify_mark *fsnotify_find_inode_mark(struct fsnotify_group *group,
+ struct inode *inode)
+{
+ struct fsnotify_mark *mark;
+ spin_lock(&inode->i_lock);
+ mark = fsnotify_find_inode_mark_locked(group, inode);
+ spin_unlock(&inode->i_lock);
+
+ return mark;
+}
+
+/*
+ * If we are setting a mark mask on an inode mark we should pin the inode
+ * in memory.
+ */
+void fsnotify_set_inode_mark_mask_locked(struct fsnotify_mark *mark,
+ __u32 mask)
{
- spin_lock_init(&entry->lock);
- atomic_set(&entry->refcnt, 1);
- INIT_HLIST_NODE(&entry->i_list);
- entry->group = NULL;
- entry->mask = 0;
- entry->inode = NULL;
- entry->free_mark = free_mark;
+ struct inode *inode;
+
+ assert_spin_locked(&mark->lock);
+
+ if (mask &&
+ mark->i.inode &&
+ !(mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED)) {
+ mark->flags |= FSNOTIFY_MARK_FLAG_OBJECT_PINNED;
+ inode = igrab(mark->i.inode);
+ /*
+ * we shouldn't be able to get here if the inode wasn't
+ * already safely held in memory, but BUG() in case that
+ * assumption is ever wrong.
+ */
+ BUG_ON(!inode);
+ }
}
/*
- * Attach an initialized mark entry to a given group and inode.
+ * Attach an initialized mark to a given group and inode.
* These marks may be used for the fsnotify backend to determine which
* event types should be delivered to which group and for which inodes.
*/
-int fsnotify_add_mark(struct fsnotify_mark_entry *entry,
- struct fsnotify_group *group, struct inode *inode)
+int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
+ struct fsnotify_group *group, struct inode *inode,
+ int allow_dups)
{
- struct fsnotify_mark_entry *lentry;
+ struct fsnotify_mark *lmark = NULL;
int ret = 0;
- inode = igrab(inode);
- if (unlikely(!inode))
- return -EINVAL;
-
- /*
- * LOCKING ORDER!!!!
- * entry->lock
- * group->mark_lock
- * inode->i_lock
- */
- spin_lock(&entry->lock);
- spin_lock(&group->mark_lock);
- spin_lock(&inode->i_lock);
+ mark->flags = FSNOTIFY_MARK_FLAG_INODE;
- lentry = fsnotify_find_mark_entry(group, inode);
- if (!lentry) {
- entry->group = group;
- entry->inode = inode;
+ assert_spin_locked(&mark->lock);
+ assert_spin_locked(&group->mark_lock);
- hlist_add_head(&entry->i_list, &inode->i_fsnotify_mark_entries);
- list_add(&entry->g_list, &group->mark_entries);
+ spin_lock(&inode->i_lock);
- fsnotify_get_mark(entry); /* for i_list and g_list */
+ if (!allow_dups)
+ lmark = fsnotify_find_inode_mark_locked(group, inode);
+ if (!lmark) {
+ mark->i.inode = inode;
- atomic_inc(&group->num_marks);
+ hlist_add_head(&mark->i.i_list, &inode->i_fsnotify_marks);
fsnotify_recalc_inode_mask_locked(inode);
}
spin_unlock(&inode->i_lock);
- spin_unlock(&group->mark_lock);
- spin_unlock(&entry->lock);
- if (lentry) {
+ if (lmark)
ret = -EEXIST;
- iput(inode);
- fsnotify_put_mark(lentry);
- } else {
- __fsnotify_update_child_dentry_flags(inode);
- }
return ret;
}
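
(Not part of the patch: fsnotify_add_inode_mark() now asserts that mark->lock and group->mark_lock are already held, and takes inode->i_lock itself. The generic fsnotify_add_mark() entry point, which lives outside this hunk, is what in-tree callers such as fanotify actually use; the sketch below only makes the assumed lock order visible.)

static int example_attach_inode_mark(struct fsnotify_mark *mark,
                                     struct fsnotify_group *group,
                                     struct inode *inode)
{
        int ret;

        spin_lock(&mark->lock);
        spin_lock(&group->mark_lock);
        ret = fsnotify_add_inode_mark(mark, group, inode, 0);
        spin_unlock(&group->mark_lock);
        spin_unlock(&mark->lock);

        return ret;
}
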
diff --git a/fs/notify/inotify/Kconfig b/fs/notify/inotify/Kconfig
index b3a159b21cfd..14f4974760ea 100644
--- a/fs/notify/inotify/Kconfig
+++ b/fs/notify/inotify/Kconfig
@@ -1,22 +1,8 @@
-config INOTIFY
- bool "Inotify file change notification support"
- default n
- ---help---
- Say Y here to enable legacy in kernel inotify support. Inotify is a
- file change notification system. It is a replacement for dnotify.
- This option only provides the legacy inotify in kernel API. There
- are no in tree kernel users of this interface since it is deprecated.
- You only need this if you are loading an out of tree kernel module
- that uses inotify.
-
- For more information, see <file:Documentation/filesystems/inotify.txt>
-
- If unsure, say N.
-
config INOTIFY_USER
bool "Inotify support for userspace"
select ANON_INODES
select FSNOTIFY
default y
---help---
Say Y here to enable inotify support for userspace, including the
diff --git a/fs/notify/inotify/Makefile b/fs/notify/inotify/Makefile
index 943828171362..a380dabe09de 100644
--- a/fs/notify/inotify/Makefile
+++ b/fs/notify/inotify/Makefile
@@ -1,2 +1 @@
-obj-$(CONFIG_INOTIFY) += inotify.o
obj-$(CONFIG_INOTIFY_USER) += inotify_fsnotify.o inotify_user.o
diff --git a/fs/notify/inotify/inotify.c b/fs/notify/inotify/inotify.c
deleted file mode 100644
index 40b1cf914ccb..000000000000
--- a/fs/notify/inotify/inotify.c
+++ /dev/null
@@ -1,933 +0,0 @@
-/*
- * fs/inotify.c - inode-based file event notifications
- *
- * Authors:
- * John McCutchan <ttb@tentacle.dhs.org>
- * Robert Love <rml@novell.com>
- *
- * Kernel API added by: Amy Griffis <amy.griffis@hp.com>
- *
- * Copyright (C) 2005 John McCutchan
- * Copyright 2006 Hewlett-Packard Development Company, L.P.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-#include <linux/idr.h>
-#include <linux/slab.h>
-#include <linux/fs.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/writeback.h>
-#include <linux/inotify.h>
-#include <linux/fsnotify_backend.h>
-
-static atomic_t inotify_cookie;
-
-/*
- * Lock ordering:
- *
- * dentry->d_lock (used to keep d_move() away from dentry->d_parent)
- * iprune_mutex (synchronize shrink_icache_memory())
- * inode_lock (protects the super_block->s_inodes list)
- * inode->inotify_mutex (protects inode->inotify_watches and watches->i_list)
- * inotify_handle->mutex (protects inotify_handle and watches->h_list)
- *
- * The inode->inotify_mutex and inotify_handle->mutex and held during execution
- * of a caller's event handler. Thus, the caller must not hold any locks
- * taken in their event handler while calling any of the published inotify
- * interfaces.
- */
-
-/*
- * Lifetimes of the three main data structures--inotify_handle, inode, and
- * inotify_watch--are managed by reference count.
- *
- * inotify_handle: Lifetime is from inotify_init() to inotify_destroy().
- * Additional references can bump the count via get_inotify_handle() and drop
- * the count via put_inotify_handle().
- *
- * inotify_watch: for inotify's purposes, lifetime is from inotify_add_watch()
- * to remove_watch_no_event(). Additional references can bump the count via
- * get_inotify_watch() and drop the count via put_inotify_watch(). The caller
- * is reponsible for the final put after receiving IN_IGNORED, or when using
- * IN_ONESHOT after receiving the first event. Inotify does the final put if
- * inotify_destroy() is called.
- *
- * inode: Pinned so long as the inode is associated with a watch, from
- * inotify_add_watch() to the final put_inotify_watch().
- */
-
-/*
- * struct inotify_handle - represents an inotify instance
- *
- * This structure is protected by the mutex 'mutex'.
- */
-struct inotify_handle {
- struct idr idr; /* idr mapping wd -> watch */
- struct mutex mutex; /* protects this bad boy */
- struct list_head watches; /* list of watches */
- atomic_t count; /* reference count */
- u32 last_wd; /* the last wd allocated */
- const struct inotify_operations *in_ops; /* inotify caller operations */
-};
-
-static inline void get_inotify_handle(struct inotify_handle *ih)
-{
- atomic_inc(&ih->count);
-}
-
-static inline void put_inotify_handle(struct inotify_handle *ih)
-{
- if (atomic_dec_and_test(&ih->count)) {
- idr_destroy(&ih->idr);
- kfree(ih);
- }
-}
-
-/**
- * get_inotify_watch - grab a reference to an inotify_watch
- * @watch: watch to grab
- */
-void get_inotify_watch(struct inotify_watch *watch)
-{
- atomic_inc(&watch->count);
-}
-EXPORT_SYMBOL_GPL(get_inotify_watch);
-
-int pin_inotify_watch(struct inotify_watch *watch)
-{
- struct super_block *sb = watch->inode->i_sb;
- spin_lock(&sb_lock);
- if (sb->s_count >= S_BIAS) {
- atomic_inc(&sb->s_active);
- spin_unlock(&sb_lock);
- atomic_inc(&watch->count);
- return 1;
- }
- spin_unlock(&sb_lock);
- return 0;
-}
-
-/**
- * put_inotify_watch - decrements the ref count on a given watch. cleans up
- * watch references if the count reaches zero. inotify_watch is freed by
- * inotify callers via the destroy_watch() op.
- * @watch: watch to release
- */
-void put_inotify_watch(struct inotify_watch *watch)
-{
- if (atomic_dec_and_test(&watch->count)) {
- struct inotify_handle *ih = watch->ih;
-
- iput(watch->inode);
- ih->in_ops->destroy_watch(watch);
- put_inotify_handle(ih);
- }
-}
-EXPORT_SYMBOL_GPL(put_inotify_watch);
-
-void unpin_inotify_watch(struct inotify_watch *watch)
-{
- struct super_block *sb = watch->inode->i_sb;
- put_inotify_watch(watch);
- deactivate_super(sb);
-}
-
-/*
- * inotify_handle_get_wd - returns the next WD for use by the given handle
- *
- * Callers must hold ih->mutex. This function can sleep.
- */
-static int inotify_handle_get_wd(struct inotify_handle *ih,
- struct inotify_watch *watch)
-{
- int ret;
-
- do {
- if (unlikely(!idr_pre_get(&ih->idr, GFP_NOFS)))
- return -ENOSPC;
- ret = idr_get_new_above(&ih->idr, watch, ih->last_wd+1, &watch->wd);
- } while (ret == -EAGAIN);
-
- if (likely(!ret))
- ih->last_wd = watch->wd;
-
- return ret;
-}
-
-/*
- * inotify_inode_watched - returns nonzero if there are watches on this inode
- * and zero otherwise. We call this lockless, we do not care if we race.
- */
-static inline int inotify_inode_watched(struct inode *inode)
-{
- return !list_empty(&inode->inotify_watches);
-}
-
-/*
- * Get child dentry flag into synch with parent inode.
- * Flag should always be clear for negative dentrys.
- */
-static void set_dentry_child_flags(struct inode *inode, int watched)
-{
- struct dentry *alias;
-
- spin_lock(&dcache_lock);
- list_for_each_entry(alias, &inode->i_dentry, d_alias) {
- struct dentry *child;
-
- list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) {
- if (!child->d_inode)
- continue;
-
- spin_lock(&child->d_lock);
- if (watched)
- child->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
- else
- child->d_flags &=~DCACHE_INOTIFY_PARENT_WATCHED;
- spin_unlock(&child->d_lock);
- }
- }
- spin_unlock(&dcache_lock);
-}
-
-/*
- * inotify_find_handle - find the watch associated with the given inode and
- * handle
- *
- * Callers must hold inode->inotify_mutex.
- */
-static struct inotify_watch *inode_find_handle(struct inode *inode,
- struct inotify_handle *ih)
-{
- struct inotify_watch *watch;
-
- list_for_each_entry(watch, &inode->inotify_watches, i_list) {
- if (watch->ih == ih)
- return watch;
- }
-
- return NULL;
-}
-
-/*
- * remove_watch_no_event - remove watch without the IN_IGNORED event.
- *
- * Callers must hold both inode->inotify_mutex and ih->mutex.
- */
-static void remove_watch_no_event(struct inotify_watch *watch,
- struct inotify_handle *ih)
-{
- list_del(&watch->i_list);
- list_del(&watch->h_list);
-
- if (!inotify_inode_watched(watch->inode))
- set_dentry_child_flags(watch->inode, 0);
-
- idr_remove(&ih->idr, watch->wd);
-}
-
-/**
- * inotify_remove_watch_locked - Remove a watch from both the handle and the
- * inode. Sends the IN_IGNORED event signifying that the inode is no longer
- * watched. May be invoked from a caller's event handler.
- * @ih: inotify handle associated with watch
- * @watch: watch to remove
- *
- * Callers must hold both inode->inotify_mutex and ih->mutex.
- */
-void inotify_remove_watch_locked(struct inotify_handle *ih,
- struct inotify_watch *watch)
-{
- remove_watch_no_event(watch, ih);
- ih->in_ops->handle_event(watch, watch->wd, IN_IGNORED, 0, NULL, NULL);
-}
-EXPORT_SYMBOL_GPL(inotify_remove_watch_locked);
-
-/* Kernel API for producing events */
-
-/*
- * inotify_d_instantiate - instantiate dcache entry for inode
- */
-void inotify_d_instantiate(struct dentry *entry, struct inode *inode)
-{
- struct dentry *parent;
-
- if (!inode)
- return;
-
- spin_lock(&entry->d_lock);
- parent = entry->d_parent;
- if (parent->d_inode && inotify_inode_watched(parent->d_inode))
- entry->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
- spin_unlock(&entry->d_lock);
-}
-
-/*
- * inotify_d_move - dcache entry has been moved
- */
-void inotify_d_move(struct dentry *entry)
-{
- struct dentry *parent;
-
- parent = entry->d_parent;
- if (inotify_inode_watched(parent->d_inode))
- entry->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
- else
- entry->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED;
-}
-
-/**
- * inotify_inode_queue_event - queue an event to all watches on this inode
- * @inode: inode event is originating from
- * @mask: event mask describing this event
- * @cookie: cookie for synchronization, or zero
- * @name: filename, if any
- * @n_inode: inode associated with name
- */
-void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie,
- const char *name, struct inode *n_inode)
-{
- struct inotify_watch *watch, *next;
-
- if (!inotify_inode_watched(inode))
- return;
-
- mutex_lock(&inode->inotify_mutex);
- list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
- u32 watch_mask = watch->mask;
- if (watch_mask & mask) {
- struct inotify_handle *ih= watch->ih;
- mutex_lock(&ih->mutex);
- if (watch_mask & IN_ONESHOT)
- remove_watch_no_event(watch, ih);
- ih->in_ops->handle_event(watch, watch->wd, mask, cookie,
- name, n_inode);
- mutex_unlock(&ih->mutex);
- }
- }
- mutex_unlock(&inode->inotify_mutex);
-}
-EXPORT_SYMBOL_GPL(inotify_inode_queue_event);
-
-/**
- * inotify_dentry_parent_queue_event - queue an event to a dentry's parent
- * @dentry: the dentry in question, we queue against this dentry's parent
- * @mask: event mask describing this event
- * @cookie: cookie for synchronization, or zero
- * @name: filename, if any
- */
-void inotify_dentry_parent_queue_event(struct dentry *dentry, u32 mask,
- u32 cookie, const char *name)
-{
- struct dentry *parent;
- struct inode *inode;
-
- if (!(dentry->d_flags & DCACHE_INOTIFY_PARENT_WATCHED))
- return;
-
- spin_lock(&dentry->d_lock);
- parent = dentry->d_parent;
- inode = parent->d_inode;
-
- if (inotify_inode_watched(inode)) {
- dget(parent);
- spin_unlock(&dentry->d_lock);
- inotify_inode_queue_event(inode, mask, cookie, name,
- dentry->d_inode);
- dput(parent);
- } else
- spin_unlock(&dentry->d_lock);
-}
-EXPORT_SYMBOL_GPL(inotify_dentry_parent_queue_event);
-
-/**
- * inotify_get_cookie - return a unique cookie for use in synchronizing events.
- */
-u32 inotify_get_cookie(void)
-{
- return atomic_inc_return(&inotify_cookie);
-}
-EXPORT_SYMBOL_GPL(inotify_get_cookie);
-
-/**
- * inotify_unmount_inodes - an sb is unmounting. handle any watched inodes.
- * @list: list of inodes being unmounted (sb->s_inodes)
- *
- * Called with inode_lock held, protecting the unmounting super block's list
- * of inodes, and with iprune_mutex held, keeping shrink_icache_memory() at bay.
- * We temporarily drop inode_lock, however, and CAN block.
- */
-void inotify_unmount_inodes(struct list_head *list)
-{
- struct inode *inode, *next_i, *need_iput = NULL;
-
- list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
- struct inotify_watch *watch, *next_w;
- struct inode *need_iput_tmp;
- struct list_head *watches;
-
- /*
- * We cannot __iget() an inode in state I_CLEAR, I_FREEING,
- * I_WILL_FREE, or I_NEW which is fine because by that point
- * the inode cannot have any associated watches.
- */
- if (inode->i_state & (I_CLEAR|I_FREEING|I_WILL_FREE|I_NEW))
- continue;
-
- /*
- * If i_count is zero, the inode cannot have any watches and
- * doing an __iget/iput with MS_ACTIVE clear would actually
- * evict all inodes with zero i_count from icache which is
- * unnecessarily violent and may in fact be illegal to do.
- */
- if (!atomic_read(&inode->i_count))
- continue;
-
- need_iput_tmp = need_iput;
- need_iput = NULL;
- /* In case inotify_remove_watch_locked() drops a reference. */
- if (inode != need_iput_tmp)
- __iget(inode);
- else
- need_iput_tmp = NULL;
- /* In case the dropping of a reference would nuke next_i. */
- if ((&next_i->i_sb_list != list) &&
- atomic_read(&next_i->i_count) &&
- !(next_i->i_state & (I_CLEAR | I_FREEING |
- I_WILL_FREE))) {
- __iget(next_i);
- need_iput = next_i;
- }
-
- /*
- * We can safely drop inode_lock here because we hold
- * references on both inode and next_i. Also no new inodes
- * will be added since the umount has begun. Finally,
- * iprune_mutex keeps shrink_icache_memory() away.
- */
- spin_unlock(&inode_lock);
-
- if (need_iput_tmp)
- iput(need_iput_tmp);
-
- /* for each watch, send IN_UNMOUNT and then remove it */
- mutex_lock(&inode->inotify_mutex);
- watches = &inode->inotify_watches;
- list_for_each_entry_safe(watch, next_w, watches, i_list) {
- struct inotify_handle *ih= watch->ih;
- get_inotify_watch(watch);
- mutex_lock(&ih->mutex);
- ih->in_ops->handle_event(watch, watch->wd, IN_UNMOUNT, 0,
- NULL, NULL);
- inotify_remove_watch_locked(ih, watch);
- mutex_unlock(&ih->mutex);
- put_inotify_watch(watch);
- }
- mutex_unlock(&inode->inotify_mutex);
- iput(inode);
-
- spin_lock(&inode_lock);
- }
-}
-EXPORT_SYMBOL_GPL(inotify_unmount_inodes);
-
-/**
- * inotify_inode_is_dead - an inode has been deleted, cleanup any watches
- * @inode: inode that is about to be removed
- */
-void inotify_inode_is_dead(struct inode *inode)
-{
- struct inotify_watch *watch, *next;
-
- mutex_lock(&inode->inotify_mutex);
- list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
- struct inotify_handle *ih = watch->ih;
- mutex_lock(&ih->mutex);
- inotify_remove_watch_locked(ih, watch);
- mutex_unlock(&ih->mutex);
- }
- mutex_unlock(&inode->inotify_mutex);
-}
-EXPORT_SYMBOL_GPL(inotify_inode_is_dead);
-
-/* Kernel Consumer API */
-
-/**
- * inotify_init - allocate and initialize an inotify instance
- * @ops: caller's inotify operations
- */
-struct inotify_handle *inotify_init(const struct inotify_operations *ops)
-{
- struct inotify_handle *ih;
-
- ih = kmalloc(sizeof(struct inotify_handle), GFP_KERNEL);
- if (unlikely(!ih))
- return ERR_PTR(-ENOMEM);
-
- idr_init(&ih->idr);
- INIT_LIST_HEAD(&ih->watches);
- mutex_init(&ih->mutex);
- ih->last_wd = 0;
- ih->in_ops = ops;
- atomic_set(&ih->count, 0);
- get_inotify_handle(ih);
-
- return ih;
-}
-EXPORT_SYMBOL_GPL(inotify_init);
-
-/**
- * inotify_init_watch - initialize an inotify watch
- * @watch: watch to initialize
- */
-void inotify_init_watch(struct inotify_watch *watch)
-{
- INIT_LIST_HEAD(&watch->h_list);
- INIT_LIST_HEAD(&watch->i_list);
- atomic_set(&watch->count, 0);
- get_inotify_watch(watch); /* initial get */
-}
-EXPORT_SYMBOL_GPL(inotify_init_watch);
-
-/*
- * Watch removals suck violently. To kick the watch out we need (in this
- * order) inode->inotify_mutex and ih->mutex. That's fine if we have
- * a hold on inode; however, for all other cases we need to make damn sure
- * we don't race with umount. We can *NOT* just grab a reference to a
- * watch - inotify_unmount_inodes() will happily sail past it and we'll end
- * with reference to inode potentially outliving its superblock. Ideally
- * we just want to grab an active reference to superblock if we can; that
- * will make sure we won't go into inotify_umount_inodes() until we are
- * done. Cleanup is just deactivate_super(). However, that leaves a messy
- * case - what if we *are* racing with umount() and active references to
- * superblock can't be acquired anymore? We can bump ->s_count, grab
- * ->s_umount, which will almost certainly wait until the superblock is shut
- * down and the watch in question is pining for fjords. That's fine, but
- * there is a problem - we might have hit the window between ->s_active
- * getting to 0 / ->s_count - below S_BIAS (i.e. the moment when superblock
- * is past the point of no return and is heading for shutdown) and the
- * moment when deactivate_super() acquires ->s_umount. We could just do
- * drop_super() yield() and retry, but that's rather antisocial and this
- * stuff is luser-triggerable. OTOH, having grabbed ->s_umount and having
- * found that we'd got there first (i.e. that ->s_root is non-NULL) we know
- * that we won't race with inotify_umount_inodes(). So we could grab a
- * reference to watch and do the rest as above, just with drop_super() instead
- * of deactivate_super(), right? Wrong. We had to drop ih->mutex before we
- * could grab ->s_umount. So the watch could've been gone already.
- *
- * That still can be dealt with - we need to save watch->wd, do idr_find()
- * and compare its result with our pointer. If they match, we either have
- * the damn thing still alive or we'd lost not one but two races at once,
- * the watch had been killed and a new one got created with the same ->wd
- * at the same address. That couldn't have happened in inotify_destroy(),
- * but inotify_rm_wd() could run into that. Still, "new one got created"
- * is not a problem - we have every right to kill it or leave it alone,
- * whatever's more convenient.
- *
- * So we can use idr_find(...) == watch && watch->inode->i_sb == sb as
- * "grab it and kill it" check. If it's been our original watch, we are
- * fine, if it's a newcomer - nevermind, just pretend that we'd won the
- * race and kill the fscker anyway; we are safe since we know that its
- * superblock won't be going away.
- *
- * And yes, this is far beyond mere "not very pretty"; so's the entire
- * concept of inotify to start with.
- */
-
-/**
- * pin_to_kill - pin the watch down for removal
- * @ih: inotify handle
- * @watch: watch to kill
- *
- * Called with ih->mutex held, drops it. Possible return values:
- * 0 - nothing to do, it has died
- * 1 - remove it, drop the reference and deactivate_super()
- * 2 - remove it, drop the reference and drop_super(); we tried hard to avoid
- * that variant, since it involved a lot of PITA, but that's the best that
- * could've been done.
- */
-static int pin_to_kill(struct inotify_handle *ih, struct inotify_watch *watch)
-{
- struct super_block *sb = watch->inode->i_sb;
- s32 wd = watch->wd;
-
- spin_lock(&sb_lock);
- if (sb->s_count >= S_BIAS) {
- atomic_inc(&sb->s_active);
- spin_unlock(&sb_lock);
- get_inotify_watch(watch);
- mutex_unlock(&ih->mutex);
- return 1; /* the best outcome */
- }
- sb->s_count++;
- spin_unlock(&sb_lock);
- mutex_unlock(&ih->mutex); /* can't grab ->s_umount under it */
- down_read(&sb->s_umount);
- if (likely(!sb->s_root)) {
- /* fs is already shut down; the watch is dead */
- drop_super(sb);
- return 0;
- }
- /* raced with the final deactivate_super() */
- mutex_lock(&ih->mutex);
- if (idr_find(&ih->idr, wd) != watch || watch->inode->i_sb != sb) {
- /* the watch is dead */
- mutex_unlock(&ih->mutex);
- drop_super(sb);
- return 0;
- }
- /* still alive or freed and reused with the same sb and wd; kill */
- get_inotify_watch(watch);
- mutex_unlock(&ih->mutex);
- return 2;
-}
-
-static void unpin_and_kill(struct inotify_watch *watch, int how)
-{
- struct super_block *sb = watch->inode->i_sb;
- put_inotify_watch(watch);
- switch (how) {
- case 1:
- deactivate_super(sb);
- break;
- case 2:
- drop_super(sb);
- }
-}
-
-/**
- * inotify_destroy - clean up and destroy an inotify instance
- * @ih: inotify handle
- */
-void inotify_destroy(struct inotify_handle *ih)
-{
- /*
- * Destroy all of the watches for this handle. Unfortunately, not very
- * pretty. We cannot do a simple iteration over the list, because we
- * do not know the inode until we iterate to the watch. But we need to
- * hold inode->inotify_mutex before ih->mutex. The following works.
- *
- * AV: it had to become even uglier to start working ;-/
- */
- while (1) {
- struct inotify_watch *watch;
- struct list_head *watches;
- struct super_block *sb;
- struct inode *inode;
- int how;
-
- mutex_lock(&ih->mutex);
- watches = &ih->watches;
- if (list_empty(watches)) {
- mutex_unlock(&ih->mutex);
- break;
- }
- watch = list_first_entry(watches, struct inotify_watch, h_list);
- sb = watch->inode->i_sb;
- how = pin_to_kill(ih, watch);
- if (!how)
- continue;
-
- inode = watch->inode;
- mutex_lock(&inode->inotify_mutex);
- mutex_lock(&ih->mutex);
-
- /* make sure we didn't race with another list removal */
- if (likely(idr_find(&ih->idr, watch->wd))) {
- remove_watch_no_event(watch, ih);
- put_inotify_watch(watch);
- }
-
- mutex_unlock(&ih->mutex);
- mutex_unlock(&inode->inotify_mutex);
- unpin_and_kill(watch, how);
- }
-
- /* free this handle: the put matching the get in inotify_init() */
- put_inotify_handle(ih);
-}
-EXPORT_SYMBOL_GPL(inotify_destroy);
-
-/**
- * inotify_find_watch - find an existing watch for an (ih,inode) pair
- * @ih: inotify handle
- * @inode: inode to watch
- * @watchp: pointer to existing inotify_watch
- *
- * Caller must pin given inode (via nameidata).
- */
-s32 inotify_find_watch(struct inotify_handle *ih, struct inode *inode,
- struct inotify_watch **watchp)
-{
- struct inotify_watch *old;
- int ret = -ENOENT;
-
- mutex_lock(&inode->inotify_mutex);
- mutex_lock(&ih->mutex);
-
- old = inode_find_handle(inode, ih);
- if (unlikely(old)) {
- get_inotify_watch(old); /* caller must put watch */
- *watchp = old;
- ret = old->wd;
- }
-
- mutex_unlock(&ih->mutex);
- mutex_unlock(&inode->inotify_mutex);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(inotify_find_watch);
-
-/**
- * inotify_find_update_watch - find and update the mask of an existing watch
- * @ih: inotify handle
- * @inode: inode's watch to update
- * @mask: mask of events to watch
- *
- * Caller must pin given inode (via nameidata).
- */
-s32 inotify_find_update_watch(struct inotify_handle *ih, struct inode *inode,
- u32 mask)
-{
- struct inotify_watch *old;
- int mask_add = 0;
- int ret;
-
- if (mask & IN_MASK_ADD)
- mask_add = 1;
-
- /* don't allow invalid bits: we don't want flags set */
- mask &= IN_ALL_EVENTS | IN_ONESHOT;
- if (unlikely(!mask))
- return -EINVAL;
-
- mutex_lock(&inode->inotify_mutex);
- mutex_lock(&ih->mutex);
-
- /*
- * Handle the case of re-adding a watch on an (inode,ih) pair that we
- * are already watching. We just update the mask and return its wd.
- */
- old = inode_find_handle(inode, ih);
- if (unlikely(!old)) {
- ret = -ENOENT;
- goto out;
- }
-
- if (mask_add)
- old->mask |= mask;
- else
- old->mask = mask;
- ret = old->wd;
-out:
- mutex_unlock(&ih->mutex);
- mutex_unlock(&inode->inotify_mutex);
- return ret;
-}
-EXPORT_SYMBOL_GPL(inotify_find_update_watch);
-
-/**
- * inotify_add_watch - add a watch to an inotify instance
- * @ih: inotify handle
- * @watch: caller allocated watch structure
- * @inode: inode to watch
- * @mask: mask of events to watch
- *
- * Caller must pin given inode (via nameidata).
- * Caller must ensure it only calls inotify_add_watch() once per watch.
- * Calls inotify_handle_get_wd() so may sleep.
- */
-s32 inotify_add_watch(struct inotify_handle *ih, struct inotify_watch *watch,
- struct inode *inode, u32 mask)
-{
- int ret = 0;
- int newly_watched;
-
- /* don't allow invalid bits: we don't want flags set */
- mask &= IN_ALL_EVENTS | IN_ONESHOT;
- if (unlikely(!mask))
- return -EINVAL;
- watch->mask = mask;
-
- mutex_lock(&inode->inotify_mutex);
- mutex_lock(&ih->mutex);
-
- /* Initialize a new watch */
- ret = inotify_handle_get_wd(ih, watch);
- if (unlikely(ret))
- goto out;
- ret = watch->wd;
-
- /* save a reference to handle and bump the count to make it official */
- get_inotify_handle(ih);
- watch->ih = ih;
-
- /*
- * Save a reference to the inode and bump the ref count to make it
- * official. We hold a reference to nameidata, which makes this safe.
- */
- watch->inode = igrab(inode);
-
- /* Add the watch to the handle's and the inode's list */
- newly_watched = !inotify_inode_watched(inode);
- list_add(&watch->h_list, &ih->watches);
- list_add(&watch->i_list, &inode->inotify_watches);
- /*
- * Set child flags _after_ adding the watch, so there is no race
- * windows where newly instantiated children could miss their parent's
- * watched flag.
- */
- if (newly_watched)
- set_dentry_child_flags(inode, 1);
-
-out:
- mutex_unlock(&ih->mutex);
- mutex_unlock(&inode->inotify_mutex);
- return ret;
-}
-EXPORT_SYMBOL_GPL(inotify_add_watch);
-
-/**
- * inotify_clone_watch - put the watch next to existing one
- * @old: already installed watch
- * @new: new watch
- *
- * Caller must hold the inotify_mutex of inode we are dealing with;
- * it is expected to remove the old watch before unlocking the inode.
- */
-s32 inotify_clone_watch(struct inotify_watch *old, struct inotify_watch *new)
-{
- struct inotify_handle *ih = old->ih;
- int ret = 0;
-
- new->mask = old->mask;
- new->ih = ih;
-
- mutex_lock(&ih->mutex);
-
- /* Initialize a new watch */
- ret = inotify_handle_get_wd(ih, new);
- if (unlikely(ret))
- goto out;
- ret = new->wd;
-
- get_inotify_handle(ih);
-
- new->inode = igrab(old->inode);
-
- list_add(&new->h_list, &ih->watches);
- list_add(&new->i_list, &old->inode->inotify_watches);
-out:
- mutex_unlock(&ih->mutex);
- return ret;
-}
-
-void inotify_evict_watch(struct inotify_watch *watch)
-{
- get_inotify_watch(watch);
- mutex_lock(&watch->ih->mutex);
- inotify_remove_watch_locked(watch->ih, watch);
- mutex_unlock(&watch->ih->mutex);
-}
-
-/**
- * inotify_rm_wd - remove a watch from an inotify instance
- * @ih: inotify handle
- * @wd: watch descriptor to remove
- *
- * Can sleep.
- */
-int inotify_rm_wd(struct inotify_handle *ih, u32 wd)
-{
- struct inotify_watch *watch;
- struct super_block *sb;
- struct inode *inode;
- int how;
-
- mutex_lock(&ih->mutex);
- watch = idr_find(&ih->idr, wd);
- if (unlikely(!watch)) {
- mutex_unlock(&ih->mutex);
- return -EINVAL;
- }
- sb = watch->inode->i_sb;
- how = pin_to_kill(ih, watch);
- if (!how)
- return 0;
-
- inode = watch->inode;
-
- mutex_lock(&inode->inotify_mutex);
- mutex_lock(&ih->mutex);
-
- /* make sure that we did not race */
- if (likely(idr_find(&ih->idr, wd) == watch))
- inotify_remove_watch_locked(ih, watch);
-
- mutex_unlock(&ih->mutex);
- mutex_unlock(&inode->inotify_mutex);
- unpin_and_kill(watch, how);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(inotify_rm_wd);
-
-/**
- * inotify_rm_watch - remove a watch from an inotify instance
- * @ih: inotify handle
- * @watch: watch to remove
- *
- * Can sleep.
- */
-int inotify_rm_watch(struct inotify_handle *ih,
- struct inotify_watch *watch)
-{
- return inotify_rm_wd(ih, watch->wd);
-}
-EXPORT_SYMBOL_GPL(inotify_rm_watch);
-
-/*
- * inotify_setup - core initialization function
- */
-static int __init inotify_setup(void)
-{
- BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
- BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
- BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
- BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
- BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
- BUILD_BUG_ON(IN_OPEN != FS_OPEN);
- BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
- BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
- BUILD_BUG_ON(IN_CREATE != FS_CREATE);
- BUILD_BUG_ON(IN_DELETE != FS_DELETE);
- BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
- BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
- BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
-
- BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
- BUILD_BUG_ON(IN_ISDIR != FS_IN_ISDIR);
- BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
- BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);
-
- atomic_set(&inotify_cookie, 0);
-
- return 0;
-}
-
-module_init(inotify_setup);
diff --git a/fs/notify/inotify/inotify.h b/fs/notify/inotify/inotify.h
index f234f3a4c8ca..b6642e4de4bf 100644
--- a/fs/notify/inotify/inotify.h
+++ b/fs/notify/inotify/inotify.h
@@ -9,13 +9,12 @@ struct inotify_event_private_data {
int wd;
};
-struct inotify_inode_mark_entry {
- /* fsnotify_mark_entry MUST be the first thing */
- struct fsnotify_mark_entry fsn_entry;
+struct inotify_inode_mark {
+ struct fsnotify_mark fsn_mark;
int wd;
};
-extern void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
+extern void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
struct fsnotify_group *group);
extern void inotify_free_event_priv(struct fsnotify_event_private_data *event_priv);
diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
index 1afb0a10229f..4706f408971c 100644
--- a/fs/notify/inotify/inotify_fsnotify.c
+++ b/fs/notify/inotify/inotify_fsnotify.c
@@ -31,10 +31,66 @@
#include "inotify.h"
+/*
+ * Check if 2 events contain the same information. We do not compare private data
+ * but at this moment that isn't a problem for any known fsnotify listeners.
+ */
+static bool event_compare(struct fsnotify_event *old, struct fsnotify_event *new)
+{
+ if ((old->mask == new->mask) &&
+ (old->to_tell == new->to_tell) &&
+ (old->data_type == new->data_type) &&
+ (old->name_len == new->name_len)) {
+ switch (old->data_type) {
+ case (FSNOTIFY_EVENT_INODE):
+ /* remember, after old was put on the wait_q we aren't
+ * allowed to look at the inode any more, the only thing
+ * left to check is whether the file_name is the same */
+ if (!old->name_len ||
+ !strcmp(old->file_name, new->file_name))
+ return true;
+ break;
+ case (FSNOTIFY_EVENT_PATH):
+ if ((old->path.mnt == new->path.mnt) &&
+ (old->path.dentry == new->path.dentry))
+ return true;
+ break;
+ case (FSNOTIFY_EVENT_NONE):
+ if (old->mask & FS_Q_OVERFLOW)
+ return true;
+ else if (old->mask & FS_IN_IGNORED)
+ return false;
+ return true;
+ };
+ }
+ return false;
+}
+
+static int inotify_merge(struct list_head *list,
+ struct fsnotify_event *event,
+ void **arg)
+{
+ struct fsnotify_event_holder *last_holder;
+ struct fsnotify_event *last_event;
+ int ret = 0;
+
+ /* and the list better be locked by something too */
+ spin_lock(&event->lock);
+
+ last_holder = list_entry(list->prev, struct fsnotify_event_holder, event_list);
+ last_event = last_holder->event;
+ if (event_compare(last_event, event))
+ ret = -EEXIST;
+
+ spin_unlock(&event->lock);
+
+ return ret;
+}
+
static int inotify_handle_event(struct fsnotify_group *group, struct fsnotify_event *event)
{
- struct fsnotify_mark_entry *entry;
- struct inotify_inode_mark_entry *ientry;
+ struct fsnotify_mark *fsn_mark;
+ struct inotify_inode_mark *i_mark;
struct inode *to_tell;
struct inotify_event_private_data *event_priv;
struct fsnotify_event_private_data *fsn_event_priv;
@@ -42,15 +98,13 @@ static int inotify_handle_event(struct fsnotify_group *group, struct fsnotify_ev
to_tell = event->to_tell;
- spin_lock(&to_tell->i_lock);
- entry = fsnotify_find_mark_entry(group, to_tell);
- spin_unlock(&to_tell->i_lock);
+ fsn_mark = fsnotify_find_inode_mark(group, to_tell);
/* race with watch removal? We already passes should_send */
- if (unlikely(!entry))
+ if (unlikely(!fsn_mark))
return 0;
- ientry = container_of(entry, struct inotify_inode_mark_entry,
- fsn_entry);
- wd = ientry->wd;
+ i_mark = container_of(fsn_mark, struct inotify_inode_mark,
+ fsn_mark);
+ wd = i_mark->wd;
event_priv = kmem_cache_alloc(event_priv_cachep, GFP_KERNEL);
if (unlikely(!event_priv))
@@ -61,7 +115,7 @@ static int inotify_handle_event(struct fsnotify_group *group, struct fsnotify_ev
fsn_event_priv->group = group;
event_priv->wd = wd;
- ret = fsnotify_add_notify_event(group, event, fsn_event_priv);
+ ret = fsnotify_add_notify_event(group, event, fsn_event_priv, inotify_merge, NULL);
if (ret) {
inotify_free_event_priv(fsn_event_priv);
/* EEXIST says we tail matched, EOVERFLOW isn't something
@@ -72,35 +126,35 @@ static int inotify_handle_event(struct fsnotify_group *group, struct fsnotify_ev
}
/*
- * If we hold the entry until after the event is on the queue
+ * If we hold the fsn_mark until after the event is on the queue
* IN_IGNORED won't be able to pass this event in the queue
*/
- fsnotify_put_mark(entry);
+ fsnotify_put_mark(fsn_mark);
return ret;
}
-static void inotify_freeing_mark(struct fsnotify_mark_entry *entry, struct fsnotify_group *group)
+static void inotify_freeing_mark(struct fsnotify_mark *fsn_mark, struct fsnotify_group *group)
{
- inotify_ignored_and_remove_idr(entry, group);
+ inotify_ignored_and_remove_idr(fsn_mark, group);
}
-static bool inotify_should_send_event(struct fsnotify_group *group, struct inode *inode, __u32 mask)
+static bool inotify_should_send_event(struct fsnotify_group *group, struct inode *inode,
+ struct vfsmount *mnt, __u32 mask, void *data,
+ int data_type)
{
- struct fsnotify_mark_entry *entry;
+ struct fsnotify_mark *fsn_mark;
bool send;
- spin_lock(&inode->i_lock);
- entry = fsnotify_find_mark_entry(group, inode);
- spin_unlock(&inode->i_lock);
- if (!entry)
+ fsn_mark = fsnotify_find_inode_mark(group, inode);
+ if (!fsn_mark)
return false;
mask = (mask & ~FS_EVENT_ON_CHILD);
- send = (entry->mask & mask);
+ send = (fsn_mark->mask & mask);
/* find took a reference */
- fsnotify_put_mark(entry);
+ fsnotify_put_mark(fsn_mark);
return send;
}
@@ -114,18 +168,18 @@ static bool inotify_should_send_event(struct fsnotify_group *group, struct inode
*/
static int idr_callback(int id, void *p, void *data)
{
- struct fsnotify_mark_entry *entry;
- struct inotify_inode_mark_entry *ientry;
+ struct fsnotify_mark *fsn_mark;
+ struct inotify_inode_mark *i_mark;
static bool warned = false;
if (warned)
return 0;
warned = true;
- entry = p;
- ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
+ fsn_mark = p;
+ i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
- WARN(1, "inotify closing but id=%d for entry=%p in group=%p still in "
+ WARN(1, "inotify closing but id=%d for fsn_mark=%p in group=%p still in "
"idr. Probably leaking memory\n", id, p, data);
/*
@@ -134,9 +188,9 @@ static int idr_callback(int id, void *p, void *data)
* out why we got here and the panic is no worse than the original
* BUG() that was here.
*/
- if (entry)
- printk(KERN_WARNING "entry->group=%p inode=%p wd=%d\n",
- entry->group, entry->inode, ientry->wd);
+ if (fsn_mark)
+ printk(KERN_WARNING "fsn_mark->group=%p inode=%p wd=%d\n",
+ fsn_mark->group, fsn_mark->i.inode, i_mark->wd);
return 0;
}
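
The new inotify_merge() callback above only compares an incoming event against the tail of the group's notification list, which is enough to coalesce back-to-back duplicates without scanning the whole queue. Below is a minimal userspace sketch of that tail-coalescing idea; the struct ev type and queue_event() helper are hypothetical stand-ins and not the kernel's fsnotify structures.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct ev {				/* hypothetical stand-in for fsnotify_event */
	unsigned int mask;
	char name[64];
};

static bool ev_equal(const struct ev *a, const struct ev *b)
{
	return a->mask == b->mask && strcmp(a->name, b->name) == 0;
}

/* Returns true if the event was queued, false if it merged with the tail
 * (the kernel code reports the merged case as -EEXIST). */
static bool queue_event(struct ev *q, size_t *len, const struct ev *e)
{
	if (*len && ev_equal(&q[*len - 1], e))
		return false;		/* tail match: coalesce */
	q[(*len)++] = *e;
	return true;
}

int main(void)
{
	struct ev q[8];
	size_t len = 0;
	struct ev a = { .mask = 0x2, .name = "file" };	/* e.g. IN_MODIFY */

	queue_event(q, &len, &a);
	queue_event(q, &len, &a);	/* dropped: identical to the tail */
	printf("queued %zu event(s)\n", len);	/* prints 1 */
	return 0;
}
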
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 472cdf29ef82..1ce71f5b9589 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -51,12 +51,6 @@ int inotify_max_user_watches __read_mostly;
static struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
struct kmem_cache *event_priv_cachep __read_mostly;
-/*
- * When inotify registers a new group it increments this and uses that
- * value as an offset to set the fsnotify group "name" and priority.
- */
-static atomic_t inotify_grp_num;
-
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
@@ -357,58 +351,158 @@ static int inotify_find_inode(const char __user *dirname, struct path *path, uns
return error;
}
+static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
+ int *last_wd,
+ struct inotify_inode_mark *i_mark)
+{
+ int ret;
+
+ do {
+ if (unlikely(!idr_pre_get(idr, GFP_KERNEL)))
+ return -ENOMEM;
+
+ spin_lock(idr_lock);
+ ret = idr_get_new_above(idr, i_mark, *last_wd + 1,
+ &i_mark->wd);
+ /* we added the mark to the idr, take a reference */
+ if (!ret) {
+ *last_wd = i_mark->wd;
+ fsnotify_get_mark(&i_mark->fsn_mark);
+ }
+ spin_unlock(idr_lock);
+ } while (ret == -EAGAIN);
+
+ return ret;
+}
+
+static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
+ int wd)
+{
+ struct idr *idr = &group->inotify_data.idr;
+ spinlock_t *idr_lock = &group->inotify_data.idr_lock;
+ struct inotify_inode_mark *i_mark;
+
+ assert_spin_locked(idr_lock);
+
+ i_mark = idr_find(idr, wd);
+ if (i_mark) {
+ struct fsnotify_mark *fsn_mark = &i_mark->fsn_mark;
+
+ fsnotify_get_mark(fsn_mark);
+ /* One ref for being in the idr, one ref we just took */
+ BUG_ON(atomic_read(&fsn_mark->refcnt) < 2);
+ }
+
+ return i_mark;
+}
+
+static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group *group,
+ int wd)
+{
+ struct inotify_inode_mark *i_mark;
+ spinlock_t *idr_lock = &group->inotify_data.idr_lock;
+
+ spin_lock(idr_lock);
+ i_mark = inotify_idr_find_locked(group, wd);
+ spin_unlock(idr_lock);
+
+ return i_mark;
+}
+
+static void do_inotify_remove_from_idr(struct fsnotify_group *group,
+ struct inotify_inode_mark *i_mark)
+{
+ struct idr *idr = &group->inotify_data.idr;
+ spinlock_t *idr_lock = &group->inotify_data.idr_lock;
+ int wd = i_mark->wd;
+
+ assert_spin_locked(idr_lock);
+
+ idr_remove(idr, wd);
+
+ /* removed from the idr, drop that ref */
+ fsnotify_put_mark(&i_mark->fsn_mark);
+}
+
/*
* Remove the mark from the idr (if present) and drop the reference
* on the mark because it was in the idr.
*/
static void inotify_remove_from_idr(struct fsnotify_group *group,
- struct inotify_inode_mark_entry *ientry)
+ struct inotify_inode_mark *i_mark)
{
- struct idr *idr;
- struct fsnotify_mark_entry *entry;
- struct inotify_inode_mark_entry *found_ientry;
+ spinlock_t *idr_lock = &group->inotify_data.idr_lock;
+ struct inotify_inode_mark *found_i_mark = NULL;
int wd;
- spin_lock(&group->inotify_data.idr_lock);
- idr = &group->inotify_data.idr;
- wd = ientry->wd;
+ spin_lock(idr_lock);
+ wd = i_mark->wd;
- if (wd == -1)
+ /*
+ * does this i_mark think it is in the idr? we shouldn't get called
+ * if it wasn't....
+ */
+ if (wd == -1) {
+ WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
+ " i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
+ i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
goto out;
+ }
- entry = idr_find(&group->inotify_data.idr, wd);
- if (unlikely(!entry))
+ /* Let's look in the idr to see if we can find it */
+ found_i_mark = inotify_idr_find_locked(group, wd);
+ if (unlikely(!found_i_mark)) {
+ WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
+ " i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
+ i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
goto out;
+ }
- found_ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
- if (unlikely(found_ientry != ientry)) {
- /* We found an entry in the idr with the right wd, but it's
- * not the entry we were told to remove. eparis seriously
- * fucked up somewhere. */
- WARN_ON(1);
- ientry->wd = -1;
+ /*
+ * We found a mark in the idr at the right wd, but it's
+ * not the mark we were told to remove. eparis seriously
+ * fucked up somewhere.
+ */
+ if (unlikely(found_i_mark != i_mark)) {
+ WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p "
+ "mark->inode=%p found_i_mark=%p found_i_mark->wd=%d "
+ "found_i_mark->group=%p found_i_mark->inode=%p\n",
+ __func__, i_mark, i_mark->wd, i_mark->fsn_mark.group,
+ i_mark->fsn_mark.i.inode, found_i_mark, found_i_mark->wd,
+ found_i_mark->fsn_mark.group,
+ found_i_mark->fsn_mark.i.inode);
goto out;
}
- /* One ref for being in the idr, one ref held by the caller */
- BUG_ON(atomic_read(&entry->refcnt) < 2);
-
- idr_remove(idr, wd);
- ientry->wd = -1;
+ /*
+ * One ref for being in the idr
+ * one ref held by the caller trying to kill us
+ * one ref grabbed by inotify_idr_find
+ */
+ if (unlikely(atomic_read(&i_mark->fsn_mark.refcnt) < 3)) {
+ printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
+ " i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
+ i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
+ /* we can't really recover with bad ref counting... */
+ BUG();
+ }
- /* removed from the idr, drop that ref */
- fsnotify_put_mark(entry);
+ do_inotify_remove_from_idr(group, i_mark);
out:
- spin_unlock(&group->inotify_data.idr_lock);
+ /* match the ref taken by inotify_idr_find_locked() */
+ if (found_i_mark)
+ fsnotify_put_mark(&found_i_mark->fsn_mark);
+ i_mark->wd = -1;
+ spin_unlock(idr_lock);
}
/*
* Send IN_IGNORED for this wd, remove this wd from the idr.
*/
-void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
+void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
struct fsnotify_group *group)
{
- struct inotify_inode_mark_entry *ientry;
+ struct inotify_inode_mark *i_mark;
struct fsnotify_event *ignored_event;
struct inotify_event_private_data *event_priv;
struct fsnotify_event_private_data *fsn_event_priv;
@@ -420,7 +514,7 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
if (!ignored_event)
return;
- ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
+ i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS);
if (unlikely(!event_priv))
@@ -429,9 +523,9 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
fsn_event_priv = &event_priv->fsnotify_event_priv_data;
fsn_event_priv->group = group;
- event_priv->wd = ientry->wd;
+ event_priv->wd = i_mark->wd;
- ret = fsnotify_add_notify_event(group, ignored_event, fsn_event_priv);
+ ret = fsnotify_add_notify_event(group, ignored_event, fsn_event_priv, NULL, NULL);
if (ret)
inotify_free_event_priv(fsn_event_priv);
@@ -440,26 +534,28 @@ skip_send_ignore:
/* matches the reference taken when the event was created */
fsnotify_put_event(ignored_event);
- /* remove this entry from the idr */
- inotify_remove_from_idr(group, ientry);
+ /* remove this mark from the idr */
+ inotify_remove_from_idr(group, i_mark);
atomic_dec(&group->inotify_data.user->inotify_watches);
}
/* ding dong the mark is dead */
-static void inotify_free_mark(struct fsnotify_mark_entry *entry)
+static void inotify_free_mark(struct fsnotify_mark *fsn_mark)
{
- struct inotify_inode_mark_entry *ientry = (struct inotify_inode_mark_entry *)entry;
+ struct inotify_inode_mark *i_mark;
+
+ i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
- kmem_cache_free(inotify_inode_mark_cachep, ientry);
+ kmem_cache_free(inotify_inode_mark_cachep, i_mark);
}
static int inotify_update_existing_watch(struct fsnotify_group *group,
struct inode *inode,
u32 arg)
{
- struct fsnotify_mark_entry *entry;
- struct inotify_inode_mark_entry *ientry;
+ struct fsnotify_mark *fsn_mark;
+ struct inotify_inode_mark *i_mark;
__u32 old_mask, new_mask;
__u32 mask;
int add = (arg & IN_MASK_ADD);
@@ -470,36 +566,32 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
if (unlikely(!mask))
return -EINVAL;
- spin_lock(&inode->i_lock);
- entry = fsnotify_find_mark_entry(group, inode);
- spin_unlock(&inode->i_lock);
- if (!entry)
+ fsn_mark = fsnotify_find_inode_mark(group, inode);
+ if (!fsn_mark)
return -ENOENT;
- ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
+ i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
- spin_lock(&entry->lock);
+ spin_lock(&fsn_mark->lock);
- old_mask = entry->mask;
- if (add) {
- entry->mask |= mask;
- new_mask = entry->mask;
- } else {
- entry->mask = mask;
- new_mask = entry->mask;
- }
+ old_mask = fsn_mark->mask;
+ if (add)
+ fsnotify_set_mark_mask_locked(fsn_mark, (fsn_mark->mask | mask));
+ else
+ fsnotify_set_mark_mask_locked(fsn_mark, mask);
+ new_mask = fsn_mark->mask;
- spin_unlock(&entry->lock);
+ spin_unlock(&fsn_mark->lock);
if (old_mask != new_mask) {
/* more bits in old than in new? */
int dropped = (old_mask & ~new_mask);
- /* more bits in this entry than the inode's mask? */
+ /* more bits in this fsn_mark than the inode's mask? */
int do_inode = (new_mask & ~inode->i_fsnotify_mask);
- /* more bits in this entry than the group? */
+ /* more bits in this fsn_mark than the group? */
int do_group = (new_mask & ~group->mask);
- /* update the inode with this new entry */
+ /* update the inode with this new fsn_mark */
if (dropped || do_inode)
fsnotify_recalc_inode_mask(inode);
@@ -509,10 +601,10 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
}
/* return the wd */
- ret = ientry->wd;
+ ret = i_mark->wd;
- /* match the get from fsnotify_find_mark_entry() */
- fsnotify_put_mark(entry);
+ /* match the get from fsnotify_find_mark() */
+ fsnotify_put_mark(fsn_mark);
return ret;
}
@@ -521,73 +613,55 @@ static int inotify_new_watch(struct fsnotify_group *group,
struct inode *inode,
u32 arg)
{
- struct inotify_inode_mark_entry *tmp_ientry;
+ struct inotify_inode_mark *tmp_i_mark;
__u32 mask;
int ret;
+ struct idr *idr = &group->inotify_data.idr;
+ spinlock_t *idr_lock = &group->inotify_data.idr_lock;
/* don't allow invalid bits: we don't want flags set */
mask = inotify_arg_to_mask(arg);
if (unlikely(!mask))
return -EINVAL;
- tmp_ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
- if (unlikely(!tmp_ientry))
+ tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
+ if (unlikely(!tmp_i_mark))
return -ENOMEM;
- fsnotify_init_mark(&tmp_ientry->fsn_entry, inotify_free_mark);
- tmp_ientry->fsn_entry.mask = mask;
- tmp_ientry->wd = -1;
+ fsnotify_init_mark(&tmp_i_mark->fsn_mark, inotify_free_mark);
+ tmp_i_mark->fsn_mark.mask = mask;
+ tmp_i_mark->wd = -1;
ret = -ENOSPC;
if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
goto out_err;
-retry:
- ret = -ENOMEM;
- if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
- goto out_err;
- spin_lock(&group->inotify_data.idr_lock);
- ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry,
- group->inotify_data.last_wd+1,
- &tmp_ientry->wd);
- spin_unlock(&group->inotify_data.idr_lock);
- if (ret) {
- /* idr was out of memory allocate and try again */
- if (ret == -EAGAIN)
- goto retry;
+ ret = inotify_add_to_idr(idr, idr_lock, &group->inotify_data.last_wd,
+ tmp_i_mark);
+ if (ret)
goto out_err;
- }
-
- /* we put the mark on the idr, take a reference */
- fsnotify_get_mark(&tmp_ientry->fsn_entry);
/* we are on the idr, now get on the inode */
- ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode);
+ ret = fsnotify_add_mark(&tmp_i_mark->fsn_mark, group, inode, NULL, 0);
if (ret) {
/* we failed to get on the inode, get off the idr */
- inotify_remove_from_idr(group, tmp_ientry);
+ inotify_remove_from_idr(group, tmp_i_mark);
goto out_err;
}
- /* update the idr hint, who cares about races, it's just a hint */
- group->inotify_data.last_wd = tmp_ientry->wd;
-
/* increment the number of watches the user has */
atomic_inc(&group->inotify_data.user->inotify_watches);
- /* return the watch descriptor for this new entry */
- ret = tmp_ientry->wd;
-
- /* match the ref from fsnotify_init_markentry() */
- fsnotify_put_mark(&tmp_ientry->fsn_entry);
+ /* return the watch descriptor for this new mark */
+ ret = tmp_i_mark->wd;
/* if this mark added a new event update the group mask */
if (mask & ~group->mask)
fsnotify_recalc_group_mask(group);
out_err:
- if (ret < 0)
- kmem_cache_free(inotify_inode_mark_cachep, tmp_ientry);
+ /* match the ref from fsnotify_init_mark() */
+ fsnotify_put_mark(&tmp_i_mark->fsn_mark);
return ret;
}
@@ -616,11 +690,8 @@ retry:
static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsigned int max_events)
{
struct fsnotify_group *group;
- unsigned int grp_num;
- /* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
- grp_num = (INOTIFY_GROUP_NUM - atomic_inc_return(&inotify_grp_num));
- group = fsnotify_obtain_group(grp_num, 0, &inotify_fsnotify_ops);
+ group = fsnotify_alloc_group(&inotify_fsnotify_ops);
if (IS_ERR(group))
return group;
@@ -726,7 +797,7 @@ fput_and_out:
SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
struct fsnotify_group *group;
- struct fsnotify_mark_entry *entry;
+ struct inotify_inode_mark *i_mark;
struct file *filp;
int ret = 0, fput_needed;
@@ -735,25 +806,23 @@ SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
return -EBADF;
/* verify that this is indeed an inotify instance */
- if (unlikely(filp->f_op != &inotify_fops)) {
- ret = -EINVAL;
+ ret = -EINVAL;
+ if (unlikely(filp->f_op != &inotify_fops))
goto out;
- }
group = filp->private_data;
- spin_lock(&group->inotify_data.idr_lock);
- entry = idr_find(&group->inotify_data.idr, wd);
- if (unlikely(!entry)) {
- spin_unlock(&group->inotify_data.idr_lock);
- ret = -EINVAL;
+ ret = -EINVAL;
+ i_mark = inotify_idr_find(group, wd);
+ if (unlikely(!i_mark))
goto out;
- }
- fsnotify_get_mark(entry);
- spin_unlock(&group->inotify_data.idr_lock);
- fsnotify_destroy_mark_by_entry(entry);
- fsnotify_put_mark(entry);
+ ret = 0;
+
+ fsnotify_destroy_mark(&i_mark->fsn_mark);
+
+ /* match ref taken by inotify_idr_find */
+ fsnotify_put_mark(&i_mark->fsn_mark);
out:
fput_light(filp, fput_needed);
@@ -767,7 +836,7 @@ out:
*/
static int __init inotify_user_setup(void)
{
- inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark_entry, SLAB_PANIC);
+ inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark, SLAB_PANIC);
event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);
inotify_max_queued_events = 16384;
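
The idr helpers above only change the in-kernel bookkeeping of watch descriptors; the userspace contract is unchanged: inotify_add_watch(2) still returns a wd, and removing that wd still queues IN_IGNORED for it. A small self-contained userspace example of that contract, for illustration only (it is not part of this patch and assumes a Linux system with /tmp present):

#include <stdio.h>
#include <unistd.h>
#include <sys/inotify.h>

int main(void)
{
	char buf[4096] __attribute__((aligned(__alignof__(struct inotify_event))));
	ssize_t len;
	int fd, wd;

	fd = inotify_init1(0);
	if (fd < 0) {
		perror("inotify_init1");
		return 1;
	}

	wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);
	if (wd < 0) {
		perror("inotify_add_watch");
		return 1;
	}

	/* Removing the watch queues an IN_IGNORED event for this wd. */
	inotify_rm_watch(fd, wd);

	len = read(fd, buf, sizeof(buf));
	if (len > 0) {
		struct inotify_event *ev = (struct inotify_event *)buf;

		if (ev->mask & IN_IGNORED)
			printf("wd %d removed, IN_IGNORED delivered\n", ev->wd);
	}

	close(fd);
	return 0;
}
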
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
new file mode 100644
index 000000000000..8f3b0e7a543d
--- /dev/null
+++ b/fs/notify/mark.c
@@ -0,0 +1,325 @@
+/*
+ * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * fsnotify inode mark locking/lifetime/and refcnting
+ *
+ * REFCNT:
+ * The mark->refcnt tells how many "things" in the kernel currently are
+ * referencing this object. The object typically will live inside the kernel
+ * with a refcnt of 2, one for each list it is on (i_list, g_list). Any task
+ * which can find this object while holding the appropriate locks can take a reference
+ * and the object itself is guaranteed to survive until the reference is dropped.
+ *
+ * LOCKING:
+ * There are 3 spinlocks involved with fsnotify inode marks and they MUST
+ * be taken in order as follows:
+ *
+ * mark->lock
+ * group->mark_lock
+ * inode->i_lock
+ *
+ * mark->lock protects 2 things, mark->group and mark->inode. You must hold
+ * that lock to dereference either of these things (they could be NULL even with
+ * the lock)
+ *
+ * group->mark_lock protects the marks_list anchored inside a given group
+ * and each mark is hooked via the g_list. It also sorta protects the
+ * free_g_list, which when used is anchored by a private list on the stack of the
+ * task which held the group->mark_lock.
+ *
+ * inode->i_lock protects the i_fsnotify_marks list anchored inside a
+ * given inode and each mark is hooked via the i_list. (and sorta the
+ * free_i_list)
+ *
+ *
+ * LIFETIME:
+ * Inode marks survive between when they are added to an inode and when their
+ * refcnt==0.
+ *
+ * The inode mark can be cleared for a number of different reasons including:
+ * - The inode is unlinked for the last time. (fsnotify_inode_remove)
+ * - The inode is being evicted from cache. (fsnotify_inode_delete)
+ * - The fs the inode is on is unmounted. (fsnotify_inode_delete/fsnotify_unmount_inodes)
+ * - Something explicitly requests that it be removed. (fsnotify_destroy_mark)
+ * - The fsnotify_group associated with the mark is going away and all such marks
+ * need to be cleaned up. (fsnotify_clear_marks_by_group)
+ *
+ * Worst case we are given an inode and need to clean up all the marks on that
+ * inode. We take i_lock and walk the i_fsnotify_marks safely. For each
+ * mark on the list we take a reference (so the mark can't disappear under us).
+ * We remove that mark from the inode's list of marks and we add this mark to a
+ * private list anchored on the stack using i_free_list. At this point we no
+ * longer fear anything finding the mark using the inode's list of marks.
+ *
+ * We can safely and locklessly run the private list on the stack of everything
+ * we just unattached from the original inode. For each mark on the private list
+ * we grab the mark->lock and can thus dereference mark->group and mark->inode. If
+ * we see the group and inode are not NULL we take those locks. Now holding all
+ * 3 locks we can completely remove the mark from other tasks finding it in the
+ * future. Remember, 10 things might already be referencing this mark, but they
+ * better be holding a ref. We drop the reference we took before we unhooked it
+ * from the inode. When the ref hits 0 we can free the mark.
+ *
+ * Very similarly for freeing by group, except we use free_g_list.
+ *
+ * This has the very interesting property of being able to run concurrently with
+ * any (or all) other directions.
+ */
+
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/writeback.h> /* for inode_lock */
+
+#include <asm/atomic.h>
+
+#include <linux/fsnotify_backend.h>
+#include "fsnotify.h"
+
+void fsnotify_get_mark(struct fsnotify_mark *mark)
+{
+ atomic_inc(&mark->refcnt);
+}
+
+void fsnotify_put_mark(struct fsnotify_mark *mark)
+{
+ if (atomic_dec_and_test(&mark->refcnt))
+ mark->free_mark(mark);
+}
+
+/*
+ * Any time a mark is getting freed we end up here.
+ * The caller had better be holding a reference to this mark so we don't actually
+ * do the final put under the mark->lock
+ */
+void fsnotify_destroy_mark(struct fsnotify_mark *mark)
+{
+ struct fsnotify_group *group;
+ struct inode *inode = NULL;
+
+ spin_lock(&mark->lock);
+
+ group = mark->group;
+
+ /* if !group something else already marked this to die */
+ if (!group) {
+ spin_unlock(&mark->lock);
+ return;
+ }
+
+ /* 1 from caller and 1 for being on i_list/g_list */
+ BUG_ON(atomic_read(&mark->refcnt) < 2);
+
+ spin_lock(&group->mark_lock);
+
+ if (mark->flags & FSNOTIFY_MARK_FLAG_INODE) {
+ inode = mark->i.inode;
+ fsnotify_destroy_inode_mark(mark);
+ } else if (mark->flags & FSNOTIFY_MARK_FLAG_VFSMOUNT)
+ fsnotify_destroy_vfsmount_mark(mark);
+ else
+ BUG();
+
+ list_del_init(&mark->g_list);
+ mark->group = NULL;
+
+ fsnotify_put_mark(mark); /* for i_list and g_list */
+
+ spin_unlock(&group->mark_lock);
+ spin_unlock(&mark->lock);
+
+ /*
+ * Some groups like to know that marks are being freed. This is a
+ * callback to the group function to let it know that this mark
+ * is being freed.
+ */
+ if (group->ops->freeing_mark)
+ group->ops->freeing_mark(mark, group);
+
+ /*
+ * __fsnotify_update_child_dentry_flags(inode);
+ *
+ * I really want to call that, but we can't, we have no idea if the inode
+ * still exists the second we drop the mark->lock.
+ *
+ * The next time an event arrives at this inode from one of its children
+ * __fsnotify_parent will see that the inode doesn't care about its
+ * children and will update all of these flags then. So really this
+ * is just a lazy update (and could be a perf win...)
+ */
+
+ if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED))
+ iput(inode);
+
+ /*
+ * it's possible that this group tried to destroy itself, but this
+ * mark was simultaneously being freed by the inode. If that's the
+ * case, we finish freeing the group here.
+ */
+ if (unlikely(atomic_dec_and_test(&group->num_marks)))
+ fsnotify_final_destroy_group(group);
+}
+
+void fsnotify_set_mark_mask_locked(struct fsnotify_mark *mark, __u32 mask)
+{
+ assert_spin_locked(&mark->lock);
+
+ mark->mask = mask;
+
+ if (mark->flags & FSNOTIFY_MARK_FLAG_INODE)
+ fsnotify_set_inode_mark_mask_locked(mark, mask);
+}
+
+void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mask)
+{
+ assert_spin_locked(&mark->lock);
+
+ mark->ignored_mask = mask;
+}
+
+/*
+ * Attach an initialized mark to a given group and fs object.
+ * These marks may be used by the fsnotify backend to determine which
+ * event types should be delivered to which group.
+ */
+int fsnotify_add_mark(struct fsnotify_mark *mark,
+ struct fsnotify_group *group, struct inode *inode,
+ struct vfsmount *mnt, int allow_dups)
+{
+ int ret = 0;
+
+ BUG_ON(inode && mnt);
+ BUG_ON(!inode && !mnt);
+
+ /*
+ * if this group isn't already being tested for inode type events we need
+ * to start testing
+ */
+ if (inode && unlikely(list_empty(&group->inode_group_list)))
+ fsnotify_add_inode_group(group);
+ else if (mnt && unlikely(list_empty(&group->vfsmount_group_list)))
+ fsnotify_add_vfsmount_group(group);
+
+ /*
+ * LOCKING ORDER!!!!
+ * mark->lock
+ * group->mark_lock
+ * inode->i_lock
+ */
+ spin_lock(&mark->lock);
+ spin_lock(&group->mark_lock);
+
+ mark->group = group;
+ list_add(&mark->g_list, &group->marks_list);
+ atomic_inc(&group->num_marks);
+ fsnotify_get_mark(mark); /* for i_list and g_list */
+
+ if (inode) {
+ ret = fsnotify_add_inode_mark(mark, group, inode, allow_dups);
+ if (ret)
+ goto err;
+ } else if (mnt) {
+ ret = fsnotify_add_vfsmount_mark(mark, group, mnt, allow_dups);
+ if (ret)
+ goto err;
+ } else {
+ BUG();
+ }
+
+ spin_unlock(&group->mark_lock);
+
+ /* this will pin the object if appropriate */
+ fsnotify_set_mark_mask_locked(mark, mark->mask);
+
+ spin_unlock(&mark->lock);
+
+ if (inode)
+ __fsnotify_update_child_dentry_flags(inode);
+
+ return ret;
+err:
+ mark->group = NULL;
+ list_del_init(&mark->g_list);
+ atomic_dec(&group->num_marks);
+ fsnotify_put_mark(mark);
+
+ spin_unlock(&group->mark_lock);
+ spin_unlock(&mark->lock);
+
+ return ret;
+}
+
+/*
+ * clear any marks in a group in which mark->flags & flags is true
+ */
+void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
+ unsigned int flags)
+{
+ struct fsnotify_mark *lmark, *mark;
+ LIST_HEAD(free_list);
+
+ spin_lock(&group->mark_lock);
+ list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
+ if (mark->flags & flags) {
+ list_add(&mark->free_g_list, &free_list);
+ list_del_init(&mark->g_list);
+ fsnotify_get_mark(mark);
+ }
+ }
+ spin_unlock(&group->mark_lock);
+
+ list_for_each_entry_safe(mark, lmark, &free_list, free_g_list) {
+ fsnotify_destroy_mark(mark);
+ fsnotify_put_mark(mark);
+ }
+}
+
+/*
+ * Given a group, destroy all of the marks associated with that group.
+ */
+void fsnotify_clear_marks_by_group(struct fsnotify_group *group)
+{
+ fsnotify_clear_marks_by_group_flags(group, (unsigned int)-1);
+}
+
+void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *old)
+{
+ assert_spin_locked(&old->lock);
+ new->i.inode = old->i.inode;
+ new->m.mnt = old->m.mnt;
+ new->group = old->group;
+ new->mask = old->mask;
+ new->free_mark = old->free_mark;
+}
+
+/*
+ * Nothing fancy, just initialize lists and locks and counters.
+ */
+void fsnotify_init_mark(struct fsnotify_mark *mark,
+ void (*free_mark)(struct fsnotify_mark *mark))
+{
+ memset(mark, 0, sizeof(*mark));
+ spin_lock_init(&mark->lock);
+ atomic_set(&mark->refcnt, 1);
+ mark->free_mark = free_mark;
+}
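
The header comment in the new mark.c fixes the lock order as mark->lock, then group->mark_lock, then inode->i_lock, and every path in the file takes the locks in that order so that concurrent teardown by inode eviction, by group destruction, and by explicit fsnotify_destroy_mark() cannot deadlock. The sketch below models only that ordering rule with pthread mutexes; the mutex names and the detach_mark() helper are hypothetical stand-ins for the kernel locking, nothing more.

#include <pthread.h>
#include <stdio.h>

/* Stand-ins for mark->lock, group->mark_lock and inode->i_lock. */
static pthread_mutex_t mark_lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t group_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t inode_lock = PTHREAD_MUTEX_INITIALIZER;

/* Every caller takes the locks in the same documented order... */
static void detach_mark(void)
{
	pthread_mutex_lock(&mark_lock);
	pthread_mutex_lock(&group_lock);
	pthread_mutex_lock(&inode_lock);

	/* ...unhook from the inode list and the group list here... */

	pthread_mutex_unlock(&inode_lock);
	pthread_mutex_unlock(&group_lock);
	pthread_mutex_unlock(&mark_lock);
}

int main(void)
{
	detach_mark();
	puts("lock order respected: mark -> group -> inode");
	return 0;
}
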
diff --git a/fs/notify/notification.c b/fs/notify/notification.c
index b8bf53b4c108..b35faafacd38 100644
--- a/fs/notify/notification.c
+++ b/fs/notify/notification.c
@@ -56,7 +56,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
* it is needed. It's refcnt is set 1 at kernel init time and will never
* get set to 0 so it will never get 'freed'
*/
-static struct fsnotify_event q_overflow_event;
+static struct fsnotify_event *q_overflow_event;
static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
/**
@@ -93,6 +93,7 @@ void fsnotify_put_event(struct fsnotify_event *event)
BUG_ON(!list_empty(&event->private_data_list));
kfree(event->file_name);
+ put_pid(event->tgid);
kmem_cache_free(fsnotify_event_cachep, event);
}
}
@@ -104,7 +105,8 @@ struct fsnotify_event_holder *fsnotify_alloc_event_holder(void)
void fsnotify_destroy_event_holder(struct fsnotify_event_holder *holder)
{
- kmem_cache_free(fsnotify_event_holder_cachep, holder);
+ if (holder)
+ kmem_cache_free(fsnotify_event_holder_cachep, holder);
}
/*
@@ -129,53 +131,20 @@ struct fsnotify_event_private_data *fsnotify_remove_priv_from_event(struct fsnot
}
/*
- * Check if 2 events contain the same information. We do not compare private data
- * but at this moment that isn't a problem for any know fsnotify listeners.
- */
-static bool event_compare(struct fsnotify_event *old, struct fsnotify_event *new)
-{
- if ((old->mask == new->mask) &&
- (old->to_tell == new->to_tell) &&
- (old->data_type == new->data_type) &&
- (old->name_len == new->name_len)) {
- switch (old->data_type) {
- case (FSNOTIFY_EVENT_INODE):
- /* remember, after old was put on the wait_q we aren't
- * allowed to look at the inode any more, only thing
- * left to check was if the file_name is the same */
- if (!old->name_len ||
- !strcmp(old->file_name, new->file_name))
- return true;
- break;
- case (FSNOTIFY_EVENT_PATH):
- if ((old->path.mnt == new->path.mnt) &&
- (old->path.dentry == new->path.dentry))
- return true;
- break;
- case (FSNOTIFY_EVENT_NONE):
- if (old->mask & FS_Q_OVERFLOW)
- return true;
- else if (old->mask & FS_IN_IGNORED)
- return false;
- return false;
- };
- }
- return false;
-}
-
-/*
* Add an event to the group notification queue. The group can later pull this
* event off the queue to deal with. If the event is successfully added to the
* group's notification queue, a reference is taken on event.
*/
int fsnotify_add_notify_event(struct fsnotify_group *group, struct fsnotify_event *event,
- struct fsnotify_event_private_data *priv)
+ struct fsnotify_event_private_data *priv,
+ int (*merge)(struct list_head *,
+ struct fsnotify_event *,
+ void **arg),
+ void **arg)
{
struct fsnotify_event_holder *holder = NULL;
struct list_head *list = &group->notification_list;
- struct fsnotify_event_holder *last_holder;
- struct fsnotify_event *last_event;
- int ret = 0;
+ int rc = 0;
/*
* There is one fsnotify_event_holder embedded inside each fsnotify_event.
@@ -195,12 +164,24 @@ alloc_holder:
mutex_lock(&group->notification_mutex);
if (group->q_len >= group->max_events) {
- event = &q_overflow_event;
- ret = -EOVERFLOW;
+ event = q_overflow_event;
+ rc = -EOVERFLOW;
/* sorry, no private data on the overflow event */
priv = NULL;
}
+ if (!list_empty(list) && merge) {
+ int ret;
+
+ ret = merge(list, event, arg);
+ if (ret) {
+ mutex_unlock(&group->notification_mutex);
+ if (holder != &event->holder)
+ fsnotify_destroy_event_holder(holder);
+ return ret;
+ }
+ }
+
spin_lock(&event->lock);
if (list_empty(&event->holder.event_list)) {
@@ -215,18 +196,6 @@ alloc_holder:
goto alloc_holder;
}
- if (!list_empty(list)) {
- last_holder = list_entry(list->prev, struct fsnotify_event_holder, event_list);
- last_event = last_holder->event;
- if (event_compare(last_event, event)) {
- spin_unlock(&event->lock);
- mutex_unlock(&group->notification_mutex);
- if (holder != &event->holder)
- fsnotify_destroy_event_holder(holder);
- return -EEXIST;
- }
- }
-
group->q_len++;
holder->event = event;
@@ -238,7 +207,7 @@ alloc_holder:
mutex_unlock(&group->notification_mutex);
wake_up(&group->notification_waitq);
- return ret;
+ return rc;
}
/*
@@ -314,25 +283,78 @@ void fsnotify_flush_notify(struct fsnotify_group *group)
static void initialize_event(struct fsnotify_event *event)
{
- event->holder.event = NULL;
INIT_LIST_HEAD(&event->holder.event_list);
atomic_set(&event->refcnt, 1);
spin_lock_init(&event->lock);
- event->path.dentry = NULL;
- event->path.mnt = NULL;
- event->inode = NULL;
- event->data_type = FSNOTIFY_EVENT_NONE;
-
INIT_LIST_HEAD(&event->private_data_list);
+}
+
+/*
+ * Caller damn well better be holding whatever mutex is protecting the
+ * old_holder->event_list and the new_event must be a clean event which
+ * cannot be found anywhere else in the kernel.
+ */
+int fsnotify_replace_event(struct fsnotify_event_holder *old_holder,
+ struct fsnotify_event *new_event)
+{
+ struct fsnotify_event *old_event = old_holder->event;
+ struct fsnotify_event_holder *new_holder = &new_event->holder;
+
+ enum event_spinlock_class {
+ SPINLOCK_OLD,
+ SPINLOCK_NEW,
+ };
+
+ /*
+ * if the new_event's embedded holder is in use someone
+ * screwed up and didn't give us a clean new event.
+ */
+ BUG_ON(!list_empty(&new_holder->event_list));
+
+ spin_lock_nested(&old_event->lock, SPINLOCK_OLD);
+ spin_lock_nested(&new_event->lock, SPINLOCK_NEW);
+
+ new_holder->event = new_event;
+ list_replace_init(&old_holder->event_list, &new_holder->event_list);
+
+ spin_unlock(&new_event->lock);
+ spin_unlock(&old_event->lock);
+
+ /* event == holder means we are referenced through the holder embedded in the event */
+ if (old_holder != &old_event->holder)
+ fsnotify_destroy_event_holder(old_holder);
+
+ fsnotify_get_event(new_event); /* on the list take reference */
+ fsnotify_put_event(old_event); /* off the list, drop reference */
+
+ return 0;
+}
+
+struct fsnotify_event *fsnotify_clone_event(struct fsnotify_event *old_event)
+{
+ struct fsnotify_event *event;
+
+ event = kmem_cache_alloc(fsnotify_event_cachep, GFP_KERNEL);
+ if (!event)
+ return NULL;
- event->to_tell = NULL;
+ memcpy(event, old_event, sizeof(*event));
+ initialize_event(event);
- event->file_name = NULL;
- event->name_len = 0;
+ if (event->name_len) {
+ event->file_name = kstrdup(old_event->file_name, GFP_KERNEL);
+ if (!event->file_name) {
+ kmem_cache_free(fsnotify_event_cachep, event);
+ return NULL;
+ }
+ }
+ event->tgid = get_pid(old_event->tgid);
+ if (event->data_type == FSNOTIFY_EVENT_PATH)
+ path_get(&event->path);
- event->sync_cookie = 0;
+ return event;
}
/*
@@ -348,12 +370,12 @@ static void initialize_event(struct fsnotify_event *event)
* @name the filename, if available
*/
struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask, void *data,
- int data_type, const char *name, u32 cookie,
- gfp_t gfp)
+ int data_type, const unsigned char *name,
+ u32 cookie, gfp_t gfp)
{
struct fsnotify_event *event;
- event = kmem_cache_alloc(fsnotify_event_cachep, gfp);
+ event = kmem_cache_zalloc(fsnotify_event_cachep, gfp);
if (!event)
return NULL;
@@ -368,30 +390,21 @@ struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask,
event->name_len = strlen(event->file_name);
}
+ event->tgid = get_pid(task_tgid(current));
event->sync_cookie = cookie;
event->to_tell = to_tell;
+ event->data_type = data_type;
switch (data_type) {
- case FSNOTIFY_EVENT_FILE: {
- struct file *file = data;
- struct path *path = &file->f_path;
- event->path.dentry = path->dentry;
- event->path.mnt = path->mnt;
- path_get(&event->path);
- event->data_type = FSNOTIFY_EVENT_PATH;
- break;
- }
case FSNOTIFY_EVENT_PATH: {
struct path *path = data;
event->path.dentry = path->dentry;
event->path.mnt = path->mnt;
path_get(&event->path);
- event->data_type = FSNOTIFY_EVENT_PATH;
break;
}
case FSNOTIFY_EVENT_INODE:
event->inode = data;
- event->data_type = FSNOTIFY_EVENT_INODE;
break;
case FSNOTIFY_EVENT_NONE:
event->inode = NULL;
@@ -412,8 +425,11 @@ __init int fsnotify_notification_init(void)
fsnotify_event_cachep = KMEM_CACHE(fsnotify_event, SLAB_PANIC);
fsnotify_event_holder_cachep = KMEM_CACHE(fsnotify_event_holder, SLAB_PANIC);
- initialize_event(&q_overflow_event);
- q_overflow_event.mask = FS_Q_OVERFLOW;
+ q_overflow_event = fsnotify_create_event(NULL, FS_Q_OVERFLOW, NULL,
+ FSNOTIFY_EVENT_NONE, NULL, 0,
+ GFP_KERNEL);
+ if (!q_overflow_event)
+ panic("unable to allocate fsnotify q_overflow_event\n");
return 0;
}
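
With q_overflow_event now allocated once at init time, fsnotify_add_notify_event() substitutes it for the caller's event whenever the queue already holds max_events entries, so a listener sees a single FS_Q_OVERFLOW rather than silently losing notifications. A minimal model of that substitution follows; the struct event type, MAX_EVENTS value and enqueue() helper are hypothetical simplifications of the kernel code.

#include <stdbool.h>
#include <stdio.h>

#define MAX_EVENTS 2

struct event { unsigned int mask; };

static struct event overflow_event = { .mask = 0x4000 };	/* FS_Q_OVERFLOW */
static const struct event *queue[MAX_EVENTS + 1];
static size_t qlen;

/* Returns false when the queue was full and the preallocated overflow
 * event was substituted for the caller's event. */
static bool enqueue(const struct event *ev)
{
	if (qlen >= MAX_EVENTS) {
		/* queue the overflow event only once; the tail merge in the
		 * kernel has the same effect for repeated overflows */
		if (queue[qlen - 1] != &overflow_event)
			queue[qlen++] = &overflow_event;
		return false;
	}
	queue[qlen++] = ev;
	return true;
}

int main(void)
{
	struct event e = { .mask = 0x2 };

	enqueue(&e);
	enqueue(&e);
	if (!enqueue(&e))
		printf("queue full: delivered FS_Q_OVERFLOW (0x%x)\n",
		       queue[qlen - 1]->mask);
	return 0;
}
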
diff --git a/fs/notify/vfsmount_mark.c b/fs/notify/vfsmount_mark.c
new file mode 100644
index 000000000000..ec580a25d293
--- /dev/null
+++ b/fs/notify/vfsmount_mark.c
@@ -0,0 +1,175 @@
+/*
+ * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mount.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/writeback.h> /* for inode_lock */
+
+#include <asm/atomic.h>
+
+#include <linux/fsnotify_backend.h>
+#include "fsnotify.h"
+
+void fsnotify_clear_marks_by_mount(struct vfsmount *mnt)
+{
+ struct fsnotify_mark *mark, *lmark;
+ struct hlist_node *pos, *n;
+ LIST_HEAD(free_list);
+
+ spin_lock(&mnt->mnt_root->d_lock);
+ hlist_for_each_entry_safe(mark, pos, n, &mnt->mnt_fsnotify_marks, m.m_list) {
+ list_add(&mark->m.free_m_list, &free_list);
+ hlist_del_init(&mark->m.m_list);
+ fsnotify_get_mark(mark);
+ }
+ spin_unlock(&mnt->mnt_root->d_lock);
+
+ list_for_each_entry_safe(mark, lmark, &free_list, m.free_m_list) {
+ fsnotify_destroy_mark(mark);
+ fsnotify_put_mark(mark);
+ }
+}
+
+void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group)
+{
+ fsnotify_clear_marks_by_group_flags(group, FSNOTIFY_MARK_FLAG_VFSMOUNT);
+}
+
+/*
+ * Recalculate the mask of events relevant to a given vfsmount; the caller
+ * must hold mnt->mnt_root->d_lock.
+ */
+static void fsnotify_recalc_vfsmount_mask_locked(struct vfsmount *mnt)
+{
+ struct fsnotify_mark *mark;
+ struct hlist_node *pos;
+ __u32 new_mask = 0;
+
+ assert_spin_locked(&mnt->mnt_root->d_lock);
+
+ hlist_for_each_entry(mark, pos, &mnt->mnt_fsnotify_marks, m.m_list)
+ new_mask |= mark->mask;
+ mnt->mnt_fsnotify_mask = new_mask;
+}
+
+/*
+ * Recalculate the mnt->mnt_fsnotify_mask, or the mask of all FS_* event types
+ * any notifier is interested in hearing about for this mount point
+ */
+void fsnotify_recalc_vfsmount_mask(struct vfsmount *mnt)
+{
+ spin_lock(&mnt->mnt_root->d_lock);
+ fsnotify_recalc_vfsmount_mask_locked(mnt);
+ spin_unlock(&mnt->mnt_root->d_lock);
+}
+
+void fsnotify_destroy_vfsmount_mark(struct fsnotify_mark *mark)
+{
+ struct vfsmount *mnt = mark->m.mnt;
+
+ assert_spin_locked(&mark->lock);
+ assert_spin_locked(&mark->group->mark_lock);
+
+ spin_lock(&mnt->mnt_root->d_lock);
+
+ hlist_del_init(&mark->m.m_list);
+ mark->m.mnt = NULL;
+
+ fsnotify_recalc_vfsmount_mask_locked(mnt);
+
+ spin_unlock(&mnt->mnt_root->d_lock);
+}
+
+static struct fsnotify_mark *fsnotify_find_vfsmount_mark_locked(struct fsnotify_group *group,
+ struct vfsmount *mnt)
+{
+ struct fsnotify_mark *mark;
+ struct hlist_node *pos;
+
+ assert_spin_locked(&mnt->mnt_root->d_lock);
+
+ hlist_for_each_entry(mark, pos, &mnt->mnt_fsnotify_marks, m.m_list) {
+ if (mark->group == group) {
+ fsnotify_get_mark(mark);
+ return mark;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * Given a group and vfsmount, find the mark associated with that combination.
+ * If found, take a reference to that mark and return it; otherwise return NULL.
+ */
+struct fsnotify_mark *fsnotify_find_vfsmount_mark(struct fsnotify_group *group,
+ struct vfsmount *mnt)
+{
+ struct fsnotify_mark *mark;
+
+ spin_lock(&mnt->mnt_root->d_lock);
+ mark = fsnotify_find_vfsmount_mark_locked(group, mnt);
+ spin_unlock(&mnt->mnt_root->d_lock);
+
+ return mark;
+}
+
+/*
+ * Attach an initialized mark to a given group and vfsmount.
+ * These marks may be used by the fsnotify backend to determine which
+ * event types should be delivered to which groups.
+ */
+int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
+ struct fsnotify_group *group, struct vfsmount *mnt,
+ int allow_dups)
+{
+ struct fsnotify_mark *lmark = NULL;
+ int ret = 0;
+
+ mark->flags = FSNOTIFY_MARK_FLAG_VFSMOUNT;
+
+ /*
+ * LOCKING ORDER!!!!
+ * mark->lock
+ * group->mark_lock
+ * mnt->mnt_root->d_lock
+ */
+ assert_spin_locked(&mark->lock);
+ assert_spin_locked(&group->mark_lock);
+
+ spin_lock(&mnt->mnt_root->d_lock);
+
+ if (!allow_dups)
+ lmark = fsnotify_find_vfsmount_mark_locked(group, mnt);
+ if (!lmark) {
+ mark->m.mnt = mnt;
+
+ hlist_add_head(&mark->m.m_list, &mnt->mnt_fsnotify_marks);
+
+ fsnotify_recalc_vfsmount_mask_locked(mnt);
+ } else {
+ ret = -EEXIST;
+ }
+
+ spin_unlock(&mnt->mnt_root->d_lock);
+
+ return ret;
+}
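
fsnotify_recalc_vfsmount_mask_locked() simply ORs together the masks of every mark hung off the mount, so mnt->mnt_fsnotify_mask is always the union of the events any group wants to hear about. The same idea as a standalone sketch, with a hypothetical struct mark and recalc_mask() helper in place of the kernel types:

#include <stdio.h>

struct mark { unsigned int mask; };

/* The mount's mask is simply the union of its marks' masks. */
static unsigned int recalc_mask(const struct mark *marks, size_t n)
{
	unsigned int new_mask = 0;

	for (size_t i = 0; i < n; i++)
		new_mask |= marks[i].mask;
	return new_mask;
}

int main(void)
{
	struct mark marks[] = { { 0x1 /* FS_ACCESS */ }, { 0x2 /* FS_MODIFY */ } };

	printf("mnt mask = 0x%x\n", recalc_mask(marks, 2));	/* prints 0x3 */
	return 0;
}
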
diff --git a/fs/ocfs2/Makefile b/fs/ocfs2/Makefile
index 791c0886c060..07d9fd854350 100644
--- a/fs/ocfs2/Makefile
+++ b/fs/ocfs2/Makefile
@@ -29,6 +29,7 @@ ocfs2-objs := \
mmap.o \
namei.o \
refcounttree.o \
+ reservations.o \
resize.o \
slot_map.o \
suballoc.o \
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 9f8bd913c51e..0cb2945eb817 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -1061,11 +1061,7 @@ static int ocfs2_create_new_meta_bhs(handle_t *handle,
/* We'll also be dirtied by the caller, so
* this isn't absolutely necessary. */
- status = ocfs2_journal_dirty(handle, bhs[i]);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
+ ocfs2_journal_dirty(handle, bhs[i]);
}
count += num_got;
@@ -1129,8 +1125,7 @@ static int ocfs2_adjust_rightmost_branch(handle_t *handle,
goto out;
}
- status = ocfs2_extend_trans(handle, path_num_items(path) +
- handle->h_buffer_credits);
+ status = ocfs2_extend_trans(handle, path_num_items(path));
if (status < 0) {
mlog_errno(status);
goto out;
@@ -1270,12 +1265,7 @@ static int ocfs2_add_branch(handle_t *handle,
if (!eb_el->l_tree_depth)
new_last_eb_blk = le64_to_cpu(eb->h_blkno);
- status = ocfs2_journal_dirty(handle, bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
-
+ ocfs2_journal_dirty(handle, bh);
next_blkno = le64_to_cpu(eb->h_blkno);
}
@@ -1321,17 +1311,10 @@ static int ocfs2_add_branch(handle_t *handle,
eb = (struct ocfs2_extent_block *) (*last_eb_bh)->b_data;
eb->h_next_leaf_blk = cpu_to_le64(new_last_eb_blk);
- status = ocfs2_journal_dirty(handle, *last_eb_bh);
- if (status < 0)
- mlog_errno(status);
- status = ocfs2_journal_dirty(handle, et->et_root_bh);
- if (status < 0)
- mlog_errno(status);
- if (eb_bh) {
- status = ocfs2_journal_dirty(handle, eb_bh);
- if (status < 0)
- mlog_errno(status);
- }
+ ocfs2_journal_dirty(handle, *last_eb_bh);
+ ocfs2_journal_dirty(handle, et->et_root_bh);
+ if (eb_bh)
+ ocfs2_journal_dirty(handle, eb_bh);
/*
* Some callers want to track the rightmost leaf so pass it
@@ -1399,11 +1382,7 @@ static int ocfs2_shift_tree_depth(handle_t *handle,
for (i = 0; i < le16_to_cpu(root_el->l_next_free_rec); i++)
eb_el->l_recs[i] = root_el->l_recs[i];
- status = ocfs2_journal_dirty(handle, new_eb_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
+ ocfs2_journal_dirty(handle, new_eb_bh);
status = ocfs2_et_root_journal_access(handle, et,
OCFS2_JOURNAL_ACCESS_WRITE);
@@ -1428,11 +1407,7 @@ static int ocfs2_shift_tree_depth(handle_t *handle,
if (root_el->l_tree_depth == cpu_to_le16(1))
ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno));
- status = ocfs2_journal_dirty(handle, et->et_root_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
+ ocfs2_journal_dirty(handle, et->et_root_bh);
*ret_new_eb_bh = new_eb_bh;
new_eb_bh = NULL;
@@ -2064,7 +2039,7 @@ static void ocfs2_complete_edge_insert(handle_t *handle,
struct ocfs2_path *right_path,
int subtree_index)
{
- int ret, i, idx;
+ int i, idx;
struct ocfs2_extent_list *el, *left_el, *right_el;
struct ocfs2_extent_rec *left_rec, *right_rec;
struct buffer_head *root_bh = left_path->p_node[subtree_index].bh;
@@ -2102,13 +2077,8 @@ static void ocfs2_complete_edge_insert(handle_t *handle,
ocfs2_adjust_adjacent_records(left_rec, left_el, right_rec,
right_el);
- ret = ocfs2_journal_dirty(handle, left_path->p_node[i].bh);
- if (ret)
- mlog_errno(ret);
-
- ret = ocfs2_journal_dirty(handle, right_path->p_node[i].bh);
- if (ret)
- mlog_errno(ret);
+ ocfs2_journal_dirty(handle, left_path->p_node[i].bh);
+ ocfs2_journal_dirty(handle, right_path->p_node[i].bh);
/*
* Setup our list pointers now so that the current
@@ -2132,9 +2102,7 @@ static void ocfs2_complete_edge_insert(handle_t *handle,
root_bh = left_path->p_node[subtree_index].bh;
- ret = ocfs2_journal_dirty(handle, root_bh);
- if (ret)
- mlog_errno(ret);
+ ocfs2_journal_dirty(handle, root_bh);
}
static int ocfs2_rotate_subtree_right(handle_t *handle,
@@ -2207,11 +2175,7 @@ static int ocfs2_rotate_subtree_right(handle_t *handle,
ocfs2_create_empty_extent(right_el);
- ret = ocfs2_journal_dirty(handle, right_leaf_bh);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
+ ocfs2_journal_dirty(handle, right_leaf_bh);
/* Do the copy now. */
i = le16_to_cpu(left_el->l_next_free_rec) - 1;
@@ -2230,11 +2194,7 @@ static int ocfs2_rotate_subtree_right(handle_t *handle,
memset(&left_el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));
le16_add_cpu(&left_el->l_next_free_rec, 1);
- ret = ocfs2_journal_dirty(handle, left_leaf_bh);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
+ ocfs2_journal_dirty(handle, left_leaf_bh);
ocfs2_complete_edge_insert(handle, left_path, right_path,
subtree_index);
@@ -2327,20 +2287,14 @@ static int ocfs2_extend_rotate_transaction(handle_t *handle, int subtree_depth,
int op_credits,
struct ocfs2_path *path)
{
- int ret;
+ int ret = 0;
int credits = (path->p_tree_depth - subtree_depth) * 2 + 1 + op_credits;
- if (handle->h_buffer_credits < credits) {
+ if (handle->h_buffer_credits < credits)
ret = ocfs2_extend_trans(handle,
credits - handle->h_buffer_credits);
- if (ret)
- return ret;
- if (unlikely(handle->h_buffer_credits < credits))
- return ocfs2_extend_trans(handle, credits);
- }
-
- return 0;
+ return ret;
}
/*
@@ -2584,8 +2538,7 @@ static int ocfs2_update_edge_lengths(handle_t *handle,
* records for all the bh in the path.
* So we have to allocate extra credits and access them.
*/
- ret = ocfs2_extend_trans(handle,
- handle->h_buffer_credits + subtree_index);
+ ret = ocfs2_extend_trans(handle, subtree_index);
if (ret) {
mlog_errno(ret);
goto out;
@@ -2823,12 +2776,8 @@ static int ocfs2_rotate_subtree_left(handle_t *handle,
ocfs2_remove_empty_extent(right_leaf_el);
}
- ret = ocfs2_journal_dirty(handle, path_leaf_bh(left_path));
- if (ret)
- mlog_errno(ret);
- ret = ocfs2_journal_dirty(handle, path_leaf_bh(right_path));
- if (ret)
- mlog_errno(ret);
+ ocfs2_journal_dirty(handle, path_leaf_bh(left_path));
+ ocfs2_journal_dirty(handle, path_leaf_bh(right_path));
if (del_right_subtree) {
ocfs2_unlink_subtree(handle, et, left_path, right_path,
@@ -2851,9 +2800,7 @@ static int ocfs2_rotate_subtree_left(handle_t *handle,
if (right_has_empty)
ocfs2_remove_empty_extent(left_leaf_el);
- ret = ocfs2_journal_dirty(handle, et_root_bh);
- if (ret)
- mlog_errno(ret);
+ ocfs2_journal_dirty(handle, et_root_bh);
*deleted = 1;
} else
@@ -2962,10 +2909,7 @@ static int ocfs2_rotate_rightmost_leaf_left(handle_t *handle,
}
ocfs2_remove_empty_extent(el);
-
- ret = ocfs2_journal_dirty(handle, bh);
- if (ret)
- mlog_errno(ret);
+ ocfs2_journal_dirty(handle, bh);
out:
return ret;
@@ -3506,15 +3450,9 @@ static int ocfs2_merge_rec_right(struct ocfs2_path *left_path,
ocfs2_cleanup_merge(el, index);
- ret = ocfs2_journal_dirty(handle, bh);
- if (ret)
- mlog_errno(ret);
-
+ ocfs2_journal_dirty(handle, bh);
if (right_path) {
- ret = ocfs2_journal_dirty(handle, path_leaf_bh(right_path));
- if (ret)
- mlog_errno(ret);
-
+ ocfs2_journal_dirty(handle, path_leaf_bh(right_path));
ocfs2_complete_edge_insert(handle, left_path, right_path,
subtree_index);
}
@@ -3683,14 +3621,9 @@ static int ocfs2_merge_rec_left(struct ocfs2_path *right_path,
ocfs2_cleanup_merge(el, index);
- ret = ocfs2_journal_dirty(handle, bh);
- if (ret)
- mlog_errno(ret);
-
+ ocfs2_journal_dirty(handle, bh);
if (left_path) {
- ret = ocfs2_journal_dirty(handle, path_leaf_bh(left_path));
- if (ret)
- mlog_errno(ret);
+ ocfs2_journal_dirty(handle, path_leaf_bh(left_path));
/*
* In the situation that the right_rec is empty and the extent
@@ -4016,10 +3949,7 @@ static void ocfs2_adjust_rightmost_records(handle_t *handle,
le32_add_cpu(&rec->e_int_clusters,
-le32_to_cpu(rec->e_cpos));
- ret = ocfs2_journal_dirty(handle, bh);
- if (ret)
- mlog_errno(ret);
-
+ ocfs2_journal_dirty(handle, bh);
}
}
@@ -4203,17 +4133,13 @@ static int ocfs2_insert_path(handle_t *handle,
struct buffer_head *leaf_bh = path_leaf_bh(right_path);
if (left_path) {
- int credits = handle->h_buffer_credits;
-
/*
* There's a chance that left_path got passed back to
* us without being accounted for in the
* journal. Extend our transaction here to be sure we
* can change those blocks.
*/
- credits += left_path->p_tree_depth;
-
- ret = ocfs2_extend_trans(handle, credits);
+ ret = ocfs2_extend_trans(handle, left_path->p_tree_depth);
if (ret < 0) {
mlog_errno(ret);
goto out;
@@ -4251,17 +4177,13 @@ static int ocfs2_insert_path(handle_t *handle,
* dirty this for us.
*/
if (left_path)
- ret = ocfs2_journal_dirty(handle,
- path_leaf_bh(left_path));
- if (ret)
- mlog_errno(ret);
+ ocfs2_journal_dirty(handle,
+ path_leaf_bh(left_path));
} else
ocfs2_insert_at_leaf(et, insert_rec, path_leaf_el(right_path),
insert);
- ret = ocfs2_journal_dirty(handle, leaf_bh);
- if (ret)
- mlog_errno(ret);
+ ocfs2_journal_dirty(handle, leaf_bh);
if (left_path) {
/*
@@ -4384,9 +4306,7 @@ out_update_clusters:
ocfs2_et_update_clusters(et,
le16_to_cpu(insert_rec->e_leaf_clusters));
- ret = ocfs2_journal_dirty(handle, et->et_root_bh);
- if (ret)
- mlog_errno(ret);
+ ocfs2_journal_dirty(handle, et->et_root_bh);
out:
ocfs2_free_path(left_path);
@@ -4895,11 +4815,7 @@ int ocfs2_add_clusters_in_btree(handle_t *handle,
goto leave;
}
- status = ocfs2_journal_dirty(handle, et->et_root_bh);
- if (status < 0) {
- mlog_errno(status);
- goto leave;
- }
+ ocfs2_journal_dirty(handle, et->et_root_bh);
clusters_to_add -= num_bits;
*logical_offset += num_bits;
@@ -5309,7 +5225,7 @@ static int ocfs2_split_tree(handle_t *handle, struct ocfs2_extent_tree *et,
int index, u32 new_range,
struct ocfs2_alloc_context *meta_ac)
{
- int ret, depth, credits = handle->h_buffer_credits;
+ int ret, depth, credits;
struct buffer_head *last_eb_bh = NULL;
struct ocfs2_extent_block *eb;
struct ocfs2_extent_list *rightmost_el, *el;
@@ -5340,8 +5256,8 @@ static int ocfs2_split_tree(handle_t *handle, struct ocfs2_extent_tree *et,
} else
rightmost_el = path_leaf_el(path);
- credits += path->p_tree_depth +
- ocfs2_extend_meta_needed(et->et_root_el);
+ credits = path->p_tree_depth +
+ ocfs2_extend_meta_needed(et->et_root_el);
ret = ocfs2_extend_trans(handle, credits);
if (ret) {
mlog_errno(ret);
@@ -5724,11 +5640,7 @@ int ocfs2_remove_btree_range(struct inode *inode,
ocfs2_et_update_clusters(et, -len);
- ret = ocfs2_journal_dirty(handle, et->et_root_bh);
- if (ret) {
- mlog_errno(ret);
- goto out_commit;
- }
+ ocfs2_journal_dirty(handle, et->et_root_bh);
ret = ocfs2_truncate_log_append(osb, handle, phys_blkno, len);
if (ret)
@@ -5850,11 +5762,7 @@ int ocfs2_truncate_log_append(struct ocfs2_super *osb,
}
tl->tl_recs[index].t_clusters = cpu_to_le32(num_clusters);
- status = ocfs2_journal_dirty(handle, tl_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
+ ocfs2_journal_dirty(handle, tl_bh);
bail:
mlog_exit(status);
@@ -5893,11 +5801,7 @@ static int ocfs2_replay_truncate_records(struct ocfs2_super *osb,
tl->tl_used = cpu_to_le16(i);
- status = ocfs2_journal_dirty(handle, tl_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
+ ocfs2_journal_dirty(handle, tl_bh);
/* TODO: Perhaps we can calculate the bulk of the
* credits up front rather than extending like
@@ -6824,11 +6728,7 @@ find_tail_record:
}
delete:
- ret = ocfs2_journal_dirty(handle, bh);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
+ ocfs2_journal_dirty(handle, bh);
mlog(0, "extent list container %llu, after: record %d: "
"(%u, %u, %llu), next = %u.\n",
@@ -6959,22 +6859,14 @@ static int ocfs2_do_truncate(struct ocfs2_super *osb,
} else if (last_eb)
fe->i_last_eb_blk = last_eb->h_blkno;
- status = ocfs2_journal_dirty(handle, fe_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
+ ocfs2_journal_dirty(handle, fe_bh);
if (last_eb) {
/* If there will be a new last extent block, then by
* definition, there cannot be any leaves to the right of
* him. */
last_eb->h_next_leaf_blk = 0;
- status = ocfs2_journal_dirty(handle, last_eb_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
+ ocfs2_journal_dirty(handle, last_eb_bh);
}
if (delete_blk) {
@@ -7307,6 +7199,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
goto out_commit;
did_quota = 1;
+ data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv;
+
ret = ocfs2_claim_clusters(osb, handle, data_ac, 1, &bit_off,
&num);
if (ret) {
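
The alloc.c hunks above apply two conversions over and over: callers of ocfs2_extend_trans() now pass only the additional credits they need (the old handle->h_buffer_credits term is dropped), and ocfs2_journal_dirty() is no longer checked for a return value. A minimal caller-side sketch of the resulting pattern, using only functions that appear in this patch (illustrative only, not compilable outside the kernel tree):

static int example_update_block(handle_t *handle,
				struct ocfs2_caching_info *ci,
				struct buffer_head *bh, int extra)
{
	int ret;

	/* Ask for 'extra' additional credits; pre-patch callers passed
	 * handle->h_buffer_credits + extra here. */
	ret = ocfs2_extend_trans(handle, extra);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	ret = ocfs2_journal_access(handle, ci, bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	/* ... modify bh->b_data ... */

	/* Now returns void; a jbd2 failure inside is treated as fatal. */
	ocfs2_journal_dirty(handle, bh);
	return 0;
}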
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 21441ddb5506..3623ca20cc18 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -1735,6 +1735,9 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
goto out;
}
+ if (data_ac)
+ data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv;
+
credits = ocfs2_calc_extend_credits(inode->i_sb,
&di->id2.i_list,
clusters_to_alloc);
diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c
index 3bb928a2bf7d..c7fba396392d 100644
--- a/fs/ocfs2/cluster/masklog.c
+++ b/fs/ocfs2/cluster/masklog.c
@@ -116,6 +116,7 @@ static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = {
define_mask(ERROR),
define_mask(NOTICE),
define_mask(KTHREAD),
+ define_mask(RESERVATIONS),
};
static struct attribute *mlog_attr_ptrs[MLOG_MAX_BITS] = {NULL, };
diff --git a/fs/ocfs2/cluster/masklog.h b/fs/ocfs2/cluster/masklog.h
index 3dfddbec32f2..fd96e2a2fa56 100644
--- a/fs/ocfs2/cluster/masklog.h
+++ b/fs/ocfs2/cluster/masklog.h
@@ -119,6 +119,7 @@
#define ML_ERROR 0x0000000100000000ULL /* sent to KERN_ERR */
#define ML_NOTICE 0x0000000200000000ULL /* sent to KERN_NOTICE */
#define ML_KTHREAD 0x0000000400000000ULL /* kernel thread activity */
+#define ML_RESERVATIONS 0x0000000800000000ULL /* ocfs2 alloc reservations */
#define MLOG_INITIAL_AND_MASK (ML_ERROR|ML_NOTICE)
#define MLOG_INITIAL_NOT_MASK (ML_ENTRY|ML_EXIT)
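
The new ML_RESERVATIONS bit is consumed in the usual masklog way: a source file picks its default mask before including the header, and its mlog() calls are then filtered by that bit (which can be toggled through the attribute added to masklog.c above). A minimal sketch of the pattern, mirroring what reservations.c does later in this patch:

#define MLOG_MASK_PREFIX ML_RESERVATIONS
#include <cluster/masklog.h>

static void example(void)
{
	/* Only emitted when the RESERVATIONS bit is enabled in the mask. */
	mlog(0, "reservation debug message\n");
}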
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 73e743eea2c8..aa75ca3f78da 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -583,6 +583,9 @@ static void o2net_state_change(struct sock *sk)
o2net_sc_queue_work(sc, &sc->sc_connect_work);
break;
default:
+ printk(KERN_INFO "o2net: connection to " SC_NODEF_FMT
+ " shutdown, state %d\n",
+ SC_NODEF_ARGS(sc), sk->sk_state);
o2net_sc_queue_work(sc, &sc->sc_shutdown_work);
break;
}
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index efd77d071c80..6c9a28a2d3ae 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -1194,7 +1194,7 @@ static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir,
else
de->inode = 0;
dir->i_version++;
- status = ocfs2_journal_dirty(handle, bh);
+ ocfs2_journal_dirty(handle, bh);
goto bail;
}
i += le16_to_cpu(de->rec_len);
@@ -1752,7 +1752,7 @@ int __ocfs2_add_entry(handle_t *handle,
ocfs2_recalc_free_list(dir, handle, lookup);
dir->i_version++;
- status = ocfs2_journal_dirty(handle, insert_bh);
+ ocfs2_journal_dirty(handle, insert_bh);
retval = 0;
goto bail;
}
@@ -2297,12 +2297,7 @@ static int ocfs2_fill_new_dir_id(struct ocfs2_super *osb,
}
ocfs2_fill_initial_dirents(inode, parent, data->id_data, size);
-
ocfs2_journal_dirty(handle, di_bh);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
i_size_write(inode, size);
inode->i_nlink = 2;
@@ -2366,11 +2361,7 @@ static int ocfs2_fill_new_dir_el(struct ocfs2_super *osb,
ocfs2_init_dir_trailer(inode, new_bh, size);
}
- status = ocfs2_journal_dirty(handle, new_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
+ ocfs2_journal_dirty(handle, new_bh);
i_size_write(inode, inode->i_sb->s_blocksize);
inode->i_nlink = 2;
@@ -2458,10 +2449,7 @@ static int ocfs2_dx_dir_attach_index(struct ocfs2_super *osb,
dx_root->dr_list.l_count =
cpu_to_le16(ocfs2_extent_recs_per_dx_root(osb->sb));
}
-
- ret = ocfs2_journal_dirty(handle, dx_root_bh);
- if (ret)
- mlog_errno(ret);
+ ocfs2_journal_dirty(handle, dx_root_bh);
ret = ocfs2_journal_access_di(handle, INODE_CACHE(dir), di_bh,
OCFS2_JOURNAL_ACCESS_CREATE);
@@ -2475,9 +2463,7 @@ static int ocfs2_dx_dir_attach_index(struct ocfs2_super *osb,
OCFS2_I(dir)->ip_dyn_features |= OCFS2_INDEXED_DIR_FL;
di->i_dyn_features = cpu_to_le16(OCFS2_I(dir)->ip_dyn_features);
- ret = ocfs2_journal_dirty(handle, di_bh);
- if (ret)
- mlog_errno(ret);
+ ocfs2_journal_dirty(handle, di_bh);
*ret_dx_root_bh = dx_root_bh;
dx_root_bh = NULL;
@@ -2991,6 +2977,8 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh,
* if we only get one now, that's enough to continue. The rest
* will be claimed after the conversion to extents.
*/
+ if (ocfs2_dir_resv_allowed(osb))
+ data_ac->ac_resv = &oi->ip_la_data_resv;
ret = ocfs2_claim_clusters(osb, handle, data_ac, 1, &bit_off, &len);
if (ret) {
mlog_errno(ret);
@@ -3034,11 +3022,7 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh,
ocfs2_init_dir_trailer(dir, dirdata_bh, i);
}
- ret = ocfs2_journal_dirty(handle, dirdata_bh);
- if (ret) {
- mlog_errno(ret);
- goto out_commit;
- }
+ ocfs2_journal_dirty(handle, dirdata_bh);
if (ocfs2_supports_indexed_dirs(osb) && !dx_inline) {
/*
@@ -3104,11 +3088,7 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh,
*/
dir->i_blocks = ocfs2_inode_sector_count(dir);
- ret = ocfs2_journal_dirty(handle, di_bh);
- if (ret) {
- mlog_errno(ret);
- goto out_commit;
- }
+ ocfs2_journal_dirty(handle, di_bh);
if (ocfs2_supports_indexed_dirs(osb)) {
ret = ocfs2_dx_dir_attach_index(osb, handle, dir, di_bh,
@@ -3369,6 +3349,9 @@ static int ocfs2_extend_dir(struct ocfs2_super *osb,
goto bail;
}
+ if (ocfs2_dir_resv_allowed(osb))
+ data_ac->ac_resv = &OCFS2_I(dir)->ip_la_data_resv;
+
credits = ocfs2_calc_extend_credits(sb, el, 1);
} else {
spin_unlock(&OCFS2_I(dir)->ip_lock);
@@ -3423,11 +3406,7 @@ do_extend:
} else {
de->rec_len = cpu_to_le16(sb->s_blocksize);
}
- status = ocfs2_journal_dirty(handle, new_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
+ ocfs2_journal_dirty(handle, new_bh);
dir_i_size += dir->i_sb->s_blocksize;
i_size_write(dir, dir_i_size);
@@ -3906,11 +3885,7 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir,
sizeof(struct ocfs2_dx_entry), dx_leaf_sort_cmp,
dx_leaf_sort_swap);
- ret = ocfs2_journal_dirty(handle, dx_leaf_bh);
- if (ret) {
- mlog_errno(ret);
- goto out_commit;
- }
+ ocfs2_journal_dirty(handle, dx_leaf_bh);
ret = ocfs2_dx_dir_find_leaf_split(dx_leaf, leaf_cpos, insert_hash,
&split_hash);
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
index 12d5eb78a11a..e0c415cb4328 100644
--- a/fs/ocfs2/dlm/dlmast.c
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -451,7 +451,9 @@ int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
ret = o2net_send_message_vec(DLM_PROXY_AST_MSG, dlm->key, vec, veclen,
lock->ml.node, &status);
if (ret < 0)
- mlog_errno(ret);
+ mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
+ "node %u\n", ret, DLM_PROXY_AST_MSG, dlm->key,
+ lock->ml.node);
else {
if (status == DLM_RECOVERING) {
mlog(ML_ERROR, "sent AST to node %u, it thinks this "
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index 0102be35980c..40115681d5b0 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -37,7 +37,7 @@
#define DLM_THREAD_SHUFFLE_INTERVAL 5 // flush everything every 5 passes
#define DLM_THREAD_MS 200 // flush at least every 200 ms
-#define DLM_HASH_SIZE_DEFAULT (1 << 14)
+#define DLM_HASH_SIZE_DEFAULT (1 << 17)
#if DLM_HASH_SIZE_DEFAULT < PAGE_SIZE
# define DLM_HASH_PAGES 1
#else
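
This bumps the default DLM lockres hash table from 16KB to 128KB; since the table is backed by page-sized chunks (the #if above collapses to a single page only when the table fits in one), the visible effect on a 4K-page system is how many pages back the table. A standalone arithmetic sketch, assuming PAGE_SIZE == 4096:

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;		/* assumed */
	unsigned long old_size = 1UL << 14;	/* 16 KB  */
	unsigned long new_size = 1UL << 17;	/* 128 KB */

	printf("old default: %lu bytes (%lu pages)\n",
	       old_size, old_size / page_size);
	printf("new default: %lu bytes (%lu pages)\n",
	       new_size, new_size / page_size);
	return 0;
}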
diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
index 90803b47cd8c..9f30491e5e88 100644
--- a/fs/ocfs2/dlm/dlmconvert.c
+++ b/fs/ocfs2/dlm/dlmconvert.c
@@ -390,7 +390,9 @@ static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
} else if (ret != DLM_NORMAL && ret != DLM_NOTQUEUED)
dlm_error(ret);
} else {
- mlog_errno(tmpret);
+ mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
+ "node %u\n", tmpret, DLM_CONVERT_LOCK_MSG, dlm->key,
+ res->owner);
if (dlm_is_host_down(tmpret)) {
/* instead of logging the same network error over
* and over, sleep here and wait for the heartbeat
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 988c9055fd4e..e82c0537eff9 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -511,7 +511,7 @@ static void __dlm_print_nodes(struct dlm_ctxt *dlm)
assert_spin_locked(&dlm->spinlock);
- printk(KERN_INFO "ocfs2_dlm: Nodes in domain (\"%s\"): ", dlm->name);
+ printk(KERN_NOTICE "o2dlm: Nodes in domain %s: ", dlm->name);
while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES,
node + 1)) < O2NM_MAX_NODES) {
@@ -534,7 +534,7 @@ static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
node = exit_msg->node_idx;
- printk(KERN_INFO "ocfs2_dlm: Node %u leaves domain %s\n", node, dlm->name);
+ printk(KERN_NOTICE "o2dlm: Node %u leaves domain %s\n", node, dlm->name);
spin_lock(&dlm->spinlock);
clear_bit(node, dlm->domain_map);
@@ -565,7 +565,9 @@ static int dlm_send_one_domain_exit(struct dlm_ctxt *dlm,
status = o2net_send_message(DLM_EXIT_DOMAIN_MSG, dlm->key,
&leave_msg, sizeof(leave_msg), node,
NULL);
-
+ if (status < 0)
+ mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
+ "node %u\n", status, DLM_EXIT_DOMAIN_MSG, dlm->key, node);
mlog(0, "status return %d from o2net_send_message\n", status);
return status;
@@ -904,7 +906,7 @@ static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data,
set_bit(assert->node_idx, dlm->domain_map);
__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
- printk(KERN_INFO "ocfs2_dlm: Node %u joins domain %s\n",
+ printk(KERN_NOTICE "o2dlm: Node %u joins domain %s\n",
assert->node_idx, dlm->name);
__dlm_print_nodes(dlm);
@@ -962,7 +964,9 @@ static int dlm_send_one_join_cancel(struct dlm_ctxt *dlm,
&cancel_msg, sizeof(cancel_msg), node,
NULL);
if (status < 0) {
- mlog_errno(status);
+ mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
+ "node %u\n", status, DLM_CANCEL_JOIN_MSG, DLM_MOD_KEY,
+ node);
goto bail;
}
@@ -1029,10 +1033,11 @@ static int dlm_request_join(struct dlm_ctxt *dlm,
byte_copymap(join_msg.node_map, dlm->live_nodes_map, O2NM_MAX_NODES);
status = o2net_send_message(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY, &join_msg,
- sizeof(join_msg), node,
- &join_resp);
+ sizeof(join_msg), node, &join_resp);
if (status < 0 && status != -ENOPROTOOPT) {
- mlog_errno(status);
+ mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
+ "node %u\n", status, DLM_QUERY_JOIN_MSG, DLM_MOD_KEY,
+ node);
goto bail;
}
dlm_query_join_wire_to_packet(join_resp, &packet);
@@ -1103,7 +1108,9 @@ static int dlm_send_one_join_assert(struct dlm_ctxt *dlm,
&assert_msg, sizeof(assert_msg), node,
NULL);
if (status < 0)
- mlog_errno(status);
+ mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
+ "node %u\n", status, DLM_ASSERT_JOINED_MSG, DLM_MOD_KEY,
+ node);
return status;
}
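
The dlm hunks above and below all replace a bare mlog_errno() with the same verbose "Error %d when sending message %u (key 0x%x) to node %u" line. The patch open-codes the message at every call site; purely as a hypothetical illustration (dlm_log_send_err is not a symbol in this patch or in the kernel), the repeated text could be captured in a macro like:

/* HYPOTHETICAL helper, not part of this patch. */
#define dlm_log_send_err(ret, msg_type, key, node)			  \
	mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to " \
	     "node %u\n", (ret), (msg_type), (key), (node))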
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index 733337772671..f1fba2a6a8fe 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -329,7 +329,9 @@ static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
BUG();
}
} else {
- mlog_errno(tmpret);
+ mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
+ "node %u\n", tmpret, DLM_CREATE_LOCK_MSG, dlm->key,
+ res->owner);
if (dlm_is_host_down(tmpret)) {
ret = DLM_RECOVERING;
mlog(0, "node %u died so returning DLM_RECOVERING "
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 9289b4357d27..4fc6fd22a808 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -1666,7 +1666,9 @@ again:
tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
&assert, sizeof(assert), to, &r);
if (tmpret < 0) {
- mlog(0, "assert_master returned %d!\n", tmpret);
+ mlog(ML_ERROR, "Error %d when sending message %u (key "
+ "0x%x) to node %u\n", tmpret,
+ DLM_ASSERT_MASTER_MSG, dlm->key, to);
if (!dlm_is_host_down(tmpret)) {
mlog(ML_ERROR, "unhandled error=%d!\n", tmpret);
BUG();
@@ -2205,7 +2207,9 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
&deref, sizeof(deref), res->owner, &r);
if (ret < 0)
- mlog_errno(ret);
+ mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
+ "node %u\n", ret, DLM_DEREF_LOCKRES_MSG, dlm->key,
+ res->owner);
else if (r < 0) {
/* BAD. other node says I did not have a ref. */
mlog(ML_ERROR,"while dropping ref on %s:%.*s "
@@ -2975,7 +2979,9 @@ static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
&migrate, sizeof(migrate), nodenum,
&status);
if (ret < 0) {
- mlog(0, "migrate_request returned %d!\n", ret);
+ mlog(ML_ERROR, "Error %d when sending message %u (key "
+ "0x%x) to node %u\n", ret, DLM_MIGRATE_REQUEST_MSG,
+ dlm->key, nodenum);
if (!dlm_is_host_down(ret)) {
mlog(ML_ERROR, "unhandled error=%d!\n", ret);
BUG();
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index b4f99de2caf3..f8b75ce4be70 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -803,7 +803,9 @@ static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
/* negative status is handled by caller */
if (ret < 0)
- mlog_errno(ret);
+ mlog(ML_ERROR, "Error %d when sending message %u (key "
+ "0x%x) to node %u\n", ret, DLM_LOCK_REQUEST_MSG,
+ dlm->key, request_from);
// return from here, then
// sleep until all received or error
@@ -955,10 +957,10 @@ static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
sizeof(done_msg), send_to, &tmpret);
if (ret < 0) {
+ mlog(ML_ERROR, "Error %d when sending message %u (key "
+ "0x%x) to node %u\n", ret, DLM_RECO_DATA_DONE_MSG,
+ dlm->key, send_to);
if (!dlm_is_host_down(ret)) {
- mlog_errno(ret);
- mlog(ML_ERROR, "%s: unknown error sending data-done "
- "to %u\n", dlm->name, send_to);
BUG();
}
} else
@@ -1126,7 +1128,9 @@ static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
if (ret < 0) {
/* XXX: negative status is not handled.
* this will end up killing this node. */
- mlog_errno(ret);
+ mlog(ML_ERROR, "Error %d when sending message %u (key "
+ "0x%x) to node %u\n", ret, DLM_MIG_LOCKRES_MSG,
+ dlm->key, send_to);
} else {
/* might get an -ENOMEM back here */
ret = status;
@@ -1642,7 +1646,9 @@ int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
&req, sizeof(req), nodenum, &status);
/* XXX: negative status not handled properly here. */
if (ret < 0)
- mlog_errno(ret);
+ mlog(ML_ERROR, "Error %d when sending message %u (key "
+ "0x%x) to node %u\n", ret, DLM_MASTER_REQUERY_MSG,
+ dlm->key, nodenum);
else {
BUG_ON(status < 0);
BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
@@ -2640,7 +2646,7 @@ retry:
if (dlm_is_host_down(ret)) {
/* node is down. not involved in recovery
* so just keep going */
- mlog(0, "%s: node %u was down when sending "
+ mlog(ML_NOTICE, "%s: node %u was down when sending "
"begin reco msg (%d)\n", dlm->name, nodenum, ret);
ret = 0;
}
@@ -2660,11 +2666,12 @@ retry:
}
if (ret < 0) {
struct dlm_lock_resource *res;
+
/* this is now a serious problem, possibly ENOMEM
* in the network stack. must retry */
mlog_errno(ret);
mlog(ML_ERROR, "begin reco of dlm %s to node %u "
- " returned %d\n", dlm->name, nodenum, ret);
+ "returned %d\n", dlm->name, nodenum, ret);
res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
DLM_RECOVERY_LOCK_NAME_LEN);
if (res) {
@@ -2789,7 +2796,9 @@ stage2:
if (ret >= 0)
ret = status;
if (ret < 0) {
- mlog_errno(ret);
+ mlog(ML_ERROR, "Error %d when sending message %u (key "
+ "0x%x) to node %u\n", ret, DLM_FINALIZE_RECO_MSG,
+ dlm->key, nodenum);
if (dlm_is_host_down(ret)) {
/* this has no effect on this recovery
* session, so set the status to zero to
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
index b47c1b92b82b..817287c6a6db 100644
--- a/fs/ocfs2/dlm/dlmunlock.c
+++ b/fs/ocfs2/dlm/dlmunlock.c
@@ -354,7 +354,8 @@ static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
mlog(0, "master was in-progress. retry\n");
ret = status;
} else {
- mlog_errno(tmpret);
+ mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
+ "node %u\n", tmpret, DLM_UNLOCK_LOCK_MSG, dlm->key, owner);
if (dlm_is_host_down(tmpret)) {
/* NOTE: this seems strange, but it is what we want.
* when the master goes down during a cancel or
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index a5fbd9cea968..48dc4fd0a5a2 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -278,10 +278,7 @@ int ocfs2_update_inode_atime(struct inode *inode,
inode->i_atime = CURRENT_TIME;
di->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
di->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
-
- ret = ocfs2_journal_dirty(handle, bh);
- if (ret < 0)
- mlog_errno(ret);
+ ocfs2_journal_dirty(handle, bh);
out_commit:
ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
@@ -430,9 +427,7 @@ static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
- status = ocfs2_journal_dirty(handle, fe_bh);
- if (status < 0)
- mlog_errno(status);
+ ocfs2_journal_dirty(handle, fe_bh);
out_commit:
ocfs2_commit_trans(osb, handle);
@@ -488,6 +483,9 @@ static int ocfs2_truncate_file(struct inode *inode,
down_write(&OCFS2_I(inode)->ip_alloc_sem);
+ ocfs2_resv_discard(&osb->osb_la_resmap,
+ &OCFS2_I(inode)->ip_la_data_resv);
+
/*
* The inode lock forced other nodes to sync and drop their
* pages, which (correctly) happens even if we have a truncate
@@ -666,11 +664,7 @@ restarted_transaction:
goto leave;
}
- status = ocfs2_journal_dirty(handle, bh);
- if (status < 0) {
- mlog_errno(status);
- goto leave;
- }
+ ocfs2_journal_dirty(handle, bh);
spin_lock(&OCFS2_I(inode)->ip_lock);
clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters);
@@ -1195,9 +1189,7 @@ static int __ocfs2_write_remove_suid(struct inode *inode,
di = (struct ocfs2_dinode *) bh->b_data;
di->i_mode = cpu_to_le16(inode->i_mode);
- ret = ocfs2_journal_dirty(handle, bh);
- if (ret < 0)
- mlog_errno(ret);
+ ocfs2_journal_dirty(handle, bh);
out_trans:
ocfs2_commit_trans(osb, handle);
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index af189887201c..ce7028b9fc34 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -376,6 +376,10 @@ void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
OCFS2_I(inode)->ip_last_used_slot = 0;
OCFS2_I(inode)->ip_last_used_group = 0;
+
+ if (S_ISDIR(inode->i_mode))
+ ocfs2_resv_set_type(&OCFS2_I(inode)->ip_la_data_resv,
+ OCFS2_RESV_FLAG_DIR);
mlog_exit_void();
}
@@ -659,12 +663,7 @@ static int ocfs2_remove_inode(struct inode *inode,
di->i_dtime = cpu_to_le64(CURRENT_TIME.tv_sec);
di->i_flags &= cpu_to_le32(~(OCFS2_VALID_FL | OCFS2_ORPHANED_FL));
-
- status = ocfs2_journal_dirty(handle, di_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail_commit;
- }
+ ocfs2_journal_dirty(handle, di_bh);
ocfs2_remove_from_cache(INODE_CACHE(inode), di_bh);
dquot_free_inode(inode);
@@ -1123,6 +1122,10 @@ void ocfs2_clear_inode(struct inode *inode)
ocfs2_mark_lockres_freeing(&oi->ip_inode_lockres);
ocfs2_mark_lockres_freeing(&oi->ip_open_lockres);
+ ocfs2_resv_discard(&OCFS2_SB(inode->i_sb)->osb_la_resmap,
+ &oi->ip_la_data_resv);
+ ocfs2_resv_init_once(&oi->ip_la_data_resv);
+
/* We very well may get a clear_inode before all an inodes
* metadata has hit disk. Of course, we can't drop any cluster
* locks until the journal has finished with it. The only
@@ -1298,13 +1301,8 @@ int ocfs2_mark_inode_dirty(handle_t *handle,
fe->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
fe->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
- status = ocfs2_journal_dirty(handle, bh);
- if (status < 0)
- mlog_errno(status);
-
- status = 0;
+ ocfs2_journal_dirty(handle, bh);
leave:
-
mlog_exit(status);
return status;
}
diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h
index 0b28e1921a39..9f5f5fcadc45 100644
--- a/fs/ocfs2/inode.h
+++ b/fs/ocfs2/inode.h
@@ -70,6 +70,8 @@ struct ocfs2_inode_info
/* Only valid if the inode is the dir. */
u32 ip_last_used_slot;
u64 ip_last_used_group;
+
+ struct ocfs2_alloc_reservation ip_la_data_resv;
};
/*
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 9336c60e3a36..47878cf16418 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -402,9 +402,7 @@ int ocfs2_commit_trans(struct ocfs2_super *osb,
}
/*
- * 'nblocks' is what you want to add to the current
- * transaction. extend_trans will either extend the current handle by
- * nblocks, or commit it and start a new one with nblocks credits.
+ * 'nblocks' is what you want to add to the current transaction.
*
* This might call jbd2_journal_restart() which will commit dirty buffers
* and then restart the transaction. Before calling
@@ -422,11 +420,15 @@ int ocfs2_commit_trans(struct ocfs2_super *osb,
*/
int ocfs2_extend_trans(handle_t *handle, int nblocks)
{
- int status;
+ int status, old_nblocks;
BUG_ON(!handle);
- BUG_ON(!nblocks);
+ BUG_ON(nblocks < 0);
+
+ if (!nblocks)
+ return 0;
+ old_nblocks = handle->h_buffer_credits;
mlog_entry_void();
mlog(0, "Trying to extend transaction by %d blocks\n", nblocks);
@@ -445,7 +447,8 @@ int ocfs2_extend_trans(handle_t *handle, int nblocks)
mlog(0,
"jbd2_journal_extend failed, trying "
"jbd2_journal_restart\n");
- status = jbd2_journal_restart(handle, nblocks);
+ status = jbd2_journal_restart(handle,
+ old_nblocks + nblocks);
if (status < 0) {
mlog_errno(status);
goto bail;
@@ -734,8 +737,7 @@ int ocfs2_journal_access(handle_t *handle, struct ocfs2_caching_info *ci,
return __ocfs2_journal_access(handle, ci, bh, NULL, type);
}
-int ocfs2_journal_dirty(handle_t *handle,
- struct buffer_head *bh)
+void ocfs2_journal_dirty(handle_t *handle, struct buffer_head *bh)
{
int status;
@@ -743,13 +745,9 @@ int ocfs2_journal_dirty(handle_t *handle,
(unsigned long long)bh->b_blocknr);
status = jbd2_journal_dirty_metadata(handle, bh);
- if (status < 0)
- mlog(ML_ERROR, "Could not dirty metadata buffer. "
- "(bh->b_blocknr=%llu)\n",
- (unsigned long long)bh->b_blocknr);
+ BUG_ON(status);
- mlog_exit(status);
- return status;
+ mlog_exit_void();
}
#define OCFS2_DEFAULT_COMMIT_INTERVAL (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE)
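
With the hunk above, ocfs2_extend_trans() takes the number of additional credits rather than a caller-computed total, tolerates a zero extension, and, when jbd2_journal_extend() cannot grow the handle, restarts the transaction with the credits the handle already held plus the newly requested ones. A standalone sketch of the before/after credit arithmetic (the numbers are assumptions for illustration):

#include <stdio.h>

int main(void)
{
	int h_buffer_credits = 10;	/* credits currently on the handle */
	int needed = 5;			/* extra credits a caller wants    */

	/* Before: many call sites passed the sum themselves. */
	int old_style_arg = h_buffer_credits + needed;

	/* After: callers pass only the delta, and the restart fallback
	 * inside ocfs2_extend_trans() asks for old + new. */
	int new_style_arg = needed;
	int restart_credits = h_buffer_credits + new_style_arg;

	printf("old arg: %d, new arg: %d, restart credits: %d\n",
	       old_style_arg, new_style_arg, restart_credits);
	return 0;
}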
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 3f74e09b0d80..7dc56561c9ae 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -325,8 +325,7 @@ int ocfs2_journal_access(handle_t *handle, struct ocfs2_caching_info *ci,
* <modify the bh>
* ocfs2_journal_dirty(handle, bh);
*/
-int ocfs2_journal_dirty(handle_t *handle,
- struct buffer_head *bh);
+void ocfs2_journal_dirty(handle_t *handle, struct buffer_head *bh);
/*
* Credit Macros:
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index c983715d8d8c..63c41e206792 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -52,7 +52,8 @@ static u32 ocfs2_local_alloc_count_bits(struct ocfs2_dinode *alloc);
static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
struct ocfs2_dinode *alloc,
- u32 numbits);
+ u32 *numbits,
+ struct ocfs2_alloc_reservation *resv);
static void ocfs2_clear_local_alloc(struct ocfs2_dinode *alloc);
@@ -74,6 +75,144 @@ static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb,
static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
struct inode *local_alloc_inode);
+/*
+ * ocfs2_la_default_mb() - determine a default size, in megabytes of
+ * the local alloc.
+ *
+ * Generally, we'd like to pick as large a local alloc as
+ * possible. Performance on large workloads tends to scale
+ * proportionally to la size. In addition to that, the reservations
+ * code functions more efficiently as it can reserve more windows for
+ * write.
+ *
+ * Some things work against us when trying to choose a large local alloc:
+ *
+ * - We need to ensure our sizing is picked to leave enough space in
+ * group descriptors for other allocations (such as block groups,
+ * etc). Picking default sizes which are a multiple of 4 could help
+ * - block groups are allocated in 2mb and 4mb chunks.
+ *
+ * - Likewise, we don't want to starve other nodes of bits on small
+ * file systems. This can easily be taken care of by limiting our
+ * default to a reasonable size (256M) on larger cluster sizes.
+ *
+ * - Some file systems can't support very large sizes - 4k and 8k in
+ * particular are limited to less than 128 and 256 megabytes respectively.
+ *
+ * The following reference table shows group descriptor and local
+ * alloc maximums at various cluster sizes (4k blocksize)
+ *
+ * csize: 4K group: 126M la: 121M
+ * csize: 8K group: 252M la: 243M
+ * csize: 16K group: 504M la: 486M
+ * csize: 32K group: 1008M la: 972M
+ * csize: 64K group: 2016M la: 1944M
+ * csize: 128K group: 4032M la: 3888M
+ * csize: 256K group: 8064M la: 7776M
+ * csize: 512K group: 16128M la: 15552M
+ * csize: 1024K group: 32256M la: 31104M
+ */
+#define OCFS2_LA_MAX_DEFAULT_MB 256
+#define OCFS2_LA_OLD_DEFAULT 8
+unsigned int ocfs2_la_default_mb(struct ocfs2_super *osb)
+{
+ unsigned int la_mb;
+ unsigned int gd_mb;
+ unsigned int megs_per_slot;
+ struct super_block *sb = osb->sb;
+
+ gd_mb = ocfs2_clusters_to_megabytes(osb->sb,
+ 8 * ocfs2_group_bitmap_size(sb));
+
+ /*
+ * This takes care of file systems with very small group
+ * descriptors - 512 byte blocksize at cluster sizes lower
+ * than 16K and also 1k blocksize with 4k cluster size.
+ */
+ if ((sb->s_blocksize == 512 && osb->s_clustersize <= 8192)
+ || (sb->s_blocksize == 1024 && osb->s_clustersize == 4096))
+ return OCFS2_LA_OLD_DEFAULT;
+
+ /*
+ * Leave enough room for some block groups and make the final
+ * value we work from a multiple of 4.
+ */
+ gd_mb -= 16;
+ gd_mb &= 0xFFFFFFFB;
+
+ la_mb = gd_mb;
+
+ /*
+ * Keep window sizes down to a reasonable default
+ */
+ if (la_mb > OCFS2_LA_MAX_DEFAULT_MB) {
+ /*
+ * Some clustersize / blocksize combinations will have
+ * given us a larger than OCFS2_LA_MAX_DEFAULT_MB
+ * default size, but get poor distribution when
+ * limited to exactly 256 megabytes.
+ *
+ * As an example, 16K clustersize at 4K blocksize
+ * gives us a cluster group size of 504M. Paring the
+ * local alloc size down to 256 however, would give us
+ * only one window and around 200MB left in the
+ * cluster group. Instead, find the first size below
+ * 256 which would give us an even distribution.
+ *
+ * Larger cluster group sizes actually work out pretty
+ * well when pared to 256, so we don't have to do this
+ * for any group that fits more than two
+ * OCFS2_LA_MAX_DEFAULT_MB windows.
+ */
+ if (gd_mb > (2 * OCFS2_LA_MAX_DEFAULT_MB))
+ la_mb = 256;
+ else {
+ unsigned int gd_mult = gd_mb;
+
+ while (gd_mult > 256)
+ gd_mult = gd_mult >> 1;
+
+ la_mb = gd_mult;
+ }
+ }
+
+ megs_per_slot = osb->osb_clusters_at_boot / osb->max_slots;
+ megs_per_slot = ocfs2_clusters_to_megabytes(osb->sb, megs_per_slot);
+ /* Too many nodes, too few disk clusters. */
+ if (megs_per_slot < la_mb)
+ la_mb = megs_per_slot;
+
+ return la_mb;
+}
+
+void ocfs2_la_set_sizes(struct ocfs2_super *osb, int requested_mb)
+{
+ struct super_block *sb = osb->sb;
+ unsigned int la_default_mb = ocfs2_la_default_mb(osb);
+ unsigned int la_max_mb;
+
+ la_max_mb = ocfs2_clusters_to_megabytes(sb,
+ ocfs2_local_alloc_size(sb) * 8);
+
+ mlog(0, "requested: %dM, max: %uM, default: %uM\n",
+ requested_mb, la_max_mb, la_default_mb);
+
+ if (requested_mb == -1) {
+ /* No user request - use defaults */
+ osb->local_alloc_default_bits =
+ ocfs2_megabytes_to_clusters(sb, la_default_mb);
+ } else if (requested_mb > la_max_mb) {
+ /* Request is too big, we give the maximum available */
+ osb->local_alloc_default_bits =
+ ocfs2_megabytes_to_clusters(sb, la_max_mb);
+ } else {
+ osb->local_alloc_default_bits =
+ ocfs2_megabytes_to_clusters(sb, requested_mb);
+ }
+
+ osb->local_alloc_bits = osb->local_alloc_default_bits;
+}
+
static inline int ocfs2_la_state_enabled(struct ocfs2_super *osb)
{
return (osb->local_alloc_state == OCFS2_LA_THROTTLED ||
@@ -156,7 +295,7 @@ int ocfs2_load_local_alloc(struct ocfs2_super *osb)
osb->local_alloc_bits, (osb->bitmap_cpg - 1));
osb->local_alloc_bits =
ocfs2_megabytes_to_clusters(osb->sb,
- OCFS2_DEFAULT_LOCAL_ALLOC_SIZE);
+ ocfs2_la_default_mb(osb));
}
/* read the alloc off disk */
@@ -262,6 +401,8 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb)
osb->local_alloc_state = OCFS2_LA_DISABLED;
+ ocfs2_resmap_uninit(&osb->osb_la_resmap);
+
main_bm_inode = ocfs2_get_system_file_inode(osb,
GLOBAL_BITMAP_SYSTEM_INODE,
OCFS2_INVALID_SLOT);
@@ -305,12 +446,7 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb)
}
ocfs2_clear_local_alloc(alloc);
-
- status = ocfs2_journal_dirty(handle, bh);
- if (status < 0) {
- mlog_errno(status);
- goto out_commit;
- }
+ ocfs2_journal_dirty(handle, bh);
brelse(bh);
osb->local_alloc_bh = NULL;
@@ -481,46 +617,6 @@ out:
return status;
}
-/* Check to see if the local alloc window is within ac->ac_max_block */
-static int ocfs2_local_alloc_in_range(struct inode *inode,
- struct ocfs2_alloc_context *ac,
- u32 bits_wanted)
-{
- struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- struct ocfs2_dinode *alloc;
- struct ocfs2_local_alloc *la;
- int start;
- u64 block_off;
-
- if (!ac->ac_max_block)
- return 1;
-
- alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
- la = OCFS2_LOCAL_ALLOC(alloc);
-
- start = ocfs2_local_alloc_find_clear_bits(osb, alloc, bits_wanted);
- if (start == -1) {
- mlog_errno(-ENOSPC);
- return 0;
- }
-
- /*
- * Converting (bm_off + start + bits_wanted) to blocks gives us
- * the blkno just past our actual allocation. This is perfect
- * to compare with ac_max_block.
- */
- block_off = ocfs2_clusters_to_blocks(inode->i_sb,
- le32_to_cpu(la->la_bm_off) +
- start + bits_wanted);
- mlog(0, "Checking %llu against %llu\n",
- (unsigned long long)block_off,
- (unsigned long long)ac->ac_max_block);
- if (block_off > ac->ac_max_block)
- return 0;
-
- return 1;
-}
-
/*
* make sure we've got at least bits_wanted contiguous bits in the
* local alloc. You lose them when you drop i_mutex.
@@ -613,17 +709,6 @@ int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb,
mlog(0, "Calling in_range for max block %llu\n",
(unsigned long long)ac->ac_max_block);
- if (!ocfs2_local_alloc_in_range(local_alloc_inode, ac,
- bits_wanted)) {
- /*
- * The window is outside ac->ac_max_block.
- * This errno tells the caller to keep localalloc enabled
- * but to get the allocation from the main bitmap.
- */
- status = -EFBIG;
- goto bail;
- }
-
ac->ac_inode = local_alloc_inode;
/* We should never use localalloc from another slot */
ac->ac_alloc_slot = osb->slot_num;
@@ -664,7 +749,8 @@ int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
la = OCFS2_LOCAL_ALLOC(alloc);
- start = ocfs2_local_alloc_find_clear_bits(osb, alloc, bits_wanted);
+ start = ocfs2_local_alloc_find_clear_bits(osb, alloc, &bits_wanted,
+ ac->ac_resv);
if (start == -1) {
/* TODO: Shouldn't we just BUG here? */
status = -ENOSPC;
@@ -674,8 +760,6 @@ int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
bitmap = la->la_bitmap;
*bit_off = le32_to_cpu(la->la_bm_off) + start;
- /* local alloc is always contiguous by nature -- we never
- * delete bits from it! */
*num_bits = bits_wanted;
status = ocfs2_journal_access_di(handle,
@@ -687,18 +771,15 @@ int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
goto bail;
}
+ ocfs2_resmap_claimed_bits(&osb->osb_la_resmap, ac->ac_resv, start,
+ bits_wanted);
+
while(bits_wanted--)
ocfs2_set_bit(start++, bitmap);
le32_add_cpu(&alloc->id1.bitmap1.i_used, *num_bits);
+ ocfs2_journal_dirty(handle, osb->local_alloc_bh);
- status = ocfs2_journal_dirty(handle, osb->local_alloc_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
-
- status = 0;
bail:
mlog_exit(status);
return status;
@@ -722,13 +803,17 @@ static u32 ocfs2_local_alloc_count_bits(struct ocfs2_dinode *alloc)
}
static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
- struct ocfs2_dinode *alloc,
- u32 numbits)
+ struct ocfs2_dinode *alloc,
+ u32 *numbits,
+ struct ocfs2_alloc_reservation *resv)
{
int numfound, bitoff, left, startoff, lastzero;
+ int local_resv = 0;
+ struct ocfs2_alloc_reservation r;
void *bitmap = NULL;
+ struct ocfs2_reservation_map *resmap = &osb->osb_la_resmap;
- mlog_entry("(numbits wanted = %u)\n", numbits);
+ mlog_entry("(numbits wanted = %u)\n", *numbits);
if (!alloc->id1.bitmap1.i_total) {
mlog(0, "No bits in my window!\n");
@@ -736,6 +821,30 @@ static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
goto bail;
}
+ if (!resv) {
+ local_resv = 1;
+ ocfs2_resv_init_once(&r);
+ ocfs2_resv_set_type(&r, OCFS2_RESV_FLAG_TMP);
+ resv = &r;
+ }
+
+ numfound = *numbits;
+ if (ocfs2_resmap_resv_bits(resmap, resv, &bitoff, &numfound) == 0) {
+ if (numfound < *numbits)
+ *numbits = numfound;
+ goto bail;
+ }
+
+ /*
+ * Code error. While reservations are enabled, local
+ * allocation should _always_ go through them.
+ */
+ BUG_ON(osb->osb_resv_level != 0);
+
+ /*
+ * Reservations are disabled. Handle this the old way.
+ */
+
bitmap = OCFS2_LOCAL_ALLOC(alloc)->la_bitmap;
numfound = bitoff = startoff = 0;
@@ -761,7 +870,7 @@ static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
startoff = bitoff+1;
}
/* we got everything we needed */
- if (numfound == numbits) {
+ if (numfound == *numbits) {
/* mlog(0, "Found it all!\n"); */
break;
}
@@ -770,12 +879,15 @@ static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
mlog(0, "Exiting loop, bitoff = %d, numfound = %d\n", bitoff,
numfound);
- if (numfound == numbits)
+ if (numfound == *numbits)
bitoff = startoff - numfound;
else
bitoff = -1;
bail:
+ if (local_resv)
+ ocfs2_resv_discard(resmap, resv);
+
mlog_exit(bitoff);
return bitoff;
}
@@ -1098,6 +1210,9 @@ retry_enospc:
memset(OCFS2_LOCAL_ALLOC(alloc)->la_bitmap, 0,
le16_to_cpu(la->la_size));
+ ocfs2_resmap_restart(&osb->osb_la_resmap, cluster_count,
+ OCFS2_LOCAL_ALLOC(alloc)->la_bitmap);
+
mlog(0, "New window allocated:\n");
mlog(0, "window la_bm_off = %u\n",
OCFS2_LOCAL_ALLOC(alloc)->la_bm_off);
@@ -1169,12 +1284,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
}
ocfs2_clear_local_alloc(alloc);
-
- status = ocfs2_journal_dirty(handle, osb->local_alloc_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
+ ocfs2_journal_dirty(handle, osb->local_alloc_bh);
status = ocfs2_sync_local_to_main(osb, handle, alloc_copy,
main_bm_inode, main_bm_bh);
@@ -1192,7 +1302,6 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
atomic_inc(&osb->alloc_stats.moves);
- status = 0;
bail:
if (handle)
ocfs2_commit_trans(osb, handle);
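
ocfs2_la_set_sizes() above reduces to a three-way choice in megabytes before converting to clusters: no request takes the computed default, an oversized request is clamped to the bitmap's maximum, and anything else is honored as-is. A standalone sketch of that decision (the example figures are assumptions, loosely based on the reference table earlier in this file):

#include <stdio.h>

static unsigned int pick_la_mb(int requested_mb, unsigned int la_default_mb,
			       unsigned int la_max_mb)
{
	if (requested_mb == -1)
		return la_default_mb;	/* no user request: use the default */
	if ((unsigned int)requested_mb > la_max_mb)
		return la_max_mb;	/* too big: give the maximum        */
	return requested_mb;		/* otherwise honor the request      */
}

int main(void)
{
	printf("%u\n", pick_la_mb(-1, 121, 243));	/* -> 121 */
	printf("%u\n", pick_la_mb(512, 121, 243));	/* -> 243 */
	printf("%u\n", pick_la_mb(64, 121, 243));	/* -> 64  */
	return 0;
}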
diff --git a/fs/ocfs2/localalloc.h b/fs/ocfs2/localalloc.h
index ac5ea9f86653..1be9b5864460 100644
--- a/fs/ocfs2/localalloc.h
+++ b/fs/ocfs2/localalloc.h
@@ -30,6 +30,9 @@ int ocfs2_load_local_alloc(struct ocfs2_super *osb);
void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb);
+void ocfs2_la_set_sizes(struct ocfs2_super *osb, int requested_mb);
+unsigned int ocfs2_la_default_mb(struct ocfs2_super *osb);
+
int ocfs2_begin_local_alloc_recovery(struct ocfs2_super *osb,
int node_num,
struct ocfs2_dinode **alloc_copy);
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 4cbb18f26c5f..eba9033d456e 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -384,11 +384,7 @@ static int ocfs2_mknod(struct inode *dir,
goto leave;
}
ocfs2_add_links_count(dirfe, 1);
- status = ocfs2_journal_dirty(handle, parent_fe_bh);
- if (status < 0) {
- mlog_errno(status);
- goto leave;
- }
+ ocfs2_journal_dirty(handle, parent_fe_bh);
inc_nlink(dir);
}
@@ -567,11 +563,7 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb,
fel->l_count = cpu_to_le16(ocfs2_extent_recs_per_inode(osb->sb));
}
- status = ocfs2_journal_dirty(handle, *new_fe_bh);
- if (status < 0) {
- mlog_errno(status);
- goto leave;
- }
+ ocfs2_journal_dirty(handle, *new_fe_bh);
ocfs2_populate_inode(inode, fe, 1);
ocfs2_ci_set_new(osb, INODE_CACHE(inode));
@@ -705,14 +697,7 @@ static int ocfs2_link(struct dentry *old_dentry,
ocfs2_set_links_count(fe, inode->i_nlink);
fe->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
fe->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
-
- err = ocfs2_journal_dirty(handle, fe_bh);
- if (err < 0) {
- ocfs2_add_links_count(fe, -1);
- drop_nlink(inode);
- mlog_errno(err);
- goto out_commit;
- }
+ ocfs2_journal_dirty(handle, fe_bh);
err = ocfs2_add_entry(handle, dentry, inode,
OCFS2_I(inode)->ip_blkno,
@@ -909,12 +894,7 @@ static int ocfs2_unlink(struct inode *dir,
drop_nlink(inode);
drop_nlink(inode);
ocfs2_set_links_count(fe, inode->i_nlink);
-
- status = ocfs2_journal_dirty(handle, fe_bh);
- if (status < 0) {
- mlog_errno(status);
- goto leave;
- }
+ ocfs2_journal_dirty(handle, fe_bh);
dir->i_ctime = dir->i_mtime = CURRENT_TIME;
if (S_ISDIR(inode->i_mode))
@@ -1332,12 +1312,7 @@ static int ocfs2_rename(struct inode *old_dir,
ocfs2_set_links_count(newfe, 0);
else
ocfs2_add_links_count(newfe, -1);
-
- status = ocfs2_journal_dirty(handle, newfe_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
+ ocfs2_journal_dirty(handle, newfe_bh);
} else {
/* if the name was not found in new_dir, add it now */
status = ocfs2_add_entry(handle, new_dentry, old_inode,
@@ -1356,10 +1331,7 @@ static int ocfs2_rename(struct inode *old_dir,
old_di->i_ctime = cpu_to_le64(old_inode->i_ctime.tv_sec);
old_di->i_ctime_nsec = cpu_to_le32(old_inode->i_ctime.tv_nsec);
-
- status = ocfs2_journal_dirty(handle, old_inode_bh);
- if (status < 0)
- mlog_errno(status);
+ ocfs2_journal_dirty(handle, old_inode_bh);
} else
mlog_errno(status);
@@ -1431,7 +1403,7 @@ static int ocfs2_rename(struct inode *old_dir,
OCFS2_JOURNAL_ACCESS_WRITE);
fe = (struct ocfs2_dinode *) old_dir_bh->b_data;
ocfs2_set_links_count(fe, old_dir->i_nlink);
- status = ocfs2_journal_dirty(handle, old_dir_bh);
+ ocfs2_journal_dirty(handle, old_dir_bh);
}
}
ocfs2_dentry_move(old_dentry, new_dentry, old_dir, new_dir);
@@ -1563,11 +1535,7 @@ static int ocfs2_create_symlink_data(struct ocfs2_super *osb,
(bytes_left > sb->s_blocksize) ? sb->s_blocksize :
bytes_left);
- status = ocfs2_journal_dirty(handle, bhs[virtual]);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
+ ocfs2_journal_dirty(handle, bhs[virtual]);
virtual++;
p_blkno++;
@@ -1961,12 +1929,7 @@ static int ocfs2_orphan_add(struct ocfs2_super *osb,
if (S_ISDIR(inode->i_mode))
ocfs2_add_links_count(orphan_fe, 1);
orphan_dir_inode->i_nlink = ocfs2_read_links_count(orphan_fe);
-
- status = ocfs2_journal_dirty(handle, orphan_dir_bh);
- if (status < 0) {
- mlog_errno(status);
- goto leave;
- }
+ ocfs2_journal_dirty(handle, orphan_dir_bh);
status = __ocfs2_add_entry(handle, orphan_dir_inode, name,
OCFS2_ORPHAN_NAMELEN, inode,
@@ -2065,12 +2028,7 @@ int ocfs2_orphan_del(struct ocfs2_super *osb,
if (S_ISDIR(inode->i_mode))
ocfs2_add_links_count(orphan_fe, -1);
orphan_dir_inode->i_nlink = ocfs2_read_links_count(orphan_fe);
-
- status = ocfs2_journal_dirty(handle, orphan_dir_bh);
- if (status < 0) {
- mlog_errno(status);
- goto leave;
- }
+ ocfs2_journal_dirty(handle, orphan_dir_bh);
leave:
ocfs2_free_dir_lookup_result(&lookup);
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index adf5e2ebc2c4..a388528f485c 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -47,6 +47,7 @@
/* For struct ocfs2_blockcheck_stats */
#include "blockcheck.h"
+#include "reservations.h"
/* Caching of metadata buffers */
@@ -341,6 +342,9 @@ struct ocfs2_super
*/
unsigned int local_alloc_bits;
unsigned int local_alloc_default_bits;
+ /* osb_clusters_at_boot can become stale! Do not trust it to
+ * be up to date. */
+ unsigned int osb_clusters_at_boot;
enum ocfs2_local_alloc_state local_alloc_state; /* protected
* by osb_lock */
@@ -349,6 +353,11 @@ struct ocfs2_super
u64 la_last_gd;
+ struct ocfs2_reservation_map osb_la_resmap;
+
+ unsigned int osb_resv_level;
+ unsigned int osb_dir_resv_level;
+
/* Next three fields are for local node slot recovery during
* mount. */
int dirty;
@@ -763,6 +772,12 @@ static inline unsigned int ocfs2_megabytes_to_clusters(struct super_block *sb,
return megs << (20 - OCFS2_SB(sb)->s_clustersize_bits);
}
+static inline unsigned int ocfs2_clusters_to_megabytes(struct super_block *sb,
+ unsigned int clusters)
+{
+ return clusters >> (20 - OCFS2_SB(sb)->s_clustersize_bits);
+}
+
static inline void _ocfs2_set_bit(unsigned int bit, unsigned long *bitmap)
{
ext2_set_bit(bit, bitmap);
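
The new ocfs2_clusters_to_megabytes() is simply the inverse shift of the existing ocfs2_megabytes_to_clusters(). A standalone sketch of the round trip, assuming a 4K cluster size (s_clustersize_bits == 12):

#include <stdio.h>

int main(void)
{
	unsigned int clustersize_bits = 12;		/* 4K clusters (assumed) */
	unsigned int shift = 20 - clustersize_bits;	/* 2^20 bytes per MB     */

	unsigned int megs = 8;
	unsigned int clusters = megs << shift;		/* 8 MB -> 2048 clusters */
	unsigned int back = clusters >> shift;		/* back to 8 MB          */

	printf("%u MB = %u clusters = %u MB\n", megs, clusters, back);
	return 0;
}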
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index bb37218a7978..d61a1521b10e 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -283,14 +283,6 @@
#define OCFS2_MIN_JOURNAL_SIZE (4 * 1024 * 1024)
/*
- * Default local alloc size (in megabytes)
- *
- * The value chosen should be such that most allocations, including new
- * block groups, use local alloc.
- */
-#define OCFS2_DEFAULT_LOCAL_ALLOC_SIZE 8
-
-/*
* Inline extended attribute size (in bytes)
* The value chosen should be aligned to 16 byte boundaries.
*/
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index ab42a74c7539..04ae76d8c6ab 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -261,10 +261,8 @@ ssize_t ocfs2_quota_write(struct super_block *sb, int type,
brelse(bh);
goto out;
}
- err = ocfs2_journal_dirty(handle, bh);
+ ocfs2_journal_dirty(handle, bh);
brelse(bh);
- if (err < 0)
- goto out;
out:
if (err) {
mutex_unlock(&gqinode->i_mutex);
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index 9ad49305f450..884b641f199e 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -119,12 +119,8 @@ static int ocfs2_modify_bh(struct inode *inode, struct buffer_head *bh,
lock_buffer(bh);
modify(bh, private);
unlock_buffer(bh);
- status = ocfs2_journal_dirty(handle, bh);
- if (status < 0) {
- mlog_errno(status);
- ocfs2_commit_trans(OCFS2_SB(sb), handle);
- return status;
- }
+ ocfs2_journal_dirty(handle, bh);
+
status = ocfs2_commit_trans(OCFS2_SB(sb), handle);
if (status < 0) {
mlog_errno(status);
@@ -523,9 +519,7 @@ static int ocfs2_recover_local_quota_file(struct inode *lqinode,
ocfs2_clear_bit(bit, dchunk->dqc_bitmap);
le32_add_cpu(&dchunk->dqc_free, 1);
unlock_buffer(qbh);
- status = ocfs2_journal_dirty(handle, qbh);
- if (status < 0)
- mlog_errno(status);
+ ocfs2_journal_dirty(handle, qbh);
out_commit:
mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
ocfs2_commit_trans(OCFS2_SB(sb), handle);
@@ -631,9 +625,7 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
lock_buffer(bh);
ldinfo->dqi_flags = cpu_to_le32(flags | OLQF_CLEAN);
unlock_buffer(bh);
- status = ocfs2_journal_dirty(handle, bh);
- if (status < 0)
- mlog_errno(status);
+ ocfs2_journal_dirty(handle, bh);
out_trans:
ocfs2_commit_trans(osb, handle);
out_bh:
@@ -1009,11 +1001,7 @@ static struct ocfs2_quota_chunk *ocfs2_local_quota_add_chunk(
sb->s_blocksize - sizeof(struct ocfs2_local_disk_chunk) -
OCFS2_QBLK_RESERVED_SPACE);
unlock_buffer(bh);
- status = ocfs2_journal_dirty(handle, bh);
- if (status < 0) {
- mlog_errno(status);
- goto out_trans;
- }
+ ocfs2_journal_dirty(handle, bh);
/* Initialize new block with structures */
down_read(&OCFS2_I(lqinode)->ip_alloc_sem);
@@ -1040,11 +1028,7 @@ static struct ocfs2_quota_chunk *ocfs2_local_quota_add_chunk(
lock_buffer(dbh);
memset(dbh->b_data, 0, sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE);
unlock_buffer(dbh);
- status = ocfs2_journal_dirty(handle, dbh);
- if (status < 0) {
- mlog_errno(status);
- goto out_trans;
- }
+ ocfs2_journal_dirty(handle, dbh);
/* Update local quotafile info */
oinfo->dqi_blocks += 2;
@@ -1155,11 +1139,8 @@ static struct ocfs2_quota_chunk *ocfs2_extend_local_quota_file(
lock_buffer(bh);
memset(bh->b_data, 0, sb->s_blocksize);
unlock_buffer(bh);
- status = ocfs2_journal_dirty(handle, bh);
- if (status < 0) {
- mlog_errno(status);
- goto out_trans;
- }
+ ocfs2_journal_dirty(handle, bh);
+
/* Update chunk header */
status = ocfs2_journal_access_dq(handle, INODE_CACHE(lqinode),
chunk->qc_headerbh,
@@ -1173,11 +1154,8 @@ static struct ocfs2_quota_chunk *ocfs2_extend_local_quota_file(
lock_buffer(chunk->qc_headerbh);
le32_add_cpu(&dchunk->dqc_free, ol_quota_entries_per_block(sb));
unlock_buffer(chunk->qc_headerbh);
- status = ocfs2_journal_dirty(handle, chunk->qc_headerbh);
- if (status < 0) {
- mlog_errno(status);
- goto out_trans;
- }
+ ocfs2_journal_dirty(handle, chunk->qc_headerbh);
+
/* Update file header */
oinfo->dqi_blocks++;
status = ocfs2_local_write_info(sb, type);
@@ -1312,12 +1290,8 @@ static int ocfs2_local_release_dquot(struct dquot *dquot)
ocfs2_clear_bit(offset, dchunk->dqc_bitmap);
le32_add_cpu(&dchunk->dqc_free, 1);
unlock_buffer(od->dq_chunk->qc_headerbh);
- status = ocfs2_journal_dirty(handle, od->dq_chunk->qc_headerbh);
- if (status < 0) {
- mlog_errno(status);
- goto out;
- }
- status = 0;
+ ocfs2_journal_dirty(handle, od->dq_chunk->qc_headerbh);
+
out:
/* Clear the read bit so that next time someone uses this
* dquot he reads fresh info from disk and allocates local
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 5cbcd0f008fc..b9c8ff283644 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -1268,9 +1268,7 @@ static int ocfs2_change_refcount_rec(handle_t *handle,
} else if (merge)
ocfs2_refcount_rec_merge(rb, index);
- ret = ocfs2_journal_dirty(handle, ref_leaf_bh);
- if (ret)
- mlog_errno(ret);
+ ocfs2_journal_dirty(handle, ref_leaf_bh);
out:
return ret;
}
@@ -1694,7 +1692,7 @@ static int ocfs2_adjust_refcount_rec(handle_t *handle,
* 2 more credits, one for the leaf refcount block, one for
* the extent block contains the extent rec.
*/
- ret = ocfs2_extend_trans(handle, handle->h_buffer_credits + 2);
+ ret = ocfs2_extend_trans(handle, 2);
if (ret < 0) {
mlog_errno(ret);
goto out;
@@ -1802,11 +1800,7 @@ static int ocfs2_insert_refcount_rec(handle_t *handle,
if (merge)
ocfs2_refcount_rec_merge(rb, index);
- ret = ocfs2_journal_dirty(handle, ref_leaf_bh);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
+ ocfs2_journal_dirty(handle, ref_leaf_bh);
if (index == 0) {
ret = ocfs2_adjust_refcount_rec(handle, ci,
@@ -1977,9 +1971,7 @@ static int ocfs2_split_refcount_rec(handle_t *handle,
ocfs2_refcount_rec_merge(rb, index);
}
- ret = ocfs2_journal_dirty(handle, ref_leaf_bh);
- if (ret)
- mlog_errno(ret);
+ ocfs2_journal_dirty(handle, ref_leaf_bh);
out:
brelse(new_bh);
@@ -3040,11 +3032,7 @@ static int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
}
memcpy(new_bh->b_data, old_bh->b_data, sb->s_blocksize);
- ret = ocfs2_journal_dirty(handle, new_bh);
- if (ret) {
- mlog_errno(ret);
- break;
- }
+ ocfs2_journal_dirty(handle, new_bh);
brelse(new_bh);
brelse(old_bh);
diff --git a/fs/ocfs2/reservations.c b/fs/ocfs2/reservations.c
new file mode 100644
index 000000000000..d8b6e4259b80
--- /dev/null
+++ b/fs/ocfs2/reservations.c
@@ -0,0 +1,846 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * reservations.c
+ *
+ * Allocation reservations implementation
+ *
+ * Some code borrowed from fs/ext3/balloc.c and is:
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ * The rest is copyright (C) 2010 Novell. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/highmem.h>
+#include <linux/bitops.h>
+#include <linux/list.h>
+
+#define MLOG_MASK_PREFIX ML_RESERVATIONS
+#include <cluster/masklog.h>
+
+#include "ocfs2.h"
+
+#ifdef CONFIG_OCFS2_DEBUG_FS
+#define OCFS2_CHECK_RESERVATIONS
+#endif
+
+DEFINE_SPINLOCK(resv_lock);
+
+#define OCFS2_MIN_RESV_WINDOW_BITS 8
+#define OCFS2_MAX_RESV_WINDOW_BITS 1024
+
+int ocfs2_dir_resv_allowed(struct ocfs2_super *osb)
+{
+ return (osb->osb_resv_level && osb->osb_dir_resv_level);
+}
+
+static unsigned int ocfs2_resv_window_bits(struct ocfs2_reservation_map *resmap,
+ struct ocfs2_alloc_reservation *resv)
+{
+ struct ocfs2_super *osb = resmap->m_osb;
+ unsigned int bits;
+
+ if (!(resv->r_flags & OCFS2_RESV_FLAG_DIR)) {
+ /* 8, 16, 32, 64, 128, 256, 512, 1024 */
+ bits = 4 << osb->osb_resv_level;
+ } else {
+ bits = 4 << osb->osb_dir_resv_level;
+ }
+ return bits;
+}
+
+static inline unsigned int ocfs2_resv_end(struct ocfs2_alloc_reservation *resv)
+{
+ if (resv->r_len)
+ return resv->r_start + resv->r_len - 1;
+ return resv->r_start;
+}
+
+static inline int ocfs2_resv_empty(struct ocfs2_alloc_reservation *resv)
+{
+ return !!(resv->r_len == 0);
+}
+
+static inline int ocfs2_resmap_disabled(struct ocfs2_reservation_map *resmap)
+{
+ if (resmap->m_osb->osb_resv_level == 0)
+ return 1;
+ return 0;
+}
+
+static void ocfs2_dump_resv(struct ocfs2_reservation_map *resmap)
+{
+ struct ocfs2_super *osb = resmap->m_osb;
+ struct rb_node *node;
+ struct ocfs2_alloc_reservation *resv;
+ int i = 0;
+
+ mlog(ML_NOTICE, "Dumping resmap for device %s. Bitmap length: %u\n",
+ osb->dev_str, resmap->m_bitmap_len);
+
+ node = rb_first(&resmap->m_reservations);
+ while (node) {
+ resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node);
+
+ mlog(ML_NOTICE, "start: %u\tend: %u\tlen: %u\tlast_start: %u"
+ "\tlast_len: %u\n", resv->r_start,
+ ocfs2_resv_end(resv), resv->r_len, resv->r_last_start,
+ resv->r_last_len);
+
+ node = rb_next(node);
+ i++;
+ }
+
+ mlog(ML_NOTICE, "%d reservations found. LRU follows\n", i);
+
+ i = 0;
+ list_for_each_entry(resv, &resmap->m_lru, r_lru) {
+ mlog(ML_NOTICE, "LRU(%d) start: %u\tend: %u\tlen: %u\t"
+ "last_start: %u\tlast_len: %u\n", i, resv->r_start,
+ ocfs2_resv_end(resv), resv->r_len, resv->r_last_start,
+ resv->r_last_len);
+
+ i++;
+ }
+}
+
+#ifdef OCFS2_CHECK_RESERVATIONS
+static int ocfs2_validate_resmap_bits(struct ocfs2_reservation_map *resmap,
+ int i,
+ struct ocfs2_alloc_reservation *resv)
+{
+ char *disk_bitmap = resmap->m_disk_bitmap;
+ unsigned int start = resv->r_start;
+ unsigned int end = ocfs2_resv_end(resv);
+
+ while (start <= end) {
+ if (ocfs2_test_bit(start, disk_bitmap)) {
+ mlog(ML_ERROR,
+ "reservation %d covers an allocated area "
+ "starting at bit %u!\n", i, start);
+ return 1;
+ }
+
+ start++;
+ }
+ return 0;
+}
+
+static void ocfs2_check_resmap(struct ocfs2_reservation_map *resmap)
+{
+ unsigned int off = 0;
+ int i = 0;
+ struct rb_node *node;
+ struct ocfs2_alloc_reservation *resv;
+
+ node = rb_first(&resmap->m_reservations);
+ while (node) {
+ resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node);
+
+ if (i > 0 && resv->r_start <= off) {
+ mlog(ML_ERROR, "reservation %d has bad start off!\n",
+ i);
+ goto bad;
+ }
+
+ if (resv->r_len == 0) {
+ mlog(ML_ERROR, "reservation %d has no length!\n",
+ i);
+ goto bad;
+ }
+
+ if (resv->r_start > ocfs2_resv_end(resv)) {
+ mlog(ML_ERROR, "reservation %d has invalid range!\n",
+ i);
+ goto bad;
+ }
+
+ if (ocfs2_resv_end(resv) >= resmap->m_bitmap_len) {
+ mlog(ML_ERROR, "reservation %d extends past bitmap!\n",
+ i);
+ goto bad;
+ }
+
+ if (ocfs2_validate_resmap_bits(resmap, i, resv))
+ goto bad;
+
+ off = ocfs2_resv_end(resv);
+ node = rb_next(node);
+
+ i++;
+ }
+ return;
+
+bad:
+ ocfs2_dump_resv(resmap);
+ BUG();
+}
+#else
+static inline void ocfs2_check_resmap(struct ocfs2_reservation_map *resmap)
+{
+
+}
+#endif
+
+void ocfs2_resv_init_once(struct ocfs2_alloc_reservation *resv)
+{
+ memset(resv, 0, sizeof(*resv));
+ INIT_LIST_HEAD(&resv->r_lru);
+}
+
+void ocfs2_resv_set_type(struct ocfs2_alloc_reservation *resv,
+ unsigned int flags)
+{
+ BUG_ON(flags & ~OCFS2_RESV_TYPES);
+
+ resv->r_flags |= flags;
+}
+
+int ocfs2_resmap_init(struct ocfs2_super *osb,
+ struct ocfs2_reservation_map *resmap)
+{
+ memset(resmap, 0, sizeof(*resmap));
+
+ resmap->m_osb = osb;
+ resmap->m_reservations = RB_ROOT;
+ /* m_bitmap_len is initialized to zero by the above memset. */
+ INIT_LIST_HEAD(&resmap->m_lru);
+
+ return 0;
+}
+
+static void ocfs2_resv_mark_lru(struct ocfs2_reservation_map *resmap,
+ struct ocfs2_alloc_reservation *resv)
+{
+ assert_spin_locked(&resv_lock);
+
+ if (!list_empty(&resv->r_lru))
+ list_del_init(&resv->r_lru);
+
+ list_add_tail(&resv->r_lru, &resmap->m_lru);
+}
+
+static void __ocfs2_resv_trunc(struct ocfs2_alloc_reservation *resv)
+{
+ resv->r_len = 0;
+ resv->r_start = 0;
+}
+
+static void ocfs2_resv_remove(struct ocfs2_reservation_map *resmap,
+ struct ocfs2_alloc_reservation *resv)
+{
+ if (resv->r_flags & OCFS2_RESV_FLAG_INUSE) {
+ list_del_init(&resv->r_lru);
+ rb_erase(&resv->r_node, &resmap->m_reservations);
+ resv->r_flags &= ~OCFS2_RESV_FLAG_INUSE;
+ }
+}
+
+static void __ocfs2_resv_discard(struct ocfs2_reservation_map *resmap,
+ struct ocfs2_alloc_reservation *resv)
+{
+ assert_spin_locked(&resv_lock);
+
+ __ocfs2_resv_trunc(resv);
+ /*
+ * last_len and last_start no longer make sense if
+ * we're changing the range of our allocations.
+ */
+ resv->r_last_len = resv->r_last_start = 0;
+
+ ocfs2_resv_remove(resmap, resv);
+}
+
+/* does nothing if 'resv' is null */
+void ocfs2_resv_discard(struct ocfs2_reservation_map *resmap,
+ struct ocfs2_alloc_reservation *resv)
+{
+ if (resv) {
+ spin_lock(&resv_lock);
+ __ocfs2_resv_discard(resmap, resv);
+ spin_unlock(&resv_lock);
+ }
+}
+
+static void ocfs2_resmap_clear_all_resv(struct ocfs2_reservation_map *resmap)
+{
+ struct rb_node *node;
+ struct ocfs2_alloc_reservation *resv;
+
+ assert_spin_locked(&resv_lock);
+
+ while ((node = rb_last(&resmap->m_reservations)) != NULL) {
+ resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node);
+
+ __ocfs2_resv_discard(resmap, resv);
+ }
+}
+
+void ocfs2_resmap_restart(struct ocfs2_reservation_map *resmap,
+ unsigned int clen, char *disk_bitmap)
+{
+ if (ocfs2_resmap_disabled(resmap))
+ return;
+
+ spin_lock(&resv_lock);
+
+ ocfs2_resmap_clear_all_resv(resmap);
+ resmap->m_bitmap_len = clen;
+ resmap->m_disk_bitmap = disk_bitmap;
+
+ spin_unlock(&resv_lock);
+}
+
+void ocfs2_resmap_uninit(struct ocfs2_reservation_map *resmap)
+{
+ /* Does nothing for now. Keep this around for API symmetry */
+}
+
+static void ocfs2_resv_insert(struct ocfs2_reservation_map *resmap,
+ struct ocfs2_alloc_reservation *new)
+{
+ struct rb_root *root = &resmap->m_reservations;
+ struct rb_node *parent = NULL;
+ struct rb_node **p = &root->rb_node;
+ struct ocfs2_alloc_reservation *tmp;
+
+ assert_spin_locked(&resv_lock);
+
+ mlog(0, "Insert reservation start: %u len: %u\n", new->r_start,
+ new->r_len);
+
+ while (*p) {
+ parent = *p;
+
+ tmp = rb_entry(parent, struct ocfs2_alloc_reservation, r_node);
+
+ if (new->r_start < tmp->r_start) {
+ p = &(*p)->rb_left;
+
+ /*
+ * This is a good place to check for
+ * overlapping reservations.
+ */
+ BUG_ON(ocfs2_resv_end(new) >= tmp->r_start);
+ } else if (new->r_start > ocfs2_resv_end(tmp)) {
+ p = &(*p)->rb_right;
+ } else {
+ /* This should never happen! */
+ mlog(ML_ERROR, "Duplicate reservation window!\n");
+ BUG();
+ }
+ }
+
+ rb_link_node(&new->r_node, parent, p);
+ rb_insert_color(&new->r_node, root);
+ new->r_flags |= OCFS2_RESV_FLAG_INUSE;
+
+ ocfs2_resv_mark_lru(resmap, new);
+
+ ocfs2_check_resmap(resmap);
+}
+
+/**
+ * ocfs2_find_resv_lhs() - find the window which contains goal
+ * @resmap: reservation map to search
+ * @goal: which bit to search for
+ *
+ * If a window containing that goal is not found, we return the window
+ * which comes before goal. Returns NULL on empty rbtree or no window
+ * before goal.
+ */
+static struct ocfs2_alloc_reservation *
+ocfs2_find_resv_lhs(struct ocfs2_reservation_map *resmap, unsigned int goal)
+{
+ struct ocfs2_alloc_reservation *resv = NULL;
+ struct ocfs2_alloc_reservation *prev_resv = NULL;
+ struct rb_node *node = resmap->m_reservations.rb_node;
+
+ assert_spin_locked(&resv_lock);
+
+ if (!node)
+ return NULL;
+
+ node = rb_first(&resmap->m_reservations);
+ while (node) {
+ resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node);
+
+ if (resv->r_start <= goal && ocfs2_resv_end(resv) >= goal)
+ break;
+
+ /* Check whether we overshot the reservation just before goal. */
+ if (resv->r_start > goal) {
+ resv = prev_resv;
+ break;
+ }
+
+ prev_resv = resv;
+ node = rb_next(node);
+ }
+
+ return resv;
+}
+
+/*
+ * We are given a range within the bitmap, which corresponds to a gap
+ * inside the reservations tree (search_start, search_len). The range
+ * can be anything from the whole bitmap, to a gap between
+ * reservations.
+ *
+ * The start value of *rstart is insignificant.
+ *
+ * This function searches the bitmap range starting at search_start
+ * with length search_len for a set of contiguous free bits. We try
+ * to find up to 'wanted' bits, but can sometimes return less.
+ *
+ * Returns the length of allocation, 0 if no free bits are found.
+ *
+ * *rstart and *rlen will also be populated with the result.
+ */
+static int ocfs2_resmap_find_free_bits(struct ocfs2_reservation_map *resmap,
+ unsigned int wanted,
+ unsigned int search_start,
+ unsigned int search_len,
+ unsigned int *rstart,
+ unsigned int *rlen)
+{
+ void *bitmap = resmap->m_disk_bitmap;
+ unsigned int best_start, best_len = 0;
+ int offset, start, found;
+
+ mlog(0, "Find %u bits within range (%u, len %u) resmap len: %u\n",
+ wanted, search_start, search_len, resmap->m_bitmap_len);
+
+ found = best_start = best_len = 0;
+
+ start = search_start;
+ while ((offset = ocfs2_find_next_zero_bit(bitmap, resmap->m_bitmap_len,
+ start)) != -1) {
+ /* Search reached end of the region */
+ if (offset >= (search_start + search_len))
+ break;
+
+ if (offset == start) {
+ /* we found a zero */
+ found++;
+ /* move start to the next bit to test */
+ start++;
+ } else {
+ /* got a zero after some ones */
+ found = 1;
+ start = offset + 1;
+ }
+ if (found > best_len) {
+ best_len = found;
+ best_start = start - found;
+ }
+
+ if (found >= wanted)
+ break;
+ }
+
+ if (best_len == 0)
+ return 0;
+
+ if (best_len >= wanted)
+ best_len = wanted;
+
+ *rlen = best_len;
+ *rstart = best_start;
+
+ mlog(0, "Found start: %u len: %u\n", best_start, best_len);
+
+ return *rlen;
+}
+
+static void __ocfs2_resv_find_window(struct ocfs2_reservation_map *resmap,
+ struct ocfs2_alloc_reservation *resv,
+ unsigned int goal, unsigned int wanted)
+{
+ struct rb_root *root = &resmap->m_reservations;
+ unsigned int gap_start, gap_end, gap_len;
+ struct ocfs2_alloc_reservation *prev_resv, *next_resv;
+ struct rb_node *prev, *next;
+ unsigned int cstart, clen;
+ unsigned int best_start = 0, best_len = 0;
+
+ /*
+ * Nasty cases to consider:
+ *
+ * - rbtree is empty
+ * - our window should be first in all reservations
+ * - our window should be last in all reservations
+ * - need to make sure we don't go past end of bitmap
+ */
+
+ mlog(0, "resv start: %u resv end: %u goal: %u wanted: %u\n",
+ resv->r_start, ocfs2_resv_end(resv), goal, wanted);
+
+ assert_spin_locked(&resv_lock);
+
+ if (RB_EMPTY_ROOT(root)) {
+ /*
+ * Easiest case - empty tree. We can just take
+ * whatever window of free bits we want.
+ */
+
+ mlog(0, "Empty root\n");
+
+ clen = ocfs2_resmap_find_free_bits(resmap, wanted, goal,
+ resmap->m_bitmap_len - goal,
+ &cstart, &clen);
+
+ /*
+ * This should never happen - the local alloc window
+ * will always have free bits when we're called.
+ */
+ BUG_ON(goal == 0 && clen == 0);
+
+ if (clen == 0)
+ return;
+
+ resv->r_start = cstart;
+ resv->r_len = clen;
+
+ ocfs2_resv_insert(resmap, resv);
+ return;
+ }
+
+ prev_resv = ocfs2_find_resv_lhs(resmap, goal);
+
+ if (prev_resv == NULL) {
+ mlog(0, "Goal on LHS of leftmost window\n");
+
+ /*
+ * A NULL here means that the search code couldn't
+ * find a window that starts before goal.
+ *
+ * However, we can take the first window after goal,
+ * which is also, by definition, the leftmost window in
+ * the entire tree. If we can find free bits in the
+ * gap between goal and the LHS window, then the
+ * reservation can safely be placed there.
+ *
+ * Otherwise we fall back to a linear search, checking
+ * the gaps in between windows for a place to
+ * allocate.
+ */
+
+ next = rb_first(root);
+ next_resv = rb_entry(next, struct ocfs2_alloc_reservation,
+ r_node);
+
+ /*
+ * The search should never return such a window (see
+ * the comment above).
+ */
+ if (next_resv->r_start <= goal) {
+ mlog(ML_ERROR, "goal: %u next_resv: start %u len %u\n",
+ goal, next_resv->r_start, next_resv->r_len);
+ ocfs2_dump_resv(resmap);
+ BUG();
+ }
+
+ clen = ocfs2_resmap_find_free_bits(resmap, wanted, goal,
+ next_resv->r_start - goal,
+ &cstart, &clen);
+ if (clen) {
+ best_len = clen;
+ best_start = cstart;
+ if (best_len == wanted)
+ goto out_insert;
+ }
+
+ prev_resv = next_resv;
+ next_resv = NULL;
+ }
+
+ prev = &prev_resv->r_node;
+
+ /* Now we do a linear search for a window, starting at 'prev_resv' */
+ while (1) {
+ next = rb_next(prev);
+ if (next) {
+ mlog(0, "One more resv found in linear search\n");
+ next_resv = rb_entry(next,
+ struct ocfs2_alloc_reservation,
+ r_node);
+
+ gap_start = ocfs2_resv_end(prev_resv) + 1;
+ gap_end = next_resv->r_start - 1;
+ gap_len = gap_end - gap_start + 1;
+ } else {
+ mlog(0, "No next node\n");
+ /*
+ * We're at the rightmost edge of the
+ * tree. See if a reservation between this
+ * window and the end of the bitmap will work.
+ */
+ gap_start = ocfs2_resv_end(prev_resv) + 1;
+ gap_len = resmap->m_bitmap_len - gap_start;
+ gap_end = resmap->m_bitmap_len - 1;
+ }
+
+ /*
+ * No need to check this gap if we have already found
+ * a larger region of free bits.
+ */
+ if (gap_len <= best_len)
+ goto next_resv;
+
+ clen = ocfs2_resmap_find_free_bits(resmap, wanted, gap_start,
+ gap_len, &cstart, &clen);
+ if (clen == wanted) {
+ best_len = clen;
+ best_start = cstart;
+ goto out_insert;
+ } else if (clen > best_len) {
+ best_len = clen;
+ best_start = cstart;
+ }
+
+next_resv:
+ if (!next)
+ break;
+
+ prev = next;
+ prev_resv = rb_entry(prev, struct ocfs2_alloc_reservation,
+ r_node);
+ }
+
+out_insert:
+ if (best_len) {
+ resv->r_start = best_start;
+ resv->r_len = best_len;
+ ocfs2_resv_insert(resmap, resv);
+ }
+}
+
+static void ocfs2_cannibalize_resv(struct ocfs2_reservation_map *resmap,
+ struct ocfs2_alloc_reservation *resv,
+ unsigned int wanted)
+{
+ struct ocfs2_alloc_reservation *lru_resv;
+ int tmpwindow = !!(resv->r_flags & OCFS2_RESV_FLAG_TMP);
+ unsigned int min_bits;
+
+ if (!tmpwindow)
+ min_bits = ocfs2_resv_window_bits(resmap, resv) >> 1;
+ else
+ min_bits = wanted; /* We at least know the temp window will use all
+ * of these bits */
+
+ /*
+ * Take the first reservation off the LRU as our 'target'. We
+ * don't try to be smart about it. There might be a case for
+ * searching based on size but I don't have enough data to be
+ * sure. --Mark (3/16/2010)
+ */
+ lru_resv = list_first_entry(&resmap->m_lru,
+ struct ocfs2_alloc_reservation, r_lru);
+
+ mlog(0, "lru resv: start: %u len: %u end: %u\n", lru_resv->r_start,
+ lru_resv->r_len, ocfs2_resv_end(lru_resv));
+
+ /*
+ * Cannibalize (some or all) of the target reservation and
+ * feed it to the current window.
+ */
+ if (lru_resv->r_len <= min_bits) {
+ /*
+ * Discard completely if size is less than or equal to a
+ * reasonable threshold - 50% of window bits for non-temporary
+ * windows.
+ */
+ resv->r_start = lru_resv->r_start;
+ resv->r_len = lru_resv->r_len;
+
+ __ocfs2_resv_discard(resmap, lru_resv);
+ } else {
+ unsigned int shrink;
+ if (tmpwindow)
+ shrink = min_bits;
+ else
+ shrink = lru_resv->r_len / 2;
+
+ lru_resv->r_len -= shrink;
+
+ resv->r_start = ocfs2_resv_end(lru_resv) + 1;
+ resv->r_len = shrink;
+ }
+
+ mlog(0, "Reservation now looks like: r_start: %u r_end: %u "
+ "r_len: %u r_last_start: %u r_last_len: %u\n",
+ resv->r_start, ocfs2_resv_end(resv), resv->r_len,
+ resv->r_last_start, resv->r_last_len);
+
+ ocfs2_resv_insert(resmap, resv);
+}
+
+static void ocfs2_resv_find_window(struct ocfs2_reservation_map *resmap,
+ struct ocfs2_alloc_reservation *resv,
+ unsigned int wanted)
+{
+ unsigned int goal = 0;
+
+ BUG_ON(!ocfs2_resv_empty(resv));
+
+ /*
+ * Begin by trying to get a window as close to the previous
+ * one as possible. Using the most recent allocation as a
+ * start goal makes sense.
+ */
+ if (resv->r_last_len) {
+ goal = resv->r_last_start + resv->r_last_len;
+ if (goal >= resmap->m_bitmap_len)
+ goal = 0;
+ }
+
+ __ocfs2_resv_find_window(resmap, resv, goal, wanted);
+
+ /* Search from last alloc didn't work, try once more from beginning. */
+ if (ocfs2_resv_empty(resv) && goal != 0)
+ __ocfs2_resv_find_window(resmap, resv, 0, wanted);
+
+ if (ocfs2_resv_empty(resv)) {
+ /*
+ * Still empty? Pull the oldest one off the LRU, remove it from
+ * the tree, put this one in its place.
+ */
+ ocfs2_cannibalize_resv(resmap, resv, wanted);
+ }
+
+ BUG_ON(ocfs2_resv_empty(resv));
+}
+
+int ocfs2_resmap_resv_bits(struct ocfs2_reservation_map *resmap,
+ struct ocfs2_alloc_reservation *resv,
+ int *cstart, int *clen)
+{
+ unsigned int wanted = *clen;
+
+ if (resv == NULL || ocfs2_resmap_disabled(resmap))
+ return -ENOSPC;
+
+ spin_lock(&resv_lock);
+
+ /*
+ * We don't want to over-allocate for temporary
+ * windows. Otherwise, we run the risk of fragmenting the
+ * allocation space.
+ */
+ wanted = ocfs2_resv_window_bits(resmap, resv);
+ if ((resv->r_flags & OCFS2_RESV_FLAG_TMP) || wanted < *clen)
+ wanted = *clen;
+
+ if (ocfs2_resv_empty(resv)) {
+ mlog(0, "empty reservation, find new window\n");
+
+ /*
+ * Try to get a window here. If it works, we must fall
+ * through and test the bitmap. This avoids some
+ * ping-ponging of windows due to non-reserved space
+ * being allocated before we initialize a window for
+ * that inode.
+ */
+ ocfs2_resv_find_window(resmap, resv, wanted);
+ }
+
+ BUG_ON(ocfs2_resv_empty(resv));
+
+ *cstart = resv->r_start;
+ *clen = resv->r_len;
+
+ spin_unlock(&resv_lock);
+ return 0;
+}
+
+static void
+ ocfs2_adjust_resv_from_alloc(struct ocfs2_reservation_map *resmap,
+ struct ocfs2_alloc_reservation *resv,
+ unsigned int start, unsigned int end)
+{
+ unsigned int rhs = 0;
+ unsigned int old_end = ocfs2_resv_end(resv);
+
+ BUG_ON(start != resv->r_start || old_end < end);
+
+ /*
+ * Completely used? We can remove it then.
+ */
+ if (old_end == end) {
+ __ocfs2_resv_discard(resmap, resv);
+ return;
+ }
+
+ rhs = old_end - end;
+
+ /*
+ * This should have been trapped above.
+ */
+ BUG_ON(rhs == 0);
+
+ resv->r_start = end + 1;
+ resv->r_len = old_end - resv->r_start + 1;
+}
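For orientation only (not part of the patch, and the values are made up), the adjustment above works out as follows:

/*
 * Illustrative example: a window with r_start = 10 and r_len = 10 has
 * ocfs2_resv_end() = 19.  If the allocator claims bits 10..13
 * (start = 10, end = 13), then rhs = 19 - 13 = 6, the window is moved
 * to r_start = 14, and r_len becomes 19 - 14 + 1 = 6.
 */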
+
+void ocfs2_resmap_claimed_bits(struct ocfs2_reservation_map *resmap,
+ struct ocfs2_alloc_reservation *resv,
+ u32 cstart, u32 clen)
+{
+ unsigned int cend = cstart + clen - 1;
+
+ if (resmap == NULL || ocfs2_resmap_disabled(resmap))
+ return;
+
+ if (resv == NULL)
+ return;
+
+ BUG_ON(cstart != resv->r_start);
+
+ spin_lock(&resv_lock);
+
+ mlog(0, "claim bits: cstart: %u cend: %u clen: %u r_start: %u "
+ "r_end: %u r_len: %u, r_last_start: %u r_last_len: %u\n",
+ cstart, cend, clen, resv->r_start, ocfs2_resv_end(resv),
+ resv->r_len, resv->r_last_start, resv->r_last_len);
+
+ BUG_ON(cstart < resv->r_start);
+ BUG_ON(cstart > ocfs2_resv_end(resv));
+ BUG_ON(cend > ocfs2_resv_end(resv));
+
+ ocfs2_adjust_resv_from_alloc(resmap, resv, cstart, cend);
+ resv->r_last_start = cstart;
+ resv->r_last_len = clen;
+
+ /*
+ * May have been discarded above from
+ * ocfs2_adjust_resv_from_alloc().
+ */
+ if (!ocfs2_resv_empty(resv))
+ ocfs2_resv_mark_lru(resmap, resv);
+
+ mlog(0, "Reservation now looks like: r_start: %u r_end: %u "
+ "r_len: %u r_last_start: %u r_last_len: %u\n",
+ resv->r_start, ocfs2_resv_end(resv), resv->r_len,
+ resv->r_last_start, resv->r_last_len);
+
+ ocfs2_check_resmap(resmap);
+
+ spin_unlock(&resv_lock);
+}
diff --git a/fs/ocfs2/reservations.h b/fs/ocfs2/reservations.h
new file mode 100644
index 000000000000..1e49cc29d06c
--- /dev/null
+++ b/fs/ocfs2/reservations.h
@@ -0,0 +1,159 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * reservations.h
+ *
+ * Allocation reservations function prototypes and structures.
+ *
+ * Copyright (C) 2010 Novell. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef OCFS2_RESERVATIONS_H
+#define OCFS2_RESERVATIONS_H
+
+#include <linux/rbtree.h>
+
+#define OCFS2_DEFAULT_RESV_LEVEL 2
+#define OCFS2_MAX_RESV_LEVEL 9
+#define OCFS2_MIN_RESV_LEVEL 0
+
+struct ocfs2_alloc_reservation {
+ struct rb_node r_node;
+
+ unsigned int r_start; /* Beginning of current window */
+ unsigned int r_len; /* Length of the window */
+
+ unsigned int r_last_len; /* Length of most recent alloc */
+ unsigned int r_last_start; /* Start of most recent alloc */
+ struct list_head r_lru; /* LRU list head */
+
+ unsigned int r_flags;
+};
+
+#define OCFS2_RESV_FLAG_INUSE 0x01 /* Set when r_node is part of an rbtree */
+#define OCFS2_RESV_FLAG_TMP 0x02 /* Temporary reservation, will be
+ * destroyed immediately after use */
+#define OCFS2_RESV_FLAG_DIR 0x04 /* Reservation is for an unindexed
+ * directory btree */
+
+struct ocfs2_reservation_map {
+ struct rb_root m_reservations;
+ char *m_disk_bitmap;
+
+ struct ocfs2_super *m_osb;
+
+ /* The following are not initialized to meaningful values until a disk
+ * bitmap is provided. */
+ u32 m_bitmap_len; /* Number of valid
+ * bits available */
+
+ struct list_head m_lru; /* LRU of reservation
+ * structures. */
+
+};
+
+void ocfs2_resv_init_once(struct ocfs2_alloc_reservation *resv);
+
+#define OCFS2_RESV_TYPES (OCFS2_RESV_FLAG_TMP|OCFS2_RESV_FLAG_DIR)
+void ocfs2_resv_set_type(struct ocfs2_alloc_reservation *resv,
+ unsigned int flags);
+
+int ocfs2_dir_resv_allowed(struct ocfs2_super *osb);
+
+/**
+ * ocfs2_resv_discard() - truncate a reservation
+ * @resmap: reservation map the reservation belongs to
+ * @resv: the reservation to truncate.
+ *
+ * After this function is called, the reservation will be empty, and
+ * unlinked from the rbtree.
+ */
+void ocfs2_resv_discard(struct ocfs2_reservation_map *resmap,
+ struct ocfs2_alloc_reservation *resv);
+
+
+/**
+ * ocfs2_resmap_init() - Initialize fields of a reservations bitmap
+ * @osb: ocfs2_super of the mounted file system
+ * @resmap: struct ocfs2_reservation_map to initialize
+ *
+ * Only possible return value other than '0' is -ENOMEM for failure to
+ * allocate the mirror bitmap.
+ */
+int ocfs2_resmap_init(struct ocfs2_super *osb,
+ struct ocfs2_reservation_map *resmap);
+
+/**
+ * ocfs2_resmap_restart() - "restart" a reservation bitmap
+ * @resmap: reservations bitmap
+ * @clen: Number of valid bits in the bitmap
+ * @disk_bitmap: the disk bitmap this resmap should refer to.
+ *
+ * Re-initialize the parameters of a reservation bitmap. This is
+ * useful for local alloc window slides.
+ *
+ * This function truncates and discards all existing reservations
+ * (see __ocfs2_resv_discard()). A future version will recalculate
+ * existing reservations based on the new bitmap.
+ */
+void ocfs2_resmap_restart(struct ocfs2_reservation_map *resmap,
+ unsigned int clen, char *disk_bitmap);
+
+/**
+ * ocfs2_resmap_uninit() - uninitialize a reservation bitmap structure
+ * @resmap: the struct ocfs2_reservation_map to uninitialize
+ */
+void ocfs2_resmap_uninit(struct ocfs2_reservation_map *resmap);
+
+/**
+ * ocfs2_resmap_resv_bits() - Return still-valid reservation bits
+ * @resmap: reservations bitmap
+ * @resv: reservation to base search from
+ * @cstart: start of the usable window (output)
+ * @clen: on input, the number of clusters wanted; on output, the usable
+ *        length, in clusters
+ *
+ * Using the reservation data from resv, this function will compare
+ * resmap and resmap->m_disk_bitmap to determine what part (if any) of
+ * the reservation window is still clear to use. If resv is empty,
+ * this function will try to allocate a window for it.
+ *
+ * On success, zero is returned and the valid allocation area is set in cstart
+ * and clen.
+ *
+ * Returns -ENOSPC if reservations are disabled.
+ */
+int ocfs2_resmap_resv_bits(struct ocfs2_reservation_map *resmap,
+ struct ocfs2_alloc_reservation *resv,
+ int *cstart, int *clen);
+
+/**
+ * ocfs2_resmap_claimed_bits() - Tell the reservation code that bits were used.
+ * @resmap: reservations bitmap
+ * @resv: optional reservation to recalculate based on the new bitmap
+ * @cstart: start of allocation in clusters
+ * @clen: length of the allocation, in clusters
+ *
+ * Tell the reservation code that bits were used to fulfill allocation in
+ * resmap. The bits don't have to have been part of any existing
+ * reservation. But we must always call this function when bits are claimed.
+ * Internally, the reservations code will use this information to mark the
+ * reservations bitmap. If resv is passed, its next allocation window will
+ * be calculated. It also expects that 'cstart' is the same value that was
+ * passed back from ocfs2_resmap_resv_bits().
+ */
+void ocfs2_resmap_claimed_bits(struct ocfs2_reservation_map *resmap,
+ struct ocfs2_alloc_reservation *resv,
+ u32 cstart, u32 clen);
+
+#endif /* OCFS2_RESERVATIONS_H */
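The kernel-doc above describes each entry point in isolation. Purely as a reading aid (a hedged sketch, not part of the patch; example_reserve_and_claim(), my_osb, la_bitmap and la_len are hypothetical stand-ins for local alloc state), a caller might string the API together roughly like this:

/* Illustrative sketch only; error handling and locking are elided. */
static int example_reserve_and_claim(struct ocfs2_super *my_osb,
				     struct ocfs2_reservation_map *resmap,
				     struct ocfs2_alloc_reservation *resv,
				     char *la_bitmap, unsigned int la_len)
{
	int cstart, clen = 8;			/* ask for up to 8 clusters */
	int ret;

	ocfs2_resv_init_once(resv);		/* once per reservation object */
	ocfs2_resv_set_type(resv, OCFS2_RESV_FLAG_DIR);

	ret = ocfs2_resmap_init(my_osb, resmap);
	if (ret)
		return ret;

	/* Point the map at the current local alloc window. */
	ocfs2_resmap_restart(resmap, la_len, la_bitmap);

	/* Find (or create) a window; cstart/clen describe usable space. */
	ret = ocfs2_resmap_resv_bits(resmap, resv, &cstart, &clen);
	if (ret)
		return ret;	/* -ENOSPC when reservations are disabled */

	/* ... the caller would set bits [cstart, cstart + clen) here ... */

	/* Report what was actually consumed so the window can shrink. */
	ocfs2_resmap_claimed_bits(resmap, resv, cstart, clen);

	/* Drop the window once this allocation context is finished. */
	ocfs2_resv_discard(resmap, resv);
	return 0;
}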
diff --git a/fs/ocfs2/resize.c b/fs/ocfs2/resize.c
index 3c3d673a4d20..a821f667b5c4 100644
--- a/fs/ocfs2/resize.c
+++ b/fs/ocfs2/resize.c
@@ -134,11 +134,7 @@ static int ocfs2_update_last_group_and_inode(handle_t *handle,
le16_add_cpu(&group->bg_free_bits_count, -1 * backups);
}
- ret = ocfs2_journal_dirty(handle, group_bh);
- if (ret < 0) {
- mlog_errno(ret);
- goto out_rollback;
- }
+ ocfs2_journal_dirty(handle, group_bh);
/* update the inode accordingly. */
ret = ocfs2_journal_access_di(handle, INODE_CACHE(bm_inode), bm_bh,
@@ -545,12 +541,7 @@ int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input)
group = (struct ocfs2_group_desc *)group_bh->b_data;
group->bg_next_group = cr->c_blkno;
-
- ret = ocfs2_journal_dirty(handle, group_bh);
- if (ret < 0) {
- mlog_errno(ret);
- goto out_commit;
- }
+ ocfs2_journal_dirty(handle, group_bh);
ret = ocfs2_journal_access_di(handle, INODE_CACHE(main_bm_inode),
main_bm_bh, OCFS2_JOURNAL_ACCESS_WRITE);
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 19ba00f28547..667d622b3659 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -130,6 +130,7 @@ void ocfs2_free_ac_resource(struct ocfs2_alloc_context *ac)
}
brelse(ac->ac_bh);
ac->ac_bh = NULL;
+ ac->ac_resv = NULL;
}
void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac)
@@ -369,9 +370,7 @@ static int ocfs2_block_group_fill(handle_t *handle,
ocfs2_set_bit(0, (unsigned long *)bg->bg_bitmap);
bg->bg_free_bits_count = cpu_to_le16(le16_to_cpu(bg->bg_bits) - 1);
- status = ocfs2_journal_dirty(handle, bg_bh);
- if (status < 0)
- mlog_errno(status);
+ ocfs2_journal_dirty(handle, bg_bh);
/* There is no need to zero out or otherwise initialize the
* other blocks in a group - All valid FS metadata in a block
@@ -506,11 +505,7 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
le32_add_cpu(&fe->id1.bitmap1.i_total, le16_to_cpu(bg->bg_bits));
le32_add_cpu(&fe->i_clusters, le16_to_cpu(cl->cl_cpg));
- status = ocfs2_journal_dirty(handle, bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
+ ocfs2_journal_dirty(handle, bh);
spin_lock(&OCFS2_I(alloc_inode)->ip_lock);
OCFS2_I(alloc_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
@@ -760,7 +755,7 @@ int ocfs2_reserve_new_metadata_blocks(struct ocfs2_super *osb,
status = ocfs2_reserve_suballoc_bits(osb, (*ac),
EXTENT_ALLOC_SYSTEM_INODE,
(u32)osb->slot_num, NULL,
- ALLOC_NEW_GROUP);
+ ALLOC_GROUPS_FROM_GLOBAL|ALLOC_NEW_GROUP);
if (status >= 0) {
@@ -946,11 +941,7 @@ static int ocfs2_reserve_clusters_with_limit(struct ocfs2_super *osb,
status = ocfs2_reserve_local_alloc_bits(osb,
bits_wanted,
*ac);
- if (status == -EFBIG) {
- /* The local alloc window is outside ac_max_block.
- * use the main bitmap. */
- status = -ENOSPC;
- } else if ((status < 0) && (status != -ENOSPC)) {
+ if ((status < 0) && (status != -ENOSPC)) {
mlog_errno(status);
goto bail;
}
@@ -1129,16 +1120,10 @@ static inline int ocfs2_block_group_set_bits(handle_t *handle,
}
le16_add_cpu(&bg->bg_free_bits_count, -num_bits);
-
while(num_bits--)
ocfs2_set_bit(bit_off++, bitmap);
- status = ocfs2_journal_dirty(handle,
- group_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
+ ocfs2_journal_dirty(handle, group_bh);
bail:
mlog_exit(status);
@@ -1202,12 +1187,7 @@ static int ocfs2_relink_block_group(handle_t *handle,
}
prev_bg->bg_next_group = bg->bg_next_group;
-
- status = ocfs2_journal_dirty(handle, prev_bg_bh);
- if (status < 0) {
- mlog_errno(status);
- goto out_rollback;
- }
+ ocfs2_journal_dirty(handle, prev_bg_bh);
status = ocfs2_journal_access_gd(handle, INODE_CACHE(alloc_inode),
bg_bh, OCFS2_JOURNAL_ACCESS_WRITE);
@@ -1217,12 +1197,7 @@ static int ocfs2_relink_block_group(handle_t *handle,
}
bg->bg_next_group = fe->id2.i_chain.cl_recs[chain].c_blkno;
-
- status = ocfs2_journal_dirty(handle, bg_bh);
- if (status < 0) {
- mlog_errno(status);
- goto out_rollback;
- }
+ ocfs2_journal_dirty(handle, bg_bh);
status = ocfs2_journal_access_di(handle, INODE_CACHE(alloc_inode),
fe_bh, OCFS2_JOURNAL_ACCESS_WRITE);
@@ -1232,14 +1207,8 @@ static int ocfs2_relink_block_group(handle_t *handle,
}
fe->id2.i_chain.cl_recs[chain].c_blkno = bg->bg_blkno;
+ ocfs2_journal_dirty(handle, fe_bh);
- status = ocfs2_journal_dirty(handle, fe_bh);
- if (status < 0) {
- mlog_errno(status);
- goto out_rollback;
- }
-
- status = 0;
out_rollback:
if (status < 0) {
fe->id2.i_chain.cl_recs[chain].c_blkno = cpu_to_le64(fe_ptr);
@@ -1386,10 +1355,7 @@ static int ocfs2_alloc_dinode_update_counts(struct inode *inode,
tmp_used = le32_to_cpu(di->id1.bitmap1.i_used);
di->id1.bitmap1.i_used = cpu_to_le32(num_bits + tmp_used);
le32_add_cpu(&cl->cl_recs[chain].c_free, -num_bits);
-
- ret = ocfs2_journal_dirty(handle, di_bh);
- if (ret < 0)
- mlog_errno(ret);
+ ocfs2_journal_dirty(handle, di_bh);
out:
return ret;
@@ -1560,13 +1526,7 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used);
fe->id1.bitmap1.i_used = cpu_to_le32(*num_bits + tmp_used);
le32_add_cpu(&cl->cl_recs[chain].c_free, -(*num_bits));
-
- status = ocfs2_journal_dirty(handle,
- ac->ac_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
+ ocfs2_journal_dirty(handle, ac->ac_bh);
status = ocfs2_block_group_set_bits(handle,
alloc_inode,
@@ -1907,6 +1867,8 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
&& ac->ac_which != OCFS2_AC_USE_MAIN);
if (ac->ac_which == OCFS2_AC_USE_LOCAL) {
+ WARN_ON(min_clusters > 1);
+
status = ocfs2_claim_local_alloc_bits(osb,
handle,
ac,
@@ -2023,9 +1985,7 @@ static int ocfs2_block_group_clear_bits(handle_t *handle,
if (undo_fn)
jbd_unlock_bh_state(group_bh);
- status = ocfs2_journal_dirty(handle, group_bh);
- if (status < 0)
- mlog_errno(status);
+ ocfs2_journal_dirty(handle, group_bh);
bail:
return status;
}
@@ -2092,12 +2052,7 @@ static int _ocfs2_free_suballoc_bits(handle_t *handle,
count);
tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used);
fe->id1.bitmap1.i_used = cpu_to_le32(tmp_used - count);
-
- status = ocfs2_journal_dirty(handle, alloc_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
+ ocfs2_journal_dirty(handle, alloc_bh);
bail:
brelse(group_bh);
diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h
index e0f46df357e6..da2f29a55ec3 100644
--- a/fs/ocfs2/suballoc.h
+++ b/fs/ocfs2/suballoc.h
@@ -54,6 +54,8 @@ struct ocfs2_alloc_context {
u64 ac_last_group;
u64 ac_max_block; /* Highest block number to allocate. 0 is
is the same as ~0 - unlimited */
+
+ struct ocfs2_alloc_reservation *ac_resv;
};
void ocfs2_init_steal_slots(struct ocfs2_super *osb);
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index dee03197a494..12c2203a62fe 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -94,7 +94,9 @@ struct mount_options
unsigned long mount_opt;
unsigned int atime_quantum;
signed short slot;
- unsigned int localalloc_opt;
+ int localalloc_opt;
+ unsigned int resv_level;
+ int dir_resv_level;
char cluster_stack[OCFS2_STACK_LABEL_LEN + 1];
};
@@ -176,6 +178,8 @@ enum {
Opt_noacl,
Opt_usrquota,
Opt_grpquota,
+ Opt_resv_level,
+ Opt_dir_resv_level,
Opt_err,
};
@@ -202,6 +206,8 @@ static const match_table_t tokens = {
{Opt_noacl, "noacl"},
{Opt_usrquota, "usrquota"},
{Opt_grpquota, "grpquota"},
+ {Opt_resv_level, "resv_level=%u"},
+ {Opt_dir_resv_level, "dir_resv_level=%u"},
{Opt_err, NULL}
};
@@ -1028,8 +1034,14 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
osb->s_atime_quantum = parsed_options.atime_quantum;
osb->preferred_slot = parsed_options.slot;
osb->osb_commit_interval = parsed_options.commit_interval;
- osb->local_alloc_default_bits = ocfs2_megabytes_to_clusters(sb, parsed_options.localalloc_opt);
- osb->local_alloc_bits = osb->local_alloc_default_bits;
+
+ ocfs2_la_set_sizes(osb, parsed_options.localalloc_opt);
+ osb->osb_resv_level = parsed_options.resv_level;
+ osb->osb_dir_resv_level = parsed_options.resv_level;
+ if (parsed_options.dir_resv_level == -1)
+ osb->osb_dir_resv_level = parsed_options.resv_level;
+ else
+ osb->osb_dir_resv_level = parsed_options.dir_resv_level;
status = ocfs2_verify_userspace_stack(osb, &parsed_options);
if (status)
@@ -1285,11 +1297,13 @@ static int ocfs2_parse_options(struct super_block *sb,
options ? options : "(none)");
mopt->commit_interval = 0;
- mopt->mount_opt = 0;
+ mopt->mount_opt = OCFS2_MOUNT_NOINTR;
mopt->atime_quantum = OCFS2_DEFAULT_ATIME_QUANTUM;
mopt->slot = OCFS2_INVALID_SLOT;
- mopt->localalloc_opt = OCFS2_DEFAULT_LOCAL_ALLOC_SIZE;
+ mopt->localalloc_opt = -1;
mopt->cluster_stack[0] = '\0';
+ mopt->resv_level = OCFS2_DEFAULT_RESV_LEVEL;
+ mopt->dir_resv_level = -1;
if (!options) {
status = 1;
@@ -1380,7 +1394,7 @@ static int ocfs2_parse_options(struct super_block *sb,
status = 0;
goto bail;
}
- if (option >= 0 && (option <= ocfs2_local_alloc_size(sb) * 8))
+ if (option >= 0)
mopt->localalloc_opt = option;
break;
case Opt_localflocks:
@@ -1433,6 +1447,28 @@ static int ocfs2_parse_options(struct super_block *sb,
mopt->mount_opt |= OCFS2_MOUNT_NO_POSIX_ACL;
mopt->mount_opt &= ~OCFS2_MOUNT_POSIX_ACL;
break;
+ case Opt_resv_level:
+ if (is_remount)
+ break;
+ if (match_int(&args[0], &option)) {
+ status = 0;
+ goto bail;
+ }
+ if (option >= OCFS2_MIN_RESV_LEVEL &&
+ option < OCFS2_MAX_RESV_LEVEL)
+ mopt->resv_level = option;
+ break;
+ case Opt_dir_resv_level:
+ if (is_remount)
+ break;
+ if (match_int(&args[0], &option)) {
+ status = 0;
+ goto bail;
+ }
+ if (option >= OCFS2_MIN_RESV_LEVEL &&
+ option < OCFS2_MAX_RESV_LEVEL)
+ mopt->dir_resv_level = option;
+ break;
default:
mlog(ML_ERROR,
"Unrecognized mount option \"%s\" "
@@ -1487,7 +1523,7 @@ static int ocfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
(unsigned) (osb->osb_commit_interval / HZ));
local_alloc_megs = osb->local_alloc_bits >> (20 - osb->s_clustersize_bits);
- if (local_alloc_megs != OCFS2_DEFAULT_LOCAL_ALLOC_SIZE)
+ if (local_alloc_megs != ocfs2_la_default_mb(osb))
seq_printf(s, ",localalloc=%d", local_alloc_megs);
if (opts & OCFS2_MOUNT_LOCALFLOCKS)
@@ -1514,6 +1550,12 @@ static int ocfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
else
seq_printf(s, ",noacl");
+ if (osb->osb_resv_level != OCFS2_DEFAULT_RESV_LEVEL)
+ seq_printf(s, ",resv_level=%d", osb->osb_resv_level);
+
+ if (osb->osb_dir_resv_level != osb->osb_resv_level)
+ seq_printf(s, ",dir_resv_level=%d", osb->osb_resv_level);
+
return 0;
}
@@ -1688,6 +1730,8 @@ static void ocfs2_inode_init_once(void *data)
oi->ip_blkno = 0ULL;
oi->ip_clusters = 0;
+ ocfs2_resv_init_once(&oi->ip_la_data_resv);
+
ocfs2_lock_res_init_once(&oi->ip_rw_lockres);
ocfs2_lock_res_init_once(&oi->ip_inode_lockres);
ocfs2_lock_res_init_once(&oi->ip_open_lockres);
@@ -2042,6 +2086,12 @@ static int ocfs2_initialize_super(struct super_block *sb,
init_waitqueue_head(&osb->osb_mount_event);
+ status = ocfs2_resmap_init(osb, &osb->osb_la_resmap);
+ if (status) {
+ mlog_errno(status);
+ goto bail;
+ }
+
osb->vol_label = kmalloc(OCFS2_MAX_VOL_LABEL_LEN, GFP_KERNEL);
if (!osb->vol_label) {
mlog(ML_ERROR, "unable to alloc vol label\n");
@@ -2224,6 +2274,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
}
osb->bitmap_blkno = OCFS2_I(inode)->ip_blkno;
+ osb->osb_clusters_at_boot = OCFS2_I(inode)->ip_clusters;
iput(inode);
osb->bitmap_cpg = ocfs2_group_bitmap_size(sb) * 8;
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 3e7773089b96..b05ce92ae72a 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -739,11 +739,7 @@ static int ocfs2_xattr_extend_allocation(struct inode *inode,
goto leave;
}
- status = ocfs2_journal_dirty(handle, vb->vb_bh);
- if (status < 0) {
- mlog_errno(status);
- goto leave;
- }
+ ocfs2_journal_dirty(handle, vb->vb_bh);
clusters_to_add -= le32_to_cpu(vb->vb_xv->xr_clusters) - prev_clusters;
@@ -786,12 +782,7 @@ static int __ocfs2_remove_xattr_range(struct inode *inode,
}
le32_add_cpu(&vb->vb_xv->xr_clusters, -len);
-
- ret = ocfs2_journal_dirty(handle, vb->vb_bh);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
+ ocfs2_journal_dirty(handle, vb->vb_bh);
if (ext_flags & OCFS2_EXT_REFCOUNTED)
ret = ocfs2_decrease_refcount(inode, handle,
@@ -1374,11 +1365,7 @@ static int __ocfs2_xattr_set_value_outside(struct inode *inode,
memset(bh->b_data + cp_len, 0,
blocksize - cp_len);
- ret = ocfs2_journal_dirty(handle, bh);
- if (ret < 0) {
- mlog_errno(ret);
- goto out;
- }
+ ocfs2_journal_dirty(handle, bh);
brelse(bh);
bh = NULL;
@@ -2148,15 +2135,18 @@ alloc_value:
orig_clusters = ocfs2_xa_value_clusters(loc);
rc = ocfs2_xa_value_truncate(loc, xi->xi_value_len, ctxt);
if (rc < 0) {
- /*
- * If we tried to grow an existing external value,
- * ocfs2_xa_cleanuP-value_truncate() is going to
- * let it stand. We have to restore its original
- * value size.
- */
- loc->xl_entry->xe_value_size = orig_value_size;
ocfs2_xa_cleanup_value_truncate(loc, "growing",
orig_clusters);
+ /*
+ * If we were growing an existing value,
+ * ocfs2_xa_cleanup_value_truncate() won't remove
+ * the entry. We need to restore the original value
+ * size.
+ */
+ if (loc->xl_entry) {
+ BUG_ON(!orig_value_size);
+ loc->xl_entry->xe_value_size = orig_value_size;
+ }
mlog_errno(rc);
}
}
@@ -2594,9 +2584,7 @@ int ocfs2_xattr_remove(struct inode *inode, struct buffer_head *di_bh)
di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
spin_unlock(&oi->ip_lock);
- ret = ocfs2_journal_dirty(handle, di_bh);
- if (ret < 0)
- mlog_errno(ret);
+ ocfs2_journal_dirty(handle, di_bh);
out_commit:
ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
out:
@@ -2724,9 +2712,7 @@ static int ocfs2_xattr_ibody_init(struct inode *inode,
di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
spin_unlock(&oi->ip_lock);
- ret = ocfs2_journal_dirty(ctxt->handle, di_bh);
- if (ret < 0)
- mlog_errno(ret);
+ ocfs2_journal_dirty(ctxt->handle, di_bh);
out:
return ret;
@@ -3312,8 +3298,7 @@ static int __ocfs2_xattr_set_handle(struct inode *inode,
goto out;
}
- ret = ocfs2_extend_trans(ctxt->handle, credits +
- ctxt->handle->h_buffer_credits);
+ ret = ocfs2_extend_trans(ctxt->handle, credits);
if (ret) {
mlog_errno(ret);
goto out;
@@ -3343,8 +3328,7 @@ static int __ocfs2_xattr_set_handle(struct inode *inode,
goto out;
}
- ret = ocfs2_extend_trans(ctxt->handle, credits +
- ctxt->handle->h_buffer_credits);
+ ret = ocfs2_extend_trans(ctxt->handle, credits);
if (ret) {
mlog_errno(ret);
goto out;
@@ -3378,8 +3362,7 @@ static int __ocfs2_xattr_set_handle(struct inode *inode,
goto out;
}
- ret = ocfs2_extend_trans(ctxt->handle, credits +
- ctxt->handle->h_buffer_credits);
+ ret = ocfs2_extend_trans(ctxt->handle, credits);
if (ret) {
mlog_errno(ret);
goto out;
@@ -4887,8 +4870,7 @@ static int ocfs2_mv_xattr_buckets(struct inode *inode, handle_t *handle,
* We need to update the first bucket of the old extent and all
* the buckets going to the new extent.
*/
- credits = ((num_buckets + 1) * blks_per_bucket) +
- handle->h_buffer_credits;
+ credits = ((num_buckets + 1) * blks_per_bucket);
ret = ocfs2_extend_trans(handle, credits);
if (ret) {
mlog_errno(ret);
@@ -4958,7 +4940,7 @@ static int ocfs2_divide_xattr_cluster(struct inode *inode,
u32 *first_hash)
{
u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
- int ret, credits = 2 * blk_per_bucket + handle->h_buffer_credits;
+ int ret, credits = 2 * blk_per_bucket;
BUG_ON(OCFS2_XATTR_BUCKET_SIZE < OCFS2_SB(inode->i_sb)->s_clustersize);
@@ -5153,9 +5135,7 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode,
goto leave;
}
- ret = ocfs2_journal_dirty(handle, root_bh);
- if (ret < 0)
- mlog_errno(ret);
+ ocfs2_journal_dirty(handle, root_bh);
leave:
return ret;
@@ -5200,8 +5180,7 @@ static int ocfs2_extend_xattr_bucket(struct inode *inode,
* existing bucket. Then we add the last existing bucket, the
* new bucket, and the first bucket (3 * blk_per_bucket).
*/
- credits = (end_blk - target_blk) + (3 * blk_per_bucket) +
- handle->h_buffer_credits;
+ credits = (end_blk - target_blk) + (3 * blk_per_bucket);
ret = ocfs2_extend_trans(handle, credits);
if (ret) {
mlog_errno(ret);
@@ -5477,12 +5456,7 @@ static int ocfs2_rm_xattr_cluster(struct inode *inode,
}
le32_add_cpu(&xb->xb_attrs.xb_root.xt_clusters, -len);
-
- ret = ocfs2_journal_dirty(handle, root_bh);
- if (ret) {
- mlog_errno(ret);
- goto out_commit;
- }
+ ocfs2_journal_dirty(handle, root_bh);
ret = ocfs2_truncate_log_append(osb, handle, blkno, len);
if (ret)
diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c
index c82af6acc2e7..b44bb835e8ea 100644
--- a/fs/omfs/inode.c
+++ b/fs/omfs/inode.c
@@ -3,7 +3,6 @@
* Copyright (C) 2006 Bob Copeland <me@bobcopeland.com>
* Released under GPL v2.
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
diff --git a/fs/open.c b/fs/open.c
index 74e5cd9f718e..b93eac362e0e 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -30,6 +30,7 @@
#include <linux/falloc.h>
#include <linux/fs_struct.h>
#include <linux/ima.h>
+#include <linux/dnotify.h>
#include "internal.h"
@@ -1054,7 +1055,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
put_unused_fd(fd);
fd = PTR_ERR(f);
} else {
- fsnotify_open(f->f_path.dentry);
+ fsnotify_open(f);
fd_install(fd, f);
}
}
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 788b5802a7ce..0dbc1b9a9fb6 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -82,7 +82,7 @@
/*
* There are three quota SMP locks. dq_list_lock protects all lists with quotas
- * and quota formats, dqstats structure containing statistics about the lists
+ * and quota formats.
* dq_data_lock protects data from dq_dqb and also mem_dqinfo structures and
* also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes.
* i_blocks and i_bytes updates itself are guarded by i_lock acquired directly
@@ -132,7 +132,9 @@ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
EXPORT_SYMBOL(dq_data_lock);
+#if defined(CONFIG_QUOTA_DEBUG) || defined(CONFIG_PRINT_QUOTA_WARNING)
static char *quotatypes[] = INITQFNAMES;
+#endif
static struct quota_format_type *quota_formats; /* List of registered formats */
static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;
@@ -226,6 +228,10 @@ static struct hlist_head *dquot_hash;
struct dqstats dqstats;
EXPORT_SYMBOL(dqstats);
+#ifdef CONFIG_SMP
+struct dqstats *dqstats_pcpu;
+EXPORT_SYMBOL(dqstats_pcpu);
+#endif
static qsize_t inode_get_rsv_space(struct inode *inode);
static void __dquot_initialize(struct inode *inode, int type);
@@ -273,7 +279,7 @@ static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
static inline void put_dquot_last(struct dquot *dquot)
{
list_add_tail(&dquot->dq_free, &free_dquots);
- dqstats.free_dquots++;
+ dqstats_inc(DQST_FREE_DQUOTS);
}
static inline void remove_free_dquot(struct dquot *dquot)
@@ -281,7 +287,7 @@ static inline void remove_free_dquot(struct dquot *dquot)
if (list_empty(&dquot->dq_free))
return;
list_del_init(&dquot->dq_free);
- dqstats.free_dquots--;
+ dqstats_dec(DQST_FREE_DQUOTS);
}
static inline void put_inuse(struct dquot *dquot)
@@ -289,12 +295,12 @@ static inline void put_inuse(struct dquot *dquot)
/* We add to the back of inuse list so we don't have to restart
* when traversing this list and we block */
list_add_tail(&dquot->dq_inuse, &inuse_list);
- dqstats.allocated_dquots++;
+ dqstats_inc(DQST_ALLOC_DQUOTS);
}
static inline void remove_inuse(struct dquot *dquot)
{
- dqstats.allocated_dquots--;
+ dqstats_dec(DQST_ALLOC_DQUOTS);
list_del(&dquot->dq_inuse);
}
/*
@@ -317,14 +323,23 @@ static inline int mark_dquot_dirty(struct dquot *dquot)
return dquot->dq_sb->dq_op->mark_dirty(dquot);
}
+/* Mark dquot dirty in an atomic manner, and return its old dirty flag state */
int dquot_mark_dquot_dirty(struct dquot *dquot)
{
+ int ret = 1;
+
+ /* If quota is dirty already, we don't have to acquire dq_list_lock */
+ if (test_bit(DQ_MOD_B, &dquot->dq_flags))
+ return 1;
+
spin_lock(&dq_list_lock);
- if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags))
+ if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
info[dquot->dq_type].dqi_dirty_list);
+ ret = 0;
+ }
spin_unlock(&dq_list_lock);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(dquot_mark_dquot_dirty);
@@ -550,8 +565,8 @@ int dquot_scan_active(struct super_block *sb,
continue;
/* Now we have active dquot so we can just increase use count */
atomic_inc(&dquot->dq_count);
- dqstats.lookups++;
spin_unlock(&dq_list_lock);
+ dqstats_inc(DQST_LOOKUPS);
dqput(old_dquot);
old_dquot = dquot;
ret = fn(dquot, priv);
@@ -596,8 +611,8 @@ int vfs_quota_sync(struct super_block *sb, int type, int wait)
* holding reference so we can safely just increase
* use count */
atomic_inc(&dquot->dq_count);
- dqstats.lookups++;
spin_unlock(&dq_list_lock);
+ dqstats_inc(DQST_LOOKUPS);
sb->dq_op->write_dquot(dquot);
dqput(dquot);
spin_lock(&dq_list_lock);
@@ -609,9 +624,7 @@ int vfs_quota_sync(struct super_block *sb, int type, int wait)
if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
&& info_dirty(&dqopt->info[cnt]))
sb->dq_op->write_info(sb, cnt);
- spin_lock(&dq_list_lock);
- dqstats.syncs++;
- spin_unlock(&dq_list_lock);
+ dqstats_inc(DQST_SYNCS);
mutex_unlock(&dqopt->dqonoff_mutex);
if (!wait || (sb_dqopt(sb)->flags & DQUOT_QUOTA_SYS_FILE))
@@ -663,6 +676,22 @@ static void prune_dqcache(int count)
}
}
+static int dqstats_read(unsigned int type)
+{
+ int count = 0;
+ int cpu;
+#ifdef CONFIG_SMP
+ for_each_possible_cpu(cpu)
+ count += per_cpu_ptr(dqstats_pcpu, cpu)->stat[type];
+ /* Statistics reading is racy, but absolute accuracy isn't required */
+ if (count < 0)
+ count = 0;
+#else
+ count = dqstats.stat[type];
+#endif
+ return count;
+}
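This patch converts open-coded counter updates such as dqstats.lookups++ into dqstats_inc()/dqstats_dec() calls, but the helpers themselves are defined outside the hunks shown here. As a reading aid only, a plausible shape for dqstats_inc(), consistent with dqstats_pcpu and the non-SMP fallback in dqstats_read() above, might be:

/* Hypothetical sketch; the real helper lives elsewhere in this series. */
static inline void dqstats_inc(unsigned int type)
{
#ifdef CONFIG_SMP
	per_cpu_ptr(dqstats_pcpu, get_cpu())->stat[type]++;
	put_cpu();
#else
	dqstats.stat[type]++;
#endif
}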
+
/*
* This is called from kswapd when we think we need some
* more memory
@@ -675,7 +704,7 @@ static int shrink_dqcache_memory(int nr, gfp_t gfp_mask)
prune_dqcache(nr);
spin_unlock(&dq_list_lock);
}
- return (dqstats.free_dquots / 100) * sysctl_vfs_cache_pressure;
+ return (dqstats_read(DQST_FREE_DQUOTS)/100) * sysctl_vfs_cache_pressure;
}
static struct shrinker dqcache_shrinker = {
@@ -703,10 +732,7 @@ void dqput(struct dquot *dquot)
BUG();
}
#endif
-
- spin_lock(&dq_list_lock);
- dqstats.drops++;
- spin_unlock(&dq_list_lock);
+ dqstats_inc(DQST_DROPS);
we_slept:
spin_lock(&dq_list_lock);
if (atomic_read(&dquot->dq_count) > 1) {
@@ -823,15 +849,15 @@ we_slept:
put_inuse(dquot);
/* hash it first so it can be found */
insert_dquot_hash(dquot);
- dqstats.lookups++;
spin_unlock(&dq_list_lock);
+ dqstats_inc(DQST_LOOKUPS);
} else {
if (!atomic_read(&dquot->dq_count))
remove_free_dquot(dquot);
atomic_inc(&dquot->dq_count);
- dqstats.cache_hits++;
- dqstats.lookups++;
spin_unlock(&dq_list_lock);
+ dqstats_inc(DQST_CACHE_HITS);
+ dqstats_inc(DQST_LOOKUPS);
}
/* Wait for dq_lock - after this we know that either dquot_release() is
* already finished or it will be canceled due to dq_count > 1 test */
@@ -2275,25 +2301,30 @@ static inline qsize_t stoqb(qsize_t space)
}
/* Generic routine for getting common part of quota structure */
-static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di)
+static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
{
struct mem_dqblk *dm = &dquot->dq_dqb;
+ memset(di, 0, sizeof(*di));
+ di->d_version = FS_DQUOT_VERSION;
+ di->d_flags = dquot->dq_type == USRQUOTA ?
+ XFS_USER_QUOTA : XFS_GROUP_QUOTA;
+ di->d_id = dquot->dq_id;
+
spin_lock(&dq_data_lock);
- di->dqb_bhardlimit = stoqb(dm->dqb_bhardlimit);
- di->dqb_bsoftlimit = stoqb(dm->dqb_bsoftlimit);
- di->dqb_curspace = dm->dqb_curspace + dm->dqb_rsvspace;
- di->dqb_ihardlimit = dm->dqb_ihardlimit;
- di->dqb_isoftlimit = dm->dqb_isoftlimit;
- di->dqb_curinodes = dm->dqb_curinodes;
- di->dqb_btime = dm->dqb_btime;
- di->dqb_itime = dm->dqb_itime;
- di->dqb_valid = QIF_ALL;
+ di->d_blk_hardlimit = stoqb(dm->dqb_bhardlimit);
+ di->d_blk_softlimit = stoqb(dm->dqb_bsoftlimit);
+ di->d_ino_hardlimit = dm->dqb_ihardlimit;
+ di->d_ino_softlimit = dm->dqb_isoftlimit;
+ di->d_bcount = dm->dqb_curspace + dm->dqb_rsvspace;
+ di->d_icount = dm->dqb_curinodes;
+ di->d_btimer = dm->dqb_btime;
+ di->d_itimer = dm->dqb_itime;
spin_unlock(&dq_data_lock);
}
int vfs_get_dqblk(struct super_block *sb, int type, qid_t id,
- struct if_dqblk *di)
+ struct fs_disk_quota *di)
{
struct dquot *dquot;
@@ -2307,51 +2338,70 @@ int vfs_get_dqblk(struct super_block *sb, int type, qid_t id,
}
EXPORT_SYMBOL(vfs_get_dqblk);
+#define VFS_FS_DQ_MASK \
+ (FS_DQ_BCOUNT | FS_DQ_BSOFT | FS_DQ_BHARD | \
+ FS_DQ_ICOUNT | FS_DQ_ISOFT | FS_DQ_IHARD | \
+ FS_DQ_BTIMER | FS_DQ_ITIMER)
+
/* Generic routine for setting common part of quota structure */
-static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
+static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
{
struct mem_dqblk *dm = &dquot->dq_dqb;
int check_blim = 0, check_ilim = 0;
struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];
- if ((di->dqb_valid & QIF_BLIMITS &&
- (di->dqb_bhardlimit > dqi->dqi_maxblimit ||
- di->dqb_bsoftlimit > dqi->dqi_maxblimit)) ||
- (di->dqb_valid & QIF_ILIMITS &&
- (di->dqb_ihardlimit > dqi->dqi_maxilimit ||
- di->dqb_isoftlimit > dqi->dqi_maxilimit)))
+ if (di->d_fieldmask & ~VFS_FS_DQ_MASK)
+ return -EINVAL;
+
+ if (((di->d_fieldmask & FS_DQ_BSOFT) &&
+ (di->d_blk_softlimit > dqi->dqi_maxblimit)) ||
+ ((di->d_fieldmask & FS_DQ_BHARD) &&
+ (di->d_blk_hardlimit > dqi->dqi_maxblimit)) ||
+ ((di->d_fieldmask & FS_DQ_ISOFT) &&
+ (di->d_ino_softlimit > dqi->dqi_maxilimit)) ||
+ ((di->d_fieldmask & FS_DQ_IHARD) &&
+ (di->d_ino_hardlimit > dqi->dqi_maxilimit)))
return -ERANGE;
spin_lock(&dq_data_lock);
- if (di->dqb_valid & QIF_SPACE) {
- dm->dqb_curspace = di->dqb_curspace - dm->dqb_rsvspace;
+ if (di->d_fieldmask & FS_DQ_BCOUNT) {
+ dm->dqb_curspace = di->d_bcount - dm->dqb_rsvspace;
check_blim = 1;
set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
}
- if (di->dqb_valid & QIF_BLIMITS) {
- dm->dqb_bsoftlimit = qbtos(di->dqb_bsoftlimit);
- dm->dqb_bhardlimit = qbtos(di->dqb_bhardlimit);
+
+ if (di->d_fieldmask & FS_DQ_BSOFT)
+ dm->dqb_bsoftlimit = qbtos(di->d_blk_softlimit);
+ if (di->d_fieldmask & FS_DQ_BHARD)
+ dm->dqb_bhardlimit = qbtos(di->d_blk_hardlimit);
+ if (di->d_fieldmask & (FS_DQ_BSOFT | FS_DQ_BHARD)) {
check_blim = 1;
set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
}
- if (di->dqb_valid & QIF_INODES) {
- dm->dqb_curinodes = di->dqb_curinodes;
+
+ if (di->d_fieldmask & FS_DQ_ICOUNT) {
+ dm->dqb_curinodes = di->d_icount;
check_ilim = 1;
set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
}
- if (di->dqb_valid & QIF_ILIMITS) {
- dm->dqb_isoftlimit = di->dqb_isoftlimit;
- dm->dqb_ihardlimit = di->dqb_ihardlimit;
+
+ if (di->d_fieldmask & FS_DQ_ISOFT)
+ dm->dqb_isoftlimit = di->d_ino_softlimit;
+ if (di->d_fieldmask & FS_DQ_IHARD)
+ dm->dqb_ihardlimit = di->d_ino_hardlimit;
+ if (di->d_fieldmask & (FS_DQ_ISOFT | FS_DQ_IHARD)) {
check_ilim = 1;
set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
}
- if (di->dqb_valid & QIF_BTIME) {
- dm->dqb_btime = di->dqb_btime;
+
+ if (di->d_fieldmask & FS_DQ_BTIMER) {
+ dm->dqb_btime = di->d_btimer;
check_blim = 1;
set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
}
- if (di->dqb_valid & QIF_ITIME) {
- dm->dqb_itime = di->dqb_itime;
+
+ if (di->d_fieldmask & FS_DQ_ITIMER) {
+ dm->dqb_itime = di->d_itimer;
check_ilim = 1;
set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
}
@@ -2361,7 +2411,7 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
dm->dqb_curspace < dm->dqb_bsoftlimit) {
dm->dqb_btime = 0;
clear_bit(DQ_BLKS_B, &dquot->dq_flags);
- } else if (!(di->dqb_valid & QIF_BTIME))
+ } else if (!(di->d_fieldmask & FS_DQ_BTIMER))
/* Set grace only if user hasn't provided his own... */
dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
}
@@ -2370,7 +2420,7 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
dm->dqb_curinodes < dm->dqb_isoftlimit) {
dm->dqb_itime = 0;
clear_bit(DQ_INODES_B, &dquot->dq_flags);
- } else if (!(di->dqb_valid & QIF_ITIME))
+ } else if (!(di->d_fieldmask & FS_DQ_ITIMER))
/* Set grace only if user hasn't provided his own... */
dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
}
@@ -2386,7 +2436,7 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
}
int vfs_set_dqblk(struct super_block *sb, int type, qid_t id,
- struct if_dqblk *di)
+ struct fs_disk_quota *di)
{
struct dquot *dquot;
int rc;
@@ -2465,62 +2515,74 @@ const struct quotactl_ops vfs_quotactl_ops = {
.set_dqblk = vfs_set_dqblk
};
+
+static int do_proc_dqstats(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+#ifdef CONFIG_SMP
+ /* Update global table */
+ unsigned int type = (int *)table->data - dqstats.stat;
+ dqstats.stat[type] = dqstats_read(type);
+#endif
+ return proc_dointvec(table, write, buffer, lenp, ppos);
+}
+
static ctl_table fs_dqstats_table[] = {
{
.procname = "lookups",
- .data = &dqstats.lookups,
+ .data = &dqstats.stat[DQST_LOOKUPS],
.maxlen = sizeof(int),
.mode = 0444,
- .proc_handler = proc_dointvec,
+ .proc_handler = do_proc_dqstats,
},
{
.procname = "drops",
- .data = &dqstats.drops,
+ .data = &dqstats.stat[DQST_DROPS],
.maxlen = sizeof(int),
.mode = 0444,
- .proc_handler = proc_dointvec,
+ .proc_handler = do_proc_dqstats,
},
{
.procname = "reads",
- .data = &dqstats.reads,
+ .data = &dqstats.stat[DQST_READS],
.maxlen = sizeof(int),
.mode = 0444,
- .proc_handler = proc_dointvec,
+ .proc_handler = do_proc_dqstats,
},
{
.procname = "writes",
- .data = &dqstats.writes,
+ .data = &dqstats.stat[DQST_WRITES],
.maxlen = sizeof(int),
.mode = 0444,
- .proc_handler = proc_dointvec,
+ .proc_handler = do_proc_dqstats,
},
{
.procname = "cache_hits",
- .data = &dqstats.cache_hits,
+ .data = &dqstats.stat[DQST_CACHE_HITS],
.maxlen = sizeof(int),
.mode = 0444,
- .proc_handler = proc_dointvec,
+ .proc_handler = do_proc_dqstats,
},
{
.procname = "allocated_dquots",
- .data = &dqstats.allocated_dquots,
+ .data = &dqstats.stat[DQST_ALLOC_DQUOTS],
.maxlen = sizeof(int),
.mode = 0444,
- .proc_handler = proc_dointvec,
+ .proc_handler = do_proc_dqstats,
},
{
.procname = "free_dquots",
- .data = &dqstats.free_dquots,
+ .data = &dqstats.stat[DQST_FREE_DQUOTS],
.maxlen = sizeof(int),
.mode = 0444,
- .proc_handler = proc_dointvec,
+ .proc_handler = do_proc_dqstats,
},
{
.procname = "syncs",
- .data = &dqstats.syncs,
+ .data = &dqstats.stat[DQST_SYNCS],
.maxlen = sizeof(int),
.mode = 0444,
- .proc_handler = proc_dointvec,
+ .proc_handler = do_proc_dqstats,
},
#ifdef CONFIG_PRINT_QUOTA_WARNING
{
@@ -2572,6 +2634,13 @@ static int __init dquot_init(void)
if (!dquot_hash)
panic("Cannot create dquot hash table");
+#ifdef CONFIG_SMP
+ dqstats_pcpu = alloc_percpu(struct dqstats);
+ if (!dqstats_pcpu)
+ panic("Cannot create dquot stats table");
+#endif
+ memset(&dqstats, 0, sizeof(struct dqstats));
+
/* Find power-of-two hlist_heads which can fit into allocation */
nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
dq_hash_bits = 0;
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index 95388f9b7356..cfc78826da90 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -113,8 +113,6 @@ static int quota_getinfo(struct super_block *sb, int type, void __user *addr)
struct if_dqinfo info;
int ret;
- if (!sb_has_quota_active(sb, type))
- return -ESRCH;
if (!sb->s_qcop->get_info)
return -ENOSYS;
ret = sb->s_qcop->get_info(sb, type, &info);
@@ -129,43 +127,80 @@ static int quota_setinfo(struct super_block *sb, int type, void __user *addr)
if (copy_from_user(&info, addr, sizeof(info)))
return -EFAULT;
- if (!sb_has_quota_active(sb, type))
- return -ESRCH;
if (!sb->s_qcop->set_info)
return -ENOSYS;
return sb->s_qcop->set_info(sb, type, &info);
}
+static void copy_to_if_dqblk(struct if_dqblk *dst, struct fs_disk_quota *src)
+{
+ dst->dqb_bhardlimit = src->d_blk_hardlimit;
+ dst->dqb_bsoftlimit = src->d_blk_softlimit;
+ dst->dqb_curspace = src->d_bcount;
+ dst->dqb_ihardlimit = src->d_ino_hardlimit;
+ dst->dqb_isoftlimit = src->d_ino_softlimit;
+ dst->dqb_curinodes = src->d_icount;
+ dst->dqb_btime = src->d_btimer;
+ dst->dqb_itime = src->d_itimer;
+ dst->dqb_valid = QIF_ALL;
+}
+
static int quota_getquota(struct super_block *sb, int type, qid_t id,
void __user *addr)
{
+ struct fs_disk_quota fdq;
struct if_dqblk idq;
int ret;
- if (!sb_has_quota_active(sb, type))
- return -ESRCH;
if (!sb->s_qcop->get_dqblk)
return -ENOSYS;
- ret = sb->s_qcop->get_dqblk(sb, type, id, &idq);
+ ret = sb->s_qcop->get_dqblk(sb, type, id, &fdq);
if (ret)
return ret;
+ copy_to_if_dqblk(&idq, &fdq);
if (copy_to_user(addr, &idq, sizeof(idq)))
return -EFAULT;
return 0;
}
+static void copy_from_if_dqblk(struct fs_disk_quota *dst, struct if_dqblk *src)
+{
+ dst->d_blk_hardlimit = src->dqb_bhardlimit;
+ dst->d_blk_softlimit = src->dqb_bsoftlimit;
+ dst->d_bcount = src->dqb_curspace;
+ dst->d_ino_hardlimit = src->dqb_ihardlimit;
+ dst->d_ino_softlimit = src->dqb_isoftlimit;
+ dst->d_icount = src->dqb_curinodes;
+ dst->d_btimer = src->dqb_btime;
+ dst->d_itimer = src->dqb_itime;
+
+ dst->d_fieldmask = 0;
+ if (src->dqb_valid & QIF_BLIMITS)
+ dst->d_fieldmask |= FS_DQ_BSOFT | FS_DQ_BHARD;
+ if (src->dqb_valid & QIF_SPACE)
+ dst->d_fieldmask |= FS_DQ_BCOUNT;
+ if (src->dqb_valid & QIF_ILIMITS)
+ dst->d_fieldmask |= FS_DQ_ISOFT | FS_DQ_IHARD;
+ if (src->dqb_valid & QIF_INODES)
+ dst->d_fieldmask |= FS_DQ_ICOUNT;
+ if (src->dqb_valid & QIF_BTIME)
+ dst->d_fieldmask |= FS_DQ_BTIMER;
+ if (src->dqb_valid & QIF_ITIME)
+ dst->d_fieldmask |= FS_DQ_ITIMER;
+}
+
static int quota_setquota(struct super_block *sb, int type, qid_t id,
void __user *addr)
{
+ struct fs_disk_quota fdq;
struct if_dqblk idq;
if (copy_from_user(&idq, addr, sizeof(idq)))
return -EFAULT;
- if (!sb_has_quota_active(sb, type))
- return -ESRCH;
if (!sb->s_qcop->set_dqblk)
return -ENOSYS;
- return sb->s_qcop->set_dqblk(sb, type, id, &idq);
+ copy_from_if_dqblk(&fdq, &idq);
+ return sb->s_qcop->set_dqblk(sb, type, id, &fdq);
}
static int quota_setxstate(struct super_block *sb, int cmd, void __user *addr)
@@ -199,9 +234,9 @@ static int quota_setxquota(struct super_block *sb, int type, qid_t id,
if (copy_from_user(&fdq, addr, sizeof(fdq)))
return -EFAULT;
- if (!sb->s_qcop->set_xquota)
+ if (!sb->s_qcop->set_dqblk)
return -ENOSYS;
- return sb->s_qcop->set_xquota(sb, type, id, &fdq);
+ return sb->s_qcop->set_dqblk(sb, type, id, &fdq);
}
static int quota_getxquota(struct super_block *sb, int type, qid_t id,
@@ -210,9 +245,9 @@ static int quota_getxquota(struct super_block *sb, int type, qid_t id,
struct fs_disk_quota fdq;
int ret;
- if (!sb->s_qcop->get_xquota)
+ if (!sb->s_qcop->get_dqblk)
return -ENOSYS;
- ret = sb->s_qcop->get_xquota(sb, type, id, &fdq);
+ ret = sb->s_qcop->get_dqblk(sb, type, id, &fdq);
if (!ret && copy_to_user(addr, &fdq, sizeof(fdq)))
return -EFAULT;
return ret;
diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c
index f81f4bcfb178..5b7f7416ec7a 100644
--- a/fs/quota/quota_tree.c
+++ b/fs/quota/quota_tree.c
@@ -384,7 +384,7 @@ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
} else {
ret = 0;
}
- dqstats.writes++;
+ dqstats_inc(DQST_WRITES);
kfree(ddquot);
return ret;
@@ -634,7 +634,7 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
spin_unlock(&dq_data_lock);
kfree(ddquot);
out:
- dqstats.reads++;
+ dqstats_inc(DQST_READS);
return ret;
}
EXPORT_SYMBOL(qtree_read_dquot);
diff --git a/fs/quota/quota_v1.c b/fs/quota/quota_v1.c
index 2ae757e9c008..4af344c5852a 100644
--- a/fs/quota/quota_v1.c
+++ b/fs/quota/quota_v1.c
@@ -71,7 +71,7 @@ static int v1_read_dqblk(struct dquot *dquot)
dquot->dq_dqb.dqb_ihardlimit == 0 &&
dquot->dq_dqb.dqb_isoftlimit == 0)
set_bit(DQ_FAKE_B, &dquot->dq_flags);
- dqstats.reads++;
+ dqstats_inc(DQST_READS);
return 0;
}
@@ -104,7 +104,7 @@ static int v1_commit_dqblk(struct dquot *dquot)
ret = 0;
out:
- dqstats.writes++;
+ dqstats_inc(DQST_WRITES);
return ret;
}
diff --git a/fs/read_write.c b/fs/read_write.c
index 113386d6fd2d..f41a901f17c5 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -294,7 +294,7 @@ ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
else
ret = do_sync_read(file, buf, count, pos);
if (ret > 0) {
- fsnotify_access(file->f_path.dentry);
+ fsnotify_access(file);
add_rchar(current, ret);
}
inc_syscr(current);
@@ -350,7 +350,7 @@ ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_
else
ret = do_sync_write(file, buf, count, pos);
if (ret > 0) {
- fsnotify_modify(file->f_path.dentry);
+ fsnotify_modify(file);
add_wchar(current, ret);
}
inc_syscw(current);
@@ -658,9 +658,9 @@ out:
kfree(iov);
if ((ret + (type == READ)) > 0) {
if (type == READ)
- fsnotify_access(file->f_path.dentry);
+ fsnotify_access(file);
else
- fsnotify_modify(file->f_path.dentry);
+ fsnotify_modify(file);
}
return ret;
}
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index 1d9c12714c5c..9977df9f3a54 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -147,7 +147,8 @@ static int reiserfs_sync_file(struct file *filp,
barrier_done = reiserfs_commit_for_inode(inode);
reiserfs_write_unlock(inode->i_sb);
if (barrier_done != 1 && reiserfs_barrier_flush(inode->i_sb))
- blkdev_issue_flush(inode->i_sb->s_bdev, NULL);
+ blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL,
+ BLKDEV_IFL_WAIT);
if (barrier_done < 0)
return barrier_done;
return (err < 0) ? -EIO : 0;
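blkdev_issue_flush() grew gfp-mask and flag arguments in this series, so callers like the hunk above pass GFP_KERNEL, a NULL error sector and BLKDEV_IFL_WAIT explicitly. A minimal sketch of a synchronous flush with the updated call, as used by the reiserfs and XFS hunks in this patch (the wrapper name is made up for illustration):

/*
 * Sketch: flush the volatile write cache of a filesystem's backing
 * device and wait for completion.  BLKDEV_IFL_WAIT makes the call
 * block until the flush request has finished.
 */
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/gfp.h>

static int example_flush_backing_dev(struct super_block *sb)
{
	return blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL,
				  BLKDEV_IFL_WAIT);
}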
diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
index 77d5cf4a7547..bcf5a16f30bb 100644
--- a/fs/ubifs/io.c
+++ b/fs/ubifs/io.c
@@ -64,6 +64,7 @@ void ubifs_ro_mode(struct ubifs_info *c, int err)
if (!c->ro_media) {
c->ro_media = 1;
c->no_chk_data_crc = 0;
+ c->vfs_sb->s_flags |= MS_RDONLY;
ubifs_warn("switched to read-only mode, error %d", err);
dbg_dump_stack();
}
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 80b68c3702d1..cffa756f1047 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -603,7 +603,7 @@ static void ufs_set_inode_ops(struct inode *inode)
if (!inode->i_blocks)
inode->i_op = &ufs_fast_symlink_inode_operations;
else {
- inode->i_op = &page_symlink_inode_operations;
+ inode->i_op = &ufs_symlink_inode_operations;
inode->i_mapping->a_ops = &ufs_aops;
}
} else
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index 118556243e7a..eabc02eb1294 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -148,7 +148,7 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry,
if (l > UFS_SB(sb)->s_uspi->s_maxsymlinklen) {
/* slow symlink */
- inode->i_op = &page_symlink_inode_operations;
+ inode->i_op = &ufs_symlink_inode_operations;
inode->i_mapping->a_ops = &ufs_aops;
err = page_symlink(inode, symname, l);
if (err)
diff --git a/fs/ufs/symlink.c b/fs/ufs/symlink.c
index c0156eda44bc..d283628b4778 100644
--- a/fs/ufs/symlink.c
+++ b/fs/ufs/symlink.c
@@ -42,4 +42,12 @@ static void *ufs_follow_link(struct dentry *dentry, struct nameidata *nd)
const struct inode_operations ufs_fast_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = ufs_follow_link,
+ .setattr = ufs_setattr,
+};
+
+const struct inode_operations ufs_symlink_inode_operations = {
+ .readlink = generic_readlink,
+ .follow_link = page_follow_link_light,
+ .put_link = page_put_link,
+ .setattr = ufs_setattr,
};
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c
index d3b6270cb377..ee8db3e77bfe 100644
--- a/fs/ufs/truncate.c
+++ b/fs/ufs/truncate.c
@@ -508,7 +508,7 @@ out:
* - there is no way to know old size
* - there is no way to inform the user about an error, if it happens in `truncate'
*/
-static int ufs_setattr(struct dentry *dentry, struct iattr *attr)
+int ufs_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = dentry->d_inode;
unsigned int ia_valid = attr->ia_valid;
diff --git a/fs/ufs/ufs.h b/fs/ufs/ufs.h
index 43f9f5d5670e..179ae6b3180a 100644
--- a/fs/ufs/ufs.h
+++ b/fs/ufs/ufs.h
@@ -122,9 +122,11 @@ extern void ufs_panic (struct super_block *, const char *, const char *, ...) __
/* symlink.c */
extern const struct inode_operations ufs_fast_symlink_inode_operations;
+extern const struct inode_operations ufs_symlink_inode_operations;
/* truncate.c */
extern int ufs_truncate (struct inode *, loff_t);
+extern int ufs_setattr(struct dentry *dentry, struct iattr *attr);
static inline struct ufs_sb_info *UFS_SB(struct super_block *sb)
{
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 0f8b9968a803..089eaca860b4 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -45,6 +45,15 @@
#include <linux/pagevec.h>
#include <linux/writeback.h>
+/*
+ * Types of I/O for bmap clustering and I/O completion tracking.
+ */
+enum {
+ IO_READ, /* mapping for a read */
+ IO_DELAY, /* mapping covers delalloc region */
+ IO_UNWRITTEN, /* mapping covers allocated but uninitialized data */
+ IO_NEW /* just allocated */
+};
/*
* Prime number of hash buckets since address is used as the key.
@@ -103,8 +112,9 @@ xfs_count_page_state(
STATIC struct block_device *
xfs_find_bdev_for_inode(
- struct xfs_inode *ip)
+ struct inode *inode)
{
+ struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
if (XFS_IS_REALTIME_INODE(ip))
@@ -183,7 +193,7 @@ xfs_setfilesize(
xfs_fsize_t isize;
ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
- ASSERT(ioend->io_type != IOMAP_READ);
+ ASSERT(ioend->io_type != IO_READ);
if (unlikely(ioend->io_error))
return 0;
@@ -214,7 +224,7 @@ xfs_finish_ioend(
if (atomic_dec_and_test(&ioend->io_remaining)) {
struct workqueue_struct *wq;
- wq = (ioend->io_type == IOMAP_UNWRITTEN) ?
+ wq = (ioend->io_type == IO_UNWRITTEN) ?
xfsconvertd_workqueue : xfsdatad_workqueue;
queue_work(wq, &ioend->io_work);
if (wait)
@@ -237,7 +247,7 @@ xfs_end_io(
* For unwritten extents we need to issue transactions to convert a
* range to normal written extents after the data I/O has finished.
*/
- if (ioend->io_type == IOMAP_UNWRITTEN &&
+ if (ioend->io_type == IO_UNWRITTEN &&
likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) {
error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
@@ -250,7 +260,7 @@ xfs_end_io(
* We might have to update the on-disk file size after extending
* writes.
*/
- if (ioend->io_type != IOMAP_READ) {
+ if (ioend->io_type != IO_READ) {
error = xfs_setfilesize(ioend);
ASSERT(!error || error == EAGAIN);
}
@@ -309,21 +319,25 @@ xfs_map_blocks(
struct inode *inode,
loff_t offset,
ssize_t count,
- xfs_iomap_t *mapp,
+ struct xfs_bmbt_irec *imap,
int flags)
{
int nmaps = 1;
+ int new = 0;
- return -xfs_iomap(XFS_I(inode), offset, count, flags, mapp, &nmaps);
+ return -xfs_iomap(XFS_I(inode), offset, count, flags, imap, &nmaps, &new);
}
STATIC int
-xfs_iomap_valid(
- xfs_iomap_t *iomapp,
- loff_t offset)
+xfs_imap_valid(
+ struct inode *inode,
+ struct xfs_bmbt_irec *imap,
+ xfs_off_t offset)
{
- return offset >= iomapp->iomap_offset &&
- offset < iomapp->iomap_offset + iomapp->iomap_bsize;
+ offset >>= inode->i_blkbits;
+
+ return offset >= imap->br_startoff &&
+ offset < imap->br_startoff + imap->br_blockcount;
}
/*
@@ -554,19 +568,23 @@ xfs_add_to_ioend(
STATIC void
xfs_map_buffer(
+ struct inode *inode,
struct buffer_head *bh,
- xfs_iomap_t *mp,
- xfs_off_t offset,
- uint block_bits)
+ struct xfs_bmbt_irec *imap,
+ xfs_off_t offset)
{
sector_t bn;
+ struct xfs_mount *m = XFS_I(inode)->i_mount;
+ xfs_off_t iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
+ xfs_daddr_t iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);
- ASSERT(mp->iomap_bn != IOMAP_DADDR_NULL);
+ ASSERT(imap->br_startblock != HOLESTARTBLOCK);
+ ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
- bn = (mp->iomap_bn >> (block_bits - BBSHIFT)) +
- ((offset - mp->iomap_offset) >> block_bits);
+ bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
+ ((offset - iomap_offset) >> inode->i_blkbits);
- ASSERT(bn || (mp->iomap_flags & IOMAP_REALTIME));
+ ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));
bh->b_blocknr = bn;
set_buffer_mapped(bh);
@@ -574,17 +592,17 @@ xfs_map_buffer(
STATIC void
xfs_map_at_offset(
+ struct inode *inode,
struct buffer_head *bh,
- loff_t offset,
- int block_bits,
- xfs_iomap_t *iomapp)
+ struct xfs_bmbt_irec *imap,
+ xfs_off_t offset)
{
- ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
- ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));
+ ASSERT(imap->br_startblock != HOLESTARTBLOCK);
+ ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
lock_buffer(bh);
- xfs_map_buffer(bh, iomapp, offset, block_bits);
- bh->b_bdev = iomapp->iomap_target->bt_bdev;
+ xfs_map_buffer(inode, bh, imap, offset);
+ bh->b_bdev = xfs_find_bdev_for_inode(inode);
set_buffer_mapped(bh);
clear_buffer_delay(bh);
clear_buffer_unwritten(bh);
@@ -713,11 +731,11 @@ xfs_is_delayed_page(
bh = head = page_buffers(page);
do {
if (buffer_unwritten(bh))
- acceptable = (type == IOMAP_UNWRITTEN);
+ acceptable = (type == IO_UNWRITTEN);
else if (buffer_delay(bh))
- acceptable = (type == IOMAP_DELAY);
+ acceptable = (type == IO_DELAY);
else if (buffer_dirty(bh) && buffer_mapped(bh))
- acceptable = (type == IOMAP_NEW);
+ acceptable = (type == IO_NEW);
else
break;
} while ((bh = bh->b_this_page) != head);
@@ -740,7 +758,7 @@ xfs_convert_page(
struct inode *inode,
struct page *page,
loff_t tindex,
- xfs_iomap_t *mp,
+ struct xfs_bmbt_irec *imap,
xfs_ioend_t **ioendp,
struct writeback_control *wbc,
int startio,
@@ -750,7 +768,6 @@ xfs_convert_page(
xfs_off_t end_offset;
unsigned long p_offset;
unsigned int type;
- int bbits = inode->i_blkbits;
int len, page_dirty;
int count = 0, done = 0, uptodate = 1;
xfs_off_t offset = page_offset(page);
@@ -802,19 +819,19 @@ xfs_convert_page(
if (buffer_unwritten(bh) || buffer_delay(bh)) {
if (buffer_unwritten(bh))
- type = IOMAP_UNWRITTEN;
+ type = IO_UNWRITTEN;
else
- type = IOMAP_DELAY;
+ type = IO_DELAY;
- if (!xfs_iomap_valid(mp, offset)) {
+ if (!xfs_imap_valid(inode, imap, offset)) {
done = 1;
continue;
}
- ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
- ASSERT(!(mp->iomap_flags & IOMAP_DELAY));
+ ASSERT(imap->br_startblock != HOLESTARTBLOCK);
+ ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
- xfs_map_at_offset(bh, offset, bbits, mp);
+ xfs_map_at_offset(inode, bh, imap, offset);
if (startio) {
xfs_add_to_ioend(inode, bh, offset,
type, ioendp, done);
@@ -826,7 +843,7 @@ xfs_convert_page(
page_dirty--;
count++;
} else {
- type = IOMAP_NEW;
+ type = IO_NEW;
if (buffer_mapped(bh) && all_bh && startio) {
lock_buffer(bh);
xfs_add_to_ioend(inode, bh, offset,
@@ -866,7 +883,7 @@ STATIC void
xfs_cluster_write(
struct inode *inode,
pgoff_t tindex,
- xfs_iomap_t *iomapp,
+ struct xfs_bmbt_irec *imap,
xfs_ioend_t **ioendp,
struct writeback_control *wbc,
int startio,
@@ -885,7 +902,7 @@ xfs_cluster_write(
for (i = 0; i < pagevec_count(&pvec); i++) {
done = xfs_convert_page(inode, pvec.pages[i], tindex++,
- iomapp, ioendp, wbc, startio, all_bh);
+ imap, ioendp, wbc, startio, all_bh);
if (done)
break;
}
@@ -930,7 +947,7 @@ xfs_aops_discard_page(
loff_t offset = page_offset(page);
ssize_t len = 1 << inode->i_blkbits;
- if (!xfs_is_delayed_page(page, IOMAP_DELAY))
+ if (!xfs_is_delayed_page(page, IO_DELAY))
goto out_invalidate;
if (XFS_FORCED_SHUTDOWN(ip->i_mount))
@@ -1042,15 +1059,15 @@ xfs_page_state_convert(
int unmapped) /* also implies page uptodate */
{
struct buffer_head *bh, *head;
- xfs_iomap_t iomap;
+ struct xfs_bmbt_irec imap;
xfs_ioend_t *ioend = NULL, *iohead = NULL;
loff_t offset;
unsigned long p_offset = 0;
unsigned int type;
__uint64_t end_offset;
- pgoff_t end_index, last_index, tlast;
+ pgoff_t end_index, last_index;
ssize_t size, len;
- int flags, err, iomap_valid = 0, uptodate = 1;
+ int flags, err, imap_valid = 0, uptodate = 1;
int page_dirty, count = 0;
int trylock = 0;
int all_bh = unmapped;
@@ -1097,7 +1114,7 @@ xfs_page_state_convert(
bh = head = page_buffers(page);
offset = page_offset(page);
flags = BMAPI_READ;
- type = IOMAP_NEW;
+ type = IO_NEW;
/* TODO: cleanup count and page_dirty */
@@ -1111,12 +1128,12 @@ xfs_page_state_convert(
* the iomap is actually still valid, but the ioend
* isn't. shouldn't happen too often.
*/
- iomap_valid = 0;
+ imap_valid = 0;
continue;
}
- if (iomap_valid)
- iomap_valid = xfs_iomap_valid(&iomap, offset);
+ if (imap_valid)
+ imap_valid = xfs_imap_valid(inode, &imap, offset);
/*
* First case, map an unwritten extent and prepare for
@@ -1137,20 +1154,20 @@ xfs_page_state_convert(
* Make sure we don't use a read-only iomap
*/
if (flags == BMAPI_READ)
- iomap_valid = 0;
+ imap_valid = 0;
if (buffer_unwritten(bh)) {
- type = IOMAP_UNWRITTEN;
+ type = IO_UNWRITTEN;
flags = BMAPI_WRITE | BMAPI_IGNSTATE;
} else if (buffer_delay(bh)) {
- type = IOMAP_DELAY;
+ type = IO_DELAY;
flags = BMAPI_ALLOCATE | trylock;
} else {
- type = IOMAP_NEW;
+ type = IO_NEW;
flags = BMAPI_WRITE | BMAPI_MMAP;
}
- if (!iomap_valid) {
+ if (!imap_valid) {
/*
* if we didn't have a valid mapping then we
* need to ensure that we put the new mapping
@@ -1160,7 +1177,7 @@ xfs_page_state_convert(
* for unwritten extent conversion.
*/
new_ioend = 1;
- if (type == IOMAP_NEW) {
+ if (type == IO_NEW) {
size = xfs_probe_cluster(inode,
page, bh, head, 0);
} else {
@@ -1168,14 +1185,14 @@ xfs_page_state_convert(
}
err = xfs_map_blocks(inode, offset, size,
- &iomap, flags);
+ &imap, flags);
if (err)
goto error;
- iomap_valid = xfs_iomap_valid(&iomap, offset);
+ imap_valid = xfs_imap_valid(inode, &imap,
+ offset);
}
- if (iomap_valid) {
- xfs_map_at_offset(bh, offset,
- inode->i_blkbits, &iomap);
+ if (imap_valid) {
+ xfs_map_at_offset(inode, bh, &imap, offset);
if (startio) {
xfs_add_to_ioend(inode, bh, offset,
type, &ioend,
@@ -1194,40 +1211,41 @@ xfs_page_state_convert(
* That means it must already have extents allocated
* underneath it. Map the extent by reading it.
*/
- if (!iomap_valid || flags != BMAPI_READ) {
+ if (!imap_valid || flags != BMAPI_READ) {
flags = BMAPI_READ;
size = xfs_probe_cluster(inode, page, bh,
head, 1);
err = xfs_map_blocks(inode, offset, size,
- &iomap, flags);
+ &imap, flags);
if (err)
goto error;
- iomap_valid = xfs_iomap_valid(&iomap, offset);
+ imap_valid = xfs_imap_valid(inode, &imap,
+ offset);
}
/*
- * We set the type to IOMAP_NEW in case we are doing a
+ * We set the type to IO_NEW in case we are doing a
* small write at EOF that is extending the file but
* without needing an allocation. We need to update the
* file size on I/O completion in this case so it is
* the same case as having just allocated a new extent
* that we are writing into for the first time.
*/
- type = IOMAP_NEW;
+ type = IO_NEW;
if (trylock_buffer(bh)) {
ASSERT(buffer_mapped(bh));
- if (iomap_valid)
+ if (imap_valid)
all_bh = 1;
xfs_add_to_ioend(inode, bh, offset, type,
- &ioend, !iomap_valid);
+ &ioend, !imap_valid);
page_dirty--;
count++;
} else {
- iomap_valid = 0;
+ imap_valid = 0;
}
} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
(unmapped || startio)) {
- iomap_valid = 0;
+ imap_valid = 0;
}
if (!iohead)
@@ -1241,12 +1259,23 @@ xfs_page_state_convert(
if (startio)
xfs_start_page_writeback(page, 1, count);
- if (ioend && iomap_valid) {
- offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
- PAGE_CACHE_SHIFT;
- tlast = min_t(pgoff_t, offset, last_index);
- xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
- wbc, startio, all_bh, tlast);
+ if (ioend && imap_valid) {
+ xfs_off_t end_index;
+
+ end_index = imap.br_startoff + imap.br_blockcount;
+
+ /* to bytes */
+ end_index <<= inode->i_blkbits;
+
+ /* to pages */
+ end_index = (end_index - 1) >> PAGE_CACHE_SHIFT;
+
+ /* check against file size */
+ if (end_index > last_index)
+ end_index = last_index;
+
+ xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
+ wbc, startio, all_bh, end_index);
}
if (iohead)
@@ -1448,10 +1477,11 @@ __xfs_get_blocks(
int direct,
bmapi_flags_t flags)
{
- xfs_iomap_t iomap;
+ struct xfs_bmbt_irec imap;
xfs_off_t offset;
ssize_t size;
- int niomap = 1;
+ int nimap = 1;
+ int new = 0;
int error;
offset = (xfs_off_t)iblock << inode->i_blkbits;
@@ -1462,22 +1492,21 @@ __xfs_get_blocks(
return 0;
error = xfs_iomap(XFS_I(inode), offset, size,
- create ? flags : BMAPI_READ, &iomap, &niomap);
+ create ? flags : BMAPI_READ, &imap, &nimap, &new);
if (error)
return -error;
- if (niomap == 0)
+ if (nimap == 0)
return 0;
- if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
+ if (imap.br_startblock != HOLESTARTBLOCK &&
+ imap.br_startblock != DELAYSTARTBLOCK) {
/*
* For unwritten extents do not report a disk address on
* the read case (treat as if we're reading into a hole).
*/
- if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
- xfs_map_buffer(bh_result, &iomap, offset,
- inode->i_blkbits);
- }
- if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
+ if (create || !ISUNWRITTEN(&imap))
+ xfs_map_buffer(inode, bh_result, &imap, offset);
+ if (create && ISUNWRITTEN(&imap)) {
if (direct)
bh_result->b_private = inode;
set_buffer_unwritten(bh_result);
@@ -1488,7 +1517,7 @@ __xfs_get_blocks(
* If this is a realtime file, data may be on a different device
* to that pointed to from the buffer_head b_bdev currently.
*/
- bh_result->b_bdev = iomap.iomap_target->bt_bdev;
+ bh_result->b_bdev = xfs_find_bdev_for_inode(inode);
/*
* If we previously allocated a block out beyond eof and we are now
@@ -1502,10 +1531,10 @@ __xfs_get_blocks(
if (create &&
((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
(offset >= i_size_read(inode)) ||
- (iomap.iomap_flags & (IOMAP_NEW|IOMAP_UNWRITTEN))))
+ (new || ISUNWRITTEN(&imap))))
set_buffer_new(bh_result);
- if (iomap.iomap_flags & IOMAP_DELAY) {
+ if (imap.br_startblock == DELAYSTARTBLOCK) {
BUG_ON(direct);
if (create) {
set_buffer_uptodate(bh_result);
@@ -1514,11 +1543,23 @@ __xfs_get_blocks(
}
}
+ /*
+ * If this is O_DIRECT or the mpage code calling, tell them how large
+ * the mapping is, so that we can avoid repeated get_blocks calls.
+ */
if (direct || size > (1 << inode->i_blkbits)) {
- ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
- offset = min_t(xfs_off_t,
- iomap.iomap_bsize - iomap.iomap_delta, size);
- bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset);
+ xfs_off_t mapping_size;
+
+ mapping_size = imap.br_startoff + imap.br_blockcount - iblock;
+ mapping_size <<= inode->i_blkbits;
+
+ ASSERT(mapping_size > 0);
+ if (mapping_size > size)
+ mapping_size = size;
+ if (mapping_size > LONG_MAX)
+ mapping_size = LONG_MAX;
+
+ bh_result->b_size = mapping_size;
}
return 0;
@@ -1576,7 +1617,7 @@ xfs_end_io_direct(
*/
ioend->io_offset = offset;
ioend->io_size = size;
- if (ioend->io_type == IOMAP_READ) {
+ if (ioend->io_type == IO_READ) {
xfs_finish_ioend(ioend, 0);
} else if (private && size > 0) {
xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
@@ -1587,7 +1628,7 @@ xfs_end_io_direct(
* didn't map an unwritten extent so switch it's completion
* handler.
*/
- ioend->io_type = IOMAP_NEW;
+ ioend->io_type = IO_NEW;
xfs_finish_ioend(ioend, 0);
}
@@ -1612,10 +1653,10 @@ xfs_vm_direct_IO(
struct block_device *bdev;
ssize_t ret;
- bdev = xfs_find_bdev_for_inode(XFS_I(inode));
+ bdev = xfs_find_bdev_for_inode(inode);
iocb->private = xfs_alloc_ioend(inode, rw == WRITE ?
- IOMAP_UNWRITTEN : IOMAP_READ);
+ IO_UNWRITTEN : IO_READ);
ret = blockdev_direct_IO_no_locking(rw, iocb, inode, bdev, iov,
offset, nr_segs,
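With the mapping carried as a struct xfs_bmbt_irec, the offsets above are compared in filesystem blocks rather than bytes: xfs_imap_valid() shifts the byte offset down by i_blkbits, and __xfs_get_blocks() converts the remaining block count back to bytes and clamps it. A standalone sketch of that arithmetic, distilled from the hunks above (illustration only, not kernel code):

/* Standalone illustration of the block-unit mapping checks above. */
#include <stdint.h>

struct irec {			/* the xfs_bmbt_irec fields used here */
	uint64_t br_startoff;	/* first file block covered by the mapping */
	uint64_t br_blockcount;	/* length of the mapping, in fs blocks */
};

/* xfs_imap_valid(): is the byte offset inside the mapped extent? */
static int imap_covers(const struct irec *imap, uint64_t byte_off, int blkbits)
{
	uint64_t fsb = byte_off >> blkbits;	/* bytes -> fs blocks */

	return fsb >= imap->br_startoff &&
	       fsb < imap->br_startoff + imap->br_blockcount;
}

/* The b_size computation in __xfs_get_blocks(): bytes of the mapping
 * left from iblock, clamped to the size the caller asked for. */
static uint64_t mapping_bytes(const struct irec *imap, uint64_t iblock,
			      uint64_t requested, int blkbits)
{
	uint64_t n = (imap->br_startoff + imap->br_blockcount - iblock) << blkbits;

	return n > requested ? requested : n;
}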
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 44c2b0ef9a41..f01de3c55c43 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -1007,25 +1007,20 @@ xfs_bwrite(
struct xfs_mount *mp,
struct xfs_buf *bp)
{
- int iowait = (bp->b_flags & XBF_ASYNC) == 0;
- int error = 0;
+ int error;
bp->b_strat = xfs_bdstrat_cb;
bp->b_mount = mp;
bp->b_flags |= XBF_WRITE;
- if (!iowait)
- bp->b_flags |= _XBF_RUN_QUEUES;
+ bp->b_flags &= ~(XBF_ASYNC | XBF_READ);
xfs_buf_delwri_dequeue(bp);
xfs_buf_iostrategy(bp);
- if (iowait) {
- error = xfs_buf_iowait(bp);
- if (error)
- xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
- xfs_buf_relse(bp);
- }
-
+ error = xfs_buf_iowait(bp);
+ if (error)
+ xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
+ xfs_buf_relse(bp);
return error;
}
@@ -1614,7 +1609,8 @@ xfs_mapping_buftarg(
STATIC int
xfs_alloc_delwrite_queue(
- xfs_buftarg_t *btp)
+ xfs_buftarg_t *btp,
+ const char *fsname)
{
int error = 0;
@@ -1622,7 +1618,7 @@ xfs_alloc_delwrite_queue(
INIT_LIST_HEAD(&btp->bt_delwrite_queue);
spin_lock_init(&btp->bt_delwrite_lock);
btp->bt_flags = 0;
- btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd");
+ btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd/%s", fsname);
if (IS_ERR(btp->bt_task)) {
error = PTR_ERR(btp->bt_task);
goto out_error;
@@ -1635,7 +1631,8 @@ out_error:
xfs_buftarg_t *
xfs_alloc_buftarg(
struct block_device *bdev,
- int external)
+ int external,
+ const char *fsname)
{
xfs_buftarg_t *btp;
@@ -1647,7 +1644,7 @@ xfs_alloc_buftarg(
goto error;
if (xfs_mapping_buftarg(btp, bdev))
goto error;
- if (xfs_alloc_delwrite_queue(btp))
+ if (xfs_alloc_delwrite_queue(btp, fsname))
goto error;
xfs_alloc_bufhash(btp, external);
return btp;
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index 386e7361e50e..5fbecefa5dfd 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -390,7 +390,7 @@ static inline void xfs_buf_relse(xfs_buf_t *bp)
/*
* Handling of buftargs.
*/
-extern xfs_buftarg_t *xfs_alloc_buftarg(struct block_device *, int);
+extern xfs_buftarg_t *xfs_alloc_buftarg(struct block_device *, int, const char *);
extern void xfs_free_buftarg(struct xfs_mount *, struct xfs_buftarg *);
extern void xfs_wait_buftarg(xfs_buftarg_t *);
extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int);
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index 42dd3bcfba6b..d8fb1b5d6cb5 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -115,6 +115,8 @@ xfs_file_fsync(
xfs_iflags_clear(ip, XFS_ITRUNCATED);
+ xfs_ioend_wait(ip);
+
/*
* We always need to make sure that the required inode state is safe on
* disk. The inode might be clean but we still might need to force the
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 7b26cc2fd284..699b60cbab9c 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -527,6 +527,10 @@ xfs_attrmulti_by_handle(
if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t)))
return -XFS_ERROR(EFAULT);
+ /* overflow check */
+ if (am_hreq.opcount >= INT_MAX / sizeof(xfs_attr_multiop_t))
+ return -E2BIG;
+
dentry = xfs_handlereq_to_dentry(parfilp, &am_hreq.hreq);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
index 593c05b4df8d..9287135e9bfc 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
@@ -420,6 +420,10 @@ xfs_compat_attrmulti_by_handle(
sizeof(compat_xfs_fsop_attrmulti_handlereq_t)))
return -XFS_ERROR(EFAULT);
+ /* overflow check */
+ if (am_hreq.opcount >= INT_MAX / sizeof(compat_xfs_attr_multiop_t))
+ return -E2BIG;
+
dentry = xfs_compat_handlereq_to_dentry(parfilp, &am_hreq.hreq);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index e65a7937f3a4..9c8019c78c92 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -673,7 +673,10 @@ xfs_vn_fiemap(
bm.bmv_length = BTOBB(length);
/* We add one because in getbmap world count includes the header */
- bm.bmv_count = fieinfo->fi_extents_max + 1;
+ bm.bmv_count = !fieinfo->fi_extents_max ? MAXEXTNUM :
+ fieinfo->fi_extents_max + 1;
+ bm.bmv_count = min_t(__s32, bm.bmv_count,
+ (PAGE_SIZE * 16 / sizeof(struct getbmapx)));
bm.bmv_iflags = BMV_IF_PREALLOC;
if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR)
bm.bmv_iflags |= BMV_IF_ATTRFORK;
diff --git a/fs/xfs/linux-2.6/xfs_quotaops.c b/fs/xfs/linux-2.6/xfs_quotaops.c
index 1947514ce1ad..e31bf21fe5d3 100644
--- a/fs/xfs/linux-2.6/xfs_quotaops.c
+++ b/fs/xfs/linux-2.6/xfs_quotaops.c
@@ -97,7 +97,7 @@ xfs_fs_set_xstate(
}
STATIC int
-xfs_fs_get_xquota(
+xfs_fs_get_dqblk(
struct super_block *sb,
int type,
qid_t id,
@@ -114,7 +114,7 @@ xfs_fs_get_xquota(
}
STATIC int
-xfs_fs_set_xquota(
+xfs_fs_set_dqblk(
struct super_block *sb,
int type,
qid_t id,
@@ -135,6 +135,6 @@ xfs_fs_set_xquota(
const struct quotactl_ops xfs_quotactl_operations = {
.get_xstate = xfs_fs_get_xstate,
.set_xstate = xfs_fs_set_xstate,
- .get_xquota = xfs_fs_get_xquota,
- .set_xquota = xfs_fs_set_xquota,
+ .get_dqblk = xfs_fs_get_dqblk,
+ .set_dqblk = xfs_fs_set_dqblk,
};
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 29f1edca76de..f24dbe5efde3 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -725,7 +725,8 @@ void
xfs_blkdev_issue_flush(
xfs_buftarg_t *buftarg)
{
- blkdev_issue_flush(buftarg->bt_bdev, NULL);
+ blkdev_issue_flush(buftarg->bt_bdev, GFP_KERNEL, NULL,
+ BLKDEV_IFL_WAIT);
}
STATIC void
@@ -789,18 +790,18 @@ xfs_open_devices(
* Setup xfs_mount buffer target pointers
*/
error = ENOMEM;
- mp->m_ddev_targp = xfs_alloc_buftarg(ddev, 0);
+ mp->m_ddev_targp = xfs_alloc_buftarg(ddev, 0, mp->m_fsname);
if (!mp->m_ddev_targp)
goto out_close_rtdev;
if (rtdev) {
- mp->m_rtdev_targp = xfs_alloc_buftarg(rtdev, 1);
+ mp->m_rtdev_targp = xfs_alloc_buftarg(rtdev, 1, mp->m_fsname);
if (!mp->m_rtdev_targp)
goto out_free_ddev_targ;
}
if (logdev && logdev != ddev) {
- mp->m_logdev_targp = xfs_alloc_buftarg(logdev, 1);
+ mp->m_logdev_targp = xfs_alloc_buftarg(logdev, 1, mp->m_fsname);
if (!mp->m_logdev_targp)
goto out_free_rtdev_targ;
} else {
@@ -902,7 +903,8 @@ xfsaild_start(
struct xfs_ail *ailp)
{
ailp->xa_target = 0;
- ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild");
+ ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
+ ailp->xa_mount->m_fsname);
if (IS_ERR(ailp->xa_task))
return -PTR_ERR(ailp->xa_task);
return 0;
@@ -1092,6 +1094,7 @@ xfs_fs_write_inode(
* the code will only flush the inode if it isn't already
* being flushed.
*/
+ xfs_ioend_wait(ip);
xfs_ilock(ip, XFS_ILOCK_SHARED);
if (ip->i_update_core) {
error = xfs_log_inode(ip);
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index a427c638d909..3884e20bc14e 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -356,68 +356,23 @@ xfs_commit_dummy_trans(
STATIC int
xfs_sync_fsdata(
- struct xfs_mount *mp,
- int flags)
+ struct xfs_mount *mp)
{
struct xfs_buf *bp;
- struct xfs_buf_log_item *bip;
- int error = 0;
/*
- * If this is xfssyncd() then only sync the superblock if we can
- * lock it without sleeping and it is not pinned.
+ * If the buffer is pinned then push on the log so we won't get stuck
+ * waiting in the write for someone, maybe ourselves, to flush the log.
+ *
+ * Even though we just pushed the log above, we did not have the
+ * superblock buffer locked at that point so it can become pinned in
+ * between there and here.
*/
- if (flags & SYNC_TRYLOCK) {
- ASSERT(!(flags & SYNC_WAIT));
-
- bp = xfs_getsb(mp, XBF_TRYLOCK);
- if (!bp)
- goto out;
-
- bip = XFS_BUF_FSPRIVATE(bp, struct xfs_buf_log_item *);
- if (!bip || !xfs_buf_item_dirty(bip) || XFS_BUF_ISPINNED(bp))
- goto out_brelse;
- } else {
- bp = xfs_getsb(mp, 0);
-
- /*
- * If the buffer is pinned then push on the log so we won't
- * get stuck waiting in the write for someone, maybe
- * ourselves, to flush the log.
- *
- * Even though we just pushed the log above, we did not have
- * the superblock buffer locked at that point so it can
- * become pinned in between there and here.
- */
- if (XFS_BUF_ISPINNED(bp))
- xfs_log_force(mp, 0);
- }
-
-
- if (flags & SYNC_WAIT)
- XFS_BUF_UNASYNC(bp);
- else
- XFS_BUF_ASYNC(bp);
-
- error = xfs_bwrite(mp, bp);
- if (error)
- return error;
-
- /*
- * If this is a data integrity sync make sure all pending buffers
- * are flushed out for the log coverage check below.
- */
- if (flags & SYNC_WAIT)
- xfs_flush_buftarg(mp->m_ddev_targp, 1);
-
- if (xfs_log_need_covered(mp))
- error = xfs_commit_dummy_trans(mp, flags);
- return error;
+ bp = xfs_getsb(mp, 0);
+ if (XFS_BUF_ISPINNED(bp))
+ xfs_log_force(mp, 0);
- out_brelse:
- xfs_buf_relse(bp);
- out:
- return error;
+ return xfs_bwrite(mp, bp);
}
/*
@@ -441,7 +396,7 @@ int
xfs_quiesce_data(
struct xfs_mount *mp)
{
- int error;
+ int error, error2 = 0;
/* push non-blocking */
xfs_sync_data(mp, 0);
@@ -452,13 +407,20 @@ xfs_quiesce_data(
xfs_qm_sync(mp, SYNC_WAIT);
/* write superblock and hoover up shutdown errors */
- error = xfs_sync_fsdata(mp, SYNC_WAIT);
+ error = xfs_sync_fsdata(mp);
+
+ /* make sure all delwri buffers are written out */
+ xfs_flush_buftarg(mp->m_ddev_targp, 1);
+
+ /* mark the log as covered if needed */
+ if (xfs_log_need_covered(mp))
+ error2 = xfs_commit_dummy_trans(mp, SYNC_WAIT);
/* flush data-only devices */
if (mp->m_rtdev_targp)
XFS_bflush(mp->m_rtdev_targp);
- return error;
+ return error ? error : error2;
}
STATIC void
@@ -581,9 +543,9 @@ xfs_flush_inodes(
}
/*
- * Every sync period we need to unpin all items, reclaim inodes, sync
- * quota and write out the superblock. We might need to cover the log
- * to indicate it is idle.
+ * Every sync period we need to unpin all items, reclaim inodes and sync
+ * disk quotas. We might need to cover the log to indicate that the
+ * filesystem is idle.
*/
STATIC void
xfs_sync_worker(
@@ -597,7 +559,8 @@ xfs_sync_worker(
xfs_reclaim_inodes(mp, 0);
/* dgc: errors ignored here */
error = xfs_qm_sync(mp, SYNC_TRYLOCK);
- error = xfs_sync_fsdata(mp, SYNC_TRYLOCK);
+ if (xfs_log_need_covered(mp))
+ error = xfs_commit_dummy_trans(mp, 0);
}
mp->m_sync_seq++;
wake_up(&mp->m_wait_single_sync_task);
@@ -660,7 +623,7 @@ xfs_syncd_init(
mp->m_sync_work.w_syncer = xfs_sync_worker;
mp->m_sync_work.w_mount = mp;
mp->m_sync_work.w_completion = NULL;
- mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd");
+ mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd/%s", mp->m_fsname);
if (IS_ERR(mp->m_sync_task))
return -PTR_ERR(mp->m_sync_task);
return 0;
diff --git a/fs/xfs/linux-2.6/xfs_trace.c b/fs/xfs/linux-2.6/xfs_trace.c
index 5a107601e969..207fa77f63ae 100644
--- a/fs/xfs/linux-2.6/xfs_trace.c
+++ b/fs/xfs/linux-2.6/xfs_trace.c
@@ -41,7 +41,6 @@
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
-#include "xfs_attr_sf.h"
#include "xfs_attr_leaf.h"
#include "xfs_log_priv.h"
#include "xfs_buf_item.h"
@@ -50,6 +49,9 @@
#include "xfs_aops.h"
#include "quota/xfs_dquot_item.h"
#include "quota/xfs_dquot.h"
+#include "xfs_log_recover.h"
+#include "xfs_buf_item.h"
+#include "xfs_inode_item.h"
/*
* We include this last to have the helpers above available for the trace
diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h
index fcaa62f0799e..8a319cfd2901 100644
--- a/fs/xfs/linux-2.6/xfs_trace.h
+++ b/fs/xfs/linux-2.6/xfs_trace.h
@@ -32,6 +32,10 @@ struct xfs_da_node_entry;
struct xfs_dquot;
struct xlog_ticket;
struct log;
+struct xlog_recover;
+struct xlog_recover_item;
+struct xfs_buf_log_format;
+struct xfs_inode_log_format;
DECLARE_EVENT_CLASS(xfs_attr_list_class,
TP_PROTO(struct xfs_attr_list_context *ctx),
@@ -562,18 +566,21 @@ DECLARE_EVENT_CLASS(xfs_inode_class,
__field(dev_t, dev)
__field(xfs_ino_t, ino)
__field(int, count)
+ __field(int, pincount)
__field(unsigned long, caller_ip)
),
TP_fast_assign(
__entry->dev = VFS_I(ip)->i_sb->s_dev;
__entry->ino = ip->i_ino;
__entry->count = atomic_read(&VFS_I(ip)->i_count);
+ __entry->pincount = atomic_read(&ip->i_pincount);
__entry->caller_ip = caller_ip;
),
- TP_printk("dev %d:%d ino 0x%llx count %d caller %pf",
+ TP_printk("dev %d:%d ino 0x%llx count %d pincount %d caller %pf",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->count,
+ __entry->pincount,
(char *)__entry->caller_ip)
)
@@ -583,6 +590,10 @@ DEFINE_EVENT(xfs_inode_class, name, \
TP_ARGS(ip, caller_ip))
DEFINE_INODE_EVENT(xfs_ihold);
DEFINE_INODE_EVENT(xfs_irele);
+DEFINE_INODE_EVENT(xfs_inode_pin);
+DEFINE_INODE_EVENT(xfs_inode_unpin);
+DEFINE_INODE_EVENT(xfs_inode_unpin_nowait);
+
/* the old xfs_itrace_entry tracer - to be replaced by s.th. in the VFS */
DEFINE_INODE_EVENT(xfs_inode);
#define xfs_itrace_entry(ip) \
@@ -642,8 +653,6 @@ DEFINE_EVENT(xfs_dquot_class, name, \
TP_PROTO(struct xfs_dquot *dqp), \
TP_ARGS(dqp))
DEFINE_DQUOT_EVENT(xfs_dqadjust);
-DEFINE_DQUOT_EVENT(xfs_dqshake_dirty);
-DEFINE_DQUOT_EVENT(xfs_dqshake_unlink);
DEFINE_DQUOT_EVENT(xfs_dqreclaim_want);
DEFINE_DQUOT_EVENT(xfs_dqreclaim_dirty);
DEFINE_DQUOT_EVENT(xfs_dqreclaim_unlink);
@@ -658,7 +667,6 @@ DEFINE_DQUOT_EVENT(xfs_dqread_fail);
DEFINE_DQUOT_EVENT(xfs_dqlookup_found);
DEFINE_DQUOT_EVENT(xfs_dqlookup_want);
DEFINE_DQUOT_EVENT(xfs_dqlookup_freelist);
-DEFINE_DQUOT_EVENT(xfs_dqlookup_move);
DEFINE_DQUOT_EVENT(xfs_dqlookup_done);
DEFINE_DQUOT_EVENT(xfs_dqget_hit);
DEFINE_DQUOT_EVENT(xfs_dqget_miss);
@@ -1495,6 +1503,140 @@ DEFINE_EVENT(xfs_swap_extent_class, name, \
DEFINE_SWAPEXT_EVENT(xfs_swap_extent_before);
DEFINE_SWAPEXT_EVENT(xfs_swap_extent_after);
+DECLARE_EVENT_CLASS(xfs_log_recover_item_class,
+ TP_PROTO(struct log *log, struct xlog_recover *trans,
+ struct xlog_recover_item *item, int pass),
+ TP_ARGS(log, trans, item, pass),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(unsigned long, item)
+ __field(xlog_tid_t, tid)
+ __field(int, type)
+ __field(int, pass)
+ __field(int, count)
+ __field(int, total)
+ ),
+ TP_fast_assign(
+ __entry->dev = log->l_mp->m_super->s_dev;
+ __entry->item = (unsigned long)item;
+ __entry->tid = trans->r_log_tid;
+ __entry->type = ITEM_TYPE(item);
+ __entry->pass = pass;
+ __entry->count = item->ri_cnt;
+ __entry->total = item->ri_total;
+ ),
+ TP_printk("dev %d:%d trans 0x%x, pass %d, item 0x%p, item type %s "
+ "item region count/total %d/%d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->tid,
+ __entry->pass,
+ (void *)__entry->item,
+ __print_symbolic(__entry->type, XFS_LI_TYPE_DESC),
+ __entry->count,
+ __entry->total)
+)
+
+#define DEFINE_LOG_RECOVER_ITEM(name) \
+DEFINE_EVENT(xfs_log_recover_item_class, name, \
+ TP_PROTO(struct log *log, struct xlog_recover *trans, \
+ struct xlog_recover_item *item, int pass), \
+ TP_ARGS(log, trans, item, pass))
+
+DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_add);
+DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_add_cont);
+DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_reorder_head);
+DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_reorder_tail);
+DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_recover);
+
+DECLARE_EVENT_CLASS(xfs_log_recover_buf_item_class,
+ TP_PROTO(struct log *log, struct xfs_buf_log_format *buf_f),
+ TP_ARGS(log, buf_f),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(__int64_t, blkno)
+ __field(unsigned short, len)
+ __field(unsigned short, flags)
+ __field(unsigned short, size)
+ __field(unsigned int, map_size)
+ ),
+ TP_fast_assign(
+ __entry->dev = log->l_mp->m_super->s_dev;
+ __entry->blkno = buf_f->blf_blkno;
+ __entry->len = buf_f->blf_len;
+ __entry->flags = buf_f->blf_flags;
+ __entry->size = buf_f->blf_size;
+ __entry->map_size = buf_f->blf_map_size;
+ ),
+ TP_printk("dev %d:%d blkno 0x%llx, len %u, flags 0x%x, size %d, "
+ "map_size %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->blkno,
+ __entry->len,
+ __entry->flags,
+ __entry->size,
+ __entry->map_size)
+)
+
+#define DEFINE_LOG_RECOVER_BUF_ITEM(name) \
+DEFINE_EVENT(xfs_log_recover_buf_item_class, name, \
+ TP_PROTO(struct log *log, struct xfs_buf_log_format *buf_f), \
+ TP_ARGS(log, buf_f))
+
+DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_not_cancel);
+DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel);
+DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel_add);
+DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel_ref_inc);
+DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_recover);
+DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_inode_buf);
+DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_reg_buf);
+DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_dquot_buf);
+
+DECLARE_EVENT_CLASS(xfs_log_recover_ino_item_class,
+ TP_PROTO(struct log *log, struct xfs_inode_log_format *in_f),
+ TP_ARGS(log, in_f),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __field(unsigned short, size)
+ __field(int, fields)
+ __field(unsigned short, asize)
+ __field(unsigned short, dsize)
+ __field(__int64_t, blkno)
+ __field(int, len)
+ __field(int, boffset)
+ ),
+ TP_fast_assign(
+ __entry->dev = log->l_mp->m_super->s_dev;
+ __entry->ino = in_f->ilf_ino;
+ __entry->size = in_f->ilf_size;
+ __entry->fields = in_f->ilf_fields;
+ __entry->asize = in_f->ilf_asize;
+ __entry->dsize = in_f->ilf_dsize;
+ __entry->blkno = in_f->ilf_blkno;
+ __entry->len = in_f->ilf_len;
+ __entry->boffset = in_f->ilf_boffset;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx, size %u, fields 0x%x, asize %d, "
+ "dsize %d, blkno 0x%llx, len %d, boffset %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->size,
+ __entry->fields,
+ __entry->asize,
+ __entry->dsize,
+ __entry->blkno,
+ __entry->len,
+ __entry->boffset)
+)
+#define DEFINE_LOG_RECOVER_INO_ITEM(name) \
+DEFINE_EVENT(xfs_log_recover_ino_item_class, name, \
+ TP_PROTO(struct log *log, struct xfs_inode_log_format *in_f), \
+ TP_ARGS(log, in_f))
+
+DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_recover);
+DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_cancel);
+DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_skip);
+
#endif /* _TRACE_XFS_H */
#undef TRACE_INCLUDE_PATH
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
index 5f79dd78626b..b89ec5df0129 100644
--- a/fs/xfs/quota/xfs_dquot.c
+++ b/fs/xfs/quota/xfs_dquot.c
@@ -101,7 +101,7 @@ xfs_qm_dqinit(
* No need to re-initialize these if this is a reclaimed dquot.
*/
if (brandnewdquot) {
- dqp->dq_flnext = dqp->dq_flprev = dqp;
+ INIT_LIST_HEAD(&dqp->q_freelist);
mutex_init(&dqp->q_qlock);
init_waitqueue_head(&dqp->q_pinwait);
@@ -119,20 +119,20 @@ xfs_qm_dqinit(
* Only the q_core portion was zeroed in dqreclaim_one().
* So, we need to reset others.
*/
- dqp->q_nrefs = 0;
- dqp->q_blkno = 0;
- dqp->MPL_NEXT = dqp->HL_NEXT = NULL;
- dqp->HL_PREVP = dqp->MPL_PREVP = NULL;
- dqp->q_bufoffset = 0;
- dqp->q_fileoffset = 0;
- dqp->q_transp = NULL;
- dqp->q_gdquot = NULL;
- dqp->q_res_bcount = 0;
- dqp->q_res_icount = 0;
- dqp->q_res_rtbcount = 0;
- atomic_set(&dqp->q_pincount, 0);
- dqp->q_hash = NULL;
- ASSERT(dqp->dq_flnext == dqp->dq_flprev);
+ dqp->q_nrefs = 0;
+ dqp->q_blkno = 0;
+ INIT_LIST_HEAD(&dqp->q_mplist);
+ INIT_LIST_HEAD(&dqp->q_hashlist);
+ dqp->q_bufoffset = 0;
+ dqp->q_fileoffset = 0;
+ dqp->q_transp = NULL;
+ dqp->q_gdquot = NULL;
+ dqp->q_res_bcount = 0;
+ dqp->q_res_icount = 0;
+ dqp->q_res_rtbcount = 0;
+ atomic_set(&dqp->q_pincount, 0);
+ dqp->q_hash = NULL;
+ ASSERT(list_empty(&dqp->q_freelist));
trace_xfs_dqreuse(dqp);
}
@@ -158,7 +158,7 @@ void
xfs_qm_dqdestroy(
xfs_dquot_t *dqp)
{
- ASSERT(! XFS_DQ_IS_ON_FREELIST(dqp));
+ ASSERT(list_empty(&dqp->q_freelist));
mutex_destroy(&dqp->q_qlock);
sv_destroy(&dqp->q_pinwait);
@@ -252,7 +252,7 @@ xfs_qm_adjust_dqtimers(
(be64_to_cpu(d->d_bcount) >=
be64_to_cpu(d->d_blk_hardlimit)))) {
d->d_btimer = cpu_to_be32(get_seconds() +
- XFS_QI_BTIMELIMIT(mp));
+ mp->m_quotainfo->qi_btimelimit);
} else {
d->d_bwarns = 0;
}
@@ -275,7 +275,7 @@ xfs_qm_adjust_dqtimers(
(be64_to_cpu(d->d_icount) >=
be64_to_cpu(d->d_ino_hardlimit)))) {
d->d_itimer = cpu_to_be32(get_seconds() +
- XFS_QI_ITIMELIMIT(mp));
+ mp->m_quotainfo->qi_itimelimit);
} else {
d->d_iwarns = 0;
}
@@ -298,7 +298,7 @@ xfs_qm_adjust_dqtimers(
(be64_to_cpu(d->d_rtbcount) >=
be64_to_cpu(d->d_rtb_hardlimit)))) {
d->d_rtbtimer = cpu_to_be32(get_seconds() +
- XFS_QI_RTBTIMELIMIT(mp));
+ mp->m_quotainfo->qi_rtbtimelimit);
} else {
d->d_rtbwarns = 0;
}
@@ -325,6 +325,7 @@ xfs_qm_init_dquot_blk(
uint type,
xfs_buf_t *bp)
{
+ struct xfs_quotainfo *q = mp->m_quotainfo;
xfs_dqblk_t *d;
int curid, i;
@@ -337,16 +338,16 @@ xfs_qm_init_dquot_blk(
/*
* ID of the first dquot in the block - id's are zero based.
*/
- curid = id - (id % XFS_QM_DQPERBLK(mp));
+ curid = id - (id % q->qi_dqperchunk);
ASSERT(curid >= 0);
- memset(d, 0, BBTOB(XFS_QI_DQCHUNKLEN(mp)));
- for (i = 0; i < XFS_QM_DQPERBLK(mp); i++, d++, curid++)
+ memset(d, 0, BBTOB(q->qi_dqchunklen));
+ for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++)
xfs_qm_dqinit_core(curid, type, d);
xfs_trans_dquot_buf(tp, bp,
(type & XFS_DQ_USER ? XFS_BLI_UDQUOT_BUF :
((type & XFS_DQ_PROJ) ? XFS_BLI_PDQUOT_BUF :
XFS_BLI_GDQUOT_BUF)));
- xfs_trans_log_buf(tp, bp, 0, BBTOB(XFS_QI_DQCHUNKLEN(mp)) - 1);
+ xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
}
@@ -419,7 +420,7 @@ xfs_qm_dqalloc(
/* now we can just get the buffer (there's nothing to read yet) */
bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
dqp->q_blkno,
- XFS_QI_DQCHUNKLEN(mp),
+ mp->m_quotainfo->qi_dqchunklen,
0);
if (!bp || (error = XFS_BUF_GETERROR(bp)))
goto error1;
@@ -500,7 +501,8 @@ xfs_qm_dqtobp(
*/
if (dqp->q_blkno == (xfs_daddr_t) 0) {
/* We use the id as an index */
- dqp->q_fileoffset = (xfs_fileoff_t)id / XFS_QM_DQPERBLK(mp);
+ dqp->q_fileoffset = (xfs_fileoff_t)id /
+ mp->m_quotainfo->qi_dqperchunk;
nmaps = 1;
quotip = XFS_DQ_TO_QIP(dqp);
xfs_ilock(quotip, XFS_ILOCK_SHARED);
@@ -529,7 +531,7 @@ xfs_qm_dqtobp(
/*
* offset of dquot in the (fixed sized) dquot chunk.
*/
- dqp->q_bufoffset = (id % XFS_QM_DQPERBLK(mp)) *
+ dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
sizeof(xfs_dqblk_t);
if (map.br_startblock == HOLESTARTBLOCK) {
/*
@@ -559,15 +561,13 @@ xfs_qm_dqtobp(
* Read in the buffer, unless we've just done the allocation
* (in which case we already have the buf).
*/
- if (! newdquot) {
+ if (!newdquot) {
trace_xfs_dqtobp_read(dqp);
- if ((error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
- dqp->q_blkno,
- XFS_QI_DQCHUNKLEN(mp),
- 0, &bp))) {
- return (error);
- }
+ error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
+ dqp->q_blkno,
+ mp->m_quotainfo->qi_dqchunklen,
+ 0, &bp);
if (error || !bp)
return XFS_ERROR(error);
}
@@ -689,14 +689,14 @@ xfs_qm_idtodq(
tp = NULL;
if (flags & XFS_QMOPT_DQALLOC) {
tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);
- if ((error = xfs_trans_reserve(tp,
- XFS_QM_DQALLOC_SPACE_RES(mp),
- XFS_WRITE_LOG_RES(mp) +
- BBTOB(XFS_QI_DQCHUNKLEN(mp)) - 1 +
- 128,
- 0,
- XFS_TRANS_PERM_LOG_RES,
- XFS_WRITE_LOG_COUNT))) {
+ error = xfs_trans_reserve(tp, XFS_QM_DQALLOC_SPACE_RES(mp),
+ XFS_WRITE_LOG_RES(mp) +
+ BBTOB(mp->m_quotainfo->qi_dqchunklen) - 1 +
+ 128,
+ 0,
+ XFS_TRANS_PERM_LOG_RES,
+ XFS_WRITE_LOG_COUNT);
+ if (error) {
cancelflags = 0;
goto error0;
}
@@ -751,7 +751,6 @@ xfs_qm_dqlookup(
{
xfs_dquot_t *dqp;
uint flist_locked;
- xfs_dquot_t *d;
ASSERT(mutex_is_locked(&qh->qh_lock));
@@ -760,7 +759,7 @@ xfs_qm_dqlookup(
/*
* Traverse the hashchain looking for a match
*/
- for (dqp = qh->qh_next; dqp != NULL; dqp = dqp->HL_NEXT) {
+ list_for_each_entry(dqp, &qh->qh_list, q_hashlist) {
/*
* We already have the hashlock. We don't need the
* dqlock to look at the id field of the dquot, since the
@@ -772,12 +771,12 @@ xfs_qm_dqlookup(
/*
* All in core dquots must be on the dqlist of mp
*/
- ASSERT(dqp->MPL_PREVP != NULL);
+ ASSERT(!list_empty(&dqp->q_mplist));
xfs_dqlock(dqp);
if (dqp->q_nrefs == 0) {
- ASSERT (XFS_DQ_IS_ON_FREELIST(dqp));
- if (! xfs_qm_freelist_lock_nowait(xfs_Gqm)) {
+ ASSERT(!list_empty(&dqp->q_freelist));
+ if (!mutex_trylock(&xfs_Gqm->qm_dqfrlist_lock)) {
trace_xfs_dqlookup_want(dqp);
/*
@@ -787,7 +786,7 @@ xfs_qm_dqlookup(
*/
dqp->dq_flags |= XFS_DQ_WANT;
xfs_dqunlock(dqp);
- xfs_qm_freelist_lock(xfs_Gqm);
+ mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
xfs_dqlock(dqp);
dqp->dq_flags &= ~(XFS_DQ_WANT);
}
@@ -802,46 +801,28 @@ xfs_qm_dqlookup(
if (flist_locked) {
if (dqp->q_nrefs != 0) {
- xfs_qm_freelist_unlock(xfs_Gqm);
+ mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
flist_locked = B_FALSE;
} else {
- /*
- * take it off the freelist
- */
+ /* take it off the freelist */
trace_xfs_dqlookup_freelist(dqp);
- XQM_FREELIST_REMOVE(dqp);
- /* xfs_qm_freelist_print(&(xfs_Gqm->
- qm_dqfreelist),
- "after removal"); */
+ list_del_init(&dqp->q_freelist);
+ xfs_Gqm->qm_dqfrlist_cnt--;
}
}
- /*
- * grab a reference
- */
XFS_DQHOLD(dqp);
if (flist_locked)
- xfs_qm_freelist_unlock(xfs_Gqm);
+ mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
/*
* move the dquot to the front of the hashchain
*/
ASSERT(mutex_is_locked(&qh->qh_lock));
- if (dqp->HL_PREVP != &qh->qh_next) {
- trace_xfs_dqlookup_move(dqp);
- if ((d = dqp->HL_NEXT))
- d->HL_PREVP = dqp->HL_PREVP;
- *(dqp->HL_PREVP) = d;
- d = qh->qh_next;
- d->HL_PREVP = &dqp->HL_NEXT;
- dqp->HL_NEXT = d;
- dqp->HL_PREVP = &qh->qh_next;
- qh->qh_next = dqp;
- }
+ list_move(&dqp->q_hashlist, &qh->qh_list);
trace_xfs_dqlookup_done(dqp);
*O_dqpp = dqp;
- ASSERT(mutex_is_locked(&qh->qh_lock));
- return (0);
+ return 0;
}
}
@@ -975,16 +956,17 @@ xfs_qm_dqget(
*/
if (ip) {
xfs_ilock(ip, XFS_ILOCK_EXCL);
- if (! XFS_IS_DQTYPE_ON(mp, type)) {
- /* inode stays locked on return */
- xfs_qm_dqdestroy(dqp);
- return XFS_ERROR(ESRCH);
- }
+
/*
* A dquot could be attached to this inode by now, since
* we had dropped the ilock.
*/
if (type == XFS_DQ_USER) {
+ if (!XFS_IS_UQUOTA_ON(mp)) {
+ /* inode stays locked on return */
+ xfs_qm_dqdestroy(dqp);
+ return XFS_ERROR(ESRCH);
+ }
if (ip->i_udquot) {
xfs_qm_dqdestroy(dqp);
dqp = ip->i_udquot;
@@ -992,6 +974,11 @@ xfs_qm_dqget(
goto dqret;
}
} else {
+ if (!XFS_IS_OQUOTA_ON(mp)) {
+ /* inode stays locked on return */
+ xfs_qm_dqdestroy(dqp);
+ return XFS_ERROR(ESRCH);
+ }
if (ip->i_gdquot) {
xfs_qm_dqdestroy(dqp);
dqp = ip->i_gdquot;
@@ -1033,13 +1020,14 @@ xfs_qm_dqget(
*/
ASSERT(mutex_is_locked(&h->qh_lock));
dqp->q_hash = h;
- XQM_HASHLIST_INSERT(h, dqp);
+ list_add(&dqp->q_hashlist, &h->qh_list);
+ h->qh_version++;
/*
* Attach this dquot to this filesystem's list of all dquots,
* kept inside the mount structure in m_quotainfo field
*/
- xfs_qm_mplist_lock(mp);
+ mutex_lock(&mp->m_quotainfo->qi_dqlist_lock);
/*
* We return a locked dquot to the caller, with a reference taken
@@ -1047,9 +1035,9 @@ xfs_qm_dqget(
xfs_dqlock(dqp);
dqp->q_nrefs = 1;
- XQM_MPLIST_INSERT(&(XFS_QI_MPL_LIST(mp)), dqp);
-
- xfs_qm_mplist_unlock(mp);
+ list_add(&dqp->q_mplist, &mp->m_quotainfo->qi_dqlist);
+ mp->m_quotainfo->qi_dquots++;
+ mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);
mutex_unlock(&h->qh_lock);
dqret:
ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
@@ -1086,10 +1074,10 @@ xfs_qm_dqput(
* drop the dqlock and acquire the freelist and dqlock
* in the right order; but try to get it out-of-order first
*/
- if (! xfs_qm_freelist_lock_nowait(xfs_Gqm)) {
+ if (!mutex_trylock(&xfs_Gqm->qm_dqfrlist_lock)) {
trace_xfs_dqput_wait(dqp);
xfs_dqunlock(dqp);
- xfs_qm_freelist_lock(xfs_Gqm);
+ mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
xfs_dqlock(dqp);
}
@@ -1100,10 +1088,8 @@ xfs_qm_dqput(
if (--dqp->q_nrefs == 0) {
trace_xfs_dqput_free(dqp);
- /*
- * insert at end of the freelist.
- */
- XQM_FREELIST_INSERT(&(xfs_Gqm->qm_dqfreelist), dqp);
+ list_add_tail(&dqp->q_freelist, &xfs_Gqm->qm_dqfrlist);
+ xfs_Gqm->qm_dqfrlist_cnt++;
/*
* If we just added a udquot to the freelist, then
@@ -1118,10 +1104,6 @@ xfs_qm_dqput(
xfs_dqlock(gdqp);
dqp->q_gdquot = NULL;
}
-
- /* xfs_qm_freelist_print(&(xfs_Gqm->qm_dqfreelist),
- "@@@@@++ Free list (after append) @@@@@+");
- */
}
xfs_dqunlock(dqp);
@@ -1133,7 +1115,7 @@ xfs_qm_dqput(
break;
dqp = gdqp;
}
- xfs_qm_freelist_unlock(xfs_Gqm);
+ mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
}
/*
@@ -1386,10 +1368,10 @@ int
xfs_qm_dqpurge(
xfs_dquot_t *dqp)
{
- xfs_dqhash_t *thishash;
+ xfs_dqhash_t *qh = dqp->q_hash;
xfs_mount_t *mp = dqp->q_mount;
- ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp));
+ ASSERT(mutex_is_locked(&mp->m_quotainfo->qi_dqlist_lock));
ASSERT(mutex_is_locked(&dqp->q_hash->qh_lock));
xfs_dqlock(dqp);
@@ -1407,7 +1389,7 @@ xfs_qm_dqpurge(
return (1);
}
- ASSERT(XFS_DQ_IS_ON_FREELIST(dqp));
+ ASSERT(!list_empty(&dqp->q_freelist));
/*
* If we're turning off quotas, we have to make sure that, for
@@ -1452,14 +1434,16 @@ xfs_qm_dqpurge(
ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
!(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));
- thishash = dqp->q_hash;
- XQM_HASHLIST_REMOVE(thishash, dqp);
- XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(mp)), dqp);
+ list_del_init(&dqp->q_hashlist);
+ qh->qh_version++;
+ list_del_init(&dqp->q_mplist);
+ mp->m_quotainfo->qi_dqreclaims++;
+ mp->m_quotainfo->qi_dquots--;
/*
* XXX Move this to the front of the freelist, if we can get the
* freelist lock.
*/
- ASSERT(XFS_DQ_IS_ON_FREELIST(dqp));
+ ASSERT(!list_empty(&dqp->q_freelist));
dqp->q_mount = NULL;
dqp->q_hash = NULL;
@@ -1467,7 +1451,7 @@ xfs_qm_dqpurge(
memset(&dqp->q_core, 0, sizeof(dqp->q_core));
xfs_dqfunlock(dqp);
xfs_dqunlock(dqp);
- mutex_unlock(&thishash->qh_lock);
+ mutex_unlock(&qh->qh_lock);
return (0);
}
@@ -1517,6 +1501,7 @@ void
xfs_qm_dqflock_pushbuf_wait(
xfs_dquot_t *dqp)
{
+ xfs_mount_t *mp = dqp->q_mount;
xfs_buf_t *bp;
/*
@@ -1525,14 +1510,14 @@ xfs_qm_dqflock_pushbuf_wait(
* out immediately. We'll be able to acquire
* the flush lock when the I/O completes.
*/
- bp = xfs_incore(dqp->q_mount->m_ddev_targp, dqp->q_blkno,
- XFS_QI_DQCHUNKLEN(dqp->q_mount), XBF_TRYLOCK);
+ bp = xfs_incore(mp->m_ddev_targp, dqp->q_blkno,
+ mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
if (!bp)
goto out_lock;
if (XFS_BUF_ISDELAYWRITE(bp)) {
if (XFS_BUF_ISPINNED(bp))
- xfs_log_force(dqp->q_mount, 0);
+ xfs_log_force(mp, 0);
xfs_buf_delwri_promote(bp);
wake_up_process(bp->b_target->bt_task);
}
diff --git a/fs/xfs/quota/xfs_dquot.h b/fs/xfs/quota/xfs_dquot.h
index a0f7da586d1b..5da3a23b820d 100644
--- a/fs/xfs/quota/xfs_dquot.h
+++ b/fs/xfs/quota/xfs_dquot.h
@@ -33,40 +33,23 @@
* The hash chain headers (hash buckets)
*/
typedef struct xfs_dqhash {
- struct xfs_dquot *qh_next;
+ struct list_head qh_list;
struct mutex qh_lock;
uint qh_version; /* ever increasing version */
uint qh_nelems; /* number of dquots on the list */
} xfs_dqhash_t;
-typedef struct xfs_dqlink {
- struct xfs_dquot *ql_next; /* forward link */
- struct xfs_dquot **ql_prevp; /* pointer to prev ql_next */
-} xfs_dqlink_t;
-
struct xfs_mount;
struct xfs_trans;
/*
- * This is the marker which is designed to occupy the first few
- * bytes of the xfs_dquot_t structure. Even inside this, the freelist pointers
- * must come first.
- * This serves as the marker ("sentinel") when we have to restart list
- * iterations because of locking considerations.
- */
-typedef struct xfs_dqmarker {
- struct xfs_dquot*dqm_flnext; /* link to freelist: must be first */
- struct xfs_dquot*dqm_flprev;
- xfs_dqlink_t dqm_mplist; /* link to mount's list of dquots */
- xfs_dqlink_t dqm_hashlist; /* link to the hash chain */
- uint dqm_flags; /* various flags (XFS_DQ_*) */
-} xfs_dqmarker_t;
-
-/*
* The incore dquot structure
*/
typedef struct xfs_dquot {
- xfs_dqmarker_t q_lists; /* list ptrs, q_flags (marker) */
+ uint dq_flags; /* various flags (XFS_DQ_*) */
+ struct list_head q_freelist; /* global free list of dquots */
+ struct list_head q_mplist; /* mount's list of dquots */
+ struct list_head q_hashlist; /* global hash list of dquots */
xfs_dqhash_t *q_hash; /* the hashchain header */
struct xfs_mount*q_mount; /* filesystem this relates to */
struct xfs_trans*q_transp; /* trans this belongs to currently */
@@ -87,13 +70,6 @@ typedef struct xfs_dquot {
wait_queue_head_t q_pinwait; /* dquot pinning wait queue */
} xfs_dquot_t;
-
-#define dq_flnext q_lists.dqm_flnext
-#define dq_flprev q_lists.dqm_flprev
-#define dq_mplist q_lists.dqm_mplist
-#define dq_hashlist q_lists.dqm_hashlist
-#define dq_flags q_lists.dqm_flags
-
/*
* Lock hierarchy for q_qlock:
* XFS_QLOCK_NORMAL is the implicit default,
@@ -127,7 +103,6 @@ static inline void xfs_dqfunlock(xfs_dquot_t *dqp)
}
#define XFS_DQ_IS_LOCKED(dqp) (mutex_is_locked(&((dqp)->q_qlock)))
-#define XFS_DQ_IS_ON_FREELIST(dqp) ((dqp)->dq_flnext != (dqp))
#define XFS_DQ_IS_DIRTY(dqp) ((dqp)->dq_flags & XFS_DQ_DIRTY)
#define XFS_QM_ISUDQ(dqp) ((dqp)->dq_flags & XFS_DQ_USER)
#define XFS_QM_ISPDQ(dqp) ((dqp)->dq_flags & XFS_DQ_PROJ)
diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c
index 4e4ee9a57194..8d89a24ae324 100644
--- a/fs/xfs/quota/xfs_dquot_item.c
+++ b/fs/xfs/quota/xfs_dquot_item.c
@@ -107,8 +107,7 @@ xfs_qm_dquot_logitem_pin(
/* ARGSUSED */
STATIC void
xfs_qm_dquot_logitem_unpin(
- xfs_dq_logitem_t *logitem,
- int stale)
+ xfs_dq_logitem_t *logitem)
{
xfs_dquot_t *dqp = logitem->qli_dquot;
@@ -123,7 +122,7 @@ xfs_qm_dquot_logitem_unpin_remove(
xfs_dq_logitem_t *logitem,
xfs_trans_t *tp)
{
- xfs_qm_dquot_logitem_unpin(logitem, 0);
+ xfs_qm_dquot_logitem_unpin(logitem);
}
/*
@@ -228,7 +227,7 @@ xfs_qm_dquot_logitem_pushbuf(
}
mp = dqp->q_mount;
bp = xfs_incore(mp->m_ddev_targp, qip->qli_format.qlf_blkno,
- XFS_QI_DQCHUNKLEN(mp), XBF_TRYLOCK);
+ mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
xfs_dqunlock(dqp);
if (!bp)
return;
@@ -329,8 +328,7 @@ static struct xfs_item_ops xfs_dquot_item_ops = {
.iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
xfs_qm_dquot_logitem_format,
.iop_pin = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_pin,
- .iop_unpin = (void(*)(xfs_log_item_t*, int))
- xfs_qm_dquot_logitem_unpin,
+ .iop_unpin = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_unpin,
.iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*))
xfs_qm_dquot_logitem_unpin_remove,
.iop_trylock = (uint(*)(xfs_log_item_t*))
@@ -357,9 +355,8 @@ xfs_qm_dquot_logitem_init(
xfs_dq_logitem_t *lp;
lp = &dqp->q_logitem;
- lp->qli_item.li_type = XFS_LI_DQUOT;
- lp->qli_item.li_ops = &xfs_dquot_item_ops;
- lp->qli_item.li_mountp = dqp->q_mount;
+ xfs_log_item_init(dqp->q_mount, &lp->qli_item, XFS_LI_DQUOT,
+ &xfs_dquot_item_ops);
lp->qli_dquot = dqp;
lp->qli_format.qlf_type = XFS_LI_DQUOT;
lp->qli_format.qlf_id = be32_to_cpu(dqp->q_core.d_id);
@@ -426,7 +423,7 @@ xfs_qm_qoff_logitem_pin(xfs_qoff_logitem_t *qf)
*/
/*ARGSUSED*/
STATIC void
-xfs_qm_qoff_logitem_unpin(xfs_qoff_logitem_t *qf, int stale)
+xfs_qm_qoff_logitem_unpin(xfs_qoff_logitem_t *qf)
{
return;
}
@@ -537,8 +534,7 @@ static struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
.iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
xfs_qm_qoff_logitem_format,
.iop_pin = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_pin,
- .iop_unpin = (void(*)(xfs_log_item_t* ,int))
- xfs_qm_qoff_logitem_unpin,
+ .iop_unpin = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unpin,
.iop_unpin_remove = (void(*)(xfs_log_item_t*,xfs_trans_t*))
xfs_qm_qoff_logitem_unpin_remove,
.iop_trylock = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_trylock,
@@ -559,8 +555,7 @@ static struct xfs_item_ops xfs_qm_qoff_logitem_ops = {
.iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
xfs_qm_qoff_logitem_format,
.iop_pin = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_pin,
- .iop_unpin = (void(*)(xfs_log_item_t*, int))
- xfs_qm_qoff_logitem_unpin,
+ .iop_unpin = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unpin,
.iop_unpin_remove = (void(*)(xfs_log_item_t*,xfs_trans_t*))
xfs_qm_qoff_logitem_unpin_remove,
.iop_trylock = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_trylock,
@@ -586,11 +581,8 @@ xfs_qm_qoff_logitem_init(
qf = (xfs_qoff_logitem_t*) kmem_zalloc(sizeof(xfs_qoff_logitem_t), KM_SLEEP);
- qf->qql_item.li_type = XFS_LI_QUOTAOFF;
- if (start)
- qf->qql_item.li_ops = &xfs_qm_qoffend_logitem_ops;
- else
- qf->qql_item.li_ops = &xfs_qm_qoff_logitem_ops;
+ xfs_log_item_init(mp, &qf->qql_item, XFS_LI_QUOTAOFF, start ?
+ &xfs_qm_qoffend_logitem_ops : &xfs_qm_qoff_logitem_ops);
qf->qql_item.li_mountp = mp;
qf->qql_format.qf_type = XFS_LI_QUOTAOFF;
qf->qql_format.qf_flags = flags;
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 417e61e3d9dd..38e764146644 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -67,9 +67,6 @@ static cred_t xfs_zerocr;
STATIC void xfs_qm_list_init(xfs_dqlist_t *, char *, int);
STATIC void xfs_qm_list_destroy(xfs_dqlist_t *);
-STATIC void xfs_qm_freelist_init(xfs_frlist_t *);
-STATIC void xfs_qm_freelist_destroy(xfs_frlist_t *);
-
STATIC int xfs_qm_init_quotainos(xfs_mount_t *);
STATIC int xfs_qm_init_quotainfo(xfs_mount_t *);
STATIC int xfs_qm_shake(int, gfp_t);
@@ -84,21 +81,25 @@ extern struct mutex qcheck_lock;
#endif
#ifdef QUOTADEBUG
-#define XQM_LIST_PRINT(l, NXT, title) \
-{ \
- xfs_dquot_t *dqp; int i = 0; \
- cmn_err(CE_DEBUG, "%s (#%d)", title, (int) (l)->qh_nelems); \
- for (dqp = (l)->qh_next; dqp != NULL; dqp = dqp->NXT) { \
- cmn_err(CE_DEBUG, " %d. \"%d (%s)\" " \
- "bcnt = %d, icnt = %d, refs = %d", \
- ++i, (int) be32_to_cpu(dqp->q_core.d_id), \
- DQFLAGTO_TYPESTR(dqp), \
- (int) be64_to_cpu(dqp->q_core.d_bcount), \
- (int) be64_to_cpu(dqp->q_core.d_icount), \
- (int) dqp->q_nrefs); } \
+static void
+xfs_qm_dquot_list_print(
+ struct xfs_mount *mp)
+{
+ xfs_dquot_t *dqp;
+ int i = 0;
+
+ list_for_each_entry(dqp, &mp->m_quotainfo->qi_dqlist, q_mplist) {
+ cmn_err(CE_DEBUG, " %d. \"%d (%s)\" "
+ "bcnt = %lld, icnt = %lld, refs = %d",
+ i++, be32_to_cpu(dqp->q_core.d_id),
+ DQFLAGTO_TYPESTR(dqp),
+ (long long)be64_to_cpu(dqp->q_core.d_bcount),
+ (long long)be64_to_cpu(dqp->q_core.d_icount),
+ dqp->q_nrefs);
+ }
}
#else
-#define XQM_LIST_PRINT(l, NXT, title) do { } while (0)
+static void xfs_qm_dquot_list_print(struct xfs_mount *mp) { }
#endif
/*
@@ -144,7 +145,9 @@ xfs_Gqm_init(void)
/*
* Freelist of all dquots of all file systems
*/
- xfs_qm_freelist_init(&(xqm->qm_dqfreelist));
+ INIT_LIST_HEAD(&xqm->qm_dqfrlist);
+ xqm->qm_dqfrlist_cnt = 0;
+ mutex_init(&xqm->qm_dqfrlist_lock);
/*
* dquot zone. we register our own low-memory callback.
@@ -189,6 +192,7 @@ STATIC void
xfs_qm_destroy(
struct xfs_qm *xqm)
{
+ struct xfs_dquot *dqp, *n;
int hsize, i;
ASSERT(xqm != NULL);
@@ -204,7 +208,21 @@ xfs_qm_destroy(
xqm->qm_usr_dqhtable = NULL;
xqm->qm_grp_dqhtable = NULL;
xqm->qm_dqhashmask = 0;
- xfs_qm_freelist_destroy(&(xqm->qm_dqfreelist));
+
+ /* frlist cleanup */
+ mutex_lock(&xqm->qm_dqfrlist_lock);
+ list_for_each_entry_safe(dqp, n, &xqm->qm_dqfrlist, q_freelist) {
+ xfs_dqlock(dqp);
+#ifdef QUOTADEBUG
+ cmn_err(CE_DEBUG, "FREELIST destroy 0x%p", dqp);
+#endif
+ list_del_init(&dqp->q_freelist);
+ xfs_Gqm->qm_dqfrlist_cnt--;
+ xfs_dqunlock(dqp);
+ xfs_qm_dqdestroy(dqp);
+ }
+ mutex_unlock(&xqm->qm_dqfrlist_lock);
+ mutex_destroy(&xqm->qm_dqfrlist_lock);
#ifdef DEBUG
mutex_destroy(&qcheck_lock);
#endif
@@ -256,7 +274,7 @@ STATIC void
xfs_qm_rele_quotafs_ref(
struct xfs_mount *mp)
{
- xfs_dquot_t *dqp, *nextdqp;
+ xfs_dquot_t *dqp, *n;
ASSERT(xfs_Gqm);
ASSERT(xfs_Gqm->qm_nrefs > 0);
@@ -264,26 +282,24 @@ xfs_qm_rele_quotafs_ref(
/*
* Go thru the freelist and destroy all inactive dquots.
*/
- xfs_qm_freelist_lock(xfs_Gqm);
+ mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
- for (dqp = xfs_Gqm->qm_dqfreelist.qh_next;
- dqp != (xfs_dquot_t *)&(xfs_Gqm->qm_dqfreelist); ) {
+ list_for_each_entry_safe(dqp, n, &xfs_Gqm->qm_dqfrlist, q_freelist) {
xfs_dqlock(dqp);
- nextdqp = dqp->dq_flnext;
if (dqp->dq_flags & XFS_DQ_INACTIVE) {
ASSERT(dqp->q_mount == NULL);
ASSERT(! XFS_DQ_IS_DIRTY(dqp));
- ASSERT(dqp->HL_PREVP == NULL);
- ASSERT(dqp->MPL_PREVP == NULL);
- XQM_FREELIST_REMOVE(dqp);
+ ASSERT(list_empty(&dqp->q_hashlist));
+ ASSERT(list_empty(&dqp->q_mplist));
+ list_del_init(&dqp->q_freelist);
+ xfs_Gqm->qm_dqfrlist_cnt--;
xfs_dqunlock(dqp);
xfs_qm_dqdestroy(dqp);
} else {
xfs_dqunlock(dqp);
}
- dqp = nextdqp;
}
- xfs_qm_freelist_unlock(xfs_Gqm);
+ mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
/*
* Destroy the entire XQM. If somebody mounts with quotaon, this'll
@@ -305,7 +321,7 @@ xfs_qm_unmount(
struct xfs_mount *mp)
{
if (mp->m_quotainfo) {
- xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_UMOUNTING);
+ xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
xfs_qm_destroy_quotainfo(mp);
}
}
@@ -449,20 +465,21 @@ xfs_qm_unmount_quotas(
*/
STATIC int
xfs_qm_dqflush_all(
- xfs_mount_t *mp,
- int sync_mode)
+ struct xfs_mount *mp,
+ int sync_mode)
{
- int recl;
- xfs_dquot_t *dqp;
- int niters;
- int error;
+ struct xfs_quotainfo *q = mp->m_quotainfo;
+ int recl;
+ struct xfs_dquot *dqp;
+ int niters;
+ int error;
- if (mp->m_quotainfo == NULL)
+ if (!q)
return 0;
niters = 0;
again:
- xfs_qm_mplist_lock(mp);
- FOREACH_DQUOT_IN_MP(dqp, mp) {
+ mutex_lock(&q->qi_dqlist_lock);
+ list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
xfs_dqlock(dqp);
if (! XFS_DQ_IS_DIRTY(dqp)) {
xfs_dqunlock(dqp);
@@ -470,7 +487,7 @@ again:
}
/* XXX a sentinel would be better */
- recl = XFS_QI_MPLRECLAIMS(mp);
+ recl = q->qi_dqreclaims;
if (!xfs_dqflock_nowait(dqp)) {
/*
* If we can't grab the flush lock then check
@@ -485,21 +502,21 @@ again:
* Let go of the mplist lock. We don't want to hold it
* across a disk write.
*/
- xfs_qm_mplist_unlock(mp);
+ mutex_unlock(&q->qi_dqlist_lock);
error = xfs_qm_dqflush(dqp, sync_mode);
xfs_dqunlock(dqp);
if (error)
return error;
- xfs_qm_mplist_lock(mp);
- if (recl != XFS_QI_MPLRECLAIMS(mp)) {
- xfs_qm_mplist_unlock(mp);
+ mutex_lock(&q->qi_dqlist_lock);
+ if (recl != q->qi_dqreclaims) {
+ mutex_unlock(&q->qi_dqlist_lock);
/* XXX restart limit */
goto again;
}
}
- xfs_qm_mplist_unlock(mp);
+ mutex_unlock(&q->qi_dqlist_lock);
/* return ! busy */
return 0;
}
@@ -509,15 +526,15 @@ again:
*/
STATIC void
xfs_qm_detach_gdquots(
- xfs_mount_t *mp)
+ struct xfs_mount *mp)
{
- xfs_dquot_t *dqp, *gdqp;
- int nrecl;
+ struct xfs_quotainfo *q = mp->m_quotainfo;
+ struct xfs_dquot *dqp, *gdqp;
+ int nrecl;
again:
- ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp));
- dqp = XFS_QI_MPLNEXT(mp);
- while (dqp) {
+ ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
+ list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
xfs_dqlock(dqp);
if ((gdqp = dqp->q_gdquot)) {
xfs_dqlock(gdqp);
@@ -530,15 +547,14 @@ xfs_qm_detach_gdquots(
* Can't hold the mplist lock across a dqput.
* XXXmust convert to marker based iterations here.
*/
- nrecl = XFS_QI_MPLRECLAIMS(mp);
- xfs_qm_mplist_unlock(mp);
+ nrecl = q->qi_dqreclaims;
+ mutex_unlock(&q->qi_dqlist_lock);
xfs_qm_dqput(gdqp);
- xfs_qm_mplist_lock(mp);
- if (nrecl != XFS_QI_MPLRECLAIMS(mp))
+ mutex_lock(&q->qi_dqlist_lock);
+ if (nrecl != q->qi_dqreclaims)
goto again;
}
- dqp = dqp->MPL_NEXT;
}
}
@@ -550,23 +566,23 @@ xfs_qm_detach_gdquots(
*/
STATIC int
xfs_qm_dqpurge_int(
- xfs_mount_t *mp,
- uint flags) /* QUOTAOFF/UMOUNTING/UQUOTA/PQUOTA/GQUOTA */
+ struct xfs_mount *mp,
+ uint flags)
{
- xfs_dquot_t *dqp;
- uint dqtype;
- int nrecl;
- xfs_dquot_t *nextdqp;
- int nmisses;
+ struct xfs_quotainfo *q = mp->m_quotainfo;
+ struct xfs_dquot *dqp, *n;
+ uint dqtype;
+ int nrecl;
+ int nmisses;
- if (mp->m_quotainfo == NULL)
+ if (!q)
return 0;
dqtype = (flags & XFS_QMOPT_UQUOTA) ? XFS_DQ_USER : 0;
dqtype |= (flags & XFS_QMOPT_PQUOTA) ? XFS_DQ_PROJ : 0;
dqtype |= (flags & XFS_QMOPT_GQUOTA) ? XFS_DQ_GROUP : 0;
- xfs_qm_mplist_lock(mp);
+ mutex_lock(&q->qi_dqlist_lock);
/*
* In the first pass through all incore dquots of this filesystem,
@@ -578,28 +594,25 @@ xfs_qm_dqpurge_int(
again:
nmisses = 0;
- ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp));
+ ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
/*
* Try to get rid of all of the unwanted dquots. The idea is to
* get them off mplist and hashlist, but leave them on freelist.
*/
- dqp = XFS_QI_MPLNEXT(mp);
- while (dqp) {
+ list_for_each_entry_safe(dqp, n, &q->qi_dqlist, q_mplist) {
/*
* It's OK to look at the type without taking dqlock here.
* We're holding the mplist lock here, and that's needed for
* a dqreclaim.
*/
- if ((dqp->dq_flags & dqtype) == 0) {
- dqp = dqp->MPL_NEXT;
+ if ((dqp->dq_flags & dqtype) == 0)
continue;
- }
if (!mutex_trylock(&dqp->q_hash->qh_lock)) {
- nrecl = XFS_QI_MPLRECLAIMS(mp);
- xfs_qm_mplist_unlock(mp);
+ nrecl = q->qi_dqreclaims;
+ mutex_unlock(&q->qi_dqlist_lock);
mutex_lock(&dqp->q_hash->qh_lock);
- xfs_qm_mplist_lock(mp);
+ mutex_lock(&q->qi_dqlist_lock);
/*
* XXXTheoretically, we can get into a very long
@@ -607,7 +620,7 @@ xfs_qm_dqpurge_int(
* No one can be adding dquots to the mplist at
* this point, but somebody might be taking things off.
*/
- if (nrecl != XFS_QI_MPLRECLAIMS(mp)) {
+ if (nrecl != q->qi_dqreclaims) {
mutex_unlock(&dqp->q_hash->qh_lock);
goto again;
}
@@ -617,11 +630,9 @@ xfs_qm_dqpurge_int(
* Take the dquot off the mplist and hashlist. It may remain on
* freelist in INACTIVE state.
*/
- nextdqp = dqp->MPL_NEXT;
nmisses += xfs_qm_dqpurge(dqp);
- dqp = nextdqp;
}
- xfs_qm_mplist_unlock(mp);
+ mutex_unlock(&q->qi_dqlist_lock);
return nmisses;
}
@@ -921,12 +932,13 @@ xfs_qm_dqdetach(
int
xfs_qm_sync(
- xfs_mount_t *mp,
- int flags)
+ struct xfs_mount *mp,
+ int flags)
{
- int recl, restarts;
- xfs_dquot_t *dqp;
- int error;
+ struct xfs_quotainfo *q = mp->m_quotainfo;
+ int recl, restarts;
+ struct xfs_dquot *dqp;
+ int error;
if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
return 0;
@@ -934,18 +946,19 @@ xfs_qm_sync(
restarts = 0;
again:
- xfs_qm_mplist_lock(mp);
+ mutex_lock(&q->qi_dqlist_lock);
/*
* dqpurge_all() also takes the mplist lock and iterates thru all dquots
* in quotaoff. However, if the QUOTA_ACTIVE bits are not cleared
* when we have the mplist lock, we know that dquots will be consistent
* as long as we have it locked.
*/
- if (! XFS_IS_QUOTA_ON(mp)) {
- xfs_qm_mplist_unlock(mp);
+ if (!XFS_IS_QUOTA_ON(mp)) {
+ mutex_unlock(&q->qi_dqlist_lock);
return 0;
}
- FOREACH_DQUOT_IN_MP(dqp, mp) {
+ ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
+ list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
/*
* If this is vfs_sync calling, then skip the dquots that
* don't 'seem' to be dirty. ie. don't acquire dqlock.
@@ -969,7 +982,7 @@ xfs_qm_sync(
}
/* XXX a sentinel would be better */
- recl = XFS_QI_MPLRECLAIMS(mp);
+ recl = q->qi_dqreclaims;
if (!xfs_dqflock_nowait(dqp)) {
if (flags & SYNC_TRYLOCK) {
xfs_dqunlock(dqp);
@@ -989,7 +1002,7 @@ xfs_qm_sync(
* Let go of the mplist lock. We don't want to hold it
* across a disk write
*/
- xfs_qm_mplist_unlock(mp);
+ mutex_unlock(&q->qi_dqlist_lock);
error = xfs_qm_dqflush(dqp, flags);
xfs_dqunlock(dqp);
if (error && XFS_FORCED_SHUTDOWN(mp))
@@ -997,17 +1010,17 @@ xfs_qm_sync(
else if (error)
return error;
- xfs_qm_mplist_lock(mp);
- if (recl != XFS_QI_MPLRECLAIMS(mp)) {
+ mutex_lock(&q->qi_dqlist_lock);
+ if (recl != q->qi_dqreclaims) {
if (++restarts >= XFS_QM_SYNC_MAX_RESTARTS)
break;
- xfs_qm_mplist_unlock(mp);
+ mutex_unlock(&q->qi_dqlist_lock);
goto again;
}
}
- xfs_qm_mplist_unlock(mp);
+ mutex_unlock(&q->qi_dqlist_lock);
return 0;
}
@@ -1052,8 +1065,9 @@ xfs_qm_init_quotainfo(
return error;
}
- xfs_qm_list_init(&qinf->qi_dqlist, "mpdqlist", 0);
- lockdep_set_class(&qinf->qi_dqlist.qh_lock, &xfs_quota_mplist_class);
+ INIT_LIST_HEAD(&qinf->qi_dqlist);
+ mutex_init(&qinf->qi_dqlist_lock);
+ lockdep_set_class(&qinf->qi_dqlist_lock, &xfs_quota_mplist_class);
qinf->qi_dqreclaims = 0;
@@ -1150,7 +1164,8 @@ xfs_qm_destroy_quotainfo(
*/
xfs_qm_rele_quotafs_ref(mp);
- xfs_qm_list_destroy(&qi->qi_dqlist);
+ ASSERT(list_empty(&qi->qi_dqlist));
+ mutex_destroy(&qi->qi_dqlist_lock);
if (qi->qi_uquotaip) {
IRELE(qi->qi_uquotaip);
@@ -1177,7 +1192,7 @@ xfs_qm_list_init(
int n)
{
mutex_init(&list->qh_lock);
- list->qh_next = NULL;
+ INIT_LIST_HEAD(&list->qh_list);
list->qh_version = 0;
list->qh_nelems = 0;
}
@@ -1316,9 +1331,6 @@ xfs_qm_qino_alloc(
*/
spin_lock(&mp->m_sb_lock);
if (flags & XFS_QMOPT_SBVERSION) {
-#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
- unsigned oldv = mp->m_sb.sb_versionnum;
-#endif
ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
XFS_SB_GQUOTINO | XFS_SB_QFLAGS)) ==
@@ -1331,11 +1343,6 @@ xfs_qm_qino_alloc(
/* qflags will get updated _after_ quotacheck */
mp->m_sb.sb_qflags = 0;
-#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
- cmn_err(CE_NOTE,
- "Old superblock version %x, converting to %x.",
- oldv, mp->m_sb.sb_versionnum);
-#endif
}
if (flags & XFS_QMOPT_UQUOTA)
mp->m_sb.sb_uquotino = (*ip)->i_ino;
@@ -1371,10 +1378,10 @@ xfs_qm_reset_dqcounts(
#ifdef DEBUG
j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
do_div(j, sizeof(xfs_dqblk_t));
- ASSERT(XFS_QM_DQPERBLK(mp) == j);
+ ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
ddq = (xfs_disk_dquot_t *)XFS_BUF_PTR(bp);
- for (j = 0; j < XFS_QM_DQPERBLK(mp); j++) {
+ for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
/*
* Do a sanity check, and if needed, repair the dqblk. Don't
* output any warnings because it's perfectly possible to
@@ -1429,7 +1436,7 @@ xfs_qm_dqiter_bufs(
while (blkcnt--) {
error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
XFS_FSB_TO_DADDR(mp, bno),
- (int)XFS_QI_DQCHUNKLEN(mp), 0, &bp);
+ mp->m_quotainfo->qi_dqchunklen, 0, &bp);
if (error)
break;
@@ -1439,7 +1446,7 @@ xfs_qm_dqiter_bufs(
* goto the next block.
*/
bno++;
- firstid += XFS_QM_DQPERBLK(mp);
+ firstid += mp->m_quotainfo->qi_dqperchunk;
}
return error;
}
@@ -1505,7 +1512,7 @@ xfs_qm_dqiterate(
continue;
firstid = (xfs_dqid_t) map[i].br_startoff *
- XFS_QM_DQPERBLK(mp);
+ mp->m_quotainfo->qi_dqperchunk;
/*
* Do a read-ahead on the next extent.
*/
@@ -1516,7 +1523,7 @@ xfs_qm_dqiterate(
while (rablkcnt--) {
xfs_baread(mp->m_ddev_targp,
XFS_FSB_TO_DADDR(mp, rablkno),
- (int)XFS_QI_DQCHUNKLEN(mp));
+ mp->m_quotainfo->qi_dqchunklen);
rablkno++;
}
}
@@ -1576,8 +1583,10 @@ xfs_qm_quotacheck_dqadjust(
/*
* Set default limits, adjust timers (since we changed usages)
+ *
+ * There are no timers for the default values set in the root dquot.
*/
- if (! XFS_IS_SUSER_DQUOT(dqp)) {
+ if (dqp->q_core.d_id) {
xfs_qm_adjust_dqlimits(dqp->q_mount, &dqp->q_core);
xfs_qm_adjust_dqtimers(dqp->q_mount, &dqp->q_core);
}
@@ -1747,14 +1756,14 @@ xfs_qm_quotacheck(
lastino = 0;
flags = 0;
- ASSERT(XFS_QI_UQIP(mp) || XFS_QI_GQIP(mp));
+ ASSERT(mp->m_quotainfo->qi_uquotaip || mp->m_quotainfo->qi_gquotaip);
ASSERT(XFS_IS_QUOTA_RUNNING(mp));
/*
* There should be no cached dquots. The (simplistic) quotacheck
* algorithm doesn't like that.
*/
- ASSERT(XFS_QI_MPLNDQUOTS(mp) == 0);
+ ASSERT(list_empty(&mp->m_quotainfo->qi_dqlist));
cmn_err(CE_NOTE, "XFS quotacheck %s: Please wait.", mp->m_fsname);
@@ -1763,15 +1772,19 @@ xfs_qm_quotacheck(
* their counters to zero. We need a clean slate.
* We don't log our changes till later.
*/
- if ((uip = XFS_QI_UQIP(mp))) {
- if ((error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA)))
+ uip = mp->m_quotainfo->qi_uquotaip;
+ if (uip) {
+ error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA);
+ if (error)
goto error_return;
flags |= XFS_UQUOTA_CHKD;
}
- if ((gip = XFS_QI_GQIP(mp))) {
- if ((error = xfs_qm_dqiterate(mp, gip, XFS_IS_GQUOTA_ON(mp) ?
- XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA)))
+ gip = mp->m_quotainfo->qi_gquotaip;
+ if (gip) {
+ error = xfs_qm_dqiterate(mp, gip, XFS_IS_GQUOTA_ON(mp) ?
+ XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA);
+ if (error)
goto error_return;
flags |= XFS_OQUOTA_CHKD;
}
@@ -1804,7 +1817,7 @@ xfs_qm_quotacheck(
* at this point (because we intentionally didn't in dqget_noattach).
*/
if (error) {
- xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_QUOTAOFF);
+ xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
goto error_return;
}
@@ -1825,7 +1838,7 @@ xfs_qm_quotacheck(
mp->m_qflags &= ~(XFS_OQUOTA_CHKD | XFS_UQUOTA_CHKD);
mp->m_qflags |= flags;
- XQM_LIST_PRINT(&(XFS_QI_MPL_LIST(mp)), MPL_NEXT, "++++ Mp list +++");
+ xfs_qm_dquot_list_print(mp);
error_return:
if (error) {
@@ -1920,59 +1933,53 @@ xfs_qm_init_quotainos(
}
}
- XFS_QI_UQIP(mp) = uip;
- XFS_QI_GQIP(mp) = gip;
+ mp->m_quotainfo->qi_uquotaip = uip;
+ mp->m_quotainfo->qi_gquotaip = gip;
return 0;
}
+
/*
- * Traverse the freelist of dquots and attempt to reclaim a maximum of
- * 'howmany' dquots. This operation races with dqlookup(), and attempts to
- * favor the lookup function ...
- * XXXsup merge this with qm_reclaim_one().
+ * Just pop the least recently used dquot off the freelist and
+ * recycle it. The returned dquot is locked.
*/
-STATIC int
-xfs_qm_shake_freelist(
- int howmany)
+STATIC xfs_dquot_t *
+xfs_qm_dqreclaim_one(void)
{
- int nreclaimed;
- xfs_dqhash_t *hash;
- xfs_dquot_t *dqp, *nextdqp;
+ xfs_dquot_t *dqpout;
+ xfs_dquot_t *dqp;
int restarts;
- int nflushes;
-
- if (howmany <= 0)
- return 0;
- nreclaimed = 0;
restarts = 0;
- nflushes = 0;
+ dqpout = NULL;
-#ifdef QUOTADEBUG
- cmn_err(CE_DEBUG, "Shake free 0x%x", howmany);
-#endif
- /* lock order is : hashchainlock, freelistlock, mplistlock */
- tryagain:
- xfs_qm_freelist_lock(xfs_Gqm);
+ /* lockorder: hashchainlock, freelistlock, mplistlock, dqlock, dqflock */
+startagain:
+ mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
- for (dqp = xfs_Gqm->qm_dqfreelist.qh_next;
- ((dqp != (xfs_dquot_t *) &xfs_Gqm->qm_dqfreelist) &&
- nreclaimed < howmany); ) {
+ list_for_each_entry(dqp, &xfs_Gqm->qm_dqfrlist, q_freelist) {
+ struct xfs_mount *mp = dqp->q_mount;
xfs_dqlock(dqp);
/*
* We are racing with dqlookup here. Naturally we don't
- * want to reclaim a dquot that lookup wants.
+ * want to reclaim a dquot that lookup wants. We release the
+ * freelist lock and start over, so that lookup will grab
+ * both the dquot and the freelistlock.
*/
if (dqp->dq_flags & XFS_DQ_WANT) {
+ ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE));
+
+ trace_xfs_dqreclaim_want(dqp);
+
xfs_dqunlock(dqp);
- xfs_qm_freelist_unlock(xfs_Gqm);
+ mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
- return nreclaimed;
+ return NULL;
XQM_STATS_INC(xqmstats.xs_qm_dqwants);
- goto tryagain;
+ goto startagain;
}
/*
@@ -1981,23 +1988,27 @@ xfs_qm_shake_freelist(
* life easier.
*/
if (dqp->dq_flags & XFS_DQ_INACTIVE) {
- ASSERT(dqp->q_mount == NULL);
+ ASSERT(mp == NULL);
ASSERT(! XFS_DQ_IS_DIRTY(dqp));
- ASSERT(dqp->HL_PREVP == NULL);
- ASSERT(dqp->MPL_PREVP == NULL);
+ ASSERT(list_empty(&dqp->q_hashlist));
+ ASSERT(list_empty(&dqp->q_mplist));
+ list_del_init(&dqp->q_freelist);
+ xfs_Gqm->qm_dqfrlist_cnt--;
+ xfs_dqunlock(dqp);
+ dqpout = dqp;
XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims);
- nextdqp = dqp->dq_flnext;
- goto off_freelist;
+ break;
}
- ASSERT(dqp->MPL_PREVP);
+ ASSERT(dqp->q_hash);
+ ASSERT(!list_empty(&dqp->q_mplist));
+
/*
* Try to grab the flush lock. If this dquot is in the process of
* getting flushed to disk, we don't want to reclaim it.
*/
if (!xfs_dqflock_nowait(dqp)) {
xfs_dqunlock(dqp);
- dqp = dqp->dq_flnext;
continue;
}
@@ -2010,21 +2021,21 @@ xfs_qm_shake_freelist(
if (XFS_DQ_IS_DIRTY(dqp)) {
int error;
- trace_xfs_dqshake_dirty(dqp);
+ trace_xfs_dqreclaim_dirty(dqp);
/*
* We flush it delayed write, so don't bother
- * releasing the mplock.
+ * releasing the freelist lock.
*/
error = xfs_qm_dqflush(dqp, 0);
if (error) {
- xfs_fs_cmn_err(CE_WARN, dqp->q_mount,
- "xfs_qm_dqflush_all: dquot %p flush failed", dqp);
+ xfs_fs_cmn_err(CE_WARN, mp,
+ "xfs_qm_dqreclaim: dquot %p flush failed", dqp);
}
xfs_dqunlock(dqp); /* dqflush unlocks dqflock */
- dqp = dqp->dq_flnext;
continue;
}
+
/*
* We're trying to get the hashlock out of order. This races
* with dqlookup; so, we give up and go to the next dquot if
@@ -2033,56 +2044,74 @@ xfs_qm_shake_freelist(
* waiting for the freelist lock.
*/
if (!mutex_trylock(&dqp->q_hash->qh_lock)) {
- xfs_dqfunlock(dqp);
- xfs_dqunlock(dqp);
- dqp = dqp->dq_flnext;
- continue;
+ restarts++;
+ goto dqfunlock;
}
+
/*
* This races with dquot allocation code as well as dqflush_all
* and reclaim code. So, if we failed to grab the mplist lock,
* give up everything and start over.
*/
- hash = dqp->q_hash;
- ASSERT(hash);
- if (! xfs_qm_mplist_nowait(dqp->q_mount)) {
- /* XXX put a sentinel so that we can come back here */
+ if (!mutex_trylock(&mp->m_quotainfo->qi_dqlist_lock)) {
+ restarts++;
+ mutex_unlock(&dqp->q_hash->qh_lock);
xfs_dqfunlock(dqp);
xfs_dqunlock(dqp);
- mutex_unlock(&hash->qh_lock);
- xfs_qm_freelist_unlock(xfs_Gqm);
- if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
- return nreclaimed;
- goto tryagain;
+ mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
+ if (restarts++ >= XFS_QM_RECLAIM_MAX_RESTARTS)
+ return NULL;
+ goto startagain;
}
- trace_xfs_dqshake_unlink(dqp);
-
-#ifdef QUOTADEBUG
- cmn_err(CE_DEBUG, "Shake 0x%p, ID 0x%x\n",
- dqp, be32_to_cpu(dqp->q_core.d_id));
-#endif
ASSERT(dqp->q_nrefs == 0);
- nextdqp = dqp->dq_flnext;
- XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(dqp->q_mount)), dqp);
- XQM_HASHLIST_REMOVE(hash, dqp);
+ list_del_init(&dqp->q_mplist);
+ mp->m_quotainfo->qi_dquots--;
+ mp->m_quotainfo->qi_dqreclaims++;
+ list_del_init(&dqp->q_hashlist);
+ dqp->q_hash->qh_version++;
+ list_del_init(&dqp->q_freelist);
+ xfs_Gqm->qm_dqfrlist_cnt--;
+ dqpout = dqp;
+ mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);
+ mutex_unlock(&dqp->q_hash->qh_lock);
+dqfunlock:
xfs_dqfunlock(dqp);
- xfs_qm_mplist_unlock(dqp->q_mount);
- mutex_unlock(&hash->qh_lock);
-
- off_freelist:
- XQM_FREELIST_REMOVE(dqp);
xfs_dqunlock(dqp);
- nreclaimed++;
- XQM_STATS_INC(xqmstats.xs_qm_dqshake_reclaims);
+ if (dqpout)
+ break;
+ if (restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
+ break;
+ }
+ mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
+ return dqpout;
+}
+
+/*
+ * Traverse the freelist of dquots and attempt to reclaim a maximum of
+ * 'howmany' dquots. This operation races with dqlookup(), and attempts to
+ * favor the lookup function ...
+ */
+STATIC int
+xfs_qm_shake_freelist(
+ int howmany)
+{
+ int nreclaimed = 0;
+ xfs_dquot_t *dqp;
+
+ if (howmany <= 0)
+ return 0;
+
+ while (nreclaimed < howmany) {
+ dqp = xfs_qm_dqreclaim_one();
+ if (!dqp)
+ return nreclaimed;
xfs_qm_dqdestroy(dqp);
- dqp = nextdqp;
+ nreclaimed++;
}
- xfs_qm_freelist_unlock(xfs_Gqm);
return nreclaimed;
}
-
/*
* The kmem_shake interface is invoked when memory is running low.
*/
@@ -2097,7 +2126,7 @@ xfs_qm_shake(int nr_to_scan, gfp_t gfp_mask)
if (!xfs_Gqm)
return 0;
- nfree = xfs_Gqm->qm_dqfreelist.qh_nelems; /* free dquots */
+ nfree = xfs_Gqm->qm_dqfrlist_cnt; /* free dquots */
/* incore dquots in all f/s's */
ndqused = atomic_read(&xfs_Gqm->qm_totaldquots) - nfree;
@@ -2113,131 +2142,6 @@ xfs_qm_shake(int nr_to_scan, gfp_t gfp_mask)
}
-/*
- * Just pop the least recently used dquot off the freelist and
- * recycle it. The returned dquot is locked.
- */
-STATIC xfs_dquot_t *
-xfs_qm_dqreclaim_one(void)
-{
- xfs_dquot_t *dqpout;
- xfs_dquot_t *dqp;
- int restarts;
- int nflushes;
-
- restarts = 0;
- dqpout = NULL;
- nflushes = 0;
-
- /* lockorder: hashchainlock, freelistlock, mplistlock, dqlock, dqflock */
- startagain:
- xfs_qm_freelist_lock(xfs_Gqm);
-
- FOREACH_DQUOT_IN_FREELIST(dqp, &(xfs_Gqm->qm_dqfreelist)) {
- xfs_dqlock(dqp);
-
- /*
- * We are racing with dqlookup here. Naturally we don't
- * want to reclaim a dquot that lookup wants. We release the
- * freelist lock and start over, so that lookup will grab
- * both the dquot and the freelistlock.
- */
- if (dqp->dq_flags & XFS_DQ_WANT) {
- ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE));
-
- trace_xfs_dqreclaim_want(dqp);
-
- xfs_dqunlock(dqp);
- xfs_qm_freelist_unlock(xfs_Gqm);
- if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
- return NULL;
- XQM_STATS_INC(xqmstats.xs_qm_dqwants);
- goto startagain;
- }
-
- /*
- * If the dquot is inactive, we are assured that it is
- * not on the mplist or the hashlist, and that makes our
- * life easier.
- */
- if (dqp->dq_flags & XFS_DQ_INACTIVE) {
- ASSERT(dqp->q_mount == NULL);
- ASSERT(! XFS_DQ_IS_DIRTY(dqp));
- ASSERT(dqp->HL_PREVP == NULL);
- ASSERT(dqp->MPL_PREVP == NULL);
- XQM_FREELIST_REMOVE(dqp);
- xfs_dqunlock(dqp);
- dqpout = dqp;
- XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims);
- break;
- }
-
- ASSERT(dqp->q_hash);
- ASSERT(dqp->MPL_PREVP);
-
- /*
- * Try to grab the flush lock. If this dquot is in the process of
- * getting flushed to disk, we don't want to reclaim it.
- */
- if (!xfs_dqflock_nowait(dqp)) {
- xfs_dqunlock(dqp);
- continue;
- }
-
- /*
- * We have the flush lock so we know that this is not in the
- * process of being flushed. So, if this is dirty, flush it
- * DELWRI so that we don't get a freelist infested with
- * dirty dquots.
- */
- if (XFS_DQ_IS_DIRTY(dqp)) {
- int error;
-
- trace_xfs_dqreclaim_dirty(dqp);
-
- /*
- * We flush it delayed write, so don't bother
- * releasing the freelist lock.
- */
- error = xfs_qm_dqflush(dqp, 0);
- if (error) {
- xfs_fs_cmn_err(CE_WARN, dqp->q_mount,
- "xfs_qm_dqreclaim: dquot %p flush failed", dqp);
- }
- xfs_dqunlock(dqp); /* dqflush unlocks dqflock */
- continue;
- }
-
- if (! xfs_qm_mplist_nowait(dqp->q_mount)) {
- xfs_dqfunlock(dqp);
- xfs_dqunlock(dqp);
- continue;
- }
-
- if (!mutex_trylock(&dqp->q_hash->qh_lock))
- goto mplistunlock;
-
- trace_xfs_dqreclaim_unlink(dqp);
-
- ASSERT(dqp->q_nrefs == 0);
- XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(dqp->q_mount)), dqp);
- XQM_HASHLIST_REMOVE(dqp->q_hash, dqp);
- XQM_FREELIST_REMOVE(dqp);
- dqpout = dqp;
- mutex_unlock(&dqp->q_hash->qh_lock);
- mplistunlock:
- xfs_qm_mplist_unlock(dqp->q_mount);
- xfs_dqfunlock(dqp);
- xfs_dqunlock(dqp);
- if (dqpout)
- break;
- }
-
- xfs_qm_freelist_unlock(xfs_Gqm);
- return dqpout;
-}
-
-
/*------------------------------------------------------------------*/
/*
@@ -2662,66 +2566,3 @@ xfs_qm_vop_create_dqattach(
}
}
-/* ------------- list stuff -----------------*/
-STATIC void
-xfs_qm_freelist_init(xfs_frlist_t *ql)
-{
- ql->qh_next = ql->qh_prev = (xfs_dquot_t *) ql;
- mutex_init(&ql->qh_lock);
- ql->qh_version = 0;
- ql->qh_nelems = 0;
-}
-
-STATIC void
-xfs_qm_freelist_destroy(xfs_frlist_t *ql)
-{
- xfs_dquot_t *dqp, *nextdqp;
-
- mutex_lock(&ql->qh_lock);
- for (dqp = ql->qh_next;
- dqp != (xfs_dquot_t *)ql; ) {
- xfs_dqlock(dqp);
- nextdqp = dqp->dq_flnext;
-#ifdef QUOTADEBUG
- cmn_err(CE_DEBUG, "FREELIST destroy 0x%p", dqp);
-#endif
- XQM_FREELIST_REMOVE(dqp);
- xfs_dqunlock(dqp);
- xfs_qm_dqdestroy(dqp);
- dqp = nextdqp;
- }
- mutex_unlock(&ql->qh_lock);
- mutex_destroy(&ql->qh_lock);
-
- ASSERT(ql->qh_nelems == 0);
-}
-
-STATIC void
-xfs_qm_freelist_insert(xfs_frlist_t *ql, xfs_dquot_t *dq)
-{
- dq->dq_flnext = ql->qh_next;
- dq->dq_flprev = (xfs_dquot_t *)ql;
- ql->qh_next = dq;
- dq->dq_flnext->dq_flprev = dq;
- xfs_Gqm->qm_dqfreelist.qh_nelems++;
- xfs_Gqm->qm_dqfreelist.qh_version++;
-}
-
-void
-xfs_qm_freelist_unlink(xfs_dquot_t *dq)
-{
- xfs_dquot_t *next = dq->dq_flnext;
- xfs_dquot_t *prev = dq->dq_flprev;
-
- next->dq_flprev = prev;
- prev->dq_flnext = next;
- dq->dq_flnext = dq->dq_flprev = dq;
- xfs_Gqm->qm_dqfreelist.qh_nelems--;
- xfs_Gqm->qm_dqfreelist.qh_version++;
-}
-
-void
-xfs_qm_freelist_append(xfs_frlist_t *ql, xfs_dquot_t *dq)
-{
- xfs_qm_freelist_insert((xfs_frlist_t *)ql->qh_prev, dq);
-}
diff --git a/fs/xfs/quota/xfs_qm.h b/fs/xfs/quota/xfs_qm.h
index 495564b8af38..c9446f1c726d 100644
--- a/fs/xfs/quota/xfs_qm.h
+++ b/fs/xfs/quota/xfs_qm.h
@@ -72,17 +72,6 @@ extern kmem_zone_t *qm_dqtrxzone;
#define XFS_QM_MAX_DQCLUSTER_LOGSZ 3
typedef xfs_dqhash_t xfs_dqlist_t;
-/*
- * The freelist head. The first two fields match the first two in the
- * xfs_dquot_t structure (in xfs_dqmarker_t)
- */
-typedef struct xfs_frlist {
- struct xfs_dquot *qh_next;
- struct xfs_dquot *qh_prev;
- struct mutex qh_lock;
- uint qh_version;
- uint qh_nelems;
-} xfs_frlist_t;
/*
* Quota Manager (global) structure. Lives only in core.
@@ -91,7 +80,9 @@ typedef struct xfs_qm {
xfs_dqlist_t *qm_usr_dqhtable;/* udquot hash table */
xfs_dqlist_t *qm_grp_dqhtable;/* gdquot hash table */
uint qm_dqhashmask; /* # buckets in dq hashtab - 1 */
- xfs_frlist_t qm_dqfreelist; /* freelist of dquots */
+ struct list_head qm_dqfrlist; /* freelist of dquots */
+ struct mutex qm_dqfrlist_lock;
+ int qm_dqfrlist_cnt;
atomic_t qm_totaldquots; /* total incore dquots */
uint qm_nrefs; /* file systems with quota on */
int qm_dqfree_ratio;/* ratio of free to inuse dquots */
@@ -106,7 +97,9 @@ typedef struct xfs_qm {
typedef struct xfs_quotainfo {
xfs_inode_t *qi_uquotaip; /* user quota inode */
xfs_inode_t *qi_gquotaip; /* group quota inode */
- xfs_dqlist_t qi_dqlist; /* all dquots in filesys */
+ struct list_head qi_dqlist; /* all dquots in filesys */
+ struct mutex qi_dqlist_lock;
+ int qi_dquots;
int qi_dqreclaims; /* a change here indicates
a removal in the dqlist */
time_t qi_btimelimit; /* limit for blks timer */
@@ -175,10 +168,6 @@ extern int xfs_qm_scall_getqstat(xfs_mount_t *, fs_quota_stat_t *);
extern int xfs_qm_scall_quotaon(xfs_mount_t *, uint);
extern int xfs_qm_scall_quotaoff(xfs_mount_t *, uint);
-/* list stuff */
-extern void xfs_qm_freelist_append(xfs_frlist_t *, xfs_dquot_t *);
-extern void xfs_qm_freelist_unlink(xfs_dquot_t *);
-
#ifdef DEBUG
extern int xfs_qm_internalqcheck(xfs_mount_t *);
#else
diff --git a/fs/xfs/quota/xfs_qm_stats.c b/fs/xfs/quota/xfs_qm_stats.c
index 83e7ea3e25fa..3d1fc79532e2 100644
--- a/fs/xfs/quota/xfs_qm_stats.c
+++ b/fs/xfs/quota/xfs_qm_stats.c
@@ -55,7 +55,7 @@ static int xqm_proc_show(struct seq_file *m, void *v)
ndquot,
xfs_Gqm? atomic_read(&xfs_Gqm->qm_totaldquots) : 0,
xfs_Gqm? xfs_Gqm->qm_dqfree_ratio : 0,
- xfs_Gqm? xfs_Gqm->qm_dqfreelist.qh_nelems : 0);
+ xfs_Gqm? xfs_Gqm->qm_dqfrlist_cnt : 0);
return 0;
}
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index 50bee07d6b0e..92b002f1805f 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -79,6 +79,7 @@ xfs_qm_scall_quotaoff(
xfs_mount_t *mp,
uint flags)
{
+ struct xfs_quotainfo *q = mp->m_quotainfo;
uint dqtype;
int error;
uint inactivate_flags;
@@ -102,11 +103,8 @@ xfs_qm_scall_quotaoff(
* critical thing.
* If quotaoff, then we must be dealing with the root filesystem.
*/
- ASSERT(mp->m_quotainfo);
- if (mp->m_quotainfo)
- mutex_lock(&(XFS_QI_QOFFLOCK(mp)));
-
- ASSERT(mp->m_quotainfo);
+ ASSERT(q);
+ mutex_lock(&q->qi_quotaofflock);
/*
* If we're just turning off quota enforcement, change mp and go.
@@ -117,7 +115,7 @@ xfs_qm_scall_quotaoff(
spin_lock(&mp->m_sb_lock);
mp->m_sb.sb_qflags = mp->m_qflags;
spin_unlock(&mp->m_sb_lock);
- mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
+ mutex_unlock(&q->qi_quotaofflock);
/* XXX what to do if error ? Revert back to old vals incore ? */
error = xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS);
@@ -150,10 +148,8 @@ xfs_qm_scall_quotaoff(
* Nothing to do? Don't complain. This happens when we're just
* turning off quota enforcement.
*/
- if ((mp->m_qflags & flags) == 0) {
- mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
- return (0);
- }
+ if ((mp->m_qflags & flags) == 0)
+ goto out_unlock;
/*
* Write the LI_QUOTAOFF log record, and do SB changes atomically,
@@ -162,7 +158,7 @@ xfs_qm_scall_quotaoff(
*/
error = xfs_qm_log_quotaoff(mp, &qoffstart, flags);
if (error)
- goto out_error;
+ goto out_unlock;
/*
* Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct
@@ -204,7 +200,7 @@ xfs_qm_scall_quotaoff(
* So, if we couldn't purge all the dquots from the filesystem,
* we can't get rid of the incore data structures.
*/
- while ((nculprits = xfs_qm_dqpurge_all(mp, dqtype|XFS_QMOPT_QUOTAOFF)))
+ while ((nculprits = xfs_qm_dqpurge_all(mp, dqtype)))
delay(10 * nculprits);
/*
@@ -222,7 +218,7 @@ xfs_qm_scall_quotaoff(
if (error) {
/* We're screwed now. Shutdown is the only option. */
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
- goto out_error;
+ goto out_unlock;
}
/*
@@ -230,27 +226,26 @@ xfs_qm_scall_quotaoff(
*/
if (((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET1) ||
((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET2)) {
- mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
+ mutex_unlock(&q->qi_quotaofflock);
xfs_qm_destroy_quotainfo(mp);
return (0);
}
/*
- * Release our quotainode references, and vn_purge them,
- * if we don't need them anymore.
+ * Release our quotainode references if we don't need them anymore.
*/
- if ((dqtype & XFS_QMOPT_UQUOTA) && XFS_QI_UQIP(mp)) {
- IRELE(XFS_QI_UQIP(mp));
- XFS_QI_UQIP(mp) = NULL;
+ if ((dqtype & XFS_QMOPT_UQUOTA) && q->qi_uquotaip) {
+ IRELE(q->qi_uquotaip);
+ q->qi_uquotaip = NULL;
}
- if ((dqtype & (XFS_QMOPT_GQUOTA|XFS_QMOPT_PQUOTA)) && XFS_QI_GQIP(mp)) {
- IRELE(XFS_QI_GQIP(mp));
- XFS_QI_GQIP(mp) = NULL;
+ if ((dqtype & (XFS_QMOPT_GQUOTA|XFS_QMOPT_PQUOTA)) && q->qi_gquotaip) {
+ IRELE(q->qi_gquotaip);
+ q->qi_gquotaip = NULL;
}
-out_error:
- mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
- return (error);
+out_unlock:
+ mutex_unlock(&q->qi_quotaofflock);
+ return error;
}
int
@@ -379,9 +374,9 @@ xfs_qm_scall_quotaon(
/*
* Switch on quota enforcement in core.
*/
- mutex_lock(&(XFS_QI_QOFFLOCK(mp)));
+ mutex_lock(&mp->m_quotainfo->qi_quotaofflock);
mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD);
- mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
+ mutex_unlock(&mp->m_quotainfo->qi_quotaofflock);
return (0);
}
@@ -392,11 +387,12 @@ xfs_qm_scall_quotaon(
*/
int
xfs_qm_scall_getqstat(
- xfs_mount_t *mp,
- fs_quota_stat_t *out)
+ struct xfs_mount *mp,
+ struct fs_quota_stat *out)
{
- xfs_inode_t *uip, *gip;
- boolean_t tempuqip, tempgqip;
+ struct xfs_quotainfo *q = mp->m_quotainfo;
+ struct xfs_inode *uip, *gip;
+ boolean_t tempuqip, tempgqip;
uip = gip = NULL;
tempuqip = tempgqip = B_FALSE;
@@ -415,9 +411,9 @@ xfs_qm_scall_getqstat(
out->qs_uquota.qfs_ino = mp->m_sb.sb_uquotino;
out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino;
- if (mp->m_quotainfo) {
- uip = mp->m_quotainfo->qi_uquotaip;
- gip = mp->m_quotainfo->qi_gquotaip;
+ if (q) {
+ uip = q->qi_uquotaip;
+ gip = q->qi_gquotaip;
}
if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) {
if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
@@ -441,17 +437,20 @@ xfs_qm_scall_getqstat(
if (tempgqip)
IRELE(gip);
}
- if (mp->m_quotainfo) {
- out->qs_incoredqs = XFS_QI_MPLNDQUOTS(mp);
- out->qs_btimelimit = XFS_QI_BTIMELIMIT(mp);
- out->qs_itimelimit = XFS_QI_ITIMELIMIT(mp);
- out->qs_rtbtimelimit = XFS_QI_RTBTIMELIMIT(mp);
- out->qs_bwarnlimit = XFS_QI_BWARNLIMIT(mp);
- out->qs_iwarnlimit = XFS_QI_IWARNLIMIT(mp);
+ if (q) {
+ out->qs_incoredqs = q->qi_dquots;
+ out->qs_btimelimit = q->qi_btimelimit;
+ out->qs_itimelimit = q->qi_itimelimit;
+ out->qs_rtbtimelimit = q->qi_rtbtimelimit;
+ out->qs_bwarnlimit = q->qi_bwarnlimit;
+ out->qs_iwarnlimit = q->qi_iwarnlimit;
}
- return (0);
+ return 0;
}
+#define XFS_DQ_MASK \
+ (FS_DQ_LIMIT_MASK | FS_DQ_TIMER_MASK | FS_DQ_WARNS_MASK)
+
/*
* Adjust quota limits, and start/stop timers accordingly.
*/
@@ -462,15 +461,17 @@ xfs_qm_scall_setqlim(
uint type,
fs_disk_quota_t *newlim)
{
+ struct xfs_quotainfo *q = mp->m_quotainfo;
xfs_disk_dquot_t *ddq;
xfs_dquot_t *dqp;
xfs_trans_t *tp;
int error;
xfs_qcnt_t hard, soft;
- if ((newlim->d_fieldmask &
- (FS_DQ_LIMIT_MASK|FS_DQ_TIMER_MASK|FS_DQ_WARNS_MASK)) == 0)
- return (0);
+ if (newlim->d_fieldmask & ~XFS_DQ_MASK)
+ return EINVAL;
+ if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0)
+ return 0;
tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_disk_dquot_t) + 128,
@@ -485,7 +486,7 @@ xfs_qm_scall_setqlim(
* a quotaoff from happening). (XXXThis doesn't currently happen
* because we take the vfslock before calling xfs_qm_sysent).
*/
- mutex_lock(&(XFS_QI_QOFFLOCK(mp)));
+ mutex_lock(&q->qi_quotaofflock);
/*
* Get the dquot (locked), and join it to the transaction.
@@ -493,9 +494,8 @@ xfs_qm_scall_setqlim(
*/
if ((error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp))) {
xfs_trans_cancel(tp, XFS_TRANS_ABORT);
- mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
ASSERT(error != ENOENT);
- return (error);
+ goto out_unlock;
}
xfs_trans_dqjoin(tp, dqp);
ddq = &dqp->q_core;
@@ -513,8 +513,8 @@ xfs_qm_scall_setqlim(
ddq->d_blk_hardlimit = cpu_to_be64(hard);
ddq->d_blk_softlimit = cpu_to_be64(soft);
if (id == 0) {
- mp->m_quotainfo->qi_bhardlimit = hard;
- mp->m_quotainfo->qi_bsoftlimit = soft;
+ q->qi_bhardlimit = hard;
+ q->qi_bsoftlimit = soft;
}
} else {
qdprintk("blkhard %Ld < blksoft %Ld\n", hard, soft);
@@ -529,8 +529,8 @@ xfs_qm_scall_setqlim(
ddq->d_rtb_hardlimit = cpu_to_be64(hard);
ddq->d_rtb_softlimit = cpu_to_be64(soft);
if (id == 0) {
- mp->m_quotainfo->qi_rtbhardlimit = hard;
- mp->m_quotainfo->qi_rtbsoftlimit = soft;
+ q->qi_rtbhardlimit = hard;
+ q->qi_rtbsoftlimit = soft;
}
} else {
qdprintk("rtbhard %Ld < rtbsoft %Ld\n", hard, soft);
@@ -546,8 +546,8 @@ xfs_qm_scall_setqlim(
ddq->d_ino_hardlimit = cpu_to_be64(hard);
ddq->d_ino_softlimit = cpu_to_be64(soft);
if (id == 0) {
- mp->m_quotainfo->qi_ihardlimit = hard;
- mp->m_quotainfo->qi_isoftlimit = soft;
+ q->qi_ihardlimit = hard;
+ q->qi_isoftlimit = soft;
}
} else {
qdprintk("ihard %Ld < isoft %Ld\n", hard, soft);
@@ -572,23 +572,23 @@ xfs_qm_scall_setqlim(
* for warnings.
*/
if (newlim->d_fieldmask & FS_DQ_BTIMER) {
- mp->m_quotainfo->qi_btimelimit = newlim->d_btimer;
+ q->qi_btimelimit = newlim->d_btimer;
ddq->d_btimer = cpu_to_be32(newlim->d_btimer);
}
if (newlim->d_fieldmask & FS_DQ_ITIMER) {
- mp->m_quotainfo->qi_itimelimit = newlim->d_itimer;
+ q->qi_itimelimit = newlim->d_itimer;
ddq->d_itimer = cpu_to_be32(newlim->d_itimer);
}
if (newlim->d_fieldmask & FS_DQ_RTBTIMER) {
- mp->m_quotainfo->qi_rtbtimelimit = newlim->d_rtbtimer;
+ q->qi_rtbtimelimit = newlim->d_rtbtimer;
ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer);
}
if (newlim->d_fieldmask & FS_DQ_BWARNS)
- mp->m_quotainfo->qi_bwarnlimit = newlim->d_bwarns;
+ q->qi_bwarnlimit = newlim->d_bwarns;
if (newlim->d_fieldmask & FS_DQ_IWARNS)
- mp->m_quotainfo->qi_iwarnlimit = newlim->d_iwarns;
+ q->qi_iwarnlimit = newlim->d_iwarns;
if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
- mp->m_quotainfo->qi_rtbwarnlimit = newlim->d_rtbwarns;
+ q->qi_rtbwarnlimit = newlim->d_rtbwarns;
} else {
/*
* If the user is now over quota, start the timelimit.
@@ -605,8 +605,9 @@ xfs_qm_scall_setqlim(
error = xfs_trans_commit(tp, 0);
xfs_qm_dqprint(dqp);
xfs_qm_dqrele(dqp);
- mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
+ out_unlock:
+ mutex_unlock(&q->qi_quotaofflock);
return error;
}
@@ -853,7 +854,8 @@ xfs_dqrele_inode(
int error;
/* skip quota inodes */
- if (ip == XFS_QI_UQIP(ip->i_mount) || ip == XFS_QI_GQIP(ip->i_mount)) {
+ if (ip == ip->i_mount->m_quotainfo->qi_uquotaip ||
+ ip == ip->i_mount->m_quotainfo->qi_gquotaip) {
ASSERT(ip->i_udquot == NULL);
ASSERT(ip->i_gdquot == NULL);
read_unlock(&pag->pag_ici_lock);
@@ -931,7 +933,8 @@ struct mutex qcheck_lock;
}
typedef struct dqtest {
- xfs_dqmarker_t q_lists;
+ uint dq_flags; /* various flags (XFS_DQ_*) */
+ struct list_head q_hashlist;
xfs_dqhash_t *q_hash; /* the hashchain header */
xfs_mount_t *q_mount; /* filesystem this relates to */
xfs_dqid_t d_id; /* user id or group id */
@@ -942,14 +945,9 @@ typedef struct dqtest {
STATIC void
xfs_qm_hashinsert(xfs_dqhash_t *h, xfs_dqtest_t *dqp)
{
- xfs_dquot_t *d;
- if (((d) = (h)->qh_next))
- (d)->HL_PREVP = &((dqp)->HL_NEXT);
- (dqp)->HL_NEXT = d;
- (dqp)->HL_PREVP = &((h)->qh_next);
- (h)->qh_next = (xfs_dquot_t *)dqp;
- (h)->qh_version++;
- (h)->qh_nelems++;
+ list_add(&dqp->q_hashlist, &h->qh_list);
+ h->qh_version++;
+ h->qh_nelems++;
}
STATIC void
xfs_qm_dqtest_print(
@@ -1061,9 +1059,7 @@ xfs_qm_internalqcheck_dqget(
xfs_dqhash_t *h;
h = DQTEST_HASH(mp, id, type);
- for (d = (xfs_dqtest_t *) h->qh_next; d != NULL;
- d = (xfs_dqtest_t *) d->HL_NEXT) {
- /* DQTEST_LIST_PRINT(h, HL_NEXT, "@@@@@ dqtestlist @@@@@"); */
+ list_for_each_entry(d, &h->qh_list, q_hashlist) {
if (d->d_id == id && mp == d->q_mount) {
*O_dq = d;
return (0);
@@ -1074,6 +1070,7 @@ xfs_qm_internalqcheck_dqget(
d->d_id = id;
d->q_mount = mp;
d->q_hash = h;
+ INIT_LIST_HEAD(&d->q_hashlist);
xfs_qm_hashinsert(h, d);
*O_dq = d;
return (0);
@@ -1180,8 +1177,6 @@ xfs_qm_internalqcheck(
xfs_ino_t lastino;
int done, count;
int i;
- xfs_dqtest_t *d, *e;
- xfs_dqhash_t *h1;
int error;
lastino = 0;
@@ -1221,19 +1216,18 @@ xfs_qm_internalqcheck(
}
cmn_err(CE_DEBUG, "Checking results against system dquots");
for (i = 0; i < qmtest_hashmask; i++) {
- h1 = &qmtest_udqtab[i];
- for (d = (xfs_dqtest_t *) h1->qh_next; d != NULL; ) {
+ xfs_dqtest_t *d, *n;
+ xfs_dqhash_t *h;
+
+ h = &qmtest_udqtab[i];
+ list_for_each_entry_safe(d, n, &h->qh_list, q_hashlist) {
xfs_dqtest_cmp(d);
- e = (xfs_dqtest_t *) d->HL_NEXT;
kmem_free(d);
- d = e;
}
- h1 = &qmtest_gdqtab[i];
- for (d = (xfs_dqtest_t *) h1->qh_next; d != NULL; ) {
+ h = &qmtest_gdqtab[i];
+ list_for_each_entry_safe(d, n, &h->qh_list, q_hashlist) {
xfs_dqtest_cmp(d);
- e = (xfs_dqtest_t *) d->HL_NEXT;
kmem_free(d);
- d = e;
}
}
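The syscall hunks above also make a smaller structural change: mp->m_quotainfo is loaded once into a local q, and every failure branch funnels through a single out_unlock label so qi_quotaofflock is dropped in exactly one place. A sketch of that shape, with hypothetical step helpers standing in for the real work:

static int demo_quota_scall(struct xfs_mount *mp)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	int			error;

	mutex_lock(&q->qi_quotaofflock);

	error = demo_step_one(mp);	/* hypothetical helper */
	if (error)
		goto out_unlock;

	error = demo_step_two(mp);	/* hypothetical helper */

out_unlock:
	mutex_unlock(&q->qi_quotaofflock);
	return error;
}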
diff --git a/fs/xfs/quota/xfs_quota_priv.h b/fs/xfs/quota/xfs_quota_priv.h
index 8286b2842b6b..94a3d927d716 100644
--- a/fs/xfs/quota/xfs_quota_priv.h
+++ b/fs/xfs/quota/xfs_quota_priv.h
@@ -24,43 +24,6 @@
*/
#define XFS_DQITER_MAP_SIZE 10
-/* Number of dquots that fit in to a dquot block */
-#define XFS_QM_DQPERBLK(mp) ((mp)->m_quotainfo->qi_dqperchunk)
-
-#define XFS_DQ_IS_ADDEDTO_TRX(t, d) ((d)->q_transp == (t))
-
-#define XFS_QI_MPLRECLAIMS(mp) ((mp)->m_quotainfo->qi_dqreclaims)
-#define XFS_QI_UQIP(mp) ((mp)->m_quotainfo->qi_uquotaip)
-#define XFS_QI_GQIP(mp) ((mp)->m_quotainfo->qi_gquotaip)
-#define XFS_QI_DQCHUNKLEN(mp) ((mp)->m_quotainfo->qi_dqchunklen)
-#define XFS_QI_BTIMELIMIT(mp) ((mp)->m_quotainfo->qi_btimelimit)
-#define XFS_QI_RTBTIMELIMIT(mp) ((mp)->m_quotainfo->qi_rtbtimelimit)
-#define XFS_QI_ITIMELIMIT(mp) ((mp)->m_quotainfo->qi_itimelimit)
-#define XFS_QI_BWARNLIMIT(mp) ((mp)->m_quotainfo->qi_bwarnlimit)
-#define XFS_QI_RTBWARNLIMIT(mp) ((mp)->m_quotainfo->qi_rtbwarnlimit)
-#define XFS_QI_IWARNLIMIT(mp) ((mp)->m_quotainfo->qi_iwarnlimit)
-#define XFS_QI_QOFFLOCK(mp) ((mp)->m_quotainfo->qi_quotaofflock)
-
-#define XFS_QI_MPL_LIST(mp) ((mp)->m_quotainfo->qi_dqlist)
-#define XFS_QI_MPLNEXT(mp) ((mp)->m_quotainfo->qi_dqlist.qh_next)
-#define XFS_QI_MPLNDQUOTS(mp) ((mp)->m_quotainfo->qi_dqlist.qh_nelems)
-
-#define xfs_qm_mplist_lock(mp) \
- mutex_lock(&(XFS_QI_MPL_LIST(mp).qh_lock))
-#define xfs_qm_mplist_nowait(mp) \
- mutex_trylock(&(XFS_QI_MPL_LIST(mp).qh_lock))
-#define xfs_qm_mplist_unlock(mp) \
- mutex_unlock(&(XFS_QI_MPL_LIST(mp).qh_lock))
-#define XFS_QM_IS_MPLIST_LOCKED(mp) \
- mutex_is_locked(&(XFS_QI_MPL_LIST(mp).qh_lock))
-
-#define xfs_qm_freelist_lock(qm) \
- mutex_lock(&((qm)->qm_dqfreelist.qh_lock))
-#define xfs_qm_freelist_lock_nowait(qm) \
- mutex_trylock(&((qm)->qm_dqfreelist.qh_lock))
-#define xfs_qm_freelist_unlock(qm) \
- mutex_unlock(&((qm)->qm_dqfreelist.qh_lock))
-
/*
* Hash into a bucket in the dquot hash table, based on <mp, id>.
*/
@@ -72,9 +35,6 @@
XFS_DQ_HASHVAL(mp, id)) : \
(xfs_Gqm->qm_grp_dqhtable + \
XFS_DQ_HASHVAL(mp, id)))
-#define XFS_IS_DQTYPE_ON(mp, type) (type == XFS_DQ_USER ? \
- XFS_IS_UQUOTA_ON(mp) : \
- XFS_IS_OQUOTA_ON(mp))
#define XFS_IS_DQUOT_UNINITIALIZED(dqp) ( \
!dqp->q_core.d_blk_hardlimit && \
!dqp->q_core.d_blk_softlimit && \
@@ -86,68 +46,6 @@
!dqp->q_core.d_rtbcount && \
!dqp->q_core.d_icount)
-#define HL_PREVP dq_hashlist.ql_prevp
-#define HL_NEXT dq_hashlist.ql_next
-#define MPL_PREVP dq_mplist.ql_prevp
-#define MPL_NEXT dq_mplist.ql_next
-
-
-#define _LIST_REMOVE(h, dqp, PVP, NXT) \
- { \
- xfs_dquot_t *d; \
- if (((d) = (dqp)->NXT)) \
- (d)->PVP = (dqp)->PVP; \
- *((dqp)->PVP) = d; \
- (dqp)->NXT = NULL; \
- (dqp)->PVP = NULL; \
- (h)->qh_version++; \
- (h)->qh_nelems--; \
- }
-
-#define _LIST_INSERT(h, dqp, PVP, NXT) \
- { \
- xfs_dquot_t *d; \
- if (((d) = (h)->qh_next)) \
- (d)->PVP = &((dqp)->NXT); \
- (dqp)->NXT = d; \
- (dqp)->PVP = &((h)->qh_next); \
- (h)->qh_next = dqp; \
- (h)->qh_version++; \
- (h)->qh_nelems++; \
- }
-
-#define FOREACH_DQUOT_IN_MP(dqp, mp) \
- for ((dqp) = XFS_QI_MPLNEXT(mp); (dqp) != NULL; (dqp) = (dqp)->MPL_NEXT)
-
-#define FOREACH_DQUOT_IN_FREELIST(dqp, qlist) \
-for ((dqp) = (qlist)->qh_next; (dqp) != (xfs_dquot_t *)(qlist); \
- (dqp) = (dqp)->dq_flnext)
-
-#define XQM_HASHLIST_INSERT(h, dqp) \
- _LIST_INSERT(h, dqp, HL_PREVP, HL_NEXT)
-
-#define XQM_FREELIST_INSERT(h, dqp) \
- xfs_qm_freelist_append(h, dqp)
-
-#define XQM_MPLIST_INSERT(h, dqp) \
- _LIST_INSERT(h, dqp, MPL_PREVP, MPL_NEXT)
-
-#define XQM_HASHLIST_REMOVE(h, dqp) \
- _LIST_REMOVE(h, dqp, HL_PREVP, HL_NEXT)
-#define XQM_FREELIST_REMOVE(dqp) \
- xfs_qm_freelist_unlink(dqp)
-#define XQM_MPLIST_REMOVE(h, dqp) \
- { _LIST_REMOVE(h, dqp, MPL_PREVP, MPL_NEXT); \
- XFS_QI_MPLRECLAIMS((dqp)->q_mount)++; }
-
-#define XFS_DQ_IS_LOGITEM_INITD(dqp) ((dqp)->q_logitem.qli_dquot == (dqp))
-
-#define XFS_QM_DQP_TO_DQACCT(tp, dqp) (XFS_QM_ISUDQ(dqp) ? \
- (tp)->t_dqinfo->dqa_usrdquots : \
- (tp)->t_dqinfo->dqa_grpdquots)
-#define XFS_IS_SUSER_DQUOT(dqp) \
- (!((dqp)->q_core.d_id))
-
#define DQFLAGTO_TYPESTR(d) (((d)->dq_flags & XFS_DQ_USER) ? "USR" : \
(((d)->dq_flags & XFS_DQ_GROUP) ? "GRP" : \
(((d)->dq_flags & XFS_DQ_PROJ) ? "PRJ":"???")))
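With the call sites converted, xfs_quota_priv.h can drop its accessor and list macros; each use is now a plain field dereference through mp->m_quotainfo or a generic mutex/list operation, as the deletions above show. Roughly, the replacements amount to one-liners like these (illustrative, not part of the patch):

/* xfs_qm_mplist_lock(mp) is now spelled out as: */
static inline void demo_mplist_lock(struct xfs_mount *mp)
{
	mutex_lock(&mp->m_quotainfo->qi_dqlist_lock);
}

/* XFS_QI_BWARNLIMIT(mp) is now spelled out as: */
static inline uint demo_bwarnlimit(struct xfs_mount *mp)
{
	return mp->m_quotainfo->qi_bwarnlimit;
}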
diff --git a/fs/xfs/quota/xfs_trans_dquot.c b/fs/xfs/quota/xfs_trans_dquot.c
index c3ab75cb1d9a..061d827da33c 100644
--- a/fs/xfs/quota/xfs_trans_dquot.c
+++ b/fs/xfs/quota/xfs_trans_dquot.c
@@ -59,12 +59,11 @@ xfs_trans_dqjoin(
xfs_trans_t *tp,
xfs_dquot_t *dqp)
{
- xfs_dq_logitem_t *lp;
+ xfs_dq_logitem_t *lp = &dqp->q_logitem;
- ASSERT(! XFS_DQ_IS_ADDEDTO_TRX(tp, dqp));
+ ASSERT(dqp->q_transp != tp);
ASSERT(XFS_DQ_IS_LOCKED(dqp));
- ASSERT(XFS_DQ_IS_LOGITEM_INITD(dqp));
- lp = &dqp->q_logitem;
+ ASSERT(lp->qli_dquot == dqp);
/*
* Get a log_item_desc to point at the new item.
@@ -96,7 +95,7 @@ xfs_trans_log_dquot(
{
xfs_log_item_desc_t *lidp;
- ASSERT(XFS_DQ_IS_ADDEDTO_TRX(tp, dqp));
+ ASSERT(dqp->q_transp == tp);
ASSERT(XFS_DQ_IS_LOCKED(dqp));
lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)(&dqp->q_logitem));
@@ -198,16 +197,16 @@ xfs_trans_get_dqtrx(
int i;
xfs_dqtrx_t *qa;
- for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
- qa = XFS_QM_DQP_TO_DQACCT(tp, dqp);
+ qa = XFS_QM_ISUDQ(dqp) ?
+ tp->t_dqinfo->dqa_usrdquots : tp->t_dqinfo->dqa_grpdquots;
+ for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
if (qa[i].qt_dquot == NULL ||
- qa[i].qt_dquot == dqp) {
- return (&qa[i]);
- }
+ qa[i].qt_dquot == dqp)
+ return &qa[i];
}
- return (NULL);
+ return NULL;
}
/*
@@ -381,7 +380,7 @@ xfs_trans_apply_dquot_deltas(
break;
ASSERT(XFS_DQ_IS_LOCKED(dqp));
- ASSERT(XFS_DQ_IS_ADDEDTO_TRX(tp, dqp));
+ ASSERT(dqp->q_transp == tp);
/*
* adjust the actual number of blocks used
@@ -639,7 +638,7 @@ xfs_trans_dqresv(
softlimit = q->qi_bsoftlimit;
timer = be32_to_cpu(dqp->q_core.d_btimer);
warns = be16_to_cpu(dqp->q_core.d_bwarns);
- warnlimit = XFS_QI_BWARNLIMIT(dqp->q_mount);
+ warnlimit = dqp->q_mount->m_quotainfo->qi_bwarnlimit;
resbcountp = &dqp->q_res_bcount;
} else {
ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS);
@@ -651,7 +650,7 @@ xfs_trans_dqresv(
softlimit = q->qi_rtbsoftlimit;
timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
- warnlimit = XFS_QI_RTBWARNLIMIT(dqp->q_mount);
+ warnlimit = dqp->q_mount->m_quotainfo->qi_rtbwarnlimit;
resbcountp = &dqp->q_res_rtbcount;
}
@@ -691,7 +690,7 @@ xfs_trans_dqresv(
count = be64_to_cpu(dqp->q_core.d_icount);
timer = be32_to_cpu(dqp->q_core.d_itimer);
warns = be16_to_cpu(dqp->q_core.d_iwarns);
- warnlimit = XFS_QI_IWARNLIMIT(dqp->q_mount);
+ warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit;
hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
if (!hardlimit)
hardlimit = q->qi_ihardlimit;
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 5c11e4d17010..99587ded043f 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -3829,7 +3829,7 @@ xfs_bmap_add_attrfork(
}
if ((error = xfs_bmap_finish(&tp, &flist, &committed)))
goto error2;
- error = xfs_trans_commit(tp, XFS_TRANS_PERM_LOG_RES);
+ error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
ASSERT(ip->i_df.if_ext_max ==
XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
return error;
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index f3c49e69eab9..240340a4727b 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -372,12 +372,12 @@ xfs_buf_item_pin(
*/
STATIC void
xfs_buf_item_unpin(
- xfs_buf_log_item_t *bip,
- int stale)
+ xfs_buf_log_item_t *bip)
{
struct xfs_ail *ailp;
xfs_buf_t *bp;
int freed;
+ int stale = bip->bli_flags & XFS_BLI_STALE;
bp = bip->bli_buf;
ASSERT(bp != NULL);
@@ -428,40 +428,34 @@ xfs_buf_item_unpin_remove(
xfs_buf_log_item_t *bip,
xfs_trans_t *tp)
{
- xfs_buf_t *bp;
- xfs_log_item_desc_t *lidp;
- int stale = 0;
-
- bp = bip->bli_buf;
- /*
- * will xfs_buf_item_unpin() call xfs_buf_item_relse()?
- */
+ /* will xfs_buf_item_unpin() call xfs_buf_item_relse()? */
if ((atomic_read(&bip->bli_refcount) == 1) &&
(bip->bli_flags & XFS_BLI_STALE)) {
+ /*
+ * yes -- We can safely do some work here and then call
+ * buf_item_unpin to do the rest because we are
+ * holding the buffer locked so no one else will be
+ * able to bump up the refcount. We have to remove the
+ * log item from the transaction as we are about to release
+ * our reference to the buffer. If we don't, the unlock that
+ * occurs later in the xfs_trans_uncommit() will try to
+ * reference the buffer which we no longer have a hold on.
+ */
+ struct xfs_log_item_desc *lidp;
+
ASSERT(XFS_BUF_VALUSEMA(bip->bli_buf) <= 0);
trace_xfs_buf_item_unpin_stale(bip);
- /*
- * yes -- clear the xaction descriptor in-use flag
- * and free the chunk if required. We can safely
- * do some work here and then call buf_item_unpin
- * to do the rest because if the if is true, then
- * we are holding the buffer locked so no one else
- * will be able to bump up the refcount.
- */
- lidp = xfs_trans_find_item(tp, (xfs_log_item_t *) bip);
- stale = lidp->lid_flags & XFS_LID_BUF_STALE;
+ lidp = xfs_trans_find_item(tp, (xfs_log_item_t *)bip);
xfs_trans_free_item(tp, lidp);
+
/*
- * Since the transaction no longer refers to the buffer,
- * the buffer should no longer refer to the transaction.
+ * Since the transaction no longer refers to the buffer, the
+ * buffer should no longer refer to the transaction.
*/
- XFS_BUF_SET_FSPRIVATE2(bp, NULL);
+ XFS_BUF_SET_FSPRIVATE2(bip->bli_buf, NULL);
}
-
- xfs_buf_item_unpin(bip, stale);
-
- return;
+ xfs_buf_item_unpin(bip);
}
/*
@@ -675,7 +669,7 @@ static struct xfs_item_ops xfs_buf_item_ops = {
.iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
xfs_buf_item_format,
.iop_pin = (void(*)(xfs_log_item_t*))xfs_buf_item_pin,
- .iop_unpin = (void(*)(xfs_log_item_t*, int))xfs_buf_item_unpin,
+ .iop_unpin = (void(*)(xfs_log_item_t*))xfs_buf_item_unpin,
.iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t *))
xfs_buf_item_unpin_remove,
.iop_trylock = (uint(*)(xfs_log_item_t*))xfs_buf_item_trylock,
@@ -733,10 +727,7 @@ xfs_buf_item_init(
bip = (xfs_buf_log_item_t*)kmem_zone_zalloc(xfs_buf_item_zone,
KM_SLEEP);
- bip->bli_item.li_type = XFS_LI_BUF;
- bip->bli_item.li_ops = &xfs_buf_item_ops;
- bip->bli_item.li_mountp = mp;
- bip->bli_item.li_ailp = mp->m_ail;
+ xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
bip->bli_buf = bp;
xfs_buf_hold(bp);
bip->bli_format.blf_type = XFS_LI_BUF;
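The buffer log item shows the fullest version of the ->iop_unpin signature change running through this series: the extra 'stale' argument disappears from the callback, the ops-table cast loses its int parameter, and xfs_buf_item_unpin() recomputes staleness from its own flags. Condensed from the hunks above, the shape is:

/* old: .iop_unpin = (void(*)(xfs_log_item_t*, int))xfs_buf_item_unpin */
/* new: .iop_unpin = (void(*)(xfs_log_item_t*))xfs_buf_item_unpin */

STATIC void
xfs_buf_item_unpin(
	xfs_buf_log_item_t	*bip)
{
	int	stale = bip->bli_flags & XFS_BLI_STALE;

	/* ... unpin logic unchanged, keyed off the locally derived 'stale' ... */
}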
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h
index 217f34af00cb..df4454511f73 100644
--- a/fs/xfs/xfs_buf_item.h
+++ b/fs/xfs/xfs_buf_item.h
@@ -26,7 +26,7 @@ extern kmem_zone_t *xfs_buf_item_zone;
* have been logged.
* For 6.2 and beyond, this is XFS_LI_BUF. We use this to log everything.
*/
-typedef struct xfs_buf_log_format_t {
+typedef struct xfs_buf_log_format {
unsigned short blf_type; /* buf log item type indicator */
unsigned short blf_size; /* size of this item */
ushort blf_flags; /* misc state */
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index 92d5cd5bf4f2..ef96175c0744 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -186,18 +186,18 @@ xfs_cmn_err(int panic_tag, int level, xfs_mount_t *mp, char *fmt, ...)
void
xfs_error_report(
- char *tag,
- int level,
- xfs_mount_t *mp,
- char *fname,
- int linenum,
- inst_t *ra)
+ const char *tag,
+ int level,
+ struct xfs_mount *mp,
+ const char *filename,
+ int linenum,
+ inst_t *ra)
{
if (level <= xfs_error_level) {
xfs_cmn_err(XFS_PTAG_ERROR_REPORT,
CE_ALERT, mp,
"XFS internal error %s at line %d of file %s. Caller 0x%p\n",
- tag, linenum, fname, ra);
+ tag, linenum, filename, ra);
xfs_stack_trace();
}
@@ -205,15 +205,15 @@ xfs_error_report(
void
xfs_corruption_error(
- char *tag,
- int level,
- xfs_mount_t *mp,
- void *p,
- char *fname,
- int linenum,
- inst_t *ra)
+ const char *tag,
+ int level,
+ struct xfs_mount *mp,
+ void *p,
+ const char *filename,
+ int linenum,
+ inst_t *ra)
{
if (level <= xfs_error_level)
xfs_hex_dump(p, 16);
- xfs_error_report(tag, level, mp, fname, linenum, ra);
+ xfs_error_report(tag, level, mp, filename, linenum, ra);
}
diff --git a/fs/xfs/xfs_error.h b/fs/xfs/xfs_error.h
index 0c93051c4651..c2c1a072bb82 100644
--- a/fs/xfs/xfs_error.h
+++ b/fs/xfs/xfs_error.h
@@ -29,10 +29,11 @@ extern int xfs_error_trap(int);
struct xfs_mount;
-extern void xfs_error_report(char *tag, int level, struct xfs_mount *mp,
- char *fname, int linenum, inst_t *ra);
-extern void xfs_corruption_error(char *tag, int level, struct xfs_mount *mp,
- void *p, char *fname, int linenum, inst_t *ra);
+extern void xfs_error_report(const char *tag, int level, struct xfs_mount *mp,
+ const char *filename, int linenum, inst_t *ra);
+extern void xfs_corruption_error(const char *tag, int level,
+ struct xfs_mount *mp, void *p, const char *filename,
+ int linenum, inst_t *ra);
#define XFS_ERROR_REPORT(e, lvl, mp) \
xfs_error_report(e, lvl, mp, __FILE__, __LINE__, __return_address)
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index 6f35ed1b39b9..409fe81585fd 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -106,7 +106,7 @@ xfs_efi_item_pin(xfs_efi_log_item_t *efip)
*/
/*ARGSUSED*/
STATIC void
-xfs_efi_item_unpin(xfs_efi_log_item_t *efip, int stale)
+xfs_efi_item_unpin(xfs_efi_log_item_t *efip)
{
struct xfs_ail *ailp = efip->efi_item.li_ailp;
@@ -224,7 +224,7 @@ static struct xfs_item_ops xfs_efi_item_ops = {
.iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
xfs_efi_item_format,
.iop_pin = (void(*)(xfs_log_item_t*))xfs_efi_item_pin,
- .iop_unpin = (void(*)(xfs_log_item_t*, int))xfs_efi_item_unpin,
+ .iop_unpin = (void(*)(xfs_log_item_t*))xfs_efi_item_unpin,
.iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t *))
xfs_efi_item_unpin_remove,
.iop_trylock = (uint(*)(xfs_log_item_t*))xfs_efi_item_trylock,
@@ -259,10 +259,7 @@ xfs_efi_init(xfs_mount_t *mp,
KM_SLEEP);
}
- efip->efi_item.li_type = XFS_LI_EFI;
- efip->efi_item.li_ops = &xfs_efi_item_ops;
- efip->efi_item.li_mountp = mp;
- efip->efi_item.li_ailp = mp->m_ail;
+ xfs_log_item_init(mp, &efip->efi_item, XFS_LI_EFI, &xfs_efi_item_ops);
efip->efi_format.efi_nextents = nextents;
efip->efi_format.efi_id = (__psint_t)(void*)efip;
@@ -428,7 +425,7 @@ xfs_efd_item_pin(xfs_efd_log_item_t *efdp)
*/
/*ARGSUSED*/
STATIC void
-xfs_efd_item_unpin(xfs_efd_log_item_t *efdp, int stale)
+xfs_efd_item_unpin(xfs_efd_log_item_t *efdp)
{
return;
}
@@ -518,7 +515,7 @@ static struct xfs_item_ops xfs_efd_item_ops = {
.iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
xfs_efd_item_format,
.iop_pin = (void(*)(xfs_log_item_t*))xfs_efd_item_pin,
- .iop_unpin = (void(*)(xfs_log_item_t*, int))xfs_efd_item_unpin,
+ .iop_unpin = (void(*)(xfs_log_item_t*))xfs_efd_item_unpin,
.iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*))
xfs_efd_item_unpin_remove,
.iop_trylock = (uint(*)(xfs_log_item_t*))xfs_efd_item_trylock,
@@ -554,10 +551,7 @@ xfs_efd_init(xfs_mount_t *mp,
KM_SLEEP);
}
- efdp->efd_item.li_type = XFS_LI_EFD;
- efdp->efd_item.li_ops = &xfs_efd_item_ops;
- efdp->efd_item.li_mountp = mp;
- efdp->efd_item.li_ailp = mp->m_ail;
+ xfs_log_item_init(mp, &efdp->efd_item, XFS_LI_EFD, &xfs_efd_item_ops);
efdp->efd_efip = efip;
efdp->efd_format.efd_nextents = nextents;
efdp->efd_format.efd_efi_id = efip->efi_format.efi_id;
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 0ffd56447045..8cd6e8d8fe9c 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -2449,6 +2449,8 @@ xfs_iunpin_nowait(
{
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
+ trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
+
/* Give the log a push to start the unpinning I/O */
xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0);
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 7bfea8540159..cf8249a60004 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -543,6 +543,7 @@ xfs_inode_item_pin(
{
ASSERT(xfs_isilocked(iip->ili_inode, XFS_ILOCK_EXCL));
+ trace_xfs_inode_pin(iip->ili_inode, _RET_IP_);
atomic_inc(&iip->ili_inode->i_pincount);
}
@@ -556,11 +557,11 @@ xfs_inode_item_pin(
/* ARGSUSED */
STATIC void
xfs_inode_item_unpin(
- xfs_inode_log_item_t *iip,
- int stale)
+ xfs_inode_log_item_t *iip)
{
struct xfs_inode *ip = iip->ili_inode;
+ trace_xfs_inode_unpin(ip, _RET_IP_);
ASSERT(atomic_read(&ip->i_pincount) > 0);
if (atomic_dec_and_test(&ip->i_pincount))
wake_up(&ip->i_ipin_wait);
@@ -572,7 +573,7 @@ xfs_inode_item_unpin_remove(
xfs_inode_log_item_t *iip,
xfs_trans_t *tp)
{
- xfs_inode_item_unpin(iip, 0);
+ xfs_inode_item_unpin(iip);
}
/*
@@ -838,7 +839,7 @@ static struct xfs_item_ops xfs_inode_item_ops = {
.iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
xfs_inode_item_format,
.iop_pin = (void(*)(xfs_log_item_t*))xfs_inode_item_pin,
- .iop_unpin = (void(*)(xfs_log_item_t*, int))xfs_inode_item_unpin,
+ .iop_unpin = (void(*)(xfs_log_item_t*))xfs_inode_item_unpin,
.iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*))
xfs_inode_item_unpin_remove,
.iop_trylock = (uint(*)(xfs_log_item_t*))xfs_inode_item_trylock,
@@ -865,17 +866,9 @@ xfs_inode_item_init(
ASSERT(ip->i_itemp == NULL);
iip = ip->i_itemp = kmem_zone_zalloc(xfs_ili_zone, KM_SLEEP);
- iip->ili_item.li_type = XFS_LI_INODE;
- iip->ili_item.li_ops = &xfs_inode_item_ops;
- iip->ili_item.li_mountp = mp;
- iip->ili_item.li_ailp = mp->m_ail;
iip->ili_inode = ip;
-
- /*
- We have zeroed memory. No need ...
- iip->ili_extents_buf = NULL;
- */
-
+ xfs_log_item_init(mp, &iip->ili_item, XFS_LI_INODE,
+ &xfs_inode_item_ops);
iip->ili_format.ilf_type = XFS_LI_INODE;
iip->ili_format.ilf_ino = ip->i_ino;
iip->ili_format.ilf_blkno = ip->i_imap.im_blkno;
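The inode pin/unpin paths above gain trace_xfs_inode_pin(), trace_xfs_inode_unpin() and trace_xfs_inode_unpin_nowait() calls; their definitions live in xfs_trace.h, which is not part of this section. A hedged sketch of how such tracepoints are typically declared with the kernel tracing macros, assuming a shared event class keyed on the inode and the caller address (the field choices here are illustrative):

DECLARE_EVENT_CLASS(demo_inode_pin_class,
	TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip),
	TP_ARGS(ip, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(int, pincount)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->pincount = atomic_read(&ip->i_pincount);
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d ino 0x%llx pincount %d caller %pf",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->ino, __entry->pincount,
		  (void *)__entry->caller_ip)
);

DEFINE_EVENT(demo_inode_pin_class, xfs_inode_pin,
	TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip),
	TP_ARGS(ip, caller_ip));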
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 0b65039951a0..ef14943829da 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -55,71 +55,33 @@
#define XFS_STRAT_WRITE_IMAPS 2
#define XFS_WRITE_IMAPS XFS_BMAP_MAX_NMAP
-STATIC int
-xfs_imap_to_bmap(
- xfs_inode_t *ip,
- xfs_off_t offset,
- xfs_bmbt_irec_t *imap,
- xfs_iomap_t *iomapp,
- int imaps, /* Number of imap entries */
- int iomaps, /* Number of iomap entries */
- int flags)
-{
- xfs_mount_t *mp = ip->i_mount;
- int pbm;
- xfs_fsblock_t start_block;
-
-
- for (pbm = 0; imaps && pbm < iomaps; imaps--, iomapp++, imap++, pbm++) {
- iomapp->iomap_offset = XFS_FSB_TO_B(mp, imap->br_startoff);
- iomapp->iomap_delta = offset - iomapp->iomap_offset;
- iomapp->iomap_bsize = XFS_FSB_TO_B(mp, imap->br_blockcount);
- iomapp->iomap_flags = flags;
-
- if (XFS_IS_REALTIME_INODE(ip)) {
- iomapp->iomap_flags |= IOMAP_REALTIME;
- iomapp->iomap_target = mp->m_rtdev_targp;
- } else {
- iomapp->iomap_target = mp->m_ddev_targp;
- }
- start_block = imap->br_startblock;
- if (start_block == HOLESTARTBLOCK) {
- iomapp->iomap_bn = IOMAP_DADDR_NULL;
- iomapp->iomap_flags |= IOMAP_HOLE;
- } else if (start_block == DELAYSTARTBLOCK) {
- iomapp->iomap_bn = IOMAP_DADDR_NULL;
- iomapp->iomap_flags |= IOMAP_DELAY;
- } else {
- iomapp->iomap_bn = xfs_fsb_to_db(ip, start_block);
- if (ISUNWRITTEN(imap))
- iomapp->iomap_flags |= IOMAP_UNWRITTEN;
- }
-
- offset += iomapp->iomap_bsize - iomapp->iomap_delta;
- }
- return pbm; /* Return the number filled */
-}
+STATIC int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
+ int, struct xfs_bmbt_irec *, int *);
+STATIC int xfs_iomap_write_delay(struct xfs_inode *, xfs_off_t, size_t, int,
+ struct xfs_bmbt_irec *, int *);
+STATIC int xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t, size_t,
+ struct xfs_bmbt_irec *, int *);
int
xfs_iomap(
- xfs_inode_t *ip,
- xfs_off_t offset,
- ssize_t count,
- int flags,
- xfs_iomap_t *iomapp,
- int *niomaps)
+ struct xfs_inode *ip,
+ xfs_off_t offset,
+ ssize_t count,
+ int flags,
+ struct xfs_bmbt_irec *imap,
+ int *nimaps,
+ int *new)
{
- xfs_mount_t *mp = ip->i_mount;
- xfs_fileoff_t offset_fsb, end_fsb;
- int error = 0;
- int lockmode = 0;
- xfs_bmbt_irec_t imap;
- int nimaps = 1;
- int bmapi_flags = 0;
- int iomap_flags = 0;
+ struct xfs_mount *mp = ip->i_mount;
+ xfs_fileoff_t offset_fsb, end_fsb;
+ int error = 0;
+ int lockmode = 0;
+ int bmapi_flags = 0;
ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
+ *new = 0;
+
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
@@ -160,8 +122,8 @@ xfs_iomap(
error = xfs_bmapi(NULL, ip, offset_fsb,
(xfs_filblks_t)(end_fsb - offset_fsb),
- bmapi_flags, NULL, 0, &imap,
- &nimaps, NULL, NULL);
+ bmapi_flags, NULL, 0, imap,
+ nimaps, NULL, NULL);
if (error)
goto out;
@@ -169,46 +131,41 @@ xfs_iomap(
switch (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)) {
case BMAPI_WRITE:
/* If we found an extent, return it */
- if (nimaps &&
- (imap.br_startblock != HOLESTARTBLOCK) &&
- (imap.br_startblock != DELAYSTARTBLOCK)) {
- trace_xfs_iomap_found(ip, offset, count, flags, &imap);
+ if (*nimaps &&
+ (imap->br_startblock != HOLESTARTBLOCK) &&
+ (imap->br_startblock != DELAYSTARTBLOCK)) {
+ trace_xfs_iomap_found(ip, offset, count, flags, imap);
break;
}
if (flags & (BMAPI_DIRECT|BMAPI_MMAP)) {
error = xfs_iomap_write_direct(ip, offset, count, flags,
- &imap, &nimaps, nimaps);
+ imap, nimaps);
} else {
error = xfs_iomap_write_delay(ip, offset, count, flags,
- &imap, &nimaps);
+ imap, nimaps);
}
if (!error) {
- trace_xfs_iomap_alloc(ip, offset, count, flags, &imap);
+ trace_xfs_iomap_alloc(ip, offset, count, flags, imap);
}
- iomap_flags = IOMAP_NEW;
+ *new = 1;
break;
case BMAPI_ALLOCATE:
/* If we found an extent, return it */
xfs_iunlock(ip, lockmode);
lockmode = 0;
- if (nimaps && !isnullstartblock(imap.br_startblock)) {
- trace_xfs_iomap_found(ip, offset, count, flags, &imap);
+ if (*nimaps && !isnullstartblock(imap->br_startblock)) {
+ trace_xfs_iomap_found(ip, offset, count, flags, imap);
break;
}
error = xfs_iomap_write_allocate(ip, offset, count,
- &imap, &nimaps);
+ imap, nimaps);
break;
}
- if (nimaps) {
- *niomaps = xfs_imap_to_bmap(ip, offset, &imap,
- iomapp, nimaps, *niomaps, iomap_flags);
- } else if (niomaps) {
- *niomaps = 0;
- }
+ ASSERT(*nimaps <= 1);
out:
if (lockmode)
@@ -216,7 +173,6 @@ out:
return XFS_ERROR(error);
}
-
STATIC int
xfs_iomap_eof_align_last_fsb(
xfs_mount_t *mp,
@@ -285,15 +241,14 @@ xfs_cmn_err_fsblock_zero(
return EFSCORRUPTED;
}
-int
+STATIC int
xfs_iomap_write_direct(
xfs_inode_t *ip,
xfs_off_t offset,
size_t count,
int flags,
xfs_bmbt_irec_t *ret_imap,
- int *nmaps,
- int found)
+ int *nmaps)
{
xfs_mount_t *mp = ip->i_mount;
xfs_fileoff_t offset_fsb;
@@ -330,7 +285,7 @@ xfs_iomap_write_direct(
if (error)
goto error_out;
} else {
- if (found && (ret_imap->br_startblock == HOLESTARTBLOCK))
+ if (*nmaps && (ret_imap->br_startblock == HOLESTARTBLOCK))
last_fsb = MIN(last_fsb, (xfs_fileoff_t)
ret_imap->br_blockcount +
ret_imap->br_startoff);
@@ -485,7 +440,7 @@ xfs_iomap_eof_want_preallocate(
return 0;
}
-int
+STATIC int
xfs_iomap_write_delay(
xfs_inode_t *ip,
xfs_off_t offset,
@@ -588,7 +543,7 @@ retry:
* We no longer bother to look at the incoming map - all we have to
* guarantee is that whatever we allocate fills the required range.
*/
-int
+STATIC int
xfs_iomap_write_allocate(
xfs_inode_t *ip,
xfs_off_t offset,
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index 174f29990991..81ac4afd45b3 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -18,19 +18,6 @@
#ifndef __XFS_IOMAP_H__
#define __XFS_IOMAP_H__
-#define IOMAP_DADDR_NULL ((xfs_daddr_t) (-1LL))
-
-
-typedef enum { /* iomap_flags values */
- IOMAP_READ = 0, /* mapping for a read */
- IOMAP_HOLE = 0x02, /* mapping covers a hole */
- IOMAP_DELAY = 0x04, /* mapping covers delalloc region */
- IOMAP_REALTIME = 0x10, /* mapping on the realtime device */
- IOMAP_UNWRITTEN = 0x20, /* mapping covers allocated */
- /* but uninitialized file data */
- IOMAP_NEW = 0x40 /* just allocate */
-} iomap_flags_t;
-
typedef enum {
/* base extent manipulation calls */
BMAPI_READ = (1 << 0), /* read extents */
@@ -52,43 +39,11 @@ typedef enum {
{ BMAPI_MMAP, "MMAP" }, \
{ BMAPI_TRYLOCK, "TRYLOCK" }
-/*
- * xfs_iomap_t: File system I/O map
- *
- * The iomap_bn field is expressed in 512-byte blocks, and is where the
- * mapping starts on disk.
- *
- * The iomap_offset, iomap_bsize and iomap_delta fields are in bytes.
- * iomap_offset is the offset of the mapping in the file itself.
- * iomap_bsize is the size of the mapping, iomap_delta is the
- * desired data's offset into the mapping, given the offset supplied
- * to the file I/O map routine.
- *
- * When a request is made to read beyond the logical end of the object,
- * iomap_size may be set to 0, but iomap_offset and iomap_length should be set
- * to the actual amount of underlying storage that has been allocated, if any.
- */
-
-typedef struct xfs_iomap {
- xfs_daddr_t iomap_bn; /* first 512B blk of mapping */
- xfs_buftarg_t *iomap_target;
- xfs_off_t iomap_offset; /* offset of mapping, bytes */
- xfs_off_t iomap_bsize; /* size of mapping, bytes */
- xfs_off_t iomap_delta; /* offset into mapping, bytes */
- iomap_flags_t iomap_flags;
-} xfs_iomap_t;
-
struct xfs_inode;
struct xfs_bmbt_irec;
extern int xfs_iomap(struct xfs_inode *, xfs_off_t, ssize_t, int,
- struct xfs_iomap *, int *);
-extern int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
- int, struct xfs_bmbt_irec *, int *, int);
-extern int xfs_iomap_write_delay(struct xfs_inode *, xfs_off_t, size_t, int,
- struct xfs_bmbt_irec *, int *);
-extern int xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t, size_t,
- struct xfs_bmbt_irec *, int *);
+ struct xfs_bmbt_irec *, int *, int *);
extern int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, size_t);
#endif /* __XFS_IOMAP_H__*/
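With xfs_iomap_t removed, xfs_iomap() now hands callers a raw struct xfs_bmbt_irec plus a separate "new" flag instead of a translated iomap. A hedged sketch of a caller using the new prototype; the function and variable names are illustrative, and the real caller conversions live elsewhere in this series:

/* Sketch only: illustrative caller of the new xfs_iomap() prototype. */
STATIC int
example_map_blocks(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	ssize_t			count,
	int			flags)
{
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;	/* in: room for one mapping */
	int			new = 0;	/* out: set if space was just allocated */
	int			error;

	error = xfs_iomap(ip, offset, count, flags, &imap, &nimaps, &new);
	if (error)
		return error;

	/* at most one mapping comes back (see the ASSERT(*nimaps <= 1) above) */
	return 0;
}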
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 2be019136287..3038dd52c72a 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -44,13 +44,8 @@
kmem_zone_t *xfs_log_ticket_zone;
-#define xlog_write_adv_cnt(ptr, len, off, bytes) \
- { (ptr) += (bytes); \
- (len) -= (bytes); \
- (off) += (bytes);}
-
/* Local miscellaneous function prototypes */
-STATIC int xlog_commit_record(xfs_mount_t *mp, xlog_ticket_t *ticket,
+STATIC int xlog_commit_record(struct log *log, struct xlog_ticket *ticket,
xlog_in_core_t **, xfs_lsn_t *);
STATIC xlog_t * xlog_alloc_log(xfs_mount_t *mp,
xfs_buftarg_t *log_target,
@@ -59,11 +54,9 @@ STATIC xlog_t * xlog_alloc_log(xfs_mount_t *mp,
STATIC int xlog_space_left(xlog_t *log, int cycle, int bytes);
STATIC int xlog_sync(xlog_t *log, xlog_in_core_t *iclog);
STATIC void xlog_dealloc_log(xlog_t *log);
-STATIC int xlog_write(xfs_mount_t *mp, xfs_log_iovec_t region[],
- int nentries, struct xlog_ticket *tic,
- xfs_lsn_t *start_lsn,
- xlog_in_core_t **commit_iclog,
- uint flags);
+STATIC int xlog_write(struct log *log, struct xfs_log_vec *log_vector,
+ struct xlog_ticket *tic, xfs_lsn_t *start_lsn,
+ xlog_in_core_t **commit_iclog, uint flags);
/* local state machine functions */
STATIC void xlog_state_done_syncing(xlog_in_core_t *iclog, int);
@@ -102,7 +95,7 @@ STATIC xlog_ticket_t *xlog_ticket_alloc(xlog_t *log,
uint flags);
#if defined(DEBUG)
-STATIC void xlog_verify_dest_ptr(xlog_t *log, __psint_t ptr);
+STATIC void xlog_verify_dest_ptr(xlog_t *log, char *ptr);
STATIC void xlog_verify_grant_head(xlog_t *log, int equals);
STATIC void xlog_verify_iclog(xlog_t *log, xlog_in_core_t *iclog,
int count, boolean_t syncing);
@@ -258,7 +251,7 @@ xfs_log_done(
* If we get an error, just continue and give back the log ticket.
*/
(((ticket->t_flags & XLOG_TIC_INITED) == 0) &&
- (xlog_commit_record(mp, ticket, iclog, &lsn)))) {
+ (xlog_commit_record(log, ticket, iclog, &lsn)))) {
lsn = (xfs_lsn_t) -1;
if (ticket->t_flags & XLOG_TIC_PERM_RESERV) {
flags |= XFS_LOG_REL_PERM_RESERV;
@@ -516,18 +509,10 @@ xfs_log_unmount_write(xfs_mount_t *mp)
#ifdef DEBUG
xlog_in_core_t *first_iclog;
#endif
- xfs_log_iovec_t reg[1];
xlog_ticket_t *tic = NULL;
xfs_lsn_t lsn;
int error;
- /* the data section must be 32 bit size aligned */
- struct {
- __uint16_t magic;
- __uint16_t pad1;
- __uint32_t pad2; /* may as well make it 64 bits */
- } magic = { XLOG_UNMOUNT_TYPE, 0, 0 };
-
/*
* Don't write out unmount record on read-only mounts.
* Or, if we are doing a forced umount (typically because of IO errors).
@@ -549,16 +534,30 @@ xfs_log_unmount_write(xfs_mount_t *mp)
} while (iclog != first_iclog);
#endif
if (! (XLOG_FORCED_SHUTDOWN(log))) {
- reg[0].i_addr = (void*)&magic;
- reg[0].i_len = sizeof(magic);
- reg[0].i_type = XLOG_REG_TYPE_UNMOUNT;
-
error = xfs_log_reserve(mp, 600, 1, &tic,
XFS_LOG, 0, XLOG_UNMOUNT_REC_TYPE);
if (!error) {
+ /* the data section must be 32 bit size aligned */
+ struct {
+ __uint16_t magic;
+ __uint16_t pad1;
+ __uint32_t pad2; /* may as well make it 64 bits */
+ } magic = {
+ .magic = XLOG_UNMOUNT_TYPE,
+ };
+ struct xfs_log_iovec reg = {
+ .i_addr = (void *)&magic,
+ .i_len = sizeof(magic),
+ .i_type = XLOG_REG_TYPE_UNMOUNT,
+ };
+ struct xfs_log_vec vec = {
+ .lv_niovecs = 1,
+ .lv_iovecp = &reg,
+ };
+
/* remove inited flag */
- ((xlog_ticket_t *)tic)->t_flags = 0;
- error = xlog_write(mp, reg, 1, tic, &lsn,
+ tic->t_flags = 0;
+ error = xlog_write(log, &vec, tic, &lsn,
NULL, XLOG_UNMOUNT_TRANS);
/*
* At this point, we're umounting anyway,
@@ -648,10 +647,26 @@ xfs_log_unmount(xfs_mount_t *mp)
xlog_dealloc_log(mp->m_log);
}
+void
+xfs_log_item_init(
+ struct xfs_mount *mp,
+ struct xfs_log_item *item,
+ int type,
+ struct xfs_item_ops *ops)
+{
+ item->li_mountp = mp;
+ item->li_ailp = mp->m_ail;
+ item->li_type = type;
+ item->li_ops = ops;
+}
+
/*
* Write region vectors to log. The write happens using the space reservation
* of the ticket (tic). It is not a requirement that all writes for a given
- * transaction occur with one call to xfs_log_write().
+ * transaction occur with one call to xfs_log_write(). However, it is important
+ * to note that the transaction reservation code makes an assumption about the
+ * number of log headers a transaction requires that may be violated if you
+ * don't pass all the transaction vectors in one call....
*/
int
xfs_log_write(
@@ -663,11 +678,15 @@ xfs_log_write(
{
struct log *log = mp->m_log;
int error;
+ struct xfs_log_vec vec = {
+ .lv_niovecs = nentries,
+ .lv_iovecp = reg,
+ };
if (XLOG_FORCED_SHUTDOWN(log))
return XFS_ERROR(EIO);
- error = xlog_write(mp, reg, nentries, tic, start_lsn, NULL, 0);
+ error = xlog_write(log, &vec, tic, start_lsn, NULL, 0);
if (error)
xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
return error;
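As the expanded comment on xfs_log_write() warns, the reservation taken in xlog_ticket_alloc() assumes all of a transaction's regions are written with a single call. A minimal sketch of such a call; the region contents and region types here are illustrative only:

/* Sketch only: logging two regions with one xfs_log_write() call. */
static int
example_log_two_regions(
	struct xfs_mount	*mp,
	struct xlog_ticket	*tic,
	void			*hdr, int hdr_len,	/* lengths must be 32-bit multiples */
	void			*data, int data_len)
{
	struct xfs_log_iovec	reg[2];
	xfs_lsn_t		start_lsn;

	reg[0].i_addr = hdr;
	reg[0].i_len  = hdr_len;
	reg[0].i_type = XLOG_REG_TYPE_TRANSHDR;	/* illustrative region type */
	reg[1].i_addr = data;
	reg[1].i_len  = data_len;
	reg[1].i_type = XLOG_REG_TYPE_ICORE;	/* illustrative region type */

	/* both regions go down in one call so the op header accounting holds */
	return xfs_log_write(mp, reg, 2, tic, &start_lsn);
}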
@@ -1020,6 +1039,7 @@ xlog_alloc_log(xfs_mount_t *mp,
int i;
int iclogsize;
int error = ENOMEM;
+ uint log2_size = 0;
log = kmem_zalloc(sizeof(xlog_t), KM_MAYFAIL);
if (!log) {
@@ -1045,29 +1065,30 @@ xlog_alloc_log(xfs_mount_t *mp,
error = EFSCORRUPTED;
if (xfs_sb_version_hassector(&mp->m_sb)) {
- log->l_sectbb_log = mp->m_sb.sb_logsectlog - BBSHIFT;
- if (log->l_sectbb_log < 0 ||
- log->l_sectbb_log > mp->m_sectbb_log) {
- xlog_warn("XFS: Log sector size (0x%x) out of range.",
- log->l_sectbb_log);
+ log2_size = mp->m_sb.sb_logsectlog;
+ if (log2_size < BBSHIFT) {
+ xlog_warn("XFS: Log sector size too small "
+ "(0x%x < 0x%x)", log2_size, BBSHIFT);
goto out_free_log;
}
- /* for larger sector sizes, must have v2 or external log */
- if (log->l_sectbb_log != 0 &&
- (log->l_logBBstart != 0 &&
- !xfs_sb_version_haslogv2(&mp->m_sb))) {
- xlog_warn("XFS: log sector size (0x%x) invalid "
- "for configuration.", log->l_sectbb_log);
+ log2_size -= BBSHIFT;
+ if (log2_size > mp->m_sectbb_log) {
+ xlog_warn("XFS: Log sector size too large "
+ "(0x%x > 0x%x)", log2_size, mp->m_sectbb_log);
goto out_free_log;
}
- if (mp->m_sb.sb_logsectlog < BBSHIFT) {
- xlog_warn("XFS: Log sector log (0x%x) too small.",
- mp->m_sb.sb_logsectlog);
+
+ /* for larger sector sizes, must have v2 or external log */
+ if (log2_size && log->l_logBBstart > 0 &&
+ !xfs_sb_version_haslogv2(&mp->m_sb)) {
+
+ xlog_warn("XFS: log sector size (0x%x) invalid "
+ "for configuration.", log2_size);
goto out_free_log;
}
}
- log->l_sectbb_mask = (1 << log->l_sectbb_log) - 1;
+ log->l_sectBBsize = 1 << log2_size;
xlog_get_iclog_buffer_size(mp, log);
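The rewritten sector-size checks work on the superblock's log2 sector size directly: it must be at least BBSHIFT (9, i.e. 512-byte basic blocks) and no larger than the device limit, and l_sectBBsize is then the log sector size expressed in basic blocks. A standalone sketch of that computation with illustrative numbers:

/* Standalone sketch of the l_sectBBsize computation (BBSHIFT is 9 in XFS). */
#include <stdio.h>

int main(void)
{
	int bbshift = 9;		/* basic blocks are 512 bytes */
	int sb_logsectlog = 12;		/* illustrative: 4096-byte log sectors */
	int log2_size = sb_logsectlog - bbshift;
	int l_sectBBsize = 1 << log2_size;

	printf("log sector = %d basic blocks\n", l_sectBBsize);	/* prints 8 */
	return 0;
}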
@@ -1174,26 +1195,31 @@ out:
* ticket. Return the lsn of the commit record.
*/
STATIC int
-xlog_commit_record(xfs_mount_t *mp,
- xlog_ticket_t *ticket,
- xlog_in_core_t **iclog,
- xfs_lsn_t *commitlsnp)
+xlog_commit_record(
+ struct log *log,
+ struct xlog_ticket *ticket,
+ struct xlog_in_core **iclog,
+ xfs_lsn_t *commitlsnp)
{
- int error;
- xfs_log_iovec_t reg[1];
-
- reg[0].i_addr = NULL;
- reg[0].i_len = 0;
- reg[0].i_type = XLOG_REG_TYPE_COMMIT;
+ struct xfs_mount *mp = log->l_mp;
+ int error;
+ struct xfs_log_iovec reg = {
+ .i_addr = NULL,
+ .i_len = 0,
+ .i_type = XLOG_REG_TYPE_COMMIT,
+ };
+ struct xfs_log_vec vec = {
+ .lv_niovecs = 1,
+ .lv_iovecp = &reg,
+ };
ASSERT_ALWAYS(iclog);
- if ((error = xlog_write(mp, reg, 1, ticket, commitlsnp,
- iclog, XLOG_COMMIT_TRANS))) {
+ error = xlog_write(log, &vec, ticket, commitlsnp, iclog,
+ XLOG_COMMIT_TRANS);
+ if (error)
xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
- }
return error;
-} /* xlog_commit_record */
-
+}
/*
* Push on the buffer cache code if we ever use more than 75% of the on-disk
@@ -1614,6 +1640,192 @@ xlog_print_tic_res(xfs_mount_t *mp, xlog_ticket_t *ticket)
}
/*
+ * Calculate the potential space needed by the log vector. Each region gets
+ * its own xlog_op_header_t and may need to be double word aligned.
+ */
+static int
+xlog_write_calc_vec_length(
+ struct xlog_ticket *ticket,
+ struct xfs_log_vec *log_vector)
+{
+ struct xfs_log_vec *lv;
+ int headers = 0;
+ int len = 0;
+ int i;
+
+ /* acct for start rec of xact */
+ if (ticket->t_flags & XLOG_TIC_INITED)
+ headers++;
+
+ for (lv = log_vector; lv; lv = lv->lv_next) {
+ headers += lv->lv_niovecs;
+
+ for (i = 0; i < lv->lv_niovecs; i++) {
+ struct xfs_log_iovec *vecp = &lv->lv_iovecp[i];
+
+ len += vecp->i_len;
+ xlog_tic_add_region(ticket, vecp->i_len, vecp->i_type);
+ }
+ }
+
+ ticket->t_res_num_ophdrs += headers;
+ len += headers * sizeof(struct xlog_op_header);
+
+ return len;
+}
+
+/*
+ * If first write for transaction, insert start record We can't be trying to
+ * commit if we are inited. We can't have any "partial_copy" if we are inited.
+ */
+static int
+xlog_write_start_rec(
+ struct xlog_op_header *ophdr,
+ struct xlog_ticket *ticket)
+{
+ if (!(ticket->t_flags & XLOG_TIC_INITED))
+ return 0;
+
+ ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
+ ophdr->oh_clientid = ticket->t_clientid;
+ ophdr->oh_len = 0;
+ ophdr->oh_flags = XLOG_START_TRANS;
+ ophdr->oh_res2 = 0;
+
+ ticket->t_flags &= ~XLOG_TIC_INITED;
+
+ return sizeof(struct xlog_op_header);
+}
+
+static xlog_op_header_t *
+xlog_write_setup_ophdr(
+ struct log *log,
+ struct xlog_op_header *ophdr,
+ struct xlog_ticket *ticket,
+ uint flags)
+{
+ ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
+ ophdr->oh_clientid = ticket->t_clientid;
+ ophdr->oh_res2 = 0;
+
+ /* are we copying a commit or unmount record? */
+ ophdr->oh_flags = flags;
+
+ /*
+ * We've seen logs corrupted with bad transaction client ids. This
+ * makes sure that XFS doesn't generate them. Turn this into an EIO
+ * and shut down the filesystem.
+ */
+ switch (ophdr->oh_clientid) {
+ case XFS_TRANSACTION:
+ case XFS_VOLUME:
+ case XFS_LOG:
+ break;
+ default:
+ xfs_fs_cmn_err(CE_WARN, log->l_mp,
+ "Bad XFS transaction clientid 0x%x in ticket 0x%p",
+ ophdr->oh_clientid, ticket);
+ return NULL;
+ }
+
+ return ophdr;
+}
+
+/*
+ * Set up the parameters of the region copy into the log. This has
+ * to handle region write split across multiple log buffers - this
+ * state is kept external to this function so that this code can
+ * be written in an obvious, self-documenting manner.
+ */
+static int
+xlog_write_setup_copy(
+ struct xlog_ticket *ticket,
+ struct xlog_op_header *ophdr,
+ int space_available,
+ int space_required,
+ int *copy_off,
+ int *copy_len,
+ int *last_was_partial_copy,
+ int *bytes_consumed)
+{
+ int still_to_copy;
+
+ still_to_copy = space_required - *bytes_consumed;
+ *copy_off = *bytes_consumed;
+
+ if (still_to_copy <= space_available) {
+ /* write of region completes here */
+ *copy_len = still_to_copy;
+ ophdr->oh_len = cpu_to_be32(*copy_len);
+ if (*last_was_partial_copy)
+ ophdr->oh_flags |= (XLOG_END_TRANS|XLOG_WAS_CONT_TRANS);
+ *last_was_partial_copy = 0;
+ *bytes_consumed = 0;
+ return 0;
+ }
+
+ /* partial write of region, needs extra log op header reservation */
+ *copy_len = space_available;
+ ophdr->oh_len = cpu_to_be32(*copy_len);
+ ophdr->oh_flags |= XLOG_CONTINUE_TRANS;
+ if (*last_was_partial_copy)
+ ophdr->oh_flags |= XLOG_WAS_CONT_TRANS;
+ *bytes_consumed += *copy_len;
+ (*last_was_partial_copy)++;
+
+ /* account for new log op header */
+ ticket->t_curr_res -= sizeof(struct xlog_op_header);
+ ticket->t_res_num_ophdrs++;
+
+ return sizeof(struct xlog_op_header);
+}
+
+static int
+xlog_write_copy_finish(
+ struct log *log,
+ struct xlog_in_core *iclog,
+ uint flags,
+ int *record_cnt,
+ int *data_cnt,
+ int *partial_copy,
+ int *partial_copy_len,
+ int log_offset,
+ struct xlog_in_core **commit_iclog)
+{
+ if (*partial_copy) {
+ /*
+ * This iclog has already been marked WANT_SYNC by
+ * xlog_state_get_iclog_space.
+ */
+ xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
+ *record_cnt = 0;
+ *data_cnt = 0;
+ return xlog_state_release_iclog(log, iclog);
+ }
+
+ *partial_copy = 0;
+ *partial_copy_len = 0;
+
+ if (iclog->ic_size - log_offset <= sizeof(xlog_op_header_t)) {
+ /* no more space in this iclog - push it. */
+ xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
+ *record_cnt = 0;
+ *data_cnt = 0;
+
+ spin_lock(&log->l_icloglock);
+ xlog_state_want_sync(log, iclog);
+ spin_unlock(&log->l_icloglock);
+
+ if (!commit_iclog)
+ return xlog_state_release_iclog(log, iclog);
+ ASSERT(flags & XLOG_COMMIT_TRANS);
+ *commit_iclog = iclog;
+ }
+
+ return 0;
+}
+
+/*
* Write some region out to in-core log
*
* This will be called when writing externally provided regions or when
@@ -1655,209 +1867,157 @@ xlog_print_tic_res(xfs_mount_t *mp, xlog_ticket_t *ticket)
*/
STATIC int
xlog_write(
- struct xfs_mount *mp,
- struct xfs_log_iovec reg[],
- int nentries,
+ struct log *log,
+ struct xfs_log_vec *log_vector,
struct xlog_ticket *ticket,
xfs_lsn_t *start_lsn,
struct xlog_in_core **commit_iclog,
uint flags)
{
- xlog_t *log = mp->m_log;
- xlog_in_core_t *iclog = NULL; /* ptr to current in-core log */
- xlog_op_header_t *logop_head; /* ptr to log operation header */
- __psint_t ptr; /* copy address into data region */
- int len; /* # xlog_write() bytes 2 still copy */
- int index; /* region index currently copying */
- int log_offset; /* offset (from 0) into data region */
- int start_rec_copy; /* # bytes to copy for start record */
- int partial_copy; /* did we split a region? */
- int partial_copy_len;/* # bytes copied if split region */
- int need_copy; /* # bytes need to memcpy this region */
- int copy_len; /* # bytes actually memcpy'ing */
- int copy_off; /* # bytes from entry start */
- int contwr; /* continued write of in-core log? */
- int error;
- int record_cnt = 0, data_cnt = 0;
-
- partial_copy_len = partial_copy = 0;
-
- /* Calculate potential maximum space. Each region gets its own
- * xlog_op_header_t and may need to be double word aligned.
- */
- len = 0;
- if (ticket->t_flags & XLOG_TIC_INITED) { /* acct for start rec of xact */
- len += sizeof(xlog_op_header_t);
- ticket->t_res_num_ophdrs++;
- }
+ struct xlog_in_core *iclog = NULL;
+ struct xfs_log_iovec *vecp;
+ struct xfs_log_vec *lv;
+ int len;
+ int index;
+ int partial_copy = 0;
+ int partial_copy_len = 0;
+ int contwr = 0;
+ int record_cnt = 0;
+ int data_cnt = 0;
+ int error;
- for (index = 0; index < nentries; index++) {
- len += sizeof(xlog_op_header_t); /* each region gets >= 1 */
- ticket->t_res_num_ophdrs++;
- len += reg[index].i_len;
- xlog_tic_add_region(ticket, reg[index].i_len, reg[index].i_type);
- }
- contwr = *start_lsn = 0;
+ *start_lsn = 0;
- if (ticket->t_curr_res < len) {
- xlog_print_tic_res(mp, ticket);
+ len = xlog_write_calc_vec_length(ticket, log_vector);
+ if (ticket->t_curr_res < len) {
+ xlog_print_tic_res(log->l_mp, ticket);
#ifdef DEBUG
- xlog_panic(
- "xfs_log_write: reservation ran out. Need to up reservation");
+ xlog_panic(
+ "xfs_log_write: reservation ran out. Need to up reservation");
#else
- /* Customer configurable panic */
- xfs_cmn_err(XFS_PTAG_LOGRES, CE_ALERT, mp,
- "xfs_log_write: reservation ran out. Need to up reservation");
- /* If we did not panic, shutdown the filesystem */
- xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+ /* Customer configurable panic */
+ xfs_cmn_err(XFS_PTAG_LOGRES, CE_ALERT, log->l_mp,
+ "xfs_log_write: reservation ran out. Need to up reservation");
+
+ /* If we did not panic, shutdown the filesystem */
+ xfs_force_shutdown(log->l_mp, SHUTDOWN_CORRUPT_INCORE);
#endif
- } else
+ }
+
ticket->t_curr_res -= len;
- for (index = 0; index < nentries; ) {
- if ((error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
- &contwr, &log_offset)))
- return error;
+ index = 0;
+ lv = log_vector;
+ vecp = lv->lv_iovecp;
+ while (lv && index < lv->lv_niovecs) {
+ void *ptr;
+ int log_offset;
- ASSERT(log_offset <= iclog->ic_size - 1);
- ptr = (__psint_t) ((char *)iclog->ic_datap+log_offset);
+ error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
+ &contwr, &log_offset);
+ if (error)
+ return error;
- /* start_lsn is the first lsn written to. That's all we need. */
- if (! *start_lsn)
- *start_lsn = be64_to_cpu(iclog->ic_header.h_lsn);
+ ASSERT(log_offset <= iclog->ic_size - 1);
+ ptr = iclog->ic_datap + log_offset;
- /* This loop writes out as many regions as can fit in the amount
- * of space which was allocated by xlog_state_get_iclog_space().
- */
- while (index < nentries) {
- ASSERT(reg[index].i_len % sizeof(__int32_t) == 0);
- ASSERT((__psint_t)ptr % sizeof(__int32_t) == 0);
- start_rec_copy = 0;
-
- /* If first write for transaction, insert start record.
- * We can't be trying to commit if we are inited. We can't
- * have any "partial_copy" if we are inited.
- */
- if (ticket->t_flags & XLOG_TIC_INITED) {
- logop_head = (xlog_op_header_t *)ptr;
- logop_head->oh_tid = cpu_to_be32(ticket->t_tid);
- logop_head->oh_clientid = ticket->t_clientid;
- logop_head->oh_len = 0;
- logop_head->oh_flags = XLOG_START_TRANS;
- logop_head->oh_res2 = 0;
- ticket->t_flags &= ~XLOG_TIC_INITED; /* clear bit */
- record_cnt++;
-
- start_rec_copy = sizeof(xlog_op_header_t);
- xlog_write_adv_cnt(ptr, len, log_offset, start_rec_copy);
- }
+ /* start_lsn is the first lsn written to. That's all we need. */
+ if (!*start_lsn)
+ *start_lsn = be64_to_cpu(iclog->ic_header.h_lsn);
- /* Copy log operation header directly into data section */
- logop_head = (xlog_op_header_t *)ptr;
- logop_head->oh_tid = cpu_to_be32(ticket->t_tid);
- logop_head->oh_clientid = ticket->t_clientid;
- logop_head->oh_res2 = 0;
+ /*
+ * This loop writes out as many regions as can fit in the amount
+ * of space which was allocated by xlog_state_get_iclog_space().
+ */
+ while (lv && index < lv->lv_niovecs) {
+ struct xfs_log_iovec *reg = &vecp[index];
+ struct xlog_op_header *ophdr;
+ int start_rec_copy;
+ int copy_len;
+ int copy_off;
+
+ ASSERT(reg->i_len % sizeof(__int32_t) == 0);
+ ASSERT((unsigned long)ptr % sizeof(__int32_t) == 0);
+
+ start_rec_copy = xlog_write_start_rec(ptr, ticket);
+ if (start_rec_copy) {
+ record_cnt++;
+ xlog_write_adv_cnt(&ptr, &len, &log_offset,
+ start_rec_copy);
+ }
- /* header copied directly */
- xlog_write_adv_cnt(ptr, len, log_offset, sizeof(xlog_op_header_t));
+ ophdr = xlog_write_setup_ophdr(log, ptr, ticket, flags);
+ if (!ophdr)
+ return XFS_ERROR(EIO);
- /* are we copying a commit or unmount record? */
- logop_head->oh_flags = flags;
+ xlog_write_adv_cnt(&ptr, &len, &log_offset,
+ sizeof(struct xlog_op_header));
+
+ len += xlog_write_setup_copy(ticket, ophdr,
+ iclog->ic_size-log_offset,
+ reg->i_len,
+ &copy_off, &copy_len,
+ &partial_copy,
+ &partial_copy_len);
+ xlog_verify_dest_ptr(log, ptr);
+
+ /* copy region */
+ ASSERT(copy_len >= 0);
+ memcpy(ptr, reg->i_addr + copy_off, copy_len);
+ xlog_write_adv_cnt(&ptr, &len, &log_offset, copy_len);
+
+ copy_len += start_rec_copy + sizeof(xlog_op_header_t);
+ record_cnt++;
+ data_cnt += contwr ? copy_len : 0;
+
+ error = xlog_write_copy_finish(log, iclog, flags,
+ &record_cnt, &data_cnt,
+ &partial_copy,
+ &partial_copy_len,
+ log_offset,
+ commit_iclog);
+ if (error)
+ return error;
- /*
- * We've seen logs corrupted with bad transaction client
- * ids. This makes sure that XFS doesn't generate them on.
- * Turn this into an EIO and shut down the filesystem.
- */
- switch (logop_head->oh_clientid) {
- case XFS_TRANSACTION:
- case XFS_VOLUME:
- case XFS_LOG:
- break;
- default:
- xfs_fs_cmn_err(CE_WARN, mp,
- "Bad XFS transaction clientid 0x%x in ticket 0x%p",
- logop_head->oh_clientid, ticket);
- return XFS_ERROR(EIO);
- }
+ /*
+ * if we had a partial copy, we need to get more iclog
+ * space but we don't want to increment the region
+ * index because there is still more in this region to
+ * write.
+ *
+ * If we completed writing this region, and we flushed
+ * the iclog (indicated by resetting of the record
+ * count), then we also need to get more log space. If
+ * this was the last record, though, we are done and
+ * can just return.
+ */
+ if (partial_copy)
+ break;
- /* Partial write last time? => (partial_copy != 0)
- * need_copy is the amount we'd like to copy if everything could
- * fit in the current memcpy.
- */
- need_copy = reg[index].i_len - partial_copy_len;
-
- copy_off = partial_copy_len;
- if (need_copy <= iclog->ic_size - log_offset) { /*complete write */
- copy_len = need_copy;
- logop_head->oh_len = cpu_to_be32(copy_len);
- if (partial_copy)
- logop_head->oh_flags|= (XLOG_END_TRANS|XLOG_WAS_CONT_TRANS);
- partial_copy_len = partial_copy = 0;
- } else { /* partial write */
- copy_len = iclog->ic_size - log_offset;
- logop_head->oh_len = cpu_to_be32(copy_len);
- logop_head->oh_flags |= XLOG_CONTINUE_TRANS;
- if (partial_copy)
- logop_head->oh_flags |= XLOG_WAS_CONT_TRANS;
- partial_copy_len += copy_len;
- partial_copy++;
- len += sizeof(xlog_op_header_t); /* from splitting of region */
- /* account for new log op header */
- ticket->t_curr_res -= sizeof(xlog_op_header_t);
- ticket->t_res_num_ophdrs++;
- }
- xlog_verify_dest_ptr(log, ptr);
-
- /* copy region */
- ASSERT(copy_len >= 0);
- memcpy((xfs_caddr_t)ptr, reg[index].i_addr + copy_off, copy_len);
- xlog_write_adv_cnt(ptr, len, log_offset, copy_len);
-
- /* make copy_len total bytes copied, including headers */
- copy_len += start_rec_copy + sizeof(xlog_op_header_t);
- record_cnt++;
- data_cnt += contwr ? copy_len : 0;
- if (partial_copy) { /* copied partial region */
- /* already marked WANT_SYNC by xlog_state_get_iclog_space */
- xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
- record_cnt = data_cnt = 0;
- if ((error = xlog_state_release_iclog(log, iclog)))
- return error;
- break; /* don't increment index */
- } else { /* copied entire region */
- index++;
- partial_copy_len = partial_copy = 0;
-
- if (iclog->ic_size - log_offset <= sizeof(xlog_op_header_t)) {
- xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
- record_cnt = data_cnt = 0;
- spin_lock(&log->l_icloglock);
- xlog_state_want_sync(log, iclog);
- spin_unlock(&log->l_icloglock);
- if (commit_iclog) {
- ASSERT(flags & XLOG_COMMIT_TRANS);
- *commit_iclog = iclog;
- } else if ((error = xlog_state_release_iclog(log, iclog)))
- return error;
- if (index == nentries)
- return 0; /* we are done */
- else
- break;
+ if (++index == lv->lv_niovecs) {
+ lv = lv->lv_next;
+ index = 0;
+ if (lv)
+ vecp = lv->lv_iovecp;
+ }
+ if (record_cnt == 0) {
+ if (!lv)
+ return 0;
+ break;
+ }
}
- } /* if (partial_copy) */
- } /* while (index < nentries) */
- } /* for (index = 0; index < nentries; ) */
- ASSERT(len == 0);
+ }
+
+ ASSERT(len == 0);
+
+ xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
+ if (!commit_iclog)
+ return xlog_state_release_iclog(log, iclog);
- xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
- if (commit_iclog) {
ASSERT(flags & XLOG_COMMIT_TRANS);
*commit_iclog = iclog;
return 0;
- }
- return xlog_state_release_iclog(log, iclog);
-} /* xlog_write */
+}
/*****************************************************************************
@@ -3157,14 +3317,16 @@ xfs_log_ticket_get(
* Allocate and initialise a new log ticket.
*/
STATIC xlog_ticket_t *
-xlog_ticket_alloc(xlog_t *log,
- int unit_bytes,
- int cnt,
- char client,
- uint xflags)
+xlog_ticket_alloc(
+ struct log *log,
+ int unit_bytes,
+ int cnt,
+ char client,
+ uint xflags)
{
- xlog_ticket_t *tic;
+ struct xlog_ticket *tic;
uint num_headers;
+ int iclog_space;
tic = kmem_zone_zalloc(xfs_log_ticket_zone, KM_SLEEP|KM_MAYFAIL);
if (!tic)
@@ -3208,16 +3370,40 @@ xlog_ticket_alloc(xlog_t *log,
/* for start-rec */
unit_bytes += sizeof(xlog_op_header_t);
- /* for LR headers */
- num_headers = ((unit_bytes + log->l_iclog_size-1) >> log->l_iclog_size_log);
+ /*
+ * for LR headers - the space for data in an iclog is the size minus
+ * the space used for the headers. If we use the iclog size, then we
+ * undercalculate the number of headers required.
+ *
+ * Furthermore - the addition of op headers for split-recs might
+ * increase the space required enough to require more log and op
+ * headers, so take that into account too.
+ *
+ * IMPORTANT: This reservation makes the assumption that if this
+ * transaction is the first in an iclog and hence has the LR headers
+ * accounted to it, then the remaining space in the iclog is
+ * exclusively for this transaction. i.e. if the transaction is larger
+ * than the iclog, it will be the only thing in that iclog.
+ * Fundamentally, this means we must pass the entire log vector to
+ * xlog_write to guarantee this.
+ */
+ iclog_space = log->l_iclog_size - log->l_iclog_hsize;
+ num_headers = howmany(unit_bytes, iclog_space);
+
+ /* for split-recs - ophdrs added when data split over LRs */
+ unit_bytes += sizeof(xlog_op_header_t) * num_headers;
+
+ /* add extra header reservations if we overrun */
+ while (!num_headers ||
+ howmany(unit_bytes, iclog_space) > num_headers) {
+ unit_bytes += sizeof(xlog_op_header_t);
+ num_headers++;
+ }
unit_bytes += log->l_iclog_hsize * num_headers;
/* for commit-rec LR header - note: padding will subsume the ophdr */
unit_bytes += log->l_iclog_hsize;
- /* for split-recs - ophdrs added when data split over LRs */
- unit_bytes += sizeof(xlog_op_header_t) * num_headers;
-
/* for roundoff padding for transaction data and one for commit record */
if (xfs_sb_version_haslogv2(&log->l_mp->m_sb) &&
log->l_mp->m_sb.sb_logsunit > 1) {
@@ -3233,13 +3419,13 @@ xlog_ticket_alloc(xlog_t *log,
tic->t_curr_res = unit_bytes;
tic->t_cnt = cnt;
tic->t_ocnt = cnt;
- tic->t_tid = (xlog_tid_t)((__psint_t)tic & 0xffffffff);
+ tic->t_tid = random32();
tic->t_clientid = client;
tic->t_flags = XLOG_TIC_INITED;
tic->t_trans_type = 0;
if (xflags & XFS_LOG_PERM_RESERV)
tic->t_flags |= XLOG_TIC_PERM_RESERV;
- sv_init(&(tic->t_wait), SV_DEFAULT, "logtick");
+ sv_init(&tic->t_wait, SV_DEFAULT, "logtick");
xlog_tic_reset_res(tic);
@@ -3260,20 +3446,22 @@ xlog_ticket_alloc(xlog_t *log,
* part of the log in case we trash the log structure.
*/
void
-xlog_verify_dest_ptr(xlog_t *log,
- __psint_t ptr)
+xlog_verify_dest_ptr(
+ struct log *log,
+ char *ptr)
{
int i;
int good_ptr = 0;
- for (i=0; i < log->l_iclog_bufs; i++) {
- if (ptr >= (__psint_t)log->l_iclog_bak[i] &&
- ptr <= (__psint_t)log->l_iclog_bak[i]+log->l_iclog_size)
+ for (i = 0; i < log->l_iclog_bufs; i++) {
+ if (ptr >= log->l_iclog_bak[i] &&
+ ptr <= log->l_iclog_bak[i] + log->l_iclog_size)
good_ptr++;
}
- if (! good_ptr)
+
+ if (!good_ptr)
xlog_panic("xlog_verify_dest_ptr: invalid ptr");
-} /* xlog_verify_dest_ptr */
+}
STATIC void
xlog_verify_grant_head(xlog_t *log, int equals)
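The reworked reservation in xlog_ticket_alloc() sizes LR headers against the usable iclog payload (l_iclog_size minus l_iclog_hsize) and then iterates until the op-header overhead no longer pushes the estimate across another iclog boundary. A standalone sketch of that arithmetic with made-up sizes, just to show the fixpoint loop:

/* Standalone sketch of the num_headers / unit_bytes fixpoint loop. */
#include <stdio.h>

#define howmany(x, y)	(((x) + ((y) - 1)) / (y))

int main(void)
{
	int iclog_size = 32768;		/* illustrative l_iclog_size */
	int iclog_hsize = 512;		/* illustrative l_iclog_hsize */
	int op_hdr = 12;		/* illustrative sizeof(xlog_op_header_t) */
	int unit_bytes = 100000;	/* illustrative transaction payload */

	int iclog_space = iclog_size - iclog_hsize;
	int num_headers = howmany(unit_bytes, iclog_space);

	/* split-rec op headers may push us over another iclog boundary */
	unit_bytes += op_hdr * num_headers;
	while (!num_headers || howmany(unit_bytes, iclog_space) > num_headers) {
		unit_bytes += op_hdr;
		num_headers++;
	}
	unit_bytes += iclog_hsize * num_headers;

	printf("num_headers=%d unit_bytes=%d\n", num_headers, unit_bytes);
	return 0;
}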
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index 97a24c7795a4..229d1f36ba9a 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -110,6 +110,12 @@ typedef struct xfs_log_iovec {
uint i_type; /* type of region */
} xfs_log_iovec_t;
+struct xfs_log_vec {
+ struct xfs_log_vec *lv_next; /* next lv in build list */
+ int lv_niovecs; /* number of iovecs in lv */
+ struct xfs_log_iovec *lv_iovecp; /* iovec array */
+};
+
/*
* Structure used to pass callback function and the function's argument
* to the log manager.
@@ -126,6 +132,13 @@ typedef struct xfs_log_callback {
struct xfs_mount;
struct xlog_in_core;
struct xlog_ticket;
+struct xfs_log_item;
+struct xfs_item_ops;
+
+void xfs_log_item_init(struct xfs_mount *mp,
+ struct xfs_log_item *item,
+ int type,
+ struct xfs_item_ops *ops);
xfs_lsn_t xfs_log_done(struct xfs_mount *mp,
struct xlog_ticket *ticket,
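struct xfs_log_vec carries a chain of per-item iovec arrays, which is what the reworked xlog_write() walks. Only single-element vectors are built in this patch (see xlog_commit_record() and the unmount record); a hedged sketch of how two items' regions could be chained:

/* Sketch only: chaining two xfs_log_vec entries for xlog_write(). */
struct xfs_log_iovec	item1_regs[2];	/* filled in by the items' ->iop_format */
struct xfs_log_iovec	item2_regs[1];

struct xfs_log_vec	lv2 = {
	.lv_next	= NULL,
	.lv_niovecs	= 1,
	.lv_iovecp	= item2_regs,
};
struct xfs_log_vec	lv1 = {
	.lv_next	= &lv2,		/* next lv in the build list */
	.lv_niovecs	= 2,
	.lv_iovecp	= item1_regs,
};

/*
 * xlog_write(log, &lv1, ticket, &start_lsn, NULL, 0) then walks lv1 and
 * lv2 in order, emitting one op header per region.
 */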
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index fd02a18facd5..9cf695154451 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -396,9 +396,7 @@ typedef struct log {
struct xfs_buf_cancel **l_buf_cancel_table;
int l_iclog_hsize; /* size of iclog header */
int l_iclog_heads; /* # of iclog header sectors */
- uint l_sectbb_log; /* log2 of sector size in BBs */
- uint l_sectbb_mask; /* sector size (in BBs)
- * alignment mask */
+ uint l_sectBBsize; /* sector size in BBs (2^n) */
int l_iclog_size; /* size of log in bytes */
int l_iclog_size_log; /* log power size of log */
int l_iclog_bufs; /* number of iclog buffers */
@@ -449,6 +447,14 @@ extern void xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int);
extern kmem_zone_t *xfs_log_ticket_zone;
+static inline void
+xlog_write_adv_cnt(void **ptr, int *len, int *off, size_t bytes)
+{
+ *ptr += bytes;
+ *len -= bytes;
+ *off += bytes;
+}
+
/*
* Unmount record type is used as a pseudo transaction type for the ticket.
* Its value must be outside the range of XFS_TRANS_* values.
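xlog_write_adv_cnt() is now a typed inline taking pointers, so it can advance the caller's write cursor, remaining length and iclog offset in one place. A standalone sketch of the bookkeeping it performs; pointer arithmetic on void * is made explicit here, whereas the kernel version relies on the GCC extension:

/* Standalone sketch of the xlog_write_adv_cnt() bookkeeping. */
#include <stdio.h>
#include <stddef.h>

static inline void
xlog_write_adv_cnt(void **ptr, int *len, int *off, size_t bytes)
{
	*ptr = (char *)*ptr + bytes;
	*len -= bytes;
	*off += bytes;
}

int main(void)
{
	char	iclog_data[256];
	void	*ptr = iclog_data;
	int	len = 100;	/* vector bytes still to copy */
	int	off = 0;	/* offset into the iclog data region */

	/* account for a 12-byte op header, then a 52-byte region copy */
	xlog_write_adv_cnt(&ptr, &len, &off, 12);
	xlog_write_adv_cnt(&ptr, &len, &off, 52);

	printf("remaining=%d offset=%d advanced=%td\n",
	       len, off, (char *)ptr - iclog_data);
	return 0;
}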
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 22e6efdc17ea..0de08e366315 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -56,33 +56,61 @@ STATIC void xlog_recover_check_summary(xlog_t *);
#define xlog_recover_check_summary(log)
#endif
-
/*
* Sector aligned buffer routines for buffer create/read/write/access
*/
-#define XLOG_SECTOR_ROUNDUP_BBCOUNT(log, bbs) \
- ( ((log)->l_sectbb_mask && (bbs & (log)->l_sectbb_mask)) ? \
- ((bbs + (log)->l_sectbb_mask + 1) & ~(log)->l_sectbb_mask) : (bbs) )
-#define XLOG_SECTOR_ROUNDDOWN_BLKNO(log, bno) ((bno) & ~(log)->l_sectbb_mask)
+/*
+ * Verify that the given count of basic blocks is a valid number of blocks
+ * to specify for an operation involving the given XFS log buffer.
+ * Returns nonzero if the count is valid, 0 otherwise.
+ */
+static inline int
+xlog_buf_bbcount_valid(
+ xlog_t *log,
+ int bbcount)
+{
+ return bbcount > 0 && bbcount <= log->l_logBBsize;
+}
+
+/*
+ * Allocate a buffer to hold log data. The buffer needs to be able
+ * to map to a range of nbblks basic blocks at any valid (basic
+ * block) offset within the log.
+ */
STATIC xfs_buf_t *
xlog_get_bp(
xlog_t *log,
int nbblks)
{
- if (nbblks <= 0 || nbblks > log->l_logBBsize) {
- xlog_warn("XFS: Invalid block length (0x%x) given for buffer", nbblks);
- XFS_ERROR_REPORT("xlog_get_bp(1)",
- XFS_ERRLEVEL_HIGH, log->l_mp);
+ if (!xlog_buf_bbcount_valid(log, nbblks)) {
+ xlog_warn("XFS: Invalid block length (0x%x) given for buffer",
+ nbblks);
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
return NULL;
}
- if (log->l_sectbb_log) {
- if (nbblks > 1)
- nbblks += XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 1);
- nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks);
- }
+ /*
+ * We do log I/O in units of log sectors (a power-of-2
+ * multiple of the basic block size), so we round up the
+ * requested size to accommodate the basic blocks required
+ * for complete log sectors.
+ *
+ * In addition, the buffer may be used for a non-sector-
+ * aligned block offset, in which case an I/O of the
+ * requested size could extend beyond the end of the
+ * buffer. If the requested size is only 1 basic block it
+ * will never straddle a sector boundary, so this won't be
+ * an issue. Nor will this be a problem if the log I/O is
+ * done in basic blocks (sector size 1). But otherwise we
+ * extend the buffer by one extra log sector to ensure
+ * there's space to accommodate this possibility.
+ */
+ if (nbblks > 1 && log->l_sectBBsize > 1)
+ nbblks += log->l_sectBBsize;
+ nbblks = round_up(nbblks, log->l_sectBBsize);
+
return xfs_buf_get_noaddr(BBTOB(nbblks), log->l_mp->m_logdev_targp);
}
@@ -93,6 +121,10 @@ xlog_put_bp(
xfs_buf_free(bp);
}
+/*
+ * Return the address of the start of the given block number's data
+ * in a log buffer. The buffer covers a log sector-aligned region.
+ */
STATIC xfs_caddr_t
xlog_align(
xlog_t *log,
@@ -100,14 +132,14 @@ xlog_align(
int nbblks,
xfs_buf_t *bp)
{
+ xfs_daddr_t offset;
xfs_caddr_t ptr;
- if (!log->l_sectbb_log)
- return XFS_BUF_PTR(bp);
+ offset = blk_no & ((xfs_daddr_t) log->l_sectBBsize - 1);
+ ptr = XFS_BUF_PTR(bp) + BBTOB(offset);
+
+ ASSERT(ptr + BBTOB(nbblks) <= XFS_BUF_PTR(bp) + XFS_BUF_SIZE(bp));
- ptr = XFS_BUF_PTR(bp) + BBTOB((int)blk_no & log->l_sectbb_mask);
- ASSERT(XFS_BUF_SIZE(bp) >=
- BBTOB(nbblks + (blk_no & log->l_sectbb_mask)));
return ptr;
}
@@ -124,21 +156,18 @@ xlog_bread_noalign(
{
int error;
- if (nbblks <= 0 || nbblks > log->l_logBBsize) {
- xlog_warn("XFS: Invalid block length (0x%x) given for buffer", nbblks);
- XFS_ERROR_REPORT("xlog_bread(1)",
- XFS_ERRLEVEL_HIGH, log->l_mp);
+ if (!xlog_buf_bbcount_valid(log, nbblks)) {
+ xlog_warn("XFS: Invalid block length (0x%x) given for buffer",
+ nbblks);
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
return EFSCORRUPTED;
}
- if (log->l_sectbb_log) {
- blk_no = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, blk_no);
- nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks);
- }
+ blk_no = round_down(blk_no, log->l_sectBBsize);
+ nbblks = round_up(nbblks, log->l_sectBBsize);
ASSERT(nbblks > 0);
ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));
- ASSERT(bp);
XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
XFS_BUF_READ(bp);
@@ -186,17 +215,15 @@ xlog_bwrite(
{
int error;
- if (nbblks <= 0 || nbblks > log->l_logBBsize) {
- xlog_warn("XFS: Invalid block length (0x%x) given for buffer", nbblks);
- XFS_ERROR_REPORT("xlog_bwrite(1)",
- XFS_ERRLEVEL_HIGH, log->l_mp);
+ if (!xlog_buf_bbcount_valid(log, nbblks)) {
+ xlog_warn("XFS: Invalid block length (0x%x) given for buffer",
+ nbblks);
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
return EFSCORRUPTED;
}
- if (log->l_sectbb_log) {
- blk_no = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, blk_no);
- nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks);
- }
+ blk_no = round_down(blk_no, log->l_sectBBsize);
+ nbblks = round_up(nbblks, log->l_sectBBsize);
ASSERT(nbblks > 0);
ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));
@@ -327,39 +354,38 @@ xlog_find_cycle_start(
{
xfs_caddr_t offset;
xfs_daddr_t mid_blk;
+ xfs_daddr_t end_blk;
uint mid_cycle;
int error;
- mid_blk = BLK_AVG(first_blk, *last_blk);
- while (mid_blk != first_blk && mid_blk != *last_blk) {
+ end_blk = *last_blk;
+ mid_blk = BLK_AVG(first_blk, end_blk);
+ while (mid_blk != first_blk && mid_blk != end_blk) {
error = xlog_bread(log, mid_blk, 1, bp, &offset);
if (error)
return error;
mid_cycle = xlog_get_cycle(offset);
- if (mid_cycle == cycle) {
- *last_blk = mid_blk;
- /* last_half_cycle == mid_cycle */
- } else {
- first_blk = mid_blk;
- /* first_half_cycle == mid_cycle */
- }
- mid_blk = BLK_AVG(first_blk, *last_blk);
+ if (mid_cycle == cycle)
+ end_blk = mid_blk; /* last_half_cycle == mid_cycle */
+ else
+ first_blk = mid_blk; /* first_half_cycle == mid_cycle */
+ mid_blk = BLK_AVG(first_blk, end_blk);
}
- ASSERT((mid_blk == first_blk && mid_blk+1 == *last_blk) ||
- (mid_blk == *last_blk && mid_blk-1 == first_blk));
+ ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
+ (mid_blk == end_blk && mid_blk-1 == first_blk));
+
+ *last_blk = end_blk;
return 0;
}
/*
- * Check that the range of blocks does not contain the cycle number
- * given. The scan needs to occur from front to back and the ptr into the
- * region must be updated since a later routine will need to perform another
- * test. If the region is completely good, we end up returning the same
- * last block number.
- *
- * Set blkno to -1 if we encounter no errors. This is an invalid block number
- * since we don't ever expect logs to get this large.
+ * Check that a range of blocks does not contain stop_on_cycle_no.
+ * Fill in *new_blk with the block offset where such a block is
+ * found, or with -1 (an invalid block number) if there is no such
+ * block in the range. The scan needs to occur from front to back
+ * and the pointer into the region must be updated since a later
+ * routine will need to perform another test.
*/
STATIC int
xlog_find_verify_cycle(
@@ -376,12 +402,16 @@ xlog_find_verify_cycle(
xfs_caddr_t buf = NULL;
int error = 0;
+ /*
+ * Greedily allocate a buffer big enough to handle the full
+ * range of basic blocks we'll be examining. If that fails,
+ * try a smaller size. We need to be able to read at least
+ * a log sector, or we're out of luck.
+ */
bufblks = 1 << ffs(nbblks);
-
while (!(bp = xlog_get_bp(log, bufblks))) {
- /* can't get enough memory to do everything in one big buffer */
bufblks >>= 1;
- if (bufblks <= log->l_sectbb_log)
+ if (bufblks < log->l_sectBBsize)
return ENOMEM;
}
@@ -629,7 +659,7 @@ xlog_find_head(
* In this case we want to find the first block with cycle
* number matching last_half_cycle. We expect the log to be
* some variation on
- * x + 1 ... | x ...
+ * x + 1 ... | x ... | x
* The first block with cycle number x (last_half_cycle) will
* be where the new head belongs. First we do a binary search
* for the first occurrence of last_half_cycle. The binary
@@ -639,11 +669,13 @@ xlog_find_head(
* the log, then we look for occurrences of last_half_cycle - 1
* at the end of the log. The cases we're looking for look
* like
- * x + 1 ... | x | x + 1 | x ...
- * ^ binary search stopped here
+ * v binary search stopped here
+ * x + 1 ... | x | x + 1 | x ... | x
+ * ^ but we want to locate this spot
* or
- * x + 1 ... | x ... | x - 1 | x
* <---------> less than scan distance
+ * x + 1 ... | x ... | x - 1 | x
+ * ^ we want to locate this spot
*/
stop_on_cycle = last_half_cycle;
if ((error = xlog_find_cycle_start(log, bp, first_blk,
@@ -699,16 +731,16 @@ xlog_find_head(
* certainly not the head of the log. By searching for
* last_half_cycle-1 we accomplish that.
*/
- start_blk = log_bbnum - num_scan_bblks + head_blk;
ASSERT(head_blk <= INT_MAX &&
- (xfs_daddr_t) num_scan_bblks - head_blk >= 0);
+ (xfs_daddr_t) num_scan_bblks >= head_blk);
+ start_blk = log_bbnum - (num_scan_bblks - head_blk);
if ((error = xlog_find_verify_cycle(log, start_blk,
num_scan_bblks - (int)head_blk,
(stop_on_cycle - 1), &new_blk)))
goto bp_err;
if (new_blk != -1) {
head_blk = new_blk;
- goto bad_blk;
+ goto validate_head;
}
/*
@@ -726,7 +758,7 @@ xlog_find_head(
head_blk = new_blk;
}
- bad_blk:
+validate_head:
/*
* Now we need to make sure head_blk is not pointing to a block in
* the middle of a log record.
@@ -748,7 +780,7 @@ xlog_find_head(
if ((error = xlog_find_verify_log_record(log, start_blk,
&head_blk, 0)) == -1) {
/* We hit the beginning of the log during our search */
- start_blk = log_bbnum - num_scan_bblks + head_blk;
+ start_blk = log_bbnum - (num_scan_bblks - head_blk);
new_blk = log_bbnum;
ASSERT(start_blk <= INT_MAX &&
(xfs_daddr_t) log_bbnum-start_blk >= 0);
@@ -833,12 +865,12 @@ xlog_find_tail(
if (*head_blk == 0) { /* special case */
error = xlog_bread(log, 0, 1, bp, &offset);
if (error)
- goto bread_err;
+ goto done;
if (xlog_get_cycle(offset) == 0) {
*tail_blk = 0;
/* leave all other log inited values alone */
- goto exit;
+ goto done;
}
}
@@ -849,7 +881,7 @@ xlog_find_tail(
for (i = (int)(*head_blk) - 1; i >= 0; i--) {
error = xlog_bread(log, i, 1, bp, &offset);
if (error)
- goto bread_err;
+ goto done;
if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(*(__be32 *)offset)) {
found = 1;
@@ -866,7 +898,7 @@ xlog_find_tail(
for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) {
error = xlog_bread(log, i, 1, bp, &offset);
if (error)
- goto bread_err;
+ goto done;
if (XLOG_HEADER_MAGIC_NUM ==
be32_to_cpu(*(__be32 *)offset)) {
@@ -941,7 +973,7 @@ xlog_find_tail(
umount_data_blk = (i + hblks) % log->l_logBBsize;
error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
if (error)
- goto bread_err;
+ goto done;
op_head = (xlog_op_header_t *)offset;
if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
@@ -987,12 +1019,10 @@ xlog_find_tail(
* But... if the -device- itself is readonly, just skip this.
* We can't recover this device anyway, so it won't matter.
*/
- if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp)) {
+ if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp))
error = xlog_clear_stale_blocks(log, tail_lsn);
- }
-bread_err:
-exit:
+done:
xlog_put_bp(bp);
if (error)
@@ -1152,16 +1182,22 @@ xlog_write_log_records(
xfs_caddr_t offset;
xfs_buf_t *bp;
int balign, ealign;
- int sectbb = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 1);
+ int sectbb = log->l_sectBBsize;
int end_block = start_block + blocks;
int bufblks;
int error = 0;
int i, j = 0;
+ /*
+ * Greedily allocate a buffer big enough to handle the full
+ * range of basic blocks to be written. If that fails, try
+ * a smaller size. We need to be able to write at least a
+ * log sector, or we're out of luck.
+ */
bufblks = 1 << ffs(blocks);
while (!(bp = xlog_get_bp(log, bufblks))) {
bufblks >>= 1;
- if (bufblks <= log->l_sectbb_log)
+ if (bufblks < sectbb)
return ENOMEM;
}
@@ -1169,7 +1205,7 @@ xlog_write_log_records(
* the buffer in the starting sector not covered by the first
* write below.
*/
- balign = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, start_block);
+ balign = round_down(start_block, sectbb);
if (balign != start_block) {
error = xlog_bread_noalign(log, start_block, 1, bp);
if (error)
@@ -1188,7 +1224,7 @@ xlog_write_log_records(
* the buffer in the final sector not covered by the write.
* If this is the same sector as the above read, skip it.
*/
- ealign = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, end_block);
+ ealign = round_down(end_block, sectbb);
if (j == 0 && (start_block + endcount > ealign)) {
offset = XFS_BUF_PTR(bp);
balign = BBTOB(ealign - start_block);
@@ -1408,6 +1444,7 @@ xlog_recover_add_item(
STATIC int
xlog_recover_add_to_cont_trans(
+ struct log *log,
xlog_recover_t *trans,
xfs_caddr_t dp,
int len)
@@ -1434,6 +1471,7 @@ xlog_recover_add_to_cont_trans(
memcpy(&ptr[old_len], dp, len); /* d, s, l */
item->ri_buf[item->ri_cnt-1].i_len += len;
item->ri_buf[item->ri_cnt-1].i_addr = ptr;
+ trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
return 0;
}
@@ -1452,6 +1490,7 @@ xlog_recover_add_to_cont_trans(
*/
STATIC int
xlog_recover_add_to_trans(
+ struct log *log,
xlog_recover_t *trans,
xfs_caddr_t dp,
int len)
@@ -1510,6 +1549,7 @@ xlog_recover_add_to_trans(
item->ri_buf[item->ri_cnt].i_addr = ptr;
item->ri_buf[item->ri_cnt].i_len = len;
item->ri_cnt++;
+ trace_xfs_log_recover_item_add(log, trans, item, 0);
return 0;
}
@@ -1521,7 +1561,9 @@ xlog_recover_add_to_trans(
*/
STATIC int
xlog_recover_reorder_trans(
- xlog_recover_t *trans)
+ struct log *log,
+ xlog_recover_t *trans,
+ int pass)
{
xlog_recover_item_t *item, *n;
LIST_HEAD(sort_list);
@@ -1535,6 +1577,8 @@ xlog_recover_reorder_trans(
switch (ITEM_TYPE(item)) {
case XFS_LI_BUF:
if (!(buf_f->blf_flags & XFS_BLI_CANCEL)) {
+ trace_xfs_log_recover_item_reorder_head(log,
+ trans, item, pass);
list_move(&item->ri_list, &trans->r_itemq);
break;
}
@@ -1543,6 +1587,8 @@ xlog_recover_reorder_trans(
case XFS_LI_QUOTAOFF:
case XFS_LI_EFD:
case XFS_LI_EFI:
+ trace_xfs_log_recover_item_reorder_tail(log,
+ trans, item, pass);
list_move_tail(&item->ri_list, &trans->r_itemq);
break;
default:
@@ -1592,8 +1638,10 @@ xlog_recover_do_buffer_pass1(
/*
* If this isn't a cancel buffer item, then just return.
*/
- if (!(flags & XFS_BLI_CANCEL))
+ if (!(flags & XFS_BLI_CANCEL)) {
+ trace_xfs_log_recover_buf_not_cancel(log, buf_f);
return;
+ }
/*
* Insert an xfs_buf_cancel record into the hash table of
@@ -1627,6 +1675,7 @@ xlog_recover_do_buffer_pass1(
while (nextp != NULL) {
if (nextp->bc_blkno == blkno && nextp->bc_len == len) {
nextp->bc_refcount++;
+ trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
return;
}
prevp = nextp;
@@ -1640,6 +1689,7 @@ xlog_recover_do_buffer_pass1(
bcp->bc_refcount = 1;
bcp->bc_next = NULL;
prevp->bc_next = bcp;
+ trace_xfs_log_recover_buf_cancel_add(log, buf_f);
}
/*
@@ -1779,6 +1829,8 @@ xlog_recover_do_inode_buffer(
unsigned int *data_map = NULL;
unsigned int map_size = 0;
+ trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
+
switch (buf_f->blf_type) {
case XFS_LI_BUF:
data_map = buf_f->blf_data_map;
@@ -1874,6 +1926,7 @@ xlog_recover_do_inode_buffer(
/*ARGSUSED*/
STATIC void
xlog_recover_do_reg_buffer(
+ struct xfs_mount *mp,
xlog_recover_item_t *item,
xfs_buf_t *bp,
xfs_buf_log_format_t *buf_f)
@@ -1885,6 +1938,8 @@ xlog_recover_do_reg_buffer(
unsigned int map_size = 0;
int error;
+ trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
+
switch (buf_f->blf_type) {
case XFS_LI_BUF:
data_map = buf_f->blf_data_map;
@@ -2083,6 +2138,8 @@ xlog_recover_do_dquot_buffer(
{
uint type;
+ trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
+
/*
* Filesystems are required to send in quota flags at mount time.
*/
@@ -2103,7 +2160,7 @@ xlog_recover_do_dquot_buffer(
if (log->l_quotaoffs_flag & type)
return;
- xlog_recover_do_reg_buffer(item, bp, buf_f);
+ xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
}
/*
@@ -2164,9 +2221,11 @@ xlog_recover_do_buffer_trans(
*/
cancel = xlog_recover_do_buffer_pass2(log, buf_f);
if (cancel) {
+ trace_xfs_log_recover_buf_cancel(log, buf_f);
return 0;
}
}
+ trace_xfs_log_recover_buf_recover(log, buf_f);
switch (buf_f->blf_type) {
case XFS_LI_BUF:
blkno = buf_f->blf_blkno;
@@ -2204,7 +2263,7 @@ xlog_recover_do_buffer_trans(
(XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) {
xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
} else {
- xlog_recover_do_reg_buffer(item, bp, buf_f);
+ xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
}
if (error)
return XFS_ERROR(error);
@@ -2284,8 +2343,10 @@ xlog_recover_do_inode_trans(
if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
in_f->ilf_len, 0)) {
error = 0;
+ trace_xfs_log_recover_inode_cancel(log, in_f);
goto error;
}
+ trace_xfs_log_recover_inode_recover(log, in_f);
bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len,
XBF_LOCK);
@@ -2337,6 +2398,7 @@ xlog_recover_do_inode_trans(
/* do nothing */
} else {
xfs_buf_relse(bp);
+ trace_xfs_log_recover_inode_skip(log, in_f);
error = 0;
goto error;
}
@@ -2758,11 +2820,12 @@ xlog_recover_do_trans(
int error = 0;
xlog_recover_item_t *item;
- error = xlog_recover_reorder_trans(trans);
+ error = xlog_recover_reorder_trans(log, trans, pass);
if (error)
return error;
list_for_each_entry(item, &trans->r_itemq, ri_list) {
+ trace_xfs_log_recover_item_recover(log, trans, item, pass);
switch (ITEM_TYPE(item)) {
case XFS_LI_BUF:
error = xlog_recover_do_buffer_trans(log, item, pass);
@@ -2919,8 +2982,9 @@ xlog_recover_process_data(
error = xlog_recover_unmount_trans(trans);
break;
case XLOG_WAS_CONT_TRANS:
- error = xlog_recover_add_to_cont_trans(trans,
- dp, be32_to_cpu(ohead->oh_len));
+ error = xlog_recover_add_to_cont_trans(log,
+ trans, dp,
+ be32_to_cpu(ohead->oh_len));
break;
case XLOG_START_TRANS:
xlog_warn(
@@ -2930,7 +2994,7 @@ xlog_recover_process_data(
break;
case 0:
case XLOG_CONTINUE_TRANS:
- error = xlog_recover_add_to_trans(trans,
+ error = xlog_recover_add_to_trans(log, trans,
dp, be32_to_cpu(ohead->oh_len));
break;
default:
@@ -3331,42 +3395,6 @@ xlog_pack_data(
}
}
-#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
-STATIC void
-xlog_unpack_data_checksum(
- xlog_rec_header_t *rhead,
- xfs_caddr_t dp,
- xlog_t *log)
-{
- __be32 *up = (__be32 *)dp;
- uint chksum = 0;
- int i;
-
- /* divide length by 4 to get # words */
- for (i=0; i < be32_to_cpu(rhead->h_len) >> 2; i++) {
- chksum ^= be32_to_cpu(*up);
- up++;
- }
- if (chksum != be32_to_cpu(rhead->h_chksum)) {
- if (rhead->h_chksum ||
- ((log->l_flags & XLOG_CHKSUM_MISMATCH) == 0)) {
- cmn_err(CE_DEBUG,
- "XFS: LogR chksum mismatch: was (0x%x) is (0x%x)\n",
- be32_to_cpu(rhead->h_chksum), chksum);
- cmn_err(CE_DEBUG,
-"XFS: Disregard message if filesystem was created with non-DEBUG kernel");
- if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
- cmn_err(CE_DEBUG,
- "XFS: LogR this is a LogV2 filesystem\n");
- }
- log->l_flags |= XLOG_CHKSUM_MISMATCH;
- }
- }
-}
-#else
-#define xlog_unpack_data_checksum(rhead, dp, log)
-#endif
-
STATIC void
xlog_unpack_data(
xlog_rec_header_t *rhead,
@@ -3390,8 +3418,6 @@ xlog_unpack_data(
dp += BBSIZE;
}
}
-
- xlog_unpack_data_checksum(rhead, dp, log);
}
STATIC int
@@ -3490,7 +3516,7 @@ xlog_do_recovery_pass(
hblks = 1;
}
} else {
- ASSERT(log->l_sectbb_log == 0);
+ ASSERT(log->l_sectBBsize == 1);
hblks = 1;
hbp = xlog_get_bp(log, 1);
h_size = XLOG_BIG_RECORD_BSIZE;
@@ -3946,10 +3972,6 @@ xlog_recover_check_summary(
xfs_agf_t *agfp;
xfs_buf_t *agfbp;
xfs_buf_t *agibp;
- xfs_buf_t *sbbp;
-#ifdef XFS_LOUD_RECOVERY
- xfs_sb_t *sbp;
-#endif
xfs_agnumber_t agno;
__uint64_t freeblks;
__uint64_t itotal;
@@ -3984,30 +4006,5 @@ xlog_recover_check_summary(
xfs_buf_relse(agibp);
}
}
-
- sbbp = xfs_getsb(mp, 0);
-#ifdef XFS_LOUD_RECOVERY
- sbp = &mp->m_sb;
- xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(sbbp));
- cmn_err(CE_NOTE,
- "xlog_recover_check_summary: sb_icount %Lu itotal %Lu",
- sbp->sb_icount, itotal);
- cmn_err(CE_NOTE,
- "xlog_recover_check_summary: sb_ifree %Lu itotal %Lu",
- sbp->sb_ifree, ifree);
- cmn_err(CE_NOTE,
- "xlog_recover_check_summary: sb_fdblocks %Lu freeblks %Lu",
- sbp->sb_fdblocks, freeblks);
-#if 0
- /*
- * This is turned off until I account for the allocation
- * btree blocks which live in free space.
- */
- ASSERT(sbp->sb_icount == itotal);
- ASSERT(sbp->sb_ifree == ifree);
- ASSERT(sbp->sb_fdblocks == freeblks);
-#endif
-#endif
- xfs_buf_relse(sbbp);
}
#endif /* DEBUG */
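The recovery buffer helpers now align reads and writes with round_down()/round_up() on l_sectBBsize rather than the old mask macros. A standalone sketch of that alignment; the two macros are re-defined locally only for the example (the kernel provides them) and the sizes are illustrative:

/* Standalone sketch of the sector rounding in xlog_bread_noalign()/xlog_bwrite(). */
#include <stdio.h>

#define round_down(x, y)	((x) & ~((y) - 1))		/* y must be a power of 2 */
#define round_up(x, y)		(((x) + (y) - 1) & ~((y) - 1))

int main(void)
{
	int blk_no = 1234;	/* illustrative basic-block offset into the log */
	int nbblks = 5;		/* illustrative basic-block count */
	int sectBBsize = 8;	/* illustrative log sector size, in basic blocks */

	printf("start %d -> %d, count %d -> %d\n",
	       blk_no, round_down(blk_no, sectBBsize),
	       nbblks, round_up(nbblks, sectBBsize));	/* 1234 -> 1232, 5 -> 8 */
	return 0;
}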
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index e79b56b4bca6..d7bf38c8cd1c 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -1405,13 +1405,6 @@ xfs_mountfs(
xfs_qm_mount_quotas(mp);
}
-#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
- if (XFS_IS_QUOTA_ON(mp))
- xfs_fs_cmn_err(CE_NOTE, mp, "Disk quotas turned on");
- else
- xfs_fs_cmn_err(CE_NOTE, mp, "Disk quotas not turned on");
-#endif
-
/*
* Now we are mounted, reserve a small amount of unused space for
* privileged transactions. This is needed so that transaction
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
index fdcab3f81dde..e0e64b113bd6 100644
--- a/fs/xfs/xfs_quota.h
+++ b/fs/xfs/xfs_quota.h
@@ -201,9 +201,6 @@ typedef struct xfs_qoff_logformat {
#define XFS_QMOPT_FORCE_RES 0x0000010 /* ignore quota limits */
#define XFS_QMOPT_DQSUSER 0x0000020 /* don't cache super users dquot */
#define XFS_QMOPT_SBVERSION 0x0000040 /* change superblock version num */
-#define XFS_QMOPT_QUOTAOFF 0x0000080 /* quotas are being turned off */
-#define XFS_QMOPT_UMOUNTING 0x0000100 /* filesys is being unmounted */
-#define XFS_QMOPT_DOLOG 0x0000200 /* log buf changes (in quotacheck) */
#define XFS_QMOPT_DOWARN 0x0000400 /* increase warning cnt if needed */
#define XFS_QMOPT_DQREPAIR 0x0001000 /* repair dquot if damaged */
#define XFS_QMOPT_GQUOTA 0x0002000 /* group dquot requested */
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index f73e358bae8d..be578ecb4af2 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -45,23 +45,12 @@
#include "xfs_trans_space.h"
#include "xfs_inode_item.h"
-
-STATIC void xfs_trans_apply_sb_deltas(xfs_trans_t *);
-STATIC uint xfs_trans_count_vecs(xfs_trans_t *);
-STATIC void xfs_trans_fill_vecs(xfs_trans_t *, xfs_log_iovec_t *);
-STATIC void xfs_trans_uncommit(xfs_trans_t *, uint);
-STATIC void xfs_trans_committed(xfs_trans_t *, int);
-STATIC void xfs_trans_chunk_committed(xfs_log_item_chunk_t *, xfs_lsn_t, int);
-STATIC void xfs_trans_free(xfs_trans_t *);
-
kmem_zone_t *xfs_trans_zone;
-
/*
* Reservation functions here avoid a huge stack in xfs_trans_init
* due to register overflow from temporaries in the calculations.
*/
-
STATIC uint
xfs_calc_write_reservation(xfs_mount_t *mp)
{
@@ -261,6 +250,19 @@ _xfs_trans_alloc(
}
/*
+ * Free the transaction structure. If there is more clean up
+ * to do when the structure is freed, add it here.
+ */
+STATIC void
+xfs_trans_free(
+ xfs_trans_t *tp)
+{
+ atomic_dec(&tp->t_mountp->m_active_trans);
+ xfs_trans_free_dqinfo(tp);
+ kmem_zone_free(xfs_trans_zone, tp);
+}
+
+/*
* This is called to create a new transaction which will share the
* permanent log reservation of the given transaction. The remaining
* unused block and rt extent reservations are also inherited. This
@@ -764,94 +766,278 @@ xfs_trans_unreserve_and_mod_sb(
}
}
+/*
+ * Total up the number of log iovecs needed to commit this
+ * transaction. The transaction itself needs one for the
+ * transaction header. Ask each dirty item in turn how many
+ * it needs to get the total.
+ */
+static uint
+xfs_trans_count_vecs(
+ struct xfs_trans *tp)
+{
+ int nvecs;
+ xfs_log_item_desc_t *lidp;
+
+ nvecs = 1;
+ lidp = xfs_trans_first_item(tp);
+ ASSERT(lidp != NULL);
+
+ /* In the non-debug case we need to start bailing out if we
+ * didn't find a log_item here, return zero and let trans_commit
+ * deal with it.
+ */
+ if (lidp == NULL)
+ return 0;
+
+ while (lidp != NULL) {
+ /*
+ * Skip items which aren't dirty in this transaction.
+ */
+ if (!(lidp->lid_flags & XFS_LID_DIRTY)) {
+ lidp = xfs_trans_next_item(tp, lidp);
+ continue;
+ }
+ lidp->lid_size = IOP_SIZE(lidp->lid_item);
+ nvecs += lidp->lid_size;
+ lidp = xfs_trans_next_item(tp, lidp);
+ }
+
+ return nvecs;
+}
/*
- * xfs_trans_commit
+ * Fill in the vector with pointers to data to be logged
+ * by this transaction. The transaction header takes
+ * the first vector, and then each dirty item takes the
+ * number of vectors it indicated it needed in xfs_trans_count_vecs().
*
- * Commit the given transaction to the log a/synchronously.
+ * As each item fills in the entries it needs, also pin the item
+ * so that it cannot be flushed out until the log write completes.
+ */
+static void
+xfs_trans_fill_vecs(
+ struct xfs_trans *tp,
+ struct xfs_log_iovec *log_vector)
+{
+ xfs_log_item_desc_t *lidp;
+ struct xfs_log_iovec *vecp;
+ uint nitems;
+
+ /*
+ * Skip over the entry for the transaction header, we'll
+ * fill that in at the end.
+ */
+ vecp = log_vector + 1;
+
+ nitems = 0;
+ lidp = xfs_trans_first_item(tp);
+ ASSERT(lidp);
+ while (lidp) {
+ /* Skip items which aren't dirty in this transaction. */
+ if (!(lidp->lid_flags & XFS_LID_DIRTY)) {
+ lidp = xfs_trans_next_item(tp, lidp);
+ continue;
+ }
+
+ /*
+ * The item may be marked dirty but not log anything. This can
+ * be used to get called when a transaction is committed.
+ */
+ if (lidp->lid_size)
+ nitems++;
+ IOP_FORMAT(lidp->lid_item, vecp);
+ vecp += lidp->lid_size;
+ IOP_PIN(lidp->lid_item);
+ lidp = xfs_trans_next_item(tp, lidp);
+ }
+
+ /*
+ * Now that we've counted the number of items in this transaction, fill
+ * in the transaction header. Note that the transaction header does not
+ * have a log item.
+ */
+ tp->t_header.th_magic = XFS_TRANS_HEADER_MAGIC;
+ tp->t_header.th_type = tp->t_type;
+ tp->t_header.th_num_items = nitems;
+ log_vector->i_addr = (xfs_caddr_t)&tp->t_header;
+ log_vector->i_len = sizeof(xfs_trans_header_t);
+ log_vector->i_type = XLOG_REG_TYPE_TRANSHDR;
+}
+
+/*
+ * The committed item processing consists of calling the committed routine of
+ * each logged item, updating the item's position in the AIL if necessary, and
+ * unpinning each item. If the committed routine returns -1, then do nothing
+ * further with the item because it may have been freed.
*
- * XFS disk error handling mechanism is not based on a typical
- * transaction abort mechanism. Logically after the filesystem
- * gets marked 'SHUTDOWN', we can't let any new transactions
- * be durable - ie. committed to disk - because some metadata might
- * be inconsistent. In such cases, this returns an error, and the
- * caller may assume that all locked objects joined to the transaction
- * have already been unlocked as if the commit had succeeded.
- * Do not reference the transaction structure after this call.
+ * Since items are unlocked when they are copied to the incore log, it is
+ * possible for two transactions to be completing and manipulating the same
+ * item simultaneously. The AIL lock will protect the lsn field of each item.
+ * The value of this field can never go backwards.
+ *
+ * We unpin the items after repositioning them in the AIL, because otherwise
+ * they could be immediately flushed and we'd have to race with the flusher
+ * trying to pull the item from the AIL as we add it.
*/
- /*ARGSUSED*/
-int
-_xfs_trans_commit(
- xfs_trans_t *tp,
- uint flags,
- int *log_flushed)
+static void
+xfs_trans_item_committed(
+ struct xfs_log_item *lip,
+ xfs_lsn_t commit_lsn,
+ int aborted)
{
- xfs_log_iovec_t *log_vector;
- int nvec;
- xfs_mount_t *mp;
- xfs_lsn_t commit_lsn;
- /* REFERENCED */
- int error;
- int log_flags;
- int sync;
-#define XFS_TRANS_LOGVEC_COUNT 16
- xfs_log_iovec_t log_vector_fast[XFS_TRANS_LOGVEC_COUNT];
- struct xlog_in_core *commit_iclog;
- int shutdown;
+ xfs_lsn_t item_lsn;
+ struct xfs_ail *ailp;
- commit_lsn = -1;
+ if (aborted)
+ lip->li_flags |= XFS_LI_ABORTED;
+ item_lsn = IOP_COMMITTED(lip, commit_lsn);
+
+ /* If the committed routine returns -1, item has been freed. */
+ if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
+ return;
/*
- * Determine whether this commit is releasing a permanent
- * log reservation or not.
+ * If the returned lsn is greater than what it contained before, update
+ * the location of the item in the AIL. If it is not, then do nothing.
+ * Items can never move backwards in the AIL.
+ *
+ * While the new lsn should usually be greater, it is possible that a
+ * later transaction completing simultaneously with an earlier one
+ * using the same item could complete first with a higher lsn. This
+ * would cause the earlier transaction to fail the test below.
*/
- if (flags & XFS_TRANS_RELEASE_LOG_RES) {
- ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
- log_flags = XFS_LOG_REL_PERM_RESERV;
+ ailp = lip->li_ailp;
+ spin_lock(&ailp->xa_lock);
+ if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0) {
+ /*
+ * This will set the item's lsn to item_lsn and update the
+ * position of the item in the AIL.
+ *
+ * xfs_trans_ail_update() drops the AIL lock.
+ */
+ xfs_trans_ail_update(ailp, lip, item_lsn);
} else {
- log_flags = 0;
+ spin_unlock(&ailp->xa_lock);
}
- mp = tp->t_mountp;
/*
- * If there is nothing to be logged by the transaction,
- * then unlock all of the items associated with the
- * transaction and free the transaction structure.
- * Also make sure to return any reserved blocks to
- * the free pool.
+ * Now that we've repositioned the item in the AIL, unpin it so it can
+ * be flushed. Pass information about buffer stale state down from the
+ * log item flags, if anyone else stales the buffer we do not want to
+ * pay any attention to it.
*/
-shut_us_down:
- shutdown = XFS_FORCED_SHUTDOWN(mp) ? EIO : 0;
- if (!(tp->t_flags & XFS_TRANS_DIRTY) || shutdown) {
- xfs_trans_unreserve_and_mod_sb(tp);
+ IOP_UNPIN(lip);
+}
+
+/* Clear all the per-AG busy list items listed in this transaction */
+static void
+xfs_trans_clear_busy_extents(
+ struct xfs_trans *tp)
+{
+ xfs_log_busy_chunk_t *lbcp;
+ xfs_log_busy_slot_t *lbsp;
+ int i;
+
+ for (lbcp = &tp->t_busy; lbcp != NULL; lbcp = lbcp->lbc_next) {
+ i = 0;
+ for (lbsp = lbcp->lbc_busy; i < lbcp->lbc_unused; i++, lbsp++) {
+ if (XFS_LBC_ISFREE(lbcp, i))
+ continue;
+ xfs_alloc_clear_busy(tp, lbsp->lbc_ag, lbsp->lbc_idx);
+ }
+ }
+ xfs_trans_free_busy(tp);
+}
+
+/*
+ * This is typically called by the LM when a transaction has been fully
+ * committed to disk. It needs to unpin the items which have
+ * been logged by the transaction and update their positions
+ * in the AIL if necessary.
+ *
+ * This also gets called when the transactions didn't get written out
+ * because of an I/O error. Abortflag & XFS_LI_ABORTED is set then.
+ */
+STATIC void
+xfs_trans_committed(
+ struct xfs_trans *tp,
+ int abortflag)
+{
+ xfs_log_item_desc_t *lidp;
+ xfs_log_item_chunk_t *licp;
+ xfs_log_item_chunk_t *next_licp;
+
+ /* Call the transaction's completion callback if there is one. */
+ if (tp->t_callback != NULL)
+ tp->t_callback(tp, tp->t_callarg);
+
+ for (lidp = xfs_trans_first_item(tp);
+ lidp != NULL;
+ lidp = xfs_trans_next_item(tp, lidp)) {
+ xfs_trans_item_committed(lidp->lid_item, tp->t_lsn, abortflag);
+ }
+
+ /* free the item chunks, ignoring the embedded chunk */
+ for (licp = tp->t_items.lic_next; licp != NULL; licp = next_licp) {
+ next_licp = licp->lic_next;
+ kmem_free(licp);
+ }
+
+ xfs_trans_clear_busy_extents(tp);
+ xfs_trans_free(tp);
+}
+
+/*
+ * Called from the trans_commit code when we notice that
+ * the filesystem is in the middle of a forced shutdown.
+ */
+STATIC void
+xfs_trans_uncommit(
+ struct xfs_trans *tp,
+ uint flags)
+{
+ xfs_log_item_desc_t *lidp;
+
+ for (lidp = xfs_trans_first_item(tp);
+ lidp != NULL;
+ lidp = xfs_trans_next_item(tp, lidp)) {
/*
- * It is indeed possible for the transaction to be
- * not dirty but the dqinfo portion to be. All that
- * means is that we have some (non-persistent) quota
- * reservations that need to be unreserved.
+ * Unpin all but those that aren't dirty.
*/
- xfs_trans_unreserve_and_mod_dquots(tp);
- if (tp->t_ticket) {
- commit_lsn = xfs_log_done(mp, tp->t_ticket,
- NULL, log_flags);
- if (commit_lsn == -1 && !shutdown)
- shutdown = XFS_ERROR(EIO);
- }
- current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
- xfs_trans_free_items(tp, shutdown? XFS_TRANS_ABORT : 0);
- xfs_trans_free_busy(tp);
- xfs_trans_free(tp);
- XFS_STATS_INC(xs_trans_empty);
- return (shutdown);
+ if (lidp->lid_flags & XFS_LID_DIRTY)
+ IOP_UNPIN_REMOVE(lidp->lid_item, tp);
}
- ASSERT(tp->t_ticket != NULL);
- /*
- * If we need to update the superblock, then do it now.
- */
- if (tp->t_flags & XFS_TRANS_SB_DIRTY)
- xfs_trans_apply_sb_deltas(tp);
- xfs_trans_apply_dquot_deltas(tp);
+ xfs_trans_unreserve_and_mod_sb(tp);
+ xfs_trans_unreserve_and_mod_dquots(tp);
+
+ xfs_trans_free_items(tp, flags);
+ xfs_trans_free_busy(tp);
+ xfs_trans_free(tp);
+}
+
+/*
+ * Format the transaction direct to the iclog. This isolates the physical
+ * transaction commit operation from the logical operation and hence allows
+ * other methods to be introduced without affecting the existing commit path.
+ */
+static int
+xfs_trans_commit_iclog(
+ struct xfs_mount *mp,
+ struct xfs_trans *tp,
+ xfs_lsn_t *commit_lsn,
+ int flags)
+{
+ int shutdown;
+ int error;
+ int log_flags = 0;
+ struct xlog_in_core *commit_iclog;
+#define XFS_TRANS_LOGVEC_COUNT 16
+ struct xfs_log_iovec log_vector_fast[XFS_TRANS_LOGVEC_COUNT];
+ struct xfs_log_iovec *log_vector;
+ uint nvec;
+
/*
* Ask each log item how many log_vector entries it will
@@ -861,8 +1047,7 @@ shut_us_down:
*/
nvec = xfs_trans_count_vecs(tp);
if (nvec == 0) {
- xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
- goto shut_us_down;
+ return ENOMEM; /* triggers a shutdown! */
} else if (nvec <= XFS_TRANS_LOGVEC_COUNT) {
log_vector = log_vector_fast;
} else {
@@ -877,6 +1062,9 @@ shut_us_down:
*/
xfs_trans_fill_vecs(tp, log_vector);
+ if (flags & XFS_TRANS_RELEASE_LOG_RES)
+ log_flags = XFS_LOG_REL_PERM_RESERV;
+
error = xfs_log_write(mp, log_vector, nvec, tp->t_ticket, &(tp->t_lsn));
/*
@@ -884,18 +1072,17 @@ shut_us_down:
* at any time after this call. However, all the items associated
* with the transaction are still locked and pinned in memory.
*/
- commit_lsn = xfs_log_done(mp, tp->t_ticket, &commit_iclog, log_flags);
+ *commit_lsn = xfs_log_done(mp, tp->t_ticket, &commit_iclog, log_flags);
- tp->t_commit_lsn = commit_lsn;
- if (nvec > XFS_TRANS_LOGVEC_COUNT) {
+ tp->t_commit_lsn = *commit_lsn;
+ if (nvec > XFS_TRANS_LOGVEC_COUNT)
kmem_free(log_vector);
- }
/*
* If we got a log write error. Unpin the logitems that we
* had pinned, clean up, free trans structure, and return error.
*/
- if (error || commit_lsn == -1) {
+ if (error || *commit_lsn == -1) {
current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
xfs_trans_uncommit(tp, flags|XFS_TRANS_ABORT);
return XFS_ERROR(EIO);
@@ -909,8 +1096,6 @@ shut_us_down:
*/
xfs_trans_unreserve_and_mod_sb(tp);
- sync = tp->t_flags & XFS_TRANS_SYNC;
-
/*
* Tell the LM to call the transaction completion routine
* when the log write with LSN commit_lsn completes (e.g.
@@ -953,7 +1138,7 @@ shut_us_down:
* the commit lsn of this transaction for dependency tracking
* purposes.
*/
- xfs_trans_unlock_items(tp, commit_lsn);
+ xfs_trans_unlock_items(tp, *commit_lsn);
/*
* If we detected a log error earlier, finish committing
@@ -973,156 +1158,114 @@ shut_us_down:
* and the items are released we can finally allow the iclog to
* go to disk.
*/
- error = xfs_log_release_iclog(mp, commit_iclog);
-
- /*
- * If the transaction needs to be synchronous, then force the
- * log out now and wait for it.
- */
- if (sync) {
- if (!error) {
- error = _xfs_log_force_lsn(mp, commit_lsn,
- XFS_LOG_SYNC, log_flushed);
- }
- XFS_STATS_INC(xs_trans_sync);
- } else {
- XFS_STATS_INC(xs_trans_async);
- }
-
- return (error);
+ return xfs_log_release_iclog(mp, commit_iclog);
}
/*
- * Total up the number of log iovecs needed to commit this
- * transaction. The transaction itself needs one for the
- * transaction header. Ask each dirty item in turn how many
- * it needs to get the total.
+ * xfs_trans_commit
+ *
+ * Commit the given transaction to the log a/synchronously.
+ *
+ * XFS disk error handling mechanism is not based on a typical
+ * transaction abort mechanism. Logically after the filesystem
+ * gets marked 'SHUTDOWN', we can't let any new transactions
+ * be durable - ie. committed to disk - because some metadata might
+ * be inconsistent. In such cases, this returns an error, and the
+ * caller may assume that all locked objects joined to the transaction
+ * have already been unlocked as if the commit had succeeded.
+ * Do not reference the transaction structure after this call.
*/
-STATIC uint
-xfs_trans_count_vecs(
- xfs_trans_t *tp)
+int
+_xfs_trans_commit(
+ struct xfs_trans *tp,
+ uint flags,
+ int *log_flushed)
{
- int nvecs;
- xfs_log_item_desc_t *lidp;
+ struct xfs_mount *mp = tp->t_mountp;
+ xfs_lsn_t commit_lsn = -1;
+ int error = 0;
+ int log_flags = 0;
+ int sync = tp->t_flags & XFS_TRANS_SYNC;
- nvecs = 1;
- lidp = xfs_trans_first_item(tp);
- ASSERT(lidp != NULL);
-
- /* In the non-debug case we need to start bailing out if we
- * didn't find a log_item here, return zero and let trans_commit
- * deal with it.
+ /*
+ * Determine whether this commit is releasing a permanent
+ * log reservation or not.
*/
- if (lidp == NULL)
- return 0;
-
- while (lidp != NULL) {
- /*
- * Skip items which aren't dirty in this transaction.
- */
- if (!(lidp->lid_flags & XFS_LID_DIRTY)) {
- lidp = xfs_trans_next_item(tp, lidp);
- continue;
- }
- lidp->lid_size = IOP_SIZE(lidp->lid_item);
- nvecs += lidp->lid_size;
- lidp = xfs_trans_next_item(tp, lidp);
+ if (flags & XFS_TRANS_RELEASE_LOG_RES) {
+ ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
+ log_flags = XFS_LOG_REL_PERM_RESERV;
}
- return nvecs;
-}
-
-/*
- * Called from the trans_commit code when we notice that
- * the filesystem is in the middle of a forced shutdown.
- */
-STATIC void
-xfs_trans_uncommit(
- xfs_trans_t *tp,
- uint flags)
-{
- xfs_log_item_desc_t *lidp;
+ /*
+ * If there is nothing to be logged by the transaction,
+ * then unlock all of the items associated with the
+ * transaction and free the transaction structure.
+ * Also make sure to return any reserved blocks to
+ * the free pool.
+ */
+ if (!(tp->t_flags & XFS_TRANS_DIRTY))
+ goto out_unreserve;
- for (lidp = xfs_trans_first_item(tp);
- lidp != NULL;
- lidp = xfs_trans_next_item(tp, lidp)) {
- /*
- * Unpin all but those that aren't dirty.
- */
- if (lidp->lid_flags & XFS_LID_DIRTY)
- IOP_UNPIN_REMOVE(lidp->lid_item, tp);
+ if (XFS_FORCED_SHUTDOWN(mp)) {
+ error = XFS_ERROR(EIO);
+ goto out_unreserve;
}
- xfs_trans_unreserve_and_mod_sb(tp);
- xfs_trans_unreserve_and_mod_dquots(tp);
+ ASSERT(tp->t_ticket != NULL);
- xfs_trans_free_items(tp, flags);
- xfs_trans_free_busy(tp);
- xfs_trans_free(tp);
-}
+ /*
+ * If we need to update the superblock, then do it now.
+ */
+ if (tp->t_flags & XFS_TRANS_SB_DIRTY)
+ xfs_trans_apply_sb_deltas(tp);
+ xfs_trans_apply_dquot_deltas(tp);
-/*
- * Fill in the vector with pointers to data to be logged
- * by this transaction. The transaction header takes
- * the first vector, and then each dirty item takes the
- * number of vectors it indicated it needed in xfs_trans_count_vecs().
- *
- * As each item fills in the entries it needs, also pin the item
- * so that it cannot be flushed out until the log write completes.
- */
-STATIC void
-xfs_trans_fill_vecs(
- xfs_trans_t *tp,
- xfs_log_iovec_t *log_vector)
-{
- xfs_log_item_desc_t *lidp;
- xfs_log_iovec_t *vecp;
- uint nitems;
+ error = xfs_trans_commit_iclog(mp, tp, &commit_lsn, flags);
+ if (error == ENOMEM) {
+ xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
+ error = XFS_ERROR(EIO);
+ goto out_unreserve;
+ }
/*
- * Skip over the entry for the transaction header, we'll
- * fill that in at the end.
+ * If the transaction needs to be synchronous, then force the
+ * log out now and wait for it.
*/
- vecp = log_vector + 1; /* pointer arithmetic */
-
- nitems = 0;
- lidp = xfs_trans_first_item(tp);
- ASSERT(lidp != NULL);
- while (lidp != NULL) {
- /*
- * Skip items which aren't dirty in this transaction.
- */
- if (!(lidp->lid_flags & XFS_LID_DIRTY)) {
- lidp = xfs_trans_next_item(tp, lidp);
- continue;
- }
- /*
- * The item may be marked dirty but not log anything.
- * This can be used to get called when a transaction
- * is committed.
- */
- if (lidp->lid_size) {
- nitems++;
+ if (sync) {
+ if (!error) {
+ error = _xfs_log_force_lsn(mp, commit_lsn,
+ XFS_LOG_SYNC, log_flushed);
}
- IOP_FORMAT(lidp->lid_item, vecp);
- vecp += lidp->lid_size; /* pointer arithmetic */
- IOP_PIN(lidp->lid_item);
- lidp = xfs_trans_next_item(tp, lidp);
+ XFS_STATS_INC(xs_trans_sync);
+ } else {
+ XFS_STATS_INC(xs_trans_async);
}
+ return error;
+
+out_unreserve:
+ xfs_trans_unreserve_and_mod_sb(tp);
+
/*
- * Now that we've counted the number of items in this
- * transaction, fill in the transaction header.
+ * It is indeed possible for the transaction to be not dirty but
+ * the dqinfo portion to be. All that means is that we have some
+ * (non-persistent) quota reservations that need to be unreserved.
*/
- tp->t_header.th_magic = XFS_TRANS_HEADER_MAGIC;
- tp->t_header.th_type = tp->t_type;
- tp->t_header.th_num_items = nitems;
- log_vector->i_addr = (xfs_caddr_t)&tp->t_header;
- log_vector->i_len = sizeof(xfs_trans_header_t);
- log_vector->i_type = XLOG_REG_TYPE_TRANSHDR;
-}
+ xfs_trans_unreserve_and_mod_dquots(tp);
+ if (tp->t_ticket) {
+ commit_lsn = xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
+ if (commit_lsn == -1 && !error)
+ error = XFS_ERROR(EIO);
+ }
+ current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
+ xfs_trans_free_items(tp, error ? XFS_TRANS_ABORT : 0);
+ xfs_trans_free_busy(tp);
+ xfs_trans_free(tp);
+ XFS_STATS_INC(xs_trans_empty);
+ return error;
+}
/*
* Unlock all of the transaction's items and free the transaction.
@@ -1200,20 +1343,6 @@ xfs_trans_cancel(
xfs_trans_free(tp);
}
-
-/*
- * Free the transaction structure. If there is more clean up
- * to do when the structure is freed, add it here.
- */
-STATIC void
-xfs_trans_free(
- xfs_trans_t *tp)
-{
- atomic_dec(&tp->t_mountp->m_active_trans);
- xfs_trans_free_dqinfo(tp);
- kmem_zone_free(xfs_trans_zone, tp);
-}
-
/*
* Roll from one trans in the sequence of PERMANENT transactions to
* the next: permanent transactions are only flushed out when
@@ -1283,174 +1412,3 @@ xfs_trans_roll(
xfs_trans_ihold(trans, dp);
return 0;
}
-
-/*
- * THIS SHOULD BE REWRITTEN TO USE xfs_trans_next_item().
- *
- * This is typically called by the LM when a transaction has been fully
- * committed to disk. It needs to unpin the items which have
- * been logged by the transaction and update their positions
- * in the AIL if necessary.
- * This also gets called when the transactions didn't get written out
- * because of an I/O error. Abortflag & XFS_LI_ABORTED is set then.
- *
- * Call xfs_trans_chunk_committed() to process the items in
- * each chunk.
- */
-STATIC void
-xfs_trans_committed(
- xfs_trans_t *tp,
- int abortflag)
-{
- xfs_log_item_chunk_t *licp;
- xfs_log_item_chunk_t *next_licp;
- xfs_log_busy_chunk_t *lbcp;
- xfs_log_busy_slot_t *lbsp;
- int i;
-
- /*
- * Call the transaction's completion callback if there
- * is one.
- */
- if (tp->t_callback != NULL) {
- tp->t_callback(tp, tp->t_callarg);
- }
-
- /*
- * Special case the chunk embedded in the transaction.
- */
- licp = &(tp->t_items);
- if (!(xfs_lic_are_all_free(licp))) {
- xfs_trans_chunk_committed(licp, tp->t_lsn, abortflag);
- }
-
- /*
- * Process the items in each chunk in turn.
- */
- licp = licp->lic_next;
- while (licp != NULL) {
- ASSERT(!xfs_lic_are_all_free(licp));
- xfs_trans_chunk_committed(licp, tp->t_lsn, abortflag);
- next_licp = licp->lic_next;
- kmem_free(licp);
- licp = next_licp;
- }
-
- /*
- * Clear all the per-AG busy list items listed in this transaction
- */
- lbcp = &tp->t_busy;
- while (lbcp != NULL) {
- for (i = 0, lbsp = lbcp->lbc_busy; i < lbcp->lbc_unused; i++, lbsp++) {
- if (!XFS_LBC_ISFREE(lbcp, i)) {
- xfs_alloc_clear_busy(tp, lbsp->lbc_ag,
- lbsp->lbc_idx);
- }
- }
- lbcp = lbcp->lbc_next;
- }
- xfs_trans_free_busy(tp);
-
- /*
- * That's it for the transaction structure. Free it.
- */
- xfs_trans_free(tp);
-}
-
-/*
- * This is called to perform the commit processing for each
- * item described by the given chunk.
- *
- * The commit processing consists of unlocking items which were
- * held locked with the SYNC_UNLOCK attribute, calling the committed
- * routine of each logged item, updating the item's position in the AIL
- * if necessary, and unpinning each item. If the committed routine
- * returns -1, then do nothing further with the item because it
- * may have been freed.
- *
- * Since items are unlocked when they are copied to the incore
- * log, it is possible for two transactions to be completing
- * and manipulating the same item simultaneously. The AIL lock
- * will protect the lsn field of each item. The value of this
- * field can never go backwards.
- *
- * We unpin the items after repositioning them in the AIL, because
- * otherwise they could be immediately flushed and we'd have to race
- * with the flusher trying to pull the item from the AIL as we add it.
- */
-STATIC void
-xfs_trans_chunk_committed(
- xfs_log_item_chunk_t *licp,
- xfs_lsn_t lsn,
- int aborted)
-{
- xfs_log_item_desc_t *lidp;
- xfs_log_item_t *lip;
- xfs_lsn_t item_lsn;
- int i;
-
- lidp = licp->lic_descs;
- for (i = 0; i < licp->lic_unused; i++, lidp++) {
- struct xfs_ail *ailp;
-
- if (xfs_lic_isfree(licp, i)) {
- continue;
- }
-
- lip = lidp->lid_item;
- if (aborted)
- lip->li_flags |= XFS_LI_ABORTED;
-
- /*
- * Send in the ABORTED flag to the COMMITTED routine
- * so that it knows whether the transaction was aborted
- * or not.
- */
- item_lsn = IOP_COMMITTED(lip, lsn);
-
- /*
- * If the committed routine returns -1, make
- * no more references to the item.
- */
- if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0) {
- continue;
- }
-
- /*
- * If the returned lsn is greater than what it
- * contained before, update the location of the
- * item in the AIL. If it is not, then do nothing.
- * Items can never move backwards in the AIL.
- *
- * While the new lsn should usually be greater, it
- * is possible that a later transaction completing
- * simultaneously with an earlier one using the
- * same item could complete first with a higher lsn.
- * This would cause the earlier transaction to fail
- * the test below.
- */
- ailp = lip->li_ailp;
- spin_lock(&ailp->xa_lock);
- if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0) {
- /*
- * This will set the item's lsn to item_lsn
- * and update the position of the item in
- * the AIL.
- *
- * xfs_trans_ail_update() drops the AIL lock.
- */
- xfs_trans_ail_update(ailp, lip, item_lsn);
- } else {
- spin_unlock(&ailp->xa_lock);
- }
-
- /*
- * Now that we've repositioned the item in the AIL,
- * unpin it so it can be flushed. Pass information
- * about buffer stale state down from the log item
- * flags, if anyone else stales the buffer we do not
- * want to pay any attention to it.
- */
- IOP_UNPIN(lip, lidp->lid_flags & XFS_LID_BUF_STALE);
- }
-}
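The xfs_trans.c rework above splits log vector assembly out of the commit path: xfs_trans_count_vecs() totals the iovecs each dirty item needs, and xfs_trans_fill_vecs() then lets each item format itself into the reserved slots before xfs_trans_commit_iclog() writes them. A minimal sketch of that two-pass pattern follows; the types and helpers are hypothetical stand-ins, not the real XFS log item structures.

/*
 * Sketch only. The real code walks xfs_log_item_desc_t entries and calls
 * IOP_SIZE()/IOP_FORMAT() on each dirty item; here an item is just a blob.
 */
#include <stddef.h>

struct vec  { void *addr; size_t len; };
struct item { int dirty; size_t nvecs; void *data; size_t len; };

/* Pass 1: total the vectors, reserving slot 0 for the transaction header. */
static size_t count_vecs(const struct item *it, size_t n)
{
	size_t total = 1;

	for (size_t i = 0; i < n; i++)
		if (it[i].dirty)
			total += it[i].nvecs;
	return total;
}

/* Pass 2: fill slots for dirty items, skipping slot 0 (the header).
 * Only the first slot reserved by each item is filled in this sketch;
 * the pointer still advances past everything the item reserved. */
static void fill_vecs(const struct item *it, size_t n, struct vec *v)
{
	struct vec *vp = v + 1;

	for (size_t i = 0; i < n; i++) {
		if (!it[i].dirty)
			continue;
		vp->addr = it[i].data;
		vp->len  = it[i].len;
		vp += it[i].nvecs;
	}
}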
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 79c8bab9dfff..c62beee0921e 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -49,6 +49,15 @@ typedef struct xfs_trans_header {
#define XFS_LI_DQUOT 0x123d
#define XFS_LI_QUOTAOFF 0x123e
+#define XFS_LI_TYPE_DESC \
+ { XFS_LI_EFI, "XFS_LI_EFI" }, \
+ { XFS_LI_EFD, "XFS_LI_EFD" }, \
+ { XFS_LI_IUNLINK, "XFS_LI_IUNLINK" }, \
+ { XFS_LI_INODE, "XFS_LI_INODE" }, \
+ { XFS_LI_BUF, "XFS_LI_BUF" }, \
+ { XFS_LI_DQUOT, "XFS_LI_DQUOT" }, \
+ { XFS_LI_QUOTAOFF, "XFS_LI_QUOTAOFF" }
+
/*
* Transaction types. Used to distinguish types of buffers.
*/
@@ -159,7 +168,6 @@ typedef struct xfs_log_item_desc {
#define XFS_LID_DIRTY 0x1
#define XFS_LID_PINNED 0x2
-#define XFS_LID_BUF_STALE 0x8
/*
* This structure is used to maintain a chunk list of log_item_desc
@@ -833,7 +841,7 @@ typedef struct xfs_item_ops {
uint (*iop_size)(xfs_log_item_t *);
void (*iop_format)(xfs_log_item_t *, struct xfs_log_iovec *);
void (*iop_pin)(xfs_log_item_t *);
- void (*iop_unpin)(xfs_log_item_t *, int);
+ void (*iop_unpin)(xfs_log_item_t *);
void (*iop_unpin_remove)(xfs_log_item_t *, struct xfs_trans *);
uint (*iop_trylock)(xfs_log_item_t *);
void (*iop_unlock)(xfs_log_item_t *);
@@ -846,7 +854,7 @@ typedef struct xfs_item_ops {
#define IOP_SIZE(ip) (*(ip)->li_ops->iop_size)(ip)
#define IOP_FORMAT(ip,vp) (*(ip)->li_ops->iop_format)(ip, vp)
#define IOP_PIN(ip) (*(ip)->li_ops->iop_pin)(ip)
-#define IOP_UNPIN(ip, flags) (*(ip)->li_ops->iop_unpin)(ip, flags)
+#define IOP_UNPIN(ip) (*(ip)->li_ops->iop_unpin)(ip)
#define IOP_UNPIN_REMOVE(ip,tp) (*(ip)->li_ops->iop_unpin_remove)(ip, tp)
#define IOP_TRYLOCK(ip) (*(ip)->li_ops->iop_trylock)(ip)
#define IOP_UNLOCK(ip) (*(ip)->li_ops->iop_unlock)(ip)
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index fb586360d1c9..9cd809025f3a 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -40,11 +40,51 @@
#include "xfs_rw.h"
#include "xfs_trace.h"
+/*
+ * Check to see if a buffer matching the given parameters is already
+ * a part of the given transaction.
+ */
+STATIC struct xfs_buf *
+xfs_trans_buf_item_match(
+ struct xfs_trans *tp,
+ struct xfs_buftarg *target,
+ xfs_daddr_t blkno,
+ int len)
+{
+ xfs_log_item_chunk_t *licp;
+ xfs_log_item_desc_t *lidp;
+ xfs_buf_log_item_t *blip;
+ int i;
-STATIC xfs_buf_t *xfs_trans_buf_item_match(xfs_trans_t *, xfs_buftarg_t *,
- xfs_daddr_t, int);
-STATIC xfs_buf_t *xfs_trans_buf_item_match_all(xfs_trans_t *, xfs_buftarg_t *,
- xfs_daddr_t, int);
+ len = BBTOB(len);
+ for (licp = &tp->t_items; licp != NULL; licp = licp->lic_next) {
+ if (xfs_lic_are_all_free(licp)) {
+ ASSERT(licp == &tp->t_items);
+ ASSERT(licp->lic_next == NULL);
+ return NULL;
+ }
+
+ for (i = 0; i < licp->lic_unused; i++) {
+ /*
+ * Skip unoccupied slots.
+ */
+ if (xfs_lic_isfree(licp, i))
+ continue;
+
+ lidp = xfs_lic_slot(licp, i);
+ blip = (xfs_buf_log_item_t *)lidp->lid_item;
+ if (blip->bli_item.li_type != XFS_LI_BUF)
+ continue;
+
+ if (XFS_BUF_TARGET(blip->bli_buf) == target &&
+ XFS_BUF_ADDR(blip->bli_buf) == blkno &&
+ XFS_BUF_COUNT(blip->bli_buf) == len)
+ return blip->bli_buf;
+ }
+ }
+
+ return NULL;
+}
/*
* Add the locked buffer to the transaction.
@@ -112,14 +152,6 @@ xfs_trans_bjoin(
* within the transaction, just increment its lock recursion count
* and return a pointer to it.
*
- * Use the fast path function xfs_trans_buf_item_match() or the buffer
- * cache routine incore_match() to find the buffer
- * if it is already owned by this transaction.
- *
- * If we don't already own the buffer, use get_buf() to get it.
- * If it doesn't yet have an associated xfs_buf_log_item structure,
- * then allocate one and add the item to this transaction.
- *
* If the transaction pointer is NULL, make this just a normal
* get_buf() call.
*/
@@ -149,11 +181,7 @@ xfs_trans_get_buf(xfs_trans_t *tp,
* have it locked. In this case we just increment the lock
* recursion count and return the buffer to the caller.
*/
- if (tp->t_items.lic_next == NULL) {
- bp = xfs_trans_buf_item_match(tp, target_dev, blkno, len);
- } else {
- bp = xfs_trans_buf_item_match_all(tp, target_dev, blkno, len);
- }
+ bp = xfs_trans_buf_item_match(tp, target_dev, blkno, len);
if (bp != NULL) {
ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
if (XFS_FORCED_SHUTDOWN(tp->t_mountp))
@@ -259,14 +287,6 @@ int xfs_error_mod = 33;
* within the transaction and already read in, just increment its
* lock recursion count and return a pointer to it.
*
- * Use the fast path function xfs_trans_buf_item_match() or the buffer
- * cache routine incore_match() to find the buffer
- * if it is already owned by this transaction.
- *
- * If we don't already own the buffer, use read_buf() to get it.
- * If it doesn't yet have an associated xfs_buf_log_item structure,
- * then allocate one and add the item to this transaction.
- *
* If the transaction pointer is NULL, make this just a normal
* read_buf() call.
*/
@@ -328,11 +348,7 @@ xfs_trans_read_buf(
* If the buffer is not yet read in, then we read it in, increment
* the lock recursion count, and return it to the caller.
*/
- if (tp->t_items.lic_next == NULL) {
- bp = xfs_trans_buf_item_match(tp, target, blkno, len);
- } else {
- bp = xfs_trans_buf_item_match_all(tp, target, blkno, len);
- }
+ bp = xfs_trans_buf_item_match(tp, target, blkno, len);
if (bp != NULL) {
ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
@@ -696,7 +712,6 @@ xfs_trans_log_buf(xfs_trans_t *tp,
tp->t_flags |= XFS_TRANS_DIRTY;
lidp->lid_flags |= XFS_LID_DIRTY;
- lidp->lid_flags &= ~XFS_LID_BUF_STALE;
bip->bli_flags |= XFS_BLI_LOGGED;
xfs_buf_item_log(bip, first, last);
}
@@ -782,7 +797,7 @@ xfs_trans_binval(
bip->bli_format.blf_flags |= XFS_BLI_CANCEL;
memset((char *)(bip->bli_format.blf_data_map), 0,
(bip->bli_format.blf_map_size * sizeof(uint)));
- lidp->lid_flags |= XFS_LID_DIRTY|XFS_LID_BUF_STALE;
+ lidp->lid_flags |= XFS_LID_DIRTY;
tp->t_flags |= XFS_TRANS_DIRTY;
}
@@ -902,111 +917,3 @@ xfs_trans_dquot_buf(
bip->bli_format.blf_flags |= type;
}
-
-/*
- * Check to see if a buffer matching the given parameters is already
- * a part of the given transaction. Only check the first, embedded
- * chunk, since we don't want to spend all day scanning large transactions.
- */
-STATIC xfs_buf_t *
-xfs_trans_buf_item_match(
- xfs_trans_t *tp,
- xfs_buftarg_t *target,
- xfs_daddr_t blkno,
- int len)
-{
- xfs_log_item_chunk_t *licp;
- xfs_log_item_desc_t *lidp;
- xfs_buf_log_item_t *blip;
- xfs_buf_t *bp;
- int i;
-
- bp = NULL;
- len = BBTOB(len);
- licp = &tp->t_items;
- if (!xfs_lic_are_all_free(licp)) {
- for (i = 0; i < licp->lic_unused; i++) {
- /*
- * Skip unoccupied slots.
- */
- if (xfs_lic_isfree(licp, i)) {
- continue;
- }
-
- lidp = xfs_lic_slot(licp, i);
- blip = (xfs_buf_log_item_t *)lidp->lid_item;
- if (blip->bli_item.li_type != XFS_LI_BUF) {
- continue;
- }
-
- bp = blip->bli_buf;
- if ((XFS_BUF_TARGET(bp) == target) &&
- (XFS_BUF_ADDR(bp) == blkno) &&
- (XFS_BUF_COUNT(bp) == len)) {
- /*
- * We found it. Break out and
- * return the pointer to the buffer.
- */
- break;
- } else {
- bp = NULL;
- }
- }
- }
- return bp;
-}
-
-/*
- * Check to see if a buffer matching the given parameters is already
- * a part of the given transaction. Check all the chunks, we
- * want to be thorough.
- */
-STATIC xfs_buf_t *
-xfs_trans_buf_item_match_all(
- xfs_trans_t *tp,
- xfs_buftarg_t *target,
- xfs_daddr_t blkno,
- int len)
-{
- xfs_log_item_chunk_t *licp;
- xfs_log_item_desc_t *lidp;
- xfs_buf_log_item_t *blip;
- xfs_buf_t *bp;
- int i;
-
- bp = NULL;
- len = BBTOB(len);
- for (licp = &tp->t_items; licp != NULL; licp = licp->lic_next) {
- if (xfs_lic_are_all_free(licp)) {
- ASSERT(licp == &tp->t_items);
- ASSERT(licp->lic_next == NULL);
- return NULL;
- }
- for (i = 0; i < licp->lic_unused; i++) {
- /*
- * Skip unoccupied slots.
- */
- if (xfs_lic_isfree(licp, i)) {
- continue;
- }
-
- lidp = xfs_lic_slot(licp, i);
- blip = (xfs_buf_log_item_t *)lidp->lid_item;
- if (blip->bli_item.li_type != XFS_LI_BUF) {
- continue;
- }
-
- bp = blip->bli_buf;
- if ((XFS_BUF_TARGET(bp) == target) &&
- (XFS_BUF_ADDR(bp) == blkno) &&
- (XFS_BUF_COUNT(bp) == len)) {
- /*
- * We found it. Break out and
- * return the pointer to the buffer.
- */
- return bp;
- }
- }
- }
- return NULL;
-}
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index 5b2e5e80ecb0..5958d7845bd5 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -87,7 +87,7 @@
#define AE_NO_GLOBAL_LOCK (acpi_status) (0x0017 | AE_CODE_ENVIRONMENTAL)
#define AE_ABORT_METHOD (acpi_status) (0x0018 | AE_CODE_ENVIRONMENTAL)
#define AE_SAME_HANDLER (acpi_status) (0x0019 | AE_CODE_ENVIRONMENTAL)
-#define AE_WAKE_ONLY_GPE (acpi_status) (0x001A | AE_CODE_ENVIRONMENTAL)
+#define AE_NO_HANDLER (acpi_status) (0x001A | AE_CODE_ENVIRONMENTAL)
#define AE_OWNER_ID_LIMIT (acpi_status) (0x001B | AE_CODE_ENVIRONMENTAL)
#define AE_CODE_ENV_MAX 0x001B
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index d7726685797e..5e952262d6ee 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -206,6 +206,7 @@
#define ACPI_WARNING(plist) acpi_warning plist
#define ACPI_EXCEPTION(plist) acpi_exception plist
#define ACPI_ERROR(plist) acpi_error plist
+#define ACPI_DEBUG_OBJECT(obj,l,i) acpi_ex_do_debug_object(obj,l,i)
#else
@@ -215,6 +216,7 @@
#define ACPI_WARNING(plist)
#define ACPI_EXCEPTION(plist)
#define ACPI_ERROR(plist)
+#define ACPI_DEBUG_OBJECT(obj,l,i)
#endif /* ACPI_NO_ERROR_MESSAGES */
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 7bf83ddf82e0..baacd98e7cc6 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -373,7 +373,7 @@ struct acpi_pci_root {
struct acpi_pci_id id;
struct pci_bus *bus;
u16 segment;
- u8 bus_nr;
+ struct resource secondary; /* downstream bus range */
u32 osc_support_set; /* _OSC state of support bits */
u32 osc_control_set; /* _OSC state of control bits */
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index 4f7b44866b76..23d78b4d088b 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -104,8 +104,7 @@ int acpi_pci_bind_root(struct acpi_device *device);
/* Arch-defined function to add a bus to the system */
-struct pci_bus *pci_acpi_scan_root(struct acpi_device *device, int domain,
- int bus);
+struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root);
void pci_acpi_crs_quirks(void);
/* --------------------------------------------------------------------------
diff --git a/include/acpi/acpi_hest.h b/include/acpi/acpi_hest.h
deleted file mode 100644
index 63194d03cb2d..000000000000
--- a/include/acpi/acpi_hest.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef __ACPI_HEST_H
-#define __ACPI_HEST_H
-
-#include <linux/pci.h>
-
-#ifdef CONFIG_ACPI
-extern int acpi_hest_firmware_first_pci(struct pci_dev *pci);
-#else
-static inline int acpi_hest_firmware_first_pci(struct pci_dev *pci) { return 0; }
-#endif
-
-#endif
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index b396854b83b0..29bf945143e8 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -233,8 +233,8 @@ acpi_os_write_pci_configuration(struct acpi_pci_id *pci_id,
* Interim function needed for PCI IRQ routing
*/
void
-acpi_os_derive_pci_id(acpi_handle rhandle,
- acpi_handle chandle, struct acpi_pci_id **pci_id);
+acpi_os_derive_pci_id(acpi_handle device,
+ acpi_handle region, struct acpi_pci_id **pci_id);
/*
* Miscellaneous
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 4447a0461bae..0e4ab1fe5966 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -47,7 +47,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20100121
+#define ACPI_CA_VERSION 0x20100428
#include "actypes.h"
#include "actbl.h"
@@ -67,6 +67,8 @@ extern u8 acpi_gbl_leave_wake_gpes_disabled;
extern u8 acpi_gbl_use_default_register_widths;
extern acpi_name acpi_gbl_trace_method_name;
extern u32 acpi_gbl_trace_flags;
+extern u8 acpi_gbl_enable_aml_debug_object;
+extern u8 acpi_gbl_copy_dsdt_locally;
extern u32 acpi_current_gpe_count;
extern struct acpi_table_fadt acpi_gbl_FADT;
@@ -164,7 +166,7 @@ acpi_get_devices(const char *HID,
void *context, void **return_value);
acpi_status
-acpi_get_name(acpi_handle handle,
+acpi_get_name(acpi_handle object,
u32 name_type, struct acpi_buffer *ret_path_ptr);
acpi_status
@@ -172,14 +174,12 @@ acpi_get_handle(acpi_handle parent,
acpi_string pathname, acpi_handle * ret_handle);
acpi_status
-acpi_attach_data(acpi_handle obj_handle,
- acpi_object_handler handler, void *data);
+acpi_attach_data(acpi_handle object, acpi_object_handler handler, void *data);
-acpi_status
-acpi_detach_data(acpi_handle obj_handle, acpi_object_handler handler);
+acpi_status acpi_detach_data(acpi_handle object, acpi_object_handler handler);
acpi_status
-acpi_get_data(acpi_handle obj_handle, acpi_object_handler handler, void **data);
+acpi_get_data(acpi_handle object, acpi_object_handler handler, void **data);
acpi_status
acpi_debug_trace(char *name, u32 debug_level, u32 debug_layer, u32 flags);
@@ -201,7 +201,7 @@ acpi_evaluate_object_typed(acpi_handle object,
acpi_object_type return_type);
acpi_status
-acpi_get_object_info(acpi_handle handle,
+acpi_get_object_info(acpi_handle object,
struct acpi_device_info **return_buffer);
acpi_status acpi_install_method(u8 *buffer);
@@ -283,16 +283,17 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status);
*/
acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action);
-acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type);
+acpi_status
+acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type);
-acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type);
+acpi_status
+acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type);
-acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags);
+acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number);
acpi_status
acpi_get_gpe_status(acpi_handle gpe_device,
- u32 gpe_number,
- u32 flags, acpi_event_status * event_status);
+ u32 gpe_number, acpi_event_status *event_status);
acpi_status acpi_disable_all_gpes(void);
@@ -315,33 +316,29 @@ acpi_status(*acpi_walk_resource_callback) (struct acpi_resource * resource,
void *context);
acpi_status
-acpi_get_vendor_resource(acpi_handle device_handle,
+acpi_get_vendor_resource(acpi_handle device,
char *name,
struct acpi_vendor_uuid *uuid,
struct acpi_buffer *ret_buffer);
acpi_status
-acpi_get_current_resources(acpi_handle device_handle,
- struct acpi_buffer *ret_buffer);
+acpi_get_current_resources(acpi_handle device, struct acpi_buffer *ret_buffer);
#ifdef ACPI_FUTURE_USAGE
acpi_status
-acpi_get_possible_resources(acpi_handle device_handle,
- struct acpi_buffer *ret_buffer);
+acpi_get_possible_resources(acpi_handle device, struct acpi_buffer *ret_buffer);
#endif
acpi_status
-acpi_walk_resources(acpi_handle device_handle,
+acpi_walk_resources(acpi_handle device,
char *name,
acpi_walk_resource_callback user_function, void *context);
acpi_status
-acpi_set_current_resources(acpi_handle device_handle,
- struct acpi_buffer *in_buffer);
+acpi_set_current_resources(acpi_handle device, struct acpi_buffer *in_buffer);
acpi_status
-acpi_get_irq_routing_table(acpi_handle bus_device_handle,
- struct acpi_buffer *ret_buffer);
+acpi_get_irq_routing_table(acpi_handle device, struct acpi_buffer *ret_buffer);
acpi_status
acpi_resource_to_address64(struct acpi_resource *resource,
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index 5b02e307bff3..95f4d0ef4819 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -69,6 +69,7 @@
#define ACPI_SIG_IBFT "IBFT" /* i_sCSI Boot Firmware Table */
#define ACPI_SIG_IVRS "IVRS" /* I/O Virtualization Reporting Structure */
#define ACPI_SIG_MCFG "MCFG" /* PCI Memory Mapped Configuration table */
+#define ACPI_SIG_MCHI "MCHI" /* Management Controller Host Interface table */
#define ACPI_SIG_SLIC "SLIC" /* Software Licensing Description Table */
#define ACPI_SIG_SPCR "SPCR" /* Serial Port Console Redirection table */
#define ACPI_SIG_SPMI "SPMI" /* Server Platform Management Interface table */
@@ -679,6 +680,32 @@ struct acpi_mcfg_allocation {
/*******************************************************************************
*
+ * MCHI - Management Controller Host Interface Table
+ * Version 1
+ *
+ * Conforms to "Management Component Transport Protocol (MCTP) Host
+ * Interface Specification", Revision 1.0.0a, October 13, 2009
+ *
+ ******************************************************************************/
+
+struct acpi_table_mchi {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u8 interface_type;
+ u8 protocol;
+ u64 protocol_data;
+ u8 interrupt_type;
+ u8 gpe;
+ u8 pci_device_flag;
+ u32 global_interrupt;
+ struct acpi_generic_address control_register;
+ u8 pci_segment;
+ u8 pci_bus;
+ u8 pci_device;
+ u8 pci_function;
+};
+
+/*******************************************************************************
+ *
* SPCR - Serial Port Console Redirection table
* Version 1
*
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 3f08e64962f8..bade172cad47 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -663,44 +663,42 @@ typedef u32 acpi_event_status;
#define ACPI_GPE_MAX 0xFF
#define ACPI_NUM_GPE 256
+/* Actions for acpi_set_gpe */
+
#define ACPI_GPE_ENABLE 0
#define ACPI_GPE_DISABLE 1
+/* gpe_types for acpi_enable_gpe and acpi_disable_gpe */
+
+#define ACPI_GPE_TYPE_WAKE (u8) 0x01
+#define ACPI_GPE_TYPE_RUNTIME (u8) 0x02
+#define ACPI_GPE_TYPE_WAKE_RUN (u8) 0x03
+
/*
* GPE info flags - Per GPE
- * +-+-+-+---+-+-+-+
- * |7|6|5|4:3|2|1|0|
- * +-+-+-+---+-+-+-+
- * | | | | | | |
- * | | | | | | +--- Interrupt type: Edge or Level Triggered
- * | | | | | +--- GPE can wake the system
- * | | | | +--- Unused
- * | | | +--- Type of dispatch -- to method, handler, or none
- * | | +--- Unused
- * | +--- Unused
- * +--- Unused
+ * +-------+---+-+-+
+ * | 7:4 |3:2|1|0|
+ * +-------+---+-+-+
+ * | | | |
+ * | | | +--- Interrupt type: edge or level triggered
+ * | | +----- GPE can wake the system
+ * | +-------- Type of dispatch:to method, handler, or none
+ * +-------------- <Reserved>
*/
#define ACPI_GPE_XRUPT_TYPE_MASK (u8) 0x01
#define ACPI_GPE_LEVEL_TRIGGERED (u8) 0x01
#define ACPI_GPE_EDGE_TRIGGERED (u8) 0x00
-#define ACPI_GPE_TYPE_MASK (u8) 0x06
-#define ACPI_GPE_TYPE_WAKE_RUN (u8) 0x06
-#define ACPI_GPE_TYPE_WAKE (u8) 0x02
-#define ACPI_GPE_TYPE_RUNTIME (u8) 0x04 /* Default */
#define ACPI_GPE_CAN_WAKE (u8) 0x02
-#define ACPI_GPE_DISPATCH_MASK (u8) 0x18
-#define ACPI_GPE_DISPATCH_HANDLER (u8) 0x08
-#define ACPI_GPE_DISPATCH_METHOD (u8) 0x10
-#define ACPI_GPE_DISPATCH_NOT_USED (u8) 0x00 /* Default */
+#define ACPI_GPE_DISPATCH_MASK (u8) 0x0C
+#define ACPI_GPE_DISPATCH_HANDLER (u8) 0x04
+#define ACPI_GPE_DISPATCH_METHOD (u8) 0x08
+#define ACPI_GPE_DISPATCH_NOT_USED (u8) 0x00
/*
* Flags for GPE and Lock interfaces
*/
-#define ACPI_EVENT_WAKE_ENABLE 0x2 /* acpi_gpe_enable */
-#define ACPI_EVENT_WAKE_DISABLE 0x2 /* acpi_gpe_disable */
-
#define ACPI_NOT_ISR 0x1
#define ACPI_ISR 0x0
@@ -953,7 +951,7 @@ acpi_status(*acpi_adr_space_setup) (acpi_handle region_handle,
#define ACPI_REGION_DEACTIVATE 1
typedef
-acpi_status(*acpi_walk_callback) (acpi_handle obj_handle,
+acpi_status(*acpi_walk_callback) (acpi_handle object,
u32 nesting_level,
void *context, void **return_value);
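The reworked GPE info byte above packs the dispatch type into bits 3:2 and drops the separate wake/run type bits in favour of ACPI_GPE_CAN_WAKE. A small illustrative decode, with the constants copied locally so the snippet stands alone (values mirror the actypes.h definitions added above):

/* Illustrative only, not ACPICA code. */
#define GPE_XRUPT_TYPE_MASK	0x01	/* edge or level triggered */
#define GPE_CAN_WAKE		0x02
#define GPE_DISPATCH_MASK	0x0C
#define GPE_DISPATCH_HANDLER	0x04
#define GPE_DISPATCH_METHOD	0x08

static int gpe_dispatches_to_method(unsigned char info)
{
	return (info & GPE_DISPATCH_MASK) == GPE_DISPATCH_METHOD;
}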
diff --git a/include/acpi/apei.h b/include/acpi/apei.h
new file mode 100644
index 000000000000..b3365025ff8d
--- /dev/null
+++ b/include/acpi/apei.h
@@ -0,0 +1,34 @@
+/*
+ * apei.h - ACPI Platform Error Interface
+ */
+
+#ifndef ACPI_APEI_H
+#define ACPI_APEI_H
+
+#include <linux/acpi.h>
+#include <linux/cper.h>
+#include <asm/ioctls.h>
+
+#define APEI_ERST_INVALID_RECORD_ID 0xffffffffffffffffULL
+
+#define APEI_ERST_CLEAR_RECORD _IOW('E', 1, u64)
+#define APEI_ERST_GET_RECORD_COUNT _IOR('E', 2, u32)
+
+#ifdef __KERNEL__
+
+extern int hest_disable;
+extern int erst_disable;
+
+typedef int (*apei_hest_func_t)(struct acpi_hest_header *hest_hdr, void *data);
+int apei_hest_parse(apei_hest_func_t func, void *data);
+
+int erst_write(const struct cper_record_header *record);
+ssize_t erst_get_record_count(void);
+int erst_get_next_record_id(u64 *record_id);
+ssize_t erst_read(u64 record_id, struct cper_record_header *record,
+ size_t buflen);
+ssize_t erst_read_next(struct cper_record_header *record, size_t buflen);
+int erst_clear(u64 record_id);
+
+#endif
+#endif
diff --git a/include/acpi/atomicio.h b/include/acpi/atomicio.h
new file mode 100644
index 000000000000..8b9fb4b0b9ce
--- /dev/null
+++ b/include/acpi/atomicio.h
@@ -0,0 +1,10 @@
+#ifndef ACPI_ATOMIC_IO_H
+#define ACPI_ATOMIC_IO_H
+
+int acpi_pre_map_gar(struct acpi_generic_address *reg);
+int acpi_post_unmap_gar(struct acpi_generic_address *reg);
+
+int acpi_atomic_read(u64 *val, struct acpi_generic_address *reg);
+int acpi_atomic_write(u64 val, struct acpi_generic_address *reg);
+
+#endif
diff --git a/include/acpi/hed.h b/include/acpi/hed.h
new file mode 100644
index 000000000000..46e1249b70cc
--- /dev/null
+++ b/include/acpi/hed.h
@@ -0,0 +1,18 @@
+/*
+ * hed.h - ACPI Hardware Error Device
+ *
+ * Copyright (C) 2009, Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This file is released under the GPLv2.
+ */
+
+#ifndef ACPI_HED_H
+#define ACPI_HED_H
+
+#include <linux/notifier.h>
+
+int register_acpi_hed_notifier(struct notifier_block *nb);
+void unregister_acpi_hed_notifier(struct notifier_block *nb);
+
+#endif
diff --git a/include/acpi/video.h b/include/acpi/video.h
index cf7be3dd157b..551793c9b6e8 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -1,12 +1,28 @@
#ifndef __ACPI_VIDEO_H
#define __ACPI_VIDEO_H
+#define ACPI_VIDEO_DISPLAY_CRT 1
+#define ACPI_VIDEO_DISPLAY_TV 2
+#define ACPI_VIDEO_DISPLAY_DVI 3
+#define ACPI_VIDEO_DISPLAY_LCD 4
+
+#define ACPI_VIDEO_DISPLAY_LEGACY_MONITOR 0x0100
+#define ACPI_VIDEO_DISPLAY_LEGACY_PANEL 0x0110
+#define ACPI_VIDEO_DISPLAY_LEGACY_TV 0x0200
+
#if (defined CONFIG_ACPI_VIDEO || defined CONFIG_ACPI_VIDEO_MODULE)
extern int acpi_video_register(void);
extern void acpi_video_unregister(void);
+extern int acpi_video_get_edid(struct acpi_device *device, int type,
+ int device_id, void **edid);
#else
static inline int acpi_video_register(void) { return 0; }
static inline void acpi_video_unregister(void) { return; }
+static inline int acpi_video_get_edid(struct acpi_device *device, int type,
+ int device_id, void **edid)
+{
+ return -ENODEV;
+}
#endif
#endif
diff --git a/include/asm-generic/bitops/arch_hweight.h b/include/asm-generic/bitops/arch_hweight.h
new file mode 100644
index 000000000000..6a211f40665c
--- /dev/null
+++ b/include/asm-generic/bitops/arch_hweight.h
@@ -0,0 +1,25 @@
+#ifndef _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_
+#define _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_
+
+#include <asm/types.h>
+
+static inline unsigned int __arch_hweight32(unsigned int w)
+{
+ return __sw_hweight32(w);
+}
+
+static inline unsigned int __arch_hweight16(unsigned int w)
+{
+ return __sw_hweight16(w);
+}
+
+static inline unsigned int __arch_hweight8(unsigned int w)
+{
+ return __sw_hweight8(w);
+}
+
+static inline unsigned long __arch_hweight64(__u64 w)
+{
+ return __sw_hweight64(w);
+}
+#endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */
diff --git a/include/asm-generic/bitops/const_hweight.h b/include/asm-generic/bitops/const_hweight.h
new file mode 100644
index 000000000000..fa2a50b7ee66
--- /dev/null
+++ b/include/asm-generic/bitops/const_hweight.h
@@ -0,0 +1,42 @@
+#ifndef _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_
+#define _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_
+
+/*
+ * Compile time versions of __arch_hweightN()
+ */
+#define __const_hweight8(w) \
+ ( (!!((w) & (1ULL << 0))) + \
+ (!!((w) & (1ULL << 1))) + \
+ (!!((w) & (1ULL << 2))) + \
+ (!!((w) & (1ULL << 3))) + \
+ (!!((w) & (1ULL << 4))) + \
+ (!!((w) & (1ULL << 5))) + \
+ (!!((w) & (1ULL << 6))) + \
+ (!!((w) & (1ULL << 7))) )
+
+#define __const_hweight16(w) (__const_hweight8(w) + __const_hweight8((w) >> 8 ))
+#define __const_hweight32(w) (__const_hweight16(w) + __const_hweight16((w) >> 16))
+#define __const_hweight64(w) (__const_hweight32(w) + __const_hweight32((w) >> 32))
+
+/*
+ * Generic interface.
+ */
+#define hweight8(w) (__builtin_constant_p(w) ? __const_hweight8(w) : __arch_hweight8(w))
+#define hweight16(w) (__builtin_constant_p(w) ? __const_hweight16(w) : __arch_hweight16(w))
+#define hweight32(w) (__builtin_constant_p(w) ? __const_hweight32(w) : __arch_hweight32(w))
+#define hweight64(w) (__builtin_constant_p(w) ? __const_hweight64(w) : __arch_hweight64(w))
+
+/*
+ * Interface for known constant arguments
+ */
+#define HWEIGHT8(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight8(w))
+#define HWEIGHT16(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight16(w))
+#define HWEIGHT32(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight32(w))
+#define HWEIGHT64(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight64(w))
+
+/*
+ * Type invariant interface to the compile time constant hweight functions.
+ */
+#define HWEIGHT(w) HWEIGHT64((u64)w)
+
+#endif /* _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ */
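With the hweightN() macros above, a compile-time-constant argument is folded via __const_hweightN() while a variable argument falls through to __arch_hweightN(). A small illustrative use; kernel code would normally reach these through <linux/bitops.h> rather than including the bitops headers directly.

static unsigned int popcount_demo(unsigned int runtime_mask)
{
	/* constant argument: __builtin_constant_p() is true, folds to 16 */
	unsigned int fixed = hweight32(0xF0F0F0F0);

	/* variable argument: expands to __arch_hweight32(runtime_mask) */
	return fixed + hweight32(runtime_mask);
}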
diff --git a/include/asm-generic/bitops/hweight.h b/include/asm-generic/bitops/hweight.h
index fbbc383771da..a94d6519c7ed 100644
--- a/include/asm-generic/bitops/hweight.h
+++ b/include/asm-generic/bitops/hweight.h
@@ -1,11 +1,7 @@
#ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_
#define _ASM_GENERIC_BITOPS_HWEIGHT_H_
-#include <asm/types.h>
-
-extern unsigned int hweight32(unsigned int w);
-extern unsigned int hweight16(unsigned int w);
-extern unsigned int hweight8(unsigned int w);
-extern unsigned long hweight64(__u64 w);
+#include <asm-generic/bitops/arch_hweight.h>
+#include <asm-generic/bitops/const_hweight.h>
#endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */
diff --git a/include/asm-generic/fcntl.h b/include/asm-generic/fcntl.h
index fcd268ce0674..e3cbc38bdcc2 100644
--- a/include/asm-generic/fcntl.h
+++ b/include/asm-generic/fcntl.h
@@ -3,6 +3,14 @@
#include <linux/types.h>
+/*
+ * FMODE_EXEC is 0x20
+ * FMODE_NONOTIFY is 0x1000000
+ * These cannot be used by userspace O_* until internal and external open
+ * flags are split.
+ * -Eric Paris
+ */
+
#define O_ACCMODE 00000003
#define O_RDONLY 00000000
#define O_WRONLY 00000001
diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
index e5f234a08540..97e807c8c812 100644
--- a/include/asm-generic/kmap_types.h
+++ b/include/asm-generic/kmap_types.h
@@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
KMAP_D(16) KM_IRQ_PTE,
KMAP_D(17) KM_NMI,
KMAP_D(18) KM_NMI_PTE,
-KMAP_D(19) KM_TYPE_NR
+KMAP_D(19) KM_KDB,
+KMAP_D(20) KM_TYPE_NR
};
#undef KMAP_D
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 04f91c2d3f7b..b5043a9890d8 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -80,7 +80,7 @@ extern void setup_per_cpu_areas(void);
#ifndef PER_CPU_BASE_SECTION
#ifdef CONFIG_SMP
-#define PER_CPU_BASE_SECTION ".data.percpu"
+#define PER_CPU_BASE_SECTION ".data..percpu"
#else
#define PER_CPU_BASE_SECTION ".data"
#endif
@@ -92,15 +92,15 @@ extern void setup_per_cpu_areas(void);
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_ALIGNED_SECTION ""
#else
-#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
-#define PER_CPU_ALIGNED_SECTION ".shared_aligned"
+#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned"
+#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
#endif
-#define PER_CPU_FIRST_SECTION ".first"
+#define PER_CPU_FIRST_SECTION "..first"
#else
#define PER_CPU_SHARED_ALIGNED_SECTION ""
-#define PER_CPU_ALIGNED_SECTION ".shared_aligned"
+#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
#define PER_CPU_FIRST_SECTION ""
#endif
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 67e652068e0e..ea3660526e91 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -175,25 +175,25 @@
#define NOSAVE_DATA \
. = ALIGN(PAGE_SIZE); \
VMLINUX_SYMBOL(__nosave_begin) = .; \
- *(.data.nosave) \
+ *(.data..nosave) \
. = ALIGN(PAGE_SIZE); \
VMLINUX_SYMBOL(__nosave_end) = .;
#define PAGE_ALIGNED_DATA(page_align) \
. = ALIGN(page_align); \
- *(.data.page_aligned)
+ *(.data..page_aligned)
#define READ_MOSTLY_DATA(align) \
. = ALIGN(align); \
- *(.data.read_mostly)
+ *(.data..read_mostly)
#define CACHELINE_ALIGNED_DATA(align) \
. = ALIGN(align); \
- *(.data.cacheline_aligned)
+ *(.data..cacheline_aligned)
#define INIT_TASK_DATA(align) \
. = ALIGN(align); \
- *(.data.init_task)
+ *(.data..init_task)
/*
* Read only Data
@@ -435,7 +435,7 @@
*/
#define INIT_TASK_DATA_SECTION(align) \
. = ALIGN(align); \
- .data.init_task : { \
+ .data..init_task : { \
INIT_TASK_DATA(align) \
}
@@ -499,7 +499,7 @@
#define BSS(bss_align) \
. = ALIGN(bss_align); \
.bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
- *(.bss.page_aligned) \
+ *(.bss..page_aligned) \
*(.dynbss) \
*(.bss) \
*(COMMON) \
@@ -666,16 +666,16 @@
*/
#define PERCPU_VADDR(vaddr, phdr) \
VMLINUX_SYMBOL(__per_cpu_load) = .; \
- .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
- LOAD_OFFSET) { \
VMLINUX_SYMBOL(__per_cpu_start) = .; \
- *(.data.percpu.first) \
- *(.data.percpu.page_aligned) \
- *(.data.percpu) \
- *(.data.percpu.shared_aligned) \
+ *(.data..percpu..first) \
+ *(.data..percpu..page_aligned) \
+ *(.data..percpu) \
+ *(.data..percpu..shared_aligned) \
VMLINUX_SYMBOL(__per_cpu_end) = .; \
} phdr \
- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
+ . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
/**
* PERCPU - define output section for percpu area, simple version
@@ -687,18 +687,18 @@
*
* This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
* that __per_cpu_load is defined as a relative symbol against
- * .data.percpu which is required for relocatable x86_32
+ * .data..percpu which is required for relocatable x86_32
* configuration.
*/
#define PERCPU(align) \
. = ALIGN(align); \
- .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \
+ .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__per_cpu_load) = .; \
VMLINUX_SYMBOL(__per_cpu_start) = .; \
- *(.data.percpu.first) \
- *(.data.percpu.page_aligned) \
- *(.data.percpu) \
- *(.data.percpu.shared_aligned) \
+ *(.data..percpu..first) \
+ *(.data..percpu..page_aligned) \
+ *(.data..percpu) \
+ *(.data..percpu..shared_aligned) \
VMLINUX_SYMBOL(__per_cpu_end) = .; \
}
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 2f3b3a00b7a3..c1b987158dfa 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1428,10 +1428,13 @@ extern void drm_sysfs_connector_remove(struct drm_connector *connector);
/* Graphics Execution Manager library functions (drm_gem.c) */
int drm_gem_init(struct drm_device *dev);
void drm_gem_destroy(struct drm_device *dev);
+void drm_gem_object_release(struct drm_gem_object *obj);
void drm_gem_object_free(struct kref *kref);
void drm_gem_object_free_unlocked(struct kref *kref);
struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
size_t size);
+int drm_gem_object_init(struct drm_device *dev,
+ struct drm_gem_object *obj, size_t size);
void drm_gem_object_handle_free(struct kref *kref);
void drm_gem_vm_open(struct vm_area_struct *vma);
void drm_gem_vm_close(struct vm_area_struct *vma);
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 1347524a8e30..c560364663a5 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -271,8 +271,6 @@ struct drm_framebuffer {
unsigned int depth;
int bits_per_pixel;
int flags;
- struct fb_info *fbdev;
- u32 pseudo_palette[17];
struct list_head filp_head;
/* if you are using the helper */
void *helper_private;
@@ -369,9 +367,6 @@ struct drm_crtc_funcs {
* @enabled: is this CRTC enabled?
* @x: x position on screen
* @y: y position on screen
- * @desired_mode: new desired mode
- * @desired_x: desired x for desired_mode
- * @desired_y: desired y for desired_mode
* @funcs: CRTC control functions
*
* Each CRTC may have one or more connectors associated with it. This structure
@@ -391,8 +386,6 @@ struct drm_crtc {
struct drm_display_mode mode;
int x, y;
- struct drm_display_mode *desired_mode;
- int desired_x, desired_y;
const struct drm_crtc_funcs *funcs;
/* CRTC gamma size for reporting to userspace */
@@ -521,7 +514,6 @@ struct drm_connector {
uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
uint32_t force_encoder_id;
struct drm_encoder *encoder; /* currently active encoder */
- void *fb_helper_private;
};
/**
@@ -548,16 +540,9 @@ struct drm_mode_set {
/**
* struct drm_mode_config_funcs - configure CRTCs for a given screen layout
- * @resize: adjust CRTCs as necessary for the proposed layout
- *
- * Currently only a resize hook is available. DRM will call back into the
- * driver with a new screen width and height. If the driver can't support
- * the proposed size, it can return false. Otherwise it should adjust
- * the CRTC<->connector mappings as needed and update its view of the screen.
*/
struct drm_mode_config_funcs {
struct drm_framebuffer *(*fb_create)(struct drm_device *dev, struct drm_file *file_priv, struct drm_mode_fb_cmd *mode_cmd);
- int (*fb_changed)(struct drm_device *dev);
};
struct drm_mode_group {
@@ -590,9 +575,6 @@ struct drm_mode_config {
struct list_head property_list;
- /* in-kernel framebuffers - hung of filp_head in drm_framebuffer */
- struct list_head fb_kernel_list;
-
int min_width, min_height;
int max_width, max_height;
struct drm_mode_config_funcs *funcs;
@@ -666,8 +648,6 @@ extern void drm_fb_release(struct drm_file *file_priv);
extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
extern struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter);
-extern int drm_do_probe_ddc_edid(struct i2c_adapter *adapter,
- unsigned char *buf, int len);
extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode);
@@ -799,6 +779,10 @@ extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
extern struct drm_display_mode *drm_gtf_mode(struct drm_device *dev,
int hdisplay, int vdisplay, int vrefresh,
bool interlaced, int margins);
+extern struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev,
+ int hdisplay, int vdisplay, int vrefresh,
+ bool interlaced, int margins, int GTF_M,
+ int GTF_2C, int GTF_K, int GTF_2J);
extern int drm_add_modes_noedid(struct drm_connector *connector,
int hdisplay, int vdisplay);
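
A sketch of a call to the new extended GTF entry point; the last four arguments are the standard GTF curve parameters (M=600, 2C=80, K=128, 2J=40) and are shown here only as example values.

struct drm_display_mode *mode;

mode = drm_gtf_mode_complex(dev, 1280, 720, 60, false, 0,
			    600, 80, 128, 40);
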
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index b29e20168b5f..b1fa0f8cfa60 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -39,7 +39,6 @@
#include <linux/fb.h>
-#include "drm_fb_helper.h"
struct drm_crtc_helper_funcs {
/*
* Control power levels on the CRTC. If the mode passed in is
@@ -96,8 +95,6 @@ struct drm_connector_helper_funcs {
extern int drm_helper_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, uint32_t maxY);
extern void drm_helper_disable_unused_functions(struct drm_device *dev);
-extern int drm_helper_hotplug_stage_two(struct drm_device *dev);
-extern bool drm_helper_initial_config(struct drm_device *dev);
extern int drm_crtc_helper_set_config(struct drm_mode_set *set);
extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
@@ -123,11 +120,10 @@ static inline void drm_encoder_helper_add(struct drm_encoder *encoder,
encoder->helper_private = (void *)funcs;
}
-static inline int drm_connector_helper_add(struct drm_connector *connector,
+static inline void drm_connector_helper_add(struct drm_connector *connector,
const struct drm_connector_helper_funcs *funcs)
{
connector->helper_private = (void *)funcs;
- return drm_fb_helper_add_connector(connector);
}
extern int drm_helper_resume_force_mode(struct drm_device *dev);
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index b4209898f115..d33c3e038606 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -201,7 +201,4 @@ struct edid {
#define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8))
-/* define the number of Extension EDID block */
-#define DRM_MAX_EDID_EXT_NUM 4
-
#endif /* __DRM_EDID_H__ */
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 58c892a2cbfa..9b55a94feada 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -30,17 +30,14 @@
#ifndef DRM_FB_HELPER_H
#define DRM_FB_HELPER_H
+#include <linux/slow-work.h>
+
+struct drm_fb_helper;
+
struct drm_fb_helper_crtc {
uint32_t crtc_id;
struct drm_mode_set mode_set;
-};
-
-
-struct drm_fb_helper_funcs {
- void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green,
- u16 blue, int regno);
- void (*gamma_get)(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, int regno);
+ struct drm_display_mode *desired_mode;
};
/* mode specified on the command line */
@@ -57,8 +54,31 @@ struct drm_fb_helper_cmdline_mode {
bool margins;
};
+struct drm_fb_helper_surface_size {
+ u32 fb_width;
+ u32 fb_height;
+ u32 surface_width;
+ u32 surface_height;
+ u32 surface_bpp;
+ u32 surface_depth;
+};
+
+struct drm_fb_helper_funcs {
+ void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno);
+ void (*gamma_get)(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno);
+
+ int (*fb_probe)(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes);
+
+ void (*fb_output_status_changed)(struct drm_fb_helper *helper);
+
+};
+
struct drm_fb_helper_connector {
struct drm_fb_helper_cmdline_mode cmdline_mode;
+ struct drm_connector *connector;
};
struct drm_fb_helper {
@@ -67,24 +87,28 @@ struct drm_fb_helper {
struct drm_display_mode *mode;
int crtc_count;
struct drm_fb_helper_crtc *crtc_info;
+ int connector_count;
+ struct drm_fb_helper_connector **connector_info;
struct drm_fb_helper_funcs *funcs;
int conn_limit;
+ struct fb_info *fbdev;
+ u32 pseudo_palette[17];
struct list_head kernel_fb_list;
+
+ struct delayed_slow_work output_status_change_slow_work;
+ bool poll_enabled;
+ /* we got a hotplug but fbdev wasn't running the console,
+ /* we got a hotplug but fbdev wasn't running the console,
+ delay until next set_par */
+ bool delayed_hotplug;
};
-int drm_fb_helper_single_fb_probe(struct drm_device *dev,
- int preferred_bpp,
- int (*fb_create)(struct drm_device *dev,
- uint32_t fb_width,
- uint32_t fb_height,
- uint32_t surface_width,
- uint32_t surface_height,
- uint32_t surface_depth,
- uint32_t surface_bpp,
- struct drm_framebuffer **fb_ptr));
-int drm_fb_helper_init_crtc_count(struct drm_fb_helper *helper, int crtc_count,
- int max_conn);
-void drm_fb_helper_free(struct drm_fb_helper *helper);
+int drm_fb_helper_single_fb_probe(struct drm_fb_helper *helper,
+ int preferred_bpp);
+
+int drm_fb_helper_init(struct drm_device *dev,
+ struct drm_fb_helper *helper, int crtc_count,
+ int max_conn, bool polled);
+void drm_fb_helper_fini(struct drm_fb_helper *helper);
int drm_fb_helper_blank(int blank, struct fb_info *info);
int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info);
@@ -99,13 +123,17 @@ int drm_fb_helper_setcolreg(unsigned regno,
struct fb_info *info);
void drm_fb_helper_restore(void);
-void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb,
+void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
uint32_t fb_width, uint32_t fb_height);
void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
uint32_t depth);
-int drm_fb_helper_add_connector(struct drm_connector *connector);
-int drm_fb_helper_parse_command_line(struct drm_device *dev);
int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
+bool drm_helper_fb_hotplug_event(struct drm_fb_helper *fb_helper,
+ bool polled);
+bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel);
+int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper);
+
+void drm_helper_fb_hpd_irq_event(struct drm_fb_helper *fb_helper);
#endif
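
A minimal sketch of the new fbdev-helper flow for a driver, assuming a hypothetical my_fbdev wrapper: the helper is initialized explicitly, connectors are added to it, and the initial configuration is requested through the helper rather than through the removed drm_helper_initial_config()/fb_changed path.

struct my_fbdev {
	struct drm_fb_helper helper;
	/* driver-private framebuffer state ... */
};

static int my_fbdev_init(struct drm_device *dev, struct my_fbdev *fbdev)
{
	int ret;

	/* fbdev->helper.funcs (with its .fb_probe hook) is assumed to be
	 * assigned by the caller */
	ret = drm_fb_helper_init(dev, &fbdev->helper,
				 dev->mode_config.num_crtc,
				 4 /* max connectors */, true /* polled */);
	if (ret)
		return ret;

	drm_fb_helper_single_add_all_connectors(&fbdev->helper);
	drm_fb_helper_initial_config(&fbdev->helper, 32 /* preferred bpp */);
	return 0;
}
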
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 81eb9f45883c..3e273e0b9417 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -66,6 +66,26 @@ struct ttm_placement {
const uint32_t *busy_placement;
};
+/**
+ * struct ttm_bus_placement
+ *
+ * @addr: mapped virtual address
+ * @base: bus base address
+ * @is_iomem: is this io memory ?
+ * @size: size in byte
+ * @offset: offset from the base address
+ *
+ * Structure indicating the bus placement of an object.
+ */
+struct ttm_bus_placement {
+ void *addr;
+ unsigned long base;
+ unsigned long size;
+ unsigned long offset;
+ bool is_iomem;
+ bool io_reserved;
+};
+
/**
* struct ttm_mem_reg
@@ -75,6 +95,7 @@ struct ttm_placement {
* @num_pages: Actual size of memory region in pages.
* @page_alignment: Page alignment.
* @placement: Placement flags.
+ * @bus: Placement on io bus accessible to the CPU
*
* Structure indicating the placement and space resources used by a
* buffer object.
@@ -87,6 +108,7 @@ struct ttm_mem_reg {
uint32_t page_alignment;
uint32_t mem_type;
uint32_t placement;
+ struct ttm_bus_placement bus;
};
/**
@@ -274,6 +296,7 @@ struct ttm_bo_kmap_obj {
ttm_bo_map_kmap = 3,
ttm_bo_map_premapped = 4 | TTM_BO_MAP_IOMEM_MASK,
} bo_kmap_type;
+ struct ttm_buffer_object *bo;
};
/**
@@ -313,7 +336,8 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
* @bo: The buffer object.
* @placement: Proposed placement for the buffer object.
* @interruptible: Sleep interruptible if sleeping.
- * @no_wait: Return immediately if the buffer is busy.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
*
* Changes placement and caching policy of the buffer object
* according to the proposed placement.
@@ -325,7 +349,8 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
*/
extern int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
- bool interruptible, bool no_wait);
+ bool interruptible, bool no_wait_reserve,
+ bool no_wait_gpu);
/**
* ttm_bo_unref
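
A sketch of an updated call site: the single no_wait argument is split into two flags, and passing false for both keeps the old blocking behaviour.

ret = ttm_bo_validate(bo, &placement, interruptible,
		      false /* no_wait_reserve */,
		      false /* no_wait_gpu */);
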
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 6b9db917e717..0ea602da43e7 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -176,8 +176,6 @@ struct ttm_tt {
#define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */
#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */
-#define TTM_MEMTYPE_FLAG_NEEDS_IOREMAP (1 << 2) /* Fixed memory needs ioremap
- before kernel access. */
#define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */
/**
@@ -189,13 +187,6 @@ struct ttm_tt {
* managed by this memory type.
* @gpu_offset: If used, the GPU offset of the first managed page of
* fixed memory or the first managed location in an aperture.
- * @io_offset: The io_offset of the first managed page of IO memory or
- * the first managed location in an aperture. For TTM_MEMTYPE_FLAG_CMA
- * memory, this should be set to NULL.
- * @io_size: The size of a managed IO region (fixed memory or aperture).
- * @io_addr: Virtual kernel address if the io region is pre-mapped. For
- * TTM_MEMTYPE_FLAG_NEEDS_IOREMAP there is no pre-mapped io map and
- * @io_addr should be set to NULL.
* @size: Size of the managed region.
* @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
* as defined in ttm_placement_common.h
@@ -221,9 +212,6 @@ struct ttm_mem_type_manager {
bool use_type;
uint32_t flags;
unsigned long gpu_offset;
- unsigned long io_offset;
- unsigned long io_size;
- void *io_addr;
uint64_t size;
uint32_t available_caching;
uint32_t default_caching;
@@ -311,7 +299,8 @@ struct ttm_bo_driver {
*/
int (*move) (struct ttm_buffer_object *bo,
bool evict, bool interruptible,
- bool no_wait, struct ttm_mem_reg *new_mem);
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem);
/**
* struct ttm_bo_driver_member verify_access
@@ -351,12 +340,21 @@ struct ttm_bo_driver {
struct ttm_mem_reg *new_mem);
/* notify the driver we are taking a fault on this BO
* and have reserved it */
- void (*fault_reserve_notify)(struct ttm_buffer_object *bo);
+ int (*fault_reserve_notify)(struct ttm_buffer_object *bo);
/**
* notify the driver that we're about to swap out this bo
*/
void (*swap_notify) (struct ttm_buffer_object *bo);
+
+ /**
+ * Driver callback for when mapping io memory (for bo_move_memcpy
+ * for instance). TTM will take care to call io_mem_free whenever
+ * the mapping is not used anymore. io_mem_reserve & io_mem_free
+ * are balanced.
+ */
+ int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
+ void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
};
/**
@@ -633,7 +631,8 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
* @proposed_placement: Proposed new placement for the buffer object.
* @mem: A struct ttm_mem_reg.
* @interruptible: Sleep interruptible when sleeping.
- * @no_wait: Don't sleep waiting for space to become available.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
*
* Allocate memory space for the buffer object pointed to by @bo, using
* the placement flags in @mem, potentially evicting other idle buffer objects.
@@ -647,7 +646,8 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_mem_reg *mem,
- bool interruptible, bool no_wait);
+ bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu);
/**
* ttm_bo_wait_for_cpu
*
@@ -682,6 +682,11 @@ extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
unsigned long *bus_offset,
unsigned long *bus_size);
+extern int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem);
+extern void ttm_mem_io_free(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem);
+
extern void ttm_bo_global_release(struct ttm_global_reference *ref);
extern int ttm_bo_global_init(struct ttm_global_reference *ref);
@@ -798,7 +803,8 @@ extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
*
* @bo: A pointer to a struct ttm_buffer_object.
* @evict: 1: This is an eviction. Don't try to pipeline.
- * @no_wait: Never sleep, but rather return with -EBUSY.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
* @new_mem: struct ttm_mem_reg indicating where to move.
*
* Optimized move function for a buffer object with both old and
@@ -812,15 +818,16 @@ extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
*/
extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
- bool evict, bool no_wait,
- struct ttm_mem_reg *new_mem);
+ bool evict, bool no_wait_reserve,
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem);
/**
* ttm_bo_move_memcpy
*
* @bo: A pointer to a struct ttm_buffer_object.
* @evict: 1: This is an eviction. Don't try to pipeline.
- * @no_wait: Never sleep, but rather return with -EBUSY.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
* @new_mem: struct ttm_mem_reg indicating where to move.
*
* Fallback move function for a mappable buffer object in mappable memory.
@@ -834,8 +841,8 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
*/
extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
- bool evict,
- bool no_wait, struct ttm_mem_reg *new_mem);
+ bool evict, bool no_wait_reserve,
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem);
/**
* ttm_bo_free_old_node
@@ -854,7 +861,8 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
* @sync_obj_arg: An argument to pass to the sync object idle / wait
* functions.
* @evict: This is an evict move. Don't return until the buffer is idle.
- * @no_wait: Never sleep, but rather return with -EBUSY.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
* @new_mem: struct ttm_mem_reg indicating where to move.
*
* Accelerated move function to be called when an accelerated move
@@ -868,7 +876,8 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
void *sync_obj,
void *sync_obj_arg,
- bool evict, bool no_wait,
+ bool evict, bool no_wait_reserve,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem);
/**
* ttm_io_prot
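
A hedged sketch of the two new callbacks in a hypothetical driver: io_mem_reserve fills in mem->bus from the new struct ttm_bus_placement, io_mem_free releases any reservation. The mm_node/TTM_PL_VRAM usage and the BAR base are assumptions about a typical VRAM-backed driver, not something this header dictates.

static int my_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;

	switch (mem->mem_type) {
	case TTM_PL_VRAM:
		mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
		mem->bus.base = 0xd0000000;	/* hypothetical VRAM BAR base */
		mem->bus.is_iomem = true;
		break;
	default:
		break;
	}
	return 0;
}

static void my_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	/* nothing reserved in this sketch, so nothing to release */
}
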
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
new file mode 100644
index 000000000000..8bb4de567b2c
--- /dev/null
+++ b/include/drm/ttm/ttm_page_alloc.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) Red Hat Inc.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>
+ * Jerome Glisse <jglisse@redhat.com>
+ */
+#ifndef TTM_PAGE_ALLOC
+#define TTM_PAGE_ALLOC
+
+#include "ttm_bo_driver.h"
+#include "ttm_memory.h"
+
+/**
+ * Get count number of pages from pool to pages list.
+ *
+ * @pages: head of empty linked list where pages are filled.
+ * @flags: ttm flags for page allocation.
+ * @cstate: ttm caching state for the page.
+ * @count: number of pages to allocate.
+ */
+int ttm_get_pages(struct list_head *pages,
+ int flags,
+ enum ttm_caching_state cstate,
+ unsigned count);
+/**
+ * Put linked list of pages to pool.
+ *
+ * @pages: list of pages to free.
+ * @page_count: number of pages in the list. Zero can be passed for unknown
+ * count.
+ * @flags: ttm flags for page allocation.
+ * @cstate: ttm caching state.
+ */
+void ttm_put_pages(struct list_head *pages,
+ unsigned page_count,
+ int flags,
+ enum ttm_caching_state cstate);
+/**
+ * Initialize pool allocator.
+ *
+ * Pool allocator is internally reference counted so it can be initialized
+ * multiple times but ttm_page_alloc_fini has to be called the same number of
+ * times.
+ */
+int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
+/**
+ * Free pool allocator.
+ */
+void ttm_page_alloc_fini(void);
+
+/**
+ * Output the state of pools to debugfs file
+ */
+extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
+#endif
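
A sketch of how a TTM backend might use the pool allocator; the flag and caching values (TTM_PAGE_FLAG_ZERO_ALLOC, tt_cached) are assumed from ttm_bo_driver.h, and init/fini must be called symmetrically because the pool is reference counted.

LIST_HEAD(pages);				/* <linux/list.h> */
int r;

r = ttm_page_alloc_init(glob, 64 * 1024);	/* glob: struct ttm_mem_global * */
if (r)
	return r;

r = ttm_get_pages(&pages, TTM_PAGE_FLAG_ZERO_ALLOC, tt_cached, 16);
if (!r) {
	/* ... use the 16 pages ... */
	ttm_put_pages(&pages, 16, TTM_PAGE_FLAG_ZERO_ALLOC, tt_cached);
}

ttm_page_alloc_fini();
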
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index e2ea0b2159cd..ed51075f0544 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -94,6 +94,7 @@ header-y += if_ppp.h
header-y += if_slip.h
header-y += if_strip.h
header-y += if_tun.h
+header-y += if_x25.h
header-y += in_route.h
header-y += ioctl.h
header-y += ip6_tunnel.h
@@ -209,6 +210,7 @@ unifdef-y += ethtool.h
unifdef-y += eventpoll.h
unifdef-y += signalfd.h
unifdef-y += ext2_fs.h
+unifdef-y += fanotify.h
unifdef-y += fb.h
unifdef-y += fcntl.h
unifdef-y += filter.h
@@ -250,6 +252,7 @@ unifdef-y += in.h
unifdef-y += in6.h
unifdef-y += inotify.h
unifdef-y += input.h
+unifdef-y += ioq.h
unifdef-y += ip.h
unifdef-y += ipc.h
unifdef-y += ipmi.h
@@ -339,6 +342,7 @@ unifdef-y += serial_core.h
unifdef-y += serial.h
unifdef-y += serio.h
unifdef-y += shm.h
+unifdef-y += shm_signal.h
unifdef-y += signal.h
unifdef-y += smb_fs.h
unifdef-y += smb.h
@@ -364,6 +368,8 @@ unifdef-y += uio.h
unifdef-y += unistd.h
unifdef-y += usbdevice_fs.h
unifdef-y += utsname.h
+unifdef-y += vbus_pci.h
+unifdef-y += venet.h
unifdef-y += vhost.h
unifdef-y += videodev2.h
unifdef-y += videodev.h
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index b926afe8c03e..2c60f1f70b38 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -116,11 +116,12 @@ extern unsigned long acpi_realmode_flags;
int acpi_register_gsi (struct device *dev, u32 gsi, int triggering, int polarity);
int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
+int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi);
#ifdef CONFIG_X86_IO_APIC
-extern int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity);
+extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity);
#else
-#define acpi_get_override_irq(bus, trigger, polarity) (-1)
+#define acpi_get_override_irq(gsi, trigger, polarity) (-1)
#endif
/*
* This function undoes the effect of one call to acpi_register_gsi().
@@ -247,6 +248,8 @@ int acpi_check_region(resource_size_t start, resource_size_t n,
int acpi_check_mem_region(resource_size_t start, resource_size_t n,
const char *name);
+int acpi_resources_are_enforced(void);
+
#ifdef CONFIG_PM_SLEEP
void __init acpi_no_s4_hw_signature(void);
void __init acpi_old_suspend_ordering(void);
diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h
new file mode 100644
index 000000000000..f7dd576dd5a4
--- /dev/null
+++ b/include/linux/ahci_platform.h
@@ -0,0 +1,29 @@
+/*
+ * AHCI SATA platform driver
+ *
+ * Copyright 2004-2005 Red Hat, Inc.
+ * Jeff Garzik <jgarzik@pobox.com>
+ * Copyright 2010 MontaVista Software, LLC.
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ */
+
+#ifndef _AHCI_PLATFORM_H
+#define _AHCI_PLATFORM_H
+
+struct device;
+struct ata_port_info;
+
+struct ahci_platform_data {
+ int (*init)(struct device *dev);
+ void (*exit)(struct device *dev);
+ const struct ata_port_info *ata_port_info;
+ unsigned int force_port_map;
+ unsigned int mask_port_map;
+};
+
+#endif /* _AHCI_PLATFORM_H */
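
A sketch of a board file wiring up the new platform data; the init hook (for example to enable a SATA PHY clock) is hypothetical.

static int my_board_ahci_init(struct device *dev)
{
	/* enable clocks / PHY for the SATA block here */
	return 0;
}

static struct ahci_platform_data my_board_ahci_pdata = {
	.init = my_board_ahci_init,
};
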
diff --git a/include/linux/amba/dma.h b/include/linux/amba/dma.h
new file mode 100644
index 000000000000..382ef01eb98a
--- /dev/null
+++ b/include/linux/amba/dma.h
@@ -0,0 +1,49 @@
+/*
+ * linux/include/amba/dma.h
+ *
+ * Copyright (C) 2010 ST-Ericsson AB
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#if !defined(AMBA_DMA_H) && defined(CONFIG_DMADEVICES)
+#define AMBA_DMA_H
+
+#include <linux/dma-mapping.h>
+struct dma_chan;
+
+/**
+ * struct amba_dma_channel_config - this struct is passed in as
+ * configuration data to a DMA engine in order to set up a certain
+ * channel for DMA transport. Anything the DMA engine needs to
+ * know about the PrimeCell shall be passed through this struct.
+ * The DMA engine has to provide an additional function:
+ * dma_set_ambaconfig() in order for it to work with PrimeCells.
+ * @addr: this is the physical address where DMA data should be
+ * read (RX) or written (TX)
+ * @addr_width: this is the width of the source (RX) or target
+ * (TX) register where DMA data shall be read/written, in bytes.
+ * legal values: 1, 2, 4, 8.
+ * @direction: whether the data goes in or out on this channel,
+ * right now.
+ * @maxburst: the maximum number of words (note: words, not bytes)
+ * that can be sent in one burst to the device. Typically something
+ * like half the FIFO depth on I/O peripherals so you don't
+ * overflow it.
+ */
+struct amba_dma_channel_config {
+ dma_addr_t addr;
+ u8 addr_width:4;
+ enum dma_data_direction direction;
+ int maxburst;
+};
+
+/*
+ * The following is an extension to the DMA engine framework
+ * that is needed to use the engine with PrimeCells.
+ */
+void dma_set_ambaconfig(struct dma_chan *chan,
+ struct amba_dma_channel_config *config);
+
+#endif /* AMBA_DMA_H */
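
A sketch of a PrimeCell driver describing its TX data register to the DMA engine through the new helper; the address, width and burst values are illustrative only.

struct amba_dma_channel_config tx_cfg = {
	.addr		= 0x80120040,	/* hypothetical TX FIFO address */
	.addr_width	= 4,		/* 32-bit data register */
	.direction	= DMA_TO_DEVICE,
	.maxburst	= 8,		/* half of a 16-word FIFO */
};

dma_set_ambaconfig(chan, &tx_cfg);
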
diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h
index 6b4241748dda..7e466fe72025 100644
--- a/include/linux/amba/mmci.h
+++ b/include/linux/amba/mmci.h
@@ -6,8 +6,29 @@
#include <linux/mmc/host.h>
+/**
+ * struct mmci_platform_data - platform configuration for the MMCI
+ * (also known as PL180) block.
+ * @f_max: the maximum operational frequency for this host in this
+ * platform configuration. When this is specified it takes precedence
+ * over the module parameter for the same frequency.
+ * @ocr_mask: available voltages on the 4 pins from the block, this
+ * is ignored if a regulator is used, see the MMC_VDD_* masks in
+ * mmc/host.h
+ * @translate_vdd: a callback function to translate a MMC_VDD_*
+ * mask into a value to be binary or:ed and written into the
+ * MMCIPWR register of the block
+ * @status: if no GPIO read function was given to the block in
+ * gpio_wp (below) this function will be called to determine
+ * whether a card is present in the MMC slot or not
+ * @gpio_wp: read this GPIO pin to see if the card is write protected
+ * @gpio_cd: read this GPIO pin to detect card insertion
+ * @capabilities: the capabilities of the block as implemented in
+ * this platform, signify anything MMC_CAP_* from mmc/host.h
+ */
struct mmci_platform_data {
- unsigned int ocr_mask; /* available voltages */
+ unsigned int f_max;
+ unsigned int ocr_mask;
u32 (*translate_vdd)(struct device *, unsigned int);
unsigned int (*status)(struct device *);
int gpio_wp;
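
A sketch of platform data for one MMCI/PL180 instance using the new f_max field; the GPIO numbers are hypothetical and the gpio_cd/capabilities fields are assumed from the kernel-doc above (they sit outside the shown hunk).

static struct mmci_platform_data my_mmci_data = {
	.f_max		= 25000000,
	.ocr_mask	= MMC_VDD_32_33 | MMC_VDD_33_34,
	.gpio_cd	= 112,
	.gpio_wp	= 113,
	.capabilities	= MMC_CAP_4_BIT_DATA,
};
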
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index bd0e3c6f323f..7534979d83bd 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
+#include <linux/timer.h>
#include <linux/writeback.h>
#include <asm/atomic.h>
@@ -88,6 +89,8 @@ struct backing_dev_info {
struct device *dev;
+ struct timer_list laptop_mode_wb_timer;
+
#ifdef CONFIG_DEBUG_FS
struct dentry *debug_dir;
struct dentry *debug_stats;
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index b796eab5ca75..fc68053378ce 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -10,6 +10,11 @@
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
#endif
+extern unsigned int __sw_hweight8(unsigned int w);
+extern unsigned int __sw_hweight16(unsigned int w);
+extern unsigned int __sw_hweight32(unsigned int w);
+extern unsigned long __sw_hweight64(__u64 w);
+
/*
* Include this here because some architectures need generic_ffs/fls in
* scope
@@ -44,31 +49,6 @@ static inline unsigned long hweight_long(unsigned long w)
return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}
-/*
- * Clearly slow versions of the hweightN() functions, their benefit is
- * of course compile time evaluation of constant arguments.
- */
-#define HWEIGHT8(w) \
- ( BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + \
- (!!((w) & (1ULL << 0))) + \
- (!!((w) & (1ULL << 1))) + \
- (!!((w) & (1ULL << 2))) + \
- (!!((w) & (1ULL << 3))) + \
- (!!((w) & (1ULL << 4))) + \
- (!!((w) & (1ULL << 5))) + \
- (!!((w) & (1ULL << 6))) + \
- (!!((w) & (1ULL << 7))) )
-
-#define HWEIGHT16(w) (HWEIGHT8(w) + HWEIGHT8((w) >> 8))
-#define HWEIGHT32(w) (HWEIGHT16(w) + HWEIGHT16((w) >> 16))
-#define HWEIGHT64(w) (HWEIGHT32(w) + HWEIGHT32((w) >> 32))
-
-/*
- * Type invariant version that simply casts things to the
- * largest type.
- */
-#define HWEIGHT(w) HWEIGHT64((u64)(w))
-
/**
* rol32 - rotate a 32-bit value left
* @word: value to rotate
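
With the constant-folding HWEIGHT*() macros gone, callers presumably move to the hweightN() helpers, which architectures may back with the new __sw_hweight* software fallbacks; a trivial sketch:

unsigned int set_bits = hweight32(0xF0F0F0F0);	/* 16 */
unsigned long long_bits = hweight_long(~0UL);	/* BITS_PER_LONG */
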
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 6690e8bae7bb..346fd4856733 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -186,15 +186,19 @@ struct request {
};
/*
- * two pointers are available for the IO schedulers, if they need
+ * Three pointers are available for the IO schedulers, if they need
* more they have to dynamically allocate it.
*/
void *elevator_private;
void *elevator_private2;
+ void *elevator_private3;
struct gendisk *rq_disk;
unsigned long start_time;
-
+#ifdef CONFIG_BLK_CGROUP
+ unsigned long long start_time_ns;
+ unsigned long long io_start_time_ns; /* when passed to hardware */
+#endif
/* Number of scatter-gather DMA addr+len pairs after
* physical address coalescing is performed.
*/
@@ -917,7 +921,12 @@ extern void blk_abort_queue(struct request_queue *);
*/
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
spinlock_t *lock, int node_id);
+extern struct request_queue *blk_init_allocated_queue_node(struct request_queue *,
+ request_fn_proc *,
+ spinlock_t *, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
+extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
+ request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
@@ -994,20 +1003,25 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
return NULL;
return bqt->tag_index[tag];
}
-
-extern int blkdev_issue_flush(struct block_device *, sector_t *);
-#define DISCARD_FL_WAIT 0x01 /* wait for completion */
-#define DISCARD_FL_BARRIER 0x02 /* issue DISCARD_BARRIER request */
-extern int blkdev_issue_discard(struct block_device *, sector_t sector,
- sector_t nr_sects, gfp_t, int flags);
-
+enum {
+ BLKDEV_WAIT, /* wait for completion */
+ BLKDEV_BARRIER, /* issue request with barrier */
+};
+#define BLKDEV_IFL_WAIT (1 << BLKDEV_WAIT)
+#define BLKDEV_IFL_BARRIER (1 << BLKDEV_BARRIER)
+extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *,
+ unsigned long);
+extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
+extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
static inline int sb_issue_discard(struct super_block *sb,
sector_t block, sector_t nr_blocks)
{
block <<= (sb->s_blocksize_bits - 9);
nr_blocks <<= (sb->s_blocksize_bits - 9);
return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL,
- DISCARD_FL_BARRIER);
+ BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
}
extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
@@ -1196,6 +1210,39 @@ static inline void put_dev_sector(Sector p)
struct work_struct;
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
+#ifdef CONFIG_BLK_CGROUP
+static inline void set_start_time_ns(struct request *req)
+{
+ req->start_time_ns = sched_clock();
+}
+
+static inline void set_io_start_time_ns(struct request *req)
+{
+ req->io_start_time_ns = sched_clock();
+}
+
+static inline uint64_t rq_start_time_ns(struct request *req)
+{
+ return req->start_time_ns;
+}
+
+static inline uint64_t rq_io_start_time_ns(struct request *req)
+{
+ return req->io_start_time_ns;
+}
+#else
+static inline void set_start_time_ns(struct request *req) {}
+static inline void set_io_start_time_ns(struct request *req) {}
+static inline uint64_t rq_start_time_ns(struct request *req)
+{
+ return 0;
+}
+static inline uint64_t rq_io_start_time_ns(struct request *req)
+{
+ return 0;
+}
+#endif
+
#define MODULE_ALIAS_BLOCKDEV(major,minor) \
MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
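
A sketch of updated call sites: blkdev_issue_flush() now takes a gfp mask and BLKDEV_IFL_* flags, and blkdev_issue_discard() takes the same flags instead of DISCARD_FL_*.

ret = blkdev_issue_flush(bdev, GFP_KERNEL, NULL, BLKDEV_IFL_WAIT);
ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL,
			   BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
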
diff --git a/include/linux/cache.h b/include/linux/cache.h
index 97e24881c4c6..4c570653ab84 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -31,7 +31,7 @@
#ifndef __cacheline_aligned
#define __cacheline_aligned \
__attribute__((__aligned__(SMP_CACHE_BYTES), \
- __section__(".data.cacheline_aligned")))
+ __section__(".data..cacheline_aligned")))
#endif /* __cacheline_aligned */
#ifndef __cacheline_aligned_in_smp
diff --git a/include/linux/caif/caif_socket.h b/include/linux/caif/caif_socket.h
new file mode 100644
index 000000000000..2a61eb1beb85
--- /dev/null
+++ b/include/linux/caif/caif_socket.h
@@ -0,0 +1,165 @@
+/* linux/caif_socket.h
+ * CAIF Definitions for CAIF socket and network layer
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/ sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef _LINUX_CAIF_SOCKET_H
+#define _LINUX_CAIF_SOCKET_H
+
+#include <linux/types.h>
+
+#ifdef __KERNEL__
+#include <linux/socket.h>
+#else
+#include <sys/socket.h>
+#endif
+
+/**
+ * enum caif_link_selector - Physical Link Selection.
+ * @CAIF_LINK_HIGH_BANDW: Physical interface for high-bandwidth
+ * traffic.
+ * @CAIF_LINK_LOW_LATENCY: Physical interface for low-latency
+ * traffic.
+ *
+ * CAIF Link Layers can register their link properties.
+ * This enum is used for choosing between CAIF Link Layers when
+ * setting up CAIF Channels when multiple CAIF Link Layers exist.
+ */
+enum caif_link_selector {
+ CAIF_LINK_HIGH_BANDW,
+ CAIF_LINK_LOW_LATENCY
+};
+
+/**
+ * enum caif_channel_priority - CAIF channel priorities.
+ *
+ * @CAIF_PRIO_MIN: Min priority for a channel.
+ * @CAIF_PRIO_LOW: Low-priority channel.
+ * @CAIF_PRIO_NORMAL: Normal/default priority level.
+ * @CAIF_PRIO_HIGH: High priority level
+ * @CAIF_PRIO_MAX: Max priority for channel
+ *
+ * Priority can be set on CAIF Channels in order to
+ * prioritize between traffic on different CAIF Channels.
+ * These priority levels are recommended, but the priority value
+ * is not restricted to the values defined in this enum, any value
+ * between CAIF_PRIO_MIN and CAIF_PRIO_MAX could be used.
+ */
+enum caif_channel_priority {
+ CAIF_PRIO_MIN = 0x01,
+ CAIF_PRIO_LOW = 0x04,
+ CAIF_PRIO_NORMAL = 0x0f,
+ CAIF_PRIO_HIGH = 0x14,
+ CAIF_PRIO_MAX = 0x1F
+};
+
+/**
+ * enum caif_protocol_type - CAIF Channel type.
+ * @CAIFPROTO_AT: Classic AT channel.
+ * @CAIFPROTO_DATAGRAM: Datagram channel.
+ * @CAIFPROTO_DATAGRAM_LOOP: Datagram loopback channel, used for testing.
+ * @CAIFPROTO_UTIL: Utility (Psock) channel.
+ * @CAIFPROTO_RFM: Remote File Manager
+ *
+ * This enum defines the CAIF Channel type to be used. This defines
+ * the service to connect to on the modem.
+ */
+enum caif_protocol_type {
+ CAIFPROTO_AT,
+ CAIFPROTO_DATAGRAM,
+ CAIFPROTO_DATAGRAM_LOOP,
+ CAIFPROTO_UTIL,
+ CAIFPROTO_RFM,
+ _CAIFPROTO_MAX
+};
+#define CAIFPROTO_MAX _CAIFPROTO_MAX
+
+/**
+ * enum caif_at_type - AT Service Endpoint
+ * @CAIF_ATTYPE_PLAIN: Connects to a plain vanilla AT channel.
+ */
+enum caif_at_type {
+ CAIF_ATTYPE_PLAIN = 2
+};
+
+/**
+ * struct sockaddr_caif - the sockaddr structure for CAIF sockets.
+ * @family: Address family number, must be AF_CAIF.
+ * @u: Union of address data 'switched' by family.
+ * :
+ * @u.at: Applies when family = CAIFPROTO_AT.
+ *
+ * @u.at.type: Type of AT link to set up (enum caif_at_type).
+ *
+ * @u.util: Applies when family = CAIFPROTO_UTIL
+ *
+ * @u.util.service: Utility service name.
+ *
+ * @u.dgm: Applies when family = CAIFPROTO_DATAGRAM
+ *
+ * @u.dgm.connection_id: Datagram connection id.
+ *
+ * @u.dgm.nsapi: NSAPI of the PDP-Context.
+ *
+ * @u.rfm: Applies when family = CAIFPROTO_RFM
+ *
+ * @u.rfm.connection_id: Connection ID for RFM.
+ *
+ * @u.rfm.volume: Volume to mount.
+ *
+ * Description:
+ * This structure holds the connect parameters used for setting up a
+ * CAIF Channel. It defines the service to connect to on the modem.
+ */
+struct sockaddr_caif {
+ sa_family_t family;
+ union {
+ struct {
+ __u8 type; /* type: enum caif_at_type */
+ } at; /* CAIFPROTO_AT */
+ struct {
+ char service[16];
+ } util; /* CAIFPROTO_UTIL */
+ union {
+ __u32 connection_id;
+ __u8 nsapi;
+ } dgm; /* CAIFPROTO_DATAGRAM(_LOOP)*/
+ struct {
+ __u32 connection_id;
+ char volume[16];
+ } rfm; /* CAIFPROTO_RFM */
+ } u;
+};
+
+/**
+ * enum caif_socket_opts - CAIF option values for getsockopt and setsockopt.
+ *
+ * @CAIFSO_LINK_SELECT: Selector used if multiple CAIF Link layers are
+ * available. Either a high bandwidth
+ * link can be selected (CAIF_LINK_HIGH_BANDW)
+ * or a low latency link (CAIF_LINK_LOW_LATENCY).
+ * This option is of type __u32.
+ * Alternatively SO_BINDTODEVICE can be used.
+ *
+ * @CAIFSO_REQ_PARAM: Used to set the request parameters for a
+ * utility channel. (maximum 256 bytes). This
+ * option must be set before connecting.
+ *
+ * @CAIFSO_RSP_PARAM: Gets the response parameters for a utility
+ * channel. (maximum 256 bytes). This option
+ * is valid after a successful connect.
+ *
+ *
+ * This enum defines the CAIF Socket options to be used on a socket
+ * of type PF_CAIF.
+ *
+ */
+enum caif_socket_opts {
+ CAIFSO_LINK_SELECT = 127,
+ CAIFSO_REQ_PARAM = 128,
+ CAIFSO_RSP_PARAM = 129,
+};
+
+#endif /* _LINUX_CAIF_SOCKET_H */
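
A userspace sketch of connecting an AT-service CAIF socket; AF_CAIF and the SOCK_SEQPACKET socket type are assumptions taken from the wider CAIF patch set rather than from this header alone.

struct sockaddr_caif addr = {
	.family = AF_CAIF,
	.u.at.type = CAIF_ATTYPE_PLAIN,
};
int fd = socket(AF_CAIF, SOCK_SEQPACKET, CAIFPROTO_AT);

if (fd >= 0)
	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
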
diff --git a/include/linux/caif/if_caif.h b/include/linux/caif/if_caif.h
new file mode 100644
index 000000000000..5e7eed4edf51
--- /dev/null
+++ b/include/linux/caif/if_caif.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/ sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef IF_CAIF_H_
+#define IF_CAIF_H_
+#include <linux/sockios.h>
+#include <linux/types.h>
+#include <linux/socket.h>
+
+/**
+ * enum ifla_caif - CAIF NetlinkRT parameters.
+ * @IFLA_CAIF_IPV4_CONNID: Connection ID for IPv4 PDP Context.
+ * The type of attribute is NLA_U32.
+ * @IFLA_CAIF_IPV6_CONNID: Connection ID for IPv6 PDP Context.
+ * The type of attribute is NLA_U32.
+ * @IFLA_CAIF_LOOPBACK: If different from zero, device is doing loopback
+ * The type of attribute is NLA_U8.
+ *
+ * When using RT Netlink to create, destroy or configure a CAIF IP interface,
+ * enum ifla_caif is used to specify the configuration attributes.
+ */
+enum ifla_caif {
+ __IFLA_CAIF_UNSPEC,
+ IFLA_CAIF_IPV4_CONNID,
+ IFLA_CAIF_IPV6_CONNID,
+ IFLA_CAIF_LOOPBACK,
+ __IFLA_CAIF_MAX
+};
+#define IFLA_CAIF_MAX (__IFLA_CAIF_MAX-1)
+
+#endif /*IF_CAIF_H_*/
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 6e5a7f00223d..cc0bb4961669 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -14,6 +14,7 @@
#ifndef CAN_DEV_H
#define CAN_DEV_H
+#include <linux/can.h>
#include <linux/can/netlink.h>
#include <linux/can/error.h>
diff --git a/include/linux/can/platform/mcp251x.h b/include/linux/can/platform/mcp251x.h
index 1448177d86d5..dba28268e651 100644
--- a/include/linux/can/platform/mcp251x.h
+++ b/include/linux/can/platform/mcp251x.h
@@ -26,8 +26,8 @@
struct mcp251x_platform_data {
unsigned long oscillator_frequency;
int model;
-#define CAN_MCP251X_MCP2510 0
-#define CAN_MCP251X_MCP2515 1
+#define CAN_MCP251X_MCP2510 0x2510
+#define CAN_MCP251X_MCP2515 0x2515
int (*board_specific_setup)(struct spi_device *spi);
int (*transceiver_enable)(int enable);
int (*power_enable) (int enable);
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 4bca8b60cdf7..5ea3c60c160c 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -273,7 +273,6 @@ static inline s64 clocksource_cyc2ns(cycle_t cycles, u32 mult, u32 shift)
}
-/* used to install a new clocksource */
extern int clocksource_register(struct clocksource*);
extern void clocksource_unregister(struct clocksource*);
extern void clocksource_touch_watchdog(void);
@@ -287,6 +286,24 @@ extern void clocksource_mark_unstable(struct clocksource *cs);
extern void
clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec);
+/*
+ * Don't call __clocksource_register_scale directly, use
+ * clocksource_register_hz/khz
+ */
+extern int
+__clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq);
+
+static inline int clocksource_register_hz(struct clocksource *cs, u32 hz)
+{
+ return __clocksource_register_scale(cs, 1, hz);
+}
+
+static inline int clocksource_register_khz(struct clocksource *cs, u32 khz)
+{
+ return __clocksource_register_scale(cs, 1000, khz);
+}
+
+
static inline void
clocksource_calc_mult_shift(struct clocksource *cs, u32 freq, u32 minsec)
{
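
A sketch of a clocksource driver registering through the new helper instead of computing mult/shift itself; the timer block, its base address and its frequency are hypothetical.

static void __iomem *my_timer_base;

static cycle_t my_timer_read(struct clocksource *cs)
{
	return (cycle_t)__raw_readl(my_timer_base + 0x04);	/* free-running counter */
}

static struct clocksource my_clocksource = {
	.name	= "my_timer",
	.rating	= 200,
	.read	= my_timer_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init my_timer_init(void)
{
	return clocksource_register_hz(&my_clocksource, 13000000);	/* 13 MHz */
}
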
diff --git a/include/linux/cper.h b/include/linux/cper.h
new file mode 100644
index 000000000000..4b38f905b705
--- /dev/null
+++ b/include/linux/cper.h
@@ -0,0 +1,314 @@
+/*
+ * UEFI Common Platform Error Record
+ *
+ * Copyright (C) 2010, Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef LINUX_CPER_H
+#define LINUX_CPER_H
+
+#include <linux/uuid.h>
+
+/* CPER record signature and the size */
+#define CPER_SIG_RECORD "CPER"
+#define CPER_SIG_SIZE 4
+/* Used in signature_end field in struct cper_record_header */
+#define CPER_SIG_END 0xffffffff
+
+/*
+ * CPER record header revision, used in revision field in struct
+ * cper_record_header
+ */
+#define CPER_RECORD_REV 0x0100
+
+/*
+ * Severity definition for error_severity in struct cper_record_header
+ * and section_severity in struct cper_section_descriptor
+ */
+#define CPER_SER_RECOVERABLE 0x0
+#define CPER_SER_FATAL 0x1
+#define CPER_SER_CORRECTED 0x2
+#define CPER_SER_INFORMATIONAL 0x3
+
+/*
+ * Validation bits definition for validation_bits in struct
+ * cper_record_header. If set, corresponding fields in struct
+ * cper_record_header contain valid information.
+ *
+ * corresponds platform_id
+ */
+#define CPER_VALID_PLATFORM_ID 0x0001
+/* corresponds timestamp */
+#define CPER_VALID_TIMESTAMP 0x0002
+/* corresponds partition_id */
+#define CPER_VALID_PARTITION_ID 0x0004
+
+/*
+ * Notification type used to generate error record, used in
+ * notification_type in struct cper_record_header
+ *
+ * Corrected Machine Check
+ */
+#define CPER_NOTIFY_CMC \
+ UUID_LE(0x2DCE8BB1, 0xBDD7, 0x450e, 0xB9, 0xAD, 0x9C, 0xF4, \
+ 0xEB, 0xD4, 0xF8, 0x90)
+/* Corrected Platform Error */
+#define CPER_NOTIFY_CPE \
+ UUID_LE(0x4E292F96, 0xD843, 0x4a55, 0xA8, 0xC2, 0xD4, 0x81, \
+ 0xF2, 0x7E, 0xBE, 0xEE)
+/* Machine Check Exception */
+#define CPER_NOTIFY_MCE \
+ UUID_LE(0xE8F56FFE, 0x919C, 0x4cc5, 0xBA, 0x88, 0x65, 0xAB, \
+ 0xE1, 0x49, 0x13, 0xBB)
+/* PCI Express Error */
+#define CPER_NOTIFY_PCIE \
+ UUID_LE(0xCF93C01F, 0x1A16, 0x4dfc, 0xB8, 0xBC, 0x9C, 0x4D, \
+ 0xAF, 0x67, 0xC1, 0x04)
+/* INIT Record (for IPF) */
+#define CPER_NOTIFY_INIT \
+ UUID_LE(0xCC5263E8, 0x9308, 0x454a, 0x89, 0xD0, 0x34, 0x0B, \
+ 0xD3, 0x9B, 0xC9, 0x8E)
+/* Non-Maskable Interrupt */
+#define CPER_NOTIFY_NMI \
+ UUID_LE(0x5BAD89FF, 0xB7E6, 0x42c9, 0x81, 0x4A, 0xCF, 0x24, \
+ 0x85, 0xD6, 0xE9, 0x8A)
+/* BOOT Error Record */
+#define CPER_NOTIFY_BOOT \
+ UUID_LE(0x3D61A466, 0xAB40, 0x409a, 0xA6, 0x98, 0xF3, 0x62, \
+ 0xD4, 0x64, 0xB3, 0x8F)
+/* DMA Remapping Error */
+#define CPER_NOTIFY_DMAR \
+ UUID_LE(0x667DD791, 0xC6B3, 0x4c27, 0x8A, 0x6B, 0x0F, 0x8E, \
+ 0x72, 0x2D, 0xEB, 0x41)
+
+/*
+ * Flags bits definitions for flags in struct cper_record_header
+ * If set, the error has been recovered
+ */
+#define CPER_HW_ERROR_FLAGS_RECOVERED 0x1
+/* If set, the error is for previous boot */
+#define CPER_HW_ERROR_FLAGS_PREVERR 0x2
+/* If set, the error is injected for testing */
+#define CPER_HW_ERROR_FLAGS_SIMULATED 0x4
+
+/*
+ * CPER section header revision, used in revision field in struct
+ * cper_section_descriptor
+ */
+#define CPER_SEC_REV 0x0100
+
+/*
+ * Validation bits definition for validation_bits in struct
+ * cper_section_descriptor. If set, corresponding fields in struct
+ * cper_section_descriptor contain valid information.
+ *
+ * corresponds fru_id
+ */
+#define CPER_SEC_VALID_FRU_ID 0x1
+/* corresponds fru_text */
+#define CPER_SEC_VALID_FRU_TEXT 0x2
+
+/*
+ * Flags bits definitions for flags in struct cper_section_descriptor
+ *
+ * If set, the section is associated with the error condition
+ * directly, and should be focused on
+ */
+#define CPER_SEC_PRIMARY 0x0001
+/*
+ * If set, the error was not contained within the processor or memory
+ * hierarchy and the error may have propagated to persistent storage
+ * or network
+ */
+#define CPER_SEC_CONTAINMENT_WARNING 0x0002
+/* If set, the component must be re-initialized or re-enabled prior to use */
+#define CPER_SEC_RESET 0x0004
+/* If set, Linux may choose to discontinue use of the resource */
+#define CPER_SEC_ERROR_THRESHOLD_EXCEEDED 0x0008
+/*
+ * If set, resource could not be queried for error information due to
+ * conflicts with other system software or resources. Some fields of
+ * the section will be invalid
+ */
+#define CPER_SEC_RESOURCE_NOT_ACCESSIBLE 0x0010
+/*
+ * If set, action has been taken to ensure error containment (such as
+ * poisoning data), but the error has not been fully corrected and the
+ * data has not been consumed. Linux may choose to take further
+ * corrective action before the data is consumed
+ */
+#define CPER_SEC_LATENT_ERROR 0x0020
+
+/*
+ * Section type definitions, used in section_type field in struct
+ * cper_section_descriptor
+ *
+ * Processor Generic
+ */
+#define CPER_SEC_PROC_GENERIC \
+ UUID_LE(0x9876CCAD, 0x47B4, 0x4bdb, 0xB6, 0x5E, 0x16, 0xF1, \
+ 0x93, 0xC4, 0xF3, 0xDB)
+/* Processor Specific: X86/X86_64 */
+#define CPER_SEC_PROC_IA \
+ UUID_LE(0xDC3EA0B0, 0xA144, 0x4797, 0xB9, 0x5B, 0x53, 0xFA, \
+ 0x24, 0x2B, 0x6E, 0x1D)
+/* Processor Specific: IA64 */
+#define CPER_SEC_PROC_IPF \
+ UUID_LE(0xE429FAF1, 0x3CB7, 0x11D4, 0x0B, 0xCA, 0x07, 0x00, \
+ 0x80, 0xC7, 0x3C, 0x88, 0x81)
+/* Platform Memory */
+#define CPER_SEC_PLATFORM_MEM \
+ UUID_LE(0xA5BC1114, 0x6F64, 0x4EDE, 0xB8, 0x63, 0x3E, 0x83, \
+ 0xED, 0x7C, 0x83, 0xB1)
+#define CPER_SEC_PCIE \
+ UUID_LE(0xD995E954, 0xBBC1, 0x430F, 0xAD, 0x91, 0xB4, 0x4D, \
+ 0xCB, 0x3C, 0x6F, 0x35)
+/* Firmware Error Record Reference */
+#define CPER_SEC_FW_ERR_REC_REF \
+ UUID_LE(0x81212A96, 0x09ED, 0x4996, 0x94, 0x71, 0x8D, 0x72, \
+ 0x9C, 0x8E, 0x69, 0xED)
+/* PCI/PCI-X Bus */
+#define CPER_SEC_PCI_X_BUS \
+ UUID_LE(0xC5753963, 0x3B84, 0x4095, 0xBF, 0x78, 0xED, 0xDA, \
+ 0xD3, 0xF9, 0xC9, 0xDD)
+/* PCI Component/Device */
+#define CPER_SEC_PCI_DEV \
+ UUID_LE(0xEB5E4685, 0xCA66, 0x4769, 0xB6, 0xA2, 0x26, 0x06, \
+ 0x8B, 0x00, 0x13, 0x26)
+#define CPER_SEC_DMAR_GENERIC \
+ UUID_LE(0x5B51FEF7, 0xC79D, 0x4434, 0x8F, 0x1B, 0xAA, 0x62, \
+ 0xDE, 0x3E, 0x2C, 0x64)
+/* Intel VT for Directed I/O specific DMAr */
+#define CPER_SEC_DMAR_VT \
+ UUID_LE(0x71761D37, 0x32B2, 0x45cd, 0xA7, 0xD0, 0xB0, 0xFE, \
+ 0xDD, 0x93, 0xE8, 0xCF)
+/* IOMMU specific DMAr */
+#define CPER_SEC_DMAR_IOMMU \
+ UUID_LE(0x036F84E1, 0x7F37, 0x428c, 0xA7, 0x9E, 0x57, 0x5F, \
+ 0xDF, 0xAA, 0x84, 0xEC)
+
+/*
+ * All tables and structs must be byte-packed to match CPER
+ * specification, since the tables are provided by the system BIOS
+ */
+#pragma pack(1)
+
+struct cper_record_header {
+ char signature[CPER_SIG_SIZE]; /* must be CPER_SIG_RECORD */
+ __u16 revision; /* must be CPER_RECORD_REV */
+ __u32 signature_end; /* must be CPER_SIG_END */
+ __u16 section_count;
+ __u32 error_severity;
+ __u32 validation_bits;
+ __u32 record_length;
+ __u64 timestamp;
+ uuid_le platform_id;
+ uuid_le partition_id;
+ uuid_le creator_id;
+ uuid_le notification_type;
+ __u64 record_id;
+ __u32 flags;
+ __u64 persistence_information;
+ __u8 reserved[12]; /* must be zero */
+};
+
+struct cper_section_descriptor {
+ __u32 section_offset; /* Offset in bytes of the
+ * section body from the base
+ * of the record header */
+ __u32 section_length;
+ __u16 revision; /* must be CPER_RECORD_REV */
+ __u8 validation_bits;
+ __u8 reserved; /* must be zero */
+ __u32 flags;
+ uuid_le section_type;
+ uuid_le fru_id;
+ __u32 section_severity;
+ __u8 fru_text[20];
+};
+
+/* Generic Processor Error Section */
+struct cper_sec_proc_generic {
+ __u64 validation_bits;
+ __u8 proc_type;
+ __u8 proc_isa;
+ __u8 proc_error_type;
+ __u8 operation;
+ __u8 flags;
+ __u8 level;
+ __u16 reserved;
+ __u64 cpu_version;
+ char cpu_brand[128];
+ __u64 proc_id;
+ __u64 target_addr;
+ __u64 requestor_id;
+ __u64 responder_id;
+ __u64 ip;
+};
+
+/* IA32/X64 Processor Error Section */
+struct cper_sec_proc_ia {
+ __u64 validation_bits;
+ __u8 lapic_id;
+ __u8 cpuid[48];
+};
+
+/* IA32/X64 Processor Error Information Structure */
+struct cper_ia_err_info {
+ uuid_le err_type;
+ __u64 validation_bits;
+ __u64 check_info;
+ __u64 target_id;
+ __u64 requestor_id;
+ __u64 responder_id;
+ __u64 ip;
+};
+
+/* IA32/X64 Processor Context Information Structure */
+struct cper_ia_proc_ctx {
+ __u16 reg_ctx_type;
+ __u16 reg_arr_size;
+ __u32 msr_addr;
+ __u64 mm_reg_addr;
+};
+
+/* Memory Error Section */
+struct cper_sec_mem_err {
+ __u64 validation_bits;
+ __u64 error_status;
+ __u64 physical_addr;
+ __u64 physical_addr_mask;
+ __u16 node;
+ __u16 card;
+ __u16 module;
+ __u16 bank;
+ __u16 device;
+ __u16 row;
+ __u16 column;
+ __u16 bit_pos;
+ __u64 requestor_id;
+ __u64 responder_id;
+ __u64 target_id;
+ __u8 error_type;
+};
+
+/* Reset to default packing */
+#pragma pack()
+
+u64 cper_next_record_id(void);
+
+#endif
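
A minimal sketch of validating a record header against the constants defined above; my_cper_header_valid is hypothetical.

static bool my_cper_header_valid(const struct cper_record_header *hdr)
{
	return !memcmp(hdr->signature, CPER_SIG_RECORD, CPER_SIG_SIZE) &&
	       hdr->signature_end == CPER_SIG_END &&
	       hdr->revision == CPER_RECORD_REV;
}
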
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 4de02b10007f..c3e9de8321c6 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -196,11 +196,6 @@ extern int __cpufreq_driver_getavg(struct cpufreq_policy *policy,
int cpufreq_register_governor(struct cpufreq_governor *governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor);
-int lock_policy_rwsem_read(int cpu);
-int lock_policy_rwsem_write(int cpu);
-void unlock_policy_rwsem_read(int cpu);
-void unlock_policy_rwsem_write(int cpu);
-
/*********************************************************************
* CPUFREQ DRIVER INTERFACE *
@@ -278,6 +273,27 @@ struct freq_attr {
ssize_t (*store)(struct cpufreq_policy *, const char *, size_t count);
};
+#define cpufreq_freq_attr_ro(_name) \
+static struct freq_attr _name = \
+__ATTR(_name, 0444, show_##_name, NULL)
+
+#define cpufreq_freq_attr_ro_perm(_name, _perm) \
+static struct freq_attr _name = \
+__ATTR(_name, _perm, show_##_name, NULL)
+
+#define cpufreq_freq_attr_ro_old(_name) \
+static struct freq_attr _name##_old = \
+__ATTR(_name, 0444, show_##_name##_old, NULL)
+
+#define cpufreq_freq_attr_rw(_name) \
+static struct freq_attr _name = \
+__ATTR(_name, 0644, show_##_name, store_##_name)
+
+#define cpufreq_freq_attr_rw_old(_name) \
+static struct freq_attr _name##_old = \
+__ATTR(_name, 0644, show_##_name##_old, store_##_name##_old)
+
+
struct global_attr {
struct attribute attr;
ssize_t (*show)(struct kobject *kobj,
@@ -286,6 +302,15 @@ struct global_attr {
const char *c, size_t count);
};
+#define define_one_global_ro(_name) \
+static struct global_attr _name = \
+__ATTR(_name, 0444, show_##_name, NULL)
+
+#define define_one_global_rw(_name) \
+static struct global_attr _name = \
+__ATTR(_name, 0644, show_##_name, store_##_name)
+
+
/*********************************************************************
* CPUFREQ 2.6. INTERFACE *
*********************************************************************/
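
A sketch of the new attribute macros replacing an open-coded __ATTR() definition; show_my_stat and the attribute itself are hypothetical.

static ssize_t show_my_stat(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", policy->cur);
}
cpufreq_freq_attr_ro(my_stat);
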
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index a5740fc4d04b..a73454aec333 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -21,8 +21,7 @@ extern int number_of_cpusets; /* How many cpusets are defined in system? */
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
-extern void cpuset_cpus_allowed_locked(struct task_struct *p,
- struct cpumask *mask);
+extern int cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
@@ -69,9 +68,6 @@ struct seq_file;
extern void cpuset_task_status_allowed(struct seq_file *m,
struct task_struct *task);
-extern void cpuset_lock(void);
-extern void cpuset_unlock(void);
-
extern int cpuset_mem_spread_node(void);
static inline int cpuset_do_page_mem_spread(void)
@@ -105,10 +101,11 @@ static inline void cpuset_cpus_allowed(struct task_struct *p,
{
cpumask_copy(mask, cpu_possible_mask);
}
-static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
- struct cpumask *mask)
+
+static inline int cpuset_cpus_allowed_fallback(struct task_struct *p)
{
- cpumask_copy(mask, cpu_possible_mask);
+ cpumask_copy(&p->cpus_allowed, cpu_possible_mask);
+ return cpumask_any(cpu_active_mask);
}
static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
@@ -157,9 +154,6 @@ static inline void cpuset_task_status_allowed(struct seq_file *m,
{
}
-static inline void cpuset_lock(void) {}
-static inline void cpuset_unlock(void) {}
-
static inline int cpuset_mem_spread_node(void)
{
return 0;
diff --git a/include/linux/dcbnl.h b/include/linux/dcbnl.h
index b7cdbb4373df..8723491f7dfd 100644
--- a/include/linux/dcbnl.h
+++ b/include/linux/dcbnl.h
@@ -22,8 +22,6 @@
#include <linux/types.h>
-#define DCB_PROTO_VERSION 1
-
struct dcbmsg {
__u8 dcb_family;
__u8 cmd;
diff --git a/include/linux/device.h b/include/linux/device.h
index 182192892d45..692317073297 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -34,6 +34,7 @@ struct class;
struct class_private;
struct bus_type;
struct bus_type_private;
+struct device_node;
struct bus_attribute {
struct attribute attr;
@@ -128,6 +129,10 @@ struct device_driver {
bool suppress_bind_attrs; /* disables bind/unbind via sysfs */
+#if defined(CONFIG_OF)
+ const struct of_device_id *of_match_table;
+#endif
+
int (*probe) (struct device *dev);
int (*remove) (struct device *dev);
void (*shutdown) (struct device *dev);
@@ -433,6 +438,9 @@ struct device {
override */
/* arch specific additions */
struct dev_archdata archdata;
+#ifdef CONFIG_OF
+ struct device_node *of_node;
+#endif
dev_t devt; /* dev_t, creates the sysfs "dev" */
@@ -451,6 +459,10 @@ struct device {
static inline const char *dev_name(const struct device *dev)
{
+ /* Use the init name until the kobject becomes available */
+ if (dev->init_name)
+ return dev->init_name;
+
return kobject_name(&dev->kobj);
}
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 20ea12c86fd0..50b7b3e0d572 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -40,11 +40,13 @@ typedef s32 dma_cookie_t;
* enum dma_status - DMA transaction status
* @DMA_SUCCESS: transaction completed successfully
* @DMA_IN_PROGRESS: transaction not yet processed
+ * @DMA_PAUSED: transaction is paused
* @DMA_ERROR: transaction failed
*/
enum dma_status {
DMA_SUCCESS,
DMA_IN_PROGRESS,
+ DMA_PAUSED,
DMA_ERROR,
};
@@ -107,6 +109,19 @@ enum dma_ctrl_flags {
};
/**
+ * enum dma_ctrl_cmd - DMA operations that can optionally be exercised
+ * on a running channel.
+ * @DMA_TERMINATE_ALL: terminate all ongoing transfers
+ * @DMA_PAUSE: pause ongoing transfers
+ * @DMA_RESUME: resume paused transfer
+ */
+enum dma_ctrl_cmd {
+ DMA_TERMINATE_ALL,
+ DMA_PAUSE,
+ DMA_RESUME,
+};
+
+/**
* enum sum_check_bits - bit position of pq_check_flags
*/
enum sum_check_bits {
@@ -236,6 +251,21 @@ struct dma_async_tx_descriptor {
};
/**
+ * struct dma_tx_state - filled in to report the status of
+ * a transfer.
+ * @last: last completed DMA cookie
+ * @used: last issued DMA cookie (i.e. the one in progress)
+ * @residue: the remaining number of bytes left to transmit
+ * on the selected transfer for states DMA_IN_PROGRESS and
+ * DMA_PAUSED if this is implemented in the driver, else 0
+ */
+struct dma_tx_state {
+ dma_cookie_t last;
+ dma_cookie_t used;
+ u32 residue;
+};
+
+/**
* struct dma_device - info on the entity supplying DMA services
* @chancnt: how many DMA channels are supported
* @privatecnt: how many DMA channels are requested by dma_request_channel
@@ -261,8 +291,12 @@ struct dma_async_tx_descriptor {
* @device_prep_dma_memset: prepares a memset operation
* @device_prep_dma_interrupt: prepares an end of chain interrupt operation
* @device_prep_slave_sg: prepares a slave dma operation
- * @device_terminate_all: terminate all pending operations
- * @device_is_tx_complete: poll for transaction completion
+ * @device_control: manipulate all pending operations on a channel, returns
+ * zero or error code
+ * @device_tx_status: poll for transaction completion, the optional
+ * txstate parameter can be supplied with a pointer to get a
+ * struct with auxiliary transfer status information, otherwise the call
+ * will just return a simple status code
* @device_issue_pending: push pending transactions to hardware
*/
struct dma_device {
@@ -313,11 +347,11 @@ struct dma_device {
struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_data_direction direction,
unsigned long flags);
- void (*device_terminate_all)(struct dma_chan *chan);
+ int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd);
- enum dma_status (*device_is_tx_complete)(struct dma_chan *chan,
- dma_cookie_t cookie, dma_cookie_t *last,
- dma_cookie_t *used);
+ enum dma_status (*device_tx_status)(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate);
void (*device_issue_pending)(struct dma_chan *chan);
};
@@ -558,7 +592,15 @@ static inline void dma_async_issue_pending(struct dma_chan *chan)
static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
{
- return chan->device->device_is_tx_complete(chan, cookie, last, used);
+ struct dma_tx_state state;
+ enum dma_status status;
+
+ status = chan->device->device_tx_status(chan, cookie, &state);
+ if (last)
+ *last = state.last;
+ if (used)
+ *used = state.used;
+ return status;
}
#define dma_async_memcpy_complete(chan, cookie, last, used)\
@@ -586,6 +628,16 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
return DMA_IN_PROGRESS;
}
+static inline void
+dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
+{
+ if (st) {
+ st->last = last;
+ st->used = used;
+ st->residue = residue;
+ }
+}
+
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
#ifdef CONFIG_DMA_ENGINE
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
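
A hedged sketch of the reworked hooks above: a driver's device_tx_status() fills the
optional dma_tx_state with dma_set_tx_state(), and a client pauses a channel through
device_control(). The foo_* names and helpers are illustrative, not part of this patch.

static enum dma_status foo_tx_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct foo_chan *fc = to_foo_chan(chan);	/* assumed driver helper */

	/* txstate may be NULL; dma_set_tx_state() copes with that */
	dma_set_tx_state(txstate, fc->completed_cookie, fc->last_used,
			 foo_residue(fc, cookie));	/* assumed driver helper */
	return dma_async_is_complete(cookie, fc->completed_cookie, fc->last_used);
}

/* client side: pause a running channel (resume later with DMA_RESUME) */
static int foo_pause(struct dma_chan *chan)
{
	return chan->device->device_control(chan, DMA_PAUSE);
}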
diff --git a/include/linux/dnotify.h b/include/linux/dnotify.h
index ecc06286226d..3290555a52ee 100644
--- a/include/linux/dnotify.h
+++ b/include/linux/dnotify.h
@@ -28,6 +28,7 @@ struct dnotify_struct {
FS_CREATE | FS_DN_RENAME |\
FS_MOVED_FROM | FS_MOVED_TO)
+extern int dir_notify_enable;
extern void dnotify_flush(struct file *, fl_owner_t);
extern int fcntl_dirnotify(int, struct file *, unsigned long);
diff --git a/include/linux/dqblk_xfs.h b/include/linux/dqblk_xfs.h
index 527504c11c5e..4389ae72024e 100644
--- a/include/linux/dqblk_xfs.h
+++ b/include/linux/dqblk_xfs.h
@@ -110,6 +110,15 @@ typedef struct fs_disk_quota {
#define FS_DQ_WARNS_MASK (FS_DQ_BWARNS | FS_DQ_IWARNS | FS_DQ_RTBWARNS)
/*
+ * Accounting values. These can only be set for filesystems with
+ * non-transactional quotas that require quotacheck(8) in userspace.
+ */
+#define FS_DQ_BCOUNT (1<<12)
+#define FS_DQ_ICOUNT (1<<13)
+#define FS_DQ_RTBCOUNT (1<<14)
+#define FS_DQ_ACCT_MASK (FS_DQ_BCOUNT | FS_DQ_ICOUNT | FS_DQ_RTBCOUNT)
+
+/*
* Various flags related to quotactl(2). Only relevant to XFS filesystems.
*/
#define XFS_QUOTA_UDQ_ACCT (1<<0) /* user quota accounting */
diff --git a/include/linux/ds2782_battery.h b/include/linux/ds2782_battery.h
new file mode 100644
index 000000000000..b4e281f65c15
--- /dev/null
+++ b/include/linux/ds2782_battery.h
@@ -0,0 +1,8 @@
+#ifndef __LINUX_DS2782_BATTERY_H
+#define __LINUX_DS2782_BATTERY_H
+
+struct ds278x_platform_data {
+ int rsns;
+};
+
+#endif
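
A board-code sketch of how the new platform data might be handed to the driver over
I2C. The I2C address, and the assumption that rsns carries the sense-resistor value
(the header does not document a unit), are illustrative.

#include <linux/i2c.h>
#include <linux/ds2782_battery.h>

static struct ds278x_platform_data example_ds2782_pdata = {
	.rsns = 20,				/* assumed sense-resistor value */
};

static struct i2c_board_info example_i2c_devs[] __initdata = {
	{
		I2C_BOARD_INFO("ds2782", 0x34),	/* address is illustrative */
		.platform_data = &example_ds2782_pdata,
	},
};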
diff --git a/include/linux/edac_mce.h b/include/linux/edac_mce.h
new file mode 100644
index 000000000000..f974fc035363
--- /dev/null
+++ b/include/linux/edac_mce.h
@@ -0,0 +1,31 @@
+/* Provides edac interface to mcelog events
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License version 2.
+ *
+ * Copyright (c) 2009 by:
+ * Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * Red Hat Inc. http://www.redhat.com
+ */
+
+#if defined(CONFIG_EDAC_MCE) || \
+ (defined(CONFIG_EDAC_MCE_MODULE) && defined(MODULE))
+
+#include <asm/mce.h>
+#include <linux/list.h>
+
+struct edac_mce {
+ struct list_head list;
+
+ void *priv;
+ int (*check_error)(void *priv, struct mce *mce);
+};
+
+int edac_mce_register(struct edac_mce *edac_mce);
+void edac_mce_unregister(struct edac_mce *edac_mce);
+int edac_mce_parse(struct mce *mce);
+
+#else
+#define edac_mce_parse(mce) (0)
+#endif
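
A consumer sketch for the new interface: an EDAC driver registers a check_error()
callback so it can claim machine-check events from the mcelog path. All example_*
names and the private structure are assumptions, not taken from this patch.

struct example_edac_priv {			/* hypothetical driver state */
	/* ... */
};

static int example_check_error(void *priv, struct mce *mce)
{
	struct example_edac_priv *p = priv;

	/* return non-zero if this driver claimed/handled the event */
	return example_decode_mce(p, mce);	/* assumed driver helper */
}

static struct edac_mce example_edac_mce = {
	.check_error	= example_check_error,
};

static int example_edac_setup(struct example_edac_priv *p)
{
	example_edac_mce.priv = p;
	return edac_mce_register(&example_edac_mce);
}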
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 1cb3372e65d8..2c958f4fce1e 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -14,6 +14,9 @@ typedef void (elevator_merged_fn) (struct request_queue *, struct request *, int
typedef int (elevator_allow_merge_fn) (struct request_queue *, struct request *, struct bio *);
+typedef void (elevator_bio_merged_fn) (struct request_queue *,
+ struct request *, struct bio *);
+
typedef int (elevator_dispatch_fn) (struct request_queue *, int);
typedef void (elevator_add_req_fn) (struct request_queue *, struct request *);
@@ -36,6 +39,7 @@ struct elevator_ops
elevator_merged_fn *elevator_merged_fn;
elevator_merge_req_fn *elevator_merge_req_fn;
elevator_allow_merge_fn *elevator_allow_merge_fn;
+ elevator_bio_merged_fn *elevator_bio_merged_fn;
elevator_dispatch_fn *elevator_dispatch_fn;
elevator_add_req_fn *elevator_add_req_fn;
@@ -103,6 +107,8 @@ extern int elv_merge(struct request_queue *, struct request **, struct bio *);
extern void elv_merge_requests(struct request_queue *, struct request *,
struct request *);
extern void elv_merged_request(struct request_queue *, struct request *, int);
+extern void elv_bio_merged(struct request_queue *q, struct request *,
+ struct bio *);
extern void elv_requeue_request(struct request_queue *, struct request *);
extern int elv_queue_empty(struct request_queue *);
extern struct request *elv_former_request(struct request_queue *, struct request *);
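
Scheduler-side sketch of the new hook: an I/O scheduler that wants to hear about bios
merged into existing requests fills elevator_bio_merged_fn in its elevator_ops; the
block layer invokes it through elv_bio_merged(). Names other than the ops field are
illustrative.

static void example_bio_merged(struct request_queue *q, struct request *rq,
			       struct bio *bio)
{
	/* e.g. account the merged bytes to the submitting group */
}

static struct elevator_type iosched_example = {
	.ops = {
		.elevator_bio_merged_fn	= example_bio_merged,
		/* remaining hooks omitted */
	},
	.elevator_name = "example",
};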
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index b33f316bb92e..276b40a16835 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -310,6 +310,7 @@ struct ethtool_perm_addr {
enum ethtool_flags {
ETH_FLAG_LRO = (1 << 15), /* LRO is enabled */
ETH_FLAG_NTUPLE = (1 << 27), /* N-tuple filters enabled */
+ ETH_FLAG_RXHASH = (1 << 28),
};
/* The following structures are for supporting RX network flow
@@ -490,12 +491,12 @@ void ethtool_ntuple_flush(struct net_device *dev);
* get_ufo: Report whether UDP fragmentation offload is enabled
* set_ufo: Turn UDP fragmentation offload on or off
* self_test: Run specified self-tests
- * get_strings: Return a set of strings that describe the requested objects
+ * get_strings: Return a set of strings that describe the requested objects
* phys_id: Identify the device
* get_stats: Return statistics about the device
* get_flags: get 32-bit flags bitmap
* set_flags: set 32-bit flags bitmap
- *
+ *
* Description:
*
* get_settings:
@@ -531,14 +532,20 @@ struct ethtool_ops {
int (*nway_reset)(struct net_device *);
u32 (*get_link)(struct net_device *);
int (*get_eeprom_len)(struct net_device *);
- int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);
- int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);
+ int (*get_eeprom)(struct net_device *,
+ struct ethtool_eeprom *, u8 *);
+ int (*set_eeprom)(struct net_device *,
+ struct ethtool_eeprom *, u8 *);
int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *);
int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *);
- void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *);
- int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *);
- void (*get_pauseparam)(struct net_device *, struct ethtool_pauseparam*);
- int (*set_pauseparam)(struct net_device *, struct ethtool_pauseparam*);
+ void (*get_ringparam)(struct net_device *,
+ struct ethtool_ringparam *);
+ int (*set_ringparam)(struct net_device *,
+ struct ethtool_ringparam *);
+ void (*get_pauseparam)(struct net_device *,
+ struct ethtool_pauseparam*);
+ int (*set_pauseparam)(struct net_device *,
+ struct ethtool_pauseparam*);
u32 (*get_rx_csum)(struct net_device *);
int (*set_rx_csum)(struct net_device *, u32);
u32 (*get_tx_csum)(struct net_device *);
@@ -550,21 +557,24 @@ struct ethtool_ops {
void (*self_test)(struct net_device *, struct ethtool_test *, u64 *);
void (*get_strings)(struct net_device *, u32 stringset, u8 *);
int (*phys_id)(struct net_device *, u32);
- void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, u64 *);
+ void (*get_ethtool_stats)(struct net_device *,
+ struct ethtool_stats *, u64 *);
int (*begin)(struct net_device *);
void (*complete)(struct net_device *);
- u32 (*get_ufo)(struct net_device *);
- int (*set_ufo)(struct net_device *, u32);
- u32 (*get_flags)(struct net_device *);
- int (*set_flags)(struct net_device *, u32);
- u32 (*get_priv_flags)(struct net_device *);
- int (*set_priv_flags)(struct net_device *, u32);
+ u32 (*get_ufo)(struct net_device *);
+ int (*set_ufo)(struct net_device *, u32);
+ u32 (*get_flags)(struct net_device *);
+ int (*set_flags)(struct net_device *, u32);
+ u32 (*get_priv_flags)(struct net_device *);
+ int (*set_priv_flags)(struct net_device *, u32);
int (*get_sset_count)(struct net_device *, int);
- int (*get_rxnfc)(struct net_device *, struct ethtool_rxnfc *, void *);
+ int (*get_rxnfc)(struct net_device *,
+ struct ethtool_rxnfc *, void *);
int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *);
- int (*flash_device)(struct net_device *, struct ethtool_flash *);
+ int (*flash_device)(struct net_device *, struct ethtool_flash *);
int (*reset)(struct net_device *, u32 *);
- int (*set_rx_ntuple)(struct net_device *, struct ethtool_rx_ntuple *);
+ int (*set_rx_ntuple)(struct net_device *,
+ struct ethtool_rx_ntuple *);
int (*get_rx_ntuple)(struct net_device *, u32 stringset, void *);
};
#endif /* __KERNEL__ */
@@ -576,29 +586,29 @@ struct ethtool_ops {
#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers. */
#define ETHTOOL_GWOL 0x00000005 /* Get wake-on-lan options. */
#define ETHTOOL_SWOL 0x00000006 /* Set wake-on-lan options. */
-#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */
-#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level. */
+#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */
+#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level. */
#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation. */
#define ETHTOOL_GLINK 0x0000000a /* Get link status (ethtool_value) */
-#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */
-#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data. */
+#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */
+#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data. */
#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */
#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */
#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */
#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters. */
#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */
#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. */
-#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */
-#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */
-#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */
-#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */
+#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */
+#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */
+#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */
+#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */
#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable
* (ethtool_value) */
#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable
* (ethtool_value). */
#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test. */
#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */
-#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */
+#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */
#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */
#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */
#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */
@@ -609,24 +619,24 @@ struct ethtool_ops {
#define ETHTOOL_SGSO 0x00000024 /* Set GSO enable (ethtool_value) */
#define ETHTOOL_GFLAGS 0x00000025 /* Get flags bitmap(ethtool_value) */
#define ETHTOOL_SFLAGS 0x00000026 /* Set flags bitmap(ethtool_value) */
-#define ETHTOOL_GPFLAGS 0x00000027 /* Get driver-private flags bitmap */
-#define ETHTOOL_SPFLAGS 0x00000028 /* Set driver-private flags bitmap */
+#define ETHTOOL_GPFLAGS 0x00000027 /* Get driver-private flags bitmap */
+#define ETHTOOL_SPFLAGS 0x00000028 /* Set driver-private flags bitmap */
-#define ETHTOOL_GRXFH 0x00000029 /* Get RX flow hash configuration */
-#define ETHTOOL_SRXFH 0x0000002a /* Set RX flow hash configuration */
+#define ETHTOOL_GRXFH 0x00000029 /* Get RX flow hash configuration */
+#define ETHTOOL_SRXFH 0x0000002a /* Set RX flow hash configuration */
#define ETHTOOL_GGRO 0x0000002b /* Get GRO enable (ethtool_value) */
#define ETHTOOL_SGRO 0x0000002c /* Set GRO enable (ethtool_value) */
-#define ETHTOOL_GRXRINGS 0x0000002d /* Get RX rings available for LB */
-#define ETHTOOL_GRXCLSRLCNT 0x0000002e /* Get RX class rule count */
-#define ETHTOOL_GRXCLSRULE 0x0000002f /* Get RX classification rule */
-#define ETHTOOL_GRXCLSRLALL 0x00000030 /* Get all RX classification rule */
-#define ETHTOOL_SRXCLSRLDEL 0x00000031 /* Delete RX classification rule */
-#define ETHTOOL_SRXCLSRLINS 0x00000032 /* Insert RX classification rule */
-#define ETHTOOL_FLASHDEV 0x00000033 /* Flash firmware to device */
-#define ETHTOOL_RESET 0x00000034 /* Reset hardware */
-#define ETHTOOL_SRXNTUPLE 0x00000035 /* Add an n-tuple filter to device */
-#define ETHTOOL_GRXNTUPLE 0x00000036 /* Get n-tuple filters from device */
-#define ETHTOOL_GSSET_INFO 0x00000037 /* Get string set info */
+#define ETHTOOL_GRXRINGS 0x0000002d /* Get RX rings available for LB */
+#define ETHTOOL_GRXCLSRLCNT 0x0000002e /* Get RX class rule count */
+#define ETHTOOL_GRXCLSRULE 0x0000002f /* Get RX classification rule */
+#define ETHTOOL_GRXCLSRLALL 0x00000030 /* Get all RX classification rule */
+#define ETHTOOL_SRXCLSRLDEL 0x00000031 /* Delete RX classification rule */
+#define ETHTOOL_SRXCLSRLINS 0x00000032 /* Insert RX classification rule */
+#define ETHTOOL_FLASHDEV 0x00000033 /* Flash firmware to device */
+#define ETHTOOL_RESET 0x00000034 /* Reset hardware */
+#define ETHTOOL_SRXNTUPLE 0x00000035 /* Add an n-tuple filter to device */
+#define ETHTOOL_GRXNTUPLE 0x00000036 /* Get n-tuple filters from device */
+#define ETHTOOL_GSSET_INFO 0x00000037 /* Get string set info */
/* compatibility with older code */
#define SPARC_ETH_GSET ETHTOOL_GSET
@@ -635,18 +645,18 @@ struct ethtool_ops {
/* Indicates what features are supported by the interface. */
#define SUPPORTED_10baseT_Half (1 << 0)
#define SUPPORTED_10baseT_Full (1 << 1)
-#define SUPPORTED_100baseT_Half (1 << 2)
-#define SUPPORTED_100baseT_Full (1 << 3)
+#define SUPPORTED_100baseT_Half (1 << 2)
+#define SUPPORTED_100baseT_Full (1 << 3)
#define SUPPORTED_1000baseT_Half (1 << 4)
#define SUPPORTED_1000baseT_Full (1 << 5)
#define SUPPORTED_Autoneg (1 << 6)
#define SUPPORTED_TP (1 << 7)
#define SUPPORTED_AUI (1 << 8)
#define SUPPORTED_MII (1 << 9)
-#define SUPPORTED_FIBRE (1 << 10)
+#define SUPPORTED_FIBRE (1 << 10)
#define SUPPORTED_BNC (1 << 11)
#define SUPPORTED_10000baseT_Full (1 << 12)
-#define SUPPORTED_Pause (1 << 13)
+#define SUPPORTED_Pause (1 << 13)
#define SUPPORTED_Asym_Pause (1 << 14)
#define SUPPORTED_2500baseX_Full (1 << 15)
#define SUPPORTED_Backplane (1 << 16)
@@ -656,8 +666,8 @@ struct ethtool_ops {
#define SUPPORTED_10000baseR_FEC (1 << 20)
/* Indicates what features are advertised by the interface. */
-#define ADVERTISED_10baseT_Half (1 << 0)
-#define ADVERTISED_10baseT_Full (1 << 1)
+#define ADVERTISED_10baseT_Half (1 << 0)
+#define ADVERTISED_10baseT_Full (1 << 1)
#define ADVERTISED_100baseT_Half (1 << 2)
#define ADVERTISED_100baseT_Full (1 << 3)
#define ADVERTISED_1000baseT_Half (1 << 4)
@@ -696,12 +706,12 @@ struct ethtool_ops {
#define DUPLEX_FULL 0x01
/* Which connector port. */
-#define PORT_TP 0x00
+#define PORT_TP 0x00
#define PORT_AUI 0x01
#define PORT_MII 0x02
#define PORT_FIBRE 0x03
#define PORT_BNC 0x04
-#define PORT_DA 0x05
+#define PORT_DA 0x05
#define PORT_NONE 0xef
#define PORT_OTHER 0xff
@@ -715,7 +725,7 @@ struct ethtool_ops {
/* Enable or disable autonegotiation. If this is set to enable,
* the forced link modes above are completely ignored.
*/
-#define AUTONEG_DISABLE 0x00
+#define AUTONEG_DISABLE 0x00
#define AUTONEG_ENABLE 0x01
/* Mode MDI or MDI-X */
@@ -746,8 +756,8 @@ struct ethtool_ops {
#define AH_V6_FLOW 0x0b
#define ESP_V6_FLOW 0x0c
#define IP_USER_FLOW 0x0d
-#define IPV4_FLOW 0x10
-#define IPV6_FLOW 0x11
+#define IPV4_FLOW 0x10
+#define IPV6_FLOW 0x11
/* L3-L4 network traffic flow hash options */
#define RXH_L2DA (1 << 1)
diff --git a/include/linux/ext2_fs_sb.h b/include/linux/ext2_fs_sb.h
index 1cdb66367c98..db4d9f586bb6 100644
--- a/include/linux/ext2_fs_sb.h
+++ b/include/linux/ext2_fs_sb.h
@@ -106,6 +106,15 @@ struct ext2_sb_info {
spinlock_t s_rsv_window_lock;
struct rb_root s_rsv_window_root;
struct ext2_reserve_window_node s_rsv_window_head;
+ /*
+ * s_lock protects against concurrent modifications of s_mount_state,
+ * s_blocks_last, s_overhead_last and the content of superblock's
+ * buffer pointed to by sbi->s_es.
+ *
+ * Note: It is used in ext2_show_options() to provide a consistent view
+ * of the mount options.
+ */
+ spinlock_t s_lock;
};
static inline spinlock_t *
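
A sketch of the locking rule documented above: updates of the in-memory superblock
buffer (sbi->s_es), s_mount_state, s_blocks_last or s_overhead_last now go under
sbi->s_lock. The function name is illustrative.

static void example_bump_mnt_count(struct super_block *sb)
{
	struct ext2_sb_info *sbi = EXT2_SB(sb);

	spin_lock(&sbi->s_lock);
	le16_add_cpu(&sbi->s_es->s_mnt_count, 1);
	spin_unlock(&sbi->s_lock);
}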
diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h
new file mode 100644
index 000000000000..f0949a57ca9d
--- /dev/null
+++ b/include/linux/fanotify.h
@@ -0,0 +1,105 @@
+#ifndef _LINUX_FANOTIFY_H
+#define _LINUX_FANOTIFY_H
+
+#include <linux/types.h>
+
+/* events that user-space can register for */
+#define FAN_ACCESS 0x00000001 /* File was accessed */
+#define FAN_MODIFY 0x00000002 /* File was modified */
+#define FAN_CLOSE_WRITE 0x00000008 /* Writable file closed */
+#define FAN_CLOSE_NOWRITE 0x00000010 /* Unwritable file closed */
+#define FAN_OPEN 0x00000020 /* File was opened */
+
+#define FAN_EVENT_ON_CHILD 0x08000000 /* interested in child events */
+
+/* FIXME currently Q's have no limit.... */
+#define FAN_Q_OVERFLOW 0x00004000 /* Event queued overflowed */
+
+#define FAN_OPEN_PERM 0x00010000 /* File open in perm check */
+#define FAN_ACCESS_PERM 0x00020000 /* File accessed in perm check */
+
+/* helper events */
+#define FAN_CLOSE (FAN_CLOSE_WRITE | FAN_CLOSE_NOWRITE) /* close */
+
+/* flags used for fanotify_init() */
+#define FAN_CLOEXEC 0x00000001
+#define FAN_NONBLOCK 0x00000002
+
+#define FAN_ALL_INIT_FLAGS (FAN_CLOEXEC | FAN_NONBLOCK)
+
+/* flags used for fanotify_modify_mark() */
+#define FAN_MARK_ADD 0x00000001
+#define FAN_MARK_REMOVE 0x00000002
+#define FAN_MARK_DONT_FOLLOW 0x00000004
+#define FAN_MARK_ONLYDIR 0x00000008
+#define FAN_MARK_MOUNT 0x00000010
+#define FAN_MARK_IGNORED_MASK 0x00000020
+#define FAN_MARK_IGNORED_SURV_MODIFY 0x00000040
+#define FAN_MARK_FLUSH 0x00000080
+
+#define FAN_ALL_MARK_FLAGS (FAN_MARK_ADD |\
+ FAN_MARK_REMOVE |\
+ FAN_MARK_DONT_FOLLOW |\
+ FAN_MARK_ONLYDIR |\
+ FAN_MARK_MOUNT |\
+ FAN_MARK_IGNORED_MASK |\
+ FAN_MARK_IGNORED_SURV_MODIFY)
+
+/*
+ * All of the events - we build the list by hand so that we can add flags in
+ * the future and not break backward compatibility. Apps will get only the
+ * events that they originally wanted. Be sure to add new events here!
+ */
+#define FAN_ALL_EVENTS (FAN_ACCESS |\
+ FAN_MODIFY |\
+ FAN_CLOSE |\
+ FAN_OPEN)
+
+/*
+ * All events which require a permission response from userspace
+ */
+#define FAN_ALL_PERM_EVENTS (FAN_OPEN_PERM |\
+ FAN_ACCESS_PERM)
+
+#define FAN_ALL_OUTGOING_EVENTS (FAN_ALL_EVENTS |\
+ FAN_ALL_PERM_EVENTS |\
+ FAN_Q_OVERFLOW)
+
+#define FANOTIFY_METADATA_VERSION 1
+
+struct fanotify_event_metadata {
+ __u32 event_len;
+ __u32 vers;
+ __s32 fd;
+ __u64 mask;
+ __s64 pid;
+} __attribute__ ((packed));
+
+struct fanotify_response {
+ __s32 fd;
+ __u32 response;
+} __attribute__ ((packed));
+
+/* Legit userspace responses to a _PERM event */
+#define FAN_ALLOW 0x01
+#define FAN_DENY 0x02
+
+/* Helper functions to deal with fanotify_event_metadata buffers */
+#define FAN_EVENT_METADATA_LEN (sizeof(struct fanotify_event_metadata))
+
+#define FAN_EVENT_NEXT(meta, len) ((len) -= (meta)->event_len, \
+ (struct fanotify_event_metadata*)(((char *)(meta)) + \
+ (meta)->event_len))
+
+#define FAN_EVENT_OK(meta, len) ((long)(len) >= (long)FAN_EVENT_METADATA_LEN && \
+ (long)(meta)->event_len >= (long)FAN_EVENT_METADATA_LEN && \
+ (long)(meta)->event_len <= (long)(len))
+
+#ifdef __KERNEL__
+
+struct fanotify_wait {
+ struct fsnotify_event *event;
+ __s32 fd;
+};
+#endif /* __KERNEL__ */
+#endif /* _LINUX_FANOTIFY_H */
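
A userspace sketch of how the structures and helper macros above are meant to be used:
read a buffer of events, answer permission events, and walk the buffer with
FAN_EVENT_OK()/FAN_EVENT_NEXT(). Obtaining the fanotify file descriptor (the
fanotify_init()/fanotify_mark() syscalls) is outside this header and not shown.

#include <unistd.h>
#include <linux/fanotify.h>

static void example_handle_events(int fanotify_fd)
{
	char buf[4096];
	struct fanotify_event_metadata *meta = (void *)buf;
	ssize_t len = read(fanotify_fd, buf, sizeof(buf));

	while (FAN_EVENT_OK(meta, len)) {
		if (meta->mask & FAN_ALL_PERM_EVENTS) {
			struct fanotify_response resp = {
				.fd = meta->fd,
				.response = FAN_ALLOW,
			};
			write(fanotify_fd, &resp, sizeof(resp));
		}
		if (meta->fd >= 0)
			close(meta->fd);
		meta = FAN_EVENT_NEXT(meta, len);
	}
}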
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 29a0e3db9f43..151f5d703b7e 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -123,7 +123,8 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
#define SKF_AD_NLATTR_NEST 16
#define SKF_AD_MARK 20
#define SKF_AD_QUEUE 24
-#define SKF_AD_MAX 28
+#define SKF_AD_HATYPE 28
+#define SKF_AD_MAX 32
#define SKF_NET_OFF (-0x100000)
#define SKF_LL_OFF (-0x200000)
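
A classic-BPF sketch using the new ancillary load above: fetch the interface's ARP
hardware type via SKF_AD_HATYPE and accept only Ethernet frames. SKF_AD_OFF and
ARPHRD_ETHER come from elsewhere in the tree; the filter itself is illustrative.

#include <linux/filter.h>
#include <linux/if_arp.h>

static struct sock_filter example_hatype_filter[] = {
	BPF_STMT(BPF_LD  | BPF_W   | BPF_ABS, SKF_AD_OFF + SKF_AD_HATYPE),
	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ARPHRD_ETHER, 0, 1),
	BPF_STMT(BPF_RET | BPF_K, 0xffff),	/* accept */
	BPF_STMT(BPF_RET | BPF_K, 0),		/* drop */
};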
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index 4bd94bf5e739..a527d73f9966 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -55,13 +55,11 @@
#define CSR_DESCRIPTOR 0x01
#define CSR_VENDOR 0x03
#define CSR_HARDWARE_VERSION 0x04
-#define CSR_NODE_CAPABILITIES 0x0c
#define CSR_UNIT 0x11
#define CSR_SPECIFIER_ID 0x12
#define CSR_VERSION 0x13
#define CSR_DEPENDENT_INFO 0x14
#define CSR_MODEL 0x17
-#define CSR_INSTANCE 0x18
#define CSR_DIRECTORY_ID 0x20
struct fw_csr_iterator {
diff --git a/include/linux/firmware.h b/include/linux/firmware.h
index 043811f0d277..53d1e6c4f848 100644
--- a/include/linux/firmware.h
+++ b/include/linux/firmware.h
@@ -12,6 +12,7 @@
struct firmware {
size_t size;
const u8 *data;
+ struct page **pages;
};
struct device;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 44f35aea2f1f..49d367029e1c 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -90,6 +90,9 @@ struct inodes_stat_t {
/* Expect random access pattern */
#define FMODE_RANDOM ((__force fmode_t)0x1000)
+/* File was opened by fanotify and shouldn't generate fanotify events */
+#define FMODE_NONOTIFY ((__force fmode_t)16777216) /* 0x1000000 */
+
/*
* The below are the various read and write types that we support. Some of
* them include behavioral modifiers that send information down to the
@@ -407,9 +410,6 @@ extern int get_max_files(void);
extern int sysctl_nr_open;
extern struct inodes_stat_t inodes_stat;
extern int leases_enable, lease_break_time;
-#ifdef CONFIG_DNOTIFY
-extern int dir_notify_enable;
-#endif
struct buffer_head;
typedef int (get_block_t)(struct inode *inode, sector_t iblock,
@@ -651,6 +651,7 @@ struct block_device {
int bd_openers;
struct mutex bd_mutex; /* open/close mutex */
struct list_head bd_inodes;
+ void * bd_claiming;
void * bd_holder;
int bd_holders;
#ifdef CONFIG_SYSFS
@@ -767,12 +768,7 @@ struct inode {
#ifdef CONFIG_FSNOTIFY
__u32 i_fsnotify_mask; /* all events this inode cares about */
- struct hlist_head i_fsnotify_mark_entries; /* fsnotify mark entries */
-#endif
-
-#ifdef CONFIG_INOTIFY
- struct list_head inotify_watches; /* watches on this inode */
- struct mutex inotify_mutex; /* protects the watches list */
+ struct hlist_head i_fsnotify_marks;
#endif
unsigned long i_state;
@@ -1280,10 +1276,12 @@ static inline int lock_may_write(struct inode *inode, loff_t start,
struct fasync_struct {
- int magic;
- int fa_fd;
- struct fasync_struct *fa_next; /* singly linked list */
- struct file *fa_file;
+ spinlock_t fa_lock;
+ int magic;
+ int fa_fd;
+ struct fasync_struct *fa_next; /* singly linked list */
+ struct file *fa_file;
+ struct rcu_head fa_rcu;
};
#define FASYNC_MAGIC 0x4601
@@ -1292,8 +1290,6 @@ struct fasync_struct {
extern int fasync_helper(int, struct file *, int, struct fasync_struct **);
/* can be called from interrupts */
extern void kill_fasync(struct fasync_struct **, int, int);
-/* only for net: no internal synchronization */
-extern void __kill_fasync(struct fasync_struct *, int, int);
extern int __f_setown(struct file *filp, struct pid *, enum pid_type, int force);
extern int f_setown(struct file *filp, unsigned long arg, int force);
@@ -2362,6 +2358,8 @@ extern void simple_release_fs(struct vfsmount **mount, int *count);
extern ssize_t simple_read_from_buffer(void __user *to, size_t count,
loff_t *ppos, const void *from, size_t available);
+extern ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
+ const void __user *from, size_t count);
extern int simple_fsync(struct file *, struct dentry *, int);
@@ -2465,7 +2463,8 @@ int proc_nr_files(struct ctl_table *table, int write,
int __init get_filesystem_list(char *buf);
#define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE])
-#define OPEN_FMODE(flag) ((__force fmode_t)((flag + 1) & O_ACCMODE))
+#define OPEN_FMODE(flag) ((__force fmode_t)(((flag + 1) & O_ACCMODE) | \
+ (flag & FMODE_NONOTIFY)))
#endif /* __KERNEL__ */
#endif /* _LINUX_FS_H */
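
A sketch of the new simple_write_to_buffer() helper declared above, the write-side
counterpart of simple_read_from_buffer(); everything except the helper itself is
illustrative.

static char example_buf[64];

static ssize_t example_write(struct file *file, const char __user *ubuf,
			     size_t count, loff_t *ppos)
{
	return simple_write_to_buffer(example_buf, sizeof(example_buf),
				      ppos, ubuf, count);
}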
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index 01755909ce81..64efda9aae62 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -11,8 +11,6 @@
* (C) Copyright 2005 Robert Love
*/
-#include <linux/dnotify.h>
-#include <linux/inotify.h>
#include <linux/fsnotify_backend.h>
#include <linux/audit.h>
#include <linux/slab.h>
@@ -21,35 +19,51 @@
* fsnotify_d_instantiate - instantiate a dentry for inode
* Called with dcache_lock held.
*/
-static inline void fsnotify_d_instantiate(struct dentry *entry,
- struct inode *inode)
+static inline void fsnotify_d_instantiate(struct dentry *dentry,
+ struct inode *inode)
{
- __fsnotify_d_instantiate(entry, inode);
-
- inotify_d_instantiate(entry, inode);
+ __fsnotify_d_instantiate(dentry, inode);
}
/* Notify this dentry's parent about a child's events. */
-static inline void fsnotify_parent(struct dentry *dentry, __u32 mask)
+static inline void fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
{
- __fsnotify_parent(dentry, mask);
+ if (!dentry)
+ dentry = path->dentry;
+
+ __fsnotify_parent(path, dentry, mask);
+}
- inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name);
+/* simple call site for access decisions */
+static inline int fsnotify_perm(struct file *file, int mask)
+{
+ struct path *path = &file->f_path;
+ struct inode *inode = path->dentry->d_inode;
+ __u32 fsnotify_mask;
+
+ if (file->f_mode & FMODE_NONOTIFY)
+ return 0;
+ if (!(mask & (MAY_READ | MAY_OPEN)))
+ return 0;
+ if (mask & MAY_READ)
+ fsnotify_mask = FS_ACCESS_PERM;
+ if (mask & MAY_OPEN)
+ fsnotify_mask = FS_OPEN_PERM;
+
+ return fsnotify(inode, fsnotify_mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
}
/*
- * fsnotify_d_move - entry has been moved
- * Called with dcache_lock and entry->d_lock held.
+ * fsnotify_d_move - dentry has been moved
+ * Called with dcache_lock and dentry->d_lock held.
*/
-static inline void fsnotify_d_move(struct dentry *entry)
+static inline void fsnotify_d_move(struct dentry *dentry)
{
/*
- * On move we need to update entry->d_flags to indicate if the new parent
- * cares about events from this entry.
+ * On move we need to update dentry->d_flags to indicate if the new parent
+ * cares about events from this dentry.
*/
- __fsnotify_update_dcache_flags(entry);
-
- inotify_d_move(entry);
+ __fsnotify_update_dcache_flags(dentry);
}
/*
@@ -57,8 +71,6 @@ static inline void fsnotify_d_move(struct dentry *entry)
*/
static inline void fsnotify_link_count(struct inode *inode)
{
- inotify_inode_queue_event(inode, IN_ATTRIB, 0, NULL, NULL);
-
fsnotify(inode, FS_ATTRIB, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
}
@@ -66,45 +78,31 @@ static inline void fsnotify_link_count(struct inode *inode)
* fsnotify_move - file old_name at old_dir was moved to new_name at new_dir
*/
static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
- const char *old_name,
+ const unsigned char *old_name,
int isdir, struct inode *target, struct dentry *moved)
{
struct inode *source = moved->d_inode;
- u32 in_cookie = inotify_get_cookie();
u32 fs_cookie = fsnotify_get_cookie();
__u32 old_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_FROM);
__u32 new_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_TO);
- const char *new_name = moved->d_name.name;
+ const unsigned char *new_name = moved->d_name.name;
if (old_dir == new_dir)
old_dir_mask |= FS_DN_RENAME;
if (isdir) {
- isdir = IN_ISDIR;
old_dir_mask |= FS_IN_ISDIR;
new_dir_mask |= FS_IN_ISDIR;
}
- inotify_inode_queue_event(old_dir, IN_MOVED_FROM|isdir, in_cookie, old_name,
- source);
- inotify_inode_queue_event(new_dir, IN_MOVED_TO|isdir, in_cookie, new_name,
- source);
-
fsnotify(old_dir, old_dir_mask, old_dir, FSNOTIFY_EVENT_INODE, old_name, fs_cookie);
fsnotify(new_dir, new_dir_mask, new_dir, FSNOTIFY_EVENT_INODE, new_name, fs_cookie);
- if (target) {
- inotify_inode_queue_event(target, IN_DELETE_SELF, 0, NULL, NULL);
- inotify_inode_is_dead(target);
-
- /* this is really a link_count change not a removal */
+ if (target)
fsnotify_link_count(target);
- }
- if (source) {
- inotify_inode_queue_event(source, IN_MOVE_SELF, 0, NULL, NULL);
+ if (source)
fsnotify(source, FS_MOVE_SELF, moved->d_inode, FSNOTIFY_EVENT_INODE, NULL, 0);
- }
audit_inode_child(moved, new_dir);
}
@@ -117,6 +115,14 @@ static inline void fsnotify_inode_delete(struct inode *inode)
}
/*
+ * fsnotify_vfsmount_delete - a vfsmount is being destroyed, clean up is needed
+ */
+static inline void fsnotify_vfsmount_delete(struct vfsmount *mnt)
+{
+ __fsnotify_vfsmount_delete(mnt);
+}
+
+/*
* fsnotify_nameremove - a filename was removed from a directory
*/
static inline void fsnotify_nameremove(struct dentry *dentry, int isdir)
@@ -126,7 +132,7 @@ static inline void fsnotify_nameremove(struct dentry *dentry, int isdir)
if (isdir)
mask |= FS_IN_ISDIR;
- fsnotify_parent(dentry, mask);
+ fsnotify_parent(NULL, dentry, mask);
}
/*
@@ -134,9 +140,6 @@ static inline void fsnotify_nameremove(struct dentry *dentry, int isdir)
*/
static inline void fsnotify_inoderemove(struct inode *inode)
{
- inotify_inode_queue_event(inode, IN_DELETE_SELF, 0, NULL, NULL);
- inotify_inode_is_dead(inode);
-
fsnotify(inode, FS_DELETE_SELF, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
__fsnotify_inode_delete(inode);
}
@@ -146,8 +149,6 @@ static inline void fsnotify_inoderemove(struct inode *inode)
*/
static inline void fsnotify_create(struct inode *inode, struct dentry *dentry)
{
- inotify_inode_queue_event(inode, IN_CREATE, 0, dentry->d_name.name,
- dentry->d_inode);
audit_inode_child(dentry, inode);
fsnotify(inode, FS_CREATE, dentry->d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0);
@@ -160,8 +161,6 @@ static inline void fsnotify_create(struct inode *inode, struct dentry *dentry)
*/
static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct dentry *new_dentry)
{
- inotify_inode_queue_event(dir, IN_CREATE, 0, new_dentry->d_name.name,
- inode);
fsnotify_link_count(inode);
audit_inode_child(new_dentry, dir);
@@ -176,7 +175,6 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
__u32 mask = (FS_CREATE | FS_IN_ISDIR);
struct inode *d_inode = dentry->d_inode;
- inotify_inode_queue_event(inode, mask, 0, dentry->d_name.name, d_inode);
audit_inode_child(dentry, inode);
fsnotify(inode, mask, d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0);
@@ -185,52 +183,55 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
/*
* fsnotify_access - file was read
*/
-static inline void fsnotify_access(struct dentry *dentry)
+static inline void fsnotify_access(struct file *file)
{
- struct inode *inode = dentry->d_inode;
+ struct path *path = &file->f_path;
+ struct inode *inode = path->dentry->d_inode;
__u32 mask = FS_ACCESS;
if (S_ISDIR(inode->i_mode))
mask |= FS_IN_ISDIR;
- inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
-
- fsnotify_parent(dentry, mask);
- fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+ if (!(file->f_mode & FMODE_NONOTIFY)) {
+ fsnotify_parent(path, NULL, mask);
+ fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
+ }
}
/*
* fsnotify_modify - file was modified
*/
-static inline void fsnotify_modify(struct dentry *dentry)
+static inline void fsnotify_modify(struct file *file)
{
- struct inode *inode = dentry->d_inode;
+ struct path *path = &file->f_path;
+ struct inode *inode = path->dentry->d_inode;
__u32 mask = FS_MODIFY;
if (S_ISDIR(inode->i_mode))
mask |= FS_IN_ISDIR;
- inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
-
- fsnotify_parent(dentry, mask);
- fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+ if (!(file->f_mode & FMODE_NONOTIFY)) {
+ fsnotify_parent(path, NULL, mask);
+ fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
+ }
}
/*
* fsnotify_open - file was opened
*/
-static inline void fsnotify_open(struct dentry *dentry)
+static inline void fsnotify_open(struct file *file)
{
- struct inode *inode = dentry->d_inode;
+ struct path *path = &file->f_path;
+ struct inode *inode = path->dentry->d_inode;
__u32 mask = FS_OPEN;
if (S_ISDIR(inode->i_mode))
mask |= FS_IN_ISDIR;
- inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
-
- fsnotify_parent(dentry, mask);
- fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+ if (!(file->f_mode & FMODE_NONOTIFY)) {
+ fsnotify_parent(path, NULL, mask);
+ fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
+ }
}
/*
@@ -238,18 +239,18 @@ static inline void fsnotify_open(struct dentry *dentry)
*/
static inline void fsnotify_close(struct file *file)
{
- struct dentry *dentry = file->f_path.dentry;
- struct inode *inode = dentry->d_inode;
+ struct path *path = &file->f_path;
+ struct inode *inode = file->f_path.dentry->d_inode;
fmode_t mode = file->f_mode;
__u32 mask = (mode & FMODE_WRITE) ? FS_CLOSE_WRITE : FS_CLOSE_NOWRITE;
if (S_ISDIR(inode->i_mode))
mask |= FS_IN_ISDIR;
- inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
-
- fsnotify_parent(dentry, mask);
- fsnotify(inode, mask, file, FSNOTIFY_EVENT_FILE, NULL, 0);
+ if (!(file->f_mode & FMODE_NONOTIFY)) {
+ fsnotify_parent(path, NULL, mask);
+ fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
+ }
}
/*
@@ -263,9 +264,7 @@ static inline void fsnotify_xattr(struct dentry *dentry)
if (S_ISDIR(inode->i_mode))
mask |= FS_IN_ISDIR;
- inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
-
- fsnotify_parent(dentry, mask);
+ fsnotify_parent(NULL, dentry, mask);
fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
}
@@ -299,19 +298,18 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
if (mask) {
if (S_ISDIR(inode->i_mode))
mask |= FS_IN_ISDIR;
- inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
- fsnotify_parent(dentry, mask);
+ fsnotify_parent(NULL, dentry, mask);
fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
}
}
-#if defined(CONFIG_INOTIFY) || defined(CONFIG_FSNOTIFY) /* notify helpers */
+#if defined(CONFIG_FSNOTIFY) /* notify helpers */
/*
* fsnotify_oldname_init - save off the old filename before we change it
*/
-static inline const char *fsnotify_oldname_init(const char *name)
+static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
{
return kstrdup(name, GFP_KERNEL);
}
@@ -319,22 +317,22 @@ static inline const char *fsnotify_oldname_init(const char *name)
/*
* fsnotify_oldname_free - free the name we got from fsnotify_oldname_init
*/
-static inline void fsnotify_oldname_free(const char *old_name)
+static inline void fsnotify_oldname_free(const unsigned char *old_name)
{
kfree(old_name);
}
-#else /* CONFIG_INOTIFY || CONFIG_FSNOTIFY */
+#else /* CONFIG_FSNOTIFY */
-static inline const char *fsnotify_oldname_init(const char *name)
+static inline const char *fsnotify_oldname_init(const unsigned char *name)
{
return NULL;
}
-static inline void fsnotify_oldname_free(const char *old_name)
+static inline void fsnotify_oldname_free(const unsigned char *old_name)
{
}
-#endif /* ! CONFIG_INOTIFY */
+#endif /* CONFIG_FSNOTIFY */
#endif /* _LINUX_FS_NOTIFY_H */
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 4d6f47b51189..b0d00fd6bfad 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -41,6 +41,9 @@
#define FS_Q_OVERFLOW 0x00004000 /* Event queued overflowed */
#define FS_IN_IGNORED 0x00008000 /* last inotify event here */
+#define FS_OPEN_PERM 0x00010000 /* open event in a permission hook */
+#define FS_ACCESS_PERM 0x00020000 /* access event in a permissions hook */
+
#define FS_IN_ISDIR 0x40000000 /* event occurred against dir */
#define FS_IN_ONESHOT 0x80000000 /* only send event once */
@@ -58,13 +61,11 @@
FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE |\
FS_DELETE)
-/* listeners that hard code group numbers near the top */
-#define DNOTIFY_GROUP_NUM UINT_MAX
-#define INOTIFY_GROUP_NUM (DNOTIFY_GROUP_NUM-1)
+#define FS_MOVE (FS_MOVED_FROM | FS_MOVED_TO)
struct fsnotify_group;
struct fsnotify_event;
-struct fsnotify_mark_entry;
+struct fsnotify_mark;
struct fsnotify_event_private_data;
/*
@@ -80,10 +81,12 @@ struct fsnotify_event_private_data;
* valid group and inode to use to clean up.
*/
struct fsnotify_ops {
- bool (*should_send_event)(struct fsnotify_group *group, struct inode *inode, __u32 mask);
+ bool (*should_send_event)(struct fsnotify_group *group, struct inode *inode,
+ struct vfsmount *mnt, __u32 mask, void *data,
+ int data_type);
int (*handle_event)(struct fsnotify_group *group, struct fsnotify_event *event);
void (*free_group_priv)(struct fsnotify_group *group);
- void (*freeing_mark)(struct fsnotify_mark_entry *entry, struct fsnotify_group *group);
+ void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
void (*free_event_priv)(struct fsnotify_event_private_data *priv);
};
@@ -96,10 +99,14 @@ struct fsnotify_ops {
struct fsnotify_group {
/*
* global list of all groups receiving events from fsnotify.
- * anchored by fsnotify_groups and protected by either fsnotify_grp_mutex
+ * anchored by fsnotify_inode_groups and protected by either fsnotify_grp_mutex
* or fsnotify_grp_srcu depending on write vs read.
*/
- struct list_head group_list;
+ struct list_head inode_group_list;
+ /*
+ * same as above except anchored by fsnotify_vfsmount_groups
+ */
+ struct list_head vfsmount_group_list;
/*
* Defines all of the event types in which this group is interested.
@@ -119,7 +126,6 @@ struct fsnotify_group {
* closed.
*/
atomic_t refcnt; /* things with interest in this group */
- unsigned int group_num; /* simply prevents accidental group collision */
const struct fsnotify_ops *ops; /* how this group handles things */
@@ -130,15 +136,17 @@ struct fsnotify_group {
unsigned int q_len; /* events on the queue */
unsigned int max_events; /* maximum events allowed on the list */
- /* stores all fastapth entries assoc with this group so they can be cleaned on unregister */
- spinlock_t mark_lock; /* protect mark_entries list */
- atomic_t num_marks; /* 1 for each mark entry and 1 for not being
+ /* stores all fastpath marks assoc with this group so they can be cleaned on unregister */
+ spinlock_t mark_lock; /* protect marks_list */
+ atomic_t num_marks; /* 1 for each mark and 1 for not being
* past the point of no return when freeing
* a group */
- struct list_head mark_entries; /* all inode mark entries for this group */
+ struct list_head marks_list; /* all inode marks for this group */
+ unsigned int priority; /* order of this group compared to others */
/* prevents double list_del of group_list. protected by global fsnotify_grp_mutex */
- bool on_group_list;
+ bool on_inode_group_list;
+ bool on_vfsmount_group_list;
/* groups can define private fields here or use the void *private */
union {
@@ -152,6 +160,14 @@ struct fsnotify_group {
struct user_struct *user;
} inotify_data;
#endif
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+ struct fanotify_group_private_data {
+ /* allows a group to block waiting for a userspace response */
+ struct mutex access_mutex;
+ struct list_head access_list;
+ wait_queue_head_t access_waitq;
+ } fanotify_data;
+#endif
};
};
@@ -210,20 +226,42 @@ struct fsnotify_event {
#define FSNOTIFY_EVENT_NONE 0
#define FSNOTIFY_EVENT_PATH 1
#define FSNOTIFY_EVENT_INODE 2
-#define FSNOTIFY_EVENT_FILE 3
int data_type; /* which of the above union we have */
atomic_t refcnt; /* how many groups still are using/need to send this event */
__u32 mask; /* the type of access, bitwise OR for FS_* event types */
u32 sync_cookie; /* used to correlate events, namely inotify mv events */
- char *file_name;
+ const unsigned char *file_name;
size_t name_len;
+ struct pid *tgid;
+
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+ __u32 response; /* userspace answer to question */
+#endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */
struct list_head private_data_list; /* groups can store private data here */
};
/*
- * a mark is simply an entry attached to an in core inode which allows an
+ * Inode specific fields in an fsnotify_mark
+ */
+struct fsnotify_inode_mark {
+ struct inode *inode; /* inode this mark is associated with */
+ struct hlist_node i_list; /* list of marks by inode->i_fsnotify_marks */
+ struct list_head free_i_list; /* tmp list used when freeing this mark */
+};
+
+/*
+ * Mount point specific fields in an fsnotify_mark
+ */
+struct fsnotify_vfsmount_mark {
+ struct vfsmount *mnt; /* vfsmount this mark is associated with */
+ struct hlist_node m_list; /* list of marks by vfsmount->mnt_fsnotify_marks */
+ struct list_head free_m_list; /* tmp list used when freeing this mark */
+};
+
+/*
+ * a mark is simply an object attached to an in core inode which allows an
* fsnotify listener to indicate they are either no longer interested in events
* of a type matching mask or only interested in those events.
*
@@ -232,19 +270,26 @@ struct fsnotify_event {
* (such as dnotify) will flush these when the open fd is closed and not at
* inode eviction or modification.
*/
-struct fsnotify_mark_entry {
- __u32 mask; /* mask this mark entry is for */
+struct fsnotify_mark {
+ __u32 mask; /* mask this mark is for */
/* we hold ref for each i_list and g_list. also one ref for each 'thing'
* in kernel that found and may be using this mark. */
atomic_t refcnt; /* active things looking at this mark */
- struct inode *inode; /* inode this entry is associated with */
- struct fsnotify_group *group; /* group this mark entry is for */
- struct hlist_node i_list; /* list of mark_entries by inode->i_fsnotify_mark_entries */
- struct list_head g_list; /* list of mark_entries by group->i_fsnotify_mark_entries */
- spinlock_t lock; /* protect group, inode, and killme */
- struct list_head free_i_list; /* tmp list used when freeing this mark */
+ struct fsnotify_group *group; /* group this mark is for */
+ struct list_head g_list; /* list of marks by group->marks_list */
+ spinlock_t lock; /* protect group and inode */
+ union {
+ struct fsnotify_inode_mark i;
+ struct fsnotify_vfsmount_mark m;
+ };
+ __u32 ignored_mask; /* events types to ignore */
struct list_head free_g_list; /* tmp list used when freeing this mark */
- void (*free_mark)(struct fsnotify_mark_entry *entry); /* called on final put+free */
+#define FSNOTIFY_MARK_FLAG_INODE 0x01
+#define FSNOTIFY_MARK_FLAG_VFSMOUNT 0x02
+#define FSNOTIFY_MARK_FLAG_OBJECT_PINNED 0x04
+#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x08
+ unsigned int flags; /* vfsmount or inode mark? */
+ void (*free_mark)(struct fsnotify_mark *mark); /* called on final put+free */
};
#ifdef CONFIG_FSNOTIFY
@@ -252,10 +297,11 @@ struct fsnotify_mark_entry {
/* called from the vfs helpers */
/* main fsnotify call to send events */
-extern void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
- const char *name, u32 cookie);
-extern void __fsnotify_parent(struct dentry *dentry, __u32 mask);
+extern int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
+ const unsigned char *name, u32 cookie);
+extern void __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask);
extern void __fsnotify_inode_delete(struct inode *inode);
+extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt);
extern u32 fsnotify_get_cookie(void);
static inline int fsnotify_inode_watches_children(struct inode *inode)
@@ -307,12 +353,10 @@ static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode
/* must call when a group changes its ->mask */
extern void fsnotify_recalc_global_mask(void);
/* get a reference to an existing or create a new group */
-extern struct fsnotify_group *fsnotify_obtain_group(unsigned int group_num,
- __u32 mask,
- const struct fsnotify_ops *ops);
+extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops);
/* run all marks associated with this group and update group->mask */
extern void fsnotify_recalc_group_mask(struct fsnotify_group *group);
-/* drop reference on a group from fsnotify_obtain_group */
+/* drop reference on a group from fsnotify_alloc_group */
extern void fsnotify_put_group(struct fsnotify_group *group);
/* take a reference to an event */
@@ -323,8 +367,13 @@ extern struct fsnotify_event_private_data *fsnotify_remove_priv_from_event(struc
struct fsnotify_event *event);
/* attach the event to the group notification queue */
-extern int fsnotify_add_notify_event(struct fsnotify_group *group, struct fsnotify_event *event,
- struct fsnotify_event_private_data *priv);
+extern int fsnotify_add_notify_event(struct fsnotify_group *group,
+ struct fsnotify_event *event,
+ struct fsnotify_event_private_data *priv,
+ int (*merge)(struct list_head *,
+ struct fsnotify_event *,
+ void **),
+ void **arg);
/* true if the group notification queue is empty */
extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
/* return, but do not dequeue the first event on the notification queue */
@@ -334,38 +383,66 @@ extern struct fsnotify_event *fsnotify_remove_notify_event(struct fsnotify_group
/* functions used to manipulate the marks attached to inodes */
+/* run all marks associated with a vfsmount and update mnt->mnt_fsnotify_mask */
+extern void fsnotify_recalc_vfsmount_mask(struct vfsmount *mnt);
/* run all marks associated with an inode and update inode->i_fsnotify_mask */
extern void fsnotify_recalc_inode_mask(struct inode *inode);
-extern void fsnotify_init_mark(struct fsnotify_mark_entry *entry, void (*free_mark)(struct fsnotify_mark_entry *entry));
+extern void fsnotify_init_mark(struct fsnotify_mark *mark, void (*free_mark)(struct fsnotify_mark *mark));
/* find (and take a reference) to a mark associated with group and inode */
-extern struct fsnotify_mark_entry *fsnotify_find_mark_entry(struct fsnotify_group *group, struct inode *inode);
+extern struct fsnotify_mark *fsnotify_find_inode_mark(struct fsnotify_group *group, struct inode *inode);
+/* find (and take a reference) to a mark associated with group and vfsmount */
+extern struct fsnotify_mark *fsnotify_find_vfsmount_mark(struct fsnotify_group *group, struct vfsmount *mnt);
+/* copy the values from old into new */
+extern void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *old);
+/* set the ignored_mask of a mark */
+extern void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mask);
+/* set the mask of a mark (might pin the object into memory) */
+extern void fsnotify_set_mark_mask_locked(struct fsnotify_mark *mark, __u32 mask);
/* attach the mark to both the group and the inode */
-extern int fsnotify_add_mark(struct fsnotify_mark_entry *entry, struct fsnotify_group *group, struct inode *inode);
+extern int fsnotify_add_mark(struct fsnotify_mark *mark, struct fsnotify_group *group,
+ struct inode *inode, struct vfsmount *mnt, int allow_dups);
/* given a mark, flag it to be freed when all references are dropped */
-extern void fsnotify_destroy_mark_by_entry(struct fsnotify_mark_entry *entry);
+extern void fsnotify_destroy_mark(struct fsnotify_mark *mark);
+/* run all the marks in a group, and clear all of the vfsmount marks */
+extern void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group);
+/* run all the marks in a group, and clear all of the inode marks */
+extern void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group);
+/* run all the marks in a group, and clear all of the marks where mark->flags & flags is true */
+extern void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, unsigned int flags);
/* run all the marks in a group, and flag them to be freed */
extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group);
-extern void fsnotify_get_mark(struct fsnotify_mark_entry *entry);
-extern void fsnotify_put_mark(struct fsnotify_mark_entry *entry);
+extern void fsnotify_get_mark(struct fsnotify_mark *mark);
+extern void fsnotify_put_mark(struct fsnotify_mark *mark);
extern void fsnotify_unmount_inodes(struct list_head *list);
/* put here because inotify does some weird stuff when destroying watches */
extern struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask,
- void *data, int data_is, const char *name,
+ void *data, int data_is,
+ const unsigned char *name,
u32 cookie, gfp_t gfp);
+/* fanotify likes to change events after they are on lists... */
+extern struct fsnotify_event *fsnotify_clone_event(struct fsnotify_event *old_event);
+extern int fsnotify_replace_event(struct fsnotify_event_holder *old_holder,
+ struct fsnotify_event *new_event);
+
#else
-static inline void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
- const char *name, u32 cookie)
-{}
+static inline int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
+ const unsigned char *name, u32 cookie)
+{
+ return 0;
+}
-static inline void __fsnotify_parent(struct dentry *dentry, __u32 mask)
+static inline void __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
{}
static inline void __fsnotify_inode_delete(struct inode *inode)
{}
+static inline void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
+{}
+
static inline void __fsnotify_update_dcache_flags(struct dentry *dentry)
{}
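
A backend-consumer sketch of the reworked API: groups are now obtained with
fsnotify_alloc_group() (group numbers are gone) and marks are attached with the
extended fsnotify_add_mark(), which takes either an inode or a vfsmount plus an
allow_dups flag. The my_* names, the allocation strategy and the mask choice are
assumptions, not from this patch.

static void my_free_mark(struct fsnotify_mark *mark)
{
	kfree(mark);
}

static int my_watch_inode(struct fsnotify_group *group, struct inode *inode)
{
	struct fsnotify_mark *mark;

	mark = kzalloc(sizeof(*mark), GFP_KERNEL);
	if (!mark)
		return -ENOMEM;

	fsnotify_init_mark(mark, my_free_mark);
	mark->mask = FS_OPEN | FS_CLOSE_WRITE;
	/* inode mark, no vfsmount, duplicates not allowed */
	return fsnotify_add_mark(mark, group, inode, NULL, 0);
}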
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 01e6adea07ec..41e46330d9be 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -82,9 +82,13 @@ void clear_ftrace_function(void);
extern void ftrace_stub(unsigned long a0, unsigned long a1);
#else /* !CONFIG_FUNCTION_TRACER */
-# define register_ftrace_function(ops) do { } while (0)
-# define unregister_ftrace_function(ops) do { } while (0)
-# define clear_ftrace_function(ops) do { } while (0)
+/*
+ * (un)register_ftrace_function must be a macro since the ops parameter
+ * must not be evaluated.
+ */
+#define register_ftrace_function(ops) ({ 0; })
+#define unregister_ftrace_function(ops) ({ 0; })
+static inline void clear_ftrace_function(void) { }
static inline void ftrace_kill(void) { }
static inline void ftrace_stop(void) { }
static inline void ftrace_start(void) { }
@@ -237,11 +241,13 @@ extern int skip_trace(unsigned long ip);
extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else
-# define skip_trace(ip) ({ 0; })
-# define ftrace_force_update() ({ 0; })
-# define ftrace_set_filter(buf, len, reset) do { } while (0)
-# define ftrace_disable_daemon() do { } while (0)
-# define ftrace_enable_daemon() do { } while (0)
+static inline int skip_trace(unsigned long ip) { return 0; }
+static inline int ftrace_force_update(void) { return 0; }
+static inline void ftrace_set_filter(unsigned char *buf, int len, int reset)
+{
+}
+static inline void ftrace_disable_daemon(void) { }
+static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_release_mod(struct module *mod) {}
static inline int register_ftrace_command(struct ftrace_func_command *cmd)
{
@@ -314,16 +320,16 @@ static inline void __ftrace_enabled_restore(int enabled)
extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
#else
-# define time_hardirqs_on(a0, a1) do { } while (0)
-# define time_hardirqs_off(a0, a1) do { } while (0)
+ static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
+ static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
#endif
#ifdef CONFIG_PREEMPT_TRACER
extern void trace_preempt_on(unsigned long a0, unsigned long a1);
extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
-# define trace_preempt_on(a0, a1) do { } while (0)
-# define trace_preempt_off(a0, a1) do { } while (0)
+ static inline void trace_preempt_on(unsigned long a0, unsigned long a1) { }
+ static inline void trace_preempt_off(unsigned long a0, unsigned long a1) { }
#endif
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
@@ -352,6 +358,10 @@ struct ftrace_graph_ret {
int depth;
};
+/* Type of the callback handlers for tracing function graph*/
+typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
+typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
+
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* for init task */
@@ -400,10 +410,6 @@ extern char __irqentry_text_end[];
#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32
-/* Type of the callback handlers for tracing function graph*/
-typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
-typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
-
extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
trace_func_graph_ent_t entryfunc);
@@ -441,6 +447,13 @@ static inline void unpause_graph_tracing(void)
static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
+static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+ trace_func_graph_ent_t entryfunc)
+{
+ return -1;
+}
+static inline void unregister_ftrace_graph(void) { }
+
static inline int task_curr_ret_stack(struct task_struct *tsk)
{
return -1;
@@ -492,7 +505,9 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk)
return tsk->trace & TSK_TRACE_FL_GRAPH;
}
-extern int ftrace_dump_on_oops;
+enum ftrace_dump_mode;
+
+extern enum ftrace_dump_mode ftrace_dump_on_oops;
#ifdef CONFIG_PREEMPT
#define INIT_TRACE_RECURSION .trace_recursion = 0,
@@ -504,18 +519,6 @@ extern int ftrace_dump_on_oops;
#define INIT_TRACE_RECURSION
#endif
-#ifdef CONFIG_HW_BRANCH_TRACER
-
-void trace_hw_branch(u64 from, u64 to);
-void trace_hw_branch_oops(void);
-
-#else /* CONFIG_HW_BRANCH_TRACER */
-
-static inline void trace_hw_branch(u64 from, u64 to) {}
-static inline void trace_hw_branch_oops(void) {}
-
-#endif /* CONFIG_HW_BRANCH_TRACER */
-
#ifdef CONFIG_FTRACE_SYSCALLS
unsigned long arch_syscall_addr(int nr);
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 39e71b0a3bfd..c082f223e2fe 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -25,6 +25,9 @@ const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
const struct trace_print_flags *symbol_array);
+const char *ftrace_print_hex_seq(struct trace_seq *p,
+ const unsigned char *buf, int len);
+
/*
* The trace entry - the most basic unit of tracing. This is what
* is printed in the end as a single line in the trace output, such as:
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
index b834ef6d59fa..61549b26ad6f 100644
--- a/include/linux/genetlink.h
+++ b/include/linux/genetlink.h
@@ -80,4 +80,12 @@ enum {
#define CTRL_ATTR_MCAST_GRP_MAX (__CTRL_ATTR_MCAST_GRP_MAX - 1)
+#ifdef __KERNEL__
+
+/* All generic netlink requests are serialized by a global lock. */
+extern void genl_lock(void);
+extern void genl_unlock(void);
+
+#endif /* __KERNEL__ */
+
#endif /* __LINUX_GENERIC_NETLINK_H */
diff --git a/include/linux/hdpu_features.h b/include/linux/hdpu_features.h
deleted file mode 100644
index 6a8715431ae4..000000000000
--- a/include/linux/hdpu_features.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#include <linux/spinlock.h>
-
-struct cpustate_t {
- spinlock_t lock;
- int excl;
- int open_count;
- unsigned char cached_val;
- int inited;
- unsigned long *set_addr;
- unsigned long *clr_addr;
-};
-
-
-#define HDPU_CPUSTATE_NAME "hdpu cpustate"
-#define HDPU_NEXUS_NAME "hdpu nexus"
-
-#define CPUSTATE_KERNEL_MAJOR 0x10
-
-#define CPUSTATE_KERNEL_INIT_DRV 0 /* CPU State Driver Initialized */
-#define CPUSTATE_KERNEL_INIT_PCI 1 /* 64360 PCI Busses Init */
-#define CPUSTATE_KERNEL_INIT_REG 2 /* 64360 Bridge Init */
-#define CPUSTATE_KERNEL_CPU1_KICK 3 /* Boot cpu 1 */
-#define CPUSTATE_KERNEL_CPU1_OK 4 /* Cpu 1 has checked in */
-#define CPUSTATE_KERNEL_OK 5 /* Terminal state */
-#define CPUSTATE_KERNEL_RESET 14 /* Board reset via SW*/
-#define CPUSTATE_KERNEL_HALT 15 /* Board halted via SW*/
diff --git a/include/linux/hid.h b/include/linux/hid.h
index b1344ec4b7fc..895001f7f4b2 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -308,11 +308,13 @@ struct hid_item {
#define HID_QUIRK_NOTOUCH 0x00000002
#define HID_QUIRK_IGNORE 0x00000004
#define HID_QUIRK_NOGET 0x00000008
+#define HID_QUIRK_HIDDEV_FORCE 0x00000010
#define HID_QUIRK_BADPAD 0x00000020
#define HID_QUIRK_MULTI_INPUT 0x00000040
#define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000
#define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000
#define HID_QUIRK_NO_INIT_REPORTS 0x20000000
+#define HID_QUIRK_NO_IGNORE 0x40000000
/*
* This is the global environment of the parser. This information is
@@ -589,6 +591,9 @@ struct hid_usage_id {
* @report_fixup: called before report descriptor parsing (NULL means nop)
* @input_mapping: invoked on input registering before mapping an usage
* @input_mapped: invoked on input registering after mapping an usage
+ * @suspend: invoked on suspend (NULL means nop)
+ * @resume: invoked on resume if device was not reset (NULL means nop)
+ * @reset_resume: invoked on resume if device was reset (NULL means nop)
*
* raw_event and event should return 0 on no action performed, 1 when no
* further processing should be done and negative on error
@@ -629,6 +634,11 @@ struct hid_driver {
int (*input_mapped)(struct hid_device *hdev,
struct hid_input *hidinput, struct hid_field *field,
struct hid_usage *usage, unsigned long **bit, int *max);
+#ifdef CONFIG_PM
+ int (*suspend)(struct hid_device *hdev, pm_message_t message);
+ int (*resume)(struct hid_device *hdev);
+ int (*reset_resume)(struct hid_device *hdev);
+#endif
/* private: */
struct device_driver driver;
};
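With CONFIG_PM set, a HID driver can now supply its own suspend/resume hooks instead of relying only on the transport driver. A hedged sketch of a driver wiring them up; the mydrv_* handlers and driver name are illustrative, not from the patch:

    #ifdef CONFIG_PM
    static int mydrv_suspend(struct hid_device *hdev, pm_message_t message)
    {
            /* quiesce device-specific state before the transport suspends */
            return 0;
    }

    static int mydrv_resume(struct hid_device *hdev)
    {
            return 0;
    }
    #endif

    static struct hid_driver my_hid_driver = {
            .name           = "mydrv",
    #ifdef CONFIG_PM
            .suspend        = mydrv_suspend,
            .resume         = mydrv_resume,
            .reset_resume   = mydrv_resume, /* device was reset; reuse resume */
    #endif
    };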
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 5d86fb2309d2..fd0c1b857d3d 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -422,6 +422,8 @@ extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
extern int schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
const enum hrtimer_mode mode);
+extern int schedule_hrtimeout_range_clock(ktime_t *expires,
+ unsigned long delta, const enum hrtimer_mode mode, int clock);
extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
/* Soft interrupt function to run the hrtimer queues: */
diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h
index c70d27af03f9..a2d6ea49ec56 100644
--- a/include/linux/hw_breakpoint.h
+++ b/include/linux/hw_breakpoint.h
@@ -9,9 +9,22 @@ enum {
};
enum {
- HW_BREAKPOINT_R = 1,
- HW_BREAKPOINT_W = 2,
- HW_BREAKPOINT_X = 4,
+ HW_BREAKPOINT_EMPTY = 0,
+ HW_BREAKPOINT_R = 1,
+ HW_BREAKPOINT_W = 2,
+ HW_BREAKPOINT_RW = HW_BREAKPOINT_R | HW_BREAKPOINT_W,
+ HW_BREAKPOINT_X = 4,
+ HW_BREAKPOINT_INVALID = HW_BREAKPOINT_RW | HW_BREAKPOINT_X,
+};
+
+enum bp_type_idx {
+ TYPE_INST = 0,
+#ifdef CONFIG_HAVE_MIXED_BREAKPOINTS_REGS
+ TYPE_DATA = 0,
+#else
+ TYPE_DATA = 1,
+#endif
+ TYPE_MAX
};
#ifdef __KERNEL__
@@ -34,6 +47,12 @@ static inline void hw_breakpoint_init(struct perf_event_attr *attr)
attr->sample_period = 1;
}
+static inline void ptrace_breakpoint_init(struct perf_event_attr *attr)
+{
+ hw_breakpoint_init(attr);
+ attr->exclude_kernel = 1;
+}
+
static inline unsigned long hw_breakpoint_addr(struct perf_event *bp)
{
return bp->attr.bp_addr;
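ptrace_breakpoint_init() is hw_breakpoint_init() plus exclude_kernel, which is what a ptrace-style user of breakpoints wants. A sketch of preparing an attr for a 4-byte write watchpoint; the watched address is a hypothetical user-space address:

    struct perf_event_attr attr;

    ptrace_breakpoint_init(&attr);          /* breakpoint type, pinned, kernel excluded */
    attr.bp_type = HW_BREAKPOINT_W;
    attr.bp_addr = user_watch_addr;         /* hypothetical address to watch */
    attr.bp_len  = HW_BREAKPOINT_LEN_4;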
diff --git a/include/linux/i2c-omap.h b/include/linux/i2c-omap.h
new file mode 100644
index 000000000000..78ebf507ce56
--- /dev/null
+++ b/include/linux/i2c-omap.h
@@ -0,0 +1,9 @@
+#ifndef __I2C_OMAP_H__
+#define __I2C_OMAP_H__
+
+struct omap_i2c_bus_platform_data {
+ u32 clkrate;
+ void (*set_mpu_wkup_lat)(struct device *dev, long set);
+};
+
+#endif
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 6ed1d59bfb1e..21067b418536 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -34,6 +34,7 @@
#include <linux/device.h> /* for struct device */
#include <linux/sched.h> /* for completion */
#include <linux/mutex.h>
+#include <linux/of.h> /* for struct device_node */
extern struct bus_type i2c_bus_type;
@@ -251,6 +252,9 @@ struct i2c_board_info {
unsigned short addr;
void *platform_data;
struct dev_archdata *archdata;
+#ifdef CONFIG_OF
+ struct device_node *of_node;
+#endif
int irq;
};
diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h
index fb6784e86d5f..6de90bfc6acd 100644
--- a/include/linux/i2c/twl.h
+++ b/include/linux/i2c/twl.h
@@ -496,7 +496,7 @@ struct twl4030_madc_platform_data {
int irq_line;
};
-/* Boards have uniqe mappings of {row, col} --> keycode.
+/* Boards have unique mappings of {row, col} --> keycode.
* Column and row are 8 bits each, but range only from 0..7.
* a PERSISTENT_KEY is "always on" and never reported.
*/
@@ -569,9 +569,9 @@ struct twl4030_codec_data {
struct twl4030_codec_audio_data *audio;
struct twl4030_codec_vibra_data *vibra;
- /* twl6030 */
- int audpwron_gpio; /* audio power-on gpio */
- int naudint_irq; /* audio interrupt */
+ /* twl6040 */
+ int audpwron_gpio; /* audio power-on gpio */
+ int naudint_irq; /* audio interrupt */
};
struct twl4030_platform_data {
@@ -664,15 +664,15 @@ static inline int twl4030charger_usb_en(int enable) { return 0; }
#define TWL4030_REG_VUSB3V1 19
/* TWL6030 SMPS/LDO's */
-/* EXTERNAL dc-to-dc buck convertor contollable via SR */
+/* EXTERNAL dc-to-dc buck convertor controllable via SR */
#define TWL6030_REG_VDD1 30
#define TWL6030_REG_VDD2 31
#define TWL6030_REG_VDD3 32
/* Non SR compliant dc-to-dc buck convertors */
-#define TWL6030_REG_VMEM 33
+#define TWL6030_REG_VMEM 33
#define TWL6030_REG_V2V1 34
-#define TWL6030_REG_V1V29 35
+#define TWL6030_REG_V1V29 35
#define TWL6030_REG_V1V8 36
/* EXTERNAL LDOs */
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 3239d1c10acb..5acdbe51348a 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -516,8 +516,8 @@ struct ide_drive_s {
u8 current_speed; /* current transfer rate set */
u8 desired_speed; /* desired transfer rate set */
u8 pio_mode; /* for ->set_pio_mode _only_ */
- u8 dma_mode; /* for ->dma_pio_mode _only_ */
- u8 dn; /* now wide spread use */
+ u8 dma_mode; /* for ->set_dma_mode _only_ */
+ u8 dn; /* now wide spread use */
u8 acoustic; /* acoustic management */
u8 media; /* disk, cdrom, tape, floppy, ... */
u8 ready_stat; /* min status value for drive ready */
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 19984958ab7b..97b2eae6a22c 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -876,6 +876,7 @@ struct ieee80211_ht_cap {
#define IEEE80211_HT_CAP_SGI_40 0x0040
#define IEEE80211_HT_CAP_TX_STBC 0x0080
#define IEEE80211_HT_CAP_RX_STBC 0x0300
+#define IEEE80211_HT_CAP_RX_STBC_SHIFT 8
#define IEEE80211_HT_CAP_DELAY_BA 0x0400
#define IEEE80211_HT_CAP_MAX_AMSDU 0x0800
#define IEEE80211_HT_CAP_DSSSCCK40 0x1000
@@ -1211,6 +1212,8 @@ enum ieee80211_category {
WLAN_CATEGORY_SA_QUERY = 8,
WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9,
WLAN_CATEGORY_WMM = 17,
+ WLAN_CATEGORY_MESH_PLINK = 30, /* Pending ANA approval */
+ WLAN_CATEGORY_MESH_PATH_SEL = 32, /* Pending ANA approval */
WLAN_CATEGORY_VENDOR_SPECIFIC_PROTECTED = 126,
WLAN_CATEGORY_VENDOR_SPECIFIC = 127,
};
@@ -1324,7 +1327,6 @@ enum ieee80211_back_actioncode {
enum ieee80211_back_parties {
WLAN_BACK_RECIPIENT = 0,
WLAN_BACK_INITIATOR = 1,
- WLAN_BACK_TIMER = 2,
};
/* SA Query action */
diff --git a/include/linux/if.h b/include/linux/if.h
index 3a9f410a296b..be350e62a905 100644
--- a/include/linux/if.h
+++ b/include/linux/if.h
@@ -71,6 +71,8 @@
* release skb->dst
*/
#define IFF_DONT_BRIDGE 0x800 /* disallow bridging this ether dev */
+#define IFF_IN_NETPOLL 0x1000 /* whether we are processing netpoll */
+#define IFF_DISABLE_NETPOLL 0x2000 /* disable netpoll at run-time */
#define IF_GET_IFACE 0x0001 /* for querying only */
#define IF_GET_PROTO 0x0002
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h
index e80b7f88f7c6..6d722f41ee7c 100644
--- a/include/linux/if_arp.h
+++ b/include/linux/if_arp.h
@@ -90,6 +90,7 @@
#define ARPHRD_PHONET 820 /* PhoNet media type */
#define ARPHRD_PHONET_PIPE 821 /* PhoNet pipe header */
+#define ARPHRD_CAIF 822 /* CAIF media type */
#define ARPHRD_VOID 0xFFFF /* Void type, nothing is known */
#define ARPHRD_NONE 0xFFFE /* zero header length */
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index 299b4121f914..bed7a4682b90 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -109,6 +109,7 @@
#define ETH_P_TRAILER 0x001C /* Trailer switch tagging */
#define ETH_P_PHONET 0x00F5 /* Nokia Phonet frames */
#define ETH_P_IEEE802154 0x00F6 /* IEEE802.15.4 frame */
+#define ETH_P_CAIF 0x00F7 /* ST-Ericsson CAIF protocol */
/*
* This is an Ethernet frame header.
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index c9bf92cd7653..cfd420ba72df 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -37,6 +37,38 @@ struct rtnl_link_stats {
__u32 tx_compressed;
};
+struct rtnl_link_stats64 {
+ __u64 rx_packets; /* total packets received */
+ __u64 tx_packets; /* total packets transmitted */
+ __u64 rx_bytes; /* total bytes received */
+ __u64 tx_bytes; /* total bytes transmitted */
+ __u64 rx_errors; /* bad packets received */
+ __u64 tx_errors; /* packet transmit problems */
+ __u64 rx_dropped; /* no space in linux buffers */
+ __u64 tx_dropped; /* no space available in linux */
+ __u64 multicast; /* multicast packets received */
+ __u64 collisions;
+
+ /* detailed rx_errors: */
+ __u64 rx_length_errors;
+ __u64 rx_over_errors; /* receiver ring buff overflow */
+ __u64 rx_crc_errors; /* recved pkt with crc error */
+ __u64 rx_frame_errors; /* recv'd frame alignment error */
+ __u64 rx_fifo_errors; /* recv'r fifo overrun */
+ __u64 rx_missed_errors; /* receiver missed packet */
+
+ /* detailed tx_errors */
+ __u64 tx_aborted_errors;
+ __u64 tx_carrier_errors;
+ __u64 tx_fifo_errors;
+ __u64 tx_heartbeat_errors;
+ __u64 tx_window_errors;
+
+ /* for cslip etc */
+ __u64 rx_compressed;
+ __u64 tx_compressed;
+};
+
/* The struct should be in sync with struct ifmap */
struct rtnl_link_ifmap {
__u64 mem_start;
@@ -83,6 +115,7 @@ enum {
IFLA_VF_VLAN,
IFLA_VF_TX_RATE, /* TX Bandwidth Allocation */
IFLA_VFINFO,
+ IFLA_STATS64,
__IFLA_MAX
};
diff --git a/include/linux/if_packet.h b/include/linux/if_packet.h
index aa57a5f993fc..6ac23ef1801a 100644
--- a/include/linux/if_packet.h
+++ b/include/linux/if_packet.h
@@ -47,6 +47,7 @@ struct sockaddr_ll {
#define PACKET_TX_RING 13
#define PACKET_LOSS 14
#define PACKET_VNET_HDR 15
+#define PACKET_TX_TIMESTAMP 16
struct tpacket_stats {
unsigned int tp_packets;
diff --git a/include/linux/if_pppol2tp.h b/include/linux/if_pppol2tp.h
index c58baea4a25b..184bc5566207 100644
--- a/include/linux/if_pppol2tp.h
+++ b/include/linux/if_pppol2tp.h
@@ -2,7 +2,7 @@
* Linux PPP over L2TP (PPPoL2TP) Socket Implementation (RFC 2661)
*
* This file supplies definitions required by the PPP over L2TP driver
- * (pppol2tp.c). All version information wrt this file is located in pppol2tp.c
+ * (l2tp_ppp.c). All version information wrt this file is located in l2tp_ppp.c
*
* License:
* This program is free software; you can redistribute it and/or
@@ -35,6 +35,20 @@ struct pppol2tp_addr {
__u16 d_tunnel, d_session; /* For sending outgoing packets */
};
+/* The L2TPv3 protocol changes tunnel and session ids from 16 to 32
+ * bits. So we need a different sockaddr structure.
+ */
+struct pppol2tpv3_addr {
+ pid_t pid; /* pid that owns the fd.
+ * 0 => current */
+ int fd; /* FD of UDP or IP socket to use */
+
+ struct sockaddr_in addr; /* IP address and port to send to */
+
+ __u32 s_tunnel, s_session; /* For matching incoming packets */
+ __u32 d_tunnel, d_session; /* For sending outgoing packets */
+};
+
/* Socket options:
* DEBUG - bitmask of debug message categories
* SENDSEQ - 0 => don't send packets with sequence numbers
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index 90b5fae5d714..a6577af0c4e6 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -72,6 +72,15 @@ struct sockaddr_pppol2tp {
struct pppol2tp_addr pppol2tp;
}__attribute__ ((packed));
+/* The L2TPv3 protocol changes tunnel and session ids from 16 to 32
+ * bits. So we need a different sockaddr structure.
+ */
+struct sockaddr_pppol2tpv3 {
+ sa_family_t sa_family; /* address family, AF_PPPOX */
+ unsigned int sa_protocol; /* protocol identifier */
+ struct pppol2tpv3_addr pppol2tp;
+} __attribute__ ((packed));
+
/*********************************************************************
*
* ioctl interface for defining forwarding of connections
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
index 1350a246893a..06b1829731fd 100644
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
@@ -51,6 +51,8 @@
#define TUNSETSNDBUF _IOW('T', 212, int)
#define TUNATTACHFILTER _IOW('T', 213, struct sock_fprog)
#define TUNDETACHFILTER _IOW('T', 214, struct sock_fprog)
+#define TUNGETVNETHDRSZ _IOR('T', 215, int)
+#define TUNSETVNETHDRSZ _IOW('T', 216, int)
/* TUNSETIFF ifr flags */
#define IFF_TUN 0x0001
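The two new ioctls let userspace query and change the virtio-net header size carried on a tap fd. A minimal user-space sketch, assuming tap_fd was already opened via /dev/net/tun and error handling is omitted:

    int hdr_sz;

    if (ioctl(tap_fd, TUNGETVNETHDRSZ, &hdr_sz) == 0)
            printf("current vnet header size: %d\n", hdr_sz);

    hdr_sz = 12;    /* e.g. size of a mergeable-rx-buffer virtio-net header */
    ioctl(tap_fd, TUNSETVNETHDRSZ, &hdr_sz);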
diff --git a/include/linux/if_x25.h b/include/linux/if_x25.h
new file mode 100644
index 000000000000..897765f5feb8
--- /dev/null
+++ b/include/linux/if_x25.h
@@ -0,0 +1,26 @@
+/*
+ * Linux X.25 packet to device interface
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IF_X25_H
+#define _IF_X25_H
+
+#include <linux/types.h>
+
+/* Documentation/networking/x25-iface.txt */
+#define X25_IFACE_DATA 0x00
+#define X25_IFACE_CONNECT 0x01
+#define X25_IFACE_DISCONNECT 0x02
+#define X25_IFACE_PARAMS 0x03
+
+#endif /* _IF_X25_H */
diff --git a/include/linux/in6.h b/include/linux/in6.h
index bd55c6e46b2e..c4bf46f764bf 100644
--- a/include/linux/in6.h
+++ b/include/linux/in6.h
@@ -221,10 +221,10 @@ struct in6_flowlabel_req {
#define IPV6_RTHDR 57
#define IPV6_RECVDSTOPTS 58
#define IPV6_DSTOPTS 59
-#if 0 /* not yet */
#define IPV6_RECVPATHMTU 60
#define IPV6_PATHMTU 61
#define IPV6_DONTFRAG 62
+#if 0 /* not yet */
#define IPV6_USE_MIN_MTU 63
#endif
@@ -265,6 +265,9 @@ struct in6_flowlabel_req {
#define IPV6_PREFER_SRC_CGA 0x0008
#define IPV6_PREFER_SRC_NONCGA 0x0800
+/* RFC5082: Generalized Ttl Security Mechanism */
+#define IPV6_MINHOPCOUNT 73
+
/*
* Multicast Routing:
* see include/linux/mroute6.h.
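IPV6_MINHOPCOUNT brings RFC 5082 (GTSM) to IPv6: packets whose hop limit is below the configured minimum are dropped before they reach the socket. A sketch of a listener opting in, assuming sock is an already-created IPv6 socket; a value of 255 accepts only packets from direct neighbors:

    int min_hops = 255;

    if (setsockopt(sock, IPPROTO_IPV6, IPV6_MINHOPCOUNT,
                   &min_hops, sizeof(min_hops)) < 0)
            perror("setsockopt(IPV6_MINHOPCOUNT)");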
diff --git a/include/linux/init.h b/include/linux/init.h
index ab1d31f9352b..de994304e0bb 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -301,7 +301,7 @@ void __init parse_early_options(char *cmdline);
#endif
/* Data marked not to be saved by software suspend */
-#define __nosavedata __section(.data.nosave)
+#define __nosavedata __section(.data..nosave)
/* This means "can be init if no module support, otherwise module load
may call it." */
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index b1ed1cd8e2a8..9ebcb9cf6329 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -183,7 +183,7 @@ extern struct cred init_cred;
}
/* Attach to the init_task data structure for proper alignment */
-#define __init_task_data __attribute__((__section__(".data.init_task")))
+#define __init_task_data __attribute__((__section__(".data..init_task")))
#endif
diff --git a/include/linux/inotify.h b/include/linux/inotify.h
index 37ea2894b3c0..94d209a1b689 100644
--- a/include/linux/inotify.h
+++ b/include/linux/inotify.h
@@ -70,177 +70,8 @@ struct inotify_event {
#define IN_NONBLOCK O_NONBLOCK
#ifdef __KERNEL__
-
-#include <linux/dcache.h>
-#include <linux/fs.h>
-
-/*
- * struct inotify_watch - represents a watch request on a specific inode
- *
- * h_list is protected by ih->mutex of the associated inotify_handle.
- * i_list, mask are protected by inode->inotify_mutex of the associated inode.
- * ih, inode, and wd are never written to once the watch is created.
- *
- * Callers must use the established inotify interfaces to access inotify_watch
- * contents. The content of this structure is private to the inotify
- * implementation.
- */
-struct inotify_watch {
- struct list_head h_list; /* entry in inotify_handle's list */
- struct list_head i_list; /* entry in inode's list */
- atomic_t count; /* reference count */
- struct inotify_handle *ih; /* associated inotify handle */
- struct inode *inode; /* associated inode */
- __s32 wd; /* watch descriptor */
- __u32 mask; /* event mask for this watch */
-};
-
-struct inotify_operations {
- void (*handle_event)(struct inotify_watch *, u32, u32, u32,
- const char *, struct inode *);
- void (*destroy_watch)(struct inotify_watch *);
-};
-
-#ifdef CONFIG_INOTIFY
-
-/* Kernel API for producing events */
-
-extern void inotify_d_instantiate(struct dentry *, struct inode *);
-extern void inotify_d_move(struct dentry *);
-extern void inotify_inode_queue_event(struct inode *, __u32, __u32,
- const char *, struct inode *);
-extern void inotify_dentry_parent_queue_event(struct dentry *, __u32, __u32,
- const char *);
-extern void inotify_unmount_inodes(struct list_head *);
-extern void inotify_inode_is_dead(struct inode *);
-extern u32 inotify_get_cookie(void);
-
-/* Kernel Consumer API */
-
-extern struct inotify_handle *inotify_init(const struct inotify_operations *);
-extern void inotify_init_watch(struct inotify_watch *);
-extern void inotify_destroy(struct inotify_handle *);
-extern __s32 inotify_find_watch(struct inotify_handle *, struct inode *,
- struct inotify_watch **);
-extern __s32 inotify_find_update_watch(struct inotify_handle *, struct inode *,
- u32);
-extern __s32 inotify_add_watch(struct inotify_handle *, struct inotify_watch *,
- struct inode *, __u32);
-extern __s32 inotify_clone_watch(struct inotify_watch *, struct inotify_watch *);
-extern void inotify_evict_watch(struct inotify_watch *);
-extern int inotify_rm_watch(struct inotify_handle *, struct inotify_watch *);
-extern int inotify_rm_wd(struct inotify_handle *, __u32);
-extern void inotify_remove_watch_locked(struct inotify_handle *,
- struct inotify_watch *);
-extern void get_inotify_watch(struct inotify_watch *);
-extern void put_inotify_watch(struct inotify_watch *);
-extern int pin_inotify_watch(struct inotify_watch *);
-extern void unpin_inotify_watch(struct inotify_watch *);
-
-#else
-
-static inline void inotify_d_instantiate(struct dentry *dentry,
- struct inode *inode)
-{
-}
-
-static inline void inotify_d_move(struct dentry *dentry)
-{
-}
-
-static inline void inotify_inode_queue_event(struct inode *inode,
- __u32 mask, __u32 cookie,
- const char *filename,
- struct inode *n_inode)
-{
-}
-
-static inline void inotify_dentry_parent_queue_event(struct dentry *dentry,
- __u32 mask, __u32 cookie,
- const char *filename)
-{
-}
-
-static inline void inotify_unmount_inodes(struct list_head *list)
-{
-}
-
-static inline void inotify_inode_is_dead(struct inode *inode)
-{
-}
-
-static inline u32 inotify_get_cookie(void)
-{
- return 0;
-}
-
-static inline struct inotify_handle *inotify_init(const struct inotify_operations *ops)
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
-
-static inline void inotify_init_watch(struct inotify_watch *watch)
-{
-}
-
-static inline void inotify_destroy(struct inotify_handle *ih)
-{
-}
-
-static inline __s32 inotify_find_watch(struct inotify_handle *ih, struct inode *inode,
- struct inotify_watch **watchp)
-{
- return -EOPNOTSUPP;
-}
-
-static inline __s32 inotify_find_update_watch(struct inotify_handle *ih,
- struct inode *inode, u32 mask)
-{
- return -EOPNOTSUPP;
-}
-
-static inline __s32 inotify_add_watch(struct inotify_handle *ih,
- struct inotify_watch *watch,
- struct inode *inode, __u32 mask)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int inotify_rm_watch(struct inotify_handle *ih,
- struct inotify_watch *watch)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int inotify_rm_wd(struct inotify_handle *ih, __u32 wd)
-{
- return -EOPNOTSUPP;
-}
-
-static inline void inotify_remove_watch_locked(struct inotify_handle *ih,
- struct inotify_watch *watch)
-{
-}
-
-static inline void get_inotify_watch(struct inotify_watch *watch)
-{
-}
-
-static inline void put_inotify_watch(struct inotify_watch *watch)
-{
-}
-
-extern inline int pin_inotify_watch(struct inotify_watch *watch)
-{
- return 0;
-}
-
-extern inline void unpin_inotify_watch(struct inotify_watch *watch)
-{
-}
-
-#endif /* CONFIG_INOTIFY */
-
-#endif /* __KERNEL __ */
+#include <linux/sysctl.h>
+extern struct ctl_table inotify_table[]; /* for sysctl */
+#endif
#endif /* _LINUX_INOTIFY_H */
diff --git a/include/linux/input.h b/include/linux/input.h
index 7ed2251b33f1..ce16a2f03adc 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -34,7 +34,7 @@ struct input_event {
* Protocol version.
*/
-#define EV_VERSION 0x010000
+#define EV_VERSION 0x010001
/*
* IOCTLs (0x00 - 0x7f)
@@ -56,12 +56,22 @@ struct input_absinfo {
__s32 resolution;
};
+struct keycode_table_entry {
+ __u32 keycode; /* e.g. KEY_A */
+ __u32 index; /* Index for the given scan/key table, on EVIOCGKEYCODEBIG */
+ __u32 len; /* Length of the scancode */
+ __u32 reserved[2]; /* Reserved for future usage */
+ char *scancode; /* scancode, in machine-endian */
+};
+
#define EVIOCGVERSION _IOR('E', 0x01, int) /* get driver version */
#define EVIOCGID _IOR('E', 0x02, struct input_id) /* get device ID */
#define EVIOCGREP _IOR('E', 0x03, unsigned int[2]) /* get repeat settings */
#define EVIOCSREP _IOW('E', 0x03, unsigned int[2]) /* set repeat settings */
#define EVIOCGKEYCODE _IOR('E', 0x04, unsigned int[2]) /* get keycode */
#define EVIOCSKEYCODE _IOW('E', 0x04, unsigned int[2]) /* set keycode */
+#define EVIOCGKEYCODEBIG _IOR('E', 0x04, struct keycode_table_entry) /* get keycode */
+#define EVIOCSKEYCODEBIG _IOW('E', 0x04, struct keycode_table_entry) /* set keycode */
#define EVIOCGNAME(len) _IOC(_IOC_READ, 'E', 0x06, len) /* get device name */
#define EVIOCGPHYS(len) _IOC(_IOC_READ, 'E', 0x07, len) /* get physical location */
@@ -806,6 +816,7 @@ struct input_absinfo {
#define BUS_HOST 0x19
#define BUS_GSC 0x1A
#define BUS_ATARI 0x1B
+#define BUS_SPI 0x1C
/*
* MT_TOOL types
@@ -1065,13 +1076,22 @@ struct ff_effect {
* @keycodemax: size of keycode table
* @keycodesize: size of elements in keycode table
* @keycode: map of scancodes to keycodes for this device
- * @setkeycode: optional method to alter current keymap, used to implement
+ * @setkeycode: optional legacy method to alter current keymap, used to
+ * implement sparse keymaps. Shouldn't be used on new drivers
+ * @getkeycode: optional legacy method to retrieve current keymap.
+ * Shouldn't be used on new drivers.
+ * @setkeycodebig: optional method to alter current keymap, used to implement
* sparse keymaps. If not supplied default mechanism will be used.
* The method is being called while holding event_lock and thus must
* not sleep
- * @getkeycode: optional method to retrieve current keymap. If not supplied
- * default mechanism will be used. The method is being called while
- * holding event_lock and thus must not sleep
+ * @getkeycodebig_from_index: optional method to retrieve current keymap from
+ * an array index. If not supplied default mechanism will be used.
+ * The method is being called while holding event_lock and thus must
+ * not sleep
+ * @getkeycodebig_from_scancode: optional method to retrieve current keymap
+ *	from a scancode. If not supplied default mechanism will be used.
+ * The method is being called while holding event_lock and thus must
+ * not sleep
* @ff: force feedback structure associated with the device if device
* supports force feedback effects
* @repeat_key: stores key code of the last key pressed; used to implement
@@ -1146,6 +1166,12 @@ struct input_dev {
unsigned int scancode, unsigned int keycode);
int (*getkeycode)(struct input_dev *dev,
unsigned int scancode, unsigned int *keycode);
+ int (*setkeycodebig)(struct input_dev *dev,
+ struct keycode_table_entry *kt_entry);
+ int (*getkeycodebig_from_index)(struct input_dev *dev,
+ struct keycode_table_entry *kt_entry);
+ int (*getkeycodebig_from_scancode)(struct input_dev *dev,
+ struct keycode_table_entry *kt_entry);
struct ff_device *ff;
@@ -1421,6 +1447,11 @@ int input_get_keycode(struct input_dev *dev,
unsigned int scancode, unsigned int *keycode);
int input_set_keycode(struct input_dev *dev,
unsigned int scancode, unsigned int keycode);
+int input_get_keycode_big(struct input_dev *dev,
+ struct keycode_table_entry *kt_entry);
+int input_set_keycode_big(struct input_dev *dev,
+ struct keycode_table_entry *kt_entry);
+
extern struct class input_class;
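EVIOCGKEYCODEBIG/EVIOCSKEYCODEBIG, as proposed here, pass a struct keycode_table_entry so that scancodes longer than 32 bits can be mapped. A hedged user-space sketch of reading the first keymap slot; the field usage is inferred from the structure itself and evdev_fd is assumed to be an open event device:

    struct keycode_table_entry kt;
    char scancode[32];

    memset(&kt, 0, sizeof(kt));
    kt.index    = 0;                /* look up the first keymap slot */
    kt.len      = sizeof(scancode);
    kt.scancode = scancode;

    if (ioctl(evdev_fd, EVIOCGKEYCODEBIG, &kt) == 0)
            printf("slot 0: keycode %u, scancode length %u\n",
                   kt.keycode, kt.len);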
diff --git a/include/linux/input/ad714x.h b/include/linux/input/ad714x.h
new file mode 100644
index 000000000000..0cbe5e81482e
--- /dev/null
+++ b/include/linux/input/ad714x.h
@@ -0,0 +1,63 @@
+/*
+ * include/linux/input/ad714x.h
+ *
+ * AD714x is very flexible; it can act as buttons, a scrollwheel, a
+ * slider and a touchpad at the same time, depending on the board.
+ * The platform_data for the device's "struct device" holds this
+ * information.
+ *
+ * Copyright 2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __LINUX_INPUT_AD714X_H__
+#define __LINUX_INPUT_AD714X_H__
+
+#define STAGE_NUM 12
+#define STAGE_CFGREG_NUM 8
+#define SYS_CFGREG_NUM 8
+
+/* board information which need be initialized in arch/mach... */
+struct ad714x_slider_plat {
+ int start_stage;
+ int end_stage;
+ int max_coord;
+};
+
+struct ad714x_wheel_plat {
+ int start_stage;
+ int end_stage;
+ int max_coord;
+};
+
+struct ad714x_touchpad_plat {
+ int x_start_stage;
+ int x_end_stage;
+ int x_max_coord;
+
+ int y_start_stage;
+ int y_end_stage;
+ int y_max_coord;
+};
+
+struct ad714x_button_plat {
+ int keycode;
+ unsigned short l_mask;
+ unsigned short h_mask;
+};
+
+struct ad714x_platform_data {
+ int slider_num;
+ int wheel_num;
+ int touchpad_num;
+ int button_num;
+ struct ad714x_slider_plat *slider;
+ struct ad714x_wheel_plat *wheel;
+ struct ad714x_touchpad_plat *touchpad;
+ struct ad714x_button_plat *button;
+ unsigned short stage_cfg_reg[STAGE_NUM][STAGE_CFGREG_NUM];
+ unsigned short sys_cfg_reg[SYS_CFGREG_NUM];
+};
+
+#endif
diff --git a/include/linux/input/tps6507x-ts.h b/include/linux/input/tps6507x-ts.h
new file mode 100644
index 000000000000..ab1440313924
--- /dev/null
+++ b/include/linux/input/tps6507x-ts.h
@@ -0,0 +1,24 @@
+/* linux/i2c/tps6507x-ts.h
+ *
+ * Functions to access TPS65070 touch screen chip.
+ *
+ * Copyright (c) 2009 RidgeRun (todd.fischer@ridgerun.com)
+ *
+ *
+ * For licencing details see kernel-base/COPYING
+ */
+
+#ifndef __LINUX_I2C_TPS6507X_TS_H
+#define __LINUX_I2C_TPS6507X_TS_H
+
+/* Board specific touch screen initial values */
+struct touchscreen_init_data {
+ int poll_period; /* ms */
+ int vref; /* non-zero to leave vref on */
+ __u16 min_pressure; /* min reading to be treated as a touch */
+ __u16 vendor;
+ __u16 product;
+ __u16 version;
+};
+
+#endif /* __LINUX_I2C_TPS6507X_TS_H */
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 75f3f00ac1e5..5137db3317f9 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -39,7 +39,8 @@
* These flags used only by the kernel as part of the
* irq handling routines.
*
- * IRQF_DISABLED - keep irqs disabled when calling the action handler
+ * IRQF_DISABLED - keep irqs disabled when calling the action handler.
+ * DEPRECATED. This flag is a NOOP and scheduled to be removed
* IRQF_SAMPLE_RANDOM - irq is used to feed the random generator
* IRQF_SHARED - allow sharing the irq among several devices
* IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
@@ -77,6 +78,18 @@ enum {
IRQTF_AFFINITY,
};
+/**
+ * These values can be returned by request_any_context_irq() and
+ * describe the context the interrupt will be run in.
+ *
+ * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
+ * IRQC_IS_NESTED - interrupt runs in a nested threaded context
+ */
+enum {
+ IRQC_IS_HARDIRQ = 0,
+ IRQC_IS_NESTED,
+};
+
typedef irqreturn_t (*irq_handler_t)(int, void *);
/**
@@ -120,6 +133,10 @@ request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}
+extern int __must_check
+request_any_context_irq(unsigned int irq, irq_handler_t handler,
+ unsigned long flags, const char *name, void *dev_id);
+
extern void exit_irq_thread(void);
#else
@@ -141,6 +158,13 @@ request_threaded_irq(unsigned int irq, irq_handler_t handler,
return request_irq(irq, handler, flags, name, dev);
}
+static inline int __must_check
+request_any_context_irq(unsigned int irq, irq_handler_t handler,
+ unsigned long flags, const char *name, void *dev_id)
+{
+ return request_irq(irq, handler, flags, name, dev_id);
+}
+
static inline void exit_irq_thread(void) { }
#endif
@@ -209,6 +233,7 @@ extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);
+extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
#else /* CONFIG_SMP */
static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
@@ -223,6 +248,11 @@ static inline int irq_can_set_affinity(unsigned int irq)
static inline int irq_select_affinity(unsigned int irq) { return 0; }
+static inline int irq_set_affinity_hint(unsigned int irq,
+ const struct cpumask *m)
+{
+ return -EINVAL;
+}
#endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */
#ifdef CONFIG_GENERIC_HARDIRQS
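request_any_context_irq() picks hardirq or nested-thread handling based on what the underlying irq_chip supports and reports which one it chose via the new IRQC_* values. A sketch of a driver using it in probe(); irq, dev and mydev are the driver's own, illustrative variables:

    static irqreturn_t mydev_isr(int irq, void *dev_id)
    {
            return IRQ_HANDLED;
    }

    /* in probe(): */
    ret = request_any_context_irq(irq, mydev_isr, IRQF_TRIGGER_LOW,
                                  "mydev", mydev);
    if (ret < 0)
            return ret;
    if (ret == IRQC_IS_NESTED)
            dev_info(dev, "mydev IRQ runs as a nested thread\n");
    /* otherwise ret == IRQC_IS_HARDIRQ */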
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 26fad187d661..b22790268b64 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -52,6 +52,7 @@ struct resource_list {
#define IORESOURCE_MEM_64 0x00100000
#define IORESOURCE_WINDOW 0x00200000 /* forwarded by bridge */
+#define IORESOURCE_MUXED 0x00400000 /* Resource is software muxed */
#define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */
#define IORESOURCE_DISABLED 0x10000000
@@ -143,7 +144,8 @@ static inline unsigned long resource_type(const struct resource *res)
}
/* Convenience shorthand with allocation */
-#define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), 0)
+#define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), 0)
+#define request_muxed_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), IORESOURCE_MUXED)
#define __request_mem_region(start,n,name, excl) __request_region(&iomem_resource, (start), (n), (name), excl)
#define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name), 0)
#define request_mem_region_exclusive(start,n,name) \
diff --git a/include/linux/ioq.h b/include/linux/ioq.h
new file mode 100644
index 000000000000..7c6d6cad83c7
--- /dev/null
+++ b/include/linux/ioq.h
@@ -0,0 +1,414 @@
+/*
+ * Copyright 2009 Novell. All Rights Reserved.
+ *
+ * IOQ is a generic shared-memory, lockless queue mechanism. It can be used
+ * in a variety of ways, though its intended purpose is to become the
+ * asynchronous communication path for virtual-bus drivers.
+ *
+ * The following are a list of key design points:
+ *
+ * #) All shared-memory is always allocated explicitly on one side of the
+ * link. This typically would be the guest side in a VM/VMM scenario.
+ * #) Each IOQ has the concept of "north" and "south" locales, where
+ * north denotes the memory-owner side (e.g. guest).
+ * #) An IOQ is manipulated using an iterator idiom.
+ * #) Provides a bi-directional signaling/notification infrastructure on
+ * a per-queue basis, which includes an event mitigation strategy
+ * to reduce boundary switching.
+ * #) The signaling path is abstracted so that various technologies and
+ * topologies can define their own specific implementation while sharing
+ * the basic structures and code.
+ *
+ * Author:
+ * Gregory Haskins <ghaskins@novell.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _LINUX_IOQ_H
+#define _LINUX_IOQ_H
+
+#include <linux/types.h>
+#include <linux/shm_signal.h>
+
+/*
+ *---------
+ * The following structures represent data that is shared across boundaries
+ * which may be quite disparate from one another (e.g. Windows vs Linux,
+ * 32 vs 64 bit, etc). Therefore, care has been taken to make sure they
+ * present data in a manner that is independent of the environment.
+ *-----------
+ */
+struct ioq_ring_desc {
+ __u64 cookie; /* for arbitrary use by north-side */
+ __le64 ptr;
+ __le64 len;
+ __u8 valid;
+ __u8 sown; /* South owned = 1, North owned = 0 */
+};
+
+#define IOQ_RING_MAGIC cpu_to_le32(0x47fa2fe4)
+#define IOQ_RING_VER cpu_to_le32(4)
+
+struct ioq_ring_idx {
+ __le32 head; /* 0 based index to head of ptr array */
+ __le32 tail; /* 0 based index to tail of ptr array */
+ __u8 full;
+};
+
+enum ioq_locality {
+ ioq_locality_north,
+ ioq_locality_south,
+};
+
+struct ioq_ring_head {
+ __le32 magic;
+ __le32 ver;
+ struct shm_signal_desc signal;
+ struct ioq_ring_idx idx[2];
+ __le32 count;
+ struct ioq_ring_desc ring[1]; /* "count" elements will be allocated */
+};
+
+#define IOQ_HEAD_DESC_SIZE(count) \
+ (sizeof(struct ioq_ring_head) + sizeof(struct ioq_ring_desc) * (count - 1))
+
+/* --- END SHARED STRUCTURES --- */
+
+#ifdef __KERNEL__
+
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/interrupt.h>
+#include <linux/kref.h>
+
+enum ioq_idx_type {
+ ioq_idxtype_valid,
+ ioq_idxtype_inuse,
+ ioq_idxtype_both,
+ ioq_idxtype_invalid,
+};
+
+enum ioq_seek_type {
+ ioq_seek_tail,
+ ioq_seek_next,
+ ioq_seek_head,
+ ioq_seek_set
+};
+
+struct ioq_iterator {
+ struct ioq *ioq;
+ struct ioq_ring_idx *idx;
+ u32 pos;
+ struct ioq_ring_desc *desc;
+ bool update;
+ bool dualidx;
+ bool flipowner;
+};
+
+struct ioq_notifier {
+ void (*signal)(struct ioq_notifier *);
+};
+
+struct ioq_ops {
+ void (*release)(struct ioq *ioq);
+};
+
+struct ioq {
+ struct ioq_ops *ops;
+
+ struct kref kref;
+ enum ioq_locality locale;
+ struct ioq_ring_head *head_desc;
+ struct ioq_ring_desc *ring;
+ struct shm_signal *signal;
+ wait_queue_head_t wq;
+ struct ioq_notifier *notifier;
+ size_t count;
+ struct shm_signal_notifier shm_notifier;
+};
+
+#define IOQ_ITER_AUTOUPDATE (1 << 0)
+#define IOQ_ITER_NOFLIPOWNER (1 << 1)
+
+/**
+ * ioq_init() - initialize an IOQ
+ * @ioq: IOQ context
+ *
+ * Initializes IOQ context before first use
+ *
+ **/
+void ioq_init(struct ioq *ioq,
+ struct ioq_ops *ops,
+ enum ioq_locality locale,
+ struct ioq_ring_head *head,
+ struct shm_signal *signal,
+ size_t count);
+
+/**
+ * ioq_get() - acquire an IOQ context reference
+ * @ioq: IOQ context
+ *
+ **/
+static inline struct ioq *ioq_get(struct ioq *ioq)
+{
+ kref_get(&ioq->kref);
+
+ return ioq;
+}
+
+static inline void _ioq_kref_release(struct kref *kref)
+{
+ struct ioq *ioq = container_of(kref, struct ioq, kref);
+
+ shm_signal_put(ioq->signal);
+ ioq->ops->release(ioq);
+}
+
+/**
+ * ioq_put() - release an IOQ context reference
+ * @ioq: IOQ context
+ *
+ **/
+static inline void ioq_put(struct ioq *ioq)
+{
+ kref_put(&ioq->kref, _ioq_kref_release);
+}
+
+/**
+ * ioq_notify_enable() - enables local notifications on an IOQ
+ * @ioq: IOQ context
+ * @flags: Reserved for future use, must be 0
+ *
+ * Enables/unmasks the registered ioq_notifier (if applicable) and waitq to
+ * receive wakeups whenever the remote side performs an ioq_signal() operation.
+ * A notification will be dispatched immediately if any pending signals have
+ * already been issued prior to invoking this call.
+ *
+ * This is synonymous with unmasking an interrupt.
+ *
+ * Returns: success = 0, <0 = ERRNO
+ *
+ **/
+static inline int ioq_notify_enable(struct ioq *ioq, int flags)
+{
+ return shm_signal_enable(ioq->signal, 0);
+}
+
+/**
+ * ioq_notify_disable() - disable local notifications on an IOQ
+ * @ioq: IOQ context
+ * @flags: Reserved for future use, must be 0
+ *
+ * Disables/masks the registered ioq_notifier (if applicable) and waitq
+ * from receiving any further notifications. Any subsequent calls to
+ * ioq_signal() by the remote side will update the ring as dirty, but
+ * will not traverse the locale boundary and will not invoke the notifier
+ * callback or wakeup the waitq. Signals delivered while masked will
+ * be deferred until ioq_notify_enable() is invoked
+ *
+ * This is synonymous with masking an interrupt
+ *
+ * Returns: success = 0, <0 = ERRNO
+ *
+ **/
+static inline int ioq_notify_disable(struct ioq *ioq, int flags)
+{
+ return shm_signal_disable(ioq->signal, 0);
+}
+
+/**
+ * ioq_signal() - notify the remote side about ring changes
+ * @ioq: IOQ context
+ * @flags: Reserved for future use, must be 0
+ *
+ * Marks the ring state as "dirty" and, if enabled, will traverse
+ * a locale boundary to invoke a remote notification. The remote
+ * side controls whether the notification should be delivered via
+ * the ioq_notify_enable/disable() interface.
+ *
+ * The specifics of how to traverse a locale boundary are abstracted
+ * by the ioq_ops->signal() interface and provided by a particular
+ * implementation. However, typically going north to south would be
+ * something like a syscall/hypercall, and going south to north would be
+ * something like a posix-signal/guest-interrupt.
+ *
+ * Returns: success = 0, <0 = ERRNO
+ *
+ **/
+static inline int ioq_signal(struct ioq *ioq, int flags)
+{
+ return shm_signal_inject(ioq->signal, 0);
+}
+
+/**
+ * ioq_count() - counts the number of outstanding descriptors in an index
+ * @ioq: IOQ context
+ * @type: Specifies the index type
+ * (*) valid: the descriptor is valid. This is usually
+ * used to keep track of descriptors that may not
+ * be carrying a useful payload, but still need to
+ * be tracked carefully.
+ * (*) inuse: Descriptors that carry useful payload
+ *
+ * Returns:
+ * (*) >=0: # of descriptors outstanding in the index
+ * (*) <0 = ERRNO
+ *
+ **/
+int ioq_count(struct ioq *ioq, enum ioq_idx_type type);
+
+/**
+ * ioq_remain() - counts the number of remaining descriptors in an index
+ * @ioq: IOQ context
+ * @type: Specifies the index type
+ * (*) valid: the descriptor is valid. This is usually
+ * used to keep track of descriptors that may not
+ * be carrying a useful payload, but still need to
+ * be tracked carefully.
+ * (*) inuse: Descriptors that carry useful payload
+ *
+ * This is the converse of ioq_count(). This function returns the number
+ * of "free" descriptors left in a particular index
+ *
+ * Returns:
+ * (*) >=0: # of descriptors remaining in the index
+ * (*) <0 = ERRNO
+ *
+ **/
+int ioq_remain(struct ioq *ioq, enum ioq_idx_type type);
+
+/**
+ * ioq_size() - counts the maximum number of descriptors in an ring
+ * @ioq: IOQ context
+ *
+ * This function returns the maximum number of descriptors supported in
+ * a ring, regardless of their current state (free or inuse).
+ *
+ * Returns:
+ * (*) >=0: total # of descriptors in the ring
+ * (*) <0 = ERRNO
+ *
+ **/
+int ioq_size(struct ioq *ioq);
+
+/**
+ * ioq_full() - determines if a specific index is "full"
+ * @ioq: IOQ context
+ * @type: Specifies the index type
+ * (*) valid: the descriptor is valid. This is usually
+ * used to keep track of descriptors that may not
+ * be carrying a useful payload, but still need to
+ * be tracked carefully.
+ * (*) inuse: Descriptors that carry useful payload
+ *
+ * Returns:
+ * (*) 0: index is not full
+ * (*) 1: index is full
+ * (*) <0 = ERRNO
+ *
+ **/
+int ioq_full(struct ioq *ioq, enum ioq_idx_type type);
+
+/**
+ * ioq_empty() - determines if a specific index is "empty"
+ * @ioq: IOQ context
+ * @type: Specifies the index type
+ * (*) valid: the descriptor is valid. This is usually
+ * used to keep track of descriptors that may not
+ * be carrying a useful payload, but still need to
+ * be tracked carefully.
+ * (*) inuse: Descriptors that carry useful payload
+ *
+ * Returns:
+ * (*) 0: index is not empty
+ * (*) 1: index is empty
+ * (*) <0 = ERRNO
+ *
+ **/
+static inline int ioq_empty(struct ioq *ioq, enum ioq_idx_type type)
+{
+ return !ioq_count(ioq, type);
+}
+
+/**
+ * ioq_iter_init() - initialize an iterator for IOQ descriptor traversal
+ * @ioq: IOQ context to iterate on
+ * @iter: Iterator context to init (usually from stack)
+ * @type: Specifies the index type to iterate against
+ * (*) valid: iterate against the "valid" index
+ * (*) inuse: iterate against the "inuse" index
+ * (*) both: iterate against both indexes simultaneously
+ * @flags: Bitfield with 0 or more bits set to alter behavior
+ * (*) autoupdate: automatically signal the remote side
+ * whenever the iterator pushes/pops to a new desc
+ * (*) noflipowner: do not flip the ownership bit during
+ * a push/pop operation
+ *
+ * Returns: success = 0, <0 = ERRNO
+ *
+ **/
+int ioq_iter_init(struct ioq *ioq, struct ioq_iterator *iter,
+ enum ioq_idx_type type, int flags);
+
+/**
+ * ioq_iter_seek() - seek to a specific location in the IOQ ring
+ * @iter: Iterator context (must be initialized with ioq_iter_init)
+ * @type: Specifies the type of seek operation
+ * (*) tail: seek to the absolute tail, offset is ignored
+ * (*) next: seek to the relative next, offset is ignored
+ * (*) head: seek to the absolute head, offset is ignored
+ * (*) set: seek to the absolute offset
+ * @offset: Offset for ioq_seek_set operations
+ * @flags: Reserved for future use, must be 0
+ *
+ * Returns: success = 0, <0 = ERRNO
+ *
+ **/
+int ioq_iter_seek(struct ioq_iterator *iter, enum ioq_seek_type type,
+ long offset, int flags);
+
+/**
+ * ioq_iter_push() - push the tail pointer forward
+ * @iter: Iterator context (must be initialized with ioq_iter_init)
+ * @flags: Reserved for future use, must be 0
+ *
+ * This function will simultaneously advance the tail ptr in the current
+ * index (valid/inuse, as specified in the ioq_iter_init) as well as
+ * perform a seek(next) operation. This effectively "pushes" a new pointer
+ * onto the tail of the index.
+ *
+ * Returns: success = 0, <0 = ERRNO
+ *
+ **/
+int ioq_iter_push(struct ioq_iterator *iter, int flags);
+
+/**
+ * ioq_iter_pop() - pop the head pointer from the ring
+ * @iter: Iterator context (must be initialized with ioq_iter_init)
+ * @flags: Reserved for future use, must be 0
+ *
+ * This function will simultaneously advance the head ptr in the current
+ * index (valid/inuse, as specified in the ioq_iter_init) as well as
+ * perform a seek(next) operation. This effectively "pops" a pointer
+ * from the head of the index.
+ *
+ * Returns: success = 0, <0 = ERRNO
+ *
+ **/
+int ioq_iter_pop(struct ioq_iterator *iter, int flags);
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_IOQ_H */
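The header describes an iterator idiom for walking the ring. A hedged sketch of the south side draining the inuse index, assuming an ioq already set up via ioq_init(); consume() is a hypothetical caller-supplied helper:

    struct ioq_iterator iter;
    int ret;

    ret = ioq_iter_init(ioq, &iter, ioq_idxtype_inuse, IOQ_ITER_AUTOUPDATE);
    if (ret < 0)
            return ret;

    ret = ioq_iter_seek(&iter, ioq_seek_head, 0, 0);
    if (ret < 0)
            return ret;

    while (iter.desc->sown) {               /* south currently owns this slot */
            consume(iter.desc);             /* hypothetical payload handler */
            ret = ioq_iter_pop(&iter, 0);   /* advance head, flip ownership */
            if (ret < 0)
                    break;
    }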
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index e0cc9a7db2b5..0e269038bb38 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -21,6 +21,10 @@ struct in6_pktinfo {
int ipi6_ifindex;
};
+struct ip6_mtuinfo {
+ struct sockaddr_in6 ip6m_addr;
+ __u32 ip6m_mtu;
+};
struct in6_ifreq {
struct in6_addr ifr6_addr;
@@ -250,9 +254,11 @@ struct inet6_skb_parm {
#define IP6SKB_XFRM_TRANSFORMED 1
#define IP6SKB_FORWARDED 2
+#define IP6SKB_REROUTED 4
};
#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb))
+#define IP6CBMTU(skb) ((struct ip6_mtuinfo *)((skb)->cb))
static inline int inet6_iif(const struct sk_buff *skb)
{
@@ -334,21 +340,25 @@ struct ipv6_pinfo {
dstopts:1,
odstopts:1,
rxflow:1,
- rxtclass:1;
+ rxtclass:1,
+ rxpmtu:1;
} bits;
__u16 all;
} rxopt;
/* sockopt flags */
- __u8 recverr:1,
+ __u16 recverr:1,
sndflow:1,
pmtudisc:2,
ipv6only:1,
- srcprefs:3; /* 001: prefer temporary address
+ srcprefs:3, /* 001: prefer temporary address
* 010: prefer public address
* 100: prefer care-of address
*/
+ dontfrag:1;
+ __u8 min_hopcount;
__u8 tclass;
+ __u8 padding;
__u32 dst_cookie;
@@ -358,6 +368,7 @@ struct ipv6_pinfo {
struct ipv6_txoptions *opt;
struct sk_buff *pktoptions;
+ struct sk_buff *rxpmtu;
struct {
struct ipv6_txoptions *opt;
u8 hop_limit;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 707ab122e2e6..c03243ad84b4 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -195,6 +195,7 @@ struct irq_desc {
raw_spinlock_t lock;
#ifdef CONFIG_SMP
cpumask_var_t affinity;
+ const struct cpumask *affinity_hint;
unsigned int node;
#ifdef CONFIG_GENERIC_PENDING_IRQ
cpumask_var_t pending_mask;
diff --git a/include/linux/isapnp.h b/include/linux/isapnp.h
index cd5a269fdb5e..e2d28b026a8c 100644
--- a/include/linux/isapnp.h
+++ b/include/linux/isapnp.h
@@ -43,10 +43,10 @@
*/
#ifdef __KERNEL__
+#include <linux/mod_devicetable.h>
#define DEVICE_COUNT_COMPATIBLE 4
-#define ISAPNP_ANY_ID 0xffff
#define ISAPNP_CARD_DEVS 8
#define ISAPNP_CARD_ID(_va, _vb, _vc, _device) \
@@ -74,12 +74,6 @@ struct isapnp_card_id {
#define ISAPNP_DEVICE_SINGLE_END \
.card_vendor = 0, .card_device = 0
-struct isapnp_device_id {
- unsigned short card_vendor, card_device;
- unsigned short vendor, function;
- unsigned long driver_data; /* data private to the driver */
-};
-
#if defined(CONFIG_ISAPNP) || (defined(CONFIG_ISAPNP_MODULE) && defined(MODULE))
#define __ISAPNP__
diff --git a/include/linux/iscsi_boot_sysfs.h b/include/linux/iscsi_boot_sysfs.h
new file mode 100644
index 000000000000..f1e6c184f14f
--- /dev/null
+++ b/include/linux/iscsi_boot_sysfs.h
@@ -0,0 +1,123 @@
+/*
+ * Export the iSCSI boot info to userland via sysfs.
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2010 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2.0 as published by
+ * the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _ISCSI_BOOT_SYSFS_
+#define _ISCSI_BOOT_SYSFS_
+
+/*
+ * The text attributes names for each of the kobjects.
+*/
+enum iscsi_boot_eth_properties_enum {
+ ISCSI_BOOT_ETH_INDEX,
+ ISCSI_BOOT_ETH_FLAGS,
+ ISCSI_BOOT_ETH_IP_ADDR,
+ ISCSI_BOOT_ETH_SUBNET_MASK,
+ ISCSI_BOOT_ETH_ORIGIN,
+ ISCSI_BOOT_ETH_GATEWAY,
+ ISCSI_BOOT_ETH_PRIMARY_DNS,
+ ISCSI_BOOT_ETH_SECONDARY_DNS,
+ ISCSI_BOOT_ETH_DHCP,
+ ISCSI_BOOT_ETH_VLAN,
+ ISCSI_BOOT_ETH_MAC,
+ /* eth_pci_bdf - this is replaced by link to the device itself. */
+ ISCSI_BOOT_ETH_HOSTNAME,
+ ISCSI_BOOT_ETH_END_MARKER,
+};
+
+enum iscsi_boot_tgt_properties_enum {
+ ISCSI_BOOT_TGT_INDEX,
+ ISCSI_BOOT_TGT_FLAGS,
+ ISCSI_BOOT_TGT_IP_ADDR,
+ ISCSI_BOOT_TGT_PORT,
+ ISCSI_BOOT_TGT_LUN,
+ ISCSI_BOOT_TGT_CHAP_TYPE,
+ ISCSI_BOOT_TGT_NIC_ASSOC,
+ ISCSI_BOOT_TGT_NAME,
+ ISCSI_BOOT_TGT_CHAP_NAME,
+ ISCSI_BOOT_TGT_CHAP_SECRET,
+ ISCSI_BOOT_TGT_REV_CHAP_NAME,
+ ISCSI_BOOT_TGT_REV_CHAP_SECRET,
+ ISCSI_BOOT_TGT_END_MARKER,
+};
+
+enum iscsi_boot_initiator_properties_enum {
+ ISCSI_BOOT_INI_INDEX,
+ ISCSI_BOOT_INI_FLAGS,
+ ISCSI_BOOT_INI_ISNS_SERVER,
+ ISCSI_BOOT_INI_SLP_SERVER,
+ ISCSI_BOOT_INI_PRI_RADIUS_SERVER,
+ ISCSI_BOOT_INI_SEC_RADIUS_SERVER,
+ ISCSI_BOOT_INI_INITIATOR_NAME,
+ ISCSI_BOOT_INI_END_MARKER,
+};
+
+struct attribute_group;
+
+struct iscsi_boot_kobj {
+ struct kobject kobj;
+ struct attribute_group *attr_group;
+ struct list_head list;
+
+ /*
+ * Pointer to store driver specific info. If set this will
+ * be freed for the LLD when the kobj release function is called.
+ */
+ void *data;
+ /*
+ * Driver specific show function.
+ *
+ * The enum of the type. This can be any value of the above
+ * properties.
+ */
+ ssize_t (*show) (void *data, int type, char *buf);
+
+ /*
+	 * Driver specific visibility function.
+	 * The function should return whether the attr should be readable,
+	 * writable, or not shown at all.
+ *
+ * The enum of the type. This can be any value of the above
+ * properties.
+ */
+ mode_t (*is_visible) (void *data, int type);
+};
+
+struct iscsi_boot_kset {
+ struct list_head kobj_list;
+ struct kset *kset;
+};
+
+struct iscsi_boot_kobj *
+iscsi_boot_create_initiator(struct iscsi_boot_kset *boot_kset, int index,
+ void *data,
+ ssize_t (*show) (void *data, int type, char *buf),
+ mode_t (*is_visible) (void *data, int type));
+
+struct iscsi_boot_kobj *
+iscsi_boot_create_ethernet(struct iscsi_boot_kset *boot_kset, int index,
+ void *data,
+ ssize_t (*show) (void *data, int type, char *buf),
+ mode_t (*is_visible) (void *data, int type));
+struct iscsi_boot_kobj *
+iscsi_boot_create_target(struct iscsi_boot_kset *boot_kset, int index,
+ void *data,
+ ssize_t (*show) (void *data, int type, char *buf),
+ mode_t (*is_visible) (void *data, int type));
+
+struct iscsi_boot_kset *iscsi_boot_create_kset(const char *set_name);
+struct iscsi_boot_kset *iscsi_boot_create_host_kset(unsigned int hostno);
+void iscsi_boot_destroy_kset(struct iscsi_boot_kset *boot_kset);
+
+#endif
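A sketch of how an LLD might publish one target entry through this interface; the show/visibility callbacks, the mydrv_tgt structure and the surrounding probe code are the driver's own, not part of this header:

    static ssize_t mydrv_show_tgt(void *data, int type, char *buf)
    {
            struct mydrv_tgt *tgt = data;   /* hypothetical driver structure */

            switch (type) {
            case ISCSI_BOOT_TGT_NAME:
                    return sprintf(buf, "%s\n", tgt->iqn);
            default:
                    return -ENOSYS;
            }
    }

    static mode_t mydrv_tgt_visible(void *data, int type)
    {
            return (type == ISCSI_BOOT_TGT_NAME) ? S_IRUGO : 0;
    }

    /* in the host setup path: */
    boot_kset = iscsi_boot_create_host_kset(shost->host_no);
    if (!boot_kset)
            return -ENOMEM;
    if (!iscsi_boot_create_target(boot_kset, 0, tgt,
                                  mydrv_show_tgt, mydrv_tgt_visible))
            goto free_kset;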
diff --git a/include/linux/iscsi_ibft.h b/include/linux/iscsi_ibft.h
index d2e4042f8f5e..8ba7e5b9d62c 100644
--- a/include/linux/iscsi_ibft.h
+++ b/include/linux/iscsi_ibft.h
@@ -21,21 +21,13 @@
#ifndef ISCSI_IBFT_H
#define ISCSI_IBFT_H
-struct ibft_table_header {
- char signature[4];
- u32 length;
- u8 revision;
- u8 checksum;
- char oem_id[6];
- char oem_table_id[8];
- char reserved[24];
-} __attribute__((__packed__));
+#include <acpi/acpi.h>
/*
* Logical location of iSCSI Boot Format Table.
* If the value is NULL there is no iBFT on the machine.
*/
-extern struct ibft_table_header *ibft_addr;
+extern struct acpi_table_ibft *ibft_addr;
/*
* Routine used to find and reserve the iSCSI Boot Format Table. The
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 516a2a27e87a..e06965081ba5 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -427,9 +427,9 @@ struct transaction_s
enum {
T_RUNNING,
T_LOCKED,
- T_RUNDOWN,
T_FLUSH,
T_COMMIT,
+ T_COMMIT_RECORD,
T_FINISHED
} t_state;
@@ -991,6 +991,7 @@ int journal_start_commit(journal_t *journal, tid_t *tid);
int journal_force_commit_nested(journal_t *journal);
int log_wait_commit(journal_t *journal, tid_t tid);
int log_do_checkpoint(journal_t *journal);
+int journal_trans_will_send_data_barrier(journal_t *journal, tid_t tid);
void __log_wait_for_space(journal_t *journal);
extern void __journal_drop_transaction(journal_t *, transaction_t *);
diff --git a/include/linux/jffs2.h b/include/linux/jffs2.h
index 2b32d638147d..0874ab59ffef 100644
--- a/include/linux/jffs2.h
+++ b/include/linux/jffs2.h
@@ -215,8 +215,8 @@ union jffs2_node_union
/* Data payload for device nodes. */
union jffs2_device_node {
- jint16_t old;
- jint32_t new;
+ jint16_t old_id;
+ jint32_t new_id;
};
#endif /* __LINUX_JFFS2_H__ */
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
new file mode 100644
index 000000000000..ccb2b3ec0fe8
--- /dev/null
+++ b/include/linux/kdb.h
@@ -0,0 +1,117 @@
+#ifndef _KDB_H
+#define _KDB_H
+
+/*
+ * Kernel Debugger Architecture Independent Global Headers
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2000-2007 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 2000 Stephane Eranian <eranian@hpl.hp.com>
+ * Copyright (C) 2009 Jason Wessel <jason.wessel@windriver.com>
+ */
+
+#ifdef CONFIG_KGDB_KDB
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <asm/atomic.h>
+
+#define KDB_POLL_FUNC_MAX 5
+extern int kdb_poll_idx;
+
+/*
+ * kdb_initial_cpu is initialized to -1, and is set to the cpu
+ * number whenever the kernel debugger is entered.
+ */
+extern int kdb_initial_cpu;
+extern atomic_t kdb_event;
+
+/*
+ * kdb_diemsg
+ *
+ * Contains a pointer to the last string supplied to the
+ * kernel 'die' panic function.
+ */
+extern const char *kdb_diemsg;
+
+#define KDB_FLAG_EARLYKDB (1 << 0) /* set from boot parameter kdb=early */
+#define KDB_FLAG_CATASTROPHIC (1 << 1) /* A catastrophic event has occurred */
+#define KDB_FLAG_CMD_INTERRUPT (1 << 2) /* Previous command was interrupted */
+#define KDB_FLAG_NOIPI (1 << 3) /* Do not send IPIs */
+#define KDB_FLAG_ONLY_DO_DUMP (1 << 4) /* Only do a dump, used when
+ * kdb is off */
+#define KDB_FLAG_NO_CONSOLE (1 << 5) /* No console is available,
+ * kdb is disabled */
+#define KDB_FLAG_NO_VT_CONSOLE (1 << 6) /* No VT console is available, do
+ * not use keyboard */
+#define KDB_FLAG_NO_I8042 (1 << 7) /* No i8042 chip is available, do
+ * not use keyboard */
+
+extern int kdb_flags; /* Global flags, see kdb_state for per cpu state */
+
+extern void kdb_save_flags(void);
+extern void kdb_restore_flags(void);
+
+#define KDB_FLAG(flag) (kdb_flags & KDB_FLAG_##flag)
+#define KDB_FLAG_SET(flag) ((void)(kdb_flags |= KDB_FLAG_##flag))
+#define KDB_FLAG_CLEAR(flag) ((void)(kdb_flags &= ~KDB_FLAG_##flag))
+
+/*
+ * External entry point for the kernel debugger. The pt_regs
+ * at the time of entry are supplied along with the reason for
+ * entry to the kernel debugger.
+ */
+
+typedef enum {
+ KDB_REASON_ENTER = 1, /* KDB_ENTER() trap/fault - regs valid */
+ KDB_REASON_ENTER_SLAVE, /* KDB_ENTER_SLAVE() trap/fault - regs valid */
+ KDB_REASON_BREAK, /* Breakpoint inst. - regs valid */
+ KDB_REASON_DEBUG, /* Debug Fault - regs valid */
+ KDB_REASON_OOPS, /* Kernel Oops - regs valid */
+ KDB_REASON_SWITCH, /* CPU switch - regs valid*/
+ KDB_REASON_KEYBOARD, /* Keyboard entry - regs valid */
+ KDB_REASON_NMI, /* Non-maskable interrupt; regs valid */
+ KDB_REASON_RECURSE, /* Recursive entry to kdb;
+ * regs probably valid */
+ KDB_REASON_SSTEP, /* Single Step trap. - regs valid */
+} kdb_reason_t;
+
+extern int kdb_trap_printk;
+extern int vkdb_printf(const char *fmt, va_list args)
+ __attribute__ ((format (printf, 1, 0)));
+extern int kdb_printf(const char *, ...)
+ __attribute__ ((format (printf, 1, 2)));
+typedef int (*kdb_printf_t)(const char *, ...)
+ __attribute__ ((format (printf, 1, 2)));
+
+extern void kdb_init(int level);
+
+/* Access to kdb specific polling devices */
+typedef int (*get_char_func)(void);
+extern get_char_func kdb_poll_funcs[];
+extern int kdb_get_kbd_char(void);
+
+static inline
+int kdb_process_cpu(const struct task_struct *p)
+{
+ unsigned int cpu = task_thread_info(p)->cpu;
+ if (cpu > num_possible_cpus())
+ cpu = 0;
+ return cpu;
+}
+
+/* kdb access to register set for stack dumping */
+extern struct pt_regs *kdb_current_regs;
+
+#else /* ! CONFIG_KGDB_KDB */
+#define kdb_printf(...)
+#define kdb_init(x)
+#endif /* CONFIG_KGDB_KDB */
+enum {
+ KDB_NOT_INITIALIZED,
+ KDB_INIT_EARLY,
+ KDB_INIT_FULL,
+};
+#endif /* !_KDB_H */
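For orientation, a minimal sketch of how kernel code might use the interface declared above, assuming CONFIG_KGDB_KDB=y (the helper name is made up; kdb_printf() compiles away entirely when kdb is disabled):

#include <linux/kdb.h>

/* Hypothetical helper: report why kdb was entered and any pending die message. */
static void example_kdb_report_entry(kdb_reason_t reason)
{
	kdb_printf("kdb entered on cpu %d, reason %d\n",
		   kdb_initial_cpu, (int)reason);
	if (KDB_FLAG(CATASTROPHIC) && kdb_diemsg)
		kdb_printf("last die() message: %s\n", kdb_diemsg);
}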
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 9365227dbaf6..fc33af911852 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -4,6 +4,8 @@
/*
* 'kernel.h' contains some often-used function prototypes etc
*/
+#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
+#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
#ifdef __KERNEL__
@@ -37,8 +39,8 @@ extern const char linux_proc_banner[];
#define STACK_MAGIC 0xdeadbeef
-#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1)
-#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask))
+#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
+#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask))
#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
@@ -490,6 +492,13 @@ static inline void tracing_off(void) { }
static inline void tracing_off_permanent(void) { }
static inline int tracing_is_on(void) { return 0; }
#endif
+
+enum ftrace_dump_mode {
+ DUMP_NONE,
+ DUMP_ALL,
+ DUMP_ORIG,
+};
+
#ifdef CONFIG_TRACING
extern void tracing_start(void);
extern void tracing_stop(void);
@@ -571,7 +580,7 @@ __ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
extern int
__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
-extern void ftrace_dump(void);
+extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
#else
static inline void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
@@ -592,7 +601,7 @@ ftrace_vprintk(const char *fmt, va_list ap)
{
return 0;
}
-static inline void ftrace_dump(void) { }
+static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
#endif /* CONFIG_TRACING */
/*
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index e117b1aee69c..9fad0527344f 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -201,7 +201,7 @@ static inline __must_check unsigned int kfifo_avail(struct kfifo *fifo)
* @n: the length of the data to be added.
* @lock: pointer to the spinlock to use for locking.
*
- * This function copies at most @len bytes from the @from buffer into
+ * This function copies at most @n bytes from the @from buffer into
* the FIFO depending on the free space, and returns the number of
* bytes copied.
*/
@@ -227,7 +227,7 @@ static inline unsigned int kfifo_in_locked(struct kfifo *fifo,
* @n: the size of the destination buffer.
* @lock: pointer to the spinlock to use for locking.
*
- * This function copies at most @len bytes from the FIFO into the
+ * This function copies at most @n bytes from the FIFO into the
* @to buffer and returns the number of copied bytes.
*/
static inline __must_check unsigned int kfifo_out_locked(struct kfifo *fifo,
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index 19ec41a183f5..9340f34d1bb5 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -16,10 +16,12 @@
#include <linux/serial_8250.h>
#include <linux/linkage.h>
#include <linux/init.h>
-
#include <asm/atomic.h>
+#ifdef CONFIG_HAVE_ARCH_KGDB
#include <asm/kgdb.h>
+#endif
+#ifdef CONFIG_KGDB
struct pt_regs;
/**
@@ -34,20 +36,6 @@ struct pt_regs;
extern int kgdb_skipexception(int exception, struct pt_regs *regs);
/**
- * kgdb_post_primary_code - (optional) Save error vector/code numbers.
- * @regs: Original pt_regs.
- * @e_vector: Original error vector.
- * @err_code: Original error code.
- *
- * This is usually needed on architectures which support SMP and
- * KGDB. This function is called after all the secondary cpus have
- * been put to a know spin state and the primary CPU has control over
- * KGDB.
- */
-extern void kgdb_post_primary_code(struct pt_regs *regs, int e_vector,
- int err_code);
-
-/**
* kgdb_disable_hw_debug - (optional) Disable hardware debugging hook
* @regs: Current &struct pt_regs.
*
@@ -72,6 +60,7 @@ struct uart_port;
void kgdb_breakpoint(void);
extern int kgdb_connected;
+extern int kgdb_io_module_registered;
extern atomic_t kgdb_setting_breakpoint;
extern atomic_t kgdb_cpu_doing_single_step;
@@ -202,12 +191,34 @@ kgdb_arch_handle_exception(int vector, int signo, int err_code,
*/
extern void kgdb_roundup_cpus(unsigned long flags);
+/**
+ * kgdb_arch_set_pc - Generic call back to the program counter
+ * @regs: Current &struct pt_regs.
+ * @pc: The new value for the program counter
+ *
+ * This function handles updating the program counter and requires an
+ * architecture specific implementation.
+ */
+extern void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc);
+
+
/* Optional functions. */
extern int kgdb_validate_break_address(unsigned long addr);
extern int kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr);
extern int kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle);
/**
+ * kgdb_arch_late - Perform any architecture specific initialization.
+ *
+ * This function will handle the late initialization of any
+ * architecture specific callbacks. This is an optional function for
+ * handling things like late initialization of hw breakpoints. The
+ * default implementation does nothing.
+ */
+extern void kgdb_arch_late(void);
+
+
+/**
* struct kgdb_arch - Describe architecture specific values.
* @gdb_bpt_instr: The instruction to trigger a breakpoint.
* @flags: Flags for the breakpoint, currently just %KGDB_HW_BREAKPOINT.
@@ -247,6 +258,8 @@ struct kgdb_arch {
* the I/O driver.
* @post_exception: Pointer to a function that will do any cleanup work
* for the I/O driver.
+ * @is_console: 1 if the end device is a console, 0 if the I/O device is
+ * not a console
*/
struct kgdb_io {
const char *name;
@@ -256,6 +269,7 @@ struct kgdb_io {
int (*init) (void);
void (*pre_exception) (void);
void (*post_exception) (void);
+ int is_console;
};
extern struct kgdb_arch arch_kgdb_ops;
@@ -264,12 +278,14 @@ extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
+extern struct kgdb_io *dbg_io_ops;
extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
extern int kgdb_mem2hex(char *mem, char *buf, int count);
extern int kgdb_hex2mem(char *buf, char *mem, int count);
extern int kgdb_isremovedbreak(unsigned long addr);
+extern void kgdb_schedule_breakpoint(void);
extern int
kgdb_handle_exception(int ex_vector, int signo, int err_code,
@@ -278,5 +294,12 @@ extern int kgdb_nmicallback(int cpu, void *regs);
extern int kgdb_single_step;
extern atomic_t kgdb_active;
-
+#define in_dbg_master() \
+ (raw_smp_processor_id() == atomic_read(&kgdb_active))
+extern bool dbg_is_early;
+extern void __init dbg_late_init(void);
+#else /* ! CONFIG_KGDB */
+#define in_dbg_master() (0)
+#define dbg_late_init()
+#endif /* ! CONFIG_KGDB */
#endif /* _KGDB_H_ */
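As a sketch only, an I/O driver built on the extended interface above might register itself like this; the character callbacks are placeholders and their signatures are assumed from the existing struct kgdb_io layout:

#include <linux/kgdb.h>

static int example_dbg_read_char(void)
{
	return -1;			/* no character pending */
}

static void example_dbg_write_char(u8 c)
{
	/* push @c out over the debug transport */
}

static struct kgdb_io example_kgdb_io_ops = {
	.name		= "example_dbg",
	.read_char	= example_dbg_read_char,
	.write_char	= example_dbg_write_char,
	.is_console	= 0,	/* new field: this transport is not also a console */
};

static int __init example_dbg_init(void)
{
	return kgdb_register_io_module(&example_kgdb_io_ops);
}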
diff --git a/include/linux/kobj_map.h b/include/linux/kobj_map.h
index 73717ed9ea79..18ca75ffcc5a 100644
--- a/include/linux/kobj_map.h
+++ b/include/linux/kobj_map.h
@@ -1,3 +1,10 @@
+/*
+ * kobj_map.h
+ */
+
+#ifndef _KOBJ_MAP_H_
+#define _KOBJ_MAP_H_
+
#include <linux/mutex.h>
typedef struct kobject *kobj_probe_t(dev_t, int *, void *);
@@ -8,3 +15,5 @@ int kobj_map(struct kobj_map *, dev_t, unsigned long, struct module *,
void kobj_unmap(struct kobj_map *, dev_t, unsigned long);
struct kobject *kobj_lookup(struct kobj_map *, dev_t, int *);
struct kobj_map *kobj_map_init(kobj_probe_t *, struct mutex *);
+
+#endif /* _KOBJ_MAP_H_ */
diff --git a/include/linux/kref.h b/include/linux/kref.h
index b0cb0ebad9e6..baf4b9e4b194 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -1,5 +1,5 @@
/*
- * kref.c - library routines for handling generic reference counted objects
+ * kref.h - library routines for handling generic reference counted objects
*
* Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (C) 2004 IBM Corp.
diff --git a/include/linux/ks8842.h b/include/linux/ks8842.h
new file mode 100644
index 000000000000..da0341b8ca0a
--- /dev/null
+++ b/include/linux/ks8842.h
@@ -0,0 +1,34 @@
+/*
+ * ks8842.h KS8842 platform data struct definition
+ * Copyright (c) 2010 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _LINUX_KS8842_H
+#define _LINUX_KS8842_H
+
+#include <linux/if_ether.h>
+
+/**
+ * struct ks8842_platform_data - Platform data of the KS8842 network driver
+ * @macaddr: The MAC address of the device, set to all zeros to use the one
+ * in the chip.
+ *
+ */
+struct ks8842_platform_data {
+ u8 macaddr[ETH_ALEN];
+};
+
+#endif
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 60df9c84ecae..23ea02253900 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -160,6 +160,7 @@ struct kvm_pit_config {
#define KVM_EXIT_DCR 15
#define KVM_EXIT_NMI 16
#define KVM_EXIT_INTERNAL_ERROR 17
+#define KVM_EXIT_OSI 18
/* For KVM_EXIT_INTERNAL_ERROR */
#define KVM_INTERNAL_ERROR_EMULATION 1
@@ -259,6 +260,10 @@ struct kvm_run {
__u32 ndata;
__u64 data[16];
} internal;
+ /* KVM_EXIT_OSI */
+ struct {
+ __u64 gprs[32];
+ } osi;
/* Fix the size of the union. */
char padding[256];
};
@@ -400,6 +405,15 @@ struct kvm_ioeventfd {
__u8 pad[36];
};
+/* for KVM_ENABLE_CAP */
+struct kvm_enable_cap {
+ /* in */
+ __u32 cap;
+ __u32 flags;
+ __u64 args[4];
+ __u8 pad[64];
+};
+
#define KVMIO 0xAE
/*
@@ -501,7 +515,15 @@ struct kvm_ioeventfd {
#define KVM_CAP_HYPERV_VAPIC 45
#define KVM_CAP_HYPERV_SPIN 46
#define KVM_CAP_PCI_SEGMENT 47
+#define KVM_CAP_PPC_PAIRED_SINGLES 48
+#define KVM_CAP_INTR_SHADOW 49
+#ifdef __KVM_HAVE_DEBUGREGS
+#define KVM_CAP_DEBUGREGS 50
+#endif
#define KVM_CAP_X86_ROBUST_SINGLESTEP 51
+#define KVM_CAP_PPC_OSI 52
+#define KVM_CAP_PPC_UNSET_IRQ 53
+#define KVM_CAP_ENABLE_CAP 54
#ifdef KVM_CAP_IRQ_ROUTING
@@ -688,6 +710,10 @@ struct kvm_clock_data {
/* Available with KVM_CAP_VCPU_EVENTS */
#define KVM_GET_VCPU_EVENTS _IOR(KVMIO, 0x9f, struct kvm_vcpu_events)
#define KVM_SET_VCPU_EVENTS _IOW(KVMIO, 0xa0, struct kvm_vcpu_events)
+/* Available with KVM_CAP_DEBUGREGS */
+#define KVM_GET_DEBUGREGS _IOR(KVMIO, 0xa1, struct kvm_debugregs)
+#define KVM_SET_DEBUGREGS _IOW(KVMIO, 0xa2, struct kvm_debugregs)
+#define KVM_ENABLE_CAP _IOW(KVMIO, 0xa3, struct kvm_enable_cap)
#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
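For illustration, a hedged userspace sketch of driving the new KVM_ENABLE_CAP ioctl; vcpu_fd is assumed to be a file descriptor obtained earlier via KVM_CREATE_VCPU:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int example_enable_osi(int vcpu_fd)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_PPC_OSI;	/* no flags or args needed for this cap */

	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}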
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 169d07758ee5..7cb116afa1cd 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -105,6 +105,12 @@ struct kvm_vcpu {
struct kvm_vcpu_arch arch;
};
+/*
+ * Some of the bitops functions do not support very long bitmaps.
+ * This limit must be chosen so that it never exceeds what they can handle.
+ */
+#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
+
struct kvm_memory_slot {
gfn_t base_gfn;
unsigned long npages;
@@ -237,17 +243,23 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);
-int kvm_init(void *opaque, unsigned int vcpu_size,
+int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
struct module *module);
void kvm_exit(void);
void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
+static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
+{
+ return rcu_dereference_check(kvm->memslots,
+ srcu_read_lock_held(&kvm->srcu)
+ || lockdep_is_held(&kvm->slots_lock));
+}
+
#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
-struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);
extern struct page *bad_page;
extern pfn_t bad_pfn;
diff --git a/include/linux/l2tp.h b/include/linux/l2tp.h
new file mode 100644
index 000000000000..4bdb31df8e72
--- /dev/null
+++ b/include/linux/l2tp.h
@@ -0,0 +1,163 @@
+/*
+ * L2TP-over-IP socket for L2TPv3.
+ *
+ * Author: James Chapman <jchapman@katalix.com>
+ */
+
+#ifndef _LINUX_L2TP_H_
+#define _LINUX_L2TP_H_
+
+#include <linux/types.h>
+#ifdef __KERNEL__
+#include <linux/socket.h>
+#include <linux/in.h>
+#else
+#include <netinet/in.h>
+#endif
+
+#define IPPROTO_L2TP 115
+
+/**
+ * struct sockaddr_l2tpip - the sockaddr structure for L2TP-over-IP sockets
+ * @l2tp_family: address family number AF_L2TPIP.
+ * @l2tp_addr: protocol specific address information
+ * @l2tp_conn_id: connection id of tunnel
+ */
+#define __SOCK_SIZE__ 16 /* sizeof(struct sockaddr) */
+struct sockaddr_l2tpip {
+ /* The first fields must match struct sockaddr_in */
+ sa_family_t l2tp_family; /* AF_INET */
+ __be16 l2tp_unused; /* INET port number (unused) */
+ struct in_addr l2tp_addr; /* Internet address */
+
+ __u32 l2tp_conn_id; /* Connection ID of tunnel */
+
+ /* Pad to size of `struct sockaddr'. */
+ unsigned char __pad[sizeof(struct sockaddr) - sizeof(sa_family_t) -
+ sizeof(__be16) - sizeof(struct in_addr) -
+ sizeof(__u32)];
+};
+
+/*****************************************************************************
+ * NETLINK_GENERIC netlink family.
+ *****************************************************************************/
+
+/*
+ * Commands.
+ * Valid TLVs of each command are:-
+ * TUNNEL_CREATE - CONN_ID, pw_type, netns, ifname, ipinfo, udpinfo, udpcsum, vlanid
+ * TUNNEL_DELETE - CONN_ID
+ * TUNNEL_MODIFY - CONN_ID, udpcsum
+ * TUNNEL_GETSTATS - CONN_ID, (stats)
+ * TUNNEL_GET - CONN_ID, (...)
+ * SESSION_CREATE - SESSION_ID, PW_TYPE, offset, data_seq, cookie, peer_cookie, offset, l2spec
+ * SESSION_DELETE - SESSION_ID
+ * SESSION_MODIFY - SESSION_ID, data_seq
+ * SESSION_GET - SESSION_ID, (...)
+ * SESSION_GETSTATS - SESSION_ID, (stats)
+ *
+ */
+enum {
+ L2TP_CMD_NOOP,
+ L2TP_CMD_TUNNEL_CREATE,
+ L2TP_CMD_TUNNEL_DELETE,
+ L2TP_CMD_TUNNEL_MODIFY,
+ L2TP_CMD_TUNNEL_GET,
+ L2TP_CMD_SESSION_CREATE,
+ L2TP_CMD_SESSION_DELETE,
+ L2TP_CMD_SESSION_MODIFY,
+ L2TP_CMD_SESSION_GET,
+ __L2TP_CMD_MAX,
+};
+
+#define L2TP_CMD_MAX (__L2TP_CMD_MAX - 1)
+
+/*
+ * ATTR types defined for L2TP
+ */
+enum {
+ L2TP_ATTR_NONE, /* no data */
+ L2TP_ATTR_PW_TYPE, /* u16, enum l2tp_pwtype */
+ L2TP_ATTR_ENCAP_TYPE, /* u16, enum l2tp_encap_type */
+ L2TP_ATTR_OFFSET, /* u16 */
+ L2TP_ATTR_DATA_SEQ, /* u16 */
+ L2TP_ATTR_L2SPEC_TYPE, /* u8, enum l2tp_l2spec_type */
+ L2TP_ATTR_L2SPEC_LEN, /* u8, enum l2tp_l2spec_type */
+ L2TP_ATTR_PROTO_VERSION, /* u8 */
+ L2TP_ATTR_IFNAME, /* string */
+ L2TP_ATTR_CONN_ID, /* u32 */
+ L2TP_ATTR_PEER_CONN_ID, /* u32 */
+ L2TP_ATTR_SESSION_ID, /* u32 */
+ L2TP_ATTR_PEER_SESSION_ID, /* u32 */
+ L2TP_ATTR_UDP_CSUM, /* u8 */
+ L2TP_ATTR_VLAN_ID, /* u16 */
+ L2TP_ATTR_COOKIE, /* 0, 4 or 8 bytes */
+ L2TP_ATTR_PEER_COOKIE, /* 0, 4 or 8 bytes */
+ L2TP_ATTR_DEBUG, /* u32 */
+ L2TP_ATTR_RECV_SEQ, /* u8 */
+ L2TP_ATTR_SEND_SEQ, /* u8 */
+ L2TP_ATTR_LNS_MODE, /* u8 */
+ L2TP_ATTR_USING_IPSEC, /* u8 */
+ L2TP_ATTR_RECV_TIMEOUT, /* msec */
+ L2TP_ATTR_FD, /* int */
+ L2TP_ATTR_IP_SADDR, /* u32 */
+ L2TP_ATTR_IP_DADDR, /* u32 */
+ L2TP_ATTR_UDP_SPORT, /* u16 */
+ L2TP_ATTR_UDP_DPORT, /* u16 */
+ L2TP_ATTR_MTU, /* u16 */
+ L2TP_ATTR_MRU, /* u16 */
+ L2TP_ATTR_STATS, /* nested */
+ __L2TP_ATTR_MAX,
+};
+
+#define L2TP_ATTR_MAX (__L2TP_ATTR_MAX - 1)
+
+/* Nested in L2TP_ATTR_STATS */
+enum {
+ L2TP_ATTR_STATS_NONE, /* no data */
+ L2TP_ATTR_TX_PACKETS, /* u64 */
+ L2TP_ATTR_TX_BYTES, /* u64 */
+ L2TP_ATTR_TX_ERRORS, /* u64 */
+ L2TP_ATTR_RX_PACKETS, /* u64 */
+ L2TP_ATTR_RX_BYTES, /* u64 */
+ L2TP_ATTR_RX_SEQ_DISCARDS, /* u64 */
+ L2TP_ATTR_RX_OOS_PACKETS, /* u64 */
+ L2TP_ATTR_RX_ERRORS, /* u64 */
+ __L2TP_ATTR_STATS_MAX,
+};
+
+#define L2TP_ATTR_STATS_MAX (__L2TP_ATTR_STATS_MAX - 1)
+
+enum l2tp_pwtype {
+ L2TP_PWTYPE_NONE = 0x0000,
+ L2TP_PWTYPE_ETH_VLAN = 0x0004,
+ L2TP_PWTYPE_ETH = 0x0005,
+ L2TP_PWTYPE_PPP = 0x0007,
+ L2TP_PWTYPE_PPP_AC = 0x0008,
+ L2TP_PWTYPE_IP = 0x000b,
+ __L2TP_PWTYPE_MAX
+};
+
+enum l2tp_l2spec_type {
+ L2TP_L2SPECTYPE_NONE,
+ L2TP_L2SPECTYPE_DEFAULT,
+};
+
+enum l2tp_encap_type {
+ L2TP_ENCAPTYPE_UDP,
+ L2TP_ENCAPTYPE_IP,
+};
+
+enum l2tp_seqmode {
+ L2TP_SEQ_NONE = 0,
+ L2TP_SEQ_IP = 1,
+ L2TP_SEQ_ALL = 2,
+};
+
+/*
+ * NETLINK_GENERIC related info
+ */
+#define L2TP_GENL_NAME "l2tp"
+#define L2TP_GENL_VERSION 0x1
+
+#endif
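A rough userspace sketch of using the new definitions above; the address and connection id are example values, and error handling is minimal:

#include <linux/l2tp.h>
#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>

static int example_bind_l2tp_tunnel(uint32_t conn_id)
{
	struct sockaddr_l2tpip addr;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_L2TP);
	if (fd < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.l2tp_family = AF_INET;
	addr.l2tp_addr.s_addr = htonl(INADDR_ANY);
	addr.l2tp_conn_id = conn_id;	/* local tunnel connection id */

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return -1;

	return fd;
}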
diff --git a/include/linux/libata.h b/include/linux/libata.h
index b2f2003b92e5..242eb2646101 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -256,12 +256,13 @@ enum {
ATA_TMOUT_INTERNAL_QUICK = 5000,
ATA_TMOUT_MAX_PARK = 30000,
- /* FIXME: GoVault needs 2s but we can't afford that without
- * parallel probing. 800ms is enough for iVDR disk
- * HHD424020F7SV00. Increase to 2secs when parallel probing
- * is in place.
+ /*
+ * GoVault needs 2s and iVDR disk HHD424020F7SV00 800ms. 2s
+ * is too much without parallel probing. Use 2s if parallel
+ * probing is available, 800ms otherwise.
*/
- ATA_TMOUT_FF_WAIT = 800,
+ ATA_TMOUT_FF_WAIT_LONG = 2000,
+ ATA_TMOUT_FF_WAIT = 800,
/* Spec mandates to wait for ">= 2ms" before checking status
* after reset. We wait 150ms, because that was the magic
@@ -1631,7 +1632,6 @@ extern void ata_bmdma_setup(struct ata_queued_cmd *qc);
extern void ata_bmdma_start(struct ata_queued_cmd *qc);
extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
extern u8 ata_bmdma_status(struct ata_port *ap);
-extern void ata_bus_reset(struct ata_port *ap);
#ifdef CONFIG_PCI
extern int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev);
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index 5126cceb6ae9..7135ebc8428c 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -18,8 +18,8 @@
# define asmregparm
#endif
-#define __page_aligned_data __section(.data.page_aligned) __aligned(PAGE_SIZE)
-#define __page_aligned_bss __section(.bss.page_aligned) __aligned(PAGE_SIZE)
+#define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
+#define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
/*
* For assembly routines.
@@ -27,8 +27,8 @@
* Note when using these that you must specify the appropriate
* alignment directives yourself
*/
-#define __PAGE_ALIGNED_DATA .section ".data.page_aligned", "aw"
-#define __PAGE_ALIGNED_BSS .section ".bss.page_aligned", "aw"
+#define __PAGE_ALIGNED_DATA .section ".data..page_aligned", "aw"
+#define __PAGE_ALIGNED_BSS .section ".bss..page_aligned", "aw"
/*
* This is used by architectures to keep arguments on the stack
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
index f78f83d7663f..6907251d5200 100644
--- a/include/linux/lsm_audit.h
+++ b/include/linux/lsm_audit.h
@@ -33,7 +33,7 @@ struct common_audit_data {
#define LSM_AUDIT_DATA_IPC 4
#define LSM_AUDIT_DATA_TASK 5
#define LSM_AUDIT_DATA_KEY 6
-#define LSM_AUDIT_NO_AUDIT 7
+#define LSM_AUDIT_DATA_NONE 7
#define LSM_AUDIT_DATA_KMOD 8
struct task_struct *tsk;
union {
diff --git a/include/linux/meye.h b/include/linux/meye.h
index 12010ace1f04..0dd49954f746 100644
--- a/include/linux/meye.h
+++ b/include/linux/meye.h
@@ -44,17 +44,17 @@ struct meye_params {
};
/* query the extended parameters */
-#define MEYEIOC_G_PARAMS _IOR ('v', BASE_VIDIOCPRIVATE+0, struct meye_params)
+#define MEYEIOC_G_PARAMS _IOR ('v', BASE_VIDIOC_PRIVATE+0, struct meye_params)
/* set the extended parameters */
-#define MEYEIOC_S_PARAMS _IOW ('v', BASE_VIDIOCPRIVATE+1, struct meye_params)
+#define MEYEIOC_S_PARAMS _IOW ('v', BASE_VIDIOC_PRIVATE+1, struct meye_params)
/* queue a buffer for mjpeg capture */
-#define MEYEIOC_QBUF_CAPT _IOW ('v', BASE_VIDIOCPRIVATE+2, int)
+#define MEYEIOC_QBUF_CAPT _IOW ('v', BASE_VIDIOC_PRIVATE+2, int)
/* sync a previously queued mjpeg buffer */
-#define MEYEIOC_SYNC _IOWR('v', BASE_VIDIOCPRIVATE+3, int)
+#define MEYEIOC_SYNC _IOWR('v', BASE_VIDIOC_PRIVATE+3, int)
/* get a still uncompressed snapshot */
-#define MEYEIOC_STILLCAPT _IO ('v', BASE_VIDIOCPRIVATE+4)
+#define MEYEIOC_STILLCAPT _IO ('v', BASE_VIDIOC_PRIVATE+4)
/* get a jpeg compressed snapshot */
-#define MEYEIOC_STILLJCAPT _IOR ('v', BASE_VIDIOCPRIVATE+5, int)
+#define MEYEIOC_STILLJCAPT _IOR ('v', BASE_VIDIOC_PRIVATE+5, int)
/* V4L2 private controls */
#define V4L2_CID_AGC V4L2_CID_PRIVATE_BASE
diff --git a/include/linux/mfd/88pm860x.h b/include/linux/mfd/88pm860x.h
index 73f92c5feea2..bfd23bef7363 100644
--- a/include/linux/mfd/88pm860x.h
+++ b/include/linux/mfd/88pm860x.h
@@ -132,6 +132,7 @@ enum {
PM8607_ID_LDO9,
PM8607_ID_LDO10,
PM8607_ID_LDO12,
+ PM8607_ID_LDO13,
PM8607_ID_LDO14,
PM8607_ID_RG_MAX,
@@ -309,7 +310,7 @@ struct pm860x_chip {
};
-#define PM8607_MAX_REGULATOR 15 /* 3 Bucks, 12 LDOs */
+#define PM8607_MAX_REGULATOR PM8607_ID_RG_MAX /* 3 Bucks, 13 LDOs */
enum {
GI2C_PORT = 0,
@@ -369,7 +370,7 @@ extern int pm860x_set_bits(struct i2c_client *, int, unsigned char,
unsigned char);
extern int pm860x_device_init(struct pm860x_chip *chip,
- struct pm860x_platform_data *pdata);
-extern void pm860x_device_exit(struct pm860x_chip *chip);
+ struct pm860x_platform_data *pdata) __devinit;
+extern void pm860x_device_exit(struct pm860x_chip *chip) __devexit;
#endif /* __LINUX_MFD_88PM860X_H */
diff --git a/include/linux/mfd/ab3100.h b/include/linux/mfd/abx500.h
index 9a881c305a50..390726fcbcb1 100644
--- a/include/linux/mfd/ab3100.h
+++ b/include/linux/mfd/abx500.h
@@ -3,17 +3,37 @@
* License terms: GNU General Public License (GPL) version 2
* AB3100 core access functions
* Author: Linus Walleij <linus.walleij@stericsson.com>
+ *
+ * ABX500 core access functions.
+ * The abx500 interface is used for the Analog Baseband chips
+ * ab3100, ab3550, ab5500 and possibly coming variants. It is not used
+ * for ab4500 and ab8500 since they are another family of chips.
+ *
+ * Author: Mattias Wallin <mattias.wallin@stericsson.com>
+ * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com>
+ * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com>
+ * Author: Rickard Andersson <rickard.andersson@stericsson.com>
*/
#include <linux/device.h>
#include <linux/regulator/machine.h>
-#ifndef MFD_AB3100_H
-#define MFD_AB3100_H
+#ifndef MFD_ABX500_H
+#define MFD_ABX500_H
-#define ABUNKNOWN 0
-#define AB3000 1
-#define AB3100 2
+#define AB3100_P1A 0xc0
+#define AB3100_P1B 0xc1
+#define AB3100_P1C 0xc2
+#define AB3100_P1D 0xc3
+#define AB3100_P1E 0xc4
+#define AB3100_P1F 0xc5
+#define AB3100_P1G 0xc6
+#define AB3100_R2A 0xc7
+#define AB3100_R2B 0xc8
+#define AB3550_P1A 0x10
+#define AB5500_1_0 0x20
+#define AB5500_2_0 0x21
+#define AB5500_2_1 0x22
/*
* AB3100, EVENTA1, A2 and A3 event register flags
@@ -89,7 +109,7 @@ struct ab3100 {
char chip_name[32];
u8 chip_id;
struct blocking_notifier_head event_subscribers;
- u32 startup_events;
+ u8 startup_events[3];
bool startup_events_read;
};
@@ -112,18 +132,102 @@ struct ab3100_platform_data {
int external_voltage;
};
-int ab3100_set_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 regval);
-int ab3100_get_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 *regval);
-int ab3100_get_register_page_interruptible(struct ab3100 *ab3100,
- u8 first_reg, u8 *regvals, u8 numregs);
-int ab3100_mask_and_set_register_interruptible(struct ab3100 *ab3100,
- u8 reg, u8 andmask, u8 ormask);
-u8 ab3100_get_chip_type(struct ab3100 *ab3100);
int ab3100_event_register(struct ab3100 *ab3100,
struct notifier_block *nb);
int ab3100_event_unregister(struct ab3100 *ab3100,
struct notifier_block *nb);
-int ab3100_event_registers_startup_state_get(struct ab3100 *ab3100,
- u32 *fatevent);
+/* AB3550, STR register flags */
+#define AB3550_STR_ONSWA (0x01)
+#define AB3550_STR_ONSWB (0x02)
+#define AB3550_STR_ONSWC (0x04)
+#define AB3550_STR_DCIO (0x08)
+#define AB3550_STR_BOOT_MODE (0x10)
+#define AB3550_STR_SIM_OFF (0x20)
+#define AB3550_STR_BATT_REMOVAL (0x40)
+#define AB3550_STR_VBUS (0x80)
+
+/* Interrupt mask registers */
+#define AB3550_IMR1 0x29
+#define AB3550_IMR2 0x2a
+#define AB3550_IMR3 0x2b
+#define AB3550_IMR4 0x2c
+#define AB3550_IMR5 0x2d
+
+enum ab3550_devid {
+ AB3550_DEVID_ADC,
+ AB3550_DEVID_DAC,
+ AB3550_DEVID_LEDS,
+ AB3550_DEVID_POWER,
+ AB3550_DEVID_REGULATORS,
+ AB3550_DEVID_SIM,
+ AB3550_DEVID_UART,
+ AB3550_DEVID_RTC,
+ AB3550_DEVID_CHARGER,
+ AB3550_DEVID_FUELGAUGE,
+ AB3550_DEVID_VIBRATOR,
+ AB3550_DEVID_CODEC,
+ AB3550_NUM_DEVICES,
+};
+
+/**
+ * struct abx500_init_settings
+ * Initial value of the registers for driver to use during setup.
+ */
+struct abx500_init_settings {
+ u8 bank;
+ u8 reg;
+ u8 setting;
+};
+
+/**
+ * struct ab3550_platform_data
+ * Data supplied to initialize board connections to the AB3550
+ */
+struct ab3550_platform_data {
+ struct {unsigned int base; unsigned int count; } irq;
+ void *dev_data[AB3550_NUM_DEVICES];
+ size_t dev_data_sz[AB3550_NUM_DEVICES];
+ struct abx500_init_settings *init_settings;
+ unsigned int init_settings_sz;
+};
+
+int abx500_set_register_interruptible(struct device *dev, u8 bank, u8 reg,
+ u8 value);
+int abx500_get_register_interruptible(struct device *dev, u8 bank, u8 reg,
+ u8 *value);
+int abx500_get_register_page_interruptible(struct device *dev, u8 bank,
+ u8 first_reg, u8 *regvals, u8 numregs);
+int abx500_set_register_page_interruptible(struct device *dev, u8 bank,
+ u8 first_reg, u8 *regvals, u8 numregs);
+/**
+ * abx500_mask_and_set_register_interruptible() - Modifies selected bits of a
+ * target register
+ *
+ * @dev: The AB sub device.
+ * @bank: The i2c bank number.
+ * @bitmask: The bit mask to use.
+ * @bitvalues: The new bit values.
+ *
+ * Updates the value of an AB register:
+ * value -> ((value & ~bitmask) | (bitvalues & bitmask))
+ */
+int abx500_mask_and_set_register_interruptible(struct device *dev, u8 bank,
+ u8 reg, u8 bitmask, u8 bitvalues);
+int abx500_get_chip_id(struct device *dev);
+int abx500_event_registers_startup_state_get(struct device *dev, u8 *event);
+int abx500_startup_irq_enabled(struct device *dev, unsigned int irq);
+
+struct abx500_ops {
+ int (*get_chip_id) (struct device *);
+ int (*get_register) (struct device *, u8, u8, u8 *);
+ int (*set_register) (struct device *, u8, u8, u8);
+ int (*get_register_page) (struct device *, u8, u8, u8 *, u8);
+ int (*set_register_page) (struct device *, u8, u8, u8 *, u8);
+ int (*mask_and_set_register) (struct device *, u8, u8, u8, u8);
+ int (*event_registers_startup_state_get) (struct device *, u8 *);
+ int (*startup_irq_enabled) (struct device *, unsigned int);
+};
+
+int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
#endif
diff --git a/include/linux/mfd/davinci_voicecodec.h b/include/linux/mfd/davinci_voicecodec.h
new file mode 100644
index 000000000000..0ab61320ffa8
--- /dev/null
+++ b/include/linux/mfd/davinci_voicecodec.h
@@ -0,0 +1,126 @@
+/*
+ * DaVinci Voice Codec Core Interface for TI platforms
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc
+ *
+ * Author: Miguel Aguilar <miguel.aguilar@ridgerun.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __LINUX_MFD_DAVINCI_VOICECODEC_H_
+#define __LINUX_MFD_DAVINCI_VOICECODEC_H_
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
+
+#include <mach/edma.h>
+
+/*
+ * Register values.
+ */
+#define DAVINCI_VC_PID 0x00
+#define DAVINCI_VC_CTRL 0x04
+#define DAVINCI_VC_INTEN 0x08
+#define DAVINCI_VC_INTSTATUS 0x0c
+#define DAVINCI_VC_INTCLR 0x10
+#define DAVINCI_VC_EMUL_CTRL 0x14
+#define DAVINCI_VC_RFIFO 0x20
+#define DAVINCI_VC_WFIFO 0x24
+#define DAVINCI_VC_FIFOSTAT 0x28
+#define DAVINCI_VC_TST_CTRL 0x2C
+#define DAVINCI_VC_REG05 0x94
+#define DAVINCI_VC_REG09 0xA4
+#define DAVINCI_VC_REG12 0xB0
+
+/* DAVINCI_VC_CTRL bit fields */
+#define DAVINCI_VC_CTRL_MASK 0x5500
+#define DAVINCI_VC_CTRL_RSTADC BIT(0)
+#define DAVINCI_VC_CTRL_RSTDAC BIT(1)
+#define DAVINCI_VC_CTRL_RD_BITS_8 BIT(4)
+#define DAVINCI_VC_CTRL_RD_UNSIGNED BIT(5)
+#define DAVINCI_VC_CTRL_WD_BITS_8 BIT(6)
+#define DAVINCI_VC_CTRL_WD_UNSIGNED BIT(7)
+#define DAVINCI_VC_CTRL_RFIFOEN BIT(8)
+#define DAVINCI_VC_CTRL_RFIFOCL BIT(9)
+#define DAVINCI_VC_CTRL_RFIFOMD_WORD_1 BIT(10)
+#define DAVINCI_VC_CTRL_WFIFOEN BIT(12)
+#define DAVINCI_VC_CTRL_WFIFOCL BIT(13)
+#define DAVINCI_VC_CTRL_WFIFOMD_WORD_1 BIT(14)
+
+/* DAVINCI_VC_INT bit fields */
+#define DAVINCI_VC_INT_MASK 0x3F
+#define DAVINCI_VC_INT_RDRDY_MASK BIT(0)
+#define DAVINCI_VC_INT_RERROVF_MASK BIT(1)
+#define DAVINCI_VC_INT_RERRUDR_MASK BIT(2)
+#define DAVINCI_VC_INT_WDREQ_MASK BIT(3)
+#define DAVINCI_VC_INT_WERROVF_MASKBIT BIT(4)
+#define DAVINCI_VC_INT_WERRUDR_MASK BIT(5)
+
+/* DAVINCI_VC_REG05 bit fields */
+#define DAVINCI_VC_REG05_PGA_GAIN 0x07
+
+/* DAVINCI_VC_REG09 bit fields */
+#define DAVINCI_VC_REG09_MUTE 0x40
+#define DAVINCI_VC_REG09_DIG_ATTEN 0x3F
+
+/* DAVINCI_VC_REG12 bit fields */
+#define DAVINCI_VC_REG12_POWER_ALL_ON 0xFD
+#define DAVINCI_VC_REG12_POWER_ALL_OFF 0x00
+
+#define DAVINCI_VC_CELLS 2
+
+enum davinci_vc_cells {
+ DAVINCI_VC_VCIF_CELL,
+ DAVINCI_VC_CQ93VC_CELL,
+};
+
+struct davinci_vcif {
+ struct platform_device *pdev;
+ u32 dma_tx_channel;
+ u32 dma_rx_channel;
+ dma_addr_t dma_tx_addr;
+ dma_addr_t dma_rx_addr;
+};
+
+struct cq93vc {
+ struct platform_device *pdev;
+ struct snd_soc_codec *codec;
+ u32 sysclk;
+};
+
+struct davinci_vc;
+
+struct davinci_vc {
+ /* Device data */
+ struct device *dev;
+ struct platform_device *pdev;
+ struct clk *clk;
+
+ /* Memory resources */
+ void __iomem *base;
+ resource_size_t pbase;
+ size_t base_size;
+
+ /* MFD cells */
+ struct mfd_cell cells[DAVINCI_VC_CELLS];
+
+ /* Client devices */
+ struct davinci_vcif davinci_vcif;
+ struct cq93vc cq93vc;
+};
+
+#endif
diff --git a/include/linux/mfd/janz.h b/include/linux/mfd/janz.h
new file mode 100644
index 000000000000..e9994c469803
--- /dev/null
+++ b/include/linux/mfd/janz.h
@@ -0,0 +1,54 @@
+/*
+ * Common Definitions for Janz MODULbus devices
+ *
+ * Copyright (c) 2010 Ira W. Snyder <iws@ovro.caltech.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef JANZ_H
+#define JANZ_H
+
+struct janz_platform_data {
+ /* MODULbus Module Number */
+ unsigned int modno;
+};
+
+/* PLX bridge chip onboard registers */
+struct janz_cmodio_onboard_regs {
+ u8 unused1;
+
+ /*
+ * Read access: interrupt status
+ * Write access: interrupt disable
+ */
+ u8 int_disable;
+ u8 unused2;
+
+ /*
+ * Read access: MODULbus number (hex switch)
+ * Write access: interrupt enable
+ */
+ u8 int_enable;
+ u8 unused3;
+
+ /* write-only */
+ u8 reset_assert;
+ u8 unused4;
+
+ /* write-only */
+ u8 reset_deassert;
+ u8 unused5;
+
+ /* read-write access to serial EEPROM */
+ u8 eep;
+ u8 unused6;
+
+ /* write-only access to EEPROM chip select */
+ u8 enid;
+};
+
+#endif /* JANZ_H */
diff --git a/include/linux/mfd/rdc321x.h b/include/linux/mfd/rdc321x.h
new file mode 100644
index 000000000000..4bdf19c8eedf
--- /dev/null
+++ b/include/linux/mfd/rdc321x.h
@@ -0,0 +1,26 @@
+#ifndef __RDC321X_MFD_H
+#define __RDC321X_MFD_H
+
+#include <linux/types.h>
+#include <linux/pci.h>
+
+/* Offsets to be accessed in the southbridge PCI
+ * device configuration register */
+#define RDC321X_WDT_CTRL 0x44
+#define RDC321X_GPIO_CTRL_REG1 0x48
+#define RDC321X_GPIO_DATA_REG1 0x4c
+#define RDC321X_GPIO_CTRL_REG2 0x84
+#define RDC321X_GPIO_DATA_REG2 0x88
+
+#define RDC321X_MAX_GPIO 58
+
+struct rdc321x_gpio_pdata {
+ struct pci_dev *sb_pdev;
+ unsigned max_gpios;
+};
+
+struct rdc321x_wdt_pdata {
+ struct pci_dev *sb_pdev;
+};
+
+#endif /* __RDC321X_MFD_H */
diff --git a/include/linux/mfd/tc35892.h b/include/linux/mfd/tc35892.h
new file mode 100644
index 000000000000..e47f770d3068
--- /dev/null
+++ b/include/linux/mfd/tc35892.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License, version 2
+ */
+
+#ifndef __LINUX_MFD_TC35892_H
+#define __LINUX_MFD_TC35892_H
+
+#include <linux/device.h>
+
+#define TC35892_RSTCTRL_IRQRST (1 << 4)
+#define TC35892_RSTCTRL_TIMRST (1 << 3)
+#define TC35892_RSTCTRL_ROTRST (1 << 2)
+#define TC35892_RSTCTRL_KBDRST (1 << 1)
+#define TC35892_RSTCTRL_GPIRST (1 << 0)
+
+#define TC35892_IRQST 0x91
+
+#define TC35892_MANFCODE_MAGIC 0x03
+#define TC35892_MANFCODE 0x80
+#define TC35892_VERSION 0x81
+#define TC35892_IOCFG 0xA7
+
+#define TC35892_CLKMODE 0x88
+#define TC35892_CLKCFG 0x89
+#define TC35892_CLKEN 0x8A
+
+#define TC35892_RSTCTRL 0x82
+#define TC35892_EXTRSTN 0x83
+#define TC35892_RSTINTCLR 0x84
+
+#define TC35892_GPIOIS0 0xC9
+#define TC35892_GPIOIS1 0xCA
+#define TC35892_GPIOIS2 0xCB
+#define TC35892_GPIOIBE0 0xCC
+#define TC35892_GPIOIBE1 0xCD
+#define TC35892_GPIOIBE2 0xCE
+#define TC35892_GPIOIEV0 0xCF
+#define TC35892_GPIOIEV1 0xD0
+#define TC35892_GPIOIEV2 0xD1
+#define TC35892_GPIOIE0 0xD2
+#define TC35892_GPIOIE1 0xD3
+#define TC35892_GPIOIE2 0xD4
+#define TC35892_GPIORIS0 0xD6
+#define TC35892_GPIORIS1 0xD7
+#define TC35892_GPIORIS2 0xD8
+#define TC35892_GPIOMIS0 0xD9
+#define TC35892_GPIOMIS1 0xDA
+#define TC35892_GPIOMIS2 0xDB
+#define TC35892_GPIOIC0 0xDC
+#define TC35892_GPIOIC1 0xDD
+#define TC35892_GPIOIC2 0xDE
+
+#define TC35892_GPIODATA0 0xC0
+#define TC35892_GPIOMASK0 0xc1
+#define TC35892_GPIODATA1 0xC2
+#define TC35892_GPIOMASK1 0xc3
+#define TC35892_GPIODATA2 0xC4
+#define TC35892_GPIOMASK2 0xC5
+
+#define TC35892_GPIODIR0 0xC6
+#define TC35892_GPIODIR1 0xC7
+#define TC35892_GPIODIR2 0xC8
+
+#define TC35892_GPIOSYNC0 0xE6
+#define TC35892_GPIOSYNC1 0xE7
+#define TC35892_GPIOSYNC2 0xE8
+
+#define TC35892_GPIOWAKE0 0xE9
+#define TC35892_GPIOWAKE1 0xEA
+#define TC35892_GPIOWAKE2 0xEB
+
+#define TC35892_GPIOODM0 0xE0
+#define TC35892_GPIOODE0 0xE1
+#define TC35892_GPIOODM1 0xE2
+#define TC35892_GPIOODE1 0xE3
+#define TC35892_GPIOODM2 0xE4
+#define TC35892_GPIOODE2 0xE5
+
+#define TC35892_INT_GPIIRQ 0
+#define TC35892_INT_TI0IRQ 1
+#define TC35892_INT_TI1IRQ 2
+#define TC35892_INT_TI2IRQ 3
+#define TC35892_INT_ROTIRQ 5
+#define TC35892_INT_KBDIRQ 6
+#define TC35892_INT_PORIRQ 7
+
+#define TC35892_NR_INTERNAL_IRQS 8
+#define TC35892_INT_GPIO(x) (TC35892_NR_INTERNAL_IRQS + (x))
+
+struct tc35892 {
+ struct mutex lock;
+ struct device *dev;
+ struct i2c_client *i2c;
+
+ int irq_base;
+ int num_gpio;
+ struct tc35892_platform_data *pdata;
+};
+
+extern int tc35892_reg_write(struct tc35892 *tc35892, u8 reg, u8 data);
+extern int tc35892_reg_read(struct tc35892 *tc35892, u8 reg);
+extern int tc35892_block_read(struct tc35892 *tc35892, u8 reg, u8 length,
+ u8 *values);
+extern int tc35892_block_write(struct tc35892 *tc35892, u8 reg, u8 length,
+ const u8 *values);
+extern int tc35892_set_bits(struct tc35892 *tc35892, u8 reg, u8 mask, u8 val);
+
+/**
+ * struct tc35892_gpio_platform_data - TC35892 GPIO platform data
+ * @gpio_base: first gpio number assigned to TC35892. A maximum of
+ * %TC35892_NR_GPIOS GPIOs will be allocated.
+ */
+struct tc35892_gpio_platform_data {
+ int gpio_base;
+};
+
+/**
+ * struct tc35892_platform_data - TC35892 platform data
+ * @irq_base: base IRQ number. %TC35892_NR_IRQS irqs will be used.
+ * @gpio: GPIO-specific platform data
+ */
+struct tc35892_platform_data {
+ int irq_base;
+ struct tc35892_gpio_platform_data *gpio;
+};
+
+#define TC35892_NR_GPIOS 24
+#define TC35892_NR_IRQS TC35892_INT_GPIO(TC35892_NR_GPIOS)
+
+#endif
diff --git a/include/linux/mfd/tps6507x.h b/include/linux/mfd/tps6507x.h
new file mode 100644
index 000000000000..c923e4864f55
--- /dev/null
+++ b/include/linux/mfd/tps6507x.h
@@ -0,0 +1,169 @@
+/* linux/mfd/tps6507x.h
+ *
+ * Functions to access TPS65070 power management chip.
+ *
+ * Copyright (c) 2009 RidgeRun (todd.fischer@ridgerun.com)
+ *
+ *
+ * For licensing details see kernel-base/COPYING
+ */
+
+#ifndef __LINUX_MFD_TPS6507X_H
+#define __LINUX_MFD_TPS6507X_H
+
+/*
+ * ----------------------------------------------------------------------------
+ * Registers, all 8 bits
+ * ----------------------------------------------------------------------------
+ */
+
+
+/* Register definitions */
+#define TPS6507X_REG_PPATH1 0X01
+#define TPS6507X_CHG_USB BIT(7)
+#define TPS6507X_CHG_AC BIT(6)
+#define TPS6507X_CHG_USB_PW_ENABLE BIT(5)
+#define TPS6507X_CHG_AC_PW_ENABLE BIT(4)
+#define TPS6507X_CHG_AC_CURRENT BIT(2)
+#define TPS6507X_CHG_USB_CURRENT BIT(0)
+
+#define TPS6507X_REG_INT 0X02
+#define TPS6507X_REG_MASK_AC_USB BIT(7)
+#define TPS6507X_REG_MASK_TSC BIT(6)
+#define TPS6507X_REG_MASK_PB_IN BIT(5)
+#define TPS6507X_REG_TSC_INT BIT(3)
+#define TPS6507X_REG_PB_IN_INT BIT(2)
+#define TPS6507X_REG_AC_USB_APPLIED BIT(1)
+#define TPS6507X_REG_AC_USB_REMOVED BIT(0)
+
+#define TPS6507X_REG_CHGCONFIG0 0X03
+
+#define TPS6507X_REG_CHGCONFIG1 0X04
+#define TPS6507X_CON_CTRL1_DCDC1_ENABLE BIT(4)
+#define TPS6507X_CON_CTRL1_DCDC2_ENABLE BIT(3)
+#define TPS6507X_CON_CTRL1_DCDC3_ENABLE BIT(2)
+#define TPS6507X_CON_CTRL1_LDO1_ENABLE BIT(1)
+#define TPS6507X_CON_CTRL1_LDO2_ENABLE BIT(0)
+
+#define TPS6507X_REG_CHGCONFIG2 0X05
+
+#define TPS6507X_REG_CHGCONFIG3 0X06
+
+#define TPS6507X_REG_ADCONFIG 0X07
+#define TPS6507X_ADCONFIG_AD_ENABLE BIT(7)
+#define TPS6507X_ADCONFIG_START_CONVERSION BIT(6)
+#define TPS6507X_ADCONFIG_CONVERSION_DONE BIT(5)
+#define TPS6507X_ADCONFIG_VREF_ENABLE BIT(4)
+#define TPS6507X_ADCONFIG_INPUT_AD_IN1 0
+#define TPS6507X_ADCONFIG_INPUT_AD_IN2 1
+#define TPS6507X_ADCONFIG_INPUT_AD_IN3 2
+#define TPS6507X_ADCONFIG_INPUT_AD_IN4 3
+#define TPS6507X_ADCONFIG_INPUT_TS_PIN 4
+#define TPS6507X_ADCONFIG_INPUT_BAT_CURRENT 5
+#define TPS6507X_ADCONFIG_INPUT_AC_VOLTAGE 6
+#define TPS6507X_ADCONFIG_INPUT_SYS_VOLTAGE 7
+#define TPS6507X_ADCONFIG_INPUT_CHARGER_VOLTAGE 8
+#define TPS6507X_ADCONFIG_INPUT_BAT_VOLTAGE 9
+#define TPS6507X_ADCONFIG_INPUT_THRESHOLD_VOLTAGE 10
+#define TPS6507X_ADCONFIG_INPUT_ISET1_VOLTAGE 11
+#define TPS6507X_ADCONFIG_INPUT_ISET2_VOLTAGE 12
+#define TPS6507X_ADCONFIG_INPUT_REAL_TSC 14
+#define TPS6507X_ADCONFIG_INPUT_TSC 15
+
+#define TPS6507X_REG_TSCMODE 0X08
+#define TPS6507X_TSCMODE_X_POSITION 0
+#define TPS6507X_TSCMODE_Y_POSITION 1
+#define TPS6507X_TSCMODE_PRESSURE 2
+#define TPS6507X_TSCMODE_X_PLATE 3
+#define TPS6507X_TSCMODE_Y_PLATE 4
+#define TPS6507X_TSCMODE_STANDBY 5
+#define TPS6507X_TSCMODE_ADC_INPUT 6
+#define TPS6507X_TSCMODE_DISABLE 7
+
+#define TPS6507X_REG_ADRESULT_1 0X09
+
+#define TPS6507X_REG_ADRESULT_2 0X0A
+#define TPS6507X_REG_ADRESULT_2_MASK (BIT(1) | BIT(0))
+
+#define TPS6507X_REG_PGOOD 0X0B
+
+#define TPS6507X_REG_PGOODMASK 0X0C
+
+#define TPS6507X_REG_CON_CTRL1 0X0D
+#define TPS6507X_CON_CTRL1_DCDC1_ENABLE BIT(4)
+#define TPS6507X_CON_CTRL1_DCDC2_ENABLE BIT(3)
+#define TPS6507X_CON_CTRL1_DCDC3_ENABLE BIT(2)
+#define TPS6507X_CON_CTRL1_LDO1_ENABLE BIT(1)
+#define TPS6507X_CON_CTRL1_LDO2_ENABLE BIT(0)
+
+#define TPS6507X_REG_CON_CTRL2 0X0E
+
+#define TPS6507X_REG_CON_CTRL3 0X0F
+
+#define TPS6507X_REG_DEFDCDC1 0X10
+#define TPS6507X_DEFDCDC1_DCDC1_EXT_ADJ_EN BIT(7)
+#define TPS6507X_DEFDCDC1_DCDC1_MASK 0X3F
+
+#define TPS6507X_REG_DEFDCDC2_LOW 0X11
+#define TPS6507X_DEFDCDC2_LOW_DCDC2_MASK 0X3F
+
+#define TPS6507X_REG_DEFDCDC2_HIGH 0X12
+#define TPS6507X_DEFDCDC2_HIGH_DCDC2_MASK 0X3F
+
+#define TPS6507X_REG_DEFDCDC3_LOW 0X13
+#define TPS6507X_DEFDCDC3_LOW_DCDC3_MASK 0X3F
+
+#define TPS6507X_REG_DEFDCDC3_HIGH 0X14
+#define TPS6507X_DEFDCDC3_HIGH_DCDC3_MASK 0X3F
+
+#define TPS6507X_REG_DEFSLEW 0X15
+
+#define TPS6507X_REG_LDO_CTRL1 0X16
+#define TPS6507X_REG_LDO_CTRL1_LDO1_MASK 0X0F
+
+#define TPS6507X_REG_DEFLDO2 0X17
+#define TPS6507X_REG_DEFLDO2_LDO2_MASK 0X3F
+
+#define TPS6507X_REG_WLED_CTRL1 0X18
+
+#define TPS6507X_REG_WLED_CTRL2 0X19
+
+/* VDCDC MASK */
+#define TPS6507X_DEFDCDCX_DCDC_MASK 0X3F
+
+#define TPS6507X_MAX_REGISTER 0X19
+
+/**
+ * struct tps6507x_board - packages regulator and touchscreen init data
+ * @tps6507x_pmic_init_data: regulator initialization values
+ * @tps6507x_ts_init_data: touchscreen initialization values
+ *
+ * Board data may be used to initialize regulator and touchscreen.
+ */
+
+struct tps6507x_board {
+ struct regulator_init_data *tps6507x_pmic_init_data;
+ struct touchscreen_init_data *tps6507x_ts_init_data;
+};
+
+/**
+ * struct tps6507x_dev - tps6507x sub-driver chip access routines
+ * @read_dev() - I2C register read function
+ * @write_dev() - I2C register write function
+ *
+ * Device data may be used to access the TPS6507x chip
+ */
+
+struct tps6507x_dev {
+ struct device *dev;
+ struct i2c_client *i2c_client;
+ int (*read_dev)(struct tps6507x_dev *tps6507x, char reg, int size,
+ void *dest);
+ int (*write_dev)(struct tps6507x_dev *tps6507x, char reg, int size,
+ void *src);
+
+ /* Client devices */
+ struct tps6507x_pmic *pmic;
+ struct tps6507x_ts *ts;
+};
+
+#endif /* __LINUX_MFD_TPS6507X_H */
diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h
index 5915f6e3d9ab..eb5bd4e0e03c 100644
--- a/include/linux/mfd/wm831x/core.h
+++ b/include/linux/mfd/wm831x/core.h
@@ -256,8 +256,9 @@ struct wm831x {
int irq_masks_cache[WM831X_NUM_IRQ_REGS]; /* Cached hardware value */
/* Chip revision based flags */
- unsigned has_gpio_ena:1; /* Has GPIO enable bit */
- unsigned has_cs_sts:1; /* Has current sink status bit */
+ unsigned has_gpio_ena:1; /* Has GPIO enable bit */
+ unsigned has_cs_sts:1; /* Has current sink status bit */
+ unsigned charger_irq_wake:1; /* Are charger IRQs a wake source? */
int num_gpio;
diff --git a/include/linux/mfd/wm8350/audio.h b/include/linux/mfd/wm8350/audio.h
index d899dc0223ba..a95141eafce3 100644
--- a/include/linux/mfd/wm8350/audio.h
+++ b/include/linux/mfd/wm8350/audio.h
@@ -492,6 +492,8 @@
*/
#define WM8350_JACK_L_LVL 0x0800
#define WM8350_JACK_R_LVL 0x0400
+#define WM8350_JACK_MICSCD_LVL 0x0200
+#define WM8350_JACK_MICSD_LVL 0x0100
/*
* WM8350 Platform setup
diff --git a/include/linux/mfd/wm8994/core.h b/include/linux/mfd/wm8994/core.h
index b06ff2846748..de79baee4925 100644
--- a/include/linux/mfd/wm8994/core.h
+++ b/include/linux/mfd/wm8994/core.h
@@ -15,14 +15,38 @@
#ifndef __MFD_WM8994_CORE_H__
#define __MFD_WM8994_CORE_H__
+#include <linux/interrupt.h>
+
struct regulator_dev;
struct regulator_bulk_data;
#define WM8994_NUM_GPIO_REGS 11
-#define WM8994_NUM_LDO_REGS 2
+#define WM8994_NUM_LDO_REGS 2
+#define WM8994_NUM_IRQ_REGS 2
+
+#define WM8994_IRQ_TEMP_SHUT 0
+#define WM8994_IRQ_MIC1_DET 1
+#define WM8994_IRQ_MIC1_SHRT 2
+#define WM8994_IRQ_MIC2_DET 3
+#define WM8994_IRQ_MIC2_SHRT 4
+#define WM8994_IRQ_FLL1_LOCK 5
+#define WM8994_IRQ_FLL2_LOCK 6
+#define WM8994_IRQ_SRC1_LOCK 7
+#define WM8994_IRQ_SRC2_LOCK 8
+#define WM8994_IRQ_AIF1DRC1_SIG_DET 9
+#define WM8994_IRQ_AIF1DRC2_SIG_DET 10
+#define WM8994_IRQ_AIF2DRC_SIG_DET 11
+#define WM8994_IRQ_FIFOS_ERR 12
+#define WM8994_IRQ_WSEQ_DONE 13
+#define WM8994_IRQ_DCS_DONE 14
+#define WM8994_IRQ_TEMP_WARN 15
+
+/* GPIOs in the chip are numbered from 1-11 */
+#define WM8994_IRQ_GPIO(x) (x + WM8994_IRQ_TEMP_WARN)
struct wm8994 {
struct mutex io_lock;
+ struct mutex irq_lock;
struct device *dev;
int (*read_dev)(struct wm8994 *wm8994, unsigned short reg,
@@ -33,6 +57,11 @@ struct wm8994 {
void *control_data;
int gpio_base;
+ int irq_base;
+
+ int irq;
+ u16 irq_masks_cur[WM8994_NUM_IRQ_REGS];
+ u16 irq_masks_cache[WM8994_NUM_IRQ_REGS];
/* Used over suspend/resume */
u16 ldo_regs[WM8994_NUM_LDO_REGS];
@@ -51,4 +80,26 @@ int wm8994_set_bits(struct wm8994 *wm8994, unsigned short reg,
int wm8994_bulk_read(struct wm8994 *wm8994, unsigned short reg,
int count, u16 *buf);
+
+/* Helper to save on boilerplate */
+static inline int wm8994_request_irq(struct wm8994 *wm8994, int irq,
+ irq_handler_t handler, const char *name,
+ void *data)
+{
+ if (!wm8994->irq_base)
+ return -EINVAL;
+ return request_threaded_irq(wm8994->irq_base + irq, NULL, handler,
+ IRQF_TRIGGER_RISING, name,
+ data);
+}
+static inline void wm8994_free_irq(struct wm8994 *wm8994, int irq, void *data)
+{
+ if (!wm8994->irq_base)
+ return;
+ free_irq(wm8994->irq_base + irq, data);
+}
+
+int wm8994_irq_init(struct wm8994 *wm8994);
+void wm8994_irq_exit(struct wm8994 *wm8994);
+
#endif
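A minimal sketch of a sub-driver using the IRQ helpers added above; the wm8994 pointer is assumed to come from the caller's probe path and the handler is hypothetical:

#include <linux/interrupt.h>
#include <linux/mfd/wm8994/core.h>

static irqreturn_t example_mic1_det_handler(int irq, void *data)
{
	/* react to microphone 1 detection here */
	return IRQ_HANDLED;
}

static int example_hook_mic_irq(struct wm8994 *wm8994, void *priv)
{
	/* Fails with -EINVAL if the board did not provide an irq_base. */
	return wm8994_request_irq(wm8994, WM8994_IRQ_MIC1_DET,
				  example_mic1_det_handler,
				  "MIC1 detect", priv);
}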
diff --git a/include/linux/mfd/wm8994/pdata.h b/include/linux/mfd/wm8994/pdata.h
index 70d6a8687dc5..5c51f367c061 100644
--- a/include/linux/mfd/wm8994/pdata.h
+++ b/include/linux/mfd/wm8994/pdata.h
@@ -70,6 +70,7 @@ struct wm8994_pdata {
struct wm8994_ldo_pdata ldo[WM8994_NUM_LDO];
+ int irq_base; /* Base IRQ number for WM8994, required for IRQs */
int num_drc_cfgs;
struct wm8994_drc_cfg *drc_cfgs;
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index e92d1bfdb330..7a7f9c1e679a 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -123,8 +123,8 @@ enum {
MLX4_OPCODE_RDMA_READ = 0x10,
MLX4_OPCODE_ATOMIC_CS = 0x11,
MLX4_OPCODE_ATOMIC_FA = 0x12,
- MLX4_OPCODE_ATOMIC_MASK_CS = 0x14,
- MLX4_OPCODE_ATOMIC_MASK_FA = 0x15,
+ MLX4_OPCODE_MASKED_ATOMIC_CS = 0x14,
+ MLX4_OPCODE_MASKED_ATOMIC_FA = 0x15,
MLX4_OPCODE_BIND_MW = 0x18,
MLX4_OPCODE_FMR = 0x19,
MLX4_OPCODE_LOCAL_INVAL = 0x1b,
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 9f29d86e5dc9..7abe64326f72 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -285,6 +285,13 @@ struct mlx4_wqe_atomic_seg {
__be64 compare;
};
+struct mlx4_wqe_masked_atomic_seg {
+ __be64 swap_add;
+ __be64 compare;
+ __be64 swap_add_mask;
+ __be64 compare_mask;
+};
+
struct mlx4_wqe_data_seg {
__be32 byte_count;
__be32 lkey;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 462acaf36f3a..fb19bb92b809 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -19,7 +19,6 @@ struct anon_vma;
struct file_ra_state;
struct user_struct;
struct writeback_control;
-struct rlimit;
#ifndef CONFIG_DISCONTIGMEM /* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;
@@ -1449,9 +1448,6 @@ int vmemmap_populate_basepages(struct page *start_page,
int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
void vmemmap_populate_print_last(void);
-extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
- size_t size);
-extern void refund_locked_memory(struct mm_struct *mm, size_t size);
enum mf_flags {
MF_COUNT_INCREASED = 1 << 0,
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 43eaf5ca5848..3196c84cc630 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -108,6 +108,9 @@ struct mmc_host_ops {
int (*get_cd)(struct mmc_host *host);
void (*enable_sdio_irq)(struct mmc_host *host, int enable);
+
+ /* optional callback for HC quirks */
+ void (*init_card)(struct mmc_host *host, struct mmc_card *card);
};
struct mmc_card;
diff --git a/include/linux/mmc/sdio.h b/include/linux/mmc/sdio.h
index 0ebaef577ff5..329a8faa6e37 100644
--- a/include/linux/mmc/sdio.h
+++ b/include/linux/mmc/sdio.h
@@ -94,6 +94,8 @@
#define SDIO_BUS_WIDTH_1BIT 0x00
#define SDIO_BUS_WIDTH_4BIT 0x02
+#define SDIO_BUS_ECSI 0x20 /* Enable continuous SPI interrupt */
+#define SDIO_BUS_SCSI 0x40 /* Support continuous SPI interrupt */
#define SDIO_BUS_ASYNC_INT 0x20
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index f58e9d836f32..529e34b661af 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -474,4 +474,37 @@ struct platform_device_id {
__attribute__((aligned(sizeof(kernel_ulong_t))));
};
+#define MDIO_MODULE_PREFIX "mdio:"
+
+#define MDIO_ID_FMT "%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d"
+#define MDIO_ID_ARGS(_id) \
+ (_id)>>31, ((_id)>>30) & 1, ((_id)>>29) & 1, ((_id)>>28) & 1, \
+ ((_id)>>27) & 1, ((_id)>>26) & 1, ((_id)>>25) & 1, ((_id)>>24) & 1, \
+ ((_id)>>23) & 1, ((_id)>>22) & 1, ((_id)>>21) & 1, ((_id)>>20) & 1, \
+ ((_id)>>19) & 1, ((_id)>>18) & 1, ((_id)>>17) & 1, ((_id)>>16) & 1, \
+ ((_id)>>15) & 1, ((_id)>>14) & 1, ((_id)>>13) & 1, ((_id)>>12) & 1, \
+ ((_id)>>11) & 1, ((_id)>>10) & 1, ((_id)>>9) & 1, ((_id)>>8) & 1, \
+ ((_id)>>7) & 1, ((_id)>>6) & 1, ((_id)>>5) & 1, ((_id)>>4) & 1, \
+ ((_id)>>3) & 1, ((_id)>>2) & 1, ((_id)>>1) & 1, (_id) & 1
+
+/**
+ * struct mdio_device_id - identifies PHY devices on an MDIO/MII bus
+ * @phy_id: The result of
+ * (mdio_read(&MII_PHYSID1) << 16 | mdio_read(&MII_PHYSID2)) & @phy_id_mask
+ * for this PHY type
+ * @phy_id_mask: Defines the significant bits of @phy_id. A value of 0
+ * is used to terminate an array of struct mdio_device_id.
+ */
+struct mdio_device_id {
+ __u32 phy_id;
+ __u32 phy_id_mask;
+};
+
+#define ISAPNP_ANY_ID 0xffff
+struct isapnp_device_id {
+ unsigned short card_vendor, card_device;
+ unsigned short vendor, function;
+ kernel_ulong_t driver_data; /* data private to the driver */
+};
+
#endif /* LINUX_MOD_DEVICETABLE_H */
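As a sketch of how a PHY driver could use the new mdio_device_id (the id and mask below are placeholders, not a real OUI), a table is terminated by an all-zero entry and exported for module autoloading:

#include <linux/mod_devicetable.h>
#include <linux/module.h>

static struct mdio_device_id example_phy_tbl[] = {
	{ 0x00112233, 0xfffffff0 },	/* placeholder phy_id / phy_id_mask */
	{ }				/* zero phy_id_mask terminates the table */
};

MODULE_DEVICE_TABLE(mdio, example_phy_tbl);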
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 82a9124f7d75..893549c04265 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -31,20 +31,23 @@ static const char __module_cat(name,__LINE__)[] \
struct kernel_param;
-/* Returns 0, or -errno. arg is in kp->arg. */
-typedef int (*param_set_fn)(const char *val, struct kernel_param *kp);
-/* Returns length written or -errno. Buffer is 4k (ie. be short!) */
-typedef int (*param_get_fn)(char *buffer, struct kernel_param *kp);
+struct kernel_param_ops {
+ /* Returns 0, or -errno. arg is in kp->arg. */
+ int (*set)(const char *val, const struct kernel_param *kp);
+ /* Returns length written or -errno. Buffer is 4k (ie. be short!) */
+ int (*get)(char *buffer, const struct kernel_param *kp);
+ /* Optional function to free kp->arg when module unloaded. */
+ void (*free)(void *arg);
+};
/* Flag bits for kernel_param.flags */
#define KPARAM_ISBOOL 2
struct kernel_param {
const char *name;
+ const struct kernel_param_ops *ops;
u16 perm;
u16 flags;
- param_set_fn set;
- param_get_fn get;
union {
void *arg;
const struct kparam_string *str;
@@ -63,12 +66,67 @@ struct kparam_array
{
unsigned int max;
unsigned int *num;
- param_set_fn set;
- param_get_fn get;
+ const struct kernel_param_ops *ops;
unsigned int elemsize;
void *elem;
};
+/**
+ * module_param - typesafe helper for a module/cmdline parameter
+ * @value: the variable to alter, and exposed parameter name.
+ * @type: the type of the parameter
+ * @perm: visibility in sysfs.
+ *
+ * @value becomes the module parameter, or (prefixed by KBUILD_MODNAME and a
+ * ".") the kernel commandline parameter. Note that - is changed to _, so
+ * the user can use "foo-bar=1" even for variable "foo_bar".
+ *
+ * @perm is 0 if the variable is not to appear in sysfs, or 0444
+ * for world-readable, 0644 for root-writable, etc. Note that if it
+ * is writable, you may need to use kparam_block_sysfs_write() around
+ * accesses (esp. charp, which can be kfreed when it changes).
+ *
+ * The @type is simply pasted to refer to a param_ops_##type and a
+ * param_check_##type: for convenience many standard types are provided but
+ * you can create your own by defining those variables.
+ *
+ * Standard types are:
+ * byte, short, ushort, int, uint, long, ulong
+ * charp: a character pointer
+ * bool: a bool, values 0/1, y/n, Y/N.
+ * invbool: the above, only sense-reversed (N = true).
+ */
+#define module_param(name, type, perm) \
+ module_param_named(name, name, type, perm)
+
+/**
+ * module_param_named - typesafe helper for a renamed module/cmdline parameter
+ * @name: a valid C identifier which is the parameter name.
+ * @value: the actual lvalue to alter.
+ * @type: the type of the parameter
+ * @perm: visibility in sysfs.
+ *
+ * Usually it's a good idea to have variable names and user-exposed names the
+ * same, but that's harder if the variable must be non-static or is inside a
+ * structure. This allows exposure under a different name.
+ */
+#define module_param_named(name, value, type, perm) \
+ param_check_##type(name, &(value)); \
+ module_param_cb(name, &param_ops_##type, &value, perm); \
+ __MODULE_PARM_TYPE(name, #type)
+
+/**
+ * module_param_cb - general callback for a module/cmdline parameter
+ * @name: a valid C identifier which is the parameter name.
+ * @ops: the set & get operations for this parameter.
+ * @arg: the argument passed to the ops (stored in kp->arg, usually a
+ * pointer to the variable being exposed).
+ * @perm: visibility in sysfs.
+ *
+ * The ops can have NULL set or get functions.
+ */
+#define module_param_cb(name, ops, arg, perm) \
+ __module_param_call(MODULE_PARAM_PREFIX, \
+ name, ops, arg, __same_type(*(arg), bool), perm)
+
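An illustrative sketch of the two styles documented above (names are invented; param_set_uint()/param_get_uint() are the standard uint helpers declared further down in this header):

#include <linux/moduleparam.h>

static unsigned int example_threshold = 10;
module_param(example_threshold, uint, 0644);

/* Custom ops: validate before delegating to the stock uint parser. */
static int example_set_mode(const char *val, const struct kernel_param *kp)
{
	return param_set_uint(val, kp);
}

static struct kernel_param_ops example_mode_ops = {
	.set = example_set_mode,
	.get = param_get_uint,
};

static unsigned int example_mode;
module_param_cb(example_mode, &example_mode_ops, &example_mode, 0644);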
/* On alpha, ia64 and ppc64 relocations to global data cannot go into
read-only sections (which is part of respective UNIX ABI on these
platforms). So 'const' makes no sense and even causes compile failures
@@ -80,10 +138,8 @@ struct kparam_array
#endif
/* This is the fundamental function for registering boot/module
- parameters. perm sets the visibility in sysfs: 000 means it's
- not there, read bits mean it's readable, write bits mean it's
- writable. */
-#define __module_param_call(prefix, name, set, get, arg, isbool, perm) \
+ parameters. */
+#define __module_param_call(prefix, name, ops, arg, isbool, perm) \
/* Default value instead of permissions? */ \
static int __param_perm_check_##name __attribute__((unused)) = \
BUILD_BUG_ON_ZERO((perm) < 0 || (perm) > 0777 || ((perm) & 2)) \
@@ -92,31 +148,87 @@ struct kparam_array
static struct kernel_param __moduleparam_const __param_##name \
__used \
__attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \
- = { __param_str_##name, perm, isbool ? KPARAM_ISBOOL : 0, \
- set, get, { arg } }
+ = { __param_str_##name, ops, perm, isbool ? KPARAM_ISBOOL : 0, \
+ { arg } }
+
+/* Obsolete - use module_param_cb() */
+#define module_param_call(name, set, get, arg, perm) \
+ static struct kernel_param_ops __param_ops_##name = \
+ { (void *)set, (void *)get }; \
+ __module_param_call(MODULE_PARAM_PREFIX, \
+ name, &__param_ops_##name, arg, \
+ __same_type(*(arg), bool), \
+ (perm) + sizeof(__check_old_set_param(set))*0)
+
+/* We don't get oldget: it's often a new-style param_get_uint, etc. */
+static inline int
+__check_old_set_param(int (*oldset)(const char *, struct kernel_param *))
+{
+ return 0;
+}
-#define module_param_call(name, set, get, arg, perm) \
- __module_param_call(MODULE_PARAM_PREFIX, \
- name, set, get, arg, \
- __same_type(*(arg), bool), perm)
+/**
+ * kparam_block_sysfs_write - make sure a parameter isn't written via sysfs.
+ * @name: the name of the parameter
+ *
+ * There's no point blocking write on a parameter that isn't writable via sysfs!
+ */
+#define kparam_block_sysfs_write(name) \
+ do { \
+ BUG_ON(!(__param_##name.perm & 0222)); \
+ __kernel_param_lock(); \
+ } while (0)
-/* Helper functions: type is byte, short, ushort, int, uint, long,
- ulong, charp, bool or invbool, or XXX if you define param_get_XXX,
- param_set_XXX and param_check_XXX. */
-#define module_param_named(name, value, type, perm) \
- param_check_##type(name, &(value)); \
- module_param_call(name, param_set_##type, param_get_##type, &value, perm); \
- __MODULE_PARM_TYPE(name, #type)
+/**
+ * kparam_unblock_sysfs_write - allows sysfs to write to a parameter again.
+ * @name: the name of the parameter
+ */
+#define kparam_unblock_sysfs_write(name) \
+ do { \
+ BUG_ON(!(__param_##name.perm & 0222)); \
+ __kernel_param_unlock(); \
+ } while (0)
-#define module_param(name, type, perm) \
- module_param_named(name, name, type, perm)
+/**
+ * kparam_block_sysfs_read - make sure a parameter isn't read via sysfs.
+ * @name: the name of the parameter
+ *
+ * This also blocks sysfs writes.
+ */
+#define kparam_block_sysfs_read(name) \
+ do { \
+ BUG_ON(!(__param_##name.perm & 0444)); \
+ __kernel_param_lock(); \
+ } while (0)
+
+/**
+ * kparam_unblock_sysfs_read - allows sysfs to read a parameter again.
+ * @name: the name of the parameter
+ */
+#define kparam_unblock_sysfs_read(name) \
+ do { \
+ BUG_ON(!(__param_##name.perm & 0444)); \
+ __kernel_param_unlock(); \
+ } while (0)
+
+#ifdef CONFIG_SYSFS
+extern void __kernel_param_lock(void);
+extern void __kernel_param_unlock(void);
+#else
+static inline void __kernel_param_lock(void)
+{
+}
+static inline void __kernel_param_unlock(void)
+{
+}
+#endif
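/*
 * Sketch of the blocking helpers in a hypothetical driver; "my_param" and
 * my_apply_param() are illustrative names only.
 */
static char *my_param = "default";
module_param(my_param, charp, 0644);

static void my_apply_param(void)
{
        kparam_block_sysfs_write(my_param);
        /* my_param cannot be replaced via sysfs inside this window */
        pr_info("applying %s\n", my_param);
        kparam_unblock_sysfs_write(my_param);
}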
#ifndef MODULE
/**
* core_param - define a historical core kernel parameter.
* @name: the name of the cmdline and sysfs parameter (often the same as var)
* @var: the variable
- * @type: the type (for param_set_##type and param_get_##type)
+ * @type: the type of the parameter
* @perm: visibility in sysfs
*
* core_param is just like module_param(), but cannot be modular and
@@ -126,23 +238,32 @@ struct kparam_array
*/
#define core_param(name, var, type, perm) \
param_check_##type(name, &(var)); \
- __module_param_call("", name, param_set_##type, param_get_##type, \
+ __module_param_call("", name, &param_ops_##type, \
&var, __same_type(var, bool), perm)
#endif /* !MODULE */
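/*
 * Hypothetical core_param() use: exposes an existing built-in variable on
 * the kernel command line and, with these permissions, typically under
 * /sys/module/kernel/parameters/.
 */
static bool my_core_flag;
core_param(my_core_flag, my_core_flag, bool, 0644);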
-/* Actually copy string: maxlen param is usually sizeof(string). */
+/**
+ * module_param_string - a char array parameter
+ * @name: the name of the parameter
+ * @string: the string variable
+ * @len: the maximum length of the string, incl. terminator
+ * @perm: visibility in sysfs.
+ *
+ * This actually copies the string when it's set (unlike type charp).
+ * @len is usually just sizeof(string).
+ */
#define module_param_string(name, string, len, perm) \
static const struct kparam_string __param_string_##name \
= { len, string }; \
__module_param_call(MODULE_PARAM_PREFIX, name, \
- param_set_copystring, param_get_string, \
+ &param_ops_string, \
.str = &__param_string_##name, 0, perm); \
__MODULE_PARM_TYPE(name, "string")
/* Called on module insert or kernel boot */
extern int parse_args(const char *name,
char *args,
- struct kernel_param *params,
+ const struct kernel_param *params,
unsigned num,
int (*unknown)(char *param, char *val));
@@ -162,41 +283,50 @@ static inline void destroy_params(const struct kernel_param *params,
#define __param_check(name, p, type) \
static inline type *__check_##name(void) { return(p); }
-extern int param_set_byte(const char *val, struct kernel_param *kp);
-extern int param_get_byte(char *buffer, struct kernel_param *kp);
+extern struct kernel_param_ops param_ops_byte;
+extern int param_set_byte(const char *val, const struct kernel_param *kp);
+extern int param_get_byte(char *buffer, const struct kernel_param *kp);
#define param_check_byte(name, p) __param_check(name, p, unsigned char)
-extern int param_set_short(const char *val, struct kernel_param *kp);
-extern int param_get_short(char *buffer, struct kernel_param *kp);
+extern struct kernel_param_ops param_ops_short;
+extern int param_set_short(const char *val, const struct kernel_param *kp);
+extern int param_get_short(char *buffer, const struct kernel_param *kp);
#define param_check_short(name, p) __param_check(name, p, short)
-extern int param_set_ushort(const char *val, struct kernel_param *kp);
-extern int param_get_ushort(char *buffer, struct kernel_param *kp);
+extern struct kernel_param_ops param_ops_ushort;
+extern int param_set_ushort(const char *val, const struct kernel_param *kp);
+extern int param_get_ushort(char *buffer, const struct kernel_param *kp);
#define param_check_ushort(name, p) __param_check(name, p, unsigned short)
-extern int param_set_int(const char *val, struct kernel_param *kp);
-extern int param_get_int(char *buffer, struct kernel_param *kp);
+extern struct kernel_param_ops param_ops_int;
+extern int param_set_int(const char *val, const struct kernel_param *kp);
+extern int param_get_int(char *buffer, const struct kernel_param *kp);
#define param_check_int(name, p) __param_check(name, p, int)
-extern int param_set_uint(const char *val, struct kernel_param *kp);
-extern int param_get_uint(char *buffer, struct kernel_param *kp);
+extern struct kernel_param_ops param_ops_uint;
+extern int param_set_uint(const char *val, const struct kernel_param *kp);
+extern int param_get_uint(char *buffer, const struct kernel_param *kp);
#define param_check_uint(name, p) __param_check(name, p, unsigned int)
-extern int param_set_long(const char *val, struct kernel_param *kp);
-extern int param_get_long(char *buffer, struct kernel_param *kp);
+extern struct kernel_param_ops param_ops_long;
+extern int param_set_long(const char *val, const struct kernel_param *kp);
+extern int param_get_long(char *buffer, const struct kernel_param *kp);
#define param_check_long(name, p) __param_check(name, p, long)
-extern int param_set_ulong(const char *val, struct kernel_param *kp);
-extern int param_get_ulong(char *buffer, struct kernel_param *kp);
+extern struct kernel_param_ops param_ops_ulong;
+extern int param_set_ulong(const char *val, const struct kernel_param *kp);
+extern int param_get_ulong(char *buffer, const struct kernel_param *kp);
#define param_check_ulong(name, p) __param_check(name, p, unsigned long)
-extern int param_set_charp(const char *val, struct kernel_param *kp);
-extern int param_get_charp(char *buffer, struct kernel_param *kp);
+extern struct kernel_param_ops param_ops_charp;
+extern int param_set_charp(const char *val, const struct kernel_param *kp);
+extern int param_get_charp(char *buffer, const struct kernel_param *kp);
#define param_check_charp(name, p) __param_check(name, p, char *)
/* For historical reasons "bool" parameters can be (unsigned) "int". */
-extern int param_set_bool(const char *val, struct kernel_param *kp);
-extern int param_get_bool(char *buffer, struct kernel_param *kp);
+extern struct kernel_param_ops param_ops_bool;
+extern int param_set_bool(const char *val, const struct kernel_param *kp);
+extern int param_get_bool(char *buffer, const struct kernel_param *kp);
#define param_check_bool(name, p) \
static inline void __check_##name(void) \
{ \
@@ -205,29 +335,53 @@ extern int param_get_bool(char *buffer, struct kernel_param *kp);
!__same_type(*(p), int)); \
}
-extern int param_set_invbool(const char *val, struct kernel_param *kp);
-extern int param_get_invbool(char *buffer, struct kernel_param *kp);
+extern struct kernel_param_ops param_ops_invbool;
+extern int param_set_invbool(const char *val, const struct kernel_param *kp);
+extern int param_get_invbool(char *buffer, const struct kernel_param *kp);
#define param_check_invbool(name, p) __param_check(name, p, bool)
-/* Comma-separated array: *nump is set to number they actually specified. */
+/**
+ * module_param_array - a parameter which is an array of some type
+ * @name: the name of the array variable
+ * @type: the type, as per module_param()
+ * @nump: optional pointer filled in with the number written
+ * @perm: visibility in sysfs
+ *
+ * Input and output are as comma-separated values. Commas inside values
+ * don't work properly (e.g. an array of charp).
+ *
+ * ARRAY_SIZE(@name) is used to determine the number of elements in the
+ * array, so the definition must be visible.
+ */
+#define module_param_array(name, type, nump, perm) \
+ module_param_array_named(name, name, type, nump, perm)
+
+/**
+ * module_param_array_named - renamed parameter which is an array of some type
+ * @name: a valid C identifier which is the parameter name
+ * @array: the name of the array variable
+ * @type: the type, as per module_param()
+ * @nump: optional pointer filled in with the number written
+ * @perm: visibility in sysfs
+ *
+ * This exposes a different name than the actual variable name. See
+ * module_param_named() for why this might be necessary.
+ */
#define module_param_array_named(name, array, type, nump, perm) \
static const struct kparam_array __param_arr_##name \
- = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
+ = { ARRAY_SIZE(array), nump, &param_ops_##type, \
sizeof(array[0]), array }; \
__module_param_call(MODULE_PARAM_PREFIX, name, \
- param_array_set, param_array_get, \
+ &param_array_ops, \
.arr = &__param_arr_##name, \
__same_type(array[0], bool), perm); \
__MODULE_PARM_TYPE(name, "array of " #type)
-#define module_param_array(name, type, nump, perm) \
- module_param_array_named(name, name, type, nump, perm)
-
-extern int param_array_set(const char *val, struct kernel_param *kp);
-extern int param_array_get(char *buffer, struct kernel_param *kp);
+extern struct kernel_param_ops param_array_ops;
-extern int param_set_copystring(const char *val, struct kernel_param *kp);
-extern int param_get_string(char *buffer, struct kernel_param *kp);
+extern struct kernel_param_ops param_ops_string;
+extern int param_set_copystring(const char *val, const struct kernel_param *);
+extern int param_get_string(char *buffer, const struct kernel_param *kp);
/* for exporting parameters in /sys/parameters */
@@ -235,13 +389,13 @@ struct module;
#if defined(CONFIG_SYSFS) && defined(CONFIG_MODULES)
extern int module_param_sysfs_setup(struct module *mod,
- struct kernel_param *kparam,
+ const struct kernel_param *kparam,
unsigned int num_params);
extern void module_param_sysfs_remove(struct module *mod);
#else
static inline int module_param_sysfs_setup(struct module *mod,
- struct kernel_param *kparam,
+ const struct kernel_param *kparam,
unsigned int num_params)
{
return 0;
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 4bd05474d11d..907210bd9f9c 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -56,7 +56,11 @@ struct vfsmount {
struct list_head mnt_mounts; /* list of children, anchored here */
struct list_head mnt_child; /* and going through their mnt_child */
int mnt_flags;
- /* 4 bytes hole on 64bits arches */
+ /* 4 bytes hole on 64bits arches without fsnotify */
+#ifdef CONFIG_FSNOTIFY
+ __u32 mnt_fsnotify_mask;
+ struct hlist_head mnt_fsnotify_marks;
+#endif
const char *mnt_devname; /* Name of device e.g. /dev/dsk/hda1 */
struct list_head mnt_list;
struct list_head mnt_expire; /* link in fs-specific expiry list */
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
index c5f3d53548e2..fa04b246c9ae 100644
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -27,7 +27,8 @@
#define MRT_DEL_MFC (MRT_BASE+5) /* Delete a multicast forwarding entry */
#define MRT_VERSION (MRT_BASE+6) /* Get the kernel multicast version */
#define MRT_ASSERT (MRT_BASE+7) /* Activate PIM assert mode */
-#define MRT_PIM (MRT_BASE+8) /* enable PIM code */
+#define MRT_PIM (MRT_BASE+8) /* enable PIM code */
+#define MRT_TABLE (MRT_BASE+9) /* Specify mroute table ID */
#define SIOCGETVIFCNT SIOCPROTOPRIVATE /* IP protocol privates */
#define SIOCGETSGCNT (SIOCPROTOPRIVATE+1)
@@ -191,10 +192,7 @@ struct vif_device {
#define VIFF_STATIC 0x8000
struct mfc_cache {
- struct mfc_cache *next; /* Next entry on cache line */
-#ifdef CONFIG_NET_NS
- struct net *mfc_net;
-#endif
+ struct list_head list;
__be32 mfc_mcastgrp; /* Group the entry belongs to */
__be32 mfc_origin; /* Source of packet */
vifi_t mfc_parent; /* Source interface */
@@ -217,18 +215,6 @@ struct mfc_cache {
} mfc_un;
};
-static inline
-struct net *mfc_net(const struct mfc_cache *mfc)
-{
- return read_pnet(&mfc->mfc_net);
-}
-
-static inline
-void mfc_net_set(struct mfc_cache *mfc, struct net *net)
-{
- write_pnet(&mfc->mfc_net, hold_net(net));
-}
-
#define MFC_STATIC 1
#define MFC_NOTIFY 2
diff --git a/include/linux/msm_mdp.h b/include/linux/msm_mdp.h
new file mode 100644
index 000000000000..d11fe0f2f956
--- /dev/null
+++ b/include/linux/msm_mdp.h
@@ -0,0 +1,78 @@
+/* include/linux/msm_mdp.h
+ *
+ * Copyright (C) 2007 Google Incorporated
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MSM_MDP_H_
+#define _MSM_MDP_H_
+
+#include <linux/types.h>
+
+#define MSMFB_IOCTL_MAGIC 'm'
+#define MSMFB_GRP_DISP _IOW(MSMFB_IOCTL_MAGIC, 1, unsigned int)
+#define MSMFB_BLIT _IOW(MSMFB_IOCTL_MAGIC, 2, unsigned int)
+
+enum {
+ MDP_RGB_565, /* RGB 565 planar */
+ MDP_XRGB_8888, /* RGB 888 padded */
+ MDP_Y_CBCR_H2V2, /* Y and CbCr, pseudo planar w/ Cb is in MSB */
+ MDP_ARGB_8888, /* ARGB 888 */
+ MDP_RGB_888, /* RGB 888 planar */
+ MDP_Y_CRCB_H2V2, /* Y and CrCb, pseudo planar w/ Cr is in MSB */
+ MDP_YCRYCB_H2V1, /* YCrYCb interleave */
+ MDP_Y_CRCB_H2V1, /* Y and CrCb, pseudo planar w/ Cr is in MSB */
+ MDP_Y_CBCR_H2V1, /* Y and CbCr, pseudo planar w/ Cb is in MSB */
+ MDP_RGBA_8888, /* ARGB 888 */
+ MDP_BGRA_8888, /* ABGR 888 */
+ MDP_IMGTYPE_LIMIT /* Non valid image type after this enum */
+};
+
+enum {
+ PMEM_IMG,
+ FB_IMG,
+};
+
+/* flag values */
+#define MDP_ROT_NOP 0
+#define MDP_FLIP_LR 0x1
+#define MDP_FLIP_UD 0x2
+#define MDP_ROT_90 0x4
+#define MDP_ROT_180 (MDP_FLIP_UD|MDP_FLIP_LR)
+#define MDP_ROT_270 (MDP_ROT_90|MDP_FLIP_UD|MDP_FLIP_LR)
+#define MDP_DITHER 0x8
+#define MDP_BLUR 0x10
+
+#define MDP_TRANSP_NOP 0xffffffff
+#define MDP_ALPHA_NOP 0xff
+
+struct mdp_rect {
+ u32 x, y, w, h;
+};
+
+struct mdp_img {
+ u32 width, height, format, offset;
+ int memory_id; /* the file descriptor */
+};
+
+struct mdp_blit_req {
+ struct mdp_img src;
+ struct mdp_img dst;
+ struct mdp_rect src_rect;
+ struct mdp_rect dst_rect;
+ u32 alpha, transp_mask, flags;
+};
+
+struct mdp_blit_req_list {
+ u32 count;
+ struct mdp_blit_req req[];
+};
+
+#endif /* _MSM_MDP_H_ */
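/*
 * Userspace-style sketch of driving MSMFB_BLIT with the structures above;
 * the descriptors, sizes and formats are hypothetical and error handling
 * is elided.
 */
#include <string.h>
#include <sys/ioctl.h>

static int blit_copy(int fb_fd, int src_fd, int dst_fd)
{
        char buf[sizeof(struct mdp_blit_req_list) + sizeof(struct mdp_blit_req)];
        struct mdp_blit_req_list *list = (struct mdp_blit_req_list *)buf;
        struct mdp_blit_req *req = &list->req[0];

        memset(buf, 0, sizeof(buf));
        list->count = 1;

        req->src.width  = req->dst.width  = 320;
        req->src.height = req->dst.height = 240;
        req->src.format = req->dst.format = MDP_RGB_565;
        req->src.memory_id = src_fd;     /* pmem or framebuffer descriptor */
        req->dst.memory_id = dst_fd;
        req->src_rect = (struct mdp_rect){ 0, 0, 320, 240 };
        req->dst_rect = (struct mdp_rect){ 0, 0, 320, 240 };
        req->alpha = MDP_ALPHA_NOP;
        req->transp_mask = MDP_TRANSP_NOP;
        req->flags = MDP_ROT_NOP;

        return ioctl(fb_fd, MSMFB_BLIT, list);
}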
diff --git a/include/linux/mtd/blktrans.h b/include/linux/mtd/blktrans.h
index 8b4aa0523db7..b481ccd7ff3c 100644
--- a/include/linux/mtd/blktrans.h
+++ b/include/linux/mtd/blktrans.h
@@ -9,6 +9,8 @@
#define __MTD_TRANS_H__
#include <linux/mutex.h>
+#include <linux/kref.h>
+#include <linux/sysfs.h>
struct hd_geometry;
struct mtd_info;
@@ -24,11 +26,16 @@ struct mtd_blktrans_dev {
int devnum;
unsigned long size;
int readonly;
- void *blkcore_priv; /* gendisk in 2.5, devfs_handle in 2.4 */
+ int open;
+ struct kref ref;
+ struct gendisk *disk;
+ struct attribute_group *disk_attributes;
+ struct task_struct *thread;
+ struct request_queue *rq;
+ spinlock_t queue_lock;
+ void *priv;
};
-struct blkcore_priv; /* Differs for 2.4 and 2.5 kernels; private */
-
struct mtd_blktrans_ops {
char *name;
int major;
@@ -60,8 +67,6 @@ struct mtd_blktrans_ops {
struct list_head devs;
struct list_head list;
struct module *owner;
-
- struct mtd_blkcore_priv *blkcore_priv;
};
extern int register_mtd_blktrans(struct mtd_blktrans_ops *tr);
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index df89f4275232..cee05b1e62b1 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -297,7 +297,7 @@ static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
* and 32bit devices on 16 bit busses
* set the low bit of the alternating bit sequence of the address.
*/
- if (((type * interleave) > bankwidth) && ((uint8_t)cmd_ofs == 0xaa))
+ if (((type * interleave) > bankwidth) && ((cmd_ofs & 0xff) == 0xaa))
addr |= (type >> 1)*interleave;
return addr;
@@ -518,11 +518,13 @@ struct cfi_fixup {
#define CFI_MFR_ANY 0xffff
#define CFI_ID_ANY 0xffff
-#define CFI_MFR_AMD 0x0001
-#define CFI_MFR_INTEL 0x0089
-#define CFI_MFR_ATMEL 0x001F
-#define CFI_MFR_SAMSUNG 0x00EC
-#define CFI_MFR_ST 0x0020 /* STMicroelectronics */
+#define CFI_MFR_AMD 0x0001
+#define CFI_MFR_ATMEL 0x001F
+#define CFI_MFR_INTEL 0x0089
+#define CFI_MFR_MACRONIX 0x00C2
+#define CFI_MFR_SAMSUNG 0x00EC
+#define CFI_MFR_SST 0x00BF
+#define CFI_MFR_ST 0x0020 /* STMicroelectronics */
void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups);
diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h
index d0bf422ae374..f43e9b49b751 100644
--- a/include/linux/mtd/flashchip.h
+++ b/include/linux/mtd/flashchip.h
@@ -15,6 +15,7 @@
* has asm/spinlock.h, or 2.4, which has linux/spinlock.h
*/
#include <linux/sched.h>
+#include <linux/mutex.h>
typedef enum {
FL_READY,
@@ -74,8 +75,7 @@ struct flchip {
unsigned int erase_suspended:1;
unsigned long in_progress_block_addr;
- spinlock_t *mutex;
- spinlock_t _spinlock; /* We do it like this because sometimes they'll be shared. */
+ struct mutex mutex;
wait_queue_head_t wq; /* Wait on here when we're waiting for the chip
to be ready */
int word_write_time;
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 0f32a9b6ff55..5326435a7571 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -20,7 +20,6 @@
#define MTD_CHAR_MAJOR 90
#define MTD_BLOCK_MAJOR 31
-#define MAX_MTD_DEVICES 32
#define MTD_ERASE_PENDING 0x01
#define MTD_ERASING 0x02
@@ -61,9 +60,7 @@ struct mtd_erase_region_info {
* MTD_OOB_PLACE: oob data are placed at the given offset
* MTD_OOB_AUTO: oob data are automatically placed at the free areas
* which are defined by the ecclayout
- * MTD_OOB_RAW: mode to read raw data+oob in one chunk. The oob data
- * is inserted into the data. Thats a raw image of the
- * flash contents.
+ * MTD_OOB_RAW: mode to read oob and data without doing ECC checking
*/
typedef enum {
MTD_OOB_PLACE,
@@ -290,8 +287,9 @@ extern int add_mtd_device(struct mtd_info *mtd);
extern int del_mtd_device (struct mtd_info *mtd);
extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num);
+extern int __get_mtd_device(struct mtd_info *mtd);
+extern void __put_mtd_device(struct mtd_info *mtd);
extern struct mtd_info *get_mtd_device_nm(const char *name);
-
extern void put_mtd_device(struct mtd_info *mtd);
diff --git a/include/linux/mtd/mtdram.h b/include/linux/mtd/mtdram.h
index 04fdc07b7353..68891313875d 100644
--- a/include/linux/mtd/mtdram.h
+++ b/include/linux/mtd/mtdram.h
@@ -3,6 +3,6 @@
#include <linux/mtd/mtd.h>
int mtdram_init_device(struct mtd_info *mtd, void *mapped_address,
- unsigned long size, char *name);
+ unsigned long size, char *name);
#endif /* __MTD_MTDRAM_H__ */
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index ccab9dfc5217..8bdacb885f90 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -25,11 +25,13 @@
#include <linux/mtd/bbm.h>
struct mtd_info;
+struct nand_flash_dev;
/* Scan and identify a NAND device */
extern int nand_scan (struct mtd_info *mtd, int max_chips);
/* Separate phases of nand_scan(), allowing board driver to intervene
* and override command or ECC setup according to flash type */
-extern int nand_scan_ident(struct mtd_info *mtd, int max_chips);
+extern int nand_scan_ident(struct mtd_info *mtd, int max_chips,
+ struct nand_flash_dev *table);
extern int nand_scan_tail(struct mtd_info *mtd);
/* Free resources held by the NAND device */
@@ -38,6 +40,12 @@ extern void nand_release (struct mtd_info *mtd);
/* Internal helper for board drivers which need to override command function */
extern void nand_wait_ready(struct mtd_info *mtd);
+/* locks all blocks present in the device */
+extern int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+
+/* unlocks the specified locked blocks */
+extern int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+
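/*
 * Minimal sketch (hypothetical caller): clear write protection across the
 * whole device before an erase/write pass.
 */
static int example_unlock_all(struct mtd_info *mtd)
{
        return nand_unlock(mtd, 0, mtd->size);
}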
/* The maximum number of NAND chips in an array */
#define NAND_MAX_CHIPS 8
@@ -82,6 +90,10 @@ extern void nand_wait_ready(struct mtd_info *mtd);
#define NAND_CMD_ERASE2 0xd0
#define NAND_CMD_RESET 0xff
+#define NAND_CMD_LOCK 0x2a
+#define NAND_CMD_UNLOCK1 0x23
+#define NAND_CMD_UNLOCK2 0x24
+
/* Extended commands for large page devices */
#define NAND_CMD_READSTART 0x30
#define NAND_CMD_RNDOUTSTART 0xE0
@@ -170,6 +182,12 @@ typedef enum {
/* Chip does not allow subpage writes */
#define NAND_NO_SUBPAGE_WRITE 0x00000200
+/* Device is one of the 'new' xD cards that expose a fake NAND command set */
+#define NAND_BROKEN_XD 0x00000400
+
+/* Device behaves just like nand, but is readonly */
+#define NAND_ROM 0x00000800
+
/* Options valid for Samsung large page devices */
#define NAND_SAMSUNG_LP_OPTIONS \
(NAND_NO_PADDING | NAND_CACHEPRG | NAND_COPYBACK)
@@ -391,6 +409,7 @@ struct nand_chip {
int subpagesize;
uint8_t cellinfo;
int badblockpos;
+ int badblockbits;
flstate_t state;
diff --git a/include/linux/mtd/sh_flctl.h b/include/linux/mtd/sh_flctl.h
index ab77609ec337..178b5c26c995 100644
--- a/include/linux/mtd/sh_flctl.h
+++ b/include/linux/mtd/sh_flctl.h
@@ -93,7 +93,10 @@
#define INIT_FL4ECCRESULT_VAL 0x03FF03FF
#define LOOP_TIMEOUT_MAX 0x00010000
-#define mtd_to_flctl(mtd) container_of(mtd, struct sh_flctl, mtd)
+static inline struct sh_flctl *mtd_to_flctl(struct mtd_info *mtdinfo)
+{
+ return container_of(mtdinfo, struct sh_flctl, mtd);
+}
struct sh_flctl {
struct mtd_info mtd;
diff --git a/include/linux/net.h b/include/linux/net.h
index 4157b5d42bd6..2b4deeeb8646 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -59,6 +59,7 @@ typedef enum {
#include <linux/wait.h>
#include <linux/fcntl.h> /* For O_CLOEXEC and O_NONBLOCK */
#include <linux/kmemcheck.h>
+#include <linux/rcupdate.h>
struct poll_table_struct;
struct pipe_inode_info;
@@ -116,6 +117,12 @@ enum sock_shutdown_cmd {
SHUT_RDWR = 2,
};
+struct socket_wq {
+ wait_queue_head_t wait;
+ struct fasync_struct *fasync_list;
+ struct rcu_head rcu;
+} ____cacheline_aligned_in_smp;
+
/**
* struct socket - general BSD socket
* @state: socket state (%SS_CONNECTED, etc)
@@ -135,11 +142,8 @@ struct socket {
kmemcheck_bitfield_end(type);
unsigned long flags;
- /*
- * Please keep fasync_list & wait fields in the same cache line
- */
- struct fasync_struct *fasync_list;
- wait_queue_head_t wait;
+
+ struct socket_wq *wq;
struct file *file;
struct sock *sk;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index fa8b47637997..0bc13dab671f 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -31,6 +31,7 @@
#include <linux/if_link.h>
#ifdef __KERNEL__
+#include <linux/pm_qos_params.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/mm.h>
@@ -218,34 +219,6 @@ struct neighbour;
struct neigh_parms;
struct sk_buff;
-struct netif_rx_stats {
- unsigned total;
- unsigned dropped;
- unsigned time_squeeze;
- unsigned cpu_collision;
-};
-
-DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);
-
-struct dev_addr_list {
- struct dev_addr_list *next;
- u8 da_addr[MAX_ADDR_LEN];
- u8 da_addrlen;
- u8 da_synced;
- int da_users;
- int da_gusers;
-};
-
-/*
- * We tag multicasts with these structures.
- */
-
-#define dev_mc_list dev_addr_list
-#define dmi_addr da_addr
-#define dmi_addrlen da_addrlen
-#define dmi_users da_users
-#define dmi_gusers da_gusers
-
struct netdev_hw_addr {
struct list_head list;
unsigned char addr[MAX_ADDR_LEN];
@@ -254,8 +227,10 @@ struct netdev_hw_addr {
#define NETDEV_HW_ADDR_T_SAN 2
#define NETDEV_HW_ADDR_T_SLAVE 3
#define NETDEV_HW_ADDR_T_UNICAST 4
+#define NETDEV_HW_ADDR_T_MULTICAST 5
int refcount;
bool synced;
+ bool global_use;
struct rcu_head rcu_head;
};
@@ -264,16 +239,20 @@ struct netdev_hw_addr_list {
int count;
};
-#define netdev_uc_count(dev) ((dev)->uc.count)
-#define netdev_uc_empty(dev) ((dev)->uc.count == 0)
-#define netdev_for_each_uc_addr(ha, dev) \
- list_for_each_entry(ha, &dev->uc.list, list)
+#define netdev_hw_addr_list_count(l) ((l)->count)
+#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
+#define netdev_hw_addr_list_for_each(ha, l) \
+ list_for_each_entry(ha, &(l)->list, list)
-#define netdev_mc_count(dev) ((dev)->mc_count)
-#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0)
+#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
+#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
+#define netdev_for_each_uc_addr(ha, dev) \
+ netdev_hw_addr_list_for_each(ha, &(dev)->uc)
-#define netdev_for_each_mc_addr(mclist, dev) \
- for (mclist = dev->mc_list; mclist; mclist = mclist->next)
+#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
+#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
+#define netdev_for_each_mc_addr(ha, dev) \
+ netdev_hw_addr_list_for_each(ha, &(dev)->mc)
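/*
 * Sketch of the new iteration style in a driver's ndo_set_rx_mode handler;
 * hw_clear_mc_filter() and hw_add_mc_addr() are hypothetical hardware
 * hooks, not kernel APIs.
 */
static void example_set_rx_mode(struct net_device *dev)
{
        struct netdev_hw_addr *ha;

        hw_clear_mc_filter(dev);
        netdev_for_each_mc_addr(ha, dev)
                hw_add_mc_addr(dev, ha->addr);
}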
struct hh_cache {
struct hh_cache *hh_next; /* Next entry */
@@ -530,6 +509,85 @@ struct netdev_queue {
unsigned long tx_dropped;
} ____cacheline_aligned_in_smp;
+#ifdef CONFIG_RPS
+/*
+ * This structure holds an RPS map which can be of variable length. The
+ * map is an array of CPUs.
+ */
+struct rps_map {
+ unsigned int len;
+ struct rcu_head rcu;
+ u16 cpus[0];
+};
+#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + (_num * sizeof(u16)))
+
+/*
+ * The rps_dev_flow structure contains the mapping of a flow to a CPU and the
+ * tail pointer for that CPU's input queue at the time of last enqueue.
+ */
+struct rps_dev_flow {
+ u16 cpu;
+ u16 fill;
+ unsigned int last_qtail;
+};
+
+/*
+ * The rps_dev_flow_table structure contains a table of flow mappings.
+ */
+struct rps_dev_flow_table {
+ unsigned int mask;
+ struct rcu_head rcu;
+ struct work_struct free_work;
+ struct rps_dev_flow flows[0];
+};
+#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
+ (_num * sizeof(struct rps_dev_flow)))
+
+/*
+ * The rps_sock_flow_table contains mappings of flows to the last CPU
+ * on which they were processed by the application (set in recvmsg).
+ */
+struct rps_sock_flow_table {
+ unsigned int mask;
+ u16 ents[0];
+};
+#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
+ (_num * sizeof(u16)))
+
+#define RPS_NO_CPU 0xffff
+
+static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
+ u32 hash)
+{
+ if (table && hash) {
+ unsigned int cpu, index = hash & table->mask;
+
+ /* We only give a hint, preemption can change cpu under us */
+ cpu = raw_smp_processor_id();
+
+ if (table->ents[index] != cpu)
+ table->ents[index] = cpu;
+ }
+}
+
+static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
+ u32 hash)
+{
+ if (table && hash)
+ table->ents[hash & table->mask] = RPS_NO_CPU;
+}
+
+extern struct rps_sock_flow_table *rps_sock_flow_table;
+
+/* This structure contains an instance of an RX queue. */
+struct netdev_rx_queue {
+ struct rps_map *rps_map;
+ struct rps_dev_flow_table *rps_flow_table;
+ struct kobject kobj;
+ struct netdev_rx_queue *first;
+ atomic_t count;
+} ____cacheline_aligned_in_smp;
+#endif /* CONFIG_RPS */
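/*
 * Sketch of building an rps_map for one RX queue, roughly what a sysfs
 * store handler might do; locking and error paths are simplified and the
 * function name is hypothetical.
 */
static struct rps_map *example_build_rps_map(const struct cpumask *mask)
{
        struct rps_map *map;
        int cpu, i = 0;

        map = kzalloc(max_t(unsigned, RPS_MAP_SIZE(cpumask_weight(mask)),
                            L1_CACHE_BYTES), GFP_KERNEL);
        if (!map)
                return NULL;

        for_each_cpu(cpu, mask)
                map->cpus[i++] = cpu;
        map->len = i;

        return map;
}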
/*
* This structure defines the management hooks for network devices.
@@ -667,6 +725,7 @@ struct net_device_ops {
unsigned short vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
void (*ndo_poll_controller)(struct net_device *dev);
+ void (*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
int (*ndo_set_vf_mac)(struct net_device *dev,
int queue, u8 *mac);
@@ -711,6 +770,9 @@ struct net_device {
* the interface.
*/
char name[IFNAMSIZ];
+
+ struct pm_qos_request_list *pm_qos_req;
+
/* device name hash chain */
struct hlist_node name_hlist;
/* snmp alias */
@@ -764,6 +826,7 @@ struct net_device {
#define NETIF_F_SCTP_CSUM (1 << 25) /* SCTP checksum offload */
#define NETIF_F_FCOE_MTU (1 << 26) /* Supports max FCoE MTU, 2158 bytes*/
#define NETIF_F_NTUPLE (1 << 27) /* N-tuple filters supported */
+#define NETIF_F_RXHASH (1 << 28) /* Receive hashing offload */
/* Segmentation offload features */
#define NETIF_F_GSO_SHIFT 16
@@ -820,7 +883,7 @@ struct net_device {
unsigned char operstate; /* RFC2863 operstate */
unsigned char link_mode; /* mapping policy to operstate */
- unsigned mtu; /* interface MTU value */
+ unsigned int mtu; /* interface MTU value */
unsigned short type; /* interface hardware type */
unsigned short hard_header_len; /* hardware hdr length */
@@ -840,12 +903,10 @@ struct net_device {
unsigned char addr_len; /* hardware address length */
unsigned short dev_id; /* for shared network cards */
- struct netdev_hw_addr_list uc; /* Secondary unicast
- mac addresses */
- int uc_promisc;
spinlock_t addr_list_lock;
- struct dev_addr_list *mc_list; /* Multicast mac addresses */
- int mc_count; /* Number of installed mcasts */
+ struct netdev_hw_addr_list uc; /* Unicast mac addresses */
+ struct netdev_hw_addr_list mc; /* Multicast mac addresses */
+ int uc_promisc;
unsigned int promiscuity;
unsigned int allmulti;
@@ -878,6 +939,15 @@ struct net_device {
unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
+#ifdef CONFIG_RPS
+ struct kset *queues_kset;
+
+ struct netdev_rx_queue *_rx;
+
+ /* Number of RX queues allocated at alloc_netdev_mq() time */
+ unsigned int num_rx_queues;
+#endif
+
struct netdev_queue rx_queue;
struct netdev_queue *_tx ____cacheline_aligned_in_smp;
@@ -1306,19 +1376,44 @@ static inline int unregister_gifconf(unsigned int family)
}
/*
- * Incoming packets are placed on per-cpu queues so that
- * no locking is needed.
+ * Incoming packets are placed on per-cpu queues
*/
struct softnet_data {
struct Qdisc *output_queue;
- struct sk_buff_head input_pkt_queue;
+ struct Qdisc **output_queue_tailp;
struct list_head poll_list;
struct sk_buff *completion_queue;
-
+ struct sk_buff_head process_queue;
+
+ /* stats */
+ unsigned int processed;
+ unsigned int time_squeeze;
+ unsigned int cpu_collision;
+ unsigned int received_rps;
+
+#ifdef CONFIG_RPS
+ struct softnet_data *rps_ipi_list;
+
+ /* Elements below can be accessed between CPUs for RPS */
+ struct call_single_data csd ____cacheline_aligned_in_smp;
+ struct softnet_data *rps_ipi_next;
+ unsigned int cpu;
+ unsigned int input_queue_head;
+#endif
+ unsigned dropped;
+ struct sk_buff_head input_pkt_queue;
struct napi_struct backlog;
};
-DECLARE_PER_CPU(struct softnet_data,softnet_data);
+static inline void input_queue_head_add(struct softnet_data *sd,
+ unsigned int len)
+{
+#ifdef CONFIG_RPS
+ sd->input_queue_head += len;
+#endif
+}
+
+DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
#define HAVE_NETIF_QUEUE
@@ -1945,6 +2040,22 @@ extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
extern int register_netdev(struct net_device *dev);
extern void unregister_netdev(struct net_device *dev);
+/* General hardware address lists handling functions */
+extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
+ struct netdev_hw_addr_list *from_list,
+ int addr_len, unsigned char addr_type);
+extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
+ struct netdev_hw_addr_list *from_list,
+ int addr_len, unsigned char addr_type);
+extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
+ struct netdev_hw_addr_list *from_list,
+ int addr_len);
+extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
+ struct netdev_hw_addr_list *from_list,
+ int addr_len);
+extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
+extern void __hw_addr_init(struct netdev_hw_addr_list *list);
+
/* Functions used for device addresses handling */
extern int dev_addr_add(struct net_device *dev, unsigned char *addr,
unsigned char addr_type);
@@ -1956,26 +2067,34 @@ extern int dev_addr_add_multiple(struct net_device *to_dev,
extern int dev_addr_del_multiple(struct net_device *to_dev,
struct net_device *from_dev,
unsigned char addr_type);
+extern void dev_addr_flush(struct net_device *dev);
+extern int dev_addr_init(struct net_device *dev);
+
+/* Functions used for unicast addresses handling */
+extern int dev_uc_add(struct net_device *dev, unsigned char *addr);
+extern int dev_uc_del(struct net_device *dev, unsigned char *addr);
+extern int dev_uc_sync(struct net_device *to, struct net_device *from);
+extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
+extern void dev_uc_flush(struct net_device *dev);
+extern void dev_uc_init(struct net_device *dev);
+
+/* Functions used for multicast addresses handling */
+extern int dev_mc_add(struct net_device *dev, unsigned char *addr);
+extern int dev_mc_add_global(struct net_device *dev, unsigned char *addr);
+extern int dev_mc_del(struct net_device *dev, unsigned char *addr);
+extern int dev_mc_del_global(struct net_device *dev, unsigned char *addr);
+extern int dev_mc_sync(struct net_device *to, struct net_device *from);
+extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
+extern void dev_mc_flush(struct net_device *dev);
+extern void dev_mc_init(struct net_device *dev);
/* Functions used for secondary unicast and multicast support */
extern void dev_set_rx_mode(struct net_device *dev);
extern void __dev_set_rx_mode(struct net_device *dev);
-extern int dev_unicast_delete(struct net_device *dev, void *addr);
-extern int dev_unicast_add(struct net_device *dev, void *addr);
-extern int dev_unicast_sync(struct net_device *to, struct net_device *from);
-extern void dev_unicast_unsync(struct net_device *to, struct net_device *from);
-extern int dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
-extern int dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
-extern int dev_mc_sync(struct net_device *to, struct net_device *from);
-extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
-extern int __dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
-extern int __dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
-extern int __dev_addr_sync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
-extern void __dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
extern int dev_set_promiscuity(struct net_device *dev, int inc);
extern int dev_set_allmulti(struct net_device *dev, int inc);
extern void netdev_state_change(struct net_device *dev);
-extern void netdev_bonding_change(struct net_device *dev,
+extern int netdev_bonding_change(struct net_device *dev,
unsigned long event);
extern void netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
@@ -2045,54 +2164,14 @@ static inline void netif_set_gso_max_size(struct net_device *dev,
dev->gso_max_size = size;
}
-static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
- struct net_device *master)
-{
- if (skb->pkt_type == PACKET_HOST) {
- u16 *dest = (u16 *) eth_hdr(skb)->h_dest;
+extern int __skb_bond_should_drop(struct sk_buff *skb,
+ struct net_device *master);
- memcpy(dest, master->dev_addr, ETH_ALEN);
- }
-}
-
-/* On bonding slaves other than the currently active slave, suppress
- * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
- * ARP on active-backup slaves with arp_validate enabled.
- */
static inline int skb_bond_should_drop(struct sk_buff *skb,
struct net_device *master)
{
- if (master) {
- struct net_device *dev = skb->dev;
-
- if (master->priv_flags & IFF_MASTER_ARPMON)
- dev->last_rx = jiffies;
-
- if ((master->priv_flags & IFF_MASTER_ALB) && master->br_port) {
- /* Do address unmangle. The local destination address
- * will be always the one master has. Provides the right
- * functionality in a bridge.
- */
- skb_bond_set_mac_by_master(skb, master);
- }
-
- if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
- if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
- skb->protocol == __cpu_to_be16(ETH_P_ARP))
- return 0;
-
- if (master->priv_flags & IFF_MASTER_ALB) {
- if (skb->pkt_type != PACKET_BROADCAST &&
- skb->pkt_type != PACKET_MULTICAST)
- return 0;
- }
- if (master->priv_flags & IFF_MASTER_8023AD &&
- skb->protocol == __cpu_to_be16(ETH_P_SLOW))
- return 0;
-
- return 1;
- }
- }
+ if (master)
+ return __skb_bond_should_drop(skb, master);
return 0;
}
diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild
index a5a63e41b8af..48767cd16453 100644
--- a/include/linux/netfilter/Kbuild
+++ b/include/linux/netfilter/Kbuild
@@ -16,6 +16,7 @@ header-y += xt_RATEEST.h
header-y += xt_SECMARK.h
header-y += xt_TCPMSS.h
header-y += xt_TCPOPTSTRIP.h
+header-y += xt_TEE.h
header-y += xt_TPROXY.h
header-y += xt_comment.h
header-y += xt_connbytes.h
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
index c608677dda60..14e6d32002c4 100644
--- a/include/linux/netfilter/nf_conntrack_common.h
+++ b/include/linux/netfilter/nf_conntrack_common.h
@@ -113,6 +113,7 @@ struct ip_conntrack_stat {
unsigned int expect_new;
unsigned int expect_create;
unsigned int expect_delete;
+ unsigned int search_restart;
};
/* call to create an explicit dependency on nf_conntrack. */
diff --git a/include/linux/netfilter/nf_conntrack_tuple_common.h b/include/linux/netfilter/nf_conntrack_tuple_common.h
index 8e145f0d61cb..2ea22b018a87 100644
--- a/include/linux/netfilter/nf_conntrack_tuple_common.h
+++ b/include/linux/netfilter/nf_conntrack_tuple_common.h
@@ -1,8 +1,7 @@
#ifndef _NF_CONNTRACK_TUPLE_COMMON_H
#define _NF_CONNTRACK_TUPLE_COMMON_H
-enum ip_conntrack_dir
-{
+enum ip_conntrack_dir {
IP_CT_DIR_ORIGINAL,
IP_CT_DIR_REPLY,
IP_CT_DIR_MAX
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 84c7c928e9eb..eeb4884c30be 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -1,9 +1,10 @@
#ifndef _X_TABLES_H
#define _X_TABLES_H
-
+#include <linux/kernel.h>
#include <linux/types.h>
#define XT_FUNCTION_MAXNAMELEN 30
+#define XT_EXTENSION_MAXNAMELEN 29
#define XT_TABLE_MAXNAMELEN 32
struct xt_entry_match {
@@ -12,8 +13,7 @@ struct xt_entry_match {
__u16 match_size;
/* Used by userspace */
- char name[XT_FUNCTION_MAXNAMELEN-1];
-
+ char name[XT_EXTENSION_MAXNAMELEN];
__u8 revision;
} user;
struct {
@@ -36,8 +36,7 @@ struct xt_entry_target {
__u16 target_size;
/* Used by userspace */
- char name[XT_FUNCTION_MAXNAMELEN-1];
-
+ char name[XT_EXTENSION_MAXNAMELEN];
__u8 revision;
} user;
struct {
@@ -70,8 +69,7 @@ struct xt_standard_target {
/* The argument to IPT_SO_GET_REVISION_*. Returns highest revision
* kernel supports, if >= revision. */
struct xt_get_revision {
- char name[XT_FUNCTION_MAXNAMELEN-1];
-
+ char name[XT_EXTENSION_MAXNAMELEN];
__u8 revision;
};
@@ -93,7 +91,7 @@ struct _xt_align {
__u64 u64;
};
-#define XT_ALIGN(s) ALIGN((s), __alignof__(struct _xt_align))
+#define XT_ALIGN(s) __ALIGN_KERNEL((s), __alignof__(struct _xt_align))
/* Standard return verdict, or do jump. */
#define XT_STANDARD_TARGET ""
@@ -197,6 +195,7 @@ struct xt_counters_info {
* @family: Actual NFPROTO_* through which the function is invoked
* (helpful when match->family == NFPROTO_UNSPEC)
* @hotdrop: drop packet if we had inspection problems
+ * Network namespace obtainable using dev_net(in/out)
*/
struct xt_match_param {
const struct net_device *in, *out;
@@ -213,12 +212,14 @@ struct xt_match_param {
* struct xt_mtchk_param - parameters for match extensions'
* checkentry functions
*
+ * @net: network namespace through which the check was invoked
* @table: table the rule is tried to be inserted into
* @entryinfo: the family-specific rule data
- * (struct ipt_ip, ip6t_ip, ebt_entry)
+ * (struct ipt_ip, ip6t_ip, arpt_arp or (note) ebt_entry)
* @match: struct xt_match through which this function was invoked
* @matchinfo: per-match data
* @hook_mask: via which hooks the new rule is reachable
+ * Other fields as above.
*/
struct xt_mtchk_param {
struct net *net;
@@ -230,7 +231,10 @@ struct xt_mtchk_param {
u_int8_t family;
};
-/* Match destructor parameters */
+/**
+ * struct xt_mdtor_param - match destructor parameters
+ * Fields as above.
+ */
struct xt_mtdtor_param {
struct net *net;
const struct xt_match *match;
@@ -285,7 +289,7 @@ struct xt_tgdtor_param {
struct xt_match {
struct list_head list;
- const char name[XT_FUNCTION_MAXNAMELEN-1];
+ const char name[XT_EXTENSION_MAXNAMELEN];
u_int8_t revision;
/* Return true or false: return FALSE and set *hotdrop = 1 to
@@ -297,7 +301,7 @@ struct xt_match {
const struct xt_match_param *);
/* Called when user tries to insert an entry of this type. */
- bool (*checkentry)(const struct xt_mtchk_param *);
+ int (*checkentry)(const struct xt_mtchk_param *);
/* Called when entry of this type deleted. */
void (*destroy)(const struct xt_mtdtor_param *);
@@ -309,9 +313,6 @@ struct xt_match {
/* Set this to THIS_MODULE if you are a module, otherwise NULL */
struct module *me;
- /* Free to use by each match */
- unsigned long data;
-
const char *table;
unsigned int matchsize;
#ifdef CONFIG_COMPAT
@@ -327,7 +328,8 @@ struct xt_match {
struct xt_target {
struct list_head list;
- const char name[XT_FUNCTION_MAXNAMELEN-1];
+ const char name[XT_EXTENSION_MAXNAMELEN];
+ u_int8_t revision;
/* Returns verdict. Argument order changed since 2.6.9, as this
must now handle non-linear skbs, using skb_copy_bits and
@@ -338,8 +340,8 @@ struct xt_target {
/* Called when user tries to insert an entry of this type:
hook_mask is a bitmask of hooks from which it can be
called. */
- /* Should return true or false. */
- bool (*checkentry)(const struct xt_tgchk_param *);
+ /* Should return true or false, or an error code (-Exxxx). */
+ int (*checkentry)(const struct xt_tgchk_param *);
/* Called when entry of this type deleted. */
void (*destroy)(const struct xt_tgdtor_param *);
@@ -360,7 +362,6 @@ struct xt_target {
unsigned short proto;
unsigned short family;
- u_int8_t revision;
};
/* Furniture shopping... */
@@ -398,6 +399,13 @@ struct xt_table_info {
unsigned int hook_entry[NF_INET_NUMHOOKS];
unsigned int underflow[NF_INET_NUMHOOKS];
+ /*
+ * Number of user chains. Since tables cannot have loops, at most
+ * @stacksize jumps (number of user chains) can possibly be made.
+ */
+ unsigned int stacksize;
+ unsigned int *stackptr;
+ void ***jumpstack;
/* ipt_entry tables: one per CPU */
/* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */
void *entries[1];
@@ -433,6 +441,8 @@ extern struct xt_table_info *xt_replace_table(struct xt_table *table,
extern struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
extern struct xt_target *xt_find_target(u8 af, const char *name, u8 revision);
+extern struct xt_match *xt_request_find_match(u8 af, const char *name,
+ u8 revision);
extern struct xt_target *xt_request_find_target(u8 af, const char *name,
u8 revision);
extern int xt_find_revision(u8 af, const char *name, u8 revision,
@@ -598,7 +608,7 @@ struct _compat_xt_align {
compat_u64 u64;
};
-#define COMPAT_XT_ALIGN(s) ALIGN((s), __alignof__(struct _compat_xt_align))
+#define COMPAT_XT_ALIGN(s) __ALIGN_KERNEL((s), __alignof__(struct _compat_xt_align))
extern void xt_compat_lock(u_int8_t af);
extern void xt_compat_unlock(u_int8_t af);
diff --git a/include/linux/netfilter/xt_CONNMARK.h b/include/linux/netfilter/xt_CONNMARK.h
index 0a8545866752..2f2e48ec8023 100644
--- a/include/linux/netfilter/xt_CONNMARK.h
+++ b/include/linux/netfilter/xt_CONNMARK.h
@@ -1,26 +1,6 @@
#ifndef _XT_CONNMARK_H_target
#define _XT_CONNMARK_H_target
-#include <linux/types.h>
-
-/* Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com>
- * by Henrik Nordstrom <hno@marasystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-enum {
- XT_CONNMARK_SET = 0,
- XT_CONNMARK_SAVE,
- XT_CONNMARK_RESTORE
-};
-
-struct xt_connmark_tginfo1 {
- __u32 ctmark, ctmask, nfmask;
- __u8 mode;
-};
+#include <linux/netfilter/xt_connmark.h>
#endif /*_XT_CONNMARK_H_target*/
diff --git a/include/linux/netfilter/xt_MARK.h b/include/linux/netfilter/xt_MARK.h
index bc9561bdef79..41c456deba22 100644
--- a/include/linux/netfilter/xt_MARK.h
+++ b/include/linux/netfilter/xt_MARK.h
@@ -1,10 +1,6 @@
#ifndef _XT_MARK_H_target
#define _XT_MARK_H_target
-#include <linux/types.h>
-
-struct xt_mark_tginfo2 {
- __u32 mark, mask;
-};
+#include <linux/netfilter/xt_mark.h>
#endif /*_XT_MARK_H_target */
diff --git a/include/linux/netfilter/xt_TEE.h b/include/linux/netfilter/xt_TEE.h
new file mode 100644
index 000000000000..5c21d5c829af
--- /dev/null
+++ b/include/linux/netfilter/xt_TEE.h
@@ -0,0 +1,12 @@
+#ifndef _XT_TEE_TARGET_H
+#define _XT_TEE_TARGET_H
+
+struct xt_tee_tginfo {
+ union nf_inet_addr gw;
+ char oif[16];
+
+ /* used internally by the kernel */
+ struct xt_tee_priv *priv __attribute__((aligned(8)));
+};
+
+#endif /* _XT_TEE_TARGET_H */
diff --git a/include/linux/netfilter/xt_connmark.h b/include/linux/netfilter/xt_connmark.h
index 619e47cde01a..efc17a8305fb 100644
--- a/include/linux/netfilter/xt_connmark.h
+++ b/include/linux/netfilter/xt_connmark.h
@@ -12,6 +12,17 @@
* (at your option) any later version.
*/
+enum {
+ XT_CONNMARK_SET = 0,
+ XT_CONNMARK_SAVE,
+ XT_CONNMARK_RESTORE
+};
+
+struct xt_connmark_tginfo1 {
+ __u32 ctmark, ctmask, nfmask;
+ __u8 mode;
+};
+
struct xt_connmark_mtinfo1 {
__u32 mark, mask;
__u8 invert;
diff --git a/include/linux/netfilter/xt_mark.h b/include/linux/netfilter/xt_mark.h
index 6607c8f38ea5..ecadc40d5cde 100644
--- a/include/linux/netfilter/xt_mark.h
+++ b/include/linux/netfilter/xt_mark.h
@@ -3,6 +3,10 @@
#include <linux/types.h>
+struct xt_mark_tginfo2 {
+ __u32 mark, mask;
+};
+
struct xt_mark_mtinfo1 {
__u32 mark, mask;
__u8 invert;
diff --git a/include/linux/netfilter/xt_recent.h b/include/linux/netfilter/xt_recent.h
index d2c276609925..83318e01425e 100644
--- a/include/linux/netfilter/xt_recent.h
+++ b/include/linux/netfilter/xt_recent.h
@@ -9,6 +9,7 @@ enum {
XT_RECENT_UPDATE = 1 << 2,
XT_RECENT_REMOVE = 1 << 3,
XT_RECENT_TTL = 1 << 4,
+ XT_RECENT_REAP = 1 << 5,
XT_RECENT_SOURCE = 0,
XT_RECENT_DEST = 1,
@@ -16,6 +17,12 @@ enum {
XT_RECENT_NAME_LEN = 200,
};
+/* Only allowed with --rcheck and --update */
+#define XT_RECENT_MODIFIERS (XT_RECENT_TTL|XT_RECENT_REAP)
+
+#define XT_RECENT_VALID_FLAGS (XT_RECENT_CHECK|XT_RECENT_SET|XT_RECENT_UPDATE|\
+ XT_RECENT_REMOVE|XT_RECENT_TTL|XT_RECENT_REAP)
+
struct xt_recent_mtinfo {
__u32 seconds;
__u32 hit_count;
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index f8105e54716a..0ddd161f3b06 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -41,10 +41,10 @@ enum nf_br_hook_priorities {
#define BRNF_PKT_TYPE 0x01
#define BRNF_BRIDGED_DNAT 0x02
-#define BRNF_DONT_TAKE_PARENT 0x04
-#define BRNF_BRIDGED 0x08
-#define BRNF_NF_BRIDGE_PREROUTING 0x10
-
+#define BRNF_BRIDGED 0x04
+#define BRNF_NF_BRIDGE_PREROUTING 0x08
+#define BRNF_8021Q 0x10
+#define BRNF_PPPoE 0x20
/* Only used in br_forward.c */
extern int nf_bridge_copy_header(struct sk_buff *skb);
@@ -68,6 +68,27 @@ static inline unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
}
}
+static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
+{
+ if (unlikely(skb->nf_bridge->mask & BRNF_PPPoE))
+ return PPPOE_SES_HLEN;
+ return 0;
+}
+
+extern int br_handle_frame_finish(struct sk_buff *skb);
+/* Only used in br_device.c */
+static inline int br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
+{
+ struct nf_bridge_info *nf_bridge = skb->nf_bridge;
+
+ skb_pull(skb, ETH_HLEN);
+ nf_bridge->mask ^= BRNF_BRIDGED_DNAT;
+ skb_copy_to_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN),
+ skb->nf_bridge->data, ETH_HLEN-ETH_ALEN);
+ skb->dev = nf_bridge->physindev;
+ return br_handle_frame_finish(skb);
+}
+
/* This is called by the IP fragmenting code and it ensures there is
* enough room for the encapsulating header (if there is one). */
static inline unsigned int nf_bridge_pad(const struct sk_buff *skb)
diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h
index e5ba03d783c6..18442ff19c07 100644
--- a/include/linux/netfilter_ipv6/ip6_tables.h
+++ b/include/linux/netfilter_ipv6/ip6_tables.h
@@ -316,10 +316,6 @@ extern int ip6t_ext_hdr(u8 nexthdr);
extern int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
int target, unsigned short *fragoff);
-extern int ip6_masked_addrcmp(const struct in6_addr *addr1,
- const struct in6_addr *mask,
- const struct in6_addr *addr2);
-
#define IP6T_ALIGN(s) XT_ALIGN(s)
#ifdef CONFIG_COMPAT
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index a765ea898549..e9e231215865 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -14,6 +14,7 @@
struct netpoll {
struct net_device *dev;
+ struct net_device *real_dev;
char dev_name[IFNAMSIZ];
const char *name;
void (*rx_hook)(struct netpoll *, int, char *, int);
@@ -36,8 +37,11 @@ struct netpoll_info {
struct sk_buff_head txq;
struct delayed_work tx_work;
+
+ struct netpoll *netpoll;
};
+void netpoll_poll_dev(struct net_device *dev);
void netpoll_poll(struct netpoll *np);
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
void netpoll_print_options(struct netpoll *np);
@@ -47,22 +51,23 @@ int netpoll_trap(void);
void netpoll_set_trap(int trap);
void netpoll_cleanup(struct netpoll *np);
int __netpoll_rx(struct sk_buff *skb);
+void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
#ifdef CONFIG_NETPOLL
-static inline int netpoll_rx(struct sk_buff *skb)
+static inline bool netpoll_rx(struct sk_buff *skb)
{
struct netpoll_info *npinfo = skb->dev->npinfo;
unsigned long flags;
- int ret = 0;
+ bool ret = false;
if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags))
- return 0;
+ return false;
spin_lock_irqsave(&npinfo->rx_lock, flags);
/* check rx_flags again with the lock held */
if (npinfo->rx_flags && __netpoll_rx(skb))
- ret = 1;
+ ret = true;
spin_unlock_irqrestore(&npinfo->rx_lock, flags);
return ret;
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 07ce4609fe50..77c2ae53431c 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -356,6 +356,20 @@ extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struc
extern u64 nfs_compat_user_ino64(u64 fileid);
extern void nfs_fattr_init(struct nfs_fattr *fattr);
+extern struct nfs_fattr *nfs_alloc_fattr(void);
+
+static inline void nfs_free_fattr(const struct nfs_fattr *fattr)
+{
+ kfree(fattr);
+}
+
+extern struct nfs_fh *nfs_alloc_fhandle(void);
+
+static inline void nfs_free_fhandle(const struct nfs_fh *fh)
+{
+ kfree(fh);
+}
+
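/*
 * Sketch of the intended allocation pattern (hypothetical caller):
 * allocate both objects, bail out on failure, free unconditionally
 * (kfree(NULL) is a no-op).
 */
static int example_getattr(void)
{
        struct nfs_fattr *fattr = nfs_alloc_fattr();
        struct nfs_fh *fh = nfs_alloc_fhandle();
        int err = -ENOMEM;

        if (fattr != NULL && fh != NULL)
                err = 0;        /* ... issue the RPC using fh/fattr ... */

        nfs_free_fhandle(fh);
        nfs_free_fattr(fattr);
        return err;
}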
/* linux/net/ipv4/ipconfig.c: trims ip addr off front of name, too. */
extern __be32 root_nfs_parse_addr(char *name); /*__init*/
extern unsigned long nfs_inc_attr_generation_counter(void);
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index e82957acea56..d6e10a4c06e5 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -44,7 +44,6 @@ struct nfs_client {
#ifdef CONFIG_NFS_V4
u64 cl_clientid; /* constant */
- nfs4_verifier cl_confirm;
unsigned long cl_state;
struct rb_root cl_openowner_id;
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 89b28812ec24..51914d7d6cc4 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -386,8 +386,8 @@ struct nfs_removeargs {
struct nfs_removeres {
const struct nfs_server *server;
+ struct nfs_fattr *dir_attr;
struct nfs4_change_info cinfo;
- struct nfs_fattr dir_attr;
struct nfs4_sequence_res seq_res;
};
@@ -824,6 +824,11 @@ struct nfs4_setclientid {
u32 sc_cb_ident;
};
+struct nfs4_setclientid_res {
+ u64 clientid;
+ nfs4_verifier confirm;
+};
+
struct nfs4_statfs_arg {
const struct nfs_fh * fh;
const u32 * bitmask;
diff --git a/include/linux/nfsd/nfsfh.h b/include/linux/nfsd/nfsfh.h
index 65e333afaee4..80d55bbc5365 100644
--- a/include/linux/nfsd/nfsfh.h
+++ b/include/linux/nfsd/nfsfh.h
@@ -40,12 +40,12 @@ struct nfs_fhbase_old {
* This is the new flexible, extensible style NFSv2/v3 file handle.
* by Neil Brown <neilb@cse.unsw.edu.au> - March 2000
*
- * The file handle is seens as a list of 4byte words.
- * The first word contains a version number (1) and four descriptor bytes
+ * The file handle starts with a sequence of four-byte words.
+ * The first word contains a version number (1) and three descriptor bytes
* that tell how the remaining 3 variable length fields should be handled.
* These three bytes are auth_type, fsid_type and fileid_type.
*
- * All 4byte values are in host-byte-order.
+ * All four-byte values are in host-byte-order.
*
* The auth_type field specifies how the filehandle can be authenticated
* This might allow a file to be confirmed to be in a writable part of a
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
index 640702e97457..8c2c6116e788 100644
--- a/include/linux/nilfs2_fs.h
+++ b/include/linux/nilfs2_fs.h
@@ -199,16 +199,15 @@ struct nilfs_super_block {
__le32 s_creator_os; /* OS */
__le16 s_def_resuid; /* Default uid for reserved blocks */
__le16 s_def_resgid; /* Default gid for reserved blocks */
- __le32 s_first_ino; /* First non-reserved inode */
+ __le32 s_first_ino; /* First non-reserved inode */
- __le16 s_inode_size; /* Size of an inode */
+ __le16 s_inode_size; /* Size of an inode */
__le16 s_dat_entry_size; /* Size of a dat entry */
__le16 s_checkpoint_size; /* Size of a checkpoint */
__le16 s_segment_usage_size; /* Size of a segment usage */
__u8 s_uuid[16]; /* 128-bit uuid for volume */
- char s_volume_name[16]; /* volume name */
- char s_last_mounted[64]; /* directory where last mounted */
+ char s_volume_name[80]; /* volume name */
__le32 s_c_interval; /* Commit interval of segment */
__le32 s_c_block_max; /* Threshold of data amount for
@@ -377,6 +376,7 @@ union nilfs_binfo {
* @ss_nfinfo: number of finfo structures
* @ss_sumbytes: total size of segment summary in bytes
* @ss_pad: padding
+ * @ss_cno: checkpoint number
*/
struct nilfs_segment_summary {
__le32 ss_datasum;
@@ -391,6 +391,7 @@ struct nilfs_segment_summary {
__le32 ss_nfinfo;
__le32 ss_sumbytes;
__le32 ss_pad;
+ __le64 ss_cno;
/* array of finfo structures */
};
@@ -437,10 +438,10 @@ struct nilfs_palloc_group_desc {
/**
* struct nilfs_dat_entry - disk address translation entry
- * @dt_blocknr: block number
- * @dt_start: start checkpoint number
- * @dt_end: end checkpoint number
- * @dt_rsv: reserved for future use
+ * @de_blocknr: block number
+ * @de_start: start checkpoint number
+ * @de_end: end checkpoint number
+ * @de_rsv: reserved for future use
*/
struct nilfs_dat_entry {
__le64 de_blocknr;
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index 28ba20fda3e2..b7c77f9712f4 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -52,6 +52,8 @@
* %NL80211_ATTR_WIPHY_CHANNEL_TYPE, %NL80211_ATTR_WIPHY_RETRY_SHORT,
* %NL80211_ATTR_WIPHY_RETRY_LONG, %NL80211_ATTR_WIPHY_FRAG_THRESHOLD,
* and/or %NL80211_ATTR_WIPHY_RTS_THRESHOLD.
+ * However, for setting the channel, see %NL80211_CMD_SET_CHANNEL
+ * instead; the support here is for backward compatibility only.
* @NL80211_CMD_NEW_WIPHY: Newly created wiphy, response to get request
* or rename notification. Has attributes %NL80211_ATTR_WIPHY and
* %NL80211_ATTR_WIPHY_NAME.
@@ -323,6 +325,21 @@
* the TX command and %NL80211_ATTR_FRAME includes the contents of the
* frame. %NL80211_ATTR_ACK flag is included if the recipient acknowledged
* the frame.
+ * @NL80211_CMD_SET_CQM: Connection quality monitor configuration. This command
+ * is used to configure connection quality monitoring notification trigger
+ * levels.
+ * @NL80211_CMD_NOTIFY_CQM: Connection quality monitor notification. This
+ * command is used as an event to indicate that a trigger level was
+ * reached.
+ * @NL80211_CMD_SET_CHANNEL: Set the channel (using %NL80211_ATTR_WIPHY_FREQ
+ * and %NL80211_ATTR_WIPHY_CHANNEL_TYPE) the given interface (identified
+ * by %NL80211_ATTR_IFINDEX) shall operate on.
+ * In case multiple channels are supported by the device, the mechanism
+ * with which it switches channels is implementation-defined.
+ * When a monitor interface is given, it can only switch channel while
+ * no other interfaces are operating to avoid disturbing the operation
+ * of any other interfaces, and other interfaces will again take
+ * precedence when they are used.
*
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
@@ -419,6 +436,11 @@ enum nl80211_commands {
NL80211_CMD_SET_POWER_SAVE,
NL80211_CMD_GET_POWER_SAVE,
+ NL80211_CMD_SET_CQM,
+ NL80211_CMD_NOTIFY_CQM,
+
+ NL80211_CMD_SET_CHANNEL,
+
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@@ -691,6 +713,18 @@ enum nl80211_commands {
* @NL80211_ATTR_ACK: Flag attribute indicating that the frame was
* acknowledged by the recipient.
*
+ * @NL80211_ATTR_CQM: connection quality monitor configuration in a
+ * nested attribute with %NL80211_ATTR_CQM_* sub-attributes.
+ *
+ * @NL80211_ATTR_LOCAL_STATE_CHANGE: Flag attribute to indicate that a command
+ * is requesting a local authentication/association state change without
+ * invoking actual management frame exchange. This can be used with
+ * NL80211_CMD_AUTHENTICATE, NL80211_CMD_DEAUTHENTICATE,
+ * NL80211_CMD_DISASSOCIATE.
+ *
+ * @NL80211_ATTR_AP_ISOLATE: (AP mode) Do not forward traffic between stations
+ * connected to this BSS.
+ *
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
*/
@@ -842,6 +876,12 @@ enum nl80211_attrs {
NL80211_ATTR_PS_STATE,
+ NL80211_ATTR_CQM,
+
+ NL80211_ATTR_LOCAL_STATE_CHANGE,
+
+ NL80211_ATTR_AP_ISOLATE,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -1583,4 +1623,40 @@ enum nl80211_ps_state {
NL80211_PS_ENABLED,
};
+/**
+ * enum nl80211_attr_cqm - connection quality monitor attributes
+ * @__NL80211_ATTR_CQM_INVALID: invalid
+ * @NL80211_ATTR_CQM_RSSI_THOLD: RSSI threshold in dBm. This value specifies
+ * the threshold for the RSSI level at which an event will be sent. Zero
+ * to disable.
+ * @NL80211_ATTR_CQM_RSSI_HYST: RSSI hysteresis in dBm. This value specifies
+ * the minimum amount the RSSI level must change after an event before a
+ * new event may be issued (to reduce effects of RSSI oscillation).
+ * @NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT: RSSI threshold event
+ * @__NL80211_ATTR_CQM_AFTER_LAST: internal
+ * @NL80211_ATTR_CQM_MAX: highest connection quality monitor attribute
+ */
+enum nl80211_attr_cqm {
+ __NL80211_ATTR_CQM_INVALID,
+ NL80211_ATTR_CQM_RSSI_THOLD,
+ NL80211_ATTR_CQM_RSSI_HYST,
+ NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT,
+
+ /* keep last */
+ __NL80211_ATTR_CQM_AFTER_LAST,
+ NL80211_ATTR_CQM_MAX = __NL80211_ATTR_CQM_AFTER_LAST - 1
+};
+
+/**
+ * enum nl80211_cqm_rssi_threshold_event - RSSI threshold event
+ * @NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW: The RSSI level is lower than the
+ * configured threshold
+ * @NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH: The RSSI is higher than the
+ * configured threshold
+ */
+enum nl80211_cqm_rssi_threshold_event {
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
+};
+
#endif /* __LINUX_NL80211_H */
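
A minimal sketch of how a kernel-side handler might unpack the nested %NL80211_ATTR_CQM payload documented above, using the standard netlink attribute helpers; the function name and policy table are hypothetical, and the thresholds are read as u32 and interpreted as signed dBm values:

static const struct nla_policy example_cqm_policy[NL80211_ATTR_CQM_MAX + 1] = {
	[NL80211_ATTR_CQM_RSSI_THOLD]	= { .type = NLA_U32 },
	[NL80211_ATTR_CQM_RSSI_HYST]	= { .type = NLA_U32 },
};

static int example_parse_cqm(struct nlattr *cqm_attr)
{
	struct nlattr *tb[NL80211_ATTR_CQM_MAX + 1];
	s32 thold, hyst;
	int err;

	err = nla_parse_nested(tb, NL80211_ATTR_CQM_MAX, cqm_attr,
			       example_cqm_policy);
	if (err)
		return err;
	if (!tb[NL80211_ATTR_CQM_RSSI_THOLD] || !tb[NL80211_ATTR_CQM_RSSI_HYST])
		return -EINVAL;

	thold = nla_get_u32(tb[NL80211_ATTR_CQM_RSSI_THOLD]); /* dBm, 0 disables */
	hyst  = nla_get_u32(tb[NL80211_ATTR_CQM_RSSI_HYST]);  /* dBm */

	/* hand thold/hyst to the driver's RSSI monitor here */
	return 0;
}
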
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index b752e807adde..22cc7960b649 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -20,10 +20,14 @@ extern void touch_nmi_watchdog(void);
extern void acpi_nmi_disable(void);
extern void acpi_nmi_enable(void);
#else
+#ifndef CONFIG_NMI_WATCHDOG
static inline void touch_nmi_watchdog(void)
{
touch_softlockup_watchdog();
}
+#else
+extern void touch_nmi_watchdog(void);
+#endif
static inline void acpi_nmi_disable(void) { }
static inline void acpi_nmi_enable(void) { }
#endif
@@ -47,4 +51,13 @@ static inline bool trigger_all_cpu_backtrace(void)
}
#endif
+#ifdef CONFIG_NMI_WATCHDOG
+int hw_nmi_is_cpu_stuck(struct pt_regs *);
+u64 hw_nmi_get_sample_period(void);
+extern int nmi_watchdog_enabled;
+struct ctl_table;
+extern int proc_nmi_enabled(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
+#endif
+
#endif
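
For reference, the usual caller pattern that the touch_nmi_watchdog() declaration serves is a long busy-wait that pets the watchdog; a hedged sketch, with the device and its poll helper entirely hypothetical:

#include <linux/nmi.h>
#include <linux/delay.h>

static void example_wait_ready(struct example_dev *dev)
{
	while (!example_device_ready(dev)) {
		touch_nmi_watchdog();	/* keep NMI/softlockup watchdogs quiet */
		udelay(100);
	}
}
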
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index fee6c2f68075..7c3609622334 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -182,7 +182,10 @@ static inline int notifier_to_errno(int ret)
* VC switch chains (for loadable kernel svgalib VC switch helpers) etc...
*/
-/* netdevice notifier chain */
+/* netdevice notifier chain. Please remember to update the rtnetlink
+ * notification exclusion list in rtnetlink_event() when adding new
+ * types.
+ */
#define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */
#define NETDEV_DOWN 0x0002
#define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface
@@ -199,10 +202,11 @@ static inline int notifier_to_errno(int ret)
#define NETDEV_FEAT_CHANGE 0x000B
#define NETDEV_BONDING_FAILOVER 0x000C
#define NETDEV_PRE_UP 0x000D
-#define NETDEV_BONDING_OLDTYPE 0x000E
-#define NETDEV_BONDING_NEWTYPE 0x000F
+#define NETDEV_PRE_TYPE_CHANGE 0x000E
+#define NETDEV_POST_TYPE_CHANGE 0x000F
#define NETDEV_POST_INIT 0x0010
#define NETDEV_UNREGISTER_BATCH 0x0011
+#define NETDEV_BONDING_DESLAVE 0x0012
#define SYS_DOWN 0x0001 /* Notify of system down */
#define SYS_RESTART SYS_DOWN
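
The comment added above asks that new NETDEV_* events be reflected in rtnetlink_event(); on the consumer side, a notifier interested in the renamed type-change events might look like the following sketch (the handler body is hypothetical, and in this kernel generation the void pointer is the struct net_device itself):

static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	switch (event) {
	case NETDEV_PRE_TYPE_CHANGE:
		/* veto or prepare for the hardware type change */
		break;
	case NETDEV_POST_TYPE_CHANGE:
		/* re-read dev->type and adjust any cached state */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_nb = {
	.notifier_call = example_netdev_event,
};

/* registered once at init: register_netdevice_notifier(&example_netdev_nb); */
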
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index d3a74e00a3e1..e7904a9cd3a3 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -1,6 +1,7 @@
#ifndef _LINUX_OF_DEVICE_H
#define _LINUX_OF_DEVICE_H
+#ifdef CONFIG_OF_DEVICE
#include <linux/device.h>
#include <linux/of.h>
#include <linux/mod_devicetable.h>
@@ -26,5 +27,6 @@ static inline void of_device_free(struct of_device *dev)
extern ssize_t of_device_get_modalias(struct of_device *ofdev,
char *str, ssize_t len);
+#endif /* CONFIG_OF_DEVICE */
#endif /* _LINUX_OF_DEVICE_H */
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index a1ca92ccb0ff..71e1a916d3fa 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -57,6 +57,7 @@ struct boot_param_header {
__be32 dt_struct_size; /* size of the DT structure block */
};
+#if defined(CONFIG_OF_FLATTREE)
/* TBD: Temporary export of fdt globals - remove when code fully merged */
extern int __initdata dt_root_addr_cells;
extern int __initdata dt_root_size_cells;
@@ -98,6 +99,9 @@ extern int early_init_dt_scan_root(unsigned long node, const char *uname,
/* Other Prototypes */
extern void unflatten_device_tree(void);
extern void early_init_devtree(void *);
+#else /* CONFIG_OF_FLATTREE */
+static inline void unflatten_device_tree(void) {}
+#endif /* CONFIG_OF_FLATTREE */
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_OF_FDT_H */
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index 908406651330..ac3ae0758fbe 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -11,6 +11,7 @@
*
*/
+#ifdef CONFIG_OF_DEVICE
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
@@ -66,5 +67,6 @@ static inline void of_unregister_platform_driver(struct of_platform_driver *drv)
extern struct of_device *of_find_device_by_node(struct device_node *np);
extern int of_bus_type_init(struct bus_type *bus, const char *name);
+#endif /* CONFIG_OF_DEVICE */
#endif /* _LINUX_OF_PLATFORM_H */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index a788fa12ff31..7cb00845f150 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -311,7 +311,8 @@ struct pci_dev {
unsigned int is_virtfn:1;
unsigned int reset_fn:1;
unsigned int is_hotplug_bridge:1;
- unsigned int aer_firmware_first:1;
+ unsigned int __aer_firmware_first_valid:1;
+ unsigned int __aer_firmware_first:1;
pci_dev_flags_t dev_flags;
atomic_t enable_cnt; /* pci_enable_device has been called */
@@ -334,6 +335,16 @@ struct pci_dev {
#endif
};
+static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
+{
+#ifdef CONFIG_PCI_IOV
+ if (dev->is_virtfn)
+ dev = dev->physfn;
+#endif
+
+ return dev;
+}
+
extern struct pci_dev *alloc_pci_dev(void);
#define pci_dev_b(n) list_entry(n, struct pci_dev, bus_list)
@@ -621,6 +632,7 @@ void pci_fixup_cardbus(struct pci_bus *);
/* Generic PCI functions used internally */
+void pcibios_scan_specific_bus(int busn);
extern struct pci_bus *pci_find_bus(int domain, int busnr);
void pci_bus_add_devices(const struct pci_bus *bus);
struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus,
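
A short, hedged illustration of the new pci_physfn() helper: given an SR-IOV virtual function's pci_dev (assumed here to be passed in as "vf"), it yields the owning physical function, or the device itself when SR-IOV is not in play:

static void example_report_physfn(struct pci_dev *vf)
{
	struct pci_dev *pf = pci_physfn(vf);

	if (pf != vf)
		dev_info(&vf->dev, "VF backed by PF %s\n", pci_name(pf));
}
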
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 9f688d243b86..c07729f1f0d6 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2419,8 +2419,8 @@
#define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30
#define PCI_DEVICE_ID_INTEL_IOAT 0x1a38
#define PCI_DEVICE_ID_INTEL_CPT_SMBUS 0x1c22
-#define PCI_DEVICE_ID_INTEL_CPT_LPC1 0x1c42
-#define PCI_DEVICE_ID_INTEL_CPT_LPC2 0x1c43
+#define PCI_DEVICE_ID_INTEL_CPT_LPC_MIN 0x1c41
+#define PCI_DEVICE_ID_INTEL_CPT_LPC_MAX 0x1c5f
#define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410
#define PCI_DEVICE_ID_INTEL_82801AA_1 0x2411
#define PCI_DEVICE_ID_INTEL_82801AA_3 0x2413
@@ -2532,11 +2532,46 @@
#define PCI_DEVICE_ID_INTEL_ICH9_6 0x2930
#define PCI_DEVICE_ID_INTEL_ICH9_7 0x2916
#define PCI_DEVICE_ID_INTEL_ICH9_8 0x2918
+#define PCI_DEVICE_ID_INTEL_I7_MCR 0x2c18
+#define PCI_DEVICE_ID_INTEL_I7_MC_TAD 0x2c19
+#define PCI_DEVICE_ID_INTEL_I7_MC_RAS 0x2c1a
+#define PCI_DEVICE_ID_INTEL_I7_MC_TEST 0x2c1c
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL 0x2c20
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR 0x2c21
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH0_RANK 0x2c22
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH0_TC 0x2c23
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL 0x2c28
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH1_ADDR 0x2c29
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH1_RANK 0x2c2a
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH1_TC 0x2c2b
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL 0x2c30
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR 0x2c31
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK 0x2c32
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC 0x2c33
+#define PCI_DEVICE_ID_INTEL_I7_NONCORE 0x2c41
+#define PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT 0x2c40
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE 0x2c50
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT 0x2c51
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_SAD 0x2c81
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0 0x2c90
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_PHY0 0x2c91
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR 0x2c98
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD 0x2c99
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST 0x2c9C
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL 0x2ca0
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR 0x2ca1
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK 0x2ca2
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC 0x2ca3
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL 0x2ca8
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR 0x2ca9
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK 0x2caa
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC 0x2cab
#define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340
#define PCI_DEVICE_ID_INTEL_IOAT_TBG4 0x3429
#define PCI_DEVICE_ID_INTEL_IOAT_TBG5 0x342a
#define PCI_DEVICE_ID_INTEL_IOAT_TBG6 0x342b
#define PCI_DEVICE_ID_INTEL_IOAT_TBG7 0x342c
+#define PCI_DEVICE_ID_INTEL_X58_HUB_MGMT 0x342e
#define PCI_DEVICE_ID_INTEL_IOAT_TBG0 0x3430
#define PCI_DEVICE_ID_INTEL_IOAT_TBG1 0x3431
#define PCI_DEVICE_ID_INTEL_IOAT_TBG2 0x3432
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
index c8f302991b66..455b9ccdfca7 100644
--- a/include/linux/pci_regs.h
+++ b/include/linux/pci_regs.h
@@ -442,7 +442,10 @@
#define PCI_EXP_LNKCTL_LABIE 0x0800 /* Lnk Autonomous Bandwidth Interrupt Enable */
#define PCI_EXP_LNKSTA 18 /* Link Status */
#define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */
+#define PCI_EXP_LNKSTA_CLS_2_5GB 0x01 /* Current Link Speed 2.5GT/s */
+#define PCI_EXP_LNKSTA_CLS_5_0GB 0x02 /* Current Link Speed 5.0GT/s */
#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */
+#define PCI_EXP_LNKSTA_NLW_SHIFT 4 /* start of NLW mask in link status */
#define PCI_EXP_LNKSTA_LT 0x0800 /* Link Training */
#define PCI_EXP_LNKSTA_SLC 0x1000 /* Slot Clock Configuration */
#define PCI_EXP_LNKSTA_DLLLA 0x2000 /* Data Link Layer Link Active */
@@ -563,8 +566,7 @@
#define PCI_ERR_ROOT_FIRST_FATAL 0x00000010 /* First Fatal */
#define PCI_ERR_ROOT_NONFATAL_RCV 0x00000020 /* Non-Fatal Received */
#define PCI_ERR_ROOT_FATAL_RCV 0x00000040 /* Fatal Received */
-#define PCI_ERR_ROOT_COR_SRC 52
-#define PCI_ERR_ROOT_SRC 54
+#define PCI_ERR_ROOT_ERR_SRC 52 /* Error Source Identification */
/* Virtual Channel */
#define PCI_VC_PORT_REG1 4
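
A hedged sketch of how the new speed values and NLW shift are meant to be consumed when decoding the Link Status register; the capability lookup failure path is trimmed for brevity and "pdev" is assumed to be a PCIe device:

static void example_report_link(struct pci_dev *pdev)
{
	int pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	u16 lnksta;

	if (!pos)
		return;
	pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &lnksta);

	dev_info(&pdev->dev, "link %s, x%u\n",
		 (lnksta & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_5_0GB ?
			"5.0 GT/s" : "2.5 GT/s",
		 (lnksta & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT);
}
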
diff --git a/include/linux/pda_power.h b/include/linux/pda_power.h
index d4cf7a2ceb3e..c9e4d814ff77 100644
--- a/include/linux/pda_power.h
+++ b/include/linux/pda_power.h
@@ -24,6 +24,8 @@ struct pda_power_pdata {
int (*is_usb_online)(void);
void (*set_charge)(int flags);
void (*exit)(struct device *dev);
+ int (*suspend)(pm_message_t state);
+ int (*resume)(void);
char **supplied_to;
size_t num_supplicants;
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 68567c0b3a5d..ce2dc655cd1d 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -131,11 +131,11 @@
* Declaration/definition used for per-CPU variables that must be page aligned.
*/
#define DECLARE_PER_CPU_PAGE_ALIGNED(type, name) \
- DECLARE_PER_CPU_SECTION(type, name, ".page_aligned") \
+ DECLARE_PER_CPU_SECTION(type, name, "..page_aligned") \
__aligned(PAGE_SIZE)
#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
- DEFINE_PER_CPU_SECTION(type, name, ".page_aligned") \
+ DEFINE_PER_CPU_SECTION(type, name, "..page_aligned") \
__aligned(PAGE_SIZE)
/*
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index c8e375440403..3fd5c82e0e18 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -203,8 +203,19 @@ struct perf_event_attr {
enable_on_exec : 1, /* next exec enables */
task : 1, /* trace fork/exit */
watermark : 1, /* wakeup_watermark */
-
- __reserved_1 : 49;
+ /*
+ * precise_ip:
+ *
+ * 0 - SAMPLE_IP can have arbitrary skid
+ * 1 - SAMPLE_IP must have constant skid
+ * 2 - SAMPLE_IP requested to have 0 skid
+ * 3 - SAMPLE_IP must have 0 skid
+ *
+ * See also PERF_RECORD_MISC_EXACT_IP
+ */
+ precise_ip : 2, /* skid constraint */
+
+ __reserved_1 : 47;
union {
__u32 wakeup_events; /* wakeup every n events */
@@ -287,11 +298,24 @@ struct perf_event_mmap_page {
__u64 data_tail; /* user-space written tail */
};
-#define PERF_RECORD_MISC_CPUMODE_MASK (3 << 0)
+#define PERF_RECORD_MISC_CPUMODE_MASK (7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0)
#define PERF_RECORD_MISC_KERNEL (1 << 0)
#define PERF_RECORD_MISC_USER (2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR (3 << 0)
+#define PERF_RECORD_MISC_GUEST_KERNEL (4 << 0)
+#define PERF_RECORD_MISC_GUEST_USER (5 << 0)
+
+/*
+ * Indicates that the content of PERF_SAMPLE_IP points to
+ * the actual instruction that triggered the event. See also
+ * perf_event_attr::precise_ip.
+ */
+#define PERF_RECORD_MISC_EXACT_IP (1 << 14)
+/*
+ * Reserve the last bit to indicate some extended misc field
+ */
+#define PERF_RECORD_MISC_EXT_RESERVED (1 << 15)
struct perf_event_header {
__u32 type;
@@ -439,6 +463,12 @@ enum perf_callchain_context {
# include <asm/perf_event.h>
#endif
+struct perf_guest_info_callbacks {
+ int (*is_in_guest) (void);
+ int (*is_user_mode) (void);
+ unsigned long (*get_guest_ip) (void);
+};
+
#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif
@@ -468,6 +498,17 @@ struct perf_raw_record {
void *data;
};
+struct perf_branch_entry {
+ __u64 from;
+ __u64 to;
+ __u64 flags;
+};
+
+struct perf_branch_stack {
+ __u64 nr;
+ struct perf_branch_entry entries[0];
+};
+
struct task_struct;
/**
@@ -506,6 +547,8 @@ struct hw_perf_event {
struct perf_event;
+#define PERF_EVENT_TXN_STARTED 1
+
/**
* struct pmu - generic performance monitoring unit
*/
@@ -516,6 +559,16 @@ struct pmu {
void (*stop) (struct perf_event *event);
void (*read) (struct perf_event *event);
void (*unthrottle) (struct perf_event *event);
+
+ /*
+ * group events scheduling is treated as a transaction,
+ * add group events as a whole and perform one schedulability test.
+ * If test fails, roll back the whole group
+ */
+
+ void (*start_txn) (const struct pmu *pmu);
+ void (*cancel_txn) (const struct pmu *pmu);
+ int (*commit_txn) (const struct pmu *pmu);
};
/**
@@ -571,6 +624,14 @@ enum perf_group_flag {
PERF_GROUP_SOFTWARE = 0x1,
};
+#define SWEVENT_HLIST_BITS 8
+#define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS)
+
+struct swevent_hlist {
+ struct hlist_head heads[SWEVENT_HLIST_SIZE];
+ struct rcu_head rcu_head;
+};
+
/**
* struct perf_event - performance event kernel representation:
*/
@@ -579,6 +640,7 @@ struct perf_event {
struct list_head group_entry;
struct list_head event_entry;
struct list_head sibling_list;
+ struct hlist_node hlist_entry;
int nr_siblings;
int group_flags;
struct perf_event *group_leader;
@@ -726,6 +788,9 @@ struct perf_cpu_context {
int active_oncpu;
int max_pertask;
int exclusive;
+ struct swevent_hlist *swevent_hlist;
+ struct mutex hlist_mutex;
+ int hlist_refcount;
/*
* Recursion avoidance:
@@ -769,9 +834,6 @@ extern void perf_disable(void);
extern void perf_enable(void);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
-extern int hw_perf_group_sched_in(struct perf_event *group_leader,
- struct perf_cpu_context *cpuctx,
- struct perf_event_context *ctx);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
@@ -902,6 +964,10 @@ static inline void perf_event_mmap(struct vm_area_struct *vma)
__perf_event_mmap(vma);
}
+extern struct perf_guest_info_callbacks *perf_guest_cbs;
+extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
+extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
+
extern void perf_event_comm(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);
@@ -971,6 +1037,11 @@ perf_sw_event(u32 event_id, u64 nr, int nmi,
static inline void
perf_bp_event(struct perf_event *event, void *data) { }
+static inline int perf_register_guest_info_callbacks
+(struct perf_guest_info_callbacks *callbacks) { return 0; }
+static inline int perf_unregister_guest_info_callbacks
+(struct perf_guest_info_callbacks *callbacks) { return 0; }
+
static inline void perf_event_mmap(struct vm_area_struct *vma) { }
static inline void perf_event_comm(struct task_struct *tsk) { }
static inline void perf_event_fork(struct task_struct *tsk) { }
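
From userspace, the new precise_ip field is simply set in perf_event_attr before perf_event_open(); samples whose header.misc carries PERF_RECORD_MISC_EXACT_IP then report an exact instruction pointer. A hedged sketch follows (the event and period choices are arbitrary):

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int open_precise_cycles(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_IP;
	attr.precise_ip = 2;		/* request zero skid; the PMU may refuse */

	/* monitor the calling thread on any CPU */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}
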
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 14d7fdf6a90a..987e111f7b11 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -24,6 +24,7 @@
#include <linux/mii.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
+#include <linux/mod_devicetable.h>
#include <asm/atomic.h>
@@ -81,6 +82,10 @@ typedef enum {
*/
#define MII_BUS_ID_SIZE (20 - 3)
+/* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit
+ IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips. */
+#define MII_ADDR_C45 (1<<30)
+
/*
* The Bus class for PHYs. Devices which provide access to
* PHYs should register using this structure
@@ -127,8 +132,8 @@ int mdiobus_register(struct mii_bus *bus);
void mdiobus_unregister(struct mii_bus *bus);
void mdiobus_free(struct mii_bus *bus);
struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
-int mdiobus_read(struct mii_bus *bus, int addr, u16 regnum);
-int mdiobus_write(struct mii_bus *bus, int addr, u16 regnum, u16 val);
+int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
+int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
#define PHY_INTERRUPT_DISABLED 0x0
@@ -422,7 +427,7 @@ struct phy_fixup {
* because the bus read/write functions may wait for an interrupt
* to conclude the operation.
*/
-static inline int phy_read(struct phy_device *phydev, u16 regnum)
+static inline int phy_read(struct phy_device *phydev, u32 regnum)
{
return mdiobus_read(phydev->bus, phydev->addr, regnum);
}
@@ -437,7 +442,7 @@ static inline int phy_read(struct phy_device *phydev, u16 regnum)
* because the bus read/write functions may wait for an interrupt
* to conclude the operation.
*/
-static inline int phy_write(struct phy_device *phydev, u16 regnum, u16 val)
+static inline int phy_write(struct phy_device *phydev, u32 regnum, u16 val)
{
return mdiobus_write(phydev->bus, phydev->addr, regnum, val);
}
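
With regnum widened to u32, clause-45 accesses are made by OR-ing MII_ADDR_C45 into the register number; the devad/register packing below follows common mdiobus usage but should be treated as an assumption for whichever bus driver is targeted:

static int example_c45_read(struct phy_device *phydev, int devad, u16 reg)
{
	u32 regnum = MII_ADDR_C45 | (devad << 16) | reg;

	return phy_read(phydev, regnum);	/* phy_read() now takes a u32 */
}
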
diff --git a/include/linux/pm_qos_params.h b/include/linux/pm_qos_params.h
index d74f75ed1e47..8ba440e5eb7f 100644
--- a/include/linux/pm_qos_params.h
+++ b/include/linux/pm_qos_params.h
@@ -14,12 +14,14 @@
#define PM_QOS_NUM_CLASSES 4
#define PM_QOS_DEFAULT_VALUE -1
-int pm_qos_add_requirement(int qos, char *name, s32 value);
-int pm_qos_update_requirement(int qos, char *name, s32 new_value);
-void pm_qos_remove_requirement(int qos, char *name);
+struct pm_qos_request_list;
-int pm_qos_requirement(int qos);
+struct pm_qos_request_list *pm_qos_add_request(int pm_qos_class, s32 value);
+void pm_qos_update_request(struct pm_qos_request_list *pm_qos_req,
+ s32 new_value);
+void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req);
-int pm_qos_add_notifier(int qos, struct notifier_block *notifier);
-int pm_qos_remove_notifier(int qos, struct notifier_block *notifier);
+int pm_qos_request(int pm_qos_class);
+int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
+int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
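
The name-keyed requirement calls above give way to an opaque request handle; a hedged usage sketch against the pre-existing PM_QOS_CPU_DMA_LATENCY class:

struct pm_qos_request_list *req;

req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY, 50);	/* cap latency at 50 us */
if (req) {
	/* ... latency-sensitive phase ... */
	pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);	/* relax the bound */
	pm_qos_remove_request(req);
}
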
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index b776db737244..6e81888c6222 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -30,6 +30,9 @@ extern void pm_runtime_enable(struct device *dev);
extern void __pm_runtime_disable(struct device *dev, bool check_resume);
extern void pm_runtime_allow(struct device *dev);
extern void pm_runtime_forbid(struct device *dev);
+extern int pm_generic_runtime_idle(struct device *dev);
+extern int pm_generic_runtime_suspend(struct device *dev);
+extern int pm_generic_runtime_resume(struct device *dev);
static inline bool pm_children_suspended(struct device *dev)
{
@@ -96,6 +99,10 @@ static inline bool device_run_wake(struct device *dev) { return false; }
static inline void device_set_run_wake(struct device *dev, bool enable) {}
static inline bool pm_runtime_suspended(struct device *dev) { return false; }
+static inline int pm_generic_runtime_idle(struct device *dev) { return 0; }
+static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; }
+static inline int pm_generic_runtime_resume(struct device *dev) { return 0; }
+
#endif /* !CONFIG_PM_RUNTIME */
static inline int pm_runtime_get(struct device *dev)
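
The three pm_generic_runtime_* helpers are intended to be plugged straight into a subsystem's dev_pm_ops when the bus has nothing device-specific to add; a minimal sketch (the ops name is hypothetical):

static const struct dev_pm_ops example_bus_pm_ops = {
	.runtime_suspend	= pm_generic_runtime_suspend,
	.runtime_resume		= pm_generic_runtime_resume,
	.runtime_idle		= pm_generic_runtime_idle,
};
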
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
index 0aae7776185e..22d64c18056c 100644
--- a/include/linux/pm_wakeup.h
+++ b/include/linux/pm_wakeup.h
@@ -25,32 +25,34 @@
# error "please don't include this file directly"
#endif
+#include <linux/types.h>
+
#ifdef CONFIG_PM
/* changes to device_may_wakeup take effect on the next pm state change.
* by default, devices should wakeup if they can.
*/
-static inline void device_init_wakeup(struct device *dev, int val)
+static inline void device_init_wakeup(struct device *dev, bool val)
{
- dev->power.can_wakeup = dev->power.should_wakeup = !!val;
+ dev->power.can_wakeup = dev->power.should_wakeup = val;
}
-static inline void device_set_wakeup_capable(struct device *dev, int val)
+static inline void device_set_wakeup_capable(struct device *dev, bool capable)
{
- dev->power.can_wakeup = !!val;
+ dev->power.can_wakeup = capable;
}
-static inline int device_can_wakeup(struct device *dev)
+static inline bool device_can_wakeup(struct device *dev)
{
return dev->power.can_wakeup;
}
-static inline void device_set_wakeup_enable(struct device *dev, int val)
+static inline void device_set_wakeup_enable(struct device *dev, bool enable)
{
- dev->power.should_wakeup = !!val;
+ dev->power.should_wakeup = enable;
}
-static inline int device_may_wakeup(struct device *dev)
+static inline bool device_may_wakeup(struct device *dev)
{
return dev->power.can_wakeup && dev->power.should_wakeup;
}
@@ -58,20 +60,28 @@ static inline int device_may_wakeup(struct device *dev)
#else /* !CONFIG_PM */
/* For some reason the next two routines work even without CONFIG_PM */
-static inline void device_init_wakeup(struct device *dev, int val)
+static inline void device_init_wakeup(struct device *dev, bool val)
{
- dev->power.can_wakeup = !!val;
+ dev->power.can_wakeup = val;
}
-static inline void device_set_wakeup_capable(struct device *dev, int val) { }
+static inline void device_set_wakeup_capable(struct device *dev, bool capable)
+{
+}
-static inline int device_can_wakeup(struct device *dev)
+static inline bool device_can_wakeup(struct device *dev)
{
return dev->power.can_wakeup;
}
-#define device_set_wakeup_enable(dev, val) do {} while (0)
-#define device_may_wakeup(dev) 0
+static inline void device_set_wakeup_enable(struct device *dev, bool enable)
+{
+}
+
+static inline bool device_may_wakeup(struct device *dev)
+{
+ return false;
+}
#endif /* !CONFIG_PM */
diff --git a/include/linux/ppp_channel.h b/include/linux/ppp_channel.h
index 0d3fa63e90ea..bff98ec1bfed 100644
--- a/include/linux/ppp_channel.h
+++ b/include/linux/ppp_channel.h
@@ -72,6 +72,9 @@ extern int ppp_channel_index(struct ppp_channel *);
/* Get the unit number associated with a channel, or -1 if none */
extern int ppp_unit_number(struct ppp_channel *);
+/* Get the device name associated with a channel, or NULL if none */
+extern char *ppp_dev_name(struct ppp_channel *);
+
/*
* SMP locking notes:
* The channel code must ensure that when it calls ppp_unregister_channel,
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index e1fb60729979..4272521e29e9 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -345,18 +345,6 @@ static inline void user_single_step_siginfo(struct task_struct *tsk,
#define arch_ptrace_stop(code, info) do { } while (0)
#endif
-#ifndef arch_ptrace_untrace
-/*
- * Do machine-specific work before untracing child.
- *
- * This is called for a normal detach as well as from ptrace_exit()
- * when the tracing task dies.
- *
- * Called with write_lock(&tasklist_lock) held.
- */
-#define arch_ptrace_untrace(task) do { } while (0)
-#endif
-
extern int task_current_syscall(struct task_struct *target, long *callno,
unsigned long args[6], unsigned int maxargs,
unsigned long *sp, unsigned long *pc);
diff --git a/include/linux/quota.h b/include/linux/quota.h
index b462916b2a0a..7126a15467f1 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -174,6 +174,8 @@ enum {
#include <linux/rwsem.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
#include <linux/dqblk_xfs.h>
#include <linux/dqblk_v1.h>
@@ -238,19 +240,43 @@ static inline int info_dirty(struct mem_dqinfo *info)
return test_bit(DQF_INFO_DIRTY_B, &info->dqi_flags);
}
+enum {
+ DQST_LOOKUPS,
+ DQST_DROPS,
+ DQST_READS,
+ DQST_WRITES,
+ DQST_CACHE_HITS,
+ DQST_ALLOC_DQUOTS,
+ DQST_FREE_DQUOTS,
+ DQST_SYNCS,
+ _DQST_DQSTAT_LAST
+};
+
struct dqstats {
- int lookups;
- int drops;
- int reads;
- int writes;
- int cache_hits;
- int allocated_dquots;
- int free_dquots;
- int syncs;
+ int stat[_DQST_DQSTAT_LAST];
};
+extern struct dqstats *dqstats_pcpu;
extern struct dqstats dqstats;
+static inline void dqstats_inc(unsigned int type)
+{
+#ifdef CONFIG_SMP
+ per_cpu_ptr(dqstats_pcpu, smp_processor_id())->stat[type]++;
+#else
+ dqstats.stat[type]++;
+#endif
+}
+
+static inline void dqstats_dec(unsigned int type)
+{
+#ifdef CONFIG_SMP
+ per_cpu_ptr(dqstats_pcpu, smp_processor_id())->stat[type]--;
+#else
+ dqstats.stat[type]--;
+#endif
+}
+
#define DQ_MOD_B 0 /* dquot modified since read */
#define DQ_BLKS_B 1 /* uid/gid has been warned about blk limit */
#define DQ_INODES_B 2 /* uid/gid has been warned about inode limit */
@@ -311,12 +337,10 @@ struct quotactl_ops {
int (*quota_sync)(struct super_block *, int, int);
int (*get_info)(struct super_block *, int, struct if_dqinfo *);
int (*set_info)(struct super_block *, int, struct if_dqinfo *);
- int (*get_dqblk)(struct super_block *, int, qid_t, struct if_dqblk *);
- int (*set_dqblk)(struct super_block *, int, qid_t, struct if_dqblk *);
+ int (*get_dqblk)(struct super_block *, int, qid_t, struct fs_disk_quota *);
+ int (*set_dqblk)(struct super_block *, int, qid_t, struct fs_disk_quota *);
int (*get_xstate)(struct super_block *, struct fs_quota_stat *);
int (*set_xstate)(struct super_block *, unsigned int, int);
- int (*get_xquota)(struct super_block *, int, qid_t, struct fs_disk_quota *);
- int (*set_xquota)(struct super_block *, int, qid_t, struct fs_disk_quota *);
};
struct quota_format_type {
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index e6fa7acce290..82c70c42d035 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -63,8 +63,10 @@ int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags);
int vfs_quota_sync(struct super_block *sb, int type, int wait);
int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
-int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di);
-int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di);
+int vfs_get_dqblk(struct super_block *sb, int type, qid_t id,
+ struct fs_disk_quota *di);
+int vfs_set_dqblk(struct super_block *sb, int type, qid_t id,
+ struct fs_disk_quota *di);
int dquot_transfer(struct inode *inode, struct iattr *iattr);
int vfs_dq_quota_on_remount(struct super_block *sb);
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index 5210a5c60877..fe1872e5b37e 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -110,6 +110,7 @@ struct rb_node
struct rb_root
{
struct rb_node *rb_node;
+ void (*augment_cb)(struct rb_node *node);
};
@@ -129,7 +130,9 @@ static inline void rb_set_color(struct rb_node *rb, int color)
rb->rb_parent_color = (rb->rb_parent_color & ~1) | color;
}
-#define RB_ROOT (struct rb_root) { NULL, }
+#define RB_ROOT (struct rb_root) { NULL, NULL, }
+#define RB_AUGMENT_ROOT(x) (struct rb_root) { NULL, x}
+
#define rb_entry(ptr, type, member) container_of(ptr, type, member)
#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)
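
The new augment_cb hook is attached at root-initialisation time via RB_AUGMENT_ROOT(); a hedged sketch in which the callback recomputes hypothetical per-node metadata after rebalancing:

static void example_augment(struct rb_node *node)
{
	/* recompute cached subtree information for "node" here */
}

static struct rb_root example_tree = RB_AUGMENT_ROOT(&example_augment);
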
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 2c9b46cff3d7..4ec3b38ce9c5 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -428,5 +428,47 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
pos = rcu_dereference_raw(pos->next))
+/**
+ * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct hlist_node to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the hlist_node within the struct.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as hlist_add_head_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ */
+#define hlist_for_each_entry_rcu_bh(tpos, pos, head, member) \
+ for (pos = rcu_dereference_bh((head)->first); \
+ pos && ({ prefetch(pos->next); 1; }) && \
+ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
+ pos = rcu_dereference_bh(pos->next))
+
+/**
+ * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct hlist_node to use as a loop cursor.
+ * @member: the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_continue_rcu(tpos, pos, member) \
+ for (pos = rcu_dereference((pos)->next); \
+ pos && ({ prefetch(pos->next); 1; }) && \
+ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
+ pos = rcu_dereference(pos->next))
+
+/**
+ * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct hlist_node to use as a loop cursor.
+ * @member: the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_continue_rcu_bh(tpos, pos, member) \
+ for (pos = rcu_dereference_bh((pos)->next); \
+ pos && ({ prefetch(pos->next); 1; }) && \
+ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
+ pos = rcu_dereference_bh(pos->next))
+
+
#endif /* __KERNEL__ */
#endif
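
A hedged sketch of the new _bh iterator: the caller is assumed to hold rcu_read_lock_bh(), and the entry type and list head are hypothetical:

struct example_entry {
	struct hlist_node	node;
	int			key;
};

static struct example_entry *example_lookup(struct hlist_head *head, int key)
{
	struct example_entry *e;
	struct hlist_node *pos;

	/* caller holds rcu_read_lock_bh() */
	hlist_for_each_entry_rcu_bh(e, pos, head, node)
		if (e->key == key)
			return e;
	return NULL;
}
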
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index db266bbed23f..23be3a702516 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -56,8 +56,6 @@ struct rcu_head {
};
/* Exported common interfaces */
-extern void synchronize_rcu_bh(void);
-extern void synchronize_sched(void);
extern void rcu_barrier(void);
extern void rcu_barrier_bh(void);
extern void rcu_barrier_sched(void);
@@ -66,8 +64,6 @@ extern int sched_expedited_torture_stats(char *page);
/* Internal to kernel */
extern void rcu_init(void);
-extern int rcu_scheduler_active;
-extern void rcu_scheduler_starting(void);
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
#include <linux/rcutree.h>
@@ -106,12 +102,13 @@ extern int debug_lockdep_rcu_enabled(void);
/**
* rcu_read_lock_held - might we be in RCU read-side critical section?
*
- * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
- * an RCU read-side critical section. In absence of CONFIG_PROVE_LOCKING,
+ * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
+ * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC,
* this assumes we are in an RCU read-side critical section unless it can
* prove otherwise.
*
- * Check rcu_scheduler_active to prevent false positives during boot.
+ * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
+ * and while lockdep is disabled.
*/
static inline int rcu_read_lock_held(void)
{
@@ -129,13 +126,15 @@ extern int rcu_read_lock_bh_held(void);
/**
* rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section?
*
- * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in an
- * RCU-sched read-side critical section. In absence of CONFIG_PROVE_LOCKING,
- * this assumes we are in an RCU-sched read-side critical section unless it
- * can prove otherwise. Note that disabling of preemption (including
- * disabling irqs) counts as an RCU-sched read-side critical section.
+ * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
+ * RCU-sched read-side critical section. In absence of
+ * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
+ * critical section unless it can prove otherwise. Note that disabling
+ * of preemption (including disabling irqs) counts as an RCU-sched
+ * read-side critical section.
*
- * Check rcu_scheduler_active to prevent false positives during boot.
+ * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
+ * and while lockdep is disabled.
*/
#ifdef CONFIG_PREEMPT
static inline int rcu_read_lock_sched_held(void)
@@ -177,7 +176,7 @@ static inline int rcu_read_lock_bh_held(void)
#ifdef CONFIG_PREEMPT
static inline int rcu_read_lock_sched_held(void)
{
- return !rcu_scheduler_active || preempt_count() != 0 || irqs_disabled();
+ return preempt_count() != 0 || irqs_disabled();
}
#else /* #ifdef CONFIG_PREEMPT */
static inline int rcu_read_lock_sched_held(void)
@@ -192,6 +191,15 @@ static inline int rcu_read_lock_sched_held(void)
extern int rcu_my_thread_group_empty(void);
+#define __do_rcu_dereference_check(c) \
+ do { \
+ static bool __warned; \
+ if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \
+ __warned = true; \
+ lockdep_rcu_dereference(__FILE__, __LINE__); \
+ } \
+ } while (0)
+
/**
* rcu_dereference_check - rcu_dereference with debug checking
* @p: The pointer to read, prior to dereferencing
@@ -221,8 +229,7 @@ extern int rcu_my_thread_group_empty(void);
*/
#define rcu_dereference_check(p, c) \
({ \
- if (debug_lockdep_rcu_enabled() && !(c)) \
- lockdep_rcu_dereference(__FILE__, __LINE__); \
+ __do_rcu_dereference_check(c); \
rcu_dereference_raw(p); \
})
@@ -239,8 +246,7 @@ extern int rcu_my_thread_group_empty(void);
*/
#define rcu_dereference_protected(p, c) \
({ \
- if (debug_lockdep_rcu_enabled() && !(c)) \
- lockdep_rcu_dereference(__FILE__, __LINE__); \
+ __do_rcu_dereference_check(c); \
(p); \
})
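
The one-shot __warned logic above only changes how often the splat fires; call sites keep the familiar shape, sketched here with a hypothetical RCU-protected global pointer and lock:

struct foo *p;

p = rcu_dereference_check(example_gp,
			  rcu_read_lock_held() ||
			  lockdep_is_held(&example_lock));
if (p)
	example_use(p);
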
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index a5195875480a..e2e893144a84 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -29,6 +29,10 @@
void rcu_sched_qs(int cpu);
void rcu_bh_qs(int cpu);
+static inline void rcu_note_context_switch(int cpu)
+{
+ rcu_sched_qs(cpu);
+}
#define __rcu_read_lock() preempt_disable()
#define __rcu_read_unlock() preempt_enable()
@@ -60,8 +64,6 @@ static inline long rcu_batches_completed_bh(void)
return 0;
}
-extern int rcu_expedited_torture_stats(char *page);
-
static inline void rcu_force_quiescent_state(void)
{
}
@@ -74,7 +76,17 @@ static inline void rcu_sched_force_quiescent_state(void)
{
}
-#define synchronize_rcu synchronize_sched
+extern void synchronize_sched(void);
+
+static inline void synchronize_rcu(void)
+{
+ synchronize_sched();
+}
+
+static inline void synchronize_rcu_bh(void)
+{
+ synchronize_sched();
+}
static inline void synchronize_rcu_expedited(void)
{
@@ -114,4 +126,17 @@ static inline int rcu_preempt_depth(void)
return 0;
}
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+extern int rcu_scheduler_active __read_mostly;
+extern void rcu_scheduler_starting(void);
+
+#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+static inline void rcu_scheduler_starting(void)
+{
+}
+
+#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
#endif /* __LINUX_RCUTINY_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 42cc3a04779e..c0ed1c056f29 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -34,8 +34,8 @@ struct notifier_block;
extern void rcu_sched_qs(int cpu);
extern void rcu_bh_qs(int cpu);
+extern void rcu_note_context_switch(int cpu);
extern int rcu_needs_cpu(int cpu);
-extern int rcu_expedited_torture_stats(char *page);
#ifdef CONFIG_TREE_PREEMPT_RCU
@@ -86,6 +86,8 @@ static inline void __rcu_read_unlock_bh(void)
extern void call_rcu_sched(struct rcu_head *head,
void (*func)(struct rcu_head *rcu));
+extern void synchronize_rcu_bh(void);
+extern void synchronize_sched(void);
extern void synchronize_rcu_expedited(void);
static inline void synchronize_rcu_bh_expedited(void)
@@ -120,4 +122,7 @@ static inline int rcu_blocking_is_gp(void)
return num_online_cpus() == 1;
}
+extern void rcu_scheduler_starting(void);
+extern int rcu_scheduler_active __read_mostly;
+
#endif /* __LINUX_RCUTREE_H */
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 234a8476cba8..e2980287245e 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -157,7 +157,11 @@ struct regulator_consumer_supply {
*
* Initialisation constraints, our supply and consumers supplies.
*
- * @supply_regulator_dev: Parent regulator (if any).
+ * @supply_regulator: Parent regulator. Specified using the regulator name
+ * as it appears in the name field in sysfs, which can
+ * be explicitly set using the constraints field 'name'.
+ * @supply_regulator_dev: Parent regulator (if any) - DEPRECATED in favour
+ * of supply_regulator.
*
* @constraints: Constraints. These must be specified for the regulator to
* be usable.
@@ -168,7 +172,8 @@ struct regulator_consumer_supply {
* @driver_data: Data passed to regulator_init.
*/
struct regulator_init_data {
- struct device *supply_regulator_dev; /* or NULL for LINE */
+ const char *supply_regulator; /* or NULL for system supply */
+ struct device *supply_regulator_dev; /* or NULL for system supply */
struct regulation_constraints constraints;
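
Board code that previously pointed supply_regulator_dev at the parent's struct device can now just name the parent regulator; a hedged board-data sketch with hypothetical names and limits:

static struct regulator_init_data example_ldo_data = {
	.supply_regulator = "vbat",	/* parent regulator's name */
	.constraints = {
		.min_uV		= 1800000,
		.max_uV		= 1800000,
		.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE,
	},
};
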
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index c8297761e414..25b4f686d918 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -127,7 +127,9 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
unsigned long *lost_events);
struct ring_buffer_iter *
-ring_buffer_read_start(struct ring_buffer *buffer, int cpu);
+ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu);
+void ring_buffer_read_prepare_sync(void);
+void ring_buffer_read_start(struct ring_buffer_iter *iter);
void ring_buffer_read_finish(struct ring_buffer_iter *iter);
struct ring_buffer_event *
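
The single ring_buffer_read_start() entry point is split so callers can prepare iterators for several CPUs, synchronize once, and only then start reading; a hedged ordering sketch using only the functions declared above:

struct ring_buffer_iter *iter;

iter = ring_buffer_read_prepare(buffer, cpu);
ring_buffer_read_prepare_sync();	/* one sync covers all prepared iterators */
ring_buffer_read_start(iter);

/* ... consume events through the iterator ... */

ring_buffer_read_finish(iter);
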
diff --git a/include/linux/rtc-v3020.h b/include/linux/rtc-v3020.h
index 8ba646e610d9..e55d82cebf80 100644
--- a/include/linux/rtc-v3020.h
+++ b/include/linux/rtc-v3020.h
@@ -15,7 +15,7 @@
struct v3020_platform_data {
int leftshift; /* (1<<(leftshift)) & readl() */
- int use_gpio:1;
+ unsigned int use_gpio:1;
unsigned int gpio_cs;
unsigned int gpio_wr;
unsigned int gpio_rd;
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index d1c7c90e9cd4..5a42c36cb6aa 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -7,6 +7,12 @@
#include <linux/if_addr.h>
#include <linux/neighbour.h>
+/* rtnetlink families. Values up to 127 are reserved for real address
+ * families; values of 128 and above may be used arbitrarily.
+ */
+#define RTNL_FAMILY_IPMR 128
+#define RTNL_FAMILY_MAX 128
+
/****
* Routing/neighbour discovery messages.
****/
diff --git a/include/linux/sched.h b/include/linux/sched.h
index dad7f668ebf7..2a5b146fbaf9 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -99,7 +99,6 @@ struct futex_pi_state;
struct robust_list_head;
struct bio_list;
struct fs_struct;
-struct bts_context;
struct perf_event_context;
/*
@@ -275,11 +274,17 @@ extern cpumask_var_t nohz_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
extern int select_nohz_load_balancer(int cpu);
extern int get_nohz_load_balancer(void);
+extern int nohz_ratelimit(int cpu);
#else
static inline int select_nohz_load_balancer(int cpu)
{
return 0;
}
+
+static inline int nohz_ratelimit(int cpu)
+{
+ return 0;
+}
#endif
/*
@@ -954,6 +959,7 @@ struct sched_domain {
char *name;
#endif
+ unsigned int span_weight;
/*
* Span of all CPUs in this domain.
*
@@ -1026,12 +1032,17 @@ struct sched_domain;
#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
#define WF_FORK 0x02 /* child wakeup after fork */
+#define ENQUEUE_WAKEUP 1
+#define ENQUEUE_WAKING 2
+#define ENQUEUE_HEAD 4
+
+#define DEQUEUE_SLEEP 1
+
struct sched_class {
const struct sched_class *next;
- void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup,
- bool head);
- void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
+ void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
+ void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
void (*yield_task) (struct rq *rq);
void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
@@ -1040,7 +1051,8 @@ struct sched_class {
void (*put_prev_task) (struct rq *rq, struct task_struct *p);
#ifdef CONFIG_SMP
- int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
+ int (*select_task_rq)(struct rq *rq, struct task_struct *p,
+ int sd_flag, int flags);
void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
void (*post_schedule) (struct rq *this_rq);
@@ -1077,36 +1089,8 @@ struct load_weight {
unsigned long weight, inv_weight;
};
-/*
- * CFS stats for a schedulable entity (task, task-group etc)
- *
- * Current field usage histogram:
- *
- * 4 se->block_start
- * 4 se->run_node
- * 4 se->sleep_start
- * 6 se->load.weight
- */
-struct sched_entity {
- struct load_weight load; /* for load-balancing */
- struct rb_node run_node;
- struct list_head group_node;
- unsigned int on_rq;
-
- u64 exec_start;
- u64 sum_exec_runtime;
- u64 vruntime;
- u64 prev_sum_exec_runtime;
-
- u64 last_wakeup;
- u64 avg_overlap;
-
- u64 nr_migrations;
-
- u64 start_runtime;
- u64 avg_wakeup;
-
#ifdef CONFIG_SCHEDSTATS
+struct sched_statistics {
u64 wait_start;
u64 wait_max;
u64 wait_count;
@@ -1138,6 +1122,24 @@ struct sched_entity {
u64 nr_wakeups_affine_attempts;
u64 nr_wakeups_passive;
u64 nr_wakeups_idle;
+};
+#endif
+
+struct sched_entity {
+ struct load_weight load; /* for load-balancing */
+ struct rb_node run_node;
+ struct list_head group_node;
+ unsigned int on_rq;
+
+ u64 exec_start;
+ u64 sum_exec_runtime;
+ u64 vruntime;
+ u64 prev_sum_exec_runtime;
+
+ u64 nr_migrations;
+
+#ifdef CONFIG_SCHEDSTATS
+ struct sched_statistics statistics;
#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -1272,12 +1274,6 @@ struct task_struct {
struct list_head ptraced;
struct list_head ptrace_entry;
- /*
- * This is the tracer handle for the ptrace BTS extension.
- * This field actually belongs to the ptracer task.
- */
- struct bts_context *bts;
-
/* PID/PID hash table linkage. */
struct pid_link pids[PIDTYPE_MAX];
struct list_head thread_group;
@@ -1847,6 +1843,7 @@ extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);
#ifdef CONFIG_HOTPLUG_CPU
+extern void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p);
extern void idle_task_exit(void);
#else
static inline void idle_task_exit(void) {}
@@ -2123,10 +2120,8 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
extern char *get_task_comm(char *to, struct task_struct *tsk);
#ifdef CONFIG_SMP
-extern void wait_task_context_switch(struct task_struct *p);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
-static inline void wait_task_context_switch(struct task_struct *p) {}
static inline unsigned long wait_task_inactive(struct task_struct *p,
long match_state)
{
diff --git a/include/linux/security.h b/include/linux/security.h
index 3158dd982d27..24fc29540aa3 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -23,6 +23,7 @@
#define __LINUX_SECURITY_H
#include <linux/fs.h>
+#include <linux/fsnotify.h>
#include <linux/binfmts.h>
#include <linux/signal.h>
#include <linux/resource.h>
@@ -267,49 +268,16 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @orig the original mount data copied from userspace.
* @copy copied data which will be passed to the security module.
* Returns 0 if the copy was successful.
- * @sb_check_sb:
- * Check permission before the device with superblock @mnt->sb is mounted
- * on the mount point named by @nd.
- * @mnt contains the vfsmount for device being mounted.
- * @path contains the path for the mount point.
- * Return 0 if permission is granted.
* @sb_umount:
* Check permission before the @mnt file system is unmounted.
* @mnt contains the mounted file system.
* @flags contains the unmount flags, e.g. MNT_FORCE.
* Return 0 if permission is granted.
- * @sb_umount_close:
- * Close any files in the @mnt mounted filesystem that are held open by
- * the security module. This hook is called during an umount operation
- * prior to checking whether the filesystem is still busy.
- * @mnt contains the mounted filesystem.
- * @sb_umount_busy:
- * Handle a failed umount of the @mnt mounted filesystem, e.g. re-opening
- * any files that were closed by umount_close. This hook is called during
- * an umount operation if the umount fails after a call to the
- * umount_close hook.
- * @mnt contains the mounted filesystem.
- * @sb_post_remount:
- * Update the security module's state when a filesystem is remounted.
- * This hook is only called if the remount was successful.
- * @mnt contains the mounted file system.
- * @flags contains the new filesystem flags.
- * @data contains the filesystem-specific data.
- * @sb_post_addmount:
- * Update the security module's state when a filesystem is mounted.
- * This hook is called any time a mount is successfully grafetd to
- * the tree.
- * @mnt contains the mounted filesystem.
- * @mountpoint contains the path for the mount point.
* @sb_pivotroot:
* Check permission before pivoting the root filesystem.
* @old_path contains the path for the new location of the current root (put_old).
* @new_path contains the path for the new root (new_root).
* Return 0 if permission is granted.
- * @sb_post_pivotroot:
- * Update module state after a successful pivot.
- * @old_path contains the path for the old root.
- * @new_path contains the path for the new root.
* @sb_set_mnt_opts:
* Set the security relevant mount options used for a superblock
* @sb the superblock to set security mount options for
@@ -511,12 +479,6 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @mnt is the vfsmount where the dentry was looked up
* @dentry contains the dentry structure for the file.
* Return 0 if permission is granted.
- * @inode_delete:
- * @inode contains the inode structure for deleted inode.
- * This hook is called when a deleted inode is released (i.e. an inode
- * with no hard links has its use count drop to zero). A security module
- * can use this hook to release any persistent label associated with the
- * inode.
* @inode_setxattr:
* Check permission before setting the extended attributes
* @value identified by @name for @dentry.
@@ -691,10 +653,6 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @old points to the original credentials.
* @gfp indicates the atomicity of any memory allocations.
* Prepare a new set of credentials by copying the data from the old set.
- * @cred_commit:
- * @new points to the new credentials.
- * @old points to the original credentials.
- * Install a new set of credentials.
* @cred_transfer:
* @new points to the new credentials.
* @old points to the original credentials.
@@ -717,18 +675,6 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* userspace to load a kernel module with the given name.
* @kmod_name name of the module requested by the kernel
* Return 0 if successful.
- * @task_setuid:
- * Check permission before setting one or more of the user identity
- * attributes of the current process. The @flags parameter indicates
- * which of the set*uid system calls invoked this hook and how to
- * interpret the @id0, @id1, and @id2 parameters. See the LSM_SETID
- * definitions at the beginning of this file for the @flags values and
- * their meanings.
- * @id0 contains a uid.
- * @id1 contains a uid.
- * @id2 contains a uid.
- * @flags contains one of the LSM_SETID_* values.
- * Return 0 if permission is granted.
* @task_fix_setuid:
* Update the module's state after setting one or more of the user
* identity attributes of the current process. The @flags parameter
@@ -738,18 +684,6 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @old is the set of credentials that are being replaced
* @flags contains one of the LSM_SETID_* values.
* Return 0 on success.
- * @task_setgid:
- * Check permission before setting one or more of the group identity
- * attributes of the current process. The @flags parameter indicates
- * which of the set*gid system calls invoked this hook and how to
- * interpret the @id0, @id1, and @id2 parameters. See the LSM_SETID
- * definitions at the beginning of this file for the @flags values and
- * their meanings.
- * @id0 contains a gid.
- * @id1 contains a gid.
- * @id2 contains a gid.
- * @flags contains one of the LSM_SETID_* values.
- * Return 0 if permission is granted.
* @task_setpgid:
* Check permission before setting the process group identifier of the
* process @p to @pgid.
@@ -771,11 +705,6 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @p contains the task_struct for the process and place is into @secid.
* In case of failure, @secid will be set to zero.
*
- * @task_setgroups:
- * Check permission before setting the supplementary group set of the
- * current process.
- * @group_info contains the new group information.
- * Return 0 if permission is granted.
* @task_setnice:
* Check permission before setting the nice value of @p to @nice.
* @p contains the task_struct of process.
@@ -1139,13 +1068,6 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* Return the length of the string (including terminating NUL) or -ve if
* an error.
* May also return 0 (and a NULL buffer pointer) if there is no label.
- * @key_session_to_parent:
- * Forcibly assign the session keyring from a process to its parent
- * process.
- * @cred: Pointer to process's credentials
- * @parent_cred: Pointer to parent process's credentials
- * @keyring: Proposed new session keyring
- * Return 0 if permission is granted, -ve error otherwise.
*
* Security hooks affecting all System V IPC operations.
*
@@ -1333,13 +1255,6 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @cap contains the capability <include/linux/capability.h>.
* @audit: Whether to write an audit message or not
* Return 0 if the capability is granted for @tsk.
- * @acct:
- * Check permission before enabling or disabling process accounting. If
- * accounting is being enabled, then @file refers to the open file used to
- * store accounting records. If accounting is being disabled, then @file
- * is NULL.
- * @file contains the file structure for the accounting file (may be NULL).
- * Return 0 if permission is granted.
* @sysctl:
* Check permission before accessing the @table sysctl variable in the
* manner specified by @op.
@@ -1462,7 +1377,6 @@ struct security_operations {
const kernel_cap_t *permitted);
int (*capable) (struct task_struct *tsk, const struct cred *cred,
int cap, int audit);
- int (*acct) (struct file *file);
int (*sysctl) (struct ctl_table *table, int op);
int (*quotactl) (int cmds, int type, int id, struct super_block *sb);
int (*quota_on) (struct dentry *dentry);
@@ -1484,18 +1398,9 @@ struct security_operations {
int (*sb_statfs) (struct dentry *dentry);
int (*sb_mount) (char *dev_name, struct path *path,
char *type, unsigned long flags, void *data);
- int (*sb_check_sb) (struct vfsmount *mnt, struct path *path);
int (*sb_umount) (struct vfsmount *mnt, int flags);
- void (*sb_umount_close) (struct vfsmount *mnt);
- void (*sb_umount_busy) (struct vfsmount *mnt);
- void (*sb_post_remount) (struct vfsmount *mnt,
- unsigned long flags, void *data);
- void (*sb_post_addmount) (struct vfsmount *mnt,
- struct path *mountpoint);
int (*sb_pivotroot) (struct path *old_path,
struct path *new_path);
- void (*sb_post_pivotroot) (struct path *old_path,
- struct path *new_path);
int (*sb_set_mnt_opts) (struct super_block *sb,
struct security_mnt_opts *opts);
void (*sb_clone_mnt_opts) (const struct super_block *oldsb,
@@ -1544,7 +1449,6 @@ struct security_operations {
int (*inode_permission) (struct inode *inode, int mask);
int (*inode_setattr) (struct dentry *dentry, struct iattr *attr);
int (*inode_getattr) (struct vfsmount *mnt, struct dentry *dentry);
- void (*inode_delete) (struct inode *inode);
int (*inode_setxattr) (struct dentry *dentry, const char *name,
const void *value, size_t size, int flags);
void (*inode_post_setxattr) (struct dentry *dentry, const char *name,
@@ -1585,20 +1489,16 @@ struct security_operations {
void (*cred_free) (struct cred *cred);
int (*cred_prepare)(struct cred *new, const struct cred *old,
gfp_t gfp);
- void (*cred_commit)(struct cred *new, const struct cred *old);
void (*cred_transfer)(struct cred *new, const struct cred *old);
int (*kernel_act_as)(struct cred *new, u32 secid);
int (*kernel_create_files_as)(struct cred *new, struct inode *inode);
int (*kernel_module_request)(char *kmod_name);
- int (*task_setuid) (uid_t id0, uid_t id1, uid_t id2, int flags);
int (*task_fix_setuid) (struct cred *new, const struct cred *old,
int flags);
- int (*task_setgid) (gid_t id0, gid_t id1, gid_t id2, int flags);
int (*task_setpgid) (struct task_struct *p, pid_t pgid);
int (*task_getpgid) (struct task_struct *p);
int (*task_getsid) (struct task_struct *p);
void (*task_getsecid) (struct task_struct *p, u32 *secid);
- int (*task_setgroups) (struct group_info *group_info);
int (*task_setnice) (struct task_struct *p, int nice);
int (*task_setioprio) (struct task_struct *p, int ioprio);
int (*task_getioprio) (struct task_struct *p);
@@ -1728,9 +1628,6 @@ struct security_operations {
const struct cred *cred,
key_perm_t perm);
int (*key_getsecurity)(struct key *key, char **_buffer);
- int (*key_session_to_parent)(const struct cred *cred,
- const struct cred *parent_cred,
- struct key *key);
#endif /* CONFIG_KEYS */
#ifdef CONFIG_AUDIT
@@ -1761,7 +1658,6 @@ int security_capset(struct cred *new, const struct cred *old,
int security_capable(int cap);
int security_real_capable(struct task_struct *tsk, int cap);
int security_real_capable_noaudit(struct task_struct *tsk, int cap);
-int security_acct(struct file *file);
int security_sysctl(struct ctl_table *table, int op);
int security_quotactl(int cmds, int type, int id, struct super_block *sb);
int security_quota_on(struct dentry *dentry);
@@ -1783,14 +1679,8 @@ int security_sb_show_options(struct seq_file *m, struct super_block *sb);
int security_sb_statfs(struct dentry *dentry);
int security_sb_mount(char *dev_name, struct path *path,
char *type, unsigned long flags, void *data);
-int security_sb_check_sb(struct vfsmount *mnt, struct path *path);
int security_sb_umount(struct vfsmount *mnt, int flags);
-void security_sb_umount_close(struct vfsmount *mnt);
-void security_sb_umount_busy(struct vfsmount *mnt);
-void security_sb_post_remount(struct vfsmount *mnt, unsigned long flags, void *data);
-void security_sb_post_addmount(struct vfsmount *mnt, struct path *mountpoint);
int security_sb_pivotroot(struct path *old_path, struct path *new_path);
-void security_sb_post_pivotroot(struct path *old_path, struct path *new_path);
int security_sb_set_mnt_opts(struct super_block *sb, struct security_mnt_opts *opts);
void security_sb_clone_mnt_opts(const struct super_block *oldsb,
struct super_block *newsb);
@@ -1816,7 +1706,6 @@ int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd);
int security_inode_permission(struct inode *inode, int mask);
int security_inode_setattr(struct dentry *dentry, struct iattr *attr);
int security_inode_getattr(struct vfsmount *mnt, struct dentry *dentry);
-void security_inode_delete(struct inode *inode);
int security_inode_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags);
void security_inode_post_setxattr(struct dentry *dentry, const char *name,
@@ -1850,20 +1739,16 @@ int security_task_create(unsigned long clone_flags);
int security_cred_alloc_blank(struct cred *cred, gfp_t gfp);
void security_cred_free(struct cred *cred);
int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp);
-void security_commit_creds(struct cred *new, const struct cred *old);
void security_transfer_creds(struct cred *new, const struct cred *old);
int security_kernel_act_as(struct cred *new, u32 secid);
int security_kernel_create_files_as(struct cred *new, struct inode *inode);
int security_kernel_module_request(char *kmod_name);
-int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags);
int security_task_fix_setuid(struct cred *new, const struct cred *old,
int flags);
-int security_task_setgid(gid_t id0, gid_t id1, gid_t id2, int flags);
int security_task_setpgid(struct task_struct *p, pid_t pgid);
int security_task_getpgid(struct task_struct *p);
int security_task_getsid(struct task_struct *p);
void security_task_getsecid(struct task_struct *p, u32 *secid);
-int security_task_setgroups(struct group_info *group_info);
int security_task_setnice(struct task_struct *p, int nice);
int security_task_setioprio(struct task_struct *p, int ioprio);
int security_task_getioprio(struct task_struct *p);
@@ -1990,11 +1875,6 @@ int security_real_capable_noaudit(struct task_struct *tsk, int cap)
return ret;
}
-static inline int security_acct(struct file *file)
-{
- return 0;
-}
-
static inline int security_sysctl(struct ctl_table *table, int op)
{
return 0;
@@ -2099,41 +1979,17 @@ static inline int security_sb_mount(char *dev_name, struct path *path,
return 0;
}
-static inline int security_sb_check_sb(struct vfsmount *mnt,
- struct path *path)
-{
- return 0;
-}
-
static inline int security_sb_umount(struct vfsmount *mnt, int flags)
{
return 0;
}
-static inline void security_sb_umount_close(struct vfsmount *mnt)
-{ }
-
-static inline void security_sb_umount_busy(struct vfsmount *mnt)
-{ }
-
-static inline void security_sb_post_remount(struct vfsmount *mnt,
- unsigned long flags, void *data)
-{ }
-
-static inline void security_sb_post_addmount(struct vfsmount *mnt,
- struct path *mountpoint)
-{ }
-
static inline int security_sb_pivotroot(struct path *old_path,
struct path *new_path)
{
return 0;
}
-static inline void security_sb_post_pivotroot(struct path *old_path,
- struct path *new_path)
-{ }
-
static inline int security_sb_set_mnt_opts(struct super_block *sb,
struct security_mnt_opts *opts)
{
@@ -2249,9 +2105,6 @@ static inline int security_inode_getattr(struct vfsmount *mnt,
return 0;
}
-static inline void security_inode_delete(struct inode *inode)
-{ }
-
static inline int security_inode_setxattr(struct dentry *dentry,
const char *name, const void *value, size_t size, int flags)
{
@@ -2398,11 +2251,6 @@ static inline int security_prepare_creds(struct cred *new,
return 0;
}
-static inline void security_commit_creds(struct cred *new,
- const struct cred *old)
-{
-}
-
static inline void security_transfer_creds(struct cred *new,
const struct cred *old)
{
@@ -2424,12 +2272,6 @@ static inline int security_kernel_module_request(char *kmod_name)
return 0;
}
-static inline int security_task_setuid(uid_t id0, uid_t id1, uid_t id2,
- int flags)
-{
- return 0;
-}
-
static inline int security_task_fix_setuid(struct cred *new,
const struct cred *old,
int flags)
@@ -2437,12 +2279,6 @@ static inline int security_task_fix_setuid(struct cred *new,
return cap_task_fix_setuid(new, old, flags);
}
-static inline int security_task_setgid(gid_t id0, gid_t id1, gid_t id2,
- int flags)
-{
- return 0;
-}
-
static inline int security_task_setpgid(struct task_struct *p, pid_t pgid)
{
return 0;
@@ -2463,11 +2299,6 @@ static inline void security_task_getsecid(struct task_struct *p, u32 *secid)
*secid = 0;
}
-static inline int security_task_setgroups(struct group_info *group_info)
-{
- return 0;
-}
-
static inline int security_task_setnice(struct task_struct *p, int nice)
{
return cap_task_setnice(p, nice);
@@ -3064,9 +2895,6 @@ void security_key_free(struct key *key);
int security_key_permission(key_ref_t key_ref,
const struct cred *cred, key_perm_t perm);
int security_key_getsecurity(struct key *key, char **_buffer);
-int security_key_session_to_parent(const struct cred *cred,
- const struct cred *parent_cred,
- struct key *key);
#else
@@ -3094,13 +2922,6 @@ static inline int security_key_getsecurity(struct key *key, char **_buffer)
return 0;
}
-static inline int security_key_session_to_parent(const struct cred *cred,
- const struct cred *parent_cred,
- struct key *key)
-{
- return 0;
-}
-
#endif
#endif /* CONFIG_KEYS */
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 78dd1e7120a9..ad839963fa68 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -246,6 +246,7 @@ struct uart_ops {
#endif
};
+#define NO_POLL_CHAR 0x00ff0000
#define UART_CONFIG_TYPE (1 << 0)
#define UART_CONFIG_IRQ (1 << 1)
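
NO_POLL_CHAR is the sentinel a UART driver's poll_get_char() callback returns when no receive data is pending, so a polled consumer (such as kgdb over a serial console) knows to retry later. A minimal sketch, not part of this patch; the FOO_* register offsets and bit are invented for illustration:

#include <linux/io.h>
#include <linux/serial_core.h>

#define FOO_RXSTAT	0x00		/* hypothetical status register */
#define FOO_RXSTAT_RDY	0x01		/* hypothetical "data ready" bit */
#define FOO_RXDATA	0x04		/* hypothetical receive data register */

static int foo_poll_get_char(struct uart_port *port)
{
	/* nothing waiting: tell the poller to come back later */
	if (!(readl(port->membase + FOO_RXSTAT) & FOO_RXSTAT_RDY))
		return NO_POLL_CHAR;

	return readl(port->membase + FOO_RXDATA) & 0xff;
}
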
diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h
index 193d4bfe42ff..837efa4e63c2 100644
--- a/include/linux/serial_sci.h
+++ b/include/linux/serial_sci.h
@@ -3,7 +3,7 @@
#include <linux/serial_core.h>
#ifdef CONFIG_SERIAL_SH_SCI_DMA
-#include <asm/dmaengine.h>
+#include <linux/sh_dma.h>
#endif
/*
@@ -33,8 +33,8 @@ struct plat_sci_port {
char *clk; /* clock string */
struct device *dma_dev;
#ifdef CONFIG_SERIAL_SH_SCI_DMA
- enum sh_dmae_slave_chan_id dma_slave_tx;
- enum sh_dmae_slave_chan_id dma_slave_rx;
+ unsigned int dma_slave_tx;
+ unsigned int dma_slave_rx;
#endif
};
diff --git a/include/linux/serio.h b/include/linux/serio.h
index 64b473066b9a..b5552568178d 100644
--- a/include/linux/serio.h
+++ b/include/linux/serio.h
@@ -196,5 +196,6 @@ static inline void serio_continue_rx(struct serio *serio)
#define SERIO_TOUCHIT213 0x38
#define SERIO_W8001 0x39
#define SERIO_DYNAPRO 0x3a
+#define SERIO_HAMPSHIRE 0x3b
#endif
diff --git a/include/linux/sh_dma.h b/include/linux/sh_dma.h
new file mode 100644
index 000000000000..b08cd4efa15c
--- /dev/null
+++ b/include/linux/sh_dma.h
@@ -0,0 +1,102 @@
+/*
+ * Header for the new SH dmaengine driver
+ *
+ * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef SH_DMA_H
+#define SH_DMA_H
+
+#include <linux/list.h>
+#include <linux/dmaengine.h>
+
+/* Used by slave DMA clients to request DMA to/from a specific peripheral */
+struct sh_dmae_slave {
+ unsigned int slave_id; /* Set by the platform */
+ struct device *dma_dev; /* Set by the platform */
+ const struct sh_dmae_slave_config *config; /* Set by the driver */
+};
+
+struct sh_dmae_regs {
+ u32 sar; /* SAR / source address */
+ u32 dar; /* DAR / destination address */
+ u32 tcr; /* TCR / transfer count */
+};
+
+struct sh_desc {
+ struct sh_dmae_regs hw;
+ struct list_head node;
+ struct dma_async_tx_descriptor async_tx;
+ enum dma_data_direction direction;
+ dma_cookie_t cookie;
+ size_t partial;
+ int chunks;
+ int mark;
+};
+
+struct sh_dmae_slave_config {
+ unsigned int slave_id;
+ dma_addr_t addr;
+ u32 chcr;
+ char mid_rid;
+};
+
+struct sh_dmae_channel {
+ unsigned int offset;
+ unsigned int dmars;
+ unsigned int dmars_bit;
+};
+
+struct sh_dmae_pdata {
+ const struct sh_dmae_slave_config *slave;
+ int slave_num;
+ const struct sh_dmae_channel *channel;
+ int channel_num;
+ unsigned int ts_low_shift;
+ unsigned int ts_low_mask;
+ unsigned int ts_high_shift;
+ unsigned int ts_high_mask;
+ const unsigned int *ts_shift;
+ int ts_shift_num;
+ u16 dmaor_init;
+};
+
+/* DMA register */
+#define SAR 0x00
+#define DAR 0x04
+#define TCR 0x08
+#define CHCR 0x0C
+#define DMAOR 0x40
+
+/* DMAOR definitions */
+#define DMAOR_AE 0x00000004
+#define DMAOR_NMIF 0x00000002
+#define DMAOR_DME 0x00000001
+
+/* Definitions for the SuperH DMAC */
+#define REQ_L 0x00000000
+#define REQ_E 0x00080000
+#define RACK_H 0x00000000
+#define RACK_L 0x00040000
+#define ACK_R 0x00000000
+#define ACK_W 0x00020000
+#define ACK_H 0x00000000
+#define ACK_L 0x00010000
+#define DM_INC 0x00004000
+#define DM_DEC 0x00008000
+#define DM_FIX 0x0000c000
+#define SM_INC 0x00001000
+#define SM_DEC 0x00002000
+#define SM_FIX 0x00003000
+#define RS_IN 0x00000200
+#define RS_OUT 0x00000300
+#define TS_BLK 0x00000040
+#define TM_BUR 0x00000020
+#define CHCR_DE 0x00000001
+#define CHCR_TE 0x00000002
+#define CHCR_IE 0x00000004
+
+#endif
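
A rough sketch of how a slave client could pair with this header: the platform describes the peripheral with an sh_dmae_slave whose slave_id matches an entry in sh_dmae_pdata->slave, and the client hands that descriptor to the dmaengine core through a filter function. The names, the slave ID value, and the chan->private convention below are assumptions for illustration, not part of the patch:

#include <linux/dmaengine.h>
#include <linux/sh_dma.h>

static struct sh_dmae_slave foo_tx_slave = {
	.slave_id = 3,			/* made-up platform slave ID */
};

static bool foo_dma_filter(struct dma_chan *chan, void *arg)
{
	/* let the shdma driver match this channel against the slave table */
	chan->private = arg;
	return true;
}

static struct dma_chan *foo_request_tx_chan(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* foo_tx_slave must outlive the returned channel */
	return dma_request_channel(mask, foo_dma_filter, &foo_tx_slave);
}
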
diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h
index 51d288d8ac88..0d6cd38e673d 100644
--- a/include/linux/sh_intc.h
+++ b/include/linux/sh_intc.h
@@ -1,6 +1,8 @@
#ifndef __SH_INTC_H
#define __SH_INTC_H
+#include <linux/ioport.h>
+
typedef unsigned char intc_enum;
struct intc_vect {
@@ -21,6 +23,9 @@ struct intc_group {
struct intc_mask_reg {
unsigned long set_reg, clr_reg, reg_width;
intc_enum enum_ids[32];
+#ifdef CONFIG_INTC_BALANCING
+ unsigned long dist_reg;
+#endif
#ifdef CONFIG_SMP
unsigned long smp;
#endif
@@ -39,8 +44,14 @@ struct intc_sense_reg {
intc_enum enum_ids[16];
};
+#ifdef CONFIG_INTC_BALANCING
+#define INTC_SMP_BALANCING(reg) .dist_reg = (reg)
+#else
+#define INTC_SMP_BALANCING(reg)
+#endif
+
#ifdef CONFIG_SMP
-#define INTC_SMP(stride, nr) .smp = (stride) | ((nr) << 8)
+#define INTC_SMP(stride, nr) .smp = (stride) | ((nr) << 8)
#else
#define INTC_SMP(stride, nr)
#endif
@@ -71,6 +82,8 @@ struct intc_hw_desc {
struct intc_desc {
char *name;
+ struct resource *resource;
+ unsigned int num_resources;
intc_enum force_enable;
intc_enum force_disable;
struct intc_hw_desc hw;
@@ -92,9 +105,18 @@ struct intc_desc symbol __initdata = { \
prio_regs, sense_regs, ack_regs), \
}
-void __init register_intc_controller(struct intc_desc *desc);
+int __init register_intc_controller(struct intc_desc *desc);
int intc_set_priority(unsigned int irq, unsigned int prio);
+#ifdef CONFIG_INTC_USERIMASK
+int register_intc_userimask(unsigned long addr);
+#else
+static inline int register_intc_userimask(unsigned long addr)
+{
+ return 0;
+}
+#endif
+
int reserve_irq_vector(unsigned int irq);
void reserve_irq_legacy(void);
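
For illustration only (the register addresses and IRQ names are invented): a mask-register table entry that picks up the new per-CPU distribution register when CONFIG_INTC_BALANCING is enabled, and degrades to the plain set/clear pair otherwise. Note that register_intc_controller() now returns an error code that callers can check.

#include <linux/sh_intc.h>

enum { FOO_IRQ0 = 1, FOO_IRQ1, FOO_IRQ2, FOO_IRQ3 };

static struct intc_mask_reg foo_mask_registers[] __initdata = {
	{ 0xfe410030, 0xfe410050, 32,		/* set_reg, clr_reg, reg_width */
	  { FOO_IRQ0, FOO_IRQ1, FOO_IRQ2, FOO_IRQ3 },
	  INTC_SMP_BALANCING(0xfe4100b0) },	/* dist_reg, only with balancing */
};
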
diff --git a/include/linux/shm_signal.h b/include/linux/shm_signal.h
new file mode 100644
index 000000000000..b2efd72669fb
--- /dev/null
+++ b/include/linux/shm_signal.h
@@ -0,0 +1,189 @@
+/*
+ * Copyright 2009 Novell. All Rights Reserved.
+ *
+ * Author:
+ * Gregory Haskins <ghaskins@novell.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _LINUX_SHM_SIGNAL_H
+#define _LINUX_SHM_SIGNAL_H
+
+#include <linux/types.h>
+
+/*
+ *---------
+ * The following structures represent data that is shared across boundaries
+ * which may be quite disparate from one another (e.g. Windows vs Linux,
+ * 32 vs 64 bit, etc). Therefore, care has been taken to make sure they
+ * present data in a manner that is independent of the environment.
+ *-----------
+ */
+
+#define SHM_SIGNAL_MAGIC cpu_to_le32(0x58fa39df)
+#define SHM_SIGNAL_VER cpu_to_le32(1)
+
+struct shm_signal_irq {
+ __u8 enabled;
+ __u8 pending;
+ __u8 dirty;
+};
+
+enum shm_signal_locality {
+ shm_locality_north,
+ shm_locality_south,
+};
+
+struct shm_signal_desc {
+ __le32 magic;
+ __le32 ver;
+ struct shm_signal_irq irq[2];
+};
+
+/* --- END SHARED STRUCTURES --- */
+
+#ifdef __KERNEL__
+
+#include <linux/kref.h>
+#include <linux/interrupt.h>
+
+struct shm_signal_notifier {
+ void (*signal)(struct shm_signal_notifier *);
+};
+
+struct shm_signal;
+
+struct shm_signal_ops {
+ int (*inject)(struct shm_signal *s);
+ void (*fault)(struct shm_signal *s, const char *fmt, ...);
+ void (*release)(struct shm_signal *s);
+};
+
+enum {
+ shm_signal_in_wakeup,
+};
+
+struct shm_signal {
+ struct kref kref;
+ spinlock_t lock;
+ enum shm_signal_locality locale;
+ unsigned long flags;
+ struct shm_signal_ops *ops;
+ struct shm_signal_desc *desc;
+ struct shm_signal_notifier *notifier;
+ struct tasklet_struct deferred_notify;
+};
+
+#define SHM_SIGNAL_FAULT(s, fmt, args...) \
+ ((s)->ops->fault ? (s)->ops->fault((s), fmt, ## args) : panic(fmt, ## args))
+
+ /*
+ * These functions should only be used internally
+ */
+void _shm_signal_release(struct kref *kref);
+void _shm_signal_wakeup(struct shm_signal *s);
+
+/**
+ * shm_signal_init() - initialize an SHM_SIGNAL
+ * @s: SHM_SIGNAL context
+ *
+ * Initializes SHM_SIGNAL context before first use
+ *
+ **/
+void shm_signal_init(struct shm_signal *s, enum shm_signal_locality locale,
+ struct shm_signal_ops *ops, struct shm_signal_desc *desc);
+
+/**
+ * shm_signal_get() - acquire an SHM_SIGNAL context reference
+ * @s: SHM_SIGNAL context
+ *
+ **/
+static inline struct shm_signal *shm_signal_get(struct shm_signal *s)
+{
+ kref_get(&s->kref);
+
+ return s;
+}
+
+/**
+ * shm_signal_put() - release an SHM_SIGNAL context reference
+ * @s: SHM_SIGNAL context
+ *
+ **/
+static inline void shm_signal_put(struct shm_signal *s)
+{
+ kref_put(&s->kref, _shm_signal_release);
+}
+
+/**
+ * shm_signal_enable() - enables local notifications on an SHM_SIGNAL
+ * @s: SHM_SIGNAL context
+ * @flags: Reserved for future use, must be 0
+ *
+ * Enables/unmasks the registered notifier (if applicable) to receive wakeups
+ * whenever the remote side performs an shm_signal() operation. A notification
+ * will be dispatched immediately if any pending signals have already been
+ * issued prior to invoking this call.
+ *
+ * This is synonymous with unmasking an interrupt.
+ *
+ * Returns: success = 0, <0 = ERRNO
+ *
+ **/
+int shm_signal_enable(struct shm_signal *s, int flags);
+
+/**
+ * shm_signal_disable() - disable local notifications on an SHM_SIGNAL
+ * @s: SHM_SIGNAL context
+ * @flags: Reserved for future use, must be 0
+ *
+ * Disables/masks the registered shm_signal_notifier (if applicable) from
+ * receiving any further notifications. Any subsequent calls to shm_signal()
+ * by the remote side will update the shm as dirty, but will not traverse the
+ * locale boundary and will not invoke the notifier callback. Signals
+ * delivered while masked will be deferred until shm_signal_enable() is
+ * invoked.
+ *
+ * This is synonymous with masking an interrupt
+ *
+ * Returns: success = 0, <0 = ERRNO
+ *
+ **/
+int shm_signal_disable(struct shm_signal *s, int flags);
+
+/**
+ * shm_signal_inject() - notify the remote side about shm changes
+ * @s: SHM_SIGNAL context
+ * @flags: Reserved for future use, must be 0
+ *
+ * Marks the shm state as "dirty" and, if enabled, will traverse
+ * a locale boundary to inject a remote notification. The remote
+ * side controls whether the notification should be delivered via
+ * the shm_signal_enable/disable() interface.
+ *
+ * The specifics of how to traverse a locale boundary are abstracted
+ * by the shm_signal_ops->signal() interface and provided by a particular
+ * implementation. However, typically going north to south would be
+ * something like a syscall/hypercall, and going south to north would be
+ * something like a posix-signal/guest-interrupt.
+ *
+ * Returns: success = 0, <0 = ERRNO
+ *
+ **/
+int shm_signal_inject(struct shm_signal *s, int flags);
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_SHM_SIGNAL_H */
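
A condensed sketch of wiring up one side of an SHM_SIGNAL, following the kernel-doc above (all foo_* names are invented and this is not code from the patch): provide inject/release ops, attach a notifier, then enable delivery.

#include <linux/slab.h>
#include <linux/shm_signal.h>

static int foo_signal_inject(struct shm_signal *s)
{
	/* ring a doorbell, make a hypercall, raise a guest interrupt, ... */
	return 0;
}

static void foo_signal_release(struct shm_signal *s)
{
	kfree(s);
}

static struct shm_signal_ops foo_signal_ops = {
	.inject  = foo_signal_inject,
	.release = foo_signal_release,
};

static void foo_signal_notify(struct shm_signal_notifier *n)
{
	/* the remote side touched the shared memory: consume it here */
}

static struct shm_signal_notifier foo_notifier = {
	.signal = foo_signal_notify,
};

/* desc points into the shared region; this is the "north" (host) side */
static struct shm_signal *foo_signal_create(struct shm_signal_desc *desc)
{
	struct shm_signal *s = kzalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		return NULL;

	shm_signal_init(s, shm_locality_north, &foo_signal_ops, desc);
	s->notifier = &foo_notifier;
	shm_signal_enable(s, 0);	/* start receiving remote wakeups */

	return s;
}
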
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 124f90cd5a38..c9525bce80f6 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -187,7 +187,6 @@ union skb_shared_tx {
* the end of the header data, ie. at skb->end.
*/
struct skb_shared_info {
- atomic_t dataref;
unsigned short nr_frags;
unsigned short gso_size;
/* Warning: this field is not always filled in (UFO)! */
@@ -197,6 +196,12 @@ struct skb_shared_info {
union skb_shared_tx tx_flags;
struct sk_buff *frag_list;
struct skb_shared_hwtstamps hwtstamps;
+
+ /*
+ * Warning : all fields before dataref are cleared in __alloc_skb()
+ */
+ atomic_t dataref;
+
skb_frag_t frags[MAX_SKB_FRAGS];
/* Intermediate layers must ensure that destructor_arg
* remains valid until skb destructor */
@@ -294,6 +299,7 @@ typedef unsigned char *sk_buff_data_t;
* @nfct_reasm: netfilter conntrack re-assembly pointer
* @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
* @skb_iif: ifindex of device we arrived on
+ * @rxhash: the packet hash computed on receive
* @queue_mapping: Queue mapping for multiqueue devices
* @tc_index: Traffic control index
* @tc_verd: traffic control verdict
@@ -369,6 +375,8 @@ struct sk_buff {
#endif
#endif
+ __u32 rxhash;
+
kmemcheck_bitfield_begin(flags2);
__u16 queue_mapping:16;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
@@ -467,11 +475,6 @@ extern int skb_cow_data(struct sk_buff *skb, int tailbits,
struct sk_buff **trailer);
extern int skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a) consume_skb(a)
-#define dev_consume_skb(a) kfree_skb_clean(a)
-extern void skb_over_panic(struct sk_buff *skb, int len,
- void *here);
-extern void skb_under_panic(struct sk_buff *skb, int len,
- void *here);
extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
int getfrag(void *from, char *to, int offset,
@@ -1130,6 +1133,11 @@ static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
return skb->data += len;
}
+static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
+{
+ return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
+}
+
extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
@@ -1353,9 +1361,12 @@ static inline int skb_network_offset(const struct sk_buff *skb)
*
* Various parts of the networking layer expect at least 32 bytes of
* headroom, you should not reduce this.
+ * With RPS, we raised NET_SKB_PAD to 64 so that get_rps_cpus() fetches span
+ * a 64 bytes aligned block to fit modern (>= 64 bytes) cache line sizes
+ * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
*/
#ifndef NET_SKB_PAD
-#define NET_SKB_PAD 32
+#define NET_SKB_PAD 64
#endif
extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
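
The new default headroom works out to 2 + 14 + 40 + 8 = 64 bytes, one cache line on current CPUs. A hypothetical driver that allocates its own receive buffers (rather than using dev_alloc_skb(), which already reserves the pad internally) would keep that headroom like this:

#include <linux/skbuff.h>

static struct sk_buff *foo_alloc_rx_skb(unsigned int len)
{
	/* leave NET_SKB_PAD bytes of headroom so later header pushes and
	 * the RPS hash fetch stay within a 64-byte aligned block */
	struct sk_buff *skb = alloc_skb(len + NET_SKB_PAD, GFP_ATOMIC);

	if (skb)
		skb_reserve(skb, NET_SKB_PAD);
	return skb;
}
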
diff --git a/include/linux/snmp.h b/include/linux/snmp.h
index 4435d1084755..52797714ade7 100644
--- a/include/linux/snmp.h
+++ b/include/linux/snmp.h
@@ -100,6 +100,7 @@ enum
ICMP6_MIB_INMSGS, /* InMsgs */
ICMP6_MIB_INERRORS, /* InErrors */
ICMP6_MIB_OUTMSGS, /* OutMsgs */
+ ICMP6_MIB_OUTERRORS, /* OutErrors */
__ICMP6_MIB_MAX
};
@@ -227,6 +228,7 @@ enum
LINUX_MIB_SACKSHIFTFALLBACK,
LINUX_MIB_TCPBACKLOGDROP,
LINUX_MIB_TCPMINTTLDROP, /* RFC 5082 */
+ LINUX_MIB_TCPDEFERACCEPTDROP,
__LINUX_MIB_MAX
};
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 354cc5617f8b..032a19eb61b1 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -189,7 +189,8 @@ struct ucred {
#define AF_ISDN 34 /* mISDN sockets */
#define AF_PHONET 35 /* Phonet sockets */
#define AF_IEEE802154 36 /* IEEE802154 sockets */
-#define AF_MAX 37 /* For now.. */
+#define AF_CAIF 37 /* CAIF sockets */
+#define AF_MAX 38 /* For now.. */
/* Protocol families, same as address families. */
#define PF_UNSPEC AF_UNSPEC
@@ -229,6 +230,7 @@ struct ucred {
#define PF_ISDN AF_ISDN
#define PF_PHONET AF_PHONET
#define PF_IEEE802154 AF_IEEE802154
+#define PF_CAIF AF_CAIF
#define PF_MAX AF_MAX
/* Maximum queue length specifiable by listen. */
@@ -301,6 +303,7 @@ struct ucred {
#define SOL_PNPIPE 275
#define SOL_RDS 276
#define SOL_IUCV 277
+#define SOL_CAIF 278
/* IPX options */
#define IPX_TYPE 1
diff --git a/include/linux/spi/wl12xx.h b/include/linux/spi/wl12xx.h
index aed64ed3dc8a..a223ecbc71ef 100644
--- a/include/linux/spi/wl12xx.h
+++ b/include/linux/spi/wl12xx.h
@@ -26,6 +26,8 @@
struct wl12xx_platform_data {
void (*set_power)(bool enable);
+ /* SDIO only: IRQ number if WLAN_IRQ line is used, 0 for SDIO IRQs */
+ int irq;
bool use_eeprom;
};
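
A hypothetical board-file sketch (not from the patch) showing how the new irq field sits next to the existing callbacks; 42 and the power hook are placeholders:

#include <linux/spi/wl12xx.h>

static void foo_wlan_set_power(bool enable)
{
	/* toggle the WLAN enable GPIO or regulator here */
}

static struct wl12xx_platform_data foo_wlan_pdata = {
	.set_power	= foo_wlan_set_power,
	.irq		= 42,	/* out-of-band WLAN_IRQ; 0 means in-band SDIO IRQs */
	.use_eeprom	= false,
};
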
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 89fac6a3f78b..f8854655860e 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -60,7 +60,7 @@
/*
* Must define these before including other files, inline functions need them
*/
-#define LOCK_SECTION_NAME ".text.lock."KBUILD_BASENAME
+#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
#define LOCK_SECTION_START(extra) \
".subsection 1\n\t" \
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 4d5ecb222af9..4d5d2f546dbf 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -27,6 +27,8 @@
#ifndef _LINUX_SRCU_H
#define _LINUX_SRCU_H
+#include <linux/mutex.h>
+
struct srcu_struct_array {
int c[2];
};
@@ -84,8 +86,8 @@ long srcu_batches_completed(struct srcu_struct *sp);
/**
* srcu_read_lock_held - might we be in SRCU read-side critical section?
*
- * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
- * an SRCU read-side critical section. In absence of CONFIG_PROVE_LOCKING,
+ * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU
+ * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC,
* this assumes we are in an SRCU read-side critical section unless it can
* prove otherwise.
*/
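
For context, a small sketch of the check this comment describes (names invented, foo_srcu is assumed to have been set up with init_srcu_struct()); with CONFIG_DEBUG_LOCK_ALLOC enabled the WARN fires if the helper runs outside an SRCU read-side critical section:

#include <linux/kernel.h>
#include <linux/srcu.h>

static struct srcu_struct foo_srcu;

static void foo_reader(void)
{
	int idx = srcu_read_lock(&foo_srcu);

	WARN_ON_ONCE(!srcu_read_lock_held(&foo_srcu));
	/* ... dereference SRCU-protected pointers here ... */

	srcu_read_unlock(&foo_srcu, idx);
}
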
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index 24f988547361..a2608bff9c78 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -305,6 +305,7 @@ struct ssb_bus {
/* ID information about the Chip. */
u16 chip_id;
u16 chip_rev;
+ u16 sprom_offset;
u16 sprom_size; /* number of words in sprom */
u8 chip_package;
@@ -394,6 +395,9 @@ extern int ssb_bus_sdiobus_register(struct ssb_bus *bus,
extern void ssb_bus_unregister(struct ssb_bus *bus);
+/* Does the device have an SPROM? */
+extern bool ssb_is_sprom_available(struct ssb_bus *bus);
+
/* Set a fallback SPROM.
* See kdoc at the function definition for complete documentation. */
extern int ssb_arch_set_fallback_sprom(const struct ssb_sprom *sprom);
diff --git a/include/linux/ssb/ssb_driver_chipcommon.h b/include/linux/ssb/ssb_driver_chipcommon.h
index 4e27acf0a92f..2cdf249b4e5f 100644
--- a/include/linux/ssb/ssb_driver_chipcommon.h
+++ b/include/linux/ssb/ssb_driver_chipcommon.h
@@ -53,6 +53,7 @@
#define SSB_CHIPCO_CAP_64BIT 0x08000000 /* 64-bit Backplane */
#define SSB_CHIPCO_CAP_PMU 0x10000000 /* PMU available (rev >= 20) */
#define SSB_CHIPCO_CAP_ECI 0x20000000 /* ECI available (rev >= 20) */
+#define SSB_CHIPCO_CAP_SPROM 0x40000000 /* SPROM present */
#define SSB_CHIPCO_CORECTL 0x0008
#define SSB_CHIPCO_CORECTL_UARTCLK0 0x00000001 /* Drive UART with internal clock */
#define SSB_CHIPCO_CORECTL_SE 0x00000002 /* sync clk out enable (corerev >= 3) */
@@ -385,6 +386,7 @@
/** Chip specific Chip-Status register contents. */
+#define SSB_CHIPCO_CHST_4322_SPROM_EXISTS 0x00000040 /* SPROM present */
#define SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL 0x00000003
#define SSB_CHIPCO_CHST_4325_DEFCIS_SEL 0 /* OTP is powered up, use def. CIS, no SPROM */
#define SSB_CHIPCO_CHST_4325_SPROM_SEL 1 /* OTP is powered up, SPROM is present */
@@ -398,6 +400,18 @@
#define SSB_CHIPCO_CHST_4325_RCAL_VALUE_SHIFT 4
#define SSB_CHIPCO_CHST_4325_PMUTOP_2B 0x00000200 /* 1 for 2b, 0 for to 2a */
+/** Macros to determine SPROM presence based on Chip-Status register. */
+#define SSB_CHIPCO_CHST_4312_SPROM_PRESENT(status) \
+ ((status & SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL) != \
+ SSB_CHIPCO_CHST_4325_OTP_SEL)
+#define SSB_CHIPCO_CHST_4322_SPROM_PRESENT(status) \
+ (status & SSB_CHIPCO_CHST_4322_SPROM_EXISTS)
+#define SSB_CHIPCO_CHST_4325_SPROM_PRESENT(status) \
+ (((status & SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL) != \
+ SSB_CHIPCO_CHST_4325_DEFCIS_SEL) && \
+ ((status & SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL) != \
+ SSB_CHIPCO_CHST_4325_OTP_SEL))
+
/** Clockcontrol masks and values **/
@@ -564,6 +578,7 @@ struct ssb_chipcommon_pmu {
struct ssb_chipcommon {
struct ssb_device *dev;
u32 capabilities;
+ u32 status;
/* Fast Powerup Delay constant */
u16 fast_pwrup_delay;
struct ssb_chipcommon_pmu pmu;
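
A hedged sketch of the kind of check the new status field and SPROM-present macros make possible; the chip IDs and the default are illustrative, not the driver's actual decision logic:

#include <linux/ssb/ssb.h>
#include <linux/ssb/ssb_driver_chipcommon.h>

static bool foo_sprom_present(struct ssb_bus *bus)
{
	u32 chst = bus->chipco.status;

	switch (bus->chip_id) {
	case 0x4312:
		return SSB_CHIPCO_CHST_4312_SPROM_PRESENT(chst);
	case 0x4322:
		return SSB_CHIPCO_CHST_4322_SPROM_PRESENT(chst);
	case 0x4325:
		return SSB_CHIPCO_CHST_4325_SPROM_PRESENT(chst);
	default:
		return true;	/* assume a SPROM on chips without a status bit */
	}
}
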
diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h
index 9ae9082eaeb4..a6d5225b9275 100644
--- a/include/linux/ssb/ssb_regs.h
+++ b/include/linux/ssb/ssb_regs.h
@@ -170,26 +170,27 @@
#define SSB_SPROMSIZE_WORDS_R4 220
#define SSB_SPROMSIZE_BYTES_R123 (SSB_SPROMSIZE_WORDS_R123 * sizeof(u16))
#define SSB_SPROMSIZE_BYTES_R4 (SSB_SPROMSIZE_WORDS_R4 * sizeof(u16))
-#define SSB_SPROM_BASE 0x1000
-#define SSB_SPROM_REVISION 0x107E
+#define SSB_SPROM_BASE1 0x1000
+#define SSB_SPROM_BASE31 0x0800
+#define SSB_SPROM_REVISION 0x007E
#define SSB_SPROM_REVISION_REV 0x00FF /* SPROM Revision number */
#define SSB_SPROM_REVISION_CRC 0xFF00 /* SPROM CRC8 value */
#define SSB_SPROM_REVISION_CRC_SHIFT 8
/* SPROM Revision 1 */
-#define SSB_SPROM1_SPID 0x1004 /* Subsystem Product ID for PCI */
-#define SSB_SPROM1_SVID 0x1006 /* Subsystem Vendor ID for PCI */
-#define SSB_SPROM1_PID 0x1008 /* Product ID for PCI */
-#define SSB_SPROM1_IL0MAC 0x1048 /* 6 bytes MAC address for 802.11b/g */
-#define SSB_SPROM1_ET0MAC 0x104E /* 6 bytes MAC address for Ethernet */
-#define SSB_SPROM1_ET1MAC 0x1054 /* 6 bytes MAC address for 802.11a */
-#define SSB_SPROM1_ETHPHY 0x105A /* Ethernet PHY settings */
+#define SSB_SPROM1_SPID 0x0004 /* Subsystem Product ID for PCI */
+#define SSB_SPROM1_SVID 0x0006 /* Subsystem Vendor ID for PCI */
+#define SSB_SPROM1_PID 0x0008 /* Product ID for PCI */
+#define SSB_SPROM1_IL0MAC 0x0048 /* 6 bytes MAC address for 802.11b/g */
+#define SSB_SPROM1_ET0MAC 0x004E /* 6 bytes MAC address for Ethernet */
+#define SSB_SPROM1_ET1MAC 0x0054 /* 6 bytes MAC address for 802.11a */
+#define SSB_SPROM1_ETHPHY 0x005A /* Ethernet PHY settings */
#define SSB_SPROM1_ETHPHY_ET0A 0x001F /* MII Address for enet0 */
#define SSB_SPROM1_ETHPHY_ET1A 0x03E0 /* MII Address for enet1 */
#define SSB_SPROM1_ETHPHY_ET1A_SHIFT 5
#define SSB_SPROM1_ETHPHY_ET0M (1<<14) /* MDIO for enet0 */
#define SSB_SPROM1_ETHPHY_ET1M (1<<15) /* MDIO for enet1 */
-#define SSB_SPROM1_BINF 0x105C /* Board info */
+#define SSB_SPROM1_BINF 0x005C /* Board info */
#define SSB_SPROM1_BINF_BREV 0x00FF /* Board Revision */
#define SSB_SPROM1_BINF_CCODE 0x0F00 /* Country Code */
#define SSB_SPROM1_BINF_CCODE_SHIFT 8
@@ -197,63 +198,63 @@
#define SSB_SPROM1_BINF_ANTBG_SHIFT 12
#define SSB_SPROM1_BINF_ANTA 0xC000 /* Available A-PHY antennas */
#define SSB_SPROM1_BINF_ANTA_SHIFT 14
-#define SSB_SPROM1_PA0B0 0x105E
-#define SSB_SPROM1_PA0B1 0x1060
-#define SSB_SPROM1_PA0B2 0x1062
-#define SSB_SPROM1_GPIOA 0x1064 /* General Purpose IO pins 0 and 1 */
+#define SSB_SPROM1_PA0B0 0x005E
+#define SSB_SPROM1_PA0B1 0x0060
+#define SSB_SPROM1_PA0B2 0x0062
+#define SSB_SPROM1_GPIOA 0x0064 /* General Purpose IO pins 0 and 1 */
#define SSB_SPROM1_GPIOA_P0 0x00FF /* Pin 0 */
#define SSB_SPROM1_GPIOA_P1 0xFF00 /* Pin 1 */
#define SSB_SPROM1_GPIOA_P1_SHIFT 8
-#define SSB_SPROM1_GPIOB 0x1066 /* General Purpose IO pins 2 and 3 */
+#define SSB_SPROM1_GPIOB 0x0066 /* General Purpose IO pins 2 and 3 */
#define SSB_SPROM1_GPIOB_P2 0x00FF /* Pin 2 */
#define SSB_SPROM1_GPIOB_P3 0xFF00 /* Pin 3 */
#define SSB_SPROM1_GPIOB_P3_SHIFT 8
-#define SSB_SPROM1_MAXPWR 0x1068 /* Power Amplifier Max Power */
+#define SSB_SPROM1_MAXPWR 0x0068 /* Power Amplifier Max Power */
#define SSB_SPROM1_MAXPWR_BG 0x00FF /* B-PHY and G-PHY (in dBm Q5.2) */
#define SSB_SPROM1_MAXPWR_A 0xFF00 /* A-PHY (in dBm Q5.2) */
#define SSB_SPROM1_MAXPWR_A_SHIFT 8
-#define SSB_SPROM1_PA1B0 0x106A
-#define SSB_SPROM1_PA1B1 0x106C
-#define SSB_SPROM1_PA1B2 0x106E
-#define SSB_SPROM1_ITSSI 0x1070 /* Idle TSSI Target */
+#define SSB_SPROM1_PA1B0 0x006A
+#define SSB_SPROM1_PA1B1 0x006C
+#define SSB_SPROM1_PA1B2 0x006E
+#define SSB_SPROM1_ITSSI 0x0070 /* Idle TSSI Target */
#define SSB_SPROM1_ITSSI_BG 0x00FF /* B-PHY and G-PHY*/
#define SSB_SPROM1_ITSSI_A 0xFF00 /* A-PHY */
#define SSB_SPROM1_ITSSI_A_SHIFT 8
-#define SSB_SPROM1_BFLLO 0x1072 /* Boardflags (low 16 bits) */
-#define SSB_SPROM1_AGAIN 0x1074 /* Antenna Gain (in dBm Q5.2) */
+#define SSB_SPROM1_BFLLO 0x0072 /* Boardflags (low 16 bits) */
+#define SSB_SPROM1_AGAIN 0x0074 /* Antenna Gain (in dBm Q5.2) */
#define SSB_SPROM1_AGAIN_BG 0x00FF /* B-PHY and G-PHY */
#define SSB_SPROM1_AGAIN_BG_SHIFT 0
#define SSB_SPROM1_AGAIN_A 0xFF00 /* A-PHY */
#define SSB_SPROM1_AGAIN_A_SHIFT 8
/* SPROM Revision 2 (inherits from rev 1) */
-#define SSB_SPROM2_BFLHI 0x1038 /* Boardflags (high 16 bits) */
-#define SSB_SPROM2_MAXP_A 0x103A /* A-PHY Max Power */
+#define SSB_SPROM2_BFLHI 0x0038 /* Boardflags (high 16 bits) */
+#define SSB_SPROM2_MAXP_A 0x003A /* A-PHY Max Power */
#define SSB_SPROM2_MAXP_A_HI 0x00FF /* Max Power High */
#define SSB_SPROM2_MAXP_A_LO 0xFF00 /* Max Power Low */
#define SSB_SPROM2_MAXP_A_LO_SHIFT 8
-#define SSB_SPROM2_PA1LOB0 0x103C /* A-PHY PowerAmplifier Low Settings */
-#define SSB_SPROM2_PA1LOB1 0x103E /* A-PHY PowerAmplifier Low Settings */
-#define SSB_SPROM2_PA1LOB2 0x1040 /* A-PHY PowerAmplifier Low Settings */
-#define SSB_SPROM2_PA1HIB0 0x1042 /* A-PHY PowerAmplifier High Settings */
-#define SSB_SPROM2_PA1HIB1 0x1044 /* A-PHY PowerAmplifier High Settings */
-#define SSB_SPROM2_PA1HIB2 0x1046 /* A-PHY PowerAmplifier High Settings */
-#define SSB_SPROM2_OPO 0x1078 /* OFDM Power Offset from CCK Level */
+#define SSB_SPROM2_PA1LOB0 0x003C /* A-PHY PowerAmplifier Low Settings */
+#define SSB_SPROM2_PA1LOB1 0x003E /* A-PHY PowerAmplifier Low Settings */
+#define SSB_SPROM2_PA1LOB2 0x0040 /* A-PHY PowerAmplifier Low Settings */
+#define SSB_SPROM2_PA1HIB0 0x0042 /* A-PHY PowerAmplifier High Settings */
+#define SSB_SPROM2_PA1HIB1 0x0044 /* A-PHY PowerAmplifier High Settings */
+#define SSB_SPROM2_PA1HIB2 0x0046 /* A-PHY PowerAmplifier High Settings */
+#define SSB_SPROM2_OPO 0x0078 /* OFDM Power Offset from CCK Level */
#define SSB_SPROM2_OPO_VALUE 0x00FF
#define SSB_SPROM2_OPO_UNUSED 0xFF00
-#define SSB_SPROM2_CCODE 0x107C /* Two char Country Code */
+#define SSB_SPROM2_CCODE 0x007C /* Two char Country Code */
/* SPROM Revision 3 (inherits most data from rev 2) */
-#define SSB_SPROM3_IL0MAC 0x104A /* 6 bytes MAC address for 802.11b/g */
-#define SSB_SPROM3_OFDMAPO 0x102C /* A-PHY OFDM Mid Power Offset (4 bytes, BigEndian) */
-#define SSB_SPROM3_OFDMALPO 0x1030 /* A-PHY OFDM Low Power Offset (4 bytes, BigEndian) */
-#define SSB_SPROM3_OFDMAHPO 0x1034 /* A-PHY OFDM High Power Offset (4 bytes, BigEndian) */
-#define SSB_SPROM3_GPIOLDC 0x1042 /* GPIO LED Powersave Duty Cycle (4 bytes, BigEndian) */
+#define SSB_SPROM3_OFDMAPO 0x002C /* A-PHY OFDM Mid Power Offset (4 bytes, BigEndian) */
+#define SSB_SPROM3_OFDMALPO 0x0030 /* A-PHY OFDM Low Power Offset (4 bytes, BigEndian) */
+#define SSB_SPROM3_OFDMAHPO 0x0034 /* A-PHY OFDM High Power Offset (4 bytes, BigEndian) */
+#define SSB_SPROM3_GPIOLDC 0x0042 /* GPIO LED Powersave Duty Cycle (4 bytes, BigEndian) */
#define SSB_SPROM3_GPIOLDC_OFF 0x0000FF00 /* Off Count */
#define SSB_SPROM3_GPIOLDC_OFF_SHIFT 8
#define SSB_SPROM3_GPIOLDC_ON 0x00FF0000 /* On Count */
#define SSB_SPROM3_GPIOLDC_ON_SHIFT 16
-#define SSB_SPROM3_CCKPO 0x1078 /* CCK Power Offset */
+#define SSB_SPROM3_IL0MAC 0x004A /* 6 bytes MAC address for 802.11b/g */
+#define SSB_SPROM3_CCKPO 0x0078 /* CCK Power Offset */
#define SSB_SPROM3_CCKPO_1M 0x000F /* 1M Rate PO */
#define SSB_SPROM3_CCKPO_2M 0x00F0 /* 2M Rate PO */
#define SSB_SPROM3_CCKPO_2M_SHIFT 4
@@ -264,100 +265,100 @@
#define SSB_SPROM3_OFDMGPO 0x107A /* G-PHY OFDM Power Offset (4 bytes, BigEndian) */
/* SPROM Revision 4 */
-#define SSB_SPROM4_IL0MAC 0x104C /* 6 byte MAC address for a/b/g/n */
-#define SSB_SPROM4_ETHPHY 0x105A /* Ethernet PHY settings ?? */
+#define SSB_SPROM4_BFLLO 0x0044 /* Boardflags (low 16 bits) */
+#define SSB_SPROM4_BFLHI 0x0046 /* Board Flags Hi */
+#define SSB_SPROM4_IL0MAC 0x004C /* 6 byte MAC address for a/b/g/n */
+#define SSB_SPROM4_CCODE 0x0052 /* Country Code (2 bytes) */
+#define SSB_SPROM4_GPIOA 0x0056 /* Gen. Purpose IO # 0 and 1 */
+#define SSB_SPROM4_GPIOA_P0 0x00FF /* Pin 0 */
+#define SSB_SPROM4_GPIOA_P1 0xFF00 /* Pin 1 */
+#define SSB_SPROM4_GPIOA_P1_SHIFT 8
+#define SSB_SPROM4_GPIOB 0x0058 /* Gen. Purpose IO # 2 and 3 */
+#define SSB_SPROM4_GPIOB_P2 0x00FF /* Pin 2 */
+#define SSB_SPROM4_GPIOB_P3 0xFF00 /* Pin 3 */
+#define SSB_SPROM4_GPIOB_P3_SHIFT 8
+#define SSB_SPROM4_ETHPHY 0x005A /* Ethernet PHY settings ?? */
#define SSB_SPROM4_ETHPHY_ET0A 0x001F /* MII Address for enet0 */
#define SSB_SPROM4_ETHPHY_ET1A 0x03E0 /* MII Address for enet1 */
#define SSB_SPROM4_ETHPHY_ET1A_SHIFT 5
#define SSB_SPROM4_ETHPHY_ET0M (1<<14) /* MDIO for enet0 */
#define SSB_SPROM4_ETHPHY_ET1M (1<<15) /* MDIO for enet1 */
-#define SSB_SPROM4_CCODE 0x1052 /* Country Code (2 bytes) */
-#define SSB_SPROM4_ANTAVAIL 0x105D /* Antenna available bitfields */
-#define SSB_SPROM4_ANTAVAIL_A 0x00FF /* A-PHY bitfield */
-#define SSB_SPROM4_ANTAVAIL_A_SHIFT 0
-#define SSB_SPROM4_ANTAVAIL_BG 0xFF00 /* B-PHY and G-PHY bitfield */
-#define SSB_SPROM4_ANTAVAIL_BG_SHIFT 8
-#define SSB_SPROM4_BFLLO 0x1044 /* Boardflags (low 16 bits) */
-#define SSB_SPROM4_AGAIN01 0x105E /* Antenna Gain (in dBm Q5.2) */
+#define SSB_SPROM4_ANTAVAIL 0x005D /* Antenna available bitfields */
+#define SSB_SPROM4_ANTAVAIL_A 0x00FF /* A-PHY bitfield */
+#define SSB_SPROM4_ANTAVAIL_A_SHIFT 0
+#define SSB_SPROM4_ANTAVAIL_BG 0xFF00 /* B-PHY and G-PHY bitfield */
+#define SSB_SPROM4_ANTAVAIL_BG_SHIFT 8
+#define SSB_SPROM4_AGAIN01 0x005E /* Antenna Gain (in dBm Q5.2) */
#define SSB_SPROM4_AGAIN0 0x00FF /* Antenna 0 */
#define SSB_SPROM4_AGAIN0_SHIFT 0
#define SSB_SPROM4_AGAIN1 0xFF00 /* Antenna 1 */
#define SSB_SPROM4_AGAIN1_SHIFT 8
-#define SSB_SPROM4_AGAIN23 0x1060
+#define SSB_SPROM4_AGAIN23 0x0060
#define SSB_SPROM4_AGAIN2 0x00FF /* Antenna 2 */
#define SSB_SPROM4_AGAIN2_SHIFT 0
#define SSB_SPROM4_AGAIN3 0xFF00 /* Antenna 3 */
#define SSB_SPROM4_AGAIN3_SHIFT 8
-#define SSB_SPROM4_BFLHI 0x1046 /* Board Flags Hi */
-#define SSB_SPROM4_MAXP_BG 0x1080 /* Max Power BG in path 1 */
+#define SSB_SPROM4_MAXP_BG 0x0080 /* Max Power BG in path 1 */
#define SSB_SPROM4_MAXP_BG_MASK 0x00FF /* Mask for Max Power BG */
#define SSB_SPROM4_ITSSI_BG 0xFF00 /* Mask for path 1 itssi_bg */
#define SSB_SPROM4_ITSSI_BG_SHIFT 8
-#define SSB_SPROM4_MAXP_A 0x108A /* Max Power A in path 1 */
+#define SSB_SPROM4_MAXP_A 0x008A /* Max Power A in path 1 */
#define SSB_SPROM4_MAXP_A_MASK 0x00FF /* Mask for Max Power A */
#define SSB_SPROM4_ITSSI_A 0xFF00 /* Mask for path 1 itssi_a */
#define SSB_SPROM4_ITSSI_A_SHIFT 8
-#define SSB_SPROM4_GPIOA 0x1056 /* Gen. Purpose IO # 0 and 1 */
-#define SSB_SPROM4_GPIOA_P0 0x00FF /* Pin 0 */
-#define SSB_SPROM4_GPIOA_P1 0xFF00 /* Pin 1 */
-#define SSB_SPROM4_GPIOA_P1_SHIFT 8
-#define SSB_SPROM4_GPIOB 0x1058 /* Gen. Purpose IO # 2 and 3 */
-#define SSB_SPROM4_GPIOB_P2 0x00FF /* Pin 2 */
-#define SSB_SPROM4_GPIOB_P3 0xFF00 /* Pin 3 */
-#define SSB_SPROM4_GPIOB_P3_SHIFT 8
-#define SSB_SPROM4_PA0B0 0x1082 /* The paXbY locations are */
-#define SSB_SPROM4_PA0B1 0x1084 /* only guesses */
-#define SSB_SPROM4_PA0B2 0x1086
-#define SSB_SPROM4_PA1B0 0x108E
-#define SSB_SPROM4_PA1B1 0x1090
-#define SSB_SPROM4_PA1B2 0x1092
+#define SSB_SPROM4_PA0B0 0x0082 /* The paXbY locations are */
+#define SSB_SPROM4_PA0B1 0x0084 /* only guesses */
+#define SSB_SPROM4_PA0B2 0x0086
+#define SSB_SPROM4_PA1B0 0x008E
+#define SSB_SPROM4_PA1B1 0x0090
+#define SSB_SPROM4_PA1B2 0x0092
/* SPROM Revision 5 (inherits most data from rev 4) */
-#define SSB_SPROM5_BFLLO 0x104A /* Boardflags (low 16 bits) */
-#define SSB_SPROM5_BFLHI 0x104C /* Board Flags Hi */
-#define SSB_SPROM5_IL0MAC 0x1052 /* 6 byte MAC address for a/b/g/n */
-#define SSB_SPROM5_CCODE 0x1044 /* Country Code (2 bytes) */
-#define SSB_SPROM5_GPIOA 0x1076 /* Gen. Purpose IO # 0 and 1 */
+#define SSB_SPROM5_CCODE 0x0044 /* Country Code (2 bytes) */
+#define SSB_SPROM5_BFLLO 0x004A /* Boardflags (low 16 bits) */
+#define SSB_SPROM5_BFLHI 0x004C /* Board Flags Hi */
+#define SSB_SPROM5_IL0MAC 0x0052 /* 6 byte MAC address for a/b/g/n */
+#define SSB_SPROM5_GPIOA 0x0076 /* Gen. Purpose IO # 0 and 1 */
#define SSB_SPROM5_GPIOA_P0 0x00FF /* Pin 0 */
#define SSB_SPROM5_GPIOA_P1 0xFF00 /* Pin 1 */
#define SSB_SPROM5_GPIOA_P1_SHIFT 8
-#define SSB_SPROM5_GPIOB 0x1078 /* Gen. Purpose IO # 2 and 3 */
+#define SSB_SPROM5_GPIOB 0x0078 /* Gen. Purpose IO # 2 and 3 */
#define SSB_SPROM5_GPIOB_P2 0x00FF /* Pin 2 */
#define SSB_SPROM5_GPIOB_P3 0xFF00 /* Pin 3 */
#define SSB_SPROM5_GPIOB_P3_SHIFT 8
/* SPROM Revision 8 */
-#define SSB_SPROM8_BOARDREV 0x1082 /* Board revision */
-#define SSB_SPROM8_BFLLO 0x1084 /* Board flags (bits 0-15) */
-#define SSB_SPROM8_BFLHI 0x1086 /* Board flags (bits 16-31) */
-#define SSB_SPROM8_BFL2LO 0x1088 /* Board flags (bits 32-47) */
-#define SSB_SPROM8_BFL2HI 0x108A /* Board flags (bits 48-63) */
-#define SSB_SPROM8_IL0MAC 0x108C /* 6 byte MAC address */
-#define SSB_SPROM8_CCODE 0x1092 /* 2 byte country code */
-#define SSB_SPROM8_ANTAVAIL 0x109C /* Antenna available bitfields*/
-#define SSB_SPROM8_ANTAVAIL_A 0xFF00 /* A-PHY bitfield */
-#define SSB_SPROM8_ANTAVAIL_A_SHIFT 8
-#define SSB_SPROM8_ANTAVAIL_BG 0x00FF /* B-PHY and G-PHY bitfield */
-#define SSB_SPROM8_ANTAVAIL_BG_SHIFT 0
-#define SSB_SPROM8_AGAIN01 0x109E /* Antenna Gain (in dBm Q5.2) */
+#define SSB_SPROM8_BOARDREV 0x0082 /* Board revision */
+#define SSB_SPROM8_BFLLO 0x0084 /* Board flags (bits 0-15) */
+#define SSB_SPROM8_BFLHI 0x0086 /* Board flags (bits 16-31) */
+#define SSB_SPROM8_BFL2LO 0x0088 /* Board flags (bits 32-47) */
+#define SSB_SPROM8_BFL2HI 0x008A /* Board flags (bits 48-63) */
+#define SSB_SPROM8_IL0MAC 0x008C /* 6 byte MAC address */
+#define SSB_SPROM8_CCODE 0x0092 /* 2 byte country code */
+#define SSB_SPROM8_GPIOA 0x0096 /* Gen. Purpose IO # 0 and 1 */
+#define SSB_SPROM8_GPIOA_P0 0x00FF /* Pin 0 */
+#define SSB_SPROM8_GPIOA_P1 0xFF00 /* Pin 1 */
+#define SSB_SPROM8_GPIOA_P1_SHIFT 8
+#define SSB_SPROM8_GPIOB 0x0098 /* Gen. Purpose IO # 2 and 3 */
+#define SSB_SPROM8_GPIOB_P2 0x00FF /* Pin 2 */
+#define SSB_SPROM8_GPIOB_P3 0xFF00 /* Pin 3 */
+#define SSB_SPROM8_GPIOB_P3_SHIFT 8
+#define SSB_SPROM8_ANTAVAIL 0x009C /* Antenna available bitfields*/
+#define SSB_SPROM8_ANTAVAIL_A 0xFF00 /* A-PHY bitfield */
+#define SSB_SPROM8_ANTAVAIL_A_SHIFT 8
+#define SSB_SPROM8_ANTAVAIL_BG 0x00FF /* B-PHY and G-PHY bitfield */
+#define SSB_SPROM8_ANTAVAIL_BG_SHIFT 0
+#define SSB_SPROM8_AGAIN01 0x009E /* Antenna Gain (in dBm Q5.2) */
#define SSB_SPROM8_AGAIN0 0x00FF /* Antenna 0 */
#define SSB_SPROM8_AGAIN0_SHIFT 0
#define SSB_SPROM8_AGAIN1 0xFF00 /* Antenna 1 */
#define SSB_SPROM8_AGAIN1_SHIFT 8
-#define SSB_SPROM8_AGAIN23 0x10A0
+#define SSB_SPROM8_AGAIN23 0x00A0
#define SSB_SPROM8_AGAIN2 0x00FF /* Antenna 2 */
#define SSB_SPROM8_AGAIN2_SHIFT 0
#define SSB_SPROM8_AGAIN3 0xFF00 /* Antenna 3 */
#define SSB_SPROM8_AGAIN3_SHIFT 8
-#define SSB_SPROM8_GPIOA 0x1096 /*Gen. Purpose IO # 0 and 1 */
-#define SSB_SPROM8_GPIOA_P0 0x00FF /* Pin 0 */
-#define SSB_SPROM8_GPIOA_P1 0xFF00 /* Pin 1 */
-#define SSB_SPROM8_GPIOA_P1_SHIFT 8
-#define SSB_SPROM8_GPIOB 0x1098 /* Gen. Purpose IO # 2 and 3 */
-#define SSB_SPROM8_GPIOB_P2 0x00FF /* Pin 2 */
-#define SSB_SPROM8_GPIOB_P3 0xFF00 /* Pin 3 */
-#define SSB_SPROM8_GPIOB_P3_SHIFT 8
-#define SSB_SPROM8_RSSIPARM2G 0x10A4 /* RSSI params for 2GHz */
+#define SSB_SPROM8_RSSIPARM2G 0x00A4 /* RSSI params for 2GHz */
#define SSB_SPROM8_RSSISMF2G 0x000F
#define SSB_SPROM8_RSSISMC2G 0x00F0
#define SSB_SPROM8_RSSISMC2G_SHIFT 4
@@ -365,7 +366,7 @@
#define SSB_SPROM8_RSSISAV2G_SHIFT 8
#define SSB_SPROM8_BXA2G 0x1800
#define SSB_SPROM8_BXA2G_SHIFT 11
-#define SSB_SPROM8_RSSIPARM5G 0x10A6 /* RSSI params for 5GHz */
+#define SSB_SPROM8_RSSIPARM5G 0x00A6 /* RSSI params for 5GHz */
#define SSB_SPROM8_RSSISMF5G 0x000F
#define SSB_SPROM8_RSSISMC5G 0x00F0
#define SSB_SPROM8_RSSISMC5G_SHIFT 4
@@ -373,47 +374,47 @@
#define SSB_SPROM8_RSSISAV5G_SHIFT 8
#define SSB_SPROM8_BXA5G 0x1800
#define SSB_SPROM8_BXA5G_SHIFT 11
-#define SSB_SPROM8_TRI25G 0x10A8 /* TX isolation 2.4&5.3GHz */
+#define SSB_SPROM8_TRI25G 0x00A8 /* TX isolation 2.4&5.3GHz */
#define SSB_SPROM8_TRI2G 0x00FF /* TX isolation 2.4GHz */
#define SSB_SPROM8_TRI5G 0xFF00 /* TX isolation 5.3GHz */
#define SSB_SPROM8_TRI5G_SHIFT 8
-#define SSB_SPROM8_TRI5GHL 0x10AA /* TX isolation 5.2/5.8GHz */
+#define SSB_SPROM8_TRI5GHL 0x00AA /* TX isolation 5.2/5.8GHz */
#define SSB_SPROM8_TRI5GL 0x00FF /* TX isolation 5.2GHz */
#define SSB_SPROM8_TRI5GH 0xFF00 /* TX isolation 5.8GHz */
#define SSB_SPROM8_TRI5GH_SHIFT 8
-#define SSB_SPROM8_RXPO 0x10AC /* RX power offsets */
+#define SSB_SPROM8_RXPO 0x00AC /* RX power offsets */
#define SSB_SPROM8_RXPO2G 0x00FF /* 2GHz RX power offset */
#define SSB_SPROM8_RXPO5G 0xFF00 /* 5GHz RX power offset */
#define SSB_SPROM8_RXPO5G_SHIFT 8
-#define SSB_SPROM8_MAXP_BG 0x10C0 /* Max Power 2GHz in path 1 */
+#define SSB_SPROM8_MAXP_BG 0x00C0 /* Max Power 2GHz in path 1 */
#define SSB_SPROM8_MAXP_BG_MASK 0x00FF /* Mask for Max Power 2GHz */
#define SSB_SPROM8_ITSSI_BG 0xFF00 /* Mask for path 1 itssi_bg */
#define SSB_SPROM8_ITSSI_BG_SHIFT 8
-#define SSB_SPROM8_PA0B0 0x10C2 /* 2GHz power amp settings */
-#define SSB_SPROM8_PA0B1 0x10C4
-#define SSB_SPROM8_PA0B2 0x10C6
-#define SSB_SPROM8_MAXP_A 0x10C8 /* Max Power 5.3GHz */
+#define SSB_SPROM8_PA0B0 0x00C2 /* 2GHz power amp settings */
+#define SSB_SPROM8_PA0B1 0x00C4
+#define SSB_SPROM8_PA0B2 0x00C6
+#define SSB_SPROM8_MAXP_A 0x00C8 /* Max Power 5.3GHz */
#define SSB_SPROM8_MAXP_A_MASK 0x00FF /* Mask for Max Power 5.3GHz */
#define SSB_SPROM8_ITSSI_A 0xFF00 /* Mask for path 1 itssi_a */
#define SSB_SPROM8_ITSSI_A_SHIFT 8
-#define SSB_SPROM8_MAXP_AHL 0x10CA /* Max Power 5.2/5.8GHz */
+#define SSB_SPROM8_MAXP_AHL 0x00CA /* Max Power 5.2/5.8GHz */
#define SSB_SPROM8_MAXP_AH_MASK 0x00FF /* Mask for Max Power 5.8GHz */
#define SSB_SPROM8_MAXP_AL_MASK 0xFF00 /* Mask for Max Power 5.2GHz */
#define SSB_SPROM8_MAXP_AL_SHIFT 8
-#define SSB_SPROM8_PA1B0 0x10CC /* 5.3GHz power amp settings */
-#define SSB_SPROM8_PA1B1 0x10CE
-#define SSB_SPROM8_PA1B2 0x10D0
-#define SSB_SPROM8_PA1LOB0 0x10D2 /* 5.2GHz power amp settings */
-#define SSB_SPROM8_PA1LOB1 0x10D4
-#define SSB_SPROM8_PA1LOB2 0x10D6
-#define SSB_SPROM8_PA1HIB0 0x10D8 /* 5.8GHz power amp settings */
-#define SSB_SPROM8_PA1HIB1 0x10DA
-#define SSB_SPROM8_PA1HIB2 0x10DC
-#define SSB_SPROM8_CCK2GPO 0x1140 /* CCK power offset */
-#define SSB_SPROM8_OFDM2GPO 0x1142 /* 2.4GHz OFDM power offset */
-#define SSB_SPROM8_OFDM5GPO 0x1146 /* 5.3GHz OFDM power offset */
-#define SSB_SPROM8_OFDM5GLPO 0x114A /* 5.2GHz OFDM power offset */
-#define SSB_SPROM8_OFDM5GHPO 0x114E /* 5.8GHz OFDM power offset */
+#define SSB_SPROM8_PA1B0 0x00CC /* 5.3GHz power amp settings */
+#define SSB_SPROM8_PA1B1 0x00CE
+#define SSB_SPROM8_PA1B2 0x00D0
+#define SSB_SPROM8_PA1LOB0 0x00D2 /* 5.2GHz power amp settings */
+#define SSB_SPROM8_PA1LOB1 0x00D4
+#define SSB_SPROM8_PA1LOB2 0x00D6
+#define SSB_SPROM8_PA1HIB0 0x00D8 /* 5.8GHz power amp settings */
+#define SSB_SPROM8_PA1HIB1 0x00DA
+#define SSB_SPROM8_PA1HIB2 0x00DC
+#define SSB_SPROM8_CCK2GPO 0x0140 /* CCK power offset */
+#define SSB_SPROM8_OFDM2GPO 0x0142 /* 2.4GHz OFDM power offset */
+#define SSB_SPROM8_OFDM5GPO 0x0146 /* 5.3GHz OFDM power offset */
+#define SSB_SPROM8_OFDM5GLPO 0x014A /* 5.2GHz OFDM power offset */
+#define SSB_SPROM8_OFDM5GHPO 0x014E /* 5.8GHz OFDM power offset */
/* Values for SSB_SPROM1_BINF_CCODE */
enum {
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 32bfd1a8a48d..632ff7c03280 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -33,6 +33,7 @@ struct plat_stmmacenet_data {
int bus_id;
int pbl;
int has_gmac;
+ int enh_desc;
void (*fix_mac_speed)(void *priv, unsigned int speed);
void (*bus_setup)(unsigned long ioaddr);
#ifdef CONFIG_STM_DRIVERS
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index baba3a23a814..6b524a0d02e4 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -1,13 +1,101 @@
#ifndef _LINUX_STOP_MACHINE
#define _LINUX_STOP_MACHINE
-/* "Bogolock": stop the entire machine, disable interrupts. This is a
- very heavy lock, which is equivalent to grabbing every spinlock
- (and more). So the "read" side to such a lock is anything which
- disables preempt. */
+
#include <linux/cpu.h>
#include <linux/cpumask.h>
+#include <linux/list.h>
#include <asm/system.h>
+/*
+ * stop_cpu[s]() is a simplistic per-cpu maximum priority cpu
+ * monopolization mechanism. The caller can specify a non-sleeping
+ * function to be executed on a single or multiple cpus preempting all
+ * other processes and monopolizing those cpus until it finishes.
+ *
+ * Resources for this mechanism are preallocated when a cpu is brought
+ * up and requests are guaranteed to be served as long as the target
+ * cpus are online.
+ */
+typedef int (*cpu_stop_fn_t)(void *arg);
+
+#ifdef CONFIG_SMP
+
+struct cpu_stop_work {
+ struct list_head list; /* cpu_stopper->works */
+ cpu_stop_fn_t fn;
+ void *arg;
+ struct cpu_stop_done *done;
+};
+
+int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg);
+void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
+ struct cpu_stop_work *work_buf);
+int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
+int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
+
+#else /* CONFIG_SMP */
+
+#include <linux/workqueue.h>
+
+struct cpu_stop_work {
+ struct work_struct work;
+ cpu_stop_fn_t fn;
+ void *arg;
+};
+
+static inline int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
+{
+ int ret = -ENOENT;
+ preempt_disable();
+ if (cpu == smp_processor_id())
+ ret = fn(arg);
+ preempt_enable();
+ return ret;
+}
+
+static void stop_one_cpu_nowait_workfn(struct work_struct *work)
+{
+ struct cpu_stop_work *stwork =
+ container_of(work, struct cpu_stop_work, work);
+ preempt_disable();
+ stwork->fn(stwork->arg);
+ preempt_enable();
+}
+
+static inline void stop_one_cpu_nowait(unsigned int cpu,
+ cpu_stop_fn_t fn, void *arg,
+ struct cpu_stop_work *work_buf)
+{
+ if (cpu == smp_processor_id()) {
+ INIT_WORK(&work_buf->work, stop_one_cpu_nowait_workfn);
+ work_buf->fn = fn;
+ work_buf->arg = arg;
+ schedule_work(&work_buf->work);
+ }
+}
+
+static inline int stop_cpus(const struct cpumask *cpumask,
+ cpu_stop_fn_t fn, void *arg)
+{
+ if (cpumask_test_cpu(raw_smp_processor_id(), cpumask))
+ return stop_one_cpu(raw_smp_processor_id(), fn, arg);
+ return -ENOENT;
+}
+
+static inline int try_stop_cpus(const struct cpumask *cpumask,
+ cpu_stop_fn_t fn, void *arg)
+{
+ return stop_cpus(cpumask, fn, arg);
+}
+
+#endif /* CONFIG_SMP */
+
+/*
+ * stop_machine "Bogolock": stop the entire machine, disable
+ * interrupts. This is a very heavy lock, which is equivalent to
+ * grabbing every spinlock (and more). So the "read" side to such a
+ * lock is anything which disables preempt.
+ */
#if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP)
/**
@@ -36,24 +124,7 @@ int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
*/
int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
-/**
- * stop_machine_create: create all stop_machine threads
- *
- * Description: This causes all stop_machine threads to be created before
- * stop_machine actually gets called. This can be used by subsystems that
- * need a non failing stop_machine infrastructure.
- */
-int stop_machine_create(void);
-
-/**
- * stop_machine_destroy: destroy all stop_machine threads
- *
- * Description: This causes all stop_machine threads which were created with
- * stop_machine_create to be destroyed again.
- */
-void stop_machine_destroy(void);
-
-#else
+#else /* CONFIG_STOP_MACHINE && CONFIG_SMP */
static inline int stop_machine(int (*fn)(void *), void *data,
const struct cpumask *cpus)
@@ -65,8 +136,5 @@ static inline int stop_machine(int (*fn)(void *), void *data,
return ret;
}
-static inline int stop_machine_create(void) { return 0; }
-static inline void stop_machine_destroy(void) { }
-
-#endif /* CONFIG_SMP */
-#endif /* _LINUX_STOP_MACHINE */
+#endif /* CONFIG_STOP_MACHINE && CONFIG_SMP */
+#endif /* _LINUX_STOP_MACHINE */
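
A usage sketch for the new cpu_stop API (function names invented): the callback runs with the target CPU monopolized at maximum priority, so it must not sleep; stop_one_cpu() returns the callback's return value, or -ENOENT if the CPU could not be serviced.

#include <linux/kernel.h>
#include <linux/stop_machine.h>

/* runs on the target cpu with everything else preempted; must not sleep */
static int foo_read_counter(void *arg)
{
	u64 *val = arg;

	*val = 42;	/* e.g. sample a per-cpu hardware counter here */
	return 0;
}

static int foo_sample_cpu(unsigned int cpu)
{
	u64 sample;
	int ret;

	/* blocks until foo_read_counter() has run on @cpu */
	ret = stop_one_cpu(cpu, foo_read_counter, &sample);
	if (ret)
		return ret;

	pr_info("cpu%u sample: %llu\n", cpu, (unsigned long long)sample);
	return 0;
}
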
diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h
index 03f33330ece2..b22d7f189ceb 100644
--- a/include/linux/sunrpc/gss_api.h
+++ b/include/linux/sunrpc/gss_api.h
@@ -80,6 +80,8 @@ struct gss_api_mech {
/* pseudoflavors supported by this mechanism: */
int gm_pf_num;
struct pf_desc * gm_pfs;
+ /* Should the following be a callback operation instead? */
+ const char *gm_upcall_enctypes;
};
/* and must provide the following operations: */
diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h
index e7bbdba474d5..5e774a5abf2c 100644
--- a/include/linux/sunrpc/gss_krb5.h
+++ b/include/linux/sunrpc/gss_krb5.h
@@ -4,7 +4,7 @@
* Adapted from MIT Kerberos 5-1.2.1 lib/include/krb5.h,
* lib/gssapi/krb5/gssapiP_krb5.h, and others
*
- * Copyright (c) 2000 The Regents of the University of Michigan.
+ * Copyright (c) 2000-2008 The Regents of the University of Michigan.
* All rights reserved.
*
* Andy Adamson <andros@umich.edu>
@@ -36,17 +36,86 @@
*
*/
+#include <linux/crypto.h>
#include <linux/sunrpc/auth_gss.h>
#include <linux/sunrpc/gss_err.h>
#include <linux/sunrpc/gss_asn1.h>
+/* Length of constant used in key derivation */
+#define GSS_KRB5_K5CLENGTH (5)
+
+/* Maximum key length (in bytes) for the supported crypto algorithms*/
+#define GSS_KRB5_MAX_KEYLEN (32)
+
+/* Maximum checksum function output for the supported crypto algorithms */
+#define GSS_KRB5_MAX_CKSUM_LEN (20)
+
+/* Maximum blocksize for the supported crypto algorithms */
+#define GSS_KRB5_MAX_BLOCKSIZE (16)
+
+struct krb5_ctx;
+
+struct gss_krb5_enctype {
+ const u32 etype; /* encryption (key) type */
+ const u32 ctype; /* checksum type */
+ const char *name; /* "friendly" name */
+ const char *encrypt_name; /* crypto encrypt name */
+ const char *cksum_name; /* crypto checksum name */
+ const u16 signalg; /* signing algorithm */
+ const u16 sealalg; /* sealing algorithm */
+ const u32 blocksize; /* encryption blocksize */
+ const u32 conflen; /* confounder length
+ (normally the same as
+ the blocksize) */
+ const u32 cksumlength; /* checksum length */
+ const u32 keyed_cksum; /* is it a keyed cksum? */
+ const u32 keybytes; /* raw key len, in bytes */
+ const u32 keylength; /* final key len, in bytes */
+ u32 (*encrypt) (struct crypto_blkcipher *tfm,
+ void *iv, void *in, void *out,
+ int length); /* encryption function */
+ u32 (*decrypt) (struct crypto_blkcipher *tfm,
+ void *iv, void *in, void *out,
+ int length); /* decryption function */
+ u32 (*mk_key) (const struct gss_krb5_enctype *gk5e,
+ struct xdr_netobj *in,
+ struct xdr_netobj *out); /* complete key generation */
+ u32 (*encrypt_v2) (struct krb5_ctx *kctx, u32 offset,
+ struct xdr_buf *buf, int ec,
+ struct page **pages); /* v2 encryption function */
+ u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset,
+ struct xdr_buf *buf, u32 *headskip,
+ u32 *tailskip); /* v2 decryption function */
+};
+
+/* krb5_ctx flags definitions */
+#define KRB5_CTX_FLAG_INITIATOR 0x00000001
+#define KRB5_CTX_FLAG_CFX 0x00000002
+#define KRB5_CTX_FLAG_ACCEPTOR_SUBKEY 0x00000004
+
struct krb5_ctx {
int initiate; /* 1 = initiating, 0 = accepting */
+ u32 enctype;
+ u32 flags;
+ const struct gss_krb5_enctype *gk5e; /* enctype-specific info */
struct crypto_blkcipher *enc;
struct crypto_blkcipher *seq;
+ struct crypto_blkcipher *acceptor_enc;
+ struct crypto_blkcipher *initiator_enc;
+ struct crypto_blkcipher *acceptor_enc_aux;
+ struct crypto_blkcipher *initiator_enc_aux;
+ u8 Ksess[GSS_KRB5_MAX_KEYLEN]; /* session key */
+ u8 cksum[GSS_KRB5_MAX_KEYLEN];
s32 endtime;
u32 seq_send;
+ u64 seq_send64;
struct xdr_netobj mech_used;
+ u8 initiator_sign[GSS_KRB5_MAX_KEYLEN];
+ u8 acceptor_sign[GSS_KRB5_MAX_KEYLEN];
+ u8 initiator_seal[GSS_KRB5_MAX_KEYLEN];
+ u8 acceptor_seal[GSS_KRB5_MAX_KEYLEN];
+ u8 initiator_integ[GSS_KRB5_MAX_KEYLEN];
+ u8 acceptor_integ[GSS_KRB5_MAX_KEYLEN];
};
extern spinlock_t krb5_seq_lock;
@@ -57,6 +126,18 @@ extern spinlock_t krb5_seq_lock;
#define KG_TOK_MIC_MSG 0x0101
#define KG_TOK_WRAP_MSG 0x0201
+#define KG2_TOK_INITIAL 0x0101
+#define KG2_TOK_RESPONSE 0x0202
+#define KG2_TOK_MIC 0x0404
+#define KG2_TOK_WRAP 0x0504
+
+#define KG2_TOKEN_FLAG_SENTBYACCEPTOR 0x01
+#define KG2_TOKEN_FLAG_SEALED 0x02
+#define KG2_TOKEN_FLAG_ACCEPTORSUBKEY 0x04
+
+#define KG2_RESP_FLAG_ERROR 0x0001
+#define KG2_RESP_FLAG_DELEG_OK 0x0002
+
enum sgn_alg {
SGN_ALG_DES_MAC_MD5 = 0x0000,
SGN_ALG_MD2_5 = 0x0001,
@@ -81,6 +162,9 @@ enum seal_alg {
#define CKSUMTYPE_RSA_MD5_DES 0x0008
#define CKSUMTYPE_NIST_SHA 0x0009
#define CKSUMTYPE_HMAC_SHA1_DES3 0x000c
+#define CKSUMTYPE_HMAC_SHA1_96_AES128 0x000f
+#define CKSUMTYPE_HMAC_SHA1_96_AES256 0x0010
+#define CKSUMTYPE_HMAC_MD5_ARCFOUR -138 /* Microsoft md5 hmac cksumtype */
/* from gssapi_err_krb5.h */
#define KG_CCACHE_NOMATCH (39756032L)
@@ -111,11 +195,56 @@ enum seal_alg {
#define ENCTYPE_DES3_CBC_RAW 0x0006 /* DES-3 cbc mode raw */
#define ENCTYPE_DES_HMAC_SHA1 0x0008
#define ENCTYPE_DES3_CBC_SHA1 0x0010
+#define ENCTYPE_AES128_CTS_HMAC_SHA1_96 0x0011
+#define ENCTYPE_AES256_CTS_HMAC_SHA1_96 0x0012
+#define ENCTYPE_ARCFOUR_HMAC 0x0017
+#define ENCTYPE_ARCFOUR_HMAC_EXP 0x0018
#define ENCTYPE_UNKNOWN 0x01ff
-s32
-make_checksum(char *, char *header, int hdrlen, struct xdr_buf *body,
- int body_offset, struct xdr_netobj *cksum);
+/*
+ * Constants used for key derivation
+ */
+/* for 3DES */
+#define KG_USAGE_SEAL (22)
+#define KG_USAGE_SIGN (23)
+#define KG_USAGE_SEQ (24)
+
+/* from rfc3961 */
+#define KEY_USAGE_SEED_CHECKSUM (0x99)
+#define KEY_USAGE_SEED_ENCRYPTION (0xAA)
+#define KEY_USAGE_SEED_INTEGRITY (0x55)
+
+/* from rfc4121 */
+#define KG_USAGE_ACCEPTOR_SEAL (22)
+#define KG_USAGE_ACCEPTOR_SIGN (23)
+#define KG_USAGE_INITIATOR_SEAL (24)
+#define KG_USAGE_INITIATOR_SIGN (25)
+
+/*
+ * This compile-time check verifies that we will not exceed the
+ * slack space allotted by the client and server auth_gss code
+ * before they call gss_wrap().
+ */
+#define GSS_KRB5_MAX_SLACK_NEEDED \
+ (GSS_KRB5_TOK_HDR_LEN /* gss token header */ \
+ + GSS_KRB5_MAX_CKSUM_LEN /* gss token checksum */ \
+ + GSS_KRB5_MAX_BLOCKSIZE /* confounder */ \
+ + GSS_KRB5_MAX_BLOCKSIZE /* possible padding */ \
+ + GSS_KRB5_TOK_HDR_LEN /* encrypted hdr in v2 token */\
+ + GSS_KRB5_MAX_CKSUM_LEN /* encryption hmac */ \
+ + 4 + 4 /* RPC verifier */ \
+ + GSS_KRB5_TOK_HDR_LEN \
+ + GSS_KRB5_MAX_CKSUM_LEN)
+
+u32
+make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
+ struct xdr_buf *body, int body_offset, u8 *cksumkey,
+ unsigned int usage, struct xdr_netobj *cksumout);
+
+u32
+make_checksum_v2(struct krb5_ctx *, char *header, int hdrlen,
+ struct xdr_buf *body, int body_offset, u8 *key,
+ unsigned int usage, struct xdr_netobj *cksum);
u32 gss_get_mic_kerberos(struct gss_ctx *, struct xdr_buf *,
struct xdr_netobj *);
@@ -149,11 +278,53 @@ gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *inbuf,
int offset);
s32
-krb5_make_seq_num(struct crypto_blkcipher *key,
+krb5_make_seq_num(struct krb5_ctx *kctx,
+ struct crypto_blkcipher *key,
int direction,
u32 seqnum, unsigned char *cksum, unsigned char *buf);
s32
-krb5_get_seq_num(struct crypto_blkcipher *key,
+krb5_get_seq_num(struct krb5_ctx *kctx,
unsigned char *cksum,
unsigned char *buf, int *direction, u32 *seqnum);
+
+int
+xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen);
+
+u32
+krb5_derive_key(const struct gss_krb5_enctype *gk5e,
+ const struct xdr_netobj *inkey,
+ struct xdr_netobj *outkey,
+ const struct xdr_netobj *in_constant);
+
+u32
+gss_krb5_des3_make_key(const struct gss_krb5_enctype *gk5e,
+ struct xdr_netobj *randombits,
+ struct xdr_netobj *key);
+
+u32
+gss_krb5_aes_make_key(const struct gss_krb5_enctype *gk5e,
+ struct xdr_netobj *randombits,
+ struct xdr_netobj *key);
+
+u32
+gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
+ struct xdr_buf *buf, int ec,
+ struct page **pages);
+
+u32
+gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset,
+ struct xdr_buf *buf, u32 *plainoffset,
+ u32 *plainlen);
+
+int
+krb5_rc4_setup_seq_key(struct krb5_ctx *kctx,
+ struct crypto_blkcipher *cipher,
+ unsigned char *cksum);
+
+int
+krb5_rc4_setup_enc_key(struct krb5_ctx *kctx,
+ struct crypto_blkcipher *cipher,
+ s32 seqnum);
+void
+gss_krb5_make_confounder(char *p, u32 conflen);
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 6f9457a75b8f..8263f7aefedf 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -174,8 +174,7 @@ struct rpc_xprt {
/*
* Connection of transports
*/
- unsigned long connect_timeout,
- bind_timeout,
+ unsigned long bind_timeout,
reestablish_timeout;
unsigned int connect_cookie; /* A cookie that gets bumped
every time the transport
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 057929b0a651..f93a5c4b3618 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -834,6 +834,11 @@ asmlinkage long sys_pselect6(int, fd_set __user *, fd_set __user *,
asmlinkage long sys_ppoll(struct pollfd __user *, unsigned int,
struct timespec __user *, const sigset_t __user *,
size_t);
+asmlinkage long sys_fanotify_init(unsigned int flags, unsigned int event_f_flags,
+ unsigned int priority);
+asmlinkage long sys_fanotify_mark(int fanotify_fd, unsigned int flags,
+ u64 mask, int fd,
+ const char __user *pathname);
int kernel_execve(const char *filename, char *const argv[], char *const envp[]);
diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
index 99adcdc0d3ca..4496322e28dd 100644
--- a/include/linux/sysrq.h
+++ b/include/linux/sysrq.h
@@ -39,41 +39,34 @@ struct sysrq_key_op {
#ifdef CONFIG_MAGIC_SYSRQ
-extern int sysrq_on(void);
-
-/*
- * Do not use this one directly:
- */
-extern int __sysrq_enabled;
-
/* Generic SysRq interface -- you may call it from any device driver, supplying
* ASCII code of the key, pointer to registers and kbd/tty structs (if they
* are available -- else NULL's).
*/
void handle_sysrq(int key, struct tty_struct *tty);
-void __handle_sysrq(int key, struct tty_struct *tty, int check_mask);
int register_sysrq_key(int key, struct sysrq_key_op *op);
int unregister_sysrq_key(int key, struct sysrq_key_op *op);
struct sysrq_key_op *__sysrq_get_key_op(int key);
+int sysrq_toggle_support(int enable_mask);
+
#else
-static inline int sysrq_on(void)
+static inline void handle_sysrq(int key, struct tty_struct *tty)
{
- return 0;
}
-static inline int __reterr(void)
+
+static inline int register_sysrq_key(int key, struct sysrq_key_op *op)
{
return -EINVAL;
}
-static inline void handle_sysrq(int key, struct tty_struct *tty)
+
+static inline int unregister_sysrq_key(int key, struct sysrq_key_op *op)
{
+ return -EINVAL;
}
-#define register_sysrq_key(ig,nore) __reterr()
-#define unregister_sysrq_key(ig,nore) __reterr()
-
#endif
#endif /* _LINUX_SYSRQ_H */
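As a sketch of the trimmed-down interface above, a driver could register its own key; the sysrq_key_op fields used here (handler, help_msg, action_msg) are assumed from the structure as it exists in this kernel version:

	static void example_sysrq_handler(int key, struct tty_struct *tty)
	{
		pr_info("example: sysrq '%c' pressed\n", key);
	}

	static struct sysrq_key_op example_sysrq_op = {
		.handler    = example_sysrq_handler,
		.help_msg   = "example(x)",
		.action_msg = "Example action",
	};

	/* in module init: */
	if (register_sysrq_key('x', &example_sysrq_op))
		pr_err("example: sysrq key already taken\n");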
diff --git a/include/linux/tca6416_keypad.h b/include/linux/tca6416_keypad.h
new file mode 100644
index 000000000000..7bd266f3525c
--- /dev/null
+++ b/include/linux/tca6416_keypad.h
@@ -0,0 +1,34 @@
+/*
+ * tca6416 keypad platform support
+ *
+ * Copyright (C) 2010 Texas Instruments
+ *
+ * Author: Sriramakrishnan <srk@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _TCA6416_KEYS_H
+#define _TCA6416_KEYS_H
+
+#include <linux/types.h>
+
+struct tca6416_button {
+ /* Configuration parameters */
+ int code; /* input event code (KEY_*, SW_*) */
+ int active_low;
+ int type; /* input event type (EV_KEY, EV_SW) */
+};
+
+struct tca6416_keys_platform_data {
+ struct tca6416_button *buttons;
+ int nbuttons;
+ unsigned int rep:1; /* enable input subsystem auto repeat */
+ uint16_t pinmask;
+ uint16_t invert;
+ int irq_is_gpio;
+ int use_polling; /* use polling if interrupt is not connected */
+};
+#endif
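A board file would typically instantiate this platform data as a static table; the key codes and pin assignments below are made-up values for illustration (KEY_*/EV_KEY from <linux/input.h>, ARRAY_SIZE from <linux/kernel.h>):

	static struct tca6416_button example_buttons[] = {
		{ .code = KEY_UP,   .active_low = 1, .type = EV_KEY },
		{ .code = KEY_DOWN, .active_low = 1, .type = EV_KEY },
	};

	static struct tca6416_keys_platform_data example_keys_pdata = {
		.buttons     = example_buttons,
		.nbuttons    = ARRAY_SIZE(example_buttons),
		.rep         = 1,
		.pinmask     = 0x0003,	/* buttons wired to pins 0 and 1 (assumed) */
		.use_polling = 1,	/* no interrupt line hooked up */
	};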
diff --git a/include/linux/tick.h b/include/linux/tick.h
index d2ae79e21be3..b232ccc0ee29 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -42,6 +42,7 @@ enum tick_nohz_mode {
* @idle_waketime: Time when the idle was interrupted
* @idle_exittime: Time when the idle state was left
* @idle_sleeptime: Sum of the time slept in idle with sched tick stopped
+ * @iowait_sleeptime: Sum of the time slept in idle with sched tick stopped, with IO outstanding
* @sleep_length: Duration of the current idle sleep
* @do_timer_lst: CPU was the last one doing do_timer before going idle
*/
@@ -60,7 +61,7 @@ struct tick_sched {
ktime_t idle_waketime;
ktime_t idle_exittime;
ktime_t idle_sleeptime;
- ktime_t idle_lastupdate;
+ ktime_t iowait_sleeptime;
ktime_t sleep_length;
unsigned long last_jiffies;
unsigned long next_jiffies;
@@ -124,6 +125,7 @@ extern void tick_nohz_stop_sched_tick(int inidle);
extern void tick_nohz_restart_sched_tick(void);
extern ktime_t tick_nohz_get_sleep_length(void);
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
+extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
# else
static inline void tick_nohz_stop_sched_tick(int inidle) { }
static inline void tick_nohz_restart_sched_tick(void) { }
@@ -134,6 +136,7 @@ static inline ktime_t tick_nohz_get_sleep_length(void)
return len;
}
static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
+static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
# endif /* !NO_HZ */
#endif
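As a rough sketch of the new accessor, a governor-style consumer could sample per-CPU iowait time like this (the surrounding bookkeeping is assumed):

	static u64 example_sample_iowait(int cpu)
	{
		u64 last_update;
		u64 iowait_us = get_cpu_iowait_time_us(cpu, &last_update);

		if (iowait_us == (u64)-1)
			return 0;	/* NO_HZ not active; fall back to other accounting */
		return iowait_us;	/* cumulative usecs spent idle with IO outstanding */
	}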
diff --git a/include/linux/timb_dma.h b/include/linux/timb_dma.h
new file mode 100644
index 000000000000..bb043e970b96
--- /dev/null
+++ b/include/linux/timb_dma.h
@@ -0,0 +1,55 @@
+/*
+ * timb_dma.h timberdale FPGA DMA driver defines
+ * Copyright (c) 2010 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* Supports:
+ * Timberdale FPGA DMA engine
+ */
+
+#ifndef _LINUX_TIMB_DMA_H
+#define _LINUX_TIMB_DMA_H
+
+/**
+ * struct timb_dma_platform_data_channel - Description of each individual
+ * DMA channel for the timberdale DMA driver
+ * @rx: true if this channel handles data in the direction to
+ * the CPU.
+ * @bytes_per_line: Number of bytes per line; this is specific to channels
+ * handling video data. For other channels it shall be left at 0.
+ * @descriptors: Number of descriptors to allocate for this channel.
+ * @descriptor_elements: Number of elements in each descriptor.
+ *
+ */
+struct timb_dma_platform_data_channel {
+ bool rx;
+ unsigned int bytes_per_line;
+ unsigned int descriptors;
+ unsigned int descriptor_elements;
+};
+
+/**
+ * struct timb_dma_platform_data - Platform data of the timberdale DMA driver
+ * @nr_channels: Number of defined channels in the channels array.
+ * @channels: Definition of each channel.
+ *
+ */
+struct timb_dma_platform_data {
+ unsigned nr_channels;
+ struct timb_dma_platform_data_channel channels[32];
+};
+
+#endif
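A platform definition for a board with one video capture channel and one transmit channel might look like the following; the numbers are placeholders:

	static struct timb_dma_platform_data example_timbdma_pdata = {
		.nr_channels = 2,
		.channels = {
			{	/* channel 0: video data towards the CPU */
				.rx = true,
				.bytes_per_line = 1440,	/* assumed line length */
				.descriptors = 2,
				.descriptor_elements = 16,
			},
			{	/* channel 1: transmit, non-video */
				.rx = false,
				.bytes_per_line = 0,
				.descriptors = 2,
				.descriptor_elements = 16,
			},
		},
	};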
diff --git a/include/linux/time.h b/include/linux/time.h
index 6e026e45a179..ea3559f0b3f2 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -150,7 +150,6 @@ extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
extern int timekeeping_valid_for_hres(void);
extern u64 timekeeping_max_deferment(void);
extern void update_wall_time(void);
-extern void update_xtime_cache(u64 nsec);
extern void timekeeping_leap_insert(int leapsecond);
struct tms;
diff --git a/include/linux/timer.h b/include/linux/timer.h
index a2d1eb6cb3f0..ea965b857a50 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -10,13 +10,19 @@
struct tvec_base;
struct timer_list {
+ /*
+ * All fields that change during normal runtime are grouped to the
+ * same cacheline
+ */
struct list_head entry;
unsigned long expires;
+ struct tvec_base *base;
void (*function)(unsigned long);
unsigned long data;
- struct tvec_base *base;
+ int slack;
+
#ifdef CONFIG_TIMER_STATS
void *start_site;
char start_comm[16];
@@ -165,6 +171,8 @@ extern int mod_timer(struct timer_list *timer, unsigned long expires);
extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
extern int mod_timer_pinned(struct timer_list *timer, unsigned long expires);
+extern void set_timer_slack(struct timer_list *time, int slack_hz);
+
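As a sketch of the new hook, a driver that tolerates imprecise expiry might allow up to HZ/10 jiffies of slack so its timer can be coalesced with others (the timer and callback names are illustrative):

	static struct timer_list example_timer;

	static void example_timer_fn(unsigned long data)
	{
		/* periodic, latency-tolerant housekeeping */
	}

	static int __init example_init(void)
	{
		setup_timer(&example_timer, example_timer_fn, 0);
		set_timer_slack(&example_timer, HZ / 10);
		mod_timer(&example_timer, jiffies + HZ);
		return 0;
	}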
#define TIMER_NOT_PINNED 0
#define TIMER_PINNED 1
/*
diff --git a/include/linux/timex.h b/include/linux/timex.h
index 7a082b32d8e1..32d852f8cbe4 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -232,13 +232,11 @@ struct timex {
*/
extern unsigned long tick_usec; /* USER_HZ period (usec) */
extern unsigned long tick_nsec; /* ACTHZ period (nsec) */
-extern int tickadj; /* amount of adjustment per tick */
/*
* phase-lock loop variables
*/
extern int time_status; /* clock synchronization status bits */
-extern long time_adjust; /* The amount of adjtime left */
extern void ntp_init(void);
extern void ntp_clear(void);
@@ -271,9 +269,6 @@ extern void second_overflow(void);
extern void update_ntp_one_tick(void);
extern int do_adjtimex(struct timex *);
-/* Don't use! Compatibility define for existing users. */
-#define tickadj (500/HZ ? : 1)
-
int read_current_timer(unsigned long *timer_val);
/* The clock frequency of the i8253/i8254 PIT */
diff --git a/include/linux/tipc.h b/include/linux/tipc.h
index 3d92396639de..9536d8aeadf1 100644
--- a/include/linux/tipc.h
+++ b/include/linux/tipc.h
@@ -127,23 +127,17 @@ static inline unsigned int tipc_node(__u32 addr)
* TIPC topology subscription service definitions
*/
-#define TIPC_SUB_PORTS 0x01 /* filter for port availability */
-#define TIPC_SUB_SERVICE 0x02 /* filter for service availability */
-#define TIPC_SUB_CANCEL 0x04 /* cancel a subscription */
-#if 0
-/* The following filter options are not currently implemented */
-#define TIPC_SUB_NO_BIND_EVTS 0x04 /* filter out "publish" events */
-#define TIPC_SUB_NO_UNBIND_EVTS 0x08 /* filter out "withdraw" events */
-#define TIPC_SUB_SINGLE_EVT 0x10 /* expire after first event */
-#endif
+#define TIPC_SUB_SERVICE 0x00 /* Filter for service availability */
+#define TIPC_SUB_PORTS 0x01 /* Filter for port availability */
+#define TIPC_SUB_CANCEL 0x04 /* Cancel a subscription */
#define TIPC_WAIT_FOREVER ~0 /* timeout for permanent subscription */
struct tipc_subscr {
- struct tipc_name_seq seq; /* name sequence of interest */
- __u32 timeout; /* subscription duration (in ms) */
- __u32 filter; /* bitmask of filter options */
- char usr_handle[8]; /* available for subscriber use */
+ struct tipc_name_seq seq; /* NBO. Name sequence of interest */
+ __u32 timeout; /* NBO. Subscription duration (in ms) */
+ __u32 filter; /* NBO. Bitmask of filter options */
+ char usr_handle[8]; /* Opaque. Available for subscriber use */
};
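Since the numeric fields are carried in network byte order (NBO), a subscriber fills them with htonl(); the name sequence values are arbitrary, and the topology-service socket setup is assumed:

	void example_subscribe(int topsrv_sd)
	{
		struct tipc_subscr sub = {
			.seq.type  = htonl(1000),	/* service type of interest */
			.seq.lower = htonl(0),
			.seq.upper = htonl(99),
			.timeout   = htonl(TIPC_WAIT_FOREVER),
			.filter    = htonl(TIPC_SUB_SERVICE),
		};

		strcpy(sub.usr_handle, "example");	/* opaque, echoed back in events */
		send(topsrv_sd, &sub, sizeof(sub), 0);
	}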
#define TIPC_PUBLISHED 1 /* publication event */
@@ -151,11 +145,11 @@ struct tipc_subscr {
#define TIPC_SUBSCR_TIMEOUT 3 /* subscription timeout event */
struct tipc_event {
- __u32 event; /* event type */
- __u32 found_lower; /* matching name seq instances */
- __u32 found_upper; /* " " " " */
- struct tipc_portid port; /* associated port */
- struct tipc_subscr s; /* associated subscription */
+ __u32 event; /* NBO. Event type, as defined above */
+ __u32 found_lower; /* NBO. Matching name seq instances */
+ __u32 found_upper; /* " " " " " */
+ struct tipc_portid port; /* NBO. Associated port */
+ struct tipc_subscr s; /* Original, associated subscription */
};
/*
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 78b4bd3be496..1d85f9a6a199 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -33,6 +33,65 @@ struct tracepoint {
* Keep in sync with vmlinux.lds.h.
*/
+/*
+ * Connect a probe to a tracepoint.
+ * Internal API, should not be used directly.
+ */
+extern int tracepoint_probe_register(const char *name, void *probe);
+
+/*
+ * Disconnect a probe from a tracepoint.
+ * Internal API, should not be used directly.
+ */
+extern int tracepoint_probe_unregister(const char *name, void *probe);
+
+extern int tracepoint_probe_register_noupdate(const char *name, void *probe);
+extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe);
+extern void tracepoint_probe_update_all(void);
+
+struct tracepoint_iter {
+ struct module *module;
+ struct tracepoint *tracepoint;
+};
+
+extern void tracepoint_iter_start(struct tracepoint_iter *iter);
+extern void tracepoint_iter_next(struct tracepoint_iter *iter);
+extern void tracepoint_iter_stop(struct tracepoint_iter *iter);
+extern void tracepoint_iter_reset(struct tracepoint_iter *iter);
+extern int tracepoint_get_iter_range(struct tracepoint **tracepoint,
+ struct tracepoint *begin, struct tracepoint *end);
+
+/*
+ * tracepoint_synchronize_unregister must be called between the last tracepoint
+ * probe unregistration and the end of module exit to make sure there is no
+ * caller executing a probe when it is freed.
+ */
+static inline void tracepoint_synchronize_unregister(void)
+{
+ synchronize_sched();
+}
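For example, a probe module's exit path orders the calls like this (the tracepoint name and probe function are hypothetical):

	static void __exit example_exit(void)
	{
		tracepoint_probe_unregister("example_event", example_probe);
		tracepoint_synchronize_unregister();
		/* only now is it safe for example_probe's text and data to go away */
	}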
+
+#define PARAMS(args...) args
+
+#ifdef CONFIG_TRACEPOINTS
+extern void tracepoint_update_probe_range(struct tracepoint *begin,
+ struct tracepoint *end);
+#else
+static inline void tracepoint_update_probe_range(struct tracepoint *begin,
+ struct tracepoint *end)
+{ }
+#endif /* CONFIG_TRACEPOINTS */
+
+#endif /* _LINUX_TRACEPOINT_H */
+
+/*
+ * Note: we keep the TRACE_EVENT and DECLARE_TRACE outside the include
+ * file ifdef protection.
+ * This is due to the way trace events work. If a file includes two
+ * trace event headers under one "CREATE_TRACE_POINTS" the first include
+ * will override the TRACE_EVENT and break the second include.
+ */
+
#ifndef DECLARE_TRACE
#define TP_PROTO(args...) args
@@ -96,9 +155,6 @@ struct tracepoint {
#define EXPORT_TRACEPOINT_SYMBOL(name) \
EXPORT_SYMBOL(__tracepoint_##name)
-extern void tracepoint_update_probe_range(struct tracepoint *begin,
- struct tracepoint *end);
-
#else /* !CONFIG_TRACEPOINTS */
#define DECLARE_TRACE(name, proto, args) \
static inline void _do_trace_##name(struct tracepoint *tp, proto) \
@@ -119,61 +175,9 @@ extern void tracepoint_update_probe_range(struct tracepoint *begin,
#define EXPORT_TRACEPOINT_SYMBOL_GPL(name)
#define EXPORT_TRACEPOINT_SYMBOL(name)
-static inline void tracepoint_update_probe_range(struct tracepoint *begin,
- struct tracepoint *end)
-{ }
#endif /* CONFIG_TRACEPOINTS */
#endif /* DECLARE_TRACE */
-/*
- * Connect a probe to a tracepoint.
- * Internal API, should not be used directly.
- */
-extern int tracepoint_probe_register(const char *name, void *probe);
-
-/*
- * Disconnect a probe from a tracepoint.
- * Internal API, should not be used directly.
- */
-extern int tracepoint_probe_unregister(const char *name, void *probe);
-
-extern int tracepoint_probe_register_noupdate(const char *name, void *probe);
-extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe);
-extern void tracepoint_probe_update_all(void);
-
-struct tracepoint_iter {
- struct module *module;
- struct tracepoint *tracepoint;
-};
-
-extern void tracepoint_iter_start(struct tracepoint_iter *iter);
-extern void tracepoint_iter_next(struct tracepoint_iter *iter);
-extern void tracepoint_iter_stop(struct tracepoint_iter *iter);
-extern void tracepoint_iter_reset(struct tracepoint_iter *iter);
-extern int tracepoint_get_iter_range(struct tracepoint **tracepoint,
- struct tracepoint *begin, struct tracepoint *end);
-
-/*
- * tracepoint_synchronize_unregister must be called between the last tracepoint
- * probe unregistration and the end of module exit to make sure there is no
- * caller executing a probe when it is freed.
- */
-static inline void tracepoint_synchronize_unregister(void)
-{
- synchronize_sched();
-}
-
-#define PARAMS(args...) args
-
-#endif /* _LINUX_TRACEPOINT_H */
-
-/*
- * Note: we keep the TRACE_EVENT outside the include file ifdef protection.
- * This is due to the way trace events work. If a file includes two
- * trace event headers under one "CREATE_TRACE_POINTS" the first include
- * will override the TRACE_EVENT and break the second include.
- */
-
#ifndef TRACE_EVENT
/*
* For use with the TRACE_EVENT macro:
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 4409967db0c4..bb44fa9ae135 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -23,7 +23,7 @@
*/
#define NR_UNIX98_PTY_DEFAULT 4096 /* Default maximum for Unix98 ptys */
#define NR_UNIX98_PTY_MAX (1 << MINORBITS) /* Absolute limit */
-#define NR_LDISCS 20
+#define NR_LDISCS 21
/* line disciplines */
#define N_TTY 0
@@ -46,8 +46,8 @@
#define N_GIGASET_M101 16 /* Siemens Gigaset M101 serial DECT adapter */
#define N_SLCAN 17 /* Serial / USB serial CAN Adaptors */
#define N_PPS 18 /* Pulse per Second */
-
#define N_V253 19 /* Codec control over voice modem */
+#define N_CAIF 20 /* CAIF protocol for talking to modems */
/*
* This character is the same as _POSIX_VDISABLE: it cannot be used as
diff --git a/include/linux/usb/audio-v2.h b/include/linux/usb/audio-v2.h
new file mode 100644
index 000000000000..2389f93a28b5
--- /dev/null
+++ b/include/linux/usb/audio-v2.h
@@ -0,0 +1,378 @@
+/*
+ * Copyright (c) 2010 Daniel Mack <daniel@caiaq.de>
+ *
+ * This software is distributed under the terms of the GNU General Public
+ * License ("GPL") version 2, as published by the Free Software Foundation.
+ *
+ * This file holds USB constants and structures defined
+ * by the USB Device Class Definition for Audio Devices in version 2.0.
+ * Comments below reference relevant sections of the documents contained
+ * in http://www.usb.org/developers/devclass_docs/Audio2.0_final.zip
+ */
+
+#ifndef __LINUX_USB_AUDIO_V2_H
+#define __LINUX_USB_AUDIO_V2_H
+
+#include <linux/types.h>
+
+/* v1.0 and v2.0 of this standard have many things in common. For the rest
+ * of the definitions, please refer to audio.h */
+
+/* 4.7.2.1 Clock Source Descriptor */
+
+struct uac_clock_source_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bClockID;
+ __u8 bmAttributes;
+ __u8 bmControls;
+ __u8 bAssocTerminal;
+ __u8 iClockSource;
+} __attribute__((packed));
+
+/* 4.7.2.2 Clock Selector Descriptor */
+
+struct uac_clock_selector_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bClockID;
+ __u8 bNrInPins;
+ __u8 bmControls;
+ __u8 baCSourceID[];
+} __attribute__((packed));
+
+/* 4.7.2.4 Input terminal descriptor */
+
+struct uac2_input_terminal_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bTerminalID;
+ __u16 wTerminalType;
+ __u8 bAssocTerminal;
+ __u8 bCSourceID;
+ __u8 bNrChannels;
+ __u32 bmChannelConfig;
+ __u8 iChannelNames;
+ __u16 bmControls;
+ __u8 iTerminal;
+} __attribute__((packed));
+
+/* 4.7.2.5 Output terminal descriptor */
+
+struct uac2_output_terminal_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bTerminalID;
+ __u16 wTerminalType;
+ __u8 bAssocTerminal;
+ __u8 bSourceID;
+ __u8 bCSourceID;
+ __u16 bmControls;
+ __u8 iTerminal;
+} __attribute__((packed));
+
+
+
+/* 4.7.2.8 Feature Unit Descriptor */
+
+struct uac2_feature_unit_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bUnitID;
+ __u8 bSourceID;
+ /* bmaControls is actually u32,
+ * but u8 is needed for the hybrid parser */
+ __u8 bmaControls[0]; /* variable length */
+} __attribute__((packed));
+
+/* 4.9.2 Class-Specific AS Interface Descriptor */
+
+struct uac_as_header_descriptor_v2 {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bTerminalLink;
+ __u8 bmControls;
+ __u8 bFormatType;
+ __u32 bmFormats;
+ __u8 bNrChannels;
+ __u32 bmChannelConfig;
+ __u8 iChannelNames;
+} __attribute__((packed));
+
+/* 6.1 Interrupt Data Message */
+
+#define UAC2_INTERRUPT_DATA_MSG_VENDOR (1 << 0)
+#define UAC2_INTERRUPT_DATA_MSG_EP (1 << 1)
+
+struct uac2_interrupt_data_msg {
+ __u8 bInfo;
+ __u8 bAttribute;
+ __le16 wValue;
+ __le16 wIndex;
+} __attribute__((packed));
+
+/* A.7 Audio Function Category Codes */
+#define UAC2_FUNCTION_SUBCLASS_UNDEFINED 0x00
+#define UAC2_FUNCTION_DESKTOP_SPEAKER 0x01
+#define UAC2_FUNCTION_HOME_THEATER 0x02
+#define UAC2_FUNCTION_MICROPHONE 0x03
+#define UAC2_FUNCTION_HEADSET 0x04
+#define UAC2_FUNCTION_TELEPHONE 0x05
+#define UAC2_FUNCTION_CONVERTER 0x06
+#define UAC2_FUNCTION_SOUND_RECORDER 0x07
+#define UAC2_FUNCTION_IO_BOX 0x08
+#define UAC2_FUNCTION_MUSICAL_INSTRUMENT 0x09
+#define UAC2_FUNCTION_PRO_AUDIO 0x0a
+#define UAC2_FUNCTION_AUDIO_VIDEO 0x0b
+#define UAC2_FUNCTION_CONTROL_PANEL 0x0c
+#define UAC2_FUNCTION_OTHER 0xff
+
+/* A.9 Audio Class-Specific AC Interface Descriptor Subtypes */
+/* see audio.h for the rest, which is identical to v1 */
+#define UAC2_EFFECT_UNIT 0x07
+#define UAC2_PROCESSING_UNIT_V2 0x08
+#define UAC2_EXTENSION_UNIT_V2 0x09
+#define UAC2_CLOCK_SOURCE 0x0a
+#define UAC2_CLOCK_SELECTOR 0x0b
+#define UAC2_CLOCK_MULTIPLIER 0x0c
+#define UAC2_SAMPLE_RATE_CONVERTER 0x0d
+
+/* A.10 Audio Class-Specific AS Interface Descriptor Subtypes */
+/* see audio.h for the rest, which is identical to v1 */
+#define UAC2_ENCODER 0x03
+#define UAC2_DECODER 0x04
+
+/* A.11 Effect Unit Effect Types */
+#define UAC2_EFFECT_UNDEFINED 0x00
+#define UAC2_EFFECT_PARAM_EQ 0x01
+#define UAC2_EFFECT_REVERB 0x02
+#define UAC2_EFFECT_MOD_DELAY 0x03
+#define UAC2_EFFECT_DYN_RANGE_COMP 0x04
+
+/* A.12 Processing Unit Process Types */
+#define UAC2_PROCESS_UNDEFINED 0x00
+#define UAC2_PROCESS_UP_DOWNMIX 0x01
+#define UAC2_PROCESS_DOLBY_PROLOCIC 0x02
+#define UAC2_PROCESS_STEREO_EXTENDER 0x03
+
+/* A.14 Audio Class-Specific Request Codes */
+#define UAC2_CS_CUR 0x01
+#define UAC2_CS_RANGE 0x02
+#define UAC2_CS_MEM 0x03
+
+/* A.15 Encoder Type Codes */
+#define UAC2_ENCODER_UNDEFINED 0x00
+#define UAC2_ENCODER_OTHER 0x01
+#define UAC2_ENCODER_MPEG 0x02
+#define UAC2_ENCODER_AC3 0x03
+#define UAC2_ENCODER_WMA 0x04
+#define UAC2_ENCODER_DTS 0x05
+
+/* A.16 Decoder Type Codes */
+#define UAC2_DECODER_UNDEFINED 0x00
+#define UAC2_DECODER_OTHER 0x01
+#define UAC2_DECODER_MPEG 0x02
+#define UAC2_DECODER_AC3 0x03
+#define UAC2_DECODER_WMA 0x04
+#define UAC2_DECODER_DTS 0x05
+
+/* A.17.1 Clock Source Control Selectors */
+#define UAC2_CS_UNDEFINED 0x00
+#define UAC2_CS_CONTROL_SAM_FREQ 0x01
+#define UAC2_CS_CONTROL_CLOCK_VALID 0x02
+
+/* A.17.2 Clock Selector Control Selectors */
+#define UAC2_CX_UNDEFINED 0x00
+#define UAC2_CX_CLOCK_SELECTOR 0x01
+
+/* A.17.3 Clock Multiplier Control Selectors */
+#define UAC2_CM_UNDEFINED 0x00
+#define UAC2_CM_NUMERATOR 0x01
+#define UAC2_CM_DENOMINTATOR 0x02
+
+/* A.17.4 Terminal Control Selectors */
+#define UAC2_TE_UNDEFINED 0x00
+#define UAC2_TE_COPY_PROTECT 0x01
+#define UAC2_TE_CONNECTOR 0x02
+#define UAC2_TE_OVERLOAD 0x03
+#define UAC2_TE_CLUSTER 0x04
+#define UAC2_TE_UNDERFLOW 0x05
+#define UAC2_TE_OVERFLOW 0x06
+#define UAC2_TE_LATENCY 0x07
+
+/* A.17.5 Mixer Control Selectors */
+#define UAC2_MU_UNDEFINED 0x00
+#define UAC2_MU_MIXER 0x01
+#define UAC2_MU_CLUSTER 0x02
+#define UAC2_MU_UNDERFLOW 0x03
+#define UAC2_MU_OVERFLOW 0x04
+#define UAC2_MU_LATENCY 0x05
+
+/* A.17.6 Selector Control Selectors */
+#define UAC2_SU_UNDEFINED 0x00
+#define UAC2_SU_SELECTOR 0x01
+#define UAC2_SU_LATENCY 0x02
+
+/* A.17.7 Feature Unit Control Selectors */
+/* see audio.h for the rest, which is identical to v1 */
+#define UAC2_FU_INPUT_GAIN 0x0b
+#define UAC2_FU_INPUT_GAIN_PAD 0x0c
+#define UAC2_FU_PHASE_INVERTER 0x0d
+#define UAC2_FU_UNDERFLOW 0x0e
+#define UAC2_FU_OVERFLOW 0x0f
+#define UAC2_FU_LATENCY 0x10
+
+/* A.17.8.1 Parametric Equalizer Section Effect Unit Control Selectors */
+#define UAC2_PE_UNDEFINED 0x00
+#define UAC2_PE_ENABLE 0x01
+#define UAC2_PE_CENTERFREQ 0x02
+#define UAC2_PE_QFACTOR 0x03
+#define UAC2_PE_GAIN 0x04
+#define UAC2_PE_UNDERFLOW 0x05
+#define UAC2_PE_OVERFLOW 0x06
+#define UAC2_PE_LATENCY 0x07
+
+/* A.17.8.2 Reverberation Effect Unit Control Selectors */
+#define UAC2_RV_UNDEFINED 0x00
+#define UAC2_RV_ENABLE 0x01
+#define UAC2_RV_TYPE 0x02
+#define UAC2_RV_LEVEL 0x03
+#define UAC2_RV_TIME 0x04
+#define UAC2_RV_FEEDBACK 0x05
+#define UAC2_RV_PREDELAY 0x06
+#define UAC2_RV_DENSITY 0x07
+#define UAC2_RV_HIFREQ_ROLLOFF 0x08
+#define UAC2_RV_UNDERFLOW 0x09
+#define UAC2_RV_OVERFLOW 0x0a
+#define UAC2_RV_LATENCY 0x0b
+
+/* A.17.8.3 Modulation Delay Effect Control Selectors */
+#define UAC2_MD_UNDEFINED 0x00
+#define UAC2_MD_ENABLE 0x01
+#define UAC2_MD_BALANCE 0x02
+#define UAC2_MD_RATE 0x03
+#define UAC2_MD_DEPTH 0x04
+#define UAC2_MD_TIME 0x05
+#define UAC2_MD_FEEDBACK 0x06
+#define UAC2_MD_UNDERFLOW 0x07
+#define UAC2_MD_OVERFLOW 0x08
+#define UAC2_MD_LATENCY 0x09
+
+/* A.17.8.4 Dynamic Range Compressor Effect Unit Control Selectors */
+#define UAC2_DR_UNDEFINED 0x00
+#define UAC2_DR_ENABLE 0x01
+#define UAC2_DR_COMPRESSION_RATE 0x02
+#define UAC2_DR_MAXAMPL 0x03
+#define UAC2_DR_THRESHOLD 0x04
+#define UAC2_DR_ATTACK_TIME 0x05
+#define UAC2_DR_RELEASE_TIME 0x06
+#define UAC2_DR_UNDEFLOW 0x07
+#define UAC2_DR_OVERFLOW 0x08
+#define UAC2_DR_LATENCY 0x09
+
+/* A.17.9.1 Up/Down-mix Processing Unit Control Selectors */
+#define UAC2_UD_UNDEFINED 0x00
+#define UAC2_UD_ENABLE 0x01
+#define UAC2_UD_MODE_SELECT 0x02
+#define UAC2_UD_CLUSTER 0x03
+#define UAC2_UD_UNDERFLOW 0x04
+#define UAC2_UD_OVERFLOW 0x05
+#define UAC2_UD_LATENCY 0x06
+
+/* A.17.9.2 Dolby Prologic[tm] Processing Unit Control Selectors */
+#define UAC2_DP_UNDEFINED 0x00
+#define UAC2_DP_ENABLE 0x01
+#define UAC2_DP_MODE_SELECT 0x02
+#define UAC2_DP_CLUSTER 0x03
+#define UAC2_DP_UNDERFFLOW 0x04
+#define UAC2_DP_OVERFLOW 0x05
+#define UAC2_DP_LATENCY 0x06
+
+/* A.17.9.3 Stereo Expander Processing Unit Control Selectors */
+#define UAC2_ST_EXT_UNDEFINED 0x00
+#define UAC2_ST_EXT_ENABLE 0x01
+#define UAC2_ST_EXT_WIDTH 0x02
+#define UAC2_ST_EXT_UNDEFLOW 0x03
+#define UAC2_ST_EXT_OVERFLOW 0x04
+#define UAC2_ST_EXT_LATENCY 0x05
+
+/* A.17.10 Extension Unit Control Selectors */
+#define UAC2_XU_UNDEFINED 0x00
+#define UAC2_XU_ENABLE 0x01
+#define UAC2_XU_CLUSTER 0x02
+#define UAC2_XU_UNDERFLOW 0x03
+#define UAC2_XU_OVERFLOW 0x04
+#define UAC2_XU_LATENCY 0x05
+
+/* A.17.11 AudioStreaming Interface Control Selectors */
+#define UAC2_AS_UNDEFINED 0x00
+#define UAC2_AS_ACT_ALT_SETTING 0x01
+#define UAC2_AS_VAL_ALT_SETTINGS 0x02
+#define UAC2_AS_AUDIO_DATA_FORMAT 0x03
+
+/* A.17.12 Encoder Control Selectors */
+#define UAC2_EN_UNDEFINED 0x00
+#define UAC2_EN_BIT_RATE 0x01
+#define UAC2_EN_QUALITY 0x02
+#define UAC2_EN_VBR 0x03
+#define UAC2_EN_TYPE 0x04
+#define UAC2_EN_UNDERFLOW 0x05
+#define UAC2_EN_OVERFLOW 0x06
+#define UAC2_EN_ENCODER_ERROR 0x07
+#define UAC2_EN_PARAM1 0x08
+#define UAC2_EN_PARAM2 0x09
+#define UAC2_EN_PARAM3 0x0a
+#define UAC2_EN_PARAM4 0x0b
+#define UAC2_EN_PARAM5 0x0c
+#define UAC2_EN_PARAM6 0x0d
+#define UAC2_EN_PARAM7 0x0e
+#define UAC2_EN_PARAM8 0x0f
+
+/* A.17.13.1 MPEG Decoder Control Selectors */
+#define UAC2_MPEG_UNDEFINED 0x00
+#define UAC2_MPEG_DUAL_CHANNEL 0x01
+#define UAC2_MPEG_SECOND_STEREO 0x02
+#define UAC2_MPEG_MULTILINGUAL 0x03
+#define UAC2_MPEG_DYN_RANGE 0x04
+#define UAC2_MPEG_SCALING 0x05
+#define UAC2_MPEG_HILO_SCALING 0x06
+#define UAC2_MPEG_UNDERFLOW 0x07
+#define UAC2_MPEG_OVERFLOW 0x08
+#define UAC2_MPEG_DECODER_ERROR 0x09
+
+/* A.17.13.2 AC3 Decoder Control Selectors */
+#define UAC2_AC3_UNDEFINED 0x00
+#define UAC2_AC3_MODE 0x01
+#define UAC2_AC3_DYN_RANGE 0x02
+#define UAC2_AC3_SCALING 0x03
+#define UAC2_AC3_HILO_SCALING 0x04
+#define UAC2_AC3_UNDERFLOW 0x05
+#define UAC2_AC3_OVERFLOW 0x06
+#define UAC2_AC3_DECODER_ERROR 0x07
+
+/* A.17.13.3 WMA Decoder Control Selectors */
+#define UAC2_WMA_UNDEFINED 0x00
+#define UAC2_WMA_UNDERFLOW 0x01
+#define UAC2_WMA_OVERFLOW 0x02
+#define UAC2_WMA_DECODER_ERROR 0x03
+
+/* A.17.13.4 DTS Decoder Control Selectors */
+#define UAC2_DTS_UNDEFINED 0x00
+#define UAC2_DTS_UNDERFLOW 0x01
+#define UAC2_DTS_OVERFLOW 0x02
+#define UAC2_DTS_DECODER_ERROR 0x03
+
+/* A.17.14 Endpoint Control Selectors */
+#define UAC2_EP_CS_UNDEFINED 0x00
+#define UAC2_EP_CS_PITCH 0x01
+#define UAC2_EP_CS_DATA_OVERRUN 0x02
+#define UAC2_EP_CS_DATA_UNDERRUN 0x03
+
+#endif /* __LINUX_USB_AUDIO_V2_H */
+
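As a loose sketch of how these descriptors get used, a driver could walk an interface's class-specific extra descriptors looking for a v2.0 clock source; the walking loop is an assumption about the caller, only the structure and subtype come from this header:

	static struct uac_clock_source_descriptor *
	example_find_clock_source(unsigned char *extra, int extralen)
	{
		while (extralen > 2) {
			struct uac_clock_source_descriptor *cs = (void *)extra;

			if (cs->bLength < 2 || cs->bLength > extralen)
				break;	/* malformed descriptor, stop walking */
			if (cs->bDescriptorSubtype == UAC2_CLOCK_SOURCE &&
			    cs->bLength >= sizeof(*cs))
				return cs;
			extra    += cs->bLength;
			extralen -= cs->bLength;
		}
		return NULL;
	}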
diff --git a/include/linux/usb/audio.h b/include/linux/usb/audio.h
index 4d3e450e2b03..c0ef18dc2da7 100644
--- a/include/linux/usb/audio.h
+++ b/include/linux/usb/audio.h
@@ -13,6 +13,9 @@
* Comments below reference relevant sections of that document:
*
* http://www.usb.org/developers/devclass_docs/audio10.pdf
+ *
+ * Types and defines in this file are either specific to version 1.0 of
+ * this standard or common to newer versions.
*/
#ifndef __LINUX_USB_AUDIO_H
@@ -20,14 +23,15 @@
#include <linux/types.h>
+/* bInterfaceProtocol values to denote the version of the standard used */
+#define UAC_VERSION_1 0x00
+#define UAC_VERSION_2 0x20
+
/* A.2 Audio Interface Subclass Codes */
#define USB_SUBCLASS_AUDIOCONTROL 0x01
#define USB_SUBCLASS_AUDIOSTREAMING 0x02
#define USB_SUBCLASS_MIDISTREAMING 0x03
-#define UAC_VERSION_1 0x00
-#define UAC_VERSION_2 0x20
-
/* A.5 Audio Class-Specific AC Interface Descriptor Subtypes */
#define UAC_HEADER 0x01
#define UAC_INPUT_TERMINAL 0x02
@@ -38,15 +42,6 @@
#define UAC_PROCESSING_UNIT_V1 0x07
#define UAC_EXTENSION_UNIT_V1 0x08
-/* UAC v2.0 types */
-#define UAC_EFFECT_UNIT 0x07
-#define UAC_PROCESSING_UNIT_V2 0x08
-#define UAC_EXTENSION_UNIT_V2 0x09
-#define UAC_CLOCK_SOURCE 0x0a
-#define UAC_CLOCK_SELECTOR 0x0b
-#define UAC_CLOCK_MULTIPLIER 0x0c
-#define UAC_SAMPLE_RATE_CONVERTER 0x0d
-
/* A.6 Audio Class-Specific AS Interface Descriptor Subtypes */
#define UAC_AS_GENERAL 0x01
#define UAC_FORMAT_TYPE 0x02
@@ -78,10 +73,6 @@
#define UAC_GET_STAT 0xff
-/* Audio class v2.0 handles all the parameter calls differently */
-#define UAC2_CS_CUR 0x01
-#define UAC2_CS_RANGE 0x02
-
/* MIDI - A.1 MS Class-Specific Interface Descriptor Subtypes */
#define UAC_MS_HEADER 0x01
#define UAC_MIDI_IN_JACK 0x02
@@ -190,6 +181,156 @@ struct uac_feature_unit_descriptor_##ch { \
__u8 iFeature; \
} __attribute__ ((packed))
+/* 4.3.2.3 Mixer Unit Descriptor */
+struct uac_mixer_unit_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bUnitID;
+ __u8 bNrInPins;
+ __u8 baSourceID[];
+} __attribute__ ((packed));
+
+static inline __u8 uac_mixer_unit_bNrChannels(struct uac_mixer_unit_descriptor *desc)
+{
+ return desc->baSourceID[desc->bNrInPins];
+}
+
+static inline __u32 uac_mixer_unit_wChannelConfig(struct uac_mixer_unit_descriptor *desc,
+ int protocol)
+{
+ if (protocol == UAC_VERSION_1)
+ return (desc->baSourceID[desc->bNrInPins + 2] << 8) |
+ desc->baSourceID[desc->bNrInPins + 1];
+ else
+ return (desc->baSourceID[desc->bNrInPins + 4] << 24) |
+ (desc->baSourceID[desc->bNrInPins + 3] << 16) |
+ (desc->baSourceID[desc->bNrInPins + 2] << 8) |
+ (desc->baSourceID[desc->bNrInPins + 1]);
+}
+
+static inline __u8 uac_mixer_unit_iChannelNames(struct uac_mixer_unit_descriptor *desc,
+ int protocol)
+{
+ return (protocol == UAC_VERSION_1) ?
+ desc->baSourceID[desc->bNrInPins + 3] :
+ desc->baSourceID[desc->bNrInPins + 5];
+}
+
+static inline __u8 *uac_mixer_unit_bmControls(struct uac_mixer_unit_descriptor *desc,
+ int protocol)
+{
+ return (protocol == UAC_VERSION_1) ?
+ &desc->baSourceID[desc->bNrInPins + 4] :
+ &desc->baSourceID[desc->bNrInPins + 6];
+}
+
+static inline __u8 uac_mixer_unit_iMixer(struct uac_mixer_unit_descriptor *desc)
+{
+ __u8 *raw = (__u8 *) desc;
+ return raw[desc->bLength - 1];
+}
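These accessors exist because the interesting fields sit at protocol-dependent offsets behind the variable-length baSourceID[] array; a parser would use them roughly like this (the function itself is illustrative):

	static void example_dump_mixer(struct uac_mixer_unit_descriptor *d,
				       int protocol)
	{
		__u8  channels = uac_mixer_unit_bNrChannels(d);
		__u32 config   = uac_mixer_unit_wChannelConfig(d, protocol);

		pr_debug("mixer unit %u: %u channels, config 0x%08x\n",
			 d->bUnitID, channels, config);
	}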
+
+/* 4.3.2.4 Selector Unit Descriptor */
+struct uac_selector_unit_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bUintID;
+ __u8 bNrInPins;
+ __u8 baSourceID[];
+} __attribute__ ((packed));
+
+static inline __u8 uac_selector_unit_iSelector(struct uac_selector_unit_descriptor *desc)
+{
+ __u8 *raw = (__u8 *) desc;
+ return raw[9 + desc->bLength - 1];
+}
+
+/* 4.3.2.5 Feature Unit Descriptor */
+struct uac_feature_unit_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bUnitID;
+ __u8 bSourceID;
+ __u8 bControlSize;
+ __u8 bmaControls[0]; /* variable length */
+} __attribute__((packed));
+
+static inline __u8 uac_feature_unit_iFeature(struct uac_feature_unit_descriptor *desc)
+{
+ __u8 *raw = (__u8 *) desc;
+ return raw[desc->bLength - 1];
+}
+
+/* 4.3.2.6 Processing Unit Descriptors */
+struct uac_processing_unit_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bUnitID;
+ __u16 wProcessType;
+ __u8 bNrInPins;
+ __u8 baSourceID[];
+} __attribute__ ((packed));
+
+static inline __u8 uac_processing_unit_bNrChannels(struct uac_processing_unit_descriptor *desc)
+{
+ return desc->baSourceID[desc->bNrInPins];
+}
+
+static inline __u32 uac_processing_unit_wChannelConfig(struct uac_processing_unit_descriptor *desc,
+ int protocol)
+{
+ if (protocol == UAC_VERSION_1)
+ return (desc->baSourceID[desc->bNrInPins + 2] << 8) |
+ desc->baSourceID[desc->bNrInPins + 1];
+ else
+ return (desc->baSourceID[desc->bNrInPins + 4] << 24) |
+ (desc->baSourceID[desc->bNrInPins + 3] << 16) |
+ (desc->baSourceID[desc->bNrInPins + 2] << 8) |
+ (desc->baSourceID[desc->bNrInPins + 1]);
+}
+
+static inline __u8 uac_processing_unit_iChannelNames(struct uac_processing_unit_descriptor *desc,
+ int protocol)
+{
+ return (protocol == UAC_VERSION_1) ?
+ desc->baSourceID[desc->bNrInPins + 3] :
+ desc->baSourceID[desc->bNrInPins + 5];
+}
+
+static inline __u8 uac_processing_unit_bControlSize(struct uac_processing_unit_descriptor *desc,
+ int protocol)
+{
+ return (protocol == UAC_VERSION_1) ?
+ desc->baSourceID[desc->bNrInPins + 4] :
+ desc->baSourceID[desc->bNrInPins + 6];
+}
+
+static inline __u8 *uac_processing_unit_bmControls(struct uac_processing_unit_descriptor *desc,
+ int protocol)
+{
+ return (protocol == UAC_VERSION_1) ?
+ &desc->baSourceID[desc->bNrInPins + 5] :
+ &desc->baSourceID[desc->bNrInPins + 7];
+}
+
+static inline __u8 uac_processing_unit_iProcessing(struct uac_processing_unit_descriptor *desc,
+ int protocol)
+{
+ __u8 control_size = uac_processing_unit_bControlSize(desc, protocol);
+ return desc->baSourceID[desc->bNrInPins + control_size];
+}
+
+static inline __u8 *uac_processing_unit_specific(struct uac_processing_unit_descriptor *desc,
+ int protocol)
+{
+ __u8 control_size = uac_processing_unit_bControlSize(desc, protocol);
+ return &desc->baSourceID[desc->bNrInPins + control_size + 1];
+}
+
/* 4.5.2 Class-Specific AS Interface Descriptor */
struct uac_as_header_descriptor_v1 {
__u8 bLength; /* in bytes: 7 */
@@ -200,19 +341,6 @@ struct uac_as_header_descriptor_v1 {
__le16 wFormatTag; /* The Audio Data Format */
} __attribute__ ((packed));
-struct uac_as_header_descriptor_v2 {
- __u8 bLength;
- __u8 bDescriptorType;
- __u8 bDescriptorSubtype;
- __u8 bTerminalLink;
- __u8 bmControls;
- __u8 bFormatType;
- __u32 bmFormats;
- __u8 bNrChannels;
- __u32 bmChannelConfig;
- __u8 iChannelNames;
-} __attribute__((packed));
-
#define UAC_DT_AS_HEADER_SIZE 7
/* Formats - A.1.1 Audio Data Format Type I Codes */
@@ -277,7 +405,6 @@ struct uac_format_type_i_ext_descriptor {
__u8 bSideBandProtocol;
} __attribute__((packed));
-
/* Formats - Audio Data Format Type I Codes */
#define UAC_FORMAT_TYPE_II_MPEG 0x1001
@@ -329,38 +456,15 @@ struct uac_iso_endpoint_descriptor {
__u8 bmAttributes;
__u8 bLockDelayUnits;
__le16 wLockDelay;
-};
+} __attribute__((packed));
#define UAC_ISO_ENDPOINT_DESC_SIZE 7
#define UAC_EP_CS_ATTR_SAMPLE_RATE 0x01
#define UAC_EP_CS_ATTR_PITCH_CONTROL 0x02
#define UAC_EP_CS_ATTR_FILL_MAX 0x80
-/* Audio class v2.0: CLOCK_SOURCE descriptor */
-
-struct uac_clock_source_descriptor {
- __u8 bLength;
- __u8 bDescriptorType;
- __u8 bDescriptorSubtype;
- __u8 bClockID;
- __u8 bmAttributes;
- __u8 bmControls;
- __u8 bAssocTerminal;
- __u8 iClockSource;
-} __attribute__((packed));
-
/* A.10.2 Feature Unit Control Selectors */
-struct uac_feature_unit_descriptor {
- __u8 bLength;
- __u8 bDescriptorType;
- __u8 bDescriptorSubtype;
- __u8 bUnitID;
- __u8 bSourceID;
- __u8 bControlSize;
- __u8 controls[0]; /* variable length */
-} __attribute__((packed));
-
#define UAC_FU_CONTROL_UNDEFINED 0x00
#define UAC_MUTE_CONTROL 0x01
#define UAC_VOLUME_CONTROL 0x02
@@ -384,6 +488,21 @@ struct uac_feature_unit_descriptor {
#define UAC_FU_BASS_BOOST (1 << (UAC_BASS_BOOST_CONTROL - 1))
#define UAC_FU_LOUDNESS (1 << (UAC_LOUDNESS_CONTROL - 1))
+/* status word format (3.7.1.1) */
+
+#define UAC1_STATUS_TYPE_ORIG_MASK 0x0f
+#define UAC1_STATUS_TYPE_ORIG_AUDIO_CONTROL_IF 0x0
+#define UAC1_STATUS_TYPE_ORIG_AUDIO_STREAM_IF 0x1
+#define UAC1_STATUS_TYPE_ORIG_AUDIO_STREAM_EP 0x2
+
+#define UAC1_STATUS_TYPE_IRQ_PENDING (1 << 7)
+#define UAC1_STATUS_TYPE_MEM_CHANGED (1 << 6)
+
+struct uac1_status_word {
+ __u8 bStatusType;
+ __u8 bOriginator;
+} __attribute__((packed));
+
#ifdef __KERNEL__
struct usb_audio_control {
diff --git a/include/linux/uuid.h b/include/linux/uuid.h
new file mode 100644
index 000000000000..5b7efbfcee4e
--- /dev/null
+++ b/include/linux/uuid.h
@@ -0,0 +1,70 @@
+/*
+ * UUID/GUID definition
+ *
+ * Copyright (C) 2010, Intel Corp.
+ * Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation;
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _LINUX_UUID_H_
+#define _LINUX_UUID_H_
+
+#include <linux/types.h>
+#include <linux/string.h>
+
+typedef struct {
+ __u8 b[16];
+} uuid_le;
+
+typedef struct {
+ __u8 b[16];
+} uuid_be;
+
+#define UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
+((uuid_le) \
+{{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
+ (b) & 0xff, ((b) >> 8) & 0xff, \
+ (c) & 0xff, ((c) >> 8) & 0xff, \
+ (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
+
+#define UUID_BE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
+((uuid_be) \
+{{ ((a) >> 24) & 0xff, ((a) >> 16) & 0xff, ((a) >> 8) & 0xff, (a) & 0xff, \
+ ((b) >> 8) & 0xff, (b) & 0xff, \
+ ((c) >> 8) & 0xff, (c) & 0xff, \
+ (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
+
+#define NULL_UUID_LE \
+ UUID_LE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, \
+ 0x00, 0x00, 0x00, 0x00)
+
+#define NULL_UUID_BE \
+ UUID_BE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, \
+ 0x00, 0x00, 0x00, 0x00)
+
+static inline int uuid_le_cmp(const uuid_le u1, const uuid_le u2)
+{
+ return memcmp(&u1, &u2, sizeof(uuid_le));
+}
+
+static inline int uuid_be_cmp(const uuid_be u1, const uuid_be u2)
+{
+ return memcmp(&u1, &u2, sizeof(uuid_be));
+}
+
+extern void uuid_le_gen(uuid_le *u);
+extern void uuid_be_gen(uuid_be *u);
+
+#endif
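A typical use is defining a well-known GUID with the initializer macro and comparing incoming identifiers against it; the GUID value below is a placeholder:

	static const uuid_le example_guid =
		UUID_LE(0x12345678, 0x9abc, 0xdef0,
			0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef);

	static bool example_guid_matches(const uuid_le *u)
	{
		return uuid_le_cmp(*u, example_guid) == 0;
	}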
diff --git a/include/linux/vbus_driver.h b/include/linux/vbus_driver.h
new file mode 100644
index 000000000000..8a7acb1a7a05
--- /dev/null
+++ b/include/linux/vbus_driver.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2009 Novell. All Rights Reserved.
+ *
+ * Mediates access to a host VBUS from a guest kernel by providing a
+ * global view of all VBUS devices
+ *
+ * Author:
+ * Gregory Haskins <ghaskins@novell.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _LINUX_VBUS_DRIVER_H
+#define _LINUX_VBUS_DRIVER_H
+
+#include <linux/device.h>
+#include <linux/shm_signal.h>
+#include <linux/ioq.h>
+
+struct vbus_device_proxy;
+struct vbus_driver;
+
+struct vbus_device_proxy_ops {
+ int (*open)(struct vbus_device_proxy *dev, int version, int flags);
+ int (*close)(struct vbus_device_proxy *dev, int flags);
+ int (*shm)(struct vbus_device_proxy *dev, const char *name,
+ int id, int prio,
+ void *ptr, size_t len,
+ struct shm_signal_desc *sigdesc, struct shm_signal **signal,
+ int flags);
+ int (*call)(struct vbus_device_proxy *dev, u32 func,
+ void *data, size_t len, int flags);
+ void (*release)(struct vbus_device_proxy *dev);
+};
+
+struct vbus_device_proxy {
+ char *type;
+ u64 id;
+ void *priv; /* Used by drivers */
+ struct vbus_device_proxy_ops *ops;
+ struct device dev;
+};
+
+int vbus_device_proxy_register(struct vbus_device_proxy *dev);
+void vbus_device_proxy_unregister(struct vbus_device_proxy *dev);
+
+struct vbus_device_proxy *vbus_device_proxy_find(u64 id);
+
+struct vbus_driver_ops {
+ int (*probe)(struct vbus_device_proxy *dev);
+ int (*remove)(struct vbus_device_proxy *dev);
+};
+
+struct vbus_driver {
+ char *type;
+ struct module *owner;
+ struct vbus_driver_ops *ops;
+ struct device_driver drv;
+};
+
+int vbus_driver_register(struct vbus_driver *drv);
+void vbus_driver_unregister(struct vbus_driver *drv);
+
+/*
+ * driver-side IOQ helper - allocates device-shm and maps an IOQ on it
+ */
+int vbus_driver_ioq_alloc(struct vbus_device_proxy *dev, const char *name,
+ int id, int prio, size_t ringsize, struct ioq **ioq);
+
+#define VBUS_DRIVER_AUTOPROBE(name) MODULE_ALIAS("vbus-proxy:" name)
+
+#endif /* _LINUX_VBUS_DRIVER_H */
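A guest-side driver binds to a proxy device by type string; a minimal registration, with an assumed device ABI version of 1 and a made-up type name, might look like this:

	static int example_probe(struct vbus_device_proxy *dev)
	{
		return dev->ops->open(dev, 1 /* assumed device ABI version */, 0);
	}

	static int example_remove(struct vbus_device_proxy *dev)
	{
		dev->ops->close(dev, 0);
		return 0;
	}

	static struct vbus_driver_ops example_vbus_ops = {
		.probe  = example_probe,
		.remove = example_remove,
	};

	static struct vbus_driver example_vbus_drv = {
		.type  = "example-device",
		.owner = THIS_MODULE,
		.ops   = &example_vbus_ops,
	};

	/* in module init: vbus_driver_register(&example_vbus_drv); */
	VBUS_DRIVER_AUTOPROBE("example-device");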
diff --git a/include/linux/vbus_pci.h b/include/linux/vbus_pci.h
new file mode 100644
index 000000000000..fe337590e644
--- /dev/null
+++ b/include/linux/vbus_pci.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2009 Novell. All Rights Reserved.
+ *
+ * PCI to Virtual-Bus Bridge
+ *
+ * Author:
+ * Gregory Haskins <ghaskins@novell.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _LINUX_VBUS_PCI_H
+#define _LINUX_VBUS_PCI_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define VBUS_PCI_ABI_MAGIC 0xbf53eef5
+#define VBUS_PCI_ABI_VERSION 2
+#define VBUS_PCI_HC_VERSION 1
+
+enum {
+ VBUS_PCI_BRIDGE_NEGOTIATE,
+ VBUS_PCI_BRIDGE_QREG,
+ VBUS_PCI_BRIDGE_SLOWCALL,
+ VBUS_PCI_BRIDGE_FASTCALL_ADD,
+ VBUS_PCI_BRIDGE_FASTCALL_DROP,
+
+ VBUS_PCI_BRIDGE_MAX, /* must be last */
+};
+
+enum {
+ VBUS_PCI_HC_DEVOPEN,
+ VBUS_PCI_HC_DEVCLOSE,
+ VBUS_PCI_HC_DEVCALL,
+ VBUS_PCI_HC_DEVSHM,
+
+ VBUS_PCI_HC_MAX, /* must be last */
+};
+
+struct vbus_pci_bridge_negotiate {
+ __u32 magic;
+ __u32 version;
+ __u64 capabilities;
+};
+
+struct vbus_pci_deviceopen {
+ __u32 devid;
+ __u32 version; /* device ABI version */
+ __u64 handle; /* return value for devh */
+};
+
+struct vbus_pci_devicecall {
+ __u64 devh; /* device-handle (returned from DEVICEOPEN) */
+ __u32 func;
+ __u32 len;
+ __u32 flags;
+ __u64 datap;
+};
+
+struct vbus_pci_deviceshm {
+ __u64 devh; /* device-handle (returned from DEVICEOPEN) */
+ __u32 id;
+ __u32 len;
+ __u32 flags;
+ struct {
+ __u32 offset;
+ __u32 prio;
+ __u64 cookie; /* token to pass back when signaling client */
+ } signal;
+ __u64 datap;
+};
+
+struct vbus_pci_call_desc {
+ __u32 vector;
+ __u32 len;
+ __u64 datap;
+};
+
+struct vbus_pci_fastcall_desc {
+ struct vbus_pci_call_desc call;
+ __u32 result;
+};
+
+struct vbus_pci_regs {
+ struct vbus_pci_call_desc bridgecall;
+ __u8 pad[48];
+};
+
+struct vbus_pci_signals {
+ __u32 eventq;
+ __u32 fastcall;
+ __u32 shmsignal;
+ __u8 pad[20];
+};
+
+struct vbus_pci_eventqreg {
+ __u32 count;
+ __u64 ring;
+ __u64 data;
+};
+
+struct vbus_pci_busreg {
+ __u32 count; /* supporting multiple queues allows for prio, etc */
+ struct vbus_pci_eventqreg eventq[1];
+};
+
+enum vbus_pci_eventid {
+ VBUS_PCI_EVENT_DEVADD,
+ VBUS_PCI_EVENT_DEVDROP,
+ VBUS_PCI_EVENT_SHMSIGNAL,
+ VBUS_PCI_EVENT_SHMCLOSE,
+};
+
+#define VBUS_MAX_DEVTYPE_LEN 128
+
+struct vbus_pci_add_event {
+ __u64 id;
+ char type[VBUS_MAX_DEVTYPE_LEN];
+};
+
+struct vbus_pci_handle_event {
+ __u64 handle;
+};
+
+struct vbus_pci_event {
+ __u32 eventid;
+ union {
+ struct vbus_pci_add_event add;
+ struct vbus_pci_handle_event handle;
+ } data;
+};
+
+#endif /* _LINUX_VBUS_PCI_H */
diff --git a/include/linux/venet.h b/include/linux/venet.h
new file mode 100644
index 000000000000..0578d797c973
--- /dev/null
+++ b/include/linux/venet.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2009 Novell. All Rights Reserved.
+ *
+ * Virtual-Ethernet adapter
+ *
+ * Author:
+ * Gregory Haskins <ghaskins@novell.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _LINUX_VENET_H
+#define _LINUX_VENET_H
+
+#include <linux/types.h>
+
+#define VENET_VERSION 1
+
+#define VENET_TYPE "virtual-ethernet"
+
+#define VENET_QUEUE_RX 0
+#define VENET_QUEUE_TX 1
+
+struct venet_capabilities {
+ __u32 gid;
+ __u32 bits;
+};
+
+#define VENET_CAP_GROUP_SG 0
+#define VENET_CAP_GROUP_EVENTQ 1
+#define VENET_CAP_GROUP_L4RO 2 /* layer-4 reassem offloading */
+
+/* CAPABILITIES-GROUP SG */
+#define VENET_CAP_SG (1 << 0)
+#define VENET_CAP_TSO4 (1 << 1)
+#define VENET_CAP_TSO6 (1 << 2)
+#define VENET_CAP_ECN (1 << 3)
+#define VENET_CAP_UFO (1 << 4)
+#define VENET_CAP_PMTD (1 << 5) /* pre-mapped tx desc */
+
+/* CAPABILITIES-GROUP EVENTQ */
+#define VENET_CAP_EVQ_LINKSTATE (1 << 0)
+#define VENET_CAP_EVQ_TXC (1 << 1) /* tx-complete */
+
+struct venet_iov {
+ __u32 len;
+ __u64 ptr;
+};
+
+#define VENET_SG_FLAG_NEEDS_CSUM (1 << 0)
+#define VENET_SG_FLAG_GSO (1 << 1)
+#define VENET_SG_FLAG_ECN (1 << 2)
+
+struct venet_sg {
+ __u64 cookie;
+ __u32 flags;
+ __u32 len; /* total length of all iovs */
+ struct {
+ __u16 start; /* csum starting position */
+ __u16 offset; /* offset to place csum */
+ } csum;
+ struct {
+#define VENET_GSO_TYPE_TCPV4 0 /* IPv4 TCP (TSO) */
+#define VENET_GSO_TYPE_UDP 1 /* IPv4 UDP (UFO) */
+#define VENET_GSO_TYPE_TCPV6 2 /* IPv6 TCP */
+ __u8 type;
+ __u16 hdrlen;
+ __u16 size;
+ } gso;
+ __u32 count; /* nr of iovs */
+ struct venet_iov iov[1];
+};
+
+struct venet_eventq_query {
+ __u32 flags;
+ __u32 evsize; /* size of each event */
+ __u32 dpid; /* descriptor pool-id */
+ __u32 qid;
+ __u8 pad[16];
+};
+
+#define VENET_EVENT_LINKSTATE 0
+#define VENET_EVENT_TXC 1
+
+struct venet_event_header {
+ __u32 flags;
+ __u32 size;
+ __u32 id;
+};
+
+struct venet_event_linkstate {
+ struct venet_event_header header;
+ __u8 state; /* 0 = down, 1 = up */
+};
+
+struct venet_event_txc {
+ struct venet_event_header header;
+ __u32 txqid;
+ __u64 cookie;
+};
+
+struct venet_l4ro_query {
+ __u32 flags;
+ __u32 dpid; /* descriptor pool-id */
+ __u32 pqid; /* page queue-id */
+ __u8 pad[20];
+};
+
+
+#define VSG_DESC_SIZE(count) (sizeof(struct venet_sg) + \
+ sizeof(struct venet_iov) * ((count) - 1))
+
+#define VENET_FUNC_LINKUP 0
+#define VENET_FUNC_LINKDOWN 1
+#define VENET_FUNC_MACQUERY 2
+#define VENET_FUNC_NEGCAP 3 /* negotiate capabilities */
+#define VENET_FUNC_FLUSHRX 4
+#define VENET_FUNC_PMTDQUERY 5
+#define VENET_FUNC_EVQQUERY 6
+#define VENET_FUNC_L4ROQUERY 7
+
+#endif /* _LINUX_VENET_H */
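VSG_DESC_SIZE() accounts for the one iov already embedded in struct venet_sg; allocating and priming a descriptor with room for eight fragments could look like this (kzalloc/GFP_KERNEL is the assumed caller context):

	static struct venet_sg *example_alloc_sg(void)
	{
		struct venet_sg *vsg = kzalloc(VSG_DESC_SIZE(8), GFP_KERNEL);

		if (vsg) {
			vsg->count = 8;
			vsg->flags = VENET_SG_FLAG_NEEDS_CSUM;
			/* caller fills vsg->iov[0..7] with {len, ptr} pairs */
		}
		return vsg;
	}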
diff --git a/include/linux/via-core.h b/include/linux/via-core.h
new file mode 100644
index 000000000000..7ffb521e1a7a
--- /dev/null
+++ b/include/linux/via-core.h
@@ -0,0 +1,219 @@
+/*
+ * Copyright 1998-2009 VIA Technologies, Inc. All Rights Reserved.
+ * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
+ * Copyright 2009-2010 Jonathan Corbet <corbet@lwn.net>
+ * Copyright 2010 Florian Tobias Schandinat <FlorianSchandinat@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation;
+ * either version 2, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even
+ * the implied warranty of MERCHANTABILITY or FITNESS FOR
+ * A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __VIA_CORE_H__
+#define __VIA_CORE_H__
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+
+/*
+ * A description of each known serial I2C/GPIO port.
+ */
+enum via_port_type {
+ VIA_PORT_NONE = 0,
+ VIA_PORT_I2C,
+ VIA_PORT_GPIO,
+};
+
+enum via_port_mode {
+ VIA_MODE_OFF = 0,
+ VIA_MODE_I2C, /* Used as I2C port */
+ VIA_MODE_GPIO, /* Two GPIO ports */
+};
+
+enum viafb_i2c_adap {
+ VIA_PORT_26 = 0,
+ VIA_PORT_31,
+ VIA_PORT_25,
+ VIA_PORT_2C,
+ VIA_PORT_3D,
+};
+#define VIAFB_NUM_PORTS 5
+
+struct via_port_cfg {
+ enum via_port_type type;
+ enum via_port_mode mode;
+ u16 io_port;
+ u8 ioport_index;
+};
+
+/*
+ * This is the global viafb "device" containing stuff needed by
+ * all subdevs.
+ */
+struct viafb_dev {
+ struct pci_dev *pdev;
+ int chip_type;
+ struct via_port_cfg *port_cfg;
+ /*
+ * Spinlock for access to device registers. Not yet
+ * globally used.
+ */
+ spinlock_t reg_lock;
+ /*
+ * The framebuffer MMIO region. Little, if anything, touches
+ * this memory directly, and certainly nothing outside of the
+ * framebuffer device itself. We *do* have to be able to allocate
+ * chunks of this memory for other devices, though.
+ */
+ unsigned long fbmem_start;
+ long fbmem_len;
+ void __iomem *fbmem;
+#if defined(CONFIG_FB_VIA_CAMERA) || defined(CONFIG_FB_VIA_CAMERA_MODULE)
+ long camera_fbmem_offset;
+ long camera_fbmem_size;
+#endif
+ /*
+ * The MMIO region for device registers.
+ */
+ unsigned long engine_start;
+ unsigned long engine_len;
+ void __iomem *engine_mmio;
+
+};
+
+/*
+ * Interrupt management.
+ */
+
+void viafb_irq_enable(u32 mask);
+void viafb_irq_disable(u32 mask);
+
+/*
+ * The global interrupt control register and its bits.
+ */
+#define VDE_INTERRUPT 0x200 /* Video interrupt flags/masks */
+#define VDE_I_DVISENSE 0x00000001 /* DVI sense int status */
+#define VDE_I_VBLANK 0x00000002 /* Vertical blank status */
+#define VDE_I_MCCFI 0x00000004 /* MCE compl. frame int status */
+#define VDE_I_VSYNC 0x00000008 /* VGA VSYNC int status */
+#define VDE_I_DMA0DDONE 0x00000010 /* DMA 0 descr done */
+#define VDE_I_DMA0TDONE 0x00000020 /* DMA 0 transfer done */
+#define VDE_I_DMA1DDONE 0x00000040 /* DMA 1 descr done */
+#define VDE_I_DMA1TDONE 0x00000080 /* DMA 1 transfer done */
+#define VDE_I_C1AV 0x00000100 /* Cap Eng 1 act vid end */
+#define VDE_I_HQV0 0x00000200 /* First HQV engine */
+#define VDE_I_HQV1 0x00000400 /* Second HQV engine */
+#define VDE_I_HQV1EN 0x00000800 /* Second HQV engine enable */
+#define VDE_I_C0AV 0x00001000 /* Cap Eng 0 act vid end */
+#define VDE_I_C0VBI 0x00002000 /* Cap Eng 0 VBI end */
+#define VDE_I_C1VBI 0x00004000 /* Cap Eng 1 VBI end */
+#define VDE_I_VSYNC2 0x00008000 /* Sec. Disp. VSYNC */
+#define VDE_I_DVISNSEN 0x00010000 /* DVI sense enable */
+#define VDE_I_VSYNC2EN 0x00020000 /* Sec Disp VSYNC enable */
+#define VDE_I_MCCFIEN 0x00040000 /* MC comp frame int mask enable */
+#define VDE_I_VSYNCEN 0x00080000 /* VSYNC enable */
+#define VDE_I_DMA0DDEN 0x00100000 /* DMA 0 descr done enable */
+#define VDE_I_DMA0TDEN 0x00200000 /* DMA 0 trans done enable */
+#define VDE_I_DMA1DDEN 0x00400000 /* DMA 1 descr done enable */
+#define VDE_I_DMA1TDEN 0x00800000 /* DMA 1 trans done enable */
+#define VDE_I_C1AVEN 0x01000000 /* cap 1 act vid end enable */
+#define VDE_I_HQV0EN 0x02000000 /* First hqv engine enable */
+#define VDE_I_C1VBIEN 0x04000000 /* Cap 1 VBI end enable */
+#define VDE_I_LVDSSI 0x08000000 /* LVDS sense interrupt */
+#define VDE_I_C0AVEN 0x10000000 /* Cap 0 act vid end enable */
+#define VDE_I_C0VBIEN 0x20000000 /* Cap 0 VBI end enable */
+#define VDE_I_LVDSSIEN 0x40000000 /* LVDS Sense enable */
+#define VDE_I_ENABLE 0x80000000 /* Global interrupt enable */
+
+/*
+ * DMA management.
+ */
+int viafb_request_dma(void);
+void viafb_release_dma(void);
+/* void viafb_dma_copy_out(unsigned int offset, dma_addr_t paddr, int len); */
+int viafb_dma_copy_out_sg(unsigned int offset, struct scatterlist *sg, int nsg);
+
+/*
+ * DMA Controller registers.
+ */
+#define VDMA_MR0 0xe00 /* Mod reg 0 */
+#define VDMA_MR_CHAIN 0x01 /* Chaining mode */
+#define VDMA_MR_TDIE 0x02 /* Transfer done int enable */
+#define VDMA_CSR0 0xe04 /* Control/status */
+#define VDMA_C_ENABLE 0x01 /* DMA Enable */
+#define VDMA_C_START 0x02 /* Start a transfer */
+#define VDMA_C_ABORT 0x04 /* Abort a transfer */
+#define VDMA_C_DONE 0x08 /* Transfer is done */
+#define VDMA_MARL0 0xe20 /* Mem addr low */
+#define VDMA_MARH0 0xe24 /* Mem addr high */
+#define VDMA_DAR0 0xe28 /* Device address */
+#define VDMA_DQWCR0 0xe2c /* Count (16-byte) */
+#define VDMA_TMR0 0xe30 /* Tile mode reg */
+#define VDMA_DPRL0 0xe34 /* Not sure */
+#define VDMA_DPR_IN 0x08 /* Inbound transfer to FB */
+#define VDMA_DPRH0 0xe38
+#define VDMA_PMR0 (0xe00 + 0x134) /* Pitch mode */
+
+/*
+ * Useful stuff that probably belongs somewhere global.
+ */
+#define VGA_WIDTH 640
+#define VGA_HEIGHT 480
+
+/*
+ * Indexed port operations. Note that these are all multi-op
+ * functions; every invocation will be racy if you're not holding
+ * reg_lock.
+ */
+
+#define VIAStatus 0x3DA /* Non-indexed port */
+#define VIACR 0x3D4
+#define VIASR 0x3C4
+#define VIAGR 0x3CE
+#define VIAAR 0x3C0
+
+static inline u8 via_read_reg(u16 port, u8 index)
+{
+ outb(index, port);
+ return inb(port + 1);
+}
+
+static inline void via_write_reg(u16 port, u8 index, u8 data)
+{
+ outb(index, port);
+ outb(data, port + 1);
+}
+
+static inline void via_write_reg_mask(u16 port, u8 index, u8 data, u8 mask)
+{
+ u8 old;
+
+ outb(index, port);
+ old = inb(port + 1);
+ outb((data & mask) | (old & ~mask), port + 1);
+}
+
+#define VIA_MISC_REG_READ 0x03CC
+#define VIA_MISC_REG_WRITE 0x03C2
+
+static inline void via_write_misc_reg_mask(u8 data, u8 mask)
+{
+ u8 old = inb(VIA_MISC_REG_READ);
+ outb((data & mask) | (old & ~mask), VIA_MISC_REG_WRITE);
+}
+
+
+#endif /* __VIA_CORE_H__ */
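Because these indexed accesses are multi-op, a subdevice would normally wrap them in reg_lock; a sketch of setting one bit of an arbitrary CR register under the lock:

	static void example_set_cr_bit(struct viafb_dev *vdev)
	{
		unsigned long flags;

		spin_lock_irqsave(&vdev->reg_lock, flags);
		/* set bit 0 of CR 0x36 without disturbing the other bits */
		via_write_reg_mask(VIACR, 0x36, 0x01, 0x01);
		spin_unlock_irqrestore(&vdev->reg_lock, flags);
	}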
diff --git a/include/linux/via-gpio.h b/include/linux/via-gpio.h
new file mode 100644
index 000000000000..8281aea3dd6d
--- /dev/null
+++ b/include/linux/via-gpio.h
@@ -0,0 +1,14 @@
+/*
+ * Support for viafb GPIO ports.
+ *
+ * Copyright 2009 Jonathan Corbet <corbet@lwn.net>
+ * Distributable under version 2 of the GNU General Public License.
+ */
+
+#ifndef __VIA_GPIO_H__
+#define __VIA_GPIO_H__
+
+extern int viafb_gpio_lookup(const char *name);
+extern int viafb_gpio_init(void);
+extern void viafb_gpio_exit(void);
+#endif
diff --git a/drivers/video/via/via_i2c.h b/include/linux/via_i2c.h
index 3a13242a3152..44532e468c05 100644
--- a/drivers/video/via/via_i2c.h
+++ b/include/linux/via_i2c.h
@@ -1,5 +1,5 @@
/*
- * Copyright 1998-2008 VIA Technologies, Inc. All Rights Reserved.
+ * Copyright 1998-2009 VIA Technologies, Inc. All Rights Reserved.
* Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
* This program is free software; you can redistribute it and/or
@@ -26,21 +26,17 @@
struct via_i2c_stuff {
u16 i2c_port; /* GPIO or I2C port */
+ u16 is_active; /* Being used as I2C? */
struct i2c_adapter adapter;
struct i2c_algo_bit_data algo;
};
-#define I2CPORT 0x3c4
-#define I2CPORTINDEX 0x31
-#define GPIOPORT 0x3C4
-#define GPIOPORTINDEX 0x2C
-#define I2C_BUS 1
-#define GPIO_BUS 2
-#define DELAYPORT 0x3C3
-
-int viafb_i2c_readbyte(u8 slave_addr, u8 index, u8 *pdata);
-int viafb_i2c_writebyte(u8 slave_addr, u8 index, u8 data);
-int viafb_i2c_readbytes(u8 slave_addr, u8 index, u8 *buff, int buff_len);
-int viafb_create_i2c_bus(void *par);
-void viafb_delete_i2c_buss(void *par);
+
+int viafb_i2c_readbyte(u8 adap, u8 slave_addr, u8 index, u8 *pdata);
+int viafb_i2c_writebyte(u8 adap, u8 slave_addr, u8 index, u8 data);
+int viafb_i2c_readbytes(u8 adap, u8 slave_addr, u8 index, u8 *buff, int buff_len);
+struct i2c_adapter *viafb_find_i2c_adapter(enum viafb_i2c_adap which);
+
+extern int viafb_i2c_init(void);
+extern void viafb_i2c_exit(void);
#endif /* __VIA_I2C_H__ */
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 3793d168b44d..047f7e6edb86 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -160,16 +160,6 @@ enum v4l2_buf_type {
V4L2_BUF_TYPE_PRIVATE = 0x80,
};
-enum v4l2_ctrl_type {
- V4L2_CTRL_TYPE_INTEGER = 1,
- V4L2_CTRL_TYPE_BOOLEAN = 2,
- V4L2_CTRL_TYPE_MENU = 3,
- V4L2_CTRL_TYPE_BUTTON = 4,
- V4L2_CTRL_TYPE_INTEGER64 = 5,
- V4L2_CTRL_TYPE_CTRL_CLASS = 6,
- V4L2_CTRL_TYPE_STRING = 7,
-};
-
enum v4l2_tuner_type {
V4L2_TUNER_RADIO = 1,
V4L2_TUNER_ANALOG_TV = 2,
@@ -294,6 +284,8 @@ struct v4l2_pix_format {
/* Grey formats */
#define V4L2_PIX_FMT_GREY v4l2_fourcc('G', 'R', 'E', 'Y') /* 8 Greyscale */
+#define V4L2_PIX_FMT_Y4 v4l2_fourcc('Y', '0', '4', ' ') /* 4 Greyscale */
+#define V4L2_PIX_FMT_Y6 v4l2_fourcc('Y', '0', '6', ' ') /* 6 Greyscale */
#define V4L2_PIX_FMT_Y10 v4l2_fourcc('Y', '1', '0', ' ') /* 10 Greyscale */
#define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */
@@ -369,6 +361,7 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_OV511 v4l2_fourcc('O', '5', '1', '1') /* ov511 JPEG */
#define V4L2_PIX_FMT_OV518 v4l2_fourcc('O', '5', '1', '8') /* ov518 JPEG */
#define V4L2_PIX_FMT_STV0680 v4l2_fourcc('S', '6', '8', '0') /* stv0680 bayer */
+#define V4L2_PIX_FMT_TM6000 v4l2_fourcc('T', 'M', '6', '0') /* tm5600/tm60x0 */
/*
* F O R M A T E N U M E R A T I O N
@@ -549,6 +542,8 @@ struct v4l2_buffer {
#define V4L2_BUF_FLAG_KEYFRAME 0x0008 /* Image is a keyframe (I-frame) */
#define V4L2_BUF_FLAG_PFRAME 0x0010 /* Image is a P-frame */
#define V4L2_BUF_FLAG_BFRAME 0x0020 /* Image is a B-frame */
+/* Buffer is ready, but the data contained within is corrupted. */
+#define V4L2_BUF_FLAG_ERROR 0x0040
#define V4L2_BUF_FLAG_TIMECODE 0x0100 /* timecode field is valid */
#define V4L2_BUF_FLAG_INPUT 0x0200 /* input field is valid */
@@ -939,6 +934,16 @@ struct v4l2_ext_controls {
#define V4L2_CTRL_ID2CLASS(id) ((id) & 0x0fff0000UL)
#define V4L2_CTRL_DRIVER_PRIV(id) (((id) & 0xffff) >= 0x1000)
+enum v4l2_ctrl_type {
+ V4L2_CTRL_TYPE_INTEGER = 1,
+ V4L2_CTRL_TYPE_BOOLEAN = 2,
+ V4L2_CTRL_TYPE_MENU = 3,
+ V4L2_CTRL_TYPE_BUTTON = 4,
+ V4L2_CTRL_TYPE_INTEGER64 = 5,
+ V4L2_CTRL_TYPE_CTRL_CLASS = 6,
+ V4L2_CTRL_TYPE_STRING = 7,
+};
+
/* Used in the VIDIOC_QUERYCTRL ioctl for querying controls */
struct v4l2_queryctrl {
__u32 id;
@@ -1023,14 +1028,24 @@ enum v4l2_colorfx {
V4L2_COLORFX_NONE = 0,
V4L2_COLORFX_BW = 1,
V4L2_COLORFX_SEPIA = 2,
+ V4L2_COLORFX_NEGATIVE = 3,
+ V4L2_COLORFX_EMBOSS = 4,
+ V4L2_COLORFX_SKETCH = 5,
+ V4L2_COLORFX_SKY_BLUE = 6,
+ V4L2_COLORFX_GRASS_GREEN = 7,
+ V4L2_COLORFX_SKIN_WHITEN = 8,
+ V4L2_COLORFX_VIVID = 9,
};
#define V4L2_CID_AUTOBRIGHTNESS (V4L2_CID_BASE+32)
#define V4L2_CID_BAND_STOP_FILTER (V4L2_CID_BASE+33)
#define V4L2_CID_ROTATE (V4L2_CID_BASE+34)
#define V4L2_CID_BG_COLOR (V4L2_CID_BASE+35)
+
+#define V4L2_CID_CHROMA_GAIN (V4L2_CID_BASE+36)
+
/* last CID + 1 */
-#define V4L2_CID_LASTP1 (V4L2_CID_BASE+36)
+#define V4L2_CID_LASTP1 (V4L2_CID_BASE+37)
/* MPEG-class control IDs defined by V4L2 */
#define V4L2_CID_MPEG_BASE (V4L2_CTRL_CLASS_MPEG | 0x900)
@@ -1276,6 +1291,9 @@ enum v4l2_exposure_auto_type {
#define V4L2_CID_PRIVACY (V4L2_CID_CAMERA_CLASS_BASE+16)
+#define V4L2_CID_IRIS_ABSOLUTE (V4L2_CID_CAMERA_CLASS_BASE+17)
+#define V4L2_CID_IRIS_RELATIVE (V4L2_CID_CAMERA_CLASS_BASE+18)
+
/* FM Modulator class control IDs */
#define V4L2_CID_FM_TX_CLASS_BASE (V4L2_CTRL_CLASS_FM_TX | 0x900)
#define V4L2_CID_FM_TX_CLASS (V4L2_CTRL_CLASS_FM_TX | 1)
@@ -1621,6 +1639,38 @@ struct v4l2_streamparm {
};
/*
+ * E V E N T S
+ */
+
+#define V4L2_EVENT_ALL 0
+#define V4L2_EVENT_VSYNC 1
+#define V4L2_EVENT_EOS 2
+#define V4L2_EVENT_PRIVATE_START 0x08000000
+
+/* Payload for V4L2_EVENT_VSYNC */
+struct v4l2_event_vsync {
+ /* Can be V4L2_FIELD_ANY, _NONE, _TOP or _BOTTOM */
+ __u8 field;
+} __attribute__ ((packed));
+
+struct v4l2_event {
+ __u32 type;
+ union {
+ struct v4l2_event_vsync vsync;
+ __u8 data[64];
+ } u;
+ __u32 pending;
+ __u32 sequence;
+ struct timespec timestamp;
+ __u32 reserved[9];
+};
+
+struct v4l2_event_subscription {
+ __u32 type;
+ __u32 reserved[7];
+};
+
+/*
* A D V A N C E D D E B U G G I N G
*
* NOTE: EXPERIMENTAL API, NEVER RELY ON THIS IN APPLICATIONS!
@@ -1742,6 +1792,9 @@ struct v4l2_dbg_chip_ident {
#define VIDIOC_QUERY_DV_PRESET _IOR('V', 86, struct v4l2_dv_preset)
#define VIDIOC_S_DV_TIMINGS _IOWR('V', 87, struct v4l2_dv_timings)
#define VIDIOC_G_DV_TIMINGS _IOWR('V', 88, struct v4l2_dv_timings)
+#define VIDIOC_DQEVENT _IOR('V', 89, struct v4l2_event)
+#define VIDIOC_SUBSCRIBE_EVENT _IOW('V', 90, struct v4l2_event_subscription)
+#define VIDIOC_UNSUBSCRIBE_EVENT _IOW('V', 91, struct v4l2_event_subscription)
/* Reminder: when adding new ioctls please add support for them to
drivers/media/video/v4l2-compat-ioctl32.c as well! */
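The three new ioctls follow a subscribe-then-dequeue pattern. A minimal user-space sketch, assuming a /dev/video0 node whose driver actually generates VSYNC events; the device path and the error handling are illustrative only.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Returns the field of the next vertical sync, or -1 on error. */
int wait_for_vsync(void)
{
	struct v4l2_event_subscription sub;
	struct v4l2_event ev;
	int fd = open("/dev/video0", O_RDWR);	/* device path assumed */

	if (fd < 0)
		return -1;
	memset(&sub, 0, sizeof(sub));
	sub.type = V4L2_EVENT_VSYNC;
	if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0)
		return -1;
	if (ioctl(fd, VIDIOC_DQEVENT, &ev) < 0)	/* blocks until an event arrives */
		return -1;
	return ev.u.vsync.field;		/* V4L2_FIELD_TOP, _BOTTOM, ... */
}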
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 40d1709bdbf4..aff5b4f74041 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -7,6 +7,7 @@
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
+#include <linux/gfp.h>
/**
* virtqueue - a queue to register buffers for sending or receiving.
@@ -14,7 +15,6 @@
* @callback: the function to call when buffers are consumed (can be NULL).
* @name: the name of this virtqueue (mainly for debugging)
* @vdev: the virtio device this queue was created for.
- * @vq_ops: the operations for this virtqueue (see below).
* @priv: a pointer for the virtqueue implementation to use.
*/
struct virtqueue {
@@ -22,60 +22,71 @@ struct virtqueue {
void (*callback)(struct virtqueue *vq);
const char *name;
struct virtio_device *vdev;
- struct virtqueue_ops *vq_ops;
void *priv;
};
/**
- * virtqueue_ops - operations for virtqueue abstraction layer
- * @add_buf: expose buffer to other end
+ * operations for virtqueue
+ * virtqueue_add_buf: expose buffer to other end
* vq: the struct virtqueue we're talking about.
* sg: the description of the buffer(s).
* out_num: the number of sg readable by other side
* in_num: the number of sg which are writable (after readable ones)
* data: the token identifying the buffer.
+ * gfp: how to do memory allocations (if necessary).
* Returns remaining capacity of queue (sg segments) or a negative error.
- * @kick: update after add_buf
+ * virtqueue_kick: update after add_buf
* vq: the struct virtqueue
* After one or more add_buf calls, invoke this to kick the other side.
- * @get_buf: get the next used buffer
+ * virtqueue_get_buf: get the next used buffer
* vq: the struct virtqueue we're talking about.
* len: the length written into the buffer
* Returns NULL or the "data" token handed to add_buf.
- * @disable_cb: disable callbacks
+ * virtqueue_disable_cb: disable callbacks
* vq: the struct virtqueue we're talking about.
* Note that this is not necessarily synchronous, hence unreliable and only
* useful as an optimization.
- * @enable_cb: restart callbacks after disable_cb.
+ * virtqueue_enable_cb: restart callbacks after disable_cb.
* vq: the struct virtqueue we're talking about.
* This re-enables callbacks; it returns "false" if there are pending
* buffers in the queue, to detect a possible race between the driver
* checking for more work, and enabling callbacks.
- * @detach_unused_buf: detach first unused buffer
+ * virtqueue_detach_unused_buf: detach first unused buffer
* vq: the struct virtqueue we're talking about.
* Returns NULL or the "data" token handed to add_buf
*
* Locking rules are straightforward: the driver is responsible for
* locking. No two operations may be invoked simultaneously, with the exception
- * of @disable_cb.
+ * of virtqueue_disable_cb.
*
* All operations can be called in any context.
*/
-struct virtqueue_ops {
- int (*add_buf)(struct virtqueue *vq,
- struct scatterlist sg[],
- unsigned int out_num,
- unsigned int in_num,
- void *data);
- void (*kick)(struct virtqueue *vq);
+int virtqueue_add_buf_gfp(struct virtqueue *vq,
+ struct scatterlist sg[],
+ unsigned int out_num,
+ unsigned int in_num,
+ void *data,
+ gfp_t gfp);
- void *(*get_buf)(struct virtqueue *vq, unsigned int *len);
+static inline int virtqueue_add_buf(struct virtqueue *vq,
+ struct scatterlist sg[],
+ unsigned int out_num,
+ unsigned int in_num,
+ void *data)
+{
+ return virtqueue_add_buf_gfp(vq, sg, out_num, in_num, data, GFP_ATOMIC);
+}
- void (*disable_cb)(struct virtqueue *vq);
- bool (*enable_cb)(struct virtqueue *vq);
- void *(*detach_unused_buf)(struct virtqueue *vq);
-};
+void virtqueue_kick(struct virtqueue *vq);
+
+void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len);
+
+void virtqueue_disable_cb(struct virtqueue *vq);
+
+bool virtqueue_enable_cb(struct virtqueue *vq);
+
+void *virtqueue_detach_unused_buf(struct virtqueue *vq);
/**
* virtio_device - representation of a device using virtio
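With the virtqueue_ops indirection gone, drivers call the functions above directly. A condensed sketch of the add/kick/reclaim cycle, assuming one outgoing buffer per call; the function names and the cookie handling are illustrative, not taken from any real driver.

#include <linux/virtio.h>
#include <linux/scatterlist.h>

/* Queue one buffer readable by the other side and notify it. */
static int send_one(struct virtqueue *vq, void *buf, unsigned int len, void *cookie)
{
	struct scatterlist sg;
	int err;

	sg_init_one(&sg, buf, len);
	err = virtqueue_add_buf(vq, &sg, 1, 0, cookie);	/* GFP_ATOMIC wrapper */
	if (err < 0)
		return err;
	virtqueue_kick(vq);
	return 0;
}

/* Typically run from the virtqueue callback: drain completed buffers. */
static void reclaim_done(struct virtqueue *vq)
{
	unsigned int len;
	void *cookie;

	while ((cookie = virtqueue_get_buf(vq, &len)) != NULL)
		;	/* 'cookie' is the token given to virtqueue_add_buf() */
}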
diff --git a/include/linux/virtio_blk.h b/include/linux/virtio_blk.h
index e52029e98919..167720d695ed 100644
--- a/include/linux/virtio_blk.h
+++ b/include/linux/virtio_blk.h
@@ -17,6 +17,8 @@
#define VIRTIO_BLK_F_FLUSH 9 /* Cache flush command support */
#define VIRTIO_BLK_F_TOPOLOGY 10 /* Topology information is available */
+#define VIRTIO_BLK_ID_BYTES 20 /* ID string length */
+
struct virtio_blk_config {
/* The capacity (in 512-byte sectors). */
__u64 capacity;
@@ -67,6 +69,9 @@ struct virtio_blk_config {
/* Cache flush command */
#define VIRTIO_BLK_T_FLUSH 4
+/* Get device ID command */
+#define VIRTIO_BLK_T_GET_ID 8
+
/* Barrier before this op. */
#define VIRTIO_BLK_T_BARRIER 0x80000000
diff --git a/include/linux/virtio_console.h b/include/linux/virtio_console.h
index 92228a8fbcbc..a85064db8f94 100644
--- a/include/linux/virtio_console.h
+++ b/include/linux/virtio_console.h
@@ -12,14 +12,39 @@
/* Feature bits */
#define VIRTIO_CONSOLE_F_SIZE 0 /* Does host provide console size? */
+#define VIRTIO_CONSOLE_F_MULTIPORT 1 /* Does host provide multiple ports? */
+
+#define VIRTIO_CONSOLE_BAD_ID (~(u32)0)
struct virtio_console_config {
 	/* columns of the screens */
__u16 cols;
/* rows of the screens */
__u16 rows;
+ /* max. number of ports this device can hold */
+ __u32 max_nr_ports;
} __attribute__((packed));
+/*
+ * A message that's passed between the Host and the Guest for a
+ * particular port.
+ */
+struct virtio_console_control {
+ __u32 id; /* Port number */
+ __u16 event; /* The kind of control event (see below) */
+ __u16 value; /* Extra information for the key */
+};
+
+/* Some events for control messages */
+#define VIRTIO_CONSOLE_DEVICE_READY 0
+#define VIRTIO_CONSOLE_PORT_ADD 1
+#define VIRTIO_CONSOLE_PORT_REMOVE 2
+#define VIRTIO_CONSOLE_PORT_READY 3
+#define VIRTIO_CONSOLE_CONSOLE_PORT 4
+#define VIRTIO_CONSOLE_RESIZE 5
+#define VIRTIO_CONSOLE_PORT_OPEN 6
+#define VIRTIO_CONSOLE_PORT_NAME 7
+
#ifdef __KERNEL__
int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int));
#endif /* __KERNEL__ */
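A control message is just an (id, event, value) triple exchanged over a dedicated virtqueue. A hedged sketch of how the guest side might dispatch one after dequeuing and length-checking it; the actions are shown as comments because the surrounding port bookkeeping is not part of this header.

#include <linux/virtio_console.h>

static void handle_control(const struct virtio_console_control *cpkt)
{
	switch (cpkt->event) {
	case VIRTIO_CONSOLE_PORT_ADD:
		/* allocate and register a new port for cpkt->id */
		break;
	case VIRTIO_CONSOLE_PORT_OPEN:
		/* cpkt->value tells whether the host opened (1) or closed (0) it */
		break;
	case VIRTIO_CONSOLE_RESIZE:
		/* re-read rows/cols from the config space for this console */
		break;
	default:
		break;
	}
}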
diff --git a/include/linux/wireless.h b/include/linux/wireless.h
index 5b4c6c772a9b..e6827eedf18b 100644
--- a/include/linux/wireless.h
+++ b/include/linux/wireless.h
@@ -346,6 +346,8 @@
#define SIOCIWFIRST 0x8B00
#define SIOCIWLAST SIOCIWLASTPRIV /* 0x8BFF */
#define IW_IOCTL_IDX(cmd) ((cmd) - SIOCIWFIRST)
+#define IW_HANDLER(id, func) \
+ [IW_IOCTL_IDX(id)] = func
/* Odd : get (world access), even : set (root access) */
#define IW_IS_SET(cmd) (!((cmd) & 0x1))
@@ -648,7 +650,7 @@
* 32 bit bitmasks. Note : 32 bits = 0x20 = 2^5. */
#define IW_EVENT_CAPA_BASE(cmd) ((cmd >= SIOCIWFIRSTPRIV) ? \
(cmd - SIOCIWFIRSTPRIV + 0x60) : \
- (cmd - SIOCSIWCOMMIT))
+ (cmd - SIOCIWFIRST))
#define IW_EVENT_CAPA_INDEX(cmd) (IW_EVENT_CAPA_BASE(cmd) >> 5)
#define IW_EVENT_CAPA_MASK(cmd) (1 << (IW_EVENT_CAPA_BASE(cmd) & 0x1F))
/* Event capability constants - event autogenerated by the kernel
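IW_HANDLER() is a designated-initializer shorthand for the sparse handler array that wireless drivers hang off struct iw_handler_def. A short sketch of its intended use; the two handler functions are placeholders.

#include <net/iw_handler.h>	/* iw_handler, struct iw_handler_def */

static int my_get_name(struct net_device *dev, struct iw_request_info *info,
		       union iwreq_data *wrqu, char *extra);
static int my_set_freq(struct net_device *dev, struct iw_request_info *info,
		       union iwreq_data *wrqu, char *extra);

static const iw_handler my_wext_handlers[] = {
	IW_HANDLER(SIOCGIWNAME, my_get_name),	/* placeholder */
	IW_HANDLER(SIOCSIWFREQ, my_set_freq),	/* placeholder */
};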
diff --git a/include/linux/wlp.h b/include/linux/wlp.h
index ac95ce6606ac..e8e3ff26da58 100644
--- a/include/linux/wlp.h
+++ b/include/linux/wlp.h
@@ -655,7 +655,7 @@ struct wlp {
struct mutex nbmutex; /* Neighbor mutex protects neighbors list */
struct list_head neighbors; /* Elements are wlp_neighbor_e */
struct uwb_notifs_handler uwb_notifs_handler;
- struct wlp_device_info *dev_info;
+ struct wlp_device_info *wdi;
void (*fill_device_info)(struct wlp *wlp, struct wlp_device_info *info);
int (*xmit_frame)(struct wlp *, struct sk_buff *,
struct uwb_dev_addr *);
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 36520ded3e06..e9dd9b6f336a 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -96,8 +96,9 @@ static inline void inode_sync_wait(struct inode *inode)
/*
* mm/page-writeback.c
*/
-void laptop_io_completion(void);
+void laptop_io_completion(struct backing_dev_info *info);
void laptop_sync_completion(void);
+void laptop_mode_sync(struct work_struct *work);
void throttle_vm_writeout(gfp_t gfp_mask);
/* These are exported to sysctl. */
diff --git a/include/linux/z2_battery.h b/include/linux/z2_battery.h
new file mode 100644
index 000000000000..7b9750404d22
--- /dev/null
+++ b/include/linux/z2_battery.h
@@ -0,0 +1,17 @@
+#ifndef _LINUX_Z2_BATTERY_H
+#define _LINUX_Z2_BATTERY_H
+
+struct z2_battery_info {
+ int batt_I2C_bus;
+ int batt_I2C_addr;
+ int batt_I2C_reg;
+ int charge_gpio;
+ int min_voltage;
+ int max_voltage;
+ int batt_div;
+ int batt_mult;
+ int batt_tech;
+ char *batt_name;
+};
+
+#endif
diff --git a/include/media/ak881x.h b/include/media/ak881x.h
new file mode 100644
index 000000000000..b7f2add5ce7b
--- /dev/null
+++ b/include/media/ak881x.h
@@ -0,0 +1,25 @@
+/*
+ * Header for AK8813 / AK8814 TV encoders from Asahi Kasei Microsystems Co., Ltd. (AKM)
+ *
+ * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef AK881X_H
+#define AK881X_H
+
+#define AK881X_IF_MODE_MASK (3 << 0)
+#define AK881X_IF_MODE_BT656 (0 << 0)
+#define AK881X_IF_MODE_MASTER (1 << 0)
+#define AK881X_IF_MODE_SLAVE (2 << 0)
+#define AK881X_FIELD (1 << 2)
+#define AK881X_COMPONENT (1 << 3)
+
+struct ak881x_pdata {
+ unsigned long flags;
+};
+
+#endif
diff --git a/include/media/davinci/vpfe_capture.h b/include/media/davinci/vpfe_capture.h
index 4314a5f6a087..cc973ed845a7 100644
--- a/include/media/davinci/vpfe_capture.h
+++ b/include/media/davinci/vpfe_capture.h
@@ -94,6 +94,8 @@ struct vpfe_config {
/* vpfe clock */
struct clk *vpssclk;
struct clk *slaveclk;
+ /* Function for Clearing the interrupt */
+ void (*clr_intr)(int vdint);
};
struct vpfe_device {
diff --git a/include/media/ir-common.h b/include/media/ir-common.h
index c66298062d39..528050e39ad9 100644
--- a/include/media/ir-common.h
+++ b/include/media/ir-common.h
@@ -50,6 +50,10 @@ struct card_ir {
struct ir_input_state ir;
char name[32];
char phys[32];
+ int users;
+
+ u32 running:1;
+ struct ir_dev_props props;
/* Usual gpio signalling */
@@ -79,6 +83,9 @@ struct card_ir {
/* NEC decoding */
u32 nec_gpio;
struct tasklet_struct tlet;
+
+ /* IR core raw decoding */
+ u32 raw_decode;
};
/* Routines from ir-functions.c */
@@ -97,71 +104,4 @@ u32 ir_rc5_decode(unsigned int code);
void ir_rc5_timer_end(unsigned long data);
void ir_rc5_timer_keyup(unsigned long data);
-/* scancode->keycode map tables from ir-keymaps.c */
-
-extern struct ir_scancode_table ir_codes_empty_table;
-extern struct ir_scancode_table ir_codes_avermedia_table;
-extern struct ir_scancode_table ir_codes_avermedia_dvbt_table;
-extern struct ir_scancode_table ir_codes_avermedia_m135a_table;
-extern struct ir_scancode_table ir_codes_avermedia_cardbus_table;
-extern struct ir_scancode_table ir_codes_apac_viewcomp_table;
-extern struct ir_scancode_table ir_codes_pixelview_table;
-extern struct ir_scancode_table ir_codes_pixelview_new_table;
-extern struct ir_scancode_table ir_codes_nebula_table;
-extern struct ir_scancode_table ir_codes_dntv_live_dvb_t_table;
-extern struct ir_scancode_table ir_codes_iodata_bctv7e_table;
-extern struct ir_scancode_table ir_codes_adstech_dvb_t_pci_table;
-extern struct ir_scancode_table ir_codes_msi_tvanywhere_table;
-extern struct ir_scancode_table ir_codes_cinergy_1400_table;
-extern struct ir_scancode_table ir_codes_avertv_303_table;
-extern struct ir_scancode_table ir_codes_dntv_live_dvbt_pro_table;
-extern struct ir_scancode_table ir_codes_em_terratec_table;
-extern struct ir_scancode_table ir_codes_pinnacle_grey_table;
-extern struct ir_scancode_table ir_codes_flyvideo_table;
-extern struct ir_scancode_table ir_codes_flydvb_table;
-extern struct ir_scancode_table ir_codes_cinergy_table;
-extern struct ir_scancode_table ir_codes_eztv_table;
-extern struct ir_scancode_table ir_codes_avermedia_table;
-extern struct ir_scancode_table ir_codes_videomate_tv_pvr_table;
-extern struct ir_scancode_table ir_codes_manli_table;
-extern struct ir_scancode_table ir_codes_gotview7135_table;
-extern struct ir_scancode_table ir_codes_purpletv_table;
-extern struct ir_scancode_table ir_codes_pctv_sedna_table;
-extern struct ir_scancode_table ir_codes_pv951_table;
-extern struct ir_scancode_table ir_codes_rc5_tv_table;
-extern struct ir_scancode_table ir_codes_winfast_table;
-extern struct ir_scancode_table ir_codes_pinnacle_color_table;
-extern struct ir_scancode_table ir_codes_hauppauge_new_table;
-extern struct ir_scancode_table ir_codes_rc5_hauppauge_new_table;
-extern struct ir_scancode_table ir_codes_npgtech_table;
-extern struct ir_scancode_table ir_codes_norwood_table;
-extern struct ir_scancode_table ir_codes_proteus_2309_table;
-extern struct ir_scancode_table ir_codes_budget_ci_old_table;
-extern struct ir_scancode_table ir_codes_asus_pc39_table;
-extern struct ir_scancode_table ir_codes_encore_enltv_table;
-extern struct ir_scancode_table ir_codes_encore_enltv2_table;
-extern struct ir_scancode_table ir_codes_tt_1500_table;
-extern struct ir_scancode_table ir_codes_fusionhdtv_mce_table;
-extern struct ir_scancode_table ir_codes_behold_table;
-extern struct ir_scancode_table ir_codes_behold_columbus_table;
-extern struct ir_scancode_table ir_codes_pinnacle_pctv_hd_table;
-extern struct ir_scancode_table ir_codes_genius_tvgo_a11mce_table;
-extern struct ir_scancode_table ir_codes_powercolor_real_angel_table;
-extern struct ir_scancode_table ir_codes_avermedia_a16d_table;
-extern struct ir_scancode_table ir_codes_encore_enltv_fm53_table;
-extern struct ir_scancode_table ir_codes_real_audio_220_32_keys_table;
-extern struct ir_scancode_table ir_codes_msi_tvanywhere_plus_table;
-extern struct ir_scancode_table ir_codes_ati_tv_wonder_hd_600_table;
-extern struct ir_scancode_table ir_codes_kworld_plus_tv_analog_table;
-extern struct ir_scancode_table ir_codes_kaiomy_table;
-extern struct ir_scancode_table ir_codes_dm1105_nec_table;
-extern struct ir_scancode_table ir_codes_tevii_nec_table;
-extern struct ir_scancode_table ir_codes_tbs_nec_table;
-extern struct ir_scancode_table ir_codes_evga_indtube_table;
-extern struct ir_scancode_table ir_codes_terratec_cinergy_xs_table;
-extern struct ir_scancode_table ir_codes_videomate_s350_table;
-extern struct ir_scancode_table ir_codes_gadmei_rm008z_table;
-extern struct ir_scancode_table ir_codes_nec_terratec_cinergy_xs_table;
-extern struct ir_scancode_table ir_codes_winfast_usbii_deluxe_table;
-extern struct ir_scancode_table ir_codes_kworld_315u_table;
#endif
diff --git a/include/media/ir-core.h b/include/media/ir-core.h
index 61c223bc3953..ad1303f20e00 100644
--- a/include/media/ir-core.h
+++ b/include/media/ir-core.h
@@ -1,6 +1,8 @@
/*
* Remote Controller core header
*
+ * Copyright (C) 2009-2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation version 2 of the License.
@@ -14,61 +16,133 @@
#ifndef _IR_CORE
#define _IR_CORE
-#include <linux/input.h>
#include <linux/spinlock.h>
+#include <linux/kfifo.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#include <media/rc-map.h>
extern int ir_core_debug;
#define IR_dprintk(level, fmt, arg...) if (ir_core_debug >= level) \
printk(KERN_DEBUG "%s: " fmt , __func__, ## arg)
-#define IR_TYPE_UNKNOWN 0
-#define IR_TYPE_RC5 (1 << 0) /* Philips RC5 protocol */
-#define IR_TYPE_PD (1 << 1) /* Pulse distance encoded IR */
-#define IR_TYPE_NEC (1 << 2)
-#define IR_TYPE_OTHER (((u64)1) << 63l)
-
-struct ir_scancode {
- u16 scancode;
- u32 keycode;
-};
-
-struct ir_scancode_table {
- struct ir_scancode *scan;
- int size;
- u64 ir_type;
- spinlock_t lock;
+enum rc_driver_type {
+ RC_DRIVER_SCANCODE = 0, /* Driver or hardware generates a scancode */
+	RC_DRIVER_IR_RAW,	/* Needs an Infra-Red pulse/space decoder */
};
+/**
+ * struct ir_dev_props - Allow caller drivers to set special properties
+ * @driver_type: specifies whether the driver or hardware already has a decoder,
+ *	or whether it needs to use the IR raw event decoders to produce a scancode
+ * @allowed_protos: bitmask with the supported IR_TYPE_* protocols
+ * @scanmask: some hardware decoders are not capable of providing the full
+ *	scancode to the application. As this is a hardware limitation, we can't do
+ *	anything about it. Yet, as the same keycode table can be used with other
+ *	devices, a mask is provided to allow its usage. Drivers should generally
+ *	leave this field blank
+ * @priv: driver-specific data, to be used in the callbacks
+ * @change_protocol: allow changing the protocol used on hardware decoders
+ * @open: callback to allow drivers to enable polling/irq when the IR input device
+ *	is opened.
+ * @close: callback to allow drivers to disable polling/irq when the IR input device
+ *	is closed.
+ */
struct ir_dev_props {
- unsigned long allowed_protos;
- void *priv;
- int (*change_protocol)(void *priv, u64 ir_type);
+ enum rc_driver_type driver_type;
+ unsigned long allowed_protos;
+ u32 scanmask;
+ void *priv;
+ int (*change_protocol)(void *priv, u64 ir_type);
+ int (*open)(void *priv);
+ void (*close)(void *priv);
};
-
struct ir_input_dev {
- struct input_dev *dev; /* Input device*/
+ struct device dev; /* device */
+ char *driver_name; /* Name of the driver module */
struct ir_scancode_table rc_tab; /* scan/key table */
unsigned long devno; /* device number */
- struct attribute_group attr; /* IR attributes */
- struct device *class_dev; /* virtual class dev */
const struct ir_dev_props *props; /* Device properties */
+ struct ir_raw_event_ctrl *raw; /* for raw pulse/space events */
+ struct input_dev *input_dev; /* the input device associated with this device */
+
+ /* key info - needed by IR keycode handlers */
+ spinlock_t keylock; /* protects the below members */
+ bool keypressed; /* current state */
+ unsigned long keyup_jiffies; /* when should the current keypress be released? */
+ struct timer_list timer_keyup; /* timer for releasing a keypress */
+ u32 last_keycode; /* keycode of last command */
+ u32 last_scancode; /* scancode of last command */
+ u8 last_toggle; /* toggle of last command */
};
-#define to_ir_input_dev(_attr) container_of(_attr, struct ir_input_dev, attr)
-/* Routines from ir-keytable.c */
+enum raw_event_type {
+ IR_SPACE = (1 << 0),
+ IR_PULSE = (1 << 1),
+ IR_START_EVENT = (1 << 2),
+ IR_STOP_EVENT = (1 << 3),
+};
-u32 ir_g_keycode_from_table(struct input_dev *input_dev,
- u32 scancode);
+#define to_ir_input_dev(_attr) container_of(_attr, struct ir_input_dev, attr)
-int ir_input_register(struct input_dev *dev,
+/* From ir-keytable.c */
+int __ir_input_register(struct input_dev *dev,
const struct ir_scancode_table *ir_codes,
- const struct ir_dev_props *props);
+ const struct ir_dev_props *props,
+ const char *driver_name);
+
+static inline int ir_input_register(struct input_dev *dev,
+ const char *map_name,
+ const struct ir_dev_props *props,
+ const char *driver_name) {
+ struct ir_scancode_table *ir_codes;
+ struct ir_input_dev *ir_dev;
+ int rc;
+
+ if (!map_name)
+ return -EINVAL;
+
+ ir_codes = get_rc_map(map_name);
+ if (!ir_codes)
+ return -EINVAL;
+
+ rc = __ir_input_register(dev, ir_codes, props, driver_name);
+ if (rc < 0)
+ return -EINVAL;
+
+ ir_dev = input_get_drvdata(dev);
+
+ if (!rc && ir_dev->props && ir_dev->props->change_protocol)
+ rc = ir_dev->props->change_protocol(ir_dev->props->priv,
+ ir_codes->ir_type);
+
+ return rc;
+}
+
void ir_input_unregister(struct input_dev *input_dev);
-/* Routines from ir-sysfs.c */
+void ir_repeat(struct input_dev *dev);
+void ir_keydown(struct input_dev *dev, int scancode, u8 toggle);
+u32 ir_g_keycode_from_table(struct input_dev *input_dev, u32 scancode);
+
+/* From ir-raw-event.c */
+
+struct ir_raw_event {
+ unsigned pulse:1;
+ unsigned duration:31;
+};
+
+#define IR_MAX_DURATION 0x7FFFFFFF /* a bit more than 2 seconds */
-int ir_register_class(struct input_dev *input_dev);
-void ir_unregister_class(struct input_dev *input_dev);
+void ir_raw_event_handle(struct input_dev *input_dev);
+int ir_raw_event_store(struct input_dev *input_dev, struct ir_raw_event *ev);
+int ir_raw_event_store_edge(struct input_dev *input_dev, enum raw_event_type type);
+static inline void ir_raw_event_reset(struct input_dev *input_dev)
+{
+ struct ir_raw_event ev = { .pulse = false, .duration = 0 };
+ ir_raw_event_store(input_dev, &ev);
+ ir_raw_event_handle(input_dev);
+}
-#endif
+#endif /* _IR_CORE */
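A bridge driver now registers with a map name from rc-map.h plus its ir_dev_props rather than a scancode table pointer. A condensed sketch of such a registration; the driver name, the chosen map and the priv cookie are illustrative.

#include <media/ir-core.h>

static int my_ir_setup(struct input_dev *input, void *hw_priv)
{
	static struct ir_dev_props props = {
		.driver_type	= RC_DRIVER_IR_RAW,	/* hardware delivers pulse/space */
		.allowed_protos	= IR_TYPE_NEC | IR_TYPE_RC5,
	};

	props.priv = hw_priv;
	/* RC_MAP_EMPTY and the driver name are illustrative choices. */
	return ir_input_register(input, RC_MAP_EMPTY, &props, "my-ir-driver");
}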
diff --git a/include/media/ir-kbd-i2c.h b/include/media/ir-kbd-i2c.h
index 9142936603cc..0506e45c9a4f 100644
--- a/include/media/ir-kbd-i2c.h
+++ b/include/media/ir-kbd-i2c.h
@@ -6,7 +6,7 @@
struct IR_i2c;
struct IR_i2c {
- struct ir_scancode_table *ir_codes;
+ char *ir_codes;
struct i2c_client *c;
struct input_dev *input;
@@ -34,9 +34,9 @@ enum ir_kbd_get_key_fn {
/* Can be passed when instantiating an ir_video i2c device */
struct IR_i2c_init_data {
- struct ir_scancode_table *ir_codes;
+ char *ir_codes;
const char *name;
- u64 type; /* IR_TYPE_RC5, IR_TYPE_PD, etc */
+ u64 type; /* IR_TYPE_RC5, etc */
/*
* Specify either a function pointer or a value indicating one of
* ir_kbd_i2c's internal get_key functions
diff --git a/include/media/rc-map.h b/include/media/rc-map.h
new file mode 100644
index 000000000000..5833966a7100
--- /dev/null
+++ b/include/media/rc-map.h
@@ -0,0 +1,121 @@
+/*
+ * rc-map.h - define RC map names used by RC drivers
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/input.h>
+
+#define IR_TYPE_UNKNOWN 0
+#define IR_TYPE_RC5 (1 << 0) /* Philips RC5 protocol */
+#define IR_TYPE_NEC (1 << 1)
+#define IR_TYPE_RC6 (1 << 2) /* Philips RC6 protocol */
+#define IR_TYPE_JVC (1 << 3) /* JVC protocol */
+#define IR_TYPE_SONY (1 << 4) /* Sony12/15/20 protocol */
+#define IR_TYPE_OTHER (1u << 31)
+
+struct ir_scancode {
+ u32 scancode;
+ u32 keycode;
+};
+
+struct ir_scancode_table {
+ struct ir_scancode *scan;
+ unsigned int size; /* Max number of entries */
+ unsigned int len; /* Used number of entries */
+ unsigned int alloc; /* Size of *scan in bytes */
+ u64 ir_type;
+ char *name;
+ spinlock_t lock;
+};
+
+struct rc_keymap {
+ struct list_head list;
+ struct ir_scancode_table map;
+};
+
+/* Routines from rc-map.c */
+
+int ir_register_map(struct rc_keymap *map);
+void ir_unregister_map(struct rc_keymap *map);
+struct ir_scancode_table *get_rc_map(const char *name);
+void rc_map_init(void);
+
+/* Names of the several keytables defined in-kernel */
+
+#define RC_MAP_ADSTECH_DVB_T_PCI "rc-adstech-dvb-t-pci"
+#define RC_MAP_APAC_VIEWCOMP "rc-apac-viewcomp"
+#define RC_MAP_ASUS_PC39 "rc-asus-pc39"
+#define RC_MAP_ATI_TV_WONDER_HD_600 "rc-ati-tv-wonder-hd-600"
+#define RC_MAP_AVERMEDIA_A16D "rc-avermedia-a16d"
+#define RC_MAP_AVERMEDIA_CARDBUS "rc-avermedia-cardbus"
+#define RC_MAP_AVERMEDIA_DVBT "rc-avermedia-dvbt"
+#define RC_MAP_AVERMEDIA_M135A_RM_JX "rc-avermedia-m135a-rm-jx"
+#define RC_MAP_AVERMEDIA "rc-avermedia"
+#define RC_MAP_AVERTV_303 "rc-avertv-303"
+#define RC_MAP_BEHOLD_COLUMBUS "rc-behold-columbus"
+#define RC_MAP_BEHOLD "rc-behold"
+#define RC_MAP_BUDGET_CI_OLD "rc-budget-ci-old"
+#define RC_MAP_CINERGY_1400 "rc-cinergy-1400"
+#define RC_MAP_CINERGY "rc-cinergy"
+#define RC_MAP_DM1105_NEC "rc-dm1105-nec"
+#define RC_MAP_DNTV_LIVE_DVBT_PRO "rc-dntv-live-dvbt-pro"
+#define RC_MAP_DNTV_LIVE_DVB_T "rc-dntv-live-dvb-t"
+#define RC_MAP_EMPTY "rc-empty"
+#define RC_MAP_EM_TERRATEC "rc-em-terratec"
+#define RC_MAP_ENCORE_ENLTV2 "rc-encore-enltv2"
+#define RC_MAP_ENCORE_ENLTV_FM53 "rc-encore-enltv-fm53"
+#define RC_MAP_ENCORE_ENLTV "rc-encore-enltv"
+#define RC_MAP_EVGA_INDTUBE "rc-evga-indtube"
+#define RC_MAP_EZTV "rc-eztv"
+#define RC_MAP_FLYDVB "rc-flydvb"
+#define RC_MAP_FLYVIDEO "rc-flyvideo"
+#define RC_MAP_FUSIONHDTV_MCE "rc-fusionhdtv-mce"
+#define RC_MAP_GADMEI_RM008Z "rc-gadmei-rm008z"
+#define RC_MAP_GENIUS_TVGO_A11MCE "rc-genius-tvgo-a11mce"
+#define RC_MAP_GOTVIEW7135 "rc-gotview7135"
+#define RC_MAP_HAUPPAUGE_NEW "rc-hauppauge-new"
+#define RC_MAP_IMON_MCE "rc-imon-mce"
+#define RC_MAP_IMON_PAD "rc-imon-pad"
+#define RC_MAP_IODATA_BCTV7E "rc-iodata-bctv7e"
+#define RC_MAP_KAIOMY "rc-kaiomy"
+#define RC_MAP_KWORLD_315U "rc-kworld-315u"
+#define RC_MAP_KWORLD_PLUS_TV_ANALOG "rc-kworld-plus-tv-analog"
+#define RC_MAP_MANLI "rc-manli"
+#define RC_MAP_MSI_TVANYWHERE_PLUS "rc-msi-tvanywhere-plus"
+#define RC_MAP_MSI_TVANYWHERE "rc-msi-tvanywhere"
+#define RC_MAP_NEBULA "rc-nebula"
+#define RC_MAP_NEC_TERRATEC_CINERGY_XS "rc-nec-terratec-cinergy-xs"
+#define RC_MAP_NORWOOD "rc-norwood"
+#define RC_MAP_NPGTECH "rc-npgtech"
+#define RC_MAP_PCTV_SEDNA "rc-pctv-sedna"
+#define RC_MAP_PINNACLE_COLOR "rc-pinnacle-color"
+#define RC_MAP_PINNACLE_GREY "rc-pinnacle-grey"
+#define RC_MAP_PINNACLE_PCTV_HD "rc-pinnacle-pctv-hd"
+#define RC_MAP_PIXELVIEW_NEW "rc-pixelview-new"
+#define RC_MAP_PIXELVIEW "rc-pixelview"
+#define RC_MAP_PIXELVIEW_MK12 "rc-pixelview-mk12"
+#define RC_MAP_POWERCOLOR_REAL_ANGEL "rc-powercolor-real-angel"
+#define RC_MAP_PROTEUS_2309 "rc-proteus-2309"
+#define RC_MAP_PURPLETV "rc-purpletv"
+#define RC_MAP_PV951 "rc-pv951"
+#define RC_MAP_RC5_HAUPPAUGE_NEW "rc-rc5-hauppauge-new"
+#define RC_MAP_RC5_TV "rc-rc5-tv"
+#define RC_MAP_REAL_AUDIO_220_32_KEYS "rc-real-audio-220-32-keys"
+#define RC_MAP_TBS_NEC "rc-tbs-nec"
+#define RC_MAP_TERRATEC_CINERGY_XS "rc-terratec-cinergy-xs"
+#define RC_MAP_TEVII_NEC "rc-tevii-nec"
+#define RC_MAP_TT_1500 "rc-tt-1500"
+#define RC_MAP_VIDEOMATE_S350 "rc-videomate-s350"
+#define RC_MAP_VIDEOMATE_TV_PVR "rc-videomate-tv-pvr"
+#define RC_MAP_WINFAST "rc-winfast"
+#define RC_MAP_WINFAST_USBII_DELUXE "rc-winfast-usbii-deluxe"
+/*
+ * Please, do not just append newer Remote Controller names at the end.
+ * The names should be ordered in alphabetical order
+ */
diff --git a/include/media/sh_vou.h b/include/media/sh_vou.h
new file mode 100644
index 000000000000..a3ef30242b00
--- /dev/null
+++ b/include/media/sh_vou.h
@@ -0,0 +1,34 @@
+/*
+ * SuperH Video Output Unit (VOU) driver header
+ *
+ * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef SH_VOU_H
+#define SH_VOU_H
+
+#include <linux/i2c.h>
+
+/* Bus flags */
+#define SH_VOU_PCLK_FALLING (1 << 0)
+#define SH_VOU_HSYNC_LOW (1 << 1)
+#define SH_VOU_VSYNC_LOW (1 << 2)
+
+enum sh_vou_bus_fmt {
+ SH_VOU_BUS_8BIT,
+ SH_VOU_BUS_16BIT,
+ SH_VOU_BUS_BT656,
+};
+
+struct sh_vou_pdata {
+ enum sh_vou_bus_fmt bus_fmt;
+ int i2c_adap;
+ struct i2c_board_info *board_info;
+ unsigned long flags;
+ char *module_name;
+};
+
+#endif
diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h
index 9d69f01b6fa2..c9a5bbfa6ab5 100644
--- a/include/media/soc_camera.h
+++ b/include/media/soc_camera.h
@@ -266,8 +266,8 @@ static inline unsigned long soc_camera_bus_param_compatible(
common_flags;
}
-static inline void soc_camera_limit_side(unsigned int *start,
- unsigned int *length, unsigned int start_min,
+static inline void soc_camera_limit_side(int *start, int *length,
+ unsigned int start_min,
unsigned int length_min, unsigned int length_max)
{
if (*length < length_min)
@@ -284,4 +284,12 @@ static inline void soc_camera_limit_side(unsigned int *start,
extern unsigned long soc_camera_apply_sensor_flags(struct soc_camera_link *icl,
unsigned long flags);
+/* This is only temporary here - until v4l2-subdev begins to link to video_device */
+#include <linux/i2c.h>
+static inline struct video_device *soc_camera_i2c_to_vdev(struct i2c_client *client)
+{
+ struct soc_camera_device *icd = client->dev.platform_data;
+ return icd->vdev;
+}
+
#endif
diff --git a/include/media/v4l2-chip-ident.h b/include/media/v4l2-chip-ident.h
index 56abf21dd786..21b4428c12ab 100644
--- a/include/media/v4l2-chip-ident.h
+++ b/include/media/v4l2-chip-ident.h
@@ -25,6 +25,10 @@
#define V4L2_CHIP_IDENT_H_
/* VIDIOC_DBG_G_CHIP_IDENT: identifies the actual chip installed on the board */
+
+/* KEEP THIS LIST ORDERED BY ID!
+ Otherwise it will be hard to see which ranges are already in use when
+   adding support for a new chip family. */
enum {
/* general idents: reserved range 0-49 */
V4L2_IDENT_NONE = 0, /* No chip matched */
@@ -77,17 +81,14 @@ enum {
V4L2_IDENT_CX23417 = 417,
V4L2_IDENT_CX23418 = 418,
- /* module au0828 */
- V4L2_IDENT_AU0828 = 828,
-
- /* module indycam: just ident 2000 */
- V4L2_IDENT_INDYCAM = 2000,
-
/* module bt819: reserved range 810-819 */
V4L2_IDENT_BT815A = 815,
V4L2_IDENT_BT817A = 817,
V4L2_IDENT_BT819A = 819,
+ /* module au0828 */
+ V4L2_IDENT_AU0828 = 828,
+
/* module bt856: just ident 856 */
V4L2_IDENT_BT856 = 856,
@@ -99,6 +100,9 @@ enum {
V4L2_IDENT_KS0127 = 1127,
V4L2_IDENT_KS0127B = 1128,
+ /* module indycam: just ident 2000 */
+ V4L2_IDENT_INDYCAM = 2000,
+
/* module vp27smpx: just ident 2700 */
V4L2_IDENT_VP27SMPX = 2700,
@@ -162,20 +166,21 @@ enum {
/* module saa7706h: just ident 7706 */
V4L2_IDENT_SAA7706H = 7706,
+ /* module mt9v011, just ident 8243 */
+ V4L2_IDENT_MT9V011 = 8243,
+
/* module wm8739: just ident 8739 */
V4L2_IDENT_WM8739 = 8739,
/* module wm8775: just ident 8775 */
V4L2_IDENT_WM8775 = 8775,
- /* module tda9840: just ident 9840 */
- V4L2_IDENT_TDA9840 = 9840,
-
/* module cafe_ccic, just ident 8801 */
V4L2_IDENT_CAFE = 8801,
- /* module mt9v011, just ident 8243 */
- V4L2_IDENT_MT9V011 = 8243,
+ /* AKM AK8813/AK8814 */
+ V4L2_IDENT_AK8813 = 8813,
+ V4L2_IDENT_AK8814 = 8814,
/* module cx23885 and cx25840 */
V4L2_IDENT_CX23885 = 8850,
@@ -186,6 +191,9 @@ enum {
V4L2_IDENT_CX23888_AV = 8881, /* Integrated A/V decoder */
V4L2_IDENT_CX23888_IR = 8882, /* Integrated infrared controller */
+ /* module tda9840: just ident 9840 */
+ V4L2_IDENT_TDA9840 = 9840,
+
/* module tw9910: just ident 9910 */
V4L2_IDENT_TW9910 = 9910,
@@ -198,72 +206,70 @@ enum {
V4L2_IDENT_CX23101 = 23101,
V4L2_IDENT_CX23102 = 23102,
- /* module msp3400: reserved range 34000-34999 and 44000-44999 */
+ /* module msp3400: reserved range 34000-34999 for msp34xx */
V4L2_IDENT_MSPX4XX = 34000, /* generic MSPX4XX identifier, only
use internally (tveeprom.c). */
V4L2_IDENT_MSP3400B = 34002,
- V4L2_IDENT_MSP3410B = 34102,
-
V4L2_IDENT_MSP3400C = 34003,
- V4L2_IDENT_MSP3410C = 34103,
-
V4L2_IDENT_MSP3400D = 34004,
- V4L2_IDENT_MSP3410D = 34104,
+ V4L2_IDENT_MSP3400G = 34007,
+ V4L2_IDENT_MSP3401G = 34017,
+ V4L2_IDENT_MSP3402G = 34027,
V4L2_IDENT_MSP3405D = 34054,
- V4L2_IDENT_MSP3415D = 34154,
+ V4L2_IDENT_MSP3405G = 34057,
V4L2_IDENT_MSP3407D = 34074,
- V4L2_IDENT_MSP3417D = 34174,
+ V4L2_IDENT_MSP3407G = 34077,
- V4L2_IDENT_MSP3400G = 34007,
+ V4L2_IDENT_MSP3410B = 34102,
+ V4L2_IDENT_MSP3410C = 34103,
+ V4L2_IDENT_MSP3410D = 34104,
V4L2_IDENT_MSP3410G = 34107,
- V4L2_IDENT_MSP3420G = 34207,
- V4L2_IDENT_MSP3430G = 34307,
- V4L2_IDENT_MSP3440G = 34407,
- V4L2_IDENT_MSP3450G = 34507,
- V4L2_IDENT_MSP3460G = 34607,
-
- V4L2_IDENT_MSP3401G = 34017,
V4L2_IDENT_MSP3411G = 34117,
- V4L2_IDENT_MSP3421G = 34217,
- V4L2_IDENT_MSP3431G = 34317,
- V4L2_IDENT_MSP3441G = 34417,
- V4L2_IDENT_MSP3451G = 34517,
- V4L2_IDENT_MSP3461G = 34617,
-
- V4L2_IDENT_MSP3402G = 34027,
V4L2_IDENT_MSP3412G = 34127,
- V4L2_IDENT_MSP3422G = 34227,
- V4L2_IDENT_MSP3442G = 34427,
- V4L2_IDENT_MSP3452G = 34527,
-
- V4L2_IDENT_MSP3405G = 34057,
+ V4L2_IDENT_MSP3415D = 34154,
V4L2_IDENT_MSP3415G = 34157,
- V4L2_IDENT_MSP3425G = 34257,
- V4L2_IDENT_MSP3435G = 34357,
- V4L2_IDENT_MSP3445G = 34457,
- V4L2_IDENT_MSP3455G = 34557,
- V4L2_IDENT_MSP3465G = 34657,
-
- V4L2_IDENT_MSP3407G = 34077,
+ V4L2_IDENT_MSP3417D = 34174,
V4L2_IDENT_MSP3417G = 34177,
+
+ V4L2_IDENT_MSP3420G = 34207,
+ V4L2_IDENT_MSP3421G = 34217,
+ V4L2_IDENT_MSP3422G = 34227,
+ V4L2_IDENT_MSP3425G = 34257,
V4L2_IDENT_MSP3427G = 34277,
+
+ V4L2_IDENT_MSP3430G = 34307,
+ V4L2_IDENT_MSP3431G = 34317,
+ V4L2_IDENT_MSP3435G = 34357,
V4L2_IDENT_MSP3437G = 34377,
+
+ V4L2_IDENT_MSP3440G = 34407,
+ V4L2_IDENT_MSP3441G = 34417,
+ V4L2_IDENT_MSP3442G = 34427,
+ V4L2_IDENT_MSP3445G = 34457,
V4L2_IDENT_MSP3447G = 34477,
+
+ V4L2_IDENT_MSP3450G = 34507,
+ V4L2_IDENT_MSP3451G = 34517,
+ V4L2_IDENT_MSP3452G = 34527,
+ V4L2_IDENT_MSP3455G = 34557,
V4L2_IDENT_MSP3457G = 34577,
+
+ V4L2_IDENT_MSP3460G = 34607,
+ V4L2_IDENT_MSP3461G = 34617,
+ V4L2_IDENT_MSP3465G = 34657,
V4L2_IDENT_MSP3467G = 34677,
- /* module msp3400: reserved range 34000-34999 and 44000-44999 */
+ /* module msp3400: reserved range 44000-44999 for msp44xx */
V4L2_IDENT_MSP4400G = 44007,
- V4L2_IDENT_MSP4410G = 44107,
- V4L2_IDENT_MSP4420G = 44207,
- V4L2_IDENT_MSP4440G = 44407,
- V4L2_IDENT_MSP4450G = 44507,
-
V4L2_IDENT_MSP4408G = 44087,
+ V4L2_IDENT_MSP4410G = 44107,
V4L2_IDENT_MSP4418G = 44187,
+ V4L2_IDENT_MSP4420G = 44207,
V4L2_IDENT_MSP4428G = 44287,
+ V4L2_IDENT_MSP4440G = 44407,
V4L2_IDENT_MSP4448G = 44487,
+ V4L2_IDENT_MSP4450G = 44507,
V4L2_IDENT_MSP4458G = 44587,
/* Micron CMOS sensor chips: 45000-45099 */
@@ -282,20 +288,27 @@ enum {
/* HV7131R CMOS sensor: just ident 46000 */
V4L2_IDENT_HV7131R = 46000,
+ /* Sharp RJ54N1CB0C, 0xCB0C = 51980 */
+ V4L2_IDENT_RJ54N1CB0C = 51980,
+
+ /* module m52790: just ident 52790 */
+ V4L2_IDENT_M52790 = 52790,
+
/* module cs53132a: just ident 53132 */
V4L2_IDENT_CS53l32A = 53132,
+ /* modules upd61151 MPEG2 encoder: just ident 54000 */
+ V4L2_IDENT_UPD61161 = 54000,
+ /* modules upd61152 MPEG2 encoder with AC3: just ident 54001 */
+ V4L2_IDENT_UPD61162 = 54001,
+
/* module upd64031a: just ident 64031 */
V4L2_IDENT_UPD64031A = 64031,
/* module upd64083: just ident 64083 */
V4L2_IDENT_UPD64083 = 64083,
- /* module m52790: just ident 52790 */
- V4L2_IDENT_M52790 = 52790,
-
- /* Sharp RJ54N1CB0C, 0xCB0C = 51980 */
- V4L2_IDENT_RJ54N1CB0C = 51980,
+ /* Don't just add new IDs at the end: KEEP THIS LIST ORDERED BY ID! */
};
#endif
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index 1c7b259f341c..3b2efdc6d065 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -184,6 +184,25 @@ const unsigned short *v4l2_i2c_tuner_addrs(enum v4l2_i2c_tuner_type type);
/* ------------------------------------------------------------------------- */
+/* SPI Helper functions */
+#if defined(CONFIG_SPI)
+
+#include <linux/spi/spi.h>
+
+struct spi_device;
+
+/* Load an SPI module and return an initialized v4l2_subdev struct.
+   The chip to load and bind is described by the spi_board_info argument. */
+struct v4l2_subdev *v4l2_spi_new_subdev(struct v4l2_device *v4l2_dev,
+ struct spi_master *master, struct spi_board_info *info);
+
+/* Initialize a v4l2_subdev with data from an spi_device struct */
+void v4l2_spi_subdev_init(struct v4l2_subdev *sd, struct spi_device *spi,
+ const struct v4l2_subdev_ops *ops);
+#endif
+
+/* ------------------------------------------------------------------------- */
+
/* Note: these remaining ioctls/structs should be removed as well, but they are
still used in tuner-simple.c (TUNER_SET_CONFIG), cx18/ivtv (RESET) and
v4l2-int-device.h (v4l2_routing). To remove these ioctls some more cleanup
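Usage of the new SPI helpers mirrors the existing I2C ones: describe the chip in an spi_board_info and let the helper instantiate and bind it. A hedged sketch; the modalias, speed and chip-select are invented for illustration.

#include <linux/spi/spi.h>
#include <media/v4l2-common.h>

static struct v4l2_subdev *attach_spi_subdev(struct v4l2_device *v4l2_dev,
					     struct spi_master *master)
{
	struct spi_board_info info = {
		.modalias	= "example-subdev",	/* hypothetical subdev driver */
		.max_speed_hz	= 5000000,
		.chip_select	= 0,
	};

	return v4l2_spi_new_subdev(v4l2_dev, master, &info);
}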
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index 2dee93892ea2..bebe44b03e0f 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -32,6 +32,7 @@ struct v4l2_device;
Drivers can clear this flag if they want to block all future
device access. It is cleared by video_unregister_device. */
#define V4L2_FL_REGISTERED (0)
+#define V4L2_FL_USES_V4L2_FH (1)
struct v4l2_file_operations {
struct module *owner;
@@ -77,6 +78,10 @@ struct video_device
/* attribute to differentiate multiple indices on one physical device */
int index;
+ /* V4L2 file handles */
+ spinlock_t fh_lock; /* Lock for all v4l2_fhs */
+ struct list_head fh_list; /* List of struct v4l2_fh */
+
int debug; /* Activates debug level*/
/* Video standard vars */
diff --git a/include/media/v4l2-event.h b/include/media/v4l2-event.h
new file mode 100644
index 000000000000..3b86177c8cd2
--- /dev/null
+++ b/include/media/v4l2-event.h
@@ -0,0 +1,67 @@
+/*
+ * v4l2-event.h
+ *
+ * V4L2 events.
+ *
+ * Copyright (C) 2009--2010 Nokia Corporation.
+ *
+ * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef V4L2_EVENT_H
+#define V4L2_EVENT_H
+
+#include <linux/types.h>
+#include <linux/videodev2.h>
+#include <linux/wait.h>
+
+struct v4l2_fh;
+struct video_device;
+
+struct v4l2_kevent {
+ struct list_head list;
+ struct v4l2_event event;
+};
+
+struct v4l2_subscribed_event {
+ struct list_head list;
+ u32 type;
+};
+
+struct v4l2_events {
+ wait_queue_head_t wait;
+ struct list_head subscribed; /* Subscribed events */
+ struct list_head free; /* Events ready for use */
+ struct list_head available; /* Dequeueable event */
+ unsigned int navailable;
+ unsigned int nallocated; /* Number of allocated events */
+ u32 sequence;
+};
+
+int v4l2_event_init(struct v4l2_fh *fh);
+int v4l2_event_alloc(struct v4l2_fh *fh, unsigned int n);
+void v4l2_event_free(struct v4l2_fh *fh);
+int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
+ int nonblocking);
+void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev);
+int v4l2_event_pending(struct v4l2_fh *fh);
+int v4l2_event_subscribe(struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub);
+int v4l2_event_unsubscribe(struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub);
+
+#endif /* V4L2_EVENT_H */
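Driver-side, the flow is: allocate kevents per file handle with v4l2_event_alloc(), route the two ioctl callbacks to the subscribe/unsubscribe helpers, and call v4l2_event_queue() from the interrupt path. A compressed sketch under those assumptions; the vsync hook and the single supported event type are illustrative.

#include <media/v4l2-event.h>

static int my_subscribe_event(struct v4l2_fh *fh,
			      struct v4l2_event_subscription *sub)
{
	if (sub->type != V4L2_EVENT_VSYNC)
		return -EINVAL;
	return v4l2_event_subscribe(fh, sub);
}

/* Called from the (hypothetical) vertical-sync interrupt handler. */
static void my_queue_vsync(struct video_device *vdev, __u8 field)
{
	struct v4l2_event ev = {
		.type = V4L2_EVENT_VSYNC,
		.u.vsync.field = field,
	};

	v4l2_event_queue(vdev, &ev);	/* wakes any sleeping VIDIOC_DQEVENT */
}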
diff --git a/include/media/v4l2-fh.h b/include/media/v4l2-fh.h
new file mode 100644
index 000000000000..1d72dde320bf
--- /dev/null
+++ b/include/media/v4l2-fh.h
@@ -0,0 +1,65 @@
+/*
+ * v4l2-fh.h
+ *
+ * V4L2 file handle. Store per file handle data for the V4L2
+ * framework. Using file handles is optional for the drivers.
+ *
+ * Copyright (C) 2009--2010 Nokia Corporation.
+ *
+ * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef V4L2_FH_H
+#define V4L2_FH_H
+
+#include <linux/list.h>
+
+struct video_device;
+struct v4l2_events;
+
+struct v4l2_fh {
+ struct list_head list;
+ struct video_device *vdev;
+ struct v4l2_events *events; /* events, pending and subscribed */
+};
+
+/*
+ * Initialise the file handle. Parts of the V4L2 framework using the
+ * file handles should be initialised in this function. Must be called
+ * from driver's v4l2_file_operations->open() handler if the driver
+ * uses v4l2_fh.
+ */
+int v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev);
+/*
+ * Add the fh to the list of file handles on a video_device. The file
+ * handle must be initialised first.
+ */
+void v4l2_fh_add(struct v4l2_fh *fh);
+/*
+ * Remove file handle from the list of file handles. Must be called in
+ * v4l2_file_operations->release() handler if the driver uses v4l2_fh.
+ */
+void v4l2_fh_del(struct v4l2_fh *fh);
+/*
+ * Release resources related to a file handle. Parts of the V4L2
+ * framework using the v4l2_fh must release their resources here, too.
+ * Must be called in v4l2_file_operations->release() handler if the
+ * driver uses v4l2_fh.
+ */
+void v4l2_fh_exit(struct v4l2_fh *fh);
+
+#endif /* V4L2_FH_H */
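The four calls documented above bracket a driver's open() and release(). A minimal sketch with the handle embedded in a driver-private per-file struct; struct my_fh and the error-handling style are assumptions.

#include <linux/slab.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>

struct my_fh {
	struct v4l2_fh fh;
};

static int my_open(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct my_fh *h = kzalloc(sizeof(*h), GFP_KERNEL);
	int ret;

	if (!h)
		return -ENOMEM;
	ret = v4l2_fh_init(&h->fh, vdev);	/* 1: initialise */
	if (ret) {
		kfree(h);
		return ret;
	}
	v4l2_fh_add(&h->fh);			/* 2: publish on the vdev list */
	file->private_data = h;
	return 0;
}

static int my_release(struct file *file)
{
	struct my_fh *h = file->private_data;

	v4l2_fh_del(&h->fh);			/* 3: unlink */
	v4l2_fh_exit(&h->fh);			/* 4: release fh resources */
	kfree(h);
	return 0;
}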
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index e8ba0f2efbae..06daa6e8e051 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -21,6 +21,8 @@
#include <linux/videodev2.h>
#endif
+struct v4l2_fh;
+
struct v4l2_ioctl_ops {
/* ioctl callbacks */
@@ -254,6 +256,11 @@ struct v4l2_ioctl_ops {
int (*vidioc_g_dv_timings) (struct file *file, void *fh,
struct v4l2_dv_timings *timings);
+ int (*vidioc_subscribe_event) (struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub);
+ int (*vidioc_unsubscribe_event)(struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub);
+
/* For other private ioctls */
long (*vidioc_default) (struct file *file, void *fh,
int cmd, void *arg);
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
new file mode 100644
index 000000000000..8d149f1c58d0
--- /dev/null
+++ b/include/media/v4l2-mem2mem.h
@@ -0,0 +1,201 @@
+/*
+ * Memory-to-memory device framework for Video for Linux 2.
+ *
+ * Helper functions for devices that use memory buffers for both source
+ * and destination.
+ *
+ * Copyright (c) 2009 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <p.osciak@samsung.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version
+ */
+
+#ifndef _MEDIA_V4L2_MEM2MEM_H
+#define _MEDIA_V4L2_MEM2MEM_H
+
+#include <media/videobuf-core.h>
+
+/**
+ * struct v4l2_m2m_ops - mem-to-mem device driver callbacks
+ * @device_run: required. Begin the actual job (transaction) inside this
+ * callback.
+ * The job does NOT have to end before this callback returns
+ *		(and this will usually be the case). When the job finishes,
+ * v4l2_m2m_job_finish() has to be called.
+ * @job_ready: optional. Should return 0 if the driver does not have a job
+ * fully prepared to run yet (i.e. it will not be able to finish a
+ * transaction without sleeping). If not provided, it will be
+ * assumed that one source and one destination buffer are all
+ * that is required for the driver to perform one full transaction.
+ * This method may not sleep.
+ * @job_abort: required. Informs the driver that it has to abort the currently
+ * running transaction as soon as possible (i.e. as soon as it can
+ * stop the device safely; e.g. in the next interrupt handler),
+ * even if the transaction would not have been finished by then.
+ * After the driver performs the necessary steps, it has to call
+ * v4l2_m2m_job_finish() (as if the transaction ended normally).
+ * This function does not have to (and will usually not) wait
+ * until the device enters a state when it can be stopped.
+ */
+struct v4l2_m2m_ops {
+ void (*device_run)(void *priv);
+ int (*job_ready)(void *priv);
+ void (*job_abort)(void *priv);
+};
+
+struct v4l2_m2m_dev;
+
+struct v4l2_m2m_queue_ctx {
+/* private: internal use only */
+ struct videobuf_queue q;
+
+ /* Queue for buffers ready to be processed as soon as this
+ * instance receives access to the device */
+ struct list_head rdy_queue;
+ u8 num_rdy;
+};
+
+struct v4l2_m2m_ctx {
+/* private: internal use only */
+ struct v4l2_m2m_dev *m2m_dev;
+
+ /* Capture (output to memory) queue context */
+ struct v4l2_m2m_queue_ctx cap_q_ctx;
+
+ /* Output (input from memory) queue context */
+ struct v4l2_m2m_queue_ctx out_q_ctx;
+
+ /* For device job queue */
+ struct list_head queue;
+ unsigned long job_flags;
+
+ /* Instance private data */
+ void *priv;
+};
+
+void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev);
+
+struct videobuf_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
+ enum v4l2_buf_type type);
+
+void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx);
+
+int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_requestbuffers *reqbufs);
+
+int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_buffer *buf);
+
+int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_buffer *buf);
+int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_buffer *buf);
+
+int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ enum v4l2_buf_type type);
+int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ enum v4l2_buf_type type);
+
+unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct poll_table_struct *wait);
+
+int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct vm_area_struct *vma);
+
+struct v4l2_m2m_dev *v4l2_m2m_init(struct v4l2_m2m_ops *m2m_ops);
+void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev);
+
+struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(void *priv, struct v4l2_m2m_dev *m2m_dev,
+ void (*vq_init)(void *priv, struct videobuf_queue *,
+ enum v4l2_buf_type));
+void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx);
+
+void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct videobuf_queue *vq,
+ struct videobuf_buffer *vb);
+
+/**
+ * v4l2_m2m_num_src_bufs_ready() - return the number of source buffers ready for
+ * use
+ */
+static inline
+unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
+{
+	return m2m_ctx->out_q_ctx.num_rdy;
+}
+
+/**
+ * v4l2_m2m_num_dst_bufs_ready() - return the number of destination buffers
+ * ready for use
+ */
+static inline
+unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
+{
+	return m2m_ctx->cap_q_ctx.num_rdy;
+}
+
+void *v4l2_m2m_next_buf(struct v4l2_m2m_ctx *m2m_ctx, enum v4l2_buf_type type);
+
+/**
+ * v4l2_m2m_next_src_buf() - return next source buffer from the list of ready
+ * buffers
+ */
+static inline void *v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ return v4l2_m2m_next_buf(m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+}
+
+/**
+ * v4l2_m2m_next_dst_buf() - return next destination buffer from the list of
+ * ready buffers
+ */
+static inline void *v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ return v4l2_m2m_next_buf(m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+}
+
+/**
+ * v4l2_m2m_get_src_vq() - return videobuf_queue for source buffers
+ */
+static inline
+struct videobuf_queue *v4l2_m2m_get_src_vq(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ return v4l2_m2m_get_vq(m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+}
+
+/**
+ * v4l2_m2m_get_dst_vq() - return videobuf_queue for destination buffers
+ */
+static inline
+struct videobuf_queue *v4l2_m2m_get_dst_vq(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ return v4l2_m2m_get_vq(m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+}
+
+void *v4l2_m2m_buf_remove(struct v4l2_m2m_ctx *m2m_ctx,
+ enum v4l2_buf_type type);
+
+/**
+ * v4l2_m2m_src_buf_remove() - take off a source buffer from the list of ready
+ * buffers and return it
+ */
+static inline void *v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ return v4l2_m2m_buf_remove(m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+}
+
+/**
+ * v4l2_m2m_dst_buf_remove() - take off a destination buffer from the list of
+ * ready buffers and return it
+ */
+static inline void *v4l2_m2m_dst_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ return v4l2_m2m_buf_remove(m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+}
+
+#endif /* _MEDIA_V4L2_MEM2MEM_H */
+
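The framework owns scheduling; a driver only fills in the three callbacks and reports completion. A condensed sketch of the device_run()/interrupt pair; struct my_ctx, struct my_dev and my_hw_start() are hypothetical, and marking the buffers done this way is an assumption about how completion is signalled.

#include <linux/interrupt.h>
#include <media/v4l2-mem2mem.h>

struct my_ctx {					/* hypothetical per-open instance */
	struct v4l2_m2m_ctx *m2m_ctx;
};

struct my_dev {					/* hypothetical device state */
	struct v4l2_m2m_dev *m2m_dev;
};

/* Hypothetical helper: program and kick the hardware for one transaction. */
static void my_hw_start(struct my_ctx *ctx, struct videobuf_buffer *src,
			struct videobuf_buffer *dst);

static void my_device_run(void *priv)
{
	struct my_ctx *ctx = priv;
	struct videobuf_buffer *src = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
	struct videobuf_buffer *dst = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);

	my_hw_start(ctx, src, dst);
	/* The transaction finishes asynchronously, in the irq handler below. */
}

static irqreturn_t my_irq(int irq, void *data)
{
	struct my_dev *dev = data;
	struct my_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
	struct videobuf_buffer *src = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
	struct videobuf_buffer *dst = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);

	src->state = VIDEOBUF_DONE;
	dst->state = VIDEOBUF_DONE;
	wake_up(&src->done);
	wake_up(&dst->done);
	v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);	/* schedule next job */
	return IRQ_HANDLED;
}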
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 2bcdca0a57fc..6232983f02d6 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -184,28 +184,6 @@ struct v4l2_subdev_audio_ops {
};
/*
- decode_vbi_line: video decoders that support sliced VBI need to implement
- this ioctl. Field p of the v4l2_sliced_vbi_line struct is set to the
- start of the VBI data that was generated by the decoder. The driver
- then parses the sliced VBI data and sets the other fields in the
- struct accordingly. The pointer p is updated to point to the start of
- the payload which can be copied verbatim into the data field of the
- v4l2_sliced_vbi_data struct. If no valid VBI data was found, then the
- type field is set to 0 on return.
-
- s_vbi_data: used to generate VBI signals on a video signal.
- v4l2_sliced_vbi_data is filled with the data packets that should be
- output. Note that if you set the line field to 0, then that VBI signal
- is disabled. If no valid VBI data was found, then the type field is
- set to 0 on return.
-
- g_vbi_data: used to obtain the sliced VBI packet from a readback register.
- Not all video decoders support this. If no data is available because
- the readback register contains invalid or erroneous data -EIO is
- returned. Note that you must fill in the 'id' member and the 'field'
- member (to determine whether CC data from the first or second field
- should be obtained).
-
s_std_output: set v4l2_std_id for video OUTPUT devices. This is ignored by
video input devices.
@@ -243,10 +221,6 @@ struct v4l2_subdev_audio_ops {
struct v4l2_subdev_video_ops {
int (*s_routing)(struct v4l2_subdev *sd, u32 input, u32 output, u32 config);
int (*s_crystal_freq)(struct v4l2_subdev *sd, u32 freq, u32 flags);
- int (*decode_vbi_line)(struct v4l2_subdev *sd, struct v4l2_decode_vbi_line *vbi_line);
- int (*s_vbi_data)(struct v4l2_subdev *sd, const struct v4l2_sliced_vbi_data *vbi_data);
- int (*g_vbi_data)(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_data *vbi_data);
- int (*g_sliced_vbi_cap)(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_cap *cap);
int (*s_std_output)(struct v4l2_subdev *sd, v4l2_std_id std);
int (*querystd)(struct v4l2_subdev *sd, v4l2_std_id *std);
int (*g_input_status)(struct v4l2_subdev *sd, u32 *status);
@@ -280,6 +254,45 @@ struct v4l2_subdev_video_ops {
struct v4l2_mbus_framefmt *fmt);
};
+/*
+ decode_vbi_line: video decoders that support sliced VBI need to implement
+ this ioctl. Field p of the v4l2_sliced_vbi_line struct is set to the
+ start of the VBI data that was generated by the decoder. The driver
+ then parses the sliced VBI data and sets the other fields in the
+ struct accordingly. The pointer p is updated to point to the start of
+ the payload which can be copied verbatim into the data field of the
+ v4l2_sliced_vbi_data struct. If no valid VBI data was found, then the
+ type field is set to 0 on return.
+
+ s_vbi_data: used to generate VBI signals on a video signal.
+ v4l2_sliced_vbi_data is filled with the data packets that should be
+ output. Note that if you set the line field to 0, then that VBI signal
+ is disabled. If no valid VBI data was found, then the type field is
+ set to 0 on return.
+
+ g_vbi_data: used to obtain the sliced VBI packet from a readback register.
+ Not all video decoders support this. If no data is available because
+ the readback register contains invalid or erroneous data -EIO is
+ returned. Note that you must fill in the 'id' member and the 'field'
+ member (to determine whether CC data from the first or second field
+ should be obtained).
+
+ s_raw_fmt: setup the video encoder/decoder for raw VBI.
+
+ g_sliced_fmt: retrieve the current sliced VBI settings.
+
+ s_sliced_fmt: setup the sliced VBI settings.
+ */
+struct v4l2_subdev_vbi_ops {
+ int (*decode_vbi_line)(struct v4l2_subdev *sd, struct v4l2_decode_vbi_line *vbi_line);
+ int (*s_vbi_data)(struct v4l2_subdev *sd, const struct v4l2_sliced_vbi_data *vbi_data);
+ int (*g_vbi_data)(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_data *vbi_data);
+ int (*g_sliced_vbi_cap)(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_cap *cap);
+ int (*s_raw_fmt)(struct v4l2_subdev *sd, struct v4l2_vbi_format *fmt);
+ int (*g_sliced_fmt)(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *fmt);
+ int (*s_sliced_fmt)(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *fmt);
+};
+
/**
* struct v4l2_subdev_sensor_ops - v4l2-subdev sensor operations
* @g_skip_top_lines: number of lines at the top of the image to be skipped.
@@ -379,6 +392,7 @@ struct v4l2_subdev_ops {
const struct v4l2_subdev_tuner_ops *tuner;
const struct v4l2_subdev_audio_ops *audio;
const struct v4l2_subdev_video_ops *video;
+ const struct v4l2_subdev_vbi_ops *vbi;
const struct v4l2_subdev_ir_ops *ir;
const struct v4l2_subdev_sensor_ops *sensor;
};
@@ -387,6 +401,8 @@ struct v4l2_subdev_ops {
/* Set this flag if this subdev is a i2c device. */
#define V4L2_SUBDEV_FL_IS_I2C (1U << 0)
+/* Set this flag if this subdev is a spi device. */
+#define V4L2_SUBDEV_FL_IS_SPI (1U << 1)
/* Each instance of a subdev driver should create this struct, either
stand-alone or embedded in a larger struct.
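
As an illustration only (none of these symbols are in the patch), a decoder driver that previously exposed its VBI callbacks through v4l2_subdev_video_ops would now split them out along these lines:

static const struct v4l2_subdev_vbi_ops mydec_vbi_ops = {
	.decode_vbi_line  = mydec_decode_vbi_line,	/* hypothetical handlers */
	.s_vbi_data       = mydec_s_vbi_data,
	.g_vbi_data       = mydec_g_vbi_data,
	.g_sliced_vbi_cap = mydec_g_sliced_vbi_cap,
};

static const struct v4l2_subdev_ops mydec_ops = {
	.core  = &mydec_core_ops,	/* pre-existing ops tables, assumed */
	.video = &mydec_video_ops,
	.vbi   = &mydec_vbi_ops,	/* wired through the new .vbi member */
};
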
diff --git a/include/media/videobuf-core.h b/include/media/videobuf-core.h
index 316fdccdcaa0..f91a736c133d 100644
--- a/include/media/videobuf-core.h
+++ b/include/media/videobuf-core.h
@@ -127,30 +127,16 @@ struct videobuf_queue_ops {
struct videobuf_qtype_ops {
u32 magic;
- void *(*alloc) (size_t size);
- void *(*vmalloc) (struct videobuf_buffer *buf);
- int (*iolock) (struct videobuf_queue* q,
+ struct videobuf_buffer *(*alloc)(size_t size);
+ void *(*vaddr) (struct videobuf_buffer *buf);
+ int (*iolock) (struct videobuf_queue *q,
struct videobuf_buffer *vb,
struct v4l2_framebuffer *fbuf);
- int (*mmap) (struct videobuf_queue *q,
- unsigned int *count,
- unsigned int *size,
- enum v4l2_memory memory);
- int (*sync) (struct videobuf_queue* q,
+ int (*sync) (struct videobuf_queue *q,
struct videobuf_buffer *buf);
- int (*video_copy_to_user)(struct videobuf_queue *q,
- char __user *data,
- size_t count,
- int nonblocking);
- int (*copy_stream) (struct videobuf_queue *q,
- char __user *data,
- size_t count,
- size_t pos,
- int vbihack,
- int nonblocking);
- int (*mmap_free) (struct videobuf_queue *q);
int (*mmap_mapper) (struct videobuf_queue *q,
- struct vm_area_struct *vma);
+ struct videobuf_buffer *buf,
+ struct vm_area_struct *vma);
};
struct videobuf_queue {
@@ -171,7 +157,6 @@ struct videobuf_queue {
unsigned int streaming:1;
unsigned int reading:1;
- unsigned int is_mmapped:1;
/* capture via mmap() + ioctl(QBUF/DQBUF) */
struct list_head stream;
@@ -185,14 +170,14 @@ struct videobuf_queue {
};
int videobuf_waiton(struct videobuf_buffer *vb, int non_blocking, int intr);
-int videobuf_iolock(struct videobuf_queue* q, struct videobuf_buffer *vb,
+int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
struct v4l2_framebuffer *fbuf);
-void *videobuf_alloc(struct videobuf_queue* q);
+struct videobuf_buffer *videobuf_alloc(struct videobuf_queue *q);
/* Used on videobuf-dvb */
-void *videobuf_queue_to_vmalloc (struct videobuf_queue* q,
- struct videobuf_buffer *buf);
+void *videobuf_queue_to_vaddr(struct videobuf_queue *q,
+ struct videobuf_buffer *buf);
void videobuf_queue_core_init(struct videobuf_queue *q,
const struct videobuf_queue_ops *ops,
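
A deliberately simplified sketch of the reworked hooks (not how the in-tree backends are organized): alloc() now returns a struct videobuf_buffer * instead of a void *, and the old vmalloc() hook becomes vaddr(). All my_* names, including my_backend_priv() and MY_BACKEND_MAGIC, are hypothetical.

static struct videobuf_buffer *my_buf_alloc(size_t size)
{
	/* size covers the driver buffer struct that embeds videobuf_buffer */
	return kzalloc(size, GFP_KERNEL);
}

static void *my_buf_vaddr(struct videobuf_buffer *vb)
{
	/* Return the kernel virtual address the backend keeps for this buffer;
	 * where that address is stored is backend-specific and assumed here. */
	return my_backend_priv(vb)->vaddr;
}

static struct videobuf_qtype_ops my_qtype_ops = {
	.magic = MY_BACKEND_MAGIC,	/* backend-specific magic, illustrative */
	.alloc = my_buf_alloc,
	.vaddr = my_buf_vaddr,
};
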
diff --git a/include/media/videobuf-dma-sg.h b/include/media/videobuf-dma-sg.h
index 53e72f787175..a195f3b9c00a 100644
--- a/include/media/videobuf-dma-sg.h
+++ b/include/media/videobuf-dma-sg.h
@@ -17,6 +17,8 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2
*/
+#ifndef _VIDEOBUF_DMA_SG_H
+#define _VIDEOBUF_DMA_SG_H
#include <media/videobuf-core.h>
@@ -27,14 +29,14 @@
* block (NULL on errors). Memory for the scatterlist is allocated
* using kmalloc. The caller must free the memory.
*/
-struct scatterlist* videobuf_vmalloc_to_sg(unsigned char *virt, int nr_pages);
+struct scatterlist *videobuf_vmalloc_to_sg(unsigned char *virt, int nr_pages);
/*
 * Return a scatterlist for an array of userpages (NULL on errors).
* Memory for the scatterlist is allocated using kmalloc. The caller
* must free the memory.
*/
-struct scatterlist* videobuf_pages_to_sg(struct page **pages, int nr_pages,
+struct scatterlist *videobuf_pages_to_sg(struct page **pages, int nr_pages,
int offset);
/* --------------------------------------------------------------------- */
@@ -78,8 +80,7 @@ struct videobuf_dmabuf {
int direction;
};
-struct videobuf_dma_sg_memory
-{
+struct videobuf_dma_sg_memory {
u32 magic;
/* for mmap'ed buffers */
@@ -95,14 +96,13 @@ int videobuf_dma_init_overlay(struct videobuf_dmabuf *dma, int direction,
dma_addr_t addr, int nr_pages);
int videobuf_dma_free(struct videobuf_dmabuf *dma);
-int videobuf_dma_map(struct videobuf_queue* q,struct videobuf_dmabuf *dma);
-int videobuf_dma_sync(struct videobuf_queue* q,struct videobuf_dmabuf *dma);
-int videobuf_dma_unmap(struct videobuf_queue* q,struct videobuf_dmabuf *dma);
-struct videobuf_dmabuf *videobuf_to_dma (struct videobuf_buffer *buf);
+int videobuf_dma_map(struct videobuf_queue *q, struct videobuf_dmabuf *dma);
+int videobuf_dma_unmap(struct videobuf_queue *q, struct videobuf_dmabuf *dma);
+struct videobuf_dmabuf *videobuf_to_dma(struct videobuf_buffer *buf);
void *videobuf_sg_alloc(size_t size);
-void videobuf_queue_sg_init(struct videobuf_queue* q,
+void videobuf_queue_sg_init(struct videobuf_queue *q,
const struct videobuf_queue_ops *ops,
struct device *dev,
spinlock_t *irqlock,
@@ -111,9 +111,11 @@ void videobuf_queue_sg_init(struct videobuf_queue* q,
unsigned int msize,
void *priv);
- /*FIXME: these variants are used only on *-alsa code, where videobuf is
- * used without queue
- */
+/* FIXME: these variants are used only in *-alsa code, where videobuf is
+ * used without a queue
+ */
int videobuf_sg_dma_map(struct device *dev, struct videobuf_dmabuf *dma);
int videobuf_sg_dma_unmap(struct device *dev, struct videobuf_dmabuf *dma);
+#endif /* _VIDEOBUF_DMA_SG_H */
+
diff --git a/include/media/videobuf-vmalloc.h b/include/media/videobuf-vmalloc.h
index 4b419a257a7d..851eb1a2ff2a 100644
--- a/include/media/videobuf-vmalloc.h
+++ b/include/media/videobuf-vmalloc.h
@@ -19,17 +19,17 @@
/* --------------------------------------------------------------------- */
-struct videobuf_vmalloc_memory
-{
+struct videobuf_vmalloc_memory {
u32 magic;
void *vmalloc;
- /* remap_vmalloc_range seems to need to run after mmap() on some cases */
+ /* remap_vmalloc_range seems to need to run
+ * after mmap() in some cases */
struct vm_area_struct *vma;
};
-void videobuf_queue_vmalloc_init(struct videobuf_queue* q,
+void videobuf_queue_vmalloc_init(struct videobuf_queue *q,
const struct videobuf_queue_ops *ops,
struct device *dev,
spinlock_t *irqlock,
@@ -38,8 +38,8 @@ void videobuf_queue_vmalloc_init(struct videobuf_queue* q,
unsigned int msize,
void *priv);
-void *videobuf_to_vmalloc (struct videobuf_buffer *buf);
+void *videobuf_to_vmalloc(struct videobuf_buffer *buf);
-void videobuf_vmalloc_free (struct videobuf_buffer *buf);
+void videobuf_vmalloc_free(struct videobuf_buffer *buf);
#endif
diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h
index a7fb54808a23..156c26bb8bd7 100644
--- a/include/net/9p/9p.h
+++ b/include/net/9p/9p.h
@@ -86,6 +86,10 @@ do { \
/**
* enum p9_msg_t - 9P message types
+ * @P9_TSTATFS: file system status request
+ * @P9_RSTATFS: file system status response
+ * @P9_TRENAME: rename request
+ * @P9_RRENAME: rename response
* @P9_TVERSION: version handshake request
* @P9_RVERSION: version handshake response
* @P9_TAUTH: request to establish authentication channel
@@ -125,6 +129,10 @@ do { \
*/
enum p9_msg_t {
+ P9_TSTATFS = 8,
+ P9_RSTATFS,
+ P9_TRENAME = 20,
+ P9_RRENAME,
P9_TVERSION = 100,
P9_RVERSION,
P9_TAUTH = 102,
@@ -350,6 +358,31 @@ struct p9_wstat {
};
/* Structures for Protocol Operations */
+struct p9_tstatfs {
+ u32 fid;
+};
+
+struct p9_rstatfs {
+ u32 type;
+ u32 bsize;
+ u64 blocks;
+ u64 bfree;
+ u64 bavail;
+ u64 files;
+ u64 ffree;
+ u64 fsid;
+ u32 namelen;
+};
+
+struct p9_trename {
+ u32 fid;
+ u32 newdirfid;
+ struct p9_str name;
+};
+
+struct p9_rrename {
+};
+
struct p9_tversion {
u32 msize;
struct p9_str version;
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
index 4f3760afc20f..7dd3ed85c782 100644
--- a/include/net/9p/client.h
+++ b/include/net/9p/client.h
@@ -195,6 +195,8 @@ struct p9_fid {
struct list_head dlist; /* list of all fids attached to a dentry */
};
+int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb);
+int p9_client_rename(struct p9_fid *fid, struct p9_fid *newdirfid, char *name);
int p9_client_version(struct p9_client *);
struct p9_client *p9_client_create(const char *dev_name, char *options);
void p9_client_destroy(struct p9_client *clnt);
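
A hedged sketch of calling the two new client helpers; my_statfs_and_rename() and the "newname" string are illustrative, and the fids are assumed to come from the usual walk/attach paths.

static int my_statfs_and_rename(struct p9_fid *fid, struct p9_fid *newdirfid)
{
	struct p9_rstatfs rs;
	int err;

	err = p9_client_statfs(fid, &rs);	/* TSTATFS/RSTATFS round trip */
	if (err)
		return err;
	pr_info("9p: %llu of %llu blocks free\n",
		(unsigned long long)rs.bfree, (unsigned long long)rs.blocks);

	/* Move the file behind fid into the directory behind newdirfid. */
	return p9_client_rename(fid, newdirfid, "newname");
}
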
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 1614d78c60ed..20725e213aee 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -30,7 +30,7 @@ struct unix_skb_parms {
#endif
};
-#define UNIXCB(skb) (*(struct unix_skb_parms*)&((skb)->cb))
+#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
#define UNIXCREDS(skb) (&UNIXCB((skb)).creds)
#define UNIXSID(skb) (&UNIXCB((skb)).secid)
@@ -45,21 +45,23 @@ struct unix_skb_parms {
struct unix_sock {
/* WARNING: sk has to be the first member */
struct sock sk;
- struct unix_address *addr;
- struct dentry *dentry;
- struct vfsmount *mnt;
+ struct unix_address *addr;
+ struct dentry *dentry;
+ struct vfsmount *mnt;
struct mutex readlock;
- struct sock *peer;
- struct sock *other;
+ struct sock *peer;
+ struct sock *other;
struct list_head link;
- atomic_long_t inflight;
- spinlock_t lock;
+ atomic_long_t inflight;
+ spinlock_t lock;
unsigned int gc_candidate : 1;
unsigned int gc_maybe_cycle : 1;
- wait_queue_head_t peer_wait;
+ struct socket_wq peer_wq;
};
#define unix_sk(__sk) ((struct unix_sock *)__sk)
+#define peer_wait peer_wq.wait
+
#ifdef CONFIG_SYSCTL
extern int unix_sysctl_register(struct net *net);
extern void unix_sysctl_unregister(struct net *net);
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index ce3c99e5fa25..e42f6ed5421c 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -107,6 +107,8 @@ struct hci_dev {
unsigned long acl_last_tx;
unsigned long sco_last_tx;
+ struct workqueue_struct *workqueue;
+
struct tasklet_struct cmd_task;
struct tasklet_struct rx_task;
struct tasklet_struct tx_task;
@@ -636,8 +638,8 @@ int hci_register_notifier(struct notifier_block *nb);
int hci_unregister_notifier(struct notifier_block *nb);
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param);
-int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
-int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
+void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
+void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 17a689f27a6a..7c695bfd853c 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -30,11 +30,12 @@
#define L2CAP_DEFAULT_MIN_MTU 48
#define L2CAP_DEFAULT_FLUSH_TO 0xffff
#define L2CAP_DEFAULT_TX_WINDOW 63
-#define L2CAP_DEFAULT_NUM_TO_ACK (L2CAP_DEFAULT_TX_WINDOW/5)
#define L2CAP_DEFAULT_MAX_TX 3
#define L2CAP_DEFAULT_RETRANS_TO 1000 /* 1 second */
#define L2CAP_DEFAULT_MONITOR_TO 12000 /* 12 seconds */
#define L2CAP_DEFAULT_MAX_PDU_SIZE 672
+#define L2CAP_DEFAULT_ACK_TO 200
+#define L2CAP_LOCAL_BUSY_TRIES 12
#define L2CAP_CONN_TIMEOUT (40000) /* 40 seconds */
#define L2CAP_INFO_TIMEOUT (4000) /* 4 seconds */
@@ -55,6 +56,8 @@ struct l2cap_options {
__u16 flush_to;
__u8 mode;
__u8 fcs;
+ __u8 max_tx;
+ __u16 txwin_size;
};
#define L2CAP_CONNINFO 0x02
@@ -292,6 +295,7 @@ struct l2cap_conn {
#define l2cap_pi(sk) ((struct l2cap_pinfo *) sk)
#define TX_QUEUE(sk) (&l2cap_pi(sk)->tx_queue)
#define SREJ_QUEUE(sk) (&l2cap_pi(sk)->srej_queue)
+#define BUSY_QUEUE(sk) (&l2cap_pi(sk)->busy_queue)
#define SREJ_LIST(sk) (&l2cap_pi(sk)->srej_l.list)
struct srej_list {
@@ -320,7 +324,7 @@ struct l2cap_pinfo {
__u8 conf_req[64];
__u8 conf_len;
__u8 conf_state;
- __u8 conn_state;
+ __u16 conn_state;
__u8 next_tx_seq;
__u8 expected_ack_seq;
@@ -328,27 +332,35 @@ struct l2cap_pinfo {
__u8 buffer_seq;
__u8 buffer_seq_srej;
__u8 srej_save_reqseq;
+ __u8 frames_sent;
__u8 unacked_frames;
__u8 retry_count;
- __u8 num_to_ack;
+ __u8 num_acked;
__u16 sdu_len;
__u16 partial_sdu_len;
struct sk_buff *sdu;
__u8 ident;
+ __u8 tx_win;
+ __u8 max_tx;
__u8 remote_tx_win;
__u8 remote_max_tx;
__u16 retrans_timeout;
__u16 monitor_timeout;
- __u16 max_pdu_size;
+ __u16 remote_mps;
+ __u16 mps;
__le16 sport;
+ spinlock_t send_lock;
struct timer_list retrans_timer;
struct timer_list monitor_timer;
+ struct timer_list ack_timer;
struct sk_buff_head tx_queue;
struct sk_buff_head srej_queue;
+ struct sk_buff_head busy_queue;
+ struct work_struct busy_work;
struct srej_list srej_l;
struct l2cap_conn *conn;
struct sock *next_c;
@@ -367,19 +379,24 @@ struct l2cap_pinfo {
#define L2CAP_CONF_MAX_CONF_REQ 2
#define L2CAP_CONF_MAX_CONF_RSP 2
-#define L2CAP_CONN_SAR_SDU 0x01
-#define L2CAP_CONN_SREJ_SENT 0x02
-#define L2CAP_CONN_WAIT_F 0x04
-#define L2CAP_CONN_SREJ_ACT 0x08
-#define L2CAP_CONN_SEND_PBIT 0x10
-#define L2CAP_CONN_REMOTE_BUSY 0x20
-#define L2CAP_CONN_LOCAL_BUSY 0x40
-#define L2CAP_CONN_REJ_ACT 0x80
+#define L2CAP_CONN_SAR_SDU 0x0001
+#define L2CAP_CONN_SREJ_SENT 0x0002
+#define L2CAP_CONN_WAIT_F 0x0004
+#define L2CAP_CONN_SREJ_ACT 0x0008
+#define L2CAP_CONN_SEND_PBIT 0x0010
+#define L2CAP_CONN_REMOTE_BUSY 0x0020
+#define L2CAP_CONN_LOCAL_BUSY 0x0040
+#define L2CAP_CONN_REJ_ACT 0x0080
+#define L2CAP_CONN_SEND_FBIT 0x0100
+#define L2CAP_CONN_RNR_SENT 0x0200
+#define L2CAP_CONN_SAR_RETRY 0x0400
#define __mod_retrans_timer() mod_timer(&l2cap_pi(sk)->retrans_timer, \
jiffies + msecs_to_jiffies(L2CAP_DEFAULT_RETRANS_TO));
#define __mod_monitor_timer() mod_timer(&l2cap_pi(sk)->monitor_timer, \
jiffies + msecs_to_jiffies(L2CAP_DEFAULT_MONITOR_TO));
+#define __mod_ack_timer() mod_timer(&l2cap_pi(sk)->ack_timer, \
+ jiffies + msecs_to_jiffies(L2CAP_DEFAULT_ACK_TO));
static inline int l2cap_tx_window_full(struct sock *sk)
{
diff --git a/include/net/caif/caif_dev.h b/include/net/caif/caif_dev.h
new file mode 100644
index 000000000000..318ab9478a44
--- /dev/null
+++ b/include/net/caif/caif_dev.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/ sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef CAIF_DEV_H_
+#define CAIF_DEV_H_
+
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfcnfg.h>
+#include <linux/caif/caif_socket.h>
+#include <linux/if.h>
+
+/**
+ * struct caif_param - CAIF parameters.
+ * @size: Length of data
+ * @data: Binary Data Blob
+ */
+struct caif_param {
+ u16 size;
+ u8 data[256];
+};
+
+/**
+ * struct caif_connect_request - Request data for CAIF channel setup.
+ * @protocol: Type of CAIF protocol to use (at, datagram etc)
+ * @sockaddr: Socket address to connect.
+ * @priority: Priority of the connection.
+ * @link_selector: Link selector (high bandwidth or low latency)
+ * @link_name: Name of the CAIF Link Layer to use.
+ * @param: Connect Request parameters (CAIF_SO_REQ_PARAM).
+ *
+ * This struct is used when connecting a CAIF channel.
+ * It contains all CAIF channel configuration options.
+ */
+struct caif_connect_request {
+ enum caif_protocol_type protocol;
+ struct sockaddr_caif sockaddr;
+ enum caif_channel_priority priority;
+ enum caif_link_selector link_selector;
+ char link_name[16];
+ struct caif_param param;
+};
+
+/**
+ * caif_connect_client - Connect a client to CAIF Core Stack.
+ * @config: Channel setup parameters, specifying what address
+ * to connect on the Modem.
+ * @client_layer: User implementation of client layer. This layer
+ * MUST have receive and control callback functions
+ * implemented.
+ *
+ * This function connects a CAIF channel. The Client must implement
+ * the struct cflayer. This layer represents the Client layer and holds
+ * receive functions and control callback functions. Control callback
+ * function will receive information about connect/disconnect responses,
+ * flow control etc (see enum caif_control).
+ * E.g. CAIF Socket will call this function for each socket it connects
+ * and have one client_layer instance for each socket.
+ */
+int caif_connect_client(struct caif_connect_request *config,
+ struct cflayer *client_layer);
+
+/**
+ * caif_disconnect_client - Disconnects a client from the CAIF stack.
+ *
+ * @client_layer: Client layer to be removed.
+ */
+int caif_disconnect_client(struct cflayer *client_layer);
+
+/**
+ * caif_release_client - Release adaptation layer reference to client.
+ *
+ * @client_layer: Client layer.
+ *
+ * Releases a client/adaptation layer use of the caif stack.
+ * This function must be used after caif_disconnect_client to
+ * decrease the reference count of the service layer.
+ */
+void caif_release_client(struct cflayer *client_layer);
+
+/**
+ * connect_req_to_link_param - Translate configuration parameters
+ * from socket format to internal format.
+ * @cnfg: Pointer to configuration handler
+ * @con_req: Configuration parameters supplied in function
+ * caif_connect_client
+ * @channel_setup_param: Parameters supplied to the CAIF Core stack for
+ * setting up channels.
+ *
+ */
+int connect_req_to_link_param(struct cfcnfg *cnfg,
+ struct caif_connect_request *con_req,
+ struct cfctrl_link_param *channel_setup_param);
+
+/**
+ * get_caif_conf() - Get the configuration handler.
+ */
+struct cfcnfg *get_caif_conf(void);
+
+
+#endif /* CAIF_DEV_H_ */
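
To make the connect flow concrete, a rough sketch of a client opening a datagram channel. None of this is in the patch: struct my_client is hypothetical, its cflayer callbacks are assumed to be set elsewhere, and the enum values are assumed to come from linux/caif/caif_socket.h.

struct my_client {
	struct cflayer layer;	/* receive/ctrlcmd callbacks already set elsewhere */
};

static int my_caif_connect(struct my_client *cl)
{
	struct caif_connect_request req;

	memset(&req, 0, sizeof(req));
	req.protocol      = CAIFPROTO_DATAGRAM;		/* assumed constant */
	req.priority      = CAIF_PRIO_NORMAL;		/* assumed constant */
	req.link_selector = CAIF_LINK_LOW_LATENCY;	/* assumed constant */
	/* req.sockaddr must also be filled in according to the protocol. */

	return caif_connect_client(&req, &cl->layer);
}
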
diff --git a/include/net/caif/caif_device.h b/include/net/caif/caif_device.h
new file mode 100644
index 000000000000..d02f044adb8a
--- /dev/null
+++ b/include/net/caif/caif_device.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/ sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef CAIF_DEVICE_H_
+#define CAIF_DEVICE_H_
+#include <linux/kernel.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/caif/caif_socket.h>
+#include <net/caif/caif_device.h>
+
+/**
+ * struct caif_dev_common - data shared between CAIF drivers and stack.
+ * @flowctrl: Flow Control callback function. This function is
+ * supplied by CAIF Core Stack and is used by CAIF
+ * Link Layer to send flow-stop to CAIF Core.
+ * The flow information will be distributed to all
+ * clients of CAIF.
+ *
+ * @link_select: Profile of device, either high-bandwidth or
+ * low-latency. This member is set by CAIF Link
+ * Layer Device in order to indicate if this device
+ * is a high bandwidth or low latency device.
+ *
+ * @use_frag: CAIF Frames may be fragmented.
+ * Is set by CAIF Link Layer in order to indicate if the
+ * interface receives fragmented frames that must be
+ * assembled by CAIF Core Layer.
+ *
+ * @use_fcs: Indicate if Frame CheckSum (fcs) is used.
+ * Is set if the physical interface is
+ * using Frame Checksum on the CAIF Frames.
+ *
+ * @use_stx: Indicate STart of frame eXtension (stx) in use.
+ * Is set if the CAIF Link Layer expects
+ * CAIF Frames to start with the STX byte.
+ *
+ * This structure is shared between the CAIF drivers and the CAIF stack.
+ * It is used by the device to register its behavior.
+ * CAIF Core layer must set the member flowctrl in order to supply
+ * CAIF Link Layer with the flow control function.
+ *
+ */
+struct caif_dev_common {
+ void (*flowctrl)(struct net_device *net, int on);
+ enum caif_link_selector link_select;
+ int use_frag;
+ int use_fcs;
+ int use_stx;
+};
+
+#endif /* CAIF_DEVICE_H_ */
diff --git a/include/net/caif/caif_layer.h b/include/net/caif/caif_layer.h
new file mode 100644
index 000000000000..25c472f0e5b8
--- /dev/null
+++ b/include/net/caif/caif_layer.h
@@ -0,0 +1,283 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland / sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef CAIF_LAYER_H_
+#define CAIF_LAYER_H_
+
+#include <linux/list.h>
+
+struct cflayer;
+struct cfpkt;
+struct cfpktq;
+struct caif_payload_info;
+struct caif_packet_funcs;
+
+#define CAIF_MAX_FRAMESIZE 4096
+#define CAIF_MAX_PAYLOAD_SIZE (4096 - 64)
+#define CAIF_NEEDED_HEADROOM (10)
+#define CAIF_NEEDED_TAILROOM (2)
+
+#define CAIF_LAYER_NAME_SZ 16
+#define CAIF_SUCCESS 1
+#define CAIF_FAILURE 0
+
+/**
+ * caif_assert() - Assert function for CAIF.
+ * @assert: expression to evaluate.
+ *
+ * This function will print an error message and do a WARN_ON if the
+ * assertion fails. Normally this will produce a stack dump at the current location.
+ */
+#define caif_assert(assert) \
+do { \
+ if (!(assert)) { \
+ pr_err("caif:Assert detected:'%s'\n", #assert); \
+ WARN_ON(!(assert)); \
+ } \
+} while (0)
+
+
+/**
+ * enum caif_ctrlcmd - CAIF Stack Control Signaling sent in layer.ctrlcmd().
+ *
+ * @CAIF_CTRLCMD_FLOW_OFF_IND: Flow Control is OFF, transmit function
+ * should stop sending data
+ *
+ * @CAIF_CTRLCMD_FLOW_ON_IND: Flow Control is ON, transmit function
+ * can start sending data
+ *
+ * @CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND: Remote end modem has decided to close
+ * down channel
+ *
+ * @CAIF_CTRLCMD_INIT_RSP: Called initially when the layer below
+ * has finished initialization
+ *
+ * @CAIF_CTRLCMD_DEINIT_RSP: Called when de-initialization is
+ * complete
+ *
+ * @CAIF_CTRLCMD_INIT_FAIL_RSP: Called if initialization fails
+ *
+ * @_CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND: CAIF Link layer temporarily cannot
+ * send more packets.
+ * @_CAIF_CTRLCMD_PHYIF_FLOW_ON_IND: Called if CAIF Link layer is able
+ * to send packets again.
+ * @_CAIF_CTRLCMD_PHYIF_DOWN_IND: Called if CAIF Link layer is going
+ * down.
+ *
+ * These commands are sent upwards in the CAIF stack to the CAIF Client.
+ * They are used for signaling originating from the modem or CAIF Link Layer.
+ * These are either responses (*_RSP) or events (*_IND).
+ */
+enum caif_ctrlcmd {
+ CAIF_CTRLCMD_FLOW_OFF_IND,
+ CAIF_CTRLCMD_FLOW_ON_IND,
+ CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND,
+ CAIF_CTRLCMD_INIT_RSP,
+ CAIF_CTRLCMD_DEINIT_RSP,
+ CAIF_CTRLCMD_INIT_FAIL_RSP,
+ _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
+ _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND,
+ _CAIF_CTRLCMD_PHYIF_DOWN_IND,
+};
+
+/**
+ * enum caif_modemcmd - Modem Control Signaling, sent from CAIF Client
+ * to the CAIF Link Layer or modem.
+ *
+ * @CAIF_MODEMCMD_FLOW_ON_REQ: Flow Control is ON, transmit function
+ * can start sending data.
+ *
+ * @CAIF_MODEMCMD_FLOW_OFF_REQ: Flow Control is OFF, transmit function
+ * should stop sending data.
+ *
+ * @_CAIF_MODEMCMD_PHYIF_USEFULL: Notify physical layer that it is in use
+ *
+ * @_CAIF_MODEMCMD_PHYIF_USELESS: Notify physical layer that it is
+ * no longer in use.
+ *
+ * These are requests sent 'downwards' in the stack.
+ * Flow ON, OFF can be indicated to the modem.
+ */
+enum caif_modemcmd {
+ CAIF_MODEMCMD_FLOW_ON_REQ = 0,
+ CAIF_MODEMCMD_FLOW_OFF_REQ = 1,
+ _CAIF_MODEMCMD_PHYIF_USEFULL = 3,
+ _CAIF_MODEMCMD_PHYIF_USELESS = 4
+};
+
+/**
+ * enum caif_direction - CAIF Packet Direction.
+ * Indicates whether a packet is to be sent out or has been received.
+ * @CAIF_DIR_IN: Incoming packet received.
+ * @CAIF_DIR_OUT: Outgoing packet to be transmitted.
+ */
+enum caif_direction {
+ CAIF_DIR_IN = 0,
+ CAIF_DIR_OUT = 1
+};
+
+/**
+ * struct cflayer - CAIF Stack layer.
+ * Defines the framework for the CAIF Core Stack.
+ * @up: Pointer up to the layer above.
+ * @dn: Pointer down to the layer below.
+ * @node: List node used when layer participate in a list.
+ * @receive: Packet receive function.
+ * @transmit: Packet transmit function.
+ * @ctrlcmd: Used for control signalling upwards in the stack.
+ * @modemcmd: Used for control signaling downwards in the stack.
+ * @prio: Priority of this layer.
+ * @id: The identity of this layer
+ * @type: The type of this layer
+ * @name: Name of the layer.
+ *
+ * This structure defines the layered structure in CAIF.
+ *
+ * It defines CAIF layering structure, used by all CAIF Layers and the
+ * layers interfacing CAIF.
+ *
+ * In order to integrate with CAIF an adaptation layer on top of the CAIF stack
+ * and a PHY layer below the CAIF stack
+ * must be implemented. These layers must follow the design principles below.
+ *
+ * Principles for layering of protocol layers:
+ * - All layers must use this structure. If embedding it, then place this
+ * structure first in the layer specific structure.
+ *
+ * - Each layer should not depend on any other layer's private data.
+ *
+ * - In order to send data upwards do
+ * layer->up->receive(layer->up, packet);
+ *
+ * - In order to send data downwards do
+ * layer->dn->transmit(layer->dn, info, packet);
+ */
+struct cflayer {
+ struct cflayer *up;
+ struct cflayer *dn;
+ struct list_head node;
+
+ /*
+ * receive() - Receive Function.
+ * Contract: Each layer must implement a receive function passing the
+ * CAIF packets upwards in the stack.
+ * Packet handling rules:
+ * - The CAIF packet (cfpkt) cannot be accessed after
+ * passing it to the next layer using up->receive().
+ * - If parsing of the packet fails, the packet must be
+ * destroyed and -1 returned from the function.
+ * - If parsing succeeds (and above layers return OK) then
+ * the function must return a value > 0.
+ *
+ * Returns result < 0 indicates an error, 0 or positive value
+ * indicates success.
+ *
+ * @layr: Pointer to the current layer the receive function is
+ * implemented for (this pointer).
+ * @cfpkt: Pointer to CaifPacket to be handled.
+ */
+ int (*receive)(struct cflayer *layr, struct cfpkt *cfpkt);
+
+ /*
+ * transmit() - Transmit Function.
+ * Contract: Each layer must implement a transmit function passing the
+ * CAIF packet downwards in the stack.
+ * Packet handling rules:
+ * - The CAIF packet (cfpkt) ownership is passed to the
+ * transmit function. This means that the packet
+ * cannot be accessed after passing it to the below
+ * layer using dn->transmit().
+ *
+ * - If transmit fails, however, the ownership is returned
+ * to the caller. The caller of "dn->transmit()" must
+ * destroy or resend the packet.
+ *
+ * - Return value less than zero means error, zero or
+ * greater than zero means OK.
+ *
+ * result < 0 indicates an error, 0 or positive value
+ * indicate success.
+ *
+ * @layr: Pointer to the current layer the transmit function
+ * is implemented for (this pointer).
+ * @cfpkt: Pointer to CaifPacket to be handled.
+ */
+ int (*transmit) (struct cflayer *layr, struct cfpkt *cfpkt);
+
+ /*
+ * ctrlcmd() - Control Function upwards in CAIF Stack.
+ * Used for signaling responses (CAIF_CTRLCMD_*_RSP)
+ * and asynchronous events from the modem (CAIF_CTRLCMD_*_IND)
+ *
+ * @layr: Pointer to the current layer the receive function
+ * is implemented for (this pointer).
+ * @ctrl: Control Command.
+ */
+ void (*ctrlcmd) (struct cflayer *layr, enum caif_ctrlcmd ctrl,
+ int phyid);
+
+ /*
+ * modemctrl() - Control Function used for controlling the modem.
+ * Used to signal down-wards in the CAIF stack.
+ * Returns 0 on success, < 0 upon failure.
+ *
+ * @layr: Pointer to the current layer the receive function
+ * is implemented for (this pointer).
+ * @ctrl: Control Command.
+ */
+ int (*modemcmd) (struct cflayer *layr, enum caif_modemcmd ctrl);
+
+ unsigned short prio;
+ unsigned int id;
+ unsigned int type;
+ char name[CAIF_LAYER_NAME_SZ];
+};
+
+/**
+ * layer_set_up() - Set the up pointer for a specified layer.
+ * @layr: Layer where up pointer shall be set.
+ * @above: Layer above.
+ */
+#define layer_set_up(layr, above) ((layr)->up = (struct cflayer *)(above))
+
+/**
+ * layer_set_dn() - Set the down pointer for a specified layer.
+ * @layr: Layer where down pointer shall be set.
+ * @below: Layer below.
+ */
+#define layer_set_dn(layr, below) ((layr)->dn = (struct cflayer *)(below))
+
+/**
+ * struct dev_info - Physical device information about the physical layer.
+ * @dev: Pointer to native physical device.
+ * @id: Physical ID of the physical connection used by the
+ * logical CAIF connection. Used by service layers to
+ * identify their physical id to Caif MUX (CFMUXL) so
+ * that the MUX can add the correct physical ID to the
+ * packet.
+ */
+struct dev_info {
+ void *dev;
+ unsigned int id;
+};
+
+/**
+ * struct caif_payload_info - Payload information embedded in packet (sk_buff).
+ *
+ * @dev_info: Information about the receiving device.
+ *
+ * @hdr_len: Header length, used to align the payload on a 32-bit boundary.
+ *
+ * @channel_id: Channel ID of the logical CAIF connection.
+ * Used by mux to insert channel id into the caif packet.
+ */
+struct caif_payload_info {
+ struct dev_info *dev_info;
+ unsigned short hdr_len;
+ unsigned short channel_id;
+};
+
+#endif /* CAIF_LAYER_H_ */
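
Purely to illustrate the layering contract (not part of the patch), a skeleton layer that embeds struct cflayer and forwards packets both ways; all my_* names are hypothetical.

struct my_caif_layer {
	struct cflayer layer;		/* placed first, as the contract requires */
	/* layer-private state would follow here */
};

static int my_receive(struct cflayer *layr, struct cfpkt *pkt)
{
	/* Hand the packet upwards; it may not be touched afterwards. */
	return layr->up->receive(layr->up, pkt);
}

static int my_transmit(struct cflayer *layr, struct cfpkt *pkt)
{
	/* Ownership of pkt passes to the layer below. */
	return layr->dn->transmit(layr->dn, pkt);
}

static void my_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, int phyid)
{
	/* Forward control indications to the layer above. */
	layr->up->ctrlcmd(layr->up, ctrl, phyid);
}

static void my_layer_init(struct my_caif_layer *ly, struct cflayer *above,
			  struct cflayer *below)
{
	ly->layer.receive  = my_receive;
	ly->layer.transmit = my_transmit;
	ly->layer.ctrlcmd  = my_ctrlcmd;
	layer_set_up(&ly->layer, above);
	layer_set_dn(&ly->layer, below);
}
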
diff --git a/include/net/caif/cfcnfg.h b/include/net/caif/cfcnfg.h
new file mode 100644
index 000000000000..9fc2fc20b884
--- /dev/null
+++ b/include/net/caif/cfcnfg.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef CFCNFG_H_
+#define CFCNFG_H_
+#include <linux/spinlock.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfctrl.h>
+
+struct cfcnfg;
+
+/**
+ * enum cfcnfg_phy_type - Types of physical layers defined in CAIF Stack
+ *
+ * @CFPHYTYPE_FRAG: Fragmented frames physical interface.
+ * @CFPHYTYPE_CAIF: Generic CAIF physical interface
+ */
+enum cfcnfg_phy_type {
+ CFPHYTYPE_FRAG = 1,
+ CFPHYTYPE_CAIF,
+ CFPHYTYPE_MAX
+};
+
+/**
+ * enum cfcnfg_phy_preference - Physical preference HW Abstraction
+ *
+ * @CFPHYPREF_UNSPECIFIED: Default physical interface
+ *
+ * @CFPHYPREF_LOW_LAT: Default physical interface for low-latency
+ * traffic
+ * @CFPHYPREF_HIGH_BW: Default physical interface for high-bandwidth
+ * traffic
+ * @CFPHYPREF_LOOP: TEST only Loopback interface simulating modem
+ * responses.
+ *
+ */
+enum cfcnfg_phy_preference {
+ CFPHYPREF_UNSPECIFIED,
+ CFPHYPREF_LOW_LAT,
+ CFPHYPREF_HIGH_BW,
+ CFPHYPREF_LOOP
+};
+
+/**
+ * cfcnfg_create() - Create the CAIF configuration object.
+ */
+struct cfcnfg *cfcnfg_create(void);
+
+/**
+ * cfcnfg_remove() - Remove the CFCNFG object
+ * @cfg: config object
+ */
+void cfcnfg_remove(struct cfcnfg *cfg);
+
+/**
+ * cfcnfg_add_phy_layer() - Adds a physical layer to the CAIF stack.
+ * @cnfg: Pointer to a CAIF configuration object, created by
+ * cfcnfg_create().
+ * @phy_type: Specifies the type of physical interface, e.g.
+ * CFPHYTYPE_FRAG.
+ * @dev: Pointer to link layer device
+ * @phy_layer: Specify the physical layer. The transmit function
+ * MUST be set in the structure.
+ * @phyid: The assigned physical ID for this layer, used in
+ * cfcnfg_add_adapt_layer to specify PHY for the link.
+ * @pref: The phy (link layer) preference.
+ * @fcs: Specify if checksum is used in CAIF Framing Layer.
+ * @stx: Specify if Start Of Frame eXtention is used.
+ */
+
+void
+cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
+ void *dev, struct cflayer *phy_layer, u16 *phyid,
+ enum cfcnfg_phy_preference pref,
+ bool fcs, bool stx);
+
+/**
+ * cfcnfg_del_phy_layer - Deletes a phy layer from the CAIF stack.
+ *
+ * @cnfg: Pointer to a CAIF configuration object, created by
+ * cfcnfg_create().
+ * @phy_layer: Physical layer to be removed.
+ */
+int cfcnfg_del_phy_layer(struct cfcnfg *cnfg, struct cflayer *phy_layer);
+
+/**
+ * cfcnfg_disconn_adapt_layer - Disconnects an adaptation layer.
+ *
+ * @cnfg: Pointer to a CAIF configuration object, created by
+ * cfcnfg_create().
+ * @adap_layer: Adaptation layer to be removed.
+ */
+int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg,
+ struct cflayer *adap_layer);
+
+/**
+ * cfcnfg_release_adap_layer - Used by client to release the adaptation layer.
+ *
+ * @adap_layer: Adaptation layer.
+ */
+void cfcnfg_release_adap_layer(struct cflayer *adap_layer);
+
+/**
+ * cfcnfg_add_adaptation_layer - Add an adaptation layer to the CAIF stack.
+ *
+ * The adaptation Layer is where the interface to application or higher-level
+ * driver functionality is implemented.
+ *
+ * @cnfg: Pointer to a CAIF configuration object, created by
+ * cfcnfg_create().
+ * @param: Link setup parameters.
+ * @adap_layer: Specify the adaptation layer; the receive and
+ * flow-control functions MUST be set in the structure.
+ *
+ */
+int cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg,
+ struct cfctrl_link_param *param,
+ struct cflayer *adap_layer);
+
+/**
+ * cfcnfg_get_phyid() - Get physical interface, given link-layer preference.
+ * Returns one of the physical interfaces matching the given preference,
+ * or NULL if no match is found.
+ * @cnfg: Configuration object
+ * @phy_pref: Caif Link Layer preference
+ */
+struct dev_info *cfcnfg_get_phyid(struct cfcnfg *cnfg,
+ enum cfcnfg_phy_preference phy_pref);
+
+/**
+ * cfcnfg_get_named() - Get the Physical Identifier of CAIF Link Layer
+ * @cnfg: Configuration object
+ * @name: Name of the Physical Layer (Caif Link Layer)
+ */
+int cfcnfg_get_named(struct cfcnfg *cnfg, char *name);
+
+#endif /* CFCNFG_H_ */
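
For orientation, a hedged sketch of a link-layer driver registering its physical layer with the configuration object; my_register_phy() is hypothetical and the preference/FCS/STX choices are illustrative only.

static u16 my_register_phy(struct cfcnfg *cnfg, struct net_device *dev,
			   struct cflayer *phy_layer)
{
	u16 phyid;

	/* phy_layer->transmit must already be set, per the comment above. */
	cfcnfg_add_phy_layer(cnfg, CFPHYTYPE_CAIF, dev, phy_layer, &phyid,
			     CFPHYPREF_LOW_LAT, true, false);
	return phyid;	/* later referenced from cfctrl_link_param.phyid */
}
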
diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
new file mode 100644
index 000000000000..997603f2bf4c
--- /dev/null
+++ b/include/net/caif/cfctrl.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef CFCTRL_H_
+#define CFCTRL_H_
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfsrvl.h>
+
+/* CAIF Control packet commands */
+enum cfctrl_cmd {
+ CFCTRL_CMD_LINK_SETUP = 0,
+ CFCTRL_CMD_LINK_DESTROY = 1,
+ CFCTRL_CMD_LINK_ERR = 2,
+ CFCTRL_CMD_ENUM = 3,
+ CFCTRL_CMD_SLEEP = 4,
+ CFCTRL_CMD_WAKE = 5,
+ CFCTRL_CMD_LINK_RECONF = 6,
+ CFCTRL_CMD_START_REASON = 7,
+ CFCTRL_CMD_RADIO_SET = 8,
+ CFCTRL_CMD_MODEM_SET = 9,
+ CFCTRL_CMD_MASK = 0xf
+};
+
+/* Channel types */
+enum cfctrl_srv {
+ CFCTRL_SRV_DECM = 0,
+ CFCTRL_SRV_VEI = 1,
+ CFCTRL_SRV_VIDEO = 2,
+ CFCTRL_SRV_DBG = 3,
+ CFCTRL_SRV_DATAGRAM = 4,
+ CFCTRL_SRV_RFM = 5,
+ CFCTRL_SRV_UTIL = 6,
+ CFCTRL_SRV_MASK = 0xf
+};
+
+#define CFCTRL_RSP_BIT 0x20
+#define CFCTRL_ERR_BIT 0x10
+
+struct cfctrl_rsp {
+ void (*linksetup_rsp)(struct cflayer *layer, u8 linkid,
+ enum cfctrl_srv serv, u8 phyid,
+ struct cflayer *adapt_layer);
+ void (*linkdestroy_rsp)(struct cflayer *layer, u8 linkid);
+ void (*linkerror_ind)(void);
+ void (*enum_rsp)(void);
+ void (*sleep_rsp)(void);
+ void (*wake_rsp)(void);
+ void (*restart_rsp)(void);
+ void (*radioset_rsp)(void);
+ void (*reject_rsp)(struct cflayer *layer, u8 linkid,
+ struct cflayer *client_layer);
+};
+
+/* Link Setup Parameters for CAIF-Links. */
+struct cfctrl_link_param {
+ enum cfctrl_srv linktype;/* (T3,T0) Type of Channel */
+ u8 priority; /* (P4,P0) Priority of the channel */
+ u8 phyid; /* (U2-U0) Physical interface to connect */
+ u8 endpoint; /* (E1,E0) Endpoint for data channels */
+ u8 chtype; /* (H1,H0) Channel-Type, applies to
+ * VEI, DEBUG */
+ union {
+ struct {
+ u8 connid; /* (D7,D0) Video LinkId */
+ } video;
+
+ struct {
+ u32 connid; /* (N31,N0) Connection ID used
+ * for Datagram */
+ } datagram;
+
+ struct {
+ u32 connid; /* Connection ID used for RFM */
+ char volume[20]; /* Volume to mount for RFM */
+ } rfm; /* Configuration for RFM */
+
+ struct {
+ u16 fifosize_kb; /* Psock FIFO size in KB */
+ u16 fifosize_bufs; /* Psock # signal buffers */
+ char name[16]; /* Name of the PSOCK service */
+ u8 params[255]; /* Link setup Parameters */
+ u16 paramlen; /* Length of Link Setup
+ * Parameters */
+ } utility; /* Configuration for Utility Links (Psock) */
+ } u;
+};
+
+/* This structure is used internally in CFCTRL */
+struct cfctrl_request_info {
+ int sequence_no;
+ enum cfctrl_cmd cmd;
+ u8 channel_id;
+ struct cfctrl_link_param param;
+ struct cfctrl_request_info *next;
+ struct cflayer *client_layer;
+};
+
+struct cfctrl {
+ struct cfsrvl serv;
+ struct cfctrl_rsp res;
+ atomic_t req_seq_no;
+ atomic_t rsp_seq_no;
+ struct cfctrl_request_info *first_req;
+ /* Protects from simultaneous access to first_req list */
+ spinlock_t info_list_lock;
+#ifndef CAIF_NO_LOOP
+ u8 loop_linkid;
+ int loop_linkused[256];
+ /* Protects simultaneous access to loop_linkid and loop_linkused */
+ spinlock_t loop_linkid_lock;
+#endif
+
+};
+
+void cfctrl_enum_req(struct cflayer *cfctrl, u8 physlinkid);
+int cfctrl_linkup_request(struct cflayer *cfctrl,
+ struct cfctrl_link_param *param,
+ struct cflayer *user_layer);
+int cfctrl_linkdown_req(struct cflayer *cfctrl, u8 linkid,
+ struct cflayer *client);
+void cfctrl_sleep_req(struct cflayer *cfctrl);
+void cfctrl_wake_req(struct cflayer *cfctrl);
+void cfctrl_getstartreason_req(struct cflayer *cfctrl);
+struct cflayer *cfctrl_create(void);
+void cfctrl_set_dnlayer(struct cflayer *this, struct cflayer *dn);
+void cfctrl_set_uplayer(struct cflayer *this, struct cflayer *up);
+struct cfctrl_rsp *cfctrl_get_respfuncs(struct cflayer *layer);
+bool cfctrl_req_eq(struct cfctrl_request_info *r1,
+ struct cfctrl_request_info *r2);
+void cfctrl_insert_req(struct cfctrl *ctrl,
+ struct cfctrl_request_info *req);
+struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
+ struct cfctrl_request_info *req);
+void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer);
+
+#endif /* CFCTRL_H_ */
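
To show how the link parameters are meant to be filled in, a sketch (not from the patch) of a utility-link setup request; my_setup_util_link(), the priority value and the service name are illustrative.

static int my_setup_util_link(struct cflayer *cfctrl, struct cflayer *client,
			      u8 phyid)
{
	struct cfctrl_link_param param;

	memset(&param, 0, sizeof(param));
	param.linktype = CFCTRL_SRV_UTIL;
	param.priority = 4;			/* illustrative (P4..P0) priority */
	param.phyid    = phyid;			/* e.g. from cfcnfg_get_named() */
	strlcpy(param.u.utility.name, "my_service",
		sizeof(param.u.utility.name));	/* illustrative service name */

	return cfctrl_linkup_request(cfctrl, &param, client);
}
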
diff --git a/include/net/caif/cffrml.h b/include/net/caif/cffrml.h
new file mode 100644
index 000000000000..3f14d2e1ce61
--- /dev/null
+++ b/include/net/caif/cffrml.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef CFFRML_H_
+#define CFFRML_H_
+#include <net/caif/caif_layer.h>
+
+struct cffrml;
+struct cflayer *cffrml_create(u16 phyid, bool DoFCS);
+void cffrml_set_uplayer(struct cflayer *this, struct cflayer *up);
+void cffrml_set_dnlayer(struct cflayer *this, struct cflayer *dn);
+
+#endif /* CFFRML_H_ */
diff --git a/include/net/caif/cfmuxl.h b/include/net/caif/cfmuxl.h
new file mode 100644
index 000000000000..4e1b4f33423e
--- /dev/null
+++ b/include/net/caif/cfmuxl.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef CFMUXL_H_
+#define CFMUXL_H_
+#include <net/caif/caif_layer.h>
+
+struct cfsrvl;
+struct cffrml;
+
+struct cflayer *cfmuxl_create(void);
+int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid);
+struct cflayer *cfmuxl_remove_dnlayer(struct cflayer *layr, u8 phyid);
+int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *up, u8 phyid);
+struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 linkid);
+bool cfmuxl_is_phy_inuse(struct cflayer *layr, u8 phyid);
+u8 cfmuxl_get_phyid(struct cflayer *layr, u8 channel_id);
+
+#endif /* CFMUXL_H_ */
diff --git a/include/net/caif/cfpkt.h b/include/net/caif/cfpkt.h
new file mode 100644
index 000000000000..fbc681beff52
--- /dev/null
+++ b/include/net/caif/cfpkt.h
@@ -0,0 +1,274 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef CFPKT_H_
+#define CFPKT_H_
+#include <net/caif/caif_layer.h>
+#include <linux/types.h>
+struct cfpkt;
+
+/* Create a CAIF packet.
+ * len: Length of packet to be created
+ * @return New packet.
+ */
+struct cfpkt *cfpkt_create(u16 len);
+
+/* Create a CAIF packet.
+ * data Data to copy.
+ * len Length of packet to be created
+ * @return New packet.
+ */
+struct cfpkt *cfpkt_create_uplink(const unsigned char *data, unsigned int len);
+/*
+ * Destroy a CAIF Packet.
+ * pkt Packet to be destroyed.
+ */
+void cfpkt_destroy(struct cfpkt *pkt);
+
+/*
+ * Extract header from packet.
+ *
+ * pkt Packet to extract header data from.
+ * data Pointer to copy the header data into.
+ * len Length of head data to copy.
+ * @return zero on success and error code upon failure
+ */
+int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len);
+
+/*
+ * Peek header from packet.
+ * Reads data from packet without changing packet.
+ *
+ * pkt Packet to extract header data from.
+ * data Pointer to copy the header data into.
+ * len Length of head data to copy.
+ * @return zero on success and error code upon failure
+ */
+int cfpkt_peek_head(struct cfpkt *pkt, void *data, u16 len);
+
+/*
+ * Extract trailer data from the end of the packet.
+ *
+ * pkt Packet to extract trailer data from.
+ * data Pointer to copy the trailer data into.
+ * len Length of trailer data to copy.
+ * @return zero on success and error code upon failure
+ */
+int cfpkt_extr_trail(struct cfpkt *pkt, void *data, u16 len);
+
+/*
+ * Add header to packet.
+ *
+ *
+ * pkt Packet to add header data to.
+ * data Pointer to data to copy into the header.
+ * len Length of header data to copy.
+ * @return zero on success and error code upon failure
+ */
+int cfpkt_add_head(struct cfpkt *pkt, const void *data, u16 len);
+
+/*
+ * Add trailer to packet.
+ *
+ *
+ * pkt Packet to add trailer data to.
+ * data Pointer to data to copy into the trailer.
+ * len Length of trailer data to copy.
+ * @return zero on success and error code upon failure
+ */
+int cfpkt_add_trail(struct cfpkt *pkt, const void *data, u16 len);
+
+/*
+ * Pad trailer on packet.
+ * Moves data pointer in packet, no content copied.
+ *
+ * pkt Packet in which to pad trailer.
+ * len Length of padding to add.
+ * @return zero on success and error code upon failure
+ */
+int cfpkt_pad_trail(struct cfpkt *pkt, u16 len);
+
+/*
+ * Add a single byte to packet body (tail).
+ *
+ * pkt Packet in which to add byte.
+ * data Byte to add.
+ * @return zero on success and error code upon failure
+ */
+int cfpkt_addbdy(struct cfpkt *pkt, const u8 data);
+
+/*
+ * Add data to the packet body (tail).
+ *
+ * pkt Packet in which to add data.
+ * data Pointer to data to copy into the packet body.
+ * len Length of data to add.
+ * @return zero on success and error code upon failure
+ */
+int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len);
+
+/*
+ * Checks whether there is more data to process in the packet.
+ * pkt Packet to check.
+ * @return true if more data is available in the packet, false otherwise
+ */
+bool cfpkt_more(struct cfpkt *pkt);
+
+/*
+ * Checks whether the packet is erroneous,
+ * i.e. whether more data has been extracted than was available in the packet,
+ * or more data has been written than was allocated in cfpkt_create().
+ * pkt Packet to check.
+ * @return true on error false otherwise
+ */
+bool cfpkt_erroneous(struct cfpkt *pkt);
+
+/*
+ * Get the packet length.
+ * pkt Packet to get length from.
+ * @return Number of bytes in packet.
+ */
+u16 cfpkt_getlen(struct cfpkt *pkt);
+
+/*
+ * Set the packet length, by adjusting the trailer pointer according to length.
+ * pkt Packet to set length.
+ * len Packet length.
+ * @return Number of bytes in packet.
+ */
+int cfpkt_setlen(struct cfpkt *pkt, u16 len);
+
+/*
+ * cfpkt_append - Appends a packet's data to another packet.
+ * dstpkt: Packet to append data into, WILL BE FREED BY THIS FUNCTION
+ * addpkt: Packet to be appended and automatically released,
+ * WILL BE FREED BY THIS FUNCTION.
+ * expectlen: Packet's expected total length. This should be considered
+ * as a hint.
+ * NB: Input packets will be destroyed after appending and cannot be used
+ * after calling this function.
+ * @return The new appended packet.
+ */
+struct cfpkt *cfpkt_append(struct cfpkt *dstpkt, struct cfpkt *addpkt,
+ u16 expectlen);
+
+/*
+ * cfpkt_split - Split a packet into two packets at the specified split point.
+ * pkt: Packet to be split (will contain the first part of the data on exit)
+ * pos: Position to split packet in two parts.
+ * @return The new packet, containing the second part of the data.
+ */
+struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos);
+
+/*
+ * Iteration function, iterates the packet buffers from start to end.
+ *
+ * Checksum iteration function used to iterate buffers
+ * (we may have packets consisting of a chain of buffers)
+ * pkt: Packet to calculate checksum for
+ * iter_func: Function pointer to iteration function
+ * chks: Checksum calculated so far.
+ * buf: Pointer to the buffer to checksum
+ * len: Length of buf.
+ * data: Initial checksum value.
+ * @return Checksum of buffer.
+ */
+
+u16 cfpkt_iterate(struct cfpkt *pkt,
+ u16 (*iter_func)(u16 chks, void *buf, u16 len),
+ u16 data);
+
+/* Append by giving user access to packet buffer
+ * cfpkt Packet to append to
+ * buf Buffer inside pkt that user shall copy data into
+ * buflen Length of buffer and number of bytes added to packet
+ * @return 0 on error, 1 on success
+ */
+int cfpkt_raw_append(struct cfpkt *cfpkt, void **buf, unsigned int buflen);
+
+/* Extract by giving user access to packet buffer
+ * cfpkt Packet to extract from
+ * buf Buffer inside pkt that user shall copy data from
+ * buflen Length of buffer and number of bytes removed from packet
+ * @return 0 on error, 1 on success
+ */
+int cfpkt_raw_extract(struct cfpkt *cfpkt, void **buf, unsigned int buflen);
+
+/* Map from a "native" packet (e.g. Linux Socket Buffer) to a CAIF packet.
+ * dir - Direction indicating whether this packet is to be sent or received.
+ * nativepkt - The native packet to be transformed to a CAIF packet
+ * @return The mapped CAIF Packet CFPKT.
+ */
+struct cfpkt *cfpkt_fromnative(enum caif_direction dir, void *nativepkt);
+
+/* Map from a CAIF packet to a "native" packet (e.g. Linux Socket Buffer).
+ * pkt - The CAIF packet to be transformed into a "native" packet.
+ * @return The native packet transformed from a CAIF packet.
+ */
+void *cfpkt_tonative(struct cfpkt *pkt);
+
+/*
+ * Insert a packet in the packet queue.
+ * pktq Packet queue to insert into
+ * pkt Packet to be inserted in queue
+ * prio Priority of packet
+ */
+void cfpkt_queue(struct cfpktq *pktq, struct cfpkt *pkt,
+ unsigned short prio);
+
+/*
+ * Remove a packet from the packet queue.
+ * pktq Packet queue to fetch packets from.
+ * @return Dequeued packet.
+ */
+struct cfpkt *cfpkt_dequeue(struct cfpktq *pktq);
+
+/*
+ * Peek into a packet from the packet queue.
+ * pktq Packet queue to fetch packets from.
+ * @return Peeked packet.
+ */
+struct cfpkt *cfpkt_qpeek(struct cfpktq *pktq);
+
+/*
+ * Creates a packet queue.
+ * @return Pointer to new packet queue.
+ */
+struct cfpktq *cfpktq_create(void);
+
+/*
+ * Get the number of packets in the queue.
+ * pktq Packet queue to fetch count from.
+ * @return Number of packets in queue.
+ */
+int cfpkt_qcount(struct cfpktq *pktq);
+
+/*
+ * Put content of packet into buffer for debugging purposes.
+ * pkt Packet to copy data from
+ * buf Buffer to copy data into
+ * buflen Length of data to copy
+ * @return Pointer to copied data
+ */
+char *cfpkt_log_pkt(struct cfpkt *pkt, char *buf, int buflen);
+
+/*
+ * Clones a packet and releases the original packet.
+ * This is used for taking ownership of a packet, e.g. when queueing.
+ * pkt Packet to clone and release.
+ * @return Cloned packet.
+ */
+struct cfpkt *cfpkt_clone_release(struct cfpkt *pkt);
+
+
+/*
+ * Returns packet information for a packet.
+ * pkt Packet to get info from.
+ * @return Packet information
+ */
+struct caif_payload_info *cfpkt_info(struct cfpkt *pkt);
+/*! @} */
+#endif /* CFPKT_H_ */
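
A small usage sketch (assumptions, not from the patch) of the header add/extract pattern a protocol layer would typically follow on transmit and receive; my_tx()/my_rx() and the header byte are illustrative.

static int my_tx(struct cflayer *layr, struct cfpkt *pkt)
{
	u8 hdr = 0x42;			/* illustrative protocol header byte */

	if (cfpkt_add_head(pkt, &hdr, 1))
		return -EPROTO;		/* packet flagged erroneous by cfpkt */
	return layr->dn->transmit(layr->dn, pkt);
}

static int my_rx(struct cflayer *layr, struct cfpkt *pkt)
{
	u8 hdr;

	if (cfpkt_extr_head(pkt, &hdr, 1) || cfpkt_erroneous(pkt)) {
		cfpkt_destroy(pkt);
		return -EPROTO;
	}
	return layr->up->receive(layr->up, pkt);
}
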
diff --git a/include/net/caif/cfserl.h b/include/net/caif/cfserl.h
new file mode 100644
index 000000000000..b8374321b362
--- /dev/null
+++ b/include/net/caif/cfserl.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef CFSERL_H_
+#define CFSERL_H_
+#include <net/caif/caif_layer.h>
+
+struct cflayer *cfserl_create(int type, int instance, bool use_stx);
+#endif /* CFSERL_H_ */
diff --git a/include/net/caif/cfsrvl.h b/include/net/caif/cfsrvl.h
new file mode 100644
index 000000000000..2dc9eb193ecf
--- /dev/null
+++ b/include/net/caif/cfsrvl.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef CFSRVL_H_
+#define CFSRVL_H_
+#include <linux/list.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+#include <linux/kref.h>
+
+struct cfsrvl {
+ struct cflayer layer;
+ bool open;
+ bool phy_flow_on;
+ bool modem_flow_on;
+ struct dev_info dev_info;
+ struct kref ref;
+};
+
+void cfsrvl_release(struct kref *kref);
+struct cflayer *cfvei_create(u8 linkid, struct dev_info *dev_info);
+struct cflayer *cfdgml_create(u8 linkid, struct dev_info *dev_info);
+struct cflayer *cfutill_create(u8 linkid, struct dev_info *dev_info);
+struct cflayer *cfvidl_create(u8 linkid, struct dev_info *dev_info);
+struct cflayer *cfrfml_create(u8 linkid, struct dev_info *dev_info);
+struct cflayer *cfdbgl_create(u8 linkid, struct dev_info *dev_info);
+bool cfsrvl_phyid_match(struct cflayer *layer, int phyid);
+void cfservl_destroy(struct cflayer *layer);
+void cfsrvl_init(struct cfsrvl *service,
+ u8 channel_id,
+ struct dev_info *dev_info);
+bool cfsrvl_ready(struct cfsrvl *service, int *err);
+u8 cfsrvl_getphyid(struct cflayer *layer);
+
+static inline void cfsrvl_get(struct cflayer *layr)
+{
+ struct cfsrvl *s;
+ if (layr == NULL)
+ return;
+ s = container_of(layr, struct cfsrvl, layer);
+ kref_get(&s->ref);
+}
+
+static inline void cfsrvl_put(struct cflayer *layr)
+{
+ struct cfsrvl *s;
+ if (layr == NULL)
+ return;
+ s = container_of(layr, struct cfsrvl, layer);
+ kref_put(&s->ref, cfsrvl_release);
+}
+
+#endif /* CFSRVL_H_ */
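
A short sketch of the reference-counting pattern these inlines enable: pin the service layer while a request is in flight and release it on completion. my_submit()/my_complete() are hypothetical.

static void my_submit(struct cflayer *service)
{
	cfsrvl_get(service);	/* hold a reference while the request is outstanding */
	/* ... queue work that refers to 'service' ... */
}

static void my_complete(struct cflayer *service)
{
	/* ... process the response ... */
	cfsrvl_put(service);	/* drop the kref; cfsrvl_release() runs on the last put */
}
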
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 3d134a1fb96b..b44a2e5321a3 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -511,6 +511,7 @@ struct mpath_info {
* @basic_rates: basic rates in IEEE 802.11 format
* (or NULL for no change)
* @basic_rates_len: number of basic rates
+ * @ap_isolate: do not forward packets between connected stations
*/
struct bss_parameters {
int use_cts_prot;
@@ -518,6 +519,7 @@ struct bss_parameters {
int use_short_slot_time;
u8 *basic_rates;
u8 basic_rates_len;
+ int ap_isolate;
};
struct mesh_config {
@@ -704,6 +706,10 @@ struct cfg80211_crypto_settings {
* @key_len: length of WEP key for shared key authentication
* @key_idx: index of WEP key for shared key authentication
* @key: WEP key for shared key authentication
+ * @local_state_change: This is a request for a local state only, i.e., no
+ * Authentication frame is to be transmitted and authentication state is
+ * to be changed without having to wait for a response from the peer STA
+ * (AP).
*/
struct cfg80211_auth_request {
struct cfg80211_bss *bss;
@@ -712,6 +718,7 @@ struct cfg80211_auth_request {
enum nl80211_auth_type auth_type;
const u8 *key;
u8 key_len, key_idx;
+ bool local_state_change;
};
/**
@@ -744,12 +751,15 @@ struct cfg80211_assoc_request {
* @ie: Extra IEs to add to Deauthentication frame or %NULL
* @ie_len: Length of ie buffer in octets
* @reason_code: The reason code for the deauthentication
+ * @local_state_change: This is a request for a local state only, i.e., no
+ * Deauthentication frame is to be transmitted.
*/
struct cfg80211_deauth_request {
struct cfg80211_bss *bss;
const u8 *ie;
size_t ie_len;
u16 reason_code;
+ bool local_state_change;
};
/**
@@ -762,12 +772,15 @@ struct cfg80211_deauth_request {
* @ie: Extra IEs to add to Disassociation frame or %NULL
* @ie_len: Length of ie buffer in octets
* @reason_code: The reason code for the disassociation
+ * @local_state_change: This is a request for a local state only, i.e., no
+ * Disassociation frame is to be transmitted.
*/
struct cfg80211_disassoc_request {
struct cfg80211_bss *bss;
const u8 *ie;
size_t ie_len;
u16 reason_code;
+ bool local_state_change;
};
/**
@@ -953,7 +966,11 @@ struct cfg80211_pmksa {
*
* @set_txq_params: Set TX queue parameters
*
- * @set_channel: Set channel
+ * @set_channel: Set channel for a given wireless interface. Some devices
+ * may support multi-channel operation (by channel hopping) so cfg80211
+ * doesn't verify much. Note, however, that the passed netdev may be
+ * %NULL as well if the user requested changing the channel for the
+ * device itself, or for a monitor interface.
*
* @scan: Request to do a scan. If returning zero, the scan request is given
* the driver, and will be valid until passed to cfg80211_scan_done().
@@ -1007,6 +1024,9 @@ struct cfg80211_pmksa {
* RSN IE. It allows for faster roaming between WPA2 BSSIDs.
* @del_pmksa: Delete a cached PMKID.
* @flush_pmksa: Flush all cached PMKIDs.
+ * @set_power_mgmt: Configure WLAN power management. A timeout value of -1
+ * allows the driver to adjust the dynamic ps timeout value.
+ * @set_cqm_rssi_config: Configure connection quality monitor RSSI threshold.
*
*/
struct cfg80211_ops {
@@ -1079,7 +1099,7 @@ struct cfg80211_ops {
int (*set_txq_params)(struct wiphy *wiphy,
struct ieee80211_txq_params *params);
- int (*set_channel)(struct wiphy *wiphy,
+ int (*set_channel)(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type);
@@ -1152,6 +1172,10 @@ struct cfg80211_ops {
int (*set_power_mgmt)(struct wiphy *wiphy, struct net_device *dev,
bool enabled, int timeout);
+
+ int (*set_cqm_rssi_config)(struct wiphy *wiphy,
+ struct net_device *dev,
+ s32 rssi_thold, u32 rssi_hyst);
};
/*
@@ -1441,6 +1465,8 @@ struct cfg80211_cached_keys;
* @list: (private) Used to collect the interfaces
* @netdev: (private) Used to reference back to the netdev
* @current_bss: (private) Used by the internal configuration code
+ * @channel: (private) Used by the internal configuration code to track
+ * user-set AP, monitor and WDS channels for wireless extensions
* @bssid: (private) Used by the internal configuration code
* @ssid: (private) Used by the internal configuration code
* @ssid_len: (private) Used by the internal configuration code
@@ -1487,6 +1513,7 @@ struct wireless_dev {
struct cfg80211_internal_bss *authtry_bsses[MAX_AUTH_BSSES];
struct cfg80211_internal_bss *auth_bsses[MAX_AUTH_BSSES];
struct cfg80211_internal_bss *current_bss; /* associated / joined */
+ struct ieee80211_channel *channel;
bool ps;
int ps_timeout;
@@ -1627,7 +1654,7 @@ struct ieee80211_radiotap_iterator {
const struct ieee80211_radiotap_namespace *current_namespace;
unsigned char *_arg, *_next_ns_data;
- uint32_t *_next_bitmap;
+ __le32 *_next_bitmap;
unsigned char *this_arg;
int this_arg_index;
@@ -2337,4 +2364,18 @@ bool cfg80211_rx_action(struct net_device *dev, int freq, const u8 *buf,
void cfg80211_action_tx_status(struct net_device *dev, u64 cookie,
const u8 *buf, size_t len, bool ack, gfp_t gfp);
+
+/**
+ * cfg80211_cqm_rssi_notify - connection quality monitoring rssi event
+ * @dev: network device
+ * @rssi_event: the triggered RSSI event
+ * @gfp: context flags
+ *
+ * This function is called when a configured connection quality monitoring
+ * RSSI threshold event occurs.
+ */
+void cfg80211_cqm_rssi_notify(struct net_device *dev,
+ enum nl80211_cqm_rssi_threshold_event rssi_event,
+ gfp_t gfp);
+
#endif /* __NET_CFG80211_H */
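
A minimal sketch of how a fullmac driver might wire up the new CQM hooks, assuming a hypothetical private structure and helpers (mydrv_priv, mydrv_push_cqm_to_fw and priv->ndev are illustrative names, not part of this patch):

static int mydrv_set_cqm_rssi_config(struct wiphy *wiphy,
				     struct net_device *dev,
				     s32 rssi_thold, u32 rssi_hyst)
{
	struct mydrv_priv *priv = wiphy_priv(wiphy);

	/* remember the threshold/hysteresis and push them to the firmware */
	priv->cqm_rssi_thold = rssi_thold;
	priv->cqm_rssi_hyst = rssi_hyst;
	return mydrv_push_cqm_to_fw(priv);
}

/* from the driver's RSSI event worker, once the threshold is crossed */
static void mydrv_rssi_event(struct mydrv_priv *priv, bool low)
{
	cfg80211_cqm_rssi_notify(priv->ndev,
				 low ? NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW :
				       NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
				 GFP_KERNEL);
}

The driver would hook mydrv_set_cqm_rssi_config into the new .set_cqm_rssi_config member of its cfg80211_ops.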
diff --git a/include/net/dn_fib.h b/include/net/dn_fib.h
index 52da6c3dd50d..bbcde3238e58 100644
--- a/include/net/dn_fib.h
+++ b/include/net/dn_fib.h
@@ -50,10 +50,6 @@ struct dn_fib_info {
__le16 fib_prefsrc;
__u32 fib_priority;
__u32 fib_metrics[RTAX_MAX];
-#define dn_fib_mtu fib_metrics[RTAX_MTU-1]
-#define dn_fib_window fib_metrics[RTAX_WINDOW-1]
-#define dn_fib_rtt fib_metrics[RTAX_RTT-1]
-#define dn_fib_advmss fib_metrics[RTAX_ADVMSS-1]
int fib_nhs;
int fib_power;
struct dn_fib_nh fib_nh[0];
diff --git a/include/net/dst.h b/include/net/dst.h
index ce078cda6b74..aac5a5fcfda9 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -225,21 +225,6 @@ static inline void dst_confirm(struct dst_entry *dst)
neigh_confirm(dst->neighbour);
}
-static inline void dst_negative_advice(struct dst_entry **dst_p,
- struct sock *sk)
-{
- struct dst_entry * dst = *dst_p;
- if (dst && dst->ops->negative_advice) {
- *dst_p = dst->ops->negative_advice(dst);
-
- if (dst != *dst_p) {
- extern void sk_reset_txq(struct sock *sk);
-
- sk_reset_txq(sk);
- }
- }
-}
-
static inline void dst_link_failure(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index c49086d2bc7d..e8923bc20f9f 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -104,7 +104,7 @@ static inline u32 frh_get_table(struct fib_rule_hdr *frh, struct nlattr **nla)
return frh->table;
}
-extern struct fib_rules_ops *fib_rules_register(struct fib_rules_ops *, struct net *);
+extern struct fib_rules_ops *fib_rules_register(const struct fib_rules_ops *, struct net *);
extern void fib_rules_unregister(struct fib_rules_ops *);
extern void fib_rules_cleanup_ops(struct fib_rules_ops *);
@@ -114,4 +114,5 @@ extern int fib_rules_lookup(struct fib_rules_ops *,
extern int fib_default_rule_add(struct fib_rules_ops *,
u32 pref, u32 table,
u32 flags);
+extern u32 fib_default_rule_pref(struct fib_rules_ops *ops);
#endif
diff --git a/include/net/flow.h b/include/net/flow.h
index 809970b7dfee..bb08692a20b0 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -86,11 +86,26 @@ struct flowi {
struct net;
struct sock;
-typedef int (*flow_resolve_t)(struct net *net, struct flowi *key, u16 family,
- u8 dir, void **objp, atomic_t **obj_refp);
+struct flow_cache_ops;
+
+struct flow_cache_object {
+ const struct flow_cache_ops *ops;
+};
+
+struct flow_cache_ops {
+ struct flow_cache_object *(*get)(struct flow_cache_object *);
+ int (*check)(struct flow_cache_object *);
+ void (*delete)(struct flow_cache_object *);
+};
+
+typedef struct flow_cache_object *(*flow_resolve_t)(
+ struct net *net, struct flowi *key, u16 family,
+ u8 dir, struct flow_cache_object *oldobj, void *ctx);
+
+extern struct flow_cache_object *flow_cache_lookup(
+ struct net *net, struct flowi *key, u16 family,
+ u8 dir, flow_resolve_t resolver, void *ctx);
-extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
- u8 dir, flow_resolve_t resolver);
extern void flow_cache_flush(void);
extern atomic_t flow_cache_genid;
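
The reworked flow cache stores reference-counted objects embedding struct flow_cache_object instead of opaque pointers; the resolver passed to flow_cache_lookup() returns such an object and the cache drives it through the get/check/delete ops. A minimal sketch of a hypothetical user (my_cached_entry, my_ops and the refcount scheme are illustrative names; the in-tree user is the xfrm policy/bundle code):

struct my_cached_entry {
	struct flow_cache_object flo;	/* must be embedded in the entry */
	atomic_t refcnt;
	/* ... cached state ... */
};

static struct flow_cache_object *my_get(struct flow_cache_object *flo)
{
	struct my_cached_entry *e = container_of(flo, struct my_cached_entry, flo);

	atomic_inc(&e->refcnt);
	return flo;
}

static int my_check(struct flow_cache_object *flo)
{
	/* return 0 to make the cache call the resolver again */
	return 1;
}

static void my_delete(struct flow_cache_object *flo)
{
	struct my_cached_entry *e = container_of(flo, struct my_cached_entry, flo);

	if (atomic_dec_and_test(&e->refcnt))
		kfree(e);
}

static const struct flow_cache_ops my_ops = {
	.get	= my_get,
	.check	= my_check,
	.delete	= my_delete,
};

/* a resolver handed to flow_cache_lookup() allocates an entry, sets
 * e->flo.ops = &my_ops and returns &e->flo for the cache to keep.
 */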
diff --git a/include/net/icmp.h b/include/net/icmp.h
index 15b3dfe9fce8..6e991e0d0d6f 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -48,15 +48,4 @@ extern void icmp_out_count(struct net *net, unsigned char type);
/* Move into dst.h ? */
extern int xrlim_allow(struct dst_entry *dst, int timeout);
-struct raw_sock {
- /* inet_sock has to be the first member */
- struct inet_sock inet;
- struct icmp_filter filter;
-};
-
-static inline struct raw_sock *raw_sk(const struct sock *sk)
-{
- return (struct raw_sock *)sk;
-}
-
#endif /* _ICMP_H */
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 545d8b059bef..13f9fc086d54 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -54,16 +54,17 @@ struct inet6_ifaddr {
struct inet6_dev *idev;
struct rt6_info *rt;
- struct inet6_ifaddr *lst_next; /* next addr in addr_lst */
- struct inet6_ifaddr *if_next; /* next addr in inet6_dev */
+ struct hlist_node addr_lst;
+ struct list_head if_list;
#ifdef CONFIG_IPV6_PRIVACY
- struct inet6_ifaddr *tmp_next; /* next addr in tempaddr_lst */
+ struct list_head tmp_list;
struct inet6_ifaddr *ifpub;
int regen_count;
#endif
int dead;
+ struct rcu_head rcu;
};
struct ip6_sf_socklist {
@@ -151,9 +152,9 @@ struct ipv6_devstat {
};
struct inet6_dev {
- struct net_device *dev;
+ struct net_device *dev;
- struct inet6_ifaddr *addr_list;
+ struct list_head addr_list;
struct ifmcaddr6 *mc_list;
struct ifmcaddr6 *mc_tomb;
@@ -175,7 +176,7 @@ struct inet6_dev {
#ifdef CONFIG_IPV6_PRIVACY
u8 rndid[8];
struct timer_list regen_timer;
- struct inet6_ifaddr *tempaddr_list;
+ struct list_head tempaddr_list;
#endif
struct neigh_parms *nd_parms;
diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h
index f13ddc2543b1..aae08f686633 100644
--- a/include/net/inet6_connection_sock.h
+++ b/include/net/inet6_connection_sock.h
@@ -38,5 +38,5 @@ extern void inet6_csk_reqsk_queue_hash_add(struct sock *sk,
extern void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
-extern int inet6_csk_xmit(struct sk_buff *skb, int ipfragok);
+extern int inet6_csk_xmit(struct sk_buff *skb);
#endif /* _INET6_CONNECTION_SOCK_H */
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 696d6e4ce68a..b6d3b55da19b 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -36,9 +36,8 @@ struct tcp_congestion_ops;
* (i.e. things that depend on the address family)
*/
struct inet_connection_sock_af_ops {
- int (*queue_xmit)(struct sk_buff *skb, int ipfragok);
- void (*send_check)(struct sock *sk, int len,
- struct sk_buff *skb);
+ int (*queue_xmit)(struct sk_buff *skb);
+ void (*send_check)(struct sock *sk, struct sk_buff *skb);
int (*rebuild_header)(struct sock *sk);
int (*conn_request)(struct sock *sk, struct sk_buff *skb);
struct sock *(*syn_recv_sock)(struct sock *sk, struct sk_buff *skb,
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 83fd34437cf1..1653de515cee 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -21,6 +21,7 @@
#include <linux/string.h>
#include <linux/types.h>
#include <linux/jhash.h>
+#include <linux/netdevice.h>
#include <net/flow.h>
#include <net/sock.h>
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 79f67eae8a7e..a066fdd50da6 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -224,7 +224,9 @@ static inline
struct net *twsk_net(const struct inet_timewait_sock *twsk)
{
#ifdef CONFIG_NET_NS
- return rcu_dereference(twsk->tw_net);
+ return rcu_dereference_raw(twsk->tw_net); /* protected by locking, */
+ /* reference counting, */
+ /* initialization, or RCU. */
#else
return &init_net;
#endif
diff --git a/include/net/ip.h b/include/net/ip.h
index 503994a38ed1..8149b77cea9b 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -101,7 +101,7 @@ extern int ip_do_nat(struct sk_buff *skb);
extern void ip_send_check(struct iphdr *ip);
extern int __ip_local_out(struct sk_buff *skb);
extern int ip_local_out(struct sk_buff *skb);
-extern int ip_queue_xmit(struct sk_buff *skb, int ipfragok);
+extern int ip_queue_xmit(struct sk_buff *skb);
extern void ip_init(void);
extern int ip_append_data(struct sock *sk,
int getfrag(void *from, char *to, int offset, int len,
@@ -393,6 +393,7 @@ extern int ip_options_rcv_srr(struct sk_buff *skb);
* Functions provided by ip_sockglue.c
*/
+extern int ip_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
extern void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb);
extern int ip_cmsg_send(struct net *net,
struct msghdr *msg, struct ipcm_cookie *ipc);
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 86f46c49e318..4b1dc1161c37 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -88,34 +88,37 @@ struct rt6_info {
struct dst_entry dst;
} u;
- struct inet6_dev *rt6i_idev;
-
#define rt6i_dev u.dst.dev
#define rt6i_nexthop u.dst.neighbour
#define rt6i_expires u.dst.expires
+ /*
+ * Tail elements of dst_entry (__refcnt etc.)
+ * and these elements (rarely used in hot path) are in
+ * the same cache line.
+ */
+ struct fib6_table *rt6i_table;
struct fib6_node *rt6i_node;
struct in6_addr rt6i_gateway;
-
- u32 rt6i_flags;
- u32 rt6i_metric;
- atomic_t rt6i_ref;
- /* more non-fragment space at head required */
- unsigned short rt6i_nfheader_len;
-
- u8 rt6i_protocol;
+ atomic_t rt6i_ref;
- struct fib6_table *rt6i_table;
+ /* These are in a separate cache line. */
+ struct rt6key rt6i_dst ____cacheline_aligned_in_smp;
+ u32 rt6i_flags;
+ struct rt6key rt6i_src;
+ u32 rt6i_metric;
- struct rt6key rt6i_dst;
+ struct inet6_dev *rt6i_idev;
#ifdef CONFIG_XFRM
u32 rt6i_flow_cache_genid;
#endif
+ /* more non-fragment space at head required */
+ unsigned short rt6i_nfheader_len;
- struct rt6key rt6i_src;
+ u8 rt6i_protocol;
};
static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 68f67836e146..278312c95f96 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -152,9 +152,9 @@ static inline void __ip6_dst_store(struct sock *sk, struct dst_entry *dst,
static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
struct in6_addr *daddr, struct in6_addr *saddr)
{
- write_lock(&sk->sk_dst_lock);
+ spin_lock(&sk->sk_dst_lock);
__ip6_dst_store(sk, dst, daddr, saddr);
- write_unlock(&sk->sk_dst_lock);
+ spin_unlock(&sk->sk_dst_lock);
}
static inline int ipv6_unicast_destination(struct sk_buff *skb)
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index e72fb10ce573..eba5cc00325a 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -422,7 +422,7 @@ static inline int __ipv6_addr_diff(const void *token1, const void *token2, int a
for (i = 0; i < addrlen; i++) {
__be32 xb = a1[i] ^ a2[i];
if (xb)
- return i * 32 + 32 - fls(ntohl(xb));
+ return i * 32 + 31 - __fls(ntohl(xb));
}
/*
@@ -482,8 +482,7 @@ extern int ip6_rcv_finish(struct sk_buff *skb);
extern int ip6_xmit(struct sock *sk,
struct sk_buff *skb,
struct flowi *fl,
- struct ipv6_txoptions *opt,
- int ipfragok);
+ struct ipv6_txoptions *opt);
extern int ip6_nd_hdr(struct sock *sk,
struct sk_buff *skb,
@@ -504,7 +503,8 @@ extern int ip6_append_data(struct sock *sk,
struct ipv6_txoptions *opt,
struct flowi *fl,
struct rt6_info *rt,
- unsigned int flags);
+ unsigned int flags,
+ int dontfrag);
extern int ip6_push_pending_frames(struct sock *sk);
@@ -578,9 +578,11 @@ extern int ip6_datagram_connect(struct sock *sk,
struct sockaddr *addr, int addr_len);
extern int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len);
+extern int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len);
extern void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
u32 info, u8 *payload);
extern void ipv6_local_error(struct sock *sk, int err, struct flowi *fl, u32 info);
+extern void ipv6_local_rxpmtu(struct sock *sk, struct flowi *fl, u32 mtu);
extern int inet6_release(struct socket *sock);
extern int inet6_bind(struct socket *sock, struct sockaddr *uaddr,
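
The __ipv6_addr_diff() change relies on fls(x) == __fls(x) + 1 for any non-zero x (xb is known non-zero inside the "if (xb)" branch), so "32 - fls(x)" and "31 - __fls(x)" compute the same leading-bit offset while __fls() skips the zero check. A worked value:

	fls(0x00010000)   == 17		/* 1-based index of the highest set bit */
	__fls(0x00010000) == 16		/* 0-based index of the highest set bit */
	32 - 17 == 31 - 16 == 15	/* common leading bits in that word */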
diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h
index b2b98f3fa265..3afdb21cc31d 100644
--- a/include/net/iw_handler.h
+++ b/include/net/iw_handler.h
@@ -323,7 +323,7 @@ typedef int (*iw_handler)(struct net_device *dev, struct iw_request_info *info,
struct iw_handler_def {
/* Array of handlers for standard ioctls
- * We will call dev->wireless_handlers->standard[ioctl - SIOCSIWCOMMIT]
+ * We will call dev->wireless_handlers->standard[ioctl - SIOCIWFIRST]
*/
const iw_handler * standard;
/* Number of handlers defined (more precisely, index of the
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 45d7d44d7cbe..ac45c5b9d7e2 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -144,6 +144,8 @@ struct ieee80211_low_level_stats {
* new beacon (beaconing modes)
* @BSS_CHANGED_BEACON_ENABLED: Beaconing should be
* enabled/disabled (beaconing modes)
+ * @BSS_CHANGED_CQM: Connection quality monitor config changed
+ * @BSS_CHANGED_IBSS: IBSS join status changed
*/
enum ieee80211_bss_change {
BSS_CHANGED_ASSOC = 1<<0,
@@ -156,6 +158,10 @@ enum ieee80211_bss_change {
BSS_CHANGED_BSSID = 1<<7,
BSS_CHANGED_BEACON = 1<<8,
BSS_CHANGED_BEACON_ENABLED = 1<<9,
+ BSS_CHANGED_CQM = 1<<10,
+ BSS_CHANGED_IBSS = 1<<11,
+
+ /* when adding here, make sure to change ieee80211_reconfig */
};
/**
@@ -165,6 +171,8 @@ enum ieee80211_bss_change {
* to that BSS) that can change during the lifetime of the BSS.
*
* @assoc: association status
+ * @ibss_joined: indicates whether this station is part of an IBSS
+ * or not
* @aid: association ID number, valid only when @assoc is true
* @use_cts_prot: use CTS protection
* @use_short_preamble: use 802.11b short preamble;
@@ -183,13 +191,19 @@ enum ieee80211_bss_change {
* the current band.
* @bssid: The BSSID for this BSS
* @enable_beacon: whether beaconing should be enabled or not
+ * @channel_type: Channel type for this BSS -- the hardware might be
+ * configured for HT40+ while this BSS only uses no-HT, for
+ * example.
* @ht_operation_mode: HT operation mode (like in &struct ieee80211_ht_info).
* This field is only valid when the channel type is one of the HT types.
+ * @cqm_rssi_thold: Connection quality monitor RSSI threshold, a zero value
+ * implies disabled
+ * @cqm_rssi_hyst: Connection quality monitor RSSI hysteresis
*/
struct ieee80211_bss_conf {
const u8 *bssid;
/* association related data */
- bool assoc;
+ bool assoc, ibss_joined;
u16 aid;
/* erp related data */
bool use_cts_prot;
@@ -202,6 +216,9 @@ struct ieee80211_bss_conf {
u64 timestamp;
u32 basic_rates;
u16 ht_operation_mode;
+ s32 cqm_rssi_thold;
+ u32 cqm_rssi_hyst;
+ enum nl80211_channel_type channel_type;
};
/**
@@ -267,6 +284,9 @@ struct ieee80211_bss_conf {
* @IEEE80211_TX_INTFL_NL80211_FRAME_TX: Frame was requested through nl80211
* MLME command (internal to mac80211 to figure out whether to send TX
* status to user space)
+ * @IEEE80211_TX_CTL_LDPC: tells the driver to use LDPC for this frame
+ * @IEEE80211_TX_CTL_STBC: Enables Space-Time Block Coding (STBC) for this
+ * frame and selects the maximum number of streams that it can use.
*/
enum mac80211_tx_control_flags {
IEEE80211_TX_CTL_REQ_TX_STATUS = BIT(0),
@@ -290,6 +310,9 @@ enum mac80211_tx_control_flags {
IEEE80211_TX_INTFL_RETRANSMISSION = BIT(19),
IEEE80211_TX_INTFL_HAS_RADIOTAP = BIT(20),
IEEE80211_TX_INTFL_NL80211_FRAME_TX = BIT(21),
+ IEEE80211_TX_CTL_LDPC = BIT(22),
+ IEEE80211_TX_CTL_STBC = BIT(23) | BIT(24),
+#define IEEE80211_TX_CTL_STBC_SHIFT 23
};
/**
@@ -388,11 +411,11 @@ struct ieee80211_tx_rate {
* @status: union for status data
* @driver_data: array of driver_data pointers
* @ampdu_ack_len: number of acked aggregated frames.
- * relevant only if IEEE80211_TX_STATUS_AMPDU was set.
+ * relevant only if IEEE80211_TX_STAT_AMPDU was set.
* @ampdu_ack_map: block ack bit map for the aggregation.
- * relevant only if IEEE80211_TX_STATUS_AMPDU was set.
+ * relevant only if IEEE80211_TX_STAT_AMPDU was set.
* @ampdu_len: number of aggregated frames.
- * relevant only if IEEE80211_TX_STATUS_AMPDU was set.
+ * relevant only if IEEE80211_TX_STAT_AMPDU was set.
* @ack_signal: signal strength of the ACK frame
*/
struct ieee80211_tx_info {
@@ -543,7 +566,6 @@ enum mac80211_rx_flags {
* @signal: signal strength when receiving this frame, either in dBm, in dB or
* unspecified depending on the hardware capabilities flags
* @IEEE80211_HW_SIGNAL_*
- * @noise: noise when receiving this frame, in dBm.
* @antenna: antenna used
* @rate_idx: index of data rate into band's supported rates or MCS index if
* HT rates are use (RX_FLAG_HT)
@@ -554,7 +576,6 @@ struct ieee80211_rx_status {
enum ieee80211_band band;
int freq;
int signal;
- int noise;
int antenna;
int rate_idx;
int flag;
@@ -580,11 +601,15 @@ struct ieee80211_rx_status {
* may turn the device off as much as possible. Typically, this flag will
* be set when an interface is set UP but not associated or scanning, but
* it can also be unset in that case when monitor interfaces are active.
+ * @IEEE80211_CONF_QOS: Enable 802.11e QoS also known as WMM (Wireless
+ * Multimedia). On some drivers (iwlwifi is one of them) we have
+ * to enable/disable QoS explicitly.
*/
enum ieee80211_conf_flags {
IEEE80211_CONF_MONITOR = (1<<0),
IEEE80211_CONF_PS = (1<<1),
IEEE80211_CONF_IDLE = (1<<2),
+ IEEE80211_CONF_QOS = (1<<3),
};
@@ -599,6 +624,7 @@ enum ieee80211_conf_flags {
* @IEEE80211_CONF_CHANGE_RETRY_LIMITS: retry limits changed
* @IEEE80211_CONF_CHANGE_IDLE: Idle flag changed
* @IEEE80211_CONF_CHANGE_SMPS: Spatial multiplexing powersave mode changed
+ * @IEEE80211_CONF_CHANGE_QOS: Quality of service was enabled or disabled
*/
enum ieee80211_conf_changed {
IEEE80211_CONF_CHANGE_SMPS = BIT(1),
@@ -609,6 +635,7 @@ enum ieee80211_conf_changed {
IEEE80211_CONF_CHANGE_CHANNEL = BIT(6),
IEEE80211_CONF_CHANGE_RETRY_LIMITS = BIT(7),
IEEE80211_CONF_CHANGE_IDLE = BIT(8),
+ IEEE80211_CONF_CHANGE_QOS = BIT(9),
};
/**
@@ -649,6 +676,9 @@ enum ieee80211_smps_mode {
* @dynamic_ps_timeout: The dynamic powersave timeout (in ms), see the
* powersave documentation below. This variable is valid only when
* the CONF_PS flag is set.
+ * @dynamic_ps_forced_timeout: The dynamic powersave timeout (in ms) configured
+ * by cfg80211 (essentially, wext). If set, this value overrules the value
+ * chosen by mac80211 based on PS QoS network latency.
*
* @power_level: requested transmit power (in dBm)
*
@@ -668,7 +698,7 @@ enum ieee80211_smps_mode {
*/
struct ieee80211_conf {
u32 flags;
- int power_level, dynamic_ps_timeout;
+ int power_level, dynamic_ps_timeout, dynamic_ps_forced_timeout;
int max_sleep_period;
u16 listen_interval;
@@ -779,6 +809,7 @@ struct ieee80211_key_conf {
u8 iv_len;
u8 hw_key_idx;
u8 flags;
+ u8 *ap_addr;
s8 keyidx;
u8 keylen;
u8 key[0];
@@ -907,10 +938,6 @@ enum ieee80211_tkip_key_type {
* one milliwatt. This is the preferred method since it is standardized
* between different devices. @max_signal does not need to be set.
*
- * @IEEE80211_HW_NOISE_DBM:
- * Hardware can provide noise (radio interference) values in units dBm,
- * decibel difference from one milliwatt.
- *
* @IEEE80211_HW_SPECTRUM_MGMT:
* Hardware supports spectrum management defined in 802.11h
* Measurement, Channel Switch, Quieting, TPC
@@ -954,6 +981,17 @@ enum ieee80211_tkip_key_type {
* Hardware can provide ack status reports of Tx frames to
* the stack.
*
+ * @IEEE80211_HW_CONNECTION_MONITOR:
+ * The hardware performs its own connection monitoring, including
+ * periodic keep-alives to the AP and probing the AP on beacon loss.
+ * When this flag is set, signaling beacon-loss will cause an immediate
+ * change to disassociated state.
+ *
+ * @IEEE80211_HW_SUPPORTS_CQM_RSSI:
+ * Hardware can do connection quality monitoring - i.e. it can monitor
+ * connection quality related parameters, such as the RSSI level and
+ * provide notifications if configured trigger levels are reached.
+ *
*/
enum ieee80211_hw_flags {
IEEE80211_HW_HAS_RATE_CONTROL = 1<<0,
@@ -963,7 +1001,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE = 1<<4,
IEEE80211_HW_SIGNAL_UNSPEC = 1<<5,
IEEE80211_HW_SIGNAL_DBM = 1<<6,
- IEEE80211_HW_NOISE_DBM = 1<<7,
+ /* use this hole */
IEEE80211_HW_SPECTRUM_MGMT = 1<<8,
IEEE80211_HW_AMPDU_AGGREGATION = 1<<9,
IEEE80211_HW_SUPPORTS_PS = 1<<10,
@@ -975,6 +1013,8 @@ enum ieee80211_hw_flags {
IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS = 1<<16,
IEEE80211_HW_SUPPORTS_UAPSD = 1<<17,
IEEE80211_HW_REPORTS_TX_ACK_STATUS = 1<<18,
+ IEEE80211_HW_CONNECTION_MONITOR = 1<<19,
+ IEEE80211_HW_SUPPORTS_CQM_RSSI = 1<<20,
};
/**
@@ -1606,7 +1646,7 @@ struct ieee80211_ops {
struct ieee80211_bss_conf *info,
u32 changed);
u64 (*prepare_multicast)(struct ieee80211_hw *hw,
- int mc_count, struct dev_addr_list *mc_list);
+ struct netdev_hw_addr_list *mc_list);
void (*configure_filter)(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *total_flags,
@@ -1621,7 +1661,7 @@ struct ieee80211_ops {
struct ieee80211_key_conf *conf,
struct ieee80211_sta *sta,
u32 iv32, u16 *phase1key);
- int (*hw_scan)(struct ieee80211_hw *hw,
+ int (*hw_scan)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct cfg80211_scan_request *req);
void (*sw_scan_start)(struct ieee80211_hw *hw);
void (*sw_scan_complete)(struct ieee80211_hw *hw);
@@ -1646,7 +1686,8 @@ struct ieee80211_ops {
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta, u16 tid, u16 *ssn);
-
+ int (*get_survey)(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey);
void (*rfkill_poll)(struct ieee80211_hw *hw);
void (*set_coverage_class)(struct ieee80211_hw *hw, u8 coverage_class);
#ifdef CONFIG_NL80211_TESTMODE
@@ -1802,7 +1843,10 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw);
* ieee80211_rx - receive frame
*
* Use this function to hand received frames to mac80211. The receive
- * buffer in @skb must start with an IEEE 802.11 header.
+ * buffer in @skb must start with an IEEE 802.11 header. If a
+ * paged @skb is used, the driver is recommended to put the ieee80211
+ * header of the frame on the linear part of the @skb to avoid memory
+ * allocation and/or memcpy by the stack.
*
* This function may not be called in IRQ context. Calls to this function
* for a single hardware must be synchronized against each other. Calls to
@@ -2364,12 +2408,42 @@ void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
*
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
*
- * When beacon filtering is enabled with IEEE80211_HW_BEACON_FILTERING and
- * IEEE80211_CONF_PS is set, the driver needs to inform whenever the
+ * When beacon filtering is enabled with %IEEE80211_HW_BEACON_FILTERING and
+ * %IEEE80211_CONF_PS is set, the driver needs to inform whenever the
* hardware is not receiving beacons with this function.
*/
void ieee80211_beacon_loss(struct ieee80211_vif *vif);
+/**
+ * ieee80211_connection_loss - inform hardware has lost connection to the AP
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * When beacon filtering is enabled with %IEEE80211_HW_BEACON_FILTERING, and
+ * %IEEE80211_CONF_PS and %IEEE80211_HW_CONNECTION_MONITOR are set, the driver
+ * needs to inform if the connection to the AP has been lost.
+ *
+ * This function will cause immediate change to disassociated state,
+ * without connection recovery attempts.
+ */
+void ieee80211_connection_loss(struct ieee80211_vif *vif);
+
+/**
+ * ieee80211_cqm_rssi_notify - inform that a configured connection quality
+ * monitoring RSSI threshold was triggered
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @rssi_event: the RSSI trigger event type
+ * @gfp: context flags
+ *
+ * When the %IEEE80211_HW_SUPPORTS_CQM_RSSI is set, and a connection quality
+ * monitoring is configured with an rssi threshold, the driver will inform
+ * whenever the rssi level reaches the threshold.
+ */
+void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif,
+ enum nl80211_cqm_rssi_threshold_event rssi_event,
+ gfp_t gfp);
+
/* Rate control API */
/**
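
A minimal sketch of how a mac80211 driver might consume the new CQM plumbing, assuming hypothetical helpers (mydrv_hw, mydrv_vif and mydrv_hw_set_rssi_trigger are illustrative names only):

static void mydrv_bss_info_changed(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   struct ieee80211_bss_conf *info,
				   u32 changed)
{
	struct mydrv_hw *mh = hw->priv;

	if (changed & BSS_CHANGED_CQM)
		/* a zero threshold means CQM is disabled */
		mydrv_hw_set_rssi_trigger(mh, info->cqm_rssi_thold,
					  info->cqm_rssi_hyst);
}

/* from the driver's RSSI threshold interrupt/work handler */
static void mydrv_rssi_trigger(struct mydrv_vif *mv, bool below)
{
	ieee80211_cqm_rssi_notify(mv->vif,
				  below ? NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW :
					  NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
				  GFP_ATOMIC);
}

The driver advertises the capability by setting IEEE80211_HW_SUPPORTS_CQM_RSSI in hw->flags before ieee80211_register_hw().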
diff --git a/include/net/mld.h b/include/net/mld.h
new file mode 100644
index 000000000000..467143cd4e2f
--- /dev/null
+++ b/include/net/mld.h
@@ -0,0 +1,75 @@
+#ifndef LINUX_MLD_H
+#define LINUX_MLD_H
+
+#include <linux/in6.h>
+#include <linux/icmpv6.h>
+
+/* MLDv1 Query/Report/Done */
+struct mld_msg {
+ struct icmp6hdr mld_hdr;
+ struct in6_addr mld_mca;
+};
+
+#define mld_type mld_hdr.icmp6_type
+#define mld_code mld_hdr.icmp6_code
+#define mld_cksum mld_hdr.icmp6_cksum
+#define mld_maxdelay mld_hdr.icmp6_maxdelay
+#define mld_reserved mld_hdr.icmp6_dataun.un_data16[1]
+
+/* Multicast Listener Discovery version 2 headers */
+/* MLDv2 Report */
+struct mld2_grec {
+ __u8 grec_type;
+ __u8 grec_auxwords;
+ __be16 grec_nsrcs;
+ struct in6_addr grec_mca;
+ struct in6_addr grec_src[0];
+};
+
+struct mld2_report {
+ struct icmp6hdr mld2r_hdr;
+ struct mld2_grec mld2r_grec[0];
+};
+
+#define mld2r_type mld2r_hdr.icmp6_type
+#define mld2r_resv1 mld2r_hdr.icmp6_code
+#define mld2r_cksum mld2r_hdr.icmp6_cksum
+#define mld2r_resv2 mld2r_hdr.icmp6_dataun.un_data16[0]
+#define mld2r_ngrec mld2r_hdr.icmp6_dataun.un_data16[1]
+
+/* MLDv2 Query */
+struct mld2_query {
+ struct icmp6hdr mld2q_hdr;
+ struct in6_addr mld2q_mca;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 mld2q_qrv:3,
+ mld2q_suppress:1,
+ mld2q_resv2:4;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __u8 mld2q_resv2:4,
+ mld2q_suppress:1,
+ mld2q_qrv:3;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __u8 mld2q_qqic;
+ __be16 mld2q_nsrcs;
+ struct in6_addr mld2q_srcs[0];
+};
+
+#define mld2q_type mld2q_hdr.icmp6_type
+#define mld2q_code mld2q_hdr.icmp6_code
+#define mld2q_cksum mld2q_hdr.icmp6_cksum
+#define mld2q_mrc mld2q_hdr.icmp6_maxdelay
+#define mld2q_resv1 mld2q_hdr.icmp6_dataun.un_data16[1]
+
+/* Max Response Code */
+#define MLDV2_MASK(value, nb) ((nb)>=32 ? (value) : ((1<<(nb))-1) & (value))
+#define MLDV2_EXP(thresh, nbmant, nbexp, value) \
+ ((value) < (thresh) ? (value) : \
+ ((MLDV2_MASK(value, nbmant) | (1<<(nbmant))) << \
+ (MLDV2_MASK((value) >> (nbmant), nbexp) + (nbexp))))
+
+#define MLDV2_MRC(value) MLDV2_EXP(0x8000, 12, 3, value)
+
+#endif
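
MLDV2_MRC() expands the 16-bit MLDv2 Maximum Response Code into a plain value, assuming the RFC 3810 section 5.1.3 encoding: codes below 0x8000 are taken literally, otherwise the low 12 bits are a mantissa and bits 14..12 an exponent, giving (mant | 0x1000) << (exp + 3). Two worked values:

	MLDV2_MRC(0x7fff) == 0x7fff				/* below the 0x8000 threshold */
	MLDV2_MRC(0x8400) == (0x400 | 0x1000) << (0 + 3) == 40960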
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index da1d58be31b7..eb21340a573b 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -299,6 +299,20 @@ static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
return 0;
}
+#ifdef CONFIG_BRIDGE_NETFILTER
+static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
+{
+ unsigned seq, hh_alen;
+
+ do {
+ seq = read_seqbegin(&hh->hh_lock);
+ hh_alen = HH_DATA_ALIGN(ETH_HLEN);
+ memcpy(skb->data - hh_alen, hh->hh_data, ETH_ALEN + hh_alen - ETH_HLEN);
+ } while (read_seqretry(&hh->hh_lock, seq));
+ return 0;
+}
+#endif
+
static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb)
{
unsigned seq;
diff --git a/include/net/netns/generic.h b/include/net/netns/generic.h
index ff4982ab84b6..81a31c0db3e7 100644
--- a/include/net/netns/generic.h
+++ b/include/net/netns/generic.h
@@ -14,11 +14,8 @@
* The rules are simple:
* 1. set pernet_operations->id. After register_pernet_device you
* will have the id of your private pointer.
- * 2. Either set pernet_operations->size (to have the code allocate and
- * free a private structure pointed to from struct net ) or
- * call net_assign_generic() to put the private data on the struct
- * net (most preferably this should be done in the ->init callback
- * of the ops registered);
+ * 2. set pernet_operations->size to have the code allocate and free
+ * a private structure pointed to from struct net.
* 3. do not change this pointer while the net is alive;
* 4. do not try to have any private reference on the net_generic object.
*
@@ -46,6 +43,4 @@ static inline void *net_generic(struct net *net, int id)
return ptr;
}
-
-extern int net_assign_generic(struct net *net, int id, void *data);
#endif
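
With net_assign_generic() gone, per-net private data is always allocated by the pernet core from pernet_operations->size and reached through net_generic(). A minimal sketch of the remaining pattern (my_net, my_net_id and my_pernet_ops are illustrative names):

struct my_net {
	int some_state;
};

static int my_net_id __read_mostly;

static int __net_init my_net_init(struct net *net)
{
	struct my_net *mn = net_generic(net, my_net_id);

	/* storage was allocated (zeroed) by the pernet core */
	mn->some_state = 0;
	return 0;
}

static struct pernet_operations my_pernet_ops = {
	.init = my_net_init,
	.id   = &my_net_id,
	.size = sizeof(struct my_net),
};

The ops are then registered with register_pernet_subsys() or register_pernet_device() from module init, which also assigns my_net_id.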
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 2764994c9136..d68c3f121774 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -55,19 +55,14 @@ struct netns_ipv4 {
int sysctl_rt_cache_rebuild_count;
int current_rt_cache_rebuild_count;
- struct timer_list rt_secret_timer;
atomic_t rt_genid;
#ifdef CONFIG_IP_MROUTE
- struct sock *mroute_sk;
- struct mfc_cache **mfc_cache_array;
- struct vif_device *vif_table;
- int maxvif;
- atomic_t cache_resolve_queue_len;
- int mroute_do_assert;
- int mroute_do_pim;
-#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
- int mroute_reg_vif_num;
+#ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
+ struct mr_table *mrt;
+#else
+ struct list_head mr_tables;
+ struct fib_rules_ops *mr_rules_ops;
#endif
#endif
};
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index b6cdc33b39c1..9d4d87cc970e 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -12,7 +12,7 @@ struct qdisc_walker {
int (*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *);
};
-#define QDISC_ALIGNTO 32
+#define QDISC_ALIGNTO 64
#define QDISC_ALIGN(len) (((len) + QDISC_ALIGNTO-1) & ~(QDISC_ALIGNTO-1))
static inline void *qdisc_priv(struct Qdisc *q)
diff --git a/include/net/raw.h b/include/net/raw.h
index 6c14a656357a..43c57502659b 100644
--- a/include/net/raw.h
+++ b/include/net/raw.h
@@ -19,6 +19,7 @@
#include <net/protocol.h>
+#include <linux/icmp.h>
extern struct proto raw_prot;
@@ -56,4 +57,16 @@ int raw_seq_open(struct inode *ino, struct file *file,
void raw_hash_sk(struct sock *sk);
void raw_unhash_sk(struct sock *sk);
+struct raw_sock {
+ /* inet_sock has to be the first member */
+ struct inet_sock inet;
+ struct icmp_filter filter;
+ u32 ipmr_table;
+};
+
+static inline struct raw_sock *raw_sk(const struct sock *sk)
+{
+ return (struct raw_sock *)sk;
+}
+
#endif /* _RAW_H */
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 67dc08eaaa45..03ca5d826757 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -73,6 +73,7 @@ struct Qdisc {
struct sk_buff_head q;
struct gnet_stats_basic_packed bstats;
struct gnet_stats_queue qstats;
+ struct rcu_head rcu_head;
};
struct Qdisc_class_ops {
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index fa6cde578a1d..65946bc43d00 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -269,7 +269,7 @@ enum {
#define SCTP_MIB_MAX __SCTP_MIB_MAX
struct sctp_mib {
unsigned long mibs[SCTP_MIB_MAX];
-} __SNMP_MIB_ALIGN__;
+};
/* Print debugging messages. */
@@ -547,7 +547,7 @@ for (pos = chunk->subh.fwdtsn_hdr->skip;\
#define WORD_ROUND(s) (((s)+3)&~3)
/* Make a new instance of type. */
-#define t_new(type, flags) (type *)kmalloc(sizeof(type), flags)
+#define t_new(type, flags) (type *)kzalloc(sizeof(type), flags)
/* Compare two timevals. */
#define tv_lt(s, t) \
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 851c813adb3a..4088c89a9055 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -279,6 +279,7 @@ int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype,
/* 2nd level prototypes */
void sctp_generate_t3_rtx_event(unsigned long peer);
void sctp_generate_heartbeat_event(unsigned long peer);
+void sctp_generate_proto_unreach_event(unsigned long peer);
void sctp_ootb_pkt_free(struct sctp_packet *);
@@ -437,7 +438,7 @@ sctp_vtag_verify_either(const struct sctp_chunk *chunk,
*/
if ((!sctp_test_T_bit(chunk) &&
(ntohl(chunk->sctp_hdr->vtag) == asoc->c.my_vtag)) ||
- (sctp_test_T_bit(chunk) &&
+ (sctp_test_T_bit(chunk) && asoc->c.peer_vtag &&
(ntohl(chunk->sctp_hdr->vtag) == asoc->c.peer_vtag))) {
return 1;
}
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 597f8e27aaf6..6173c619913a 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -643,17 +643,15 @@ struct sctp_pf {
struct sctp_datamsg {
/* Chunks waiting to be submitted to lower layer. */
struct list_head chunks;
- /* Chunks that have been transmitted. */
- size_t msg_size;
/* Reference counting. */
atomic_t refcnt;
/* When is this message no longer interesting to the peer? */
unsigned long expires_at;
/* Did the message fail to send? */
int send_error;
- char send_failed;
- /* Control whether chunks from this message can be abandoned. */
- char can_abandon;
+ u8 send_failed:1,
+ can_abandon:1, /* can chunks from this message be abandoned. */
+ can_delay; /* should this message be Nagle delayed */
};
struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *,
@@ -757,7 +755,6 @@ struct sctp_chunk {
#define SCTP_NEED_FRTX 0x1
#define SCTP_DONT_FRTX 0x2
__u16 rtt_in_progress:1, /* This chunk used for RTT calc? */
- resent:1, /* Has this chunk ever been resent. */
has_tsn:1, /* Does this chunk have a TSN yet? */
has_ssn:1, /* Does this chunk have a SSN yet? */
singleton:1, /* Only chunk in the packet? */
@@ -879,7 +876,30 @@ struct sctp_transport {
/* Reference counting. */
atomic_t refcnt;
- int dead;
+ int dead:1,
+ /* RTO-Pending : A flag used to track if one of the DATA
+ * chunks sent to this address is currently being
+ * used to compute a RTT. If this flag is 0,
+ * the next DATA chunk sent to this destination
+ * should be used to compute a RTT and this flag
+ * should be set. Every time the RTT
+ * calculation completes (i.e. the DATA chunk
+ * is SACK'd) clear this flag.
+ */
+ rto_pending:1,
+
+ /*
+ * hb_sent : a flag that signals that we have a pending
+ * heartbeat.
+ */
+ hb_sent:1,
+
+ /* Is the Path MTU update pending on this transport */
+ pmtu_pending:1,
+
+ /* Is this structure kfree()able? */
+ malloced:1;
+
/* This is the peer's IP address and port. */
union sctp_addr ipaddr;
@@ -909,22 +929,6 @@ struct sctp_transport {
/* SRTT : The current smoothed round trip time. */
__u32 srtt;
- /* RTO-Pending : A flag used to track if one of the DATA
- * chunks sent to this address is currently being
- * used to compute a RTT. If this flag is 0,
- * the next DATA chunk sent to this destination
- * should be used to compute a RTT and this flag
- * should be set. Every time the RTT
- * calculation completes (i.e. the DATA chunk
- * is SACK'd) clear this flag.
- * hb_sent : a flag that signals that we have a pending heartbeat.
- */
- __u8 rto_pending;
- __u8 hb_sent;
-
- /* Flag to track the current fast recovery state */
- __u8 fast_recovery;
-
/*
* These are the congestion stats.
*/
@@ -944,9 +948,6 @@ struct sctp_transport {
__u32 burst_limited; /* Holds old cwnd when max.burst is applied */
- /* TSN marking the fast recovery exit point */
- __u32 fast_recovery_exit;
-
/* Destination */
struct dst_entry *dst;
/* Source address. */
@@ -977,9 +978,6 @@ struct sctp_transport {
*/
__u16 pathmaxrxt;
- /* is the Path MTU update pending on this tranport */
- __u8 pmtu_pending;
-
/* PMTU : The current known path MTU. */
__u32 pathmtu;
@@ -1010,6 +1008,9 @@ struct sctp_transport {
/* Heartbeat timer is per destination. */
struct timer_list hb_timer;
+ /* Timer to handle ICMP proto unreachable events */
+ struct timer_list proto_unreach_timer;
+
/* Since we're using per-destination retransmission timers
* (see above), we're also using per-destination "transmitted"
* queues. This probably ought to be a private struct
@@ -1023,8 +1024,6 @@ struct sctp_transport {
/* This is the list of transports that have chunks to send. */
struct list_head send_ready;
- int malloced; /* Is this structure kfree()able? */
-
/* State information saved for SFR_CACC algorithm. The key
* idea in SFR_CACC is to maintain state at the sender on a
* per-destination basis when a changeover happens.
@@ -1066,7 +1065,7 @@ void sctp_transport_route(struct sctp_transport *, union sctp_addr *,
struct sctp_sock *);
void sctp_transport_pmtu(struct sctp_transport *);
void sctp_transport_free(struct sctp_transport *);
-void sctp_transport_reset_timers(struct sctp_transport *, int);
+void sctp_transport_reset_timers(struct sctp_transport *);
void sctp_transport_hold(struct sctp_transport *);
void sctp_transport_put(struct sctp_transport *);
void sctp_transport_update_rto(struct sctp_transport *, __u32);
@@ -1720,6 +1719,12 @@ struct sctp_association {
/* Highest TSN that is acknowledged by incoming SACKs. */
__u32 highest_sacked;
+ /* TSN marking the fast recovery exit point */
+ __u32 fast_recovery_exit;
+
+ /* Flag to track the current fast recovery state */
+ __u8 fast_recovery;
+
/* The number of unacknowledged data chunks. Reported through
* the SCTP_STATUS sockopt.
*/
diff --git a/include/net/snmp.h b/include/net/snmp.h
index 692ee0061dc4..92456f1035f5 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -52,26 +52,11 @@ struct snmp_mib {
* count on the 20Gb/s + networks people expect in a few years time!
*/
-/*
- * The rule for padding:
- * Best is power of two because then the right structure can be found by a
- * simple shift. The structure should be always cache line aligned.
- * gcc needs n=alignto(cachelinesize, popcnt(sizeof(bla_mib))) shift/add
- * instructions to emulate multiply in case it is not power-of-two.
- * Currently n is always <=3 for all sizes so simple cache line alignment
- * is enough.
- *
- * The best solution would be a global CPU local area , especially on 64
- * and 128byte cacheline machine it makes a *lot* of sense -AK
- */
-
-#define __SNMP_MIB_ALIGN__ ____cacheline_aligned
-
/* IPstats */
#define IPSTATS_MIB_MAX __IPSTATS_MIB_MAX
struct ipstats_mib {
unsigned long mibs[IPSTATS_MIB_MAX];
-} __SNMP_MIB_ALIGN__;
+};
/* ICMP */
#define ICMP_MIB_DUMMY __ICMP_MIB_MAX
@@ -79,36 +64,36 @@ struct ipstats_mib {
struct icmp_mib {
unsigned long mibs[ICMP_MIB_MAX];
-} __SNMP_MIB_ALIGN__;
+};
#define ICMPMSG_MIB_MAX __ICMPMSG_MIB_MAX
struct icmpmsg_mib {
unsigned long mibs[ICMPMSG_MIB_MAX];
-} __SNMP_MIB_ALIGN__;
+};
/* ICMP6 (IPv6-ICMP) */
#define ICMP6_MIB_MAX __ICMP6_MIB_MAX
struct icmpv6_mib {
unsigned long mibs[ICMP6_MIB_MAX];
-} __SNMP_MIB_ALIGN__;
+};
#define ICMP6MSG_MIB_MAX __ICMP6MSG_MIB_MAX
struct icmpv6msg_mib {
unsigned long mibs[ICMP6MSG_MIB_MAX];
-} __SNMP_MIB_ALIGN__;
+};
/* TCP */
#define TCP_MIB_MAX __TCP_MIB_MAX
struct tcp_mib {
unsigned long mibs[TCP_MIB_MAX];
-} __SNMP_MIB_ALIGN__;
+};
/* UDP */
#define UDP_MIB_MAX __UDP_MIB_MAX
struct udp_mib {
unsigned long mibs[UDP_MIB_MAX];
-} __SNMP_MIB_ALIGN__;
+};
/* Linux */
#define LINUX_MIB_MAX __LINUX_MIB_MAX
@@ -148,6 +133,8 @@ struct linux_xfrm_mib {
__this_cpu_add(mib[0]->mibs[field], addend)
#define SNMP_ADD_STATS_USER(mib, field, addend) \
this_cpu_add(mib[1]->mibs[field], addend)
+#define SNMP_ADD_STATS(mib, field, addend) \
+ this_cpu_add(mib[0]->mibs[field], addend)
/*
* Use "__typeof__(*mib[0]) *ptr" instead of "__typeof__(mib[0]) ptr"
* to make @ptr a non-percpu pointer.
diff --git a/include/net/sock.h b/include/net/sock.h
index 1ad6435f252e..328e03f47dd1 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -159,7 +159,7 @@ struct sock_common {
* @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
* @sk_lock: synchronizer
* @sk_rcvbuf: size of receive buffer in bytes
- * @sk_sleep: sock wait queue
+ * @sk_wq: sock wait queue and async head
* @sk_dst_cache: destination cache
* @sk_dst_lock: destination cache lock
* @sk_policy: flow policy
@@ -198,6 +198,7 @@ struct sock_common {
* @sk_rcvlowat: %SO_RCVLOWAT setting
* @sk_rcvtimeo: %SO_RCVTIMEO setting
* @sk_sndtimeo: %SO_SNDTIMEO setting
+ * @sk_rxhash: flow hash received from netif layer
* @sk_filter: socket filtering instructions
* @sk_protinfo: private area, net family specific, when not using slab
* @sk_timer: sock cleanup timer
@@ -255,14 +256,13 @@ struct sock {
struct sk_buff *head;
struct sk_buff *tail;
int len;
- int limit;
} sk_backlog;
- wait_queue_head_t *sk_sleep;
+ struct socket_wq *sk_wq;
struct dst_entry *sk_dst_cache;
#ifdef CONFIG_XFRM
struct xfrm_policy *sk_policy[2];
#endif
- rwlock_t sk_dst_lock;
+ spinlock_t sk_dst_lock;
atomic_t sk_rmem_alloc;
atomic_t sk_wmem_alloc;
atomic_t sk_omem_alloc;
@@ -279,6 +279,9 @@ struct sock {
int sk_gso_type;
unsigned int sk_gso_max_size;
int sk_rcvlowat;
+#ifdef CONFIG_RPS
+ __u32 sk_rxhash;
+#endif
unsigned long sk_flags;
unsigned long sk_lingertime;
struct sk_buff_head sk_error_queue;
@@ -604,10 +607,20 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
skb->next = NULL;
}
+/*
+ * Take into account size of receive queue and backlog queue
+ */
+static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
+{
+ unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
+
+ return qsize + skb->truesize > sk->sk_rcvbuf;
+}
+
/* The per-socket spinlock must be held here. */
static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
- if (sk->sk_backlog.len >= max(sk->sk_backlog.limit, sk->sk_rcvbuf << 1))
+ if (sk_rcvqueues_full(sk, skb))
return -ENOBUFS;
__sk_add_backlog(sk, skb);
@@ -620,6 +633,40 @@ static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
return sk->sk_backlog_rcv(sk, skb);
}
+static inline void sock_rps_record_flow(const struct sock *sk)
+{
+#ifdef CONFIG_RPS
+ struct rps_sock_flow_table *sock_flow_table;
+
+ rcu_read_lock();
+ sock_flow_table = rcu_dereference(rps_sock_flow_table);
+ rps_record_sock_flow(sock_flow_table, sk->sk_rxhash);
+ rcu_read_unlock();
+#endif
+}
+
+static inline void sock_rps_reset_flow(const struct sock *sk)
+{
+#ifdef CONFIG_RPS
+ struct rps_sock_flow_table *sock_flow_table;
+
+ rcu_read_lock();
+ sock_flow_table = rcu_dereference(rps_sock_flow_table);
+ rps_reset_sock_flow(sock_flow_table, sk->sk_rxhash);
+ rcu_read_unlock();
+#endif
+}
+
+static inline void sock_rps_save_rxhash(struct sock *sk, u32 rxhash)
+{
+#ifdef CONFIG_RPS
+ if (unlikely(sk->sk_rxhash != rxhash)) {
+ sock_rps_reset_flow(sk);
+ sk->sk_rxhash = rxhash;
+ }
+#endif
+}
+
#define sk_wait_event(__sk, __timeo, __condition) \
({ int __rc; \
release_sock(__sk); \
@@ -974,6 +1021,16 @@ extern void release_sock(struct sock *sk);
SINGLE_DEPTH_NESTING)
#define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock))
+static inline void lock_sock_bh(struct sock *sk)
+{
+ spin_lock_bh(&sk->sk_lock.slock);
+}
+
+static inline void unlock_sock_bh(struct sock *sk)
+{
+ spin_unlock_bh(&sk->sk_lock.slock);
+}
+
extern struct sock *sk_alloc(struct net *net, int family,
gfp_t priority,
struct proto *prot);
@@ -1160,6 +1217,10 @@ static inline void sk_set_socket(struct sock *sk, struct socket *sock)
sk->sk_socket = sock;
}
+static inline wait_queue_head_t *sk_sleep(struct sock *sk)
+{
+ return &sk->sk_wq->wait;
+}
/* Detach socket from process context.
* Announce socket dead, detach it from wait queue and inode.
* Note that parent inode held reference count on this struct sock,
@@ -1172,14 +1233,14 @@ static inline void sock_orphan(struct sock *sk)
write_lock_bh(&sk->sk_callback_lock);
sock_set_flag(sk, SOCK_DEAD);
sk_set_socket(sk, NULL);
- sk->sk_sleep = NULL;
+ sk->sk_wq = NULL;
write_unlock_bh(&sk->sk_callback_lock);
}
static inline void sock_graft(struct sock *sk, struct socket *parent)
{
write_lock_bh(&sk->sk_callback_lock);
- sk->sk_sleep = &parent->wait;
+ rcu_assign_pointer(sk->sk_wq, parent->wq);
parent->sk = sk;
sk_set_socket(sk, parent);
security_sock_graft(sk, parent);
@@ -1192,7 +1253,9 @@ extern unsigned long sock_i_ino(struct sock *sk);
static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
- return sk->sk_dst_cache;
+ return rcu_dereference_check(sk->sk_dst_cache, rcu_read_lock_held() ||
+ sock_owned_by_user(sk) ||
+ lockdep_is_held(&sk->sk_lock.slock));
}
static inline struct dst_entry *
@@ -1200,50 +1263,65 @@ sk_dst_get(struct sock *sk)
{
struct dst_entry *dst;
- read_lock(&sk->sk_dst_lock);
- dst = sk->sk_dst_cache;
+ rcu_read_lock();
+ dst = rcu_dereference(sk->sk_dst_cache);
if (dst)
dst_hold(dst);
- read_unlock(&sk->sk_dst_lock);
+ rcu_read_unlock();
return dst;
}
+extern void sk_reset_txq(struct sock *sk);
+
+static inline void dst_negative_advice(struct sock *sk)
+{
+ struct dst_entry *ndst, *dst = __sk_dst_get(sk);
+
+ if (dst && dst->ops->negative_advice) {
+ ndst = dst->ops->negative_advice(dst);
+
+ if (ndst != dst) {
+ rcu_assign_pointer(sk->sk_dst_cache, ndst);
+ sk_reset_txq(sk);
+ }
+ }
+}
+
static inline void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
struct dst_entry *old_dst;
sk_tx_queue_clear(sk);
- old_dst = sk->sk_dst_cache;
- sk->sk_dst_cache = dst;
+ /*
+ * This can be called while sk is owned by the caller only,
+ * with no state that can be checked in a rcu_dereference_check() cond
+ */
+ old_dst = rcu_dereference_raw(sk->sk_dst_cache);
+ rcu_assign_pointer(sk->sk_dst_cache, dst);
dst_release(old_dst);
}
static inline void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
- write_lock(&sk->sk_dst_lock);
+ spin_lock(&sk->sk_dst_lock);
__sk_dst_set(sk, dst);
- write_unlock(&sk->sk_dst_lock);
+ spin_unlock(&sk->sk_dst_lock);
}
static inline void
__sk_dst_reset(struct sock *sk)
{
- struct dst_entry *old_dst;
-
- sk_tx_queue_clear(sk);
- old_dst = sk->sk_dst_cache;
- sk->sk_dst_cache = NULL;
- dst_release(old_dst);
+ __sk_dst_set(sk, NULL);
}
static inline void
sk_dst_reset(struct sock *sk)
{
- write_lock(&sk->sk_dst_lock);
+ spin_lock(&sk->sk_dst_lock);
__sk_dst_reset(sk);
- write_unlock(&sk->sk_dst_lock);
+ spin_unlock(&sk->sk_dst_lock);
}
extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
@@ -1314,12 +1392,12 @@ static inline int sk_has_allocations(const struct sock *sk)
}
/**
- * sk_has_sleeper - check if there are any waiting processes
- * @sk: socket
+ * wq_has_sleeper - check if there are any waiting processes
+ * @wq: struct socket_wq
*
- * Returns true if socket has waiting processes
+ * Returns true if socket_wq has waiting processes
*
- * The purpose of the sk_has_sleeper and sock_poll_wait is to wrap the memory
+ * The purpose of the wq_has_sleeper and sock_poll_wait is to wrap the memory
* barrier call. They were added due to the race found within the tcp code.
*
* Consider following tcp code paths:
@@ -1332,9 +1410,10 @@ static inline int sk_has_allocations(const struct sock *sk)
* ... ...
* tp->rcv_nxt check sock_def_readable
* ... {
- * schedule ...
- * if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- * wake_up_interruptible(sk->sk_sleep)
+ * schedule rcu_read_lock();
+ * wq = rcu_dereference(sk->sk_wq);
+ * if (wq && waitqueue_active(&wq->wait))
+ * wake_up_interruptible(&wq->wait)
* ...
* }
*
@@ -1343,19 +1422,18 @@ static inline int sk_has_allocations(const struct sock *sk)
* could then endup calling schedule and sleep forever if there are no more
* data on the socket.
*
- * The sk_has_sleeper is always called right after a call to read_lock, so we
- * can use smp_mb__after_lock barrier.
*/
-static inline int sk_has_sleeper(struct sock *sk)
+static inline bool wq_has_sleeper(struct socket_wq *wq)
{
+
/*
* We need to be sure we are in sync with the
* add_wait_queue modifications to the wait queue.
*
* This memory barrier is paired in the sock_poll_wait.
*/
- smp_mb__after_lock();
- return sk->sk_sleep && waitqueue_active(sk->sk_sleep);
+ smp_mb();
+ return wq && waitqueue_active(&wq->wait);
}
/**
@@ -1364,7 +1442,7 @@ static inline int sk_has_sleeper(struct sock *sk)
* @wait_address: socket wait queue
* @p: poll_table
*
- * See the comments in the sk_has_sleeper function.
+ * See the comments in the wq_has_sleeper function.
*/
static inline void sock_poll_wait(struct file *filp,
wait_queue_head_t *wait_address, poll_table *p)
@@ -1375,7 +1453,7 @@ static inline void sock_poll_wait(struct file *filp,
* We need to be sure we are in sync with the
* socket flags modification.
*
- * This memory barrier is paired in the sk_has_sleeper.
+ * This memory barrier is paired in the wq_has_sleeper.
*/
smp_mb();
}
@@ -1557,7 +1635,24 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
sk->sk_stamp = kt;
}
-extern void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, struct sk_buff *skb);
+extern void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb);
+
+static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb)
+{
+#define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL) | \
+ (1UL << SOCK_RCVTSTAMP) | \
+ (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \
+ (1UL << SOCK_TIMESTAMPING_SOFTWARE) | \
+ (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE) | \
+ (1UL << SOCK_TIMESTAMPING_SYS_HARDWARE))
+
+ if (sk->sk_flags & FLAGS_TS_OR_DROPS)
+ __sock_recv_ts_and_drops(msg, sk, skb);
+ else
+ sk->sk_stamp = skb->tstamp;
+}
/**
* sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
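
The wq_has_sleeper()/sock_poll_wait() pairing described above keeps the same barrier discipline as the old sk_has_sleeper(), but the wait queue now lives in struct socket_wq and is reached via RCU. A minimal sketch of the two sides (my_poll and my_data_ready are illustrative; sock_def_readable() in net/core/sock.c is the in-tree reference):

/* poll() side: register on the wait queue, then check socket state */
static unsigned int my_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	sock_poll_wait(file, sk_sleep(sk), wait);	/* includes smp_mb() */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;
	return mask;
}

/* wakeup side: update socket state, then look for sleepers */
static void my_data_ready(struct sock *sk, int len)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))				/* includes smp_mb() */
		wake_up_interruptible_sync_poll(&wq->wait,
						POLLIN | POLLRDNORM);
	rcu_read_unlock();
}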
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 75be5a28815d..fb5c66b2ab81 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -294,6 +294,7 @@ extern struct proto tcp_prot;
#define TCP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
+#define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
extern void tcp_v4_err(struct sk_buff *skb, u32);
@@ -423,7 +424,7 @@ extern u8 *tcp_parse_md5sig_option(struct tcphdr *th);
* TCP v4 functions exported for the inet6 API
*/
-extern void tcp_v4_send_check(struct sock *sk, int len,
+extern void tcp_v4_send_check(struct sock *sk,
struct sk_buff *skb);
extern int tcp_v4_conn_request(struct sock *sk,
@@ -939,7 +940,7 @@ static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
tp->ucopy.memory = 0;
} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
- wake_up_interruptible_sync_poll(sk->sk_sleep,
+ wake_up_interruptible_sync_poll(sk_sleep(sk),
POLLIN | POLLRDNORM | POLLRDBAND);
if (!inet_csk_ack_scheduled(sk))
inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
@@ -1032,6 +1033,14 @@ static inline int keepalive_probes(const struct tcp_sock *tp)
return tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
}
+static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
+{
+ const struct inet_connection_sock *icsk = &tp->inet_conn;
+
+ return min_t(u32, tcp_time_stamp - icsk->icsk_ack.lrcvtime,
+ tcp_time_stamp - tp->rcv_tstamp);
+}
+
static inline int tcp_fin_time(const struct sock *sk)
{
int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
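
keepalive_time_elapsed() returns the idle time since the connection last received an ACK or data, whichever happened more recently. A simplified paraphrase of how the keepalive timer might use it (a sketch of the logic in tcp_keepalive_timer(), not the full in-tree code):

	u32 elapsed = keepalive_time_elapsed(tp);

	if (elapsed >= keepalive_time_when(tp)) {
		/* idle long enough: send a keepalive probe */
		tcp_write_wakeup(sk);
		elapsed = keepalive_intvl_when(tp);
	} else {
		/* not idle long enough yet: sleep for the remainder */
		elapsed = keepalive_time_when(tp) - elapsed;
	}
	inet_csk_reset_keepalive_timer(sk, elapsed);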
diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h
index d65381cad0fc..42a0eb68b7b6 100644
--- a/include/net/transp_v6.h
+++ b/include/net/transp_v6.h
@@ -44,7 +44,8 @@ extern int datagram_send_ctl(struct net *net,
struct msghdr *msg,
struct flowi *fl,
struct ipv6_txoptions *opt,
- int *hlimit, int *tclass);
+ int *hlimit, int *tclass,
+ int *dontfrag);
#define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006)
diff --git a/include/net/x25device.h b/include/net/x25device.h
index 1415bcf93980..1fa08b49f1c2 100644
--- a/include/net/x25device.h
+++ b/include/net/x25device.h
@@ -3,6 +3,7 @@
#include <linux/if_ether.h>
#include <linux/if_packet.h>
+#include <linux/if_x25.h>
#include <linux/skbuff.h>
static inline __be16 x25_type_trans(struct sk_buff *skb, struct net_device *dev)
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index ac52f33f3e4a..1913af67c43d 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -20,6 +20,7 @@
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
+#include <net/flow.h>
#include <linux/interrupt.h>
@@ -267,7 +268,6 @@ struct xfrm_policy_afinfo {
xfrm_address_t *saddr,
xfrm_address_t *daddr);
int (*get_saddr)(struct net *net, xfrm_address_t *saddr, xfrm_address_t *daddr);
- struct dst_entry *(*find_bundle)(struct flowi *fl, struct xfrm_policy *policy);
void (*decode_session)(struct sk_buff *skb,
struct flowi *fl,
int reverse);
@@ -482,13 +482,14 @@ struct xfrm_policy {
atomic_t refcnt;
struct timer_list timer;
+ struct flow_cache_object flo;
+ atomic_t genid;
u32 priority;
u32 index;
struct xfrm_mark mark;
struct xfrm_selector selector;
struct xfrm_lifetime_cfg lft;
struct xfrm_lifetime_cur curlft;
- struct dst_entry *bundles;
struct xfrm_policy_walk_entry walk;
u8 type;
u8 action;
@@ -735,19 +736,12 @@ static inline void xfrm_pol_put(struct xfrm_policy *policy)
xfrm_policy_destroy(policy);
}
-#ifdef CONFIG_XFRM_SUB_POLICY
static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
{
int i;
for (i = npols - 1; i >= 0; --i)
xfrm_pol_put(pols[i]);
}
-#else
-static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
-{
- xfrm_pol_put(pols[0]);
-}
-#endif
extern void __xfrm_state_destroy(struct xfrm_state *);
@@ -878,11 +872,15 @@ struct xfrm_dst {
struct rt6_info rt6;
} u;
struct dst_entry *route;
+ struct flow_cache_object flo;
+ struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
+ int num_pols, num_xfrms;
#ifdef CONFIG_XFRM_SUB_POLICY
struct flowi *origin;
struct xfrm_selector *partner;
#endif
- u32 genid;
+ u32 xfrm_genid;
+ u32 policy_genid;
u32 route_mtu_cached;
u32 child_mtu_cached;
u32 route_cookie;
@@ -892,6 +890,7 @@ struct xfrm_dst {
#ifdef CONFIG_XFRM
static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
{
+ xfrm_pols_put(xdst->pols, xdst->num_pols);
dst_release(xdst->route);
if (likely(xdst->u.dst.xfrm))
xfrm_state_put(xdst->u.dst.xfrm);
diff --git a/include/pcmcia/cs.h b/include/pcmcia/cs.h
index 75fa3530345b..57d8d0393567 100644
--- a/include/pcmcia/cs.h
+++ b/include/pcmcia/cs.h
@@ -85,6 +85,7 @@ typedef struct config_req_t {
#define CONF_ENABLE_IRQ 0x01
#define CONF_ENABLE_DMA 0x02
#define CONF_ENABLE_SPKR 0x04
+#define CONF_ENABLE_PULSE_IRQ 0x08
#define CONF_VALID_CLIENT 0x100
/* IntType field */
@@ -113,25 +114,7 @@ typedef struct io_req_t {
#define IO_DATA_PATH_WIDTH_16 0x08
#define IO_DATA_PATH_WIDTH_AUTO 0x10
-/* For RequestIRQ and ReleaseIRQ */
-typedef struct irq_req_t {
- u_int Attributes;
- u_int AssignedIRQ;
- irq_handler_t Handler;
-} irq_req_t;
-
-/* Attributes for RequestIRQ and ReleaseIRQ */
-#define IRQ_TYPE 0x03
-#define IRQ_TYPE_EXCLUSIVE 0x00
-#define IRQ_TYPE_TIME 0x01
-#define IRQ_TYPE_DYNAMIC_SHARING 0x02
-#define IRQ_FORCED_PULSE 0x04
-#define IRQ_FIRST_SHARED 0x08 /* unused */
-#define IRQ_HANDLE_PRESENT 0x10 /* unused */
-#define IRQ_PULSE_ALLOCATED 0x100
-
/* Bits in IRQInfo1 field */
-#define IRQ_MASK 0x0f
#define IRQ_NMI_ID 0x01
#define IRQ_IOCK_ID 0x02
#define IRQ_BERR_ID 0x04
diff --git a/include/pcmcia/ds.h b/include/pcmcia/ds.h
index aab3c13dc310..c180165fbd3e 100644
--- a/include/pcmcia/ds.h
+++ b/include/pcmcia/ds.h
@@ -62,15 +62,6 @@ struct pcmcia_driver {
int pcmcia_register_driver(struct pcmcia_driver *driver);
void pcmcia_unregister_driver(struct pcmcia_driver *driver);
-/* Some drivers use dev_node_t to store char or block device information.
- * Don't use this in new drivers, though.
- */
-typedef struct dev_node_t {
- char dev_name[DEV_NAME_LEN];
- u_short major, minor;
- struct dev_node_t *next;
-} dev_node_t;
-
struct pcmcia_device {
/* the socket and the device_no [for multifunction devices]
uniquely define a pcmcia_device */
@@ -88,13 +79,14 @@ struct pcmcia_device {
struct list_head socket_device_list;
/* deprecated, will be cleaned up soon */
- dev_node_t *dev_node;
u_int open;
io_req_t io;
- irq_req_t irq;
config_req_t conf;
window_handle_t win;
+ /* device setup */
+ unsigned int irq;
+
/* Is the device suspended? */
u16 suspended:1;
@@ -191,7 +183,20 @@ int pcmcia_access_configuration_register(struct pcmcia_device *p_dev,
/* device configuration */
int pcmcia_request_io(struct pcmcia_device *p_dev, io_req_t *req);
-int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req);
+
+int __must_check
+__pcmcia_request_exclusive_irq(struct pcmcia_device *p_dev,
+ irq_handler_t handler);
+static inline __must_check __deprecated int
+pcmcia_request_exclusive_irq(struct pcmcia_device *p_dev,
+ irq_handler_t handler)
+{
+ return __pcmcia_request_exclusive_irq(p_dev, handler);
+}
+
+int __must_check pcmcia_request_irq(struct pcmcia_device *p_dev,
+ irq_handler_t handler);
+
int pcmcia_request_configuration(struct pcmcia_device *p_dev,
config_req_t *req);
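
With irq_req_t gone, a driver no longer fills in an IRQ request structure; it passes its handler and reads the assigned line back from p_dev->irq. A minimal sketch of the new probe-time call (my_config and my_isr are illustrative names; drivers that genuinely need a non-shared interrupt can use the deprecated pcmcia_request_exclusive_irq() wrapper):

static int my_config(struct pcmcia_device *p_dev)
{
	int ret;

	/* the core selects the interrupt and stores it in p_dev->irq */
	ret = pcmcia_request_irq(p_dev, my_isr);
	if (ret)
		return ret;

	dev_info(&p_dev->dev, "using IRQ %u\n", p_dev->irq);
	return pcmcia_request_configuration(p_dev, &p_dev->conf);
}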
diff --git a/include/pcmcia/mem_op.h b/include/pcmcia/mem_op.h
deleted file mode 100644
index 0fa06e5d5376..000000000000
--- a/include/pcmcia/mem_op.h
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * mem_op.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * The initial developer of the original code is David A. Hinds
- * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
- * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
- *
- * (C) 1999 David A. Hinds
- */
-
-#ifndef _LINUX_MEM_OP_H
-#define _LINUX_MEM_OP_H
-
-#include <linux/io.h>
-#include <asm/uaccess.h>
-
-/*
- If UNSAFE_MEMCPY is defined, we use the (optimized) system routines
- to copy between a card and kernel memory. These routines do 32-bit
- operations which may not work with all PCMCIA controllers. The
- safe versions defined here will do only 8-bit and 16-bit accesses.
-*/
-
-#ifdef UNSAFE_MEMCPY
-
-#define copy_from_pc memcpy_fromio
-#define copy_to_pc memcpy_toio
-
-static inline void copy_pc_to_user(void *to, const void *from, size_t n)
-{
- size_t odd = (n & 3);
- n -= odd;
- while (n) {
- put_user(__raw_readl(from), (int *)to);
- (char *)from += 4; (char *)to += 4; n -= 4;
- }
- while (odd--)
- put_user(readb((char *)from++), (char *)to++);
-}
-
-static inline void copy_user_to_pc(void *to, const void *from, size_t n)
-{
- int l;
- char c;
- size_t odd = (n & 3);
- n -= odd;
- while (n) {
- get_user(l, (int *)from);
- __raw_writel(l, to);
- (char *)to += 4; (char *)from += 4; n -= 4;
- }
- while (odd--) {
- get_user(c, (char *)from++);
- writeb(c, (char *)to++);
- }
-}
-
-#else /* UNSAFE_MEMCPY */
-
-static inline void copy_from_pc(void *to, void __iomem *from, size_t n)
-{
- __u16 *t = to;
- __u16 __iomem *f = from;
- size_t odd = (n & 1);
- for (n >>= 1; n; n--)
- *t++ = __raw_readw(f++);
- if (odd)
- *(__u8 *)t = readb(f);
-}
-
-static inline void copy_to_pc(void __iomem *to, const void *from, size_t n)
-{
- __u16 __iomem *t = to;
- const __u16 *f = from;
- size_t odd = (n & 1);
- for (n >>= 1; n ; n--)
- __raw_writew(*f++, t++);
- if (odd)
- writeb(*(__u8 *)f, t);
-}
-
-static inline void copy_pc_to_user(void __user *to, void __iomem *from, size_t n)
-{
- __u16 __user *t = to;
- __u16 __iomem *f = from;
- size_t odd = (n & 1);
- for (n >>= 1; n ; n--)
- put_user(__raw_readw(f++), t++);
- if (odd)
- put_user(readb(f), (char __user *)t);
-}
-
-static inline void copy_user_to_pc(void __iomem *to, void __user *from, size_t n)
-{
- __u16 __user *f = from;
- __u16 __iomem *t = to;
- short s;
- char c;
- size_t odd = (n & 1);
- for (n >>= 1; n; n--) {
- get_user(s, f++);
- __raw_writew(s, t++);
- }
- if (odd) {
- get_user(c, (char __user *)f);
- writeb(c, t);
- }
-}
-
-#endif /* UNSAFE_MEMCPY */
-
-#endif /* _LINUX_MEM_OP_H */
diff --git a/include/pcmcia/ss.h b/include/pcmcia/ss.h
index 344705cb42f4..764281b29218 100644
--- a/include/pcmcia/ss.h
+++ b/include/pcmcia/ss.h
@@ -141,10 +141,6 @@ struct pcmcia_socket {
u_short lock_count;
pccard_mem_map cis_mem;
void __iomem *cis_virt;
- struct {
- u_int AssignedIRQ;
- u_int Config;
- } irq;
io_window_t io[MAX_IO_WIN];
pccard_mem_map win[MAX_WIN];
struct list_head cis_cache;
@@ -235,6 +231,9 @@ struct pcmcia_socket {
/* non-zero if PCMCIA card is present */
atomic_t present;
+ /* IRQ to be used by PCMCIA devices. May not be IRQ 0. */
+ unsigned int pcmcia_irq;
+
#ifdef CONFIG_PCMCIA_IOCTL
struct user_info_t *user;
wait_queue_head_t queue;
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index a585e0f92bc3..310d31474034 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -136,6 +136,7 @@ struct ib_device_attr {
int max_qp_init_rd_atom;
int max_ee_init_rd_atom;
enum ib_atomic_cap atomic_cap;
+ enum ib_atomic_cap masked_atomic_cap;
int max_ee;
int max_rdd;
int max_mw;
@@ -467,6 +468,8 @@ enum ib_wc_opcode {
IB_WC_LSO,
IB_WC_LOCAL_INV,
IB_WC_FAST_REG_MR,
+ IB_WC_MASKED_COMP_SWAP,
+ IB_WC_MASKED_FETCH_ADD,
/*
* Set value of IB_WC_RECV so consumers can test if a completion is a
* receive by testing (opcode & IB_WC_RECV).
@@ -689,6 +692,8 @@ enum ib_wr_opcode {
IB_WR_RDMA_READ_WITH_INV,
IB_WR_LOCAL_INV,
IB_WR_FAST_REG_MR,
+ IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
+ IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
};
enum ib_send_flags {
@@ -731,6 +736,8 @@ struct ib_send_wr {
u64 remote_addr;
u64 compare_add;
u64 swap;
+ u64 compare_add_mask;
+ u64 swap_mask;
u32 rkey;
} atomic;
struct {
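
The new masked-atomic opcodes reuse the existing atomic work-request fields and add two masks; with all-ones masks they behave like the ordinary compare-and-swap. A hedged sketch of posting one (QP, MR and DMA setup are assumed to exist elsewhere):

#include <linux/string.h>
#include <rdma/ib_verbs.h>

static int example_masked_cswap(struct ib_qp *qp, u64 local_dma_addr, u32 lkey,
                                u64 remote_addr, u32 rkey,
                                u64 expect, u64 new_val)
{
        struct ib_sge sge = {
                .addr   = local_dma_addr,       /* 8-byte buffer for old value */
                .length = sizeof(u64),
                .lkey   = lkey,
        };
        struct ib_send_wr wr, *bad_wr;

        memset(&wr, 0, sizeof(wr));
        wr.opcode     = IB_WR_MASKED_ATOMIC_CMP_AND_SWP;
        wr.send_flags = IB_SEND_SIGNALED;
        wr.sg_list    = &sge;
        wr.num_sge    = 1;
        wr.wr.atomic.remote_addr      = remote_addr;
        wr.wr.atomic.rkey             = rkey;
        wr.wr.atomic.compare_add      = expect;
        wr.wr.atomic.swap             = new_val;
        wr.wr.atomic.compare_add_mask = ~0ULL;  /* compare every bit */
        wr.wr.atomic.swap_mask        = ~0ULL;  /* replace every bit */

        return ib_post_send(qp, &wr, &bad_wr);
}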
diff --git a/include/scsi/Kbuild b/include/scsi/Kbuild
index b3a0ee6b2f1c..f2b94918994d 100644
--- a/include/scsi/Kbuild
+++ b/include/scsi/Kbuild
@@ -1,4 +1,3 @@
-header-y += scsi.h
header-y += scsi_netlink.h
header-y += scsi_netlink_fc.h
header-y += scsi_bsg_fc.h
diff --git a/include/scsi/fc/fc_fcp.h b/include/scsi/fc/fc_fcp.h
index 747e2c7d88d6..8e9b222251c2 100644
--- a/include/scsi/fc/fc_fcp.h
+++ b/include/scsi/fc/fc_fcp.h
@@ -76,6 +76,7 @@ struct fcp_cmnd32 {
#define FCP_PTA_HEADQ 1 /* head of queue task attribute */
#define FCP_PTA_ORDERED 2 /* ordered task attribute */
#define FCP_PTA_ACA 4 /* auto. contigent allegiance */
+#define FCP_PTA_MASK 7 /* mask for task attribute field */
#define FCP_PRI_SHIFT 3 /* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK 0x80 /* reserved bits in priority field */
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index 4b912eee33e5..a26bb50c0c8b 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -47,13 +47,18 @@
#define ntohll(x) be64_to_cpu(x)
#define htonll(x) cpu_to_be64(x)
-#define ntoh24(p) (((p)[0] << 16) | ((p)[1] << 8) | ((p)[2]))
-#define hton24(p, v) do { \
- p[0] = (((v) >> 16) & 0xFF); \
- p[1] = (((v) >> 8) & 0xFF); \
- p[2] = ((v) & 0xFF); \
- } while (0)
+static inline u32 ntoh24(const u8 *p)
+{
+ return (p[0] << 16) | (p[1] << 8) | p[2];
+}
+
+static inline void hton24(u8 *p, u32 v)
+{
+ p[0] = (v >> 16) & 0xff;
+ p[1] = (v >> 8) & 0xff;
+ p[2] = v & 0xff;
+}
/**
* enum fc_lport_state - Local port states
@@ -918,15 +923,6 @@ static inline void fc_lport_free_stats(struct fc_lport *lport)
}
/**
- * fc_lport_get_stats() - Get a local port's statistics
- * @lport: The local port whose statistics are to be retreived
- */
-static inline struct fcoe_dev_stats *fc_lport_get_stats(struct fc_lport *lport)
-{
- return per_cpu_ptr(lport->dev_stats, smp_processor_id());
-}
-
-/**
* lport_priv() - Return the private data from a local port
* @lport: The local port whose private data is to be retreived
*/
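
ntoh24()/hton24() are now typed inline helpers rather than macros, so the arguments are type-checked; a quick round-trip sketch with an illustrative FC_ID:

#include <scsi/libfc.h>

static void example_fcid_roundtrip(void)
{
        u8 buf[3];
        u32 fid = 0x010203;             /* illustrative 24-bit FC address */

        hton24(buf, fid);               /* buf becomes { 0x01, 0x02, 0x03 } */
        WARN_ON(ntoh24(buf) != fid);    /* reads back the same value */
}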
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index c603f4a7e7fc..ec13f51531f8 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -29,6 +29,8 @@
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
+#define FCOE_MAX_CMD_LEN 16 /* Supported CDB length */
+
/*
* FIP tunable parameters.
*/
@@ -65,14 +67,12 @@ enum fip_state {
* @port_ka_time: time of next port keep-alive.
* @ctlr_ka_time: time of next controller keep-alive.
* @timer: timer struct used for all delayed events.
- * @link_work: &work_struct for doing FCF selection.
+ * @timer_work: &work_struct for doing keep-alives and resets.
* @recv_work: &work_struct for receiving FIP frames.
* @fip_recv_list: list of received FIP frames.
* @user_mfs: configured maximum FC frame size, including FC header.
* @flogi_oxid: exchange ID of most recent fabric login.
* @flogi_count: number of FLOGI attempts in AUTO mode.
- * @link: current link status for libfc.
- * @last_link: last link state reported to libfc.
* @map_dest: use the FC_MAP mode for destination MAC addresses.
* @spma: supports SPMA server-provided MACs mode
* @send_ctlr_ka: need to send controller keep alive
@@ -100,14 +100,12 @@ struct fcoe_ctlr {
unsigned long port_ka_time;
unsigned long ctlr_ka_time;
struct timer_list timer;
- struct work_struct link_work;
+ struct work_struct timer_work;
struct work_struct recv_work;
struct sk_buff_head fip_recv_list;
u16 user_mfs;
u16 flogi_oxid;
u8 flogi_count;
- u8 link;
- u8 last_link;
u8 reset_req;
u8 map_dest;
u8 spma;
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 8b4deca996ad..9ae5c613131b 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -114,6 +114,7 @@ struct scsi_cmnd;
#define READ_12 0xa8
#define WRITE_12 0xaa
#define WRITE_VERIFY_12 0xae
+#define VERIFY_12 0xaf
#define SEARCH_HIGH_12 0xb0
#define SEARCH_EQUAL_12 0xb1
#define SEARCH_LOW_12 0xb2
@@ -134,6 +135,7 @@ struct scsi_cmnd;
#define MO_SET_TARGET_PGS 0x0a
/* values for variable length command */
#define READ_32 0x09
+#define VERIFY_32 0x0a
#define WRITE_32 0x0b
#define WRITE_SAME_32 0x0d
@@ -423,6 +425,7 @@ static inline int scsi_is_wlun(unsigned int lun)
#define ADD_TO_MLQUEUE 0x2006
#define TIMEOUT_ERROR 0x2007
#define SCSI_RETURN_NOT_HANDLED 0x2008
+#define FAST_IO_FAIL 0x2009
/*
* Midlevel queue return values.
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index 8e86a94faf06..87d81b3ce564 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -807,6 +807,6 @@ void fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
struct fc_vport *fc_vport_create(struct Scsi_Host *shost, int channel,
struct fc_vport_identifiers *);
int fc_vport_terminate(struct fc_vport *vport);
-void fc_block_scsi_eh(struct scsi_cmnd *cmnd);
+int fc_block_scsi_eh(struct scsi_cmnd *cmnd);
#endif /* SCSI_TRANSPORT_FC_H */
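
fc_block_scsi_eh() now reports whether the rport came back or fast-failed; a hedged sketch of an FC LLD error handler propagating that result (the handler name and reset step are hypothetical):

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_transport_fc.h>

static int example_eh_device_reset(struct scsi_cmnd *cmnd)
{
        int ret;

        ret = fc_block_scsi_eh(cmnd);   /* wait for the rport, or fail fast */
        if (ret)
                return ret;             /* typically FAST_IO_FAIL */

        /* ... perform the actual device reset here ... */
        return SUCCESS;
}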
diff --git a/include/sound/asound.h b/include/sound/asound.h
index 098595500632..9f1eecf99e6b 100644
--- a/include/sound/asound.h
+++ b/include/sound/asound.h
@@ -574,7 +574,7 @@ enum {
#define SNDRV_TIMER_FLG_SLAVE (1<<0) /* cannot be controlled */
struct snd_timer_id {
- int dev_class;
+ int dev_class;
int dev_sclass;
int card;
int device;
@@ -762,7 +762,7 @@ struct snd_ctl_elem_id {
snd_ctl_elem_iface_t iface; /* interface identifier */
unsigned int device; /* device/client number */
unsigned int subdevice; /* subdevice (substream) number */
- unsigned char name[44]; /* ASCII name of item */
+ unsigned char name[44]; /* ASCII name of item */
unsigned int index; /* index of item */
};
@@ -809,7 +809,7 @@ struct snd_ctl_elem_info {
struct snd_ctl_elem_value {
struct snd_ctl_elem_id id; /* W: element ID */
unsigned int indirect: 1; /* W: indirect access - obsoleted */
- union {
+ union {
union {
long value[128];
long *value_ptr; /* obsoleted */
@@ -827,15 +827,15 @@ struct snd_ctl_elem_value {
unsigned char *data_ptr; /* obsoleted */
} bytes;
struct snd_aes_iec958 iec958;
- } value; /* RO */
+ } value; /* RO */
struct timespec tstamp;
- unsigned char reserved[128-sizeof(struct timespec)];
+ unsigned char reserved[128-sizeof(struct timespec)];
};
struct snd_ctl_tlv {
- unsigned int numid; /* control element numeric identification */
- unsigned int length; /* in bytes aligned to 4 */
- unsigned int tlv[0]; /* first TLV */
+ unsigned int numid; /* control element numeric identification */
+ unsigned int length; /* in bytes aligned to 4 */
+ unsigned int tlv[0]; /* first TLV */
};
#define SNDRV_CTL_IOCTL_PVERSION _IOR('U', 0x00, int)
@@ -886,8 +886,8 @@ struct snd_ctl_event {
unsigned int mask;
struct snd_ctl_elem_id id;
} elem;
- unsigned char data8[60];
- } data;
+ unsigned char data8[60];
+ } data;
};
/*
diff --git a/include/sound/es1688.h b/include/sound/es1688.h
index 10fcf1465810..3ec7ecbe2502 100644
--- a/include/sound/es1688.h
+++ b/include/sound/es1688.h
@@ -44,7 +44,6 @@ struct snd_es1688 {
unsigned char pad;
unsigned int dma_size;
- struct snd_card *card;
struct snd_pcm *pcm;
struct snd_pcm_substream *playback_substream;
struct snd_pcm_substream *capture_substream;
@@ -108,14 +107,16 @@ struct snd_es1688 {
void snd_es1688_mixer_write(struct snd_es1688 *chip, unsigned char reg, unsigned char data);
int snd_es1688_create(struct snd_card *card,
+ struct snd_es1688 *chip,
unsigned long port,
unsigned long mpu_port,
int irq,
int mpu_irq,
int dma8,
- unsigned short hardware,
- struct snd_es1688 ** rchip);
-int snd_es1688_pcm(struct snd_es1688 *chip, int device, struct snd_pcm ** rpcm);
-int snd_es1688_mixer(struct snd_es1688 *chip);
+ unsigned short hardware);
+int snd_es1688_pcm(struct snd_card *card, struct snd_es1688 *chip, int device,
+ struct snd_pcm **rpcm);
+int snd_es1688_mixer(struct snd_card *card, struct snd_es1688 *chip);
+int snd_es1688_reset(struct snd_es1688 *chip);
#endif /* __SOUND_ES1688_H */
diff --git a/include/sound/info.h b/include/sound/info.h
index 112e8949e1a7..4e94cf1ff762 100644
--- a/include/sound/info.h
+++ b/include/sound/info.h
@@ -51,18 +51,18 @@ struct snd_info_entry_ops {
unsigned short mode, void **file_private_data);
int (*release)(struct snd_info_entry *entry,
unsigned short mode, void *file_private_data);
- long (*read)(struct snd_info_entry *entry, void *file_private_data,
- struct file *file, char __user *buf,
- unsigned long count, unsigned long pos);
- long (*write)(struct snd_info_entry *entry, void *file_private_data,
- struct file *file, const char __user *buf,
- unsigned long count, unsigned long pos);
- long long (*llseek)(struct snd_info_entry *entry,
- void *file_private_data, struct file *file,
- long long offset, int orig);
- unsigned int(*poll)(struct snd_info_entry *entry,
- void *file_private_data, struct file *file,
- poll_table *wait);
+ ssize_t (*read)(struct snd_info_entry *entry, void *file_private_data,
+ struct file *file, char __user *buf,
+ size_t count, loff_t pos);
+ ssize_t (*write)(struct snd_info_entry *entry, void *file_private_data,
+ struct file *file, const char __user *buf,
+ size_t count, loff_t pos);
+ loff_t (*llseek)(struct snd_info_entry *entry,
+ void *file_private_data, struct file *file,
+ loff_t offset, int orig);
+ unsigned int (*poll)(struct snd_info_entry *entry,
+ void *file_private_data, struct file *file,
+ poll_table *wait);
int (*ioctl)(struct snd_info_entry *entry, void *file_private_data,
struct file *file, unsigned int cmd, unsigned long arg);
int (*mmap)(struct snd_info_entry *entry, void *file_private_data,
diff --git a/include/sound/jack.h b/include/sound/jack.h
index f236e426a706..d90b9fa32707 100644
--- a/include/sound/jack.h
+++ b/include/sound/jack.h
@@ -42,6 +42,11 @@ enum snd_jack_types {
SND_JACK_MECHANICAL = 0x0008, /* If detected separately */
SND_JACK_VIDEOOUT = 0x0010,
SND_JACK_AVOUT = SND_JACK_LINEOUT | SND_JACK_VIDEOOUT,
+
+ /* Kept separate from switches to facilitate implementation */
+ SND_JACK_BTN_0 = 0x4000,
+ SND_JACK_BTN_1 = 0x2000,
+ SND_JACK_BTN_2 = 0x1000,
};
struct snd_jack {
@@ -50,6 +55,7 @@ struct snd_jack {
int type;
const char *id;
char name[100];
+ unsigned int key[3]; /* Keep in sync with definitions above */
void *private_data;
void (*private_free)(struct snd_jack *);
};
@@ -59,6 +65,8 @@ struct snd_jack {
int snd_jack_new(struct snd_card *card, const char *id, int type,
struct snd_jack **jack);
void snd_jack_set_parent(struct snd_jack *jack, struct device *parent);
+int snd_jack_set_key(struct snd_jack *jack, enum snd_jack_types type,
+ int keytype);
void snd_jack_report(struct snd_jack *jack, int status);
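
The new SND_JACK_BTN_* bits and snd_jack_set_key() let a driver expose headset buttons as input events; a minimal sketch mapping button 0 to KEY_MEDIA (card handling and the detection wiring are assumed elsewhere):

#include <linux/input.h>
#include <sound/core.h>
#include <sound/jack.h>

static struct snd_jack *example_jack;

static int example_register_jack(struct snd_card *card)
{
        int err;

        err = snd_jack_new(card, "Headset",
                           SND_JACK_HEADSET | SND_JACK_BTN_0, &example_jack);
        if (err < 0)
                return err;

        /* Button 0 is reported through the jack's input device as KEY_MEDIA. */
        return snd_jack_set_key(example_jack, SND_JACK_BTN_0, KEY_MEDIA);
}

/* From the detection code, report plug state and button in one call:
 *      snd_jack_report(example_jack, SND_JACK_HEADSET | SND_JACK_BTN_0);
 */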
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 8b611a561985..dd76cdede64d 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -29,6 +29,7 @@
#include <linux/poll.h>
#include <linux/mm.h>
#include <linux/bitops.h>
+#include <linux/pm_qos_params.h>
#define snd_pcm_substream_chip(substream) ((substream)->private_data)
#define snd_pcm_chip(pcm) ((pcm)->private_data)
@@ -365,7 +366,7 @@ struct snd_pcm_substream {
int number;
char name[32]; /* substream name */
int stream; /* stream (direction) */
- char latency_id[20]; /* latency identifier */
+ struct pm_qos_request_list *latency_pm_qos_req; /* pm_qos request */
size_t buffer_bytes_max; /* limit ring buffer size */
struct snd_dma_buffer dma_buffer;
unsigned int dma_buf_id;
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index 0a0b019d41ad..377693a14385 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -182,6 +182,12 @@ struct snd_soc_dai_ops {
struct snd_soc_dai *);
int (*trigger)(struct snd_pcm_substream *, int,
struct snd_soc_dai *);
+ /*
+ * For hardware based FIFO caused delay reporting.
+ * Optional.
+ */
+ snd_pcm_sframes_t (*delay)(struct snd_pcm_substream *,
+ struct snd_soc_dai *);
};
/*
@@ -215,7 +221,6 @@ struct snd_soc_dai {
unsigned int symmetric_rates:1;
/* DAI runtime info */
- struct snd_pcm_runtime *runtime;
struct snd_soc_codec *codec;
unsigned int active;
unsigned char pop_wait:1;
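
The optional delay() op lets a DAI report extra latency, in frames, caused by its hardware FIFO. A hedged sketch of wiring it into snd_soc_dai_ops; the fixed value stands in for reading a real FIFO-level register:

#include <sound/soc.h>

/* Illustrative only: a real driver would derive this from a FIFO register. */
static snd_pcm_sframes_t example_dai_delay(struct snd_pcm_substream *substream,
                                           struct snd_soc_dai *dai)
{
        return 16;      /* frames still queued in the transmit FIFO */
}

static struct snd_soc_dai_ops example_dai_ops = {
        /* .hw_params, .trigger, ... as before */
        .delay = example_dai_delay,
};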
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index c0922a034223..66ff4c124dbd 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -339,6 +339,9 @@ int snd_soc_dapm_disable_pin(struct snd_soc_codec *codec, const char *pin);
int snd_soc_dapm_nc_pin(struct snd_soc_codec *codec, const char *pin);
int snd_soc_dapm_get_pin_status(struct snd_soc_codec *codec, const char *pin);
int snd_soc_dapm_sync(struct snd_soc_codec *codec);
+int snd_soc_dapm_force_enable_pin(struct snd_soc_codec *codec,
+ const char *pin);
+int snd_soc_dapm_ignore_suspend(struct snd_soc_codec *codec, const char *pin);
/* dapm widget types */
enum snd_soc_dapm_type {
@@ -425,9 +428,8 @@ struct snd_soc_dapm_widget {
unsigned char connected:1; /* connected codec pin */
unsigned char new:1; /* cnew complete */
unsigned char ext:1; /* has external widgets */
- unsigned char muted:1; /* muted for pop reduction */
- unsigned char suspend:1; /* was active before suspend */
- unsigned char pmdown:1; /* waiting for timeout */
+ unsigned char force:1; /* force state */
+ unsigned char ignore_suspend:1; /* kept enabled over suspend */
int (*power_check)(struct snd_soc_dapm_widget *w);
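
A machine driver can use the two new DAPM calls above to keep a pin, for example a microphone bias used for jack detection, powered regardless of the audio path and across suspend; a sketch with a board-specific widget name assumed:

#include <sound/soc.h>
#include <sound/soc-dapm.h>

static int example_machine_init(struct snd_soc_codec *codec)
{
        /* "Mic Bias" is an assumed widget name from the codec driver. */
        snd_soc_dapm_force_enable_pin(codec, "Mic Bias");
        snd_soc_dapm_ignore_suspend(codec, "Mic Bias");

        return snd_soc_dapm_sync(codec);
}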
diff --git a/include/sound/soc.h b/include/sound/soc.h
index a57fbfcd4c8f..697e7ffe39d7 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/types.h>
+#include <linux/notifier.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -29,10 +30,10 @@
#define SOC_SINGLE_VALUE(xreg, xshift, xmax, xinvert) \
((unsigned long)&(struct soc_mixer_control) \
{.reg = xreg, .shift = xshift, .rshift = xshift, .max = xmax, \
- .invert = xinvert})
+ .platform_max = xmax, .invert = xinvert})
#define SOC_SINGLE_VALUE_EXT(xreg, xmax, xinvert) \
((unsigned long)&(struct soc_mixer_control) \
- {.reg = xreg, .max = xmax, .invert = xinvert})
+ {.reg = xreg, .max = xmax, .platform_max = xmax, .invert = xinvert})
#define SOC_SINGLE(xname, reg, shift, max, invert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.info = snd_soc_info_volsw, .get = snd_soc_get_volsw,\
@@ -52,14 +53,14 @@
.put = snd_soc_put_volsw, \
.private_value = (unsigned long)&(struct soc_mixer_control) \
{.reg = xreg, .shift = shift_left, .rshift = shift_right, \
- .max = xmax, .invert = xinvert} }
+ .max = xmax, .platform_max = xmax, .invert = xinvert} }
#define SOC_DOUBLE_R(xname, reg_left, reg_right, xshift, xmax, xinvert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
.info = snd_soc_info_volsw_2r, \
.get = snd_soc_get_volsw_2r, .put = snd_soc_put_volsw_2r, \
.private_value = (unsigned long)&(struct soc_mixer_control) \
{.reg = reg_left, .rreg = reg_right, .shift = xshift, \
- .max = xmax, .invert = xinvert} }
+ .max = xmax, .platform_max = xmax, .invert = xinvert} }
#define SOC_DOUBLE_TLV(xname, xreg, shift_left, shift_right, xmax, xinvert, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\
@@ -69,7 +70,7 @@
.put = snd_soc_put_volsw, \
.private_value = (unsigned long)&(struct soc_mixer_control) \
{.reg = xreg, .shift = shift_left, .rshift = shift_right,\
- .max = xmax, .invert = xinvert} }
+ .max = xmax, .platform_max = xmax, .invert = xinvert} }
#define SOC_DOUBLE_R_TLV(xname, reg_left, reg_right, xshift, xmax, xinvert, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\
@@ -79,7 +80,7 @@
.get = snd_soc_get_volsw_2r, .put = snd_soc_put_volsw_2r, \
.private_value = (unsigned long)&(struct soc_mixer_control) \
{.reg = reg_left, .rreg = reg_right, .shift = xshift, \
- .max = xmax, .invert = xinvert} }
+ .max = xmax, .platform_max = xmax, .invert = xinvert} }
#define SOC_DOUBLE_S8_TLV(xname, xreg, xmin, xmax, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
@@ -88,7 +89,8 @@
.info = snd_soc_info_volsw_s8, .get = snd_soc_get_volsw_s8, \
.put = snd_soc_put_volsw_s8, \
.private_value = (unsigned long)&(struct soc_mixer_control) \
- {.reg = xreg, .min = xmin, .max = xmax} }
+ {.reg = xreg, .min = xmin, .max = xmax, \
+ .platform_max = xmax} }
#define SOC_ENUM_DOUBLE(xreg, xshift_l, xshift_r, xmax, xtexts) \
{ .reg = xreg, .shift_l = xshift_l, .shift_r = xshift_r, \
.max = xmax, .texts = xtexts }
@@ -125,7 +127,7 @@
.get = xhandler_get, .put = xhandler_put, \
.private_value = (unsigned long)&(struct soc_mixer_control) \
{.reg = xreg, .shift = shift_left, .rshift = shift_right, \
- .max = xmax, .invert = xinvert} }
+ .max = xmax, .platform_max = xmax, .invert = xinvert} }
#define SOC_SINGLE_EXT_TLV(xname, xreg, xshift, xmax, xinvert,\
xhandler_get, xhandler_put, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
@@ -145,7 +147,7 @@
.get = xhandler_get, .put = xhandler_put, \
.private_value = (unsigned long)&(struct soc_mixer_control) \
{.reg = xreg, .shift = shift_left, .rshift = shift_right, \
- .max = xmax, .invert = xinvert} }
+ .max = xmax, .platform_max = xmax, .invert = xinvert} }
#define SOC_DOUBLE_R_EXT_TLV(xname, reg_left, reg_right, xshift, xmax, xinvert,\
xhandler_get, xhandler_put, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
@@ -156,7 +158,7 @@
.get = xhandler_get, .put = xhandler_put, \
.private_value = (unsigned long)&(struct soc_mixer_control) \
{.reg = reg_left, .rreg = reg_right, .shift = xshift, \
- .max = xmax, .invert = xinvert} }
+ .max = xmax, .platform_max = xmax, .invert = xinvert} }
#define SOC_SINGLE_BOOL_EXT(xname, xdata, xhandler_get, xhandler_put) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.info = snd_soc_info_bool_ext, \
@@ -212,6 +214,7 @@ struct snd_soc_dai_mode;
struct snd_soc_pcm_runtime;
struct snd_soc_dai;
struct snd_soc_platform;
+struct snd_soc_dai_link;
struct snd_soc_codec;
struct soc_enum;
struct snd_soc_ac97_ops;
@@ -260,6 +263,10 @@ int snd_soc_jack_new(struct snd_soc_card *card, const char *id, int type,
void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask);
int snd_soc_jack_add_pins(struct snd_soc_jack *jack, int count,
struct snd_soc_jack_pin *pins);
+void snd_soc_jack_notifier_register(struct snd_soc_jack *jack,
+ struct notifier_block *nb);
+void snd_soc_jack_notifier_unregister(struct snd_soc_jack *jack,
+ struct notifier_block *nb);
#ifdef CONFIG_GPIOLIB
int snd_soc_jack_add_gpios(struct snd_soc_jack *jack, int count,
struct snd_soc_jack_gpio *gpios);
@@ -320,6 +327,8 @@ int snd_soc_get_volsw_s8(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
int snd_soc_put_volsw_s8(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
+int snd_soc_limit_volume(struct snd_soc_codec *codec,
+ const char *name, int max);
/**
* struct snd_soc_jack_pin - Describes a pin to update based on jack detection
@@ -363,6 +372,7 @@ struct snd_soc_jack {
struct snd_soc_card *card;
struct list_head pins;
int status;
+ struct blocking_notifier_head notifier;
};
/* SoC PCM stream information */
@@ -374,7 +384,7 @@ struct snd_soc_pcm_stream {
unsigned int rate_max; /* max rate */
unsigned int channels_min; /* min channels */
unsigned int channels_max; /* max channels */
- unsigned int active:1; /* stream is in use */
+ unsigned int active; /* stream is in use */
void *dma_data; /* used by platform code */
};
@@ -407,7 +417,7 @@ struct snd_soc_codec {
struct snd_ac97 *ac97; /* for ad-hoc ac97 devices */
unsigned int active;
unsigned int pcm_devs;
- void *private_data;
+ void *drvdata;
/* codec IO */
void *control_data; /* codec control (i2c/3wire) data */
@@ -462,14 +472,21 @@ struct snd_soc_platform {
int (*probe)(struct platform_device *pdev);
int (*remove)(struct platform_device *pdev);
- int (*suspend)(struct snd_soc_dai *dai);
- int (*resume)(struct snd_soc_dai *dai);
+ int (*suspend)(struct snd_soc_dai_link *dai_link);
+ int (*resume)(struct snd_soc_dai_link *dai_link);
/* pcm creation and destruction */
int (*pcm_new)(struct snd_card *, struct snd_soc_dai *,
struct snd_pcm *);
void (*pcm_free)(struct snd_pcm *);
+ /*
+ * For platform caused delay reporting.
+ * Optional.
+ */
+ snd_pcm_sframes_t (*delay)(struct snd_pcm_substream *,
+ struct snd_soc_dai *);
+
/* platform stream ops */
struct snd_pcm_ops *pcm_ops;
};
@@ -489,6 +506,9 @@ struct snd_soc_dai_link {
/* codec/machine specific init - e.g. add machine controls */
int (*init)(struct snd_soc_codec *codec);
+ /* Keep DAI active over suspend */
+ unsigned int ignore_suspend:1;
+
/* Symmetry requirements */
unsigned int symmetric_rates:1;
@@ -553,7 +573,7 @@ struct snd_soc_pcm_runtime {
/* mixer control */
struct soc_mixer_control {
- int min, max;
+ int min, max, platform_max;
unsigned int reg, rreg, shift, rshift, invert;
};
@@ -583,6 +603,17 @@ static inline unsigned int snd_soc_write(struct snd_soc_codec *codec,
return codec->write(codec, reg, val);
}
+static inline void snd_soc_codec_set_drvdata(struct snd_soc_codec *codec,
+ void *data)
+{
+ codec->drvdata = data;
+}
+
+static inline void *snd_soc_codec_get_drvdata(struct snd_soc_codec *codec)
+{
+ return codec->drvdata;
+}
+
#include <sound/soc-dai.h>
#endif
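
codec->private_data has become codec->drvdata with accessor helpers; a sketch of the intended usage pattern (the private struct and function names are hypothetical):

#include <linux/slab.h>
#include <sound/soc.h>

struct example_priv {
        unsigned int sysclk;
};

static int example_codec_init(struct snd_soc_codec *codec)
{
        struct example_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

        if (!priv)
                return -ENOMEM;

        snd_soc_codec_set_drvdata(codec, priv);
        return 0;
}

static int example_set_sysclk(struct snd_soc_codec *codec, unsigned int freq)
{
        struct example_priv *priv = snd_soc_codec_get_drvdata(codec);

        priv->sysclk = freq;
        return 0;
}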
diff --git a/include/sound/tlv320aic3x.h b/include/sound/tlv320aic3x.h
new file mode 100644
index 000000000000..b1a5f34e5cfa
--- /dev/null
+++ b/include/sound/tlv320aic3x.h
@@ -0,0 +1,17 @@
+/*
+ * Platform data for Texas Instruments TLV320AIC3x codec
+ *
+ * Author: Jarkko Nikula <jhnikula@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __TLV320AIC3x_H__
+#define __TLV320AIC3x_H__
+
+struct aic3x_pdata {
+ int gpio_reset; /* < 0 if not used */
+};
+
+#endif
\ No newline at end of file
diff --git a/include/sound/tlv320dac33-plat.h b/include/sound/tlv320dac33-plat.h
index ac0665264bdf..3f428d53195b 100644
--- a/include/sound/tlv320dac33-plat.h
+++ b/include/sound/tlv320dac33-plat.h
@@ -15,6 +15,7 @@
struct tlv320dac33_platform_data {
int power_gpio;
+ int keep_bclk; /* Keep the BCLK running in FIFO modes */
u8 burst_bclkdiv;
};
diff --git a/include/sound/uda134x.h b/include/sound/uda134x.h
index 475ef8bb7dcd..509efb050176 100644
--- a/include/sound/uda134x.h
+++ b/include/sound/uda134x.h
@@ -21,6 +21,7 @@ struct uda134x_platform_data {
#define UDA134X_UDA1340 1
#define UDA134X_UDA1341 2
#define UDA134X_UDA1344 3
+#define UDA134X_UDA1345 4
};
#endif /* _UDA134X_H */
diff --git a/include/sound/version.h b/include/sound/version.h
index 7fed23442db8..bf69a5b7e65f 100644
--- a/include/sound/version.h
+++ b/include/sound/version.h
@@ -1,3 +1,3 @@
/* include/version.h */
-#define CONFIG_SND_VERSION "1.0.22.1"
+#define CONFIG_SND_VERSION "1.0.23"
#define CONFIG_SND_DATE ""
diff --git a/include/sound/wm8903.h b/include/sound/wm8903.h
new file mode 100644
index 000000000000..b4a0db2307ef
--- /dev/null
+++ b/include/sound/wm8903.h
@@ -0,0 +1,249 @@
+/*
+ * linux/sound/wm8903.h -- Platform data for WM8903
+ *
+ * Copyright 2010 Wolfson Microelectronics. PLC.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_SND_WM8903_H
+#define __LINUX_SND_WM8903_H
+
+/* Used to enable configuration of a GPIO to all zeros */
+#define WM8903_GPIO_NO_CONFIG 0x8000
+
+/*
+ * R6 (0x06) - Mic Bias Control 0
+ */
+#define WM8903_MICDET_HYST_ENA 0x0080 /* MICDET_HYST_ENA */
+#define WM8903_MICDET_HYST_ENA_MASK 0x0080 /* MICDET_HYST_ENA */
+#define WM8903_MICDET_HYST_ENA_SHIFT 7 /* MICDET_HYST_ENA */
+#define WM8903_MICDET_HYST_ENA_WIDTH 1 /* MICDET_HYST_ENA */
+#define WM8903_MICDET_THR_MASK 0x0070 /* MICDET_THR - [6:4] */
+#define WM8903_MICDET_THR_SHIFT 4 /* MICDET_THR - [6:4] */
+#define WM8903_MICDET_THR_WIDTH 3 /* MICDET_THR - [6:4] */
+#define WM8903_MICSHORT_THR_MASK 0x000C /* MICSHORT_THR - [3:2] */
+#define WM8903_MICSHORT_THR_SHIFT 2 /* MICSHORT_THR - [3:2] */
+#define WM8903_MICSHORT_THR_WIDTH 2 /* MICSHORT_THR - [3:2] */
+#define WM8903_MICDET_ENA 0x0002 /* MICDET_ENA */
+#define WM8903_MICDET_ENA_MASK 0x0002 /* MICDET_ENA */
+#define WM8903_MICDET_ENA_SHIFT 1 /* MICDET_ENA */
+#define WM8903_MICDET_ENA_WIDTH 1 /* MICDET_ENA */
+#define WM8903_MICBIAS_ENA 0x0001 /* MICBIAS_ENA */
+#define WM8903_MICBIAS_ENA_MASK 0x0001 /* MICBIAS_ENA */
+#define WM8903_MICBIAS_ENA_SHIFT 0 /* MICBIAS_ENA */
+#define WM8903_MICBIAS_ENA_WIDTH 1 /* MICBIAS_ENA */
+
+/*
+ * R116 (0x74) - GPIO Control 1
+ */
+#define WM8903_GP1_FN_MASK 0x1F00 /* GP1_FN - [12:8] */
+#define WM8903_GP1_FN_SHIFT 8 /* GP1_FN - [12:8] */
+#define WM8903_GP1_FN_WIDTH 5 /* GP1_FN - [12:8] */
+#define WM8903_GP1_DIR 0x0080 /* GP1_DIR */
+#define WM8903_GP1_DIR_MASK 0x0080 /* GP1_DIR */
+#define WM8903_GP1_DIR_SHIFT 7 /* GP1_DIR */
+#define WM8903_GP1_DIR_WIDTH 1 /* GP1_DIR */
+#define WM8903_GP1_OP_CFG 0x0040 /* GP1_OP_CFG */
+#define WM8903_GP1_OP_CFG_MASK 0x0040 /* GP1_OP_CFG */
+#define WM8903_GP1_OP_CFG_SHIFT 6 /* GP1_OP_CFG */
+#define WM8903_GP1_OP_CFG_WIDTH 1 /* GP1_OP_CFG */
+#define WM8903_GP1_IP_CFG 0x0020 /* GP1_IP_CFG */
+#define WM8903_GP1_IP_CFG_MASK 0x0020 /* GP1_IP_CFG */
+#define WM8903_GP1_IP_CFG_SHIFT 5 /* GP1_IP_CFG */
+#define WM8903_GP1_IP_CFG_WIDTH 1 /* GP1_IP_CFG */
+#define WM8903_GP1_LVL 0x0010 /* GP1_LVL */
+#define WM8903_GP1_LVL_MASK 0x0010 /* GP1_LVL */
+#define WM8903_GP1_LVL_SHIFT 4 /* GP1_LVL */
+#define WM8903_GP1_LVL_WIDTH 1 /* GP1_LVL */
+#define WM8903_GP1_PD 0x0008 /* GP1_PD */
+#define WM8903_GP1_PD_MASK 0x0008 /* GP1_PD */
+#define WM8903_GP1_PD_SHIFT 3 /* GP1_PD */
+#define WM8903_GP1_PD_WIDTH 1 /* GP1_PD */
+#define WM8903_GP1_PU 0x0004 /* GP1_PU */
+#define WM8903_GP1_PU_MASK 0x0004 /* GP1_PU */
+#define WM8903_GP1_PU_SHIFT 2 /* GP1_PU */
+#define WM8903_GP1_PU_WIDTH 1 /* GP1_PU */
+#define WM8903_GP1_INTMODE 0x0002 /* GP1_INTMODE */
+#define WM8903_GP1_INTMODE_MASK 0x0002 /* GP1_INTMODE */
+#define WM8903_GP1_INTMODE_SHIFT 1 /* GP1_INTMODE */
+#define WM8903_GP1_INTMODE_WIDTH 1 /* GP1_INTMODE */
+#define WM8903_GP1_DB 0x0001 /* GP1_DB */
+#define WM8903_GP1_DB_MASK 0x0001 /* GP1_DB */
+#define WM8903_GP1_DB_SHIFT 0 /* GP1_DB */
+#define WM8903_GP1_DB_WIDTH 1 /* GP1_DB */
+
+/*
+ * R117 (0x75) - GPIO Control 2
+ */
+#define WM8903_GP2_FN_MASK 0x1F00 /* GP2_FN - [12:8] */
+#define WM8903_GP2_FN_SHIFT 8 /* GP2_FN - [12:8] */
+#define WM8903_GP2_FN_WIDTH 5 /* GP2_FN - [12:8] */
+#define WM8903_GP2_DIR 0x0080 /* GP2_DIR */
+#define WM8903_GP2_DIR_MASK 0x0080 /* GP2_DIR */
+#define WM8903_GP2_DIR_SHIFT 7 /* GP2_DIR */
+#define WM8903_GP2_DIR_WIDTH 1 /* GP2_DIR */
+#define WM8903_GP2_OP_CFG 0x0040 /* GP2_OP_CFG */
+#define WM8903_GP2_OP_CFG_MASK 0x0040 /* GP2_OP_CFG */
+#define WM8903_GP2_OP_CFG_SHIFT 6 /* GP2_OP_CFG */
+#define WM8903_GP2_OP_CFG_WIDTH 1 /* GP2_OP_CFG */
+#define WM8903_GP2_IP_CFG 0x0020 /* GP2_IP_CFG */
+#define WM8903_GP2_IP_CFG_MASK 0x0020 /* GP2_IP_CFG */
+#define WM8903_GP2_IP_CFG_SHIFT 5 /* GP2_IP_CFG */
+#define WM8903_GP2_IP_CFG_WIDTH 1 /* GP2_IP_CFG */
+#define WM8903_GP2_LVL 0x0010 /* GP2_LVL */
+#define WM8903_GP2_LVL_MASK 0x0010 /* GP2_LVL */
+#define WM8903_GP2_LVL_SHIFT 4 /* GP2_LVL */
+#define WM8903_GP2_LVL_WIDTH 1 /* GP2_LVL */
+#define WM8903_GP2_PD 0x0008 /* GP2_PD */
+#define WM8903_GP2_PD_MASK 0x0008 /* GP2_PD */
+#define WM8903_GP2_PD_SHIFT 3 /* GP2_PD */
+#define WM8903_GP2_PD_WIDTH 1 /* GP2_PD */
+#define WM8903_GP2_PU 0x0004 /* GP2_PU */
+#define WM8903_GP2_PU_MASK 0x0004 /* GP2_PU */
+#define WM8903_GP2_PU_SHIFT 2 /* GP2_PU */
+#define WM8903_GP2_PU_WIDTH 1 /* GP2_PU */
+#define WM8903_GP2_INTMODE 0x0002 /* GP2_INTMODE */
+#define WM8903_GP2_INTMODE_MASK 0x0002 /* GP2_INTMODE */
+#define WM8903_GP2_INTMODE_SHIFT 1 /* GP2_INTMODE */
+#define WM8903_GP2_INTMODE_WIDTH 1 /* GP2_INTMODE */
+#define WM8903_GP2_DB 0x0001 /* GP2_DB */
+#define WM8903_GP2_DB_MASK 0x0001 /* GP2_DB */
+#define WM8903_GP2_DB_SHIFT 0 /* GP2_DB */
+#define WM8903_GP2_DB_WIDTH 1 /* GP2_DB */
+
+/*
+ * R118 (0x76) - GPIO Control 3
+ */
+#define WM8903_GP3_FN_MASK 0x1F00 /* GP3_FN - [12:8] */
+#define WM8903_GP3_FN_SHIFT 8 /* GP3_FN - [12:8] */
+#define WM8903_GP3_FN_WIDTH 5 /* GP3_FN - [12:8] */
+#define WM8903_GP3_DIR 0x0080 /* GP3_DIR */
+#define WM8903_GP3_DIR_MASK 0x0080 /* GP3_DIR */
+#define WM8903_GP3_DIR_SHIFT 7 /* GP3_DIR */
+#define WM8903_GP3_DIR_WIDTH 1 /* GP3_DIR */
+#define WM8903_GP3_OP_CFG 0x0040 /* GP3_OP_CFG */
+#define WM8903_GP3_OP_CFG_MASK 0x0040 /* GP3_OP_CFG */
+#define WM8903_GP3_OP_CFG_SHIFT 6 /* GP3_OP_CFG */
+#define WM8903_GP3_OP_CFG_WIDTH 1 /* GP3_OP_CFG */
+#define WM8903_GP3_IP_CFG 0x0020 /* GP3_IP_CFG */
+#define WM8903_GP3_IP_CFG_MASK 0x0020 /* GP3_IP_CFG */
+#define WM8903_GP3_IP_CFG_SHIFT 5 /* GP3_IP_CFG */
+#define WM8903_GP3_IP_CFG_WIDTH 1 /* GP3_IP_CFG */
+#define WM8903_GP3_LVL 0x0010 /* GP3_LVL */
+#define WM8903_GP3_LVL_MASK 0x0010 /* GP3_LVL */
+#define WM8903_GP3_LVL_SHIFT 4 /* GP3_LVL */
+#define WM8903_GP3_LVL_WIDTH 1 /* GP3_LVL */
+#define WM8903_GP3_PD 0x0008 /* GP3_PD */
+#define WM8903_GP3_PD_MASK 0x0008 /* GP3_PD */
+#define WM8903_GP3_PD_SHIFT 3 /* GP3_PD */
+#define WM8903_GP3_PD_WIDTH 1 /* GP3_PD */
+#define WM8903_GP3_PU 0x0004 /* GP3_PU */
+#define WM8903_GP3_PU_MASK 0x0004 /* GP3_PU */
+#define WM8903_GP3_PU_SHIFT 2 /* GP3_PU */
+#define WM8903_GP3_PU_WIDTH 1 /* GP3_PU */
+#define WM8903_GP3_INTMODE 0x0002 /* GP3_INTMODE */
+#define WM8903_GP3_INTMODE_MASK 0x0002 /* GP3_INTMODE */
+#define WM8903_GP3_INTMODE_SHIFT 1 /* GP3_INTMODE */
+#define WM8903_GP3_INTMODE_WIDTH 1 /* GP3_INTMODE */
+#define WM8903_GP3_DB 0x0001 /* GP3_DB */
+#define WM8903_GP3_DB_MASK 0x0001 /* GP3_DB */
+#define WM8903_GP3_DB_SHIFT 0 /* GP3_DB */
+#define WM8903_GP3_DB_WIDTH 1 /* GP3_DB */
+
+/*
+ * R119 (0x77) - GPIO Control 4
+ */
+#define WM8903_GP4_FN_MASK 0x1F00 /* GP4_FN - [12:8] */
+#define WM8903_GP4_FN_SHIFT 8 /* GP4_FN - [12:8] */
+#define WM8903_GP4_FN_WIDTH 5 /* GP4_FN - [12:8] */
+#define WM8903_GP4_DIR 0x0080 /* GP4_DIR */
+#define WM8903_GP4_DIR_MASK 0x0080 /* GP4_DIR */
+#define WM8903_GP4_DIR_SHIFT 7 /* GP4_DIR */
+#define WM8903_GP4_DIR_WIDTH 1 /* GP4_DIR */
+#define WM8903_GP4_OP_CFG 0x0040 /* GP4_OP_CFG */
+#define WM8903_GP4_OP_CFG_MASK 0x0040 /* GP4_OP_CFG */
+#define WM8903_GP4_OP_CFG_SHIFT 6 /* GP4_OP_CFG */
+#define WM8903_GP4_OP_CFG_WIDTH 1 /* GP4_OP_CFG */
+#define WM8903_GP4_IP_CFG 0x0020 /* GP4_IP_CFG */
+#define WM8903_GP4_IP_CFG_MASK 0x0020 /* GP4_IP_CFG */
+#define WM8903_GP4_IP_CFG_SHIFT 5 /* GP4_IP_CFG */
+#define WM8903_GP4_IP_CFG_WIDTH 1 /* GP4_IP_CFG */
+#define WM8903_GP4_LVL 0x0010 /* GP4_LVL */
+#define WM8903_GP4_LVL_MASK 0x0010 /* GP4_LVL */
+#define WM8903_GP4_LVL_SHIFT 4 /* GP4_LVL */
+#define WM8903_GP4_LVL_WIDTH 1 /* GP4_LVL */
+#define WM8903_GP4_PD 0x0008 /* GP4_PD */
+#define WM8903_GP4_PD_MASK 0x0008 /* GP4_PD */
+#define WM8903_GP4_PD_SHIFT 3 /* GP4_PD */
+#define WM8903_GP4_PD_WIDTH 1 /* GP4_PD */
+#define WM8903_GP4_PU 0x0004 /* GP4_PU */
+#define WM8903_GP4_PU_MASK 0x0004 /* GP4_PU */
+#define WM8903_GP4_PU_SHIFT 2 /* GP4_PU */
+#define WM8903_GP4_PU_WIDTH 1 /* GP4_PU */
+#define WM8903_GP4_INTMODE 0x0002 /* GP4_INTMODE */
+#define WM8903_GP4_INTMODE_MASK 0x0002 /* GP4_INTMODE */
+#define WM8903_GP4_INTMODE_SHIFT 1 /* GP4_INTMODE */
+#define WM8903_GP4_INTMODE_WIDTH 1 /* GP4_INTMODE */
+#define WM8903_GP4_DB 0x0001 /* GP4_DB */
+#define WM8903_GP4_DB_MASK 0x0001 /* GP4_DB */
+#define WM8903_GP4_DB_SHIFT 0 /* GP4_DB */
+#define WM8903_GP4_DB_WIDTH 1 /* GP4_DB */
+
+/*
+ * R120 (0x78) - GPIO Control 5
+ */
+#define WM8903_GP5_FN_MASK 0x1F00 /* GP5_FN - [12:8] */
+#define WM8903_GP5_FN_SHIFT 8 /* GP5_FN - [12:8] */
+#define WM8903_GP5_FN_WIDTH 5 /* GP5_FN - [12:8] */
+#define WM8903_GP5_DIR 0x0080 /* GP5_DIR */
+#define WM8903_GP5_DIR_MASK 0x0080 /* GP5_DIR */
+#define WM8903_GP5_DIR_SHIFT 7 /* GP5_DIR */
+#define WM8903_GP5_DIR_WIDTH 1 /* GP5_DIR */
+#define WM8903_GP5_OP_CFG 0x0040 /* GP5_OP_CFG */
+#define WM8903_GP5_OP_CFG_MASK 0x0040 /* GP5_OP_CFG */
+#define WM8903_GP5_OP_CFG_SHIFT 6 /* GP5_OP_CFG */
+#define WM8903_GP5_OP_CFG_WIDTH 1 /* GP5_OP_CFG */
+#define WM8903_GP5_IP_CFG 0x0020 /* GP5_IP_CFG */
+#define WM8903_GP5_IP_CFG_MASK 0x0020 /* GP5_IP_CFG */
+#define WM8903_GP5_IP_CFG_SHIFT 5 /* GP5_IP_CFG */
+#define WM8903_GP5_IP_CFG_WIDTH 1 /* GP5_IP_CFG */
+#define WM8903_GP5_LVL 0x0010 /* GP5_LVL */
+#define WM8903_GP5_LVL_MASK 0x0010 /* GP5_LVL */
+#define WM8903_GP5_LVL_SHIFT 4 /* GP5_LVL */
+#define WM8903_GP5_LVL_WIDTH 1 /* GP5_LVL */
+#define WM8903_GP5_PD 0x0008 /* GP5_PD */
+#define WM8903_GP5_PD_MASK 0x0008 /* GP5_PD */
+#define WM8903_GP5_PD_SHIFT 3 /* GP5_PD */
+#define WM8903_GP5_PD_WIDTH 1 /* GP5_PD */
+#define WM8903_GP5_PU 0x0004 /* GP5_PU */
+#define WM8903_GP5_PU_MASK 0x0004 /* GP5_PU */
+#define WM8903_GP5_PU_SHIFT 2 /* GP5_PU */
+#define WM8903_GP5_PU_WIDTH 1 /* GP5_PU */
+#define WM8903_GP5_INTMODE 0x0002 /* GP5_INTMODE */
+#define WM8903_GP5_INTMODE_MASK 0x0002 /* GP5_INTMODE */
+#define WM8903_GP5_INTMODE_SHIFT 1 /* GP5_INTMODE */
+#define WM8903_GP5_INTMODE_WIDTH 1 /* GP5_INTMODE */
+#define WM8903_GP5_DB 0x0001 /* GP5_DB */
+#define WM8903_GP5_DB_MASK 0x0001 /* GP5_DB */
+#define WM8903_GP5_DB_SHIFT 0 /* GP5_DB */
+#define WM8903_GP5_DB_WIDTH 1 /* GP5_DB */
+
+struct wm8903_platform_data {
+ bool irq_active_low; /* Set if IRQ active low, default high */
+
+ /* Default register value for R6 (Mic bias), used to configure
+ * microphone detection. In conjunction with gpio_cfg this
+ * can be used to route the microphone status signals out onto
+ * the GPIOs for use with snd_soc_jack_add_gpios().
+ */
+ u16 micdet_cfg;
+
+ int micdet_delay; /* Delay after microphone detection (ms) */
+
+ u32 gpio_cfg[5]; /* Default register values for GPIO pin mux */
+};
+
+#endif
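
Board code passes this structure as the codec I2C device's platform data; a hedged sketch with illustrative values:

#include <sound/wm8903.h>

static struct wm8903_platform_data example_wm8903_pdata = {
        .irq_active_low = false,
        .micdet_cfg     = WM8903_MICDET_ENA | WM8903_MICBIAS_ENA,
        .micdet_delay   = 100,          /* ms, illustrative */
        /* .gpio_cfg[] left zeroed here; per the header comment above,
         * WM8903_GPIO_NO_CONFIG requests an all-zero register write instead. */
};

/* Referenced from the board's i2c_board_info as .platform_data. */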
diff --git a/include/sound/wm8904.h b/include/sound/wm8904.h
index d66575a601be..898be3a8db9a 100644
--- a/include/sound/wm8904.h
+++ b/include/sound/wm8904.h
@@ -15,8 +15,111 @@
#ifndef __MFD_WM8994_PDATA_H__
#define __MFD_WM8994_PDATA_H__
-#define WM8904_DRC_REGS 4
-#define WM8904_EQ_REGS 25
+/* Used to enable configuration of a GPIO to all zeros */
+#define WM8904_GPIO_NO_CONFIG 0x8000
+
+/*
+ * R6 (0x06) - Mic Bias Control 0
+ */
+#define WM8904_MICDET_THR_MASK 0x0070 /* MICDET_THR - [6:4] */
+#define WM8904_MICDET_THR_SHIFT 4 /* MICDET_THR - [6:4] */
+#define WM8904_MICDET_THR_WIDTH 3 /* MICDET_THR - [6:4] */
+#define WM8904_MICSHORT_THR_MASK 0x000C /* MICSHORT_THR - [3:2] */
+#define WM8904_MICSHORT_THR_SHIFT 2 /* MICSHORT_THR - [3:2] */
+#define WM8904_MICSHORT_THR_WIDTH 2 /* MICSHORT_THR - [3:2] */
+#define WM8904_MICDET_ENA 0x0002 /* MICDET_ENA */
+#define WM8904_MICDET_ENA_MASK 0x0002 /* MICDET_ENA */
+#define WM8904_MICDET_ENA_SHIFT 1 /* MICDET_ENA */
+#define WM8904_MICDET_ENA_WIDTH 1 /* MICDET_ENA */
+#define WM8904_MICBIAS_ENA 0x0001 /* MICBIAS_ENA */
+#define WM8904_MICBIAS_ENA_MASK 0x0001 /* MICBIAS_ENA */
+#define WM8904_MICBIAS_ENA_SHIFT 0 /* MICBIAS_ENA */
+#define WM8904_MICBIAS_ENA_WIDTH 1 /* MICBIAS_ENA */
+
+/*
+ * R7 (0x07) - Mic Bias Control 1
+ */
+#define WM8904_MIC_DET_FILTER_ENA 0x8000 /* MIC_DET_FILTER_ENA */
+#define WM8904_MIC_DET_FILTER_ENA_MASK 0x8000 /* MIC_DET_FILTER_ENA */
+#define WM8904_MIC_DET_FILTER_ENA_SHIFT 15 /* MIC_DET_FILTER_ENA */
+#define WM8904_MIC_DET_FILTER_ENA_WIDTH 1 /* MIC_DET_FILTER_ENA */
+#define WM8904_MIC_SHORT_FILTER_ENA 0x4000 /* MIC_SHORT_FILTER_ENA */
+#define WM8904_MIC_SHORT_FILTER_ENA_MASK 0x4000 /* MIC_SHORT_FILTER_ENA */
+#define WM8904_MIC_SHORT_FILTER_ENA_SHIFT 14 /* MIC_SHORT_FILTER_ENA */
+#define WM8904_MIC_SHORT_FILTER_ENA_WIDTH 1 /* MIC_SHORT_FILTER_ENA */
+#define WM8904_MICBIAS_SEL_MASK 0x0007 /* MICBIAS_SEL - [2:0] */
+#define WM8904_MICBIAS_SEL_SHIFT 0 /* MICBIAS_SEL - [2:0] */
+#define WM8904_MICBIAS_SEL_WIDTH 3 /* MICBIAS_SEL - [2:0] */
+
+
+/*
+ * R121 (0x79) - GPIO Control 1
+ */
+#define WM8904_GPIO1_PU 0x0020 /* GPIO1_PU */
+#define WM8904_GPIO1_PU_MASK 0x0020 /* GPIO1_PU */
+#define WM8904_GPIO1_PU_SHIFT 5 /* GPIO1_PU */
+#define WM8904_GPIO1_PU_WIDTH 1 /* GPIO1_PU */
+#define WM8904_GPIO1_PD 0x0010 /* GPIO1_PD */
+#define WM8904_GPIO1_PD_MASK 0x0010 /* GPIO1_PD */
+#define WM8904_GPIO1_PD_SHIFT 4 /* GPIO1_PD */
+#define WM8904_GPIO1_PD_WIDTH 1 /* GPIO1_PD */
+#define WM8904_GPIO1_SEL_MASK 0x000F /* GPIO1_SEL - [3:0] */
+#define WM8904_GPIO1_SEL_SHIFT 0 /* GPIO1_SEL - [3:0] */
+#define WM8904_GPIO1_SEL_WIDTH 4 /* GPIO1_SEL - [3:0] */
+
+/*
+ * R122 (0x7A) - GPIO Control 2
+ */
+#define WM8904_GPIO2_PU 0x0020 /* GPIO2_PU */
+#define WM8904_GPIO2_PU_MASK 0x0020 /* GPIO2_PU */
+#define WM8904_GPIO2_PU_SHIFT 5 /* GPIO2_PU */
+#define WM8904_GPIO2_PU_WIDTH 1 /* GPIO2_PU */
+#define WM8904_GPIO2_PD 0x0010 /* GPIO2_PD */
+#define WM8904_GPIO2_PD_MASK 0x0010 /* GPIO2_PD */
+#define WM8904_GPIO2_PD_SHIFT 4 /* GPIO2_PD */
+#define WM8904_GPIO2_PD_WIDTH 1 /* GPIO2_PD */
+#define WM8904_GPIO2_SEL_MASK 0x000F /* GPIO2_SEL - [3:0] */
+#define WM8904_GPIO2_SEL_SHIFT 0 /* GPIO2_SEL - [3:0] */
+#define WM8904_GPIO2_SEL_WIDTH 4 /* GPIO2_SEL - [3:0] */
+
+/*
+ * R123 (0x7B) - GPIO Control 3
+ */
+#define WM8904_GPIO3_PU 0x0020 /* GPIO3_PU */
+#define WM8904_GPIO3_PU_MASK 0x0020 /* GPIO3_PU */
+#define WM8904_GPIO3_PU_SHIFT 5 /* GPIO3_PU */
+#define WM8904_GPIO3_PU_WIDTH 1 /* GPIO3_PU */
+#define WM8904_GPIO3_PD 0x0010 /* GPIO3_PD */
+#define WM8904_GPIO3_PD_MASK 0x0010 /* GPIO3_PD */
+#define WM8904_GPIO3_PD_SHIFT 4 /* GPIO3_PD */
+#define WM8904_GPIO3_PD_WIDTH 1 /* GPIO3_PD */
+#define WM8904_GPIO3_SEL_MASK 0x000F /* GPIO3_SEL - [3:0] */
+#define WM8904_GPIO3_SEL_SHIFT 0 /* GPIO3_SEL - [3:0] */
+#define WM8904_GPIO3_SEL_WIDTH 4 /* GPIO3_SEL - [3:0] */
+
+/*
+ * R124 (0x7C) - GPIO Control 4
+ */
+#define WM8904_GPI7_ENA 0x0200 /* GPI7_ENA */
+#define WM8904_GPI7_ENA_MASK 0x0200 /* GPI7_ENA */
+#define WM8904_GPI7_ENA_SHIFT 9 /* GPI7_ENA */
+#define WM8904_GPI7_ENA_WIDTH 1 /* GPI7_ENA */
+#define WM8904_GPI8_ENA 0x0100 /* GPI8_ENA */
+#define WM8904_GPI8_ENA_MASK 0x0100 /* GPI8_ENA */
+#define WM8904_GPI8_ENA_SHIFT 8 /* GPI8_ENA */
+#define WM8904_GPI8_ENA_WIDTH 1 /* GPI8_ENA */
+#define WM8904_GPIO_BCLK_MODE_ENA 0x0080 /* GPIO_BCLK_MODE_ENA */
+#define WM8904_GPIO_BCLK_MODE_ENA_MASK 0x0080 /* GPIO_BCLK_MODE_ENA */
+#define WM8904_GPIO_BCLK_MODE_ENA_SHIFT 7 /* GPIO_BCLK_MODE_ENA */
+#define WM8904_GPIO_BCLK_MODE_ENA_WIDTH 1 /* GPIO_BCLK_MODE_ENA */
+#define WM8904_GPIO_BCLK_SEL_MASK 0x000F /* GPIO_BCLK_SEL - [3:0] */
+#define WM8904_GPIO_BCLK_SEL_SHIFT 0 /* GPIO_BCLK_SEL - [3:0] */
+#define WM8904_GPIO_BCLK_SEL_WIDTH 4 /* GPIO_BCLK_SEL - [3:0] */
+
+#define WM8904_MIC_REGS 2
+#define WM8904_GPIO_REGS 4
+#define WM8904_DRC_REGS 4
+#define WM8904_EQ_REGS 25
/**
* DRC configurations are specified with a label and a set of register
@@ -52,6 +155,9 @@ struct wm8904_pdata {
int num_retune_mobile_cfgs;
struct wm8904_retune_mobile_cfg *retune_mobile_cfgs;
+
+ u32 gpio_cfg[WM8904_GPIO_REGS];
+ u32 mic_cfg[WM8904_MIC_REGS];
};
#endif
diff --git a/include/sound/wm8960.h b/include/sound/wm8960.h
new file mode 100644
index 000000000000..74e9a95529c5
--- /dev/null
+++ b/include/sound/wm8960.h
@@ -0,0 +1,24 @@
+/*
+ * wm8960.h -- WM8960 Soc Audio driver platform data
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _WM8960_PDATA_H
+#define _WM8960_PDATA_H
+
+#define WM8960_DRES_400R 0
+#define WM8960_DRES_200R 1
+#define WM8960_DRES_600R 2
+#define WM8960_DRES_150R 3
+#define WM8960_DRES_MAX 3
+
+struct wm8960_data {
+ bool capless; /* Headphone outputs configured in capless mode */
+
+ int dres; /* Discharge resistance for headphone outputs */
+};
+
+#endif
diff --git a/include/sound/wm9090.h b/include/sound/wm9090.h
new file mode 100644
index 000000000000..3718928cde1a
--- /dev/null
+++ b/include/sound/wm9090.h
@@ -0,0 +1,28 @@
+/*
+ * linux/sound/wm9090.h -- Platform data for WM9090
+ *
+ * Copyright 2009, 2010 Wolfson Microelectronics. PLC.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_SND_WM9090_H
+#define __LINUX_SND_WM9090_H
+
+struct wm9090_platform_data {
+ /* Line inputs 1 & 2 can optionally be differential */
+ unsigned int lin1_diff:1;
+ unsigned int lin2_diff:1;
+
+ /* AGC configuration. This is intended to protect the speaker
+ * against overdriving and will therefore depend on the
+ * hardware setup with incorrect runtime configuration
+ * potentially causing hardware damage.
+ */
+ unsigned int agc_ena:1;
+ u16 agc[3];
+};
+
+#endif
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
index 5acfb1eb4df9..1dfab5401511 100644
--- a/include/trace/define_trace.h
+++ b/include/trace/define_trace.h
@@ -65,6 +65,10 @@
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+/* Make all open coded DECLARE_TRACE nops */
+#undef DECLARE_TRACE
+#define DECLARE_TRACE(name, proto, args)
+
#ifdef CONFIG_EVENT_TRACING
#include <trace/ftrace.h>
#endif
@@ -75,6 +79,7 @@
#undef DEFINE_EVENT
#undef DEFINE_EVENT_PRINT
#undef TRACE_HEADER_MULTI_READ
+#undef DECLARE_TRACE
/* Only undef what we defined in this file */
#ifdef UNDEF_TRACE_INCLUDE_FILE
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index b17d49dfc3ef..6dd3a51ab1cb 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -5,7 +5,6 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm
-#define TRACE_INCLUDE_FILE kvm
#if defined(__KVM_HAVE_IOAPIC)
TRACE_EVENT(kvm_set_irq,
diff --git a/include/trace/events/lock.h b/include/trace/events/lock.h
index 5c1dcfc16c60..2821b86de63b 100644
--- a/include/trace/events/lock.h
+++ b/include/trace/events/lock.h
@@ -35,15 +35,15 @@ TRACE_EVENT(lock_acquire,
__get_str(name))
);
-TRACE_EVENT(lock_release,
+DECLARE_EVENT_CLASS(lock,
- TP_PROTO(struct lockdep_map *lock, int nested, unsigned long ip),
+ TP_PROTO(struct lockdep_map *lock, unsigned long ip),
- TP_ARGS(lock, nested, ip),
+ TP_ARGS(lock, ip),
TP_STRUCT__entry(
- __string(name, lock->name)
- __field(void *, lockdep_addr)
+ __string( name, lock->name )
+ __field( void *, lockdep_addr )
),
TP_fast_assign(
@@ -51,51 +51,30 @@ TRACE_EVENT(lock_release,
__entry->lockdep_addr = lock;
),
- TP_printk("%p %s",
- __entry->lockdep_addr, __get_str(name))
+ TP_printk("%p %s", __entry->lockdep_addr, __get_str(name))
);
-#ifdef CONFIG_LOCK_STAT
-
-TRACE_EVENT(lock_contended,
+DEFINE_EVENT(lock, lock_release,
TP_PROTO(struct lockdep_map *lock, unsigned long ip),
- TP_ARGS(lock, ip),
+ TP_ARGS(lock, ip)
+);
- TP_STRUCT__entry(
- __string(name, lock->name)
- __field(void *, lockdep_addr)
- ),
+#ifdef CONFIG_LOCK_STAT
- TP_fast_assign(
- __assign_str(name, lock->name);
- __entry->lockdep_addr = lock;
- ),
+DEFINE_EVENT(lock, lock_contended,
- TP_printk("%p %s",
- __entry->lockdep_addr, __get_str(name))
-);
+ TP_PROTO(struct lockdep_map *lock, unsigned long ip),
-TRACE_EVENT(lock_acquired,
- TP_PROTO(struct lockdep_map *lock, unsigned long ip, s64 waittime),
+ TP_ARGS(lock, ip)
+);
- TP_ARGS(lock, ip, waittime),
+DEFINE_EVENT(lock, lock_acquired,
- TP_STRUCT__entry(
- __string(name, lock->name)
- __field(s64, wait_nsec)
- __field(void *, lockdep_addr)
- ),
+ TP_PROTO(struct lockdep_map *lock, unsigned long ip),
- TP_fast_assign(
- __assign_str(name, lock->name);
- __entry->wait_nsec = waittime;
- __entry->lockdep_addr = lock;
- ),
- TP_printk("%p %s (%llu ns)", __entry->lockdep_addr,
- __get_str(name),
- __entry->wait_nsec)
+ TP_ARGS(lock, ip)
);
#endif
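
With the three lock events collapsed into one class, their call sites all take the same (lock, ip) pair; lock_acquired in particular no longer carries a wait-time field. A simplified sketch of the contention path as a caller now sees it (lockdep's real code does more bookkeeping):

#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <trace/events/lock.h>

static inline void example_contention_path(struct lockdep_map *map)
{
        trace_lock_contended(map, _RET_IP_);
        /* ... spin or sleep until the lock is obtained ... */
        trace_lock_acquired(map, _RET_IP_);
}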
diff --git a/include/trace/events/napi.h b/include/trace/events/napi.h
index a8989c4547e7..188deca2f3c7 100644
--- a/include/trace/events/napi.h
+++ b/include/trace/events/napi.h
@@ -1,4 +1,7 @@
-#ifndef _TRACE_NAPI_H_
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM napi
+
+#if !defined(_TRACE_NAPI_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_NAPI_H_
#include <linux/netdevice.h>
@@ -8,4 +11,7 @@ DECLARE_TRACE(napi_poll,
TP_PROTO(struct napi_struct *napi),
TP_ARGS(napi));
-#endif
+#endif /* _TRACE_NAPI_H_ */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index cfceb0b73e20..4f733ecea46e 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -51,15 +51,12 @@ TRACE_EVENT(sched_kthread_stop_ret,
/*
* Tracepoint for waiting on task to unschedule:
- *
- * (NOTE: the 'rq' argument is not used by generic trace events,
- * but used by the latency tracer plugin. )
*/
TRACE_EVENT(sched_wait_task,
- TP_PROTO(struct rq *rq, struct task_struct *p),
+ TP_PROTO(struct task_struct *p),
- TP_ARGS(rq, p),
+ TP_ARGS(p),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
@@ -79,15 +76,12 @@ TRACE_EVENT(sched_wait_task,
/*
* Tracepoint for waking up a task:
- *
- * (NOTE: the 'rq' argument is not used by generic trace events,
- * but used by the latency tracer plugin. )
*/
DECLARE_EVENT_CLASS(sched_wakeup_template,
- TP_PROTO(struct rq *rq, struct task_struct *p, int success),
+ TP_PROTO(struct task_struct *p, int success),
- TP_ARGS(rq, p, success),
+ TP_ARGS(p, success),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
@@ -111,31 +105,25 @@ DECLARE_EVENT_CLASS(sched_wakeup_template,
);
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
- TP_PROTO(struct rq *rq, struct task_struct *p, int success),
- TP_ARGS(rq, p, success));
+ TP_PROTO(struct task_struct *p, int success),
+ TP_ARGS(p, success));
/*
* Tracepoint for waking up a new task:
- *
- * (NOTE: the 'rq' argument is not used by generic trace events,
- * but used by the latency tracer plugin. )
*/
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
- TP_PROTO(struct rq *rq, struct task_struct *p, int success),
- TP_ARGS(rq, p, success));
+ TP_PROTO(struct task_struct *p, int success),
+ TP_ARGS(p, success));
/*
* Tracepoint for task switches, performed by the scheduler:
- *
- * (NOTE: the 'rq' argument is not used by generic trace events,
- * but used by the latency tracer plugin. )
*/
TRACE_EVENT(sched_switch,
- TP_PROTO(struct rq *rq, struct task_struct *prev,
+ TP_PROTO(struct task_struct *prev,
struct task_struct *next),
- TP_ARGS(rq, prev, next),
+ TP_ARGS(prev, next),
TP_STRUCT__entry(
__array( char, prev_comm, TASK_COMM_LEN )
diff --git a/include/trace/events/scsi.h b/include/trace/events/scsi.h
new file mode 100644
index 000000000000..25fbefdf2f2e
--- /dev/null
+++ b/include/trace/events/scsi.h
@@ -0,0 +1,345 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM scsi
+
+#if !defined(_TRACE_SCSI_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SCSI_H
+
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#define scsi_opcode_name(opcode) { opcode, #opcode }
+#define show_opcode_name(val) \
+ __print_symbolic(val, \
+ scsi_opcode_name(TEST_UNIT_READY), \
+ scsi_opcode_name(REZERO_UNIT), \
+ scsi_opcode_name(REQUEST_SENSE), \
+ scsi_opcode_name(FORMAT_UNIT), \
+ scsi_opcode_name(READ_BLOCK_LIMITS), \
+ scsi_opcode_name(REASSIGN_BLOCKS), \
+ scsi_opcode_name(INITIALIZE_ELEMENT_STATUS), \
+ scsi_opcode_name(READ_6), \
+ scsi_opcode_name(WRITE_6), \
+ scsi_opcode_name(SEEK_6), \
+ scsi_opcode_name(READ_REVERSE), \
+ scsi_opcode_name(WRITE_FILEMARKS), \
+ scsi_opcode_name(SPACE), \
+ scsi_opcode_name(INQUIRY), \
+ scsi_opcode_name(RECOVER_BUFFERED_DATA), \
+ scsi_opcode_name(MODE_SELECT), \
+ scsi_opcode_name(RESERVE), \
+ scsi_opcode_name(RELEASE), \
+ scsi_opcode_name(COPY), \
+ scsi_opcode_name(ERASE), \
+ scsi_opcode_name(MODE_SENSE), \
+ scsi_opcode_name(START_STOP), \
+ scsi_opcode_name(RECEIVE_DIAGNOSTIC), \
+ scsi_opcode_name(SEND_DIAGNOSTIC), \
+ scsi_opcode_name(ALLOW_MEDIUM_REMOVAL), \
+ scsi_opcode_name(SET_WINDOW), \
+ scsi_opcode_name(READ_CAPACITY), \
+ scsi_opcode_name(READ_10), \
+ scsi_opcode_name(WRITE_10), \
+ scsi_opcode_name(SEEK_10), \
+ scsi_opcode_name(POSITION_TO_ELEMENT), \
+ scsi_opcode_name(WRITE_VERIFY), \
+ scsi_opcode_name(VERIFY), \
+ scsi_opcode_name(SEARCH_HIGH), \
+ scsi_opcode_name(SEARCH_EQUAL), \
+ scsi_opcode_name(SEARCH_LOW), \
+ scsi_opcode_name(SET_LIMITS), \
+ scsi_opcode_name(PRE_FETCH), \
+ scsi_opcode_name(READ_POSITION), \
+ scsi_opcode_name(SYNCHRONIZE_CACHE), \
+ scsi_opcode_name(LOCK_UNLOCK_CACHE), \
+ scsi_opcode_name(READ_DEFECT_DATA), \
+ scsi_opcode_name(MEDIUM_SCAN), \
+ scsi_opcode_name(COMPARE), \
+ scsi_opcode_name(COPY_VERIFY), \
+ scsi_opcode_name(WRITE_BUFFER), \
+ scsi_opcode_name(READ_BUFFER), \
+ scsi_opcode_name(UPDATE_BLOCK), \
+ scsi_opcode_name(READ_LONG), \
+ scsi_opcode_name(WRITE_LONG), \
+ scsi_opcode_name(CHANGE_DEFINITION), \
+ scsi_opcode_name(WRITE_SAME), \
+ scsi_opcode_name(UNMAP), \
+ scsi_opcode_name(READ_TOC), \
+ scsi_opcode_name(LOG_SELECT), \
+ scsi_opcode_name(LOG_SENSE), \
+ scsi_opcode_name(XDWRITEREAD_10), \
+ scsi_opcode_name(MODE_SELECT_10), \
+ scsi_opcode_name(RESERVE_10), \
+ scsi_opcode_name(RELEASE_10), \
+ scsi_opcode_name(MODE_SENSE_10), \
+ scsi_opcode_name(PERSISTENT_RESERVE_IN), \
+ scsi_opcode_name(PERSISTENT_RESERVE_OUT), \
+ scsi_opcode_name(VARIABLE_LENGTH_CMD), \
+ scsi_opcode_name(REPORT_LUNS), \
+ scsi_opcode_name(MAINTENANCE_IN), \
+ scsi_opcode_name(MAINTENANCE_OUT), \
+ scsi_opcode_name(MOVE_MEDIUM), \
+ scsi_opcode_name(EXCHANGE_MEDIUM), \
+ scsi_opcode_name(READ_12), \
+ scsi_opcode_name(WRITE_12), \
+ scsi_opcode_name(WRITE_VERIFY_12), \
+ scsi_opcode_name(SEARCH_HIGH_12), \
+ scsi_opcode_name(SEARCH_EQUAL_12), \
+ scsi_opcode_name(SEARCH_LOW_12), \
+ scsi_opcode_name(READ_ELEMENT_STATUS), \
+ scsi_opcode_name(SEND_VOLUME_TAG), \
+ scsi_opcode_name(WRITE_LONG_2), \
+ scsi_opcode_name(READ_16), \
+ scsi_opcode_name(WRITE_16), \
+ scsi_opcode_name(VERIFY_16), \
+ scsi_opcode_name(WRITE_SAME_16), \
+ scsi_opcode_name(SERVICE_ACTION_IN), \
+ scsi_opcode_name(SAI_READ_CAPACITY_16), \
+ scsi_opcode_name(SAI_GET_LBA_STATUS), \
+ scsi_opcode_name(MI_REPORT_TARGET_PGS), \
+ scsi_opcode_name(MO_SET_TARGET_PGS), \
+ scsi_opcode_name(READ_32), \
+ scsi_opcode_name(WRITE_32), \
+ scsi_opcode_name(WRITE_SAME_32), \
+ scsi_opcode_name(ATA_16), \
+ scsi_opcode_name(ATA_12))
+
+#define scsi_hostbyte_name(result) { result, #result }
+#define show_hostbyte_name(val) \
+ __print_symbolic(val, \
+ scsi_hostbyte_name(DID_OK), \
+ scsi_hostbyte_name(DID_NO_CONNECT), \
+ scsi_hostbyte_name(DID_BUS_BUSY), \
+ scsi_hostbyte_name(DID_TIME_OUT), \
+ scsi_hostbyte_name(DID_BAD_TARGET), \
+ scsi_hostbyte_name(DID_ABORT), \
+ scsi_hostbyte_name(DID_PARITY), \
+ scsi_hostbyte_name(DID_ERROR), \
+ scsi_hostbyte_name(DID_RESET), \
+ scsi_hostbyte_name(DID_BAD_INTR), \
+ scsi_hostbyte_name(DID_PASSTHROUGH), \
+ scsi_hostbyte_name(DID_SOFT_ERROR), \
+ scsi_hostbyte_name(DID_IMM_RETRY), \
+ scsi_hostbyte_name(DID_REQUEUE), \
+ scsi_hostbyte_name(DID_TRANSPORT_DISRUPTED), \
+ scsi_hostbyte_name(DID_TRANSPORT_FAILFAST))
+
+#define scsi_driverbyte_name(result) { result, #result }
+#define show_driverbyte_name(val) \
+ __print_symbolic(val, \
+ scsi_driverbyte_name(DRIVER_OK), \
+ scsi_driverbyte_name(DRIVER_BUSY), \
+ scsi_driverbyte_name(DRIVER_SOFT), \
+ scsi_driverbyte_name(DRIVER_MEDIA), \
+ scsi_driverbyte_name(DRIVER_ERROR), \
+ scsi_driverbyte_name(DRIVER_INVALID), \
+ scsi_driverbyte_name(DRIVER_TIMEOUT), \
+ scsi_driverbyte_name(DRIVER_HARD), \
+ scsi_driverbyte_name(DRIVER_SENSE))
+
+#define scsi_msgbyte_name(result) { result, #result }
+#define show_msgbyte_name(val) \
+ __print_symbolic(val, \
+ scsi_msgbyte_name(COMMAND_COMPLETE), \
+ scsi_msgbyte_name(EXTENDED_MESSAGE), \
+ scsi_msgbyte_name(SAVE_POINTERS), \
+ scsi_msgbyte_name(RESTORE_POINTERS), \
+ scsi_msgbyte_name(DISCONNECT), \
+ scsi_msgbyte_name(INITIATOR_ERROR), \
+ scsi_msgbyte_name(ABORT_TASK_SET), \
+ scsi_msgbyte_name(MESSAGE_REJECT), \
+ scsi_msgbyte_name(NOP), \
+ scsi_msgbyte_name(MSG_PARITY_ERROR), \
+ scsi_msgbyte_name(LINKED_CMD_COMPLETE), \
+ scsi_msgbyte_name(LINKED_FLG_CMD_COMPLETE), \
+ scsi_msgbyte_name(TARGET_RESET), \
+ scsi_msgbyte_name(ABORT_TASK), \
+ scsi_msgbyte_name(CLEAR_TASK_SET), \
+ scsi_msgbyte_name(INITIATE_RECOVERY), \
+ scsi_msgbyte_name(RELEASE_RECOVERY), \
+ scsi_msgbyte_name(CLEAR_ACA), \
+ scsi_msgbyte_name(LOGICAL_UNIT_RESET), \
+ scsi_msgbyte_name(SIMPLE_QUEUE_TAG), \
+ scsi_msgbyte_name(HEAD_OF_QUEUE_TAG), \
+ scsi_msgbyte_name(ORDERED_QUEUE_TAG), \
+ scsi_msgbyte_name(IGNORE_WIDE_RESIDUE), \
+ scsi_msgbyte_name(ACA), \
+ scsi_msgbyte_name(QAS_REQUEST), \
+ scsi_msgbyte_name(BUS_DEVICE_RESET), \
+ scsi_msgbyte_name(ABORT))
+
+#define scsi_statusbyte_name(result) { result, #result }
+#define show_statusbyte_name(val) \
+ __print_symbolic(val, \
+ scsi_statusbyte_name(SAM_STAT_GOOD), \
+ scsi_statusbyte_name(SAM_STAT_CHECK_CONDITION), \
+ scsi_statusbyte_name(SAM_STAT_CONDITION_MET), \
+ scsi_statusbyte_name(SAM_STAT_BUSY), \
+ scsi_statusbyte_name(SAM_STAT_INTERMEDIATE), \
+ scsi_statusbyte_name(SAM_STAT_INTERMEDIATE_CONDITION_MET), \
+ scsi_statusbyte_name(SAM_STAT_RESERVATION_CONFLICT), \
+ scsi_statusbyte_name(SAM_STAT_COMMAND_TERMINATED), \
+ scsi_statusbyte_name(SAM_STAT_TASK_SET_FULL), \
+ scsi_statusbyte_name(SAM_STAT_ACA_ACTIVE), \
+ scsi_statusbyte_name(SAM_STAT_TASK_ABORTED))
+
+const char *scsi_trace_parse_cdb(struct trace_seq*, unsigned char*, int);
+#define __parse_cdb(cdb, len) scsi_trace_parse_cdb(p, cdb, len)
+
+TRACE_EVENT(scsi_dispatch_cmd_start,
+
+ TP_PROTO(struct scsi_cmnd *cmd),
+
+ TP_ARGS(cmd),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, host_no )
+ __field( unsigned int, channel )
+ __field( unsigned int, id )
+ __field( unsigned int, lun )
+ __field( unsigned int, opcode )
+ __field( unsigned int, cmd_len )
+ __field( unsigned int, data_sglen )
+ __field( unsigned int, prot_sglen )
+ __dynamic_array(unsigned char, cmnd, cmd->cmd_len)
+ ),
+
+ TP_fast_assign(
+ __entry->host_no = cmd->device->host->host_no;
+ __entry->channel = cmd->device->channel;
+ __entry->id = cmd->device->id;
+ __entry->lun = cmd->device->lun;
+ __entry->opcode = cmd->cmnd[0];
+ __entry->cmd_len = cmd->cmd_len;
+ __entry->data_sglen = scsi_sg_count(cmd);
+ __entry->prot_sglen = scsi_prot_sg_count(cmd);
+ memcpy(__get_dynamic_array(cmnd), cmd->cmnd, cmd->cmd_len);
+ ),
+
+ TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u" \
+ " cmnd=(%s %s raw=%s)",
+ __entry->host_no, __entry->channel, __entry->id,
+ __entry->lun, __entry->data_sglen, __entry->prot_sglen,
+ show_opcode_name(__entry->opcode),
+ __parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len),
+ __print_hex(__get_dynamic_array(cmnd), __entry->cmd_len))
+);
+
+TRACE_EVENT(scsi_dispatch_cmd_error,
+
+ TP_PROTO(struct scsi_cmnd *cmd, int rtn),
+
+ TP_ARGS(cmd, rtn),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, host_no )
+ __field( unsigned int, channel )
+ __field( unsigned int, id )
+ __field( unsigned int, lun )
+ __field( int, rtn )
+ __field( unsigned int, opcode )
+ __field( unsigned int, cmd_len )
+ __field( unsigned int, data_sglen )
+ __field( unsigned int, prot_sglen )
+ __dynamic_array(unsigned char, cmnd, cmd->cmd_len)
+ ),
+
+ TP_fast_assign(
+ __entry->host_no = cmd->device->host->host_no;
+ __entry->channel = cmd->device->channel;
+ __entry->id = cmd->device->id;
+ __entry->lun = cmd->device->lun;
+ __entry->rtn = rtn;
+ __entry->opcode = cmd->cmnd[0];
+ __entry->cmd_len = cmd->cmd_len;
+ __entry->data_sglen = scsi_sg_count(cmd);
+ __entry->prot_sglen = scsi_prot_sg_count(cmd);
+ memcpy(__get_dynamic_array(cmnd), cmd->cmnd, cmd->cmd_len);
+ ),
+
+ TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u" \
+ " cmnd=(%s %s raw=%s) rtn=%d",
+ __entry->host_no, __entry->channel, __entry->id,
+ __entry->lun, __entry->data_sglen, __entry->prot_sglen,
+ show_opcode_name(__entry->opcode),
+ __parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len),
+ __print_hex(__get_dynamic_array(cmnd), __entry->cmd_len),
+ __entry->rtn)
+);
+
+DECLARE_EVENT_CLASS(scsi_cmd_done_timeout_template,
+
+ TP_PROTO(struct scsi_cmnd *cmd),
+
+ TP_ARGS(cmd),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, host_no )
+ __field( unsigned int, channel )
+ __field( unsigned int, id )
+ __field( unsigned int, lun )
+ __field( int, result )
+ __field( unsigned int, opcode )
+ __field( unsigned int, cmd_len )
+ __field( unsigned int, data_sglen )
+ __field( unsigned int, prot_sglen )
+ __dynamic_array(unsigned char, cmnd, cmd->cmd_len)
+ ),
+
+ TP_fast_assign(
+ __entry->host_no = cmd->device->host->host_no;
+ __entry->channel = cmd->device->channel;
+ __entry->id = cmd->device->id;
+ __entry->lun = cmd->device->lun;
+ __entry->result = cmd->result;
+ __entry->opcode = cmd->cmnd[0];
+ __entry->cmd_len = cmd->cmd_len;
+ __entry->data_sglen = scsi_sg_count(cmd);
+ __entry->prot_sglen = scsi_prot_sg_count(cmd);
+ memcpy(__get_dynamic_array(cmnd), cmd->cmnd, cmd->cmd_len);
+ ),
+
+ TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u " \
+ "prot_sgl=%u cmnd=(%s %s raw=%s) result=(driver=%s host=%s " \
+ "message=%s status=%s)",
+ __entry->host_no, __entry->channel, __entry->id,
+ __entry->lun, __entry->data_sglen, __entry->prot_sglen,
+ show_opcode_name(__entry->opcode),
+ __parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len),
+ __print_hex(__get_dynamic_array(cmnd), __entry->cmd_len),
+ show_driverbyte_name(((__entry->result) >> 24) & 0xff),
+ show_hostbyte_name(((__entry->result) >> 16) & 0xff),
+ show_msgbyte_name(((__entry->result) >> 8) & 0xff),
+ show_statusbyte_name(__entry->result & 0xff))
+);
+
+DEFINE_EVENT(scsi_cmd_done_timeout_template, scsi_dispatch_cmd_done,
+ TP_PROTO(struct scsi_cmnd *cmd),
+ TP_ARGS(cmd));
+
+DEFINE_EVENT(scsi_cmd_done_timeout_template, scsi_dispatch_cmd_timeout,
+ TP_PROTO(struct scsi_cmnd *cmd),
+ TP_ARGS(cmd));
+
+TRACE_EVENT(scsi_eh_wakeup,
+
+ TP_PROTO(struct Scsi_Host *shost),
+
+ TP_ARGS(shost),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, host_no )
+ ),
+
+ TP_fast_assign(
+ __entry->host_no = shost->host_no;
+ ),
+
+ TP_printk("host_no=%u", __entry->host_no)
+);
+
+#endif /* _TRACE_SCSI_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
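The done/timeout template above unpacks the 32-bit cmd->result into four bytes with fixed shifts: driver byte in bits 31:24, host byte in 23:16, message byte in 15:8 and status byte in 7:0. A minimal userspace sketch of the same decoding, handy when post-processing raw trace output; the helper name and the sample value are illustrative only, not part of any kernel API:

#include <stdio.h>

/* Illustrative: split a SCSI result word exactly as the TP_printk above does. */
static void decode_scsi_result(unsigned int result)
{
	unsigned int driver_byte = (result >> 24) & 0xff;
	unsigned int host_byte   = (result >> 16) & 0xff;
	unsigned int msg_byte    = (result >> 8) & 0xff;
	unsigned int status_byte = result & 0xff;

	printf("driver=0x%02x host=0x%02x message=0x%02x status=0x%02x\n",
	       driver_byte, host_byte, msg_byte, status_byte);
}

int main(void)
{
	decode_scsi_result(0x08000002);	/* DRIVER_SENSE + SAM_STAT_CHECK_CONDITION */
	return 0;
}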
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 75dd7787fb37..88c59c13ea7b 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -200,6 +200,9 @@
ftrace_print_symbols_seq(p, value, symbols); \
})
+#undef __print_hex
+#define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len)
+
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static notrace enum print_line_t \
@@ -763,13 +766,12 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static notrace void \
perf_trace_templ_##call(struct ftrace_event_call *event_call, \
- proto) \
+ struct pt_regs *__regs, proto) \
{ \
struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
struct ftrace_raw_##call *entry; \
u64 __addr = 0, __count = 1; \
unsigned long irq_flags; \
- struct pt_regs *__regs; \
int __entry_size; \
int __data_size; \
int rctx; \
@@ -790,20 +792,22 @@ perf_trace_templ_##call(struct ftrace_event_call *event_call, \
\
{ assign; } \
\
- __regs = &__get_cpu_var(perf_trace_regs); \
- perf_fetch_caller_regs(__regs, 2); \
- \
perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
__count, irq_flags, __regs); \
}
#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, call, proto, args) \
-static notrace void perf_trace_##call(proto) \
-{ \
- struct ftrace_event_call *event_call = &event_##call; \
- \
- perf_trace_templ_##template(event_call, args); \
+#define DEFINE_EVENT(template, call, proto, args) \
+static notrace void perf_trace_##call(proto) \
+{ \
+ struct ftrace_event_call *event_call = &event_##call; \
+ struct pt_regs *__regs = &get_cpu_var(perf_trace_regs); \
+ \
+ perf_fetch_caller_regs(__regs, 1); \
+ \
+ perf_trace_templ_##template(event_call, __regs, args); \
+ \
+ put_cpu_var(perf_trace_regs); \
}
#undef DEFINE_EVENT_PRINT
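The ftrace.h hunk above wires the new __print_hex() helper, used by the raw=%s field in the SCSI events, to ftrace_print_hex_seq(). A rough userspace equivalent of that output, assuming the simple two-hex-digits-per-byte rendering; the sample CDB is made up for illustration:

#include <stdio.h>

static void print_cdb_hex(const unsigned char *cdb, int len)
{
	int i;

	for (i = 0; i < len; i++)
		printf("%02x", cdb[i]);	/* one byte -> two hex digits */
	printf("\n");
}

int main(void)
{
	/* READ_10, one block at LBA 0 -- a hypothetical example CDB */
	unsigned char cdb[10] = { 0x28, 0, 0, 0, 0, 0, 0, 0, 1, 0 };

	print_cdb_hex(cdb, sizeof(cdb));
	return 0;
}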
diff --git a/init/Kconfig b/init/Kconfig
index eb77e8ccde1c..3669b62ac322 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -76,6 +76,14 @@ config INIT_ENV_ARG_LIMIT
variables passed to init from the kernel command line.
+config CROSS_COMPILE
+ string "Cross-compiler tool prefix"
+ help
+ Same as running 'make CROSS_COMPILE=prefix-' but stored for
+ default make runs in this kernel build directory. You don't
+ need to set this unless you want the configured kernel build
+ directory to select the cross-compiler automatically.
+
config LOCALVERSION
string "Local version - append to kernel release"
help
@@ -312,13 +320,17 @@ config AUDITSYSCALL
help
Enable low-overhead system-call auditing infrastructure that
can be used independently or with another kernel subsystem,
- such as SELinux. To use audit's filesystem watch feature, please
- ensure that INOTIFY is configured.
+ such as SELinux.
+
+config AUDIT_WATCH
+ def_bool y
+ depends on AUDITSYSCALL
+ select FSNOTIFY
config AUDIT_TREE
def_bool y
depends on AUDITSYSCALL
- select INOTIFY
+ select FSNOTIFY
menu "RCU Subsystem"
@@ -604,14 +616,40 @@ config RT_GROUP_SCHED
default n
help
This feature lets you explicitly allocate real CPU bandwidth
- to users or control groups (depending on the "Basis for grouping tasks"
- setting below. If enabled, it will also make it impossible to
+ to task groups. If enabled, it will also make it impossible to
schedule realtime tasks for non-root users until you allocate
realtime bandwidth for them.
See Documentation/scheduler/sched-rt-group.txt for more information.
endif #CGROUP_SCHED
+config BLK_CGROUP
+ tristate "Block IO controller"
+ depends on CGROUPS && BLOCK
+ default n
+ ---help---
+ Generic block IO controller cgroup interface. This is the common
+ cgroup interface which should be used by various IO controlling
+ policies.
+
+ Currently, CFQ IO scheduler uses it to recognize task groups and
+ control disk bandwidth allocation (proportional time slice allocation)
+ to such task groups.
+
+ This option only enables generic Block IO controller infrastructure.
+ One needs to also enable actual IO controlling logic in CFQ for it
+ to take effect. (CONFIG_CFQ_GROUP_IOSCHED=y).
+
+ See Documentation/cgroups/blkio-controller.txt for more information.
+
+config DEBUG_BLK_CGROUP
+ bool "Enable Block IO controller debugging"
+ depends on BLK_CGROUP
+ default n
+ ---help---
+ Enable some debugging help. Currently it exports additional stat
+ files in a cgroup which can be useful for debugging.
+
endif # CGROUPS
config MM_OWNER
@@ -942,6 +980,11 @@ config PERF_USE_VMALLOC
help
See tools/perf/design.txt for details
+config PERF_EVENTS_NMI
+ bool
+ help
+ Arch has support for nmi_watchdog
+
menu "Kernel Performance Events And Counters"
config PERF_EVENTS
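The BLK_CGROUP help text above stresses that the option only provides the cgroup plumbing; CFQ (CONFIG_CFQ_GROUP_IOSCHED) is what actually turns a group's weight into a proportional share of disk time. A hedged userspace sketch of setting such a weight, assuming the blkio controller is mounted at /cgroup/blkio and a child group named "test" already exists -- both assumptions, not defaults:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Mount point and group name are illustrative assumptions. */
	const char *path = "/cgroup/blkio/test/blkio.weight";
	const char *weight = "500\n";	/* weights are roughly in the 100-1000 range */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, weight, strlen(weight)) < 0)
		perror("write");
	close(fd);
	return 0;
}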
diff --git a/init/main.c b/init/main.c
index 5c8540271529..a8ae2406d826 100644
--- a/init/main.c
+++ b/init/main.c
@@ -62,6 +62,7 @@
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/idr.h>
+#include <linux/kgdb.h>
#include <linux/ftrace.h>
#include <linux/async.h>
#include <linux/kmemcheck.h>
@@ -201,11 +202,11 @@ static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
static const char *panic_later, *panic_param;
-extern struct obs_kernel_param __setup_start[], __setup_end[];
+extern const struct obs_kernel_param __setup_start[], __setup_end[];
static int __init obsolete_checksetup(char *line)
{
- struct obs_kernel_param *p;
+ const struct obs_kernel_param *p;
int had_early_param = 0;
p = __setup_start;
@@ -451,7 +452,7 @@ static noinline void __init_refok rest_init(void)
/* Check for early params. */
static int __init do_early_param(char *param, char *val)
{
- struct obs_kernel_param *p;
+ const struct obs_kernel_param *p;
for (p = __setup_start; p < __setup_end; p++) {
if ((p->early && strcmp(param, p->str) == 0) ||
@@ -528,7 +529,7 @@ static void __init mm_init(void)
asmlinkage void __init start_kernel(void)
{
char * command_line;
- extern struct kernel_param __start___param[], __stop___param[];
+ extern const struct kernel_param __start___param[], __stop___param[];
smp_setup_processor_id();
@@ -675,6 +676,7 @@ asmlinkage void __init start_kernel(void)
buffer_init();
key_init();
security_init();
+ dbg_late_init();
vfs_caches_init(totalram_pages);
signals_init();
/* rootfs populating might need page-writeback */
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 722b0130aa94..d6c09c46ad06 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -429,7 +429,7 @@ static void wq_add(struct mqueue_inode_info *info, int sr,
* sr: SEND or RECV
*/
static int wq_sleep(struct mqueue_inode_info *info, int sr,
- long timeout, struct ext_wait_queue *ewp)
+ ktime_t *timeout, struct ext_wait_queue *ewp)
{
int retval;
signed long time;
@@ -440,7 +440,8 @@ static int wq_sleep(struct mqueue_inode_info *info, int sr,
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock(&info->lock);
- time = schedule_timeout(timeout);
+ time = schedule_hrtimeout_range_clock(timeout,
+ HRTIMER_MODE_ABS, 0, CLOCK_REALTIME);
while (ewp->state == STATE_PENDING)
cpu_relax();
@@ -552,31 +553,16 @@ static void __do_notify(struct mqueue_inode_info *info)
wake_up(&info->wait_q);
}
-static long prepare_timeout(struct timespec *p)
+static int prepare_timeout(const struct timespec __user *u_abs_timeout,
+ ktime_t *expires, struct timespec *ts)
{
- struct timespec nowts;
- long timeout;
-
- if (p) {
- if (unlikely(p->tv_nsec < 0 || p->tv_sec < 0
- || p->tv_nsec >= NSEC_PER_SEC))
- return -EINVAL;
- nowts = CURRENT_TIME;
- /* first subtract as jiffies can't be too big */
- p->tv_sec -= nowts.tv_sec;
- if (p->tv_nsec < nowts.tv_nsec) {
- p->tv_nsec += NSEC_PER_SEC;
- p->tv_sec--;
- }
- p->tv_nsec -= nowts.tv_nsec;
- if (p->tv_sec < 0)
- return 0;
-
- timeout = timespec_to_jiffies(p) + 1;
- } else
- return MAX_SCHEDULE_TIMEOUT;
+ if (copy_from_user(ts, u_abs_timeout, sizeof(struct timespec)))
+ return -EFAULT;
+ if (!timespec_valid(ts))
+ return -EINVAL;
- return timeout;
+ *expires = timespec_to_ktime(*ts);
+ return 0;
}
static void remove_notification(struct mqueue_inode_info *info)
@@ -862,22 +848,21 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
struct ext_wait_queue *receiver;
struct msg_msg *msg_ptr;
struct mqueue_inode_info *info;
- struct timespec ts, *p = NULL;
- long timeout;
+ ktime_t expires, *timeout = NULL;
+ struct timespec ts;
int ret;
if (u_abs_timeout) {
- if (copy_from_user(&ts, u_abs_timeout,
- sizeof(struct timespec)))
- return -EFAULT;
- p = &ts;
+ int res = prepare_timeout(u_abs_timeout, &expires, &ts);
+ if (res)
+ return res;
+ timeout = &expires;
}
if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
return -EINVAL;
- audit_mq_sendrecv(mqdes, msg_len, msg_prio, p);
- timeout = prepare_timeout(p);
+ audit_mq_sendrecv(mqdes, msg_len, msg_prio, timeout ? &ts : NULL);
filp = fget(mqdes);
if (unlikely(!filp)) {
@@ -919,9 +904,6 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
if (filp->f_flags & O_NONBLOCK) {
spin_unlock(&info->lock);
ret = -EAGAIN;
- } else if (unlikely(timeout < 0)) {
- spin_unlock(&info->lock);
- ret = timeout;
} else {
wait.task = current;
wait.msg = (void *) msg_ptr;
@@ -954,24 +936,23 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
size_t, msg_len, unsigned int __user *, u_msg_prio,
const struct timespec __user *, u_abs_timeout)
{
- long timeout;
ssize_t ret;
struct msg_msg *msg_ptr;
struct file *filp;
struct inode *inode;
struct mqueue_inode_info *info;
struct ext_wait_queue wait;
- struct timespec ts, *p = NULL;
+ ktime_t expires, *timeout = NULL;
+ struct timespec ts;
if (u_abs_timeout) {
- if (copy_from_user(&ts, u_abs_timeout,
- sizeof(struct timespec)))
- return -EFAULT;
- p = &ts;
+ int res = prepare_timeout(u_abs_timeout, &expires, &ts);
+ if (res)
+ return res;
+ timeout = &expires;
}
- audit_mq_sendrecv(mqdes, msg_len, 0, p);
- timeout = prepare_timeout(p);
+ audit_mq_sendrecv(mqdes, msg_len, 0, timeout ? &ts : NULL);
filp = fget(mqdes);
if (unlikely(!filp)) {
@@ -1003,11 +984,6 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
if (filp->f_flags & O_NONBLOCK) {
spin_unlock(&info->lock);
ret = -EAGAIN;
- msg_ptr = NULL;
- } else if (unlikely(timeout < 0)) {
- spin_unlock(&info->lock);
- ret = timeout;
- msg_ptr = NULL;
} else {
wait.task = current;
wait.state = STATE_NONE;
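The mqueue conversion above replaces the old relative-jiffies timeout with an absolute CLOCK_REALTIME hrtimer: the user-supplied timespec is copied in, validated with timespec_valid() before the queue is even looked up, and converted straight to a ktime_t for schedule_hrtimeout_range_clock(). The userspace contract is unchanged; a minimal caller sketch (queue name and attributes are made up for illustration, link with -lrt):

#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

int main(void)
{
	struct mq_attr attr = { .mq_maxmsg = 4, .mq_msgsize = 64 };	/* illustrative */
	struct timespec abs_timeout;
	mqd_t q = mq_open("/demo", O_CREAT | O_WRONLY, 0600, &attr);

	if (q == (mqd_t)-1) {
		perror("mq_open");
		return 1;
	}

	/* mq_timedsend() takes an absolute CLOCK_REALTIME deadline */
	clock_gettime(CLOCK_REALTIME, &abs_timeout);
	abs_timeout.tv_sec += 2;

	if (mq_timedsend(q, "hello", strlen("hello"), 0, &abs_timeout) < 0)
		perror("mq_timedsend");	/* EINVAL for a malformed timespec */

	mq_close(q);
	return 0;
}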
diff --git a/kernel/Makefile b/kernel/Makefile
index a987aa1676b5..2e7b06adbd52 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -68,15 +68,17 @@ obj-$(CONFIG_USER_NS) += user_namespace.o
obj-$(CONFIG_PID_NS) += pid_namespace.o
obj-$(CONFIG_IKCONFIG) += configs.o
obj-$(CONFIG_RESOURCE_COUNTERS) += res_counter.o
-obj-$(CONFIG_STOP_MACHINE) += stop_machine.o
+obj-$(CONFIG_SMP) += stop_machine.o
obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
-obj-$(CONFIG_AUDIT) += audit.o auditfilter.o audit_watch.o
+obj-$(CONFIG_AUDIT) += audit.o auditfilter.o
obj-$(CONFIG_AUDITSYSCALL) += auditsc.o
-obj-$(CONFIG_GCOV_KERNEL) += gcov/
+obj-$(CONFIG_AUDIT_WATCH) += audit_watch.o
obj-$(CONFIG_AUDIT_TREE) += audit_tree.o
+obj-$(CONFIG_GCOV_KERNEL) += gcov/
obj-$(CONFIG_KPROBES) += kprobes.o
-obj-$(CONFIG_KGDB) += kgdb.o
+obj-$(CONFIG_KGDB) += debug/
obj-$(CONFIG_DETECT_SOFTLOCKUP) += softlockup.o
+obj-$(CONFIG_NMI_WATCHDOG) += nmi_watchdog.o
obj-$(CONFIG_DETECT_HUNG_TASK) += hung_task.o
obj-$(CONFIG_GENERIC_HARDIRQS) += irq/
obj-$(CONFIG_SECCOMP) += seccomp.o
diff --git a/kernel/acct.c b/kernel/acct.c
index 24f8c81fc48d..9e53bb2acfff 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -216,7 +216,6 @@ static int acct_on(char *name)
{
struct file *file;
struct vfsmount *mnt;
- int error;
struct pid_namespace *ns;
struct bsd_acct_struct *acct = NULL;
@@ -244,13 +243,6 @@ static int acct_on(char *name)
}
}
- error = security_acct(file);
- if (error) {
- kfree(acct);
- filp_close(file, NULL);
- return error;
- }
-
spin_lock(&acct_lock);
if (ns->bacct == NULL) {
ns->bacct = acct;
@@ -281,7 +273,7 @@ static int acct_on(char *name)
*/
SYSCALL_DEFINE1(acct, const char __user *, name)
{
- int error;
+ int error = 0;
if (!capable(CAP_SYS_PACCT))
return -EPERM;
@@ -299,13 +291,11 @@ SYSCALL_DEFINE1(acct, const char __user *, name)
if (acct == NULL)
return 0;
- error = security_acct(NULL);
- if (!error) {
- spin_lock(&acct_lock);
- acct_file_reopen(acct, NULL, NULL);
- spin_unlock(&acct_lock);
- }
+ spin_lock(&acct_lock);
+ acct_file_reopen(acct, NULL, NULL);
+ spin_unlock(&acct_lock);
}
+
return error;
}
diff --git a/kernel/audit.c b/kernel/audit.c
index c71bd26631a2..05a32f0d87dc 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -56,7 +56,6 @@
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
-#include <linux/inotify.h>
#include <linux/freezer.h>
#include <linux/tty.h>
diff --git a/kernel/audit.h b/kernel/audit.h
index 208687be4f30..f7206db4e13d 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -103,21 +103,27 @@ extern struct mutex audit_filter_mutex;
extern void audit_free_rule_rcu(struct rcu_head *);
extern struct list_head audit_filter_list[];
+extern struct audit_entry *audit_dupe_rule(struct audit_krule *old);
+
/* audit watch functions */
-extern unsigned long audit_watch_inode(struct audit_watch *watch);
-extern dev_t audit_watch_dev(struct audit_watch *watch);
+#ifdef CONFIG_AUDIT_WATCH
extern void audit_put_watch(struct audit_watch *watch);
extern void audit_get_watch(struct audit_watch *watch);
extern int audit_to_watch(struct audit_krule *krule, char *path, int len, u32 op);
-extern int audit_add_watch(struct audit_krule *krule);
-extern void audit_remove_watch(struct audit_watch *watch);
-extern void audit_remove_watch_rule(struct audit_krule *krule, struct list_head *list);
-extern void audit_inotify_unregister(struct list_head *in_list);
+extern int audit_add_watch(struct audit_krule *krule, struct list_head **list);
+extern void audit_remove_watch_rule(struct audit_krule *krule);
extern char *audit_watch_path(struct audit_watch *watch);
-extern struct list_head *audit_watch_rules(struct audit_watch *watch);
-
-extern struct audit_entry *audit_dupe_rule(struct audit_krule *old,
- struct audit_watch *watch);
+extern int audit_watch_compare(struct audit_watch *watch, unsigned long ino, dev_t dev);
+#else
+#define audit_put_watch(w) {}
+#define audit_get_watch(w) {}
+#define audit_to_watch(k, p, l, o) (-EINVAL)
+#define audit_add_watch(k, l) (-EINVAL)
+#define audit_remove_watch_rule(k) BUG()
+#define audit_watch_path(w) ""
+#define audit_watch_compare(w, i, d) 0
+
+#endif /* CONFIG_AUDIT_WATCH */
#ifdef CONFIG_AUDIT_TREE
extern struct audit_chunk *audit_tree_lookup(const struct inode *);
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 46a57b57a335..cfb97d752a61 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -1,5 +1,5 @@
#include "audit.h"
-#include <linux/inotify.h>
+#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
@@ -22,7 +22,7 @@ struct audit_tree {
struct audit_chunk {
struct list_head hash;
- struct inotify_watch watch;
+ struct fsnotify_mark mark;
struct list_head trees; /* with root here */
int dead;
int count;
@@ -59,7 +59,7 @@ static LIST_HEAD(prune_list);
* tree is refcounted; one reference for "some rules on rules_list refer to
* it", one for each chunk with pointer to it.
*
- * chunk is refcounted by embedded inotify_watch + .refs (non-zero refcount
+ * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
* of watch contributes 1 to .refs).
*
* node.index allows to get from node.list to containing chunk.
@@ -68,7 +68,7 @@ static LIST_HEAD(prune_list);
* that makes a difference. Some.
*/
-static struct inotify_handle *rtree_ih;
+static struct fsnotify_group *audit_tree_group;
static struct audit_tree *alloc_tree(const char *s)
{
@@ -111,29 +111,6 @@ const char *audit_tree_path(struct audit_tree *tree)
return tree->pathname;
}
-static struct audit_chunk *alloc_chunk(int count)
-{
- struct audit_chunk *chunk;
- size_t size;
- int i;
-
- size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
- chunk = kzalloc(size, GFP_KERNEL);
- if (!chunk)
- return NULL;
-
- INIT_LIST_HEAD(&chunk->hash);
- INIT_LIST_HEAD(&chunk->trees);
- chunk->count = count;
- atomic_long_set(&chunk->refs, 1);
- for (i = 0; i < count; i++) {
- INIT_LIST_HEAD(&chunk->owners[i].list);
- chunk->owners[i].index = i;
- }
- inotify_init_watch(&chunk->watch);
- return chunk;
-}
-
static void free_chunk(struct audit_chunk *chunk)
{
int i;
@@ -157,6 +134,35 @@ static void __put_chunk(struct rcu_head *rcu)
audit_put_chunk(chunk);
}
+static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
+{
+ struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
+ call_rcu(&chunk->head, __put_chunk);
+}
+
+static struct audit_chunk *alloc_chunk(int count)
+{
+ struct audit_chunk *chunk;
+ size_t size;
+ int i;
+
+ size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
+ chunk = kzalloc(size, GFP_KERNEL);
+ if (!chunk)
+ return NULL;
+
+ INIT_LIST_HEAD(&chunk->hash);
+ INIT_LIST_HEAD(&chunk->trees);
+ chunk->count = count;
+ atomic_long_set(&chunk->refs, 1);
+ for (i = 0; i < count; i++) {
+ INIT_LIST_HEAD(&chunk->owners[i].list);
+ chunk->owners[i].index = i;
+ }
+ fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
+ return chunk;
+}
+
enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);
@@ -167,10 +173,15 @@ static inline struct list_head *chunk_hash(const struct inode *inode)
return chunk_hash_heads + n % HASH_SIZE;
}
-/* hash_lock is held by caller */
+/* hash_lock & entry->lock is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
- struct list_head *list = chunk_hash(chunk->watch.inode);
+ struct fsnotify_mark *entry = &chunk->mark;
+ struct list_head *list;
+
+ if (!entry->i.inode)
+ return;
+ list = chunk_hash(entry->i.inode);
list_add_rcu(&chunk->hash, list);
}
@@ -181,7 +192,8 @@ struct audit_chunk *audit_tree_lookup(const struct inode *inode)
struct audit_chunk *p;
list_for_each_entry_rcu(p, list, hash) {
- if (p->watch.inode == inode) {
+ /* mark.inode may have gone NULL, but who cares? */
+ if (p->mark.i.inode == inode) {
atomic_long_inc(&p->refs);
return p;
}
@@ -210,38 +222,19 @@ static struct audit_chunk *find_chunk(struct node *p)
static void untag_chunk(struct node *p)
{
struct audit_chunk *chunk = find_chunk(p);
+ struct fsnotify_mark *entry = &chunk->mark;
struct audit_chunk *new;
struct audit_tree *owner;
int size = chunk->count - 1;
int i, j;
- if (!pin_inotify_watch(&chunk->watch)) {
- /*
- * Filesystem is shutting down; all watches are getting
- * evicted, just take it off the node list for this
- * tree and let the eviction logics take care of the
- * rest.
- */
- owner = p->owner;
- if (owner->root == chunk) {
- list_del_init(&owner->same_root);
- owner->root = NULL;
- }
- list_del_init(&p->list);
- p->owner = NULL;
- put_tree(owner);
- return;
- }
+ fsnotify_get_mark(entry);
spin_unlock(&hash_lock);
- /*
- * pin_inotify_watch() succeeded, so the watch won't go away
- * from under us.
- */
- mutex_lock(&chunk->watch.inode->inotify_mutex);
- if (chunk->dead) {
- mutex_unlock(&chunk->watch.inode->inotify_mutex);
+ spin_lock(&entry->lock);
+ if (chunk->dead || !entry->i.inode) {
+ spin_unlock(&entry->lock);
goto out;
}
@@ -256,16 +249,17 @@ static void untag_chunk(struct node *p)
list_del_init(&p->list);
list_del_rcu(&chunk->hash);
spin_unlock(&hash_lock);
- inotify_evict_watch(&chunk->watch);
- mutex_unlock(&chunk->watch.inode->inotify_mutex);
- put_inotify_watch(&chunk->watch);
+ spin_unlock(&entry->lock);
+ fsnotify_destroy_mark(entry);
+ fsnotify_put_mark(entry);
goto out;
}
new = alloc_chunk(size);
if (!new)
goto Fallback;
- if (inotify_clone_watch(&chunk->watch, &new->watch) < 0) {
+ fsnotify_duplicate_mark(&new->mark, entry);
+ if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) {
free_chunk(new);
goto Fallback;
}
@@ -298,9 +292,9 @@ static void untag_chunk(struct node *p)
list_for_each_entry(owner, &new->trees, same_root)
owner->root = new;
spin_unlock(&hash_lock);
- inotify_evict_watch(&chunk->watch);
- mutex_unlock(&chunk->watch.inode->inotify_mutex);
- put_inotify_watch(&chunk->watch);
+ spin_unlock(&entry->lock);
+ fsnotify_destroy_mark(entry);
+ fsnotify_put_mark(entry);
goto out;
Fallback:
@@ -314,31 +308,33 @@ Fallback:
p->owner = NULL;
put_tree(owner);
spin_unlock(&hash_lock);
- mutex_unlock(&chunk->watch.inode->inotify_mutex);
+ spin_unlock(&entry->lock);
out:
- unpin_inotify_watch(&chunk->watch);
+ fsnotify_put_mark(entry);
spin_lock(&hash_lock);
}
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
+ struct fsnotify_mark *entry;
struct audit_chunk *chunk = alloc_chunk(1);
if (!chunk)
return -ENOMEM;
- if (inotify_add_watch(rtree_ih, &chunk->watch, inode, IN_IGNORED | IN_DELETE_SELF) < 0) {
+ entry = &chunk->mark;
+ if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
free_chunk(chunk);
return -ENOSPC;
}
- mutex_lock(&inode->inotify_mutex);
+ spin_lock(&entry->lock);
spin_lock(&hash_lock);
if (tree->goner) {
spin_unlock(&hash_lock);
chunk->dead = 1;
- inotify_evict_watch(&chunk->watch);
- mutex_unlock(&inode->inotify_mutex);
- put_inotify_watch(&chunk->watch);
+ spin_unlock(&entry->lock);
+ fsnotify_destroy_mark(entry);
+ fsnotify_put_mark(entry);
return 0;
}
chunk->owners[0].index = (1U << 31);
@@ -351,30 +347,31 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
}
insert_hash(chunk);
spin_unlock(&hash_lock);
- mutex_unlock(&inode->inotify_mutex);
+ spin_unlock(&entry->lock);
return 0;
}
/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
- struct inotify_watch *watch;
+ struct fsnotify_mark *old_entry, *chunk_entry;
struct audit_tree *owner;
struct audit_chunk *chunk, *old;
struct node *p;
int n;
- if (inotify_find_watch(rtree_ih, inode, &watch) < 0)
+ old_entry = fsnotify_find_inode_mark(audit_tree_group, inode);
+ if (!old_entry)
return create_chunk(inode, tree);
- old = container_of(watch, struct audit_chunk, watch);
+ old = container_of(old_entry, struct audit_chunk, mark);
/* are we already there? */
spin_lock(&hash_lock);
for (n = 0; n < old->count; n++) {
if (old->owners[n].owner == tree) {
spin_unlock(&hash_lock);
- put_inotify_watch(&old->watch);
+ fsnotify_put_mark(old_entry);
return 0;
}
}
@@ -382,25 +379,44 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
chunk = alloc_chunk(old->count + 1);
if (!chunk) {
- put_inotify_watch(&old->watch);
+ fsnotify_put_mark(old_entry);
return -ENOMEM;
}
- mutex_lock(&inode->inotify_mutex);
- if (inotify_clone_watch(&old->watch, &chunk->watch) < 0) {
- mutex_unlock(&inode->inotify_mutex);
- put_inotify_watch(&old->watch);
+ chunk_entry = &chunk->mark;
+
+ spin_lock(&old_entry->lock);
+ if (!old_entry->i.inode) {
+ /* old_entry is being shot, let's just lie */
+ spin_unlock(&old_entry->lock);
+ fsnotify_put_mark(old_entry);
free_chunk(chunk);
+ return -ENOENT;
+ }
+
+ fsnotify_duplicate_mark(chunk_entry, old_entry);
+ if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, NULL, 1)) {
+ spin_unlock(&old_entry->lock);
+ free_chunk(chunk);
+ fsnotify_put_mark(old_entry);
return -ENOSPC;
}
+
+ /* even though we hold old_entry->lock, this is safe since chunk_entry->lock could NEVER have been grabbed before */
+ spin_lock(&chunk_entry->lock);
spin_lock(&hash_lock);
+
+ /* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
if (tree->goner) {
spin_unlock(&hash_lock);
chunk->dead = 1;
- inotify_evict_watch(&chunk->watch);
- mutex_unlock(&inode->inotify_mutex);
- put_inotify_watch(&old->watch);
- put_inotify_watch(&chunk->watch);
+ spin_unlock(&chunk_entry->lock);
+ spin_unlock(&old_entry->lock);
+
+ fsnotify_destroy_mark(chunk_entry);
+
+ fsnotify_put_mark(chunk_entry);
+ fsnotify_put_mark(old_entry);
return 0;
}
list_replace_init(&old->trees, &chunk->trees);
@@ -426,10 +442,11 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
list_add(&tree->same_root, &chunk->trees);
}
spin_unlock(&hash_lock);
- inotify_evict_watch(&old->watch);
- mutex_unlock(&inode->inotify_mutex);
- put_inotify_watch(&old->watch); /* pair to inotify_find_watch */
- put_inotify_watch(&old->watch); /* and kill it */
+ spin_unlock(&chunk_entry->lock);
+ spin_unlock(&old_entry->lock);
+ fsnotify_destroy_mark(old_entry);
+ fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
+ fsnotify_put_mark(old_entry); /* and kill it */
return 0;
}
@@ -584,7 +601,9 @@ void audit_trim_trees(void)
spin_lock(&hash_lock);
list_for_each_entry(node, &tree->chunks, list) {
- struct inode *inode = find_chunk(node)->watch.inode;
+ struct audit_chunk *chunk = find_chunk(node);
+ /* this could be NULL if the watch is dying elsewhere... */
+ struct inode *inode = chunk->mark.i.inode;
node->index |= 1U<<31;
if (iterate_mounts(compare_root, inode, root_mnt))
node->index &= ~(1U<<31);
@@ -846,7 +865,6 @@ void audit_kill_trees(struct list_head *list)
* Here comes the stuff asynchronous to auditctl operations
*/
-/* inode->inotify_mutex is locked */
static void evict_chunk(struct audit_chunk *chunk)
{
struct audit_tree *owner;
@@ -885,35 +903,42 @@ static void evict_chunk(struct audit_chunk *chunk)
mutex_unlock(&audit_filter_mutex);
}
-static void handle_event(struct inotify_watch *watch, u32 wd, u32 mask,
- u32 cookie, const char *dname, struct inode *inode)
+static int audit_tree_handle_event(struct fsnotify_group *group, struct fsnotify_event *event)
{
- struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
+ BUG();
+ return -EOPNOTSUPP;
+}
- if (mask & IN_IGNORED) {
- evict_chunk(chunk);
- put_inotify_watch(watch);
- }
+static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
+{
+ struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
+
+ evict_chunk(chunk);
+ fsnotify_put_mark(entry);
}
-static void destroy_watch(struct inotify_watch *watch)
+static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode,
+ struct vfsmount *mnt, __u32 mask, void *data,
+ int data_type)
{
- struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
- call_rcu(&chunk->head, __put_chunk);
+ return 0;
}
-static const struct inotify_operations rtree_inotify_ops = {
- .handle_event = handle_event,
- .destroy_watch = destroy_watch,
+static const struct fsnotify_ops audit_tree_ops = {
+ .handle_event = audit_tree_handle_event,
+ .should_send_event = audit_tree_send_event,
+ .free_group_priv = NULL,
+ .free_event_priv = NULL,
+ .freeing_mark = audit_tree_freeing_mark,
};
static int __init audit_tree_init(void)
{
int i;
- rtree_ih = inotify_init(&rtree_inotify_ops);
- if (IS_ERR(rtree_ih))
- audit_panic("cannot initialize inotify handle for rectree watches");
+ audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
+ if (IS_ERR(audit_tree_group))
+ audit_panic("cannot initialize fsnotify group for rectree watches");
for (i = 0; i < HASH_SIZE; i++)
INIT_LIST_HEAD(&chunk_hash_heads[i]);
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index 8df43696f4ba..7499397a6100 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -24,18 +24,18 @@
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/fs.h>
+#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/netlink.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/inotify.h>
#include <linux/security.h>
#include "audit.h"
/*
* Reference counting:
*
- * audit_parent: lifetime is from audit_init_parent() to receipt of an IN_IGNORED
+ * audit_parent: lifetime is from audit_init_parent() to receipt of an FS_IGNORED
* event. Each audit_watch holds a reference to its associated parent.
*
* audit_watch: if added to lists, lifetime is from audit_init_watch() to
@@ -51,40 +51,61 @@ struct audit_watch {
unsigned long ino; /* associated inode number */
struct audit_parent *parent; /* associated parent */
struct list_head wlist; /* entry in parent->watches list */
- struct list_head rules; /* associated rules */
+ struct list_head rules; /* anchor for krule->rlist */
};
struct audit_parent {
- struct list_head ilist; /* entry in inotify registration list */
- struct list_head watches; /* associated watches */
- struct inotify_watch wdata; /* inotify watch data */
- unsigned flags; /* status flags */
+ struct list_head watches; /* anchor for audit_watch->wlist */
+ struct fsnotify_mark mark; /* fsnotify mark on the inode */
};
-/* Inotify handle. */
-struct inotify_handle *audit_ih;
+/* fsnotify handle. */
+struct fsnotify_group *audit_watch_group;
-/*
- * audit_parent status flags:
- *
- * AUDIT_PARENT_INVALID - set anytime rules/watches are auto-removed due to
- * a filesystem event to ensure we're adding audit watches to a valid parent.
- * Technically not needed for IN_DELETE_SELF or IN_UNMOUNT events, as we cannot
- * receive them while we have nameidata, but must be used for IN_MOVE_SELF which
- * we can receive while holding nameidata.
- */
-#define AUDIT_PARENT_INVALID 0x001
+/* fsnotify events we care about. */
+#define AUDIT_FS_WATCH (FS_MOVE | FS_CREATE | FS_DELETE | FS_DELETE_SELF |\
+ FS_MOVE_SELF | FS_EVENT_ON_CHILD)
-/* Inotify events we care about. */
-#define AUDIT_IN_WATCH IN_MOVE|IN_CREATE|IN_DELETE|IN_DELETE_SELF|IN_MOVE_SELF
+static void audit_free_parent(struct audit_parent *parent)
+{
+ WARN_ON(!list_empty(&parent->watches));
+ kfree(parent);
+}
-static void audit_free_parent(struct inotify_watch *i_watch)
+static void audit_watch_free_mark(struct fsnotify_mark *entry)
{
struct audit_parent *parent;
- parent = container_of(i_watch, struct audit_parent, wdata);
- WARN_ON(!list_empty(&parent->watches));
- kfree(parent);
+ parent = container_of(entry, struct audit_parent, mark);
+ audit_free_parent(parent);
+}
+
+static void audit_get_parent(struct audit_parent *parent)
+{
+ if (likely(parent))
+ fsnotify_get_mark(&parent->mark);
+}
+
+static void audit_put_parent(struct audit_parent *parent)
+{
+ if (likely(parent))
+ fsnotify_put_mark(&parent->mark);
+}
+
+/*
+ * Find and return the audit_parent on the given inode. If found a reference
+ * is taken on this parent.
+ */
+static inline struct audit_parent *audit_find_parent(struct inode *inode)
+{
+ struct audit_parent *parent = NULL;
+ struct fsnotify_mark *entry;
+
+ entry = fsnotify_find_inode_mark(audit_watch_group, inode);
+ if (entry)
+ parent = container_of(entry, struct audit_parent, mark);
+
+ return parent;
}
void audit_get_watch(struct audit_watch *watch)
@@ -105,7 +126,7 @@ void audit_put_watch(struct audit_watch *watch)
void audit_remove_watch(struct audit_watch *watch)
{
list_del(&watch->wlist);
- put_inotify_watch(&watch->parent->wdata);
+ audit_put_parent(watch->parent);
watch->parent = NULL;
audit_put_watch(watch); /* match initial get */
}
@@ -115,44 +136,36 @@ char *audit_watch_path(struct audit_watch *watch)
return watch->path;
}
-struct list_head *audit_watch_rules(struct audit_watch *watch)
+int audit_watch_compare(struct audit_watch *watch, unsigned long ino, dev_t dev)
{
- return &watch->rules;
-}
-
-unsigned long audit_watch_inode(struct audit_watch *watch)
-{
- return watch->ino;
-}
-
-dev_t audit_watch_dev(struct audit_watch *watch)
-{
- return watch->dev;
+ return (watch->ino != (unsigned long)-1) &&
+ (watch->ino == ino) &&
+ (watch->dev == dev);
}
/* Initialize a parent watch entry. */
static struct audit_parent *audit_init_parent(struct nameidata *ndp)
{
+ struct inode *inode = ndp->path.dentry->d_inode;
struct audit_parent *parent;
- s32 wd;
+ int ret;
parent = kzalloc(sizeof(*parent), GFP_KERNEL);
if (unlikely(!parent))
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&parent->watches);
- parent->flags = 0;
-
- inotify_init_watch(&parent->wdata);
- /* grab a ref so inotify watch hangs around until we take audit_filter_mutex */
- get_inotify_watch(&parent->wdata);
- wd = inotify_add_watch(audit_ih, &parent->wdata,
- ndp->path.dentry->d_inode, AUDIT_IN_WATCH);
- if (wd < 0) {
- audit_free_parent(&parent->wdata);
- return ERR_PTR(wd);
+
+ fsnotify_init_mark(&parent->mark, audit_watch_free_mark);
+ parent->mark.mask = AUDIT_FS_WATCH;
+ ret = fsnotify_add_mark(&parent->mark, audit_watch_group, inode, NULL, 0);
+ if (ret < 0) {
+ audit_free_parent(parent);
+ return ERR_PTR(ret);
}
+ fsnotify_recalc_group_mask(audit_watch_group);
+
return parent;
}
@@ -179,7 +192,7 @@ int audit_to_watch(struct audit_krule *krule, char *path, int len, u32 op)
{
struct audit_watch *watch;
- if (!audit_ih)
+ if (!audit_watch_group)
return -EOPNOTSUPP;
if (path[0] != '/' || path[len-1] == '/' ||
@@ -217,7 +230,7 @@ static struct audit_watch *audit_dupe_watch(struct audit_watch *old)
new->dev = old->dev;
new->ino = old->ino;
- get_inotify_watch(&old->parent->wdata);
+ audit_get_parent(old->parent);
new->parent = old->parent;
out:
@@ -251,15 +264,19 @@ static void audit_update_watch(struct audit_parent *parent,
struct audit_entry *oentry, *nentry;
mutex_lock(&audit_filter_mutex);
+ /* Run all of the watches on this parent looking for the one that
+ * matches the given dname */
list_for_each_entry_safe(owatch, nextw, &parent->watches, wlist) {
if (audit_compare_dname_path(dname, owatch->path, NULL))
continue;
/* If the update involves invalidating rules, do the inode-based
* filtering now, so we don't omit records. */
- if (invalidating && current->audit_context)
+ if (invalidating && !audit_dummy_context())
audit_filter_inodes(current, current->audit_context);
+ /* updating ino will likely change which audit_hash_list we
+ * are on so we need a new watch for the new list */
nwatch = audit_dupe_watch(owatch);
if (IS_ERR(nwatch)) {
mutex_unlock(&audit_filter_mutex);
@@ -275,12 +292,21 @@ static void audit_update_watch(struct audit_parent *parent,
list_del(&oentry->rule.rlist);
list_del_rcu(&oentry->list);
- nentry = audit_dupe_rule(&oentry->rule, nwatch);
+ nentry = audit_dupe_rule(&oentry->rule);
if (IS_ERR(nentry)) {
list_del(&oentry->rule.list);
audit_panic("error updating watch, removing");
} else {
int h = audit_hash_ino((u32)ino);
+
+ /*
+ * nentry->rule.watch == oentry->rule.watch so
+ * we must drop that reference and set it to our
+ * new watch.
+ */
+ audit_put_watch(nentry->rule.watch);
+ audit_get_watch(nwatch);
+ nentry->rule.watch = nwatch;
list_add(&nentry->rule.rlist, &nwatch->rules);
list_add_rcu(&nentry->list, &audit_inode_hash[h]);
list_replace(&oentry->rule.list,
@@ -312,7 +338,6 @@ static void audit_remove_parent_watches(struct audit_parent *parent)
struct audit_entry *e;
mutex_lock(&audit_filter_mutex);
- parent->flags |= AUDIT_PARENT_INVALID;
list_for_each_entry_safe(w, nextw, &parent->watches, wlist) {
list_for_each_entry_safe(r, nextr, &w->rules, rlist) {
e = container_of(r, struct audit_entry, rule);
@@ -325,20 +350,11 @@ static void audit_remove_parent_watches(struct audit_parent *parent)
audit_remove_watch(w);
}
mutex_unlock(&audit_filter_mutex);
-}
-/* Unregister inotify watches for parents on in_list.
- * Generates an IN_IGNORED event. */
-void audit_inotify_unregister(struct list_head *in_list)
-{
- struct audit_parent *p, *n;
+ fsnotify_destroy_mark(&parent->mark);
+
+ fsnotify_recalc_group_mask(audit_watch_group);
- list_for_each_entry_safe(p, n, in_list, ilist) {
- list_del(&p->ilist);
- inotify_rm_watch(audit_ih, &p->wdata);
- /* the unpin matching the pin in audit_do_del_rule() */
- unpin_inotify_watch(&p->wdata);
- }
}
/* Get path information necessary for adding watches. */
@@ -389,7 +405,7 @@ static void audit_put_nd(struct nameidata *ndp, struct nameidata *ndw)
}
}
-/* Associate the given rule with an existing parent inotify_watch.
+/* Associate the given rule with an existing parent.
* Caller must hold audit_filter_mutex. */
static void audit_add_to_parent(struct audit_krule *krule,
struct audit_parent *parent)
@@ -397,6 +413,8 @@ static void audit_add_to_parent(struct audit_krule *krule,
struct audit_watch *w, *watch = krule->watch;
int watch_found = 0;
+ BUG_ON(!mutex_is_locked(&audit_filter_mutex));
+
list_for_each_entry(w, &parent->watches, wlist) {
if (strcmp(watch->path, w->path))
continue;
@@ -413,7 +431,7 @@ static void audit_add_to_parent(struct audit_krule *krule,
}
if (!watch_found) {
- get_inotify_watch(&parent->wdata);
+ audit_get_parent(parent);
watch->parent = parent;
list_add(&watch->wlist, &parent->watches);
@@ -423,13 +441,12 @@ static void audit_add_to_parent(struct audit_krule *krule,
/* Find a matching watch entry, or add this one.
* Caller must hold audit_filter_mutex. */
-int audit_add_watch(struct audit_krule *krule)
+int audit_add_watch(struct audit_krule *krule, struct list_head **list)
{
struct audit_watch *watch = krule->watch;
- struct inotify_watch *i_watch;
struct audit_parent *parent;
struct nameidata *ndp = NULL, *ndw = NULL;
- int ret = 0;
+ int h, ret = 0;
mutex_unlock(&audit_filter_mutex);
@@ -441,47 +458,38 @@ int audit_add_watch(struct audit_krule *krule)
goto error;
}
+ mutex_lock(&audit_filter_mutex);
+
/* update watch filter fields */
if (ndw) {
watch->dev = ndw->path.dentry->d_inode->i_sb->s_dev;
watch->ino = ndw->path.dentry->d_inode->i_ino;
}
- /* The audit_filter_mutex must not be held during inotify calls because
- * we hold it during inotify event callback processing. If an existing
- * inotify watch is found, inotify_find_watch() grabs a reference before
- * returning.
- */
- if (inotify_find_watch(audit_ih, ndp->path.dentry->d_inode,
- &i_watch) < 0) {
+ /* either find an old parent or attach a new one */
+ parent = audit_find_parent(ndp->path.dentry->d_inode);
+ if (!parent) {
parent = audit_init_parent(ndp);
if (IS_ERR(parent)) {
- /* caller expects mutex locked */
- mutex_lock(&audit_filter_mutex);
ret = PTR_ERR(parent);
goto error;
}
- } else
- parent = container_of(i_watch, struct audit_parent, wdata);
-
- mutex_lock(&audit_filter_mutex);
+ }
- /* parent was moved before we took audit_filter_mutex */
- if (parent->flags & AUDIT_PARENT_INVALID)
- ret = -ENOENT;
- else
- audit_add_to_parent(krule, parent);
+ audit_add_to_parent(krule, parent);
- /* match get in audit_init_parent or inotify_find_watch */
- put_inotify_watch(&parent->wdata);
+ /* match get in audit_find_parent or audit_init_parent */
+ audit_put_parent(parent);
+ h = audit_hash_ino((u32)watch->ino);
+ *list = &audit_inode_hash[h];
error:
audit_put_nd(ndp, ndw); /* NULL args OK */
return ret;
}
-void audit_remove_watch_rule(struct audit_krule *krule, struct list_head *list)
+void audit_remove_watch_rule(struct audit_krule *krule)
{
struct audit_watch *watch = krule->watch;
struct audit_parent *parent = watch->parent;
@@ -492,53 +500,92 @@ void audit_remove_watch_rule(struct audit_krule *krule, struct list_head *list)
audit_remove_watch(watch);
if (list_empty(&parent->watches)) {
- /* Put parent on the inotify un-registration
- * list. Grab a reference before releasing
- * audit_filter_mutex, to be released in
- * audit_inotify_unregister().
- * If filesystem is going away, just leave
- * the sucker alone, eviction will take
- * care of it. */
- if (pin_inotify_watch(&parent->wdata))
- list_add(&parent->ilist, list);
+ audit_get_parent(parent);
+ fsnotify_destroy_mark(&parent->mark);
+ audit_put_parent(parent);
}
}
+
+ fsnotify_recalc_group_mask(audit_watch_group);
+
}
-/* Update watch data in audit rules based on inotify events. */
-static void audit_handle_ievent(struct inotify_watch *i_watch, u32 wd, u32 mask,
- u32 cookie, const char *dname, struct inode *inode)
+static bool audit_watch_should_send_event(struct fsnotify_group *group, struct inode *inode,
+ struct vfsmount *mnt, __u32 mask, void *data,
+ int data_type)
{
+ struct fsnotify_mark *entry;
+ bool send;
+
+ entry = fsnotify_find_inode_mark(group, inode);
+ if (!entry)
+ return false;
+
+ mask = (mask & ~FS_EVENT_ON_CHILD);
+ send = (entry->mask & mask);
+
+ /* find took a reference */
+ fsnotify_put_mark(entry);
+
+ return send;
+}
+
+/* Update watch data in audit rules based on fsnotify events. */
+static int audit_watch_handle_event(struct fsnotify_group *group, struct fsnotify_event *event)
+{
+ struct inode *inode;
+ __u32 mask = event->mask;
+ const char *dname = event->file_name;
struct audit_parent *parent;
- parent = container_of(i_watch, struct audit_parent, wdata);
+ BUG_ON(group != audit_watch_group);
- if (mask & (IN_CREATE|IN_MOVED_TO) && inode)
- audit_update_watch(parent, dname, inode->i_sb->s_dev,
- inode->i_ino, 0);
- else if (mask & (IN_DELETE|IN_MOVED_FROM))
+ parent = audit_find_parent(event->to_tell);
+ if (unlikely(!parent))
+ return 0;
+
+ switch (event->data_type) {
+ case (FSNOTIFY_EVENT_PATH):
+ inode = event->path.dentry->d_inode;
+ break;
+ case (FSNOTIFY_EVENT_INODE):
+ inode = event->inode;
+ break;
+ default:
+ BUG();
+ inode = NULL;
+ break;
+ };
+
+ if (mask & (FS_CREATE|FS_MOVED_TO) && inode)
+ audit_update_watch(parent, dname, inode->i_sb->s_dev, inode->i_ino, 0);
+ else if (mask & (FS_DELETE|FS_MOVED_FROM))
audit_update_watch(parent, dname, (dev_t)-1, (unsigned long)-1, 1);
- /* inotify automatically removes the watch and sends IN_IGNORED */
- else if (mask & (IN_DELETE_SELF|IN_UNMOUNT))
+ else if (mask & (FS_DELETE_SELF|FS_UNMOUNT|FS_MOVE_SELF))
audit_remove_parent_watches(parent);
- /* inotify does not remove the watch, so remove it manually */
- else if(mask & IN_MOVE_SELF) {
- audit_remove_parent_watches(parent);
- inotify_remove_watch_locked(audit_ih, i_watch);
- } else if (mask & IN_IGNORED)
- put_inotify_watch(i_watch);
+ /* moved put_inotify_watch to freeing mark */
+
+ /* matched the ref taken by audit_find_parent */
+ audit_put_parent(parent);
+
+ return 0;
}
-static const struct inotify_operations audit_inotify_ops = {
- .handle_event = audit_handle_ievent,
- .destroy_watch = audit_free_parent,
+static const struct fsnotify_ops audit_watch_fsnotify_ops = {
+ .should_send_event = audit_watch_should_send_event,
+ .handle_event = audit_watch_handle_event,
+ .free_group_priv = NULL,
+ .freeing_mark = NULL,
+ .free_event_priv = NULL,
};
static int __init audit_watch_init(void)
{
- audit_ih = inotify_init(&audit_inotify_ops);
- if (IS_ERR(audit_ih))
- audit_panic("cannot initialize inotify handle");
+ audit_watch_group = fsnotify_alloc_group(&audit_watch_fsnotify_ops);
+ if (IS_ERR(audit_watch_group)) {
+ audit_watch_group = NULL;
+ audit_panic("cannot create audit fsnotify group");
+ }
return 0;
}
-subsys_initcall(audit_watch_init);
+device_initcall(audit_watch_init);
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index ce08041f578d..eb7675499fb5 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -71,6 +71,7 @@ static inline void audit_free_rule(struct audit_entry *e)
{
int i;
struct audit_krule *erule = &e->rule;
+
/* some rules don't have associated watches */
if (erule->watch)
audit_put_watch(erule->watch);
@@ -746,8 +747,7 @@ static inline int audit_dupe_lsm_field(struct audit_field *df,
* rule with the new rule in the filterlist, then free the old rule.
* The rlist element is undefined; list manipulations are handled apart from
* the initial copy. */
-struct audit_entry *audit_dupe_rule(struct audit_krule *old,
- struct audit_watch *watch)
+struct audit_entry *audit_dupe_rule(struct audit_krule *old)
{
u32 fcount = old->field_count;
struct audit_entry *entry;
@@ -769,8 +769,8 @@ struct audit_entry *audit_dupe_rule(struct audit_krule *old,
new->prio = old->prio;
new->buflen = old->buflen;
new->inode_f = old->inode_f;
- new->watch = NULL;
new->field_count = old->field_count;
+
/*
* note that we are OK with not refcounting here; audit_match_tree()
* never dereferences tree and we can't get false positives there
@@ -811,9 +811,9 @@ struct audit_entry *audit_dupe_rule(struct audit_krule *old,
}
}
- if (watch) {
- audit_get_watch(watch);
- new->watch = watch;
+ if (old->watch) {
+ audit_get_watch(old->watch);
+ new->watch = old->watch;
}
return entry;
@@ -866,7 +866,7 @@ static inline int audit_add_rule(struct audit_entry *entry)
struct audit_watch *watch = entry->rule.watch;
struct audit_tree *tree = entry->rule.tree;
struct list_head *list;
- int h, err;
+ int err;
#ifdef CONFIG_AUDITSYSCALL
int dont_count = 0;
@@ -889,15 +889,11 @@ static inline int audit_add_rule(struct audit_entry *entry)
if (watch) {
/* audit_filter_mutex is dropped and re-taken during this call */
- err = audit_add_watch(&entry->rule);
+ err = audit_add_watch(&entry->rule, &list);
if (err) {
mutex_unlock(&audit_filter_mutex);
goto error;
}
- /* entry->rule.watch may have changed during audit_add_watch() */
- watch = entry->rule.watch;
- h = audit_hash_ino((u32)audit_watch_inode(watch));
- list = &audit_inode_hash[h];
}
if (tree) {
err = audit_add_tree_rule(&entry->rule);
@@ -949,7 +945,6 @@ static inline int audit_del_rule(struct audit_entry *entry)
struct audit_watch *watch = entry->rule.watch;
struct audit_tree *tree = entry->rule.tree;
struct list_head *list;
- LIST_HEAD(inotify_list);
int ret = 0;
#ifdef CONFIG_AUDITSYSCALL
int dont_count = 0;
@@ -969,7 +964,7 @@ static inline int audit_del_rule(struct audit_entry *entry)
}
if (e->rule.watch)
- audit_remove_watch_rule(&e->rule, &inotify_list);
+ audit_remove_watch_rule(&e->rule);
if (e->rule.tree)
audit_remove_tree_rule(&e->rule);
@@ -987,9 +982,6 @@ static inline int audit_del_rule(struct audit_entry *entry)
#endif
mutex_unlock(&audit_filter_mutex);
- if (!list_empty(&inotify_list))
- audit_inotify_unregister(&inotify_list);
-
out:
if (watch)
audit_put_watch(watch); /* match initial get */
@@ -1323,30 +1315,23 @@ static int update_lsm_rule(struct audit_krule *r)
{
struct audit_entry *entry = container_of(r, struct audit_entry, rule);
struct audit_entry *nentry;
- struct audit_watch *watch;
- struct audit_tree *tree;
int err = 0;
if (!security_audit_rule_known(r))
return 0;
- watch = r->watch;
- tree = r->tree;
- nentry = audit_dupe_rule(r, watch);
+ nentry = audit_dupe_rule(r);
if (IS_ERR(nentry)) {
/* save the first error encountered for the
* return value */
err = PTR_ERR(nentry);
audit_panic("error updating LSM filters");
- if (watch)
+ if (r->watch)
list_del(&r->rlist);
list_del_rcu(&entry->list);
list_del(&r->list);
} else {
- if (watch) {
- list_add(&nentry->rule.rlist, audit_watch_rules(watch));
- list_del(&r->rlist);
- } else if (tree)
+ if (r->watch || r->tree)
list_replace_init(&r->rlist, &nentry->rule.rlist);
list_replace_rcu(&entry->list, &nentry->list);
list_replace(&r->list, &nentry->rule.list);
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 3828ad5fb8f1..b87a63beb66c 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -65,7 +65,6 @@
#include <linux/binfmts.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
-#include <linux/inotify.h>
#include <linux/capability.h>
#include <linux/fs_struct.h>
@@ -549,9 +548,8 @@ static int audit_filter_rules(struct task_struct *tsk,
}
break;
case AUDIT_WATCH:
- if (name && audit_watch_inode(rule->watch) != (unsigned long)-1)
- result = (name->dev == audit_watch_dev(rule->watch) &&
- name->ino == audit_watch_inode(rule->watch));
+ if (name)
+ result = audit_watch_compare(rule->watch, name->ino, name->dev);
break;
case AUDIT_DIR:
if (ctx)
@@ -1726,7 +1724,7 @@ static inline void handle_one(const struct inode *inode)
struct audit_tree_refs *p;
struct audit_chunk *chunk;
int count;
- if (likely(list_empty(&inode->inotify_watches)))
+ if (likely(hlist_empty(&inode->i_fsnotify_marks)))
return;
context = current->audit_context;
p = context->trees;
@@ -1769,7 +1767,7 @@ retry:
seq = read_seqbegin(&rename_lock);
for(;;) {
struct inode *inode = d->d_inode;
- if (inode && unlikely(!list_empty(&inode->inotify_watches))) {
+ if (inode && unlikely(!hlist_empty(&inode->i_fsnotify_marks))) {
struct audit_chunk *chunk;
chunk = audit_tree_lookup(inode);
if (chunk) {
diff --git a/kernel/capability.c b/kernel/capability.c
index 9e4697e9b276..2f05303715a5 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -15,7 +15,6 @@
#include <linux/syscalls.h>
#include <linux/pid_namespace.h>
#include <asm/uaccess.h>
-#include "cred-internals.h"
/*
* Leveraged for setting/resetting capabilities
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 3a53c771e503..b3bfa7424977 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -3615,7 +3615,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
* @ss: the subsystem to load
*
* This function should be called in a modular subsystem's initcall. If the
- * subsytem is built as a module, it will be assigned a new subsys_id and set
+ * subsystem is built as a module, it will be assigned a new subsys_id and set
* up for use. If the subsystem is built-in anyway, work is delegated to the
* simpler cgroup_init_subsys.
*/
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index e5c0244962b0..ce71ed53e88f 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -89,10 +89,10 @@ struct cgroup_subsys freezer_subsys;
/* Locks taken and their ordering
* ------------------------------
- * css_set_lock
* cgroup_mutex (AKA cgroup_lock)
- * task->alloc_lock (AKA task_lock)
* freezer->lock
+ * css_set_lock
+ * task->alloc_lock (AKA task_lock)
* task->sighand->siglock
*
* cgroup code forces css_set_lock to be taken before task->alloc_lock
@@ -100,33 +100,38 @@ struct cgroup_subsys freezer_subsys;
* freezer_create(), freezer_destroy():
* cgroup_mutex [ by cgroup core ]
*
- * can_attach():
- * cgroup_mutex
+ * freezer_can_attach():
+ * cgroup_mutex (held by caller of can_attach)
*
- * cgroup_frozen():
+ * cgroup_freezing_or_frozen():
* task->alloc_lock (to get task's cgroup)
*
* freezer_fork() (preserving fork() performance means can't take cgroup_mutex):
- * task->alloc_lock (to get task's cgroup)
* freezer->lock
* sighand->siglock (if the cgroup is freezing)
*
* freezer_read():
* cgroup_mutex
* freezer->lock
+ * write_lock css_set_lock (cgroup iterator start)
+ * task->alloc_lock
* read_lock css_set_lock (cgroup iterator start)
*
* freezer_write() (freeze):
* cgroup_mutex
* freezer->lock
+ * write_lock css_set_lock (cgroup iterator start)
+ * task->alloc_lock
* read_lock css_set_lock (cgroup iterator start)
- * sighand->siglock
+ * sighand->siglock (fake signal delivery inside freeze_task())
*
* freezer_write() (unfreeze):
* cgroup_mutex
* freezer->lock
+ * write_lock css_set_lock (cgroup iterator start)
+ * task->alloc_lock
* read_lock css_set_lock (cgroup iterator start)
- * task->alloc_lock (to prevent races with freeze_task())
+ * task->alloc_lock (inside thaw_process(), prevents race with refrigerator())
* sighand->siglock
*/
static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 25bba73b1be3..545777574779 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -164,6 +164,7 @@ static inline void check_for_tasks(int cpu)
}
struct take_cpu_down_param {
+ struct task_struct *caller;
unsigned long mod;
void *hcpu;
};
@@ -172,6 +173,7 @@ struct take_cpu_down_param {
static int __ref take_cpu_down(void *_param)
{
struct take_cpu_down_param *param = _param;
+ unsigned int cpu = (unsigned long)param->hcpu;
int err;
/* Ensure this CPU doesn't handle any more interrupts. */
@@ -182,6 +184,8 @@ static int __ref take_cpu_down(void *_param)
raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod,
param->hcpu);
+ if (task_cpu(param->caller) == cpu)
+ move_task_off_dead_cpu(cpu, param->caller);
/* Force idle task to run as soon as we yield: it should
immediately notice cpu is offline and die quickly. */
sched_idle_next();
@@ -192,10 +196,10 @@ static int __ref take_cpu_down(void *_param)
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
int err, nr_calls = 0;
- cpumask_var_t old_allowed;
void *hcpu = (void *)(long)cpu;
unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
struct take_cpu_down_param tcd_param = {
+ .caller = current,
.mod = mod,
.hcpu = hcpu,
};
@@ -206,9 +210,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
if (!cpu_online(cpu))
return -EINVAL;
- if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
- return -ENOMEM;
-
cpu_hotplug_begin();
set_cpu_active(cpu, false);
err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
@@ -225,10 +226,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
goto out_release;
}
- /* Ensure that we are not runnable on dying cpu */
- cpumask_copy(old_allowed, &current->cpus_allowed);
- set_cpus_allowed_ptr(current, cpu_active_mask);
-
err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
if (err) {
set_cpu_active(cpu, true);
@@ -237,7 +234,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
hcpu) == NOTIFY_BAD)
BUG();
- goto out_allowed;
+ goto out_release;
}
BUG_ON(cpu_online(cpu));
@@ -255,8 +252,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
check_for_tasks(cpu);
-out_allowed:
- set_cpus_allowed_ptr(current, old_allowed);
out_release:
cpu_hotplug_done();
if (!err) {
@@ -264,7 +259,6 @@ out_release:
hcpu) == NOTIFY_BAD)
BUG();
}
- free_cpumask_var(old_allowed);
return err;
}
@@ -272,9 +266,6 @@ int __ref cpu_down(unsigned int cpu)
{
int err;
- err = stop_machine_create();
- if (err)
- return err;
cpu_maps_update_begin();
if (cpu_hotplug_disabled) {
@@ -286,7 +277,6 @@ int __ref cpu_down(unsigned int cpu)
out:
cpu_maps_update_done();
- stop_machine_destroy();
return err;
}
EXPORT_SYMBOL(cpu_down);
@@ -367,9 +357,6 @@ int disable_nonboot_cpus(void)
{
int cpu, first_cpu, error;
- error = stop_machine_create();
- if (error)
- return error;
cpu_maps_update_begin();
first_cpu = cpumask_first(cpu_online_mask);
/*
@@ -400,7 +387,6 @@ int disable_nonboot_cpus(void)
printk(KERN_ERR "Non-boot CPUs are not disabled\n");
}
cpu_maps_update_done();
- stop_machine_destroy();
return error;
}
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index d10946748ec2..9a50c5f6e727 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2182,19 +2182,52 @@ void __init cpuset_init_smp(void)
void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
{
mutex_lock(&callback_mutex);
- cpuset_cpus_allowed_locked(tsk, pmask);
+ task_lock(tsk);
+ guarantee_online_cpus(task_cs(tsk), pmask);
+ task_unlock(tsk);
mutex_unlock(&callback_mutex);
}
-/**
- * cpuset_cpus_allowed_locked - return cpus_allowed mask from a tasks cpuset.
- * Must be called with callback_mutex held.
- **/
-void cpuset_cpus_allowed_locked(struct task_struct *tsk, struct cpumask *pmask)
+int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
{
- task_lock(tsk);
- guarantee_online_cpus(task_cs(tsk), pmask);
- task_unlock(tsk);
+ const struct cpuset *cs;
+ int cpu;
+
+ rcu_read_lock();
+ cs = task_cs(tsk);
+ if (cs)
+ cpumask_copy(&tsk->cpus_allowed, cs->cpus_allowed);
+ rcu_read_unlock();
+
+ /*
+ * We own tsk->cpus_allowed, nobody can change it under us.
+ *
+ * But we used cs && cs->cpus_allowed lockless and thus can
+ * race with cgroup_attach_task() or update_cpumask() and get
+ * the wrong tsk->cpus_allowed. However, both cases imply the
+ * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
+ * which takes task_rq_lock().
+ *
+ * If we are called after it dropped the lock we must see all
+ * changes in tsk_cs()->cpus_allowed. Otherwise we can temporary
+ * set any mask even if it is not right from task_cs() pov,
+ * the pending set_cpus_allowed_ptr() will fix things.
+ */
+
+ cpu = cpumask_any_and(&tsk->cpus_allowed, cpu_active_mask);
+ if (cpu >= nr_cpu_ids) {
+ /*
+ * Either tsk->cpus_allowed is wrong (see above) or it
+ * is actually empty. The latter case is only possible
+ * if we are racing with remove_tasks_in_empty_cpuset().
+ * Like above we can temporary set any mask and rely on
+ * set_cpus_allowed_ptr() as synchronization point.
+ */
+ cpumask_copy(&tsk->cpus_allowed, cpu_possible_mask);
+ cpu = cpumask_any(cpu_active_mask);
+ }
+
+ return cpu;
}
void cpuset_init_current_mems_allowed(void)
@@ -2383,22 +2416,6 @@ int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
}
/**
- * cpuset_lock - lock out any changes to cpuset structures
- *
- * The out of memory (oom) code needs to mutex_lock cpusets
- * from being changed while it scans the tasklist looking for a
- * task in an overlapping cpuset. Expose callback_mutex via this
- * cpuset_lock() routine, so the oom code can lock it, before
- * locking the task list. The tasklist_lock is a spinlock, so
- * must be taken inside callback_mutex.
- */
-
-void cpuset_lock(void)
-{
- mutex_lock(&callback_mutex);
-}
-
-/**
* cpuset_unlock - release lock on cpuset changes
*
* Undo the lock taken in a previous cpuset_lock() call.
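[Editorial aside, not part of the patch: the new cpuset_cpus_allowed_fallback() above widens a task's cpus_allowed from its cpuset (or from cpu_possible_mask as a last resort) and hands back a runnable CPU. A minimal sketch of how a scheduler-side fallback path might use it follows; the function name pick_fallback_cpu() and the surrounding logic are assumptions made purely for illustration.]

	static int pick_fallback_cpu(struct task_struct *p)
	{
		int dest_cpu;

		/* Prefer a still-active CPU the task is already allowed on. */
		dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
		if (dest_cpu < nr_cpu_ids)
			return dest_cpu;

		/*
		 * Every allowed CPU is gone; let the cpuset code widen
		 * p->cpus_allowed and pick a usable CPU for us.
		 */
		return cpuset_cpus_allowed_fallback(p);
	}

[Any transient inconsistency in p->cpus_allowed is tolerable here for the reason given in the comment block above: a concurrent cpuset update ends in set_cpus_allowed_ptr(), which acts as the synchronization point.]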
diff --git a/kernel/cred-internals.h b/kernel/cred-internals.h
deleted file mode 100644
index 2dc4fc2d0bf1..000000000000
--- a/kernel/cred-internals.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* Internal credentials stuff
- *
- * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-
-/*
- * user.c
- */
-static inline void sched_switch_user(struct task_struct *p)
-{
-#ifdef CONFIG_USER_SCHED
- sched_move_task(p);
-#endif /* CONFIG_USER_SCHED */
-}
-
diff --git a/kernel/cred.c b/kernel/cred.c
index 62af1816c235..2c24870c55d1 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -17,7 +17,6 @@
#include <linux/init_task.h>
#include <linux/security.h>
#include <linux/cn_proc.h>
-#include "cred-internals.h"
#if 0
#define kdebug(FMT, ...) \
@@ -523,8 +522,6 @@ int commit_creds(struct cred *new)
#endif
BUG_ON(atomic_read(&new->usage) < 1);
- security_commit_creds(new, old);
-
get_cred(new); /* we will require a ref for the subj creds too */
/* dumpability changes */
@@ -560,8 +557,6 @@ int commit_creds(struct cred *new)
atomic_dec(&old->user->processes);
alter_cred_subscribers(old, -2);
- sched_switch_user(task);
-
/* send notifications */
if (new->uid != old->uid ||
new->euid != old->euid ||
diff --git a/kernel/debug/Makefile b/kernel/debug/Makefile
new file mode 100644
index 000000000000..a85edc339985
--- /dev/null
+++ b/kernel/debug/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the linux kernel debugger
+#
+
+obj-$(CONFIG_KGDB) += debug_core.o gdbstub.o
+obj-$(CONFIG_KGDB_KDB) += kdb/
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
new file mode 100644
index 000000000000..6f27324577dc
--- /dev/null
+++ b/kernel/debug/debug_core.c
@@ -0,0 +1,982 @@
+/*
+ * Kernel Debug Core
+ *
+ * Maintainer: Jason Wessel <jason.wessel@windriver.com>
+ *
+ * Copyright (C) 2000-2001 VERITAS Software Corporation.
+ * Copyright (C) 2002-2004 Timesys Corporation
+ * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com>
+ * Copyright (C) 2004 Pavel Machek <pavel@suse.cz>
+ * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org>
+ * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd.
+ * Copyright (C) 2005-2009 Wind River Systems, Inc.
+ * Copyright (C) 2007 MontaVista Software, Inc.
+ * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *
+ * Contributors at various stages not listed above:
+ * Jason Wessel ( jason.wessel@windriver.com )
+ * George Anzinger <george@mvista.com>
+ * Anurekh Saxena (anurekh.saxena@timesys.com)
+ * Lake Stevens Instrument Division (Glenn Engel)
+ * Jim Kingdon, Cygnus Support.
+ *
+ * Original KGDB stub: David Grothe <dave@gcom.com>,
+ * Tigran Aivazian <tigran@sco.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+#include <linux/pid_namespace.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/console.h>
+#include <linux/threads.h>
+#include <linux/uaccess.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ptrace.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/sysrq.h>
+#include <linux/init.h>
+#include <linux/kgdb.h>
+#include <linux/kdb.h>
+#include <linux/pid.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
+
+#include <asm/cacheflush.h>
+#include <asm/byteorder.h>
+#include <asm/atomic.h>
+#include <asm/system.h>
+
+#include "debug_core.h"
+
+static int kgdb_break_asap;
+
+struct debuggerinfo_struct kgdb_info[NR_CPUS];
+
+/**
+ * kgdb_connected - Is a host GDB connected to us?
+ */
+int kgdb_connected;
+EXPORT_SYMBOL_GPL(kgdb_connected);
+
+/* All the KGDB handlers are installed */
+int kgdb_io_module_registered;
+
+/* Guard for recursive entry */
+static int exception_level;
+
+struct kgdb_io *dbg_io_ops;
+static DEFINE_SPINLOCK(kgdb_registration_lock);
+
+/* kgdb console driver is loaded */
+static int kgdb_con_registered;
+/* determine if kgdb console output should be used */
+static int kgdb_use_con;
+/* Flag for alternate operations for early debugging */
+bool dbg_is_early = true;
+/* Next cpu to become the master debug core */
+int dbg_switch_cpu;
+
+/* Use kdb or gdbserver mode */
+int dbg_kdb_mode = 1;
+
+static int __init opt_kgdb_con(char *str)
+{
+ kgdb_use_con = 1;
+ return 0;
+}
+
+early_param("kgdbcon", opt_kgdb_con);
+
+module_param(kgdb_use_con, int, 0644);
+
+/*
+ * Holds information about breakpoints in a kernel. These breakpoints are
+ * added and removed by gdb.
+ */
+static struct kgdb_bkpt kgdb_break[KGDB_MAX_BREAKPOINTS] = {
+ [0 ... KGDB_MAX_BREAKPOINTS-1] = { .state = BP_UNDEFINED }
+};
+
+/*
+ * The CPU# of the active CPU, or -1 if none:
+ */
+atomic_t kgdb_active = ATOMIC_INIT(-1);
+EXPORT_SYMBOL_GPL(kgdb_active);
+
+/*
+ * We use NR_CPUS, not PERCPU, in case kgdb is used to debug early
+ * bootup code (which might not have percpu set up yet):
+ */
+static atomic_t passive_cpu_wait[NR_CPUS];
+static atomic_t cpu_in_kgdb[NR_CPUS];
+static atomic_t kgdb_break_tasklet_var;
+atomic_t kgdb_setting_breakpoint;
+
+struct task_struct *kgdb_usethread;
+struct task_struct *kgdb_contthread;
+
+int kgdb_single_step;
+static pid_t kgdb_sstep_pid;
+
+/* to keep track of the CPU which is doing the single stepping */
+atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
+
+/*
+ * If you are debugging a problem where roundup (the collection of
+ * all other CPUs) is a problem [this should be extremely rare],
+ * then use the nokgdbroundup option to avoid roundup. In that case
+ * the other CPUs might interfere with your debugging context, so
+ * use this with care:
+ */
+static int kgdb_do_roundup = 1;
+
+static int __init opt_nokgdbroundup(char *str)
+{
+ kgdb_do_roundup = 0;
+
+ return 0;
+}
+
+early_param("nokgdbroundup", opt_nokgdbroundup);
+
+/*
+ * Finally, some KGDB code :-)
+ */
+
+/*
+ * Weak aliases for breakpoint management,
+ * can be overridden by architectures when needed:
+ */
+int __weak kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr)
+{
+ int err;
+
+ err = probe_kernel_read(saved_instr, (char *)addr, BREAK_INSTR_SIZE);
+ if (err)
+ return err;
+
+ return probe_kernel_write((char *)addr, arch_kgdb_ops.gdb_bpt_instr,
+ BREAK_INSTR_SIZE);
+}
+
+int __weak kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle)
+{
+ return probe_kernel_write((char *)addr,
+ (char *)bundle, BREAK_INSTR_SIZE);
+}
+
+int __weak kgdb_validate_break_address(unsigned long addr)
+{
+ char tmp_variable[BREAK_INSTR_SIZE];
+ int err;
+ /* Validate setting the breakpoint and then removing it. If the
+ * remove fails, the kernel needs to emit a bad message because we
+ * are in deep trouble, not being able to put things back the way we
+ * found them.
+ */
+ err = kgdb_arch_set_breakpoint(addr, tmp_variable);
+ if (err)
+ return err;
+ err = kgdb_arch_remove_breakpoint(addr, tmp_variable);
+ if (err)
+ printk(KERN_ERR "KGDB: Critical breakpoint error, kernel "
+ "memory destroyed at: %lx", addr);
+ return err;
+}
+
+unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs)
+{
+ return instruction_pointer(regs);
+}
+
+int __weak kgdb_arch_init(void)
+{
+ return 0;
+}
+
+int __weak kgdb_skipexception(int exception, struct pt_regs *regs)
+{
+ return 0;
+}
+
+/**
+ * kgdb_disable_hw_debug - Disable hardware debugging while we are in kgdb.
+ * @regs: Current &struct pt_regs.
+ *
+ * This function will be called if the particular architecture must
+ * disable hardware debugging while it is processing gdb packets or
+ * handling an exception.
+ */
+void __weak kgdb_disable_hw_debug(struct pt_regs *regs)
+{
+}
+
+/*
+ * Some architectures need cache flushes when we set/clear a
+ * breakpoint:
+ */
+static void kgdb_flush_swbreak_addr(unsigned long addr)
+{
+ if (!CACHE_FLUSH_IS_SAFE)
+ return;
+
+ if (current->mm && current->mm->mmap_cache) {
+ flush_cache_range(current->mm->mmap_cache,
+ addr, addr + BREAK_INSTR_SIZE);
+ }
+ /* Force flush instruction cache if it was outside the mm */
+ flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
+}
+
+/*
+ * SW breakpoint management:
+ */
+int dbg_activate_sw_breakpoints(void)
+{
+ unsigned long addr;
+ int error;
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+ if (kgdb_break[i].state != BP_SET)
+ continue;
+
+ addr = kgdb_break[i].bpt_addr;
+ error = kgdb_arch_set_breakpoint(addr,
+ kgdb_break[i].saved_instr);
+ if (error) {
+ ret = error;
+ printk(KERN_INFO "KGDB: BP install failed: %lx", addr);
+ continue;
+ }
+
+ kgdb_flush_swbreak_addr(addr);
+ kgdb_break[i].state = BP_ACTIVE;
+ }
+ return ret;
+}
+
+int dbg_set_sw_break(unsigned long addr)
+{
+ int err = kgdb_validate_break_address(addr);
+ int breakno = -1;
+ int i;
+
+ if (err)
+ return err;
+
+ for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+ if ((kgdb_break[i].state == BP_SET) &&
+ (kgdb_break[i].bpt_addr == addr))
+ return -EEXIST;
+ }
+ for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+ if (kgdb_break[i].state == BP_REMOVED &&
+ kgdb_break[i].bpt_addr == addr) {
+ breakno = i;
+ break;
+ }
+ }
+
+ if (breakno == -1) {
+ for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+ if (kgdb_break[i].state == BP_UNDEFINED) {
+ breakno = i;
+ break;
+ }
+ }
+ }
+
+ if (breakno == -1)
+ return -E2BIG;
+
+ kgdb_break[breakno].state = BP_SET;
+ kgdb_break[breakno].type = BP_BREAKPOINT;
+ kgdb_break[breakno].bpt_addr = addr;
+
+ return 0;
+}
+
+int dbg_deactivate_sw_breakpoints(void)
+{
+ unsigned long addr;
+ int error;
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+ if (kgdb_break[i].state != BP_ACTIVE)
+ continue;
+ addr = kgdb_break[i].bpt_addr;
+ error = kgdb_arch_remove_breakpoint(addr,
+ kgdb_break[i].saved_instr);
+ if (error) {
+ printk(KERN_INFO "KGDB: BP remove failed: %lx\n", addr);
+ ret = error;
+ }
+
+ kgdb_flush_swbreak_addr(addr);
+ kgdb_break[i].state = BP_SET;
+ }
+ return ret;
+}
+
+int dbg_remove_sw_break(unsigned long addr)
+{
+ int i;
+
+ for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+ if ((kgdb_break[i].state == BP_SET) &&
+ (kgdb_break[i].bpt_addr == addr)) {
+ kgdb_break[i].state = BP_REMOVED;
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+int kgdb_isremovedbreak(unsigned long addr)
+{
+ int i;
+
+ for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+ if ((kgdb_break[i].state == BP_REMOVED) &&
+ (kgdb_break[i].bpt_addr == addr))
+ return 1;
+ }
+ return 0;
+}
+
+int dbg_remove_all_break(void)
+{
+ unsigned long addr;
+ int error;
+ int i;
+
+ /* Clear memory breakpoints. */
+ for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+ if (kgdb_break[i].state != BP_ACTIVE)
+ goto setundefined;
+ addr = kgdb_break[i].bpt_addr;
+ error = kgdb_arch_remove_breakpoint(addr,
+ kgdb_break[i].saved_instr);
+ if (error)
+ printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n",
+ addr);
+setundefined:
+ kgdb_break[i].state = BP_UNDEFINED;
+ }
+
+ /* Clear hardware breakpoints. */
+ if (arch_kgdb_ops.remove_all_hw_break)
+ arch_kgdb_ops.remove_all_hw_break();
+
+ return 0;
+}
+
+/*
+ * Return true if there is a valid kgdb I/O module. Also, if no
+ * debugger is attached, a message can be printed to the console about
+ * waiting for the debugger to attach.
+ *
+ * The print_wait argument should only be true when called from inside
+ * the core kgdb_handle_exception, because it will wait for the
+ * debugger to attach.
+ */
+static int kgdb_io_ready(int print_wait)
+{
+ if (!dbg_io_ops)
+ return 0;
+ if (kgdb_connected)
+ return 1;
+ if (atomic_read(&kgdb_setting_breakpoint))
+ return 1;
+ if (print_wait) {
+#ifdef CONFIG_KGDB_KDB
+ if (!dbg_kdb_mode)
+ printk(KERN_CRIT "KGDB: waiting... or $3#33 for KDB\n");
+#else
+ printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
+#endif
+ }
+ return 1;
+}
+
+static int kgdb_reenter_check(struct kgdb_state *ks)
+{
+ unsigned long addr;
+
+ if (atomic_read(&kgdb_active) != raw_smp_processor_id())
+ return 0;
+
+ /* Panic on recursive debugger calls: */
+ exception_level++;
+ addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
+ dbg_deactivate_sw_breakpoints();
+
+ /*
+ * If the breakpoint was removed successfully at the place the
+ * exception occurred, try to recover and print a warning to the end
+ * user, because the user planted a breakpoint in a place that
+ * KGDB needs in order to function.
+ */
+ if (dbg_remove_sw_break(addr) == 0) {
+ exception_level = 0;
+ kgdb_skipexception(ks->ex_vector, ks->linux_regs);
+ dbg_activate_sw_breakpoints();
+ printk(KERN_CRIT "KGDB: re-enter error: breakpoint removed %lx\n",
+ addr);
+ WARN_ON_ONCE(1);
+
+ return 1;
+ }
+ dbg_remove_all_break();
+ kgdb_skipexception(ks->ex_vector, ks->linux_regs);
+
+ if (exception_level > 1) {
+ dump_stack();
+ panic("Recursive entry to debugger");
+ }
+
+ printk(KERN_CRIT "KGDB: re-enter exception: ALL breakpoints killed\n");
+#ifdef CONFIG_KGDB_KDB
+ /* Allow kdb to debug itself one level */
+ return 0;
+#endif
+ dump_stack();
+ panic("Recursive entry to debugger");
+
+ return 1;
+}
+
+static void dbg_cpu_switch(int cpu, int next_cpu)
+{
+ /* Mark the cpu we are switching away from as a slave when it
+ * holds the kgdb_active token. This must be done so that the
+ * cpus that are waiting in the debug core will not enter
+ * again as the master. */
+ if (cpu == atomic_read(&kgdb_active)) {
+ kgdb_info[cpu].exception_state |= DCPU_IS_SLAVE;
+ kgdb_info[cpu].exception_state &= ~DCPU_WANT_MASTER;
+ }
+ kgdb_info[next_cpu].exception_state |= DCPU_NEXT_MASTER;
+}
+
+static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs)
+{
+ unsigned long flags;
+ int sstep_tries = 100;
+ int error;
+ int i, cpu;
+ int trace_on = 0;
+acquirelock:
+ /*
+ * Interrupts will be restored by the 'trap return' code, except when
+ * single stepping.
+ */
+ local_irq_save(flags);
+
+ cpu = ks->cpu;
+ kgdb_info[cpu].debuggerinfo = regs;
+ kgdb_info[cpu].task = current;
+ kgdb_info[cpu].ret_state = 0;
+ kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT;
+ /*
+ * Make sure the above info reaches the primary CPU before
+ * our cpu_in_kgdb[] flag setting does:
+ */
+ atomic_inc(&cpu_in_kgdb[cpu]);
+
+ if (exception_level == 1)
+ goto cpu_master_loop;
+
+ /*
+ * The CPU will loop if it is a slave, or will request to become
+ * the kgdb master cpu and acquire the kgdb_active lock:
+ */
+ while (1) {
+cpu_loop:
+ if (kgdb_info[cpu].exception_state & DCPU_NEXT_MASTER) {
+ kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER;
+ goto cpu_master_loop;
+ } else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
+ if (atomic_cmpxchg(&kgdb_active, -1, cpu) == cpu)
+ break;
+ } else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
+ if (!atomic_read(&passive_cpu_wait[cpu]))
+ goto return_normal;
+ } else {
+return_normal:
+ /* Return to normal operation by executing any
+ * hw breakpoint fixup.
+ */
+ if (arch_kgdb_ops.correct_hw_break)
+ arch_kgdb_ops.correct_hw_break();
+ if (trace_on)
+ tracing_on();
+ atomic_dec(&cpu_in_kgdb[cpu]);
+ touch_softlockup_watchdog_sync();
+ clocksource_touch_watchdog();
+ local_irq_restore(flags);
+ return 0;
+ }
+ cpu_relax();
+ }
+
+ /*
+ * For single stepping, try to only enter on the processor
+ * that was single stepping. To guard against a deadlock, the
+ * kernel will only try for the value of sstep_tries before
+ * giving up and continuing on.
+ */
+ if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
+ (kgdb_info[cpu].task &&
+ kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
+ atomic_set(&kgdb_active, -1);
+ touch_softlockup_watchdog_sync();
+ clocksource_touch_watchdog();
+ local_irq_restore(flags);
+
+ goto acquirelock;
+ }
+
+ if (!kgdb_io_ready(1)) {
+ kgdb_info[cpu].ret_state = 1;
+ goto kgdb_restore; /* No I/O connection, resume the system */
+ }
+
+ /*
+ * Don't enter if we have hit a removed breakpoint.
+ */
+ if (kgdb_skipexception(ks->ex_vector, ks->linux_regs))
+ goto kgdb_restore;
+
+ /* Call the I/O driver's pre_exception routine */
+ if (dbg_io_ops->pre_exception)
+ dbg_io_ops->pre_exception();
+
+ kgdb_disable_hw_debug(ks->linux_regs);
+
+ /*
+ * Get the passive CPU lock which will hold all the non-primary
+ * CPUs in a spin state while the debugger is active.
+ */
+ if (!kgdb_single_step) {
+ for (i = 0; i < NR_CPUS; i++)
+ atomic_inc(&passive_cpu_wait[i]);
+ }
+
+#ifdef CONFIG_SMP
+ /* Signal the other CPUs to enter kgdb_wait() */
+ if ((!kgdb_single_step) && kgdb_do_roundup)
+ kgdb_roundup_cpus(flags);
+#endif
+
+ /*
+ * Wait for the other CPUs to be notified and be waiting for us:
+ */
+ for_each_online_cpu(i) {
+ while (!atomic_read(&cpu_in_kgdb[i]))
+ cpu_relax();
+ }
+
+ /*
+ * At this point the primary processor is completely
+ * in the debugger and all secondary CPUs are quiescent
+ */
+ dbg_deactivate_sw_breakpoints();
+ kgdb_single_step = 0;
+ kgdb_contthread = current;
+ exception_level = 0;
+ trace_on = tracing_is_on();
+ if (trace_on)
+ tracing_off();
+
+ while (1) {
+cpu_master_loop:
+ if (dbg_kdb_mode) {
+ kgdb_connected = 1;
+ error = kdb_stub(ks);
+ } else {
+ error = gdb_serial_stub(ks);
+ }
+
+ if (error == DBG_PASS_EVENT) {
+ dbg_kdb_mode = !dbg_kdb_mode;
+ kgdb_connected = 0;
+ } else if (error == DBG_SWITCH_CPU_EVENT) {
+ dbg_cpu_switch(cpu, dbg_switch_cpu);
+ goto cpu_loop;
+ } else {
+ kgdb_info[cpu].ret_state = error;
+ break;
+ }
+ }
+
+ /* Call the I/O driver's post_exception routine */
+ if (dbg_io_ops->post_exception)
+ dbg_io_ops->post_exception();
+
+ atomic_dec(&cpu_in_kgdb[ks->cpu]);
+
+ if (!kgdb_single_step) {
+ for (i = NR_CPUS-1; i >= 0; i--)
+ atomic_dec(&passive_cpu_wait[i]);
+ /*
+ * Wait till all the CPUs have quit from the debugger,
+ * but allow a CPU that hit an exception and is
+ * waiting to become the master to remain in the debug
+ * core.
+ */
+ for_each_online_cpu(i) {
+ while (atomic_read(&cpu_in_kgdb[i]) &&
+ !(kgdb_info[i].exception_state &
+ DCPU_WANT_MASTER))
+ cpu_relax();
+ }
+ }
+
+kgdb_restore:
+ if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
+ int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
+ if (kgdb_info[sstep_cpu].task)
+ kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
+ else
+ kgdb_sstep_pid = 0;
+ }
+ if (trace_on)
+ tracing_on();
+ /* Free kgdb_active */
+ atomic_set(&kgdb_active, -1);
+ touch_softlockup_watchdog_sync();
+ clocksource_touch_watchdog();
+ local_irq_restore(flags);
+
+ return kgdb_info[cpu].ret_state;
+}
+
+/*
+ * kgdb_handle_exception() - main entry point from a kernel exception
+ *
+ * Locking hierarchy:
+ * interface locks, if any (begin_session)
+ * kgdb lock (kgdb_active)
+ */
+int
+kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
+{
+ struct kgdb_state kgdb_var;
+ struct kgdb_state *ks = &kgdb_var;
+ int ret;
+
+ ks->cpu = raw_smp_processor_id();
+ ks->ex_vector = evector;
+ ks->signo = signo;
+ ks->err_code = ecode;
+ ks->kgdb_usethreadid = 0;
+ ks->linux_regs = regs;
+
+ if (kgdb_reenter_check(ks))
+ return 0; /* Ouch, double exception ! */
+ kgdb_info[ks->cpu].exception_state |= DCPU_WANT_MASTER;
+ ret = kgdb_cpu_enter(ks, regs);
+ kgdb_info[ks->cpu].exception_state &= ~(DCPU_WANT_MASTER |
+ DCPU_IS_SLAVE);
+ return ret;
+}
+
+int kgdb_nmicallback(int cpu, void *regs)
+{
+#ifdef CONFIG_SMP
+ struct kgdb_state kgdb_var;
+ struct kgdb_state *ks = &kgdb_var;
+
+ memset(ks, 0, sizeof(struct kgdb_state));
+ ks->cpu = cpu;
+ ks->linux_regs = regs;
+
+ if (!atomic_read(&cpu_in_kgdb[cpu]) &&
+ atomic_read(&kgdb_active) != -1 &&
+ atomic_read(&kgdb_active) != cpu) {
+ kgdb_info[cpu].exception_state |= DCPU_IS_SLAVE;
+ kgdb_cpu_enter(ks, regs);
+ kgdb_info[cpu].exception_state &= ~DCPU_IS_SLAVE;
+ return 0;
+ }
+#endif
+ return 1;
+}
+
+static void kgdb_console_write(struct console *co, const char *s,
+ unsigned count)
+{
+ unsigned long flags;
+
+ /* If we're debugging, or KGDB has not connected, don't try
+ * to print. */
+ if (!kgdb_connected || atomic_read(&kgdb_active) != -1 || dbg_kdb_mode)
+ return;
+
+ local_irq_save(flags);
+ gdbstub_msg_write(s, count);
+ local_irq_restore(flags);
+}
+
+static struct console kgdbcons = {
+ .name = "kgdb",
+ .write = kgdb_console_write,
+ .flags = CON_PRINTBUFFER | CON_ENABLED,
+ .index = -1,
+};
+
+#ifdef CONFIG_MAGIC_SYSRQ
+static void sysrq_handle_dbg(int key, struct tty_struct *tty)
+{
+ if (!dbg_io_ops) {
+ printk(KERN_CRIT "ERROR: No KGDB I/O module available\n");
+ return;
+ }
+ if (!kgdb_connected) {
+#ifdef CONFIG_KGDB_KDB
+ if (!dbg_kdb_mode)
+ printk(KERN_CRIT "KGDB or $3#33 for KDB\n");
+#else
+ printk(KERN_CRIT "Entering KGDB\n");
+#endif
+ }
+
+ kgdb_breakpoint();
+}
+
+static struct sysrq_key_op sysrq_dbg_op = {
+ .handler = sysrq_handle_dbg,
+ .help_msg = "debug(G)",
+ .action_msg = "DEBUG",
+};
+#endif
+
+static int kgdb_panic_event(struct notifier_block *self,
+ unsigned long val,
+ void *data)
+{
+ if (dbg_kdb_mode)
+ kdb_printf("PANIC: %s\n", (char *)data);
+ kgdb_breakpoint();
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block kgdb_panic_event_nb = {
+ .notifier_call = kgdb_panic_event,
+ .priority = INT_MAX,
+};
+
+void __weak kgdb_arch_late(void)
+{
+}
+
+void __init dbg_late_init(void)
+{
+ dbg_is_early = false;
+ if (kgdb_io_module_registered)
+ kgdb_arch_late();
+ kdb_init(KDB_INIT_FULL);
+}
+
+static void kgdb_register_callbacks(void)
+{
+ if (!kgdb_io_module_registered) {
+ kgdb_io_module_registered = 1;
+ kgdb_arch_init();
+ if (!dbg_is_early)
+ kgdb_arch_late();
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &kgdb_panic_event_nb);
+#ifdef CONFIG_MAGIC_SYSRQ
+ register_sysrq_key('g', &sysrq_dbg_op);
+#endif
+ if (kgdb_use_con && !kgdb_con_registered) {
+ register_console(&kgdbcons);
+ kgdb_con_registered = 1;
+ }
+ }
+}
+
+static void kgdb_unregister_callbacks(void)
+{
+ /*
+ * When this routine is called KGDB should unregister from the
+ * panic handler and clean up, making sure it is not handling any
+ * break exceptions at the time.
+ */
+ if (kgdb_io_module_registered) {
+ kgdb_io_module_registered = 0;
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &kgdb_panic_event_nb);
+ kgdb_arch_exit();
+#ifdef CONFIG_MAGIC_SYSRQ
+ unregister_sysrq_key('g', &sysrq_dbg_op);
+#endif
+ if (kgdb_con_registered) {
+ unregister_console(&kgdbcons);
+ kgdb_con_registered = 0;
+ }
+ }
+}
+
+/*
+ * There are times when a tasklet needs to be used instead of a compiled-in
+ * breakpoint so as to cause an exception outside a kgdb I/O module,
+ * such as is the case with kgdboe, where calling a breakpoint in the
+ * I/O driver itself would be fatal.
+ */
+static void kgdb_tasklet_bpt(unsigned long ing)
+{
+ kgdb_breakpoint();
+ atomic_set(&kgdb_break_tasklet_var, 0);
+}
+
+static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
+
+void kgdb_schedule_breakpoint(void)
+{
+ if (atomic_read(&kgdb_break_tasklet_var) ||
+ atomic_read(&kgdb_active) != -1 ||
+ atomic_read(&kgdb_setting_breakpoint))
+ return;
+ atomic_inc(&kgdb_break_tasklet_var);
+ tasklet_schedule(&kgdb_tasklet_breakpoint);
+}
+EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
+
+static void kgdb_initial_breakpoint(void)
+{
+ kgdb_break_asap = 0;
+
+ printk(KERN_CRIT "kgdb: Waiting for connection from remote gdb...\n");
+ kgdb_breakpoint();
+}
+
+/**
+ * kgdb_register_io_module - register KGDB IO module
+ * @new_dbg_io_ops: the io ops vector
+ *
+ * Register it with the KGDB core.
+ */
+int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
+{
+ int err;
+
+ spin_lock(&kgdb_registration_lock);
+
+ if (dbg_io_ops) {
+ spin_unlock(&kgdb_registration_lock);
+
+ printk(KERN_ERR "kgdb: Another I/O driver is already "
+ "registered with KGDB.\n");
+ return -EBUSY;
+ }
+
+ if (new_dbg_io_ops->init) {
+ err = new_dbg_io_ops->init();
+ if (err) {
+ spin_unlock(&kgdb_registration_lock);
+ return err;
+ }
+ }
+
+ dbg_io_ops = new_dbg_io_ops;
+
+ spin_unlock(&kgdb_registration_lock);
+
+ printk(KERN_INFO "kgdb: Registered I/O driver %s.\n",
+ new_dbg_io_ops->name);
+
+ /* Arm KGDB now. */
+ kgdb_register_callbacks();
+
+ if (kgdb_break_asap)
+ kgdb_initial_breakpoint();
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kgdb_register_io_module);
+
+/**
+ * kgdb_unregister_io_module - unregister KGDB IO module
+ * @old_dbg_io_ops: the io ops vector
+ *
+ * Unregister it with the KGDB core.
+ */
+void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops)
+{
+ BUG_ON(kgdb_connected);
+
+ /*
+ * KGDB is no longer able to communicate out, so
+ * unregister our callbacks and reset state.
+ */
+ kgdb_unregister_callbacks();
+
+ spin_lock(&kgdb_registration_lock);
+
+ WARN_ON_ONCE(dbg_io_ops != old_dbg_io_ops);
+ dbg_io_ops = NULL;
+
+ spin_unlock(&kgdb_registration_lock);
+
+ printk(KERN_INFO
+ "kgdb: Unregistered I/O driver %s, debugger disabled.\n",
+ old_dbg_io_ops->name);
+}
+EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
+
+int dbg_io_get_char(void)
+{
+ int ret = dbg_io_ops->read_char();
+ if (ret == NO_POLL_CHAR)
+ return -1;
+ if (!dbg_kdb_mode)
+ return ret;
+ if (ret == 127)
+ return 8;
+ return ret;
+}
+
+/**
+ * kgdb_breakpoint - generate breakpoint exception
+ *
+ * This function will generate a breakpoint exception. It is used at the
+ * beginning of a program to sync up with a debugger and can be used
+ * otherwise as a quick means to stop program execution and "break" into
+ * the debugger.
+ */
+void kgdb_breakpoint(void)
+{
+ atomic_inc(&kgdb_setting_breakpoint);
+ wmb(); /* Sync point before breakpoint */
+ arch_kgdb_breakpoint();
+ wmb(); /* Sync point after breakpoint */
+ atomic_dec(&kgdb_setting_breakpoint);
+}
+EXPORT_SYMBOL_GPL(kgdb_breakpoint);
+
+static int __init opt_kgdb_wait(char *str)
+{
+ kgdb_break_asap = 1;
+
+ kdb_init(KDB_INIT_EARLY);
+ if (kgdb_io_module_registered)
+ kgdb_initial_breakpoint();
+
+ return 0;
+}
+
+early_param("kgdbwait", opt_kgdb_wait);
diff --git a/kernel/debug/debug_core.h b/kernel/debug/debug_core.h
new file mode 100644
index 000000000000..c5d753d80f67
--- /dev/null
+++ b/kernel/debug/debug_core.h
@@ -0,0 +1,81 @@
+/*
+ * Created by: Jason Wessel <jason.wessel@windriver.com>
+ *
+ * Copyright (c) 2009 Wind River Systems, Inc. All Rights Reserved.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef _DEBUG_CORE_H_
+#define _DEBUG_CORE_H_
+/*
+ * These are the private implementation headers between the kernel
+ * debugger core and the debugger front end code.
+ */
+
+/* kernel debug core data structures */
+struct kgdb_state {
+ int ex_vector;
+ int signo;
+ int err_code;
+ int cpu;
+ int pass_exception;
+ unsigned long thr_query;
+ unsigned long threadid;
+ long kgdb_usethreadid;
+ struct pt_regs *linux_regs;
+};
+
+/* Exception state values */
+#define DCPU_WANT_MASTER 0x1 /* Waiting to become a master kgdb cpu */
+#define DCPU_NEXT_MASTER 0x2 /* Transition from one master cpu to another */
+#define DCPU_IS_SLAVE 0x4 /* Slave cpu enter exception */
+#define DCPU_SSTEP 0x8 /* CPU is single stepping */
+
+struct debuggerinfo_struct {
+ void *debuggerinfo;
+ struct task_struct *task;
+ int exception_state;
+ int ret_state;
+ int irq_depth;
+};
+
+extern struct debuggerinfo_struct kgdb_info[];
+
+/* kernel debug core break point routines */
+extern int dbg_remove_all_break(void);
+extern int dbg_set_sw_break(unsigned long addr);
+extern int dbg_remove_sw_break(unsigned long addr);
+extern int dbg_activate_sw_breakpoints(void);
+extern int dbg_deactivate_sw_breakpoints(void);
+
+/* polled character access to i/o module */
+extern int dbg_io_get_char(void);
+
+/* stub return value for switching between the gdbstub and kdb */
+#define DBG_PASS_EVENT -12345
+/* Switch from one cpu to another */
+#define DBG_SWITCH_CPU_EVENT -123456
+extern int dbg_switch_cpu;
+
+/* gdbstub interface functions */
+extern int gdb_serial_stub(struct kgdb_state *ks);
+extern void gdbstub_msg_write(const char *s, int len);
+
+/* gdbstub functions used for kdb <-> gdbstub transition */
+extern int gdbstub_state(struct kgdb_state *ks, char *cmd);
+extern int dbg_kdb_mode;
+
+#ifdef CONFIG_KGDB_KDB
+extern int kdb_stub(struct kgdb_state *ks);
+extern int kdb_parse(const char *cmdstr);
+#else /* ! CONFIG_KGDB_KDB */
+static inline int kdb_stub(struct kgdb_state *ks)
+{
+ return DBG_PASS_EVENT;
+}
+#endif /* CONFIG_KGDB_KDB */
+
+#endif /* _DEBUG_CORE_H_ */
diff --git a/kernel/debug/gdbstub.c b/kernel/debug/gdbstub.c
new file mode 100644
index 000000000000..4b17b3269525
--- /dev/null
+++ b/kernel/debug/gdbstub.c
@@ -0,0 +1,1017 @@
+/*
+ * Kernel Debug Core
+ *
+ * Maintainer: Jason Wessel <jason.wessel@windriver.com>
+ *
+ * Copyright (C) 2000-2001 VERITAS Software Corporation.
+ * Copyright (C) 2002-2004 Timesys Corporation
+ * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com>
+ * Copyright (C) 2004 Pavel Machek <pavel@suse.cz>
+ * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org>
+ * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd.
+ * Copyright (C) 2005-2009 Wind River Systems, Inc.
+ * Copyright (C) 2007 MontaVista Software, Inc.
+ * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *
+ * Contributors at various stages not listed above:
+ * Jason Wessel ( jason.wessel@windriver.com )
+ * George Anzinger <george@mvista.com>
+ * Anurekh Saxena (anurekh.saxena@timesys.com)
+ * Lake Stevens Instrument Division (Glenn Engel)
+ * Jim Kingdon, Cygnus Support.
+ *
+ * Original KGDB stub: David Grothe <dave@gcom.com>,
+ * Tigran Aivazian <tigran@sco.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kgdb.h>
+#include <linux/kdb.h>
+#include <linux/reboot.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/unaligned.h>
+#include "debug_core.h"
+
+#define KGDB_MAX_THREAD_QUERY 17
+
+/* Our I/O buffers. */
+static char remcom_in_buffer[BUFMAX];
+static char remcom_out_buffer[BUFMAX];
+
+/* Storage for the registers, in GDB format. */
+static unsigned long gdb_regs[(NUMREGBYTES +
+ sizeof(unsigned long) - 1) /
+ sizeof(unsigned long)];
+
+/*
+ * GDB remote protocol parser:
+ */
+
+static int hex(char ch)
+{
+ if ((ch >= 'a') && (ch <= 'f'))
+ return ch - 'a' + 10;
+ if ((ch >= '0') && (ch <= '9'))
+ return ch - '0';
+ if ((ch >= 'A') && (ch <= 'F'))
+ return ch - 'A' + 10;
+ return -1;
+}
+
+#ifdef CONFIG_KGDB_KDB
+static int gdbstub_read_wait(void)
+{
+ int ret = -1;
+ int i;
+
+ /* poll any additional I/O interfaces that are defined */
+ while (ret < 0)
+ for (i = 0; kdb_poll_funcs[i] != NULL; i++) {
+ ret = kdb_poll_funcs[i]();
+ if (ret > 0)
+ break;
+ }
+ return ret;
+}
+#else
+static int gdbstub_read_wait(void)
+{
+ int ret = dbg_io_ops->read_char();
+ while (ret == NO_POLL_CHAR)
+ ret = dbg_io_ops->read_char();
+ return ret;
+}
+#endif
+/* scan for the sequence $<data>#<checksum> */
+static void get_packet(char *buffer)
+{
+ unsigned char checksum;
+ unsigned char xmitcsum;
+ int count;
+ char ch;
+
+ do {
+ /*
+ * Spin and wait around for the start character, ignore all
+ * other characters:
+ */
+ while ((ch = (gdbstub_read_wait())) != '$')
+ /* nothing */;
+
+ kgdb_connected = 1;
+ checksum = 0;
+ xmitcsum = -1;
+
+ count = 0;
+
+ /*
+ * now, read until a # or end of buffer is found:
+ */
+ while (count < (BUFMAX - 1)) {
+ ch = gdbstub_read_wait();
+ if (ch == '#')
+ break;
+ checksum = checksum + ch;
+ buffer[count] = ch;
+ count = count + 1;
+ }
+ buffer[count] = 0;
+
+ if (ch == '#') {
+ xmitcsum = hex(gdbstub_read_wait()) << 4;
+ xmitcsum += hex(gdbstub_read_wait());
+
+ if (checksum != xmitcsum)
+ /* failed checksum */
+ dbg_io_ops->write_char('-');
+ else
+ /* successful transfer */
+ dbg_io_ops->write_char('+');
+ if (dbg_io_ops->flush)
+ dbg_io_ops->flush();
+ }
+ } while (checksum != xmitcsum);
+}
+
+/*
+ * Send the packet in buffer.
+ * Check for gdb connection if asked for.
+ */
+static void put_packet(char *buffer)
+{
+ unsigned char checksum;
+ int count;
+ char ch;
+
+ /*
+ * $<packet info>#<checksum>.
+ */
+ while (1) {
+ dbg_io_ops->write_char('$');
+ checksum = 0;
+ count = 0;
+
+ while ((ch = buffer[count])) {
+ dbg_io_ops->write_char(ch);
+ checksum += ch;
+ count++;
+ }
+
+ dbg_io_ops->write_char('#');
+ dbg_io_ops->write_char(hex_asc_hi(checksum));
+ dbg_io_ops->write_char(hex_asc_lo(checksum));
+ if (dbg_io_ops->flush)
+ dbg_io_ops->flush();
+
+ /* Now see what we get in reply. */
+ ch = gdbstub_read_wait();
+
+ if (ch == 3)
+ ch = gdbstub_read_wait();
+
+ /* If we get an ACK, we are done. */
+ if (ch == '+')
+ return;
+
+ /*
+ * If we get the start of another packet, this means
+ * that GDB is attempting to reconnect. We will NAK
+ * the packet being sent, and stop trying to send this
+ * packet.
+ */
+ if (ch == '$') {
+ dbg_io_ops->write_char('-');
+ if (dbg_io_ops->flush)
+ dbg_io_ops->flush();
+ return;
+ }
+ }
+}
+
+static char gdbmsgbuf[BUFMAX + 1];
+
+void gdbstub_msg_write(const char *s, int len)
+{
+ char *bufptr;
+ int wcount;
+ int i;
+
+ if (len == 0)
+ len = strlen(s);
+
+ /* 'O'utput */
+ gdbmsgbuf[0] = 'O';
+
+ /* Fill and send buffers... */
+ while (len > 0) {
+ bufptr = gdbmsgbuf + 1;
+
+ /* Calculate how many this time */
+ if ((len << 1) > (BUFMAX - 2))
+ wcount = (BUFMAX - 2) >> 1;
+ else
+ wcount = len;
+
+ /* Pack in hex chars */
+ for (i = 0; i < wcount; i++)
+ bufptr = pack_hex_byte(bufptr, s[i]);
+ *bufptr = '\0';
+
+ /* Move up */
+ s += wcount;
+ len -= wcount;
+
+ /* Write packet */
+ put_packet(gdbmsgbuf);
+ }
+}
+
+/*
+ * Convert the memory pointed to by mem into hex, placing the result
+ * in buf. Returns 0 on success, or the error returned by
+ * probe_kernel_read() on failure.
+ */
+int kgdb_mem2hex(char *mem, char *buf, int count)
+{
+ char *tmp;
+ int err;
+
+ /*
+ * We use the upper half of buf as an intermediate buffer for the
+ * raw memory copy. Hex conversion will work against this one.
+ */
+ tmp = buf + count;
+
+ err = probe_kernel_read(tmp, mem, count);
+ if (!err) {
+ while (count > 0) {
+ buf = pack_hex_byte(buf, *tmp);
+ tmp++;
+ count--;
+ }
+
+ *buf = 0;
+ }
+
+ return err;
+}
+
+/*
+ * Convert the hex array pointed to by buf into binary to be placed in
+ * mem. Returns 0 on success, or the error returned by
+ * probe_kernel_write() on failure.
+ */
+int kgdb_hex2mem(char *buf, char *mem, int count)
+{
+ char *tmp_raw;
+ char *tmp_hex;
+
+ /*
+ * We use the upper half of buf as an intermediate buffer for the
+ * raw memory that is converted from hex.
+ */
+ tmp_raw = buf + count * 2;
+
+ tmp_hex = tmp_raw - 1;
+ while (tmp_hex >= buf) {
+ tmp_raw--;
+ *tmp_raw = hex(*tmp_hex--);
+ *tmp_raw |= hex(*tmp_hex--) << 4;
+ }
+
+ return probe_kernel_write(mem, tmp_raw, count);
+}
+
+/*
+ * While we find nice hex chars, build a long_val.
+ * Return number of chars processed.
+ */
+int kgdb_hex2long(char **ptr, unsigned long *long_val)
+{
+ int hex_val;
+ int num = 0;
+ int negate = 0;
+
+ *long_val = 0;
+
+ if (**ptr == '-') {
+ negate = 1;
+ (*ptr)++;
+ }
+ while (**ptr) {
+ hex_val = hex(**ptr);
+ if (hex_val < 0)
+ break;
+
+ *long_val = (*long_val << 4) | hex_val;
+ num++;
+ (*ptr)++;
+ }
+
+ if (negate)
+ *long_val = -*long_val;
+
+ return num;
+}
+
+/*
+ * Copy the binary array pointed to by buf into mem. Un-escape the
+ * characters $, #, and 0x7d that are escaped with 0x7d. Return -EFAULT
+ * on failure or 0 on success. The input buf is overwritten with the
+ * result to write to mem.
+ */
+static int kgdb_ebin2mem(char *buf, char *mem, int count)
+{
+ int size = 0;
+ char *c = buf;
+
+ while (count-- > 0) {
+ c[size] = *buf++;
+ if (c[size] == 0x7d)
+ c[size] = *buf++ ^ 0x20;
+ size++;
+ }
+
+ return probe_kernel_write(mem, c, size);
+}
+
+/* Write memory due to an 'M' or 'X' packet. */
+static int write_mem_msg(int binary)
+{
+ char *ptr = &remcom_in_buffer[1];
+ unsigned long addr;
+ unsigned long length;
+ int err;
+
+ if (kgdb_hex2long(&ptr, &addr) > 0 && *(ptr++) == ',' &&
+ kgdb_hex2long(&ptr, &length) > 0 && *(ptr++) == ':') {
+ if (binary)
+ err = kgdb_ebin2mem(ptr, (char *)addr, length);
+ else
+ err = kgdb_hex2mem(ptr, (char *)addr, length);
+ if (err)
+ return err;
+ if (CACHE_FLUSH_IS_SAFE)
+ flush_icache_range(addr, addr + length);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static void error_packet(char *pkt, int error)
+{
+ error = -error;
+ pkt[0] = 'E';
+ pkt[1] = hex_asc[(error / 10)];
+ pkt[2] = hex_asc[(error % 10)];
+ pkt[3] = '\0';
+}
+
+/*
+ * Thread ID accessors. We represent a flat TID space to GDB, where
+ * the per CPU idle threads (which under Linux all have PID 0) are
+ * remapped to negative TIDs.
+ */
+
+#define BUF_THREAD_ID_SIZE 16
+
+static char *pack_threadid(char *pkt, unsigned char *id)
+{
+ char *limit;
+
+ limit = pkt + BUF_THREAD_ID_SIZE;
+ while (pkt < limit)
+ pkt = pack_hex_byte(pkt, *id++);
+
+ return pkt;
+}
+
+static void int_to_threadref(unsigned char *id, int value)
+{
+ unsigned char *scan;
+ int i = 4;
+
+ scan = (unsigned char *)id;
+ while (i--)
+ *scan++ = 0;
+ put_unaligned_be32(value, scan);
+}
+
+static struct task_struct *getthread(struct pt_regs *regs, int tid)
+{
+ /*
+ * Non-positive TIDs are remapped to the cpu shadow information
+ */
+ if (tid == 0 || tid == -1)
+ tid = -atomic_read(&kgdb_active) - 2;
+ if (tid < -1 && tid > -NR_CPUS - 2) {
+ if (kgdb_info[-tid - 2].task)
+ return kgdb_info[-tid - 2].task;
+ else
+ return idle_task(-tid - 2);
+ }
+ if (tid <= 0) {
+ printk(KERN_ERR "KGDB: Internal thread select error\n");
+ dump_stack();
+ return NULL;
+ }
+
+ /*
+ * find_task_by_pid_ns() does not take the tasklist lock anymore
+ * but is nicely RCU locked - hence is a pretty resilient
+ * thing to use:
+ */
+ return find_task_by_pid_ns(tid, &init_pid_ns);
+}
+
+
+/*
+ * Remap normal tasks to their real PID,
+ * CPU shadow threads are mapped to -CPU - 2
+ */
+static inline int shadow_pid(int realpid)
+{
+ if (realpid)
+ return realpid;
+
+ return -raw_smp_processor_id() - 2;
+}
+
+/*
+ * All the functions that start with gdb_cmd are the various
+ * operations to implement the handlers for the gdbserial protocol
+ * where KGDB is communicating with an external debugger
+ */
+
+/* Handle the '?' status packets */
+static void gdb_cmd_status(struct kgdb_state *ks)
+{
+ /*
+ * We know that this packet is only sent
+ * during initial connect. So to be safe,
+ * we clear out our breakpoints now in case
+ * GDB is reconnecting.
+ */
+ dbg_remove_all_break();
+
+ remcom_out_buffer[0] = 'S';
+ pack_hex_byte(&remcom_out_buffer[1], ks->signo);
+}
+
+/* Handle the 'g' get registers request */
+static void gdb_cmd_getregs(struct kgdb_state *ks)
+{
+ struct task_struct *thread;
+ void *local_debuggerinfo;
+ int i;
+
+ thread = kgdb_usethread;
+ if (!thread) {
+ thread = kgdb_info[ks->cpu].task;
+ local_debuggerinfo = kgdb_info[ks->cpu].debuggerinfo;
+ } else {
+ local_debuggerinfo = NULL;
+ for_each_online_cpu(i) {
+ /*
+ * Try to find the task on some other
+ * or possibly this node. If we do not
+ * find the matching task then we try
+ * to approximate the results.
+ */
+ if (thread == kgdb_info[i].task)
+ local_debuggerinfo = kgdb_info[i].debuggerinfo;
+ }
+ }
+
+ /*
+ * All threads that don't have debuggerinfo should be
+ * in schedule() sleeping, since all other CPUs
+ * are in kgdb_wait, and thus have debuggerinfo.
+ */
+ if (local_debuggerinfo) {
+ pt_regs_to_gdb_regs(gdb_regs, local_debuggerinfo);
+ } else {
+ /*
+ * Pull stuff saved during switch_to; nothing
+ * else is accessible (or even particularly
+ * relevant).
+ *
+ * This should be enough for a stack trace.
+ */
+ sleeping_thread_to_gdb_regs(gdb_regs, thread);
+ }
+ kgdb_mem2hex((char *)gdb_regs, remcom_out_buffer, NUMREGBYTES);
+}
+
+/* Handle the 'G' set registers request */
+static void gdb_cmd_setregs(struct kgdb_state *ks)
+{
+ kgdb_hex2mem(&remcom_in_buffer[1], (char *)gdb_regs, NUMREGBYTES);
+
+ if (kgdb_usethread && kgdb_usethread != current) {
+ error_packet(remcom_out_buffer, -EINVAL);
+ } else {
+ gdb_regs_to_pt_regs(gdb_regs, ks->linux_regs);
+ strcpy(remcom_out_buffer, "OK");
+ }
+}
+
+/* Handle the 'm' memory read bytes */
+static void gdb_cmd_memread(struct kgdb_state *ks)
+{
+ char *ptr = &remcom_in_buffer[1];
+ unsigned long length;
+ unsigned long addr;
+ int err;
+
+ if (kgdb_hex2long(&ptr, &addr) > 0 && *ptr++ == ',' &&
+ kgdb_hex2long(&ptr, &length) > 0) {
+ err = kgdb_mem2hex((char *)addr, remcom_out_buffer, length);
+ if (err)
+ error_packet(remcom_out_buffer, err);
+ } else {
+ error_packet(remcom_out_buffer, -EINVAL);
+ }
+}
+
+/* Handle the 'M' memory write bytes */
+static void gdb_cmd_memwrite(struct kgdb_state *ks)
+{
+ int err = write_mem_msg(0);
+
+ if (err)
+ error_packet(remcom_out_buffer, err);
+ else
+ strcpy(remcom_out_buffer, "OK");
+}
+
+/* Handle the 'X' memory binary write bytes */
+static void gdb_cmd_binwrite(struct kgdb_state *ks)
+{
+ int err = write_mem_msg(1);
+
+ if (err)
+ error_packet(remcom_out_buffer, err);
+ else
+ strcpy(remcom_out_buffer, "OK");
+}
+
+/* Handle the 'D' or 'k', detach or kill packets */
+static void gdb_cmd_detachkill(struct kgdb_state *ks)
+{
+ int error;
+
+ /* The detach case */
+ if (remcom_in_buffer[0] == 'D') {
+ error = dbg_remove_all_break();
+ if (error < 0) {
+ error_packet(remcom_out_buffer, error);
+ } else {
+ strcpy(remcom_out_buffer, "OK");
+ kgdb_connected = 0;
+ }
+ put_packet(remcom_out_buffer);
+ } else {
+ /*
+ * Assume the kill case, with no exit code checking,
+ * trying to force detach the debugger:
+ */
+ dbg_remove_all_break();
+ kgdb_connected = 0;
+ }
+}
+
+/* Handle the 'R' reboot packets */
+static int gdb_cmd_reboot(struct kgdb_state *ks)
+{
+ /* For now, only honor R0 */
+ if (strcmp(remcom_in_buffer, "R0") == 0) {
+ printk(KERN_CRIT "Executing emergency reboot\n");
+ strcpy(remcom_out_buffer, "OK");
+ put_packet(remcom_out_buffer);
+
+ /*
+ * Execution should not return from
+ * machine_emergency_restart()
+ */
+ machine_emergency_restart();
+ kgdb_connected = 0;
+
+ return 1;
+ }
+ return 0;
+}
+
+/* Handle the 'q' query packets */
+static void gdb_cmd_query(struct kgdb_state *ks)
+{
+ struct task_struct *g;
+ struct task_struct *p;
+ unsigned char thref[8];
+ char *ptr;
+ int i;
+ int cpu;
+ int finished = 0;
+
+ switch (remcom_in_buffer[1]) {
+ case 's':
+ case 'f':
+ if (memcmp(remcom_in_buffer + 2, "ThreadInfo", 10)) {
+ error_packet(remcom_out_buffer, -EINVAL);
+ break;
+ }
+
+ i = 0;
+ remcom_out_buffer[0] = 'm';
+ ptr = remcom_out_buffer + 1;
+ if (remcom_in_buffer[1] == 'f') {
+ /* Each cpu is a shadow thread */
+ for_each_online_cpu(cpu) {
+ ks->thr_query = 0;
+ int_to_threadref(thref, -cpu - 2);
+ pack_threadid(ptr, thref);
+ ptr += BUF_THREAD_ID_SIZE;
+ *(ptr++) = ',';
+ i++;
+ }
+ }
+
+ do_each_thread(g, p) {
+ if (i >= ks->thr_query && !finished) {
+ int_to_threadref(thref, p->pid);
+ pack_threadid(ptr, thref);
+ ptr += BUF_THREAD_ID_SIZE;
+ *(ptr++) = ',';
+ ks->thr_query++;
+ if (ks->thr_query % KGDB_MAX_THREAD_QUERY == 0)
+ finished = 1;
+ }
+ i++;
+ } while_each_thread(g, p);
+
+ *(--ptr) = '\0';
+ break;
+
+ case 'C':
+ /* Current thread id */
+ strcpy(remcom_out_buffer, "QC");
+ ks->threadid = shadow_pid(current->pid);
+ int_to_threadref(thref, ks->threadid);
+ pack_threadid(remcom_out_buffer + 2, thref);
+ break;
+ case 'T':
+ if (memcmp(remcom_in_buffer + 1, "ThreadExtraInfo,", 16)) {
+ error_packet(remcom_out_buffer, -EINVAL);
+ break;
+ }
+ ks->threadid = 0;
+ ptr = remcom_in_buffer + 17;
+ kgdb_hex2long(&ptr, &ks->threadid);
+ if (!getthread(ks->linux_regs, ks->threadid)) {
+ error_packet(remcom_out_buffer, -EINVAL);
+ break;
+ }
+ if ((int)ks->threadid > 0) {
+ kgdb_mem2hex(getthread(ks->linux_regs,
+ ks->threadid)->comm,
+ remcom_out_buffer, 16);
+ } else {
+ static char tmpstr[23 + BUF_THREAD_ID_SIZE];
+
+ sprintf(tmpstr, "shadowCPU%d",
+ (int)(-ks->threadid - 2));
+ kgdb_mem2hex(tmpstr, remcom_out_buffer, strlen(tmpstr));
+ }
+ break;
+#ifdef CONFIG_KGDB_KDB
+ case 'R':
+ if (strncmp(remcom_in_buffer, "qRcmd,", 6) == 0) {
+ int len = strlen(remcom_in_buffer + 6);
+
+ if ((len % 2) != 0) {
+ strcpy(remcom_out_buffer, "E01");
+ break;
+ }
+ kgdb_hex2mem(remcom_in_buffer + 6,
+ remcom_out_buffer, len);
+ len = len / 2;
+ remcom_out_buffer[len++] = 0;
+
+ kdb_parse(remcom_out_buffer);
+ strcpy(remcom_out_buffer, "OK");
+ }
+ break;
+#endif
+ }
+}
+
+/* Handle the 'H' task query packets */
+static void gdb_cmd_task(struct kgdb_state *ks)
+{
+ struct task_struct *thread;
+ char *ptr;
+
+ switch (remcom_in_buffer[1]) {
+ case 'g':
+ ptr = &remcom_in_buffer[2];
+ kgdb_hex2long(&ptr, &ks->threadid);
+ thread = getthread(ks->linux_regs, ks->threadid);
+ if (!thread && ks->threadid > 0) {
+ error_packet(remcom_out_buffer, -EINVAL);
+ break;
+ }
+ kgdb_usethread = thread;
+ ks->kgdb_usethreadid = ks->threadid;
+ strcpy(remcom_out_buffer, "OK");
+ break;
+ case 'c':
+ ptr = &remcom_in_buffer[2];
+ kgdb_hex2long(&ptr, &ks->threadid);
+ if (!ks->threadid) {
+ kgdb_contthread = NULL;
+ } else {
+ thread = getthread(ks->linux_regs, ks->threadid);
+ if (!thread && ks->threadid > 0) {
+ error_packet(remcom_out_buffer, -EINVAL);
+ break;
+ }
+ kgdb_contthread = thread;
+ }
+ strcpy(remcom_out_buffer, "OK");
+ break;
+ }
+}
+
+/* Handle the 'T' thread query packets */
+static void gdb_cmd_thread(struct kgdb_state *ks)
+{
+ char *ptr = &remcom_in_buffer[1];
+ struct task_struct *thread;
+
+ kgdb_hex2long(&ptr, &ks->threadid);
+ thread = getthread(ks->linux_regs, ks->threadid);
+ if (thread)
+ strcpy(remcom_out_buffer, "OK");
+ else
+ error_packet(remcom_out_buffer, -EINVAL);
+}
+
+/* Handle the 'z' or 'Z' breakpoint remove or set packets */
+static void gdb_cmd_break(struct kgdb_state *ks)
+{
+ /*
+ * Since GDB-5.3, it's been drafted that '0' is a software
+ * breakpoint, '1' is a hardware breakpoint, so let's do that.
+ */
+ char *bpt_type = &remcom_in_buffer[1];
+ char *ptr = &remcom_in_buffer[2];
+ unsigned long addr;
+ unsigned long length;
+ int error = 0;
+
+ if (arch_kgdb_ops.set_hw_breakpoint && *bpt_type >= '1') {
+ /* Unsupported */
+ if (*bpt_type > '4')
+ return;
+ } else {
+ if (*bpt_type != '0' && *bpt_type != '1')
+ /* Unsupported. */
+ return;
+ }
+
+ /*
+ * Test if this is a hardware breakpoint, and
+ * if we support it:
+ */
+ if (*bpt_type == '1' && !(arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT))
+ /* Unsupported. */
+ return;
+
+ if (*(ptr++) != ',') {
+ error_packet(remcom_out_buffer, -EINVAL);
+ return;
+ }
+ if (!kgdb_hex2long(&ptr, &addr)) {
+ error_packet(remcom_out_buffer, -EINVAL);
+ return;
+ }
+ if (*(ptr++) != ',' ||
+ !kgdb_hex2long(&ptr, &length)) {
+ error_packet(remcom_out_buffer, -EINVAL);
+ return;
+ }
+
+ if (remcom_in_buffer[0] == 'Z' && *bpt_type == '0')
+ error = dbg_set_sw_break(addr);
+ else if (remcom_in_buffer[0] == 'z' && *bpt_type == '0')
+ error = dbg_remove_sw_break(addr);
+ else if (remcom_in_buffer[0] == 'Z')
+ error = arch_kgdb_ops.set_hw_breakpoint(addr,
+ (int)length, *bpt_type - '0');
+ else if (remcom_in_buffer[0] == 'z')
+ error = arch_kgdb_ops.remove_hw_breakpoint(addr,
+ (int) length, *bpt_type - '0');
+
+ if (error == 0)
+ strcpy(remcom_out_buffer, "OK");
+ else
+ error_packet(remcom_out_buffer, error);
+}
+
+/* Handle the 'C' signal / exception passing packets */
+static int gdb_cmd_exception_pass(struct kgdb_state *ks)
+{
+ /* C09 == pass exception
+ * C15 == detach kgdb, pass exception
+ */
+ if (remcom_in_buffer[1] == '0' && remcom_in_buffer[2] == '9') {
+
+ ks->pass_exception = 1;
+ remcom_in_buffer[0] = 'c';
+
+ } else if (remcom_in_buffer[1] == '1' && remcom_in_buffer[2] == '5') {
+
+ ks->pass_exception = 1;
+ remcom_in_buffer[0] = 'D';
+ dbg_remove_all_break();
+ kgdb_connected = 0;
+ return 1;
+
+ } else {
+ gdbstub_msg_write("KGDB only knows signal 9 (pass)"
+ " and 15 (pass and disconnect)\n"
+ "Executing a continue without signal passing\n", 0);
+ remcom_in_buffer[0] = 'c';
+ }
+
+ /* Indicate fall through */
+ return -1;
+}
+
+/*
+ * This function performs all gdbserial command processing
+ */
+int gdb_serial_stub(struct kgdb_state *ks)
+{
+ int error = 0;
+ int tmp;
+
+ /* Clear the out buffer. */
+ memset(remcom_out_buffer, 0, sizeof(remcom_out_buffer));
+
+ if (kgdb_connected) {
+ unsigned char thref[8];
+ char *ptr;
+
+ /* Reply to host that an exception has occurred */
+ ptr = remcom_out_buffer;
+ *ptr++ = 'T';
+ ptr = pack_hex_byte(ptr, ks->signo);
+ ptr += strlen(strcpy(ptr, "thread:"));
+ int_to_threadref(thref, shadow_pid(current->pid));
+ ptr = pack_threadid(ptr, thref);
+ *ptr++ = ';';
+ put_packet(remcom_out_buffer);
+ }
+
+ kgdb_usethread = kgdb_info[ks->cpu].task;
+ ks->kgdb_usethreadid = shadow_pid(kgdb_info[ks->cpu].task->pid);
+ ks->pass_exception = 0;
+
+ while (1) {
+ error = 0;
+
+ /* Clear the out buffer. */
+ memset(remcom_out_buffer, 0, sizeof(remcom_out_buffer));
+
+ get_packet(remcom_in_buffer);
+
+ switch (remcom_in_buffer[0]) {
+ case '?': /* gdbserial status */
+ gdb_cmd_status(ks);
+ break;
+ case 'g': /* return the value of the CPU registers */
+ gdb_cmd_getregs(ks);
+ break;
+ case 'G': /* set the value of the CPU registers - return OK */
+ gdb_cmd_setregs(ks);
+ break;
+ case 'm': /* mAA..AA,LLLL Read LLLL bytes at address AA..AA */
+ gdb_cmd_memread(ks);
+ break;
+ case 'M': /* MAA..AA,LLLL: Write LLLL bytes at address AA..AA */
+ gdb_cmd_memwrite(ks);
+ break;
+ case 'X': /* XAA..AA,LLLL: Write LLLL bytes at address AA..AA */
+ gdb_cmd_binwrite(ks);
+ break;
+ /* kill or detach. KGDB should treat this like a
+ * continue.
+ */
+ case 'D': /* Debugger detach */
+ case 'k': /* Debugger detach via kill */
+ gdb_cmd_detachkill(ks);
+ goto default_handle;
+ case 'R': /* Reboot */
+ if (gdb_cmd_reboot(ks))
+ goto default_handle;
+ break;
+ case 'q': /* query command */
+ gdb_cmd_query(ks);
+ break;
+ case 'H': /* task related */
+ gdb_cmd_task(ks);
+ break;
+ case 'T': /* Query thread status */
+ gdb_cmd_thread(ks);
+ break;
+ case 'z': /* Break point remove */
+ case 'Z': /* Break point set */
+ gdb_cmd_break(ks);
+ break;
+#ifdef CONFIG_KGDB_KDB
+ case '3': /* Escape back into kdb */
+ if (remcom_in_buffer[1] == '\0') {
+ gdb_cmd_detachkill(ks);
+ return DBG_PASS_EVENT;
+ }
+#endif
+ case 'C': /* Exception passing */
+ tmp = gdb_cmd_exception_pass(ks);
+ if (tmp > 0)
+ goto default_handle;
+ if (tmp == 0)
+ break;
+ /* Fall through on tmp < 0 */
+ case 'c': /* Continue packet */
+ case 's': /* Single step packet */
+ if (kgdb_contthread && kgdb_contthread != current) {
+ /* Can't switch threads in kgdb */
+ error_packet(remcom_out_buffer, -EINVAL);
+ break;
+ }
+ dbg_activate_sw_breakpoints();
+ /* Fall through to default processing */
+ default:
+default_handle:
+ error = kgdb_arch_handle_exception(ks->ex_vector,
+ ks->signo,
+ ks->err_code,
+ remcom_in_buffer,
+ remcom_out_buffer,
+ ks->linux_regs);
+ /*
+ * Leave cmd processing on error, detach,
+ * kill, continue, or single step.
+ */
+ if (error >= 0 || remcom_in_buffer[0] == 'D' ||
+ remcom_in_buffer[0] == 'k') {
+ error = 0;
+ goto kgdb_exit;
+ }
+
+ }
+
+ /* reply to the request */
+ put_packet(remcom_out_buffer);
+ }
+
+kgdb_exit:
+ if (ks->pass_exception)
+ error = 1;
+ return error;
+}
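
The packets exchanged by get_packet()/put_packet() in the loop above follow the standard GDB remote serial protocol framing: a '$', the payload, a '#', and a two-digit hex checksum that is just the payload bytes summed modulo 256. A minimal userspace sketch of that framing (illustrative only, not the in-kernel helpers):

#include <stdio.h>

/* Frame "payload" as a GDB remote serial protocol packet:
 * "$<payload>#<checksum>", checksum = sum of payload bytes mod 256.
 */
static void frame_packet(const char *payload, char *out, size_t outlen)
{
	unsigned char sum = 0;
	const char *p;

	for (p = payload; *p; p++)
		sum += (unsigned char)*p;
	snprintf(out, outlen, "$%s#%02x", payload, sum);
}

int main(void)
{
	char buf[128];

	frame_packet("m00401000,4", buf, sizeof(buf));
	printf("%s\n", buf);	/* a framed 4-byte memory read request */
	return 0;
}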
+
+int gdbstub_state(struct kgdb_state *ks, char *cmd)
+{
+ int error;
+
+ switch (cmd[0]) {
+ case 'e':
+ error = kgdb_arch_handle_exception(ks->ex_vector,
+ ks->signo,
+ ks->err_code,
+ remcom_in_buffer,
+ remcom_out_buffer,
+ ks->linux_regs);
+ return error;
+ case 's':
+ case 'c':
+ strcpy(remcom_in_buffer, cmd);
+ return 0;
+ case '?':
+ gdb_cmd_status(ks);
+ break;
+ case '\0':
+ strcpy(remcom_out_buffer, "");
+ break;
+ }
+ dbg_io_ops->write_char('+');
+ put_packet(remcom_out_buffer);
+ return 0;
+}
diff --git a/kernel/debug/kdb/.gitignore b/kernel/debug/kdb/.gitignore
new file mode 100644
index 000000000000..396d12eda9e8
--- /dev/null
+++ b/kernel/debug/kdb/.gitignore
@@ -0,0 +1 @@
+gen-kdb_cmds.c
diff --git a/kernel/debug/kdb/Makefile b/kernel/debug/kdb/Makefile
new file mode 100644
index 000000000000..d4fc58f4b88d
--- /dev/null
+++ b/kernel/debug/kdb/Makefile
@@ -0,0 +1,25 @@
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved.
+# Copyright (c) 2009 Wind River Systems, Inc. All Rights Reserved.
+#
+
+CCVERSION := $(shell $(CC) -v 2>&1 | sed -ne '$$p')
+obj-y := kdb_io.o kdb_main.o kdb_support.o kdb_bt.o gen-kdb_cmds.o kdb_bp.o kdb_debugger.o
+obj-$(CONFIG_KDB_KEYBOARD) += kdb_keyboard.o
+
+clean-files := gen-kdb_cmds.c
+
+quiet_cmd_gen-kdb = GENKDB $@
+ cmd_gen-kdb = $(AWK) 'BEGIN {print "\#include <linux/stddef.h>"; print "\#include <linux/init.h>"} \
+ /^\#/{next} \
+ /^[ \t]*$$/{next} \
+ {gsub(/"/, "\\\"", $$0); \
+ print "static __initdata char kdb_cmd" cmds++ "[] = \"" $$0 "\\n\";"} \
+ END {print "extern char *kdb_cmds[]; char __initdata *kdb_cmds[] = {"; for (i = 0; i < cmds; ++i) {print " kdb_cmd" i ","}; print(" NULL\n};");}' \
+ $(filter-out %/Makefile,$^) > $@#
+
+$(obj)/gen-kdb_cmds.c: $(src)/kdb_cmds $(src)/Makefile
+ $(call cmd,gen-kdb)
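
The awk rule above turns each non-comment, non-blank line of kdb_cmds into a C string and collects the strings into a NULL-terminated kdb_cmds[] array. As a rough illustration (assuming a kdb_cmds file whose only payload lines are "set BTAPROMPT 0" and "set LINES 10000"), the generated gen-kdb_cmds.c would look approximately like this:

#include <linux/stddef.h>
#include <linux/init.h>
static __initdata char kdb_cmd0[] = "set BTAPROMPT 0\n";
static __initdata char kdb_cmd1[] = "set LINES 10000\n";
extern char *kdb_cmds[]; char __initdata *kdb_cmds[] = {
 kdb_cmd0,
 kdb_cmd1,
 NULL
};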
diff --git a/kernel/debug/kdb/kdb_bp.c b/kernel/debug/kdb/kdb_bp.c
new file mode 100644
index 000000000000..75bd9b3ebbb7
--- /dev/null
+++ b/kernel/debug/kdb/kdb_bp.c
@@ -0,0 +1,564 @@
+/*
+ * Kernel Debugger Architecture Independent Breakpoint Handler
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2009 Wind River Systems, Inc. All Rights Reserved.
+ */
+
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/kdb.h>
+#include <linux/kgdb.h>
+#include <linux/smp.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include "kdb_private.h"
+
+/*
+ * Table of kdb_breakpoints
+ */
+kdb_bp_t kdb_breakpoints[KDB_MAXBPT];
+
+static void kdb_setsinglestep(struct pt_regs *regs)
+{
+ KDB_STATE_SET(DOING_SS);
+}
+
+static char *kdb_rwtypes[] = {
+ "Instruction(i)",
+ "Instruction(Register)",
+ "Data Write",
+ "I/O",
+ "Data Access"
+};
+
+static char *kdb_bptype(kdb_bp_t *bp)
+{
+ if (bp->bp_type < 0 || bp->bp_type > 4)
+ return "";
+
+ return kdb_rwtypes[bp->bp_type];
+}
+
+static int kdb_parsebp(int argc, const char **argv, int *nextargp, kdb_bp_t *bp)
+{
+ int nextarg = *nextargp;
+ int diag;
+
+ bp->bph_length = 1;
+ if ((argc + 1) != nextarg) {
+ if (strnicmp(argv[nextarg], "datar", sizeof("datar")) == 0)
+ bp->bp_type = BP_ACCESS_WATCHPOINT;
+ else if (strnicmp(argv[nextarg], "dataw", sizeof("dataw")) == 0)
+ bp->bp_type = BP_WRITE_WATCHPOINT;
+ else if (strnicmp(argv[nextarg], "inst", sizeof("inst")) == 0)
+ bp->bp_type = BP_HARDWARE_BREAKPOINT;
+ else
+ return KDB_ARGCOUNT;
+
+ bp->bph_length = 1;
+
+ nextarg++;
+
+ if ((argc + 1) != nextarg) {
+ unsigned long len;
+
+ diag = kdbgetularg((char *)argv[nextarg],
+ &len);
+ if (diag)
+ return diag;
+
+
+ if (len > 8)
+ return KDB_BADLENGTH;
+
+ bp->bph_length = len;
+ nextarg++;
+ }
+
+ if ((argc + 1) != nextarg)
+ return KDB_ARGCOUNT;
+ }
+
+ *nextargp = nextarg;
+ return 0;
+}
+
+static int _kdb_bp_remove(kdb_bp_t *bp)
+{
+ int ret = 1;
+ if (!bp->bp_installed)
+ return ret;
+ if (!bp->bp_type)
+ ret = dbg_remove_sw_break(bp->bp_addr);
+ else
+ ret = arch_kgdb_ops.remove_hw_breakpoint(bp->bp_addr,
+ bp->bph_length,
+ bp->bp_type);
+ if (ret == 0)
+ bp->bp_installed = 0;
+ return ret;
+}
+
+static void kdb_handle_bp(struct pt_regs *regs, kdb_bp_t *bp)
+{
+ if (KDB_DEBUG(BP))
+ kdb_printf("regs->ip = 0x%lx\n", instruction_pointer(regs));
+
+ /*
+ * Setup single step
+ */
+ kdb_setsinglestep(regs);
+
+ /*
+ * Reset delay attribute
+ */
+ bp->bp_delay = 0;
+ bp->bp_delayed = 1;
+}
+
+static int _kdb_bp_install(struct pt_regs *regs, kdb_bp_t *bp)
+{
+ int ret;
+ /*
+ * Install the breakpoint, if it is not already installed.
+ */
+
+ if (KDB_DEBUG(BP))
+ kdb_printf("%s: bp_installed %d\n",
+ __func__, bp->bp_installed);
+ if (!KDB_STATE(SSBPT))
+ bp->bp_delay = 0;
+ if (bp->bp_installed)
+ return 1;
+ if (bp->bp_delay || (bp->bp_delayed && KDB_STATE(DOING_SS))) {
+ if (KDB_DEBUG(BP))
+ kdb_printf("%s: delayed bp\n", __func__);
+ kdb_handle_bp(regs, bp);
+ return 0;
+ }
+ if (!bp->bp_type)
+ ret = dbg_set_sw_break(bp->bp_addr);
+ else
+ ret = arch_kgdb_ops.set_hw_breakpoint(bp->bp_addr,
+ bp->bph_length,
+ bp->bp_type);
+ if (ret == 0) {
+ bp->bp_installed = 1;
+ } else {
+ kdb_printf("%s: failed to set breakpoint at 0x%lx\n",
+ __func__, bp->bp_addr);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * kdb_bp_install
+ *
+ * Install kdb_breakpoints prior to returning from the
+ * kernel debugger. This allows the kdb_breakpoints to be set
+ * upon functions that are used internally by kdb, such as
+ * printk(). This function is only called once per kdb session.
+ */
+void kdb_bp_install(struct pt_regs *regs)
+{
+ int i;
+
+ for (i = 0; i < KDB_MAXBPT; i++) {
+ kdb_bp_t *bp = &kdb_breakpoints[i];
+
+ if (KDB_DEBUG(BP)) {
+ kdb_printf("%s: bp %d bp_enabled %d\n",
+ __func__, i, bp->bp_enabled);
+ }
+ if (bp->bp_enabled)
+ _kdb_bp_install(regs, bp);
+ }
+}
+
+/*
+ * kdb_bp_remove
+ *
+ * Remove kdb_breakpoints upon entry to the kernel debugger.
+ *
+ * Parameters:
+ * None.
+ * Outputs:
+ * None.
+ * Returns:
+ * None.
+ * Locking:
+ * None.
+ * Remarks:
+ */
+void kdb_bp_remove(void)
+{
+ int i;
+
+ for (i = KDB_MAXBPT - 1; i >= 0; i--) {
+ kdb_bp_t *bp = &kdb_breakpoints[i];
+
+ if (KDB_DEBUG(BP)) {
+ kdb_printf("%s: bp %d bp_enabled %d\n",
+ __func__, i, bp->bp_enabled);
+ }
+ if (bp->bp_enabled)
+ _kdb_bp_remove(bp);
+ }
+}
+
+
+/*
+ * kdb_printbp
+ *
+ * Internal function to format and print a breakpoint entry.
+ *
+ * Parameters:
+ * None.
+ * Outputs:
+ * None.
+ * Returns:
+ * None.
+ * Locking:
+ * None.
+ * Remarks:
+ */
+
+static void kdb_printbp(kdb_bp_t *bp, int i)
+{
+ kdb_printf("%s ", kdb_bptype(bp));
+ kdb_printf("BP #%d at ", i);
+ kdb_symbol_print(bp->bp_addr, NULL, KDB_SP_DEFAULT);
+
+ if (bp->bp_enabled)
+ kdb_printf("\n is enabled");
+ else
+ kdb_printf("\n is disabled");
+
+ kdb_printf("\taddr at %016lx, hardtype=%d installed=%d\n",
+ bp->bp_addr, bp->bp_type, bp->bp_installed);
+
+ kdb_printf("\n");
+}
+
+/*
+ * kdb_bp
+ *
+ * Handle the bp commands.
+ *
+ * [bp|bph] <addr-expression> [DATAR|DATAW]
+ *
+ * Parameters:
+ * argc Count of arguments in argv
+ * argv Space delimited command line arguments
+ * Outputs:
+ * None.
+ * Returns:
+ * Zero for success, a kdb diagnostic if failure.
+ * Locking:
+ * None.
+ * Remarks:
+ *
+ * bp Set breakpoint on all cpus. Only use hardware assist if needed.
+ * bph Set breakpoint on all cpus. Force use of a hardware register.
+ */
+
+static int kdb_bp(int argc, const char **argv)
+{
+ int i, bpno;
+ kdb_bp_t *bp, *bp_check;
+ int diag;
+ int free;
+ char *symname = NULL;
+ long offset = 0ul;
+ int nextarg;
+ kdb_bp_t template = {0};
+
+ if (argc == 0) {
+ /*
+ * Display breakpoint table
+ */
+ for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT;
+ bpno++, bp++) {
+ if (bp->bp_free)
+ continue;
+ kdb_printbp(bp, bpno);
+ }
+
+ return 0;
+ }
+
+ nextarg = 1;
+ diag = kdbgetaddrarg(argc, argv, &nextarg, &template.bp_addr,
+ &offset, &symname);
+ if (diag)
+ return diag;
+ if (!template.bp_addr)
+ return KDB_BADINT;
+
+ /*
+ * Find an empty bp structure to allocate
+ */
+ free = KDB_MAXBPT;
+ for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) {
+ if (bp->bp_free)
+ break;
+ }
+
+ if (bpno == KDB_MAXBPT)
+ return KDB_TOOMANYBPT;
+
+ if (strcmp(argv[0], "bph") == 0) {
+ template.bp_type = BP_HARDWARE_BREAKPOINT;
+ diag = kdb_parsebp(argc, argv, &nextarg, &template);
+ if (diag)
+ return diag;
+ } else {
+ template.bp_type = BP_BREAKPOINT;
+ }
+
+ /*
+ * Check for clashing breakpoints.
+ *
+ * Note, in this design we can't have hardware breakpoints
+ * enabled for both read and write on the same address.
+ */
+ for (i = 0, bp_check = kdb_breakpoints; i < KDB_MAXBPT;
+ i++, bp_check++) {
+ if (!bp_check->bp_free &&
+ bp_check->bp_addr == template.bp_addr) {
+ kdb_printf("You already have a breakpoint at "
+ kdb_bfd_vma_fmt0 "\n", template.bp_addr);
+ return KDB_DUPBPT;
+ }
+ }
+
+ template.bp_enabled = 1;
+
+ /*
+ * Actually allocate the breakpoint found earlier
+ */
+ *bp = template;
+ bp->bp_free = 0;
+
+ kdb_printbp(bp, bpno);
+
+ return 0;
+}
+
+/*
+ * kdb_bc
+ *
+ * Handles the 'bc', 'be', and 'bd' commands
+ *
+ * [bd|bc|be] <breakpoint-number>
+ * [bd|bc|be] *
+ *
+ * Parameters:
+ * argc Count of arguments in argv
+ * argv Space delimited command line arguments
+ * Outputs:
+ * None.
+ * Returns:
+ * Zero for success, a kdb diagnostic for failure
+ * Locking:
+ * None.
+ * Remarks:
+ */
+static int kdb_bc(int argc, const char **argv)
+{
+ unsigned long addr;
+ kdb_bp_t *bp = NULL;
+ int lowbp = KDB_MAXBPT;
+ int highbp = 0;
+ int done = 0;
+ int i;
+ int diag = 0;
+
+ int cmd; /* KDBCMD_B? */
+#define KDBCMD_BC 0
+#define KDBCMD_BE 1
+#define KDBCMD_BD 2
+
+ if (strcmp(argv[0], "be") == 0)
+ cmd = KDBCMD_BE;
+ else if (strcmp(argv[0], "bd") == 0)
+ cmd = KDBCMD_BD;
+ else
+ cmd = KDBCMD_BC;
+
+ if (argc != 1)
+ return KDB_ARGCOUNT;
+
+ if (strcmp(argv[1], "*") == 0) {
+ lowbp = 0;
+ highbp = KDB_MAXBPT;
+ } else {
+ diag = kdbgetularg(argv[1], &addr);
+ if (diag)
+ return diag;
+
+ /*
+ * For addresses less than the maximum breakpoint number,
+ * assume that the breakpoint number is desired.
+ */
+ if (addr < KDB_MAXBPT) {
+ bp = &kdb_breakpoints[addr];
+ lowbp = highbp = addr;
+ highbp++;
+ } else {
+ for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT;
+ i++, bp++) {
+ if (bp->bp_addr == addr) {
+ lowbp = highbp = i;
+ highbp++;
+ break;
+ }
+ }
+ }
+ }
+
+ /*
+ * Now operate on the set of breakpoints matching the input
+ * criteria (either '*' for all, or an individual breakpoint).
+ */
+ for (bp = &kdb_breakpoints[lowbp], i = lowbp;
+ i < highbp;
+ i++, bp++) {
+ if (bp->bp_free)
+ continue;
+
+ done++;
+
+ switch (cmd) {
+ case KDBCMD_BC:
+ bp->bp_enabled = 0;
+
+ kdb_printf("Breakpoint %d at "
+ kdb_bfd_vma_fmt " cleared\n",
+ i, bp->bp_addr);
+
+ bp->bp_addr = 0;
+ bp->bp_free = 1;
+
+ break;
+ case KDBCMD_BE:
+ bp->bp_enabled = 1;
+
+ kdb_printf("Breakpoint %d at "
+ kdb_bfd_vma_fmt " enabled",
+ i, bp->bp_addr);
+
+ kdb_printf("\n");
+ break;
+ case KDBCMD_BD:
+ if (!bp->bp_enabled)
+ break;
+
+ bp->bp_enabled = 0;
+
+ kdb_printf("Breakpoint %d at "
+ kdb_bfd_vma_fmt " disabled\n",
+ i, bp->bp_addr);
+
+ break;
+ }
+ if (bp->bp_delay && (cmd == KDBCMD_BC || cmd == KDBCMD_BD)) {
+ bp->bp_delay = 0;
+ KDB_STATE_CLEAR(SSBPT);
+ }
+ }
+
+ return (!done) ? KDB_BPTNOTFOUND : 0;
+}
+
+/*
+ * kdb_ss
+ *
+ * Process the 'ss' (Single Step) and 'ssb' (Single Step to Branch)
+ * commands.
+ *
+ * ss
+ * ssb
+ *
+ * Parameters:
+ * argc Argument count
+ * argv Argument vector
+ * Outputs:
+ * None.
+ * Returns:
+ * KDB_CMD_SS[B] for success, a kdb error if failure.
+ * Locking:
+ * None.
+ * Remarks:
+ *
+ * Set the arch specific option to trigger a debug trap after the next
+ * instruction.
+ *
+ * For 'ssb', set the trace flag in the debug trap handler
+ * after printing the current insn and return directly without
+ * invoking the kdb command processor, until a branch instruction
+ * is encountered.
+ */
+
+static int kdb_ss(int argc, const char **argv)
+{
+ int ssb = 0;
+
+ ssb = (strcmp(argv[0], "ssb") == 0);
+ if (argc != 0)
+ return KDB_ARGCOUNT;
+ /*
+ * Set trace flag and go.
+ */
+ KDB_STATE_SET(DOING_SS);
+ if (ssb) {
+ KDB_STATE_SET(DOING_SSB);
+ return KDB_CMD_SSB;
+ }
+ return KDB_CMD_SS;
+}
+
+/* Initialize the breakpoint table and register breakpoint commands. */
+
+void __init kdb_initbptab(void)
+{
+ int i;
+ kdb_bp_t *bp;
+
+ /*
+ * First time initialization.
+ */
+ memset(&kdb_breakpoints, '\0', sizeof(kdb_breakpoints));
+
+ for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++)
+ bp->bp_free = 1;
+
+ kdb_register_repeat("bp", kdb_bp, "[<vaddr>]",
+ "Set/Display breakpoints", 0, KDB_REPEAT_NO_ARGS);
+ kdb_register_repeat("bl", kdb_bp, "[<vaddr>]",
+ "Display breakpoints", 0, KDB_REPEAT_NO_ARGS);
+ if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT)
+ kdb_register_repeat("bph", kdb_bp, "[<vaddr>]",
+ "[datar [length]|dataw [length]] Set hw brk", 0, KDB_REPEAT_NO_ARGS);
+ kdb_register_repeat("bc", kdb_bc, "<bpnum>",
+ "Clear Breakpoint", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("be", kdb_bc, "<bpnum>",
+ "Enable Breakpoint", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("bd", kdb_bc, "<bpnum>",
+ "Disable Breakpoint", 0, KDB_REPEAT_NONE);
+
+ kdb_register_repeat("ss", kdb_ss, "",
+ "Single Step", 1, KDB_REPEAT_NO_ARGS);
+ kdb_register_repeat("ssb", kdb_ss, "",
+ "Single step to branch/call", 0, KDB_REPEAT_NO_ARGS);
+ /*
+ * Architecture dependent initialization.
+ */
+}
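
The kdb_register_repeat() calls above are the generic way commands enter the kdb command table; the handler signature is the same int (*)(int argc, const char **argv) used by kdb_bp() and kdb_bc(). A minimal sketch of wiring up an extra command the same way (hypothetical command name and handler, assuming the declarations from linux/kdb.h and kdb_private.h):

/* Hypothetical example command: prints a short test message. */
static int kdb_hello(int argc, const char **argv)
{
	if (argc > 1)
		return KDB_ARGCOUNT;
	kdb_printf("hello from kdb (argc=%d)\n", argc);
	return 0;
}

static void __init kdb_hello_register(void)
{
	kdb_register_repeat("hello", kdb_hello, "[<arg>]",
		"Print a test message", 0, KDB_REPEAT_NONE);
}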
diff --git a/kernel/debug/kdb/kdb_bt.c b/kernel/debug/kdb/kdb_bt.c
new file mode 100644
index 000000000000..2f62fe85f16a
--- /dev/null
+++ b/kernel/debug/kdb/kdb_bt.c
@@ -0,0 +1,210 @@
+/*
+ * Kernel Debugger Architecture Independent Stack Traceback
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2009 Wind River Systems, Inc. All Rights Reserved.
+ */
+
+#include <linux/ctype.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/kdb.h>
+#include <linux/nmi.h>
+#include <asm/system.h>
+#include "kdb_private.h"
+
+
+static void kdb_show_stack(struct task_struct *p, void *addr)
+{
+ int old_lvl = console_loglevel;
+ console_loglevel = 15;
+ kdb_trap_printk++;
+ kdb_set_current_task(p);
+ if (addr) {
+ show_stack((struct task_struct *)p, addr);
+ } else if (kdb_current_regs) {
+#ifdef CONFIG_X86
+ show_stack(p, &kdb_current_regs->sp);
+#else
+ show_stack(p, NULL);
+#endif
+ } else {
+ show_stack(p, NULL);
+ }
+ console_loglevel = old_lvl;
+ kdb_trap_printk--;
+}
+
+/*
+ * kdb_bt
+ *
+ * This function implements the 'bt' command. Print a stack
+ * traceback.
+ *
+ * bt [<address-expression>] (addr-exp is for alternate stacks)
+ * btp <pid> Kernel stack for <pid>
+ * btt <address-expression> Kernel stack for task structure at
+ * <address-expression>
+ * bta [DRSTCZEUIMA] All useful processes, optionally
+ * filtered by state
+ * btc [<cpu>] The current process on one cpu,
+ * default is all cpus
+ *
+ * bt <address-expression> refers to an address on the stack; that location
+ * is assumed to contain a return address.
+ *
+ * btt <address-expression> refers to the address of a struct task.
+ *
+ * Inputs:
+ * argc argument count
+ * argv argument vector
+ * Outputs:
+ * None.
+ * Returns:
+ * zero for success, a kdb diagnostic if error
+ * Locking:
+ * none.
+ * Remarks:
+ * Backtrace works best when the code uses frame pointers. But even
+ * without frame pointers we should get a reasonable trace.
+ *
+ * mds comes in handy when examining the stack to do a manual traceback or
+ * to get a starting point for bt <address-expression>.
+ */
+
+static int
+kdb_bt1(struct task_struct *p, unsigned long mask,
+ int argcount, int btaprompt)
+{
+ char buffer[2];
+ if (kdb_getarea(buffer[0], (unsigned long)p) ||
+ kdb_getarea(buffer[0], (unsigned long)(p+1)-1))
+ return KDB_BADADDR;
+ if (!kdb_task_state(p, mask))
+ return 0;
+ kdb_printf("Stack traceback for pid %d\n", p->pid);
+ kdb_ps1(p);
+ kdb_show_stack(p, NULL);
+ if (btaprompt) {
+ kdb_getstr(buffer, sizeof(buffer),
+ "Enter <q> to end, <cr> to continue:");
+ if (buffer[0] == 'q') {
+ kdb_printf("\n");
+ return 1;
+ }
+ }
+ touch_nmi_watchdog();
+ return 0;
+}
+
+int
+kdb_bt(int argc, const char **argv)
+{
+ int diag;
+ int argcount = 5;
+ int btaprompt = 1;
+ int nextarg;
+ unsigned long addr;
+ long offset;
+
+ kdbgetintenv("BTARGS", &argcount); /* Arguments to print */
+ kdbgetintenv("BTAPROMPT", &btaprompt); /* Prompt after each
+ * proc in bta */
+
+ if (strcmp(argv[0], "bta") == 0) {
+ struct task_struct *g, *p;
+ unsigned long cpu;
+ unsigned long mask = kdb_task_state_string(argc ? argv[1] :
+ NULL);
+ if (argc == 0)
+ kdb_ps_suppressed();
+ /* Run the active tasks first */
+ for_each_online_cpu(cpu) {
+ p = kdb_curr_task(cpu);
+ if (kdb_bt1(p, mask, argcount, btaprompt))
+ return 0;
+ }
+ /* Now the inactive tasks */
+ kdb_do_each_thread(g, p) {
+ if (task_curr(p))
+ continue;
+ if (kdb_bt1(p, mask, argcount, btaprompt))
+ return 0;
+ } kdb_while_each_thread(g, p);
+ } else if (strcmp(argv[0], "btp") == 0) {
+ struct task_struct *p;
+ unsigned long pid;
+ if (argc != 1)
+ return KDB_ARGCOUNT;
+ diag = kdbgetularg((char *)argv[1], &pid);
+ if (diag)
+ return diag;
+ p = find_task_by_pid_ns(pid, &init_pid_ns);
+ if (p) {
+ kdb_set_current_task(p);
+ return kdb_bt1(p, ~0UL, argcount, 0);
+ }
+ kdb_printf("No process with pid == %ld found\n", pid);
+ return 0;
+ } else if (strcmp(argv[0], "btt") == 0) {
+ if (argc != 1)
+ return KDB_ARGCOUNT;
+ diag = kdbgetularg((char *)argv[1], &addr);
+ if (diag)
+ return diag;
+ kdb_set_current_task((struct task_struct *)addr);
+ return kdb_bt1((struct task_struct *)addr, ~0UL, argcount, 0);
+ } else if (strcmp(argv[0], "btc") == 0) {
+ unsigned long cpu = ~0;
+ struct task_struct *save_current_task = kdb_current_task;
+ char buf[80];
+ if (argc > 1)
+ return KDB_ARGCOUNT;
+ if (argc == 1) {
+ diag = kdbgetularg((char *)argv[1], &cpu);
+ if (diag)
+ return diag;
+ }
+ /* Recursive use of kdb_parse, do not use argv after
+ * this point */
+ argv = NULL;
+ if (cpu != ~0) {
+ if (cpu >= num_possible_cpus() || !cpu_online(cpu)) {
+ kdb_printf("no process for cpu %ld\n", cpu);
+ return 0;
+ }
+ sprintf(buf, "btt 0x%p\n", KDB_TSK(cpu));
+ kdb_parse(buf);
+ return 0;
+ }
+ kdb_printf("btc: cpu status: ");
+ kdb_parse("cpu\n");
+ for_each_online_cpu(cpu) {
+ sprintf(buf, "btt 0x%p\n", KDB_TSK(cpu));
+ kdb_parse(buf);
+ touch_nmi_watchdog();
+ }
+ kdb_set_current_task(save_current_task);
+ return 0;
+ } else {
+ if (argc) {
+ nextarg = 1;
+ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr,
+ &offset, NULL);
+ if (diag)
+ return diag;
+ kdb_show_stack(kdb_current_task, (void *)addr);
+ return 0;
+ } else {
+ return kdb_bt1(kdb_current_task, ~0UL, argcount, 0);
+ }
+ }
+
+ /* NOTREACHED */
+ return 0;
+}
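
kdb_bt1() sanity-checks the supplied task pointer by probing both the first and the last byte of the task_struct with kdb_getarea() before touching it. The same pattern, pulled out as a stand-alone sketch (hypothetical helper name; kdb_getarea() used as above):

/* Hypothetical helper mirroring the check at the top of kdb_bt1():
 * a range counts as readable only if both its first and last byte
 * can be copied out with kdb_getarea().
 */
static int kdb_range_readable(unsigned long addr, size_t len)
{
	char c;

	if (kdb_getarea(c, addr) || kdb_getarea(c, addr + len - 1))
		return 0;
	return 1;
}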
diff --git a/kernel/debug/kdb/kdb_cmds b/kernel/debug/kdb/kdb_cmds
new file mode 100644
index 000000000000..56c88e4db309
--- /dev/null
+++ b/kernel/debug/kdb/kdb_cmds
@@ -0,0 +1,35 @@
+# Initial commands for kdb, alter to suit your needs.
+# These commands are executed in kdb_init() context, no SMP, no
+# processes. Commands that require process data (including stack or
+# registers) are not reliable this early. set and bp commands should
+# be safe. Global breakpoint commands affect each cpu as it is booted.
+
+# Standard debugging information for first level support, just type archkdb
+# or archkdbcpu or archkdbshort at the kdb prompt.
+
+defcmd dumpcommon "" "Common kdb debugging"
+ set BTAPROMPT 0
+ set LINES 10000
+ -summary
+ -cpu
+ -ps
+ -dmesg 600
+ -bt
+endefcmd
+
+defcmd dumpall "" "First line debugging"
+ set BTSYMARG 1
+ set BTARGS 9
+ pid R
+ -dumpcommon
+ -bta
+endefcmd
+
+defcmd dumpcpu "" "Same as dumpall but only tasks on cpus"
+ set BTSYMARG 1
+ set BTARGS 9
+ pid R
+ -dumpcommon
+ -btc
+endefcmd
+
diff --git a/kernel/debug/kdb/kdb_debugger.c b/kernel/debug/kdb/kdb_debugger.c
new file mode 100644
index 000000000000..bf6e8270e957
--- /dev/null
+++ b/kernel/debug/kdb/kdb_debugger.c
@@ -0,0 +1,169 @@
+/*
+ * Created by: Jason Wessel <jason.wessel@windriver.com>
+ *
+ * Copyright (c) 2009 Wind River Systems, Inc. All Rights Reserved.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kgdb.h>
+#include <linux/kdb.h>
+#include <linux/kdebug.h>
+#include "kdb_private.h"
+#include "../debug_core.h"
+
+/*
+ * KDB interface to KGDB internals
+ */
+get_char_func kdb_poll_funcs[] = {
+ dbg_io_get_char,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+};
+EXPORT_SYMBOL_GPL(kdb_poll_funcs);
+
+int kdb_poll_idx = 1;
+EXPORT_SYMBOL_GPL(kdb_poll_idx);
+
+int kdb_stub(struct kgdb_state *ks)
+{
+ int error = 0;
+ kdb_bp_t *bp;
+ unsigned long addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
+ kdb_reason_t reason = KDB_REASON_OOPS;
+ kdb_dbtrap_t db_result = KDB_DB_NOBPT;
+ int i;
+
+ if (KDB_STATE(REENTRY)) {
+ reason = KDB_REASON_SWITCH;
+ KDB_STATE_CLEAR(REENTRY);
+ addr = instruction_pointer(ks->linux_regs);
+ }
+ ks->pass_exception = 0;
+ if (atomic_read(&kgdb_setting_breakpoint))
+ reason = KDB_REASON_KEYBOARD;
+
+ for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) {
+ if ((bp->bp_enabled) && (bp->bp_addr == addr)) {
+ reason = KDB_REASON_BREAK;
+ db_result = KDB_DB_BPT;
+ if (addr != instruction_pointer(ks->linux_regs))
+ kgdb_arch_set_pc(ks->linux_regs, addr);
+ break;
+ }
+ }
+ if (reason == KDB_REASON_BREAK || reason == KDB_REASON_SWITCH) {
+ for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) {
+ if (bp->bp_free)
+ continue;
+ if (bp->bp_addr == addr) {
+ bp->bp_delay = 1;
+ bp->bp_delayed = 1;
+ /*
+ * SSBPT is set when the kernel debugger must single step a
+ * task in order to re-establish an instruction breakpoint
+ * which uses the instruction replacement mechanism. It is
+ * cleared by any action that removes the need to single-step
+ * the breakpoint.
+ */
+ reason = KDB_REASON_BREAK;
+ db_result = KDB_DB_BPT;
+ KDB_STATE_SET(SSBPT);
+ break;
+ }
+ }
+ }
+
+ if (reason != KDB_REASON_BREAK && ks->ex_vector == 0 &&
+ ks->signo == SIGTRAP) {
+ reason = KDB_REASON_SSTEP;
+ db_result = KDB_DB_BPT;
+ }
+ /* Set initial kdb state variables */
+ KDB_STATE_CLEAR(KGDB_TRANS);
+ kdb_initial_cpu = ks->cpu;
+ kdb_current_task = kgdb_info[ks->cpu].task;
+ kdb_current_regs = kgdb_info[ks->cpu].debuggerinfo;
+ /* Remove any breakpoints as needed by kdb and clear single step */
+ kdb_bp_remove();
+ KDB_STATE_CLEAR(DOING_SS);
+ KDB_STATE_CLEAR(DOING_SSB);
+ KDB_STATE_SET(PAGER);
+ /* zero out any offline cpu data */
+ for_each_present_cpu(i) {
+ if (!cpu_online(i)) {
+ kgdb_info[i].debuggerinfo = NULL;
+ kgdb_info[i].task = NULL;
+ }
+ }
+ if (ks->err_code == DIE_OOPS || reason == KDB_REASON_OOPS) {
+ ks->pass_exception = 1;
+ KDB_FLAG_SET(CATASTROPHIC);
+ }
+ kdb_initial_cpu = ks->cpu;
+ if (KDB_STATE(SSBPT) && reason == KDB_REASON_SSTEP) {
+ KDB_STATE_CLEAR(SSBPT);
+ KDB_STATE_CLEAR(DOING_SS);
+ } else {
+ /* Start kdb main loop */
+ error = kdb_main_loop(KDB_REASON_ENTER, reason,
+ ks->err_code, db_result, ks->linux_regs);
+ }
+ /*
+ * Upon exit from the kdb main loop setup break points and restart
+ * the system based on the requested continue state
+ */
+ kdb_initial_cpu = -1;
+ kdb_current_task = NULL;
+ kdb_current_regs = NULL;
+ KDB_STATE_CLEAR(PAGER);
+ kdbnearsym_cleanup();
+ if (error == KDB_CMD_KGDB) {
+ if (KDB_STATE(DOING_KGDB) || KDB_STATE(DOING_KGDB2)) {
+ /*
+ * This is interface glue that allows kdb to transition into
+ * the gdb stub. To do so, the '?' or '' gdb serial packet
+ * response is processed here, and then control is passed to
+ * the gdbstub.
+ */
+ if (KDB_STATE(DOING_KGDB))
+ gdbstub_state(ks, "?");
+ else
+ gdbstub_state(ks, "");
+ KDB_STATE_CLEAR(DOING_KGDB);
+ KDB_STATE_CLEAR(DOING_KGDB2);
+ }
+ return DBG_PASS_EVENT;
+ }
+ kdb_bp_install(ks->linux_regs);
+ dbg_activate_sw_breakpoints();
+ /* Set the exit state to a single step or a continue */
+ if (KDB_STATE(DOING_SS))
+ gdbstub_state(ks, "s");
+ else
+ gdbstub_state(ks, "c");
+
+ KDB_FLAG_CLEAR(CATASTROPHIC);
+
+ /* Invoke arch specific exception handling prior to system resume */
+ kgdb_info[ks->cpu].ret_state = gdbstub_state(ks, "e");
+ if (ks->pass_exception)
+ kgdb_info[ks->cpu].ret_state = 1;
+ if (error == KDB_CMD_CPU) {
+ KDB_STATE_SET(REENTRY);
+ /*
+ * Force clear the single step bit because kdb emulates this
+ * differently vs the gdbstub
+ */
+ kgdb_single_step = 0;
+ dbg_deactivate_sw_breakpoints();
+ return DBG_SWITCH_CPU_EVENT;
+ }
+ return kgdb_info[ks->cpu].ret_state;
+}
+
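kdb_poll_funcs[] above is the NULL-terminated table that kdb_read_get_key() in kdb_io.c (later in this patch) walks when polling for a key; slot 0 is dbg_io_get_char, and kdb_poll_idx is exported so that additional pollers can be hooked in. A hedged sketch of such a hook (hypothetical helper; assumes a free slot remains so the table stays NULL-terminated):

/* Hypothetical helper: append a polling routine to kdb_poll_funcs[],
 * keeping at least one trailing NULL slot so the scan loop in
 * kdb_read_get_key() still terminates.
 */
static int kdb_add_poll_func(get_char_func f)
{
	if (kdb_poll_idx >= ARRAY_SIZE(kdb_poll_funcs) - 1)
		return -1;
	kdb_poll_funcs[kdb_poll_idx++] = f;
	return 0;
}
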
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
new file mode 100644
index 000000000000..c9b7f4f90bba
--- /dev/null
+++ b/kernel/debug/kdb/kdb_io.c
@@ -0,0 +1,826 @@
+/*
+ * Kernel Debugger Architecture Independent Console I/O handler
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1999-2006 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2009 Wind River Systems, Inc. All Rights Reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/kdev_t.h>
+#include <linux/console.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/nmi.h>
+#include <linux/delay.h>
+#include <linux/kgdb.h>
+#include <linux/kdb.h>
+#include <linux/kallsyms.h>
+#include "kdb_private.h"
+
+#define CMD_BUFLEN 256
+char kdb_prompt_str[CMD_BUFLEN];
+
+int kdb_trap_printk;
+
+static void kgdb_transition_check(char *buffer)
+{
+ int slen = strlen(buffer);
+ if (strncmp(buffer, "$?#3f", slen) != 0 &&
+ strncmp(buffer, "$qSupported#37", slen) != 0 &&
+ strncmp(buffer, "+$qSupported#37", slen) != 0) {
+ KDB_STATE_SET(KGDB_TRANS);
+ kdb_printf("%s", buffer);
+ }
+}
+
+static int kdb_read_get_key(char *buffer, size_t bufsize)
+{
+#define ESCAPE_UDELAY 1000
+#define ESCAPE_DELAY (2*1000000/ESCAPE_UDELAY) /* 2 seconds worth of udelays */
+ char escape_data[5]; /* longest vt100 escape sequence is 4 bytes */
+ char *ped = escape_data;
+ int escape_delay = 0;
+ get_char_func *f, *f_escape = NULL;
+ int key;
+
+ for (f = &kdb_poll_funcs[0]; ; ++f) {
+ if (*f == NULL) {
+ /* Reset NMI watchdog once per poll loop */
+ touch_nmi_watchdog();
+ f = &kdb_poll_funcs[0];
+ }
+ if (escape_delay == 2) {
+ *ped = '\0';
+ ped = escape_data;
+ --escape_delay;
+ }
+ if (escape_delay == 1) {
+ key = *ped++;
+ if (!*ped)
+ --escape_delay;
+ break;
+ }
+ key = (*f)();
+ if (key == -1) {
+ if (escape_delay) {
+ udelay(ESCAPE_UDELAY);
+ --escape_delay;
+ }
+ continue;
+ }
+ if (bufsize <= 2) {
+ if (key == '\r')
+ key = '\n';
+ *buffer++ = key;
+ *buffer = '\0';
+ return -1;
+ }
+ if (escape_delay == 0 && key == '\e') {
+ escape_delay = ESCAPE_DELAY;
+ ped = escape_data;
+ f_escape = f;
+ }
+ if (escape_delay) {
+ *ped++ = key;
+ if (f_escape != f) {
+ escape_delay = 2;
+ continue;
+ }
+ if (ped - escape_data == 1) {
+ /* \e */
+ continue;
+ } else if (ped - escape_data == 2) {
+ /* \e<something> */
+ if (key != '[')
+ escape_delay = 2;
+ continue;
+ } else if (ped - escape_data == 3) {
+ /* \e[<something> */
+ int mapkey = 0;
+ switch (key) {
+ case 'A': /* \e[A, up arrow */
+ mapkey = 16;
+ break;
+ case 'B': /* \e[B, down arrow */
+ mapkey = 14;
+ break;
+ case 'C': /* \e[C, right arrow */
+ mapkey = 6;
+ break;
+ case 'D': /* \e[D, left arrow */
+ mapkey = 2;
+ break;
+ case '1': /* dropthrough */
+ case '3': /* dropthrough */
+ /* \e[<1,3,4>], may be home, del, end */
+ case '4':
+ mapkey = -1;
+ break;
+ }
+ if (mapkey != -1) {
+ if (mapkey > 0) {
+ escape_data[0] = mapkey;
+ escape_data[1] = '\0';
+ }
+ escape_delay = 2;
+ }
+ continue;
+ } else if (ped - escape_data == 4) {
+ /* \e[<1,3,4><something> */
+ int mapkey = 0;
+ if (key == '~') {
+ switch (escape_data[2]) {
+ case '1': /* \e[1~, home */
+ mapkey = 1;
+ break;
+ case '3': /* \e[3~, del */
+ mapkey = 4;
+ break;
+ case '4': /* \e[4~, end */
+ mapkey = 5;
+ break;
+ }
+ }
+ if (mapkey > 0) {
+ escape_data[0] = mapkey;
+ escape_data[1] = '\0';
+ }
+ escape_delay = 2;
+ continue;
+ }
+ }
+ break; /* A key to process */
+ }
+ return key;
+}
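
The decoder above reduces multi-byte vt100 escape sequences to the same single-byte control codes that kdb_get_kbd_char() (in kdb_keyboard.c, below) produces, so kdb_read() only ever has to switch on one set of values. Summarised as a table, purely for reference (the mapping is taken from the switch statements above):

/* vt100 sequence -> internal key code handled by kdb_read() */
struct kdb_vt100_key {
	const char *seq;
	int code;
};

static const struct kdb_vt100_key kdb_vt100_keys[] = {
	{ "\e[A",  16 },	/* up arrow */
	{ "\e[B",  14 },	/* down arrow */
	{ "\e[C",   6 },	/* right arrow */
	{ "\e[D",   2 },	/* left arrow */
	{ "\e[1~",  1 },	/* home */
	{ "\e[3~",  4 },	/* delete */
	{ "\e[4~",  5 },	/* end */
};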
+
+/*
+ * kdb_read
+ *
+ * This function reads a string of characters, terminated by
+ * a newline, or by reaching the end of the supplied buffer,
+ * from the current kernel debugger console device.
+ * Parameters:
+ * buffer - Address of character buffer to receive input characters.
+ * bufsize - size, in bytes, of the character buffer
+ * Returns:
+ * Returns a pointer to the buffer containing the received
+ * character string. This string will be terminated by a
+ * newline character.
+ * Locking:
+ * No locks are required to be held upon entry to this
+ * function. It is not reentrant - it relies on the fact
+ * that kdb runs on only one "master debug" cpu at a time.
+ * Remarks:
+ *
+ * The buffer size must be >= 2. A buffer size of 2 means that the caller only
+ * wants a single key.
+ *
+ * An escape key could be the start of a vt100 control sequence such as \e[D
+ * (left arrow) or it could be a character in its own right. The standard
+ * method for detecting the difference is to wait for 2 seconds to see if there
+ * are any other characters. kdb is complicated by the lack of a timer service
+ * (interrupts are off), by multiple input sources and by the need to sometimes
+ * return after just one key. Escape sequence processing has to be done as
+ * states in the polling loop.
+ */
+
+static char *kdb_read(char *buffer, size_t bufsize)
+{
+ char *cp = buffer;
+ char *bufend = buffer+bufsize-2; /* Reserve space for newline
+ * and null byte */
+ char *lastchar;
+ char *p_tmp;
+ char tmp;
+ static char tmpbuffer[CMD_BUFLEN];
+ int len = strlen(buffer);
+ int len_tmp;
+ int tab = 0;
+ int count;
+ int i;
+ int diag, dtab_count;
+ int key;
+
+
+ diag = kdbgetintenv("DTABCOUNT", &dtab_count);
+ if (diag)
+ dtab_count = 30;
+
+ if (len > 0) {
+ cp += len;
+ if (*(buffer+len-1) == '\n')
+ cp--;
+ }
+
+ lastchar = cp;
+ *cp = '\0';
+ kdb_printf("%s", buffer);
+poll_again:
+ key = kdb_read_get_key(buffer, bufsize);
+ if (key == -1)
+ return buffer;
+ if (key != 9)
+ tab = 0;
+ switch (key) {
+ case 8: /* backspace */
+ if (cp > buffer) {
+ if (cp < lastchar) {
+ memcpy(tmpbuffer, cp, lastchar - cp);
+ memcpy(cp-1, tmpbuffer, lastchar - cp);
+ }
+ *(--lastchar) = '\0';
+ --cp;
+ kdb_printf("\b%s \r", cp);
+ tmp = *cp;
+ *cp = '\0';
+ kdb_printf(kdb_prompt_str);
+ kdb_printf("%s", buffer);
+ *cp = tmp;
+ }
+ break;
+ case 13: /* enter */
+ *lastchar++ = '\n';
+ *lastchar++ = '\0';
+ kdb_printf("\n");
+ return buffer;
+ case 4: /* Del */
+ if (cp < lastchar) {
+ memcpy(tmpbuffer, cp+1, lastchar - cp - 1);
+ memcpy(cp, tmpbuffer, lastchar - cp - 1);
+ *(--lastchar) = '\0';
+ kdb_printf("%s \r", cp);
+ tmp = *cp;
+ *cp = '\0';
+ kdb_printf(kdb_prompt_str);
+ kdb_printf("%s", buffer);
+ *cp = tmp;
+ }
+ break;
+ case 1: /* Home */
+ if (cp > buffer) {
+ kdb_printf("\r");
+ kdb_printf(kdb_prompt_str);
+ cp = buffer;
+ }
+ break;
+ case 5: /* End */
+ if (cp < lastchar) {
+ kdb_printf("%s", cp);
+ cp = lastchar;
+ }
+ break;
+ case 2: /* Left */
+ if (cp > buffer) {
+ kdb_printf("\b");
+ --cp;
+ }
+ break;
+ case 14: /* Down */
+ memset(tmpbuffer, ' ',
+ strlen(kdb_prompt_str) + (lastchar-buffer));
+ *(tmpbuffer+strlen(kdb_prompt_str) +
+ (lastchar-buffer)) = '\0';
+ kdb_printf("\r%s\r", tmpbuffer);
+ *lastchar = (char)key;
+ *(lastchar+1) = '\0';
+ return lastchar;
+ case 6: /* Right */
+ if (cp < lastchar) {
+ kdb_printf("%c", *cp);
+ ++cp;
+ }
+ break;
+ case 16: /* Up */
+ memset(tmpbuffer, ' ',
+ strlen(kdb_prompt_str) + (lastchar-buffer));
+ *(tmpbuffer+strlen(kdb_prompt_str) +
+ (lastchar-buffer)) = '\0';
+ kdb_printf("\r%s\r", tmpbuffer);
+ *lastchar = (char)key;
+ *(lastchar+1) = '\0';
+ return lastchar;
+ case 9: /* Tab */
+ if (tab < 2)
+ ++tab;
+ p_tmp = buffer;
+ while (*p_tmp == ' ')
+ p_tmp++;
+ if (p_tmp > cp)
+ break;
+ memcpy(tmpbuffer, p_tmp, cp-p_tmp);
+ *(tmpbuffer + (cp-p_tmp)) = '\0';
+ p_tmp = strrchr(tmpbuffer, ' ');
+ if (p_tmp)
+ ++p_tmp;
+ else
+ p_tmp = tmpbuffer;
+ len = strlen(p_tmp);
+ count = kallsyms_symbol_complete(p_tmp,
+ sizeof(tmpbuffer) -
+ (p_tmp - tmpbuffer));
+ if (tab == 2 && count > 0) {
+ kdb_printf("\n%d symbols are found.", count);
+ if (count > dtab_count) {
+ count = dtab_count;
+ kdb_printf(" But only first %d symbols will"
+ " be printed.\nYou can change the"
+ " environment variable DTABCOUNT.",
+ count);
+ }
+ kdb_printf("\n");
+ for (i = 0; i < count; i++) {
+ if (kallsyms_symbol_next(p_tmp, i) < 0)
+ break;
+ kdb_printf("%s ", p_tmp);
+ *(p_tmp + len) = '\0';
+ }
+ if (i >= dtab_count)
+ kdb_printf("...");
+ kdb_printf("\n");
+ kdb_printf(kdb_prompt_str);
+ kdb_printf("%s", buffer);
+ } else if (tab != 2 && count > 0) {
+ len_tmp = strlen(p_tmp);
+ strncpy(p_tmp+len_tmp, cp, lastchar-cp+1);
+ len_tmp = strlen(p_tmp);
+ strncpy(cp, p_tmp+len, len_tmp-len + 1);
+ len = len_tmp - len;
+ kdb_printf("%s", cp);
+ cp += len;
+ lastchar += len;
+ }
+ kdb_nextline = 1; /* reset output line number */
+ break;
+ default:
+ if (key >= 32 && lastchar < bufend) {
+ if (cp < lastchar) {
+ memcpy(tmpbuffer, cp, lastchar - cp);
+ memcpy(cp+1, tmpbuffer, lastchar - cp);
+ *++lastchar = '\0';
+ *cp = key;
+ kdb_printf("%s\r", cp);
+ ++cp;
+ tmp = *cp;
+ *cp = '\0';
+ kdb_printf(kdb_prompt_str);
+ kdb_printf("%s", buffer);
+ *cp = tmp;
+ } else {
+ *++lastchar = '\0';
+ *cp++ = key;
+ /* The kgdb transition check will hide
+ * printed characters if we think that
+ * kgdb is connecting, until the check
+ * fails */
+ if (!KDB_STATE(KGDB_TRANS))
+ kgdb_transition_check(buffer);
+ else
+ kdb_printf("%c", key);
+ }
+ /* Special escape to kgdb */
+ if (lastchar - buffer >= 5 &&
+ strcmp(lastchar - 5, "$?#3f") == 0) {
+ strcpy(buffer, "kgdb");
+ KDB_STATE_SET(DOING_KGDB);
+ return buffer;
+ }
+ if (lastchar - buffer >= 14 &&
+ strcmp(lastchar - 14, "$qSupported#37") == 0) {
+ strcpy(buffer, "kgdb");
+ KDB_STATE_SET(DOING_KGDB2);
+ return buffer;
+ }
+ }
+ break;
+ }
+ goto poll_again;
+}
+
+/*
+ * kdb_getstr
+ *
+ * Print the prompt string and read a command from the
+ * input device.
+ *
+ * Parameters:
+ * buffer Address of buffer to receive command
+ * bufsize Size of buffer in bytes
+ * prompt Pointer to string to use as prompt string
+ * Returns:
+ * Pointer to command buffer.
+ * Locking:
+ * None.
+ * Remarks:
+ * For SMP kernels, the processor number will be
+ * substituted for %d, %x or %o in the prompt.
+ */
+
+char *kdb_getstr(char *buffer, size_t bufsize, char *prompt)
+{
+ if (prompt && kdb_prompt_str != prompt)
+ strncpy(kdb_prompt_str, prompt, CMD_BUFLEN);
+ kdb_printf(kdb_prompt_str);
+ kdb_nextline = 1; /* Prompt and input resets line number */
+ return kdb_read(buffer, bufsize);
+}
+
+/*
+ * kdb_input_flush
+ *
+ * Get rid of any buffered console input.
+ *
+ * Parameters:
+ * none
+ * Returns:
+ * nothing
+ * Locking:
+ * none
+ * Remarks:
+ * Call this function whenever you want to flush input. If there is any
+ * outstanding input, it ignores all characters until there has been no
+ * data for approximately 1ms.
+ */
+
+static void kdb_input_flush(void)
+{
+ get_char_func *f;
+ int res;
+ int flush_delay = 1;
+ while (flush_delay) {
+ flush_delay--;
+empty:
+ touch_nmi_watchdog();
+ for (f = &kdb_poll_funcs[0]; *f; ++f) {
+ res = (*f)();
+ if (res != -1) {
+ flush_delay = 1;
+ goto empty;
+ }
+ }
+ if (flush_delay)
+ mdelay(1);
+ }
+}
+
+/*
+ * kdb_printf
+ *
+ * Print a string to the output device(s).
+ *
+ * Parameters:
+ * printf-like format and optional args.
+ * Returns:
+ * 0
+ * Locking:
+ * None.
+ * Remarks:
+ * use 'kdbcons->write()' to avoid polluting 'log_buf' with
+ * kdb output.
+ *
+ * If the user is doing a cmd args | grep srch
+ * then kdb_grepping_flag is set.
+ * In that case we need to accumulate full lines (ending in \n) before
+ * searching for the pattern.
+ */
+
+static char kdb_buffer[256]; /* A bit too big to go on stack */
+static char *next_avail = kdb_buffer;
+static int size_avail;
+static int suspend_grep;
+
+/*
+ * search arg1 to see if it contains arg2
+ * (kdmain.c provides flags for ^pat and pat$)
+ *
+ * return 1 for found, 0 for not found
+ */
+static int kdb_search_string(char *searched, char *searchfor)
+{
+ char firstchar, *cp;
+ int len1, len2;
+
+ /* not counting the newline at the end of "searched" */
+ len1 = strlen(searched)-1;
+ len2 = strlen(searchfor);
+ if (len1 < len2)
+ return 0;
+ if (kdb_grep_leading && kdb_grep_trailing && len1 != len2)
+ return 0;
+ if (kdb_grep_leading) {
+ if (!strncmp(searched, searchfor, len2))
+ return 1;
+ } else if (kdb_grep_trailing) {
+ if (!strncmp(searched+len1-len2, searchfor, len2))
+ return 1;
+ } else {
+ firstchar = *searchfor;
+ cp = searched;
+ while ((cp = strchr(cp, firstchar))) {
+ if (!strncmp(cp, searchfor, len2))
+ return 1;
+ cp++;
+ }
+ }
+ return 0;
+}
+
+int vkdb_printf(const char *fmt, va_list ap)
+{
+ int diag;
+ int linecount;
+ int logging, saved_loglevel = 0;
+ int saved_trap_printk;
+ int got_printf_lock = 0;
+ int retlen = 0;
+ int fnd, len;
+ char *cp, *cp2, *cphold = NULL, replaced_byte = ' ';
+ char *moreprompt = "more> ";
+ struct console *c = console_drivers;
+ static DEFINE_SPINLOCK(kdb_printf_lock);
+ unsigned long uninitialized_var(flags);
+
+ preempt_disable();
+ saved_trap_printk = kdb_trap_printk;
+ kdb_trap_printk = 0;
+
+ /* Serialize kdb_printf if multiple cpus try to write at once.
+ * But if any cpu goes recursive in kdb, just print the output,
+ * even if it is interleaved with any other text.
+ */
+ if (!KDB_STATE(PRINTF_LOCK)) {
+ KDB_STATE_SET(PRINTF_LOCK);
+ spin_lock_irqsave(&kdb_printf_lock, flags);
+ got_printf_lock = 1;
+ atomic_inc(&kdb_event);
+ } else {
+ __acquire(kdb_printf_lock);
+ }
+
+ diag = kdbgetintenv("LINES", &linecount);
+ if (diag || linecount <= 1)
+ linecount = 24;
+
+ diag = kdbgetintenv("LOGGING", &logging);
+ if (diag)
+ logging = 0;
+
+ if (!kdb_grepping_flag || suspend_grep) {
+ /* normally, every vsnprintf starts a new buffer */
+ next_avail = kdb_buffer;
+ size_avail = sizeof(kdb_buffer);
+ }
+ vsnprintf(next_avail, size_avail, fmt, ap);
+
+ /*
+ * If kdb_parse() found that the command was cmd xxx | grep yyy
+ * then kdb_grepping_flag is set, and kdb_grep_string contains yyy
+ *
+ * Accumulate the print data up to a newline before searching it.
+ * (vsnprintf does null-terminate the string that it generates)
+ */
+
+ /* skip the search if prints are temporarily unconditional */
+ if (!suspend_grep && kdb_grepping_flag) {
+ cp = strchr(kdb_buffer, '\n');
+ if (!cp) {
+ /*
+ * Special cases that don't end with newlines
+ * but should be written without one:
+ * The "[nn]kdb> " prompt should
+ * appear at the front of the buffer.
+ *
+ * The "[nn]more " prompt should also be
+ * (MOREPROMPT -> moreprompt)
+ * written * but we print that ourselves,
+ * we set the suspend_grep flag to make
+ * it unconditional.
+ *
+ */
+ if (next_avail == kdb_buffer) {
+ /*
+ * these should occur after a newline,
+ * so they will be at the front of the
+ * buffer
+ */
+ cp2 = kdb_buffer;
+ len = strlen(kdb_prompt_str);
+ if (!strncmp(cp2, kdb_prompt_str, len)) {
+ /*
+ * We're about to start a new
+ * command, so we can go back
+ * to normal mode.
+ */
+ kdb_grepping_flag = 0;
+ goto kdb_printit;
+ }
+ }
+ /* no newline; don't search/write the buffer
+ until one is there */
+ len = strlen(kdb_buffer);
+ next_avail = kdb_buffer + len;
+ size_avail = sizeof(kdb_buffer) - len;
+ goto kdb_print_out;
+ }
+
+ /*
+ * The newline is present; print through it or discard
+ * it, depending on the results of the search.
+ */
+ cp++; /* to byte after the newline */
+ replaced_byte = *cp; /* remember what/where it was */
+ cphold = cp;
+ *cp = '\0'; /* end the string for our search */
+
+ /*
+ * We now have a newline at the end of the string
+ * Only continue with this output if it contains the
+ * search string.
+ */
+ fnd = kdb_search_string(kdb_buffer, kdb_grep_string);
+ if (!fnd) {
+ /*
+ * At this point the complete line at the start
+ * of kdb_buffer can be discarded, as it does
+ * not contain what the user is looking for.
+ * Shift the buffer left.
+ */
+ *cphold = replaced_byte;
+ strcpy(kdb_buffer, cphold);
+ len = strlen(kdb_buffer);
+ next_avail = kdb_buffer + len;
+ size_avail = sizeof(kdb_buffer) - len;
+ goto kdb_print_out;
+ }
+ /*
+ * at this point the string is a full line and
+ * should be printed, up to the null.
+ */
+ }
+kdb_printit:
+
+ /*
+ * Write to all consoles.
+ */
+ retlen = strlen(kdb_buffer);
+ if (!dbg_kdb_mode && kgdb_connected) {
+ gdbstub_msg_write(kdb_buffer, retlen);
+ } else {
+ if (!dbg_io_ops->is_console) {
+ len = strlen(kdb_buffer);
+ cp = kdb_buffer;
+ while (len--) {
+ dbg_io_ops->write_char(*cp);
+ cp++;
+ }
+ }
+ while (c) {
+ c->write(c, kdb_buffer, retlen);
+ touch_nmi_watchdog();
+ c = c->next;
+ }
+ }
+ if (logging) {
+ saved_loglevel = console_loglevel;
+ console_loglevel = 0;
+ printk(KERN_INFO "%s", kdb_buffer);
+ }
+
+ if (KDB_STATE(PAGER) && strchr(kdb_buffer, '\n'))
+ kdb_nextline++;
+
+ /* check for having reached the LINES number of printed lines */
+ if (kdb_nextline == linecount) {
+ char buf1[16] = "";
+#if defined(CONFIG_SMP)
+ char buf2[32];
+#endif
+
+ /* Watch out for recursion here. Any routine that calls
+ * kdb_printf will come back through here. And kdb_read
+ * uses kdb_printf to echo on serial consoles ...
+ */
+ kdb_nextline = 1; /* In case of recursion */
+
+ /*
+ * Pause until cr.
+ */
+ moreprompt = kdbgetenv("MOREPROMPT");
+ if (moreprompt == NULL)
+ moreprompt = "more> ";
+
+#if defined(CONFIG_SMP)
+ if (strchr(moreprompt, '%')) {
+ sprintf(buf2, moreprompt, get_cpu());
+ put_cpu();
+ moreprompt = buf2;
+ }
+#endif
+
+ kdb_input_flush();
+ c = console_drivers;
+
+ if (!dbg_io_ops->is_console) {
+ len = strlen(moreprompt);
+ cp = moreprompt;
+ while (len--) {
+ dbg_io_ops->write_char(*cp);
+ cp++;
+ }
+ }
+ while (c) {
+ c->write(c, moreprompt, strlen(moreprompt));
+ touch_nmi_watchdog();
+ c = c->next;
+ }
+
+ if (logging)
+ printk("%s", moreprompt);
+
+ kdb_read(buf1, 2); /* '2' indicates to return
+ * immediately after getting one key. */
+ kdb_nextline = 1; /* Really set output line 1 */
+
+ /* empty and reset the buffer: */
+ kdb_buffer[0] = '\0';
+ next_avail = kdb_buffer;
+ size_avail = sizeof(kdb_buffer);
+ if ((buf1[0] == 'q') || (buf1[0] == 'Q')) {
+ /* user hit q or Q */
+ KDB_FLAG_SET(CMD_INTERRUPT); /* command interrupted */
+ KDB_STATE_CLEAR(PAGER);
+ /* end of command output; back to normal mode */
+ kdb_grepping_flag = 0;
+ kdb_printf("\n");
+ } else if (buf1[0] == ' ') {
+ kdb_printf("\n");
+ suspend_grep = 1; /* for this recursion */
+ } else if (buf1[0] == '\n') {
+ kdb_nextline = linecount - 1;
+ kdb_printf("\r");
+ suspend_grep = 1; /* for this recursion */
+ } else if (buf1[0] && buf1[0] != '\n') {
+ /* user hit something other than enter */
+ suspend_grep = 1; /* for this recursion */
+ kdb_printf("\nOnly 'q' or 'Q' are processed at more "
+ "prompt, input ignored\n");
+ } else if (kdb_grepping_flag) {
+ /* user hit enter */
+ suspend_grep = 1; /* for this recursion */
+ kdb_printf("\n");
+ }
+ kdb_input_flush();
+ }
+
+ /*
+ * For grep searches, shift the printed string left.
+ * replaced_byte contains the character that was overwritten with
+ * the terminating null, and cphold points to the null.
+ * Then adjust the notion of available space in the buffer.
+ */
+ if (kdb_grepping_flag && !suspend_grep) {
+ *cphold = replaced_byte;
+ strcpy(kdb_buffer, cphold);
+ len = strlen(kdb_buffer);
+ next_avail = kdb_buffer + len;
+ size_avail = sizeof(kdb_buffer) - len;
+ }
+
+kdb_print_out:
+ suspend_grep = 0; /* end of what may have been a recursive call */
+ if (logging)
+ console_loglevel = saved_loglevel;
+ if (KDB_STATE(PRINTF_LOCK) && got_printf_lock) {
+ got_printf_lock = 0;
+ spin_unlock_irqrestore(&kdb_printf_lock, flags);
+ KDB_STATE_CLEAR(PRINTF_LOCK);
+ atomic_dec(&kdb_event);
+ } else {
+ __release(kdb_printf_lock);
+ }
+ kdb_trap_printk = saved_trap_printk;
+ preempt_enable();
+ return retlen;
+}
+
+int kdb_printf(const char *fmt, ...)
+{
+ va_list ap;
+ int r;
+
+ va_start(ap, fmt);
+ r = vkdb_printf(fmt, ap);
+ va_end(ap);
+
+ return r;
+}
+
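The grep support in vkdb_printf() works by buffering output until a full line (ending in '\n') is present, temporarily terminating the string just past that newline (saving the overwritten byte in replaced_byte/cphold), and only writing the line out if it contains the search string. A userspace sketch of the same accumulate-terminate-test cycle, simplified to a single static buffer:

#include <stdio.h>
#include <string.h>

static char linebuf[256];
static size_t used;

/* Buffer "chunk"; once a full line is present, print it only if it
 * contains "pattern", then keep any text that followed the newline.
 */
static void grep_emit(const char *chunk, const char *pattern)
{
	char *nl, saved;
	size_t n = strlen(chunk);

	if (used + n + 1 > sizeof(linebuf))
		used = 0;			/* simplified overflow handling */
	memcpy(linebuf + used, chunk, n + 1);
	used += n;

	nl = strchr(linebuf, '\n');
	if (!nl)
		return;				/* keep accumulating */

	saved = nl[1];				/* mirror replaced_byte/cphold */
	nl[1] = '\0';
	if (strstr(linebuf, pattern))
		fputs(linebuf, stdout);
	nl[1] = saved;

	used = strlen(nl + 1);			/* shift the remainder down */
	memmove(linebuf, nl + 1, used + 1);
}
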
diff --git a/kernel/debug/kdb/kdb_keyboard.c b/kernel/debug/kdb/kdb_keyboard.c
new file mode 100644
index 000000000000..4bca634975c0
--- /dev/null
+++ b/kernel/debug/kdb/kdb_keyboard.c
@@ -0,0 +1,212 @@
+/*
+ * Kernel Debugger Architecture Dependent Console I/O handler
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.
+ *
+ * Copyright (c) 1999-2006 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2009 Wind River Systems, Inc. All Rights Reserved.
+ */
+
+#include <linux/kdb.h>
+#include <linux/keyboard.h>
+#include <linux/ctype.h>
+#include <linux/module.h>
+#include <linux/io.h>
+
+/* Keyboard Controller Registers on normal PCs. */
+
+#define KBD_STATUS_REG 0x64 /* Status register (R) */
+#define KBD_DATA_REG 0x60 /* Keyboard data register (R/W) */
+
+/* Status Register Bits */
+
+#define KBD_STAT_OBF 0x01 /* Keyboard output buffer full */
+#define KBD_STAT_MOUSE_OBF 0x20 /* Mouse output buffer full */
+
+static int kbd_exists;
+
+/*
+ * Check if the keyboard controller has a keypress for us.
+ * Some parts (Enter Release, LED change) are still polled here in a blocking way,
+ * but hopefully they are all short.
+ */
+int kdb_get_kbd_char(void)
+{
+ int scancode, scanstatus;
+ static int shift_lock; /* CAPS LOCK state (0-off, 1-on) */
+ static int shift_key; /* Shift next keypress */
+ static int ctrl_key;
+ u_short keychar;
+
+ if (KDB_FLAG(NO_I8042) || KDB_FLAG(NO_VT_CONSOLE) ||
+ (inb(KBD_STATUS_REG) == 0xff && inb(KBD_DATA_REG) == 0xff)) {
+ kbd_exists = 0;
+ return -1;
+ }
+ kbd_exists = 1;
+
+ if ((inb(KBD_STATUS_REG) & KBD_STAT_OBF) == 0)
+ return -1;
+
+ /*
+ * Fetch the scancode
+ */
+ scancode = inb(KBD_DATA_REG);
+ scanstatus = inb(KBD_STATUS_REG);
+
+ /*
+ * Ignore mouse events.
+ */
+ if (scanstatus & KBD_STAT_MOUSE_OBF)
+ return -1;
+
+ /*
+ * Ignore release, trigger on make
+ * (except for shift keys, where we want to
+ * keep the shift state so long as the key is
+ * held down).
+ */
+
+ if (((scancode&0x7f) == 0x2a) || ((scancode&0x7f) == 0x36)) {
+ /*
+ * Next key may use shift table
+ */
+ if ((scancode & 0x80) == 0)
+ shift_key = 1;
+ else
+ shift_key = 0;
+ return -1;
+ }
+
+ if ((scancode&0x7f) == 0x1d) {
+ /*
+ * Left ctrl key
+ */
+ if ((scancode & 0x80) == 0)
+ ctrl_key = 1;
+ else
+ ctrl_key = 0;
+ return -1;
+ }
+
+ if ((scancode & 0x80) != 0)
+ return -1;
+
+ scancode &= 0x7f;
+
+ /*
+ * Translate scancode
+ */
+
+ if (scancode == 0x3a) {
+ /*
+ * Toggle caps lock
+ */
+ shift_lock ^= 1;
+
+#ifdef KDB_BLINK_LED
+ kdb_toggleled(0x4);
+#endif
+ return -1;
+ }
+
+ if (scancode == 0x0e) {
+ /*
+ * Backspace
+ */
+ return 8;
+ }
+
+ /* Special Key */
+ switch (scancode) {
+ case 0xF: /* Tab */
+ return 9;
+ case 0x53: /* Del */
+ return 4;
+ case 0x47: /* Home */
+ return 1;
+ case 0x4F: /* End */
+ return 5;
+ case 0x4B: /* Left */
+ return 2;
+ case 0x48: /* Up */
+ return 16;
+ case 0x50: /* Down */
+ return 14;
+ case 0x4D: /* Right */
+ return 6;
+ }
+
+ if (scancode == 0xe0)
+ return -1;
+
+ /*
+ * For Japanese 86/106 keyboards
+ * See comment in drivers/char/pc_keyb.c.
+ * - Masahiro Adegawa
+ */
+ if (scancode == 0x73)
+ scancode = 0x59;
+ else if (scancode == 0x7d)
+ scancode = 0x7c;
+
+ if (!shift_lock && !shift_key && !ctrl_key) {
+ keychar = plain_map[scancode];
+ } else if ((shift_lock || shift_key) && key_maps[1]) {
+ keychar = key_maps[1][scancode];
+ } else if (ctrl_key && key_maps[4]) {
+ keychar = key_maps[4][scancode];
+ } else {
+ keychar = 0x0020;
+ kdb_printf("Unknown state/scancode (%d)\n", scancode);
+ }
+ keychar &= 0x0fff;
+ if (keychar == '\t')
+ keychar = ' ';
+ switch (KTYP(keychar)) {
+ case KT_LETTER:
+ case KT_LATIN:
+ if (isprint(keychar))
+ break; /* printable characters */
+ /* drop through */
+ case KT_SPEC:
+ if (keychar == K_ENTER)
+ break;
+ /* drop through */
+ default:
+ return -1; /* ignore unprintables */
+ }
+
+ if ((scancode & 0x7f) == 0x1c) {
+ /*
+ * enter key. All done. Absorb the release scancode.
+ */
+ while ((inb(KBD_STATUS_REG) & KBD_STAT_OBF) == 0)
+ ;
+
+ /*
+ * Fetch the scancode
+ */
+ scancode = inb(KBD_DATA_REG);
+ scanstatus = inb(KBD_STATUS_REG);
+
+ while (scanstatus & KBD_STAT_MOUSE_OBF) {
+ scancode = inb(KBD_DATA_REG);
+ scanstatus = inb(KBD_STATUS_REG);
+ }
+
+ if (scancode != 0x9c) {
+ /*
+ * Wasn't an enter-release, why not?
+ */
+ kdb_printf("kdb: expected enter got 0x%x status 0x%x\n",
+ scancode, scanstatus);
+ }
+
+ return 13;
+ }
+
+ return keychar & 0xff;
+}
+EXPORT_SYMBOL_GPL(kdb_get_kbd_char);
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
new file mode 100644
index 000000000000..ec29a9db2406
--- /dev/null
+++ b/kernel/debug/kdb/kdb_main.c
@@ -0,0 +1,2851 @@
+/*
+ * Kernel Debugger Architecture Independent Main Code
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999-2004 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 2000 Stephane Eranian <eranian@hpl.hp.com>
+ * Xscale (R) modifications copyright (C) 2003 Intel Corporation.
+ * Copyright (c) 2009 Wind River Systems, Inc. All Rights Reserved.
+ */
+
+#include <linux/ctype.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/reboot.h>
+#include <linux/sched.h>
+#include <linux/sysrq.h>
+#include <linux/smp.h>
+#include <linux/utsname.h>
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/kallsyms.h>
+#include <linux/kgdb.h>
+#include <linux/kdb.h>
+#include <linux/notifier.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/nmi.h>
+#include <linux/time.h>
+#include <linux/ptrace.h>
+#include <linux/sysctl.h>
+#include <linux/cpu.h>
+#include <linux/kdebug.h>
+#include <linux/proc_fs.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include "kdb_private.h"
+
+#define GREP_LEN 256
+char kdb_grep_string[GREP_LEN];
+int kdb_grepping_flag;
+EXPORT_SYMBOL(kdb_grepping_flag);
+int kdb_grep_leading;
+int kdb_grep_trailing;
+
+/*
+ * Kernel debugger state flags
+ */
+int kdb_flags;
+atomic_t kdb_event;
+
+/*
+ * kdb_lock protects updates to kdb_initial_cpu. Used to
+ * single thread processors through the kernel debugger.
+ */
+int kdb_initial_cpu = -1; /* cpu number that owns kdb */
+int kdb_nextline = 1;
+int kdb_state; /* General KDB state */
+
+struct task_struct *kdb_current_task;
+EXPORT_SYMBOL(kdb_current_task);
+struct pt_regs *kdb_current_regs;
+
+const char *kdb_diemsg;
+static int kdb_go_count;
+#ifdef CONFIG_KDB_CONTINUE_CATASTROPHIC
+static unsigned int kdb_continue_catastrophic =
+ CONFIG_KDB_CONTINUE_CATASTROPHIC;
+#else
+static unsigned int kdb_continue_catastrophic;
+#endif
+
+/* kdb_commands describes the available commands. */
+static kdbtab_t *kdb_commands;
+#define KDB_BASE_CMD_MAX 50
+static int kdb_max_commands = KDB_BASE_CMD_MAX;
+static kdbtab_t kdb_base_commands[50];
+#define for_each_kdbcmd(cmd, num) \
+ for ((cmd) = kdb_base_commands, (num) = 0; \
+ num < kdb_max_commands; \
+ num == KDB_BASE_CMD_MAX ? cmd = kdb_commands : cmd++, num++)
+
+typedef struct _kdbmsg {
+ int km_diag; /* kdb diagnostic */
+ char *km_msg; /* Corresponding message text */
+} kdbmsg_t;
+
+#define KDBMSG(msgnum, text) \
+ { KDB_##msgnum, text }
+
+static kdbmsg_t kdbmsgs[] = {
+ KDBMSG(NOTFOUND, "Command Not Found"),
+ KDBMSG(ARGCOUNT, "Improper argument count, see usage."),
+ KDBMSG(BADWIDTH, "Illegal value for BYTESPERWORD; use 1, 2, 4 or 8, "
+ "8 is only allowed on 64 bit systems"),
+ KDBMSG(BADRADIX, "Illegal value for RADIX; use 8, 10 or 16"),
+ KDBMSG(NOTENV, "Cannot find environment variable"),
+ KDBMSG(NOENVVALUE, "Environment variable should have value"),
+ KDBMSG(NOTIMP, "Command not implemented"),
+ KDBMSG(ENVFULL, "Environment full"),
+ KDBMSG(ENVBUFFULL, "Environment buffer full"),
+ KDBMSG(TOOMANYBPT, "Too many breakpoints defined"),
+#ifdef CONFIG_CPU_XSCALE
+ KDBMSG(TOOMANYDBREGS, "More breakpoints than ibcr registers defined"),
+#else
+ KDBMSG(TOOMANYDBREGS, "More breakpoints than db registers defined"),
+#endif
+ KDBMSG(DUPBPT, "Duplicate breakpoint address"),
+ KDBMSG(BPTNOTFOUND, "Breakpoint not found"),
+ KDBMSG(BADMODE, "Invalid IDMODE"),
+ KDBMSG(BADINT, "Illegal numeric value"),
+ KDBMSG(INVADDRFMT, "Invalid symbolic address format"),
+ KDBMSG(BADREG, "Invalid register name"),
+ KDBMSG(BADCPUNUM, "Invalid cpu number"),
+ KDBMSG(BADLENGTH, "Invalid length field"),
+ KDBMSG(NOBP, "No Breakpoint exists"),
+ KDBMSG(BADADDR, "Invalid address"),
+};
+#undef KDBMSG
+
+static const int __nkdb_err = sizeof(kdbmsgs) / sizeof(kdbmsg_t);
+
+
+/*
+ * Initial environment. This is all kept static and local to
+ * this file. We don't want to rely on the memory allocation
+ * mechanisms in the kernel, so we use a very limited allocate-only
+ * heap for new and altered environment variables. The entire
+ * environment is limited to a fixed number of entries (add more
+ * to __env[] if required) and a fixed amount of heap (add more to
+ * KDB_ENVBUFSIZE if required).
+ */
+
+static char *__env[] = {
+#if defined(CONFIG_SMP)
+ "PROMPT=[%d]kdb> ",
+ "MOREPROMPT=[%d]more> ",
+#else
+ "PROMPT=kdb> ",
+ "MOREPROMPT=more> ",
+#endif
+ "RADIX=16",
+ "MDCOUNT=8", /* lines of md output */
+ "BTARGS=9", /* 9 possible args in bt */
+ KDB_PLATFORM_ENV,
+ "DTABCOUNT=30",
+ "NOSECT=1",
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+};
+
+static const int __nenv = (sizeof(__env) / sizeof(char *));
+
+struct task_struct *kdb_curr_task(int cpu)
+{
+ struct task_struct *p = curr_task(cpu);
+#ifdef _TIF_MCA_INIT
+ if ((task_thread_info(p)->flags & _TIF_MCA_INIT) && KDB_TSK(cpu))
+ p = krp->p;
+#endif
+ return p;
+}
+
+/*
+ * kdbgetenv - This function will return the character string value of
+ * an environment variable.
+ * Parameters:
+ * match A character string representing an environment variable.
+ * Returns:
+ * NULL No environment variable matches 'match'
+ * char* Pointer to string value of environment variable.
+ */
+char *kdbgetenv(const char *match)
+{
+ char **ep = __env;
+ int matchlen = strlen(match);
+ int i;
+
+ for (i = 0; i < __nenv; i++) {
+ char *e = *ep++;
+
+ if (!e)
+ continue;
+
+ if ((strncmp(match, e, matchlen) == 0)
+ && ((e[matchlen] == '\0')
+ || (e[matchlen] == '='))) {
+ char *cp = strchr(e, '=');
+ return cp ? ++cp : "";
+ }
+ }
+ return NULL;
+}
+
+/*
+ * kdballocenv - This function is used to allocate bytes for
+ * environment entries.
+ * Parameters:
+ * bytes The number of bytes requested from the environment heap.
+ * Returns:
+ * A pointer to the allocated space, or NULL if the request cannot
+ * be satisfied.
+ * Remarks:
+ * We use a static environment buffer (envbuffer) to hold the values
+ * of dynamically generated environment variables (see kdb_set). Buffer
+ * space once allocated is never freed, so over time, the amount of space
+ * (currently 512 bytes) will be exhausted if env variables are changed
+ * frequently.
+ */
+static char *kdballocenv(size_t bytes)
+{
+#define KDB_ENVBUFSIZE 512
+ static char envbuffer[KDB_ENVBUFSIZE];
+ static int envbufsize;
+ char *ep = NULL;
+
+ if ((KDB_ENVBUFSIZE - envbufsize) >= bytes) {
+ ep = &envbuffer[envbufsize];
+ envbufsize += bytes;
+ }
+ return ep;
+}
+
+/*
+ * kdbgetulenv - This function will return the value of an unsigned
+ * long-valued environment variable.
+ * Parameters:
+ * match A character string representing a numeric value
+ * Outputs:
+ *	*value  the unsigned long representation of the env variable 'match'
+ * Returns:
+ * Zero on success, a kdb diagnostic on failure.
+ */
+static int kdbgetulenv(const char *match, unsigned long *value)
+{
+ char *ep;
+
+ ep = kdbgetenv(match);
+ if (!ep)
+ return KDB_NOTENV;
+ if (strlen(ep) == 0)
+ return KDB_NOENVVALUE;
+
+ *value = simple_strtoul(ep, NULL, 0);
+
+ return 0;
+}
+
+/*
+ * kdbgetintenv - This function will return the value of an
+ * integer-valued environment variable.
+ * Parameters:
+ * match A character string representing an integer-valued env variable
+ * Outputs:
+ * *value the integer representation of the environment variable 'match'
+ * Returns:
+ * Zero on success, a kdb diagnostic on failure.
+ */
+int kdbgetintenv(const char *match, int *value)
+{
+ unsigned long val;
+ int diag;
+
+ diag = kdbgetulenv(match, &val);
+ if (!diag)
+ *value = (int) val;
+ return diag;
+}
+
+/*
+ * kdbgetularg - This function will convert a numeric string into an
+ * unsigned long value.
+ * Parameters:
+ * arg A character string representing a numeric value
+ * Outputs:
+ *	*value  the unsigned long representation of arg.
+ * Returns:
+ * Zero on success, a kdb diagnostic on failure.
+ */
+int kdbgetularg(const char *arg, unsigned long *value)
+{
+ char *endp;
+ unsigned long val;
+
+ val = simple_strtoul(arg, &endp, 0);
+
+ if (endp == arg) {
+ /*
+ * Try base 16, for us folks too lazy to type the
+ * leading 0x...
+ */
+ val = simple_strtoul(arg, &endp, 16);
+ if (endp == arg)
+ return KDB_BADINT;
+ }
+
+ *value = val;
+
+ return 0;
+}
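+
+/*
+ * For illustration, the parser above accepts either an explicit base
+ * prefix or a bare hexadecimal string; the literals below are examples:
+ *
+ *	kdbgetularg("0x1f", &val);	// val = 0x1f, base taken from the 0x prefix
+ *	kdbgetularg("f0", &val);	// no decimal digits consumed, retried as hex: val = 0xf0
+ *	kdbgetularg("xyz", &val);	// not numeric in either base, returns KDB_BADINT
+ */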
+
+/*
+ * kdb_set - This function implements the 'set' command. Alter an
+ * existing environment variable or create a new one.
+ */
+int kdb_set(int argc, const char **argv)
+{
+ int i;
+ char *ep;
+ size_t varlen, vallen;
+
+ /*
+ * we can be invoked two ways:
+ * set var=value argv[1]="var", argv[2]="value"
+ * set var = value argv[1]="var", argv[2]="=", argv[3]="value"
+ * - if the latter, shift 'em down.
+ */
+ if (argc == 3) {
+ argv[2] = argv[3];
+ argc--;
+ }
+
+ if (argc != 2)
+ return KDB_ARGCOUNT;
+
+ /*
+ * Check for internal variables
+ */
+ if (strcmp(argv[1], "KDBDEBUG") == 0) {
+ unsigned int debugflags;
+ char *cp;
+
+ debugflags = simple_strtoul(argv[2], &cp, 0);
+ if (cp == argv[2] || debugflags & ~KDB_DEBUG_FLAG_MASK) {
+ kdb_printf("kdb: illegal debug flags '%s'\n",
+ argv[2]);
+ return 0;
+ }
+ kdb_flags = (kdb_flags &
+ ~(KDB_DEBUG_FLAG_MASK << KDB_DEBUG_FLAG_SHIFT))
+ | (debugflags << KDB_DEBUG_FLAG_SHIFT);
+
+ return 0;
+ }
+
+ /*
+ * Tokenizer squashed the '=' sign. argv[1] is variable
+ * name, argv[2] = value.
+ */
+ varlen = strlen(argv[1]);
+ vallen = strlen(argv[2]);
+ ep = kdballocenv(varlen + vallen + 2);
+ if (ep == (char *)0)
+ return KDB_ENVBUFFULL;
+
+ sprintf(ep, "%s=%s", argv[1], argv[2]);
+
+ ep[varlen+vallen+1] = '\0';
+
+ for (i = 0; i < __nenv; i++) {
+ if (__env[i]
+ && ((strncmp(__env[i], argv[1], varlen) == 0)
+ && ((__env[i][varlen] == '\0')
+ || (__env[i][varlen] == '=')))) {
+ __env[i] = ep;
+ return 0;
+ }
+ }
+
+ /*
+ * Wasn't existing variable. Fit into slot.
+ */
+ for (i = 0; i < __nenv-1; i++) {
+ if (__env[i] == (char *)0) {
+ __env[i] = ep;
+ return 0;
+ }
+ }
+
+ return KDB_ENVFULL;
+}
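+
+/*
+ * Example invocations of the command implemented above (variable names
+ * and values are illustrative only):
+ *
+ *	kdb> set MDCOUNT=16	# argv[1]="MDCOUNT", argv[2]="16"
+ *	kdb> set RADIX = 10	# '=' arrives as its own token and is shifted out
+ *	kdb> set KDBDEBUG=0x1	# internal variable, updates kdb_flags instead
+ */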
+
+static int kdb_check_regs(void)
+{
+ if (!kdb_current_regs) {
+ kdb_printf("No current kdb registers."
+ " You may need to select another task\n");
+ return KDB_BADREG;
+ }
+ return 0;
+}
+
+/*
+ * kdbgetaddrarg - This function is responsible for parsing an
+ * address-expression and returning the value of the expression,
+ * symbol name, and offset to the caller.
+ *
+ * The argument may consist of a numeric value (decimal or
+ * hexadecimal), a symbol name, a register name (preceded by the
+ * percent sign), an environment variable with a numeric value
+ * (preceded by a dollar sign) or a simple arithmetic expression
+ * consisting of a symbol name, +/-, and a numeric constant value
+ * (offset).
+ * Parameters:
+ * argc - count of arguments in argv
+ * argv - argument vector
+ * *nextarg - index to next unparsed argument in argv[]
+ * Outputs:
+ * *value - receives the value of the address-expression
+ * *offset - receives the offset specified, if any
+ * *name - receives the symbol name, if any
+ * *nextarg - index to next unparsed argument in argv[]
+ * Returns:
+ * zero is returned on success, a kdb diagnostic code is
+ * returned on error.
+ */
+int kdbgetaddrarg(int argc, const char **argv, int *nextarg,
+ unsigned long *value, long *offset,
+ char **name)
+{
+ unsigned long addr;
+ unsigned long off = 0;
+ int positive;
+ int diag;
+ int found = 0;
+ char *symname;
+ char symbol = '\0';
+ char *cp;
+ kdb_symtab_t symtab;
+
+ /*
+ * Process arguments which follow the following syntax:
+ *
+ * symbol | numeric-address [+/- numeric-offset]
+ * %register
+ * $environment-variable
+ */
+
+ if (*nextarg > argc)
+ return KDB_ARGCOUNT;
+
+ symname = (char *)argv[*nextarg];
+
+ /*
+ * If there is no whitespace between the symbol
+ * or address and the '+' or '-' symbols, we
+ * remember the character and replace it with a
+ * null so the symbol/value can be properly parsed
+ */
+ cp = strpbrk(symname, "+-");
+ if (cp != NULL) {
+ symbol = *cp;
+ *cp++ = '\0';
+ }
+
+ if (symname[0] == '$') {
+ diag = kdbgetulenv(&symname[1], &addr);
+ if (diag)
+ return diag;
+ } else if (symname[0] == '%') {
+ diag = kdb_check_regs();
+ if (diag)
+ return diag;
+ /* Implement register values with % at a later time as it is
+ * arch optional.
+ */
+ return KDB_NOTIMP;
+ } else {
+ found = kdbgetsymval(symname, &symtab);
+ if (found) {
+ addr = symtab.sym_start;
+ } else {
+ diag = kdbgetularg(argv[*nextarg], &addr);
+ if (diag)
+ return diag;
+ }
+ }
+
+ if (!found)
+ found = kdbnearsym(addr, &symtab);
+
+ (*nextarg)++;
+
+ if (name)
+ *name = symname;
+ if (value)
+ *value = addr;
+ if (offset && name && *name)
+ *offset = addr - symtab.sym_start;
+
+ if ((*nextarg > argc)
+ && (symbol == '\0'))
+ return 0;
+
+ /*
+ * check for +/- and offset
+ */
+
+ if (symbol == '\0') {
+ if ((argv[*nextarg][0] != '+')
+ && (argv[*nextarg][0] != '-')) {
+ /*
+ * Not our argument. Return.
+ */
+ return 0;
+ } else {
+ positive = (argv[*nextarg][0] == '+');
+ (*nextarg)++;
+ }
+ } else
+ positive = (symbol == '+');
+
+ /*
+ * Now there must be an offset!
+ */
+ if ((*nextarg > argc)
+ && (symbol == '\0')) {
+ return KDB_INVADDRFMT;
+ }
+
+ if (!symbol) {
+ cp = (char *)argv[*nextarg];
+ (*nextarg)++;
+ }
+
+ diag = kdbgetularg(cp, &off);
+ if (diag)
+ return diag;
+
+ if (!positive)
+ off = -off;
+
+ if (offset)
+ *offset += off;
+
+ if (value)
+ *value += off;
+
+ return 0;
+}
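+
+/*
+ * Example address-expressions accepted by the parser above; the symbol,
+ * register and variable names are illustrative only:
+ *
+ *	0xc0123456		plain numeric address
+ *	schedule		symbol, resolved via kdbgetsymval()
+ *	schedule+0x10		symbol plus a constant offset
+ *	sys_open - 4		whitespace around +/- is also accepted
+ *	$BTARGS			numeric environment variable
+ *	%rax			register form, currently returns KDB_NOTIMP
+ */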
+
+static void kdb_cmderror(int diag)
+{
+ int i;
+
+ if (diag >= 0) {
+ kdb_printf("no error detected (diagnostic is %d)\n", diag);
+ return;
+ }
+
+ for (i = 0; i < __nkdb_err; i++) {
+ if (kdbmsgs[i].km_diag == diag) {
+ kdb_printf("diag: %d: %s\n", diag, kdbmsgs[i].km_msg);
+ return;
+ }
+ }
+
+ kdb_printf("Unknown diag %d\n", -diag);
+}
+
+/*
+ * kdb_defcmd, kdb_defcmd2 - This function implements the 'defcmd'
+ * command which defines one command as a set of other commands,
+ * terminated by endefcmd. kdb_defcmd processes the initial
+ * 'defcmd' command, kdb_defcmd2 is invoked from kdb_parse for
+ * the following commands until 'endefcmd'.
+ * Inputs:
+ * argc argument count
+ * argv argument vector
+ * Returns:
+ * zero for success, a kdb diagnostic if error
+ */
+struct defcmd_set {
+ int count;
+ int usable;
+ char *name;
+ char *usage;
+ char *help;
+ char **command;
+};
+static struct defcmd_set *defcmd_set;
+static int defcmd_set_count;
+static int defcmd_in_progress;
+
+/* Forward references */
+static int kdb_exec_defcmd(int argc, const char **argv);
+
+static int kdb_defcmd2(const char *cmdstr, const char *argv0)
+{
+ struct defcmd_set *s = defcmd_set + defcmd_set_count - 1;
+ char **save_command = s->command;
+ if (strcmp(argv0, "endefcmd") == 0) {
+ defcmd_in_progress = 0;
+ if (!s->count)
+ s->usable = 0;
+ if (s->usable)
+ kdb_register(s->name, kdb_exec_defcmd,
+ s->usage, s->help, 0);
+ return 0;
+ }
+ if (!s->usable)
+ return KDB_NOTIMP;
+ s->command = kmalloc((s->count + 1) * sizeof(*(s->command)), GFP_KDB);
+ if (!s->command) {
+ kdb_printf("Could not allocate new kdb_defcmd table for %s\n",
+ cmdstr);
+ s->usable = 0;
+ return KDB_NOTIMP;
+ }
+ memcpy(s->command, save_command, s->count * sizeof(*(s->command)));
+ s->command[s->count++] = kdb_strdup(cmdstr, GFP_KDB);
+ kfree(save_command);
+ return 0;
+}
+
+static int kdb_defcmd(int argc, const char **argv)
+{
+ struct defcmd_set *save_defcmd_set = defcmd_set, *s;
+ if (defcmd_in_progress) {
+ kdb_printf("kdb: nested defcmd detected, assuming missing "
+ "endefcmd\n");
+ kdb_defcmd2("endefcmd", "endefcmd");
+ }
+ if (argc == 0) {
+ int i;
+ for (s = defcmd_set; s < defcmd_set + defcmd_set_count; ++s) {
+ kdb_printf("defcmd %s \"%s\" \"%s\"\n", s->name,
+ s->usage, s->help);
+ for (i = 0; i < s->count; ++i)
+ kdb_printf("%s", s->command[i]);
+ kdb_printf("endefcmd\n");
+ }
+ return 0;
+ }
+ if (argc != 3)
+ return KDB_ARGCOUNT;
+ defcmd_set = kmalloc((defcmd_set_count + 1) * sizeof(*defcmd_set),
+ GFP_KDB);
+ if (!defcmd_set) {
+ kdb_printf("Could not allocate new defcmd_set entry for %s\n",
+ argv[1]);
+ defcmd_set = save_defcmd_set;
+ return KDB_NOTIMP;
+ }
+ memcpy(defcmd_set, save_defcmd_set,
+ defcmd_set_count * sizeof(*defcmd_set));
+ kfree(save_defcmd_set);
+ s = defcmd_set + defcmd_set_count;
+ memset(s, 0, sizeof(*s));
+ s->usable = 1;
+ s->name = kdb_strdup(argv[1], GFP_KDB);
+ s->usage = kdb_strdup(argv[2], GFP_KDB);
+ s->help = kdb_strdup(argv[3], GFP_KDB);
+ if (s->usage[0] == '"') {
+ strcpy(s->usage, s->usage+1);
+ s->usage[strlen(s->usage)-1] = '\0';
+ }
+ if (s->help[0] == '"') {
+ strcpy(s->help, s->help+1);
+ s->help[strlen(s->help)-1] = '\0';
+ }
+ ++defcmd_set_count;
+ defcmd_in_progress = 1;
+ return 0;
+}
+
+/*
+ * kdb_exec_defcmd - Execute the set of commands associated with this
+ * defcmd name.
+ * Inputs:
+ * argc argument count
+ * argv argument vector
+ * Returns:
+ * zero for success, a kdb diagnostic if error
+ */
+static int kdb_exec_defcmd(int argc, const char **argv)
+{
+ int i, ret;
+ struct defcmd_set *s;
+ if (argc != 0)
+ return KDB_ARGCOUNT;
+ for (s = defcmd_set, i = 0; i < defcmd_set_count; ++i, ++s) {
+ if (strcmp(s->name, argv[0]) == 0)
+ break;
+ }
+ if (i == defcmd_set_count) {
+ kdb_printf("kdb_exec_defcmd: could not find commands for %s\n",
+ argv[0]);
+ return KDB_NOTIMP;
+ }
+ for (i = 0; i < s->count; ++i) {
+ /* Recursive use of kdb_parse, do not use argv after
+ * this point */
+ argv = NULL;
+ kdb_printf("[%s]kdb> %s\n", s->name, s->command[i]);
+ ret = kdb_parse(s->command[i]);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+/* Command history */
+#define KDB_CMD_HISTORY_COUNT 32
+#define CMD_BUFLEN 200 /* kdb_printf: max printline
+ * size == 256 */
+static unsigned int cmd_head, cmd_tail;
+static unsigned int cmdptr;
+static char cmd_hist[KDB_CMD_HISTORY_COUNT][CMD_BUFLEN];
+static char cmd_cur[CMD_BUFLEN];
+
+/*
+ * The "str" argument may point to something like | grep xyz
+ */
+static void parse_grep(const char *str)
+{
+ int len;
+ char *cp = (char *)str, *cp2;
+
+	/* sanity check: we should have been called with the '|' first */
+ if (*cp != '|')
+ return;
+ cp++;
+ while (isspace(*cp))
+ cp++;
+ if (strncmp(cp, "grep ", 5)) {
+ kdb_printf("invalid 'pipe', see grephelp\n");
+ return;
+ }
+ cp += 5;
+ while (isspace(*cp))
+ cp++;
+ cp2 = strchr(cp, '\n');
+ if (cp2)
+ *cp2 = '\0'; /* remove the trailing newline */
+ len = strlen(cp);
+ if (len == 0) {
+ kdb_printf("invalid 'pipe', see grephelp\n");
+ return;
+ }
+ /* now cp points to a nonzero length search string */
+ if (*cp == '"') {
+		/* allow it to be "x y z" by removing the "'s - there must
+ be two of them */
+ cp++;
+ cp2 = strchr(cp, '"');
+ if (!cp2) {
+ kdb_printf("invalid quoted string, see grephelp\n");
+ return;
+ }
+ *cp2 = '\0'; /* end the string where the 2nd " was */
+ }
+ kdb_grep_leading = 0;
+ if (*cp == '^') {
+ kdb_grep_leading = 1;
+ cp++;
+ }
+ len = strlen(cp);
+ kdb_grep_trailing = 0;
+ if (*(cp+len-1) == '$') {
+ kdb_grep_trailing = 1;
+ *(cp+len-1) = '\0';
+ }
+ len = strlen(cp);
+ if (!len)
+ return;
+ if (len >= GREP_LEN) {
+ kdb_printf("search string too long\n");
+ return;
+ }
+ strcpy(kdb_grep_string, cp);
+ kdb_grepping_flag++;
+ return;
+}
+
+/*
+ * kdb_parse - Parse the command line, search the command table for a
+ * matching command and invoke the command function. This
+ * function may be called recursively, if it is, the second call
+ * will overwrite argv and cbuf. It is the caller's
+ * responsibility to save their argv if they recursively call
+ * kdb_parse().
+ * Parameters:
+ * cmdstr The input command line to be parsed.
+ * Returns:
+ * Zero for success, a kdb diagnostic if failure.
+ * Remarks:
+ * Limited to 20 tokens.
+ *
+ * Real rudimentary tokenization. Basically only whitespace
+ *	is considered a token delimiter (but special consideration
+ * is taken of the '=' sign as used by the 'set' command).
+ *
+ * The algorithm used to tokenize the input string relies on
+ * there being at least one whitespace (or otherwise useless)
+ * character between tokens as the character immediately following
+ * the token is altered in-place to a null-byte to terminate the
+ * token string.
+ */
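+
+/*
+ * Tokenization sketch for the parser below; the command text is
+ * illustrative only:
+ *
+ *	"md4c8 schedule+0x10"	->  argv[0]="md4c8", argv[1]="schedule+0x10"
+ *	"set LOGGING=1"		->  argv[0]="set", argv[1]="LOGGING", argv[2]="1"
+ *	"ps | grep init"	->  argv[0]="ps", the remainder is handed to parse_grep()
+ */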
+
+#define MAXARGC 20
+
+int kdb_parse(const char *cmdstr)
+{
+ static char *argv[MAXARGC];
+ static int argc;
+ static char cbuf[CMD_BUFLEN+2];
+ char *cp;
+ char *cpp, quoted;
+ kdbtab_t *tp;
+ int i, escaped, ignore_errors = 0, check_grep;
+
+ /*
+ * First tokenize the command string.
+ */
+ cp = (char *)cmdstr;
+ kdb_grepping_flag = check_grep = 0;
+
+ if (KDB_FLAG(CMD_INTERRUPT)) {
+ /* Previous command was interrupted, newline must not
+ * repeat the command */
+ KDB_FLAG_CLEAR(CMD_INTERRUPT);
+ KDB_STATE_SET(PAGER);
+ argc = 0; /* no repeat */
+ }
+
+ if (*cp != '\n' && *cp != '\0') {
+ argc = 0;
+ cpp = cbuf;
+ while (*cp) {
+ /* skip whitespace */
+ while (isspace(*cp))
+ cp++;
+ if ((*cp == '\0') || (*cp == '\n') ||
+ (*cp == '#' && !defcmd_in_progress))
+ break;
+ /* special case: check for | grep pattern */
+ if (*cp == '|') {
+ check_grep++;
+ break;
+ }
+ if (cpp >= cbuf + CMD_BUFLEN) {
+ kdb_printf("kdb_parse: command buffer "
+ "overflow, command ignored\n%s\n",
+ cmdstr);
+ return KDB_NOTFOUND;
+ }
+ if (argc >= MAXARGC - 1) {
+ kdb_printf("kdb_parse: too many arguments, "
+ "command ignored\n%s\n", cmdstr);
+ return KDB_NOTFOUND;
+ }
+ argv[argc++] = cpp;
+ escaped = 0;
+ quoted = '\0';
+ /* Copy to next unquoted and unescaped
+ * whitespace or '=' */
+ while (*cp && *cp != '\n' &&
+ (escaped || quoted || !isspace(*cp))) {
+ if (cpp >= cbuf + CMD_BUFLEN)
+ break;
+ if (escaped) {
+ escaped = 0;
+ *cpp++ = *cp++;
+ continue;
+ }
+ if (*cp == '\\') {
+ escaped = 1;
+ ++cp;
+ continue;
+ }
+ if (*cp == quoted)
+ quoted = '\0';
+ else if (*cp == '\'' || *cp == '"')
+ quoted = *cp;
+ *cpp = *cp++;
+ if (*cpp == '=' && !quoted)
+ break;
+ ++cpp;
+ }
+ *cpp++ = '\0'; /* Squash a ws or '=' character */
+ }
+ }
+ if (!argc)
+ return 0;
+ if (check_grep)
+ parse_grep(cp);
+ if (defcmd_in_progress) {
+ int result = kdb_defcmd2(cmdstr, argv[0]);
+ if (!defcmd_in_progress) {
+ argc = 0; /* avoid repeat on endefcmd */
+ *(argv[0]) = '\0';
+ }
+ return result;
+ }
+ if (argv[0][0] == '-' && argv[0][1] &&
+ (argv[0][1] < '0' || argv[0][1] > '9')) {
+ ignore_errors = 1;
+ ++argv[0];
+ }
+
+ for_each_kdbcmd(tp, i) {
+ if (tp->cmd_name) {
+ /*
+ * If this command is allowed to be abbreviated,
+ * check to see if this is it.
+ */
+
+ if (tp->cmd_minlen
+ && (strlen(argv[0]) <= tp->cmd_minlen)) {
+ if (strncmp(argv[0],
+ tp->cmd_name,
+ tp->cmd_minlen) == 0) {
+ break;
+ }
+ }
+
+ if (strcmp(argv[0], tp->cmd_name) == 0)
+ break;
+ }
+ }
+
+ /*
+ * If we don't find a command by this name, see if the first
+ * few characters of this match any of the known commands.
+ * e.g., md1c20 should match md.
+ */
+ if (i == kdb_max_commands) {
+ for_each_kdbcmd(tp, i) {
+ if (tp->cmd_name) {
+ if (strncmp(argv[0],
+ tp->cmd_name,
+ strlen(tp->cmd_name)) == 0) {
+ break;
+ }
+ }
+ }
+ }
+
+ if (i < kdb_max_commands) {
+ int result;
+ KDB_STATE_SET(CMD);
+ result = (*tp->cmd_func)(argc-1, (const char **)argv);
+ if (result && ignore_errors && result > KDB_CMD_GO)
+ result = 0;
+ KDB_STATE_CLEAR(CMD);
+ switch (tp->cmd_repeat) {
+ case KDB_REPEAT_NONE:
+ argc = 0;
+ if (argv[0])
+ *(argv[0]) = '\0';
+ break;
+ case KDB_REPEAT_NO_ARGS:
+ argc = 1;
+ if (argv[1])
+ *(argv[1]) = '\0';
+ break;
+ case KDB_REPEAT_WITH_ARGS:
+ break;
+ }
+ return result;
+ }
+
+ /*
+ * If the input with which we were presented does not
+ * map to an existing command, attempt to parse it as an
+ * address argument and display the result. Useful for
+ * obtaining the address of a variable, or the nearest symbol
+ * to an address contained in a register.
+ */
+ {
+ unsigned long value;
+ char *name = NULL;
+ long offset;
+ int nextarg = 0;
+
+ if (kdbgetaddrarg(0, (const char **)argv, &nextarg,
+ &value, &offset, &name)) {
+ return KDB_NOTFOUND;
+ }
+
+ kdb_printf("%s = ", argv[0]);
+ kdb_symbol_print(value, NULL, KDB_SP_DEFAULT);
+ kdb_printf("\n");
+ return 0;
+ }
+}
+
+
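+/*
+ * handle_ctrl_cmd - Scroll through the command history in response to a
+ *	control character: Ctrl-P (0x10) recalls the previous command,
+ *	Ctrl-N (0x0e) the next one.  Returns 1 if the character was a
+ *	history key and cmd_cur was updated, 0 otherwise.
+ */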
+static int handle_ctrl_cmd(char *cmd)
+{
+#define CTRL_P 16
+#define CTRL_N 14
+
+ /* initial situation */
+ if (cmd_head == cmd_tail)
+ return 0;
+ switch (*cmd) {
+ case CTRL_P:
+ if (cmdptr != cmd_tail)
+ cmdptr = (cmdptr-1) % KDB_CMD_HISTORY_COUNT;
+ strncpy(cmd_cur, cmd_hist[cmdptr], CMD_BUFLEN);
+ return 1;
+ case CTRL_N:
+ if (cmdptr != cmd_head)
+ cmdptr = (cmdptr+1) % KDB_CMD_HISTORY_COUNT;
+ strncpy(cmd_cur, cmd_hist[cmdptr], CMD_BUFLEN);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * kdb_reboot - This function implements the 'reboot' command. Reboot
+ *	the system immediately, or loop forever on failure.
+ */
+static int kdb_reboot(int argc, const char **argv)
+{
+ emergency_restart();
+ kdb_printf("Hmm, kdb_reboot did not reboot, spinning here\n");
+ while (1)
+ cpu_relax();
+ /* NOTREACHED */
+ return 0;
+}
+
+static void kdb_dumpregs(struct pt_regs *regs)
+{
+ int old_lvl = console_loglevel;
+ console_loglevel = 15;
+ kdb_trap_printk++;
+ show_regs(regs);
+ kdb_trap_printk--;
+ kdb_printf("\n");
+ console_loglevel = old_lvl;
+}
+
+void kdb_set_current_task(struct task_struct *p)
+{
+ kdb_current_task = p;
+
+ if (kdb_task_has_cpu(p)) {
+ kdb_current_regs = KDB_TSKREGS(kdb_process_cpu(p));
+ return;
+ }
+ kdb_current_regs = NULL;
+}
+
+/*
+ * kdb_local - The main code for kdb. This routine is invoked on a
+ * specific processor, it is not global. The main kdb() routine
+ * ensures that only one processor at a time is in this routine.
+ * This code is called with the real reason code on the first
+ * entry to a kdb session, thereafter it is called with reason
+ * SWITCH, even if the user goes back to the original cpu.
+ * Inputs:
+ * reason The reason KDB was invoked
+ * error The hardware-defined error code
+ * regs The exception frame at time of fault/breakpoint.
+ * db_result Result code from the break or debug point.
+ * Returns:
+ *	0	KDB was invoked for an event for which it was not responsible
+ * 1 KDB handled the event for which it was invoked.
+ * KDB_CMD_GO User typed 'go'.
+ * KDB_CMD_CPU User switched to another cpu.
+ * KDB_CMD_SS Single step.
+ * KDB_CMD_SSB Single step until branch.
+ */
+static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
+ kdb_dbtrap_t db_result)
+{
+ char *cmdbuf;
+ int diag;
+ struct task_struct *kdb_current =
+ kdb_curr_task(raw_smp_processor_id());
+
+ KDB_DEBUG_STATE("kdb_local 1", reason);
+ kdb_go_count = 0;
+ if (reason == KDB_REASON_DEBUG) {
+ /* special case below */
+ } else {
+ kdb_printf("\nEntering kdb (current=0x%p, pid %d) ",
+ kdb_current, kdb_current->pid);
+#if defined(CONFIG_SMP)
+ kdb_printf("on processor %d ", raw_smp_processor_id());
+#endif
+ }
+
+ switch (reason) {
+ case KDB_REASON_DEBUG:
+ {
+ /*
+ * If re-entering kdb after a single step
+ * command, don't print the message.
+ */
+ switch (db_result) {
+ case KDB_DB_BPT:
+ kdb_printf("\nEntering kdb (0x%p, pid %d) ",
+ kdb_current, kdb_current->pid);
+#if defined(CONFIG_SMP)
+ kdb_printf("on processor %d ", raw_smp_processor_id());
+#endif
+ kdb_printf("due to Debug @ " kdb_machreg_fmt "\n",
+ instruction_pointer(regs));
+ break;
+ case KDB_DB_SSB:
+ /*
+ * In the midst of ssb command. Just return.
+ */
+ KDB_DEBUG_STATE("kdb_local 3", reason);
+ return KDB_CMD_SSB; /* Continue with SSB command */
+
+ break;
+ case KDB_DB_SS:
+ break;
+ case KDB_DB_SSBPT:
+ KDB_DEBUG_STATE("kdb_local 4", reason);
+ return 1; /* kdba_db_trap did the work */
+ default:
+ kdb_printf("kdb: Bad result from kdba_db_trap: %d\n",
+ db_result);
+ break;
+ }
+
+ }
+ break;
+ case KDB_REASON_ENTER:
+ if (KDB_STATE(KEYBOARD))
+ kdb_printf("due to Keyboard Entry\n");
+ else
+ kdb_printf("due to KDB_ENTER()\n");
+ break;
+ case KDB_REASON_KEYBOARD:
+ KDB_STATE_SET(KEYBOARD);
+ kdb_printf("due to Keyboard Entry\n");
+ break;
+ case KDB_REASON_ENTER_SLAVE:
+ /* drop through, slaves only get released via cpu switch */
+ case KDB_REASON_SWITCH:
+ kdb_printf("due to cpu switch\n");
+ break;
+ case KDB_REASON_OOPS:
+ kdb_printf("Oops: %s\n", kdb_diemsg);
+ kdb_printf("due to oops @ " kdb_machreg_fmt "\n",
+ instruction_pointer(regs));
+ kdb_dumpregs(regs);
+ break;
+ case KDB_REASON_NMI:
+ kdb_printf("due to NonMaskable Interrupt @ "
+ kdb_machreg_fmt "\n",
+ instruction_pointer(regs));
+ kdb_dumpregs(regs);
+ break;
+ case KDB_REASON_SSTEP:
+ case KDB_REASON_BREAK:
+ kdb_printf("due to %s @ " kdb_machreg_fmt "\n",
+ reason == KDB_REASON_BREAK ?
+ "Breakpoint" : "SS trap", instruction_pointer(regs));
+ /*
+ * Determine if this breakpoint is one that we
+ * are interested in.
+ */
+ if (db_result != KDB_DB_BPT) {
+ kdb_printf("kdb: error return from kdba_bp_trap: %d\n",
+ db_result);
+ KDB_DEBUG_STATE("kdb_local 6", reason);
+ return 0; /* Not for us, dismiss it */
+ }
+ break;
+ case KDB_REASON_RECURSE:
+ kdb_printf("due to Recursion @ " kdb_machreg_fmt "\n",
+ instruction_pointer(regs));
+ break;
+ default:
+ kdb_printf("kdb: unexpected reason code: %d\n", reason);
+ KDB_DEBUG_STATE("kdb_local 8", reason);
+ return 0; /* Not for us, dismiss it */
+ }
+
+ while (1) {
+ /*
+ * Initialize pager context.
+ */
+ kdb_nextline = 1;
+ KDB_STATE_CLEAR(SUPPRESS);
+
+ cmdbuf = cmd_cur;
+ *cmdbuf = '\0';
+ *(cmd_hist[cmd_head]) = '\0';
+
+ if (KDB_FLAG(ONLY_DO_DUMP)) {
+ /* kdb is off but a catastrophic error requires a dump.
+ * Take the dump and reboot.
+ * Turn on logging so the kdb output appears in the log
+ * buffer in the dump.
+ */
+ const char *setargs[] = { "set", "LOGGING", "1" };
+ kdb_set(2, setargs);
+ kdb_reboot(0, NULL);
+ /*NOTREACHED*/
+ }
+
+do_full_getstr:
+#if defined(CONFIG_SMP)
+ snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"),
+ raw_smp_processor_id());
+#else
+ snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"));
+#endif
+ if (defcmd_in_progress)
+ strncat(kdb_prompt_str, "[defcmd]", CMD_BUFLEN);
+
+ /*
+ * Fetch command from keyboard
+ */
+ cmdbuf = kdb_getstr(cmdbuf, CMD_BUFLEN, kdb_prompt_str);
+ if (*cmdbuf != '\n') {
+ if (*cmdbuf < 32) {
+ if (cmdptr == cmd_head) {
+ strncpy(cmd_hist[cmd_head], cmd_cur,
+ CMD_BUFLEN);
+ *(cmd_hist[cmd_head] +
+ strlen(cmd_hist[cmd_head])-1) = '\0';
+ }
+ if (!handle_ctrl_cmd(cmdbuf))
+ *(cmd_cur+strlen(cmd_cur)-1) = '\0';
+ cmdbuf = cmd_cur;
+ goto do_full_getstr;
+ } else {
+ strncpy(cmd_hist[cmd_head], cmd_cur,
+ CMD_BUFLEN);
+ }
+
+ cmd_head = (cmd_head+1) % KDB_CMD_HISTORY_COUNT;
+ if (cmd_head == cmd_tail)
+ cmd_tail = (cmd_tail+1) % KDB_CMD_HISTORY_COUNT;
+ }
+
+ cmdptr = cmd_head;
+ diag = kdb_parse(cmdbuf);
+ if (diag == KDB_NOTFOUND) {
+ kdb_printf("Unknown kdb command: '%s'\n", cmdbuf);
+ diag = 0;
+ }
+ if (diag == KDB_CMD_GO
+ || diag == KDB_CMD_CPU
+ || diag == KDB_CMD_SS
+ || diag == KDB_CMD_SSB
+ || diag == KDB_CMD_KGDB)
+ break;
+
+ if (diag)
+ kdb_cmderror(diag);
+ }
+ KDB_DEBUG_STATE("kdb_local 9", diag);
+ return diag;
+}
+
+
+/*
+ * kdb_print_state - Print the state data for the current processor
+ * for debugging.
+ * Inputs:
+ * text Identifies the debug point
+ * value Any integer value to be printed, e.g. reason code.
+ */
+void kdb_print_state(const char *text, int value)
+{
+ kdb_printf("state: %s cpu %d value %d initial %d state %x\n",
+ text, raw_smp_processor_id(), value, kdb_initial_cpu,
+ kdb_state);
+}
+
+/*
+ * kdb_main_loop - After initial setup and assignment of the
+ * controlling cpu, all cpus are in this loop. One cpu is in
+ * control and will issue the kdb prompt, the others will spin
+ * until 'go' or cpu switch.
+ *
+ * To get a consistent view of the kernel stacks for all
+ * processes, this routine is invoked from the main kdb code via
+ * an architecture specific routine. kdba_main_loop is
+ * responsible for making the kernel stacks consistent for all
+ * processes, there should be no difference between a blocked
+ * process and a running process as far as kdb is concerned.
+ * Inputs:
+ * reason The reason KDB was invoked
+ * error The hardware-defined error code
+ * reason2 kdb's current reason code.
+ * Initially error but can change
+ *		according to kdb state.
+ * db_result Result code from break or debug point.
+ * regs The exception frame at time of fault/breakpoint.
+ * should always be valid.
+ * Returns:
+ *	0	KDB was invoked for an event for which it was not responsible
+ * 1 KDB handled the event for which it was invoked.
+ */
+int kdb_main_loop(kdb_reason_t reason, kdb_reason_t reason2, int error,
+ kdb_dbtrap_t db_result, struct pt_regs *regs)
+{
+ int result = 1;
+ /* Stay in kdb() until 'go', 'ss[b]' or an error */
+ while (1) {
+ /*
+ * All processors except the one that is in control
+ * will spin here.
+ */
+ KDB_DEBUG_STATE("kdb_main_loop 1", reason);
+ while (KDB_STATE(HOLD_CPU)) {
+ /* state KDB is turned off by kdb_cpu to see if the
+ * other cpus are still live, each cpu in this loop
+ * turns it back on.
+ */
+ if (!KDB_STATE(KDB))
+ KDB_STATE_SET(KDB);
+ }
+
+ KDB_STATE_CLEAR(SUPPRESS);
+ KDB_DEBUG_STATE("kdb_main_loop 2", reason);
+ if (KDB_STATE(LEAVING))
+ break; /* Another cpu said 'go' */
+ /* Still using kdb, this processor is in control */
+ result = kdb_local(reason2, error, regs, db_result);
+ KDB_DEBUG_STATE("kdb_main_loop 3", result);
+
+ if (result == KDB_CMD_CPU)
+ break;
+
+ if (result == KDB_CMD_SS) {
+ KDB_STATE_SET(DOING_SS);
+ break;
+ }
+
+ if (result == KDB_CMD_SSB) {
+ KDB_STATE_SET(DOING_SS);
+ KDB_STATE_SET(DOING_SSB);
+ break;
+ }
+
+ if (result == KDB_CMD_KGDB) {
+ if (!(KDB_STATE(DOING_KGDB) || KDB_STATE(DOING_KGDB2)))
+ kdb_printf("Entering please attach debugger "
+ "or use $D#44+ or $3#33\n");
+ break;
+ }
+ if (result && result != 1 && result != KDB_CMD_GO)
+ kdb_printf("\nUnexpected kdb_local return code %d\n",
+ result);
+ KDB_DEBUG_STATE("kdb_main_loop 4", reason);
+ break;
+ }
+ if (KDB_STATE(DOING_SS))
+ KDB_STATE_CLEAR(SSBPT);
+
+ return result;
+}
+
+/*
+ * kdb_mdr - This function implements the guts of the 'mdr', memory
+ * read command.
+ * mdr <addr arg>,<byte count>
+ * Inputs:
+ * addr Start address
+ * count Number of bytes
+ * Returns:
+ * Always 0. Any errors are detected and printed by kdb_getarea.
+ */
+static int kdb_mdr(unsigned long addr, unsigned int count)
+{
+ unsigned char c;
+ while (count--) {
+ if (kdb_getarea(c, addr))
+ return 0;
+ kdb_printf("%02x", c);
+ addr++;
+ }
+ kdb_printf("\n");
+ return 0;
+}
+
+/*
+ * kdb_md - This function implements the 'md', 'md1', 'md2', 'md4',
+ * 'md8' 'mdr' and 'mds' commands.
+ *
+ * md|mds [<addr arg> [<line count> [<radix>]]]
+ * mdWcN [<addr arg> [<line count> [<radix>]]]
+ * where W = is the width (1, 2, 4 or 8) and N is the count.
+ * for eg., md1c20 reads 20 bytes, 1 at a time.
+ * mdr <addr arg>,<byte count>
+ */
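+
+/*
+ * A few illustrative forms of the syntax above (addresses are examples):
+ *
+ *	md 0xc0000000		display MDCOUNT lines starting at that address
+ *	md2c8 0xc0000000	display eight 2-byte words
+ *	mdr 0xc0000000 16	dump 16 raw bytes as a contiguous hex string
+ *	mds schedule		one word per line with symbolic decoding
+ */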
+static void kdb_md_line(const char *fmtstr, unsigned long addr,
+ int symbolic, int nosect, int bytesperword,
+ int num, int repeat, int phys)
+{
+ /* print just one line of data */
+ kdb_symtab_t symtab;
+ char cbuf[32];
+ char *c = cbuf;
+ int i;
+ unsigned long word;
+
+ memset(cbuf, '\0', sizeof(cbuf));
+ if (phys)
+ kdb_printf("phys " kdb_machreg_fmt0 " ", addr);
+ else
+ kdb_printf(kdb_machreg_fmt0 " ", addr);
+
+ for (i = 0; i < num && repeat--; i++) {
+ if (phys) {
+ if (kdb_getphysword(&word, addr, bytesperword))
+ break;
+ } else if (kdb_getword(&word, addr, bytesperword))
+ break;
+ kdb_printf(fmtstr, word);
+ if (symbolic)
+ kdbnearsym(word, &symtab);
+ else
+ memset(&symtab, 0, sizeof(symtab));
+ if (symtab.sym_name) {
+ kdb_symbol_print(word, &symtab, 0);
+ if (!nosect) {
+ kdb_printf("\n");
+ kdb_printf(" %s %s "
+ kdb_machreg_fmt " "
+ kdb_machreg_fmt " "
+ kdb_machreg_fmt, symtab.mod_name,
+ symtab.sec_name, symtab.sec_start,
+ symtab.sym_start, symtab.sym_end);
+ }
+ addr += bytesperword;
+ } else {
+ union {
+ u64 word;
+ unsigned char c[8];
+ } wc;
+ unsigned char *cp;
+#ifdef __BIG_ENDIAN
+ cp = wc.c + 8 - bytesperword;
+#else
+ cp = wc.c;
+#endif
+ wc.word = word;
+#define printable_char(c) \
+ ({unsigned char __c = c; isascii(__c) && isprint(__c) ? __c : '.'; })
+ switch (bytesperword) {
+ case 8:
+ *c++ = printable_char(*cp++);
+ *c++ = printable_char(*cp++);
+ *c++ = printable_char(*cp++);
+ *c++ = printable_char(*cp++);
+ addr += 4;
+ case 4:
+ *c++ = printable_char(*cp++);
+ *c++ = printable_char(*cp++);
+ addr += 2;
+ case 2:
+ *c++ = printable_char(*cp++);
+ addr++;
+ case 1:
+ *c++ = printable_char(*cp++);
+ addr++;
+ break;
+ }
+#undef printable_char
+ }
+ }
+ kdb_printf("%*s %s\n", (int)((num-i)*(2*bytesperword + 1)+1),
+ " ", cbuf);
+}
+
+static int kdb_md(int argc, const char **argv)
+{
+ static unsigned long last_addr;
+ static int last_radix, last_bytesperword, last_repeat;
+ int radix = 16, mdcount = 8, bytesperword = KDB_WORD_SIZE, repeat;
+ int nosect = 0;
+ char fmtchar, fmtstr[64];
+ unsigned long addr;
+ unsigned long word;
+ long offset = 0;
+ int symbolic = 0;
+ int valid = 0;
+ int phys = 0;
+
+ kdbgetintenv("MDCOUNT", &mdcount);
+ kdbgetintenv("RADIX", &radix);
+ kdbgetintenv("BYTESPERWORD", &bytesperword);
+
+ /* Assume 'md <addr>' and start with environment values */
+ repeat = mdcount * 16 / bytesperword;
+
+ if (strcmp(argv[0], "mdr") == 0) {
+ if (argc != 2)
+ return KDB_ARGCOUNT;
+ valid = 1;
+ } else if (isdigit(argv[0][2])) {
+ bytesperword = (int)(argv[0][2] - '0');
+ if (bytesperword == 0) {
+ bytesperword = last_bytesperword;
+ if (bytesperword == 0)
+ bytesperword = 4;
+ }
+ last_bytesperword = bytesperword;
+ repeat = mdcount * 16 / bytesperword;
+ if (!argv[0][3])
+ valid = 1;
+ else if (argv[0][3] == 'c' && argv[0][4]) {
+ char *p;
+ repeat = simple_strtoul(argv[0] + 4, &p, 10);
+ mdcount = ((repeat * bytesperword) + 15) / 16;
+ valid = !*p;
+ }
+ last_repeat = repeat;
+ } else if (strcmp(argv[0], "md") == 0)
+ valid = 1;
+ else if (strcmp(argv[0], "mds") == 0)
+ valid = 1;
+ else if (strcmp(argv[0], "mdp") == 0) {
+ phys = valid = 1;
+ }
+ if (!valid)
+ return KDB_NOTFOUND;
+
+ if (argc == 0) {
+ if (last_addr == 0)
+ return KDB_ARGCOUNT;
+ addr = last_addr;
+ radix = last_radix;
+ bytesperword = last_bytesperword;
+ repeat = last_repeat;
+ mdcount = ((repeat * bytesperword) + 15) / 16;
+ }
+
+ if (argc) {
+ unsigned long val;
+ int diag, nextarg = 1;
+ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr,
+ &offset, NULL);
+ if (diag)
+ return diag;
+ if (argc > nextarg+2)
+ return KDB_ARGCOUNT;
+
+ if (argc >= nextarg) {
+ diag = kdbgetularg(argv[nextarg], &val);
+ if (!diag) {
+ mdcount = (int) val;
+ repeat = mdcount * 16 / bytesperword;
+ }
+ }
+ if (argc >= nextarg+1) {
+ diag = kdbgetularg(argv[nextarg+1], &val);
+ if (!diag)
+ radix = (int) val;
+ }
+ }
+
+ if (strcmp(argv[0], "mdr") == 0)
+ return kdb_mdr(addr, mdcount);
+
+ switch (radix) {
+ case 10:
+ fmtchar = 'd';
+ break;
+ case 16:
+ fmtchar = 'x';
+ break;
+ case 8:
+ fmtchar = 'o';
+ break;
+ default:
+ return KDB_BADRADIX;
+ }
+
+ last_radix = radix;
+
+ if (bytesperword > KDB_WORD_SIZE)
+ return KDB_BADWIDTH;
+
+ switch (bytesperword) {
+ case 8:
+ sprintf(fmtstr, "%%16.16l%c ", fmtchar);
+ break;
+ case 4:
+ sprintf(fmtstr, "%%8.8l%c ", fmtchar);
+ break;
+ case 2:
+ sprintf(fmtstr, "%%4.4l%c ", fmtchar);
+ break;
+ case 1:
+ sprintf(fmtstr, "%%2.2l%c ", fmtchar);
+ break;
+ default:
+ return KDB_BADWIDTH;
+ }
+
+ last_repeat = repeat;
+ last_bytesperword = bytesperword;
+
+ if (strcmp(argv[0], "mds") == 0) {
+ symbolic = 1;
+ /* Do not save these changes as last_*, they are temporary mds
+ * overrides.
+ */
+ bytesperword = KDB_WORD_SIZE;
+ repeat = mdcount;
+ kdbgetintenv("NOSECT", &nosect);
+ }
+
+ /* Round address down modulo BYTESPERWORD */
+
+ addr &= ~(bytesperword-1);
+
+ while (repeat > 0) {
+ unsigned long a;
+ int n, z, num = (symbolic ? 1 : (16 / bytesperword));
+
+ if (KDB_FLAG(CMD_INTERRUPT))
+ return 0;
+ for (a = addr, z = 0; z < repeat; a += bytesperword, ++z) {
+ if (phys) {
+ if (kdb_getphysword(&word, a, bytesperword)
+ || word)
+ break;
+ } else if (kdb_getword(&word, a, bytesperword) || word)
+ break;
+ }
+ n = min(num, repeat);
+ kdb_md_line(fmtstr, addr, symbolic, nosect, bytesperword,
+ num, repeat, phys);
+ addr += bytesperword * n;
+ repeat -= n;
+ z = (z + num - 1) / num;
+ if (z > 2) {
+ int s = num * (z-2);
+ kdb_printf(kdb_machreg_fmt0 "-" kdb_machreg_fmt0
+ " zero suppressed\n",
+ addr, addr + bytesperword * s - 1);
+ addr += bytesperword * s;
+ repeat -= s;
+ }
+ }
+ last_addr = addr;
+
+ return 0;
+}
+
+/*
+ * kdb_mm - This function implements the 'mm' command.
+ * mm address-expression new-value
+ * Remarks:
+ *	mm works on machine words, mmW works on W-byte quantities
+ *	(e.g. mm1 modifies a single byte).
+ */
+static int kdb_mm(int argc, const char **argv)
+{
+ int diag;
+ unsigned long addr;
+ long offset = 0;
+ unsigned long contents;
+ int nextarg;
+ int width;
+
+ if (argv[0][2] && !isdigit(argv[0][2]))
+ return KDB_NOTFOUND;
+
+ if (argc < 2)
+ return KDB_ARGCOUNT;
+
+ nextarg = 1;
+ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
+ if (diag)
+ return diag;
+
+ if (nextarg > argc)
+ return KDB_ARGCOUNT;
+ diag = kdbgetaddrarg(argc, argv, &nextarg, &contents, NULL, NULL);
+ if (diag)
+ return diag;
+
+ if (nextarg != argc + 1)
+ return KDB_ARGCOUNT;
+
+ width = argv[0][2] ? (argv[0][2] - '0') : (KDB_WORD_SIZE);
+ diag = kdb_putword(addr, contents, width);
+ if (diag)
+ return diag;
+
+ kdb_printf(kdb_machreg_fmt " = " kdb_machreg_fmt "\n", addr, contents);
+
+ return 0;
+}
+
+/*
+ * kdb_go - This function implements the 'go' command.
+ * go [address-expression]
+ */
+static int kdb_go(int argc, const char **argv)
+{
+ unsigned long addr;
+ int diag;
+ int nextarg;
+ long offset;
+
+ if (argc == 1) {
+ if (raw_smp_processor_id() != kdb_initial_cpu) {
+ kdb_printf("go <address> must be issued from the "
+ "initial cpu, do cpu %d first\n",
+ kdb_initial_cpu);
+ return KDB_ARGCOUNT;
+ }
+ nextarg = 1;
+ diag = kdbgetaddrarg(argc, argv, &nextarg,
+ &addr, &offset, NULL);
+ if (diag)
+ return diag;
+ } else if (argc) {
+ return KDB_ARGCOUNT;
+ }
+
+ diag = KDB_CMD_GO;
+ if (KDB_FLAG(CATASTROPHIC)) {
+ kdb_printf("Catastrophic error detected\n");
+ kdb_printf("kdb_continue_catastrophic=%d, ",
+ kdb_continue_catastrophic);
+ if (kdb_continue_catastrophic == 0 && kdb_go_count++ == 0) {
+ kdb_printf("type go a second time if you really want "
+ "to continue\n");
+ return 0;
+ }
+ if (kdb_continue_catastrophic == 2) {
+ kdb_printf("forcing reboot\n");
+ kdb_reboot(0, NULL);
+ }
+ kdb_printf("attempting to continue\n");
+ }
+ return diag;
+}
+
+/*
+ * kdb_rd - This function implements the 'rd' command.
+ */
+static int kdb_rd(int argc, const char **argv)
+{
+ int diag = kdb_check_regs();
+ if (diag)
+ return diag;
+
+ kdb_dumpregs(kdb_current_regs);
+ return 0;
+}
+
+/*
+ * kdb_rm - This function implements the 'rm' (register modify) command.
+ * rm register-name new-contents
+ * Remarks:
+ * Currently doesn't allow modification of control or
+ * debug registers.
+ */
+static int kdb_rm(int argc, const char **argv)
+{
+ int diag;
+ int ind = 0;
+ unsigned long contents;
+
+ if (argc != 2)
+ return KDB_ARGCOUNT;
+ /*
+ * Allow presence or absence of leading '%' symbol.
+ */
+ if (argv[1][0] == '%')
+ ind = 1;
+
+ diag = kdbgetularg(argv[2], &contents);
+ if (diag)
+ return diag;
+
+ diag = kdb_check_regs();
+ if (diag)
+ return diag;
+ kdb_printf("ERROR: Register set currently not implemented\n");
+ return 0;
+}
+
+#if defined(CONFIG_MAGIC_SYSRQ)
+/*
+ * kdb_sr - This function implements the 'sr' (SYSRQ key) command
+ * which interfaces to the soi-disant MAGIC SYSRQ functionality.
+ * sr <magic-sysrq-code>
+ */
+static int kdb_sr(int argc, const char **argv)
+{
+ if (argc != 1)
+ return KDB_ARGCOUNT;
+
+ sysrq_toggle_support(1);
+
+ kdb_trap_printk++;
+ handle_sysrq(*argv[1], NULL);
+ kdb_trap_printk--;
+
+ return 0;
+}
+#endif /* CONFIG_MAGIC_SYSRQ */
+
+/*
+ * kdb_ef - This function implements the 'regs' (display exception
+ * frame) command. This command takes an address and expects to
+ * find an exception frame at that address, formats and prints
+ * it.
+ * regs address-expression
+ * Remarks:
+ * Not done yet.
+ */
+static int kdb_ef(int argc, const char **argv)
+{
+ int diag;
+ unsigned long addr;
+ long offset;
+ int nextarg;
+
+ if (argc != 1)
+ return KDB_ARGCOUNT;
+
+ nextarg = 1;
+ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
+ if (diag)
+ return diag;
+ show_regs((struct pt_regs *)addr);
+ return 0;
+}
+
+#if defined(CONFIG_MODULES)
+/* modules using other modules */
+struct module_use {
+ struct list_head list;
+ struct module *module_which_uses;
+};
+
+/*
+ * kdb_lsmod - This function implements the 'lsmod' command. Lists
+ * currently loaded kernel modules.
+ * Mostly taken from userland lsmod.
+ */
+static int kdb_lsmod(int argc, const char **argv)
+{
+ struct module *mod;
+
+ if (argc != 0)
+ return KDB_ARGCOUNT;
+
+ kdb_printf("Module Size modstruct Used by\n");
+ list_for_each_entry(mod, kdb_modules, list) {
+
+ kdb_printf("%-20s%8u 0x%p ", mod->name,
+ mod->core_size, (void *)mod);
+#ifdef CONFIG_MODULE_UNLOAD
+ kdb_printf("%4d ", module_refcount(mod));
+#endif
+ if (mod->state == MODULE_STATE_GOING)
+ kdb_printf(" (Unloading)");
+ else if (mod->state == MODULE_STATE_COMING)
+ kdb_printf(" (Loading)");
+ else
+ kdb_printf(" (Live)");
+
+#ifdef CONFIG_MODULE_UNLOAD
+ {
+ struct module_use *use;
+ kdb_printf(" [ ");
+ list_for_each_entry(use, &mod->modules_which_use_me,
+ list)
+ kdb_printf("%s ", use->module_which_uses->name);
+ kdb_printf("]\n");
+ }
+#endif
+ }
+
+ return 0;
+}
+
+#endif /* CONFIG_MODULES */
+
+/*
+ * kdb_env - This function implements the 'env' command. Display the
+ * current environment variables.
+ */
+
+static int kdb_env(int argc, const char **argv)
+{
+ int i;
+
+ for (i = 0; i < __nenv; i++) {
+ if (__env[i])
+ kdb_printf("%s\n", __env[i]);
+ }
+
+ if (KDB_DEBUG(MASK))
+ kdb_printf("KDBFLAGS=0x%x\n", kdb_flags);
+
+ return 0;
+}
+
+#ifdef CONFIG_PRINTK
+/*
+ * kdb_dmesg - This function implements the 'dmesg' command to display
+ * the contents of the syslog buffer.
+ * dmesg [lines] [adjust]
+ */
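+/*
+ * For example (argument values illustrative):
+ *	dmesg		print the whole syslog buffer
+ *	dmesg 20	print only the last 20 lines
+ *	dmesg -20	print only the first 20 lines
+ *	dmesg 20 10	print 20 lines, stopping 10 lines short of the end
+ */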
+static int kdb_dmesg(int argc, const char **argv)
+{
+ char *syslog_data[4], *start, *end, c = '\0', *p;
+ int diag, logging, logsize, lines = 0, adjust = 0, n;
+
+ if (argc > 2)
+ return KDB_ARGCOUNT;
+ if (argc) {
+ char *cp;
+ lines = simple_strtol(argv[1], &cp, 0);
+ if (*cp)
+ lines = 0;
+ if (argc > 1) {
+ adjust = simple_strtoul(argv[2], &cp, 0);
+ if (*cp || adjust < 0)
+ adjust = 0;
+ }
+ }
+
+ /* disable LOGGING if set */
+ diag = kdbgetintenv("LOGGING", &logging);
+ if (!diag && logging) {
+ const char *setargs[] = { "set", "LOGGING", "0" };
+ kdb_set(2, setargs);
+ }
+
+ /* syslog_data[0,1] physical start, end+1. syslog_data[2,3]
+ * logical start, end+1. */
+ kdb_syslog_data(syslog_data);
+ if (syslog_data[2] == syslog_data[3])
+ return 0;
+ logsize = syslog_data[1] - syslog_data[0];
+ start = syslog_data[2];
+ end = syslog_data[3];
+#define KDB_WRAP(p) (((p - syslog_data[0]) % logsize) + syslog_data[0])
+ for (n = 0, p = start; p < end; ++p) {
+ c = *KDB_WRAP(p);
+ if (c == '\n')
+ ++n;
+ }
+ if (c != '\n')
+ ++n;
+ if (lines < 0) {
+ if (adjust >= n)
+ kdb_printf("buffer only contains %d lines, nothing "
+ "printed\n", n);
+ else if (adjust - lines >= n)
+ kdb_printf("buffer only contains %d lines, last %d "
+ "lines printed\n", n, n - adjust);
+ if (adjust) {
+ for (; start < end && adjust; ++start) {
+ if (*KDB_WRAP(start) == '\n')
+ --adjust;
+ }
+ if (start < end)
+ ++start;
+ }
+ for (p = start; p < end && lines; ++p) {
+ if (*KDB_WRAP(p) == '\n')
+ ++lines;
+ }
+ end = p;
+ } else if (lines > 0) {
+ int skip = n - (adjust + lines);
+ if (adjust >= n) {
+ kdb_printf("buffer only contains %d lines, "
+ "nothing printed\n", n);
+ skip = n;
+ } else if (skip < 0) {
+ lines += skip;
+ skip = 0;
+ kdb_printf("buffer only contains %d lines, first "
+ "%d lines printed\n", n, lines);
+ }
+ for (; start < end && skip; ++start) {
+ if (*KDB_WRAP(start) == '\n')
+ --skip;
+ }
+ for (p = start; p < end && lines; ++p) {
+ if (*KDB_WRAP(p) == '\n')
+ --lines;
+ }
+ end = p;
+ }
+ /* Do a line at a time (max 200 chars) to reduce protocol overhead */
+ c = '\n';
+ while (start != end) {
+ char buf[201];
+ p = buf;
+ if (KDB_FLAG(CMD_INTERRUPT))
+ return 0;
+ while (start < end && (c = *KDB_WRAP(start)) &&
+ (p - buf) < sizeof(buf)-1) {
+ ++start;
+ *p++ = c;
+ if (c == '\n')
+ break;
+ }
+ *p = '\0';
+ kdb_printf("%s", buf);
+ }
+ if (c != '\n')
+ kdb_printf("\n");
+
+ return 0;
+}
+#endif /* CONFIG_PRINTK */
+/*
+ * kdb_cpu - This function implements the 'cpu' command.
+ * cpu [<cpunum>]
+ * Returns:
+ * KDB_CMD_CPU for success, a kdb diagnostic if error
+ */
+static void kdb_cpu_status(void)
+{
+ int i, start_cpu, first_print = 1;
+ char state, prev_state = '?';
+
+ kdb_printf("Currently on cpu %d\n", raw_smp_processor_id());
+ kdb_printf("Available cpus: ");
+ for (start_cpu = -1, i = 0; i < NR_CPUS; i++) {
+ if (!cpu_online(i)) {
+ state = 'F'; /* cpu is offline */
+ } else {
+ state = ' '; /* cpu is responding to kdb */
+ if (kdb_task_state_char(KDB_TSK(i)) == 'I')
+ state = 'I'; /* idle task */
+ }
+ if (state != prev_state) {
+ if (prev_state != '?') {
+ if (!first_print)
+ kdb_printf(", ");
+ first_print = 0;
+ kdb_printf("%d", start_cpu);
+ if (start_cpu < i-1)
+ kdb_printf("-%d", i-1);
+ if (prev_state != ' ')
+ kdb_printf("(%c)", prev_state);
+ }
+ prev_state = state;
+ start_cpu = i;
+ }
+ }
+ /* print the trailing cpus, ignoring them if they are all offline */
+ if (prev_state != 'F') {
+ if (!first_print)
+ kdb_printf(", ");
+ kdb_printf("%d", start_cpu);
+ if (start_cpu < i-1)
+ kdb_printf("-%d", i-1);
+ if (prev_state != ' ')
+ kdb_printf("(%c)", prev_state);
+ }
+ kdb_printf("\n");
+}
+
+static int kdb_cpu(int argc, const char **argv)
+{
+ unsigned long cpunum;
+ int diag;
+
+ if (argc == 0) {
+ kdb_cpu_status();
+ return 0;
+ }
+
+ if (argc != 1)
+ return KDB_ARGCOUNT;
+
+ diag = kdbgetularg(argv[1], &cpunum);
+ if (diag)
+ return diag;
+
+ /*
+ * Validate cpunum
+ */
+	if ((cpunum >= NR_CPUS) || !cpu_online(cpunum))
+ return KDB_BADCPUNUM;
+
+ dbg_switch_cpu = cpunum;
+
+ /*
+ * Switch to other cpu
+ */
+ return KDB_CMD_CPU;
+}
+
+/* The user may not realize that ps/bta with no parameters does not print idle
+ * or sleeping system daemon processes, so tell them how many were suppressed.
+ */
+void kdb_ps_suppressed(void)
+{
+ int idle = 0, daemon = 0;
+ unsigned long mask_I = kdb_task_state_string("I"),
+ mask_M = kdb_task_state_string("M");
+ unsigned long cpu;
+ const struct task_struct *p, *g;
+ for_each_online_cpu(cpu) {
+ p = kdb_curr_task(cpu);
+ if (kdb_task_state(p, mask_I))
+ ++idle;
+ }
+ kdb_do_each_thread(g, p) {
+ if (kdb_task_state(p, mask_M))
+ ++daemon;
+ } kdb_while_each_thread(g, p);
+ if (idle || daemon) {
+ if (idle)
+ kdb_printf("%d idle process%s (state I)%s\n",
+ idle, idle == 1 ? "" : "es",
+ daemon ? " and " : "");
+ if (daemon)
+ kdb_printf("%d sleeping system daemon (state M) "
+ "process%s", daemon,
+ daemon == 1 ? "" : "es");
+ kdb_printf(" suppressed,\nuse 'ps A' to see all.\n");
+ }
+}
+
+/*
+ * kdb_ps - This function implements the 'ps' command which shows a
+ * list of the active processes.
+ * ps [DRSTCZEUIMA] All processes, optionally filtered by state
+ */
+void kdb_ps1(const struct task_struct *p)
+{
+ int cpu;
+ unsigned long tmp;
+
+ if (!p || probe_kernel_read(&tmp, (char *)p, sizeof(unsigned long)))
+ return;
+
+ cpu = kdb_process_cpu(p);
+ kdb_printf("0x%p %8d %8d %d %4d %c 0x%p %c%s\n",
+ (void *)p, p->pid, p->parent->pid,
+ kdb_task_has_cpu(p), kdb_process_cpu(p),
+ kdb_task_state_char(p),
+ (void *)(&p->thread),
+ p == kdb_curr_task(raw_smp_processor_id()) ? '*' : ' ',
+ p->comm);
+ if (kdb_task_has_cpu(p)) {
+ if (!KDB_TSK(cpu)) {
+ kdb_printf(" Error: no saved data for this cpu\n");
+ } else {
+ if (KDB_TSK(cpu) != p)
+ kdb_printf(" Error: does not match running "
+ "process table (0x%p)\n", KDB_TSK(cpu));
+ }
+ }
+}
+
+static int kdb_ps(int argc, const char **argv)
+{
+ struct task_struct *g, *p;
+ unsigned long mask, cpu;
+
+ if (argc == 0)
+ kdb_ps_suppressed();
+ kdb_printf("%-*s Pid Parent [*] cpu State %-*s Command\n",
+ (int)(2*sizeof(void *))+2, "Task Addr",
+ (int)(2*sizeof(void *))+2, "Thread");
+ mask = kdb_task_state_string(argc ? argv[1] : NULL);
+ /* Run the active tasks first */
+ for_each_online_cpu(cpu) {
+ if (KDB_FLAG(CMD_INTERRUPT))
+ return 0;
+ p = kdb_curr_task(cpu);
+ if (kdb_task_state(p, mask))
+ kdb_ps1(p);
+ }
+ kdb_printf("\n");
+ /* Now the real tasks */
+ kdb_do_each_thread(g, p) {
+ if (KDB_FLAG(CMD_INTERRUPT))
+ return 0;
+ if (kdb_task_state(p, mask))
+ kdb_ps1(p);
+ } kdb_while_each_thread(g, p);
+
+ return 0;
+}
+
+/*
+ * kdb_pid - This function implements the 'pid' command which switches
+ * the currently active process.
+ * pid [<pid> | R]
+ */
+static int kdb_pid(int argc, const char **argv)
+{
+ struct task_struct *p;
+ unsigned long val;
+ int diag;
+
+ if (argc > 1)
+ return KDB_ARGCOUNT;
+
+ if (argc) {
+ if (strcmp(argv[1], "R") == 0) {
+ p = KDB_TSK(kdb_initial_cpu);
+ } else {
+ diag = kdbgetularg(argv[1], &val);
+ if (diag)
+ return KDB_BADINT;
+
+ p = find_task_by_pid_ns((pid_t)val, &init_pid_ns);
+ if (!p) {
+ kdb_printf("No task with pid=%d\n", (pid_t)val);
+ return 0;
+ }
+ }
+ kdb_set_current_task(p);
+ }
+ kdb_printf("KDB current process is %s(pid=%d)\n",
+ kdb_current_task->comm,
+ kdb_current_task->pid);
+
+ return 0;
+}
+
+/*
+ * kdb_ll - This function implements the 'll' command which follows a
+ * linked list and executes an arbitrary command for each
+ * element.
+ */
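+/*
+ * Illustrative use (the address and offset are examples only):
+ *
+ *	kdb> ll 0xc12345a0 0x18 md
+ *
+ * starts at 0xc12345a0, runs 'md <element>' on every node, and follows
+ * the pointer found 0x18 bytes into each node until it reads NULL.
+ */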
+static int kdb_ll(int argc, const char **argv)
+{
+ int diag;
+ unsigned long addr;
+ long offset = 0;
+ unsigned long va;
+ unsigned long linkoffset;
+ int nextarg;
+ const char *command;
+
+ if (argc != 3)
+ return KDB_ARGCOUNT;
+
+ nextarg = 1;
+ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
+ if (diag)
+ return diag;
+
+ diag = kdbgetularg(argv[2], &linkoffset);
+ if (diag)
+ return diag;
+
+ /*
+ * Using the starting address as
+ * the first element in the list, and assuming that
+ * the list ends with a null pointer.
+ */
+
+ va = addr;
+ command = kdb_strdup(argv[3], GFP_KDB);
+ if (!command) {
+ kdb_printf("%s: cannot duplicate command\n", __func__);
+ return 0;
+ }
+ /* Recursive use of kdb_parse, do not use argv after this point */
+ argv = NULL;
+
+ while (va) {
+ char buf[80];
+
+ sprintf(buf, "%s " kdb_machreg_fmt "\n", command, va);
+ diag = kdb_parse(buf);
+ if (diag)
+ return diag;
+
+ addr = va + linkoffset;
+ if (kdb_getword(&va, addr, sizeof(va)))
+ return 0;
+ }
+ kfree(command);
+
+ return 0;
+}
+
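+/*
+ * kdb_kgdb - This function implements the 'kgdb' command by returning
+ *	KDB_CMD_KGDB, which makes kdb_main_loop break out and print the
+ *	"Entering please attach debugger ..." hand-over message so an
+ *	external debugger can take over.
+ */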
+static int kdb_kgdb(int argc, const char **argv)
+{
+ return KDB_CMD_KGDB;
+}
+
+/*
+ * kdb_help - This function implements the 'help' and '?' commands.
+ */
+static int kdb_help(int argc, const char **argv)
+{
+ kdbtab_t *kt;
+ int i;
+
+ kdb_printf("%-15.15s %-20.20s %s\n", "Command", "Usage", "Description");
+ kdb_printf("-----------------------------"
+ "-----------------------------\n");
+ for_each_kdbcmd(kt, i) {
+ if (kt->cmd_name)
+ kdb_printf("%-15.15s %-20.20s %s\n", kt->cmd_name,
+ kt->cmd_usage, kt->cmd_help);
+ if (KDB_FLAG(CMD_INTERRUPT))
+ return 0;
+ }
+ return 0;
+}
+
+/*
+ * kdb_kill - This function implements the 'kill' commands.
+ */
+static int kdb_kill(int argc, const char **argv)
+{
+ long sig, pid;
+ char *endp;
+ struct task_struct *p;
+ struct siginfo info;
+
+ if (argc != 2)
+ return KDB_ARGCOUNT;
+
+ sig = simple_strtol(argv[1], &endp, 0);
+ if (*endp)
+ return KDB_BADINT;
+ if (sig >= 0) {
+ kdb_printf("Invalid signal parameter.<-signal>\n");
+ return 0;
+ }
+ sig = -sig;
+
+ pid = simple_strtol(argv[2], &endp, 0);
+ if (*endp)
+ return KDB_BADINT;
+ if (pid <= 0) {
+ kdb_printf("Process ID must be large than 0.\n");
+ return 0;
+ }
+
+ /* Find the process. */
+ p = find_task_by_pid_ns(pid, &init_pid_ns);
+ if (!p) {
+ kdb_printf("The specified process isn't found.\n");
+ return 0;
+ }
+ p = p->group_leader;
+ info.si_signo = sig;
+ info.si_errno = 0;
+ info.si_code = SI_USER;
+ info.si_pid = pid; /* same capabilities as process being signalled */
+ info.si_uid = 0; /* kdb has root authority */
+ kdb_send_sig_info(p, &info);
+ return 0;
+}
+
+struct kdb_tm {
+ int tm_sec; /* seconds */
+ int tm_min; /* minutes */
+ int tm_hour; /* hours */
+ int tm_mday; /* day of the month */
+ int tm_mon; /* month */
+ int tm_year; /* year */
+};
+
+static void kdb_gmtime(struct timespec *tv, struct kdb_tm *tm)
+{
+ /* This will work from 1970-2099, 2100 is not a leap year */
+ static int mon_day[] = { 31, 29, 31, 30, 31, 30, 31,
+ 31, 30, 31, 30, 31 };
+ memset(tm, 0, sizeof(*tm));
+ tm->tm_sec = tv->tv_sec % (24 * 60 * 60);
+ tm->tm_mday = tv->tv_sec / (24 * 60 * 60) +
+ (2 * 365 + 1); /* shift base from 1970 to 1968 */
+ tm->tm_min = tm->tm_sec / 60 % 60;
+ tm->tm_hour = tm->tm_sec / 60 / 60;
+ tm->tm_sec = tm->tm_sec % 60;
+ tm->tm_year = 68 + 4*(tm->tm_mday / (4*365+1));
+ tm->tm_mday %= (4*365+1);
+ mon_day[1] = 29;
+ while (tm->tm_mday >= mon_day[tm->tm_mon]) {
+ tm->tm_mday -= mon_day[tm->tm_mon];
+ if (++tm->tm_mon == 12) {
+ tm->tm_mon = 0;
+ ++tm->tm_year;
+ mon_day[1] = 28;
+ }
+ }
+ ++tm->tm_mday;
+}
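+
+/*
+ * Worked example of the conversion above: tv_sec == 0 yields tm_sec,
+ * tm_min and tm_hour all 0, tm_year 70, tm_mon 0 and tm_mday 1, i.e.
+ * 1970-01-01 00:00:00, matching the 1900-based year and 0-based month
+ * that kdb_summary uses when it prints the date.
+ */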
+
+/*
+ * Most of this code has been lifted from kernel/timer.c::sys_sysinfo().
+ * I cannot call that code directly from kdb, it has an unconditional
+ * cli()/sti() and calls routines that take locks which can stop the debugger.
+ */
+static void kdb_sysinfo(struct sysinfo *val)
+{
+ struct timespec uptime;
+ do_posix_clock_monotonic_gettime(&uptime);
+ memset(val, 0, sizeof(*val));
+ val->uptime = uptime.tv_sec;
+ val->loads[0] = avenrun[0];
+ val->loads[1] = avenrun[1];
+ val->loads[2] = avenrun[2];
+ val->procs = nr_threads-1;
+ si_meminfo(val);
+
+ return;
+}
+
+/*
+ * kdb_summary - This function implements the 'summary' command.
+ */
+static int kdb_summary(int argc, const char **argv)
+{
+ struct kdb_tm tm;
+ struct sysinfo val;
+
+ if (argc)
+ return KDB_ARGCOUNT;
+
+ kdb_printf("sysname %s\n", init_uts_ns.name.sysname);
+ kdb_printf("release %s\n", init_uts_ns.name.release);
+ kdb_printf("version %s\n", init_uts_ns.name.version);
+ kdb_printf("machine %s\n", init_uts_ns.name.machine);
+ kdb_printf("nodename %s\n", init_uts_ns.name.nodename);
+ kdb_printf("domainname %s\n", init_uts_ns.name.domainname);
+ kdb_printf("ccversion %s\n", __stringify(CCVERSION));
+
+ kdb_gmtime(&xtime, &tm);
+ kdb_printf("date %04d-%02d-%02d %02d:%02d:%02d "
+ "tz_minuteswest %d\n",
+ 1900+tm.tm_year, tm.tm_mon+1, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec,
+ sys_tz.tz_minuteswest);
+
+ kdb_sysinfo(&val);
+ kdb_printf("uptime ");
+ if (val.uptime > (24*60*60)) {
+ int days = val.uptime / (24*60*60);
+ val.uptime %= (24*60*60);
+ kdb_printf("%d day%s ", days, days == 1 ? "" : "s");
+ }
+ kdb_printf("%02ld:%02ld\n", val.uptime/(60*60), (val.uptime/60)%60);
+
+ /* lifted from fs/proc/proc_misc.c::loadavg_read_proc() */
+
+#define LOAD_INT(x) ((x) >> FSHIFT)
+#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
+ kdb_printf("load avg %ld.%02ld %ld.%02ld %ld.%02ld\n",
+ LOAD_INT(val.loads[0]), LOAD_FRAC(val.loads[0]),
+ LOAD_INT(val.loads[1]), LOAD_FRAC(val.loads[1]),
+ LOAD_INT(val.loads[2]), LOAD_FRAC(val.loads[2]));
+#undef LOAD_INT
+#undef LOAD_FRAC
+ /* Display in kilobytes */
+#define K(x) ((x) << (PAGE_SHIFT - 10))
+ kdb_printf("\nMemTotal: %8lu kB\nMemFree: %8lu kB\n"
+ "Buffers: %8lu kB\n",
+		   K(val.totalram), K(val.freeram), K(val.bufferram));
+ return 0;
+}
+
+/*
+ * kdb_per_cpu - This function implements the 'per_cpu' command.
+ */
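+/*
+ * per_cpu <symbol> [<bytesperword> [<cpu>]]
+ * For example (symbol name illustrative): 'per_cpu runqueues 4 1' would
+ * print 4 bytes of the per-cpu variable 'runqueues' on cpu 1 only;
+ * without the cpu argument every online cpu is shown and cpus whose
+ * value is zero are listed separately as "Zero suppressed cpu(s)".
+ */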
+static int kdb_per_cpu(int argc, const char **argv)
+{
+ char buf[256], fmtstr[64];
+ kdb_symtab_t symtab;
+ cpumask_t suppress = CPU_MASK_NONE;
+ int cpu, diag;
+ unsigned long addr, val, bytesperword = 0, whichcpu = ~0UL;
+
+ if (argc < 1 || argc > 3)
+ return KDB_ARGCOUNT;
+
+ snprintf(buf, sizeof(buf), "per_cpu__%s", argv[1]);
+ if (!kdbgetsymval(buf, &symtab)) {
+ kdb_printf("%s is not a per_cpu variable\n", argv[1]);
+ return KDB_BADADDR;
+ }
+ if (argc >= 2) {
+ diag = kdbgetularg(argv[2], &bytesperword);
+ if (diag)
+ return diag;
+ }
+ if (!bytesperword)
+ bytesperword = KDB_WORD_SIZE;
+ else if (bytesperword > KDB_WORD_SIZE)
+ return KDB_BADWIDTH;
+ sprintf(fmtstr, "%%0%dlx ", (int)(2*bytesperword));
+ if (argc >= 3) {
+ diag = kdbgetularg(argv[3], &whichcpu);
+ if (diag)
+ return diag;
+ if (!cpu_online(whichcpu)) {
+ kdb_printf("cpu %ld is not online\n", whichcpu);
+ return KDB_BADCPUNUM;
+ }
+ }
+
+ /* Most architectures use __per_cpu_offset[cpu], some use
+	 * __per_cpu_offset(cpu), non-SMP builds have no __per_cpu_offset.
+ */
+#ifdef __per_cpu_offset
+#define KDB_PCU(cpu) __per_cpu_offset(cpu)
+#else
+#ifdef CONFIG_SMP
+#define KDB_PCU(cpu) __per_cpu_offset[cpu]
+#else
+#define KDB_PCU(cpu) 0
+#endif
+#endif
+
+ for_each_online_cpu(cpu) {
+ if (whichcpu != ~0UL && whichcpu != cpu)
+ continue;
+ addr = symtab.sym_start + KDB_PCU(cpu);
+ diag = kdb_getword(&val, addr, bytesperword);
+ if (diag) {
+ kdb_printf("%5d " kdb_bfd_vma_fmt0 " - unable to "
+ "read, diag=%d\n", cpu, addr, diag);
+ continue;
+ }
+#ifdef CONFIG_SMP
+ if (!val) {
+ cpu_set(cpu, suppress);
+ continue;
+ }
+#endif /* CONFIG_SMP */
+ kdb_printf("%5d ", cpu);
+ kdb_md_line(fmtstr, addr,
+ bytesperword == KDB_WORD_SIZE,
+ 1, bytesperword, 1, 1, 0);
+ }
+ if (cpus_weight(suppress) == 0)
+ return 0;
+ kdb_printf("Zero suppressed cpu(s):");
+ for (cpu = first_cpu(suppress); cpu < num_possible_cpus();
+ cpu = next_cpu(cpu, suppress)) {
+ kdb_printf(" %d", cpu);
+ if (cpu == num_possible_cpus() - 1 ||
+ next_cpu(cpu, suppress) != cpu + 1)
+ continue;
+ while (cpu < num_possible_cpus() &&
+ next_cpu(cpu, suppress) == cpu + 1)
+ ++cpu;
+ kdb_printf("-%d", cpu);
+ }
+ kdb_printf("\n");
+
+#undef KDB_PCU
+
+ return 0;
+}
+
+/*
+ * display help for the use of cmd | grep pattern
+ */
+static int kdb_grep_help(int argc, const char **argv)
+{
+ kdb_printf("Usage of cmd args | grep pattern:\n");
+ kdb_printf(" Any command's output may be filtered through an ");
+ kdb_printf("emulated 'pipe'.\n");
+ kdb_printf(" 'grep' is just a key word.\n");
+ kdb_printf(" The pattern may include a very limited set of "
+ "metacharacters:\n");
+ kdb_printf(" pattern or ^pattern or pattern$ or ^pattern$\n");
+ kdb_printf(" And if there are spaces in the pattern, you may "
+ "quote it:\n");
+ kdb_printf(" \"pat tern\" or \"^pat tern\" or \"pat tern$\""
+ " or \"^pat tern$\"\n");
+ return 0;
+}
+
+/*
+ * kdb_register_repeat - This function is used to register a kernel
+ * debugger command.
+ * Inputs:
+ * cmd Command name
+ * func Function to execute the command
+ * usage A simple usage string showing arguments
+ * help A simple help string describing command
+ * repeat Does the command auto repeat on enter?
+ * Returns:
+ * zero for success, one if a duplicate command.
+ */
+#define kdb_command_extend 50 /* arbitrary */
+int kdb_register_repeat(char *cmd,
+ kdb_func_t func,
+ char *usage,
+ char *help,
+ short minlen,
+ kdb_repeat_t repeat)
+{
+ int i;
+ kdbtab_t *kp;
+
+ /*
+ * Brute force method to determine duplicates
+ */
+ for_each_kdbcmd(kp, i) {
+ if (kp->cmd_name && (strcmp(kp->cmd_name, cmd) == 0)) {
+ kdb_printf("Duplicate kdb command registered: "
+ "%s, func %p help %s\n", cmd, func, help);
+ return 1;
+ }
+ }
+
+ /*
+ * Insert command into first available location in table
+ */
+ for_each_kdbcmd(kp, i) {
+ if (kp->cmd_name == NULL)
+ break;
+ }
+
+ if (i >= kdb_max_commands) {
+ kdbtab_t *new = kmalloc((kdb_max_commands - KDB_BASE_CMD_MAX +
+ kdb_command_extend) * sizeof(*new), GFP_KDB);
+ if (!new) {
+ kdb_printf("Could not allocate new kdb_command "
+ "table\n");
+ return 1;
+ }
+ if (kdb_commands) {
+ memcpy(new, kdb_commands,
+ kdb_max_commands * sizeof(*new));
+ kfree(kdb_commands);
+ }
+ memset(new + kdb_max_commands, 0,
+ kdb_command_extend * sizeof(*new));
+ kdb_commands = new;
+ kp = kdb_commands + kdb_max_commands;
+ kdb_max_commands += kdb_command_extend;
+ }
+
+ kp->cmd_name = cmd;
+ kp->cmd_func = func;
+ kp->cmd_usage = usage;
+ kp->cmd_help = help;
+ kp->cmd_flags = 0;
+ kp->cmd_minlen = minlen;
+ kp->cmd_repeat = repeat;
+
+ return 0;
+}
+
+/*
+ * kdb_register - Compatibility register function for commands that do
+ * not need to specify a repeat state. Equivalent to
+ * kdb_register_repeat with KDB_REPEAT_NONE.
+ * Inputs:
+ * cmd Command name
+ * func Function to execute the command
+ * usage A simple usage string showing arguments
+ * help A simple help string describing command
+ * minlen Minimum legal number of command characters required
+ * Returns:
+ * zero for success, one if a duplicate command.
+ */
+int kdb_register(char *cmd,
+ kdb_func_t func,
+ char *usage,
+ char *help,
+ short minlen)
+{
+ return kdb_register_repeat(cmd, func, usage, help, minlen,
+ KDB_REPEAT_NONE);
+}
+
+/*
+ * kdb_unregister - This function is used to unregister a kernel
+ * debugger command. It is generally called when a module which
+ * implements kdb commands is unloaded.
+ * Inputs:
+ * cmd Command name
+ * Returns:
+ * zero for success, one command not registered.
+ */
+int kdb_unregister(char *cmd)
+{
+ int i;
+ kdbtab_t *kp;
+
+ /*
+ * find the command.
+ */
+ for (i = 0, kp = kdb_commands; i < kdb_max_commands; i++, kp++) {
+ if (kp->cmd_name && (strcmp(kp->cmd_name, cmd) == 0)) {
+ kp->cmd_name = NULL;
+ return 0;
+ }
+ }
+
+ /* Couldn't find it. */
+ return 1;
+}
+
+/* Initialize the kdb command table. */
+static void __init kdb_inittab(void)
+{
+ int i;
+ kdbtab_t *kp;
+
+ for_each_kdbcmd(kp, i)
+ kp->cmd_name = NULL;
+
+ kdb_register_repeat("md", kdb_md, "<vaddr>",
+ "Display Memory Contents, also mdWcN, e.g. md8c1", 1,
+ KDB_REPEAT_NO_ARGS);
+ kdb_register_repeat("mdr", kdb_md, "<vaddr> <bytes>",
+ "Display Raw Memory", 0, KDB_REPEAT_NO_ARGS);
+ kdb_register_repeat("mdp", kdb_md, "<paddr> <bytes>",
+ "Display Physical Memory", 0, KDB_REPEAT_NO_ARGS);
+ kdb_register_repeat("mds", kdb_md, "<vaddr>",
+ "Display Memory Symbolically", 0, KDB_REPEAT_NO_ARGS);
+ kdb_register_repeat("mm", kdb_mm, "<vaddr> <contents>",
+ "Modify Memory Contents", 0, KDB_REPEAT_NO_ARGS);
+ kdb_register_repeat("go", kdb_go, "[<vaddr>]",
+ "Continue Execution", 1, KDB_REPEAT_NONE);
+ kdb_register_repeat("rd", kdb_rd, "",
+ "Display Registers", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("rm", kdb_rm, "<reg> <contents>",
+ "Modify Registers", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("ef", kdb_ef, "<vaddr>",
+ "Display exception frame", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("bt", kdb_bt, "[<vaddr>]",
+ "Stack traceback", 1, KDB_REPEAT_NONE);
+ kdb_register_repeat("btp", kdb_bt, "<pid>",
+ "Display stack for process <pid>", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("bta", kdb_bt, "[DRSTCZEUIMA]",
+ "Display stack all processes", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("btc", kdb_bt, "",
+ "Backtrace current process on each cpu", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("btt", kdb_bt, "<vaddr>",
+ "Backtrace process given its struct task address", 0,
+ KDB_REPEAT_NONE);
+ kdb_register_repeat("ll", kdb_ll, "<first-element> <linkoffset> <cmd>",
+ "Execute cmd for each element in linked list", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("env", kdb_env, "",
+ "Show environment variables", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("set", kdb_set, "",
+ "Set environment variables", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("help", kdb_help, "",
+ "Display Help Message", 1, KDB_REPEAT_NONE);
+ kdb_register_repeat("?", kdb_help, "",
+ "Display Help Message", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("cpu", kdb_cpu, "<cpunum>",
+ "Switch to new cpu", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("kgdb", kdb_kgdb, "",
+ "Enter kgdb mode", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("ps", kdb_ps, "[<flags>|A]",
+ "Display active task list", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("pid", kdb_pid, "<pidnum>",
+ "Switch to another task", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("reboot", kdb_reboot, "",
+ "Reboot the machine immediately", 0, KDB_REPEAT_NONE);
+#if defined(CONFIG_MODULES)
+ kdb_register_repeat("lsmod", kdb_lsmod, "",
+ "List loaded kernel modules", 0, KDB_REPEAT_NONE);
+#endif
+#if defined(CONFIG_MAGIC_SYSRQ)
+ kdb_register_repeat("sr", kdb_sr, "<key>",
+ "Magic SysRq key", 0, KDB_REPEAT_NONE);
+#endif
+#if defined(CONFIG_PRINTK)
+ kdb_register_repeat("dmesg", kdb_dmesg, "[lines]",
+ "Display syslog buffer", 0, KDB_REPEAT_NONE);
+#endif
+ kdb_register_repeat("defcmd", kdb_defcmd, "name \"usage\" \"help\"",
+ "Define a set of commands, down to endefcmd", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("kill", kdb_kill, "<-signal> <pid>",
+ "Send a signal to a process", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("summary", kdb_summary, "",
+ "Summarize the system", 4, KDB_REPEAT_NONE);
+ kdb_register_repeat("per_cpu", kdb_per_cpu, "",
+ "Display per_cpu variables", 3, KDB_REPEAT_NONE);
+ kdb_register_repeat("grephelp", kdb_grep_help, "",
+ "Display help on | grep", 0, KDB_REPEAT_NONE);
+}
+
+/* Execute any commands defined in kdb_cmds. */
+static void __init kdb_cmd_init(void)
+{
+ int i, diag;
+ for (i = 0; kdb_cmds[i]; ++i) {
+ diag = kdb_parse(kdb_cmds[i]);
+ if (diag)
+ kdb_printf("kdb command %s failed, kdb diag %d\n",
+ kdb_cmds[i], diag);
+ }
+ if (defcmd_in_progress) {
+ kdb_printf("Incomplete 'defcmd' set, forcing endefcmd\n");
+ kdb_parse("endefcmd");
+ }
+}
+
+/* Initialize kdb_printf, breakpoint tables and kdb state */
+void __init kdb_init(int lvl)
+{
+ static int kdb_init_lvl = KDB_NOT_INITIALIZED;
+ int i;
+
+ if (kdb_init_lvl == KDB_INIT_FULL || lvl <= kdb_init_lvl)
+ return;
+ for (i = kdb_init_lvl; i < lvl; i++) {
+ switch (i) {
+ case KDB_NOT_INITIALIZED:
+ kdb_inittab(); /* Initialize Command Table */
+ kdb_initbptab(); /* Initialize Breakpoints */
+ break;
+ case KDB_INIT_EARLY:
+ kdb_cmd_init(); /* Build kdb_cmds tables */
+ break;
+ }
+ }
+ kdb_init_lvl = lvl;
+}
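
For illustration only (not part of this patch): a loadable module would use the registration API above roughly as sketched below, assuming the declarations exported via <linux/kdb.h>; the command name "mycmd" and its handler are hypothetical.

/* Hypothetical example module; "mycmd" is illustrative only. */
#include <linux/module.h>
#include <linux/kdb.h>

static int kdb_mycmd(int argc, const char **argv)
{
	/* argv[0] is the command name, argv[1] the first argument */
	if (argc >= 1)
		kdb_printf("mycmd: arg=%s\n", argv[1]);
	else
		kdb_printf("mycmd: no arguments\n");
	return 0;
}

static int __init mycmd_init(void)
{
	/* kdb_register() returns nonzero if the name is already taken */
	return kdb_register("mycmd", kdb_mycmd, "[<arg>]",
			    "Example command", 0) ? -EEXIST : 0;
}

static void __exit mycmd_exit(void)
{
	kdb_unregister("mycmd");
}

module_init(mycmd_init);
module_exit(mycmd_exit);
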
diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
new file mode 100644
index 000000000000..97d3ba69775d
--- /dev/null
+++ b/kernel/debug/kdb/kdb_private.h
@@ -0,0 +1,300 @@
+#ifndef _KDBPRIVATE_H
+#define _KDBPRIVATE_H
+
+/*
+ * Kernel Debugger Architecture Independent Private Headers
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2009 Wind River Systems, Inc. All Rights Reserved.
+ */
+
+#include <linux/kgdb.h>
+#include "../debug_core.h"
+
+/* Kernel Debugger Error codes. Must not overlap with command codes. */
+#define KDB_NOTFOUND (-1)
+#define KDB_ARGCOUNT (-2)
+#define KDB_BADWIDTH (-3)
+#define KDB_BADRADIX (-4)
+#define KDB_NOTENV (-5)
+#define KDB_NOENVVALUE (-6)
+#define KDB_NOTIMP (-7)
+#define KDB_ENVFULL (-8)
+#define KDB_ENVBUFFULL (-9)
+#define KDB_TOOMANYBPT (-10)
+#define KDB_TOOMANYDBREGS (-11)
+#define KDB_DUPBPT (-12)
+#define KDB_BPTNOTFOUND (-13)
+#define KDB_BADMODE (-14)
+#define KDB_BADINT (-15)
+#define KDB_INVADDRFMT (-16)
+#define KDB_BADREG (-17)
+#define KDB_BADCPUNUM (-18)
+#define KDB_BADLENGTH (-19)
+#define KDB_NOBP (-20)
+#define KDB_BADADDR (-21)
+
+/* Kernel Debugger Command codes. Must not overlap with error codes. */
+#define KDB_CMD_GO (-1001)
+#define KDB_CMD_CPU (-1002)
+#define KDB_CMD_SS (-1003)
+#define KDB_CMD_SSB (-1004)
+#define KDB_CMD_KGDB (-1005)
+#define KDB_CMD_KGDB2 (-1006)
+
+/* Internal debug flags */
+#define KDB_DEBUG_FLAG_BP 0x0002 /* Breakpoint subsystem debug */
+#define KDB_DEBUG_FLAG_BB_SUMM 0x0004 /* Basic block analysis, summary only */
+#define KDB_DEBUG_FLAG_AR 0x0008 /* Activation record, generic */
+#define KDB_DEBUG_FLAG_ARA 0x0010 /* Activation record, arch specific */
+#define KDB_DEBUG_FLAG_BB 0x0020 /* All basic block analysis */
+#define KDB_DEBUG_FLAG_STATE 0x0040 /* State flags */
+#define KDB_DEBUG_FLAG_MASK 0xffff /* All debug flags */
+#define KDB_DEBUG_FLAG_SHIFT 16 /* Shift factor for dbflags */
+
+#define KDB_DEBUG(flag) (kdb_flags & \
+ (KDB_DEBUG_FLAG_##flag << KDB_DEBUG_FLAG_SHIFT))
+#define KDB_DEBUG_STATE(text, value) if (KDB_DEBUG(STATE)) \
+ kdb_print_state(text, value)
+
+#if BITS_PER_LONG == 32
+
+#define KDB_PLATFORM_ENV "BYTESPERWORD=4"
+
+#define kdb_machreg_fmt "0x%lx"
+#define kdb_machreg_fmt0 "0x%08lx"
+#define kdb_bfd_vma_fmt "0x%lx"
+#define kdb_bfd_vma_fmt0 "0x%08lx"
+#define kdb_elfw_addr_fmt "0x%x"
+#define kdb_elfw_addr_fmt0 "0x%08x"
+#define kdb_f_count_fmt "%d"
+
+#elif BITS_PER_LONG == 64
+
+#define KDB_PLATFORM_ENV "BYTESPERWORD=8"
+
+#define kdb_machreg_fmt "0x%lx"
+#define kdb_machreg_fmt0 "0x%016lx"
+#define kdb_bfd_vma_fmt "0x%lx"
+#define kdb_bfd_vma_fmt0 "0x%016lx"
+#define kdb_elfw_addr_fmt "0x%x"
+#define kdb_elfw_addr_fmt0 "0x%016x"
+#define kdb_f_count_fmt "%ld"
+
+#endif
+
+/*
+ * KDB_MAXBPT describes the total number of breakpoints
+ * supported by this architecure.
+ */
+#define KDB_MAXBPT 16
+
+/* Maximum number of arguments to a function */
+#define KDB_MAXARGS 16
+
+typedef enum {
+ KDB_REPEAT_NONE = 0, /* Do not repeat this command */
+ KDB_REPEAT_NO_ARGS, /* Repeat the command without arguments */
+ KDB_REPEAT_WITH_ARGS, /* Repeat the command including its arguments */
+} kdb_repeat_t;
+
+typedef int (*kdb_func_t)(int, const char **);
+
+/* Symbol table format returned by kallsyms. */
+typedef struct __ksymtab {
+ unsigned long value; /* Address of symbol */
+ const char *mod_name; /* Module containing symbol or
+ * "kernel" */
+ unsigned long mod_start;
+ unsigned long mod_end;
+ const char *sec_name; /* Section containing symbol */
+ unsigned long sec_start;
+ unsigned long sec_end;
+ const char *sym_name; /* Full symbol name, including
+ * any version */
+ unsigned long sym_start;
+ unsigned long sym_end;
+ } kdb_symtab_t;
+extern int kallsyms_symbol_next(char *prefix_name, int flag);
+extern int kallsyms_symbol_complete(char *prefix_name, int max_len);
+
+/* Exported Symbols for kernel loadable modules to use. */
+extern int kdb_register(char *, kdb_func_t, char *, char *, short);
+extern int kdb_register_repeat(char *, kdb_func_t, char *, char *,
+ short, kdb_repeat_t);
+extern int kdb_unregister(char *);
+
+extern int kdb_getarea_size(void *, unsigned long, size_t);
+extern int kdb_putarea_size(unsigned long, void *, size_t);
+
+/*
+ * Like get_user and put_user, kdb_getarea and kdb_putarea take variable
+ * names, not pointers. The underlying *_size functions take pointers.
+ */
+#define kdb_getarea(x, addr) kdb_getarea_size(&(x), addr, sizeof((x)))
+#define kdb_putarea(addr, x) kdb_putarea_size(addr, &(x), sizeof((x)))
+
+extern int kdb_getphysword(unsigned long *word,
+ unsigned long addr, size_t size);
+extern int kdb_getword(unsigned long *, unsigned long, size_t);
+extern int kdb_putword(unsigned long, unsigned long, size_t);
+
+extern int kdbgetularg(const char *, unsigned long *);
+extern int kdb_set(int, const char **);
+extern char *kdbgetenv(const char *);
+extern int kdbgetintenv(const char *, int *);
+extern int kdbgetaddrarg(int, const char **, int*, unsigned long *,
+ long *, char **);
+extern int kdbgetsymval(const char *, kdb_symtab_t *);
+extern int kdbnearsym(unsigned long, kdb_symtab_t *);
+extern void kdbnearsym_cleanup(void);
+extern char *kdb_strdup(const char *str, gfp_t type);
+extern void kdb_symbol_print(unsigned long, const kdb_symtab_t *, unsigned int);
+
+/* Routine for debugging the debugger state. */
+extern void kdb_print_state(const char *, int);
+
+extern int kdb_state;
+#define KDB_STATE_KDB 0x00000001 /* Cpu is inside kdb */
+#define KDB_STATE_LEAVING 0x00000002 /* Cpu is leaving kdb */
+#define KDB_STATE_CMD 0x00000004 /* Running a kdb command */
+#define KDB_STATE_KDB_CONTROL 0x00000008 /* This cpu is under
+ * kdb control */
+#define KDB_STATE_HOLD_CPU 0x00000010 /* Hold this cpu inside kdb */
+#define KDB_STATE_DOING_SS 0x00000020 /* Doing ss command */
+#define KDB_STATE_DOING_SSB 0x00000040 /* Doing ssb command,
+ * DOING_SS is also set */
+#define KDB_STATE_SSBPT 0x00000080 /* Install breakpoint
+ * after one ss, independent of
+ * DOING_SS */
+#define KDB_STATE_REENTRY 0x00000100 /* Valid re-entry into kdb */
+#define KDB_STATE_SUPPRESS 0x00000200 /* Suppress error messages */
+#define KDB_STATE_PAGER 0x00000400 /* pager is available */
+#define KDB_STATE_GO_SWITCH 0x00000800 /* go is switching
+ * back to initial cpu */
+#define KDB_STATE_PRINTF_LOCK 0x00001000 /* Holds kdb_printf lock */
+#define KDB_STATE_WAIT_IPI 0x00002000 /* Waiting for kdb_ipi() NMI */
+#define KDB_STATE_RECURSE 0x00004000 /* Recursive entry to kdb */
+#define KDB_STATE_IP_ADJUSTED 0x00008000 /* Restart IP has been
+ * adjusted */
+#define KDB_STATE_GO1 0x00010000 /* go only releases one cpu */
+#define KDB_STATE_KEYBOARD 0x00020000 /* kdb entered via
+ * keyboard on this cpu */
+#define KDB_STATE_KEXEC 0x00040000 /* kexec issued */
+#define KDB_STATE_DOING_KGDB 0x00080000 /* kgdb enter now issued */
+#define KDB_STATE_DOING_KGDB2 0x00100000 /* kgdb enter now issued */
+#define KDB_STATE_KGDB_TRANS 0x00200000 /* Transition to kgdb */
+#define KDB_STATE_ARCH 0xff000000 /* Reserved for arch
+ * specific use */
+
+#define KDB_STATE(flag) (kdb_state & KDB_STATE_##flag)
+#define KDB_STATE_SET(flag) ((void)(kdb_state |= KDB_STATE_##flag))
+#define KDB_STATE_CLEAR(flag) ((void)(kdb_state &= ~KDB_STATE_##flag))
+
+extern int kdb_nextline; /* Current number of lines displayed */
+
+typedef struct _kdb_bp {
+ unsigned long bp_addr; /* Address breakpoint is present at */
+ unsigned int bp_free:1; /* This entry is available */
+ unsigned int bp_enabled:1; /* Breakpoint is active in register */
+ unsigned int bp_type:4; /* Uses hardware register */
+ unsigned int bp_installed:1; /* Breakpoint is installed */
+ unsigned int bp_delay:1; /* Do delayed bp handling */
+ unsigned int bp_delayed:1; /* Delayed breakpoint */
+ unsigned int bph_length; /* HW break length */
+} kdb_bp_t;
+
+#ifdef CONFIG_KGDB_KDB
+extern kdb_bp_t kdb_breakpoints[/* KDB_MAXBPT */];
+
+/* The KDB shell command table */
+typedef struct _kdbtab {
+ char *cmd_name; /* Command name */
+ kdb_func_t cmd_func; /* Function to execute command */
+ char *cmd_usage; /* Usage String for this command */
+ char *cmd_help; /* Help message for this command */
+ short cmd_flags; /* Parsing flags */
+ short cmd_minlen; /* Minimum legal # command
+ * chars required */
+ kdb_repeat_t cmd_repeat; /* Does command auto repeat on enter? */
+} kdbtab_t;
+
+extern int kdb_bt(int, const char **); /* KDB display back trace */
+
+/* KDB breakpoint management functions */
+extern void kdb_initbptab(void);
+extern void kdb_bp_install(struct pt_regs *);
+extern void kdb_bp_remove(void);
+
+typedef enum {
+ KDB_DB_BPT, /* Breakpoint */
+ KDB_DB_SS, /* Single-step trap */
+ KDB_DB_SSB, /* Single step to branch */
+ KDB_DB_SSBPT, /* Single step over breakpoint */
+ KDB_DB_NOBPT /* Spurious breakpoint */
+} kdb_dbtrap_t;
+
+extern int kdb_main_loop(kdb_reason_t, kdb_reason_t,
+ int, kdb_dbtrap_t, struct pt_regs *);
+
+/* Miscellaneous functions and data areas */
+extern int kdb_grepping_flag;
+extern char kdb_grep_string[];
+extern int kdb_grep_leading;
+extern int kdb_grep_trailing;
+extern char *kdb_cmds[];
+extern void kdb_syslog_data(char *syslog_data[]);
+extern unsigned long kdb_task_state_string(const char *);
+extern char kdb_task_state_char (const struct task_struct *);
+extern unsigned long kdb_task_state(const struct task_struct *p,
+ unsigned long mask);
+extern void kdb_ps_suppressed(void);
+extern void kdb_ps1(const struct task_struct *p);
+extern void kdb_print_nameval(const char *name, unsigned long val);
+extern void kdb_send_sig_info(struct task_struct *p, struct siginfo *info);
+extern void kdb_meminfo_proc_show(void);
+extern const char *kdb_walk_kallsyms(loff_t *pos);
+extern char *kdb_getstr(char *, size_t, char *);
+
+/* Defines for kdb_symbol_print */
+#define KDB_SP_SPACEB 0x0001 /* Space before string */
+#define KDB_SP_SPACEA 0x0002 /* Space after string */
+#define KDB_SP_PAREN 0x0004 /* Parenthesis around string */
+#define KDB_SP_VALUE 0x0008 /* Print the value of the address */
+#define KDB_SP_SYMSIZE 0x0010 /* Print the size of the symbol */
+#define KDB_SP_NEWLINE 0x0020 /* Newline after string */
+#define KDB_SP_DEFAULT (KDB_SP_VALUE|KDB_SP_PAREN)
+
+#define KDB_TSK(cpu) kgdb_info[cpu].task
+#define KDB_TSKREGS(cpu) kgdb_info[cpu].debuggerinfo
+
+extern struct task_struct *kdb_curr_task(int);
+
+#define kdb_task_has_cpu(p) (task_curr(p))
+
+/* Simplify coexistence with NPTL */
+#define kdb_do_each_thread(g, p) do_each_thread(g, p)
+#define kdb_while_each_thread(g, p) while_each_thread(g, p)
+
+#define GFP_KDB (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL)
+
+extern void *debug_kmalloc(size_t size, gfp_t flags);
+extern void debug_kfree(void *);
+extern void debug_kusage(void);
+
+extern void kdb_set_current_task(struct task_struct *);
+extern struct task_struct *kdb_current_task;
+#ifdef CONFIG_MODULES
+extern struct list_head *kdb_modules;
+#endif /* CONFIG_MODULES */
+
+extern char kdb_prompt_str[];
+
+#define KDB_WORD_SIZE ((int)sizeof(unsigned long))
+
+#endif /* CONFIG_KGDB_KDB */
+#endif /* !_KDBPRIVATE_H */
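
A quick sketch (not part of the patch) of the difference between the accessors declared above: kdb_getarea() takes the destination variable itself and infers the size, while kdb_getword() takes a pointer plus an explicit width and treats the data as a number. The helper below is hypothetical and assumes kdb_private.h context.

/* Hypothetical helper: dump one byte and one machine word at addr */
static void example_dump(unsigned long addr)
{
	unsigned char byte;
	unsigned long word;

	/* size taken from the variable, here sizeof(byte) == 1 */
	if (kdb_getarea(byte, addr) == 0)
		kdb_printf("byte at " kdb_machreg_fmt ": %02x\n", addr, byte);

	/* explicit width, result returned as a number */
	if (kdb_getword(&word, addr, KDB_WORD_SIZE) == 0)
		kdb_printf("word at " kdb_machreg_fmt ": " kdb_machreg_fmt "\n",
			   addr, word);
}
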
diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c
new file mode 100644
index 000000000000..45344d5c53dd
--- /dev/null
+++ b/kernel/debug/kdb/kdb_support.c
@@ -0,0 +1,927 @@
+/*
+ * Kernel Debugger Architecture Independent Support Functions
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2009 Wind River Systems, Inc. All Rights Reserved.
+ * 03/02/13 added new 2.5 kallsyms <xavier.bru@bull.net>
+ */
+
+#include <stdarg.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/kallsyms.h>
+#include <linux/stddef.h>
+#include <linux/vmalloc.h>
+#include <linux/ptrace.h>
+#include <linux/module.h>
+#include <linux/highmem.h>
+#include <linux/hardirq.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/kdb.h>
+#include <linux/slab.h>
+#include "kdb_private.h"
+
+/*
+ * kdbgetsymval - Return the address of the given symbol.
+ *
+ * Parameters:
+ * symname Character string containing symbol name
+ * symtab Structure to receive results
+ * Returns:
+ * 0 Symbol not found, symtab zero filled
+ * 1 Symbol mapped to module/symbol/section, data in symtab
+ */
+int kdbgetsymval(const char *symname, kdb_symtab_t *symtab)
+{
+ if (KDB_DEBUG(AR))
+ kdb_printf("kdbgetsymval: symname=%s, symtab=%p\n", symname,
+ symtab);
+ memset(symtab, 0, sizeof(*symtab));
+ symtab->sym_start = kallsyms_lookup_name(symname);
+ if (symtab->sym_start) {
+ if (KDB_DEBUG(AR))
+ kdb_printf("kdbgetsymval: returns 1, "
+ "symtab->sym_start=0x%lx\n",
+ symtab->sym_start);
+ return 1;
+ }
+ if (KDB_DEBUG(AR))
+ kdb_printf("kdbgetsymval: returns 0\n");
+ return 0;
+}
+EXPORT_SYMBOL(kdbgetsymval);
+
+static char *kdb_name_table[100]; /* arbitrary size */
+
+/*
+ * kdbnearsym - Return the name of the symbol with the nearest address
+ * less than 'addr'.
+ *
+ * Parameters:
+ * addr Address to check for symbol near
+ * symtab Structure to receive results
+ * Returns:
+ * 0 No sections contain this address, symtab zero filled
+ * 1 Address mapped to module/symbol/section, data in symtab
+ * Remarks:
+ * 2.6 kallsyms has a "feature" where it unpacks the name into a
+ * string. If that string is reused before the caller expects it
+ * then the caller sees its string change without warning. To
+ * avoid cluttering up the main kdb code with lots of kdb_strdup,
+ * tests and kfree calls, kdbnearsym maintains an LRU list of the
+ * last few unique strings. The list is sized large enough to
+ * hold active strings; no kdb caller of kdbnearsym makes more
+ * than ~20 later calls before using a saved value.
+ */
+int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab)
+{
+ int ret = 0;
+ unsigned long symbolsize;
+ unsigned long offset;
+#define knt1_size 128 /* must be >= kallsyms table size */
+ char *knt1 = NULL;
+
+ if (KDB_DEBUG(AR))
+ kdb_printf("kdbnearsym: addr=0x%lx, symtab=%p\n", addr, symtab);
+ memset(symtab, 0, sizeof(*symtab));
+
+ if (addr < 4096)
+ goto out;
+ knt1 = debug_kmalloc(knt1_size, GFP_ATOMIC);
+ if (!knt1) {
+ kdb_printf("kdbnearsym: addr=0x%lx cannot kmalloc knt1\n",
+ addr);
+ goto out;
+ }
+ symtab->sym_name = kallsyms_lookup(addr, &symbolsize, &offset,
+ (char **)(&symtab->mod_name), knt1);
+ if (offset > 8*1024*1024) {
+ symtab->sym_name = NULL;
+ addr = offset = symbolsize = 0;
+ }
+ symtab->sym_start = addr - offset;
+ symtab->sym_end = symtab->sym_start + symbolsize;
+ ret = symtab->sym_name != NULL && *(symtab->sym_name) != '\0';
+
+ if (ret) {
+ int i;
+ /* Another 2.6 kallsyms "feature". Sometimes the sym_name is
+ * set but the buffer passed into kallsyms_lookup is not used,
+ * so it contains garbage. The caller has to work out which
+ * buffer needs to be saved.
+ *
+ * What was Rusty smoking when he wrote that code?
+ */
+ if (symtab->sym_name != knt1) {
+ strncpy(knt1, symtab->sym_name, knt1_size);
+ knt1[knt1_size-1] = '\0';
+ }
+ for (i = 0; i < ARRAY_SIZE(kdb_name_table); ++i) {
+ if (kdb_name_table[i] &&
+ strcmp(kdb_name_table[i], knt1) == 0)
+ break;
+ }
+ if (i >= ARRAY_SIZE(kdb_name_table)) {
+ debug_kfree(kdb_name_table[0]);
+ memcpy(kdb_name_table, kdb_name_table+1,
+ sizeof(kdb_name_table[0]) *
+ (ARRAY_SIZE(kdb_name_table)-1));
+ } else {
+ debug_kfree(knt1);
+ knt1 = kdb_name_table[i];
+ memcpy(kdb_name_table+i, kdb_name_table+i+1,
+ sizeof(kdb_name_table[0]) *
+ (ARRAY_SIZE(kdb_name_table)-i-1));
+ }
+ i = ARRAY_SIZE(kdb_name_table) - 1;
+ kdb_name_table[i] = knt1;
+ symtab->sym_name = kdb_name_table[i];
+ knt1 = NULL;
+ }
+
+ if (symtab->mod_name == NULL)
+ symtab->mod_name = "kernel";
+ if (KDB_DEBUG(AR))
+ kdb_printf("kdbnearsym: returns %d symtab->sym_start=0x%lx, "
+ "symtab->mod_name=%p, symtab->sym_name=%p (%s)\n", ret,
+ symtab->sym_start, symtab->mod_name, symtab->sym_name,
+ symtab->sym_name);
+
+out:
+ debug_kfree(knt1);
+ return ret;
+}
+
+void kdbnearsym_cleanup(void)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(kdb_name_table); ++i) {
+ if (kdb_name_table[i]) {
+ debug_kfree(kdb_name_table[i]);
+ kdb_name_table[i] = NULL;
+ }
+ }
+}
+
+static char ks_namebuf[KSYM_NAME_LEN+1], ks_namebuf_prev[KSYM_NAME_LEN+1];
+
+/*
+ * kallsyms_symbol_complete
+ *
+ * Parameters:
+ * prefix_name prefix of a symbol name to lookup
+ * max_len maximum length that can be returned
+ * Returns:
+ * Number of symbols which match the given prefix.
+ * Notes:
+ * prefix_name is changed to contain the longest unique prefix that
+ * starts with this prefix (tab completion).
+ */
+int kallsyms_symbol_complete(char *prefix_name, int max_len)
+{
+ loff_t pos = 0;
+ int prefix_len = strlen(prefix_name), prev_len = 0;
+ int i, number = 0;
+ const char *name;
+
+ while ((name = kdb_walk_kallsyms(&pos))) {
+ if (strncmp(name, prefix_name, prefix_len) == 0) {
+ strcpy(ks_namebuf, name);
+ /* Work out the longest name that matches the prefix */
+ if (++number == 1) {
+ prev_len = min_t(int, max_len-1,
+ strlen(ks_namebuf));
+ memcpy(ks_namebuf_prev, ks_namebuf, prev_len);
+ ks_namebuf_prev[prev_len] = '\0';
+ continue;
+ }
+ for (i = 0; i < prev_len; i++) {
+ if (ks_namebuf[i] != ks_namebuf_prev[i]) {
+ prev_len = i;
+ ks_namebuf_prev[i] = '\0';
+ break;
+ }
+ }
+ }
+ }
+ if (prev_len > prefix_len)
+ memcpy(prefix_name, ks_namebuf_prev, prev_len+1);
+ return number;
+}
+
+/*
+ * kallsyms_symbol_next
+ *
+ * Parameters:
+ * prefix_name prefix of a symbol name to lookup
+ * flag 0 means search from the head, 1 means continue search.
+ * Returns:
+ * 1 if a symbol matches the given prefix.
+ * 0 if no string found
+ */
+int kallsyms_symbol_next(char *prefix_name, int flag)
+{
+ int prefix_len = strlen(prefix_name);
+ static loff_t pos;
+ const char *name;
+
+ if (!flag)
+ pos = 0;
+
+ while ((name = kdb_walk_kallsyms(&pos))) {
+ if (strncmp(name, prefix_name, prefix_len) == 0) {
+ strncpy(prefix_name, name, strlen(name)+1);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * kdb_symbol_print - Standard method for printing a symbol name and offset.
+ * Inputs:
+ * addr Address to be printed.
+ * symtab Address of symbol data, if NULL this routine does its
+ * own lookup.
+ * punc Punctuation for string, bit field.
+ * Remarks:
+ * The string and its punctuation are only printed if the address
+ * is inside the kernel, except that the value is always printed
+ * when requested.
+ */
+void kdb_symbol_print(unsigned long addr, const kdb_symtab_t *symtab_p,
+ unsigned int punc)
+{
+ kdb_symtab_t symtab, *symtab_p2;
+ if (symtab_p) {
+ symtab_p2 = (kdb_symtab_t *)symtab_p;
+ } else {
+ symtab_p2 = &symtab;
+ kdbnearsym(addr, symtab_p2);
+ }
+ if (!(symtab_p2->sym_name || (punc & KDB_SP_VALUE)))
+ return;
+ if (punc & KDB_SP_SPACEB)
+ kdb_printf(" ");
+ if (punc & KDB_SP_VALUE)
+ kdb_printf(kdb_machreg_fmt0, addr);
+ if (symtab_p2->sym_name) {
+ if (punc & KDB_SP_VALUE)
+ kdb_printf(" ");
+ if (punc & KDB_SP_PAREN)
+ kdb_printf("(");
+ if (strcmp(symtab_p2->mod_name, "kernel"))
+ kdb_printf("[%s]", symtab_p2->mod_name);
+ kdb_printf("%s", symtab_p2->sym_name);
+ if (addr != symtab_p2->sym_start)
+ kdb_printf("+0x%lx", addr - symtab_p2->sym_start);
+ if (punc & KDB_SP_SYMSIZE)
+ kdb_printf("/0x%lx",
+ symtab_p2->sym_end - symtab_p2->sym_start);
+ if (punc & KDB_SP_PAREN)
+ kdb_printf(")");
+ }
+ if (punc & KDB_SP_SPACEA)
+ kdb_printf(" ");
+ if (punc & KDB_SP_NEWLINE)
+ kdb_printf("\n");
+}
+
+/*
+ * kdb_strdup - kdb equivalent of strdup, for disasm code.
+ * Inputs:
+ * str The string to duplicate.
+ * type Flags to kmalloc for the new string.
+ * Returns:
+ * Address of the new string, NULL if storage could not be allocated.
+ * Remarks:
+ * This is not in lib/string.c because it uses kmalloc which is not
+ * available when string.o is used in boot loaders.
+ */
+char *kdb_strdup(const char *str, gfp_t type)
+{
+ int n = strlen(str)+1;
+ char *s = kmalloc(n, type);
+ if (!s)
+ return NULL;
+ return strcpy(s, str);
+}
+
+/*
+ * kdb_getarea_size - Read an area of data. The kdb equivalent of
+ * copy_from_user, with kdb messages for invalid addresses.
+ * Inputs:
+ * res Pointer to the area to receive the result.
+ * addr Address of the area to copy.
+ * size Size of the area.
+ * Returns:
+ * 0 for success, < 0 for error.
+ */
+int kdb_getarea_size(void *res, unsigned long addr, size_t size)
+{
+ int ret = probe_kernel_read((char *)res, (char *)addr, size);
+ if (ret) {
+ if (!KDB_STATE(SUPPRESS)) {
+ kdb_printf("kdb_getarea: Bad address 0x%lx\n", addr);
+ KDB_STATE_SET(SUPPRESS);
+ }
+ ret = KDB_BADADDR;
+ } else {
+ KDB_STATE_CLEAR(SUPPRESS);
+ }
+ return ret;
+}
+
+/*
+ * kdb_putarea_size - Write an area of data. The kdb equivalent of
+ * copy_to_user, with kdb messages for invalid addresses.
+ * Inputs:
+ * addr Address of the area to write to.
+ * res Pointer to the area holding the data.
+ * size Size of the area.
+ * Returns:
+ * 0 for success, < 0 for error.
+ */
+int kdb_putarea_size(unsigned long addr, void *res, size_t size)
+{
+ int ret = probe_kernel_read((char *)addr, (char *)res, size);
+ if (ret) {
+ if (!KDB_STATE(SUPPRESS)) {
+ kdb_printf("kdb_putarea: Bad address 0x%lx\n", addr);
+ KDB_STATE_SET(SUPPRESS);
+ }
+ ret = KDB_BADADDR;
+ } else {
+ KDB_STATE_CLEAR(SUPPRESS);
+ }
+ return ret;
+}
+
+/*
+ * kdb_getphys - Read data from a physical address. Validate the
+ * address is in range, use kmap_atomic() to get data
+ * similar to kdb_getarea() - but for phys addresses
+ * Inputs:
+ * res Pointer to the word to receive the result
+ * addr Physical address of the area to copy
+ * size Size of the area
+ * Returns:
+ * 0 for success, < 0 for error.
+ */
+static int kdb_getphys(void *res, unsigned long addr, size_t size)
+{
+ unsigned long pfn;
+ void *vaddr;
+ struct page *page;
+
+ pfn = (addr >> PAGE_SHIFT);
+ if (!pfn_valid(pfn))
+ return 1;
+ page = pfn_to_page(pfn);
+ vaddr = kmap_atomic(page, KM_KDB);
+ memcpy(res, vaddr + (addr & (PAGE_SIZE - 1)), size);
+ kunmap_atomic(vaddr, KM_KDB);
+
+ return 0;
+}
+
+/*
+ * kdb_getphysword
+ * Inputs:
+ * word Pointer to the word to receive the result.
+ * addr Address of the area to copy.
+ * size Size of the area.
+ * Returns:
+ * 0 for success, < 0 for error.
+ */
+int kdb_getphysword(unsigned long *word, unsigned long addr, size_t size)
+{
+ int diag;
+ __u8 w1;
+ __u16 w2;
+ __u32 w4;
+ __u64 w8;
+ *word = 0; /* Default value if addr or size is invalid */
+
+ switch (size) {
+ case 1:
+ diag = kdb_getphys(&w1, addr, sizeof(w1));
+ if (!diag)
+ *word = w1;
+ break;
+ case 2:
+ diag = kdb_getphys(&w2, addr, sizeof(w2));
+ if (!diag)
+ *word = w2;
+ break;
+ case 4:
+ diag = kdb_getphys(&w4, addr, sizeof(w4));
+ if (!diag)
+ *word = w4;
+ break;
+ case 8:
+ if (size <= sizeof(*word)) {
+ diag = kdb_getphys(&w8, addr, sizeof(w8));
+ if (!diag)
+ *word = w8;
+ break;
+ }
+ /* drop through */
+ default:
+ diag = KDB_BADWIDTH;
+ kdb_printf("kdb_getphysword: bad width %ld\n", (long) size);
+ }
+ return diag;
+}
+
+/*
+ * kdb_getword - Read a binary value. Unlike kdb_getarea, this treats
+ * data as numbers.
+ * Inputs:
+ * word Pointer to the word to receive the result.
+ * addr Address of the area to copy.
+ * size Size of the area.
+ * Returns:
+ * 0 for success, < 0 for error.
+ */
+int kdb_getword(unsigned long *word, unsigned long addr, size_t size)
+{
+ int diag;
+ __u8 w1;
+ __u16 w2;
+ __u32 w4;
+ __u64 w8;
+ *word = 0; /* Default value if addr or size is invalid */
+ switch (size) {
+ case 1:
+ diag = kdb_getarea(w1, addr);
+ if (!diag)
+ *word = w1;
+ break;
+ case 2:
+ diag = kdb_getarea(w2, addr);
+ if (!diag)
+ *word = w2;
+ break;
+ case 4:
+ diag = kdb_getarea(w4, addr);
+ if (!diag)
+ *word = w4;
+ break;
+ case 8:
+ if (size <= sizeof(*word)) {
+ diag = kdb_getarea(w8, addr);
+ if (!diag)
+ *word = w8;
+ break;
+ }
+ /* drop through */
+ default:
+ diag = KDB_BADWIDTH;
+ kdb_printf("kdb_getword: bad width %ld\n", (long) size);
+ }
+ return diag;
+}
+
+/*
+ * kdb_putword - Write a binary value. Unlike kdb_putarea, this
+ * treats data as numbers.
+ * Inputs:
+ * addr Address of the area to write to.
+ * word The value to set.
+ * size Size of the area.
+ * Returns:
+ * 0 for success, < 0 for error.
+ */
+int kdb_putword(unsigned long addr, unsigned long word, size_t size)
+{
+ int diag;
+ __u8 w1;
+ __u16 w2;
+ __u32 w4;
+ __u64 w8;
+ switch (size) {
+ case 1:
+ w1 = word;
+ diag = kdb_putarea(addr, w1);
+ break;
+ case 2:
+ w2 = word;
+ diag = kdb_putarea(addr, w2);
+ break;
+ case 4:
+ w4 = word;
+ diag = kdb_putarea(addr, w4);
+ break;
+ case 8:
+ if (size <= sizeof(word)) {
+ w8 = word;
+ diag = kdb_putarea(addr, w8);
+ break;
+ }
+ /* drop through */
+ default:
+ diag = KDB_BADWIDTH;
+ kdb_printf("kdb_putword: bad width %ld\n", (long) size);
+ }
+ return diag;
+}
+
+/*
+ * kdb_task_state_string - Convert a string containing any of the
+ * letters DRSTCZEUIMA to a mask for the process state field and
+ * return the value. If no argument is supplied, return the mask
+ * that corresponds to environment variable PS, DRSTCZEU by
+ * default.
+ * Inputs:
+ * s String to convert
+ * Returns:
+ * Mask for process state.
+ * Notes:
+ * The mask folds data from several sources into a single long value, so
+ * be careful not to overlap the bits. TASK_* bits are in the LSB,
+ * special cases like UNRUNNABLE are in the MSB. As of 2.6.10-rc1 there
+ * is no overlap between TASK_* and EXIT_* but that may not always be
+ * true, so EXIT_* bits are shifted left 16 bits before being stored in
+ * the mask.
+ */
+
+/* unrunnable is < 0 */
+#define UNRUNNABLE (1UL << (8*sizeof(unsigned long) - 1))
+#define RUNNING (1UL << (8*sizeof(unsigned long) - 2))
+#define IDLE (1UL << (8*sizeof(unsigned long) - 3))
+#define DAEMON (1UL << (8*sizeof(unsigned long) - 4))
+
+unsigned long kdb_task_state_string(const char *s)
+{
+ long res = 0;
+ if (!s) {
+ s = kdbgetenv("PS");
+ if (!s)
+ s = "DRSTCZEU"; /* default value for ps */
+ }
+ while (*s) {
+ switch (*s) {
+ case 'D':
+ res |= TASK_UNINTERRUPTIBLE;
+ break;
+ case 'R':
+ res |= RUNNING;
+ break;
+ case 'S':
+ res |= TASK_INTERRUPTIBLE;
+ break;
+ case 'T':
+ res |= TASK_STOPPED;
+ break;
+ case 'C':
+ res |= TASK_TRACED;
+ break;
+ case 'Z':
+ res |= EXIT_ZOMBIE << 16;
+ break;
+ case 'E':
+ res |= EXIT_DEAD << 16;
+ break;
+ case 'U':
+ res |= UNRUNNABLE;
+ break;
+ case 'I':
+ res |= IDLE;
+ break;
+ case 'M':
+ res |= DAEMON;
+ break;
+ case 'A':
+ res = ~0UL;
+ break;
+ default:
+ kdb_printf("%s: unknown flag '%c' ignored\n",
+ __func__, *s);
+ break;
+ }
+ ++s;
+ }
+ return res;
+}
+
+/*
+ * kdb_task_state_char - Return the character that represents the task state.
+ * Inputs:
+ * p struct task for the process
+ * Returns:
+ * One character to represent the task state.
+ */
+char kdb_task_state_char (const struct task_struct *p)
+{
+ int cpu;
+ char state;
+ unsigned long tmp;
+
+ if (!p || probe_kernel_read(&tmp, (char *)p, sizeof(unsigned long)))
+ return 'E';
+
+ cpu = kdb_process_cpu(p);
+ state = (p->state == 0) ? 'R' :
+ (p->state < 0) ? 'U' :
+ (p->state & TASK_UNINTERRUPTIBLE) ? 'D' :
+ (p->state & TASK_STOPPED) ? 'T' :
+ (p->state & TASK_TRACED) ? 'C' :
+ (p->exit_state & EXIT_ZOMBIE) ? 'Z' :
+ (p->exit_state & EXIT_DEAD) ? 'E' :
+ (p->state & TASK_INTERRUPTIBLE) ? 'S' : '?';
+ if (p->pid == 0) {
+ /* Idle task. Is it really idle, apart from the kdb
+ * interrupt? */
+ if (!kdb_task_has_cpu(p) || kgdb_info[cpu].irq_depth == 1) {
+ if (cpu != kdb_initial_cpu)
+ state = 'I'; /* idle task */
+ }
+ } else if (!p->mm && state == 'S') {
+ state = 'M'; /* sleeping system daemon */
+ }
+ return state;
+}
+
+/*
+ * kdb_task_state - Return true if a process has the desired state
+ * given by the mask.
+ * Inputs:
+ * p struct task for the process
+ * mask mask from kdb_task_state_string to select processes
+ * Returns:
+ * True if the process matches at least one criteria defined by the mask.
+ */
+unsigned long kdb_task_state(const struct task_struct *p, unsigned long mask)
+{
+ char state[] = { kdb_task_state_char(p), '\0' };
+ return (mask & kdb_task_state_string(state)) != 0;
+}
+
+/*
+ * kdb_print_nameval - Print a name and its value, converting the
+ * value to a symbol lookup if possible.
+ * Inputs:
+ * name field name to print
+ * val value of field
+ */
+void kdb_print_nameval(const char *name, unsigned long val)
+{
+ kdb_symtab_t symtab;
+ kdb_printf(" %-11.11s ", name);
+ if (kdbnearsym(val, &symtab))
+ kdb_symbol_print(val, &symtab,
+ KDB_SP_VALUE|KDB_SP_SYMSIZE|KDB_SP_NEWLINE);
+ else
+ kdb_printf("0x%lx\n", val);
+}
+
+/* Last ditch allocator for debugging, so we can still debug even when
+ * the GFP_ATOMIC pool has been exhausted. The algorithms are tuned
+ * for space usage, not for speed. There is one smallish memory pool; the
+ * free chain is kept in ascending address order to allow coalescing, and
+ * allocations are done by brute-force best fit.
+ */
+
+struct debug_alloc_header {
+ u32 next; /* offset of next header from start of pool */
+ u32 size;
+ void *caller;
+};
+
+/* The memory returned by this allocator must be aligned, which means
+ * so must the header size. Do not assume that sizeof(struct
+ * debug_alloc_header) is a multiple of the alignment, explicitly
+ * calculate the overhead of this header, including the alignment.
+ * The rest of this code must not use sizeof() on any header or
+ * pointer to a header.
+ */
+#define dah_align 8
+#define dah_overhead ALIGN(sizeof(struct debug_alloc_header), dah_align)
+
+static u64 debug_alloc_pool_aligned[256*1024/dah_align]; /* 256K pool */
+static char *debug_alloc_pool = (char *)debug_alloc_pool_aligned;
+static u32 dah_first, dah_first_call = 1, dah_used, dah_used_max;
+
+/* Locking is awkward. The debug code is called from all contexts,
+ * including non-maskable interrupts. A normal spinlock is not safe
+ * in NMI context. Try to get the debug allocator lock; if it cannot
+ * be obtained within a second, give up. If the lock could not be
+ * previously obtained on this cpu then only try once.
+ *
+ * sparse has no annotation for "this function _sometimes_ acquires a
+ * lock", so fudge the acquire/release notation.
+ */
+static DEFINE_SPINLOCK(dap_lock);
+static int get_dap_lock(void)
+ __acquires(dap_lock)
+{
+ static int dap_locked = -1;
+ int count;
+ if (dap_locked == smp_processor_id())
+ count = 1;
+ else
+ count = 1000;
+ while (1) {
+ if (spin_trylock(&dap_lock)) {
+ dap_locked = -1;
+ return 1;
+ }
+ if (!count--)
+ break;
+ udelay(1000);
+ }
+ dap_locked = smp_processor_id();
+ __acquire(dap_lock);
+ return 0;
+}
+
+void *debug_kmalloc(size_t size, gfp_t flags)
+{
+ unsigned int rem, h_offset;
+ struct debug_alloc_header *best, *bestprev, *prev, *h;
+ void *p = NULL;
+ if (!get_dap_lock()) {
+ __release(dap_lock); /* we never actually got it */
+ return NULL;
+ }
+ h = (struct debug_alloc_header *)(debug_alloc_pool + dah_first);
+ if (dah_first_call) {
+ h->size = sizeof(debug_alloc_pool_aligned) - dah_overhead;
+ dah_first_call = 0;
+ }
+ size = ALIGN(size, dah_align);
+ prev = best = bestprev = NULL;
+ while (1) {
+ if (h->size >= size && (!best || h->size < best->size)) {
+ best = h;
+ bestprev = prev;
+ if (h->size == size)
+ break;
+ }
+ if (!h->next)
+ break;
+ prev = h;
+ h = (struct debug_alloc_header *)(debug_alloc_pool + h->next);
+ }
+ if (!best)
+ goto out;
+ rem = best->size - size;
+ /* The pool must always contain at least one header */
+ if (best->next == 0 && bestprev == NULL && rem < dah_overhead)
+ goto out;
+ if (rem >= dah_overhead) {
+ best->size = size;
+ h_offset = ((char *)best - debug_alloc_pool) +
+ dah_overhead + best->size;
+ h = (struct debug_alloc_header *)(debug_alloc_pool + h_offset);
+ h->size = rem - dah_overhead;
+ h->next = best->next;
+ } else
+ h_offset = best->next;
+ best->caller = __builtin_return_address(0);
+ dah_used += best->size;
+ dah_used_max = max(dah_used, dah_used_max);
+ if (bestprev)
+ bestprev->next = h_offset;
+ else
+ dah_first = h_offset;
+ p = (char *)best + dah_overhead;
+ memset(p, POISON_INUSE, best->size - 1);
+ *((char *)p + best->size - 1) = POISON_END;
+out:
+ spin_unlock(&dap_lock);
+ return p;
+}
+
+void debug_kfree(void *p)
+{
+ struct debug_alloc_header *h;
+ unsigned int h_offset;
+ if (!p)
+ return;
+ if ((char *)p < debug_alloc_pool ||
+ (char *)p >= debug_alloc_pool + sizeof(debug_alloc_pool_aligned)) {
+ kfree(p);
+ return;
+ }
+ if (!get_dap_lock()) {
+ __release(dap_lock); /* we never actually got it */
+ return; /* memory leak, cannot be helped */
+ }
+ h = (struct debug_alloc_header *)((char *)p - dah_overhead);
+ memset(p, POISON_FREE, h->size - 1);
+ *((char *)p + h->size - 1) = POISON_END;
+ h->caller = NULL;
+ dah_used -= h->size;
+ h_offset = (char *)h - debug_alloc_pool;
+ if (h_offset < dah_first) {
+ h->next = dah_first;
+ dah_first = h_offset;
+ } else {
+ struct debug_alloc_header *prev;
+ unsigned int prev_offset;
+ prev = (struct debug_alloc_header *)(debug_alloc_pool +
+ dah_first);
+ while (1) {
+ if (!prev->next || prev->next > h_offset)
+ break;
+ prev = (struct debug_alloc_header *)
+ (debug_alloc_pool + prev->next);
+ }
+ prev_offset = (char *)prev - debug_alloc_pool;
+ if (prev_offset + dah_overhead + prev->size == h_offset) {
+ prev->size += dah_overhead + h->size;
+ memset(h, POISON_FREE, dah_overhead - 1);
+ *((char *)h + dah_overhead - 1) = POISON_END;
+ h = prev;
+ h_offset = prev_offset;
+ } else {
+ h->next = prev->next;
+ prev->next = h_offset;
+ }
+ }
+ if (h_offset + dah_overhead + h->size == h->next) {
+ struct debug_alloc_header *next;
+ next = (struct debug_alloc_header *)
+ (debug_alloc_pool + h->next);
+ h->size += dah_overhead + next->size;
+ h->next = next->next;
+ memset(next, POISON_FREE, dah_overhead - 1);
+ *((char *)next + dah_overhead - 1) = POISON_END;
+ }
+ spin_unlock(&dap_lock);
+}
+
+void debug_kusage(void)
+{
+ struct debug_alloc_header *h_free, *h_used;
+#ifdef CONFIG_IA64
+ /* FIXME: using dah for ia64 unwind always results in a memory leak.
+ * Fix that memory leak first, then set debug_kusage_one_time = 1 for
+ * all architectures.
+ */
+ static int debug_kusage_one_time;
+#else
+ static int debug_kusage_one_time = 1;
+#endif
+ if (!get_dap_lock()) {
+ __release(dap_lock); /* we never actually got it */
+ return;
+ }
+ h_free = (struct debug_alloc_header *)(debug_alloc_pool + dah_first);
+ if (dah_first == 0 &&
+ (h_free->size == sizeof(debug_alloc_pool_aligned) - dah_overhead ||
+ dah_first_call))
+ goto out;
+ if (!debug_kusage_one_time)
+ goto out;
+ debug_kusage_one_time = 0;
+ kdb_printf("%s: debug_kmalloc memory leak dah_first %d\n",
+ __func__, dah_first);
+ if (dah_first) {
+ h_used = (struct debug_alloc_header *)debug_alloc_pool;
+ kdb_printf("%s: h_used %p size %d\n", __func__, h_used,
+ h_used->size);
+ }
+ do {
+ h_used = (struct debug_alloc_header *)
+ ((char *)h_free + dah_overhead + h_free->size);
+ kdb_printf("%s: h_used %p size %d caller %p\n",
+ __func__, h_used, h_used->size, h_used->caller);
+ h_free = (struct debug_alloc_header *)
+ (debug_alloc_pool + h_free->next);
+ } while (h_free->next);
+ h_used = (struct debug_alloc_header *)
+ ((char *)h_free + dah_overhead + h_free->size);
+ if ((char *)h_used - debug_alloc_pool !=
+ sizeof(debug_alloc_pool_aligned))
+ kdb_printf("%s: h_used %p size %d caller %p\n",
+ __func__, h_used, h_used->size, h_used->caller);
+out:
+ spin_unlock(&dap_lock);
+}
+
+/* Maintain a small stack of kdb_flags to allow recursion without disturbing
+ * the global kdb state.
+ */
+
+static int kdb_flags_stack[4], kdb_flags_index;
+
+void kdb_save_flags(void)
+{
+ BUG_ON(kdb_flags_index >= ARRAY_SIZE(kdb_flags_stack));
+ kdb_flags_stack[kdb_flags_index++] = kdb_flags;
+}
+
+void kdb_restore_flags(void)
+{
+ BUG_ON(kdb_flags_index <= 0);
+ kdb_flags = kdb_flags_stack[--kdb_flags_index];
+}
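
For orientation (again not part of the patch), the task-state helpers above combine with the iteration macros from kdb_private.h roughly the way kdb's ps command uses them; a hedged sketch:

/* Hypothetical ps-style filtering loop */
static void example_ps(const char *flags)
{
	unsigned long mask = kdb_task_state_string(flags); /* e.g. "DRS" */
	struct task_struct *g, *p;

	kdb_ps_suppressed();
	kdb_do_each_thread(g, p) {
		if (kdb_task_state(p, mask))
			kdb_ps1(p);	/* print one line per matching task */
	} kdb_while_each_thread(g, p);
}
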
diff --git a/kernel/exit.c b/kernel/exit.c
index 7f2683a10ac4..eabca5a73a85 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -55,7 +55,6 @@
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
-#include "cred-internals.h"
static void exit_mm(struct task_struct * tsk);
diff --git a/kernel/fork.c b/kernel/fork.c
index 44b0791b0a2e..5d3592deaf71 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1111,9 +1111,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->memcg_batch.do_batch = 0;
p->memcg_batch.memcg = NULL;
#endif
-
- p->bts = NULL;
-
p->stack_start = stack_start;
/* Perform scheduler related setup. Assign this task to a CPU. */
diff --git a/kernel/groups.c b/kernel/groups.c
index 2b45b2ee3964..53b1916c9492 100644
--- a/kernel/groups.c
+++ b/kernel/groups.c
@@ -164,12 +164,6 @@ int groups_search(const struct group_info *group_info, gid_t grp)
*/
int set_groups(struct cred *new, struct group_info *group_info)
{
- int retval;
-
- retval = security_task_setgroups(group_info);
- if (retval)
- return retval;
-
put_group_info(new->group_info);
groups_sort(group_info);
get_group_info(group_info);
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 0086628b6e97..b9b134b35088 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1749,35 +1749,15 @@ void __init hrtimers_init(void)
}
/**
- * schedule_hrtimeout_range - sleep until timeout
+ * schedule_hrtimeout_range_clock - sleep until timeout
* @expires: timeout value (ktime_t)
* @delta: slack in expires timeout (ktime_t)
* @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
- *
- * Make the current task sleep until the given expiry time has
- * elapsed. The routine will return immediately unless
- * the current task state has been set (see set_current_state()).
- *
- * The @delta argument gives the kernel the freedom to schedule the
- * actual wakeup to a time that is both power and performance friendly.
- * The kernel give the normal best effort behavior for "@expires+@delta",
- * but may decide to fire the timer earlier, but no earlier than @expires.
- *
- * You can set the task state as follows -
- *
- * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
- * pass before the routine returns.
- *
- * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
- * delivered to the current task.
- *
- * The current task state is guaranteed to be TASK_RUNNING when this
- * routine returns.
- *
- * Returns 0 when the timer has expired otherwise -EINTR
+ * @clock: timer clock, CLOCK_MONOTONIC or CLOCK_REALTIME
*/
-int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
- const enum hrtimer_mode mode)
+int __sched
+schedule_hrtimeout_range_clock(ktime_t *expires, unsigned long delta,
+ const enum hrtimer_mode mode, int clock)
{
struct hrtimer_sleeper t;
@@ -1799,7 +1779,7 @@ int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
return -EINTR;
}
- hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, mode);
+ hrtimer_init_on_stack(&t.timer, clock, mode);
hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
hrtimer_init_sleeper(&t, current);
@@ -1818,6 +1798,41 @@ int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
return !t.task ? 0 : -EINTR;
}
+
+/**
+ * schedule_hrtimeout_range - sleep until timeout
+ * @expires: timeout value (ktime_t)
+ * @delta: slack in expires timeout (ktime_t)
+ * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
+ *
+ * Make the current task sleep until the given expiry time has
+ * elapsed. The routine will return immediately unless
+ * the current task state has been set (see set_current_state()).
+ *
+ * The @delta argument gives the kernel the freedom to schedule the
+ * actual wakeup to a time that is both power and performance friendly.
+ * The kernel gives the normal best effort behavior for "@expires+@delta",
+ * but may decide to fire the timer earlier, but no earlier than @expires.
+ *
+ * You can set the task state as follows -
+ *
+ * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
+ * pass before the routine returns.
+ *
+ * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
+ * delivered to the current task.
+ *
+ * The current task state is guaranteed to be TASK_RUNNING when this
+ * routine returns.
+ *
+ * Returns 0 when the timer has expired otherwise -EINTR
+ */
+int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
+ const enum hrtimer_mode mode)
+{
+ return schedule_hrtimeout_range_clock(expires, delta, mode,
+ CLOCK_MONOTONIC);
+}
EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
/**
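
The point of splitting out schedule_hrtimeout_range_clock() above is to let a caller choose the clock; a minimal sketch of such a caller (the 10ms value is arbitrary and for illustration only):

/* Hypothetical caller: sleep ~10ms against CLOCK_REALTIME */
static int example_sleep_realtime(void)
{
	ktime_t expires = ktime_set(0, 10 * NSEC_PER_MSEC);

	set_current_state(TASK_INTERRUPTIBLE);
	return schedule_hrtimeout_range_clock(&expires, 0, HRTIMER_MODE_REL,
					      CLOCK_REALTIME);
}
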
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index 03808ed342a6..7a56b22e0602 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -40,23 +40,29 @@
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
+#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/hw_breakpoint.h>
+
/*
* Constraints data
*/
/* Number of pinned cpu breakpoints in a cpu */
-static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned);
+static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned[TYPE_MAX]);
/* Number of pinned task breakpoints in a cpu */
-static DEFINE_PER_CPU(unsigned int, nr_task_bp_pinned[HBP_NUM]);
+static DEFINE_PER_CPU(unsigned int *, nr_task_bp_pinned[TYPE_MAX]);
/* Number of non-pinned cpu/task breakpoints in a cpu */
-static DEFINE_PER_CPU(unsigned int, nr_bp_flexible);
+static DEFINE_PER_CPU(unsigned int, nr_bp_flexible[TYPE_MAX]);
+
+static int nr_slots[TYPE_MAX];
+
+static int constraints_initialized;
/* Gather the number of total pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
@@ -67,16 +73,29 @@ struct bp_busy_slots {
/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);
+__weak int hw_breakpoint_weight(struct perf_event *bp)
+{
+ return 1;
+}
+
+static inline enum bp_type_idx find_slot_idx(struct perf_event *bp)
+{
+ if (bp->attr.bp_type & HW_BREAKPOINT_RW)
+ return TYPE_DATA;
+
+ return TYPE_INST;
+}
+
/*
* Report the maximum number of pinned breakpoints a task
* have in this cpu
*/
-static unsigned int max_task_bp_pinned(int cpu)
+static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
{
int i;
- unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned, cpu);
+ unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
- for (i = HBP_NUM -1; i >= 0; i--) {
+ for (i = nr_slots[type] - 1; i >= 0; i--) {
if (tsk_pinned[i] > 0)
return i + 1;
}
@@ -84,7 +103,7 @@ static unsigned int max_task_bp_pinned(int cpu)
return 0;
}
-static int task_bp_pinned(struct task_struct *tsk)
+static int task_bp_pinned(struct task_struct *tsk, enum bp_type_idx type)
{
struct perf_event_context *ctx = tsk->perf_event_ctxp;
struct list_head *list;
@@ -105,7 +124,8 @@ static int task_bp_pinned(struct task_struct *tsk)
*/
list_for_each_entry(bp, list, event_entry) {
if (bp->attr.type == PERF_TYPE_BREAKPOINT)
- count++;
+ if (find_slot_idx(bp) == type)
+ count += hw_breakpoint_weight(bp);
}
raw_spin_unlock_irqrestore(&ctx->lock, flags);
@@ -118,18 +138,19 @@ static int task_bp_pinned(struct task_struct *tsk)
* a given cpu (cpu > -1) or in all of them (cpu = -1).
*/
static void
-fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp)
+fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
+ enum bp_type_idx type)
{
int cpu = bp->cpu;
struct task_struct *tsk = bp->ctx->task;
if (cpu >= 0) {
- slots->pinned = per_cpu(nr_cpu_bp_pinned, cpu);
+ slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu);
if (!tsk)
- slots->pinned += max_task_bp_pinned(cpu);
+ slots->pinned += max_task_bp_pinned(cpu, type);
else
- slots->pinned += task_bp_pinned(tsk);
- slots->flexible = per_cpu(nr_bp_flexible, cpu);
+ slots->pinned += task_bp_pinned(tsk, type);
+ slots->flexible = per_cpu(nr_bp_flexible[type], cpu);
return;
}
@@ -137,16 +158,16 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp)
for_each_online_cpu(cpu) {
unsigned int nr;
- nr = per_cpu(nr_cpu_bp_pinned, cpu);
+ nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
if (!tsk)
- nr += max_task_bp_pinned(cpu);
+ nr += max_task_bp_pinned(cpu, type);
else
- nr += task_bp_pinned(tsk);
+ nr += task_bp_pinned(tsk, type);
if (nr > slots->pinned)
slots->pinned = nr;
- nr = per_cpu(nr_bp_flexible, cpu);
+ nr = per_cpu(nr_bp_flexible[type], cpu);
if (nr > slots->flexible)
slots->flexible = nr;
@@ -154,31 +175,49 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp)
}
/*
+ * For now, continue to consider flexible as pinned, until we can
+ * ensure no flexible event can ever be scheduled before a pinned event
+ * in a same cpu.
+ */
+static void
+fetch_this_slot(struct bp_busy_slots *slots, int weight)
+{
+ slots->pinned += weight;
+}
+
+/*
* Add a pinned breakpoint for the given task in our constraint table
*/
-static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable)
+static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable,
+ enum bp_type_idx type, int weight)
{
unsigned int *tsk_pinned;
- int count = 0;
+ int old_count = 0;
+ int old_idx = 0;
+ int idx = 0;
- count = task_bp_pinned(tsk);
+ old_count = task_bp_pinned(tsk, type);
+ old_idx = old_count - 1;
+ idx = old_idx + weight;
- tsk_pinned = per_cpu(nr_task_bp_pinned, cpu);
+ tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
if (enable) {
- tsk_pinned[count]++;
- if (count > 0)
- tsk_pinned[count-1]--;
+ tsk_pinned[idx]++;
+ if (old_count > 0)
+ tsk_pinned[old_idx]--;
} else {
- tsk_pinned[count]--;
- if (count > 0)
- tsk_pinned[count-1]++;
+ tsk_pinned[idx]--;
+ if (old_count > 0)
+ tsk_pinned[old_idx]++;
}
}
/*
* Add/remove the given breakpoint in our constraint table
*/
-static void toggle_bp_slot(struct perf_event *bp, bool enable)
+static void
+toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
+ int weight)
{
int cpu = bp->cpu;
struct task_struct *tsk = bp->ctx->task;
@@ -186,20 +225,20 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
/* Pinned counter task profiling */
if (tsk) {
if (cpu >= 0) {
- toggle_bp_task_slot(tsk, cpu, enable);
+ toggle_bp_task_slot(tsk, cpu, enable, type, weight);
return;
}
for_each_online_cpu(cpu)
- toggle_bp_task_slot(tsk, cpu, enable);
+ toggle_bp_task_slot(tsk, cpu, enable, type, weight);
return;
}
/* Pinned counter cpu profiling */
if (enable)
- per_cpu(nr_cpu_bp_pinned, bp->cpu)++;
+ per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
else
- per_cpu(nr_cpu_bp_pinned, bp->cpu)--;
+ per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight;
}
/*
@@ -246,14 +285,29 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
static int __reserve_bp_slot(struct perf_event *bp)
{
struct bp_busy_slots slots = {0};
+ enum bp_type_idx type;
+ int weight;
- fetch_bp_busy_slots(&slots, bp);
+ /* We couldn't initialize breakpoint constraints on boot */
+ if (!constraints_initialized)
+ return -ENOMEM;
+
+ /* Basic checks */
+ if (bp->attr.bp_type == HW_BREAKPOINT_EMPTY ||
+ bp->attr.bp_type == HW_BREAKPOINT_INVALID)
+ return -EINVAL;
+
+ type = find_slot_idx(bp);
+ weight = hw_breakpoint_weight(bp);
+
+ fetch_bp_busy_slots(&slots, bp, type);
+ fetch_this_slot(&slots, weight);
/* Flexible counters need to keep at least one slot */
- if (slots.pinned + (!!slots.flexible) == HBP_NUM)
+ if (slots.pinned + (!!slots.flexible) > nr_slots[type])
return -ENOSPC;
- toggle_bp_slot(bp, true);
+ toggle_bp_slot(bp, true, type, weight);
return 0;
}
@@ -273,7 +327,12 @@ int reserve_bp_slot(struct perf_event *bp)
static void __release_bp_slot(struct perf_event *bp)
{
- toggle_bp_slot(bp, false);
+ enum bp_type_idx type;
+ int weight;
+
+ type = find_slot_idx(bp);
+ weight = hw_breakpoint_weight(bp);
+ toggle_bp_slot(bp, false, type, weight);
}
void release_bp_slot(struct perf_event *bp)
@@ -308,6 +367,28 @@ int dbg_release_bp_slot(struct perf_event *bp)
return 0;
}
+static int validate_hw_breakpoint(struct perf_event *bp)
+{
+ int ret;
+
+ ret = arch_validate_hwbkpt_settings(bp);
+ if (ret)
+ return ret;
+
+ if (arch_check_bp_in_kernelspace(bp)) {
+ if (bp->attr.exclude_kernel)
+ return -EINVAL;
+ /*
+ * Don't let unprivileged users set a breakpoint in the trap
+ * path to avoid trap recursion attacks.
+ */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ }
+
+ return 0;
+}
+
int register_perf_hw_breakpoint(struct perf_event *bp)
{
int ret;
@@ -316,17 +397,7 @@ int register_perf_hw_breakpoint(struct perf_event *bp)
if (ret)
return ret;
- /*
- * Ptrace breakpoints can be temporary perf events only
- * meant to reserve a slot. In this case, it is created disabled and
- * we don't want to check the params right now (as we put a null addr)
- * But perf tools create events as disabled and we want to check
- * the params for them.
- * This is a quick hack that will be removed soon, once we remove
- * the tmp breakpoints from ptrace
- */
- if (!bp->attr.disabled || !bp->overflow_handler)
- ret = arch_validate_hwbkpt_settings(bp, bp->ctx->task);
+ ret = validate_hw_breakpoint(bp);
/* if arch_validate_hwbkpt_settings() fails then release bp slot */
if (ret)
@@ -373,7 +444,7 @@ int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *att
if (attr->disabled)
goto end;
- err = arch_validate_hwbkpt_settings(bp, bp->ctx->task);
+ err = validate_hw_breakpoint(bp);
if (!err)
perf_event_enable(bp);
@@ -480,7 +551,36 @@ static struct notifier_block hw_breakpoint_exceptions_nb = {
static int __init init_hw_breakpoint(void)
{
+ unsigned int **task_bp_pinned;
+ int cpu, err_cpu;
+ int i;
+
+ for (i = 0; i < TYPE_MAX; i++)
+ nr_slots[i] = hw_breakpoint_slots(i);
+
+ for_each_possible_cpu(cpu) {
+ for (i = 0; i < TYPE_MAX; i++) {
+ task_bp_pinned = &per_cpu(nr_task_bp_pinned[i], cpu);
+ *task_bp_pinned = kzalloc(sizeof(int) * nr_slots[i],
+ GFP_KERNEL);
+ if (!*task_bp_pinned)
+ goto err_alloc;
+ }
+ }
+
+ constraints_initialized = 1;
+
return register_die_notifier(&hw_breakpoint_exceptions_nb);
+
+ err_alloc:
+ for_each_possible_cpu(err_cpu) {
+ if (err_cpu == cpu)
+ break;
+ for (i = 0; i < TYPE_MAX; i++)
+ kfree(per_cpu(nr_task_bp_pinned[i], err_cpu));
+ }
+
+ return -ENOMEM;
}
core_initcall(init_hw_breakpoint);
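
hw_breakpoint_weight() is left __weak above so an architecture can charge a single breakpoint more than one slot; a hypothetical override might look like this (the 8-byte granularity is invented for illustration):

/* Hypothetical arch override: charge one slot per 8 bytes covered */
int hw_breakpoint_weight(struct perf_event *bp)
{
	return (bp->attr.bp_len + 7) / 8;
}
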
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 76d5a671bfe1..27e5c6911223 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -370,9 +370,6 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
irqreturn_t ret, retval = IRQ_NONE;
unsigned int status = 0;
- if (!(action->flags & IRQF_DISABLED))
- local_irq_enable_in_hardirq();
-
do {
trace_irq_handler_entry(irq, action);
ret = action->handler(irq, action->dev_id);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 704e488730a5..3164ba7ce151 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -138,6 +138,22 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
return 0;
}
+int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ unsigned long flags;
+
+ if (!desc)
+ return -EINVAL;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ desc->affinity_hint = m;
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
+
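A hedged sketch of how a driver would typically consume the new hint API: one MSI-X vector per queue, each hinted at a different CPU so a userspace balancer can honour the placement. The mydrv_* names and the queue context are hypothetical; only request_irq(), free_irq(), irq_set_affinity_hint() and cpumask_of() are real. The hint must be cleared with a NULL mask before free_irq(), or the WARN_ON_ONCE() added to __free_irq() below will trigger.

/* Hedged sketch with hypothetical mydrv_* names. */
#include <linux/interrupt.h>
#include <linux/cpumask.h>

static irqreturn_t mydrv_queue_isr(int irq, void *dev_id);

static int mydrv_request_queue_irq(unsigned int irq, unsigned int queue,
				   void *queue_ctx)
{
	int err;

	err = request_irq(irq, mydrv_queue_isr, 0, "mydrv", queue_ctx);
	if (err)
		return err;

	/* hint: steer queue N towards CPU N (wrapping over the online CPUs) */
	irq_set_affinity_hint(irq, cpumask_of(queue % num_online_cpus()));
	return 0;
}

static void mydrv_free_queue_irq(unsigned int irq, void *queue_ctx)
{
	irq_set_affinity_hint(irq, NULL);	/* clear the hint first */
	free_irq(irq, queue_ctx);
}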
#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
* Generic version of the affinity autoselector.
@@ -757,16 +773,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
if (new->flags & IRQF_ONESHOT)
desc->status |= IRQ_ONESHOT;
- /*
- * Force MSI interrupts to run with interrupts
- * disabled. The multi vector cards can cause stack
- * overflows due to nested interrupts when enough of
- * them are directed to a core and fire at the same
- * time.
- */
- if (desc->msi_desc)
- new->flags |= IRQF_DISABLED;
-
if (!(desc->status & IRQ_NOAUTOEN)) {
desc->depth = 0;
desc->status &= ~IRQ_DISABLED;
@@ -916,6 +922,12 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
desc->chip->disable(irq);
}
+#ifdef CONFIG_SMP
+ /* make sure affinity_hint is cleaned up */
+ if (WARN_ON_ONCE(desc->affinity_hint))
+ desc->affinity_hint = NULL;
+#endif
+
raw_spin_unlock_irqrestore(&desc->lock, flags);
unregister_handler_proc(irq, action);
@@ -1027,7 +1039,6 @@ EXPORT_SYMBOL(free_irq);
* Flags:
*
* IRQF_SHARED Interrupt is shared
- * IRQF_DISABLED Disable local interrupts while processing
* IRQF_SAMPLE_RANDOM The interrupt can be used for entropy
* IRQF_TRIGGER_* Specify active edge(s) or level
*
@@ -1041,25 +1052,6 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
int retval;
/*
- * handle_IRQ_event() always ignores IRQF_DISABLED except for
- * the _first_ irqaction (sigh). That can cause oopsing, but
- * the behavior is classified as "will not fix" so we need to
- * start nudging drivers away from using that idiom.
- */
- if ((irqflags & (IRQF_SHARED|IRQF_DISABLED)) ==
- (IRQF_SHARED|IRQF_DISABLED)) {
- pr_warning(
- "IRQ %d/%s: IRQF_DISABLED is not guaranteed on shared IRQs\n",
- irq, devname);
- }
-
-#ifdef CONFIG_LOCKDEP
- /*
- * Lockdep wants atomic interrupt handlers:
- */
- irqflags |= IRQF_DISABLED;
-#endif
- /*
* Sanity-check: shared interrupts must pass in a real dev-ID,
* otherwise we'll have trouble later trying to figure out
* which interrupt is which (messes up the interrupt freeing
@@ -1120,3 +1112,40 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
+
+/**
+ * request_any_context_irq - allocate an interrupt line
+ * @irq: Interrupt line to allocate
+ * @handler: Function to be called when the IRQ occurs.
+ * Threaded handler for threaded interrupts.
+ * @flags: Interrupt type flags
+ * @name: An ascii name for the claiming device
+ * @dev_id: A cookie passed back to the handler function
+ *
+ * This call allocates interrupt resources and enables the
+ * interrupt line and IRQ handling. It selects either a
+ * hardirq or threaded handling method depending on the
+ * context.
+ *
+ * On failure, it returns a negative value. On success,
+ * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
+ */
+int request_any_context_irq(unsigned int irq, irq_handler_t handler,
+ unsigned long flags, const char *name, void *dev_id)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ int ret;
+
+ if (!desc)
+ return -EINVAL;
+
+ if (desc->status & IRQ_NESTED_THREAD) {
+ ret = request_threaded_irq(irq, NULL, handler,
+ flags, name, dev_id);
+ return !ret ? IRQC_IS_NESTED : ret;
+ }
+
+ ret = request_irq(irq, handler, flags, name, dev_id);
+ return !ret ? IRQC_IS_HARDIRQ : ret;
+}
+EXPORT_SYMBOL_GPL(request_any_context_irq);
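A hedged usage sketch: a driver whose interrupt pin may be wired either to the SoC's interrupt controller (hardirq context) or to a nested controller such as an I2C GPIO expander (threaded context). Everything except request_any_context_irq() and the IRQC_* return codes is hypothetical.

/* Hedged sketch with hypothetical mydev_* names. */
#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t mydev_isr(int irq, void *dev_id);

static int mydev_probe_irq(struct platform_device *pdev, unsigned int irq)
{
	int ret;

	ret = request_any_context_irq(irq, mydev_isr, IRQF_TRIGGER_FALLING,
				      "mydev", pdev);
	if (ret < 0)
		return ret;		/* allocation failed */

	if (ret == IRQC_IS_NESTED)
		dev_info(&pdev->dev, "using a nested threaded handler\n");
	else				/* IRQC_IS_HARDIRQ */
		dev_info(&pdev->dev, "using a hardirq handler\n");

	return 0;
}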
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 7a6eb04ef6b5..4f9427a30e14 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -32,6 +32,29 @@ static int irq_affinity_proc_show(struct seq_file *m, void *v)
return 0;
}
+static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
+{
+ struct irq_desc *desc = irq_to_desc((long)m->private);
+ unsigned long flags;
+ cpumask_var_t mask;
+
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ if (desc->affinity_hint)
+ cpumask_copy(mask, desc->affinity_hint);
+ else
+ cpumask_setall(mask);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+ seq_cpumask(m, mask);
+ seq_putc(m, '\n');
+ free_cpumask_var(mask);
+
+ return 0;
+}
+
#ifndef is_affinity_mask_valid
#define is_affinity_mask_valid(val) 1
#endif
@@ -84,6 +107,11 @@ static int irq_affinity_proc_open(struct inode *inode, struct file *file)
return single_open(file, irq_affinity_proc_show, PDE(inode)->data);
}
+static int irq_affinity_hint_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, irq_affinity_hint_proc_show, PDE(inode)->data);
+}
+
static const struct file_operations irq_affinity_proc_fops = {
.open = irq_affinity_proc_open,
.read = seq_read,
@@ -92,6 +120,13 @@ static const struct file_operations irq_affinity_proc_fops = {
.write = irq_affinity_proc_write,
};
+static const struct file_operations irq_affinity_hint_proc_fops = {
+ .open = irq_affinity_hint_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static int default_affinity_show(struct seq_file *m, void *v)
{
seq_cpumask(m, irq_default_affinity);
@@ -147,6 +182,26 @@ static const struct file_operations default_affinity_proc_fops = {
.release = single_release,
.write = default_affinity_write,
};
+
+static int irq_node_proc_show(struct seq_file *m, void *v)
+{
+ struct irq_desc *desc = irq_to_desc((long) m->private);
+
+ seq_printf(m, "%d\n", desc->node);
+ return 0;
+}
+
+static int irq_node_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, irq_node_proc_show, PDE(inode)->data);
+}
+
+static const struct file_operations irq_node_proc_fops = {
+ .open = irq_node_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
#endif
static int irq_spurious_proc_show(struct seq_file *m, void *v)
@@ -231,6 +286,13 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
/* create /proc/irq/<irq>/smp_affinity */
proc_create_data("smp_affinity", 0600, desc->dir,
&irq_affinity_proc_fops, (void *)(long)irq);
+
+ /* create /proc/irq/<irq>/affinity_hint */
+ proc_create_data("affinity_hint", 0400, desc->dir,
+ &irq_affinity_hint_proc_fops, (void *)(long)irq);
+
+ proc_create_data("node", 0444, desc->dir,
+ &irq_node_proc_fops, (void *)(long)irq);
#endif
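The two files created above, /proc/irq/<irq>/affinity_hint and /proc/irq/<irq>/node, can simply be read from userspace; a hedged sketch follows, with the IRQ number and buffer sizes as placeholders. If no driver has set a hint, affinity_hint reads back as an all-ones mask.

/* Hedged userspace sketch: dump the hint and NUMA node of one IRQ. */
#include <stdio.h>

static void dump_irq_info(int irq)
{
	char path[64], buf[256];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/irq/%d/affinity_hint", irq);
	f = fopen(path, "r");
	if (f && fgets(buf, sizeof(buf), f))
		printf("irq %d hint: %s", irq, buf);	/* all-ones if no hint */
	if (f)
		fclose(f);

	snprintf(path, sizeof(path), "/proc/irq/%d/node", irq);
	f = fopen(path, "r");
	if (f && fgets(buf, sizeof(buf), f))
		printf("irq %d node: %s", irq, buf);
	if (f)
		fclose(f);
}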
proc_create_data("spurious", 0444, desc->dir,
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 13aff293f4de..6f6d091b5757 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -16,6 +16,7 @@
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
+#include <linux/kdb.h>
#include <linux/err.h>
#include <linux/proc_fs.h>
#include <linux/sched.h> /* for cond_resched */
@@ -516,6 +517,26 @@ static int kallsyms_open(struct inode *inode, struct file *file)
return ret;
}
+#ifdef CONFIG_KGDB_KDB
+const char *kdb_walk_kallsyms(loff_t *pos)
+{
+ static struct kallsym_iter kdb_walk_kallsyms_iter;
+ if (*pos == 0) {
+ memset(&kdb_walk_kallsyms_iter, 0,
+ sizeof(kdb_walk_kallsyms_iter));
+ reset_iter(&kdb_walk_kallsyms_iter, 0);
+ }
+ while (1) {
+ if (!update_iter(&kdb_walk_kallsyms_iter, *pos))
+ return NULL;
+ ++*pos;
+ /* Some debugging symbols have no name. Ignore them. */
+ if (kdb_walk_kallsyms_iter.name[0])
+ return kdb_walk_kallsyms_iter.name;
+ }
+}
+#endif /* CONFIG_KGDB_KDB */
+
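A hedged sketch of how a kdb command could consume the iterator above: kdb_printf() is the usual kdb output helper, and the surrounding command registration plumbing is left out as hypothetical.

/* Hedged sketch: walk every named symbol via kdb_walk_kallsyms(). */
static int kdb_dump_symbol_names(void)
{
	const char *name;
	loff_t pos = 0;

	while ((name = kdb_walk_kallsyms(&pos)) != NULL)
		kdb_printf("%s\n", name);

	return 0;
}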
static const struct file_operations kallsyms_operations = {
.open = kallsyms_open,
.read = seq_read,
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
deleted file mode 100644
index 11f3515ca83f..000000000000
--- a/kernel/kgdb.c
+++ /dev/null
@@ -1,1764 +0,0 @@
-/*
- * KGDB stub.
- *
- * Maintainer: Jason Wessel <jason.wessel@windriver.com>
- *
- * Copyright (C) 2000-2001 VERITAS Software Corporation.
- * Copyright (C) 2002-2004 Timesys Corporation
- * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com>
- * Copyright (C) 2004 Pavel Machek <pavel@suse.cz>
- * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org>
- * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd.
- * Copyright (C) 2005-2008 Wind River Systems, Inc.
- * Copyright (C) 2007 MontaVista Software, Inc.
- * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
- *
- * Contributors at various stages not listed above:
- * Jason Wessel ( jason.wessel@windriver.com )
- * George Anzinger <george@mvista.com>
- * Anurekh Saxena (anurekh.saxena@timesys.com)
- * Lake Stevens Instrument Division (Glenn Engel)
- * Jim Kingdon, Cygnus Support.
- *
- * Original KGDB stub: David Grothe <dave@gcom.com>,
- * Tigran Aivazian <tigran@sco.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-#include <linux/pid_namespace.h>
-#include <linux/clocksource.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/console.h>
-#include <linux/threads.h>
-#include <linux/uaccess.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/ptrace.h>
-#include <linux/reboot.h>
-#include <linux/string.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/sysrq.h>
-#include <linux/init.h>
-#include <linux/kgdb.h>
-#include <linux/pid.h>
-#include <linux/smp.h>
-#include <linux/mm.h>
-
-#include <asm/cacheflush.h>
-#include <asm/byteorder.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
-#include <asm/unaligned.h>
-
-static int kgdb_break_asap;
-
-#define KGDB_MAX_THREAD_QUERY 17
-struct kgdb_state {
- int ex_vector;
- int signo;
- int err_code;
- int cpu;
- int pass_exception;
- unsigned long thr_query;
- unsigned long threadid;
- long kgdb_usethreadid;
- struct pt_regs *linux_regs;
-};
-
-/* Exception state values */
-#define DCPU_WANT_MASTER 0x1 /* Waiting to become a master kgdb cpu */
-#define DCPU_NEXT_MASTER 0x2 /* Transition from one master cpu to another */
-#define DCPU_IS_SLAVE 0x4 /* Slave cpu enter exception */
-#define DCPU_SSTEP 0x8 /* CPU is single stepping */
-
-static struct debuggerinfo_struct {
- void *debuggerinfo;
- struct task_struct *task;
- int exception_state;
-} kgdb_info[NR_CPUS];
-
-/**
- * kgdb_connected - Is a host GDB connected to us?
- */
-int kgdb_connected;
-EXPORT_SYMBOL_GPL(kgdb_connected);
-
-/* All the KGDB handlers are installed */
-static int kgdb_io_module_registered;
-
-/* Guard for recursive entry */
-static int exception_level;
-
-static struct kgdb_io *kgdb_io_ops;
-static DEFINE_SPINLOCK(kgdb_registration_lock);
-
-/* kgdb console driver is loaded */
-static int kgdb_con_registered;
-/* determine if kgdb console output should be used */
-static int kgdb_use_con;
-
-static int __init opt_kgdb_con(char *str)
-{
- kgdb_use_con = 1;
- return 0;
-}
-
-early_param("kgdbcon", opt_kgdb_con);
-
-module_param(kgdb_use_con, int, 0644);
-
-/*
- * Holds information about breakpoints in a kernel. These breakpoints are
- * added and removed by gdb.
- */
-static struct kgdb_bkpt kgdb_break[KGDB_MAX_BREAKPOINTS] = {
- [0 ... KGDB_MAX_BREAKPOINTS-1] = { .state = BP_UNDEFINED }
-};
-
-/*
- * The CPU# of the active CPU, or -1 if none:
- */
-atomic_t kgdb_active = ATOMIC_INIT(-1);
-
-/*
- * We use NR_CPUS not PERCPU, in case kgdb is used to debug early
- * bootup code (which might not have percpu set up yet):
- */
-static atomic_t passive_cpu_wait[NR_CPUS];
-static atomic_t cpu_in_kgdb[NR_CPUS];
-atomic_t kgdb_setting_breakpoint;
-
-struct task_struct *kgdb_usethread;
-struct task_struct *kgdb_contthread;
-
-int kgdb_single_step;
-pid_t kgdb_sstep_pid;
-
-/* Our I/O buffers. */
-static char remcom_in_buffer[BUFMAX];
-static char remcom_out_buffer[BUFMAX];
-
-/* Storage for the registers, in GDB format. */
-static unsigned long gdb_regs[(NUMREGBYTES +
- sizeof(unsigned long) - 1) /
- sizeof(unsigned long)];
-
-/* to keep track of the CPU which is doing the single stepping*/
-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
-
-/*
- * If you are debugging a problem where roundup (the collection of
- * all other CPUs) is a problem [this should be extremely rare],
- * then use the nokgdbroundup option to avoid roundup. In that case
- * the other CPUs might interfere with your debugging context, so
- * use this with care:
- */
-static int kgdb_do_roundup = 1;
-
-static int __init opt_nokgdbroundup(char *str)
-{
- kgdb_do_roundup = 0;
-
- return 0;
-}
-
-early_param("nokgdbroundup", opt_nokgdbroundup);
-
-/*
- * Finally, some KGDB code :-)
- */
-
-/*
- * Weak aliases for breakpoint management,
- * can be overridden by architectures when needed:
- */
-int __weak kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr)
-{
- int err;
-
- err = probe_kernel_read(saved_instr, (char *)addr, BREAK_INSTR_SIZE);
- if (err)
- return err;
-
- return probe_kernel_write((char *)addr, arch_kgdb_ops.gdb_bpt_instr,
- BREAK_INSTR_SIZE);
-}
-
-int __weak kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle)
-{
- return probe_kernel_write((char *)addr,
- (char *)bundle, BREAK_INSTR_SIZE);
-}
-
-int __weak kgdb_validate_break_address(unsigned long addr)
-{
- char tmp_variable[BREAK_INSTR_SIZE];
- int err;
- /* Validate setting the breakpoint and then removing it. If the
- * remove fails, the kernel needs to emit a bad message because we
- * are in deep trouble, not being able to put things back the way we
- * found them.
- */
- err = kgdb_arch_set_breakpoint(addr, tmp_variable);
- if (err)
- return err;
- err = kgdb_arch_remove_breakpoint(addr, tmp_variable);
- if (err)
- printk(KERN_ERR "KGDB: Critical breakpoint error, kernel "
- "memory destroyed at: %lx", addr);
- return err;
-}
-
-unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs)
-{
- return instruction_pointer(regs);
-}
-
-int __weak kgdb_arch_init(void)
-{
- return 0;
-}
-
-int __weak kgdb_skipexception(int exception, struct pt_regs *regs)
-{
- return 0;
-}
-
-void __weak
-kgdb_post_primary_code(struct pt_regs *regs, int e_vector, int err_code)
-{
- return;
-}
-
-/**
- * kgdb_disable_hw_debug - Disable hardware debugging while we in kgdb.
- * @regs: Current &struct pt_regs.
- *
- * This function will be called if the particular architecture must
- * disable hardware debugging while it is processing gdb packets or
- * handling exception.
- */
-void __weak kgdb_disable_hw_debug(struct pt_regs *regs)
-{
-}
-
-/*
- * GDB remote protocol parser:
- */
-
-static int hex(char ch)
-{
- if ((ch >= 'a') && (ch <= 'f'))
- return ch - 'a' + 10;
- if ((ch >= '0') && (ch <= '9'))
- return ch - '0';
- if ((ch >= 'A') && (ch <= 'F'))
- return ch - 'A' + 10;
- return -1;
-}
-
-/* scan for the sequence $<data>#<checksum> */
-static void get_packet(char *buffer)
-{
- unsigned char checksum;
- unsigned char xmitcsum;
- int count;
- char ch;
-
- do {
- /*
- * Spin and wait around for the start character, ignore all
- * other characters:
- */
- while ((ch = (kgdb_io_ops->read_char())) != '$')
- /* nothing */;
-
- kgdb_connected = 1;
- checksum = 0;
- xmitcsum = -1;
-
- count = 0;
-
- /*
- * now, read until a # or end of buffer is found:
- */
- while (count < (BUFMAX - 1)) {
- ch = kgdb_io_ops->read_char();
- if (ch == '#')
- break;
- checksum = checksum + ch;
- buffer[count] = ch;
- count = count + 1;
- }
- buffer[count] = 0;
-
- if (ch == '#') {
- xmitcsum = hex(kgdb_io_ops->read_char()) << 4;
- xmitcsum += hex(kgdb_io_ops->read_char());
-
- if (checksum != xmitcsum)
- /* failed checksum */
- kgdb_io_ops->write_char('-');
- else
- /* successful transfer */
- kgdb_io_ops->write_char('+');
- if (kgdb_io_ops->flush)
- kgdb_io_ops->flush();
- }
- } while (checksum != xmitcsum);
-}
-
-/*
- * Send the packet in buffer.
- * Check for gdb connection if asked for.
- */
-static void put_packet(char *buffer)
-{
- unsigned char checksum;
- int count;
- char ch;
-
- /*
- * $<packet info>#<checksum>.
- */
- while (1) {
- kgdb_io_ops->write_char('$');
- checksum = 0;
- count = 0;
-
- while ((ch = buffer[count])) {
- kgdb_io_ops->write_char(ch);
- checksum += ch;
- count++;
- }
-
- kgdb_io_ops->write_char('#');
- kgdb_io_ops->write_char(hex_asc_hi(checksum));
- kgdb_io_ops->write_char(hex_asc_lo(checksum));
- if (kgdb_io_ops->flush)
- kgdb_io_ops->flush();
-
- /* Now see what we get in reply. */
- ch = kgdb_io_ops->read_char();
-
- if (ch == 3)
- ch = kgdb_io_ops->read_char();
-
- /* If we get an ACK, we are done. */
- if (ch == '+')
- return;
-
- /*
- * If we get the start of another packet, this means
- * that GDB is attempting to reconnect. We will NAK
- * the packet being sent, and stop trying to send this
- * packet.
- */
- if (ch == '$') {
- kgdb_io_ops->write_char('-');
- if (kgdb_io_ops->flush)
- kgdb_io_ops->flush();
- return;
- }
- }
-}
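For reference, get_packet() and put_packet() implement the GDB remote serial framing: each payload travels as $<data>#<checksum>, the checksum being the modulo-256 sum of the payload bytes printed as two hex digits. A small host-side sketch of the same framing; the example payload is an 'm' memory-read request with a placeholder address.

/* Hedged host-side sketch of the $<data>#<checksum> framing used above. */
#include <stdio.h>

static void frame_packet(const char *payload, char *out, size_t outlen)
{
	unsigned char sum = 0;
	size_t i;

	for (i = 0; payload[i]; i++)
		sum += (unsigned char)payload[i];
	snprintf(out, outlen, "$%s#%02x", payload, (unsigned)sum);
}

int main(void)
{
	char pkt[128];

	frame_packet("mc0100000,10", pkt, sizeof(pkt));	/* read 0x10 bytes */
	puts(pkt);
	return 0;
}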
-
-/*
- * Convert the memory pointed to by mem into hex, placing result in buf.
- * Return a pointer to the last char put in buf (null). May return an error.
- */
-int kgdb_mem2hex(char *mem, char *buf, int count)
-{
- char *tmp;
- int err;
-
- /*
- * We use the upper half of buf as an intermediate buffer for the
- * raw memory copy. Hex conversion will work against this one.
- */
- tmp = buf + count;
-
- err = probe_kernel_read(tmp, mem, count);
- if (!err) {
- while (count > 0) {
- buf = pack_hex_byte(buf, *tmp);
- tmp++;
- count--;
- }
-
- *buf = 0;
- }
-
- return err;
-}
-
-/*
- * Copy the binary array pointed to by buf into mem. Fix $, #, and
- * 0x7d escaped with 0x7d. Return -EFAULT on failure or 0 on success.
- * The input buf is overwitten with the result to write to mem.
- */
-static int kgdb_ebin2mem(char *buf, char *mem, int count)
-{
- int size = 0;
- char *c = buf;
-
- while (count-- > 0) {
- c[size] = *buf++;
- if (c[size] == 0x7d)
- c[size] = *buf++ ^ 0x20;
- size++;
- }
-
- return probe_kernel_write(mem, c, size);
-}
-
-/*
- * Convert the hex array pointed to by buf into binary to be placed in mem.
- * Return a pointer to the character AFTER the last byte written.
- * May return an error.
- */
-int kgdb_hex2mem(char *buf, char *mem, int count)
-{
- char *tmp_raw;
- char *tmp_hex;
-
- /*
- * We use the upper half of buf as an intermediate buffer for the
- * raw memory that is converted from hex.
- */
- tmp_raw = buf + count * 2;
-
- tmp_hex = tmp_raw - 1;
- while (tmp_hex >= buf) {
- tmp_raw--;
- *tmp_raw = hex(*tmp_hex--);
- *tmp_raw |= hex(*tmp_hex--) << 4;
- }
-
- return probe_kernel_write(mem, tmp_raw, count);
-}
-
-/*
- * While we find nice hex chars, build a long_val.
- * Return number of chars processed.
- */
-int kgdb_hex2long(char **ptr, unsigned long *long_val)
-{
- int hex_val;
- int num = 0;
- int negate = 0;
-
- *long_val = 0;
-
- if (**ptr == '-') {
- negate = 1;
- (*ptr)++;
- }
- while (**ptr) {
- hex_val = hex(**ptr);
- if (hex_val < 0)
- break;
-
- *long_val = (*long_val << 4) | hex_val;
- num++;
- (*ptr)++;
- }
-
- if (negate)
- *long_val = -*long_val;
-
- return num;
-}
-
-/* Write memory due to an 'M' or 'X' packet. */
-static int write_mem_msg(int binary)
-{
- char *ptr = &remcom_in_buffer[1];
- unsigned long addr;
- unsigned long length;
- int err;
-
- if (kgdb_hex2long(&ptr, &addr) > 0 && *(ptr++) == ',' &&
- kgdb_hex2long(&ptr, &length) > 0 && *(ptr++) == ':') {
- if (binary)
- err = kgdb_ebin2mem(ptr, (char *)addr, length);
- else
- err = kgdb_hex2mem(ptr, (char *)addr, length);
- if (err)
- return err;
- if (CACHE_FLUSH_IS_SAFE)
- flush_icache_range(addr, addr + length);
- return 0;
- }
-
- return -EINVAL;
-}
-
-static void error_packet(char *pkt, int error)
-{
- error = -error;
- pkt[0] = 'E';
- pkt[1] = hex_asc[(error / 10)];
- pkt[2] = hex_asc[(error % 10)];
- pkt[3] = '\0';
-}
-
-/*
- * Thread ID accessors. We represent a flat TID space to GDB, where
- * the per CPU idle threads (which under Linux all have PID 0) are
- * remapped to negative TIDs.
- */
-
-#define BUF_THREAD_ID_SIZE 16
-
-static char *pack_threadid(char *pkt, unsigned char *id)
-{
- char *limit;
-
- limit = pkt + BUF_THREAD_ID_SIZE;
- while (pkt < limit)
- pkt = pack_hex_byte(pkt, *id++);
-
- return pkt;
-}
-
-static void int_to_threadref(unsigned char *id, int value)
-{
- unsigned char *scan;
- int i = 4;
-
- scan = (unsigned char *)id;
- while (i--)
- *scan++ = 0;
- put_unaligned_be32(value, scan);
-}
-
-static struct task_struct *getthread(struct pt_regs *regs, int tid)
-{
- /*
- * Non-positive TIDs are remapped to the cpu shadow information
- */
- if (tid == 0 || tid == -1)
- tid = -atomic_read(&kgdb_active) - 2;
- if (tid < -1 && tid > -NR_CPUS - 2) {
- if (kgdb_info[-tid - 2].task)
- return kgdb_info[-tid - 2].task;
- else
- return idle_task(-tid - 2);
- }
- if (tid <= 0) {
- printk(KERN_ERR "KGDB: Internal thread select error\n");
- dump_stack();
- return NULL;
- }
-
- /*
- * find_task_by_pid_ns() does not take the tasklist lock anymore
- * but is nicely RCU locked - hence is a pretty resilient
- * thing to use:
- */
- return find_task_by_pid_ns(tid, &init_pid_ns);
-}
-
-/*
- * Some architectures need cache flushes when we set/clear a
- * breakpoint:
- */
-static void kgdb_flush_swbreak_addr(unsigned long addr)
-{
- if (!CACHE_FLUSH_IS_SAFE)
- return;
-
- if (current->mm && current->mm->mmap_cache) {
- flush_cache_range(current->mm->mmap_cache,
- addr, addr + BREAK_INSTR_SIZE);
- }
- /* Force flush instruction cache if it was outside the mm */
- flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
-}
-
-/*
- * SW breakpoint management:
- */
-static int kgdb_activate_sw_breakpoints(void)
-{
- unsigned long addr;
- int error;
- int ret = 0;
- int i;
-
- for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
- if (kgdb_break[i].state != BP_SET)
- continue;
-
- addr = kgdb_break[i].bpt_addr;
- error = kgdb_arch_set_breakpoint(addr,
- kgdb_break[i].saved_instr);
- if (error) {
- ret = error;
- printk(KERN_INFO "KGDB: BP install failed: %lx", addr);
- continue;
- }
-
- kgdb_flush_swbreak_addr(addr);
- kgdb_break[i].state = BP_ACTIVE;
- }
- return ret;
-}
-
-static int kgdb_set_sw_break(unsigned long addr)
-{
- int err = kgdb_validate_break_address(addr);
- int breakno = -1;
- int i;
-
- if (err)
- return err;
-
- for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
- if ((kgdb_break[i].state == BP_SET) &&
- (kgdb_break[i].bpt_addr == addr))
- return -EEXIST;
- }
- for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
- if (kgdb_break[i].state == BP_REMOVED &&
- kgdb_break[i].bpt_addr == addr) {
- breakno = i;
- break;
- }
- }
-
- if (breakno == -1) {
- for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
- if (kgdb_break[i].state == BP_UNDEFINED) {
- breakno = i;
- break;
- }
- }
- }
-
- if (breakno == -1)
- return -E2BIG;
-
- kgdb_break[breakno].state = BP_SET;
- kgdb_break[breakno].type = BP_BREAKPOINT;
- kgdb_break[breakno].bpt_addr = addr;
-
- return 0;
-}
-
-static int kgdb_deactivate_sw_breakpoints(void)
-{
- unsigned long addr;
- int error;
- int ret = 0;
- int i;
-
- for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
- if (kgdb_break[i].state != BP_ACTIVE)
- continue;
- addr = kgdb_break[i].bpt_addr;
- error = kgdb_arch_remove_breakpoint(addr,
- kgdb_break[i].saved_instr);
- if (error) {
- printk(KERN_INFO "KGDB: BP remove failed: %lx\n", addr);
- ret = error;
- }
-
- kgdb_flush_swbreak_addr(addr);
- kgdb_break[i].state = BP_SET;
- }
- return ret;
-}
-
-static int kgdb_remove_sw_break(unsigned long addr)
-{
- int i;
-
- for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
- if ((kgdb_break[i].state == BP_SET) &&
- (kgdb_break[i].bpt_addr == addr)) {
- kgdb_break[i].state = BP_REMOVED;
- return 0;
- }
- }
- return -ENOENT;
-}
-
-int kgdb_isremovedbreak(unsigned long addr)
-{
- int i;
-
- for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
- if ((kgdb_break[i].state == BP_REMOVED) &&
- (kgdb_break[i].bpt_addr == addr))
- return 1;
- }
- return 0;
-}
-
-static int remove_all_break(void)
-{
- unsigned long addr;
- int error;
- int i;
-
- /* Clear memory breakpoints. */
- for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
- if (kgdb_break[i].state != BP_ACTIVE)
- goto setundefined;
- addr = kgdb_break[i].bpt_addr;
- error = kgdb_arch_remove_breakpoint(addr,
- kgdb_break[i].saved_instr);
- if (error)
- printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n",
- addr);
-setundefined:
- kgdb_break[i].state = BP_UNDEFINED;
- }
-
- /* Clear hardware breakpoints. */
- if (arch_kgdb_ops.remove_all_hw_break)
- arch_kgdb_ops.remove_all_hw_break();
-
- return 0;
-}
-
-/*
- * Remap normal tasks to their real PID,
- * CPU shadow threads are mapped to -CPU - 2
- */
-static inline int shadow_pid(int realpid)
-{
- if (realpid)
- return realpid;
-
- return -raw_smp_processor_id() - 2;
-}
-
-static char gdbmsgbuf[BUFMAX + 1];
-
-static void kgdb_msg_write(const char *s, int len)
-{
- char *bufptr;
- int wcount;
- int i;
-
- /* 'O'utput */
- gdbmsgbuf[0] = 'O';
-
- /* Fill and send buffers... */
- while (len > 0) {
- bufptr = gdbmsgbuf + 1;
-
- /* Calculate how many this time */
- if ((len << 1) > (BUFMAX - 2))
- wcount = (BUFMAX - 2) >> 1;
- else
- wcount = len;
-
- /* Pack in hex chars */
- for (i = 0; i < wcount; i++)
- bufptr = pack_hex_byte(bufptr, s[i]);
- *bufptr = '\0';
-
- /* Move up */
- s += wcount;
- len -= wcount;
-
- /* Write packet */
- put_packet(gdbmsgbuf);
- }
-}
-
-/*
- * Return true if there is a valid kgdb I/O module. Also if no
- * debugger is attached a message can be printed to the console about
- * waiting for the debugger to attach.
- *
- * The print_wait argument is only to be true when called from inside
- * the core kgdb_handle_exception, because it will wait for the
- * debugger to attach.
- */
-static int kgdb_io_ready(int print_wait)
-{
- if (!kgdb_io_ops)
- return 0;
- if (kgdb_connected)
- return 1;
- if (atomic_read(&kgdb_setting_breakpoint))
- return 1;
- if (print_wait)
- printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
- return 1;
-}
-
-/*
- * All the functions that start with gdb_cmd are the various
- * operations to implement the handlers for the gdbserial protocol
- * where KGDB is communicating with an external debugger
- */
-
-/* Handle the '?' status packets */
-static void gdb_cmd_status(struct kgdb_state *ks)
-{
- /*
- * We know that this packet is only sent
- * during initial connect. So to be safe,
- * we clear out our breakpoints now in case
- * GDB is reconnecting.
- */
- remove_all_break();
-
- remcom_out_buffer[0] = 'S';
- pack_hex_byte(&remcom_out_buffer[1], ks->signo);
-}
-
-/* Handle the 'g' get registers request */
-static void gdb_cmd_getregs(struct kgdb_state *ks)
-{
- struct task_struct *thread;
- void *local_debuggerinfo;
- int i;
-
- thread = kgdb_usethread;
- if (!thread) {
- thread = kgdb_info[ks->cpu].task;
- local_debuggerinfo = kgdb_info[ks->cpu].debuggerinfo;
- } else {
- local_debuggerinfo = NULL;
- for_each_online_cpu(i) {
- /*
- * Try to find the task on some other
- * or possibly this node; if we do not
- * find the matching task, then we try
- * to approximate the results.
- */
- if (thread == kgdb_info[i].task)
- local_debuggerinfo = kgdb_info[i].debuggerinfo;
- }
- }
-
- /*
- * All threads that don't have debuggerinfo should be
- * in schedule() sleeping, since all other CPUs
- * are in kgdb_wait, and thus have debuggerinfo.
- */
- if (local_debuggerinfo) {
- pt_regs_to_gdb_regs(gdb_regs, local_debuggerinfo);
- } else {
- /*
- * Pull stuff saved during switch_to; nothing
- * else is accessible (or even particularly
- * relevant).
- *
- * This should be enough for a stack trace.
- */
- sleeping_thread_to_gdb_regs(gdb_regs, thread);
- }
- kgdb_mem2hex((char *)gdb_regs, remcom_out_buffer, NUMREGBYTES);
-}
-
-/* Handle the 'G' set registers request */
-static void gdb_cmd_setregs(struct kgdb_state *ks)
-{
- kgdb_hex2mem(&remcom_in_buffer[1], (char *)gdb_regs, NUMREGBYTES);
-
- if (kgdb_usethread && kgdb_usethread != current) {
- error_packet(remcom_out_buffer, -EINVAL);
- } else {
- gdb_regs_to_pt_regs(gdb_regs, ks->linux_regs);
- strcpy(remcom_out_buffer, "OK");
- }
-}
-
-/* Handle the 'm' memory read bytes */
-static void gdb_cmd_memread(struct kgdb_state *ks)
-{
- char *ptr = &remcom_in_buffer[1];
- unsigned long length;
- unsigned long addr;
- int err;
-
- if (kgdb_hex2long(&ptr, &addr) > 0 && *ptr++ == ',' &&
- kgdb_hex2long(&ptr, &length) > 0) {
- err = kgdb_mem2hex((char *)addr, remcom_out_buffer, length);
- if (err)
- error_packet(remcom_out_buffer, err);
- } else {
- error_packet(remcom_out_buffer, -EINVAL);
- }
-}
-
-/* Handle the 'M' memory write bytes */
-static void gdb_cmd_memwrite(struct kgdb_state *ks)
-{
- int err = write_mem_msg(0);
-
- if (err)
- error_packet(remcom_out_buffer, err);
- else
- strcpy(remcom_out_buffer, "OK");
-}
-
-/* Handle the 'X' memory binary write bytes */
-static void gdb_cmd_binwrite(struct kgdb_state *ks)
-{
- int err = write_mem_msg(1);
-
- if (err)
- error_packet(remcom_out_buffer, err);
- else
- strcpy(remcom_out_buffer, "OK");
-}
-
-/* Handle the 'D' or 'k', detach or kill packets */
-static void gdb_cmd_detachkill(struct kgdb_state *ks)
-{
- int error;
-
- /* The detach case */
- if (remcom_in_buffer[0] == 'D') {
- error = remove_all_break();
- if (error < 0) {
- error_packet(remcom_out_buffer, error);
- } else {
- strcpy(remcom_out_buffer, "OK");
- kgdb_connected = 0;
- }
- put_packet(remcom_out_buffer);
- } else {
- /*
- * Assume the kill case, with no exit code checking,
- * trying to force detach the debugger:
- */
- remove_all_break();
- kgdb_connected = 0;
- }
-}
-
-/* Handle the 'R' reboot packets */
-static int gdb_cmd_reboot(struct kgdb_state *ks)
-{
- /* For now, only honor R0 */
- if (strcmp(remcom_in_buffer, "R0") == 0) {
- printk(KERN_CRIT "Executing emergency reboot\n");
- strcpy(remcom_out_buffer, "OK");
- put_packet(remcom_out_buffer);
-
- /*
- * Execution should not return from
- * machine_emergency_restart()
- */
- machine_emergency_restart();
- kgdb_connected = 0;
-
- return 1;
- }
- return 0;
-}
-
-/* Handle the 'q' query packets */
-static void gdb_cmd_query(struct kgdb_state *ks)
-{
- struct task_struct *g;
- struct task_struct *p;
- unsigned char thref[8];
- char *ptr;
- int i;
- int cpu;
- int finished = 0;
-
- switch (remcom_in_buffer[1]) {
- case 's':
- case 'f':
- if (memcmp(remcom_in_buffer + 2, "ThreadInfo", 10)) {
- error_packet(remcom_out_buffer, -EINVAL);
- break;
- }
-
- i = 0;
- remcom_out_buffer[0] = 'm';
- ptr = remcom_out_buffer + 1;
- if (remcom_in_buffer[1] == 'f') {
- /* Each cpu is a shadow thread */
- for_each_online_cpu(cpu) {
- ks->thr_query = 0;
- int_to_threadref(thref, -cpu - 2);
- pack_threadid(ptr, thref);
- ptr += BUF_THREAD_ID_SIZE;
- *(ptr++) = ',';
- i++;
- }
- }
-
- do_each_thread(g, p) {
- if (i >= ks->thr_query && !finished) {
- int_to_threadref(thref, p->pid);
- pack_threadid(ptr, thref);
- ptr += BUF_THREAD_ID_SIZE;
- *(ptr++) = ',';
- ks->thr_query++;
- if (ks->thr_query % KGDB_MAX_THREAD_QUERY == 0)
- finished = 1;
- }
- i++;
- } while_each_thread(g, p);
-
- *(--ptr) = '\0';
- break;
-
- case 'C':
- /* Current thread id */
- strcpy(remcom_out_buffer, "QC");
- ks->threadid = shadow_pid(current->pid);
- int_to_threadref(thref, ks->threadid);
- pack_threadid(remcom_out_buffer + 2, thref);
- break;
- case 'T':
- if (memcmp(remcom_in_buffer + 1, "ThreadExtraInfo,", 16)) {
- error_packet(remcom_out_buffer, -EINVAL);
- break;
- }
- ks->threadid = 0;
- ptr = remcom_in_buffer + 17;
- kgdb_hex2long(&ptr, &ks->threadid);
- if (!getthread(ks->linux_regs, ks->threadid)) {
- error_packet(remcom_out_buffer, -EINVAL);
- break;
- }
- if ((int)ks->threadid > 0) {
- kgdb_mem2hex(getthread(ks->linux_regs,
- ks->threadid)->comm,
- remcom_out_buffer, 16);
- } else {
- static char tmpstr[23 + BUF_THREAD_ID_SIZE];
-
- sprintf(tmpstr, "shadowCPU%d",
- (int)(-ks->threadid - 2));
- kgdb_mem2hex(tmpstr, remcom_out_buffer, strlen(tmpstr));
- }
- break;
- }
-}
-
-/* Handle the 'H' task query packets */
-static void gdb_cmd_task(struct kgdb_state *ks)
-{
- struct task_struct *thread;
- char *ptr;
-
- switch (remcom_in_buffer[1]) {
- case 'g':
- ptr = &remcom_in_buffer[2];
- kgdb_hex2long(&ptr, &ks->threadid);
- thread = getthread(ks->linux_regs, ks->threadid);
- if (!thread && ks->threadid > 0) {
- error_packet(remcom_out_buffer, -EINVAL);
- break;
- }
- kgdb_usethread = thread;
- ks->kgdb_usethreadid = ks->threadid;
- strcpy(remcom_out_buffer, "OK");
- break;
- case 'c':
- ptr = &remcom_in_buffer[2];
- kgdb_hex2long(&ptr, &ks->threadid);
- if (!ks->threadid) {
- kgdb_contthread = NULL;
- } else {
- thread = getthread(ks->linux_regs, ks->threadid);
- if (!thread && ks->threadid > 0) {
- error_packet(remcom_out_buffer, -EINVAL);
- break;
- }
- kgdb_contthread = thread;
- }
- strcpy(remcom_out_buffer, "OK");
- break;
- }
-}
-
-/* Handle the 'T' thread query packets */
-static void gdb_cmd_thread(struct kgdb_state *ks)
-{
- char *ptr = &remcom_in_buffer[1];
- struct task_struct *thread;
-
- kgdb_hex2long(&ptr, &ks->threadid);
- thread = getthread(ks->linux_regs, ks->threadid);
- if (thread)
- strcpy(remcom_out_buffer, "OK");
- else
- error_packet(remcom_out_buffer, -EINVAL);
-}
-
-/* Handle the 'z' or 'Z' breakpoint remove or set packets */
-static void gdb_cmd_break(struct kgdb_state *ks)
-{
- /*
- * Since GDB-5.3, it's been drafted that '0' is a software
- * breakpoint, '1' is a hardware breakpoint, so let's do that.
- */
- char *bpt_type = &remcom_in_buffer[1];
- char *ptr = &remcom_in_buffer[2];
- unsigned long addr;
- unsigned long length;
- int error = 0;
-
- if (arch_kgdb_ops.set_hw_breakpoint && *bpt_type >= '1') {
- /* Unsupported */
- if (*bpt_type > '4')
- return;
- } else {
- if (*bpt_type != '0' && *bpt_type != '1')
- /* Unsupported. */
- return;
- }
-
- /*
- * Test if this is a hardware breakpoint, and
- * if we support it:
- */
- if (*bpt_type == '1' && !(arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT))
- /* Unsupported. */
- return;
-
- if (*(ptr++) != ',') {
- error_packet(remcom_out_buffer, -EINVAL);
- return;
- }
- if (!kgdb_hex2long(&ptr, &addr)) {
- error_packet(remcom_out_buffer, -EINVAL);
- return;
- }
- if (*(ptr++) != ',' ||
- !kgdb_hex2long(&ptr, &length)) {
- error_packet(remcom_out_buffer, -EINVAL);
- return;
- }
-
- if (remcom_in_buffer[0] == 'Z' && *bpt_type == '0')
- error = kgdb_set_sw_break(addr);
- else if (remcom_in_buffer[0] == 'z' && *bpt_type == '0')
- error = kgdb_remove_sw_break(addr);
- else if (remcom_in_buffer[0] == 'Z')
- error = arch_kgdb_ops.set_hw_breakpoint(addr,
- (int)length, *bpt_type - '0');
- else if (remcom_in_buffer[0] == 'z')
- error = arch_kgdb_ops.remove_hw_breakpoint(addr,
- (int) length, *bpt_type - '0');
-
- if (error == 0)
- strcpy(remcom_out_buffer, "OK");
- else
- error_packet(remcom_out_buffer, error);
-}
-
-/* Handle the 'C' signal / exception passing packets */
-static int gdb_cmd_exception_pass(struct kgdb_state *ks)
-{
- /* C09 == pass exception
- * C15 == detach kgdb, pass exception
- */
- if (remcom_in_buffer[1] == '0' && remcom_in_buffer[2] == '9') {
-
- ks->pass_exception = 1;
- remcom_in_buffer[0] = 'c';
-
- } else if (remcom_in_buffer[1] == '1' && remcom_in_buffer[2] == '5') {
-
- ks->pass_exception = 1;
- remcom_in_buffer[0] = 'D';
- remove_all_break();
- kgdb_connected = 0;
- return 1;
-
- } else {
- kgdb_msg_write("KGDB only knows signal 9 (pass)"
- " and 15 (pass and disconnect)\n"
- "Executing a continue without signal passing\n", 0);
- remcom_in_buffer[0] = 'c';
- }
-
- /* Indicate fall through */
- return -1;
-}
-
-/*
- * This function performs all gdbserial command processing
- */
-static int gdb_serial_stub(struct kgdb_state *ks)
-{
- int error = 0;
- int tmp;
-
- /* Clear the out buffer. */
- memset(remcom_out_buffer, 0, sizeof(remcom_out_buffer));
-
- if (kgdb_connected) {
- unsigned char thref[8];
- char *ptr;
-
- /* Reply to host that an exception has occurred */
- ptr = remcom_out_buffer;
- *ptr++ = 'T';
- ptr = pack_hex_byte(ptr, ks->signo);
- ptr += strlen(strcpy(ptr, "thread:"));
- int_to_threadref(thref, shadow_pid(current->pid));
- ptr = pack_threadid(ptr, thref);
- *ptr++ = ';';
- put_packet(remcom_out_buffer);
- }
-
- kgdb_usethread = kgdb_info[ks->cpu].task;
- ks->kgdb_usethreadid = shadow_pid(kgdb_info[ks->cpu].task->pid);
- ks->pass_exception = 0;
-
- while (1) {
- error = 0;
-
- /* Clear the out buffer. */
- memset(remcom_out_buffer, 0, sizeof(remcom_out_buffer));
-
- get_packet(remcom_in_buffer);
-
- switch (remcom_in_buffer[0]) {
- case '?': /* gdbserial status */
- gdb_cmd_status(ks);
- break;
- case 'g': /* return the value of the CPU registers */
- gdb_cmd_getregs(ks);
- break;
- case 'G': /* set the value of the CPU registers - return OK */
- gdb_cmd_setregs(ks);
- break;
- case 'm': /* mAA..AA,LLLL Read LLLL bytes at address AA..AA */
- gdb_cmd_memread(ks);
- break;
- case 'M': /* MAA..AA,LLLL: Write LLLL bytes at address AA..AA */
- gdb_cmd_memwrite(ks);
- break;
- case 'X': /* XAA..AA,LLLL: Write LLLL bytes at address AA..AA */
- gdb_cmd_binwrite(ks);
- break;
- /* kill or detach. KGDB should treat this like a
- * continue.
- */
- case 'D': /* Debugger detach */
- case 'k': /* Debugger detach via kill */
- gdb_cmd_detachkill(ks);
- goto default_handle;
- case 'R': /* Reboot */
- if (gdb_cmd_reboot(ks))
- goto default_handle;
- break;
- case 'q': /* query command */
- gdb_cmd_query(ks);
- break;
- case 'H': /* task related */
- gdb_cmd_task(ks);
- break;
- case 'T': /* Query thread status */
- gdb_cmd_thread(ks);
- break;
- case 'z': /* Break point remove */
- case 'Z': /* Break point set */
- gdb_cmd_break(ks);
- break;
- case 'C': /* Exception passing */
- tmp = gdb_cmd_exception_pass(ks);
- if (tmp > 0)
- goto default_handle;
- if (tmp == 0)
- break;
- /* Fall through on tmp < 0 */
- case 'c': /* Continue packet */
- case 's': /* Single step packet */
- if (kgdb_contthread && kgdb_contthread != current) {
- /* Can't switch threads in kgdb */
- error_packet(remcom_out_buffer, -EINVAL);
- break;
- }
- kgdb_activate_sw_breakpoints();
- /* Fall through to default processing */
- default:
-default_handle:
- error = kgdb_arch_handle_exception(ks->ex_vector,
- ks->signo,
- ks->err_code,
- remcom_in_buffer,
- remcom_out_buffer,
- ks->linux_regs);
- /*
- * Leave cmd processing on error, detach,
- * kill, continue, or single step.
- */
- if (error >= 0 || remcom_in_buffer[0] == 'D' ||
- remcom_in_buffer[0] == 'k') {
- error = 0;
- goto kgdb_exit;
- }
-
- }
-
- /* reply to the request */
- put_packet(remcom_out_buffer);
- }
-
-kgdb_exit:
- if (ks->pass_exception)
- error = 1;
- return error;
-}
-
-static int kgdb_reenter_check(struct kgdb_state *ks)
-{
- unsigned long addr;
-
- if (atomic_read(&kgdb_active) != raw_smp_processor_id())
- return 0;
-
- /* Panic on recursive debugger calls: */
- exception_level++;
- addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
- kgdb_deactivate_sw_breakpoints();
-
- /*
- * If the breakpoint was removed OK at the place the exception
- * occurred, try to recover and print a warning to the end
- * user because the user planted a breakpoint in a place that
- * KGDB needs in order to function.
- */
- if (kgdb_remove_sw_break(addr) == 0) {
- exception_level = 0;
- kgdb_skipexception(ks->ex_vector, ks->linux_regs);
- kgdb_activate_sw_breakpoints();
- printk(KERN_CRIT "KGDB: re-enter error: breakpoint removed %lx\n",
- addr);
- WARN_ON_ONCE(1);
-
- return 1;
- }
- remove_all_break();
- kgdb_skipexception(ks->ex_vector, ks->linux_regs);
-
- if (exception_level > 1) {
- dump_stack();
- panic("Recursive entry to debugger");
- }
-
- printk(KERN_CRIT "KGDB: re-enter exception: ALL breakpoints killed\n");
- dump_stack();
- panic("Recursive entry to debugger");
-
- return 1;
-}
-
-static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs)
-{
- unsigned long flags;
- int sstep_tries = 100;
- int error = 0;
- int i, cpu;
- int trace_on = 0;
-acquirelock:
- /*
- * Interrupts will be restored by the 'trap return' code, except when
- * single stepping.
- */
- local_irq_save(flags);
-
- cpu = ks->cpu;
- kgdb_info[cpu].debuggerinfo = regs;
- kgdb_info[cpu].task = current;
- /*
- * Make sure the above info reaches the primary CPU before
- * our cpu_in_kgdb[] flag setting does:
- */
- atomic_inc(&cpu_in_kgdb[cpu]);
-
- /*
- * CPU will loop if it is a slave or request to become a kgdb
- * master cpu and acquire the kgdb_active lock:
- */
- while (1) {
- if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
- if (atomic_cmpxchg(&kgdb_active, -1, cpu) == cpu)
- break;
- } else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
- if (!atomic_read(&passive_cpu_wait[cpu]))
- goto return_normal;
- } else {
-return_normal:
- /* Return to normal operation by executing any
- * hw breakpoint fixup.
- */
- if (arch_kgdb_ops.correct_hw_break)
- arch_kgdb_ops.correct_hw_break();
- if (trace_on)
- tracing_on();
- atomic_dec(&cpu_in_kgdb[cpu]);
- touch_softlockup_watchdog_sync();
- clocksource_touch_watchdog();
- local_irq_restore(flags);
- return 0;
- }
- cpu_relax();
- }
-
- /*
- * For single stepping, try to only enter on the processor
- * that was single stepping. To guard against a deadlock, the
- * kernel will only try for the value of sstep_tries before
- * giving up and continuing on.
- */
- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
- (kgdb_info[cpu].task &&
- kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
- atomic_set(&kgdb_active, -1);
- touch_softlockup_watchdog_sync();
- clocksource_touch_watchdog();
- local_irq_restore(flags);
-
- goto acquirelock;
- }
-
- if (!kgdb_io_ready(1)) {
- error = 1;
- goto kgdb_restore; /* No I/O connection, so resume the system */
- }
-
- /*
- * Don't enter if we have hit a removed breakpoint.
- */
- if (kgdb_skipexception(ks->ex_vector, ks->linux_regs))
- goto kgdb_restore;
-
- /* Call the I/O driver's pre_exception routine */
- if (kgdb_io_ops->pre_exception)
- kgdb_io_ops->pre_exception();
-
- kgdb_disable_hw_debug(ks->linux_regs);
-
- /*
- * Get the passive CPU lock which will hold all the non-primary
- * CPU in a spin state while the debugger is active
- */
- if (!kgdb_single_step) {
- for (i = 0; i < NR_CPUS; i++)
- atomic_inc(&passive_cpu_wait[i]);
- }
-
-#ifdef CONFIG_SMP
- /* Signal the other CPUs to enter kgdb_wait() */
- if ((!kgdb_single_step) && kgdb_do_roundup)
- kgdb_roundup_cpus(flags);
-#endif
-
- /*
- * Wait for the other CPUs to be notified and be waiting for us:
- */
- for_each_online_cpu(i) {
- while (!atomic_read(&cpu_in_kgdb[i]))
- cpu_relax();
- }
-
- /*
- * At this point the primary processor is completely
- * in the debugger and all secondary CPUs are quiescent
- */
- kgdb_post_primary_code(ks->linux_regs, ks->ex_vector, ks->err_code);
- kgdb_deactivate_sw_breakpoints();
- kgdb_single_step = 0;
- kgdb_contthread = current;
- exception_level = 0;
- trace_on = tracing_is_on();
- if (trace_on)
- tracing_off();
-
- /* Talk to debugger with gdbserial protocol */
- error = gdb_serial_stub(ks);
-
- /* Call the I/O driver's post_exception routine */
- if (kgdb_io_ops->post_exception)
- kgdb_io_ops->post_exception();
-
- atomic_dec(&cpu_in_kgdb[ks->cpu]);
-
- if (!kgdb_single_step) {
- for (i = NR_CPUS-1; i >= 0; i--)
- atomic_dec(&passive_cpu_wait[i]);
- /*
- * Wait till all the CPUs have quit
- * from the debugger.
- */
- for_each_online_cpu(i) {
- while (atomic_read(&cpu_in_kgdb[i]))
- cpu_relax();
- }
- }
-
-kgdb_restore:
- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
- if (kgdb_info[sstep_cpu].task)
- kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
- else
- kgdb_sstep_pid = 0;
- }
- if (trace_on)
- tracing_on();
- /* Free kgdb_active */
- atomic_set(&kgdb_active, -1);
- touch_softlockup_watchdog_sync();
- clocksource_touch_watchdog();
- local_irq_restore(flags);
-
- return error;
-}
-
-/*
- * kgdb_handle_exception() - main entry point from a kernel exception
- *
- * Locking hierarchy:
- * interface locks, if any (begin_session)
- * kgdb lock (kgdb_active)
- */
-int
-kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
-{
- struct kgdb_state kgdb_var;
- struct kgdb_state *ks = &kgdb_var;
- int ret;
-
- ks->cpu = raw_smp_processor_id();
- ks->ex_vector = evector;
- ks->signo = signo;
- ks->ex_vector = evector;
- ks->err_code = ecode;
- ks->kgdb_usethreadid = 0;
- ks->linux_regs = regs;
-
- if (kgdb_reenter_check(ks))
- return 0; /* Ouch, double exception ! */
- kgdb_info[ks->cpu].exception_state |= DCPU_WANT_MASTER;
- ret = kgdb_cpu_enter(ks, regs);
- kgdb_info[ks->cpu].exception_state &= ~DCPU_WANT_MASTER;
- return ret;
-}
-
-int kgdb_nmicallback(int cpu, void *regs)
-{
-#ifdef CONFIG_SMP
- struct kgdb_state kgdb_var;
- struct kgdb_state *ks = &kgdb_var;
-
- memset(ks, 0, sizeof(struct kgdb_state));
- ks->cpu = cpu;
- ks->linux_regs = regs;
-
- if (!atomic_read(&cpu_in_kgdb[cpu]) &&
- atomic_read(&kgdb_active) != -1 &&
- atomic_read(&kgdb_active) != cpu) {
- kgdb_info[cpu].exception_state |= DCPU_IS_SLAVE;
- kgdb_cpu_enter(ks, regs);
- kgdb_info[cpu].exception_state &= ~DCPU_IS_SLAVE;
- return 0;
- }
-#endif
- return 1;
-}
-
-static void kgdb_console_write(struct console *co, const char *s,
- unsigned count)
-{
- unsigned long flags;
-
- /* If we're debugging, or KGDB has not connected, don't try
- * and print. */
- if (!kgdb_connected || atomic_read(&kgdb_active) != -1)
- return;
-
- local_irq_save(flags);
- kgdb_msg_write(s, count);
- local_irq_restore(flags);
-}
-
-static struct console kgdbcons = {
- .name = "kgdb",
- .write = kgdb_console_write,
- .flags = CON_PRINTBUFFER | CON_ENABLED,
- .index = -1,
-};
-
-#ifdef CONFIG_MAGIC_SYSRQ
-static void sysrq_handle_gdb(int key, struct tty_struct *tty)
-{
- if (!kgdb_io_ops) {
- printk(KERN_CRIT "ERROR: No KGDB I/O module available\n");
- return;
- }
- if (!kgdb_connected)
- printk(KERN_CRIT "Entering KGDB\n");
-
- kgdb_breakpoint();
-}
-
-static struct sysrq_key_op sysrq_gdb_op = {
- .handler = sysrq_handle_gdb,
- .help_msg = "debug(G)",
- .action_msg = "DEBUG",
-};
-#endif
-
-static void kgdb_register_callbacks(void)
-{
- if (!kgdb_io_module_registered) {
- kgdb_io_module_registered = 1;
- kgdb_arch_init();
-#ifdef CONFIG_MAGIC_SYSRQ
- register_sysrq_key('g', &sysrq_gdb_op);
-#endif
- if (kgdb_use_con && !kgdb_con_registered) {
- register_console(&kgdbcons);
- kgdb_con_registered = 1;
- }
- }
-}
-
-static void kgdb_unregister_callbacks(void)
-{
- /*
- * When this routine is called KGDB should unregister from the
- * panic handler and clean up, making sure it is not handling any
- * break exceptions at the time.
- */
- if (kgdb_io_module_registered) {
- kgdb_io_module_registered = 0;
- kgdb_arch_exit();
-#ifdef CONFIG_MAGIC_SYSRQ
- unregister_sysrq_key('g', &sysrq_gdb_op);
-#endif
- if (kgdb_con_registered) {
- unregister_console(&kgdbcons);
- kgdb_con_registered = 0;
- }
- }
-}
-
-static void kgdb_initial_breakpoint(void)
-{
- kgdb_break_asap = 0;
-
- printk(KERN_CRIT "kgdb: Waiting for connection from remote gdb...\n");
- kgdb_breakpoint();
-}
-
-/**
- * kgdb_register_io_module - register KGDB IO module
- * @new_kgdb_io_ops: the io ops vector
- *
- * Register it with the KGDB core.
- */
-int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
-{
- int err;
-
- spin_lock(&kgdb_registration_lock);
-
- if (kgdb_io_ops) {
- spin_unlock(&kgdb_registration_lock);
-
- printk(KERN_ERR "kgdb: Another I/O driver is already "
- "registered with KGDB.\n");
- return -EBUSY;
- }
-
- if (new_kgdb_io_ops->init) {
- err = new_kgdb_io_ops->init();
- if (err) {
- spin_unlock(&kgdb_registration_lock);
- return err;
- }
- }
-
- kgdb_io_ops = new_kgdb_io_ops;
-
- spin_unlock(&kgdb_registration_lock);
-
- printk(KERN_INFO "kgdb: Registered I/O driver %s.\n",
- new_kgdb_io_ops->name);
-
- /* Arm KGDB now. */
- kgdb_register_callbacks();
-
- if (kgdb_break_asap)
- kgdb_initial_breakpoint();
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(kgdb_register_io_module);
-
-/**
- * kgdb_unregister_io_module - unregister KGDB IO module
- * @old_kgdb_io_ops: the io ops vector
- *
- * Unregister it with the KGDB core.
- */
-void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
-{
- BUG_ON(kgdb_connected);
-
- /*
- * KGDB is no longer able to communicate out, so
- * unregister our callbacks and reset state.
- */
- kgdb_unregister_callbacks();
-
- spin_lock(&kgdb_registration_lock);
-
- WARN_ON_ONCE(kgdb_io_ops != old_kgdb_io_ops);
- kgdb_io_ops = NULL;
-
- spin_unlock(&kgdb_registration_lock);
-
- printk(KERN_INFO
- "kgdb: Unregistered I/O driver %s, debugger disabled.\n",
- old_kgdb_io_ops->name);
-}
-EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
-
-/**
- * kgdb_breakpoint - generate breakpoint exception
- *
- * This function will generate a breakpoint exception. It is used at the
- * beginning of a program to sync up with a debugger and can be used
- * otherwise as a quick means to stop program execution and "break" into
- * the debugger.
- */
-void kgdb_breakpoint(void)
-{
- atomic_inc(&kgdb_setting_breakpoint);
- wmb(); /* Sync point before breakpoint */
- arch_kgdb_breakpoint();
- wmb(); /* Sync point after breakpoint */
- atomic_dec(&kgdb_setting_breakpoint);
-}
-EXPORT_SYMBOL_GPL(kgdb_breakpoint);
-
-static int __init opt_kgdb_wait(char *str)
-{
- kgdb_break_asap = 1;
-
- if (kgdb_io_module_registered)
- kgdb_initial_breakpoint();
-
- return 0;
-}
-
-early_param("kgdbwait", opt_kgdb_wait);
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 0ed46f3e51e9..282035f3ae96 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1588,6 +1588,72 @@ static void __kprobes kill_kprobe(struct kprobe *p)
arch_remove_kprobe(p);
}
+/* Disable one kprobe */
+int __kprobes disable_kprobe(struct kprobe *kp)
+{
+ int ret = 0;
+ struct kprobe *p;
+
+ mutex_lock(&kprobe_mutex);
+
+ /* Check whether specified probe is valid. */
+ p = __get_valid_kprobe(kp);
+ if (unlikely(p == NULL)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* If the probe is already disabled (or gone), just return */
+ if (kprobe_disabled(kp))
+ goto out;
+
+ kp->flags |= KPROBE_FLAG_DISABLED;
+ if (p != kp)
+ /* When kp != p, p is always enabled. */
+ try_to_disable_aggr_kprobe(p);
+
+ if (!kprobes_all_disarmed && kprobe_disabled(p))
+ disarm_kprobe(p);
+out:
+ mutex_unlock(&kprobe_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(disable_kprobe);
+
+/* Enable one kprobe */
+int __kprobes enable_kprobe(struct kprobe *kp)
+{
+ int ret = 0;
+ struct kprobe *p;
+
+ mutex_lock(&kprobe_mutex);
+
+ /* Check whether specified probe is valid. */
+ p = __get_valid_kprobe(kp);
+ if (unlikely(p == NULL)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (kprobe_gone(kp)) {
+ /* This kprobe is gone, we can't enable it. */
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (p != kp)
+ kp->flags &= ~KPROBE_FLAG_DISABLED;
+
+ if (!kprobes_all_disarmed && kprobe_disabled(p)) {
+ p->flags &= ~KPROBE_FLAG_DISABLED;
+ arm_kprobe(p);
+ }
+out:
+ mutex_unlock(&kprobe_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(enable_kprobe);
+
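A hedged sketch of the now-exported pair in use from a module: register a probe, park it with disable_kprobe(), and re-arm it later with enable_kprobe(). The probed symbol and the handler are placeholders.

/* Hedged sketch: park and re-arm a kprobe from a module. */
#include <linux/kprobes.h>
#include <linux/module.h>

static int my_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("hit %s\n", p->symbol_name);
	return 0;
}

static struct kprobe my_kp = {
	.symbol_name	= "do_fork",	/* placeholder symbol */
	.pre_handler	= my_pre,
};

static int __init my_init(void)
{
	int ret = register_kprobe(&my_kp);

	if (ret)
		return ret;
	disable_kprobe(&my_kp);		/* registered but not firing */
	/* ... later, e.g. from a sysfs knob ... */
	enable_kprobe(&my_kp);
	return 0;
}

static void __exit my_exit(void)
{
	unregister_kprobe(&my_kp);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");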
void __kprobes dump_kprobe(struct kprobe *kp)
{
printk(KERN_WARNING "Dumping kprobe:\n");
@@ -1805,72 +1871,6 @@ static const struct file_operations debugfs_kprobes_operations = {
.release = seq_release,
};
-/* Disable one kprobe */
-int __kprobes disable_kprobe(struct kprobe *kp)
-{
- int ret = 0;
- struct kprobe *p;
-
- mutex_lock(&kprobe_mutex);
-
- /* Check whether specified probe is valid. */
- p = __get_valid_kprobe(kp);
- if (unlikely(p == NULL)) {
- ret = -EINVAL;
- goto out;
- }
-
- /* If the probe is already disabled (or gone), just return */
- if (kprobe_disabled(kp))
- goto out;
-
- kp->flags |= KPROBE_FLAG_DISABLED;
- if (p != kp)
- /* When kp != p, p is always enabled. */
- try_to_disable_aggr_kprobe(p);
-
- if (!kprobes_all_disarmed && kprobe_disabled(p))
- disarm_kprobe(p);
-out:
- mutex_unlock(&kprobe_mutex);
- return ret;
-}
-EXPORT_SYMBOL_GPL(disable_kprobe);
-
-/* Enable one kprobe */
-int __kprobes enable_kprobe(struct kprobe *kp)
-{
- int ret = 0;
- struct kprobe *p;
-
- mutex_lock(&kprobe_mutex);
-
- /* Check whether specified probe is valid. */
- p = __get_valid_kprobe(kp);
- if (unlikely(p == NULL)) {
- ret = -EINVAL;
- goto out;
- }
-
- if (kprobe_gone(kp)) {
- /* This kprobe has gone, we couldn't enable it. */
- ret = -EINVAL;
- goto out;
- }
-
- if (p != kp)
- kp->flags &= ~KPROBE_FLAG_DISABLED;
-
- if (!kprobes_all_disarmed && kprobe_disabled(p)) {
- p->flags &= ~KPROBE_FLAG_DISABLED;
- arm_kprobe(p);
- }
-out:
- mutex_unlock(&kprobe_mutex);
- return ret;
-}
-EXPORT_SYMBOL_GPL(enable_kprobe);
-
static void __kprobes arm_all_kprobes(void)
{
struct hlist_head *head;
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 2594e1ce41cb..ec21304856d1 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -431,20 +431,7 @@ static struct stack_trace lockdep_init_trace = {
/*
* Various lockdep statistics:
*/
-atomic_t chain_lookup_hits;
-atomic_t chain_lookup_misses;
-atomic_t hardirqs_on_events;
-atomic_t hardirqs_off_events;
-atomic_t redundant_hardirqs_on;
-atomic_t redundant_hardirqs_off;
-atomic_t softirqs_on_events;
-atomic_t softirqs_off_events;
-atomic_t redundant_softirqs_on;
-atomic_t redundant_softirqs_off;
-atomic_t nr_unused_locks;
-atomic_t nr_cyclic_checks;
-atomic_t nr_find_usage_forwards_checks;
-atomic_t nr_find_usage_backwards_checks;
+DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
#endif
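The debug_atomic_inc() calls converted below no longer take the address of a global atomic_t; they name a field of the per-cpu lockdep_stats structure defined above. A hedged sketch of what the helpers could look like; the real definitions live in lockdep_internals.h and may differ.

/* Hedged sketch (assumed, not the series' exact lockdep_internals.h code). */
struct lockdep_stats {
	int chain_lookup_hits;
	int chain_lookup_misses;
	int hardirqs_on_events;
	int redundant_hardirqs_on;
	/* ... one field per statistic ... */
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);

/* fast path: caller accepts a rare lost increment in racy contexts */
#define __debug_atomic_inc(ptr)	this_cpu_inc(lockdep_stats.ptr)

/* normal path: expected to run with interrupts disabled */
#define debug_atomic_inc(ptr)	do {		\
	WARN_ON_ONCE(!irqs_disabled());		\
	__this_cpu_inc(lockdep_stats.ptr);	\
} while (0)

/* reading a statistic sums the per-cpu contributions */
#define debug_atomic_read(ptr)	({				\
	int __cpu, __total = 0;					\
	for_each_possible_cpu(__cpu)				\
		__total += per_cpu(lockdep_stats, __cpu).ptr;	\
	__total;						\
})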
/*
@@ -748,7 +735,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
return NULL;
}
class = lock_classes + nr_lock_classes++;
- debug_atomic_inc(&nr_unused_locks);
+ debug_atomic_inc(nr_unused_locks);
class->key = key;
class->name = lock->name;
class->subclass = subclass;
@@ -818,7 +805,8 @@ static struct lock_list *alloc_list_entry(void)
* Add a new dependency to the head of the list:
*/
static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
- struct list_head *head, unsigned long ip, int distance)
+ struct list_head *head, unsigned long ip,
+ int distance, struct stack_trace *trace)
{
struct lock_list *entry;
/*
@@ -829,11 +817,9 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
if (!entry)
return 0;
- if (!save_trace(&entry->trace))
- return 0;
-
entry->class = this;
entry->distance = distance;
+ entry->trace = *trace;
/*
* Since we never remove from the dependency list, the list can
* be walked lockless by other CPUs, it's only allocation
@@ -1205,7 +1191,7 @@ check_noncircular(struct lock_list *root, struct lock_class *target,
{
int result;
- debug_atomic_inc(&nr_cyclic_checks);
+ debug_atomic_inc(nr_cyclic_checks);
result = __bfs_forwards(root, target, class_equal, target_entry);
@@ -1242,7 +1228,7 @@ find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
{
int result;
- debug_atomic_inc(&nr_find_usage_forwards_checks);
+ debug_atomic_inc(nr_find_usage_forwards_checks);
result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
@@ -1265,7 +1251,7 @@ find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
{
int result;
- debug_atomic_inc(&nr_find_usage_backwards_checks);
+ debug_atomic_inc(nr_find_usage_backwards_checks);
result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
@@ -1635,12 +1621,20 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
*/
static int
check_prev_add(struct task_struct *curr, struct held_lock *prev,
- struct held_lock *next, int distance)
+ struct held_lock *next, int distance, int trylock_loop)
{
struct lock_list *entry;
int ret;
struct lock_list this;
struct lock_list *uninitialized_var(target_entry);
+ /*
+ * Static variable, serialized by the graph_lock().
+ *
+ * We use this static variable to save the stack trace in case
+ * we call into this function multiple times due to encountering
+ * trylocks in the held lock stack.
+ */
+ static struct stack_trace trace;
/*
* Prove that the new <prev> -> <next> dependency would not
@@ -1688,20 +1682,23 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
}
}
+ if (!trylock_loop && !save_trace(&trace))
+ return 0;
+
/*
* Ok, all validations passed, add the new lock
* to the previous lock's dependency list:
*/
ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
&hlock_class(prev)->locks_after,
- next->acquire_ip, distance);
+ next->acquire_ip, distance, &trace);
if (!ret)
return 0;
ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
&hlock_class(next)->locks_before,
- next->acquire_ip, distance);
+ next->acquire_ip, distance, &trace);
if (!ret)
return 0;
@@ -1731,6 +1728,7 @@ static int
check_prevs_add(struct task_struct *curr, struct held_lock *next)
{
int depth = curr->lockdep_depth;
+ int trylock_loop = 0;
struct held_lock *hlock;
/*
@@ -1756,7 +1754,8 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
* added:
*/
if (hlock->read != 2) {
- if (!check_prev_add(curr, hlock, next, distance))
+ if (!check_prev_add(curr, hlock, next,
+ distance, trylock_loop))
return 0;
/*
* Stop after the first non-trylock entry,
@@ -1779,6 +1778,7 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
if (curr->held_locks[depth].irq_context !=
curr->held_locks[depth-1].irq_context)
break;
+ trylock_loop = 1;
}
return 1;
out_bug:
@@ -1825,7 +1825,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
list_for_each_entry(chain, hash_head, entry) {
if (chain->chain_key == chain_key) {
cache_hit:
- debug_atomic_inc(&chain_lookup_hits);
+ debug_atomic_inc(chain_lookup_hits);
if (very_verbose(class))
printk("\nhash chain already cached, key: "
"%016Lx tail class: [%p] %s\n",
@@ -1890,7 +1890,7 @@ cache_hit:
chain_hlocks[chain->base + j] = class - lock_classes;
}
list_add_tail_rcu(&chain->entry, hash_head);
- debug_atomic_inc(&chain_lookup_misses);
+ debug_atomic_inc(chain_lookup_misses);
inc_chains();
return 1;
@@ -2311,7 +2311,12 @@ void trace_hardirqs_on_caller(unsigned long ip)
return;
if (unlikely(curr->hardirqs_enabled)) {
- debug_atomic_inc(&redundant_hardirqs_on);
+ /*
+ * Neither irqs nor preemption are disabled here,
+ * so this is racy by nature, but losing one hit
+ * in a stat is not a big deal.
+ */
+ __debug_atomic_inc(redundant_hardirqs_on);
return;
}
/* we'll do an OFF -> ON transition: */
@@ -2338,7 +2343,7 @@ void trace_hardirqs_on_caller(unsigned long ip)
curr->hardirq_enable_ip = ip;
curr->hardirq_enable_event = ++curr->irq_events;
- debug_atomic_inc(&hardirqs_on_events);
+ debug_atomic_inc(hardirqs_on_events);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);
@@ -2370,9 +2375,9 @@ void trace_hardirqs_off_caller(unsigned long ip)
curr->hardirqs_enabled = 0;
curr->hardirq_disable_ip = ip;
curr->hardirq_disable_event = ++curr->irq_events;
- debug_atomic_inc(&hardirqs_off_events);
+ debug_atomic_inc(hardirqs_off_events);
} else
- debug_atomic_inc(&redundant_hardirqs_off);
+ debug_atomic_inc(redundant_hardirqs_off);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);
@@ -2396,7 +2401,7 @@ void trace_softirqs_on(unsigned long ip)
return;
if (curr->softirqs_enabled) {
- debug_atomic_inc(&redundant_softirqs_on);
+ debug_atomic_inc(redundant_softirqs_on);
return;
}
@@ -2406,7 +2411,7 @@ void trace_softirqs_on(unsigned long ip)
curr->softirqs_enabled = 1;
curr->softirq_enable_ip = ip;
curr->softirq_enable_event = ++curr->irq_events;
- debug_atomic_inc(&softirqs_on_events);
+ debug_atomic_inc(softirqs_on_events);
/*
* We are going to turn softirqs on, so set the
* usage bit for all held locks, if hardirqs are
@@ -2436,10 +2441,10 @@ void trace_softirqs_off(unsigned long ip)
curr->softirqs_enabled = 0;
curr->softirq_disable_ip = ip;
curr->softirq_disable_event = ++curr->irq_events;
- debug_atomic_inc(&softirqs_off_events);
+ debug_atomic_inc(softirqs_off_events);
DEBUG_LOCKS_WARN_ON(!softirq_count());
} else
- debug_atomic_inc(&redundant_softirqs_off);
+ debug_atomic_inc(redundant_softirqs_off);
}
static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags)
@@ -2644,7 +2649,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
return 0;
break;
case LOCK_USED:
- debug_atomic_dec(&nr_unused_locks);
+ debug_atomic_dec(nr_unused_locks);
break;
default:
if (!debug_locks_off_graph_unlock())
@@ -2750,7 +2755,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
if (!class)
return 0;
}
- debug_atomic_inc((atomic_t *)&class->ops);
+ atomic_inc((atomic_t *)&class->ops);
if (very_verbose(class)) {
printk("\nacquire class [%p] %s", class->key, class->name);
if (class->name_version > 1)
@@ -3227,7 +3232,7 @@ void lock_release(struct lockdep_map *lock, int nested,
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
- trace_lock_release(lock, nested, ip);
+ trace_lock_release(lock, ip);
__lock_release(lock, nested, ip);
current->lockdep_recursion = 0;
raw_local_irq_restore(flags);
@@ -3380,7 +3385,7 @@ found_it:
hlock->holdtime_stamp = now;
}
- trace_lock_acquired(lock, ip, waittime);
+ trace_lock_acquired(lock, ip);
stats = get_lock_stats(hlock_class(hlock));
if (waittime) {
@@ -3801,8 +3806,11 @@ void lockdep_rcu_dereference(const char *file, const int line)
{
struct task_struct *curr = current;
+#ifndef CONFIG_PROVE_RCU_REPEATEDLY
if (!debug_locks_off())
return;
+#endif /* #ifndef CONFIG_PROVE_RCU_REPEATEDLY */
+ /* Note: the following can be executed concurrently, so be careful. */
printk("\n===================================================\n");
printk( "[ INFO: suspicious rcu_dereference_check() usage. ]\n");
printk( "---------------------------------------------------\n");
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index a2ee95ad1313..4f560cfedc8f 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -110,30 +110,60 @@ lockdep_count_backward_deps(struct lock_class *class)
#endif
#ifdef CONFIG_DEBUG_LOCKDEP
+
+#include <asm/local.h>
/*
- * Various lockdep statistics:
+ * Various lockdep statistics.
+ * We want them per cpu as they are often accessed in the fast path
+ * and we want to avoid too much cache bouncing.
*/
-extern atomic_t chain_lookup_hits;
-extern atomic_t chain_lookup_misses;
-extern atomic_t hardirqs_on_events;
-extern atomic_t hardirqs_off_events;
-extern atomic_t redundant_hardirqs_on;
-extern atomic_t redundant_hardirqs_off;
-extern atomic_t softirqs_on_events;
-extern atomic_t softirqs_off_events;
-extern atomic_t redundant_softirqs_on;
-extern atomic_t redundant_softirqs_off;
-extern atomic_t nr_unused_locks;
-extern atomic_t nr_cyclic_checks;
-extern atomic_t nr_cyclic_check_recursions;
-extern atomic_t nr_find_usage_forwards_checks;
-extern atomic_t nr_find_usage_forwards_recursions;
-extern atomic_t nr_find_usage_backwards_checks;
-extern atomic_t nr_find_usage_backwards_recursions;
-# define debug_atomic_inc(ptr) atomic_inc(ptr)
-# define debug_atomic_dec(ptr) atomic_dec(ptr)
-# define debug_atomic_read(ptr) atomic_read(ptr)
+struct lockdep_stats {
+ int chain_lookup_hits;
+ int chain_lookup_misses;
+ int hardirqs_on_events;
+ int hardirqs_off_events;
+ int redundant_hardirqs_on;
+ int redundant_hardirqs_off;
+ int softirqs_on_events;
+ int softirqs_off_events;
+ int redundant_softirqs_on;
+ int redundant_softirqs_off;
+ int nr_unused_locks;
+ int nr_cyclic_checks;
+ int nr_cyclic_check_recursions;
+ int nr_find_usage_forwards_checks;
+ int nr_find_usage_forwards_recursions;
+ int nr_find_usage_backwards_checks;
+ int nr_find_usage_backwards_recursions;
+};
+
+DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
+
+#define __debug_atomic_inc(ptr) \
+ this_cpu_inc(lockdep_stats.ptr);
+
+#define debug_atomic_inc(ptr) { \
+ WARN_ON_ONCE(!irqs_disabled()); \
+ __this_cpu_inc(lockdep_stats.ptr); \
+}
+
+#define debug_atomic_dec(ptr) { \
+ WARN_ON_ONCE(!irqs_disabled()); \
+ __this_cpu_dec(lockdep_stats.ptr); \
+}
+
+#define debug_atomic_read(ptr) ({ \
+ struct lockdep_stats *__cpu_lockdep_stats; \
+ unsigned long long __total = 0; \
+ int __cpu; \
+ for_each_possible_cpu(__cpu) { \
+ __cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu); \
+ __total += __cpu_lockdep_stats->ptr; \
+ } \
+ __total; \
+})
#else
+# define __debug_atomic_inc(ptr) do { } while (0)
# define debug_atomic_inc(ptr) do { } while (0)
# define debug_atomic_dec(ptr) do { } while (0)
# define debug_atomic_read(ptr) 0
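
The debug_atomic_read() macro above folds the per-CPU copies of a counter into one total at read time, which is why the lockdep_proc.c hunks below widen their locals to unsigned long long and switch to %llu. A minimal user-space sketch of the same sum-at-read pattern (NR_CPUS_SIM, lockdep_stats_sim and stats_read are invented names for illustration, not kernel symbols):

#include <stdio.h>

#define NR_CPUS_SIM 4   /* stand-in for the number of possible CPUs */

struct lockdep_stats_sim {
        int chain_lookup_hits;
        int chain_lookup_misses;
};

/* one copy per simulated CPU, as DEFINE_PER_CPU() would provide */
static struct lockdep_stats_sim stats[NR_CPUS_SIM];

/* mimics debug_atomic_read(): sum the field across every CPU's copy */
#define stats_read(field) ({                                    \
        unsigned long long __total = 0;                         \
        int __cpu;                                              \
        for (__cpu = 0; __cpu < NR_CPUS_SIM; __cpu++)           \
                __total += stats[__cpu].field;                  \
        __total;                                                \
})

int main(void)
{
        stats[0].chain_lookup_hits = 3;   /* e.g. events counted on CPU 0 */
        stats[2].chain_lookup_hits = 5;   /* and on CPU 2 */
        printf("chain lookup hits: %llu\n", stats_read(chain_lookup_hits));
        return 0;
}

The increments stay cheap and cache-local; the cost of summing is only paid when the /proc file is read.
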
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index d4aba4f3584c..59b76c8ce9d7 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -184,34 +184,34 @@ static const struct file_operations proc_lockdep_chains_operations = {
static void lockdep_stats_debug_show(struct seq_file *m)
{
#ifdef CONFIG_DEBUG_LOCKDEP
- unsigned int hi1 = debug_atomic_read(&hardirqs_on_events),
- hi2 = debug_atomic_read(&hardirqs_off_events),
- hr1 = debug_atomic_read(&redundant_hardirqs_on),
- hr2 = debug_atomic_read(&redundant_hardirqs_off),
- si1 = debug_atomic_read(&softirqs_on_events),
- si2 = debug_atomic_read(&softirqs_off_events),
- sr1 = debug_atomic_read(&redundant_softirqs_on),
- sr2 = debug_atomic_read(&redundant_softirqs_off);
-
- seq_printf(m, " chain lookup misses: %11u\n",
- debug_atomic_read(&chain_lookup_misses));
- seq_printf(m, " chain lookup hits: %11u\n",
- debug_atomic_read(&chain_lookup_hits));
- seq_printf(m, " cyclic checks: %11u\n",
- debug_atomic_read(&nr_cyclic_checks));
- seq_printf(m, " find-mask forwards checks: %11u\n",
- debug_atomic_read(&nr_find_usage_forwards_checks));
- seq_printf(m, " find-mask backwards checks: %11u\n",
- debug_atomic_read(&nr_find_usage_backwards_checks));
-
- seq_printf(m, " hardirq on events: %11u\n", hi1);
- seq_printf(m, " hardirq off events: %11u\n", hi2);
- seq_printf(m, " redundant hardirq ons: %11u\n", hr1);
- seq_printf(m, " redundant hardirq offs: %11u\n", hr2);
- seq_printf(m, " softirq on events: %11u\n", si1);
- seq_printf(m, " softirq off events: %11u\n", si2);
- seq_printf(m, " redundant softirq ons: %11u\n", sr1);
- seq_printf(m, " redundant softirq offs: %11u\n", sr2);
+ unsigned long long hi1 = debug_atomic_read(hardirqs_on_events),
+ hi2 = debug_atomic_read(hardirqs_off_events),
+ hr1 = debug_atomic_read(redundant_hardirqs_on),
+ hr2 = debug_atomic_read(redundant_hardirqs_off),
+ si1 = debug_atomic_read(softirqs_on_events),
+ si2 = debug_atomic_read(softirqs_off_events),
+ sr1 = debug_atomic_read(redundant_softirqs_on),
+ sr2 = debug_atomic_read(redundant_softirqs_off);
+
+ seq_printf(m, " chain lookup misses: %11llu\n",
+ debug_atomic_read(chain_lookup_misses));
+ seq_printf(m, " chain lookup hits: %11llu\n",
+ debug_atomic_read(chain_lookup_hits));
+ seq_printf(m, " cyclic checks: %11llu\n",
+ debug_atomic_read(nr_cyclic_checks));
+ seq_printf(m, " find-mask forwards checks: %11llu\n",
+ debug_atomic_read(nr_find_usage_forwards_checks));
+ seq_printf(m, " find-mask backwards checks: %11llu\n",
+ debug_atomic_read(nr_find_usage_backwards_checks));
+
+ seq_printf(m, " hardirq on events: %11llu\n", hi1);
+ seq_printf(m, " hardirq off events: %11llu\n", hi2);
+ seq_printf(m, " redundant hardirq ons: %11llu\n", hr1);
+ seq_printf(m, " redundant hardirq offs: %11llu\n", hr2);
+ seq_printf(m, " softirq on events: %11llu\n", si1);
+ seq_printf(m, " softirq off events: %11llu\n", si2);
+ seq_printf(m, " redundant softirq ons: %11llu\n", sr1);
+ seq_printf(m, " redundant softirq offs: %11llu\n", sr2);
#endif
}
@@ -263,7 +263,7 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
#endif
}
#ifdef CONFIG_DEBUG_LOCKDEP
- DEBUG_LOCKS_WARN_ON(debug_atomic_read(&nr_unused_locks) != nr_unused);
+ DEBUG_LOCKS_WARN_ON(debug_atomic_read(nr_unused_locks) != nr_unused);
#endif
seq_printf(m, " lock-classes: %11lu [max: %lu]\n",
nr_lock_classes, MAX_LOCKDEP_KEYS);
diff --git a/kernel/module.c b/kernel/module.c
index b8a1e313448c..0dea0e8cae53 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -77,6 +77,10 @@
DEFINE_MUTEX(module_mutex);
EXPORT_SYMBOL_GPL(module_mutex);
static LIST_HEAD(modules);
+#ifdef CONFIG_KGDB_KDB
+struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
+#endif /* CONFIG_KGDB_KDB */
+
/* Block module loading/unloading? */
int modules_disabled = 0;
@@ -401,7 +405,7 @@ static unsigned int find_pcpusec(Elf_Ehdr *hdr,
Elf_Shdr *sechdrs,
const char *secstrings)
{
- return find_sec(hdr, sechdrs, secstrings, ".data.percpu");
+ return find_sec(hdr, sechdrs, secstrings, ".data..percpu");
}
static void percpu_modcopy(struct module *mod,
@@ -561,33 +565,26 @@ int use_module(struct module *a, struct module *b)
struct module_use *use;
int no_warn, err;
- if (b == NULL || already_uses(a, b)) return 1;
-
- /* If we're interrupted or time out, we fail. */
- if (wait_event_interruptible_timeout(
- module_wq, (err = strong_try_module_get(b)) != -EBUSY,
- 30 * HZ) <= 0) {
- printk("%s: gave up waiting for init of module %s.\n",
- a->name, b->name);
+ if (b == NULL || already_uses(a, b))
return 0;
- }
- /* If strong_try_module_get() returned a different error, we fail. */
+ /* If we're interrupted or time out, we fail. */
+ err = strong_try_module_get(b);
if (err)
- return 0;
+ return err;
DEBUGP("Allocating new usage for %s.\n", a->name);
use = kmalloc(sizeof(*use), GFP_ATOMIC);
if (!use) {
printk("%s: out of memory loading\n", a->name);
module_put(b);
- return 0;
+ return -ENOMEM;
}
use->module_which_uses = a;
list_add(&use->list, &b->modules_which_use_me);
no_warn = sysfs_create_link(b->holders_dir, &a->mkobj.kobj, a->name);
- return 1;
+ return 0;
}
EXPORT_SYMBOL_GPL(use_module);
@@ -724,16 +721,8 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
return -EFAULT;
name[MODULE_NAME_LEN-1] = '\0';
- /* Create stop_machine threads since free_module relies on
- * a non-failing stop_machine call. */
- ret = stop_machine_create();
- if (ret)
- return ret;
-
- if (mutex_lock_interruptible(&module_mutex) != 0) {
- ret = -EINTR;
- goto out_stop;
- }
+ if (mutex_lock_interruptible(&module_mutex) != 0)
+ return -EINTR;
mod = find_module(name);
if (!mod) {
@@ -793,8 +782,6 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
out:
mutex_unlock(&module_mutex);
-out_stop:
- stop_machine_destroy();
return ret;
}
@@ -890,7 +877,7 @@ static inline void module_unload_free(struct module *mod)
int use_module(struct module *a, struct module *b)
{
- return strong_try_module_get(b) == 0;
+ return strong_try_module_get(b);
}
EXPORT_SYMBOL_GPL(use_module);
@@ -1061,17 +1048,39 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs,
struct module *owner;
const struct kernel_symbol *sym;
const unsigned long *crc;
+ DEFINE_WAIT(wait);
+ int err;
+ long timeleft = 30 * HZ;
+again:
sym = find_symbol(name, &owner, &crc,
!(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true);
- /* use_module can fail due to OOM,
- or module initialization or unloading */
- if (sym) {
- if (!check_version(sechdrs, versindex, name, mod, crc, owner)
- || !use_module(mod, owner))
- sym = NULL;
- }
- return sym;
+ if (!sym)
+ return NULL;
+
+ if (!check_version(sechdrs, versindex, name, mod, crc, owner))
+ return NULL;
+
+ prepare_to_wait(&module_wq, &wait, TASK_INTERRUPTIBLE);
+ err = use_module(mod, owner);
+ if (likely(!err) || err != -EBUSY || signal_pending(current)) {
+ finish_wait(&module_wq, &wait);
+ return err ? NULL : sym;
+ }
+
+ /* Module is still loading. Drop lock and wait. */
+ mutex_unlock(&module_mutex);
+ timeleft = schedule_timeout(timeleft);
+ mutex_lock(&module_mutex);
+ finish_wait(&module_wq, &wait);
+
+ /* Module might be gone entirely, or replaced. Re-lookup. */
+ if (timeleft)
+ goto again;
+
+ printk(KERN_WARNING "%s: gave up waiting for init of module %s.\n",
+ mod->name, owner->name);
+ return NULL;
}
/*
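
A note on the resolve_symbol() rework above: the 30-second wait moves out of use_module() and into resolve_symbol(), which now drops module_mutex while sleeping on module_wq and redoes the symbol lookup after waking, since the owning module may have finished initialising, failed, or been unloaded in the meantime. A rough user-space analogue of the drop-lock/timed-wait idea using POSIX condition variables (module_state, state_lock, state_cond and wait_for_module_live are invented for this sketch; the kernel path additionally re-runs find_symbol() after every wakeup):

#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  state_cond = PTHREAD_COND_INITIALIZER;
static int module_state;        /* 0 = still initialising, 1 = live */

/* returns 0 on success, -1 if we gave up waiting */
static int wait_for_module_live(int timeout_sec)
{
        struct timespec deadline;
        int err = 0;

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += timeout_sec;

        pthread_mutex_lock(&state_lock);
        while (module_state == 0 && err == 0) {
                /* drops the lock while sleeping, re-acquires it on wakeup */
                err = pthread_cond_timedwait(&state_cond, &state_lock, &deadline);
        }
        pthread_mutex_unlock(&state_lock);

        return module_state ? 0 : -1;
}

int main(void)
{
        /* with nothing signalling the condition, the wait times out */
        if (wait_for_module_live(1))
                printf("gave up waiting for init of module\n");
        return 0;
}
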
diff --git a/kernel/nmi_watchdog.c b/kernel/nmi_watchdog.c
new file mode 100644
index 000000000000..a79d211c30df
--- /dev/null
+++ b/kernel/nmi_watchdog.c
@@ -0,0 +1,259 @@
+/*
+ * Detect Hard Lockups using the NMI
+ *
+ * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
+ *
+ * This code detects hard lockups: incidents where the kernel on a CPU
+ * does not respond to anything except NMIs.
+ *
+ * Note: Most of this code is borrowed heavily from softlockup.c,
+ * so thanks to Ingo for the initial implementation.
+ * Some chunks also taken from arch/x86/kernel/apic/nmi.c, thanks
+ * to those contributors as well.
+ */
+
+#include <linux/mm.h>
+#include <linux/cpu.h>
+#include <linux/nmi.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/freezer.h>
+#include <linux/lockdep.h>
+#include <linux/notifier.h>
+#include <linux/module.h>
+#include <linux/sysctl.h>
+
+#include <asm/irq_regs.h>
+#include <linux/perf_event.h>
+
+static DEFINE_PER_CPU(struct perf_event *, nmi_watchdog_ev);
+static DEFINE_PER_CPU(int, nmi_watchdog_touch);
+static DEFINE_PER_CPU(long, alert_counter);
+
+static int panic_on_timeout;
+
+void touch_nmi_watchdog(void)
+{
+ __raw_get_cpu_var(nmi_watchdog_touch) = 1;
+ touch_softlockup_watchdog();
+}
+EXPORT_SYMBOL(touch_nmi_watchdog);
+
+void touch_all_nmi_watchdog(void)
+{
+ int cpu;
+
+ for_each_online_cpu(cpu)
+ per_cpu(nmi_watchdog_touch, cpu) = 1;
+ touch_softlockup_watchdog();
+}
+
+static int __init setup_nmi_watchdog(char *str)
+{
+ if (!strncmp(str, "panic", 5)) {
+ panic_on_timeout = 1;
+ str = strchr(str, ',');
+ if (!str)
+ return 1;
+ ++str;
+ }
+ return 1;
+}
+__setup("nmi_watchdog=", setup_nmi_watchdog);
+
+struct perf_event_attr wd_hw_attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+ .size = sizeof(struct perf_event_attr),
+ .pinned = 1,
+ .disabled = 1,
+};
+
+struct perf_event_attr wd_sw_attr = {
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_CPU_CLOCK,
+ .size = sizeof(struct perf_event_attr),
+ .pinned = 1,
+ .disabled = 1,
+};
+
+void wd_overflow(struct perf_event *event, int nmi,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
+ int cpu = smp_processor_id();
+ int touched = 0;
+
+ if (__get_cpu_var(nmi_watchdog_touch)) {
+ per_cpu(nmi_watchdog_touch, cpu) = 0;
+ touched = 1;
+ }
+
+ /* check to see if the cpu is doing anything */
+ if (!touched && hw_nmi_is_cpu_stuck(regs)) {
+ /*
+ * Ayiee, looks like this CPU is stuck ...
+ * wait a few IRQs (5 seconds) before doing the oops ...
+ */
+ per_cpu(alert_counter, cpu) += 1;
+ if (per_cpu(alert_counter, cpu) == 5) {
+ if (panic_on_timeout)
+ panic("NMI Watchdog detected LOCKUP on cpu %d", cpu);
+ else
+ WARN(1, "NMI Watchdog detected LOCKUP on cpu %d", cpu);
+ }
+ } else {
+ per_cpu(alert_counter, cpu) = 0;
+ }
+
+ return;
+}
+
+static int enable_nmi_watchdog(int cpu)
+{
+ struct perf_event *event;
+ struct perf_event_attr *wd_attr;
+
+ event = per_cpu(nmi_watchdog_ev, cpu);
+ if (event && event->state > PERF_EVENT_STATE_OFF)
+ return 0;
+
+ if (event == NULL) {
+ /* Try to register using hardware perf events first */
+ wd_attr = &wd_hw_attr;
+ wd_attr->sample_period = hw_nmi_get_sample_period();
+ event = perf_event_create_kernel_counter(wd_attr, cpu, -1, wd_overflow);
+ if (IS_ERR(event)) {
+ /* hardware doesn't exist or isn't supported, fall back to software events */
+ printk(KERN_INFO "nmi_watchdog: hardware not available, trying software events\n");
+ wd_attr = &wd_sw_attr;
+ wd_attr->sample_period = NSEC_PER_SEC;
+ event = perf_event_create_kernel_counter(wd_attr, cpu, -1, wd_overflow);
+ if (IS_ERR(event)) {
+ printk(KERN_ERR "nmi watchdog failed to create perf event on %i: %p\n", cpu, event);
+ return -1;
+ }
+ }
+ per_cpu(nmi_watchdog_ev, cpu) = event;
+ }
+ perf_event_enable(per_cpu(nmi_watchdog_ev, cpu));
+ return 0;
+}
+
+static void disable_nmi_watchdog(int cpu)
+{
+ struct perf_event *event;
+
+ event = per_cpu(nmi_watchdog_ev, cpu);
+ if (event) {
+ perf_event_disable(per_cpu(nmi_watchdog_ev, cpu));
+ per_cpu(nmi_watchdog_ev, cpu) = NULL;
+ perf_event_release_kernel(event);
+ }
+}
+
+#ifdef CONFIG_SYSCTL
+/*
+ * proc handler for /proc/sys/kernel/nmi_watchdog
+ */
+int nmi_watchdog_enabled;
+
+int proc_nmi_enabled(struct ctl_table *table, int write,
+ void __user *buffer, size_t *length, loff_t *ppos)
+{
+ int cpu;
+
+ if (!write) {
+ struct perf_event *event;
+ for_each_online_cpu(cpu) {
+ event = per_cpu(nmi_watchdog_ev, cpu);
+ if (event && event->state > PERF_EVENT_STATE_OFF) {
+ nmi_watchdog_enabled = 1;
+ break;
+ }
+ }
+ proc_dointvec(table, write, buffer, length, ppos);
+ return 0;
+ }
+
+ touch_all_nmi_watchdog();
+ proc_dointvec(table, write, buffer, length, ppos);
+ if (nmi_watchdog_enabled) {
+ for_each_online_cpu(cpu)
+ if (enable_nmi_watchdog(cpu)) {
+ printk(KERN_ERR "NMI watchdog failed configuration, "
+ " can not be enabled\n");
+ }
+ } else {
+ for_each_online_cpu(cpu)
+ disable_nmi_watchdog(cpu);
+ }
+ return 0;
+}
+
+#endif /* CONFIG_SYSCTL */
+
+/*
+ * Create/destroy watchdog threads as CPUs come and go:
+ */
+static int __cpuinit
+cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+ int hotcpu = (unsigned long)hcpu;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+ per_cpu(nmi_watchdog_touch, hotcpu) = 0;
+ break;
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ if (enable_nmi_watchdog(hotcpu))
+ return NOTIFY_BAD;
+ break;
+#ifdef CONFIG_HOTPLUG_CPU
+ case CPU_UP_CANCELED:
+ case CPU_UP_CANCELED_FROZEN:
+ disable_nmi_watchdog(hotcpu);
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ break;
+#endif /* CONFIG_HOTPLUG_CPU */
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata cpu_nfb = {
+ .notifier_call = cpu_callback
+};
+
+static int __initdata nonmi_watchdog;
+
+static int __init nonmi_watchdog_setup(char *str)
+{
+ nonmi_watchdog = 1;
+ return 1;
+}
+__setup("nonmi_watchdog", nonmi_watchdog_setup);
+
+static int __init spawn_nmi_watchdog_task(void)
+{
+ void *cpu = (void *)(long)smp_processor_id();
+ int err;
+
+ if (nonmi_watchdog)
+ return 0;
+
+ printk(KERN_INFO "NMI watchdog enabled, takes one hw-pmu counter.\n");
+
+ err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
+ if (err == NOTIFY_BAD) {
+ BUG();
+ return 1;
+ }
+ cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
+ register_cpu_notifier(&cpu_nfb);
+
+ return 0;
+}
+early_initcall(spawn_nmi_watchdog_task);
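
wd_overflow() above only declares a lockup after several consecutive NMI samples find the CPU stuck; a touch_nmi_watchdog() call forgives the current sample, and any sign of progress resets the count. The threshold logic in isolation, as a small self-contained sketch (LOCKUP_THRESHOLD and wd_sample are illustrative names; the sample inputs are made up):

#include <stdio.h>

#define LOCKUP_THRESHOLD 5      /* consecutive "stuck" samples before warning */

static int alert_counter;
static int watchdog_touched;

/* called once per simulated NMI sample; stuck = CPU made no progress */
static void wd_sample(int stuck)
{
        int touched = 0;

        if (watchdog_touched) {
                watchdog_touched = 0;
                touched = 1;
        }

        if (!touched && stuck) {
                if (++alert_counter == LOCKUP_THRESHOLD)
                        printf("NMI Watchdog detected LOCKUP\n");
        } else {
                /* progress, or an explicit touch, resets the count */
                alert_counter = 0;
        }
}

int main(void)
{
        int i;

        for (i = 0; i < LOCKUP_THRESHOLD; i++)
                wd_sample(1);   /* five stuck samples in a row -> warning */
        return 0;
}
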
diff --git a/kernel/padata.c b/kernel/padata.c
index fd03513c7327..82958e01564b 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -29,7 +29,7 @@
#include <linux/rcupdate.h>
#define MAX_SEQ_NR INT_MAX - NR_CPUS
-#define MAX_OBJ_NUM 10000 * NR_CPUS
+#define MAX_OBJ_NUM 1000
static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
@@ -358,17 +358,15 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
if (!alloc_cpumask_var(&pd->cpumask, GFP_KERNEL))
goto err_free_queue;
- for_each_possible_cpu(cpu) {
+ cpumask_and(pd->cpumask, cpumask, cpu_active_mask);
+
+ for_each_cpu(cpu, pd->cpumask) {
queue = per_cpu_ptr(pd->queue, cpu);
queue->pd = pd;
- if (cpumask_test_cpu(cpu, cpumask)
- && cpumask_test_cpu(cpu, cpu_active_mask)) {
- queue->cpu_index = cpu_index;
- cpu_index++;
- } else
- queue->cpu_index = -1;
+ queue->cpu_index = cpu_index;
+ cpu_index++;
INIT_LIST_HEAD(&queue->reorder.list);
INIT_LIST_HEAD(&queue->parallel.list);
@@ -382,8 +380,6 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
atomic_set(&queue->num_obj, 0);
}
- cpumask_and(pd->cpumask, cpumask, cpu_active_mask);
-
num_cpus = cpumask_weight(pd->cpumask);
pd->max_seq_nr = (MAX_SEQ_NR / num_cpus) * num_cpus - 1;
@@ -443,10 +439,10 @@ int padata_set_cpumask(struct padata_instance *pinst,
struct parallel_data *pd;
int err = 0;
- might_sleep();
-
mutex_lock(&pinst->lock);
+ get_online_cpus();
+
pd = padata_alloc_pd(pinst, cpumask);
if (!pd) {
err = -ENOMEM;
@@ -458,6 +454,8 @@ int padata_set_cpumask(struct padata_instance *pinst,
padata_replace(pinst, pd);
out:
+ put_online_cpus();
+
mutex_unlock(&pinst->lock);
return err;
@@ -489,12 +487,12 @@ int padata_add_cpu(struct padata_instance *pinst, int cpu)
{
int err;
- might_sleep();
-
mutex_lock(&pinst->lock);
+ get_online_cpus();
cpumask_set_cpu(cpu, pinst->cpumask);
err = __padata_add_cpu(pinst, cpu);
+ put_online_cpus();
mutex_unlock(&pinst->lock);
@@ -527,12 +525,12 @@ int padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
int err;
- might_sleep();
-
mutex_lock(&pinst->lock);
+ get_online_cpus();
cpumask_clear_cpu(cpu, pinst->cpumask);
err = __padata_remove_cpu(pinst, cpu);
+ put_online_cpus();
mutex_unlock(&pinst->lock);
@@ -547,8 +545,6 @@ EXPORT_SYMBOL(padata_remove_cpu);
*/
void padata_start(struct padata_instance *pinst)
{
- might_sleep();
-
mutex_lock(&pinst->lock);
pinst->flags |= PADATA_INIT;
mutex_unlock(&pinst->lock);
@@ -562,16 +558,15 @@ EXPORT_SYMBOL(padata_start);
*/
void padata_stop(struct padata_instance *pinst)
{
- might_sleep();
-
mutex_lock(&pinst->lock);
pinst->flags &= ~PADATA_INIT;
mutex_unlock(&pinst->lock);
}
EXPORT_SYMBOL(padata_stop);
-static int __cpuinit padata_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+#ifdef CONFIG_HOTPLUG_CPU
+static int padata_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
{
int err;
struct padata_instance *pinst;
@@ -621,6 +616,7 @@ static int __cpuinit padata_cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
+#endif
/*
* padata_alloc - allocate and initialize a padata instance
@@ -631,7 +627,6 @@ static int __cpuinit padata_cpu_callback(struct notifier_block *nfb,
struct padata_instance *padata_alloc(const struct cpumask *cpumask,
struct workqueue_struct *wq)
{
- int err;
struct padata_instance *pinst;
struct parallel_data *pd;
@@ -639,6 +634,8 @@ struct padata_instance *padata_alloc(const struct cpumask *cpumask,
if (!pinst)
goto err;
+ get_online_cpus();
+
pd = padata_alloc_pd(pinst, cpumask);
if (!pd)
goto err_free_inst;
@@ -654,22 +651,23 @@ struct padata_instance *padata_alloc(const struct cpumask *cpumask,
pinst->flags = 0;
+#ifdef CONFIG_HOTPLUG_CPU
pinst->cpu_notifier.notifier_call = padata_cpu_callback;
pinst->cpu_notifier.priority = 0;
- err = register_hotcpu_notifier(&pinst->cpu_notifier);
- if (err)
- goto err_free_cpumask;
+ register_hotcpu_notifier(&pinst->cpu_notifier);
+#endif
+
+ put_online_cpus();
mutex_init(&pinst->lock);
return pinst;
-err_free_cpumask:
- free_cpumask_var(pinst->cpumask);
err_free_pd:
padata_free_pd(pd);
err_free_inst:
kfree(pinst);
+ put_online_cpus();
err:
return NULL;
}
@@ -689,7 +687,9 @@ void padata_free(struct padata_instance *pinst)
while (atomic_read(&pinst->pd->refcnt) != 0)
yield();
+#ifdef CONFIG_HOTPLUG_CPU
unregister_hotcpu_notifier(&pinst->cpu_notifier);
+#endif
padata_free_pd(pinst->pd);
free_cpumask_var(pinst->cpumask);
kfree(pinst);
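
padata_alloc_pd() now intersects the caller's cpumask with cpu_active_mask up front and walks only the surviving CPUs, giving them consecutive cpu_index values instead of marking excluded queues with -1. The intersect-then-enumerate idea with plain bitmasks, as a user-space sketch (the 8-bit mask width and the example masks are arbitrary):

#include <stdio.h>

int main(void)
{
        unsigned int requested = 0xb5;  /* CPUs the caller asked for */
        unsigned int active    = 0x3f;  /* CPUs currently usable */
        unsigned int usable    = requested & active;    /* cpumask_and() */
        int cpu, cpu_index = 0;

        /* for_each_cpu(): visit only CPUs present in the intersected mask */
        for (cpu = 0; cpu < 8; cpu++) {
                if (!(usable & (1u << cpu)))
                        continue;
                printf("cpu %d gets cpu_index %d\n", cpu, cpu_index++);
        }
        return 0;
}
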
diff --git a/kernel/params.c b/kernel/params.c
index 0b30ecd53a52..08107d181758 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -31,6 +31,42 @@
#define DEBUGP(fmt, a...)
#endif
+/* Protects all parameters, and incidentally kmalloced_param list. */
+static DEFINE_MUTEX(param_lock);
+
+/* This just allows us to keep track of which parameters are kmalloced. */
+struct kmalloced_param {
+ struct list_head list;
+ char val[];
+};
+static LIST_HEAD(kmalloced_params);
+
+static void *kmalloc_parameter(unsigned int size)
+{
+ struct kmalloced_param *p;
+
+ p = kmalloc(sizeof(*p) + size, GFP_KERNEL);
+ if (!p)
+ return NULL;
+
+ list_add(&p->list, &kmalloced_params);
+ return p->val;
+}
+
+/* Does nothing if parameter wasn't kmalloced above. */
+static void maybe_kfree_parameter(void *param)
+{
+ struct kmalloced_param *p;
+
+ list_for_each_entry(p, &kmalloced_params, list) {
+ if (p->val == param) {
+ list_del(&p->list);
+ kfree(p);
+ break;
+ }
+ }
+}
+
static inline char dash2underscore(char c)
{
if (c == '-')
@@ -49,18 +85,25 @@ static inline int parameq(const char *input, const char *paramname)
static int parse_one(char *param,
char *val,
- struct kernel_param *params,
+ const struct kernel_param *params,
unsigned num_params,
int (*handle_unknown)(char *param, char *val))
{
unsigned int i;
+ int err;
/* Find parameter */
for (i = 0; i < num_params; i++) {
if (parameq(param, params[i].name)) {
+ /* No one handled NULL, so do it here. */
+ if (!val && params[i].ops->set != param_set_bool)
+ return -EINVAL;
DEBUGP("They are equal! Calling %p\n",
- params[i].set);
- return params[i].set(val, &params[i]);
+ params[i].ops->set);
+ mutex_lock(&param_lock);
+ err = params[i].ops->set(val, &params[i]);
+ mutex_unlock(&param_lock);
+ return err;
}
}
@@ -128,7 +171,7 @@ static char *next_arg(char *args, char **param, char **val)
/* Args looks like "foo=bar,bar2 baz=fuz wiz". */
int parse_args(const char *name,
char *args,
- struct kernel_param *params,
+ const struct kernel_param *params,
unsigned num,
int (*unknown)(char *param, char *val))
{
@@ -176,22 +219,29 @@ int parse_args(const char *name,
/* Lazy bastard, eh? */
#define STANDARD_PARAM_DEF(name, type, format, tmptype, strtolfn) \
- int param_set_##name(const char *val, struct kernel_param *kp) \
+ int param_set_##name(const char *val, const struct kernel_param *kp) \
{ \
tmptype l; \
int ret; \
\
- if (!val) return -EINVAL; \
ret = strtolfn(val, 0, &l); \
if (ret == -EINVAL || ((type)l != l)) \
return -EINVAL; \
*((type *)kp->arg) = l; \
return 0; \
} \
- int param_get_##name(char *buffer, struct kernel_param *kp) \
+ int param_get_##name(char *buffer, const struct kernel_param *kp) \
{ \
return sprintf(buffer, format, *((type *)kp->arg)); \
- }
+ } \
+ struct kernel_param_ops param_ops_##name = { \
+ .set = param_set_##name, \
+ .get = param_get_##name, \
+ }; \
+ EXPORT_SYMBOL(param_set_##name); \
+ EXPORT_SYMBOL(param_get_##name); \
+ EXPORT_SYMBOL(param_ops_##name)
+
STANDARD_PARAM_DEF(byte, unsigned char, "%c", unsigned long, strict_strtoul);
STANDARD_PARAM_DEF(short, short, "%hi", long, strict_strtol);
@@ -201,39 +251,50 @@ STANDARD_PARAM_DEF(uint, unsigned int, "%u", unsigned long, strict_strtoul);
STANDARD_PARAM_DEF(long, long, "%li", long, strict_strtol);
STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", unsigned long, strict_strtoul);
-int param_set_charp(const char *val, struct kernel_param *kp)
+int param_set_charp(const char *val, const struct kernel_param *kp)
{
- if (!val) {
- printk(KERN_ERR "%s: string parameter expected\n",
- kp->name);
- return -EINVAL;
- }
-
if (strlen(val) > 1024) {
printk(KERN_ERR "%s: string parameter too long\n",
kp->name);
return -ENOSPC;
}
- /* This is a hack. We can't need to strdup in early boot, and we
+ maybe_kfree_parameter(*(char **)kp->arg);
+
+ /* This is a hack. We can't kmalloc in early boot, and we
* don't need to; this mangled commandline is preserved. */
if (slab_is_available()) {
- *(char **)kp->arg = kstrdup(val, GFP_KERNEL);
+ *(char **)kp->arg = kmalloc_parameter(strlen(val)+1);
if (!*(char **)kp->arg)
return -ENOMEM;
+ strcpy(*(char **)kp->arg, val);
} else
*(const char **)kp->arg = val;
return 0;
}
+EXPORT_SYMBOL(param_set_charp);
-int param_get_charp(char *buffer, struct kernel_param *kp)
+int param_get_charp(char *buffer, const struct kernel_param *kp)
{
return sprintf(buffer, "%s", *((char **)kp->arg));
}
+EXPORT_SYMBOL(param_get_charp);
+
+static void param_free_charp(void *arg)
+{
+ maybe_kfree_parameter(*((char **)arg));
+}
+
+struct kernel_param_ops param_ops_charp = {
+ .set = param_set_charp,
+ .get = param_get_charp,
+ .free = param_free_charp,
+};
+EXPORT_SYMBOL(param_ops_charp);
/* Actually could be a bool or an int, for historical reasons. */
-int param_set_bool(const char *val, struct kernel_param *kp)
+int param_set_bool(const char *val, const struct kernel_param *kp)
{
bool v;
@@ -258,8 +319,9 @@ int param_set_bool(const char *val, struct kernel_param *kp)
*(int *)kp->arg = v;
return 0;
}
+EXPORT_SYMBOL(param_set_bool);
-int param_get_bool(char *buffer, struct kernel_param *kp)
+int param_get_bool(char *buffer, const struct kernel_param *kp)
{
bool val;
if (kp->flags & KPARAM_ISBOOL)
@@ -270,9 +332,16 @@ int param_get_bool(char *buffer, struct kernel_param *kp)
/* Y and N chosen as being relatively non-coder friendly */
return sprintf(buffer, "%c", val ? 'Y' : 'N');
}
+EXPORT_SYMBOL(param_get_bool);
+
+struct kernel_param_ops param_ops_bool = {
+ .set = param_set_bool,
+ .get = param_get_bool,
+};
+EXPORT_SYMBOL(param_ops_bool);
/* This one must be bool. */
-int param_set_invbool(const char *val, struct kernel_param *kp)
+int param_set_invbool(const char *val, const struct kernel_param *kp)
{
int ret;
bool boolval;
@@ -285,18 +354,26 @@ int param_set_invbool(const char *val, struct kernel_param *kp)
*(bool *)kp->arg = !boolval;
return ret;
}
+EXPORT_SYMBOL(param_set_invbool);
-int param_get_invbool(char *buffer, struct kernel_param *kp)
+int param_get_invbool(char *buffer, const struct kernel_param *kp)
{
return sprintf(buffer, "%c", (*(bool *)kp->arg) ? 'N' : 'Y');
}
+EXPORT_SYMBOL(param_get_invbool);
+
+struct kernel_param_ops param_ops_invbool = {
+ .set = param_set_invbool,
+ .get = param_get_invbool,
+};
+EXPORT_SYMBOL(param_ops_invbool);
/* We break the rule and mangle the string. */
static int param_array(const char *name,
const char *val,
unsigned int min, unsigned int max,
void *elem, int elemsize,
- int (*set)(const char *, struct kernel_param *kp),
+ int (*set)(const char *, const struct kernel_param *kp),
u16 flags,
unsigned int *num)
{
@@ -309,12 +386,6 @@ static int param_array(const char *name,
kp.arg = elem;
kp.flags = flags;
- /* No equals sign? */
- if (!val) {
- printk(KERN_ERR "%s: expects arguments\n", name);
- return -EINVAL;
- }
-
*num = 0;
/* We expect a comma-separated list of values. */
do {
@@ -330,6 +401,7 @@ static int param_array(const char *name,
/* nul-terminate and parse */
save = val[len];
((char *)val)[len] = '\0';
+ BUG_ON(!mutex_is_locked(&param_lock));
ret = set(val, &kp);
if (ret != 0)
@@ -347,17 +419,17 @@ static int param_array(const char *name,
return 0;
}
-int param_array_set(const char *val, struct kernel_param *kp)
+static int param_array_set(const char *val, const struct kernel_param *kp)
{
const struct kparam_array *arr = kp->arr;
unsigned int temp_num;
return param_array(kp->name, val, 1, arr->max, arr->elem,
- arr->elemsize, arr->set, kp->flags,
+ arr->elemsize, arr->ops->set, kp->flags,
arr->num ?: &temp_num);
}
-int param_array_get(char *buffer, struct kernel_param *kp)
+static int param_array_get(char *buffer, const struct kernel_param *kp)
{
int i, off, ret;
const struct kparam_array *arr = kp->arr;
@@ -368,7 +440,8 @@ int param_array_get(char *buffer, struct kernel_param *kp)
if (i)
buffer[off++] = ',';
p.arg = arr->elem + arr->elemsize * i;
- ret = arr->get(buffer + off, &p);
+ BUG_ON(!mutex_is_locked(&param_lock));
+ ret = arr->ops->get(buffer + off, &p);
if (ret < 0)
return ret;
off += ret;
@@ -377,14 +450,27 @@ int param_array_get(char *buffer, struct kernel_param *kp)
return off;
}
-int param_set_copystring(const char *val, struct kernel_param *kp)
+static void param_array_free(void *arg)
+{
+ unsigned int i;
+ const struct kparam_array *arr = arg;
+
+ if (arr->ops->free)
+ for (i = 0; i < (arr->num ? *arr->num : arr->max); i++)
+ arr->ops->free(arr->elem + arr->elemsize * i);
+}
+
+struct kernel_param_ops param_array_ops = {
+ .set = param_array_set,
+ .get = param_array_get,
+ .free = param_array_free,
+};
+EXPORT_SYMBOL(param_array_ops);
+
+int param_set_copystring(const char *val, const struct kernel_param *kp)
{
const struct kparam_string *kps = kp->str;
- if (!val) {
- printk(KERN_ERR "%s: missing param set value\n", kp->name);
- return -EINVAL;
- }
if (strlen(val)+1 > kps->maxlen) {
printk(KERN_ERR "%s: string doesn't fit in %u chars.\n",
kp->name, kps->maxlen-1);
@@ -393,12 +479,20 @@ int param_set_copystring(const char *val, struct kernel_param *kp)
strcpy(kps->string, val);
return 0;
}
+EXPORT_SYMBOL(param_set_copystring);
-int param_get_string(char *buffer, struct kernel_param *kp)
+int param_get_string(char *buffer, const struct kernel_param *kp)
{
const struct kparam_string *kps = kp->str;
return strlcpy(buffer, kps->string, kps->maxlen);
}
+EXPORT_SYMBOL(param_get_string);
+
+struct kernel_param_ops param_ops_string = {
+ .set = param_set_copystring,
+ .get = param_get_string,
+};
+EXPORT_SYMBOL(param_ops_string);
/* sysfs output in /sys/modules/XYZ/parameters/ */
#define to_module_attr(n) container_of(n, struct module_attribute, attr)
@@ -409,7 +503,7 @@ extern struct kernel_param __start___param[], __stop___param[];
struct param_attribute
{
struct module_attribute mattr;
- struct kernel_param *param;
+ const struct kernel_param *param;
};
struct module_param_attrs
@@ -428,10 +522,12 @@ static ssize_t param_attr_show(struct module_attribute *mattr,
int count;
struct param_attribute *attribute = to_param_attr(mattr);
- if (!attribute->param->get)
+ if (!attribute->param->ops->get)
return -EPERM;
- count = attribute->param->get(buf, attribute->param);
+ mutex_lock(&param_lock);
+ count = attribute->param->ops->get(buf, attribute->param);
+ mutex_unlock(&param_lock);
if (count > 0) {
strcat(buf, "\n");
++count;
@@ -447,10 +543,12 @@ static ssize_t param_attr_store(struct module_attribute *mattr,
int err;
struct param_attribute *attribute = to_param_attr(mattr);
- if (!attribute->param->set)
+ if (!attribute->param->ops->set)
return -EPERM;
- err = attribute->param->set(buf, attribute->param);
+ mutex_lock(&param_lock);
+ err = attribute->param->ops->set(buf, attribute->param);
+ mutex_unlock(&param_lock);
if (!err)
return len;
return err;
@@ -464,6 +562,18 @@ static ssize_t param_attr_store(struct module_attribute *mattr,
#endif
#ifdef CONFIG_SYSFS
+void __kernel_param_lock(void)
+{
+ mutex_lock(&param_lock);
+}
+EXPORT_SYMBOL(__kernel_param_lock);
+
+void __kernel_param_unlock(void)
+{
+ mutex_unlock(&param_lock);
+}
+EXPORT_SYMBOL(__kernel_param_unlock);
+
/*
* add_sysfs_param - add a parameter to sysfs
* @mk: struct module_kobject
@@ -475,7 +585,7 @@ static ssize_t param_attr_store(struct module_attribute *mattr,
* if there's an error.
*/
static __modinit int add_sysfs_param(struct module_kobject *mk,
- struct kernel_param *kp,
+ const struct kernel_param *kp,
const char *name)
{
struct module_param_attrs *new;
@@ -557,7 +667,7 @@ static void free_module_param_attrs(struct module_kobject *mk)
* /sys/module/[mod->name]/parameters/
*/
int module_param_sysfs_setup(struct module *mod,
- struct kernel_param *kparam,
+ const struct kernel_param *kparam,
unsigned int num_params)
{
int i, err;
@@ -602,7 +712,11 @@ void module_param_sysfs_remove(struct module *mod)
void destroy_params(const struct kernel_param *params, unsigned num)
{
- /* FIXME: This should free kmalloced charp parameters. It doesn't. */
+ unsigned int i;
+
+ for (i = 0; i < num; i++)
+ if (params[i].ops->free)
+ params[i].ops->free(params[i].arg);
}
static void __init kernel_add_sysfs_param(const char *name,
@@ -768,28 +882,3 @@ static int __init param_sysfs_init(void)
subsys_initcall(param_sysfs_init);
#endif /* CONFIG_SYSFS */
-
-EXPORT_SYMBOL(param_set_byte);
-EXPORT_SYMBOL(param_get_byte);
-EXPORT_SYMBOL(param_set_short);
-EXPORT_SYMBOL(param_get_short);
-EXPORT_SYMBOL(param_set_ushort);
-EXPORT_SYMBOL(param_get_ushort);
-EXPORT_SYMBOL(param_set_int);
-EXPORT_SYMBOL(param_get_int);
-EXPORT_SYMBOL(param_set_uint);
-EXPORT_SYMBOL(param_get_uint);
-EXPORT_SYMBOL(param_set_long);
-EXPORT_SYMBOL(param_get_long);
-EXPORT_SYMBOL(param_set_ulong);
-EXPORT_SYMBOL(param_get_ulong);
-EXPORT_SYMBOL(param_set_charp);
-EXPORT_SYMBOL(param_get_charp);
-EXPORT_SYMBOL(param_set_bool);
-EXPORT_SYMBOL(param_get_bool);
-EXPORT_SYMBOL(param_set_invbool);
-EXPORT_SYMBOL(param_get_invbool);
-EXPORT_SYMBOL(param_array_set);
-EXPORT_SYMBOL(param_array_get);
-EXPORT_SYMBOL(param_set_copystring);
-EXPORT_SYMBOL(param_get_string);
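
kmalloc_parameter() and maybe_kfree_parameter() above prefix each dynamically allocated charp value with a list node, so destroy_params() can later free exactly the values that were kmalloced and nothing else. A self-contained sketch of that prefix-and-track pattern (alloc_parameter, maybe_free_parameter and the open-coded singly linked list stand in for the kernel's list helpers):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct tracked_param {
        struct tracked_param *next;
        char val[];             /* the caller only ever sees this member */
};

static struct tracked_param *tracked_head;

static void *alloc_parameter(size_t size)
{
        struct tracked_param *p = malloc(sizeof(*p) + size);

        if (!p)
                return NULL;
        p->next = tracked_head;         /* remember it for later freeing */
        tracked_head = p;
        return p->val;
}

/* frees the value only if it came from alloc_parameter() */
static void maybe_free_parameter(void *val)
{
        struct tracked_param **pp;

        for (pp = &tracked_head; *pp; pp = &(*pp)->next) {
                if ((*pp)->val == val) {
                        struct tracked_param *p = *pp;

                        *pp = p->next;
                        free(p);
                        return;
                }
        }
}

int main(void)
{
        char *s = alloc_parameter(6);
        static char fixed[] = "boot";   /* e.g. points into the boot command line */

        strcpy(s, "hello");
        maybe_free_parameter(s);        /* found in the list: freed */
        maybe_free_parameter(fixed);    /* not tracked: silently ignored */
        printf("done\n");
        return 0;
}
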
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 3d1552d3c12b..a9047463fd83 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -16,6 +16,7 @@
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
+#include <linux/hash.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
@@ -82,14 +83,6 @@ extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
void __weak hw_perf_disable(void) { barrier(); }
void __weak hw_perf_enable(void) { barrier(); }
-int __weak
-hw_perf_group_sched_in(struct perf_event *group_leader,
- struct perf_cpu_context *cpuctx,
- struct perf_event_context *ctx)
-{
- return 0;
-}
-
void __weak perf_event_print_debug(void) { }
static DEFINE_PER_CPU(int, perf_disable_count);
@@ -640,15 +633,20 @@ group_sched_in(struct perf_event *group_event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
- struct perf_event *event, *partial_group;
+ struct perf_event *event, *partial_group = NULL;
+ const struct pmu *pmu = group_event->pmu;
+ bool txn = false;
int ret;
if (group_event->state == PERF_EVENT_STATE_OFF)
return 0;
- ret = hw_perf_group_sched_in(group_event, cpuctx, ctx);
- if (ret)
- return ret < 0 ? ret : 0;
+ /* Check if a group transaction is available */
+ if (pmu->start_txn)
+ txn = true;
+
+ if (txn)
+ pmu->start_txn(pmu);
if (event_sched_in(group_event, cpuctx, ctx))
return -EAGAIN;
@@ -663,9 +661,19 @@ group_sched_in(struct perf_event *group_event,
}
}
- return 0;
+ if (!txn)
+ return 0;
+
+ ret = pmu->commit_txn(pmu);
+ if (!ret) {
+ pmu->cancel_txn(pmu);
+ return 0;
+ }
group_error:
+ if (txn)
+ pmu->cancel_txn(pmu);
+
/*
* Groups can be scheduled in as one unit only, so undo any
* partial group before returning:
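
The start_txn/commit_txn/cancel_txn hooks introduced here let a PMU batch a whole group and validate it in one go, replacing the weak hw_perf_group_sched_in() hook. A toy sketch of the calling convention only (pmu_ops, the demo handlers and the capacity-of-two limit are invented; real drivers check that the hooks exist and validate against hardware counter constraints):

#include <stdio.h>

struct pmu_ops {
        void (*start_txn)(void);
        int  (*commit_txn)(void);       /* 0 on success, nonzero if the group doesn't fit */
        void (*cancel_txn)(void);
};

static int pending_events;

static void demo_start(void)  { pending_events = 0; }
static int  demo_commit(void) { return pending_events > 2 ? -1 : 0; }
static void demo_cancel(void) { pending_events = 0; }

static struct pmu_ops demo_pmu = {
        .start_txn  = demo_start,
        .commit_txn = demo_commit,
        .cancel_txn = demo_cancel,
};

/* schedule nr events as one group; all of them count or none of them do */
static int group_sched_in(struct pmu_ops *pmu, int nr)
{
        int i;

        pmu->start_txn();
        for (i = 0; i < nr; i++)
                pending_events++;       /* stand-in for event_sched_in() */

        if (pmu->commit_txn()) {
                pmu->cancel_txn();      /* roll the whole group back */
                return -1;
        }
        return 0;
}

int main(void)
{
        printf("group of 2: %s\n", group_sched_in(&demo_pmu, 2) ? "rejected" : "scheduled");
        printf("group of 4: %s\n", group_sched_in(&demo_pmu, 4) ? "rejected" : "scheduled");
        return 0;
}
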
@@ -1367,6 +1375,8 @@ void perf_event_task_sched_in(struct task_struct *task)
if (cpuctx->task_ctx == ctx)
return;
+ perf_disable();
+
/*
* We want to keep the following priority order:
* cpu pinned (that don't need to move), task pinned,
@@ -1379,6 +1389,8 @@ void perf_event_task_sched_in(struct task_struct *task)
ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
cpuctx->task_ctx = ctx;
+
+ perf_enable();
}
#define MAX_INTERRUPTS (~0ULL)
@@ -1857,7 +1869,19 @@ int perf_event_release_kernel(struct perf_event *event)
struct perf_event_context *ctx = event->ctx;
WARN_ON_ONCE(ctx->parent_ctx);
- mutex_lock(&ctx->mutex);
+ /*
+ * There are two ways this annotation is useful:
+ *
+ * 1) there is a lock recursion from perf_event_exit_task
+ * see the comment there.
+ *
+ * 2) there is a lock-inversion with mmap_sem through
+ * perf_event_read_group(), which takes faults while
+ * holding ctx->mutex, however this is called after
+ * the last filedesc died, so there is no possibility
+ * to trigger the AB-BA case.
+ */
+ mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
perf_event_remove_from_context(event);
mutex_unlock(&ctx->mutex);
@@ -2642,6 +2666,7 @@ static int perf_fasync(int fd, struct file *filp, int on)
}
static const struct file_operations perf_fops = {
+ .llseek = no_llseek,
.release = perf_release,
.read = perf_read,
.poll = perf_poll,
@@ -2792,6 +2817,27 @@ void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int ski
/*
+ * We assume there is only KVM supporting the callbacks.
+ * Later on, we might change it to a list if there is
+ * another virtualization implementation supporting the callbacks.
+ */
+struct perf_guest_info_callbacks *perf_guest_cbs;
+
+int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
+{
+ perf_guest_cbs = cbs;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
+
+int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
+{
+ perf_guest_cbs = NULL;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
+
+/*
* Output
*/
static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail,
@@ -3743,7 +3789,7 @@ void __perf_event_mmap(struct vm_area_struct *vma)
.event_id = {
.header = {
.type = PERF_RECORD_MMAP,
- .misc = 0,
+ .misc = PERF_RECORD_MISC_USER,
/* .size */
},
/* .pid */
@@ -3961,36 +4007,6 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
perf_swevent_overflow(event, 0, nmi, data, regs);
}
-static int perf_swevent_is_counting(struct perf_event *event)
-{
- /*
- * The event is active, we're good!
- */
- if (event->state == PERF_EVENT_STATE_ACTIVE)
- return 1;
-
- /*
- * The event is off/error, not counting.
- */
- if (event->state != PERF_EVENT_STATE_INACTIVE)
- return 0;
-
- /*
- * The event is inactive, if the context is active
- * we're part of a group that didn't make it on the 'pmu',
- * not counting.
- */
- if (event->ctx->is_active)
- return 0;
-
- /*
- * We're inactive and the context is too, this means the
- * task is scheduled out, we're counting events that happen
- * to us, like migration events.
- */
- return 1;
-}
-
static int perf_tp_event_match(struct perf_event *event,
struct perf_sample_data *data);
@@ -4014,12 +4030,6 @@ static int perf_swevent_match(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs)
{
- if (event->cpu != -1 && event->cpu != smp_processor_id())
- return 0;
-
- if (!perf_swevent_is_counting(event))
- return 0;
-
if (event->attr.type != type)
return 0;
@@ -4036,18 +4046,53 @@ static int perf_swevent_match(struct perf_event *event,
return 1;
}
-static void perf_swevent_ctx_event(struct perf_event_context *ctx,
- enum perf_type_id type,
- u32 event_id, u64 nr, int nmi,
- struct perf_sample_data *data,
- struct pt_regs *regs)
+static inline u64 swevent_hash(u64 type, u32 event_id)
{
+ u64 val = event_id | (type << 32);
+
+ return hash_64(val, SWEVENT_HLIST_BITS);
+}
+
+static struct hlist_head *
+find_swevent_head(struct perf_cpu_context *ctx, u64 type, u32 event_id)
+{
+ u64 hash;
+ struct swevent_hlist *hlist;
+
+ hash = swevent_hash(type, event_id);
+
+ hlist = rcu_dereference(ctx->swevent_hlist);
+ if (!hlist)
+ return NULL;
+
+ return &hlist->heads[hash];
+}
+
+static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
+ u64 nr, int nmi,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
+ struct perf_cpu_context *cpuctx;
struct perf_event *event;
+ struct hlist_node *node;
+ struct hlist_head *head;
- list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
+ cpuctx = &__get_cpu_var(perf_cpu_context);
+
+ rcu_read_lock();
+
+ head = find_swevent_head(cpuctx, type, event_id);
+
+ if (!head)
+ goto end;
+
+ hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
if (perf_swevent_match(event, type, event_id, data, regs))
perf_swevent_add(event, nr, nmi, data, regs);
}
+end:
+ rcu_read_unlock();
}
int perf_swevent_get_recursion_context(void)
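
do_perf_sw_event() now locates candidate events by hashing the (type, event_id) pair into a per-CPU hash list rather than walking every event in the context. A stripped-down sketch of the bucket selection (the table size and the multiply-shift constant are illustrative; the kernel uses hash_64() with SWEVENT_HLIST_BITS):

#include <stdio.h>
#include <stdint.h>

#define HLIST_BITS 8
#define HLIST_SIZE (1u << HLIST_BITS)

/* fold (type, event_id) into one key, then hash it into a bucket index */
static unsigned int swevent_bucket(uint64_t type, uint32_t event_id)
{
        uint64_t val = event_id | (type << 32);

        /* multiply-shift hash: a well-mixing 64-bit constant, keep the top bits */
        return (unsigned int)((val * 0x61C8864680B583EBULL) >> (64 - HLIST_BITS));
}

int main(void)
{
        /* e.g. a software event: type 1, config/event_id 3 */
        printf("bucket for (1, 3): %u of %u\n", swevent_bucket(1, 3), HLIST_SIZE);
        return 0;
}

Only the events chained off the selected bucket need to be matched, which keeps the software-event fast path cheap even with many events registered.
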
@@ -4085,27 +4130,6 @@ void perf_swevent_put_recursion_context(int rctx)
}
EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context);
-static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
- u64 nr, int nmi,
- struct perf_sample_data *data,
- struct pt_regs *regs)
-{
- struct perf_cpu_context *cpuctx;
- struct perf_event_context *ctx;
-
- cpuctx = &__get_cpu_var(perf_cpu_context);
- rcu_read_lock();
- perf_swevent_ctx_event(&cpuctx->ctx, type, event_id,
- nr, nmi, data, regs);
- /*
- * doesn't really matter which of the child contexts the
- * events ends up in.
- */
- ctx = rcu_dereference(current->perf_event_ctxp);
- if (ctx)
- perf_swevent_ctx_event(ctx, type, event_id, nr, nmi, data, regs);
- rcu_read_unlock();
-}
void __perf_sw_event(u32 event_id, u64 nr, int nmi,
struct pt_regs *regs, u64 addr)
@@ -4131,16 +4155,28 @@ static void perf_swevent_read(struct perf_event *event)
static int perf_swevent_enable(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
+ struct perf_cpu_context *cpuctx;
+ struct hlist_head *head;
+
+ cpuctx = &__get_cpu_var(perf_cpu_context);
if (hwc->sample_period) {
hwc->last_period = hwc->sample_period;
perf_swevent_set_period(event);
}
+
+ head = find_swevent_head(cpuctx, event->attr.type, event->attr.config);
+ if (WARN_ON_ONCE(!head))
+ return -EINVAL;
+
+ hlist_add_head_rcu(&event->hlist_entry, head);
+
return 0;
}
static void perf_swevent_disable(struct perf_event *event)
{
+ hlist_del_rcu(&event->hlist_entry);
}
static const struct pmu perf_ops_generic = {
@@ -4168,15 +4204,8 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
perf_sample_data_init(&data, 0);
data.period = event->hw.last_period;
regs = get_irq_regs();
- /*
- * In case we exclude kernel IPs or are somehow not in interrupt
- * context, provide the next best thing, the user IP.
- */
- if ((event->attr.exclude_kernel || !regs) &&
- !event->attr.exclude_user)
- regs = task_pt_regs(current);
- if (regs) {
+ if (regs && !perf_exclude_event(event, regs)) {
if (!(event->attr.exclude_idle && current->pid == 0))
if (perf_event_overflow(event, 0, &data, regs))
ret = HRTIMER_NORESTART;
@@ -4324,6 +4353,105 @@ static const struct pmu perf_ops_task_clock = {
.read = task_clock_perf_event_read,
};
+static void swevent_hlist_release_rcu(struct rcu_head *rcu_head)
+{
+ struct swevent_hlist *hlist;
+
+ hlist = container_of(rcu_head, struct swevent_hlist, rcu_head);
+ kfree(hlist);
+}
+
+static void swevent_hlist_release(struct perf_cpu_context *cpuctx)
+{
+ struct swevent_hlist *hlist;
+
+ if (!cpuctx->swevent_hlist)
+ return;
+
+ hlist = cpuctx->swevent_hlist;
+ rcu_assign_pointer(cpuctx->swevent_hlist, NULL);
+ call_rcu(&hlist->rcu_head, swevent_hlist_release_rcu);
+}
+
+static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
+{
+ struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
+
+ mutex_lock(&cpuctx->hlist_mutex);
+
+ if (!--cpuctx->hlist_refcount)
+ swevent_hlist_release(cpuctx);
+
+ mutex_unlock(&cpuctx->hlist_mutex);
+}
+
+static void swevent_hlist_put(struct perf_event *event)
+{
+ int cpu;
+
+ if (event->cpu != -1) {
+ swevent_hlist_put_cpu(event, event->cpu);
+ return;
+ }
+
+ for_each_possible_cpu(cpu)
+ swevent_hlist_put_cpu(event, cpu);
+}
+
+static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
+{
+ struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
+ int err = 0;
+
+ mutex_lock(&cpuctx->hlist_mutex);
+
+ if (!cpuctx->swevent_hlist && cpu_online(cpu)) {
+ struct swevent_hlist *hlist;
+
+ hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
+ if (!hlist) {
+ err = -ENOMEM;
+ goto exit;
+ }
+ rcu_assign_pointer(cpuctx->swevent_hlist, hlist);
+ }
+ cpuctx->hlist_refcount++;
+ exit:
+ mutex_unlock(&cpuctx->hlist_mutex);
+
+ return err;
+}
+
+static int swevent_hlist_get(struct perf_event *event)
+{
+ int err;
+ int cpu, failed_cpu;
+
+ if (event->cpu != -1)
+ return swevent_hlist_get_cpu(event, event->cpu);
+
+ get_online_cpus();
+ for_each_possible_cpu(cpu) {
+ err = swevent_hlist_get_cpu(event, cpu);
+ if (err) {
+ failed_cpu = cpu;
+ goto fail;
+ }
+ }
+ put_online_cpus();
+
+ return 0;
+ fail:
+ for_each_possible_cpu(cpu) {
+ if (cpu == failed_cpu)
+ break;
+ swevent_hlist_put_cpu(event, cpu);
+ }
+
+ put_online_cpus();
+ return err;
+}
+
#ifdef CONFIG_EVENT_TRACING
void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
@@ -4357,10 +4485,13 @@ static int perf_tp_event_match(struct perf_event *event,
static void tp_perf_event_destroy(struct perf_event *event)
{
perf_trace_disable(event->attr.config);
+ swevent_hlist_put(event);
}
static const struct pmu *tp_perf_event_init(struct perf_event *event)
{
+ int err;
+
/*
* Raw tracepoint data is a severe data leak, only allow root to
* have these.
@@ -4374,6 +4505,11 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event)
return NULL;
event->destroy = tp_perf_event_destroy;
+ err = swevent_hlist_get(event);
+ if (err) {
+ perf_trace_disable(event->attr.config);
+ return ERR_PTR(err);
+ }
return &perf_ops_generic;
}
@@ -4474,6 +4610,7 @@ static void sw_perf_event_destroy(struct perf_event *event)
WARN_ON(event->parent);
atomic_dec(&perf_swevent_enabled[event_id]);
+ swevent_hlist_put(event);
}
static const struct pmu *sw_perf_event_init(struct perf_event *event)
@@ -4512,6 +4649,12 @@ static const struct pmu *sw_perf_event_init(struct perf_event *event)
case PERF_COUNT_SW_ALIGNMENT_FAULTS:
case PERF_COUNT_SW_EMULATION_FAULTS:
if (!event->parent) {
+ int err;
+
+ err = swevent_hlist_get(event);
+ if (err)
+ return ERR_PTR(err);
+
atomic_inc(&perf_swevent_enabled[event_id]);
event->destroy = sw_perf_event_destroy;
}
@@ -5176,7 +5319,7 @@ void perf_event_exit_task(struct task_struct *child)
*
* But since its the parent context it won't be the same instance.
*/
- mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING);
+ mutex_lock(&child_ctx->mutex);
again:
list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
@@ -5384,6 +5527,7 @@ static void __init perf_event_init_all_cpus(void)
for_each_possible_cpu(cpu) {
cpuctx = &per_cpu(perf_cpu_context, cpu);
+ mutex_init(&cpuctx->hlist_mutex);
__perf_event_init_context(&cpuctx->ctx, NULL);
}
}
@@ -5397,6 +5541,16 @@ static void __cpuinit perf_event_init_cpu(int cpu)
spin_lock(&perf_resource_lock);
cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
spin_unlock(&perf_resource_lock);
+
+ mutex_lock(&cpuctx->hlist_mutex);
+ if (cpuctx->hlist_refcount > 0) {
+ struct swevent_hlist *hlist;
+
+ hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
+ WARN_ON_ONCE(!hlist);
+ rcu_assign_pointer(cpuctx->swevent_hlist, hlist);
+ }
+ mutex_unlock(&cpuctx->hlist_mutex);
}
#ifdef CONFIG_HOTPLUG_CPU
@@ -5416,6 +5570,10 @@ static void perf_event_exit_cpu(int cpu)
struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
struct perf_event_context *ctx = &cpuctx->ctx;
+ mutex_lock(&cpuctx->hlist_mutex);
+ swevent_hlist_release(cpuctx);
+ mutex_unlock(&cpuctx->hlist_mutex);
+
mutex_lock(&ctx->mutex);
smp_call_function_single(cpu, __perf_event_exit_cpu, NULL, 1);
mutex_unlock(&ctx->mutex);
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index 3db49b9ca374..a1aea040eb57 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -2,7 +2,7 @@
* This module exposes the interface to kernel space for specifying
* QoS dependencies. It provides infrastructure for registration of:
*
- * Dependents on a QoS value : register requirements
+ * Dependents on a QoS value : register requests
* Watchers of QoS value : get notified when target QoS value changes
*
* This QoS design is best effort based. Dependents register their QoS needs.
@@ -14,19 +14,21 @@
* timeout: usec <-- currently not used.
* throughput: kbs (kilo byte / sec)
*
- * There are lists of pm_qos_objects each one wrapping requirements, notifiers
+ * There are lists of pm_qos_objects each one wrapping requests, notifiers
*
- * User mode requirements on a QOS parameter register themselves to the
+ * User mode requests on a QOS parameter register themselves to the
 * subsystem by opening the device node /dev/... and writing their request to
* the node. As long as the process holds a file handle open to the node the
* client continues to be accounted for. Upon file release the usermode
- * requirement is removed and a new qos target is computed. This way when the
- * requirement that the application has is cleaned up when closes the file
+ * request is removed and a new qos target is computed. This way the
+ * request that the application holds is cleaned up when it closes the file
 * pointer or exits, and the pm_qos_object will get an opportunity to clean up.
*
* Mark Gross <mgross@linux.intel.com>
*/
+/*#define DEBUG*/
+
#include <linux/pm_qos_params.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
@@ -42,25 +44,25 @@
#include <linux/uaccess.h>
/*
- * locking rule: all changes to requirements or notifiers lists
+ * locking rule: all changes to requests or notifiers lists
* or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock
* held, taken with _irqsave. One lock to rule them all
*/
-struct requirement_list {
+struct pm_qos_request_list {
struct list_head list;
union {
s32 value;
s32 usec;
s32 kbps;
};
- char *name;
+ int pm_qos_class;
};
static s32 max_compare(s32 v1, s32 v2);
static s32 min_compare(s32 v1, s32 v2);
struct pm_qos_object {
- struct requirement_list requirements;
+ struct pm_qos_request_list requests;
struct blocking_notifier_head *notifiers;
struct miscdevice pm_qos_power_miscdev;
char *name;
@@ -72,7 +74,7 @@ struct pm_qos_object {
static struct pm_qos_object null_pm_qos;
static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
static struct pm_qos_object cpu_dma_pm_qos = {
- .requirements = {LIST_HEAD_INIT(cpu_dma_pm_qos.requirements.list)},
+ .requests = {LIST_HEAD_INIT(cpu_dma_pm_qos.requests.list)},
.notifiers = &cpu_dma_lat_notifier,
.name = "cpu_dma_latency",
.default_value = 2000 * USEC_PER_SEC,
@@ -82,7 +84,7 @@ static struct pm_qos_object cpu_dma_pm_qos = {
static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
static struct pm_qos_object network_lat_pm_qos = {
- .requirements = {LIST_HEAD_INIT(network_lat_pm_qos.requirements.list)},
+ .requests = {LIST_HEAD_INIT(network_lat_pm_qos.requests.list)},
.notifiers = &network_lat_notifier,
.name = "network_latency",
.default_value = 2000 * USEC_PER_SEC,
@@ -93,8 +95,7 @@ static struct pm_qos_object network_lat_pm_qos = {
static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
static struct pm_qos_object network_throughput_pm_qos = {
- .requirements =
- {LIST_HEAD_INIT(network_throughput_pm_qos.requirements.list)},
+ .requests = {LIST_HEAD_INIT(network_throughput_pm_qos.requests.list)},
.notifiers = &network_throughput_notifier,
.name = "network_throughput",
.default_value = 0,
@@ -135,31 +136,34 @@ static s32 min_compare(s32 v1, s32 v2)
}
-static void update_target(int target)
+static void update_target(int pm_qos_class)
{
s32 extreme_value;
- struct requirement_list *node;
+ struct pm_qos_request_list *node;
unsigned long flags;
int call_notifier = 0;
spin_lock_irqsave(&pm_qos_lock, flags);
- extreme_value = pm_qos_array[target]->default_value;
+ extreme_value = pm_qos_array[pm_qos_class]->default_value;
list_for_each_entry(node,
- &pm_qos_array[target]->requirements.list, list) {
- extreme_value = pm_qos_array[target]->comparitor(
+ &pm_qos_array[pm_qos_class]->requests.list, list) {
+ extreme_value = pm_qos_array[pm_qos_class]->comparitor(
extreme_value, node->value);
}
- if (atomic_read(&pm_qos_array[target]->target_value) != extreme_value) {
+ if (atomic_read(&pm_qos_array[pm_qos_class]->target_value) !=
+ extreme_value) {
call_notifier = 1;
- atomic_set(&pm_qos_array[target]->target_value, extreme_value);
- pr_debug(KERN_ERR "new target for qos %d is %d\n", target,
- atomic_read(&pm_qos_array[target]->target_value));
+ atomic_set(&pm_qos_array[pm_qos_class]->target_value,
+ extreme_value);
+ pr_debug(KERN_ERR "new target for qos %d is %d\n", pm_qos_class,
+ atomic_read(&pm_qos_array[pm_qos_class]->target_value));
}
spin_unlock_irqrestore(&pm_qos_lock, flags);
if (call_notifier)
- blocking_notifier_call_chain(pm_qos_array[target]->notifiers,
- (unsigned long) extreme_value, NULL);
+ blocking_notifier_call_chain(
+ pm_qos_array[pm_qos_class]->notifiers,
+ (unsigned long) extreme_value, NULL);
}
static int register_pm_qos_misc(struct pm_qos_object *qos)
@@ -185,125 +189,110 @@ static int find_pm_qos_object_by_minor(int minor)
}
/**
- * pm_qos_requirement - returns current system wide qos expectation
+ * pm_qos_request - returns current system wide qos expectation
* @pm_qos_class: identification of which qos value is requested
*
* This function returns the current target value in an atomic manner.
*/
-int pm_qos_requirement(int pm_qos_class)
+int pm_qos_request(int pm_qos_class)
{
return atomic_read(&pm_qos_array[pm_qos_class]->target_value);
}
-EXPORT_SYMBOL_GPL(pm_qos_requirement);
+EXPORT_SYMBOL_GPL(pm_qos_request);
/**
- * pm_qos_add_requirement - inserts new qos request into the list
+ * pm_qos_add_request - inserts new qos request into the list
 * @pm_qos_class: identifies which list of qos requests to use
- * @name: identifies the request
* @value: defines the qos request
*
* This function inserts a new entry in the pm_qos_class list of requested qos
* performance characteristics. It recomputes the aggregate QoS expectations
- * for the pm_qos_class of parameters.
+ * for the pm_qos_class of parameters, and returns the pm_qos_request list
+ * element as a handle for use in updating and removal.  The caller must
+ * save this handle for later use.
*/
-int pm_qos_add_requirement(int pm_qos_class, char *name, s32 value)
+struct pm_qos_request_list *pm_qos_add_request(int pm_qos_class, s32 value)
{
- struct requirement_list *dep;
+ struct pm_qos_request_list *dep;
unsigned long flags;
- dep = kzalloc(sizeof(struct requirement_list), GFP_KERNEL);
+ dep = kzalloc(sizeof(struct pm_qos_request_list), GFP_KERNEL);
if (dep) {
if (value == PM_QOS_DEFAULT_VALUE)
dep->value = pm_qos_array[pm_qos_class]->default_value;
else
dep->value = value;
- dep->name = kstrdup(name, GFP_KERNEL);
- if (!dep->name)
- goto cleanup;
+ dep->pm_qos_class = pm_qos_class;
spin_lock_irqsave(&pm_qos_lock, flags);
list_add(&dep->list,
- &pm_qos_array[pm_qos_class]->requirements.list);
+ &pm_qos_array[pm_qos_class]->requests.list);
spin_unlock_irqrestore(&pm_qos_lock, flags);
update_target(pm_qos_class);
-
- return 0;
}
-cleanup:
- kfree(dep);
- return -ENOMEM;
+ return dep;
}
-EXPORT_SYMBOL_GPL(pm_qos_add_requirement);
+EXPORT_SYMBOL_GPL(pm_qos_add_request);
/**
- * pm_qos_update_requirement - modifies an existing qos request
- * @pm_qos_class: identifies which list of qos request to us
- * @name: identifies the request
+ * pm_qos_update_request - modifies an existing qos request
+ * @pm_qos_req: handle to list element holding a pm_qos request to use
* @value: defines the qos request
*
- * Updates an existing qos requirement for the pm_qos_class of parameters along
+ * Updates an existing qos request for the pm_qos_class of parameters along
* with updating the target pm_qos_class value.
*
- * If the named request isn't in the list then no change is made.
+ * Attempts are made to make this code callable on hot code paths.
*/
-int pm_qos_update_requirement(int pm_qos_class, char *name, s32 new_value)
+void pm_qos_update_request(struct pm_qos_request_list *pm_qos_req,
+ s32 new_value)
{
unsigned long flags;
- struct requirement_list *node;
int pending_update = 0;
+ s32 temp;
spin_lock_irqsave(&pm_qos_lock, flags);
- list_for_each_entry(node,
- &pm_qos_array[pm_qos_class]->requirements.list, list) {
- if (strcmp(node->name, name) == 0) {
- if (new_value == PM_QOS_DEFAULT_VALUE)
- node->value =
- pm_qos_array[pm_qos_class]->default_value;
- else
- node->value = new_value;
- pending_update = 1;
- break;
- }
+ if (new_value == PM_QOS_DEFAULT_VALUE)
+ temp = pm_qos_array[pm_qos_req->pm_qos_class]->default_value;
+ else
+ temp = new_value;
+
+ if (temp != pm_qos_req->value) {
+ pending_update = 1;
+ pm_qos_req->value = temp;
}
spin_unlock_irqrestore(&pm_qos_lock, flags);
if (pending_update)
- update_target(pm_qos_class);
-
- return 0;
+ update_target(pm_qos_req->pm_qos_class);
}
-EXPORT_SYMBOL_GPL(pm_qos_update_requirement);
+EXPORT_SYMBOL_GPL(pm_qos_update_request);
/**
- * pm_qos_remove_requirement - modifies an existing qos request
- * @pm_qos_class: identifies which list of qos request to us
- * @name: identifies the request
+ * pm_qos_remove_request - removes an existing qos request
+ * @pm_qos_req: handle to request list element
*
- * Will remove named qos request from pm_qos_class list of parameters and
- * recompute the current target value for the pm_qos_class.
+ * Will remove pm qos request from the list of requests and
+ * recompute the current target value for the pm_qos_class. Call this
+ * on slow code paths.
*/
-void pm_qos_remove_requirement(int pm_qos_class, char *name)
+void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req)
{
unsigned long flags;
- struct requirement_list *node;
- int pending_update = 0;
+ int qos_class;
+
+ if (pm_qos_req == NULL)
+ return;
+ /* silent return to keep pcm code cleaner */
+ qos_class = pm_qos_req->pm_qos_class;
spin_lock_irqsave(&pm_qos_lock, flags);
- list_for_each_entry(node,
- &pm_qos_array[pm_qos_class]->requirements.list, list) {
- if (strcmp(node->name, name) == 0) {
- kfree(node->name);
- list_del(&node->list);
- kfree(node);
- pending_update = 1;
- break;
- }
- }
+ list_del(&pm_qos_req->list);
+ kfree(pm_qos_req);
spin_unlock_irqrestore(&pm_qos_lock, flags);
- if (pending_update)
- update_target(pm_qos_class);
+ update_target(qos_class);
}
-EXPORT_SYMBOL_GPL(pm_qos_remove_requirement);
+EXPORT_SYMBOL_GPL(pm_qos_remove_request);
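A minimal sketch of how an in-kernel user might drive the reworked handle-based
API; the driver callbacks, the PM_QOS_CPU_DMA_LATENCY class and the 50 usec
value are illustrative assumptions, not part of this patch.

/* Hypothetical driver holding a PM QoS request across a DMA burst. */
#include <linux/pm_qos_params.h>
#include <linux/errno.h>

static struct pm_qos_request_list *foo_qos_req;	/* handle from pm_qos_add_request() */

static int foo_open(void)
{
	/* Start at the class default; the handle must be kept for later calls. */
	foo_qos_req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
					 PM_QOS_DEFAULT_VALUE);
	return foo_qos_req ? 0 : -ENOMEM;
}

static void foo_start_dma(void)
{
	/* Tighten the constraint while DMA is active (50 usec is an example). */
	pm_qos_update_request(foo_qos_req, 50);
}

static void foo_stop_dma(void)
{
	/* Relax back to the default once the constraint is no longer needed. */
	pm_qos_update_request(foo_qos_req, PM_QOS_DEFAULT_VALUE);
}

static void foo_release(void)
{
	/* Drops the request and recomputes the aggregate target for the class. */
	pm_qos_remove_request(foo_qos_req);
	foo_qos_req = NULL;
}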
/**
* pm_qos_add_notifier - sets notification entry for changes to target value
@@ -313,7 +302,7 @@ EXPORT_SYMBOL_GPL(pm_qos_remove_requirement);
* will register the notifier into a notification chain that gets called
* upon changes to the pm_qos_class target value.
*/
- int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
+int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
{
int retval;
@@ -343,21 +332,16 @@ int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
}
EXPORT_SYMBOL_GPL(pm_qos_remove_notifier);
-#define PID_NAME_LEN 32
-
static int pm_qos_power_open(struct inode *inode, struct file *filp)
{
- int ret;
long pm_qos_class;
- char name[PID_NAME_LEN];
pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
if (pm_qos_class >= 0) {
- filp->private_data = (void *)pm_qos_class;
- snprintf(name, PID_NAME_LEN, "process_%d", current->pid);
- ret = pm_qos_add_requirement(pm_qos_class, name,
- PM_QOS_DEFAULT_VALUE);
- if (ret >= 0)
+ filp->private_data = (void *) pm_qos_add_request(pm_qos_class,
+ PM_QOS_DEFAULT_VALUE);
+
+ if (filp->private_data)
return 0;
}
return -EPERM;
@@ -365,32 +349,40 @@ static int pm_qos_power_open(struct inode *inode, struct file *filp)
static int pm_qos_power_release(struct inode *inode, struct file *filp)
{
- int pm_qos_class;
- char name[PID_NAME_LEN];
+ struct pm_qos_request_list *req;
- pm_qos_class = (long)filp->private_data;
- snprintf(name, PID_NAME_LEN, "process_%d", current->pid);
- pm_qos_remove_requirement(pm_qos_class, name);
+ req = (struct pm_qos_request_list *)filp->private_data;
+ pm_qos_remove_request(req);
return 0;
}
+
static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
size_t count, loff_t *f_pos)
{
s32 value;
- int pm_qos_class;
- char name[PID_NAME_LEN];
-
- pm_qos_class = (long)filp->private_data;
- if (count != sizeof(s32))
+ int x;
+ char ascii_value[11];
+ struct pm_qos_request_list *pm_qos_req;
+
+ if (count == sizeof(s32)) {
+ if (copy_from_user(&value, buf, sizeof(s32)))
+ return -EFAULT;
+ } else if (count == 11) { /* len('0x12345678\0') */
+ if (copy_from_user(ascii_value, buf, 11))
+ return -EFAULT;
+ x = sscanf(ascii_value, "%x", &value);
+ if (x != 1)
+ return -EINVAL;
+ pr_debug(KERN_ERR "%s, %d, 0x%x\n", ascii_value, x, value);
+ } else
return -EINVAL;
- if (copy_from_user(&value, buf, sizeof(s32)))
- return -EFAULT;
- snprintf(name, PID_NAME_LEN, "process_%d", current->pid);
- pm_qos_update_requirement(pm_qos_class, name, value);
- return sizeof(s32);
+ pm_qos_req = (struct pm_qos_request_list *)filp->private_data;
+ pm_qos_update_request(pm_qos_req, value);
+
+ return count;
}
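The rewritten write handler now accepts either a raw binary s32 (count ==
sizeof(s32)) or an 11-byte ASCII hex string. A hedged userspace sketch of a
client of the cpu_dma_latency misc device registered above; the 50 usec value
is only an example and error handling is minimal.

/* Userspace sketch: hold a cpu_dma_latency request while the fd stays open. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int32_t latency_us = 50;	/* example value */
	int fd = open("/dev/cpu_dma_latency", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* 4 bytes are taken as a raw s32; 11 bytes as "0x%08x" plus NUL. */
	if (write(fd, &latency_us, sizeof(latency_us)) != sizeof(latency_us)) {
		perror("write");
		close(fd);
		return 1;
	}
	pause();	/* the request stays in force until the fd is released */
	close(fd);	/* release removes the request and recomputes the target */
	return 0;
}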
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index bc7704b3a443..00bb252f29a2 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -11,19 +11,18 @@
#include <trace/events/timer.h>
/*
- * Called after updating RLIMIT_CPU to set timer expiration if necessary.
+ * Called after updating RLIMIT_CPU to run the cpu timer and update
+ * tsk->signal->cputime_expires expiration cache if necessary. Needs
+ * siglock protection since other code may update expiration cache as
+ * well.
*/
void update_rlimit_cpu(unsigned long rlim_new)
{
cputime_t cputime = secs_to_cputime(rlim_new);
- struct signal_struct *const sig = current->signal;
- if (cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) ||
- cputime_gt(sig->it[CPUCLOCK_PROF].expires, cputime)) {
- spin_lock_irq(&current->sighand->siglock);
- set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
- spin_unlock_irq(&current->sighand->siglock);
- }
+ spin_lock_irq(&current->sighand->siglock);
+ set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
+ spin_unlock_irq(&current->sighand->siglock);
}
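With this change update_rlimit_cpu() always refreshes the shared
cputime_expires cache under siglock, rather than only when the new limit is
earlier than the currently armed timer. A hedged userspace sketch of the path
that reaches it, assuming the usual sys_setrlimit() -> update_rlimit_cpu()
call chain; the limit values are examples only.

/* Userspace sketch: RLIMIT_CPU update that ends up in update_rlimit_cpu(). */
#include <sys/resource.h>
#include <stdio.h>

int main(void)
{
	struct rlimit rl = { .rlim_cur = 10, .rlim_max = 20 };	/* seconds, example values */

	if (setrlimit(RLIMIT_CPU, &rl))
		perror("setrlimit");
	return 0;
}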
static int check_clock(const clockid_t which_clock)
@@ -548,111 +547,62 @@ static inline int expires_gt(cputime_t expires, cputime_t new_exp)
cputime_gt(expires, new_exp);
}
-static inline int expires_le(cputime_t expires, cputime_t new_exp)
-{
- return !cputime_eq(expires, cputime_zero) &&
- cputime_le(expires, new_exp);
-}
/*
* Insert the timer on the appropriate list before any timers that
* expire later. This must be called with the tasklist_lock held
- * for reading, and interrupts disabled.
+ * for reading, interrupts disabled and p->sighand->siglock taken.
*/
-static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
+static void arm_timer(struct k_itimer *timer)
{
struct task_struct *p = timer->it.cpu.task;
struct list_head *head, *listpos;
+ struct task_cputime *cputime_expires;
struct cpu_timer_list *const nt = &timer->it.cpu;
struct cpu_timer_list *next;
- unsigned long i;
- head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
- p->cpu_timers : p->signal->cpu_timers);
+ if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
+ head = p->cpu_timers;
+ cputime_expires = &p->cputime_expires;
+ } else {
+ head = p->signal->cpu_timers;
+ cputime_expires = &p->signal->cputime_expires;
+ }
head += CPUCLOCK_WHICH(timer->it_clock);
- BUG_ON(!irqs_disabled());
- spin_lock(&p->sighand->siglock);
-
listpos = head;
- if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
- list_for_each_entry(next, head, entry) {
- if (next->expires.sched > nt->expires.sched)
- break;
- listpos = &next->entry;
- }
- } else {
- list_for_each_entry(next, head, entry) {
- if (cputime_gt(next->expires.cpu, nt->expires.cpu))
- break;
- listpos = &next->entry;
- }
+ list_for_each_entry(next, head, entry) {
+ if (cpu_time_before(timer->it_clock, nt->expires, next->expires))
+ break;
+ listpos = &next->entry;
}
list_add(&nt->entry, listpos);
if (listpos == head) {
+ union cpu_time_count *exp = &nt->expires;
+
/*
- * We are the new earliest-expiring timer.
- * If we are a thread timer, there can always
- * be a process timer telling us to stop earlier.
+ * We are the new earliest-expiring POSIX 1.b timer, hence
+ * need to update expiration cache. Take into account that
+ * for process timers we share expiration cache with itimers
+ * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
*/
- if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
- union cpu_time_count *exp = &nt->expires;
-
- switch (CPUCLOCK_WHICH(timer->it_clock)) {
- default:
- BUG();
- case CPUCLOCK_PROF:
- if (expires_gt(p->cputime_expires.prof_exp,
- exp->cpu))
- p->cputime_expires.prof_exp = exp->cpu;
- break;
- case CPUCLOCK_VIRT:
- if (expires_gt(p->cputime_expires.virt_exp,
- exp->cpu))
- p->cputime_expires.virt_exp = exp->cpu;
- break;
- case CPUCLOCK_SCHED:
- if (p->cputime_expires.sched_exp == 0 ||
- p->cputime_expires.sched_exp > exp->sched)
- p->cputime_expires.sched_exp =
- exp->sched;
- break;
- }
- } else {
- struct signal_struct *const sig = p->signal;
- union cpu_time_count *exp = &timer->it.cpu.expires;
-
- /*
- * For a process timer, set the cached expiration time.
- */
- switch (CPUCLOCK_WHICH(timer->it_clock)) {
- default:
- BUG();
- case CPUCLOCK_VIRT:
- if (expires_le(sig->it[CPUCLOCK_VIRT].expires,
- exp->cpu))
- break;
- sig->cputime_expires.virt_exp = exp->cpu;
- break;
- case CPUCLOCK_PROF:
- if (expires_le(sig->it[CPUCLOCK_PROF].expires,
- exp->cpu))
- break;
- i = sig->rlim[RLIMIT_CPU].rlim_cur;
- if (i != RLIM_INFINITY &&
- i <= cputime_to_secs(exp->cpu))
- break;
- sig->cputime_expires.prof_exp = exp->cpu;
- break;
- case CPUCLOCK_SCHED:
- sig->cputime_expires.sched_exp = exp->sched;
- break;
- }
+ switch (CPUCLOCK_WHICH(timer->it_clock)) {
+ case CPUCLOCK_PROF:
+ if (expires_gt(cputime_expires->prof_exp, exp->cpu))
+ cputime_expires->prof_exp = exp->cpu;
+ break;
+ case CPUCLOCK_VIRT:
+ if (expires_gt(cputime_expires->virt_exp, exp->cpu))
+ cputime_expires->virt_exp = exp->cpu;
+ break;
+ case CPUCLOCK_SCHED:
+ if (cputime_expires->sched_exp == 0 ||
+ cputime_expires->sched_exp > exp->sched)
+ cputime_expires->sched_exp = exp->sched;
+ break;
}
}
-
- spin_unlock(&p->sighand->siglock);
}
/*
@@ -660,7 +610,12 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
*/
static void cpu_timer_fire(struct k_itimer *timer)
{
- if (unlikely(timer->sigq == NULL)) {
+ if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
+ /*
+ * The user doesn't want any signal.
+ */
+ timer->it.cpu.expires.sched = 0;
+ } else if (unlikely(timer->sigq == NULL)) {
/*
* This a special case for clock_nanosleep,
* not a normal timer from sys_timer_create.
@@ -721,7 +676,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
struct itimerspec *new, struct itimerspec *old)
{
struct task_struct *p = timer->it.cpu.task;
- union cpu_time_count old_expires, new_expires, val;
+ union cpu_time_count old_expires, new_expires, old_incr, val;
int ret;
if (unlikely(p == NULL)) {
@@ -752,6 +707,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
BUG_ON(!irqs_disabled());
ret = 0;
+ old_incr = timer->it.cpu.incr;
spin_lock(&p->sighand->siglock);
old_expires = timer->it.cpu.expires;
if (unlikely(timer->it.cpu.firing)) {
@@ -759,7 +715,6 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
ret = TIMER_RETRY;
} else
list_del_init(&timer->it.cpu.entry);
- spin_unlock(&p->sighand->siglock);
/*
* We need to sample the current value to convert the new
@@ -813,6 +768,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
* disable this firing since we are already reporting
* it as an overrun (thanks to bump_cpu_timer above).
*/
+ spin_unlock(&p->sighand->siglock);
read_unlock(&tasklist_lock);
goto out;
}
@@ -828,11 +784,11 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
*/
timer->it.cpu.expires = new_expires;
if (new_expires.sched != 0 &&
- (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
cpu_time_before(timer->it_clock, val, new_expires)) {
- arm_timer(timer, val);
+ arm_timer(timer);
}
+ spin_unlock(&p->sighand->siglock);
read_unlock(&tasklist_lock);
/*
@@ -853,7 +809,6 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
timer->it_overrun = -1;
if (new_expires.sched != 0 &&
- (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
!cpu_time_before(timer->it_clock, val, new_expires)) {
/*
* The designated time already passed, so we notify
@@ -867,7 +822,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
out:
if (old) {
sample_to_timespec(timer->it_clock,
- timer->it.cpu.incr, &old->it_interval);
+ old_incr, &old->it_interval);
}
return ret;
}
@@ -927,25 +882,6 @@ void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
read_unlock(&tasklist_lock);
}
- if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
- if (timer->it.cpu.incr.sched == 0 &&
- cpu_time_before(timer->it_clock,
- timer->it.cpu.expires, now)) {
- /*
- * Do-nothing timer expired and has no reload,
- * so it's as if it was never set.
- */
- timer->it.cpu.expires.sched = 0;
- itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
- return;
- }
- /*
- * Account for any expirations and reloads that should
- * have happened.
- */
- bump_cpu_timer(timer, now);
- }
-
if (unlikely(clear_dead)) {
/*
* We've noticed that the thread is dead, but
@@ -1066,16 +1002,9 @@ static void stop_process_timers(struct signal_struct *sig)
struct thread_group_cputimer *cputimer = &sig->cputimer;
unsigned long flags;
- if (!cputimer->running)
- return;
-
spin_lock_irqsave(&cputimer->lock, flags);
cputimer->running = 0;
spin_unlock_irqrestore(&cputimer->lock, flags);
-
- sig->cputime_expires.prof_exp = cputime_zero;
- sig->cputime_expires.virt_exp = cputime_zero;
- sig->cputime_expires.sched_exp = 0;
}
static u32 onecputick;
@@ -1112,6 +1041,23 @@ static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
}
}
+/**
+ * task_cputime_zero - Check a task_cputime struct for all zero fields.
+ *
+ * @cputime: The struct to compare.
+ *
+ * Checks @cputime to see if all fields are zero. Returns true if all fields
+ * are zero, false if any field is nonzero.
+ */
+static inline int task_cputime_zero(const struct task_cputime *cputime)
+{
+ if (cputime_eq(cputime->utime, cputime_zero) &&
+ cputime_eq(cputime->stime, cputime_zero) &&
+ cputime->sum_exec_runtime == 0)
+ return 1;
+ return 0;
+}
+
/*
* Check for any per-thread CPU timers that have fired and move them
* off the tsk->*_timers list onto the firing list. Per-thread timers
@@ -1129,19 +1075,6 @@ static void check_process_timers(struct task_struct *tsk,
unsigned long soft;
/*
- * Don't sample the current process CPU clocks if there are no timers.
- */
- if (list_empty(&timers[CPUCLOCK_PROF]) &&
- cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) &&
- sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
- list_empty(&timers[CPUCLOCK_VIRT]) &&
- cputime_eq(sig->it[CPUCLOCK_VIRT].expires, cputime_zero) &&
- list_empty(&timers[CPUCLOCK_SCHED])) {
- stop_process_timers(sig);
- return;
- }
-
- /*
* Collect the current process totals.
*/
thread_group_cputimer(tsk, &cputime);
@@ -1230,18 +1163,11 @@ static void check_process_timers(struct task_struct *tsk,
}
}
- if (!cputime_eq(prof_expires, cputime_zero) &&
- (cputime_eq(sig->cputime_expires.prof_exp, cputime_zero) ||
- cputime_gt(sig->cputime_expires.prof_exp, prof_expires)))
- sig->cputime_expires.prof_exp = prof_expires;
- if (!cputime_eq(virt_expires, cputime_zero) &&
- (cputime_eq(sig->cputime_expires.virt_exp, cputime_zero) ||
- cputime_gt(sig->cputime_expires.virt_exp, virt_expires)))
- sig->cputime_expires.virt_exp = virt_expires;
- if (sched_expires != 0 &&
- (sig->cputime_expires.sched_exp == 0 ||
- sig->cputime_expires.sched_exp > sched_expires))
- sig->cputime_expires.sched_exp = sched_expires;
+ sig->cputime_expires.prof_exp = prof_expires;
+ sig->cputime_expires.virt_exp = virt_expires;
+ sig->cputime_expires.sched_exp = sched_expires;
+ if (task_cputime_zero(&sig->cputime_expires))
+ stop_process_timers(sig);
}
/*
@@ -1270,6 +1196,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
goto out;
}
read_lock(&tasklist_lock); /* arm_timer needs it. */
+ spin_lock(&p->sighand->siglock);
} else {
read_lock(&tasklist_lock);
if (unlikely(p->signal == NULL)) {
@@ -1290,6 +1217,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
clear_dead_task(timer, now);
goto out_unlock;
}
+ spin_lock(&p->sighand->siglock);
cpu_timer_sample_group(timer->it_clock, p, &now);
bump_cpu_timer(timer, now);
/* Leave the tasklist_lock locked for the call below. */
@@ -1298,7 +1226,9 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
/*
* Now re-arm for the new expiry time.
*/
- arm_timer(timer, now);
+ BUG_ON(!irqs_disabled());
+ arm_timer(timer);
+ spin_unlock(&p->sighand->siglock);
out_unlock:
read_unlock(&tasklist_lock);
@@ -1310,23 +1240,6 @@ out:
}
/**
- * task_cputime_zero - Check a task_cputime struct for all zero fields.
- *
- * @cputime: The struct to compare.
- *
- * Checks @cputime to see if all fields are zero. Returns true if all fields
- * are zero, false if any field is nonzero.
- */
-static inline int task_cputime_zero(const struct task_cputime *cputime)
-{
- if (cputime_eq(cputime->utime, cputime_zero) &&
- cputime_eq(cputime->stime, cputime_zero) &&
- cputime->sum_exec_runtime == 0)
- return 1;
- return 0;
-}
-
-/**
* task_cputime_expired - Compare two task_cputime entities.
*
* @sample: The task_cputime structure to be checked for expiration.
@@ -1382,7 +1295,7 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
}
sig = tsk->signal;
- if (!task_cputime_zero(&sig->cputime_expires)) {
+ if (sig->cputimer.running) {
struct task_cputime group_sample;
thread_group_cputimer(tsk, &group_sample);
@@ -1390,7 +1303,7 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
return 1;
}
- return sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY;
+ return 0;
}
/*
@@ -1419,7 +1332,12 @@ void run_posix_cpu_timers(struct task_struct *tsk)
* put them on the firing list.
*/
check_thread_timers(tsk, &firing);
- check_process_timers(tsk, &firing);
+ /*
+ * If there are any active process wide timers (POSIX 1.b, itimers,
+ * RLIMIT_CPU) cputimer must be running.
+ */
+ if (tsk->signal->cputimer.running)
+ check_process_timers(tsk, &firing);
/*
* We must release these locks before taking any timer's lock.
@@ -1456,21 +1374,23 @@ void run_posix_cpu_timers(struct task_struct *tsk)
}
/*
- * Set one of the process-wide special case CPU timers.
+ * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
* The tsk->sighand->siglock must be held by the caller.
- * The *newval argument is relative and we update it to be absolute, *oldval
- * is absolute and we update it to be relative.
*/
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
cputime_t *newval, cputime_t *oldval)
{
union cpu_time_count now;
- struct list_head *head;
BUG_ON(clock_idx == CPUCLOCK_SCHED);
cpu_timer_sample_group(clock_idx, tsk, &now);
if (oldval) {
+ /*
+ * We are setting itimer.  The *oldval argument is absolute and we
+ * update it to be relative; the *newval argument is relative and we
+ * update it to be absolute.
+ */
if (!cputime_eq(*oldval, cputime_zero)) {
if (cputime_le(*oldval, now.cpu)) {
/* Just about to fire. */
@@ -1483,33 +1403,21 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
if (cputime_eq(*newval, cputime_zero))
return;
*newval = cputime_add(*newval, now.cpu);
-
- /*
- * If the RLIMIT_CPU timer will expire before the
- * ITIMER_PROF timer, we have nothing else to do.
- */
- if (tsk->signal->rlim[RLIMIT_CPU].rlim_cur
- < cputime_to_secs(*newval))
- return;
}
/*
- * Check whether there are any process timers already set to fire
- * before this one. If so, we don't have anything more to do.
+ * Update the expiration cache if we are the earliest timer, or if the
+ * RLIMIT_CPU limit expires earlier than the prof_exp cpu timer.
*/
- head = &tsk->signal->cpu_timers[clock_idx];
- if (list_empty(head) ||
- cputime_ge(list_first_entry(head,
- struct cpu_timer_list, entry)->expires.cpu,
- *newval)) {
- switch (clock_idx) {
- case CPUCLOCK_PROF:
+ switch (clock_idx) {
+ case CPUCLOCK_PROF:
+ if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
tsk->signal->cputime_expires.prof_exp = *newval;
- break;
- case CPUCLOCK_VIRT:
+ break;
+ case CPUCLOCK_VIRT:
+ if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
tsk->signal->cputime_expires.virt_exp = *newval;
- break;
- }
+ break;
}
}
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index 43191815f874..524e058dcf06 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -8,7 +8,8 @@ obj-$(CONFIG_PM_SLEEP) += console.o
obj-$(CONFIG_FREEZER) += process.o
obj-$(CONFIG_SUSPEND) += suspend.o
obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o
-obj-$(CONFIG_HIBERNATION) += hibernate.o snapshot.o swap.o user.o
+obj-$(CONFIG_HIBERNATION) += hibernate.o snapshot.o swap.o user.o \
+ block_io.o
obj-$(CONFIG_HIBERNATION_NVS) += hibernate_nvs.o
obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o
diff --git a/kernel/power/block_io.c b/kernel/power/block_io.c
new file mode 100644
index 000000000000..97024fd40cd5
--- /dev/null
+++ b/kernel/power/block_io.c
@@ -0,0 +1,103 @@
+/*
+ * This file provides functions for block I/O operations on swap/file.
+ *
+ * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
+ * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/bio.h>
+#include <linux/kernel.h>
+#include <linux/pagemap.h>
+#include <linux/swap.h>
+
+#include "power.h"
+
+/**
+ * submit - submit BIO request.
+ * @rw: READ or WRITE.
+ * @bdev: block device to read from or write to.
+ * @sector: physical sector of the page.
+ * @page: page we're reading or writing.
+ * @bio_chain: list of pending bios (for async reading)
+ *
+ * Straight from the textbook - allocate and initialize the bio.
+ * If we're reading, make sure the page is marked as dirty.
+ * Then submit it and, if @bio_chain == NULL, wait.
+ */
+static int submit(int rw, struct block_device *bdev, sector_t sector,
+ struct page *page, struct bio **bio_chain)
+{
+ const int bio_rw = rw | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
+ struct bio *bio;
+
+ bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
+ bio->bi_sector = sector;
+ bio->bi_bdev = bdev;
+ bio->bi_end_io = end_swap_bio_read;
+
+ if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
+ printk(KERN_ERR "PM: Adding page to bio failed at %llu\n",
+ (unsigned long long)sector);
+ bio_put(bio);
+ return -EFAULT;
+ }
+
+ lock_page(page);
+ bio_get(bio);
+
+ if (bio_chain == NULL) {
+ submit_bio(bio_rw, bio);
+ wait_on_page_locked(page);
+ if (rw == READ)
+ bio_set_pages_dirty(bio);
+ bio_put(bio);
+ } else {
+ if (rw == READ)
+ get_page(page); /* These pages are freed later */
+ bio->bi_private = *bio_chain;
+ *bio_chain = bio;
+ submit_bio(bio_rw, bio);
+ }
+ return 0;
+}
+
+int hib_bio_read_page(pgoff_t page_off, void *addr, struct bio **bio_chain)
+{
+ return submit(READ, hib_resume_bdev, page_off * (PAGE_SIZE >> 9),
+ virt_to_page(addr), bio_chain);
+}
+
+int hib_bio_write_page(pgoff_t page_off, void *addr, struct bio **bio_chain)
+{
+ return submit(WRITE, hib_resume_bdev, page_off * (PAGE_SIZE >> 9),
+ virt_to_page(addr), bio_chain);
+}
+
+int hib_wait_on_bio_chain(struct bio **bio_chain)
+{
+ struct bio *bio;
+ struct bio *next_bio;
+ int ret = 0;
+
+ if (bio_chain == NULL)
+ return 0;
+
+ bio = *bio_chain;
+ if (bio == NULL)
+ return 0;
+ while (bio) {
+ struct page *page;
+
+ next_bio = bio->bi_private;
+ page = bio->bi_io_vec[0].bv_page;
+ wait_on_page_locked(page);
+ if (!PageUptodate(page) || PageError(page))
+ ret = -EIO;
+ put_page(page);
+ bio_put(bio);
+ bio = next_bio;
+ }
+ *bio_chain = NULL;
+ return ret;
+}
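A short sketch of the usage pattern these new helpers support: queue several
page I/Os on one bio_chain and wait for the whole chain once. The helper below
and its arguments are illustrative only; the buffers must be page-sized kernel
addresses, since submit() maps them with virt_to_page(), and the hib_bio_*
declarations from power.h are assumed to be in scope.

/* Sketch (not in this patch): chained asynchronous reads via hib_bio_*. */
static int example_read_two_pages(pgoff_t off_a, void *page_a,
				  pgoff_t off_b, void *page_b)
{
	struct bio *bio_chain = NULL;
	int error;

	error = hib_bio_read_page(off_a, page_a, &bio_chain);	/* queued only */
	if (!error)
		error = hib_bio_read_page(off_b, page_b, &bio_chain);
	if (!error)
		error = hib_wait_on_bio_chain(&bio_chain);	/* wait for both */
	return error;
}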
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 46c5a26630a3..006270fe382d 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -97,24 +97,12 @@ extern int hibernate_preallocate_memory(void);
*/
struct snapshot_handle {
- loff_t offset; /* number of the last byte ready for reading
- * or writing in the sequence
- */
unsigned int cur; /* number of the block of PAGE_SIZE bytes the
* next operation will refer to (ie. current)
*/
- unsigned int cur_offset; /* offset with respect to the current
- * block (for the next operation)
- */
- unsigned int prev; /* number of the block of PAGE_SIZE bytes that
- * was the current one previously
- */
void *buffer; /* address of the block to read from
* or write to
*/
- unsigned int buf_offset; /* location to read from or write to,
- * given as a displacement from 'buffer'
- */
int sync_read; /* Set to one to notify the caller of
* snapshot_write_next() that it may
* need to call wait_on_bio_chain()
@@ -125,12 +113,12 @@ struct snapshot_handle {
* snapshot_read_next()/snapshot_write_next() is allowed to
* read/write data after the function returns
*/
-#define data_of(handle) ((handle).buffer + (handle).buf_offset)
+#define data_of(handle) ((handle).buffer)
extern unsigned int snapshot_additional_pages(struct zone *zone);
extern unsigned long snapshot_get_image_size(void);
-extern int snapshot_read_next(struct snapshot_handle *handle, size_t count);
-extern int snapshot_write_next(struct snapshot_handle *handle, size_t count);
+extern int snapshot_read_next(struct snapshot_handle *handle);
+extern int snapshot_write_next(struct snapshot_handle *handle);
extern void snapshot_write_finalize(struct snapshot_handle *handle);
extern int snapshot_image_loaded(struct snapshot_handle *handle);
@@ -154,6 +142,15 @@ extern int swsusp_read(unsigned int *flags_p);
extern int swsusp_write(unsigned int flags);
extern void swsusp_close(fmode_t);
+/* kernel/power/block_io.c */
+extern struct block_device *hib_resume_bdev;
+
+extern int hib_bio_read_page(pgoff_t page_off, void *addr,
+ struct bio **bio_chain);
+extern int hib_bio_write_page(pgoff_t page_off, void *addr,
+ struct bio **bio_chain);
+extern int hib_wait_on_bio_chain(struct bio **bio_chain);
+
struct timeval;
/* kernel/power/swsusp.c */
extern void swsusp_show_speed(struct timeval *, struct timeval *,
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index be861c26dda7..25ce010e9f8b 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1604,14 +1604,9 @@ pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
* snapshot_handle structure. The structure gets updated and a pointer
* to it should be passed to this function every next time.
*
- * The @count parameter should contain the number of bytes the caller
- * wants to read from the snapshot. It must not be zero.
- *
* On success the function returns a positive number. Then, the caller
* is allowed to read up to the returned number of bytes from the memory
- * location computed by the data_of() macro. The number returned
- * may be smaller than @count, but this only happens if the read would
- * cross a page boundary otherwise.
+ * location computed by the data_of() macro.
*
* The function returns 0 to indicate the end of data stream condition,
* and a negative number is returned on error. In such cases the
@@ -1619,7 +1614,7 @@ pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
* any more.
*/
-int snapshot_read_next(struct snapshot_handle *handle, size_t count)
+int snapshot_read_next(struct snapshot_handle *handle)
{
if (handle->cur > nr_meta_pages + nr_copy_pages)
return 0;
@@ -1630,7 +1625,7 @@ int snapshot_read_next(struct snapshot_handle *handle, size_t count)
if (!buffer)
return -ENOMEM;
}
- if (!handle->offset) {
+ if (!handle->cur) {
int error;
error = init_header((struct swsusp_info *)buffer);
@@ -1639,42 +1634,30 @@ int snapshot_read_next(struct snapshot_handle *handle, size_t count)
handle->buffer = buffer;
memory_bm_position_reset(&orig_bm);
memory_bm_position_reset(&copy_bm);
- }
- if (handle->prev < handle->cur) {
- if (handle->cur <= nr_meta_pages) {
- memset(buffer, 0, PAGE_SIZE);
- pack_pfns(buffer, &orig_bm);
- } else {
- struct page *page;
+ } else if (handle->cur <= nr_meta_pages) {
+ memset(buffer, 0, PAGE_SIZE);
+ pack_pfns(buffer, &orig_bm);
+ } else {
+ struct page *page;
- page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
- if (PageHighMem(page)) {
- /* Highmem pages are copied to the buffer,
- * because we can't return with a kmapped
- * highmem page (we may not be called again).
- */
- void *kaddr;
+ page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
+ if (PageHighMem(page)) {
+ /* Highmem pages are copied to the buffer,
+ * because we can't return with a kmapped
+ * highmem page (we may not be called again).
+ */
+ void *kaddr;
- kaddr = kmap_atomic(page, KM_USER0);
- memcpy(buffer, kaddr, PAGE_SIZE);
- kunmap_atomic(kaddr, KM_USER0);
- handle->buffer = buffer;
- } else {
- handle->buffer = page_address(page);
- }
+ kaddr = kmap_atomic(page, KM_USER0);
+ memcpy(buffer, kaddr, PAGE_SIZE);
+ kunmap_atomic(kaddr, KM_USER0);
+ handle->buffer = buffer;
+ } else {
+ handle->buffer = page_address(page);
}
- handle->prev = handle->cur;
- }
- handle->buf_offset = handle->cur_offset;
- if (handle->cur_offset + count >= PAGE_SIZE) {
- count = PAGE_SIZE - handle->cur_offset;
- handle->cur_offset = 0;
- handle->cur++;
- } else {
- handle->cur_offset += count;
}
- handle->offset += count;
- return count;
+ handle->cur++;
+ return PAGE_SIZE;
}
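After this simplification every successful call hands back exactly one page,
so a caller loop becomes a plain "one call, one page" walk. A hedged sketch
mirroring the save_image() loop in kernel/power/swap.c; consume_page() is a
hypothetical consumer, not an existing function.

/* Sketch: walking the snapshot one PAGE_SIZE block per call. */
static int example_walk_image(struct snapshot_handle *snapshot)
{
	int ret;

	memset(snapshot, 0, sizeof(*snapshot));
	while ((ret = snapshot_read_next(snapshot)) > 0)
		consume_page(data_of(*snapshot));	/* hypothetical consumer */

	return ret;	/* 0 at end of data, negative on error */
}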
/**
@@ -2133,14 +2116,9 @@ static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
* snapshot_handle structure. The structure gets updated and a pointer
* to it should be passed to this function every next time.
*
- * The @count parameter should contain the number of bytes the caller
- * wants to write to the image. It must not be zero.
- *
* On success the function returns a positive number. Then, the caller
* is allowed to write up to the returned number of bytes to the memory
- * location computed by the data_of() macro. The number returned
- * may be smaller than @count, but this only happens if the write would
- * cross a page boundary otherwise.
+ * location computed by the data_of() macro.
*
* The function returns 0 to indicate the "end of file" condition,
* and a negative number is returned on error. In such cases the
@@ -2148,16 +2126,18 @@ static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
* any more.
*/
-int snapshot_write_next(struct snapshot_handle *handle, size_t count)
+int snapshot_write_next(struct snapshot_handle *handle)
{
static struct chain_allocator ca;
int error = 0;
/* Check if we have already loaded the entire image */
- if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages)
+ if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
return 0;
- if (handle->offset == 0) {
+ handle->sync_read = 1;
+
+ if (!handle->cur) {
if (!buffer)
/* This makes the buffer be freed by swsusp_free() */
buffer = get_image_page(GFP_ATOMIC, PG_ANY);
@@ -2166,56 +2146,43 @@ int snapshot_write_next(struct snapshot_handle *handle, size_t count)
return -ENOMEM;
handle->buffer = buffer;
- }
- handle->sync_read = 1;
- if (handle->prev < handle->cur) {
- if (handle->prev == 0) {
- error = load_header(buffer);
- if (error)
- return error;
+ } else if (handle->cur == 1) {
+ error = load_header(buffer);
+ if (error)
+ return error;
- error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
- if (error)
- return error;
+ error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
+ if (error)
+ return error;
+
+ } else if (handle->cur <= nr_meta_pages + 1) {
+ error = unpack_orig_pfns(buffer, &copy_bm);
+ if (error)
+ return error;
- } else if (handle->prev <= nr_meta_pages) {
- error = unpack_orig_pfns(buffer, &copy_bm);
+ if (handle->cur == nr_meta_pages + 1) {
+ error = prepare_image(&orig_bm, &copy_bm);
if (error)
return error;
- if (handle->prev == nr_meta_pages) {
- error = prepare_image(&orig_bm, &copy_bm);
- if (error)
- return error;
-
- chain_init(&ca, GFP_ATOMIC, PG_SAFE);
- memory_bm_position_reset(&orig_bm);
- restore_pblist = NULL;
- handle->buffer = get_buffer(&orig_bm, &ca);
- handle->sync_read = 0;
- if (IS_ERR(handle->buffer))
- return PTR_ERR(handle->buffer);
- }
- } else {
- copy_last_highmem_page();
+ chain_init(&ca, GFP_ATOMIC, PG_SAFE);
+ memory_bm_position_reset(&orig_bm);
+ restore_pblist = NULL;
handle->buffer = get_buffer(&orig_bm, &ca);
+ handle->sync_read = 0;
if (IS_ERR(handle->buffer))
return PTR_ERR(handle->buffer);
- if (handle->buffer != buffer)
- handle->sync_read = 0;
}
- handle->prev = handle->cur;
- }
- handle->buf_offset = handle->cur_offset;
- if (handle->cur_offset + count >= PAGE_SIZE) {
- count = PAGE_SIZE - handle->cur_offset;
- handle->cur_offset = 0;
- handle->cur++;
} else {
- handle->cur_offset += count;
+ copy_last_highmem_page();
+ handle->buffer = get_buffer(&orig_bm, &ca);
+ if (IS_ERR(handle->buffer))
+ return PTR_ERR(handle->buffer);
+ if (handle->buffer != buffer)
+ handle->sync_read = 0;
}
- handle->offset += count;
- return count;
+ handle->cur++;
+ return PAGE_SIZE;
}
/**
@@ -2230,7 +2197,7 @@ void snapshot_write_finalize(struct snapshot_handle *handle)
{
copy_last_highmem_page();
/* Free only if we have loaded the image entirely */
- if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages) {
+ if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
free_highmem_data();
}
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 66824d71983a..b0bb21778391 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -29,6 +29,40 @@
#define SWSUSP_SIG "S1SUSPEND"
+/*
+ * The swap map is a data structure used for keeping track of each page
+ * written to a swap partition. It consists of many swap_map_page
+ * structures that each contain an array of MAP_PAGE_ENTRIES swap entries.
+ * These structures are stored on the swap and linked together with the
+ * help of the .next_swap member.
+ *
+ * The swap map is created during suspend. The swap map pages are
+ * allocated and populated one at a time, so we only need one memory
+ * page to set up the entire structure.
+ *
+ * During resume we also only need to use one swap_map_page structure
+ * at a time.
+ */
+
+#define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1)
+
+struct swap_map_page {
+ sector_t entries[MAP_PAGE_ENTRIES];
+ sector_t next_swap;
+};
+
+/**
+ * The swap_map_handle structure is used for handling swap in
+ * a file-like way
+ */
+
+struct swap_map_handle {
+ struct swap_map_page *cur;
+ sector_t cur_swap;
+ sector_t first_sector;
+ unsigned int k;
+};
+
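For orientation, the sizing works out as follows, assuming 4 KiB pages and an
8-byte sector_t (the common configuration; other page sizes scale accordingly):

/*
 * MAP_PAGE_ENTRIES = PAGE_SIZE / sizeof(sector_t) - 1 = 4096 / 8 - 1 = 511,
 * so one swap_map_page indexes 511 image pages (about 2 MiB of image data)
 * and the remaining sector_t (.next_swap) links to the next map page,
 * keeping the whole structure exactly one page in size.
 */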
struct swsusp_header {
char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int)];
sector_t image;
@@ -145,110 +179,24 @@ int swsusp_swap_in_use(void)
*/
static unsigned short root_swap = 0xffff;
-static struct block_device *resume_bdev;
-
-/**
- * submit - submit BIO request.
- * @rw: READ or WRITE.
- * @off physical offset of page.
- * @page: page we're reading or writing.
- * @bio_chain: list of pending biod (for async reading)
- *
- * Straight from the textbook - allocate and initialize the bio.
- * If we're reading, make sure the page is marked as dirty.
- * Then submit it and, if @bio_chain == NULL, wait.
- */
-static int submit(int rw, pgoff_t page_off, struct page *page,
- struct bio **bio_chain)
-{
- const int bio_rw = rw | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
- struct bio *bio;
-
- bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
- bio->bi_sector = page_off * (PAGE_SIZE >> 9);
- bio->bi_bdev = resume_bdev;
- bio->bi_end_io = end_swap_bio_read;
-
- if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
- printk(KERN_ERR "PM: Adding page to bio failed at %ld\n",
- page_off);
- bio_put(bio);
- return -EFAULT;
- }
-
- lock_page(page);
- bio_get(bio);
-
- if (bio_chain == NULL) {
- submit_bio(bio_rw, bio);
- wait_on_page_locked(page);
- if (rw == READ)
- bio_set_pages_dirty(bio);
- bio_put(bio);
- } else {
- if (rw == READ)
- get_page(page); /* These pages are freed later */
- bio->bi_private = *bio_chain;
- *bio_chain = bio;
- submit_bio(bio_rw, bio);
- }
- return 0;
-}
-
-static int bio_read_page(pgoff_t page_off, void *addr, struct bio **bio_chain)
-{
- return submit(READ, page_off, virt_to_page(addr), bio_chain);
-}
-
-static int bio_write_page(pgoff_t page_off, void *addr, struct bio **bio_chain)
-{
- return submit(WRITE, page_off, virt_to_page(addr), bio_chain);
-}
-
-static int wait_on_bio_chain(struct bio **bio_chain)
-{
- struct bio *bio;
- struct bio *next_bio;
- int ret = 0;
-
- if (bio_chain == NULL)
- return 0;
-
- bio = *bio_chain;
- if (bio == NULL)
- return 0;
- while (bio) {
- struct page *page;
-
- next_bio = bio->bi_private;
- page = bio->bi_io_vec[0].bv_page;
- wait_on_page_locked(page);
- if (!PageUptodate(page) || PageError(page))
- ret = -EIO;
- put_page(page);
- bio_put(bio);
- bio = next_bio;
- }
- *bio_chain = NULL;
- return ret;
-}
+struct block_device *hib_resume_bdev;
/*
* Saving part
*/
-static int mark_swapfiles(sector_t start, unsigned int flags)
+static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
{
int error;
- bio_read_page(swsusp_resume_block, swsusp_header, NULL);
+ hib_bio_read_page(swsusp_resume_block, swsusp_header, NULL);
if (!memcmp("SWAP-SPACE",swsusp_header->sig, 10) ||
!memcmp("SWAPSPACE2",swsusp_header->sig, 10)) {
memcpy(swsusp_header->orig_sig,swsusp_header->sig, 10);
memcpy(swsusp_header->sig,SWSUSP_SIG, 10);
- swsusp_header->image = start;
+ swsusp_header->image = handle->first_sector;
swsusp_header->flags = flags;
- error = bio_write_page(swsusp_resume_block,
+ error = hib_bio_write_page(swsusp_resume_block,
swsusp_header, NULL);
} else {
printk(KERN_ERR "PM: Swap header not found!\n");
@@ -260,25 +208,26 @@ static int mark_swapfiles(sector_t start, unsigned int flags)
/**
* swsusp_swap_check - check if the resume device is a swap device
* and get its index (if so)
+ *
+ * This is called before saving the image.
*/
-
-static int swsusp_swap_check(void) /* This is called before saving image */
+static int swsusp_swap_check(void)
{
int res;
res = swap_type_of(swsusp_resume_device, swsusp_resume_block,
- &resume_bdev);
+ &hib_resume_bdev);
if (res < 0)
return res;
root_swap = res;
- res = blkdev_get(resume_bdev, FMODE_WRITE);
+ res = blkdev_get(hib_resume_bdev, FMODE_WRITE);
if (res)
return res;
- res = set_blocksize(resume_bdev, PAGE_SIZE);
+ res = set_blocksize(hib_resume_bdev, PAGE_SIZE);
if (res < 0)
- blkdev_put(resume_bdev, FMODE_WRITE);
+ blkdev_put(hib_resume_bdev, FMODE_WRITE);
return res;
}
@@ -309,42 +258,9 @@ static int write_page(void *buf, sector_t offset, struct bio **bio_chain)
} else {
src = buf;
}
- return bio_write_page(offset, src, bio_chain);
+ return hib_bio_write_page(offset, src, bio_chain);
}
-/*
- * The swap map is a data structure used for keeping track of each page
- * written to a swap partition. It consists of many swap_map_page
- * structures that contain each an array of MAP_PAGE_SIZE swap entries.
- * These structures are stored on the swap and linked together with the
- * help of the .next_swap member.
- *
- * The swap map is created during suspend. The swap map pages are
- * allocated and populated one at a time, so we only need one memory
- * page to set up the entire structure.
- *
- * During resume we also only need to use one swap_map_page structure
- * at a time.
- */
-
-#define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1)
-
-struct swap_map_page {
- sector_t entries[MAP_PAGE_ENTRIES];
- sector_t next_swap;
-};
-
-/**
- * The swap_map_handle structure is used for handling swap in
- * a file-alike way
- */
-
-struct swap_map_handle {
- struct swap_map_page *cur;
- sector_t cur_swap;
- unsigned int k;
-};
-
static void release_swap_writer(struct swap_map_handle *handle)
{
if (handle->cur)
@@ -354,16 +270,33 @@ static void release_swap_writer(struct swap_map_handle *handle)
static int get_swap_writer(struct swap_map_handle *handle)
{
+ int ret;
+
+ ret = swsusp_swap_check();
+ if (ret) {
+ if (ret != -ENOSPC)
+ printk(KERN_ERR "PM: Cannot find swap device, try "
+ "swapon -a.\n");
+ return ret;
+ }
handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
- if (!handle->cur)
- return -ENOMEM;
+ if (!handle->cur) {
+ ret = -ENOMEM;
+ goto err_close;
+ }
handle->cur_swap = alloc_swapdev_block(root_swap);
if (!handle->cur_swap) {
- release_swap_writer(handle);
- return -ENOSPC;
+ ret = -ENOSPC;
+ goto err_rel;
}
handle->k = 0;
+ handle->first_sector = handle->cur_swap;
return 0;
+err_rel:
+ release_swap_writer(handle);
+err_close:
+ swsusp_close(FMODE_WRITE);
+ return ret;
}
static int swap_write_page(struct swap_map_handle *handle, void *buf,
@@ -380,7 +313,7 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf,
return error;
handle->cur->entries[handle->k++] = offset;
if (handle->k >= MAP_PAGE_ENTRIES) {
- error = wait_on_bio_chain(bio_chain);
+ error = hib_wait_on_bio_chain(bio_chain);
if (error)
goto out;
offset = alloc_swapdev_block(root_swap);
@@ -406,6 +339,24 @@ static int flush_swap_writer(struct swap_map_handle *handle)
return -EINVAL;
}
+static int swap_writer_finish(struct swap_map_handle *handle,
+ unsigned int flags, int error)
+{
+ if (!error) {
+ flush_swap_writer(handle);
+ printk(KERN_INFO "PM: S");
+ error = mark_swapfiles(handle, flags);
+ printk("|\n");
+ }
+
+ if (error)
+ free_all_swap_pages(root_swap);
+ release_swap_writer(handle);
+ swsusp_close(FMODE_WRITE);
+
+ return error;
+}
+
/**
* save_image - save the suspend image data
*/
@@ -431,7 +382,7 @@ static int save_image(struct swap_map_handle *handle,
bio = NULL;
do_gettimeofday(&start);
while (1) {
- ret = snapshot_read_next(snapshot, PAGE_SIZE);
+ ret = snapshot_read_next(snapshot);
if (ret <= 0)
break;
ret = swap_write_page(handle, data_of(*snapshot), &bio);
@@ -441,7 +392,7 @@ static int save_image(struct swap_map_handle *handle,
printk(KERN_CONT "\b\b\b\b%3d%%", nr_pages / m);
nr_pages++;
}
- err2 = wait_on_bio_chain(&bio);
+ err2 = hib_wait_on_bio_chain(&bio);
do_gettimeofday(&stop);
if (!ret)
ret = err2;
@@ -483,50 +434,34 @@ int swsusp_write(unsigned int flags)
struct swap_map_handle handle;
struct snapshot_handle snapshot;
struct swsusp_info *header;
+ unsigned long pages;
int error;
- error = swsusp_swap_check();
+ pages = snapshot_get_image_size();
+ error = get_swap_writer(&handle);
if (error) {
- printk(KERN_ERR "PM: Cannot find swap device, try "
- "swapon -a.\n");
+ printk(KERN_ERR "PM: Cannot get swap writer\n");
return error;
}
+ if (!enough_swap(pages)) {
+ printk(KERN_ERR "PM: Not enough free swap\n");
+ error = -ENOSPC;
+ goto out_finish;
+ }
memset(&snapshot, 0, sizeof(struct snapshot_handle));
- error = snapshot_read_next(&snapshot, PAGE_SIZE);
+ error = snapshot_read_next(&snapshot);
if (error < PAGE_SIZE) {
if (error >= 0)
error = -EFAULT;
- goto out;
+ goto out_finish;
}
header = (struct swsusp_info *)data_of(snapshot);
- if (!enough_swap(header->pages)) {
- printk(KERN_ERR "PM: Not enough free swap\n");
- error = -ENOSPC;
- goto out;
- }
- error = get_swap_writer(&handle);
- if (!error) {
- sector_t start = handle.cur_swap;
-
- error = swap_write_page(&handle, header, NULL);
- if (!error)
- error = save_image(&handle, &snapshot,
- header->pages - 1);
-
- if (!error) {
- flush_swap_writer(&handle);
- printk(KERN_INFO "PM: S");
- error = mark_swapfiles(start, flags);
- printk("|\n");
- }
- }
- if (error)
- free_all_swap_pages(root_swap);
-
- release_swap_writer(&handle);
- out:
- swsusp_close(FMODE_WRITE);
+ error = swap_write_page(&handle, header, NULL);
+ if (!error)
+ error = save_image(&handle, &snapshot, pages - 1);
+out_finish:
+ error = swap_writer_finish(&handle, flags, error);
return error;
}
@@ -542,18 +477,21 @@ static void release_swap_reader(struct swap_map_handle *handle)
handle->cur = NULL;
}
-static int get_swap_reader(struct swap_map_handle *handle, sector_t start)
+static int get_swap_reader(struct swap_map_handle *handle,
+ unsigned int *flags_p)
{
int error;
- if (!start)
+ *flags_p = swsusp_header->flags;
+
+ if (!swsusp_header->image) /* how can this happen? */
return -EINVAL;
handle->cur = (struct swap_map_page *)get_zeroed_page(__GFP_WAIT | __GFP_HIGH);
if (!handle->cur)
return -ENOMEM;
- error = bio_read_page(start, handle->cur, NULL);
+ error = hib_bio_read_page(swsusp_header->image, handle->cur, NULL);
if (error) {
release_swap_reader(handle);
return error;
@@ -573,21 +511,28 @@ static int swap_read_page(struct swap_map_handle *handle, void *buf,
offset = handle->cur->entries[handle->k];
if (!offset)
return -EFAULT;
- error = bio_read_page(offset, buf, bio_chain);
+ error = hib_bio_read_page(offset, buf, bio_chain);
if (error)
return error;
if (++handle->k >= MAP_PAGE_ENTRIES) {
- error = wait_on_bio_chain(bio_chain);
+ error = hib_wait_on_bio_chain(bio_chain);
handle->k = 0;
offset = handle->cur->next_swap;
if (!offset)
release_swap_reader(handle);
else if (!error)
- error = bio_read_page(offset, handle->cur, NULL);
+ error = hib_bio_read_page(offset, handle->cur, NULL);
}
return error;
}
+static int swap_reader_finish(struct swap_map_handle *handle)
+{
+ release_swap_reader(handle);
+
+ return 0;
+}
+
/**
* load_image - load the image using the swap map handle
* @handle and the snapshot handle @snapshot
@@ -615,21 +560,21 @@ static int load_image(struct swap_map_handle *handle,
bio = NULL;
do_gettimeofday(&start);
for ( ; ; ) {
- error = snapshot_write_next(snapshot, PAGE_SIZE);
+ error = snapshot_write_next(snapshot);
if (error <= 0)
break;
error = swap_read_page(handle, data_of(*snapshot), &bio);
if (error)
break;
if (snapshot->sync_read)
- error = wait_on_bio_chain(&bio);
+ error = hib_wait_on_bio_chain(&bio);
if (error)
break;
if (!(nr_pages % m))
printk("\b\b\b\b%3d%%", nr_pages / m);
nr_pages++;
}
- err2 = wait_on_bio_chain(&bio);
+ err2 = hib_wait_on_bio_chain(&bio);
do_gettimeofday(&stop);
if (!error)
error = err2;
@@ -657,20 +602,20 @@ int swsusp_read(unsigned int *flags_p)
struct snapshot_handle snapshot;
struct swsusp_info *header;
- *flags_p = swsusp_header->flags;
-
memset(&snapshot, 0, sizeof(struct snapshot_handle));
- error = snapshot_write_next(&snapshot, PAGE_SIZE);
+ error = snapshot_write_next(&snapshot);
if (error < PAGE_SIZE)
return error < 0 ? error : -EFAULT;
header = (struct swsusp_info *)data_of(snapshot);
- error = get_swap_reader(&handle, swsusp_header->image);
+ error = get_swap_reader(&handle, flags_p);
+ if (error)
+ goto end;
if (!error)
error = swap_read_page(&handle, header, NULL);
if (!error)
error = load_image(&handle, &snapshot, header->pages - 1);
- release_swap_reader(&handle);
-
+ swap_reader_finish(&handle);
+end:
if (!error)
pr_debug("PM: Image successfully loaded\n");
else
@@ -686,11 +631,11 @@ int swsusp_check(void)
{
int error;
- resume_bdev = open_by_devnum(swsusp_resume_device, FMODE_READ);
- if (!IS_ERR(resume_bdev)) {
- set_blocksize(resume_bdev, PAGE_SIZE);
+ hib_resume_bdev = open_by_devnum(swsusp_resume_device, FMODE_READ);
+ if (!IS_ERR(hib_resume_bdev)) {
+ set_blocksize(hib_resume_bdev, PAGE_SIZE);
memset(swsusp_header, 0, PAGE_SIZE);
- error = bio_read_page(swsusp_resume_block,
+ error = hib_bio_read_page(swsusp_resume_block,
swsusp_header, NULL);
if (error)
goto put;
@@ -698,7 +643,7 @@ int swsusp_check(void)
if (!memcmp(SWSUSP_SIG, swsusp_header->sig, 10)) {
memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
/* Reset swap signature now */
- error = bio_write_page(swsusp_resume_block,
+ error = hib_bio_write_page(swsusp_resume_block,
swsusp_header, NULL);
} else {
error = -EINVAL;
@@ -706,11 +651,11 @@ int swsusp_check(void)
put:
if (error)
- blkdev_put(resume_bdev, FMODE_READ);
+ blkdev_put(hib_resume_bdev, FMODE_READ);
else
pr_debug("PM: Signature found, resuming\n");
} else {
- error = PTR_ERR(resume_bdev);
+ error = PTR_ERR(hib_resume_bdev);
}
if (error)
@@ -725,12 +670,12 @@ put:
void swsusp_close(fmode_t mode)
{
- if (IS_ERR(resume_bdev)) {
+ if (IS_ERR(hib_resume_bdev)) {
pr_debug("PM: Image device not initialised\n");
return;
}
- blkdev_put(resume_bdev, mode);
+ blkdev_put(hib_resume_bdev, mode);
}
static int swsusp_header_init(void)
diff --git a/kernel/power/user.c b/kernel/power/user.c
index a8c96212bc1b..e819e17877ca 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -151,6 +151,7 @@ static ssize_t snapshot_read(struct file *filp, char __user *buf,
{
struct snapshot_data *data;
ssize_t res;
+ loff_t pg_offp = *offp & ~PAGE_MASK;
mutex_lock(&pm_mutex);
@@ -159,14 +160,19 @@ static ssize_t snapshot_read(struct file *filp, char __user *buf,
res = -ENODATA;
goto Unlock;
}
- res = snapshot_read_next(&data->handle, count);
- if (res > 0) {
- if (copy_to_user(buf, data_of(data->handle), res))
- res = -EFAULT;
- else
- *offp = data->handle.offset;
+ if (!pg_offp) { /* on page boundary? */
+ res = snapshot_read_next(&data->handle);
+ if (res <= 0)
+ goto Unlock;
+ } else {
+ res = PAGE_SIZE - pg_offp;
}
+ res = simple_read_from_buffer(buf, count, &pg_offp,
+ data_of(data->handle), res);
+ if (res > 0)
+ *offp += res;
+
Unlock:
mutex_unlock(&pm_mutex);
@@ -178,18 +184,25 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf,
{
struct snapshot_data *data;
ssize_t res;
+ loff_t pg_offp = *offp & ~PAGE_MASK;
mutex_lock(&pm_mutex);
data = filp->private_data;
- res = snapshot_write_next(&data->handle, count);
- if (res > 0) {
- if (copy_from_user(data_of(data->handle), buf, res))
- res = -EFAULT;
- else
- *offp = data->handle.offset;
+
+ if (!pg_offp) {
+ res = snapshot_write_next(&data->handle);
+ if (res <= 0)
+ goto unlock;
+ } else {
+ res = PAGE_SIZE - pg_offp;
}
+ res = simple_write_to_buffer(data_of(data->handle), res, &pg_offp,
+ buf, count);
+ if (res > 0)
+ *offp += res;
+unlock:
mutex_unlock(&pm_mutex);
return res;
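With the offset-aware read/write paths above, /dev/snapshot no longer has to
be accessed in whole-page chunks: the kernel serves the remainder of the
current page until *offp crosses a page boundary. A hedged userspace sketch;
the usual snapshot ioctl setup is omitted and the 1000-byte buffer is
deliberately not a multiple of PAGE_SIZE.

/* Userspace sketch: reading the snapshot image in arbitrary chunk sizes. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[1000];		/* not page-sized on purpose */
	int fd = open("/dev/snapshot", O_RDONLY);
	ssize_t n;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);	/* kernel tracks the sub-page offset */
	close(fd);
	return 0;
}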
diff --git a/kernel/printk.c b/kernel/printk.c
index 75077ad0b537..444b770c9595 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -33,6 +33,7 @@
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/kexec.h>
+#include <linux/kdb.h>
#include <linux/ratelimit.h>
#include <linux/kmsg_dump.h>
#include <linux/syslog.h>
@@ -413,6 +414,22 @@ SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
}
+#ifdef CONFIG_KGDB_KDB
+/* kdb dmesg command needs access to the syslog buffer. do_syslog()
+ * uses locks so it cannot be used during debugging. Just tell kdb
+ * where the start and end of the physical and logical logs are. This
+ * is equivalent to do_syslog(3).
+ */
+void kdb_syslog_data(char *syslog_data[4])
+{
+ syslog_data[0] = log_buf;
+ syslog_data[1] = log_buf + log_buf_len;
+ syslog_data[2] = log_buf + log_end -
+ (logged_chars < log_buf_len ? logged_chars : log_buf_len);
+ syslog_data[3] = log_buf + log_end;
+}
+#endif /* CONFIG_KGDB_KDB */
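A sketch of how a kdb-side consumer could interpret the four pointers; the
helper name and the printed message are illustrative, only the [0]/[1]
physical-buffer and [2]/[3] logical-log bounds follow from the code above.

/* Sketch: computing how many bytes of log are currently valid. */
#include <linux/kdb.h>

static void example_kdb_dmesg_len(void)
{
	char *syslog_data[4];
	long valid;

	kdb_syslog_data(syslog_data);
	/* [0]..[1] bound the physical buffer, [2]..[3] the logical log. */
	valid = syslog_data[3] - syslog_data[2];
	kdb_printf("dmesg holds %ld bytes\n", valid);
}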
+
/*
* Call the console drivers on a range of log_buf
*/
@@ -586,6 +603,14 @@ asmlinkage int printk(const char *fmt, ...)
va_list args;
int r;
+#ifdef CONFIG_KGDB_KDB
+ if (unlikely(kdb_trap_printk)) {
+ va_start(args, fmt);
+ r = vkdb_printf(fmt, args);
+ va_end(args);
+ return r;
+ }
+#endif
va_start(args, fmt);
r = vprintk(fmt, args);
va_end(args);
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 42ad8ae729a0..9fb51237b18c 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -76,7 +76,6 @@ void __ptrace_unlink(struct task_struct *child)
child->parent = child->real_parent;
list_del_init(&child->ptrace_entry);
- arch_ptrace_untrace(child);
if (task_is_traced(child))
ptrace_untrace(child);
}
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 49d808e833b0..72a8dc9567f5 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -44,7 +44,6 @@
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/module.h>
-#include <linux/kernel_stat.h>
#include <linux/hardirq.h>
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -64,9 +63,6 @@ struct lockdep_map rcu_sched_lock_map =
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
#endif
-int rcu_scheduler_active __read_mostly;
-EXPORT_SYMBOL_GPL(rcu_scheduler_active);
-
#ifdef CONFIG_DEBUG_LOCK_ALLOC
int debug_lockdep_rcu_enabled(void)
@@ -97,21 +93,6 @@ EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
/*
- * This function is invoked towards the end of the scheduler's initialization
- * process. Before this is called, the idle task might contain
- * RCU read-side critical sections (during which time, this idle
- * task is booting the system). After this function is called, the
- * idle tasks are prohibited from containing RCU read-side critical
- * sections.
- */
-void rcu_scheduler_starting(void)
-{
- WARN_ON(num_online_cpus() != 1);
- WARN_ON(nr_context_switches() > 0);
- rcu_scheduler_active = 1;
-}
-
-/*
* Awaken the corresponding synchronize_rcu() instance now that a
* grace period has elapsed.
*/
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 9f6d9ff2572c..b1804ff83d5e 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -44,9 +44,9 @@ struct rcu_ctrlblk {
};
/* Definition for rcupdate control block. */
-static struct rcu_ctrlblk rcu_ctrlblk = {
- .donetail = &rcu_ctrlblk.rcucblist,
- .curtail = &rcu_ctrlblk.rcucblist,
+static struct rcu_ctrlblk rcu_sched_ctrlblk = {
+ .donetail = &rcu_sched_ctrlblk.rcucblist,
+ .curtail = &rcu_sched_ctrlblk.rcucblist,
};
static struct rcu_ctrlblk rcu_bh_ctrlblk = {
@@ -54,6 +54,11 @@ static struct rcu_ctrlblk rcu_bh_ctrlblk = {
.curtail = &rcu_bh_ctrlblk.rcucblist,
};
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+int rcu_scheduler_active __read_mostly;
+EXPORT_SYMBOL_GPL(rcu_scheduler_active);
+#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
#ifdef CONFIG_NO_HZ
static long rcu_dynticks_nesting = 1;
@@ -108,7 +113,8 @@ static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
*/
void rcu_sched_qs(int cpu)
{
- if (rcu_qsctr_help(&rcu_ctrlblk) + rcu_qsctr_help(&rcu_bh_ctrlblk))
+ if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
+ rcu_qsctr_help(&rcu_bh_ctrlblk))
raise_softirq(RCU_SOFTIRQ);
}
@@ -173,7 +179,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
*/
static void rcu_process_callbacks(struct softirq_action *unused)
{
- __rcu_process_callbacks(&rcu_ctrlblk);
+ __rcu_process_callbacks(&rcu_sched_ctrlblk);
__rcu_process_callbacks(&rcu_bh_ctrlblk);
}
@@ -187,7 +193,8 @@ static void rcu_process_callbacks(struct softirq_action *unused)
*
* Cool, huh? (Due to Josh Triplett.)
*
- * But we want to make this a static inline later.
+ * But we want to make this a static inline later. The cond_resched()
+ * currently makes this problematic.
*/
void synchronize_sched(void)
{
@@ -195,12 +202,6 @@ void synchronize_sched(void)
}
EXPORT_SYMBOL_GPL(synchronize_sched);
-void synchronize_rcu_bh(void)
-{
- synchronize_sched();
-}
-EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
-
/*
* Helper function for call_rcu() and call_rcu_bh().
*/
@@ -226,7 +227,7 @@ static void __call_rcu(struct rcu_head *head,
*/
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
- __call_rcu(head, func, &rcu_ctrlblk);
+ __call_rcu(head, func, &rcu_sched_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu);
@@ -280,3 +281,5 @@ void __init rcu_init(void)
{
open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
}
+
+#include "rcutiny_plugin.h"
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
new file mode 100644
index 000000000000..d223a92bc742
--- /dev/null
+++ b/kernel/rcutiny_plugin.h
@@ -0,0 +1,39 @@
+/*
+ * Read-Copy Update mechanism for mutual exclusion (tiny version)
+ * Internal non-public definitions that provide either classic
+ * or preemptable semantics.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2009
+ *
+ * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ */
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+#include <linux/kernel_stat.h>
+
+/*
+ * During boot, we forgive RCU lockdep issues. After this function is
+ * invoked, we start taking RCU lockdep issues seriously.
+ */
+void rcu_scheduler_starting(void)
+{
+ WARN_ON(nr_context_switches() > 0);
+ rcu_scheduler_active = 1;
+}
+
+#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 58df55bf83ed..2b676f3a0f26 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -669,7 +669,7 @@ static struct rcu_torture_ops sched_expedited_ops = {
.sync = synchronize_sched_expedited,
.cb_barrier = NULL,
.fqs = rcu_sched_force_quiescent_state,
- .stats = rcu_expedited_torture_stats,
+ .stats = NULL,
.irq_capable = 1,
.name = "sched_expedited"
};
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 3ec8160fc75f..ba6996943e28 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -46,6 +46,7 @@
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>
+#include <linux/kernel_stat.h>
#include "rcutree.h"
@@ -53,8 +54,8 @@
static struct lock_class_key rcu_node_class[NUM_RCU_LVLS];
-#define RCU_STATE_INITIALIZER(name) { \
- .level = { &name.node[0] }, \
+#define RCU_STATE_INITIALIZER(structname) { \
+ .level = { &structname.node[0] }, \
.levelcnt = { \
NUM_RCU_LVL_0, /* root of hierarchy. */ \
NUM_RCU_LVL_1, \
@@ -65,13 +66,14 @@ static struct lock_class_key rcu_node_class[NUM_RCU_LVLS];
.signaled = RCU_GP_IDLE, \
.gpnum = -300, \
.completed = -300, \
- .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&name.onofflock), \
+ .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&structname.onofflock), \
.orphan_cbs_list = NULL, \
- .orphan_cbs_tail = &name.orphan_cbs_list, \
+ .orphan_cbs_tail = &structname.orphan_cbs_list, \
.orphan_qlen = 0, \
- .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&name.fqslock), \
+ .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&structname.fqslock), \
.n_force_qs = 0, \
.n_force_qs_ngp = 0, \
+ .name = #structname, \
}
struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched_state);
@@ -80,6 +82,9 @@ DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
+int rcu_scheduler_active __read_mostly;
+EXPORT_SYMBOL_GPL(rcu_scheduler_active);
+
/*
* Return true if an RCU grace period is in progress. The ACCESS_ONCE()s
* permit this function to be invoked without holding the root rcu_node
@@ -97,25 +102,32 @@ static int rcu_gp_in_progress(struct rcu_state *rsp)
*/
void rcu_sched_qs(int cpu)
{
- struct rcu_data *rdp;
+ struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu);
- rdp = &per_cpu(rcu_sched_data, cpu);
rdp->passed_quiesc_completed = rdp->gpnum - 1;
barrier();
rdp->passed_quiesc = 1;
- rcu_preempt_note_context_switch(cpu);
}
void rcu_bh_qs(int cpu)
{
- struct rcu_data *rdp;
+ struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
- rdp = &per_cpu(rcu_bh_data, cpu);
rdp->passed_quiesc_completed = rdp->gpnum - 1;
barrier();
rdp->passed_quiesc = 1;
}
+/*
+ * Note a context switch. This is a quiescent state for RCU-sched,
+ * and requires special handling for preemptible RCU.
+ */
+void rcu_note_context_switch(int cpu)
+{
+ rcu_sched_qs(cpu);
+ rcu_preempt_note_context_switch(cpu);
+}
+
#ifdef CONFIG_NO_HZ
DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
.dynticks_nesting = 1,
@@ -438,6 +450,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+int rcu_cpu_stall_panicking __read_mostly;
+
static void record_gp_stall_check_time(struct rcu_state *rsp)
{
rsp->gp_start = jiffies;
@@ -470,7 +484,8 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
/* OK, time to rat on our buddy... */
- printk(KERN_ERR "INFO: RCU detected CPU stalls:");
+ printk(KERN_ERR "INFO: %s detected stalls on CPUs/tasks: {",
+ rsp->name);
rcu_for_each_leaf_node(rsp, rnp) {
raw_spin_lock_irqsave(&rnp->lock, flags);
rcu_print_task_stall(rnp);
@@ -481,7 +496,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
if (rnp->qsmask & (1UL << cpu))
printk(" %d", rnp->grplo + cpu);
}
- printk(" (detected by %d, t=%ld jiffies)\n",
+ printk("} (detected by %d, t=%ld jiffies)\n",
smp_processor_id(), (long)(jiffies - rsp->gp_start));
trigger_all_cpu_backtrace();
@@ -497,8 +512,8 @@ static void print_cpu_stall(struct rcu_state *rsp)
unsigned long flags;
struct rcu_node *rnp = rcu_get_root(rsp);
- printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu jiffies)\n",
- smp_processor_id(), jiffies - rsp->gp_start);
+ printk(KERN_ERR "INFO: %s detected stall on CPU %d (t=%lu jiffies)\n",
+ rsp->name, smp_processor_id(), jiffies - rsp->gp_start);
trigger_all_cpu_backtrace();
raw_spin_lock_irqsave(&rnp->lock, flags);
@@ -515,6 +530,8 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
long delta;
struct rcu_node *rnp;
+ if (rcu_cpu_stall_panicking)
+ return;
delta = jiffies - rsp->jiffies_stall;
rnp = rdp->mynode;
if ((rnp->qsmask & rdp->grpmask) && delta >= 0) {
@@ -529,6 +546,21 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
}
}
+static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
+{
+ rcu_cpu_stall_panicking = 1;
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block rcu_panic_block = {
+ .notifier_call = rcu_panic,
+};
+
+static void __init check_cpu_stall_init(void)
+{
+ atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
+}
+
#else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
static void record_gp_stall_check_time(struct rcu_state *rsp)
@@ -539,6 +571,10 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
{
}
+static void __init check_cpu_stall_init(void)
+{
+}
+
#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
/*
@@ -1125,8 +1161,6 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
*/
void rcu_check_callbacks(int cpu, int user)
{
- if (!rcu_pending(cpu))
- return; /* if nothing for RCU to do. */
if (user ||
(idle_cpu(cpu) && rcu_scheduler_active &&
!in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
@@ -1158,7 +1192,8 @@ void rcu_check_callbacks(int cpu, int user)
rcu_bh_qs(cpu);
}
rcu_preempt_check_callbacks(cpu);
- raise_softirq(RCU_SOFTIRQ);
+ if (rcu_pending(cpu))
+ raise_softirq(RCU_SOFTIRQ);
}
#ifdef CONFIG_SMP
@@ -1236,11 +1271,11 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
break; /* grace period idle or initializing, ignore. */
case RCU_SAVE_DYNTICK:
-
- raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK)
break; /* So gcc recognizes the dead code. */
+ raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
+
/* Record dyntick-idle state. */
force_qs_rnp(rsp, dyntick_save_progress_counter);
raw_spin_lock(&rnp->lock); /* irqs already disabled */
@@ -1498,8 +1533,20 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
check_cpu_stall(rsp, rdp);
/* Is the RCU core waiting for a quiescent state from this CPU? */
- if (rdp->qs_pending) {
+ if (rdp->qs_pending && !rdp->passed_quiesc) {
+
+ /*
+ * If force_quiescent_state() is coming soon and this CPU
+ * needs a quiescent state, and this is either RCU-sched
+ * or RCU-bh, force a local reschedule.
+ */
rdp->n_rp_qs_pending++;
+ if (!rdp->preemptable &&
+ ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs) - 1,
+ jiffies))
+ set_need_resched();
+ } else if (rdp->qs_pending && rdp->passed_quiesc) {
+ rdp->n_rp_report_qs++;
return 1;
}
@@ -1767,6 +1814,21 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
}
/*
+ * This function is invoked towards the end of the scheduler's initialization
+ * process. Before this is called, the idle task might contain
+ * RCU read-side critical sections (during which time, this idle
+ * task is booting the system). After this function is called, the
+ * idle tasks are prohibited from containing RCU read-side critical
+ * sections. This function also enables RCU lockdep checking.
+ */
+void rcu_scheduler_starting(void)
+{
+ WARN_ON(num_online_cpus() != 1);
+ WARN_ON(nr_context_switches() > 0);
+ rcu_scheduler_active = 1;
+}
+
+/*
* Compute the per-level fanout, either using the exact fanout specified
* or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT.
*/
@@ -1849,6 +1911,14 @@ static void __init rcu_init_one(struct rcu_state *rsp)
INIT_LIST_HEAD(&rnp->blocked_tasks[3]);
}
}
+
+ rnp = rsp->level[NUM_RCU_LVLS - 1];
+ for_each_possible_cpu(i) {
+ while (i > rnp->grphi)
+ rnp++;
+ rsp->rda[i]->mynode = rnp;
+ rcu_boot_init_percpu_data(i, rsp);
+ }
}
/*
@@ -1859,19 +1929,11 @@ static void __init rcu_init_one(struct rcu_state *rsp)
#define RCU_INIT_FLAVOR(rsp, rcu_data) \
do { \
int i; \
- int j; \
- struct rcu_node *rnp; \
\
- rcu_init_one(rsp); \
- rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \
- j = 0; \
for_each_possible_cpu(i) { \
- if (i > rnp[j].grphi) \
- j++; \
- per_cpu(rcu_data, i).mynode = &rnp[j]; \
(rsp)->rda[i] = &per_cpu(rcu_data, i); \
- rcu_boot_init_percpu_data(i, rsp); \
} \
+ rcu_init_one(rsp); \
} while (0)
void __init rcu_init(void)
@@ -1879,12 +1941,6 @@ void __init rcu_init(void)
int cpu;
rcu_bootup_announce();
-#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
- printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
-#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
-#if NUM_RCU_LVL_4 != 0
- printk(KERN_INFO "Experimental four-level hierarchy is enabled.\n");
-#endif /* #if NUM_RCU_LVL_4 != 0 */
RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data);
RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data);
__rcu_init_preempt();
@@ -1898,6 +1954,7 @@ void __init rcu_init(void)
cpu_notifier(rcu_cpu_notify, 0);
for_each_online_cpu(cpu)
rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
+ check_cpu_stall_init();
}
#include "rcutree_plugin.h"
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 4a525a30e08e..14c040b18ed0 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -223,6 +223,7 @@ struct rcu_data {
/* 5) __rcu_pending() statistics. */
unsigned long n_rcu_pending; /* rcu_pending() calls since boot. */
unsigned long n_rp_qs_pending;
+ unsigned long n_rp_report_qs;
unsigned long n_rp_cb_ready;
unsigned long n_rp_cpu_needs_gp;
unsigned long n_rp_gp_completed;
@@ -326,6 +327,7 @@ struct rcu_state {
unsigned long jiffies_stall; /* Time at which to check */
/* for CPU stalls. */
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+ char *name; /* Name of structure. */
};
/* Return values for rcu_preempt_offline_tasks(). */
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 79b53bda8943..ac7d80fa895c 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -26,6 +26,45 @@
#include <linux/delay.h>
+/*
+ * Check the RCU kernel configuration parameters and print informative
+ * messages about anything out of the ordinary. If you like #ifdef, you
+ * will love this function.
+ */
+static void __init rcu_bootup_announce_oddness(void)
+{
+#ifdef CONFIG_RCU_TRACE
+ printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n");
+#endif
+#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
+ printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
+ CONFIG_RCU_FANOUT);
+#endif
+#ifdef CONFIG_RCU_FANOUT_EXACT
+ printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n");
+#endif
+#ifdef CONFIG_RCU_FAST_NO_HZ
+ printk(KERN_INFO
+ "\tRCU dyntick-idle grace-period acceleration is enabled.\n");
+#endif
+#ifdef CONFIG_PROVE_RCU
+ printk(KERN_INFO "\tRCU lockdep checking is enabled.\n");
+#endif
+#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
+ printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
+#endif
+#ifndef CONFIG_RCU_CPU_STALL_DETECTOR
+ printk(KERN_INFO
+ "\tRCU-based detection of stalled CPUs is disabled.\n");
+#endif
+#ifndef CONFIG_RCU_CPU_STALL_VERBOSE
+ printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n");
+#endif
+#if NUM_RCU_LVL_4 != 0
+ printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n");
+#endif
+}
+
#ifdef CONFIG_TREE_PREEMPT_RCU
struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
@@ -38,8 +77,8 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp);
*/
static void __init rcu_bootup_announce(void)
{
- printk(KERN_INFO
- "Experimental preemptable hierarchical RCU implementation.\n");
+ printk(KERN_INFO "Preemptable hierarchical RCU implementation.\n");
+ rcu_bootup_announce_oddness();
}
/*
@@ -75,13 +114,19 @@ EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
* that this just means that the task currently running on the CPU is
* not in a quiescent state. There might be any number of tasks blocked
* while in an RCU read-side critical section.
+ *
+ * Unlike the other rcu_*_qs() functions, callers to this function
+ * must disable irqs in order to protect the assignment to
+ * ->rcu_read_unlock_special.
*/
static void rcu_preempt_qs(int cpu)
{
struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
+
rdp->passed_quiesc_completed = rdp->gpnum - 1;
barrier();
rdp->passed_quiesc = 1;
+ current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
}
/*
@@ -144,9 +189,8 @@ static void rcu_preempt_note_context_switch(int cpu)
* grace period, then the fact that the task has been enqueued
* means that we continue to block the current grace period.
*/
- rcu_preempt_qs(cpu);
local_irq_save(flags);
- t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
+ rcu_preempt_qs(cpu);
local_irq_restore(flags);
}
@@ -236,7 +280,6 @@ static void rcu_read_unlock_special(struct task_struct *t)
*/
special = t->rcu_read_unlock_special;
if (special & RCU_READ_UNLOCK_NEED_QS) {
- t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
rcu_preempt_qs(smp_processor_id());
}
@@ -473,7 +516,6 @@ static void rcu_preempt_check_callbacks(int cpu)
struct task_struct *t = current;
if (t->rcu_read_lock_nesting == 0) {
- t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
rcu_preempt_qs(cpu);
return;
}
@@ -754,6 +796,7 @@ void exit_rcu(void)
static void __init rcu_bootup_announce(void)
{
printk(KERN_INFO "Hierarchical RCU implementation.\n");
+ rcu_bootup_announce_oddness();
}
/*
@@ -1008,6 +1051,8 @@ static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
int rcu_needs_cpu(int cpu)
{
int c = 0;
+ int snap;
+ int snap_nmi;
int thatcpu;
/* Check for being in the holdoff period. */
@@ -1015,12 +1060,18 @@ int rcu_needs_cpu(int cpu)
return rcu_needs_cpu_quick_check(cpu);
/* Don't bother unless we are the last non-dyntick-idle CPU. */
- for_each_cpu_not(thatcpu, nohz_cpu_mask)
- if (thatcpu != cpu) {
+ for_each_online_cpu(thatcpu) {
+ if (thatcpu == cpu)
+ continue;
+ snap = per_cpu(rcu_dynticks, thatcpu).dynticks;
+ snap_nmi = per_cpu(rcu_dynticks, thatcpu).dynticks_nmi;
+ smp_mb(); /* Order sampling of snap with end of grace period. */
+ if (((snap & 0x1) != 0) || ((snap_nmi & 0x1) != 0)) {
per_cpu(rcu_dyntick_drain, cpu) = 0;
per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
return rcu_needs_cpu_quick_check(cpu);
}
+ }
/* Check and update the rcu_dyntick_drain sequencing. */
if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index d45db2e35d27..36c95b45738e 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -241,11 +241,13 @@ static const struct file_operations rcugp_fops = {
static void print_one_rcu_pending(struct seq_file *m, struct rcu_data *rdp)
{
seq_printf(m, "%3d%cnp=%ld "
- "qsp=%ld cbr=%ld cng=%ld gpc=%ld gps=%ld nf=%ld nn=%ld\n",
+ "qsp=%ld rpq=%ld cbr=%ld cng=%ld "
+ "gpc=%ld gps=%ld nf=%ld nn=%ld\n",
rdp->cpu,
cpu_is_offline(rdp->cpu) ? '!' : ' ',
rdp->n_rcu_pending,
rdp->n_rp_qs_pending,
+ rdp->n_rp_report_qs,
rdp->n_rp_cb_ready,
rdp->n_rp_cpu_needs_gp,
rdp->n_rp_gp_completed,
diff --git a/kernel/resource.c b/kernel/resource.c
index 9c358e263534..7b36976e5dea 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -15,6 +15,7 @@
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
+#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/pfn.h>
@@ -681,6 +682,8 @@ resource_size_t resource_alignment(struct resource *res)
* release_region releases a matching busy region.
*/
+static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);
+
/**
* __request_region - create a new busy resource region
* @parent: parent resource descriptor
@@ -693,6 +696,7 @@ struct resource * __request_region(struct resource *parent,
resource_size_t start, resource_size_t n,
const char *name, int flags)
{
+ DECLARE_WAITQUEUE(wait, current);
struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);
if (!res)
@@ -717,7 +721,15 @@ struct resource * __request_region(struct resource *parent,
if (!(conflict->flags & IORESOURCE_BUSY))
continue;
}
-
+ if (conflict->flags & flags & IORESOURCE_MUXED) {
+ add_wait_queue(&muxed_resource_wait, &wait);
+ write_unlock(&resource_lock);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule();
+ remove_wait_queue(&muxed_resource_wait, &wait);
+ write_lock(&resource_lock);
+ continue;
+ }
/* Uhhuh, that didn't work out.. */
kfree(res);
res = NULL;
@@ -791,6 +803,8 @@ void __release_region(struct resource *parent, resource_size_t start,
break;
*p = res->sibling;
write_unlock(&resource_lock);
+ if (res->flags & IORESOURCE_MUXED)
+ wake_up(&muxed_resource_wait);
kfree(res);
return;
}
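
The IORESOURCE_MUXED handling above turns a conflicting request into sleep-and-retry: the requester joins muxed_resource_wait, drops resource_lock, schedules, and loops back once __release_region() wakes the queue. A small pthread sketch of the same drop-lock/sleep/retry shape follows; region_busy, region_lock and region_cv are illustrative objects, not the kernel's.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t region_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  region_cv   = PTHREAD_COND_INITIALIZER;
static int region_busy;                  /* the shared ("muxed") region's busy flag */

static void request_region_muxed(const char *who)
{
    pthread_mutex_lock(&region_lock);
    while (region_busy)                  /* conflict: sleep, then retry the check */
        pthread_cond_wait(&region_cv, &region_lock);
    region_busy = 1;
    pthread_mutex_unlock(&region_lock);
    printf("%s: got region\n", who);
}

static void release_region_muxed(const char *who)
{
    pthread_mutex_lock(&region_lock);
    region_busy = 0;
    pthread_cond_broadcast(&region_cv);  /* like wake_up(&muxed_resource_wait) */
    pthread_mutex_unlock(&region_lock);
    printf("%s: released region\n", who);
}

static void *worker(void *arg)
{
    request_region_muxed(arg);
    usleep(1000);                        /* pretend to use the hardware */
    release_region_muxed(arg);
    return NULL;
}

int main(void)
{
    pthread_t a, b;
    pthread_create(&a, NULL, worker, "A");
    pthread_create(&b, NULL, worker, "B");
    pthread_join(a, NULL);
    pthread_join(b, NULL);
    return 0;
}
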
diff --git a/kernel/sched.c b/kernel/sched.c
index 3c2a54f70ffe..8d2f0508c481 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -55,9 +55,9 @@
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
-#include <linux/kthread.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/stop_machine.h>
#include <linux/sysctl.h>
#include <linux/syscalls.h>
#include <linux/times.h>
@@ -503,8 +503,11 @@ struct rq {
#define CPU_LOAD_IDX_MAX 5
unsigned long cpu_load[CPU_LOAD_IDX_MAX];
#ifdef CONFIG_NO_HZ
+ u64 nohz_stamp;
unsigned char in_nohz_recently;
#endif
+ unsigned int skip_clock_update;
+
/* capture load from *all* tasks on this cpu: */
struct load_weight load;
unsigned long nr_load_updates;
@@ -546,15 +549,13 @@ struct rq {
int post_schedule;
int active_balance;
int push_cpu;
+ struct cpu_stop_work active_balance_work;
/* cpu of this runqueue: */
int cpu;
int online;
unsigned long avg_load_per_task;
- struct task_struct *migration_thread;
- struct list_head migration_queue;
-
u64 rt_avg;
u64 age_stamp;
u64 idle_stamp;
@@ -602,6 +603,13 @@ static inline
void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
rq->curr->sched_class->check_preempt_curr(rq, p, flags);
+
+ /*
+ * A queue event has occurred, and we're going to schedule. In
+ * this case, we can skip a useless back-to-back clock update.
+ */
+ if (test_tsk_need_resched(p))
+ rq->skip_clock_update = 1;
}
static inline int cpu_of(struct rq *rq)
@@ -636,7 +644,8 @@ static inline int cpu_of(struct rq *rq)
inline void update_rq_clock(struct rq *rq)
{
- rq->clock = sched_clock_cpu(cpu_of(rq));
+ if (!rq->skip_clock_update)
+ rq->clock = sched_clock_cpu(cpu_of(rq));
}
/*
@@ -914,16 +923,12 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
/*
- * Check whether the task is waking, we use this to synchronize against
- * ttwu() so that task_cpu() reports a stable number.
- *
- * We need to make an exception for PF_STARTING tasks because the fork
- * path might require task_rq_lock() to work, eg. it can call
- * set_cpus_allowed_ptr() from the cpuset clone_ns code.
+ * Check whether the task is waking, we use this to synchronize ->cpus_allowed
+ * against ttwu().
*/
static inline int task_is_waking(struct task_struct *p)
{
- return unlikely((p->state == TASK_WAKING) && !(p->flags & PF_STARTING));
+ return unlikely(p->state == TASK_WAKING);
}
/*
@@ -936,11 +941,9 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
struct rq *rq;
for (;;) {
- while (task_is_waking(p))
- cpu_relax();
rq = task_rq(p);
raw_spin_lock(&rq->lock);
- if (likely(rq == task_rq(p) && !task_is_waking(p)))
+ if (likely(rq == task_rq(p)))
return rq;
raw_spin_unlock(&rq->lock);
}
@@ -957,12 +960,10 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
struct rq *rq;
for (;;) {
- while (task_is_waking(p))
- cpu_relax();
local_irq_save(*flags);
rq = task_rq(p);
raw_spin_lock(&rq->lock);
- if (likely(rq == task_rq(p) && !task_is_waking(p)))
+ if (likely(rq == task_rq(p)))
return rq;
raw_spin_unlock_irqrestore(&rq->lock, *flags);
}
@@ -1239,6 +1240,17 @@ void wake_up_idle_cpu(int cpu)
if (!tsk_is_polling(rq->idle))
smp_send_reschedule(cpu);
}
+
+int nohz_ratelimit(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+ u64 diff = rq->clock - rq->nohz_stamp;
+
+ rq->nohz_stamp = rq->clock;
+
+ return diff < (NSEC_PER_SEC / HZ) >> 1;
+}
+
#endif /* CONFIG_NO_HZ */
static u64 sched_avg_period(void)
@@ -1781,8 +1793,6 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
}
}
- update_rq_clock(rq1);
- update_rq_clock(rq2);
}
/*
@@ -1813,7 +1823,7 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
}
#endif
-static void calc_load_account_active(struct rq *this_rq);
+static void calc_load_account_idle(struct rq *this_rq);
static void update_sysctl(void);
static int get_update_sysctl_factor(void);
@@ -1870,62 +1880,43 @@ static void set_load_weight(struct task_struct *p)
p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO];
}
-static void update_avg(u64 *avg, u64 sample)
+static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
- s64 diff = sample - *avg;
- *avg += diff >> 3;
-}
-
-static void
-enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
-{
- if (wakeup)
- p->se.start_runtime = p->se.sum_exec_runtime;
-
+ update_rq_clock(rq);
sched_info_queued(p);
- p->sched_class->enqueue_task(rq, p, wakeup, head);
+ p->sched_class->enqueue_task(rq, p, flags);
p->se.on_rq = 1;
}
-static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
+static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
- if (sleep) {
- if (p->se.last_wakeup) {
- update_avg(&p->se.avg_overlap,
- p->se.sum_exec_runtime - p->se.last_wakeup);
- p->se.last_wakeup = 0;
- } else {
- update_avg(&p->se.avg_wakeup,
- sysctl_sched_wakeup_granularity);
- }
- }
-
+ update_rq_clock(rq);
sched_info_dequeued(p);
- p->sched_class->dequeue_task(rq, p, sleep);
+ p->sched_class->dequeue_task(rq, p, flags);
p->se.on_rq = 0;
}
/*
* activate_task - move a task to the runqueue.
*/
-static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
+static void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
if (task_contributes_to_load(p))
rq->nr_uninterruptible--;
- enqueue_task(rq, p, wakeup, false);
+ enqueue_task(rq, p, flags);
inc_nr_running(rq);
}
/*
* deactivate_task - remove a task from the runqueue.
*/
-static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
+static void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
if (task_contributes_to_load(p))
rq->nr_uninterruptible++;
- dequeue_task(rq, p, sleep);
+ dequeue_task(rq, p, flags);
dec_nr_running(rq);
}
@@ -2054,21 +2045,18 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
__set_task_cpu(p, new_cpu);
}
-struct migration_req {
- struct list_head list;
-
+struct migration_arg {
struct task_struct *task;
int dest_cpu;
-
- struct completion done;
};
+static int migration_cpu_stop(void *data);
+
/*
* The task's runqueue lock must be held.
* Returns true if you have to wait for migration thread.
*/
-static int
-migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
+static bool migrate_task(struct task_struct *p, int dest_cpu)
{
struct rq *rq = task_rq(p);
@@ -2076,58 +2064,7 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
* If the task is not on a runqueue (and not running), then
* the next wake-up will properly place the task.
*/
- if (!p->se.on_rq && !task_running(rq, p))
- return 0;
-
- init_completion(&req->done);
- req->task = p;
- req->dest_cpu = dest_cpu;
- list_add(&req->list, &rq->migration_queue);
-
- return 1;
-}
-
-/*
- * wait_task_context_switch - wait for a thread to complete at least one
- * context switch.
- *
- * @p must not be current.
- */
-void wait_task_context_switch(struct task_struct *p)
-{
- unsigned long nvcsw, nivcsw, flags;
- int running;
- struct rq *rq;
-
- nvcsw = p->nvcsw;
- nivcsw = p->nivcsw;
- for (;;) {
- /*
- * The runqueue is assigned before the actual context
- * switch. We need to take the runqueue lock.
- *
- * We could check initially without the lock but it is
- * very likely that we need to take the lock in every
- * iteration.
- */
- rq = task_rq_lock(p, &flags);
- running = task_running(rq, p);
- task_rq_unlock(rq, &flags);
-
- if (likely(!running))
- break;
- /*
- * The switch count is incremented before the actual
- * context switch. We thus wait for two switches to be
- * sure at least one completed.
- */
- if ((p->nvcsw - nvcsw) > 1)
- break;
- if ((p->nivcsw - nivcsw) > 1)
- break;
-
- cpu_relax();
- }
+ return p->se.on_rq || task_running(rq, p);
}
/*
@@ -2185,7 +2122,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
* just go back and repeat.
*/
rq = task_rq_lock(p, &flags);
- trace_sched_wait_task(rq, p);
+ trace_sched_wait_task(p);
running = task_running(rq, p);
on_rq = p->se.on_rq;
ncsw = 0;
@@ -2283,6 +2220,9 @@ void task_oncpu_function_call(struct task_struct *p,
}
#ifdef CONFIG_SMP
+/*
+ * ->cpus_allowed is protected by either TASK_WAKING or rq->lock held.
+ */
static int select_fallback_rq(int cpu, struct task_struct *p)
{
int dest_cpu;
@@ -2299,12 +2239,8 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
return dest_cpu;
/* No more Mr. Nice Guy. */
- if (dest_cpu >= nr_cpu_ids) {
- rcu_read_lock();
- cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
- rcu_read_unlock();
- dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
-
+ if (unlikely(dest_cpu >= nr_cpu_ids)) {
+ dest_cpu = cpuset_cpus_allowed_fallback(p);
/*
* Don't tell them about moving exiting tasks or
* kernel threads (both mm NULL), since they never
@@ -2321,17 +2257,12 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
}
/*
- * Gets called from 3 sites (exec, fork, wakeup), since it is called without
- * holding rq->lock we need to ensure ->cpus_allowed is stable, this is done
- * by:
- *
- * exec: is unstable, retry loop
- * fork & wake-up: serialize ->cpus_allowed against TASK_WAKING
+ * The caller (fork, wakeup) owns TASK_WAKING, so ->cpus_allowed is stable.
*/
static inline
-int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
+int select_task_rq(struct rq *rq, struct task_struct *p, int sd_flags, int wake_flags)
{
- int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
+ int cpu = p->sched_class->select_task_rq(rq, p, sd_flags, wake_flags);
/*
* In order not to call set_task_cpu() on a blocking task we need
@@ -2349,6 +2280,12 @@ int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
return cpu;
}
+
+static void update_avg(u64 *avg, u64 sample)
+{
+ s64 diff = sample - *avg;
+ *avg += diff >> 3;
+}
#endif
/***
@@ -2370,16 +2307,13 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
{
int cpu, orig_cpu, this_cpu, success = 0;
unsigned long flags;
+ unsigned long en_flags = ENQUEUE_WAKEUP;
struct rq *rq;
- if (!sched_feat(SYNC_WAKEUPS))
- wake_flags &= ~WF_SYNC;
-
this_cpu = get_cpu();
smp_wmb();
rq = task_rq_lock(p, &flags);
- update_rq_clock(rq);
if (!(p->state & state))
goto out;
@@ -2399,28 +2333,26 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
*
* First fix up the nr_uninterruptible count:
*/
- if (task_contributes_to_load(p))
- rq->nr_uninterruptible--;
+ if (task_contributes_to_load(p)) {
+ if (likely(cpu_online(orig_cpu)))
+ rq->nr_uninterruptible--;
+ else
+ this_rq()->nr_uninterruptible--;
+ }
p->state = TASK_WAKING;
- if (p->sched_class->task_waking)
+ if (p->sched_class->task_waking) {
p->sched_class->task_waking(rq, p);
+ en_flags |= ENQUEUE_WAKING;
+ }
- __task_rq_unlock(rq);
-
- cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
- if (cpu != orig_cpu) {
- /*
- * Since we migrate the task without holding any rq->lock,
- * we need to be careful with task_rq_lock(), since that
- * might end up locking an invalid rq.
- */
+ cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags);
+ if (cpu != orig_cpu)
set_task_cpu(p, cpu);
- }
+ __task_rq_unlock(rq);
rq = cpu_rq(cpu);
raw_spin_lock(&rq->lock);
- update_rq_clock(rq);
/*
* We migrated the task without holding either rq->lock, however
@@ -2448,36 +2380,20 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
out_activate:
#endif /* CONFIG_SMP */
- schedstat_inc(p, se.nr_wakeups);
+ schedstat_inc(p, se.statistics.nr_wakeups);
if (wake_flags & WF_SYNC)
- schedstat_inc(p, se.nr_wakeups_sync);
+ schedstat_inc(p, se.statistics.nr_wakeups_sync);
if (orig_cpu != cpu)
- schedstat_inc(p, se.nr_wakeups_migrate);
+ schedstat_inc(p, se.statistics.nr_wakeups_migrate);
if (cpu == this_cpu)
- schedstat_inc(p, se.nr_wakeups_local);
+ schedstat_inc(p, se.statistics.nr_wakeups_local);
else
- schedstat_inc(p, se.nr_wakeups_remote);
- activate_task(rq, p, 1);
+ schedstat_inc(p, se.statistics.nr_wakeups_remote);
+ activate_task(rq, p, en_flags);
success = 1;
- /*
- * Only attribute actual wakeups done by this task.
- */
- if (!in_interrupt()) {
- struct sched_entity *se = &current->se;
- u64 sample = se->sum_exec_runtime;
-
- if (se->last_wakeup)
- sample -= se->last_wakeup;
- else
- sample -= se->start_runtime;
- update_avg(&se->avg_wakeup, sample);
-
- se->last_wakeup = se->sum_exec_runtime;
- }
-
out_running:
- trace_sched_wakeup(rq, p, success);
+ trace_sched_wakeup(p, success);
check_preempt_curr(rq, p, wake_flags);
p->state = TASK_RUNNING;
@@ -2537,42 +2453,9 @@ static void __sched_fork(struct task_struct *p)
p->se.sum_exec_runtime = 0;
p->se.prev_sum_exec_runtime = 0;
p->se.nr_migrations = 0;
- p->se.last_wakeup = 0;
- p->se.avg_overlap = 0;
- p->se.start_runtime = 0;
- p->se.avg_wakeup = sysctl_sched_wakeup_granularity;
#ifdef CONFIG_SCHEDSTATS
- p->se.wait_start = 0;
- p->se.wait_max = 0;
- p->se.wait_count = 0;
- p->se.wait_sum = 0;
-
- p->se.sleep_start = 0;
- p->se.sleep_max = 0;
- p->se.sum_sleep_runtime = 0;
-
- p->se.block_start = 0;
- p->se.block_max = 0;
- p->se.exec_max = 0;
- p->se.slice_max = 0;
-
- p->se.nr_migrations_cold = 0;
- p->se.nr_failed_migrations_affine = 0;
- p->se.nr_failed_migrations_running = 0;
- p->se.nr_failed_migrations_hot = 0;
- p->se.nr_forced_migrations = 0;
-
- p->se.nr_wakeups = 0;
- p->se.nr_wakeups_sync = 0;
- p->se.nr_wakeups_migrate = 0;
- p->se.nr_wakeups_local = 0;
- p->se.nr_wakeups_remote = 0;
- p->se.nr_wakeups_affine = 0;
- p->se.nr_wakeups_affine_attempts = 0;
- p->se.nr_wakeups_passive = 0;
- p->se.nr_wakeups_idle = 0;
-
+ memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
INIT_LIST_HEAD(&p->rt.run_list);
@@ -2593,11 +2476,11 @@ void sched_fork(struct task_struct *p, int clone_flags)
__sched_fork(p);
/*
- * We mark the process as waking here. This guarantees that
+ * We mark the process as running here. This guarantees that
* nobody will actually run it, and a signal or other external
* event cannot wake it up and insert it on the runqueue either.
*/
- p->state = TASK_WAKING;
+ p->state = TASK_RUNNING;
/*
* Revert to default priority/policy on fork if requested.
@@ -2664,31 +2547,27 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
int cpu __maybe_unused = get_cpu();
#ifdef CONFIG_SMP
+ rq = task_rq_lock(p, &flags);
+ p->state = TASK_WAKING;
+
/*
* Fork balancing, do it here and not earlier because:
* - cpus_allowed can change in the fork path
* - any previously selected cpu might disappear through hotplug
*
- * We still have TASK_WAKING but PF_STARTING is gone now, meaning
- * ->cpus_allowed is stable, we have preemption disabled, meaning
- * cpu_online_mask is stable.
+ * We set TASK_WAKING so that select_task_rq() can drop rq->lock
+ * without people poking at ->cpus_allowed.
*/
- cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
+ cpu = select_task_rq(rq, p, SD_BALANCE_FORK, 0);
set_task_cpu(p, cpu);
-#endif
- /*
- * Since the task is not on the rq and we still have TASK_WAKING set
- * nobody else will migrate this task.
- */
- rq = cpu_rq(cpu);
- raw_spin_lock_irqsave(&rq->lock, flags);
-
- BUG_ON(p->state != TASK_WAKING);
p->state = TASK_RUNNING;
- update_rq_clock(rq);
+ task_rq_unlock(rq, &flags);
+#endif
+
+ rq = task_rq_lock(p, &flags);
activate_task(rq, p, 0);
- trace_sched_wakeup_new(rq, p, 1);
+ trace_sched_wakeup_new(p, 1);
check_preempt_curr(rq, p, WF_FORK);
#ifdef CONFIG_SMP
if (p->sched_class->task_woken)
@@ -2908,7 +2787,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
struct mm_struct *mm, *oldmm;
prepare_task_switch(rq, prev, next);
- trace_sched_switch(rq, prev, next);
+ trace_sched_switch(prev, next);
mm = next->mm;
oldmm = prev->active_mm;
/*
@@ -3025,6 +2904,61 @@ static unsigned long calc_load_update;
unsigned long avenrun[3];
EXPORT_SYMBOL(avenrun);
+static long calc_load_fold_active(struct rq *this_rq)
+{
+ long nr_active, delta = 0;
+
+ nr_active = this_rq->nr_running;
+ nr_active += (long) this_rq->nr_uninterruptible;
+
+ if (nr_active != this_rq->calc_load_active) {
+ delta = nr_active - this_rq->calc_load_active;
+ this_rq->calc_load_active = nr_active;
+ }
+
+ return delta;
+}
+
+#ifdef CONFIG_NO_HZ
+/*
+ * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
+ *
+ * When making the ILB scale, we should try to pull this in as well.
+ */
+static atomic_long_t calc_load_tasks_idle;
+
+static void calc_load_account_idle(struct rq *this_rq)
+{
+ long delta;
+
+ delta = calc_load_fold_active(this_rq);
+ if (delta)
+ atomic_long_add(delta, &calc_load_tasks_idle);
+}
+
+static long calc_load_fold_idle(void)
+{
+ long delta = 0;
+
+ /*
+ * It's got a race, we don't care...
+ */
+ if (atomic_long_read(&calc_load_tasks_idle))
+ delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
+
+ return delta;
+}
+#else
+static void calc_load_account_idle(struct rq *this_rq)
+{
+}
+
+static inline long calc_load_fold_idle(void)
+{
+ return 0;
+}
+#endif
+
/**
* get_avenrun - get the load average array
* @loads: pointer to dest load array
@@ -3071,20 +3005,22 @@ void calc_global_load(void)
}
/*
- * Either called from update_cpu_load() or from a cpu going idle
+ * Called from update_cpu_load() to periodically update this CPU's
+ * active count.
*/
static void calc_load_account_active(struct rq *this_rq)
{
- long nr_active, delta;
+ long delta;
- nr_active = this_rq->nr_running;
- nr_active += (long) this_rq->nr_uninterruptible;
+ if (time_before(jiffies, this_rq->calc_load_update))
+ return;
- if (nr_active != this_rq->calc_load_active) {
- delta = nr_active - this_rq->calc_load_active;
- this_rq->calc_load_active = nr_active;
+ delta = calc_load_fold_active(this_rq);
+ delta += calc_load_fold_idle();
+ if (delta)
atomic_long_add(delta, &calc_load_tasks);
- }
+
+ this_rq->calc_load_update += LOAD_FREQ;
}
/*
@@ -3116,10 +3052,7 @@ static void update_cpu_load(struct rq *this_rq)
this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
}
- if (time_after_eq(jiffies, this_rq->calc_load_update)) {
- this_rq->calc_load_update += LOAD_FREQ;
- calc_load_account_active(this_rq);
- }
+ calc_load_account_active(this_rq);
}
#ifdef CONFIG_SMP
@@ -3131,44 +3064,27 @@ static void update_cpu_load(struct rq *this_rq)
void sched_exec(void)
{
struct task_struct *p = current;
- struct migration_req req;
- int dest_cpu, this_cpu;
unsigned long flags;
struct rq *rq;
-
-again:
- this_cpu = get_cpu();
- dest_cpu = select_task_rq(p, SD_BALANCE_EXEC, 0);
- if (dest_cpu == this_cpu) {
- put_cpu();
- return;
- }
+ int dest_cpu;
rq = task_rq_lock(p, &flags);
- put_cpu();
+ dest_cpu = p->sched_class->select_task_rq(rq, p, SD_BALANCE_EXEC, 0);
+ if (dest_cpu == smp_processor_id())
+ goto unlock;
/*
* select_task_rq() can race against ->cpus_allowed
*/
- if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)
- || unlikely(!cpu_active(dest_cpu))) {
- task_rq_unlock(rq, &flags);
- goto again;
- }
-
- /* force the process onto the specified CPU */
- if (migrate_task(p, dest_cpu, &req)) {
- /* Need to wait for migration thread (might exit: take ref). */
- struct task_struct *mt = rq->migration_thread;
+ if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) &&
+ likely(cpu_active(dest_cpu)) && migrate_task(p, dest_cpu)) {
+ struct migration_arg arg = { p, dest_cpu };
- get_task_struct(mt);
task_rq_unlock(rq, &flags);
- wake_up_process(mt);
- put_task_struct(mt);
- wait_for_completion(&req.done);
-
+ stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
return;
}
+unlock:
task_rq_unlock(rq, &flags);
}
@@ -3640,23 +3556,9 @@ static inline void schedule_debug(struct task_struct *prev)
static void put_prev_task(struct rq *rq, struct task_struct *prev)
{
- if (prev->state == TASK_RUNNING) {
- u64 runtime = prev->se.sum_exec_runtime;
-
- runtime -= prev->se.prev_sum_exec_runtime;
- runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
-
- /*
- * In order to avoid avg_overlap growing stale when we are
- * indeed overlapping and hence not getting put to sleep, grow
- * the avg_overlap on preemption.
- *
- * We use the average preemption runtime because that
- * correlates to the amount of cache footprint a task can
- * build up.
- */
- update_avg(&prev->se.avg_overlap, runtime);
- }
+ if (prev->se.on_rq)
+ update_rq_clock(rq);
+ rq->skip_clock_update = 0;
prev->sched_class->put_prev_task(rq, prev);
}
@@ -3706,7 +3608,7 @@ need_resched:
preempt_disable();
cpu = smp_processor_id();
rq = cpu_rq(cpu);
- rcu_sched_qs(cpu);
+ rcu_note_context_switch(cpu);
prev = rq->curr;
switch_count = &prev->nivcsw;
@@ -3719,14 +3621,13 @@ need_resched_nonpreemptible:
hrtick_clear(rq);
raw_spin_lock_irq(&rq->lock);
- update_rq_clock(rq);
clear_tsk_need_resched(prev);
if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
if (unlikely(signal_pending_state(prev->state, prev)))
prev->state = TASK_RUNNING;
else
- deactivate_task(rq, prev, 1);
+ deactivate_task(rq, prev, DEQUEUE_SLEEP);
switch_count = &prev->nvcsw;
}
@@ -4276,7 +4177,6 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
BUG_ON(prio < 0 || prio > MAX_PRIO);
rq = task_rq_lock(p, &flags);
- update_rq_clock(rq);
oldprio = p->prio;
prev_class = p->sched_class;
@@ -4297,7 +4197,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
if (running)
p->sched_class->set_curr_task(rq);
if (on_rq) {
- enqueue_task(rq, p, 0, oldprio < prio);
+ enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
check_class_changed(rq, p, prev_class, oldprio, running);
}
@@ -4319,7 +4219,6 @@ void set_user_nice(struct task_struct *p, long nice)
* the task might be in the middle of scheduling on another CPU.
*/
rq = task_rq_lock(p, &flags);
- update_rq_clock(rq);
/*
* The RT priorities are set via sched_setscheduler(), but we still
* allow the 'normal' nice value to be set - but as expected
@@ -4341,7 +4240,7 @@ void set_user_nice(struct task_struct *p, long nice)
delta = p->prio - old_prio;
if (on_rq) {
- enqueue_task(rq, p, 0, false);
+ enqueue_task(rq, p, 0);
/*
* If the task increased its priority or is running and
* lowered its priority, then reschedule its CPU:
@@ -4602,7 +4501,6 @@ recheck:
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
goto recheck;
}
- update_rq_clock(rq);
on_rq = p->se.on_rq;
running = task_current(rq, p);
if (on_rq)
@@ -5339,17 +5237,15 @@ static inline void sched_init_granularity(void)
/*
* This is how migration works:
*
- * 1) we queue a struct migration_req structure in the source CPU's
- * runqueue and wake up that CPU's migration thread.
- * 2) we down() the locked semaphore => thread blocks.
- * 3) migration thread wakes up (implicitly it forces the migrated
- * thread off the CPU)
- * 4) it gets the migration request and checks whether the migrated
- * task is still in the wrong runqueue.
- * 5) if it's in the wrong runqueue then the migration thread removes
+ * 1) we invoke migration_cpu_stop() on the target CPU using
+ * stop_one_cpu().
+ * 2) stopper starts to run (implicitly forcing the migrated thread
+ * off the CPU)
+ * 3) it checks whether the migrated task is still in the wrong runqueue.
+ * 4) if it's in the wrong runqueue then the migration thread removes
* it and puts it into the right queue.
- * 6) migration thread up()s the semaphore.
- * 7) we wake up and the migration is done.
+ * 5) stopper completes and stop_one_cpu() returns and the migration
+ * is done.
*/
/*
@@ -5363,12 +5259,23 @@ static inline void sched_init_granularity(void)
*/
int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
- struct migration_req req;
unsigned long flags;
struct rq *rq;
+ unsigned int dest_cpu;
int ret = 0;
+ /*
+ * Serialize against TASK_WAKING so that ttwu() and wake_up_new_task() can
+ * drop the rq->lock and still rely on ->cpus_allowed.
+ */
+again:
+ while (task_is_waking(p))
+ cpu_relax();
rq = task_rq_lock(p, &flags);
+ if (task_is_waking(p)) {
+ task_rq_unlock(rq, &flags);
+ goto again;
+ }
if (!cpumask_intersects(new_mask, cpu_active_mask)) {
ret = -EINVAL;
@@ -5392,15 +5299,12 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
if (cpumask_test_cpu(task_cpu(p), new_mask))
goto out;
- if (migrate_task(p, cpumask_any_and(cpu_active_mask, new_mask), &req)) {
+ dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
+ if (migrate_task(p, dest_cpu)) {
+ struct migration_arg arg = { p, dest_cpu };
/* Need help from migration thread: drop lock and wait. */
- struct task_struct *mt = rq->migration_thread;
-
- get_task_struct(mt);
task_rq_unlock(rq, &flags);
- wake_up_process(mt);
- put_task_struct(mt);
- wait_for_completion(&req.done);
+ stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
tlb_migrate_finish(p->mm);
return 0;
}
@@ -5458,98 +5362,49 @@ fail:
return ret;
}
-#define RCU_MIGRATION_IDLE 0
-#define RCU_MIGRATION_NEED_QS 1
-#define RCU_MIGRATION_GOT_QS 2
-#define RCU_MIGRATION_MUST_SYNC 3
-
/*
- * migration_thread - this is a highprio system thread that performs
- * thread migration by bumping thread off CPU then 'pushing' onto
- * another runqueue.
+ * migration_cpu_stop - this will be executed by a highprio stopper thread
+ * and performs thread migration by bumping thread off CPU then
+ * 'pushing' onto another runqueue.
*/
-static int migration_thread(void *data)
-{
- int badcpu;
- int cpu = (long)data;
- struct rq *rq;
-
- rq = cpu_rq(cpu);
- BUG_ON(rq->migration_thread != current);
-
- set_current_state(TASK_INTERRUPTIBLE);
- while (!kthread_should_stop()) {
- struct migration_req *req;
- struct list_head *head;
-
- raw_spin_lock_irq(&rq->lock);
-
- if (cpu_is_offline(cpu)) {
- raw_spin_unlock_irq(&rq->lock);
- break;
- }
-
- if (rq->active_balance) {
- active_load_balance(rq, cpu);
- rq->active_balance = 0;
- }
-
- head = &rq->migration_queue;
-
- if (list_empty(head)) {
- raw_spin_unlock_irq(&rq->lock);
- schedule();
- set_current_state(TASK_INTERRUPTIBLE);
- continue;
- }
- req = list_entry(head->next, struct migration_req, list);
- list_del_init(head->next);
-
- if (req->task != NULL) {
- raw_spin_unlock(&rq->lock);
- __migrate_task(req->task, cpu, req->dest_cpu);
- } else if (likely(cpu == (badcpu = smp_processor_id()))) {
- req->dest_cpu = RCU_MIGRATION_GOT_QS;
- raw_spin_unlock(&rq->lock);
- } else {
- req->dest_cpu = RCU_MIGRATION_MUST_SYNC;
- raw_spin_unlock(&rq->lock);
- WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu);
- }
- local_irq_enable();
-
- complete(&req->done);
- }
- __set_current_state(TASK_RUNNING);
-
- return 0;
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-
-static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
+static int migration_cpu_stop(void *data)
{
- int ret;
+ struct migration_arg *arg = data;
+ /*
+ * The original target cpu might have gone down and we might
+ * be on another cpu, but that doesn't matter.
+ */
local_irq_disable();
- ret = __migrate_task(p, src_cpu, dest_cpu);
+ __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
local_irq_enable();
- return ret;
+ return 0;
}
+#ifdef CONFIG_HOTPLUG_CPU
/*
* Figure out where task on dead CPU should go, use force if necessary.
*/
-static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
+void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
{
- int dest_cpu;
+ struct rq *rq = cpu_rq(dead_cpu);
+ int needs_cpu, uninitialized_var(dest_cpu);
+ unsigned long flags;
-again:
- dest_cpu = select_fallback_rq(dead_cpu, p);
+ local_irq_save(flags);
- /* It can have affinity changed while we were choosing. */
- if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu)))
- goto again;
+ raw_spin_lock(&rq->lock);
+ needs_cpu = (task_cpu(p) == dead_cpu) && (p->state != TASK_WAKING);
+ if (needs_cpu)
+ dest_cpu = select_fallback_rq(dead_cpu, p);
+ raw_spin_unlock(&rq->lock);
+ /*
+ * It can only fail if we race with set_cpus_allowed(),
+ * in which case the racer should migrate the task anyway.
+ */
+ if (needs_cpu)
+ __migrate_task(p, dead_cpu, dest_cpu);
+ local_irq_restore(flags);
}
/*
@@ -5613,7 +5468,6 @@ void sched_idle_next(void)
__setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
- update_rq_clock(rq);
activate_task(rq, p, 0);
raw_spin_unlock_irqrestore(&rq->lock, flags);
@@ -5668,7 +5522,6 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
for ( ; ; ) {
if (!rq->nr_running)
break;
- update_rq_clock(rq);
next = pick_next_task(rq);
if (!next)
break;
@@ -5891,35 +5744,20 @@ static void set_rq_offline(struct rq *rq)
static int __cpuinit
migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
- struct task_struct *p;
int cpu = (long)hcpu;
unsigned long flags;
- struct rq *rq;
+ struct rq *rq = cpu_rq(cpu);
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
- p = kthread_create(migration_thread, hcpu, "migration/%d", cpu);
- if (IS_ERR(p))
- return NOTIFY_BAD;
- kthread_bind(p, cpu);
- /* Must be high prio: stop_machine expects to yield to it. */
- rq = task_rq_lock(p, &flags);
- __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
- task_rq_unlock(rq, &flags);
- get_task_struct(p);
- cpu_rq(cpu)->migration_thread = p;
rq->calc_load_update = calc_load_update;
break;
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
- /* Strictly unnecessary, as first user will wake it. */
- wake_up_process(cpu_rq(cpu)->migration_thread);
-
/* Update our root-domain */
- rq = cpu_rq(cpu);
raw_spin_lock_irqsave(&rq->lock, flags);
if (rq->rd) {
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
@@ -5930,61 +5768,24 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
break;
#ifdef CONFIG_HOTPLUG_CPU
- case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
- if (!cpu_rq(cpu)->migration_thread)
- break;
- /* Unbind it from offline cpu so it can run. Fall thru. */
- kthread_bind(cpu_rq(cpu)->migration_thread,
- cpumask_any(cpu_online_mask));
- kthread_stop(cpu_rq(cpu)->migration_thread);
- put_task_struct(cpu_rq(cpu)->migration_thread);
- cpu_rq(cpu)->migration_thread = NULL;
- break;
-
case CPU_DEAD:
case CPU_DEAD_FROZEN:
- cpuset_lock(); /* around calls to cpuset_cpus_allowed_lock() */
migrate_live_tasks(cpu);
- rq = cpu_rq(cpu);
- kthread_stop(rq->migration_thread);
- put_task_struct(rq->migration_thread);
- rq->migration_thread = NULL;
/* Idle task back to normal (off runqueue, low prio) */
raw_spin_lock_irq(&rq->lock);
- update_rq_clock(rq);
deactivate_task(rq, rq->idle, 0);
__setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
rq->idle->sched_class = &idle_sched_class;
migrate_dead_tasks(cpu);
raw_spin_unlock_irq(&rq->lock);
- cpuset_unlock();
migrate_nr_uninterruptible(rq);
BUG_ON(rq->nr_running != 0);
calc_global_load_remove(rq);
- /*
- * No need to migrate the tasks: it was best-effort if
- * they didn't take sched_hotcpu_mutex. Just wake up
- * the requestors.
- */
- raw_spin_lock_irq(&rq->lock);
- while (!list_empty(&rq->migration_queue)) {
- struct migration_req *req;
-
- req = list_entry(rq->migration_queue.next,
- struct migration_req, list);
- list_del_init(&req->list);
- raw_spin_unlock_irq(&rq->lock);
- complete(&req->done);
- raw_spin_lock_irq(&rq->lock);
- }
- raw_spin_unlock_irq(&rq->lock);
break;
case CPU_DYING:
case CPU_DYING_FROZEN:
/* Update our root-domain */
- rq = cpu_rq(cpu);
raw_spin_lock_irqsave(&rq->lock, flags);
if (rq->rd) {
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
@@ -6315,6 +6116,9 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
struct rq *rq = cpu_rq(cpu);
struct sched_domain *tmp;
+ for (tmp = sd; tmp; tmp = tmp->parent)
+ tmp->span_weight = cpumask_weight(sched_domain_span(tmp));
+
/* Remove the sched domains which do not contribute to scheduling. */
for (tmp = sd; tmp; ) {
struct sched_domain *parent = tmp->parent;
@@ -7798,10 +7602,8 @@ void __init sched_init(void)
rq->push_cpu = 0;
rq->cpu = i;
rq->online = 0;
- rq->migration_thread = NULL;
rq->idle_stamp = 0;
rq->avg_idle = 2*sysctl_sched_migration_cost;
- INIT_LIST_HEAD(&rq->migration_queue);
rq_attach_root(rq, &def_root_domain);
#endif
init_rq_hrtick(rq);
@@ -7902,7 +7704,6 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
{
int on_rq;
- update_rq_clock(rq);
on_rq = p->se.on_rq;
if (on_rq)
deactivate_task(rq, p, 0);
@@ -7929,9 +7730,9 @@ void normalize_rt_tasks(void)
p->se.exec_start = 0;
#ifdef CONFIG_SCHEDSTATS
- p->se.wait_start = 0;
- p->se.sleep_start = 0;
- p->se.block_start = 0;
+ p->se.statistics.wait_start = 0;
+ p->se.statistics.sleep_start = 0;
+ p->se.statistics.block_start = 0;
#endif
if (!rt_task(p)) {
@@ -7958,9 +7759,9 @@ void normalize_rt_tasks(void)
#endif /* CONFIG_MAGIC_SYSRQ */
-#ifdef CONFIG_IA64
+#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
/*
- * These functions are only useful for the IA64 MCA handling.
+ * These functions are only useful for the IA64 MCA handling, or kdb.
*
* They can only be called when the whole system has been
* stopped - every CPU needs to be quiescent, and no scheduling
@@ -7980,6 +7781,9 @@ struct task_struct *curr_task(int cpu)
return cpu_curr(cpu);
}
+#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
+
+#ifdef CONFIG_IA64
/**
* set_curr_task - set the current task for a given cpu.
* @cpu: the processor in question.
@@ -8264,8 +8068,6 @@ void sched_move_task(struct task_struct *tsk)
rq = task_rq_lock(tsk, &flags);
- update_rq_clock(rq);
-
running = task_current(rq, tsk);
on_rq = tsk->se.on_rq;
@@ -8284,7 +8086,7 @@ void sched_move_task(struct task_struct *tsk)
if (unlikely(running))
tsk->sched_class->set_curr_task(rq);
if (on_rq)
- enqueue_task(rq, tsk, 0, false);
+ enqueue_task(rq, tsk, 0);
task_rq_unlock(rq, &flags);
}
@@ -9098,43 +8900,32 @@ struct cgroup_subsys cpuacct_subsys = {
#ifndef CONFIG_SMP
-int rcu_expedited_torture_stats(char *page)
-{
- return 0;
-}
-EXPORT_SYMBOL_GPL(rcu_expedited_torture_stats);
-
void synchronize_sched_expedited(void)
{
+ barrier();
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
#else /* #ifndef CONFIG_SMP */
-static DEFINE_PER_CPU(struct migration_req, rcu_migration_req);
-static DEFINE_MUTEX(rcu_sched_expedited_mutex);
+static atomic_t synchronize_sched_expedited_count = ATOMIC_INIT(0);
-#define RCU_EXPEDITED_STATE_POST -2
-#define RCU_EXPEDITED_STATE_IDLE -1
-
-static int rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
-
-int rcu_expedited_torture_stats(char *page)
+static int synchronize_sched_expedited_cpu_stop(void *data)
{
- int cnt = 0;
- int cpu;
-
- cnt += sprintf(&page[cnt], "state: %d /", rcu_expedited_state);
- for_each_online_cpu(cpu) {
- cnt += sprintf(&page[cnt], " %d:%d",
- cpu, per_cpu(rcu_migration_req, cpu).dest_cpu);
- }
- cnt += sprintf(&page[cnt], "\n");
- return cnt;
+ /*
+ * There must be a full memory barrier on each affected CPU
+ * between the time that try_stop_cpus() is called and the
+ * time that it returns.
+ *
+ * In the current initial implementation of cpu_stop, the
+ * above condition is already met when control reaches
+ * this point and the following smp_mb() is not strictly
+ * necessary. Do smp_mb() anyway for documentation and
+ * robustness against future implementation changes.
+ */
+ smp_mb(); /* See above comment block. */
+ return 0;
}
-EXPORT_SYMBOL_GPL(rcu_expedited_torture_stats);
-
-static long synchronize_sched_expedited_count;
/*
* Wait for an rcu-sched grace period to elapse, but use "big hammer"
@@ -9148,18 +8939,14 @@ static long synchronize_sched_expedited_count;
*/
void synchronize_sched_expedited(void)
{
- int cpu;
- unsigned long flags;
- bool need_full_sync = 0;
- struct rq *rq;
- struct migration_req *req;
- long snap;
- int trycount = 0;
+ int snap, trycount = 0;
smp_mb(); /* ensure prior mod happens before capturing snap. */
- snap = ACCESS_ONCE(synchronize_sched_expedited_count) + 1;
+ snap = atomic_read(&synchronize_sched_expedited_count) + 1;
get_online_cpus();
- while (!mutex_trylock(&rcu_sched_expedited_mutex)) {
+ while (try_stop_cpus(cpu_online_mask,
+ synchronize_sched_expedited_cpu_stop,
+ NULL) == -EAGAIN) {
put_online_cpus();
if (trycount++ < 10)
udelay(trycount * num_online_cpus());
@@ -9167,41 +8954,15 @@ void synchronize_sched_expedited(void)
synchronize_sched();
return;
}
- if (ACCESS_ONCE(synchronize_sched_expedited_count) - snap > 0) {
+ if (atomic_read(&synchronize_sched_expedited_count) - snap > 0) {
smp_mb(); /* ensure test happens before caller kfree */
return;
}
get_online_cpus();
}
- rcu_expedited_state = RCU_EXPEDITED_STATE_POST;
- for_each_online_cpu(cpu) {
- rq = cpu_rq(cpu);
- req = &per_cpu(rcu_migration_req, cpu);
- init_completion(&req->done);
- req->task = NULL;
- req->dest_cpu = RCU_MIGRATION_NEED_QS;
- raw_spin_lock_irqsave(&rq->lock, flags);
- list_add(&req->list, &rq->migration_queue);
- raw_spin_unlock_irqrestore(&rq->lock, flags);
- wake_up_process(rq->migration_thread);
- }
- for_each_online_cpu(cpu) {
- rcu_expedited_state = cpu;
- req = &per_cpu(rcu_migration_req, cpu);
- rq = cpu_rq(cpu);
- wait_for_completion(&req->done);
- raw_spin_lock_irqsave(&rq->lock, flags);
- if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC))
- need_full_sync = 1;
- req->dest_cpu = RCU_MIGRATION_IDLE;
- raw_spin_unlock_irqrestore(&rq->lock, flags);
- }
- rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
- synchronize_sched_expedited_count++;
- mutex_unlock(&rcu_sched_expedited_mutex);
+ atomic_inc(&synchronize_sched_expedited_count);
+ smp_mb__after_atomic_inc(); /* ensure post-GP actions seen after GP. */
put_online_cpus();
- if (need_full_sync)
- synchronize_sched();
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
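With the migration-thread machinery gone, the expedited grace period simply forces every online CPU through the stopper callback above: once try_stop_cpus() succeeds, each CPU has run synchronize_sched_expedited_cpu_stop() with preemption disabled, which is the quiescent state the caller is waiting for. A minimal, illustrative caller sketch follows; the struct and function names are made up for the example and are not part of this patch:

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	struct list_head list;
	int val;
};

/* unlink under the caller's update-side lock, then free expedited */
static void remove_foo(struct foo *p)
{
	list_del_rcu(&p->list);
	synchronize_sched_expedited();	/* all preempt-off readers are done */
	kfree(p);
}

The trade-off is the usual one for expedited primitives: lower latency for the updater at the cost of disturbing every online CPU.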
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index 5b496132c28a..906a0f718cb3 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -41,6 +41,7 @@ unsigned long long __attribute__((weak)) sched_clock(void)
return (unsigned long long)(jiffies - INITIAL_JIFFIES)
* (NSEC_PER_SEC / HZ);
}
+EXPORT_SYMBOL_GPL(sched_clock);
static __read_mostly int sched_clock_running;
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 19be00ba6123..87a330a7185f 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -70,16 +70,16 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu,
PN(se->vruntime);
PN(se->sum_exec_runtime);
#ifdef CONFIG_SCHEDSTATS
- PN(se->wait_start);
- PN(se->sleep_start);
- PN(se->block_start);
- PN(se->sleep_max);
- PN(se->block_max);
- PN(se->exec_max);
- PN(se->slice_max);
- PN(se->wait_max);
- PN(se->wait_sum);
- P(se->wait_count);
+ PN(se->statistics.wait_start);
+ PN(se->statistics.sleep_start);
+ PN(se->statistics.block_start);
+ PN(se->statistics.sleep_max);
+ PN(se->statistics.block_max);
+ PN(se->statistics.exec_max);
+ PN(se->statistics.slice_max);
+ PN(se->statistics.wait_max);
+ PN(se->statistics.wait_sum);
+ P(se->statistics.wait_count);
#endif
P(se->load.weight);
#undef PN
@@ -104,7 +104,7 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
SPLIT_NS(p->se.vruntime),
SPLIT_NS(p->se.sum_exec_runtime),
- SPLIT_NS(p->se.sum_sleep_runtime));
+ SPLIT_NS(p->se.statistics.sum_sleep_runtime));
#else
SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
@@ -175,11 +175,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
task_group_path(tg, path, sizeof(path));
SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, path);
-#elif defined(CONFIG_USER_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED)
- {
- uid_t uid = cfs_rq->tg->uid;
- SEQ_printf(m, "\ncfs_rq[%d] for UID: %u\n", cpu, uid);
- }
#else
SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
#endif
@@ -409,40 +404,38 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
PN(se.exec_start);
PN(se.vruntime);
PN(se.sum_exec_runtime);
- PN(se.avg_overlap);
- PN(se.avg_wakeup);
nr_switches = p->nvcsw + p->nivcsw;
#ifdef CONFIG_SCHEDSTATS
- PN(se.wait_start);
- PN(se.sleep_start);
- PN(se.block_start);
- PN(se.sleep_max);
- PN(se.block_max);
- PN(se.exec_max);
- PN(se.slice_max);
- PN(se.wait_max);
- PN(se.wait_sum);
- P(se.wait_count);
- PN(se.iowait_sum);
- P(se.iowait_count);
+ PN(se.statistics.wait_start);
+ PN(se.statistics.sleep_start);
+ PN(se.statistics.block_start);
+ PN(se.statistics.sleep_max);
+ PN(se.statistics.block_max);
+ PN(se.statistics.exec_max);
+ PN(se.statistics.slice_max);
+ PN(se.statistics.wait_max);
+ PN(se.statistics.wait_sum);
+ P(se.statistics.wait_count);
+ PN(se.statistics.iowait_sum);
+ P(se.statistics.iowait_count);
P(sched_info.bkl_count);
P(se.nr_migrations);
- P(se.nr_migrations_cold);
- P(se.nr_failed_migrations_affine);
- P(se.nr_failed_migrations_running);
- P(se.nr_failed_migrations_hot);
- P(se.nr_forced_migrations);
- P(se.nr_wakeups);
- P(se.nr_wakeups_sync);
- P(se.nr_wakeups_migrate);
- P(se.nr_wakeups_local);
- P(se.nr_wakeups_remote);
- P(se.nr_wakeups_affine);
- P(se.nr_wakeups_affine_attempts);
- P(se.nr_wakeups_passive);
- P(se.nr_wakeups_idle);
+ P(se.statistics.nr_migrations_cold);
+ P(se.statistics.nr_failed_migrations_affine);
+ P(se.statistics.nr_failed_migrations_running);
+ P(se.statistics.nr_failed_migrations_hot);
+ P(se.statistics.nr_forced_migrations);
+ P(se.statistics.nr_wakeups);
+ P(se.statistics.nr_wakeups_sync);
+ P(se.statistics.nr_wakeups_migrate);
+ P(se.statistics.nr_wakeups_local);
+ P(se.statistics.nr_wakeups_remote);
+ P(se.statistics.nr_wakeups_affine);
+ P(se.statistics.nr_wakeups_affine_attempts);
+ P(se.statistics.nr_wakeups_passive);
+ P(se.statistics.nr_wakeups_idle);
{
u64 avg_atom, avg_per_cpu;
@@ -493,31 +486,6 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
- p->se.wait_max = 0;
- p->se.wait_sum = 0;
- p->se.wait_count = 0;
- p->se.iowait_sum = 0;
- p->se.iowait_count = 0;
- p->se.sleep_max = 0;
- p->se.sum_sleep_runtime = 0;
- p->se.block_max = 0;
- p->se.exec_max = 0;
- p->se.slice_max = 0;
- p->se.nr_migrations = 0;
- p->se.nr_migrations_cold = 0;
- p->se.nr_failed_migrations_affine = 0;
- p->se.nr_failed_migrations_running = 0;
- p->se.nr_failed_migrations_hot = 0;
- p->se.nr_forced_migrations = 0;
- p->se.nr_wakeups = 0;
- p->se.nr_wakeups_sync = 0;
- p->se.nr_wakeups_migrate = 0;
- p->se.nr_wakeups_local = 0;
- p->se.nr_wakeups_remote = 0;
- p->se.nr_wakeups_affine = 0;
- p->se.nr_wakeups_affine_attempts = 0;
- p->se.nr_wakeups_passive = 0;
- p->se.nr_wakeups_idle = 0;
- p->sched_info.bkl_count = 0;
+ memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 5a5ea2cd924f..217e4a9393e4 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -35,8 +35,8 @@
* (to see the precise effective timeslice length of your workload,
* run vmstat and monitor the context-switches (cs) field)
*/
-unsigned int sysctl_sched_latency = 5000000ULL;
-unsigned int normalized_sysctl_sched_latency = 5000000ULL;
+unsigned int sysctl_sched_latency = 6000000ULL;
+unsigned int normalized_sysctl_sched_latency = 6000000ULL;
/*
* The initial- and re-scaling of tunables is configurable
@@ -52,15 +52,15 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling
/*
* Minimal preemption granularity for CPU-bound tasks:
- * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
+ * (default: 2 msec * (1 + ilog(ncpus)), units: nanoseconds)
*/
-unsigned int sysctl_sched_min_granularity = 1000000ULL;
-unsigned int normalized_sysctl_sched_min_granularity = 1000000ULL;
+unsigned int sysctl_sched_min_granularity = 2000000ULL;
+unsigned int normalized_sysctl_sched_min_granularity = 2000000ULL;
/*
* is kept at sysctl_sched_latency / sysctl_sched_min_granularity
*/
-static unsigned int sched_nr_latency = 5;
+static unsigned int sched_nr_latency = 3;
/*
* After fork, child runs first. If set to 0 (default) then
@@ -505,7 +505,8 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
{
unsigned long delta_exec_weighted;
- schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));
+ schedstat_set(curr->statistics.exec_max,
+ max((u64)delta_exec, curr->statistics.exec_max));
curr->sum_exec_runtime += delta_exec;
schedstat_add(cfs_rq, exec_clock, delta_exec);
@@ -548,7 +549,7 @@ static void update_curr(struct cfs_rq *cfs_rq)
static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
+ schedstat_set(se->statistics.wait_start, rq_of(cfs_rq)->clock);
}
/*
@@ -567,18 +568,18 @@ static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- schedstat_set(se->wait_max, max(se->wait_max,
- rq_of(cfs_rq)->clock - se->wait_start));
- schedstat_set(se->wait_count, se->wait_count + 1);
- schedstat_set(se->wait_sum, se->wait_sum +
- rq_of(cfs_rq)->clock - se->wait_start);
+ schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
+ rq_of(cfs_rq)->clock - se->statistics.wait_start));
+ schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
+ schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
+ rq_of(cfs_rq)->clock - se->statistics.wait_start);
#ifdef CONFIG_SCHEDSTATS
if (entity_is_task(se)) {
trace_sched_stat_wait(task_of(se),
- rq_of(cfs_rq)->clock - se->wait_start);
+ rq_of(cfs_rq)->clock - se->statistics.wait_start);
}
#endif
- schedstat_set(se->wait_start, 0);
+ schedstat_set(se->statistics.wait_start, 0);
}
static inline void
@@ -657,39 +658,39 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
if (entity_is_task(se))
tsk = task_of(se);
- if (se->sleep_start) {
- u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
+ if (se->statistics.sleep_start) {
+ u64 delta = rq_of(cfs_rq)->clock - se->statistics.sleep_start;
if ((s64)delta < 0)
delta = 0;
- if (unlikely(delta > se->sleep_max))
- se->sleep_max = delta;
+ if (unlikely(delta > se->statistics.sleep_max))
+ se->statistics.sleep_max = delta;
- se->sleep_start = 0;
- se->sum_sleep_runtime += delta;
+ se->statistics.sleep_start = 0;
+ se->statistics.sum_sleep_runtime += delta;
if (tsk) {
account_scheduler_latency(tsk, delta >> 10, 1);
trace_sched_stat_sleep(tsk, delta);
}
}
- if (se->block_start) {
- u64 delta = rq_of(cfs_rq)->clock - se->block_start;
+ if (se->statistics.block_start) {
+ u64 delta = rq_of(cfs_rq)->clock - se->statistics.block_start;
if ((s64)delta < 0)
delta = 0;
- if (unlikely(delta > se->block_max))
- se->block_max = delta;
+ if (unlikely(delta > se->statistics.block_max))
+ se->statistics.block_max = delta;
- se->block_start = 0;
- se->sum_sleep_runtime += delta;
+ se->statistics.block_start = 0;
+ se->statistics.sum_sleep_runtime += delta;
if (tsk) {
if (tsk->in_iowait) {
- se->iowait_sum += delta;
- se->iowait_count++;
+ se->statistics.iowait_sum += delta;
+ se->statistics.iowait_count++;
trace_sched_stat_iowait(tsk, delta);
}
@@ -737,20 +738,10 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
vruntime += sched_vslice(cfs_rq, se);
/* sleeps up to a single latency don't count. */
- if (!initial && sched_feat(FAIR_SLEEPERS)) {
+ if (!initial) {
unsigned long thresh = sysctl_sched_latency;
/*
- * Convert the sleeper threshold into virtual time.
- * SCHED_IDLE is a special sub-class. We care about
- * fairness only relative to other SCHED_IDLE tasks,
- * all of which have the same weight.
- */
- if (sched_feat(NORMALIZED_SLEEPER) && (!entity_is_task(se) ||
- task_of(se)->policy != SCHED_IDLE))
- thresh = calc_delta_fair(thresh, se);
-
- /*
* Halve their sleep time's effect, to allow
* for a gentler effect of sleepers:
*/
@@ -766,9 +757,6 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
se->vruntime = vruntime;
}
-#define ENQUEUE_WAKEUP 1
-#define ENQUEUE_MIGRATE 2
-
static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
@@ -776,7 +764,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
* Update the normalized vruntime before updating min_vruntime
 * through calling update_curr().
*/
- if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATE))
+ if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
se->vruntime += cfs_rq->min_vruntime;
/*
@@ -812,7 +800,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
}
static void
-dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
+dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
/*
* Update run-time statistics of the 'current'.
@@ -820,15 +808,15 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
update_curr(cfs_rq);
update_stats_dequeue(cfs_rq, se);
- if (sleep) {
+ if (flags & DEQUEUE_SLEEP) {
#ifdef CONFIG_SCHEDSTATS
if (entity_is_task(se)) {
struct task_struct *tsk = task_of(se);
if (tsk->state & TASK_INTERRUPTIBLE)
- se->sleep_start = rq_of(cfs_rq)->clock;
+ se->statistics.sleep_start = rq_of(cfs_rq)->clock;
if (tsk->state & TASK_UNINTERRUPTIBLE)
- se->block_start = rq_of(cfs_rq)->clock;
+ se->statistics.block_start = rq_of(cfs_rq)->clock;
}
#endif
}
@@ -845,7 +833,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
* update can refer to the ->curr item and we need to reflect this
* movement in our normalized position.
*/
- if (!sleep)
+ if (!(flags & DEQUEUE_SLEEP))
se->vruntime -= cfs_rq->min_vruntime;
}
@@ -912,7 +900,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
* when there are only lesser-weight tasks around):
*/
if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
- se->slice_max = max(se->slice_max,
+ se->statistics.slice_max = max(se->statistics.slice_max,
se->sum_exec_runtime - se->prev_sum_exec_runtime);
}
#endif
@@ -1054,16 +1042,10 @@ static inline void hrtick_update(struct rq *rq)
* then put the task into the rbtree:
*/
static void
-enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup, bool head)
+enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
{
struct cfs_rq *cfs_rq;
struct sched_entity *se = &p->se;
- int flags = 0;
-
- if (wakeup)
- flags |= ENQUEUE_WAKEUP;
- if (p->state == TASK_WAKING)
- flags |= ENQUEUE_MIGRATE;
for_each_sched_entity(se) {
if (se->on_rq)
@@ -1081,18 +1063,18 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup, bool head)
* decreased. We remove the task from the rbtree and
* update the fair scheduling stats:
*/
-static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
+static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
{
struct cfs_rq *cfs_rq;
struct sched_entity *se = &p->se;
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
- dequeue_entity(cfs_rq, se, sleep);
+ dequeue_entity(cfs_rq, se, flags);
/* Don't dequeue parent if it has other entities besides us */
if (cfs_rq->load.weight)
break;
- sleep = 1;
+ flags |= DEQUEUE_SLEEP;
}
hrtick_update(rq);
@@ -1240,7 +1222,6 @@ static inline unsigned long effective_load(struct task_group *tg, int cpu,
static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
{
- struct task_struct *curr = current;
unsigned long this_load, load;
int idx, this_cpu, prev_cpu;
unsigned long tl_per_task;
@@ -1255,18 +1236,6 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
load = source_load(prev_cpu, idx);
this_load = target_load(this_cpu, idx);
- if (sync) {
- if (sched_feat(SYNC_LESS) &&
- (curr->se.avg_overlap > sysctl_sched_migration_cost ||
- p->se.avg_overlap > sysctl_sched_migration_cost))
- sync = 0;
- } else {
- if (sched_feat(SYNC_MORE) &&
- (curr->se.avg_overlap < sysctl_sched_migration_cost &&
- p->se.avg_overlap < sysctl_sched_migration_cost))
- sync = 1;
- }
-
/*
* If sync wakeup then subtract the (maximum possible)
* effect of the currently running task from the load
@@ -1306,7 +1275,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
if (sync && balanced)
return 1;
- schedstat_inc(p, se.nr_wakeups_affine_attempts);
+ schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
tl_per_task = cpu_avg_load_per_task(this_cpu);
if (balanced ||
@@ -1318,7 +1287,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
* there is no bad imbalance.
*/
schedstat_inc(sd, ttwu_move_affine);
- schedstat_inc(p, se.nr_wakeups_affine);
+ schedstat_inc(p, se.statistics.nr_wakeups_affine);
return 1;
}
@@ -1406,29 +1375,48 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
/*
* Try and locate an idle CPU in the sched_domain.
*/
-static int
-select_idle_sibling(struct task_struct *p, struct sched_domain *sd, int target)
+static int select_idle_sibling(struct task_struct *p, int target)
{
int cpu = smp_processor_id();
int prev_cpu = task_cpu(p);
+ struct sched_domain *sd;
int i;
/*
- * If this domain spans both cpu and prev_cpu (see the SD_WAKE_AFFINE
- * test in select_task_rq_fair) and the prev_cpu is idle then that's
- * always a better target than the current cpu.
+ * If the task is going to be woken-up on this cpu and if it is
+ * already idle, then it is the right target.
*/
- if (target == cpu && !cpu_rq(prev_cpu)->cfs.nr_running)
+ if (target == cpu && idle_cpu(cpu))
+ return cpu;
+
+ /*
+ * If the task is going to be woken-up on the cpu where it previously
+ * ran and if it is currently idle, then it is the right target.
+ */
+ if (target == prev_cpu && idle_cpu(prev_cpu))
return prev_cpu;
/*
- * Otherwise, iterate the domain and find an elegible idle cpu.
+ * Otherwise, iterate the domains and find an eligible idle cpu.
*/
- for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) {
- if (!cpu_rq(i)->cfs.nr_running) {
- target = i;
+ for_each_domain(target, sd) {
+ if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
break;
+
+ for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) {
+ if (idle_cpu(i)) {
+ target = i;
+ break;
+ }
}
+
+ /*
+ * Let's stop looking for an idle sibling when we've reached
+ * the domain that spans the current cpu and prev_cpu.
+ */
+ if (cpumask_test_cpu(cpu, sched_domain_span(sd)) &&
+ cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
+ break;
}
return target;
@@ -1445,7 +1433,8 @@ select_idle_sibling(struct task_struct *p, struct sched_domain *sd, int target)
*
* preempt must be disabled.
*/
-static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
+static int
+select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_flags)
{
struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
int cpu = smp_processor_id();
@@ -1456,8 +1445,7 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
int sync = wake_flags & WF_SYNC;
if (sd_flag & SD_BALANCE_WAKE) {
- if (sched_feat(AFFINE_WAKEUPS) &&
- cpumask_test_cpu(cpu, &p->cpus_allowed))
+ if (cpumask_test_cpu(cpu, &p->cpus_allowed))
want_affine = 1;
new_cpu = prev_cpu;
}
@@ -1491,34 +1479,13 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
}
/*
- * While iterating the domains looking for a spanning
- * WAKE_AFFINE domain, adjust the affine target to any idle cpu
- * in cache sharing domains along the way.
+ * If both cpu and prev_cpu are part of this domain,
+ * cpu is a valid SD_WAKE_AFFINE target.
*/
- if (want_affine) {
- int target = -1;
-
- /*
- * If both cpu and prev_cpu are part of this domain,
- * cpu is a valid SD_WAKE_AFFINE target.
- */
- if (cpumask_test_cpu(prev_cpu, sched_domain_span(tmp)))
- target = cpu;
-
- /*
- * If there's an idle sibling in this domain, make that
- * the wake_affine target instead of the current cpu.
- */
- if (tmp->flags & SD_SHARE_PKG_RESOURCES)
- target = select_idle_sibling(p, tmp, target);
-
- if (target >= 0) {
- if (tmp->flags & SD_WAKE_AFFINE) {
- affine_sd = tmp;
- want_affine = 0;
- }
- cpu = target;
- }
+ if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
+ cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
+ affine_sd = tmp;
+ want_affine = 0;
}
if (!want_sd && !want_affine)
@@ -1531,22 +1498,29 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
sd = tmp;
}
+#ifdef CONFIG_FAIR_GROUP_SCHED
if (sched_feat(LB_SHARES_UPDATE)) {
/*
* Pick the largest domain to update shares over
*/
tmp = sd;
- if (affine_sd && (!tmp ||
- cpumask_weight(sched_domain_span(affine_sd)) >
- cpumask_weight(sched_domain_span(sd))))
+ if (affine_sd && (!tmp || affine_sd->span_weight > sd->span_weight))
tmp = affine_sd;
- if (tmp)
+ if (tmp) {
+ raw_spin_unlock(&rq->lock);
update_shares(tmp);
+ raw_spin_lock(&rq->lock);
+ }
}
+#endif
- if (affine_sd && wake_affine(affine_sd, p, sync))
- return cpu;
+ if (affine_sd) {
+ if (cpu == prev_cpu || wake_affine(affine_sd, p, sync))
+ return select_idle_sibling(p, cpu);
+ else
+ return select_idle_sibling(p, prev_cpu);
+ }
while (sd) {
int load_idx = sd->forkexec_idx;
@@ -1576,10 +1550,10 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
/* Now try balancing at a lower domain level of new_cpu */
cpu = new_cpu;
- weight = cpumask_weight(sched_domain_span(sd));
+ weight = sd->span_weight;
sd = NULL;
for_each_domain(cpu, tmp) {
- if (weight <= cpumask_weight(sched_domain_span(tmp)))
+ if (weight <= tmp->span_weight)
break;
if (tmp->flags & sd_flag)
sd = tmp;
@@ -1591,63 +1565,26 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
}
#endif /* CONFIG_SMP */
-/*
- * Adaptive granularity
- *
- * se->avg_wakeup gives the average time a task runs until it does a wakeup,
- * with the limit of wakeup_gran -- when it never does a wakeup.
- *
- * So the smaller avg_wakeup is the faster we want this task to preempt,
- * but we don't want to treat the preemptee unfairly and therefore allow it
- * to run for at least the amount of time we'd like to run.
- *
- * NOTE: we use 2*avg_wakeup to increase the probability of actually doing one
- *
- * NOTE: we use *nr_running to scale with load, this nicely matches the
- * degrading latency on load.
- */
-static unsigned long
-adaptive_gran(struct sched_entity *curr, struct sched_entity *se)
-{
- u64 this_run = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
- u64 expected_wakeup = 2*se->avg_wakeup * cfs_rq_of(se)->nr_running;
- u64 gran = 0;
-
- if (this_run < expected_wakeup)
- gran = expected_wakeup - this_run;
-
- return min_t(s64, gran, sysctl_sched_wakeup_granularity);
-}
-
static unsigned long
wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
{
unsigned long gran = sysctl_sched_wakeup_granularity;
- if (cfs_rq_of(curr)->curr && sched_feat(ADAPTIVE_GRAN))
- gran = adaptive_gran(curr, se);
-
/*
* Since its curr running now, convert the gran from real-time
* to virtual-time in his units.
+ *
+ * By using 'se' instead of 'curr' we penalize light tasks, so
+ * they get preempted easier. That is, if 'se' < 'curr' then
+ * the resulting gran will be larger, therefore penalizing the
+ * lighter, if otoh 'se' > 'curr' then the resulting gran will
+ * be smaller, again penalizing the lighter task.
+ *
+ * This is especially important for buddies when the leftmost
+ * task is higher priority than the buddy.
*/
- if (sched_feat(ASYM_GRAN)) {
- /*
- * By using 'se' instead of 'curr' we penalize light tasks, so
- * they get preempted easier. That is, if 'se' < 'curr' then
- * the resulting gran will be larger, therefore penalizing the
- * lighter, if otoh 'se' > 'curr' then the resulting gran will
- * be smaller, again penalizing the lighter task.
- *
- * This is especially important for buddies when the leftmost
- * task is higher priority than the buddy.
- */
- if (unlikely(se->load.weight != NICE_0_LOAD))
- gran = calc_delta_fair(gran, se);
- } else {
- if (unlikely(curr->load.weight != NICE_0_LOAD))
- gran = calc_delta_fair(gran, curr);
- }
+ if (unlikely(se->load.weight != NICE_0_LOAD))
+ gran = calc_delta_fair(gran, se);
return gran;
}
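Concretely, calc_delta_fair() scales the granularity by NICE_0_LOAD / se->load.weight, so a lighter waking task has to build up a proportionally larger vruntime lead before it may preempt. A rough worked example, assuming the stock 1 ms wakeup granularity and a NICE_0_LOAD of 1024:

	nice-0 waker:       1 ms * 1024 / 1024 = 1 ms of vruntime lead required
	half-weight waker:  1 ms * 1024 /  512 = 2 ms of vruntime lead required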
@@ -1705,7 +1642,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
struct task_struct *curr = rq->curr;
struct sched_entity *se = &curr->se, *pse = &p->se;
struct cfs_rq *cfs_rq = task_cfs_rq(curr);
- int sync = wake_flags & WF_SYNC;
int scale = cfs_rq->nr_running >= sched_nr_latency;
if (unlikely(rt_prio(p->prio)))
@@ -1738,14 +1674,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
if (unlikely(curr->policy == SCHED_IDLE))
goto preempt;
- if (sched_feat(WAKEUP_SYNC) && sync)
- goto preempt;
-
- if (sched_feat(WAKEUP_OVERLAP) &&
- se->avg_overlap < sysctl_sched_migration_cost &&
- pse->avg_overlap < sysctl_sched_migration_cost)
- goto preempt;
-
if (!sched_feat(WAKEUP_PREEMPT))
return;
@@ -1844,13 +1772,13 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
* 3) are cache-hot on their current CPU.
*/
if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) {
- schedstat_inc(p, se.nr_failed_migrations_affine);
+ schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
return 0;
}
*all_pinned = 0;
if (task_running(rq, p)) {
- schedstat_inc(p, se.nr_failed_migrations_running);
+ schedstat_inc(p, se.statistics.nr_failed_migrations_running);
return 0;
}
@@ -1866,14 +1794,14 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
#ifdef CONFIG_SCHEDSTATS
if (tsk_cache_hot) {
schedstat_inc(sd, lb_hot_gained[idle]);
- schedstat_inc(p, se.nr_forced_migrations);
+ schedstat_inc(p, se.statistics.nr_forced_migrations);
}
#endif
return 1;
}
if (tsk_cache_hot) {
- schedstat_inc(p, se.nr_failed_migrations_hot);
+ schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
return 0;
}
return 1;
@@ -2311,7 +2239,7 @@ unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
{
- unsigned long weight = cpumask_weight(sched_domain_span(sd));
+ unsigned long weight = sd->span_weight;
unsigned long smt_gain = sd->smt_gain;
smt_gain /= weight;
@@ -2344,7 +2272,7 @@ unsigned long scale_rt_power(int cpu)
static void update_cpu_power(struct sched_domain *sd, int cpu)
{
- unsigned long weight = cpumask_weight(sched_domain_span(sd));
+ unsigned long weight = sd->span_weight;
unsigned long power = SCHED_LOAD_SCALE;
struct sched_group *sdg = sd->groups;
@@ -2870,6 +2798,8 @@ static int need_active_balance(struct sched_domain *sd, int sd_idle, int idle)
return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
}
+static int active_load_balance_cpu_stop(void *data);
+
/*
* Check this_cpu to ensure it is balanced within domain. Attempt to move
* tasks if there is an imbalance.
@@ -2959,8 +2889,9 @@ redo:
if (need_active_balance(sd, sd_idle, idle)) {
raw_spin_lock_irqsave(&busiest->lock, flags);
- /* don't kick the migration_thread, if the curr
- * task on busiest cpu can't be moved to this_cpu
+ /* don't kick the active_load_balance_cpu_stop,
+ * if the curr task on busiest cpu can't be
+ * moved to this_cpu
*/
if (!cpumask_test_cpu(this_cpu,
&busiest->curr->cpus_allowed)) {
@@ -2970,14 +2901,22 @@ redo:
goto out_one_pinned;
}
+ /*
+ * ->active_balance synchronizes accesses to
+ * ->active_balance_work. Once set, it's cleared
+ * only after active load balance is finished.
+ */
if (!busiest->active_balance) {
busiest->active_balance = 1;
busiest->push_cpu = this_cpu;
active_balance = 1;
}
raw_spin_unlock_irqrestore(&busiest->lock, flags);
+
if (active_balance)
- wake_up_process(busiest->migration_thread);
+ stop_one_cpu_nowait(cpu_of(busiest),
+ active_load_balance_cpu_stop, busiest,
+ &busiest->active_balance_work);
/*
* We've kicked active balancing, reset the failure
@@ -3084,24 +3023,29 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
}
/*
- * active_load_balance is run by migration threads. It pushes running tasks
- * off the busiest CPU onto idle CPUs. It requires at least 1 task to be
- * running on each physical CPU where possible, and avoids physical /
- * logical imbalances.
- *
- * Called with busiest_rq locked.
+ * active_load_balance_cpu_stop is run by cpu stopper. It pushes
+ * running tasks off the busiest CPU onto idle CPUs. It requires at
+ * least 1 task to be running on each physical CPU where possible, and
+ * avoids physical / logical imbalances.
*/
-static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
+static int active_load_balance_cpu_stop(void *data)
{
+ struct rq *busiest_rq = data;
+ int busiest_cpu = cpu_of(busiest_rq);
int target_cpu = busiest_rq->push_cpu;
+ struct rq *target_rq = cpu_rq(target_cpu);
struct sched_domain *sd;
- struct rq *target_rq;
+
+ raw_spin_lock_irq(&busiest_rq->lock);
+
+ /* make sure the requested cpu hasn't gone down in the meantime */
+ if (unlikely(busiest_cpu != smp_processor_id() ||
+ !busiest_rq->active_balance))
+ goto out_unlock;
/* Is there any task to move? */
if (busiest_rq->nr_running <= 1)
- return;
-
- target_rq = cpu_rq(target_cpu);
+ goto out_unlock;
/*
* This condition is "impossible", if it occurs
@@ -3112,8 +3056,6 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
/* move a task from busiest_rq to target_rq */
double_lock_balance(busiest_rq, target_rq);
- update_rq_clock(busiest_rq);
- update_rq_clock(target_rq);
/* Search for an sd spanning us and the target CPU. */
for_each_domain(target_cpu, sd) {
@@ -3132,6 +3074,10 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
schedstat_inc(sd, alb_failed);
}
double_unlock_balance(busiest_rq, target_rq);
+out_unlock:
+ busiest_rq->active_balance = 0;
+ raw_spin_unlock_irq(&busiest_rq->lock);
+ return 0;
}
#ifdef CONFIG_NO_HZ
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index d5059fd761d9..83c66e8ad3ee 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -1,11 +1,4 @@
/*
- * Disregards a certain amount of sleep time (sched_latency_ns) and
- * considers the task to be running during that period. This gives it
- * a service deficit on wakeup, allowing it to run sooner.
- */
-SCHED_FEAT(FAIR_SLEEPERS, 1)
-
-/*
* Only give sleepers 50% of their service deficit. This allows
* them to run sooner, but does not allow tons of sleepers to
* rip the spread apart.
@@ -13,13 +6,6 @@ SCHED_FEAT(FAIR_SLEEPERS, 1)
SCHED_FEAT(GENTLE_FAIR_SLEEPERS, 1)
/*
- * By not normalizing the sleep time, heavy tasks get an effective
- * longer period, and lighter task an effective shorter period they
- * are considered running.
- */
-SCHED_FEAT(NORMALIZED_SLEEPER, 0)
-
-/*
* Place new tasks ahead so that they do not starve already running
* tasks
*/
@@ -31,37 +17,6 @@ SCHED_FEAT(START_DEBIT, 1)
SCHED_FEAT(WAKEUP_PREEMPT, 1)
/*
- * Compute wakeup_gran based on task behaviour, clipped to
- * [0, sched_wakeup_gran_ns]
- */
-SCHED_FEAT(ADAPTIVE_GRAN, 1)
-
-/*
- * When converting the wakeup granularity to virtual time, do it such
- * that heavier tasks preempting a lighter task have an edge.
- */
-SCHED_FEAT(ASYM_GRAN, 1)
-
-/*
- * Always wakeup-preempt SYNC wakeups, see SYNC_WAKEUPS.
- */
-SCHED_FEAT(WAKEUP_SYNC, 0)
-
-/*
- * Wakeup preempt based on task behaviour. Tasks that do not overlap
- * don't get preempted.
- */
-SCHED_FEAT(WAKEUP_OVERLAP, 0)
-
-/*
- * Use the SYNC wakeup hint, pipes and the likes use this to indicate
- * the remote end is likely to consume the data we just wrote, and
- * therefore has cache benefit from being placed on the same cpu, see
- * also AFFINE_WAKEUPS.
- */
-SCHED_FEAT(SYNC_WAKEUPS, 1)
-
-/*
* Based on load and program behaviour, see if it makes sense to place
* a newly woken task on the same cpu as the task that woke it --
* improve cache locality. Typically used with SYNC wakeups as
@@ -70,16 +25,6 @@ SCHED_FEAT(SYNC_WAKEUPS, 1)
SCHED_FEAT(AFFINE_WAKEUPS, 1)
/*
- * Weaken SYNC hint based on overlap
- */
-SCHED_FEAT(SYNC_LESS, 1)
-
-/*
- * Add SYNC hint based on overlap
- */
-SCHED_FEAT(SYNC_MORE, 0)
-
-/*
* Prefer to schedule the task we woke last (assuming it failed
* wakeup-preemption), since its likely going to consume data we
* touched, increases cache locality.
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index a8a6d8a50947..9fa0f402c87c 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -6,7 +6,8 @@
*/
#ifdef CONFIG_SMP
-static int select_task_rq_idle(struct task_struct *p, int sd_flag, int flags)
+static int
+select_task_rq_idle(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
{
return task_cpu(p); /* IDLE tasks are never migrated */
}
@@ -22,8 +23,7 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
static struct task_struct *pick_next_task_idle(struct rq *rq)
{
schedstat_inc(rq, sched_goidle);
- /* adjust the active tasks as we might go into a long sleep */
- calc_load_account_active(rq);
+ calc_load_account_idle(rq);
return rq->idle;
}
@@ -32,7 +32,7 @@ static struct task_struct *pick_next_task_idle(struct rq *rq)
* message if some code attempts to do it:
*/
static void
-dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep)
+dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
{
raw_spin_unlock_irq(&rq->lock);
printk(KERN_ERR "bad: scheduling from the idle thread!\n");
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index b5b920ae2ea7..8afb953e31c6 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -613,7 +613,7 @@ static void update_curr_rt(struct rq *rq)
if (unlikely((s64)delta_exec < 0))
delta_exec = 0;
- schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));
+ schedstat_set(curr->se.statistics.exec_max, max(curr->se.statistics.exec_max, delta_exec));
curr->se.sum_exec_runtime += delta_exec;
account_group_exec_runtime(curr, delta_exec);
@@ -888,20 +888,20 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
* Adding/removing a task to/from a priority array:
*/
static void
-enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup, bool head)
+enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
struct sched_rt_entity *rt_se = &p->rt;
- if (wakeup)
+ if (flags & ENQUEUE_WAKEUP)
rt_se->timeout = 0;
- enqueue_rt_entity(rt_se, head);
+ enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
enqueue_pushable_task(rq, p);
}
-static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
+static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
struct sched_rt_entity *rt_se = &p->rt;
@@ -948,10 +948,9 @@ static void yield_task_rt(struct rq *rq)
#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);
-static int select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
+static int
+select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
{
- struct rq *rq = task_rq(p);
-
if (sd_flag != SD_BALANCE_WAKE)
return smp_processor_id();
diff --git a/kernel/signal.c b/kernel/signal.c
index dbd7fe073c55..825a3f24ad76 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2735,3 +2735,43 @@ void __init signals_init(void)
{
sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}
+
+#ifdef CONFIG_KGDB_KDB
+#include <linux/kdb.h>
+/*
+ * kdb_send_sig_info - Allows kdb to send signals without exposing
+ * signal internals. This function checks if the required locks are
+ * available before calling the main signal code, to avoid kdb
+ * deadlocks.
+ */
+void
+kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
+{
+ static struct task_struct *kdb_prev_t;
+ int sig, new_t;
+ if (!spin_trylock(&t->sighand->siglock)) {
+ kdb_printf("Can't do kill command now.\n"
+ "The sigmask lock is held somewhere else in "
+ "kernel, try again later\n");
+ return;
+ }
+ spin_unlock(&t->sighand->siglock);
+ new_t = kdb_prev_t != t;
+ kdb_prev_t = t;
+ if (t->state != TASK_RUNNING && new_t) {
+ kdb_printf("Process is not RUNNING, sending a signal from "
+ "kdb risks deadlock\n"
+ "on the run queue locks. "
+ "The signal has _not_ been sent.\n"
+ "Reissue the kill command if you want to risk "
+ "the deadlock.\n");
+ return;
+ }
+ sig = info->si_signo;
+ if (send_sig_info(sig, info, t))
+ kdb_printf("Fail to deliver Signal %d to process %d.\n",
+ sig, t->pid);
+ else
+ kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
+}
+#endif /* CONFIG_KGDB_KDB */
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 7c1a67ef0274..0db913a5c60f 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -716,7 +716,7 @@ static int run_ksoftirqd(void * __bind_cpu)
preempt_enable_no_resched();
cond_resched();
preempt_disable();
- rcu_sched_qs((long)__bind_cpu);
+ rcu_note_context_switch((long)__bind_cpu);
}
preempt_enable();
set_current_state(TASK_INTERRUPTIBLE);
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 9bb9fb1bd79c..ef51d1fcf5e6 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -1,17 +1,381 @@
-/* Copyright 2008, 2005 Rusty Russell rusty@rustcorp.com.au IBM Corporation.
- * GPL v2 and any later version.
+/*
+ * kernel/stop_machine.c
+ *
+ * Copyright (C) 2008, 2005 IBM Corporation.
+ * Copyright (C) 2008, 2005 Rusty Russell rusty@rustcorp.com.au
+ * Copyright (C) 2010 SUSE Linux Products GmbH
+ * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
+ *
+ * This file is released under the GPLv2 and any later version.
*/
+#include <linux/completion.h>
#include <linux/cpu.h>
-#include <linux/err.h>
+#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
+#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
-#include <linux/syscalls.h>
#include <linux/interrupt.h>
+#include <linux/kallsyms.h>
#include <asm/atomic.h>
-#include <asm/uaccess.h>
+
+/*
+ * Structure to determine completion condition and record errors. May
+ * be shared by works on different cpus.
+ */
+struct cpu_stop_done {
+ atomic_t nr_todo; /* nr left to execute */
+ bool executed; /* actually executed? */
+ int ret; /* collected return value */
+ struct completion completion; /* fired if nr_todo reaches 0 */
+};
+
+/* the actual stopper, one per every possible cpu, enabled on online cpus */
+struct cpu_stopper {
+ spinlock_t lock;
+ struct list_head works; /* list of pending works */
+ struct task_struct *thread; /* stopper thread */
+ bool enabled; /* is this stopper enabled? */
+};
+
+static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
+
+static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
+{
+ memset(done, 0, sizeof(*done));
+ atomic_set(&done->nr_todo, nr_todo);
+ init_completion(&done->completion);
+}
+
+/* signal completion unless @done is NULL */
+static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
+{
+ if (done) {
+ if (executed)
+ done->executed = true;
+ if (atomic_dec_and_test(&done->nr_todo))
+ complete(&done->completion);
+ }
+}
+
+/* queue @work to @stopper. if offline, @work is completed immediately */
+static void cpu_stop_queue_work(struct cpu_stopper *stopper,
+ struct cpu_stop_work *work)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&stopper->lock, flags);
+
+ if (stopper->enabled) {
+ list_add_tail(&work->list, &stopper->works);
+ wake_up_process(stopper->thread);
+ } else
+ cpu_stop_signal_done(work->done, false);
+
+ spin_unlock_irqrestore(&stopper->lock, flags);
+}
+
+/**
+ * stop_one_cpu - stop a cpu
+ * @cpu: cpu to stop
+ * @fn: function to execute
+ * @arg: argument to @fn
+ *
+ * Execute @fn(@arg) on @cpu. @fn is run in a process context with
+ * the highest priority preempting any task on the cpu and
+ * monopolizing it. This function returns after the execution is
+ * complete.
+ *
+ * This function doesn't guarantee @cpu stays online till @fn
+ * completes. If @cpu goes down in the middle, execution may happen
+ * partially or fully on different cpus. @fn should either be ready
+ * for that or the caller should ensure that @cpu stays online until
+ * this function completes.
+ *
+ * CONTEXT:
+ * Might sleep.
+ *
+ * RETURNS:
+ * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
+ * otherwise, the return value of @fn.
+ */
+int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
+{
+ struct cpu_stop_done done;
+ struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };
+
+ cpu_stop_init_done(&done, 1);
+ cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), &work);
+ wait_for_completion(&done.completion);
+ return done.executed ? done.ret : -ENOENT;
+}
+
+/**
+ * stop_one_cpu_nowait - stop a cpu but don't wait for completion
+ * @cpu: cpu to stop
+ * @fn: function to execute
+ * @arg: argument to @fn
+ *
+ * Similar to stop_one_cpu() but doesn't wait for completion. The
+ * caller is responsible for ensuring @work_buf is currently unused
+ * and will remain untouched until stopper starts executing @fn.
+ *
+ * CONTEXT:
+ * Don't care.
+ */
+void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
+ struct cpu_stop_work *work_buf)
+{
+ *work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
+ cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), work_buf);
+}
+
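+
Both entry points take a cpu_stop_fn_t, i.e. int (*)(void *), and run it on the target CPU at the highest RT priority with preemption disabled. A hedged usage sketch; the callback and wrapper names below are illustrative only:

#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/stop_machine.h>

static int dump_cpu_state(void *arg)
{
	pr_info("cpu%d stopped, arg=%p\n", smp_processor_id(), arg);
	return 0;
}

static struct cpu_stop_work dump_work;	/* must stay untouched until the callback runs */

static void dump_cpu(unsigned int cpu)
{
	int ret;

	ret = stop_one_cpu(cpu, dump_cpu_state, NULL);	/* sleeps until done */
	if (ret == -ENOENT)
		pr_info("cpu%u went offline before the callback ran\n", cpu);

	/* fire-and-forget variant, usable where sleeping is not allowed */
	stop_one_cpu_nowait(cpu, dump_cpu_state, NULL, &dump_work);
}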
+/* static data for stop_cpus */
+static DEFINE_MUTEX(stop_cpus_mutex);
+static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);
+
+int __stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
+{
+ struct cpu_stop_work *work;
+ struct cpu_stop_done done;
+ unsigned int cpu;
+
+ /* initialize works and done */
+ for_each_cpu(cpu, cpumask) {
+ work = &per_cpu(stop_cpus_work, cpu);
+ work->fn = fn;
+ work->arg = arg;
+ work->done = &done;
+ }
+ cpu_stop_init_done(&done, cpumask_weight(cpumask));
+
+ /*
+ * Disable preemption while queueing to avoid getting
+ * preempted by a stopper which might wait for other stoppers
+ * to enter @fn which can lead to deadlock.
+ */
+ preempt_disable();
+ for_each_cpu(cpu, cpumask)
+ cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu),
+ &per_cpu(stop_cpus_work, cpu));
+ preempt_enable();
+
+ wait_for_completion(&done.completion);
+ return done.executed ? done.ret : -ENOENT;
+}
+
+/**
+ * stop_cpus - stop multiple cpus
+ * @cpumask: cpus to stop
+ * @fn: function to execute
+ * @arg: argument to @fn
+ *
+ * Execute @fn(@arg) on online cpus in @cpumask. On each target cpu,
+ * @fn is run in a process context with the highest priority
+ * preempting any task on the cpu and monopolizing it. This function
+ * returns after all executions are complete.
+ *
+ * This function doesn't guarantee the cpus in @cpumask stay online
+ * till @fn completes. If some cpus go down in the middle, execution
+ * on the cpu may happen partially or fully on different cpus. @fn
+ * should either be ready for that or the caller should ensure that
+ * the cpus stay online until this function completes.
+ *
+ * All stop_cpus() calls are serialized making it safe for @fn to wait
+ * for all cpus to start executing it.
+ *
+ * CONTEXT:
+ * Might sleep.
+ *
+ * RETURNS:
+ * -ENOENT if @fn(@arg) was not executed at all because all cpus in
+ * @cpumask were offline; otherwise, 0 if all executions of @fn
+ * returned 0, any non zero return value if any returned non zero.
+ */
+int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
+{
+ int ret;
+
+ /* static works are used, process one request at a time */
+ mutex_lock(&stop_cpus_mutex);
+ ret = __stop_cpus(cpumask, fn, arg);
+ mutex_unlock(&stop_cpus_mutex);
+ return ret;
+}
+
+/**
+ * try_stop_cpus - try to stop multiple cpus
+ * @cpumask: cpus to stop
+ * @fn: function to execute
+ * @arg: argument to @fn
+ *
+ * Identical to stop_cpus() except that it fails with -EAGAIN if
+ * someone else is already using the facility.
+ *
+ * CONTEXT:
+ * Might sleep.
+ *
+ * RETURNS:
+ * -EAGAIN if someone else is already stopping cpus, -ENOENT if
+ * @fn(@arg) was not executed at all because all cpus in @cpumask were
+ * offline; otherwise, 0 if all executions of @fn returned 0, any non
+ * zero return value if any returned non zero.
+ */
+int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
+{
+ int ret;
+
+ /* static works are used, process one request at a time */
+ if (!mutex_trylock(&stop_cpus_mutex))
+ return -EAGAIN;
+ ret = __stop_cpus(cpumask, fn, arg);
+ mutex_unlock(&stop_cpus_mutex);
+ return ret;
+}
+
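+
stop_cpus() sleeps on stop_cpus_mutex, so concurrent callers serialize; try_stop_cpus() instead fails fast with -EAGAIN, which is exactly what the synchronize_sched_expedited() retry loop in sched.c relies on. A short illustrative sketch, with the callback name assumed for the example:

#include <linux/cpu.h>
#include <linux/stop_machine.h>

static int quiesce_fn(void *unused)
{
	/* runs with preemption disabled on every targeted CPU */
	return 0;
}

static int quiesce_all_cpus(void)
{
	int ret;

	get_online_cpus();
	ret = stop_cpus(cpu_online_mask, quiesce_fn, NULL);
	put_online_cpus();
	return ret;	/* -ENOENT only if all targeted CPUs were offline */
}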
+static int cpu_stopper_thread(void *data)
+{
+ struct cpu_stopper *stopper = data;
+ struct cpu_stop_work *work;
+ int ret;
+
+repeat:
+ set_current_state(TASK_INTERRUPTIBLE); /* mb paired w/ kthread_stop */
+
+ if (kthread_should_stop()) {
+ __set_current_state(TASK_RUNNING);
+ return 0;
+ }
+
+ work = NULL;
+ spin_lock_irq(&stopper->lock);
+ if (!list_empty(&stopper->works)) {
+ work = list_first_entry(&stopper->works,
+ struct cpu_stop_work, list);
+ list_del_init(&work->list);
+ }
+ spin_unlock_irq(&stopper->lock);
+
+ if (work) {
+ cpu_stop_fn_t fn = work->fn;
+ void *arg = work->arg;
+ struct cpu_stop_done *done = work->done;
+ char ksym_buf[KSYM_NAME_LEN];
+
+ __set_current_state(TASK_RUNNING);
+
+ /* cpu stop callbacks are not allowed to sleep */
+ preempt_disable();
+
+ ret = fn(arg);
+ if (ret)
+ done->ret = ret;
+
+ /* restore preemption and check it's still balanced */
+ preempt_enable();
+ WARN_ONCE(preempt_count(),
+ "cpu_stop: %s(%p) leaked preempt count\n",
+ kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL,
+ ksym_buf), arg);
+
+ cpu_stop_signal_done(done, true);
+ } else
+ schedule();
+
+ goto repeat;
+}
+
+/* manage stopper for a cpu, mostly lifted from sched migration thread mgmt */
+static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+ unsigned int cpu = (unsigned long)hcpu;
+ struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
+ struct cpu_stop_work *work;
+ struct task_struct *p;
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_UP_PREPARE:
+ BUG_ON(stopper->thread || stopper->enabled ||
+ !list_empty(&stopper->works));
+ p = kthread_create(cpu_stopper_thread, stopper, "migration/%d",
+ cpu);
+ if (IS_ERR(p))
+ return NOTIFY_BAD;
+ sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
+ get_task_struct(p);
+ stopper->thread = p;
+ break;
+
+ case CPU_ONLINE:
+ kthread_bind(stopper->thread, cpu);
+ /* strictly unnecessary, as first user will wake it */
+ wake_up_process(stopper->thread);
+ /* mark enabled */
+ spin_lock_irq(&stopper->lock);
+ stopper->enabled = true;
+ spin_unlock_irq(&stopper->lock);
+ break;
+
+#ifdef CONFIG_HOTPLUG_CPU
+ case CPU_UP_CANCELED:
+ case CPU_DEAD:
+ /* kill the stopper */
+ kthread_stop(stopper->thread);
+ /* drain remaining works */
+ spin_lock_irq(&stopper->lock);
+ list_for_each_entry(work, &stopper->works, list)
+ cpu_stop_signal_done(work->done, false);
+ stopper->enabled = false;
+ spin_unlock_irq(&stopper->lock);
+ /* release the stopper */
+ put_task_struct(stopper->thread);
+ stopper->thread = NULL;
+ break;
+#endif
+ }
+
+ return NOTIFY_OK;
+}
+
+/*
+ * Give it a higher priority so that the cpu stopper is available to other
+ * cpu notifiers. It currently shares the same priority as sched
+ * migration_notifier.
+ */
+static struct notifier_block __cpuinitdata cpu_stop_cpu_notifier = {
+ .notifier_call = cpu_stop_cpu_callback,
+ .priority = 10,
+};
+
+static int __init cpu_stop_init(void)
+{
+ void *bcpu = (void *)(long)smp_processor_id();
+ unsigned int cpu;
+ int err;
+
+ for_each_possible_cpu(cpu) {
+ struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
+
+ spin_lock_init(&stopper->lock);
+ INIT_LIST_HEAD(&stopper->works);
+ }
+
+ /* start one for the boot cpu */
+ err = cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_UP_PREPARE,
+ bcpu);
+ BUG_ON(err == NOTIFY_BAD);
+ cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_ONLINE, bcpu);
+ register_cpu_notifier(&cpu_stop_cpu_notifier);
+
+ return 0;
+}
+early_initcall(cpu_stop_init);
+
+#ifdef CONFIG_STOP_MACHINE
/* This controls the threads on each CPU. */
enum stopmachine_state {
@@ -26,174 +390,94 @@ enum stopmachine_state {
/* Exit */
STOPMACHINE_EXIT,
};
-static enum stopmachine_state state;
struct stop_machine_data {
- int (*fn)(void *);
- void *data;
- int fnret;
+ int (*fn)(void *);
+ void *data;
+ /* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
+ unsigned int num_threads;
+ const struct cpumask *active_cpus;
+
+ enum stopmachine_state state;
+ atomic_t thread_ack;
};
-/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
-static unsigned int num_threads;
-static atomic_t thread_ack;
-static DEFINE_MUTEX(lock);
-/* setup_lock protects refcount, stop_machine_wq and stop_machine_work. */
-static DEFINE_MUTEX(setup_lock);
-/* Users of stop_machine. */
-static int refcount;
-static struct workqueue_struct *stop_machine_wq;
-static struct stop_machine_data active, idle;
-static const struct cpumask *active_cpus;
-static void __percpu *stop_machine_work;
-
-static void set_state(enum stopmachine_state newstate)
+static void set_state(struct stop_machine_data *smdata,
+ enum stopmachine_state newstate)
{
/* Reset ack counter. */
- atomic_set(&thread_ack, num_threads);
+ atomic_set(&smdata->thread_ack, smdata->num_threads);
smp_wmb();
- state = newstate;
+ smdata->state = newstate;
}
/* Last one to ack a state moves to the next state. */
-static void ack_state(void)
+static void ack_state(struct stop_machine_data *smdata)
{
- if (atomic_dec_and_test(&thread_ack))
- set_state(state + 1);
+ if (atomic_dec_and_test(&smdata->thread_ack))
+ set_state(smdata, smdata->state + 1);
}
-/* This is the actual function which stops the CPU. It runs
- * in the context of a dedicated stopmachine workqueue. */
-static void stop_cpu(struct work_struct *unused)
+/* This is the cpu_stop function which stops the CPU. */
+static int stop_machine_cpu_stop(void *data)
{
+ struct stop_machine_data *smdata = data;
enum stopmachine_state curstate = STOPMACHINE_NONE;
- struct stop_machine_data *smdata = &idle;
- int cpu = smp_processor_id();
- int err;
+ int cpu = smp_processor_id(), err = 0;
+ bool is_active;
+
+ if (!smdata->active_cpus)
+ is_active = cpu == cpumask_first(cpu_online_mask);
+ else
+ is_active = cpumask_test_cpu(cpu, smdata->active_cpus);
- if (!active_cpus) {
- if (cpu == cpumask_first(cpu_online_mask))
- smdata = &active;
- } else {
- if (cpumask_test_cpu(cpu, active_cpus))
- smdata = &active;
- }
/* Simple state machine */
do {
/* Chill out and ensure we re-read stopmachine_state. */
cpu_relax();
- if (state != curstate) {
- curstate = state;
+ if (smdata->state != curstate) {
+ curstate = smdata->state;
switch (curstate) {
case STOPMACHINE_DISABLE_IRQ:
local_irq_disable();
hard_irq_disable();
break;
case STOPMACHINE_RUN:
- /* On multiple CPUs only a single error code
- * is needed to tell that something failed. */
- err = smdata->fn(smdata->data);
- if (err)
- smdata->fnret = err;
+ if (is_active)
+ err = smdata->fn(smdata->data);
break;
default:
break;
}
- ack_state();
+ ack_state(smdata);
}
} while (curstate != STOPMACHINE_EXIT);
local_irq_enable();
+ return err;
}
-/* Callback for CPUs which aren't supposed to do anything. */
-static int chill(void *unused)
-{
- return 0;
-}
-
-int stop_machine_create(void)
-{
- mutex_lock(&setup_lock);
- if (refcount)
- goto done;
- stop_machine_wq = create_rt_workqueue("kstop");
- if (!stop_machine_wq)
- goto err_out;
- stop_machine_work = alloc_percpu(struct work_struct);
- if (!stop_machine_work)
- goto err_out;
-done:
- refcount++;
- mutex_unlock(&setup_lock);
- return 0;
-
-err_out:
- if (stop_machine_wq)
- destroy_workqueue(stop_machine_wq);
- mutex_unlock(&setup_lock);
- return -ENOMEM;
-}
-EXPORT_SYMBOL_GPL(stop_machine_create);
-
-void stop_machine_destroy(void)
-{
- mutex_lock(&setup_lock);
- refcount--;
- if (refcount)
- goto done;
- destroy_workqueue(stop_machine_wq);
- free_percpu(stop_machine_work);
-done:
- mutex_unlock(&setup_lock);
-}
-EXPORT_SYMBOL_GPL(stop_machine_destroy);
-
int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
{
- struct work_struct *sm_work;
- int i, ret;
-
- /* Set up initial state. */
- mutex_lock(&lock);
- num_threads = num_online_cpus();
- active_cpus = cpus;
- active.fn = fn;
- active.data = data;
- active.fnret = 0;
- idle.fn = chill;
- idle.data = NULL;
-
- set_state(STOPMACHINE_PREPARE);
-
- /* Schedule the stop_cpu work on all cpus: hold this CPU so one
- * doesn't hit this CPU until we're ready. */
- get_cpu();
- for_each_online_cpu(i) {
- sm_work = per_cpu_ptr(stop_machine_work, i);
- INIT_WORK(sm_work, stop_cpu);
- queue_work_on(i, stop_machine_wq, sm_work);
- }
- /* This will release the thread on our CPU. */
- put_cpu();
- flush_workqueue(stop_machine_wq);
- ret = active.fnret;
- mutex_unlock(&lock);
- return ret;
+ struct stop_machine_data smdata = { .fn = fn, .data = data,
+ .num_threads = num_online_cpus(),
+ .active_cpus = cpus };
+
+ /* Set the initial state and stop all online cpus. */
+ set_state(&smdata, STOPMACHINE_PREPARE);
+ return stop_cpus(cpu_online_mask, stop_machine_cpu_stop, &smdata);
}
int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
{
int ret;
- ret = stop_machine_create();
- if (ret)
- return ret;
/* No CPUs can come up or down during this. */
get_online_cpus();
ret = __stop_machine(fn, data, cpus);
put_online_cpus();
- stop_machine_destroy();
return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);
+
+#endif /* CONFIG_STOP_MACHINE */
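After the rework, stop_machine() is a thin wrapper: __stop_machine() fills a stack-allocated stop_machine_data and hands stop_machine_cpu_stop() to stop_cpus(), so only the CPUs named in @cpus (or the first online CPU when @cpus is NULL) actually run @fn while the rest spin with interrupts disabled. A hedged caller sketch; the patching function here is illustrative, not from this patch:

static int apply_patch(void *addr)
{
	/* every other online CPU is spinning with IRQs off here */
	/* ... poke text or MSRs at addr ... */
	return 0;
}

static int do_apply_patch(void *addr)
{
	/* NULL cpumask: @fn runs on the first online CPU only */
	return stop_machine(apply_patch, addr, NULL);
}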
diff --git a/kernel/sys.c b/kernel/sys.c
index 7cb426a58965..0d36d889c74d 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -492,10 +492,6 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
return -ENOMEM;
old = current_cred();
- retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE);
- if (retval)
- goto error;
-
retval = -EPERM;
if (rgid != (gid_t) -1) {
if (old->gid == rgid ||
@@ -543,10 +539,6 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
return -ENOMEM;
old = current_cred();
- retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID);
- if (retval)
- goto error;
-
retval = -EPERM;
if (capable(CAP_SETGID))
new->gid = new->egid = new->sgid = new->fsgid = gid;
@@ -610,10 +602,6 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
return -ENOMEM;
old = current_cred();
- retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE);
- if (retval)
- goto error;
-
retval = -EPERM;
if (ruid != (uid_t) -1) {
new->uid = ruid;
@@ -675,10 +663,6 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
return -ENOMEM;
old = current_cred();
- retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);
- if (retval)
- goto error;
-
retval = -EPERM;
if (capable(CAP_SETUID)) {
new->suid = new->uid = uid;
@@ -719,9 +703,6 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
if (!new)
return -ENOMEM;
- retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES);
- if (retval)
- goto error;
old = current_cred();
retval = -EPERM;
@@ -788,10 +769,6 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
return -ENOMEM;
old = current_cred();
- retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES);
- if (retval)
- goto error;
-
retval = -EPERM;
if (!capable(CAP_SETGID)) {
if (rgid != (gid_t) -1 && rgid != old->gid &&
@@ -851,9 +828,6 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
old = current_cred();
old_fsuid = old->fsuid;
- if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
- goto error;
-
if (uid == old->uid || uid == old->euid ||
uid == old->suid || uid == old->fsuid ||
capable(CAP_SETUID)) {
@@ -864,7 +838,6 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
}
}
-error:
abort_creds(new);
return old_fsuid;
@@ -888,9 +861,6 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
old = current_cred();
old_fsgid = old->fsgid;
- if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS))
- goto error;
-
if (gid == old->gid || gid == old->egid ||
gid == old->sgid || gid == old->fsgid ||
capable(CAP_SETGID)) {
@@ -900,7 +870,6 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
}
}
-error:
abort_creds(new);
return old_fsgid;
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 70f2ea758ffe..bad369ec5403 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -181,3 +181,7 @@ cond_syscall(sys_eventfd2);
/* performance counters: */
cond_syscall(sys_perf_event_open);
+
+/* fanotify! */
+cond_syscall(sys_fanotify_init);
+cond_syscall(sys_fanotify_mark);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 8686b0f5fc12..7d6bf6c47a5f 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -43,6 +43,7 @@
#include <linux/times.h>
#include <linux/limits.h>
#include <linux/dcache.h>
+#include <linux/dnotify.h>
#include <linux/syscalls.h>
#include <linux/vmstat.h>
#include <linux/nfs_fs.h>
@@ -74,6 +75,10 @@
#include <scsi/sg.h>
#endif
+#ifdef CONFIG_NMI_WATCHDOG
+#include <linux/nmi.h>
+#endif
+
#if defined(CONFIG_SYSCTL)
@@ -128,6 +133,9 @@ static int min_percpu_pagelist_fract = 8;
static int ngroups_max = NGROUPS_MAX;
+#ifdef CONFIG_INOTIFY_USER
+#include <linux/inotify.h>
+#endif
#ifdef CONFIG_SPARC
#include <asm/system.h>
#endif
@@ -163,6 +171,27 @@ static int proc_taint(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
#endif
+#ifdef CONFIG_MAGIC_SYSRQ
+static int __sysrq_enabled; /* Note: sysrq code uses its own private copy */
+
+static int sysrq_sysctl_handler(ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int error;
+
+ error = proc_dointvec(table, write, buffer, lenp, ppos);
+ if (error)
+ return error;
+
+ if (write)
+ sysrq_toggle_support(__sysrq_enabled);
+
+ return 0;
+}
+
+#endif
+
static struct ctl_table root_table[];
static struct ctl_table_root sysctl_table_root;
static struct ctl_table_header root_table_header = {
@@ -183,9 +212,6 @@ static struct ctl_table fs_table[];
static struct ctl_table debug_table[];
static struct ctl_table dev_table[];
extern struct ctl_table random_table[];
-#ifdef CONFIG_INOTIFY_USER
-extern struct ctl_table inotify_table[];
-#endif
#ifdef CONFIG_EPOLL
extern struct ctl_table epoll_table[];
#endif
@@ -567,7 +593,7 @@ static struct ctl_table kern_table[] = {
.data = &__sysrq_enabled,
.maxlen = sizeof (int),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = sysrq_sysctl_handler,
},
#endif
#ifdef CONFIG_PROC_SYSCTL
@@ -682,7 +708,16 @@ static struct ctl_table kern_table[] = {
.mode = 0444,
.proc_handler = proc_dointvec,
},
-#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86)
+#if defined(CONFIG_NMI_WATCHDOG)
+ {
+ .procname = "nmi_watchdog",
+ .data = &nmi_watchdog_enabled,
+ .maxlen = sizeof (int),
+ .mode = 0644,
+ .proc_handler = proc_nmi_enabled,
+ },
+#endif
+#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86) && !defined(CONFIG_NMI_WATCHDOG)
{
.procname = "unknown_nmi_panic",
.data = &unknown_nmi_panic,
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index 59030570f5ca..937d31dc8566 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -224,7 +224,6 @@ static const struct bin_table bin_net_ipv4_route_table[] = {
{ CTL_INT, NET_IPV4_ROUTE_MTU_EXPIRES, "mtu_expires" },
{ CTL_INT, NET_IPV4_ROUTE_MIN_PMTU, "min_pmtu" },
{ CTL_INT, NET_IPV4_ROUTE_MIN_ADVMSS, "min_adv_mss" },
- { CTL_INT, NET_IPV4_ROUTE_SECRET_INTERVAL, "secret_interval" },
{}
};
diff --git a/kernel/time.c b/kernel/time.c
index 656dccfe1cbb..50612faa9baf 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -132,12 +132,11 @@ SYSCALL_DEFINE2(gettimeofday, struct timeval __user *, tv,
*/
static inline void warp_clock(void)
{
- write_seqlock_irq(&xtime_lock);
- wall_to_monotonic.tv_sec -= sys_tz.tz_minuteswest * 60;
- xtime.tv_sec += sys_tz.tz_minuteswest * 60;
- update_xtime_cache(0);
- write_sequnlock_irq(&xtime_lock);
- clock_was_set();
+ struct timespec delta, adjust;
+ delta.tv_sec = sys_tz.tz_minuteswest * 60;
+ delta.tv_nsec = 0;
+ adjust = timespec_add_safe(current_kernel_time(), delta);
+ do_settimeofday(&adjust);
}
/*
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 1f5dde637457..f08e99c1d561 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -625,6 +625,54 @@ static void clocksource_enqueue(struct clocksource *cs)
list_add(&cs->list, entry);
}
+
+/*
+ * Maximum time we expect to go between ticks. This includes idle
+ * tickless time. It provides the trade off between selecting a
+ * mult/shift pair that is very precise but can only handle a short
+ * period of time, vs. a mult/shift pair that can handle long periods
+ * of time but isn't as precise.
+ *
+ * This is a subsystem constant, and actual hardware limitations
+ * may override it (ie: clocksources that wrap every 3 seconds).
+ */
+#define MAX_UPDATE_LENGTH 5 /* Seconds */
+
+/**
+ * __clocksource_register_scale - Used to install new clocksources
+ * @cs: clocksource to be registered
+ * @scale: Scale factor multiplied against freq to get clocksource hz
+ * @freq: clocksource frequency (cycles per second) divided by scale
+ *
+ * Returns -EBUSY if registration fails, zero otherwise.
+ *
+ * This *SHOULD NOT* be called directly! Please use the
+ * clocksource_register_hz() or clocksource_register_khz() helper functions.
+ */
+int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
+{
+
+ /*
+ * Ideally we want to use some of the limits used in
+ * clocksource_max_deferment, to provide a more informed
+ * MAX_UPDATE_LENGTH. But for now this just gets the
+ * register interface working properly.
+ */
+ clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
+ NSEC_PER_SEC/scale,
+ MAX_UPDATE_LENGTH*scale);
+ cs->max_idle_ns = clocksource_max_deferment(cs);
+
+ mutex_lock(&clocksource_mutex);
+ clocksource_enqueue(cs);
+ clocksource_select();
+ clocksource_enqueue_watchdog(cs);
+ mutex_unlock(&clocksource_mutex);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__clocksource_register_scale);
+
+
/**
* clocksource_register - Used to install new clocksources
* @t: clocksource to be registered
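The kernel-doc above steers drivers towards clocksource_register_hz() and clocksource_register_khz() instead of calling __clocksource_register_scale() directly. Those helpers are outside this hunk; presumably they are thin wrappers that differ only in the scale argument, along these lines (a sketch under that assumption, not a copy of the header):

	static inline int clocksource_register_hz(struct clocksource *cs, u32 hz)
	{
		return __clocksource_register_scale(cs, 1, hz);
	}

	static inline int clocksource_register_khz(struct clocksource *cs, u32 khz)
	{
		return __clocksource_register_scale(cs, 1000, khz);
	}

A driver with, say, a free-running 24 MHz counter would then fill in its read callback, rating and mask, call clocksource_register_hz(&my_cs, 24000000), and let the core pick a mult/shift pair good for roughly MAX_UPDATE_LENGTH seconds between updates.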
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 7c0f180d6e9d..c63116863a80 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -69,7 +69,7 @@ static s64 time_freq;
/* time at last adjustment (secs): */
static long time_reftime;
-long time_adjust;
+static long time_adjust;
/* constant (boot-param configurable) NTP tick adjustment (upscaled) */
static s64 ntp_tick_adj;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index f992762d7f51..1d7b9bc1c034 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -150,14 +150,32 @@ static void tick_nohz_update_jiffies(ktime_t now)
touch_softlockup_watchdog();
}
+/*
+ * Updates the per-CPU idle time statistics counters
+ */
+static void
+update_ts_time_stats(struct tick_sched *ts, ktime_t now, u64 *last_update_time)
+{
+ ktime_t delta;
+
+ if (ts->idle_active) {
+ delta = ktime_sub(now, ts->idle_entrytime);
+ ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
+ if (nr_iowait_cpu() > 0)
+ ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
+ ts->idle_entrytime = now;
+ }
+
+ if (last_update_time)
+ *last_update_time = ktime_to_us(now);
+
+}
+
static void tick_nohz_stop_idle(int cpu, ktime_t now)
{
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
- ktime_t delta;
- delta = ktime_sub(now, ts->idle_entrytime);
- ts->idle_lastupdate = now;
- ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
+ update_ts_time_stats(ts, now, NULL);
ts->idle_active = 0;
sched_clock_idle_wakeup_event(0);
@@ -165,20 +183,32 @@ static void tick_nohz_stop_idle(int cpu, ktime_t now)
static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
{
- ktime_t now, delta;
+ ktime_t now;
now = ktime_get();
- if (ts->idle_active) {
- delta = ktime_sub(now, ts->idle_entrytime);
- ts->idle_lastupdate = now;
- ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
- }
+
+ update_ts_time_stats(ts, now, NULL);
+
ts->idle_entrytime = now;
ts->idle_active = 1;
sched_clock_idle_sleep_event();
return now;
}
+/**
+ * get_cpu_idle_time_us - get the total idle time of a cpu
+ * @cpu: CPU number to query
+ * @last_update_time: variable to store update time in
+ *
+ * Return the cumulative idle time (since boot) for a given
+ * CPU, in microseconds. The idle time returned includes
+ * the iowait time (unlike what "top" and co report).
+ *
+ * This time is measured via accounting rather than sampling,
+ * and is as accurate as ktime_get() is.
+ *
+ * This function returns -1 if NOHZ is not enabled.
+ */
u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
{
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
@@ -186,15 +216,38 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
if (!tick_nohz_enabled)
return -1;
- if (ts->idle_active)
- *last_update_time = ktime_to_us(ts->idle_lastupdate);
- else
- *last_update_time = ktime_to_us(ktime_get());
+ update_ts_time_stats(ts, ktime_get(), last_update_time);
return ktime_to_us(ts->idle_sleeptime);
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
+/**
+ * get_cpu_iowait_time_us - get the total iowait time of a cpu
+ * @cpu: CPU number to query
+ * @last_update_time: variable to store update time in
+ *
+ * Return the cumulative iowait time (since boot) for a given
+ * CPU, in microseconds.
+ *
+ * This time is measured via accounting rather than sampling,
+ * and is as accurate as ktime_get() is.
+ *
+ * This function returns -1 if NOHZ is not enabled.
+ */
+u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
+{
+ struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+
+ if (!tick_nohz_enabled)
+ return -1;
+
+ update_ts_time_stats(ts, ktime_get(), last_update_time);
+
+ return ktime_to_us(ts->iowait_sleeptime);
+}
+EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
+
/**
* tick_nohz_stop_sched_tick - stop the idle tick from the idle task
*
@@ -262,6 +315,9 @@ void tick_nohz_stop_sched_tick(int inidle)
goto end;
}
+ if (nohz_ratelimit(cpu))
+ goto end;
+
ts->idle_calls++;
/* Read jiffies and the time when jiffies were updated last */
do {
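get_cpu_idle_time_us() and the new get_cpu_iowait_time_us() both return cumulative, monotonically growing totals, so consumers normally sample them twice and work with deltas. A rough sketch of such a caller (names are illustrative; the -1 return on a non-NOHZ kernel is folded into the error path, and passing NULL for the update-time pointer is allowed):

	#include <linux/errno.h>
	#include <linux/tick.h>

	struct idle_sample {
		u64 wall_us;		/* time of the sample, in microseconds   */
		u64 idle_us;		/* cumulative idle time, iowait included */
		u64 iowait_us;		/* cumulative iowait time                */
	};

	static int sample_cpu_idle(int cpu, struct idle_sample *s)
	{
		u64 idle = get_cpu_idle_time_us(cpu, &s->wall_us);
		u64 iowait = get_cpu_iowait_time_us(cpu, NULL);

		if ((s64)idle < 0 || (s64)iowait < 0)
			return -ENODEV;	/* NOHZ disabled: fall back to tick-based stats */

		s->idle_us = idle;
		s->iowait_us = iowait;
		return 0;
	}

The busy fraction over an interval is then 1 - (idle_now - idle_then) / (wall_now - wall_then), which is the kind of figure /proc/stat readers and cpufreq governors derive from this interface.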
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 39f6177fafac..caf8d4d4f5c8 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -165,13 +165,6 @@ struct timespec raw_time;
/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;
-static struct timespec xtime_cache __attribute__ ((aligned (16)));
-void update_xtime_cache(u64 nsec)
-{
- xtime_cache = xtime;
- timespec_add_ns(&xtime_cache, nsec);
-}
-
/* must hold xtime_lock */
void timekeeping_leap_insert(int leapsecond)
{
@@ -332,8 +325,6 @@ int do_settimeofday(struct timespec *tv)
xtime = *tv;
- update_xtime_cache(0);
-
timekeeper.ntp_error = 0;
ntp_clear();
@@ -559,7 +550,6 @@ void __init timekeeping_init(void)
}
set_normalized_timespec(&wall_to_monotonic,
-boot.tv_sec, -boot.tv_nsec);
- update_xtime_cache(0);
total_sleep_time.tv_sec = 0;
total_sleep_time.tv_nsec = 0;
write_sequnlock_irqrestore(&xtime_lock, flags);
@@ -593,7 +583,6 @@ static int timekeeping_resume(struct sys_device *dev)
wall_to_monotonic = timespec_sub(wall_to_monotonic, ts);
total_sleep_time = timespec_add_safe(total_sleep_time, ts);
}
- update_xtime_cache(0);
/* re-base the last cycle value */
timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
timekeeper.ntp_error = 0;
@@ -788,7 +777,6 @@ void update_wall_time(void)
{
struct clocksource *clock;
cycle_t offset;
- u64 nsecs;
int shift = 0, maxshift;
/* Make sure we're fully resumed: */
@@ -847,7 +835,9 @@ void update_wall_time(void)
timekeeper.ntp_error += neg << timekeeper.ntp_error_shift;
}
- /* store full nanoseconds into xtime after rounding it up and
+
+ /*
+ * Store full nanoseconds into xtime after rounding it up and
* add the remainder to the error difference.
*/
xtime.tv_nsec = ((s64) timekeeper.xtime_nsec >> timekeeper.shift) + 1;
@@ -855,8 +845,15 @@ void update_wall_time(void)
timekeeper.ntp_error += timekeeper.xtime_nsec <<
timekeeper.ntp_error_shift;
- nsecs = clocksource_cyc2ns(offset, timekeeper.mult, timekeeper.shift);
- update_xtime_cache(nsecs);
+ /*
+ * Finally, make sure that after the rounding
+	 * xtime.tv_nsec isn't larger than NSEC_PER_SEC
+ */
+ if (unlikely(xtime.tv_nsec >= NSEC_PER_SEC)) {
+ xtime.tv_nsec -= NSEC_PER_SEC;
+ xtime.tv_sec++;
+ second_overflow();
+ }
/* check to see if there is a new clocksource to use */
update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
@@ -896,13 +893,13 @@ EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
unsigned long get_seconds(void)
{
- return xtime_cache.tv_sec;
+ return xtime.tv_sec;
}
EXPORT_SYMBOL(get_seconds);
struct timespec __current_kernel_time(void)
{
- return xtime_cache;
+ return xtime;
}
struct timespec current_kernel_time(void)
@@ -913,7 +910,7 @@ struct timespec current_kernel_time(void)
do {
seq = read_seqbegin(&xtime_lock);
- now = xtime_cache;
+ now = xtime;
} while (read_seqretry(&xtime_lock, seq));
return now;
@@ -928,7 +925,7 @@ struct timespec get_monotonic_coarse(void)
do {
seq = read_seqbegin(&xtime_lock);
- now = xtime_cache;
+ now = xtime;
mono = wall_to_monotonic;
} while (read_seqretry(&xtime_lock, seq));
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index 1a4a7dd78777..ab8f5e33fa92 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -176,6 +176,7 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now)
P_ns(idle_waketime);
P_ns(idle_exittime);
P_ns(idle_sleeptime);
+ P_ns(iowait_sleeptime);
P(last_jiffies);
P(next_jiffies);
P_ns(idle_expires);
diff --git a/kernel/timer.c b/kernel/timer.c
index aeb6a54f2771..9199f3c52215 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -319,6 +319,24 @@ unsigned long round_jiffies_up_relative(unsigned long j)
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
+/**
+ * set_timer_slack - set the allowed slack for a timer
+ * @slack_hz: the amount of time (in jiffies) allowed for rounding
+ *
+ * Set the amount of time, in jiffies, that a certain timer has
+ * in terms of slack. By setting this value, the timer subsystem
+ * will schedule the actual timer somewhere between
+ * the time mod_timer() asks for, and that time plus the slack.
+ *
+ * By setting the slack to -1, a percentage of the delay is used
+ * instead.
+ */
+void set_timer_slack(struct timer_list *timer, int slack_hz)
+{
+ timer->slack = slack_hz;
+}
+EXPORT_SYMBOL_GPL(set_timer_slack);
+
static inline void set_running_timer(struct tvec_base *base,
struct timer_list *timer)
@@ -550,6 +568,7 @@ static void __init_timer(struct timer_list *timer,
{
timer->entry.next = NULL;
timer->base = __raw_get_cpu_var(tvec_bases);
+ timer->slack = -1;
#ifdef CONFIG_TIMER_STATS
timer->start_site = NULL;
timer->start_pid = -1;
@@ -715,6 +734,41 @@ int mod_timer_pending(struct timer_list *timer, unsigned long expires)
}
EXPORT_SYMBOL(mod_timer_pending);
+/*
+ * Decide where to put the timer while taking the slack into account
+ *
+ * Algorithm:
+ * 1) calculate the maximum (absolute) time
+ * 2) calculate the highest bit where the expires and new max are different
+ * 3) use this bit to make a mask
+ * 4) use the bitmask to round down the maximum time, so that all last
+ * bits are zeros
+ */
+static inline
+unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
+{
+ unsigned long expires_limit, mask;
+ int bit;
+
+ expires_limit = expires + timer->slack;
+
+ if (timer->slack < 0) /* auto slack: use 0.4% */
+ expires_limit = expires + (expires - jiffies)/256;
+
+ mask = expires ^ expires_limit;
+
+ if (mask == 0)
+ return expires;
+
+ bit = find_last_bit(&mask, BITS_PER_LONG);
+
+ mask = (1 << bit) - 1;
+
+ expires_limit = expires_limit & ~(mask);
+
+ return expires_limit;
+}
+
/**
* mod_timer - modify a timer's timeout
* @timer: the timer to be modified
@@ -745,6 +799,8 @@ int mod_timer(struct timer_list *timer, unsigned long expires)
if (timer_pending(timer) && timer->expires == expires)
return 1;
+ expires = apply_slack(timer, expires);
+
return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer);
@@ -955,6 +1011,47 @@ static int cascade(struct tvec_base *base, struct tvec *tv, int index)
return index;
}
+static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
+ unsigned long data)
+{
+ int preempt_count = preempt_count();
+
+#ifdef CONFIG_LOCKDEP
+ /*
+ * It is permissible to free the timer from inside the
+ * function that is called from it, this we need to take into
+ * account for lockdep too. To avoid bogus "held lock freed"
+ * warnings as well as problems when looking into
+ * timer->lockdep_map, make a copy and use that here.
+ */
+ struct lockdep_map lockdep_map = timer->lockdep_map;
+#endif
+ /*
+ * Couple the lock chain with the lock chain at
+ * del_timer_sync() by acquiring the lock_map around the fn()
+ * call here and in del_timer_sync().
+ */
+ lock_map_acquire(&lockdep_map);
+
+ trace_timer_expire_entry(timer);
+ fn(data);
+ trace_timer_expire_exit(timer);
+
+ lock_map_release(&lockdep_map);
+
+ if (preempt_count != preempt_count()) {
+ WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
+ fn, preempt_count, preempt_count());
+ /*
+ * Restore the preempt count. That gives us a decent
+ * chance to survive and extract information. If the
+ * callback kept a lock held, bad luck, but not worse
+ * than the BUG() we had.
+ */
+ preempt_count() = preempt_count;
+ }
+}
+
#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
/**
@@ -998,45 +1095,7 @@ static inline void __run_timers(struct tvec_base *base)
detach_timer(timer, 1);
spin_unlock_irq(&base->lock);
- {
- int preempt_count = preempt_count();
-
-#ifdef CONFIG_LOCKDEP
- /*
- * It is permissible to free the timer from
- * inside the function that is called from
- * it, this we need to take into account for
- * lockdep too. To avoid bogus "held lock
- * freed" warnings as well as problems when
- * looking into timer->lockdep_map, make a
- * copy and use that here.
- */
- struct lockdep_map lockdep_map =
- timer->lockdep_map;
-#endif
- /*
- * Couple the lock chain with the lock chain at
- * del_timer_sync() by acquiring the lock_map
- * around the fn() call here and in
- * del_timer_sync().
- */
- lock_map_acquire(&lockdep_map);
-
- trace_timer_expire_entry(timer);
- fn(data);
- trace_timer_expire_exit(timer);
-
- lock_map_release(&lockdep_map);
-
- if (preempt_count != preempt_count()) {
- printk(KERN_ERR "huh, entered %p "
- "with preempt_count %08x, exited"
- " with %08x?\n",
- fn, preempt_count,
- preempt_count());
- BUG();
- }
- }
+ call_timer_fn(timer, fn, data);
spin_lock_irq(&base->lock);
}
}
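To see what apply_slack() actually does with the numbers, here is a small userspace re-implementation of the same rounding (illustrative only; it substitutes __builtin_clzl() for the kernel's find_last_bit()):

	#include <stdio.h>

	static unsigned long apply_slack_demo(unsigned long jiffies_now,
					      unsigned long expires, long slack)
	{
		unsigned long expires_limit = expires + slack;
		unsigned long mask;
		int bit;

		if (slack < 0)		/* auto slack: ~0.4% of the remaining delay */
			expires_limit = expires + (expires - jiffies_now) / 256;

		mask = expires ^ expires_limit;
		if (!mask)
			return expires;

		/* index of the highest bit in which expires and the limit differ */
		bit = (int)(sizeof(long) * 8 - 1) - __builtin_clzl(mask);
		mask = (1UL << bit) - 1;

		return expires_limit & ~mask;	/* round down, clearing the low bits */
	}

	int main(void)
	{
		/*
		 * jiffies = 0, expires = 10000, default slack of -1:
		 * limit = 10039, rounded expiry = 10016 -- anywhere in
		 * [10000, 10039] is acceptable, and the rounder value lets
		 * unrelated timers coalesce onto the same tick.
		 */
		printf("%lu\n", apply_slack_demo(0, 10000, -1));
		return 0;
	}

set_timer_slack() simply overrides the default of -1 for timers that need it; a slack of 0 makes the mask collapse to zero and restores the exact old expiry behaviour.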
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 13e13d428cd3..8b1797c4545b 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -44,9 +44,6 @@ config HAVE_FTRACE_MCOUNT_RECORD
help
See Documentation/trace/ftrace-design.txt
-config HAVE_HW_BRANCH_TRACER
- bool
-
config HAVE_SYSCALL_TRACEPOINTS
bool
help
@@ -374,14 +371,6 @@ config STACK_TRACER
Say N if unsure.
-config HW_BRANCH_TRACER
- depends on HAVE_HW_BRANCH_TRACER
- bool "Trace hw branches"
- select GENERIC_TRACER
- help
- This tracer records all branches on the system in a circular
- buffer, giving access to the last N branches for each cpu.
-
config KMEMTRACE
bool "Trace SLAB allocations"
select GENERIC_TRACER
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 78edc6490038..ffb1a5b0550e 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -41,7 +41,6 @@ obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
obj-$(CONFIG_BOOT_TRACER) += trace_boot.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o
obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o
-obj-$(CONFIG_HW_BRANCH_TRACER) += trace_hw_branches.o
obj-$(CONFIG_KMEMTRACE) += kmemtrace.o
obj-$(CONFIG_WORKQUEUE_TRACER) += trace_workqueue.o
obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 2404b59b3097..32837e19e3bd 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -264,6 +264,7 @@ struct ftrace_profile {
unsigned long counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
unsigned long long time;
+ unsigned long long time_squared;
#endif
};
@@ -366,9 +367,9 @@ static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
seq_printf(m, " Function "
- "Hit Time Avg\n"
+ "Hit Time Avg s^2\n"
" -------- "
- "--- ---- ---\n");
+ "--- ---- --- ---\n");
#else
seq_printf(m, " Function Hit\n"
" -------- ---\n");
@@ -384,6 +385,7 @@ static int function_stat_show(struct seq_file *m, void *v)
static DEFINE_MUTEX(mutex);
static struct trace_seq s;
unsigned long long avg;
+ unsigned long long stddev;
#endif
kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
@@ -394,11 +396,25 @@ static int function_stat_show(struct seq_file *m, void *v)
avg = rec->time;
do_div(avg, rec->counter);
+	/* Sample variance (s^2) */
+ if (rec->counter <= 1)
+ stddev = 0;
+ else {
+ stddev = rec->time_squared - rec->counter * avg * avg;
+ /*
+ * Divide only 1000 for ns^2 -> us^2 conversion.
+ * trace_print_graph_duration will divide 1000 again.
+ */
+ do_div(stddev, (rec->counter - 1) * 1000);
+ }
+
mutex_lock(&mutex);
trace_seq_init(&s);
trace_print_graph_duration(rec->time, &s);
trace_seq_puts(&s, " ");
trace_print_graph_duration(avg, &s);
+ trace_seq_puts(&s, " ");
+ trace_print_graph_duration(stddev, &s);
trace_print_seq(m, &s);
mutex_unlock(&mutex);
#endif
@@ -650,6 +666,10 @@ static void profile_graph_return(struct ftrace_graph_ret *trace)
if (!stat->hash || !ftrace_profile_enabled)
goto out;
+ /* If the calltime was zero'd ignore it */
+ if (!trace->calltime)
+ goto out;
+
calltime = trace->rettime - trace->calltime;
if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
@@ -668,8 +688,10 @@ static void profile_graph_return(struct ftrace_graph_ret *trace)
}
rec = ftrace_find_profiled_func(stat, trace->func);
- if (rec)
+ if (rec) {
rec->time += calltime;
+ rec->time_squared += calltime * calltime;
+ }
out:
local_irq_restore(flags);
@@ -3212,8 +3234,7 @@ free:
}
static void
-ftrace_graph_probe_sched_switch(struct rq *__rq, struct task_struct *prev,
- struct task_struct *next)
+ftrace_graph_probe_sched_switch(struct task_struct *prev, struct task_struct *next)
{
unsigned long long timestamp;
int index;
@@ -3339,11 +3360,11 @@ void unregister_ftrace_graph(void)
goto out;
ftrace_graph_active--;
- unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
ftrace_graph_entry = ftrace_graph_entry_stub;
ftrace_shutdown(FTRACE_STOP_FUNC_RET);
unregister_pm_notifier(&ftrace_suspend_notifier);
+ unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
out:
mutex_unlock(&ftrace_lock);
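The new time_squared sum lets function_stat_show() derive a spread from just two accumulators, without keeping per-call samples: with n = counter and avg = time / n, the sample variance is s^2 = (time_squared - n * avg^2) / (n - 1), and the extra /1000 in the hunk only turns ns^2 into us^2 before trace_print_graph_duration() divides by 1000 once more. A standalone userspace sketch of the same arithmetic (illustrative values):

	#include <stdio.h>

	/* Mirrors the profiler's two running sums. */
	struct prof {
		unsigned long long counter;		/* number of calls          */
		unsigned long long time;		/* sum of durations (ns)    */
		unsigned long long time_squared;	/* sum of squared durations */
	};

	static void prof_add(struct prof *p, unsigned long long ns)
	{
		p->counter++;
		p->time += ns;
		p->time_squared += ns * ns;
	}

	int main(void)
	{
		struct prof p = { 0 };
		unsigned long long samples[] = { 900, 1100, 1000, 1000 };
		unsigned long long avg, var = 0;
		unsigned int i;

		for (i = 0; i < 4; i++)
			prof_add(&p, samples[i]);

		avg = p.time / p.counter;
		if (p.counter > 1)
			var = (p.time_squared - p.counter * avg * avg) / (p.counter - 1);

		/* prints avg=1000ns s^2=6666ns^2 */
		printf("avg=%lluns s^2=%lluns^2\n", avg, var);
		return 0;
	}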
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 5885cdfc41f3..7f6059c5aa94 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2000,17 +2000,13 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
u64 *ts, u64 *delta)
{
struct ring_buffer_event *event;
- static int once;
int ret;
- if (unlikely(*delta > (1ULL << 59) && !once++)) {
- printk(KERN_WARNING "Delta way too big! %llu"
- " ts=%llu write stamp = %llu\n",
- (unsigned long long)*delta,
- (unsigned long long)*ts,
- (unsigned long long)cpu_buffer->write_stamp);
- WARN_ON(1);
- }
+ WARN_ONCE(*delta > (1ULL << 59),
+ KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n",
+ (unsigned long long)*delta,
+ (unsigned long long)*ts,
+ (unsigned long long)cpu_buffer->write_stamp);
/*
 * The delta is too big, we need to add a
@@ -3332,23 +3328,30 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
EXPORT_SYMBOL_GPL(ring_buffer_consume);
/**
- * ring_buffer_read_start - start a non consuming read of the buffer
+ * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
* @buffer: The ring buffer to read from
* @cpu: The cpu buffer to iterate over
*
- * This starts up an iteration through the buffer. It also disables
- * the recording to the buffer until the reading is finished.
- * This prevents the reading from being corrupted. This is not
- * a consuming read, so a producer is not expected.
+ * This performs the initial preparations necessary to iterate
+ * through the buffer. Memory is allocated, buffer recording
+ * is disabled, and the iterator pointer is returned to the caller.
*
- * Must be paired with ring_buffer_finish.
+ * Disabling buffer recording prevents the reading from being
+ * corrupted. This is not a consuming read, so a producer is not
+ * expected.
+ *
+ * After a sequence of ring_buffer_read_prepare calls, the user is
+ * expected to make at least one call to ring_buffer_read_prepare_sync.
+ * Afterwards, ring_buffer_read_start is invoked to get things going
+ * for real.
+ *
+ * This overall must be paired with ring_buffer_finish.
*/
struct ring_buffer_iter *
-ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
+ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_iter *iter;
- unsigned long flags;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return NULL;
@@ -3362,15 +3365,52 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
iter->cpu_buffer = cpu_buffer;
atomic_inc(&cpu_buffer->record_disabled);
+
+ return iter;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
+
+/**
+ * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
+ *
+ * All previously invoked ring_buffer_read_prepare calls to prepare
+ * iterators will be synchronized. Afterwards, ring_buffer_read_start
+ * calls on those iterators are allowed.
+ */
+void
+ring_buffer_read_prepare_sync(void)
+{
synchronize_sched();
+}
+EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
+
+/**
+ * ring_buffer_read_start - start a non consuming read of the buffer
+ * @iter: The iterator returned by ring_buffer_read_prepare
+ *
+ * This finalizes the startup of an iteration through the buffer.
+ * The iterator comes from a call to ring_buffer_read_prepare and
+ * an intervening ring_buffer_read_prepare_sync must have been
+ * performed.
+ *
+ * Must be paired with ring_buffer_finish.
+ */
+void
+ring_buffer_read_start(struct ring_buffer_iter *iter)
+{
+ struct ring_buffer_per_cpu *cpu_buffer;
+ unsigned long flags;
+
+ if (!iter)
+ return;
+
+ cpu_buffer = iter->cpu_buffer;
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
arch_spin_lock(&cpu_buffer->lock);
rb_iter_reset(iter);
arch_spin_unlock(&cpu_buffer->lock);
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
-
- return iter;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_start);
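Taken together, the split means a reader that wants iterators on several CPUs pays for the synchronize_sched() grace period once instead of once per CPU. The expected calling sequence, mirroring the conversion of __tracing_open() later in this patch (the surrounding variables are illustrative):

	struct ring_buffer_iter *iter[NR_CPUS];
	int cpu;

	/* 1) allocate an iterator and disable recording on each CPU */
	for_each_tracing_cpu(cpu)
		iter[cpu] = ring_buffer_read_prepare(buffer, cpu);

	/* 2) a single grace period covers all of the prepares above */
	ring_buffer_read_prepare_sync();

	/* 3) only now may the iterators be started and read from */
	for_each_tracing_cpu(cpu)
		ring_buffer_read_start(iter[cpu]);

	/*
	 * ... read entries ... then release each iterator with the
	 * read-finish call that the kernel-doc above pairs it with.
	 */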
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
index dc56556b55a2..302f8a614635 100644
--- a/kernel/trace/ring_buffer_benchmark.c
+++ b/kernel/trace/ring_buffer_benchmark.c
@@ -113,7 +113,8 @@ static enum event_status read_page(int cpu)
ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1);
if (ret >= 0) {
rpage = bpage;
- commit = local_read(&rpage->commit);
+ /* The commit may have missed event flags set, clear them */
+ commit = local_read(&rpage->commit) & 0xfffff;
for (i = 0; i < commit && !kill_test; i += inc) {
if (i >= (PAGE_SIZE - offsetof(struct rb_page, data))) {
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 60f3b6289731..756d7283318b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -117,9 +117,12 @@ static cpumask_var_t __read_mostly tracing_buffer_mask;
*
* It is default off, but you can enable it with either specifying
* "ftrace_dump_on_oops" in the kernel command line, or setting
- * /proc/sys/kernel/ftrace_dump_on_oops to true.
+ * /proc/sys/kernel/ftrace_dump_on_oops
+ * Set 1 if you want to dump buffers of all CPUs
+ * Set 2 if you want to dump the buffer of the CPU that triggered oops
*/
-int ftrace_dump_on_oops;
+
+enum ftrace_dump_mode ftrace_dump_on_oops;
static int tracing_set_tracer(const char *buf);
@@ -139,8 +142,17 @@ __setup("ftrace=", set_cmdline_ftrace);
static int __init set_ftrace_dump_on_oops(char *str)
{
- ftrace_dump_on_oops = 1;
- return 1;
+ if (*str++ != '=' || !*str) {
+ ftrace_dump_on_oops = DUMP_ALL;
+ return 1;
+ }
+
+ if (!strcmp("orig_cpu", str)) {
+ ftrace_dump_on_oops = DUMP_ORIG;
+ return 1;
+ }
+
+ return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
@@ -1571,7 +1583,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
{
struct ring_buffer *buffer = iter->tr->buffer;
struct trace_entry *ent, *next = NULL;
- unsigned long lost_events, next_lost = 0;
+ unsigned long lost_events = 0, next_lost = 0;
int cpu_file = iter->cpu_file;
u64 next_ts = 0, ts;
int next_cpu = -1;
@@ -1796,7 +1808,7 @@ static void print_func_help_header(struct seq_file *m)
}
-static void
+void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
@@ -2005,7 +2017,7 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
return event ? event->binary(iter, 0) : TRACE_TYPE_HANDLED;
}
-static int trace_empty(struct trace_iterator *iter)
+int trace_empty(struct trace_iterator *iter)
{
int cpu;
@@ -2072,6 +2084,23 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
return print_trace_fmt(iter);
}
+void trace_default_header(struct seq_file *m)
+{
+ struct trace_iterator *iter = m->private;
+
+ if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
+ /* print nothing if the buffers are empty */
+ if (trace_empty(iter))
+ return;
+ print_trace_header(m, iter);
+ if (!(trace_flags & TRACE_ITER_VERBOSE))
+ print_lat_help_header(m);
+ } else {
+ if (!(trace_flags & TRACE_ITER_VERBOSE))
+ print_func_help_header(m);
+ }
+}
+
static int s_show(struct seq_file *m, void *v)
{
struct trace_iterator *iter = v;
@@ -2084,17 +2113,9 @@ static int s_show(struct seq_file *m, void *v)
}
if (iter->trace && iter->trace->print_header)
iter->trace->print_header(m);
- else if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
- /* print nothing if the buffers are empty */
- if (trace_empty(iter))
- return 0;
- print_trace_header(m, iter);
- if (!(trace_flags & TRACE_ITER_VERBOSE))
- print_lat_help_header(m);
- } else {
- if (!(trace_flags & TRACE_ITER_VERBOSE))
- print_func_help_header(m);
- }
+ else
+ trace_default_header(m);
+
} else if (iter->leftover) {
/*
* If we filled the seq_file buffer earlier, we
@@ -2180,15 +2201,20 @@ __tracing_open(struct inode *inode, struct file *file)
if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
for_each_tracing_cpu(cpu) {
-
iter->buffer_iter[cpu] =
- ring_buffer_read_start(iter->tr->buffer, cpu);
+ ring_buffer_read_prepare(iter->tr->buffer, cpu);
+ }
+ ring_buffer_read_prepare_sync();
+ for_each_tracing_cpu(cpu) {
+ ring_buffer_read_start(iter->buffer_iter[cpu]);
tracing_iter_reset(iter, cpu);
}
} else {
cpu = iter->cpu_file;
iter->buffer_iter[cpu] =
- ring_buffer_read_start(iter->tr->buffer, cpu);
+ ring_buffer_read_prepare(iter->tr->buffer, cpu);
+ ring_buffer_read_prepare_sync();
+ ring_buffer_read_start(iter->buffer_iter[cpu]);
tracing_iter_reset(iter, cpu);
}
@@ -4338,7 +4364,7 @@ static int trace_panic_handler(struct notifier_block *this,
unsigned long event, void *unused)
{
if (ftrace_dump_on_oops)
- ftrace_dump();
+ ftrace_dump(ftrace_dump_on_oops);
return NOTIFY_OK;
}
@@ -4355,7 +4381,7 @@ static int trace_die_handler(struct notifier_block *self,
switch (val) {
case DIE_OOPS:
if (ftrace_dump_on_oops)
- ftrace_dump();
+ ftrace_dump(ftrace_dump_on_oops);
break;
default:
break;
@@ -4396,7 +4422,8 @@ trace_printk_seq(struct trace_seq *s)
trace_seq_init(s);
}
-static void __ftrace_dump(bool disable_tracing)
+static void
+__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
{
static arch_spinlock_t ftrace_dump_lock =
(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
@@ -4429,12 +4456,25 @@ static void __ftrace_dump(bool disable_tracing)
/* don't look at user memory in panic mode */
trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
- printk(KERN_TRACE "Dumping ftrace buffer:\n");
-
/* Simulate the iterator */
iter.tr = &global_trace;
iter.trace = current_trace;
- iter.cpu_file = TRACE_PIPE_ALL_CPU;
+
+ switch (oops_dump_mode) {
+ case DUMP_ALL:
+ iter.cpu_file = TRACE_PIPE_ALL_CPU;
+ break;
+ case DUMP_ORIG:
+ iter.cpu_file = raw_smp_processor_id();
+ break;
+ case DUMP_NONE:
+ goto out_enable;
+ default:
+ printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
+ iter.cpu_file = TRACE_PIPE_ALL_CPU;
+ }
+
+ printk(KERN_TRACE "Dumping ftrace buffer:\n");
/*
* We need to stop all tracing on all CPUS to read the
@@ -4473,6 +4513,7 @@ static void __ftrace_dump(bool disable_tracing)
else
printk(KERN_TRACE "---------------------------------\n");
+ out_enable:
/* Re-enable tracing if requested */
if (!disable_tracing) {
trace_flags |= old_userobj;
@@ -4489,9 +4530,9 @@ static void __ftrace_dump(bool disable_tracing)
}
/* By default: disable tracing after the dump */
-void ftrace_dump(void)
+void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
- __ftrace_dump(true);
+ __ftrace_dump(true, oops_dump_mode);
}
__init static int tracer_alloc_buffers(void)
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 2825ef2c0b15..d1ce0bec1b3f 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -34,7 +34,6 @@ enum trace_type {
TRACE_GRAPH_RET,
TRACE_GRAPH_ENT,
TRACE_USER_STACK,
- TRACE_HW_BRANCHES,
TRACE_KMEM_ALLOC,
TRACE_KMEM_FREE,
TRACE_BLK,
@@ -103,29 +102,17 @@ struct syscall_trace_exit {
long ret;
};
-struct kprobe_trace_entry {
+struct kprobe_trace_entry_head {
struct trace_entry ent;
unsigned long ip;
- int nargs;
- unsigned long args[];
};
-#define SIZEOF_KPROBE_TRACE_ENTRY(n) \
- (offsetof(struct kprobe_trace_entry, args) + \
- (sizeof(unsigned long) * (n)))
-
-struct kretprobe_trace_entry {
+struct kretprobe_trace_entry_head {
struct trace_entry ent;
unsigned long func;
unsigned long ret_ip;
- int nargs;
- unsigned long args[];
};
-#define SIZEOF_KRETPROBE_TRACE_ENTRY(n) \
- (offsetof(struct kretprobe_trace_entry, args) + \
- (sizeof(unsigned long) * (n)))
-
/*
* trace_flag_type is an enumeration that holds different
* states when a trace occurs. These are:
@@ -229,7 +216,6 @@ extern void __ftrace_bad_type(void);
TRACE_GRAPH_ENT); \
IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \
TRACE_GRAPH_RET); \
- IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\
IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry, \
TRACE_KMEM_ALLOC); \
IF_ASSIGN(var, ent, struct kmemtrace_free_entry, \
@@ -378,6 +364,9 @@ void trace_function(struct trace_array *tr,
unsigned long ip,
unsigned long parent_ip,
unsigned long flags, int pc);
+void trace_default_header(struct seq_file *m);
+void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
+int trace_empty(struct trace_iterator *iter);
void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
@@ -467,8 +456,6 @@ extern int trace_selftest_startup_sysprof(struct tracer *trace,
struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
struct trace_array *tr);
-extern int trace_selftest_startup_hw_branches(struct tracer *trace,
- struct trace_array *tr);
extern int trace_selftest_startup_ksym(struct tracer *trace,
struct trace_array *tr);
#endif /* CONFIG_FTRACE_STARTUP_TEST */
@@ -491,9 +478,29 @@ extern int trace_clock_id;
/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-extern enum print_line_t print_graph_function(struct trace_iterator *iter);
+
+/* Flag options */
+#define TRACE_GRAPH_PRINT_OVERRUN 0x1
+#define TRACE_GRAPH_PRINT_CPU 0x2
+#define TRACE_GRAPH_PRINT_OVERHEAD 0x4
+#define TRACE_GRAPH_PRINT_PROC 0x8
+#define TRACE_GRAPH_PRINT_DURATION 0x10
+#define TRACE_GRAPH_PRINT_ABS_TIME 0x20
+
+extern enum print_line_t
+print_graph_function_flags(struct trace_iterator *iter, u32 flags);
+extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
extern enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
+extern void graph_trace_open(struct trace_iterator *iter);
+extern void graph_trace_close(struct trace_iterator *iter);
+extern int __trace_graph_entry(struct trace_array *tr,
+ struct ftrace_graph_ent *trace,
+ unsigned long flags, int pc);
+extern void __trace_graph_return(struct trace_array *tr,
+ struct ftrace_graph_ret *trace,
+ unsigned long flags, int pc);
+
#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
@@ -524,7 +531,7 @@ static inline int ftrace_graph_addr(unsigned long addr)
#endif /* CONFIG_DYNAMIC_FTRACE */
#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
-print_graph_function(struct trace_iterator *iter)
+print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
return TRACE_TYPE_UNHANDLED;
}
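Exporting graph_trace_open()/graph_trace_close(), print_graph_headers_flags() and print_graph_function_flags() (and moving the TRACE_GRAPH_PRINT_* flags into this header) is what lets other tracers reuse the function-graph output with a fixed flag mask of their own, independent of the graph tracer's tracer_flags. A hedged sketch of how a latency-style tracer might plug these in (the option variable and flag choice are illustrative, not part of this patch):

	#define MY_GRAPH_FLAGS	(TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_DURATION)

	static int my_display_graph;	/* hypothetical "show call graph" option */

	static void my_trace_open(struct trace_iterator *iter)
	{
		if (my_display_graph)
			graph_trace_open(iter);
		else
			iter->private = NULL;
	}

	static void my_trace_close(struct trace_iterator *iter)
	{
		if (iter->private)
			graph_trace_close(iter);
	}

	static enum print_line_t my_print_line(struct trace_iterator *iter)
	{
		if (my_display_graph)
			return print_graph_function_flags(iter, MY_GRAPH_FLAGS);

		return TRACE_TYPE_UNHANDLED;	/* fall through to the default output */
	}

	static void my_print_header(struct seq_file *s)
	{
		if (my_display_graph)
			print_graph_headers_flags(s, MY_GRAPH_FLAGS);
		else
			trace_default_header(s);
	}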
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index c16a08f399df..dc008c1240da 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -318,18 +318,6 @@ FTRACE_ENTRY(branch, trace_branch,
__entry->func, __entry->file, __entry->correct)
);
-FTRACE_ENTRY(hw_branch, hw_branch_entry,
-
- TRACE_HW_BRANCHES,
-
- F_STRUCT(
- __field( u64, from )
- __field( u64, to )
- ),
-
- F_printk("from: %llx to: %llx", __entry->from, __entry->to)
-);
-
FTRACE_ENTRY(kmem_alloc, kmemtrace_alloc_entry,
TRACE_KMEM_ALLOC,
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 88c0b6dbd7fe..58092d844a1f 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1398,7 +1398,7 @@ int ftrace_profile_set_filter(struct perf_event *event, int event_id,
}
err = -EINVAL;
- if (!call)
+ if (&call->list == &ftrace_events)
goto out_unlock;
err = -EEXIST;
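The one-line change above fixes a classic list_for_each_entry() pitfall: if the loop runs to completion without hitting a break, the cursor is not NULL, it is the bogus container computed from the list head itself, so a "!call" test can never fire. "Not found" has to be detected by comparing the cursor's node against the head, as in this schematic version of the lookup (the match condition is abbreviated):

	struct ftrace_event_call *call;

	list_for_each_entry(call, &ftrace_events, list) {
		if (call->id == event_id)	/* schematic match test */
			break;
	}

	/* WRONG:  if (!call)  -- never true after the loop                */
	/* RIGHT:  a full traversal leaves call->list pointing at the head */
	if (&call->list == &ftrace_events)
		return -EINVAL;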
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 669b9c31861d..dd11c830eb84 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -40,7 +40,7 @@ struct fgraph_data {
#define TRACE_GRAPH_PRINT_OVERHEAD 0x4
#define TRACE_GRAPH_PRINT_PROC 0x8
#define TRACE_GRAPH_PRINT_DURATION 0x10
-#define TRACE_GRAPH_PRINT_ABS_TIME 0X20
+#define TRACE_GRAPH_PRINT_ABS_TIME 0x20
static struct tracer_opt trace_opts[] = {
/* Display overruns? (for self-debug purpose) */
@@ -179,7 +179,7 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
return ret;
}
-static int __trace_graph_entry(struct trace_array *tr,
+int __trace_graph_entry(struct trace_array *tr,
struct ftrace_graph_ent *trace,
unsigned long flags,
int pc)
@@ -246,7 +246,7 @@ int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
return trace_graph_entry(trace);
}
-static void __trace_graph_return(struct trace_array *tr,
+void __trace_graph_return(struct trace_array *tr,
struct ftrace_graph_ret *trace,
unsigned long flags,
int pc)
@@ -527,17 +527,18 @@ get_return_for_leaf(struct trace_iterator *iter,
 /* Signal an overhead of time execution to the output */
static int
-print_graph_overhead(unsigned long long duration, struct trace_seq *s)
+print_graph_overhead(unsigned long long duration, struct trace_seq *s,
+ u32 flags)
{
/* If duration disappear, we don't need anything */
- if (!(tracer_flags.val & TRACE_GRAPH_PRINT_DURATION))
+ if (!(flags & TRACE_GRAPH_PRINT_DURATION))
return 1;
/* Non nested entry or return */
if (duration == -1)
return trace_seq_printf(s, " ");
- if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
+ if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
/* Duration exceeded 100 msecs */
if (duration > 100000ULL)
return trace_seq_printf(s, "! ");
@@ -563,7 +564,7 @@ static int print_graph_abs_time(u64 t, struct trace_seq *s)
static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
- enum trace_type type, int cpu, pid_t pid)
+ enum trace_type type, int cpu, pid_t pid, u32 flags)
{
int ret;
struct trace_seq *s = &iter->seq;
@@ -573,21 +574,21 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
return TRACE_TYPE_UNHANDLED;
/* Absolute time */
- if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
+ if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
ret = print_graph_abs_time(iter->ts, s);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
/* Cpu */
- if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
+ if (flags & TRACE_GRAPH_PRINT_CPU) {
ret = print_graph_cpu(s, cpu);
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
}
/* Proc */
- if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
+ if (flags & TRACE_GRAPH_PRINT_PROC) {
ret = print_graph_proc(s, pid);
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
@@ -597,7 +598,7 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
}
/* No overhead */
- ret = print_graph_overhead(-1, s);
+ ret = print_graph_overhead(-1, s, flags);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
@@ -610,7 +611,7 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
return TRACE_TYPE_PARTIAL_LINE;
/* Don't close the duration column if haven't one */
- if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
+ if (flags & TRACE_GRAPH_PRINT_DURATION)
trace_seq_printf(s, " |");
ret = trace_seq_printf(s, "\n");
@@ -680,7 +681,8 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s)
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
struct ftrace_graph_ent_entry *entry,
- struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s)
+ struct ftrace_graph_ret_entry *ret_entry,
+ struct trace_seq *s, u32 flags)
{
struct fgraph_data *data = iter->private;
struct ftrace_graph_ret *graph_ret;
@@ -712,12 +714,12 @@ print_graph_entry_leaf(struct trace_iterator *iter,
}
/* Overhead */
- ret = print_graph_overhead(duration, s);
+ ret = print_graph_overhead(duration, s, flags);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
/* Duration */
- if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
+ if (flags & TRACE_GRAPH_PRINT_DURATION) {
ret = print_graph_duration(duration, s);
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
@@ -740,7 +742,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
struct ftrace_graph_ent_entry *entry,
- struct trace_seq *s, int cpu)
+ struct trace_seq *s, int cpu, u32 flags)
{
struct ftrace_graph_ent *call = &entry->graph_ent;
struct fgraph_data *data = iter->private;
@@ -760,12 +762,12 @@ print_graph_entry_nested(struct trace_iterator *iter,
}
/* No overhead */
- ret = print_graph_overhead(-1, s);
+ ret = print_graph_overhead(-1, s, flags);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
/* No time */
- if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
+ if (flags & TRACE_GRAPH_PRINT_DURATION) {
ret = trace_seq_printf(s, " | ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
@@ -791,7 +793,7 @@ print_graph_entry_nested(struct trace_iterator *iter,
static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
- int type, unsigned long addr)
+ int type, unsigned long addr, u32 flags)
{
struct fgraph_data *data = iter->private;
struct trace_entry *ent = iter->ent;
@@ -804,27 +806,27 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
if (type) {
/* Interrupt */
- ret = print_graph_irq(iter, addr, type, cpu, ent->pid);
+ ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
}
/* Absolute time */
- if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
+ if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
ret = print_graph_abs_time(iter->ts, s);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
/* Cpu */
- if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
+ if (flags & TRACE_GRAPH_PRINT_CPU) {
ret = print_graph_cpu(s, cpu);
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
}
/* Proc */
- if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
+ if (flags & TRACE_GRAPH_PRINT_PROC) {
ret = print_graph_proc(s, ent->pid);
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
@@ -846,7 +848,7 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
- struct trace_iterator *iter)
+ struct trace_iterator *iter, u32 flags)
{
struct fgraph_data *data = iter->private;
struct ftrace_graph_ent *call = &field->graph_ent;
@@ -854,14 +856,14 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
static enum print_line_t ret;
int cpu = iter->cpu;
- if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func))
+ if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
return TRACE_TYPE_PARTIAL_LINE;
leaf_ret = get_return_for_leaf(iter, field);
if (leaf_ret)
- ret = print_graph_entry_leaf(iter, field, leaf_ret, s);
+ ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
else
- ret = print_graph_entry_nested(iter, field, s, cpu);
+ ret = print_graph_entry_nested(iter, field, s, cpu, flags);
if (data) {
/*
@@ -880,7 +882,8 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
- struct trace_entry *ent, struct trace_iterator *iter)
+ struct trace_entry *ent, struct trace_iterator *iter,
+ u32 flags)
{
unsigned long long duration = trace->rettime - trace->calltime;
struct fgraph_data *data = iter->private;
@@ -910,16 +913,16 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
}
}
- if (print_graph_prologue(iter, s, 0, 0))
+ if (print_graph_prologue(iter, s, 0, 0, flags))
return TRACE_TYPE_PARTIAL_LINE;
/* Overhead */
- ret = print_graph_overhead(duration, s);
+ ret = print_graph_overhead(duration, s, flags);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
/* Duration */
- if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
+ if (flags & TRACE_GRAPH_PRINT_DURATION) {
ret = print_graph_duration(duration, s);
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
@@ -949,14 +952,15 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
}
/* Overrun */
- if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
+ if (flags & TRACE_GRAPH_PRINT_OVERRUN) {
ret = trace_seq_printf(s, " (Overruns: %lu)\n",
trace->overrun);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
- ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, cpu, pid);
+ ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
+ cpu, pid, flags);
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
@@ -964,8 +968,8 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
}
static enum print_line_t
-print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
- struct trace_iterator *iter)
+print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
+ struct trace_iterator *iter, u32 flags)
{
unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
struct fgraph_data *data = iter->private;
@@ -977,16 +981,16 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
if (data)
depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
- if (print_graph_prologue(iter, s, 0, 0))
+ if (print_graph_prologue(iter, s, 0, 0, flags))
return TRACE_TYPE_PARTIAL_LINE;
/* No overhead */
- ret = print_graph_overhead(-1, s);
+ ret = print_graph_overhead(-1, s, flags);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
/* No time */
- if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
+ if (flags & TRACE_GRAPH_PRINT_DURATION) {
ret = trace_seq_printf(s, " | ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
@@ -1041,7 +1045,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
enum print_line_t
-print_graph_function(struct trace_iterator *iter)
+print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
struct ftrace_graph_ent_entry *field;
struct fgraph_data *data = iter->private;
@@ -1062,7 +1066,7 @@ print_graph_function(struct trace_iterator *iter)
if (data && data->failed) {
field = &data->ent;
iter->cpu = data->cpu;
- ret = print_graph_entry(field, s, iter);
+ ret = print_graph_entry(field, s, iter, flags);
if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
ret = TRACE_TYPE_NO_CONSUME;
@@ -1082,32 +1086,49 @@ print_graph_function(struct trace_iterator *iter)
struct ftrace_graph_ent_entry saved;
trace_assign_type(field, entry);
saved = *field;
- return print_graph_entry(&saved, s, iter);
+ return print_graph_entry(&saved, s, iter, flags);
}
case TRACE_GRAPH_RET: {
struct ftrace_graph_ret_entry *field;
trace_assign_type(field, entry);
- return print_graph_return(&field->ret, s, entry, iter);
+ return print_graph_return(&field->ret, s, entry, iter, flags);
}
+ case TRACE_STACK:
+ case TRACE_FN:
+		/* don't trace stack and functions as comments */
+ return TRACE_TYPE_UNHANDLED;
+
default:
- return print_graph_comment(s, entry, iter);
+ return print_graph_comment(s, entry, iter, flags);
}
return TRACE_TYPE_HANDLED;
}
-static void print_lat_header(struct seq_file *s)
+static enum print_line_t
+print_graph_function(struct trace_iterator *iter)
+{
+ return print_graph_function_flags(iter, tracer_flags.val);
+}
+
+static enum print_line_t
+print_graph_function_event(struct trace_iterator *iter, int flags)
+{
+ return print_graph_function(iter);
+}
+
+static void print_lat_header(struct seq_file *s, u32 flags)
{
static const char spaces[] = " " /* 16 spaces */
" " /* 4 spaces */
" "; /* 17 spaces */
int size = 0;
- if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
+ if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
size += 16;
- if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
+ if (flags & TRACE_GRAPH_PRINT_CPU)
size += 4;
- if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
+ if (flags & TRACE_GRAPH_PRINT_PROC)
size += 17;
seq_printf(s, "#%.*s _-----=> irqs-off \n", size, spaces);
@@ -1118,43 +1139,48 @@ static void print_lat_header(struct seq_file *s)
seq_printf(s, "#%.*s|||| / \n", size, spaces);
}
-static void print_graph_headers(struct seq_file *s)
+void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
int lat = trace_flags & TRACE_ITER_LATENCY_FMT;
if (lat)
- print_lat_header(s);
+ print_lat_header(s, flags);
/* 1st line */
seq_printf(s, "#");
- if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
+ if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
seq_printf(s, " TIME ");
- if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
+ if (flags & TRACE_GRAPH_PRINT_CPU)
seq_printf(s, " CPU");
- if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
+ if (flags & TRACE_GRAPH_PRINT_PROC)
seq_printf(s, " TASK/PID ");
if (lat)
seq_printf(s, "|||||");
- if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
+ if (flags & TRACE_GRAPH_PRINT_DURATION)
seq_printf(s, " DURATION ");
seq_printf(s, " FUNCTION CALLS\n");
/* 2nd line */
seq_printf(s, "#");
- if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
+ if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
seq_printf(s, " | ");
- if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
+ if (flags & TRACE_GRAPH_PRINT_CPU)
seq_printf(s, " | ");
- if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
+ if (flags & TRACE_GRAPH_PRINT_PROC)
seq_printf(s, " | | ");
if (lat)
seq_printf(s, "|||||");
- if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
+ if (flags & TRACE_GRAPH_PRINT_DURATION)
seq_printf(s, " | | ");
seq_printf(s, " | | | |\n");
}
-static void graph_trace_open(struct trace_iterator *iter)
+void print_graph_headers(struct seq_file *s)
+{
+ print_graph_headers_flags(s, tracer_flags.val);
+}
+
+void graph_trace_open(struct trace_iterator *iter)
{
/* pid and depth on the last trace processed */
struct fgraph_data *data;
@@ -1189,7 +1215,7 @@ static void graph_trace_open(struct trace_iterator *iter)
pr_warning("function graph tracer: not enough memory\n");
}
-static void graph_trace_close(struct trace_iterator *iter)
+void graph_trace_close(struct trace_iterator *iter)
{
struct fgraph_data *data = iter->private;
@@ -1199,6 +1225,16 @@ static void graph_trace_close(struct trace_iterator *iter)
}
}
+static struct trace_event graph_trace_entry_event = {
+ .type = TRACE_GRAPH_ENT,
+ .trace = print_graph_function_event,
+};
+
+static struct trace_event graph_trace_ret_event = {
+ .type = TRACE_GRAPH_RET,
+ .trace = print_graph_function_event,
+};
+
static struct tracer graph_trace __read_mostly = {
.name = "function_graph",
.open = graph_trace_open,
@@ -1220,6 +1256,16 @@ static __init int init_graph_trace(void)
{
max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);
+ if (!register_ftrace_event(&graph_trace_entry_event)) {
+ pr_warning("Warning: could not register graph trace events\n");
+ return 1;
+ }
+
+ if (!register_ftrace_event(&graph_trace_ret_event)) {
+ pr_warning("Warning: could not register graph trace events\n");
+ return 1;
+ }
+
return register_tracer(&graph_trace);
}
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
deleted file mode 100644
index 7b97000745f5..000000000000
--- a/kernel/trace/trace_hw_branches.c
+++ /dev/null
@@ -1,312 +0,0 @@
-/*
- * h/w branch tracer for x86 based on BTS
- *
- * Copyright (C) 2008-2009 Intel Corporation.
- * Markus Metzger <markus.t.metzger@gmail.com>, 2008-2009
- */
-#include <linux/kallsyms.h>
-#include <linux/debugfs.h>
-#include <linux/ftrace.h>
-#include <linux/module.h>
-#include <linux/cpu.h>
-#include <linux/smp.h>
-#include <linux/fs.h>
-
-#include <asm/ds.h>
-
-#include "trace_output.h"
-#include "trace.h"
-
-
-#define BTS_BUFFER_SIZE (1 << 13)
-
-static DEFINE_PER_CPU(struct bts_tracer *, hwb_tracer);
-static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], hwb_buffer);
-
-#define this_tracer per_cpu(hwb_tracer, smp_processor_id())
-
-static int trace_hw_branches_enabled __read_mostly;
-static int trace_hw_branches_suspended __read_mostly;
-static struct trace_array *hw_branch_trace __read_mostly;
-
-
-static void bts_trace_init_cpu(int cpu)
-{
- per_cpu(hwb_tracer, cpu) =
- ds_request_bts_cpu(cpu, per_cpu(hwb_buffer, cpu),
- BTS_BUFFER_SIZE, NULL, (size_t)-1,
- BTS_KERNEL);
-
- if (IS_ERR(per_cpu(hwb_tracer, cpu)))
- per_cpu(hwb_tracer, cpu) = NULL;
-}
-
-static int bts_trace_init(struct trace_array *tr)
-{
- int cpu;
-
- hw_branch_trace = tr;
- trace_hw_branches_enabled = 0;
-
- get_online_cpus();
- for_each_online_cpu(cpu) {
- bts_trace_init_cpu(cpu);
-
- if (likely(per_cpu(hwb_tracer, cpu)))
- trace_hw_branches_enabled = 1;
- }
- trace_hw_branches_suspended = 0;
- put_online_cpus();
-
- /* If we could not enable tracing on a single cpu, we fail. */
- return trace_hw_branches_enabled ? 0 : -EOPNOTSUPP;
-}
-
-static void bts_trace_reset(struct trace_array *tr)
-{
- int cpu;
-
- get_online_cpus();
- for_each_online_cpu(cpu) {
- if (likely(per_cpu(hwb_tracer, cpu))) {
- ds_release_bts(per_cpu(hwb_tracer, cpu));
- per_cpu(hwb_tracer, cpu) = NULL;
- }
- }
- trace_hw_branches_enabled = 0;
- trace_hw_branches_suspended = 0;
- put_online_cpus();
-}
-
-static void bts_trace_start(struct trace_array *tr)
-{
- int cpu;
-
- get_online_cpus();
- for_each_online_cpu(cpu)
- if (likely(per_cpu(hwb_tracer, cpu)))
- ds_resume_bts(per_cpu(hwb_tracer, cpu));
- trace_hw_branches_suspended = 0;
- put_online_cpus();
-}
-
-static void bts_trace_stop(struct trace_array *tr)
-{
- int cpu;
-
- get_online_cpus();
- for_each_online_cpu(cpu)
- if (likely(per_cpu(hwb_tracer, cpu)))
- ds_suspend_bts(per_cpu(hwb_tracer, cpu));
- trace_hw_branches_suspended = 1;
- put_online_cpus();
-}
-
-static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- int cpu = (long)hcpu;
-
- switch (action) {
- case CPU_ONLINE:
- case CPU_DOWN_FAILED:
- /* The notification is sent with interrupts enabled. */
- if (trace_hw_branches_enabled) {
- bts_trace_init_cpu(cpu);
-
- if (trace_hw_branches_suspended &&
- likely(per_cpu(hwb_tracer, cpu)))
- ds_suspend_bts(per_cpu(hwb_tracer, cpu));
- }
- break;
-
- case CPU_DOWN_PREPARE:
- /* The notification is sent with interrupts enabled. */
- if (likely(per_cpu(hwb_tracer, cpu))) {
- ds_release_bts(per_cpu(hwb_tracer, cpu));
- per_cpu(hwb_tracer, cpu) = NULL;
- }
- }
-
- return NOTIFY_DONE;
-}
-
-static struct notifier_block bts_hotcpu_notifier __cpuinitdata = {
- .notifier_call = bts_hotcpu_handler
-};
-
-static void bts_trace_print_header(struct seq_file *m)
-{
- seq_puts(m, "# CPU# TO <- FROM\n");
-}
-
-static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
-{
- unsigned long symflags = TRACE_ITER_SYM_OFFSET;
- struct trace_entry *entry = iter->ent;
- struct trace_seq *seq = &iter->seq;
- struct hw_branch_entry *it;
-
- trace_assign_type(it, entry);
-
- if (entry->type == TRACE_HW_BRANCHES) {
- if (trace_seq_printf(seq, "%4d ", iter->cpu) &&
- seq_print_ip_sym(seq, it->to, symflags) &&
- trace_seq_printf(seq, "\t <- ") &&
- seq_print_ip_sym(seq, it->from, symflags) &&
- trace_seq_printf(seq, "\n"))
- return TRACE_TYPE_HANDLED;
- return TRACE_TYPE_PARTIAL_LINE;
- }
- return TRACE_TYPE_UNHANDLED;
-}
-
-void trace_hw_branch(u64 from, u64 to)
-{
- struct ftrace_event_call *call = &event_hw_branch;
- struct trace_array *tr = hw_branch_trace;
- struct ring_buffer_event *event;
- struct ring_buffer *buf;
- struct hw_branch_entry *entry;
- unsigned long irq1;
- int cpu;
-
- if (unlikely(!tr))
- return;
-
- if (unlikely(!trace_hw_branches_enabled))
- return;
-
- local_irq_save(irq1);
- cpu = raw_smp_processor_id();
- if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
- goto out;
-
- buf = tr->buffer;
- event = trace_buffer_lock_reserve(buf, TRACE_HW_BRANCHES,
- sizeof(*entry), 0, 0);
- if (!event)
- goto out;
- entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, 0, from);
- entry->ent.type = TRACE_HW_BRANCHES;
- entry->from = from;
- entry->to = to;
- if (!filter_check_discard(call, entry, buf, event))
- trace_buffer_unlock_commit(buf, event, 0, 0);
-
- out:
- atomic_dec(&tr->data[cpu]->disabled);
- local_irq_restore(irq1);
-}
-
-static void trace_bts_at(const struct bts_trace *trace, void *at)
-{
- struct bts_struct bts;
- int err = 0;
-
- WARN_ON_ONCE(!trace->read);
- if (!trace->read)
- return;
-
- err = trace->read(this_tracer, at, &bts);
- if (err < 0)
- return;
-
- switch (bts.qualifier) {
- case BTS_BRANCH:
- trace_hw_branch(bts.variant.lbr.from, bts.variant.lbr.to);
- break;
- }
-}
-
-/*
- * Collect the trace on the current cpu and write it into the ftrace buffer.
- *
- * pre: tracing must be suspended on the current cpu
- */
-static void trace_bts_cpu(void *arg)
-{
- struct trace_array *tr = (struct trace_array *)arg;
- const struct bts_trace *trace;
- unsigned char *at;
-
- if (unlikely(!tr))
- return;
-
- if (unlikely(atomic_read(&tr->data[raw_smp_processor_id()]->disabled)))
- return;
-
- if (unlikely(!this_tracer))
- return;
-
- trace = ds_read_bts(this_tracer);
- if (!trace)
- return;
-
- for (at = trace->ds.top; (void *)at < trace->ds.end;
- at += trace->ds.size)
- trace_bts_at(trace, at);
-
- for (at = trace->ds.begin; (void *)at < trace->ds.top;
- at += trace->ds.size)
- trace_bts_at(trace, at);
-}
-
-static void trace_bts_prepare(struct trace_iterator *iter)
-{
- int cpu;
-
- get_online_cpus();
- for_each_online_cpu(cpu)
- if (likely(per_cpu(hwb_tracer, cpu)))
- ds_suspend_bts(per_cpu(hwb_tracer, cpu));
- /*
- * We need to collect the trace on the respective cpu since ftrace
- * implicitly adds the record for the current cpu.
- * Once that is more flexible, we could collect the data from any cpu.
- */
- on_each_cpu(trace_bts_cpu, iter->tr, 1);
-
- for_each_online_cpu(cpu)
- if (likely(per_cpu(hwb_tracer, cpu)))
- ds_resume_bts(per_cpu(hwb_tracer, cpu));
- put_online_cpus();
-}
-
-static void trace_bts_close(struct trace_iterator *iter)
-{
- tracing_reset_online_cpus(iter->tr);
-}
-
-void trace_hw_branch_oops(void)
-{
- if (this_tracer) {
- ds_suspend_bts_noirq(this_tracer);
- trace_bts_cpu(hw_branch_trace);
- ds_resume_bts_noirq(this_tracer);
- }
-}
-
-struct tracer bts_tracer __read_mostly =
-{
- .name = "hw-branch-tracer",
- .init = bts_trace_init,
- .reset = bts_trace_reset,
- .print_header = bts_trace_print_header,
- .print_line = bts_trace_print_line,
- .start = bts_trace_start,
- .stop = bts_trace_stop,
- .open = trace_bts_prepare,
- .close = trace_bts_close,
-#ifdef CONFIG_FTRACE_SELFTEST
- .selftest = trace_selftest_startup_hw_branches,
-#endif /* CONFIG_FTRACE_SELFTEST */
-};
-
-__init static int init_bts_trace(void)
-{
- register_hotcpu_notifier(&bts_hotcpu_notifier);
- return register_tracer(&bts_tracer);
-}
-device_initcall(init_bts_trace);
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 2974bc7538c7..6fd486e0cef4 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -34,6 +34,9 @@ static int trace_type __read_mostly;
static int save_lat_flag;
+static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
+static int start_irqsoff_tracer(struct trace_array *tr, int graph);
+
#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
@@ -55,6 +58,23 @@ irq_trace(void)
# define irq_trace() (0)
#endif
+#define TRACE_DISPLAY_GRAPH 1
+
+static struct tracer_opt trace_opts[] = {
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ /* display latency trace as call graph */
+ { TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
+#endif
+ { } /* Empty entry */
+};
+
+static struct tracer_flags tracer_flags = {
+ .val = 0,
+ .opts = trace_opts,
+};
+
+#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)
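/*
 * Note: this flag is normally toggled through the tracer's "display-graph"
 * option file under the tracing options directory (the exact path depends
 * on where debugfs is mounted and is not part of this patch); when set,
 * the latency trace is rendered as a call graph instead of the flat format.
 */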
+
/*
* Sequence count - we record it when starting a measurement and
* skip the latency if the sequence has changed - some other section
@@ -108,6 +128,202 @@ static struct ftrace_ops trace_ops __read_mostly =
};
#endif /* CONFIG_FUNCTION_TRACER */
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
+{
+ int cpu;
+
+ if (!(bit & TRACE_DISPLAY_GRAPH))
+ return -EINVAL;
+
+ if (!(is_graph() ^ set))
+ return 0;
+
+ stop_irqsoff_tracer(irqsoff_trace, !set);
+
+ for_each_possible_cpu(cpu)
+ per_cpu(tracing_cpu, cpu) = 0;
+
+ tracing_max_latency = 0;
+ tracing_reset_online_cpus(irqsoff_trace);
+
+ return start_irqsoff_tracer(irqsoff_trace, set);
+}
+
+static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
+{
+ struct trace_array *tr = irqsoff_trace;
+ struct trace_array_cpu *data;
+ unsigned long flags;
+ long disabled;
+ int ret;
+ int cpu;
+ int pc;
+
+ cpu = raw_smp_processor_id();
+ if (likely(!per_cpu(tracing_cpu, cpu)))
+ return 0;
+
+ local_save_flags(flags);
+ /* slight chance to get a false positive on tracing_cpu */
+ if (!irqs_disabled_flags(flags))
+ return 0;
+
+ data = tr->data[cpu];
+ disabled = atomic_inc_return(&data->disabled);
+
+ if (likely(disabled == 1)) {
+ pc = preempt_count();
+ ret = __trace_graph_entry(tr, trace, flags, pc);
+ } else
+ ret = 0;
+
+ atomic_dec(&data->disabled);
+ return ret;
+}
+
+static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
+{
+ struct trace_array *tr = irqsoff_trace;
+ struct trace_array_cpu *data;
+ unsigned long flags;
+ long disabled;
+ int cpu;
+ int pc;
+
+ cpu = raw_smp_processor_id();
+ if (likely(!per_cpu(tracing_cpu, cpu)))
+ return;
+
+ local_save_flags(flags);
+ /* slight chance to get a false positive on tracing_cpu */
+ if (!irqs_disabled_flags(flags))
+ return;
+
+ data = tr->data[cpu];
+ disabled = atomic_inc_return(&data->disabled);
+
+ if (likely(disabled == 1)) {
+ pc = preempt_count();
+ __trace_graph_return(tr, trace, flags, pc);
+ }
+
+ atomic_dec(&data->disabled);
+}
+
+static void irqsoff_trace_open(struct trace_iterator *iter)
+{
+ if (is_graph())
+ graph_trace_open(iter);
+
+}
+
+static void irqsoff_trace_close(struct trace_iterator *iter)
+{
+ if (iter->private)
+ graph_trace_close(iter);
+}
+
+#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
+ TRACE_GRAPH_PRINT_PROC)
+
+static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
+{
+ u32 flags = GRAPH_TRACER_FLAGS;
+
+ if (trace_flags & TRACE_ITER_LATENCY_FMT)
+ flags |= TRACE_GRAPH_PRINT_DURATION;
+ else
+ flags |= TRACE_GRAPH_PRINT_ABS_TIME;
+
+ /*
+ * In graph mode call the graph tracer output function,
+ * otherwise go with the TRACE_FN event handler
+ */
+ if (is_graph())
+ return print_graph_function_flags(iter, flags);
+
+ return TRACE_TYPE_UNHANDLED;
+}
+
+static void irqsoff_print_header(struct seq_file *s)
+{
+ if (is_graph()) {
+ struct trace_iterator *iter = s->private;
+ u32 flags = GRAPH_TRACER_FLAGS;
+
+ if (trace_flags & TRACE_ITER_LATENCY_FMT) {
+ /* print nothing if the buffers are empty */
+ if (trace_empty(iter))
+ return;
+
+ print_trace_header(s, iter);
+ flags |= TRACE_GRAPH_PRINT_DURATION;
+ } else
+ flags |= TRACE_GRAPH_PRINT_ABS_TIME;
+
+ print_graph_headers_flags(s, flags);
+ } else
+ trace_default_header(s);
+}
+
+static void
+trace_graph_function(struct trace_array *tr,
+ unsigned long ip, unsigned long flags, int pc)
+{
+ u64 time = trace_clock_local();
+ struct ftrace_graph_ent ent = {
+ .func = ip,
+ .depth = 0,
+ };
+ struct ftrace_graph_ret ret = {
+ .func = ip,
+ .depth = 0,
+ .calltime = time,
+ .rettime = time,
+ };
+
+ __trace_graph_entry(tr, &ent, flags, pc);
+ __trace_graph_return(tr, &ret, flags, pc);
+}
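/*
 * The helper above synthesizes a matched entry/return pair with identical
 * calltime and rettime, so a single function hit is rendered as a
 * zero-duration call by the graph output code.
 */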
+
+static void
+__trace_function(struct trace_array *tr,
+ unsigned long ip, unsigned long parent_ip,
+ unsigned long flags, int pc)
+{
+ if (!is_graph())
+ trace_function(tr, ip, parent_ip, flags, pc);
+ else {
+ trace_graph_function(tr, parent_ip, flags, pc);
+ trace_graph_function(tr, ip, flags, pc);
+ }
+}
+
+#else
+#define __trace_function trace_function
+
+static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
+{
+ return -EINVAL;
+}
+
+static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
+{
+ return -1;
+}
+
+static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
+{
+ return TRACE_TYPE_UNHANDLED;
+}
+
+static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
+static void irqsoff_print_header(struct seq_file *s) { }
+static void irqsoff_trace_open(struct trace_iterator *iter) { }
+static void irqsoff_trace_close(struct trace_iterator *iter) { }
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
/*
* Should this new latency be reported/recorded?
*/
@@ -150,7 +366,7 @@ check_critical_timing(struct trace_array *tr,
if (!report_latency(delta))
goto out_unlock;
- trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
+ __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
/* Skip 5 functions to get to the irq/preempt enable function */
__trace_stack(tr, flags, 5, pc);
@@ -172,7 +388,7 @@ out_unlock:
out:
data->critical_sequence = max_sequence;
data->preempt_timestamp = ftrace_now(cpu);
- trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
+ __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}
static inline void
@@ -204,7 +420,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
local_save_flags(flags);
- trace_function(tr, ip, parent_ip, flags, preempt_count());
+ __trace_function(tr, ip, parent_ip, flags, preempt_count());
per_cpu(tracing_cpu, cpu) = 1;
@@ -238,7 +454,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
atomic_inc(&data->disabled);
local_save_flags(flags);
- trace_function(tr, ip, parent_ip, flags, preempt_count());
+ __trace_function(tr, ip, parent_ip, flags, preempt_count());
check_critical_timing(tr, data, parent_ip ? : ip, cpu);
data->critical_start = 0;
atomic_dec(&data->disabled);
@@ -347,19 +563,32 @@ void trace_preempt_off(unsigned long a0, unsigned long a1)
}
#endif /* CONFIG_PREEMPT_TRACER */
-static void start_irqsoff_tracer(struct trace_array *tr)
+static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
- register_ftrace_function(&trace_ops);
- if (tracing_is_enabled())
+ int ret = 0;
+
+ if (!graph)
+ ret = register_ftrace_function(&trace_ops);
+ else
+ ret = register_ftrace_graph(&irqsoff_graph_return,
+ &irqsoff_graph_entry);
+
+ if (!ret && tracing_is_enabled())
tracer_enabled = 1;
else
tracer_enabled = 0;
+
+ return ret;
}
-static void stop_irqsoff_tracer(struct trace_array *tr)
+static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
tracer_enabled = 0;
- unregister_ftrace_function(&trace_ops);
+
+ if (!graph)
+ unregister_ftrace_function(&trace_ops);
+ else
+ unregister_ftrace_graph();
}
static void __irqsoff_tracer_init(struct trace_array *tr)
@@ -372,12 +601,14 @@ static void __irqsoff_tracer_init(struct trace_array *tr)
/* make sure that the tracer is visible */
smp_wmb();
tracing_reset_online_cpus(tr);
- start_irqsoff_tracer(tr);
+
+ if (start_irqsoff_tracer(tr, is_graph()))
+ printk(KERN_ERR "failed to start irqsoff tracer\n");
}
static void irqsoff_tracer_reset(struct trace_array *tr)
{
- stop_irqsoff_tracer(tr);
+ stop_irqsoff_tracer(tr, is_graph());
if (!save_lat_flag)
trace_flags &= ~TRACE_ITER_LATENCY_FMT;
@@ -409,9 +640,15 @@ static struct tracer irqsoff_tracer __read_mostly =
.start = irqsoff_tracer_start,
.stop = irqsoff_tracer_stop,
.print_max = 1,
+ .print_header = irqsoff_print_header,
+ .print_line = irqsoff_print_line,
+ .flags = &tracer_flags,
+ .set_flag = irqsoff_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_irqsoff,
#endif
+ .open = irqsoff_trace_open,
+ .close = irqsoff_trace_close,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
@@ -435,9 +672,15 @@ static struct tracer preemptoff_tracer __read_mostly =
.start = irqsoff_tracer_start,
.stop = irqsoff_tracer_stop,
.print_max = 1,
+ .print_header = irqsoff_print_header,
+ .print_line = irqsoff_print_line,
+ .flags = &tracer_flags,
+ .set_flag = irqsoff_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_preemptoff,
#endif
+ .open = irqsoff_trace_open,
+ .close = irqsoff_trace_close,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
@@ -463,9 +706,15 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
.start = irqsoff_tracer_start,
.stop = irqsoff_tracer_stop,
.print_max = 1,
+ .print_header = irqsoff_print_header,
+ .print_line = irqsoff_print_line,
+ .flags = &tracer_flags,
+ .set_flag = irqsoff_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_preemptirqsoff,
#endif
+ .open = irqsoff_trace_open,
+ .close = irqsoff_trace_close,
};
# define register_preemptirqsoff(trace) register_tracer(&trace)
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 1251e367bae9..a7514326052b 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -29,6 +29,8 @@
#include <linux/ctype.h>
#include <linux/ptrace.h>
#include <linux/perf_event.h>
+#include <linux/stringify.h>
+#include <asm/bitsperlong.h>
#include "trace.h"
#include "trace_output.h"
@@ -40,7 +42,6 @@
/* Reserved field names */
#define FIELD_STRING_IP "__probe_ip"
-#define FIELD_STRING_NARGS "__probe_nargs"
#define FIELD_STRING_RETIP "__probe_ret_ip"
#define FIELD_STRING_FUNC "__probe_func"
@@ -52,56 +53,102 @@ const char *reserved_field_names[] = {
"common_tgid",
"common_lock_depth",
FIELD_STRING_IP,
- FIELD_STRING_NARGS,
FIELD_STRING_RETIP,
FIELD_STRING_FUNC,
};
-struct fetch_func {
- unsigned long (*func)(struct pt_regs *, void *);
+/* Printing function type */
+typedef int (*print_type_func_t)(struct trace_seq *, const char *, void *);
+#define PRINT_TYPE_FUNC_NAME(type) print_type_##type
+#define PRINT_TYPE_FMT_NAME(type) print_type_format_##type
+
+/* Printing in basic type function template */
+#define DEFINE_BASIC_PRINT_TYPE_FUNC(type, fmt, cast) \
+static __kprobes int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, \
+ const char *name, void *data)\
+{ \
+ return trace_seq_printf(s, " %s=" fmt, name, (cast)*(type *)data);\
+} \
+static const char PRINT_TYPE_FMT_NAME(type)[] = fmt;
+
+DEFINE_BASIC_PRINT_TYPE_FUNC(u8, "%x", unsigned int)
+DEFINE_BASIC_PRINT_TYPE_FUNC(u16, "%x", unsigned int)
+DEFINE_BASIC_PRINT_TYPE_FUNC(u32, "%lx", unsigned long)
+DEFINE_BASIC_PRINT_TYPE_FUNC(u64, "%llx", unsigned long long)
+DEFINE_BASIC_PRINT_TYPE_FUNC(s8, "%d", int)
+DEFINE_BASIC_PRINT_TYPE_FUNC(s16, "%d", int)
+DEFINE_BASIC_PRINT_TYPE_FUNC(s32, "%ld", long)
+DEFINE_BASIC_PRINT_TYPE_FUNC(s64, "%lld", long long)
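For reference, this is roughly what one instantiation of the template above expands to (illustrative expansion of the u32 case, not a line from the patch):

static __kprobes int print_type_u32(struct trace_seq *s,
				    const char *name, void *data)
{
	return trace_seq_printf(s, " %s=%lx", name,
				(unsigned long)*(u32 *)data);
}
static const char print_type_format_u32[] = "%lx";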
+
+/* Data fetch function type */
+typedef void (*fetch_func_t)(struct pt_regs *, void *, void *);
+
+struct fetch_param {
+ fetch_func_t fn;
void *data;
};
-static __kprobes unsigned long call_fetch(struct fetch_func *f,
- struct pt_regs *regs)
+static __kprobes void call_fetch(struct fetch_param *fprm,
+ struct pt_regs *regs, void *dest)
{
- return f->func(regs, f->data);
+ return fprm->fn(regs, fprm->data, dest);
}
-/* fetch handlers */
-static __kprobes unsigned long fetch_register(struct pt_regs *regs,
- void *offset)
-{
- return regs_get_register(regs, (unsigned int)((unsigned long)offset));
+#define FETCH_FUNC_NAME(kind, type) fetch_##kind##_##type
+/*
+ * Define macros for basic types - we don't need to define s* types, because
+ * we only need to care about the bit width at recording time.
+ */
+#define DEFINE_BASIC_FETCH_FUNCS(kind) \
+DEFINE_FETCH_##kind(u8) \
+DEFINE_FETCH_##kind(u16) \
+DEFINE_FETCH_##kind(u32) \
+DEFINE_FETCH_##kind(u64)
+
+#define CHECK_BASIC_FETCH_FUNCS(kind, fn) \
+ ((FETCH_FUNC_NAME(kind, u8) == fn) || \
+ (FETCH_FUNC_NAME(kind, u16) == fn) || \
+ (FETCH_FUNC_NAME(kind, u32) == fn) || \
+ (FETCH_FUNC_NAME(kind, u64) == fn))
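/*
 * For example, DEFINE_BASIC_FETCH_FUNCS(reg) below emits fetch_reg_u8,
 * fetch_reg_u16, fetch_reg_u32 and fetch_reg_u64, and
 * CHECK_BASIC_FETCH_FUNCS(reg, fn) matches fn against those four helpers.
 */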
+
+/* Data fetch function templates */
+#define DEFINE_FETCH_reg(type) \
+static __kprobes void FETCH_FUNC_NAME(reg, type)(struct pt_regs *regs, \
+ void *offset, void *dest) \
+{ \
+ *(type *)dest = (type)regs_get_register(regs, \
+ (unsigned int)((unsigned long)offset)); \
}
-
-static __kprobes unsigned long fetch_stack(struct pt_regs *regs,
- void *num)
-{
- return regs_get_kernel_stack_nth(regs,
- (unsigned int)((unsigned long)num));
+DEFINE_BASIC_FETCH_FUNCS(reg)
+
+#define DEFINE_FETCH_stack(type) \
+static __kprobes void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,\
+ void *offset, void *dest) \
+{ \
+ *(type *)dest = (type)regs_get_kernel_stack_nth(regs, \
+ (unsigned int)((unsigned long)offset)); \
}
+DEFINE_BASIC_FETCH_FUNCS(stack)
-static __kprobes unsigned long fetch_memory(struct pt_regs *regs, void *addr)
-{
- unsigned long retval;
-
- if (probe_kernel_address(addr, retval))
- return 0;
- return retval;
+#define DEFINE_FETCH_retval(type) \
+static __kprobes void FETCH_FUNC_NAME(retval, type)(struct pt_regs *regs,\
+ void *dummy, void *dest) \
+{ \
+ *(type *)dest = (type)regs_return_value(regs); \
}
-
-static __kprobes unsigned long fetch_retvalue(struct pt_regs *regs,
- void *dummy)
-{
- return regs_return_value(regs);
-}
-
-static __kprobes unsigned long fetch_stack_address(struct pt_regs *regs,
- void *dummy)
-{
- return kernel_stack_pointer(regs);
+DEFINE_BASIC_FETCH_FUNCS(retval)
+
+#define DEFINE_FETCH_memory(type) \
+static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\
+ void *addr, void *dest) \
+{ \
+ type retval; \
+ if (probe_kernel_address(addr, retval)) \
+ *(type *)dest = 0; \
+ else \
+ *(type *)dest = retval; \
}
+DEFINE_BASIC_FETCH_FUNCS(memory)
/* Memory fetching by symbol */
struct symbol_cache {
@@ -145,51 +192,126 @@ static struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
return sc;
}
-static __kprobes unsigned long fetch_symbol(struct pt_regs *regs, void *data)
-{
- struct symbol_cache *sc = data;
-
- if (sc->addr)
- return fetch_memory(regs, (void *)sc->addr);
- else
- return 0;
+#define DEFINE_FETCH_symbol(type) \
+static __kprobes void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs,\
+ void *data, void *dest) \
+{ \
+ struct symbol_cache *sc = data; \
+ if (sc->addr) \
+ fetch_memory_##type(regs, (void *)sc->addr, dest); \
+ else \
+ *(type *)dest = 0; \
}
+DEFINE_BASIC_FETCH_FUNCS(symbol)
-/* Special indirect memory access interface */
-struct indirect_fetch_data {
- struct fetch_func orig;
+/* Dereference memory access function */
+struct deref_fetch_param {
+ struct fetch_param orig;
long offset;
};
-static __kprobes unsigned long fetch_indirect(struct pt_regs *regs, void *data)
-{
- struct indirect_fetch_data *ind = data;
- unsigned long addr;
-
- addr = call_fetch(&ind->orig, regs);
- if (addr) {
- addr += ind->offset;
- return fetch_memory(regs, (void *)addr);
- } else
- return 0;
+#define DEFINE_FETCH_deref(type) \
+static __kprobes void FETCH_FUNC_NAME(deref, type)(struct pt_regs *regs,\
+ void *data, void *dest) \
+{ \
+ struct deref_fetch_param *dprm = data; \
+ unsigned long addr; \
+ call_fetch(&dprm->orig, regs, &addr); \
+ if (addr) { \
+ addr += dprm->offset; \
+ fetch_memory_##type(regs, (void *)addr, dest); \
+ } else \
+ *(type *)dest = 0; \
}
+DEFINE_BASIC_FETCH_FUNCS(deref)
-static __kprobes void free_indirect_fetch_data(struct indirect_fetch_data *data)
+static __kprobes void free_deref_fetch_param(struct deref_fetch_param *data)
{
- if (data->orig.func == fetch_indirect)
- free_indirect_fetch_data(data->orig.data);
- else if (data->orig.func == fetch_symbol)
+ if (CHECK_BASIC_FETCH_FUNCS(deref, data->orig.fn))
+ free_deref_fetch_param(data->orig.data);
+ else if (CHECK_BASIC_FETCH_FUNCS(symbol, data->orig.fn))
free_symbol_cache(data->orig.data);
kfree(data);
}
+/* Default (unsigned long) fetch type */
+#define __DEFAULT_FETCH_TYPE(t) u##t
+#define _DEFAULT_FETCH_TYPE(t) __DEFAULT_FETCH_TYPE(t)
+#define DEFAULT_FETCH_TYPE _DEFAULT_FETCH_TYPE(BITS_PER_LONG)
+#define DEFAULT_FETCH_TYPE_STR __stringify(DEFAULT_FETCH_TYPE)
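/*
 * Note: the two-level expansion above forces BITS_PER_LONG to be expanded
 * before token pasting, so on a 64-bit kernel DEFAULT_FETCH_TYPE is u64
 * and DEFAULT_FETCH_TYPE_STR is "u64" (u32/"u32" on a 32-bit kernel).
 */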
+
+#define ASSIGN_FETCH_FUNC(kind, type) \
+ .kind = FETCH_FUNC_NAME(kind, type)
+
+#define ASSIGN_FETCH_TYPE(ptype, ftype, sign) \
+ {.name = #ptype, \
+ .size = sizeof(ftype), \
+ .is_signed = sign, \
+ .print = PRINT_TYPE_FUNC_NAME(ptype), \
+ .fmt = PRINT_TYPE_FMT_NAME(ptype), \
+ASSIGN_FETCH_FUNC(reg, ftype), \
+ASSIGN_FETCH_FUNC(stack, ftype), \
+ASSIGN_FETCH_FUNC(retval, ftype), \
+ASSIGN_FETCH_FUNC(memory, ftype), \
+ASSIGN_FETCH_FUNC(symbol, ftype), \
+ASSIGN_FETCH_FUNC(deref, ftype), \
+ }
+
+/* Fetch type information table */
+static const struct fetch_type {
+ const char *name; /* Name of type */
+ size_t size; /* Byte size of type */
+ int is_signed; /* Signed flag */
+ print_type_func_t print; /* Print functions */
+ const char *fmt; /* Format string */
+ /* Fetch functions */
+ fetch_func_t reg;
+ fetch_func_t stack;
+ fetch_func_t retval;
+ fetch_func_t memory;
+ fetch_func_t symbol;
+ fetch_func_t deref;
+} fetch_type_table[] = {
+ ASSIGN_FETCH_TYPE(u8, u8, 0),
+ ASSIGN_FETCH_TYPE(u16, u16, 0),
+ ASSIGN_FETCH_TYPE(u32, u32, 0),
+ ASSIGN_FETCH_TYPE(u64, u64, 0),
+ ASSIGN_FETCH_TYPE(s8, u8, 1),
+ ASSIGN_FETCH_TYPE(s16, u16, 1),
+ ASSIGN_FETCH_TYPE(s32, u32, 1),
+ ASSIGN_FETCH_TYPE(s64, u64, 1),
+};
+
+static const struct fetch_type *find_fetch_type(const char *type)
+{
+ int i;
+
+ if (!type)
+ type = DEFAULT_FETCH_TYPE_STR;
+
+ for (i = 0; i < ARRAY_SIZE(fetch_type_table); i++)
+ if (strcmp(type, fetch_type_table[i].name) == 0)
+ return &fetch_type_table[i];
+ return NULL;
+}
+
+/* Special function: only accepts unsigned long */
+static __kprobes void fetch_stack_address(struct pt_regs *regs,
+ void *dummy, void *dest)
+{
+ *(unsigned long *)dest = kernel_stack_pointer(regs);
+}
+
/**
* Kprobe event core functions
*/
struct probe_arg {
- struct fetch_func fetch;
- const char *name;
+ struct fetch_param fetch;
+ unsigned int offset; /* Offset from argument entry */
+ const char *name; /* Name of this argument */
+ const char *comm; /* Command of this argument */
+ const struct fetch_type *type; /* Type of this argument */
};
/* Flags for trace_probe */
@@ -204,6 +326,7 @@ struct trace_probe {
const char *symbol; /* symbol name */
struct ftrace_event_call call;
struct trace_event event;
+ ssize_t size; /* trace entry size */
unsigned int nr_args;
struct probe_arg args[];
};
@@ -212,6 +335,7 @@ struct trace_probe {
(offsetof(struct trace_probe, args) + \
(sizeof(struct probe_arg) * (n)))
+
static __kprobes int probe_is_return(struct trace_probe *tp)
{
return tp->rp.handler != NULL;
@@ -222,49 +346,6 @@ static __kprobes const char *probe_symbol(struct trace_probe *tp)
return tp->symbol ? tp->symbol : "unknown";
}
-static int probe_arg_string(char *buf, size_t n, struct fetch_func *ff)
-{
- int ret = -EINVAL;
-
- if (ff->func == fetch_register) {
- const char *name;
- name = regs_query_register_name((unsigned int)((long)ff->data));
- ret = snprintf(buf, n, "%%%s", name);
- } else if (ff->func == fetch_stack)
- ret = snprintf(buf, n, "$stack%lu", (unsigned long)ff->data);
- else if (ff->func == fetch_memory)
- ret = snprintf(buf, n, "@0x%p", ff->data);
- else if (ff->func == fetch_symbol) {
- struct symbol_cache *sc = ff->data;
- if (sc->offset)
- ret = snprintf(buf, n, "@%s%+ld", sc->symbol,
- sc->offset);
- else
- ret = snprintf(buf, n, "@%s", sc->symbol);
- } else if (ff->func == fetch_retvalue)
- ret = snprintf(buf, n, "$retval");
- else if (ff->func == fetch_stack_address)
- ret = snprintf(buf, n, "$stack");
- else if (ff->func == fetch_indirect) {
- struct indirect_fetch_data *id = ff->data;
- size_t l = 0;
- ret = snprintf(buf, n, "%+ld(", id->offset);
- if (ret >= n)
- goto end;
- l += ret;
- ret = probe_arg_string(buf + l, n - l, &id->orig);
- if (ret < 0)
- goto end;
- l += ret;
- ret = snprintf(buf + l, n - l, ")");
- ret += l;
- }
-end:
- if (ret >= n)
- return -ENOSPC;
- return ret;
-}
-
static int register_probe_event(struct trace_probe *tp);
static void unregister_probe_event(struct trace_probe *tp);
@@ -347,11 +428,12 @@ error:
static void free_probe_arg(struct probe_arg *arg)
{
- if (arg->fetch.func == fetch_symbol)
+ if (CHECK_BASIC_FETCH_FUNCS(deref, arg->fetch.fn))
+ free_deref_fetch_param(arg->fetch.data);
+ else if (CHECK_BASIC_FETCH_FUNCS(symbol, arg->fetch.fn))
free_symbol_cache(arg->fetch.data);
- else if (arg->fetch.func == fetch_indirect)
- free_indirect_fetch_data(arg->fetch.data);
kfree(arg->name);
+ kfree(arg->comm);
}
static void free_trace_probe(struct trace_probe *tp)
@@ -457,28 +539,30 @@ static int split_symbol_offset(char *symbol, unsigned long *offset)
#define PARAM_MAX_ARGS 16
#define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long))
-static int parse_probe_vars(char *arg, struct fetch_func *ff, int is_return)
+static int parse_probe_vars(char *arg, const struct fetch_type *t,
+ struct fetch_param *f, int is_return)
{
int ret = 0;
unsigned long param;
if (strcmp(arg, "retval") == 0) {
- if (is_return) {
- ff->func = fetch_retvalue;
- ff->data = NULL;
- } else
+ if (is_return)
+ f->fn = t->retval;
+ else
ret = -EINVAL;
} else if (strncmp(arg, "stack", 5) == 0) {
if (arg[5] == '\0') {
- ff->func = fetch_stack_address;
- ff->data = NULL;
+ if (strcmp(t->name, DEFAULT_FETCH_TYPE_STR) == 0)
+ f->fn = fetch_stack_address;
+ else
+ ret = -EINVAL;
} else if (isdigit(arg[5])) {
ret = strict_strtoul(arg + 5, 10, &param);
if (ret || param > PARAM_MAX_STACK)
ret = -EINVAL;
else {
- ff->func = fetch_stack;
- ff->data = (void *)param;
+ f->fn = t->stack;
+ f->data = (void *)param;
}
} else
ret = -EINVAL;
@@ -488,7 +572,8 @@ static int parse_probe_vars(char *arg, struct fetch_func *ff, int is_return)
}
/* Recursive argument parser */
-static int __parse_probe_arg(char *arg, struct fetch_func *ff, int is_return)
+static int __parse_probe_arg(char *arg, const struct fetch_type *t,
+ struct fetch_param *f, int is_return)
{
int ret = 0;
unsigned long param;
@@ -497,13 +582,13 @@ static int __parse_probe_arg(char *arg, struct fetch_func *ff, int is_return)
switch (arg[0]) {
case '$':
- ret = parse_probe_vars(arg + 1, ff, is_return);
+ ret = parse_probe_vars(arg + 1, t, f, is_return);
break;
case '%': /* named register */
ret = regs_query_register_offset(arg + 1);
if (ret >= 0) {
- ff->func = fetch_register;
- ff->data = (void *)(unsigned long)ret;
+ f->fn = t->reg;
+ f->data = (void *)(unsigned long)ret;
ret = 0;
}
break;
@@ -512,26 +597,22 @@ static int __parse_probe_arg(char *arg, struct fetch_func *ff, int is_return)
ret = strict_strtoul(arg + 1, 0, &param);
if (ret)
break;
- ff->func = fetch_memory;
- ff->data = (void *)param;
+ f->fn = t->memory;
+ f->data = (void *)param;
} else {
ret = split_symbol_offset(arg + 1, &offset);
if (ret)
break;
- ff->data = alloc_symbol_cache(arg + 1, offset);
- if (ff->data)
- ff->func = fetch_symbol;
- else
- ret = -EINVAL;
+ f->data = alloc_symbol_cache(arg + 1, offset);
+ if (f->data)
+ f->fn = t->symbol;
}
break;
- case '+': /* indirect memory */
+ case '+': /* deref memory */
case '-':
tmp = strchr(arg, '(');
- if (!tmp) {
- ret = -EINVAL;
+ if (!tmp)
break;
- }
*tmp = '\0';
ret = strict_strtol(arg + 1, 0, &offset);
if (ret)
@@ -541,38 +622,58 @@ static int __parse_probe_arg(char *arg, struct fetch_func *ff, int is_return)
arg = tmp + 1;
tmp = strrchr(arg, ')');
if (tmp) {
- struct indirect_fetch_data *id;
+ struct deref_fetch_param *dprm;
+ const struct fetch_type *t2 = find_fetch_type(NULL);
*tmp = '\0';
- id = kzalloc(sizeof(struct indirect_fetch_data),
- GFP_KERNEL);
- if (!id)
+ dprm = kzalloc(sizeof(struct deref_fetch_param),
+ GFP_KERNEL);
+ if (!dprm)
return -ENOMEM;
- id->offset = offset;
- ret = __parse_probe_arg(arg, &id->orig, is_return);
+ dprm->offset = offset;
+ ret = __parse_probe_arg(arg, t2, &dprm->orig,
+ is_return);
if (ret)
- kfree(id);
+ kfree(dprm);
else {
- ff->func = fetch_indirect;
- ff->data = (void *)id;
+ f->fn = t->deref;
+ f->data = (void *)dprm;
}
- } else
- ret = -EINVAL;
+ }
break;
- default:
- /* TODO: support custom handler */
- ret = -EINVAL;
}
+ if (!ret && !f->fn)
+ ret = -EINVAL;
return ret;
}
/* String length checking wrapper */
-static int parse_probe_arg(char *arg, struct fetch_func *ff, int is_return)
+static int parse_probe_arg(char *arg, struct trace_probe *tp,
+ struct probe_arg *parg, int is_return)
{
+ const char *t;
+
if (strlen(arg) > MAX_ARGSTR_LEN) {
pr_info("Argument is too long.: %s\n", arg);
return -ENOSPC;
}
- return __parse_probe_arg(arg, ff, is_return);
+ parg->comm = kstrdup(arg, GFP_KERNEL);
+ if (!parg->comm) {
+ pr_info("Failed to allocate memory for command '%s'.\n", arg);
+ return -ENOMEM;
+ }
+ t = strchr(parg->comm, ':');
+ if (t) {
+ arg[t - parg->comm] = '\0';
+ t++;
+ }
+ parg->type = find_fetch_type(t);
+ if (!parg->type) {
+ pr_info("Unsupported type: %s\n", t);
+ return -EINVAL;
+ }
+ parg->offset = tp->size;
+ tp->size += parg->type->size;
+ return __parse_probe_arg(arg, parg->type, &parg->fetch, is_return);
}
/* Return 1 if name is reserved or already used by another argument */
@@ -602,15 +703,18 @@ static int create_trace_probe(int argc, char **argv)
* @ADDR : fetch memory at ADDR (ADDR should be in kernel)
* @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
* %REG : fetch register REG
- * Indirect memory fetch:
+ * Dereferencing memory fetch:
* +|-offs(ARG) : fetch memory at ARG +|- offs address.
* Alias name of args:
* NAME=FETCHARG : set NAME as alias of FETCHARG.
+ * Type of args:
+ * FETCHARG:TYPE : use TYPE instead of unsigned long.
*/
struct trace_probe *tp;
int i, ret = 0;
int is_return = 0, is_delete = 0;
- char *symbol = NULL, *event = NULL, *arg = NULL, *group = NULL;
+ char *symbol = NULL, *event = NULL, *group = NULL;
+ char *arg, *tmp;
unsigned long offset = 0;
void *addr = NULL;
char buf[MAX_EVENT_NAME_LEN];
@@ -723,13 +827,6 @@ static int create_trace_probe(int argc, char **argv)
else
arg = argv[i];
- if (conflict_field_name(argv[i], tp->args, i)) {
- pr_info("Argument%d name '%s' conflicts with "
- "another field.\n", i, argv[i]);
- ret = -EINVAL;
- goto error;
- }
-
tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
if (!tp->args[i].name) {
pr_info("Failed to allocate argument%d name '%s'.\n",
@@ -737,9 +834,19 @@ static int create_trace_probe(int argc, char **argv)
ret = -ENOMEM;
goto error;
}
+ tmp = strchr(tp->args[i].name, ':');
+ if (tmp)
+ *tmp = '_'; /* convert : to _ */
+
+ if (conflict_field_name(tp->args[i].name, tp->args, i)) {
+ pr_info("Argument%d name '%s' conflicts with "
+ "another field.\n", i, argv[i]);
+ ret = -EINVAL;
+ goto error;
+ }
/* Parse fetch argument */
- ret = parse_probe_arg(arg, &tp->args[i].fetch, is_return);
+ ret = parse_probe_arg(arg, tp, &tp->args[i], is_return);
if (ret) {
pr_info("Parse error at argument%d. (%d)\n", i, ret);
kfree(tp->args[i].name);
@@ -794,8 +901,7 @@ static void probes_seq_stop(struct seq_file *m, void *v)
static int probes_seq_show(struct seq_file *m, void *v)
{
struct trace_probe *tp = v;
- int i, ret;
- char buf[MAX_ARGSTR_LEN + 1];
+ int i;
seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p');
seq_printf(m, ":%s/%s", tp->call.system, tp->call.name);
@@ -807,15 +913,10 @@ static int probes_seq_show(struct seq_file *m, void *v)
else
seq_printf(m, " %s", probe_symbol(tp));
- for (i = 0; i < tp->nr_args; i++) {
- ret = probe_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i].fetch);
- if (ret < 0) {
- pr_warning("Argument%d decoding error(%d).\n", i, ret);
- return ret;
- }
- seq_printf(m, " %s=%s", tp->args[i].name, buf);
- }
+ for (i = 0; i < tp->nr_args; i++)
+ seq_printf(m, " %s=%s", tp->args[i].name, tp->args[i].comm);
seq_printf(m, "\n");
+
return 0;
}
@@ -945,9 +1046,10 @@ static const struct file_operations kprobe_profile_ops = {
static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
{
struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
- struct kprobe_trace_entry *entry;
+ struct kprobe_trace_entry_head *entry;
struct ring_buffer_event *event;
struct ring_buffer *buffer;
+ u8 *data;
int size, i, pc;
unsigned long irq_flags;
struct ftrace_event_call *call = &tp->call;
@@ -957,7 +1059,7 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
local_save_flags(irq_flags);
pc = preempt_count();
- size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
+ size = sizeof(*entry) + tp->size;
event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
irq_flags, pc);
@@ -965,10 +1067,10 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
return;
entry = ring_buffer_event_data(event);
- entry->nargs = tp->nr_args;
entry->ip = (unsigned long)kp->addr;
+ data = (u8 *)&entry[1];
for (i = 0; i < tp->nr_args; i++)
- entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
+ call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset);
if (!filter_current_check_discard(buffer, call, entry, event))
trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
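/*
 * Layout sketch (hypothetical probe with one u32 and one u64 argument):
 * the payload is a packed byte area starting right after the entry
 * header, so this record is sizeof(struct kprobe_trace_entry_head) + 12
 * bytes, with the u32 at data offset 0 and the u64 at data offset 4.
 */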
@@ -979,9 +1081,10 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
- struct kretprobe_trace_entry *entry;
+ struct kretprobe_trace_entry_head *entry;
struct ring_buffer_event *event;
struct ring_buffer *buffer;
+ u8 *data;
int size, i, pc;
unsigned long irq_flags;
struct ftrace_event_call *call = &tp->call;
@@ -989,7 +1092,7 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
local_save_flags(irq_flags);
pc = preempt_count();
- size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
+ size = sizeof(*entry) + tp->size;
event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
irq_flags, pc);
@@ -997,11 +1100,11 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
return;
entry = ring_buffer_event_data(event);
- entry->nargs = tp->nr_args;
entry->func = (unsigned long)tp->rp.kp.addr;
entry->ret_ip = (unsigned long)ri->ret_addr;
+ data = (u8 *)&entry[1];
for (i = 0; i < tp->nr_args; i++)
- entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
+ call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset);
if (!filter_current_check_discard(buffer, call, entry, event))
trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
@@ -1011,13 +1114,14 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags)
{
- struct kprobe_trace_entry *field;
+ struct kprobe_trace_entry_head *field;
struct trace_seq *s = &iter->seq;
struct trace_event *event;
struct trace_probe *tp;
+ u8 *data;
int i;
- field = (struct kprobe_trace_entry *)iter->ent;
+ field = (struct kprobe_trace_entry_head *)iter->ent;
event = ftrace_find_event(field->ent.type);
tp = container_of(event, struct trace_probe, event);
@@ -1030,9 +1134,10 @@ print_kprobe_event(struct trace_iterator *iter, int flags)
if (!trace_seq_puts(s, ")"))
goto partial;
- for (i = 0; i < field->nargs; i++)
- if (!trace_seq_printf(s, " %s=%lx",
- tp->args[i].name, field->args[i]))
+ data = (u8 *)&field[1];
+ for (i = 0; i < tp->nr_args; i++)
+ if (!tp->args[i].type->print(s, tp->args[i].name,
+ data + tp->args[i].offset))
goto partial;
if (!trace_seq_puts(s, "\n"))
@@ -1046,13 +1151,14 @@ partial:
enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags)
{
- struct kretprobe_trace_entry *field;
+ struct kretprobe_trace_entry_head *field;
struct trace_seq *s = &iter->seq;
struct trace_event *event;
struct trace_probe *tp;
+ u8 *data;
int i;
- field = (struct kretprobe_trace_entry *)iter->ent;
+ field = (struct kretprobe_trace_entry_head *)iter->ent;
event = ftrace_find_event(field->ent.type);
tp = container_of(event, struct trace_probe, event);
@@ -1071,9 +1177,10 @@ print_kretprobe_event(struct trace_iterator *iter, int flags)
if (!trace_seq_puts(s, ")"))
goto partial;
- for (i = 0; i < field->nargs; i++)
- if (!trace_seq_printf(s, " %s=%lx",
- tp->args[i].name, field->args[i]))
+ data = (u8 *)&field[1];
+ for (i = 0; i < tp->nr_args; i++)
+ if (!tp->args[i].type->print(s, tp->args[i].name,
+ data + tp->args[i].offset))
goto partial;
if (!trace_seq_puts(s, "\n"))
@@ -1129,29 +1236,43 @@ static int probe_event_raw_init(struct ftrace_event_call *event_call)
static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
{
int ret, i;
- struct kprobe_trace_entry field;
+ struct kprobe_trace_entry_head field;
struct trace_probe *tp = (struct trace_probe *)event_call->data;
DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
- DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1);
/* Set argument names as fields */
- for (i = 0; i < tp->nr_args; i++)
- DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0);
+ for (i = 0; i < tp->nr_args; i++) {
+ ret = trace_define_field(event_call, tp->args[i].type->name,
+ tp->args[i].name,
+ sizeof(field) + tp->args[i].offset,
+ tp->args[i].type->size,
+ tp->args[i].type->is_signed,
+ FILTER_OTHER);
+ if (ret)
+ return ret;
+ }
return 0;
}
static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
{
int ret, i;
- struct kretprobe_trace_entry field;
+ struct kretprobe_trace_entry_head field;
struct trace_probe *tp = (struct trace_probe *)event_call->data;
DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
- DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1);
/* Set argument names as fields */
- for (i = 0; i < tp->nr_args; i++)
- DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0);
+ for (i = 0; i < tp->nr_args; i++) {
+ ret = trace_define_field(event_call, tp->args[i].type->name,
+ tp->args[i].name,
+ sizeof(field) + tp->args[i].offset,
+ tp->args[i].type->size,
+ tp->args[i].type->is_signed,
+ FILTER_OTHER);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -1176,8 +1297,8 @@ static int __set_print_fmt(struct trace_probe *tp, char *buf, int len)
pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt);
for (i = 0; i < tp->nr_args; i++) {
- pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%%lx",
- tp->args[i].name);
+ pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%s",
+ tp->args[i].name, tp->args[i].type->fmt);
}
pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg);
@@ -1219,12 +1340,13 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
{
struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
struct ftrace_event_call *call = &tp->call;
- struct kprobe_trace_entry *entry;
+ struct kprobe_trace_entry_head *entry;
+ u8 *data;
int size, __size, i;
unsigned long irq_flags;
int rctx;
- __size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
+ __size = sizeof(*entry) + tp->size;
size = ALIGN(__size + sizeof(u32), sizeof(u64));
size -= sizeof(u32);
if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
@@ -1235,10 +1357,10 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
if (!entry)
return;
- entry->nargs = tp->nr_args;
entry->ip = (unsigned long)kp->addr;
+ data = (u8 *)&entry[1];
for (i = 0; i < tp->nr_args; i++)
- entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
+ call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset);
perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs);
}
@@ -1249,12 +1371,13 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
{
struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
struct ftrace_event_call *call = &tp->call;
- struct kretprobe_trace_entry *entry;
+ struct kretprobe_trace_entry_head *entry;
+ u8 *data;
int size, __size, i;
unsigned long irq_flags;
int rctx;
- __size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
+ __size = sizeof(*entry) + tp->size;
size = ALIGN(__size + sizeof(u32), sizeof(u64));
size -= sizeof(u32);
if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
@@ -1265,11 +1388,11 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
if (!entry)
return;
- entry->nargs = tp->nr_args;
entry->func = (unsigned long)tp->rp.kp.addr;
entry->ret_ip = (unsigned long)ri->ret_addr;
+ data = (u8 *)&entry[1];
for (i = 0; i < tp->nr_args; i++)
- entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
+ call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset);
perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1,
irq_flags, regs);
diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c
index d59cd6879477..8eaf00749b65 100644
--- a/kernel/trace/trace_ksym.c
+++ b/kernel/trace/trace_ksym.c
@@ -34,12 +34,6 @@
#include <asm/atomic.h>
-/*
- * For now, let us restrict the no. of symbols traced simultaneously to number
- * of available hardware breakpoint registers.
- */
-#define KSYM_TRACER_MAX HBP_NUM
-
#define KSYM_TRACER_OP_LEN 3 /* rw- */
struct trace_ksym {
@@ -53,7 +47,6 @@ struct trace_ksym {
static struct trace_array *ksym_trace_array;
-static unsigned int ksym_filter_entry_count;
static unsigned int ksym_tracing_enabled;
static HLIST_HEAD(ksym_filter_head);
@@ -181,13 +174,6 @@ int process_new_ksym_entry(char *ksymname, int op, unsigned long addr)
struct trace_ksym *entry;
int ret = -ENOMEM;
- if (ksym_filter_entry_count >= KSYM_TRACER_MAX) {
- printk(KERN_ERR "ksym_tracer: Maximum limit:(%d) reached. No"
- " new requests for tracing can be accepted now.\n",
- KSYM_TRACER_MAX);
- return -ENOSPC;
- }
-
entry = kzalloc(sizeof(struct trace_ksym), GFP_KERNEL);
if (!entry)
return -ENOMEM;
@@ -203,13 +189,17 @@ int process_new_ksym_entry(char *ksymname, int op, unsigned long addr)
if (IS_ERR(entry->ksym_hbp)) {
ret = PTR_ERR(entry->ksym_hbp);
- printk(KERN_INFO "ksym_tracer request failed. Try again"
- " later!!\n");
+ if (ret == -ENOSPC) {
+ printk(KERN_ERR "ksym_tracer: Maximum limit reached."
+ " No new requests for tracing can be accepted now.\n");
+ } else {
+ printk(KERN_INFO "ksym_tracer request failed. Try again"
+ " later!!\n");
+ }
goto err;
}
hlist_add_head_rcu(&(entry->ksym_hlist), &ksym_filter_head);
- ksym_filter_entry_count++;
return 0;
@@ -265,7 +255,6 @@ static void __ksym_trace_reset(void)
hlist_for_each_entry_safe(entry, node, node1, &ksym_filter_head,
ksym_hlist) {
unregister_wide_hw_breakpoint(entry->ksym_hbp);
- ksym_filter_entry_count--;
hlist_del_rcu(&(entry->ksym_hlist));
synchronize_rcu();
kfree(entry);
@@ -338,7 +327,6 @@ static ssize_t ksym_trace_filter_write(struct file *file,
goto out_unlock;
}
/* Error or "symbol:---" case: drop it */
- ksym_filter_entry_count--;
hlist_del_rcu(&(entry->ksym_hlist));
synchronize_rcu();
kfree(entry);
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 8e46b3323cdc..ab13d7008061 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -209,6 +209,7 @@ int trace_seq_putc(struct trace_seq *s, unsigned char c)
return 1;
}
+EXPORT_SYMBOL(trace_seq_putc);
int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
{
@@ -253,7 +254,7 @@ void *trace_seq_reserve(struct trace_seq *s, size_t len)
void *ret;
if (s->full)
- return 0;
+ return NULL;
if (len > ((PAGE_SIZE - 1) - s->len)) {
s->full = 1;
@@ -355,6 +356,21 @@ ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
}
EXPORT_SYMBOL(ftrace_print_symbols_seq);
+const char *
+ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
+{
+ int i;
+ const char *ret = p->buffer + p->len;
+
+ for (i = 0; i < buf_len; i++)
+ trace_seq_printf(p, "%s%2.2x", i == 0 ? "" : " ", buf[i]);
+
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+EXPORT_SYMBOL(ftrace_print_hex_seq);
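A minimal usage sketch (hypothetical buffer, and an assumed struct trace_seq *s owned by the caller; the returned pointer refers to the bytes just written into that seq):

static const unsigned char id[8] = {
	0xde, 0xad, 0xbe, 0xef, 0x01, 0x02, 0x03, 0x04
};
const char *hex = ftrace_print_hex_seq(s, id, sizeof(id));
/* hex now points at "de ad be ef 01 02 03 04" inside s->buffer */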
+
#ifdef CONFIG_KRETPROBES
static inline const char *kretprobed(const char *name)
{
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 5fca0f51fde4..a55fccfede5d 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -50,8 +50,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
}
static void
-probe_sched_switch(struct rq *__rq, struct task_struct *prev,
- struct task_struct *next)
+probe_sched_switch(struct task_struct *prev, struct task_struct *next)
{
struct trace_array_cpu *data;
unsigned long flags;
@@ -109,7 +108,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
}
static void
-probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
+probe_sched_wakeup(struct task_struct *wakee, int success)
{
struct trace_array_cpu *data;
unsigned long flags;
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 0271742abb8d..8052446ceeaa 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -107,8 +107,7 @@ static void probe_wakeup_migrate_task(struct task_struct *task, int cpu)
}
static void notrace
-probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
- struct task_struct *next)
+probe_wakeup_sched_switch(struct task_struct *prev, struct task_struct *next)
{
struct trace_array_cpu *data;
cycle_t T0, T1, delta;
@@ -200,7 +199,7 @@ static void wakeup_reset(struct trace_array *tr)
}
static void
-probe_wakeup(struct rq *rq, struct task_struct *p, int success)
+probe_wakeup(struct task_struct *p, int success)
{
struct trace_array_cpu *data;
int cpu = smp_processor_id();
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 9398034f814a..250e7f9bd2f0 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -17,7 +17,6 @@ static inline int trace_valid_entry(struct trace_entry *entry)
case TRACE_BRANCH:
case TRACE_GRAPH_ENT:
case TRACE_GRAPH_RET:
- case TRACE_HW_BRANCHES:
case TRACE_KSYM:
return 1;
}
@@ -256,7 +255,8 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST 100000000
-static void __ftrace_dump(bool disable_tracing);
+static void
+__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
static unsigned int graph_hang_thresh;
/* Wrap the real function entry probe to avoid possible hanging */
@@ -267,7 +267,7 @@ static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
ftrace_graph_stop();
printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
if (ftrace_dump_on_oops)
- __ftrace_dump(false);
+ __ftrace_dump(false, DUMP_ALL);
return 0;
}
@@ -755,62 +755,6 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
}
#endif /* CONFIG_BRANCH_TRACER */
-#ifdef CONFIG_HW_BRANCH_TRACER
-int
-trace_selftest_startup_hw_branches(struct tracer *trace,
- struct trace_array *tr)
-{
- struct trace_iterator *iter;
- struct tracer tracer;
- unsigned long count;
- int ret;
-
- if (!trace->open) {
- printk(KERN_CONT "missing open function...");
- return -1;
- }
-
- ret = tracer_init(trace, tr);
- if (ret) {
- warn_failed_init_tracer(trace, ret);
- return ret;
- }
-
- /*
- * The hw-branch tracer needs to collect the trace from the various
- * cpu trace buffers - before tracing is stopped.
- */
- iter = kzalloc(sizeof(*iter), GFP_KERNEL);
- if (!iter)
- return -ENOMEM;
-
- memcpy(&tracer, trace, sizeof(tracer));
-
- iter->trace = &tracer;
- iter->tr = tr;
- iter->pos = -1;
- mutex_init(&iter->mutex);
-
- trace->open(iter);
-
- mutex_destroy(&iter->mutex);
- kfree(iter);
-
- tracing_stop();
-
- ret = trace_test_buffer(tr, &count);
- trace->reset(tr);
- tracing_start();
-
- if (!ret && !count) {
- printk(KERN_CONT "no entries found..");
- ret = -1;
- }
-
- return ret;
-}
-#endif /* CONFIG_HW_BRANCH_TRACER */
-
#ifdef CONFIG_KSYM_TRACER
static int ksym_selftest_dummy;
diff --git a/kernel/user.c b/kernel/user.c
index 766467b3bcb7..7e72614b736d 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -16,7 +16,6 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>
-#include "cred-internals.h"
struct user_namespace init_user_ns = {
.kref = {
@@ -137,9 +136,6 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
struct hlist_head *hashent = uidhashentry(ns, uid);
struct user_struct *up, *new;
- /* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
- * atomic.
- */
spin_lock_irq(&uidhash_lock);
up = uid_hash_find(uid, hashent);
spin_unlock_irq(&uidhash_lock);
@@ -161,11 +157,6 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
spin_lock_irq(&uidhash_lock);
up = uid_hash_find(uid, hashent);
if (up) {
- /* This case is not possible when CONFIG_USER_SCHED
- * is defined, since we serialize alloc_uid() using
- * uids_mutex. Hence no need to call
- * sched_destroy_user() or remove_user_sysfs_dir().
- */
key_put(new->uid_keyring);
key_put(new->session_keyring);
kmem_cache_free(uid_cachep, new);
@@ -178,8 +169,6 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
return up;
- put_user_ns(new->user_ns);
- kmem_cache_free(uid_cachep, new);
out_unlock:
return NULL;
}
diff --git a/lib/Kconfig b/lib/Kconfig
index 170d8ca901d8..af12831f2eea 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -210,4 +210,25 @@ config GENERIC_ATOMIC64
config LRU_CACHE
tristate
+config SHM_SIGNAL
+ tristate "SHM Signal - Generic shared-memory signaling mechanism"
+ default n
+ help
+ Provides a shared-memory based signaling mechanism used to pass
+ memory-dirty notifications between two end-points.
+
+ If unsure, say N
+
+config IOQ
+ tristate "IO-Queue library - Generic shared-memory queue"
+ select SHM_SIGNAL
+ default n
+ help
+ IOQ is a generic shared-memory-queue mechanism that happens to be
+ friendly to virtualization boundaries. It can be used in a variety
+ of ways, though its intended purpose is to become a low-level
+ communication path for paravirtualized drivers.
+
+ If unsure, say N
+
endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 935248bdbc47..9b3ac57d697d 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -171,6 +171,18 @@ config DETECT_SOFTLOCKUP
can be detected via the NMI-watchdog, on platforms that
support it.)
+config NMI_WATCHDOG
+ bool "Detect Hard Lockups with an NMI Watchdog"
+ depends on DEBUG_KERNEL && PERF_EVENTS && PERF_EVENTS_NMI
+ help
+ Say Y here to enable the kernel to use the NMI as a watchdog
+ to detect hard lockups. This is useful when a cpu hangs for no
+ reason but can still respond to NMIs. A backtrace is displayed
+ for reviewing and reporting.
+
+ The overhead should be minimal, just an extra NMI every few
+ seconds.
+
config BOOTPARAM_SOFTLOCKUP_PANIC
bool "Panic (Reboot) On Soft Lockups"
depends on DETECT_SOFTLOCKUP
@@ -512,6 +524,18 @@ config PROVE_RCU
Say N if you are unsure.
+config PROVE_RCU_REPEATEDLY
+ bool "RCU debugging: don't disable PROVE_RCU on first splat"
+ depends on PROVE_RCU
+ default n
+ help
+ By itself, PROVE_RCU will disable checking upon issuing the
+ first warning (or "splat"). This feature prevents such
+ disabling, allowing multiple RCU-lockdep warnings to be printed
+ on a single reboot.
+
+ Say N if you are unsure.
+
config LOCKDEP
bool
depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -793,7 +817,7 @@ config RCU_CPU_STALL_DETECTOR
config RCU_CPU_STALL_VERBOSE
bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR"
depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU
- default n
+ default y
help
This option causes RCU to printk detailed per-task information
for any tasks that are stalling the current RCU grace period.
@@ -1086,6 +1110,13 @@ config DMA_API_DEBUG
This option causes a performance degradation. Use only if you want
to debug device drivers. If unsure, say N.
+config ATOMIC64_SELFTEST
+ bool "Perform an atomic64_t self-test at boot"
+ help
+ Enable this option to test the atomic64_t functions at boot.
+
+ If unsure, say N.
+
source "samples/Kconfig"
source "lib/Kconfig.kgdb"
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index 9b5d1d7f2ef7..43cb93fa2651 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -3,7 +3,7 @@ config HAVE_ARCH_KGDB
bool
menuconfig KGDB
- bool "KGDB: kernel debugging with remote gdb"
+ bool "KGDB: kernel debugger"
depends on HAVE_ARCH_KGDB
depends on DEBUG_KERNEL && EXPERIMENTAL
help
@@ -57,4 +57,26 @@ config KGDB_TESTS_BOOT_STRING
information about other strings you could use beyond the
default of V1F100.
+config KGDB_LOW_LEVEL_TRAP
+ bool "KGDB: Allow debugging with traps in notifiers"
+ depends on X86 || MIPS
+ default n
+ help
+ This will add an extra callback to kgdb for the breakpoint
+ exception handler, which will allow kgdb to step through a
+ notify handler.
+
+config KGDB_KDB
+ bool "KGDB_KDB: include kdb frontend for kgdb"
+ default n
+ help
+ KDB frontend for the kernel debugger (kgdb).
+
+config KDB_KEYBOARD
+ bool "KGDB_KDB: keyboard as input device"
+ depends on VT && KGDB_KDB
+ default n
+ help
+ KDB can use a PS/2 type keyboard as an input device.
+
endif # KGDB
diff --git a/lib/Makefile b/lib/Makefile
index 0d4015205c64..8b3c9fa18c10 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -21,7 +21,7 @@ lib-y += kobject.o kref.o klist.o
obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
- string_helpers.o gcd.o lcm.o list_sort.o
+ string_helpers.o gcd.o lcm.o list_sort.o uuid.o
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
@@ -39,7 +39,10 @@ lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o
lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
obj-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o
+
+CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
+
obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
obj-$(CONFIG_BTREE) += btree.o
obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
@@ -78,6 +81,8 @@ obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
obj-$(CONFIG_SMP) += percpu_counter.o
obj-$(CONFIG_AUDIT_GENERIC) += audit.o
+obj-$(CONFIG_SHM_SIGNAL) += shm_signal.o
+obj-$(CONFIG_IOQ) += ioq.o
obj-$(CONFIG_SWIOTLB) += swiotlb.o
obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
@@ -101,6 +106,8 @@ obj-$(CONFIG_GENERIC_CSUM) += checksum.o
obj-$(CONFIG_GENERIC_ATOMIC64) += atomic64.o
+obj-$(CONFIG_ATOMIC64_SELFTEST) += atomic64_test.o
+
hostprogs-y := gen_crc32table
clean-files := crc32table.h
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 8bee16ec7524..a21c12bc727c 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -162,12 +162,12 @@ int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
unsigned long flags;
spinlock_t *lock = lock_addr(v);
- int ret = 1;
+ int ret = 0;
spin_lock_irqsave(lock, flags);
if (v->counter != u) {
v->counter += a;
- ret = 0;
+ ret = 1;
}
spin_unlock_irqrestore(lock, flags);
return ret;
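A small sketch of the corrected return-value semantics; it mirrors the atomic64_add_unless checks in the new self-test added below:

atomic64_t v = ATOMIC64_INIT(5);

BUG_ON(!atomic64_add_unless(&v, 1, 10));	/* 5 != 10: adds, returns non-zero, v == 6 */
BUG_ON(atomic64_add_unless(&v, 1, 6));		/* 6 == 6: no add, returns 0, v stays 6 */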
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
new file mode 100644
index 000000000000..65e482caf5e9
--- /dev/null
+++ b/lib/atomic64_test.c
@@ -0,0 +1,164 @@
+/*
+ * Testsuite for atomic64_t functions
+ *
+ * Copyright © 2010 Luca Barbieri
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/init.h>
+#include <asm/atomic.h>
+
+#define INIT(c) do { atomic64_set(&v, c); r = c; } while (0)
+static __init int test_atomic64(void)
+{
+ long long v0 = 0xaaa31337c001d00dLL;
+ long long v1 = 0xdeadbeefdeafcafeLL;
+ long long v2 = 0xfaceabadf00df001LL;
+ long long onestwos = 0x1111111122222222LL;
+ long long one = 1LL;
+
+ atomic64_t v = ATOMIC64_INIT(v0);
+ long long r = v0;
+ BUG_ON(v.counter != r);
+
+ atomic64_set(&v, v1);
+ r = v1;
+ BUG_ON(v.counter != r);
+ BUG_ON(atomic64_read(&v) != r);
+
+ INIT(v0);
+ atomic64_add(onestwos, &v);
+ r += onestwos;
+ BUG_ON(v.counter != r);
+
+ INIT(v0);
+ atomic64_add(-one, &v);
+ r += -one;
+ BUG_ON(v.counter != r);
+
+ INIT(v0);
+ r += onestwos;
+ BUG_ON(atomic64_add_return(onestwos, &v) != r);
+ BUG_ON(v.counter != r);
+
+ INIT(v0);
+ r += -one;
+ BUG_ON(atomic64_add_return(-one, &v) != r);
+ BUG_ON(v.counter != r);
+
+ INIT(v0);
+ atomic64_sub(onestwos, &v);
+ r -= onestwos;
+ BUG_ON(v.counter != r);
+
+ INIT(v0);
+ atomic64_sub(-one, &v);
+ r -= -one;
+ BUG_ON(v.counter != r);
+
+ INIT(v0);
+ r -= onestwos;
+ BUG_ON(atomic64_sub_return(onestwos, &v) != r);
+ BUG_ON(v.counter != r);
+
+ INIT(v0);
+ r -= -one;
+ BUG_ON(atomic64_sub_return(-one, &v) != r);
+ BUG_ON(v.counter != r);
+
+ INIT(v0);
+ atomic64_inc(&v);
+ r += one;
+ BUG_ON(v.counter != r);
+
+ INIT(v0);
+ r += one;
+ BUG_ON(atomic64_inc_return(&v) != r);
+ BUG_ON(v.counter != r);
+
+ INIT(v0);
+ atomic64_dec(&v);
+ r -= one;
+ BUG_ON(v.counter != r);
+
+ INIT(v0);
+ r -= one;
+ BUG_ON(atomic64_dec_return(&v) != r);
+ BUG_ON(v.counter != r);
+
+ INIT(v0);
+ BUG_ON(atomic64_xchg(&v, v1) != v0);
+ r = v1;
+ BUG_ON(v.counter != r);
+
+ INIT(v0);
+ BUG_ON(atomic64_cmpxchg(&v, v0, v1) != v0);
+ r = v1;
+ BUG_ON(v.counter != r);
+
+ INIT(v0);
+ BUG_ON(atomic64_cmpxchg(&v, v2, v1) != v0);
+ BUG_ON(v.counter != r);
+
+ INIT(v0);
+ BUG_ON(atomic64_add_unless(&v, one, v0));
+ BUG_ON(v.counter != r);
+
+ INIT(v0);
+ BUG_ON(!atomic64_add_unless(&v, one, v1));
+ r += one;
+ BUG_ON(v.counter != r);
+
+#if defined(CONFIG_X86) || defined(CONFIG_MIPS) || defined(CONFIG_PPC) || defined(_ASM_GENERIC_ATOMIC64_H)
+ INIT(onestwos);
+ BUG_ON(atomic64_dec_if_positive(&v) != (onestwos - 1));
+ r -= one;
+ BUG_ON(v.counter != r);
+
+ INIT(0);
+ BUG_ON(atomic64_dec_if_positive(&v) != -one);
+ BUG_ON(v.counter != r);
+
+ INIT(-one);
+ BUG_ON(atomic64_dec_if_positive(&v) != (-one - one));
+ BUG_ON(v.counter != r);
+#else
+#warning Please implement atomic64_dec_if_positive for your architecture, and add it to the IF above
+#endif
+
+ INIT(onestwos);
+ BUG_ON(!atomic64_inc_not_zero(&v));
+ r += one;
+ BUG_ON(v.counter != r);
+
+ INIT(0);
+ BUG_ON(atomic64_inc_not_zero(&v));
+ BUG_ON(v.counter != r);
+
+ INIT(-one);
+ BUG_ON(!atomic64_inc_not_zero(&v));
+ r += one;
+ BUG_ON(v.counter != r);
+
+#ifdef CONFIG_X86
+ printk(KERN_INFO "atomic64 test passed for %s platform %s CX8 and %s SSE\n",
+#ifdef CONFIG_X86_64
+ "x86-64",
+#elif defined(CONFIG_X86_CMPXCHG64)
+ "i586+",
+#else
+ "i386+",
+#endif
+ boot_cpu_has(X86_FEATURE_CX8) ? "with" : "without",
+ boot_cpu_has(X86_FEATURE_XMM) ? "with" : "without");
+#else
+ printk(KERN_INFO "atomic64 test passed\n");
+#endif
+
+ return 0;
+}
+
+core_initcall(test_atomic64);
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index b862b30369ff..bf007a43c053 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -774,7 +774,7 @@ static int __init fixup_free(void *addr, enum debug_obj_state state)
}
}
-static int
+static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
struct debug_bucket *db;
@@ -917,7 +917,7 @@ void __init debug_objects_early_init(void)
/*
* Convert the statically allocated objects to dynamic ones:
*/
-static int debug_objects_replace_static_objects(void)
+static int __init debug_objects_replace_static_objects(void)
{
struct debug_bucket *db = obj_hash;
struct hlist_node *node, *tmp;
diff --git a/lib/hweight.c b/lib/hweight.c
index 63ee4eb1228d..3c79d50814cf 100644
--- a/lib/hweight.c
+++ b/lib/hweight.c
@@ -9,7 +9,7 @@
* The Hamming Weight of a number is the total number of bits set in it.
*/
-unsigned int hweight32(unsigned int w)
+unsigned int __sw_hweight32(unsigned int w)
{
#ifdef ARCH_HAS_FAST_MULTIPLIER
w -= (w >> 1) & 0x55555555;
@@ -24,29 +24,30 @@ unsigned int hweight32(unsigned int w)
return (res + (res >> 16)) & 0x000000FF;
#endif
}
-EXPORT_SYMBOL(hweight32);
+EXPORT_SYMBOL(__sw_hweight32);
-unsigned int hweight16(unsigned int w)
+unsigned int __sw_hweight16(unsigned int w)
{
unsigned int res = w - ((w >> 1) & 0x5555);
res = (res & 0x3333) + ((res >> 2) & 0x3333);
res = (res + (res >> 4)) & 0x0F0F;
return (res + (res >> 8)) & 0x00FF;
}
-EXPORT_SYMBOL(hweight16);
+EXPORT_SYMBOL(__sw_hweight16);
-unsigned int hweight8(unsigned int w)
+unsigned int __sw_hweight8(unsigned int w)
{
unsigned int res = w - ((w >> 1) & 0x55);
res = (res & 0x33) + ((res >> 2) & 0x33);
return (res + (res >> 4)) & 0x0F;
}
-EXPORT_SYMBOL(hweight8);
+EXPORT_SYMBOL(__sw_hweight8);
-unsigned long hweight64(__u64 w)
+unsigned long __sw_hweight64(__u64 w)
{
#if BITS_PER_LONG == 32
- return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w);
+ return __sw_hweight32((unsigned int)(w >> 32)) +
+ __sw_hweight32((unsigned int)w);
#elif BITS_PER_LONG == 64
#ifdef ARCH_HAS_FAST_MULTIPLIER
w -= (w >> 1) & 0x5555555555555555ul;
@@ -63,4 +64,4 @@ unsigned long hweight64(__u64 w)
#endif
#endif
}
-EXPORT_SYMBOL(hweight64);
+EXPORT_SYMBOL(__sw_hweight64);
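
The renamed __sw_hweight*() helpers use the classic SWAR (SIMD-within-a-register) reduction: pairs of bits are summed, then nibbles, then bytes, until a single count remains. A user-space sketch of the same 32-bit steps, runnable outside the kernel:

#include <stdio.h>
#include <stdint.h>

/* Same parallel bit-count steps as the generic 32-bit path above. */
static unsigned int popcount32(uint32_t w)
{
	uint32_t res = w - ((w >> 1) & 0x55555555);           /* 2-bit sums  */
	res = (res & 0x33333333) + ((res >> 2) & 0x33333333); /* 4-bit sums  */
	res = (res + (res >> 4)) & 0x0F0F0F0F;                /* 8-bit sums  */
	res = res + (res >> 8);                               /* 16-bit sums */
	return (res + (res >> 16)) & 0x000000FF;              /* final count */
}

int main(void)
{
	printf("%u\n", popcount32(0xF0F01234)); /* prints 13 */
	return 0;
}
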
diff --git a/lib/idr.c b/lib/idr.c
index 2eb1dca03681..422a9d5069cc 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -623,7 +623,7 @@ void *idr_get_next(struct idr *idp, int *nextidp)
}
return NULL;
}
-
+EXPORT_SYMBOL(idr_get_next);
/**
diff --git a/lib/ioq.c b/lib/ioq.c
new file mode 100644
index 000000000000..4027848d7436
--- /dev/null
+++ b/lib/ioq.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright 2009 Novell. All Rights Reserved.
+ *
+ * See include/linux/ioq.h for documentation
+ *
+ * Author:
+ * Gregory Haskins <ghaskins@novell.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/sched.h>
+#include <linux/ioq.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+
+MODULE_AUTHOR("Gregory Haskins");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1");
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+static int ioq_iter_setpos(struct ioq_iterator *iter, u32 pos)
+{
+ struct ioq *ioq = iter->ioq;
+
+ BUG_ON(pos >= ioq->count);
+
+ iter->pos = pos;
+ iter->desc = &ioq->ring[pos];
+
+ return 0;
+}
+
+static inline u32 modulo_inc(u32 val, u32 mod)
+{
+ BUG_ON(val >= mod);
+
+ if (val == (mod - 1))
+ return 0;
+
+ return val + 1;
+}
+
+static inline int idx_full(struct ioq_ring_idx *idx)
+{
+ return idx->full && (idx->head == idx->tail);
+}
+
+int ioq_iter_seek(struct ioq_iterator *iter, enum ioq_seek_type type,
+ long offset, int flags)
+{
+ struct ioq_ring_idx *idx = iter->idx;
+ u32 pos;
+
+ switch (type) {
+ case ioq_seek_next:
+ pos = modulo_inc(iter->pos, iter->ioq->count);
+ break;
+ case ioq_seek_tail:
+ pos = le32_to_cpu(idx->tail);
+ break;
+ case ioq_seek_head:
+ pos = le32_to_cpu(idx->head);
+ break;
+ case ioq_seek_set:
+ if (offset >= iter->ioq->count)
+ return -1;
+ pos = offset;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ioq_iter_setpos(iter, pos);
+}
+EXPORT_SYMBOL_GPL(ioq_iter_seek);
+
+static int ioq_ring_count(struct ioq_ring_idx *idx, int count)
+{
+ u32 head = le32_to_cpu(idx->head);
+ u32 tail = le32_to_cpu(idx->tail);
+
+ if (idx->full && (head == tail))
+ return count;
+ else if (tail >= head)
+ return tail - head;
+ else
+ return (tail + count) - head;
+}
+
+static void idx_tail_push(struct ioq_ring_idx *idx, int count)
+{
+ u32 tail = modulo_inc(le32_to_cpu(idx->tail), count);
+ u32 head = le32_to_cpu(idx->head);
+
+ if (head == tail) {
+ rmb();
+
+ /*
+ * Setting full here may look racy, but note that we haven't
+ * flipped the owner bit yet. So it is impossible for the
+ * remote locale to move head in such a way that this operation
+ * becomes invalid
+ */
+ idx->full = 1;
+ wmb();
+ }
+
+ idx->tail = cpu_to_le32(tail);
+}
+
+int ioq_iter_push(struct ioq_iterator *iter, int flags)
+{
+ struct ioq_ring_head *head_desc = iter->ioq->head_desc;
+ struct ioq_ring_idx *idx = iter->idx;
+ int ret;
+
+ /*
+ * It's only valid to push if we are currently pointed at the tail
+ */
+ if (iter->pos != le32_to_cpu(idx->tail) || iter->desc->sown != iter->ioq->locale)
+ return -EINVAL;
+
+ idx_tail_push(idx, iter->ioq->count);
+ if (iter->dualidx) {
+ idx_tail_push(&head_desc->idx[ioq_idxtype_inuse],
+ iter->ioq->count);
+ if (head_desc->idx[ioq_idxtype_inuse].tail !=
+ head_desc->idx[ioq_idxtype_valid].tail) {
+ SHM_SIGNAL_FAULT(iter->ioq->signal,
+ "Tails not synchronized");
+ return -EINVAL;
+ }
+ }
+
+ wmb(); /* the index must be visible before the sown, or signal */
+
+ if (iter->flipowner) {
+ iter->desc->sown = !iter->ioq->locale;
+ wmb(); /* sown must be visible before we signal */
+ }
+
+ ret = ioq_iter_seek(iter, ioq_seek_next, 0, flags);
+
+ if (iter->update)
+ ioq_signal(iter->ioq, 0);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ioq_iter_push);
+
+int ioq_iter_pop(struct ioq_iterator *iter, int flags)
+{
+ struct ioq_ring_idx *idx = iter->idx;
+ int ret;
+
+ /*
+ * It's only valid to pop if we are currently pointed at the head
+ */
+ if (iter->pos != le32_to_cpu(idx->head) || iter->desc->sown != iter->ioq->locale)
+ return -EINVAL;
+
+ idx->head = cpu_to_le32(modulo_inc(le32_to_cpu(idx->head), iter->ioq->count));
+ wmb(); /* head must be visible before full */
+
+ if (idx->full) {
+ idx->full = 0;
+ wmb(); /* full must be visible before sown */
+ }
+
+ if (iter->flipowner) {
+ iter->desc->sown = !iter->ioq->locale;
+ wmb(); /* sown must be visible before we signal */
+ }
+
+ ret = ioq_iter_seek(iter, ioq_seek_next, 0, flags);
+
+ if (iter->update)
+ ioq_signal(iter->ioq, 0);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ioq_iter_pop);
+
+static struct ioq_ring_idx *idxtype_to_idx(struct ioq *ioq,
+ enum ioq_idx_type type)
+{
+ struct ioq_ring_idx *idx;
+
+ switch (type) {
+ case ioq_idxtype_valid:
+ case ioq_idxtype_inuse:
+ idx = &ioq->head_desc->idx[type];
+ break;
+ default:
+ panic("IOQ: illegal index type: %d", type);
+ break;
+ }
+
+ return idx;
+}
+
+int ioq_iter_init(struct ioq *ioq, struct ioq_iterator *iter,
+ enum ioq_idx_type type, int flags)
+{
+ iter->ioq = ioq;
+ iter->update = (flags & IOQ_ITER_AUTOUPDATE);
+ iter->flipowner = !(flags & IOQ_ITER_NOFLIPOWNER);
+ iter->pos = -1;
+ iter->desc = NULL;
+ iter->dualidx = 0;
+
+ if (type == ioq_idxtype_both) {
+ /*
+ * "both" is a special case, so we set the dualidx flag.
+ *
+ * However, we also just want to use the valid-index
+ * for normal processing, so override that here
+ */
+ type = ioq_idxtype_valid;
+ iter->dualidx = 1;
+ }
+
+ iter->idx = idxtype_to_idx(ioq, type);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ioq_iter_init);
+
+int ioq_count(struct ioq *ioq, enum ioq_idx_type type)
+{
+ return ioq_ring_count(idxtype_to_idx(ioq, type), ioq->count);
+}
+EXPORT_SYMBOL_GPL(ioq_count);
+
+int ioq_remain(struct ioq *ioq, enum ioq_idx_type type)
+{
+ int count = ioq_ring_count(idxtype_to_idx(ioq, type), ioq->count);
+
+ return ioq->count - count;
+}
+EXPORT_SYMBOL_GPL(ioq_remain);
+
+int ioq_size(struct ioq *ioq)
+{
+ return ioq->count;
+}
+EXPORT_SYMBOL_GPL(ioq_size);
+
+int ioq_full(struct ioq *ioq, enum ioq_idx_type type)
+{
+ struct ioq_ring_idx *idx = idxtype_to_idx(ioq, type);
+
+ return idx_full(idx);
+}
+EXPORT_SYMBOL_GPL(ioq_full);
+
+static void ioq_shm_signal(struct shm_signal_notifier *notifier)
+{
+ struct ioq *ioq = container_of(notifier, struct ioq, shm_notifier);
+
+ if (waitqueue_active(&ioq->wq))
+ wake_up(&ioq->wq);
+
+ if (ioq->notifier)
+ ioq->notifier->signal(ioq->notifier);
+}
+
+void ioq_init(struct ioq *ioq,
+ struct ioq_ops *ops,
+ enum ioq_locality locale,
+ struct ioq_ring_head *head,
+ struct shm_signal *signal,
+ size_t count)
+{
+ memset(ioq, 0, sizeof(*ioq));
+ kref_init(&ioq->kref);
+ init_waitqueue_head(&ioq->wq);
+
+ ioq->ops = ops;
+ ioq->locale = locale;
+ ioq->head_desc = head;
+ ioq->ring = &head->ring[0];
+ ioq->count = count;
+ ioq->signal = signal;
+
+ ioq->shm_notifier.signal = &ioq_shm_signal;
+ signal->notifier = &ioq->shm_notifier;
+}
+EXPORT_SYMBOL_GPL(ioq_init);
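
lib/ioq.c implements a shared-memory circular ring with paired head/tail indices and an explicit "full" flag; producers push at the tail, consumers pop at the head, each push/pop flips the per-descriptor ownership bit ("sown") and optionally signals the remote locale through shm_signal. A hedged sketch of how a producer-side path might drive the iterator API introduced above; the descriptor fields (ptr, len) are placeholders, since this patch does not show the ring descriptor layout:

/* Hypothetical producer: enqueue one descriptor on an already-initialized ioq. */
static int example_ioq_send(struct ioq *ioq, u64 payload_addr, u32 payload_len)
{
	struct ioq_iterator iter;
	int ret;

	/* Iterate over the valid index and signal automatically on push. */
	ret = ioq_iter_init(ioq, &iter, ioq_idxtype_valid, IOQ_ITER_AUTOUPDATE);
	if (ret < 0)
		return ret;

	/* Position ourselves at the tail, the only legal place to push. */
	ret = ioq_iter_seek(&iter, ioq_seek_tail, 0, 0);
	if (ret < 0)
		return ret;

	/* Fill the descriptor we now point at (field names are illustrative). */
	iter.desc->ptr = cpu_to_le64(payload_addr);
	iter.desc->len = cpu_to_le64(payload_len);

	/* Publish: advances the tail, flips ownership, and signals the remote. */
	return ioq_iter_push(&iter, 0);
}
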
diff --git a/lib/rbtree.c b/lib/rbtree.c
index e2aa3be29858..15e10b1afdd2 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -44,6 +44,11 @@ static void __rb_rotate_left(struct rb_node *node, struct rb_root *root)
else
root->rb_node = right;
rb_set_parent(node, right);
+
+ if (root->augment_cb) {
+ root->augment_cb(node);
+ root->augment_cb(right);
+ }
}
static void __rb_rotate_right(struct rb_node *node, struct rb_root *root)
@@ -67,12 +72,20 @@ static void __rb_rotate_right(struct rb_node *node, struct rb_root *root)
else
root->rb_node = left;
rb_set_parent(node, left);
+
+ if (root->augment_cb) {
+ root->augment_cb(node);
+ root->augment_cb(left);
+ }
}
void rb_insert_color(struct rb_node *node, struct rb_root *root)
{
struct rb_node *parent, *gparent;
+ if (root->augment_cb)
+ root->augment_cb(node);
+
while ((parent = rb_parent(node)) && rb_is_red(parent))
{
gparent = rb_parent(parent);
@@ -227,12 +240,15 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
else
{
struct rb_node *old = node, *left;
+ int old_parent_cb = 0;
+ int successor_parent_cb = 0;
node = node->rb_right;
while ((left = node->rb_left) != NULL)
node = left;
if (rb_parent(old)) {
+ old_parent_cb = 1;
if (rb_parent(old)->rb_left == old)
rb_parent(old)->rb_left = node;
else
@@ -247,8 +263,10 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
if (parent == old) {
parent = node;
} else {
+ successor_parent_cb = 1;
if (child)
rb_set_parent(child, parent);
+
parent->rb_left = child;
node->rb_right = old->rb_right;
@@ -259,6 +277,24 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
node->rb_left = old->rb_left;
rb_set_parent(old->rb_left, node);
+ if (root->augment_cb) {
+ /*
+ * Here, three different nodes can have new children:
+ * the parent of the successor node that was selected
+ * to replace the erased node, the successor node
+ * itself (which now takes the erased node's place),
+ * and the parent of the erased node.
+ */
+ if (successor_parent_cb)
+ root->augment_cb(parent);
+
+ root->augment_cb(node);
+
+ if (old_parent_cb)
+ root->augment_cb(rb_parent(old));
+ }
+
goto color;
}
@@ -267,15 +303,19 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
if (child)
rb_set_parent(child, parent);
- if (parent)
- {
+
+ if (parent) {
if (parent->rb_left == node)
parent->rb_left = child;
else
parent->rb_right = child;
- }
- else
+
+ if (root->augment_cb)
+ root->augment_cb(parent);
+
+ } else {
root->rb_node = child;
+ }
color:
if (color == RB_BLACK)
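
The new augment_cb hook is invoked on every node whose children change during rotation, insertion and erase, so a user can keep per-subtree metadata (for example a cached maximum of an interval endpoint) consistent without walking the whole tree. A hedged sketch of one way such a callback could look, assuming struct rb_root gained the augment_cb pointer as this patch implies; the container struct and field names are invented for illustration:

struct my_interval {
	struct rb_node node;
	unsigned long last;		/* end of this interval              */
	unsigned long subtree_last;	/* max "last" in this node's subtree */
};

/* Recompute the cached subtree maximum for one node whose children changed. */
static void my_augment_cb(struct rb_node *rb)
{
	struct my_interval *n = rb_entry(rb, struct my_interval, node);
	unsigned long max = n->last;

	if (rb->rb_left) {
		struct my_interval *l =
			rb_entry(rb->rb_left, struct my_interval, node);
		if (l->subtree_last > max)
			max = l->subtree_last;
	}
	if (rb->rb_right) {
		struct my_interval *r =
			rb_entry(rb->rb_right, struct my_interval, node);
		if (r->subtree_last > max)
			max = r->subtree_last;
	}
	n->subtree_last = max;
}

/* Installed once on the root before any insertion:
 *	struct rb_root root = RB_ROOT;
 *	root.augment_cb = my_augment_cb;
 */
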
diff --git a/lib/shm_signal.c b/lib/shm_signal.c
new file mode 100644
index 000000000000..8d3e9b418a27
--- /dev/null
+++ b/lib/shm_signal.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright 2009 Novell. All Rights Reserved.
+ *
+ * See include/linux/shm_signal.h for documentation
+ *
+ * Author:
+ * Gregory Haskins <ghaskins@novell.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/shm_signal.h>
+
+MODULE_AUTHOR("Gregory Haskins");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1");
+
+int shm_signal_enable(struct shm_signal *s, int flags)
+{
+ struct shm_signal_irq *irq = &s->desc->irq[s->locale];
+ unsigned long iflags;
+
+ spin_lock_irqsave(&s->lock, iflags);
+
+ irq->enabled = 1;
+ wmb();
+
+ if ((irq->dirty || irq->pending)
+ && !test_bit(shm_signal_in_wakeup, &s->flags)) {
+ rmb();
+ tasklet_schedule(&s->deferred_notify);
+ }
+
+ spin_unlock_irqrestore(&s->lock, iflags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(shm_signal_enable);
+
+int shm_signal_disable(struct shm_signal *s, int flags)
+{
+ struct shm_signal_irq *irq = &s->desc->irq[s->locale];
+
+ irq->enabled = 0;
+ wmb();
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(shm_signal_disable);
+
+/*
+ * signaling protocol:
+ *
+ * each side of the shm_signal has an "irq" structure with the following
+ * fields:
+ *
+ * - enabled: controlled by shm_signal_enable/disable() to mask/unmask
+ * the notification locally
+ * - dirty: indicates if the shared-memory is dirty or clean. This
+ * is updated regardless of the enabled/pending state so that
+ * the state is always accurately tracked.
+ * - pending: indicates if a signal is pending to the remote locale.
+ * This allows us to determine if a remote-notification is
+ * already in flight to optimize spurious notifications away.
+ */
+int shm_signal_inject(struct shm_signal *s, int flags)
+{
+ /* Load the irq structure from the other locale */
+ struct shm_signal_irq *irq = &s->desc->irq[!s->locale];
+
+ /*
+ * We always mark the remote side as dirty regardless of whether
+ * they need to be notified.
+ */
+ irq->dirty = 1;
+ wmb(); /* dirty must be visible before we test the pending state */
+
+ if (irq->enabled && !irq->pending) {
+ rmb();
+
+ /*
+ * If the remote side has enabled notifications, and we do
+ * not see a notification pending, we must inject a new one.
+ */
+ irq->pending = 1;
+ wmb(); /* make it visible before we do the injection */
+
+ s->ops->inject(s);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(shm_signal_inject);
+
+void _shm_signal_wakeup(struct shm_signal *s)
+{
+ struct shm_signal_irq *irq = &s->desc->irq[s->locale];
+ int dirty;
+ unsigned long flags;
+
+ spin_lock_irqsave(&s->lock, flags);
+
+ __set_bit(shm_signal_in_wakeup, &s->flags);
+
+ /*
+ * The outer loop protects against race conditions between
+ * irq->dirty and irq->pending updates
+ */
+ while (irq->enabled && (irq->dirty || irq->pending)) {
+
+ /*
+ * Run until we completely exhaust irq->dirty (it may
+ * be re-dirtied by the remote side while we are in the
+ * callback). We let "pending" remain untouched until we have
+ * processed them all so that the remote side knows we do not
+ * need a new notification (yet).
+ */
+ do {
+ irq->dirty = 0;
+ /* the unlock is an implicit wmb() for dirty = 0 */
+ spin_unlock_irqrestore(&s->lock, flags);
+
+ if (s->notifier)
+ s->notifier->signal(s->notifier);
+
+ spin_lock_irqsave(&s->lock, flags);
+ dirty = irq->dirty;
+ rmb();
+
+ } while (irq->enabled && dirty);
+
+ barrier();
+
+ /*
+ * We can finally acknowledge the notification by clearing
+ * "pending" after all of the dirty memory has been processed
+ * Races against this clearing are handled by the outer loop.
+ * Subsequent iterations of this loop will execute with
+ * pending=0 potentially leading to future spurious
+ * notifications, but this is an acceptable tradeoff as this
+ * will be rare and harmless.
+ */
+ irq->pending = 0;
+ wmb();
+
+ }
+
+ __clear_bit(shm_signal_in_wakeup, &s->flags);
+ spin_unlock_irqrestore(&s->lock, flags);
+
+}
+EXPORT_SYMBOL_GPL(_shm_signal_wakeup);
+
+void _shm_signal_release(struct kref *kref)
+{
+ struct shm_signal *s = container_of(kref, struct shm_signal, kref);
+
+ s->ops->release(s);
+}
+EXPORT_SYMBOL_GPL(_shm_signal_release);
+
+static void
+deferred_notify(unsigned long data)
+{
+ struct shm_signal *s = (struct shm_signal *)data;
+
+ _shm_signal_wakeup(s);
+}
+
+void shm_signal_init(struct shm_signal *s, enum shm_signal_locality locale,
+ struct shm_signal_ops *ops, struct shm_signal_desc *desc)
+{
+ memset(s, 0, sizeof(*s));
+ kref_init(&s->kref);
+ spin_lock_init(&s->lock);
+ tasklet_init(&s->deferred_notify,
+ deferred_notify,
+ (unsigned long)s);
+ s->locale = locale;
+ s->ops = ops;
+ s->desc = desc;
+}
+EXPORT_SYMBOL_GPL(shm_signal_init);
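
The enabled/dirty/pending protocol above lets both sides coalesce notifications: the sender always marks the shared memory dirty, but only injects a real signal when the receiver has notifications enabled and none is already in flight. A hedged sketch of how a user of this API might wire up the two halves; the ops implementation behind the struct shm_signal and the work done inside the notifier are placeholders:

/* Hypothetical transport glue around an already-initialized struct shm_signal. */

static void my_notifier_fn(struct shm_signal_notifier *n)
{
	/*
	 * Called from _shm_signal_wakeup() while the memory is dirty:
	 * drain whatever shared-memory state this signal protects here.
	 */
}

static struct shm_signal_notifier my_notifier = {
	.signal = my_notifier_fn,
};

static void example_receiver_setup(struct shm_signal *s)
{
	s->notifier = &my_notifier;
	shm_signal_enable(s, 0);	/* unmask; replays any missed dirty state */
}

static void example_sender_publish(struct shm_signal *s)
{
	/* ...update the shared memory region first... */
	shm_signal_inject(s, 0);	/* marks dirty, signals only if needed */
}
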
diff --git a/lib/uuid.c b/lib/uuid.c
new file mode 100644
index 000000000000..8fadd7cef46c
--- /dev/null
+++ b/lib/uuid.c
@@ -0,0 +1,53 @@
+/*
+ * Unified UUID/GUID definition
+ *
+ * Copyright (C) 2009, Intel Corp.
+ * Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation;
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/uuid.h>
+#include <linux/random.h>
+
+static void __uuid_gen_common(__u8 b[16])
+{
+ int i;
+ u32 r;
+
+ for (i = 0; i < 4; i++) {
+ r = random32();
+ memcpy(b + i * 4, &r, 4);
+ }
+ /* variant bits: 0b10 */
+ b[8] = (b[8] & 0x3F) | 0x80;
+}
+
+void uuid_le_gen(uuid_le *lu)
+{
+ __uuid_gen_common(lu->b);
+ /* version 4 : random generation */
+ lu->b[7] = (lu->b[7] & 0x0F) | 0x40;
+}
+EXPORT_SYMBOL_GPL(uuid_le_gen);
+
+void uuid_be_gen(uuid_be *bu)
+{
+ __uuid_gen_common(bu->b);
+ /* version 4 : random generation */
+ bu->b[6] = (bu->b[6] & 0x0F) | 0x40;
+}
+EXPORT_SYMBOL_GPL(uuid_be_gen);
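
Both generators fill 16 random bytes and then stamp the RFC 4122 markers: the version nibble (0100, a random-based v4 UUID) and the variant bits (10). In the big-endian byte order these land in bytes 6 and 8; in the little-endian ("le"/GUID) layout the version byte sits at index 7 instead. A small stand-alone sketch of the big-endian case, using plain rand() purely for illustration:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned char b[16];
	int i;

	for (i = 0; i < 16; i++)
		b[i] = rand() & 0xff;		/* illustrative randomness only  */

	b[6] = (b[6] & 0x0F) | 0x40;		/* version 4 in the high nibble  */
	b[8] = (b[8] & 0x3F) | 0x80;		/* variant bits 10xxxxxx         */

	printf("%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-"
	       "%02x%02x%02x%02x%02x%02x\n",
	       b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7],
	       b[8], b[9], b[10], b[11], b[12], b[13], b[14], b[15]);
	return 0;
}
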
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 0f711c213d2e..e29a70536668 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1440,7 +1440,7 @@ static void drain_local_stock(struct work_struct *dummy)
/*
* Cache charges(val) which is from res_counter, to local per_cpu area.
- * This will be consumed by consumt_stock() function, later.
+ * This will be consumed by consume_stock() function, later.
*/
static void refill_stock(struct mem_cgroup *mem, int val)
{
diff --git a/mm/mlock.c b/mm/mlock.c
index 8f4e2dfceec1..3f82720e0515 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -607,44 +607,3 @@ void user_shm_unlock(size_t size, struct user_struct *user)
spin_unlock(&shmlock_user_lock);
free_uid(user);
}
-
-int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
- size_t size)
-{
- unsigned long lim, vm, pgsz;
- int error = -ENOMEM;
-
- pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
- down_write(&mm->mmap_sem);
-
- lim = ACCESS_ONCE(rlim[RLIMIT_AS].rlim_cur) >> PAGE_SHIFT;
- vm = mm->total_vm + pgsz;
- if (lim < vm)
- goto out;
-
- lim = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur) >> PAGE_SHIFT;
- vm = mm->locked_vm + pgsz;
- if (lim < vm)
- goto out;
-
- mm->total_vm += pgsz;
- mm->locked_vm += pgsz;
-
- error = 0;
- out:
- up_write(&mm->mmap_sem);
- return error;
-}
-
-void refund_locked_memory(struct mm_struct *mm, size_t size)
-{
- unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
- down_write(&mm->mmap_sem);
-
- mm->total_vm -= pgsz;
- mm->locked_vm -= pgsz;
-
- up_write(&mm->mmap_sem);
-}
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 0b19943ecf8b..bdbac1502f9e 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -683,10 +683,6 @@ void throttle_vm_writeout(gfp_t gfp_mask)
}
}
-static void laptop_timer_fn(unsigned long unused);
-
-static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);
-
/*
* sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
*/
@@ -697,31 +693,14 @@ int dirty_writeback_centisecs_handler(ctl_table *table, int write,
return 0;
}
-static void do_laptop_sync(struct work_struct *work)
-{
- wakeup_flusher_threads(0);
- kfree(work);
-}
-
-static void laptop_timer_fn(unsigned long unused)
-{
- struct work_struct *work;
-
- work = kmalloc(sizeof(*work), GFP_ATOMIC);
- if (work) {
- INIT_WORK(work, do_laptop_sync);
- schedule_work(work);
- }
-}
-
/*
* We've spun up the disk and we're in laptop mode: schedule writeback
* of all dirty data a few seconds from now. If the flush is already scheduled
* then push it back - the user is still using the disk.
*/
-void laptop_io_completion(void)
+void laptop_io_completion(struct backing_dev_info *info)
{
- mod_timer(&laptop_mode_wb_timer, jiffies + laptop_mode);
+ mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode);
}
/*
@@ -731,7 +710,14 @@ void laptop_io_completion(void)
*/
void laptop_sync_completion(void)
{
- del_timer(&laptop_mode_wb_timer);
+ struct backing_dev_info *bdi;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
+ del_timer(&bdi->laptop_mode_wb_timer);
+
+ rcu_read_unlock();
}
/*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d03c946d5566..a6326c71b663 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2579,7 +2579,7 @@ static int default_zonelist_order(void)
struct zone *z;
int average_size;
/*
- * ZONE_DMA and ZONE_DMA32 can be very small area in the sytem.
+ * ZONE_DMA and ZONE_DMA32 can be very small area in the system.
* If they are really small and used heavily, the system can fall
* into OOM very easily.
* This function detect ZONE_DMA/DMA32 size and confgigures zone order.
diff --git a/mm/slab.c b/mm/slab.c
index bac0f4fcc216..5a9cfcdaed22 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -115,6 +115,7 @@
#include <linux/reciprocal_div.h>
#include <linux/debugobjects.h>
#include <linux/kmemcheck.h>
+#include <linux/memory.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
@@ -1102,6 +1103,52 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
}
#endif
+/*
+ * Allocates and initializes nodelists for a node on each slab cache, used for
+ * either memory or cpu hotplug. If memory is being hot-added, the kmem_list3
+ * will be allocated off-node since memory is not yet online for the new node.
+ * When hotplugging memory or a cpu, existing nodelists are not replaced if
+ * already in use.
+ *
+ * Must hold cache_chain_mutex.
+ */
+static int init_cache_nodelists_node(int node)
+{
+ struct kmem_cache *cachep;
+ struct kmem_list3 *l3;
+ const int memsize = sizeof(struct kmem_list3);
+
+ list_for_each_entry(cachep, &cache_chain, next) {
+ /*
+ * Set up the size64 kmemlist for cpu before we can
+ * begin anything. Make sure some other cpu on this
+ * node has not already allocated this
+ */
+ if (!cachep->nodelists[node]) {
+ l3 = kmalloc_node(memsize, GFP_KERNEL, node);
+ if (!l3)
+ return -ENOMEM;
+ kmem_list3_init(l3);
+ l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
+ ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
+
+ /*
+ * The l3s don't come and go as CPUs come and
+ * go. cache_chain_mutex is sufficient
+ * protection here.
+ */
+ cachep->nodelists[node] = l3;
+ }
+
+ spin_lock_irq(&cachep->nodelists[node]->list_lock);
+ cachep->nodelists[node]->free_limit =
+ (1 + nr_cpus_node(node)) *
+ cachep->batchcount + cachep->num;
+ spin_unlock_irq(&cachep->nodelists[node]->list_lock);
+ }
+ return 0;
+}
+
static void __cpuinit cpuup_canceled(long cpu)
{
struct kmem_cache *cachep;
@@ -1172,7 +1219,7 @@ static int __cpuinit cpuup_prepare(long cpu)
struct kmem_cache *cachep;
struct kmem_list3 *l3 = NULL;
int node = cpu_to_node(cpu);
- const int memsize = sizeof(struct kmem_list3);
+ int err;
/*
* We need to do this right in the beginning since
@@ -1180,35 +1227,9 @@ static int __cpuinit cpuup_prepare(long cpu)
* kmalloc_node allows us to add the slab to the right
* kmem_list3 and not this cpu's kmem_list3
*/
-
- list_for_each_entry(cachep, &cache_chain, next) {
- /*
- * Set up the size64 kmemlist for cpu before we can
- * begin anything. Make sure some other cpu on this
- * node has not already allocated this
- */
- if (!cachep->nodelists[node]) {
- l3 = kmalloc_node(memsize, GFP_KERNEL, node);
- if (!l3)
- goto bad;
- kmem_list3_init(l3);
- l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
- ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
-
- /*
- * The l3s don't come and go as CPUs come and
- * go. cache_chain_mutex is sufficient
- * protection here.
- */
- cachep->nodelists[node] = l3;
- }
-
- spin_lock_irq(&cachep->nodelists[node]->list_lock);
- cachep->nodelists[node]->free_limit =
- (1 + nr_cpus_node(node)) *
- cachep->batchcount + cachep->num;
- spin_unlock_irq(&cachep->nodelists[node]->list_lock);
- }
+ err = init_cache_nodelists_node(node);
+ if (err < 0)
+ goto bad;
/*
* Now we can go ahead with allocating the shared arrays and
@@ -1331,11 +1352,75 @@ static struct notifier_block __cpuinitdata cpucache_notifier = {
&cpuup_callback, NULL, 0
};
+#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
+/*
+ * Drains freelist for a node on each slab cache, used for memory hot-remove.
+ * Returns -EBUSY if all objects cannot be drained so that the node is not
+ * removed.
+ *
+ * Must hold cache_chain_mutex.
+ */
+static int __meminit drain_cache_nodelists_node(int node)
+{
+ struct kmem_cache *cachep;
+ int ret = 0;
+
+ list_for_each_entry(cachep, &cache_chain, next) {
+ struct kmem_list3 *l3;
+
+ l3 = cachep->nodelists[node];
+ if (!l3)
+ continue;
+
+ drain_freelist(cachep, l3, l3->free_objects);
+
+ if (!list_empty(&l3->slabs_full) ||
+ !list_empty(&l3->slabs_partial)) {
+ ret = -EBUSY;
+ break;
+ }
+ }
+ return ret;
+}
+
+static int __meminit slab_memory_callback(struct notifier_block *self,
+ unsigned long action, void *arg)
+{
+ struct memory_notify *mnb = arg;
+ int ret = 0;
+ int nid;
+
+ nid = mnb->status_change_nid;
+ if (nid < 0)
+ goto out;
+
+ switch (action) {
+ case MEM_GOING_ONLINE:
+ mutex_lock(&cache_chain_mutex);
+ ret = init_cache_nodelists_node(nid);
+ mutex_unlock(&cache_chain_mutex);
+ break;
+ case MEM_GOING_OFFLINE:
+ mutex_lock(&cache_chain_mutex);
+ ret = drain_cache_nodelists_node(nid);
+ mutex_unlock(&cache_chain_mutex);
+ break;
+ case MEM_ONLINE:
+ case MEM_OFFLINE:
+ case MEM_CANCEL_ONLINE:
+ case MEM_CANCEL_OFFLINE:
+ break;
+ }
+out:
+ return ret ? notifier_from_errno(ret) : NOTIFY_OK;
+}
+#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
+
/*
* swap the static kmem_list3 with kmalloced memory
*/
-static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
- int nodeid)
+static void __init init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
+ int nodeid)
{
struct kmem_list3 *ptr;
@@ -1580,6 +1665,14 @@ void __init kmem_cache_init_late(void)
*/
register_cpu_notifier(&cpucache_notifier);
+#ifdef CONFIG_NUMA
+ /*
+ * Register a memory hotplug callback that initializes and frees
+ * nodelists.
+ */
+ hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
+#endif
+
/*
* The reap timers are started later, with a module init call: That part
* of the kernel is not yet operational.
@@ -2220,8 +2313,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
if (ralign < align) {
ralign = align;
}
- /* disable debug if necessary */
- if (ralign > __alignof__(unsigned long long))
+ /* disable debug if not aligning with REDZONE_ALIGN */
+ if (ralign & (__alignof__(unsigned long long) - 1))
flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
/*
* 4) Store it.
@@ -2247,8 +2340,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
*/
if (flags & SLAB_RED_ZONE) {
/* add space for red zone words */
- cachep->obj_offset += sizeof(unsigned long long);
- size += 2 * sizeof(unsigned long long);
+ cachep->obj_offset += align;
+ size += align + sizeof(unsigned long long);
}
if (flags & SLAB_STORE_USER) {
/* user store requires one word storage behind the end of
@@ -4216,10 +4309,11 @@ static int s_show(struct seq_file *m, void *p)
unsigned long node_frees = cachep->node_frees;
unsigned long overflows = cachep->node_overflow;
- seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \
- %4lu %4lu %4lu %4lu %4lu", allocs, high, grown,
- reaped, errors, max_freeable, node_allocs,
- node_frees, overflows);
+ seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu "
+ "%4lu %4lu %4lu %4lu %4lu",
+ allocs, high, grown,
+ reaped, errors, max_freeable, node_allocs,
+ node_frees, overflows);
}
/* cpu stats */
{
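
The slab changes above factor the per-node list setup into init_cache_nodelists_node() so the same path serves both CPU and memory hot-add, and register a memory-hotplug notifier that initializes or drains nodelists as a node appears or goes away. The general shape of such a notifier, as a hedged stand-alone sketch with placeholder handler bodies:

#include <linux/memory.h>
#include <linux/notifier.h>

static int __meminit example_memory_callback(struct notifier_block *self,
					     unsigned long action, void *arg)
{
	struct memory_notify *mnb = arg;
	int nid = mnb->status_change_nid;
	int ret = 0;

	if (nid < 0)
		return NOTIFY_OK;	/* no node-level change to react to */

	switch (action) {
	case MEM_GOING_ONLINE:
		/* allocate any per-node state needed before memory arrives */
		break;
	case MEM_GOING_OFFLINE:
		/* drain per-node state; set ret to -EBUSY to veto removal */
		break;
	default:
		break;
	}
	return ret ? notifier_from_errno(ret) : NOTIFY_OK;
}

/* Registered once at init time, mirroring the slab hunk above:
 *	hotplug_memory_notifier(example_memory_callback, SLAB_CALLBACK_PRI);
 * (any priority constant appropriate to the subsystem would do).
 */
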
diff --git a/mm/slub.c b/mm/slub.c
index d2a54fe71ea2..2cdd235cb801 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1084,7 +1084,7 @@ static inline struct page *alloc_slab_page(gfp_t flags, int node,
if (node == -1)
return alloc_pages(flags, order);
else
- return alloc_pages_node(node, flags, order);
+ return alloc_pages_exact_node(node, flags, order);
}
static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
@@ -2429,9 +2429,11 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
#ifdef CONFIG_SLUB_DEBUG
void *addr = page_address(page);
void *p;
- DECLARE_BITMAP(map, page->objects);
+ long *map = kzalloc(BITS_TO_LONGS(page->objects) * sizeof(long),
+ GFP_ATOMIC);
- bitmap_zero(map, page->objects);
+ if (!map)
+ return;
slab_err(s, page, "%s", text);
slab_lock(page);
for_each_free_object(p, s, page->freelist)
@@ -2446,6 +2448,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
}
}
slab_unlock(page);
+ kfree(map);
#endif
}
@@ -3338,8 +3341,15 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
struct kmem_cache *s;
void *ret;
- if (unlikely(size > SLUB_MAX_SIZE))
- return kmalloc_large_node(size, gfpflags, node);
+ if (unlikely(size > SLUB_MAX_SIZE)) {
+ ret = kmalloc_large_node(size, gfpflags, node);
+
+ trace_kmalloc_node(caller, ret,
+ size, PAGE_SIZE << get_order(size),
+ gfpflags, node);
+
+ return ret;
+ }
s = get_slab(size, gfpflags);
@@ -3651,10 +3661,10 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
}
static void process_slab(struct loc_track *t, struct kmem_cache *s,
- struct page *page, enum track_item alloc)
+ struct page *page, enum track_item alloc,
+ long *map)
{
void *addr = page_address(page);
- DECLARE_BITMAP(map, page->objects);
void *p;
bitmap_zero(map, page->objects);
@@ -3673,11 +3683,14 @@ static int list_locations(struct kmem_cache *s, char *buf,
unsigned long i;
struct loc_track t = { 0, 0, NULL };
int node;
+ unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
+ sizeof(unsigned long), GFP_KERNEL);
- if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
- GFP_TEMPORARY))
+ if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
+ GFP_TEMPORARY)) {
+ kfree(map);
return sprintf(buf, "Out of memory\n");
-
+ }
/* Push back cpu slabs */
flush_all(s);
@@ -3691,9 +3704,9 @@ static int list_locations(struct kmem_cache *s, char *buf,
spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(page, &n->partial, lru)
- process_slab(&t, s, page, alloc);
+ process_slab(&t, s, page, alloc, map);
list_for_each_entry(page, &n->full, lru)
- process_slab(&t, s, page, alloc);
+ process_slab(&t, s, page, alloc, map);
spin_unlock_irqrestore(&n->list_lock, flags);
}
@@ -3744,6 +3757,7 @@ static int list_locations(struct kmem_cache *s, char *buf,
}
free_loc_track(&t);
+ kfree(map);
if (!t.count)
len += sprintf(buf, "No data\n");
return len;
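
The SLUB hunks above replace on-stack DECLARE_BITMAP(map, page->objects) with a heap allocation, because page->objects is not a compile-time constant and a large slab could otherwise overflow the kernel stack. The underlying pattern, as a short hedged sketch:

#include <linux/slab.h>
#include <linux/bitops.h>

/* Bitmap sized at run time: one bit per object, allocated off the stack. */
static int example_mark_objects(unsigned int nr_objects)
{
	unsigned long *map;

	map = kzalloc(BITS_TO_LONGS(nr_objects) * sizeof(unsigned long),
		      GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	set_bit(3, map);			/* mark object 3, say        */
	WARN_ON(!test_bit(3, map));		/* and it reads back as set  */

	kfree(map);
	return 0;
}
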
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 6cd0a8f90dc7..eb086e0f4dcc 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -139,7 +139,8 @@ static int discard_swap(struct swap_info_struct *si)
nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
if (nr_blocks) {
err = blkdev_issue_discard(si->bdev, start_block,
- nr_blocks, GFP_KERNEL, DISCARD_FL_BARRIER);
+ nr_blocks, GFP_KERNEL,
+ BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
if (err)
return err;
cond_resched();
@@ -150,7 +151,8 @@ static int discard_swap(struct swap_info_struct *si)
nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
err = blkdev_issue_discard(si->bdev, start_block,
- nr_blocks, GFP_KERNEL, DISCARD_FL_BARRIER);
+ nr_blocks, GFP_KERNEL,
+ BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
if (err)
break;
@@ -189,7 +191,8 @@ static void discard_swap_cluster(struct swap_info_struct *si,
start_block <<= PAGE_SHIFT - 9;
nr_blocks <<= PAGE_SHIFT - 9;
if (blkdev_issue_discard(si->bdev, start_block,
- nr_blocks, GFP_NOIO, DISCARD_FL_BARRIER))
+ nr_blocks, GFP_NOIO, BLKDEV_IFL_WAIT |
+ BLKDEV_IFL_BARRIER))
break;
}
diff --git a/net/802/garp.c b/net/802/garp.c
index 9ed7c0e7dc17..941f2a324d3a 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -576,7 +576,7 @@ int garp_init_applicant(struct net_device *dev, struct garp_application *appl)
if (!app)
goto err2;
- err = dev_mc_add(dev, appl->proto.group_address, ETH_ALEN, 0);
+ err = dev_mc_add(dev, appl->proto.group_address);
if (err < 0)
goto err3;
@@ -616,7 +616,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl
garp_pdu_queue(app);
garp_queue_xmit(app);
- dev_mc_delete(dev, appl->proto.group_address, ETH_ALEN, 0);
+ dev_mc_del(dev, appl->proto.group_address);
kfree(app);
garp_release_port(dev);
}
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 97da977c2a23..3c1c8c14e929 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -357,13 +357,13 @@ static void vlan_sync_address(struct net_device *dev,
* the new address */
if (compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) &&
!compare_ether_addr(vlandev->dev_addr, dev->dev_addr))
- dev_unicast_delete(dev, vlandev->dev_addr);
+ dev_uc_del(dev, vlandev->dev_addr);
/* vlan address was equal to the old address and is different from
* the new address */
if (!compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) &&
compare_ether_addr(vlandev->dev_addr, dev->dev_addr))
- dev_unicast_add(dev, vlandev->dev_addr);
+ dev_uc_add(dev, vlandev->dev_addr);
memcpy(vlan->real_dev_addr, dev->dev_addr, ETH_ALEN);
}
@@ -533,6 +533,10 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
}
unregister_netdevice_many(&list);
break;
+
+ case NETDEV_PRE_TYPE_CHANGE:
+ /* Forbid the underlying device to change its type. */
+ return NOTIFY_BAD;
}
out:
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 29b6348c8d4d..b5249c5fd4d3 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -470,7 +470,7 @@ static int vlan_dev_open(struct net_device *dev)
return -ENETDOWN;
if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) {
- err = dev_unicast_add(real_dev, dev->dev_addr);
+ err = dev_uc_add(real_dev, dev->dev_addr);
if (err < 0)
goto out;
}
@@ -499,7 +499,7 @@ clear_allmulti:
dev_set_allmulti(real_dev, -1);
del_unicast:
if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
- dev_unicast_delete(real_dev, dev->dev_addr);
+ dev_uc_del(real_dev, dev->dev_addr);
out:
netif_carrier_off(dev);
return err;
@@ -514,14 +514,14 @@ static int vlan_dev_stop(struct net_device *dev)
vlan_gvrp_request_leave(dev);
dev_mc_unsync(real_dev, dev);
- dev_unicast_unsync(real_dev, dev);
+ dev_uc_unsync(real_dev, dev);
if (dev->flags & IFF_ALLMULTI)
dev_set_allmulti(real_dev, -1);
if (dev->flags & IFF_PROMISC)
dev_set_promiscuity(real_dev, -1);
if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
- dev_unicast_delete(real_dev, dev->dev_addr);
+ dev_uc_del(real_dev, dev->dev_addr);
netif_carrier_off(dev);
return 0;
@@ -540,13 +540,13 @@ static int vlan_dev_set_mac_address(struct net_device *dev, void *p)
goto out;
if (compare_ether_addr(addr->sa_data, real_dev->dev_addr)) {
- err = dev_unicast_add(real_dev, addr->sa_data);
+ err = dev_uc_add(real_dev, addr->sa_data);
if (err < 0)
return err;
}
if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
- dev_unicast_delete(real_dev, dev->dev_addr);
+ dev_uc_del(real_dev, dev->dev_addr);
out:
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
@@ -663,7 +663,7 @@ static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
{
dev_mc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
- dev_unicast_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
+ dev_uc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
}
/*
diff --git a/net/9p/client.c b/net/9p/client.c
index 0aa79faa9850..37c8da07a80b 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -1321,7 +1321,8 @@ static int p9_client_statsize(struct p9_wstat *wst, int proto_version)
if (wst->muid)
ret += strlen(wst->muid);
- if (proto_version == p9_proto_2000u) {
+ if ((proto_version == p9_proto_2000u) ||
+ (proto_version == p9_proto_2000L)) {
ret += 2+4+4+4; /* extension[s] n_uid[4] n_gid[4] n_muid[4] */
if (wst->extension)
ret += strlen(wst->extension);
@@ -1364,3 +1365,70 @@ error:
return err;
}
EXPORT_SYMBOL(p9_client_wstat);
+
+int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb)
+{
+ int err;
+ struct p9_req_t *req;
+ struct p9_client *clnt;
+
+ err = 0;
+ clnt = fid->clnt;
+
+ P9_DPRINTK(P9_DEBUG_9P, ">>> TSTATFS fid %d\n", fid->fid);
+
+ req = p9_client_rpc(clnt, P9_TSTATFS, "d", fid->fid);
+ if (IS_ERR(req)) {
+ err = PTR_ERR(req);
+ goto error;
+ }
+
+ err = p9pdu_readf(req->rc, clnt->proto_version, "ddqqqqqqd", &sb->type,
+ &sb->bsize, &sb->blocks, &sb->bfree, &sb->bavail,
+ &sb->files, &sb->ffree, &sb->fsid, &sb->namelen);
+ if (err) {
+ p9pdu_dump(1, req->rc);
+ p9_free_req(clnt, req);
+ goto error;
+ }
+
+ P9_DPRINTK(P9_DEBUG_9P, "<<< RSTATFS fid %d type 0x%lx bsize %ld "
+ "blocks %llu bfree %llu bavail %llu files %llu ffree %llu "
+ "fsid %llu namelen %ld\n",
+ fid->fid, (long unsigned int)sb->type, (long int)sb->bsize,
+ sb->blocks, sb->bfree, sb->bavail, sb->files, sb->ffree,
+ sb->fsid, (long int)sb->namelen);
+
+ p9_free_req(clnt, req);
+error:
+ return err;
+}
+EXPORT_SYMBOL(p9_client_statfs);
+
+int p9_client_rename(struct p9_fid *fid, struct p9_fid *newdirfid, char *name)
+{
+ int err;
+ struct p9_req_t *req;
+ struct p9_client *clnt;
+
+ err = 0;
+ clnt = fid->clnt;
+
+ P9_DPRINTK(P9_DEBUG_9P, ">>> TRENAME fid %d newdirfid %d name %s\n",
+ fid->fid, newdirfid->fid, name);
+
+ req = p9_client_rpc(clnt, P9_TRENAME, "dds", fid->fid,
+ newdirfid->fid, name);
+ if (IS_ERR(req)) {
+ err = PTR_ERR(req);
+ goto error;
+ }
+
+ P9_DPRINTK(P9_DEBUG_9P, "<<< RRENAME fid %d\n", fid->fid);
+
+ p9_free_req(clnt, req);
+error:
+ return err;
+}
+EXPORT_SYMBOL(p9_client_rename);
+
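
p9_client_statfs() issues a TSTATFS RPC (a 9P2000.L extension) and decodes the reply into struct p9_rstatfs. A hedged sketch of how a 9p filesystem's statfs handler might consume it; the surrounding fid lookup and any fsid handling are deliberately left out:

/* Illustrative only: copy a TSTATFS reply into the VFS kstatfs. */
static int example_v9fs_statfs(struct p9_fid *fid, struct kstatfs *buf)
{
	struct p9_rstatfs rs;
	int err;

	err = p9_client_statfs(fid, &rs);
	if (err)
		return err;

	buf->f_type    = rs.type;
	buf->f_bsize   = rs.bsize;
	buf->f_blocks  = rs.blocks;
	buf->f_bfree   = rs.bfree;
	buf->f_bavail  = rs.bavail;
	buf->f_files   = rs.files;
	buf->f_ffree   = rs.ffree;
	buf->f_namelen = rs.namelen;
	return 0;
}
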
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index e7541d5b0118..77d3aab4036b 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -341,7 +341,8 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
}
break;
case '?':
- if (proto_version != p9_proto_2000u)
+ if ((proto_version != p9_proto_2000u) &&
+ (proto_version != p9_proto_2000L))
return 0;
break;
default:
@@ -488,7 +489,8 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
}
break;
case '?':
- if (proto_version != p9_proto_2000u)
+ if ((proto_version != p9_proto_2000u) &&
+ (proto_version != p9_proto_2000L))
return 0;
break;
default:
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 7eb78ecc1618..dcfbe99ff81c 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -137,7 +137,7 @@ static void req_done(struct virtqueue *vq)
P9_DPRINTK(P9_DEBUG_TRANS, ": request done\n");
- while ((rc = chan->vq->vq_ops->get_buf(chan->vq, &len)) != NULL) {
+ while ((rc = virtqueue_get_buf(chan->vq, &len)) != NULL) {
P9_DPRINTK(P9_DEBUG_TRANS, ": rc %p\n", rc);
P9_DPRINTK(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag);
req = p9_tag_lookup(chan->client, rc->tag);
@@ -209,13 +209,13 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
req->status = REQ_STATUS_SENT;
- if (chan->vq->vq_ops->add_buf(chan->vq, chan->sg, out, in, req->tc) < 0) {
+ if (virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc) < 0) {
P9_DPRINTK(P9_DEBUG_TRANS,
"9p debug: virtio rpc add_buf returned failure");
return -EIO;
}
- chan->vq->vq_ops->kick(chan->vq);
+ virtqueue_kick(chan->vq);
P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request kicked\n");
return 0;
diff --git a/net/Kconfig b/net/Kconfig
index 041c35edb763..0d68b40fc0e6 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -186,6 +186,7 @@ source "net/sctp/Kconfig"
source "net/rds/Kconfig"
source "net/tipc/Kconfig"
source "net/atm/Kconfig"
+source "net/l2tp/Kconfig"
source "net/802/Kconfig"
source "net/bridge/Kconfig"
source "net/dsa/Kconfig"
@@ -203,6 +204,11 @@ source "net/ieee802154/Kconfig"
source "net/sched/Kconfig"
source "net/dcb/Kconfig"
+config RPS
+ boolean
+ depends on SMP && SYSFS
+ default y
+
menu "Network testing"
config NET_PKTGEN
@@ -275,5 +281,7 @@ source "net/wimax/Kconfig"
source "net/rfkill/Kconfig"
source "net/9p/Kconfig"
+source "net/caif/Kconfig"
+
endif # if NET
diff --git a/net/Makefile b/net/Makefile
index 1542e7268a7b..cb7bdc1210cb 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_BT) += bluetooth/
obj-$(CONFIG_SUNRPC) += sunrpc/
obj-$(CONFIG_AF_RXRPC) += rxrpc/
obj-$(CONFIG_ATM) += atm/
+obj-$(CONFIG_L2TP) += l2tp/
obj-$(CONFIG_DECNET) += decnet/
obj-$(CONFIG_ECONET) += econet/
obj-$(CONFIG_PHONET) += phonet/
@@ -56,6 +57,7 @@ obj-$(CONFIG_NETLABEL) += netlabel/
obj-$(CONFIG_IUCV) += iucv/
obj-$(CONFIG_RFKILL) += rfkill/
obj-$(CONFIG_NET_9P) += 9p/
+obj-$(CONFIG_CAIF) += caif/
ifneq ($(CONFIG_DCB),)
obj-y += dcb/
endif
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 7b02967fbbe7..c410b93fda2e 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -782,7 +782,7 @@ static int atif_ioctl(int cmd, void __user *arg)
atrtr_create(&rtdef, dev);
}
}
- dev_mc_add(dev, aarp_mcast, 6, 1);
+ dev_mc_add_global(dev, aarp_mcast);
return 0;
case SIOCGIFADDR:
diff --git a/net/atm/common.c b/net/atm/common.c
index 97ed94aa0cbc..b43feb1a3995 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -90,10 +90,13 @@ static void vcc_sock_destruct(struct sock *sk)
static void vcc_def_wakeup(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
- if (sk_has_sleeper(sk))
- wake_up(sk->sk_sleep);
- read_unlock(&sk->sk_callback_lock);
+ struct socket_wq *wq;
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up(&wq->wait);
+ rcu_read_unlock();
}
static inline int vcc_writable(struct sock *sk)
@@ -106,16 +109,19 @@ static inline int vcc_writable(struct sock *sk)
static void vcc_write_space(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
+ struct socket_wq *wq;
+
+ rcu_read_lock();
if (vcc_writable(sk)) {
- if (sk_has_sleeper(sk))
- wake_up_interruptible(sk->sk_sleep);
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible(&wq->wait);
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
}
- read_unlock(&sk->sk_callback_lock);
+ rcu_read_unlock();
}
static struct proto vcc_proto = {
@@ -549,7 +555,7 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
}
eff = (size+3) & ~3; /* align to word boundary */
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
error = 0;
while (!(skb = alloc_tx(vcc, eff))) {
if (m->msg_flags & MSG_DONTWAIT) {
@@ -568,9 +574,9 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
send_sig(SIGPIPE, current, 0);
break;
}
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
if (error)
goto out;
skb->dev = NULL; /* for paths shared with net_device interfaces */
@@ -595,7 +601,7 @@ unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
struct atm_vcc *vcc;
unsigned int mask;
- sock_poll_wait(file, sk->sk_sleep, wait);
+ sock_poll_wait(file, sk_sleep(sk), wait);
mask = 0;
vcc = ATM_SD(sock);
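
The ATM wakeup callbacks above are converted from sk->sk_sleep guarded by sk_callback_lock to the RCU-protected socket_wq introduced elsewhere in this series: take rcu_read_lock(), dereference sk->sk_wq, and only wake if wq_has_sleeper() sees a waiter. The same pattern, as a hedged generic sketch of a write-space callback:

#include <net/sock.h>

/* Generic "writable again" callback following the pattern above. */
static void example_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible(&wq->wait);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}
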
diff --git a/net/atm/proc.c b/net/atm/proc.c
index 696e218436e5..6262aeae398e 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -407,7 +407,6 @@ EXPORT_SYMBOL(atm_proc_root);
int atm_proc_dev_register(struct atm_dev *dev)
{
- int digits, num;
int error;
/* No proc info */
@@ -415,16 +414,9 @@ int atm_proc_dev_register(struct atm_dev *dev)
return 0;
error = -ENOMEM;
- digits = 0;
- for (num = dev->number; num; num /= 10)
- digits++;
- if (!digits)
- digits++;
-
- dev->proc_name = kmalloc(strlen(dev->type) + digits + 2, GFP_KERNEL);
+ dev->proc_name = kasprintf(GFP_KERNEL, "%s:%d", dev->type, dev->number);
if (!dev->proc_name)
goto err_out;
- sprintf(dev->proc_name, "%s:%d", dev->type, dev->number);
dev->proc_entry = proc_create_data(dev->proc_name, 0, atm_proc_root,
&proc_atm_dev_ops, dev);
diff --git a/net/atm/signaling.c b/net/atm/signaling.c
index 6ba6e466ee54..509c8ac02b63 100644
--- a/net/atm/signaling.c
+++ b/net/atm/signaling.c
@@ -131,7 +131,7 @@ static int sigd_send(struct atm_vcc *vcc, struct sk_buff *skb)
}
sk->sk_ack_backlog++;
skb_queue_tail(&sk->sk_receive_queue, skb);
- pr_debug("waking sk->sk_sleep 0x%p\n", sk->sk_sleep);
+ pr_debug("waking sk_sleep(sk) 0x%p\n", sk_sleep(sk));
sk->sk_state_change(sk);
as_indicate_complete:
release_sock(sk);
diff --git a/net/atm/svc.c b/net/atm/svc.c
index 3ba9a45a51ac..754ee4791d96 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -49,14 +49,14 @@ static void svc_disconnect(struct atm_vcc *vcc)
pr_debug("%p\n", vcc);
if (test_bit(ATM_VF_REGIS, &vcc->flags)) {
- prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
sigd_enq(vcc, as_close, NULL, NULL, NULL);
while (!test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) {
schedule();
- prepare_to_wait(sk->sk_sleep, &wait,
+ prepare_to_wait(sk_sleep(sk), &wait,
TASK_UNINTERRUPTIBLE);
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
}
/* beware - socket is still in use by atmsigd until the last
as_indicate has been answered */
@@ -125,13 +125,13 @@ static int svc_bind(struct socket *sock, struct sockaddr *sockaddr,
}
vcc->local = *addr;
set_bit(ATM_VF_WAITING, &vcc->flags);
- prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
sigd_enq(vcc, as_bind, NULL, NULL, &vcc->local);
while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
schedule();
- prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
clear_bit(ATM_VF_REGIS, &vcc->flags); /* doesn't count */
if (!sigd) {
error = -EUNATCH;
@@ -201,10 +201,10 @@ static int svc_connect(struct socket *sock, struct sockaddr *sockaddr,
}
vcc->remote = *addr;
set_bit(ATM_VF_WAITING, &vcc->flags);
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
sigd_enq(vcc, as_connect, NULL, NULL, &vcc->remote);
if (flags & O_NONBLOCK) {
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
sock->state = SS_CONNECTING;
error = -EINPROGRESS;
goto out;
@@ -213,7 +213,7 @@ static int svc_connect(struct socket *sock, struct sockaddr *sockaddr,
while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
schedule();
if (!signal_pending(current)) {
- prepare_to_wait(sk->sk_sleep, &wait,
+ prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
continue;
}
@@ -232,14 +232,14 @@ static int svc_connect(struct socket *sock, struct sockaddr *sockaddr,
*/
sigd_enq(vcc, as_close, NULL, NULL, NULL);
while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
- prepare_to_wait(sk->sk_sleep, &wait,
+ prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
schedule();
}
if (!sk->sk_err)
while (!test_bit(ATM_VF_RELEASED, &vcc->flags) &&
sigd) {
- prepare_to_wait(sk->sk_sleep, &wait,
+ prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
schedule();
}
@@ -250,7 +250,7 @@ static int svc_connect(struct socket *sock, struct sockaddr *sockaddr,
error = -EINTR;
break;
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
if (error)
goto out;
if (!sigd) {
@@ -302,13 +302,13 @@ static int svc_listen(struct socket *sock, int backlog)
goto out;
}
set_bit(ATM_VF_WAITING, &vcc->flags);
- prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
sigd_enq(vcc, as_listen, NULL, NULL, &vcc->local);
while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
schedule();
- prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
if (!sigd) {
error = -EUNATCH;
goto out;
@@ -343,7 +343,7 @@ static int svc_accept(struct socket *sock, struct socket *newsock, int flags)
while (1) {
DEFINE_WAIT(wait);
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
while (!(skb = skb_dequeue(&sk->sk_receive_queue)) &&
sigd) {
if (test_bit(ATM_VF_RELEASED, &old_vcc->flags))
@@ -363,10 +363,10 @@ static int svc_accept(struct socket *sock, struct socket *newsock, int flags)
error = -ERESTARTSYS;
break;
}
- prepare_to_wait(sk->sk_sleep, &wait,
+ prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
if (error)
goto out;
if (!skb) {
@@ -392,17 +392,17 @@ static int svc_accept(struct socket *sock, struct socket *newsock, int flags)
}
/* wait should be short, so we ignore the non-blocking flag */
set_bit(ATM_VF_WAITING, &new_vcc->flags);
- prepare_to_wait(sk_atm(new_vcc)->sk_sleep, &wait,
+ prepare_to_wait(sk_sleep(sk_atm(new_vcc)), &wait,
TASK_UNINTERRUPTIBLE);
sigd_enq(new_vcc, as_accept, old_vcc, NULL, NULL);
while (test_bit(ATM_VF_WAITING, &new_vcc->flags) && sigd) {
release_sock(sk);
schedule();
lock_sock(sk);
- prepare_to_wait(sk_atm(new_vcc)->sk_sleep, &wait,
+ prepare_to_wait(sk_sleep(sk_atm(new_vcc)), &wait,
TASK_UNINTERRUPTIBLE);
}
- finish_wait(sk_atm(new_vcc)->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk_atm(new_vcc)), &wait);
if (!sigd) {
error = -EUNATCH;
goto out;
@@ -438,14 +438,14 @@ int svc_change_qos(struct atm_vcc *vcc, struct atm_qos *qos)
DEFINE_WAIT(wait);
set_bit(ATM_VF_WAITING, &vcc->flags);
- prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
sigd_enq2(vcc, as_modify, NULL, NULL, &vcc->local, qos, 0);
while (test_bit(ATM_VF_WAITING, &vcc->flags) &&
!test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) {
schedule();
- prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
if (!sigd)
return -EUNATCH;
return -sk->sk_err;
@@ -534,20 +534,20 @@ static int svc_addparty(struct socket *sock, struct sockaddr *sockaddr,
lock_sock(sk);
set_bit(ATM_VF_WAITING, &vcc->flags);
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
sigd_enq(vcc, as_addparty, NULL, NULL,
(struct sockaddr_atmsvc *) sockaddr);
if (flags & O_NONBLOCK) {
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
error = -EINPROGRESS;
goto out;
}
pr_debug("added wait queue\n");
while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
schedule();
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
error = xchg(&sk->sk_err_soft, 0);
out:
release_sock(sk);
@@ -563,13 +563,13 @@ static int svc_dropparty(struct socket *sock, int ep_ref)
lock_sock(sk);
set_bit(ATM_VF_WAITING, &vcc->flags);
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
sigd_enq2(vcc, as_dropparty, NULL, NULL, NULL, NULL, ep_ref);
while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
schedule();
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
if (!sigd) {
error = -EUNATCH;
goto out;
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 65c5801261f9..cfdfd7e2a172 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1281,7 +1281,7 @@ static int __must_check ax25_connect(struct socket *sock,
DEFINE_WAIT(wait);
for (;;) {
- prepare_to_wait(sk->sk_sleep, &wait,
+ prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
if (sk->sk_state != TCP_SYN_SENT)
break;
@@ -1294,7 +1294,7 @@ static int __must_check ax25_connect(struct socket *sock,
err = -ERESTARTSYS;
break;
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
if (err)
goto out_release;
@@ -1346,7 +1346,7 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags)
* hooked into the SABM we saved
*/
for (;;) {
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
skb = skb_dequeue(&sk->sk_receive_queue);
if (skb)
break;
@@ -1364,7 +1364,7 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags)
err = -ERESTARTSYS;
break;
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
if (err)
goto out;
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index ed371684c133..ee3b3049d385 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -43,6 +43,19 @@ config BT_L2CAP
Say Y here to compile L2CAP support into the kernel or say M to
compile it as module (l2cap).
+config BT_L2CAP_EXT_FEATURES
+ bool "L2CAP Extended Features support (EXPERIMENTAL)"
+ depends on BT_L2CAP && EXPERIMENTAL
+ help
+ This option enables the L2CAP Extended Features support. These
+ new features include the Enhanced Retransmission and Streaming
+ Modes, the Frame Check Sequence (FCS), and Segmentation and
+ Reassembly (SAR) for L2CAP packets. They are required for the
+ new Alternate MAC/PHY and the Bluetooth Medical Profile.
+
+ You should say N unless you know what you are doing. Note that
+ this support is still experimental.
+
config BT_SCO
tristate "SCO links support"
depends on BT
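
The BT_L2CAP_EXT_FEATURES option added above only flips the default of the
enable_ertm module parameter; applications still select the channel mode per
socket through the L2CAP_OPTIONS socket option, which this series extends with
max_tx and txwin_size. Below is a minimal userspace sketch of requesting ERTM
before connecting; it assumes the BlueZ <bluetooth/l2cap.h> definitions, and
the PSM and remote address are placeholders.

/*
 * Minimal sketch: request L2CAP ERTM from userspace via L2CAP_OPTIONS.
 * Assumes the BlueZ headers; link with -lbluetooth for str2ba().
 * The remote address and PSM are placeholders.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/l2cap.h>

int main(void)
{
	struct sockaddr_l2 addr = { 0 };
	struct l2cap_options opts;
	socklen_t optlen = sizeof(opts);
	int sk;

	sk = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
	if (sk < 0)
		return 1;

	/* read the current defaults, then ask for ERTM before connect() */
	if (getsockopt(sk, SOL_L2CAP, L2CAP_OPTIONS, &opts, &optlen) < 0)
		goto fail;
	opts.mode = L2CAP_MODE_ERTM;	/* honoured only when enable_ertm is set */
	if (setsockopt(sk, SOL_L2CAP, L2CAP_OPTIONS, &opts, sizeof(opts)) < 0)
		goto fail;

	addr.l2_family = AF_BLUETOOTH;
	addr.l2_psm = htobs(0x1001);			/* placeholder PSM */
	str2ba("00:11:22:33:44:55", &addr.l2_bdaddr);	/* placeholder address */

	if (connect(sk, (struct sockaddr *) &addr, sizeof(addr)) < 0)
		perror("connect");

	close(sk);
	return 0;

fail:
	perror("L2CAP_OPTIONS");
	close(sk);
	return 1;
}

Note that with this series a SOCK_STREAM L2CAP socket defaults to ERTM whenever
enable_ertm is set, so the explicit setsockopt() is mainly needed for
SOCK_SEQPACKET sockets.
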
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 404a8500fd03..421c45bd1b95 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -288,7 +288,7 @@ unsigned int bt_sock_poll(struct file * file, struct socket *sock, poll_table *w
BT_DBG("sock %p, sk %p", sock, sk);
- poll_wait(file, sk->sk_sleep, wait);
+ poll_wait(file, sk_sleep(sk), wait);
if (sk->sk_state == BT_LISTEN)
return bt_accept_poll(sk);
@@ -378,7 +378,7 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
BT_DBG("sk %p", sk);
- add_wait_queue(sk->sk_sleep, &wait);
+ add_wait_queue(sk_sleep(sk), &wait);
while (sk->sk_state != state) {
set_current_state(TASK_INTERRUPTIBLE);
@@ -401,7 +401,7 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
break;
}
set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sk_sleep, &wait);
+ remove_wait_queue(sk_sleep(sk), &wait);
return err;
}
EXPORT_SYMBOL(bt_sock_wait_state);
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 8062dad6d10d..f10b41fb05a0 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -474,7 +474,7 @@ static int bnep_session(void *arg)
set_user_nice(current, -15);
init_waitqueue_entry(&wait, current);
- add_wait_queue(sk->sk_sleep, &wait);
+ add_wait_queue(sk_sleep(sk), &wait);
while (!atomic_read(&s->killed)) {
set_current_state(TASK_INTERRUPTIBLE);
@@ -496,7 +496,7 @@ static int bnep_session(void *arg)
schedule();
}
set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sk_sleep, &wait);
+ remove_wait_queue(sk_sleep(sk), &wait);
/* Cleanup session */
down_write(&bnep_session_sem);
@@ -507,7 +507,7 @@ static int bnep_session(void *arg)
/* Wakeup user-space polling for socket errors */
s->sock->sk->sk_err = EUNATCH;
- wake_up_interruptible(s->sock->sk->sk_sleep);
+ wake_up_interruptible(sk_sleep(s->sock->sk));
/* Release the socket */
fput(s->sock->file);
@@ -638,7 +638,7 @@ int bnep_del_connection(struct bnep_conndel_req *req)
/* Kill session thread */
atomic_inc(&s->killed);
- wake_up_interruptible(s->sock->sk->sk_sleep);
+ wake_up_interruptible(sk_sleep(s->sock->sk));
} else
err = -ENOENT;
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index 5643a2391e76..0faad5ce6dc4 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -88,7 +88,7 @@ static void bnep_net_set_mc_list(struct net_device *dev)
memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN);
r->len = htons(ETH_ALEN * 2);
} else {
- struct dev_mc_list *dmi = dev->mc_list;
+ struct netdev_hw_addr *ha;
int i, len = skb->len;
if (dev->flags & IFF_BROADCAST) {
@@ -98,18 +98,18 @@ static void bnep_net_set_mc_list(struct net_device *dev)
/* FIXME: We should group addresses here. */
- for (i = 0;
- i < netdev_mc_count(dev) && i < BNEP_MAX_MULTICAST_FILTERS;
- i++) {
- memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN);
- memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN);
- dmi = dmi->next;
+ i = 0;
+ netdev_for_each_mc_addr(ha, dev) {
+ if (i == BNEP_MAX_MULTICAST_FILTERS)
+ break;
+ memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN);
+ memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN);
}
r->len = htons(skb->len - len);
}
skb_queue_tail(&sk->sk_write_queue, skb);
- wake_up_interruptible(sk->sk_sleep);
+ wake_up_interruptible(sk_sleep(sk));
#endif
}
@@ -193,11 +193,11 @@ static netdev_tx_t bnep_net_xmit(struct sk_buff *skb,
/*
* We cannot send L2CAP packets from here as we are potentially in a bh.
* So we have to queue them and wake up session thread which is sleeping
- * on the sk->sk_sleep.
+ * on the sk_sleep(sk).
*/
dev->trans_start = jiffies;
skb_queue_tail(&sk->sk_write_queue, skb);
- wake_up_interruptible(sk->sk_sleep);
+ wake_up_interruptible(sk_sleep(sk));
if (skb_queue_len(&sk->sk_write_queue) >= BNEP_TX_QUEUE_LEN) {
BT_DBG("tx queue is full");
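
The comment above describes the pattern that most of the sk_sleep() conversions
in this series touch: the transmit path merely queues the skb and wakes the
socket's wait queue, now obtained through sk_sleep(sk) rather than by
dereferencing sk->sk_sleep directly, and a session kthread sleeping on that
queue does the actual sending. The following is a schematic sketch of that
producer/consumer pairing; the function names are illustrative, not the exact
BNEP code.

/* Schematic sketch of the queue-and-wake idiom used by the BNEP, CMTP and
 * HIDP session threads; names here are illustrative, not kernel functions. */
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <net/sock.h>

/* producer side, e.g. ndo_start_xmit, possibly running in bh context */
static void example_queue_tx(struct sock *sk, struct sk_buff *skb)
{
	skb_queue_tail(&sk->sk_write_queue, skb);
	wake_up_interruptible(sk_sleep(sk));	/* was sk->sk_sleep */
}

/* consumer side, the session kernel thread */
static int example_session(struct sock *sk, atomic_t *terminate)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sk_buff *skb;

	add_wait_queue(sk_sleep(sk), &wait);
	while (!atomic_read(terminate)) {
		set_current_state(TASK_INTERRUPTIBLE);

		while ((skb = skb_dequeue(&sk->sk_write_queue)))
			kfree_skb(skb);		/* real code transmits here */

		schedule();
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return 0;
}

The only functional change the series makes at these call sites is the
accessor; the queue-and-wake structure itself is unchanged.
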
diff --git a/net/bluetooth/cmtp/cmtp.h b/net/bluetooth/cmtp/cmtp.h
index e4663aa14d26..785e79e953c5 100644
--- a/net/bluetooth/cmtp/cmtp.h
+++ b/net/bluetooth/cmtp/cmtp.h
@@ -125,7 +125,7 @@ static inline void cmtp_schedule(struct cmtp_session *session)
{
struct sock *sk = session->sock->sk;
- wake_up_interruptible(sk->sk_sleep);
+ wake_up_interruptible(sk_sleep(sk));
}
/* CMTP init defines */
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 0073ec8495da..d4c6af082d48 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -284,7 +284,7 @@ static int cmtp_session(void *arg)
set_user_nice(current, -15);
init_waitqueue_entry(&wait, current);
- add_wait_queue(sk->sk_sleep, &wait);
+ add_wait_queue(sk_sleep(sk), &wait);
while (!atomic_read(&session->terminate)) {
set_current_state(TASK_INTERRUPTIBLE);
@@ -301,7 +301,7 @@ static int cmtp_session(void *arg)
schedule();
}
set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sk_sleep, &wait);
+ remove_wait_queue(sk_sleep(sk), &wait);
down_write(&cmtp_session_sem);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 4ad23192c7a5..5e83f8e0877a 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -37,6 +37,7 @@
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
+#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
@@ -928,6 +929,10 @@ int hci_register_dev(struct hci_dev *hdev)
write_unlock_bh(&hci_dev_list_lock);
+ hdev->workqueue = create_singlethread_workqueue(hdev->name);
+ if (!hdev->workqueue)
+ goto nomem;
+
hci_register_sysfs(hdev);
hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
@@ -942,6 +947,13 @@ int hci_register_dev(struct hci_dev *hdev)
hci_notify(hdev, HCI_DEV_REG);
return id;
+
+nomem:
+ write_lock_bh(&hci_dev_list_lock);
+ list_del(&hdev->list);
+ write_unlock_bh(&hci_dev_list_lock);
+
+ return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
@@ -970,6 +982,8 @@ int hci_unregister_dev(struct hci_dev *hdev)
hci_unregister_sysfs(hdev);
+ destroy_workqueue(hdev->workqueue);
+
__hci_dev_put(hdev);
return 0;
@@ -1260,7 +1274,7 @@ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
hdr->dlen = cpu_to_le16(len);
}
-int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
+void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
struct hci_dev *hdev = conn->hdev;
struct sk_buff *list;
@@ -1303,23 +1317,18 @@ int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
tasklet_schedule(&hdev->tx_task);
- return 0;
+ return;
}
EXPORT_SYMBOL(hci_send_acl);
/* Send SCO data */
-int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
+void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
struct hci_dev *hdev = conn->hdev;
struct hci_sco_hdr hdr;
BT_DBG("%s len %d", hdev->name, skb->len);
- if (skb->len > hdev->sco_mtu) {
- kfree_skb(skb);
- return -EINVAL;
- }
-
hdr.handle = cpu_to_le16(conn->handle);
hdr.dlen = skb->len;
@@ -1332,8 +1341,6 @@ int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
skb_queue_tail(&conn->data_q, skb);
tasklet_schedule(&hdev->tx_task);
-
- return 0;
}
EXPORT_SYMBOL(hci_send_sco);
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 0e8e1a59856c..463ffa4fe042 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -14,8 +14,6 @@ static struct class *bt_class;
struct dentry *bt_debugfs = NULL;
EXPORT_SYMBOL_GPL(bt_debugfs);
-static struct workqueue_struct *bt_workq;
-
static inline char *link_typetostr(int type)
{
switch (type) {
@@ -161,14 +159,14 @@ void hci_conn_add_sysfs(struct hci_conn *conn)
{
BT_DBG("conn %p", conn);
- queue_work(bt_workq, &conn->work_add);
+ queue_work(conn->hdev->workqueue, &conn->work_add);
}
void hci_conn_del_sysfs(struct hci_conn *conn)
{
BT_DBG("conn %p", conn);
- queue_work(bt_workq, &conn->work_del);
+ queue_work(conn->hdev->workqueue, &conn->work_del);
}
static inline char *host_bustostr(int bus)
@@ -283,11 +281,9 @@ static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *at
static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct hci_dev *hdev = dev_get_drvdata(dev);
- char *ptr;
- __u32 val;
+ unsigned long val;
- val = simple_strtoul(buf, &ptr, 10);
- if (ptr == buf)
+ if (strict_strtoul(buf, 0, &val) < 0)
return -EINVAL;
if (val != 0 && (val < 500 || val > 3600000))
@@ -307,11 +303,9 @@ static ssize_t show_sniff_max_interval(struct device *dev, struct device_attribu
static ssize_t store_sniff_max_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct hci_dev *hdev = dev_get_drvdata(dev);
- char *ptr;
- __u16 val;
+ unsigned long val;
- val = simple_strtoul(buf, &ptr, 10);
- if (ptr == buf)
+ if (strict_strtoul(buf, 0, &val) < 0)
return -EINVAL;
if (val < 0x0002 || val > 0xFFFE || val % 2)
@@ -334,11 +328,9 @@ static ssize_t show_sniff_min_interval(struct device *dev, struct device_attribu
static ssize_t store_sniff_min_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct hci_dev *hdev = dev_get_drvdata(dev);
- char *ptr;
- __u16 val;
+ unsigned long val;
- val = simple_strtoul(buf, &ptr, 10);
- if (ptr == buf)
+ if (strict_strtoul(buf, 0, &val) < 0)
return -EINVAL;
if (val < 0x0002 || val > 0xFFFE || val % 2)
@@ -487,17 +479,11 @@ void hci_unregister_sysfs(struct hci_dev *hdev)
int __init bt_sysfs_init(void)
{
- bt_workq = create_singlethread_workqueue("bluetooth");
- if (!bt_workq)
- return -ENOMEM;
-
bt_debugfs = debugfs_create_dir("bluetooth", NULL);
bt_class = class_create(THIS_MODULE, "bluetooth");
- if (IS_ERR(bt_class)) {
- destroy_workqueue(bt_workq);
+ if (IS_ERR(bt_class))
return PTR_ERR(bt_class);
- }
return 0;
}
@@ -507,6 +493,4 @@ void bt_sysfs_cleanup(void)
class_destroy(bt_class);
debugfs_remove_recursive(bt_debugfs);
-
- destroy_workqueue(bt_workq);
}
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 280529ad9274..bfe641b7dfaf 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -561,8 +561,8 @@ static int hidp_session(void *arg)
init_waitqueue_entry(&ctrl_wait, current);
init_waitqueue_entry(&intr_wait, current);
- add_wait_queue(ctrl_sk->sk_sleep, &ctrl_wait);
- add_wait_queue(intr_sk->sk_sleep, &intr_wait);
+ add_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait);
+ add_wait_queue(sk_sleep(intr_sk), &intr_wait);
while (!atomic_read(&session->terminate)) {
set_current_state(TASK_INTERRUPTIBLE);
@@ -584,8 +584,8 @@ static int hidp_session(void *arg)
schedule();
}
set_current_state(TASK_RUNNING);
- remove_wait_queue(intr_sk->sk_sleep, &intr_wait);
- remove_wait_queue(ctrl_sk->sk_sleep, &ctrl_wait);
+ remove_wait_queue(sk_sleep(intr_sk), &intr_wait);
+ remove_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait);
down_write(&hidp_session_sem);
@@ -609,7 +609,7 @@ static int hidp_session(void *arg)
fput(session->intr_sock->file);
- wait_event_timeout(*(ctrl_sk->sk_sleep),
+ wait_event_timeout(*(sk_sleep(ctrl_sk)),
(ctrl_sk->sk_state == BT_CLOSED), msecs_to_jiffies(500));
fput(session->ctrl_sock->file);
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index a4e215d50c10..8d934a19da0a 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -164,8 +164,8 @@ static inline void hidp_schedule(struct hidp_session *session)
struct sock *ctrl_sk = session->ctrl_sock->sk;
struct sock *intr_sk = session->intr_sock->sk;
- wake_up_interruptible(ctrl_sk->sk_sleep);
- wake_up_interruptible(intr_sk->sk_sleep);
+ wake_up_interruptible(sk_sleep(ctrl_sk));
+ wake_up_interruptible(sk_sleep(intr_sk));
}
/* HIDP init defines */
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 9753b690a8b3..673a36886716 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -55,18 +55,27 @@
#define VERSION "2.14"
+#ifdef CONFIG_BT_L2CAP_EXT_FEATURES
+static int enable_ertm = 1;
+#else
static int enable_ertm = 0;
+#endif
static int max_transmit = L2CAP_DEFAULT_MAX_TX;
+static int tx_window = L2CAP_DEFAULT_TX_WINDOW;
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
static u8 l2cap_fixed_chan[8] = { 0x02, };
static const struct proto_ops l2cap_sock_ops;
+static struct workqueue_struct *_busy_wq;
+
static struct bt_sock_list l2cap_sk_list = {
.lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
};
+static void l2cap_busy_work(struct work_struct *work);
+
static void __l2cap_sock_close(struct sock *sk, int reason);
static void l2cap_sock_close(struct sock *sk);
static void l2cap_sock_kill(struct sock *sk);
@@ -219,7 +228,7 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct so
l2cap_pi(sk)->conn = conn;
- if (sk->sk_type == SOCK_SEQPACKET) {
+ if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
/* Alloc CID for connection-oriented socket */
l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
} else if (sk->sk_type == SOCK_DGRAM) {
@@ -325,19 +334,19 @@ static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
return id;
}
-static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
+static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
BT_DBG("code 0x%2.2x", code);
if (!skb)
- return -ENOMEM;
+ return;
- return hci_send_acl(conn->hcon, skb, 0);
+ hci_send_acl(conn->hcon, skb, 0);
}
-static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
+static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
{
struct sk_buff *skb;
struct l2cap_hdr *lh;
@@ -352,9 +361,19 @@ static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
count = min_t(unsigned int, conn->mtu, hlen);
control |= L2CAP_CTRL_FRAME_TYPE;
+ if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
+ control |= L2CAP_CTRL_FINAL;
+ pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
+ }
+
+ if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
+ control |= L2CAP_CTRL_POLL;
+ pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
+ }
+
skb = bt_skb_alloc(count, GFP_ATOMIC);
if (!skb)
- return -ENOMEM;
+ return;
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
@@ -366,19 +385,20 @@ static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
put_unaligned_le16(fcs, skb_put(skb, 2));
}
- return hci_send_acl(pi->conn->hcon, skb, 0);
+ hci_send_acl(pi->conn->hcon, skb, 0);
}
-static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
+static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
{
- if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
+ if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
control |= L2CAP_SUPER_RCV_NOT_READY;
- else
+ pi->conn_state |= L2CAP_CONN_RNR_SENT;
+ } else
control |= L2CAP_SUPER_RCV_READY;
control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
- return l2cap_send_sframe(pi, control);
+ l2cap_send_sframe(pi, control);
}
static void l2cap_do_start(struct sock *sk)
@@ -437,7 +457,8 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
bh_lock_sock(sk);
- if (sk->sk_type != SOCK_SEQPACKET) {
+ if (sk->sk_type != SOCK_SEQPACKET &&
+ sk->sk_type != SOCK_STREAM) {
bh_unlock_sock(sk);
continue;
}
@@ -497,7 +518,8 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
bh_lock_sock(sk);
- if (sk->sk_type != SOCK_SEQPACKET) {
+ if (sk->sk_type != SOCK_SEQPACKET &&
+ sk->sk_type != SOCK_STREAM) {
l2cap_sock_clear_timer(sk);
sk->sk_state = BT_CONNECTED;
sk->sk_state_change(sk);
@@ -706,7 +728,8 @@ static void __l2cap_sock_close(struct sock *sk, int reason)
case BT_CONNECTED:
case BT_CONFIG:
- if (sk->sk_type == SOCK_SEQPACKET) {
+ if (sk->sk_type == SOCK_SEQPACKET ||
+ sk->sk_type == SOCK_STREAM) {
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
sk->sk_state = BT_DISCONN;
@@ -717,7 +740,8 @@ static void __l2cap_sock_close(struct sock *sk, int reason)
break;
case BT_CONNECT2:
- if (sk->sk_type == SOCK_SEQPACKET) {
+ if (sk->sk_type == SOCK_SEQPACKET ||
+ sk->sk_type == SOCK_STREAM) {
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
struct l2cap_conn_rsp rsp;
__u16 result;
@@ -772,14 +796,21 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
pi->omtu = l2cap_pi(parent)->omtu;
pi->mode = l2cap_pi(parent)->mode;
pi->fcs = l2cap_pi(parent)->fcs;
+ pi->max_tx = l2cap_pi(parent)->max_tx;
+ pi->tx_win = l2cap_pi(parent)->tx_win;
pi->sec_level = l2cap_pi(parent)->sec_level;
pi->role_switch = l2cap_pi(parent)->role_switch;
pi->force_reliable = l2cap_pi(parent)->force_reliable;
} else {
pi->imtu = L2CAP_DEFAULT_MTU;
pi->omtu = 0;
- pi->mode = L2CAP_MODE_BASIC;
+ if (enable_ertm && sk->sk_type == SOCK_STREAM)
+ pi->mode = L2CAP_MODE_ERTM;
+ else
+ pi->mode = L2CAP_MODE_BASIC;
+ pi->max_tx = max_transmit;
pi->fcs = L2CAP_FCS_CRC16;
+ pi->tx_win = tx_window;
pi->sec_level = BT_SECURITY_LOW;
pi->role_switch = 0;
pi->force_reliable = 0;
@@ -790,6 +821,7 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
skb_queue_head_init(TX_QUEUE(sk));
skb_queue_head_init(SREJ_QUEUE(sk));
+ skb_queue_head_init(BUSY_QUEUE(sk));
INIT_LIST_HEAD(SREJ_LIST(sk));
}
@@ -833,7 +865,7 @@ static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
sock->state = SS_UNCONNECTED;
- if (sock->type != SOCK_SEQPACKET &&
+ if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
@@ -981,7 +1013,8 @@ static int l2cap_do_connect(struct sock *sk)
l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
if (hcon->state == BT_CONNECTED) {
- if (sk->sk_type != SOCK_SEQPACKET) {
+ if (sk->sk_type != SOCK_SEQPACKET &&
+ sk->sk_type != SOCK_STREAM) {
l2cap_sock_clear_timer(sk);
sk->sk_state = BT_CONNECTED;
} else
@@ -1015,7 +1048,8 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
lock_sock(sk);
- if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
+ if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
+ && !la.l2_psm) {
err = -EINVAL;
goto done;
}
@@ -1079,7 +1113,8 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
lock_sock(sk);
- if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
+ if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
+ || sk->sk_state != BT_BOUND) {
err = -EBADFD;
goto done;
}
@@ -1147,7 +1182,7 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int fl
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
- add_wait_queue_exclusive(sk->sk_sleep, &wait);
+ add_wait_queue_exclusive(sk_sleep(sk), &wait);
while (!(nsk = bt_accept_dequeue(sk, newsock))) {
set_current_state(TASK_INTERRUPTIBLE);
if (!timeo) {
@@ -1170,7 +1205,7 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int fl
}
}
set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sk_sleep, &wait);
+ remove_wait_queue(sk_sleep(sk), &wait);
if (err)
goto done;
@@ -1207,10 +1242,40 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *l
return 0;
}
+static int __l2cap_wait_ack(struct sock *sk)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ int err = 0;
+ int timeo = HZ/5;
+
+ add_wait_queue(sk_sleep(sk), &wait);
+ while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (!timeo)
+ timeo = HZ/5;
+
+ if (signal_pending(current)) {
+ err = sock_intr_errno(timeo);
+ break;
+ }
+
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock(sk);
+
+ err = sock_error(sk);
+ if (err)
+ break;
+ }
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(sk_sleep(sk), &wait);
+ return err;
+}
+
static void l2cap_monitor_timeout(unsigned long arg)
{
struct sock *sk = (void *) arg;
- u16 control;
bh_lock_sock(sk);
if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
@@ -1222,15 +1287,13 @@ static void l2cap_monitor_timeout(unsigned long arg)
l2cap_pi(sk)->retry_count++;
__mod_monitor_timer();
- control = L2CAP_CTRL_POLL;
- l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
+ l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
bh_unlock_sock(sk);
}
static void l2cap_retrans_timeout(unsigned long arg)
{
struct sock *sk = (void *) arg;
- u16 control;
bh_lock_sock(sk);
l2cap_pi(sk)->retry_count = 1;
@@ -1238,8 +1301,7 @@ static void l2cap_retrans_timeout(unsigned long arg)
l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
- control = L2CAP_CTRL_POLL;
- l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
+ l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
bh_unlock_sock(sk);
}
@@ -1247,7 +1309,8 @@ static void l2cap_drop_acked_frames(struct sock *sk)
{
struct sk_buff *skb;
- while ((skb = skb_peek(TX_QUEUE(sk)))) {
+ while ((skb = skb_peek(TX_QUEUE(sk))) &&
+ l2cap_pi(sk)->unacked_frames) {
if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
break;
@@ -1263,18 +1326,13 @@ static void l2cap_drop_acked_frames(struct sock *sk)
return;
}
-static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
+static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
{
struct l2cap_pinfo *pi = l2cap_pi(sk);
- int err;
BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
- err = hci_send_acl(pi->conn->hcon, skb, 0);
- if (err < 0)
- kfree_skb(skb);
-
- return err;
+ hci_send_acl(pi->conn->hcon, skb, 0);
}
static int l2cap_streaming_send(struct sock *sk)
@@ -1282,7 +1340,6 @@ static int l2cap_streaming_send(struct sock *sk)
struct sk_buff *skb, *tx_skb;
struct l2cap_pinfo *pi = l2cap_pi(sk);
u16 control, fcs;
- int err;
while ((skb = sk->sk_send_head)) {
tx_skb = skb_clone(skb, GFP_ATOMIC);
@@ -1291,16 +1348,12 @@ static int l2cap_streaming_send(struct sock *sk)
control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
- if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
+ if (pi->fcs == L2CAP_FCS_CRC16) {
fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
}
- err = l2cap_do_send(sk, tx_skb);
- if (err < 0) {
- l2cap_send_disconn_req(pi->conn, sk);
- return err;
- }
+ l2cap_do_send(sk, tx_skb);
pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
@@ -1315,48 +1368,44 @@ static int l2cap_streaming_send(struct sock *sk)
return 0;
}
-static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
+static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
{
struct l2cap_pinfo *pi = l2cap_pi(sk);
struct sk_buff *skb, *tx_skb;
u16 control, fcs;
- int err;
skb = skb_peek(TX_QUEUE(sk));
- do {
- if (bt_cb(skb)->tx_seq != tx_seq) {
- if (skb_queue_is_last(TX_QUEUE(sk), skb))
- break;
- skb = skb_queue_next(TX_QUEUE(sk), skb);
- continue;
- }
+ if (!skb)
+ return;
- if (pi->remote_max_tx &&
- bt_cb(skb)->retries == pi->remote_max_tx) {
- l2cap_send_disconn_req(pi->conn, sk);
+ do {
+ if (bt_cb(skb)->tx_seq == tx_seq)
break;
- }
- tx_skb = skb_clone(skb, GFP_ATOMIC);
- bt_cb(skb)->retries++;
- control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
- control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
- | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
- put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
+ if (skb_queue_is_last(TX_QUEUE(sk), skb))
+ return;
- if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
- fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
- put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
- }
+ } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
- err = l2cap_do_send(sk, tx_skb);
- if (err < 0) {
- l2cap_send_disconn_req(pi->conn, sk);
- return err;
- }
- break;
- } while(1);
- return 0;
+ if (pi->remote_max_tx &&
+ bt_cb(skb)->retries == pi->remote_max_tx) {
+ l2cap_send_disconn_req(pi->conn, sk);
+ return;
+ }
+
+ tx_skb = skb_clone(skb, GFP_ATOMIC);
+ bt_cb(skb)->retries++;
+ control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
+ control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
+ | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
+ put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
+
+ if (pi->fcs == L2CAP_FCS_CRC16) {
+ fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
+ put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
+ }
+
+ l2cap_do_send(sk, tx_skb);
}
static int l2cap_ertm_send(struct sock *sk)
@@ -1364,13 +1413,13 @@ static int l2cap_ertm_send(struct sock *sk)
struct sk_buff *skb, *tx_skb;
struct l2cap_pinfo *pi = l2cap_pi(sk);
u16 control, fcs;
- int err;
+ int nsent = 0;
if (pi->conn_state & L2CAP_CONN_WAIT_F)
return 0;
while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
- !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
+ !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
if (pi->remote_max_tx &&
bt_cb(skb)->retries == pi->remote_max_tx) {
@@ -1383,35 +1432,97 @@ static int l2cap_ertm_send(struct sock *sk)
bt_cb(skb)->retries++;
control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
+ if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
+ control |= L2CAP_CTRL_FINAL;
+ pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
+ }
control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
| (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
- if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
+ if (pi->fcs == L2CAP_FCS_CRC16) {
fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
}
- err = l2cap_do_send(sk, tx_skb);
- if (err < 0) {
- l2cap_send_disconn_req(pi->conn, sk);
- return err;
- }
+ l2cap_do_send(sk, tx_skb);
+
__mod_retrans_timer();
bt_cb(skb)->tx_seq = pi->next_tx_seq;
pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
pi->unacked_frames++;
+ pi->frames_sent++;
if (skb_queue_is_last(TX_QUEUE(sk), skb))
sk->sk_send_head = NULL;
else
sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
+
+ nsent++;
}
- return 0;
+ return nsent;
+}
+
+static int l2cap_retransmit_frames(struct sock *sk)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ int ret;
+
+ spin_lock_bh(&pi->send_lock);
+
+ if (!skb_queue_empty(TX_QUEUE(sk)))
+ sk->sk_send_head = TX_QUEUE(sk)->next;
+
+ pi->next_tx_seq = pi->expected_ack_seq;
+ ret = l2cap_ertm_send(sk);
+
+ spin_unlock_bh(&pi->send_lock);
+
+ return ret;
+}
+
+static void l2cap_send_ack(struct l2cap_pinfo *pi)
+{
+ struct sock *sk = (struct sock *)pi;
+ u16 control = 0;
+ int nframes;
+
+ control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+
+ if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+ control |= L2CAP_SUPER_RCV_NOT_READY;
+ pi->conn_state |= L2CAP_CONN_RNR_SENT;
+ l2cap_send_sframe(pi, control);
+ return;
+ }
+
+ spin_lock_bh(&pi->send_lock);
+ nframes = l2cap_ertm_send(sk);
+ spin_unlock_bh(&pi->send_lock);
+
+ if (nframes > 0)
+ return;
+
+ control |= L2CAP_SUPER_RCV_READY;
+ l2cap_send_sframe(pi, control);
+}
+
+static void l2cap_send_srejtail(struct sock *sk)
+{
+ struct srej_list *tail;
+ u16 control;
+
+ control = L2CAP_SUPER_SELECT_REJECT;
+ control |= L2CAP_CTRL_FINAL;
+
+ tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
+ control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+
+ l2cap_send_sframe(l2cap_pi(sk), control);
}
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
@@ -1420,9 +1531,8 @@ static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, in
struct sk_buff **frag;
int err, sent = 0;
- if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
+ if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
return -EFAULT;
- }
sent += count;
len -= count;
@@ -1513,6 +1623,9 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *m
BT_DBG("sk %p len %d", sk, (int)len);
+ if (!conn)
+ return ERR_PTR(-ENOTCONN);
+
if (sdulen)
hlen += 2;
@@ -1554,25 +1667,24 @@ static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, siz
u16 control;
size_t size = 0;
- __skb_queue_head_init(&sar_queue);
+ skb_queue_head_init(&sar_queue);
control = L2CAP_SDU_START;
- skb = l2cap_create_iframe_pdu(sk, msg, pi->max_pdu_size, control, len);
+ skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
if (IS_ERR(skb))
return PTR_ERR(skb);
__skb_queue_tail(&sar_queue, skb);
- len -= pi->max_pdu_size;
- size +=pi->max_pdu_size;
- control = 0;
+ len -= pi->remote_mps;
+ size += pi->remote_mps;
while (len > 0) {
size_t buflen;
- if (len > pi->max_pdu_size) {
- control |= L2CAP_SDU_CONTINUE;
- buflen = pi->max_pdu_size;
+ if (len > pi->remote_mps) {
+ control = L2CAP_SDU_CONTINUE;
+ buflen = pi->remote_mps;
} else {
- control |= L2CAP_SDU_END;
+ control = L2CAP_SDU_END;
buflen = len;
}
@@ -1585,11 +1697,12 @@ static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, siz
__skb_queue_tail(&sar_queue, skb);
len -= buflen;
size += buflen;
- control = 0;
}
skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
+ spin_lock_bh(&pi->send_lock);
if (sk->sk_send_head == NULL)
sk->sk_send_head = sar_queue.next;
+ spin_unlock_bh(&pi->send_lock);
return size;
}
@@ -1611,11 +1724,6 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
- /* Check outgoing MTU */
- if (sk->sk_type == SOCK_SEQPACKET && pi->mode == L2CAP_MODE_BASIC &&
- len > pi->omtu)
- return -EINVAL;
-
lock_sock(sk);
if (sk->sk_state != BT_CONNECTED) {
@@ -1626,15 +1734,23 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
/* Connectionless channel */
if (sk->sk_type == SOCK_DGRAM) {
skb = l2cap_create_connless_pdu(sk, msg, len);
- if (IS_ERR(skb))
+ if (IS_ERR(skb)) {
err = PTR_ERR(skb);
- else
- err = l2cap_do_send(sk, skb);
+ } else {
+ l2cap_do_send(sk, skb);
+ err = len;
+ }
goto done;
}
switch (pi->mode) {
case L2CAP_MODE_BASIC:
+ /* Check outgoing MTU */
+ if (len > pi->omtu) {
+ err = -EINVAL;
+ goto done;
+ }
+
/* Create a basic PDU */
skb = l2cap_create_basic_pdu(sk, msg, len);
if (IS_ERR(skb)) {
@@ -1642,15 +1758,14 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
goto done;
}
- err = l2cap_do_send(sk, skb);
- if (!err)
- err = len;
+ l2cap_do_send(sk, skb);
+ err = len;
break;
case L2CAP_MODE_ERTM:
case L2CAP_MODE_STREAMING:
/* Entire SDU fits into one PDU */
- if (len <= pi->max_pdu_size) {
+ if (len <= pi->remote_mps) {
control = L2CAP_SDU_UNSEGMENTED;
skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
if (IS_ERR(skb)) {
@@ -1658,8 +1773,15 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
goto done;
}
__skb_queue_tail(TX_QUEUE(sk), skb);
+
+ if (pi->mode == L2CAP_MODE_ERTM)
+ spin_lock_bh(&pi->send_lock);
+
if (sk->sk_send_head == NULL)
sk->sk_send_head = skb;
+
+ if (pi->mode == L2CAP_MODE_ERTM)
+ spin_unlock_bh(&pi->send_lock);
} else {
/* Segment SDU into multiples PDUs */
err = l2cap_sar_segment_sdu(sk, msg, len);
@@ -1667,12 +1789,15 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
goto done;
}
- if (pi->mode == L2CAP_MODE_STREAMING)
+ if (pi->mode == L2CAP_MODE_STREAMING) {
err = l2cap_streaming_send(sk);
- else
+ } else {
+ spin_lock_bh(&pi->send_lock);
err = l2cap_ertm_send(sk);
+ spin_unlock_bh(&pi->send_lock);
+ }
- if (!err)
+ if (err >= 0)
err = len;
break;
@@ -1731,6 +1856,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
opts.flush_to = l2cap_pi(sk)->flush_to;
opts.mode = l2cap_pi(sk)->mode;
opts.fcs = l2cap_pi(sk)->fcs;
+ opts.max_tx = l2cap_pi(sk)->max_tx;
+ opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
len = min_t(unsigned int, sizeof(opts), optlen);
if (copy_from_user((char *) &opts, optval, len)) {
@@ -1738,10 +1865,25 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
break;
}
+ l2cap_pi(sk)->mode = opts.mode;
+ switch (l2cap_pi(sk)->mode) {
+ case L2CAP_MODE_BASIC:
+ break;
+ case L2CAP_MODE_ERTM:
+ case L2CAP_MODE_STREAMING:
+ if (enable_ertm)
+ break;
+ /* fall through */
+ default:
+ err = -EINVAL;
+ break;
+ }
+
l2cap_pi(sk)->imtu = opts.imtu;
l2cap_pi(sk)->omtu = opts.omtu;
- l2cap_pi(sk)->mode = opts.mode;
l2cap_pi(sk)->fcs = opts.fcs;
+ l2cap_pi(sk)->max_tx = opts.max_tx;
+ l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
break;
case L2CAP_LM:
@@ -1789,7 +1931,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
switch (optname) {
case BT_SECURITY:
- if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
+ if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
+ && sk->sk_type != SOCK_RAW) {
err = -EINVAL;
break;
}
@@ -1856,6 +1999,8 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
opts.flush_to = l2cap_pi(sk)->flush_to;
opts.mode = l2cap_pi(sk)->mode;
opts.fcs = l2cap_pi(sk)->fcs;
+ opts.max_tx = l2cap_pi(sk)->max_tx;
+ opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
len = min_t(unsigned int, len, sizeof(opts));
if (copy_to_user(optval, (char *) &opts, len))
@@ -1937,7 +2082,8 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
switch (optname) {
case BT_SECURITY:
- if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
+ if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
+ && sk->sk_type != SOCK_RAW) {
err = -EINVAL;
break;
}
@@ -1982,6 +2128,9 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
lock_sock(sk);
if (!sk->sk_shutdown) {
+ if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
+ err = __l2cap_wait_ack(sk);
+
sk->sk_shutdown = SHUTDOWN_MASK;
l2cap_sock_clear_timer(sk);
__l2cap_sock_close(sk, 0);
@@ -2184,19 +2333,35 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
*ptr += L2CAP_CONF_OPT_SIZE + len;
}
+static void l2cap_ack_timeout(unsigned long arg)
+{
+ struct sock *sk = (void *) arg;
+
+ bh_lock_sock(sk);
+ l2cap_send_ack(l2cap_pi(sk));
+ bh_unlock_sock(sk);
+}
+
static inline void l2cap_ertm_init(struct sock *sk)
{
l2cap_pi(sk)->expected_ack_seq = 0;
l2cap_pi(sk)->unacked_frames = 0;
l2cap_pi(sk)->buffer_seq = 0;
- l2cap_pi(sk)->num_to_ack = 0;
+ l2cap_pi(sk)->num_acked = 0;
+ l2cap_pi(sk)->frames_sent = 0;
setup_timer(&l2cap_pi(sk)->retrans_timer,
l2cap_retrans_timeout, (unsigned long) sk);
setup_timer(&l2cap_pi(sk)->monitor_timer,
l2cap_monitor_timeout, (unsigned long) sk);
+ setup_timer(&l2cap_pi(sk)->ack_timer,
+ l2cap_ack_timeout, (unsigned long) sk);
__skb_queue_head_init(SREJ_QUEUE(sk));
+ __skb_queue_head_init(BUSY_QUEUE(sk));
+ spin_lock_init(&l2cap_pi(sk)->send_lock);
+
+ INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
}
static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
@@ -2232,7 +2397,7 @@ static int l2cap_build_conf_req(struct sock *sk, void *data)
{
struct l2cap_pinfo *pi = l2cap_pi(sk);
struct l2cap_conf_req *req = data;
- struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
+ struct l2cap_conf_rfc rfc = { .mode = pi->mode };
void *ptr = req->data;
BT_DBG("sk %p", sk);
@@ -2261,11 +2426,13 @@ done:
case L2CAP_MODE_ERTM:
rfc.mode = L2CAP_MODE_ERTM;
- rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
- rfc.max_transmit = max_transmit;
+ rfc.txwin_size = pi->tx_win;
+ rfc.max_transmit = pi->max_tx;
rfc.retrans_timeout = 0;
rfc.monitor_timeout = 0;
rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
+ if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
+ rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
sizeof(rfc), (unsigned long) &rfc);
@@ -2287,6 +2454,8 @@ done:
rfc.retrans_timeout = 0;
rfc.monitor_timeout = 0;
rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
+ if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
+ rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
sizeof(rfc), (unsigned long) &rfc);
@@ -2415,10 +2584,15 @@ done:
case L2CAP_MODE_ERTM:
pi->remote_tx_win = rfc.txwin_size;
pi->remote_max_tx = rfc.max_transmit;
- pi->max_pdu_size = rfc.max_pdu_size;
+ if (rfc.max_pdu_size > pi->conn->mtu - 10)
+ rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
- rfc.retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
- rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
+ pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
+
+ rfc.retrans_timeout =
+ le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
+ rfc.monitor_timeout =
+ le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
pi->conf_state |= L2CAP_CONF_MODE_DONE;
@@ -2428,8 +2602,10 @@ done:
break;
case L2CAP_MODE_STREAMING:
- pi->remote_tx_win = rfc.txwin_size;
- pi->max_pdu_size = rfc.max_pdu_size;
+ if (rfc.max_pdu_size > pi->conn->mtu - 10)
+ rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
+
+ pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
pi->conf_state |= L2CAP_CONF_MODE_DONE;
@@ -2506,13 +2682,12 @@ static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data,
switch (rfc.mode) {
case L2CAP_MODE_ERTM:
pi->remote_tx_win = rfc.txwin_size;
- pi->retrans_timeout = rfc.retrans_timeout;
- pi->monitor_timeout = rfc.monitor_timeout;
- pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
+ pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
+ pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
+ pi->mps = le16_to_cpu(rfc.max_pdu_size);
break;
case L2CAP_MODE_STREAMING:
- pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
- break;
+ pi->mps = le16_to_cpu(rfc.max_pdu_size);
}
}
@@ -2536,6 +2711,42 @@ static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 fla
return ptr - data;
}
+static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ int type, olen;
+ unsigned long val;
+ struct l2cap_conf_rfc rfc;
+
+ BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
+
+ if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
+ return;
+
+ while (len >= L2CAP_CONF_OPT_SIZE) {
+ len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
+
+ switch (type) {
+ case L2CAP_CONF_RFC:
+ if (olen == sizeof(rfc))
+ memcpy(&rfc, (void *)val, olen);
+ goto done;
+ }
+ }
+
+done:
+ switch (rfc.mode) {
+ case L2CAP_MODE_ERTM:
+ pi->remote_tx_win = rfc.txwin_size;
+ pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
+ pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
+ pi->mps = le16_to_cpu(rfc.max_pdu_size);
+ break;
+ case L2CAP_MODE_STREAMING:
+ pi->mps = le16_to_cpu(rfc.max_pdu_size);
+ }
+}
+
static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
@@ -2815,6 +3026,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
u16 scid, flags, result;
struct sock *sk;
+ int len = cmd->len - sizeof(*rsp);
scid = __le16_to_cpu(rsp->scid);
flags = __le16_to_cpu(rsp->flags);
@@ -2829,11 +3041,11 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
switch (result) {
case L2CAP_CONF_SUCCESS:
+ l2cap_conf_rfc_get(sk, rsp->data, len);
break;
case L2CAP_CONF_UNACCEPT:
if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
- int len = cmd->len - sizeof(*rsp);
char req[64];
if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
@@ -2917,8 +3129,10 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
skb_queue_purge(SREJ_QUEUE(sk));
+ skb_queue_purge(BUSY_QUEUE(sk));
del_timer(&l2cap_pi(sk)->retrans_timer);
del_timer(&l2cap_pi(sk)->monitor_timer);
+ del_timer(&l2cap_pi(sk)->ack_timer);
}
l2cap_chan_del(sk, ECONNRESET);
@@ -2947,8 +3161,10 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
skb_queue_purge(SREJ_QUEUE(sk));
+ skb_queue_purge(BUSY_QUEUE(sk));
del_timer(&l2cap_pi(sk)->retrans_timer);
del_timer(&l2cap_pi(sk)->monitor_timer);
+ del_timer(&l2cap_pi(sk)->ack_timer);
}
l2cap_chan_del(sk, 0);
@@ -3143,7 +3359,40 @@ static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
return 0;
}
-static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
+static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ u16 control = 0;
+
+ pi->frames_sent = 0;
+ pi->conn_state |= L2CAP_CONN_SEND_FBIT;
+
+ control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+
+ if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+ control |= L2CAP_SUPER_RCV_NOT_READY | L2CAP_CTRL_FINAL;
+ l2cap_send_sframe(pi, control);
+ pi->conn_state |= L2CAP_CONN_RNR_SENT;
+ pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
+ }
+
+ if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY && pi->unacked_frames > 0)
+ __mod_retrans_timer();
+
+ pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+
+ spin_lock_bh(&pi->send_lock);
+ l2cap_ertm_send(sk);
+ spin_unlock_bh(&pi->send_lock);
+
+ if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
+ pi->frames_sent == 0) {
+ control |= L2CAP_SUPER_RCV_READY;
+ l2cap_send_sframe(pi, control);
+ }
+}
+
+static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
struct sk_buff *next_skb;
@@ -3153,29 +3402,256 @@ static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_
next_skb = skb_peek(SREJ_QUEUE(sk));
if (!next_skb) {
__skb_queue_tail(SREJ_QUEUE(sk), skb);
- return;
+ return 0;
}
do {
+ if (bt_cb(next_skb)->tx_seq == tx_seq)
+ return -EINVAL;
+
if (bt_cb(next_skb)->tx_seq > tx_seq) {
__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
- return;
+ return 0;
}
if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
break;
- } while((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
+ } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
__skb_queue_tail(SREJ_QUEUE(sk), skb);
+
+ return 0;
+}
+
+static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ struct sk_buff *_skb;
+ int err;
+
+ switch (control & L2CAP_CTRL_SAR) {
+ case L2CAP_SDU_UNSEGMENTED:
+ if (pi->conn_state & L2CAP_CONN_SAR_SDU)
+ goto drop;
+
+ err = sock_queue_rcv_skb(sk, skb);
+ if (!err)
+ return err;
+
+ break;
+
+ case L2CAP_SDU_START:
+ if (pi->conn_state & L2CAP_CONN_SAR_SDU)
+ goto drop;
+
+ pi->sdu_len = get_unaligned_le16(skb->data);
+
+ if (pi->sdu_len > pi->imtu)
+ goto disconnect;
+
+ pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
+ if (!pi->sdu)
+ return -ENOMEM;
+
+ /* pull sdu_len bytes only after alloc: because of the Local Busy
+ * condition we have to be sure that this is executed only once,
+ * i.e., when the allocation does not fail */
+ skb_pull(skb, 2);
+
+ memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
+
+ pi->conn_state |= L2CAP_CONN_SAR_SDU;
+ pi->partial_sdu_len = skb->len;
+ break;
+
+ case L2CAP_SDU_CONTINUE:
+ if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
+ goto disconnect;
+
+ if (!pi->sdu)
+ goto disconnect;
+
+ pi->partial_sdu_len += skb->len;
+ if (pi->partial_sdu_len > pi->sdu_len)
+ goto drop;
+
+ memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
+
+ break;
+
+ case L2CAP_SDU_END:
+ if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
+ goto disconnect;
+
+ if (!pi->sdu)
+ goto disconnect;
+
+ if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
+ pi->partial_sdu_len += skb->len;
+
+ if (pi->partial_sdu_len > pi->imtu)
+ goto drop;
+
+ if (pi->partial_sdu_len != pi->sdu_len)
+ goto drop;
+
+ memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
+ }
+
+ _skb = skb_clone(pi->sdu, GFP_ATOMIC);
+ if (!_skb) {
+ pi->conn_state |= L2CAP_CONN_SAR_RETRY;
+ return -ENOMEM;
+ }
+
+ err = sock_queue_rcv_skb(sk, _skb);
+ if (err < 0) {
+ kfree_skb(_skb);
+ pi->conn_state |= L2CAP_CONN_SAR_RETRY;
+ return err;
+ }
+
+ pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
+ pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
+
+ kfree_skb(pi->sdu);
+ break;
+ }
+
+ kfree_skb(skb);
+ return 0;
+
+drop:
+ kfree_skb(pi->sdu);
+ pi->sdu = NULL;
+
+disconnect:
+ l2cap_send_disconn_req(pi->conn, sk);
+ kfree_skb(skb);
+ return 0;
+}
+
+static void l2cap_busy_work(struct work_struct *work)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ struct l2cap_pinfo *pi =
+ container_of(work, struct l2cap_pinfo, busy_work);
+ struct sock *sk = (struct sock *)pi;
+ int n_tries = 0, timeo = HZ/5, err;
+ struct sk_buff *skb;
+ u16 control;
+
+ lock_sock(sk);
+
+ add_wait_queue(sk_sleep(sk), &wait);
+ while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
+ err = -EBUSY;
+ l2cap_send_disconn_req(pi->conn, sk);
+ goto done;
+ }
+
+ if (!timeo)
+ timeo = HZ/5;
+
+ if (signal_pending(current)) {
+ err = sock_intr_errno(timeo);
+ goto done;
+ }
+
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock(sk);
+
+ err = sock_error(sk);
+ if (err)
+ goto done;
+
+ while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
+ control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
+ err = l2cap_ertm_reassembly_sdu(sk, skb, control);
+ if (err < 0) {
+ skb_queue_head(BUSY_QUEUE(sk), skb);
+ break;
+ }
+
+ pi->buffer_seq = (pi->buffer_seq + 1) % 64;
+ }
+
+ if (!skb)
+ break;
+ }
+
+ if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
+ goto done;
+
+ control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+ control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
+ l2cap_send_sframe(pi, control);
+ l2cap_pi(sk)->retry_count = 1;
+
+ del_timer(&pi->retrans_timer);
+ __mod_monitor_timer();
+
+ l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
+
+done:
+ pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
+ pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
+
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(sk_sleep(sk), &wait);
+
+ release_sock(sk);
}
-static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
+static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ int sctrl, err;
+
+ if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+ bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
+ __skb_queue_tail(BUSY_QUEUE(sk), skb);
+ return -EBUSY;
+ }
+
+ err = l2cap_ertm_reassembly_sdu(sk, skb, control);
+ if (err >= 0) {
+ pi->buffer_seq = (pi->buffer_seq + 1) % 64;
+ return err;
+ }
+
+ /* Busy Condition */
+ pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
+ bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
+ __skb_queue_tail(BUSY_QUEUE(sk), skb);
+
+ sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+ sctrl |= L2CAP_SUPER_RCV_NOT_READY;
+ l2cap_send_sframe(pi, sctrl);
+
+ pi->conn_state |= L2CAP_CONN_RNR_SENT;
+
+ queue_work(_busy_wq, &pi->busy_work);
+
+ return err;
+}
+
+static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
struct l2cap_pinfo *pi = l2cap_pi(sk);
struct sk_buff *_skb;
int err = -EINVAL;
+ /*
+ * TODO: We have to notify userland if some data is lost in
+ * Streaming Mode.
+ */
+
switch (control & L2CAP_CTRL_SAR) {
case L2CAP_SDU_UNSEGMENTED:
if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
@@ -3198,6 +3674,11 @@ static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 co
pi->sdu_len = get_unaligned_le16(skb->data);
skb_pull(skb, 2);
+ if (pi->sdu_len > pi->imtu) {
+ err = -EMSGSIZE;
+ break;
+ }
+
pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
if (!pi->sdu) {
err = -ENOMEM;
@@ -3234,15 +3715,19 @@ static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 co
pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
pi->partial_sdu_len += skb->len;
+ if (pi->partial_sdu_len > pi->imtu)
+ goto drop;
+
if (pi->partial_sdu_len == pi->sdu_len) {
_skb = skb_clone(pi->sdu, GFP_ATOMIC);
err = sock_queue_rcv_skb(sk, _skb);
if (err < 0)
kfree_skb(_skb);
}
- kfree_skb(pi->sdu);
err = 0;
+drop:
+ kfree_skb(pi->sdu);
break;
}
@@ -3253,15 +3738,15 @@ static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 co
static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
{
struct sk_buff *skb;
- u16 control = 0;
+ u16 control;
- while((skb = skb_peek(SREJ_QUEUE(sk)))) {
+ while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
if (bt_cb(skb)->tx_seq != tx_seq)
break;
skb = skb_dequeue(SREJ_QUEUE(sk));
- control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
- l2cap_sar_reassembly_sdu(sk, skb, control);
+ control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
+ l2cap_ertm_reassembly_sdu(sk, skb, control);
l2cap_pi(sk)->buffer_seq_srej =
(l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
tx_seq++;
@@ -3274,7 +3759,7 @@ static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
struct srej_list *l, *tmp;
u16 control;
- list_for_each_entry_safe(l,tmp, SREJ_LIST(sk), list) {
+ list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
if (l->tx_seq == tx_seq) {
list_del(&l->list);
kfree(l);
@@ -3297,10 +3782,6 @@ static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
while (tx_seq != pi->expected_tx_seq) {
control = L2CAP_SUPER_SELECT_REJECT;
control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
- if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
- control |= L2CAP_CTRL_POLL;
- pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
- }
l2cap_send_sframe(pi, control);
new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
@@ -3315,18 +3796,40 @@ static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, str
struct l2cap_pinfo *pi = l2cap_pi(sk);
u8 tx_seq = __get_txseq(rx_control);
u8 req_seq = __get_reqseq(rx_control);
- u16 tx_control = 0;
u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
+ u8 tx_seq_offset, expected_tx_seq_offset;
+ int num_to_ack = (pi->tx_win/6) + 1;
int err = 0;
BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
+ if (L2CAP_CTRL_FINAL & rx_control &&
+ l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
+ del_timer(&pi->monitor_timer);
+ if (pi->unacked_frames > 0)
+ __mod_retrans_timer();
+ pi->conn_state &= ~L2CAP_CONN_WAIT_F;
+ }
+
pi->expected_ack_seq = req_seq;
l2cap_drop_acked_frames(sk);
if (tx_seq == pi->expected_tx_seq)
goto expected;
+ tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
+ if (tx_seq_offset < 0)
+ tx_seq_offset += 64;
+
+ /* invalid tx_seq */
+ if (tx_seq_offset >= pi->tx_win) {
+ l2cap_send_disconn_req(pi->conn, sk);
+ goto drop;
+ }
+
+ if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
+ goto drop;
+
if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
struct srej_list *first;
@@ -3342,10 +3845,14 @@ static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, str
if (list_empty(SREJ_LIST(sk))) {
pi->buffer_seq = pi->buffer_seq_srej;
pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
+ l2cap_send_ack(pi);
}
} else {
struct srej_list *l;
- l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
+
+ /* duplicated tx_seq */
+ if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
+ goto drop;
list_for_each_entry(l, SREJ_LIST(sk), list) {
if (l->tx_seq == tx_seq) {
@@ -3356,12 +3863,22 @@ static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, str
l2cap_send_srejframe(sk, tx_seq);
}
} else {
+ expected_tx_seq_offset =
+ (pi->expected_tx_seq - pi->buffer_seq) % 64;
+ if (expected_tx_seq_offset < 0)
+ expected_tx_seq_offset += 64;
+
+ /* duplicated tx_seq */
+ if (tx_seq_offset < expected_tx_seq_offset)
+ goto drop;
+
pi->conn_state |= L2CAP_CONN_SREJ_SENT;
INIT_LIST_HEAD(SREJ_LIST(sk));
pi->buffer_seq_srej = pi->buffer_seq;
__skb_queue_head_init(SREJ_QUEUE(sk));
+ __skb_queue_head_init(BUSY_QUEUE(sk));
l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
pi->conn_state |= L2CAP_CONN_SEND_PBIT;
@@ -3374,153 +3891,189 @@ expected:
pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
- l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
+ bt_cb(skb)->tx_seq = tx_seq;
+ bt_cb(skb)->sar = sar;
+ __skb_queue_tail(SREJ_QUEUE(sk), skb);
return 0;
}
if (rx_control & L2CAP_CTRL_FINAL) {
if (pi->conn_state & L2CAP_CONN_REJ_ACT)
pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
- else {
- sk->sk_send_head = TX_QUEUE(sk)->next;
- pi->next_tx_seq = pi->expected_ack_seq;
- l2cap_ertm_send(sk);
- }
+ else
+ l2cap_retransmit_frames(sk);
}
- pi->buffer_seq = (pi->buffer_seq + 1) % 64;
-
- err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
+ err = l2cap_push_rx_skb(sk, skb, rx_control);
if (err < 0)
- return err;
+ return 0;
+
+ __mod_ack_timer();
+
+ pi->num_acked = (pi->num_acked + 1) % num_to_ack;
+ if (pi->num_acked == num_to_ack - 1)
+ l2cap_send_ack(pi);
- pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK;
- if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1) {
- tx_control |= L2CAP_SUPER_RCV_READY;
- tx_control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
- l2cap_send_sframe(pi, tx_control);
- }
+ return 0;
+
+drop:
+ kfree_skb(skb);
return 0;
}
-static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
+static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
{
struct l2cap_pinfo *pi = l2cap_pi(sk);
- u8 tx_seq = __get_reqseq(rx_control);
- BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
+ pi->expected_ack_seq = __get_reqseq(rx_control);
+ l2cap_drop_acked_frames(sk);
- switch (rx_control & L2CAP_CTRL_SUPERVISE) {
- case L2CAP_SUPER_RCV_READY:
- if (rx_control & L2CAP_CTRL_POLL) {
- u16 control = L2CAP_CTRL_FINAL;
- control |= L2CAP_SUPER_RCV_READY |
- (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT);
- l2cap_send_sframe(l2cap_pi(sk), control);
- pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+ if (rx_control & L2CAP_CTRL_POLL) {
+ if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
+ if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
+ (pi->unacked_frames > 0))
+ __mod_retrans_timer();
- } else if (rx_control & L2CAP_CTRL_FINAL) {
pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
- pi->expected_ack_seq = tx_seq;
- l2cap_drop_acked_frames(sk);
-
- if (pi->conn_state & L2CAP_CONN_REJ_ACT)
- pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
- else {
- sk->sk_send_head = TX_QUEUE(sk)->next;
- pi->next_tx_seq = pi->expected_ack_seq;
- l2cap_ertm_send(sk);
- }
-
- if (!(pi->conn_state & L2CAP_CONN_WAIT_F))
- break;
+ l2cap_send_srejtail(sk);
+ } else {
+ l2cap_send_i_or_rr_or_rnr(sk);
+ }
- pi->conn_state &= ~L2CAP_CONN_WAIT_F;
- del_timer(&pi->monitor_timer);
+ } else if (rx_control & L2CAP_CTRL_FINAL) {
+ pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
- if (pi->unacked_frames > 0)
- __mod_retrans_timer();
- } else {
- pi->expected_ack_seq = tx_seq;
- l2cap_drop_acked_frames(sk);
+ if (pi->conn_state & L2CAP_CONN_REJ_ACT)
+ pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
+ else
+ l2cap_retransmit_frames(sk);
- if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
- (pi->unacked_frames > 0))
- __mod_retrans_timer();
+ } else {
+ if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
+ (pi->unacked_frames > 0))
+ __mod_retrans_timer();
- pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+ pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+ if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
+ l2cap_send_ack(pi);
+ } else {
+ spin_lock_bh(&pi->send_lock);
l2cap_ertm_send(sk);
+ spin_unlock_bh(&pi->send_lock);
}
- break;
+ }
+}
- case L2CAP_SUPER_REJECT:
- pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ u8 tx_seq = __get_reqseq(rx_control);
+
+ pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+
+ pi->expected_ack_seq = tx_seq;
+ l2cap_drop_acked_frames(sk);
- pi->expected_ack_seq = __get_reqseq(rx_control);
+ if (rx_control & L2CAP_CTRL_FINAL) {
+ if (pi->conn_state & L2CAP_CONN_REJ_ACT)
+ pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
+ else
+ l2cap_retransmit_frames(sk);
+ } else {
+ l2cap_retransmit_frames(sk);
+
+ if (pi->conn_state & L2CAP_CONN_WAIT_F)
+ pi->conn_state |= L2CAP_CONN_REJ_ACT;
+ }
+}
+static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ u8 tx_seq = __get_reqseq(rx_control);
+
+ pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+
+ if (rx_control & L2CAP_CTRL_POLL) {
+ pi->expected_ack_seq = tx_seq;
l2cap_drop_acked_frames(sk);
+ l2cap_retransmit_one_frame(sk, tx_seq);
- if (rx_control & L2CAP_CTRL_FINAL) {
- if (pi->conn_state & L2CAP_CONN_REJ_ACT)
- pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
- else {
- sk->sk_send_head = TX_QUEUE(sk)->next;
- pi->next_tx_seq = pi->expected_ack_seq;
- l2cap_ertm_send(sk);
- }
- } else {
- sk->sk_send_head = TX_QUEUE(sk)->next;
- pi->next_tx_seq = pi->expected_ack_seq;
- l2cap_ertm_send(sk);
+ spin_lock_bh(&pi->send_lock);
+ l2cap_ertm_send(sk);
+ spin_unlock_bh(&pi->send_lock);
- if (pi->conn_state & L2CAP_CONN_WAIT_F) {
- pi->srej_save_reqseq = tx_seq;
- pi->conn_state |= L2CAP_CONN_REJ_ACT;
- }
+ if (pi->conn_state & L2CAP_CONN_WAIT_F) {
+ pi->srej_save_reqseq = tx_seq;
+ pi->conn_state |= L2CAP_CONN_SREJ_ACT;
+ }
+ } else if (rx_control & L2CAP_CTRL_FINAL) {
+ if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
+ pi->srej_save_reqseq == tx_seq)
+ pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
+ else
+ l2cap_retransmit_one_frame(sk, tx_seq);
+ } else {
+ l2cap_retransmit_one_frame(sk, tx_seq);
+ if (pi->conn_state & L2CAP_CONN_WAIT_F) {
+ pi->srej_save_reqseq = tx_seq;
+ pi->conn_state |= L2CAP_CONN_SREJ_ACT;
}
+ }
+}
+static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ u8 tx_seq = __get_reqseq(rx_control);
+
+ pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
+ pi->expected_ack_seq = tx_seq;
+ l2cap_drop_acked_frames(sk);
+
+ if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
+ del_timer(&pi->retrans_timer);
+ if (rx_control & L2CAP_CTRL_POLL)
+ l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
+ return;
+ }
+
+ if (rx_control & L2CAP_CTRL_POLL)
+ l2cap_send_srejtail(sk);
+ else
+ l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
+}
+
+static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
+{
+ BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
+
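+	/* An S-frame carrying the Final bit answers our Poll: stop the
+	 * monitor timer and fall back to the retransmission timer while
+	 * frames remain unacknowledged.
+	 */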
+ if (L2CAP_CTRL_FINAL & rx_control &&
+ l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
+ del_timer(&l2cap_pi(sk)->monitor_timer);
+ if (l2cap_pi(sk)->unacked_frames > 0)
+ __mod_retrans_timer();
+ l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
+ }
+
+ switch (rx_control & L2CAP_CTRL_SUPERVISE) {
+ case L2CAP_SUPER_RCV_READY:
+ l2cap_data_channel_rrframe(sk, rx_control);
break;
- case L2CAP_SUPER_SELECT_REJECT:
- pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+ case L2CAP_SUPER_REJECT:
+ l2cap_data_channel_rejframe(sk, rx_control);
+ break;
- if (rx_control & L2CAP_CTRL_POLL) {
- pi->expected_ack_seq = tx_seq;
- l2cap_drop_acked_frames(sk);
- l2cap_retransmit_frame(sk, tx_seq);
- l2cap_ertm_send(sk);
- if (pi->conn_state & L2CAP_CONN_WAIT_F) {
- pi->srej_save_reqseq = tx_seq;
- pi->conn_state |= L2CAP_CONN_SREJ_ACT;
- }
- } else if (rx_control & L2CAP_CTRL_FINAL) {
- if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
- pi->srej_save_reqseq == tx_seq)
- pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
- else
- l2cap_retransmit_frame(sk, tx_seq);
- }
- else {
- l2cap_retransmit_frame(sk, tx_seq);
- if (pi->conn_state & L2CAP_CONN_WAIT_F) {
- pi->srej_save_reqseq = tx_seq;
- pi->conn_state |= L2CAP_CONN_SREJ_ACT;
- }
- }
+ case L2CAP_SUPER_SELECT_REJECT:
+ l2cap_data_channel_srejframe(sk, rx_control);
break;
case L2CAP_SUPER_RCV_NOT_READY:
- pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
- pi->expected_ack_seq = tx_seq;
- l2cap_drop_acked_frames(sk);
-
- del_timer(&l2cap_pi(sk)->retrans_timer);
- if (rx_control & L2CAP_CTRL_POLL) {
- u16 control = L2CAP_CTRL_FINAL;
- l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
- }
+ l2cap_data_channel_rnrframe(sk, rx_control);
break;
}
+ kfree_skb(skb);
return 0;
}
@@ -3529,7 +4082,7 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
struct sock *sk;
struct l2cap_pinfo *pi;
u16 control, len;
- u8 tx_seq;
+ u8 tx_seq, req_seq, next_tx_seq_offset, req_seq_offset;
sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
if (!sk) {
@@ -3574,16 +4127,45 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
* Receiver will miss it and start proper recovery
* procedures and ask retransmission.
*/
- if (len > L2CAP_DEFAULT_MAX_PDU_SIZE)
+ if (len > pi->mps) {
+ l2cap_send_disconn_req(pi->conn, sk);
goto drop;
+ }
if (l2cap_check_fcs(pi, skb))
goto drop;
- if (__is_iframe(control))
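+	/* ReqSeq acknowledges frames modulo 64; an acknowledgement outside
+	 * the range of frames actually sent is treated as a protocol error
+	 * and the channel is disconnected.
+	 */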
+ req_seq = __get_reqseq(control);
+ req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
+ if (req_seq_offset < 0)
+ req_seq_offset += 64;
+
+ next_tx_seq_offset =
+ (pi->next_tx_seq - pi->expected_ack_seq) % 64;
+ if (next_tx_seq_offset < 0)
+ next_tx_seq_offset += 64;
+
+ /* check for invalid req-seq */
+ if (req_seq_offset > next_tx_seq_offset) {
+ l2cap_send_disconn_req(pi->conn, sk);
+ goto drop;
+ }
+
+ if (__is_iframe(control)) {
+ if (len < 4) {
+ l2cap_send_disconn_req(pi->conn, sk);
+ goto drop;
+ }
+
l2cap_data_channel_iframe(sk, control, skb);
- else
+ } else {
+ if (len != 0) {
+ l2cap_send_disconn_req(pi->conn, sk);
+ goto drop;
+ }
+
l2cap_data_channel_sframe(sk, control, skb);
+ }
goto done;
@@ -3598,7 +4180,7 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
if (pi->fcs == L2CAP_FCS_CRC16)
len -= 2;
- if (len > L2CAP_DEFAULT_MAX_PDU_SIZE || __is_sframe(control))
+ if (len > pi->mps || len < 4 || __is_sframe(control))
goto drop;
if (l2cap_check_fcs(pi, skb))
@@ -3609,14 +4191,14 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
if (pi->expected_tx_seq == tx_seq)
pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
else
- pi->expected_tx_seq = tx_seq + 1;
+ pi->expected_tx_seq = (tx_seq + 1) % 64;
- l2cap_sar_reassembly_sdu(sk, skb, control);
+ l2cap_streaming_reassembly_sdu(sk, skb, control);
goto done;
default:
- BT_DBG("sk %p: bad mode 0x%2.2x", sk, l2cap_pi(sk)->mode);
+ BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
break;
}
@@ -3772,7 +4354,7 @@ static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
{
- if (sk->sk_type != SOCK_SEQPACKET)
+ if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
return;
if (encrypt == 0x00) {
@@ -4030,6 +4612,10 @@ static int __init l2cap_init(void)
if (err < 0)
return err;
+ _busy_wq = create_singlethread_workqueue("l2cap");
+ if (!_busy_wq)
+ goto error;
+
err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
if (err < 0) {
BT_ERR("L2CAP socket registration failed");
@@ -4064,6 +4650,9 @@ static void __exit l2cap_exit(void)
{
debugfs_remove(l2cap_debugfs);
+ flush_workqueue(_busy_wq);
+ destroy_workqueue(_busy_wq);
+
if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
BT_ERR("L2CAP socket unregistration failed");
@@ -4091,6 +4680,9 @@ MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
module_param(max_transmit, uint, 0644);
MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)");
+module_param(tx_window, uint, 0644);
+MODULE_PARM_DESC(tx_window, "Transmission window size value (default = 63)");
+
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 8ed3c37684fa..43fbf6b4b4bf 100644
--- a/net/bluetooth/rfcomm/sock.c
+		if (err)
@@ -503,7 +503,7 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
- add_wait_queue_exclusive(sk->sk_sleep, &wait);
+ add_wait_queue_exclusive(sk_sleep(sk), &wait);
while (!(nsk = bt_accept_dequeue(sk, newsock))) {
set_current_state(TASK_INTERRUPTIBLE);
if (!timeo) {
@@ -526,7 +526,7 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
}
}
set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sk_sleep, &wait);
+ remove_wait_queue(sk_sleep(sk), &wait);
if (err)
goto done;
@@ -621,7 +621,7 @@ static long rfcomm_sock_data_wait(struct sock *sk, long timeo)
{
DECLARE_WAITQUEUE(wait, current);
- add_wait_queue(sk->sk_sleep, &wait);
+ add_wait_queue(sk_sleep(sk), &wait);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
@@ -640,7 +640,7 @@ static long rfcomm_sock_data_wait(struct sock *sk, long timeo)
}
__set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sk_sleep, &wait);
+ remove_wait_queue(sk_sleep(sk), &wait);
return timeo;
}
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index ca6b2ad1c3fc..4767928a93d3 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -165,11 +165,11 @@ static inline int sco_chan_add(struct sco_conn *conn, struct sock *sk, struct so
int err = 0;
sco_conn_lock(conn);
- if (conn->sk) {
+ if (conn->sk)
err = -EBUSY;
- } else {
+ else
__sco_chan_add(conn, sk, parent);
- }
+
sco_conn_unlock(conn);
return err;
}
@@ -241,22 +241,19 @@ static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
BT_DBG("sk %p len %d", sk, len);
count = min_t(unsigned int, conn->mtu, len);
- if (!(skb = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err)))
+ skb = bt_skb_send_alloc(sk, count,
+ msg->msg_flags & MSG_DONTWAIT, &err);
+ if (!skb)
return err;
if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
- err = -EFAULT;
- goto fail;
+ kfree_skb(skb);
+ return -EFAULT;
}
- if ((err = hci_send_sco(conn->hcon, skb)) < 0)
- return err;
+ hci_send_sco(conn->hcon, skb);
return count;
-
-fail:
- kfree_skb(skb);
- return err;
}
static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
@@ -567,7 +564,7 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
- add_wait_queue_exclusive(sk->sk_sleep, &wait);
+ add_wait_queue_exclusive(sk_sleep(sk), &wait);
while (!(ch = bt_accept_dequeue(sk, newsock))) {
set_current_state(TASK_INTERRUPTIBLE);
if (!timeo) {
@@ -590,7 +587,7 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag
}
}
set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sk_sleep, &wait);
+ remove_wait_queue(sk_sleep(sk), &wait);
if (err)
goto done;
@@ -626,7 +623,7 @@ static int sco_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
- int err = 0;
+ int err;
BT_DBG("sock %p, sk %p", sock, sk);
@@ -851,7 +848,8 @@ static void sco_conn_ready(struct sco_conn *conn)
bh_lock_sock(parent);
- sk = sco_sock_alloc(sock_net(parent), NULL, BTPROTO_SCO, GFP_ATOMIC);
+ sk = sco_sock_alloc(sock_net(parent), NULL,
+ BTPROTO_SCO, GFP_ATOMIC);
if (!sk) {
bh_unlock_sock(parent);
goto done;
diff --git a/net/bridge/Kconfig b/net/bridge/Kconfig
index d115d5cea5b6..9190ae462cb4 100644
--- a/net/bridge/Kconfig
+++ b/net/bridge/Kconfig
@@ -33,14 +33,14 @@ config BRIDGE
If unsure, say N.
config BRIDGE_IGMP_SNOOPING
- bool "IGMP snooping"
+ bool "IGMP/MLD snooping"
depends on BRIDGE
depends on INET
default y
---help---
If you say Y here, then the Ethernet bridge will be able selectively
- forward multicast traffic based on IGMP traffic received from each
- port.
+ forward multicast traffic based on IGMP/MLD traffic received from
+ each port.
Say N to exclude this support and reduce the binary size.
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 90a9024e5c1e..f15f9c4a0dd2 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -13,8 +13,11 @@
#include <linux/kernel.h>
#include <linux/netdevice.h>
+#include <linux/netpoll.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <linux/list.h>
+#include <linux/netfilter_bridge.h>
#include <asm/uaccess.h>
#include "br_private.h"
@@ -26,16 +29,24 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
const unsigned char *dest = skb->data;
struct net_bridge_fdb_entry *dst;
struct net_bridge_mdb_entry *mdst;
+ struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats);
- BR_INPUT_SKB_CB(skb)->brdev = dev;
+#ifdef CONFIG_BRIDGE_NETFILTER
+ if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) {
+ br_nf_pre_routing_finish_bridge_slow(skb);
+ return NETDEV_TX_OK;
+ }
+#endif
+
+ brstats->tx_packets++;
+ brstats->tx_bytes += skb->len;
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
+ BR_INPUT_SKB_CB(skb)->brdev = dev;
skb_reset_mac_header(skb);
skb_pull(skb, ETH_HLEN);
- if (dest[0] & 1) {
+ if (is_multicast_ether_addr(dest)) {
if (br_multicast_rcv(br, NULL, skb))
goto out;
@@ -81,6 +92,31 @@ static int br_dev_stop(struct net_device *dev)
return 0;
}
+static struct net_device_stats *br_get_stats(struct net_device *dev)
+{
+ struct net_bridge *br = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct br_cpu_netstats sum = { 0 };
+ unsigned int cpu;
+
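+	/* Fold the per-CPU bridge counters into the shared net_device_stats */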
+ for_each_possible_cpu(cpu) {
+ const struct br_cpu_netstats *bstats
+ = per_cpu_ptr(br->stats, cpu);
+
+ sum.tx_bytes += bstats->tx_bytes;
+ sum.tx_packets += bstats->tx_packets;
+ sum.rx_bytes += bstats->rx_bytes;
+ sum.rx_packets += bstats->rx_packets;
+ }
+
+ stats->tx_bytes = sum.tx_bytes;
+ stats->tx_packets = sum.tx_packets;
+ stats->rx_bytes = sum.rx_bytes;
+ stats->rx_packets = sum.rx_packets;
+
+ return stats;
+}
+
static int br_change_mtu(struct net_device *dev, int new_mtu)
{
struct net_bridge *br = netdev_priv(dev);
@@ -162,6 +198,59 @@ static int br_set_tx_csum(struct net_device *dev, u32 data)
return 0;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+bool br_devices_support_netpoll(struct net_bridge *br)
+{
+ struct net_bridge_port *p;
+ bool ret = true;
+ int count = 0;
+ unsigned long flags;
+
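+	/* Netpoll can only stay enabled while every port provides a poll
+	 * controller and does not have netpoll disabled.
+	 */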
+ spin_lock_irqsave(&br->lock, flags);
+ list_for_each_entry(p, &br->port_list, list) {
+ count++;
+ if ((p->dev->priv_flags & IFF_DISABLE_NETPOLL) ||
+ !p->dev->netdev_ops->ndo_poll_controller)
+ ret = false;
+ }
+ spin_unlock_irqrestore(&br->lock, flags);
+ return count != 0 && ret;
+}
+
+static void br_poll_controller(struct net_device *br_dev)
+{
+ struct netpoll *np = br_dev->npinfo->netpoll;
+
+ if (np->real_dev != br_dev)
+ netpoll_poll_dev(np->real_dev);
+}
+
+void br_netpoll_cleanup(struct net_device *br_dev)
+{
+ struct net_bridge *br = netdev_priv(br_dev);
+ struct net_bridge_port *p, *n;
+ const struct net_device_ops *ops;
+
+ br->dev->npinfo = NULL;
+ list_for_each_entry_safe(p, n, &br->port_list, list) {
+ if (p->dev) {
+ ops = p->dev->netdev_ops;
+ if (ops->ndo_netpoll_cleanup)
+ ops->ndo_netpoll_cleanup(p->dev);
+ else
+ p->dev->npinfo = NULL;
+ }
+ }
+}
+
+#else
+
+void br_netpoll_cleanup(struct net_device *br_dev)
+{
+}
+
+#endif
+
static const struct ethtool_ops br_ethtool_ops = {
.get_drvinfo = br_getinfo,
.get_link = ethtool_op_get_link,
@@ -180,19 +269,32 @@ static const struct net_device_ops br_netdev_ops = {
.ndo_open = br_dev_open,
.ndo_stop = br_dev_stop,
.ndo_start_xmit = br_dev_xmit,
+ .ndo_get_stats = br_get_stats,
.ndo_set_mac_address = br_set_mac_address,
.ndo_set_multicast_list = br_dev_set_multicast_list,
.ndo_change_mtu = br_change_mtu,
.ndo_do_ioctl = br_dev_ioctl,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_netpoll_cleanup = br_netpoll_cleanup,
+ .ndo_poll_controller = br_poll_controller,
+#endif
};
+static void br_dev_free(struct net_device *dev)
+{
+ struct net_bridge *br = netdev_priv(dev);
+
+ free_percpu(br->stats);
+ free_netdev(dev);
+}
+
void br_dev_setup(struct net_device *dev)
{
random_ether_addr(dev->dev_addr);
ether_setup(dev);
dev->netdev_ops = &br_netdev_ops;
- dev->destructor = free_netdev;
+ dev->destructor = br_dev_free;
SET_ETHTOOL_OPS(dev, &br_ethtool_ops);
dev->tx_queue_len = 0;
dev->priv_flags = IFF_EBRIDGE;
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 7a241c396981..a98ef1393097 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
+#include <linux/netpoll.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/netfilter_bridge.h>
@@ -44,13 +45,19 @@ int br_dev_queue_push_xmit(struct sk_buff *skb)
if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))
kfree_skb(skb);
else {
- /* ip_refrag calls ip_fragment, doesn't copy the MAC header. */
+ /* ip_fragment doesn't copy the MAC header */
if (nf_bridge_maybe_copy_header(skb))
kfree_skb(skb);
else {
skb_push(skb, ETH_HLEN);
- dev_queue_xmit(skb);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ if (unlikely(skb->dev->priv_flags & IFF_IN_NETPOLL)) {
+ netpoll_send_skb(skb->dev->npinfo->netpoll, skb);
+ skb->dev->priv_flags &= ~IFF_IN_NETPOLL;
+ } else
+#endif
+ dev_queue_xmit(skb);
}
}
@@ -59,16 +66,30 @@ int br_dev_queue_push_xmit(struct sk_buff *skb)
int br_forward_finish(struct sk_buff *skb)
{
- return NF_HOOK(PF_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
+ return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
br_dev_queue_push_xmit);
}
static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ struct net_bridge *br = to->br;
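+	/* While the bridge device is transmitting via netpoll, point the
+	 * egress port at the same netpoll instance so the frame goes out
+	 * through netpoll_send_skb().
+	 */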
+ if (unlikely(br->dev->priv_flags & IFF_IN_NETPOLL)) {
+ struct netpoll *np;
+ to->dev->npinfo = skb->dev->npinfo;
+ np = skb->dev->npinfo->netpoll;
+ np->real_dev = np->dev = to->dev;
+ to->dev->priv_flags |= IFF_IN_NETPOLL;
+ }
+#endif
skb->dev = to->dev;
- NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
- br_forward_finish);
+ NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
+ br_forward_finish);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ if (skb->dev->npinfo)
+ skb->dev->npinfo->netpoll->dev = br->dev;
+#endif
}
static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
@@ -84,8 +105,8 @@ static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
skb->dev = to->dev;
skb_forward_csum(skb);
- NF_HOOK(PF_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev,
- br_forward_finish);
+ NF_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev,
+ br_forward_finish);
}
/* called with rcu_read_lock */
@@ -208,17 +229,15 @@ static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
{
struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
struct net_bridge *br = netdev_priv(dev);
- struct net_bridge_port *port;
- struct net_bridge_port *lport, *rport;
- struct net_bridge_port *prev;
+ struct net_bridge_port *prev = NULL;
struct net_bridge_port_group *p;
struct hlist_node *rp;
- prev = NULL;
-
- rp = br->router_list.first;
- p = mdst ? mdst->ports : NULL;
+ rp = rcu_dereference(br->router_list.first);
+ p = mdst ? rcu_dereference(mdst->ports) : NULL;
while (p || rp) {
+ struct net_bridge_port *port, *lport, *rport;
+
lport = p ? p->port : NULL;
rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) :
NULL;
@@ -231,9 +250,9 @@ static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
goto out;
if ((unsigned long)lport >= (unsigned long)port)
- p = p->next;
+ p = rcu_dereference(p->next);
if ((unsigned long)rport >= (unsigned long)port)
- rp = rp->next;
+ rp = rcu_dereference(rp->next);
}
if (!prev)
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 0b6b1f2ff7ac..537bdd60d9b9 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/netdevice.h>
+#include <linux/netpoll.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/module.h>
@@ -153,6 +154,14 @@ static void del_nbp(struct net_bridge_port *p)
kobject_uevent(&p->kobj, KOBJ_REMOVE);
kobject_del(&p->kobj);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ if (br_devices_support_netpoll(br))
+ br->dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
+ if (dev->netdev_ops->ndo_netpoll_cleanup)
+ dev->netdev_ops->ndo_netpoll_cleanup(dev);
+ else
+ dev->npinfo = NULL;
+#endif
call_rcu(&p->rcu, destroy_nbp_rcu);
}
@@ -165,6 +174,8 @@ static void del_br(struct net_bridge *br, struct list_head *head)
del_nbp(p);
}
+ br_netpoll_cleanup(br->dev);
+
del_timer_sync(&br->gc_timer);
br_sysfs_delbr(br->dev);
@@ -186,6 +197,12 @@ static struct net_device *new_bridge_dev(struct net *net, const char *name)
br = netdev_priv(dev);
br->dev = dev;
+ br->stats = alloc_percpu(struct br_cpu_netstats);
+ if (!br->stats) {
+ free_netdev(dev);
+ return NULL;
+ }
+
spin_lock_init(&br->lock);
INIT_LIST_HEAD(&br->port_list);
spin_lock_init(&br->hash_lock);
@@ -438,6 +455,20 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
kobject_uevent(&p->kobj, KOBJ_ADD);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ if (br_devices_support_netpoll(br)) {
+ br->dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
+ if (br->dev->npinfo)
+ dev->npinfo = br->dev->npinfo;
+ } else if (!(br->dev->priv_flags & IFF_DISABLE_NETPOLL)) {
+ br->dev->priv_flags |= IFF_DISABLE_NETPOLL;
+ printk(KERN_INFO "New device %s does not support netpoll\n",
+ dev->name);
+ printk(KERN_INFO "Disabling netpoll for %s\n",
+ br->dev->name);
+ }
+#endif
+
return 0;
err2:
br_fdb_delete_by_port(br, p, 1);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index a82dde2d2ead..d36e700f7a26 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -24,14 +24,16 @@ const u8 br_group_address[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static int br_pass_frame_up(struct sk_buff *skb)
{
struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
+ struct net_bridge *br = netdev_priv(brdev);
+ struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats);
- brdev->stats.rx_packets++;
- brdev->stats.rx_bytes += skb->len;
+ brstats->rx_packets++;
+ brstats->rx_bytes += skb->len;
indev = skb->dev;
skb->dev = brdev;
- return NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL,
+ return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL,
netif_receive_skb);
}
@@ -154,7 +156,7 @@ struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb)
if (p->br->stp_enabled == BR_NO_STP && dest[5] == 0)
goto forward;
- if (NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
+ if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
NULL, br_handle_local_finish))
return NULL; /* frame consumed by filter */
else
@@ -175,7 +177,7 @@ forward:
if (!compare_ether_addr(p->br->dev->dev_addr, dest))
skb->pkt_type = PACKET_HOST;
- NF_HOOK(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
+ NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
br_handle_frame_finish);
break;
default:
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index eaa0e1bae49b..c8419e240316 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -24,51 +24,139 @@
#include <linux/slab.h>
#include <linux/timer.h>
#include <net/ip.h>
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#include <net/ipv6.h>
+#include <net/mld.h>
+#include <net/addrconf.h>
+#include <net/ip6_checksum.h>
+#endif
#include "br_private.h"
-static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb, __be32 ip)
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static inline int ipv6_is_local_multicast(const struct in6_addr *addr)
{
- return jhash_1word(mdb->secret, (u32)ip) & (mdb->max - 1);
+ if (ipv6_addr_is_multicast(addr) &&
+ IPV6_ADDR_MC_SCOPE(addr) <= IPV6_ADDR_SCOPE_LINKLOCAL)
+ return 1;
+ return 0;
+}
+#endif
+
+static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
+{
+ if (a->proto != b->proto)
+ return 0;
+ switch (a->proto) {
+ case htons(ETH_P_IP):
+ return a->u.ip4 == b->u.ip4;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ case htons(ETH_P_IPV6):
+ return ipv6_addr_equal(&a->u.ip6, &b->u.ip6);
+#endif
+ }
+ return 0;
+}
+
+static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip)
+{
+ return jhash_1word(mdb->secret, (__force u32)ip) & (mdb->max - 1);
+}
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb,
+ const struct in6_addr *ip)
+{
+ return jhash2((__force u32 *)ip->s6_addr32, 4, mdb->secret) & (mdb->max - 1);
+}
+#endif
+
+static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb,
+ struct br_ip *ip)
+{
+ switch (ip->proto) {
+ case htons(ETH_P_IP):
+ return __br_ip4_hash(mdb, ip->u.ip4);
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ case htons(ETH_P_IPV6):
+ return __br_ip6_hash(mdb, &ip->u.ip6);
+#endif
+ }
+ return 0;
}
static struct net_bridge_mdb_entry *__br_mdb_ip_get(
- struct net_bridge_mdb_htable *mdb, __be32 dst, int hash)
+ struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash)
{
struct net_bridge_mdb_entry *mp;
struct hlist_node *p;
hlist_for_each_entry_rcu(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
- if (dst == mp->addr)
+ if (br_ip_equal(&mp->addr, dst))
return mp;
}
return NULL;
}
-static struct net_bridge_mdb_entry *br_mdb_ip_get(
+static struct net_bridge_mdb_entry *br_mdb_ip4_get(
struct net_bridge_mdb_htable *mdb, __be32 dst)
{
- if (!mdb)
- return NULL;
+ struct br_ip br_dst;
+
+ br_dst.u.ip4 = dst;
+ br_dst.proto = htons(ETH_P_IP);
+
+ return __br_mdb_ip_get(mdb, &br_dst, __br_ip4_hash(mdb, dst));
+}
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static struct net_bridge_mdb_entry *br_mdb_ip6_get(
+ struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst)
+{
+ struct br_ip br_dst;
+
+ ipv6_addr_copy(&br_dst.u.ip6, dst);
+ br_dst.proto = htons(ETH_P_IPV6);
+ return __br_mdb_ip_get(mdb, &br_dst, __br_ip6_hash(mdb, dst));
+}
+#endif
+
+static struct net_bridge_mdb_entry *br_mdb_ip_get(
+ struct net_bridge_mdb_htable *mdb, struct br_ip *dst)
+{
return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
}
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
struct sk_buff *skb)
{
- if (br->multicast_disabled)
+ struct net_bridge_mdb_htable *mdb = br->mdb;
+ struct br_ip ip;
+
+ if (!mdb || br->multicast_disabled)
return NULL;
+ if (BR_INPUT_SKB_CB(skb)->igmp)
+ return NULL;
+
+ ip.proto = skb->protocol;
+
switch (skb->protocol) {
case htons(ETH_P_IP):
- if (BR_INPUT_SKB_CB(skb)->igmp)
- break;
- return br_mdb_ip_get(br->mdb, ip_hdr(skb)->daddr);
+ ip.u.ip4 = ip_hdr(skb)->daddr;
+ break;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ case htons(ETH_P_IPV6):
+ ipv6_addr_copy(&ip.u.ip6, &ipv6_hdr(skb)->daddr);
+ break;
+#endif
+ default:
+ return NULL;
}
- return NULL;
+ return br_mdb_ip_get(mdb, &ip);
}
static void br_mdb_free(struct rcu_head *head)
@@ -95,7 +183,7 @@ static int br_mdb_copy(struct net_bridge_mdb_htable *new,
for (i = 0; i < old->max; i++)
hlist_for_each_entry(mp, p, &old->mhash[i], hlist[old->ver])
hlist_add_head(&mp->hlist[new->ver],
- &new->mhash[br_ip_hash(new, mp->addr)]);
+ &new->mhash[br_ip_hash(new, &mp->addr)]);
if (!elasticity)
return 0;
@@ -163,7 +251,7 @@ static void br_multicast_del_pg(struct net_bridge *br,
struct net_bridge_port_group *p;
struct net_bridge_port_group **pp;
- mp = br_mdb_ip_get(mdb, pg->addr);
+ mp = br_mdb_ip_get(mdb, &pg->addr);
if (WARN_ON(!mp))
return;
@@ -171,7 +259,7 @@ static void br_multicast_del_pg(struct net_bridge *br,
if (p != pg)
continue;
- *pp = p->next;
+ rcu_assign_pointer(*pp, p->next);
hlist_del_init(&p->mglist);
del_timer(&p->timer);
del_timer(&p->query_timer);
@@ -249,8 +337,8 @@ out:
return 0;
}
-static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
- __be32 group)
+static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
+ __be32 group)
{
struct sk_buff *skb;
struct igmphdr *ih;
@@ -314,12 +402,104 @@ out:
return skb;
}
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
+ struct in6_addr *group)
+{
+ struct sk_buff *skb;
+ struct ipv6hdr *ip6h;
+ struct mld_msg *mldq;
+ struct ethhdr *eth;
+ u8 *hopopt;
+ unsigned long interval;
+
+ skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) +
+ 8 + sizeof(*mldq));
+ if (!skb)
+ goto out;
+
+ skb->protocol = htons(ETH_P_IPV6);
+
+ /* Ethernet header */
+ skb_reset_mac_header(skb);
+ eth = eth_hdr(skb);
+
+ memcpy(eth->h_source, br->dev->dev_addr, 6);
+ ipv6_eth_mc_map(group, eth->h_dest);
+ eth->h_proto = htons(ETH_P_IPV6);
+ skb_put(skb, sizeof(*eth));
+
+ /* IPv6 header + HbH option */
+ skb_set_network_header(skb, skb->len);
+ ip6h = ipv6_hdr(skb);
+
+ *(__force __be32 *)ip6h = htonl(0x60000000);
+	ip6h->payload_len = htons(8 + sizeof(*mldq));
+ ip6h->nexthdr = IPPROTO_HOPOPTS;
+ ip6h->hop_limit = 1;
+ ipv6_addr_set(&ip6h->saddr, 0, 0, 0, 0);
+ ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
+
+ hopopt = (u8 *)(ip6h + 1);
+ hopopt[0] = IPPROTO_ICMPV6; /* next hdr */
+ hopopt[1] = 0; /* length of HbH */
+ hopopt[2] = IPV6_TLV_ROUTERALERT; /* Router Alert */
+ hopopt[3] = 2; /* Length of RA Option */
+ hopopt[4] = 0; /* Type = 0x0000 (MLD) */
+ hopopt[5] = 0;
+ hopopt[6] = IPV6_TLV_PAD0; /* Pad0 */
+ hopopt[7] = IPV6_TLV_PAD0; /* Pad0 */
+
+ skb_put(skb, sizeof(*ip6h) + 8);
+
+ /* ICMPv6 */
+ skb_set_transport_header(skb, skb->len);
+ mldq = (struct mld_msg *) icmp6_hdr(skb);
+
+ interval = ipv6_addr_any(group) ? br->multicast_last_member_interval :
+ br->multicast_query_response_interval;
+
+ mldq->mld_type = ICMPV6_MGM_QUERY;
+ mldq->mld_code = 0;
+ mldq->mld_cksum = 0;
+ mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
+ mldq->mld_reserved = 0;
+ ipv6_addr_copy(&mldq->mld_mca, group);
+
+ /* checksum */
+ mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+ sizeof(*mldq), IPPROTO_ICMPV6,
+ csum_partial(mldq,
+ sizeof(*mldq), 0));
+ skb_put(skb, sizeof(*mldq));
+
+ __skb_pull(skb, sizeof(*eth));
+
+out:
+ return skb;
+}
+#endif
+
+static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
+ struct br_ip *addr)
+{
+ switch (addr->proto) {
+ case htons(ETH_P_IP):
+ return br_ip4_multicast_alloc_query(br, addr->u.ip4);
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ case htons(ETH_P_IPV6):
+ return br_ip6_multicast_alloc_query(br, &addr->u.ip6);
+#endif
+ }
+ return NULL;
+}
+
static void br_multicast_send_group_query(struct net_bridge_mdb_entry *mp)
{
struct net_bridge *br = mp->br;
struct sk_buff *skb;
- skb = br_multicast_alloc_query(br, mp->addr);
+ skb = br_multicast_alloc_query(br, &mp->addr);
if (!skb)
goto timer;
@@ -353,7 +533,7 @@ static void br_multicast_send_port_group_query(struct net_bridge_port_group *pg)
struct net_bridge *br = port->br;
struct sk_buff *skb;
- skb = br_multicast_alloc_query(br, pg->addr);
+ skb = br_multicast_alloc_query(br, &pg->addr);
if (!skb)
goto timer;
@@ -383,8 +563,8 @@ out:
}
static struct net_bridge_mdb_entry *br_multicast_get_group(
- struct net_bridge *br, struct net_bridge_port *port, __be32 group,
- int hash)
+ struct net_bridge *br, struct net_bridge_port *port,
+ struct br_ip *group, int hash)
{
struct net_bridge_mdb_htable *mdb = br->mdb;
struct net_bridge_mdb_entry *mp;
@@ -396,9 +576,8 @@ static struct net_bridge_mdb_entry *br_multicast_get_group(
hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
count++;
- if (unlikely(group == mp->addr)) {
+ if (unlikely(br_ip_equal(group, &mp->addr)))
return mp;
- }
}
elasticity = 0;
@@ -463,7 +642,8 @@ err:
}
static struct net_bridge_mdb_entry *br_multicast_new_group(
- struct net_bridge *br, struct net_bridge_port *port, __be32 group)
+ struct net_bridge *br, struct net_bridge_port *port,
+ struct br_ip *group)
{
struct net_bridge_mdb_htable *mdb = br->mdb;
struct net_bridge_mdb_entry *mp;
@@ -496,7 +676,7 @@ rehash:
goto out;
mp->br = br;
- mp->addr = group;
+ mp->addr = *group;
setup_timer(&mp->timer, br_multicast_group_expired,
(unsigned long)mp);
setup_timer(&mp->query_timer, br_multicast_group_query_expired,
@@ -510,7 +690,8 @@ out:
}
static int br_multicast_add_group(struct net_bridge *br,
- struct net_bridge_port *port, __be32 group)
+ struct net_bridge_port *port,
+ struct br_ip *group)
{
struct net_bridge_mdb_entry *mp;
struct net_bridge_port_group *p;
@@ -518,9 +699,6 @@ static int br_multicast_add_group(struct net_bridge *br,
unsigned long now = jiffies;
int err;
- if (ipv4_is_local_multicast(group))
- return 0;
-
spin_lock(&br->multicast_lock);
if (!netif_running(br->dev) ||
(port && port->state == BR_STATE_DISABLED))
@@ -549,7 +727,7 @@ static int br_multicast_add_group(struct net_bridge *br,
if (unlikely(!p))
goto err;
- p->addr = group;
+ p->addr = *group;
p->port = port;
p->next = *pp;
hlist_add_head(&p->mglist, &port->mglist);
@@ -570,6 +748,38 @@ err:
return err;
}
+static int br_ip4_multicast_add_group(struct net_bridge *br,
+ struct net_bridge_port *port,
+ __be32 group)
+{
+ struct br_ip br_group;
+
+ if (ipv4_is_local_multicast(group))
+ return 0;
+
+ br_group.u.ip4 = group;
+ br_group.proto = htons(ETH_P_IP);
+
+ return br_multicast_add_group(br, port, &br_group);
+}
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static int br_ip6_multicast_add_group(struct net_bridge *br,
+ struct net_bridge_port *port,
+ const struct in6_addr *group)
+{
+ struct br_ip br_group;
+
+ if (ipv6_is_local_multicast(group))
+ return 0;
+
+ ipv6_addr_copy(&br_group.u.ip6, group);
+	br_group.proto = htons(ETH_P_IPV6);
+
+ return br_multicast_add_group(br, port, &br_group);
+}
+#endif
+
static void br_multicast_router_expired(unsigned long data)
{
struct net_bridge_port *port = (void *)data;
@@ -591,29 +801,45 @@ static void br_multicast_local_router_expired(unsigned long data)
{
}
-static void br_multicast_send_query(struct net_bridge *br,
- struct net_bridge_port *port, u32 sent)
+static void __br_multicast_send_query(struct net_bridge *br,
+ struct net_bridge_port *port,
+ struct br_ip *ip)
{
- unsigned long time;
struct sk_buff *skb;
- if (!netif_running(br->dev) || br->multicast_disabled ||
- timer_pending(&br->multicast_querier_timer))
- return;
-
- skb = br_multicast_alloc_query(br, 0);
+ skb = br_multicast_alloc_query(br, ip);
if (!skb)
- goto timer;
+ return;
if (port) {
__skb_push(skb, sizeof(struct ethhdr));
skb->dev = port->dev;
- NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
+ NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
dev_queue_xmit);
} else
netif_rx(skb);
+}
+
+static void br_multicast_send_query(struct net_bridge *br,
+ struct net_bridge_port *port, u32 sent)
+{
+ unsigned long time;
+ struct br_ip br_group;
+
+ if (!netif_running(br->dev) || br->multicast_disabled ||
+ timer_pending(&br->multicast_querier_timer))
+ return;
+
+ memset(&br_group.u, 0, sizeof(br_group.u));
+
+ br_group.proto = htons(ETH_P_IP);
+ __br_multicast_send_query(br, port, &br_group);
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ br_group.proto = htons(ETH_P_IPV6);
+ __br_multicast_send_query(br, port, &br_group);
+#endif
-timer:
time = jiffies;
time += sent < br->multicast_startup_query_count ?
br->multicast_startup_query_interval :
@@ -698,9 +924,9 @@ void br_multicast_disable_port(struct net_bridge_port *port)
spin_unlock(&br->multicast_lock);
}
-static int br_multicast_igmp3_report(struct net_bridge *br,
- struct net_bridge_port *port,
- struct sk_buff *skb)
+static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
+ struct net_bridge_port *port,
+ struct sk_buff *skb)
{
struct igmpv3_report *ih;
struct igmpv3_grec *grec;
@@ -745,7 +971,7 @@ static int br_multicast_igmp3_report(struct net_bridge *br,
continue;
}
- err = br_multicast_add_group(br, port, group);
+ err = br_ip4_multicast_add_group(br, port, group);
if (err)
break;
}
@@ -753,24 +979,87 @@ static int br_multicast_igmp3_report(struct net_bridge *br,
return err;
}
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static int br_ip6_multicast_mld2_report(struct net_bridge *br,
+ struct net_bridge_port *port,
+ struct sk_buff *skb)
+{
+ struct icmp6hdr *icmp6h;
+ struct mld2_grec *grec;
+ int i;
+ int len;
+ int num;
+ int err = 0;
+
+ if (!pskb_may_pull(skb, sizeof(*icmp6h)))
+ return -EINVAL;
+
+ icmp6h = icmp6_hdr(skb);
+ num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
+ len = sizeof(*icmp6h);
+
+ for (i = 0; i < num; i++) {
+ __be16 *nsrcs, _nsrcs;
+
+ nsrcs = skb_header_pointer(skb,
+ len + offsetof(struct mld2_grec,
+ grec_mca),
+ sizeof(_nsrcs), &_nsrcs);
+ if (!nsrcs)
+ return -EINVAL;
+
+ if (!pskb_may_pull(skb,
+ len + sizeof(*grec) +
+ sizeof(struct in6_addr) * (*nsrcs)))
+ return -EINVAL;
+
+ grec = (struct mld2_grec *)(skb->data + len);
+ len += sizeof(*grec) + sizeof(struct in6_addr) * (*nsrcs);
+
+ /* We treat these as MLDv1 reports for now. */
+ switch (grec->grec_type) {
+ case MLD2_MODE_IS_INCLUDE:
+ case MLD2_MODE_IS_EXCLUDE:
+ case MLD2_CHANGE_TO_INCLUDE:
+ case MLD2_CHANGE_TO_EXCLUDE:
+ case MLD2_ALLOW_NEW_SOURCES:
+ case MLD2_BLOCK_OLD_SOURCES:
+ break;
+
+ default:
+ continue;
+ }
+
+ err = br_ip6_multicast_add_group(br, port, &grec->grec_mca);
+ if (!err)
+ break;
+ }
+
+ return err;
+}
+#endif
+
+/*
+ * Add port to router_list
+ * list is maintained ordered by pointer value
+ * and locked by br->multicast_lock and RCU
+ */
static void br_multicast_add_router(struct net_bridge *br,
struct net_bridge_port *port)
{
- struct hlist_node *p;
- struct hlist_node **h;
-
- for (h = &br->router_list.first;
- (p = *h) &&
- (unsigned long)container_of(p, struct net_bridge_port, rlist) >
- (unsigned long)port;
- h = &p->next)
- ;
-
- port->rlist.pprev = h;
- port->rlist.next = p;
- rcu_assign_pointer(*h, &port->rlist);
- if (p)
- p->pprev = &port->rlist.next;
+ struct net_bridge_port *p;
+ struct hlist_node *n, *slot = NULL;
+
+ hlist_for_each_entry(p, n, &br->router_list, rlist) {
+ if ((unsigned long) port >= (unsigned long) p)
+ break;
+ slot = n;
+ }
+
+ if (slot)
+ hlist_add_after_rcu(slot, &port->rlist);
+ else
+ hlist_add_head_rcu(&port->rlist, &br->router_list);
}
static void br_multicast_mark_router(struct net_bridge *br,
@@ -800,7 +1089,7 @@ timer:
static void br_multicast_query_received(struct net_bridge *br,
struct net_bridge_port *port,
- __be32 saddr)
+ int saddr)
{
if (saddr)
mod_timer(&br->multicast_querier_timer,
@@ -811,9 +1100,9 @@ static void br_multicast_query_received(struct net_bridge *br,
br_multicast_mark_router(br, port);
}
-static int br_multicast_query(struct net_bridge *br,
- struct net_bridge_port *port,
- struct sk_buff *skb)
+static int br_ip4_multicast_query(struct net_bridge *br,
+ struct net_bridge_port *port,
+ struct sk_buff *skb)
{
struct iphdr *iph = ip_hdr(skb);
struct igmphdr *ih = igmp_hdr(skb);
@@ -831,7 +1120,7 @@ static int br_multicast_query(struct net_bridge *br,
(port && port->state == BR_STATE_DISABLED))
goto out;
- br_multicast_query_received(br, port, iph->saddr);
+ br_multicast_query_received(br, port, !!iph->saddr);
group = ih->group;
@@ -859,7 +1148,7 @@ static int br_multicast_query(struct net_bridge *br,
if (!group)
goto out;
- mp = br_mdb_ip_get(br->mdb, group);
+ mp = br_mdb_ip4_get(br->mdb, group);
if (!mp)
goto out;
@@ -883,9 +1172,78 @@ out:
return err;
}
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static int br_ip6_multicast_query(struct net_bridge *br,
+ struct net_bridge_port *port,
+ struct sk_buff *skb)
+{
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ struct mld_msg *mld = (struct mld_msg *) icmp6_hdr(skb);
+ struct net_bridge_mdb_entry *mp;
+ struct mld2_query *mld2q;
+ struct net_bridge_port_group *p, **pp;
+ unsigned long max_delay;
+ unsigned long now = jiffies;
+ struct in6_addr *group = NULL;
+ int err = 0;
+
+ spin_lock(&br->multicast_lock);
+ if (!netif_running(br->dev) ||
+ (port && port->state == BR_STATE_DISABLED))
+ goto out;
+
+ br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr));
+
+ if (skb->len == sizeof(*mld)) {
+ if (!pskb_may_pull(skb, sizeof(*mld))) {
+ err = -EINVAL;
+ goto out;
+ }
+ mld = (struct mld_msg *) icmp6_hdr(skb);
+		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
+ if (max_delay)
+ group = &mld->mld_mca;
+ } else if (skb->len >= sizeof(*mld2q)) {
+ if (!pskb_may_pull(skb, sizeof(*mld2q))) {
+ err = -EINVAL;
+ goto out;
+ }
+ mld2q = (struct mld2_query *)icmp6_hdr(skb);
+ if (!mld2q->mld2q_nsrcs)
+ group = &mld2q->mld2q_mca;
+ max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(mld2q->mld2q_mrc) : 1;
+ }
+
+ if (!group)
+ goto out;
+
+ mp = br_mdb_ip6_get(br->mdb, group);
+ if (!mp)
+ goto out;
+
+ max_delay *= br->multicast_last_member_count;
+ if (!hlist_unhashed(&mp->mglist) &&
+ (timer_pending(&mp->timer) ?
+ time_after(mp->timer.expires, now + max_delay) :
+ try_to_del_timer_sync(&mp->timer) >= 0))
+ mod_timer(&mp->timer, now + max_delay);
+
+ for (pp = &mp->ports; (p = *pp); pp = &p->next) {
+ if (timer_pending(&p->timer) ?
+ time_after(p->timer.expires, now + max_delay) :
+ try_to_del_timer_sync(&p->timer) >= 0)
+ mod_timer(&mp->timer, now + max_delay);
+ }
+
+out:
+ spin_unlock(&br->multicast_lock);
+ return err;
+}
+#endif
+
static void br_multicast_leave_group(struct net_bridge *br,
struct net_bridge_port *port,
- __be32 group)
+ struct br_ip *group)
{
struct net_bridge_mdb_htable *mdb;
struct net_bridge_mdb_entry *mp;
@@ -893,9 +1251,6 @@ static void br_multicast_leave_group(struct net_bridge *br,
unsigned long now;
unsigned long time;
- if (ipv4_is_local_multicast(group))
- return;
-
spin_lock(&br->multicast_lock);
if (!netif_running(br->dev) ||
(port && port->state == BR_STATE_DISABLED) ||
@@ -946,6 +1301,38 @@ out:
spin_unlock(&br->multicast_lock);
}
+static void br_ip4_multicast_leave_group(struct net_bridge *br,
+ struct net_bridge_port *port,
+ __be32 group)
+{
+ struct br_ip br_group;
+
+ if (ipv4_is_local_multicast(group))
+ return;
+
+ br_group.u.ip4 = group;
+ br_group.proto = htons(ETH_P_IP);
+
+ br_multicast_leave_group(br, port, &br_group);
+}
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static void br_ip6_multicast_leave_group(struct net_bridge *br,
+ struct net_bridge_port *port,
+ const struct in6_addr *group)
+{
+ struct br_ip br_group;
+
+ if (ipv6_is_local_multicast(group))
+ return;
+
+ ipv6_addr_copy(&br_group.u.ip6, group);
+ br_group.proto = htons(ETH_P_IPV6);
+
+ br_multicast_leave_group(br, port, &br_group);
+}
+#endif
+
static int br_multicast_ipv4_rcv(struct net_bridge *br,
struct net_bridge_port *port,
struct sk_buff *skb)
@@ -1000,8 +1387,6 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
if (!pskb_may_pull(skb2, sizeof(*ih)))
goto out;
- iph = ip_hdr(skb2);
-
switch (skb2->ip_summed) {
case CHECKSUM_COMPLETE:
if (!csum_fold(skb2->csum))
@@ -1022,16 +1407,16 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
case IGMP_HOST_MEMBERSHIP_REPORT:
case IGMPV2_HOST_MEMBERSHIP_REPORT:
BR_INPUT_SKB_CB(skb2)->mrouters_only = 1;
- err = br_multicast_add_group(br, port, ih->group);
+ err = br_ip4_multicast_add_group(br, port, ih->group);
break;
case IGMPV3_HOST_MEMBERSHIP_REPORT:
- err = br_multicast_igmp3_report(br, port, skb2);
+ err = br_ip4_multicast_igmp3_report(br, port, skb2);
break;
case IGMP_HOST_MEMBERSHIP_QUERY:
- err = br_multicast_query(br, port, skb2);
+ err = br_ip4_multicast_query(br, port, skb2);
break;
case IGMP_HOST_LEAVE_MESSAGE:
- br_multicast_leave_group(br, port, ih->group);
+ br_ip4_multicast_leave_group(br, port, ih->group);
break;
}
@@ -1043,6 +1428,123 @@ err_out:
return err;
}
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static int br_multicast_ipv6_rcv(struct net_bridge *br,
+ struct net_bridge_port *port,
+ struct sk_buff *skb)
+{
+ struct sk_buff *skb2 = skb;
+ struct ipv6hdr *ip6h;
+ struct icmp6hdr *icmp6h;
+ u8 nexthdr;
+ unsigned len;
+	int offset;
+ int err;
+
+ if (!pskb_may_pull(skb, sizeof(*ip6h)))
+ return -EINVAL;
+
+ ip6h = ipv6_hdr(skb);
+
+ /*
+ * We're interested in MLD messages only.
+ * - Version is 6
+	 *  - MLD always has a Router Alert hop-by-hop option
+	 *  - But we do not support jumbograms.
+ */
+ if (ip6h->version != 6 ||
+ ip6h->nexthdr != IPPROTO_HOPOPTS ||
+ ip6h->payload_len == 0)
+ return 0;
+
+ len = ntohs(ip6h->payload_len);
+ if (skb->len < len)
+ return -EINVAL;
+
+ nexthdr = ip6h->nexthdr;
+ offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
+
+ if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
+ return 0;
+
+ /* Okay, we found ICMPv6 header */
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (!skb2)
+ return -ENOMEM;
+
+ len -= offset - skb_network_offset(skb2);
+
+ __skb_pull(skb2, offset);
+ skb_reset_transport_header(skb2);
+
+ err = -EINVAL;
+ if (!pskb_may_pull(skb2, sizeof(*icmp6h)))
+ goto out;
+
+ icmp6h = icmp6_hdr(skb2);
+
+ switch (icmp6h->icmp6_type) {
+ case ICMPV6_MGM_QUERY:
+ case ICMPV6_MGM_REPORT:
+ case ICMPV6_MGM_REDUCTION:
+ case ICMPV6_MLD2_REPORT:
+ break;
+ default:
+ err = 0;
+ goto out;
+ }
+
+ /* Okay, we found MLD message. Check further. */
+ if (skb2->len > len) {
+ err = pskb_trim_rcsum(skb2, len);
+ if (err)
+ goto out;
+ }
+
+ switch (skb2->ip_summed) {
+ case CHECKSUM_COMPLETE:
+ if (!csum_fold(skb2->csum))
+ break;
+ /*FALLTHROUGH*/
+ case CHECKSUM_NONE:
+ skb2->csum = 0;
+ if (skb_checksum_complete(skb2))
+ goto out;
+ }
+
+ err = 0;
+
+ BR_INPUT_SKB_CB(skb)->igmp = 1;
+
+ switch (icmp6h->icmp6_type) {
+ case ICMPV6_MGM_REPORT:
+ {
+ struct mld_msg *mld = (struct mld_msg *)icmp6h;
+ BR_INPUT_SKB_CB(skb2)->mrouters_only = 1;
+ err = br_ip6_multicast_add_group(br, port, &mld->mld_mca);
+ break;
+ }
+ case ICMPV6_MLD2_REPORT:
+ err = br_ip6_multicast_mld2_report(br, port, skb2);
+ break;
+ case ICMPV6_MGM_QUERY:
+ err = br_ip6_multicast_query(br, port, skb2);
+ break;
+ case ICMPV6_MGM_REDUCTION:
+ {
+ struct mld_msg *mld = (struct mld_msg *)icmp6h;
+ br_ip6_multicast_leave_group(br, port, &mld->mld_mca);
+ }
+ }
+
+out:
+ __skb_push(skb2, offset);
+ if (skb2 != skb)
+ kfree_skb(skb2);
+ return err;
+}
+#endif
+
int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
struct sk_buff *skb)
{
@@ -1055,6 +1557,10 @@ int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
switch (skb->protocol) {
case htons(ETH_P_IP):
return br_multicast_ipv4_rcv(br, port, skb);
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ case htons(ETH_P_IPV6):
+ return br_multicast_ipv6_rcv(br, port, skb);
+#endif
}
return 0;
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 4c4977d12fd6..93f80fefa496 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -3,15 +3,8 @@
* Linux ethernet bridge
*
* Authors:
- * Lennert Buytenhek <buytenh@gnu.org>
- * Bart De Schuymer (maintainer) <bdschuym@pandora.be>
- *
- * Changes:
- * Apr 29 2003: physdev module support (bdschuym)
- * Jun 19 2003: let arptables see bridged ARP traffic (bdschuym)
- * Oct 06 2003: filter encapsulated IP/ARP VLAN traffic on untagged bridge
- * (bdschuym)
- * Sep 01 2004: add IPv6 filtering (bdschuym)
+ * Lennert Buytenhek <buytenh@gnu.org>
+ * Bart De Schuymer <bdschuym@pandora.be>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -204,15 +197,24 @@ static inline void nf_bridge_save_header(struct sk_buff *skb)
skb->nf_bridge->data, header_size);
}
-/*
- * When forwarding bridge frames, we save a copy of the original
- * header before processing.
+static inline void nf_bridge_update_protocol(struct sk_buff *skb)
+{
+ if (skb->nf_bridge->mask & BRNF_8021Q)
+ skb->protocol = htons(ETH_P_8021Q);
+ else if (skb->nf_bridge->mask & BRNF_PPPoE)
+ skb->protocol = htons(ETH_P_PPP_SES);
+}
+
+/* Fill in the header for fragmented IP packets handled by
+ * the IPv4 connection tracking code.
*/
int nf_bridge_copy_header(struct sk_buff *skb)
{
int err;
- int header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
+ unsigned int header_size;
+ nf_bridge_update_protocol(skb);
+ header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
err = skb_cow_head(skb, header_size);
if (err)
return err;
@@ -246,27 +248,48 @@ static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
skb_dst_set(skb, &rt->u.dst);
skb->dev = nf_bridge->physindev;
+ nf_bridge_update_protocol(skb);
nf_bridge_push_encap_header(skb);
- NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
+ NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
br_handle_frame_finish, 1);
return 0;
}
-static void __br_dnat_complain(void)
+/* Obtain the correct destination MAC address, while preserving the original
+ * source MAC address. If we already know this address, we just copy it. If we
+ * don't, we use the neighbour framework to find out. In both cases, we make
+ * sure that br_handle_frame_finish() is called afterwards.
+ */
+static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
{
- static unsigned long last_complaint;
+ struct nf_bridge_info *nf_bridge = skb->nf_bridge;
+ struct dst_entry *dst;
- if (jiffies - last_complaint >= 5 * HZ) {
- printk(KERN_WARNING "Performing cross-bridge DNAT requires IP "
- "forwarding to be enabled\n");
- last_complaint = jiffies;
+ skb->dev = bridge_parent(skb->dev);
+ if (!skb->dev)
+ goto free_skb;
+ dst = skb_dst(skb);
+ if (dst->hh) {
+ neigh_hh_bridge(dst->hh, skb);
+ skb->dev = nf_bridge->physindev;
+ return br_handle_frame_finish(skb);
+ } else if (dst->neighbour) {
+ /* the neighbour function below overwrites the complete
+ * MAC header, so we save the Ethernet source address and
+ * protocol number. */
+ skb_copy_from_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN), skb->nf_bridge->data, ETH_HLEN-ETH_ALEN);
+ /* tell br_dev_xmit to continue with forwarding */
+ nf_bridge->mask |= BRNF_BRIDGED_DNAT;
+ return dst->neighbour->output(skb);
}
+free_skb:
+ kfree_skb(skb);
+ return 0;
}
/* This requires some explaining. If DNAT has taken place,
- * we will need to fix up the destination Ethernet address,
- * and this is a tricky process.
+ * we will need to fix up the destination Ethernet address.
*
* There are two cases to consider:
* 1. The packet was DNAT'ed to a device in the same bridge
@@ -280,62 +303,29 @@ static void __br_dnat_complain(void)
* call ip_route_input() and to look at skb->dst->dev, which is
* changed to the destination device if ip_route_input() succeeds.
*
- * Let us first consider the case that ip_route_input() succeeds:
- *
- * If skb->dst->dev equals the logical bridge device the packet
- * came in on, we can consider this bridging. The packet is passed
- * through the neighbour output function to build a new destination
- * MAC address, which will make the packet enter br_nf_local_out()
- * not much later. In that function it is assured that the iptables
- * FORWARD chain is traversed for the packet.
+ * Let's first consider the case that ip_route_input() succeeds:
*
+ * If the output device equals the logical bridge device the packet
+ * came in on, we can consider this bridging. The corresponding MAC
+ * address will be obtained in br_nf_pre_routing_finish_bridge.
* Otherwise, the packet is considered to be routed and we just
* change the destination MAC address so that the packet will
* later be passed up to the IP stack to be routed. For a redirected
* packet, ip_route_input() will give back the localhost as output device,
* which differs from the bridge device.
*
- * Let us now consider the case that ip_route_input() fails:
+ * Let's now consider the case that ip_route_input() fails:
*
* This can be because the destination address is martian, in which case
* the packet will be dropped.
- * After a "echo '0' > /proc/sys/net/ipv4/ip_forward" ip_route_input()
- * will fail, while __ip_route_output_key() will return success. The source
- * address for __ip_route_output_key() is set to zero, so __ip_route_output_key
+ * If IP forwarding is disabled, ip_route_input() will fail, while
+ * ip_route_output_key() can return success. The source
+ * address for ip_route_output_key() is set to zero, so ip_route_output_key()
* thinks we're handling a locally generated packet and won't care
- * if IP forwarding is allowed. We send a warning message to the users's
- * log telling her to put IP forwarding on.
- *
- * ip_route_input() will also fail if there is no route available.
- * In that case we just drop the packet.
- *
- * --Lennert, 20020411
- * --Bart, 20020416 (updated)
- * --Bart, 20021007 (updated)
- * --Bart, 20062711 (updated) */
-static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
-{
- if (skb->pkt_type == PACKET_OTHERHOST) {
- skb->pkt_type = PACKET_HOST;
- skb->nf_bridge->mask |= BRNF_PKT_TYPE;
- }
- skb->nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
-
- skb->dev = bridge_parent(skb->dev);
- if (skb->dev) {
- struct dst_entry *dst = skb_dst(skb);
-
- nf_bridge_pull_encap_header(skb);
-
- if (dst->hh)
- return neigh_hh_output(dst->hh, skb);
- else if (dst->neighbour)
- return dst->neighbour->output(skb);
- }
- kfree_skb(skb);
- return 0;
-}
-
+ * if IP forwarding is enabled. If the output device equals the logical bridge
+ * device, we proceed as if ip_route_input() succeeded. If it differs from the
+ * logical bridge port or if ip_route_output_key() fails we drop the packet.
+ */
static int br_nf_pre_routing_finish(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
@@ -379,11 +369,6 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb)
skb_dst_set(skb, (struct dst_entry *)rt);
goto bridged_dnat;
}
- /* we are sure that forwarding is disabled, so printing
- * this message is no problem. Note that the packet could
- * still have a martian destination address, in which case
- * the packet could be dropped even if forwarding were enabled */
- __br_dnat_complain();
dst_release((struct dst_entry *)rt);
}
free_skb:
@@ -392,12 +377,11 @@ free_skb:
} else {
if (skb_dst(skb)->dev == dev) {
bridged_dnat:
- /* Tell br_nf_local_out this is a
- * bridged frame */
- nf_bridge->mask |= BRNF_BRIDGED_DNAT;
skb->dev = nf_bridge->physindev;
+ nf_bridge_update_protocol(skb);
nf_bridge_push_encap_header(skb);
- NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING,
+ NF_HOOK_THRESH(NFPROTO_BRIDGE,
+ NF_BR_PRE_ROUTING,
skb, skb->dev, NULL,
br_nf_pre_routing_finish_bridge,
1);
@@ -417,8 +401,9 @@ bridged_dnat:
}
skb->dev = nf_bridge->physindev;
+ nf_bridge_update_protocol(skb);
nf_bridge_push_encap_header(skb);
- NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
+ NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
br_handle_frame_finish, 1);
return 0;
@@ -437,6 +422,10 @@ static struct net_device *setup_pre_routing(struct sk_buff *skb)
nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING;
nf_bridge->physindev = skb->dev;
skb->dev = bridge_parent(skb->dev);
+ if (skb->protocol == htons(ETH_P_8021Q))
+ nf_bridge->mask |= BRNF_8021Q;
+ else if (skb->protocol == htons(ETH_P_PPP_SES))
+ nf_bridge->mask |= BRNF_PPPoE;
return skb->dev;
}
@@ -535,7 +524,8 @@ static unsigned int br_nf_pre_routing_ipv6(unsigned int hook,
if (!setup_pre_routing(skb))
return NF_DROP;
- NF_HOOK(PF_INET6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
+ skb->protocol = htons(ETH_P_IPV6);
+ NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
br_nf_pre_routing_finish_ipv6);
return NF_STOLEN;
@@ -607,8 +597,9 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
if (!setup_pre_routing(skb))
return NF_DROP;
store_orig_dstaddr(skb);
+ skb->protocol = htons(ETH_P_IP);
- NF_HOOK(PF_INET, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
+ NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
br_nf_pre_routing_finish);
return NF_STOLEN;
@@ -655,8 +646,10 @@ static int br_nf_forward_finish(struct sk_buff *skb)
} else {
in = *((struct net_device **)(skb->cb));
}
+ nf_bridge_update_protocol(skb);
nf_bridge_push_encap_header(skb);
- NF_HOOK_THRESH(PF_BRIDGE, NF_BR_FORWARD, skb, in,
+
+ NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, in,
skb->dev, br_forward_finish, 1);
return 0;
}
@@ -707,6 +700,10 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
/* The physdev module checks on this */
nf_bridge->mask |= BRNF_BRIDGED;
nf_bridge->physoutdev = skb->dev;
+ if (pf == PF_INET)
+ skb->protocol = htons(ETH_P_IP);
+ else
+ skb->protocol = htons(ETH_P_IPV6);
NF_HOOK(pf, NF_INET_FORWARD, skb, bridge_parent(in), parent,
br_nf_forward_finish);
@@ -744,60 +741,11 @@ static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff *skb,
return NF_STOLEN;
}
-/* PF_BRIDGE/LOCAL_OUT ***********************************************
- *
- * This function sees both locally originated IP packets and forwarded
- * IP packets (in both cases the destination device is a bridge
- * device). It also sees bridged-and-DNAT'ed packets.
- *
- * If (nf_bridge->mask & BRNF_BRIDGED_DNAT) then the packet is bridged
- * and we fake the PF_BRIDGE/FORWARD hook. The function br_nf_forward()
- * will then fake the PF_INET/FORWARD hook. br_nf_local_out() has priority
- * NF_BR_PRI_FIRST, so no relevant PF_BRIDGE/INPUT functions have been nor
- * will be executed.
- */
-static unsigned int br_nf_local_out(unsigned int hook, struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
-{
- struct net_device *realindev;
- struct nf_bridge_info *nf_bridge;
-
- if (!skb->nf_bridge)
- return NF_ACCEPT;
-
- /* Need exclusive nf_bridge_info since we might have multiple
- * different physoutdevs. */
- if (!nf_bridge_unshare(skb))
- return NF_DROP;
-
- nf_bridge = skb->nf_bridge;
- if (!(nf_bridge->mask & BRNF_BRIDGED_DNAT))
- return NF_ACCEPT;
-
- /* Bridged, take PF_BRIDGE/FORWARD.
- * (see big note in front of br_nf_pre_routing_finish) */
- nf_bridge->physoutdev = skb->dev;
- realindev = nf_bridge->physindev;
-
- if (nf_bridge->mask & BRNF_PKT_TYPE) {
- skb->pkt_type = PACKET_OTHERHOST;
- nf_bridge->mask ^= BRNF_PKT_TYPE;
- }
- nf_bridge_push_encap_header(skb);
-
- NF_HOOK(PF_BRIDGE, NF_BR_FORWARD, skb, realindev, skb->dev,
- br_forward_finish);
- return NF_STOLEN;
-}
-
#if defined(CONFIG_NF_CONNTRACK_IPV4) || defined(CONFIG_NF_CONNTRACK_IPV4_MODULE)
static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
- if (skb->nfct != NULL &&
- (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb)) &&
- skb->len > skb->dev->mtu &&
+ if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) &&
+ skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
!skb_is_gso(skb))
return ip_fragment(skb, br_dev_queue_push_xmit);
else
@@ -820,21 +768,7 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
struct net_device *realoutdev = bridge_parent(skb->dev);
u_int8_t pf;
-#ifdef CONFIG_NETFILTER_DEBUG
- /* Be very paranoid. This probably won't happen anymore, but let's
- * keep the check just to be sure... */
- if (skb_mac_header(skb) < skb->head ||
- skb_mac_header(skb) + ETH_HLEN > skb->data) {
- printk(KERN_CRIT "br_netfilter: Argh!! br_nf_post_routing: "
- "bad mac.raw pointer.\n");
- goto print_error;
- }
-#endif
-
- if (!nf_bridge)
- return NF_ACCEPT;
-
- if (!(nf_bridge->mask & (BRNF_BRIDGED | BRNF_BRIDGED_DNAT)))
+ if (!nf_bridge || !(nf_bridge->mask & BRNF_BRIDGED))
return NF_ACCEPT;
if (!realoutdev)
@@ -849,13 +783,6 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
else
return NF_ACCEPT;
-#ifdef CONFIG_NETFILTER_DEBUG
- if (skb_dst(skb) == NULL) {
- printk(KERN_INFO "br_netfilter post_routing: skb->dst == NULL\n");
- goto print_error;
- }
-#endif
-
/* We assume any code from br_dev_queue_push_xmit onwards doesn't care
* about the value of skb->pkt_type. */
if (skb->pkt_type == PACKET_OTHERHOST) {
@@ -865,24 +792,15 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
nf_bridge_pull_encap_header(skb);
nf_bridge_save_header(skb);
+ if (pf == PF_INET)
+ skb->protocol = htons(ETH_P_IP);
+ else
+ skb->protocol = htons(ETH_P_IPV6);
NF_HOOK(pf, NF_INET_POST_ROUTING, skb, NULL, realoutdev,
br_nf_dev_queue_xmit);
return NF_STOLEN;
-
-#ifdef CONFIG_NETFILTER_DEBUG
-print_error:
- if (skb->dev != NULL) {
- printk("[%s]", skb->dev->name);
- if (realoutdev)
- printk("[%s]", realoutdev->name);
- }
- printk(" head:%p, raw:%p, data:%p\n", skb->head, skb_mac_header(skb),
- skb->data);
- dump_stack();
- return NF_ACCEPT;
-#endif
}
/* IP/SABOTAGE *****************************************************/
@@ -901,10 +819,8 @@ static unsigned int ip_sabotage_in(unsigned int hook, struct sk_buff *skb,
return NF_ACCEPT;
}
-/* For br_nf_local_out we need (prio = NF_BR_PRI_FIRST), to insure that innocent
- * PF_BRIDGE/NF_BR_LOCAL_OUT functions don't get bridged traffic as input.
- * For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because
- * ip_refrag() can return NF_STOLEN. */
+/* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because
+ * br_dev_queue_push_xmit is called afterwards */
static struct nf_hook_ops br_nf_ops[] __read_mostly = {
{
.hook = br_nf_pre_routing,
@@ -935,13 +851,6 @@ static struct nf_hook_ops br_nf_ops[] __read_mostly = {
.priority = NF_BR_PRI_BRNF,
},
{
- .hook = br_nf_local_out,
- .owner = THIS_MODULE,
- .pf = PF_BRIDGE,
- .hooknum = NF_BR_LOCAL_OUT,
- .priority = NF_BR_PRI_FIRST,
- },
- {
.hook = br_nf_post_routing,
.owner = THIS_MODULE,
.pf = PF_BRIDGE,
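Note on the br_netfilter.c hunks above: setup_pre_routing() now records the original encapsulation (802.1Q or PPPoE session) in nf_bridge->mask, and nf_bridge_update_protocol() is called before the skb re-enters the bridge hooks. That helper is not shown in this diff; a minimal sketch of what it is assumed to do, based on the BRNF_8021Q/BRNF_PPPoE bits set above, is:

static inline void nf_bridge_update_protocol(struct sk_buff *skb)
{
	/* Sketch (assumption): undo the skb->protocol override used for the
	 * IPv4/IPv6 hooks and restore the encapsulated ethertype recorded
	 * by setup_pre_routing(). */
	if (skb->nf_bridge->mask & BRNF_8021Q)
		skb->protocol = htons(ETH_P_8021Q);
	else if (skb->nf_bridge->mask & BRNF_PPPoE)
		skb->protocol = htons(ETH_P_PPP_SES);
}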
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
index 763a3ec292e5..1413b72acc7f 100644
--- a/net/bridge/br_notify.c
+++ b/net/bridge/br_notify.c
@@ -82,6 +82,10 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
case NETDEV_UNREGISTER:
br_del_if(br, dev);
break;
+
+ case NETDEV_PRE_TYPE_CHANGE:
+ /* Forbid underlying device to change its type. */
+ return NOTIFY_BAD;
}
/* Events that may cause spanning tree to refresh */
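The new NETDEV_PRE_TYPE_CHANGE case above vetoes a type change on an enslaved port by returning NOTIFY_BAD from the notifier. As a hedged illustration of the same pattern, a hypothetical notifier (foo_port() is an assumed helper) could veto the event like this:

static int foo_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	/* Refuse to let an enslaved device change its hardware type. */
	if (event == NETDEV_PRE_TYPE_CHANGE && foo_port(dev))
		return NOTIFY_BAD;

	return NOTIFY_DONE;
}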
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 846d7d1e2075..3d2d3fe0a97e 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -45,6 +45,17 @@ struct mac_addr
unsigned char addr[6];
};
+struct br_ip
+{
+ union {
+ __be32 ip4;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ struct in6_addr ip6;
+#endif
+ } u;
+ __be16 proto;
+};
+
struct net_bridge_fdb_entry
{
struct hlist_node hlist;
@@ -64,7 +75,7 @@ struct net_bridge_port_group {
struct rcu_head rcu;
struct timer_list timer;
struct timer_list query_timer;
- __be32 addr;
+ struct br_ip addr;
u32 queries_sent;
};
@@ -77,7 +88,7 @@ struct net_bridge_mdb_entry
struct rcu_head rcu;
struct timer_list timer;
struct timer_list query_timer;
- __be32 addr;
+ struct br_ip addr;
u32 queries_sent;
};
@@ -130,11 +141,20 @@ struct net_bridge_port
#endif
};
+struct br_cpu_netstats {
+ unsigned long rx_packets;
+ unsigned long rx_bytes;
+ unsigned long tx_packets;
+ unsigned long tx_bytes;
+};
+
struct net_bridge
{
spinlock_t lock;
struct list_head port_list;
struct net_device *dev;
+
+ struct br_cpu_netstats __percpu *stats;
spinlock_t hash_lock;
struct hlist_head hash[BR_HASH_SIZE];
unsigned long feature_mask;
@@ -233,6 +253,8 @@ static inline int br_is_root_bridge(const struct net_bridge *br)
extern void br_dev_setup(struct net_device *dev);
extern netdev_tx_t br_dev_xmit(struct sk_buff *skb,
struct net_device *dev);
+extern bool br_devices_support_netpoll(struct net_bridge *br);
+extern void br_netpoll_cleanup(struct net_device *br_dev);
/* br_fdb.c */
extern int br_fdb_init(void);
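With the new struct br_ip above, multicast group addresses carry both an address union and the protocol, so callers are expected to fill the proto field before hashing or comparing entries. A minimal sketch of building a br_ip key for an IPv4 group (br_ip4_key() is a hypothetical helper, layout assumed from the struct shown above):

static void br_ip4_key(const struct sk_buff *skb, struct br_ip *dst)
{
	memset(dst, 0, sizeof(*dst));
	dst->proto = htons(ETH_P_IP);		/* discriminates the union */
	dst->u.ip4 = ip_hdr(skb)->daddr;	/* IPv4 group address */
}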
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index d66cce11f3bf..217bd225a42f 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -50,7 +50,7 @@ static void br_send_bpdu(struct net_bridge_port *p,
llc_mac_hdr_init(skb, p->dev->dev_addr, p->br->group_addr);
- NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
+ NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
dev_queue_xmit);
}
diff --git a/net/bridge/netfilter/ebt_802_3.c b/net/bridge/netfilter/ebt_802_3.c
index 5d1176758ca5..f7de8dbc3422 100644
--- a/net/bridge/netfilter/ebt_802_3.c
+++ b/net/bridge/netfilter/ebt_802_3.c
@@ -36,14 +36,14 @@ ebt_802_3_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return true;
}
-static bool ebt_802_3_mt_check(const struct xt_mtchk_param *par)
+static int ebt_802_3_mt_check(const struct xt_mtchk_param *par)
{
const struct ebt_802_3_info *info = par->matchinfo;
if (info->bitmask & ~EBT_802_3_MASK || info->invflags & ~EBT_802_3_MASK)
- return false;
+ return -EINVAL;
- return true;
+ return 0;
}
static struct xt_match ebt_802_3_mt_reg __read_mostly = {
diff --git a/net/bridge/netfilter/ebt_among.c b/net/bridge/netfilter/ebt_among.c
index b595f091f35b..20068e03fa81 100644
--- a/net/bridge/netfilter/ebt_among.c
+++ b/net/bridge/netfilter/ebt_among.c
@@ -7,6 +7,7 @@
* August, 2003
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/ip.h>
#include <linux/if_arp.h>
#include <linux/module.h>
@@ -171,7 +172,7 @@ ebt_among_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return true;
}
-static bool ebt_among_mt_check(const struct xt_mtchk_param *par)
+static int ebt_among_mt_check(const struct xt_mtchk_param *par)
{
const struct ebt_among_info *info = par->matchinfo;
const struct ebt_entry_match *em =
@@ -186,24 +187,20 @@ static bool ebt_among_mt_check(const struct xt_mtchk_param *par)
expected_length += ebt_mac_wormhash_size(wh_src);
if (em->match_size != EBT_ALIGN(expected_length)) {
- printk(KERN_WARNING
- "ebtables: among: wrong size: %d "
- "against expected %d, rounded to %Zd\n",
- em->match_size, expected_length,
- EBT_ALIGN(expected_length));
- return false;
+ pr_info("wrong size: %d against expected %d, rounded to %Zd\n",
+ em->match_size, expected_length,
+ EBT_ALIGN(expected_length));
+ return -EINVAL;
}
if (wh_dst && (err = ebt_mac_wormhash_check_integrity(wh_dst))) {
- printk(KERN_WARNING
- "ebtables: among: dst integrity fail: %x\n", -err);
- return false;
+ pr_info("dst integrity fail: %x\n", -err);
+ return -EINVAL;
}
if (wh_src && (err = ebt_mac_wormhash_check_integrity(wh_src))) {
- printk(KERN_WARNING
- "ebtables: among: src integrity fail: %x\n", -err);
- return false;
+ pr_info("src integrity fail: %x\n", -err);
+ return -EINVAL;
}
- return true;
+ return 0;
}
static struct xt_match ebt_among_mt_reg __read_mostly = {
diff --git a/net/bridge/netfilter/ebt_arp.c b/net/bridge/netfilter/ebt_arp.c
index e727697c5847..952150cd5e7d 100644
--- a/net/bridge/netfilter/ebt_arp.c
+++ b/net/bridge/netfilter/ebt_arp.c
@@ -100,7 +100,7 @@ ebt_arp_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return true;
}
-static bool ebt_arp_mt_check(const struct xt_mtchk_param *par)
+static int ebt_arp_mt_check(const struct xt_mtchk_param *par)
{
const struct ebt_arp_info *info = par->matchinfo;
const struct ebt_entry *e = par->entryinfo;
@@ -108,10 +108,10 @@ static bool ebt_arp_mt_check(const struct xt_mtchk_param *par)
if ((e->ethproto != htons(ETH_P_ARP) &&
e->ethproto != htons(ETH_P_RARP)) ||
e->invflags & EBT_IPROTO)
- return false;
+ return -EINVAL;
if (info->bitmask & ~EBT_ARP_MASK || info->invflags & ~EBT_ARP_MASK)
- return false;
- return true;
+ return -EINVAL;
+ return 0;
}
static struct xt_match ebt_arp_mt_reg __read_mostly = {
diff --git a/net/bridge/netfilter/ebt_arpreply.c b/net/bridge/netfilter/ebt_arpreply.c
index f392e9d93f53..4581adb27583 100644
--- a/net/bridge/netfilter/ebt_arpreply.c
+++ b/net/bridge/netfilter/ebt_arpreply.c
@@ -57,17 +57,17 @@ ebt_arpreply_tg(struct sk_buff *skb, const struct xt_target_param *par)
return info->target;
}
-static bool ebt_arpreply_tg_check(const struct xt_tgchk_param *par)
+static int ebt_arpreply_tg_check(const struct xt_tgchk_param *par)
{
const struct ebt_arpreply_info *info = par->targinfo;
const struct ebt_entry *e = par->entryinfo;
if (BASE_CHAIN && info->target == EBT_RETURN)
- return false;
+ return -EINVAL;
if (e->ethproto != htons(ETH_P_ARP) ||
e->invflags & EBT_IPROTO)
- return false;
- return true;
+ return -EINVAL;
+ return 0;
}
static struct xt_target ebt_arpreply_tg_reg __read_mostly = {
diff --git a/net/bridge/netfilter/ebt_dnat.c b/net/bridge/netfilter/ebt_dnat.c
index 2bb40d728a35..59d5b7c8a557 100644
--- a/net/bridge/netfilter/ebt_dnat.c
+++ b/net/bridge/netfilter/ebt_dnat.c
@@ -26,13 +26,13 @@ ebt_dnat_tg(struct sk_buff *skb, const struct xt_target_param *par)
return info->target;
}
-static bool ebt_dnat_tg_check(const struct xt_tgchk_param *par)
+static int ebt_dnat_tg_check(const struct xt_tgchk_param *par)
{
const struct ebt_nat_info *info = par->targinfo;
unsigned int hook_mask;
if (BASE_CHAIN && info->target == EBT_RETURN)
- return false;
+ return -EINVAL;
hook_mask = par->hook_mask & ~(1 << NF_BR_NUMHOOKS);
if ((strcmp(par->table, "nat") != 0 ||
@@ -40,10 +40,10 @@ static bool ebt_dnat_tg_check(const struct xt_tgchk_param *par)
(1 << NF_BR_LOCAL_OUT)))) &&
(strcmp(par->table, "broute") != 0 ||
hook_mask & ~(1 << NF_BR_BROUTING)))
- return false;
+ return -EINVAL;
if (INVALID_TARGET)
- return false;
- return true;
+ return -EINVAL;
+ return 0;
}
static struct xt_target ebt_dnat_tg_reg __read_mostly = {
diff --git a/net/bridge/netfilter/ebt_ip.c b/net/bridge/netfilter/ebt_ip.c
index 5de6df6f86b8..a1c76c7e5219 100644
--- a/net/bridge/netfilter/ebt_ip.c
+++ b/net/bridge/netfilter/ebt_ip.c
@@ -77,31 +77,31 @@ ebt_ip_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return true;
}
-static bool ebt_ip_mt_check(const struct xt_mtchk_param *par)
+static int ebt_ip_mt_check(const struct xt_mtchk_param *par)
{
const struct ebt_ip_info *info = par->matchinfo;
const struct ebt_entry *e = par->entryinfo;
if (e->ethproto != htons(ETH_P_IP) ||
e->invflags & EBT_IPROTO)
- return false;
+ return -EINVAL;
if (info->bitmask & ~EBT_IP_MASK || info->invflags & ~EBT_IP_MASK)
- return false;
+ return -EINVAL;
if (info->bitmask & (EBT_IP_DPORT | EBT_IP_SPORT)) {
if (info->invflags & EBT_IP_PROTO)
- return false;
+ return -EINVAL;
if (info->protocol != IPPROTO_TCP &&
info->protocol != IPPROTO_UDP &&
info->protocol != IPPROTO_UDPLITE &&
info->protocol != IPPROTO_SCTP &&
info->protocol != IPPROTO_DCCP)
- return false;
+ return -EINVAL;
}
if (info->bitmask & EBT_IP_DPORT && info->dport[0] > info->dport[1])
- return false;
+ return -EINVAL;
if (info->bitmask & EBT_IP_SPORT && info->sport[0] > info->sport[1])
- return false;
- return true;
+ return -EINVAL;
+ return 0;
}
static struct xt_match ebt_ip_mt_reg __read_mostly = {
diff --git a/net/bridge/netfilter/ebt_ip6.c b/net/bridge/netfilter/ebt_ip6.c
index bbf2534ef026..33f8413f05ad 100644
--- a/net/bridge/netfilter/ebt_ip6.c
+++ b/net/bridge/netfilter/ebt_ip6.c
@@ -4,7 +4,7 @@
* Authors:
* Manohar Castelino <manohar.r.castelino@intel.com>
* Kuo-Lang Tseng <kuo-lang.tseng@intel.com>
- * Jan Engelhardt <jengelh@computergmbh.de>
+ * Jan Engelhardt <jengelh@medozas.de>
*
* Summary:
* This is just a modification of the IPv4 code written by
@@ -35,8 +35,6 @@ ebt_ip6_mt(const struct sk_buff *skb, const struct xt_match_param *par)
struct ipv6hdr _ip6h;
const struct tcpudphdr *pptr;
struct tcpudphdr _ports;
- struct in6_addr tmp_addr;
- int i;
ih6 = skb_header_pointer(skb, 0, sizeof(_ip6h), &_ip6h);
if (ih6 == NULL)
@@ -44,18 +42,10 @@ ebt_ip6_mt(const struct sk_buff *skb, const struct xt_match_param *par)
if (info->bitmask & EBT_IP6_TCLASS &&
FWINV(info->tclass != ipv6_get_dsfield(ih6), EBT_IP6_TCLASS))
return false;
- for (i = 0; i < 4; i++)
- tmp_addr.in6_u.u6_addr32[i] = ih6->saddr.in6_u.u6_addr32[i] &
- info->smsk.in6_u.u6_addr32[i];
- if (info->bitmask & EBT_IP6_SOURCE &&
- FWINV((ipv6_addr_cmp(&tmp_addr, &info->saddr) != 0),
- EBT_IP6_SOURCE))
- return false;
- for (i = 0; i < 4; i++)
- tmp_addr.in6_u.u6_addr32[i] = ih6->daddr.in6_u.u6_addr32[i] &
- info->dmsk.in6_u.u6_addr32[i];
- if (info->bitmask & EBT_IP6_DEST &&
- FWINV((ipv6_addr_cmp(&tmp_addr, &info->daddr) != 0), EBT_IP6_DEST))
+ if (FWINV(ipv6_masked_addr_cmp(&ih6->saddr, &info->smsk,
+ &info->saddr), EBT_IP6_SOURCE) ||
+ FWINV(ipv6_masked_addr_cmp(&ih6->daddr, &info->dmsk,
+ &info->daddr), EBT_IP6_DEST))
return false;
if (info->bitmask & EBT_IP6_PROTO) {
uint8_t nexthdr = ih6->nexthdr;
@@ -90,30 +80,30 @@ ebt_ip6_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return true;
}
-static bool ebt_ip6_mt_check(const struct xt_mtchk_param *par)
+static int ebt_ip6_mt_check(const struct xt_mtchk_param *par)
{
const struct ebt_entry *e = par->entryinfo;
struct ebt_ip6_info *info = par->matchinfo;
if (e->ethproto != htons(ETH_P_IPV6) || e->invflags & EBT_IPROTO)
- return false;
+ return -EINVAL;
if (info->bitmask & ~EBT_IP6_MASK || info->invflags & ~EBT_IP6_MASK)
- return false;
+ return -EINVAL;
if (info->bitmask & (EBT_IP6_DPORT | EBT_IP6_SPORT)) {
if (info->invflags & EBT_IP6_PROTO)
- return false;
+ return -EINVAL;
if (info->protocol != IPPROTO_TCP &&
info->protocol != IPPROTO_UDP &&
info->protocol != IPPROTO_UDPLITE &&
info->protocol != IPPROTO_SCTP &&
info->protocol != IPPROTO_DCCP)
- return false;
+ return -EINVAL;
}
if (info->bitmask & EBT_IP6_DPORT && info->dport[0] > info->dport[1])
- return false;
+ return -EINVAL;
if (info->bitmask & EBT_IP6_SPORT && info->sport[0] > info->sport[1])
- return false;
- return true;
+ return -EINVAL;
+ return 0;
}
static struct xt_match ebt_ip6_mt_reg __read_mostly = {
@@ -139,4 +129,5 @@ static void __exit ebt_ip6_fini(void)
module_init(ebt_ip6_init);
module_exit(ebt_ip6_fini);
MODULE_DESCRIPTION("Ebtables: IPv6 protocol packet match");
+MODULE_AUTHOR("Kuo-Lang Tseng <kuo-lang.tseng@intel.com>");
MODULE_LICENSE("GPL");
diff --git a/net/bridge/netfilter/ebt_limit.c b/net/bridge/netfilter/ebt_limit.c
index 7a8182710eb3..4b0e2e53fa57 100644
--- a/net/bridge/netfilter/ebt_limit.c
+++ b/net/bridge/netfilter/ebt_limit.c
@@ -10,6 +10,7 @@
* September, 2003
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
@@ -64,16 +65,16 @@ user2credits(u_int32_t user)
return (user * HZ * CREDITS_PER_JIFFY) / EBT_LIMIT_SCALE;
}
-static bool ebt_limit_mt_check(const struct xt_mtchk_param *par)
+static int ebt_limit_mt_check(const struct xt_mtchk_param *par)
{
struct ebt_limit_info *info = par->matchinfo;
/* Check for overflow. */
if (info->burst == 0 ||
user2credits(info->avg * info->burst) < user2credits(info->avg)) {
- printk("Overflow in ebt_limit, try lower: %u/%u\n",
+ pr_info("overflow, try lower: %u/%u\n",
info->avg, info->burst);
- return false;
+ return -EINVAL;
}
/* User avg in seconds * EBT_LIMIT_SCALE: convert to jiffies * 128. */
@@ -81,7 +82,7 @@ static bool ebt_limit_mt_check(const struct xt_mtchk_param *par)
info->credit = user2credits(info->avg * info->burst);
info->credit_cap = user2credits(info->avg * info->burst);
info->cost = user2credits(info->avg);
- return true;
+ return 0;
}
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index e873924ddb5d..c46024156539 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -24,16 +24,16 @@
static DEFINE_SPINLOCK(ebt_log_lock);
-static bool ebt_log_tg_check(const struct xt_tgchk_param *par)
+static int ebt_log_tg_check(const struct xt_tgchk_param *par)
{
struct ebt_log_info *info = par->targinfo;
if (info->bitmask & ~EBT_LOG_MASK)
- return false;
+ return -EINVAL;
if (info->loglevel >= 8)
- return false;
+ return -EINVAL;
info->prefix[EBT_LOG_PREFIX_SIZE - 1] = '\0';
- return true;
+ return 0;
}
struct tcpudphdr
diff --git a/net/bridge/netfilter/ebt_mark.c b/net/bridge/netfilter/ebt_mark.c
index 2b5ce533d6b9..126e536ff8f4 100644
--- a/net/bridge/netfilter/ebt_mark.c
+++ b/net/bridge/netfilter/ebt_mark.c
@@ -36,21 +36,21 @@ ebt_mark_tg(struct sk_buff *skb, const struct xt_target_param *par)
return info->target | ~EBT_VERDICT_BITS;
}
-static bool ebt_mark_tg_check(const struct xt_tgchk_param *par)
+static int ebt_mark_tg_check(const struct xt_tgchk_param *par)
{
const struct ebt_mark_t_info *info = par->targinfo;
int tmp;
tmp = info->target | ~EBT_VERDICT_BITS;
if (BASE_CHAIN && tmp == EBT_RETURN)
- return false;
+ return -EINVAL;
if (tmp < -NUM_STANDARD_TARGETS || tmp >= 0)
- return false;
+ return -EINVAL;
tmp = info->target & ~EBT_VERDICT_BITS;
if (tmp != MARK_SET_VALUE && tmp != MARK_OR_VALUE &&
tmp != MARK_AND_VALUE && tmp != MARK_XOR_VALUE)
- return false;
- return true;
+ return -EINVAL;
+ return 0;
}
#ifdef CONFIG_COMPAT
struct compat_ebt_mark_t_info {
diff --git a/net/bridge/netfilter/ebt_mark_m.c b/net/bridge/netfilter/ebt_mark_m.c
index 8de8c396d913..e4366c0a1a43 100644
--- a/net/bridge/netfilter/ebt_mark_m.c
+++ b/net/bridge/netfilter/ebt_mark_m.c
@@ -22,17 +22,17 @@ ebt_mark_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return ((skb->mark & info->mask) == info->mark) ^ info->invert;
}
-static bool ebt_mark_mt_check(const struct xt_mtchk_param *par)
+static int ebt_mark_mt_check(const struct xt_mtchk_param *par)
{
const struct ebt_mark_m_info *info = par->matchinfo;
if (info->bitmask & ~EBT_MARK_MASK)
- return false;
+ return -EINVAL;
if ((info->bitmask & EBT_MARK_OR) && (info->bitmask & EBT_MARK_AND))
- return false;
+ return -EINVAL;
if (!info->bitmask)
- return false;
- return true;
+ return -EINVAL;
+ return 0;
}
diff --git a/net/bridge/netfilter/ebt_nflog.c b/net/bridge/netfilter/ebt_nflog.c
index 40dbd248b9ae..22e2ad5f23e8 100644
--- a/net/bridge/netfilter/ebt_nflog.c
+++ b/net/bridge/netfilter/ebt_nflog.c
@@ -35,14 +35,14 @@ ebt_nflog_tg(struct sk_buff *skb, const struct xt_target_param *par)
return EBT_CONTINUE;
}
-static bool ebt_nflog_tg_check(const struct xt_tgchk_param *par)
+static int ebt_nflog_tg_check(const struct xt_tgchk_param *par)
{
struct ebt_nflog_info *info = par->targinfo;
if (info->flags & ~EBT_NFLOG_MASK)
- return false;
+ return -EINVAL;
info->prefix[EBT_NFLOG_PREFIX_SIZE - 1] = '\0';
- return true;
+ return 0;
}
static struct xt_target ebt_nflog_tg_reg __read_mostly = {
diff --git a/net/bridge/netfilter/ebt_pkttype.c b/net/bridge/netfilter/ebt_pkttype.c
index e2a07e6cbef3..f34bcc3197bd 100644
--- a/net/bridge/netfilter/ebt_pkttype.c
+++ b/net/bridge/netfilter/ebt_pkttype.c
@@ -20,14 +20,14 @@ ebt_pkttype_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return (skb->pkt_type == info->pkt_type) ^ info->invert;
}
-static bool ebt_pkttype_mt_check(const struct xt_mtchk_param *par)
+static int ebt_pkttype_mt_check(const struct xt_mtchk_param *par)
{
const struct ebt_pkttype_info *info = par->matchinfo;
if (info->invert != 0 && info->invert != 1)
- return false;
+ return -EINVAL;
/* Allow any pkt_type value */
- return true;
+ return 0;
}
static struct xt_match ebt_pkttype_mt_reg __read_mostly = {
diff --git a/net/bridge/netfilter/ebt_redirect.c b/net/bridge/netfilter/ebt_redirect.c
index 9be8fbcd370b..a6044a6f2383 100644
--- a/net/bridge/netfilter/ebt_redirect.c
+++ b/net/bridge/netfilter/ebt_redirect.c
@@ -32,23 +32,23 @@ ebt_redirect_tg(struct sk_buff *skb, const struct xt_target_param *par)
return info->target;
}
-static bool ebt_redirect_tg_check(const struct xt_tgchk_param *par)
+static int ebt_redirect_tg_check(const struct xt_tgchk_param *par)
{
const struct ebt_redirect_info *info = par->targinfo;
unsigned int hook_mask;
if (BASE_CHAIN && info->target == EBT_RETURN)
- return false;
+ return -EINVAL;
hook_mask = par->hook_mask & ~(1 << NF_BR_NUMHOOKS);
if ((strcmp(par->table, "nat") != 0 ||
hook_mask & ~(1 << NF_BR_PRE_ROUTING)) &&
(strcmp(par->table, "broute") != 0 ||
hook_mask & ~(1 << NF_BR_BROUTING)))
- return false;
+ return -EINVAL;
if (INVALID_TARGET)
- return false;
- return true;
+ return -EINVAL;
+ return 0;
}
static struct xt_target ebt_redirect_tg_reg __read_mostly = {
diff --git a/net/bridge/netfilter/ebt_snat.c b/net/bridge/netfilter/ebt_snat.c
index 9c7b520765a2..79caca34ae2b 100644
--- a/net/bridge/netfilter/ebt_snat.c
+++ b/net/bridge/netfilter/ebt_snat.c
@@ -42,21 +42,21 @@ out:
return info->target | ~EBT_VERDICT_BITS;
}
-static bool ebt_snat_tg_check(const struct xt_tgchk_param *par)
+static int ebt_snat_tg_check(const struct xt_tgchk_param *par)
{
const struct ebt_nat_info *info = par->targinfo;
int tmp;
tmp = info->target | ~EBT_VERDICT_BITS;
if (BASE_CHAIN && tmp == EBT_RETURN)
- return false;
+ return -EINVAL;
if (tmp < -NUM_STANDARD_TARGETS || tmp >= 0)
- return false;
+ return -EINVAL;
tmp = info->target | EBT_VERDICT_BITS;
if ((tmp & ~NAT_ARP_BIT) != ~NAT_ARP_BIT)
- return false;
- return true;
+ return -EINVAL;
+ return 0;
}
static struct xt_target ebt_snat_tg_reg __read_mostly = {
diff --git a/net/bridge/netfilter/ebt_stp.c b/net/bridge/netfilter/ebt_stp.c
index 92a93d363765..02f28fdda393 100644
--- a/net/bridge/netfilter/ebt_stp.c
+++ b/net/bridge/netfilter/ebt_stp.c
@@ -153,7 +153,7 @@ ebt_stp_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return true;
}
-static bool ebt_stp_mt_check(const struct xt_mtchk_param *par)
+static int ebt_stp_mt_check(const struct xt_mtchk_param *par)
{
const struct ebt_stp_info *info = par->matchinfo;
const uint8_t bridge_ula[6] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00};
@@ -162,13 +162,13 @@ static bool ebt_stp_mt_check(const struct xt_mtchk_param *par)
if (info->bitmask & ~EBT_STP_MASK || info->invflags & ~EBT_STP_MASK ||
!(info->bitmask & EBT_STP_MASK))
- return false;
+ return -EINVAL;
/* Make sure the match only receives stp frames */
if (compare_ether_addr(e->destmac, bridge_ula) ||
compare_ether_addr(e->destmsk, msk) || !(e->bitmask & EBT_DESTMAC))
- return false;
+ return -EINVAL;
- return true;
+ return 0;
}
static struct xt_match ebt_stp_mt_reg __read_mostly = {
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index f9560f3dbdc7..852f37c27659 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -27,7 +27,7 @@
* flushed even if it is not full yet.
*
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -44,9 +44,6 @@
#include <net/sock.h>
#include "../br_private.h"
-#define PRINTR(format, args...) do { if (net_ratelimit()) \
- printk(format , ## args); } while (0)
-
static unsigned int nlbufsiz = NLMSG_GOODSIZE;
module_param(nlbufsiz, uint, 0600);
MODULE_PARM_DESC(nlbufsiz, "netlink buffer size (number of bytes) "
@@ -107,15 +104,14 @@ static struct sk_buff *ulog_alloc_skb(unsigned int size)
n = max(size, nlbufsiz);
skb = alloc_skb(n, GFP_ATOMIC);
if (!skb) {
- PRINTR(KERN_ERR "ebt_ulog: can't alloc whole buffer "
- "of size %ub!\n", n);
+ pr_debug("cannot alloc whole buffer of size %ub!\n", n);
if (n > size) {
/* try to allocate only as much as we need for
* current packet */
skb = alloc_skb(size, GFP_ATOMIC);
if (!skb)
- PRINTR(KERN_ERR "ebt_ulog: can't even allocate "
- "buffer of size %ub\n", size);
+ pr_debug("cannot even allocate "
+ "buffer of size %ub\n", size);
}
}
@@ -142,8 +138,7 @@ static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb,
size = NLMSG_SPACE(sizeof(*pm) + copy_len);
if (size > nlbufsiz) {
- PRINTR("ebt_ulog: Size %Zd needed, but nlbufsiz=%d\n",
- size, nlbufsiz);
+ pr_debug("Size %Zd needed, but nlbufsiz=%d\n", size, nlbufsiz);
return;
}
@@ -217,8 +212,8 @@ unlock:
return;
nlmsg_failure:
- printk(KERN_CRIT "ebt_ulog: error during NLMSG_PUT. This should "
- "not happen, please report to author.\n");
+ pr_debug("error during NLMSG_PUT. This should "
+ "not happen, please report to author.\n");
goto unlock;
alloc_failure:
goto unlock;
@@ -255,19 +250,19 @@ ebt_ulog_tg(struct sk_buff *skb, const struct xt_target_param *par)
return EBT_CONTINUE;
}
-static bool ebt_ulog_tg_check(const struct xt_tgchk_param *par)
+static int ebt_ulog_tg_check(const struct xt_tgchk_param *par)
{
struct ebt_ulog_info *uloginfo = par->targinfo;
if (uloginfo->nlgroup > 31)
- return false;
+ return -EINVAL;
uloginfo->prefix[EBT_ULOG_PREFIX_LEN - 1] = '\0';
if (uloginfo->qthreshold > EBT_ULOG_MAX_QLEN)
uloginfo->qthreshold = EBT_ULOG_MAX_QLEN;
- return true;
+ return 0;
}
static struct xt_target ebt_ulog_tg_reg __read_mostly = {
@@ -292,8 +287,8 @@ static int __init ebt_ulog_init(void)
int i;
if (nlbufsiz >= 128*1024) {
- printk(KERN_NOTICE "ebt_ulog: Netlink buffer has to be <= 128kB,"
- " please try a smaller nlbufsiz parameter.\n");
+ pr_warning("Netlink buffer has to be <= 128kB,"
+ " please try a smaller nlbufsiz parameter.\n");
return -EINVAL;
}
@@ -306,13 +301,10 @@ static int __init ebt_ulog_init(void)
ebtulognl = netlink_kernel_create(&init_net, NETLINK_NFLOG,
EBT_ULOG_MAXNLGROUPS, NULL, NULL,
THIS_MODULE);
- if (!ebtulognl) {
- printk(KERN_WARNING KBUILD_MODNAME ": out of memory trying to "
- "call netlink_kernel_create\n");
+ if (!ebtulognl)
ret = -ENOMEM;
- } else if ((ret = xt_register_target(&ebt_ulog_tg_reg)) != 0) {
+ else if ((ret = xt_register_target(&ebt_ulog_tg_reg)) != 0)
netlink_kernel_release(ebtulognl);
- }
if (ret == 0)
nf_log_register(NFPROTO_BRIDGE, &ebt_ulog_logger);
diff --git a/net/bridge/netfilter/ebt_vlan.c b/net/bridge/netfilter/ebt_vlan.c
index be1dd2e1f615..bf8ae5c7a0c5 100644
--- a/net/bridge/netfilter/ebt_vlan.c
+++ b/net/bridge/netfilter/ebt_vlan.c
@@ -26,17 +26,12 @@
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_vlan.h>
-static int debug;
#define MODULE_VERS "0.6"
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, "debug=1 is turn on debug messages");
MODULE_AUTHOR("Nick Fedchik <nick@fedchik.org.ua>");
MODULE_DESCRIPTION("Ebtables: 802.1Q VLAN tag match");
MODULE_LICENSE("GPL");
-
-#define DEBUG_MSG(args...) if (debug) printk (KERN_DEBUG "ebt_vlan: " args)
#define GET_BITMASK(_BIT_MASK_) info->bitmask & _BIT_MASK_
#define EXIT_ON_MISMATCH(_MATCH_,_MASK_) {if (!((info->_MATCH_ == _MATCH_)^!!(info->invflags & _MASK_))) return false; }
@@ -84,32 +79,31 @@ ebt_vlan_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return true;
}
-static bool ebt_vlan_mt_check(const struct xt_mtchk_param *par)
+static int ebt_vlan_mt_check(const struct xt_mtchk_param *par)
{
struct ebt_vlan_info *info = par->matchinfo;
const struct ebt_entry *e = par->entryinfo;
/* Is it 802.1Q frame checked? */
if (e->ethproto != htons(ETH_P_8021Q)) {
- DEBUG_MSG
- ("passed entry proto %2.4X is not 802.1Q (8100)\n",
- (unsigned short) ntohs(e->ethproto));
- return false;
+ pr_debug("passed entry proto %2.4X is not 802.1Q (8100)\n",
+ ntohs(e->ethproto));
+ return -EINVAL;
}
/* Check for bitmask range
* True if even one bit is out of mask */
if (info->bitmask & ~EBT_VLAN_MASK) {
- DEBUG_MSG("bitmask %2X is out of mask (%2X)\n",
- info->bitmask, EBT_VLAN_MASK);
- return false;
+ pr_debug("bitmask %2X is out of mask (%2X)\n",
+ info->bitmask, EBT_VLAN_MASK);
+ return -EINVAL;
}
/* Check for inversion flags range */
if (info->invflags & ~EBT_VLAN_MASK) {
- DEBUG_MSG("inversion flags %2X is out of mask (%2X)\n",
- info->invflags, EBT_VLAN_MASK);
- return false;
+ pr_debug("inversion flags %2X is out of mask (%2X)\n",
+ info->invflags, EBT_VLAN_MASK);
+ return -EINVAL;
}
/* Reserved VLAN ID (VID) values
@@ -121,10 +115,9 @@ static bool ebt_vlan_mt_check(const struct xt_mtchk_param *par)
if (GET_BITMASK(EBT_VLAN_ID)) {
if (!!info->id) { /* if id!=0 => check vid range */
if (info->id > VLAN_GROUP_ARRAY_LEN) {
- DEBUG_MSG
- ("id %d is out of range (1-4096)\n",
- info->id);
- return false;
+ pr_debug("id %d is out of range (1-4096)\n",
+ info->id);
+ return -EINVAL;
}
/* Note: This is valid VLAN-tagged frame point.
* Any value of user_priority are acceptable,
@@ -137,9 +130,9 @@ static bool ebt_vlan_mt_check(const struct xt_mtchk_param *par)
if (GET_BITMASK(EBT_VLAN_PRIO)) {
if ((unsigned char) info->prio > 7) {
- DEBUG_MSG("prio %d is out of range (0-7)\n",
- info->prio);
- return false;
+ pr_debug("prio %d is out of range (0-7)\n",
+ info->prio);
+ return -EINVAL;
}
}
/* Check for encapsulated proto range - it is possible to be
@@ -147,14 +140,13 @@ static bool ebt_vlan_mt_check(const struct xt_mtchk_param *par)
* if_ether.h: ETH_ZLEN 60 - Min. octets in frame sans FCS */
if (GET_BITMASK(EBT_VLAN_ENCAP)) {
if ((unsigned short) ntohs(info->encap) < ETH_ZLEN) {
- DEBUG_MSG
- ("encap frame length %d is less than minimal\n",
- ntohs(info->encap));
- return false;
+ pr_debug("encap frame length %d is less than "
+ "minimal\n", ntohs(info->encap));
+ return -EINVAL;
}
}
- return true;
+ return 0;
}
static struct xt_match ebt_vlan_mt_reg __read_mostly = {
@@ -169,9 +161,7 @@ static struct xt_match ebt_vlan_mt_reg __read_mostly = {
static int __init ebt_vlan_init(void)
{
- DEBUG_MSG("ebtables 802.1Q extension module v"
- MODULE_VERS "\n");
- DEBUG_MSG("module debug=%d\n", !!debug);
+ pr_debug("ebtables 802.1Q extension module v" MODULE_VERS "\n");
return xt_register_match(&ebt_vlan_mt_reg);
}
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index f0865fd1e3ec..1d8c2c0a7470 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -14,8 +14,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
-
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
@@ -363,12 +362,9 @@ ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
left - sizeof(struct ebt_entry_match) < m->match_size)
return -EINVAL;
- match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE,
- m->u.name, 0), "ebt_%s", m->u.name);
+ match = xt_request_find_match(NFPROTO_BRIDGE, m->u.name, 0);
if (IS_ERR(match))
return PTR_ERR(match);
- if (match == NULL)
- return -ENOENT;
m->u.match = match;
par->match = match;
@@ -397,13 +393,9 @@ ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
left - sizeof(struct ebt_entry_watcher) < w->watcher_size)
return -EINVAL;
- watcher = try_then_request_module(
- xt_find_target(NFPROTO_BRIDGE, w->u.name, 0),
- "ebt_%s", w->u.name);
+ watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
if (IS_ERR(watcher))
return PTR_ERR(watcher);
- if (watcher == NULL)
- return -ENOENT;
w->u.watcher = watcher;
par->target = watcher;
@@ -716,15 +708,10 @@ ebt_check_entry(struct ebt_entry *e, struct net *net,
t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
gap = e->next_offset - e->target_offset;
- target = try_then_request_module(
- xt_find_target(NFPROTO_BRIDGE, t->u.name, 0),
- "ebt_%s", t->u.name);
+ target = xt_request_find_target(NFPROTO_BRIDGE, t->u.name, 0);
if (IS_ERR(target)) {
ret = PTR_ERR(target);
goto cleanup_watchers;
- } else if (target == NULL) {
- ret = -ENOENT;
- goto cleanup_watchers;
}
t->u.target = target;
@@ -2128,7 +2115,7 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
return ret;
new_offset += ret;
if (offsets_update && new_offset) {
- pr_debug("ebtables: change offset %d to %d\n",
+ pr_debug("change offset %d to %d\n",
offsets_update[i], offsets[j] + new_offset);
offsets_update[i] = offsets[j] + new_offset;
}
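The ebtables changes above follow two converging xtables conventions: match/target check callbacks now return 0 or a negative errno instead of bool, and extension lookup uses xt_request_find_match()/xt_request_find_target(), which return ERR_PTR() values rather than NULL. A minimal sketch of a check callback in the new convention (hypothetical ebt_foo match; the info layout and EBT_FOO_MASK are assumptions):

static int ebt_foo_mt_check(const struct xt_mtchk_param *par)
{
	const struct ebt_foo_info *info = par->matchinfo;

	if (info->bitmask & ~EBT_FOO_MASK)
		return -EINVAL;		/* was "return false" */
	return 0;			/* was "return true" */
}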
diff --git a/net/caif/Kconfig b/net/caif/Kconfig
new file mode 100644
index 000000000000..cd1daf6008bd
--- /dev/null
+++ b/net/caif/Kconfig
@@ -0,0 +1,48 @@
+#
+# CAIF net configurations
+#
+
+#menu "CAIF Support"
+comment "CAIF Support"
+menuconfig CAIF
+ tristate "Enable CAIF support"
+ select CRC_CCITT
+ default n
+ ---help---
+ The "Communication CPU to Application CPU Interface" (CAIF) is a packet
+ based connection-oriented MUX protocol developed by ST-Ericsson for use
+ with its modems. It is accessed from user space as sockets (PF_CAIF).
+
+ Say Y (or M) here if you build for a phone product (e.g. Android or
+ MeeGo) that uses CAIF as transport; if unsure, say N.
+
+ If you select to build it as a module, then CAIF_NETDEV also needs to
+ be built as a module. You will also need to say yes to any CAIF physical
+ devices that your platform requires.
+
+ See Documentation/networking/caif for a further explanation on how to
+ use and configure CAIF.
+
+if CAIF
+
+config CAIF_DEBUG
+ bool "Enable Debug"
+ default n
+ ---help---
+ Enable the inclusion of debug code in the CAIF stack.
+ Be aware that doing this will impact performance.
+ If unsure say N.
+
+
+config CAIF_NETDEV
+ tristate "CAIF GPRS Network device"
+ default CAIF
+ ---help---
+ Say Y if you will be using a CAIF based GPRS network device.
+ This can be either built-in or a loadable module.
+ If you select to build it as a built-in, then the main CAIF device must
+ also be built-in.
+ If unsure say Y.
+
+endif
+#endmenu
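Per the help text, CAIF is exposed to user space through PF_CAIF sockets. A hedged user-space sketch of opening an AT channel follows; the struct sockaddr_caif layout and the constants are assumptions taken from linux/caif/caif_socket.h and from caif_config_util.c further down in this diff:

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/caif/caif_socket.h>

int caif_open_at(void)
{
	struct sockaddr_caif addr;
	int fd = socket(AF_CAIF, SOCK_SEQPACKET, CAIFPROTO_AT);

	if (fd < 0)
		return -1;
	memset(&addr, 0, sizeof(addr));
	addr.family = AF_CAIF;
	addr.u.at.type = CAIF_ATTYPE_PLAIN;	/* plain AT channel */
	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}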
diff --git a/net/caif/Makefile b/net/caif/Makefile
new file mode 100644
index 000000000000..34852af2595e
--- /dev/null
+++ b/net/caif/Makefile
@@ -0,0 +1,26 @@
+ifeq ($(CONFIG_CAIF_DEBUG),y)
+CAIF_DBG_FLAGS := -DDEBUG
+endif
+
+ccflags-y := $(CAIF_FLAGS) $(CAIF_DBG_FLAGS)
+
+caif-objs := caif_dev.o \
+ cfcnfg.o cfmuxl.o cfctrl.o \
+ cffrml.o cfveil.o cfdbgl.o\
+ cfserl.o cfdgml.o \
+ cfrfml.o cfvidl.o cfutill.o \
+ cfsrvl.o cfpkt_skbuff.o caif_config_util.o
+clean-dirs:= .tmp_versions
+
+clean-files:= \
+ Module.symvers \
+ modules.order \
+ *.cmd \
+ *.o \
+ *~
+
+obj-$(CONFIG_CAIF) += caif.o
+obj-$(CONFIG_CAIF_NETDEV) += chnl_net.o
+obj-$(CONFIG_CAIF) += caif_socket.o
+
+export-objs := caif.o
diff --git a/net/caif/caif_config_util.c b/net/caif/caif_config_util.c
new file mode 100644
index 000000000000..6f36580366f0
--- /dev/null
+++ b/net/caif/caif_config_util.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <net/caif/cfctrl.h>
+#include <net/caif/cfcnfg.h>
+#include <net/caif/caif_dev.h>
+
+int connect_req_to_link_param(struct cfcnfg *cnfg,
+ struct caif_connect_request *s,
+ struct cfctrl_link_param *l)
+{
+ struct dev_info *dev_info;
+ enum cfcnfg_phy_preference pref;
+ memset(l, 0, sizeof(*l));
+ l->priority = s->priority;
+
+ if (s->link_name[0] != '\0')
+ l->phyid = cfcnfg_get_named(cnfg, s->link_name);
+ else {
+ switch (s->link_selector) {
+ case CAIF_LINK_HIGH_BANDW:
+ pref = CFPHYPREF_HIGH_BW;
+ break;
+ case CAIF_LINK_LOW_LATENCY:
+ pref = CFPHYPREF_LOW_LAT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ dev_info = cfcnfg_get_phyid(cnfg, pref);
+ if (dev_info == NULL)
+ return -ENODEV;
+ l->phyid = dev_info->id;
+ }
+ switch (s->protocol) {
+ case CAIFPROTO_AT:
+ l->linktype = CFCTRL_SRV_VEI;
+ if (s->sockaddr.u.at.type == CAIF_ATTYPE_PLAIN)
+ l->chtype = 0x02;
+ else
+ l->chtype = s->sockaddr.u.at.type;
+ l->endpoint = 0x00;
+ break;
+ case CAIFPROTO_DATAGRAM:
+ l->linktype = CFCTRL_SRV_DATAGRAM;
+ l->chtype = 0x00;
+ l->u.datagram.connid = s->sockaddr.u.dgm.connection_id;
+ break;
+ case CAIFPROTO_DATAGRAM_LOOP:
+ l->linktype = CFCTRL_SRV_DATAGRAM;
+ l->chtype = 0x03;
+ l->endpoint = 0x00;
+ l->u.datagram.connid = s->sockaddr.u.dgm.connection_id;
+ break;
+ case CAIFPROTO_RFM:
+ l->linktype = CFCTRL_SRV_RFM;
+ l->u.datagram.connid = s->sockaddr.u.rfm.connection_id;
+ strncpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume,
+ sizeof(l->u.rfm.volume)-1);
+ l->u.rfm.volume[sizeof(l->u.rfm.volume)-1] = 0;
+ break;
+ case CAIFPROTO_UTIL:
+ l->linktype = CFCTRL_SRV_UTIL;
+ l->endpoint = 0x00;
+ l->chtype = 0x00;
+ strncpy(l->u.utility.name, s->sockaddr.u.util.service,
+ sizeof(l->u.utility.name)-1);
+ l->u.utility.name[sizeof(l->u.utility.name)-1] = 0;
+ caif_assert(sizeof(l->u.utility.name) > 10);
+ l->u.utility.paramlen = s->param.size;
+ if (l->u.utility.paramlen > sizeof(l->u.utility.params))
+ l->u.utility.paramlen = sizeof(l->u.utility.params);
+
+ memcpy(l->u.utility.params, s->param.data,
+ l->u.utility.paramlen);
+
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
new file mode 100644
index 000000000000..024fd5bb2d39
--- /dev/null
+++ b/net/caif/caif_dev.c
@@ -0,0 +1,418 @@
+/*
+ * CAIF Interface registration.
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Borrowed heavily from file: pn_dev.c. Thanks to
+ * Remi Denis-Courmont <remi.denis-courmont@nokia.com>
+ * and Sakari Ailus <sakari.ailus@nokia.com>
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/if_arp.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <net/netns/generic.h>
+#include <net/net_namespace.h>
+#include <net/pkt_sched.h>
+#include <net/caif/caif_device.h>
+#include <net/caif/caif_dev.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfpkt.h>
+#include <net/caif/cfcnfg.h>
+
+MODULE_LICENSE("GPL");
+#define TIMEOUT (HZ*5)
+
+/* Used for local tracking of the CAIF net devices */
+struct caif_device_entry {
+ struct cflayer layer;
+ struct list_head list;
+ atomic_t in_use;
+ atomic_t state;
+ u16 phyid;
+ struct net_device *netdev;
+ wait_queue_head_t event;
+};
+
+struct caif_device_entry_list {
+ struct list_head list;
+ /* Protects simultaneous deletes in the list */
+ spinlock_t lock;
+};
+
+struct caif_net {
+ struct caif_device_entry_list caifdevs;
+};
+
+static int caif_net_id;
+static struct cfcnfg *cfg;
+
+static struct caif_device_entry_list *caif_device_list(struct net *net)
+{
+ struct caif_net *caifn;
+ BUG_ON(!net);
+ caifn = net_generic(net, caif_net_id);
+ BUG_ON(!caifn);
+ return &caifn->caifdevs;
+}
+
+/* Allocate new CAIF device. */
+static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
+{
+ struct caif_device_entry_list *caifdevs;
+ struct caif_device_entry *caifd;
+ caifdevs = caif_device_list(dev_net(dev));
+ BUG_ON(!caifdevs);
+ caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC);
+ if (!caifd)
+ return NULL;
+ caifd->netdev = dev;
+ list_add(&caifd->list, &caifdevs->list);
+ init_waitqueue_head(&caifd->event);
+ return caifd;
+}
+
+static struct caif_device_entry *caif_get(struct net_device *dev)
+{
+ struct caif_device_entry_list *caifdevs =
+ caif_device_list(dev_net(dev));
+ struct caif_device_entry *caifd;
+ BUG_ON(!caifdevs);
+ list_for_each_entry(caifd, &caifdevs->list, list) {
+ if (caifd->netdev == dev)
+ return caifd;
+ }
+ return NULL;
+}
+
+static void caif_device_destroy(struct net_device *dev)
+{
+ struct caif_device_entry_list *caifdevs =
+ caif_device_list(dev_net(dev));
+ struct caif_device_entry *caifd;
+ ASSERT_RTNL();
+ if (dev->type != ARPHRD_CAIF)
+ return;
+
+ spin_lock_bh(&caifdevs->lock);
+ caifd = caif_get(dev);
+ if (caifd == NULL) {
+ spin_unlock_bh(&caifdevs->lock);
+ return;
+ }
+
+ list_del(&caifd->list);
+ spin_unlock_bh(&caifdevs->lock);
+
+ kfree(caifd);
+ return;
+}
+
+static int transmit(struct cflayer *layer, struct cfpkt *pkt)
+{
+ struct caif_device_entry *caifd =
+ container_of(layer, struct caif_device_entry, layer);
+ struct sk_buff *skb, *skb2;
+ int ret = -EINVAL;
+ skb = cfpkt_tonative(pkt);
+ skb->dev = caifd->netdev;
+ /*
+ * Don't allow SKB to be destroyed upon error, but signal resend
+ * notification to clients. We can't rely on the return value as
+ * congestion (NET_XMIT_CN) sometimes drops the packet, sometimes doesn't.
+ */
+ if (netif_queue_stopped(caifd->netdev))
+ return -EAGAIN;
+ skb2 = skb_get(skb);
+
+ ret = dev_queue_xmit(skb2);
+
+ if (!ret)
+ kfree_skb(skb);
+ else
+ return -EAGAIN;
+
+ return 0;
+}
+
+static int modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
+{
+ struct caif_device_entry *caifd;
+ struct caif_dev_common *caifdev;
+ caifd = container_of(layr, struct caif_device_entry, layer);
+ caifdev = netdev_priv(caifd->netdev);
+ if (ctrl == _CAIF_MODEMCMD_PHYIF_USEFULL) {
+ atomic_set(&caifd->in_use, 1);
+ wake_up_interruptible(&caifd->event);
+
+ } else if (ctrl == _CAIF_MODEMCMD_PHYIF_USELESS) {
+ atomic_set(&caifd->in_use, 0);
+ wake_up_interruptible(&caifd->event);
+ }
+ return 0;
+}
+
+/*
+ * Stuff received packets to associated sockets.
+ * On error, returns non-zero and releases the skb.
+ */
+static int receive(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pkttype, struct net_device *orig_dev)
+{
+ struct net *net;
+ struct cfpkt *pkt;
+ struct caif_device_entry *caifd;
+ net = dev_net(dev);
+ pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);
+ caifd = caif_get(dev);
+ if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
+ return NET_RX_DROP;
+
+ if (caifd->layer.up->receive(caifd->layer.up, pkt))
+ return NET_RX_DROP;
+
+ return 0;
+}
+
+static struct packet_type caif_packet_type __read_mostly = {
+ .type = cpu_to_be16(ETH_P_CAIF),
+ .func = receive,
+};
+
+static void dev_flowctrl(struct net_device *dev, int on)
+{
+ struct caif_device_entry *caifd = caif_get(dev);
+ if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
+ return;
+
+ caifd->layer.up->ctrlcmd(caifd->layer.up,
+ on ?
+ _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
+ _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
+ caifd->layer.id);
+}
+
+/* notify Caif of device events */
+static int caif_device_notify(struct notifier_block *me, unsigned long what,
+ void *arg)
+{
+ struct net_device *dev = arg;
+ struct caif_device_entry *caifd = NULL;
+ struct caif_dev_common *caifdev;
+ enum cfcnfg_phy_preference pref;
+ int res = -EINVAL;
+ enum cfcnfg_phy_type phy_type;
+
+ if (dev->type != ARPHRD_CAIF)
+ return 0;
+
+ switch (what) {
+ case NETDEV_REGISTER:
+ pr_info("CAIF: %s():register %s\n", __func__, dev->name);
+ caifd = caif_device_alloc(dev);
+ if (caifd == NULL)
+ break;
+ caifdev = netdev_priv(dev);
+ caifdev->flowctrl = dev_flowctrl;
+ atomic_set(&caifd->state, what);
+ res = 0;
+ break;
+
+ case NETDEV_UP:
+ pr_info("CAIF: %s(): up %s\n", __func__, dev->name);
+ caifd = caif_get(dev);
+ if (caifd == NULL)
+ break;
+ caifdev = netdev_priv(dev);
+ if (atomic_read(&caifd->state) == NETDEV_UP) {
+ pr_info("CAIF: %s():%s already up\n",
+ __func__, dev->name);
+ break;
+ }
+ atomic_set(&caifd->state, what);
+ caifd->layer.transmit = transmit;
+ caifd->layer.modemcmd = modemcmd;
+
+ if (caifdev->use_frag)
+ phy_type = CFPHYTYPE_FRAG;
+ else
+ phy_type = CFPHYTYPE_CAIF;
+
+ switch (caifdev->link_select) {
+ case CAIF_LINK_HIGH_BANDW:
+ pref = CFPHYPREF_HIGH_BW;
+ break;
+ case CAIF_LINK_LOW_LATENCY:
+ pref = CFPHYPREF_LOW_LAT;
+ break;
+ default:
+ pref = CFPHYPREF_HIGH_BW;
+ break;
+ }
+
+ cfcnfg_add_phy_layer(get_caif_conf(),
+ phy_type,
+ dev,
+ &caifd->layer,
+ &caifd->phyid,
+ pref,
+ caifdev->use_fcs,
+ caifdev->use_stx);
+ strncpy(caifd->layer.name, dev->name,
+ sizeof(caifd->layer.name) - 1);
+ caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
+ break;
+
+ case NETDEV_GOING_DOWN:
+ caifd = caif_get(dev);
+ if (caifd == NULL)
+ break;
+ pr_info("CAIF: %s():going down %s\n", __func__, dev->name);
+
+ if (atomic_read(&caifd->state) == NETDEV_GOING_DOWN ||
+ atomic_read(&caifd->state) == NETDEV_DOWN)
+ break;
+
+ atomic_set(&caifd->state, what);
+ if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
+ return -EINVAL;
+ caifd->layer.up->ctrlcmd(caifd->layer.up,
+ _CAIF_CTRLCMD_PHYIF_DOWN_IND,
+ caifd->layer.id);
+ res = wait_event_interruptible_timeout(caifd->event,
+ atomic_read(&caifd->in_use) == 0,
+ TIMEOUT);
+ break;
+
+ case NETDEV_DOWN:
+ caifd = caif_get(dev);
+ if (caifd == NULL)
+ break;
+ pr_info("CAIF: %s(): down %s\n", __func__, dev->name);
+ if (atomic_read(&caifd->in_use))
+ pr_warning("CAIF: %s(): "
+ "Unregistering an active CAIF device: %s\n",
+ __func__, dev->name);
+ cfcnfg_del_phy_layer(get_caif_conf(), &caifd->layer);
+ atomic_set(&caifd->state, what);
+ break;
+
+ case NETDEV_UNREGISTER:
+ caifd = caif_get(dev);
+ pr_info("CAIF: %s(): unregister %s\n", __func__, dev->name);
+ atomic_set(&caifd->state, what);
+ caif_device_destroy(dev);
+ break;
+ }
+ return 0;
+}
+
+static struct notifier_block caif_device_notifier = {
+ .notifier_call = caif_device_notify,
+ .priority = 0,
+};
+
+
+struct cfcnfg *get_caif_conf(void)
+{
+ return cfg;
+}
+EXPORT_SYMBOL(get_caif_conf);
+
+int caif_connect_client(struct caif_connect_request *conn_req,
+ struct cflayer *client_layer)
+{
+ struct cfctrl_link_param param;
+ int ret;
+ ret = connect_req_to_link_param(get_caif_conf(), conn_req, &param);
+ if (ret)
+ return ret;
+ /* Hook up the adaptation layer. */
+ return cfcnfg_add_adaptation_layer(get_caif_conf(),
+ &param, client_layer);
+}
+EXPORT_SYMBOL(caif_connect_client);
+
+int caif_disconnect_client(struct cflayer *adap_layer)
+{
+ return cfcnfg_disconn_adapt_layer(get_caif_conf(), adap_layer);
+}
+EXPORT_SYMBOL(caif_disconnect_client);
+
+void caif_release_client(struct cflayer *adap_layer)
+{
+ cfcnfg_release_adap_layer(adap_layer);
+}
+EXPORT_SYMBOL(caif_release_client);
+
+/* Per-namespace Caif devices handling */
+static int caif_init_net(struct net *net)
+{
+ struct caif_net *caifn = net_generic(net, caif_net_id);
+ INIT_LIST_HEAD(&caifn->caifdevs.list);
+ spin_lock_init(&caifn->caifdevs.lock);
+ return 0;
+}
+
+static void caif_exit_net(struct net *net)
+{
+ struct net_device *dev;
+ int res;
+ rtnl_lock();
+ for_each_netdev(net, dev) {
+ if (dev->type != ARPHRD_CAIF)
+ continue;
+ res = dev_close(dev);
+ caif_device_destroy(dev);
+ }
+ rtnl_unlock();
+}
+
+static struct pernet_operations caif_net_ops = {
+ .init = caif_init_net,
+ .exit = caif_exit_net,
+ .id = &caif_net_id,
+ .size = sizeof(struct caif_net),
+};
+
+/* Initialize Caif devices list */
+static int __init caif_device_init(void)
+{
+ int result;
+ cfg = cfcnfg_create();
+ if (!cfg) {
+ pr_warning("CAIF: %s(): can't create cfcnfg.\n", __func__);
+ goto err_cfcnfg_create_failed;
+ }
+ result = register_pernet_device(&caif_net_ops);
+
+ if (result) {
+ kfree(cfg);
+ cfg = NULL;
+ return result;
+ }
+ dev_add_pack(&caif_packet_type);
+ register_netdevice_notifier(&caif_device_notifier);
+
+ return result;
+err_cfcnfg_create_failed:
+ return -ENODEV;
+}
+
+static void __exit caif_device_exit(void)
+{
+ dev_remove_pack(&caif_packet_type);
+ unregister_pernet_device(&caif_net_ops);
+ unregister_netdevice_notifier(&caif_device_notifier);
+ cfcnfg_remove(cfg);
+}
+
+module_init(caif_device_init);
+module_exit(caif_device_exit);
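caif_dev.c above wires caifdev->flowctrl to dev_flowctrl() at NETDEV_REGISTER time, so a CAIF link-layer driver can throttle the stack from its TX path. A hedged fragment of how such a driver might use it (driver names are hypothetical; the caif_dev_common usage mirrors the code above):

static void foo_tx_queue_drained(struct net_device *dev)
{
	struct caif_dev_common *caifdev = netdev_priv(dev);

	netif_wake_queue(dev);
	if (caifdev->flowctrl)
		caifdev->flowctrl(dev, 1);	/* signal flow back on */
}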
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
new file mode 100644
index 000000000000..c3a70c5c893a
--- /dev/null
+++ b/net/caif/caif_socket.c
@@ -0,0 +1,1252 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/poll.h>
+#include <linux/tcp.h>
+#include <linux/uaccess.h>
+#include <linux/mutex.h>
+#include <linux/debugfs.h>
+#include <linux/caif/caif_socket.h>
+#include <asm/atomic.h>
+#include <net/sock.h>
+#include <net/tcp_states.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/caif_dev.h>
+#include <net/caif/cfpkt.h>
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NETPROTO(AF_CAIF);
+
+#define CAIF_DEF_SNDBUF (CAIF_MAX_PAYLOAD_SIZE*10)
+#define CAIF_DEF_RCVBUF (CAIF_MAX_PAYLOAD_SIZE*100)
+
+/*
+ * CAIF state is re-using the TCP socket states.
+ * caif_states stored in sk_state reflect the state as reported by
+ * the CAIF stack, while sk_socket->state is the state of the socket.
+ */
+enum caif_states {
+ CAIF_CONNECTED = TCP_ESTABLISHED,
+ CAIF_CONNECTING = TCP_SYN_SENT,
+ CAIF_DISCONNECTED = TCP_CLOSE
+};
+
+#define TX_FLOW_ON_BIT 1
+#define RX_FLOW_ON_BIT 2
+
+static struct dentry *debugfsdir;
+
+#ifdef CONFIG_DEBUG_FS
+struct debug_fs_counter {
+ atomic_t caif_nr_socks;
+ atomic_t num_connect_req;
+ atomic_t num_connect_resp;
+ atomic_t num_connect_fail_resp;
+ atomic_t num_disconnect;
+ atomic_t num_remote_shutdown_ind;
+ atomic_t num_tx_flow_off_ind;
+ atomic_t num_tx_flow_on_ind;
+ atomic_t num_rx_flow_off;
+ atomic_t num_rx_flow_on;
+};
+struct debug_fs_counter cnt;
+#define dbfs_atomic_inc(v) atomic_inc(v)
+#define dbfs_atomic_dec(v) atomic_dec(v)
+#else
+#define dbfs_atomic_inc(v)
+#define dbfs_atomic_dec(v)
+#endif
+
+struct caifsock {
+ struct sock sk; /* must be first member */
+ struct cflayer layer;
+ char name[CAIF_LAYER_NAME_SZ]; /* Used for debugging */
+ u32 flow_state;
+ struct caif_connect_request conn_req;
+ struct mutex readlock;
+ struct dentry *debugfs_socket_dir;
+};
+
+static int rx_flow_is_on(struct caifsock *cf_sk)
+{
+ return test_bit(RX_FLOW_ON_BIT,
+ (void *) &cf_sk->flow_state);
+}
+
+static int tx_flow_is_on(struct caifsock *cf_sk)
+{
+ return test_bit(TX_FLOW_ON_BIT,
+ (void *) &cf_sk->flow_state);
+}
+
+static void set_rx_flow_off(struct caifsock *cf_sk)
+{
+ clear_bit(RX_FLOW_ON_BIT,
+ (void *) &cf_sk->flow_state);
+}
+
+static void set_rx_flow_on(struct caifsock *cf_sk)
+{
+ set_bit(RX_FLOW_ON_BIT,
+ (void *) &cf_sk->flow_state);
+}
+
+static void set_tx_flow_off(struct caifsock *cf_sk)
+{
+ clear_bit(TX_FLOW_ON_BIT,
+ (void *) &cf_sk->flow_state);
+}
+
+static void set_tx_flow_on(struct caifsock *cf_sk)
+{
+ set_bit(TX_FLOW_ON_BIT,
+ (void *) &cf_sk->flow_state);
+}
+
+static void caif_read_lock(struct sock *sk)
+{
+ struct caifsock *cf_sk;
+ cf_sk = container_of(sk, struct caifsock, sk);
+ mutex_lock(&cf_sk->readlock);
+}
+
+static void caif_read_unlock(struct sock *sk)
+{
+ struct caifsock *cf_sk;
+ cf_sk = container_of(sk, struct caifsock, sk);
+ mutex_unlock(&cf_sk->readlock);
+}
+
+int sk_rcvbuf_lowwater(struct caifsock *cf_sk)
+{
+ /* A quarter of the full buffer is used as the low-water mark */
+ return cf_sk->sk.sk_rcvbuf / 4;
+}
+
+void caif_flow_ctrl(struct sock *sk, int mode)
+{
+ struct caifsock *cf_sk;
+ cf_sk = container_of(sk, struct caifsock, sk);
+ if (cf_sk->layer.dn)
+ cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, mode);
+}
+
+/*
+ * Copied from sock.c:sock_queue_rcv_skb(), but changed so that packets
+ * are not dropped; instead, CAIF sends a flow-off indication.
+ */
+int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+ int err;
+ int skb_len;
+ unsigned long flags;
+ struct sk_buff_head *list = &sk->sk_receive_queue;
+ struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+
+ if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+ (unsigned)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
+ trace_printk("CAIF: %s():"
+ " sending flow OFF (queue len = %d %d)\n",
+ __func__,
+ atomic_read(&cf_sk->sk.sk_rmem_alloc),
+ sk_rcvbuf_lowwater(cf_sk));
+ set_rx_flow_off(cf_sk);
+ if (cf_sk->layer.dn)
+ cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
+ CAIF_MODEMCMD_FLOW_OFF_REQ);
+ }
+
+ err = sk_filter(sk, skb);
+ if (err)
+ return err;
+ if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) {
+ set_rx_flow_off(cf_sk);
+ trace_printk("CAIF: %s():"
+ " sending flow OFF due to rmem_schedule\n",
+ __func__);
+ if (cf_sk->layer.dn)
+ cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
+ CAIF_MODEMCMD_FLOW_OFF_REQ);
+ }
+ skb->dev = NULL;
+ skb_set_owner_r(skb, sk);
+ /* Cache the SKB length before we tack it onto the receive
+ * queue. Once it is added it no longer belongs to us and
+ * may be freed by other threads of control pulling packets
+ * from the queue.
+ */
+ skb_len = skb->len;
+ spin_lock_irqsave(&list->lock, flags);
+ if (!sock_flag(sk, SOCK_DEAD))
+ __skb_queue_tail(list, skb);
+ spin_unlock_irqrestore(&list->lock, flags);
+
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_data_ready(sk, skb_len);
+ else
+ kfree_skb(skb);
+ return 0;
+}
+
+/* Packet Receive Callback function called from CAIF Stack */
+static int caif_sktrecv_cb(struct cflayer *layr, struct cfpkt *pkt)
+{
+ struct caifsock *cf_sk;
+ struct sk_buff *skb;
+
+ cf_sk = container_of(layr, struct caifsock, layer);
+ skb = cfpkt_tonative(pkt);
+
+ if (unlikely(cf_sk->sk.sk_state != CAIF_CONNECTED)) {
+ cfpkt_destroy(pkt);
+ return 0;
+ }
+ caif_queue_rcv_skb(&cf_sk->sk, skb);
+ return 0;
+}
+
+/* Packet Control Callback function called from CAIF */
+static void caif_ctrl_cb(struct cflayer *layr,
+ enum caif_ctrlcmd flow,
+ int phyid)
+{
+ struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);
+ switch (flow) {
+ case CAIF_CTRLCMD_FLOW_ON_IND:
+ /* OK from modem to start sending again */
+ dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
+ set_tx_flow_on(cf_sk);
+ cf_sk->sk.sk_state_change(&cf_sk->sk);
+ break;
+
+ case CAIF_CTRLCMD_FLOW_OFF_IND:
+ /* Modem asks us to shut up */
+ dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
+ set_tx_flow_off(cf_sk);
+ cf_sk->sk.sk_state_change(&cf_sk->sk);
+ break;
+
+ case CAIF_CTRLCMD_INIT_RSP:
+ /* We're now connected */
+ dbfs_atomic_inc(&cnt.num_connect_resp);
+ cf_sk->sk.sk_state = CAIF_CONNECTED;
+ set_tx_flow_on(cf_sk);
+ cf_sk->sk.sk_state_change(&cf_sk->sk);
+ break;
+
+ case CAIF_CTRLCMD_DEINIT_RSP:
+ /* We're now disconnected */
+ cf_sk->sk.sk_state = CAIF_DISCONNECTED;
+ cf_sk->sk.sk_state_change(&cf_sk->sk);
+ cfcnfg_release_adap_layer(&cf_sk->layer);
+ break;
+
+ case CAIF_CTRLCMD_INIT_FAIL_RSP:
+ /* Connect request failed */
+ dbfs_atomic_inc(&cnt.num_connect_fail_resp);
+ cf_sk->sk.sk_err = ECONNREFUSED;
+ cf_sk->sk.sk_state = CAIF_DISCONNECTED;
+ cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
+ /*
+ * Socket "standards" seems to require POLLOUT to
+ * be set at connect failure.
+ */
+ set_tx_flow_on(cf_sk);
+ cf_sk->sk.sk_state_change(&cf_sk->sk);
+ break;
+
+ case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
+ /* Modem has closed this connection, or device is down. */
+ dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
+ cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
+ cf_sk->sk.sk_err = ECONNRESET;
+ set_rx_flow_on(cf_sk);
+ cf_sk->sk.sk_error_report(&cf_sk->sk);
+ break;
+
+ default:
+ pr_debug("CAIF: %s(): Unexpected flow command %d\n",
+ __func__, flow);
+ }
+}
+
+static void caif_check_flow_release(struct sock *sk)
+{
+ struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+
+ if (cf_sk->layer.dn == NULL || cf_sk->layer.dn->modemcmd == NULL)
+ return;
+ if (rx_flow_is_on(cf_sk))
+ return;
+
+ if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
+ dbfs_atomic_inc(&cnt.num_rx_flow_on);
+ set_rx_flow_on(cf_sk);
+ cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
+ CAIF_MODEMCMD_FLOW_ON_REQ);
+ }
+}
+/*
+ * Copied from sock.c:sock_queue_rcv_skb(), with an added check that the
+ * user buffer has sufficient size.
+ */
+
+static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t buf_len, int flags)
+
+{
+ struct sock *sk = sock->sk;
+ struct sk_buff *skb;
+ int ret = 0;
+ int len;
+
+ if (unlikely(!buf_len))
+ return -EINVAL;
+
+ skb = skb_recv_datagram(sk, flags, 0, &ret);
+ if (!skb)
+ goto read_error;
+
+ len = skb->len;
+
+ if (skb->len > buf_len && !(flags & MSG_PEEK)) {
+ len = buf_len;
+ /*
+ * Push skb back on receive queue if buffer too small.
+ * This has a built-in race where multi-threaded receivers
+ * may get packets in the wrong order, but multiple reads do
+ * not really guarantee ordered delivery anyway.
+ * Let's optimize for speed without taking locks.
+ */
+
+ skb_queue_head(&sk->sk_receive_queue, skb);
+ ret = -EMSGSIZE;
+ goto read_error;
+ }
+
+ ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, len);
+ if (ret) {
+ skb_free_datagram(sk, skb);
+ goto read_error;
+ }
+
+ skb_free_datagram(sk, skb);
+
+ caif_check_flow_release(sk);
+
+ return len;
+
+read_error:
+ return ret;
+}
+
+
+/* Copied from unix_stream_wait_data, identical except for lock call. */
+static long caif_stream_data_wait(struct sock *sk, long timeo)
+{
+ DEFINE_WAIT(wait);
+ lock_sock(sk);
+
+ for (;;) {
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+
+ if (!skb_queue_empty(&sk->sk_receive_queue) ||
+ sk->sk_err ||
+ sk->sk_state != CAIF_CONNECTED ||
+ sock_flag(sk, SOCK_DEAD) ||
+ (sk->sk_shutdown & RCV_SHUTDOWN) ||
+ signal_pending(current) ||
+ !timeo)
+ break;
+
+ set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock(sk);
+ clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ }
+
+ finish_wait(sk_sleep(sk), &wait);
+ release_sock(sk);
+ return timeo;
+}
+
+
+/*
+ * Copied from unix_stream_recvmsg, but removed credit checks,
+ * changed locking calls, changed address handling.
+ */
+static int caif_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *msg, size_t size,
+ int flags)
+{
+ struct sock *sk = sock->sk;
+ int copied = 0;
+ int target;
+ int err = 0;
+ long timeo;
+
+ err = -EOPNOTSUPP;
+ if (flags&MSG_OOB)
+ goto out;
+
+ msg->msg_namelen = 0;
+
+ /*
+ * Lock the socket to prevent queue disordering
+ * while we sleep copying data to the user buffer.
+ */
+ err = -EAGAIN;
+ if (sk->sk_state == CAIF_CONNECTING)
+ goto out;
+
+ caif_read_lock(sk);
+ target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
+ timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
+
+ do {
+ int chunk;
+ struct sk_buff *skb;
+
+ lock_sock(sk);
+ skb = skb_dequeue(&sk->sk_receive_queue);
+ caif_check_flow_release(sk);
+
+ if (skb == NULL) {
+ if (copied >= target)
+ goto unlock;
+ /*
+ * POSIX 1003.1g mandates this order.
+ */
+ err = sock_error(sk);
+ if (err)
+ goto unlock;
+ err = -ECONNRESET;
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ goto unlock;
+
+ err = -EPIPE;
+ if (sk->sk_state != CAIF_CONNECTED)
+ goto unlock;
+ if (sock_flag(sk, SOCK_DEAD))
+ goto unlock;
+
+ release_sock(sk);
+
+ err = -EAGAIN;
+ if (!timeo)
+ break;
+
+ caif_read_unlock(sk);
+
+ timeo = caif_stream_data_wait(sk, timeo);
+
+ if (signal_pending(current)) {
+ err = sock_intr_errno(timeo);
+ goto out;
+ }
+ caif_read_lock(sk);
+ continue;
+unlock:
+ release_sock(sk);
+ break;
+ }
+ release_sock(sk);
+ chunk = min_t(unsigned int, skb->len, size);
+ if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
+ skb_queue_head(&sk->sk_receive_queue, skb);
+ if (copied == 0)
+ copied = -EFAULT;
+ break;
+ }
+ copied += chunk;
+ size -= chunk;
+
+ /* Mark read part of skb as used */
+ if (!(flags & MSG_PEEK)) {
+ skb_pull(skb, chunk);
+
+ /* put the skb back if we didn't use it up. */
+ if (skb->len) {
+ skb_queue_head(&sk->sk_receive_queue, skb);
+ break;
+ }
+ kfree_skb(skb);
+
+ } else {
+ /*
+ * It is questionable, see note in unix_dgram_recvmsg.
+ */
+ /* put message back and return */
+ skb_queue_head(&sk->sk_receive_queue, skb);
+ break;
+ }
+ } while (size);
+ caif_read_unlock(sk);
+
+out:
+ return copied ? : err;
+}
+
+/*
+ * Copied from sock.c:sock_wait_for_wmem, but change to wait for
+ * CAIF flow-on and sock_writable.
+ */
+static long caif_wait_for_flow_on(struct caifsock *cf_sk,
+ int wait_writeable, long timeo, int *err)
+{
+ struct sock *sk = &cf_sk->sk;
+ DEFINE_WAIT(wait);
+ for (;;) {
+ *err = 0;
+ if (tx_flow_is_on(cf_sk) &&
+ (!wait_writeable || sock_writeable(&cf_sk->sk)))
+ break;
+ *err = -ETIMEDOUT;
+ if (!timeo)
+ break;
+ *err = -ERESTARTSYS;
+ if (signal_pending(current))
+ break;
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+ *err = -ECONNRESET;
+ if (sk->sk_shutdown & SHUTDOWN_MASK)
+ break;
+ *err = -sk->sk_err;
+ if (sk->sk_err)
+ break;
+ *err = -EPIPE;
+ if (cf_sk->sk.sk_state != CAIF_CONNECTED)
+ break;
+ timeo = schedule_timeout(timeo);
+ }
+ finish_wait(sk_sleep(sk), &wait);
+ return timeo;
+}
+
+/*
+ * Transmit a SKB. The device may temporarily request re-transmission
+ * by returning EAGAIN.
+ */
+static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
+ int noblock, long timeo)
+{
+ struct cfpkt *pkt;
+ int ret, loopcnt = 0;
+
+ pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);
+ memset(cfpkt_info(pkt), 0, sizeof(struct caif_payload_info));
+ do {
+
+ ret = -ETIMEDOUT;
+
+ /* Slight paranoia, probably not needed. */
+ if (unlikely(loopcnt++ > 1000)) {
+ pr_warning("CAIF: %s(): transmit retries failed,"
+ " error = %d\n", __func__, ret);
+ break;
+ }
+
+ if (cf_sk->layer.dn != NULL)
+ ret = cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
+ if (likely(ret >= 0))
+ break;
+ /* if transmit return -EAGAIN, then retry */
+ if (noblock && ret == -EAGAIN)
+ break;
+ timeo = caif_wait_for_flow_on(cf_sk, 0, timeo, &ret);
+ if (signal_pending(current)) {
+ ret = sock_intr_errno(timeo);
+ break;
+ }
+ if (ret)
+ break;
+ if (cf_sk->sk.sk_state != CAIF_CONNECTED ||
+ sock_flag(&cf_sk->sk, SOCK_DEAD) ||
+ (cf_sk->sk.sk_shutdown & RCV_SHUTDOWN)) {
+ ret = -EPIPE;
+ cf_sk->sk.sk_err = EPIPE;
+ break;
+ }
+ } while (ret == -EAGAIN);
+ return ret;
+}
+
+/* Copied from af_unix:unix_dgram_sendmsg, and adapted to CAIF */
+static int caif_seqpkt_sendmsg(struct kiocb *kiocb, struct socket *sock,
+ struct msghdr *msg, size_t len)
+{
+ struct sock *sk = sock->sk;
+ struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+ int buffer_size;
+ int ret = 0;
+ struct sk_buff *skb = NULL;
+ int noblock;
+ long timeo;
+ caif_assert(cf_sk);
+ ret = sock_error(sk);
+ if (ret)
+ goto err;
+
+ ret = -EOPNOTSUPP;
+ if (msg->msg_flags&MSG_OOB)
+ goto err;
+
+ ret = -EOPNOTSUPP;
+ if (msg->msg_namelen)
+ goto err;
+
+ ret = -EINVAL;
+ if (unlikely(msg->msg_iov->iov_base == NULL))
+ goto err;
+ noblock = msg->msg_flags & MSG_DONTWAIT;
+
+ buffer_size = len + CAIF_NEEDED_HEADROOM + CAIF_NEEDED_TAILROOM;
+
+ ret = -EMSGSIZE;
+ if (buffer_size > CAIF_MAX_PAYLOAD_SIZE)
+ goto err;
+
+ timeo = sock_sndtimeo(sk, noblock);
+ timeo = caif_wait_for_flow_on(container_of(sk, struct caifsock, sk),
+ 1, timeo, &ret);
+
+ ret = -EPIPE;
+ if (cf_sk->sk.sk_state != CAIF_CONNECTED ||
+ sock_flag(sk, SOCK_DEAD) ||
+ (sk->sk_shutdown & RCV_SHUTDOWN))
+ goto err;
+
+ ret = -ENOMEM;
+ skb = sock_alloc_send_skb(sk, buffer_size, noblock, &ret);
+ if (!skb)
+ goto err;
+ skb_reserve(skb, CAIF_NEEDED_HEADROOM);
+
+ ret = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
+
+ if (ret)
+ goto err;
+ ret = transmit_skb(skb, cf_sk, noblock, timeo);
+ if (ret < 0)
+ goto err;
+ return len;
+err:
+ kfree_skb(skb);
+ return ret;
+}
+
+/*
+ * Copied from unix_stream_sendmsg and adapted to CAIF:
+ * Removed permission handling, added waiting for flow-on,
+ * and made other minor adaptations.
+ */
+static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
+ struct msghdr *msg, size_t len)
+{
+ struct sock *sk = sock->sk;
+ struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+ int err, size;
+ struct sk_buff *skb;
+ int sent = 0;
+ long timeo;
+
+ err = -EOPNOTSUPP;
+
+ if (unlikely(msg->msg_flags&MSG_OOB))
+ goto out_err;
+
+ if (unlikely(msg->msg_namelen))
+ goto out_err;
+
+ timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
+ timeo = caif_wait_for_flow_on(cf_sk, 1, timeo, &err);
+
+ if (unlikely(sk->sk_shutdown & SEND_SHUTDOWN))
+ goto pipe_err;
+
+ while (sent < len) {
+
+ size = len-sent;
+
+ if (size > CAIF_MAX_PAYLOAD_SIZE)
+ size = CAIF_MAX_PAYLOAD_SIZE;
+
+ /* If size is more than half of sndbuf, chop up message */
+ if (size > ((sk->sk_sndbuf >> 1) - 64))
+ size = (sk->sk_sndbuf >> 1) - 64;
+
+ if (size > SKB_MAX_ALLOC)
+ size = SKB_MAX_ALLOC;
+
+ skb = sock_alloc_send_skb(sk,
+ size + CAIF_NEEDED_HEADROOM
+ + CAIF_NEEDED_TAILROOM,
+ msg->msg_flags&MSG_DONTWAIT,
+ &err);
+ if (skb == NULL)
+ goto out_err;
+
+ skb_reserve(skb, CAIF_NEEDED_HEADROOM);
+ /*
+ * If you pass two values to the sock_alloc_send_skb
+ * it tries to grab the large buffer with GFP_NOFS
+ * (which can fail easily), and if it fails grab the
+ * fallback size buffer which is under a page and will
+ * succeed. [Alan]
+ */
+ size = min_t(int, size, skb_tailroom(skb));
+
+ err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
+ if (err) {
+ kfree_skb(skb);
+ goto out_err;
+ }
+ err = transmit_skb(skb, cf_sk,
+ msg->msg_flags&MSG_DONTWAIT, timeo);
+ if (err < 0) {
+ kfree_skb(skb);
+ goto pipe_err;
+ }
+ sent += size;
+ }
+
+ return sent;
+
+pipe_err:
+ if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
+ send_sig(SIGPIPE, current, 0);
+ err = -EPIPE;
+out_err:
+ return sent ? : err;
+}
+
+static int setsockopt(struct socket *sock,
+ int lvl, int opt, char __user *ov, unsigned int ol)
+{
+ struct sock *sk = sock->sk;
+ struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+ int prio, linksel;
+ struct ifreq ifreq;
+
+ if (cf_sk->sk.sk_socket->state != SS_UNCONNECTED)
+ return -ENOPROTOOPT;
+
+ switch (opt) {
+ case CAIFSO_LINK_SELECT:
+ if (ol < sizeof(int))
+ return -EINVAL;
+ if (lvl != SOL_CAIF)
+ goto bad_sol;
+ if (copy_from_user(&linksel, ov, sizeof(int)))
+ return -EINVAL;
+ lock_sock(&(cf_sk->sk));
+ cf_sk->conn_req.link_selector = linksel;
+ release_sock(&cf_sk->sk);
+ return 0;
+
+ case SO_PRIORITY:
+ if (lvl != SOL_SOCKET)
+ goto bad_sol;
+ if (ol < sizeof(int))
+ return -EINVAL;
+ if (copy_from_user(&prio, ov, sizeof(int)))
+ return -EINVAL;
+ lock_sock(&(cf_sk->sk));
+ cf_sk->conn_req.priority = prio;
+ release_sock(&cf_sk->sk);
+ return 0;
+
+ case SO_BINDTODEVICE:
+ if (lvl != SOL_SOCKET)
+ goto bad_sol;
+ if (ol < sizeof(struct ifreq))
+ return -EINVAL;
+ if (copy_from_user(&ifreq, ov, sizeof(ifreq)))
+ return -EFAULT;
+ lock_sock(&(cf_sk->sk));
+ strncpy(cf_sk->conn_req.link_name, ifreq.ifr_name,
+ sizeof(cf_sk->conn_req.link_name));
+ cf_sk->conn_req.link_name
+ [sizeof(cf_sk->conn_req.link_name)-1] = 0;
+ release_sock(&cf_sk->sk);
+ return 0;
+
+ case CAIFSO_REQ_PARAM:
+ if (lvl != SOL_CAIF)
+ goto bad_sol;
+ if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL)
+ return -ENOPROTOOPT;
+ lock_sock(&(cf_sk->sk));
+ if (ol > sizeof(cf_sk->conn_req.param.data) ||
+ copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) {
+ release_sock(&cf_sk->sk);
+ return -EINVAL;
+ }
+ cf_sk->conn_req.param.size = ol;
+ release_sock(&cf_sk->sk);
+ return 0;
+
+ default:
+ return -ENOPROTOOPT;
+ }
+
+ return 0;
+bad_sol:
+ return -ENOPROTOOPT;
+
+}
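+
+/*
+ * Illustrative user-space usage of the options above (a sketch, not part
+ * of this patch). Constants and types are assumed to come from the CAIF
+ * socket uapi headers; the interface name "caif0" and the parameter bytes
+ * are made-up placeholders. All options must be set before connect(),
+ * while the socket is still unconnected:
+ *
+ *	int s = socket(AF_CAIF, SOCK_SEQPACKET, CAIFPROTO_UTIL);
+ *	struct ifreq ifr;
+ *	unsigned char param[2] = { 0x01, 0x02 };
+ *
+ *	strncpy(ifr.ifr_name, "caif0", sizeof(ifr.ifr_name));
+ *	setsockopt(s, SOL_SOCKET, SO_BINDTODEVICE, &ifr, sizeof(ifr));
+ *	setsockopt(s, SOL_CAIF, CAIFSO_REQ_PARAM, param, sizeof(param));
+ */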
+
+/*
+ * caif_connect() - Connect a CAIF Socket
+ * Copied and modified from af_irda.c:irda_connect().
+ *
+ * Note: by consulting "errno", the user space caller may learn the cause
+ * of the failure. Most of them are visible in the function, others may come
+ * from subroutines called and are listed here :
+ * o -EAFNOSUPPORT: bad socket family or type.
+ * o -ESOCKTNOSUPPORT: bad socket type or protocol
+ * o -EINVAL: bad socket address, or CAIF link type
+ * o -ECONNREFUSED: remote end refused the connection.
+ * o -EINPROGRESS: connect request sent but timed out (or non-blocking)
+ * o -EISCONN: already connected.
+ * o -ETIMEDOUT: Connection timed out (send timeout)
+ * o -ENODEV: No link layer to send request
+ * o -ECONNRESET: Received Shutdown indication or lost link layer
+ * o -ENOMEM: Out of memory
+ *
+ * State Strategy:
+ * o sk_state: holds the CAIF_* protocol state, it's updated by
+ * caif_ctrl_cb.
+ * o sock->state: holds the SS_* socket state and is updated by connect and
+ * disconnect.
+ */
+static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+ long timeo;
+ int err;
+ lock_sock(sk);
+
+ err = -EAFNOSUPPORT;
+ if (uaddr->sa_family != AF_CAIF)
+ goto out;
+
+ err = -ESOCKTNOSUPPORT;
+ if (unlikely(!(sk->sk_type == SOCK_STREAM &&
+ cf_sk->sk.sk_protocol == CAIFPROTO_AT) &&
+ sk->sk_type != SOCK_SEQPACKET))
+ goto out;
+ switch (sock->state) {
+ case SS_UNCONNECTED:
+ /* Normal case, a fresh connect */
+ caif_assert(sk->sk_state == CAIF_DISCONNECTED);
+ break;
+ case SS_CONNECTING:
+ switch (sk->sk_state) {
+ case CAIF_CONNECTED:
+ sock->state = SS_CONNECTED;
+ err = -EISCONN;
+ goto out;
+ case CAIF_DISCONNECTED:
+ /* Reconnect allowed */
+ break;
+ case CAIF_CONNECTING:
+ err = -EALREADY;
+ if (flags & O_NONBLOCK)
+ goto out;
+ goto wait_connect;
+ }
+ break;
+ case SS_CONNECTED:
+ caif_assert(sk->sk_state == CAIF_CONNECTED ||
+ sk->sk_state == CAIF_DISCONNECTED);
+ if (sk->sk_shutdown & SHUTDOWN_MASK) {
+ /* Allow re-connect after SHUTDOWN_IND */
+ caif_disconnect_client(&cf_sk->layer);
+ break;
+ }
+ /* No reconnect on a seqpacket socket */
+ err = -EISCONN;
+ goto out;
+ case SS_DISCONNECTING:
+ case SS_FREE:
+ caif_assert(1); /*Should never happen */
+ break;
+ }
+ sk->sk_state = CAIF_DISCONNECTED;
+ sock->state = SS_UNCONNECTED;
+ sk_stream_kill_queues(&cf_sk->sk);
+
+ err = -EINVAL;
+ if (addr_len != sizeof(struct sockaddr_caif) ||
+ !uaddr)
+ goto out;
+
+ memcpy(&cf_sk->conn_req.sockaddr, uaddr,
+ sizeof(struct sockaddr_caif));
+
+ /* Move to connecting socket, start sending Connect Requests */
+ sock->state = SS_CONNECTING;
+ sk->sk_state = CAIF_CONNECTING;
+
+ dbfs_atomic_inc(&cnt.num_connect_req);
+ cf_sk->layer.receive = caif_sktrecv_cb;
+ err = caif_connect_client(&cf_sk->conn_req,
+ &cf_sk->layer);
+ if (err < 0) {
+ cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
+ cf_sk->sk.sk_state = CAIF_DISCONNECTED;
+ goto out;
+ }
+
+ err = -EINPROGRESS;
+wait_connect:
+
+ if (sk->sk_state != CAIF_CONNECTED && (flags & O_NONBLOCK))
+ goto out;
+
+ timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
+
+ release_sock(sk);
+ err = wait_event_interruptible_timeout(*sk_sleep(sk),
+ sk->sk_state != CAIF_CONNECTING,
+ timeo);
+ lock_sock(sk);
+ if (err < 0)
+ goto out; /* -ERESTARTSYS */
+ if (err == 0 && sk->sk_state != CAIF_CONNECTED) {
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+ if (sk->sk_state != CAIF_CONNECTED) {
+ sock->state = SS_UNCONNECTED;
+ err = sock_error(sk);
+ if (!err)
+ err = -ECONNREFUSED;
+ goto out;
+ }
+ sock->state = SS_CONNECTED;
+ err = 0;
+out:
+ release_sock(sk);
+ return err;
+}
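+
+/*
+ * Example (illustrative only, not part of this patch): connecting a CAIF
+ * datagram channel from user space. struct sockaddr_caif and CAIFPROTO_*
+ * are assumed from the CAIF socket uapi header; the connection id is an
+ * arbitrary placeholder. On failure, errno carries one of the codes
+ * listed in the caif_connect() comment above.
+ *
+ *	struct sockaddr_caif addr;
+ *	int s = socket(AF_CAIF, SOCK_SEQPACKET, CAIFPROTO_DATAGRAM);
+ *
+ *	memset(&addr, 0, sizeof(addr));
+ *	addr.family = AF_CAIF;
+ *	addr.u.dgm.connection_id = 1;
+ *	if (connect(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
+ *		perror("caif connect");
+ */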
+
+
+/*
+ * caif_release() - Disconnect a CAIF Socket
+ * Copied and modified af_irda.c:irda_release().
+ */
+static int caif_release(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+ struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+ int res = 0;
+
+ if (!sk)
+ return 0;
+
+ set_tx_flow_off(cf_sk);
+
+ /*
+ * Ensure that packets are not queued after this point in time.
+ * caif_queue_rcv_skb checks SOCK_DEAD holding the queue lock,
+ * this ensures no packets when sock is dead.
+ */
+ spin_lock(&sk->sk_receive_queue.lock);
+ sock_set_flag(sk, SOCK_DEAD);
+ spin_unlock(&sk->sk_receive_queue.lock);
+ sock->sk = NULL;
+
+ dbfs_atomic_inc(&cnt.num_disconnect);
+
+ if (cf_sk->debugfs_socket_dir != NULL)
+ debugfs_remove_recursive(cf_sk->debugfs_socket_dir);
+
+ lock_sock(&(cf_sk->sk));
+ sk->sk_state = CAIF_DISCONNECTED;
+ sk->sk_shutdown = SHUTDOWN_MASK;
+
+ if (cf_sk->sk.sk_socket->state == SS_CONNECTED ||
+ cf_sk->sk.sk_socket->state == SS_CONNECTING)
+ res = caif_disconnect_client(&cf_sk->layer);
+
+ cf_sk->sk.sk_socket->state = SS_DISCONNECTING;
+ wake_up_interruptible_poll(sk_sleep(sk), POLLERR|POLLHUP);
+
+ sock_orphan(sk);
+ cf_sk->layer.dn = NULL;
+ sk_stream_kill_queues(&cf_sk->sk);
+ release_sock(sk);
+ sock_put(sk);
+ return res;
+}
+
+/* Copied from af_unix.c:unix_poll(), added CAIF tx_flow handling */
+static unsigned int caif_poll(struct file *file,
+ struct socket *sock, poll_table *wait)
+{
+ struct sock *sk = sock->sk;
+ unsigned int mask;
+ struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+
+ sock_poll_wait(file, sk_sleep(sk), wait);
+ mask = 0;
+
+ /* exceptional events? */
+ if (sk->sk_err)
+ mask |= POLLERR;
+ if (sk->sk_shutdown == SHUTDOWN_MASK)
+ mask |= POLLHUP;
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ mask |= POLLRDHUP;
+
+ /* readable? */
+ if (!skb_queue_empty(&sk->sk_receive_queue) ||
+ (sk->sk_shutdown & RCV_SHUTDOWN))
+ mask |= POLLIN | POLLRDNORM;
+
+ /* Connection-based need to check for termination and startup */
+ if (sk->sk_state == CAIF_DISCONNECTED)
+ mask |= POLLHUP;
+
+ /*
+ * we set writable also when the other side has shut down the
+ * connection. This prevents stuck sockets.
+ */
+ if (sock_writeable(sk) && tx_flow_is_on(cf_sk))
+ mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+
+ return mask;
+}
+
+static const struct proto_ops caif_seqpacket_ops = {
+ .family = PF_CAIF,
+ .owner = THIS_MODULE,
+ .release = caif_release,
+ .bind = sock_no_bind,
+ .connect = caif_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = sock_no_accept,
+ .getname = sock_no_getname,
+ .poll = caif_poll,
+ .ioctl = sock_no_ioctl,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .setsockopt = setsockopt,
+ .getsockopt = sock_no_getsockopt,
+ .sendmsg = caif_seqpkt_sendmsg,
+ .recvmsg = caif_seqpkt_recvmsg,
+ .mmap = sock_no_mmap,
+ .sendpage = sock_no_sendpage,
+};
+
+static const struct proto_ops caif_stream_ops = {
+ .family = PF_CAIF,
+ .owner = THIS_MODULE,
+ .release = caif_release,
+ .bind = sock_no_bind,
+ .connect = caif_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = sock_no_accept,
+ .getname = sock_no_getname,
+ .poll = caif_poll,
+ .ioctl = sock_no_ioctl,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .setsockopt = setsockopt,
+ .getsockopt = sock_no_getsockopt,
+ .sendmsg = caif_stream_sendmsg,
+ .recvmsg = caif_stream_recvmsg,
+ .mmap = sock_no_mmap,
+ .sendpage = sock_no_sendpage,
+};
+
+/* This function is called when a socket is finally destroyed. */
+static void caif_sock_destructor(struct sock *sk)
+{
+ struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+ caif_assert(!atomic_read(&sk->sk_wmem_alloc));
+ caif_assert(sk_unhashed(sk));
+ caif_assert(!sk->sk_socket);
+ if (!sock_flag(sk, SOCK_DEAD)) {
+ pr_info("Attempt to release alive CAIF socket: %p\n", sk);
+ return;
+ }
+ sk_stream_kill_queues(&cf_sk->sk);
+ dbfs_atomic_dec(&cnt.caif_nr_socks);
+}
+
+static int caif_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
+{
+ struct sock *sk = NULL;
+ struct caifsock *cf_sk = NULL;
+ static struct proto prot = {.name = "PF_CAIF",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct caifsock),
+ };
+
+ if (!capable(CAP_SYS_ADMIN) && !capable(CAP_NET_ADMIN))
+ return -EPERM;
+ /*
+ * The sock->type specifies the socket type to use.
+ * The CAIF socket is connection oriented but packet based.
+ * CAIF trusts the reliability of the link; no retransmission
+ * is implemented.
+ */
+ if (sock->type == SOCK_SEQPACKET)
+ sock->ops = &caif_seqpacket_ops;
+ else if (sock->type == SOCK_STREAM)
+ sock->ops = &caif_stream_ops;
+ else
+ return -ESOCKTNOSUPPORT;
+
+ if (protocol < 0 || protocol >= CAIFPROTO_MAX)
+ return -EPROTONOSUPPORT;
+ /*
+ * Set the socket state to unconnected. The socket state
+ * is not actually used by net/core or socket.c, but initializing
+ * it makes sure that sock->state is never left uninitialized.
+ */
+ sk = sk_alloc(net, PF_CAIF, GFP_KERNEL, &prot);
+ if (!sk)
+ return -ENOMEM;
+
+ cf_sk = container_of(sk, struct caifsock, sk);
+
+ /* Store the protocol */
+ sk->sk_protocol = (unsigned char) protocol;
+
+ /* Sendbuf dictates the amount of outbound packets not yet sent */
+ sk->sk_sndbuf = CAIF_DEF_SNDBUF;
+ sk->sk_rcvbuf = CAIF_DEF_RCVBUF;
+
+ /*
+ * Lock the socket to prevent it from being used before
+ * initialization is complete.
+ */
+ lock_sock(&(cf_sk->sk));
+
+ /* Initialize the non-zero default sock structure data. */
+ sock_init_data(sock, sk);
+ sk->sk_destruct = caif_sock_destructor;
+
+ mutex_init(&cf_sk->readlock); /* single task reading lock */
+ cf_sk->layer.ctrlcmd = caif_ctrl_cb;
+ cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
+ cf_sk->sk.sk_state = CAIF_DISCONNECTED;
+
+ set_tx_flow_off(cf_sk);
+ set_rx_flow_on(cf_sk);
+
+ /* Set default options on configuration */
+ cf_sk->conn_req.priority = CAIF_PRIO_NORMAL;
+ cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY;
+ cf_sk->conn_req.protocol = protocol;
+ /* Increase the number of sockets created. */
+ dbfs_atomic_inc(&cnt.caif_nr_socks);
+#ifdef CONFIG_DEBUG_FS
+ if (!IS_ERR(debugfsdir)) {
+ /* Fill in some information concerning the misc socket. */
+ snprintf(cf_sk->name, sizeof(cf_sk->name), "cfsk%d",
+ atomic_read(&cnt.caif_nr_socks));
+
+ cf_sk->debugfs_socket_dir =
+ debugfs_create_dir(cf_sk->name, debugfsdir);
+ debugfs_create_u32("sk_state", S_IRUSR | S_IWUSR,
+ cf_sk->debugfs_socket_dir,
+ (u32 *) &cf_sk->sk.sk_state);
+ debugfs_create_u32("flow_state", S_IRUSR | S_IWUSR,
+ cf_sk->debugfs_socket_dir, &cf_sk->flow_state);
+ debugfs_create_u32("sk_rmem_alloc", S_IRUSR | S_IWUSR,
+ cf_sk->debugfs_socket_dir,
+ (u32 *) &cf_sk->sk.sk_rmem_alloc);
+ debugfs_create_u32("sk_wmem_alloc", S_IRUSR | S_IWUSR,
+ cf_sk->debugfs_socket_dir,
+ (u32 *) &cf_sk->sk.sk_wmem_alloc);
+ debugfs_create_u32("identity", S_IRUSR | S_IWUSR,
+ cf_sk->debugfs_socket_dir,
+ (u32 *) &cf_sk->layer.id);
+ }
+#endif
+ release_sock(&cf_sk->sk);
+ return 0;
+}
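+
+/*
+ * Socket-type selection sketch (illustrative, not part of this patch):
+ * SOCK_SEQPACKET preserves message boundaries and passes the type check
+ * in caif_connect() for any CAIF protocol, while SOCK_STREAM is byte
+ * oriented and only accepted there in combination with CAIFPROTO_AT:
+ *
+ *	int at  = socket(AF_CAIF, SOCK_STREAM, CAIFPROTO_AT);
+ *	int dgm = socket(AF_CAIF, SOCK_SEQPACKET, CAIFPROTO_DATAGRAM);
+ */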
+
+
+static struct net_proto_family caif_family_ops = {
+ .family = PF_CAIF,
+ .create = caif_create,
+ .owner = THIS_MODULE,
+};
+
+int af_caif_init(void)
+{
+ int err = sock_register(&caif_family_ops);
+ if (err)
+ return err;
+ return 0;
+}
+
+static int __init caif_sktinit_module(void)
+{
+#ifdef CONFIG_DEBUG_FS
+ debugfsdir = debugfs_create_dir("caif_sk", NULL);
+ if (!IS_ERR(debugfsdir)) {
+ debugfs_create_u32("num_sockets", S_IRUSR | S_IWUSR,
+ debugfsdir,
+ (u32 *) &cnt.caif_nr_socks);
+ debugfs_create_u32("num_connect_req", S_IRUSR | S_IWUSR,
+ debugfsdir,
+ (u32 *) &cnt.num_connect_req);
+ debugfs_create_u32("num_connect_resp", S_IRUSR | S_IWUSR,
+ debugfsdir,
+ (u32 *) &cnt.num_connect_resp);
+ debugfs_create_u32("num_connect_fail_resp", S_IRUSR | S_IWUSR,
+ debugfsdir,
+ (u32 *) &cnt.num_connect_fail_resp);
+ debugfs_create_u32("num_disconnect", S_IRUSR | S_IWUSR,
+ debugfsdir,
+ (u32 *) &cnt.num_disconnect);
+ debugfs_create_u32("num_remote_shutdown_ind",
+ S_IRUSR | S_IWUSR, debugfsdir,
+ (u32 *) &cnt.num_remote_shutdown_ind);
+ debugfs_create_u32("num_tx_flow_off_ind", S_IRUSR | S_IWUSR,
+ debugfsdir,
+ (u32 *) &cnt.num_tx_flow_off_ind);
+ debugfs_create_u32("num_tx_flow_on_ind", S_IRUSR | S_IWUSR,
+ debugfsdir,
+ (u32 *) &cnt.num_tx_flow_on_ind);
+ debugfs_create_u32("num_rx_flow_off", S_IRUSR | S_IWUSR,
+ debugfsdir,
+ (u32 *) &cnt.num_rx_flow_off);
+ debugfs_create_u32("num_rx_flow_on", S_IRUSR | S_IWUSR,
+ debugfsdir,
+ (u32 *) &cnt.num_rx_flow_on);
+ }
+#endif
+ return af_caif_init();
+}
+
+static void __exit caif_sktexit_module(void)
+{
+ sock_unregister(PF_CAIF);
+ if (debugfsdir != NULL)
+ debugfs_remove_recursive(debugfsdir);
+}
+module_init(caif_sktinit_module);
+module_exit(caif_sktexit_module);
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
new file mode 100644
index 000000000000..471c62939fad
--- /dev/null
+++ b/net/caif/cfcnfg.c
@@ -0,0 +1,471 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+#include <linux/slab.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfpkt.h>
+#include <net/caif/cfcnfg.h>
+#include <net/caif/cfctrl.h>
+#include <net/caif/cfmuxl.h>
+#include <net/caif/cffrml.h>
+#include <net/caif/cfserl.h>
+#include <net/caif/cfsrvl.h>
+
+#include <linux/module.h>
+#include <asm/atomic.h>
+
+#define MAX_PHY_LAYERS 7
+#define PHY_NAME_LEN 20
+
+#define container_obj(layr) container_of(layr, struct cfcnfg, layer)
+
+/* Information about CAIF physical interfaces held by Config Module in order
+ * to manage physical interfaces
+ */
+struct cfcnfg_phyinfo {
+ /* Pointer to the layer below the MUX (framing layer) */
+ struct cflayer *frm_layer;
+ /* Pointer to the lowest actual physical layer */
+ struct cflayer *phy_layer;
+ /* Unique identifier of the physical interface */
+ unsigned int id;
+ /* Preference of the physical interface */
+ enum cfcnfg_phy_preference pref;
+
+ /* Reference count, number of channels using the device */
+ int phy_ref_count;
+
+ /* Information about the physical device */
+ struct dev_info dev_info;
+};
+
+struct cfcnfg {
+ struct cflayer layer;
+ struct cflayer *ctrl;
+ struct cflayer *mux;
+ u8 last_phyid;
+ struct cfcnfg_phyinfo phy_layers[MAX_PHY_LAYERS];
+};
+
+static void cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id,
+ enum cfctrl_srv serv, u8 phyid,
+ struct cflayer *adapt_layer);
+static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id);
+static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id,
+ struct cflayer *adapt_layer);
+static void cfctrl_resp_func(void);
+static void cfctrl_enum_resp(void);
+
+struct cfcnfg *cfcnfg_create(void)
+{
+ struct cfcnfg *this;
+ struct cfctrl_rsp *resp;
+ /* Initiate this layer */
+ this = kmalloc(sizeof(struct cfcnfg), GFP_ATOMIC);
+ if (!this) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return NULL;
+ }
+ memset(this, 0, sizeof(struct cfcnfg));
+ this->mux = cfmuxl_create();
+ if (!this->mux)
+ goto out_of_mem;
+ this->ctrl = cfctrl_create();
+ if (!this->ctrl)
+ goto out_of_mem;
+ /* Initiate response functions */
+ resp = cfctrl_get_respfuncs(this->ctrl);
+ resp->enum_rsp = cfctrl_enum_resp;
+ resp->linkerror_ind = cfctrl_resp_func;
+ resp->linkdestroy_rsp = cfcnfg_linkdestroy_rsp;
+ resp->sleep_rsp = cfctrl_resp_func;
+ resp->wake_rsp = cfctrl_resp_func;
+ resp->restart_rsp = cfctrl_resp_func;
+ resp->radioset_rsp = cfctrl_resp_func;
+ resp->linksetup_rsp = cfcnfg_linkup_rsp;
+ resp->reject_rsp = cfcnfg_reject_rsp;
+
+ this->last_phyid = 1;
+
+ cfmuxl_set_uplayer(this->mux, this->ctrl, 0);
+ layer_set_dn(this->ctrl, this->mux);
+ layer_set_up(this->ctrl, this);
+ return this;
+out_of_mem:
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ kfree(this->mux);
+ kfree(this->ctrl);
+ kfree(this);
+ return NULL;
+}
+EXPORT_SYMBOL(cfcnfg_create);
+
+void cfcnfg_remove(struct cfcnfg *cfg)
+{
+ if (cfg) {
+ kfree(cfg->mux);
+ kfree(cfg->ctrl);
+ kfree(cfg);
+ }
+}
+
+static void cfctrl_resp_func(void)
+{
+}
+
+static void cfctrl_enum_resp(void)
+{
+}
+
+struct dev_info *cfcnfg_get_phyid(struct cfcnfg *cnfg,
+ enum cfcnfg_phy_preference phy_pref)
+{
+ u16 i;
+
+ /* Try to match with specified preference */
+ for (i = 1; i < MAX_PHY_LAYERS; i++) {
+ if (cnfg->phy_layers[i].id == i &&
+ cnfg->phy_layers[i].pref == phy_pref &&
+ cnfg->phy_layers[i].frm_layer != NULL) {
+ caif_assert(cnfg->phy_layers != NULL);
+ caif_assert(cnfg->phy_layers[i].id == i);
+ return &cnfg->phy_layers[i].dev_info;
+ }
+ }
+ /* Otherwise just return something */
+ for (i = 1; i < MAX_PHY_LAYERS; i++) {
+ if (cnfg->phy_layers[i].id == i) {
+ caif_assert(cnfg->phy_layers != NULL);
+ caif_assert(cnfg->phy_layers[i].id == i);
+ return &cnfg->phy_layers[i].dev_info;
+ }
+ }
+
+ return NULL;
+}
+
+static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo(struct cfcnfg *cnfg,
+ u8 phyid)
+{
+ int i;
+ /* Try to match with specified preference */
+ for (i = 0; i < MAX_PHY_LAYERS; i++)
+ if (cnfg->phy_layers[i].frm_layer != NULL &&
+ cnfg->phy_layers[i].id == phyid)
+ return &cnfg->phy_layers[i];
+ return NULL;
+}
+
+int cfcnfg_get_named(struct cfcnfg *cnfg, char *name)
+{
+ int i;
+
+ /* Try to match with specified name */
+ for (i = 0; i < MAX_PHY_LAYERS; i++) {
+ if (cnfg->phy_layers[i].frm_layer != NULL
+ && strcmp(cnfg->phy_layers[i].phy_layer->name,
+ name) == 0)
+ return cnfg->phy_layers[i].frm_layer->id;
+ }
+ return 0;
+}
+
+int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
+{
+ u8 channel_id = 0;
+ int ret = 0;
+ struct cflayer *servl = NULL;
+ struct cfcnfg_phyinfo *phyinfo = NULL;
+ u8 phyid = 0;
+ caif_assert(adap_layer != NULL);
+ channel_id = adap_layer->id;
+ if (adap_layer->dn == NULL || channel_id == 0) {
+ pr_err("CAIF: %s():adap_layer->id is 0\n", __func__);
+ ret = -ENOTCONN;
+ goto end;
+ }
+ servl = cfmuxl_remove_uplayer(cnfg->mux, channel_id);
+ if (servl == NULL)
+ goto end;
+ layer_set_up(servl, NULL);
+ ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer);
+ if (servl == NULL) {
+ pr_err("CAIF: %s(): PROTOCOL ERROR "
+ "- Error removing service_layer Channel_Id(%d)",
+ __func__, channel_id);
+ ret = -EINVAL;
+ goto end;
+ }
+ caif_assert(channel_id == servl->id);
+ if (adap_layer->dn != NULL) {
+ phyid = cfsrvl_getphyid(adap_layer->dn);
+
+ phyinfo = cfcnfg_get_phyinfo(cnfg, phyid);
+ if (phyinfo == NULL) {
+ pr_warning("CAIF: %s(): "
+ "No interface to send disconnect to\n",
+ __func__);
+ ret = -ENODEV;
+ goto end;
+ }
+ if (phyinfo->id != phyid ||
+ phyinfo->phy_layer->id != phyid ||
+ phyinfo->frm_layer->id != phyid) {
+ pr_err("CAIF: %s(): "
+ "Inconsistency in phy registration\n",
+ __func__);
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+ if (phyinfo != NULL && --phyinfo->phy_ref_count == 0 &&
+ phyinfo->phy_layer != NULL &&
+ phyinfo->phy_layer->modemcmd != NULL) {
+ phyinfo->phy_layer->modemcmd(phyinfo->phy_layer,
+ _CAIF_MODEMCMD_PHYIF_USELESS);
+ }
+end:
+ cfsrvl_put(servl);
+ cfctrl_cancel_req(cnfg->ctrl, adap_layer);
+ if (adap_layer->ctrlcmd != NULL)
+ adap_layer->ctrlcmd(adap_layer, CAIF_CTRLCMD_DEINIT_RSP, 0);
+ return ret;
+
+}
+EXPORT_SYMBOL(cfcnfg_disconn_adapt_layer);
+
+void cfcnfg_release_adap_layer(struct cflayer *adap_layer)
+{
+ if (adap_layer->dn)
+ cfsrvl_put(adap_layer->dn);
+}
+EXPORT_SYMBOL(cfcnfg_release_adap_layer);
+
+static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id)
+{
+}
+
+int cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg,
+ struct cfctrl_link_param *param,
+ struct cflayer *adap_layer)
+{
+ struct cflayer *frml;
+ if (adap_layer == NULL) {
+ pr_err("CAIF: %s(): adap_layer is zero", __func__);
+ return -EINVAL;
+ }
+ if (adap_layer->receive == NULL) {
+ pr_err("CAIF: %s(): adap_layer->receive is NULL", __func__);
+ return -EINVAL;
+ }
+ if (adap_layer->ctrlcmd == NULL) {
+ pr_err("CAIF: %s(): adap_layer->ctrlcmd == NULL", __func__);
+ return -EINVAL;
+ }
+ frml = cnfg->phy_layers[param->phyid].frm_layer;
+ if (frml == NULL) {
+ pr_err("CAIF: %s(): Specified PHY type does not exist!",
+ __func__);
+ return -ENODEV;
+ }
+ caif_assert(param->phyid == cnfg->phy_layers[param->phyid].id);
+ caif_assert(cnfg->phy_layers[param->phyid].frm_layer->id ==
+ param->phyid);
+ caif_assert(cnfg->phy_layers[param->phyid].phy_layer->id ==
+ param->phyid);
+ /* FIXME: ENUMERATE INITIALLY WHEN ACTIVATING PHYSICAL INTERFACE */
+ cfctrl_enum_req(cnfg->ctrl, param->phyid);
+ return cfctrl_linkup_request(cnfg->ctrl, param, adap_layer);
+}
+EXPORT_SYMBOL(cfcnfg_add_adaptation_layer);
+
+static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id,
+ struct cflayer *adapt_layer)
+{
+ if (adapt_layer != NULL && adapt_layer->ctrlcmd != NULL)
+ adapt_layer->ctrlcmd(adapt_layer,
+ CAIF_CTRLCMD_INIT_FAIL_RSP, 0);
+}
+
+static void
+cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
+ u8 phyid, struct cflayer *adapt_layer)
+{
+ struct cfcnfg *cnfg = container_obj(layer);
+ struct cflayer *servicel = NULL;
+ struct cfcnfg_phyinfo *phyinfo;
+ if (adapt_layer == NULL) {
+ pr_debug("CAIF: %s(): link setup response "
+ "but no client exist, send linkdown back\n",
+ __func__);
+ cfctrl_linkdown_req(cnfg->ctrl, channel_id, NULL);
+ return;
+ }
+
+ caif_assert(cnfg != NULL);
+ caif_assert(phyid != 0);
+ phyinfo = &cnfg->phy_layers[phyid];
+ caif_assert(phyinfo != NULL);
+ caif_assert(phyinfo->id == phyid);
+ caif_assert(phyinfo->phy_layer != NULL);
+ caif_assert(phyinfo->phy_layer->id == phyid);
+
+ if (phyinfo != NULL &&
+ phyinfo->phy_ref_count++ == 0 &&
+ phyinfo->phy_layer != NULL &&
+ phyinfo->phy_layer->modemcmd != NULL) {
+ caif_assert(phyinfo->phy_layer->id == phyid);
+ phyinfo->phy_layer->modemcmd(phyinfo->phy_layer,
+ _CAIF_MODEMCMD_PHYIF_USEFULL);
+
+ }
+ adapt_layer->id = channel_id;
+
+ switch (serv) {
+ case CFCTRL_SRV_VEI:
+ servicel = cfvei_create(channel_id, &phyinfo->dev_info);
+ break;
+ case CFCTRL_SRV_DATAGRAM:
+ servicel = cfdgml_create(channel_id, &phyinfo->dev_info);
+ break;
+ case CFCTRL_SRV_RFM:
+ servicel = cfrfml_create(channel_id, &phyinfo->dev_info);
+ break;
+ case CFCTRL_SRV_UTIL:
+ servicel = cfutill_create(channel_id, &phyinfo->dev_info);
+ break;
+ case CFCTRL_SRV_VIDEO:
+ servicel = cfvidl_create(channel_id, &phyinfo->dev_info);
+ break;
+ case CFCTRL_SRV_DBG:
+ servicel = cfdbgl_create(channel_id, &phyinfo->dev_info);
+ break;
+ default:
+ pr_err("CAIF: %s(): Protocol error. "
+ "Link setup response - unknown channel type\n",
+ __func__);
+ return;
+ }
+ if (!servicel) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return;
+ }
+ layer_set_dn(servicel, cnfg->mux);
+ cfmuxl_set_uplayer(cnfg->mux, servicel, channel_id);
+ layer_set_up(servicel, adapt_layer);
+ layer_set_dn(adapt_layer, servicel);
+ cfsrvl_get(servicel);
+ servicel->ctrlcmd(servicel, CAIF_CTRLCMD_INIT_RSP, 0);
+}
+
+void
+cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
+ void *dev, struct cflayer *phy_layer, u16 *phyid,
+ enum cfcnfg_phy_preference pref,
+ bool fcs, bool stx)
+{
+ struct cflayer *frml;
+ struct cflayer *phy_driver = NULL;
+ int i;
+
+
+ if (cnfg->phy_layers[cnfg->last_phyid].frm_layer == NULL) {
+ *phyid = cnfg->last_phyid;
+
+ /* range: 1..(MAX_PHY_LAYERS-1) */
+ cnfg->last_phyid =
+ (cnfg->last_phyid % (MAX_PHY_LAYERS - 1)) + 1;
+ } else {
+ *phyid = 0;
+ for (i = 1; i < MAX_PHY_LAYERS; i++) {
+ if (cnfg->phy_layers[i].frm_layer == NULL) {
+ *phyid = i;
+ break;
+ }
+ }
+ }
+ if (*phyid == 0) {
+ pr_err("CAIF: %s(): No Available PHY ID\n", __func__);
+ return;
+ }
+
+ switch (phy_type) {
+ case CFPHYTYPE_FRAG:
+ phy_driver =
+ cfserl_create(CFPHYTYPE_FRAG, *phyid, stx);
+ if (!phy_driver) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return;
+ }
+
+ break;
+ case CFPHYTYPE_CAIF:
+ phy_driver = NULL;
+ break;
+ default:
+ pr_err("CAIF: %s(): Unknown PHY type %d\n",
+ __func__, phy_type);
+ return;
+ }
+
+ phy_layer->id = *phyid;
+ cnfg->phy_layers[*phyid].pref = pref;
+ cnfg->phy_layers[*phyid].id = *phyid;
+ cnfg->phy_layers[*phyid].dev_info.id = *phyid;
+ cnfg->phy_layers[*phyid].dev_info.dev = dev;
+ cnfg->phy_layers[*phyid].phy_layer = phy_layer;
+ cnfg->phy_layers[*phyid].phy_ref_count = 0;
+ phy_layer->type = phy_type;
+ frml = cffrml_create(*phyid, fcs);
+ if (!frml) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return;
+ }
+ cnfg->phy_layers[*phyid].frm_layer = frml;
+ cfmuxl_set_dnlayer(cnfg->mux, frml, *phyid);
+ layer_set_up(frml, cnfg->mux);
+
+ if (phy_driver != NULL) {
+ phy_driver->id = *phyid;
+ layer_set_dn(frml, phy_driver);
+ layer_set_up(phy_driver, frml);
+ layer_set_dn(phy_driver, phy_layer);
+ layer_set_up(phy_layer, phy_driver);
+ } else {
+ layer_set_dn(frml, phy_layer);
+ layer_set_up(phy_layer, frml);
+ }
+}
+EXPORT_SYMBOL(cfcnfg_add_phy_layer);
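+
+/*
+ * Illustrative registration sketch (not a caller in this patch set): a
+ * CAIF link-layer driver supplying a fragmenting serial interface could
+ * register its lowest layer roughly like this, where "cnfg", "my_netdev",
+ * "my_phy_layer" and "pref" are placeholders provided by the driver:
+ *
+ *	u16 phyid;
+ *
+ *	cfcnfg_add_phy_layer(cnfg, CFPHYTYPE_FRAG, my_netdev,
+ *			     &my_phy_layer, &phyid, pref,
+ *			     true, true);
+ *
+ * For CFPHYTYPE_FRAG the resulting stack, top to bottom, is
+ * mux -> framing layer (cffrml) -> serial layer (cfserl) -> phy layer;
+ * for CFPHYTYPE_CAIF the serial layer is omitted.
+ */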
+
+int cfcnfg_del_phy_layer(struct cfcnfg *cnfg, struct cflayer *phy_layer)
+{
+ struct cflayer *frml, *frml_dn;
+ u16 phyid;
+ phyid = phy_layer->id;
+ caif_assert(phyid == cnfg->phy_layers[phyid].id);
+ caif_assert(phy_layer == cnfg->phy_layers[phyid].phy_layer);
+ caif_assert(phy_layer->id == phyid);
+ caif_assert(cnfg->phy_layers[phyid].frm_layer->id == phyid);
+
+ memset(&cnfg->phy_layers[phy_layer->id], 0,
+ sizeof(struct cfcnfg_phyinfo));
+ frml = cfmuxl_remove_dnlayer(cnfg->mux, phy_layer->id);
+ frml_dn = frml->dn;
+ cffrml_set_uplayer(frml, NULL);
+ cffrml_set_dnlayer(frml, NULL);
+ kfree(frml);
+
+ if (phy_layer != frml_dn) {
+ layer_set_up(frml_dn, NULL);
+ layer_set_dn(frml_dn, NULL);
+ kfree(frml_dn);
+ }
+ layer_set_up(phy_layer, NULL);
+ return 0;
+}
+EXPORT_SYMBOL(cfcnfg_del_phy_layer);
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
new file mode 100644
index 000000000000..a521d32cfe56
--- /dev/null
+++ b/net/caif/cfctrl.c
@@ -0,0 +1,693 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/stddef.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfpkt.h>
+#include <net/caif/cfctrl.h>
+
+#define container_obj(layr) container_of(layr, struct cfctrl, serv.layer)
+#define UTILITY_NAME_LENGTH 16
+#define CFPKT_CTRL_PKT_LEN 20
+
+
+#ifdef CAIF_NO_LOOP
+static int handle_loop(struct cfctrl *ctrl,
+ int cmd, struct cfpkt *pkt)
+{
+ return CAIF_FAILURE;
+}
+#else
+static int handle_loop(struct cfctrl *ctrl,
+ int cmd, struct cfpkt *pkt);
+#endif
+static int cfctrl_recv(struct cflayer *layr, struct cfpkt *pkt);
+static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
+ int phyid);
+
+
+struct cflayer *cfctrl_create(void)
+{
+ struct dev_info dev_info;
+ struct cfctrl *this =
+ kmalloc(sizeof(struct cfctrl), GFP_ATOMIC);
+ if (!this) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return NULL;
+ }
+ caif_assert(offsetof(struct cfctrl, serv.layer) == 0);
+ memset(&dev_info, 0, sizeof(dev_info));
+ dev_info.id = 0xff;
+ memset(this, 0, sizeof(*this));
+ cfsrvl_init(&this->serv, 0, &dev_info);
+ spin_lock_init(&this->info_list_lock);
+ atomic_set(&this->req_seq_no, 1);
+ atomic_set(&this->rsp_seq_no, 1);
+ this->serv.layer.receive = cfctrl_recv;
+ sprintf(this->serv.layer.name, "ctrl");
+ this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
+ spin_lock_init(&this->loop_linkid_lock);
+ this->loop_linkid = 1;
+ return &this->serv.layer;
+}
+
+static bool param_eq(struct cfctrl_link_param *p1, struct cfctrl_link_param *p2)
+{
+ bool eq =
+ p1->linktype == p2->linktype &&
+ p1->priority == p2->priority &&
+ p1->phyid == p2->phyid &&
+ p1->endpoint == p2->endpoint && p1->chtype == p2->chtype;
+
+ if (!eq)
+ return false;
+
+ switch (p1->linktype) {
+ case CFCTRL_SRV_VEI:
+ return true;
+ case CFCTRL_SRV_DATAGRAM:
+ return p1->u.datagram.connid == p2->u.datagram.connid;
+ case CFCTRL_SRV_RFM:
+ return
+ p1->u.rfm.connid == p2->u.rfm.connid &&
+ strcmp(p1->u.rfm.volume, p2->u.rfm.volume) == 0;
+ case CFCTRL_SRV_UTIL:
+ return
+ p1->u.utility.fifosize_kb == p2->u.utility.fifosize_kb
+ && p1->u.utility.fifosize_bufs ==
+ p2->u.utility.fifosize_bufs
+ && strcmp(p1->u.utility.name, p2->u.utility.name) == 0
+ && p1->u.utility.paramlen == p2->u.utility.paramlen
+ && memcmp(p1->u.utility.params, p2->u.utility.params,
+ p1->u.utility.paramlen) == 0;
+
+ case CFCTRL_SRV_VIDEO:
+ return p1->u.video.connid == p2->u.video.connid;
+ case CFCTRL_SRV_DBG:
+ return true;
+ case CFCTRL_SRV_DECM:
+ return false;
+ default:
+ return false;
+ }
+ return false;
+}
+
+bool cfctrl_req_eq(struct cfctrl_request_info *r1,
+ struct cfctrl_request_info *r2)
+{
+ if (r1->cmd != r2->cmd)
+ return false;
+ if (r1->cmd == CFCTRL_CMD_LINK_SETUP)
+ return param_eq(&r1->param, &r2->param);
+ else
+ return r1->channel_id == r2->channel_id;
+}
+
+/* Insert request at the end */
+void cfctrl_insert_req(struct cfctrl *ctrl,
+ struct cfctrl_request_info *req)
+{
+ struct cfctrl_request_info *p;
+ spin_lock(&ctrl->info_list_lock);
+ req->next = NULL;
+ atomic_inc(&ctrl->req_seq_no);
+ req->sequence_no = atomic_read(&ctrl->req_seq_no);
+ if (ctrl->first_req == NULL) {
+ ctrl->first_req = req;
+ spin_unlock(&ctrl->info_list_lock);
+ return;
+ }
+ p = ctrl->first_req;
+ while (p->next != NULL)
+ p = p->next;
+ p->next = req;
+ spin_unlock(&ctrl->info_list_lock);
+}
+
+/* Compare and remove request */
+struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
+ struct cfctrl_request_info *req)
+{
+ struct cfctrl_request_info *p;
+ struct cfctrl_request_info *ret;
+
+ spin_lock(&ctrl->info_list_lock);
+ if (ctrl->first_req == NULL) {
+ spin_unlock(&ctrl->info_list_lock);
+ return NULL;
+ }
+
+ if (cfctrl_req_eq(req, ctrl->first_req)) {
+ ret = ctrl->first_req;
+ caif_assert(ctrl->first_req);
+ atomic_set(&ctrl->rsp_seq_no,
+ ctrl->first_req->sequence_no);
+ ctrl->first_req = ctrl->first_req->next;
+ spin_unlock(&ctrl->info_list_lock);
+ return ret;
+ }
+
+ p = ctrl->first_req;
+
+ while (p->next != NULL) {
+ if (cfctrl_req_eq(req, p->next)) {
+ pr_warning("CAIF: %s(): Requests are not "
+ "received in order\n",
+ __func__);
+ ret = p->next;
+ atomic_set(&ctrl->rsp_seq_no,
+ p->next->sequence_no);
+ p->next = p->next->next;
+ spin_unlock(&ctrl->info_list_lock);
+ return ret;
+ }
+ p = p->next;
+ }
+ spin_unlock(&ctrl->info_list_lock);
+
+ pr_warning("CAIF: %s(): Request does not match\n",
+ __func__);
+ return NULL;
+}
+
+struct cfctrl_rsp *cfctrl_get_respfuncs(struct cflayer *layer)
+{
+ struct cfctrl *this = container_obj(layer);
+ return &this->res;
+}
+
+void cfctrl_set_dnlayer(struct cflayer *this, struct cflayer *dn)
+{
+ this->dn = dn;
+}
+
+void cfctrl_set_uplayer(struct cflayer *this, struct cflayer *up)
+{
+ this->up = up;
+}
+
+static void init_info(struct caif_payload_info *info, struct cfctrl *cfctrl)
+{
+ info->hdr_len = 0;
+ info->channel_id = cfctrl->serv.layer.id;
+ info->dev_info = &cfctrl->serv.dev_info;
+}
+
+void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid)
+{
+ struct cfctrl *cfctrl = container_obj(layer);
+ int ret;
+ struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
+ if (!pkt) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return;
+ }
+ caif_assert(offsetof(struct cfctrl, serv.layer) == 0);
+ init_info(cfpkt_info(pkt), cfctrl);
+ cfpkt_info(pkt)->dev_info->id = physlinkid;
+ cfctrl->serv.dev_info.id = physlinkid;
+ cfpkt_addbdy(pkt, CFCTRL_CMD_ENUM);
+ cfpkt_addbdy(pkt, physlinkid);
+ ret =
+ cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
+ if (ret < 0) {
+ pr_err("CAIF: %s(): Could not transmit enum message\n",
+ __func__);
+ cfpkt_destroy(pkt);
+ }
+}
+
+int cfctrl_linkup_request(struct cflayer *layer,
+ struct cfctrl_link_param *param,
+ struct cflayer *user_layer)
+{
+ struct cfctrl *cfctrl = container_obj(layer);
+ u32 tmp32;
+ u16 tmp16;
+ u8 tmp8;
+ struct cfctrl_request_info *req;
+ int ret;
+ char utility_name[16];
+ struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
+ if (!pkt) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return -ENOMEM;
+ }
+ cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_SETUP);
+ cfpkt_addbdy(pkt, (param->chtype << 4) + param->linktype);
+ cfpkt_addbdy(pkt, (param->priority << 3) + param->phyid);
+ cfpkt_addbdy(pkt, param->endpoint & 0x03);
+
+ switch (param->linktype) {
+ case CFCTRL_SRV_VEI:
+ break;
+ case CFCTRL_SRV_VIDEO:
+ cfpkt_addbdy(pkt, (u8) param->u.video.connid);
+ break;
+ case CFCTRL_SRV_DBG:
+ break;
+ case CFCTRL_SRV_DATAGRAM:
+ tmp32 = cpu_to_le32(param->u.datagram.connid);
+ cfpkt_add_body(pkt, &tmp32, 4);
+ break;
+ case CFCTRL_SRV_RFM:
+ /* Convert the RFM connection id to little-endian
+ * and copy it out...
+ */
+ tmp32 = cpu_to_le32(param->u.rfm.connid);
+ cfpkt_add_body(pkt, &tmp32, 4);
+ /* Add volume name, including zero termination... */
+ cfpkt_add_body(pkt, param->u.rfm.volume,
+ strlen(param->u.rfm.volume) + 1);
+ break;
+ case CFCTRL_SRV_UTIL:
+ tmp16 = cpu_to_le16(param->u.utility.fifosize_kb);
+ cfpkt_add_body(pkt, &tmp16, 2);
+ tmp16 = cpu_to_le16(param->u.utility.fifosize_bufs);
+ cfpkt_add_body(pkt, &tmp16, 2);
+ memset(utility_name, 0, sizeof(utility_name));
+ strncpy(utility_name, param->u.utility.name,
+ UTILITY_NAME_LENGTH - 1);
+ cfpkt_add_body(pkt, utility_name, UTILITY_NAME_LENGTH);
+ tmp8 = param->u.utility.paramlen;
+ cfpkt_add_body(pkt, &tmp8, 1);
+ cfpkt_add_body(pkt, param->u.utility.params,
+ param->u.utility.paramlen);
+ break;
+ default:
+ pr_warning("CAIF: %s():Request setup of bad link type = %d\n",
+ __func__, param->linktype);
+ cfpkt_destroy(pkt);
+ return -EINVAL;
+ }
+ req = kmalloc(sizeof(*req), GFP_KERNEL);
+ if (!req) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ cfpkt_destroy(pkt);
+ return -ENOMEM;
+ }
+ memset(req, 0, sizeof(*req));
+ req->client_layer = user_layer;
+ req->cmd = CFCTRL_CMD_LINK_SETUP;
+ req->param = *param;
+ cfctrl_insert_req(cfctrl, req);
+ init_info(cfpkt_info(pkt), cfctrl);
+ /*
+ * NOTE:Always send linkup and linkdown request on the same
+ * device as the payload. Otherwise old queued up payload
+ * might arrive with the newly allocated channel ID.
+ */
+ cfpkt_info(pkt)->dev_info->id = param->phyid;
+ ret =
+ cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
+ if (ret < 0) {
+ pr_err("CAIF: %s(): Could not transmit linksetup request\n",
+ __func__);
+ cfpkt_destroy(pkt);
+ return -ENODEV;
+ }
+ return 0;
+}
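+
+/*
+ * Resulting link-setup frame layout (derived from the construction
+ * above). The first four bytes are common to all link types:
+ *
+ *	[ CFCTRL_CMD_LINK_SETUP ]
+ *	[ chtype << 4 | linktype ]
+ *	[ priority << 3 | phyid ]
+ *	[ endpoint & 0x03 ]
+ *
+ * Service-specific fields follow: nothing for VEI/DBG, a one-byte
+ * connection id for VIDEO, a little-endian 32-bit connection id for
+ * DATAGRAM and RFM (RFM also appends the NUL-terminated volume name),
+ * and for UTIL two little-endian 16-bit fifo sizes, the 16-byte utility
+ * name, a one-byte parameter length and the parameter data.
+ */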
+
+int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid,
+ struct cflayer *client)
+{
+ int ret;
+ struct cfctrl *cfctrl = container_obj(layer);
+ struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
+ if (!pkt) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return -ENOMEM;
+ }
+ cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY);
+ cfpkt_addbdy(pkt, channelid);
+ init_info(cfpkt_info(pkt), cfctrl);
+ ret =
+ cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
+ if (ret < 0) {
+ pr_err("CAIF: %s(): Could not transmit link-down request\n",
+ __func__);
+ cfpkt_destroy(pkt);
+ }
+ return ret;
+}
+
+void cfctrl_sleep_req(struct cflayer *layer)
+{
+ int ret;
+ struct cfctrl *cfctrl = container_obj(layer);
+ struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
+ if (!pkt) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return;
+ }
+ cfpkt_addbdy(pkt, CFCTRL_CMD_SLEEP);
+ init_info(cfpkt_info(pkt), cfctrl);
+ ret =
+ cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
+ if (ret < 0)
+ cfpkt_destroy(pkt);
+}
+
+void cfctrl_wake_req(struct cflayer *layer)
+{
+ int ret;
+ struct cfctrl *cfctrl = container_obj(layer);
+ struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
+ if (!pkt) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return;
+ }
+ cfpkt_addbdy(pkt, CFCTRL_CMD_WAKE);
+ init_info(cfpkt_info(pkt), cfctrl);
+ ret =
+ cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
+ if (ret < 0)
+ cfpkt_destroy(pkt);
+}
+
+void cfctrl_getstartreason_req(struct cflayer *layer)
+{
+ int ret;
+ struct cfctrl *cfctrl = container_obj(layer);
+ struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
+ if (!pkt) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return;
+ }
+ cfpkt_addbdy(pkt, CFCTRL_CMD_START_REASON);
+ init_info(cfpkt_info(pkt), cfctrl);
+ ret =
+ cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
+ if (ret < 0)
+ cfpkt_destroy(pkt);
+}
+
+
+void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer)
+{
+ struct cfctrl_request_info *p, *req;
+ struct cfctrl *ctrl = container_obj(layr);
+ spin_lock(&ctrl->info_list_lock);
+
+ if (ctrl->first_req == NULL) {
+ spin_unlock(&ctrl->info_list_lock);
+ return;
+ }
+
+ if (ctrl->first_req->client_layer == adap_layer) {
+
+ req = ctrl->first_req;
+ ctrl->first_req = ctrl->first_req->next;
+ kfree(req);
+ }
+
+ p = ctrl->first_req;
+ while (p != NULL && p->next != NULL) {
+ if (p->next->client_layer == adap_layer) {
+
+ req = p->next;
+ p->next = p->next->next;
+ kfree(req);
+ }
+ p = p->next;
+ }
+
+ spin_unlock(&ctrl->info_list_lock);
+}
+
+static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
+{
+ u8 cmdrsp;
+ u8 cmd;
+ int ret = -1;
+ u16 tmp16;
+ u8 len;
+ u8 param[255];
+ u8 linkid;
+ struct cfctrl *cfctrl = container_obj(layer);
+ struct cfctrl_request_info rsp, *req;
+
+
+ cfpkt_extr_head(pkt, &cmdrsp, 1);
+ cmd = cmdrsp & CFCTRL_CMD_MASK;
+ if (cmd != CFCTRL_CMD_LINK_ERR
+ && CFCTRL_RSP_BIT != (CFCTRL_RSP_BIT & cmdrsp)) {
+ if (handle_loop(cfctrl, cmd, pkt) == CAIF_FAILURE)
+ cmdrsp |= CFCTRL_ERR_BIT;
+ }
+
+ switch (cmd) {
+ case CFCTRL_CMD_LINK_SETUP:
+ {
+ enum cfctrl_srv serv;
+ enum cfctrl_srv servtype;
+ u8 endpoint;
+ u8 physlinkid;
+ u8 prio;
+ u8 tmp;
+ u32 tmp32;
+ u8 *cp;
+ int i;
+ struct cfctrl_link_param linkparam;
+ memset(&linkparam, 0, sizeof(linkparam));
+
+ cfpkt_extr_head(pkt, &tmp, 1);
+
+ serv = tmp & CFCTRL_SRV_MASK;
+ linkparam.linktype = serv;
+
+ servtype = tmp >> 4;
+ linkparam.chtype = servtype;
+
+ cfpkt_extr_head(pkt, &tmp, 1);
+ physlinkid = tmp & 0x07;
+ prio = tmp >> 3;
+
+ linkparam.priority = prio;
+ linkparam.phyid = physlinkid;
+ cfpkt_extr_head(pkt, &endpoint, 1);
+ linkparam.endpoint = endpoint & 0x03;
+
+ switch (serv) {
+ case CFCTRL_SRV_VEI:
+ case CFCTRL_SRV_DBG:
+ if (CFCTRL_ERR_BIT & cmdrsp)
+ break;
+ /* Link ID */
+ cfpkt_extr_head(pkt, &linkid, 1);
+ break;
+ case CFCTRL_SRV_VIDEO:
+ cfpkt_extr_head(pkt, &tmp, 1);
+ linkparam.u.video.connid = tmp;
+ if (CFCTRL_ERR_BIT & cmdrsp)
+ break;
+ /* Link ID */
+ cfpkt_extr_head(pkt, &linkid, 1);
+ break;
+
+ case CFCTRL_SRV_DATAGRAM:
+ cfpkt_extr_head(pkt, &tmp32, 4);
+ linkparam.u.datagram.connid =
+ le32_to_cpu(tmp32);
+ if (CFCTRL_ERR_BIT & cmdrsp)
+ break;
+ /* Link ID */
+ cfpkt_extr_head(pkt, &linkid, 1);
+ break;
+ case CFCTRL_SRV_RFM:
+ /* Extract the little-endian RFM connection id,
+ * followed by the NUL-terminated volume name.
+ */
+ cfpkt_extr_head(pkt, &tmp32, 4);
+ linkparam.u.rfm.connid =
+ le32_to_cpu(tmp32);
+ cp = (u8 *) linkparam.u.rfm.volume;
+ for (cfpkt_extr_head(pkt, &tmp, 1);
+ cfpkt_more(pkt) && tmp != '\0';
+ cfpkt_extr_head(pkt, &tmp, 1))
+ *cp++ = tmp;
+ *cp = '\0';
+
+ if (CFCTRL_ERR_BIT & cmdrsp)
+ break;
+ /* Link ID */
+ cfpkt_extr_head(pkt, &linkid, 1);
+
+ break;
+ case CFCTRL_SRV_UTIL:
+ /* Extract the utility-link parameters:
+ * fifo sizes, utility name and parameter data.
+ */
+ /* Fifosize KB */
+ cfpkt_extr_head(pkt, &tmp16, 2);
+ linkparam.u.utility.fifosize_kb =
+ le16_to_cpu(tmp16);
+ /* Fifosize bufs */
+ cfpkt_extr_head(pkt, &tmp16, 2);
+ linkparam.u.utility.fifosize_bufs =
+ le16_to_cpu(tmp16);
+ /* name */
+ cp = (u8 *) linkparam.u.utility.name;
+ caif_assert(sizeof(linkparam.u.utility.name)
+ >= UTILITY_NAME_LENGTH);
+ for (i = 0;
+ i < UTILITY_NAME_LENGTH
+ && cfpkt_more(pkt); i++) {
+ cfpkt_extr_head(pkt, &tmp, 1);
+ *cp++ = tmp;
+ }
+ /* Length */
+ cfpkt_extr_head(pkt, &len, 1);
+ linkparam.u.utility.paramlen = len;
+ /* Param Data */
+ cp = linkparam.u.utility.params;
+ while (cfpkt_more(pkt) && len--) {
+ cfpkt_extr_head(pkt, &tmp, 1);
+ *cp++ = tmp;
+ }
+ if (CFCTRL_ERR_BIT & cmdrsp)
+ break;
+ /* Link ID */
+ cfpkt_extr_head(pkt, &linkid, 1);
+ /* Length */
+ cfpkt_extr_head(pkt, &len, 1);
+ /* Param Data */
+ cfpkt_extr_head(pkt, &param, len);
+ break;
+ default:
+ pr_warning("CAIF: %s(): Request setup "
+ "- invalid link type (%d)",
+ __func__, serv);
+ goto error;
+ }
+
+ rsp.cmd = cmd;
+ rsp.param = linkparam;
+ req = cfctrl_remove_req(cfctrl, &rsp);
+
+ if (CFCTRL_ERR_BIT == (CFCTRL_ERR_BIT & cmdrsp) ||
+ cfpkt_erroneous(pkt)) {
+ pr_err("CAIF: %s(): Invalid O/E bit or parse "
+ "error on CAIF control channel",
+ __func__);
+ cfctrl->res.reject_rsp(cfctrl->serv.layer.up,
+ 0,
+ req ? req->client_layer
+ : NULL);
+ } else {
+ cfctrl->res.linksetup_rsp(cfctrl->serv.
+ layer.up, linkid,
+ serv, physlinkid,
+ req ? req->
+ client_layer : NULL);
+ }
+
+ kfree(req);
+ }
+ break;
+ case CFCTRL_CMD_LINK_DESTROY:
+ cfpkt_extr_head(pkt, &linkid, 1);
+ cfctrl->res.linkdestroy_rsp(cfctrl->serv.layer.up, linkid);
+ break;
+ case CFCTRL_CMD_LINK_ERR:
+ pr_err("CAIF: %s(): Frame Error Indication received\n",
+ __func__);
+ cfctrl->res.linkerror_ind();
+ break;
+ case CFCTRL_CMD_ENUM:
+ cfctrl->res.enum_rsp();
+ break;
+ case CFCTRL_CMD_SLEEP:
+ cfctrl->res.sleep_rsp();
+ break;
+ case CFCTRL_CMD_WAKE:
+ cfctrl->res.wake_rsp();
+ break;
+ case CFCTRL_CMD_LINK_RECONF:
+ cfctrl->res.restart_rsp();
+ break;
+ case CFCTRL_CMD_RADIO_SET:
+ cfctrl->res.radioset_rsp();
+ break;
+ default:
+ pr_err("CAIF: %s(): Unrecognized Control Frame\n", __func__);
+ goto error;
+ }
+ ret = 0;
+error:
+ cfpkt_destroy(pkt);
+ return ret;
+}
+
+static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
+ int phyid)
+{
+ struct cfctrl *this = container_obj(layr);
+ switch (ctrl) {
+ case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND:
+ case CAIF_CTRLCMD_FLOW_OFF_IND:
+ spin_lock(&this->info_list_lock);
+ if (this->first_req != NULL) {
+ pr_debug("CAIF: %s(): Received flow off in "
+ "control layer", __func__);
+ }
+ spin_unlock(&this->info_list_lock);
+ break;
+ default:
+ break;
+ }
+}
+
+#ifndef CAIF_NO_LOOP
+static int handle_loop(struct cfctrl *ctrl, int cmd, struct cfpkt *pkt)
+{
+ static int last_linkid;
+ u8 linkid, linktype, tmp;
+ switch (cmd) {
+ case CFCTRL_CMD_LINK_SETUP:
+ spin_lock(&ctrl->loop_linkid_lock);
+ for (linkid = last_linkid + 1; linkid < 255; linkid++)
+ if (!ctrl->loop_linkused[linkid])
+ goto found;
+ for (linkid = last_linkid - 1; linkid > 0; linkid--)
+ if (!ctrl->loop_linkused[linkid])
+ goto found;
+ spin_unlock(&ctrl->loop_linkid_lock);
+ pr_err("CAIF: %s(): Out of link-ids\n", __func__);
+ return -EINVAL;
+found:
+ if (!ctrl->loop_linkused[linkid])
+ ctrl->loop_linkused[linkid] = 1;
+
+ last_linkid = linkid;
+
+ cfpkt_add_trail(pkt, &linkid, 1);
+ spin_unlock(&ctrl->loop_linkid_lock);
+ cfpkt_peek_head(pkt, &linktype, 1);
+ if (linktype == CFCTRL_SRV_UTIL) {
+ tmp = 0x01;
+ cfpkt_add_trail(pkt, &tmp, 1);
+ cfpkt_add_trail(pkt, &tmp, 1);
+ }
+ break;
+
+ case CFCTRL_CMD_LINK_DESTROY:
+ spin_lock(&ctrl->loop_linkid_lock);
+ cfpkt_peek_head(pkt, &linkid, 1);
+ ctrl->loop_linkused[linkid] = 0;
+ spin_unlock(&ctrl->loop_linkid_lock);
+ break;
+ default:
+ break;
+ }
+ return CAIF_SUCCESS;
+}
+#endif
diff --git a/net/caif/cfdbgl.c b/net/caif/cfdbgl.c
new file mode 100644
index 000000000000..ab6b6dc34cf8
--- /dev/null
+++ b/net/caif/cfdbgl.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/stddef.h>
+#include <linux/slab.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfsrvl.h>
+#include <net/caif/cfpkt.h>
+
+static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt);
+static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt);
+
+struct cflayer *cfdbgl_create(u8 channel_id, struct dev_info *dev_info)
+{
+ struct cfsrvl *dbg = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
+ if (!dbg) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return NULL;
+ }
+ caif_assert(offsetof(struct cfsrvl, layer) == 0);
+ memset(dbg, 0, sizeof(struct cfsrvl));
+ cfsrvl_init(dbg, channel_id, dev_info);
+ dbg->layer.receive = cfdbgl_receive;
+ dbg->layer.transmit = cfdbgl_transmit;
+ snprintf(dbg->layer.name, CAIF_LAYER_NAME_SZ - 1, "dbg%d", channel_id);
+ return &dbg->layer;
+}
+
+static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt)
+{
+ return layr->up->receive(layr->up, pkt);
+}
+
+static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt)
+{
+ return layr->dn->transmit(layr->dn, pkt);
+}
diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c
new file mode 100644
index 000000000000..53194840ecb6
--- /dev/null
+++ b/net/caif/cfdgml.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/stddef.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfsrvl.h>
+#include <net/caif/cfpkt.h>
+
+#define container_obj(layr) ((struct cfsrvl *) layr)
+
+#define DGM_CMD_BIT 0x80
+#define DGM_FLOW_OFF 0x81
+#define DGM_FLOW_ON 0x80
+#define DGM_CTRL_PKT_SIZE 1
+
+static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt);
+static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt);
+
+struct cflayer *cfdgml_create(u8 channel_id, struct dev_info *dev_info)
+{
+ struct cfsrvl *dgm = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
+ if (!dgm) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return NULL;
+ }
+ caif_assert(offsetof(struct cfsrvl, layer) == 0);
+ memset(dgm, 0, sizeof(struct cfsrvl));
+ cfsrvl_init(dgm, channel_id, dev_info);
+ dgm->layer.receive = cfdgml_receive;
+ dgm->layer.transmit = cfdgml_transmit;
+ snprintf(dgm->layer.name, CAIF_LAYER_NAME_SZ - 1, "dgm%d", channel_id);
+ dgm->layer.name[CAIF_LAYER_NAME_SZ - 1] = '\0';
+ return &dgm->layer;
+}
+
+static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt)
+{
+ u8 cmd = -1;
+ u8 dgmhdr[3];
+ int ret;
+ caif_assert(layr->up != NULL);
+ caif_assert(layr->receive != NULL);
+ caif_assert(layr->ctrlcmd != NULL);
+
+ if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {
+ pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ cfpkt_destroy(pkt);
+ return -EPROTO;
+ }
+
+ if ((cmd & DGM_CMD_BIT) == 0) {
+ if (cfpkt_extr_head(pkt, &dgmhdr, 3) < 0) {
+ pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ cfpkt_destroy(pkt);
+ return -EPROTO;
+ }
+ ret = layr->up->receive(layr->up, pkt);
+ return ret;
+ }
+
+ switch (cmd) {
+ case DGM_FLOW_OFF: /* FLOW OFF */
+ layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_OFF_IND, 0);
+ cfpkt_destroy(pkt);
+ return 0;
+ case DGM_FLOW_ON: /* FLOW ON */
+ layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_ON_IND, 0);
+ cfpkt_destroy(pkt);
+ return 0;
+ default:
+ cfpkt_destroy(pkt);
+ pr_info("CAIF: %s(): Unknown datagram control %d (0x%x)\n",
+ __func__, cmd, cmd);
+ return -EPROTO;
+ }
+}
+
+static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt)
+{
+ u32 zero = 0;
+ struct caif_payload_info *info;
+ struct cfsrvl *service = container_obj(layr);
+ int ret;
+ if (!cfsrvl_ready(service, &ret))
+ return ret;
+
+ cfpkt_add_head(pkt, &zero, 4);
+
+ /* Add info for MUX-layer to route the packet out. */
+ info = cfpkt_info(pkt);
+ info->channel_id = service->layer.id;
+ /* To optimize alignment, we account for the size of the CAIF
+ * header placed before the payload.
+ */
+ info->hdr_len = 4;
+ info->dev_info = &service->dev_info;
+ ret = layr->dn->transmit(layr->dn, pkt);
+ if (ret < 0) {
+ u32 tmp32;
+ cfpkt_extr_head(pkt, &tmp32, 4);
+ }
+ return ret;
+}
diff --git a/net/caif/cffrml.c b/net/caif/cffrml.c
new file mode 100644
index 000000000000..e86a4ca3b217
--- /dev/null
+++ b/net/caif/cffrml.c
@@ -0,0 +1,151 @@
+/*
+ * CAIF Framing Layer.
+ *
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/stddef.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/crc-ccitt.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfpkt.h>
+#include <net/caif/cffrml.h>
+
+#define container_obj(layr) container_of(layr, struct cffrml, layer)
+
+struct cffrml {
+ struct cflayer layer;
+ bool dofcs; /* !< FCS active */
+};
+
+static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt);
+static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt);
+static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
+ int phyid);
+
+static u32 cffrml_rcv_error;
+static u32 cffrml_rcv_checksum_error;
+struct cflayer *cffrml_create(u16 phyid, bool use_fcs)
+{
+ struct cffrml *this = kmalloc(sizeof(struct cffrml), GFP_ATOMIC);
+ if (!this) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return NULL;
+ }
+ caif_assert(offsetof(struct cffrml, layer) == 0);
+
+ memset(this, 0, sizeof(struct cffrml));
+ this->layer.receive = cffrml_receive;
+ this->layer.transmit = cffrml_transmit;
+ this->layer.ctrlcmd = cffrml_ctrlcmd;
+ snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "frm%d", phyid);
+ this->dofcs = use_fcs;
+ this->layer.id = phyid;
+ return (struct cflayer *) this;
+}
+
+void cffrml_set_uplayer(struct cflayer *this, struct cflayer *up)
+{
+ this->up = up;
+}
+
+void cffrml_set_dnlayer(struct cflayer *this, struct cflayer *dn)
+{
+ this->dn = dn;
+}
+
+static u16 cffrml_checksum(u16 chks, void *buf, u16 len)
+{
+ /* FIXME: FCS should be moved to glue in order to use OS-Specific
+ * solutions
+ */
+ return crc_ccitt(chks, buf, len);
+}
+
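+/*
+ * Receive: each frame starts with a 2-byte little-endian length field.
+ * When FCS is enabled for this link the frame also carries a 2-byte
+ * CRC-CCITT checksum as trailer, which is verified and stripped here.
+ */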
+static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt)
+{
+ u16 tmp;
+ u16 len;
+ u16 hdrchks;
+ u16 pktchks;
+ struct cffrml *this;
+ this = container_obj(layr);
+
+ cfpkt_extr_head(pkt, &tmp, 2);
+ len = le16_to_cpu(tmp);
+
+ /* Subtract for FCS on length if FCS is not used. */
+ if (!this->dofcs)
+ len -= 2;
+
+ if (cfpkt_setlen(pkt, len) < 0) {
+ ++cffrml_rcv_error;
+ pr_err("CAIF: %s():Framing length error (%d)\n", __func__, len);
+ cfpkt_destroy(pkt);
+ return -EPROTO;
+ }
+ /*
+ * Don't do extract if FCS is false, rather do setlen - then we don't
+ * get a cache-miss.
+ */
+ if (this->dofcs) {
+ cfpkt_extr_trail(pkt, &tmp, 2);
+ hdrchks = le16_to_cpu(tmp);
+ pktchks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff);
+ if (pktchks != hdrchks) {
+ cfpkt_add_trail(pkt, &tmp, 2);
+ ++cffrml_rcv_error;
+ ++cffrml_rcv_checksum_error;
+ pr_info("CAIF: %s(): Frame checksum error "
+ "(0x%x != 0x%x)\n", __func__, hdrchks, pktchks);
+ return -EILSEQ;
+ }
+ }
+ if (cfpkt_erroneous(pkt)) {
+ ++cffrml_rcv_error;
+ pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ cfpkt_destroy(pkt);
+ return -EPROTO;
+ }
+ return layr->up->receive(layr->up, pkt);
+}
+
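+/*
+ * Transmit: append the CRC-CCITT FCS (or 2 bytes of padding when FCS is
+ * disabled) and prepend the 2-byte little-endian length field covering
+ * payload plus trailer.
+ */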
+static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt)
+{
+ __le16 tmp;
+ u16 chks;
+ u16 len;
+ int ret;
+ struct cffrml *this = container_obj(layr);
+ if (this->dofcs) {
+ chks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff);
+ tmp = cpu_to_le16(chks);
+ cfpkt_add_trail(pkt, &tmp, 2);
+ } else {
+ cfpkt_pad_trail(pkt, 2);
+ }
+ len = cfpkt_getlen(pkt);
+ tmp = cpu_to_le16(len);
+ cfpkt_add_head(pkt, &tmp, 2);
+ cfpkt_info(pkt)->hdr_len += 2;
+ if (cfpkt_erroneous(pkt)) {
+ pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ return -EPROTO;
+ }
+ ret = layr->dn->transmit(layr->dn, pkt);
+ if (ret < 0) {
+ /* Remove header on faulty packet. */
+ cfpkt_extr_head(pkt, &tmp, 2);
+ }
+ return ret;
+}
+
+static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
+ int phyid)
+{
+ if (layr->up->ctrlcmd)
+ layr->up->ctrlcmd(layr->up, ctrl, layr->id);
+}
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
new file mode 100644
index 000000000000..7372f27f1d32
--- /dev/null
+++ b/net/caif/cfmuxl.c
@@ -0,0 +1,251 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#include <linux/stddef.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <net/caif/cfpkt.h>
+#include <net/caif/cfmuxl.h>
+#include <net/caif/cfsrvl.h>
+#include <net/caif/cffrml.h>
+
+#define container_obj(layr) container_of(layr, struct cfmuxl, layer)
+
+#define CAIF_CTRL_CHANNEL 0
+#define UP_CACHE_SIZE 8
+#define DN_CACHE_SIZE 8
+
+struct cfmuxl {
+ struct cflayer layer;
+ struct list_head srvl_list;
+ struct list_head frml_list;
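+ /*
+ * Direct-mapped caches in front of the service (up) and framing (dn)
+ * lists, indexed by id modulo the cache size. Both caches are
+ * invalidated whenever a layer is removed.
+ */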
+ struct cflayer *up_cache[UP_CACHE_SIZE];
+ struct cflayer *dn_cache[DN_CACHE_SIZE];
+ /*
+ * Set when inserting or removing downwards layers.
+ */
+ spinlock_t transmit_lock;
+
+ /*
+ * Set when inserting or removing upwards layers.
+ */
+ spinlock_t receive_lock;
+
+};
+
+static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt);
+static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt);
+static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
+ int phyid);
+static struct cflayer *get_up(struct cfmuxl *muxl, u16 id);
+
+struct cflayer *cfmuxl_create(void)
+{
+ struct cfmuxl *this = kmalloc(sizeof(struct cfmuxl), GFP_ATOMIC);
+ if (!this)
+ return NULL;
+ memset(this, 0, sizeof(*this));
+ this->layer.receive = cfmuxl_receive;
+ this->layer.transmit = cfmuxl_transmit;
+ this->layer.ctrlcmd = cfmuxl_ctrlcmd;
+ INIT_LIST_HEAD(&this->srvl_list);
+ INIT_LIST_HEAD(&this->frml_list);
+ spin_lock_init(&this->transmit_lock);
+ spin_lock_init(&this->receive_lock);
+ snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "mux");
+ return &this->layer;
+}
+
+int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid)
+{
+ struct cfmuxl *muxl = container_obj(layr);
+ spin_lock(&muxl->receive_lock);
+ cfsrvl_get(up);
+ list_add(&up->node, &muxl->srvl_list);
+ spin_unlock(&muxl->receive_lock);
+ return 0;
+}
+
+bool cfmuxl_is_phy_inuse(struct cflayer *layr, u8 phyid)
+{
+ struct list_head *node;
+ struct cflayer *layer;
+ struct cfmuxl *muxl = container_obj(layr);
+ bool match = false;
+ spin_lock(&muxl->receive_lock);
+
+ list_for_each(node, &muxl->srvl_list) {
+ layer = list_entry(node, struct cflayer, node);
+ if (cfsrvl_phyid_match(layer, phyid)) {
+ match = true;
+ break;
+ }
+
+ }
+ spin_unlock(&muxl->receive_lock);
+ return match;
+}
+
+u8 cfmuxl_get_phyid(struct cflayer *layr, u8 channel_id)
+{
+ struct cflayer *up;
+ int phyid;
+ struct cfmuxl *muxl = container_obj(layr);
+ spin_lock(&muxl->receive_lock);
+ up = get_up(muxl, channel_id);
+ if (up != NULL)
+ phyid = cfsrvl_getphyid(up);
+ else
+ phyid = 0;
+ spin_unlock(&muxl->receive_lock);
+ return phyid;
+}
+
+int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *dn, u8 phyid)
+{
+ struct cfmuxl *muxl = (struct cfmuxl *) layr;
+ spin_lock(&muxl->transmit_lock);
+ list_add(&dn->node, &muxl->frml_list);
+ spin_unlock(&muxl->transmit_lock);
+ return 0;
+}
+
+static struct cflayer *get_from_id(struct list_head *list, u16 id)
+{
+ struct list_head *node;
+ struct cflayer *layer;
+ list_for_each(node, list) {
+ layer = list_entry(node, struct cflayer, node);
+ if (layer->id == id)
+ return layer;
+ }
+ return NULL;
+}
+
+struct cflayer *cfmuxl_remove_dnlayer(struct cflayer *layr, u8 phyid)
+{
+ struct cfmuxl *muxl = container_obj(layr);
+ struct cflayer *dn;
+ spin_lock(&muxl->transmit_lock);
+ memset(muxl->dn_cache, 0, sizeof(muxl->dn_cache));
+ dn = get_from_id(&muxl->frml_list, phyid);
+ if (dn == NULL) {
+ spin_unlock(&muxl->transmit_lock);
+ return NULL;
+ }
+ list_del(&dn->node);
+ caif_assert(dn != NULL);
+ spin_unlock(&muxl->transmit_lock);
+ return dn;
+}
+
+/* Invariant: lock is taken */
+static struct cflayer *get_up(struct cfmuxl *muxl, u16 id)
+{
+ struct cflayer *up;
+ int idx = id % UP_CACHE_SIZE;
+ up = muxl->up_cache[idx];
+ if (up == NULL || up->id != id) {
+ up = get_from_id(&muxl->srvl_list, id);
+ muxl->up_cache[idx] = up;
+ }
+ return up;
+}
+
+/* Invariant: lock is taken */
+static struct cflayer *get_dn(struct cfmuxl *muxl, struct dev_info *dev_info)
+{
+ struct cflayer *dn;
+ int idx = dev_info->id % DN_CACHE_SIZE;
+ dn = muxl->dn_cache[idx];
+ if (dn == NULL || dn->id != dev_info->id) {
+ dn = get_from_id(&muxl->frml_list, dev_info->id);
+ muxl->dn_cache[idx] = dn;
+ }
+ return dn;
+}
+
+struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id)
+{
+ struct cflayer *up;
+ struct cfmuxl *muxl = container_obj(layr);
+ spin_lock(&muxl->receive_lock);
+ up = get_up(muxl, id);
+ if (up == NULL) {
+ spin_unlock(&muxl->receive_lock);
+ return NULL;
+ }
+ memset(muxl->up_cache, 0, sizeof(muxl->up_cache));
+ list_del(&up->node);
+ cfsrvl_put(up);
+ spin_unlock(&muxl->receive_lock);
+ return up;
+}
+
+static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt)
+{
+ int ret;
+ struct cfmuxl *muxl = container_obj(layr);
+ u8 id;
+ struct cflayer *up;
+ if (cfpkt_extr_head(pkt, &id, 1) < 0) {
+ pr_err("CAIF: %s(): erroneous Caif Packet\n", __func__);
+ cfpkt_destroy(pkt);
+ return -EPROTO;
+ }
+
+ spin_lock(&muxl->receive_lock);
+ up = get_up(muxl, id);
+ spin_unlock(&muxl->receive_lock);
+ if (up == NULL) {
+ pr_info("CAIF: %s(): Received data on unknown link ID = %d "
+ "(0x%x), up == NULL\n", __func__, id, id);
+ cfpkt_destroy(pkt);
+ /*
+ * Don't return ERROR, since modem misbehaves and sends out
+ * flow on before linksetup response.
+ */
+ return /* CFGLU_EPROT; */ 0;
+ }
+ cfsrvl_get(up);
+ ret = up->receive(up, pkt);
+ cfsrvl_put(up);
+ return ret;
+}
+
+static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt)
+{
+ int ret;
+ struct cfmuxl *muxl = container_obj(layr);
+ u8 linkid;
+ struct cflayer *dn;
+ struct caif_payload_info *info = cfpkt_info(pkt);
+ dn = get_dn(muxl, cfpkt_info(pkt)->dev_info);
+ if (dn == NULL) {
+ pr_warning("CAIF: %s(): Send data on unknown phy "
+ "ID = %d (0x%x)\n",
+ __func__, info->dev_info->id, info->dev_info->id);
+ return -ENOTCONN;
+ }
+ info->hdr_len += 1;
+ linkid = info->channel_id;
+ cfpkt_add_head(pkt, &linkid, 1);
+ ret = dn->transmit(dn, pkt);
+ /* Remove MUX protocol header upon error. */
+ if (ret < 0)
+ cfpkt_extr_head(pkt, &linkid, 1);
+ return ret;
+}
+
+static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
+ int phyid)
+{
+ struct cfmuxl *muxl = container_obj(layr);
+ struct list_head *node;
+ struct cflayer *layer;
+ list_for_each(node, &muxl->srvl_list) {
+ layer = list_entry(node, struct cflayer, node);
+ if (cfsrvl_phyid_match(layer, phyid))
+ layer->ctrlcmd(layer, ctrl, phyid);
+ }
+}
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
new file mode 100644
index 000000000000..83fff2ff6658
--- /dev/null
+++ b/net/caif/cfpkt_skbuff.c
@@ -0,0 +1,571 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <linux/hardirq.h>
+#include <net/caif/cfpkt.h>
+
+#define PKT_PREFIX CAIF_NEEDED_HEADROOM
+#define PKT_POSTFIX CAIF_NEEDED_TAILROOM
+#define PKT_LEN_WHEN_EXTENDING 128
+#define PKT_ERROR(pkt, errmsg) do { \
+ cfpkt_priv(pkt)->erronous = true; \
+ skb_reset_tail_pointer(&pkt->skb); \
+ pr_warning("CAIF: " errmsg);\
+ } while (0)
+
+struct cfpktq {
+ struct sk_buff_head head;
+ atomic_t count;
+ /* Lock protects count updates */
+ spinlock_t lock;
+};
+
+/*
+ * net/caif/ is generic and does not
+ * understand SKB, so we do this typecast
+ */
+struct cfpkt {
+ struct sk_buff skb;
+};
+
+/* Private data inside SKB */
+struct cfpkt_priv_data {
+ struct dev_info dev_info;
+ bool erronous;
+};
+
+inline struct cfpkt_priv_data *cfpkt_priv(struct cfpkt *pkt)
+{
+ return (struct cfpkt_priv_data *) pkt->skb.cb;
+}
+
+inline bool is_erronous(struct cfpkt *pkt)
+{
+ return cfpkt_priv(pkt)->erronous;
+}
+
+inline struct sk_buff *pkt_to_skb(struct cfpkt *pkt)
+{
+ return &pkt->skb;
+}
+
+inline struct cfpkt *skb_to_pkt(struct sk_buff *skb)
+{
+ return (struct cfpkt *) skb;
+}
+
+
+struct cfpkt *cfpkt_fromnative(enum caif_direction dir, void *nativepkt)
+{
+ struct cfpkt *pkt = skb_to_pkt(nativepkt);
+ cfpkt_priv(pkt)->erronous = false;
+ return pkt;
+}
+EXPORT_SYMBOL(cfpkt_fromnative);
+
+void *cfpkt_tonative(struct cfpkt *pkt)
+{
+ return (void *) pkt;
+}
+EXPORT_SYMBOL(cfpkt_tonative);
+
+static struct cfpkt *cfpkt_create_pfx(u16 len, u16 pfx)
+{
+ struct sk_buff *skb;
+
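+ /* Use a non-sleeping allocation when running in interrupt or
+ * softirq context.
+ */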
+ if (likely(in_interrupt()))
+ skb = alloc_skb(len + pfx, GFP_ATOMIC);
+ else
+ skb = alloc_skb(len + pfx, GFP_KERNEL);
+
+ if (unlikely(skb == NULL))
+ return NULL;
+
+ skb_reserve(skb, pfx);
+ return skb_to_pkt(skb);
+}
+
+inline struct cfpkt *cfpkt_create(u16 len)
+{
+ return cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX);
+}
+EXPORT_SYMBOL(cfpkt_create);
+
+void cfpkt_destroy(struct cfpkt *pkt)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ kfree_skb(skb);
+}
+EXPORT_SYMBOL(cfpkt_destroy);
+
+inline bool cfpkt_more(struct cfpkt *pkt)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ return skb->len > 0;
+}
+EXPORT_SYMBOL(cfpkt_more);
+
+int cfpkt_peek_head(struct cfpkt *pkt, void *data, u16 len)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ if (skb_headlen(skb) >= len) {
+ memcpy(data, skb->data, len);
+ return 0;
+ }
+ return !cfpkt_extr_head(pkt, data, len) &&
+ !cfpkt_add_head(pkt, data, len);
+}
+EXPORT_SYMBOL(cfpkt_peek_head);
+
+int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ u8 *from;
+ if (unlikely(is_erronous(pkt)))
+ return -EPROTO;
+
+ if (unlikely(len > skb->len)) {
+ PKT_ERROR(pkt, "cfpkt_extr_head read beyond end of packet\n");
+ return -EPROTO;
+ }
+
+ if (unlikely(len > skb_headlen(skb))) {
+ if (unlikely(skb_linearize(skb) != 0)) {
+ PKT_ERROR(pkt, "cfpkt_extr_head linearize failed\n");
+ return -EPROTO;
+ }
+ }
+ from = skb_pull(skb, len);
+ from -= len;
+ memcpy(data, from, len);
+ return 0;
+}
+EXPORT_SYMBOL(cfpkt_extr_head);
+
+int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ u8 *data = dta;
+ u8 *from;
+ if (unlikely(is_erronous(pkt)))
+ return -EPROTO;
+
+ if (unlikely(skb_linearize(skb) != 0)) {
+ PKT_ERROR(pkt, "cfpkt_extr_trail linearize failed\n");
+ return -EPROTO;
+ }
+ if (unlikely(skb->data + len > skb_tail_pointer(skb))) {
+ PKT_ERROR(pkt, "cfpkt_extr_trail read beyond end of packet\n");
+ return -EPROTO;
+ }
+ from = skb_tail_pointer(skb) - len;
+ skb_trim(skb, skb->len - len);
+ memcpy(data, from, len);
+ return 0;
+}
+EXPORT_SYMBOL(cfpkt_extr_trail);
+
+int cfpkt_pad_trail(struct cfpkt *pkt, u16 len)
+{
+ return cfpkt_add_body(pkt, NULL, len);
+}
+EXPORT_SYMBOL(cfpkt_pad_trail);
+
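+/*
+ * Append data at the packet tail. If there is not enough tailroom the
+ * skb is grown via skb_cow_data() by at least PKT_LEN_WHEN_EXTENDING
+ * bytes, which also makes cloned or shared skbs writable first.
+ */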
+int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ struct sk_buff *lastskb;
+ u8 *to;
+ u16 addlen = 0;
+
+
+ if (unlikely(is_erronous(pkt)))
+ return -EPROTO;
+
+ lastskb = skb;
+
+ /* Check whether we need to add space at the tail */
+ if (unlikely(skb_tailroom(skb) < len)) {
+ if (likely(len < PKT_LEN_WHEN_EXTENDING))
+ addlen = PKT_LEN_WHEN_EXTENDING;
+ else
+ addlen = len;
+ }
+
+ /* Check whether we need to change the SKB before writing to the tail */
+ if (unlikely((addlen > 0) || skb_cloned(skb) || skb_shared(skb))) {
+
+ /* Make sure data is writable */
+ if (unlikely(skb_cow_data(skb, addlen, &lastskb) < 0)) {
+ PKT_ERROR(pkt, "cfpkt_add_body: cow failed\n");
+ return -EPROTO;
+ }
+ /*
+ * Is the SKB non-linear after skb_cow_data()? If so, we are
+ * going to add data to the last SKB, so we need to adjust
+ * lengths of the top SKB.
+ */
+ if (lastskb != skb) {
+ pr_warning("CAIF: %s(): Packet is non-linear\n",
+ __func__);
+ skb->len += len;
+ skb->data_len += len;
+ }
+ }
+
+ /* All set to put the last SKB and optionally write data there. */
+ to = skb_put(lastskb, len);
+ if (likely(data))
+ memcpy(to, data, len);
+ return 0;
+}
+EXPORT_SYMBOL(cfpkt_add_body);
+
+inline int cfpkt_addbdy(struct cfpkt *pkt, u8 data)
+{
+ return cfpkt_add_body(pkt, &data, 1);
+}
+EXPORT_SYMBOL(cfpkt_addbdy);
+
+int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ struct sk_buff *lastskb;
+ u8 *to;
+ const u8 *data = data2;
+ if (unlikely(is_erronous(pkt)))
+ return -EPROTO;
+ if (unlikely(skb_headroom(skb) < len)) {
+ PKT_ERROR(pkt, "cfpkt_add_head: no headroom\n");
+ return -EPROTO;
+ }
+
+ /* Make sure data is writable */
+ if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) {
+ PKT_ERROR(pkt, "cfpkt_add_head: cow failed\n");
+ return -EPROTO;
+ }
+
+ to = skb_push(skb, len);
+ memcpy(to, data, len);
+ return 0;
+}
+EXPORT_SYMBOL(cfpkt_add_head);
+
+inline int cfpkt_add_trail(struct cfpkt *pkt, const void *data, u16 len)
+{
+ return cfpkt_add_body(pkt, data, len);
+}
+EXPORT_SYMBOL(cfpkt_add_trail);
+
+inline u16 cfpkt_getlen(struct cfpkt *pkt)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ return skb->len;
+}
+EXPORT_SYMBOL(cfpkt_getlen);
+
+inline u16 cfpkt_iterate(struct cfpkt *pkt,
+ u16 (*iter_func)(u16, void *, u16),
+ u16 data)
+{
+ /*
+ * Don't care about the performance hit of linearizing,
+ * Checksum should not be used on high-speed interfaces anyway.
+ */
+ if (unlikely(is_erronous(pkt)))
+ return -EPROTO;
+ if (unlikely(skb_linearize(&pkt->skb) != 0)) {
+ PKT_ERROR(pkt, "cfpkt_iterate: linearize failed\n");
+ return -EPROTO;
+ }
+ return iter_func(data, pkt->skb.data, cfpkt_getlen(pkt));
+}
+EXPORT_SYMBOL(cfpkt_iterate);
+
+int cfpkt_setlen(struct cfpkt *pkt, u16 len)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+
+
+ if (unlikely(is_erronous(pkt)))
+ return -EPROTO;
+
+ if (likely(len <= skb->len)) {
+ if (unlikely(skb->data_len))
+ ___pskb_trim(skb, len);
+ else
+ skb_trim(skb, len);
+
+ return cfpkt_getlen(pkt);
+ }
+
+ /* Need to expand SKB */
+ if (unlikely(cfpkt_pad_trail(pkt, len - skb->len) != 0))
+ PKT_ERROR(pkt, "cfpkt_setlen: cfpkt_pad_trail failed\n");
+
+ return cfpkt_getlen(pkt);
+}
+EXPORT_SYMBOL(cfpkt_setlen);
+
+struct cfpkt *cfpkt_create_uplink(const unsigned char *data, unsigned int len)
+{
+ struct cfpkt *pkt = cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX);
+ if (unlikely(pkt == NULL))
+ return NULL;
+ if (data != NULL)
+ cfpkt_add_body(pkt, data, len);
+ return pkt;
+}
+EXPORT_SYMBOL(cfpkt_create_uplink);
+
+struct cfpkt *cfpkt_append(struct cfpkt *dstpkt,
+ struct cfpkt *addpkt,
+ u16 expectlen)
+{
+ struct sk_buff *dst = pkt_to_skb(dstpkt);
+ struct sk_buff *add = pkt_to_skb(addpkt);
+ u16 addlen = skb_headlen(add);
+ u16 neededtailspace;
+ struct sk_buff *tmp;
+ u16 dstlen;
+ u16 createlen;
+ if (unlikely(is_erronous(dstpkt) || is_erronous(addpkt))) {
+ cfpkt_destroy(addpkt);
+ return dstpkt;
+ }
+ if (expectlen > addlen)
+ neededtailspace = expectlen;
+ else
+ neededtailspace = addlen;
+
+ if (dst->tail + neededtailspace > dst->end) {
+ /* Create a duplicate of 'dst' with more tail space */
+ dstlen = skb_headlen(dst);
+ createlen = dstlen + neededtailspace;
+ tmp = pkt_to_skb(
+ cfpkt_create(createlen + PKT_PREFIX + PKT_POSTFIX));
+ if (!tmp)
+ return NULL;
+ skb_set_tail_pointer(tmp, dstlen);
+ tmp->len = dstlen;
+ memcpy(tmp->data, dst->data, dstlen);
+ cfpkt_destroy(dstpkt);
+ dst = tmp;
+ }
+ memcpy(skb_tail_pointer(dst), add->data, skb_headlen(add));
+ cfpkt_destroy(addpkt);
+ dst->tail += addlen;
+ dst->len += addlen;
+ return skb_to_pkt(dst);
+}
+EXPORT_SYMBOL(cfpkt_append);
+
+struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos)
+{
+ struct sk_buff *skb2;
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ u8 *split = skb->data + pos;
+ u16 len2nd = skb_tail_pointer(skb) - split;
+
+ if (unlikely(is_erronous(pkt)))
+ return NULL;
+
+ if (skb->data + pos > skb_tail_pointer(skb)) {
+ PKT_ERROR(pkt,
+ "cfpkt_split: trying to split beyond end of packet");
+ return NULL;
+ }
+
+ /* Create a new packet for the second part of the data */
+ skb2 = pkt_to_skb(
+ cfpkt_create_pfx(len2nd + PKT_PREFIX + PKT_POSTFIX,
+ PKT_PREFIX));
+
+ if (skb2 == NULL)
+ return NULL;
+
+ /* Reduce the length of the original packet */
+ skb_set_tail_pointer(skb, pos);
+ skb->len = pos;
+
+ memcpy(skb2->data, split, len2nd);
+ skb2->tail += len2nd;
+ skb2->len += len2nd;
+ return skb_to_pkt(skb2);
+}
+EXPORT_SYMBOL(cfpkt_split);
+
+char *cfpkt_log_pkt(struct cfpkt *pkt, char *buf, int buflen)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ char *p = buf;
+ int i;
+
+ /*
+ * Sanity check buffer length, it needs to be at least as large as
+ * the header info: ~=50+ bytes
+ */
+ if (buflen < 50)
+ return NULL;
+
+ snprintf(buf, buflen, "%s: pkt:%p len:%ld(%ld+%ld) {%ld,%ld} data: [",
+ is_erronous(pkt) ? "ERRONOUS-SKB" :
+ (skb->data_len != 0 ? "COMPLEX-SKB" : "SKB"),
+ skb,
+ (long) skb->len,
+ (long) (skb_tail_pointer(skb) - skb->data),
+ (long) skb->data_len,
+ (long) (skb->data - skb->head),
+ (long) (skb_tail_pointer(skb) - skb->head));
+ p = buf + strlen(buf);
+
+ for (i = 0; i < skb_tail_pointer(skb) - skb->data && i < 300; i++) {
+ if (p > buf + buflen - 10) {
+ sprintf(p, "...");
+ p = buf + strlen(buf);
+ break;
+ }
+ sprintf(p, "%02x,", skb->data[i]);
+ p = buf + strlen(buf);
+ }
+ sprintf(p, "]\n");
+ return buf;
+}
+EXPORT_SYMBOL(cfpkt_log_pkt);
+
+int cfpkt_raw_append(struct cfpkt *pkt, void **buf, unsigned int buflen)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ struct sk_buff *lastskb;
+
+ caif_assert(buf != NULL);
+ if (unlikely(is_erronous(pkt)))
+ return -EPROTO;
+ /* Make sure SKB is writable */
+ if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) {
+ PKT_ERROR(pkt, "cfpkt_raw_append: skb_cow_data failed\n");
+ return -EPROTO;
+ }
+
+ if (unlikely(skb_linearize(skb) != 0)) {
+ PKT_ERROR(pkt, "cfpkt_raw_append: linearize failed\n");
+ return -EPROTO;
+ }
+
+ if (unlikely(skb_tailroom(skb) < buflen)) {
+ PKT_ERROR(pkt, "cfpkt_raw_append: buffer too short - failed\n");
+ return -EPROTO;
+ }
+
+ *buf = skb_put(skb, buflen);
+ return 1;
+}
+EXPORT_SYMBOL(cfpkt_raw_append);
+
+int cfpkt_raw_extract(struct cfpkt *pkt, void **buf, unsigned int buflen)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+
+ caif_assert(buf != NULL);
+ if (unlikely(is_erronous(pkt)))
+ return -EPROTO;
+
+ if (unlikely(buflen > skb->len)) {
+ PKT_ERROR(pkt, "cfpkt_raw_extract: buflen too large "
+ "- failed\n");
+ return -EPROTO;
+ }
+
+ if (unlikely(buflen > skb_headlen(skb))) {
+ if (unlikely(skb_linearize(skb) != 0)) {
+ PKT_ERROR(pkt, "cfpkt_raw_extract: linearize failed\n");
+ return -EPROTO;
+ }
+ }
+
+ *buf = skb->data;
+ skb_pull(skb, buflen);
+
+ return 1;
+}
+EXPORT_SYMBOL(cfpkt_raw_extract);
+
+inline bool cfpkt_erroneous(struct cfpkt *pkt)
+{
+ return cfpkt_priv(pkt)->erronous;
+}
+EXPORT_SYMBOL(cfpkt_erroneous);
+
+struct cfpktq *cfpktq_create(void)
+{
+ struct cfpktq *q = kmalloc(sizeof(struct cfpktq), GFP_ATOMIC);
+ if (!q)
+ return NULL;
+ skb_queue_head_init(&q->head);
+ atomic_set(&q->count, 0);
+ spin_lock_init(&q->lock);
+ return q;
+}
+EXPORT_SYMBOL(cfpktq_create);
+
+void cfpkt_queue(struct cfpktq *pktq, struct cfpkt *pkt, unsigned short prio)
+{
+ atomic_inc(&pktq->count);
+ spin_lock(&pktq->lock);
+ skb_queue_tail(&pktq->head, pkt_to_skb(pkt));
+ spin_unlock(&pktq->lock);
+
+}
+EXPORT_SYMBOL(cfpkt_queue);
+
+struct cfpkt *cfpkt_qpeek(struct cfpktq *pktq)
+{
+ struct cfpkt *tmp;
+ spin_lock(&pktq->lock);
+ tmp = skb_to_pkt(skb_peek(&pktq->head));
+ spin_unlock(&pktq->lock);
+ return tmp;
+}
+EXPORT_SYMBOL(cfpkt_qpeek);
+
+struct cfpkt *cfpkt_dequeue(struct cfpktq *pktq)
+{
+ struct cfpkt *pkt;
+ spin_lock(&pktq->lock);
+ pkt = skb_to_pkt(skb_dequeue(&pktq->head));
+ if (pkt) {
+ atomic_dec(&pktq->count);
+ caif_assert(atomic_read(&pktq->count) >= 0);
+ }
+ spin_unlock(&pktq->lock);
+ return pkt;
+}
+EXPORT_SYMBOL(cfpkt_dequeue);
+
+int cfpkt_qcount(struct cfpktq *pktq)
+{
+ return atomic_read(&pktq->count);
+}
+EXPORT_SYMBOL(cfpkt_qcount);
+
+struct cfpkt *cfpkt_clone_release(struct cfpkt *pkt)
+{
+ struct cfpkt *clone;
+ clone = skb_to_pkt(skb_clone(pkt_to_skb(pkt), GFP_ATOMIC));
+ /* Free original packet. */
+ cfpkt_destroy(pkt);
+ if (!clone)
+ return NULL;
+ return clone;
+}
+EXPORT_SYMBOL(cfpkt_clone_release);
+
+struct caif_payload_info *cfpkt_info(struct cfpkt *pkt)
+{
+ return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb;
+}
+EXPORT_SYMBOL(cfpkt_info);
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
new file mode 100644
index 000000000000..cd2830fec935
--- /dev/null
+++ b/net/caif/cfrfml.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/stddef.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfsrvl.h>
+#include <net/caif/cfpkt.h>
+
+#define container_obj(layr) container_of(layr, struct cfsrvl, layer)
+
+#define RFM_SEGMENTATION_BIT 0x01
+#define RFM_PAYLOAD 0x00
+#define RFM_CMD_BIT 0x80
+#define RFM_FLOW_OFF 0x81
+#define RFM_FLOW_ON 0x80
+#define RFM_SET_PIN 0x82
+#define RFM_CTRL_PKT_SIZE 1
+
+static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt);
+static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt);
+static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl);
+
+struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info)
+{
+ struct cfsrvl *rfm = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
+ if (!rfm) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return NULL;
+ }
+ caif_assert(offsetof(struct cfsrvl, layer) == 0);
+ memset(rfm, 0, sizeof(struct cfsrvl));
+ cfsrvl_init(rfm, channel_id, dev_info);
+ rfm->layer.modemcmd = cfservl_modemcmd;
+ rfm->layer.receive = cfrfml_receive;
+ rfm->layer.transmit = cfrfml_transmit;
+ snprintf(rfm->layer.name, CAIF_LAYER_NAME_SZ, "rfm%d", channel_id);
+ return &rfm->layer;
+}
+
+static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
+{
+ return -EPROTO;
+}
+
+static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt)
+{
+ u8 tmp;
+ bool segmented;
+ int ret;
+ caif_assert(layr->up != NULL);
+ caif_assert(layr->receive != NULL);
+
+ /*
+ * RFM is taking care of segmentation and stripping of
+ * segmentation bit.
+ */
+ if (cfpkt_extr_head(pkt, &tmp, 1) < 0) {
+ pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ cfpkt_destroy(pkt);
+ return -EPROTO;
+ }
+ segmented = tmp & RFM_SEGMENTATION_BIT;
+ caif_assert(!segmented);
+
+ ret = layr->up->receive(layr->up, pkt);
+ return ret;
+}
+
+static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
+{
+ u8 tmp = 0;
+ int ret;
+ struct cfsrvl *service = container_obj(layr);
+
+ caif_assert(layr->dn != NULL);
+ caif_assert(layr->dn->transmit != NULL);
+
+ if (!cfsrvl_ready(service, &ret))
+ return ret;
+
+ if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
+ pr_err("CAIF: %s(): Packet too large - size=%d\n",
+ __func__, cfpkt_getlen(pkt));
+ return -EOVERFLOW;
+ }
+ if (cfpkt_add_head(pkt, &tmp, 1) < 0) {
+ pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ return -EPROTO;
+ }
+
+ /* Add info for MUX-layer to route the packet out. */
+ cfpkt_info(pkt)->channel_id = service->layer.id;
+ /*
+ * To optimize alignment, we account for the size of the CAIF header
+ * placed before the payload.
+ */
+ cfpkt_info(pkt)->hdr_len = 1;
+ cfpkt_info(pkt)->dev_info = &service->dev_info;
+ ret = layr->dn->transmit(layr->dn, pkt);
+ if (ret < 0)
+ cfpkt_extr_head(pkt, &tmp, 1);
+ return ret;
+}
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
new file mode 100644
index 000000000000..06029ea2da2f
--- /dev/null
+++ b/net/caif/cfserl.c
@@ -0,0 +1,192 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/stddef.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfpkt.h>
+#include <net/caif/cfserl.h>
+
+#define container_obj(layr) ((struct cfserl *) layr)
+
+#define CFSERL_STX 0x02
+#define CAIF_MINIMUM_PACKET_SIZE 4
+struct cfserl {
+ struct cflayer layer;
+ struct cfpkt *incomplete_frm;
+ /* Protects parallel processing of incoming packets */
+ spinlock_t sync;
+ bool usestx;
+};
+#define STXLEN(layr) (layr->usestx ? 1 : 0)
+
+static int cfserl_receive(struct cflayer *layr, struct cfpkt *pkt);
+static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
+static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
+ int phyid);
+
+struct cflayer *cfserl_create(int type, int instance, bool use_stx)
+{
+ struct cfserl *this = kmalloc(sizeof(struct cfserl), GFP_ATOMIC);
+ if (!this) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return NULL;
+ }
+ caif_assert(offsetof(struct cfserl, layer) == 0);
+ memset(this, 0, sizeof(struct cfserl));
+ this->layer.receive = cfserl_receive;
+ this->layer.transmit = cfserl_transmit;
+ this->layer.ctrlcmd = cfserl_ctrlcmd;
+ this->layer.type = type;
+ this->usestx = use_stx;
+ spin_lock_init(&this->sync);
+ snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "ser1");
+ return &this->layer;
+}
+
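+/*
+ * Reassemble the byte stream from the serial link into complete CAIF
+ * frames. Partial frames are parked in incomplete_frm until the 2-byte
+ * length field is satisfied; with STX framing the parser resynchronizes
+ * on the next STX byte after a framing or checksum error.
+ */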
+static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
+{
+ struct cfserl *layr = container_obj(l);
+ u16 pkt_len;
+ struct cfpkt *pkt = NULL;
+ struct cfpkt *tail_pkt = NULL;
+ u8 tmp8;
+ u16 tmp;
+ u8 stx = CFSERL_STX;
+ int ret;
+ u16 expectlen = 0;
+ caif_assert(newpkt != NULL);
+ spin_lock(&layr->sync);
+
+ if (layr->incomplete_frm != NULL) {
+
+ layr->incomplete_frm =
+ cfpkt_append(layr->incomplete_frm, newpkt, expectlen);
+ pkt = layr->incomplete_frm;
+ } else {
+ pkt = newpkt;
+ }
+ layr->incomplete_frm = NULL;
+
+ do {
+ /* Search for STX at start of pkt if STX is used */
+ if (layr->usestx) {
+ cfpkt_extr_head(pkt, &tmp8, 1);
+ if (tmp8 != CFSERL_STX) {
+ while (cfpkt_more(pkt)
+ && tmp8 != CFSERL_STX) {
+ cfpkt_extr_head(pkt, &tmp8, 1);
+ }
+ if (!cfpkt_more(pkt)) {
+ cfpkt_destroy(pkt);
+ layr->incomplete_frm = NULL;
+ spin_unlock(&layr->sync);
+ return -EPROTO;
+ }
+ }
+ }
+
+ pkt_len = cfpkt_getlen(pkt);
+
+ /*
+ * pkt_len is the accumulated length of the packet data
+ * we have received so far.
+ * Exit if frame doesn't hold length.
+ */
+
+ if (pkt_len < 2) {
+ if (layr->usestx)
+ cfpkt_add_head(pkt, &stx, 1);
+ layr->incomplete_frm = pkt;
+ spin_unlock(&layr->sync);
+ return 0;
+ }
+
+ /*
+ * Find length of frame.
+ * expectlen is the length we need for a full frame.
+ */
+ cfpkt_peek_head(pkt, &tmp, 2);
+ expectlen = le16_to_cpu(tmp) + 2;
+ /*
+ * Frame error handling
+ */
+ if (expectlen < CAIF_MINIMUM_PACKET_SIZE
+ || expectlen > CAIF_MAX_FRAMESIZE) {
+ if (!layr->usestx) {
+ if (pkt != NULL)
+ cfpkt_destroy(pkt);
+ layr->incomplete_frm = NULL;
+ expectlen = 0;
+ spin_unlock(&layr->sync);
+ return -EPROTO;
+ }
+ continue;
+ }
+
+ if (pkt_len < expectlen) {
+ /* Too little received data */
+ if (layr->usestx)
+ cfpkt_add_head(pkt, &stx, 1);
+ layr->incomplete_frm = pkt;
+ spin_unlock(&layr->sync);
+ return 0;
+ }
+
+ /*
+ * Enough data for at least one frame.
+ * Split the frame, if too long
+ */
+ if (pkt_len > expectlen)
+ tail_pkt = cfpkt_split(pkt, expectlen);
+ else
+ tail_pkt = NULL;
+
+ /* Send the first part of packet upwards.*/
+ spin_unlock(&layr->sync);
+ ret = layr->layer.up->receive(layr->layer.up, pkt);
+ spin_lock(&layr->sync);
+ if (ret == -EILSEQ) {
+ if (layr->usestx) {
+ if (tail_pkt != NULL)
+ pkt = cfpkt_append(pkt, tail_pkt, 0);
+
+ /* Start search for next STX if frame failed */
+ continue;
+ } else {
+ cfpkt_destroy(pkt);
+ pkt = NULL;
+ }
+ }
+
+ pkt = tail_pkt;
+
+ } while (pkt != NULL);
+
+ spin_unlock(&layr->sync);
+ return 0;
+}
+
+static int cfserl_transmit(struct cflayer *layer, struct cfpkt *newpkt)
+{
+ struct cfserl *layr = container_obj(layer);
+ int ret;
+ u8 tmp8 = CFSERL_STX;
+ if (layr->usestx)
+ cfpkt_add_head(newpkt, &tmp8, 1);
+ ret = layer->dn->transmit(layer->dn, newpkt);
+ if (ret < 0)
+ cfpkt_extr_head(newpkt, &tmp8, 1);
+
+ return ret;
+}
+
+static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
+ int phyid)
+{
+ layr->up->ctrlcmd(layr->up, ctrl, phyid);
+}
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
new file mode 100644
index 000000000000..aff31f34528f
--- /dev/null
+++ b/net/caif/cfsrvl.c
@@ -0,0 +1,192 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfsrvl.h>
+#include <net/caif/cfpkt.h>
+
+#define SRVL_CTRL_PKT_SIZE 1
+#define SRVL_FLOW_OFF 0x81
+#define SRVL_FLOW_ON 0x80
+#define SRVL_SET_PIN 0x82
+#define SRVL_CTRL_PKT_SIZE 1
+
+#define container_obj(layr) container_of(layr, struct cfsrvl, layer)
+
+static void cfservl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
+ int phyid)
+{
+ struct cfsrvl *service = container_obj(layr);
+ caif_assert(layr->up != NULL);
+ caif_assert(layr->up->ctrlcmd != NULL);
+ switch (ctrl) {
+ case CAIF_CTRLCMD_INIT_RSP:
+ service->open = true;
+ layr->up->ctrlcmd(layr->up, ctrl, phyid);
+ break;
+ case CAIF_CTRLCMD_DEINIT_RSP:
+ case CAIF_CTRLCMD_INIT_FAIL_RSP:
+ service->open = false;
+ layr->up->ctrlcmd(layr->up, ctrl, phyid);
+ break;
+ case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND:
+ if (phyid != service->dev_info.id)
+ break;
+ if (service->modem_flow_on)
+ layr->up->ctrlcmd(layr->up,
+ CAIF_CTRLCMD_FLOW_OFF_IND, phyid);
+ service->phy_flow_on = false;
+ break;
+ case _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND:
+ if (phyid != service->dev_info.id)
+ return;
+ if (service->modem_flow_on) {
+ layr->up->ctrlcmd(layr->up,
+ CAIF_CTRLCMD_FLOW_ON_IND,
+ phyid);
+ }
+ service->phy_flow_on = true;
+ break;
+ case CAIF_CTRLCMD_FLOW_OFF_IND:
+ if (service->phy_flow_on) {
+ layr->up->ctrlcmd(layr->up,
+ CAIF_CTRLCMD_FLOW_OFF_IND, phyid);
+ }
+ service->modem_flow_on = false;
+ break;
+ case CAIF_CTRLCMD_FLOW_ON_IND:
+ if (service->phy_flow_on) {
+ layr->up->ctrlcmd(layr->up,
+ CAIF_CTRLCMD_FLOW_ON_IND, phyid);
+ }
+ service->modem_flow_on = true;
+ break;
+ case _CAIF_CTRLCMD_PHYIF_DOWN_IND:
+ /* If the interface is down, fake a remote shutdown indication */
+ layr->up->ctrlcmd(layr->up,
+ CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND, phyid);
+ break;
+ case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
+ layr->up->ctrlcmd(layr->up, ctrl, phyid);
+ break;
+ default:
+ pr_warning("CAIF: %s(): "
+ "Unexpected ctrl in cfsrvl (%d)\n", __func__, ctrl);
+ /* We have both modem and phy flow on, send flow on */
+ layr->up->ctrlcmd(layr->up, ctrl, phyid);
+ service->phy_flow_on = true;
+ break;
+ }
+}
+
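+/*
+ * Translate flow-control requests from the layer above into single-byte
+ * SRVL_FLOW_ON/SRVL_FLOW_OFF control packets transmitted down the stack.
+ */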
+static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
+{
+ struct cfsrvl *service = container_obj(layr);
+ caif_assert(layr != NULL);
+ caif_assert(layr->dn != NULL);
+ caif_assert(layr->dn->transmit != NULL);
+ switch (ctrl) {
+ case CAIF_MODEMCMD_FLOW_ON_REQ:
+ {
+ struct cfpkt *pkt;
+ struct caif_payload_info *info;
+ u8 flow_on = SRVL_FLOW_ON;
+ pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE);
+ if (!pkt) {
+ pr_warning("CAIF: %s(): Out of memory\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ if (cfpkt_add_head(pkt, &flow_on, 1) < 0) {
+ pr_err("CAIF: %s(): Packet is erroneous!\n",
+ __func__);
+ cfpkt_destroy(pkt);
+ return -EPROTO;
+ }
+ info = cfpkt_info(pkt);
+ info->channel_id = service->layer.id;
+ info->hdr_len = 1;
+ info->dev_info = &service->dev_info;
+ return layr->dn->transmit(layr->dn, pkt);
+ }
+ case CAIF_MODEMCMD_FLOW_OFF_REQ:
+ {
+ struct cfpkt *pkt;
+ struct caif_payload_info *info;
+ u8 flow_off = SRVL_FLOW_OFF;
+ pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE);
+ if (!pkt)
+ return -ENOMEM;
+ if (cfpkt_add_head(pkt, &flow_off, 1) < 0) {
+ pr_err("CAIF: %s(): Packet is erroneous!\n",
+ __func__);
+ cfpkt_destroy(pkt);
+ return -EPROTO;
+ }
+ info = cfpkt_info(pkt);
+ info->channel_id = service->layer.id;
+ info->hdr_len = 1;
+ info->dev_info = &service->dev_info;
+ return layr->dn->transmit(layr->dn, pkt);
+ }
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+void cfservl_destroy(struct cflayer *layer)
+{
+ kfree(layer);
+}
+
+void cfsrvl_init(struct cfsrvl *service,
+ u8 channel_id,
+ struct dev_info *dev_info)
+{
+ caif_assert(offsetof(struct cfsrvl, layer) == 0);
+ service->open = false;
+ service->modem_flow_on = true;
+ service->phy_flow_on = true;
+ service->layer.id = channel_id;
+ service->layer.ctrlcmd = cfservl_ctrlcmd;
+ service->layer.modemcmd = cfservl_modemcmd;
+ service->dev_info = *dev_info;
+ kref_init(&service->ref);
+}
+
+void cfsrvl_release(struct kref *kref)
+{
+ struct cfsrvl *service = container_of(kref, struct cfsrvl, ref);
+ kfree(service);
+}
+
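+/*
+ * A service layer may transmit only when the channel is open and both
+ * the modem and the physical interface report flow-on; otherwise the
+ * appropriate error code is returned through *err.
+ */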
+bool cfsrvl_ready(struct cfsrvl *service, int *err)
+{
+ if (service->open && service->modem_flow_on && service->phy_flow_on)
+ return true;
+ if (!service->open) {
+ *err = -ENOTCONN;
+ return false;
+ }
+ caif_assert(!(service->modem_flow_on && service->phy_flow_on));
+ *err = -EAGAIN;
+ return false;
+}
+u8 cfsrvl_getphyid(struct cflayer *layer)
+{
+ struct cfsrvl *servl = container_obj(layer);
+ return servl->dev_info.id;
+}
+
+bool cfsrvl_phyid_match(struct cflayer *layer, int phyid)
+{
+ struct cfsrvl *servl = container_obj(layer);
+ return servl->dev_info.id == phyid;
+}
diff --git a/net/caif/cfutill.c b/net/caif/cfutill.c
new file mode 100644
index 000000000000..5fd2c9ea8b42
--- /dev/null
+++ b/net/caif/cfutill.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfsrvl.h>
+#include <net/caif/cfpkt.h>
+
+#define container_obj(layr) ((struct cfsrvl *) layr)
+#define UTIL_PAYLOAD 0x00
+#define UTIL_CMD_BIT 0x80
+#define UTIL_REMOTE_SHUTDOWN 0x82
+#define UTIL_FLOW_OFF 0x81
+#define UTIL_FLOW_ON 0x80
+#define UTIL_CTRL_PKT_SIZE 1
+static int cfutill_receive(struct cflayer *layr, struct cfpkt *pkt);
+static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt);
+
+struct cflayer *cfutill_create(u8 channel_id, struct dev_info *dev_info)
+{
+ struct cfsrvl *util = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
+ if (!util) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return NULL;
+ }
+ caif_assert(offsetof(struct cfsrvl, layer) == 0);
+ memset(util, 0, sizeof(struct cfsrvl));
+ cfsrvl_init(util, channel_id, dev_info);
+ util->layer.receive = cfutill_receive;
+ util->layer.transmit = cfutill_transmit;
+ snprintf(util->layer.name, CAIF_LAYER_NAME_SZ - 1, "util1");
+ return &util->layer;
+}
+
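+/* The leading byte of a utility packet is either UTIL_PAYLOAD or a
+ * control command: flow on/off or a remote shutdown request from the
+ * modem, which closes the service.
+ */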
+static int cfutill_receive(struct cflayer *layr, struct cfpkt *pkt)
+{
+ u8 cmd = -1;
+ struct cfsrvl *service = container_obj(layr);
+ caif_assert(layr != NULL);
+ caif_assert(layr->up != NULL);
+ caif_assert(layr->up->receive != NULL);
+ caif_assert(layr->up->ctrlcmd != NULL);
+ if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {
+ pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ cfpkt_destroy(pkt);
+ return -EPROTO;
+ }
+
+ switch (cmd) {
+ case UTIL_PAYLOAD:
+ return layr->up->receive(layr->up, pkt);
+ case UTIL_FLOW_OFF:
+ layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_OFF_IND, 0);
+ cfpkt_destroy(pkt);
+ return 0;
+ case UTIL_FLOW_ON:
+ layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_ON_IND, 0);
+ cfpkt_destroy(pkt);
+ return 0;
+ case UTIL_REMOTE_SHUTDOWN: /* Remote Shutdown Request */
+ pr_err("CAIF: %s(): REMOTE SHUTDOWN REQUEST RECEIVED\n",
+ __func__);
+ layr->ctrlcmd(layr, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND, 0);
+ service->open = false;
+ cfpkt_destroy(pkt);
+ return 0;
+ default:
+ cfpkt_destroy(pkt);
+ pr_warning("CAIF: %s(): Unknown service control %d (0x%x)\n",
+ __func__, cmd, cmd);
+ return -EPROTO;
+ }
+}
+
+static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt)
+{
+ u8 zero = 0;
+ struct caif_payload_info *info;
+ int ret;
+ struct cfsrvl *service = container_obj(layr);
+ caif_assert(layr != NULL);
+ caif_assert(layr->dn != NULL);
+ caif_assert(layr->dn->transmit != NULL);
+ if (!cfsrvl_ready(service, &ret))
+ return ret;
+
+ if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
+ pr_err("CAIF: %s(): packet too large size=%d\n",
+ __func__, cfpkt_getlen(pkt));
+ return -EOVERFLOW;
+ }
+
+ cfpkt_add_head(pkt, &zero, 1);
+ /* Add info for MUX-layer to route the packet out. */
+ info = cfpkt_info(pkt);
+ info->channel_id = service->layer.id;
+ /*
+ * To optimize alignment, we account for the size of the CAIF header
+ * placed before the payload.
+ */
+ info->hdr_len = 1;
+ info->dev_info = &service->dev_info;
+ ret = layr->dn->transmit(layr->dn, pkt);
+ if (ret < 0) {
+ u32 tmp32;
+ cfpkt_extr_head(pkt, &tmp32, 4);
+ }
+ return ret;
+}
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c
new file mode 100644
index 000000000000..0fd827f49491
--- /dev/null
+++ b/net/caif/cfveil.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/stddef.h>
+#include <linux/slab.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfsrvl.h>
+#include <net/caif/cfpkt.h>
+
+#define VEI_PAYLOAD 0x00
+#define VEI_CMD_BIT 0x80
+#define VEI_FLOW_OFF 0x81
+#define VEI_FLOW_ON 0x80
+#define VEI_SET_PIN 0x82
+#define VEI_CTRL_PKT_SIZE 1
+#define container_obj(layr) container_of(layr, struct cfsrvl, layer)
+
+static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt);
+static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt);
+
+struct cflayer *cfvei_create(u8 channel_id, struct dev_info *dev_info)
+{
+ struct cfsrvl *vei = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
+ if (!vei) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return NULL;
+ }
+ caif_assert(offsetof(struct cfsrvl, layer) == 0);
+ memset(vei, 0, sizeof(struct cfsrvl));
+ cfsrvl_init(vei, channel_id, dev_info);
+ vei->layer.receive = cfvei_receive;
+ vei->layer.transmit = cfvei_transmit;
+ snprintf(vei->layer.name, CAIF_LAYER_NAME_SZ - 1, "vei%d", channel_id);
+ return &vei->layer;
+}
+
+static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt)
+{
+ u8 cmd;
+ int ret;
+ caif_assert(layr->up != NULL);
+ caif_assert(layr->receive != NULL);
+ caif_assert(layr->ctrlcmd != NULL);
+
+ if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {
+ pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ cfpkt_destroy(pkt);
+ return -EPROTO;
+ }
+ switch (cmd) {
+ case VEI_PAYLOAD:
+ ret = layr->up->receive(layr->up, pkt);
+ return ret;
+ case VEI_FLOW_OFF:
+ layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_OFF_IND, 0);
+ cfpkt_destroy(pkt);
+ return 0;
+ case VEI_FLOW_ON:
+ layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_ON_IND, 0);
+ cfpkt_destroy(pkt);
+ return 0;
+ case VEI_SET_PIN: /* SET RS232 PIN */
+ cfpkt_destroy(pkt);
+ return 0;
+ default: /* SET RS232 PIN */
+ pr_warning("CAIF: %s():Unknown VEI control packet %d (0x%x)!\n",
+ __func__, cmd, cmd);
+ cfpkt_destroy(pkt);
+ return -EPROTO;
+ }
+}
+
+static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt)
+{
+ u8 tmp = 0;
+ struct caif_payload_info *info;
+ int ret;
+ struct cfsrvl *service = container_obj(layr);
+ if (!cfsrvl_ready(service, &ret))
+ return ret;
+ caif_assert(layr->dn != NULL);
+ caif_assert(layr->dn->transmit != NULL);
+ if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
+ pr_warning("CAIF: %s(): Packet too large - size=%d\n",
+ __func__, cfpkt_getlen(pkt));
+ return -EOVERFLOW;
+ }
+
+ if (cfpkt_add_head(pkt, &tmp, 1) < 0) {
+ pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ return -EPROTO;
+ }
+
+ /* Add info-> for MUX-layer to route the packet out. */
+ info = cfpkt_info(pkt);
+ info->channel_id = service->layer.id;
+ info->hdr_len = 1;
+ info->dev_info = &service->dev_info;
+ ret = layr->dn->transmit(layr->dn, pkt);
+ if (ret < 0)
+ cfpkt_extr_head(pkt, &tmp, 1);
+ return ret;
+}
diff --git a/net/caif/cfvidl.c b/net/caif/cfvidl.c
new file mode 100644
index 000000000000..89ad4ea239f1
--- /dev/null
+++ b/net/caif/cfvidl.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfsrvl.h>
+#include <net/caif/cfpkt.h>
+
+#define container_obj(layr) ((struct cfsrvl *) layr)
+
+static int cfvidl_receive(struct cflayer *layr, struct cfpkt *pkt);
+static int cfvidl_transmit(struct cflayer *layr, struct cfpkt *pkt);
+
+struct cflayer *cfvidl_create(u8 channel_id, struct dev_info *dev_info)
+{
+ struct cfsrvl *vid = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
+ if (!vid) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return NULL;
+ }
+ caif_assert(offsetof(struct cfsrvl, layer) == 0);
+
+ memset(vid, 0, sizeof(struct cfsrvl));
+ cfsrvl_init(vid, channel_id, dev_info);
+ vid->layer.receive = cfvidl_receive;
+ vid->layer.transmit = cfvidl_transmit;
+ snprintf(vid->layer.name, CAIF_LAYER_NAME_SZ - 1, "vid1");
+ return &vid->layer;
+}
+
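+/* The video service carries a 4-byte video header: stripped on receive
+ * and zero-filled on transmit before the packet is routed out.
+ */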
+static int cfvidl_receive(struct cflayer *layr, struct cfpkt *pkt)
+{
+ u32 videoheader;
+ if (cfpkt_extr_head(pkt, &videoheader, 4) < 0) {
+ pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ cfpkt_destroy(pkt);
+ return -EPROTO;
+ }
+ return layr->up->receive(layr->up, pkt);
+}
+
+static int cfvidl_transmit(struct cflayer *layr, struct cfpkt *pkt)
+{
+ struct cfsrvl *service = container_obj(layr);
+ struct caif_payload_info *info;
+ u32 videoheader = 0;
+ int ret;
+ if (!cfsrvl_ready(service, &ret))
+ return ret;
+ cfpkt_add_head(pkt, &videoheader, 4);
+ /* Add info for MUX-layer to route the packet out */
+ info = cfpkt_info(pkt);
+ info->channel_id = service->layer.id;
+ info->dev_info = &service->dev_info;
+ ret = layr->dn->transmit(layr->dn, pkt);
+ if (ret < 0)
+ cfpkt_extr_head(pkt, &videoheader, 4);
+ return ret;
+}
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
new file mode 100644
index 000000000000..610966abe2dc
--- /dev/null
+++ b/net/caif/chnl_net.c
@@ -0,0 +1,467 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Authors: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * Daniel Martensson / Daniel.Martensson@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/version.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/if_ether.h>
+#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/sched.h>
+#include <linux/sockios.h>
+#include <linux/caif/if_caif.h>
+#include <net/rtnetlink.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfcnfg.h>
+#include <net/caif/cfpkt.h>
+#include <net/caif/caif_dev.h>
+
+/* GPRS PDP connections have an MTU of 1500 bytes */
+#define SIZE_MTU 1500
+/* 5 sec. connect timeout */
+#define CONNECT_TIMEOUT (5 * HZ)
+#define CAIF_NET_DEFAULT_QUEUE_LEN 500
+
+#undef pr_debug
+#define pr_debug pr_warning
+
+/*This list is protected by the rtnl lock. */
+static LIST_HEAD(chnl_net_list);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK("caif");
+
+enum caif_states {
+ CAIF_CONNECTED = 1,
+ CAIF_CONNECTING,
+ CAIF_DISCONNECTED,
+ CAIF_SHUTDOWN
+};
+
+struct chnl_net {
+ struct cflayer chnl;
+ struct net_device_stats stats;
+ struct caif_connect_request conn_req;
+ struct list_head list_field;
+ struct net_device *netdev;
+ char name[256];
+ wait_queue_head_t netmgmt_wq;
+ /* Flow status to remember and control the transmission. */
+ bool flowenabled;
+ enum caif_states state;
+};
+
+static void robust_list_del(struct list_head *delete_node)
+{
+ struct list_head *list_node;
+ struct list_head *n;
+ ASSERT_RTNL();
+ list_for_each_safe(list_node, n, &chnl_net_list) {
+ if (list_node == delete_node) {
+ list_del(list_node);
+ return;
+ }
+ }
+ WARN_ON(1);
+}
+
+static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
+{
+ struct sk_buff *skb;
+ struct chnl_net *priv = container_of(layr, struct chnl_net, chnl);
+ int pktlen;
+ int err = 0;
+
+ if (!priv)
+ return -EINVAL;
+
+ /* Get length of CAIF packet. */
+ pktlen = cfpkt_getlen(pkt);
+
+ skb = (struct sk_buff *) cfpkt_tonative(pkt);
+ /* Pass some minimum information and
+ * send the packet to the net stack.
+ */
+ skb->dev = priv->netdev;
+ skb->protocol = htons(ETH_P_IP);
+
+ /* If we change the header in loop mode, the checksum is corrupted. */
+ if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+
+ if (in_interrupt())
+ netif_rx(skb);
+ else
+ netif_rx_ni(skb);
+
+ /* Update statistics. */
+ priv->netdev->stats.rx_packets++;
+ priv->netdev->stats.rx_bytes += pktlen;
+
+ return err;
+}
+
+static int delete_device(struct chnl_net *dev)
+{
+ ASSERT_RTNL();
+ if (dev->netdev)
+ unregister_netdevice(dev->netdev);
+ return 0;
+}
+
+static void close_work(struct work_struct *work)
+{
+ struct chnl_net *dev = NULL;
+ struct list_head *list_node;
+ struct list_head *_tmp;
+ /* May be called with or without RTNL lock held */
+ int islocked = rtnl_is_locked();
+ if (!islocked)
+ rtnl_lock();
+ list_for_each_safe(list_node, _tmp, &chnl_net_list) {
+ dev = list_entry(list_node, struct chnl_net, list_field);
+ if (dev->state == CAIF_SHUTDOWN)
+ dev_close(dev->netdev);
+ }
+ if (!islocked)
+ rtnl_unlock();
+}
+static DECLARE_WORK(close_worker, close_work);
+
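+/*
+ * Map CAIF control indications onto the network device: flow on/off
+ * starts and stops the TX queue, while connect/disconnect responses
+ * update the state and wake anyone blocked in chnl_net_open().
+ */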
+static void chnl_flowctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow,
+ int phyid)
+{
+ struct chnl_net *priv = container_of(layr, struct chnl_net, chnl);
+ pr_debug("CAIF: %s(): NET flowctrl func called flow: %s\n",
+ __func__,
+ flow == CAIF_CTRLCMD_FLOW_ON_IND ? "ON" :
+ flow == CAIF_CTRLCMD_INIT_RSP ? "INIT" :
+ flow == CAIF_CTRLCMD_FLOW_OFF_IND ? "OFF" :
+ flow == CAIF_CTRLCMD_DEINIT_RSP ? "CLOSE/DEINIT" :
+ flow == CAIF_CTRLCMD_INIT_FAIL_RSP ? "OPEN_FAIL" :
+ flow == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND ?
+ "REMOTE_SHUTDOWN" : "UNKNOWN CTRL COMMAND");
+
+ switch (flow) {
+ case CAIF_CTRLCMD_FLOW_OFF_IND:
+ priv->flowenabled = false;
+ netif_stop_queue(priv->netdev);
+ break;
+ case CAIF_CTRLCMD_DEINIT_RSP:
+ priv->state = CAIF_DISCONNECTED;
+ break;
+ case CAIF_CTRLCMD_INIT_FAIL_RSP:
+ priv->state = CAIF_DISCONNECTED;
+ wake_up_interruptible(&priv->netmgmt_wq);
+ break;
+ case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
+ priv->state = CAIF_SHUTDOWN;
+ netif_tx_disable(priv->netdev);
+ schedule_work(&close_worker);
+ break;
+ case CAIF_CTRLCMD_FLOW_ON_IND:
+ priv->flowenabled = true;
+ netif_wake_queue(priv->netdev);
+ break;
+ case CAIF_CTRLCMD_INIT_RSP:
+ priv->state = CAIF_CONNECTED;
+ priv->flowenabled = true;
+ netif_wake_queue(priv->netdev);
+ wake_up_interruptible(&priv->netmgmt_wq);
+ break;
+ default:
+ break;
+ }
+}
+
+static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct chnl_net *priv;
+ struct cfpkt *pkt = NULL;
+ int len;
+ int result = -1;
+ /* Get our private data. */
+ priv = netdev_priv(dev);
+
+ if (skb->len > priv->netdev->mtu) {
+ pr_warning("CAIF: %s(): Size of skb exceeded MTU\n", __func__);
+ return -ENOSPC;
+ }
+
+ if (!priv->flowenabled) {
+ pr_debug("CAIF: %s(): dropping packets flow off\n", __func__);
+ return NETDEV_TX_BUSY;
+ }
+
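+ /* For loopback connections the packet is echoed back by the modem,
+ * so swap source and destination addresses here; the IP checksum is
+ * left alone, which is why the receive path marks loop packets as
+ * CHECKSUM_UNNECESSARY.
+ */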
+ if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP)
+ swap(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
+
+ /* Store original SKB length. */
+ len = skb->len;
+
+ pkt = cfpkt_fromnative(CAIF_DIR_OUT, (void *) skb);
+
+ /* Send the packet down the stack. */
+ result = priv->chnl.dn->transmit(priv->chnl.dn, pkt);
+ if (result) {
+ if (result == -EAGAIN)
+ result = NETDEV_TX_BUSY;
+ return result;
+ }
+
+ /* Update statistics. */
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += len;
+
+ return NETDEV_TX_OK;
+}
+
+static int chnl_net_open(struct net_device *dev)
+{
+ struct chnl_net *priv = NULL;
+ int result = -1;
+ ASSERT_RTNL();
+ priv = netdev_priv(dev);
+ if (!priv) {
+ pr_debug("CAIF: %s(): chnl_net_open: no priv\n", __func__);
+ return -ENODEV;
+ }
+
+ if (priv->state != CAIF_CONNECTING) {
+ priv->state = CAIF_CONNECTING;
+ result = caif_connect_client(&priv->conn_req, &priv->chnl);
+ if (result != 0) {
+ priv->state = CAIF_DISCONNECTED;
+ pr_debug("CAIF: %s(): err: "
+ "Unable to register and open device,"
+ " Err:%d\n",
+ __func__,
+ result);
+ return result;
+ }
+ }
+
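+ /*
+ * Wait until chnl_flowctrl_cb() moves the state out of
+ * CAIF_CONNECTING (CONNECTED on success, DISCONNECTED on failure)
+ * or the connect timeout expires.
+ */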
+ result = wait_event_interruptible_timeout(priv->netmgmt_wq,
+ priv->state != CAIF_CONNECTING,
+ CONNECT_TIMEOUT);
+
+ if (result == -ERESTARTSYS) {
+ pr_debug("CAIF: %s(): wait_event_interruptible"
+ " woken by a signal\n", __func__);
+ return -ERESTARTSYS;
+ }
+ if (result == 0) {
+ pr_debug("CAIF: %s(): connect timeout\n", __func__);
+ caif_disconnect_client(&priv->chnl);
+ priv->state = CAIF_DISCONNECTED;
+ pr_debug("CAIF: %s(): state disconnected\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ if (priv->state != CAIF_CONNECTED) {
+ pr_debug("CAIF: %s(): connect failed\n", __func__);
+ return -ECONNREFUSED;
+ }
+ pr_debug("CAIF: %s(): CAIF Netdevice connected\n", __func__);
+ return 0;
+}
+
+static int chnl_net_stop(struct net_device *dev)
+{
+ struct chnl_net *priv;
+
+ ASSERT_RTNL();
+ priv = netdev_priv(dev);
+ priv->state = CAIF_DISCONNECTED;
+ caif_disconnect_client(&priv->chnl);
+ return 0;
+}
+
+static int chnl_net_init(struct net_device *dev)
+{
+ struct chnl_net *priv;
+ ASSERT_RTNL();
+ priv = netdev_priv(dev);
+ strncpy(priv->name, dev->name, sizeof(priv->name));
+ return 0;
+}
+
+static void chnl_net_uninit(struct net_device *dev)
+{
+ struct chnl_net *priv;
+ ASSERT_RTNL();
+ priv = netdev_priv(dev);
+ robust_list_del(&priv->list_field);
+}
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = chnl_net_open,
+ .ndo_stop = chnl_net_stop,
+ .ndo_init = chnl_net_init,
+ .ndo_uninit = chnl_net_uninit,
+ .ndo_start_xmit = chnl_net_start_xmit,
+};
+
+static void ipcaif_net_setup(struct net_device *dev)
+{
+ struct chnl_net *priv;
+ dev->netdev_ops = &netdev_ops;
+ dev->destructor = free_netdev;
+ dev->flags |= IFF_NOARP;
+ dev->flags |= IFF_POINTOPOINT;
+ dev->needed_headroom = CAIF_NEEDED_HEADROOM;
+ dev->needed_tailroom = CAIF_NEEDED_TAILROOM;
+ dev->mtu = SIZE_MTU;
+ dev->tx_queue_len = CAIF_NET_DEFAULT_QUEUE_LEN;
+
+ priv = netdev_priv(dev);
+ priv->chnl.receive = chnl_recv_cb;
+ priv->chnl.ctrlcmd = chnl_flowctrl_cb;
+ priv->netdev = dev;
+ priv->conn_req.protocol = CAIFPROTO_DATAGRAM;
+ priv->conn_req.link_selector = CAIF_LINK_HIGH_BANDW;
+ priv->conn_req.priority = CAIF_PRIO_LOW;
+ /* Insert illegal value */
+ priv->conn_req.sockaddr.u.dgm.connection_id = -1;
+ priv->flowenabled = false;
+
+ ASSERT_RTNL();
+ init_waitqueue_head(&priv->netmgmt_wq);
+ list_add(&priv->list_field, &chnl_net_list);
+}
+
+
+static int ipcaif_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct chnl_net *priv;
+ u8 loop;
+ priv = netdev_priv(dev);
+ NLA_PUT_U32(skb, IFLA_CAIF_IPV4_CONNID,
+ priv->conn_req.sockaddr.u.dgm.connection_id);
+ NLA_PUT_U32(skb, IFLA_CAIF_IPV6_CONNID,
+ priv->conn_req.sockaddr.u.dgm.connection_id);
+ loop = priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP;
+ NLA_PUT_U8(skb, IFLA_CAIF_LOOPBACK, loop);
+
+
+ return 0;
+nla_put_failure:
+ return -EMSGSIZE;
+
+}
+
+static void caif_netlink_parms(struct nlattr *data[],
+ struct caif_connect_request *conn_req)
+{
+ if (!data) {
+ pr_warning("CAIF: %s: no params data found\n", __func__);
+ return;
+ }
+ if (data[IFLA_CAIF_IPV4_CONNID])
+ conn_req->sockaddr.u.dgm.connection_id =
+ nla_get_u32(data[IFLA_CAIF_IPV4_CONNID]);
+ if (data[IFLA_CAIF_IPV6_CONNID])
+ conn_req->sockaddr.u.dgm.connection_id =
+ nla_get_u32(data[IFLA_CAIF_IPV6_CONNID]);
+ if (data[IFLA_CAIF_LOOPBACK]) {
+ if (nla_get_u8(data[IFLA_CAIF_LOOPBACK]))
+ conn_req->protocol = CAIFPROTO_DATAGRAM_LOOP;
+ else
+ conn_req->protocol = CAIFPROTO_DATAGRAM;
+ }
+}
+
+static int ipcaif_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ int ret;
+ struct chnl_net *caifdev;
+ ASSERT_RTNL();
+ caifdev = netdev_priv(dev);
+ caif_netlink_parms(data, &caifdev->conn_req);
+ dev_net_set(caifdev->netdev, src_net);
+
+ ret = register_netdevice(dev);
+ if (ret)
+ pr_warning("CAIF: %s(): device rtml registration failed\n",
+ __func__);
+ return ret;
+}
+
+static int ipcaif_changelink(struct net_device *dev, struct nlattr *tb[],
+ struct nlattr *data[])
+{
+ struct chnl_net *caifdev;
+ ASSERT_RTNL();
+ caifdev = netdev_priv(dev);
+ caif_netlink_parms(data, &caifdev->conn_req);
+ netdev_state_change(dev);
+ return 0;
+}
+
+static size_t ipcaif_get_size(const struct net_device *dev)
+{
+ return
+ /* IFLA_CAIF_IPV4_CONNID */
+ nla_total_size(4) +
+ /* IFLA_CAIF_IPV6_CONNID */
+ nla_total_size(4) +
+ /* IFLA_CAIF_LOOPBACK */
+ nla_total_size(2) +
+ 0;
+}
+
+static const struct nla_policy ipcaif_policy[IFLA_CAIF_MAX + 1] = {
+ [IFLA_CAIF_IPV4_CONNID] = { .type = NLA_U32 },
+ [IFLA_CAIF_IPV6_CONNID] = { .type = NLA_U32 },
+ [IFLA_CAIF_LOOPBACK] = { .type = NLA_U8 }
+};
+
+
+static struct rtnl_link_ops ipcaif_link_ops __read_mostly = {
+ .kind = "caif",
+ .priv_size = sizeof(struct chnl_net),
+ .setup = ipcaif_net_setup,
+ .maxtype = IFLA_CAIF_MAX,
+ .policy = ipcaif_policy,
+ .newlink = ipcaif_newlink,
+ .changelink = ipcaif_changelink,
+ .get_size = ipcaif_get_size,
+ .fill_info = ipcaif_fill_info,
+
+};
+
+static int __init chnl_init_module(void)
+{
+ return rtnl_link_register(&ipcaif_link_ops);
+}
+
+static void __exit chnl_exit_module(void)
+{
+ struct chnl_net *dev = NULL;
+ struct list_head *list_node;
+ struct list_head *_tmp;
+ rtnl_link_unregister(&ipcaif_link_ops);
+ rtnl_lock();
+ list_for_each_safe(list_node, _tmp, &chnl_net_list) {
+ dev = list_entry(list_node, struct chnl_net, list_field);
+ list_del(list_node);
+ delete_device(dev);
+ }
+ rtnl_unlock();
+}
+
+module_init(chnl_init_module);
+module_exit(chnl_exit_module);
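The open path above distinguishes three outcomes of wait_event_interruptible_timeout(): a negative value means the sleeper was woken by a signal, zero means the connect timed out, and a positive value means the state changed before the timeout. A minimal userspace sketch of that decision, illustrative only and not part of the patch (ERESTARTSYS is the kernel-internal errno value, defined locally here):

#include <errno.h>
#include <stdio.h>

#define ERESTARTSYS 512    /* kernel-internal errno, not exported to userspace */

/* Mirrors the result handling in chnl_net_open(): 'result' follows the
 * wait_event_interruptible_timeout() convention, 'connected' says whether
 * the channel reached CAIF_CONNECTED while we slept. */
static int open_wait_to_errno(long result, int connected)
{
    if (result == -ERESTARTSYS)
        return -ERESTARTSYS;    /* interrupted by a signal */
    if (result == 0)
        return -ETIMEDOUT;      /* timed out; the driver also tears the channel down */
    if (!connected)
        return -ECONNREFUSED;   /* woke up, but the connect was rejected */
    return 0;                   /* connected, open succeeds */
}

int main(void)
{
    printf("%d %d %d %d\n",
           open_wait_to_errno(-ERESTARTSYS, 0),
           open_wait_to_errno(0, 0),
           open_wait_to_errno(5, 0),
           open_wait_to_errno(5, 1));
    return 0;
}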
diff --git a/net/core/Makefile b/net/core/Makefile
index 08791ac3e05a..51c3eec850ef 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -7,7 +7,7 @@ obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \
obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
-obj-y += dev.o ethtool.o dev_mcast.o dst.o netevent.o \
+obj-y += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \
neighbour.o rtnetlink.o utils.o link_watch.o filter.o
obj-$(CONFIG_XFRM) += flow.o
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 2dccd4ee591b..e0097531417a 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -86,7 +86,7 @@ static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
int error;
DEFINE_WAIT_FUNC(wait, receiver_wake_function);
- prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
/* Socket errors? */
error = sock_error(sk);
@@ -115,7 +115,7 @@ static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
error = 0;
*timeo_p = schedule_timeout(*timeo_p);
out:
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
return error;
interrupted:
error = sock_intr_errno(*timeo_p);
@@ -229,9 +229,18 @@ EXPORT_SYMBOL(skb_free_datagram);
void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
{
- lock_sock(sk);
- skb_free_datagram(sk, skb);
- release_sock(sk);
+ if (likely(atomic_read(&skb->users) == 1))
+ smp_rmb();
+ else if (likely(!atomic_dec_and_test(&skb->users)))
+ return;
+
+ lock_sock_bh(sk);
+ skb_orphan(skb);
+ sk_mem_reclaim_partial(sk);
+ unlock_sock_bh(sk);
+
+ /* skb is now orphaned, can be freed outside of locked section */
+ __kfree_skb(skb);
}
EXPORT_SYMBOL(skb_free_datagram_locked);
@@ -726,7 +735,7 @@ unsigned int datagram_poll(struct file *file, struct socket *sock,
struct sock *sk = sock->sk;
unsigned int mask;
- sock_poll_wait(file, sk->sk_sleep, wait);
+ sock_poll_wait(file, sk_sleep(sk), wait);
mask = 0;
/* exceptional events? */
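The rewritten skb_free_datagram_locked() above avoids taking the socket lock in the common case: when the skb has a single user it can be orphaned and freed directly, otherwise the caller's reference is dropped and only the last holder frees it. A standalone userspace sketch of that refcount pattern (names and layout are illustrative, not the kernel API):

#include <stdatomic.h>
#include <stdlib.h>

struct buf {
    atomic_int users;    /* stands in for skb->users */
    /* payload would live here */
};

static void buf_free_fast(struct buf *b)
{
    if (atomic_load_explicit(&b->users, memory_order_acquire) == 1) {
        /* sole owner: skip the atomic read-modify-write entirely */
    } else if (atomic_fetch_sub(&b->users, 1) != 1) {
        /* another holder remains; it will free the buffer later */
        return;
    }
    free(b);    /* the brief locked bookkeeping would happen just before this */
}

int main(void)
{
    struct buf *b = malloc(sizeof(*b));
    if (!b)
        return 1;
    atomic_init(&b->users, 1);
    buf_free_fast(b);
    return 0;
}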
diff --git a/net/core/dev.c b/net/core/dev.c
index f769098774b7..3daee30a7c82 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -130,6 +130,7 @@
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
+#include <linux/pci.h>
#include "net-sysfs.h"
@@ -207,6 +208,20 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}
+static inline void rps_lock(struct softnet_data *sd)
+{
+#ifdef CONFIG_RPS
+ spin_lock(&sd->input_pkt_queue.lock);
+#endif
+}
+
+static inline void rps_unlock(struct softnet_data *sd)
+{
+#ifdef CONFIG_RPS
+ spin_unlock(&sd->input_pkt_queue.lock);
+#endif
+}
+
/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
@@ -249,7 +264,7 @@ static RAW_NOTIFIER_HEAD(netdev_chain);
* queue in the local softnet handler.
*/
-DEFINE_PER_CPU(struct softnet_data, softnet_data);
+DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);
#ifdef CONFIG_LOCKDEP
@@ -773,14 +788,17 @@ EXPORT_SYMBOL(__dev_getfirstbyhwtype);
struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
- struct net_device *dev;
+ struct net_device *dev, *ret = NULL;
- rtnl_lock();
- dev = __dev_getfirstbyhwtype(net, type);
- if (dev)
- dev_hold(dev);
- rtnl_unlock();
- return dev;
+ rcu_read_lock();
+ for_each_netdev_rcu(net, dev)
+ if (dev->type == type) {
+ dev_hold(dev);
+ ret = dev;
+ break;
+ }
+ rcu_read_unlock();
+ return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);
@@ -1085,9 +1103,9 @@ void netdev_state_change(struct net_device *dev)
}
EXPORT_SYMBOL(netdev_state_change);
-void netdev_bonding_change(struct net_device *dev, unsigned long event)
+int netdev_bonding_change(struct net_device *dev, unsigned long event)
{
- call_netdevice_notifiers(event, dev);
+ return call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);
@@ -1417,6 +1435,7 @@ EXPORT_SYMBOL(unregister_netdevice_notifier);
int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
+ ASSERT_RTNL();
return raw_notifier_call_chain(&netdev_chain, val, dev);
}
@@ -1451,7 +1470,7 @@ static inline void net_timestamp(struct sk_buff *skb)
*
* return values:
* NET_RX_SUCCESS (no congestion)
- * NET_RX_DROP (packet was dropped)
+ * NET_RX_DROP (packet was dropped, but freed)
*
* dev_forward_skb can be used for injecting an skb from the
* start_xmit function of one device into the receive queue
@@ -1465,12 +1484,11 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
skb_orphan(skb);
- if (!(dev->flags & IFF_UP))
- return NET_RX_DROP;
-
- if (skb->len > (dev->mtu + dev->hard_header_len))
+ if (!(dev->flags & IFF_UP) ||
+ (skb->len > (dev->mtu + dev->hard_header_len))) {
+ kfree_skb(skb);
return NET_RX_DROP;
-
+ }
skb_set_dev(skb, dev);
skb->tstamp.tv64 = 0;
skb->pkt_type = PACKET_HOST;
@@ -1538,8 +1556,9 @@ static inline void __netif_reschedule(struct Qdisc *q)
local_irq_save(flags);
sd = &__get_cpu_var(softnet_data);
- q->next_sched = sd->output_queue;
- sd->output_queue = q;
+ q->next_sched = NULL;
+ *sd->output_queue_tailp = q;
+ sd->output_queue_tailp = &q->next_sched;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
}
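softnet_data now carries output_queue_tailp, a pointer to the next_sched field of the last queued qdisc, so __netif_reschedule() above and the CPU-hotplug path later in this patch can append in O(1) instead of pushing onto the head or walking to the end of the list. A small userspace sketch of the pointer-to-pointer tail idiom (not kernel code):

#include <stdio.h>
#include <stdlib.h>

struct node {
    struct node *next;
    int id;
};

/* queue head plus a pointer to the location that holds the next tail link */
static struct node *queue_head;
static struct node **queue_tailp = &queue_head;

static void queue_append(struct node *n)
{
    n->next = NULL;
    *queue_tailp = n;          /* link after the current tail (or set the head) */
    queue_tailp = &n->next;    /* remember where the next append goes */
}

int main(void)
{
    for (int i = 0; i < 3; i++) {
        struct node *n = malloc(sizeof(*n));
        if (!n)
            return 1;
        n->id = i;
        queue_append(n);
    }
    for (struct node *n = queue_head; n; n = n->next)
        printf("%d\n", n->id);
    return 0;
}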
@@ -1784,18 +1803,27 @@ EXPORT_SYMBOL(netdev_rx_csum_fault);
* 2. No high memory really exists on this machine.
*/
-static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
+static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
int i;
+ if (!(dev->features & NETIF_F_HIGHDMA)) {
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+ if (PageHighMem(skb_shinfo(skb)->frags[i].page))
+ return 1;
+ }
- if (dev->features & NETIF_F_HIGHDMA)
- return 0;
-
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
- if (PageHighMem(skb_shinfo(skb)->frags[i].page))
- return 1;
+ if (PCI_DMA_BUS_IS_PHYS) {
+ struct device *pdev = dev->dev.parent;
+ if (!pdev)
+ return 0;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ dma_addr_t addr = page_to_phys(skb_shinfo(skb)->frags[i].page);
+ if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
+ return 1;
+ }
+ }
#endif
return 0;
}
@@ -1853,6 +1881,17 @@ static int dev_gso_segment(struct sk_buff *skb)
return 0;
}
+/*
+ * Try to orphan skb early, right before transmission by the device.
+ * We cannot orphan skb if tx timestamp is requested, since
+ * drivers need to call skb_tstamp_tx() to send the timestamp.
+ */
+static inline void skb_orphan_try(struct sk_buff *skb)
+{
+ if (!skb_tx(skb)->flags)
+ skb_orphan(skb);
+}
+
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq)
{
@@ -1863,13 +1902,6 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
if (!list_empty(&ptype_all))
dev_queue_xmit_nit(skb, dev);
- if (netif_needs_gso(dev, skb)) {
- if (unlikely(dev_gso_segment(skb)))
- goto out_kfree_skb;
- if (skb->next)
- goto gso;
- }
-
/*
* If device doesnt need skb->dst, release it right now while
* its hot in this cpu cache
@@ -1877,23 +1909,18 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
skb_dst_drop(skb);
+ skb_orphan_try(skb);
+
+ if (netif_needs_gso(dev, skb)) {
+ if (unlikely(dev_gso_segment(skb)))
+ goto out_kfree_skb;
+ if (skb->next)
+ goto gso;
+ }
+
rc = ops->ndo_start_xmit(skb, dev);
if (rc == NETDEV_TX_OK)
txq_trans_update(txq);
- /*
- * TODO: if skb_orphan() was called by
- * dev->hard_start_xmit() (for example, the unmodified
- * igb driver does that; bnx2 doesn't), then
- * skb_tx_software_timestamp() will be unable to send
- * back the time stamp.
- *
- * How can this be prevented? Always create another
- * reference to the socket before calling
- * dev->hard_start_xmit()? Prevent that skb_orphan()
- * does anything in dev->hard_start_xmit() by clearing
- * the skb destructor before the call and restoring it
- * afterwards, then doing the skb_orphan() ourselves?
- */
return rc;
}
@@ -1932,7 +1959,7 @@ out_kfree_skb:
return rc;
}
-static u32 skb_tx_hashrnd;
+static u32 hashrnd __read_mostly;
u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
{
@@ -1948,9 +1975,9 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
if (skb->sk && skb->sk->sk_hash)
hash = skb->sk->sk_hash;
else
- hash = skb->protocol;
+ hash = (__force u16) skb->protocol;
- hash = jhash_1word(hash, skb_tx_hashrnd);
+ hash = jhash_1word(hash, hashrnd);
return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
}
@@ -1960,10 +1987,9 @@ static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
if (unlikely(queue_index >= dev->real_num_tx_queues)) {
if (net_ratelimit()) {
- WARN(1, "%s selects TX queue %d, but "
- "real number of TX queues is %d\n",
- dev->name, queue_index,
- dev->real_num_tx_queues);
+ pr_warning("%s selects TX queue %d, but "
+ "real number of TX queues is %d\n",
+ dev->name, queue_index, dev->real_num_tx_queues);
}
return 0;
}
@@ -1990,7 +2016,7 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
queue_index = skb_tx_hash(dev, skb);
if (sk) {
- struct dst_entry *dst = rcu_dereference_bh(sk->sk_dst_cache);
+ struct dst_entry *dst = rcu_dereference_check(sk->sk_dst_cache, 1);
if (dst && skb_dst(skb) == dst)
sk_tx_queue_set(sk, queue_index);
@@ -2178,8 +2204,243 @@ int netdev_max_backlog __read_mostly = 1000;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64; /* old backlog weight */
-DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
+/* Called with irq disabled */
+static inline void ____napi_schedule(struct softnet_data *sd,
+ struct napi_struct *napi)
+{
+ list_add_tail(&napi->poll_list, &sd->poll_list);
+ __raise_softirq_irqoff(NET_RX_SOFTIRQ);
+}
+
+#ifdef CONFIG_RPS
+
+/* One global table that all flow-based protocols share. */
+struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
+EXPORT_SYMBOL(rps_sock_flow_table);
+
+/*
+ * get_rps_cpu is called from netif_receive_skb and returns the target
+ * CPU from the RPS map of the receiving queue for a given skb.
+ * rcu_read_lock must be held on entry.
+ */
+static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
+ struct rps_dev_flow **rflowp)
+{
+ struct ipv6hdr *ip6;
+ struct iphdr *ip;
+ struct netdev_rx_queue *rxqueue;
+ struct rps_map *map;
+ struct rps_dev_flow_table *flow_table;
+ struct rps_sock_flow_table *sock_flow_table;
+ int cpu = -1;
+ u8 ip_proto;
+ u16 tcpu;
+ u32 addr1, addr2, ihl;
+ union {
+ u32 v32;
+ u16 v16[2];
+ } ports;
+
+ if (skb_rx_queue_recorded(skb)) {
+ u16 index = skb_get_rx_queue(skb);
+ if (unlikely(index >= dev->num_rx_queues)) {
+ if (net_ratelimit()) {
+ pr_warning("%s received packet on queue "
+ "%u, but number of RX queues is %u\n",
+ dev->name, index, dev->num_rx_queues);
+ }
+ goto done;
+ }
+ rxqueue = dev->_rx + index;
+ } else
+ rxqueue = dev->_rx;
+
+ if (!rxqueue->rps_map && !rxqueue->rps_flow_table)
+ goto done;
+
+ if (skb->rxhash)
+ goto got_hash; /* Skip hash computation on packet header */
+
+ switch (skb->protocol) {
+ case __constant_htons(ETH_P_IP):
+ if (!pskb_may_pull(skb, sizeof(*ip)))
+ goto done;
+
+ ip = (struct iphdr *) skb->data;
+ ip_proto = ip->protocol;
+ addr1 = (__force u32) ip->saddr;
+ addr2 = (__force u32) ip->daddr;
+ ihl = ip->ihl;
+ break;
+ case __constant_htons(ETH_P_IPV6):
+ if (!pskb_may_pull(skb, sizeof(*ip6)))
+ goto done;
+
+ ip6 = (struct ipv6hdr *) skb->data;
+ ip_proto = ip6->nexthdr;
+ addr1 = (__force u32) ip6->saddr.s6_addr32[3];
+ addr2 = (__force u32) ip6->daddr.s6_addr32[3];
+ ihl = (40 >> 2);
+ break;
+ default:
+ goto done;
+ }
+ switch (ip_proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ case IPPROTO_DCCP:
+ case IPPROTO_ESP:
+ case IPPROTO_AH:
+ case IPPROTO_SCTP:
+ case IPPROTO_UDPLITE:
+ if (pskb_may_pull(skb, (ihl * 4) + 4)) {
+ ports.v32 = * (__force u32 *) (skb->data + (ihl * 4));
+ if (ports.v16[1] < ports.v16[0])
+ swap(ports.v16[0], ports.v16[1]);
+ break;
+ }
+ default:
+ ports.v32 = 0;
+ break;
+ }
+
+ /* get a consistent hash (same value on both flow directions) */
+ if (addr2 < addr1)
+ swap(addr1, addr2);
+ skb->rxhash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
+ if (!skb->rxhash)
+ skb->rxhash = 1;
+
+got_hash:
+ flow_table = rcu_dereference(rxqueue->rps_flow_table);
+ sock_flow_table = rcu_dereference(rps_sock_flow_table);
+ if (flow_table && sock_flow_table) {
+ u16 next_cpu;
+ struct rps_dev_flow *rflow;
+
+ rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
+ tcpu = rflow->cpu;
+
+ next_cpu = sock_flow_table->ents[skb->rxhash &
+ sock_flow_table->mask];
+
+ /*
+ * If the desired CPU (where last recvmsg was done) is
+ * different from current CPU (one in the rx-queue flow
+ * table entry), switch if one of the following holds:
+ * - Current CPU is unset (equal to RPS_NO_CPU).
+ * - Current CPU is offline.
+ * - The current CPU's queue tail has advanced beyond the
+ * last packet that was enqueued using this table entry.
+ * This guarantees that all previous packets for the flow
+ * have been dequeued, thus preserving in order delivery.
+ */
+ if (unlikely(tcpu != next_cpu) &&
+ (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
+ ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
+ rflow->last_qtail)) >= 0)) {
+ tcpu = rflow->cpu = next_cpu;
+ if (tcpu != RPS_NO_CPU)
+ rflow->last_qtail = per_cpu(softnet_data,
+ tcpu).input_queue_head;
+ }
+ if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
+ *rflowp = rflow;
+ cpu = tcpu;
+ goto done;
+ }
+ }
+
+ map = rcu_dereference(rxqueue->rps_map);
+ if (map) {
+ tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
+
+ if (cpu_online(tcpu)) {
+ cpu = tcpu;
+ goto done;
+ }
+ }
+
+done:
+ return cpu;
+}
+
+/* Called from hardirq (IPI) context */
+static void rps_trigger_softirq(void *data)
+{
+ struct softnet_data *sd = data;
+
+ ____napi_schedule(sd, &sd->backlog);
+ sd->received_rps++;
+}
+
+#endif /* CONFIG_RPS */
+/*
+ * Check whether this softnet_data structure belongs to another CPU.
+ * If so, queue it on our IPI list and return 1;
+ * if not, return 0.
+ */
+static int rps_ipi_queued(struct softnet_data *sd)
+{
+#ifdef CONFIG_RPS
+ struct softnet_data *mysd = &__get_cpu_var(softnet_data);
+
+ if (sd != mysd) {
+ sd->rps_ipi_next = mysd->rps_ipi_list;
+ mysd->rps_ipi_list = sd;
+
+ __raise_softirq_irqoff(NET_RX_SOFTIRQ);
+ return 1;
+ }
+#endif /* CONFIG_RPS */
+ return 0;
+}
+
+/*
+ * enqueue_to_backlog is called to queue an skb to a per-CPU backlog
+ * queue (may be a remote CPU queue).
+ */
+static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
+ unsigned int *qtail)
+{
+ struct softnet_data *sd;
+ unsigned long flags;
+
+ sd = &per_cpu(softnet_data, cpu);
+
+ local_irq_save(flags);
+
+ rps_lock(sd);
+ if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
+ if (skb_queue_len(&sd->input_pkt_queue)) {
+enqueue:
+ __skb_queue_tail(&sd->input_pkt_queue, skb);
+#ifdef CONFIG_RPS
+ *qtail = sd->input_queue_head +
+ skb_queue_len(&sd->input_pkt_queue);
+#endif
+ rps_unlock(sd);
+ local_irq_restore(flags);
+ return NET_RX_SUCCESS;
+ }
+
+ /* Schedule NAPI for backlog device */
+ if (napi_schedule_prep(&sd->backlog)) {
+ if (!rps_ipi_queued(sd))
+ ____napi_schedule(sd, &sd->backlog);
+ }
+ goto enqueue;
+ }
+
+ sd->dropped++;
+ rps_unlock(sd);
+
+ local_irq_restore(flags);
+
+ kfree_skb(skb);
+ return NET_RX_DROP;
+}
/**
* netif_rx - post buffer to the network code
@@ -2198,8 +2459,7 @@ DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
int netif_rx(struct sk_buff *skb)
{
- struct softnet_data *queue;
- unsigned long flags;
+ int ret;
/* if netpoll wants it, pretend we never saw it */
if (netpoll_rx(skb))
@@ -2208,31 +2468,29 @@ int netif_rx(struct sk_buff *skb)
if (!skb->tstamp.tv64)
net_timestamp(skb);
- /*
- * The code is rearranged so that the path is the most
- * short when CPU is congested, but is still operating.
- */
- local_irq_save(flags);
- queue = &__get_cpu_var(softnet_data);
+#ifdef CONFIG_RPS
+ {
+ struct rps_dev_flow voidflow, *rflow = &voidflow;
+ int cpu;
- __get_cpu_var(netdev_rx_stat).total++;
- if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
- if (queue->input_pkt_queue.qlen) {
-enqueue:
- __skb_queue_tail(&queue->input_pkt_queue, skb);
- local_irq_restore(flags);
- return NET_RX_SUCCESS;
- }
+ rcu_read_lock();
- napi_schedule(&queue->backlog);
- goto enqueue;
- }
+ cpu = get_rps_cpu(skb->dev, skb, &rflow);
+ if (cpu < 0)
+ cpu = smp_processor_id();
- __get_cpu_var(netdev_rx_stat).dropped++;
- local_irq_restore(flags);
+ ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
- kfree_skb(skb);
- return NET_RX_DROP;
+ rcu_read_unlock();
+ }
+#else
+ {
+ unsigned int qtail;
+ ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
+ put_cpu();
+ }
+#endif
+ return ret;
}
EXPORT_SYMBOL(netif_rx);
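get_rps_cpu() above derives a flow hash from the addresses and ports and then picks a CPU from the receive queue's rps_map: sorting the two addresses and the two ports before hashing makes both directions of a flow hash to the same value, and the CPU index is taken as (hash * map_len) >> 32 rather than a modulo. A simplified userspace sketch of that selection (the mixing function below merely stands in for jhash_3words(), and the port packing is not byte-identical to the kernel's):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for jhash_3words(); the kernel uses jhash. */
static uint32_t mix3(uint32_t a, uint32_t b, uint32_t c, uint32_t seed)
{
    uint32_t h = seed ^ a;
    h = (h ^ b) * 0x9e3779b1u;
    h = (h ^ c) * 0x85ebca6bu;
    h ^= h >> 16;
    return h;
}

static unsigned int pick_cpu(uint32_t saddr, uint32_t daddr,
                             uint16_t sport, uint16_t dport,
                             unsigned int map_len, uint32_t seed)
{
    uint32_t hash;

    /* canonical order so that A->B and B->A hash identically */
    if (daddr < saddr) {
        uint32_t t = saddr; saddr = daddr; daddr = t;
    }
    if (dport < sport) {
        uint16_t t = sport; sport = dport; dport = t;
    }
    hash = mix3(saddr, daddr, ((uint32_t)sport << 16) | dport, seed);

    /* same scaling as the kernel: (hash * len) >> 32, no modulo */
    return (unsigned int)(((uint64_t)hash * map_len) >> 32);
}

int main(void)
{
    unsigned int a = pick_cpu(0x0a000001, 0x0a000002, 1234, 80, 8, 42);
    unsigned int b = pick_cpu(0x0a000002, 0x0a000001, 80, 1234, 8, 42);
    printf("%u %u\n", a, b);    /* both directions map to the same CPU */
    return 0;
}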
@@ -2277,6 +2535,7 @@ static void net_tx_action(struct softirq_action *h)
local_irq_disable();
head = sd->output_queue;
sd->output_queue = NULL;
+ sd->output_queue_tailp = &sd->output_queue;
local_irq_enable();
while (head) {
@@ -2469,22 +2728,56 @@ void netif_nit_deliver(struct sk_buff *skb)
rcu_read_unlock();
}
-/**
- * netif_receive_skb - process receive buffer from network
- * @skb: buffer to process
- *
- * netif_receive_skb() is the main receive data processing function.
- * It always succeeds. The buffer may be dropped during processing
- * for congestion control or by the protocol layers.
- *
- * This function may only be called from softirq context and interrupts
- * should be enabled.
- *
- * Return values (usually ignored):
- * NET_RX_SUCCESS: no congestion
- * NET_RX_DROP: packet was dropped
+static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
+ struct net_device *master)
+{
+ if (skb->pkt_type == PACKET_HOST) {
+ u16 *dest = (u16 *) eth_hdr(skb)->h_dest;
+
+ memcpy(dest, master->dev_addr, ETH_ALEN);
+ }
+}
+
+/* On bonding slaves other than the currently active slave, suppress
+ * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
+ * ARP on active-backup slaves with arp_validate enabled.
*/
-int netif_receive_skb(struct sk_buff *skb)
+int __skb_bond_should_drop(struct sk_buff *skb, struct net_device *master)
+{
+ struct net_device *dev = skb->dev;
+
+ if (master->priv_flags & IFF_MASTER_ARPMON)
+ dev->last_rx = jiffies;
+
+ if ((master->priv_flags & IFF_MASTER_ALB) && master->br_port) {
+ /* Do address unmangle. The local destination address
+ * will be always the one master has. Provides the right
+ * functionality in a bridge.
+ */
+ skb_bond_set_mac_by_master(skb, master);
+ }
+
+ if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
+ if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
+ skb->protocol == __cpu_to_be16(ETH_P_ARP))
+ return 0;
+
+ if (master->priv_flags & IFF_MASTER_ALB) {
+ if (skb->pkt_type != PACKET_BROADCAST &&
+ skb->pkt_type != PACKET_MULTICAST)
+ return 0;
+ }
+ if (master->priv_flags & IFF_MASTER_8023AD &&
+ skb->protocol == __cpu_to_be16(ETH_P_SLOW))
+ return 0;
+
+ return 1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(__skb_bond_should_drop);
+
+static int __netif_receive_skb(struct sk_buff *skb)
{
struct packet_type *ptype, *pt_prev;
struct net_device *orig_dev;
@@ -2517,7 +2810,7 @@ int netif_receive_skb(struct sk_buff *skb)
skb->dev = master;
}
- __get_cpu_var(netdev_rx_stat).total++;
+ __get_cpu_var(softnet_data).processed++;
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
@@ -2595,20 +2888,72 @@ out:
rcu_read_unlock();
return ret;
}
+
+/**
+ * netif_receive_skb - process receive buffer from network
+ * @skb: buffer to process
+ *
+ * netif_receive_skb() is the main receive data processing function.
+ * It always succeeds. The buffer may be dropped during processing
+ * for congestion control or by the protocol layers.
+ *
+ * This function may only be called from softirq context and interrupts
+ * should be enabled.
+ *
+ * Return values (usually ignored):
+ * NET_RX_SUCCESS: no congestion
+ * NET_RX_DROP: packet was dropped
+ */
+int netif_receive_skb(struct sk_buff *skb)
+{
+#ifdef CONFIG_RPS
+ struct rps_dev_flow voidflow, *rflow = &voidflow;
+ int cpu, ret;
+
+ rcu_read_lock();
+
+ cpu = get_rps_cpu(skb->dev, skb, &rflow);
+
+ if (cpu >= 0) {
+ ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
+ rcu_read_unlock();
+ } else {
+ rcu_read_unlock();
+ ret = __netif_receive_skb(skb);
+ }
+
+ return ret;
+#else
+ return __netif_receive_skb(skb);
+#endif
+}
EXPORT_SYMBOL(netif_receive_skb);
-/* Network device is going away, flush any packets still pending */
+/* Network device is going away, flush any packets still pending
+ * Called with irqs disabled.
+ */
static void flush_backlog(void *arg)
{
struct net_device *dev = arg;
- struct softnet_data *queue = &__get_cpu_var(softnet_data);
+ struct softnet_data *sd = &__get_cpu_var(softnet_data);
struct sk_buff *skb, *tmp;
- skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
+ rps_lock(sd);
+ skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
if (skb->dev == dev) {
- __skb_unlink(skb, &queue->input_pkt_queue);
+ __skb_unlink(skb, &sd->input_pkt_queue);
kfree_skb(skb);
+ input_queue_head_add(sd, 1);
}
+ }
+ rps_unlock(sd);
+
+ skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
+ if (skb->dev == dev) {
+ __skb_unlink(skb, &sd->process_queue);
+ kfree_skb(skb);
+ }
+ }
}
static int napi_gro_complete(struct sk_buff *skb)
@@ -2911,27 +3256,85 @@ gro_result_t napi_gro_frags(struct napi_struct *napi)
}
EXPORT_SYMBOL(napi_gro_frags);
+/*
+ * net_rps_action sends any pending IPIs for RPS.
+ * Note: called with local irq disabled, but exits with local irq enabled.
+ */
+static void net_rps_action_and_irq_enable(struct softnet_data *sd)
+{
+#ifdef CONFIG_RPS
+ struct softnet_data *remsd = sd->rps_ipi_list;
+
+ if (remsd) {
+ sd->rps_ipi_list = NULL;
+
+ local_irq_enable();
+
+ /* Send pending IPIs to kick RPS processing on remote CPUs. */
+ while (remsd) {
+ struct softnet_data *next = remsd->rps_ipi_next;
+
+ if (cpu_online(remsd->cpu))
+ __smp_call_function_single(remsd->cpu,
+ &remsd->csd, 0);
+ remsd = next;
+ }
+ } else
+#endif
+ local_irq_enable();
+}
+
static int process_backlog(struct napi_struct *napi, int quota)
{
int work = 0;
- struct softnet_data *queue = &__get_cpu_var(softnet_data);
- unsigned long start_time = jiffies;
+ struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
+#ifdef CONFIG_RPS
+ /* If we have pending IPIs, it's better to send them now
+ * rather than waiting for net_rx_action() to end.
+ */
+ if (sd->rps_ipi_list) {
+ local_irq_disable();
+ net_rps_action_and_irq_enable(sd);
+ }
+#endif
napi->weight = weight_p;
- do {
+ local_irq_disable();
+ while (work < quota) {
struct sk_buff *skb;
+ unsigned int qlen;
- local_irq_disable();
- skb = __skb_dequeue(&queue->input_pkt_queue);
- if (!skb) {
- __napi_complete(napi);
+ while ((skb = __skb_dequeue(&sd->process_queue))) {
local_irq_enable();
- break;
+ __netif_receive_skb(skb);
+ if (++work >= quota)
+ return work;
+ local_irq_disable();
}
- local_irq_enable();
- netif_receive_skb(skb);
- } while (++work < quota && jiffies == start_time);
+ rps_lock(sd);
+ qlen = skb_queue_len(&sd->input_pkt_queue);
+ if (qlen) {
+ input_queue_head_add(sd, qlen);
+ skb_queue_splice_tail_init(&sd->input_pkt_queue,
+ &sd->process_queue);
+ }
+ if (qlen < quota - work) {
+ /*
+ * Inline a custom version of __napi_complete().
+ * Only the current CPU owns and manipulates this NAPI,
+ * and NAPI_STATE_SCHED is the only possible flag set on backlog,
+ * so we can use a plain write instead of clear_bit()
+ * and we don't need an smp_mb() memory barrier.
+ */
+ list_del(&napi->poll_list);
+ napi->state = 0;
+
+ quota = work + qlen;
+ }
+ rps_unlock(sd);
+ }
+ local_irq_enable();
return work;
}
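process_backlog() above now drains in two stages: under rps_lock() it splices whatever sits in input_pkt_queue onto a private process_queue, then delivers those skbs with the lock released, so remote CPUs enqueuing via enqueue_to_backlog() contend only for the brief splice. A userspace sketch of that drain pattern (illustrative, not the kernel API):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct pkt { struct pkt *next; int id; };

static pthread_mutex_t input_lock = PTHREAD_MUTEX_INITIALIZER;
static struct pkt *input_head, *input_tail;    /* shared; producers append here */

static void enqueue(int id)
{
    struct pkt *p = malloc(sizeof(*p));
    if (!p)
        return;
    p->id = id;
    p->next = NULL;
    pthread_mutex_lock(&input_lock);
    if (input_tail)
        input_tail->next = p;
    else
        input_head = p;
    input_tail = p;
    pthread_mutex_unlock(&input_lock);
}

static void drain(void)
{
    struct pkt *batch;

    /* splice the whole input queue onto a private list: O(1) under the lock */
    pthread_mutex_lock(&input_lock);
    batch = input_head;
    input_head = input_tail = NULL;
    pthread_mutex_unlock(&input_lock);

    /* process the private batch with the lock dropped */
    while (batch) {
        struct pkt *next = batch->next;
        printf("delivering packet %d\n", batch->id);
        free(batch);
        batch = next;
    }
}

int main(void)
{
    enqueue(1);
    enqueue(2);
    enqueue(3);
    drain();
    return 0;
}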
@@ -2947,8 +3350,7 @@ void __napi_schedule(struct napi_struct *n)
unsigned long flags;
local_irq_save(flags);
- list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
- __raise_softirq_irqoff(NET_RX_SOFTIRQ);
+ ____napi_schedule(&__get_cpu_var(softnet_data), n);
local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);
@@ -3019,17 +3421,16 @@ void netif_napi_del(struct napi_struct *napi)
}
EXPORT_SYMBOL(netif_napi_del);
-
static void net_rx_action(struct softirq_action *h)
{
- struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
+ struct softnet_data *sd = &__get_cpu_var(softnet_data);
unsigned long time_limit = jiffies + 2;
int budget = netdev_budget;
void *have;
local_irq_disable();
- while (!list_empty(list)) {
+ while (!list_empty(&sd->poll_list)) {
struct napi_struct *n;
int work, weight;
@@ -3047,7 +3448,7 @@ static void net_rx_action(struct softirq_action *h)
* entries to the tail of this list, and only ->poll()
* calls can remove this head entry from the list.
*/
- n = list_first_entry(list, struct napi_struct, poll_list);
+ n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
have = netpoll_poll_lock(n);
@@ -3082,13 +3483,13 @@ static void net_rx_action(struct softirq_action *h)
napi_complete(n);
local_irq_disable();
} else
- list_move_tail(&n->poll_list, list);
+ list_move_tail(&n->poll_list, &sd->poll_list);
}
netpoll_poll_unlock(have);
}
out:
- local_irq_enable();
+ net_rps_action_and_irq_enable(sd);
#ifdef CONFIG_NET_DMA
/*
@@ -3101,7 +3502,7 @@ out:
return;
softnet_break:
- __get_cpu_var(netdev_rx_stat).time_squeeze++;
+ sd->time_squeeze++;
__raise_softirq_irqoff(NET_RX_SOFTIRQ);
goto out;
}
@@ -3302,17 +3703,17 @@ static int dev_seq_show(struct seq_file *seq, void *v)
return 0;
}
-static struct netif_rx_stats *softnet_get_online(loff_t *pos)
+static struct softnet_data *softnet_get_online(loff_t *pos)
{
- struct netif_rx_stats *rc = NULL;
+ struct softnet_data *sd = NULL;
while (*pos < nr_cpu_ids)
if (cpu_online(*pos)) {
- rc = &per_cpu(netdev_rx_stat, *pos);
+ sd = &per_cpu(softnet_data, *pos);
break;
} else
++*pos;
- return rc;
+ return sd;
}
static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
@@ -3332,12 +3733,12 @@ static void softnet_seq_stop(struct seq_file *seq, void *v)
static int softnet_seq_show(struct seq_file *seq, void *v)
{
- struct netif_rx_stats *s = v;
+ struct softnet_data *sd = v;
- seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
- s->total, s->dropped, s->time_squeeze, 0,
+ seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ sd->processed, sd->dropped, sd->time_squeeze, 0,
0, 0, 0, 0, /* was fastroute */
- s->cpu_collision);
+ sd->cpu_collision, sd->received_rps);
return 0;
}
@@ -3560,11 +3961,10 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)
slave->master = master;
- synchronize_net();
-
- if (old)
+ if (old) {
+ synchronize_net();
dev_put(old);
-
+ }
if (master)
slave->flags |= IFF_SLAVE;
else
@@ -3741,562 +4141,6 @@ void dev_set_rx_mode(struct net_device *dev)
netif_addr_unlock_bh(dev);
}
-/* hw addresses list handling functions */
-
-static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
- int addr_len, unsigned char addr_type)
-{
- struct netdev_hw_addr *ha;
- int alloc_size;
-
- if (addr_len > MAX_ADDR_LEN)
- return -EINVAL;
-
- list_for_each_entry(ha, &list->list, list) {
- if (!memcmp(ha->addr, addr, addr_len) &&
- ha->type == addr_type) {
- ha->refcount++;
- return 0;
- }
- }
-
-
- alloc_size = sizeof(*ha);
- if (alloc_size < L1_CACHE_BYTES)
- alloc_size = L1_CACHE_BYTES;
- ha = kmalloc(alloc_size, GFP_ATOMIC);
- if (!ha)
- return -ENOMEM;
- memcpy(ha->addr, addr, addr_len);
- ha->type = addr_type;
- ha->refcount = 1;
- ha->synced = false;
- list_add_tail_rcu(&ha->list, &list->list);
- list->count++;
- return 0;
-}
-
-static void ha_rcu_free(struct rcu_head *head)
-{
- struct netdev_hw_addr *ha;
-
- ha = container_of(head, struct netdev_hw_addr, rcu_head);
- kfree(ha);
-}
-
-static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr,
- int addr_len, unsigned char addr_type)
-{
- struct netdev_hw_addr *ha;
-
- list_for_each_entry(ha, &list->list, list) {
- if (!memcmp(ha->addr, addr, addr_len) &&
- (ha->type == addr_type || !addr_type)) {
- if (--ha->refcount)
- return 0;
- list_del_rcu(&ha->list);
- call_rcu(&ha->rcu_head, ha_rcu_free);
- list->count--;
- return 0;
- }
- }
- return -ENOENT;
-}
-
-static int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
- struct netdev_hw_addr_list *from_list,
- int addr_len,
- unsigned char addr_type)
-{
- int err;
- struct netdev_hw_addr *ha, *ha2;
- unsigned char type;
-
- list_for_each_entry(ha, &from_list->list, list) {
- type = addr_type ? addr_type : ha->type;
- err = __hw_addr_add(to_list, ha->addr, addr_len, type);
- if (err)
- goto unroll;
- }
- return 0;
-
-unroll:
- list_for_each_entry(ha2, &from_list->list, list) {
- if (ha2 == ha)
- break;
- type = addr_type ? addr_type : ha2->type;
- __hw_addr_del(to_list, ha2->addr, addr_len, type);
- }
- return err;
-}
-
-static void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
- struct netdev_hw_addr_list *from_list,
- int addr_len,
- unsigned char addr_type)
-{
- struct netdev_hw_addr *ha;
- unsigned char type;
-
- list_for_each_entry(ha, &from_list->list, list) {
- type = addr_type ? addr_type : ha->type;
- __hw_addr_del(to_list, ha->addr, addr_len, addr_type);
- }
-}
-
-static int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
- struct netdev_hw_addr_list *from_list,
- int addr_len)
-{
- int err = 0;
- struct netdev_hw_addr *ha, *tmp;
-
- list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
- if (!ha->synced) {
- err = __hw_addr_add(to_list, ha->addr,
- addr_len, ha->type);
- if (err)
- break;
- ha->synced = true;
- ha->refcount++;
- } else if (ha->refcount == 1) {
- __hw_addr_del(to_list, ha->addr, addr_len, ha->type);
- __hw_addr_del(from_list, ha->addr, addr_len, ha->type);
- }
- }
- return err;
-}
-
-static void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
- struct netdev_hw_addr_list *from_list,
- int addr_len)
-{
- struct netdev_hw_addr *ha, *tmp;
-
- list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
- if (ha->synced) {
- __hw_addr_del(to_list, ha->addr,
- addr_len, ha->type);
- ha->synced = false;
- __hw_addr_del(from_list, ha->addr,
- addr_len, ha->type);
- }
- }
-}
-
-static void __hw_addr_flush(struct netdev_hw_addr_list *list)
-{
- struct netdev_hw_addr *ha, *tmp;
-
- list_for_each_entry_safe(ha, tmp, &list->list, list) {
- list_del_rcu(&ha->list);
- call_rcu(&ha->rcu_head, ha_rcu_free);
- }
- list->count = 0;
-}
-
-static void __hw_addr_init(struct netdev_hw_addr_list *list)
-{
- INIT_LIST_HEAD(&list->list);
- list->count = 0;
-}
-
-/* Device addresses handling functions */
-
-static void dev_addr_flush(struct net_device *dev)
-{
- /* rtnl_mutex must be held here */
-
- __hw_addr_flush(&dev->dev_addrs);
- dev->dev_addr = NULL;
-}
-
-static int dev_addr_init(struct net_device *dev)
-{
- unsigned char addr[MAX_ADDR_LEN];
- struct netdev_hw_addr *ha;
- int err;
-
- /* rtnl_mutex must be held here */
-
- __hw_addr_init(&dev->dev_addrs);
- memset(addr, 0, sizeof(addr));
- err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
- NETDEV_HW_ADDR_T_LAN);
- if (!err) {
- /*
- * Get the first (previously created) address from the list
- * and set dev_addr pointer to this location.
- */
- ha = list_first_entry(&dev->dev_addrs.list,
- struct netdev_hw_addr, list);
- dev->dev_addr = ha->addr;
- }
- return err;
-}
-
-/**
- * dev_addr_add - Add a device address
- * @dev: device
- * @addr: address to add
- * @addr_type: address type
- *
- * Add a device address to the device or increase the reference count if
- * it already exists.
- *
- * The caller must hold the rtnl_mutex.
- */
-int dev_addr_add(struct net_device *dev, unsigned char *addr,
- unsigned char addr_type)
-{
- int err;
-
- ASSERT_RTNL();
-
- err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
- if (!err)
- call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
- return err;
-}
-EXPORT_SYMBOL(dev_addr_add);
-
-/**
- * dev_addr_del - Release a device address.
- * @dev: device
- * @addr: address to delete
- * @addr_type: address type
- *
- * Release reference to a device address and remove it from the device
- * if the reference count drops to zero.
- *
- * The caller must hold the rtnl_mutex.
- */
-int dev_addr_del(struct net_device *dev, unsigned char *addr,
- unsigned char addr_type)
-{
- int err;
- struct netdev_hw_addr *ha;
-
- ASSERT_RTNL();
-
- /*
- * We can not remove the first address from the list because
- * dev->dev_addr points to that.
- */
- ha = list_first_entry(&dev->dev_addrs.list,
- struct netdev_hw_addr, list);
- if (ha->addr == dev->dev_addr && ha->refcount == 1)
- return -ENOENT;
-
- err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
- addr_type);
- if (!err)
- call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
- return err;
-}
-EXPORT_SYMBOL(dev_addr_del);
-
-/**
- * dev_addr_add_multiple - Add device addresses from another device
- * @to_dev: device to which addresses will be added
- * @from_dev: device from which addresses will be added
- * @addr_type: address type - 0 means type will be used from from_dev
- *
- * Add device addresses of the one device to another.
- **
- * The caller must hold the rtnl_mutex.
- */
-int dev_addr_add_multiple(struct net_device *to_dev,
- struct net_device *from_dev,
- unsigned char addr_type)
-{
- int err;
-
- ASSERT_RTNL();
-
- if (from_dev->addr_len != to_dev->addr_len)
- return -EINVAL;
- err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
- to_dev->addr_len, addr_type);
- if (!err)
- call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
- return err;
-}
-EXPORT_SYMBOL(dev_addr_add_multiple);
-
-/**
- * dev_addr_del_multiple - Delete device addresses by another device
- * @to_dev: device where the addresses will be deleted
- * @from_dev: device by which addresses the addresses will be deleted
- * @addr_type: address type - 0 means type will used from from_dev
- *
- * Deletes addresses in to device by the list of addresses in from device.
- *
- * The caller must hold the rtnl_mutex.
- */
-int dev_addr_del_multiple(struct net_device *to_dev,
- struct net_device *from_dev,
- unsigned char addr_type)
-{
- ASSERT_RTNL();
-
- if (from_dev->addr_len != to_dev->addr_len)
- return -EINVAL;
- __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
- to_dev->addr_len, addr_type);
- call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
- return 0;
-}
-EXPORT_SYMBOL(dev_addr_del_multiple);
-
-/* multicast addresses handling functions */
-
-int __dev_addr_delete(struct dev_addr_list **list, int *count,
- void *addr, int alen, int glbl)
-{
- struct dev_addr_list *da;
-
- for (; (da = *list) != NULL; list = &da->next) {
- if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
- alen == da->da_addrlen) {
- if (glbl) {
- int old_glbl = da->da_gusers;
- da->da_gusers = 0;
- if (old_glbl == 0)
- break;
- }
- if (--da->da_users)
- return 0;
-
- *list = da->next;
- kfree(da);
- (*count)--;
- return 0;
- }
- }
- return -ENOENT;
-}
-
-int __dev_addr_add(struct dev_addr_list **list, int *count,
- void *addr, int alen, int glbl)
-{
- struct dev_addr_list *da;
-
- for (da = *list; da != NULL; da = da->next) {
- if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
- da->da_addrlen == alen) {
- if (glbl) {
- int old_glbl = da->da_gusers;
- da->da_gusers = 1;
- if (old_glbl)
- return 0;
- }
- da->da_users++;
- return 0;
- }
- }
-
- da = kzalloc(sizeof(*da), GFP_ATOMIC);
- if (da == NULL)
- return -ENOMEM;
- memcpy(da->da_addr, addr, alen);
- da->da_addrlen = alen;
- da->da_users = 1;
- da->da_gusers = glbl ? 1 : 0;
- da->next = *list;
- *list = da;
- (*count)++;
- return 0;
-}
-
-/**
- * dev_unicast_delete - Release secondary unicast address.
- * @dev: device
- * @addr: address to delete
- *
- * Release reference to a secondary unicast address and remove it
- * from the device if the reference count drops to zero.
- *
- * The caller must hold the rtnl_mutex.
- */
-int dev_unicast_delete(struct net_device *dev, void *addr)
-{
- int err;
-
- ASSERT_RTNL();
-
- netif_addr_lock_bh(dev);
- err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
- NETDEV_HW_ADDR_T_UNICAST);
- if (!err)
- __dev_set_rx_mode(dev);
- netif_addr_unlock_bh(dev);
- return err;
-}
-EXPORT_SYMBOL(dev_unicast_delete);
-
-/**
- * dev_unicast_add - add a secondary unicast address
- * @dev: device
- * @addr: address to add
- *
- * Add a secondary unicast address to the device or increase
- * the reference count if it already exists.
- *
- * The caller must hold the rtnl_mutex.
- */
-int dev_unicast_add(struct net_device *dev, void *addr)
-{
- int err;
-
- ASSERT_RTNL();
-
- netif_addr_lock_bh(dev);
- err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
- NETDEV_HW_ADDR_T_UNICAST);
- if (!err)
- __dev_set_rx_mode(dev);
- netif_addr_unlock_bh(dev);
- return err;
-}
-EXPORT_SYMBOL(dev_unicast_add);
-
-int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
- struct dev_addr_list **from, int *from_count)
-{
- struct dev_addr_list *da, *next;
- int err = 0;
-
- da = *from;
- while (da != NULL) {
- next = da->next;
- if (!da->da_synced) {
- err = __dev_addr_add(to, to_count,
- da->da_addr, da->da_addrlen, 0);
- if (err < 0)
- break;
- da->da_synced = 1;
- da->da_users++;
- } else if (da->da_users == 1) {
- __dev_addr_delete(to, to_count,
- da->da_addr, da->da_addrlen, 0);
- __dev_addr_delete(from, from_count,
- da->da_addr, da->da_addrlen, 0);
- }
- da = next;
- }
- return err;
-}
-EXPORT_SYMBOL_GPL(__dev_addr_sync);
-
-void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
- struct dev_addr_list **from, int *from_count)
-{
- struct dev_addr_list *da, *next;
-
- da = *from;
- while (da != NULL) {
- next = da->next;
- if (da->da_synced) {
- __dev_addr_delete(to, to_count,
- da->da_addr, da->da_addrlen, 0);
- da->da_synced = 0;
- __dev_addr_delete(from, from_count,
- da->da_addr, da->da_addrlen, 0);
- }
- da = next;
- }
-}
-EXPORT_SYMBOL_GPL(__dev_addr_unsync);
-
-/**
- * dev_unicast_sync - Synchronize device's unicast list to another device
- * @to: destination device
- * @from: source device
- *
- * Add newly added addresses to the destination device and release
- * addresses that have no users left. The source device must be
- * locked by netif_tx_lock_bh.
- *
- * This function is intended to be called from the dev->set_rx_mode
- * function of layered software devices.
- */
-int dev_unicast_sync(struct net_device *to, struct net_device *from)
-{
- int err = 0;
-
- if (to->addr_len != from->addr_len)
- return -EINVAL;
-
- netif_addr_lock_bh(to);
- err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
- if (!err)
- __dev_set_rx_mode(to);
- netif_addr_unlock_bh(to);
- return err;
-}
-EXPORT_SYMBOL(dev_unicast_sync);
-
-/**
- * dev_unicast_unsync - Remove synchronized addresses from the destination device
- * @to: destination device
- * @from: source device
- *
- * Remove all addresses that were added to the destination device by
- * dev_unicast_sync(). This function is intended to be called from the
- * dev->stop function of layered software devices.
- */
-void dev_unicast_unsync(struct net_device *to, struct net_device *from)
-{
- if (to->addr_len != from->addr_len)
- return;
-
- netif_addr_lock_bh(from);
- netif_addr_lock(to);
- __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
- __dev_set_rx_mode(to);
- netif_addr_unlock(to);
- netif_addr_unlock_bh(from);
-}
-EXPORT_SYMBOL(dev_unicast_unsync);
-
-static void dev_unicast_flush(struct net_device *dev)
-{
- netif_addr_lock_bh(dev);
- __hw_addr_flush(&dev->uc);
- netif_addr_unlock_bh(dev);
-}
-
-static void dev_unicast_init(struct net_device *dev)
-{
- __hw_addr_init(&dev->uc);
-}
-
-
-static void __dev_addr_discard(struct dev_addr_list **list)
-{
- struct dev_addr_list *tmp;
-
- while (*list != NULL) {
- tmp = *list;
- *list = tmp->next;
- if (tmp->da_users > tmp->da_gusers)
- printk("__dev_addr_discard: address leakage! "
- "da_users=%d\n", tmp->da_users);
- kfree(tmp);
- }
-}
-
-static void dev_addr_discard(struct net_device *dev)
-{
- netif_addr_lock_bh(dev);
-
- __dev_addr_discard(&dev->mc_list);
- netdev_mc_count(dev) = 0;
-
- netif_addr_unlock_bh(dev);
-}
-
/**
* dev_get_flags - get flags reported to userspace
* @dev: device
@@ -4607,8 +4451,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
return -EINVAL;
if (!netif_device_present(dev))
return -ENODEV;
- return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
- dev->addr_len, 1);
+ return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
case SIOCDELMULTI:
if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
@@ -4616,8 +4459,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
return -EINVAL;
if (!netif_device_present(dev))
return -ENODEV;
- return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
- dev->addr_len, 1);
+ return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
case SIOCSIFTXQLEN:
if (ifr->ifr_qlen < 0)
@@ -4924,8 +4766,8 @@ static void rollback_registered_many(struct list_head *head)
/*
* Flush the unicast and multicast chains
*/
- dev_unicast_flush(dev);
- dev_addr_discard(dev);
+ dev_uc_flush(dev);
+ dev_mc_flush(dev);
if (dev->netdev_ops->ndo_uninit)
dev->netdev_ops->ndo_uninit(dev);
@@ -5074,6 +4916,24 @@ int register_netdevice(struct net_device *dev)
dev->iflink = -1;
+#ifdef CONFIG_RPS
+ if (!dev->num_rx_queues) {
+ /*
+ * Allocate a single RX queue if driver never called
+ * alloc_netdev_mq
+ */
+
+ dev->_rx = kzalloc(sizeof(struct netdev_rx_queue), GFP_KERNEL);
+ if (!dev->_rx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ dev->_rx->first = dev->_rx;
+ atomic_set(&dev->_rx->count, 1);
+ dev->num_rx_queues = 1;
+ }
+#endif
/* Init, if this function is available */
if (dev->netdev_ops->ndo_init) {
ret = dev->netdev_ops->ndo_init(dev);
@@ -5434,6 +5294,10 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
struct net_device *dev;
size_t alloc_size;
struct net_device *p;
+#ifdef CONFIG_RPS
+ struct netdev_rx_queue *rx;
+ int i;
+#endif
BUG_ON(strlen(name) >= sizeof(dev->name));
@@ -5459,13 +5323,32 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
goto free_p;
}
+#ifdef CONFIG_RPS
+ rx = kcalloc(queue_count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
+ if (!rx) {
+ printk(KERN_ERR "alloc_netdev: Unable to allocate "
+ "rx queues.\n");
+ goto free_tx;
+ }
+
+ atomic_set(&rx->count, queue_count);
+
+ /*
+ * Set a pointer to first element in the array which holds the
+ * reference count.
+ */
+ for (i = 0; i < queue_count; i++)
+ rx[i].first = rx;
+#endif
+
dev = PTR_ALIGN(p, NETDEV_ALIGN);
dev->padded = (char *)dev - (char *)p;
if (dev_addr_init(dev))
- goto free_tx;
+ goto free_rx;
- dev_unicast_init(dev);
+ dev_mc_init(dev);
+ dev_uc_init(dev);
dev_net_set(dev, &init_net);
@@ -5473,6 +5356,11 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
dev->num_tx_queues = queue_count;
dev->real_num_tx_queues = queue_count;
+#ifdef CONFIG_RPS
+ dev->_rx = rx;
+ dev->num_rx_queues = queue_count;
+#endif
+
dev->gso_max_size = GSO_MAX_SIZE;
netdev_init_queues(dev);
@@ -5487,9 +5375,12 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
strcpy(dev->name, name);
return dev;
+free_rx:
+#ifdef CONFIG_RPS
+ kfree(rx);
free_tx:
+#endif
kfree(tx);
-
free_p:
kfree(p);
return NULL;
@@ -5691,8 +5582,8 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
/*
* Flush the unicast and multicast chains
*/
- dev_unicast_flush(dev);
- dev_addr_discard(dev);
+ dev_uc_flush(dev);
+ dev_mc_flush(dev);
netdev_unregister_kobject(dev);
@@ -5735,7 +5626,6 @@ static int dev_cpu_callback(struct notifier_block *nfb,
void *ocpu)
{
struct sk_buff **list_skb;
- struct Qdisc **list_net;
struct sk_buff *skb;
unsigned int cpu, oldcpu = (unsigned long)ocpu;
struct softnet_data *sd, *oldsd;
@@ -5756,19 +5646,23 @@ static int dev_cpu_callback(struct notifier_block *nfb,
*list_skb = oldsd->completion_queue;
oldsd->completion_queue = NULL;
- /* Find end of our output_queue. */
- list_net = &sd->output_queue;
- while (*list_net)
- list_net = &(*list_net)->next_sched;
/* Append output queue from offline CPU. */
- *list_net = oldsd->output_queue;
- oldsd->output_queue = NULL;
+ if (oldsd->output_queue) {
+ *sd->output_queue_tailp = oldsd->output_queue;
+ sd->output_queue_tailp = oldsd->output_queue_tailp;
+ oldsd->output_queue = NULL;
+ oldsd->output_queue_tailp = &oldsd->output_queue;
+ }
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
/* Process offline CPU's input_pkt_queue */
- while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
+ while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
+ netif_rx(skb);
+ input_queue_head_add(oldsd, 1);
+ }
+ while ((skb = __skb_dequeue(&oldsd->process_queue)))
netif_rx(skb);
return NOTIFY_OK;
@@ -5985,17 +5879,26 @@ static int __init net_dev_init(void)
*/
for_each_possible_cpu(i) {
- struct softnet_data *queue;
+ struct softnet_data *sd = &per_cpu(softnet_data, i);
- queue = &per_cpu(softnet_data, i);
- skb_queue_head_init(&queue->input_pkt_queue);
- queue->completion_queue = NULL;
- INIT_LIST_HEAD(&queue->poll_list);
+ memset(sd, 0, sizeof(*sd));
+ skb_queue_head_init(&sd->input_pkt_queue);
+ skb_queue_head_init(&sd->process_queue);
+ sd->completion_queue = NULL;
+ INIT_LIST_HEAD(&sd->poll_list);
+ sd->output_queue = NULL;
+ sd->output_queue_tailp = &sd->output_queue;
+#ifdef CONFIG_RPS
+ sd->csd.func = rps_trigger_softirq;
+ sd->csd.info = sd;
+ sd->csd.flags = 0;
+ sd->cpu = i;
+#endif
- queue->backlog.poll = process_backlog;
- queue->backlog.weight = weight_p;
- queue->backlog.gro_list = NULL;
- queue->backlog.gro_count = 0;
+ sd->backlog.poll = process_backlog;
+ sd->backlog.weight = weight_p;
+ sd->backlog.gro_list = NULL;
+ sd->backlog.gro_count = 0;
}
dev_boot_phase = 0;
@@ -6030,7 +5933,7 @@ subsys_initcall(net_dev_init);
static int __init initialize_hashrnd(void)
{
- get_random_bytes(&skb_tx_hashrnd, sizeof(skb_tx_hashrnd));
+ get_random_bytes(&hashrnd, sizeof(hashrnd));
return 0;
}
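The new net/core/dev_addr_lists.c below consolidates the unicast, multicast and device address lists around one refcounted entry type: adding an address that already exists only bumps its refcount, and deleting drops the refcount and frees the entry when it reaches zero. A minimal userspace sketch of that rule (entry layout and names are illustrative, and the kernel additionally uses RCU for the list itself):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ADDR_LEN 6

struct hw_addr {
    struct hw_addr *next;
    unsigned char addr[ADDR_LEN];
    int refcount;
};

static struct hw_addr *list_head;

static int addr_add(const unsigned char *addr)
{
    struct hw_addr *ha;

    for (ha = list_head; ha; ha = ha->next) {
        if (!memcmp(ha->addr, addr, ADDR_LEN)) {
            ha->refcount++;    /* duplicate add: just take a reference */
            return 0;
        }
    }
    ha = malloc(sizeof(*ha));
    if (!ha)
        return -1;
    memcpy(ha->addr, addr, ADDR_LEN);
    ha->refcount = 1;
    ha->next = list_head;
    list_head = ha;
    return 0;
}

static int addr_del(const unsigned char *addr)
{
    struct hw_addr **pp, *ha;

    for (pp = &list_head; (ha = *pp); pp = &ha->next) {
        if (memcmp(ha->addr, addr, ADDR_LEN))
            continue;
        if (--ha->refcount)    /* still referenced elsewhere */
            return 0;
        *pp = ha->next;        /* last reference: unlink and free */
        free(ha);
        return 0;
    }
    return -1;                 /* not found */
}

int main(void)
{
    unsigned char mac[ADDR_LEN] = { 0x02, 0, 0, 0, 0, 1 };

    addr_add(mac);
    addr_add(mac);             /* second add only bumps the refcount */
    addr_del(mac);
    printf("still listed: %s\n", list_head ? "yes" : "no");
    addr_del(mac);
    printf("still listed: %s\n", list_head ? "yes" : "no");
    return 0;
}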
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
new file mode 100644
index 000000000000..508f9c18992f
--- /dev/null
+++ b/net/core/dev_addr_lists.c
@@ -0,0 +1,741 @@
+/*
+ * net/core/dev_addr_lists.c - Functions for handling net device lists
+ * Copyright (c) 2010 Jiri Pirko <jpirko@redhat.com>
+ *
+ * This file contains functions for working with unicast, multicast and device
+ * addresses lists.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/list.h>
+#include <linux/proc_fs.h>
+
+/*
+ * General list handling functions
+ */
+
+static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
+ unsigned char *addr, int addr_len,
+ unsigned char addr_type, bool global)
+{
+ struct netdev_hw_addr *ha;
+ int alloc_size;
+
+ if (addr_len > MAX_ADDR_LEN)
+ return -EINVAL;
+
+ list_for_each_entry(ha, &list->list, list) {
+ if (!memcmp(ha->addr, addr, addr_len) &&
+ ha->type == addr_type) {
+ if (global) {
+ /* check if addr is already used as global */
+ if (ha->global_use)
+ return 0;
+ else
+ ha->global_use = true;
+ }
+ ha->refcount++;
+ return 0;
+ }
+ }
+
+
+ alloc_size = sizeof(*ha);
+ if (alloc_size < L1_CACHE_BYTES)
+ alloc_size = L1_CACHE_BYTES;
+ ha = kmalloc(alloc_size, GFP_ATOMIC);
+ if (!ha)
+ return -ENOMEM;
+ memcpy(ha->addr, addr, addr_len);
+ ha->type = addr_type;
+ ha->refcount = 1;
+ ha->global_use = global;
+ ha->synced = false;
+ list_add_tail_rcu(&ha->list, &list->list);
+ list->count++;
+ return 0;
+}
+
+static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
+ int addr_len, unsigned char addr_type)
+{
+ return __hw_addr_add_ex(list, addr, addr_len, addr_type, false);
+}
+
+static void ha_rcu_free(struct rcu_head *head)
+{
+ struct netdev_hw_addr *ha;
+
+ ha = container_of(head, struct netdev_hw_addr, rcu_head);
+ kfree(ha);
+}
+
+static int __hw_addr_del_ex(struct netdev_hw_addr_list *list,
+ unsigned char *addr, int addr_len,
+ unsigned char addr_type, bool global)
+{
+ struct netdev_hw_addr *ha;
+
+ list_for_each_entry(ha, &list->list, list) {
+ if (!memcmp(ha->addr, addr, addr_len) &&
+ (ha->type == addr_type || !addr_type)) {
+ if (global) {
+ if (!ha->global_use)
+ break;
+ else
+ ha->global_use = false;
+ }
+ if (--ha->refcount)
+ return 0;
+ list_del_rcu(&ha->list);
+ call_rcu(&ha->rcu_head, ha_rcu_free);
+ list->count--;
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr,
+ int addr_len, unsigned char addr_type)
+{
+ return __hw_addr_del_ex(list, addr, addr_len, addr_type, false);
+}
+
+int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
+ struct netdev_hw_addr_list *from_list,
+ int addr_len, unsigned char addr_type)
+{
+ int err;
+ struct netdev_hw_addr *ha, *ha2;
+ unsigned char type;
+
+ list_for_each_entry(ha, &from_list->list, list) {
+ type = addr_type ? addr_type : ha->type;
+ err = __hw_addr_add(to_list, ha->addr, addr_len, type);
+ if (err)
+ goto unroll;
+ }
+ return 0;
+
+unroll:
+ list_for_each_entry(ha2, &from_list->list, list) {
+ if (ha2 == ha)
+ break;
+ type = addr_type ? addr_type : ha2->type;
+ __hw_addr_del(to_list, ha2->addr, addr_len, type);
+ }
+ return err;
+}
+EXPORT_SYMBOL(__hw_addr_add_multiple);
+
+void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
+ struct netdev_hw_addr_list *from_list,
+ int addr_len, unsigned char addr_type)
+{
+ struct netdev_hw_addr *ha;
+ unsigned char type;
+
+ list_for_each_entry(ha, &from_list->list, list) {
+ type = addr_type ? addr_type : ha->type;
+ __hw_addr_del(to_list, ha->addr, addr_len, addr_type);
+ }
+}
+EXPORT_SYMBOL(__hw_addr_del_multiple);
+
+int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
+ struct netdev_hw_addr_list *from_list,
+ int addr_len)
+{
+ int err = 0;
+ struct netdev_hw_addr *ha, *tmp;
+
+ list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
+ if (!ha->synced) {
+ err = __hw_addr_add(to_list, ha->addr,
+ addr_len, ha->type);
+ if (err)
+ break;
+ ha->synced = true;
+ ha->refcount++;
+ } else if (ha->refcount == 1) {
+ __hw_addr_del(to_list, ha->addr, addr_len, ha->type);
+ __hw_addr_del(from_list, ha->addr, addr_len, ha->type);
+ }
+ }
+ return err;
+}
+EXPORT_SYMBOL(__hw_addr_sync);
+
+void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
+ struct netdev_hw_addr_list *from_list,
+ int addr_len)
+{
+ struct netdev_hw_addr *ha, *tmp;
+
+ list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
+ if (ha->synced) {
+ __hw_addr_del(to_list, ha->addr,
+ addr_len, ha->type);
+ ha->synced = false;
+ __hw_addr_del(from_list, ha->addr,
+ addr_len, ha->type);
+ }
+ }
+}
+EXPORT_SYMBOL(__hw_addr_unsync);
+
+void __hw_addr_flush(struct netdev_hw_addr_list *list)
+{
+ struct netdev_hw_addr *ha, *tmp;
+
+ list_for_each_entry_safe(ha, tmp, &list->list, list) {
+ list_del_rcu(&ha->list);
+ call_rcu(&ha->rcu_head, ha_rcu_free);
+ }
+ list->count = 0;
+}
+EXPORT_SYMBOL(__hw_addr_flush);
+
+void __hw_addr_init(struct netdev_hw_addr_list *list)
+{
+ INIT_LIST_HEAD(&list->list);
+ list->count = 0;
+}
+EXPORT_SYMBOL(__hw_addr_init);
+
+/*
+ * Device addresses handling functions
+ */
+
+/**
+ * dev_addr_flush - Flush device address list
+ * @dev: device
+ *
+ * Flush device address list and reset ->dev_addr.
+ *
+ * The caller must hold the rtnl_mutex.
+ */
+void dev_addr_flush(struct net_device *dev)
+{
+ /* rtnl_mutex must be held here */
+
+ __hw_addr_flush(&dev->dev_addrs);
+ dev->dev_addr = NULL;
+}
+EXPORT_SYMBOL(dev_addr_flush);
+
+/**
+ * dev_addr_init - Init device address list
+ * @dev: device
+ *
+ * Init device address list and create the first element,
+ * used by ->dev_addr.
+ *
+ * The caller must hold the rtnl_mutex.
+ */
+int dev_addr_init(struct net_device *dev)
+{
+ unsigned char addr[MAX_ADDR_LEN];
+ struct netdev_hw_addr *ha;
+ int err;
+
+ /* rtnl_mutex must be held here */
+
+ __hw_addr_init(&dev->dev_addrs);
+ memset(addr, 0, sizeof(addr));
+ err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
+ NETDEV_HW_ADDR_T_LAN);
+ if (!err) {
+ /*
+ * Get the first (previously created) address from the list
+ * and set dev_addr pointer to this location.
+ */
+ ha = list_first_entry(&dev->dev_addrs.list,
+ struct netdev_hw_addr, list);
+ dev->dev_addr = ha->addr;
+ }
+ return err;
+}
+EXPORT_SYMBOL(dev_addr_init);
+
+/**
+ * dev_addr_add - Add a device address
+ * @dev: device
+ * @addr: address to add
+ * @addr_type: address type
+ *
+ * Add a device address to the device or increase the reference count if
+ * it already exists.
+ *
+ * The caller must hold the rtnl_mutex.
+ */
+int dev_addr_add(struct net_device *dev, unsigned char *addr,
+ unsigned char addr_type)
+{
+ int err;
+
+ ASSERT_RTNL();
+
+ err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
+ if (!err)
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+ return err;
+}
+EXPORT_SYMBOL(dev_addr_add);
+
+/**
+ * dev_addr_del - Release a device address.
+ * @dev: device
+ * @addr: address to delete
+ * @addr_type: address type
+ *
+ * Release reference to a device address and remove it from the device
+ * if the reference count drops to zero.
+ *
+ * The caller must hold the rtnl_mutex.
+ */
+int dev_addr_del(struct net_device *dev, unsigned char *addr,
+ unsigned char addr_type)
+{
+ int err;
+ struct netdev_hw_addr *ha;
+
+ ASSERT_RTNL();
+
+ /*
+ * We can not remove the first address from the list because
+ * dev->dev_addr points to that.
+ */
+ ha = list_first_entry(&dev->dev_addrs.list,
+ struct netdev_hw_addr, list);
+ if (ha->addr == dev->dev_addr && ha->refcount == 1)
+ return -ENOENT;
+
+ err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
+ addr_type);
+ if (!err)
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+ return err;
+}
+EXPORT_SYMBOL(dev_addr_del);
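+
+/*
+ * Example (illustrative sketch, names hypothetical): a driver wanting a
+ * secondary hardware address pairs the two helpers above under RTNL:
+ *
+ *     rtnl_lock();
+ *     err = dev_addr_add(dev, secondary_addr, NETDEV_HW_ADDR_T_LAN);
+ *     ...
+ *     dev_addr_del(dev, secondary_addr, NETDEV_HW_ADDR_T_LAN);
+ *     rtnl_unlock();
+ *
+ * dev_addr_add() only bumps the refcount if the address already exists,
+ * and dev_addr_del() removes the entry once the refcount drops to zero.
+ */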
+
+/**
+ * dev_addr_add_multiple - Add device addresses from another device
+ * @to_dev: device to which addresses will be added
+ * @from_dev: device from which addresses will be added
+ * @addr_type: address type - 0 means the type will be taken from from_dev
+ *
+ * Add device addresses of one device to another.
+ *
+ * The caller must hold the rtnl_mutex.
+ */
+int dev_addr_add_multiple(struct net_device *to_dev,
+ struct net_device *from_dev,
+ unsigned char addr_type)
+{
+ int err;
+
+ ASSERT_RTNL();
+
+ if (from_dev->addr_len != to_dev->addr_len)
+ return -EINVAL;
+ err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
+ to_dev->addr_len, addr_type);
+ if (!err)
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
+ return err;
+}
+EXPORT_SYMBOL(dev_addr_add_multiple);
+
+/**
+ * dev_addr_del_multiple - Delete device addresses by another device
+ * @to_dev: device from which the addresses will be deleted
+ * @from_dev: device supplying the addresses to be deleted
+ * @addr_type: address type - 0 means the type will be taken from from_dev
+ *
+ * Deletes the addresses in the to device that are listed in the from device.
+ *
+ * The caller must hold the rtnl_mutex.
+ */
+int dev_addr_del_multiple(struct net_device *to_dev,
+ struct net_device *from_dev,
+ unsigned char addr_type)
+{
+ ASSERT_RTNL();
+
+ if (from_dev->addr_len != to_dev->addr_len)
+ return -EINVAL;
+ __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
+ to_dev->addr_len, addr_type);
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
+ return 0;
+}
+EXPORT_SYMBOL(dev_addr_del_multiple);
+
+/*
+ * Unicast list handling functions
+ */
+
+/**
+ * dev_uc_add - Add a secondary unicast address
+ * @dev: device
+ * @addr: address to add
+ *
+ * Add a secondary unicast address to the device or increase
+ * the reference count if it already exists.
+ */
+int dev_uc_add(struct net_device *dev, unsigned char *addr)
+{
+ int err;
+
+ netif_addr_lock_bh(dev);
+ err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
+ NETDEV_HW_ADDR_T_UNICAST);
+ if (!err)
+ __dev_set_rx_mode(dev);
+ netif_addr_unlock_bh(dev);
+ return err;
+}
+EXPORT_SYMBOL(dev_uc_add);
+
+/**
+ * dev_uc_del - Release secondary unicast address.
+ * @dev: device
+ * @addr: address to delete
+ *
+ * Release reference to a secondary unicast address and remove it
+ * from the device if the reference count drops to zero.
+ */
+int dev_uc_del(struct net_device *dev, unsigned char *addr)
+{
+ int err;
+
+ netif_addr_lock_bh(dev);
+ err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
+ NETDEV_HW_ADDR_T_UNICAST);
+ if (!err)
+ __dev_set_rx_mode(dev);
+ netif_addr_unlock_bh(dev);
+ return err;
+}
+EXPORT_SYMBOL(dev_uc_del);
+
+/**
+ * dev_uc_sync - Synchronize device's unicast list to another device
+ * @to: destination device
+ * @from: source device
+ *
+ * Add newly added addresses to the destination device and release
+ * addresses that have no users left. The source device must be
+ * locked by netif_addr_lock_bh.
+ *
+ * This function is intended to be called from the dev->set_rx_mode
+ * function of layered software devices.
+ */
+int dev_uc_sync(struct net_device *to, struct net_device *from)
+{
+ int err = 0;
+
+ if (to->addr_len != from->addr_len)
+ return -EINVAL;
+
+ netif_addr_lock_bh(to);
+ err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
+ if (!err)
+ __dev_set_rx_mode(to);
+ netif_addr_unlock_bh(to);
+ return err;
+}
+EXPORT_SYMBOL(dev_uc_sync);
+
+/**
+ * dev_uc_unsync - Remove synchronized addresses from the destination device
+ * @to: destination device
+ * @from: source device
+ *
+ * Remove all addresses that were added to the destination device by
+ * dev_uc_sync(). This function is intended to be called from the
+ * dev->stop function of layered software devices.
+ */
+void dev_uc_unsync(struct net_device *to, struct net_device *from)
+{
+ if (to->addr_len != from->addr_len)
+ return;
+
+ netif_addr_lock_bh(from);
+ netif_addr_lock(to);
+ __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
+ __dev_set_rx_mode(to);
+ netif_addr_unlock(to);
+ netif_addr_unlock_bh(from);
+}
+EXPORT_SYMBOL(dev_uc_unsync);
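+
+/*
+ * Example (illustrative sketch, vlan-style names assumed): a layered
+ * software device keeps the lower device's unicast filter in step with
+ * its own list by syncing from ndo_set_rx_mode and unsyncing on stop:
+ *
+ *     static void upper_set_rx_mode(struct net_device *upper)
+ *     {
+ *             dev_uc_sync(upper_priv(upper)->lowerdev, upper);
+ *     }
+ *
+ *     static int upper_stop(struct net_device *upper)
+ *     {
+ *             dev_uc_unsync(upper_priv(upper)->lowerdev, upper);
+ *             return 0;
+ *     }
+ *
+ * upper_priv() and the lowerdev field are assumptions; the point is the
+ * sync-on-rx-mode / unsync-on-stop pairing with the lower device as "to".
+ */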
+
+/**
+ * dev_uc_flush - Flush unicast addresses
+ * @dev: device
+ *
+ * Flush unicast addresses.
+ */
+void dev_uc_flush(struct net_device *dev)
+{
+ netif_addr_lock_bh(dev);
+ __hw_addr_flush(&dev->uc);
+ netif_addr_unlock_bh(dev);
+}
+EXPORT_SYMBOL(dev_uc_flush);
+
+/**
+ * dev_uc_init - Init unicast address list
+ * @dev: device
+ *
+ * Init unicast address list.
+ */
+void dev_uc_init(struct net_device *dev)
+{
+ __hw_addr_init(&dev->uc);
+}
+EXPORT_SYMBOL(dev_uc_init);
+
+/*
+ * Multicast list handling functions
+ */
+
+static int __dev_mc_add(struct net_device *dev, unsigned char *addr,
+ bool global)
+{
+ int err;
+
+ netif_addr_lock_bh(dev);
+ err = __hw_addr_add_ex(&dev->mc, addr, dev->addr_len,
+ NETDEV_HW_ADDR_T_MULTICAST, global);
+ if (!err)
+ __dev_set_rx_mode(dev);
+ netif_addr_unlock_bh(dev);
+ return err;
+}
+/**
+ * dev_mc_add - Add a multicast address
+ * @dev: device
+ * @addr: address to add
+ *
+ * Add a multicast address to the device or increase
+ * the reference count if it already exists.
+ */
+int dev_mc_add(struct net_device *dev, unsigned char *addr)
+{
+ return __dev_mc_add(dev, addr, false);
+}
+EXPORT_SYMBOL(dev_mc_add);
+
+/**
+ * dev_mc_add_global - Add a global multicast address
+ * @dev: device
+ * @addr: address to add
+ *
+ * Add a global multicast address to the device.
+ */
+int dev_mc_add_global(struct net_device *dev, unsigned char *addr)
+{
+ return __dev_mc_add(dev, addr, true);
+}
+EXPORT_SYMBOL(dev_mc_add_global);
+
+static int __dev_mc_del(struct net_device *dev, unsigned char *addr,
+ bool global)
+{
+ int err;
+
+ netif_addr_lock_bh(dev);
+ err = __hw_addr_del_ex(&dev->mc, addr, dev->addr_len,
+ NETDEV_HW_ADDR_T_MULTICAST, global);
+ if (!err)
+ __dev_set_rx_mode(dev);
+ netif_addr_unlock_bh(dev);
+ return err;
+}
+
+/**
+ * dev_mc_del - Delete a multicast address.
+ * @dev: device
+ * @addr: address to delete
+ *
+ * Release reference to a multicast address and remove it
+ * from the device if the reference count drops to zero.
+ */
+int dev_mc_del(struct net_device *dev, unsigned char *addr)
+{
+ return __dev_mc_del(dev, addr, false);
+}
+EXPORT_SYMBOL(dev_mc_del);
+
+/**
+ * dev_mc_del_global - Delete a global multicast address.
+ * @dev: device
+ * @addr: address to delete
+ *
+ * Release reference to a multicast address and remove it
+ * from the device if the reference count drops to zero.
+ */
+int dev_mc_del_global(struct net_device *dev, unsigned char *addr)
+{
+ return __dev_mc_del(dev, addr, true);
+}
+EXPORT_SYMBOL(dev_mc_del_global);
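+
+/*
+ * Example (illustrative sketch, group address assumed): a protocol-level
+ * join/leave uses the refcounted variants, so overlapping users cannot
+ * remove each other's entries:
+ *
+ *     static const unsigned char grp[ETH_ALEN] =
+ *             { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
+ *
+ *     dev_mc_add(dev, (unsigned char *)grp);      join
+ *     ...
+ *     dev_mc_del(dev, (unsigned char *)grp);      leave
+ *
+ * The _global variants instead mark the entry as globally used, which is
+ * what shows up in the global_use column of /proc/net/dev_mcast below.
+ */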
+
+/**
+ * dev_mc_sync - Synchronize device's multicast list to another device
+ * @to: destination device
+ * @from: source device
+ *
+ * Add newly added addresses to the destination device and release
+ * addresses that have no users left. The source device must be
+ * locked by netif_addr_lock_bh.
+ *
+ * This function is intended to be called from the dev->set_multicast_list
+ * or dev->set_rx_mode function of layered software devices.
+ */
+int dev_mc_sync(struct net_device *to, struct net_device *from)
+{
+ int err = 0;
+
+ if (to->addr_len != from->addr_len)
+ return -EINVAL;
+
+ netif_addr_lock_bh(to);
+ err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
+ if (!err)
+ __dev_set_rx_mode(to);
+ netif_addr_unlock_bh(to);
+ return err;
+}
+EXPORT_SYMBOL(dev_mc_sync);
+
+/**
+ * dev_mc_unsync - Remove synchronized addresses from the destination device
+ * @to: destination device
+ * @from: source device
+ *
+ * Remove all addresses that were added to the destination device by
+ * dev_mc_sync(). This function is intended to be called from the
+ * dev->stop function of layered software devices.
+ */
+void dev_mc_unsync(struct net_device *to, struct net_device *from)
+{
+ if (to->addr_len != from->addr_len)
+ return;
+
+ netif_addr_lock_bh(from);
+ netif_addr_lock(to);
+ __hw_addr_unsync(&to->mc, &from->mc, to->addr_len);
+ __dev_set_rx_mode(to);
+ netif_addr_unlock(to);
+ netif_addr_unlock_bh(from);
+}
+EXPORT_SYMBOL(dev_mc_unsync);
+
+/**
+ * dev_mc_flush - Flush multicast addresses
+ * @dev: device
+ *
+ * Flush multicast addresses.
+ */
+void dev_mc_flush(struct net_device *dev)
+{
+ netif_addr_lock_bh(dev);
+ __hw_addr_flush(&dev->mc);
+ netif_addr_unlock_bh(dev);
+}
+EXPORT_SYMBOL(dev_mc_flush);
+
+/**
+ * dev_mc_init - Init multicast address list
+ * @dev: device
+ *
+ * Init multicast address list.
+ */
+void dev_mc_init(struct net_device *dev)
+{
+ __hw_addr_init(&dev->mc);
+}
+EXPORT_SYMBOL(dev_mc_init);
+
+#ifdef CONFIG_PROC_FS
+#include <linux/seq_file.h>
+
+static int dev_mc_seq_show(struct seq_file *seq, void *v)
+{
+ struct netdev_hw_addr *ha;
+ struct net_device *dev = v;
+
+ if (v == SEQ_START_TOKEN)
+ return 0;
+
+ netif_addr_lock_bh(dev);
+ netdev_for_each_mc_addr(ha, dev) {
+ int i;
+
+ seq_printf(seq, "%-4d %-15s %-5d %-5d ", dev->ifindex,
+ dev->name, ha->refcount, ha->global_use);
+
+ for (i = 0; i < dev->addr_len; i++)
+ seq_printf(seq, "%02x", ha->addr[i]);
+
+ seq_putc(seq, '\n');
+ }
+ netif_addr_unlock_bh(dev);
+ return 0;
+}
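+
+/*
+ * One line is emitted per (device, address) pair, e.g. with illustrative
+ * values:
+ *
+ *     2    eth0            1     0     01005e000001
+ *
+ * i.e. ifindex, device name, refcount, global_use, then the address in hex.
+ */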
+
+static const struct seq_operations dev_mc_seq_ops = {
+ .start = dev_seq_start,
+ .next = dev_seq_next,
+ .stop = dev_seq_stop,
+ .show = dev_mc_seq_show,
+};
+
+static int dev_mc_seq_open(struct inode *inode, struct file *file)
+{
+ return seq_open_net(inode, file, &dev_mc_seq_ops,
+ sizeof(struct seq_net_private));
+}
+
+static const struct file_operations dev_mc_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = dev_mc_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_net,
+};
+
+#endif
+
+static int __net_init dev_mc_net_init(struct net *net)
+{
+ if (!proc_net_fops_create(net, "dev_mcast", 0, &dev_mc_seq_fops))
+ return -ENOMEM;
+ return 0;
+}
+
+static void __net_exit dev_mc_net_exit(struct net *net)
+{
+ proc_net_remove(net, "dev_mcast");
+}
+
+static struct pernet_operations __net_initdata dev_mc_net_ops = {
+ .init = dev_mc_net_init,
+ .exit = dev_mc_net_exit,
+};
+
+void __init dev_mcast_init(void)
+{
+ register_pernet_subsys(&dev_mc_net_ops);
+}
+
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
deleted file mode 100644
index 3dc295beb483..000000000000
--- a/net/core/dev_mcast.c
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- * Linux NET3: Multicast List maintenance.
- *
- * Authors:
- * Tim Kordas <tjk@nostromo.eeap.cwru.edu>
- * Richard Underwood <richard@wuzz.demon.co.uk>
- *
- * Stir fried together from the IP multicast and CAP patches above
- * Alan Cox <alan@lxorguk.ukuu.org.uk>
- *
- * Fixes:
- * Alan Cox : Update the device on a real delete
- * rather than any time but...
- * Alan Cox : IFF_ALLMULTI support.
- * Alan Cox : New format set_multicast_list() calls.
- * Gleb Natapov : Remove dev_mc_lock.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <linux/bitops.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/in.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/if_ether.h>
-#include <linux/inet.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/init.h>
-#include <net/net_namespace.h>
-#include <net/ip.h>
-#include <net/route.h>
-#include <linux/skbuff.h>
-#include <net/sock.h>
-#include <net/arp.h>
-
-
-/*
- * Device multicast list maintenance.
- *
- * This is used both by IP and by the user level maintenance functions.
- * Unlike BSD we maintain a usage count on a given multicast address so
- * that a casual user application can add/delete multicasts used by
- * protocols without doing damage to the protocols when it deletes the
- * entries. It also helps IP as it tracks overlapping maps.
- *
- * Device mc lists are changed by bh at least if IPv6 is enabled,
- * so that it must be bh protected.
- *
- * We block accesses to device mc filters with netif_tx_lock.
- */
-
-/*
- * Delete a device level multicast
- */
-
-int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl)
-{
- int err;
-
- netif_addr_lock_bh(dev);
- err = __dev_addr_delete(&dev->mc_list, &dev->mc_count,
- addr, alen, glbl);
- if (!err) {
- /*
- * We have altered the list, so the card
- * loaded filter is now wrong. Fix it
- */
-
- __dev_set_rx_mode(dev);
- }
- netif_addr_unlock_bh(dev);
- return err;
-}
-
-/*
- * Add a device level multicast
- */
-
-int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
-{
- int err;
-
- netif_addr_lock_bh(dev);
- if (alen != dev->addr_len)
- err = -EINVAL;
- else
- err = __dev_addr_add(&dev->mc_list, &dev->mc_count, addr, alen, glbl);
- if (!err)
- __dev_set_rx_mode(dev);
- netif_addr_unlock_bh(dev);
- return err;
-}
-
-/**
- * dev_mc_sync - Synchronize device's multicast list to another device
- * @to: destination device
- * @from: source device
- *
- * Add newly added addresses to the destination device and release
- * addresses that have no users left. The source device must be
- * locked by netif_tx_lock_bh.
- *
- * This function is intended to be called from the dev->set_multicast_list
- * or dev->set_rx_mode function of layered software devices.
- */
-int dev_mc_sync(struct net_device *to, struct net_device *from)
-{
- int err = 0;
-
- netif_addr_lock_bh(to);
- err = __dev_addr_sync(&to->mc_list, &to->mc_count,
- &from->mc_list, &from->mc_count);
- if (!err)
- __dev_set_rx_mode(to);
- netif_addr_unlock_bh(to);
-
- return err;
-}
-EXPORT_SYMBOL(dev_mc_sync);
-
-
-/**
- * dev_mc_unsync - Remove synchronized addresses from the destination
- * device
- * @to: destination device
- * @from: source device
- *
- * Remove all addresses that were added to the destination device by
- * dev_mc_sync(). This function is intended to be called from the
- * dev->stop function of layered software devices.
- */
-void dev_mc_unsync(struct net_device *to, struct net_device *from)
-{
- netif_addr_lock_bh(from);
- netif_addr_lock(to);
-
- __dev_addr_unsync(&to->mc_list, &to->mc_count,
- &from->mc_list, &from->mc_count);
- __dev_set_rx_mode(to);
-
- netif_addr_unlock(to);
- netif_addr_unlock_bh(from);
-}
-EXPORT_SYMBOL(dev_mc_unsync);
-
-#ifdef CONFIG_PROC_FS
-static int dev_mc_seq_show(struct seq_file *seq, void *v)
-{
- struct dev_addr_list *m;
- struct net_device *dev = v;
-
- if (v == SEQ_START_TOKEN)
- return 0;
-
- netif_addr_lock_bh(dev);
- for (m = dev->mc_list; m; m = m->next) {
- int i;
-
- seq_printf(seq, "%-4d %-15s %-5d %-5d ", dev->ifindex,
- dev->name, m->dmi_users, m->dmi_gusers);
-
- for (i = 0; i < m->dmi_addrlen; i++)
- seq_printf(seq, "%02x", m->dmi_addr[i]);
-
- seq_putc(seq, '\n');
- }
- netif_addr_unlock_bh(dev);
- return 0;
-}
-
-static const struct seq_operations dev_mc_seq_ops = {
- .start = dev_seq_start,
- .next = dev_seq_next,
- .stop = dev_seq_stop,
- .show = dev_mc_seq_show,
-};
-
-static int dev_mc_seq_open(struct inode *inode, struct file *file)
-{
- return seq_open_net(inode, file, &dev_mc_seq_ops,
- sizeof(struct seq_net_private));
-}
-
-static const struct file_operations dev_mc_seq_fops = {
- .owner = THIS_MODULE,
- .open = dev_mc_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release_net,
-};
-
-#endif
-
-static int __net_init dev_mc_net_init(struct net *net)
-{
- if (!proc_net_fops_create(net, "dev_mcast", 0, &dev_mc_seq_fops))
- return -ENOMEM;
- return 0;
-}
-
-static void __net_exit dev_mc_net_exit(struct net *net)
-{
- proc_net_remove(net, "dev_mcast");
-}
-
-static struct pernet_operations __net_initdata dev_mc_net_ops = {
- .init = dev_mc_net_init,
- .exit = dev_mc_net_exit,
-};
-
-void __init dev_mcast_init(void)
-{
- register_pernet_subsys(&dev_mc_net_ops);
-}
-
-EXPORT_SYMBOL(dev_mc_add);
-EXPORT_SYMBOL(dev_mc_delete);
diff --git a/net/core/dst.c b/net/core/dst.c
index f307bc18f6a0..9920722cc82b 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -44,7 +44,7 @@ static atomic_t dst_total = ATOMIC_INIT(0);
*/
static struct {
spinlock_t lock;
- struct dst_entry *list;
+ struct dst_entry *list;
unsigned long timer_inc;
unsigned long timer_expires;
} dst_garbage = {
@@ -52,7 +52,7 @@ static struct {
.timer_inc = DST_GC_MAX,
};
static void dst_gc_task(struct work_struct *work);
-static void ___dst_free(struct dst_entry * dst);
+static void ___dst_free(struct dst_entry *dst);
static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task);
@@ -136,8 +136,8 @@ loop:
}
expires = dst_garbage.timer_expires;
/*
- * if the next desired timer is more than 4 seconds in the future
- * then round the timer to whole seconds
+ * if the next desired timer is more than 4 seconds in the
+ * future then round the timer to whole seconds
*/
if (expires > 4*HZ)
expires = round_jiffies_relative(expires);
@@ -152,7 +152,8 @@ loop:
" expires: %lu elapsed: %lu us\n",
atomic_read(&dst_total), delayed, work_performed,
expires,
- elapsed.tv_sec * USEC_PER_SEC + elapsed.tv_nsec / NSEC_PER_USEC);
+ elapsed.tv_sec * USEC_PER_SEC +
+ elapsed.tv_nsec / NSEC_PER_USEC);
#endif
}
@@ -163,9 +164,9 @@ int dst_discard(struct sk_buff *skb)
}
EXPORT_SYMBOL(dst_discard);
-void * dst_alloc(struct dst_ops * ops)
+void *dst_alloc(struct dst_ops *ops)
{
- struct dst_entry * dst;
+ struct dst_entry *dst;
if (ops->gc && atomic_read(&ops->entries) > ops->gc_thresh) {
if (ops->gc(ops))
@@ -185,19 +186,20 @@ void * dst_alloc(struct dst_ops * ops)
atomic_inc(&ops->entries);
return dst;
}
+EXPORT_SYMBOL(dst_alloc);
-static void ___dst_free(struct dst_entry * dst)
+static void ___dst_free(struct dst_entry *dst)
{
/* The first case (dev==NULL) is required, when
protocol module is unloaded.
*/
- if (dst->dev == NULL || !(dst->dev->flags&IFF_UP)) {
+ if (dst->dev == NULL || !(dst->dev->flags&IFF_UP))
dst->input = dst->output = dst_discard;
- }
dst->obsolete = 2;
}
+EXPORT_SYMBOL(__dst_free);
-void __dst_free(struct dst_entry * dst)
+void __dst_free(struct dst_entry *dst)
{
spin_lock_bh(&dst_garbage.lock);
___dst_free(dst);
@@ -262,15 +264,16 @@ again:
}
return NULL;
}
+EXPORT_SYMBOL(dst_destroy);
void dst_release(struct dst_entry *dst)
{
if (dst) {
- int newrefcnt;
+ int newrefcnt;
smp_mb__before_atomic_dec();
- newrefcnt = atomic_dec_return(&dst->__refcnt);
- WARN_ON(newrefcnt < 0);
+ newrefcnt = atomic_dec_return(&dst->__refcnt);
+ WARN_ON(newrefcnt < 0);
}
}
EXPORT_SYMBOL(dst_release);
@@ -283,8 +286,8 @@ EXPORT_SYMBOL(dst_release);
*
* Commented and originally written by Alexey.
*/
-static inline void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
- int unregister)
+static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
+ int unregister)
{
if (dst->ops->ifdown)
dst->ops->ifdown(dst, dev, unregister);
@@ -306,7 +309,8 @@ static inline void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
}
}
-static int dst_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
+static int dst_dev_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
{
struct net_device *dev = ptr;
struct dst_entry *dst, *last = NULL;
@@ -329,9 +333,8 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event, void
last->next = dst;
else
dst_busy_list = dst;
- for (; dst; dst = dst->next) {
+ for (; dst; dst = dst->next)
dst_ifdown(dst, dev, event != NETDEV_DOWN);
- }
mutex_unlock(&dst_gc_mutex);
break;
}
@@ -346,7 +349,3 @@ void __init dst_init(void)
{
register_netdevice_notifier(&dst_dev_notifier);
}
-
-EXPORT_SYMBOL(__dst_free);
-EXPORT_SYMBOL(dst_alloc);
-EXPORT_SYMBOL(dst_destroy);
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 9d55c57f318a..1a7db92037fa 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -18,8 +18,8 @@
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/bitops.h>
+#include <linux/uaccess.h>
#include <linux/slab.h>
-#include <asm/uaccess.h>
/*
* Some useful ethtool_ops methods that're device independent.
@@ -31,6 +31,7 @@ u32 ethtool_op_get_link(struct net_device *dev)
{
return netif_carrier_ok(dev) ? 1 : 0;
}
+EXPORT_SYMBOL(ethtool_op_get_link);
u32 ethtool_op_get_rx_csum(struct net_device *dev)
{
@@ -63,6 +64,7 @@ int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data)
return 0;
}
+EXPORT_SYMBOL(ethtool_op_set_tx_hw_csum);
int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data)
{
@@ -73,11 +75,13 @@ int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data)
return 0;
}
+EXPORT_SYMBOL(ethtool_op_set_tx_ipv6_csum);
u32 ethtool_op_get_sg(struct net_device *dev)
{
return (dev->features & NETIF_F_SG) != 0;
}
+EXPORT_SYMBOL(ethtool_op_get_sg);
int ethtool_op_set_sg(struct net_device *dev, u32 data)
{
@@ -88,11 +92,13 @@ int ethtool_op_set_sg(struct net_device *dev, u32 data)
return 0;
}
+EXPORT_SYMBOL(ethtool_op_set_sg);
u32 ethtool_op_get_tso(struct net_device *dev)
{
return (dev->features & NETIF_F_TSO) != 0;
}
+EXPORT_SYMBOL(ethtool_op_get_tso);
int ethtool_op_set_tso(struct net_device *dev, u32 data)
{
@@ -103,11 +109,13 @@ int ethtool_op_set_tso(struct net_device *dev, u32 data)
return 0;
}
+EXPORT_SYMBOL(ethtool_op_set_tso);
u32 ethtool_op_get_ufo(struct net_device *dev)
{
return (dev->features & NETIF_F_UFO) != 0;
}
+EXPORT_SYMBOL(ethtool_op_get_ufo);
int ethtool_op_set_ufo(struct net_device *dev, u32 data)
{
@@ -117,12 +125,13 @@ int ethtool_op_set_ufo(struct net_device *dev, u32 data)
dev->features &= ~NETIF_F_UFO;
return 0;
}
+EXPORT_SYMBOL(ethtool_op_set_ufo);
/* the following list of flags are the same as their associated
* NETIF_F_xxx values in include/linux/netdevice.h
*/
static const u32 flags_dup_features =
- (ETH_FLAG_LRO | ETH_FLAG_NTUPLE);
+ (ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH);
u32 ethtool_op_get_flags(struct net_device *dev)
{
@@ -133,6 +142,7 @@ u32 ethtool_op_get_flags(struct net_device *dev)
return dev->features & flags_dup_features;
}
+EXPORT_SYMBOL(ethtool_op_get_flags);
int ethtool_op_set_flags(struct net_device *dev, u32 data)
{
@@ -153,9 +163,15 @@ int ethtool_op_set_flags(struct net_device *dev, u32 data)
features &= ~NETIF_F_NTUPLE;
}
+ if (data & ETH_FLAG_RXHASH)
+ features |= NETIF_F_RXHASH;
+ else
+ features &= ~NETIF_F_RXHASH;
+
dev->features = features;
return 0;
}
+EXPORT_SYMBOL(ethtool_op_set_flags);
void ethtool_ntuple_flush(struct net_device *dev)
{
@@ -201,7 +217,8 @@ static int ethtool_set_settings(struct net_device *dev, void __user *useraddr)
return dev->ethtool_ops->set_settings(dev, &cmd);
}
-static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev,
+ void __user *useraddr)
{
struct ethtool_drvinfo info;
const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -241,7 +258,7 @@ static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev, void _
}
static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev,
- void __user *useraddr)
+ void __user *useraddr)
{
struct ethtool_sset_info info;
const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -300,7 +317,8 @@ out:
return ret;
}
-static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
+ void __user *useraddr)
{
struct ethtool_rxnfc cmd;
@@ -313,7 +331,8 @@ static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, void __u
return dev->ethtool_ops->set_rxnfc(dev, &cmd);
}
-static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
+ void __user *useraddr)
{
struct ethtool_rxnfc info;
const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -358,8 +377,8 @@ err_out:
}
static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list,
- struct ethtool_rx_ntuple_flow_spec *spec,
- struct ethtool_rx_ntuple_flow_spec_container *fsc)
+ struct ethtool_rx_ntuple_flow_spec *spec,
+ struct ethtool_rx_ntuple_flow_spec_container *fsc)
{
/* don't add filters forever */
@@ -385,7 +404,8 @@ static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list,
list->count++;
}
-static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev,
+ void __user *useraddr)
{
struct ethtool_rx_ntuple cmd;
const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -510,125 +530,125 @@ static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr)
case UDP_V4_FLOW:
case SCTP_V4_FLOW:
sprintf(p, "\tSrc IP addr: 0x%x\n",
- fsc->fs.h_u.tcp_ip4_spec.ip4src);
+ fsc->fs.h_u.tcp_ip4_spec.ip4src);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tSrc IP mask: 0x%x\n",
- fsc->fs.m_u.tcp_ip4_spec.ip4src);
+ fsc->fs.m_u.tcp_ip4_spec.ip4src);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tDest IP addr: 0x%x\n",
- fsc->fs.h_u.tcp_ip4_spec.ip4dst);
+ fsc->fs.h_u.tcp_ip4_spec.ip4dst);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tDest IP mask: 0x%x\n",
- fsc->fs.m_u.tcp_ip4_spec.ip4dst);
+ fsc->fs.m_u.tcp_ip4_spec.ip4dst);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tSrc Port: %d, mask: 0x%x\n",
- fsc->fs.h_u.tcp_ip4_spec.psrc,
- fsc->fs.m_u.tcp_ip4_spec.psrc);
+ fsc->fs.h_u.tcp_ip4_spec.psrc,
+ fsc->fs.m_u.tcp_ip4_spec.psrc);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tDest Port: %d, mask: 0x%x\n",
- fsc->fs.h_u.tcp_ip4_spec.pdst,
- fsc->fs.m_u.tcp_ip4_spec.pdst);
+ fsc->fs.h_u.tcp_ip4_spec.pdst,
+ fsc->fs.m_u.tcp_ip4_spec.pdst);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tTOS: %d, mask: 0x%x\n",
- fsc->fs.h_u.tcp_ip4_spec.tos,
- fsc->fs.m_u.tcp_ip4_spec.tos);
+ fsc->fs.h_u.tcp_ip4_spec.tos,
+ fsc->fs.m_u.tcp_ip4_spec.tos);
p += ETH_GSTRING_LEN;
num_strings++;
break;
case AH_ESP_V4_FLOW:
case ESP_V4_FLOW:
sprintf(p, "\tSrc IP addr: 0x%x\n",
- fsc->fs.h_u.ah_ip4_spec.ip4src);
+ fsc->fs.h_u.ah_ip4_spec.ip4src);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tSrc IP mask: 0x%x\n",
- fsc->fs.m_u.ah_ip4_spec.ip4src);
+ fsc->fs.m_u.ah_ip4_spec.ip4src);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tDest IP addr: 0x%x\n",
- fsc->fs.h_u.ah_ip4_spec.ip4dst);
+ fsc->fs.h_u.ah_ip4_spec.ip4dst);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tDest IP mask: 0x%x\n",
- fsc->fs.m_u.ah_ip4_spec.ip4dst);
+ fsc->fs.m_u.ah_ip4_spec.ip4dst);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tSPI: %d, mask: 0x%x\n",
- fsc->fs.h_u.ah_ip4_spec.spi,
- fsc->fs.m_u.ah_ip4_spec.spi);
+ fsc->fs.h_u.ah_ip4_spec.spi,
+ fsc->fs.m_u.ah_ip4_spec.spi);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tTOS: %d, mask: 0x%x\n",
- fsc->fs.h_u.ah_ip4_spec.tos,
- fsc->fs.m_u.ah_ip4_spec.tos);
+ fsc->fs.h_u.ah_ip4_spec.tos,
+ fsc->fs.m_u.ah_ip4_spec.tos);
p += ETH_GSTRING_LEN;
num_strings++;
break;
case IP_USER_FLOW:
sprintf(p, "\tSrc IP addr: 0x%x\n",
- fsc->fs.h_u.raw_ip4_spec.ip4src);
+ fsc->fs.h_u.raw_ip4_spec.ip4src);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tSrc IP mask: 0x%x\n",
- fsc->fs.m_u.raw_ip4_spec.ip4src);
+ fsc->fs.m_u.raw_ip4_spec.ip4src);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tDest IP addr: 0x%x\n",
- fsc->fs.h_u.raw_ip4_spec.ip4dst);
+ fsc->fs.h_u.raw_ip4_spec.ip4dst);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tDest IP mask: 0x%x\n",
- fsc->fs.m_u.raw_ip4_spec.ip4dst);
+ fsc->fs.m_u.raw_ip4_spec.ip4dst);
p += ETH_GSTRING_LEN;
num_strings++;
break;
case IPV4_FLOW:
sprintf(p, "\tSrc IP addr: 0x%x\n",
- fsc->fs.h_u.usr_ip4_spec.ip4src);
+ fsc->fs.h_u.usr_ip4_spec.ip4src);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tSrc IP mask: 0x%x\n",
- fsc->fs.m_u.usr_ip4_spec.ip4src);
+ fsc->fs.m_u.usr_ip4_spec.ip4src);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tDest IP addr: 0x%x\n",
- fsc->fs.h_u.usr_ip4_spec.ip4dst);
+ fsc->fs.h_u.usr_ip4_spec.ip4dst);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tDest IP mask: 0x%x\n",
- fsc->fs.m_u.usr_ip4_spec.ip4dst);
+ fsc->fs.m_u.usr_ip4_spec.ip4dst);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tL4 bytes: 0x%x, mask: 0x%x\n",
- fsc->fs.h_u.usr_ip4_spec.l4_4_bytes,
- fsc->fs.m_u.usr_ip4_spec.l4_4_bytes);
+ fsc->fs.h_u.usr_ip4_spec.l4_4_bytes,
+ fsc->fs.m_u.usr_ip4_spec.l4_4_bytes);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tTOS: %d, mask: 0x%x\n",
- fsc->fs.h_u.usr_ip4_spec.tos,
- fsc->fs.m_u.usr_ip4_spec.tos);
+ fsc->fs.h_u.usr_ip4_spec.tos,
+ fsc->fs.m_u.usr_ip4_spec.tos);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tIP Version: %d, mask: 0x%x\n",
- fsc->fs.h_u.usr_ip4_spec.ip_ver,
- fsc->fs.m_u.usr_ip4_spec.ip_ver);
+ fsc->fs.h_u.usr_ip4_spec.ip_ver,
+ fsc->fs.m_u.usr_ip4_spec.ip_ver);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tProtocol: %d, mask: 0x%x\n",
- fsc->fs.h_u.usr_ip4_spec.proto,
- fsc->fs.m_u.usr_ip4_spec.proto);
+ fsc->fs.h_u.usr_ip4_spec.proto,
+ fsc->fs.m_u.usr_ip4_spec.proto);
p += ETH_GSTRING_LEN;
num_strings++;
break;
};
sprintf(p, "\tVLAN: %d, mask: 0x%x\n",
- fsc->fs.vlan_tag, fsc->fs.vlan_tag_mask);
+ fsc->fs.vlan_tag, fsc->fs.vlan_tag_mask);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tUser-defined: 0x%Lx\n", fsc->fs.data);
@@ -641,7 +661,7 @@ static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr)
sprintf(p, "\tAction: Drop\n");
else
sprintf(p, "\tAction: Direct to queue %d\n",
- fsc->fs.action);
+ fsc->fs.action);
p += ETH_GSTRING_LEN;
num_strings++;
unknown_filter:
@@ -853,7 +873,8 @@ static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr)
return ret;
}
-static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev,
+ void __user *useraddr)
{
struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE };
@@ -867,7 +888,8 @@ static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev, void
return 0;
}
-static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev,
+ void __user *useraddr)
{
struct ethtool_coalesce coalesce;
@@ -971,6 +993,7 @@ static int ethtool_set_tx_csum(struct net_device *dev, char __user *useraddr)
return dev->ethtool_ops->set_tx_csum(dev, edata.data);
}
+EXPORT_SYMBOL(ethtool_op_set_tx_csum);
static int ethtool_set_rx_csum(struct net_device *dev, char __user *useraddr)
{
@@ -1042,7 +1065,7 @@ static int ethtool_get_gso(struct net_device *dev, char __user *useraddr)
edata.data = dev->features & NETIF_F_GSO;
if (copy_to_user(useraddr, &edata, sizeof(edata)))
- return -EFAULT;
+ return -EFAULT;
return 0;
}
@@ -1065,7 +1088,7 @@ static int ethtool_get_gro(struct net_device *dev, char __user *useraddr)
edata.data = dev->features & NETIF_F_GRO;
if (copy_to_user(useraddr, &edata, sizeof(edata)))
- return -EFAULT;
+ return -EFAULT;
return 0;
}
@@ -1277,7 +1300,8 @@ static int ethtool_set_value(struct net_device *dev, char __user *useraddr,
return actor(dev, edata.data);
}
-static noinline_for_stack int ethtool_flash_device(struct net_device *dev, char __user *useraddr)
+static noinline_for_stack int ethtool_flash_device(struct net_device *dev,
+ char __user *useraddr)
{
struct ethtool_flash efl;
@@ -1306,11 +1330,11 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
if (!dev->ethtool_ops)
return -EOPNOTSUPP;
- if (copy_from_user(&ethcmd, useraddr, sizeof (ethcmd)))
+ if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
return -EFAULT;
/* Allow some commands to be done by anyone */
- switch(ethcmd) {
+ switch (ethcmd) {
case ETHTOOL_GDRVINFO:
case ETHTOOL_GMSGLVL:
case ETHTOOL_GCOALESCE:
@@ -1338,10 +1362,11 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
return -EPERM;
}
- if (dev->ethtool_ops->begin)
- if ((rc = dev->ethtool_ops->begin(dev)) < 0)
+ if (dev->ethtool_ops->begin) {
+ rc = dev->ethtool_ops->begin(dev);
+ if (rc < 0)
return rc;
-
+ }
old_features = dev->features;
switch (ethcmd) {
@@ -1531,16 +1556,3 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
return rc;
}
-
-EXPORT_SYMBOL(ethtool_op_get_link);
-EXPORT_SYMBOL(ethtool_op_get_sg);
-EXPORT_SYMBOL(ethtool_op_get_tso);
-EXPORT_SYMBOL(ethtool_op_set_sg);
-EXPORT_SYMBOL(ethtool_op_set_tso);
-EXPORT_SYMBOL(ethtool_op_set_tx_csum);
-EXPORT_SYMBOL(ethtool_op_set_tx_hw_csum);
-EXPORT_SYMBOL(ethtool_op_set_tx_ipv6_csum);
-EXPORT_SYMBOL(ethtool_op_set_ufo);
-EXPORT_SYMBOL(ethtool_op_get_ufo);
-EXPORT_SYMBOL(ethtool_op_set_flags);
-EXPORT_SYMBOL(ethtool_op_get_flags);
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index d2c3e7dc2e5f..42e84e08a1be 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -39,6 +39,24 @@ int fib_default_rule_add(struct fib_rules_ops *ops,
}
EXPORT_SYMBOL(fib_default_rule_add);
+u32 fib_default_rule_pref(struct fib_rules_ops *ops)
+{
+ struct list_head *pos;
+ struct fib_rule *rule;
+
+ if (!list_empty(&ops->rules_list)) {
+ pos = ops->rules_list.next;
+ if (pos->next != &ops->rules_list) {
+ rule = list_entry(pos->next, struct fib_rule, list);
+ if (rule->pref)
+ return rule->pref - 1;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(fib_default_rule_pref);
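+
+/*
+ * Worked example (assuming the standard IPv4 rule set): with local
+ * (pref 0), main (pref 32766) and default (pref 32767) installed, the
+ * first entry is "local" and the second is "main", so a rule added
+ * without an explicit preference gets 32766 - 1 = 32765 and slots in
+ * just ahead of "main". With fewer than two rules, or a second rule
+ * whose pref is 0, the fallback is 0.
+ */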
+
static void notify_rule_change(int event, struct fib_rule *rule,
struct fib_rules_ops *ops, struct nlmsghdr *nlh,
u32 pid);
@@ -104,12 +122,12 @@ errout:
}
struct fib_rules_ops *
-fib_rules_register(struct fib_rules_ops *tmpl, struct net *net)
+fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net)
{
struct fib_rules_ops *ops;
int err;
- ops = kmemdup(tmpl, sizeof (*ops), GFP_KERNEL);
+ ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL);
if (ops == NULL)
return ERR_PTR(-ENOMEM);
@@ -124,7 +142,6 @@ fib_rules_register(struct fib_rules_ops *tmpl, struct net *net)
return ops;
}
-
EXPORT_SYMBOL_GPL(fib_rules_register);
void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
@@ -158,7 +175,6 @@ void fib_rules_unregister(struct fib_rules_ops *ops)
call_rcu(&ops->rcu, fib_rules_put_rcu);
}
-
EXPORT_SYMBOL_GPL(fib_rules_unregister);
static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
@@ -221,7 +237,6 @@ out:
return err;
}
-
EXPORT_SYMBOL_GPL(fib_rules_lookup);
static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,
@@ -520,6 +535,7 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
return -EMSGSIZE;
frh = nlmsg_data(nlh);
+ frh->family = ops->family;
frh->table = rule->table;
NLA_PUT_U32(skb, FRA_TABLE, rule->table);
frh->res1 = 0;
@@ -614,7 +630,7 @@ static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
break;
cb->args[1] = 0;
- skip:
+skip:
idx++;
}
rcu_read_unlock();
@@ -686,7 +702,6 @@ static int fib_rules_event(struct notifier_block *this, unsigned long event,
struct fib_rules_ops *ops;
ASSERT_RTNL();
- rcu_read_lock();
switch (event) {
case NETDEV_REGISTER:
@@ -700,8 +715,6 @@ static int fib_rules_event(struct notifier_block *this, unsigned long event,
break;
}
- rcu_read_unlock();
-
return NOTIFY_DONE;
}
diff --git a/net/core/filter.c b/net/core/filter.c
index ff943bed21af..da69fb728d32 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -302,6 +302,8 @@ load_b:
A = skb->pkt_type;
continue;
case SKF_AD_IFINDEX:
+ if (!skb->dev)
+ return 0;
A = skb->dev->ifindex;
continue;
case SKF_AD_MARK:
@@ -310,6 +312,11 @@ load_b:
case SKF_AD_QUEUE:
A = skb->queue_mapping;
continue;
+ case SKF_AD_HATYPE:
+ if (!skb->dev)
+ return 0;
+ A = skb->dev->type;
+ continue;
case SKF_AD_NLATTR: {
struct nlattr *nla;
diff --git a/net/core/flow.c b/net/core/flow.c
index 96015871ecea..161900674009 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -26,113 +26,158 @@
#include <linux/security.h>
struct flow_cache_entry {
- struct flow_cache_entry *next;
- u16 family;
- u8 dir;
- u32 genid;
- struct flowi key;
- void *object;
- atomic_t *object_ref;
+ union {
+ struct hlist_node hlist;
+ struct list_head gc_list;
+ } u;
+ u16 family;
+ u8 dir;
+ u32 genid;
+ struct flowi key;
+ struct flow_cache_object *object;
};
-atomic_t flow_cache_genid = ATOMIC_INIT(0);
-
-static u32 flow_hash_shift;
-#define flow_hash_size (1 << flow_hash_shift)
-static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
-
-#define flow_table(cpu) (per_cpu(flow_tables, cpu))
-
-static struct kmem_cache *flow_cachep __read_mostly;
+struct flow_cache_percpu {
+ struct hlist_head *hash_table;
+ int hash_count;
+ u32 hash_rnd;
+ int hash_rnd_recalc;
+ struct tasklet_struct flush_tasklet;
+};
-static int flow_lwm, flow_hwm;
+struct flow_flush_info {
+ struct flow_cache *cache;
+ atomic_t cpuleft;
+ struct completion completion;
+};
-struct flow_percpu_info {
- int hash_rnd_recalc;
- u32 hash_rnd;
- int count;
+struct flow_cache {
+ u32 hash_shift;
+ unsigned long order;
+ struct flow_cache_percpu *percpu;
+ struct notifier_block hotcpu_notifier;
+ int low_watermark;
+ int high_watermark;
+ struct timer_list rnd_timer;
};
-static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
-#define flow_hash_rnd_recalc(cpu) \
- (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
-#define flow_hash_rnd(cpu) \
- (per_cpu(flow_hash_info, cpu).hash_rnd)
-#define flow_count(cpu) \
- (per_cpu(flow_hash_info, cpu).count)
+atomic_t flow_cache_genid = ATOMIC_INIT(0);
+static struct flow_cache flow_cache_global;
+static struct kmem_cache *flow_cachep;
-static struct timer_list flow_hash_rnd_timer;
+static DEFINE_SPINLOCK(flow_cache_gc_lock);
+static LIST_HEAD(flow_cache_gc_list);
-#define FLOW_HASH_RND_PERIOD (10 * 60 * HZ)
-
-struct flow_flush_info {
- atomic_t cpuleft;
- struct completion completion;
-};
-static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
-
-#define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
+#define flow_cache_hash_size(cache) (1 << (cache)->hash_shift)
+#define FLOW_HASH_RND_PERIOD (10 * 60 * HZ)
static void flow_cache_new_hashrnd(unsigned long arg)
{
+ struct flow_cache *fc = (void *) arg;
int i;
for_each_possible_cpu(i)
- flow_hash_rnd_recalc(i) = 1;
+ per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1;
- flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
- add_timer(&flow_hash_rnd_timer);
+ fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
+ add_timer(&fc->rnd_timer);
+}
+
+static int flow_entry_valid(struct flow_cache_entry *fle)
+{
+ if (atomic_read(&flow_cache_genid) != fle->genid)
+ return 0;
+ if (fle->object && !fle->object->ops->check(fle->object))
+ return 0;
+ return 1;
}
-static void flow_entry_kill(int cpu, struct flow_cache_entry *fle)
+static void flow_entry_kill(struct flow_cache_entry *fle)
{
if (fle->object)
- atomic_dec(fle->object_ref);
+ fle->object->ops->delete(fle->object);
kmem_cache_free(flow_cachep, fle);
- flow_count(cpu)--;
}
-static void __flow_cache_shrink(int cpu, int shrink_to)
+static void flow_cache_gc_task(struct work_struct *work)
{
- struct flow_cache_entry *fle, **flp;
- int i;
+ struct list_head gc_list;
+ struct flow_cache_entry *fce, *n;
- for (i = 0; i < flow_hash_size; i++) {
- int k = 0;
+ INIT_LIST_HEAD(&gc_list);
+ spin_lock_bh(&flow_cache_gc_lock);
+ list_splice_tail_init(&flow_cache_gc_list, &gc_list);
+ spin_unlock_bh(&flow_cache_gc_lock);
- flp = &flow_table(cpu)[i];
- while ((fle = *flp) != NULL && k < shrink_to) {
- k++;
- flp = &fle->next;
- }
- while ((fle = *flp) != NULL) {
- *flp = fle->next;
- flow_entry_kill(cpu, fle);
- }
+ list_for_each_entry_safe(fce, n, &gc_list, u.gc_list)
+ flow_entry_kill(fce);
+}
+static DECLARE_WORK(flow_cache_gc_work, flow_cache_gc_task);
+
+static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
+ int deleted, struct list_head *gc_list)
+{
+ if (deleted) {
+ fcp->hash_count -= deleted;
+ spin_lock_bh(&flow_cache_gc_lock);
+ list_splice_tail(gc_list, &flow_cache_gc_list);
+ spin_unlock_bh(&flow_cache_gc_lock);
+ schedule_work(&flow_cache_gc_work);
}
}
-static void flow_cache_shrink(int cpu)
+static void __flow_cache_shrink(struct flow_cache *fc,
+ struct flow_cache_percpu *fcp,
+ int shrink_to)
{
- int shrink_to = flow_lwm / flow_hash_size;
+ struct flow_cache_entry *fle;
+ struct hlist_node *entry, *tmp;
+ LIST_HEAD(gc_list);
+ int i, deleted = 0;
+
+ for (i = 0; i < flow_cache_hash_size(fc); i++) {
+ int saved = 0;
+
+ hlist_for_each_entry_safe(fle, entry, tmp,
+ &fcp->hash_table[i], u.hlist) {
+ if (saved < shrink_to &&
+ flow_entry_valid(fle)) {
+ saved++;
+ } else {
+ deleted++;
+ hlist_del(&fle->u.hlist);
+ list_add_tail(&fle->u.gc_list, &gc_list);
+ }
+ }
+ }
- __flow_cache_shrink(cpu, shrink_to);
+ flow_cache_queue_garbage(fcp, deleted, &gc_list);
}
-static void flow_new_hash_rnd(int cpu)
+static void flow_cache_shrink(struct flow_cache *fc,
+ struct flow_cache_percpu *fcp)
{
- get_random_bytes(&flow_hash_rnd(cpu), sizeof(u32));
- flow_hash_rnd_recalc(cpu) = 0;
+ int shrink_to = fc->low_watermark / flow_cache_hash_size(fc);
- __flow_cache_shrink(cpu, 0);
+ __flow_cache_shrink(fc, fcp, shrink_to);
}
-static u32 flow_hash_code(struct flowi *key, int cpu)
+static void flow_new_hash_rnd(struct flow_cache *fc,
+ struct flow_cache_percpu *fcp)
+{
+ get_random_bytes(&fcp->hash_rnd, sizeof(u32));
+ fcp->hash_rnd_recalc = 0;
+ __flow_cache_shrink(fc, fcp, 0);
+}
+
+static u32 flow_hash_code(struct flow_cache *fc,
+ struct flow_cache_percpu *fcp,
+ struct flowi *key)
{
u32 *k = (u32 *) key;
- return (jhash2(k, (sizeof(*key) / sizeof(u32)), flow_hash_rnd(cpu)) &
- (flow_hash_size - 1));
+ return (jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd)
+ & (flow_cache_hash_size(fc) - 1));
}
#if (BITS_PER_LONG == 64)
@@ -165,114 +210,117 @@ static int flow_key_compare(struct flowi *key1, struct flowi *key2)
return 0;
}
-void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
- flow_resolve_t resolver)
+struct flow_cache_object *
+flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
+ flow_resolve_t resolver, void *ctx)
{
- struct flow_cache_entry *fle, **head;
+ struct flow_cache *fc = &flow_cache_global;
+ struct flow_cache_percpu *fcp;
+ struct flow_cache_entry *fle, *tfle;
+ struct hlist_node *entry;
+ struct flow_cache_object *flo;
unsigned int hash;
- int cpu;
local_bh_disable();
- cpu = smp_processor_id();
+ fcp = per_cpu_ptr(fc->percpu, smp_processor_id());
fle = NULL;
+ flo = NULL;
/* Packet really early in init? Making flow_cache_init a
* pre-smp initcall would solve this. --RR */
- if (!flow_table(cpu))
+ if (!fcp->hash_table)
goto nocache;
- if (flow_hash_rnd_recalc(cpu))
- flow_new_hash_rnd(cpu);
- hash = flow_hash_code(key, cpu);
+ if (fcp->hash_rnd_recalc)
+ flow_new_hash_rnd(fc, fcp);
- head = &flow_table(cpu)[hash];
- for (fle = *head; fle; fle = fle->next) {
- if (fle->family == family &&
- fle->dir == dir &&
- flow_key_compare(key, &fle->key) == 0) {
- if (fle->genid == atomic_read(&flow_cache_genid)) {
- void *ret = fle->object;
-
- if (ret)
- atomic_inc(fle->object_ref);
- local_bh_enable();
-
- return ret;
- }
+ hash = flow_hash_code(fc, fcp, key);
+ hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) {
+ if (tfle->family == family &&
+ tfle->dir == dir &&
+ flow_key_compare(key, &tfle->key) == 0) {
+ fle = tfle;
break;
}
}
- if (!fle) {
- if (flow_count(cpu) > flow_hwm)
- flow_cache_shrink(cpu);
+ if (unlikely(!fle)) {
+ if (fcp->hash_count > fc->high_watermark)
+ flow_cache_shrink(fc, fcp);
fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
if (fle) {
- fle->next = *head;
- *head = fle;
fle->family = family;
fle->dir = dir;
memcpy(&fle->key, key, sizeof(*key));
fle->object = NULL;
- flow_count(cpu)++;
+ hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
+ fcp->hash_count++;
}
+ } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
+ flo = fle->object;
+ if (!flo)
+ goto ret_object;
+ flo = flo->ops->get(flo);
+ if (flo)
+ goto ret_object;
+ } else if (fle->object) {
+ flo = fle->object;
+ flo->ops->delete(flo);
+ fle->object = NULL;
}
nocache:
- {
- int err;
- void *obj;
- atomic_t *obj_ref;
-
- err = resolver(net, key, family, dir, &obj, &obj_ref);
-
- if (fle && !err) {
- fle->genid = atomic_read(&flow_cache_genid);
-
- if (fle->object)
- atomic_dec(fle->object_ref);
-
- fle->object = obj;
- fle->object_ref = obj_ref;
- if (obj)
- atomic_inc(fle->object_ref);
- }
- local_bh_enable();
-
- if (err)
- obj = ERR_PTR(err);
- return obj;
+ flo = NULL;
+ if (fle) {
+ flo = fle->object;
+ fle->object = NULL;
}
+ flo = resolver(net, key, family, dir, flo, ctx);
+ if (fle) {
+ fle->genid = atomic_read(&flow_cache_genid);
+ if (!IS_ERR(flo))
+ fle->object = flo;
+ else
+ fle->genid--;
+ } else {
+ if (flo && !IS_ERR(flo))
+ flo->ops->delete(flo);
+ }
+ret_object:
+ local_bh_enable();
+ return flo;
}
static void flow_cache_flush_tasklet(unsigned long data)
{
struct flow_flush_info *info = (void *)data;
- int i;
- int cpu;
-
- cpu = smp_processor_id();
- for (i = 0; i < flow_hash_size; i++) {
- struct flow_cache_entry *fle;
-
- fle = flow_table(cpu)[i];
- for (; fle; fle = fle->next) {
- unsigned genid = atomic_read(&flow_cache_genid);
-
- if (!fle->object || fle->genid == genid)
+ struct flow_cache *fc = info->cache;
+ struct flow_cache_percpu *fcp;
+ struct flow_cache_entry *fle;
+ struct hlist_node *entry, *tmp;
+ LIST_HEAD(gc_list);
+ int i, deleted = 0;
+
+ fcp = per_cpu_ptr(fc->percpu, smp_processor_id());
+ for (i = 0; i < flow_cache_hash_size(fc); i++) {
+ hlist_for_each_entry_safe(fle, entry, tmp,
+ &fcp->hash_table[i], u.hlist) {
+ if (flow_entry_valid(fle))
continue;
- fle->object = NULL;
- atomic_dec(fle->object_ref);
+ deleted++;
+ hlist_del(&fle->u.hlist);
+ list_add_tail(&fle->u.gc_list, &gc_list);
}
}
+ flow_cache_queue_garbage(fcp, deleted, &gc_list);
+
if (atomic_dec_and_test(&info->cpuleft))
complete(&info->completion);
}
-static void flow_cache_flush_per_cpu(void *) __attribute__((__unused__));
static void flow_cache_flush_per_cpu(void *data)
{
struct flow_flush_info *info = data;
@@ -280,8 +328,7 @@ static void flow_cache_flush_per_cpu(void *data)
struct tasklet_struct *tasklet;
cpu = smp_processor_id();
-
- tasklet = flow_flush_tasklet(cpu);
+ tasklet = &per_cpu_ptr(info->cache->percpu, cpu)->flush_tasklet;
tasklet->data = (unsigned long)info;
tasklet_schedule(tasklet);
}
@@ -294,6 +341,7 @@ void flow_cache_flush(void)
/* Don't want cpus going down or up during this. */
get_online_cpus();
mutex_lock(&flow_flush_sem);
+ info.cache = &flow_cache_global;
atomic_set(&info.cpuleft, num_online_cpus());
init_completion(&info.completion);
@@ -307,62 +355,75 @@ void flow_cache_flush(void)
put_online_cpus();
}
-static void __init flow_cache_cpu_prepare(int cpu)
+static void __init flow_cache_cpu_prepare(struct flow_cache *fc,
+ struct flow_cache_percpu *fcp)
{
- struct tasklet_struct *tasklet;
- unsigned long order;
-
- for (order = 0;
- (PAGE_SIZE << order) <
- (sizeof(struct flow_cache_entry *)*flow_hash_size);
- order++)
- /* NOTHING */;
-
- flow_table(cpu) = (struct flow_cache_entry **)
- __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
- if (!flow_table(cpu))
- panic("NET: failed to allocate flow cache order %lu\n", order);
-
- flow_hash_rnd_recalc(cpu) = 1;
- flow_count(cpu) = 0;
-
- tasklet = flow_flush_tasklet(cpu);
- tasklet_init(tasklet, flow_cache_flush_tasklet, 0);
+ fcp->hash_table = (struct hlist_head *)
+ __get_free_pages(GFP_KERNEL|__GFP_ZERO, fc->order);
+ if (!fcp->hash_table)
+ panic("NET: failed to allocate flow cache order %lu\n", fc->order);
+
+ fcp->hash_rnd_recalc = 1;
+ fcp->hash_count = 0;
+ tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
}
static int flow_cache_cpu(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
+ struct flow_cache *fc = container_of(nfb, struct flow_cache, hotcpu_notifier);
+ int cpu = (unsigned long) hcpu;
+ struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
+
if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
- __flow_cache_shrink((unsigned long)hcpu, 0);
+ __flow_cache_shrink(fc, fcp, 0);
return NOTIFY_OK;
}
-static int __init flow_cache_init(void)
+static int flow_cache_init(struct flow_cache *fc)
{
+ unsigned long order;
int i;
- flow_cachep = kmem_cache_create("flow_cache",
- sizeof(struct flow_cache_entry),
- 0, SLAB_PANIC,
- NULL);
- flow_hash_shift = 10;
- flow_lwm = 2 * flow_hash_size;
- flow_hwm = 4 * flow_hash_size;
+ fc->hash_shift = 10;
+ fc->low_watermark = 2 * flow_cache_hash_size(fc);
+ fc->high_watermark = 4 * flow_cache_hash_size(fc);
+
+ for (order = 0;
+ (PAGE_SIZE << order) <
+ (sizeof(struct hlist_head)*flow_cache_hash_size(fc));
+ order++)
+ /* NOTHING */;
+ fc->order = order;
+ fc->percpu = alloc_percpu(struct flow_cache_percpu);
- setup_timer(&flow_hash_rnd_timer, flow_cache_new_hashrnd, 0);
- flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
- add_timer(&flow_hash_rnd_timer);
+ setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
+ (unsigned long) fc);
+ fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
+ add_timer(&fc->rnd_timer);
for_each_possible_cpu(i)
- flow_cache_cpu_prepare(i);
+ flow_cache_cpu_prepare(fc, per_cpu_ptr(fc->percpu, i));
+
+ fc->hotcpu_notifier = (struct notifier_block){
+ .notifier_call = flow_cache_cpu,
+ };
+ register_hotcpu_notifier(&fc->hotcpu_notifier);
- hotcpu_notifier(flow_cache_cpu, 0);
return 0;
}
-module_init(flow_cache_init);
+static int __init flow_cache_init_global(void)
+{
+ flow_cachep = kmem_cache_create("flow_cache",
+ sizeof(struct flow_cache_entry),
+ 0, SLAB_PANIC, NULL);
+
+ return flow_cache_init(&flow_cache_global);
+}
+
+module_init(flow_cache_init_global);
EXPORT_SYMBOL(flow_cache_genid);
EXPORT_SYMBOL(flow_cache_lookup);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 59cfc7d8fc45..c57c4b228bb5 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -17,6 +17,7 @@
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/wireless.h>
+#include <linux/vmalloc.h>
#include <net/wext.h>
#include "net-sysfs.h"
@@ -467,6 +468,304 @@ static struct attribute_group wireless_group = {
};
#endif
+#ifdef CONFIG_RPS
+/*
+ * RX queue sysfs structures and functions.
+ */
+struct rx_queue_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct netdev_rx_queue *queue,
+ struct rx_queue_attribute *attr, char *buf);
+ ssize_t (*store)(struct netdev_rx_queue *queue,
+ struct rx_queue_attribute *attr, const char *buf, size_t len);
+};
+#define to_rx_queue_attr(_attr) container_of(_attr, \
+ struct rx_queue_attribute, attr)
+
+#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)
+
+static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
+ struct netdev_rx_queue *queue = to_rx_queue(kobj);
+
+ if (!attribute->show)
+ return -EIO;
+
+ return attribute->show(queue, attribute, buf);
+}
+
+static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
+ struct netdev_rx_queue *queue = to_rx_queue(kobj);
+
+ if (!attribute->store)
+ return -EIO;
+
+ return attribute->store(queue, attribute, buf, count);
+}
+
+static struct sysfs_ops rx_queue_sysfs_ops = {
+ .show = rx_queue_attr_show,
+ .store = rx_queue_attr_store,
+};
+
+static ssize_t show_rps_map(struct netdev_rx_queue *queue,
+ struct rx_queue_attribute *attribute, char *buf)
+{
+ struct rps_map *map;
+ cpumask_var_t mask;
+ size_t len = 0;
+ int i;
+
+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ rcu_read_lock();
+ map = rcu_dereference(queue->rps_map);
+ if (map)
+ for (i = 0; i < map->len; i++)
+ cpumask_set_cpu(map->cpus[i], mask);
+
+ len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
+ if (PAGE_SIZE - len < 3) {
+ rcu_read_unlock();
+ free_cpumask_var(mask);
+ return -EINVAL;
+ }
+ rcu_read_unlock();
+
+ free_cpumask_var(mask);
+ len += sprintf(buf + len, "\n");
+ return len;
+}
+
+static void rps_map_release(struct rcu_head *rcu)
+{
+ struct rps_map *map = container_of(rcu, struct rps_map, rcu);
+
+ kfree(map);
+}
+
+static ssize_t store_rps_map(struct netdev_rx_queue *queue,
+ struct rx_queue_attribute *attribute,
+ const char *buf, size_t len)
+{
+ struct rps_map *old_map, *map;
+ cpumask_var_t mask;
+ int err, cpu, i;
+ static DEFINE_SPINLOCK(rps_map_lock);
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
+ if (err) {
+ free_cpumask_var(mask);
+ return err;
+ }
+
+ map = kzalloc(max_t(unsigned,
+ RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
+ GFP_KERNEL);
+ if (!map) {
+ free_cpumask_var(mask);
+ return -ENOMEM;
+ }
+
+ i = 0;
+ for_each_cpu_and(cpu, mask, cpu_online_mask)
+ map->cpus[i++] = cpu;
+
+ if (i)
+ map->len = i;
+ else {
+ kfree(map);
+ map = NULL;
+ }
+
+ spin_lock(&rps_map_lock);
+ old_map = queue->rps_map;
+ rcu_assign_pointer(queue->rps_map, map);
+ spin_unlock(&rps_map_lock);
+
+ if (old_map)
+ call_rcu(&old_map->rcu, rps_map_release);
+
+ free_cpumask_var(mask);
+ return len;
+}
+
+static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
+ struct rx_queue_attribute *attr,
+ char *buf)
+{
+ struct rps_dev_flow_table *flow_table;
+ unsigned int val = 0;
+
+ rcu_read_lock();
+ flow_table = rcu_dereference(queue->rps_flow_table);
+ if (flow_table)
+ val = flow_table->mask + 1;
+ rcu_read_unlock();
+
+ return sprintf(buf, "%u\n", val);
+}
+
+static void rps_dev_flow_table_release_work(struct work_struct *work)
+{
+ struct rps_dev_flow_table *table = container_of(work,
+ struct rps_dev_flow_table, free_work);
+
+ vfree(table);
+}
+
+static void rps_dev_flow_table_release(struct rcu_head *rcu)
+{
+ struct rps_dev_flow_table *table = container_of(rcu,
+ struct rps_dev_flow_table, rcu);
+
+ INIT_WORK(&table->free_work, rps_dev_flow_table_release_work);
+ schedule_work(&table->free_work);
+}
+
+static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
+ struct rx_queue_attribute *attr,
+ const char *buf, size_t len)
+{
+ unsigned int count;
+ char *endp;
+ struct rps_dev_flow_table *table, *old_table;
+ static DEFINE_SPINLOCK(rps_dev_flow_lock);
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ count = simple_strtoul(buf, &endp, 0);
+ if (endp == buf)
+ return -EINVAL;
+
+ if (count) {
+ int i;
+
+ if (count > 1<<30) {
+ /* Enforce a limit to prevent overflow */
+ return -EINVAL;
+ }
+ count = roundup_pow_of_two(count);
+ table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(count));
+ if (!table)
+ return -ENOMEM;
+
+ table->mask = count - 1;
+ for (i = 0; i < count; i++)
+ table->flows[i].cpu = RPS_NO_CPU;
+ } else
+ table = NULL;
+
+ spin_lock(&rps_dev_flow_lock);
+ old_table = queue->rps_flow_table;
+ rcu_assign_pointer(queue->rps_flow_table, table);
+ spin_unlock(&rps_dev_flow_lock);
+
+ if (old_table)
+ call_rcu(&old_table->rcu, rps_dev_flow_table_release);
+
+ return len;
+}
+
+static struct rx_queue_attribute rps_cpus_attribute =
+ __ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map);
+
+
+static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute =
+ __ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR,
+ show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
+
+static struct attribute *rx_queue_default_attrs[] = {
+ &rps_cpus_attribute.attr,
+ &rps_dev_flow_table_cnt_attribute.attr,
+ NULL
+};
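+
+/*
+ * Once the rx-queue kobjects below are registered, the two attributes
+ * above appear per queue, e.g. (device and queue names illustrative):
+ *
+ *     /sys/class/net/eth0/queues/rx-0/rps_cpus
+ *     /sys/class/net/eth0/queues/rx-0/rps_flow_cnt
+ *
+ * Writing a CPU bitmap such as "f" to rps_cpus selects the CPUs eligible
+ * for steering packets from that queue; writing a count to rps_flow_cnt
+ * sizes the per-queue flow table (rounded up to a power of two).
+ */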
+
+static void rx_queue_release(struct kobject *kobj)
+{
+ struct netdev_rx_queue *queue = to_rx_queue(kobj);
+ struct netdev_rx_queue *first = queue->first;
+
+ if (queue->rps_map)
+ call_rcu(&queue->rps_map->rcu, rps_map_release);
+
+ if (queue->rps_flow_table)
+ call_rcu(&queue->rps_flow_table->rcu,
+ rps_dev_flow_table_release);
+
+ if (atomic_dec_and_test(&first->count))
+ kfree(first);
+}
+
+static struct kobj_type rx_queue_ktype = {
+ .sysfs_ops = &rx_queue_sysfs_ops,
+ .release = rx_queue_release,
+ .default_attrs = rx_queue_default_attrs,
+};
+
+static int rx_queue_add_kobject(struct net_device *net, int index)
+{
+ struct netdev_rx_queue *queue = net->_rx + index;
+ struct kobject *kobj = &queue->kobj;
+ int error = 0;
+
+ kobj->kset = net->queues_kset;
+ error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
+ "rx-%u", index);
+ if (error) {
+ kobject_put(kobj);
+ return error;
+ }
+
+ kobject_uevent(kobj, KOBJ_ADD);
+
+ return error;
+}
+
+static int rx_queue_register_kobjects(struct net_device *net)
+{
+ int i;
+ int error = 0;
+
+ net->queues_kset = kset_create_and_add("queues",
+ NULL, &net->dev.kobj);
+ if (!net->queues_kset)
+ return -ENOMEM;
+ for (i = 0; i < net->num_rx_queues; i++) {
+ error = rx_queue_add_kobject(net, i);
+ if (error)
+ break;
+ }
+
+ if (error)
+ while (--i >= 0)
+ kobject_put(&net->_rx[i].kobj);
+
+ return error;
+}
+
+static void rx_queue_remove_kobjects(struct net_device *net)
+{
+ int i;
+
+ for (i = 0; i < net->num_rx_queues; i++)
+ kobject_put(&net->_rx[i].kobj);
+ kset_unregister(net->queues_kset);
+}
+#endif /* CONFIG_RPS */
#endif /* CONFIG_SYSFS */
#ifdef CONFIG_HOTPLUG
@@ -530,6 +829,10 @@ void netdev_unregister_kobject(struct net_device * net)
if (!net_eq(dev_net(net), &init_net))
return;
+#ifdef CONFIG_RPS
+ rx_queue_remove_kobjects(net);
+#endif
+
device_del(dev);
}
@@ -538,6 +841,7 @@ int netdev_register_kobject(struct net_device *net)
{
struct device *dev = &(net->dev);
const struct attribute_group **groups = net->sysfs_groups;
+ int error = 0;
dev->class = &net_class;
dev->platform_data = net;
@@ -564,7 +868,19 @@ int netdev_register_kobject(struct net_device *net)
if (!net_eq(dev_net(net), &init_net))
return 0;
- return device_add(dev);
+ error = device_add(dev);
+ if (error)
+ return error;
+
+#ifdef CONFIG_RPS
+ error = rx_queue_register_kobjects(net);
+ if (error) {
+ device_del(dev);
+ return error;
+ }
+#endif
+
+ return error;
}
int netdev_class_create_file(struct class_attribute *class_attr)
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index bd8c4712ea24..c988e685433a 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -27,6 +27,51 @@ EXPORT_SYMBOL(init_net);
#define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */
+static void net_generic_release(struct rcu_head *rcu)
+{
+ struct net_generic *ng;
+
+ ng = container_of(rcu, struct net_generic, rcu);
+ kfree(ng);
+}
+
+static int net_assign_generic(struct net *net, int id, void *data)
+{
+ struct net_generic *ng, *old_ng;
+
+ BUG_ON(!mutex_is_locked(&net_mutex));
+ BUG_ON(id == 0);
+
+ ng = old_ng = net->gen;
+ if (old_ng->len >= id)
+ goto assign;
+
+ ng = kzalloc(sizeof(struct net_generic) +
+ id * sizeof(void *), GFP_KERNEL);
+ if (ng == NULL)
+ return -ENOMEM;
+
+ /*
+ * Some synchronisation notes:
+ *
+ * The net_generic explores the net->gen array inside rcu
+ * read section. Besides once set the net->gen->ptr[x]
+ * pointer never changes (see rules in netns/generic.h).
+ *
+ * That said, we simply duplicate this array and schedule
+ * the old copy for kfree after a grace period.
+ */
+
+ ng->len = id;
+ memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*));
+
+ rcu_assign_pointer(net->gen, ng);
+ call_rcu(&old_ng->rcu, net_generic_release);
+assign:
+ ng->ptr[id - 1] = data;
+ return 0;
+}
+
static int ops_init(const struct pernet_operations *ops, struct net *net)
{
int err;
@@ -469,10 +514,10 @@ EXPORT_SYMBOL_GPL(register_pernet_subsys);
* addition run the exit method for all existing network
* namespaces.
*/
-void unregister_pernet_subsys(struct pernet_operations *module)
+void unregister_pernet_subsys(struct pernet_operations *ops)
{
mutex_lock(&net_mutex);
- unregister_pernet_operations(module);
+ unregister_pernet_operations(ops);
mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);
@@ -526,49 +571,3 @@ void unregister_pernet_device(struct pernet_operations *ops)
mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);
-
-static void net_generic_release(struct rcu_head *rcu)
-{
- struct net_generic *ng;
-
- ng = container_of(rcu, struct net_generic, rcu);
- kfree(ng);
-}
-
-int net_assign_generic(struct net *net, int id, void *data)
-{
- struct net_generic *ng, *old_ng;
-
- BUG_ON(!mutex_is_locked(&net_mutex));
- BUG_ON(id == 0);
-
- ng = old_ng = net->gen;
- if (old_ng->len >= id)
- goto assign;
-
- ng = kzalloc(sizeof(struct net_generic) +
- id * sizeof(void *), GFP_KERNEL);
- if (ng == NULL)
- return -ENOMEM;
-
- /*
- * Some synchronisation notes:
- *
- * The net_generic explores the net->gen array inside rcu
- * read section. Besides once set the net->gen->ptr[x]
- * pointer never changes (see rules in netns/generic.h).
- *
- * That said, we simply duplicate this array and schedule
- * the old copy for kfree after a grace period.
- */
-
- ng->len = id;
- memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*));
-
- rcu_assign_pointer(net->gen, ng);
- call_rcu(&old_ng->rcu, net_generic_release);
-assign:
- ng->ptr[id - 1] = data;
- return 0;
-}
-EXPORT_SYMBOL_GPL(net_assign_generic);
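The net_assign_generic() code moved above follows the standard RCU grow-copy-publish idiom: allocate a larger array, copy the old contents into it, publish the new array with rcu_assign_pointer(), and defer freeing the old one with call_rcu() so in-flight readers finish first. A condensed kernel-style sketch of that idiom follows; the names (ptr_array, ptr_array_grow) are hypothetical and not part of this patch.

struct ptr_array {
	struct rcu_head rcu;
	unsigned int len;
	void *ptr[0];
};

static void ptr_array_release(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct ptr_array, rcu));
}

/* Caller holds the update-side lock; readers dereference the slot under
 * rcu_read_lock() and therefore only ever see a fully initialised array. */
static int ptr_array_grow(struct ptr_array **slot, unsigned int new_len)
{
	struct ptr_array *old = *slot;
	struct ptr_array *new;

	new = kzalloc(sizeof(*new) + new_len * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->len = new_len;
	memcpy(new->ptr, old->ptr, old->len * sizeof(void *));

	rcu_assign_pointer(*slot, new);		/* publish the fully built copy */
	call_rcu(&old->rcu, ptr_array_release);	/* free the old copy after a grace period */
	return 0;
}
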
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index a58f59b97597..94825b109551 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -179,9 +179,8 @@ static void service_arp_queue(struct netpoll_info *npi)
}
}
-void netpoll_poll(struct netpoll *np)
+void netpoll_poll_dev(struct net_device *dev)
{
- struct net_device *dev = np->dev;
const struct net_device_ops *ops;
if (!dev || !netif_running(dev))
@@ -201,6 +200,11 @@ void netpoll_poll(struct netpoll *np)
zap_completion_queue();
}
+void netpoll_poll(struct netpoll *np)
+{
+ netpoll_poll_dev(np->dev);
+}
+
static void refill_skbs(void)
{
struct sk_buff *skb;
@@ -282,7 +286,7 @@ static int netpoll_owner_active(struct net_device *dev)
return 0;
}
-static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
+void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
int status = NETDEV_TX_BUSY;
unsigned long tries;
@@ -308,7 +312,9 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
tries > 0; --tries) {
if (__netif_tx_trylock(txq)) {
if (!netif_tx_queue_stopped(txq)) {
+ dev->priv_flags |= IFF_IN_NETPOLL;
status = ops->ndo_start_xmit(skb, dev);
+ dev->priv_flags &= ~IFF_IN_NETPOLL;
if (status == NETDEV_TX_OK)
txq_trans_update(txq);
}
@@ -756,7 +762,10 @@ int netpoll_setup(struct netpoll *np)
atomic_inc(&npinfo->refcnt);
}
- if (!ndev->netdev_ops->ndo_poll_controller) {
+ npinfo->netpoll = np;
+
+ if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
+ !ndev->netdev_ops->ndo_poll_controller) {
printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
np->name, np->dev_name);
err = -ENOTSUPP;
@@ -878,6 +887,7 @@ void netpoll_cleanup(struct netpoll *np)
}
if (atomic_dec_and_test(&npinfo->refcnt)) {
+ const struct net_device_ops *ops;
skb_queue_purge(&npinfo->arp_tx);
skb_queue_purge(&npinfo->txq);
cancel_rearming_delayed_work(&npinfo->tx_work);
@@ -885,7 +895,11 @@ void netpoll_cleanup(struct netpoll *np)
/* clean after last, unfinished work */
__skb_queue_purge(&npinfo->txq);
kfree(npinfo);
- np->dev->npinfo = NULL;
+ ops = np->dev->netdev_ops;
+ if (ops->ndo_netpoll_cleanup)
+ ops->ndo_netpoll_cleanup(np->dev);
+ else
+ np->dev->npinfo = NULL;
}
}
@@ -908,6 +922,7 @@ void netpoll_set_trap(int trap)
atomic_dec(&trapped);
}
+EXPORT_SYMBOL(netpoll_send_skb);
EXPORT_SYMBOL(netpoll_set_trap);
EXPORT_SYMBOL(netpoll_trap);
EXPORT_SYMBOL(netpoll_print_options);
@@ -915,4 +930,5 @@ EXPORT_SYMBOL(netpoll_parse_options);
EXPORT_SYMBOL(netpoll_setup);
EXPORT_SYMBOL(netpoll_cleanup);
EXPORT_SYMBOL(netpoll_send_udp);
+EXPORT_SYMBOL(netpoll_poll_dev);
EXPORT_SYMBOL(netpoll_poll);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 43923811bd6a..2ad68da418df 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -169,7 +169,7 @@
#include <asm/dma.h>
#include <asm/div64.h> /* do_div */
-#define VERSION "2.72"
+#define VERSION "2.73"
#define IP_NAME_SZ 32
#define MAX_MPLS_LABELS 16 /* This is the max label stack depth */
#define MPLS_STACK_BOTTOM htonl(0x00000100)
@@ -190,6 +190,7 @@
#define F_IPSEC_ON (1<<12) /* ipsec on for flows */
#define F_QUEUE_MAP_RND (1<<13) /* queue map Random */
#define F_QUEUE_MAP_CPU (1<<14) /* queue map mirrors smp_processor_id() */
+#define F_NODE (1<<15) /* Node memory alloc*/
/* Thread control flag bits */
#define T_STOP (1<<0) /* Stop run */
@@ -372,6 +373,7 @@ struct pktgen_dev {
u16 queue_map_min;
u16 queue_map_max;
+ int node; /* Memory node */
#ifdef CONFIG_XFRM
__u8 ipsmode; /* IPSEC mode (config) */
@@ -607,6 +609,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
if (pkt_dev->traffic_class)
seq_printf(seq, " traffic_class: 0x%02x\n", pkt_dev->traffic_class);
+ if (pkt_dev->node >= 0)
+ seq_printf(seq, " node: %d\n", pkt_dev->node);
+
seq_printf(seq, " Flags: ");
if (pkt_dev->flags & F_IPV6)
@@ -660,6 +665,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
if (pkt_dev->flags & F_SVID_RND)
seq_printf(seq, "SVID_RND ");
+ if (pkt_dev->flags & F_NODE)
+ seq_printf(seq, "NODE_ALLOC ");
+
seq_puts(seq, "\n");
/* not really stopped, more like last-running-at */
@@ -1074,6 +1082,21 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->dst_mac_count);
return count;
}
+ if (!strcmp(name, "node")) {
+ len = num_arg(&user_buffer[i], 10, &value);
+ if (len < 0)
+ return len;
+
+ i += len;
+
+ if (node_possible(value)) {
+ pkt_dev->node = value;
+ sprintf(pg_result, "OK: node=%d", pkt_dev->node);
+ }
+ else
+ sprintf(pg_result, "ERROR: node not possible");
+ return count;
+ }
if (!strcmp(name, "flag")) {
char f[32];
memset(f, 0, 32);
@@ -1166,12 +1189,18 @@ static ssize_t pktgen_if_write(struct file *file,
else if (strcmp(f, "!IPV6") == 0)
pkt_dev->flags &= ~F_IPV6;
+ else if (strcmp(f, "NODE_ALLOC") == 0)
+ pkt_dev->flags |= F_NODE;
+
+ else if (strcmp(f, "!NODE_ALLOC") == 0)
+ pkt_dev->flags &= ~F_NODE;
+
else {
sprintf(pg_result,
"Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s",
f,
"IPSRC_RND, IPDST_RND, UDPSRC_RND, UDPDST_RND, "
- "MACSRC_RND, MACDST_RND, TXSIZE_RND, IPV6, MPLS_RND, VID_RND, SVID_RND, FLOW_SEQ, IPSEC\n");
+ "MACSRC_RND, MACDST_RND, TXSIZE_RND, IPV6, MPLS_RND, VID_RND, SVID_RND, FLOW_SEQ, IPSEC, NODE_ALLOC\n");
return count;
}
sprintf(pg_result, "OK: flags=0x%x", pkt_dev->flags);
@@ -2572,9 +2601,27 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
mod_cur_headers(pkt_dev);
datalen = (odev->hard_header_len + 16) & ~0xf;
- skb = __netdev_alloc_skb(odev,
- pkt_dev->cur_pkt_size + 64
- + datalen + pkt_dev->pkt_overhead, GFP_NOWAIT);
+
+ if (pkt_dev->flags & F_NODE) {
+ int node;
+
+ if (pkt_dev->node >= 0)
+ node = pkt_dev->node;
+ else
+ node = numa_node_id();
+
+ skb = __alloc_skb(NET_SKB_PAD + pkt_dev->cur_pkt_size + 64
+ + datalen + pkt_dev->pkt_overhead, GFP_NOWAIT, 0, node);
+ if (likely(skb)) {
+ skb_reserve(skb, NET_SKB_PAD);
+ skb->dev = odev;
+ }
+ }
+ else
+ skb = __netdev_alloc_skb(odev,
+ pkt_dev->cur_pkt_size + 64
+ + datalen + pkt_dev->pkt_overhead, GFP_NOWAIT);
+
if (!skb) {
sprintf(pkt_dev->result, "No memory");
return NULL;
@@ -3674,6 +3721,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
pkt_dev->svlan_p = 0;
pkt_dev->svlan_cfi = 0;
pkt_dev->svlan_id = 0xffff;
+ pkt_dev->node = -1;
err = pktgen_setup_dev(pkt_dev, ifname);
if (err)
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index fe776c9ddeca..23a71cb21273 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -98,7 +98,7 @@ int lockdep_rtnl_is_held(void)
EXPORT_SYMBOL(lockdep_rtnl_is_held);
#endif /* #ifdef CONFIG_PROVE_LOCKING */
-static struct rtnl_link *rtnl_msg_handlers[NPROTO];
+static struct rtnl_link *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];
static inline int rtm_msgindex(int msgtype)
{
@@ -118,7 +118,11 @@ static rtnl_doit_func rtnl_get_doit(int protocol, int msgindex)
{
struct rtnl_link *tab;
- tab = rtnl_msg_handlers[protocol];
+ if (protocol <= RTNL_FAMILY_MAX)
+ tab = rtnl_msg_handlers[protocol];
+ else
+ tab = NULL;
+
if (tab == NULL || tab[msgindex].doit == NULL)
tab = rtnl_msg_handlers[PF_UNSPEC];
@@ -129,7 +133,11 @@ static rtnl_dumpit_func rtnl_get_dumpit(int protocol, int msgindex)
{
struct rtnl_link *tab;
- tab = rtnl_msg_handlers[protocol];
+ if (protocol <= RTNL_FAMILY_MAX)
+ tab = rtnl_msg_handlers[protocol];
+ else
+ tab = NULL;
+
if (tab == NULL || tab[msgindex].dumpit == NULL)
tab = rtnl_msg_handlers[PF_UNSPEC];
@@ -159,7 +167,7 @@ int __rtnl_register(int protocol, int msgtype,
struct rtnl_link *tab;
int msgindex;
- BUG_ON(protocol < 0 || protocol >= NPROTO);
+ BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
msgindex = rtm_msgindex(msgtype);
tab = rtnl_msg_handlers[protocol];
@@ -211,7 +219,7 @@ int rtnl_unregister(int protocol, int msgtype)
{
int msgindex;
- BUG_ON(protocol < 0 || protocol >= NPROTO);
+ BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
msgindex = rtm_msgindex(msgtype);
if (rtnl_msg_handlers[protocol] == NULL)
@@ -233,7 +241,7 @@ EXPORT_SYMBOL_GPL(rtnl_unregister);
*/
void rtnl_unregister_all(int protocol)
{
- BUG_ON(protocol < 0 || protocol >= NPROTO);
+ BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
kfree(rtnl_msg_handlers[protocol]);
rtnl_msg_handlers[protocol] = NULL;
@@ -600,7 +608,41 @@ static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
a->rx_compressed = b->rx_compressed;
a->tx_compressed = b->tx_compressed;
-};
+}
+
+static void copy_rtnl_link_stats64(void *v, const struct net_device_stats *b)
+{
+ struct rtnl_link_stats64 a;
+
+ a.rx_packets = b->rx_packets;
+ a.tx_packets = b->tx_packets;
+ a.rx_bytes = b->rx_bytes;
+ a.tx_bytes = b->tx_bytes;
+ a.rx_errors = b->rx_errors;
+ a.tx_errors = b->tx_errors;
+ a.rx_dropped = b->rx_dropped;
+ a.tx_dropped = b->tx_dropped;
+
+ a.multicast = b->multicast;
+ a.collisions = b->collisions;
+
+ a.rx_length_errors = b->rx_length_errors;
+ a.rx_over_errors = b->rx_over_errors;
+ a.rx_crc_errors = b->rx_crc_errors;
+ a.rx_frame_errors = b->rx_frame_errors;
+ a.rx_fifo_errors = b->rx_fifo_errors;
+ a.rx_missed_errors = b->rx_missed_errors;
+
+ a.tx_aborted_errors = b->tx_aborted_errors;
+ a.tx_carrier_errors = b->tx_carrier_errors;
+ a.tx_fifo_errors = b->tx_fifo_errors;
+ a.tx_heartbeat_errors = b->tx_heartbeat_errors;
+ a.tx_window_errors = b->tx_window_errors;
+
+ a.rx_compressed = b->rx_compressed;
+ a.tx_compressed = b->tx_compressed;
+ memcpy(v, &a, sizeof(a));
+}
static inline int rtnl_vfinfo_size(const struct net_device *dev)
{
@@ -619,6 +661,7 @@ static inline size_t if_nlmsg_size(const struct net_device *dev)
+ nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
+ nla_total_size(sizeof(struct rtnl_link_ifmap))
+ nla_total_size(sizeof(struct rtnl_link_stats))
+ + nla_total_size(sizeof(struct rtnl_link_stats64))
+ nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
+ nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
+ nla_total_size(4) /* IFLA_TXQLEN */
@@ -698,6 +741,12 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
stats = dev_get_stats(dev);
copy_rtnl_link_stats(nla_data(attr), stats);
+ attr = nla_reserve(skb, IFLA_STATS64,
+ sizeof(struct rtnl_link_stats64));
+ if (attr == NULL)
+ goto nla_put_failure;
+ copy_rtnl_link_stats64(nla_data(attr), stats);
+
if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) {
int i;
struct ifla_vf_info ivi;
@@ -1336,7 +1385,7 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
if (s_idx == 0)
s_idx = 1;
- for (idx = 1; idx < NPROTO; idx++) {
+ for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
int type = cb->nlh->nlmsg_type-RTM_BASE;
if (idx < s_idx || idx == PF_PACKET)
continue;
@@ -1404,9 +1453,6 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
return 0;
family = ((struct rtgenmsg *)NLMSG_DATA(nlh))->rtgen_family;
- if (family >= NPROTO)
- return -EAFNOSUPPORT;
-
sz_idx = type>>2;
kind = type&3;
@@ -1474,6 +1520,7 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
case NETDEV_POST_INIT:
case NETDEV_REGISTER:
case NETDEV_CHANGE:
+ case NETDEV_PRE_TYPE_CHANGE:
case NETDEV_GOING_DOWN:
case NETDEV_UNREGISTER:
case NETDEV_UNREGISTER_BATCH:
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 93c4e060c91e..a9b0e1f77806 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -117,7 +117,7 @@ static const struct pipe_buf_operations sock_pipe_buf_ops = {
*
* Out of line support code for skb_put(). Not user callable.
*/
-void skb_over_panic(struct sk_buff *skb, int sz, void *here)
+static void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
"data:%p tail:%#lx end:%#lx dev:%s\n",
@@ -126,7 +126,6 @@ void skb_over_panic(struct sk_buff *skb, int sz, void *here)
skb->dev ? skb->dev->name : "<NULL>");
BUG();
}
-EXPORT_SYMBOL(skb_over_panic);
/**
* skb_under_panic - private function
@@ -137,7 +136,7 @@ EXPORT_SYMBOL(skb_over_panic);
* Out of line support code for skb_push(). Not user callable.
*/
-void skb_under_panic(struct sk_buff *skb, int sz, void *here)
+static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
"data:%p tail:%#lx end:%#lx dev:%s\n",
@@ -146,7 +145,6 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
skb->dev ? skb->dev->name : "<NULL>");
BUG();
}
-EXPORT_SYMBOL(skb_under_panic);
/* Allocate a new skbuff. We do this ourselves so we can fill in a few
* 'private' fields and also do memory statistics to find all the
@@ -183,12 +181,14 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
if (!skb)
goto out;
+ prefetchw(skb);
size = SKB_DATA_ALIGN(size);
data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
gfp_mask, node);
if (!data)
goto nodata;
+ prefetchw(data + size);
/*
* Only clear those fields we need to clear, not those that we will
@@ -210,15 +210,8 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
/* make sure we initialize shinfo sequentially */
shinfo = skb_shinfo(skb);
+ memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
atomic_set(&shinfo->dataref, 1);
- shinfo->nr_frags = 0;
- shinfo->gso_size = 0;
- shinfo->gso_segs = 0;
- shinfo->gso_type = 0;
- shinfo->ip6_frag_id = 0;
- shinfo->tx_flags.flags = 0;
- skb_frag_list_init(skb);
- memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));
if (fclone) {
struct sk_buff *child = skb + 1;
@@ -507,16 +500,10 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size)
return 0;
skb_release_head_state(skb);
+
shinfo = skb_shinfo(skb);
+ memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
atomic_set(&shinfo->dataref, 1);
- shinfo->nr_frags = 0;
- shinfo->gso_size = 0;
- shinfo->gso_segs = 0;
- shinfo->gso_type = 0;
- shinfo->ip6_frag_id = 0;
- shinfo->tx_flags.flags = 0;
- skb_frag_list_init(skb);
- memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));
memset(skb, 0, offsetof(struct sk_buff, tail));
skb->data = skb->head + NET_SKB_PAD;
@@ -534,6 +521,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
new->network_header = old->network_header;
new->mac_header = old->mac_header;
skb_dst_set(new, dst_clone(skb_dst(old)));
+ new->rxhash = old->rxhash;
#ifdef CONFIG_XFRM
new->sp = secpath_get(old->sp);
#endif
@@ -581,6 +569,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
C(len);
C(data_len);
C(mac_len);
+ C(rxhash);
n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
n->cloned = 1;
n->nohdr = 0;
@@ -1051,7 +1040,7 @@ EXPORT_SYMBOL(skb_push);
*/
unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
- return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
+ return skb_pull_inline(skb, len);
}
EXPORT_SYMBOL(skb_pull);
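The skbuff.c hunks above replace field-by-field zeroing of skb_shared_info with a single memset() bounded by offsetof(..., dataref), so every member laid out before dataref is cleared in one call and only the reference count is set explicitly afterwards. A minimal standalone illustration of that technique, using a hypothetical struct rather than the real skb_shared_info:

#include <stdio.h>
#include <stddef.h>
#include <string.h>

struct shared_info {
	unsigned int nr_frags;
	unsigned int gso_size;
	unsigned int gso_segs;
	int dataref;		/* first member that must survive the memset */
};

int main(void)
{
	struct shared_info si = { 1, 2, 3, 99 };

	/* Clears nr_frags..gso_segs (and any padding) in one call,
	 * leaving dataref untouched. */
	memset(&si, 0, offsetof(struct shared_info, dataref));
	si.dataref = 1;		/* then set the reference count explicitly */

	printf("%u %u %u %d\n", si.nr_frags, si.gso_size, si.gso_segs, si.dataref);
	return 0;
}

The pattern only works while the members to be cleared stay grouped ahead of the boundary member, which is why the kernel code keys the memset length off dataref's offset instead of a hard-coded size.
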
diff --git a/net/core/sock.c b/net/core/sock.c
index c5812bbc2cc9..94c4affdda9b 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -327,6 +327,10 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
skb->dev = NULL;
+ if (sk_rcvqueues_full(sk, skb)) {
+ atomic_inc(&sk->sk_drops);
+ goto discard_and_relse;
+ }
if (nested)
bh_lock_sock_nested(sk);
else
@@ -364,11 +368,11 @@ EXPORT_SYMBOL(sk_reset_txq);
struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
- struct dst_entry *dst = sk->sk_dst_cache;
+ struct dst_entry *dst = __sk_dst_get(sk);
if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
sk_tx_queue_clear(sk);
- sk->sk_dst_cache = NULL;
+ rcu_assign_pointer(sk->sk_dst_cache, NULL);
dst_release(dst);
return NULL;
}
@@ -1157,7 +1161,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
skb_queue_head_init(&newsk->sk_async_wait_queue);
#endif
- rwlock_init(&newsk->sk_dst_lock);
+ spin_lock_init(&newsk->sk_dst_lock);
rwlock_init(&newsk->sk_callback_lock);
lockdep_set_class_and_name(&newsk->sk_callback_lock,
af_callback_keys + newsk->sk_family,
@@ -1207,7 +1211,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
*/
sk_refcnt_debug_inc(newsk);
sk_set_socket(newsk, NULL);
- newsk->sk_sleep = NULL;
+ newsk->sk_wq = NULL;
if (newsk->sk_prot->sockets_allocated)
percpu_counter_inc(newsk->sk_prot->sockets_allocated);
@@ -1395,7 +1399,7 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo)
if (signal_pending(current))
break;
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
break;
if (sk->sk_shutdown & SEND_SHUTDOWN)
@@ -1404,7 +1408,7 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo)
break;
timeo = schedule_timeout(timeo);
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
return timeo;
}
@@ -1570,11 +1574,11 @@ int sk_wait_data(struct sock *sk, long *timeo)
int rc;
DEFINE_WAIT(wait);
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
return rc;
}
EXPORT_SYMBOL(sk_wait_data);
@@ -1796,41 +1800,53 @@ EXPORT_SYMBOL(sock_no_sendpage);
static void sock_def_wakeup(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
- if (sk_has_sleeper(sk))
- wake_up_interruptible_all(sk->sk_sleep);
- read_unlock(&sk->sk_callback_lock);
+ struct socket_wq *wq;
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_all(&wq->wait);
+ rcu_read_unlock();
}
static void sock_def_error_report(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
- if (sk_has_sleeper(sk))
- wake_up_interruptible_poll(sk->sk_sleep, POLLERR);
+ struct socket_wq *wq;
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_poll(&wq->wait, POLLERR);
sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
- read_unlock(&sk->sk_callback_lock);
+ rcu_read_unlock();
}
static void sock_def_readable(struct sock *sk, int len)
{
- read_lock(&sk->sk_callback_lock);
- if (sk_has_sleeper(sk))
- wake_up_interruptible_sync_poll(sk->sk_sleep, POLLIN |
+ struct socket_wq *wq;
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
POLLRDNORM | POLLRDBAND);
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
- read_unlock(&sk->sk_callback_lock);
+ rcu_read_unlock();
}
static void sock_def_write_space(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
+ struct socket_wq *wq;
+
+ rcu_read_lock();
/* Do not wake up a writer until he can make "significant"
* progress. --DaveM
*/
if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
- if (sk_has_sleeper(sk))
- wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT |
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
POLLWRNORM | POLLWRBAND);
/* Should agree with poll, otherwise some programs break */
@@ -1838,7 +1854,7 @@ static void sock_def_write_space(struct sock *sk)
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
}
- read_unlock(&sk->sk_callback_lock);
+ rcu_read_unlock();
}
static void sock_def_destruct(struct sock *sk)
@@ -1885,7 +1901,6 @@ void sock_init_data(struct socket *sock, struct sock *sk)
sk->sk_allocation = GFP_KERNEL;
sk->sk_rcvbuf = sysctl_rmem_default;
sk->sk_sndbuf = sysctl_wmem_default;
- sk->sk_backlog.limit = sk->sk_rcvbuf << 1;
sk->sk_state = TCP_CLOSE;
sk_set_socket(sk, sock);
@@ -1893,12 +1908,12 @@ void sock_init_data(struct socket *sock, struct sock *sk)
if (sock) {
sk->sk_type = sock->type;
- sk->sk_sleep = &sock->wait;
+ sk->sk_wq = sock->wq;
sock->sk = sk;
} else
- sk->sk_sleep = NULL;
+ sk->sk_wq = NULL;
- rwlock_init(&sk->sk_dst_lock);
+ spin_lock_init(&sk->sk_dst_lock);
rwlock_init(&sk->sk_callback_lock);
lockdep_set_class_and_name(&sk->sk_callback_lock,
af_callback_keys + sk->sk_family,
diff --git a/net/core/stream.c b/net/core/stream.c
index a37debfeb1b2..cc196f42b8d8 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -28,15 +28,19 @@
void sk_stream_write_space(struct sock *sk)
{
struct socket *sock = sk->sk_socket;
+ struct socket_wq *wq;
if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock) {
clear_bit(SOCK_NOSPACE, &sock->flags);
- if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible_poll(sk->sk_sleep, POLLOUT |
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_poll(&wq->wait, POLLOUT |
POLLWRNORM | POLLWRBAND);
- if (sock->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
+ if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
sock_wake_async(sock, SOCK_WAKE_SPACE, POLL_OUT);
+ rcu_read_unlock();
}
}
@@ -66,13 +70,13 @@ int sk_stream_wait_connect(struct sock *sk, long *timeo_p)
if (signal_pending(tsk))
return sock_intr_errno(*timeo_p);
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
sk->sk_write_pending++;
done = sk_wait_event(sk, timeo_p,
!sk->sk_err &&
!((1 << sk->sk_state) &
~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)));
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
sk->sk_write_pending--;
} while (!done);
return 0;
@@ -96,13 +100,13 @@ void sk_stream_wait_close(struct sock *sk, long timeout)
DEFINE_WAIT(wait);
do {
- prepare_to_wait(sk->sk_sleep, &wait,
+ prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
if (sk_wait_event(sk, &timeout, !sk_stream_closing(sk)))
break;
} while (!signal_pending(current) && timeout);
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
}
}
@@ -126,7 +130,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
while (1) {
set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
goto do_error;
@@ -157,7 +161,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
*timeo_p = current_timeo;
}
out:
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
return err;
do_error:
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index b7b6b8208f75..dcc7d25996ab 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -11,12 +11,72 @@
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/ratelimit.h>
+#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/ip.h>
#include <net/sock.h>
+#ifdef CONFIG_RPS
+static int rps_sock_flow_sysctl(ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ unsigned int orig_size, size;
+ int ret, i;
+ ctl_table tmp = {
+ .data = &size,
+ .maxlen = sizeof(size),
+ .mode = table->mode
+ };
+ struct rps_sock_flow_table *orig_sock_table, *sock_table;
+ static DEFINE_MUTEX(sock_flow_mutex);
+
+ mutex_lock(&sock_flow_mutex);
+
+ orig_sock_table = rps_sock_flow_table;
+ size = orig_size = orig_sock_table ? orig_sock_table->mask + 1 : 0;
+
+ ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
+
+ if (write) {
+ if (size) {
+ if (size > 1<<30) {
+ /* Enforce limit to prevent overflow */
+ mutex_unlock(&sock_flow_mutex);
+ return -EINVAL;
+ }
+ size = roundup_pow_of_two(size);
+ if (size != orig_size) {
+ sock_table =
+ vmalloc(RPS_SOCK_FLOW_TABLE_SIZE(size));
+ if (!sock_table) {
+ mutex_unlock(&sock_flow_mutex);
+ return -ENOMEM;
+ }
+
+ sock_table->mask = size - 1;
+ } else
+ sock_table = orig_sock_table;
+
+ for (i = 0; i < size; i++)
+ sock_table->ents[i] = RPS_NO_CPU;
+ } else
+ sock_table = NULL;
+
+ if (sock_table != orig_sock_table) {
+ rcu_assign_pointer(rps_sock_flow_table, sock_table);
+ synchronize_rcu();
+ vfree(orig_sock_table);
+ }
+ }
+
+ mutex_unlock(&sock_flow_mutex);
+
+ return ret;
+}
+#endif /* CONFIG_RPS */
+
static struct ctl_table net_core_table[] = {
#ifdef CONFIG_NET
{
@@ -82,6 +142,14 @@ static struct ctl_table net_core_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec
},
+#ifdef CONFIG_RPS
+ {
+ .procname = "rps_sock_flow_entries",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = rps_sock_flow_sysctl
+ },
+#endif
#endif /* CONFIG_NET */
{
.procname = "netdev_budget",
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index bcd7632299f5..d3235899c7e3 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -208,7 +208,7 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
goto restart_timer;
}
- ccid3_pr_debug("%s(%p, state=%s) - entry \n", dccp_role(sk), sk,
+ ccid3_pr_debug("%s(%p, state=%s) - entry\n", dccp_role(sk), sk,
ccid3_tx_state_name(hc->tx_state));
if (hc->tx_state == TFRC_SSTATE_FBACK)
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 5ef32c2f0d6a..a10a61a1ded2 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -189,7 +189,7 @@ enum {
#define DCCP_MIB_MAX __DCCP_MIB_MAX
struct dccp_mib {
unsigned long mibs[DCCP_MIB_MAX];
-} __SNMP_MIB_ALIGN__;
+};
DECLARE_SNMP_STAT(struct dccp_mib, dccp_statistics);
#define DCCP_INC_STATS(field) SNMP_INC_STATS(dccp_statistics, field)
@@ -223,7 +223,7 @@ static inline void dccp_csum_outgoing(struct sk_buff *skb)
skb->csum = skb_checksum(skb, 0, (cov > skb->len)? skb->len : cov, 0);
}
-extern void dccp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb);
+extern void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb);
extern int dccp_retransmit_skb(struct sock *sk);
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 9ec717426024..58f7bc156850 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -415,7 +415,7 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
dp->dccps_awl, dp->dccps_awh)) {
dccp_pr_debug("invalid ackno: S.AWL=%llu, "
- "P.ackno=%llu, S.AWH=%llu \n",
+ "P.ackno=%llu, S.AWH=%llu\n",
(unsigned long long)dp->dccps_awl,
(unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq,
(unsigned long long)dp->dccps_awh);
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 52ffa1cde15a..d9b11ef8694c 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -349,7 +349,7 @@ static inline __sum16 dccp_v4_csum_finish(struct sk_buff *skb,
return csum_tcpudp_magic(src, dst, skb->len, IPPROTO_DCCP, skb->csum);
}
-void dccp_v4_send_check(struct sock *sk, int unused, struct sk_buff *skb)
+void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
const struct inet_sock *inet = inet_sk(sk);
struct dccp_hdr *dh = dccp_hdr(skb);
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 3b11e41a2929..091698899594 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -60,8 +60,7 @@ static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb,
return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum);
}
-static inline void dccp_v6_send_check(struct sock *sk, int unused_value,
- struct sk_buff *skb)
+static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct dccp_hdr *dh = dccp_hdr(skb);
@@ -293,7 +292,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
&ireq6->loc_addr,
&ireq6->rmt_addr);
ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
- err = ip6_xmit(sk, skb, &fl, opt, 0);
+ err = ip6_xmit(sk, skb, &fl, opt);
err = net_xmit_eval(err);
}
@@ -348,7 +347,7 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) {
if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) {
skb_dst_set(skb, dst);
- ip6_xmit(ctl_sk, skb, &fl, NULL, 0);
+ ip6_xmit(ctl_sk, skb, &fl, NULL);
DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
return;
diff --git a/net/dccp/output.c b/net/dccp/output.c
index fc3f436440b4..aadbdb58758b 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -129,14 +129,14 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
break;
}
- icsk->icsk_af_ops->send_check(sk, 0, skb);
+ icsk->icsk_af_ops->send_check(sk, skb);
if (set_ack)
dccp_event_ack_sent(sk);
DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
- err = icsk->icsk_af_ops->queue_xmit(skb, 0);
+ err = icsk->icsk_af_ops->queue_xmit(skb);
return net_xmit_eval(err);
}
return -ENOBUFS;
@@ -195,15 +195,17 @@ EXPORT_SYMBOL_GPL(dccp_sync_mss);
void dccp_write_space(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
+ struct socket_wq *wq;
- if (sk_has_sleeper(sk))
- wake_up_interruptible(sk->sk_sleep);
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible(&wq->wait);
/* Should agree with poll, otherwise some programs break */
if (sock_writeable(sk))
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
- read_unlock(&sk->sk_callback_lock);
+ rcu_read_unlock();
}
/**
@@ -225,7 +227,7 @@ static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb, int delay)
dccp_pr_debug("delayed send by %d msec\n", delay);
jiffdelay = msecs_to_jiffies(delay);
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
sk->sk_write_pending++;
release_sock(sk);
@@ -241,7 +243,7 @@ static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb, int delay)
rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
} while ((delay = rc) > 0);
out:
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
return rc;
do_error:
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index a0e38d8018f5..b03ecf6b2bb0 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -312,7 +312,7 @@ unsigned int dccp_poll(struct file *file, struct socket *sock,
unsigned int mask;
struct sock *sk = sock->sk;
- sock_poll_wait(file, sk->sk_sleep, wait);
+ sock_poll_wait(file, sk_sleep(sk), wait);
if (sk->sk_state == DCCP_LISTEN)
return inet_csk_listen_poll(sk);
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index bbfeb5eae46a..1a9aa05d4dc4 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -38,7 +38,7 @@ static int dccp_write_timeout(struct sock *sk)
if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) {
if (icsk->icsk_retransmits != 0)
- dst_negative_advice(&sk->sk_dst_cache, sk);
+ dst_negative_advice(sk);
retry_until = icsk->icsk_syn_retries ?
: sysctl_dccp_request_retries;
} else {
@@ -63,7 +63,7 @@ static int dccp_write_timeout(struct sock *sk)
Golden words :-).
*/
- dst_negative_advice(&sk->sk_dst_cache, sk);
+ dst_negative_advice(sk);
}
retry_until = sysctl_dccp_retries2;
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 2b494fac9468..d6b93d19790f 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -446,7 +446,7 @@ static void dn_destruct(struct sock *sk)
skb_queue_purge(&scp->other_xmit_queue);
skb_queue_purge(&scp->other_receive_queue);
- dst_release(xchg(&sk->sk_dst_cache, NULL));
+ dst_release(rcu_dereference_check(sk->sk_dst_cache, 1));
}
static int dn_memory_pressure;
@@ -832,7 +832,7 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
scp->segsize_loc = dst_metric(__sk_dst_get(sk), RTAX_ADVMSS);
dn_send_conn_conf(sk, allocation);
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
for(;;) {
release_sock(sk);
if (scp->state == DN_CC)
@@ -850,9 +850,9 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
err = -EAGAIN;
if (!*timeo)
break;
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
if (err == 0) {
sk->sk_socket->state = SS_CONNECTED;
} else if (scp->state != DN_CC) {
@@ -873,7 +873,7 @@ static int dn_wait_run(struct sock *sk, long *timeo)
if (!*timeo)
return -EALREADY;
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
for(;;) {
release_sock(sk);
if (scp->state == DN_CI || scp->state == DN_CC)
@@ -891,9 +891,9 @@ static int dn_wait_run(struct sock *sk, long *timeo)
err = -ETIMEDOUT;
if (!*timeo)
break;
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
out:
if (err == 0) {
sk->sk_socket->state = SS_CONNECTED;
@@ -1040,7 +1040,7 @@ static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
struct sk_buff *skb = NULL;
int err = 0;
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
for(;;) {
release_sock(sk);
skb = skb_dequeue(&sk->sk_receive_queue);
@@ -1060,9 +1060,9 @@ static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
err = -EAGAIN;
if (!*timeo)
break;
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
return skb == NULL ? ERR_PTR(err) : skb;
}
@@ -1105,7 +1105,7 @@ static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
release_sock(sk);
dst = skb_dst(skb);
- dst_release(xchg(&newsk->sk_dst_cache, dst));
+ sk_dst_set(newsk, dst);
skb_dst_set(skb, NULL);
DN_SK(newsk)->state = DN_CR;
@@ -1746,11 +1746,11 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
goto out;
}
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target));
clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
}
skb_queue_walk_safe(queue, skb, n) {
@@ -1956,7 +1956,7 @@ static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,
}
if ((flags & MSG_TRYHARD) && sk->sk_dst_cache)
- dst_negative_advice(&sk->sk_dst_cache, sk);
+ dst_negative_advice(sk);
mss = scp->segsize_rem;
fctype = scp->services_rem & NSP_FC_MASK;
@@ -2003,12 +2003,12 @@ static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,
goto out;
}
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
sk_wait_event(sk, &timeo,
!dn_queue_too_long(scp, queue, flags));
clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
continue;
}
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index cead68eb254c..615dbe3b43f9 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -350,7 +350,7 @@ static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr **ifap, int de
if (dn_db->dev->type == ARPHRD_ETHER) {
if (ifa1->ifa_local != dn_eth2dn(dev->dev_addr)) {
dn_dn2eth(mac_addr, ifa1->ifa_local);
- dev_mc_delete(dev, mac_addr, ETH_ALEN, 0);
+ dev_mc_del(dev, mac_addr);
}
}
@@ -381,7 +381,7 @@ static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa)
if (dev->type == ARPHRD_ETHER) {
if (ifa->ifa_local != dn_eth2dn(dev->dev_addr)) {
dn_dn2eth(mac_addr, ifa->ifa_local);
- dev_mc_add(dev, mac_addr, ETH_ALEN, 0);
+ dev_mc_add(dev, mac_addr);
}
}
@@ -1001,9 +1001,9 @@ static int dn_eth_up(struct net_device *dev)
struct dn_dev *dn_db = dev->dn_ptr;
if (dn_db->parms.forwarding == 0)
- dev_mc_add(dev, dn_rt_all_end_mcast, ETH_ALEN, 0);
+ dev_mc_add(dev, dn_rt_all_end_mcast);
else
- dev_mc_add(dev, dn_rt_all_rt_mcast, ETH_ALEN, 0);
+ dev_mc_add(dev, dn_rt_all_rt_mcast);
dn_db->use_long = 1;
@@ -1015,9 +1015,9 @@ static void dn_eth_down(struct net_device *dev)
struct dn_dev *dn_db = dev->dn_ptr;
if (dn_db->parms.forwarding == 0)
- dev_mc_delete(dev, dn_rt_all_end_mcast, ETH_ALEN, 0);
+ dev_mc_del(dev, dn_rt_all_end_mcast);
else
- dev_mc_delete(dev, dn_rt_all_rt_mcast, ETH_ALEN, 0);
+ dev_mc_del(dev, dn_rt_all_rt_mcast);
}
static void dn_dev_set_timer(struct net_device *dev);
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index deb723dba44b..0363bb95cc7d 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -266,7 +266,8 @@ static int dn_long_output(struct sk_buff *skb)
skb_reset_network_header(skb);
- return NF_HOOK(PF_DECnet, NF_DN_POST_ROUTING, skb, NULL, neigh->dev, dn_neigh_output_packet);
+ return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, skb, NULL,
+ neigh->dev, dn_neigh_output_packet);
}
static int dn_short_output(struct sk_buff *skb)
@@ -305,7 +306,8 @@ static int dn_short_output(struct sk_buff *skb)
skb_reset_network_header(skb);
- return NF_HOOK(PF_DECnet, NF_DN_POST_ROUTING, skb, NULL, neigh->dev, dn_neigh_output_packet);
+ return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, skb, NULL,
+ neigh->dev, dn_neigh_output_packet);
}
/*
@@ -347,7 +349,8 @@ static int dn_phase3_output(struct sk_buff *skb)
skb_reset_network_header(skb);
- return NF_HOOK(PF_DECnet, NF_DN_POST_ROUTING, skb, NULL, neigh->dev, dn_neigh_output_packet);
+ return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, skb, NULL,
+ neigh->dev, dn_neigh_output_packet);
}
/*
diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c
index 25a37299bc65..b430549e2b91 100644
--- a/net/decnet/dn_nsp_in.c
+++ b/net/decnet/dn_nsp_in.c
@@ -810,7 +810,8 @@ free_out:
int dn_nsp_rx(struct sk_buff *skb)
{
- return NF_HOOK(PF_DECnet, NF_DN_LOCAL_IN, skb, skb->dev, NULL, dn_nsp_rx_packet);
+ return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_IN, skb, skb->dev, NULL,
+ dn_nsp_rx_packet);
}
/*
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 70ebe74027d5..a8432e399545 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -518,7 +518,8 @@ static int dn_route_rx_long(struct sk_buff *skb)
ptr++;
cb->hops = *ptr++; /* Visit Count */
- return NF_HOOK(PF_DECnet, NF_DN_PRE_ROUTING, skb, skb->dev, NULL, dn_route_rx_packet);
+ return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, skb, skb->dev, NULL,
+ dn_route_rx_packet);
drop_it:
kfree_skb(skb);
@@ -544,7 +545,8 @@ static int dn_route_rx_short(struct sk_buff *skb)
ptr += 2;
cb->hops = *ptr & 0x3f;
- return NF_HOOK(PF_DECnet, NF_DN_PRE_ROUTING, skb, skb->dev, NULL, dn_route_rx_packet);
+ return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, skb, skb->dev, NULL,
+ dn_route_rx_packet);
drop_it:
kfree_skb(skb);
@@ -646,16 +648,24 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type
switch(flags & DN_RT_CNTL_MSK) {
case DN_RT_PKT_HELO:
- return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_route_ptp_hello);
+ return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
+ skb, skb->dev, NULL,
+ dn_route_ptp_hello);
case DN_RT_PKT_L1RT:
case DN_RT_PKT_L2RT:
- return NF_HOOK(PF_DECnet, NF_DN_ROUTE, skb, skb->dev, NULL, dn_route_discard);
+ return NF_HOOK(NFPROTO_DECNET, NF_DN_ROUTE,
+ skb, skb->dev, NULL,
+ dn_route_discard);
case DN_RT_PKT_ERTH:
- return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_neigh_router_hello);
+ return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
+ skb, skb->dev, NULL,
+ dn_neigh_router_hello);
case DN_RT_PKT_EEDH:
- return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_neigh_endnode_hello);
+ return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
+ skb, skb->dev, NULL,
+ dn_neigh_endnode_hello);
}
} else {
if (dn->parms.state != DN_DEV_S_RU)
@@ -704,7 +714,8 @@ static int dn_output(struct sk_buff *skb)
cb->rt_flags |= DN_RT_F_IE;
cb->hops = 0;
- return NF_HOOK(PF_DECnet, NF_DN_LOCAL_OUT, skb, NULL, dev, neigh->output);
+ return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_OUT, skb, NULL, dev,
+ neigh->output);
error:
if (net_ratelimit())
@@ -753,7 +764,8 @@ static int dn_forward(struct sk_buff *skb)
if (rt->rt_flags & RTCF_DOREDIRECT)
cb->rt_flags |= DN_RT_F_IE;
- return NF_HOOK(PF_DECnet, NF_DN_FORWARD, skb, dev, skb->dev, neigh->output);
+ return NF_HOOK(NFPROTO_DECNET, NF_DN_FORWARD, skb, dev, skb->dev,
+ neigh->output);
drop:
kfree_skb(skb);
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index 7466c546f286..48fdf10be7a1 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -196,7 +196,6 @@ static int dn_fib_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
{
struct dn_fib_rule *r = (struct dn_fib_rule *)rule;
- frh->family = AF_DECnet;
frh->dst_len = r->dst_len;
frh->src_len = r->src_len;
frh->tos = 0;
@@ -212,29 +211,12 @@ nla_put_failure:
return -ENOBUFS;
}
-static u32 dn_fib_rule_default_pref(struct fib_rules_ops *ops)
-{
- struct list_head *pos;
- struct fib_rule *rule;
-
- if (!list_empty(&dn_fib_rules_ops->rules_list)) {
- pos = dn_fib_rules_ops->rules_list.next;
- if (pos->next != &dn_fib_rules_ops->rules_list) {
- rule = list_entry(pos->next, struct fib_rule, list);
- if (rule->pref)
- return rule->pref - 1;
- }
- }
-
- return 0;
-}
-
static void dn_fib_rule_flush_cache(struct fib_rules_ops *ops)
{
dn_rt_cache_flush(-1);
}
-static struct fib_rules_ops dn_fib_rules_ops_template = {
+static const struct fib_rules_ops __net_initdata dn_fib_rules_ops_template = {
.family = AF_DECnet,
.rule_size = sizeof(struct dn_fib_rule),
.addr_size = sizeof(u16),
@@ -243,7 +225,7 @@ static struct fib_rules_ops dn_fib_rules_ops_template = {
.configure = dn_fib_rule_configure,
.compare = dn_fib_rule_compare,
.fill = dn_fib_rule_fill,
- .default_pref = dn_fib_rule_default_pref,
+ .default_pref = fib_default_rule_pref,
.flush_cache = dn_fib_rule_flush_cache,
.nlgroup = RTNLGRP_DECnet_RULE,
.policy = dn_fib_rule_policy,
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 2175e6d5cc8d..8fdca56bb08f 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -67,7 +67,7 @@ static int dsa_slave_open(struct net_device *dev)
return -ENETDOWN;
if (compare_ether_addr(dev->dev_addr, master->dev_addr)) {
- err = dev_unicast_add(master, dev->dev_addr);
+ err = dev_uc_add(master, dev->dev_addr);
if (err < 0)
goto out;
}
@@ -90,7 +90,7 @@ clear_allmulti:
dev_set_allmulti(master, -1);
del_unicast:
if (compare_ether_addr(dev->dev_addr, master->dev_addr))
- dev_unicast_delete(master, dev->dev_addr);
+ dev_uc_del(master, dev->dev_addr);
out:
return err;
}
@@ -101,14 +101,14 @@ static int dsa_slave_close(struct net_device *dev)
struct net_device *master = p->parent->dst->master_netdev;
dev_mc_unsync(master, dev);
- dev_unicast_unsync(master, dev);
+ dev_uc_unsync(master, dev);
if (dev->flags & IFF_ALLMULTI)
dev_set_allmulti(master, -1);
if (dev->flags & IFF_PROMISC)
dev_set_promiscuity(master, -1);
if (compare_ether_addr(dev->dev_addr, master->dev_addr))
- dev_unicast_delete(master, dev->dev_addr);
+ dev_uc_del(master, dev->dev_addr);
return 0;
}
@@ -130,7 +130,7 @@ static void dsa_slave_set_rx_mode(struct net_device *dev)
struct net_device *master = p->parent->dst->master_netdev;
dev_mc_sync(master, dev);
- dev_unicast_sync(master, dev);
+ dev_uc_sync(master, dev);
}
static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
@@ -147,13 +147,13 @@ static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
goto out;
if (compare_ether_addr(addr->sa_data, master->dev_addr)) {
- err = dev_unicast_add(master, addr->sa_data);
+ err = dev_uc_add(master, addr->sa_data);
if (err < 0)
return err;
}
if (compare_ether_addr(dev->dev_addr, master->dev_addr))
- dev_unicast_delete(master, dev->dev_addr);
+ dev_uc_del(master, dev->dev_addr);
out:
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 205a1c12f3c0..61ec0329316c 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -136,7 +136,7 @@ int eth_rebuild_header(struct sk_buff *skb)
default:
printk(KERN_DEBUG
"%s: unable to resolve type %X addresses.\n",
- dev->name, (int)eth->h_proto);
+ dev->name, ntohs(eth->h_proto));
memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
break;
@@ -162,7 +162,7 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
skb->dev = dev;
skb_reset_mac_header(skb);
- skb_pull(skb, ETH_HLEN);
+ skb_pull_inline(skb, ETH_HLEN);
eth = eth_hdr(skb);
if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 0c94a1ac2946..8e3a1fd938ab 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -250,6 +250,20 @@ config IP_MROUTE
<file:Documentation/networking/multicast.txt>. If you haven't heard
about it, you don't need it.
+config IP_MROUTE_MULTIPLE_TABLES
+ bool "IP: multicast policy routing"
+ depends on IP_MROUTE && IP_ADVANCED_ROUTER
+ select FIB_RULES
+ help
+ Normally, a multicast router runs a userspace daemon and decides
+ what to do with a multicast packet based on the source and
+ destination addresses. If you say Y here, the multicast router
+ will also be able to take interfaces and packet marks into
+ account and run multiple instances of userspace daemons
+ simultaneously, each one handling a single table.
+
+ If unsure, say N.
+
config IP_PIMSM_V1
bool "IP: PIM-SM version 1 support"
depends on IP_MROUTE
@@ -587,9 +601,15 @@ choice
config DEFAULT_HTCP
bool "Htcp" if TCP_CONG_HTCP=y
+ config DEFAULT_HYBLA
+ bool "Hybla" if TCP_CONG_HYBLA=y
+
config DEFAULT_VEGAS
bool "Vegas" if TCP_CONG_VEGAS=y
+ config DEFAULT_VENO
+ bool "Veno" if TCP_CONG_VENO=y
+
config DEFAULT_WESTWOOD
bool "Westwood" if TCP_CONG_WESTWOOD=y
@@ -610,8 +630,10 @@ config DEFAULT_TCP_CONG
default "bic" if DEFAULT_BIC
default "cubic" if DEFAULT_CUBIC
default "htcp" if DEFAULT_HTCP
+ default "hybla" if DEFAULT_HYBLA
default "vegas" if DEFAULT_VEGAS
default "westwood" if DEFAULT_WESTWOOD
+ default "veno" if DEFAULT_VENO
default "reno" if DEFAULT_RENO
default "cubic"
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index f71357422380..c6c43bcd1c6f 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -154,7 +154,7 @@ void inet_sock_destruct(struct sock *sk)
WARN_ON(sk->sk_forward_alloc);
kfree(inet->opt);
- dst_release(sk->sk_dst_cache);
+ dst_release(rcu_dereference_check(sk->sk_dst_cache, 1));
sk_refcnt_debug_dec(sk);
}
EXPORT_SYMBOL(inet_sock_destruct);
@@ -419,6 +419,8 @@ int inet_release(struct socket *sock)
if (sk) {
long timeout;
+ sock_rps_reset_flow(sk);
+
/* Applications forget to leave groups before exiting */
ip_mc_drop_socket(sk);
@@ -546,7 +548,7 @@ static long inet_wait_for_connect(struct sock *sk, long timeo)
{
DEFINE_WAIT(wait);
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
/* Basic assumption: if someone sets sk->sk_err, he _must_
* change state of the socket from TCP_SYN_*.
@@ -559,9 +561,9 @@ static long inet_wait_for_connect(struct sock *sk, long timeo)
lock_sock(sk);
if (signal_pending(current) || !timeo)
break;
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
return timeo;
}
@@ -720,6 +722,8 @@ int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
{
struct sock *sk = sock->sk;
+ sock_rps_record_flow(sk);
+
/* We may need to bind the socket. */
if (!inet_sk(sk)->inet_num && inet_autobind(sk))
return -EAGAIN;
@@ -728,12 +732,13 @@ int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
}
EXPORT_SYMBOL(inet_sendmsg);
-
static ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
size_t size, int flags)
{
struct sock *sk = sock->sk;
+ sock_rps_record_flow(sk);
+
/* We may need to bind the socket. */
if (!inet_sk(sk)->inet_num && inet_autobind(sk))
return -EAGAIN;
@@ -743,6 +748,22 @@ static ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
return sock_no_sendpage(sock, page, offset, size, flags);
}
+int inet_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
+ size_t size, int flags)
+{
+ struct sock *sk = sock->sk;
+ int addr_len = 0;
+ int err;
+
+ sock_rps_record_flow(sk);
+
+ err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
+ flags & ~MSG_DONTWAIT, &addr_len);
+ if (err >= 0)
+ msg->msg_namelen = addr_len;
+ return err;
+}
+EXPORT_SYMBOL(inet_recvmsg);
int inet_shutdown(struct socket *sock, int how)
{
@@ -872,7 +893,7 @@ const struct proto_ops inet_stream_ops = {
.setsockopt = sock_common_setsockopt,
.getsockopt = sock_common_getsockopt,
.sendmsg = tcp_sendmsg,
- .recvmsg = sock_common_recvmsg,
+ .recvmsg = inet_recvmsg,
.mmap = sock_no_mmap,
.sendpage = tcp_sendpage,
.splice_read = tcp_splice_read,
@@ -899,7 +920,7 @@ const struct proto_ops inet_dgram_ops = {
.setsockopt = sock_common_setsockopt,
.getsockopt = sock_common_getsockopt,
.sendmsg = inet_sendmsg,
- .recvmsg = sock_common_recvmsg,
+ .recvmsg = inet_recvmsg,
.mmap = sock_no_mmap,
.sendpage = inet_sendpage,
#ifdef CONFIG_COMPAT
@@ -929,7 +950,7 @@ static const struct proto_ops inet_sockraw_ops = {
.setsockopt = sock_common_setsockopt,
.getsockopt = sock_common_getsockopt,
.sendmsg = inet_sendmsg,
- .recvmsg = sock_common_recvmsg,
+ .recvmsg = inet_recvmsg,
.mmap = sock_no_mmap,
.sendpage = inet_sendpage,
#ifdef CONFIG_COMPAT
@@ -1302,8 +1323,8 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
goto out_unlock;
- id = ntohl(*(u32 *)&iph->id);
- flush = (u16)((ntohl(*(u32 *)iph) ^ skb_gro_len(skb)) | (id ^ IP_DF));
+ id = ntohl(*(__be32 *)&iph->id);
+ flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id ^ IP_DF));
id >>= 16;
for (p = *head; p; p = p->next) {
@@ -1316,8 +1337,8 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
if ((iph->protocol ^ iph2->protocol) |
(iph->tos ^ iph2->tos) |
- (iph->saddr ^ iph2->saddr) |
- (iph->daddr ^ iph2->daddr)) {
+ ((__force u32)iph->saddr ^ (__force u32)iph2->saddr) |
+ ((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) {
NAPI_GRO_CB(p)->same_flow = 0;
continue;
}
@@ -1407,10 +1428,10 @@ EXPORT_SYMBOL_GPL(snmp_fold_field);
int snmp_mib_init(void __percpu *ptr[2], size_t mibsize)
{
BUG_ON(ptr == NULL);
- ptr[0] = __alloc_percpu(mibsize, __alignof__(unsigned long long));
+ ptr[0] = __alloc_percpu(mibsize, __alignof__(unsigned long));
if (!ptr[0])
goto err0;
- ptr[1] = __alloc_percpu(mibsize, __alignof__(unsigned long long));
+ ptr[1] = __alloc_percpu(mibsize, __alignof__(unsigned long));
if (!ptr[1])
goto err1;
return 0;
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 6e747065c202..80769f1f9fab 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -661,13 +661,13 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
#endif
#endif
-#ifdef CONFIG_FDDI
+#if defined(CONFIG_FDDI) || defined(CONFIG_FDDI_MODULE)
case ARPHRD_FDDI:
arp->ar_hrd = htons(ARPHRD_ETHER);
arp->ar_pro = htons(ETH_P_IP);
break;
#endif
-#ifdef CONFIG_TR
+#if defined(CONFIG_TR) || defined(CONFIG_TR_MODULE)
case ARPHRD_IEEE802_TR:
arp->ar_hrd = htons(ARPHRD_IEEE802);
arp->ar_pro = htons(ETH_P_IP);
@@ -1051,7 +1051,7 @@ static int arp_req_set(struct net *net, struct arpreq *r,
return -EINVAL;
}
switch (dev->type) {
-#ifdef CONFIG_FDDI
+#if defined(CONFIG_FDDI) || defined(CONFIG_FDDI_MODULE)
case ARPHRD_FDDI:
/*
* According to RFC 1390, FDDI devices should accept ARP
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 90e3d6379a42..382bc768ed56 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1096,10 +1096,10 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
case NETDEV_DOWN:
ip_mc_down(in_dev);
break;
- case NETDEV_BONDING_OLDTYPE:
+ case NETDEV_PRE_TYPE_CHANGE:
ip_mc_unmap(in_dev);
break;
- case NETDEV_BONDING_NEWTYPE:
+ case NETDEV_POST_TYPE_CHANGE:
ip_mc_remap(in_dev);
break;
case NETDEV_CHANGEMTU:
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index ca2d07b1c706..76daeb5ff564 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -213,7 +213,6 @@ static int fib4_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
{
struct fib4_rule *rule4 = (struct fib4_rule *) rule;
- frh->family = AF_INET;
frh->dst_len = rule4->dst_len;
frh->src_len = rule4->src_len;
frh->tos = rule4->tos;
@@ -234,23 +233,6 @@ nla_put_failure:
return -ENOBUFS;
}
-static u32 fib4_rule_default_pref(struct fib_rules_ops *ops)
-{
- struct list_head *pos;
- struct fib_rule *rule;
-
- if (!list_empty(&ops->rules_list)) {
- pos = ops->rules_list.next;
- if (pos->next != &ops->rules_list) {
- rule = list_entry(pos->next, struct fib_rule, list);
- if (rule->pref)
- return rule->pref - 1;
- }
- }
-
- return 0;
-}
-
static size_t fib4_rule_nlmsg_payload(struct fib_rule *rule)
{
return nla_total_size(4) /* dst */
@@ -263,7 +245,7 @@ static void fib4_rule_flush_cache(struct fib_rules_ops *ops)
rt_cache_flush(ops->fro_net, -1);
}
-static struct fib_rules_ops fib4_rules_ops_template = {
+static const struct fib_rules_ops __net_initdata fib4_rules_ops_template = {
.family = AF_INET,
.rule_size = sizeof(struct fib4_rule),
.addr_size = sizeof(u32),
@@ -272,7 +254,7 @@ static struct fib_rules_ops fib4_rules_ops_template = {
.configure = fib4_rule_configure,
.compare = fib4_rule_compare,
.fill = fib4_rule_fill,
- .default_pref = fib4_rule_default_pref,
+ .default_pref = fib_default_rule_pref,
.nlmsg_payload = fib4_rule_nlmsg_payload,
.flush_cache = fib4_rule_flush_cache,
.nlgroup = RTNLGRP_IPV4_RULE,
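
/* Annotation: the per-family fib4_rule_default_pref() removed above is
 * replaced by the shared fib_default_rule_pref() helper from
 * net/core/fib_rules.c.  A sketch of that helper, assuming it mirrors the
 * removed copy (pick a preference just below the first real rule, or 0 when
 * the rule list is empty):
 */
u32 fib_default_rule_pref(struct fib_rules_ops *ops)
{
	struct list_head *pos;
	struct fib_rule *rule;

	if (!list_empty(&ops->rules_list)) {
		pos = ops->rules_list.next;
		if (pos->next != &ops->rules_list) {
			rule = list_entry(pos->next, struct fib_rule, list);
			if (rule->pref)
				return rule->pref - 1;
		}
	}
	return 0;
}
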
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index ac4dec132735..f3d339f728b0 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -331,9 +331,10 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
if (ip_append_data(sk, icmp_glue_bits, icmp_param,
icmp_param->data_len+icmp_param->head_len,
icmp_param->head_len,
- ipc, rt, MSG_DONTWAIT) < 0)
+ ipc, rt, MSG_DONTWAIT) < 0) {
+ ICMP_INC_STATS_BH(sock_net(sk), ICMP_MIB_OUTERRORS);
ip_flush_pending_frames(sk);
- else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
+ } else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
struct icmphdr *icmph = icmp_hdr(skb);
__wsum csum = 0;
struct sk_buff *skb1;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 15d3eeda92f5..5fff865a4fa7 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -998,7 +998,7 @@ static void ip_mc_filter_add(struct in_device *in_dev, __be32 addr)
--ANK
*/
if (arp_mc_map(addr, buf, dev, 0) == 0)
- dev_mc_add(dev, buf, dev->addr_len, 0);
+ dev_mc_add(dev, buf);
}
/*
@@ -1011,7 +1011,7 @@ static void ip_mc_filter_del(struct in_device *in_dev, __be32 addr)
struct net_device *dev = in_dev->dev;
if (arp_mc_map(addr, buf, dev, 0) == 0)
- dev_mc_delete(dev, buf, dev->addr_len, 0);
+ dev_mc_del(dev, buf);
}
#ifdef CONFIG_IP_MULTICAST
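
/* Annotation: ip_mc_filter_add()/ip_mc_filter_del() move to the simplified
 * device multicast API.  Assumed prototypes after this series (the address
 * length is now taken from dev->addr_len internally and the old "global"
 * reference-count flag is gone):
 *
 *	int dev_mc_add(struct net_device *dev, unsigned char *addr);
 *	int dev_mc_del(struct net_device *dev, unsigned char *addr);
 */
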
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 8da6429269dd..e0a3e3537b14 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -234,7 +234,7 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
* having to remove and re-insert us on the wait queue.
*/
for (;;) {
- prepare_to_wait_exclusive(sk->sk_sleep, &wait,
+ prepare_to_wait_exclusive(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
release_sock(sk);
if (reqsk_queue_empty(&icsk->icsk_accept_queue))
@@ -253,7 +253,7 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
if (!timeo)
break;
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
return err;
}
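
/* Annotation: open-coded sk->sk_sleep accesses are replaced by the
 * sk_sleep() accessor so the wait queue can later be detached from
 * struct sock without touching every caller again.  At this stage the
 * accessor is presumably just a trivial inline, roughly:
 */
static inline wait_queue_head_t *sk_sleep(struct sock *sk)
{
	return sk->sk_sleep;
}
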
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index af10942b326c..56cdf68a074c 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -112,8 +112,8 @@ int ip_forward(struct sk_buff *skb)
skb->priority = rt_tos2priority(iph->tos);
- return NF_HOOK(PF_INET, NF_INET_FORWARD, skb, skb->dev, rt->u.dst.dev,
- ip_forward_finish);
+ return NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev,
+ rt->u.dst.dev, ip_forward_finish);
sr_failed:
/*
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index f8ab7a380d4a..af76de5f76de 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -266,7 +266,7 @@ int ip_local_deliver(struct sk_buff *skb)
return 0;
}
- return NF_HOOK(PF_INET, NF_INET_LOCAL_IN, skb, skb->dev, NULL,
+ return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN, skb, skb->dev, NULL,
ip_local_deliver_finish);
}
@@ -444,7 +444,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
/* Must drop socket now because of tproxy. */
skb_orphan(skb);
- return NF_HOOK(PF_INET, NF_INET_PRE_ROUTING, skb, dev, NULL,
+ return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, dev, NULL,
ip_rcv_finish);
inhdr_error:
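
/* Annotation: the NF_HOOK/NF_HOOK_COND/nf_hook call sites in this and the
 * following files switch from the socket address family PF_INET to the
 * netfilter protocol constant NFPROTO_IPV4.  Both constants have the value
 * 2, so behaviour is unchanged; callers simply move onto the
 * netfilter-specific namespace, as in the converted call above:
 *
 *	return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN, skb, skb->dev, NULL,
 *		       ip_local_deliver_finish);
 */
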
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index d1bcc9f21d4f..252897443ef9 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -96,8 +96,8 @@ int __ip_local_out(struct sk_buff *skb)
iph->tot_len = htons(skb->len);
ip_send_check(iph);
- return nf_hook(PF_INET, NF_INET_LOCAL_OUT, skb, NULL, skb_dst(skb)->dev,
- dst_output);
+ return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
+ skb_dst(skb)->dev, dst_output);
}
int ip_local_out(struct sk_buff *skb)
@@ -272,8 +272,8 @@ int ip_mc_output(struct sk_buff *skb)
) {
struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
if (newskb)
- NF_HOOK(PF_INET, NF_INET_POST_ROUTING, newskb,
- NULL, newskb->dev,
+ NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
+ newskb, NULL, newskb->dev,
ip_dev_loopback_xmit);
}
@@ -288,12 +288,12 @@ int ip_mc_output(struct sk_buff *skb)
if (rt->rt_flags&RTCF_BROADCAST) {
struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
if (newskb)
- NF_HOOK(PF_INET, NF_INET_POST_ROUTING, newskb, NULL,
- newskb->dev, ip_dev_loopback_xmit);
+ NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, newskb,
+ NULL, newskb->dev, ip_dev_loopback_xmit);
}
- return NF_HOOK_COND(PF_INET, NF_INET_POST_ROUTING, skb, NULL, skb->dev,
- ip_finish_output,
+ return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL,
+ skb->dev, ip_finish_output,
!(IPCB(skb)->flags & IPSKB_REROUTED));
}
@@ -306,12 +306,12 @@ int ip_output(struct sk_buff *skb)
skb->dev = dev;
skb->protocol = htons(ETH_P_IP);
- return NF_HOOK_COND(PF_INET, NF_INET_POST_ROUTING, skb, NULL, dev,
+ return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL, dev,
ip_finish_output,
!(IPCB(skb)->flags & IPSKB_REROUTED));
}
-int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
+int ip_queue_xmit(struct sk_buff *skb)
{
struct sock *sk = skb->sk;
struct inet_sock *inet = inet_sk(sk);
@@ -370,7 +370,7 @@ packet_routed:
skb_reset_network_header(skb);
iph = ip_hdr(skb);
*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
- if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok)
+ if (ip_dont_fragment(sk, &rt->u.dst) && !skb->local_df)
iph->frag_off = htons(IP_DF);
else
iph->frag_off = 0;
@@ -469,6 +469,10 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
hlen = iph->ihl * 4;
mtu = dst_mtu(&rt->u.dst) - hlen; /* Size of data space */
+#ifdef CONFIG_BRIDGE_NETFILTER
+ if (skb->nf_bridge)
+ mtu -= nf_bridge_mtu_reduction(skb);
+#endif
IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
/* When frag_list is given, use it. First, check its validity:
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 1e64dabbd232..ce231780a2b1 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -287,12 +287,8 @@ int ip_ra_control(struct sock *sk, unsigned char on,
void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
__be16 port, u32 info, u8 *payload)
{
- struct inet_sock *inet = inet_sk(sk);
struct sock_exterr_skb *serr;
- if (!inet->recverr)
- return;
-
skb = skb_clone(skb, GFP_ATOMIC);
if (!skb)
return;
@@ -958,6 +954,22 @@ e_inval:
return -EINVAL;
}
+/**
+ * ip_queue_rcv_skb - Queue an skb into sock receive queue
+ * @sk: socket
+ * @skb: buffer
+ *
+ * Queues an skb into socket receive queue. If IP_CMSG_PKTINFO option
+ * is not set, we drop skb dst entry now, while dst cache line is hot.
+ */
+int ip_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+ if (!(inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO))
+ skb_dst_drop(skb);
+ return sock_queue_rcv_skb(sk, skb);
+}
+EXPORT_SYMBOL(ip_queue_rcv_skb);
+
int ip_setsockopt(struct sock *sk, int level,
int optname, char __user *optval, unsigned int optlen)
{
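
/* Annotation: ip_queue_rcv_skb() added above is a drop-in replacement for
 * sock_queue_rcv_skb() in the IPv4 datagram receive paths; the UDP and raw
 * socket callers are presumably converted elsewhere in this series, i.e.
 * roughly:
 *
 *	rc = ip_queue_rcv_skb(sk, skb);   (was sock_queue_rcv_skb(sk, skb))
 *
 * so the route cache entry can be released while its cache lines are still
 * hot whenever IP_CMSG_PKTINFO is not requested.
 */
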
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 067ce9e043dc..b9d84e800cf4 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -976,7 +976,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
/* Is it a reply for the device we are configuring? */
if (b->xid != ic_dev_xid) {
if (net_ratelimit())
- printk(KERN_ERR "DHCP/BOOTP: Ignoring delayed packet \n");
+ printk(KERN_ERR "DHCP/BOOTP: Ignoring delayed packet\n");
goto drop_unlock;
}
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 9d4f6d1340a4..f4d9746fa671 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -22,7 +22,7 @@
* overflow.
* Carlos Picoto : PIMv1 Support
* Pavlin Ivanov Radoslavov: PIMv2 Registers must checksum only PIM header
- * Relax this requrement to work with older peers.
+ * Relax this requirement to work with older peers.
*
*/
@@ -63,11 +63,40 @@
#include <net/ipip.h>
#include <net/checksum.h>
#include <net/netlink.h>
+#include <net/fib_rules.h>
#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
#define CONFIG_IP_PIMSM 1
#endif
+struct mr_table {
+ struct list_head list;
+#ifdef CONFIG_NET_NS
+ struct net *net;
+#endif
+ u32 id;
+ struct sock *mroute_sk;
+ struct timer_list ipmr_expire_timer;
+ struct list_head mfc_unres_queue;
+ struct list_head mfc_cache_array[MFC_LINES];
+ struct vif_device vif_table[MAXVIFS];
+ int maxvif;
+ atomic_t cache_resolve_queue_len;
+ int mroute_do_assert;
+ int mroute_do_pim;
+#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
+ int mroute_reg_vif_num;
+#endif
+};
+
+struct ipmr_rule {
+ struct fib_rule common;
+};
+
+struct ipmr_result {
+ struct mr_table *mrt;
+};
+
/* Big lock, protecting vif table, mrt cache and mroute socket state.
Note that the changes are semaphored via rtnl_lock.
*/
@@ -78,9 +107,7 @@ static DEFINE_RWLOCK(mrt_lock);
* Multicast router control variables
*/
-#define VIF_EXISTS(_net, _idx) ((_net)->ipv4.vif_table[_idx].dev != NULL)
-
-static struct mfc_cache *mfc_unres_queue; /* Queue of unresolved entries */
+#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL)
/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);
@@ -95,12 +122,215 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
static struct kmem_cache *mrt_cachep __read_mostly;
-static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local);
-static int ipmr_cache_report(struct net *net,
+static struct mr_table *ipmr_new_table(struct net *net, u32 id);
+static int ip_mr_forward(struct net *net, struct mr_table *mrt,
+ struct sk_buff *skb, struct mfc_cache *cache,
+ int local);
+static int ipmr_cache_report(struct mr_table *mrt,
struct sk_buff *pkt, vifi_t vifi, int assert);
-static int ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm);
+static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+ struct mfc_cache *c, struct rtmsg *rtm);
+static void ipmr_expire_process(unsigned long arg);
+
+#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
+#define ipmr_for_each_table(mrt, net) \
+ list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)
+
+static struct mr_table *ipmr_get_table(struct net *net, u32 id)
+{
+ struct mr_table *mrt;
+
+ ipmr_for_each_table(mrt, net) {
+ if (mrt->id == id)
+ return mrt;
+ }
+ return NULL;
+}
+
+static int ipmr_fib_lookup(struct net *net, struct flowi *flp,
+ struct mr_table **mrt)
+{
+ struct ipmr_result res;
+ struct fib_lookup_arg arg = { .result = &res, };
+ int err;
+
+ err = fib_rules_lookup(net->ipv4.mr_rules_ops, flp, 0, &arg);
+ if (err < 0)
+ return err;
+ *mrt = res.mrt;
+ return 0;
+}
+
+static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
+ int flags, struct fib_lookup_arg *arg)
+{
+ struct ipmr_result *res = arg->result;
+ struct mr_table *mrt;
+
+ switch (rule->action) {
+ case FR_ACT_TO_TBL:
+ break;
+ case FR_ACT_UNREACHABLE:
+ return -ENETUNREACH;
+ case FR_ACT_PROHIBIT:
+ return -EACCES;
+ case FR_ACT_BLACKHOLE:
+ default:
+ return -EINVAL;
+ }
+
+ mrt = ipmr_get_table(rule->fr_net, rule->table);
+ if (mrt == NULL)
+ return -EAGAIN;
+ res->mrt = mrt;
+ return 0;
+}
+
+static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
+{
+ return 1;
+}
-static struct timer_list ipmr_expire_timer;
+static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {
+ FRA_GENERIC_POLICY,
+};
+
+static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
+ struct fib_rule_hdr *frh, struct nlattr **tb)
+{
+ return 0;
+}
+
+static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
+ struct nlattr **tb)
+{
+ return 1;
+}
+
+static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
+ struct fib_rule_hdr *frh)
+{
+ frh->dst_len = 0;
+ frh->src_len = 0;
+ frh->tos = 0;
+ return 0;
+}
+
+static const struct fib_rules_ops __net_initdata ipmr_rules_ops_template = {
+ .family = RTNL_FAMILY_IPMR,
+ .rule_size = sizeof(struct ipmr_rule),
+ .addr_size = sizeof(u32),
+ .action = ipmr_rule_action,
+ .match = ipmr_rule_match,
+ .configure = ipmr_rule_configure,
+ .compare = ipmr_rule_compare,
+ .default_pref = fib_default_rule_pref,
+ .fill = ipmr_rule_fill,
+ .nlgroup = RTNLGRP_IPV4_RULE,
+ .policy = ipmr_rule_policy,
+ .owner = THIS_MODULE,
+};
+
+static int __net_init ipmr_rules_init(struct net *net)
+{
+ struct fib_rules_ops *ops;
+ struct mr_table *mrt;
+ int err;
+
+ ops = fib_rules_register(&ipmr_rules_ops_template, net);
+ if (IS_ERR(ops))
+ return PTR_ERR(ops);
+
+ INIT_LIST_HEAD(&net->ipv4.mr_tables);
+
+ mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
+ if (mrt == NULL) {
+ err = -ENOMEM;
+ goto err1;
+ }
+
+ err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);
+ if (err < 0)
+ goto err2;
+
+ net->ipv4.mr_rules_ops = ops;
+ return 0;
+
+err2:
+ kfree(mrt);
+err1:
+ fib_rules_unregister(ops);
+ return err;
+}
+
+static void __net_exit ipmr_rules_exit(struct net *net)
+{
+ struct mr_table *mrt, *next;
+
+ list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list)
+ kfree(mrt);
+ fib_rules_unregister(net->ipv4.mr_rules_ops);
+}
+#else
+#define ipmr_for_each_table(mrt, net) \
+ for (mrt = net->ipv4.mrt; mrt; mrt = NULL)
+
+static struct mr_table *ipmr_get_table(struct net *net, u32 id)
+{
+ return net->ipv4.mrt;
+}
+
+static int ipmr_fib_lookup(struct net *net, struct flowi *flp,
+ struct mr_table **mrt)
+{
+ *mrt = net->ipv4.mrt;
+ return 0;
+}
+
+static int __net_init ipmr_rules_init(struct net *net)
+{
+ net->ipv4.mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
+ return net->ipv4.mrt ? 0 : -ENOMEM;
+}
+
+static void __net_exit ipmr_rules_exit(struct net *net)
+{
+ kfree(net->ipv4.mrt);
+}
+#endif
+
+static struct mr_table *ipmr_new_table(struct net *net, u32 id)
+{
+ struct mr_table *mrt;
+ unsigned int i;
+
+ mrt = ipmr_get_table(net, id);
+ if (mrt != NULL)
+ return mrt;
+
+ mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
+ if (mrt == NULL)
+ return NULL;
+ write_pnet(&mrt->net, net);
+ mrt->id = id;
+
+ /* Forwarding cache */
+ for (i = 0; i < MFC_LINES; i++)
+ INIT_LIST_HEAD(&mrt->mfc_cache_array[i]);
+
+ INIT_LIST_HEAD(&mrt->mfc_unres_queue);
+
+ setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
+ (unsigned long)mrt);
+
+#ifdef CONFIG_IP_PIMSM
+ mrt->mroute_reg_vif_num = -1;
+#endif
+#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
+ list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
+#endif
+ return mrt;
+}
/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
@@ -201,12 +431,22 @@ failure:
static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct net *net = dev_net(dev);
+ struct mr_table *mrt;
+ struct flowi fl = {
+ .oif = dev->ifindex,
+ .iif = skb->skb_iif,
+ .mark = skb->mark,
+ };
+ int err;
+
+ err = ipmr_fib_lookup(net, &fl, &mrt);
+ if (err < 0)
+ return err;
read_lock(&mrt_lock);
dev->stats.tx_bytes += skb->len;
dev->stats.tx_packets++;
- ipmr_cache_report(net, skb, net->ipv4.mroute_reg_vif_num,
- IGMPMSG_WHOLEPKT);
+ ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
read_unlock(&mrt_lock);
kfree_skb(skb);
return NETDEV_TX_OK;
@@ -226,12 +466,18 @@ static void reg_vif_setup(struct net_device *dev)
dev->features |= NETIF_F_NETNS_LOCAL;
}
-static struct net_device *ipmr_reg_vif(struct net *net)
+static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
struct net_device *dev;
struct in_device *in_dev;
+ char name[IFNAMSIZ];
- dev = alloc_netdev(0, "pimreg", reg_vif_setup);
+ if (mrt->id == RT_TABLE_DEFAULT)
+ sprintf(name, "pimreg");
+ else
+ sprintf(name, "pimreg%u", mrt->id);
+
+ dev = alloc_netdev(0, name, reg_vif_setup);
if (dev == NULL)
return NULL;
@@ -276,17 +522,17 @@ failure:
* @notify: Set to 1, if the caller is a notifier_call
*/
-static int vif_delete(struct net *net, int vifi, int notify,
+static int vif_delete(struct mr_table *mrt, int vifi, int notify,
struct list_head *head)
{
struct vif_device *v;
struct net_device *dev;
struct in_device *in_dev;
- if (vifi < 0 || vifi >= net->ipv4.maxvif)
+ if (vifi < 0 || vifi >= mrt->maxvif)
return -EADDRNOTAVAIL;
- v = &net->ipv4.vif_table[vifi];
+ v = &mrt->vif_table[vifi];
write_lock_bh(&mrt_lock);
dev = v->dev;
@@ -298,17 +544,17 @@ static int vif_delete(struct net *net, int vifi, int notify,
}
#ifdef CONFIG_IP_PIMSM
- if (vifi == net->ipv4.mroute_reg_vif_num)
- net->ipv4.mroute_reg_vif_num = -1;
+ if (vifi == mrt->mroute_reg_vif_num)
+ mrt->mroute_reg_vif_num = -1;
#endif
- if (vifi+1 == net->ipv4.maxvif) {
+ if (vifi+1 == mrt->maxvif) {
int tmp;
for (tmp=vifi-1; tmp>=0; tmp--) {
- if (VIF_EXISTS(net, tmp))
+ if (VIF_EXISTS(mrt, tmp))
break;
}
- net->ipv4.maxvif = tmp+1;
+ mrt->maxvif = tmp+1;
}
write_unlock_bh(&mrt_lock);
@@ -329,7 +575,6 @@ static int vif_delete(struct net *net, int vifi, int notify,
static inline void ipmr_cache_free(struct mfc_cache *c)
{
- release_net(mfc_net(c));
kmem_cache_free(mrt_cachep, c);
}
@@ -337,13 +582,13 @@ static inline void ipmr_cache_free(struct mfc_cache *c)
and reporting error to netlink readers.
*/
-static void ipmr_destroy_unres(struct mfc_cache *c)
+static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
{
+ struct net *net = read_pnet(&mrt->net);
struct sk_buff *skb;
struct nlmsgerr *e;
- struct net *net = mfc_net(c);
- atomic_dec(&net->ipv4.cache_resolve_queue_len);
+ atomic_dec(&mrt->cache_resolve_queue_len);
while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
if (ip_hdr(skb)->version == 0) {
@@ -364,42 +609,40 @@ static void ipmr_destroy_unres(struct mfc_cache *c)
}
-/* Single timer process for all the unresolved queue. */
+/* Timer process for the unresolved queue. */
-static void ipmr_expire_process(unsigned long dummy)
+static void ipmr_expire_process(unsigned long arg)
{
+ struct mr_table *mrt = (struct mr_table *)arg;
unsigned long now;
unsigned long expires;
- struct mfc_cache *c, **cp;
+ struct mfc_cache *c, *next;
if (!spin_trylock(&mfc_unres_lock)) {
- mod_timer(&ipmr_expire_timer, jiffies+HZ/10);
+ mod_timer(&mrt->ipmr_expire_timer, jiffies+HZ/10);
return;
}
- if (mfc_unres_queue == NULL)
+ if (list_empty(&mrt->mfc_unres_queue))
goto out;
now = jiffies;
expires = 10*HZ;
- cp = &mfc_unres_queue;
- while ((c=*cp) != NULL) {
+ list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
if (time_after(c->mfc_un.unres.expires, now)) {
unsigned long interval = c->mfc_un.unres.expires - now;
if (interval < expires)
expires = interval;
- cp = &c->next;
continue;
}
- *cp = c->next;
-
- ipmr_destroy_unres(c);
+ list_del(&c->list);
+ ipmr_destroy_unres(mrt, c);
}
- if (mfc_unres_queue != NULL)
- mod_timer(&ipmr_expire_timer, jiffies + expires);
+ if (!list_empty(&mrt->mfc_unres_queue))
+ mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
out:
spin_unlock(&mfc_unres_lock);
@@ -407,17 +650,17 @@ out:
/* Fill oifs list. It is called under write locked mrt_lock. */
-static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls)
+static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache,
+ unsigned char *ttls)
{
int vifi;
- struct net *net = mfc_net(cache);
cache->mfc_un.res.minvif = MAXVIFS;
cache->mfc_un.res.maxvif = 0;
memset(cache->mfc_un.res.ttls, 255, MAXVIFS);
- for (vifi = 0; vifi < net->ipv4.maxvif; vifi++) {
- if (VIF_EXISTS(net, vifi) &&
+ for (vifi = 0; vifi < mrt->maxvif; vifi++) {
+ if (VIF_EXISTS(mrt, vifi) &&
ttls[vifi] && ttls[vifi] < 255) {
cache->mfc_un.res.ttls[vifi] = ttls[vifi];
if (cache->mfc_un.res.minvif > vifi)
@@ -428,16 +671,17 @@ static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls)
}
}
-static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock)
+static int vif_add(struct net *net, struct mr_table *mrt,
+ struct vifctl *vifc, int mrtsock)
{
int vifi = vifc->vifc_vifi;
- struct vif_device *v = &net->ipv4.vif_table[vifi];
+ struct vif_device *v = &mrt->vif_table[vifi];
struct net_device *dev;
struct in_device *in_dev;
int err;
/* Is vif busy ? */
- if (VIF_EXISTS(net, vifi))
+ if (VIF_EXISTS(mrt, vifi))
return -EADDRINUSE;
switch (vifc->vifc_flags) {
@@ -447,9 +691,9 @@ static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock)
* Special Purpose VIF in PIM
* All the packets will be sent to the daemon
*/
- if (net->ipv4.mroute_reg_vif_num >= 0)
+ if (mrt->mroute_reg_vif_num >= 0)
return -EADDRINUSE;
- dev = ipmr_reg_vif(net);
+ dev = ipmr_reg_vif(net, mrt);
if (!dev)
return -ENOBUFS;
err = dev_set_allmulti(dev, 1);
@@ -525,49 +769,47 @@ static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock)
v->dev = dev;
#ifdef CONFIG_IP_PIMSM
if (v->flags&VIFF_REGISTER)
- net->ipv4.mroute_reg_vif_num = vifi;
+ mrt->mroute_reg_vif_num = vifi;
#endif
- if (vifi+1 > net->ipv4.maxvif)
- net->ipv4.maxvif = vifi+1;
+ if (vifi+1 > mrt->maxvif)
+ mrt->maxvif = vifi+1;
write_unlock_bh(&mrt_lock);
return 0;
}
-static struct mfc_cache *ipmr_cache_find(struct net *net,
+static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
__be32 origin,
__be32 mcastgrp)
{
int line = MFC_HASH(mcastgrp, origin);
struct mfc_cache *c;
- for (c = net->ipv4.mfc_cache_array[line]; c; c = c->next) {
- if (c->mfc_origin==origin && c->mfc_mcastgrp==mcastgrp)
- break;
+ list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
+ if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp)
+ return c;
}
- return c;
+ return NULL;
}
/*
* Allocate a multicast cache entry
*/
-static struct mfc_cache *ipmr_cache_alloc(struct net *net)
+static struct mfc_cache *ipmr_cache_alloc(void)
{
struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
if (c == NULL)
return NULL;
c->mfc_un.res.minvif = MAXVIFS;
- mfc_net_set(c, net);
return c;
}
-static struct mfc_cache *ipmr_cache_alloc_unres(struct net *net)
+static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
if (c == NULL)
return NULL;
skb_queue_head_init(&c->mfc_un.unres.unresolved);
c->mfc_un.unres.expires = jiffies + 10*HZ;
- mfc_net_set(c, net);
return c;
}
@@ -575,7 +817,8 @@ static struct mfc_cache *ipmr_cache_alloc_unres(struct net *net)
* A cache entry has gone into a resolved state from queued
*/
-static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
+static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
+ struct mfc_cache *uc, struct mfc_cache *c)
{
struct sk_buff *skb;
struct nlmsgerr *e;
@@ -588,7 +831,7 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
if (ip_hdr(skb)->version == 0) {
struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
- if (ipmr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
+ if (__ipmr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
nlh->nlmsg_len = (skb_tail_pointer(skb) -
(u8 *)nlh);
} else {
@@ -600,9 +843,9 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
memset(&e->msg, 0, sizeof(e->msg));
}
- rtnl_unicast(skb, mfc_net(c), NETLINK_CB(skb).pid);
+ rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
} else
- ip_mr_forward(skb, c, 0);
+ ip_mr_forward(net, mrt, skb, c, 0);
}
}
@@ -613,7 +856,7 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
* Called under mrt_lock.
*/
-static int ipmr_cache_report(struct net *net,
+static int ipmr_cache_report(struct mr_table *mrt,
struct sk_buff *pkt, vifi_t vifi, int assert)
{
struct sk_buff *skb;
@@ -646,7 +889,7 @@ static int ipmr_cache_report(struct net *net,
memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
msg->im_msgtype = IGMPMSG_WHOLEPKT;
msg->im_mbz = 0;
- msg->im_vif = net->ipv4.mroute_reg_vif_num;
+ msg->im_vif = mrt->mroute_reg_vif_num;
ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
sizeof(struct iphdr));
@@ -678,7 +921,7 @@ static int ipmr_cache_report(struct net *net,
skb->transport_header = skb->network_header;
}
- if (net->ipv4.mroute_sk == NULL) {
+ if (mrt->mroute_sk == NULL) {
kfree_skb(skb);
return -EINVAL;
}
@@ -686,7 +929,7 @@ static int ipmr_cache_report(struct net *net,
/*
* Deliver to mrouted
*/
- ret = sock_queue_rcv_skb(net->ipv4.mroute_sk, skb);
+ ret = sock_queue_rcv_skb(mrt->mroute_sk, skb);
if (ret < 0) {
if (net_ratelimit())
printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n");
@@ -701,27 +944,29 @@ static int ipmr_cache_report(struct net *net,
*/
static int
-ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb)
+ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
{
+ bool found = false;
int err;
struct mfc_cache *c;
const struct iphdr *iph = ip_hdr(skb);
spin_lock_bh(&mfc_unres_lock);
- for (c=mfc_unres_queue; c; c=c->next) {
- if (net_eq(mfc_net(c), net) &&
- c->mfc_mcastgrp == iph->daddr &&
- c->mfc_origin == iph->saddr)
+ list_for_each_entry(c, &mrt->mfc_unres_queue, list) {
+ if (c->mfc_mcastgrp == iph->daddr &&
+ c->mfc_origin == iph->saddr) {
+ found = true;
break;
+ }
}
- if (c == NULL) {
+ if (!found) {
/*
* Create a new entry if allowable
*/
- if (atomic_read(&net->ipv4.cache_resolve_queue_len) >= 10 ||
- (c = ipmr_cache_alloc_unres(net)) == NULL) {
+ if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
+ (c = ipmr_cache_alloc_unres()) == NULL) {
spin_unlock_bh(&mfc_unres_lock);
kfree_skb(skb);
@@ -738,7 +983,7 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb)
/*
* Reflect first query at mrouted.
*/
- err = ipmr_cache_report(net, skb, vifi, IGMPMSG_NOCACHE);
+ err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
if (err < 0) {
/* If the report failed throw the cache entry
out - Brad Parker
@@ -750,11 +995,11 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb)
return err;
}
- atomic_inc(&net->ipv4.cache_resolve_queue_len);
- c->next = mfc_unres_queue;
- mfc_unres_queue = c;
+ atomic_inc(&mrt->cache_resolve_queue_len);
+ list_add(&c->list, &mrt->mfc_unres_queue);
- mod_timer(&ipmr_expire_timer, c->mfc_un.unres.expires);
+ if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
+ mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);
}
/*
@@ -776,19 +1021,18 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb)
* MFC cache manipulation by user space mroute daemon
*/
-static int ipmr_mfc_delete(struct net *net, struct mfcctl *mfc)
+static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc)
{
int line;
- struct mfc_cache *c, **cp;
+ struct mfc_cache *c, *next;
line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
- for (cp = &net->ipv4.mfc_cache_array[line];
- (c = *cp) != NULL; cp = &c->next) {
+ list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) {
if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
write_lock_bh(&mrt_lock);
- *cp = c->next;
+ list_del(&c->list);
write_unlock_bh(&mrt_lock);
ipmr_cache_free(c);
@@ -798,27 +1042,30 @@ static int ipmr_mfc_delete(struct net *net, struct mfcctl *mfc)
return -ENOENT;
}
-static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock)
+static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
+ struct mfcctl *mfc, int mrtsock)
{
+ bool found = false;
int line;
- struct mfc_cache *uc, *c, **cp;
+ struct mfc_cache *uc, *c;
if (mfc->mfcc_parent >= MAXVIFS)
return -ENFILE;
line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
- for (cp = &net->ipv4.mfc_cache_array[line];
- (c = *cp) != NULL; cp = &c->next) {
+ list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
- c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr)
+ c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
+ found = true;
break;
+ }
}
- if (c != NULL) {
+ if (found) {
write_lock_bh(&mrt_lock);
c->mfc_parent = mfc->mfcc_parent;
- ipmr_update_thresholds(c, mfc->mfcc_ttls);
+ ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
if (!mrtsock)
c->mfc_flags |= MFC_STATIC;
write_unlock_bh(&mrt_lock);
@@ -828,43 +1075,42 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock)
if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
return -EINVAL;
- c = ipmr_cache_alloc(net);
+ c = ipmr_cache_alloc();
if (c == NULL)
return -ENOMEM;
c->mfc_origin = mfc->mfcc_origin.s_addr;
c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
c->mfc_parent = mfc->mfcc_parent;
- ipmr_update_thresholds(c, mfc->mfcc_ttls);
+ ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
if (!mrtsock)
c->mfc_flags |= MFC_STATIC;
write_lock_bh(&mrt_lock);
- c->next = net->ipv4.mfc_cache_array[line];
- net->ipv4.mfc_cache_array[line] = c;
+ list_add(&c->list, &mrt->mfc_cache_array[line]);
write_unlock_bh(&mrt_lock);
/*
* Check to see if we resolved a queued list. If so we
* need to send on the frames and tidy up.
*/
+ found = false;
spin_lock_bh(&mfc_unres_lock);
- for (cp = &mfc_unres_queue; (uc=*cp) != NULL;
- cp = &uc->next) {
- if (net_eq(mfc_net(uc), net) &&
- uc->mfc_origin == c->mfc_origin &&
+ list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
+ if (uc->mfc_origin == c->mfc_origin &&
uc->mfc_mcastgrp == c->mfc_mcastgrp) {
- *cp = uc->next;
- atomic_dec(&net->ipv4.cache_resolve_queue_len);
+ list_del(&uc->list);
+ atomic_dec(&mrt->cache_resolve_queue_len);
+ found = true;
break;
}
}
- if (mfc_unres_queue == NULL)
- del_timer(&ipmr_expire_timer);
+ if (list_empty(&mrt->mfc_unres_queue))
+ del_timer(&mrt->ipmr_expire_timer);
spin_unlock_bh(&mfc_unres_lock);
- if (uc) {
- ipmr_cache_resolve(uc, c);
+ if (found) {
+ ipmr_cache_resolve(net, mrt, uc, c);
ipmr_cache_free(uc);
}
return 0;
@@ -874,53 +1120,41 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock)
* Close the multicast socket, and clear the vif tables etc
*/
-static void mroute_clean_tables(struct net *net)
+static void mroute_clean_tables(struct mr_table *mrt)
{
int i;
LIST_HEAD(list);
+ struct mfc_cache *c, *next;
/*
* Shut down all active vif entries
*/
- for (i = 0; i < net->ipv4.maxvif; i++) {
- if (!(net->ipv4.vif_table[i].flags&VIFF_STATIC))
- vif_delete(net, i, 0, &list);
+ for (i = 0; i < mrt->maxvif; i++) {
+ if (!(mrt->vif_table[i].flags&VIFF_STATIC))
+ vif_delete(mrt, i, 0, &list);
}
unregister_netdevice_many(&list);
/*
* Wipe the cache
*/
- for (i=0; i<MFC_LINES; i++) {
- struct mfc_cache *c, **cp;
-
- cp = &net->ipv4.mfc_cache_array[i];
- while ((c = *cp) != NULL) {
- if (c->mfc_flags&MFC_STATIC) {
- cp = &c->next;
+ for (i = 0; i < MFC_LINES; i++) {
+ list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
+ if (c->mfc_flags&MFC_STATIC)
continue;
- }
write_lock_bh(&mrt_lock);
- *cp = c->next;
+ list_del(&c->list);
write_unlock_bh(&mrt_lock);
ipmr_cache_free(c);
}
}
- if (atomic_read(&net->ipv4.cache_resolve_queue_len) != 0) {
- struct mfc_cache *c, **cp;
-
+ if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
spin_lock_bh(&mfc_unres_lock);
- cp = &mfc_unres_queue;
- while ((c = *cp) != NULL) {
- if (!net_eq(mfc_net(c), net)) {
- cp = &c->next;
- continue;
- }
- *cp = c->next;
-
- ipmr_destroy_unres(c);
+ list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
+ list_del(&c->list);
+ ipmr_destroy_unres(mrt, c);
}
spin_unlock_bh(&mfc_unres_lock);
}
@@ -929,16 +1163,19 @@ static void mroute_clean_tables(struct net *net)
static void mrtsock_destruct(struct sock *sk)
{
struct net *net = sock_net(sk);
+ struct mr_table *mrt;
rtnl_lock();
- if (sk == net->ipv4.mroute_sk) {
- IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
+ ipmr_for_each_table(mrt, net) {
+ if (sk == mrt->mroute_sk) {
+ IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
- write_lock_bh(&mrt_lock);
- net->ipv4.mroute_sk = NULL;
- write_unlock_bh(&mrt_lock);
+ write_lock_bh(&mrt_lock);
+ mrt->mroute_sk = NULL;
+ write_unlock_bh(&mrt_lock);
- mroute_clean_tables(net);
+ mroute_clean_tables(mrt);
+ }
}
rtnl_unlock();
}
@@ -956,9 +1193,14 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
struct vifctl vif;
struct mfcctl mfc;
struct net *net = sock_net(sk);
+ struct mr_table *mrt;
+
+ mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
+ if (mrt == NULL)
+ return -ENOENT;
if (optname != MRT_INIT) {
- if (sk != net->ipv4.mroute_sk && !capable(CAP_NET_ADMIN))
+ if (sk != mrt->mroute_sk && !capable(CAP_NET_ADMIN))
return -EACCES;
}
@@ -971,7 +1213,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
return -ENOPROTOOPT;
rtnl_lock();
- if (net->ipv4.mroute_sk) {
+ if (mrt->mroute_sk) {
rtnl_unlock();
return -EADDRINUSE;
}
@@ -979,7 +1221,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
ret = ip_ra_control(sk, 1, mrtsock_destruct);
if (ret == 0) {
write_lock_bh(&mrt_lock);
- net->ipv4.mroute_sk = sk;
+ mrt->mroute_sk = sk;
write_unlock_bh(&mrt_lock);
IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
@@ -987,7 +1229,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
rtnl_unlock();
return ret;
case MRT_DONE:
- if (sk != net->ipv4.mroute_sk)
+ if (sk != mrt->mroute_sk)
return -EACCES;
return ip_ra_control(sk, 0, NULL);
case MRT_ADD_VIF:
@@ -1000,9 +1242,9 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
return -ENFILE;
rtnl_lock();
if (optname == MRT_ADD_VIF) {
- ret = vif_add(net, &vif, sk == net->ipv4.mroute_sk);
+ ret = vif_add(net, mrt, &vif, sk == mrt->mroute_sk);
} else {
- ret = vif_delete(net, vif.vifc_vifi, 0, NULL);
+ ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
}
rtnl_unlock();
return ret;
@@ -1019,9 +1261,9 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
return -EFAULT;
rtnl_lock();
if (optname == MRT_DEL_MFC)
- ret = ipmr_mfc_delete(net, &mfc);
+ ret = ipmr_mfc_delete(mrt, &mfc);
else
- ret = ipmr_mfc_add(net, &mfc, sk == net->ipv4.mroute_sk);
+ ret = ipmr_mfc_add(net, mrt, &mfc, sk == mrt->mroute_sk);
rtnl_unlock();
return ret;
/*
@@ -1032,7 +1274,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
int v;
if (get_user(v,(int __user *)optval))
return -EFAULT;
- net->ipv4.mroute_do_assert = (v) ? 1 : 0;
+ mrt->mroute_do_assert = (v) ? 1 : 0;
return 0;
}
#ifdef CONFIG_IP_PIMSM
@@ -1046,14 +1288,35 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
rtnl_lock();
ret = 0;
- if (v != net->ipv4.mroute_do_pim) {
- net->ipv4.mroute_do_pim = v;
- net->ipv4.mroute_do_assert = v;
+ if (v != mrt->mroute_do_pim) {
+ mrt->mroute_do_pim = v;
+ mrt->mroute_do_assert = v;
}
rtnl_unlock();
return ret;
}
#endif
+#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
+ case MRT_TABLE:
+ {
+ u32 v;
+
+ if (optlen != sizeof(u32))
+ return -EINVAL;
+ if (get_user(v, (u32 __user *)optval))
+ return -EFAULT;
+ if (sk == mrt->mroute_sk)
+ return -EBUSY;
+
+ rtnl_lock();
+ ret = 0;
+ if (!ipmr_new_table(net, v))
+ ret = -ENOMEM;
+ raw_sk(sk)->ipmr_table = v;
+ rtnl_unlock();
+ return ret;
+ }
+#endif
/*
* Spurious command, or MRT_VERSION which you cannot
* set.
@@ -1072,6 +1335,11 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int
int olr;
int val;
struct net *net = sock_net(sk);
+ struct mr_table *mrt;
+
+ mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
+ if (mrt == NULL)
+ return -ENOENT;
if (optname != MRT_VERSION &&
#ifdef CONFIG_IP_PIMSM
@@ -1093,10 +1361,10 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int
val = 0x0305;
#ifdef CONFIG_IP_PIMSM
else if (optname == MRT_PIM)
- val = net->ipv4.mroute_do_pim;
+ val = mrt->mroute_do_pim;
#endif
else
- val = net->ipv4.mroute_do_assert;
+ val = mrt->mroute_do_assert;
if (copy_to_user(optval, &val, olr))
return -EFAULT;
return 0;
@@ -1113,16 +1381,21 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
struct vif_device *vif;
struct mfc_cache *c;
struct net *net = sock_net(sk);
+ struct mr_table *mrt;
+
+ mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
+ if (mrt == NULL)
+ return -ENOENT;
switch (cmd) {
case SIOCGETVIFCNT:
if (copy_from_user(&vr, arg, sizeof(vr)))
return -EFAULT;
- if (vr.vifi >= net->ipv4.maxvif)
+ if (vr.vifi >= mrt->maxvif)
return -EINVAL;
read_lock(&mrt_lock);
- vif = &net->ipv4.vif_table[vr.vifi];
- if (VIF_EXISTS(net, vr.vifi)) {
+ vif = &mrt->vif_table[vr.vifi];
+ if (VIF_EXISTS(mrt, vr.vifi)) {
vr.icount = vif->pkt_in;
vr.ocount = vif->pkt_out;
vr.ibytes = vif->bytes_in;
@@ -1140,7 +1413,7 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
return -EFAULT;
read_lock(&mrt_lock);
- c = ipmr_cache_find(net, sr.src.s_addr, sr.grp.s_addr);
+ c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
if (c) {
sr.pktcnt = c->mfc_un.res.pkt;
sr.bytecnt = c->mfc_un.res.bytes;
@@ -1163,16 +1436,20 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, v
{
struct net_device *dev = ptr;
struct net *net = dev_net(dev);
+ struct mr_table *mrt;
struct vif_device *v;
int ct;
LIST_HEAD(list);
if (event != NETDEV_UNREGISTER)
return NOTIFY_DONE;
- v = &net->ipv4.vif_table[0];
- for (ct = 0; ct < net->ipv4.maxvif; ct++, v++) {
- if (v->dev == dev)
- vif_delete(net, ct, 1, &list);
+
+ ipmr_for_each_table(mrt, net) {
+ v = &mrt->vif_table[0];
+ for (ct = 0; ct < mrt->maxvif; ct++, v++) {
+ if (v->dev == dev)
+ vif_delete(mrt, ct, 1, &list);
+ }
}
unregister_netdevice_many(&list);
return NOTIFY_DONE;
@@ -1231,11 +1508,11 @@ static inline int ipmr_forward_finish(struct sk_buff *skb)
* Processing handlers for ipmr_forward
*/
-static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
+static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
+ struct sk_buff *skb, struct mfc_cache *c, int vifi)
{
- struct net *net = mfc_net(c);
const struct iphdr *iph = ip_hdr(skb);
- struct vif_device *vif = &net->ipv4.vif_table[vifi];
+ struct vif_device *vif = &mrt->vif_table[vifi];
struct net_device *dev;
struct rtable *rt;
int encap = 0;
@@ -1249,7 +1526,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
vif->bytes_out += skb->len;
vif->dev->stats.tx_bytes += skb->len;
vif->dev->stats.tx_packets++;
- ipmr_cache_report(net, skb, vifi, IGMPMSG_WHOLEPKT);
+ ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
goto out_free;
}
#endif
@@ -1323,7 +1600,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
* not mrouter) cannot join to more than one interface - it will
* result in receiving multiple packets.
*/
- NF_HOOK(PF_INET, NF_INET_FORWARD, skb, skb->dev, dev,
+ NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev, dev,
ipmr_forward_finish);
return;
@@ -1332,12 +1609,12 @@ out_free:
return;
}
-static int ipmr_find_vif(struct net_device *dev)
+static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
{
- struct net *net = dev_net(dev);
int ct;
- for (ct = net->ipv4.maxvif-1; ct >= 0; ct--) {
- if (net->ipv4.vif_table[ct].dev == dev)
+
+ for (ct = mrt->maxvif-1; ct >= 0; ct--) {
+ if (mrt->vif_table[ct].dev == dev)
break;
}
return ct;
@@ -1345,11 +1622,12 @@ static int ipmr_find_vif(struct net_device *dev)
/* "local" means that we should preserve one skb (for local delivery) */
-static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local)
+static int ip_mr_forward(struct net *net, struct mr_table *mrt,
+ struct sk_buff *skb, struct mfc_cache *cache,
+ int local)
{
int psend = -1;
int vif, ct;
- struct net *net = mfc_net(cache);
vif = cache->mfc_parent;
cache->mfc_un.res.pkt++;
@@ -1358,7 +1636,7 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
/*
* Wrong interface: drop packet and (maybe) send PIM assert.
*/
- if (net->ipv4.vif_table[vif].dev != skb->dev) {
+ if (mrt->vif_table[vif].dev != skb->dev) {
int true_vifi;
if (skb_rtable(skb)->fl.iif == 0) {
@@ -1377,26 +1655,26 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
}
cache->mfc_un.res.wrong_if++;
- true_vifi = ipmr_find_vif(skb->dev);
+ true_vifi = ipmr_find_vif(mrt, skb->dev);
- if (true_vifi >= 0 && net->ipv4.mroute_do_assert &&
+ if (true_vifi >= 0 && mrt->mroute_do_assert &&
/* pimsm uses asserts, when switching from RPT to SPT,
so that we cannot check that packet arrived on an oif.
It is bad, but otherwise we would need to move pretty
large chunk of pimd to kernel. Ough... --ANK
*/
- (net->ipv4.mroute_do_pim ||
+ (mrt->mroute_do_pim ||
cache->mfc_un.res.ttls[true_vifi] < 255) &&
time_after(jiffies,
cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
cache->mfc_un.res.last_assert = jiffies;
- ipmr_cache_report(net, skb, true_vifi, IGMPMSG_WRONGVIF);
+ ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
}
goto dont_forward;
}
- net->ipv4.vif_table[vif].pkt_in++;
- net->ipv4.vif_table[vif].bytes_in += skb->len;
+ mrt->vif_table[vif].pkt_in++;
+ mrt->vif_table[vif].bytes_in += skb->len;
/*
* Forward the frame
@@ -1406,7 +1684,8 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
if (psend != -1) {
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2)
- ipmr_queue_xmit(skb2, cache, psend);
+ ipmr_queue_xmit(net, mrt, skb2, cache,
+ psend);
}
psend = ct;
}
@@ -1415,9 +1694,9 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
if (local) {
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2)
- ipmr_queue_xmit(skb2, cache, psend);
+ ipmr_queue_xmit(net, mrt, skb2, cache, psend);
} else {
- ipmr_queue_xmit(skb, cache, psend);
+ ipmr_queue_xmit(net, mrt, skb, cache, psend);
return 0;
}
}
@@ -1438,6 +1717,8 @@ int ip_mr_input(struct sk_buff *skb)
struct mfc_cache *cache;
struct net *net = dev_net(skb->dev);
int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
+ struct mr_table *mrt;
+ int err;
/* Packet is looped back after forward, it should not be
forwarded second time, but still can be delivered locally.
@@ -1445,6 +1726,10 @@ int ip_mr_input(struct sk_buff *skb)
if (IPCB(skb)->flags&IPSKB_FORWARDED)
goto dont_forward;
+ err = ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt);
+ if (err < 0)
+ return err;
+
if (!local) {
if (IPCB(skb)->opt.router_alert) {
if (ip_call_ra_chain(skb))
@@ -1457,9 +1742,9 @@ int ip_mr_input(struct sk_buff *skb)
that we can forward NO IGMP messages.
*/
read_lock(&mrt_lock);
- if (net->ipv4.mroute_sk) {
+ if (mrt->mroute_sk) {
nf_reset(skb);
- raw_rcv(net->ipv4.mroute_sk, skb);
+ raw_rcv(mrt->mroute_sk, skb);
read_unlock(&mrt_lock);
return 0;
}
@@ -1468,7 +1753,7 @@ int ip_mr_input(struct sk_buff *skb)
}
read_lock(&mrt_lock);
- cache = ipmr_cache_find(net, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
+ cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
/*
* No usable cache entry
@@ -1486,19 +1771,19 @@ int ip_mr_input(struct sk_buff *skb)
skb = skb2;
}
- vif = ipmr_find_vif(skb->dev);
+ vif = ipmr_find_vif(mrt, skb->dev);
if (vif >= 0) {
- int err = ipmr_cache_unresolved(net, vif, skb);
+ int err2 = ipmr_cache_unresolved(mrt, vif, skb);
read_unlock(&mrt_lock);
- return err;
+ return err2;
}
read_unlock(&mrt_lock);
kfree_skb(skb);
return -ENODEV;
}
- ip_mr_forward(skb, cache, local);
+ ip_mr_forward(net, mrt, skb, cache, local);
read_unlock(&mrt_lock);
@@ -1515,11 +1800,11 @@ dont_forward:
}
#ifdef CONFIG_IP_PIMSM
-static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen)
+static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
+ unsigned int pimlen)
{
struct net_device *reg_dev = NULL;
struct iphdr *encap;
- struct net *net = dev_net(skb->dev);
encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
/*
@@ -1534,8 +1819,8 @@ static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen)
return 1;
read_lock(&mrt_lock);
- if (net->ipv4.mroute_reg_vif_num >= 0)
- reg_dev = net->ipv4.vif_table[net->ipv4.mroute_reg_vif_num].dev;
+ if (mrt->mroute_reg_vif_num >= 0)
+ reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
if (reg_dev)
dev_hold(reg_dev);
read_unlock(&mrt_lock);
@@ -1570,17 +1855,21 @@ int pim_rcv_v1(struct sk_buff * skb)
{
struct igmphdr *pim;
struct net *net = dev_net(skb->dev);
+ struct mr_table *mrt;
if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
goto drop;
pim = igmp_hdr(skb);
- if (!net->ipv4.mroute_do_pim ||
+ if (ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt) < 0)
+ goto drop;
+
+ if (!mrt->mroute_do_pim ||
pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
goto drop;
- if (__pim_rcv(skb, sizeof(*pim))) {
+ if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
kfree_skb(skb);
}
@@ -1592,6 +1881,8 @@ drop:
static int pim_rcv(struct sk_buff * skb)
{
struct pimreghdr *pim;
+ struct net *net = dev_net(skb->dev);
+ struct mr_table *mrt;
if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
goto drop;
@@ -1603,7 +1894,10 @@ static int pim_rcv(struct sk_buff * skb)
csum_fold(skb_checksum(skb, 0, skb->len, 0))))
goto drop;
- if (__pim_rcv(skb, sizeof(*pim))) {
+ if (ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt) < 0)
+ goto drop;
+
+ if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
kfree_skb(skb);
}
@@ -1611,12 +1905,11 @@ drop:
}
#endif
-static int
-ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm)
+static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+ struct mfc_cache *c, struct rtmsg *rtm)
{
int ct;
struct rtnexthop *nhp;
- struct net *net = mfc_net(c);
u8 *b = skb_tail_pointer(skb);
struct rtattr *mp_head;
@@ -1624,19 +1917,19 @@ ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm)
if (c->mfc_parent > MAXVIFS)
return -ENOENT;
- if (VIF_EXISTS(net, c->mfc_parent))
- RTA_PUT(skb, RTA_IIF, 4, &net->ipv4.vif_table[c->mfc_parent].dev->ifindex);
+ if (VIF_EXISTS(mrt, c->mfc_parent))
+ RTA_PUT(skb, RTA_IIF, 4, &mrt->vif_table[c->mfc_parent].dev->ifindex);
mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
- if (VIF_EXISTS(net, ct) && c->mfc_un.res.ttls[ct] < 255) {
+ if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
goto rtattr_failure;
nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
nhp->rtnh_flags = 0;
nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
- nhp->rtnh_ifindex = net->ipv4.vif_table[ct].dev->ifindex;
+ nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
nhp->rtnh_len = sizeof(*nhp);
}
}
@@ -1654,11 +1947,16 @@ int ipmr_get_route(struct net *net,
struct sk_buff *skb, struct rtmsg *rtm, int nowait)
{
int err;
+ struct mr_table *mrt;
struct mfc_cache *cache;
struct rtable *rt = skb_rtable(skb);
+ mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
+ if (mrt == NULL)
+ return -ENOENT;
+
read_lock(&mrt_lock);
- cache = ipmr_cache_find(net, rt->rt_src, rt->rt_dst);
+ cache = ipmr_cache_find(mrt, rt->rt_src, rt->rt_dst);
if (cache == NULL) {
struct sk_buff *skb2;
@@ -1672,7 +1970,7 @@ int ipmr_get_route(struct net *net,
}
dev = skb->dev;
- if (dev == NULL || (vif = ipmr_find_vif(dev)) < 0) {
+ if (dev == NULL || (vif = ipmr_find_vif(mrt, dev)) < 0) {
read_unlock(&mrt_lock);
return -ENODEV;
}
@@ -1689,24 +1987,107 @@ int ipmr_get_route(struct net *net,
iph->saddr = rt->rt_src;
iph->daddr = rt->rt_dst;
iph->version = 0;
- err = ipmr_cache_unresolved(net, vif, skb2);
+ err = ipmr_cache_unresolved(mrt, vif, skb2);
read_unlock(&mrt_lock);
return err;
}
if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
cache->mfc_flags |= MFC_NOTIFY;
- err = ipmr_fill_mroute(skb, cache, rtm);
+ err = __ipmr_fill_mroute(mrt, skb, cache, rtm);
read_unlock(&mrt_lock);
return err;
}
+static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+ u32 pid, u32 seq, struct mfc_cache *c)
+{
+ struct nlmsghdr *nlh;
+ struct rtmsg *rtm;
+
+ nlh = nlmsg_put(skb, pid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI);
+ if (nlh == NULL)
+ return -EMSGSIZE;
+
+ rtm = nlmsg_data(nlh);
+ rtm->rtm_family = RTNL_FAMILY_IPMR;
+ rtm->rtm_dst_len = 32;
+ rtm->rtm_src_len = 32;
+ rtm->rtm_tos = 0;
+ rtm->rtm_table = mrt->id;
+ NLA_PUT_U32(skb, RTA_TABLE, mrt->id);
+ rtm->rtm_type = RTN_MULTICAST;
+ rtm->rtm_scope = RT_SCOPE_UNIVERSE;
+ rtm->rtm_protocol = RTPROT_UNSPEC;
+ rtm->rtm_flags = 0;
+
+ NLA_PUT_BE32(skb, RTA_SRC, c->mfc_origin);
+ NLA_PUT_BE32(skb, RTA_DST, c->mfc_mcastgrp);
+
+ if (__ipmr_fill_mroute(mrt, skb, c, rtm) < 0)
+ goto nla_put_failure;
+
+ return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+}
+
+static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct net *net = sock_net(skb->sk);
+ struct mr_table *mrt;
+ struct mfc_cache *mfc;
+ unsigned int t = 0, s_t;
+ unsigned int h = 0, s_h;
+ unsigned int e = 0, s_e;
+
+ s_t = cb->args[0];
+ s_h = cb->args[1];
+ s_e = cb->args[2];
+
+ read_lock(&mrt_lock);
+ ipmr_for_each_table(mrt, net) {
+ if (t < s_t)
+ goto next_table;
+ if (t > s_t)
+ s_h = 0;
+ for (h = s_h; h < MFC_LINES; h++) {
+ list_for_each_entry(mfc, &mrt->mfc_cache_array[h], list) {
+ if (e < s_e)
+ goto next_entry;
+ if (ipmr_fill_mroute(mrt, skb,
+ NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq,
+ mfc) < 0)
+ goto done;
+next_entry:
+ e++;
+ }
+ e = s_e = 0;
+ }
+ s_h = 0;
+next_table:
+ t++;
+ }
+done:
+ read_unlock(&mrt_lock);
+
+ cb->args[2] = e;
+ cb->args[1] = h;
+ cb->args[0] = t;
+
+ return skb->len;
+}
+
#ifdef CONFIG_PROC_FS
/*
* The /proc interfaces to multicast routing /proc/ip_mr_cache /proc/ip_mr_vif
*/
struct ipmr_vif_iter {
struct seq_net_private p;
+ struct mr_table *mrt;
int ct;
};
@@ -1714,11 +2095,13 @@ static struct vif_device *ipmr_vif_seq_idx(struct net *net,
struct ipmr_vif_iter *iter,
loff_t pos)
{
- for (iter->ct = 0; iter->ct < net->ipv4.maxvif; ++iter->ct) {
- if (!VIF_EXISTS(net, iter->ct))
+ struct mr_table *mrt = iter->mrt;
+
+ for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
+ if (!VIF_EXISTS(mrt, iter->ct))
continue;
if (pos-- == 0)
- return &net->ipv4.vif_table[iter->ct];
+ return &mrt->vif_table[iter->ct];
}
return NULL;
}
@@ -1726,7 +2109,15 @@ static struct vif_device *ipmr_vif_seq_idx(struct net *net,
static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(mrt_lock)
{
+ struct ipmr_vif_iter *iter = seq->private;
struct net *net = seq_file_net(seq);
+ struct mr_table *mrt;
+
+ mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
+ if (mrt == NULL)
+ return ERR_PTR(-ENOENT);
+
+ iter->mrt = mrt;
read_lock(&mrt_lock);
return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1)
@@ -1737,15 +2128,16 @@ static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct ipmr_vif_iter *iter = seq->private;
struct net *net = seq_file_net(seq);
+ struct mr_table *mrt = iter->mrt;
++*pos;
if (v == SEQ_START_TOKEN)
return ipmr_vif_seq_idx(net, iter, 0);
- while (++iter->ct < net->ipv4.maxvif) {
- if (!VIF_EXISTS(net, iter->ct))
+ while (++iter->ct < mrt->maxvif) {
+ if (!VIF_EXISTS(mrt, iter->ct))
continue;
- return &net->ipv4.vif_table[iter->ct];
+ return &mrt->vif_table[iter->ct];
}
return NULL;
}
@@ -1758,7 +2150,8 @@ static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
{
- struct net *net = seq_file_net(seq);
+ struct ipmr_vif_iter *iter = seq->private;
+ struct mr_table *mrt = iter->mrt;
if (v == SEQ_START_TOKEN) {
seq_puts(seq,
@@ -1769,7 +2162,7 @@ static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
seq_printf(seq,
"%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n",
- vif - net->ipv4.vif_table,
+ vif - mrt->vif_table,
name, vif->bytes_in, vif->pkt_in,
vif->bytes_out, vif->pkt_out,
vif->flags, vif->local, vif->remote);
@@ -1800,7 +2193,8 @@ static const struct file_operations ipmr_vif_fops = {
struct ipmr_mfc_iter {
struct seq_net_private p;
- struct mfc_cache **cache;
+ struct mr_table *mrt;
+ struct list_head *cache;
int ct;
};
@@ -1808,22 +2202,22 @@ struct ipmr_mfc_iter {
static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
struct ipmr_mfc_iter *it, loff_t pos)
{
+ struct mr_table *mrt = it->mrt;
struct mfc_cache *mfc;
- it->cache = net->ipv4.mfc_cache_array;
read_lock(&mrt_lock);
- for (it->ct = 0; it->ct < MFC_LINES; it->ct++)
- for (mfc = net->ipv4.mfc_cache_array[it->ct];
- mfc; mfc = mfc->next)
+ for (it->ct = 0; it->ct < MFC_LINES; it->ct++) {
+ it->cache = &mrt->mfc_cache_array[it->ct];
+ list_for_each_entry(mfc, it->cache, list)
if (pos-- == 0)
return mfc;
+ }
read_unlock(&mrt_lock);
- it->cache = &mfc_unres_queue;
spin_lock_bh(&mfc_unres_lock);
- for (mfc = mfc_unres_queue; mfc; mfc = mfc->next)
- if (net_eq(mfc_net(mfc), net) &&
- pos-- == 0)
+ it->cache = &mrt->mfc_unres_queue;
+ list_for_each_entry(mfc, it->cache, list)
+ if (pos-- == 0)
return mfc;
spin_unlock_bh(&mfc_unres_lock);
@@ -1836,7 +2230,13 @@ static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
struct ipmr_mfc_iter *it = seq->private;
struct net *net = seq_file_net(seq);
+ struct mr_table *mrt;
+ mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
+ if (mrt == NULL)
+ return ERR_PTR(-ENOENT);
+
+ it->mrt = mrt;
it->cache = NULL;
it->ct = 0;
return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
@@ -1848,37 +2248,36 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
struct mfc_cache *mfc = v;
struct ipmr_mfc_iter *it = seq->private;
struct net *net = seq_file_net(seq);
+ struct mr_table *mrt = it->mrt;
++*pos;
if (v == SEQ_START_TOKEN)
return ipmr_mfc_seq_idx(net, seq->private, 0);
- if (mfc->next)
- return mfc->next;
+ if (mfc->list.next != it->cache)
+ return list_entry(mfc->list.next, struct mfc_cache, list);
- if (it->cache == &mfc_unres_queue)
+ if (it->cache == &mrt->mfc_unres_queue)
goto end_of_list;
- BUG_ON(it->cache != net->ipv4.mfc_cache_array);
+ BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]);
while (++it->ct < MFC_LINES) {
- mfc = net->ipv4.mfc_cache_array[it->ct];
- if (mfc)
- return mfc;
+ it->cache = &mrt->mfc_cache_array[it->ct];
+ if (list_empty(it->cache))
+ continue;
+ return list_first_entry(it->cache, struct mfc_cache, list);
}
/* exhausted cache_array, show unresolved */
read_unlock(&mrt_lock);
- it->cache = &mfc_unres_queue;
+ it->cache = &mrt->mfc_unres_queue;
it->ct = 0;
spin_lock_bh(&mfc_unres_lock);
- mfc = mfc_unres_queue;
- while (mfc && !net_eq(mfc_net(mfc), net))
- mfc = mfc->next;
- if (mfc)
- return mfc;
+ if (!list_empty(it->cache))
+ return list_first_entry(it->cache, struct mfc_cache, list);
end_of_list:
spin_unlock_bh(&mfc_unres_lock);
@@ -1890,18 +2289,17 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
struct ipmr_mfc_iter *it = seq->private;
- struct net *net = seq_file_net(seq);
+ struct mr_table *mrt = it->mrt;
- if (it->cache == &mfc_unres_queue)
+ if (it->cache == &mrt->mfc_unres_queue)
spin_unlock_bh(&mfc_unres_lock);
- else if (it->cache == net->ipv4.mfc_cache_array)
+ else if (it->cache == &mrt->mfc_cache_array[it->ct])
read_unlock(&mrt_lock);
}
static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
int n;
- struct net *net = seq_file_net(seq);
if (v == SEQ_START_TOKEN) {
seq_puts(seq,
@@ -1909,20 +2307,21 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
} else {
const struct mfc_cache *mfc = v;
const struct ipmr_mfc_iter *it = seq->private;
+ const struct mr_table *mrt = it->mrt;
- seq_printf(seq, "%08lX %08lX %-3hd",
- (unsigned long) mfc->mfc_mcastgrp,
- (unsigned long) mfc->mfc_origin,
+ seq_printf(seq, "%08X %08X %-3hd",
+ (__force u32) mfc->mfc_mcastgrp,
+ (__force u32) mfc->mfc_origin,
mfc->mfc_parent);
- if (it->cache != &mfc_unres_queue) {
+ if (it->cache != &mrt->mfc_unres_queue) {
seq_printf(seq, " %8lu %8lu %8lu",
mfc->mfc_un.res.pkt,
mfc->mfc_un.res.bytes,
mfc->mfc_un.res.wrong_if);
for (n = mfc->mfc_un.res.minvif;
n < mfc->mfc_un.res.maxvif; n++ ) {
- if (VIF_EXISTS(net, n) &&
+ if (VIF_EXISTS(mrt, n) &&
mfc->mfc_un.res.ttls[n] < 255)
seq_printf(seq,
" %2d:%-3d",
@@ -1974,27 +2373,11 @@ static const struct net_protocol pim_protocol = {
*/
static int __net_init ipmr_net_init(struct net *net)
{
- int err = 0;
+ int err;
- net->ipv4.vif_table = kcalloc(MAXVIFS, sizeof(struct vif_device),
- GFP_KERNEL);
- if (!net->ipv4.vif_table) {
- err = -ENOMEM;
+ err = ipmr_rules_init(net);
+ if (err < 0)
goto fail;
- }
-
- /* Forwarding cache */
- net->ipv4.mfc_cache_array = kcalloc(MFC_LINES,
- sizeof(struct mfc_cache *),
- GFP_KERNEL);
- if (!net->ipv4.mfc_cache_array) {
- err = -ENOMEM;
- goto fail_mfc_cache;
- }
-
-#ifdef CONFIG_IP_PIMSM
- net->ipv4.mroute_reg_vif_num = -1;
-#endif
#ifdef CONFIG_PROC_FS
err = -ENOMEM;
@@ -2009,10 +2392,8 @@ static int __net_init ipmr_net_init(struct net *net)
proc_cache_fail:
proc_net_remove(net, "ip_mr_vif");
proc_vif_fail:
- kfree(net->ipv4.mfc_cache_array);
+ ipmr_rules_exit(net);
#endif
-fail_mfc_cache:
- kfree(net->ipv4.vif_table);
fail:
return err;
}
@@ -2023,8 +2404,7 @@ static void __net_exit ipmr_net_exit(struct net *net)
proc_net_remove(net, "ip_mr_cache");
proc_net_remove(net, "ip_mr_vif");
#endif
- kfree(net->ipv4.mfc_cache_array);
- kfree(net->ipv4.vif_table);
+ ipmr_rules_exit(net);
}
static struct pernet_operations ipmr_net_ops = {
@@ -2047,7 +2427,6 @@ int __init ip_mr_init(void)
if (err)
goto reg_pernet_fail;
- setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0);
err = register_netdevice_notifier(&ip_mr_notifier);
if (err)
goto reg_notif_fail;
@@ -2058,6 +2437,7 @@ int __init ip_mr_init(void)
goto add_proto_fail;
}
#endif
+ rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE, NULL, ipmr_rtm_dumproute);
return 0;
#ifdef CONFIG_IP_PIMSM_V2
@@ -2065,7 +2445,6 @@ add_proto_fail:
unregister_netdevice_notifier(&ip_mr_notifier);
#endif
reg_notif_fail:
- del_timer(&ipmr_expire_timer);
unregister_pernet_subsys(&ipmr_net_ops);
reg_pernet_fail:
kmem_cache_destroy(mrt_cachep);
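
The ipmr.c hunks above convert the per-net MFC cache from open-coded mfc->next chains into list_head buckets hung off a per-table struct mr_table, and the /proc seq iterator walks those buckets by decrementing a position counter until it reaches zero. Below is a minimal userspace sketch of that decrement-until-zero walk; the structure names, bucket count and array-backed "lists" are invented for illustration and are not the kernel's data structures.

#include <stdio.h>

/*
 * Hypothetical, simplified stand-in for the kernel structures: each hash
 * bucket is an array-backed "list" here, purely to show the pos-- walk
 * the mfc seq iterator performs across its buckets.
 */
struct entry { int key; };

#define NBUCKETS   4
#define BUCKET_MAX 3

static struct entry buckets[NBUCKETS][BUCKET_MAX];
static int bucket_len[NBUCKETS];

/*
 * Return the pos-th entry across all buckets, or NULL when exhausted:
 * the same decrement-until-zero idiom ipmr_mfc_seq_idx() uses.
 */
static struct entry *seq_idx(long pos)
{
    for (int b = 0; b < NBUCKETS; b++)
        for (int i = 0; i < bucket_len[b]; i++)
            if (pos-- == 0)
                return &buckets[b][i];
    return NULL;
}

int main(void)
{
    bucket_len[0] = 2; buckets[0][0].key = 10; buckets[0][1].key = 11;
    bucket_len[2] = 1; buckets[2][0].key = 30;

    for (long pos = 0; ; pos++) {
        struct entry *e = seq_idx(pos);
        if (!e)
            break;
        printf("pos %ld -> key %d\n", pos, e->key);
    }
    return 0;
}
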
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index f07d77f65751..07a699059390 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -523,13 +523,11 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size)
return ret;
t = arpt_get_target(e);
- target = try_then_request_module(xt_find_target(NFPROTO_ARP,
- t->u.user.name,
- t->u.user.revision),
- "arpt_%s", t->u.user.name);
- if (IS_ERR(target) || !target) {
+ target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
+ t->u.user.revision);
+ if (IS_ERR(target)) {
duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
- ret = target ? PTR_ERR(target) : -ENOENT;
+ ret = PTR_ERR(target);
goto out;
}
t->u.kernel.target = target;
@@ -651,6 +649,9 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
if (ret != 0)
break;
++i;
+ if (strcmp(arpt_get_target(iter)->u.user.name,
+ XT_ERROR_TARGET) == 0)
+ ++newinfo->stacksize;
}
duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret);
if (ret != 0)
@@ -1252,14 +1253,12 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
entry_offset = (void *)e - (void *)base;
t = compat_arpt_get_target(e);
- target = try_then_request_module(xt_find_target(NFPROTO_ARP,
- t->u.user.name,
- t->u.user.revision),
- "arpt_%s", t->u.user.name);
- if (IS_ERR(target) || !target) {
+ target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
+ t->u.user.revision);
+ if (IS_ERR(target)) {
duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
t->u.user.name);
- ret = target ? PTR_ERR(target) : -ENOENT;
+ ret = PTR_ERR(target);
goto out;
}
t->u.kernel.target = target;
@@ -1778,8 +1777,7 @@ struct xt_table *arpt_register_table(struct net *net,
{
int ret;
struct xt_table_info *newinfo;
- struct xt_table_info bootstrap
- = { 0, 0, 0, { 0 }, { 0 }, { } };
+ struct xt_table_info bootstrap = {0};
void *loc_cpu_entry;
struct xt_table *new_table;
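
The arp_tables.c change above (and the matching ip_tables.c hunks further down) replaces the try_then_request_module() plus NULL/IS_ERR dance with xt_request_find_target(), which by convention returns either a valid pointer or an encoded error, never NULL, so a bare IS_ERR() test suffices. A rough userspace imitation of that error-pointer convention follows; the struct target and lookup helper are invented for the example and are not the real xtables API.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Rough userspace imitation of the kernel's error-pointer convention,
 * only to show why IS_ERR() alone is enough once the lookup helper
 * promises never to return NULL.
 */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
    return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

struct target { const char *name; };

/* Invented lookup: returns a target or ERR_PTR(-ENOENT), never NULL. */
static struct target *request_find_target(const char *name)
{
    static struct target builtin = { "ACCEPT" };

    if (strcmp(name, "ACCEPT") == 0)
        return &builtin;
    return ERR_PTR(-ENOENT);
}

int main(void)
{
    struct target *t = request_find_target("NOSUCHTARGET");

    if (IS_ERR(t))
        printf("lookup failed: %ld\n", PTR_ERR(t));

    t = request_find_target("ACCEPT");
    if (!IS_ERR(t))
        printf("found %s\n", t->name);
    return 0;
}
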
diff --git a/net/ipv4/netfilter/arpt_mangle.c b/net/ipv4/netfilter/arpt_mangle.c
index b0d5b1d0a769..4b51a027f307 100644
--- a/net/ipv4/netfilter/arpt_mangle.c
+++ b/net/ipv4/netfilter/arpt_mangle.c
@@ -54,7 +54,7 @@ target(struct sk_buff *skb, const struct xt_target_param *par)
return mangle->target;
}
-static bool checkentry(const struct xt_tgchk_param *par)
+static int checkentry(const struct xt_tgchk_param *par)
{
const struct arpt_mangle *mangle = par->targinfo;
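
arpt_mangle's checkentry switching from bool to int is the first instance of an API change repeated throughout this series: check routines now return 0 on success or a negative errno instead of true/false, so the caller can report the real reason a rule was rejected. A minimal sketch of the before/after contract, with an invented parameter structure:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Sketch of the checkentry contract change: old hooks returned bool
 * (true = ok), new ones return 0 or a negative errno.  The parameter
 * structure here is invented for the example.
 */
struct tg_param { unsigned int flags; };

static bool old_check(const struct tg_param *par)
{
    return par->flags == 0;              /* "false" loses the error cause */
}

static int new_check(const struct tg_param *par)
{
    if (par->flags != 0)
        return -EINVAL;                  /* caller sees exactly what failed */
    return 0;
}

int main(void)
{
    struct tg_param bad = { .flags = 1 };

    printf("old: %d  new: %d\n", old_check(&bad), new_check(&bad));
    return 0;
}
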
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index e2787048aa0a..c838238104f5 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -161,8 +161,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
break;
case IPQ_COPY_PACKET:
- if ((entry->skb->ip_summed == CHECKSUM_PARTIAL ||
- entry->skb->ip_summed == CHECKSUM_COMPLETE) &&
+ if (entry->skb->ip_summed == CHECKSUM_PARTIAL &&
(*errp = skb_checksum_help(entry->skb))) {
read_unlock_bh(&queue_lock);
return NULL;
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index b29c66df8d1f..3e6af1036fbc 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -39,13 +39,13 @@ MODULE_DESCRIPTION("IPv4 packet filter");
/*#define DEBUG_IP_FIREWALL_USER*/
#ifdef DEBUG_IP_FIREWALL
-#define dprintf(format, args...) printk(format , ## args)
+#define dprintf(format, args...) pr_info(format , ## args)
#else
#define dprintf(format, args...)
#endif
#ifdef DEBUG_IP_FIREWALL_USER
-#define duprintf(format, args...) printk(format , ## args)
+#define duprintf(format, args...) pr_info(format , ## args)
#else
#define duprintf(format, args...)
#endif
@@ -168,8 +168,7 @@ static unsigned int
ipt_error(struct sk_buff *skb, const struct xt_target_param *par)
{
if (net_ratelimit())
- printk("ip_tables: error: `%s'\n",
- (const char *)par->targinfo);
+ pr_info("error: `%s'\n", (const char *)par->targinfo);
return NF_DROP;
}
@@ -322,8 +321,6 @@ ipt_do_table(struct sk_buff *skb,
const struct net_device *out,
struct xt_table *table)
{
-#define tb_comefrom ((struct ipt_entry *)table_base)->comefrom
-
static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
const struct iphdr *ip;
bool hotdrop = false;
@@ -331,7 +328,8 @@ ipt_do_table(struct sk_buff *skb,
unsigned int verdict = NF_DROP;
const char *indev, *outdev;
const void *table_base;
- struct ipt_entry *e, *back;
+ struct ipt_entry *e, **jumpstack;
+ unsigned int *stackptr, origptr, cpu;
const struct xt_table_info *private;
struct xt_match_param mtpar;
struct xt_target_param tgpar;
@@ -357,19 +355,23 @@ ipt_do_table(struct sk_buff *skb,
IP_NF_ASSERT(table->valid_hooks & (1 << hook));
xt_info_rdlock_bh();
private = table->private;
- table_base = private->entries[smp_processor_id()];
+ cpu = smp_processor_id();
+ table_base = private->entries[cpu];
+ jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
+ stackptr = &private->stackptr[cpu];
+ origptr = *stackptr;
e = get_entry(table_base, private->hook_entry[hook]);
- /* For return from builtin chain */
- back = get_entry(table_base, private->underflow[hook]);
+ pr_debug("Entering %s(hook %u); sp at %u (UF %p)\n",
+ table->name, hook, origptr,
+ get_entry(table_base, private->underflow[hook]));
do {
const struct ipt_entry_target *t;
const struct xt_entry_match *ematch;
IP_NF_ASSERT(e);
- IP_NF_ASSERT(back);
if (!ip_packet_match(ip, indev, outdev,
&e->ip, mtpar.fragoff)) {
no_match:
@@ -404,41 +406,39 @@ ipt_do_table(struct sk_buff *skb,
verdict = (unsigned)(-v) - 1;
break;
}
- e = back;
- back = get_entry(table_base, back->comefrom);
+ if (*stackptr == 0) {
+ e = get_entry(table_base,
+ private->underflow[hook]);
+ pr_debug("Underflow (this is normal) "
+ "to %p\n", e);
+ } else {
+ e = jumpstack[--*stackptr];
+ pr_debug("Pulled %p out from pos %u\n",
+ e, *stackptr);
+ e = ipt_next_entry(e);
+ }
continue;
}
if (table_base + v != ipt_next_entry(e) &&
!(e->ip.flags & IPT_F_GOTO)) {
- /* Save old back ptr in next entry */
- struct ipt_entry *next = ipt_next_entry(e);
- next->comefrom = (void *)back - table_base;
- /* set back pointer to next entry */
- back = next;
+ if (*stackptr >= private->stacksize) {
+ verdict = NF_DROP;
+ break;
+ }
+ jumpstack[(*stackptr)++] = e;
+ pr_debug("Pushed %p into pos %u\n",
+ e, *stackptr - 1);
}
e = get_entry(table_base, v);
continue;
}
- /* Targets which reenter must return
- abs. verdicts */
tgpar.target = t->u.kernel.target;
tgpar.targinfo = t->data;
-#ifdef CONFIG_NETFILTER_DEBUG
- tb_comefrom = 0xeeeeeeec;
-#endif
verdict = t->u.kernel.target->target(skb, &tgpar);
-#ifdef CONFIG_NETFILTER_DEBUG
- if (tb_comefrom != 0xeeeeeeec && verdict == IPT_CONTINUE) {
- printk("Target %s reentered!\n",
- t->u.kernel.target->name);
- verdict = NF_DROP;
- }
- tb_comefrom = 0x57acc001;
-#endif
/* Target might have changed stuff. */
ip = ip_hdr(skb);
if (verdict == IPT_CONTINUE)
@@ -448,7 +448,9 @@ ipt_do_table(struct sk_buff *skb,
break;
} while (!hotdrop);
xt_info_rdunlock_bh();
-
+ pr_debug("Exiting %s; resetting sp from %u to %u\n",
+ __func__, *stackptr, origptr);
+ *stackptr = origptr;
#ifdef DEBUG_ALLOW_ALL
return NF_ACCEPT;
#else
@@ -456,8 +458,6 @@ ipt_do_table(struct sk_buff *skb,
return NF_DROP;
else return verdict;
#endif
-
-#undef tb_comefrom
}
/* Figures out from what hook each rule can be called: returns 0 if
@@ -591,7 +591,7 @@ check_entry(const struct ipt_entry *e, const char *name)
const struct ipt_entry_target *t;
if (!ip_checkentry(&e->ip)) {
- duprintf("ip_tables: ip check failed %p %s.\n", e, name);
+ duprintf("ip check failed %p %s.\n", e, name);
return -EINVAL;
}
@@ -618,8 +618,7 @@ check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par)
ret = xt_check_match(par, m->u.match_size - sizeof(*m),
ip->proto, ip->invflags & IPT_INV_PROTO);
if (ret < 0) {
- duprintf("ip_tables: check failed for `%s'.\n",
- par.match->name);
+ duprintf("check failed for `%s'.\n", par.match->name);
return ret;
}
return 0;
@@ -631,12 +630,11 @@ find_check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par)
struct xt_match *match;
int ret;
- match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
- m->u.user.revision),
- "ipt_%s", m->u.user.name);
- if (IS_ERR(match) || !match) {
+ match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
+ m->u.user.revision);
+ if (IS_ERR(match)) {
duprintf("find_check_match: `%s' not found\n", m->u.user.name);
- return match ? PTR_ERR(match) : -ENOENT;
+ return PTR_ERR(match);
}
m->u.kernel.match = match;
@@ -667,7 +665,7 @@ static int check_target(struct ipt_entry *e, struct net *net, const char *name)
ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
if (ret < 0) {
- duprintf("ip_tables: check failed for `%s'.\n",
+ duprintf("check failed for `%s'.\n",
t->u.kernel.target->name);
return ret;
}
@@ -703,13 +701,11 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
}
t = ipt_get_target(e);
- target = try_then_request_module(xt_find_target(AF_INET,
- t->u.user.name,
- t->u.user.revision),
- "ipt_%s", t->u.user.name);
- if (IS_ERR(target) || !target) {
+ target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
+ t->u.user.revision);
+ if (IS_ERR(target)) {
duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
- ret = target ? PTR_ERR(target) : -ENOENT;
+ ret = PTR_ERR(target);
goto cleanup_matches;
}
t->u.kernel.target = target;
@@ -843,6 +839,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
if (ret != 0)
return ret;
++i;
+ if (strcmp(ipt_get_target(iter)->u.user.name,
+ XT_ERROR_TARGET) == 0)
+ ++newinfo->stacksize;
}
if (i != repl->num_entries) {
@@ -1311,7 +1310,7 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
if (ret != 0)
goto free_newinfo;
- duprintf("ip_tables: Translated table\n");
+ duprintf("Translated table\n");
ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
tmp.num_counters, tmp.counters);
@@ -1476,13 +1475,12 @@ compat_find_calc_match(struct ipt_entry_match *m,
{
struct xt_match *match;
- match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
- m->u.user.revision),
- "ipt_%s", m->u.user.name);
- if (IS_ERR(match) || !match) {
+ match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
+ m->u.user.revision);
+ if (IS_ERR(match)) {
duprintf("compat_check_calc_match: `%s' not found\n",
m->u.user.name);
- return match ? PTR_ERR(match) : -ENOENT;
+ return PTR_ERR(match);
}
m->u.kernel.match = match;
*size += xt_compat_match_offset(match);
@@ -1549,14 +1547,12 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
}
t = compat_ipt_get_target(e);
- target = try_then_request_module(xt_find_target(AF_INET,
- t->u.user.name,
- t->u.user.revision),
- "ipt_%s", t->u.user.name);
- if (IS_ERR(target) || !target) {
+ target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
+ t->u.user.revision);
+ if (IS_ERR(target)) {
duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
t->u.user.name);
- ret = target ? PTR_ERR(target) : -ENOENT;
+ ret = PTR_ERR(target);
goto release_matches;
}
t->u.kernel.target = target;
@@ -2094,8 +2090,7 @@ struct xt_table *ipt_register_table(struct net *net,
{
int ret;
struct xt_table_info *newinfo;
- struct xt_table_info bootstrap
- = { 0, 0, 0, { 0 }, { 0 }, { } };
+ struct xt_table_info bootstrap = {0};
void *loc_cpu_entry;
struct xt_table *new_table;
@@ -2184,12 +2179,12 @@ icmp_match(const struct sk_buff *skb, const struct xt_match_param *par)
!!(icmpinfo->invflags&IPT_ICMP_INV));
}
-static bool icmp_checkentry(const struct xt_mtchk_param *par)
+static int icmp_checkentry(const struct xt_mtchk_param *par)
{
const struct ipt_icmp *icmpinfo = par->matchinfo;
/* Must specify no unknown invflags */
- return !(icmpinfo->invflags & ~IPT_ICMP_INV);
+ return (icmpinfo->invflags & ~IPT_ICMP_INV) ? -EINVAL : 0;
}
/* The built-in targets: standard (NULL) and error. */
@@ -2276,7 +2271,7 @@ static int __init ip_tables_init(void)
if (ret < 0)
goto err5;
- printk(KERN_INFO "ip_tables: (C) 2000-2006 Netfilter Core Team\n");
+ pr_info("(C) 2000-2006 Netfilter Core Team\n");
return 0;
err5:
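
The ipt_do_table() rewrite in the hunks above drops the in-place comefrom back-pointers and instead keeps an explicit per-cpu jump stack (jumpstack/stackptr), dropping the packet if a ruleset ever nests deeper than the precomputed stacksize. The toy walker below sketches that push/pop flow in plain C; the rule layout, stack size and return convention are invented for the example and are much simpler than the real table format.

#include <stdio.h>

/*
 * Illustrative only: a toy rule walker using an explicit jump stack,
 * the idea the ipt_do_table() rewrite switches to.
 */
#define STACKSIZE 16

struct rule { int jump_to; };   /* -1 = RETURN, >= 0 = jump target index */

static int walk(const struct rule *rules, int nrules, int start)
{
    const struct rule *stack[STACKSIZE];
    unsigned int sp = 0;
    int i = start;

    while (i >= 0 && i < nrules) {
        const struct rule *r = &rules[i];

        if (r->jump_to >= 0) {           /* push return point, then jump */
            if (sp >= STACKSIZE)
                return -1;               /* overflow: drop, like the kernel */
            stack[sp++] = r;
            i = r->jump_to;
            continue;
        }
        if (sp == 0)                     /* RETURN from the base chain */
            return i;
        r = stack[--sp];                 /* pop and resume after the jump */
        i = (int)(r - rules) + 1;
    }
    return -1;
}

int main(void)
{
    struct rule rules[] = {
        { 2 },   /* 0: jump to the chain starting at rule 2 */
        { -1 },  /* 1: end of the base chain */
        { -1 },  /* 2: user chain returns immediately */
    };

    printf("walk ends at rule %d\n", walk(rules, 3, 0));
    return 0;
}
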
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index ab828400ed71..8815d458de46 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -9,6 +9,7 @@
* published by the Free Software Foundation.
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/jhash.h>
@@ -88,7 +89,7 @@ clusterip_config_entry_put(struct clusterip_config *c)
list_del(&c->list);
write_unlock_bh(&clusterip_lock);
- dev_mc_delete(c->dev, c->clustermac, ETH_ALEN, 0);
+ dev_mc_del(c->dev, c->clustermac);
dev_put(c->dev);
/* In case anyone still accesses the file, the open/close
@@ -239,8 +240,7 @@ clusterip_hashfn(const struct sk_buff *skb,
break;
default:
if (net_ratelimit())
- printk(KERN_NOTICE "CLUSTERIP: unknown protocol `%u'\n",
- iph->protocol);
+ pr_info("unknown protocol %u\n", iph->protocol);
sport = dport = 0;
}
@@ -262,7 +262,7 @@ clusterip_hashfn(const struct sk_buff *skb,
hashval = 0;
/* This cannot happen, unless the check function wasn't called
* at rule load time */
- printk("CLUSTERIP: unknown mode `%u'\n", config->hash_mode);
+ pr_info("unknown mode %u\n", config->hash_mode);
BUG();
break;
}
@@ -295,7 +295,7 @@ clusterip_tg(struct sk_buff *skb, const struct xt_target_param *par)
ct = nf_ct_get(skb, &ctinfo);
if (ct == NULL) {
- printk(KERN_ERR "CLUSTERIP: no conntrack!\n");
+ pr_info("no conntrack!\n");
/* FIXME: need to drop invalid ones, since replies
* to outgoing connections of other nodes will be
* marked as INVALID */
@@ -348,25 +348,24 @@ clusterip_tg(struct sk_buff *skb, const struct xt_target_param *par)
return XT_CONTINUE;
}
-static bool clusterip_tg_check(const struct xt_tgchk_param *par)
+static int clusterip_tg_check(const struct xt_tgchk_param *par)
{
struct ipt_clusterip_tgt_info *cipinfo = par->targinfo;
const struct ipt_entry *e = par->entryinfo;
-
struct clusterip_config *config;
+ int ret;
if (cipinfo->hash_mode != CLUSTERIP_HASHMODE_SIP &&
cipinfo->hash_mode != CLUSTERIP_HASHMODE_SIP_SPT &&
cipinfo->hash_mode != CLUSTERIP_HASHMODE_SIP_SPT_DPT) {
- printk(KERN_WARNING "CLUSTERIP: unknown mode `%u'\n",
- cipinfo->hash_mode);
- return false;
+ pr_info("unknown mode %u\n", cipinfo->hash_mode);
+ return -EINVAL;
}
if (e->ip.dmsk.s_addr != htonl(0xffffffff) ||
e->ip.dst.s_addr == 0) {
- printk(KERN_ERR "CLUSTERIP: Please specify destination IP\n");
- return false;
+ pr_info("Please specify destination IP\n");
+ return -EINVAL;
}
/* FIXME: further sanity checks */
@@ -374,41 +373,41 @@ static bool clusterip_tg_check(const struct xt_tgchk_param *par)
config = clusterip_config_find_get(e->ip.dst.s_addr, 1);
if (!config) {
if (!(cipinfo->flags & CLUSTERIP_FLAG_NEW)) {
- printk(KERN_WARNING "CLUSTERIP: no config found for %pI4, need 'new'\n", &e->ip.dst.s_addr);
- return false;
+ pr_info("no config found for %pI4, need 'new'\n",
+ &e->ip.dst.s_addr);
+ return -EINVAL;
} else {
struct net_device *dev;
if (e->ip.iniface[0] == '\0') {
- printk(KERN_WARNING "CLUSTERIP: Please specify an interface name\n");
- return false;
+ pr_info("Please specify an interface name\n");
+ return -EINVAL;
}
dev = dev_get_by_name(&init_net, e->ip.iniface);
if (!dev) {
- printk(KERN_WARNING "CLUSTERIP: no such interface %s\n", e->ip.iniface);
- return false;
+ pr_info("no such interface %s\n",
+ e->ip.iniface);
+ return -ENOENT;
}
config = clusterip_config_init(cipinfo,
e->ip.dst.s_addr, dev);
if (!config) {
- printk(KERN_WARNING "CLUSTERIP: cannot allocate config\n");
+ pr_info("cannot allocate config\n");
dev_put(dev);
- return false;
+ return -ENOMEM;
}
- dev_mc_add(config->dev,config->clustermac, ETH_ALEN, 0);
+ dev_mc_add(config->dev, config->clustermac);
}
}
cipinfo->config = config;
- if (nf_ct_l3proto_try_module_get(par->target->family) < 0) {
- printk(KERN_WARNING "can't load conntrack support for "
- "proto=%u\n", par->target->family);
- return false;
- }
-
- return true;
+ ret = nf_ct_l3proto_try_module_get(par->family);
+ if (ret < 0)
+ pr_info("cannot load conntrack support for proto=%u\n",
+ par->family);
+ return ret;
}
/* drop reference count of cluster config when rule is deleted */
@@ -422,7 +421,7 @@ static void clusterip_tg_destroy(const struct xt_tgdtor_param *par)
clusterip_config_put(cipinfo->config);
- nf_ct_l3proto_module_put(par->target->family);
+ nf_ct_l3proto_module_put(par->family);
}
#ifdef CONFIG_COMPAT
@@ -479,8 +478,8 @@ static void arp_print(struct arp_payload *payload)
}
hbuffer[--k]='\0';
- printk("src %pI4@%s, dst %pI4\n",
- &payload->src_ip, hbuffer, &payload->dst_ip);
+ pr_debug("src %pI4@%s, dst %pI4\n",
+ &payload->src_ip, hbuffer, &payload->dst_ip);
}
#endif
@@ -519,7 +518,7 @@ arp_mangle(unsigned int hook,
* this wouldn't work, since we didn't subscribe the mcast group on
* other interfaces */
if (c->dev != out) {
- pr_debug("CLUSTERIP: not mangling arp reply on different "
+ pr_debug("not mangling arp reply on different "
"interface: cip'%s'-skb'%s'\n",
c->dev->name, out->name);
clusterip_config_put(c);
@@ -530,7 +529,7 @@ arp_mangle(unsigned int hook,
memcpy(payload->src_hw, c->clustermac, arp->ar_hln);
#ifdef DEBUG
- pr_debug(KERN_DEBUG "CLUSTERIP mangled arp reply: ");
+ pr_debug("mangled arp reply: ");
arp_print(payload);
#endif
@@ -601,7 +600,8 @@ static void *clusterip_seq_next(struct seq_file *s, void *v, loff_t *pos)
static void clusterip_seq_stop(struct seq_file *s, void *v)
{
- kfree(v);
+ if (!IS_ERR(v))
+ kfree(v);
}
static int clusterip_seq_show(struct seq_file *s, void *v)
@@ -706,13 +706,13 @@ static int __init clusterip_tg_init(void)
#ifdef CONFIG_PROC_FS
clusterip_procdir = proc_mkdir("ipt_CLUSTERIP", init_net.proc_net);
if (!clusterip_procdir) {
- printk(KERN_ERR "CLUSTERIP: Unable to proc dir entry\n");
+ pr_err("Unable to proc dir entry\n");
ret = -ENOMEM;
goto cleanup_hook;
}
#endif /* CONFIG_PROC_FS */
- printk(KERN_NOTICE "ClusterIP Version %s loaded successfully\n",
+ pr_info("ClusterIP Version %s loaded successfully\n",
CLUSTERIP_VERSION);
return 0;
@@ -727,8 +727,7 @@ cleanup_target:
static void __exit clusterip_tg_exit(void)
{
- printk(KERN_NOTICE "ClusterIP Version %s unloading\n",
- CLUSTERIP_VERSION);
+ pr_info("ClusterIP Version %s unloading\n", CLUSTERIP_VERSION);
#ifdef CONFIG_PROC_FS
remove_proc_entry(clusterip_procdir->name, clusterip_procdir->parent);
#endif
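
Most of the netfilter files touched by this series also gain a pr_fmt() definition (KBUILD_MODNAME plus ": ") so the pr_* helpers prefix every message automatically, which is why the literal "CLUSTERIP:"-style prefixes disappear from the individual format strings. A small userspace imitation of the trick, using gcc/clang-style variadic macros and an invented module name:

#include <stdio.h>

/*
 * Userspace imitation of the pr_fmt() trick: defining pr_fmt() before the
 * printing helpers makes every message pick up the module-name prefix.
 */
#define MODNAME "ipt_CLUSTERIP"
#define pr_fmt(fmt) MODNAME ": " fmt
#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
    pr_info("no conntrack!\n");          /* prints "ipt_CLUSTERIP: no conntrack!" */
    pr_info("unknown mode %u\n", 3u);
    return 0;
}
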
diff --git a/net/ipv4/netfilter/ipt_ECN.c b/net/ipv4/netfilter/ipt_ECN.c
index ea5cea2415c1..563049f31aef 100644
--- a/net/ipv4/netfilter/ipt_ECN.c
+++ b/net/ipv4/netfilter/ipt_ECN.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
#include <linux/module.h>
#include <linux/skbuff.h>
@@ -93,28 +93,25 @@ ecn_tg(struct sk_buff *skb, const struct xt_target_param *par)
return XT_CONTINUE;
}
-static bool ecn_tg_check(const struct xt_tgchk_param *par)
+static int ecn_tg_check(const struct xt_tgchk_param *par)
{
const struct ipt_ECN_info *einfo = par->targinfo;
const struct ipt_entry *e = par->entryinfo;
if (einfo->operation & IPT_ECN_OP_MASK) {
- printk(KERN_WARNING "ECN: unsupported ECN operation %x\n",
- einfo->operation);
- return false;
+ pr_info("unsupported ECN operation %x\n", einfo->operation);
+ return -EINVAL;
}
if (einfo->ip_ect & ~IPT_ECN_IP_MASK) {
- printk(KERN_WARNING "ECN: new ECT codepoint %x out of mask\n",
- einfo->ip_ect);
- return false;
+ pr_info("new ECT codepoint %x out of mask\n", einfo->ip_ect);
+ return -EINVAL;
}
if ((einfo->operation & (IPT_ECN_OP_SET_ECE|IPT_ECN_OP_SET_CWR)) &&
(e->ip.proto != IPPROTO_TCP || (e->ip.invflags & XT_INV_PROTO))) {
- printk(KERN_WARNING "ECN: cannot use TCP operations on a "
- "non-tcp rule\n");
- return false;
+ pr_info("cannot use TCP operations on a non-tcp rule\n");
+ return -EINVAL;
}
- return true;
+ return 0;
}
static struct xt_target ecn_tg_reg __read_mostly = {
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c
index ee128efa1c8d..3bd35f370817 100644
--- a/net/ipv4/netfilter/ipt_LOG.c
+++ b/net/ipv4/netfilter/ipt_LOG.c
@@ -9,7 +9,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
@@ -367,7 +367,7 @@ static struct nf_loginfo default_loginfo = {
.type = NF_LOG_TYPE_LOG,
.u = {
.log = {
- .level = 0,
+ .level = 5,
.logflags = NF_LOG_MASK,
},
},
@@ -439,20 +439,19 @@ log_tg(struct sk_buff *skb, const struct xt_target_param *par)
return XT_CONTINUE;
}
-static bool log_tg_check(const struct xt_tgchk_param *par)
+static int log_tg_check(const struct xt_tgchk_param *par)
{
const struct ipt_log_info *loginfo = par->targinfo;
if (loginfo->level >= 8) {
- pr_debug("LOG: level %u >= 8\n", loginfo->level);
- return false;
+ pr_debug("level %u >= 8\n", loginfo->level);
+ return -EINVAL;
}
if (loginfo->prefix[sizeof(loginfo->prefix)-1] != '\0') {
- pr_debug("LOG: prefix term %i\n",
- loginfo->prefix[sizeof(loginfo->prefix)-1]);
- return false;
+ pr_debug("prefix is not null-terminated\n");
+ return -EINVAL;
}
- return true;
+ return 0;
}
static struct xt_target log_tg_reg __read_mostly = {
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
index 650b54042b01..02b1bc477998 100644
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
@@ -8,7 +8,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
@@ -28,19 +28,19 @@ MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("Xtables: automatic-address SNAT");
/* FIXME: Multiple targets. --RR */
-static bool masquerade_tg_check(const struct xt_tgchk_param *par)
+static int masquerade_tg_check(const struct xt_tgchk_param *par)
{
const struct nf_nat_multi_range_compat *mr = par->targinfo;
if (mr->range[0].flags & IP_NAT_RANGE_MAP_IPS) {
- pr_debug("masquerade_check: bad MAP_IPS.\n");
- return false;
+ pr_debug("bad MAP_IPS.\n");
+ return -EINVAL;
}
if (mr->rangesize != 1) {
- pr_debug("masquerade_check: bad rangesize %u\n", mr->rangesize);
- return false;
+ pr_debug("bad rangesize %u\n", mr->rangesize);
+ return -EINVAL;
}
- return true;
+ return 0;
}
static unsigned int
@@ -72,7 +72,7 @@ masquerade_tg(struct sk_buff *skb, const struct xt_target_param *par)
rt = skb_rtable(skb);
newsrc = inet_select_addr(par->out, rt->rt_gateway, RT_SCOPE_UNIVERSE);
if (!newsrc) {
- printk("MASQUERADE: %s ate my IP address\n", par->out->name);
+ pr_info("%s ate my IP address\n", par->out->name);
return NF_DROP;
}
diff --git a/net/ipv4/netfilter/ipt_NETMAP.c b/net/ipv4/netfilter/ipt_NETMAP.c
index 7c29582d4ec8..708c7f8f7eea 100644
--- a/net/ipv4/netfilter/ipt_NETMAP.c
+++ b/net/ipv4/netfilter/ipt_NETMAP.c
@@ -9,7 +9,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/ip.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -22,19 +22,19 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Svenning Soerensen <svenning@post5.tele.dk>");
MODULE_DESCRIPTION("Xtables: 1:1 NAT mapping of IPv4 subnets");
-static bool netmap_tg_check(const struct xt_tgchk_param *par)
+static int netmap_tg_check(const struct xt_tgchk_param *par)
{
const struct nf_nat_multi_range_compat *mr = par->targinfo;
if (!(mr->range[0].flags & IP_NAT_RANGE_MAP_IPS)) {
- pr_debug("NETMAP:check: bad MAP_IPS.\n");
- return false;
+ pr_debug("bad MAP_IPS.\n");
+ return -EINVAL;
}
if (mr->rangesize != 1) {
- pr_debug("NETMAP:check: bad rangesize %u.\n", mr->rangesize);
- return false;
+ pr_debug("bad rangesize %u.\n", mr->rangesize);
+ return -EINVAL;
}
- return true;
+ return 0;
}
static unsigned int
diff --git a/net/ipv4/netfilter/ipt_REDIRECT.c b/net/ipv4/netfilter/ipt_REDIRECT.c
index 698e5e78685b..3cf101916523 100644
--- a/net/ipv4/netfilter/ipt_REDIRECT.c
+++ b/net/ipv4/netfilter/ipt_REDIRECT.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/ip.h>
#include <linux/timer.h>
@@ -26,19 +26,19 @@ MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("Xtables: Connection redirection to localhost");
/* FIXME: Take multiple ranges --RR */
-static bool redirect_tg_check(const struct xt_tgchk_param *par)
+static int redirect_tg_check(const struct xt_tgchk_param *par)
{
const struct nf_nat_multi_range_compat *mr = par->targinfo;
if (mr->range[0].flags & IP_NAT_RANGE_MAP_IPS) {
- pr_debug("redirect_check: bad MAP_IPS.\n");
- return false;
+ pr_debug("bad MAP_IPS.\n");
+ return -EINVAL;
}
if (mr->rangesize != 1) {
- pr_debug("redirect_check: bad rangesize %u.\n", mr->rangesize);
- return false;
+ pr_debug("bad rangesize %u.\n", mr->rangesize);
+ return -EINVAL;
}
- return true;
+ return 0;
}
static unsigned int
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index a0e8bcf04159..a86135a28058 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -9,7 +9,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
@@ -140,9 +140,6 @@ reject_tg(struct sk_buff *skb, const struct xt_target_param *par)
{
const struct ipt_reject_info *reject = par->targinfo;
- /* WARNING: This code causes reentry within iptables.
- This means that the iptables jump stack is now crap. We
- must return an absolute verdict. --RR */
switch (reject->with) {
case IPT_ICMP_NET_UNREACHABLE:
send_unreach(skb, ICMP_NET_UNREACH);
@@ -175,23 +172,23 @@ reject_tg(struct sk_buff *skb, const struct xt_target_param *par)
return NF_DROP;
}
-static bool reject_tg_check(const struct xt_tgchk_param *par)
+static int reject_tg_check(const struct xt_tgchk_param *par)
{
const struct ipt_reject_info *rejinfo = par->targinfo;
const struct ipt_entry *e = par->entryinfo;
if (rejinfo->with == IPT_ICMP_ECHOREPLY) {
- printk("ipt_REJECT: ECHOREPLY no longer supported.\n");
- return false;
+ pr_info("ECHOREPLY no longer supported.\n");
+ return -EINVAL;
} else if (rejinfo->with == IPT_TCP_RESET) {
/* Must specify that it's a TCP packet */
if (e->ip.proto != IPPROTO_TCP ||
(e->ip.invflags & XT_INV_PROTO)) {
- printk("ipt_REJECT: TCP_RESET invalid for non-tcp\n");
- return false;
+ pr_info("TCP_RESET invalid for non-tcp\n");
+ return -EINVAL;
}
}
- return true;
+ return 0;
}
static struct xt_target reject_tg_reg __read_mostly = {
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index 0dbe697f164f..8f60749e87a3 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -29,7 +29,7 @@
* Specify, after how many hundredths of a second the queue should be
* flushed even if it is not full yet.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/socket.h>
@@ -57,8 +57,6 @@ MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NFLOG);
#define ULOG_NL_EVENT 111 /* Harald's favorite number */
#define ULOG_MAXNLGROUPS 32 /* number of nlgroups */

-#define PRINTR(format, args...) do { if (net_ratelimit()) printk(format , ## args); } while (0)
-
static unsigned int nlbufsiz = NLMSG_GOODSIZE;
module_param(nlbufsiz, uint, 0400);
MODULE_PARM_DESC(nlbufsiz, "netlink buffer size");
@@ -91,12 +89,12 @@ static void ulog_send(unsigned int nlgroupnum)
ulog_buff_t *ub = &ulog_buffers[nlgroupnum];
if (timer_pending(&ub->timer)) {
- pr_debug("ipt_ULOG: ulog_send: timer was pending, deleting\n");
+ pr_debug("ulog_send: timer was pending, deleting\n");
del_timer(&ub->timer);
}
if (!ub->skb) {
- pr_debug("ipt_ULOG: ulog_send: nothing to send\n");
+ pr_debug("ulog_send: nothing to send\n");
return;
}
@@ -105,7 +103,7 @@ static void ulog_send(unsigned int nlgroupnum)
ub->lastnlh->nlmsg_type = NLMSG_DONE;
NETLINK_CB(ub->skb).dst_group = nlgroupnum + 1;
- pr_debug("ipt_ULOG: throwing %d packets to netlink group %u\n",
+ pr_debug("throwing %d packets to netlink group %u\n",
ub->qlen, nlgroupnum + 1);
netlink_broadcast(nflognl, ub->skb, 0, nlgroupnum + 1, GFP_ATOMIC);
@@ -118,7 +116,7 @@ static void ulog_send(unsigned int nlgroupnum)
/* timer function to flush queue in flushtimeout time */
static void ulog_timer(unsigned long data)
{
- pr_debug("ipt_ULOG: timer function called, calling ulog_send\n");
+ pr_debug("timer function called, calling ulog_send\n");
/* lock to protect against somebody modifying our structure
* from ipt_ulog_target at the same time */
@@ -139,7 +137,7 @@ static struct sk_buff *ulog_alloc_skb(unsigned int size)
n = max(size, nlbufsiz);
skb = alloc_skb(n, GFP_ATOMIC);
if (!skb) {
- PRINTR("ipt_ULOG: can't alloc whole buffer %ub!\n", n);
+ pr_debug("cannot alloc whole buffer %ub!\n", n);
if (n > size) {
/* try to allocate only as much as we need for
@@ -147,8 +145,7 @@ static struct sk_buff *ulog_alloc_skb(unsigned int size)
skb = alloc_skb(size, GFP_ATOMIC);
if (!skb)
- PRINTR("ipt_ULOG: can't even allocate %ub\n",
- size);
+ pr_debug("cannot even allocate %ub\n", size);
}
}
@@ -199,8 +196,7 @@ static void ipt_ulog_packet(unsigned int hooknum,
goto alloc_failure;
}
- pr_debug("ipt_ULOG: qlen %d, qthreshold %Zu\n", ub->qlen,
- loginfo->qthreshold);
+ pr_debug("qlen %d, qthreshold %Zu\n", ub->qlen, loginfo->qthreshold);
/* NLMSG_PUT contains a hidden goto nlmsg_failure !!! */
nlh = NLMSG_PUT(ub->skb, 0, ub->qlen, ULOG_NL_EVENT,
@@ -273,11 +269,9 @@ static void ipt_ulog_packet(unsigned int hooknum,
return;
nlmsg_failure:
- PRINTR("ipt_ULOG: error during NLMSG_PUT\n");
-
+ pr_debug("error during NLMSG_PUT\n");
alloc_failure:
- PRINTR("ipt_ULOG: Error building netlink message\n");
-
+ pr_debug("Error building netlink message\n");
spin_unlock_bh(&ulog_lock);
}
@@ -314,21 +308,20 @@ static void ipt_logfn(u_int8_t pf,
ipt_ulog_packet(hooknum, skb, in, out, &loginfo, prefix);
}
-static bool ulog_tg_check(const struct xt_tgchk_param *par)
+static int ulog_tg_check(const struct xt_tgchk_param *par)
{
const struct ipt_ulog_info *loginfo = par->targinfo;
if (loginfo->prefix[sizeof(loginfo->prefix) - 1] != '\0') {
- pr_debug("ipt_ULOG: prefix term %i\n",
- loginfo->prefix[sizeof(loginfo->prefix) - 1]);
- return false;
+ pr_debug("prefix not null-terminated\n");
+ return -EINVAL;
}
if (loginfo->qthreshold > ULOG_MAX_QLEN) {
- pr_debug("ipt_ULOG: queue threshold %Zu > MAX_QLEN\n",
+ pr_debug("queue threshold %Zu > MAX_QLEN\n",
loginfo->qthreshold);
- return false;
+ return -EINVAL;
}
- return true;
+ return 0;
}
#ifdef CONFIG_COMPAT
@@ -390,10 +383,10 @@ static int __init ulog_tg_init(void)
{
int ret, i;
- pr_debug("ipt_ULOG: init module\n");
+ pr_debug("init module\n");
if (nlbufsiz > 128*1024) {
- printk("Netlink buffer has to be <= 128kB\n");
+ pr_warning("Netlink buffer has to be <= 128kB\n");
return -EINVAL;
}
@@ -423,7 +416,7 @@ static void __exit ulog_tg_exit(void)
ulog_buff_t *ub;
int i;
- pr_debug("ipt_ULOG: cleanup_module\n");
+ pr_debug("cleanup_module\n");
if (nflog)
nf_log_unregister(&ipt_ulog_logger);
diff --git a/net/ipv4/netfilter/ipt_addrtype.c b/net/ipv4/netfilter/ipt_addrtype.c
index 3b216be3bc9f..e4b8f2bf8aaa 100644
--- a/net/ipv4/netfilter/ipt_addrtype.c
+++ b/net/ipv4/netfilter/ipt_addrtype.c
@@ -8,7 +8,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
@@ -70,34 +70,34 @@ addrtype_mt_v1(const struct sk_buff *skb, const struct xt_match_param *par)
return ret;
}
-static bool addrtype_mt_checkentry_v1(const struct xt_mtchk_param *par)
+static int addrtype_mt_checkentry_v1(const struct xt_mtchk_param *par)
{
struct ipt_addrtype_info_v1 *info = par->matchinfo;
if (info->flags & IPT_ADDRTYPE_LIMIT_IFACE_IN &&
info->flags & IPT_ADDRTYPE_LIMIT_IFACE_OUT) {
- printk(KERN_ERR "ipt_addrtype: both incoming and outgoing "
- "interface limitation cannot be selected\n");
- return false;
+ pr_info("both incoming and outgoing "
+ "interface limitation cannot be selected\n");
+ return -EINVAL;
}
if (par->hook_mask & ((1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_IN)) &&
info->flags & IPT_ADDRTYPE_LIMIT_IFACE_OUT) {
- printk(KERN_ERR "ipt_addrtype: output interface limitation "
- "not valid in PRE_ROUTING and INPUT\n");
- return false;
+ pr_info("output interface limitation "
+ "not valid in PREROUTING and INPUT\n");
+ return -EINVAL;
}
if (par->hook_mask & ((1 << NF_INET_POST_ROUTING) |
(1 << NF_INET_LOCAL_OUT)) &&
info->flags & IPT_ADDRTYPE_LIMIT_IFACE_IN) {
- printk(KERN_ERR "ipt_addrtype: input interface limitation "
- "not valid in POST_ROUTING and OUTPUT\n");
- return false;
+ pr_info("input interface limitation "
+ "not valid in POSTROUTING and OUTPUT\n");
+ return -EINVAL;
}
- return true;
+ return 0;
}
static struct xt_match addrtype_mt_reg[] __read_mostly = {
diff --git a/net/ipv4/netfilter/ipt_ah.c b/net/ipv4/netfilter/ipt_ah.c
index 0104c0b399de..9f9810204892 100644
--- a/net/ipv4/netfilter/ipt_ah.c
+++ b/net/ipv4/netfilter/ipt_ah.c
@@ -5,7 +5,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
#include <linux/module.h>
#include <linux/skbuff.h>
@@ -18,21 +18,15 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yon Uriarte <yon@astaro.de>");
MODULE_DESCRIPTION("Xtables: IPv4 IPsec-AH SPI match");
-#ifdef DEBUG_CONNTRACK
-#define duprintf(format, args...) printk(format , ## args)
-#else
-#define duprintf(format, args...)
-#endif
-
/* Returns 1 if the spi is matched by the range, 0 otherwise */
static inline bool
spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, bool invert)
{
bool r;
- duprintf("ah spi_match:%c 0x%x <= 0x%x <= 0x%x",invert? '!':' ',
- min,spi,max);
+ pr_debug("spi_match:%c 0x%x <= 0x%x <= 0x%x\n",
+ invert ? '!' : ' ', min, spi, max);
r=(spi >= min && spi <= max) ^ invert;
- duprintf(" result %s\n",r? "PASS" : "FAILED");
+ pr_debug(" result %s\n", r ? "PASS" : "FAILED");
return r;
}
@@ -51,7 +45,7 @@ static bool ah_mt(const struct sk_buff *skb, const struct xt_match_param *par)
/* We've been asked to examine this packet, and we
* can't. Hence, no choice but to drop.
*/
- duprintf("Dropping evil AH tinygram.\n");
+ pr_debug("Dropping evil AH tinygram.\n");
*par->hotdrop = true;
return 0;
}
@@ -61,16 +55,16 @@ static bool ah_mt(const struct sk_buff *skb, const struct xt_match_param *par)
!!(ahinfo->invflags & IPT_AH_INV_SPI));
}
-static bool ah_mt_check(const struct xt_mtchk_param *par)
+static int ah_mt_check(const struct xt_mtchk_param *par)
{
const struct ipt_ah *ahinfo = par->matchinfo;
/* Must specify no unknown invflags */
if (ahinfo->invflags & ~IPT_AH_INV_MASK) {
- duprintf("ipt_ah: unknown flags %X\n", ahinfo->invflags);
- return false;
+ pr_debug("unknown flags %X\n", ahinfo->invflags);
+ return -EINVAL;
}
- return true;
+ return 0;
}
static struct xt_match ah_mt_reg __read_mostly = {
diff --git a/net/ipv4/netfilter/ipt_ecn.c b/net/ipv4/netfilter/ipt_ecn.c
index 2a1e56b71908..32e24100d8d1 100644
--- a/net/ipv4/netfilter/ipt_ecn.c
+++ b/net/ipv4/netfilter/ipt_ecn.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
#include <linux/ip.h>
#include <net/ip.h>
@@ -85,25 +85,24 @@ static bool ecn_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return true;
}
-static bool ecn_mt_check(const struct xt_mtchk_param *par)
+static int ecn_mt_check(const struct xt_mtchk_param *par)
{
const struct ipt_ecn_info *info = par->matchinfo;
const struct ipt_ip *ip = par->entryinfo;
if (info->operation & IPT_ECN_OP_MATCH_MASK)
- return false;
+ return -EINVAL;
if (info->invert & IPT_ECN_OP_MATCH_MASK)
- return false;
+ return -EINVAL;
if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR) &&
ip->proto != IPPROTO_TCP) {
- printk(KERN_WARNING "ipt_ecn: can't match TCP bits in rule for"
- " non-tcp packets\n");
- return false;
+ pr_info("cannot match TCP bits in rule for non-tcp packets\n");
+ return -EINVAL;
}
- return true;
+ return 0;
}
static struct xt_match ecn_mt_reg __read_mostly = {
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 2fb7b76da94f..244f7cb08d68 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -336,12 +336,12 @@ static int ct_cpu_seq_show(struct seq_file *seq, void *v)
const struct ip_conntrack_stat *st = v;
if (v == SEQ_START_TOKEN) {
- seq_printf(seq, "entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete\n");
+ seq_printf(seq, "entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete search_restart\n");
return 0;
}
seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x "
- "%08x %08x %08x %08x %08x %08x %08x %08x \n",
+ "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
nr_conntracks,
st->searched,
st->found,
@@ -358,7 +358,8 @@ static int ct_cpu_seq_show(struct seq_file *seq, void *v)
st->expect_new,
st->expect_create,
- st->expect_delete
+ st->expect_delete,
+ st->search_restart
);
return 0;
}
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index 7e8e6fc75413..d4c061874f8f 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -10,7 +10,6 @@
*/
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/tcp.h>
#include <net/tcp.h>
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
index 26de2c1f7fab..b48a0fc3d9ed 100644
--- a/net/ipv4/netfilter/nf_nat_rule.c
+++ b/net/ipv4/netfilter/nf_nat_rule.c
@@ -7,6 +7,7 @@
*/
/* Everything about the rules for NAT. */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/ip.h>
#include <linux/netfilter.h>
@@ -74,28 +75,28 @@ ipt_dnat_target(struct sk_buff *skb, const struct xt_target_param *par)
return nf_nat_setup_info(ct, &mr->range[0], IP_NAT_MANIP_DST);
}
-static bool ipt_snat_checkentry(const struct xt_tgchk_param *par)
+static int ipt_snat_checkentry(const struct xt_tgchk_param *par)
{
const struct nf_nat_multi_range_compat *mr = par->targinfo;
/* Must be a valid range */
if (mr->rangesize != 1) {
- printk("SNAT: multiple ranges no longer supported\n");
- return false;
+ pr_info("SNAT: multiple ranges no longer supported\n");
+ return -EINVAL;
}
- return true;
+ return 0;
}
-static bool ipt_dnat_checkentry(const struct xt_tgchk_param *par)
+static int ipt_dnat_checkentry(const struct xt_tgchk_param *par)
{
const struct nf_nat_multi_range_compat *mr = par->targinfo;
/* Must be a valid range */
if (mr->rangesize != 1) {
- printk("DNAT: multiple ranges no longer supported\n");
- return false;
+ pr_info("DNAT: multiple ranges no longer supported\n");
+ return -EINVAL;
}
- return true;
+ return 0;
}
unsigned int
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c
index c39c9cf6bee6..84c7974f5830 100644
--- a/net/ipv4/netfilter/nf_nat_standalone.c
+++ b/net/ipv4/netfilter/nf_nat_standalone.c
@@ -138,9 +138,8 @@ nf_nat_fn(unsigned int hooknum,
ret = nf_nat_rule_find(skb, hooknum, in, out,
ct);
- if (ret != NF_ACCEPT) {
+ if (ret != NF_ACCEPT)
return ret;
- }
} else
pr_debug("Already setup manip %s for ct %p\n",
maniptype == IP_NAT_MANIP_SRC ? "SRC" : "DST",
diff --git a/net/ipv4/netfilter/nf_nat_tftp.c b/net/ipv4/netfilter/nf_nat_tftp.c
index b096e81500ae..7274a43c7a12 100644
--- a/net/ipv4/netfilter/nf_nat_tftp.c
+++ b/net/ipv4/netfilter/nf_nat_tftp.c
@@ -6,7 +6,6 @@
*/
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/udp.h>
#include <net/netfilter/nf_nat_helper.h>
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 4f1f337f4337..3dc9914c1dce 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -251,6 +251,7 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TCPSackShiftFallback", LINUX_MIB_SACKSHIFTFALLBACK),
SNMP_MIB_ITEM("TCPBacklogDrop", LINUX_MIB_TCPBACKLOGDROP),
SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP),
+ SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP),
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index cc6f097fbd5f..2c7a1639388a 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -290,7 +290,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
{
/* Charge it to the socket. */
- if (sock_queue_rcv_skb(sk, skb) < 0) {
+ if (ip_queue_rcv_skb(sk, skb) < 0) {
kfree_skb(skb);
return NET_RX_DROP;
}
@@ -381,8 +381,8 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
icmp_out_count(net, ((struct icmphdr *)
skb_transport_header(skb))->type);
- err = NF_HOOK(PF_INET, NF_INET_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
- dst_output);
+ err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
+ rt->u.dst.dev, dst_output);
if (err > 0)
err = net_xmit_errno(err);
if (err)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index cb562fdd9b9a..dea3f9264250 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -129,7 +129,6 @@ static int ip_rt_gc_elasticity __read_mostly = 8;
static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly = 256;
-static int ip_rt_secret_interval __read_mostly = 10 * 60 * HZ;
static int rt_chain_length_max __read_mostly = 20;
static struct delayed_work expires_work;
@@ -258,10 +257,9 @@ static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
(__raw_get_cpu_var(rt_cache_stat).field++)
static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
- int genid)
+ int genid)
{
- return jhash_3words((__force u32)(__be32)(daddr),
- (__force u32)(__be32)(saddr),
+ return jhash_3words((__force u32)daddr, (__force u32)saddr,
idx, genid)
& rt_hash_mask;
}
@@ -378,12 +376,13 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
struct rtable *r = v;
int len;
- seq_printf(seq, "%s\t%08lX\t%08lX\t%8X\t%d\t%u\t%d\t"
- "%08lX\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
+ seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
+ "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
r->u.dst.dev ? r->u.dst.dev->name : "*",
- (unsigned long)r->rt_dst, (unsigned long)r->rt_gateway,
+ (__force u32)r->rt_dst,
+ (__force u32)r->rt_gateway,
r->rt_flags, atomic_read(&r->u.dst.__refcnt),
- r->u.dst.__use, 0, (unsigned long)r->rt_src,
+ r->u.dst.__use, 0, (__force u32)r->rt_src,
(dst_metric(&r->u.dst, RTAX_ADVMSS) ?
(int)dst_metric(&r->u.dst, RTAX_ADVMSS) + 40 : 0),
dst_metric(&r->u.dst, RTAX_WINDOW),
@@ -685,18 +684,17 @@ static inline bool rt_caching(const struct net *net)
static inline bool compare_hash_inputs(const struct flowi *fl1,
const struct flowi *fl2)
{
- return (__force u32)(((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) |
- (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr) |
+ return ((((__force u32)fl1->nl_u.ip4_u.daddr ^ (__force u32)fl2->nl_u.ip4_u.daddr) |
+ ((__force u32)fl1->nl_u.ip4_u.saddr ^ (__force u32)fl2->nl_u.ip4_u.saddr) |
(fl1->iif ^ fl2->iif)) == 0);
}
static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
{
- return ((__force u32)((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) |
- (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr)) |
+ return (((__force u32)fl1->nl_u.ip4_u.daddr ^ (__force u32)fl2->nl_u.ip4_u.daddr) |
+ ((__force u32)fl1->nl_u.ip4_u.saddr ^ (__force u32)fl2->nl_u.ip4_u.saddr) |
(fl1->mark ^ fl2->mark) |
- (*(u16 *)&fl1->nl_u.ip4_u.tos ^
- *(u16 *)&fl2->nl_u.ip4_u.tos) |
+ (*(u16 *)&fl1->nl_u.ip4_u.tos ^ *(u16 *)&fl2->nl_u.ip4_u.tos) |
(fl1->oif ^ fl2->oif) |
(fl1->iif ^ fl2->iif)) == 0;
}
@@ -919,32 +917,11 @@ void rt_cache_flush_batch(void)
rt_do_flush(!in_softirq());
}
-/*
- * We change rt_genid and let gc do the cleanup
- */
-static void rt_secret_rebuild(unsigned long __net)
-{
- struct net *net = (struct net *)__net;
- rt_cache_invalidate(net);
- mod_timer(&net->ipv4.rt_secret_timer, jiffies + ip_rt_secret_interval);
-}
-
-static void rt_secret_rebuild_oneshot(struct net *net)
-{
- del_timer_sync(&net->ipv4.rt_secret_timer);
- rt_cache_invalidate(net);
- if (ip_rt_secret_interval)
- mod_timer(&net->ipv4.rt_secret_timer, jiffies + ip_rt_secret_interval);
-}
-
static void rt_emergency_hash_rebuild(struct net *net)
{
- if (net_ratelimit()) {
+ if (net_ratelimit())
printk(KERN_WARNING "Route hash chain too long!\n");
- printk(KERN_WARNING "Adjust your secret_interval!\n");
- }
-
- rt_secret_rebuild_oneshot(net);
+ rt_cache_invalidate(net);
}
/*
@@ -2319,8 +2296,8 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
rcu_read_lock();
for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
rth = rcu_dereference(rth->u.dst.rt_next)) {
- if (((rth->fl.fl4_dst ^ daddr) |
- (rth->fl.fl4_src ^ saddr) |
+ if ((((__force u32)rth->fl.fl4_dst ^ (__force u32)daddr) |
+ ((__force u32)rth->fl.fl4_src ^ (__force u32)saddr) |
(rth->fl.iif ^ iif) |
rth->fl.oif |
(rth->fl.fl4_tos ^ tos)) == 0 &&
@@ -3102,48 +3079,6 @@ static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
return -EINVAL;
}
-static void rt_secret_reschedule(int old)
-{
- struct net *net;
- int new = ip_rt_secret_interval;
- int diff = new - old;
-
- if (!diff)
- return;
-
- rtnl_lock();
- for_each_net(net) {
- int deleted = del_timer_sync(&net->ipv4.rt_secret_timer);
- long time;
-
- if (!new)
- continue;
-
- if (deleted) {
- time = net->ipv4.rt_secret_timer.expires - jiffies;
-
- if (time <= 0 || (time += diff) <= 0)
- time = 0;
- } else
- time = new;
-
- mod_timer(&net->ipv4.rt_secret_timer, jiffies + time);
- }
- rtnl_unlock();
-}
-
-static int ipv4_sysctl_rt_secret_interval(ctl_table *ctl, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
-{
- int old = ip_rt_secret_interval;
- int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
-
- rt_secret_reschedule(old);
-
- return ret;
-}
-
static ctl_table ipv4_route_table[] = {
{
.procname = "gc_thresh",
@@ -3252,13 +3187,6 @@ static ctl_table ipv4_route_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
- {
- .procname = "secret_interval",
- .data = &ip_rt_secret_interval,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = ipv4_sysctl_rt_secret_interval,
- },
{ }
};
@@ -3337,34 +3265,15 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
};
#endif
-
-static __net_init int rt_secret_timer_init(struct net *net)
+static __net_init int rt_genid_init(struct net *net)
{
- atomic_set(&net->ipv4.rt_genid,
- (int) ((num_physpages ^ (num_physpages>>8)) ^
- (jiffies ^ (jiffies >> 7))));
-
- net->ipv4.rt_secret_timer.function = rt_secret_rebuild;
- net->ipv4.rt_secret_timer.data = (unsigned long)net;
- init_timer_deferrable(&net->ipv4.rt_secret_timer);
-
- if (ip_rt_secret_interval) {
- net->ipv4.rt_secret_timer.expires =
- jiffies + net_random() % ip_rt_secret_interval +
- ip_rt_secret_interval;
- add_timer(&net->ipv4.rt_secret_timer);
- }
+ get_random_bytes(&net->ipv4.rt_genid,
+ sizeof(net->ipv4.rt_genid));
return 0;
}
-static __net_exit void rt_secret_timer_exit(struct net *net)
-{
- del_timer_sync(&net->ipv4.rt_secret_timer);
-}
-
-static __net_initdata struct pernet_operations rt_secret_timer_ops = {
- .init = rt_secret_timer_init,
- .exit = rt_secret_timer_exit,
+static __net_initdata struct pernet_operations rt_genid_ops = {
+ .init = rt_genid_init,
};
@@ -3425,9 +3334,6 @@ int __init ip_rt_init(void)
schedule_delayed_work(&expires_work,
net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
- if (register_pernet_subsys(&rt_secret_timer_ops))
- printk(KERN_ERR "Unable to setup rt_secret_timer\n");
-
if (ip_rt_proc_init())
printk(KERN_ERR "Unable to create route proc files\n");
#ifdef CONFIG_XFRM
@@ -3439,6 +3345,7 @@ int __init ip_rt_init(void)
#ifdef CONFIG_SYSCTL
register_pernet_subsys(&sysctl_route_ops);
#endif
+ register_pernet_subsys(&rt_genid_ops);
return rc;
}
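
With the secret-interval timer removed, route.c relies purely on the per-namespace rt_genid generation counter, now seeded once with get_random_bytes(): flushing the cache means changing the generation, and stale entries are recognised lazily on lookup. A toy illustration of generation-counter invalidation follows; the entry layout and the simple incrementing counter are invented, the kernel stores a random per-namespace value instead.

#include <stdio.h>

/*
 * Toy illustration of generation-counter invalidation: bumping the
 * generation makes every cached entry stale at once, without walking
 * the hash table.
 */
struct cache_entry { int genid; int value; };

static int current_genid;

static int entry_valid(const struct cache_entry *e)
{
    return e->genid == current_genid;
}

static void invalidate_all(void)
{
    current_genid++;
}

int main(void)
{
    struct cache_entry e = { .genid = current_genid, .value = 42 };

    printf("before flush: valid=%d\n", entry_valid(&e));
    invalidate_all();
    printf("after flush:  valid=%d\n", entry_valid(&e));
    return 0;
}
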
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 0f8caf64caa3..8ce29747ad9b 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -378,7 +378,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
struct sock *sk = sock->sk;
struct tcp_sock *tp = tcp_sk(sk);
- sock_poll_wait(file, sk->sk_sleep, wait);
+ sock_poll_wait(file, sk_sleep(sk), wait);
if (sk->sk_state == TCP_LISTEN)
return inet_csk_listen_poll(sk);
@@ -2298,7 +2298,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
if (sock_flag(sk, SOCK_KEEPOPEN) &&
!((1 << sk->sk_state) &
(TCPF_CLOSE | TCPF_LISTEN))) {
- __u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
+ u32 elapsed = keepalive_time_elapsed(tp);
if (tp->keepalive_time > elapsed)
elapsed = tp->keepalive_time - elapsed;
else
@@ -2721,7 +2721,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
struct tcphdr *th2;
unsigned int len;
unsigned int thlen;
- unsigned int flags;
+ __be32 flags;
unsigned int mss = 1;
unsigned int hlen;
unsigned int off;
@@ -2771,10 +2771,10 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
found:
flush = NAPI_GRO_CB(p)->flush;
- flush |= flags & TCP_FLAG_CWR;
- flush |= (flags ^ tcp_flag_word(th2)) &
- ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH);
- flush |= th->ack_seq ^ th2->ack_seq;
+ flush |= (__force int)(flags & TCP_FLAG_CWR);
+ flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
+ ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
+ flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
for (i = sizeof(*th); i < thlen; i += 4)
flush |= *(u32 *)((u8 *)th + i) ^
*(u32 *)((u8 *)th2 + i);
@@ -2795,8 +2795,9 @@ found:
out_check_final:
flush = len < mss;
- flush |= flags & (TCP_FLAG_URG | TCP_FLAG_PSH | TCP_FLAG_RST |
- TCP_FLAG_SYN | TCP_FLAG_FIN);
+ flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
+ TCP_FLAG_RST | TCP_FLAG_SYN |
+ TCP_FLAG_FIN));
if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
pp = head;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index f240f57b2199..e82162c211bf 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3710,7 +3710,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
}
if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
- dst_confirm(sk->sk_dst_cache);
+ dst_confirm(__sk_dst_get(sk));
return 1;
@@ -4319,7 +4319,7 @@ static void tcp_ofo_queue(struct sock *sk)
}
if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
- SOCK_DEBUG(sk, "ofo packet was already received \n");
+ SOCK_DEBUG(sk, "ofo packet was already received\n");
__skb_unlink(skb, &tp->out_of_order_queue);
__kfree_skb(skb);
continue;
@@ -4367,6 +4367,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq)
goto drop;
+ skb_dst_drop(skb);
__skb_pull(skb, th->doff * 4);
TCP_ECN_accept_cwr(tp, skb);
@@ -5833,7 +5834,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
if (tp->snd_una == tp->write_seq) {
tcp_set_state(sk, TCP_FIN_WAIT2);
sk->sk_shutdown |= SEND_SHUTDOWN;
- dst_confirm(sk->sk_dst_cache);
+ dst_confirm(__sk_dst_get(sk));
if (!sock_flag(sk, SOCK_DEAD))
/* Wake up lingering close() */
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 3c23e70885f4..771f8146a2e5 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -519,26 +519,31 @@ out:
sock_put(sk);
}
-/* This routine computes an IPv4 TCP checksum. */
-void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
+static void __tcp_v4_send_check(struct sk_buff *skb,
+ __be32 saddr, __be32 daddr)
{
- struct inet_sock *inet = inet_sk(sk);
struct tcphdr *th = tcp_hdr(skb);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- th->check = ~tcp_v4_check(len, inet->inet_saddr,
- inet->inet_daddr, 0);
+ th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
skb->csum_start = skb_transport_header(skb) - skb->head;
skb->csum_offset = offsetof(struct tcphdr, check);
} else {
- th->check = tcp_v4_check(len, inet->inet_saddr,
- inet->inet_daddr,
+ th->check = tcp_v4_check(skb->len, saddr, daddr,
csum_partial(th,
th->doff << 2,
skb->csum));
}
}
+/* This routine computes an IPv4 TCP checksum. */
+void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
+{
+ struct inet_sock *inet = inet_sk(sk);
+
+ __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
+}
+
int tcp_v4_gso_send_check(struct sk_buff *skb)
{
const struct iphdr *iph;
@@ -551,10 +556,8 @@ int tcp_v4_gso_send_check(struct sk_buff *skb)
th = tcp_hdr(skb);
th->check = 0;
- th->check = ~tcp_v4_check(skb->len, iph->saddr, iph->daddr, 0);
- skb->csum_start = skb_transport_header(skb) - skb->head;
- skb->csum_offset = offsetof(struct tcphdr, check);
skb->ip_summed = CHECKSUM_PARTIAL;
+ __tcp_v4_send_check(skb, iph->saddr, iph->daddr);
return 0;
}
@@ -763,13 +766,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
skb = tcp_make_synack(sk, dst, req, rvp);
if (skb) {
- struct tcphdr *th = tcp_hdr(skb);
-
- th->check = tcp_v4_check(skb->len,
- ireq->loc_addr,
- ireq->rmt_addr,
- csum_partial(th, skb->len,
- skb->csum));
+ __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
ireq->rmt_addr,
@@ -1289,8 +1286,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
goto drop_and_release;
/* Secret recipe starts with IP addresses */
- *mess++ ^= daddr;
- *mess++ ^= saddr;
+ *mess++ ^= (__force u32)daddr;
+ *mess++ ^= (__force u32)saddr;
/* plus variable length Initiator Cookie */
c = (u8 *)mess;
@@ -1675,6 +1672,8 @@ process:
skb->dev = NULL;
+ sock_rps_save_rxhash(sk, skb->rxhash);
+
bh_lock_sock_nested(sk);
ret = 0;
if (!sock_owned_by_user(sk)) {
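The tcp_ipv4.c hunks above fold the IPv4 TCP checksum setup into one __tcp_v4_send_check() helper that takes the addresses explicitly, so the normal transmit path, the GSO path and the SYN-ACK path can all share it. For illustration, a minimal user-space sketch of the ones'-complement pseudo-header sum that tcp_v4_check() ultimately computes; the function names and byte handling below are stand-ins, not kernel code, and the segment's check field is assumed to be zero when it is summed.

#include <stdint.h>
#include <stddef.h>

/* Fold a 32-bit accumulator into a 16-bit ones'-complement checksum. */
static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

/* Add a buffer to the running sum as big-endian 16-bit words. */
static uint32_t csum_add(uint32_t sum, const uint8_t *p, size_t len)
{
	while (len > 1) {
		sum += ((uint32_t)p[0] << 8) | p[1];
		p += 2;
		len -= 2;
	}
	if (len)
		sum += (uint32_t)p[0] << 8;
	return sum;
}

/* Pseudo-header (saddr, daddr, zero, protocol 6, TCP length) plus the
 * TCP segment itself, whose checksum field must already be zero. */
static uint16_t tcp_checksum(uint32_t saddr, uint32_t daddr,
			     const uint8_t *tcp, size_t len)
{
	uint8_t ph[12];
	uint32_t sum = 0;

	ph[0] = saddr >> 24; ph[1] = saddr >> 16; ph[2] = saddr >> 8; ph[3] = saddr;
	ph[4] = daddr >> 24; ph[5] = daddr >> 16; ph[6] = daddr >> 8; ph[7] = daddr;
	ph[8] = 0;           ph[9] = 6;           ph[10] = len >> 8;  ph[11] = len;

	sum = csum_add(sum, ph, sizeof(ph));
	sum = csum_add(sum, tcp, len);
	return csum_fold(sum);
}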
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 5fabff9ac6d6..794c2e122a41 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -672,6 +672,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
inet_rsk(req)->acked = 1;
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
return NULL;
}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 0dda86e72ad8..5db3a2c6cb33 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -350,6 +350,7 @@ static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
*/
static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
{
+ skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum = 0;
TCP_SKB_CB(skb)->flags = flags;
@@ -860,7 +861,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
th->urg_ptr = htons(tp->snd_up - tcb->seq);
th->urg = 1;
} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
- th->urg_ptr = 0xFFFF;
+ th->urg_ptr = htons(0xFFFF);
th->urg = 1;
}
}
@@ -878,7 +879,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
}
#endif
- icsk->icsk_af_ops->send_check(sk, skb->len, skb);
+ icsk->icsk_af_ops->send_check(sk, skb);
if (likely(tcb->flags & TCPCB_FLAG_ACK))
tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
@@ -887,9 +888,10 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
tcp_event_data_sent(tp, skb, sk);
if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
- TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
+ TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
+ tcp_skb_pcount(skb));
- err = icsk->icsk_af_ops->queue_xmit(skb, 0);
+ err = icsk->icsk_af_ops->queue_xmit(skb);
if (likely(err <= 0))
return err;
@@ -2484,7 +2486,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
*tail-- ^= TCP_SKB_CB(skb)->seq + 1;
/* recommended */
- *tail-- ^= ((th->dest << 16) | th->source);
+ *tail-- ^= (((__force u32)th->dest << 16) | (__force u32)th->source);
*tail-- ^= (u32)(unsigned long)cvp; /* per sockopt */
sha_transform((__u32 *)&xvp->cookie_bakery[0],
@@ -2502,7 +2504,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
th->window = htons(min(req->rcv_wnd, 65535U));
tcp_options_write((__be32 *)(th + 1), tp, &opts);
th->doff = (tcp_header_size >> 2);
- TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
+ TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, tcp_skb_pcount(skb));
#ifdef CONFIG_TCP_MD5SIG
/* Okay, we have all we need - do the md5 hash if needed */
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 8a0ab2977f1f..440a5c6004f6 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -172,14 +172,14 @@ static int tcp_write_timeout(struct sock *sk)
if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
if (icsk->icsk_retransmits)
- dst_negative_advice(&sk->sk_dst_cache, sk);
+ dst_negative_advice(sk);
retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
} else {
if (retransmits_timed_out(sk, sysctl_tcp_retries1)) {
/* Black hole detection */
tcp_mtu_probing(icsk, sk);
- dst_negative_advice(&sk->sk_dst_cache, sk);
+ dst_negative_advice(sk);
}
retry_until = sysctl_tcp_retries2;
@@ -517,7 +517,7 @@ static void tcp_keepalive_timer (unsigned long data)
struct sock *sk = (struct sock *) data;
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
- __u32 elapsed;
+ u32 elapsed;
/* Only process if socket is not in use. */
bh_lock_sock(sk);
@@ -554,7 +554,7 @@ static void tcp_keepalive_timer (unsigned long data)
if (tp->packets_out || tcp_send_head(sk))
goto resched;
- elapsed = tcp_time_stamp - tp->rcv_tstamp;
+ elapsed = keepalive_time_elapsed(tp);
if (elapsed >= keepalive_time_when(tp)) {
if (icsk->icsk_probes_out >= keepalive_probes(tp)) {
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 8fef859db35d..f3e00c5cd1ed 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -307,13 +307,13 @@ static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
static unsigned int udp4_portaddr_hash(struct net *net, __be32 saddr,
unsigned int port)
{
- return jhash_1word(saddr, net_hash_mix(net)) ^ port;
+ return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
}
int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
unsigned int hash2_nulladdr =
- udp4_portaddr_hash(sock_net(sk), INADDR_ANY, snum);
+ udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
unsigned int hash2_partial =
udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);
@@ -466,14 +466,14 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
daddr, hnum, dif,
hslot2, slot2);
if (!result) {
- hash2 = udp4_portaddr_hash(net, INADDR_ANY, hnum);
+ hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
slot2 = hash2 & udptable->mask;
hslot2 = &udptable->hash2[slot2];
if (hslot->count < hslot2->count)
goto begin;
result = udp4_lib_lookup2(net, saddr, sport,
- INADDR_ANY, hnum, dif,
+ htonl(INADDR_ANY), hnum, dif,
hslot2, slot2);
}
rcu_read_unlock();
@@ -1062,10 +1062,10 @@ static unsigned int first_packet_length(struct sock *sk)
spin_unlock_bh(&rcvq->lock);
if (!skb_queue_empty(&list_kill)) {
- lock_sock(sk);
+ lock_sock_bh(sk);
__skb_queue_purge(&list_kill);
sk_mem_reclaim_partial(sk);
- release_sock(sk);
+ unlock_sock_bh(sk);
}
return res;
}
@@ -1196,10 +1196,10 @@ out:
return err;
csum_copy_err:
- lock_sock(sk);
+ lock_sock_bh(sk);
if (!skb_kill_datagram(sk, skb, flags))
UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
- release_sock(sk);
+ unlock_sock_bh(sk);
if (noblock)
return -EAGAIN;
@@ -1217,6 +1217,7 @@ int udp_disconnect(struct sock *sk, int flags)
sk->sk_state = TCP_CLOSE;
inet->inet_daddr = 0;
inet->inet_dport = 0;
+ sock_rps_save_rxhash(sk, 0);
sk->sk_bound_dev_if = 0;
if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
inet_reset_saddr(sk);
@@ -1258,8 +1259,12 @@ EXPORT_SYMBOL(udp_lib_unhash);
static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
- int rc = sock_queue_rcv_skb(sk, skb);
+ int rc;
+ if (inet_sk(sk)->inet_daddr)
+ sock_rps_save_rxhash(sk, skb->rxhash);
+
+ rc = ip_queue_rcv_skb(sk, skb);
if (rc < 0) {
int is_udplite = IS_UDPLITE(sk);
@@ -1367,6 +1372,10 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
goto drop;
}
+
+ if (sk_rcvqueues_full(sk, skb))
+ goto drop;
+
rc = 0;
bh_lock_sock(sk);
@@ -1527,6 +1536,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
uh = udp_hdr(skb);
ulen = ntohs(uh->len);
+ saddr = ip_hdr(skb)->saddr;
+ daddr = ip_hdr(skb)->daddr;
+
if (ulen > skb->len)
goto short_packet;
@@ -1540,9 +1552,6 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
if (udp4_csum_init(skb, uh, proto))
goto csum_error;
- saddr = ip_hdr(skb)->saddr;
- daddr = ip_hdr(skb)->daddr;
-
if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
return __udp4_lib_mcast_deliver(net, skb, uh,
saddr, daddr, udptable);
@@ -1615,9 +1624,9 @@ int udp_rcv(struct sk_buff *skb)
void udp_destroy_sock(struct sock *sk)
{
- lock_sock(sk);
+ lock_sock_bh(sk);
udp_flush_pending_frames(sk);
- release_sock(sk);
+ unlock_sock_bh(sk);
}
/*
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
index c791bb63203f..abcd7ed65db1 100644
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -61,7 +61,7 @@ int xfrm4_transport_finish(struct sk_buff *skb, int async)
iph->tot_len = htons(skb->len);
ip_send_check(iph);
- NF_HOOK(PF_INET, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
+ NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
xfrm4_rcv_encap_finish);
return 0;
}
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index c908bd99bcba..571aa96a175c 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -86,7 +86,7 @@ static int xfrm4_output_finish(struct sk_buff *skb)
int xfrm4_output(struct sk_buff *skb)
{
- return NF_HOOK_COND(PF_INET, NF_INET_POST_ROUTING, skb,
+ return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb,
NULL, skb_dst(skb)->dev, xfrm4_output_finish,
!(IPCB(skb)->flags & IPSKB_REROUTED));
}
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index e4a1483fba77..1705476670ef 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -59,27 +59,6 @@ static int xfrm4_get_saddr(struct net *net,
return 0;
}
-static struct dst_entry *
-__xfrm4_find_bundle(struct flowi *fl, struct xfrm_policy *policy)
-{
- struct dst_entry *dst;
-
- read_lock_bh(&policy->lock);
- for (dst = policy->bundles; dst; dst = dst->next) {
- struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
- if (xdst->u.rt.fl.oif == fl->oif && /*XXX*/
- xdst->u.rt.fl.fl4_dst == fl->fl4_dst &&
- xdst->u.rt.fl.fl4_src == fl->fl4_src &&
- xdst->u.rt.fl.fl4_tos == fl->fl4_tos &&
- xfrm_bundle_ok(policy, xdst, fl, AF_INET, 0)) {
- dst_clone(dst);
- break;
- }
- }
- read_unlock_bh(&policy->lock);
- return dst;
-}
-
static int xfrm4_get_tos(struct flowi *fl)
{
return fl->fl4_tos;
@@ -259,7 +238,6 @@ static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
.dst_ops = &xfrm4_dst_ops,
.dst_lookup = xfrm4_dst_lookup,
.get_saddr = xfrm4_get_saddr,
- .find_bundle = __xfrm4_find_bundle,
.decode_session = _decode_session4,
.get_tos = xfrm4_get_tos,
.init_path = xfrm4_init_path,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 413054f02aab..3984f52181f4 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -82,7 +82,7 @@
#include <linux/random.h>
#endif
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <linux/proc_fs.h>
@@ -98,7 +98,11 @@
#endif
#define INFINITY_LIFE_TIME 0xFFFFFFFF
-#define TIME_DELTA(a,b) ((unsigned long)((long)(a) - (long)(b)))
+#define TIME_DELTA(a, b) ((unsigned long)((long)(a) - (long)(b)))
+
+#define ADDRCONF_TIMER_FUZZ_MINUS (HZ > 50 ? HZ/50 : 1)
+#define ADDRCONF_TIMER_FUZZ (HZ / 4)
+#define ADDRCONF_TIMER_FUZZ_MAX (HZ)
#ifdef CONFIG_SYSCTL
static void addrconf_sysctl_register(struct inet6_dev *idev);
@@ -127,8 +131,8 @@ static int ipv6_count_addresses(struct inet6_dev *idev);
/*
* Configured unicast address hash table
*/
-static struct inet6_ifaddr *inet6_addr_lst[IN6_ADDR_HSIZE];
-static DEFINE_RWLOCK(addrconf_hash_lock);
+static struct hlist_head inet6_addr_lst[IN6_ADDR_HSIZE];
+static DEFINE_SPINLOCK(addrconf_hash_lock);
static void addrconf_verify(unsigned long);
@@ -138,8 +142,8 @@ static DEFINE_SPINLOCK(addrconf_verify_lock);
static void addrconf_join_anycast(struct inet6_ifaddr *ifp);
static void addrconf_leave_anycast(struct inet6_ifaddr *ifp);
-static void addrconf_bonding_change(struct net_device *dev,
- unsigned long event);
+static void addrconf_type_change(struct net_device *dev,
+ unsigned long event);
static int addrconf_ifdown(struct net_device *dev, int how);
static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags);
@@ -152,8 +156,8 @@ static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
static void inet6_prefix_notify(int event, struct inet6_dev *idev,
struct prefix_info *pinfo);
-static int ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
- struct net_device *dev);
+static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
+ struct net_device *dev);
static ATOMIC_NOTIFIER_HEAD(inet6addr_chain);
@@ -250,8 +254,7 @@ static void addrconf_del_timer(struct inet6_ifaddr *ifp)
__in6_ifa_put(ifp);
}
-enum addrconf_timer_t
-{
+enum addrconf_timer_t {
AC_NONE,
AC_DAD,
AC_RS,
@@ -271,7 +274,8 @@ static void addrconf_mod_timer(struct inet6_ifaddr *ifp,
case AC_RS:
ifp->timer.function = addrconf_rs_timer;
break;
- default:;
+ default:
+ break;
}
ifp->timer.expires = jiffies + when;
add_timer(&ifp->timer);
@@ -318,7 +322,7 @@ void in6_dev_finish_destroy(struct inet6_dev *idev)
{
struct net_device *dev = idev->dev;
- WARN_ON(idev->addr_list != NULL);
+ WARN_ON(!list_empty(&idev->addr_list));
WARN_ON(idev->mc_list != NULL);
#ifdef NET_REFCNT_DEBUG
@@ -326,7 +330,7 @@ void in6_dev_finish_destroy(struct inet6_dev *idev)
#endif
dev_put(dev);
if (!idev->dead) {
- printk("Freeing alive inet6 device %p\n", idev);
+ pr_warning("Freeing alive inet6 device %p\n", idev);
return;
}
snmp6_free_dev(idev);
@@ -351,6 +355,8 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
rwlock_init(&ndev->lock);
ndev->dev = dev;
+ INIT_LIST_HEAD(&ndev->addr_list);
+
memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));
ndev->cnf.mtu6 = dev->mtu;
ndev->cnf.sysctl = NULL;
@@ -402,6 +408,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
#endif
#ifdef CONFIG_IPV6_PRIVACY
+ INIT_LIST_HEAD(&ndev->tempaddr_list);
setup_timer(&ndev->regen_timer, ipv6_regen_rndid, (unsigned long)ndev);
if ((dev->flags&IFF_LOOPBACK) ||
dev->type == ARPHRD_TUNNEL ||
@@ -439,8 +446,10 @@ static struct inet6_dev * ipv6_find_idev(struct net_device *dev)
ASSERT_RTNL();
- if ((idev = __in6_dev_get(dev)) == NULL) {
- if ((idev = ipv6_add_dev(dev)) == NULL)
+ idev = __in6_dev_get(dev);
+ if (!idev) {
+ idev = ipv6_add_dev(dev);
+ if (!idev)
return NULL;
}
@@ -466,7 +475,8 @@ static void dev_forward_change(struct inet6_dev *idev)
else
ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters);
}
- for (ifa=idev->addr_list; ifa; ifa=ifa->if_next) {
+
+ list_for_each_entry(ifa, &idev->addr_list, if_list) {
if (ifa->flags&IFA_F_TENTATIVE)
continue;
if (idev->cnf.forwarding)
@@ -523,12 +533,16 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
}
#endif
-/* Nobody refers to this ifaddr, destroy it */
+static void inet6_ifa_finish_destroy_rcu(struct rcu_head *head)
+{
+ struct inet6_ifaddr *ifp = container_of(head, struct inet6_ifaddr, rcu);
+ kfree(ifp);
+}
+/* Nobody refers to this ifaddr, destroy it */
void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
{
- WARN_ON(ifp->if_next != NULL);
- WARN_ON(ifp->lst_next != NULL);
+ WARN_ON(!hlist_unhashed(&ifp->addr_lst));
#ifdef NET_REFCNT_DEBUG
printk(KERN_DEBUG "inet6_ifa_finish_destroy\n");
@@ -537,54 +551,46 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
in6_dev_put(ifp->idev);
if (del_timer(&ifp->timer))
- printk("Timer is still running, when freeing ifa=%p\n", ifp);
+ pr_notice("Timer is still running, when freeing ifa=%p\n", ifp);
if (!ifp->dead) {
- printk("Freeing alive inet6 address %p\n", ifp);
+ pr_warning("Freeing alive inet6 address %p\n", ifp);
return;
}
dst_release(&ifp->rt->u.dst);
- kfree(ifp);
+ call_rcu(&ifp->rcu, inet6_ifa_finish_destroy_rcu);
}
static void
ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp)
{
- struct inet6_ifaddr *ifa, **ifap;
+ struct list_head *p;
int ifp_scope = ipv6_addr_src_scope(&ifp->addr);
/*
* Each device address list is sorted in order of scope -
* global before linklocal.
*/
- for (ifap = &idev->addr_list; (ifa = *ifap) != NULL;
- ifap = &ifa->if_next) {
+ list_for_each(p, &idev->addr_list) {
+ struct inet6_ifaddr *ifa
+ = list_entry(p, struct inet6_ifaddr, if_list);
if (ifp_scope >= ipv6_addr_src_scope(&ifa->addr))
break;
}
- ifp->if_next = *ifap;
- *ifap = ifp;
+ list_add_tail(&ifp->if_list, p);
}
-/*
- * Hash function taken from net_alias.c
- */
-static u8 ipv6_addr_hash(const struct in6_addr *addr)
+static u32 ipv6_addr_hash(const struct in6_addr *addr)
{
- __u32 word;
-
/*
* We perform the hash function over the last 64 bits of the address
* This will include the IEEE address token on links that support it.
*/
-
- word = (__force u32)(addr->s6_addr32[2] ^ addr->s6_addr32[3]);
- word ^= (word >> 16);
- word ^= (word >> 8);
-
- return ((word ^ (word >> 4)) & 0x0f);
+ return jhash_2words((__force u32)addr->s6_addr32[2],
+ (__force u32)addr->s6_addr32[3], 0)
+ & (IN6_ADDR_HSIZE - 1);
}
/* On success it returns ifp with increased reference count */
@@ -595,7 +601,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
{
struct inet6_ifaddr *ifa = NULL;
struct rt6_info *rt;
- int hash;
+ unsigned int hash;
int err = 0;
int addr_type = ipv6_addr_type(addr);
@@ -616,7 +622,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
goto out2;
}
- write_lock(&addrconf_hash_lock);
+ spin_lock(&addrconf_hash_lock);
/* Ignore adding duplicate addresses on an interface */
if (ipv6_chk_same_addr(dev_net(idev->dev), addr, idev->dev)) {
@@ -643,6 +649,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
spin_lock_init(&ifa->lock);
init_timer(&ifa->timer);
+ INIT_HLIST_NODE(&ifa->addr_lst);
ifa->timer.data = (unsigned long) ifa;
ifa->scope = scope;
ifa->prefix_len = pfxlen;
@@ -669,10 +676,8 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
/* Add to big hash table */
hash = ipv6_addr_hash(addr);
- ifa->lst_next = inet6_addr_lst[hash];
- inet6_addr_lst[hash] = ifa;
- in6_ifa_hold(ifa);
- write_unlock(&addrconf_hash_lock);
+ hlist_add_head_rcu(&ifa->addr_lst, &inet6_addr_lst[hash]);
+ spin_unlock(&addrconf_hash_lock);
write_lock(&idev->lock);
/* Add to inet6_dev unicast addr list. */
@@ -680,8 +685,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
#ifdef CONFIG_IPV6_PRIVACY
if (ifa->flags&IFA_F_TEMPORARY) {
- ifa->tmp_next = idev->tempaddr_list;
- idev->tempaddr_list = ifa;
+ list_add(&ifa->tmp_list, &idev->tempaddr_list);
in6_ifa_hold(ifa);
}
#endif
@@ -700,7 +704,7 @@ out2:
return ifa;
out:
- write_unlock(&addrconf_hash_lock);
+ spin_unlock(&addrconf_hash_lock);
goto out2;
}
@@ -708,7 +712,7 @@ out:
static void ipv6_del_addr(struct inet6_ifaddr *ifp)
{
- struct inet6_ifaddr *ifa, **ifap;
+ struct inet6_ifaddr *ifa, *ifn;
struct inet6_dev *idev = ifp->idev;
int hash;
int deleted = 0, onlink = 0;
@@ -718,42 +722,27 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
ifp->dead = 1;
- write_lock_bh(&addrconf_hash_lock);
- for (ifap = &inet6_addr_lst[hash]; (ifa=*ifap) != NULL;
- ifap = &ifa->lst_next) {
- if (ifa == ifp) {
- *ifap = ifa->lst_next;
- __in6_ifa_put(ifp);
- ifa->lst_next = NULL;
- break;
- }
- }
- write_unlock_bh(&addrconf_hash_lock);
+ spin_lock_bh(&addrconf_hash_lock);
+ hlist_del_init_rcu(&ifp->addr_lst);
+ spin_unlock_bh(&addrconf_hash_lock);
write_lock_bh(&idev->lock);
#ifdef CONFIG_IPV6_PRIVACY
if (ifp->flags&IFA_F_TEMPORARY) {
- for (ifap = &idev->tempaddr_list; (ifa=*ifap) != NULL;
- ifap = &ifa->tmp_next) {
- if (ifa == ifp) {
- *ifap = ifa->tmp_next;
- if (ifp->ifpub) {
- in6_ifa_put(ifp->ifpub);
- ifp->ifpub = NULL;
- }
- __in6_ifa_put(ifp);
- ifa->tmp_next = NULL;
- break;
- }
+ list_del(&ifp->tmp_list);
+ if (ifp->ifpub) {
+ in6_ifa_put(ifp->ifpub);
+ ifp->ifpub = NULL;
}
+ __in6_ifa_put(ifp);
}
#endif
- for (ifap = &idev->addr_list; (ifa=*ifap) != NULL;) {
+ list_for_each_entry_safe(ifa, ifn, &idev->addr_list, if_list) {
if (ifa == ifp) {
- *ifap = ifa->if_next;
+ list_del_init(&ifp->if_list);
__in6_ifa_put(ifp);
- ifa->if_next = NULL;
+
if (!(ifp->flags & IFA_F_PERMANENT) || onlink > 0)
break;
deleted = 1;
@@ -786,7 +775,6 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
}
}
}
- ifap = &ifa->if_next;
}
write_unlock_bh(&idev->lock);
@@ -1165,7 +1153,7 @@ int ipv6_dev_get_saddr(struct net *net, struct net_device *dst_dev,
continue;
read_lock_bh(&idev->lock);
- for (score->ifa = idev->addr_list; score->ifa; score->ifa = score->ifa->if_next) {
+ list_for_each_entry(score->ifa, &idev->addr_list, if_list) {
int i;
/*
@@ -1243,7 +1231,6 @@ try_nextdev:
in6_ifa_put(hiscore->ifa);
return 0;
}
-
EXPORT_SYMBOL(ipv6_dev_get_saddr);
int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
@@ -1253,12 +1240,14 @@ int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
int err = -EADDRNOTAVAIL;
rcu_read_lock();
- if ((idev = __in6_dev_get(dev)) != NULL) {
+ idev = __in6_dev_get(dev);
+ if (idev) {
struct inet6_ifaddr *ifp;
read_lock_bh(&idev->lock);
- for (ifp=idev->addr_list; ifp; ifp=ifp->if_next) {
- if (ifp->scope == IFA_LINK && !(ifp->flags & banned_flags)) {
+ list_for_each_entry(ifp, &idev->addr_list, if_list) {
+ if (ifp->scope == IFA_LINK &&
+ !(ifp->flags & banned_flags)) {
ipv6_addr_copy(addr, &ifp->addr);
err = 0;
break;
@@ -1276,7 +1265,7 @@ static int ipv6_count_addresses(struct inet6_dev *idev)
struct inet6_ifaddr *ifp;
read_lock_bh(&idev->lock);
- for (ifp=idev->addr_list; ifp; ifp=ifp->if_next)
+ list_for_each_entry(ifp, &idev->addr_list, if_list)
cnt++;
read_unlock_bh(&idev->lock);
return cnt;
@@ -1285,11 +1274,12 @@ static int ipv6_count_addresses(struct inet6_dev *idev)
int ipv6_chk_addr(struct net *net, struct in6_addr *addr,
struct net_device *dev, int strict)
{
- struct inet6_ifaddr * ifp;
- u8 hash = ipv6_addr_hash(addr);
+ struct inet6_ifaddr *ifp = NULL;
+ struct hlist_node *node;
+ unsigned int hash = ipv6_addr_hash(addr);
- read_lock_bh(&addrconf_hash_lock);
- for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) {
+ rcu_read_lock_bh();
+ hlist_for_each_entry_rcu(ifp, node, &inet6_addr_lst[hash], addr_lst) {
if (!net_eq(dev_net(ifp->idev->dev), net))
continue;
if (ipv6_addr_equal(&ifp->addr, addr) &&
@@ -1299,27 +1289,28 @@ int ipv6_chk_addr(struct net *net, struct in6_addr *addr,
break;
}
}
- read_unlock_bh(&addrconf_hash_lock);
+ rcu_read_unlock_bh();
+
return ifp != NULL;
}
EXPORT_SYMBOL(ipv6_chk_addr);
-static
-int ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
- struct net_device *dev)
+static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
+ struct net_device *dev)
{
- struct inet6_ifaddr * ifp;
- u8 hash = ipv6_addr_hash(addr);
+ unsigned int hash = ipv6_addr_hash(addr);
+ struct inet6_ifaddr *ifp;
+ struct hlist_node *node;
- for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) {
+ hlist_for_each_entry(ifp, node, &inet6_addr_lst[hash], addr_lst) {
if (!net_eq(dev_net(ifp->idev->dev), net))
continue;
if (ipv6_addr_equal(&ifp->addr, addr)) {
if (dev == NULL || ifp->idev->dev == dev)
- break;
+ return true;
}
}
- return ifp != NULL;
+ return false;
}
int ipv6_chk_prefix(struct in6_addr *addr, struct net_device *dev)
@@ -1333,7 +1324,7 @@ int ipv6_chk_prefix(struct in6_addr *addr, struct net_device *dev)
idev = __in6_dev_get(dev);
if (idev) {
read_lock_bh(&idev->lock);
- for (ifa = idev->addr_list; ifa; ifa = ifa->if_next) {
+ list_for_each_entry(ifa, &idev->addr_list, if_list) {
onlink = ipv6_prefix_equal(addr, &ifa->addr,
ifa->prefix_len);
if (onlink)
@@ -1350,24 +1341,26 @@ EXPORT_SYMBOL(ipv6_chk_prefix);
struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr,
struct net_device *dev, int strict)
{
- struct inet6_ifaddr * ifp;
- u8 hash = ipv6_addr_hash(addr);
+ struct inet6_ifaddr *ifp, *result = NULL;
+ unsigned int hash = ipv6_addr_hash(addr);
+ struct hlist_node *node;
- read_lock_bh(&addrconf_hash_lock);
- for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) {
+ rcu_read_lock_bh();
+ hlist_for_each_entry_rcu_bh(ifp, node, &inet6_addr_lst[hash], addr_lst) {
if (!net_eq(dev_net(ifp->idev->dev), net))
continue;
if (ipv6_addr_equal(&ifp->addr, addr)) {
if (dev == NULL || ifp->idev->dev == dev ||
!(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) {
+ result = ifp;
in6_ifa_hold(ifp);
break;
}
}
}
- read_unlock_bh(&addrconf_hash_lock);
+ rcu_read_unlock_bh();
- return ifp;
+ return result;
}
/* Gets referenced address, destroys ifaddr */
@@ -1570,7 +1563,7 @@ static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev)
struct inet6_ifaddr *ifp;
read_lock_bh(&idev->lock);
- for (ifp=idev->addr_list; ifp; ifp=ifp->if_next) {
+ list_for_each_entry(ifp, &idev->addr_list, if_list) {
if (ifp->scope == IFA_LINK && !(ifp->flags&IFA_F_TENTATIVE)) {
memcpy(eui, ifp->addr.s6_addr+8, 8);
err = 0;
@@ -1738,7 +1731,8 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
ASSERT_RTNL();
- if ((idev = ipv6_find_idev(dev)) == NULL)
+ idev = ipv6_find_idev(dev);
+ if (!idev)
return NULL;
/* Add default multicast route */
@@ -1971,7 +1965,7 @@ ok:
#ifdef CONFIG_IPV6_PRIVACY
read_lock_bh(&in6_dev->lock);
/* update all temporary addresses in the list */
- for (ift=in6_dev->tempaddr_list; ift; ift=ift->tmp_next) {
+ list_for_each_entry(ift, &in6_dev->tempaddr_list, tmp_list) {
/*
* When adjusting the lifetimes of an existing
* temporary address, only lower the lifetimes.
@@ -2174,7 +2168,7 @@ static int inet6_addr_del(struct net *net, int ifindex, struct in6_addr *pfx,
return -ENXIO;
read_lock_bh(&idev->lock);
- for (ifp = idev->addr_list; ifp; ifp=ifp->if_next) {
+ list_for_each_entry(ifp, &idev->addr_list, if_list) {
if (ifp->prefix_len == plen &&
ipv6_addr_equal(pfx, &ifp->addr)) {
in6_ifa_hold(ifp);
@@ -2185,7 +2179,7 @@ static int inet6_addr_del(struct net *net, int ifindex, struct in6_addr *pfx,
/* If the last address is deleted administratively,
disable IPv6 on this interface.
*/
- if (idev->addr_list == NULL)
+ if (list_empty(&idev->addr_list))
addrconf_ifdown(idev->dev, 1);
return 0;
}
@@ -2446,7 +2440,8 @@ static void addrconf_ip6_tnl_config(struct net_device *dev)
ASSERT_RTNL();
- if ((idev = addrconf_add_dev(dev)) == NULL) {
+ idev = addrconf_add_dev(dev);
+ if (!idev) {
printk(KERN_DEBUG "init ip6-ip6: add_dev failed\n");
return;
}
@@ -2461,7 +2456,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
int run_pending = 0;
int err;
- switch(event) {
+ switch (event) {
case NETDEV_REGISTER:
if (!idev && dev->mtu >= IPV6_MIN_MTU) {
idev = ipv6_add_dev(dev);
@@ -2469,6 +2464,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
return notifier_from_errno(-ENOMEM);
}
break;
+
case NETDEV_UP:
case NETDEV_CHANGE:
if (dev->flags & IFF_SLAVE)
@@ -2498,10 +2494,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
}
if (idev) {
- if (idev->if_flags & IF_READY) {
+ if (idev->if_flags & IF_READY)
/* device is already configured. */
break;
- }
idev->if_flags |= IF_READY;
}
@@ -2513,7 +2508,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
run_pending = 1;
}
- switch(dev->type) {
+ switch (dev->type) {
#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
case ARPHRD_SIT:
addrconf_sit_config(dev);
@@ -2530,25 +2525,30 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
addrconf_dev_config(dev);
break;
}
+
if (idev) {
if (run_pending)
addrconf_dad_run(idev);
- /* If the MTU changed during the interface down, when the
- interface up, the changed MTU must be reflected in the
- idev as well as routers.
+ /*
+ * If the MTU changed during the interface down,
+ * when the interface up, the changed MTU must be
+ * reflected in the idev as well as routers.
*/
- if (idev->cnf.mtu6 != dev->mtu && dev->mtu >= IPV6_MIN_MTU) {
+ if (idev->cnf.mtu6 != dev->mtu &&
+ dev->mtu >= IPV6_MIN_MTU) {
rt6_mtu_change(dev, dev->mtu);
idev->cnf.mtu6 = dev->mtu;
}
idev->tstamp = jiffies;
inet6_ifinfo_notify(RTM_NEWLINK, idev);
- /* If the changed mtu during down is lower than IPV6_MIN_MTU
- stop IPv6 on this interface.
+
+ /*
+ * If the changed mtu during down is lower than
+ * IPV6_MIN_MTU stop IPv6 on this interface.
*/
if (dev->mtu < IPV6_MIN_MTU)
- addrconf_ifdown(dev, event != NETDEV_DOWN);
+ addrconf_ifdown(dev, 1);
}
break;
@@ -2565,7 +2565,10 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
break;
}
- /* MTU falled under IPV6_MIN_MTU. Stop IPv6 on this interface. */
+ /*
+ * MTU falled under IPV6_MIN_MTU.
+ * Stop IPv6 on this interface.
+ */
case NETDEV_DOWN:
case NETDEV_UNREGISTER:
@@ -2585,9 +2588,10 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
return notifier_from_errno(err);
}
break;
- case NETDEV_BONDING_OLDTYPE:
- case NETDEV_BONDING_NEWTYPE:
- addrconf_bonding_change(dev, event);
+
+ case NETDEV_PRE_TYPE_CHANGE:
+ case NETDEV_POST_TYPE_CHANGE:
+ addrconf_type_change(dev, event);
break;
}
@@ -2599,28 +2603,27 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
*/
static struct notifier_block ipv6_dev_notf = {
.notifier_call = addrconf_notify,
- .priority = 0
};
-static void addrconf_bonding_change(struct net_device *dev, unsigned long event)
+static void addrconf_type_change(struct net_device *dev, unsigned long event)
{
struct inet6_dev *idev;
ASSERT_RTNL();
idev = __in6_dev_get(dev);
- if (event == NETDEV_BONDING_NEWTYPE)
+ if (event == NETDEV_POST_TYPE_CHANGE)
ipv6_mc_remap(idev);
- else if (event == NETDEV_BONDING_OLDTYPE)
+ else if (event == NETDEV_PRE_TYPE_CHANGE)
ipv6_mc_unmap(idev);
}
static int addrconf_ifdown(struct net_device *dev, int how)
{
- struct inet6_dev *idev;
- struct inet6_ifaddr *ifa, *keep_list, **bifa;
struct net *net = dev_net(dev);
- int i;
+ struct inet6_dev *idev;
+ struct inet6_ifaddr *ifa;
+ LIST_HEAD(keep_list);
ASSERT_RTNL();
@@ -2631,8 +2634,9 @@ static int addrconf_ifdown(struct net_device *dev, int how)
if (idev == NULL)
return -ENODEV;
- /* Step 1: remove reference to ipv6 device from parent device.
- Do not dev_put!
+ /*
+ * Step 1: remove reference to ipv6 device from parent device.
+ * Do not dev_put!
*/
if (how) {
idev->dead = 1;
@@ -2645,40 +2649,21 @@ static int addrconf_ifdown(struct net_device *dev, int how)
}
- /* Step 2: clear hash table */
- for (i=0; i<IN6_ADDR_HSIZE; i++) {
- bifa = &inet6_addr_lst[i];
-
- write_lock_bh(&addrconf_hash_lock);
- while ((ifa = *bifa) != NULL) {
- if (ifa->idev == idev &&
- (how || !(ifa->flags&IFA_F_PERMANENT) ||
- ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) {
- *bifa = ifa->lst_next;
- ifa->lst_next = NULL;
- __in6_ifa_put(ifa);
- continue;
- }
- bifa = &ifa->lst_next;
- }
- write_unlock_bh(&addrconf_hash_lock);
- }
-
write_lock_bh(&idev->lock);
- /* Step 3: clear flags for stateless addrconf */
+ /* Step 2: clear flags for stateless addrconf */
if (!how)
idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY);
- /* Step 4: clear address list */
#ifdef CONFIG_IPV6_PRIVACY
if (how && del_timer(&idev->regen_timer))
in6_dev_put(idev);
- /* clear tempaddr list */
- while ((ifa = idev->tempaddr_list) != NULL) {
- idev->tempaddr_list = ifa->tmp_next;
- ifa->tmp_next = NULL;
+ /* Step 3: clear tempaddr list */
+ while (!list_empty(&idev->tempaddr_list)) {
+ ifa = list_first_entry(&idev->tempaddr_list,
+ struct inet6_ifaddr, tmp_list);
+ list_del(&ifa->tmp_list);
ifa->dead = 1;
write_unlock_bh(&idev->lock);
spin_lock_bh(&ifa->lock);
@@ -2692,23 +2677,18 @@ static int addrconf_ifdown(struct net_device *dev, int how)
write_lock_bh(&idev->lock);
}
#endif
- keep_list = NULL;
- bifa = &keep_list;
- while ((ifa = idev->addr_list) != NULL) {
- idev->addr_list = ifa->if_next;
- ifa->if_next = NULL;
+ while (!list_empty(&idev->addr_list)) {
+ ifa = list_first_entry(&idev->addr_list,
+ struct inet6_ifaddr, if_list);
addrconf_del_timer(ifa);
/* If just doing link down, and address is permanent
and not link-local, then retain it. */
- if (how == 0 &&
+ if (!how &&
(ifa->flags&IFA_F_PERMANENT) &&
!(ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) {
-
- /* Move to holding list */
- *bifa = ifa;
- bifa = &ifa->if_next;
+ list_move_tail(&ifa->if_list, &keep_list);
/* If not doing DAD on this address, just keep it. */
if ((dev->flags&(IFF_NOARP|IFF_LOOPBACK)) ||
@@ -2723,24 +2703,32 @@ static int addrconf_ifdown(struct net_device *dev, int how)
/* Flag it for later restoration when link comes up */
ifa->flags |= IFA_F_TENTATIVE;
in6_ifa_hold(ifa);
+ write_unlock_bh(&idev->lock);
} else {
+ list_del(&ifa->if_list);
ifa->dead = 1;
+ write_unlock_bh(&idev->lock);
+
+ /* clear hash table */
+ spin_lock_bh(&addrconf_hash_lock);
+ hlist_del_init_rcu(&ifa->addr_lst);
+ spin_unlock_bh(&addrconf_hash_lock);
}
- write_unlock_bh(&idev->lock);
__ipv6_ifa_notify(RTM_DELADDR, ifa);
- atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa);
+ if (ifa->dead)
+ atomic_notifier_call_chain(&inet6addr_chain,
+ NETDEV_DOWN, ifa);
in6_ifa_put(ifa);
write_lock_bh(&idev->lock);
}
- idev->addr_list = keep_list;
+ list_splice(&keep_list, &idev->addr_list);
write_unlock_bh(&idev->lock);
/* Step 5: Discard multicast list */
-
if (how)
ipv6_mc_destroy_dev(idev);
else
@@ -2748,8 +2736,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
idev->tstamp = jiffies;
- /* Shot the device (if unregistered) */
-
+ /* Last: Shot the device (if unregistered) */
if (how) {
addrconf_sysctl_unregister(idev);
neigh_parms_release(&nd_tbl, idev->nd_parms);
@@ -2860,7 +2847,7 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags)
* Optimistic nodes can start receiving
* Frames right away
*/
- if(ifp->flags & IFA_F_OPTIMISTIC)
+ if (ifp->flags & IFA_F_OPTIMISTIC)
ip6_ins_rt(ifp->rt);
addrconf_dad_kick(ifp);
@@ -2910,7 +2897,7 @@ out:
static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
{
- struct net_device * dev = ifp->idev->dev;
+ struct net_device *dev = ifp->idev->dev;
/*
* Configure the address for reception. Now it is valid.
@@ -2941,11 +2928,12 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
}
}
-static void addrconf_dad_run(struct inet6_dev *idev) {
+static void addrconf_dad_run(struct inet6_dev *idev)
+{
struct inet6_ifaddr *ifp;
read_lock_bh(&idev->lock);
- for (ifp = idev->addr_list; ifp; ifp = ifp->if_next) {
+ list_for_each_entry(ifp, &idev->addr_list, if_list) {
spin_lock(&ifp->lock);
if (!(ifp->flags & IFA_F_TENTATIVE)) {
spin_unlock(&ifp->lock);
@@ -2970,36 +2958,35 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq)
struct net *net = seq_file_net(seq);
for (state->bucket = 0; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
- ifa = inet6_addr_lst[state->bucket];
-
- while (ifa && !net_eq(dev_net(ifa->idev->dev), net))
- ifa = ifa->lst_next;
- if (ifa)
- break;
+ struct hlist_node *n;
+ hlist_for_each_entry_rcu_bh(ifa, n, &inet6_addr_lst[state->bucket],
+ addr_lst)
+ if (net_eq(dev_net(ifa->idev->dev), net))
+ return ifa;
}
- return ifa;
+ return NULL;
}
-static struct inet6_ifaddr *if6_get_next(struct seq_file *seq, struct inet6_ifaddr *ifa)
+static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
+ struct inet6_ifaddr *ifa)
{
struct if6_iter_state *state = seq->private;
struct net *net = seq_file_net(seq);
+ struct hlist_node *n = &ifa->addr_lst;
- ifa = ifa->lst_next;
-try_again:
- if (ifa) {
- if (!net_eq(dev_net(ifa->idev->dev), net)) {
- ifa = ifa->lst_next;
- goto try_again;
- }
- }
+ hlist_for_each_entry_continue_rcu_bh(ifa, n, addr_lst)
+ if (net_eq(dev_net(ifa->idev->dev), net))
+ return ifa;
- if (!ifa && ++state->bucket < IN6_ADDR_HSIZE) {
- ifa = inet6_addr_lst[state->bucket];
- goto try_again;
+ while (++state->bucket < IN6_ADDR_HSIZE) {
+ hlist_for_each_entry_rcu_bh(ifa, n,
+ &inet6_addr_lst[state->bucket], addr_lst) {
+ if (net_eq(dev_net(ifa->idev->dev), net))
+ return ifa;
+ }
}
- return ifa;
+ return NULL;
}
static struct inet6_ifaddr *if6_get_idx(struct seq_file *seq, loff_t pos)
@@ -3007,15 +2994,15 @@ static struct inet6_ifaddr *if6_get_idx(struct seq_file *seq, loff_t pos)
struct inet6_ifaddr *ifa = if6_get_first(seq);
if (ifa)
- while(pos && (ifa = if6_get_next(seq, ifa)) != NULL)
+ while (pos && (ifa = if6_get_next(seq, ifa)) != NULL)
--pos;
return pos ? NULL : ifa;
}
static void *if6_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(addrconf_hash_lock)
+ __acquires(rcu_bh)
{
- read_lock_bh(&addrconf_hash_lock);
+ rcu_read_lock_bh();
return if6_get_idx(seq, *pos);
}
@@ -3029,9 +3016,9 @@ static void *if6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
}
static void if6_seq_stop(struct seq_file *seq, void *v)
- __releases(addrconf_hash_lock)
+ __releases(rcu_bh)
{
- read_unlock_bh(&addrconf_hash_lock);
+ rcu_read_unlock_bh();
}
static int if6_seq_show(struct seq_file *seq, void *v)
@@ -3101,10 +3088,12 @@ void if6_proc_exit(void)
int ipv6_chk_home_addr(struct net *net, struct in6_addr *addr)
{
int ret = 0;
- struct inet6_ifaddr * ifp;
- u8 hash = ipv6_addr_hash(addr);
- read_lock_bh(&addrconf_hash_lock);
- for (ifp = inet6_addr_lst[hash]; ifp; ifp = ifp->lst_next) {
+ struct inet6_ifaddr *ifp = NULL;
+ struct hlist_node *n;
+ unsigned int hash = ipv6_addr_hash(addr);
+
+ rcu_read_lock_bh();
+ hlist_for_each_entry_rcu_bh(ifp, n, &inet6_addr_lst[hash], addr_lst) {
if (!net_eq(dev_net(ifp->idev->dev), net))
continue;
if (ipv6_addr_equal(&ifp->addr, addr) &&
@@ -3113,7 +3102,7 @@ int ipv6_chk_home_addr(struct net *net, struct in6_addr *addr)
break;
}
}
- read_unlock_bh(&addrconf_hash_lock);
+ rcu_read_unlock_bh();
return ret;
}
#endif
@@ -3124,43 +3113,35 @@ int ipv6_chk_home_addr(struct net *net, struct in6_addr *addr)
static void addrconf_verify(unsigned long foo)
{
+ unsigned long now, next, next_sec, next_sched;
struct inet6_ifaddr *ifp;
- unsigned long now, next;
+ struct hlist_node *node;
int i;
- spin_lock_bh(&addrconf_verify_lock);
+ rcu_read_lock_bh();
+ spin_lock(&addrconf_verify_lock);
now = jiffies;
- next = now + ADDR_CHECK_FREQUENCY;
+ next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
del_timer(&addr_chk_timer);
- for (i=0; i < IN6_ADDR_HSIZE; i++) {
-
+ for (i = 0; i < IN6_ADDR_HSIZE; i++) {
restart:
- read_lock(&addrconf_hash_lock);
- for (ifp=inet6_addr_lst[i]; ifp; ifp=ifp->lst_next) {
+ hlist_for_each_entry_rcu_bh(ifp, node,
+ &inet6_addr_lst[i], addr_lst) {
unsigned long age;
-#ifdef CONFIG_IPV6_PRIVACY
- unsigned long regen_advance;
-#endif
if (ifp->flags & IFA_F_PERMANENT)
continue;
spin_lock(&ifp->lock);
- age = (now - ifp->tstamp) / HZ;
-
-#ifdef CONFIG_IPV6_PRIVACY
- regen_advance = ifp->idev->cnf.regen_max_retry *
- ifp->idev->cnf.dad_transmits *
- ifp->idev->nd_parms->retrans_time / HZ;
-#endif
+ /* We try to batch several events at once. */
+ age = (now - ifp->tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
if (ifp->valid_lft != INFINITY_LIFE_TIME &&
age >= ifp->valid_lft) {
spin_unlock(&ifp->lock);
in6_ifa_hold(ifp);
- read_unlock(&addrconf_hash_lock);
ipv6_del_addr(ifp);
goto restart;
} else if (ifp->prefered_lft == INFINITY_LIFE_TIME) {
@@ -3182,7 +3163,6 @@ restart:
if (deprecate) {
in6_ifa_hold(ifp);
- read_unlock(&addrconf_hash_lock);
ipv6_ifa_notify(0, ifp);
in6_ifa_put(ifp);
@@ -3191,6 +3171,10 @@ restart:
#ifdef CONFIG_IPV6_PRIVACY
} else if ((ifp->flags&IFA_F_TEMPORARY) &&
!(ifp->flags&IFA_F_TENTATIVE)) {
+ unsigned long regen_advance = ifp->idev->cnf.regen_max_retry *
+ ifp->idev->cnf.dad_transmits *
+ ifp->idev->nd_parms->retrans_time / HZ;
+
if (age >= ifp->prefered_lft - regen_advance) {
struct inet6_ifaddr *ifpub = ifp->ifpub;
if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
@@ -3200,7 +3184,7 @@ restart:
in6_ifa_hold(ifp);
in6_ifa_hold(ifpub);
spin_unlock(&ifp->lock);
- read_unlock(&addrconf_hash_lock);
+
spin_lock(&ifpub->lock);
ifpub->regen_count = 0;
spin_unlock(&ifpub->lock);
@@ -3220,12 +3204,26 @@ restart:
spin_unlock(&ifp->lock);
}
}
- read_unlock(&addrconf_hash_lock);
}
- addr_chk_timer.expires = time_before(next, jiffies + HZ) ? jiffies + HZ : next;
+ next_sec = round_jiffies_up(next);
+ next_sched = next;
+
+ /* If rounded timeout is accurate enough, accept it. */
+ if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
+ next_sched = next_sec;
+
+ /* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
+ if (time_before(next_sched, jiffies + ADDRCONF_TIMER_FUZZ_MAX))
+ next_sched = jiffies + ADDRCONF_TIMER_FUZZ_MAX;
+
+ ADBG((KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
+ now, next, next_sec, next_sched));
+
+ addr_chk_timer.expires = next_sched;
add_timer(&addr_chk_timer);
- spin_unlock_bh(&addrconf_verify_lock);
+ spin_unlock(&addrconf_verify_lock);
+ rcu_read_unlock_bh();
}
static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local)
@@ -3515,8 +3513,7 @@ static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
return nlmsg_end(skb, nlh);
}
-enum addr_type_t
-{
+enum addr_type_t {
UNICAST_ADDR,
MULTICAST_ADDR,
ANYCAST_ADDR,
@@ -3527,7 +3524,6 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
struct netlink_callback *cb, enum addr_type_t type,
int s_ip_idx, int *p_ip_idx)
{
- struct inet6_ifaddr *ifa;
struct ifmcaddr6 *ifmca;
struct ifacaddr6 *ifaca;
int err = 1;
@@ -3535,11 +3531,12 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
read_lock_bh(&idev->lock);
switch (type) {
- case UNICAST_ADDR:
+ case UNICAST_ADDR: {
+ struct inet6_ifaddr *ifa;
+
/* unicast address incl. temp addr */
- for (ifa = idev->addr_list; ifa;
- ifa = ifa->if_next, ip_idx++) {
- if (ip_idx < s_ip_idx)
+ list_for_each_entry(ifa, &idev->addr_list, if_list) {
+ if (++ip_idx < s_ip_idx)
continue;
err = inet6_fill_ifaddr(skb, ifa,
NETLINK_CB(cb->skb).pid,
@@ -3550,6 +3547,7 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
break;
}
break;
+ }
case MULTICAST_ADDR:
/* multicast address */
for (ifmca = idev->mc_list; ifmca;
@@ -3614,7 +3612,8 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
if (h > s_h || idx > s_idx)
s_ip_idx = 0;
ip_idx = 0;
- if ((idev = __in6_dev_get(dev)) == NULL)
+ idev = __in6_dev_get(dev);
+ if (!idev)
goto cont;
if (in6_dump_addrs(idev, skb, cb, type,
@@ -3681,12 +3680,14 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr* nlh,
if (ifm->ifa_index)
dev = __dev_get_by_index(net, ifm->ifa_index);
- if ((ifa = ipv6_get_ifaddr(net, addr, dev, 1)) == NULL) {
+ ifa = ipv6_get_ifaddr(net, addr, dev, 1);
+ if (!ifa) {
err = -EADDRNOTAVAIL;
goto errout;
}
- if ((skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL)) == NULL) {
+ skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL);
+ if (!skb) {
err = -ENOBUFS;
goto errout_ifa;
}
@@ -3811,7 +3812,7 @@ static inline void __snmp6_fill_stats(u64 *stats, void __percpu **mib,
static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
int bytes)
{
- switch(attrtype) {
+ switch (attrtype) {
case IFLA_INET6_STATS:
__snmp6_fill_stats(stats, (void __percpu **)idev->stats.ipv6, IPSTATS_MIB_MAX, bytes);
break;
@@ -4047,7 +4048,8 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
addrconf_leave_anycast(ifp);
addrconf_leave_solict(ifp->idev, &ifp->addr);
dst_hold(&ifp->rt->u.dst);
- if (ip6_del_rt(ifp->rt))
+
+ if (ifp->dead && ip6_del_rt(ifp->rt))
dst_free(&ifp->rt->u.dst);
break;
}
@@ -4163,211 +4165,211 @@ static struct addrconf_sysctl_table
.sysctl_header = NULL,
.addrconf_vars = {
{
- .procname = "forwarding",
- .data = &ipv6_devconf.forwarding,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = addrconf_sysctl_forward,
+ .procname = "forwarding",
+ .data = &ipv6_devconf.forwarding,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = addrconf_sysctl_forward,
},
{
- .procname = "hop_limit",
- .data = &ipv6_devconf.hop_limit,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "hop_limit",
+ .data = &ipv6_devconf.hop_limit,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
{
- .procname = "mtu",
- .data = &ipv6_devconf.mtu6,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "mtu",
+ .data = &ipv6_devconf.mtu6,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
{
- .procname = "accept_ra",
- .data = &ipv6_devconf.accept_ra,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "accept_ra",
+ .data = &ipv6_devconf.accept_ra,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
{
- .procname = "accept_redirects",
- .data = &ipv6_devconf.accept_redirects,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "accept_redirects",
+ .data = &ipv6_devconf.accept_redirects,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
{
- .procname = "autoconf",
- .data = &ipv6_devconf.autoconf,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "autoconf",
+ .data = &ipv6_devconf.autoconf,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
{
- .procname = "dad_transmits",
- .data = &ipv6_devconf.dad_transmits,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "dad_transmits",
+ .data = &ipv6_devconf.dad_transmits,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
{
- .procname = "router_solicitations",
- .data = &ipv6_devconf.rtr_solicits,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "router_solicitations",
+ .data = &ipv6_devconf.rtr_solicits,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
{
- .procname = "router_solicitation_interval",
- .data = &ipv6_devconf.rtr_solicit_interval,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
+ .procname = "router_solicitation_interval",
+ .data = &ipv6_devconf.rtr_solicit_interval,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
},
{
- .procname = "router_solicitation_delay",
- .data = &ipv6_devconf.rtr_solicit_delay,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
+ .procname = "router_solicitation_delay",
+ .data = &ipv6_devconf.rtr_solicit_delay,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
},
{
- .procname = "force_mld_version",
- .data = &ipv6_devconf.force_mld_version,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "force_mld_version",
+ .data = &ipv6_devconf.force_mld_version,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
#ifdef CONFIG_IPV6_PRIVACY
{
- .procname = "use_tempaddr",
- .data = &ipv6_devconf.use_tempaddr,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "use_tempaddr",
+ .data = &ipv6_devconf.use_tempaddr,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
{
- .procname = "temp_valid_lft",
- .data = &ipv6_devconf.temp_valid_lft,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "temp_valid_lft",
+ .data = &ipv6_devconf.temp_valid_lft,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
{
- .procname = "temp_prefered_lft",
- .data = &ipv6_devconf.temp_prefered_lft,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "temp_prefered_lft",
+ .data = &ipv6_devconf.temp_prefered_lft,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
{
- .procname = "regen_max_retry",
- .data = &ipv6_devconf.regen_max_retry,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "regen_max_retry",
+ .data = &ipv6_devconf.regen_max_retry,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
{
- .procname = "max_desync_factor",
- .data = &ipv6_devconf.max_desync_factor,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "max_desync_factor",
+ .data = &ipv6_devconf.max_desync_factor,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
#endif
{
- .procname = "max_addresses",
- .data = &ipv6_devconf.max_addresses,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "max_addresses",
+ .data = &ipv6_devconf.max_addresses,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
{
- .procname = "accept_ra_defrtr",
- .data = &ipv6_devconf.accept_ra_defrtr,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "accept_ra_defrtr",
+ .data = &ipv6_devconf.accept_ra_defrtr,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
{
- .procname = "accept_ra_pinfo",
- .data = &ipv6_devconf.accept_ra_pinfo,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "accept_ra_pinfo",
+ .data = &ipv6_devconf.accept_ra_pinfo,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
#ifdef CONFIG_IPV6_ROUTER_PREF
{
- .procname = "accept_ra_rtr_pref",
- .data = &ipv6_devconf.accept_ra_rtr_pref,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "accept_ra_rtr_pref",
+ .data = &ipv6_devconf.accept_ra_rtr_pref,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
{
- .procname = "router_probe_interval",
- .data = &ipv6_devconf.rtr_probe_interval,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
+ .procname = "router_probe_interval",
+ .data = &ipv6_devconf.rtr_probe_interval,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
},
#ifdef CONFIG_IPV6_ROUTE_INFO
{
- .procname = "accept_ra_rt_info_max_plen",
- .data = &ipv6_devconf.accept_ra_rt_info_max_plen,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "accept_ra_rt_info_max_plen",
+ .data = &ipv6_devconf.accept_ra_rt_info_max_plen,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
#endif
#endif
{
- .procname = "proxy_ndp",
- .data = &ipv6_devconf.proxy_ndp,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "proxy_ndp",
+ .data = &ipv6_devconf.proxy_ndp,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
{
- .procname = "accept_source_route",
- .data = &ipv6_devconf.accept_source_route,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "accept_source_route",
+ .data = &ipv6_devconf.accept_source_route,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
{
- .procname = "optimistic_dad",
- .data = &ipv6_devconf.optimistic_dad,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "optimistic_dad",
+ .data = &ipv6_devconf.optimistic_dad,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
#endif
#ifdef CONFIG_IPV6_MROUTE
{
- .procname = "mc_forwarding",
- .data = &ipv6_devconf.mc_forwarding,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = proc_dointvec,
+ .procname = "mc_forwarding",
+ .data = &ipv6_devconf.mc_forwarding,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = proc_dointvec,
},
#endif
{
- .procname = "disable_ipv6",
- .data = &ipv6_devconf.disable_ipv6,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = addrconf_sysctl_disable,
+ .procname = "disable_ipv6",
+ .data = &ipv6_devconf.disable_ipv6,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = addrconf_sysctl_disable,
},
{
- .procname = "accept_dad",
- .data = &ipv6_devconf.accept_dad,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .procname = "accept_dad",
+ .data = &ipv6_devconf.accept_dad,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
},
{
.procname = "force_tllao",
@@ -4403,8 +4405,8 @@ static int __addrconf_sysctl_register(struct net *net, char *dev_name,
if (t == NULL)
goto out;
- for (i=0; t->addrconf_vars[i].data; i++) {
- t->addrconf_vars[i].data += (char*)p - (char*)&ipv6_devconf;
+ for (i = 0; t->addrconf_vars[i].data; i++) {
+ t->addrconf_vars[i].data += (char *)p - (char *)&ipv6_devconf;
t->addrconf_vars[i].extra1 = idev; /* embedded; no ref */
t->addrconf_vars[i].extra2 = net;
}
@@ -4541,14 +4543,12 @@ int register_inet6addr_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_register(&inet6addr_chain, nb);
}
-
EXPORT_SYMBOL(register_inet6addr_notifier);
int unregister_inet6addr_notifier(struct notifier_block *nb)
{
- return atomic_notifier_chain_unregister(&inet6addr_chain,nb);
+ return atomic_notifier_chain_unregister(&inet6addr_chain, nb);
}
-
EXPORT_SYMBOL(unregister_inet6addr_notifier);
/*
@@ -4557,11 +4557,12 @@ EXPORT_SYMBOL(unregister_inet6addr_notifier);
int __init addrconf_init(void)
{
- int err;
+ int i, err;
- if ((err = ipv6_addr_label_init()) < 0) {
- printk(KERN_CRIT "IPv6 Addrconf: cannot initialize default policy table: %d.\n",
- err);
+ err = ipv6_addr_label_init();
+ if (err < 0) {
+ printk(KERN_CRIT "IPv6 Addrconf:"
+ " cannot initialize default policy table: %d.\n", err);
return err;
}
@@ -4592,6 +4593,9 @@ int __init addrconf_init(void)
if (err)
goto errlo;
+ for (i = 0; i < IN6_ADDR_HSIZE; i++)
+ INIT_HLIST_HEAD(&inet6_addr_lst[i]);
+
register_netdevice_notifier(&ipv6_dev_notf);
addrconf_verify(0);
@@ -4620,7 +4624,6 @@ errlo:
void addrconf_cleanup(void)
{
- struct inet6_ifaddr *ifa;
struct net_device *dev;
int i;
@@ -4640,20 +4643,10 @@ void addrconf_cleanup(void)
/*
* Check hash table.
*/
- write_lock_bh(&addrconf_hash_lock);
- for (i=0; i < IN6_ADDR_HSIZE; i++) {
- for (ifa=inet6_addr_lst[i]; ifa; ) {
- struct inet6_ifaddr *bifa;
-
- bifa = ifa;
- ifa = ifa->lst_next;
- printk(KERN_DEBUG "bug: IPv6 address leakage detected: ifa=%p\n", bifa);
- /* Do not free it; something is wrong.
- Now we can investigate it with debugger.
- */
- }
- }
- write_unlock_bh(&addrconf_hash_lock);
+ spin_lock_bh(&addrconf_hash_lock);
+ for (i = 0; i < IN6_ADDR_HSIZE; i++)
+ WARN_ON(!hlist_empty(&inet6_addr_lst[i]));
+ spin_unlock_bh(&addrconf_hash_lock);
del_timer(&addr_chk_timer);
rtnl_unlock();
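addrconf_verify() above now batches wake-ups: it rounds the next run up to a full second when the rounded value is within ADDRCONF_TIMER_FUZZ of the exact deadline, and it never rearms the timer sooner than ADDRCONF_TIMER_FUZZ_MAX from now. A small user-space model of that rescheduling decision; HZ, the rounding helper and the sample deadline in main() are stand-ins for illustration only.

#include <stdio.h>

#define HZ		250UL
#define TIMER_FUZZ	(HZ / 4)
#define TIMER_FUZZ_MAX	(HZ)

/* Kernel-style wrap-safe "a is before b" comparison. */
static int time_before(unsigned long a, unsigned long b)
{
	return (long)(a - b) < 0;
}

/* Stand-in for round_jiffies_up(): round up to the next full second. */
static unsigned long round_up_sec(unsigned long j)
{
	return ((j + HZ - 1) / HZ) * HZ;
}

static unsigned long next_expiry(unsigned long now, unsigned long next)
{
	unsigned long next_sec = round_up_sec(next);
	unsigned long next_sched = next;

	/* If the rounded timeout is accurate enough, accept it. */
	if (time_before(next_sec, next + TIMER_FUZZ))
		next_sched = next_sec;

	/* And never rearm sooner than TIMER_FUZZ_MAX from now. */
	if (time_before(next_sched, now + TIMER_FUZZ_MAX))
		next_sched = now + TIMER_FUZZ_MAX;

	return next_sched;
}

int main(void)
{
	printf("reschedule at %lu\n", next_expiry(1000, 1100));
	return 0;
}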
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 3f9e86b15e0d..e733942dafe1 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -417,6 +417,9 @@ void inet6_destroy_sock(struct sock *sk)
if ((skb = xchg(&np->pktoptions, NULL)) != NULL)
kfree_skb(skb);
+ if ((skb = xchg(&np->rxpmtu, NULL)) != NULL)
+ kfree_skb(skb);
+
/* Free flowlabels */
fl6_free_socklist(sk);
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 622dc7939a1b..712684687c9a 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -222,6 +222,8 @@ void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
if (!skb)
return;
+ skb->protocol = htons(ETH_P_IPV6);
+
serr = SKB_EXT_ERR(skb);
serr->ee.ee_errno = err;
serr->ee.ee_origin = SO_EE_ORIGIN_ICMP6;
@@ -255,6 +257,8 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi *fl, u32 info)
if (!skb)
return;
+ skb->protocol = htons(ETH_P_IPV6);
+
skb_put(skb, sizeof(struct ipv6hdr));
skb_reset_network_header(skb);
iph = ipv6_hdr(skb);
@@ -278,6 +282,45 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi *fl, u32 info)
kfree_skb(skb);
}
+void ipv6_local_rxpmtu(struct sock *sk, struct flowi *fl, u32 mtu)
+{
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct ipv6hdr *iph;
+ struct sk_buff *skb;
+ struct ip6_mtuinfo *mtu_info;
+
+ if (!np->rxopt.bits.rxpmtu)
+ return;
+
+ skb = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ skb_put(skb, sizeof(struct ipv6hdr));
+ skb_reset_network_header(skb);
+ iph = ipv6_hdr(skb);
+ ipv6_addr_copy(&iph->daddr, &fl->fl6_dst);
+
+ mtu_info = IP6CBMTU(skb);
+ if (!mtu_info) {
+ kfree_skb(skb);
+ return;
+ }
+
+ mtu_info->ip6m_mtu = mtu;
+ mtu_info->ip6m_addr.sin6_family = AF_INET6;
+ mtu_info->ip6m_addr.sin6_port = 0;
+ mtu_info->ip6m_addr.sin6_flowinfo = 0;
+ mtu_info->ip6m_addr.sin6_scope_id = fl->oif;
+ ipv6_addr_copy(&mtu_info->ip6m_addr.sin6_addr, &ipv6_hdr(skb)->daddr);
+
+ __skb_pull(skb, skb_tail_pointer(skb) - skb->data);
+ skb_reset_transport_header(skb);
+
+ skb = xchg(&np->rxpmtu, skb);
+ kfree_skb(skb);
+}
+
/*
* Handle MSG_ERRQUEUE
*/
@@ -319,7 +362,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
sin->sin6_flowinfo = 0;
sin->sin6_port = serr->port;
sin->sin6_scope_id = 0;
- if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6) {
+ if (skb->protocol == htons(ETH_P_IPV6)) {
ipv6_addr_copy(&sin->sin6_addr,
(struct in6_addr *)(nh + serr->addr_offset));
if (np->sndflow)
@@ -341,7 +384,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
sin->sin6_family = AF_INET6;
sin->sin6_flowinfo = 0;
sin->sin6_scope_id = 0;
- if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6) {
+ if (skb->protocol == htons(ETH_P_IPV6)) {
ipv6_addr_copy(&sin->sin6_addr, &ipv6_hdr(skb)->saddr);
if (np->rxopt.all)
datagram_recv_ctl(sk, msg, skb);
@@ -381,6 +424,54 @@ out:
return err;
}
+/*
+ * Handle IPV6_RECVPATHMTU
+ */
+int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len)
+{
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct sk_buff *skb;
+ struct sockaddr_in6 *sin;
+ struct ip6_mtuinfo mtu_info;
+ int err;
+ int copied;
+
+ err = -EAGAIN;
+ skb = xchg(&np->rxpmtu, NULL);
+ if (skb == NULL)
+ goto out;
+
+ copied = skb->len;
+ if (copied > len) {
+ msg->msg_flags |= MSG_TRUNC;
+ copied = len;
+ }
+ err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ if (err)
+ goto out_free_skb;
+
+ sock_recv_timestamp(msg, sk, skb);
+
+ memcpy(&mtu_info, IP6CBMTU(skb), sizeof(mtu_info));
+
+ sin = (struct sockaddr_in6 *)msg->msg_name;
+ if (sin) {
+ sin->sin6_family = AF_INET6;
+ sin->sin6_flowinfo = 0;
+ sin->sin6_port = 0;
+ sin->sin6_scope_id = mtu_info.ip6m_addr.sin6_scope_id;
+ ipv6_addr_copy(&sin->sin6_addr, &mtu_info.ip6m_addr.sin6_addr);
+ }
+
+ put_cmsg(msg, SOL_IPV6, IPV6_PATHMTU, sizeof(mtu_info), &mtu_info);
+
+ err = copied;
+
+out_free_skb:
+ kfree_skb(skb);
+out:
+ return err;
+}
int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
@@ -497,7 +588,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
int datagram_send_ctl(struct net *net,
struct msghdr *msg, struct flowi *fl,
struct ipv6_txoptions *opt,
- int *hlimit, int *tclass)
+ int *hlimit, int *tclass, int *dontfrag)
{
struct in6_pktinfo *src_info;
struct cmsghdr *cmsg;
@@ -737,6 +828,25 @@ int datagram_send_ctl(struct net *net,
break;
}
+
+ case IPV6_DONTFRAG:
+ {
+ int df;
+
+ err = -EINVAL;
+ if (cmsg->cmsg_len != CMSG_LEN(sizeof(int))) {
+ goto exit_f;
+ }
+
+ df = *(int *)CMSG_DATA(cmsg);
+ if (df < 0 || df > 1)
+ goto exit_f;
+
+ err = 0;
+ *dontfrag = df;
+
+ break;
+ }
default:
LIMIT_NETDEBUG(KERN_DEBUG "invalid cmsg type: %d\n",
cmsg->cmsg_type);
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 5e463c43fcc2..8e44f8f9c188 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -208,7 +208,6 @@ static int fib6_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
{
struct fib6_rule *rule6 = (struct fib6_rule *) rule;
- frh->family = AF_INET6;
frh->dst_len = rule6->dst.plen;
frh->src_len = rule6->src.plen;
frh->tos = rule6->tclass;
@@ -238,7 +237,7 @@ static size_t fib6_rule_nlmsg_payload(struct fib_rule *rule)
+ nla_total_size(16); /* src */
}
-static struct fib_rules_ops fib6_rules_ops_template = {
+static const struct fib_rules_ops __net_initdata fib6_rules_ops_template = {
.family = AF_INET6,
.rule_size = sizeof(struct fib6_rule),
.addr_size = sizeof(struct in6_addr),
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 3330a4bd6157..ce7992982557 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -481,8 +481,9 @@ route_done:
len + sizeof(struct icmp6hdr),
sizeof(struct icmp6hdr), hlimit,
np->tclass, NULL, &fl, (struct rt6_info*)dst,
- MSG_DONTWAIT);
+ MSG_DONTWAIT, np->dontfrag);
if (err) {
+ ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
ip6_flush_pending_frames(sk);
goto out_put;
}
@@ -560,9 +561,11 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr),
sizeof(struct icmp6hdr), hlimit, np->tclass, NULL, &fl,
- (struct rt6_info*)dst, MSG_DONTWAIT);
+ (struct rt6_info*)dst, MSG_DONTWAIT,
+ np->dontfrag);
if (err) {
+ ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
ip6_flush_pending_frames(sk);
goto out_put;
}
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 628db24bcf22..0c5e3c3b7fd5 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -178,7 +178,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
return dst;
}
-int inet6_csk_xmit(struct sk_buff *skb, int ipfragok)
+int inet6_csk_xmit(struct sk_buff *skb)
{
struct sock *sk = skb->sk;
struct inet_sock *inet = inet_sk(sk);
@@ -234,7 +234,7 @@ int inet6_csk_xmit(struct sk_buff *skb, int ipfragok)
/* Restore final destination back after routing done */
ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
- return ip6_xmit(sk, skb, &fl, np->opt, 0);
+ return ip6_xmit(sk, skb, &fl, np->opt);
}
EXPORT_SYMBOL_GPL(inet6_csk_xmit);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 6b82e02158c6..92a122b7795d 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -128,12 +128,24 @@ static __inline__ u32 fib6_new_sernum(void)
/*
* test bit
*/
+#if defined(__LITTLE_ENDIAN)
+# define BITOP_BE32_SWIZZLE (0x1F & ~7)
+#else
+# define BITOP_BE32_SWIZZLE 0
+#endif
static __inline__ __be32 addr_bit_set(void *token, int fn_bit)
{
__be32 *addr = token;
-
- return htonl(1 << ((~fn_bit)&0x1F)) & addr[fn_bit>>5];
+ /*
+ * Here,
+ * 1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f)
+ * is optimized version of
+ * htonl(1 << ((~fn_bit)&0x1F))
+ * See include/asm-generic/bitops/le.h.
+ */
+ return (__force __be32)(1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f)) &
+ addr[fn_bit >> 5];
}
static __inline__ struct fib6_node * node_alloc(void)
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 14e23216eb28..13654686aeab 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -360,7 +360,8 @@ fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval,
msg.msg_control = (void*)(fl->opt+1);
flowi.oif = 0;
- err = datagram_send_ctl(net, &msg, &flowi, fl->opt, &junk, &junk);
+ err = datagram_send_ctl(net, &msg, &flowi, fl->opt, &junk,
+ &junk, &junk);
if (err)
goto done;
err = -EINVAL;
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 6aa7ee1295c2..a83e9209cecc 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -143,7 +143,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
/* Must drop socket now because of tproxy. */
skb_orphan(skb);
- return NF_HOOK(PF_INET6, NF_INET_PRE_ROUTING, skb, dev, NULL,
+ return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, dev, NULL,
ip6_rcv_finish);
err:
IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS);
@@ -236,7 +236,7 @@ discard:
int ip6_input(struct sk_buff *skb)
{
- return NF_HOOK(PF_INET6, NF_INET_LOCAL_IN, skb, skb->dev, NULL,
+ return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_IN, skb, skb->dev, NULL,
ip6_input_finish);
}
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 75d5ef830097..5173acaeb501 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -67,8 +67,8 @@ int __ip6_local_out(struct sk_buff *skb)
len = 0;
ipv6_hdr(skb)->payload_len = htons(len);
- return nf_hook(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb_dst(skb)->dev,
- dst_output);
+ return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
+ skb_dst(skb)->dev, dst_output);
}
int ip6_local_out(struct sk_buff *skb)
@@ -83,22 +83,6 @@ int ip6_local_out(struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(ip6_local_out);
-static int ip6_output_finish(struct sk_buff *skb)
-{
- struct dst_entry *dst = skb_dst(skb);
-
- if (dst->hh)
- return neigh_hh_output(dst->hh, skb);
- else if (dst->neighbour)
- return dst->neighbour->output(skb);
-
- IP6_INC_STATS_BH(dev_net(dst->dev),
- ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
- kfree_skb(skb);
- return -EINVAL;
-
-}
-
/* dev_loopback_xmit for use with netfilter. */
static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
{
@@ -112,8 +96,7 @@ static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
return 0;
}
-
-static int ip6_output2(struct sk_buff *skb)
+static int ip6_finish_output2(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
struct net_device *dev = dst->dev;
@@ -135,8 +118,8 @@ static int ip6_output2(struct sk_buff *skb)
is not supported in any case.
*/
if (newskb)
- NF_HOOK(PF_INET6, NF_INET_POST_ROUTING, newskb,
- NULL, newskb->dev,
+ NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
+ newskb, NULL, newskb->dev,
ip6_dev_loopback_xmit);
if (ipv6_hdr(skb)->hop_limit == 0) {
@@ -151,8 +134,15 @@ static int ip6_output2(struct sk_buff *skb)
skb->len);
}
- return NF_HOOK(PF_INET6, NF_INET_POST_ROUTING, skb, NULL, skb->dev,
- ip6_output_finish);
+ if (dst->hh)
+ return neigh_hh_output(dst->hh, skb);
+ else if (dst->neighbour)
+ return dst->neighbour->output(skb);
+
+ IP6_INC_STATS_BH(dev_net(dst->dev),
+ ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
+ kfree_skb(skb);
+ return -EINVAL;
}
static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
@@ -163,29 +153,37 @@ static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
}
+static int ip6_finish_output(struct sk_buff *skb)
+{
+ if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
+ dst_allfrag(skb_dst(skb)))
+ return ip6_fragment(skb, ip6_finish_output2);
+ else
+ return ip6_finish_output2(skb);
+}
+
int ip6_output(struct sk_buff *skb)
{
+ struct net_device *dev = skb_dst(skb)->dev;
struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
if (unlikely(idev->cnf.disable_ipv6)) {
- IP6_INC_STATS(dev_net(skb_dst(skb)->dev), idev,
+ IP6_INC_STATS(dev_net(dev), idev,
IPSTATS_MIB_OUTDISCARDS);
kfree_skb(skb);
return 0;
}
- if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
- dst_allfrag(skb_dst(skb)))
- return ip6_fragment(skb, ip6_output2);
- else
- return ip6_output2(skb);
+ return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, dev,
+ ip6_finish_output,
+ !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
/*
- * xmit an sk_buff (used by TCP)
+ * xmit an sk_buff (used by TCP, SCTP and DCCP)
*/
int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
- struct ipv6_txoptions *opt, int ipfragok)
+ struct ipv6_txoptions *opt)
{
struct net *net = sock_net(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
@@ -218,8 +216,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
}
kfree_skb(skb);
skb = skb2;
- if (sk)
- skb_set_owner_w(skb, sk);
+ skb_set_owner_w(skb, sk);
}
if (opt->opt_flen)
ipv6_push_frag_opts(skb, opt, &proto);
@@ -231,10 +228,6 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
skb_reset_network_header(skb);
hdr = ipv6_hdr(skb);
- /* Allow local fragmentation. */
- if (ipfragok)
- skb->local_df = 1;
-
/*
* Fill in the IPv6 header
*/
@@ -261,8 +254,8 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) {
IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_OUT, skb->len);
- return NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev,
- dst_output);
+ return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
+ dst->dev, dst_output);
}
if (net_ratelimit())
@@ -538,7 +531,7 @@ int ip6_forward(struct sk_buff *skb)
hdr->hop_limit--;
IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
- return NF_HOOK(PF_INET6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
+ return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
ip6_forward_finish);
error:
@@ -1109,7 +1102,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
int offset, int len, int odd, struct sk_buff *skb),
void *from, int length, int transhdrlen,
int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi *fl,
- struct rt6_info *rt, unsigned int flags)
+ struct rt6_info *rt, unsigned int flags, int dontfrag)
{
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
@@ -1223,15 +1216,23 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
*/
inet->cork.length += length;
- if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
- (rt->u.dst.dev->features & NETIF_F_UFO)) {
+ if (length > mtu) {
+ int proto = sk->sk_protocol;
+ if (dontfrag && (proto == IPPROTO_UDP || proto == IPPROTO_RAW)){
+ ipv6_local_rxpmtu(sk, fl, mtu-exthdrlen);
+ return -EMSGSIZE;
+ }
- err = ip6_ufo_append_data(sk, getfrag, from, length, hh_len,
- fragheaderlen, transhdrlen, mtu,
- flags);
- if (err)
- goto error;
- return 0;
+ if (proto == IPPROTO_UDP &&
+ (rt->u.dst.dev->features & NETIF_F_UFO)) {
+
+ err = ip6_ufo_append_data(sk, getfrag, from, length,
+ hh_len, fragheaderlen,
+ transhdrlen, mtu, flags);
+ if (err)
+ goto error;
+ return 0;
+ }
}
if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 3e333268db89..e0b530ca394c 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1570,7 +1570,7 @@ static int ip6mr_forward2(struct sk_buff *skb, struct mfc6_cache *c, int vifi)
IP6CB(skb)->flags |= IP6SKB_FORWARDED;
- return NF_HOOK(PF_INET6, NF_INET_FORWARD, skb, skb->dev, dev,
+ return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dev,
ip6mr_forward2_finish);
out_free:
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 33f60fca7aa7..bd43f0152c21 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -114,9 +114,9 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
}
opt = xchg(&inet6_sk(sk)->opt, opt);
} else {
- write_lock(&sk->sk_dst_lock);
+ spin_lock(&sk->sk_dst_lock);
opt = xchg(&inet6_sk(sk)->opt, opt);
- write_unlock(&sk->sk_dst_lock);
+ spin_unlock(&sk->sk_dst_lock);
}
sk_dst_reset(sk);
@@ -337,6 +337,13 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
retv = 0;
break;
+ case IPV6_RECVPATHMTU:
+ if (optlen < sizeof(int))
+ goto e_inval;
+ np->rxopt.bits.rxpmtu = valbool;
+ retv = 0;
+ break;
+
case IPV6_HOPOPTS:
case IPV6_RTHDRDSTOPTS:
case IPV6_RTHDR:
@@ -451,7 +458,8 @@ sticky_done:
msg.msg_controllen = optlen;
msg.msg_control = (void*)(opt+1);
- retv = datagram_send_ctl(net, &msg, &fl, opt, &junk, &junk);
+ retv = datagram_send_ctl(net, &msg, &fl, opt, &junk, &junk,
+ &junk);
if (retv)
goto done;
update:
@@ -767,6 +775,17 @@ pref_skip_coa:
break;
}
+ case IPV6_MINHOPCOUNT:
+ if (optlen < sizeof(int))
+ goto e_inval;
+ if (val < 0 || val > 255)
+ goto e_inval;
+ np->min_hopcount = val;
+ break;
+ case IPV6_DONTFRAG:
+ np->dontfrag = valbool;
+ retv = 0;
+ break;
}
release_sock(sk);
@@ -971,14 +990,13 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
case IPV6_MTU:
{
struct dst_entry *dst;
+
val = 0;
- lock_sock(sk);
- dst = sk_dst_get(sk);
- if (dst) {
+ rcu_read_lock();
+ dst = __sk_dst_get(sk);
+ if (dst)
val = dst_mtu(dst);
- dst_release(dst);
- }
- release_sock(sk);
+ rcu_read_unlock();
if (!val)
return -ENOTCONN;
break;
@@ -1056,6 +1074,38 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
val = np->rxopt.bits.rxflow;
break;
+ case IPV6_RECVPATHMTU:
+ val = np->rxopt.bits.rxpmtu;
+ break;
+
+ case IPV6_PATHMTU:
+ {
+ struct dst_entry *dst;
+ struct ip6_mtuinfo mtuinfo;
+
+ if (len < sizeof(mtuinfo))
+ return -EINVAL;
+
+ len = sizeof(mtuinfo);
+ memset(&mtuinfo, 0, sizeof(mtuinfo));
+
+ rcu_read_lock();
+ dst = __sk_dst_get(sk);
+ if (dst)
+ mtuinfo.ip6m_mtu = dst_mtu(dst);
+ rcu_read_unlock();
+ if (!mtuinfo.ip6m_mtu)
+ return -ENOTCONN;
+
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, &mtuinfo, len))
+ return -EFAULT;
+
+ return 0;
+ break;
+ }
+
case IPV6_UNICAST_HOPS:
case IPV6_MULTICAST_HOPS:
{
@@ -1066,12 +1116,14 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
else
val = np->mcast_hops;
- dst = sk_dst_get(sk);
- if (dst) {
- if (val < 0)
+ if (val < 0) {
+ rcu_read_lock();
+ dst = __sk_dst_get(sk);
+ if (dst)
val = ip6_dst_hoplimit(dst);
- dst_release(dst);
+ rcu_read_unlock();
}
+
if (val < 0)
val = sock_net(sk)->ipv6.devconf_all->hop_limit;
break;
@@ -1115,6 +1167,14 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
val |= IPV6_PREFER_SRC_HOME;
break;
+ case IPV6_MINHOPCOUNT:
+ val = np->min_hopcount;
+ break;
+
+ case IPV6_DONTFRAG:
+ val = np->dontfrag;
+ break;
+
default:
return -ENOPROTOOPT;
}
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index c483ab9fd67b..59f1881968c7 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -44,6 +44,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <net/mld.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
@@ -71,54 +72,11 @@
#define MDBG(x)
#endif
-/*
- * These header formats should be in a separate include file, but icmpv6.h
- * doesn't have in6_addr defined in all cases, there is no __u128, and no
- * other files reference these.
- *
- * +-DLS 4/14/03
- */
-
-/* Multicast Listener Discovery version 2 headers */
-
-struct mld2_grec {
- __u8 grec_type;
- __u8 grec_auxwords;
- __be16 grec_nsrcs;
- struct in6_addr grec_mca;
- struct in6_addr grec_src[0];
-};
-
-struct mld2_report {
- __u8 type;
- __u8 resv1;
- __sum16 csum;
- __be16 resv2;
- __be16 ngrec;
- struct mld2_grec grec[0];
-};
-
-struct mld2_query {
- __u8 type;
- __u8 code;
- __sum16 csum;
- __be16 mrc;
- __be16 resv1;
- struct in6_addr mca;
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u8 qrv:3,
- suppress:1,
- resv2:4;
-#elif defined(__BIG_ENDIAN_BITFIELD)
- __u8 resv2:4,
- suppress:1,
- qrv:3;
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
- __u8 qqic;
- __be16 nsrcs;
- struct in6_addr srcs[0];
+/* Ensure that we have struct in6_addr aligned on 32bit word. */
+static void *__mld2_query_bugs[] __attribute__((__unused__)) = {
+ BUILD_BUG_ON_NULL(offsetof(struct mld2_query, mld2q_srcs) % 4),
+ BUILD_BUG_ON_NULL(offsetof(struct mld2_report, mld2r_grec) % 4),
+ BUILD_BUG_ON_NULL(offsetof(struct mld2_grec, grec_mca) % 4)
};
static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT;
@@ -157,14 +115,6 @@ static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
((idev)->mc_v1_seen && \
time_before(jiffies, (idev)->mc_v1_seen)))
-#define MLDV2_MASK(value, nb) ((nb)>=32 ? (value) : ((1<<(nb))-1) & (value))
-#define MLDV2_EXP(thresh, nbmant, nbexp, value) \
- ((value) < (thresh) ? (value) : \
- ((MLDV2_MASK(value, nbmant) | (1<<(nbmant))) << \
- (MLDV2_MASK((value) >> (nbmant), nbexp) + (nbexp))))
-
-#define MLDV2_MRC(value) MLDV2_EXP(0x8000, 12, 3, value)
-
#define IPV6_MLD_MAX_MSF 64
int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF;
@@ -715,7 +665,7 @@ static void igmp6_group_added(struct ifmcaddr6 *mc)
if (!(mc->mca_flags&MAF_LOADED)) {
mc->mca_flags |= MAF_LOADED;
if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
- dev_mc_add(dev, buf, dev->addr_len, 0);
+ dev_mc_add(dev, buf);
}
spin_unlock_bh(&mc->mca_lock);
@@ -741,7 +691,7 @@ static void igmp6_group_dropped(struct ifmcaddr6 *mc)
if (mc->mca_flags&MAF_LOADED) {
mc->mca_flags &= ~MAF_LOADED;
if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
- dev_mc_delete(dev, buf, dev->addr_len, 0);
+ dev_mc_del(dev, buf);
}
if (mc->mca_flags & MAF_NOREPORT)
@@ -1161,7 +1111,7 @@ int igmp6_event_query(struct sk_buff *skb)
struct in6_addr *group;
unsigned long max_delay;
struct inet6_dev *idev;
- struct icmp6hdr *hdr;
+ struct mld_msg *mld;
int group_type;
int mark = 0;
int len;
@@ -1182,8 +1132,8 @@ int igmp6_event_query(struct sk_buff *skb)
if (idev == NULL)
return 0;
- hdr = icmp6_hdr(skb);
- group = (struct in6_addr *) (hdr + 1);
+ mld = (struct mld_msg *)icmp6_hdr(skb);
+ group = &mld->mld_mca;
group_type = ipv6_addr_type(group);
if (group_type != IPV6_ADDR_ANY &&
@@ -1197,7 +1147,7 @@ int igmp6_event_query(struct sk_buff *skb)
/* MLDv1 router present */
/* Translate milliseconds to jiffies */
- max_delay = (ntohs(hdr->icmp6_maxdelay)*HZ)/1000;
+ max_delay = (ntohs(mld->mld_maxdelay)*HZ)/1000;
switchback = (idev->mc_qrv + 1) * max_delay;
idev->mc_v1_seen = jiffies + switchback;
@@ -1216,14 +1166,14 @@ int igmp6_event_query(struct sk_buff *skb)
return -EINVAL;
}
mlh2 = (struct mld2_query *)skb_transport_header(skb);
- max_delay = (MLDV2_MRC(ntohs(mlh2->mrc))*HZ)/1000;
+ max_delay = (MLDV2_MRC(ntohs(mlh2->mld2q_mrc))*HZ)/1000;
if (!max_delay)
max_delay = 1;
idev->mc_maxdelay = max_delay;
- if (mlh2->qrv)
- idev->mc_qrv = mlh2->qrv;
+ if (mlh2->mld2q_qrv)
+ idev->mc_qrv = mlh2->mld2q_qrv;
if (group_type == IPV6_ADDR_ANY) { /* general query */
- if (mlh2->nsrcs) {
+ if (mlh2->mld2q_nsrcs) {
in6_dev_put(idev);
return -EINVAL; /* no sources allowed */
}
@@ -1232,9 +1182,9 @@ int igmp6_event_query(struct sk_buff *skb)
return 0;
}
/* mark sources to include, if group & source-specific */
- if (mlh2->nsrcs != 0) {
+ if (mlh2->mld2q_nsrcs != 0) {
if (!pskb_may_pull(skb, srcs_offset +
- ntohs(mlh2->nsrcs) * sizeof(struct in6_addr))) {
+ ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr))) {
in6_dev_put(idev);
return -EINVAL;
}
@@ -1270,7 +1220,7 @@ int igmp6_event_query(struct sk_buff *skb)
ma->mca_flags &= ~MAF_GSQUERY;
}
if (!(ma->mca_flags & MAF_GSQUERY) ||
- mld_marksources(ma, ntohs(mlh2->nsrcs), mlh2->srcs))
+ mld_marksources(ma, ntohs(mlh2->mld2q_nsrcs), mlh2->mld2q_srcs))
igmp6_group_queried(ma, max_delay);
spin_unlock_bh(&ma->mca_lock);
break;
@@ -1286,9 +1236,8 @@ int igmp6_event_query(struct sk_buff *skb)
int igmp6_event_report(struct sk_buff *skb)
{
struct ifmcaddr6 *ma;
- struct in6_addr *addrp;
struct inet6_dev *idev;
- struct icmp6hdr *hdr;
+ struct mld_msg *mld;
int addr_type;
/* Our own report looped back. Ignore it. */
@@ -1300,10 +1249,10 @@ int igmp6_event_report(struct sk_buff *skb)
skb->pkt_type != PACKET_BROADCAST)
return 0;
- if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
+ if (!pskb_may_pull(skb, sizeof(*mld) - sizeof(struct icmp6hdr)))
return -EINVAL;
- hdr = icmp6_hdr(skb);
+ mld = (struct mld_msg *)icmp6_hdr(skb);
/* Drop reports with not link local source */
addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr);
@@ -1311,8 +1260,6 @@ int igmp6_event_report(struct sk_buff *skb)
!(addr_type&IPV6_ADDR_LINKLOCAL))
return -EINVAL;
- addrp = (struct in6_addr *) (hdr + 1);
-
idev = in6_dev_get(skb->dev);
if (idev == NULL)
return -ENODEV;
@@ -1323,7 +1270,7 @@ int igmp6_event_report(struct sk_buff *skb)
read_lock_bh(&idev->lock);
for (ma = idev->mc_list; ma; ma=ma->next) {
- if (ipv6_addr_equal(&ma->mca_addr, addrp)) {
+ if (ipv6_addr_equal(&ma->mca_addr, &mld->mld_mca)) {
spin_lock(&ma->mca_lock);
if (del_timer(&ma->mca_timer))
atomic_dec(&ma->mca_refcnt);
@@ -1432,11 +1379,11 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size)
skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data);
skb_put(skb, sizeof(*pmr));
pmr = (struct mld2_report *)skb_transport_header(skb);
- pmr->type = ICMPV6_MLD2_REPORT;
- pmr->resv1 = 0;
- pmr->csum = 0;
- pmr->resv2 = 0;
- pmr->ngrec = 0;
+ pmr->mld2r_type = ICMPV6_MLD2_REPORT;
+ pmr->mld2r_resv1 = 0;
+ pmr->mld2r_cksum = 0;
+ pmr->mld2r_resv2 = 0;
+ pmr->mld2r_ngrec = 0;
return skb;
}
@@ -1458,9 +1405,10 @@ static void mld_sendpack(struct sk_buff *skb)
mldlen = skb->tail - skb->transport_header;
pip6->payload_len = htons(payload_len);
- pmr->csum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen,
- IPPROTO_ICMPV6, csum_partial(skb_transport_header(skb),
- mldlen, 0));
+ pmr->mld2r_cksum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen,
+ IPPROTO_ICMPV6,
+ csum_partial(skb_transport_header(skb),
+ mldlen, 0));
dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr);
@@ -1480,7 +1428,7 @@ static void mld_sendpack(struct sk_buff *skb)
payload_len = skb->len;
- err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
+ err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
dst_output);
out:
if (!err) {
@@ -1521,7 +1469,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
pgr->grec_nsrcs = 0;
pgr->grec_mca = pmc->mca_addr; /* structure copy */
pmr = (struct mld2_report *)skb_transport_header(skb);
- pmr->ngrec = htons(ntohs(pmr->ngrec)+1);
+ pmr->mld2r_ngrec = htons(ntohs(pmr->mld2r_ngrec)+1);
*ppgr = pgr;
return skb;
}
@@ -1557,7 +1505,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
/* EX and TO_EX get a fresh packet, if needed */
if (truncate) {
- if (pmr && pmr->ngrec &&
+ if (pmr && pmr->mld2r_ngrec &&
AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
if (skb)
mld_sendpack(skb);
@@ -1770,9 +1718,8 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
struct sock *sk = net->ipv6.igmp_sk;
struct inet6_dev *idev;
struct sk_buff *skb;
- struct icmp6hdr *hdr;
+ struct mld_msg *hdr;
const struct in6_addr *snd_addr, *saddr;
- struct in6_addr *addrp;
struct in6_addr addr_buf;
int err, len, payload_len, full_len;
u8 ra[8] = { IPPROTO_ICMPV6, 0,
@@ -1820,16 +1767,14 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra));
- hdr = (struct icmp6hdr *) skb_put(skb, sizeof(struct icmp6hdr));
- memset(hdr, 0, sizeof(struct icmp6hdr));
- hdr->icmp6_type = type;
+ hdr = (struct mld_msg *) skb_put(skb, sizeof(struct mld_msg));
+ memset(hdr, 0, sizeof(struct mld_msg));
+ hdr->mld_type = type;
+ ipv6_addr_copy(&hdr->mld_mca, addr);
- addrp = (struct in6_addr *) skb_put(skb, sizeof(struct in6_addr));
- ipv6_addr_copy(addrp, addr);
-
- hdr->icmp6_cksum = csum_ipv6_magic(saddr, snd_addr, len,
- IPPROTO_ICMPV6,
- csum_partial(hdr, len, 0));
+ hdr->mld_cksum = csum_ipv6_magic(saddr, snd_addr, len,
+ IPPROTO_ICMPV6,
+ csum_partial(hdr, len, 0));
idev = in6_dev_get(skb->dev);
@@ -1848,7 +1793,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
goto err_out;
skb_dst_set(skb, dst);
- err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
+ err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
dst_output);
out:
if (!err) {
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index da0a4d2adc69..3f7c12b70a26 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -536,7 +536,7 @@ void ndisc_send_skb(struct sk_buff *skb,
idev = in6_dev_get(dst->dev);
IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
- err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev,
+ err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev,
dst_output);
if (!err) {
ICMP6MSGOUT_INC_STATS(net, idev, type);
@@ -1618,7 +1618,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
skb_dst_set(buff, dst);
idev = in6_dev_get(dst->dev);
IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
- err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, buff, NULL, dst->dev,
+ err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, buff, NULL, dst->dev,
dst_output);
if (!err) {
ICMP6MSGOUT_INC_STATS(net, idev, NDISC_REDIRECT);
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index d5ed92b14346..a74951c039b6 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -25,20 +25,6 @@ int ip6_route_me_harder(struct sk_buff *skb)
};
dst = ip6_route_output(net, skb->sk, &fl);
-
-#ifdef CONFIG_XFRM
- if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
- xfrm_decode_session(skb, &fl, AF_INET6) == 0) {
- struct dst_entry *dst2 = skb_dst(skb);
-
- if (xfrm_lookup(net, &dst2, &fl, skb->sk, 0)) {
- skb_dst_set(skb, NULL);
- return -1;
- }
- skb_dst_set(skb, dst2);
- }
-#endif
-
if (dst->error) {
IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
LIMIT_NETDEBUG(KERN_DEBUG "ip6_route_me_harder: No more route.\n");
@@ -50,6 +36,17 @@ int ip6_route_me_harder(struct sk_buff *skb)
skb_dst_drop(skb);
skb_dst_set(skb, dst);
+
+#ifdef CONFIG_XFRM
+ if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
+ xfrm_decode_session(skb, &fl, AF_INET6) == 0) {
+ skb_dst_set(skb, NULL);
+ if (xfrm_lookup(net, &dst, &fl, skb->sk, 0))
+ return -1;
+ skb_dst_set(skb, dst);
+ }
+#endif
+
return 0;
}
EXPORT_SYMBOL(ip6_route_me_harder);
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index 6a68a74d14a3..8656eb75520c 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -162,8 +162,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
break;
case IPQ_COPY_PACKET:
- if ((entry->skb->ip_summed == CHECKSUM_PARTIAL ||
- entry->skb->ip_summed == CHECKSUM_COMPLETE) &&
+ if (entry->skb->ip_summed == CHECKSUM_PARTIAL &&
(*errp = skb_checksum_help(entry->skb))) {
read_unlock_bh(&queue_lock);
return NULL;
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 9210e312edf1..7afa11773164 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -40,13 +40,13 @@ MODULE_DESCRIPTION("IPv6 packet filter");
/*#define DEBUG_IP_FIREWALL_USER*/
#ifdef DEBUG_IP_FIREWALL
-#define dprintf(format, args...) printk(format , ## args)
+#define dprintf(format, args...) pr_info(format , ## args)
#else
#define dprintf(format, args...)
#endif
#ifdef DEBUG_IP_FIREWALL_USER
-#define duprintf(format, args...) printk(format , ## args)
+#define duprintf(format, args...) pr_info(format , ## args)
#else
#define duprintf(format, args...)
#endif
@@ -200,8 +200,7 @@ static unsigned int
ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
{
if (net_ratelimit())
- printk("ip6_tables: error: `%s'\n",
- (const char *)par->targinfo);
+ pr_info("error: `%s'\n", (const char *)par->targinfo);
return NF_DROP;
}
@@ -352,15 +351,14 @@ ip6t_do_table(struct sk_buff *skb,
const struct net_device *out,
struct xt_table *table)
{
-#define tb_comefrom ((struct ip6t_entry *)table_base)->comefrom
-
static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
bool hotdrop = false;
/* Initializing verdict to NF_DROP keeps gcc happy. */
unsigned int verdict = NF_DROP;
const char *indev, *outdev;
const void *table_base;
- struct ip6t_entry *e, *back;
+ struct ip6t_entry *e, **jumpstack;
+ unsigned int *stackptr, origptr, cpu;
const struct xt_table_info *private;
struct xt_match_param mtpar;
struct xt_target_param tgpar;
@@ -384,19 +382,19 @@ ip6t_do_table(struct sk_buff *skb,
xt_info_rdlock_bh();
private = table->private;
- table_base = private->entries[smp_processor_id()];
+ cpu = smp_processor_id();
+ table_base = private->entries[cpu];
+ jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
+ stackptr = &private->stackptr[cpu];
+ origptr = *stackptr;
e = get_entry(table_base, private->hook_entry[hook]);
- /* For return from builtin chain */
- back = get_entry(table_base, private->underflow[hook]);
-
do {
const struct ip6t_entry_target *t;
const struct xt_entry_match *ematch;
IP_NF_ASSERT(e);
- IP_NF_ASSERT(back);
if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
&mtpar.thoff, &mtpar.fragoff, &hotdrop)) {
no_match:
@@ -433,41 +431,30 @@ ip6t_do_table(struct sk_buff *skb,
verdict = (unsigned)(-v) - 1;
break;
}
- e = back;
- back = get_entry(table_base, back->comefrom);
+ if (*stackptr == 0)
+ e = get_entry(table_base,
+ private->underflow[hook]);
+ else
+ e = ip6t_next_entry(jumpstack[--*stackptr]);
continue;
}
if (table_base + v != ip6t_next_entry(e) &&
!(e->ipv6.flags & IP6T_F_GOTO)) {
- /* Save old back ptr in next entry */
- struct ip6t_entry *next = ip6t_next_entry(e);
- next->comefrom = (void *)back - table_base;
- /* set back pointer to next entry */
- back = next;
+ if (*stackptr >= private->stacksize) {
+ verdict = NF_DROP;
+ break;
+ }
+ jumpstack[(*stackptr)++] = e;
}
e = get_entry(table_base, v);
continue;
}
- /* Targets which reenter must return
- abs. verdicts */
tgpar.target = t->u.kernel.target;
tgpar.targinfo = t->data;
-#ifdef CONFIG_NETFILTER_DEBUG
- tb_comefrom = 0xeeeeeeec;
-#endif
verdict = t->u.kernel.target->target(skb, &tgpar);
-
-#ifdef CONFIG_NETFILTER_DEBUG
- if (tb_comefrom != 0xeeeeeeec && verdict == IP6T_CONTINUE) {
- printk("Target %s reentered!\n",
- t->u.kernel.target->name);
- verdict = NF_DROP;
- }
- tb_comefrom = 0x57acc001;
-#endif
if (verdict == IP6T_CONTINUE)
e = ip6t_next_entry(e);
else
@@ -475,10 +462,8 @@ ip6t_do_table(struct sk_buff *skb,
break;
} while (!hotdrop);
-#ifdef CONFIG_NETFILTER_DEBUG
- tb_comefrom = NETFILTER_LINK_POISON;
-#endif
xt_info_rdunlock_bh();
+ *stackptr = origptr;
#ifdef DEBUG_ALLOW_ALL
return NF_ACCEPT;
@@ -487,8 +472,6 @@ ip6t_do_table(struct sk_buff *skb,
return NF_DROP;
else return verdict;
#endif
-
-#undef tb_comefrom
}
/* Figures out from what hook each rule can be called: returns 0 if
@@ -661,12 +644,11 @@ find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par)
struct xt_match *match;
int ret;
- match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
- m->u.user.revision),
- "ip6t_%s", m->u.user.name);
- if (IS_ERR(match) || !match) {
+ match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
+ m->u.user.revision);
+ if (IS_ERR(match)) {
duprintf("find_check_match: `%s' not found\n", m->u.user.name);
- return match ? PTR_ERR(match) : -ENOENT;
+ return PTR_ERR(match);
}
m->u.kernel.match = match;
@@ -734,13 +716,11 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
}
t = ip6t_get_target(e);
- target = try_then_request_module(xt_find_target(AF_INET6,
- t->u.user.name,
- t->u.user.revision),
- "ip6t_%s", t->u.user.name);
- if (IS_ERR(target) || !target) {
+ target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
+ t->u.user.revision);
+ if (IS_ERR(target)) {
duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
- ret = target ? PTR_ERR(target) : -ENOENT;
+ ret = PTR_ERR(target);
goto cleanup_matches;
}
t->u.kernel.target = target;
@@ -873,6 +853,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
if (ret != 0)
return ret;
++i;
+ if (strcmp(ip6t_get_target(iter)->u.user.name,
+ XT_ERROR_TARGET) == 0)
+ ++newinfo->stacksize;
}
if (i != repl->num_entries) {
@@ -1509,13 +1492,12 @@ compat_find_calc_match(struct ip6t_entry_match *m,
{
struct xt_match *match;
- match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
- m->u.user.revision),
- "ip6t_%s", m->u.user.name);
- if (IS_ERR(match) || !match) {
+ match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
+ m->u.user.revision);
+ if (IS_ERR(match)) {
duprintf("compat_check_calc_match: `%s' not found\n",
m->u.user.name);
- return match ? PTR_ERR(match) : -ENOENT;
+ return PTR_ERR(match);
}
m->u.kernel.match = match;
*size += xt_compat_match_offset(match);
@@ -1582,14 +1564,12 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
}
t = compat_ip6t_get_target(e);
- target = try_then_request_module(xt_find_target(AF_INET6,
- t->u.user.name,
- t->u.user.revision),
- "ip6t_%s", t->u.user.name);
- if (IS_ERR(target) || !target) {
+ target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
+ t->u.user.revision);
+ if (IS_ERR(target)) {
duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
t->u.user.name);
- ret = target ? PTR_ERR(target) : -ENOENT;
+ ret = PTR_ERR(target);
goto release_matches;
}
t->u.kernel.target = target;
@@ -2127,8 +2107,7 @@ struct xt_table *ip6t_register_table(struct net *net,
{
int ret;
struct xt_table_info *newinfo;
- struct xt_table_info bootstrap
- = { 0, 0, 0, { 0 }, { 0 }, { } };
+ struct xt_table_info bootstrap = {0};
void *loc_cpu_entry;
struct xt_table *new_table;
@@ -2216,12 +2195,12 @@ icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
}
/* Called when user tries to insert an entry of this type. */
-static bool icmp6_checkentry(const struct xt_mtchk_param *par)
+static int icmp6_checkentry(const struct xt_mtchk_param *par)
{
const struct ip6t_icmp *icmpinfo = par->matchinfo;
/* Must specify no unknown invflags */
- return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
+ return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
}
/* The built-in targets: standard (NULL) and error. */
@@ -2308,7 +2287,7 @@ static int __init ip6_tables_init(void)
if (ret < 0)
goto err5;
- printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
+ pr_info("(C) 2000-2006 Netfilter Core Team\n");
return 0;
err5:
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c
index b285fdf19050..1f47a525f484 100644
--- a/net/ipv6/netfilter/ip6t_LOG.c
+++ b/net/ipv6/netfilter/ip6t_LOG.c
@@ -9,9 +9,8 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
@@ -378,7 +377,7 @@ static struct nf_loginfo default_loginfo = {
.type = NF_LOG_TYPE_LOG,
.u = {
.log = {
- .level = 0,
+ .level = 5,
.logflags = NF_LOG_MASK,
},
},
@@ -452,20 +451,19 @@ log_tg6(struct sk_buff *skb, const struct xt_target_param *par)
}
-static bool log_tg6_check(const struct xt_tgchk_param *par)
+static int log_tg6_check(const struct xt_tgchk_param *par)
{
const struct ip6t_log_info *loginfo = par->targinfo;
if (loginfo->level >= 8) {
- pr_debug("LOG: level %u >= 8\n", loginfo->level);
- return false;
+ pr_debug("level %u >= 8\n", loginfo->level);
+ return -EINVAL;
}
if (loginfo->prefix[sizeof(loginfo->prefix)-1] != '\0') {
- pr_debug("LOG: prefix term %i\n",
- loginfo->prefix[sizeof(loginfo->prefix)-1]);
- return false;
+ pr_debug("prefix not null-terminated\n");
+ return -EINVAL;
}
- return true;
+ return 0;
}
static struct xt_target log_tg6_reg __read_mostly = {
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index 39b50c3768e8..af1d6494ac39 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -14,6 +14,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/gfp.h>
#include <linux/module.h>
@@ -50,7 +51,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
(!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) {
- pr_debug("ip6t_REJECT: addr is not unicast.\n");
+ pr_debug("addr is not unicast.\n");
return;
}
@@ -58,7 +59,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto);
if ((tcphoff < 0) || (tcphoff > oldskb->len)) {
- pr_debug("ip6t_REJECT: Can't get TCP header.\n");
+ pr_debug("Cannot get TCP header.\n");
return;
}
@@ -66,7 +67,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
/* IP header checks: fragment, too short. */
if (proto != IPPROTO_TCP || otcplen < sizeof(struct tcphdr)) {
- pr_debug("ip6t_REJECT: proto(%d) != IPPROTO_TCP, "
+ pr_debug("proto(%d) != IPPROTO_TCP, "
"or too short. otcplen = %d\n",
proto, otcplen);
return;
@@ -77,14 +78,14 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
/* No RST for RST. */
if (otcph.rst) {
- pr_debug("ip6t_REJECT: RST is set\n");
+ pr_debug("RST is set\n");
return;
}
/* Check checksum. */
if (csum_ipv6_magic(&oip6h->saddr, &oip6h->daddr, otcplen, IPPROTO_TCP,
skb_checksum(oldskb, tcphoff, otcplen, 0))) {
- pr_debug("ip6t_REJECT: TCP checksum is invalid\n");
+ pr_debug("TCP checksum is invalid\n");
return;
}
@@ -108,7 +109,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
if (!nskb) {
if (net_ratelimit())
- printk("ip6t_REJECT: Can't alloc skb\n");
+ pr_debug("cannot alloc skb\n");
dst_release(dst);
return;
}
@@ -180,9 +181,6 @@ reject_tg6(struct sk_buff *skb, const struct xt_target_param *par)
struct net *net = dev_net((par->in != NULL) ? par->in : par->out);
pr_debug("%s: medium point\n", __func__);
- /* WARNING: This code causes reentry within ip6tables.
- This means that the ip6tables jump stack is now crap. We
- must return an absolute verdict. --RR */
switch (reject->with) {
case IP6T_ICMP6_NO_ROUTE:
send_unreach(net, skb, ICMPV6_NOROUTE, par->hooknum);
@@ -207,30 +205,30 @@ reject_tg6(struct sk_buff *skb, const struct xt_target_param *par)
break;
default:
if (net_ratelimit())
- printk(KERN_WARNING "ip6t_REJECT: case %u not handled yet\n", reject->with);
+ pr_info("case %u not handled yet\n", reject->with);
break;
}
return NF_DROP;
}
-static bool reject_tg6_check(const struct xt_tgchk_param *par)
+static int reject_tg6_check(const struct xt_tgchk_param *par)
{
const struct ip6t_reject_info *rejinfo = par->targinfo;
const struct ip6t_entry *e = par->entryinfo;
if (rejinfo->with == IP6T_ICMP6_ECHOREPLY) {
- printk("ip6t_REJECT: ECHOREPLY is not supported.\n");
- return false;
+ pr_info("ECHOREPLY is not supported.\n");
+ return -EINVAL;
} else if (rejinfo->with == IP6T_TCP_RESET) {
/* Must specify that it's a TCP packet */
if (e->ipv6.proto != IPPROTO_TCP ||
(e->ipv6.invflags & XT_INV_PROTO)) {
- printk("ip6t_REJECT: TCP_RESET illegal for non-tcp\n");
- return false;
+ pr_info("TCP_RESET illegal for non-tcp\n");
+ return -EINVAL;
}
}
- return true;
+ return 0;
}
static struct xt_target reject_tg6_reg __read_mostly = {
diff --git a/net/ipv6/netfilter/ip6t_ah.c b/net/ipv6/netfilter/ip6t_ah.c
index ac0b7c629d78..1580693c86c1 100644
--- a/net/ipv6/netfilter/ip6t_ah.c
+++ b/net/ipv6/netfilter/ip6t_ah.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
@@ -29,7 +29,7 @@ spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, bool invert)
{
bool r;
- pr_debug("ah spi_match:%c 0x%x <= 0x%x <= 0x%x",
+ pr_debug("spi_match:%c 0x%x <= 0x%x <= 0x%x\n",
invert ? '!' : ' ', min, spi, max);
r = (spi >= min && spi <= max) ^ invert;
pr_debug(" result %s\n", r ? "PASS" : "FAILED");
@@ -87,15 +87,15 @@ static bool ah_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
!(ahinfo->hdrres && ah->reserved);
}
-static bool ah_mt6_check(const struct xt_mtchk_param *par)
+static int ah_mt6_check(const struct xt_mtchk_param *par)
{
const struct ip6t_ah *ahinfo = par->matchinfo;
if (ahinfo->invflags & ~IP6T_AH_INV_MASK) {
- pr_debug("ip6t_ah: unknown flags %X\n", ahinfo->invflags);
- return false;
+ pr_debug("unknown flags %X\n", ahinfo->invflags);
+ return -EINVAL;
}
- return true;
+ return 0;
}
static struct xt_match ah_mt6_reg __read_mostly = {
diff --git a/net/ipv6/netfilter/ip6t_frag.c b/net/ipv6/netfilter/ip6t_frag.c
index 7b91c2598ed5..a5daf0ffb4ec 100644
--- a/net/ipv6/netfilter/ip6t_frag.c
+++ b/net/ipv6/netfilter/ip6t_frag.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ipv6.h>
@@ -27,7 +27,7 @@ static inline bool
id_match(u_int32_t min, u_int32_t max, u_int32_t id, bool invert)
{
bool r;
- pr_debug("frag id_match:%c 0x%x <= 0x%x <= 0x%x", invert ? '!' : ' ',
+ pr_debug("id_match:%c 0x%x <= 0x%x <= 0x%x\n", invert ? '!' : ' ',
min, id, max);
r = (id >= min && id <= max) ^ invert;
pr_debug(" result %s\n", r ? "PASS" : "FAILED");
@@ -102,15 +102,15 @@ frag_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
(ntohs(fh->frag_off) & IP6_MF));
}
-static bool frag_mt6_check(const struct xt_mtchk_param *par)
+static int frag_mt6_check(const struct xt_mtchk_param *par)
{
const struct ip6t_frag *fraginfo = par->matchinfo;
if (fraginfo->invflags & ~IP6T_FRAG_INV_MASK) {
- pr_debug("ip6t_frag: unknown flags %X\n", fraginfo->invflags);
- return false;
+ pr_debug("unknown flags %X\n", fraginfo->invflags);
+ return -EINVAL;
}
- return true;
+ return 0;
}
static struct xt_match frag_mt6_reg __read_mostly = {
diff --git a/net/ipv6/netfilter/ip6t_hbh.c b/net/ipv6/netfilter/ip6t_hbh.c
index cbe8dec9744b..e424e7c8f824 100644
--- a/net/ipv6/netfilter/ip6t_hbh.c
+++ b/net/ipv6/netfilter/ip6t_hbh.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ipv6.h>
@@ -41,6 +41,8 @@ MODULE_ALIAS("ip6t_dst");
* 5 -> RTALERT 2 x x
*/
+static struct xt_match hbh_mt6_reg[] __read_mostly;
+
static bool
hbh_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
{
@@ -58,7 +60,9 @@ hbh_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
unsigned int optlen;
int err;
- err = ipv6_find_hdr(skb, &ptr, par->match->data, NULL);
+ err = ipv6_find_hdr(skb, &ptr,
+ (par->match == &hbh_mt6_reg[0]) ?
+ NEXTHDR_HOP : NEXTHDR_DEST, NULL);
if (err < 0) {
if (err != -ENOENT)
*par->hotdrop = true;
@@ -141,11 +145,11 @@ hbh_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
}
/* Step to the next */
- pr_debug("len%04X \n", optlen);
+ pr_debug("len%04X\n", optlen);
if ((ptr > skb->len - optlen || hdrlen < optlen) &&
temp < optinfo->optsnr - 1) {
- pr_debug("new pointer is too large! \n");
+ pr_debug("new pointer is too large!\n");
break;
}
ptr += optlen;
@@ -160,32 +164,32 @@ hbh_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
return false;
}
-static bool hbh_mt6_check(const struct xt_mtchk_param *par)
+static int hbh_mt6_check(const struct xt_mtchk_param *par)
{
const struct ip6t_opts *optsinfo = par->matchinfo;
if (optsinfo->invflags & ~IP6T_OPTS_INV_MASK) {
- pr_debug("ip6t_opts: unknown flags %X\n", optsinfo->invflags);
- return false;
+ pr_debug("unknown flags %X\n", optsinfo->invflags);
+ return -EINVAL;
}
if (optsinfo->flags & IP6T_OPTS_NSTRICT) {
- pr_debug("ip6t_opts: Not strict - not implemented");
- return false;
+ pr_debug("Not strict - not implemented");
+ return -EINVAL;
}
- return true;
+ return 0;
}
static struct xt_match hbh_mt6_reg[] __read_mostly = {
{
+ /* Note, hbh_mt6 relies on the order of hbh_mt6_reg */
.name = "hbh",
.family = NFPROTO_IPV6,
.match = hbh_mt6,
.matchsize = sizeof(struct ip6t_opts),
.checkentry = hbh_mt6_check,
.me = THIS_MODULE,
- .data = NEXTHDR_HOP,
},
{
.name = "dst",
@@ -194,7 +198,6 @@ static struct xt_match hbh_mt6_reg[] __read_mostly = {
.matchsize = sizeof(struct ip6t_opts),
.checkentry = hbh_mt6_check,
.me = THIS_MODULE,
- .data = NEXTHDR_DEST,
},
};
diff --git a/net/ipv6/netfilter/ip6t_ipv6header.c b/net/ipv6/netfilter/ip6t_ipv6header.c
index 91490ad9302c..46fbabb493fa 100644
--- a/net/ipv6/netfilter/ip6t_ipv6header.c
+++ b/net/ipv6/netfilter/ip6t_ipv6header.c
@@ -118,16 +118,16 @@ ipv6header_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
}
}
-static bool ipv6header_mt6_check(const struct xt_mtchk_param *par)
+static int ipv6header_mt6_check(const struct xt_mtchk_param *par)
{
const struct ip6t_ipv6header_info *info = par->matchinfo;
/* invflags is 0 or 0xff in hard mode */
if ((!info->modeflag) && info->invflags != 0x00 &&
info->invflags != 0xFF)
- return false;
+ return -EINVAL;
- return true;
+ return 0;
}
static struct xt_match ipv6header_mt6_reg __read_mostly = {
diff --git a/net/ipv6/netfilter/ip6t_mh.c b/net/ipv6/netfilter/ip6t_mh.c
index aafe4e66577b..c9f443e0138f 100644
--- a/net/ipv6/netfilter/ip6t_mh.c
+++ b/net/ipv6/netfilter/ip6t_mh.c
@@ -11,6 +11,7 @@
* Based on net/netfilter/xt_tcpudp.c
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/module.h>
#include <net/ip.h>
@@ -24,12 +25,6 @@
MODULE_DESCRIPTION("Xtables: IPv6 Mobility Header match");
MODULE_LICENSE("GPL");
-#ifdef DEBUG_IP_FIREWALL_USER
-#define duprintf(format, args...) printk(format , ## args)
-#else
-#define duprintf(format, args...)
-#endif
-
/* Returns 1 if the type is matched by the range, 0 otherwise */
static inline bool
type_match(u_int8_t min, u_int8_t max, u_int8_t type, bool invert)
@@ -51,13 +46,13 @@ static bool mh_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
if (mh == NULL) {
/* We've been asked to examine this packet, and we
can't. Hence, no choice but to drop. */
- duprintf("Dropping evil MH tinygram.\n");
+ pr_debug("Dropping evil MH tinygram.\n");
*par->hotdrop = true;
return false;
}
if (mh->ip6mh_proto != IPPROTO_NONE) {
- duprintf("Dropping invalid MH Payload Proto: %u\n",
+ pr_debug("Dropping invalid MH Payload Proto: %u\n",
mh->ip6mh_proto);
*par->hotdrop = true;
return false;
@@ -67,12 +62,12 @@ static bool mh_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
!!(mhinfo->invflags & IP6T_MH_INV_TYPE));
}
-static bool mh_mt6_check(const struct xt_mtchk_param *par)
+static int mh_mt6_check(const struct xt_mtchk_param *par)
{
const struct ip6t_mh *mhinfo = par->matchinfo;
/* Must specify no unknown invflags */
- return !(mhinfo->invflags & ~IP6T_MH_INV_MASK);
+ return (mhinfo->invflags & ~IP6T_MH_INV_MASK) ? -EINVAL : 0;
}
static struct xt_match mh_mt6_reg __read_mostly = {
diff --git a/net/ipv6/netfilter/ip6t_rt.c b/net/ipv6/netfilter/ip6t_rt.c
index b77307fc8743..09322720d2a6 100644
--- a/net/ipv6/netfilter/ip6t_rt.c
+++ b/net/ipv6/netfilter/ip6t_rt.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ipv6.h>
@@ -29,7 +29,7 @@ static inline bool
segsleft_match(u_int32_t min, u_int32_t max, u_int32_t id, bool invert)
{
bool r;
- pr_debug("rt segsleft_match:%c 0x%x <= 0x%x <= 0x%x",
+ pr_debug("segsleft_match:%c 0x%x <= 0x%x <= 0x%x\n",
invert ? '!' : ' ', min, id, max);
r = (id >= min && id <= max) ^ invert;
pr_debug(" result %s\n", r ? "PASS" : "FAILED");
@@ -183,23 +183,23 @@ static bool rt_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
return false;
}
-static bool rt_mt6_check(const struct xt_mtchk_param *par)
+static int rt_mt6_check(const struct xt_mtchk_param *par)
{
const struct ip6t_rt *rtinfo = par->matchinfo;
if (rtinfo->invflags & ~IP6T_RT_INV_MASK) {
- pr_debug("ip6t_rt: unknown flags %X\n", rtinfo->invflags);
- return false;
+ pr_debug("unknown flags %X\n", rtinfo->invflags);
+ return -EINVAL;
}
if ((rtinfo->flags & (IP6T_RT_RES | IP6T_RT_FST_MASK)) &&
(!(rtinfo->flags & IP6T_RT_TYP) ||
(rtinfo->rt_type != 0) ||
(rtinfo->invflags & IP6T_RT_INV_TYP))) {
pr_debug("`--rt-type 0' required before `--rt-0-*'");
- return false;
+ return -EINVAL;
}
- return true;
+ return 0;
}
static struct xt_match rt_mt6_reg __read_mostly = {
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index dd5b9bd61c62..6fb890187de0 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -644,7 +644,7 @@ void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
s2 = s->next;
s->next = NULL;
- NF_HOOK_THRESH(PF_INET6, hooknum, s, in, out, okfn,
+ NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, s, in, out, okfn,
NF_IP6_PRI_CONNTRACK_DEFRAG + 1);
s = s2;
}
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 58344c0fbd13..458eabfbe130 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -97,6 +97,7 @@ static const struct snmp_mib snmp6_icmp6_list[] = {
SNMP_MIB_ITEM("Icmp6InMsgs", ICMP6_MIB_INMSGS),
SNMP_MIB_ITEM("Icmp6InErrors", ICMP6_MIB_INERRORS),
SNMP_MIB_ITEM("Icmp6OutMsgs", ICMP6_MIB_OUTMSGS),
+ SNMP_MIB_ITEM("Icmp6OutErrors", ICMP6_MIB_OUTERRORS),
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 8763b1a0814a..4a4dcbe4f8b2 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -381,7 +381,7 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
}
/* Charge it to the socket. */
- if (sock_queue_rcv_skb(sk, skb) < 0) {
+ if (ip_queue_rcv_skb(sk, skb) < 0) {
kfree_skb(skb);
return NET_RX_DROP;
}
@@ -461,6 +461,9 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
if (flags & MSG_ERRQUEUE)
return ipv6_recv_error(sk, msg, len);
+ if (np->rxpmtu && np->rxopt.bits.rxpmtu)
+ return ipv6_recv_rxpmtu(sk, msg, len);
+
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
goto out;
@@ -637,8 +640,8 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
goto error_fault;
IP6_UPD_PO_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
- err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
- dst_output);
+ err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
+ rt->u.dst.dev, dst_output);
if (err > 0)
err = net_xmit_errno(err);
if (err)
@@ -733,6 +736,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
int addr_len = msg->msg_namelen;
int hlimit = -1;
int tclass = -1;
+ int dontfrag = -1;
u16 proto;
int err;
@@ -811,7 +815,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
memset(opt, 0, sizeof(struct ipv6_txoptions));
opt->tot_len = sizeof(struct ipv6_txoptions);
- err = datagram_send_ctl(sock_net(sk), msg, &fl, opt, &hlimit, &tclass);
+ err = datagram_send_ctl(sock_net(sk), msg, &fl, opt, &hlimit,
+ &tclass, &dontfrag);
if (err < 0) {
fl6_sock_release(flowlabel);
return err;
@@ -880,6 +885,9 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
if (tclass < 0)
tclass = np->tclass;
+ if (dontfrag < 0)
+ dontfrag = np->dontfrag;
+
if (msg->msg_flags&MSG_CONFIRM)
goto do_confirm;
@@ -890,7 +898,7 @@ back_from_confirm:
lock_sock(sk);
err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov,
len, 0, hlimit, tclass, opt, &fl, (struct rt6_info*)dst,
- msg->msg_flags);
+ msg->msg_flags, dontfrag);
if (err)
ip6_flush_pending_frames(sk);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 075f540ec197..6603511e3673 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -75,6 +75,9 @@ static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
struct request_sock *req);
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
+static void __tcp_v6_send_check(struct sk_buff *skb,
+ struct in6_addr *saddr,
+ struct in6_addr *daddr);
static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
@@ -350,6 +353,11 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (sk->sk_state == TCP_CLOSE)
goto out;
+ if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
+ NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+ goto out;
+ }
+
tp = tcp_sk(sk);
seq = ntohl(th->seq);
if (sk->sk_state != TCP_LISTEN &&
@@ -503,14 +511,10 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
skb = tcp_make_synack(sk, dst, req, rvp);
if (skb) {
- struct tcphdr *th = tcp_hdr(skb);
-
- th->check = tcp_v6_check(skb->len,
- &treq->loc_addr, &treq->rmt_addr,
- csum_partial(th, skb->len, skb->csum));
+ __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
- err = ip6_xmit(sk, skb, &fl, opt, 0);
+ err = ip6_xmit(sk, skb, &fl, opt);
err = net_xmit_eval(err);
}
@@ -918,22 +922,29 @@ static struct timewait_sock_ops tcp6_timewait_sock_ops = {
.twsk_destructor= tcp_twsk_destructor,
};
-static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
+static void __tcp_v6_send_check(struct sk_buff *skb,
+ struct in6_addr *saddr, struct in6_addr *daddr)
{
- struct ipv6_pinfo *np = inet6_sk(sk);
struct tcphdr *th = tcp_hdr(skb);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
+ th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
skb->csum_start = skb_transport_header(skb) - skb->head;
skb->csum_offset = offsetof(struct tcphdr, check);
} else {
- th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
- csum_partial(th, th->doff<<2,
- skb->csum));
+ th->check = tcp_v6_check(skb->len, saddr, daddr,
+ csum_partial(th, th->doff << 2,
+ skb->csum));
}
}
+static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
+{
+ struct ipv6_pinfo *np = inet6_sk(sk);
+
+ __tcp_v6_send_check(skb, &np->saddr, &np->daddr);
+}
+
static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
struct ipv6hdr *ipv6h;
@@ -946,11 +957,8 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb)
th = tcp_hdr(skb);
th->check = 0;
- th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
- IPPROTO_TCP, 0);
- skb->csum_start = skb_transport_header(skb) - skb->head;
- skb->csum_offset = offsetof(struct tcphdr, check);
skb->ip_summed = CHECKSUM_PARTIAL;
+ __tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
return 0;
}
@@ -1047,15 +1055,14 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
}
#endif
- buff->csum = csum_partial(t1, tot_len, 0);
-
memset(&fl, 0, sizeof(fl));
ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);
- t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
- tot_len, IPPROTO_TCP,
- buff->csum);
+ buff->ip_summed = CHECKSUM_PARTIAL;
+ buff->csum = 0;
+
+ __tcp_v6_send_check(buff, &fl.fl6_src, &fl.fl6_dst);
fl.proto = IPPROTO_TCP;
fl.oif = inet6_iif(skb);
@@ -1070,7 +1077,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) {
if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) {
skb_dst_set(buff, dst);
- ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
+ ip6_xmit(ctl_sk, buff, &fl, NULL);
TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
if (rst)
TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
@@ -1233,12 +1240,12 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
goto drop_and_free;
/* Secret recipe starts with IP addresses */
- d = &ipv6_hdr(skb)->daddr.s6_addr32[0];
+ d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
*mess++ ^= *d++;
*mess++ ^= *d++;
*mess++ ^= *d++;
*mess++ ^= *d++;
- d = &ipv6_hdr(skb)->saddr.s6_addr32[0];
+ d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
*mess++ ^= *d++;
*mess++ ^= *d++;
*mess++ ^= *d++;
@@ -1676,6 +1683,7 @@ ipv6_pktoptions:
static int tcp_v6_rcv(struct sk_buff *skb)
{
struct tcphdr *th;
+ struct ipv6hdr *hdr;
struct sock *sk;
int ret;
struct net *net = dev_net(skb->dev);
@@ -1702,12 +1710,13 @@ static int tcp_v6_rcv(struct sk_buff *skb)
goto bad_packet;
th = tcp_hdr(skb);
+ hdr = ipv6_hdr(skb);
TCP_SKB_CB(skb)->seq = ntohl(th->seq);
TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
skb->len - th->doff*4);
TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
TCP_SKB_CB(skb)->when = 0;
- TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb));
+ TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(hdr);
TCP_SKB_CB(skb)->sacked = 0;
sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
@@ -1718,6 +1727,11 @@ process:
if (sk->sk_state == TCP_TIME_WAIT)
goto do_time_wait;
+ if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
+ NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+ goto discard_and_relse;
+ }
+
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_and_relse;
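
The hunk above makes tcp_v6_rcv() drop segments whose IPv6 hop limit is below a per-socket minimum (inet6_sk(sk)->min_hopcount), the IPv6 counterpart of the minimum-TTL protection against spoofed remote packets. As a minimal userspace sketch (not part of this patch), assuming the knob is exposed as an IPV6_MINHOPCOUNT socket option; the option name and value below are assumptions, only the kernel-side check appears in this hunk:

#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef IPV6_MINHOPCOUNT
#define IPV6_MINHOPCOUNT 73		/* assumed value, not shown in this hunk */
#endif

int main(void)
{
	int fd = socket(AF_INET6, SOCK_STREAM, 0);
	int min_hops = 255;		/* accept only directly connected peers */

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* Segments arriving with hop_limit below this value are counted as
	 * LINUX_MIB_TCPMINTTLDROP and discarded by the kernel. */
	if (setsockopt(fd, IPPROTO_IPV6, IPV6_MINHOPCOUNT,
		       &min_hops, sizeof(min_hops)) < 0)
		perror("setsockopt(IPV6_MINHOPCOUNT)");
	close(fd);
	return 0;
}
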
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 90824852f598..3d7a2c0b836a 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -91,9 +91,9 @@ static unsigned int udp6_portaddr_hash(struct net *net,
if (ipv6_addr_any(addr6))
hash = jhash_1word(0, mix);
else if (ipv6_addr_v4mapped(addr6))
- hash = jhash_1word(addr6->s6_addr32[3], mix);
+ hash = jhash_1word((__force u32)addr6->s6_addr32[3], mix);
else
- hash = jhash2(addr6->s6_addr32, 4, mix);
+ hash = jhash2((__force u32 *)addr6->s6_addr32, 4, mix);
return hash ^ port;
}
@@ -335,6 +335,9 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
if (flags & MSG_ERRQUEUE)
return ipv6_recv_error(sk, msg, len);
+ if (np->rxpmtu && np->rxopt.bits.rxpmtu)
+ return ipv6_recv_rxpmtu(sk, msg, len);
+
try_again:
skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
&peeked, &err);
@@ -421,7 +424,7 @@ out:
return err;
csum_copy_err:
- lock_sock(sk);
+ lock_sock_bh(sk);
if (!skb_kill_datagram(sk, skb, flags)) {
if (is_udp4)
UDP_INC_STATS_USER(sock_net(sk),
@@ -430,7 +433,7 @@ csum_copy_err:
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_INERRORS, is_udplite);
}
- release_sock(sk);
+ unlock_sock_bh(sk);
if (flags & MSG_DONTWAIT)
return -EAGAIN;
@@ -511,7 +514,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
goto drop;
}
- if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) {
+ if ((rc = ip_queue_rcv_skb(sk, skb)) < 0) {
/* Note that an ENOMEM error is charged twice */
if (rc == -ENOMEM)
UDP6_INC_STATS_BH(sock_net(sk),
@@ -581,6 +584,10 @@ static void flush_stack(struct sock **stack, unsigned int count,
sk = stack[i];
if (skb1) {
+ if (sk_rcvqueues_full(sk, skb)) {
+ kfree_skb(skb1);
+ goto drop;
+ }
bh_lock_sock(sk);
if (!sock_owned_by_user(sk))
udpv6_queue_rcv_skb(sk, skb1);
@@ -692,7 +699,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
u32 ulen = 0;
if (!pskb_may_pull(skb, sizeof(struct udphdr)))
- goto short_packet;
+ goto discard;
saddr = &ipv6_hdr(skb)->saddr;
daddr = &ipv6_hdr(skb)->daddr;
@@ -756,6 +763,10 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
/* deliver */
+ if (sk_rcvqueues_full(sk, skb)) {
+ sock_put(sk);
+ goto discard;
+ }
bh_lock_sock(sk);
if (!sock_owned_by_user(sk))
udpv6_queue_rcv_skb(sk, skb);
@@ -770,9 +781,14 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
return 0;
short_packet:
- LIMIT_NETDEBUG(KERN_DEBUG "UDP%sv6: short packet: %d/%u\n",
+ LIMIT_NETDEBUG(KERN_DEBUG "UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
proto == IPPROTO_UDPLITE ? "-Lite" : "",
- ulen, skb->len);
+ saddr,
+ ntohs(uh->source),
+ ulen,
+ skb->len,
+ daddr,
+ ntohs(uh->dest));
discard:
UDP6_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
@@ -919,6 +935,7 @@ int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk,
int ulen = len;
int hlimit = -1;
int tclass = -1;
+ int dontfrag = -1;
int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
int err;
int connected = 0;
@@ -1049,7 +1066,8 @@ do_udp_sendmsg:
memset(opt, 0, sizeof(struct ipv6_txoptions));
opt->tot_len = sizeof(*opt);
- err = datagram_send_ctl(sock_net(sk), msg, &fl, opt, &hlimit, &tclass);
+ err = datagram_send_ctl(sock_net(sk), msg, &fl, opt, &hlimit,
+ &tclass, &dontfrag);
if (err < 0) {
fl6_sock_release(flowlabel);
return err;
@@ -1120,6 +1138,9 @@ do_udp_sendmsg:
if (tclass < 0)
tclass = np->tclass;
+ if (dontfrag < 0)
+ dontfrag = np->dontfrag;
+
if (msg->msg_flags&MSG_CONFIRM)
goto do_confirm;
back_from_confirm:
@@ -1143,7 +1164,7 @@ do_append_data:
err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen,
sizeof(struct udphdr), hlimit, tclass, opt, &fl,
(struct rt6_info*)dst,
- corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
+ corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags, dontfrag);
if (err)
udp_v6_flush_pending_frames(sk);
else if (!corkreq)
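
The udpv6_recvmsg()/udpv6_sendmsg() hunks above plumb a per-packet dontfrag flag into ip6_append_data() and let queued path-MTU notifications be read back via ipv6_recv_rxpmtu(). A hedged userspace sketch of the corresponding RFC 3542-style socket options; the option names and values below are assumptions, only the kernel-side plumbing appears in this diff:

#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

#ifndef IPV6_RECVPATHMTU
#define IPV6_RECVPATHMTU 60		/* assumed value */
#endif
#ifndef IPV6_DONTFRAG
#define IPV6_DONTFRAG 62		/* assumed value */
#endif

int main(void)
{
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
	int on = 1;

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* Never fragment outgoing datagrams locally... */
	if (setsockopt(fd, IPPROTO_IPV6, IPV6_DONTFRAG, &on, sizeof(on)) < 0)
		perror("setsockopt(IPV6_DONTFRAG)");
	/* ...and queue path-MTU updates so they can be read back later. */
	if (setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPATHMTU, &on, sizeof(on)) < 0)
		perror("setsockopt(IPV6_RECVPATHMTU)");
	return 0;
}
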
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index 2bc98ede1235..f8c3cf842f53 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -42,7 +42,7 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)
ipv6_hdr(skb)->payload_len = htons(skb->len);
__skb_push(skb, skb->data - skb_network_header(skb));
- NF_HOOK(PF_INET6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
+ NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
ip6_rcv_finish);
return -1;
}
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 0c92112dcba3..6434bd5ce088 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -90,6 +90,6 @@ static int xfrm6_output_finish(struct sk_buff *skb)
int xfrm6_output(struct sk_buff *skb)
{
- return NF_HOOK(PF_INET6, NF_INET_POST_ROUTING, skb, NULL, skb_dst(skb)->dev,
- xfrm6_output_finish);
+ return NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL,
+ skb_dst(skb)->dev, xfrm6_output_finish);
}
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 00bf7c962b7e..4a0e77e14468 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -67,36 +67,6 @@ static int xfrm6_get_saddr(struct net *net,
return 0;
}
-static struct dst_entry *
-__xfrm6_find_bundle(struct flowi *fl, struct xfrm_policy *policy)
-{
- struct dst_entry *dst;
-
- /* Still not clear if we should set fl->fl6_{src,dst}... */
- read_lock_bh(&policy->lock);
- for (dst = policy->bundles; dst; dst = dst->next) {
- struct xfrm_dst *xdst = (struct xfrm_dst*)dst;
- struct in6_addr fl_dst_prefix, fl_src_prefix;
-
- ipv6_addr_prefix(&fl_dst_prefix,
- &fl->fl6_dst,
- xdst->u.rt6.rt6i_dst.plen);
- ipv6_addr_prefix(&fl_src_prefix,
- &fl->fl6_src,
- xdst->u.rt6.rt6i_src.plen);
- if (ipv6_addr_equal(&xdst->u.rt6.rt6i_dst.addr, &fl_dst_prefix) &&
- ipv6_addr_equal(&xdst->u.rt6.rt6i_src.addr, &fl_src_prefix) &&
- xfrm_bundle_ok(policy, xdst, fl, AF_INET6,
- (xdst->u.rt6.rt6i_dst.plen != 128 ||
- xdst->u.rt6.rt6i_src.plen != 128))) {
- dst_clone(dst);
- break;
- }
- }
- read_unlock_bh(&policy->lock);
- return dst;
-}
-
static int xfrm6_get_tos(struct flowi *fl)
{
return 0;
@@ -291,7 +261,6 @@ static struct xfrm_policy_afinfo xfrm6_policy_afinfo = {
.dst_ops = &xfrm6_dst_ops,
.dst_lookup = xfrm6_dst_lookup,
.get_saddr = xfrm6_get_saddr,
- .find_bundle = __xfrm6_find_bundle,
.decode_session = _decode_session6,
.get_tos = xfrm6_get_tos,
.init_path = xfrm6_init_path,
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 2a4efcea3423..79986a674f6e 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -347,7 +347,7 @@ static void irda_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
self->tx_flow = flow;
IRDA_DEBUG(1, "%s(), IrTTP wants us to start again\n",
__func__);
- wake_up_interruptible(sk->sk_sleep);
+ wake_up_interruptible(sk_sleep(sk));
break;
default:
IRDA_DEBUG(0, "%s(), Unknown flow command!\n", __func__);
@@ -900,7 +900,7 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
if (flags & O_NONBLOCK)
goto out;
- err = wait_event_interruptible(*(sk->sk_sleep),
+ err = wait_event_interruptible(*(sk_sleep(sk)),
skb_peek(&sk->sk_receive_queue));
if (err)
goto out;
@@ -1066,7 +1066,7 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
goto out;
err = -ERESTARTSYS;
- if (wait_event_interruptible(*(sk->sk_sleep),
+ if (wait_event_interruptible(*(sk_sleep(sk)),
(sk->sk_state != TCP_SYN_SENT)))
goto out;
@@ -1318,7 +1318,7 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock,
/* Check if IrTTP wants us to slow down */
- if (wait_event_interruptible(*(sk->sk_sleep),
+ if (wait_event_interruptible(*(sk_sleep(sk)),
(self->tx_flow != FLOW_STOP || sk->sk_state != TCP_ESTABLISHED))) {
err = -ERESTARTSYS;
goto out;
@@ -1477,7 +1477,7 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
if (copied >= target)
break;
- prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
/*
* POSIX 1003.1g mandates this order.
@@ -1497,7 +1497,7 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
/* Wait process until data arrives */
schedule();
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
if (err)
goto out;
@@ -1787,7 +1787,7 @@ static unsigned int irda_poll(struct file * file, struct socket *sock,
IRDA_DEBUG(4, "%s()\n", __func__);
lock_kernel();
- poll_wait(file, sk->sk_sleep, wait);
+ poll_wait(file, sk_sleep(sk), wait);
mask = 0;
/* Exceptional events? */
diff --git a/net/irda/ircomm/ircomm_param.c b/net/irda/ircomm/ircomm_param.c
index e2e893b474e9..8b915f3ac3b9 100644
--- a/net/irda/ircomm/ircomm_param.c
+++ b/net/irda/ircomm/ircomm_param.c
@@ -475,7 +475,7 @@ static int ircomm_param_dce(void *instance, irda_param_t *param, int get)
/* Check if any of the settings have changed */
if (dce & 0x0f) {
if (dce & IRCOMM_DELTA_CTS) {
- IRDA_DEBUG(2, "%s(), CTS \n", __func__ );
+ IRDA_DEBUG(2, "%s(), CTS\n", __func__ );
}
}
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index c18286a2167b..8be324fe08b9 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -59,7 +59,7 @@ do { \
DEFINE_WAIT(__wait); \
long __timeo = timeo; \
ret = 0; \
- prepare_to_wait(sk->sk_sleep, &__wait, TASK_INTERRUPTIBLE); \
+ prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE); \
while (!(condition)) { \
if (!__timeo) { \
ret = -EAGAIN; \
@@ -76,7 +76,7 @@ do { \
if (ret) \
break; \
} \
- finish_wait(sk->sk_sleep, &__wait); \
+ finish_wait(sk_sleep(sk), &__wait); \
} while (0)
#define iucv_sock_wait(sk, condition, timeo) \
@@ -305,11 +305,14 @@ static inline int iucv_below_msglim(struct sock *sk)
*/
static void iucv_sock_wake_msglim(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
- if (sk_has_sleeper(sk))
- wake_up_interruptible_all(sk->sk_sleep);
+ struct socket_wq *wq;
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_all(&wq->wait);
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
- read_unlock(&sk->sk_callback_lock);
+ rcu_read_unlock();
}
/* Timers */
@@ -795,7 +798,7 @@ static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
/* Wait for an incoming connection */
- add_wait_queue_exclusive(sk->sk_sleep, &wait);
+ add_wait_queue_exclusive(sk_sleep(sk), &wait);
while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
set_current_state(TASK_INTERRUPTIBLE);
if (!timeo) {
@@ -819,7 +822,7 @@ static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
}
set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sk_sleep, &wait);
+ remove_wait_queue(sk_sleep(sk), &wait);
if (err)
goto done;
@@ -1269,7 +1272,7 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
struct sock *sk = sock->sk;
unsigned int mask = 0;
- sock_poll_wait(file, sk->sk_sleep, wait);
+ sock_poll_wait(file, sk_sleep(sk), wait);
if (sk->sk_state == IUCV_LISTEN)
return iucv_accept_poll(sk);
diff --git a/net/l2tp/Kconfig b/net/l2tp/Kconfig
new file mode 100644
index 000000000000..4b1e71751e10
--- /dev/null
+++ b/net/l2tp/Kconfig
@@ -0,0 +1,107 @@
+#
+# Layer Two Tunneling Protocol (L2TP)
+#
+
+menuconfig L2TP
+ tristate "Layer Two Tunneling Protocol (L2TP)"
+ depends on INET
+ ---help---
+ Layer Two Tunneling Protocol
+
+ From RFC 2661 <http://www.ietf.org/rfc/rfc2661.txt>.
+
+ L2TP facilitates the tunneling of packets across an
+ intervening network in a way that is as transparent as
+ possible to both end-users and applications.
+
+ L2TP is often used to tunnel PPP traffic over IP
+ tunnels. One IP tunnel may carry thousands of individual PPP
+ connections. L2TP is also used as a VPN protocol, popular
+ with home workers to connect to their offices.
+
+ L2TPv3 allows other protocols as well as PPP to be carried
+ over L2TP tunnels. L2TPv3 is defined in RFC 3931
+ <http://www.ietf.org/rfc/rfc3931.txt>.
+
+ The kernel component handles only L2TP data packets: a
+ userland daemon handles the L2TP control protocol (tunnel
+ and session setup). One such daemon is OpenL2TP
+ (http://openl2tp.org/).
+
+ If you don't need L2TP, say N. To compile all L2TP code as
+ modules, choose M here.
+
+config L2TP_DEBUGFS
+ tristate "L2TP debugfs support"
+ depends on L2TP && DEBUG_FS
+ help
+ Support for the l2tp directory in the debugfs filesystem. This may be
+ used to dump internal state of the l2tp drivers for problem
+ analysis.
+
+ If unsure, say 'Y'.
+
+ To compile this driver as a module, choose M here. The module
+ will be called l2tp_debugfs.
+
+config L2TP_V3
+ bool "L2TPv3 support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && L2TP
+ help
+ Layer Two Tunneling Protocol Version 3
+
+ From RFC 3931 <http://www.ietf.org/rfc/rfc3931.txt>.
+
+ The Layer Two Tunneling Protocol (L2TP) provides a dynamic
+ mechanism for tunneling Layer 2 (L2) "circuits" across a
+ packet-oriented data network (e.g., over IP). L2TP, as
+ originally defined in RFC 2661, is a standard method for
+ tunneling Point-to-Point Protocol (PPP) [RFC1661] sessions.
+ L2TP has since been adopted for tunneling a number of other
+ L2 protocols, including ATM, Frame Relay, HDLC and even raw
+ ethernet frames.
+
+ If you are connecting to L2TPv3 equipment, or you want to
+ tunnel raw ethernet frames using L2TP, say Y here. If
+ unsure, say N.
+
+config L2TP_IP
+ tristate "L2TP IP encapsulation for L2TPv3"
+ depends on L2TP_V3
+ help
+ Support for L2TP-over-IP socket family.
+
+ The L2TPv3 protocol defines two possible encapsulations for
+ L2TP frames, namely UDP and plain IP (without UDP). This
+ driver provides a new L2TPIP socket family with which
+ userspace L2TPv3 daemons may create L2TP/IP tunnel sockets
+ when UDP encapsulation is not required. When L2TP is carried
+ in IP packets, it uses IP protocol number 115, so this
+ protocol must be allowed through firewalls.
+
+ To compile this driver as a module, choose M here. The module
+ will be called l2tp_ip.
+
+config L2TP_ETH
+ tristate "L2TP ethernet pseudowire support for L2TPv3"
+ depends on L2TP_V3
+ help
+ Support for carrying raw ethernet frames over L2TPv3.
+
+ From RFC 4719 <http://www.ietf.org/rfc/rfc4719.txt>.
+
+ The Layer 2 Tunneling Protocol, Version 3 (L2TPv3) can be
+ used as a control protocol and for data encapsulation to set
+ up Pseudowires for transporting layer 2 Packet Data Units
+ across an IP network [RFC3931].
+
+ This driver provides an ethernet virtual interface for each
+ L2TP ethernet pseudowire instance. Standard Linux tools may
+ be used to assign an IP address to the local virtual
+ interface, or add the interface to a bridge.
+
+ If you are using L2TPv3, you will almost certainly want to
+ enable this option.
+
+ To compile this driver as a module, choose M here. The module
+ will be called l2tp_eth.
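
For reference, the L2TP_IP help text above corresponds to a userspace L2TPv3 daemon opening a datagram socket with IP protocol 115. A minimal sketch (not part of this patch; IPPROTO_L2TP may be missing from older libc headers, so it is defined by hand here):

#include <stdio.h>
#include <sys/socket.h>

#ifndef IPPROTO_L2TP
#define IPPROTO_L2TP 115	/* protocol number from the help text above */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_L2TP);

	if (fd < 0) {
		/* Fails with EPROTONOSUPPORT unless the l2tp_ip module is loaded. */
		perror("socket(AF_INET, SOCK_DGRAM, IPPROTO_L2TP)");
		return 1;
	}
	/* A real daemon would now bind()/connect() the socket with the
	 * tunnel's local/peer addresses and connection IDs, much as the
	 * kernel-side l2tp_tunnel_sock_create() below does for unmanaged
	 * tunnels. */
	return 0;
}
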
diff --git a/net/l2tp/Makefile b/net/l2tp/Makefile
new file mode 100644
index 000000000000..110e7bc2de5e
--- /dev/null
+++ b/net/l2tp/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for the L2TP subsystem.
+#
+
+obj-$(CONFIG_L2TP) += l2tp_core.o
+
+# Build l2tp as modules if L2TP is M
+obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_PPPOL2TP)) += l2tp_ppp.o
+obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_IP)) += l2tp_ip.o
+obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_V3)) += l2tp_netlink.o
+obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_ETH)) += l2tp_eth.o
+obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_DEBUGFS)) += l2tp_debugfs.o
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
new file mode 100644
index 000000000000..1712af1c7b3f
--- /dev/null
+++ b/net/l2tp/l2tp_core.c
@@ -0,0 +1,1666 @@
+/*
+ * L2TP core.
+ *
+ * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
+ *
+ * This file contains some code of the original L2TPv2 pppol2tp
+ * driver, which has the following copyright:
+ *
+ * Authors: Martijn van Oosterhout <kleptog@svana.org>
+ * James Chapman (jchapman@katalix.com)
+ * Contributors:
+ * Michal Ostrowski <mostrows@speakeasy.net>
+ * Arnaldo Carvalho de Melo <acme@xconectiva.com.br>
+ * David S. Miller (davem@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/rculist.h>
+#include <linux/uaccess.h>
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/jiffies.h>
+
+#include <linux/netdevice.h>
+#include <linux/net.h>
+#include <linux/inetdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/l2tp.h>
+#include <linux/hash.h>
+#include <linux/sort.h>
+#include <linux/file.h>
+#include <linux/nsproxy.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include <net/dst.h>
+#include <net/ip.h>
+#include <net/udp.h>
+#include <net/inet_common.h>
+#include <net/xfrm.h>
+#include <net/protocol.h>
+
+#include <asm/byteorder.h>
+#include <asm/atomic.h>
+
+#include "l2tp_core.h"
+
+#define L2TP_DRV_VERSION "V2.0"
+
+/* L2TP header constants */
+#define L2TP_HDRFLAG_T 0x8000
+#define L2TP_HDRFLAG_L 0x4000
+#define L2TP_HDRFLAG_S 0x0800
+#define L2TP_HDRFLAG_O 0x0200
+#define L2TP_HDRFLAG_P 0x0100
+
+#define L2TP_HDR_VER_MASK 0x000F
+#define L2TP_HDR_VER_2 0x0002
+#define L2TP_HDR_VER_3 0x0003
+
+/* L2TPv3 default L2-specific sublayer */
+#define L2TP_SLFLAG_S 0x40000000
+#define L2TP_SL_SEQ_MASK 0x00ffffff
+
+#define L2TP_HDR_SIZE_SEQ 10
+#define L2TP_HDR_SIZE_NOSEQ 6
+
+/* Default trace flags */
+#define L2TP_DEFAULT_DEBUG_FLAGS 0
+
+#define PRINTK(_mask, _type, _lvl, _fmt, args...) \
+ do { \
+ if ((_mask) & (_type)) \
+ printk(_lvl "L2TP: " _fmt, ##args); \
+ } while (0)
+
+/* Private data stored for received packets in the skb.
+ */
+struct l2tp_skb_cb {
+ u32 ns;
+ u16 has_seq;
+ u16 length;
+ unsigned long expires;
+};
+
+#define L2TP_SKB_CB(skb) ((struct l2tp_skb_cb *) &skb->cb[sizeof(struct inet_skb_parm)])
+
+static atomic_t l2tp_tunnel_count;
+static atomic_t l2tp_session_count;
+
+/* per-net private data for this module */
+static unsigned int l2tp_net_id;
+struct l2tp_net {
+ struct list_head l2tp_tunnel_list;
+ spinlock_t l2tp_tunnel_list_lock;
+ struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2];
+ spinlock_t l2tp_session_hlist_lock;
+};
+
+static inline struct l2tp_net *l2tp_pernet(struct net *net)
+{
+ BUG_ON(!net);
+
+ return net_generic(net, l2tp_net_id);
+}
+
+/* Session hash global list for L2TPv3.
+ * The session_id SHOULD be random according to RFC3931, but several
+ * L2TP implementations use incrementing session_ids. So we do a real
+ * hash on the session_id, rather than a simple bitmask.
+ */
+static inline struct hlist_head *
+l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
+{
+ return &pn->l2tp_session_hlist[hash_32(session_id, L2TP_HASH_BITS_2)];
+
+}
+
+/* Lookup a session by id in the global session list
+ */
+static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id)
+{
+ struct l2tp_net *pn = l2tp_pernet(net);
+ struct hlist_head *session_list =
+ l2tp_session_id_hash_2(pn, session_id);
+ struct l2tp_session *session;
+ struct hlist_node *walk;
+
+ rcu_read_lock_bh();
+ hlist_for_each_entry_rcu(session, walk, session_list, global_hlist) {
+ if (session->session_id == session_id) {
+ rcu_read_unlock_bh();
+ return session;
+ }
+ }
+ rcu_read_unlock_bh();
+
+ return NULL;
+}
+
+/* Session hash list.
+ * The session_id SHOULD be random according to RFC2661, but several
+ * L2TP implementations (Cisco and Microsoft) use incrementing
+ * session_ids. So we do a real hash on the session_id, rather than a
+ * simple bitmask.
+ */
+static inline struct hlist_head *
+l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
+{
+ return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
+}
+
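
The comments above explain why session IDs are run through hash_32() rather than masked directly: real implementations often hand out sequential rather than random IDs, so the bits are mixed before a bucket index is taken. A rough userspace illustration (the multiplier below is a generic stand-in, not the kernel's hash_32() constant, and HASH_BITS stands in for L2TP_HASH_BITS_2 from l2tp_core.h):

#include <stdint.h>
#include <stdio.h>

#define HASH_BITS 7	/* stand-in for L2TP_HASH_BITS_2 */

/* Multiplicative hash: mix the id, keep the top HASH_BITS bits. */
static unsigned int bucket(uint32_t session_id)
{
	return (uint32_t)(session_id * 2654435761u) >> (32 - HASH_BITS);
}

int main(void)
{
	/* Even strictly incrementing session IDs spread over the table. */
	for (uint32_t id = 1; id <= 8; id++)
		printf("session %u -> bucket %u\n", (unsigned int)id, bucket(id));
	return 0;
}
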
+/* Lookup a session by id
+ */
+struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id)
+{
+ struct hlist_head *session_list;
+ struct l2tp_session *session;
+ struct hlist_node *walk;
+
+ /* In L2TPv3, session_ids are unique over all tunnels and we
+ * sometimes need to look them up before we know the
+ * tunnel.
+ */
+ if (tunnel == NULL)
+ return l2tp_session_find_2(net, session_id);
+
+ session_list = l2tp_session_id_hash(tunnel, session_id);
+ read_lock_bh(&tunnel->hlist_lock);
+ hlist_for_each_entry(session, walk, session_list, hlist) {
+ if (session->session_id == session_id) {
+ read_unlock_bh(&tunnel->hlist_lock);
+ return session;
+ }
+ }
+ read_unlock_bh(&tunnel->hlist_lock);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(l2tp_session_find);
+
+struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
+{
+ int hash;
+ struct hlist_node *walk;
+ struct l2tp_session *session;
+ int count = 0;
+
+ read_lock_bh(&tunnel->hlist_lock);
+ for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
+ hlist_for_each_entry(session, walk, &tunnel->session_hlist[hash], hlist) {
+ if (++count > nth) {
+ read_unlock_bh(&tunnel->hlist_lock);
+ return session;
+ }
+ }
+ }
+
+ read_unlock_bh(&tunnel->hlist_lock);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(l2tp_session_find_nth);
+
+/* Lookup a session by interface name.
+ * This is very inefficient but is only used by management interfaces.
+ */
+struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
+{
+ struct l2tp_net *pn = l2tp_pernet(net);
+ int hash;
+ struct hlist_node *walk;
+ struct l2tp_session *session;
+
+ rcu_read_lock_bh();
+ for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
+ hlist_for_each_entry_rcu(session, walk, &pn->l2tp_session_hlist[hash], global_hlist) {
+ if (!strcmp(session->ifname, ifname)) {
+ rcu_read_unlock_bh();
+ return session;
+ }
+ }
+ }
+
+ rcu_read_unlock_bh();
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(l2tp_session_find_by_ifname);
+
+/* Lookup a tunnel by id
+ */
+struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id)
+{
+ struct l2tp_tunnel *tunnel;
+ struct l2tp_net *pn = l2tp_pernet(net);
+
+ rcu_read_lock_bh();
+ list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
+ if (tunnel->tunnel_id == tunnel_id) {
+ rcu_read_unlock_bh();
+ return tunnel;
+ }
+ }
+ rcu_read_unlock_bh();
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_find);
+
+struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth)
+{
+ struct l2tp_net *pn = l2tp_pernet(net);
+ struct l2tp_tunnel *tunnel;
+ int count = 0;
+
+ rcu_read_lock_bh();
+ list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
+ if (++count > nth) {
+ rcu_read_unlock_bh();
+ return tunnel;
+ }
+ }
+
+ rcu_read_unlock_bh();
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_find_nth);
+
+/*****************************************************************************
+ * Receive data handling
+ *****************************************************************************/
+
+/* Queue a skb in order. We come here only if the skb has an L2TP sequence
+ * number.
+ */
+static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *skb)
+{
+ struct sk_buff *skbp;
+ struct sk_buff *tmp;
+ u32 ns = L2TP_SKB_CB(skb)->ns;
+
+ spin_lock_bh(&session->reorder_q.lock);
+ skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
+ if (L2TP_SKB_CB(skbp)->ns > ns) {
+ __skb_queue_before(&session->reorder_q, skbp, skb);
+ PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
+ "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
+ session->name, ns, L2TP_SKB_CB(skbp)->ns,
+ skb_queue_len(&session->reorder_q));
+ session->stats.rx_oos_packets++;
+ goto out;
+ }
+ }
+
+ __skb_queue_tail(&session->reorder_q, skb);
+
+out:
+ spin_unlock_bh(&session->reorder_q.lock);
+}
+
+/* Dequeue a single skb.
+ */
+static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *skb)
+{
+ struct l2tp_tunnel *tunnel = session->tunnel;
+ int length = L2TP_SKB_CB(skb)->length;
+
+ /* We're about to requeue the skb, so return resources
+ * to its current owner (a socket receive buffer).
+ */
+ skb_orphan(skb);
+
+ tunnel->stats.rx_packets++;
+ tunnel->stats.rx_bytes += length;
+ session->stats.rx_packets++;
+ session->stats.rx_bytes += length;
+
+ if (L2TP_SKB_CB(skb)->has_seq) {
+ /* Bump our Nr */
+ session->nr++;
+ if (tunnel->version == L2TP_HDR_VER_2)
+ session->nr &= 0xffff;
+ else
+ session->nr &= 0xffffff;
+
+ PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
+ "%s: updated nr to %hu\n", session->name, session->nr);
+ }
+
+ /* call private receive handler */
+ if (session->recv_skb != NULL)
+ (*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length);
+ else
+ kfree_skb(skb);
+
+ if (session->deref)
+ (*session->deref)(session);
+}
+
+/* Dequeue skbs from the session's reorder_q, subject to packet order.
+ * Skbs that have been in the queue for too long are simply discarded.
+ */
+static void l2tp_recv_dequeue(struct l2tp_session *session)
+{
+ struct sk_buff *skb;
+ struct sk_buff *tmp;
+
+ /* If the pkt at the head of the queue has the nr that we
+ * expect to send up next, dequeue it and any other
+ * in-sequence packets behind it.
+ */
+ spin_lock_bh(&session->reorder_q.lock);
+ skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
+ if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) {
+ session->stats.rx_seq_discards++;
+ session->stats.rx_errors++;
+ PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
+ "%s: oos pkt %u len %d discarded (too old), "
+ "waiting for %u, reorder_q_len=%d\n",
+ session->name, L2TP_SKB_CB(skb)->ns,
+ L2TP_SKB_CB(skb)->length, session->nr,
+ skb_queue_len(&session->reorder_q));
+ __skb_unlink(skb, &session->reorder_q);
+ kfree_skb(skb);
+ if (session->deref)
+ (*session->deref)(session);
+ continue;
+ }
+
+ if (L2TP_SKB_CB(skb)->has_seq) {
+ if (L2TP_SKB_CB(skb)->ns != session->nr) {
+ PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
+ "%s: holding oos pkt %u len %d, "
+ "waiting for %u, reorder_q_len=%d\n",
+ session->name, L2TP_SKB_CB(skb)->ns,
+ L2TP_SKB_CB(skb)->length, session->nr,
+ skb_queue_len(&session->reorder_q));
+ goto out;
+ }
+ }
+ __skb_unlink(skb, &session->reorder_q);
+
+ /* Process the skb. We release the queue lock while we
+ * do so to let other contexts process the queue.
+ */
+ spin_unlock_bh(&session->reorder_q.lock);
+ l2tp_recv_dequeue_skb(session, skb);
+ spin_lock_bh(&session->reorder_q.lock);
+ }
+
+out:
+ spin_unlock_bh(&session->reorder_q.lock);
+}
+
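
To summarize the receive-side policy implemented by l2tp_recv_queue_skb() and l2tp_recv_dequeue() above: in-sequence packets are delivered immediately, out-of-sequence packets are either held in ns order (when a reorder timeout is configured) or dropped (when it is not), and held packets are discarded once they expire. A simplified userspace model of that decision, ignoring expiry handling:

#include <stdio.h>

enum action { DELIVER, QUEUE, DROP };

/* Decision for one arriving sequenced packet with number ns, given the
 * next expected number nr and whether a reorder timeout is configured. */
static enum action classify(unsigned int ns, unsigned int nr,
			    int reorder_timeout)
{
	if (ns == nr)
		return DELIVER;			/* in sequence: pass up, bump nr */
	return reorder_timeout ? QUEUE : DROP;	/* hold in ns order, or discard */
}

int main(void)
{
	static const char *names[] = { "deliver", "queue", "drop" };

	printf("ns=5 nr=5          -> %s\n", names[classify(5, 5, 0)]);
	printf("ns=6 nr=5, timeout -> %s\n", names[classify(6, 5, 1)]);
	printf("ns=6 nr=5, none    -> %s\n", names[classify(6, 5, 0)]);
	return 0;
}
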
+static inline int l2tp_verify_udp_checksum(struct sock *sk,
+ struct sk_buff *skb)
+{
+ struct udphdr *uh = udp_hdr(skb);
+ u16 ulen = ntohs(uh->len);
+ struct inet_sock *inet;
+ __wsum psum;
+
+ if (sk->sk_no_check || skb_csum_unnecessary(skb) || !uh->check)
+ return 0;
+
+ inet = inet_sk(sk);
+ psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr, ulen,
+ IPPROTO_UDP, 0);
+
+ if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
+ !csum_fold(csum_add(psum, skb->csum)))
+ return 0;
+
+ skb->csum = psum;
+
+ return __skb_checksum_complete(skb);
+}
+
+/* Do receive processing of L2TP data frames. We handle both L2TPv2
+ * and L2TPv3 data frames here.
+ *
+ * L2TPv2 Data Message Header
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |T|L|x|x|S|x|O|P|x|x|x|x| Ver | Length (opt) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Tunnel ID | Session ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Ns (opt) | Nr (opt) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Offset Size (opt) | Offset pad... (opt)
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Data frames are marked by T=0. All other fields are the same as
+ * those in L2TP control frames.
+ *
+ * L2TPv3 Data Message Header
+ *
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | L2TP Session Header |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | L2-Specific Sublayer |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Tunnel Payload ...
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * L2TPv3 Session Header Over IP
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Session ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Cookie (optional, maximum 64 bits)...
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * L2TPv3 L2-Specific Sublayer Format
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |x|S|x|x|x|x|x|x| Sequence Number |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Cookie value, sublayer format and offset (pad) are negotiated with
+ * the peer when the session is set up. Unlike L2TPv2, we do not need
+ * to parse the packet header to determine if optional fields are
+ * present.
+ *
+ * Caller must already have parsed the frame and determined that it is
+ * a data (not control) frame before coming here. Fields up to the
+ * session-id have already been parsed and ptr points to the data
+ * after the session-id.
+ */
+void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
+ unsigned char *ptr, unsigned char *optr, u16 hdrflags,
+ int length, int (*payload_hook)(struct sk_buff *skb))
+{
+ struct l2tp_tunnel *tunnel = session->tunnel;
+ int offset;
+ u32 ns, nr;
+
+ /* The ref count is increased since we now hold a pointer to
+ * the session. Take care to decrement the refcnt when exiting
+ * this function from now on...
+ */
+ l2tp_session_inc_refcount(session);
+ if (session->ref)
+ (*session->ref)(session);
+
+ /* Parse and check optional cookie */
+ if (session->peer_cookie_len > 0) {
+ if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
+ PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
+ "%s: cookie mismatch (%u/%u). Discarding.\n",
+ tunnel->name, tunnel->tunnel_id, session->session_id);
+ session->stats.rx_cookie_discards++;
+ goto discard;
+ }
+ ptr += session->peer_cookie_len;
+ }
+
+ /* Handle the optional sequence numbers. Sequence numbers are
+ * in different places for L2TPv2 and L2TPv3.
+ *
+ * If we are the LAC, enable/disable sequence numbers under
+ * the control of the LNS. If no sequence numbers present but
+ * we were expecting them, discard frame.
+ */
+ ns = nr = 0;
+ L2TP_SKB_CB(skb)->has_seq = 0;
+ if (tunnel->version == L2TP_HDR_VER_2) {
+ if (hdrflags & L2TP_HDRFLAG_S) {
+ ns = ntohs(*(__be16 *) ptr);
+ ptr += 2;
+ nr = ntohs(*(__be16 *) ptr);
+ ptr += 2;
+
+ /* Store L2TP info in the skb */
+ L2TP_SKB_CB(skb)->ns = ns;
+ L2TP_SKB_CB(skb)->has_seq = 1;
+
+ PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
+ "%s: recv data ns=%u, nr=%u, session nr=%u\n",
+ session->name, ns, nr, session->nr);
+ }
+ } else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
+ u32 l2h = ntohl(*(__be32 *) ptr);
+
+ if (l2h & 0x40000000) {
+ ns = l2h & 0x00ffffff;
+
+ /* Store L2TP info in the skb */
+ L2TP_SKB_CB(skb)->ns = ns;
+ L2TP_SKB_CB(skb)->has_seq = 1;
+
+ PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
+ "%s: recv data ns=%u, session nr=%u\n",
+ session->name, ns, session->nr);
+ }
+ }
+
+ /* Advance past L2-specific header, if present */
+ ptr += session->l2specific_len;
+
+ if (L2TP_SKB_CB(skb)->has_seq) {
+ /* Received a packet with sequence numbers. If we're the LNS,
+ * check if we are sending sequence numbers and if not,
+ * configure it so.
+ */
+ if ((!session->lns_mode) && (!session->send_seq)) {
+ PRINTK(session->debug, L2TP_MSG_SEQ, KERN_INFO,
+ "%s: requested to enable seq numbers by LNS\n",
+ session->name);
+ session->send_seq = -1;
+ l2tp_session_set_header_len(session, tunnel->version);
+ }
+ } else {
+ /* No sequence numbers.
+ * If user has configured mandatory sequence numbers, discard.
+ */
+ if (session->recv_seq) {
+ PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING,
+ "%s: recv data has no seq numbers when required. "
+ "Discarding\n", session->name);
+ session->stats.rx_seq_discards++;
+ goto discard;
+ }
+
+ /* If we're the LAC and we're sending sequence numbers, the
+ * LNS has requested that we no longer send sequence numbers.
+ * If we're the LNS and we're sending sequence numbers, the
+ * LAC is broken. Discard the frame.
+ */
+ if ((!session->lns_mode) && (session->send_seq)) {
+ PRINTK(session->debug, L2TP_MSG_SEQ, KERN_INFO,
+ "%s: requested to disable seq numbers by LNS\n",
+ session->name);
+ session->send_seq = 0;
+ l2tp_session_set_header_len(session, tunnel->version);
+ } else if (session->send_seq) {
+ PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING,
+ "%s: recv data has no seq numbers when required. "
+ "Discarding\n", session->name);
+ session->stats.rx_seq_discards++;
+ goto discard;
+ }
+ }
+
+ /* Session data offset is handled differently for L2TPv2 and
+ * L2TPv3. For L2TPv2, there is an optional 16-bit value in
+ * the header. For L2TPv3, the offset is negotiated using AVPs
+ * in the session setup control protocol.
+ */
+ if (tunnel->version == L2TP_HDR_VER_2) {
+ /* If offset bit set, skip it. */
+ if (hdrflags & L2TP_HDRFLAG_O) {
+ offset = ntohs(*(__be16 *)ptr);
+ ptr += 2 + offset;
+ }
+ } else
+ ptr += session->offset;
+
+ offset = ptr - optr;
+ if (!pskb_may_pull(skb, offset))
+ goto discard;
+
+ __skb_pull(skb, offset);
+
+ /* If caller wants to process the payload before we queue the
+ * packet, do so now.
+ */
+ if (payload_hook)
+ if ((*payload_hook)(skb))
+ goto discard;
+
+ /* Prepare skb for adding to the session's reorder_q. Hold
+ * packets for max reorder_timeout or 1 second if not
+ * reordering.
+ */
+ L2TP_SKB_CB(skb)->length = length;
+ L2TP_SKB_CB(skb)->expires = jiffies +
+ (session->reorder_timeout ? session->reorder_timeout : HZ);
+
+ /* Add packet to the session's receive queue. Reordering is done here, if
+ * enabled. Saved L2TP protocol info is stored in skb->cb[].
+ */
+ if (L2TP_SKB_CB(skb)->has_seq) {
+ if (session->reorder_timeout != 0) {
+ /* Packet reordering enabled. Add skb to session's
+ * reorder queue, in order of ns.
+ */
+ l2tp_recv_queue_skb(session, skb);
+ } else {
+ /* Packet reordering disabled. Discard out-of-sequence
+ * packets
+ */
+ if (L2TP_SKB_CB(skb)->ns != session->nr) {
+ session->stats.rx_seq_discards++;
+ PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
+ "%s: oos pkt %u len %d discarded, "
+ "waiting for %u, reorder_q_len=%d\n",
+ session->name, L2TP_SKB_CB(skb)->ns,
+ L2TP_SKB_CB(skb)->length, session->nr,
+ skb_queue_len(&session->reorder_q));
+ goto discard;
+ }
+ skb_queue_tail(&session->reorder_q, skb);
+ }
+ } else {
+ /* No sequence numbers. Add the skb to the tail of the
+ * reorder queue. This ensures that it will be
+ * delivered after all previous sequenced skbs.
+ */
+ skb_queue_tail(&session->reorder_q, skb);
+ }
+
+ /* Try to dequeue as many skbs from reorder_q as we can. */
+ l2tp_recv_dequeue(session);
+
+ l2tp_session_dec_refcount(session);
+
+ return;
+
+discard:
+ session->stats.rx_errors++;
+ kfree_skb(skb);
+
+ if (session->deref)
+ (*session->deref)(session);
+
+ l2tp_session_dec_refcount(session);
+}
+EXPORT_SYMBOL(l2tp_recv_common);
+
+/* Internal UDP receive frame. Do the real work of receiving an L2TP data frame
+ * here. The skb is not on a list when we get here.
+ * Returns 0 if the packet was a data packet and was successfully passed on.
+ * Returns 1 if the packet was not a good data packet and could not be
+ * forwarded. All such packets are passed up to userspace to deal with.
+ */
+int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
+ int (*payload_hook)(struct sk_buff *skb))
+{
+ struct l2tp_session *session = NULL;
+ unsigned char *ptr, *optr;
+ u16 hdrflags;
+ u32 tunnel_id, session_id;
+ int offset;
+ u16 version;
+ int length;
+
+ if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb))
+ goto discard_bad_csum;
+
+ /* UDP always verifies the packet length. */
+ __skb_pull(skb, sizeof(struct udphdr));
+
+ /* Short packet? */
+ if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) {
+ PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
+ "%s: recv short packet (len=%d)\n", tunnel->name, skb->len);
+ goto error;
+ }
+
+ /* Point to L2TP header */
+ optr = ptr = skb->data;
+
+ /* Trace packet contents, if enabled */
+ if (tunnel->debug & L2TP_MSG_DATA) {
+ length = min(32u, skb->len);
+ if (!pskb_may_pull(skb, length))
+ goto error;
+
+ printk(KERN_DEBUG "%s: recv: ", tunnel->name);
+
+ offset = 0;
+ do {
+ printk(" %02X", ptr[offset]);
+ } while (++offset < length);
+
+ printk("\n");
+ }
+
+ /* Get L2TP header flags */
+ hdrflags = ntohs(*(__be16 *) ptr);
+
+ /* Check protocol version */
+ version = hdrflags & L2TP_HDR_VER_MASK;
+ if (version != tunnel->version) {
+ PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
+ "%s: recv protocol version mismatch: got %d expected %d\n",
+ tunnel->name, version, tunnel->version);
+ goto error;
+ }
+
+ /* Get length of L2TP packet */
+ length = skb->len;
+
+ /* If this is a control packet, it is handled by userspace. */
+ if (hdrflags & L2TP_HDRFLAG_T) {
+ PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_DEBUG,
+ "%s: recv control packet, len=%d\n", tunnel->name, length);
+ goto error;
+ }
+
+ /* Skip flags */
+ ptr += 2;
+
+ if (tunnel->version == L2TP_HDR_VER_2) {
+ /* If length is present, skip it */
+ if (hdrflags & L2TP_HDRFLAG_L)
+ ptr += 2;
+
+ /* Extract tunnel and session ID */
+ tunnel_id = ntohs(*(__be16 *) ptr);
+ ptr += 2;
+ session_id = ntohs(*(__be16 *) ptr);
+ ptr += 2;
+ } else {
+ ptr += 2; /* skip reserved bits */
+ tunnel_id = tunnel->tunnel_id;
+ session_id = ntohl(*(__be32 *) ptr);
+ ptr += 4;
+ }
+
+ /* Find the session context */
+ session = l2tp_session_find(tunnel->l2tp_net, tunnel, session_id);
+ if (!session || !session->recv_skb) {
+ /* Not found? Pass to userspace to deal with */
+ PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
+ "%s: no session found (%u/%u). Passing up.\n",
+ tunnel->name, tunnel_id, session_id);
+ goto error;
+ }
+
+ l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook);
+
+ return 0;
+
+discard_bad_csum:
+ LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name);
+ UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0);
+ tunnel->stats.rx_errors++;
+ kfree_skb(skb);
+
+ return 0;
+
+error:
+ /* Put UDP header back */
+ __skb_push(skb, sizeof(struct udphdr));
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(l2tp_udp_recv_core);
+
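
The flag masks at the top of this file and the header layout documented above l2tp_recv_common() make the parsing in l2tp_udp_recv_core() easy to follow. A standalone userspace sketch (not part of the patch) that decodes the fixed part of an L2TPv2 data header the same way:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* flags = 0x0802: S bit set, version 2; tunnel 5, session 9, ns 1, nr 0 */
	const uint8_t pkt[] = { 0x08, 0x02, 0x00, 0x05, 0x00, 0x09,
				0x00, 0x01, 0x00, 0x00 };
	const uint8_t *ptr = pkt;
	uint16_t hdrflags, tunnel_id, session_id;

	memcpy(&hdrflags, ptr, 2); hdrflags = ntohs(hdrflags); ptr += 2;
	if (hdrflags & 0x8000)		/* L2TP_HDRFLAG_T: control packet */
		printf("control packet, handled by userspace\n");
	if (hdrflags & 0x4000)		/* L2TP_HDRFLAG_L: optional length */
		ptr += 2;
	memcpy(&tunnel_id, ptr, 2); ptr += 2;
	memcpy(&session_id, ptr, 2); ptr += 2;
	printf("ver=%d tunnel=%d session=%d seq=%s\n",
	       hdrflags & 0x000f,			/* L2TP_HDR_VER_MASK */
	       ntohs(tunnel_id), ntohs(session_id),
	       (hdrflags & 0x0800) ? "yes" : "no");	/* L2TP_HDRFLAG_S */
	return 0;
}
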
+/* UDP encapsulation receive handler. See net/ipv4/udp.c.
+ * Return codes:
+ * 0 : success.
+ * <0: error
+ * >0: skb should be passed up to userspace as UDP.
+ */
+int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+{
+ struct l2tp_tunnel *tunnel;
+
+ tunnel = l2tp_sock_to_tunnel(sk);
+ if (tunnel == NULL)
+ goto pass_up;
+
+ PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_DEBUG,
+ "%s: received %d bytes\n", tunnel->name, skb->len);
+
+ if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook))
+ goto pass_up_put;
+
+ sock_put(sk);
+ return 0;
+
+pass_up_put:
+ sock_put(sk);
+pass_up:
+ return 1;
+}
+EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv);
+
+/************************************************************************
+ * Transmit handling
+ ***********************************************************************/
+
+/* Build an L2TP header for the session into the buffer provided.
+ */
+static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf)
+{
+ struct l2tp_tunnel *tunnel = session->tunnel;
+ __be16 *bufp = buf;
+ __be16 *optr = buf;
+ u16 flags = L2TP_HDR_VER_2;
+ u32 tunnel_id = tunnel->peer_tunnel_id;
+ u32 session_id = session->peer_session_id;
+
+ if (session->send_seq)
+ flags |= L2TP_HDRFLAG_S;
+
+ /* Setup L2TP header. */
+ *bufp++ = htons(flags);
+ *bufp++ = htons(tunnel_id);
+ *bufp++ = htons(session_id);
+ if (session->send_seq) {
+ *bufp++ = htons(session->ns);
+ *bufp++ = 0;
+ session->ns++;
+ session->ns &= 0xffff;
+ PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
+ "%s: updated ns to %u\n", session->name, session->ns);
+ }
+
+ return bufp - optr;
+}
+
+static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
+{
+ struct l2tp_tunnel *tunnel = session->tunnel;
+ char *bufp = buf;
+ char *optr = bufp;
+
+ /* Setup L2TP header. The header differs slightly for UDP and
+ * IP encapsulations. For UDP, there are 4 bytes of flags.
+ */
+ if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
+ u16 flags = L2TP_HDR_VER_3;
+ *((__be16 *) bufp) = htons(flags);
+ bufp += 2;
+ *((__be16 *) bufp) = 0;
+ bufp += 2;
+ }
+
+ *((__be32 *) bufp) = htonl(session->peer_session_id);
+ bufp += 4;
+ if (session->cookie_len) {
+ memcpy(bufp, &session->cookie[0], session->cookie_len);
+ bufp += session->cookie_len;
+ }
+ if (session->l2specific_len) {
+ if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
+ u32 l2h = 0;
+ if (session->send_seq) {
+ l2h = 0x40000000 | session->ns;
+ session->ns++;
+ session->ns &= 0xffffff;
+ PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
+ "%s: updated ns to %u\n", session->name, session->ns);
+ }
+
+ *((__be32 *) bufp) = htonl(l2h);
+ }
+ bufp += session->l2specific_len;
+ }
+ if (session->offset)
+ bufp += session->offset;
+
+ return bufp - optr;
+}
+
+int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, size_t data_len)
+{
+ struct l2tp_tunnel *tunnel = session->tunnel;
+ unsigned int len = skb->len;
+ int error;
+
+ /* Debug */
+ if (session->send_seq)
+ PRINTK(session->debug, L2TP_MSG_DATA, KERN_DEBUG,
+ "%s: send %Zd bytes, ns=%u\n", session->name,
+ data_len, session->ns - 1);
+ else
+ PRINTK(session->debug, L2TP_MSG_DATA, KERN_DEBUG,
+ "%s: send %Zd bytes\n", session->name, data_len);
+
+ if (session->debug & L2TP_MSG_DATA) {
+ int i;
+ int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
+ unsigned char *datap = skb->data + uhlen;
+
+ printk(KERN_DEBUG "%s: xmit:", session->name);
+ for (i = 0; i < (len - uhlen); i++) {
+ printk(" %02X", *datap++);
+ if (i == 31) {
+ printk(" ...");
+ break;
+ }
+ }
+ printk("\n");
+ }
+
+ /* Queue the packet to IP for output */
+ skb->local_df = 1;
+ error = ip_queue_xmit(skb);
+
+ /* Update stats */
+ if (error >= 0) {
+ tunnel->stats.tx_packets++;
+ tunnel->stats.tx_bytes += len;
+ session->stats.tx_packets++;
+ session->stats.tx_bytes += len;
+ } else {
+ tunnel->stats.tx_errors++;
+ session->stats.tx_errors++;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(l2tp_xmit_core);
+
+/* Automatically called when the skb is freed.
+ */
+static void l2tp_sock_wfree(struct sk_buff *skb)
+{
+ sock_put(skb->sk);
+}
+
+/* For data skbs that we transmit, we associate with the tunnel socket
+ * but don't do accounting.
+ */
+static inline void l2tp_skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
+{
+ sock_hold(sk);
+ skb->sk = sk;
+ skb->destructor = l2tp_sock_wfree;
+}
+
+/* If the caller requires the skb to have a PPP header, the header must be
+ * inserted in the skb data before calling this function.
+ */
+int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len)
+{
+ int data_len = skb->len;
+ struct l2tp_tunnel *tunnel = session->tunnel;
+ struct sock *sk = tunnel->sock;
+ struct udphdr *uh;
+ struct inet_sock *inet;
+ __wsum csum;
+ int old_headroom;
+ int new_headroom;
+ int headroom;
+ int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
+ int udp_len;
+
+ /* Check that there's enough headroom in the skb to insert IP,
+ * UDP and L2TP headers. If not enough, expand it to
+ * make room. Adjust truesize.
+ */
+ headroom = NET_SKB_PAD + sizeof(struct iphdr) +
+ uhlen + hdr_len;
+ old_headroom = skb_headroom(skb);
+ if (skb_cow_head(skb, headroom))
+ goto abort;
+
+ new_headroom = skb_headroom(skb);
+ skb_orphan(skb);
+ skb->truesize += new_headroom - old_headroom;
+
+ /* Setup L2TP header */
+ session->build_header(session, __skb_push(skb, hdr_len));
+
+ /* Reset skb netfilter state */
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+ IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
+ IPSKB_REROUTED);
+ nf_reset(skb);
+
+ /* Get routing info from the tunnel socket */
+ skb_dst_drop(skb);
+ skb_dst_set(skb, dst_clone(__sk_dst_get(sk)));
+
+ switch (tunnel->encap) {
+ case L2TP_ENCAPTYPE_UDP:
+ /* Setup UDP header */
+ inet = inet_sk(sk);
+ __skb_push(skb, sizeof(*uh));
+ skb_reset_transport_header(skb);
+ uh = udp_hdr(skb);
+ uh->source = inet->inet_sport;
+ uh->dest = inet->inet_dport;
+ udp_len = uhlen + hdr_len + data_len;
+ uh->len = htons(udp_len);
+ uh->check = 0;
+
+ /* Calculate UDP checksum if configured to do so */
+ if (sk->sk_no_check == UDP_CSUM_NOXMIT)
+ skb->ip_summed = CHECKSUM_NONE;
+ else if ((skb_dst(skb) && skb_dst(skb)->dev) &&
+ (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM))) {
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ csum = skb_checksum(skb, 0, udp_len, 0);
+ uh->check = csum_tcpudp_magic(inet->inet_saddr,
+ inet->inet_daddr,
+ udp_len, IPPROTO_UDP, csum);
+ if (uh->check == 0)
+ uh->check = CSUM_MANGLED_0;
+ } else {
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ skb->csum_start = skb_transport_header(skb) - skb->head;
+ skb->csum_offset = offsetof(struct udphdr, check);
+ uh->check = ~csum_tcpudp_magic(inet->inet_saddr,
+ inet->inet_daddr,
+ udp_len, IPPROTO_UDP, 0);
+ }
+ break;
+
+ case L2TP_ENCAPTYPE_IP:
+ break;
+ }
+
+ l2tp_skb_set_owner_w(skb, sk);
+
+ l2tp_xmit_core(session, skb, data_len);
+
+abort:
+ return 0;
+}
+EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
+
+/*****************************************************************************
+ * Tunnel and session create/destroy.
+ *****************************************************************************/
+
+/* Tunnel socket destruct hook.
+ * The tunnel context is deleted only when all session sockets have been
+ * closed.
+ */
+void l2tp_tunnel_destruct(struct sock *sk)
+{
+ struct l2tp_tunnel *tunnel;
+
+ tunnel = sk->sk_user_data;
+ if (tunnel == NULL)
+ goto end;
+
+ PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO,
+ "%s: closing...\n", tunnel->name);
+
+ /* Close all sessions */
+ l2tp_tunnel_closeall(tunnel);
+
+ switch (tunnel->encap) {
+ case L2TP_ENCAPTYPE_UDP:
+ /* No longer an encapsulation socket. See net/ipv4/udp.c */
+ (udp_sk(sk))->encap_type = 0;
+ (udp_sk(sk))->encap_rcv = NULL;
+ break;
+ case L2TP_ENCAPTYPE_IP:
+ break;
+ }
+
+ /* Remove hooks into tunnel socket */
+ tunnel->sock = NULL;
+ sk->sk_destruct = tunnel->old_sk_destruct;
+ sk->sk_user_data = NULL;
+
+ /* Call the original destructor */
+ if (sk->sk_destruct)
+ (*sk->sk_destruct)(sk);
+
+ /* We're finished with the socket */
+ l2tp_tunnel_dec_refcount(tunnel);
+
+end:
+ return;
+}
+EXPORT_SYMBOL(l2tp_tunnel_destruct);
+
+/* When the tunnel is closed, all the attached sessions need to go too.
+ */
+void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
+{
+ int hash;
+ struct hlist_node *walk;
+ struct hlist_node *tmp;
+ struct l2tp_session *session;
+
+ BUG_ON(tunnel == NULL);
+
+ PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO,
+ "%s: closing all sessions...\n", tunnel->name);
+
+ write_lock_bh(&tunnel->hlist_lock);
+ for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
+again:
+ hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
+ session = hlist_entry(walk, struct l2tp_session, hlist);
+
+ PRINTK(session->debug, L2TP_MSG_CONTROL, KERN_INFO,
+ "%s: closing session\n", session->name);
+
+ hlist_del_init(&session->hlist);
+
+ /* Since we should hold the sock lock while
+ * doing any unbinding, we need to release the
+ * lock we're holding before taking that lock.
+ * Hold a reference to the sock so it doesn't
+ * disappear as we're jumping between locks.
+ */
+ if (session->ref != NULL)
+ (*session->ref)(session);
+
+ write_unlock_bh(&tunnel->hlist_lock);
+
+ if (tunnel->version != L2TP_HDR_VER_2) {
+ struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
+
+ spin_lock_bh(&pn->l2tp_session_hlist_lock);
+ hlist_del_init_rcu(&session->global_hlist);
+ spin_unlock_bh(&pn->l2tp_session_hlist_lock);
+ synchronize_rcu();
+ }
+
+ if (session->session_close != NULL)
+ (*session->session_close)(session);
+
+ if (session->deref != NULL)
+ (*session->deref)(session);
+
+ write_lock_bh(&tunnel->hlist_lock);
+
+ /* Now restart from the beginning of this hash
+ * chain. We always remove a session from the
+ * list so we are guaranteed to make forward
+ * progress.
+ */
+ goto again;
+ }
+ }
+ write_unlock_bh(&tunnel->hlist_lock);
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall);
+
+/* Really kill the tunnel.
+ * Come here only when all sessions have been cleared from the tunnel.
+ */
+void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
+{
+ struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
+
+ BUG_ON(atomic_read(&tunnel->ref_count) != 0);
+ BUG_ON(tunnel->sock != NULL);
+
+ PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO,
+ "%s: free...\n", tunnel->name);
+
+ /* Remove from tunnel list */
+ spin_lock_bh(&pn->l2tp_tunnel_list_lock);
+ list_del_rcu(&tunnel->list);
+ spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
+ synchronize_rcu();
+
+ atomic_dec(&l2tp_tunnel_count);
+ kfree(tunnel);
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_free);
+
+/* Create a socket for the tunnel, if one isn't set up by
+ * userspace. This is used for static tunnels where there is no
+ * managing L2TP daemon.
+ */
+static int l2tp_tunnel_sock_create(u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct socket **sockp)
+{
+ int err = -EINVAL;
+ struct sockaddr_in udp_addr;
+ struct sockaddr_l2tpip ip_addr;
+ struct socket *sock = NULL;
+
+ switch (cfg->encap) {
+ case L2TP_ENCAPTYPE_UDP:
+ err = sock_create(AF_INET, SOCK_DGRAM, 0, sockp);
+ if (err < 0)
+ goto out;
+
+ sock = *sockp;
+
+ memset(&udp_addr, 0, sizeof(udp_addr));
+ udp_addr.sin_family = AF_INET;
+ udp_addr.sin_addr = cfg->local_ip;
+ udp_addr.sin_port = htons(cfg->local_udp_port);
+ err = kernel_bind(sock, (struct sockaddr *) &udp_addr, sizeof(udp_addr));
+ if (err < 0)
+ goto out;
+
+ udp_addr.sin_family = AF_INET;
+ udp_addr.sin_addr = cfg->peer_ip;
+ udp_addr.sin_port = htons(cfg->peer_udp_port);
+ err = kernel_connect(sock, (struct sockaddr *) &udp_addr, sizeof(udp_addr), 0);
+ if (err < 0)
+ goto out;
+
+ if (!cfg->use_udp_checksums)
+ sock->sk->sk_no_check = UDP_CSUM_NOXMIT;
+
+ break;
+
+ case L2TP_ENCAPTYPE_IP:
+ err = sock_create(AF_INET, SOCK_DGRAM, IPPROTO_L2TP, sockp);
+ if (err < 0)
+ goto out;
+
+ sock = *sockp;
+
+ memset(&ip_addr, 0, sizeof(ip_addr));
+ ip_addr.l2tp_family = AF_INET;
+ ip_addr.l2tp_addr = cfg->local_ip;
+ ip_addr.l2tp_conn_id = tunnel_id;
+ err = kernel_bind(sock, (struct sockaddr *) &ip_addr, sizeof(ip_addr));
+ if (err < 0)
+ goto out;
+
+ ip_addr.l2tp_family = AF_INET;
+ ip_addr.l2tp_addr = cfg->peer_ip;
+ ip_addr.l2tp_conn_id = peer_tunnel_id;
+ err = kernel_connect(sock, (struct sockaddr *) &ip_addr, sizeof(ip_addr), 0);
+ if (err < 0)
+ goto out;
+
+ break;
+
+ default:
+ goto out;
+ }
+
+out:
+ if ((err < 0) && sock) {
+ sock_release(sock);
+ *sockp = NULL;
+ }
+
+ return err;
+}
+
+int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
+{
+ struct l2tp_tunnel *tunnel = NULL;
+ int err;
+ struct socket *sock = NULL;
+ struct sock *sk = NULL;
+ struct l2tp_net *pn;
+ enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP;
+
+ /* Get the tunnel socket from the fd, which was opened by
+ * the userspace L2TP daemon. If not specified, create a
+ * kernel socket.
+ */
+ if (fd < 0) {
+ err = l2tp_tunnel_sock_create(tunnel_id, peer_tunnel_id, cfg, &sock);
+ if (err < 0)
+ goto err;
+ } else {
+ err = -EBADF;
+ sock = sockfd_lookup(fd, &err);
+ if (!sock) {
+ printk(KERN_ERR "tunl %hu: sockfd_lookup(fd=%d) returned %d\n",
+ tunnel_id, fd, err);
+ goto err;
+ }
+ }
+
+ sk = sock->sk;
+
+ if (cfg != NULL)
+ encap = cfg->encap;
+
+ /* Quick sanity checks */
+ switch (encap) {
+ case L2TP_ENCAPTYPE_UDP:
+ err = -EPROTONOSUPPORT;
+ if (sk->sk_protocol != IPPROTO_UDP) {
+ printk(KERN_ERR "tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
+ tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
+ goto err;
+ }
+ break;
+ case L2TP_ENCAPTYPE_IP:
+ err = -EPROTONOSUPPORT;
+ if (sk->sk_protocol != IPPROTO_L2TP) {
+ printk(KERN_ERR "tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
+ tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP);
+ goto err;
+ }
+ break;
+ }
+
+ /* Check if this socket has already been prepped */
+ tunnel = (struct l2tp_tunnel *)sk->sk_user_data;
+ if (tunnel != NULL) {
+ /* This socket has already been prepped */
+ err = -EBUSY;
+ goto err;
+ }
+
+ tunnel = kzalloc(sizeof(struct l2tp_tunnel), GFP_KERNEL);
+ if (tunnel == NULL) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ tunnel->version = version;
+ tunnel->tunnel_id = tunnel_id;
+ tunnel->peer_tunnel_id = peer_tunnel_id;
+ tunnel->debug = L2TP_DEFAULT_DEBUG_FLAGS;
+
+ tunnel->magic = L2TP_TUNNEL_MAGIC;
+ sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
+ rwlock_init(&tunnel->hlist_lock);
+
+ /* The net we belong to */
+ tunnel->l2tp_net = net;
+ pn = l2tp_pernet(net);
+
+ if (cfg != NULL)
+ tunnel->debug = cfg->debug;
+
+ /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
+ tunnel->encap = encap;
+ if (encap == L2TP_ENCAPTYPE_UDP) {
+ /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
+ udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;
+ udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv;
+ }
+
+ sk->sk_user_data = tunnel;
+
+ /* Hook on the tunnel socket destructor so that we can cleanup
+ * if the tunnel socket goes away.
+ */
+ tunnel->old_sk_destruct = sk->sk_destruct;
+ sk->sk_destruct = &l2tp_tunnel_destruct;
+ tunnel->sock = sk;
+ sk->sk_allocation = GFP_ATOMIC;
+
+ /* Add tunnel to our list */
+ INIT_LIST_HEAD(&tunnel->list);
+ spin_lock_bh(&pn->l2tp_tunnel_list_lock);
+ list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
+ spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
+ synchronize_rcu();
+ atomic_inc(&l2tp_tunnel_count);
+
+ /* Bump the reference count. The tunnel context is deleted
+ * only when this drops to zero.
+ */
+ l2tp_tunnel_inc_refcount(tunnel);
+
+ err = 0;
+err:
+ if (tunnelp)
+ *tunnelp = tunnel;
+
+ /* If tunnel's socket was created by the kernel, it doesn't
+ * have a file.
+ */
+ if (sock && sock->file)
+ sockfd_put(sock);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
+
+/* This function is used by the netlink TUNNEL_DELETE command.
+ */
+int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
+{
+ int err = 0;
+ struct socket *sock = tunnel->sock ? tunnel->sock->sk_socket : NULL;
+
+ /* Force the tunnel socket to close. This will eventually
+ * cause the tunnel to be deleted via the normal socket close
+ * mechanisms when userspace closes the tunnel socket.
+ */
+ if (sock != NULL) {
+ err = inet_shutdown(sock, 2);
+
+ /* If the tunnel's socket was created by the kernel,
+ * close the socket here since the socket was not
+ * created by userspace.
+ */
+ if (sock->file == NULL)
+ err = inet_release(sock);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
+
+/* Really kill the session.
+ */
+void l2tp_session_free(struct l2tp_session *session)
+{
+ struct l2tp_tunnel *tunnel;
+
+ BUG_ON(atomic_read(&session->ref_count) != 0);
+
+ tunnel = session->tunnel;
+ if (tunnel != NULL) {
+ BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
+
+ /* Delete the session from the hash */
+ write_lock_bh(&tunnel->hlist_lock);
+ hlist_del_init(&session->hlist);
+ write_unlock_bh(&tunnel->hlist_lock);
+
+ /* Unlink from the global hash if not L2TPv2 */
+ if (tunnel->version != L2TP_HDR_VER_2) {
+ struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
+
+ spin_lock_bh(&pn->l2tp_session_hlist_lock);
+ hlist_del_init_rcu(&session->global_hlist);
+ spin_unlock_bh(&pn->l2tp_session_hlist_lock);
+ synchronize_rcu();
+ }
+
+ if (session->session_id != 0)
+ atomic_dec(&l2tp_session_count);
+
+ sock_put(tunnel->sock);
+
+ /* This will delete the tunnel context if this
+ * is the last session on the tunnel.
+ */
+ session->tunnel = NULL;
+ l2tp_tunnel_dec_refcount(tunnel);
+ }
+
+ kfree(session);
+
+ return;
+}
+EXPORT_SYMBOL_GPL(l2tp_session_free);
+
+/* This function is used by the netlink SESSION_DELETE command and by
+ * pseudowire modules.
+ */
+int l2tp_session_delete(struct l2tp_session *session)
+{
+ if (session->session_close != NULL)
+ (*session->session_close)(session);
+
+ l2tp_session_dec_refcount(session);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(l2tp_session_delete);
+
+
+/* We come here whenever a session's send_seq, cookie_len or
+ * l2specific_len parameters are set.
+ */
+void l2tp_session_set_header_len(struct l2tp_session *session, int version)
+{
+ if (version == L2TP_HDR_VER_2) {
+ session->hdr_len = 6;
+ if (session->send_seq)
+ session->hdr_len += 4;
+ } else {
+ session->hdr_len = 4 + session->cookie_len + session->l2specific_len + session->offset;
+ if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
+ session->hdr_len += 4;
+ }
+
+}
+EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
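+
+/* Worked examples (illustrative, derived from the computation above): an
+ * L2TPv2 session with send_seq enabled uses hdr_len = 6 + 4 = 10 bytes.
+ * An L2TPv3 session over UDP with an 8-byte cookie, a 4-byte L2-specific
+ * sublayer and no offset uses hdr_len = 4 + 8 + 4 + 0 + 4 = 20 bytes,
+ * where the final 4 bytes are added only for UDP encapsulation.
+ */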
+
+struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
+{
+ struct l2tp_session *session;
+
+ session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL);
+ if (session != NULL) {
+ session->magic = L2TP_SESSION_MAGIC;
+ session->tunnel = tunnel;
+
+ session->session_id = session_id;
+ session->peer_session_id = peer_session_id;
+ session->nr = 1;
+
+ sprintf(&session->name[0], "sess %u/%u",
+ tunnel->tunnel_id, session->session_id);
+
+ skb_queue_head_init(&session->reorder_q);
+
+ INIT_HLIST_NODE(&session->hlist);
+ INIT_HLIST_NODE(&session->global_hlist);
+
+ /* Inherit debug options from tunnel */
+ session->debug = tunnel->debug;
+
+ if (cfg) {
+ session->pwtype = cfg->pw_type;
+ session->debug = cfg->debug;
+ session->mtu = cfg->mtu;
+ session->mru = cfg->mru;
+ session->send_seq = cfg->send_seq;
+ session->recv_seq = cfg->recv_seq;
+ session->lns_mode = cfg->lns_mode;
+ session->reorder_timeout = cfg->reorder_timeout;
+ session->offset = cfg->offset;
+ session->l2specific_type = cfg->l2specific_type;
+ session->l2specific_len = cfg->l2specific_len;
+ session->cookie_len = cfg->cookie_len;
+ memcpy(&session->cookie[0], &cfg->cookie[0], cfg->cookie_len);
+ session->peer_cookie_len = cfg->peer_cookie_len;
+ memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len);
+ }
+
+ if (tunnel->version == L2TP_HDR_VER_2)
+ session->build_header = l2tp_build_l2tpv2_header;
+ else
+ session->build_header = l2tp_build_l2tpv3_header;
+
+ l2tp_session_set_header_len(session, tunnel->version);
+
+ /* Bump the reference count. The session context is deleted
+ * only when this drops to zero.
+ */
+ l2tp_session_inc_refcount(session);
+ l2tp_tunnel_inc_refcount(tunnel);
+
+ /* Ensure tunnel socket isn't deleted */
+ sock_hold(tunnel->sock);
+
+ /* Add session to the tunnel's hash list */
+ write_lock_bh(&tunnel->hlist_lock);
+ hlist_add_head(&session->hlist,
+ l2tp_session_id_hash(tunnel, session_id));
+ write_unlock_bh(&tunnel->hlist_lock);
+
+ /* And to the global session list if L2TPv3 */
+ if (tunnel->version != L2TP_HDR_VER_2) {
+ struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
+
+ spin_lock_bh(&pn->l2tp_session_hlist_lock);
+ hlist_add_head_rcu(&session->global_hlist,
+ l2tp_session_id_hash_2(pn, session_id));
+ spin_unlock_bh(&pn->l2tp_session_hlist_lock);
+ synchronize_rcu();
+ }
+
+ /* Ignore management session in session count value */
+ if (session->session_id != 0)
+ atomic_inc(&l2tp_session_count);
+ }
+
+ return session;
+}
+EXPORT_SYMBOL_GPL(l2tp_session_create);
+
+/*****************************************************************************
+ * Init and cleanup
+ *****************************************************************************/
+
+static __net_init int l2tp_init_net(struct net *net)
+{
+ struct l2tp_net *pn = net_generic(net, l2tp_net_id);
+ int hash;
+
+ INIT_LIST_HEAD(&pn->l2tp_tunnel_list);
+ spin_lock_init(&pn->l2tp_tunnel_list_lock);
+
+ for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
+ INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]);
+
+ spin_lock_init(&pn->l2tp_session_hlist_lock);
+
+ return 0;
+}
+
+static struct pernet_operations l2tp_net_ops = {
+ .init = l2tp_init_net,
+ .id = &l2tp_net_id,
+ .size = sizeof(struct l2tp_net),
+};
+
+static int __init l2tp_init(void)
+{
+ int rc = 0;
+
+ rc = register_pernet_device(&l2tp_net_ops);
+ if (rc)
+ goto out;
+
+ printk(KERN_INFO "L2TP core driver, %s\n", L2TP_DRV_VERSION);
+
+out:
+ return rc;
+}
+
+static void __exit l2tp_exit(void)
+{
+ unregister_pernet_device(&l2tp_net_ops);
+}
+
+module_init(l2tp_init);
+module_exit(l2tp_exit);
+
+MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
+MODULE_DESCRIPTION("L2TP core");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(L2TP_DRV_VERSION);
+
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
new file mode 100644
index 000000000000..f0f318edd3f1
--- /dev/null
+++ b/net/l2tp/l2tp_core.h
@@ -0,0 +1,304 @@
+/*
+ * L2TP internal definitions.
+ *
+ * Copyright (c) 2008,2009 Katalix Systems Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _L2TP_CORE_H_
+#define _L2TP_CORE_H_
+
+/* Just some random numbers */
+#define L2TP_TUNNEL_MAGIC 0x42114DDA
+#define L2TP_SESSION_MAGIC 0x0C04EB7D
+
+/* Per tunnel, session hash table size */
+#define L2TP_HASH_BITS 4
+#define L2TP_HASH_SIZE (1 << L2TP_HASH_BITS)
+
+/* System-wide, session hash table size */
+#define L2TP_HASH_BITS_2 8
+#define L2TP_HASH_SIZE_2 (1 << L2TP_HASH_BITS_2)
+
+/* Debug message categories for the DEBUG socket option */
+enum {
+ L2TP_MSG_DEBUG = (1 << 0), /* verbose debug (if
+ * compiled in) */
+ L2TP_MSG_CONTROL = (1 << 1), /* userspace - kernel
+ * interface */
+ L2TP_MSG_SEQ = (1 << 2), /* sequence numbers */
+ L2TP_MSG_DATA = (1 << 3), /* data packets */
+};
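+
+/* Example (illustrative): these values are bit flags and may be OR'ed
+ * together, e.g. a debug mask of (L2TP_MSG_CONTROL | L2TP_MSG_DATA)
+ * enables both control interface messages and data packet traces.
+ */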
+
+struct sk_buff;
+
+struct l2tp_stats {
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_errors;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_seq_discards;
+ u64 rx_oos_packets;
+ u64 rx_errors;
+ u64 rx_cookie_discards;
+};
+
+struct l2tp_tunnel;
+
+/* Per-session configuration, as passed to l2tp_session_create(). Contains
+ * the information needed to identify incoming packets and to build
+ * outgoing ones.
+ */
+struct l2tp_session_cfg {
+ enum l2tp_pwtype pw_type;
+ unsigned data_seq:2; /* data sequencing level
+ * 0 => none, 1 => IP only,
+ * 2 => all
+ */
+ unsigned recv_seq:1; /* expect receive packets with
+ * sequence numbers? */
+ unsigned send_seq:1; /* send packets with sequence
+ * numbers? */
+ unsigned lns_mode:1; /* behave as LNS? LAC enables
+ * sequence numbers under
+ * control of LNS. */
+ int debug; /* bitmask of debug message
+ * categories */
+ u16 vlan_id; /* VLAN pseudowire only */
+ u16 offset; /* offset to payload */
+ u16 l2specific_len; /* Layer 2 specific length */
+ u16 l2specific_type; /* Layer 2 specific type */
+ u8 cookie[8]; /* optional cookie */
+ int cookie_len; /* 0, 4 or 8 bytes */
+ u8 peer_cookie[8]; /* peer's cookie */
+ int peer_cookie_len; /* 0, 4 or 8 bytes */
+ int reorder_timeout; /* configured reorder timeout
+ * (in jiffies) */
+ int mtu;
+ int mru;
+ char *ifname;
+};
+
+struct l2tp_session {
+ int magic; /* should be
+ * L2TP_SESSION_MAGIC */
+
+ struct l2tp_tunnel *tunnel; /* back pointer to tunnel
+ * context */
+ u32 session_id;
+ u32 peer_session_id;
+ u8 cookie[8];
+ int cookie_len;
+ u8 peer_cookie[8];
+ int peer_cookie_len;
+ u16 offset; /* offset from end of L2TP header
+ to beginning of data */
+ u16 l2specific_len;
+ u16 l2specific_type;
+ u16 hdr_len;
+ u32 nr; /* session NR state (receive) */
+ u32 ns; /* session NR state (send) */
+ struct sk_buff_head reorder_q; /* receive reorder queue */
+ struct hlist_node hlist; /* Hash list node */
+ atomic_t ref_count;
+
+ char name[32]; /* for logging */
+ char ifname[IFNAMSIZ];
+ unsigned data_seq:2; /* data sequencing level
+ * 0 => none, 1 => IP only,
+ * 2 => all
+ */
+ unsigned recv_seq:1; /* expect receive packets with
+ * sequence numbers? */
+ unsigned send_seq:1; /* send packets with sequence
+ * numbers? */
+ unsigned lns_mode:1; /* behave as LNS? LAC enables
+ * sequence numbers under
+ * control of LNS. */
+ int debug; /* bitmask of debug message
+ * categories */
+ int reorder_timeout; /* configured reorder timeout
+ * (in jiffies) */
+ int mtu;
+ int mru;
+ enum l2tp_pwtype pwtype;
+ struct l2tp_stats stats;
+ struct hlist_node global_hlist; /* Global hash list node */
+
+ int (*build_header)(struct l2tp_session *session, void *buf);
+ void (*recv_skb)(struct l2tp_session *session, struct sk_buff *skb, int data_len);
+ void (*session_close)(struct l2tp_session *session);
+ void (*ref)(struct l2tp_session *session);
+ void (*deref)(struct l2tp_session *session);
+#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
+ void (*show)(struct seq_file *m, void *priv);
+#endif
+ uint8_t priv[0]; /* private data */
+};
+
+/* Describes the tunnel. It contains info to track all the associated
+ * sessions so that incoming packets can be sorted out.
+ */
+struct l2tp_tunnel_cfg {
+ int debug; /* bitmask of debug message
+ * categories */
+ enum l2tp_encap_type encap;
+
+ /* Used only for kernel-created sockets */
+ struct in_addr local_ip;
+ struct in_addr peer_ip;
+ u16 local_udp_port;
+ u16 peer_udp_port;
+ unsigned int use_udp_checksums:1;
+};
+
+struct l2tp_tunnel {
+ int magic; /* Should be L2TP_TUNNEL_MAGIC */
+ rwlock_t hlist_lock; /* protect session_hlist */
+ struct hlist_head session_hlist[L2TP_HASH_SIZE];
+ /* hashed list of sessions,
+ * hashed by id */
+ u32 tunnel_id;
+ u32 peer_tunnel_id;
+ int version; /* 2=>L2TPv2, 3=>L2TPv3 */
+
+ char name[20]; /* for logging */
+ int debug; /* bitmask of debug message
+ * categories */
+ enum l2tp_encap_type encap;
+ struct l2tp_stats stats;
+
+ struct list_head list; /* Keep a list of all tunnels */
+ struct net *l2tp_net; /* the net we belong to */
+
+ atomic_t ref_count;
+#ifdef CONFIG_DEBUG_FS
+ void (*show)(struct seq_file *m, void *arg);
+#endif
+ int (*recv_payload_hook)(struct sk_buff *skb);
+ void (*old_sk_destruct)(struct sock *);
+ struct sock *sock; /* Parent socket */
+ int fd;
+
+ uint8_t priv[0]; /* private data */
+};
+
+struct l2tp_nl_cmd_ops {
+ int (*session_create)(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg);
+ int (*session_delete)(struct l2tp_session *session);
+};
+
+static inline void *l2tp_tunnel_priv(struct l2tp_tunnel *tunnel)
+{
+ return &tunnel->priv[0];
+}
+
+static inline void *l2tp_session_priv(struct l2tp_session *session)
+{
+ return &session->priv[0];
+}
+
+static inline struct l2tp_tunnel *l2tp_sock_to_tunnel(struct sock *sk)
+{
+ struct l2tp_tunnel *tunnel;
+
+ if (sk == NULL)
+ return NULL;
+
+ sock_hold(sk);
+ tunnel = (struct l2tp_tunnel *)(sk->sk_user_data);
+ if (tunnel == NULL) {
+ sock_put(sk);
+ goto out;
+ }
+
+ BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
+
+out:
+ return tunnel;
+}
+
+extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id);
+extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);
+extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
+extern struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
+extern struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
+
+extern int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp);
+extern int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
+extern struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg);
+extern int l2tp_session_delete(struct l2tp_session *session);
+extern void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
+extern void l2tp_session_free(struct l2tp_session *session);
+extern void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length, int (*payload_hook)(struct sk_buff *skb));
+extern int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, int (*payload_hook)(struct sk_buff *skb));
+extern int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb);
+
+extern int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, size_t data_len);
+extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len);
+extern void l2tp_tunnel_destruct(struct sock *sk);
+extern void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
+extern void l2tp_session_set_header_len(struct l2tp_session *session, int version);
+
+extern int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops *ops);
+extern void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
+
+/* Tunnel reference counts. Incremented per session that is added to
+ * the tunnel.
+ */
+static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel)
+{
+ atomic_inc(&tunnel->ref_count);
+}
+
+static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
+{
+ if (atomic_dec_and_test(&tunnel->ref_count))
+ l2tp_tunnel_free(tunnel);
+}
+#ifdef L2TP_REFCNT_DEBUG
+#define l2tp_tunnel_inc_refcount(_t) do { \
+ printk(KERN_DEBUG "l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
+ l2tp_tunnel_inc_refcount_1(_t); \
+ } while (0)
+#define l2tp_tunnel_dec_refcount(_t) do { \
+ printk(KERN_DEBUG "l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
+ l2tp_tunnel_dec_refcount_1(_t); \
+ } while (0)
+#else
+#define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t)
+#define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t)
+#endif
+
+/* Session reference counts. Incremented when code obtains a reference
+ * to a session.
+ */
+static inline void l2tp_session_inc_refcount_1(struct l2tp_session *session)
+{
+ atomic_inc(&session->ref_count);
+}
+
+static inline void l2tp_session_dec_refcount_1(struct l2tp_session *session)
+{
+ if (atomic_dec_and_test(&session->ref_count))
+ l2tp_session_free(session);
+}
+
+#ifdef L2TP_REFCNT_DEBUG
+#define l2tp_session_inc_refcount(_s) do { \
+ printk(KERN_DEBUG "l2tp_session_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_s)->name, atomic_read(&_s->ref_count)); \
+ l2tp_session_inc_refcount_1(_s); \
+ } while (0)
+#define l2tp_session_dec_refcount(_s) do { \
+ printk(KERN_DEBUG "l2tp_session_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_s)->name, atomic_read(&_s->ref_count)); \
+ l2tp_session_dec_refcount_1(_s); \
+ } while (0)
+#else
+#define l2tp_session_inc_refcount(s) l2tp_session_inc_refcount_1(s)
+#define l2tp_session_dec_refcount(s) l2tp_session_dec_refcount_1(s)
+#endif
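+
+/* Usage sketch (illustrative): callers that hold a session across a sleep
+ * or an indirect call are expected to pair these helpers, e.g.
+ *
+ *   l2tp_session_inc_refcount(session);
+ *   ... use session ...
+ *   l2tp_session_dec_refcount(session);
+ *
+ * The final decrement frees the session via l2tp_session_free().
+ */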
+
+#endif /* _L2TP_CORE_H_ */
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
new file mode 100644
index 000000000000..104ec3b283d4
--- /dev/null
+++ b/net/l2tp/l2tp_debugfs.c
@@ -0,0 +1,341 @@
+/*
+ * L2TP subsystem debugfs
+ *
+ * Copyright (c) 2010 Katalix Systems Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/socket.h>
+#include <linux/hash.h>
+#include <linux/l2tp.h>
+#include <linux/in.h>
+#include <linux/etherdevice.h>
+#include <linux/spinlock.h>
+#include <linux/debugfs.h>
+#include <net/sock.h>
+#include <net/ip.h>
+#include <net/icmp.h>
+#include <net/udp.h>
+#include <net/inet_common.h>
+#include <net/inet_hashtables.h>
+#include <net/tcp_states.h>
+#include <net/protocol.h>
+#include <net/xfrm.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+
+#include "l2tp_core.h"
+
+static struct dentry *rootdir;
+static struct dentry *tunnels;
+
+struct l2tp_dfs_seq_data {
+ struct net *net;
+ int tunnel_idx; /* current tunnel */
+ int session_idx; /* index of session within current tunnel */
+ struct l2tp_tunnel *tunnel;
+ struct l2tp_session *session; /* NULL means get next tunnel */
+};
+
+static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd)
+{
+ pd->tunnel = l2tp_tunnel_find_nth(pd->net, pd->tunnel_idx);
+ pd->tunnel_idx++;
+}
+
+static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd)
+{
+ pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
+ pd->session_idx++;
+
+ if (pd->session == NULL) {
+ pd->session_idx = 0;
+ l2tp_dfs_next_tunnel(pd);
+ }
+
+}
+
+static void *l2tp_dfs_seq_start(struct seq_file *m, loff_t *offs)
+{
+ struct l2tp_dfs_seq_data *pd = SEQ_START_TOKEN;
+ loff_t pos = *offs;
+
+ if (!pos)
+ goto out;
+
+ BUG_ON(m->private == NULL);
+ pd = m->private;
+
+ if (pd->tunnel == NULL)
+ l2tp_dfs_next_tunnel(pd);
+ else
+ l2tp_dfs_next_session(pd);
+
+ /* NULL tunnel and session indicates end of list */
+ if ((pd->tunnel == NULL) && (pd->session == NULL))
+ pd = NULL;
+
+out:
+ return pd;
+}
+
+
+static void *l2tp_dfs_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ (*pos)++;
+ return NULL;
+}
+
+static void l2tp_dfs_seq_stop(struct seq_file *p, void *v)
+{
+ /* nothing to do */
+}
+
+static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v)
+{
+ struct l2tp_tunnel *tunnel = v;
+ int session_count = 0;
+ int hash;
+ struct hlist_node *walk;
+ struct hlist_node *tmp;
+
+ read_lock_bh(&tunnel->hlist_lock);
+ for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
+ hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
+ struct l2tp_session *session;
+
+ session = hlist_entry(walk, struct l2tp_session, hlist);
+ if (session->session_id == 0)
+ continue;
+
+ session_count++;
+ }
+ }
+ read_unlock_bh(&tunnel->hlist_lock);
+
+ seq_printf(m, "\nTUNNEL %u peer %u", tunnel->tunnel_id, tunnel->peer_tunnel_id);
+ if (tunnel->sock) {
+ struct inet_sock *inet = inet_sk(tunnel->sock);
+ seq_printf(m, " from %pI4 to %pI4\n",
+ &inet->inet_saddr, &inet->inet_daddr);
+ if (tunnel->encap == L2TP_ENCAPTYPE_UDP)
+ seq_printf(m, " source port %hu, dest port %hu\n",
+ ntohs(inet->inet_sport), ntohs(inet->inet_dport));
+ }
+ seq_printf(m, " L2TPv%d, %s\n", tunnel->version,
+ tunnel->encap == L2TP_ENCAPTYPE_UDP ? "UDP" :
+ tunnel->encap == L2TP_ENCAPTYPE_IP ? "IP" :
+ "");
+ seq_printf(m, " %d sessions, refcnt %d/%d\n", session_count,
+ tunnel->sock ? atomic_read(&tunnel->sock->sk_refcnt) : 0,
+ atomic_read(&tunnel->ref_count));
+
+ seq_printf(m, " %08x rx %llu/%llu/%llu rx %llu/%llu/%llu\n",
+ tunnel->debug,
+ (unsigned long long)tunnel->stats.tx_packets,
+ (unsigned long long)tunnel->stats.tx_bytes,
+ (unsigned long long)tunnel->stats.tx_errors,
+ (unsigned long long)tunnel->stats.rx_packets,
+ (unsigned long long)tunnel->stats.rx_bytes,
+ (unsigned long long)tunnel->stats.rx_errors);
+
+ if (tunnel->show != NULL)
+ tunnel->show(m, tunnel);
+}
+
+static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v)
+{
+ struct l2tp_session *session = v;
+
+ seq_printf(m, " SESSION %u, peer %u, %s\n", session->session_id,
+ session->peer_session_id,
+ session->pwtype == L2TP_PWTYPE_ETH ? "ETH" :
+ session->pwtype == L2TP_PWTYPE_PPP ? "PPP" :
+ "");
+ if (session->send_seq || session->recv_seq)
+ seq_printf(m, " nr %hu, ns %hu\n", session->nr, session->ns);
+ seq_printf(m, " refcnt %d\n", atomic_read(&session->ref_count));
+ seq_printf(m, " config %d/%d/%c/%c/%s/%s %08x %u\n",
+ session->mtu, session->mru,
+ session->recv_seq ? 'R' : '-',
+ session->send_seq ? 'S' : '-',
+ session->data_seq == 1 ? "IPSEQ" :
+ session->data_seq == 2 ? "DATASEQ" : "-",
+ session->lns_mode ? "LNS" : "LAC",
+ session->debug,
+ jiffies_to_msecs(session->reorder_timeout));
+ seq_printf(m, " offset %hu l2specific %hu/%hu\n",
+ session->offset, session->l2specific_type, session->l2specific_len);
+ if (session->cookie_len) {
+ seq_printf(m, " cookie %02x%02x%02x%02x",
+ session->cookie[0], session->cookie[1],
+ session->cookie[2], session->cookie[3]);
+ if (session->cookie_len == 8)
+ seq_printf(m, "%02x%02x%02x%02x",
+ session->cookie[4], session->cookie[5],
+ session->cookie[6], session->cookie[7]);
+ seq_printf(m, "\n");
+ }
+ if (session->peer_cookie_len) {
+ seq_printf(m, " peer cookie %02x%02x%02x%02x",
+ session->peer_cookie[0], session->peer_cookie[1],
+ session->peer_cookie[2], session->peer_cookie[3]);
+ if (session->peer_cookie_len == 8)
+ seq_printf(m, "%02x%02x%02x%02x",
+ session->peer_cookie[4], session->peer_cookie[5],
+ session->peer_cookie[6], session->peer_cookie[7]);
+ seq_printf(m, "\n");
+ }
+
+ seq_printf(m, " %hu/%hu tx %llu/%llu/%llu rx %llu/%llu/%llu\n",
+ session->nr, session->ns,
+ (unsigned long long)session->stats.tx_packets,
+ (unsigned long long)session->stats.tx_bytes,
+ (unsigned long long)session->stats.tx_errors,
+ (unsigned long long)session->stats.rx_packets,
+ (unsigned long long)session->stats.rx_bytes,
+ (unsigned long long)session->stats.rx_errors);
+
+ if (session->show != NULL)
+ session->show(m, session);
+}
+
+static int l2tp_dfs_seq_show(struct seq_file *m, void *v)
+{
+ struct l2tp_dfs_seq_data *pd = v;
+
+ /* display header on line 1 */
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(m, "TUNNEL ID, peer ID from IP to IP\n");
+ seq_puts(m, " L2TPv2/L2TPv3, UDP/IP\n");
+ seq_puts(m, " sessions session-count, refcnt refcnt/sk->refcnt\n");
+ seq_puts(m, " debug tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
+ seq_puts(m, " SESSION ID, peer ID, PWTYPE\n");
+ seq_puts(m, " refcnt cnt\n");
+ seq_puts(m, " offset OFFSET l2specific TYPE/LEN\n");
+ seq_puts(m, " [ cookie ]\n");
+ seq_puts(m, " [ peer cookie ]\n");
+ seq_puts(m, " config mtu/mru/rcvseq/sendseq/dataseq/lns debug reorderto\n");
+ seq_puts(m, " nr/ns tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
+ goto out;
+ }
+
+ /* Show the tunnel or session context */
+ if (pd->session == NULL)
+ l2tp_dfs_seq_tunnel_show(m, pd->tunnel);
+ else
+ l2tp_dfs_seq_session_show(m, pd->session);
+
+out:
+ return 0;
+}
+
+static const struct seq_operations l2tp_dfs_seq_ops = {
+ .start = l2tp_dfs_seq_start,
+ .next = l2tp_dfs_seq_next,
+ .stop = l2tp_dfs_seq_stop,
+ .show = l2tp_dfs_seq_show,
+};
+
+static int l2tp_dfs_seq_open(struct inode *inode, struct file *file)
+{
+ struct l2tp_dfs_seq_data *pd;
+ struct seq_file *seq;
+ int rc = -ENOMEM;
+
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ if (pd == NULL)
+ goto out;
+
+ /* Derive the network namespace from the pid opening the
+ * file.
+ */
+ pd->net = get_net_ns_by_pid(current->pid);
+ if (IS_ERR(pd->net)) {
+ rc = PTR_ERR(pd->net);
+ goto err_free_pd;
+ }
+
+ rc = seq_open(file, &l2tp_dfs_seq_ops);
+ if (rc)
+ goto err_free_net;
+
+ seq = file->private_data;
+ seq->private = pd;
+
+out:
+ return rc;
+
+err_free_net:
+ put_net(pd->net);
+err_free_pd:
+ kfree(pd);
+ goto out;
+}
+
+static int l2tp_dfs_seq_release(struct inode *inode, struct file *file)
+{
+ struct l2tp_dfs_seq_data *pd;
+ struct seq_file *seq;
+
+ seq = file->private_data;
+ pd = seq->private;
+ if (pd->net)
+ put_net(pd->net);
+ kfree(pd);
+ seq_release(inode, file);
+
+ return 0;
+}
+
+static const struct file_operations l2tp_dfs_fops = {
+ .owner = THIS_MODULE,
+ .open = l2tp_dfs_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = l2tp_dfs_seq_release,
+};
+
+static int __init l2tp_debugfs_init(void)
+{
+ int rc = 0;
+
+ rootdir = debugfs_create_dir("l2tp", NULL);
+ if (IS_ERR(rootdir)) {
+ rc = PTR_ERR(rootdir);
+ rootdir = NULL;
+ goto out;
+ }
+
+ tunnels = debugfs_create_file("tunnels", 0600, rootdir, NULL, &l2tp_dfs_fops);
+ if (tunnels == NULL)
+ rc = -EIO;
+
+ printk(KERN_INFO "L2TP debugfs support\n");
+
+out:
+ if (rc)
+ printk(KERN_WARNING "l2tp debugfs: unable to init\n");
+
+ return rc;
+}
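+
+/* With debugfs mounted (conventionally at /sys/kernel/debug), the code
+ * above exposes the dump implemented by the seq_file operations at
+ * <debugfs>/l2tp/tunnels (mode 0600); reading it walks every tunnel and
+ * session in the reader's network namespace.
+ */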
+
+static void __exit l2tp_debugfs_exit(void)
+{
+ debugfs_remove(tunnels);
+ debugfs_remove(rootdir);
+}
+
+module_init(l2tp_debugfs_init);
+module_exit(l2tp_debugfs_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
+MODULE_DESCRIPTION("L2TP debugfs driver");
+MODULE_VERSION("1.0");
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
new file mode 100644
index 000000000000..58c6c4cda73b
--- /dev/null
+++ b/net/l2tp/l2tp_eth.c
@@ -0,0 +1,334 @@
+/*
+ * L2TPv3 ethernet pseudowire driver
+ *
+ * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/socket.h>
+#include <linux/hash.h>
+#include <linux/l2tp.h>
+#include <linux/in.h>
+#include <linux/etherdevice.h>
+#include <linux/spinlock.h>
+#include <net/sock.h>
+#include <net/ip.h>
+#include <net/icmp.h>
+#include <net/udp.h>
+#include <net/inet_common.h>
+#include <net/inet_hashtables.h>
+#include <net/tcp_states.h>
+#include <net/protocol.h>
+#include <net/xfrm.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+
+#include "l2tp_core.h"
+
+/* Default device name. May be overridden by name specified by user */
+#define L2TP_ETH_DEV_NAME "l2tpeth%d"
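+/* e.g. with the default template, devices are named l2tpeth0, l2tpeth1, ... */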
+
+/* via netdev_priv() */
+struct l2tp_eth {
+ struct net_device *dev;
+ struct sock *tunnel_sock;
+ struct l2tp_session *session;
+ struct list_head list;
+};
+
+/* via l2tp_session_priv() */
+struct l2tp_eth_sess {
+ struct net_device *dev;
+};
+
+/* per-net private data for this module */
+static unsigned int l2tp_eth_net_id;
+struct l2tp_eth_net {
+ struct list_head l2tp_eth_dev_list;
+ spinlock_t l2tp_eth_lock;
+};
+
+static inline struct l2tp_eth_net *l2tp_eth_pernet(struct net *net)
+{
+ return net_generic(net, l2tp_eth_net_id);
+}
+
+static int l2tp_eth_dev_init(struct net_device *dev)
+{
+ struct l2tp_eth *priv = netdev_priv(dev);
+
+ priv->dev = dev;
+ random_ether_addr(dev->dev_addr);
+ memset(&dev->broadcast[0], 0xff, 6);
+
+ return 0;
+}
+
+static void l2tp_eth_dev_uninit(struct net_device *dev)
+{
+ struct l2tp_eth *priv = netdev_priv(dev);
+ struct l2tp_eth_net *pn = l2tp_eth_pernet(dev_net(dev));
+
+ spin_lock(&pn->l2tp_eth_lock);
+ list_del_init(&priv->list);
+ spin_unlock(&pn->l2tp_eth_lock);
+ dev_put(dev);
+}
+
+static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct l2tp_eth *priv = netdev_priv(dev);
+ struct l2tp_session *session = priv->session;
+
+ l2tp_xmit_skb(session, skb, session->hdr_len);
+
+ dev->stats.tx_bytes += skb->len;
+ dev->stats.tx_packets++;
+
+ return 0;
+}
+
+static struct net_device_ops l2tp_eth_netdev_ops = {
+ .ndo_init = l2tp_eth_dev_init,
+ .ndo_uninit = l2tp_eth_dev_uninit,
+ .ndo_start_xmit = l2tp_eth_dev_xmit,
+};
+
+static void l2tp_eth_dev_setup(struct net_device *dev)
+{
+ ether_setup(dev);
+
+ dev->netdev_ops = &l2tp_eth_netdev_ops;
+ dev->destructor = free_netdev;
+}
+
+static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len)
+{
+ struct l2tp_eth_sess *spriv = l2tp_session_priv(session);
+ struct net_device *dev = spriv->dev;
+
+ if (session->debug & L2TP_MSG_DATA) {
+ unsigned int length;
+ int offset;
+ u8 *ptr = skb->data;
+
+ length = min(32u, skb->len);
+ if (!pskb_may_pull(skb, length))
+ goto error;
+
+ printk(KERN_DEBUG "%s: eth recv: ", session->name);
+
+ offset = 0;
+ do {
+ printk(" %02X", ptr[offset]);
+ } while (++offset < length);
+
+ printk("\n");
+ }
+
+ if (data_len < ETH_HLEN)
+ goto error;
+
+ secpath_reset(skb);
+
+ /* checksums verified by L2TP */
+ skb->ip_summed = CHECKSUM_NONE;
+
+ skb_dst_drop(skb);
+ nf_reset(skb);
+
+ if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
+ dev->last_rx = jiffies;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += data_len;
+ } else
+ dev->stats.rx_errors++;
+
+ return;
+
+error:
+ dev->stats.rx_errors++;
+ kfree_skb(skb);
+}
+
+static void l2tp_eth_delete(struct l2tp_session *session)
+{
+ struct l2tp_eth_sess *spriv;
+ struct net_device *dev;
+
+ if (session) {
+ spriv = l2tp_session_priv(session);
+ dev = spriv->dev;
+ if (dev) {
+ unregister_netdev(dev);
+ spriv->dev = NULL;
+ }
+ }
+}
+
+#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
+static void l2tp_eth_show(struct seq_file *m, void *arg)
+{
+ struct l2tp_session *session = arg;
+ struct l2tp_eth_sess *spriv = l2tp_session_priv(session);
+ struct net_device *dev = spriv->dev;
+
+ seq_printf(m, " interface %s\n", dev->name);
+}
+#endif
+
+static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
+{
+ struct net_device *dev;
+ char name[IFNAMSIZ];
+ struct l2tp_tunnel *tunnel;
+ struct l2tp_session *session;
+ struct l2tp_eth *priv;
+ struct l2tp_eth_sess *spriv;
+ int rc;
+ struct l2tp_eth_net *pn;
+
+ tunnel = l2tp_tunnel_find(net, tunnel_id);
+ if (!tunnel) {
+ rc = -ENODEV;
+ goto out;
+ }
+
+ session = l2tp_session_find(net, tunnel, session_id);
+ if (session) {
+ rc = -EEXIST;
+ goto out;
+ }
+
+ if (cfg->ifname) {
+ dev = dev_get_by_name(net, cfg->ifname);
+ if (dev) {
+ dev_put(dev);
+ rc = -EEXIST;
+ goto out;
+ }
+ strlcpy(name, cfg->ifname, IFNAMSIZ);
+ } else
+ strcpy(name, L2TP_ETH_DEV_NAME);
+
+ session = l2tp_session_create(sizeof(*spriv), tunnel, session_id,
+ peer_session_id, cfg);
+ if (!session) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ dev = alloc_netdev(sizeof(*priv), name, l2tp_eth_dev_setup);
+ if (!dev) {
+ rc = -ENOMEM;
+ goto out_del_session;
+ }
+
+ dev_net_set(dev, net);
+ if (session->mtu == 0)
+ session->mtu = dev->mtu - session->hdr_len;
+ dev->mtu = session->mtu;
+ dev->needed_headroom += session->hdr_len;
+
+ priv = netdev_priv(dev);
+ priv->dev = dev;
+ priv->session = session;
+ INIT_LIST_HEAD(&priv->list);
+
+ priv->tunnel_sock = tunnel->sock;
+ session->recv_skb = l2tp_eth_dev_recv;
+ session->session_close = l2tp_eth_delete;
+#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
+ session->show = l2tp_eth_show;
+#endif
+
+ spriv = l2tp_session_priv(session);
+ spriv->dev = dev;
+
+ rc = register_netdev(dev);
+ if (rc < 0)
+ goto out_del_dev;
+
+ /* Must be done after register_netdev() */
+ strlcpy(session->ifname, dev->name, IFNAMSIZ);
+
+ dev_hold(dev);
+ pn = l2tp_eth_pernet(dev_net(dev));
+ spin_lock(&pn->l2tp_eth_lock);
+ list_add(&priv->list, &pn->l2tp_eth_dev_list);
+ spin_unlock(&pn->l2tp_eth_lock);
+
+ return 0;
+
+out_del_dev:
+ free_netdev(dev);
+out_del_session:
+ l2tp_session_delete(session);
+out:
+ return rc;
+}
+
+static __net_init int l2tp_eth_init_net(struct net *net)
+{
+ struct l2tp_eth_net *pn = net_generic(net, l2tp_eth_net_id);
+
+ INIT_LIST_HEAD(&pn->l2tp_eth_dev_list);
+ spin_lock_init(&pn->l2tp_eth_lock);
+
+ return 0;
+}
+
+static __net_initdata struct pernet_operations l2tp_eth_net_ops = {
+ .init = l2tp_eth_init_net,
+ .id = &l2tp_eth_net_id,
+ .size = sizeof(struct l2tp_eth_net),
+};
+
+
+static const struct l2tp_nl_cmd_ops l2tp_eth_nl_cmd_ops = {
+ .session_create = l2tp_eth_create,
+ .session_delete = l2tp_session_delete,
+};
+
+
+static int __init l2tp_eth_init(void)
+{
+ int err = 0;
+
+ err = l2tp_nl_register_ops(L2TP_PWTYPE_ETH, &l2tp_eth_nl_cmd_ops);
+ if (err)
+ goto out;
+
+ err = register_pernet_device(&l2tp_eth_net_ops);
+ if (err)
+ goto out_unreg;
+
+ printk(KERN_INFO "L2TP ethernet pseudowire support (L2TPv3)\n");
+
+ return 0;
+
+out_unreg:
+ l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH);
+out:
+ return err;
+}
+
+static void __exit l2tp_eth_exit(void)
+{
+ unregister_pernet_device(&l2tp_eth_net_ops);
+ l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH);
+}
+
+module_init(l2tp_eth_init);
+module_exit(l2tp_eth_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
+MODULE_DESCRIPTION("L2TP ethernet pseudowire driver");
+MODULE_VERSION("1.0");
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
new file mode 100644
index 000000000000..0852512d392c
--- /dev/null
+++ b/net/l2tp/l2tp_ip.c
@@ -0,0 +1,679 @@
+/*
+ * L2TPv3 IP encapsulation support
+ *
+ * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/icmp.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/random.h>
+#include <linux/socket.h>
+#include <linux/l2tp.h>
+#include <linux/in.h>
+#include <net/sock.h>
+#include <net/ip.h>
+#include <net/icmp.h>
+#include <net/udp.h>
+#include <net/inet_common.h>
+#include <net/inet_hashtables.h>
+#include <net/tcp_states.h>
+#include <net/protocol.h>
+#include <net/xfrm.h>
+
+#include "l2tp_core.h"
+
+struct l2tp_ip_sock {
+ /* inet_sock has to be the first member of l2tp_ip_sock */
+ struct inet_sock inet;
+
+ __u32 conn_id;
+ __u32 peer_conn_id;
+
+ __u64 tx_packets;
+ __u64 tx_bytes;
+ __u64 tx_errors;
+ __u64 rx_packets;
+ __u64 rx_bytes;
+ __u64 rx_errors;
+};
+
+static DEFINE_RWLOCK(l2tp_ip_lock);
+static struct hlist_head l2tp_ip_table;
+static struct hlist_head l2tp_ip_bind_table;
+
+static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
+{
+ return (struct l2tp_ip_sock *)sk;
+}
+
+static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
+{
+ struct hlist_node *node;
+ struct sock *sk;
+
+ sk_for_each_bound(sk, node, &l2tp_ip_bind_table) {
+ struct inet_sock *inet = inet_sk(sk);
+ struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk);
+
+ if (l2tp == NULL)
+ continue;
+
+ if ((l2tp->conn_id == tunnel_id) &&
+#ifdef CONFIG_NET_NS
+ (sk->sk_net == net) &&
+#endif
+ !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) &&
+ !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
+ goto found;
+ }
+
+ sk = NULL;
+found:
+ return sk;
+}
+
+static inline struct sock *l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
+{
+ struct sock *sk = __l2tp_ip_bind_lookup(net, laddr, dif, tunnel_id);
+ if (sk)
+ sock_hold(sk);
+
+ return sk;
+}
+
+/* When processing receive frames, there are two cases to
+ * consider. Data frames consist of a non-zero session-id and an
+ * optional cookie. Control frames consist of a regular L2TP header
+ * preceded by 32-bits of zeros.
+ *
+ * L2TPv3 Session Header Over IP
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Session ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Cookie (optional, maximum 64 bits)...
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * L2TPv3 Control Message Header Over IP
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | (32 bits of zeros) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |T|L|x|x|S|x|x|x|x|x|x|x| Ver | Length |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Control Connection ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Ns | Nr |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * All control frames are passed to userspace.
+ */
+static int l2tp_ip_recv(struct sk_buff *skb)
+{
+ struct sock *sk;
+ u32 session_id;
+ u32 tunnel_id;
+ unsigned char *ptr, *optr;
+ struct l2tp_session *session;
+ struct l2tp_tunnel *tunnel = NULL;
+ int length;
+ int offset;
+
+ /* Point to L2TP header */
+ optr = ptr = skb->data;
+
+ if (!pskb_may_pull(skb, 4))
+ goto discard;
+
+ session_id = ntohl(*((__be32 *) ptr));
+ ptr += 4;
+
+ /* RFC3931: L2TP/IP packets have the first 4 bytes containing
+ * the session_id. If it is 0, the packet is an L2TP control
+ * frame and the session_id value can be discarded.
+ */
+ if (session_id == 0) {
+ __skb_pull(skb, 4);
+ goto pass_up;
+ }
+
+ /* Ok, this is a data packet. Lookup the session. */
+ session = l2tp_session_find(&init_net, NULL, session_id);
+ if (session == NULL)
+ goto discard;
+
+ tunnel = session->tunnel;
+ if (tunnel == NULL)
+ goto discard;
+
+ /* Trace packet contents, if enabled */
+ if (tunnel->debug & L2TP_MSG_DATA) {
+ length = min(32u, skb->len);
+ if (!pskb_may_pull(skb, length))
+ goto discard;
+
+ printk(KERN_DEBUG "%s: ip recv: ", tunnel->name);
+
+ offset = 0;
+ do {
+ printk(" %02X", ptr[offset]);
+ } while (++offset < length);
+
+ printk("\n");
+ }
+
+ l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook);
+
+ return 0;
+
+pass_up:
+ /* Get the tunnel_id from the L2TP header */
+ if (!pskb_may_pull(skb, 12))
+ goto discard;
+
+ if ((skb->data[0] & 0xc0) != 0xc0)
+ goto discard;
+
+ tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
+ tunnel = l2tp_tunnel_find(&init_net, tunnel_id);
+ if (tunnel != NULL)
+ sk = tunnel->sock;
+ else {
+ struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
+
+ read_lock_bh(&l2tp_ip_lock);
+ sk = __l2tp_ip_bind_lookup(&init_net, iph->daddr, 0, tunnel_id);
+ read_unlock_bh(&l2tp_ip_lock);
+ }
+
+ if (sk == NULL)
+ goto discard;
+
+ sock_hold(sk);
+
+ if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
+ goto discard_put;
+
+ nf_reset(skb);
+
+ return sk_receive_skb(sk, skb, 1);
+
+discard_put:
+ sock_put(sk);
+
+discard:
+ kfree_skb(skb);
+ return 0;
+}
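+
+/* Userspace sketch (illustrative only, not part of this file): the control
+ * frames passed up above reach an L2TP daemon through an ordinary
+ * L2TP/IP socket, roughly:
+ *
+ *   int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_L2TP);
+ *   struct sockaddr_l2tpip sa = { 0 };
+ *   sa.l2tp_family = AF_INET;
+ *   sa.l2tp_addr.s_addr = htonl(INADDR_ANY);
+ *   sa.l2tp_conn_id = local_tunnel_id;
+ *   bind(fd, (struct sockaddr *)&sa, sizeof(sa));
+ *   recv(fd, buf, sizeof(buf), 0);
+ *
+ * The sockaddr_l2tpip fields follow those used by l2tp_ip_bind() and
+ * l2tp_ip_getname() in this file; local_tunnel_id and buf are placeholders.
+ */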
+
+static int l2tp_ip_open(struct sock *sk)
+{
+ /* Prevent autobind. We don't have ports. */
+ inet_sk(sk)->inet_num = IPPROTO_L2TP;
+
+ write_lock_bh(&l2tp_ip_lock);
+ sk_add_node(sk, &l2tp_ip_table);
+ write_unlock_bh(&l2tp_ip_lock);
+
+ return 0;
+}
+
+static void l2tp_ip_close(struct sock *sk, long timeout)
+{
+ write_lock_bh(&l2tp_ip_lock);
+ hlist_del_init(&sk->sk_bind_node);
+ hlist_del_init(&sk->sk_node);
+ write_unlock_bh(&l2tp_ip_lock);
+ sk_common_release(sk);
+}
+
+static void l2tp_ip_destroy_sock(struct sock *sk)
+{
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
+ kfree_skb(skb);
+
+ sk_refcnt_debug_dec(sk);
+}
+
+static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+{
+ struct inet_sock *inet = inet_sk(sk);
+ struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr;
+ int ret = -EINVAL;
+ int chk_addr_ret;
+
+ ret = -EADDRINUSE;
+ read_lock_bh(&l2tp_ip_lock);
+ if (__l2tp_ip_bind_lookup(&init_net, addr->l2tp_addr.s_addr, sk->sk_bound_dev_if, addr->l2tp_conn_id))
+ goto out_in_use;
+
+ read_unlock_bh(&l2tp_ip_lock);
+
+ lock_sock(sk);
+ if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip))
+ goto out;
+
+ chk_addr_ret = inet_addr_type(&init_net, addr->l2tp_addr.s_addr);
+ ret = -EADDRNOTAVAIL;
+ if (addr->l2tp_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
+ chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
+ goto out;
+
+ inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr;
+ if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
+ inet->inet_saddr = 0; /* Use device */
+ sk_dst_reset(sk);
+
+ l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id;
+
+ write_lock_bh(&l2tp_ip_lock);
+ sk_add_bind_node(sk, &l2tp_ip_bind_table);
+ sk_del_node_init(sk);
+ write_unlock_bh(&l2tp_ip_lock);
+ ret = 0;
+out:
+ release_sock(sk);
+
+ return ret;
+
+out_in_use:
+ read_unlock_bh(&l2tp_ip_lock);
+
+ return ret;
+}
+
+static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+{
+ int rc;
+ struct inet_sock *inet = inet_sk(sk);
+ struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr;
+ struct rtable *rt;
+ __be32 saddr;
+ int oif;
+
+ rc = -EINVAL;
+ if (addr_len < sizeof(*lsa))
+ goto out;
+
+ rc = -EAFNOSUPPORT;
+ if (lsa->l2tp_family != AF_INET)
+ goto out;
+
+ sk_dst_reset(sk);
+
+ oif = sk->sk_bound_dev_if;
+ saddr = inet->inet_saddr;
+
+ rc = -EINVAL;
+ if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
+ goto out;
+
+ rc = ip_route_connect(&rt, lsa->l2tp_addr.s_addr, saddr,
+ RT_CONN_FLAGS(sk), oif,
+ IPPROTO_L2TP,
+ 0, 0, sk, 1);
+ if (rc) {
+ if (rc == -ENETUNREACH)
+ IP_INC_STATS_BH(&init_net, IPSTATS_MIB_OUTNOROUTES);
+ goto out;
+ }
+
+ rc = -ENETUNREACH;
+ if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
+ ip_rt_put(rt);
+ goto out;
+ }
+
+ l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
+
+ if (!inet->inet_saddr)
+ inet->inet_saddr = rt->rt_src;
+ if (!inet->inet_rcv_saddr)
+ inet->inet_rcv_saddr = rt->rt_src;
+ inet->inet_daddr = rt->rt_dst;
+ sk->sk_state = TCP_ESTABLISHED;
+ inet->inet_id = jiffies;
+
+ sk_dst_set(sk, &rt->u.dst);
+
+ write_lock_bh(&l2tp_ip_lock);
+ hlist_del_init(&sk->sk_bind_node);
+ sk_add_bind_node(sk, &l2tp_ip_bind_table);
+ write_unlock_bh(&l2tp_ip_lock);
+
+ rc = 0;
+out:
+ return rc;
+}
+
+static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
+ int *uaddr_len, int peer)
+{
+ struct sock *sk = sock->sk;
+ struct inet_sock *inet = inet_sk(sk);
+ struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk);
+ struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr;
+
+ memset(lsa, 0, sizeof(*lsa));
+ lsa->l2tp_family = AF_INET;
+ if (peer) {
+ if (!inet->inet_dport)
+ return -ENOTCONN;
+ lsa->l2tp_conn_id = lsk->peer_conn_id;
+ lsa->l2tp_addr.s_addr = inet->inet_daddr;
+ } else {
+ __be32 addr = inet->inet_rcv_saddr;
+ if (!addr)
+ addr = inet->inet_saddr;
+ lsa->l2tp_conn_id = lsk->conn_id;
+ lsa->l2tp_addr.s_addr = addr;
+ }
+ *uaddr_len = sizeof(*lsa);
+ return 0;
+}
+
+static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
+{
+ int rc;
+
+ if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
+ goto drop;
+
+ nf_reset(skb);
+
+ /* Charge it to the socket, dropping if the queue is full. */
+ rc = sock_queue_rcv_skb(sk, skb);
+ if (rc < 0)
+ goto drop;
+
+ return 0;
+
+drop:
+ IP_INC_STATS(&init_net, IPSTATS_MIB_INDISCARDS);
+ kfree_skb(skb);
+ return -1;
+}
+
+/* Userspace will call sendmsg() on the tunnel socket to send L2TP
+ * control frames.
+ */
+static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len)
+{
+ struct sk_buff *skb;
+ int rc;
+ struct l2tp_ip_sock *lsa = l2tp_ip_sk(sk);
+ struct inet_sock *inet = inet_sk(sk);
+ struct ip_options *opt = inet->opt;
+ struct rtable *rt = NULL;
+ int connected = 0;
+ __be32 daddr;
+
+ if (sock_flag(sk, SOCK_DEAD))
+ return -ENOTCONN;
+
+ /* Get and verify the address. */
+ if (msg->msg_name) {
+ struct sockaddr_l2tpip *lip = (struct sockaddr_l2tpip *) msg->msg_name;
+ if (msg->msg_namelen < sizeof(*lip))
+ return -EINVAL;
+
+ if (lip->l2tp_family != AF_INET) {
+ if (lip->l2tp_family != AF_UNSPEC)
+ return -EAFNOSUPPORT;
+ }
+
+ daddr = lip->l2tp_addr.s_addr;
+ } else {
+ if (sk->sk_state != TCP_ESTABLISHED)
+ return -EDESTADDRREQ;
+
+ daddr = inet->inet_daddr;
+ connected = 1;
+ }
+
+ /* Allocate a socket buffer */
+ rc = -ENOMEM;
+ skb = sock_wmalloc(sk, 2 + NET_SKB_PAD + sizeof(struct iphdr) +
+ 4 + len, 0, GFP_KERNEL);
+ if (!skb)
+ goto error;
+
+ /* Reserve space for headers, putting IP header on 4-byte boundary. */
+ skb_reserve(skb, 2 + NET_SKB_PAD);
+ skb_reset_network_header(skb);
+ skb_reserve(skb, sizeof(struct iphdr));
+ skb_reset_transport_header(skb);
+
+ /* Insert 0 session_id */
+ *((__be32 *) skb_put(skb, 4)) = 0;
+
+ /* Copy user data into skb */
+ rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
+ if (rc < 0) {
+ kfree_skb(skb);
+ goto error;
+ }
+
+ if (connected)
+ rt = (struct rtable *) __sk_dst_check(sk, 0);
+
+ if (rt == NULL) {
+ /* Use correct destination address if we have options. */
+ if (opt && opt->srr)
+ daddr = opt->faddr;
+
+ {
+ struct flowi fl = { .oif = sk->sk_bound_dev_if,
+ .nl_u = { .ip4_u = {
+ .daddr = daddr,
+ .saddr = inet->inet_saddr,
+ .tos = RT_CONN_FLAGS(sk) } },
+ .proto = sk->sk_protocol,
+ .flags = inet_sk_flowi_flags(sk),
+ .uli_u = { .ports = {
+ .sport = inet->inet_sport,
+ .dport = inet->inet_dport } } };
+
+ /* If this fails, the retransmit mechanism of the transport
+ * layer will keep trying until a route appears or the
+ * connection times out.
+ */
+ security_sk_classify_flow(sk, &fl);
+ if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0))
+ goto no_route;
+ }
+ sk_setup_caps(sk, &rt->u.dst);
+ }
+ skb_dst_set(skb, dst_clone(&rt->u.dst));
+
+ /* Queue the packet to IP for output */
+ rc = ip_queue_xmit(skb);
+
+error:
+ /* Update stats */
+ if (rc >= 0) {
+ lsa->tx_packets++;
+ lsa->tx_bytes += len;
+ rc = len;
+ } else {
+ lsa->tx_errors++;
+ }
+
+ return rc;
+
+no_route:
+ IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
+ kfree_skb(skb);
+ return -EHOSTUNREACH;
+}
+
+static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ size_t len, int noblock, int flags, int *addr_len)
+{
+ struct inet_sock *inet = inet_sk(sk);
+ struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk);
+ size_t copied = 0;
+ int err = -EOPNOTSUPP;
+ struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
+ struct sk_buff *skb;
+
+ if (flags & MSG_OOB)
+ goto out;
+
+ if (addr_len)
+ *addr_len = sizeof(*sin);
+
+ skb = skb_recv_datagram(sk, flags, noblock, &err);
+ if (!skb)
+ goto out;
+
+ copied = skb->len;
+ if (len < copied) {
+ msg->msg_flags |= MSG_TRUNC;
+ copied = len;
+ }
+
+ err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ if (err)
+ goto done;
+
+ sock_recv_timestamp(msg, sk, skb);
+
+ /* Copy the address. */
+ if (sin) {
+ sin->sin_family = AF_INET;
+ sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
+ sin->sin_port = 0;
+ memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
+ }
+ if (inet->cmsg_flags)
+ ip_cmsg_recv(msg, skb);
+ if (flags & MSG_TRUNC)
+ copied = skb->len;
+done:
+ skb_free_datagram(sk, skb);
+out:
+ if (err) {
+ lsk->rx_errors++;
+ return err;
+ }
+
+ lsk->rx_packets++;
+ lsk->rx_bytes += copied;
+
+ return copied;
+}
+
+struct proto l2tp_ip_prot = {
+ .name = "L2TP/IP",
+ .owner = THIS_MODULE,
+ .init = l2tp_ip_open,
+ .close = l2tp_ip_close,
+ .bind = l2tp_ip_bind,
+ .connect = l2tp_ip_connect,
+ .disconnect = udp_disconnect,
+ .ioctl = udp_ioctl,
+ .destroy = l2tp_ip_destroy_sock,
+ .setsockopt = ip_setsockopt,
+ .getsockopt = ip_getsockopt,
+ .sendmsg = l2tp_ip_sendmsg,
+ .recvmsg = l2tp_ip_recvmsg,
+ .backlog_rcv = l2tp_ip_backlog_recv,
+ .hash = inet_hash,
+ .unhash = inet_unhash,
+ .obj_size = sizeof(struct l2tp_ip_sock),
+#ifdef CONFIG_COMPAT
+ .compat_setsockopt = compat_ip_setsockopt,
+ .compat_getsockopt = compat_ip_getsockopt,
+#endif
+};
+
+static const struct proto_ops l2tp_ip_ops = {
+ .family = PF_INET,
+ .owner = THIS_MODULE,
+ .release = inet_release,
+ .bind = inet_bind,
+ .connect = inet_dgram_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = sock_no_accept,
+ .getname = l2tp_ip_getname,
+ .poll = datagram_poll,
+ .ioctl = inet_ioctl,
+ .listen = sock_no_listen,
+ .shutdown = inet_shutdown,
+ .setsockopt = sock_common_setsockopt,
+ .getsockopt = sock_common_getsockopt,
+ .sendmsg = inet_sendmsg,
+ .recvmsg = sock_common_recvmsg,
+ .mmap = sock_no_mmap,
+ .sendpage = sock_no_sendpage,
+#ifdef CONFIG_COMPAT
+ .compat_setsockopt = compat_sock_common_setsockopt,
+ .compat_getsockopt = compat_sock_common_getsockopt,
+#endif
+};
+
+static struct inet_protosw l2tp_ip_protosw = {
+ .type = SOCK_DGRAM,
+ .protocol = IPPROTO_L2TP,
+ .prot = &l2tp_ip_prot,
+ .ops = &l2tp_ip_ops,
+ .no_check = 0,
+};
+
+static struct net_protocol l2tp_ip_protocol __read_mostly = {
+ .handler = l2tp_ip_recv,
+};
+
+static int __init l2tp_ip_init(void)
+{
+ int err;
+
+ printk(KERN_INFO "L2TP IP encapsulation support (L2TPv3)\n");
+
+ err = proto_register(&l2tp_ip_prot, 1);
+ if (err != 0)
+ goto out;
+
+ err = inet_add_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
+ if (err)
+ goto out1;
+
+ inet_register_protosw(&l2tp_ip_protosw);
+ return 0;
+
+out1:
+ proto_unregister(&l2tp_ip_prot);
+out:
+ return err;
+}
+
+static void __exit l2tp_ip_exit(void)
+{
+ inet_unregister_protosw(&l2tp_ip_protosw);
+ inet_del_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
+ proto_unregister(&l2tp_ip_prot);
+}
+
+module_init(l2tp_ip_init);
+module_exit(l2tp_ip_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
+MODULE_DESCRIPTION("L2TP over IP");
+MODULE_VERSION("1.0");
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, SOCK_DGRAM, IPPROTO_L2TP);
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
new file mode 100644
index 000000000000..4c1e540732d7
--- /dev/null
+++ b/net/l2tp/l2tp_netlink.c
@@ -0,0 +1,840 @@
+/*
+ * L2TP netlink layer, for management
+ *
+ * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
+ *
+ * Partly based on the IrDA netlink implementation
+ * (see net/irda/irnetlink.c) which is:
+ * Copyright (c) 2007 Samuel Ortiz <samuel@sortiz.org>
+ * which is in turn partly based on the wireless netlink code:
+ * Copyright 2006 Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <net/sock.h>
+#include <net/genetlink.h>
+#include <net/udp.h>
+#include <linux/in.h>
+#include <linux/udp.h>
+#include <linux/socket.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <net/net_namespace.h>
+
+#include <linux/l2tp.h>
+
+#include "l2tp_core.h"
+
+
+static struct genl_family l2tp_nl_family = {
+ .id = GENL_ID_GENERATE,
+ .name = L2TP_GENL_NAME,
+ .version = L2TP_GENL_VERSION,
+ .hdrsize = 0,
+ .maxattr = L2TP_ATTR_MAX,
+};
+
+/* Accessed under genl lock */
+static const struct l2tp_nl_cmd_ops *l2tp_nl_cmd_ops[__L2TP_PWTYPE_MAX];
+
+static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info)
+{
+ u32 tunnel_id;
+ u32 session_id;
+ char *ifname;
+ struct l2tp_tunnel *tunnel;
+ struct l2tp_session *session = NULL;
+ struct net *net = genl_info_net(info);
+
+ if (info->attrs[L2TP_ATTR_IFNAME]) {
+ ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]);
+ session = l2tp_session_find_by_ifname(net, ifname);
+ } else if ((info->attrs[L2TP_ATTR_SESSION_ID]) &&
+ (info->attrs[L2TP_ATTR_CONN_ID])) {
+ tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
+ session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
+ tunnel = l2tp_tunnel_find(net, tunnel_id);
+ if (tunnel)
+ session = l2tp_session_find(net, tunnel, session_id);
+ }
+
+ return session;
+}
+
+static int l2tp_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
+{
+ struct sk_buff *msg;
+ void *hdr;
+ int ret = -ENOBUFS;
+
+ msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
+ &l2tp_nl_family, 0, L2TP_CMD_NOOP);
+ if (!hdr) {
+ ret = -EMSGSIZE;
+ goto err_out;
+ }
+
+ genlmsg_end(msg, hdr);
+
+ return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);
+
+err_out:
+ nlmsg_free(msg);
+
+out:
+ return ret;
+}
+
+static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info)
+{
+ u32 tunnel_id;
+ u32 peer_tunnel_id;
+ int proto_version;
+ int fd;
+ int ret = 0;
+ struct l2tp_tunnel_cfg cfg = { 0, };
+ struct l2tp_tunnel *tunnel;
+ struct net *net = genl_info_net(info);
+
+ if (!info->attrs[L2TP_ATTR_CONN_ID]) {
+ ret = -EINVAL;
+ goto out;
+ }
+ tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
+
+ if (!info->attrs[L2TP_ATTR_PEER_CONN_ID]) {
+ ret = -EINVAL;
+ goto out;
+ }
+ peer_tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_CONN_ID]);
+
+ if (!info->attrs[L2TP_ATTR_PROTO_VERSION]) {
+ ret = -EINVAL;
+ goto out;
+ }
+ proto_version = nla_get_u8(info->attrs[L2TP_ATTR_PROTO_VERSION]);
+
+ if (!info->attrs[L2TP_ATTR_ENCAP_TYPE]) {
+ ret = -EINVAL;
+ goto out;
+ }
+ cfg.encap = nla_get_u16(info->attrs[L2TP_ATTR_ENCAP_TYPE]);
+
+ fd = -1;
+ if (info->attrs[L2TP_ATTR_FD]) {
+ fd = nla_get_u32(info->attrs[L2TP_ATTR_FD]);
+ } else {
+ if (info->attrs[L2TP_ATTR_IP_SADDR])
+ cfg.local_ip.s_addr = nla_get_be32(info->attrs[L2TP_ATTR_IP_SADDR]);
+ if (info->attrs[L2TP_ATTR_IP_DADDR])
+ cfg.peer_ip.s_addr = nla_get_be32(info->attrs[L2TP_ATTR_IP_DADDR]);
+ if (info->attrs[L2TP_ATTR_UDP_SPORT])
+ cfg.local_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_SPORT]);
+ if (info->attrs[L2TP_ATTR_UDP_DPORT])
+ cfg.peer_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_DPORT]);
+ if (info->attrs[L2TP_ATTR_UDP_CSUM])
+ cfg.use_udp_checksums = nla_get_flag(info->attrs[L2TP_ATTR_UDP_CSUM]);
+ }
+
+ if (info->attrs[L2TP_ATTR_DEBUG])
+ cfg.debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]);
+
+ tunnel = l2tp_tunnel_find(net, tunnel_id);
+ if (tunnel != NULL) {
+ ret = -EEXIST;
+ goto out;
+ }
+
+ ret = -EINVAL;
+ switch (cfg.encap) {
+ case L2TP_ENCAPTYPE_UDP:
+ case L2TP_ENCAPTYPE_IP:
+ ret = l2tp_tunnel_create(net, fd, proto_version, tunnel_id,
+ peer_tunnel_id, &cfg, &tunnel);
+ break;
+ }
+
+out:
+ return ret;
+}
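+
+/* Summary (derived from the checks above): a TUNNEL_CREATE request must
+ * carry L2TP_ATTR_CONN_ID, L2TP_ATTR_PEER_CONN_ID, L2TP_ATTR_PROTO_VERSION
+ * and L2TP_ATTR_ENCAP_TYPE. The tunnel socket is either passed in as
+ * L2TP_ATTR_FD (a UDP or L2TP/IP socket opened by userspace) or, when no
+ * fd is given, described by the L2TP_ATTR_IP_SADDR/DADDR and UDP port
+ * attributes, in which case the socket is created by the kernel.
+ */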
+
+static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info)
+{
+ struct l2tp_tunnel *tunnel;
+ u32 tunnel_id;
+ int ret = 0;
+ struct net *net = genl_info_net(info);
+
+ if (!info->attrs[L2TP_ATTR_CONN_ID]) {
+ ret = -EINVAL;
+ goto out;
+ }
+ tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
+
+ tunnel = l2tp_tunnel_find(net, tunnel_id);
+ if (tunnel == NULL) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ (void) l2tp_tunnel_delete(tunnel);
+
+out:
+ return ret;
+}
+
+static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info)
+{
+ struct l2tp_tunnel *tunnel;
+ u32 tunnel_id;
+ int ret = 0;
+ struct net *net = genl_info_net(info);
+
+ if (!info->attrs[L2TP_ATTR_CONN_ID]) {
+ ret = -EINVAL;
+ goto out;
+ }
+ tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
+
+ tunnel = l2tp_tunnel_find(net, tunnel_id);
+ if (tunnel == NULL) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (info->attrs[L2TP_ATTR_DEBUG])
+ tunnel->debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]);
+
+out:
+ return ret;
+}
+
+static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags,
+ struct l2tp_tunnel *tunnel)
+{
+ void *hdr;
+ struct nlattr *nest;
+ struct sock *sk = NULL;
+ struct inet_sock *inet;
+
+ hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags,
+ L2TP_CMD_TUNNEL_GET);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ NLA_PUT_U8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version);
+ NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id);
+ NLA_PUT_U32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id);
+ NLA_PUT_U32(skb, L2TP_ATTR_DEBUG, tunnel->debug);
+ NLA_PUT_U16(skb, L2TP_ATTR_ENCAP_TYPE, tunnel->encap);
+
+ nest = nla_nest_start(skb, L2TP_ATTR_STATS);
+ if (nest == NULL)
+ goto nla_put_failure;
+
+ NLA_PUT_U64(skb, L2TP_ATTR_TX_PACKETS, tunnel->stats.tx_packets);
+ NLA_PUT_U64(skb, L2TP_ATTR_TX_BYTES, tunnel->stats.tx_bytes);
+ NLA_PUT_U64(skb, L2TP_ATTR_TX_ERRORS, tunnel->stats.tx_errors);
+ NLA_PUT_U64(skb, L2TP_ATTR_RX_PACKETS, tunnel->stats.rx_packets);
+ NLA_PUT_U64(skb, L2TP_ATTR_RX_BYTES, tunnel->stats.rx_bytes);
+ NLA_PUT_U64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, tunnel->stats.rx_seq_discards);
+ NLA_PUT_U64(skb, L2TP_ATTR_RX_OOS_PACKETS, tunnel->stats.rx_oos_packets);
+ NLA_PUT_U64(skb, L2TP_ATTR_RX_ERRORS, tunnel->stats.rx_errors);
+ nla_nest_end(skb, nest);
+
+ sk = tunnel->sock;
+ if (!sk)
+ goto out;
+
+ inet = inet_sk(sk);
+
+ switch (tunnel->encap) {
+ case L2TP_ENCAPTYPE_UDP:
+ NLA_PUT_U16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport));
+ NLA_PUT_U16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport));
+ NLA_PUT_U8(skb, L2TP_ATTR_UDP_CSUM, (sk->sk_no_check != UDP_CSUM_NOXMIT));
+ /* NOBREAK */
+ case L2TP_ENCAPTYPE_IP:
+ NLA_PUT_BE32(skb, L2TP_ATTR_IP_SADDR, inet->inet_saddr);
+ NLA_PUT_BE32(skb, L2TP_ATTR_IP_DADDR, inet->inet_daddr);
+ break;
+ }
+
+out:
+ return genlmsg_end(skb, hdr);
+
+nla_put_failure:
+ genlmsg_cancel(skb, hdr);
+ return -1;
+}
+
+static int l2tp_nl_cmd_tunnel_get(struct sk_buff *skb, struct genl_info *info)
+{
+ struct l2tp_tunnel *tunnel;
+ struct sk_buff *msg;
+ u32 tunnel_id;
+ int ret = -ENOBUFS;
+ struct net *net = genl_info_net(info);
+
+ if (!info->attrs[L2TP_ATTR_CONN_ID]) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
+
+ tunnel = l2tp_tunnel_find(net, tunnel_id);
+ if (tunnel == NULL) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = l2tp_nl_tunnel_send(msg, info->snd_pid, info->snd_seq,
+ NLM_F_ACK, tunnel);
+ if (ret < 0)
+ goto err_out;
+
+ return genlmsg_unicast(net, msg, info->snd_pid);
+
+err_out:
+ nlmsg_free(msg);
+
+out:
+ return ret;
+}
+
+static int l2tp_nl_cmd_tunnel_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ int ti = cb->args[0];
+ struct l2tp_tunnel *tunnel;
+ struct net *net = sock_net(skb->sk);
+
+ for (;;) {
+ tunnel = l2tp_tunnel_find_nth(net, ti);
+ if (tunnel == NULL)
+ goto out;
+
+ if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ tunnel) <= 0)
+ goto out;
+
+ ti++;
+ }
+
+out:
+ cb->args[0] = ti;
+
+ return skb->len;
+}
+
+static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *info)
+{
+ u32 tunnel_id = 0;
+ u32 session_id;
+ u32 peer_session_id;
+ int ret = 0;
+ struct l2tp_tunnel *tunnel;
+ struct l2tp_session *session;
+ struct l2tp_session_cfg cfg = { 0, };
+ struct net *net = genl_info_net(info);
+
+ if (!info->attrs[L2TP_ATTR_CONN_ID]) {
+ ret = -EINVAL;
+ goto out;
+ }
+ tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
+ tunnel = l2tp_tunnel_find(net, tunnel_id);
+ if (!tunnel) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (!info->attrs[L2TP_ATTR_SESSION_ID]) {
+ ret = -EINVAL;
+ goto out;
+ }
+ session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
+ session = l2tp_session_find(net, tunnel, session_id);
+ if (session) {
+ ret = -EEXIST;
+ goto out;
+ }
+
+ if (!info->attrs[L2TP_ATTR_PEER_SESSION_ID]) {
+ ret = -EINVAL;
+ goto out;
+ }
+ peer_session_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_SESSION_ID]);
+
+ if (!info->attrs[L2TP_ATTR_PW_TYPE]) {
+ ret = -EINVAL;
+ goto out;
+ }
+ cfg.pw_type = nla_get_u16(info->attrs[L2TP_ATTR_PW_TYPE]);
+ if (cfg.pw_type >= __L2TP_PWTYPE_MAX) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (tunnel->version > 2) {
+ if (info->attrs[L2TP_ATTR_OFFSET])
+ cfg.offset = nla_get_u16(info->attrs[L2TP_ATTR_OFFSET]);
+
+ if (info->attrs[L2TP_ATTR_DATA_SEQ])
+ cfg.data_seq = nla_get_u8(info->attrs[L2TP_ATTR_DATA_SEQ]);
+
+ cfg.l2specific_type = L2TP_L2SPECTYPE_DEFAULT;
+ if (info->attrs[L2TP_ATTR_L2SPEC_TYPE])
+ cfg.l2specific_type = nla_get_u8(info->attrs[L2TP_ATTR_L2SPEC_TYPE]);
+
+ cfg.l2specific_len = 4;
+ if (info->attrs[L2TP_ATTR_L2SPEC_LEN])
+ cfg.l2specific_len = nla_get_u8(info->attrs[L2TP_ATTR_L2SPEC_LEN]);
+
+ if (info->attrs[L2TP_ATTR_COOKIE]) {
+ u16 len = nla_len(info->attrs[L2TP_ATTR_COOKIE]);
+ if (len > 8) {
+ ret = -EINVAL;
+ goto out;
+ }
+ cfg.cookie_len = len;
+ memcpy(&cfg.cookie[0], nla_data(info->attrs[L2TP_ATTR_COOKIE]), len);
+ }
+ if (info->attrs[L2TP_ATTR_PEER_COOKIE]) {
+ u16 len = nla_len(info->attrs[L2TP_ATTR_PEER_COOKIE]);
+ if (len > 8) {
+ ret = -EINVAL;
+ goto out;
+ }
+ cfg.peer_cookie_len = len;
+ memcpy(&cfg.peer_cookie[0], nla_data(info->attrs[L2TP_ATTR_PEER_COOKIE]), len);
+ }
+ if (info->attrs[L2TP_ATTR_IFNAME])
+ cfg.ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]);
+
+ if (info->attrs[L2TP_ATTR_VLAN_ID])
+ cfg.vlan_id = nla_get_u16(info->attrs[L2TP_ATTR_VLAN_ID]);
+ }
+
+ if (info->attrs[L2TP_ATTR_DEBUG])
+ cfg.debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]);
+
+ if (info->attrs[L2TP_ATTR_RECV_SEQ])
+ cfg.recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]);
+
+ if (info->attrs[L2TP_ATTR_SEND_SEQ])
+ cfg.send_seq = nla_get_u8(info->attrs[L2TP_ATTR_SEND_SEQ]);
+
+ if (info->attrs[L2TP_ATTR_LNS_MODE])
+ cfg.lns_mode = nla_get_u8(info->attrs[L2TP_ATTR_LNS_MODE]);
+
+ if (info->attrs[L2TP_ATTR_RECV_TIMEOUT])
+ cfg.reorder_timeout = nla_get_msecs(info->attrs[L2TP_ATTR_RECV_TIMEOUT]);
+
+ if (info->attrs[L2TP_ATTR_MTU])
+ cfg.mtu = nla_get_u16(info->attrs[L2TP_ATTR_MTU]);
+
+ if (info->attrs[L2TP_ATTR_MRU])
+ cfg.mru = nla_get_u16(info->attrs[L2TP_ATTR_MRU]);
+
+ if ((l2tp_nl_cmd_ops[cfg.pw_type] == NULL) ||
+ (l2tp_nl_cmd_ops[cfg.pw_type]->session_create == NULL)) {
+ ret = -EPROTONOSUPPORT;
+ goto out;
+ }
+
+ /* Check that pseudowire-specific params are present */
+ switch (cfg.pw_type) {
+ case L2TP_PWTYPE_NONE:
+ break;
+ case L2TP_PWTYPE_ETH_VLAN:
+ if (!info->attrs[L2TP_ATTR_VLAN_ID]) {
+ ret = -EINVAL;
+ goto out;
+ }
+ break;
+ case L2TP_PWTYPE_ETH:
+ break;
+ case L2TP_PWTYPE_PPP:
+ case L2TP_PWTYPE_PPP_AC:
+ break;
+ case L2TP_PWTYPE_IP:
+ default:
+ ret = -EPROTONOSUPPORT;
+ break;
+ }
+
+ ret = -EPROTONOSUPPORT;
+ if (l2tp_nl_cmd_ops[cfg.pw_type]->session_create)
+ ret = (*l2tp_nl_cmd_ops[cfg.pw_type]->session_create)(net, tunnel_id,
+ session_id, peer_session_id, &cfg);
+
+out:
+ return ret;
+}
+
+static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *info)
+{
+ int ret = 0;
+ struct l2tp_session *session;
+ u16 pw_type;
+
+ session = l2tp_nl_session_find(info);
+ if (session == NULL) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ pw_type = session->pwtype;
+ if (pw_type < __L2TP_PWTYPE_MAX)
+ if (l2tp_nl_cmd_ops[pw_type] && l2tp_nl_cmd_ops[pw_type]->session_delete)
+ ret = (*l2tp_nl_cmd_ops[pw_type]->session_delete)(session);
+
+out:
+ return ret;
+}
+
+static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *info)
+{
+ int ret = 0;
+ struct l2tp_session *session;
+
+ session = l2tp_nl_session_find(info);
+ if (session == NULL) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (info->attrs[L2TP_ATTR_DEBUG])
+ session->debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]);
+
+ if (info->attrs[L2TP_ATTR_DATA_SEQ])
+ session->data_seq = nla_get_u8(info->attrs[L2TP_ATTR_DATA_SEQ]);
+
+ if (info->attrs[L2TP_ATTR_RECV_SEQ])
+ session->recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]);
+
+ if (info->attrs[L2TP_ATTR_SEND_SEQ])
+ session->send_seq = nla_get_u8(info->attrs[L2TP_ATTR_SEND_SEQ]);
+
+ if (info->attrs[L2TP_ATTR_LNS_MODE])
+ session->lns_mode = nla_get_u8(info->attrs[L2TP_ATTR_LNS_MODE]);
+
+ if (info->attrs[L2TP_ATTR_RECV_TIMEOUT])
+ session->reorder_timeout = nla_get_msecs(info->attrs[L2TP_ATTR_RECV_TIMEOUT]);
+
+ if (info->attrs[L2TP_ATTR_MTU])
+ session->mtu = nla_get_u16(info->attrs[L2TP_ATTR_MTU]);
+
+ if (info->attrs[L2TP_ATTR_MRU])
+ session->mru = nla_get_u16(info->attrs[L2TP_ATTR_MRU]);
+
+out:
+ return ret;
+}
+
+static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags,
+ struct l2tp_session *session)
+{
+ void *hdr;
+ struct nlattr *nest;
+ struct l2tp_tunnel *tunnel = session->tunnel;
+ struct sock *sk = NULL;
+
+ sk = tunnel->sock;
+
+ hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, L2TP_CMD_SESSION_GET);
+ /* genlmsg_put() returns NULL on failure, not an ERR_PTR value */
+ if (!hdr)
+ return -EMSGSIZE;
+
+ NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id);
+ NLA_PUT_U32(skb, L2TP_ATTR_SESSION_ID, session->session_id);
+ NLA_PUT_U32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id);
+ NLA_PUT_U32(skb, L2TP_ATTR_PEER_SESSION_ID, session->peer_session_id);
+ NLA_PUT_U32(skb, L2TP_ATTR_DEBUG, session->debug);
+ NLA_PUT_U16(skb, L2TP_ATTR_PW_TYPE, session->pwtype);
+ NLA_PUT_U16(skb, L2TP_ATTR_MTU, session->mtu);
+ if (session->mru)
+ NLA_PUT_U16(skb, L2TP_ATTR_MRU, session->mru);
+
+ if (session->ifname && session->ifname[0])
+ NLA_PUT_STRING(skb, L2TP_ATTR_IFNAME, session->ifname);
+ if (session->cookie_len)
+ NLA_PUT(skb, L2TP_ATTR_COOKIE, session->cookie_len, &session->cookie[0]);
+ if (session->peer_cookie_len)
+ NLA_PUT(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len, &session->peer_cookie[0]);
+ NLA_PUT_U8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq);
+ NLA_PUT_U8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq);
+ NLA_PUT_U8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode);
+#ifdef CONFIG_XFRM
+ if ((sk) && (sk->sk_policy[0] || sk->sk_policy[1]))
+ NLA_PUT_U8(skb, L2TP_ATTR_USING_IPSEC, 1);
+#endif
+ if (session->reorder_timeout)
+ NLA_PUT_MSECS(skb, L2TP_ATTR_RECV_TIMEOUT, session->reorder_timeout);
+
+ nest = nla_nest_start(skb, L2TP_ATTR_STATS);
+ if (nest == NULL)
+ goto nla_put_failure;
+ NLA_PUT_U64(skb, L2TP_ATTR_TX_PACKETS, session->stats.tx_packets);
+ NLA_PUT_U64(skb, L2TP_ATTR_TX_BYTES, session->stats.tx_bytes);
+ NLA_PUT_U64(skb, L2TP_ATTR_TX_ERRORS, session->stats.tx_errors);
+ NLA_PUT_U64(skb, L2TP_ATTR_RX_PACKETS, session->stats.rx_packets);
+ NLA_PUT_U64(skb, L2TP_ATTR_RX_BYTES, session->stats.rx_bytes);
+ NLA_PUT_U64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, session->stats.rx_seq_discards);
+ NLA_PUT_U64(skb, L2TP_ATTR_RX_OOS_PACKETS, session->stats.rx_oos_packets);
+ NLA_PUT_U64(skb, L2TP_ATTR_RX_ERRORS, session->stats.rx_errors);
+ nla_nest_end(skb, nest);
+
+ return genlmsg_end(skb, hdr);
+
+ nla_put_failure:
+ genlmsg_cancel(skb, hdr);
+ return -1;
+}
+
+static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info)
+{
+ struct l2tp_session *session;
+ struct sk_buff *msg;
+ int ret;
+
+ session = l2tp_nl_session_find(info);
+ if (session == NULL) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = l2tp_nl_session_send(msg, info->snd_pid, info->snd_seq,
+ 0, session);
+ if (ret < 0)
+ goto err_out;
+
+ return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);
+
+err_out:
+ nlmsg_free(msg);
+
+out:
+ return ret;
+}
+
+static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct net *net = sock_net(skb->sk);
+ struct l2tp_session *session;
+ struct l2tp_tunnel *tunnel = NULL;
+ int ti = cb->args[0];
+ int si = cb->args[1];
+
+ for (;;) {
+ if (tunnel == NULL) {
+ tunnel = l2tp_tunnel_find_nth(net, ti);
+ if (tunnel == NULL)
+ goto out;
+ }
+
+ session = l2tp_session_find_nth(tunnel, si);
+ if (session == NULL) {
+ ti++;
+ tunnel = NULL;
+ si = 0;
+ continue;
+ }
+
+ if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ session) <= 0)
+ break;
+
+ si++;
+ }
+
+out:
+ cb->args[0] = ti;
+ cb->args[1] = si;
+
+ return skb->len;
+}
+
+static struct nla_policy l2tp_nl_policy[L2TP_ATTR_MAX + 1] = {
+ [L2TP_ATTR_NONE] = { .type = NLA_UNSPEC, },
+ [L2TP_ATTR_PW_TYPE] = { .type = NLA_U16, },
+ [L2TP_ATTR_ENCAP_TYPE] = { .type = NLA_U16, },
+ [L2TP_ATTR_OFFSET] = { .type = NLA_U16, },
+ [L2TP_ATTR_DATA_SEQ] = { .type = NLA_U8, },
+ [L2TP_ATTR_L2SPEC_TYPE] = { .type = NLA_U8, },
+ [L2TP_ATTR_L2SPEC_LEN] = { .type = NLA_U8, },
+ [L2TP_ATTR_PROTO_VERSION] = { .type = NLA_U8, },
+ [L2TP_ATTR_CONN_ID] = { .type = NLA_U32, },
+ [L2TP_ATTR_PEER_CONN_ID] = { .type = NLA_U32, },
+ [L2TP_ATTR_SESSION_ID] = { .type = NLA_U32, },
+ [L2TP_ATTR_PEER_SESSION_ID] = { .type = NLA_U32, },
+ [L2TP_ATTR_UDP_CSUM] = { .type = NLA_U8, },
+ [L2TP_ATTR_VLAN_ID] = { .type = NLA_U16, },
+ [L2TP_ATTR_DEBUG] = { .type = NLA_U32, },
+ [L2TP_ATTR_RECV_SEQ] = { .type = NLA_U8, },
+ [L2TP_ATTR_SEND_SEQ] = { .type = NLA_U8, },
+ [L2TP_ATTR_LNS_MODE] = { .type = NLA_U8, },
+ [L2TP_ATTR_USING_IPSEC] = { .type = NLA_U8, },
+ [L2TP_ATTR_RECV_TIMEOUT] = { .type = NLA_MSECS, },
+ [L2TP_ATTR_FD] = { .type = NLA_U32, },
+ [L2TP_ATTR_IP_SADDR] = { .type = NLA_U32, },
+ [L2TP_ATTR_IP_DADDR] = { .type = NLA_U32, },
+ [L2TP_ATTR_UDP_SPORT] = { .type = NLA_U16, },
+ [L2TP_ATTR_UDP_DPORT] = { .type = NLA_U16, },
+ [L2TP_ATTR_MTU] = { .type = NLA_U16, },
+ [L2TP_ATTR_MRU] = { .type = NLA_U16, },
+ [L2TP_ATTR_STATS] = { .type = NLA_NESTED, },
+ [L2TP_ATTR_IFNAME] = {
+ .type = NLA_NUL_STRING,
+ .len = IFNAMSIZ - 1,
+ },
+ [L2TP_ATTR_COOKIE] = {
+ .type = NLA_BINARY,
+ .len = 8,
+ },
+ [L2TP_ATTR_PEER_COOKIE] = {
+ .type = NLA_BINARY,
+ .len = 8,
+ },
+};
+
+static struct genl_ops l2tp_nl_ops[] = {
+ {
+ .cmd = L2TP_CMD_NOOP,
+ .doit = l2tp_nl_cmd_noop,
+ .policy = l2tp_nl_policy,
+ /* can be retrieved by unprivileged users */
+ },
+ {
+ .cmd = L2TP_CMD_TUNNEL_CREATE,
+ .doit = l2tp_nl_cmd_tunnel_create,
+ .policy = l2tp_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = L2TP_CMD_TUNNEL_DELETE,
+ .doit = l2tp_nl_cmd_tunnel_delete,
+ .policy = l2tp_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = L2TP_CMD_TUNNEL_MODIFY,
+ .doit = l2tp_nl_cmd_tunnel_modify,
+ .policy = l2tp_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = L2TP_CMD_TUNNEL_GET,
+ .doit = l2tp_nl_cmd_tunnel_get,
+ .dumpit = l2tp_nl_cmd_tunnel_dump,
+ .policy = l2tp_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = L2TP_CMD_SESSION_CREATE,
+ .doit = l2tp_nl_cmd_session_create,
+ .policy = l2tp_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = L2TP_CMD_SESSION_DELETE,
+ .doit = l2tp_nl_cmd_session_delete,
+ .policy = l2tp_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = L2TP_CMD_SESSION_MODIFY,
+ .doit = l2tp_nl_cmd_session_modify,
+ .policy = l2tp_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = L2TP_CMD_SESSION_GET,
+ .doit = l2tp_nl_cmd_session_get,
+ .dumpit = l2tp_nl_cmd_session_dump,
+ .policy = l2tp_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+};
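
The ops table above is the command set a userspace control daemon drives over
generic netlink. For illustration only (this sketch is not part of the patch),
a tunnel create request might be built with libnl-3 roughly as follows; the
libnl calls, the helper name create_l2tp_tunnel() and the variable tunnel_fd
(assumed to be an already-connected UDP socket) are assumptions of the sketch,
and the attribute set mirrors the checks made by l2tp_nl_cmd_tunnel_create().

    /* Hypothetical userspace sketch (libnl-3): create an L2TPv3 tunnel. */
    #include <stdint.h>
    #include <netlink/netlink.h>
    #include <netlink/genl/genl.h>
    #include <netlink/genl/ctrl.h>
    #include <linux/l2tp.h>

    static int create_l2tp_tunnel(int tunnel_fd, uint32_t tid, uint32_t peer_tid)
    {
            struct nl_sock *sk = nl_socket_alloc();
            struct nl_msg *msg = NULL;
            int family, err = -1;

            if (sk == NULL)
                    return -1;
            if (genl_connect(sk) < 0)
                    goto out;
            family = genl_ctrl_resolve(sk, L2TP_GENL_NAME);   /* "l2tp" */
            if (family < 0)
                    goto out;
            msg = nlmsg_alloc();
            if (msg == NULL)
                    goto out;

            genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
                        L2TP_CMD_TUNNEL_CREATE, L2TP_GENL_VERSION);
            nla_put_u8(msg, L2TP_ATTR_PROTO_VERSION, 3);
            nla_put_u32(msg, L2TP_ATTR_CONN_ID, tid);
            nla_put_u32(msg, L2TP_ATTR_PEER_CONN_ID, peer_tid);
            nla_put_u16(msg, L2TP_ATTR_ENCAP_TYPE, L2TP_ENCAPTYPE_UDP);
            nla_put_u32(msg, L2TP_ATTR_FD, tunnel_fd);

            err = nl_send_auto_complete(sk, msg) < 0 ? -1 : 0;
            nlmsg_free(msg);
    out:
            nl_socket_free(sk);
            return err;
    }
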
+
+int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops *ops)
+{
+ int ret;
+
+ ret = -EINVAL;
+ if (pw_type >= __L2TP_PWTYPE_MAX)
+ goto err;
+
+ genl_lock();
+ ret = -EBUSY;
+ if (l2tp_nl_cmd_ops[pw_type])
+ goto out;
+
+ l2tp_nl_cmd_ops[pw_type] = ops;
+
+out:
+ genl_unlock();
+err:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(l2tp_nl_register_ops);
+
+void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type)
+{
+ if (pw_type < __L2TP_PWTYPE_MAX) {
+ genl_lock();
+ l2tp_nl_cmd_ops[pw_type] = NULL;
+ genl_unlock();
+ }
+}
+EXPORT_SYMBOL_GPL(l2tp_nl_unregister_ops);
+
+static int l2tp_nl_init(void)
+{
+ int err;
+
+ printk(KERN_INFO "L2TP netlink interface\n");
+ err = genl_register_family_with_ops(&l2tp_nl_family, l2tp_nl_ops,
+ ARRAY_SIZE(l2tp_nl_ops));
+
+ return err;
+}
+
+static void l2tp_nl_cleanup(void)
+{
+ genl_unregister_family(&l2tp_nl_family);
+}
+
+module_init(l2tp_nl_init);
+module_exit(l2tp_nl_cleanup);
+
+MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
+MODULE_DESCRIPTION("L2TP netlink");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("net-pf-" __stringify(PF_NETLINK) "-proto-" \
+ __stringify(NETLINK_GENERIC) "-type-" "l2tp");
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
new file mode 100644
index 000000000000..90d82b3f2889
--- /dev/null
+++ b/net/l2tp/l2tp_ppp.c
@@ -0,0 +1,1837 @@
+/*****************************************************************************
+ * Linux PPP over L2TP (PPPoX/PPPoL2TP) Sockets
+ *
+ * PPPoX --- Generic PPP encapsulation socket family
+ * PPPoL2TP --- PPP over L2TP (RFC 2661)
+ *
+ * Version: 2.0.0
+ *
+ * Authors: James Chapman (jchapman@katalix.com)
+ *
+ * Based on original work by Martijn van Oosterhout <kleptog@svana.org>
+ *
+ * License:
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+/* This driver handles only L2TP data frames; control frames are handled by a
+ * userspace application.
+ *
+ * To send data in an L2TP session, userspace opens a PPPoL2TP socket and
+ * attaches it to a bound UDP socket with local tunnel_id / session_id and
+ * peer tunnel_id / session_id set. Data can then be sent or received using
+ * regular socket sendmsg() / recvmsg() calls. Kernel parameters of the socket
+ * can be read or modified using ioctl() or [gs]etsockopt() calls.
+ *
+ * When a PPPoL2TP socket is connected with local and peer session_id values
+ * zero, the socket is treated as a special tunnel management socket.
+ *
+ * Here's example userspace code to create a socket for sending/receiving data
+ * over an L2TP session:-
+ *
+ * struct sockaddr_pppol2tp sax;
+ * int fd;
+ * int session_fd;
+ *
+ * fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);
+ *
+ * sax.sa_family = AF_PPPOX;
+ * sax.sa_protocol = PX_PROTO_OL2TP;
+ * sax.pppol2tp.fd = tunnel_fd; // bound UDP socket
+ * sax.pppol2tp.addr.sin_addr.s_addr = addr->sin_addr.s_addr;
+ * sax.pppol2tp.addr.sin_port = addr->sin_port;
+ * sax.pppol2tp.addr.sin_family = AF_INET;
+ * sax.pppol2tp.s_tunnel = tunnel_id;
+ * sax.pppol2tp.s_session = session_id;
+ * sax.pppol2tp.d_tunnel = peer_tunnel_id;
+ * sax.pppol2tp.d_session = peer_session_id;
+ *
+ * session_fd = connect(fd, (struct sockaddr *)&sax, sizeof(sax));
+ *
+ * A pppd plugin that allows PPP traffic to be carried over L2TP using
+ * this driver is available from the OpenL2TP project at
+ * http://openl2tp.sourceforge.net.
+ */
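
The example above covers a data session. The special tunnel-management case
mentioned earlier (local and peer session_id both zero) is not shown there, so
here is a minimal fragment in the same spirit; it is illustrative only, the
names tunnel_fd, tunnel_id and peer_tunnel_id are placeholders, and error
handling is omitted.

    /* Illustrative fragment: a tunnel-management PPPoL2TP socket. With
     * s_session == d_session == 0 the kernel treats this socket as the
     * tunnel control socket rather than a data session.
     */
    struct sockaddr_pppol2tp sax;
    int tfd;

    memset(&sax, 0, sizeof(sax));
    tfd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);

    sax.sa_family = AF_PPPOX;
    sax.sa_protocol = PX_PROTO_OL2TP;
    sax.pppol2tp.fd = tunnel_fd;            /* bound/connected UDP socket */
    sax.pppol2tp.s_tunnel = tunnel_id;
    sax.pppol2tp.d_tunnel = peer_tunnel_id;
    sax.pppol2tp.s_session = 0;             /* zero => management socket */
    sax.pppol2tp.d_session = 0;

    if (connect(tfd, (struct sockaddr *)&sax, sizeof(sax)) == 0) {
            /* tfd can now carry SOL_PPPOL2TP sockopts and PPPIOC* ioctls
             * that act on the tunnel (see further down in this file). */
    }
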
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/uaccess.h>
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/jiffies.h>
+
+#include <linux/netdevice.h>
+#include <linux/net.h>
+#include <linux/inetdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/if_pppox.h>
+#include <linux/if_pppol2tp.h>
+#include <net/sock.h>
+#include <linux/ppp_channel.h>
+#include <linux/ppp_defs.h>
+#include <linux/if_ppp.h>
+#include <linux/file.h>
+#include <linux/hash.h>
+#include <linux/sort.h>
+#include <linux/proc_fs.h>
+#include <linux/l2tp.h>
+#include <linux/nsproxy.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include <net/dst.h>
+#include <net/ip.h>
+#include <net/udp.h>
+#include <net/xfrm.h>
+
+#include <asm/byteorder.h>
+#include <asm/atomic.h>
+
+#include "l2tp_core.h"
+
+#define PPPOL2TP_DRV_VERSION "V2.0"
+
+/* Space for UDP, L2TP and PPP headers */
+#define PPPOL2TP_HEADER_OVERHEAD 40
+
+#define PRINTK(_mask, _type, _lvl, _fmt, args...) \
+ do { \
+ if ((_mask) & (_type)) \
+ printk(_lvl "PPPOL2TP: " _fmt, ##args); \
+ } while (0)
+
+/* Number of bytes to build transmit L2TP headers.
+ * Unfortunately the size is different depending on whether sequence numbers
+ * are enabled.
+ */
+#define PPPOL2TP_L2TP_HDR_SIZE_SEQ 10
+#define PPPOL2TP_L2TP_HDR_SIZE_NOSEQ 6
+
+/* Private data of each session. This data lives at the end of struct
+ * l2tp_session, referenced via session->priv[].
+ */
+struct pppol2tp_session {
+ int owner; /* pid that opened the socket */
+
+ struct sock *sock; /* Pointer to the session
+ * PPPoX socket */
+ struct sock *tunnel_sock; /* Pointer to the tunnel UDP
+ * socket */
+ int flags; /* accessed by PPPIOCGFLAGS.
+ * Unused. */
+};
+
+static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb);
+
+static struct ppp_channel_ops pppol2tp_chan_ops = { pppol2tp_xmit, NULL };
+static const struct proto_ops pppol2tp_ops;
+
+/* Helpers to obtain tunnel/session contexts from sockets.
+ */
+static inline struct l2tp_session *pppol2tp_sock_to_session(struct sock *sk)
+{
+ struct l2tp_session *session;
+
+ if (sk == NULL)
+ return NULL;
+
+ sock_hold(sk);
+ session = (struct l2tp_session *)(sk->sk_user_data);
+ if (session == NULL) {
+ sock_put(sk);
+ goto out;
+ }
+
+ BUG_ON(session->magic != L2TP_SESSION_MAGIC);
+
+out:
+ return session;
+}
+
+/*****************************************************************************
+ * Receive data handling
+ *****************************************************************************/
+
+static int pppol2tp_recv_payload_hook(struct sk_buff *skb)
+{
+ /* Skip PPP header, if present. In testing, Microsoft L2TP clients
+ * don't send the PPP header (PPP header compression enabled), but
+ * other clients can include the header. So we cope with both cases
+ * here. The PPP header is always FF03 when using L2TP.
+ *
+ * Note that skb->data[] isn't dereferenced from a u16 ptr here since
+ * the field may be unaligned.
+ */
+ if (!pskb_may_pull(skb, 2))
+ return 1;
+
+ if ((skb->data[0] == 0xff) && (skb->data[1] == 0x03))
+ skb_pull(skb, 2);
+
+ return 0;
+}
+
+/* Receive message. This is the recvmsg for the PPPoL2TP socket.
+ */
+static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *msg, size_t len,
+ int flags)
+{
+ int err;
+ struct sk_buff *skb;
+ struct sock *sk = sock->sk;
+
+ err = -EIO;
+ if (sk->sk_state & PPPOX_BOUND)
+ goto end;
+
+ msg->msg_namelen = 0;
+
+ err = 0;
+ skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
+ flags & MSG_DONTWAIT, &err);
+ if (!skb)
+ goto end;
+
+ if (len > skb->len)
+ len = skb->len;
+ else if (len < skb->len)
+ msg->msg_flags |= MSG_TRUNC;
+
+ err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len);
+ if (likely(err == 0))
+ err = len;
+
+ kfree_skb(skb);
+end:
+ return err;
+}
+
+static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len)
+{
+ struct pppol2tp_session *ps = l2tp_session_priv(session);
+ struct sock *sk = NULL;
+
+ /* If the socket is bound, send it in to PPP's input queue. Otherwise
+ * queue it on the session socket.
+ */
+ sk = ps->sock;
+ if (sk == NULL)
+ goto no_sock;
+
+ if (sk->sk_state & PPPOX_BOUND) {
+ struct pppox_sock *po;
+ PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
+ "%s: recv %d byte data frame, passing to ppp\n",
+ session->name, data_len);
+
+ /* We need to forget all info related to the L2TP packet
+ * gathered in the skb as we are going to reuse the same
+ * skb for the inner packet.
+ * Namely we need to:
+ * - reset xfrm (IPSec) information as it applies to
+ * the outer L2TP packet and not to the inner one
+ * - release the dst to force a route lookup on the inner
+ * IP packet since skb->dst currently points to the dst
+ * of the UDP tunnel
+ * - reset netfilter information as it doesn't apply
+ * to the inner packet either
+ */
+ secpath_reset(skb);
+ skb_dst_drop(skb);
+ nf_reset(skb);
+
+ po = pppox_sk(sk);
+ ppp_input(&po->chan, skb);
+ } else {
+ PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
+ "%s: socket not bound\n", session->name);
+
+ /* Not bound. Nothing we can do, so discard. */
+ session->stats.rx_errors++;
+ kfree_skb(skb);
+ }
+
+ return;
+
+no_sock:
+ PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
+ "%s: no socket\n", session->name);
+ kfree_skb(skb);
+}
+
+static void pppol2tp_session_sock_hold(struct l2tp_session *session)
+{
+ struct pppol2tp_session *ps = l2tp_session_priv(session);
+
+ if (ps->sock)
+ sock_hold(ps->sock);
+}
+
+static void pppol2tp_session_sock_put(struct l2tp_session *session)
+{
+ struct pppol2tp_session *ps = l2tp_session_priv(session);
+
+ if (ps->sock)
+ sock_put(ps->sock);
+}
+
+/************************************************************************
+ * Transmit handling
+ ***********************************************************************/
+
+/* This is the sendmsg for the PPPoL2TP pppol2tp_session socket. We come here
+ * when a user application does a sendmsg() on the session socket. L2TP and
+ * PPP headers must be inserted into the user's data.
+ */
+static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
+ size_t total_len)
+{
+ static const unsigned char ppph[2] = { 0xff, 0x03 };
+ struct sock *sk = sock->sk;
+ struct sk_buff *skb;
+ int error;
+ struct l2tp_session *session;
+ struct l2tp_tunnel *tunnel;
+ struct pppol2tp_session *ps;
+ int uhlen;
+
+ error = -ENOTCONN;
+ if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
+ goto error;
+
+ /* Get session and tunnel contexts */
+ error = -EBADF;
+ session = pppol2tp_sock_to_session(sk);
+ if (session == NULL)
+ goto error;
+
+ ps = l2tp_session_priv(session);
+ tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock);
+ if (tunnel == NULL)
+ goto error_put_sess;
+
+ uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
+
+ /* Allocate a socket buffer */
+ error = -ENOMEM;
+ skb = sock_wmalloc(sk, NET_SKB_PAD + sizeof(struct iphdr) +
+ uhlen + session->hdr_len +
+ sizeof(ppph) + total_len,
+ 0, GFP_KERNEL);
+ if (!skb)
+ goto error_put_sess_tun;
+
+ /* Reserve space for headers. */
+ skb_reserve(skb, NET_SKB_PAD);
+ skb_reset_network_header(skb);
+ skb_reserve(skb, sizeof(struct iphdr));
+ skb_reset_transport_header(skb);
+ skb_reserve(skb, uhlen);
+
+ /* Add PPP header */
+ skb->data[0] = ppph[0];
+ skb->data[1] = ppph[1];
+ skb_put(skb, 2);
+
+ /* Copy user data into skb */
+ error = memcpy_fromiovec(skb->data, m->msg_iov, total_len);
+ if (error < 0) {
+ kfree_skb(skb);
+ goto error_put_sess_tun;
+ }
+ skb_put(skb, total_len);
+
+ l2tp_xmit_skb(session, skb, session->hdr_len);
+
+ sock_put(ps->tunnel_sock);
+
+ return error;
+
+error_put_sess_tun:
+ sock_put(ps->tunnel_sock);
+error_put_sess:
+ sock_put(sk);
+error:
+ return error;
+}
+
+/* Transmit function called by generic PPP driver. Sends PPP frame
+ * over PPPoL2TP socket.
+ *
+ * This is almost the same as pppol2tp_sendmsg(), but rather than
+ * being called with a msghdr from userspace, it is called with a skb
+ * from the kernel.
+ *
+ * The supplied skb from ppp doesn't have enough headroom for the
+ * insertion of L2TP, UDP and IP headers so we need to allocate more
+ * headroom in the skb. This will create a cloned skb. But we must be
+ * careful in the error case because the caller will expect to free
+ * the skb it supplied, not our cloned skb. So we take care to always
+ * leave the original skb unfreed if we return an error.
+ */
+static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+{
+ static const u8 ppph[2] = { 0xff, 0x03 };
+ struct sock *sk = (struct sock *) chan->private;
+ struct sock *sk_tun;
+ struct l2tp_session *session;
+ struct l2tp_tunnel *tunnel;
+ struct pppol2tp_session *ps;
+ int old_headroom;
+ int new_headroom;
+
+ if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
+ goto abort;
+
+ /* Get session and tunnel contexts from the socket */
+ session = pppol2tp_sock_to_session(sk);
+ if (session == NULL)
+ goto abort;
+
+ ps = l2tp_session_priv(session);
+ sk_tun = ps->tunnel_sock;
+ if (sk_tun == NULL)
+ goto abort_put_sess;
+ tunnel = l2tp_sock_to_tunnel(sk_tun);
+ if (tunnel == NULL)
+ goto abort_put_sess;
+
+ old_headroom = skb_headroom(skb);
+ if (skb_cow_head(skb, sizeof(ppph)))
+ goto abort_put_sess_tun;
+
+ new_headroom = skb_headroom(skb);
+ skb->truesize += new_headroom - old_headroom;
+
+ /* Setup PPP header */
+ __skb_push(skb, sizeof(ppph));
+ skb->data[0] = ppph[0];
+ skb->data[1] = ppph[1];
+
+ l2tp_xmit_skb(session, skb, session->hdr_len);
+
+ sock_put(sk_tun);
+ sock_put(sk);
+ return 1;
+
+abort_put_sess_tun:
+ sock_put(sk_tun);
+abort_put_sess:
+ sock_put(sk);
+abort:
+ /* Free the original skb */
+ kfree_skb(skb);
+ return 1;
+}
+
+/*****************************************************************************
+ * Session (and tunnel control) socket create/destroy.
+ *****************************************************************************/
+
+/* Called by l2tp_core when a session socket is being closed.
+ */
+static void pppol2tp_session_close(struct l2tp_session *session)
+{
+ struct pppol2tp_session *ps = l2tp_session_priv(session);
+ struct sock *sk = ps->sock;
+ struct sk_buff *skb;
+
+ BUG_ON(session->magic != L2TP_SESSION_MAGIC);
+
+ if (session->session_id == 0)
+ goto out;
+
+ if (sk != NULL) {
+ lock_sock(sk);
+
+ if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
+ pppox_unbind_sock(sk);
+ sk->sk_state = PPPOX_DEAD;
+ sk->sk_state_change(sk);
+ }
+
+ /* Purge any queued data */
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&sk->sk_write_queue);
+ while ((skb = skb_dequeue(&session->reorder_q))) {
+ kfree_skb(skb);
+ sock_put(sk);
+ }
+
+ release_sock(sk);
+ }
+
+out:
+ return;
+}
+
+/* Really kill the session socket. (Called from sock_put() if
+ * refcnt == 0.)
+ */
+static void pppol2tp_session_destruct(struct sock *sk)
+{
+ struct l2tp_session *session;
+
+ if (sk->sk_user_data != NULL) {
+ session = sk->sk_user_data;
+ if (session == NULL)
+ goto out;
+
+ sk->sk_user_data = NULL;
+ BUG_ON(session->magic != L2TP_SESSION_MAGIC);
+ l2tp_session_dec_refcount(session);
+ }
+
+out:
+ return;
+}
+
+/* Called when the PPPoX socket (session) is closed.
+ */
+static int pppol2tp_release(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+ struct l2tp_session *session;
+ int error;
+
+ if (!sk)
+ return 0;
+
+ error = -EBADF;
+ lock_sock(sk);
+ if (sock_flag(sk, SOCK_DEAD) != 0)
+ goto error;
+
+ pppox_unbind_sock(sk);
+
+ /* Signal the death of the socket. */
+ sk->sk_state = PPPOX_DEAD;
+ sock_orphan(sk);
+ sock->sk = NULL;
+
+ session = pppol2tp_sock_to_session(sk);
+
+ /* Purge any queued data */
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&sk->sk_write_queue);
+ if (session != NULL) {
+ struct sk_buff *skb;
+ while ((skb = skb_dequeue(&session->reorder_q))) {
+ kfree_skb(skb);
+ sock_put(sk);
+ }
+ sock_put(sk);
+ }
+
+ release_sock(sk);
+
+ /* This will delete the session context via
+ * pppol2tp_session_destruct() if the socket's refcnt drops to
+ * zero.
+ */
+ sock_put(sk);
+
+ return 0;
+
+error:
+ release_sock(sk);
+ return error;
+}
+
+static struct proto pppol2tp_sk_proto = {
+ .name = "PPPOL2TP",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct pppox_sock),
+};
+
+static int pppol2tp_backlog_recv(struct sock *sk, struct sk_buff *skb)
+{
+ int rc;
+
+ rc = l2tp_udp_encap_recv(sk, skb);
+ if (rc)
+ kfree_skb(skb);
+
+ return NET_RX_SUCCESS;
+}
+
+/* socket() handler. Initialize a new struct sock.
+ */
+static int pppol2tp_create(struct net *net, struct socket *sock)
+{
+ int error = -ENOMEM;
+ struct sock *sk;
+
+ sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto);
+ if (!sk)
+ goto out;
+
+ sock_init_data(sock, sk);
+
+ sock->state = SS_UNCONNECTED;
+ sock->ops = &pppol2tp_ops;
+
+ sk->sk_backlog_rcv = pppol2tp_backlog_recv;
+ sk->sk_protocol = PX_PROTO_OL2TP;
+ sk->sk_family = PF_PPPOX;
+ sk->sk_state = PPPOX_NONE;
+ sk->sk_type = SOCK_STREAM;
+ sk->sk_destruct = pppol2tp_session_destruct;
+
+ error = 0;
+
+out:
+ return error;
+}
+
+#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
+static void pppol2tp_show(struct seq_file *m, void *arg)
+{
+ struct l2tp_session *session = arg;
+ struct pppol2tp_session *ps = l2tp_session_priv(session);
+
+ if (ps) {
+ struct pppox_sock *po = pppox_sk(ps->sock);
+ if (po)
+ seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan));
+ }
+}
+#endif
+
+/* connect() handler. Attach a PPPoX socket to a tunnel UDP socket
+ */
+static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ int sockaddr_len, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct sockaddr_pppol2tp *sp = (struct sockaddr_pppol2tp *) uservaddr;
+ struct sockaddr_pppol2tpv3 *sp3 = (struct sockaddr_pppol2tpv3 *) uservaddr;
+ struct pppox_sock *po = pppox_sk(sk);
+ struct l2tp_session *session = NULL;
+ struct l2tp_tunnel *tunnel;
+ struct pppol2tp_session *ps;
+ struct dst_entry *dst;
+ struct l2tp_session_cfg cfg = { 0, };
+ int error = 0;
+ u32 tunnel_id, peer_tunnel_id;
+ u32 session_id, peer_session_id;
+ int ver = 2;
+ int fd;
+
+ lock_sock(sk);
+
+ error = -EINVAL;
+ if (sp->sa_protocol != PX_PROTO_OL2TP)
+ goto end;
+
+ /* Check for already bound sockets */
+ error = -EBUSY;
+ if (sk->sk_state & PPPOX_CONNECTED)
+ goto end;
+
+ /* We don't support rebinding anyway */
+ error = -EALREADY;
+ if (sk->sk_user_data)
+ goto end; /* socket is already attached */
+
+ /* Get params from socket address. Handle L2TPv2 and L2TPv3 */
+ if (sockaddr_len == sizeof(struct sockaddr_pppol2tp)) {
+ fd = sp->pppol2tp.fd;
+ tunnel_id = sp->pppol2tp.s_tunnel;
+ peer_tunnel_id = sp->pppol2tp.d_tunnel;
+ session_id = sp->pppol2tp.s_session;
+ peer_session_id = sp->pppol2tp.d_session;
+ } else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpv3)) {
+ ver = 3;
+ fd = sp3->pppol2tp.fd;
+ tunnel_id = sp3->pppol2tp.s_tunnel;
+ peer_tunnel_id = sp3->pppol2tp.d_tunnel;
+ session_id = sp3->pppol2tp.s_session;
+ peer_session_id = sp3->pppol2tp.d_session;
+ } else {
+ error = -EINVAL;
+ goto end; /* bad socket address */
+ }
+
+ /* Don't bind if tunnel_id is 0 */
+ error = -EINVAL;
+ if (tunnel_id == 0)
+ goto end;
+
+ tunnel = l2tp_tunnel_find(sock_net(sk), tunnel_id);
+
+ /* Special case: create tunnel context if session_id and
+ * peer_session_id are both 0. Otherwise look up the tunnel
+ * using the supplied tunnel id.
+ */
+ if ((session_id == 0) && (peer_session_id == 0)) {
+ if (tunnel == NULL) {
+ struct l2tp_tunnel_cfg tcfg = {
+ .encap = L2TP_ENCAPTYPE_UDP,
+ .debug = 0,
+ };
+ error = l2tp_tunnel_create(sock_net(sk), fd, ver, tunnel_id, peer_tunnel_id, &tcfg, &tunnel);
+ if (error < 0)
+ goto end;
+ }
+ } else {
+ /* Error if we can't find the tunnel */
+ error = -ENOENT;
+ if (tunnel == NULL)
+ goto end;
+
+ /* Error if socket is not prepped */
+ if (tunnel->sock == NULL)
+ goto end;
+ }
+
+ if (tunnel->recv_payload_hook == NULL)
+ tunnel->recv_payload_hook = pppol2tp_recv_payload_hook;
+
+ if (tunnel->peer_tunnel_id == 0) {
+ if (ver == 2)
+ tunnel->peer_tunnel_id = sp->pppol2tp.d_tunnel;
+ else
+ tunnel->peer_tunnel_id = sp3->pppol2tp.d_tunnel;
+ }
+
+ /* Create the session if it doesn't already exist. We handle the
+ * case where a session was previously created by the netlink
+ * interface by checking that the session doesn't already have
+ * a socket and that its tunnel socket is the one we expect. If
+ * either check fails, return EEXIST to the caller.
+ */
+ session = l2tp_session_find(sock_net(sk), tunnel, session_id);
+ if (session == NULL) {
+ /* Default MTU must allow space for UDP/L2TP/PPP
+ * headers.
+ */
+ cfg.mtu = cfg.mru = 1500 - PPPOL2TP_HEADER_OVERHEAD;
+
+ /* Allocate and initialize a new session context. */
+ session = l2tp_session_create(sizeof(struct pppol2tp_session),
+ tunnel, session_id,
+ peer_session_id, &cfg);
+ if (session == NULL) {
+ error = -ENOMEM;
+ goto end;
+ }
+ } else {
+ ps = l2tp_session_priv(session);
+ error = -EEXIST;
+ if (ps->sock != NULL)
+ goto end;
+
+ /* consistency checks */
+ if (ps->tunnel_sock != tunnel->sock)
+ goto end;
+ }
+
+ /* Associate session with its PPPoL2TP socket */
+ ps = l2tp_session_priv(session);
+ ps->owner = current->pid;
+ ps->sock = sk;
+ ps->tunnel_sock = tunnel->sock;
+
+ session->recv_skb = pppol2tp_recv;
+ session->session_close = pppol2tp_session_close;
+#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
+ session->show = pppol2tp_show;
+#endif
+
+ /* We need to know each time a skb is dropped from the reorder
+ * queue.
+ */
+ session->ref = pppol2tp_session_sock_hold;
+ session->deref = pppol2tp_session_sock_put;
+
+ /* If PMTU discovery was enabled, use the MTU that was discovered */
+ dst = sk_dst_get(sk);
+ if (dst != NULL) {
+ u32 pmtu = dst_mtu(__sk_dst_get(sk));
+ if (pmtu != 0)
+ session->mtu = session->mru = pmtu -
+ PPPOL2TP_HEADER_OVERHEAD;
+ dst_release(dst);
+ }
+
+ /* Special case: if source & dest session_id == 0x0000, this
+ * socket is being created to manage the tunnel. Just set up
+ * the internal context for use by ioctl() and sockopt()
+ * handlers.
+ */
+ if ((session->session_id == 0) &&
+ (session->peer_session_id == 0)) {
+ error = 0;
+ goto out_no_ppp;
+ }
+
+ /* The only header we need to worry about is the L2TP
+ * header. This size is different depending on whether
+ * sequence numbers are enabled for the data channel.
+ */
+ po->chan.hdrlen = PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
+
+ po->chan.private = sk;
+ po->chan.ops = &pppol2tp_chan_ops;
+ po->chan.mtu = session->mtu;
+
+ error = ppp_register_net_channel(sock_net(sk), &po->chan);
+ if (error)
+ goto end;
+
+out_no_ppp:
+ /* This is how we get the session context from the socket. */
+ sk->sk_user_data = session;
+ sk->sk_state = PPPOX_CONNECTED;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: created\n", session->name);
+
+end:
+ release_sock(sk);
+
+ return error;
+}
+
+#ifdef CONFIG_L2TP_V3
+
+/* Called when creating sessions via the netlink interface.
+ */
+static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
+{
+ int error;
+ struct l2tp_tunnel *tunnel;
+ struct l2tp_session *session;
+ struct pppol2tp_session *ps;
+
+ tunnel = l2tp_tunnel_find(net, tunnel_id);
+
+ /* Error if we can't find the tunnel */
+ error = -ENOENT;
+ if (tunnel == NULL)
+ goto out;
+
+ /* Error if tunnel socket is not prepped */
+ if (tunnel->sock == NULL)
+ goto out;
+
+ /* Check that this session doesn't already exist */
+ error = -EEXIST;
+ session = l2tp_session_find(net, tunnel, session_id);
+ if (session != NULL)
+ goto out;
+
+ /* Default MTU values. */
+ if (cfg->mtu == 0)
+ cfg->mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD;
+ if (cfg->mru == 0)
+ cfg->mru = cfg->mtu;
+
+ /* Allocate and initialize a new session context. */
+ error = -ENOMEM;
+ session = l2tp_session_create(sizeof(struct pppol2tp_session),
+ tunnel, session_id,
+ peer_session_id, cfg);
+ if (session == NULL)
+ goto out;
+
+ ps = l2tp_session_priv(session);
+ ps->tunnel_sock = tunnel->sock;
+
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: created\n", session->name);
+
+ error = 0;
+
+out:
+ return error;
+}
+
+/* Called when deleting sessions via the netlink interface.
+ */
+static int pppol2tp_session_delete(struct l2tp_session *session)
+{
+ struct pppol2tp_session *ps = l2tp_session_priv(session);
+
+ if (ps->sock == NULL)
+ l2tp_session_dec_refcount(session);
+
+ return 0;
+}
+
+#endif /* CONFIG_L2TP_V3 */
+
+/* getname() support.
+ */
+static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
+ int *usockaddr_len, int peer)
+{
+ int len = 0;
+ int error = 0;
+ struct l2tp_session *session;
+ struct l2tp_tunnel *tunnel;
+ struct sock *sk = sock->sk;
+ struct inet_sock *inet;
+ struct pppol2tp_session *pls;
+
+ error = -ENOTCONN;
+ if (sk == NULL)
+ goto end;
+ if (sk->sk_state != PPPOX_CONNECTED)
+ goto end;
+
+ error = -EBADF;
+ session = pppol2tp_sock_to_session(sk);
+ if (session == NULL)
+ goto end;
+
+ pls = l2tp_session_priv(session);
+ tunnel = l2tp_sock_to_tunnel(pls->tunnel_sock);
+ if (tunnel == NULL) {
+ error = -EBADF;
+ goto end_put_sess;
+ }
+
+ inet = inet_sk(sk);
+ if (tunnel->version == 2) {
+ struct sockaddr_pppol2tp sp;
+ len = sizeof(sp);
+ memset(&sp, 0, len);
+ sp.sa_family = AF_PPPOX;
+ sp.sa_protocol = PX_PROTO_OL2TP;
+ sp.pppol2tp.fd = tunnel->fd;
+ sp.pppol2tp.pid = pls->owner;
+ sp.pppol2tp.s_tunnel = tunnel->tunnel_id;
+ sp.pppol2tp.d_tunnel = tunnel->peer_tunnel_id;
+ sp.pppol2tp.s_session = session->session_id;
+ sp.pppol2tp.d_session = session->peer_session_id;
+ sp.pppol2tp.addr.sin_family = AF_INET;
+ sp.pppol2tp.addr.sin_port = inet->inet_dport;
+ sp.pppol2tp.addr.sin_addr.s_addr = inet->inet_daddr;
+ memcpy(uaddr, &sp, len);
+ } else if (tunnel->version == 3) {
+ struct sockaddr_pppol2tpv3 sp;
+ len = sizeof(sp);
+ memset(&sp, 0, len);
+ sp.sa_family = AF_PPPOX;
+ sp.sa_protocol = PX_PROTO_OL2TP;
+ sp.pppol2tp.fd = tunnel->fd;
+ sp.pppol2tp.pid = pls->owner;
+ sp.pppol2tp.s_tunnel = tunnel->tunnel_id;
+ sp.pppol2tp.d_tunnel = tunnel->peer_tunnel_id;
+ sp.pppol2tp.s_session = session->session_id;
+ sp.pppol2tp.d_session = session->peer_session_id;
+ sp.pppol2tp.addr.sin_family = AF_INET;
+ sp.pppol2tp.addr.sin_port = inet->inet_dport;
+ sp.pppol2tp.addr.sin_addr.s_addr = inet->inet_daddr;
+ memcpy(uaddr, &sp, len);
+ }
+
+ *usockaddr_len = len;
+
+ sock_put(pls->tunnel_sock);
+end_put_sess:
+ sock_put(sk);
+ error = 0;
+
+end:
+ return error;
+}
+
+/****************************************************************************
+ * ioctl() handlers.
+ *
+ * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP
+ * sockets. However, in order to control kernel tunnel features, we allow
+ * userspace to create a special "tunnel" PPPoX socket which is used for
+ * control only. Tunnel PPPoX sockets have session_id == 0 and simply allow
+ * the user application to issue L2TP setsockopt(), getsockopt() and ioctl()
+ * calls.
+ ****************************************************************************/
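
For illustration, the fragment below (names are placeholders; tunnel_mgmt_fd is
a connected tunnel-management PPPoL2TP socket as sketched in the file header
comment, and struct pppol2tp_ioc_stats comes from <linux/if_pppol2tp.h>)
fetches per-session statistics through the tunnel socket by pre-loading
session_id, which is exactly the redirection performed by
pppol2tp_tunnel_ioctl() below.

    /* Illustrative fragment: read one session's stats via the tunnel
     * management socket. A zero session_id would return tunnel stats.
     */
    struct pppol2tp_ioc_stats stats;

    memset(&stats, 0, sizeof(stats));
    stats.session_id = session_id;

    if (ioctl(tunnel_mgmt_fd, PPPIOCGL2TPSTATS, &stats) == 0)
            printf("tx %llu pkts, rx %llu pkts\n",
                   (unsigned long long)stats.tx_packets,
                   (unsigned long long)stats.rx_packets);
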
+
+static void pppol2tp_copy_stats(struct pppol2tp_ioc_stats *dest,
+ struct l2tp_stats *stats)
+{
+ dest->tx_packets = stats->tx_packets;
+ dest->tx_bytes = stats->tx_bytes;
+ dest->tx_errors = stats->tx_errors;
+ dest->rx_packets = stats->rx_packets;
+ dest->rx_bytes = stats->rx_bytes;
+ dest->rx_seq_discards = stats->rx_seq_discards;
+ dest->rx_oos_packets = stats->rx_oos_packets;
+ dest->rx_errors = stats->rx_errors;
+}
+
+/* Session ioctl helper.
+ */
+static int pppol2tp_session_ioctl(struct l2tp_session *session,
+ unsigned int cmd, unsigned long arg)
+{
+ struct ifreq ifr;
+ int err = 0;
+ struct sock *sk;
+ int val = (int) arg;
+ struct pppol2tp_session *ps = l2tp_session_priv(session);
+ struct l2tp_tunnel *tunnel = session->tunnel;
+ struct pppol2tp_ioc_stats stats;
+
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
+ "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n",
+ session->name, cmd, arg);
+
+ sk = ps->sock;
+ sock_hold(sk);
+
+ switch (cmd) {
+ case SIOCGIFMTU:
+ err = -ENXIO;
+ if (!(sk->sk_state & PPPOX_CONNECTED))
+ break;
+
+ err = -EFAULT;
+ if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
+ break;
+ ifr.ifr_mtu = session->mtu;
+ if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq)))
+ break;
+
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get mtu=%d\n", session->name, session->mtu);
+ err = 0;
+ break;
+
+ case SIOCSIFMTU:
+ err = -ENXIO;
+ if (!(sk->sk_state & PPPOX_CONNECTED))
+ break;
+
+ err = -EFAULT;
+ if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
+ break;
+
+ session->mtu = ifr.ifr_mtu;
+
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: set mtu=%d\n", session->name, session->mtu);
+ err = 0;
+ break;
+
+ case PPPIOCGMRU:
+ err = -ENXIO;
+ if (!(sk->sk_state & PPPOX_CONNECTED))
+ break;
+
+ err = -EFAULT;
+ if (put_user(session->mru, (int __user *) arg))
+ break;
+
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get mru=%d\n", session->name, session->mru);
+ err = 0;
+ break;
+
+ case PPPIOCSMRU:
+ err = -ENXIO;
+ if (!(sk->sk_state & PPPOX_CONNECTED))
+ break;
+
+ err = -EFAULT;
+ if (get_user(val, (int __user *) arg))
+ break;
+
+ session->mru = val;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: set mru=%d\n", session->name, session->mru);
+ err = 0;
+ break;
+
+ case PPPIOCGFLAGS:
+ err = -EFAULT;
+ if (put_user(ps->flags, (int __user *) arg))
+ break;
+
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get flags=%d\n", session->name, ps->flags);
+ err = 0;
+ break;
+
+ case PPPIOCSFLAGS:
+ err = -EFAULT;
+ if (get_user(val, (int __user *) arg))
+ break;
+ ps->flags = val;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: set flags=%d\n", session->name, ps->flags);
+ err = 0;
+ break;
+
+ case PPPIOCGL2TPSTATS:
+ err = -ENXIO;
+ if (!(sk->sk_state & PPPOX_CONNECTED))
+ break;
+
+ memset(&stats, 0, sizeof(stats));
+ stats.tunnel_id = tunnel->tunnel_id;
+ stats.session_id = session->session_id;
+ pppol2tp_copy_stats(&stats, &session->stats);
+ if (copy_to_user((void __user *) arg, &stats,
+ sizeof(stats)))
+ break;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get L2TP stats\n", session->name);
+ err = 0;
+ break;
+
+ default:
+ err = -ENOSYS;
+ break;
+ }
+
+ sock_put(sk);
+
+ return err;
+}
+
+/* Tunnel ioctl helper.
+ *
+ * Note the special handling for PPPIOCGL2TPSTATS below. If the ioctl data
+ * specifies a session_id, the session ioctl handler is called. This allows an
+ * application to retrieve session stats via a tunnel socket.
+ */
+static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel,
+ unsigned int cmd, unsigned long arg)
+{
+ int err = 0;
+ struct sock *sk;
+ struct pppol2tp_ioc_stats stats;
+
+ PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
+ "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n",
+ tunnel->name, cmd, arg);
+
+ sk = tunnel->sock;
+ sock_hold(sk);
+
+ switch (cmd) {
+ case PPPIOCGL2TPSTATS:
+ err = -ENXIO;
+ if (!(sk->sk_state & PPPOX_CONNECTED))
+ break;
+
+ if (copy_from_user(&stats, (void __user *) arg,
+ sizeof(stats))) {
+ err = -EFAULT;
+ break;
+ }
+ if (stats.session_id != 0) {
+ /* resend to session ioctl handler */
+ struct l2tp_session *session =
+ l2tp_session_find(sock_net(sk), tunnel, stats.session_id);
+ if (session != NULL)
+ err = pppol2tp_session_ioctl(session, cmd, arg);
+ else
+ err = -EBADR;
+ break;
+ }
+#ifdef CONFIG_XFRM
+ stats.using_ipsec = (sk->sk_policy[0] || sk->sk_policy[1]) ? 1 : 0;
+#endif
+ pppol2tp_copy_stats(&stats, &tunnel->stats);
+ if (copy_to_user((void __user *) arg, &stats, sizeof(stats))) {
+ err = -EFAULT;
+ break;
+ }
+ PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get L2TP stats\n", tunnel->name);
+ err = 0;
+ break;
+
+ default:
+ err = -ENOSYS;
+ break;
+ }
+
+ sock_put(sk);
+
+ return err;
+}
+
+/* Main ioctl() handler.
+ * Dispatch to tunnel or session helpers depending on the socket.
+ */
+static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd,
+ unsigned long arg)
+{
+ struct sock *sk = sock->sk;
+ struct l2tp_session *session;
+ struct l2tp_tunnel *tunnel;
+ struct pppol2tp_session *ps;
+ int err;
+
+ if (!sk)
+ return 0;
+
+ err = -EBADF;
+ if (sock_flag(sk, SOCK_DEAD) != 0)
+ goto end;
+
+ err = -ENOTCONN;
+ if ((sk->sk_user_data == NULL) ||
+ (!(sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND))))
+ goto end;
+
+ /* Get session context from the socket */
+ err = -EBADF;
+ session = pppol2tp_sock_to_session(sk);
+ if (session == NULL)
+ goto end;
+
+ /* Special case: if session's session_id is zero, treat ioctl as a
+ * tunnel ioctl
+ */
+ ps = l2tp_session_priv(session);
+ if ((session->session_id == 0) &&
+ (session->peer_session_id == 0)) {
+ err = -EBADF;
+ tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock);
+ if (tunnel == NULL)
+ goto end_put_sess;
+
+ err = pppol2tp_tunnel_ioctl(tunnel, cmd, arg);
+ sock_put(ps->tunnel_sock);
+ goto end_put_sess;
+ }
+
+ err = pppol2tp_session_ioctl(session, cmd, arg);
+
+end_put_sess:
+ sock_put(sk);
+end:
+ return err;
+}
+
+/*****************************************************************************
+ * setsockopt() / getsockopt() support.
+ *
+ * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP
+ * sockets. In order to control kernel tunnel features, we allow userspace to
+ * create a special "tunnel" PPPoX socket which is used for control only.
+ * Tunnel PPPoX sockets have session_id == 0 and simply allow the user
+ * application to issue L2TP setsockopt(), getsockopt() and ioctl() calls.
+ *****************************************************************************/
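
As a brief illustration of this path (names are placeholders; sess_fd is a
connected PPPoL2TP session socket): options live at level SOL_PPPOL2TP and
take an int value, as enforced by pppol2tp_setsockopt() below.

    /* Illustrative fragment: per-session socket options. SOL_PPPOL2TP and
     * the PPPOL2TP_SO_* names come from <linux/if_pppol2tp.h>.
     */
    int on = 1;
    int debug = 0;
    socklen_t len = sizeof(debug);

    /* Require sequence numbers on received data packets. */
    setsockopt(sess_fd, SOL_PPPOL2TP, PPPOL2TP_SO_RECVSEQ, &on, sizeof(on));

    /* Read back the session debug mask. */
    getsockopt(sess_fd, SOL_PPPOL2TP, PPPOL2TP_SO_DEBUG, &debug, &len);
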
+
+/* Tunnel setsockopt() helper.
+ */
+static int pppol2tp_tunnel_setsockopt(struct sock *sk,
+ struct l2tp_tunnel *tunnel,
+ int optname, int val)
+{
+ int err = 0;
+
+ switch (optname) {
+ case PPPOL2TP_SO_DEBUG:
+ tunnel->debug = val;
+ PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: set debug=%x\n", tunnel->name, tunnel->debug);
+ break;
+
+ default:
+ err = -ENOPROTOOPT;
+ break;
+ }
+
+ return err;
+}
+
+/* Session setsockopt helper.
+ */
+static int pppol2tp_session_setsockopt(struct sock *sk,
+ struct l2tp_session *session,
+ int optname, int val)
+{
+ int err = 0;
+ struct pppol2tp_session *ps = l2tp_session_priv(session);
+
+ switch (optname) {
+ case PPPOL2TP_SO_RECVSEQ:
+ if ((val != 0) && (val != 1)) {
+ err = -EINVAL;
+ break;
+ }
+ session->recv_seq = val ? -1 : 0;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: set recv_seq=%d\n", session->name, session->recv_seq);
+ break;
+
+ case PPPOL2TP_SO_SENDSEQ:
+ if ((val != 0) && (val != 1)) {
+ err = -EINVAL;
+ break;
+ }
+ session->send_seq = val ? -1 : 0;
+ {
+ struct sock *ssk = ps->sock;
+ struct pppox_sock *po = pppox_sk(ssk);
+ po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ :
+ PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
+ }
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: set send_seq=%d\n", session->name, session->send_seq);
+ break;
+
+ case PPPOL2TP_SO_LNSMODE:
+ if ((val != 0) && (val != 1)) {
+ err = -EINVAL;
+ break;
+ }
+ session->lns_mode = val ? -1 : 0;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: set lns_mode=%d\n", session->name, session->lns_mode);
+ break;
+
+ case PPPOL2TP_SO_DEBUG:
+ session->debug = val;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: set debug=%x\n", session->name, session->debug);
+ break;
+
+ case PPPOL2TP_SO_REORDERTO:
+ session->reorder_timeout = msecs_to_jiffies(val);
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: set reorder_timeout=%d\n", session->name, session->reorder_timeout);
+ break;
+
+ default:
+ err = -ENOPROTOOPT;
+ break;
+ }
+
+ return err;
+}
+
+/* Main setsockopt() entry point.
+ * Does API checks, then calls either the tunnel or session setsockopt
+ * handler, according to whether the PPPoL2TP socket is for a regular
+ * session or the special tunnel type.
+ */
+static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, unsigned int optlen)
+{
+ struct sock *sk = sock->sk;
+ struct l2tp_session *session;
+ struct l2tp_tunnel *tunnel;
+ struct pppol2tp_session *ps;
+ int val;
+ int err;
+
+ if (level != SOL_PPPOL2TP)
+ return udp_prot.setsockopt(sk, level, optname, optval, optlen);
+
+ if (optlen < sizeof(int))
+ return -EINVAL;
+
+ if (get_user(val, (int __user *)optval))
+ return -EFAULT;
+
+ err = -ENOTCONN;
+ if (sk->sk_user_data == NULL)
+ goto end;
+
+ /* Get session context from the socket */
+ err = -EBADF;
+ session = pppol2tp_sock_to_session(sk);
+ if (session == NULL)
+ goto end;
+
+ /* Special case: if session_id == 0x0000, treat as operation on tunnel
+ */
+ ps = l2tp_session_priv(session);
+ if ((session->session_id == 0) &&
+ (session->peer_session_id == 0)) {
+ err = -EBADF;
+ tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock);
+ if (tunnel == NULL)
+ goto end_put_sess;
+
+ err = pppol2tp_tunnel_setsockopt(sk, tunnel, optname, val);
+ sock_put(ps->tunnel_sock);
+ } else
+ err = pppol2tp_session_setsockopt(sk, session, optname, val);
+
+end_put_sess:
+ sock_put(sk);
+end:
+ return err;
+}
+
+/* Tunnel getsockopt helper. Called with sock locked.
+ */
+static int pppol2tp_tunnel_getsockopt(struct sock *sk,
+ struct l2tp_tunnel *tunnel,
+ int optname, int *val)
+{
+ int err = 0;
+
+ switch (optname) {
+ case PPPOL2TP_SO_DEBUG:
+ *val = tunnel->debug;
+ PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get debug=%x\n", tunnel->name, tunnel->debug);
+ break;
+
+ default:
+ err = -ENOPROTOOPT;
+ break;
+ }
+
+ return err;
+}
+
+/* Session getsockopt helper. Called with sock locked.
+ */
+static int pppol2tp_session_getsockopt(struct sock *sk,
+ struct l2tp_session *session,
+ int optname, int *val)
+{
+ int err = 0;
+
+ switch (optname) {
+ case PPPOL2TP_SO_RECVSEQ:
+ *val = session->recv_seq;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get recv_seq=%d\n", session->name, *val);
+ break;
+
+ case PPPOL2TP_SO_SENDSEQ:
+ *val = session->send_seq;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get send_seq=%d\n", session->name, *val);
+ break;
+
+ case PPPOL2TP_SO_LNSMODE:
+ *val = session->lns_mode;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get lns_mode=%d\n", session->name, *val);
+ break;
+
+ case PPPOL2TP_SO_DEBUG:
+ *val = session->debug;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get debug=%d\n", session->name, *val);
+ break;
+
+ case PPPOL2TP_SO_REORDERTO:
+ *val = (int) jiffies_to_msecs(session->reorder_timeout);
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get reorder_timeout=%d\n", session->name, *val);
+ break;
+
+ default:
+ err = -ENOPROTOOPT;
+ }
+
+ return err;
+}
+
+/* Main getsockopt() entry point.
+ * Does API checks, then calls either the tunnel or session getsockopt
+ * handler, according to whether the PPPoX socket is for a regular session
+ * or the special tunnel type.
+ */
+static int pppol2tp_getsockopt(struct socket *sock, int level,
+ int optname, char __user *optval, int __user *optlen)
+{
+ struct sock *sk = sock->sk;
+ struct l2tp_session *session;
+ struct l2tp_tunnel *tunnel;
+ int val, len;
+ int err;
+ struct pppol2tp_session *ps;
+
+ if (level != SOL_PPPOL2TP)
+ return udp_prot.getsockopt(sk, level, optname, optval, optlen);
+
+ if (get_user(len, (int __user *) optlen))
+ return -EFAULT;
+
+ len = min_t(unsigned int, len, sizeof(int));
+
+ if (len < 0)
+ return -EINVAL;
+
+ err = -ENOTCONN;
+ if (sk->sk_user_data == NULL)
+ goto end;
+
+ /* Get the session context */
+ err = -EBADF;
+ session = pppol2tp_sock_to_session(sk);
+ if (session == NULL)
+ goto end;
+
+ /* Special case: if session_id == 0x0000, treat as operation on tunnel */
+ ps = l2tp_session_priv(session);
+ if ((session->session_id == 0) &&
+ (session->peer_session_id == 0)) {
+ err = -EBADF;
+ tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock);
+ if (tunnel == NULL)
+ goto end_put_sess;
+
+ err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val);
+ sock_put(ps->tunnel_sock);
+ } else
+ err = pppol2tp_session_getsockopt(sk, session, optname, &val);
+
+ if (err)
+ goto end_put_sess;
+
+ err = -EFAULT;
+ if (put_user(len, (int __user *) optlen))
+ goto end_put_sess;
+
+ if (copy_to_user((void __user *) optval, &val, len))
+ goto end_put_sess;
+
+ err = 0;
+
+end_put_sess:
+ sock_put(sk);
+end:
+ return err;
+}
+
+/*****************************************************************************
+ * /proc filesystem for debug
+ * Since the original pppol2tp driver provided /proc/net/pppol2tp for
+ * L2TPv2, we dump only L2TPv2 tunnels and sessions here.
+ *****************************************************************************/
+
+static unsigned int pppol2tp_net_id;
+
+#ifdef CONFIG_PROC_FS
+
+struct pppol2tp_seq_data {
+ struct seq_net_private p;
+ int tunnel_idx; /* current tunnel */
+ int session_idx; /* index of session within current tunnel */
+ struct l2tp_tunnel *tunnel;
+ struct l2tp_session *session; /* NULL means get next tunnel */
+};
+
+static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd)
+{
+ for (;;) {
+ pd->tunnel = l2tp_tunnel_find_nth(net, pd->tunnel_idx);
+ pd->tunnel_idx++;
+
+ if (pd->tunnel == NULL)
+ break;
+
+ /* Ignore L2TPv3 tunnels */
+ if (pd->tunnel->version < 3)
+ break;
+ }
+}
+
+static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd)
+{
+ pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
+ pd->session_idx++;
+
+ if (pd->session == NULL) {
+ pd->session_idx = 0;
+ pppol2tp_next_tunnel(net, pd);
+ }
+}
+
+static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs)
+{
+ struct pppol2tp_seq_data *pd = SEQ_START_TOKEN;
+ loff_t pos = *offs;
+ struct net *net;
+
+ if (!pos)
+ goto out;
+
+ BUG_ON(m->private == NULL);
+ pd = m->private;
+ net = seq_file_net(m);
+
+ if (pd->tunnel == NULL)
+ pppol2tp_next_tunnel(net, pd);
+ else
+ pppol2tp_next_session(net, pd);
+
+ /* NULL tunnel and session indicates end of list */
+ if ((pd->tunnel == NULL) && (pd->session == NULL))
+ pd = NULL;
+
+out:
+ return pd;
+}
+
+static void *pppol2tp_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ (*pos)++;
+ return NULL;
+}
+
+static void pppol2tp_seq_stop(struct seq_file *p, void *v)
+{
+ /* nothing to do */
+}
+
+static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v)
+{
+ struct l2tp_tunnel *tunnel = v;
+
+ seq_printf(m, "\nTUNNEL '%s', %c %d\n",
+ tunnel->name,
+ (tunnel == tunnel->sock->sk_user_data) ? 'Y' : 'N',
+ atomic_read(&tunnel->ref_count) - 1);
+ seq_printf(m, " %08x %llu/%llu/%llu %llu/%llu/%llu\n",
+ tunnel->debug,
+ (unsigned long long)tunnel->stats.tx_packets,
+ (unsigned long long)tunnel->stats.tx_bytes,
+ (unsigned long long)tunnel->stats.tx_errors,
+ (unsigned long long)tunnel->stats.rx_packets,
+ (unsigned long long)tunnel->stats.rx_bytes,
+ (unsigned long long)tunnel->stats.rx_errors);
+}
+
+static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
+{
+ struct l2tp_session *session = v;
+ struct l2tp_tunnel *tunnel = session->tunnel;
+ struct pppol2tp_session *ps = l2tp_session_priv(session);
+ struct pppox_sock *po = pppox_sk(ps->sock);
+ u32 ip = 0;
+ u16 port = 0;
+
+ if (tunnel->sock) {
+ struct inet_sock *inet = inet_sk(tunnel->sock);
+ ip = ntohl(inet->inet_saddr);
+ port = ntohs(inet->inet_sport);
+ }
+
+ seq_printf(m, " SESSION '%s' %08X/%d %04X/%04X -> "
+ "%04X/%04X %d %c\n",
+ session->name, ip, port,
+ tunnel->tunnel_id,
+ session->session_id,
+ tunnel->peer_tunnel_id,
+ session->peer_session_id,
+ ps->sock->sk_state,
+ (session == ps->sock->sk_user_data) ?
+ 'Y' : 'N');
+ seq_printf(m, " %d/%d/%c/%c/%s %08x %u\n",
+ session->mtu, session->mru,
+ session->recv_seq ? 'R' : '-',
+ session->send_seq ? 'S' : '-',
+ session->lns_mode ? "LNS" : "LAC",
+ session->debug,
+ jiffies_to_msecs(session->reorder_timeout));
+ seq_printf(m, " %hu/%hu %llu/%llu/%llu %llu/%llu/%llu\n",
+ session->nr, session->ns,
+ (unsigned long long)session->stats.tx_packets,
+ (unsigned long long)session->stats.tx_bytes,
+ (unsigned long long)session->stats.tx_errors,
+ (unsigned long long)session->stats.rx_packets,
+ (unsigned long long)session->stats.rx_bytes,
+ (unsigned long long)session->stats.rx_errors);
+
+ if (po)
+ seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan));
+}
+
+static int pppol2tp_seq_show(struct seq_file *m, void *v)
+{
+ struct pppol2tp_seq_data *pd = v;
+
+ /* display header on line 1 */
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(m, "PPPoL2TP driver info, " PPPOL2TP_DRV_VERSION "\n");
+ seq_puts(m, "TUNNEL name, user-data-ok session-count\n");
+ seq_puts(m, " debug tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
+ seq_puts(m, " SESSION name, addr/port src-tid/sid "
+ "dest-tid/sid state user-data-ok\n");
+ seq_puts(m, " mtu/mru/rcvseq/sendseq/lns debug reorderto\n");
+ seq_puts(m, " nr/ns tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
+ goto out;
+ }
+
+ /* Show the tunnel or session context.
+ */
+ if (pd->session == NULL)
+ pppol2tp_seq_tunnel_show(m, pd->tunnel);
+ else
+ pppol2tp_seq_session_show(m, pd->session);
+
+out:
+ return 0;
+}
+
+static const struct seq_operations pppol2tp_seq_ops = {
+ .start = pppol2tp_seq_start,
+ .next = pppol2tp_seq_next,
+ .stop = pppol2tp_seq_stop,
+ .show = pppol2tp_seq_show,
+};
+
+/* Called when our /proc file is opened. We allocate data for use when
+ * iterating our tunnel / session contexts and store it in the private
+ * data of the seq_file.
+ */
+static int pppol2tp_proc_open(struct inode *inode, struct file *file)
+{
+ return seq_open_net(inode, file, &pppol2tp_seq_ops,
+ sizeof(struct pppol2tp_seq_data));
+}
+
+static const struct file_operations pppol2tp_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = pppol2tp_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_net,
+};
+
+#endif /* CONFIG_PROC_FS */
+
+/*****************************************************************************
+ * Network namespace
+ *****************************************************************************/
+
+static __net_init int pppol2tp_init_net(struct net *net)
+{
+ struct proc_dir_entry *pde;
+ int err = 0;
+
+ pde = proc_net_fops_create(net, "pppol2tp", S_IRUGO, &pppol2tp_proc_fops);
+ if (!pde) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+out:
+ return err;
+}
+
+static __net_exit void pppol2tp_exit_net(struct net *net)
+{
+ proc_net_remove(net, "pppol2tp");
+}
+
+static struct pernet_operations pppol2tp_net_ops = {
+ .init = pppol2tp_init_net,
+ .exit = pppol2tp_exit_net,
+ .id = &pppol2tp_net_id,
+};
+
+/*****************************************************************************
+ * Init and cleanup
+ *****************************************************************************/
+
+static const struct proto_ops pppol2tp_ops = {
+ .family = AF_PPPOX,
+ .owner = THIS_MODULE,
+ .release = pppol2tp_release,
+ .bind = sock_no_bind,
+ .connect = pppol2tp_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = sock_no_accept,
+ .getname = pppol2tp_getname,
+ .poll = datagram_poll,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .setsockopt = pppol2tp_setsockopt,
+ .getsockopt = pppol2tp_getsockopt,
+ .sendmsg = pppol2tp_sendmsg,
+ .recvmsg = pppol2tp_recvmsg,
+ .mmap = sock_no_mmap,
+ .ioctl = pppox_ioctl,
+};
+
+static struct pppox_proto pppol2tp_proto = {
+ .create = pppol2tp_create,
+ .ioctl = pppol2tp_ioctl
+};
+
+#ifdef CONFIG_L2TP_V3
+
+static const struct l2tp_nl_cmd_ops pppol2tp_nl_cmd_ops = {
+ .session_create = pppol2tp_session_create,
+ .session_delete = pppol2tp_session_delete,
+};
+
+#endif /* CONFIG_L2TP_V3 */
+
+static int __init pppol2tp_init(void)
+{
+ int err;
+
+ err = register_pernet_device(&pppol2tp_net_ops);
+ if (err)
+ goto out;
+
+ err = proto_register(&pppol2tp_sk_proto, 0);
+ if (err)
+ goto out_unregister_pppol2tp_pernet;
+
+ err = register_pppox_proto(PX_PROTO_OL2TP, &pppol2tp_proto);
+ if (err)
+ goto out_unregister_pppol2tp_proto;
+
+#ifdef CONFIG_L2TP_V3
+ err = l2tp_nl_register_ops(L2TP_PWTYPE_PPP, &pppol2tp_nl_cmd_ops);
+ if (err)
+ goto out_unregister_pppox;
+#endif
+
+ printk(KERN_INFO "PPPoL2TP kernel driver, %s\n",
+ PPPOL2TP_DRV_VERSION);
+
+out:
+ return err;
+
+#ifdef CONFIG_L2TP_V3
+out_unregister_pppox:
+ unregister_pppox_proto(PX_PROTO_OL2TP);
+#endif
+out_unregister_pppol2tp_proto:
+ proto_unregister(&pppol2tp_sk_proto);
+out_unregister_pppol2tp_pernet:
+ unregister_pernet_device(&pppol2tp_net_ops);
+ goto out;
+}
+
+static void __exit pppol2tp_exit(void)
+{
+#ifdef CONFIG_L2TP_V3
+ l2tp_nl_unregister_ops(L2TP_PWTYPE_PPP);
+#endif
+ unregister_pppox_proto(PX_PROTO_OL2TP);
+ proto_unregister(&pppol2tp_sk_proto);
+ unregister_pernet_device(&pppol2tp_net_ops);
+}
+
+module_init(pppol2tp_init);
+module_exit(pppol2tp_exit);
+
+MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
+MODULE_DESCRIPTION("PPP over L2TP over UDP");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(PPPOL2TP_DRV_VERSION);
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 2db6a9f75913..023ba820236f 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -536,7 +536,7 @@ static int llc_ui_wait_for_disc(struct sock *sk, long timeout)
int rc = 0;
while (1) {
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
if (sk_wait_event(sk, &timeout, sk->sk_state == TCP_CLOSE))
break;
rc = -ERESTARTSYS;
@@ -547,7 +547,7 @@ static int llc_ui_wait_for_disc(struct sock *sk, long timeout)
break;
rc = 0;
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
return rc;
}
@@ -556,13 +556,13 @@ static int llc_ui_wait_for_conn(struct sock *sk, long timeout)
DEFINE_WAIT(wait);
while (1) {
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
if (sk_wait_event(sk, &timeout, sk->sk_state != TCP_SYN_SENT))
break;
if (signal_pending(current) || !timeout)
break;
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
return timeout;
}
@@ -573,7 +573,7 @@ static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout)
int rc;
while (1) {
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
rc = 0;
if (sk_wait_event(sk, &timeout,
(sk->sk_shutdown & RCV_SHUTDOWN) ||
@@ -588,7 +588,7 @@ static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout)
if (!timeout)
break;
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
return rc;
}
diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c
index 78167e81dfeb..2bb0ddff8c0f 100644
--- a/net/llc/llc_core.c
+++ b/net/llc/llc_core.c
@@ -144,12 +144,6 @@ static struct packet_type llc_tr_packet_type __read_mostly = {
static int __init llc_init(void)
{
- struct net_device *dev;
-
- dev = first_net_device(&init_net);
- if (dev != NULL)
- dev = next_net_device(dev);
-
dev_add_pack(&llc_packet_type);
dev_add_pack(&llc_tr_packet_type);
return 0;
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
index a432f0ec051c..94e7fca75b85 100644
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -31,7 +31,7 @@ static int llc_mac_header_len(unsigned short devtype)
case ARPHRD_ETHER:
case ARPHRD_LOOPBACK:
return sizeof(struct ethhdr);
-#ifdef CONFIG_TR
+#if defined(CONFIG_TR) || defined(CONFIG_TR_MODULE)
case ARPHRD_IEEE802_TR:
return sizeof(struct trh_hdr);
#endif
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index a952b7f8c648..8a91f6c0bb18 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -15,8 +15,12 @@ comment "CFG80211 needs to be enabled for MAC80211"
if MAC80211 != n
+config MAC80211_HAS_RC
+ def_bool n
+
config MAC80211_RC_PID
bool "PID controller based rate control algorithm" if EMBEDDED
+ select MAC80211_HAS_RC
---help---
This option enables a TX rate control algorithm for
mac80211 that uses a PID controller to select the TX
@@ -24,12 +28,14 @@ config MAC80211_RC_PID
config MAC80211_RC_MINSTREL
bool "Minstrel" if EMBEDDED
+ select MAC80211_HAS_RC
default y
---help---
This option enables the 'minstrel' TX rate control algorithm
choice
prompt "Default rate control algorithm"
+ depends on MAC80211_HAS_RC
default MAC80211_RC_DEFAULT_MINSTREL
---help---
This option selects the default rate control algorithm
@@ -62,6 +68,9 @@ config MAC80211_RC_DEFAULT
endif
+comment "Some wireless drivers require a rate control algorithm"
+ depends on MAC80211_HAS_RC=n
+
config MAC80211_MESH
bool "Enable mac80211 mesh networking (pre-802.11s) support"
depends on MAC80211 && EXPERIMENTAL
@@ -212,8 +221,8 @@ config MAC80211_DRIVER_API_TRACER
depends on EVENT_TRACING
help
Say Y here to make mac80211 register with the ftrace
- framework for the driver API -- you can see which
- driver methods it is calling then by looking at the
- trace.
+ framework for the driver API -- you can then see which
+ driver methods it is calling and which API functions
+ drivers are calling by looking at the trace.
- If unsure, say N.
+ If unsure, say Y.
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 04420291e7ad..84b48ba8a77e 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -23,7 +23,8 @@ mac80211-y := \
key.o \
util.o \
wme.o \
- event.o
+ event.o \
+ chan.o
mac80211-$(CONFIG_MAC80211_LEDS) += led.o
mac80211-$(CONFIG_MAC80211_DEBUGFS) += \
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index f9516a27e233..6bb9a9a94960 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -19,23 +19,25 @@
#include "ieee80211_i.h"
#include "driver-ops.h"
-void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
- u16 initiator, u16 reason)
+static void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
+ u16 initiator, u16 reason,
+ bool from_timer)
{
struct ieee80211_local *local = sta->local;
+ struct tid_ampdu_rx *tid_rx;
int i;
- /* check if TID is in operational state */
spin_lock_bh(&sta->lock);
- if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL) {
+
+ /* check if TID is in operational state */
+ if (!sta->ampdu_mlme.tid_active_rx[tid]) {
spin_unlock_bh(&sta->lock);
return;
}
- sta->ampdu_mlme.tid_state_rx[tid] =
- HT_AGG_STATE_REQ_STOP_BA_MSK |
- (initiator << HT_AGG_STATE_INITIATOR_SHIFT);
- spin_unlock_bh(&sta->lock);
+ sta->ampdu_mlme.tid_active_rx[tid] = false;
+
+ tid_rx = sta->ampdu_mlme.tid_rx[tid];
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n",
@@ -47,61 +49,42 @@ void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
printk(KERN_DEBUG "HW problem - can not stop rx "
"aggregation for tid %d\n", tid);
- /* shutdown timer has not expired */
- if (initiator != WLAN_BACK_TIMER)
- del_timer_sync(&sta->ampdu_mlme.tid_rx[tid]->session_timer);
-
/* check if this is a self generated aggregation halt */
- if (initiator == WLAN_BACK_RECIPIENT || initiator == WLAN_BACK_TIMER)
+ if (initiator == WLAN_BACK_RECIPIENT)
ieee80211_send_delba(sta->sdata, sta->sta.addr,
tid, 0, reason);
/* free the reordering buffer */
- for (i = 0; i < sta->ampdu_mlme.tid_rx[tid]->buf_size; i++) {
- if (sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]) {
+ for (i = 0; i < tid_rx->buf_size; i++) {
+ if (tid_rx->reorder_buf[i]) {
/* release the reordered frames */
- dev_kfree_skb(sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]);
- sta->ampdu_mlme.tid_rx[tid]->stored_mpdu_num--;
- sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i] = NULL;
+ dev_kfree_skb(tid_rx->reorder_buf[i]);
+ tid_rx->stored_mpdu_num--;
+ tid_rx->reorder_buf[i] = NULL;
}
}
- spin_lock_bh(&sta->lock);
/* free resources */
- kfree(sta->ampdu_mlme.tid_rx[tid]->reorder_buf);
- kfree(sta->ampdu_mlme.tid_rx[tid]->reorder_time);
-
- if (!sta->ampdu_mlme.tid_rx[tid]->shutdown) {
- kfree(sta->ampdu_mlme.tid_rx[tid]);
- sta->ampdu_mlme.tid_rx[tid] = NULL;
- }
+ kfree(tid_rx->reorder_buf);
+ kfree(tid_rx->reorder_time);
+ sta->ampdu_mlme.tid_rx[tid] = NULL;
- sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_IDLE;
spin_unlock_bh(&sta->lock);
+
+ if (!from_timer)
+ del_timer_sync(&tid_rx->session_timer);
+ kfree(tid_rx);
}
-void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid,
- u16 initiator, u16 reason)
+void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
+ u16 initiator, u16 reason)
{
- struct sta_info *sta;
-
- rcu_read_lock();
-
- sta = sta_info_get(sdata, ra);
- if (!sta) {
- rcu_read_unlock();
- return;
- }
-
- __ieee80211_stop_rx_ba_session(sta, tid, initiator, reason);
-
- rcu_read_unlock();
+ ___ieee80211_stop_rx_ba_session(sta, tid, initiator, reason, false);
}
/*
* After accepting the AddBA Request we activated a timer,
* resetting it after each frame that arrives from the originator.
- * if this timer expires ieee80211_sta_stop_rx_ba_session will be executed.
*/
static void sta_rx_agg_session_timer_expired(unsigned long data)
{
@@ -117,9 +100,8 @@ static void sta_rx_agg_session_timer_expired(unsigned long data)
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid);
#endif
- ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr,
- (u16)*ptid, WLAN_BACK_TIMER,
- WLAN_REASON_QSTA_TIMEOUT);
+ ___ieee80211_stop_rx_ba_session(sta, *ptid, WLAN_BACK_RECIPIENT,
+ WLAN_REASON_QSTA_TIMEOUT, true);
}
static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid,
@@ -194,7 +176,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
status = WLAN_STATUS_REQUEST_DECLINED;
- if (test_sta_flags(sta, WLAN_STA_SUSPEND)) {
+ if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Suspend in progress. "
"Denying ADDBA request\n");
@@ -232,7 +214,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
/* examine state machine */
spin_lock_bh(&sta->lock);
- if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_IDLE) {
+ if (sta->ampdu_mlme.tid_active_rx[tid]) {
#ifdef CONFIG_MAC80211_HT_DEBUG
if (net_ratelimit())
printk(KERN_DEBUG "unexpected AddBA Req from "
@@ -294,7 +276,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
}
/* change state and send addba resp */
- sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_OPERATIONAL;
+ sta->ampdu_mlme.tid_active_rx[tid] = true;
tid_agg_rx->dialog_token = dialog_token;
tid_agg_rx->ssn = start_seq_num;
tid_agg_rx->head_seq_num = start_seq_num;
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 87782a4bb541..c163d0a149f4 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -186,7 +186,7 @@ static void sta_addba_resp_timer_expired(unsigned long data)
spin_unlock_bh(&sta->lock);
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "timer expired on tid %d but we are not "
- "(or no longer) expecting addBA response there",
+ "(or no longer) expecting addBA response there\n",
tid);
#endif
return;
@@ -214,6 +214,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
int ret = 0;
u16 start_seq_num;
+ trace_api_start_tx_ba_session(pubsta, tid);
+
if (WARN_ON(!local->ops->ampdu_action))
return -EINVAL;
@@ -245,7 +247,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
return -EINVAL;
}
- if (test_sta_flags(sta, WLAN_STA_SUSPEND)) {
+ if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Suspend in progress. "
"Denying BA session request\n");
@@ -414,7 +416,7 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
struct sta_info *sta, u16 tid)
{
#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid);
+ printk(KERN_DEBUG "Aggregation is on for tid %d\n", tid);
#endif
spin_lock(&local->ampdu_lock);
@@ -440,6 +442,8 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
struct sta_info *sta;
u8 *state;
+ trace_api_start_tx_ba_cb(sdata, ra, tid);
+
if (tid >= STA_TID_NUM) {
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
@@ -541,6 +545,8 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct ieee80211_local *local = sdata->local;
+ trace_api_stop_tx_ba_session(pubsta, tid, initiator);
+
if (!local->ops->ampdu_action)
return -EINVAL;
@@ -558,6 +564,8 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
struct sta_info *sta;
u8 *state;
+ trace_api_stop_tx_ba_cb(sdata, ra, tid);
+
if (tid >= STA_TID_NUM) {
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
@@ -674,7 +682,7 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
del_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "switched off addBA timer for tid %d \n", tid);
+ printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index edc872e22c9b..c7000a6ca379 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -97,9 +97,6 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
params->mesh_id_len,
params->mesh_id);
- if (sdata->vif.type != NL80211_IFTYPE_MONITOR || !flags)
- return 0;
-
if (type == NL80211_IFTYPE_AP_VLAN &&
params && params->use_4addr == 0)
rcu_assign_pointer(sdata->u.vlan.sta, NULL);
@@ -107,7 +104,9 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
params && params->use_4addr >= 0)
sdata->u.mgd.use_4addr = params->use_4addr;
- sdata->u.mntr_flags = *flags;
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR && flags)
+ sdata->u.mntr_flags = *flags;
+
return 0;
}
@@ -411,6 +410,17 @@ static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
return ret;
}
+static int ieee80211_dump_survey(struct wiphy *wiphy, struct net_device *dev,
+ int idx, struct survey_info *survey)
+{
+ struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+
+ if (!local->ops->get_survey)
+ return -EOPNOTSUPP;
+
+ return drv_get_survey(local, idx, survey);
+}
+
static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev,
u8 *mac, struct station_info *sinfo)
{
@@ -1104,6 +1114,13 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
changed |= BSS_CHANGED_BASIC_RATES;
}
+ if (params->ap_isolate >= 0) {
+ if (params->ap_isolate)
+ sdata->flags |= IEEE80211_SDATA_DONT_BRIDGE_PACKETS;
+ else
+ sdata->flags &= ~IEEE80211_SDATA_DONT_BRIDGE_PACKETS;
+ }
+
ieee80211_bss_info_change_notify(sdata, changed);
return 0;
@@ -1137,19 +1154,47 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy,
return -EINVAL;
}
+ /* enable WMM or activate new settings */
+ local->hw.conf.flags |= IEEE80211_CONF_QOS;
+ drv_config(local, IEEE80211_CONF_CHANGE_QOS);
+
return 0;
}
static int ieee80211_set_channel(struct wiphy *wiphy,
+ struct net_device *netdev,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type)
{
struct ieee80211_local *local = wiphy_priv(wiphy);
+ struct ieee80211_sub_if_data *sdata = NULL;
+
+ if (netdev)
+ sdata = IEEE80211_DEV_TO_SUB_IF(netdev);
+
+ switch (ieee80211_get_channel_mode(local, NULL)) {
+ case CHAN_MODE_HOPPING:
+ return -EBUSY;
+ case CHAN_MODE_FIXED:
+ if (local->oper_channel != chan)
+ return -EBUSY;
+ if (!sdata && local->_oper_channel_type == channel_type)
+ return 0;
+ break;
+ case CHAN_MODE_UNDEFINED:
+ break;
+ }
local->oper_channel = chan;
- local->oper_channel_type = channel_type;
- return ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+ if (!ieee80211_set_channel_type(local, sdata, channel_type))
+ return -EBUSY;
+
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+ if (sdata && sdata->vif.type != NL80211_IFTYPE_MONITOR)
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT);
+
+ return 0;
}
#ifdef CONFIG_PM
@@ -1193,6 +1238,20 @@ static int ieee80211_auth(struct wiphy *wiphy, struct net_device *dev,
static int ieee80211_assoc(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_assoc_request *req)
{
+ struct ieee80211_local *local = wiphy_priv(wiphy);
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+ switch (ieee80211_get_channel_mode(local, sdata)) {
+ case CHAN_MODE_HOPPING:
+ return -EBUSY;
+ case CHAN_MODE_FIXED:
+ if (local->oper_channel == req->bss->channel)
+ break;
+ return -EBUSY;
+ case CHAN_MODE_UNDEFINED:
+ break;
+ }
+
return ieee80211_mgd_assoc(IEEE80211_DEV_TO_SUB_IF(dev), req);
}
@@ -1215,8 +1274,22 @@ static int ieee80211_disassoc(struct wiphy *wiphy, struct net_device *dev,
static int ieee80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_ibss_params *params)
{
+ struct ieee80211_local *local = wiphy_priv(wiphy);
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ switch (ieee80211_get_channel_mode(local, sdata)) {
+ case CHAN_MODE_HOPPING:
+ return -EBUSY;
+ case CHAN_MODE_FIXED:
+ if (!params->channel_fixed)
+ return -EBUSY;
+ if (local->oper_channel == params->channel)
+ break;
+ return -EBUSY;
+ case CHAN_MODE_UNDEFINED:
+ break;
+ }
+
return ieee80211_ibss_join(sdata, params);
}
@@ -1345,7 +1418,7 @@ int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata,
* association, there's no need to send an action frame.
*/
if (!sdata->u.mgd.associated ||
- sdata->local->oper_channel_type == NL80211_CHAN_NO_HT) {
+ sdata->vif.bss_conf.channel_type == NL80211_CHAN_NO_HT) {
mutex_lock(&sdata->local->iflist_mtx);
ieee80211_recalc_smps(sdata->local, sdata);
mutex_unlock(&sdata->local->iflist_mtx);
@@ -1384,11 +1457,11 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
return -EOPNOTSUPP;
if (enabled == sdata->u.mgd.powersave &&
- timeout == conf->dynamic_ps_timeout)
+ timeout == conf->dynamic_ps_forced_timeout)
return 0;
sdata->u.mgd.powersave = enabled;
- conf->dynamic_ps_timeout = timeout;
+ conf->dynamic_ps_forced_timeout = timeout;
/* no change, but if automatic follow powersave */
mutex_lock(&sdata->u.mgd.mtx);
@@ -1403,6 +1476,35 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
return 0;
}
+static int ieee80211_set_cqm_rssi_config(struct wiphy *wiphy,
+ struct net_device *dev,
+ s32 rssi_thold, u32 rssi_hyst)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+ struct ieee80211_vif *vif = &sdata->vif;
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+
+ if (rssi_thold == bss_conf->cqm_rssi_thold &&
+ rssi_hyst == bss_conf->cqm_rssi_hyst)
+ return 0;
+
+ bss_conf->cqm_rssi_thold = rssi_thold;
+ bss_conf->cqm_rssi_hyst = rssi_hyst;
+
+ if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI)) {
+ if (sdata->vif.type != NL80211_IFTYPE_STATION)
+ return -EOPNOTSUPP;
+ return 0;
+ }
+
+ /* the driver is told at association time; if already associated, tell it now */
+ if (sdata->u.mgd.associated)
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_CQM);
+
+ return 0;
+}
+
static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
struct net_device *dev,
const u8 *addr,
@@ -1475,6 +1577,7 @@ struct cfg80211_ops mac80211_config_ops = {
.change_station = ieee80211_change_station,
.get_station = ieee80211_get_station,
.dump_station = ieee80211_dump_station,
+ .dump_survey = ieee80211_dump_survey,
#ifdef CONFIG_MAC80211_MESH
.add_mpath = ieee80211_add_mpath,
.del_mpath = ieee80211_del_mpath,
@@ -1507,4 +1610,5 @@ struct cfg80211_ops mac80211_config_ops = {
.remain_on_channel = ieee80211_remain_on_channel,
.cancel_remain_on_channel = ieee80211_cancel_remain_on_channel,
.action = ieee80211_action,
+ .set_cqm_rssi_config = ieee80211_set_cqm_rssi_config,
};
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
new file mode 100644
index 000000000000..5d218c530a4e
--- /dev/null
+++ b/net/mac80211/chan.c
@@ -0,0 +1,127 @@
+/*
+ * mac80211 - channel management
+ */
+
+#include <linux/nl80211.h>
+#include "ieee80211_i.h"
+
+enum ieee80211_chan_mode
+__ieee80211_get_channel_mode(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *ignore)
+{
+ struct ieee80211_sub_if_data *sdata;
+
+ WARN_ON(!mutex_is_locked(&local->iflist_mtx));
+
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (sdata == ignore)
+ continue;
+
+ if (!ieee80211_sdata_running(sdata))
+ continue;
+
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR)
+ continue;
+
+ if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+ !sdata->u.mgd.associated)
+ continue;
+
+ if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
+ if (!sdata->u.ibss.ssid_len)
+ continue;
+ if (!sdata->u.ibss.fixed_channel)
+ return CHAN_MODE_HOPPING;
+ }
+
+ if (sdata->vif.type == NL80211_IFTYPE_AP &&
+ !sdata->u.ap.beacon)
+ continue;
+
+ return CHAN_MODE_FIXED;
+ }
+
+ return CHAN_MODE_UNDEFINED;
+}
+
+enum ieee80211_chan_mode
+ieee80211_get_channel_mode(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *ignore)
+{
+ enum ieee80211_chan_mode mode;
+
+ mutex_lock(&local->iflist_mtx);
+ mode = __ieee80211_get_channel_mode(local, ignore);
+ mutex_unlock(&local->iflist_mtx);
+
+ return mode;
+}
+
+bool ieee80211_set_channel_type(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ enum nl80211_channel_type chantype)
+{
+ struct ieee80211_sub_if_data *tmp;
+ enum nl80211_channel_type superchan = NL80211_CHAN_NO_HT;
+ bool result;
+
+ mutex_lock(&local->iflist_mtx);
+
+ list_for_each_entry(tmp, &local->interfaces, list) {
+ if (tmp == sdata)
+ continue;
+
+ if (!ieee80211_sdata_running(tmp))
+ continue;
+
+ switch (tmp->vif.bss_conf.channel_type) {
+ case NL80211_CHAN_NO_HT:
+ case NL80211_CHAN_HT20:
+ superchan = tmp->vif.bss_conf.channel_type;
+ break;
+ case NL80211_CHAN_HT40PLUS:
+ WARN_ON(superchan == NL80211_CHAN_HT40MINUS);
+ superchan = NL80211_CHAN_HT40PLUS;
+ break;
+ case NL80211_CHAN_HT40MINUS:
+ WARN_ON(superchan == NL80211_CHAN_HT40PLUS);
+ superchan = NL80211_CHAN_HT40MINUS;
+ break;
+ }
+ }
+
+ switch (superchan) {
+ case NL80211_CHAN_NO_HT:
+ case NL80211_CHAN_HT20:
+ /*
+ * allow any change that doesn't go to no-HT
+ * (if it already is no-HT no change is needed)
+ */
+ if (chantype == NL80211_CHAN_NO_HT)
+ break;
+ superchan = chantype;
+ break;
+ case NL80211_CHAN_HT40PLUS:
+ case NL80211_CHAN_HT40MINUS:
+ /* allow smaller bandwidth and same */
+ if (chantype == NL80211_CHAN_NO_HT)
+ break;
+ if (chantype == NL80211_CHAN_HT20)
+ break;
+ if (superchan == chantype)
+ break;
+ result = false;
+ goto out;
+ }
+
+ local->_oper_channel_type = superchan;
+
+ if (sdata)
+ sdata->vif.bss_conf.channel_type = chantype;
+
+ result = true;
+ out:
+ mutex_unlock(&local->iflist_mtx);
+
+ return result;
+}
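
A standalone restatement, not kernel code, of the combination rule that ieee80211_set_channel_type() above enforces: an interface may request the same or a narrower channel type than the other running interfaces, but HT40+ and HT40- can never coexist on the same hardware channel.

#include <stdbool.h>

enum chan { NO_HT, HT20, HT40PLUS, HT40MINUS };

/* returns true if "wanted" is compatible with what the other vifs use */
static bool chantype_compatible(enum chan in_use, enum chan wanted)
{
	if (in_use == NO_HT || in_use == HT20)
		return true;		/* anything may widen from here */
	if (wanted == NO_HT || wanted == HT20)
		return true;		/* narrower is always fine */
	return wanted == in_use;	/* HT40+ vs HT40- would clash */
}
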
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 83d4289d954b..20b2998fa0ed 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -100,6 +100,14 @@ static ssize_t ieee80211_if_fmt_##name( \
return scnprintf(buf, buflen, "%pM\n", sdata->field); \
}
+#define IEEE80211_IF_FMT_DEC_DIV_16(name, field) \
+static ssize_t ieee80211_if_fmt_##name( \
+ const struct ieee80211_sub_if_data *sdata, \
+ char *buf, int buflen) \
+{ \
+ return scnprintf(buf, buflen, "%d\n", sdata->field / 16); \
+}
+
#define __IEEE80211_IF_FILE(name, _write) \
static ssize_t ieee80211_if_read_##name(struct file *file, \
char __user *userbuf, \
@@ -140,6 +148,8 @@ IEEE80211_IF_FILE(rc_rateidx_mask_5ghz, rc_rateidx_mask[IEEE80211_BAND_5GHZ],
/* STA attributes */
IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC);
IEEE80211_IF_FILE(aid, u.mgd.aid, DEC);
+IEEE80211_IF_FILE(last_beacon, u.mgd.last_beacon_signal, DEC);
+IEEE80211_IF_FILE(ave_beacon, u.mgd.ave_beacon_signal, DEC_DIV_16);
static int ieee80211_set_smps(struct ieee80211_sub_if_data *sdata,
enum ieee80211_smps_mode smps_mode)
@@ -276,6 +286,8 @@ static void add_sta_files(struct ieee80211_sub_if_data *sdata)
DEBUGFS_ADD(bssid);
DEBUGFS_ADD(aid);
+ DEBUGFS_ADD(last_beacon);
+ DEBUGFS_ADD(ave_beacon);
DEBUGFS_ADD_MODE(smps, 0600);
}
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index d92800bb2d2f..e763f1529ddb 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -39,6 +39,13 @@ static const struct file_operations sta_ ##name## _ops = { \
.open = mac80211_open_file_generic, \
}
+#define STA_OPS_RW(name) \
+static const struct file_operations sta_ ##name## _ops = { \
+ .read = sta_##name##_read, \
+ .write = sta_##name##_write, \
+ .open = mac80211_open_file_generic, \
+}
+
#define STA_FILE(name, field, format) \
STA_READ_##format(name, field) \
STA_OPS(name)
@@ -57,7 +64,6 @@ STA_FILE(tx_filtered, tx_filtered_count, LU);
STA_FILE(tx_retry_failed, tx_retry_failed, LU);
STA_FILE(tx_retry_count, tx_retry_count, LU);
STA_FILE(last_signal, last_signal, D);
-STA_FILE(last_noise, last_noise, D);
STA_FILE(wep_weak_iv_count, wep_weak_iv_count, LU);
static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
@@ -120,7 +126,7 @@ STA_OPS(last_seq_ctrl);
static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
- char buf[64 + STA_TID_NUM * 40], *p = buf;
+ char buf[71 + STA_TID_NUM * 40], *p = buf;
int i;
struct sta_info *sta = file->private_data;
@@ -128,16 +134,16 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
sta->ampdu_mlme.dialog_token_allocator + 1);
p += scnprintf(p, sizeof(buf) + buf - p,
- "TID\t\tRX\tDTKN\tSSN\t\tTX\tDTKN\tSSN\tpending\n");
+ "TID\t\tRX active\tDTKN\tSSN\t\tTX\tDTKN\tSSN\tpending\n");
for (i = 0; i < STA_TID_NUM; i++) {
p += scnprintf(p, sizeof(buf) + buf - p, "%02d", i);
p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x",
- sta->ampdu_mlme.tid_state_rx[i]);
+ sta->ampdu_mlme.tid_active_rx[i]);
p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x",
- sta->ampdu_mlme.tid_state_rx[i] ?
+ sta->ampdu_mlme.tid_active_rx[i] ?
sta->ampdu_mlme.tid_rx[i]->dialog_token : 0);
p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x",
- sta->ampdu_mlme.tid_state_rx[i] ?
+ sta->ampdu_mlme.tid_active_rx[i] ?
sta->ampdu_mlme.tid_rx[i]->ssn : 0);
p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x",
@@ -157,7 +163,63 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
}
-STA_OPS(agg_status);
+
+static ssize_t sta_agg_status_write(struct file *file, const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ char _buf[12], *buf = _buf;
+ struct sta_info *sta = file->private_data;
+ bool start, tx;
+ unsigned long tid;
+ int ret;
+
+ if (count > sizeof(_buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, userbuf, count))
+ return -EFAULT;
+
+ buf[sizeof(_buf) - 1] = '\0';
+
+ if (strncmp(buf, "tx ", 3) == 0) {
+ buf += 3;
+ tx = true;
+ } else if (strncmp(buf, "rx ", 3) == 0) {
+ buf += 3;
+ tx = false;
+ } else
+ return -EINVAL;
+
+ if (strncmp(buf, "start ", 6) == 0) {
+ buf += 6;
+ start = true;
+ if (!tx)
+ return -EINVAL;
+ } else if (strncmp(buf, "stop ", 5) == 0) {
+ buf += 5;
+ start = false;
+ } else
+ return -EINVAL;
+
+ tid = simple_strtoul(buf, NULL, 0);
+
+ if (tid >= STA_TID_NUM)
+ return -EINVAL;
+
+ if (tx) {
+ if (start)
+ ret = ieee80211_start_tx_ba_session(&sta->sta, tid);
+ else
+ ret = ieee80211_stop_tx_ba_session(&sta->sta, tid,
+ WLAN_BACK_RECIPIENT);
+ } else {
+ __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT, 3);
+ ret = 0;
+ }
+
+ return ret ?: count;
+}
+STA_OPS_RW(agg_status);
static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
@@ -177,7 +239,7 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
if (htc->ht_supported) {
p += scnprintf(p, sizeof(buf)+buf-p, "cap: %#.4x\n", htc->cap);
- PRINT_HT_CAP((htc->cap & BIT(0)), "RX LDCP");
+ PRINT_HT_CAP((htc->cap & BIT(0)), "RX LDPC");
PRINT_HT_CAP((htc->cap & BIT(1)), "HT20/HT40");
PRINT_HT_CAP(!(htc->cap & BIT(1)), "HT20");
@@ -289,7 +351,6 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
DEBUGFS_ADD(tx_retry_failed);
DEBUGFS_ADD(tx_retry_count);
DEBUGFS_ADD(last_signal);
- DEBUGFS_ADD(last_noise);
DEBUGFS_ADD(wep_weak_iv_count);
DEBUGFS_ADD(ht_capa);
}
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index c3d844093a2f..ee8b63f92f71 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -84,16 +84,14 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local,
}
static inline u64 drv_prepare_multicast(struct ieee80211_local *local,
- int mc_count,
- struct dev_addr_list *mc_list)
+ struct netdev_hw_addr_list *mc_list)
{
u64 ret = 0;
if (local->ops->prepare_multicast)
- ret = local->ops->prepare_multicast(&local->hw, mc_count,
- mc_list);
+ ret = local->ops->prepare_multicast(&local->hw, mc_list);
- trace_drv_prepare_multicast(local, mc_count, ret);
+ trace_drv_prepare_multicast(local, mc_list->count, ret);
return ret;
}
@@ -154,14 +152,15 @@ static inline void drv_update_tkip_key(struct ieee80211_local *local,
}
static inline int drv_hw_scan(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
struct cfg80211_scan_request *req)
{
int ret;
might_sleep();
- ret = local->ops->hw_scan(&local->hw, req);
- trace_drv_hw_scan(local, req, ret);
+ ret = local->ops->hw_scan(&local->hw, &sdata->vif, req);
+ trace_drv_hw_scan(local, sdata, req, ret);
return ret;
}
@@ -346,6 +345,15 @@ static inline int drv_ampdu_action(struct ieee80211_local *local,
return ret;
}
+static inline int drv_get_survey(struct ieee80211_local *local, int idx,
+ struct survey_info *survey)
+{
+ int ret = -EOPNOTSUPP;
+ if (local->ops->get_survey)
+ ret = local->ops->get_survey(&local->hw, idx, survey);
+ /* trace_drv_get_survey(local, idx, survey, ret); */
+ return ret;
+}
static inline void drv_rfkill_poll(struct ieee80211_local *local)
{
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index 41baf730a5c7..ce734b58d07a 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -32,6 +32,10 @@ static inline void trace_ ## name(proto) {}
#define VIF_PR_FMT " vif:%s(%d)"
#define VIF_PR_ARG __get_str(vif_name), __entry->vif_type
+/*
+ * Tracing for driver callbacks.
+ */
+
TRACE_EVENT(drv_start,
TP_PROTO(struct ieee80211_local *local, int ret),
@@ -359,23 +363,26 @@ TRACE_EVENT(drv_update_tkip_key,
TRACE_EVENT(drv_hw_scan,
TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
struct cfg80211_scan_request *req, int ret),
- TP_ARGS(local, req, ret),
+ TP_ARGS(local, sdata, req, ret),
TP_STRUCT__entry(
LOCAL_ENTRY
+ VIF_ENTRY
__field(int, ret)
),
TP_fast_assign(
LOCAL_ASSIGN;
+ VIF_ASSIGN;
__entry->ret = ret;
),
TP_printk(
- LOCAL_PR_FMT " ret:%d",
- LOCAL_PR_ARG, __entry->ret
+ LOCAL_PR_FMT VIF_PR_FMT " ret:%d",
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->ret
)
);
@@ -766,6 +773,277 @@ TRACE_EVENT(drv_flush,
LOCAL_PR_ARG, __entry->drop
)
);
+
+/*
+ * Tracing for API calls that drivers call.
+ */
+
+TRACE_EVENT(api_start_tx_ba_session,
+ TP_PROTO(struct ieee80211_sta *sta, u16 tid),
+
+ TP_ARGS(sta, tid),
+
+ TP_STRUCT__entry(
+ STA_ENTRY
+ __field(u16, tid)
+ ),
+
+ TP_fast_assign(
+ STA_ASSIGN;
+ __entry->tid = tid;
+ ),
+
+ TP_printk(
+ STA_PR_FMT " tid:%d",
+ STA_PR_ARG, __entry->tid
+ )
+);
+
+TRACE_EVENT(api_start_tx_ba_cb,
+ TP_PROTO(struct ieee80211_sub_if_data *sdata, const u8 *ra, u16 tid),
+
+ TP_ARGS(sdata, ra, tid),
+
+ TP_STRUCT__entry(
+ VIF_ENTRY
+ __array(u8, ra, ETH_ALEN)
+ __field(u16, tid)
+ ),
+
+ TP_fast_assign(
+ VIF_ASSIGN;
+ memcpy(__entry->ra, ra, ETH_ALEN);
+ __entry->tid = tid;
+ ),
+
+ TP_printk(
+ VIF_PR_FMT " ra:%pM tid:%d",
+ VIF_PR_ARG, __entry->ra, __entry->tid
+ )
+);
+
+TRACE_EVENT(api_stop_tx_ba_session,
+ TP_PROTO(struct ieee80211_sta *sta, u16 tid, u16 initiator),
+
+ TP_ARGS(sta, tid, initiator),
+
+ TP_STRUCT__entry(
+ STA_ENTRY
+ __field(u16, tid)
+ __field(u16, initiator)
+ ),
+
+ TP_fast_assign(
+ STA_ASSIGN;
+ __entry->tid = tid;
+ __entry->initiator = initiator;
+ ),
+
+ TP_printk(
+ STA_PR_FMT " tid:%d initiator:%d",
+ STA_PR_ARG, __entry->tid, __entry->initiator
+ )
+);
+
+TRACE_EVENT(api_stop_tx_ba_cb,
+ TP_PROTO(struct ieee80211_sub_if_data *sdata, const u8 *ra, u16 tid),
+
+ TP_ARGS(sdata, ra, tid),
+
+ TP_STRUCT__entry(
+ VIF_ENTRY
+ __array(u8, ra, ETH_ALEN)
+ __field(u16, tid)
+ ),
+
+ TP_fast_assign(
+ VIF_ASSIGN;
+ memcpy(__entry->ra, ra, ETH_ALEN);
+ __entry->tid = tid;
+ ),
+
+ TP_printk(
+ VIF_PR_FMT " ra:%pM tid:%d",
+ VIF_PR_ARG, __entry->ra, __entry->tid
+ )
+);
+
+TRACE_EVENT(api_restart_hw,
+ TP_PROTO(struct ieee80211_local *local),
+
+ TP_ARGS(local),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT,
+ LOCAL_PR_ARG
+ )
+);
+
+TRACE_EVENT(api_beacon_loss,
+ TP_PROTO(struct ieee80211_sub_if_data *sdata),
+
+ TP_ARGS(sdata),
+
+ TP_STRUCT__entry(
+ VIF_ENTRY
+ ),
+
+ TP_fast_assign(
+ VIF_ASSIGN;
+ ),
+
+ TP_printk(
+ VIF_PR_FMT,
+ VIF_PR_ARG
+ )
+);
+
+TRACE_EVENT(api_connection_loss,
+ TP_PROTO(struct ieee80211_sub_if_data *sdata),
+
+ TP_ARGS(sdata),
+
+ TP_STRUCT__entry(
+ VIF_ENTRY
+ ),
+
+ TP_fast_assign(
+ VIF_ASSIGN;
+ ),
+
+ TP_printk(
+ VIF_PR_FMT,
+ VIF_PR_ARG
+ )
+);
+
+TRACE_EVENT(api_cqm_rssi_notify,
+ TP_PROTO(struct ieee80211_sub_if_data *sdata,
+ enum nl80211_cqm_rssi_threshold_event rssi_event),
+
+ TP_ARGS(sdata, rssi_event),
+
+ TP_STRUCT__entry(
+ VIF_ENTRY
+ __field(u32, rssi_event)
+ ),
+
+ TP_fast_assign(
+ VIF_ASSIGN;
+ __entry->rssi_event = rssi_event;
+ ),
+
+ TP_printk(
+ VIF_PR_FMT " event:%d",
+ VIF_PR_ARG, __entry->rssi_event
+ )
+);
+
+TRACE_EVENT(api_scan_completed,
+ TP_PROTO(struct ieee80211_local *local, bool aborted),
+
+ TP_ARGS(local, aborted),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(bool, aborted)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->aborted = aborted;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " aborted:%d",
+ LOCAL_PR_ARG, __entry->aborted
+ )
+);
+
+TRACE_EVENT(api_sta_block_awake,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sta *sta, bool block),
+
+ TP_ARGS(local, sta, block),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ STA_ENTRY
+ __field(bool, block)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ STA_ASSIGN;
+ __entry->block = block;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT STA_PR_FMT " block:%d",
+ LOCAL_PR_ARG, STA_PR_ARG, __entry->block
+ )
+);
+
+/*
+ * Tracing for internal functions
+ * (which may also be called in response to driver calls)
+ */
+
+TRACE_EVENT(wake_queue,
+ TP_PROTO(struct ieee80211_local *local, u16 queue,
+ enum queue_stop_reason reason),
+
+ TP_ARGS(local, queue, reason),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(u16, queue)
+ __field(u32, reason)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->queue = queue;
+ __entry->reason = reason;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " queue:%d, reason:%d",
+ LOCAL_PR_ARG, __entry->queue, __entry->reason
+ )
+);
+
+TRACE_EVENT(stop_queue,
+ TP_PROTO(struct ieee80211_local *local, u16 queue,
+ enum queue_stop_reason reason),
+
+ TP_ARGS(local, queue, reason),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(u16, queue)
+ __field(u32, reason)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->queue = queue;
+ __entry->reason = reason;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " queue:%d, reason:%d",
+ LOCAL_PR_ARG, __entry->queue, __entry->reason
+ )
+);
#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */
#undef TRACE_INCLUDE_PATH
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index bb677a73b7c9..2ab106a0a491 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -175,8 +175,7 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
#endif /* CONFIG_MAC80211_HT_DEBUG */
if (initiator == WLAN_BACK_INITIATOR)
- ieee80211_sta_stop_rx_ba_session(sdata, sta->sta.addr, tid,
- WLAN_BACK_INITIATOR, 0);
+ __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_INITIATOR, 0);
else { /* WLAN_BACK_RECIPIENT */
spin_lock_bh(&sta->lock);
if (sta->ampdu_mlme.tid_state_tx[tid] & HT_ADDBA_REQUESTED_MSK)
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index e2976da4e0d9..b2cc1fda6cfd 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -92,12 +92,18 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
if (memcmp(ifibss->bssid, bssid, ETH_ALEN))
sta_info_flush(sdata->local, sdata);
+ /* if merging, indicate to driver that we leave the old IBSS */
+ if (sdata->vif.bss_conf.ibss_joined) {
+ sdata->vif.bss_conf.ibss_joined = false;
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IBSS);
+ }
+
memcpy(ifibss->bssid, bssid, ETH_ALEN);
sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0;
local->oper_channel = chan;
- local->oper_channel_type = NL80211_CHAN_NO_HT;
+ WARN_ON(!ieee80211_set_channel_type(local, sdata, NL80211_CHAN_NO_HT));
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
sband = local->hw.wiphy->bands[chan->band];
@@ -171,6 +177,8 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
bss_change |= BSS_CHANGED_BSSID;
bss_change |= BSS_CHANGED_BEACON;
bss_change |= BSS_CHANGED_BEACON_ENABLED;
+ bss_change |= BSS_CHANGED_IBSS;
+ sdata->vif.bss_conf.ibss_joined = true;
ieee80211_bss_info_change_notify(sdata, bss_change);
ieee80211_sta_def_wmm_params(sdata, sband->n_bitrates, supp_rates);
@@ -265,17 +273,16 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
sta->sta.supp_rates[band] = supp_rates |
ieee80211_mandatory_rates(local, band);
+ if (sta->sta.supp_rates[band] != prev_rates) {
#ifdef CONFIG_MAC80211_IBSS_DEBUG
- if (sta->sta.supp_rates[band] != prev_rates)
printk(KERN_DEBUG "%s: updated supp_rates set "
- "for %pM based on beacon info (0x%llx | "
- "0x%llx -> 0x%llx)\n",
- sdata->name,
- sta->sta.addr,
- (unsigned long long) prev_rates,
- (unsigned long long) supp_rates,
- (unsigned long long) sta->sta.supp_rates[band]);
+ "for %pM based on beacon/probe_response "
+ "(0x%x -> 0x%x)\n",
+ sdata->name, sta->sta.addr,
+ prev_rates, sta->sta.supp_rates[band]);
#endif
+ rate_control_rate_init(sta);
+ }
rcu_read_unlock();
} else {
rcu_read_unlock();
@@ -371,6 +378,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
sdata->name, mgmt->bssid);
#endif
ieee80211_sta_join_ibss(sdata, bss);
+ supp_rates = ieee80211_sta_get_rates(local, elems, band);
ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa,
supp_rates, GFP_KERNEL);
}
@@ -481,7 +489,9 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
printk(KERN_DEBUG "%s: No active IBSS STAs - trying to scan for other "
"IBSS networks with same SSID (merge)\n", sdata->name);
- ieee80211_request_internal_scan(sdata, ifibss->ssid, ifibss->ssid_len);
+ ieee80211_request_internal_scan(sdata,
+ ifibss->ssid, ifibss->ssid_len,
+ ifibss->fixed_channel ? ifibss->channel : NULL);
}
static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
@@ -588,8 +598,9 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
printk(KERN_DEBUG "%s: Trigger new scan to find an IBSS to "
"join\n", sdata->name);
- ieee80211_request_internal_scan(sdata, ifibss->ssid,
- ifibss->ssid_len);
+ ieee80211_request_internal_scan(sdata,
+ ifibss->ssid, ifibss->ssid_len,
+ ifibss->fixed_channel ? ifibss->channel : NULL);
} else {
int interval = IEEE80211_SCAN_INTERVAL;
@@ -897,6 +908,13 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
sdata->u.ibss.channel = params->channel;
sdata->u.ibss.fixed_channel = params->channel_fixed;
+ /* fix ourselves to that channel now already */
+ if (params->channel_fixed) {
+ sdata->local->oper_channel = params->channel;
+ WARN_ON(!ieee80211_set_channel_type(sdata->local, sdata,
+ NL80211_CHAN_NO_HT));
+ }
+
if (params->ie) {
sdata->u.ibss.ie = kmemdup(params->ie, params->ie_len,
GFP_KERNEL);
@@ -951,7 +969,9 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
kfree(sdata->u.ibss.ie);
skb = sdata->u.ibss.presp;
rcu_assign_pointer(sdata->u.ibss.presp, NULL);
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
+ sdata->vif.bss_conf.ibss_joined = false;
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
+ BSS_CHANGED_IBSS);
synchronize_rcu();
kfree_skb(skb);
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 241533e1bc03..7ef7798d04cd 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -317,6 +317,7 @@ enum ieee80211_sta_flags {
IEEE80211_STA_MFP_ENABLED = BIT(6),
IEEE80211_STA_UAPSD_ENABLED = BIT(7),
IEEE80211_STA_NULLFUNC_ACKED = BIT(8),
+ IEEE80211_STA_RESET_SIGNAL_AVE = BIT(9),
};
struct ieee80211_if_managed {
@@ -327,7 +328,7 @@ struct ieee80211_if_managed {
struct work_struct work;
struct work_struct monitor_work;
struct work_struct chswitch_work;
- struct work_struct beacon_loss_work;
+ struct work_struct beacon_connection_loss_work;
unsigned long probe_timeout;
int probe_send_count;
@@ -359,6 +360,24 @@ struct ieee80211_if_managed {
int wmm_last_param_set;
u8 use_4addr;
+
+ /* Signal strength from the last Beacon frame in the current BSS. */
+ int last_beacon_signal;
+
+ /*
+ * Weighted average of the signal strength from Beacon frames in the
+ * current BSS. This is in units of 1/16 of the signal unit to maintain
+ * accuracy and to speed up calculations, i.e., the value needs to be
+ * divided by 16 to get the actual value.
+ */
+ int ave_beacon_signal;
+
+ /*
+ * Last Beacon frame signal strength average (ave_beacon_signal / 16)
+ * that triggered a cqm event. 0 indicates that no event has been
+ * generated for the current association.
+ */
+ int last_cqm_event_signal;
};
enum ieee80211_ibss_request {
@@ -646,8 +665,7 @@ struct ieee80211_local {
struct work_struct recalc_smps;
/* aggregated multicast list */
- struct dev_addr_list *mc_list;
- int mc_count;
+ struct netdev_hw_addr_list mc_list;
bool tim_in_locked_section; /* see ieee80211_beacon_get() */
@@ -745,10 +763,11 @@ struct ieee80211_local {
int scan_channel_idx;
int scan_ies_len;
+ unsigned long leave_oper_channel_time;
enum mac80211_scan_state next_scan_state;
struct delayed_work scan_work;
struct ieee80211_sub_if_data *scan_sdata;
- enum nl80211_channel_type oper_channel_type;
+ enum nl80211_channel_type _oper_channel_type;
struct ieee80211_channel *oper_channel, *csa_channel;
/* Temporary remain-on-channel for off-channel operations */
@@ -1000,7 +1019,8 @@ void ieee80211_ibss_restart(struct ieee80211_sub_if_data *sdata);
/* scan/BSS handling */
void ieee80211_scan_work(struct work_struct *work);
int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
- const u8 *ssid, u8 ssid_len);
+ const u8 *ssid, u8 ssid_len,
+ struct ieee80211_channel *chan);
int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
struct cfg80211_scan_request *req);
void ieee80211_scan_cancel(struct ieee80211_local *local);
@@ -1078,8 +1098,6 @@ int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
enum ieee80211_smps_mode smps, const u8 *da,
const u8 *bssid);
-void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *da,
- u16 tid, u16 initiator, u16 reason);
void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
u16 initiator, u16 reason);
void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta);
@@ -1155,7 +1173,7 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
int powersave);
void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
struct ieee80211_hdr *hdr);
-void ieee80211_beacon_loss_work(struct work_struct *work);
+void ieee80211_beacon_connection_loss_work(struct work_struct *work);
void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
enum queue_stop_reason reason);
@@ -1210,6 +1228,20 @@ int ieee80211_wk_remain_on_channel(struct ieee80211_sub_if_data *sdata,
int ieee80211_wk_cancel_remain_on_channel(
struct ieee80211_sub_if_data *sdata, u64 cookie);
+/* channel management */
+enum ieee80211_chan_mode {
+ CHAN_MODE_UNDEFINED,
+ CHAN_MODE_HOPPING,
+ CHAN_MODE_FIXED,
+};
+
+enum ieee80211_chan_mode
+ieee80211_get_channel_mode(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *ignore);
+bool ieee80211_set_channel_type(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ enum nl80211_channel_type chantype);
+
#ifdef CONFIG_MAC80211_NOINLINE
#define debug_noinline noinline
#else
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index e08fa8eda1b3..50deb017fd6e 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -413,8 +413,7 @@ static int ieee80211_stop(struct net_device *dev)
netif_addr_lock_bh(dev);
spin_lock_bh(&local->filter_lock);
- __dev_addr_unsync(&local->mc_list, &local->mc_count,
- &dev->mc_list, &dev->mc_count);
+ __hw_addr_unsync(&local->mc_list, &dev->mc, dev->addr_len);
spin_unlock_bh(&local->filter_lock);
netif_addr_unlock_bh(dev);
@@ -487,7 +486,7 @@ static int ieee80211_stop(struct net_device *dev)
cancel_work_sync(&sdata->u.mgd.work);
cancel_work_sync(&sdata->u.mgd.chswitch_work);
cancel_work_sync(&sdata->u.mgd.monitor_work);
- cancel_work_sync(&sdata->u.mgd.beacon_loss_work);
+ cancel_work_sync(&sdata->u.mgd.beacon_connection_loss_work);
/*
* When we get here, the interface is marked down.
@@ -597,8 +596,7 @@ static void ieee80211_set_multicast_list(struct net_device *dev)
sdata->flags ^= IEEE80211_SDATA_PROMISC;
}
spin_lock_bh(&local->filter_lock);
- __dev_addr_sync(&local->mc_list, &local->mc_count,
- &dev->mc_list, &dev->mc_count);
+ __hw_addr_sync(&local->mc_list, &dev->mc, dev->addr_len);
spin_unlock_bh(&local->filter_lock);
ieee80211_queue_work(&local->hw, &local->reconfig_filter);
}
@@ -816,6 +814,118 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
return 0;
}
+static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
+ struct net_device *dev,
+ enum nl80211_iftype type)
+{
+ struct ieee80211_sub_if_data *sdata;
+ u64 mask, start, addr, val, inc;
+ u8 *m;
+ u8 tmp_addr[ETH_ALEN];
+ int i;
+
+ /* default ... something at least */
+ memcpy(dev->perm_addr, local->hw.wiphy->perm_addr, ETH_ALEN);
+
+ if (is_zero_ether_addr(local->hw.wiphy->addr_mask) &&
+ local->hw.wiphy->n_addresses <= 1)
+ return;
+
+
+ mutex_lock(&local->iflist_mtx);
+
+ switch (type) {
+ case NL80211_IFTYPE_MONITOR:
+ /* doesn't matter */
+ break;
+ case NL80211_IFTYPE_WDS:
+ case NL80211_IFTYPE_AP_VLAN:
+ /* match up with an AP interface */
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (sdata->vif.type != NL80211_IFTYPE_AP)
+ continue;
+ memcpy(dev->perm_addr, sdata->vif.addr, ETH_ALEN);
+ break;
+ }
+ /* keep default if no AP interface present */
+ break;
+ default:
+ /* assign a new address if possible -- try n_addresses first */
+ for (i = 0; i < local->hw.wiphy->n_addresses; i++) {
+ bool used = false;
+
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (memcmp(local->hw.wiphy->addresses[i].addr,
+ sdata->vif.addr, ETH_ALEN) == 0) {
+ used = true;
+ break;
+ }
+ }
+
+ if (!used) {
+ memcpy(dev->perm_addr,
+ local->hw.wiphy->addresses[i].addr,
+ ETH_ALEN);
+ break;
+ }
+ }
+
+ /* try mask if available */
+ if (is_zero_ether_addr(local->hw.wiphy->addr_mask))
+ break;
+
+ m = local->hw.wiphy->addr_mask;
+ mask = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) |
+ ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) |
+ ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8);
+
+ if (__ffs64(mask) + hweight64(mask) != fls64(mask)) {
+ /* not a contiguous mask ... not handled now! */
+ printk(KERN_DEBUG "not contiguous\n");
+ break;
+ }
+
+ m = local->hw.wiphy->perm_addr;
+ start = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) |
+ ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) |
+ ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8);
+
+ inc = 1ULL<<__ffs64(mask);
+ val = (start & mask);
+ addr = (start & ~mask) | (val & mask);
+ do {
+ bool used = false;
+
+ tmp_addr[5] = addr >> 0*8;
+ tmp_addr[4] = addr >> 1*8;
+ tmp_addr[3] = addr >> 2*8;
+ tmp_addr[2] = addr >> 3*8;
+ tmp_addr[1] = addr >> 4*8;
+ tmp_addr[0] = addr >> 5*8;
+
+ val += inc;
+
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (memcmp(tmp_addr, sdata->vif.addr,
+ ETH_ALEN) == 0) {
+ used = true;
+ break;
+ }
+ }
+
+ if (!used) {
+ memcpy(dev->perm_addr, tmp_addr, ETH_ALEN);
+ break;
+ }
+ addr = (start & ~mask) | (val & mask);
+ } while (addr != start);
+
+ break;
+ }
+
+ mutex_unlock(&local->iflist_mtx);
+}
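
The "__ffs64 + hweight64 == fls64" test above rejects non-contiguous address masks. A small standalone sketch of the same check using GCC builtins (not kernel code): for a contiguous run of set bits, the index of the first set bit plus the number of set bits equals the index one past the last set bit.

#include <stdbool.h>
#include <stdint.h>

static bool mask_is_contiguous(uint64_t mask)
{
	if (!mask)
		return false;
	/* __builtin_ctzll ~ __ffs64, __builtin_popcountll ~ hweight64,
	 * (64 - __builtin_clzll) ~ fls64 */
	return __builtin_ctzll(mask) + __builtin_popcountll(mask) ==
	       64 - __builtin_clzll(mask);
}
/* mask_is_contiguous(0x0f) -> true;  mask_is_contiguous(0x0a) -> false */
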
+
int ieee80211_if_add(struct ieee80211_local *local, const char *name,
struct net_device **new_dev, enum nl80211_iftype type,
struct vif_params *params)
@@ -845,8 +955,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
if (ret < 0)
goto fail;
- memcpy(ndev->dev_addr, local->hw.wiphy->perm_addr, ETH_ALEN);
- memcpy(ndev->perm_addr, ndev->dev_addr, ETH_ALEN);
+ ieee80211_assign_perm_addr(local, ndev, type);
+ memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN);
SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy));
/* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index e8f6e3b252d8..8d4b41787dcf 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -140,6 +140,7 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
struct ieee80211_sub_if_data,
u.ap);
+ key->conf.ap_addr = sdata->dev->dev_addr;
ret = drv_set_key(key->local, SET_KEY, sdata, sta, &key->conf);
if (!ret) {
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index b887e484ae04..22a384dfab65 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -71,7 +71,7 @@ void ieee80211_configure_filter(struct ieee80211_local *local)
spin_lock_bh(&local->filter_lock);
changed_flags = local->filter_flags ^ new_flags;
- mc = drv_prepare_multicast(local, local->mc_count, local->mc_list);
+ mc = drv_prepare_multicast(local, &local->mc_list);
spin_unlock_bh(&local->filter_lock);
/* be a bit nasty */
@@ -111,7 +111,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
channel_type = local->tmp_channel_type;
} else {
chan = local->oper_channel;
- channel_type = local->oper_channel_type;
+ channel_type = local->_oper_channel_type;
}
if (chan != local->hw.conf.channel ||
@@ -309,6 +309,8 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw)
{
struct ieee80211_local *local = hw_to_local(hw);
+ trace_api_restart_hw(local);
+
/* use this reason, __ieee80211_resume will unblock it */
ieee80211_stop_queues_by_reason(hw,
IEEE80211_QUEUE_STOP_REASON_SUSPEND);
@@ -388,6 +390,9 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
local->uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN;
INIT_LIST_HEAD(&local->interfaces);
+
+ __hw_addr_init(&local->mc_list);
+
mutex_init(&local->iflist_mtx);
mutex_init(&local->scan_mtx);
@@ -437,7 +442,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
struct ieee80211_local *local = hw_to_local(hw);
int result;
enum ieee80211_band band;
- int channels, i, j, max_bitrates;
+ int channels, max_bitrates;
bool supp_ht;
static const u32 cipher_suites[] = {
WLAN_CIPHER_SUITE_WEP40,
@@ -567,6 +572,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
local->hw.conf.listen_interval = local->hw.max_listen_interval;
+ local->hw.conf.dynamic_ps_forced_timeout = -1;
+
result = sta_info_start(local);
if (result < 0)
goto fail_sta_info;
@@ -601,21 +608,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
ieee80211_led_init(local);
- /* alloc internal scan request */
- i = 0;
- local->int_scan_req->ssids = &local->scan_ssid;
- local->int_scan_req->n_ssids = 1;
- for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
- if (!hw->wiphy->bands[band])
- continue;
- for (j = 0; j < hw->wiphy->bands[band]->n_channels; j++) {
- local->int_scan_req->channels[i] =
- &hw->wiphy->bands[band]->channels[j];
- i++;
- }
- }
- local->int_scan_req->n_channels = i;
-
local->network_latency_notifier.notifier_call =
ieee80211_max_network_latency;
result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY,
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 859ee5f3d941..7e93524459fc 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -601,10 +601,10 @@ static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata,
struct ieee80211_rx_status *rx_status)
{
switch (mgmt->u.action.category) {
- case MESH_PLINK_CATEGORY:
+ case WLAN_CATEGORY_MESH_PLINK:
mesh_rx_plink_frame(sdata, mgmt, len, rx_status);
break;
- case MESH_PATH_SEL_CATEGORY:
+ case WLAN_CATEGORY_MESH_PATH_SEL:
mesh_rx_path_sel_frame(sdata, mgmt, len);
break;
}
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 85562c59d7d6..c88087f1cd0f 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -209,8 +209,6 @@ struct mesh_rmc {
#define MESH_MAX_MPATHS 1024
/* Pending ANA approval */
-#define MESH_PLINK_CATEGORY 30
-#define MESH_PATH_SEL_CATEGORY 32
#define MESH_PATH_SEL_ACTION 0
/* PERR reason codes */
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index fefc45c4b4e8..d89ed7f2592b 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -132,7 +132,7 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
/* BSSID == SA */
memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
- mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
+ mgmt->u.action.category = WLAN_CATEGORY_MESH_PATH_SEL;
mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION;
switch (action) {
@@ -225,7 +225,7 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
memcpy(mgmt->da, ra, ETH_ALEN);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
/* BSSID is left zeroed, wildcard value */
- mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
+ mgmt->u.action.category = WLAN_CATEGORY_MESH_PATH_SEL;
mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION;
ie_len = 15;
pos = skb_put(skb, 2 + ie_len);
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 7b7080e2b49f..3cd5f7b5d693 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -172,7 +172,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
memcpy(mgmt->da, da, ETH_ALEN);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
/* BSSID is left zeroed, wildcard value */
- mgmt->u.action.category = MESH_PLINK_CATEGORY;
+ mgmt->u.action.category = WLAN_CATEGORY_MESH_PLINK;
mgmt->u.action.u.plink_action.action_code = action;
if (action == PLINK_CLOSE)
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 4aefa6dc3091..5f2215aee752 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -47,6 +47,13 @@
*/
#define IEEE80211_PROBE_WAIT (HZ / 2)
+/*
+ * Weight given to the latest Beacon frame when calculating average signal
+ * strength for Beacon frames received in the current BSS. This must be
+ * between 1 and 15.
+ */
+#define IEEE80211_SIGNAL_AVE_WEIGHT 3
+
#define TMR_RUNNING_TIMER 0
#define TMR_RUNNING_CHANSW 1
@@ -130,11 +137,14 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta;
u32 changed = 0;
u16 ht_opmode;
- bool enable_ht = true, ht_changed;
+ bool enable_ht = true;
+ enum nl80211_channel_type prev_chantype;
enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+ prev_chantype = sdata->vif.bss_conf.channel_type;
+
/* HT is not supported */
if (!sband->ht_cap.ht_supported)
enable_ht = false;
@@ -165,38 +175,37 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
}
}
- ht_changed = conf_is_ht(&local->hw.conf) != enable_ht ||
- channel_type != local->hw.conf.channel_type;
-
if (local->tmp_channel)
local->tmp_channel_type = channel_type;
- local->oper_channel_type = channel_type;
- if (ht_changed) {
- /* channel_type change automatically detected */
- ieee80211_hw_config(local, 0);
+ if (!ieee80211_set_channel_type(local, sdata, channel_type)) {
+ /* can only fail due to HT40+/- mismatch */
+ channel_type = NL80211_CHAN_HT20;
+ WARN_ON(!ieee80211_set_channel_type(local, sdata, channel_type));
+ }
+
+ /* channel_type change automatically detected */
+ ieee80211_hw_config(local, 0);
+ if (prev_chantype != channel_type) {
rcu_read_lock();
sta = sta_info_get(sdata, bssid);
if (sta)
rate_control_rate_update(local, sband, sta,
IEEE80211_RC_HT_CHANGED,
- local->oper_channel_type);
+ channel_type);
rcu_read_unlock();
- }
-
- /* disable HT */
- if (!enable_ht)
- return 0;
+ }
ht_opmode = le16_to_cpu(hti->operation_mode);
/* if bss configuration changed store the new one */
- if (!sdata->ht_opmode_valid ||
- sdata->vif.bss_conf.ht_operation_mode != ht_opmode) {
+ if (sdata->ht_opmode_valid != enable_ht ||
+ sdata->vif.bss_conf.ht_operation_mode != ht_opmode ||
+ prev_chantype != channel_type) {
changed |= BSS_CHANGED_HT;
sdata->vif.bss_conf.ht_operation_mode = ht_opmode;
- sdata->ht_opmode_valid = true;
+ sdata->ht_opmode_valid = enable_ht;
}
return changed;
@@ -206,7 +215,7 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
const u8 *bssid, u16 stype, u16 reason,
- void *cookie)
+ void *cookie, bool send_frame)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -243,7 +252,11 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len);
if (!(ifmgd->flags & IEEE80211_STA_MFP_ENABLED))
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
- ieee80211_tx_skb(sdata, skb);
+
+ if (send_frame)
+ ieee80211_tx_skb(sdata, skb);
+ else
+ kfree_skb(skb);
}
void ieee80211_send_pspoll(struct ieee80211_local *local,
@@ -467,6 +480,7 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
{
struct ieee80211_sub_if_data *sdata, *found = NULL;
int count = 0;
+ int timeout;
if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS)) {
local->ps_sdata = NULL;
@@ -495,11 +509,31 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
s32 beaconint_us;
if (latency < 0)
- latency = pm_qos_requirement(PM_QOS_NETWORK_LATENCY);
+ latency = pm_qos_request(PM_QOS_NETWORK_LATENCY);
beaconint_us = ieee80211_tu_to_usec(
found->vif.bss_conf.beacon_int);
+ timeout = local->hw.conf.dynamic_ps_forced_timeout;
+ if (timeout < 0) {
+ /*
+ * The 2 second value is there for compatibility until
+ * the PM_QOS_NETWORK_LATENCY is configured with real
+ * values.
+ */
+ if (latency == 2000000000)
+ timeout = 100;
+ else if (latency <= 50000)
+ timeout = 300;
+ else if (latency <= 100000)
+ timeout = 100;
+ else if (latency <= 500000)
+ timeout = 50;
+ else
+ timeout = 0;
+ }
+ local->hw.conf.dynamic_ps_timeout = timeout;
+
if (beaconint_us > latency) {
local->ps_sdata = NULL;
} else {
@@ -592,6 +626,9 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
int count;
u8 *pos, uapsd_queues = 0;
+ if (!local->ops->conf_tx)
+ return;
+
if (local->hw.queues < 4)
return;
@@ -666,11 +703,15 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
params.aifs, params.cw_min, params.cw_max, params.txop,
params.uapsd);
#endif
- if (drv_conf_tx(local, queue, &params) && local->ops->conf_tx)
+ if (drv_conf_tx(local, queue, &params))
printk(KERN_DEBUG "%s: failed to set TX queue "
"parameters for queue %d\n",
wiphy_name(local->hw.wiphy), queue);
}
+
+ /* enable WMM or activate new settings */
+ local->hw.conf.flags |= IEEE80211_CONF_QOS;
+ drv_config(local, IEEE80211_CONF_CHANGE_QOS);
}
static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
@@ -731,6 +772,8 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
sdata->u.mgd.associated = cbss;
memcpy(sdata->u.mgd.bssid, cbss->bssid, ETH_ALEN);
+ sdata->u.mgd.flags |= IEEE80211_STA_RESET_SIGNAL_AVE;
+
/* just to be sure */
sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL |
IEEE80211_STA_BEACON_POLL);
@@ -756,6 +799,11 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
/* And the BSSID changed - we're associated now */
bss_info_changed |= BSS_CHANGED_BSSID;
+ /* Tell the driver to monitor connection quality (if supported) */
+ if ((local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI) &&
+ sdata->vif.bss_conf.cqm_rssi_thold)
+ bss_info_changed |= BSS_CHANGED_CQM;
+
ieee80211_bss_info_change_notify(sdata, bss_info_changed);
mutex_lock(&local->iflist_mtx);
@@ -767,7 +815,8 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
netif_carrier_on(sdata->dev);
}
-static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata)
+static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
+ bool remove_sta)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_local *local = sdata->local;
@@ -819,7 +868,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata)
ieee80211_set_wmm_default(sdata);
/* channel(_type) changes are handled by ieee80211_hw_config */
- local->oper_channel_type = NL80211_CHAN_NO_HT;
+ WARN_ON(!ieee80211_set_channel_type(local, sdata, NL80211_CHAN_NO_HT));
/* on the next assoc, re-program HT parameters */
sdata->ht_opmode_valid = false;
@@ -836,11 +885,12 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata)
ieee80211_hw_config(local, config_changed);
- /* And the BSSID changed -- not very interesting here */
- changed |= BSS_CHANGED_BSSID;
+ /* The BSSID (not really interesting) and HT changed */
+ changed |= BSS_CHANGED_BSSID | BSS_CHANGED_HT;
ieee80211_bss_info_change_notify(sdata, changed);
- sta_info_destroy_addr(sdata, bssid);
+ if (remove_sta)
+ sta_info_destroy_addr(sdata, bssid);
}
void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
@@ -857,6 +907,9 @@ void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
if (is_multicast_ether_addr(hdr->addr1))
return;
+ if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
+ return;
+
mod_timer(&sdata->u.mgd.conn_mon_timer,
round_jiffies_up(jiffies + IEEE80211_CONNECTION_IDLE_TIME));
}
@@ -934,23 +987,72 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
mutex_unlock(&ifmgd->mtx);
}
-void ieee80211_beacon_loss_work(struct work_struct *work)
+static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ struct ieee80211_local *local = sdata->local;
+ u8 bssid[ETH_ALEN];
+
+ mutex_lock(&ifmgd->mtx);
+ if (!ifmgd->associated) {
+ mutex_unlock(&ifmgd->mtx);
+ return;
+ }
+
+ memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
+
+ printk(KERN_DEBUG "Connection to AP %pM lost.\n", bssid);
+
+ ieee80211_set_disassoc(sdata, true);
+ ieee80211_recalc_idle(local);
+ mutex_unlock(&ifmgd->mtx);
+ /*
+ * must be outside lock due to cfg80211,
+ * but that's not a problem.
+ */
+ ieee80211_send_deauth_disassoc(sdata, bssid,
+ IEEE80211_STYPE_DEAUTH,
+ WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
+ NULL, true);
+}
+
+void ieee80211_beacon_connection_loss_work(struct work_struct *work)
{
struct ieee80211_sub_if_data *sdata =
container_of(work, struct ieee80211_sub_if_data,
- u.mgd.beacon_loss_work);
+ u.mgd.beacon_connection_loss_work);
- ieee80211_mgd_probe_ap(sdata, true);
+ if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
+ __ieee80211_connection_loss(sdata);
+ else
+ ieee80211_mgd_probe_ap(sdata, true);
}
void ieee80211_beacon_loss(struct ieee80211_vif *vif)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_hw *hw = &sdata->local->hw;
+
+ trace_api_beacon_loss(sdata);
- ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.beacon_loss_work);
+ WARN_ON(hw->flags & IEEE80211_HW_CONNECTION_MONITOR);
+ ieee80211_queue_work(hw, &sdata->u.mgd.beacon_connection_loss_work);
}
EXPORT_SYMBOL(ieee80211_beacon_loss);
+void ieee80211_connection_loss(struct ieee80211_vif *vif)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_hw *hw = &sdata->local->hw;
+
+ trace_api_connection_loss(sdata);
+
+ WARN_ON(!(hw->flags & IEEE80211_HW_CONNECTION_MONITOR));
+ ieee80211_queue_work(hw, &sdata->u.mgd.beacon_connection_loss_work);
+}
+EXPORT_SYMBOL(ieee80211_connection_loss);
+
+
static enum rx_mgmt_action __must_check
ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt, size_t len)
@@ -971,7 +1073,7 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
printk(KERN_DEBUG "%s: deauthenticated from %pM (Reason: %u)\n",
sdata->name, bssid, reason_code);
- ieee80211_set_disassoc(sdata);
+ ieee80211_set_disassoc(sdata, true);
ieee80211_recalc_idle(sdata->local);
return RX_MGMT_CFG80211_DEAUTH;
@@ -1001,7 +1103,7 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
printk(KERN_DEBUG "%s: disassociated from %pM (Reason: %u)\n",
sdata->name, mgmt->sa, reason_code);
- ieee80211_set_disassoc(sdata);
+ ieee80211_set_disassoc(sdata, true);
ieee80211_recalc_idle(sdata->local);
return RX_MGMT_CFG80211_DISASSOC;
}
@@ -1254,12 +1356,17 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
mutex_lock(&sdata->local->iflist_mtx);
ieee80211_recalc_ps(sdata->local, -1);
mutex_unlock(&sdata->local->iflist_mtx);
+
+ if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
+ return;
+
/*
* We've received a probe response, but are not sure whether
* we have or will be receiving any beacons or data, so let's
* schedule the timers again, just in case.
*/
mod_beacon_timer(sdata);
+
mod_timer(&ifmgd->conn_mon_timer,
round_jiffies_up(jiffies +
IEEE80211_CONNECTION_IDLE_TIME));
@@ -1293,6 +1400,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
struct ieee80211_rx_status *rx_status)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
size_t baselen;
struct ieee802_11_elems elems;
struct ieee80211_local *local = sdata->local;
@@ -1328,6 +1436,41 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
if (memcmp(bssid, mgmt->bssid, ETH_ALEN) != 0)
return;
+ /* Track average RSSI from the Beacon frames of the current AP */
+ ifmgd->last_beacon_signal = rx_status->signal;
+ if (ifmgd->flags & IEEE80211_STA_RESET_SIGNAL_AVE) {
+ ifmgd->flags &= ~IEEE80211_STA_RESET_SIGNAL_AVE;
+ ifmgd->ave_beacon_signal = rx_status->signal;
+ ifmgd->last_cqm_event_signal = 0;
+ } else {
+ ifmgd->ave_beacon_signal =
+ (IEEE80211_SIGNAL_AVE_WEIGHT * rx_status->signal * 16 +
+ (16 - IEEE80211_SIGNAL_AVE_WEIGHT) *
+ ifmgd->ave_beacon_signal) / 16;
+ }
+ if (bss_conf->cqm_rssi_thold &&
+ !(local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI)) {
+ int sig = ifmgd->ave_beacon_signal / 16;
+ int last_event = ifmgd->last_cqm_event_signal;
+ int thold = bss_conf->cqm_rssi_thold;
+ int hyst = bss_conf->cqm_rssi_hyst;
+ if (sig < thold &&
+ (last_event == 0 || sig < last_event - hyst)) {
+ ifmgd->last_cqm_event_signal = sig;
+ ieee80211_cqm_rssi_notify(
+ &sdata->vif,
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
+ GFP_KERNEL);
+ } else if (sig > thold &&
+ (last_event == 0 || sig > last_event + hyst)) {
+ ifmgd->last_cqm_event_signal = sig;
+ ieee80211_cqm_rssi_notify(
+ &sdata->vif,
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
+ GFP_KERNEL);
+ }
+ }
+
if (ifmgd->flags & IEEE80211_STA_BEACON_POLL) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
if (net_ratelimit()) {
@@ -1613,7 +1756,7 @@ static void ieee80211_sta_work(struct work_struct *work)
printk(KERN_DEBUG "No probe response from AP %pM"
" after %dms, disconnecting.\n",
bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ);
- ieee80211_set_disassoc(sdata);
+ ieee80211_set_disassoc(sdata, true);
ieee80211_recalc_idle(local);
mutex_unlock(&ifmgd->mtx);
/*
@@ -1623,7 +1766,7 @@ static void ieee80211_sta_work(struct work_struct *work)
ieee80211_send_deauth_disassoc(sdata, bssid,
IEEE80211_STYPE_DEAUTH,
WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
- NULL);
+ NULL, true);
mutex_lock(&ifmgd->mtx);
}
}
@@ -1640,7 +1783,8 @@ static void ieee80211_sta_bcn_mon_timer(unsigned long data)
if (local->quiescing)
return;
- ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.beacon_loss_work);
+ ieee80211_queue_work(&sdata->local->hw,
+ &sdata->u.mgd.beacon_connection_loss_work);
}
static void ieee80211_sta_conn_mon_timer(unsigned long data)
@@ -1692,7 +1836,7 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
*/
cancel_work_sync(&ifmgd->work);
- cancel_work_sync(&ifmgd->beacon_loss_work);
+ cancel_work_sync(&ifmgd->beacon_connection_loss_work);
if (del_timer_sync(&ifmgd->timer))
set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running);
@@ -1726,7 +1870,8 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
INIT_WORK(&ifmgd->work, ieee80211_sta_work);
INIT_WORK(&ifmgd->monitor_work, ieee80211_sta_monitor_work);
INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work);
- INIT_WORK(&ifmgd->beacon_loss_work, ieee80211_beacon_loss_work);
+ INIT_WORK(&ifmgd->beacon_connection_loss_work,
+ ieee80211_beacon_connection_loss_work);
setup_timer(&ifmgd->timer, ieee80211_sta_timer,
(unsigned long) sdata);
setup_timer(&ifmgd->bcn_mon_timer, ieee80211_sta_bcn_mon_timer,
@@ -1805,6 +1950,9 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
struct ieee80211_work *wk;
u16 auth_alg;
+ if (req->local_state_change)
+ return 0; /* no need to update mac80211 state */
+
switch (req->auth_type) {
case NL80211_AUTHTYPE_OPEN_SYSTEM:
auth_alg = WLAN_AUTH_OPEN;
@@ -1913,7 +2061,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
}
/* Trying to reassociate - clear previous association state */
- ieee80211_set_disassoc(sdata);
+ ieee80211_set_disassoc(sdata, true);
}
mutex_unlock(&ifmgd->mtx);
@@ -2017,7 +2165,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
if (ifmgd->associated == req->bss) {
bssid = req->bss->bssid;
- ieee80211_set_disassoc(sdata);
+ ieee80211_set_disassoc(sdata, true);
mutex_unlock(&ifmgd->mtx);
} else {
bool not_auth_yet = false;
@@ -2030,7 +2178,8 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
continue;
if (wk->type != IEEE80211_WORK_DIRECT_PROBE &&
- wk->type != IEEE80211_WORK_AUTH)
+ wk->type != IEEE80211_WORK_AUTH &&
+ wk->type != IEEE80211_WORK_ASSOC)
continue;
if (memcmp(req->bss->bssid, wk->filter_ta, ETH_ALEN))
@@ -2060,9 +2209,9 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
printk(KERN_DEBUG "%s: deauthenticating from %pM by local choice (reason=%d)\n",
sdata->name, bssid, req->reason_code);
- ieee80211_send_deauth_disassoc(sdata, bssid,
- IEEE80211_STYPE_DEAUTH, req->reason_code,
- cookie);
+ ieee80211_send_deauth_disassoc(sdata, bssid, IEEE80211_STYPE_DEAUTH,
+ req->reason_code, cookie,
+ !req->local_state_change);
ieee80211_recalc_idle(sdata->local);
@@ -2074,6 +2223,7 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
void *cookie)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ u8 bssid[ETH_ALEN];
mutex_lock(&ifmgd->mtx);
@@ -2091,13 +2241,15 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
printk(KERN_DEBUG "%s: disassociating from %pM by local choice (reason=%d)\n",
sdata->name, req->bss->bssid, req->reason_code);
- ieee80211_set_disassoc(sdata);
+ memcpy(bssid, req->bss->bssid, ETH_ALEN);
+ ieee80211_set_disassoc(sdata, false);
mutex_unlock(&ifmgd->mtx);
ieee80211_send_deauth_disassoc(sdata, req->bss->bssid,
IEEE80211_STYPE_DISASSOC, req->reason_code,
- cookie);
+ cookie, !req->local_state_change);
+ sta_info_destroy_addr(sdata, bssid);
ieee80211_recalc_idle(sdata->local);
@@ -2117,7 +2269,7 @@ int ieee80211_mgd_action(struct ieee80211_sub_if_data *sdata,
if ((chan != local->tmp_channel ||
channel_type != local->tmp_channel_type) &&
(chan != local->oper_channel ||
- channel_type != local->oper_channel_type))
+ channel_type != local->_oper_channel_type))
return -EBUSY;
skb = dev_alloc_skb(local->hw.extra_tx_headroom + len);
@@ -2138,3 +2290,15 @@ int ieee80211_mgd_action(struct ieee80211_sub_if_data *sdata,
*cookie = (unsigned long) skb;
return 0;
}
+
+void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif,
+ enum nl80211_cqm_rssi_threshold_event rssi_event,
+ gfp_t gfp)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+
+ trace_api_cqm_rssi_notify(sdata, rssi_event);
+
+ cfg80211_cqm_rssi_notify(sdata->dev, rssi_event, gfp);
+}
+EXPORT_SYMBOL(ieee80211_cqm_rssi_notify);
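The beacon-RSSI tracking added to ieee80211_rx_mgmt_beacon() above keeps the running average in fixed point (scaled by 16), weights the newest sample by IEEE80211_SIGNAL_AVE_WEIGHT out of 16, and only raises a CQM event once the average crosses cqm_rssi_thold by more than the hysteresis since the last event. The following standalone sketch restates just that arithmetic; the struct, function and constant names are illustrative and are not taken from the patch.

/*
 * Sketch only: restates the fixed-point averaging and hysteresis added
 * above.  Builds as a normal userspace program; none of these names come
 * from mac80211.
 */
#include <stdio.h>

#define SIGNAL_AVE_WEIGHT 3     /* weight of the newest beacon, out of 16 */

struct cqm_state {
        int ave_signal16;       /* running average, scaled by 16 */
        int last_event_signal;  /* signal (dBm) at the last CQM event */
        int reset;              /* set after (re)association */
};

/* Feed one beacon RSSI sample; returns -1, 0 or +1 for a low/none/high event. */
static int cqm_update(struct cqm_state *s, int signal, int thold, int hyst)
{
        int sig;

        if (s->reset) {
                s->reset = 0;
                /* seed the scaled average with the first sample */
                s->ave_signal16 = signal * 16;
                s->last_event_signal = 0;
        } else {
                s->ave_signal16 = (SIGNAL_AVE_WEIGHT * signal * 16 +
                                   (16 - SIGNAL_AVE_WEIGHT) * s->ave_signal16) / 16;
        }

        sig = s->ave_signal16 / 16;
        if (sig < thold &&
            (s->last_event_signal == 0 || sig < s->last_event_signal - hyst)) {
                s->last_event_signal = sig;
                return -1;
        }
        if (sig > thold &&
            (s->last_event_signal == 0 || sig > s->last_event_signal + hyst)) {
                s->last_event_signal = sig;
                return 1;
        }
        return 0;
}

int main(void)
{
        struct cqm_state s = { .reset = 1 };
        int samples[] = { -55, -88, -90, -90, -90, -90, -90, -90 };
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("rssi %4d dBm -> event %d\n", samples[i],
                       cqm_update(&s, samples[i], -80, 3));
        return 0;
}

With a -80 dBm threshold and 3 dB hysteresis, the trailing -90 dBm samples eventually pull the average under the threshold and trigger the low event. Note that, as in the patch, the very first beacon after a reset can report an immediate high event whenever the signal is already above the threshold, because last_cqm_event_signal starts at zero.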
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 0e64484e861c..75202b295a4e 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -46,7 +46,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
list_for_each_entry_rcu(sta, &local->sta_list, list) {
- set_sta_flags(sta, WLAN_STA_SUSPEND);
+ set_sta_flags(sta, WLAN_STA_BLOCK_BA);
ieee80211_sta_tear_down_BA_sessions(sta);
}
}
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 6d0bd198af19..be04d46110fe 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -103,6 +103,7 @@ ieee80211_rate_control_ops_get(const char *name)
struct rate_control_ops *ops;
const char *alg_name;
+ kparam_block_sysfs_write(ieee80211_default_rc_algo);
if (!name)
alg_name = ieee80211_default_rc_algo;
else
@@ -120,6 +121,7 @@ ieee80211_rate_control_ops_get(const char *name)
/* try built-in one if specific alg requested but not found */
if (!ops && strlen(CONFIG_MAC80211_RC_DEFAULT))
ops = ieee80211_try_rate_control_ops_get(CONFIG_MAC80211_RC_DEFAULT);
+ kparam_unblock_sysfs_write(ieee80211_default_rc_algo);
return ops;
}
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 818abfae9007..f65ce6dcc8e2 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -542,7 +542,7 @@ minstrel_free(void *priv)
kfree(priv);
}
-static struct rate_control_ops mac80211_minstrel = {
+struct rate_control_ops mac80211_minstrel = {
.name = "minstrel",
.tx_status = minstrel_tx_status,
.get_rate = minstrel_get_rate,
diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h
index 38bf4168fc3a..0f5a83370aa6 100644
--- a/net/mac80211/rc80211_minstrel.h
+++ b/net/mac80211/rc80211_minstrel.h
@@ -80,7 +80,18 @@ struct minstrel_priv {
unsigned int lookaround_rate_mrr;
};
+struct minstrel_debugfs_info {
+ size_t len;
+ char buf[];
+};
+
+extern struct rate_control_ops mac80211_minstrel;
void minstrel_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir);
void minstrel_remove_sta_debugfs(void *priv, void *priv_sta);
+/* debugfs */
+int minstrel_stats_open(struct inode *inode, struct file *file);
+ssize_t minstrel_stats_read(struct file *file, char __user *buf, size_t len, loff_t *ppos);
+int minstrel_stats_release(struct inode *inode, struct file *file);
+
#endif
diff --git a/net/mac80211/rc80211_minstrel_debugfs.c b/net/mac80211/rc80211_minstrel_debugfs.c
index 0e1f12b1b6dd..241e76f3fdf2 100644
--- a/net/mac80211/rc80211_minstrel_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_debugfs.c
@@ -53,21 +53,15 @@
#include <net/mac80211.h>
#include "rc80211_minstrel.h"
-struct minstrel_stats_info {
- struct minstrel_sta_info *mi;
- char buf[4096];
- size_t len;
-};
-
-static int
+int
minstrel_stats_open(struct inode *inode, struct file *file)
{
struct minstrel_sta_info *mi = inode->i_private;
- struct minstrel_stats_info *ms;
+ struct minstrel_debugfs_info *ms;
unsigned int i, tp, prob, eprob;
char *p;
- ms = kmalloc(sizeof(*ms), GFP_KERNEL);
+ ms = kmalloc(sizeof(*ms) + 4096, GFP_KERNEL);
if (!ms)
return -ENOMEM;
@@ -107,36 +101,19 @@ minstrel_stats_open(struct inode *inode, struct file *file)
return 0;
}
-static ssize_t
-minstrel_stats_read(struct file *file, char __user *buf, size_t len, loff_t *o)
+ssize_t
+minstrel_stats_read(struct file *file, char __user *buf, size_t len, loff_t *ppos)
{
- struct minstrel_stats_info *ms;
- char *src;
+ struct minstrel_debugfs_info *ms;
ms = file->private_data;
- src = ms->buf;
-
- len = min(len, ms->len);
- if (len <= *o)
- return 0;
-
- src += *o;
- len -= *o;
- *o += len;
-
- if (copy_to_user(buf, src, len))
- return -EFAULT;
-
- return len;
+ return simple_read_from_buffer(buf, len, ppos, ms->buf, ms->len);
}
-static int
+int
minstrel_stats_release(struct inode *inode, struct file *file)
{
- struct minstrel_stats_info *ms = file->private_data;
-
- kfree(ms);
-
+ kfree(file->private_data);
return 0;
}
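The rc80211_minstrel_debugfs.c hunks above drop the hand-rolled offset/copy_to_user() bookkeeping in favour of simple_read_from_buffer(), with the stats text built into a flexible buffer at open time so the read and release handlers can be shared. A hedged sketch of that pattern follows; the struct and function names are placeholders, not the minstrel ones.

/*
 * Illustrative read/release pair built around simple_read_from_buffer(),
 * assuming a structure like the minstrel_debugfs_info added above (a length
 * plus a flexible buffer, filled at open time).
 */
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

struct example_debugfs_info {
        size_t len;
        char buf[];
};

static ssize_t example_stats_read(struct file *file, char __user *buf,
                                  size_t len, loff_t *ppos)
{
        struct example_debugfs_info *ms = file->private_data;

        /* copies at most ms->len - *ppos bytes and advances *ppos for us */
        return simple_read_from_buffer(buf, len, ppos, ms->buf, ms->len);
}

static int example_stats_release(struct inode *inode, struct file *file)
{
        kfree(file->private_data);
        return 0;
}

Because simple_read_from_buffer() already handles the offset clamping and the copy to userspace, the open-coded *o arithmetic and copy_to_user() call could be deleted wholesale.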
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 04ea07f0e78a..9a08f2c446c6 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -39,7 +39,7 @@ static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
{
if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
if (likely(skb->len > FCS_LEN))
- skb_trim(skb, skb->len - FCS_LEN);
+ __pskb_trim(skb, skb->len - FCS_LEN);
else {
/* driver bug */
WARN_ON(1);
@@ -81,8 +81,6 @@ ieee80211_rx_radiotap_len(struct ieee80211_local *local,
len += 8;
if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
len += 1;
- if (local->hw.flags & IEEE80211_HW_NOISE_DBM)
- len += 1;
if (len & 1) /* padding for RX_FLAGS if necessary */
len++;
@@ -179,14 +177,6 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
pos++;
}
- /* IEEE80211_RADIOTAP_DBM_ANTNOISE */
- if (local->hw.flags & IEEE80211_HW_NOISE_DBM) {
- *pos = status->noise;
- rthdr->it_present |=
- cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
- pos++;
- }
-
/* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
/* IEEE80211_RADIOTAP_ANTENNA */
@@ -236,6 +226,12 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
present_fcs_len = FCS_LEN;
+ /* make sure hdr->frame_control is on the linear part */
+ if (!pskb_may_pull(origskb, 2)) {
+ dev_kfree_skb(origskb);
+ return NULL;
+ }
+
if (!local->monitors) {
if (should_drop_frame(origskb, present_fcs_len)) {
dev_kfree_skb(origskb);
@@ -493,7 +489,7 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
if (ieee80211_is_action(hdr->frame_control)) {
mgmt = (struct ieee80211_mgmt *)hdr;
- if (mgmt->u.action.category != MESH_PLINK_CATEGORY)
+ if (mgmt->u.action.category != WLAN_CATEGORY_MESH_PLINK)
return RX_DROP_MONITOR;
return RX_CONTINUE;
}
@@ -723,14 +719,16 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
- if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL)
- goto dont_reorder;
+ spin_lock(&sta->lock);
+
+ if (!sta->ampdu_mlme.tid_active_rx[tid])
+ goto dont_reorder_unlock;
tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
/* qos null data frames are excluded */
if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
- goto dont_reorder;
+ goto dont_reorder_unlock;
/* new, potentially un-ordered, ampdu frame - process it */
@@ -742,15 +740,20 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
/* if this mpdu is fragmented - terminate rx aggregation session */
sc = le16_to_cpu(hdr->seq_ctrl);
if (sc & IEEE80211_SCTL_FRAG) {
- ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr,
- tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP);
+ spin_unlock(&sta->lock);
+ __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT,
+ WLAN_REASON_QSTA_REQUIRE_SETUP);
dev_kfree_skb(skb);
return;
}
- if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames))
+ if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames)) {
+ spin_unlock(&sta->lock);
return;
+ }
+ dont_reorder_unlock:
+ spin_unlock(&sta->lock);
dont_reorder:
__skb_queue_tail(frames, skb);
}
@@ -897,6 +900,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
rx->key = key;
return RX_CONTINUE;
} else {
+ u8 keyid;
/*
* The device doesn't give us the IV so we won't be
* able to look up the key. That's ok though, we
@@ -919,7 +923,8 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
* no need to call ieee80211_wep_get_keyidx,
* it verifies a bunch of things we've done already
*/
- keyidx = rx->skb->data[hdrlen + 3] >> 6;
+ skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1);
+ keyidx = keyid >> 6;
rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
@@ -940,6 +945,11 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
return RX_DROP_MONITOR;
}
+ if (skb_linearize(rx->skb))
+ return RX_DROP_UNUSABLE;
+
+ hdr = (struct ieee80211_hdr *)rx->skb->data;
+
/* Check for weak IVs if possible */
if (rx->sta && rx->key->conf.alg == ALG_WEP &&
ieee80211_is_data(hdr->frame_control) &&
@@ -1078,7 +1088,6 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
sta->rx_fragments++;
sta->rx_bytes += rx->skb->len;
sta->last_signal = status->signal;
- sta->last_noise = status->noise;
/*
* Change STA power saving mode only at the end of a frame
@@ -1241,6 +1250,9 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
}
I802_DEBUG_INC(rx->local->rx_handlers_fragments);
+ if (skb_linearize(rx->skb))
+ return RX_DROP_UNUSABLE;
+
seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
if (frag == 0) {
@@ -1406,21 +1418,24 @@ static int
ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
__le16 fc = hdr->frame_control;
- int res;
- res = ieee80211_drop_unencrypted(rx, fc);
- if (unlikely(res))
- return res;
+ /*
+ * Pass through unencrypted frames if the hardware has
+ * decrypted them already.
+ */
+ if (status->flag & RX_FLAG_DECRYPTED)
+ return 0;
if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) {
- if (unlikely(ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
+ if (unlikely(!ieee80211_has_protected(fc) &&
+ ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
rx->key))
return -EACCES;
/* BIP does not use Protected field, so need to check MMIE */
if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
- ieee80211_get_mmie_keyidx(rx->skb) < 0 &&
- rx->key))
+ ieee80211_get_mmie_keyidx(rx->skb) < 0))
return -EACCES;
/*
* When using MFP, Action frames are not allowed prior to
@@ -1598,6 +1613,9 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
skb->dev = dev;
__skb_queue_head_init(&frame_list);
+ if (skb_linearize(skb))
+ return RX_DROP_UNUSABLE;
+
ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
rx->sdata->vif.type,
rx->local->hw.extra_tx_headroom);
@@ -1796,10 +1814,12 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
if (ieee80211_is_back_req(bar->frame_control)) {
if (!rx->sta)
return RX_DROP_MONITOR;
+ spin_lock(&rx->sta->lock);
tid = le16_to_cpu(bar->control) >> 12;
- if (rx->sta->ampdu_mlme.tid_state_rx[tid]
- != HT_AGG_STATE_OPERATIONAL)
+ if (!rx->sta->ampdu_mlme.tid_active_rx[tid]) {
+ spin_unlock(&rx->sta->lock);
return RX_DROP_MONITOR;
+ }
tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid];
start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4;
@@ -1813,6 +1833,7 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num,
frames);
kfree_skb(skb);
+ spin_unlock(&rx->sta->lock);
return RX_QUEUED;
}
@@ -1974,8 +1995,8 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
goto handled;
}
break;
- case MESH_PLINK_CATEGORY:
- case MESH_PATH_SEL_CATEGORY:
+ case WLAN_CATEGORY_MESH_PLINK:
+ case WLAN_CATEGORY_MESH_PATH_SEL:
if (ieee80211_vif_is_mesh(&sdata->vif))
return ieee80211_mesh_rx_mgmt(sdata, rx->skb);
break;
@@ -2372,29 +2393,42 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_sub_if_data *sdata;
struct ieee80211_hdr *hdr;
+ __le16 fc;
struct ieee80211_rx_data rx;
int prepares;
struct ieee80211_sub_if_data *prev = NULL;
struct sk_buff *skb_new;
struct sta_info *sta, *tmp;
bool found_sta = false;
+ int err = 0;
- hdr = (struct ieee80211_hdr *)skb->data;
+ fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
memset(&rx, 0, sizeof(rx));
rx.skb = skb;
rx.local = local;
- if (ieee80211_is_data(hdr->frame_control) || ieee80211_is_mgmt(hdr->frame_control))
+ if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
local->dot11ReceivedFragmentCount++;
if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
test_bit(SCAN_OFF_CHANNEL, &local->scanning)))
rx.flags |= IEEE80211_RX_IN_SCAN;
+ if (ieee80211_is_mgmt(fc))
+ err = skb_linearize(skb);
+ else
+ err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
+
+ if (err) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ hdr = (struct ieee80211_hdr *)skb->data;
ieee80211_parse_qos(&rx);
ieee80211_verify_alignment(&rx);
- if (ieee80211_is_data(hdr->frame_control)) {
+ if (ieee80211_is_data(fc)) {
for_each_sta_info(local, hdr->addr2, sta, tmp) {
rx.sta = sta;
found_sta = true;
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 85507bd9e341..e1b0be7a57b9 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -14,6 +14,8 @@
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
+#include <linux/pm_qos_params.h>
+#include <net/sch_generic.h>
#include <linux/slab.h>
#include <net/mac80211.h>
@@ -83,7 +85,7 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
{
struct cfg80211_bss *cbss;
struct ieee80211_bss *bss;
- int clen;
+ int clen, srlen;
s32 signal = 0;
if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
@@ -112,23 +114,24 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
bss->dtim_period = tim_ie->dtim_period;
}
- bss->supp_rates_len = 0;
+ /* replace old supported rates if we get new values */
+ srlen = 0;
if (elems->supp_rates) {
- clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len;
+ clen = IEEE80211_MAX_SUPP_RATES;
if (clen > elems->supp_rates_len)
clen = elems->supp_rates_len;
- memcpy(&bss->supp_rates[bss->supp_rates_len], elems->supp_rates,
- clen);
- bss->supp_rates_len += clen;
+ memcpy(bss->supp_rates, elems->supp_rates, clen);
+ srlen += clen;
}
if (elems->ext_supp_rates) {
- clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len;
+ clen = IEEE80211_MAX_SUPP_RATES - srlen;
if (clen > elems->ext_supp_rates_len)
clen = elems->ext_supp_rates_len;
- memcpy(&bss->supp_rates[bss->supp_rates_len],
- elems->ext_supp_rates, clen);
- bss->supp_rates_len += clen;
+ memcpy(bss->supp_rates + srlen, elems->ext_supp_rates, clen);
+ srlen += clen;
}
+ if (srlen)
+ bss->supp_rates_len = srlen;
bss->wmm_used = elems->wmm_param || elems->wmm_info;
bss->uapsd_supported = is_uapsd_supported(elems);
@@ -246,6 +249,8 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
struct ieee80211_local *local = hw_to_local(hw);
bool was_hw_scan;
+ trace_api_scan_completed(local, aborted);
+
mutex_lock(&local->scan_mtx);
/*
@@ -322,6 +327,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
ieee80211_offchannel_stop_beaconing(local);
+ local->leave_oper_channel_time = 0;
local->next_scan_state = SCAN_DECISION;
local->scan_channel_idx = 0;
@@ -406,7 +412,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
if (local->ops->hw_scan) {
WARN_ON(!ieee80211_prep_hw_scan(local));
- rc = drv_hw_scan(local, local->hw_scan_req);
+ rc = drv_hw_scan(local, sdata, local->hw_scan_req);
} else
rc = ieee80211_start_sw_scan(local);
@@ -426,11 +432,28 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
return rc;
}
+static unsigned long
+ieee80211_scan_get_channel_time(struct ieee80211_channel *chan)
+{
+ /*
+ * TODO: channel switching also consumes quite some time,
+ * add that delay as well to get a better estimation
+ */
+ if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+ return IEEE80211_PASSIVE_CHANNEL_TIME;
+ return IEEE80211_PROBE_DELAY + IEEE80211_CHANNEL_TIME;
+}
+
static int ieee80211_scan_state_decision(struct ieee80211_local *local,
unsigned long *next_delay)
{
bool associated = false;
+ bool tx_empty = true;
+ bool bad_latency;
+ bool listen_int_exceeded;
+ unsigned long min_beacon_int = 0;
struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_channel *next_chan;
/* if no more bands/channels left, complete scan and advance to the idle state */
if (local->scan_channel_idx >= local->scan_req->n_channels) {
@@ -438,7 +461,11 @@ static int ieee80211_scan_state_decision(struct ieee80211_local *local,
return 1;
}
- /* check if at least one STA interface is associated */
+ /*
+ * check if at least one STA interface is associated,
+ * check if at least one STA interface has pending tx frames
+ * and grab the lowest used beacon interval
+ */
mutex_lock(&local->iflist_mtx);
list_for_each_entry(sdata, &local->interfaces, list) {
if (!ieee80211_sdata_running(sdata))
@@ -447,7 +474,16 @@ static int ieee80211_scan_state_decision(struct ieee80211_local *local,
if (sdata->vif.type == NL80211_IFTYPE_STATION) {
if (sdata->u.mgd.associated) {
associated = true;
- break;
+
+ if (sdata->vif.bss_conf.beacon_int <
+ min_beacon_int || min_beacon_int == 0)
+ min_beacon_int =
+ sdata->vif.bss_conf.beacon_int;
+
+ if (!qdisc_all_tx_empty(sdata->dev)) {
+ tx_empty = false;
+ break;
+ }
}
}
}
@@ -456,11 +492,34 @@ static int ieee80211_scan_state_decision(struct ieee80211_local *local,
if (local->scan_channel) {
/*
* we're currently scanning a different channel, let's
- * switch back to the operating channel now if at least
- * one interface is associated. Otherwise just scan the
- * next channel
+ * see if we can scan another channel without interfering
+ * with the current traffic situation.
+ *
+ * Since we don't know if the AP has pending frames for us
+ * we can only check for our tx queues and use the current
+ * pm_qos requirements for rx. Hence, if no tx traffic occurs
+ * at all we will scan as many channels in a row as the pm_qos
+ * latency allows us to. Additionally we also check for the
+ * currently negotiated listen interval to prevent losing
+ * frames unnecessarily.
+ *
+ * Otherwise switch back to the operating channel.
*/
- if (associated)
+ next_chan = local->scan_req->channels[local->scan_channel_idx];
+
+ bad_latency = time_after(jiffies +
+ ieee80211_scan_get_channel_time(next_chan),
+ local->leave_oper_channel_time +
+ usecs_to_jiffies(pm_qos_request(PM_QOS_NETWORK_LATENCY)));
+
+ listen_int_exceeded = time_after(jiffies +
+ ieee80211_scan_get_channel_time(next_chan),
+ local->leave_oper_channel_time +
+ usecs_to_jiffies(min_beacon_int * 1024) *
+ local->hw.conf.listen_interval);
+
+ if (associated && ( !tx_empty || bad_latency ||
+ listen_int_exceeded))
local->next_scan_state = SCAN_ENTER_OPER_CHANNEL;
else
local->next_scan_state = SCAN_SET_CHANNEL;
@@ -492,6 +551,9 @@ static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *loca
else
*next_delay = HZ / 10;
+ /* remember when we left the operating channel */
+ local->leave_oper_channel_time = jiffies;
+
/* advance to the next channel to be scanned */
local->next_scan_state = SCAN_SET_CHANNEL;
}
@@ -594,7 +656,7 @@ void ieee80211_scan_work(struct work_struct *work)
}
if (local->hw_scan_req) {
- int rc = drv_hw_scan(local, local->hw_scan_req);
+ int rc = drv_hw_scan(local, sdata, local->hw_scan_req);
mutex_unlock(&local->scan_mtx);
if (rc)
ieee80211_scan_completed(&local->hw, true);
@@ -667,10 +729,12 @@ int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
}
int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
- const u8 *ssid, u8 ssid_len)
+ const u8 *ssid, u8 ssid_len,
+ struct ieee80211_channel *chan)
{
struct ieee80211_local *local = sdata->local;
int ret = -EBUSY;
+ enum nl80211_band band;
mutex_lock(&local->scan_mtx);
@@ -678,6 +742,30 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
if (local->scan_req)
goto unlock;
+ /* fill internal scan request */
+ if (!chan) {
+ int i, nchan = 0;
+
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ if (!local->hw.wiphy->bands[band])
+ continue;
+ for (i = 0;
+ i < local->hw.wiphy->bands[band]->n_channels;
+ i++) {
+ local->int_scan_req->channels[nchan] =
+ &local->hw.wiphy->bands[band]->channels[i];
+ nchan++;
+ }
+ }
+
+ local->int_scan_req->n_channels = nchan;
+ } else {
+ local->int_scan_req->channels[0] = chan;
+ local->int_scan_req->n_channels = 1;
+ }
+
+ local->int_scan_req->ssids = &local->scan_ssid;
+ local->int_scan_req->n_ssids = 1;
memcpy(local->int_scan_req->ssids[0].ssid, ssid, IEEE80211_MAX_SSID_LEN);
local->int_scan_req->ssids[0].ssid_len = ssid_len;
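The new scan-decision logic above weighs another off-channel dwell against both the PM-QoS network-latency budget and the negotiated listen interval before leaving the operating channel again. The sketch below restates that comparison in plain microseconds; the dwell constants and names are assumptions for illustration, and the jiffies wraparound that time_after() handles in the kernel is ignored here.

/*
 * Sketch of the off-channel dwell decision in plain microseconds; not the
 * kernel code, constants only approximate the mac80211 defaults.
 */
#include <stdbool.h>
#include <stdio.h>

#define PROBE_DELAY_US           10000
#define ACTIVE_CHANNEL_TIME_US   30000
#define PASSIVE_CHANNEL_TIME_US 120000

static unsigned long channel_dwell_us(bool passive)
{
        return passive ? PASSIVE_CHANNEL_TIME_US
                       : PROBE_DELAY_US + ACTIVE_CHANNEL_TIME_US;
}

/* true means: go back to the operating channel before scanning further */
static bool must_return_to_oper(bool associated, bool tx_empty,
                                unsigned long now_us,
                                unsigned long left_oper_us,
                                unsigned long pm_qos_latency_us,
                                unsigned long beacon_int_us,
                                unsigned long listen_interval,
                                bool next_chan_passive)
{
        unsigned long dwell = channel_dwell_us(next_chan_passive);
        bool bad_latency = now_us + dwell >
                           left_oper_us + pm_qos_latency_us;
        bool listen_int_exceeded = now_us + dwell >
                           left_oper_us + beacon_int_us * listen_interval;

        if (!associated)
                return false;
        return !tx_empty || bad_latency || listen_int_exceeded;
}

int main(void)
{
        /* 100 TU beacons (~102.4 ms), listen interval 3, 300 ms latency
         * budget, 280 ms already spent away from the operating channel */
        printf("return to oper channel: %d\n",
               must_return_to_oper(true, true, 280000, 0,
                                   300000, 102400, 3, false));
        return 0;
}

In this example another active dwell would push the absence past the 300 ms latency budget (and past three beacon intervals), so the scan would fall back to SCAN_ENTER_OPER_CHANNEL before visiting the next channel.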
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index fb12cec4d333..730197591ab5 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -250,9 +250,6 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
* enable session_timer's data differentiation. refer to
* sta_rx_agg_session_timer_expired for useage */
sta->timer_to_tid[i] = i;
- /* rx */
- sta->ampdu_mlme.tid_state_rx[i] = HT_AGG_STATE_IDLE;
- sta->ampdu_mlme.tid_rx[i] = NULL;
/* tx */
sta->ampdu_mlme.tid_state_tx[i] = HT_AGG_STATE_IDLE;
sta->ampdu_mlme.tid_tx[i] = NULL;
@@ -578,7 +575,7 @@ static int sta_info_buffer_expired(struct sta_info *sta,
}
-static void sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
+static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
struct sta_info *sta)
{
unsigned long flags;
@@ -586,7 +583,7 @@ static void sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata;
if (skb_queue_empty(&sta->ps_tx_buf))
- return;
+ return false;
for (;;) {
spin_lock_irqsave(&sta->ps_tx_buf.lock, flags);
@@ -611,6 +608,8 @@ static void sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
if (skb_queue_empty(&sta->ps_tx_buf))
sta_info_clear_tim_bit(sta);
}
+
+ return true;
}
static int __must_check __sta_info_destroy(struct sta_info *sta)
@@ -619,7 +618,7 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
struct ieee80211_sub_if_data *sdata;
struct sk_buff *skb;
unsigned long flags;
- int ret, i;
+ int ret;
might_sleep();
@@ -629,6 +628,15 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
local = sta->local;
sdata = sta->sdata;
+ /*
+ * Before removing the station from the driver and
+ * rate control, it might still start new aggregation
+ * sessions -- block that to make sure the tear-down
+ * will be sufficient.
+ */
+ set_sta_flags(sta, WLAN_STA_BLOCK_BA);
+ ieee80211_sta_tear_down_BA_sessions(sta);
+
spin_lock_irqsave(&local->sta_lock, flags);
ret = sta_info_hash_del(local, sta);
/* this might still be the pending list ... which is fine */
@@ -645,9 +653,6 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
* may mean it is removed from hardware which requires that
* the key->sta pointer is still valid, so flush the key todo
* list here.
- *
- * ieee80211_key_todo() will synchronize_rcu() so after this
- * nothing can reference this sta struct any more.
*/
ieee80211_key_todo();
@@ -679,11 +684,17 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
sdata = sta->sdata;
}
+ /*
+ * At this point, after we wait for an RCU grace period,
+ * neither mac80211 nor the driver can reference this
+ * sta struct any more except by still existing timers
+ * associated with this station that we clean up below.
+ */
+ synchronize_rcu();
+
#ifdef CONFIG_MAC80211_MESH
- if (ieee80211_vif_is_mesh(&sdata->vif)) {
+ if (ieee80211_vif_is_mesh(&sdata->vif))
mesh_accept_plinks_update(sdata);
- del_timer(&sta->plink_timer);
- }
#endif
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
@@ -710,50 +721,6 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL)
dev_kfree_skb_any(skb);
- for (i = 0; i < STA_TID_NUM; i++) {
- struct tid_ampdu_rx *tid_rx;
- struct tid_ampdu_tx *tid_tx;
-
- spin_lock_bh(&sta->lock);
- tid_rx = sta->ampdu_mlme.tid_rx[i];
- /* Make sure timer won't free the tid_rx struct, see below */
- if (tid_rx)
- tid_rx->shutdown = true;
-
- spin_unlock_bh(&sta->lock);
-
- /*
- * Outside spinlock - shutdown is true now so that the timer
- * won't free tid_rx, we have to do that now. Can't let the
- * timer do it because we have to sync the timer outside the
- * lock that it takes itself.
- */
- if (tid_rx) {
- del_timer_sync(&tid_rx->session_timer);
- kfree(tid_rx);
- }
-
- /*
- * No need to do such complications for TX agg sessions, the
- * path leading to freeing the tid_tx struct goes via a call
- * from the driver, and thus needs to look up the sta struct
- * again, which cannot be found when we get here. Hence, we
- * just need to delete the timer and free the aggregation
- * info; we won't be telling the peer about it then but that
- * doesn't matter if we're not talking to it again anyway.
- */
- tid_tx = sta->ampdu_mlme.tid_tx[i];
- if (tid_tx) {
- del_timer_sync(&tid_tx->addba_resp_timer);
- /*
- * STA removed while aggregation session being
- * started? Bit odd, but purge frames anyway.
- */
- skb_queue_purge(&tid_tx->pending);
- kfree(tid_tx);
- }
- }
-
__sta_info_free(local, sta);
return 0;
@@ -790,15 +757,20 @@ static void sta_info_cleanup(unsigned long data)
{
struct ieee80211_local *local = (struct ieee80211_local *) data;
struct sta_info *sta;
+ bool timer_needed = false;
rcu_read_lock();
list_for_each_entry_rcu(sta, &local->sta_list, list)
- sta_info_cleanup_expire_buffered(local, sta);
+ if (sta_info_cleanup_expire_buffered(local, sta))
+ timer_needed = true;
rcu_read_unlock();
if (local->quiescing)
return;
+ if (!timer_needed)
+ return;
+
local->sta_cleanup.expires =
round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL);
add_timer(&local->sta_cleanup);
@@ -883,8 +855,12 @@ struct ieee80211_sta *ieee80211_find_sta_by_hw(struct ieee80211_hw *hw,
struct sta_info *sta, *nxt;
/* Just return a random station ... first in list ... */
- for_each_sta_info(hw_to_local(hw), addr, sta, nxt)
+ for_each_sta_info(hw_to_local(hw), addr, sta, nxt) {
+ if (!sta->uploaded)
+ return NULL;
return &sta->sta;
+ }
+
return NULL;
}
EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_hw);
@@ -892,14 +868,19 @@ EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_hw);
struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_vif *vif,
const u8 *addr)
{
- struct ieee80211_sub_if_data *sdata;
+ struct sta_info *sta;
if (!vif)
return NULL;
- sdata = vif_to_sdata(vif);
+ sta = sta_info_get_bss(vif_to_sdata(vif), addr);
+ if (!sta)
+ return NULL;
+
+ if (!sta->uploaded)
+ return NULL;
- return ieee80211_find_sta_by_hw(&sdata->local->hw, addr);
+ return &sta->sta;
}
EXPORT_SYMBOL(ieee80211_find_sta);
@@ -992,6 +973,8 @@ void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
{
struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
+ trace_api_sta_block_awake(sta->local, pubsta, block);
+
if (block)
set_sta_flags(sta, WLAN_STA_PS_DRIVER);
else
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 822d84522937..48a5e80957f0 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -35,8 +35,8 @@
* IEEE80211_TX_CTL_CLEAR_PS_FILT control flag) when the next
* frame to this station is transmitted.
* @WLAN_STA_MFP: Management frame protection is used with this STA.
- * @WLAN_STA_SUSPEND: Set/cleared during a suspend/resume cycle.
- * Used to deny ADDBA requests (both TX and RX).
+ * @WLAN_STA_BLOCK_BA: Used to deny ADDBA requests (both TX and RX)
+ * during suspend/resume and station removal.
* @WLAN_STA_PS_DRIVER: driver requires keeping this station in
* power-save mode logically to flush frames that might still
* be in the queues
@@ -57,7 +57,7 @@ enum ieee80211_sta_info_flags {
WLAN_STA_WDS = 1<<7,
WLAN_STA_CLEAR_PS_FILT = 1<<9,
WLAN_STA_MFP = 1<<10,
- WLAN_STA_SUSPEND = 1<<11,
+ WLAN_STA_BLOCK_BA = 1<<11,
WLAN_STA_PS_DRIVER = 1<<12,
WLAN_STA_PSPOLL = 1<<13,
WLAN_STA_DISASSOC = 1<<14,
@@ -106,7 +106,6 @@ struct tid_ampdu_tx {
* @buf_size: buffer size for incoming A-MPDUs
* @timeout: reset timer value (in TUs).
* @dialog_token: dialog token for aggregation session
- * @shutdown: this session is being shut down due to STA removal
*/
struct tid_ampdu_rx {
struct sk_buff **reorder_buf;
@@ -118,7 +117,6 @@ struct tid_ampdu_rx {
u16 buf_size;
u16 timeout;
u8 dialog_token;
- bool shutdown;
};
/**
@@ -156,7 +154,7 @@ enum plink_state {
*/
struct sta_ampdu_mlme {
/* rx */
- u8 tid_state_rx[STA_TID_NUM];
+ bool tid_active_rx[STA_TID_NUM];
struct tid_ampdu_rx *tid_rx[STA_TID_NUM];
/* tx */
u8 tid_state_tx[STA_TID_NUM];
@@ -200,7 +198,6 @@ struct sta_ampdu_mlme {
* @rx_fragments: number of received MPDUs
* @rx_dropped: number of dropped MPDUs from this STA
* @last_signal: signal of last received frame from this STA
- * @last_noise: noise of last received frame from this STA
* @last_seq_ctrl: last received seq/frag number from this STA (per RX queue)
* @tx_filtered_count: number of frames the hardware filtered for this STA
* @tx_retry_failed: number of frames that failed retry
@@ -267,7 +264,6 @@ struct sta_info {
unsigned long rx_fragments;
unsigned long rx_dropped;
int last_signal;
- int last_noise;
__le16 last_seq_ctrl[NUM_RX_DATA_QUEUES];
/* Updated from TX status path only, no locking requirements */
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 56d5b9a6ec5b..94613af009f3 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -171,13 +171,16 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
struct net_device *prev_dev = NULL;
struct sta_info *sta, *tmp;
int retry_count = -1, i;
- bool injected;
+ int rates_idx = -1;
+ bool send_to_cooked;
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
/* the HW cannot have attempted that rate */
if (i >= hw->max_rates) {
info->status.rates[i].idx = -1;
info->status.rates[i].count = 0;
+ } else if (info->status.rates[i].idx >= 0) {
+ rates_idx = i;
}
retry_count += info->status.rates[i].count;
@@ -206,6 +209,10 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
return;
}
+ if ((local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) &&
+ (rates_idx != -1))
+ sta->last_tx_rate = info->status.rates[rates_idx];
+
if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) &&
(ieee80211_is_data_qos(fc))) {
u16 tid, ssn;
@@ -296,11 +303,15 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
/* this was a transmitted frame, but now we want to reuse it */
skb_orphan(skb);
+ /* Need to make a copy before skb->cb gets cleared */
+ send_to_cooked = !!(info->flags & IEEE80211_TX_CTL_INJECTED) ||
+ (type != IEEE80211_FTYPE_DATA);
+
/*
* This is a bit racy but we can avoid a lot of work
* with this test...
*/
- if (!local->monitors && !local->cooked_mntrs) {
+ if (!local->monitors && (!send_to_cooked || !local->cooked_mntrs)) {
dev_kfree_skb(skb);
return;
}
@@ -345,9 +356,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
/* for now report the total retry_count */
rthdr->data_retries = retry_count;
- /* Need to make a copy before skb->cb gets cleared */
- injected = !!(info->flags & IEEE80211_TX_CTL_INJECTED);
-
/* XXX: is this sufficient for BPF? */
skb_set_mac_header(skb, 0);
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -362,8 +370,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
continue;
if ((sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) &&
- !injected &&
- (type == IEEE80211_FTYPE_DATA))
+ !send_to_cooked)
continue;
if (prev_dev) {
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index cfc473e1b050..680bcb7093db 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -429,6 +429,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
struct sta_info *sta = tx->sta;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
+ struct ieee80211_local *local = tx->local;
u32 staflags;
if (unlikely(!sta ||
@@ -476,6 +477,12 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
info->control.vif = &tx->sdata->vif;
info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
skb_queue_tail(&sta->ps_tx_buf, tx->skb);
+
+ if (!timer_pending(&local->sta_cleanup))
+ mod_timer(&local->sta_cleanup,
+ round_jiffies(jiffies +
+ STA_INFO_CLEANUP_INTERVAL));
+
return TX_QUEUED;
}
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
@@ -513,6 +520,8 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
else if (tx->sta && (key = rcu_dereference(tx->sta->key)))
tx->key = key;
else if (ieee80211_is_mgmt(hdr->frame_control) &&
+ is_multicast_ether_addr(hdr->addr1) &&
+ ieee80211_is_robust_mgmt_frame(hdr) &&
(key = rcu_dereference(tx->sdata->default_mgmt_key)))
tx->key = key;
else if ((key = rcu_dereference(tx->sdata->default_key)))
@@ -584,7 +593,8 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
struct ieee80211_hdr *hdr = (void *)tx->skb->data;
struct ieee80211_supported_band *sband;
struct ieee80211_rate *rate;
- int i, len;
+ int i;
+ u32 len;
bool inval = false, rts = false, short_preamble = false;
struct ieee80211_tx_rate_control txrc;
u32 sta_flags;
@@ -593,7 +603,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
sband = tx->local->hw.wiphy->bands[tx->channel->band];
- len = min_t(int, tx->skb->len + FCS_LEN,
+ len = min_t(u32, tx->skb->len + FCS_LEN,
tx->local->hw.wiphy->frag_threshold);
/* set up the tx rate control struct we give the RC algo */
@@ -1142,13 +1152,12 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) {
- unsigned long flags;
struct tid_ampdu_tx *tid_tx;
qc = ieee80211_get_qos_ctl(hdr);
tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
- spin_lock_irqsave(&tx->sta->lock, flags);
+ spin_lock(&tx->sta->lock);
/*
* XXX: This spinlock could be fairly expensive, but see the
* comment in agg-tx.c:ieee80211_agg_tx_operational().
@@ -1173,7 +1182,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
__skb_queue_tail(&tid_tx->pending, skb);
}
- spin_unlock_irqrestore(&tx->sta->lock, flags);
+ spin_unlock(&tx->sta->lock);
if (unlikely(queued))
return TX_QUEUED;
@@ -2011,14 +2020,12 @@ void ieee80211_tx_pending(unsigned long data)
while (!skb_queue_empty(&local->pending[i])) {
struct sk_buff *skb = __skb_dequeue(&local->pending[i]);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_sub_if_data *sdata;
if (WARN_ON(!info->control.vif)) {
kfree_skb(skb);
continue;
}
- sdata = vif_to_sdata(info->control.vif);
spin_unlock_irqrestore(&local->queue_stop_reason_lock,
flags);
@@ -2244,8 +2251,9 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
info->control.vif = vif;
- info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
- info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
+ info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT |
+ IEEE80211_TX_CTL_ASSIGN_SEQ |
+ IEEE80211_TX_CTL_FIRST_FRAGMENT;
out:
rcu_read_unlock();
return skb;
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 53af57047435..5b79d552780a 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -270,6 +270,8 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_sub_if_data *sdata;
+ trace_wake_queue(local, queue, reason);
+
if (WARN_ON(queue >= hw->queues))
return;
@@ -312,6 +314,8 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_sub_if_data *sdata;
+ trace_stop_queue(local, queue, reason);
+
if (WARN_ON(queue >= hw->queues))
return;
@@ -796,6 +800,11 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata)
drv_conf_tx(local, queue, &qparam);
}
+
+ /* after reinitializing the QoS TX queue settings to their defaults,
+ * disable QoS entirely */
+ local->hw.conf.flags &= ~IEEE80211_CONF_QOS;
+ drv_config(local, IEEE80211_CONF_CHANGE_QOS);
}
void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
@@ -1135,7 +1144,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
list_for_each_entry_rcu(sta, &local->sta_list, list) {
- clear_sta_flags(sta, WLAN_STA_SUSPEND);
+ clear_sta_flags(sta, WLAN_STA_BLOCK_BA);
}
}
@@ -1151,18 +1160,33 @@ int ieee80211_reconfig(struct ieee80211_local *local)
/* Finally also reconfigure all the BSS information */
list_for_each_entry(sdata, &local->interfaces, list) {
- u32 changed = ~0;
+ u32 changed;
+
if (!ieee80211_sdata_running(sdata))
continue;
+
+ /* common change flags for all interface types */
+ changed = BSS_CHANGED_ERP_CTS_PROT |
+ BSS_CHANGED_ERP_PREAMBLE |
+ BSS_CHANGED_ERP_SLOT |
+ BSS_CHANGED_HT |
+ BSS_CHANGED_BASIC_RATES |
+ BSS_CHANGED_BEACON_INT |
+ BSS_CHANGED_BSSID |
+ BSS_CHANGED_CQM;
+
switch (sdata->vif.type) {
case NL80211_IFTYPE_STATION:
- /* disable beacon change bits */
- changed &= ~(BSS_CHANGED_BEACON |
- BSS_CHANGED_BEACON_ENABLED);
- /* fall through */
+ changed |= BSS_CHANGED_ASSOC;
+ ieee80211_bss_info_change_notify(sdata, changed);
+ break;
case NL80211_IFTYPE_ADHOC:
+ changed |= BSS_CHANGED_IBSS;
+ /* fall through */
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MESH_POINT:
+ changed |= BSS_CHANGED_BEACON |
+ BSS_CHANGED_BEACON_ENABLED;
ieee80211_bss_info_change_notify(sdata, changed);
break;
case NL80211_IFTYPE_WDS:
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index 15e1ba931b87..3dd07600199d 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -213,15 +213,25 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
sband = local->hw.wiphy->bands[wk->chan->band];
- /*
- * Get all rates supported by the device and the AP as
- * some APs don't like getting a superset of their rates
- * in the association request (e.g. D-Link DAP 1353 in
- * b-only mode)...
- */
- rates_len = ieee80211_compatible_rates(wk->assoc.supp_rates,
- wk->assoc.supp_rates_len,
- sband, &rates);
+ if (wk->assoc.supp_rates_len) {
+ /*
+ * Get all rates supported by the device and the AP as
+ * some APs don't like getting a superset of their rates
+ * in the association request (e.g. D-Link DAP 1353 in
+ * b-only mode)...
+ */
+ rates_len = ieee80211_compatible_rates(wk->assoc.supp_rates,
+ wk->assoc.supp_rates_len,
+ sband, &rates);
+ } else {
+ /*
+ * If the AP did not provide any supported-rates information
+ * before association, send information element(s) with
+ * all of the rates that we support.
+ */
+ rates = ~0;
+ rates_len = sband->n_bitrates;
+ }
skb = alloc_skb(local->hw.extra_tx_headroom +
sizeof(*mgmt) + /* bit too much but doesn't matter */
@@ -920,11 +930,16 @@ static void ieee80211_work_work(struct work_struct *work)
run_again(local, jiffies + HZ/2);
}
- if (list_empty(&local->work_list) && local->scan_req)
+ mutex_lock(&local->scan_mtx);
+
+ if (list_empty(&local->work_list) && local->scan_req &&
+ !local->scanning)
ieee80211_queue_delayed_work(&local->hw,
&local->scan_work,
round_jiffies_relative(0));
+ mutex_unlock(&local->scan_mtx);
+
mutex_unlock(&local->work_mtx);
ieee80211_recalc_idle(local);
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 18d77b5c351a..673a6c8f0e95 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -314,8 +314,39 @@ config NETFILTER_XTABLES
if NETFILTER_XTABLES
+comment "Xtables combined modules"
+
+config NETFILTER_XT_MARK
+ tristate 'nfmark target and match support'
+ default m if NETFILTER_ADVANCED=n
+ ---help---
+ This option adds the "MARK" target and "mark" match.
+
+ Netfilter mark matching allows you to match packets based on the
+ "nfmark" value in the packet.
+ The target allows you to create rules in the "mangle" table which alter
+ the netfilter mark (nfmark) field associated with the packet.
+
+ Prior to routing, the nfmark can influence the routing method (see
+ "Use netfilter MARK value as routing key") and can also be used by
+ other subsystems to change their behavior.
+
+config NETFILTER_XT_CONNMARK
+ tristate 'ctmark target and match support'
+ depends on NF_CONNTRACK
+ depends on NETFILTER_ADVANCED
+ select NF_CONNTRACK_MARK
+ ---help---
+ This option adds the "CONNMARK" target and "connmark" match.
+
+ Netfilter allows you to store a mark value per connection (a.k.a.
+ ctmark), similarly to the packet mark (nfmark). Using this
+ target and match, you can set and match on this mark.
+
# alphabetically ordered list of targets
+comment "Xtables targets"
+
config NETFILTER_XT_TARGET_CLASSIFY
tristate '"CLASSIFY" target support'
depends on NETFILTER_ADVANCED
@@ -332,15 +363,11 @@ config NETFILTER_XT_TARGET_CONNMARK
tristate '"CONNMARK" target support'
depends on NF_CONNTRACK
depends on NETFILTER_ADVANCED
- select NF_CONNTRACK_MARK
- help
- This option adds a `CONNMARK' target, which allows one to manipulate
- the connection mark value. Similar to the MARK target, but
- affects the connection mark value rather than the packet mark value.
-
- If you want to compile it as a module, say M here and read
- <file:Documentation/kbuild/modules.txt>. The module will be called
- ipt_CONNMARK. If unsure, say `N'.
+ select NETFILTER_XT_CONNMARK
+ ---help---
+ This is a backwards-compat option for the user's convenience
+ (e.g. when running oldconfig). It selects
+ CONFIG_NETFILTER_XT_CONNMARK (combined connmark/CONNMARK module).
config NETFILTER_XT_TARGET_CONNSECMARK
tristate '"CONNSECMARK" target support'
@@ -423,16 +450,12 @@ config NETFILTER_XT_TARGET_LED
config NETFILTER_XT_TARGET_MARK
tristate '"MARK" target support'
- default m if NETFILTER_ADVANCED=n
- help
- This option adds a `MARK' target, which allows you to create rules
- in the `mangle' table which alter the netfilter mark (nfmark) field
- associated with the packet prior to routing. This can change
- the routing method (see `Use netfilter MARK value as routing
- key') and can also be used by other subsystems to change their
- behavior.
-
- To compile it as a module, choose M here. If unsure, say N.
+ depends on NETFILTER_ADVANCED
+ select NETFILTER_XT_MARK
+ ---help---
+ This is a backwards-compat option for the user's convenience
+ (e.g. when running oldconfig). It selects
+ CONFIG_NETFILTER_XT_MARK (combined mark/MARK module).
config NETFILTER_XT_TARGET_NFLOG
tristate '"NFLOG" target support'
@@ -479,6 +502,13 @@ config NETFILTER_XT_TARGET_RATEEST
To compile it as a module, choose M here. If unsure, say N.
+config NETFILTER_XT_TARGET_TEE
+ tristate '"TEE" - packet cloning to alternate destination'
+ depends on NETFILTER_ADVANCED
+ ---help---
+ This option adds a "TEE" target with which a packet can be cloned and
+ the clone rerouted to another nexthop.
+
config NETFILTER_XT_TARGET_TPROXY
tristate '"TPROXY" target support (EXPERIMENTAL)'
depends on EXPERIMENTAL
@@ -552,6 +582,10 @@ config NETFILTER_XT_TARGET_TCPOPTSTRIP
This option adds a "TCPOPTSTRIP" target, which allows you to strip
TCP options from TCP packets.
+# alphabetically ordered list of matches
+
+comment "Xtables matches"
+
config NETFILTER_XT_MATCH_CLUSTER
tristate '"cluster" match support'
depends on NF_CONNTRACK
@@ -602,14 +636,11 @@ config NETFILTER_XT_MATCH_CONNMARK
tristate '"connmark" connection mark match support'
depends on NF_CONNTRACK
depends on NETFILTER_ADVANCED
- select NF_CONNTRACK_MARK
- help
- This option adds a `connmark' match, which allows you to match the
- connection mark value previously set for the session by `CONNMARK'.
-
- If you want to compile it as a module, say M here and read
- <file:Documentation/kbuild/modules.txt>. The module will be called
- ipt_connmark. If unsure, say `N'.
+ select NETFILTER_XT_CONNMARK
+ ---help---
+ This is a backwards-compat option for the user's convenience
+ (e.g. when running oldconfig). It selects
+ CONFIG_NETFILTER_XT_CONNMARK (combined connmark/CONNMARK module).
config NETFILTER_XT_MATCH_CONNTRACK
tristate '"conntrack" connection tracking match support'
@@ -733,13 +764,12 @@ config NETFILTER_XT_MATCH_MAC
config NETFILTER_XT_MATCH_MARK
tristate '"mark" match support'
- default m if NETFILTER_ADVANCED=n
- help
- Netfilter mark matching allows you to match packets based on the
- `nfmark' value in the packet. This can be set by the MARK target
- (see below).
-
- To compile it as a module, choose M here. If unsure, say N.
+ depends on NETFILTER_ADVANCED
+ select NETFILTER_XT_MARK
+ ---help---
+ This is a backwards-compat option for the user's convenience
+ (e.g. when running oldconfig). It selects
+ CONFIG_NETFILTER_XT_MARK (combined mark/MARK module).
config NETFILTER_XT_MATCH_MULTIPORT
tristate '"multiport" Multiple port match support'
@@ -751,6 +781,19 @@ config NETFILTER_XT_MATCH_MULTIPORT
To compile it as a module, choose M here. If unsure, say N.
+config NETFILTER_XT_MATCH_OSF
+ tristate '"osf" Passive OS fingerprint match'
+ depends on NETFILTER_ADVANCED && NETFILTER_NETLINK
+ help
+ This option selects the Passive OS Fingerprinting match module
+ that allows you to passively match the remote operating system by
+ analyzing incoming TCP SYN packets.
+
+ Rules and loading software can be downloaded from
+ http://www.ioremap.net/projects/osf
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config NETFILTER_XT_MATCH_OWNER
tristate '"owner" match support'
depends on NETFILTER_ADVANCED
@@ -836,13 +879,6 @@ config NETFILTER_XT_MATCH_RECENT
Short options are available by using 'iptables -m recent -h'
Official Website: <http://snowman.net/projects/ipt_recent/>
-config NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
- bool 'Enable obsolete /proc/net/ipt_recent'
- depends on NETFILTER_XT_MATCH_RECENT && PROC_FS
- ---help---
- This option enables the old /proc/net/ipt_recent interface,
- which has been obsoleted by /proc/net/xt_recent.
-
config NETFILTER_XT_MATCH_SCTP
tristate '"sctp" protocol match support (EXPERIMENTAL)'
depends on EXPERIMENTAL
@@ -942,19 +978,6 @@ config NETFILTER_XT_MATCH_U32
Details and examples are in the kernel module source.
-config NETFILTER_XT_MATCH_OSF
- tristate '"osf" Passive OS fingerprint match'
- depends on NETFILTER_ADVANCED && NETFILTER_NETLINK
- help
- This option selects the Passive OS Fingerprinting match module
- that allows to passively match the remote operating system by
- analyzing incoming TCP SYN packets.
-
- Rules and loading software can be downloaded from
- http://www.ioremap.net/projects/osf
-
- To compile it as a module, choose M here. If unsure, say N.
-
endif # NETFILTER_XTABLES
endmenu
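The two "combined" Kconfig entries added above fold the old MARK target and mark match into a single xt_mark module (and likewise CONNMARK/connmark into xt_connmark). A minimal sketch of how one module can register both halves follows; it reuses the xt_register_target()/xt_register_match() API visible elsewhere in this diff, and the header and info-structure names are assumptions based on the deleted xt_MARK.c shown further down.

	/* Hedged sketch of a combined target+match module, modeled on the
	 * xt_mark merge described above. Header and struct names are
	 * assumed, not taken from this patch. */
	#include <linux/module.h>
	#include <linux/skbuff.h>
	#include <linux/netfilter/x_tables.h>
	#include <linux/netfilter/xt_mark.h>

	static unsigned int
	mark_tg(struct sk_buff *skb, const struct xt_target_param *par)
	{
		const struct xt_mark_tginfo2 *info = par->targinfo;

		skb->mark = (skb->mark & ~info->mask) ^ info->mark;
		return XT_CONTINUE;
	}

	static bool
	mark_mt(const struct sk_buff *skb, const struct xt_match_param *par)
	{
		const struct xt_mark_mtinfo1 *info = par->matchinfo;

		return ((skb->mark & info->mask) == info->mark) ^ info->invert;
	}

	static struct xt_target mark_tg_reg __read_mostly = {
		.name       = "MARK",
		.revision   = 2,
		.family     = NFPROTO_UNSPEC,
		.target     = mark_tg,
		.targetsize = sizeof(struct xt_mark_tginfo2),
		.me         = THIS_MODULE,
	};

	static struct xt_match mark_mt_reg __read_mostly = {
		.name       = "mark",
		.revision   = 1,
		.family     = NFPROTO_UNSPEC,
		.match      = mark_mt,
		.matchsize  = sizeof(struct xt_mark_mtinfo1),
		.me         = THIS_MODULE,
	};

	static int __init mark_mt_init(void)
	{
		int ret = xt_register_target(&mark_tg_reg);

		if (ret < 0)
			return ret;
		ret = xt_register_match(&mark_mt_reg);
		if (ret < 0)
			xt_unregister_target(&mark_tg_reg);
		return ret;
	}

	static void __exit mark_mt_exit(void)
	{
		xt_unregister_match(&mark_mt_reg);
		xt_unregister_target(&mark_tg_reg);
	}

	module_init(mark_mt_init);
	module_exit(mark_mt_exit);
	MODULE_LICENSE("GPL");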
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index f873644f02f6..14e3a8fd8180 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -40,15 +40,17 @@ obj-$(CONFIG_NETFILTER_TPROXY) += nf_tproxy_core.o
# generic X tables
obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
+# combos
+obj-$(CONFIG_NETFILTER_XT_MARK) += xt_mark.o
+obj-$(CONFIG_NETFILTER_XT_CONNMARK) += xt_connmark.o
+
# targets
obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIFY) += xt_CLASSIFY.o
-obj-$(CONFIG_NETFILTER_XT_TARGET_CONNMARK) += xt_CONNMARK.o
obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
obj-$(CONFIG_NETFILTER_XT_TARGET_CT) += xt_CT.o
obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o
obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
-obj-$(CONFIG_NETFILTER_XT_TARGET_MARK) += xt_MARK.o
obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o
obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o
obj-$(CONFIG_NETFILTER_XT_TARGET_NOTRACK) += xt_NOTRACK.o
@@ -57,6 +59,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_SECMARK) += xt_SECMARK.o
obj-$(CONFIG_NETFILTER_XT_TARGET_TPROXY) += xt_TPROXY.o
obj-$(CONFIG_NETFILTER_XT_TARGET_TCPMSS) += xt_TCPMSS.o
obj-$(CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP) += xt_TCPOPTSTRIP.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_TEE) += xt_TEE.o
obj-$(CONFIG_NETFILTER_XT_TARGET_TRACE) += xt_TRACE.o
# matches
@@ -64,7 +67,6 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CLUSTER) += xt_cluster.o
obj-$(CONFIG_NETFILTER_XT_MATCH_COMMENT) += xt_comment.o
obj-$(CONFIG_NETFILTER_XT_MATCH_CONNBYTES) += xt_connbytes.o
obj-$(CONFIG_NETFILTER_XT_MATCH_CONNLIMIT) += xt_connlimit.o
-obj-$(CONFIG_NETFILTER_XT_MATCH_CONNMARK) += xt_connmark.o
obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
@@ -76,7 +78,6 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_IPRANGE) += xt_iprange.o
obj-$(CONFIG_NETFILTER_XT_MATCH_LENGTH) += xt_length.o
obj-$(CONFIG_NETFILTER_XT_MATCH_LIMIT) += xt_limit.o
obj-$(CONFIG_NETFILTER_XT_MATCH_MAC) += xt_mac.o
-obj-$(CONFIG_NETFILTER_XT_MATCH_MARK) += xt_mark.o
obj-$(CONFIG_NETFILTER_XT_MATCH_MULTIPORT) += xt_multiport.o
obj-$(CONFIG_NETFILTER_XT_MATCH_OSF) += xt_osf.o
obj-$(CONFIG_NETFILTER_XT_MATCH_OWNER) += xt_owner.o
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 2c7f185dfae4..2ae747a376a5 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -209,8 +209,14 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
*/
from.ip = n_cp->vaddr.ip;
port = n_cp->vport;
- sprintf(buf, "%u,%u,%u,%u,%u,%u", NIPQUAD(from.ip),
- (ntohs(port)>>8)&255, ntohs(port)&255);
+ snprintf(buf, sizeof(buf), "%u,%u,%u,%u,%u,%u",
+ ((unsigned char *)&from.ip)[0],
+ ((unsigned char *)&from.ip)[1],
+ ((unsigned char *)&from.ip)[2],
+ ((unsigned char *)&from.ip)[3],
+ ntohs(port) >> 8,
+ ntohs(port) & 0xFF);
+
buf_len = strlen(buf);
/*
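The ip_vs_ftp.c hunk above replaces the removed NIPQUAD() macro with explicit per-byte access and bounds the output with snprintf(). A standalone userspace sketch of the same byte-wise formatting, using a made-up address and port, might look like this:

	/* Userspace illustration of the dotted-quad formatting used above;
	 * the address and port values are arbitrary. */
	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>
	#include <arpa/inet.h>

	int main(void)
	{
		uint32_t ip = htonl(0xC0A80001);   /* 192.168.0.1 in network order */
		uint16_t port = htons(21);         /* FTP control port */
		unsigned char b[4];
		char buf[64];

		memcpy(b, &ip, sizeof(b));         /* bytes are already in wire order */
		snprintf(buf, sizeof(buf), "%u,%u,%u,%u,%u,%u",
			 b[0], b[1], b[2], b[3],
			 ntohs(port) >> 8, ntohs(port) & 0xFF);
		puts(buf);                         /* prints 192,168,0,1,0,21 */
		return 0;
	}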
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index 7fc49f4cf5ad..2d3d5e4b35f8 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -167,26 +167,24 @@ ip_vs_tcpudp_debug_packet_v4(struct ip_vs_protocol *pp,
ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
if (ih == NULL)
- sprintf(buf, "%s TRUNCATED", pp->name);
+ sprintf(buf, "TRUNCATED");
else if (ih->frag_off & htons(IP_OFFSET))
- sprintf(buf, "%s %pI4->%pI4 frag",
- pp->name, &ih->saddr, &ih->daddr);
+ sprintf(buf, "%pI4->%pI4 frag", &ih->saddr, &ih->daddr);
else {
__be16 _ports[2], *pptr;
pptr = skb_header_pointer(skb, offset + ih->ihl*4,
sizeof(_ports), _ports);
if (pptr == NULL)
- sprintf(buf, "%s TRUNCATED %pI4->%pI4",
- pp->name, &ih->saddr, &ih->daddr);
+ sprintf(buf, "TRUNCATED %pI4->%pI4",
+ &ih->saddr, &ih->daddr);
else
- sprintf(buf, "%s %pI4:%u->%pI4:%u",
- pp->name,
+ sprintf(buf, "%pI4:%u->%pI4:%u",
&ih->saddr, ntohs(pptr[0]),
&ih->daddr, ntohs(pptr[1]));
}
- pr_debug("%s: %s\n", msg, buf);
+ pr_debug("%s: %s %s\n", msg, pp->name, buf);
}
#ifdef CONFIG_IP_VS_IPV6
@@ -201,26 +199,24 @@ ip_vs_tcpudp_debug_packet_v6(struct ip_vs_protocol *pp,
ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
if (ih == NULL)
- sprintf(buf, "%s TRUNCATED", pp->name);
+ sprintf(buf, "TRUNCATED");
else if (ih->nexthdr == IPPROTO_FRAGMENT)
- sprintf(buf, "%s %pI6->%pI6 frag",
- pp->name, &ih->saddr, &ih->daddr);
+ sprintf(buf, "%pI6->%pI6 frag", &ih->saddr, &ih->daddr);
else {
__be16 _ports[2], *pptr;
pptr = skb_header_pointer(skb, offset + sizeof(struct ipv6hdr),
sizeof(_ports), _ports);
if (pptr == NULL)
- sprintf(buf, "%s TRUNCATED %pI6->%pI6",
- pp->name, &ih->saddr, &ih->daddr);
+ sprintf(buf, "TRUNCATED %pI6->%pI6",
+ &ih->saddr, &ih->daddr);
else
- sprintf(buf, "%s %pI6:%u->%pI6:%u",
- pp->name,
+ sprintf(buf, "%pI6:%u->%pI6:%u",
&ih->saddr, ntohs(pptr[0]),
&ih->daddr, ntohs(pptr[1]));
}
- pr_debug("%s: %s\n", msg, buf);
+ pr_debug("%s: %s %s\n", msg, pp->name, buf);
}
#endif
diff --git a/net/netfilter/ipvs/ip_vs_proto_ah_esp.c b/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
index c30b43c36cd7..1892dfc12fdd 100644
--- a/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
@@ -136,12 +136,11 @@ ah_esp_debug_packet_v4(struct ip_vs_protocol *pp, const struct sk_buff *skb,
ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
if (ih == NULL)
- sprintf(buf, "%s TRUNCATED", pp->name);
+ sprintf(buf, "TRUNCATED");
else
- sprintf(buf, "%s %pI4->%pI4",
- pp->name, &ih->saddr, &ih->daddr);
+ sprintf(buf, "%pI4->%pI4", &ih->saddr, &ih->daddr);
- pr_debug("%s: %s\n", msg, buf);
+ pr_debug("%s: %s %s\n", msg, pp->name, buf);
}
#ifdef CONFIG_IP_VS_IPV6
@@ -154,12 +153,11 @@ ah_esp_debug_packet_v6(struct ip_vs_protocol *pp, const struct sk_buff *skb,
ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
if (ih == NULL)
- sprintf(buf, "%s TRUNCATED", pp->name);
+ sprintf(buf, "TRUNCATED");
else
- sprintf(buf, "%s %pI6->%pI6",
- pp->name, &ih->saddr, &ih->daddr);
+ sprintf(buf, "%pI6->%pI6", &ih->saddr, &ih->daddr);
- pr_debug("%s: %s\n", msg, buf);
+ pr_debug("%s: %s %s\n", msg, pp->name, buf);
}
#endif
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 8fb0ae616761..7ba06939829f 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -802,7 +802,7 @@ static int sync_thread_backup(void *data)
ip_vs_backup_mcast_ifn, ip_vs_backup_syncid);
while (!kthread_should_stop()) {
- wait_event_interruptible(*tinfo->sock->sk->sk_sleep,
+ wait_event_interruptible(*sk_sleep(tinfo->sock->sk),
!skb_queue_empty(&tinfo->sock->sk->sk_receive_queue)
|| kthread_should_stop());
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index e450cd6f4eb5..93c15a107b2c 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -270,7 +270,7 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
/* Another hack: avoid icmp_send in ip_fragment */
skb->local_df = 1;
- IP_VS_XMIT(PF_INET, skb, rt);
+ IP_VS_XMIT(NFPROTO_IPV4, skb, rt);
LeaveFunction(10);
return NF_STOLEN;
@@ -334,7 +334,7 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
/* Another hack: avoid icmp_send in ip_fragment */
skb->local_df = 1;
- IP_VS_XMIT(PF_INET6, skb, rt);
+ IP_VS_XMIT(NFPROTO_IPV6, skb, rt);
LeaveFunction(10);
return NF_STOLEN;
@@ -410,7 +410,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
/* Another hack: avoid icmp_send in ip_fragment */
skb->local_df = 1;
- IP_VS_XMIT(PF_INET, skb, rt);
+ IP_VS_XMIT(NFPROTO_IPV4, skb, rt);
LeaveFunction(10);
return NF_STOLEN;
@@ -486,7 +486,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
/* Another hack: avoid icmp_send in ip_fragment */
skb->local_df = 1;
- IP_VS_XMIT(PF_INET6, skb, rt);
+ IP_VS_XMIT(NFPROTO_IPV6, skb, rt);
LeaveFunction(10);
return NF_STOLEN;
@@ -785,7 +785,7 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
/* Another hack: avoid icmp_send in ip_fragment */
skb->local_df = 1;
- IP_VS_XMIT(PF_INET, skb, rt);
+ IP_VS_XMIT(NFPROTO_IPV4, skb, rt);
LeaveFunction(10);
return NF_STOLEN;
@@ -838,7 +838,7 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
/* Another hack: avoid icmp_send in ip_fragment */
skb->local_df = 1;
- IP_VS_XMIT(PF_INET6, skb, rt);
+ IP_VS_XMIT(NFPROTO_IPV6, skb, rt);
LeaveFunction(10);
return NF_STOLEN;
@@ -912,7 +912,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
/* Another hack: avoid icmp_send in ip_fragment */
skb->local_df = 1;
- IP_VS_XMIT(PF_INET, skb, rt);
+ IP_VS_XMIT(NFPROTO_IPV4, skb, rt);
rc = NF_STOLEN;
goto out;
@@ -987,7 +987,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
/* Another hack: avoid icmp_send in ip_fragment */
skb->local_df = 1;
- IP_VS_XMIT(PF_INET6, skb, rt);
+ IP_VS_XMIT(NFPROTO_IPV6, skb, rt);
rc = NF_STOLEN;
goto out;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 0c9bbe93cc16..3907efb97a7c 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -319,8 +319,10 @@ begin:
* not the expected one, we must restart lookup.
* We probably met an item that was moved to another chain.
*/
- if (get_nulls_value(n) != hash)
+ if (get_nulls_value(n) != hash) {
+ NF_CT_STAT_INC(net, search_restart);
goto begin;
+ }
local_bh_enable();
return NULL;
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index f516961a83b4..cdcc7649476b 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -85,7 +85,8 @@ int nf_conntrack_register_notifier(struct nf_ct_event_notifier *new)
struct nf_ct_event_notifier *notify;
mutex_lock(&nf_ct_ecache_mutex);
- notify = rcu_dereference(nf_conntrack_event_cb);
+ notify = rcu_dereference_protected(nf_conntrack_event_cb,
+ lockdep_is_held(&nf_ct_ecache_mutex));
if (notify != NULL) {
ret = -EBUSY;
goto out_unlock;
@@ -105,7 +106,8 @@ void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *new)
struct nf_ct_event_notifier *notify;
mutex_lock(&nf_ct_ecache_mutex);
- notify = rcu_dereference(nf_conntrack_event_cb);
+ notify = rcu_dereference_protected(nf_conntrack_event_cb,
+ lockdep_is_held(&nf_ct_ecache_mutex));
BUG_ON(notify != new);
rcu_assign_pointer(nf_conntrack_event_cb, NULL);
mutex_unlock(&nf_ct_ecache_mutex);
@@ -118,7 +120,8 @@ int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *new)
struct nf_exp_event_notifier *notify;
mutex_lock(&nf_ct_ecache_mutex);
- notify = rcu_dereference(nf_expect_event_cb);
+ notify = rcu_dereference_protected(nf_expect_event_cb,
+ lockdep_is_held(&nf_ct_ecache_mutex));
if (notify != NULL) {
ret = -EBUSY;
goto out_unlock;
@@ -138,7 +141,8 @@ void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *new)
struct nf_exp_event_notifier *notify;
mutex_lock(&nf_ct_ecache_mutex);
- notify = rcu_dereference(nf_expect_event_cb);
+ notify = rcu_dereference_protected(nf_expect_event_cb,
+ lockdep_is_held(&nf_ct_ecache_mutex));
BUG_ON(notify != new);
rcu_assign_pointer(nf_expect_event_cb, NULL);
mutex_unlock(&nf_ct_ecache_mutex);
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index a487c8038044..48bf15073a85 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -194,8 +194,7 @@ static int get_tpkt_data(struct sk_buff *skb, unsigned int protoff,
return 0;
}
- if (net_ratelimit())
- printk("nf_ct_h323: incomplete TPKT (fragmented?)\n");
+ pr_debug("nf_ct_h323: incomplete TPKT (fragmented?)\n");
goto clear_out;
}
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index afc52f2ee4ac..4e55403bf263 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -427,6 +427,17 @@ ctnetlink_proto_size(const struct nf_conn *ct)
}
static inline size_t
+ctnetlink_counters_size(const struct nf_conn *ct)
+{
+ if (!nf_ct_ext_exist(ct, NF_CT_EXT_ACCT))
+ return 0;
+ return 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */
+ + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */
+ + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */
+ ;
+}
+
+static inline size_t
ctnetlink_nlmsg_size(const struct nf_conn *ct)
{
return NLMSG_ALIGN(sizeof(struct nfgenmsg))
@@ -436,11 +447,7 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct)
+ 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
+ nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
+ nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
-#ifdef CONFIG_NF_CT_ACCT
- + 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */
- + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */
- + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */
-#endif
+ + ctnetlink_counters_size(ct)
+ nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
+ nla_total_size(0) /* CTA_PROTOINFO */
+ nla_total_size(0) /* CTA_HELP */
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index a44fa75b5178..5886ba1d52a0 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -14,12 +14,10 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mutex.h>
-#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/percpu.h>
-#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
@@ -119,9 +117,13 @@ void nf_ct_l3proto_module_put(unsigned short l3proto)
{
struct nf_conntrack_l3proto *p;
- /* rcu_read_lock not necessary since the caller holds a reference */
+ /* rcu_read_lock not necessary since the caller holds a reference, but
+ * taken anyway to avoid lockdep warnings in __nf_ct_l3proto_find()
+ */
+ rcu_read_lock();
p = __nf_ct_l3proto_find(l3proto);
module_put(p->me);
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_ct_l3proto_module_put);
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index faa8eb3722b9..ea4a8d384234 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -252,12 +252,12 @@ static int ct_cpu_seq_show(struct seq_file *seq, void *v)
const struct ip_conntrack_stat *st = v;
if (v == SEQ_START_TOKEN) {
- seq_printf(seq, "entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete\n");
+ seq_printf(seq, "entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete search_restart\n");
return 0;
}
seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x "
- "%08x %08x %08x %08x %08x %08x %08x %08x \n",
+ "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
nr_conntracks,
st->searched,
st->found,
@@ -274,7 +274,8 @@ static int ct_cpu_seq_show(struct seq_file *seq, void *v)
st->expect_new,
st->expect_create,
- st->expect_delete
+ st->expect_delete,
+ st->search_restart
);
return 0;
}
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 015725a5cd50..7df37fd786bc 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -52,7 +52,8 @@ int nf_log_register(u_int8_t pf, struct nf_logger *logger)
} else {
/* register at end of list to honor first register win */
list_add_tail(&logger->list[pf], &nf_loggers_l[pf]);
- llog = rcu_dereference(nf_loggers[pf]);
+ llog = rcu_dereference_protected(nf_loggers[pf],
+ lockdep_is_held(&nf_log_mutex));
if (llog == NULL)
rcu_assign_pointer(nf_loggers[pf], logger);
}
@@ -70,7 +71,8 @@ void nf_log_unregister(struct nf_logger *logger)
mutex_lock(&nf_log_mutex);
for (i = 0; i < ARRAY_SIZE(nf_loggers); i++) {
- c_logger = rcu_dereference(nf_loggers[i]);
+ c_logger = rcu_dereference_protected(nf_loggers[i],
+ lockdep_is_held(&nf_log_mutex));
if (c_logger == logger)
rcu_assign_pointer(nf_loggers[i], NULL);
list_del(&logger->list[i]);
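The nf_conntrack_ecache.c and nf_log.c hunks above switch update-side lookups from rcu_dereference() to rcu_dereference_protected(), passing a lockdep expression that names the mutex guarding the pointer. A minimal sketch of that pattern, using a hypothetical ops pointer and mutex:

	/* Sketch of the update-side pattern used above: the pointer is only
	 * written with example_mutex held, so rcu_dereference_protected()
	 * documents the locking and keeps lockdep quiet, while readers keep
	 * using rcu_dereference() inside rcu_read_lock(). "example_ops" is a
	 * hypothetical type. */
	#include <linux/mutex.h>
	#include <linux/rcupdate.h>
	#include <linux/errno.h>

	struct example_ops;

	static DEFINE_MUTEX(example_mutex);
	static struct example_ops *example_ops_ptr;

	static int example_register(struct example_ops *new)
	{
		struct example_ops *old;
		int ret = 0;

		mutex_lock(&example_mutex);
		old = rcu_dereference_protected(example_ops_ptr,
						lockdep_is_held(&example_mutex));
		if (old != NULL)
			ret = -EBUSY;		/* slot already claimed */
		else
			rcu_assign_pointer(example_ops_ptr, new);
		mutex_unlock(&example_mutex);
		return ret;
	}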
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 6afa3d52ea5f..39b0e3100575 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -18,12 +18,9 @@
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
-#include <linux/major.h>
-#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
-#include <linux/fcntl.h>
#include <linux/skbuff.h>
#include <asm/uaccess.h>
#include <asm/system.h>
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index e70a6ef1f4f2..12e1ab37fcd8 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -246,8 +246,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
break;
case NFQNL_COPY_PACKET:
- if ((entskb->ip_summed == CHECKSUM_PARTIAL ||
- entskb->ip_summed == CHECKSUM_COMPLETE) &&
+ if (entskb->ip_summed == CHECKSUM_PARTIAL &&
skb_checksum_help(entskb)) {
spin_unlock_bh(&queue->lock);
return NULL;
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 665f5beef6ad..445de702b8b7 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -12,7 +12,7 @@
* published by the Free Software Foundation.
*
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/socket.h>
#include <linux/net.h>
@@ -55,12 +55,6 @@ struct xt_af {
static struct xt_af *xt;
-#ifdef DEBUG_IP_FIREWALL_USER
-#define duprintf(format, args...) printk(format , ## args)
-#else
-#define duprintf(format, args...)
-#endif
-
static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
[NFPROTO_UNSPEC] = "x",
[NFPROTO_IPV4] = "ip",
@@ -69,6 +63,9 @@ static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
[NFPROTO_IPV6] = "ip6",
};
+/* Allow this many total (re)entries. */
+static const unsigned int xt_jumpstack_multiplier = 2;
+
/* Registration hooks for targets. */
int
xt_register_target(struct xt_target *target)
@@ -221,6 +218,17 @@ struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
}
EXPORT_SYMBOL(xt_find_match);
+struct xt_match *
+xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
+{
+ struct xt_match *match;
+
+ match = try_then_request_module(xt_find_match(nfproto, name, revision),
+ "%st_%s", xt_prefix[nfproto], name);
+ return (match != NULL) ? match : ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL_GPL(xt_request_find_match);
+
/* Find target, grabs ref. Returns ERR_PTR() on error. */
struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
{
@@ -257,9 +265,7 @@ struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
target = try_then_request_module(xt_find_target(af, name, revision),
"%st_%s", xt_prefix[af], name);
- if (IS_ERR(target) || !target)
- return NULL;
- return target;
+ return (target != NULL) ? target : ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL_GPL(xt_request_find_target);
@@ -361,6 +367,8 @@ static char *textify_hooks(char *buf, size_t size, unsigned int mask)
int xt_check_match(struct xt_mtchk_param *par,
unsigned int size, u_int8_t proto, bool inv_proto)
{
+ int ret;
+
if (XT_ALIGN(par->match->matchsize) != size &&
par->match->matchsize != -1) {
/*
@@ -397,8 +405,14 @@ int xt_check_match(struct xt_mtchk_param *par,
par->match->proto);
return -EINVAL;
}
- if (par->match->checkentry != NULL && !par->match->checkentry(par))
- return -EINVAL;
+ if (par->match->checkentry != NULL) {
+ ret = par->match->checkentry(par);
+ if (ret < 0)
+ return ret;
+ else if (ret > 0)
+ /* Flag up potential errors. */
+ return -EIO;
+ }
return 0;
}
EXPORT_SYMBOL_GPL(xt_check_match);
@@ -518,6 +532,8 @@ EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
int xt_check_target(struct xt_tgchk_param *par,
unsigned int size, u_int8_t proto, bool inv_proto)
{
+ int ret;
+
if (XT_ALIGN(par->target->targetsize) != size) {
pr_err("%s_tables: %s.%u target: invalid size "
"%u (kernel) != (user) %u\n",
@@ -549,8 +565,14 @@ int xt_check_target(struct xt_tgchk_param *par,
par->target->proto);
return -EINVAL;
}
- if (par->target->checkentry != NULL && !par->target->checkentry(par))
- return -EINVAL;
+ if (par->target->checkentry != NULL) {
+ ret = par->target->checkentry(par);
+ if (ret < 0)
+ return ret;
+ else if (ret > 0)
+ /* Flag up potential errors. */
+ return -EIO;
+ }
return 0;
}
EXPORT_SYMBOL_GPL(xt_check_target);
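The xt_check_match()/xt_check_target() hunks above change the ->checkentry contract from a bool to an int: 0 accepts the rule, a negative errno rejects it, and a stray positive value is flagged as -EIO. The converted hooks throughout the rest of this diff follow the shape sketched here; the info structure is made up for illustration.

	/* Hedged sketch of a checkentry hook under the new int convention.
	 * xt_example_info is a hypothetical parameter block. */
	struct xt_example_info {
		__u8 mode;
	};

	static int example_tg_check(const struct xt_tgchk_param *par)
	{
		const struct xt_example_info *info = par->targinfo;

		if (info->mode > 2) {
			pr_info("invalid mode: %u\n", info->mode);
			return -EINVAL;		/* previously "return false" */
		}
		return 0;			/* previously "return true" */
	}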
@@ -662,6 +684,26 @@ void xt_free_table_info(struct xt_table_info *info)
else
vfree(info->entries[cpu]);
}
+
+ if (info->jumpstack != NULL) {
+ if (sizeof(void *) * info->stacksize > PAGE_SIZE) {
+ for_each_possible_cpu(cpu)
+ vfree(info->jumpstack[cpu]);
+ } else {
+ for_each_possible_cpu(cpu)
+ kfree(info->jumpstack[cpu]);
+ }
+ }
+
+ if (sizeof(void **) * nr_cpu_ids > PAGE_SIZE)
+ vfree(info->jumpstack);
+ else
+ kfree(info->jumpstack);
+ if (sizeof(unsigned int) * nr_cpu_ids > PAGE_SIZE)
+ vfree(info->stackptr);
+ else
+ kfree(info->stackptr);
+
kfree(info);
}
EXPORT_SYMBOL(xt_free_table_info);
@@ -706,6 +748,49 @@ EXPORT_SYMBOL_GPL(xt_compat_unlock);
DEFINE_PER_CPU(struct xt_info_lock, xt_info_locks);
EXPORT_PER_CPU_SYMBOL_GPL(xt_info_locks);
+static int xt_jumpstack_alloc(struct xt_table_info *i)
+{
+ unsigned int size;
+ int cpu;
+
+ size = sizeof(unsigned int) * nr_cpu_ids;
+ if (size > PAGE_SIZE)
+ i->stackptr = vmalloc(size);
+ else
+ i->stackptr = kmalloc(size, GFP_KERNEL);
+ if (i->stackptr == NULL)
+ return -ENOMEM;
+ memset(i->stackptr, 0, size);
+
+ size = sizeof(void **) * nr_cpu_ids;
+ if (size > PAGE_SIZE)
+ i->jumpstack = vmalloc(size);
+ else
+ i->jumpstack = kmalloc(size, GFP_KERNEL);
+ if (i->jumpstack == NULL)
+ return -ENOMEM;
+ memset(i->jumpstack, 0, size);
+
+ i->stacksize *= xt_jumpstack_multiplier;
+ size = sizeof(void *) * i->stacksize;
+ for_each_possible_cpu(cpu) {
+ if (size > PAGE_SIZE)
+ i->jumpstack[cpu] = vmalloc_node(size,
+ cpu_to_node(cpu));
+ else
+ i->jumpstack[cpu] = kmalloc_node(size,
+ GFP_KERNEL, cpu_to_node(cpu));
+ if (i->jumpstack[cpu] == NULL)
+ /*
+ * Freeing will be done later on by the callers. The
+ * chain is: xt_replace_table -> __do_replace ->
+ * do_replace -> xt_free_table_info.
+ */
+ return -ENOMEM;
+ }
+
+ return 0;
+}
struct xt_table_info *
xt_replace_table(struct xt_table *table,
@@ -714,6 +799,13 @@ xt_replace_table(struct xt_table *table,
int *error)
{
struct xt_table_info *private;
+ int ret;
+
+ ret = xt_jumpstack_alloc(newinfo);
+ if (ret < 0) {
+ *error = ret;
+ return NULL;
+ }
/* Do the substitution. */
local_bh_disable();
@@ -721,7 +813,7 @@ xt_replace_table(struct xt_table *table,
/* Check inside lock: is the old number correct? */
if (num_counters != private->number) {
- duprintf("num_counters != table->private->number (%u/%u)\n",
+ pr_debug("num_counters != table->private->number (%u/%u)\n",
num_counters, private->number);
local_bh_enable();
*error = -EAGAIN;
@@ -752,6 +844,10 @@ struct xt_table *xt_register_table(struct net *net,
struct xt_table_info *private;
struct xt_table *t, *table;
+ ret = xt_jumpstack_alloc(newinfo);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
/* Don't add one object to multiple lists. */
table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
if (!table) {
@@ -778,7 +874,7 @@ struct xt_table *xt_register_table(struct net *net,
goto unlock;
private = table->private;
- duprintf("table->private->number = %u\n", private->number);
+ pr_debug("table->private->number = %u\n", private->number);
/* save number of initial entries */
private->initial_entries = private->number;
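xt_jumpstack_alloc() and the matching teardown added to xt_free_table_info() above pick the allocator by size: slab for anything up to a page, vmalloc beyond that, with the free path mirroring the same test. The pattern in isolation:

	/* Sketch of the size-dependent allocation used above; the free side
	 * must repeat the same PAGE_SIZE test so kmalloc'd memory is never
	 * handed to vfree() and vice versa. */
	static void *example_alloc(size_t size)
	{
		if (size > PAGE_SIZE)
			return vmalloc(size);
		return kmalloc(size, GFP_KERNEL);
	}

	static void example_free(void *ptr, size_t size)
	{
		if (size > PAGE_SIZE)
			vfree(ptr);
		else
			kfree(ptr);
	}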
diff --git a/net/netfilter/xt_CONNMARK.c b/net/netfilter/xt_CONNMARK.c
deleted file mode 100644
index 593457068ae1..000000000000
--- a/net/netfilter/xt_CONNMARK.c
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * xt_CONNMARK - Netfilter module to modify the connection mark values
- *
- * Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com>
- * by Henrik Nordstrom <hno@marasystems.com>
- * Copyright © CC Computer Consultants GmbH, 2007 - 2008
- * Jan Engelhardt <jengelh@computergmbh.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/ip.h>
-#include <net/checksum.h>
-
-MODULE_AUTHOR("Henrik Nordstrom <hno@marasystems.com>");
-MODULE_DESCRIPTION("Xtables: connection mark modification");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("ipt_CONNMARK");
-MODULE_ALIAS("ip6t_CONNMARK");
-
-#include <linux/netfilter/x_tables.h>
-#include <linux/netfilter/xt_CONNMARK.h>
-#include <net/netfilter/nf_conntrack_ecache.h>
-
-static unsigned int
-connmark_tg(struct sk_buff *skb, const struct xt_target_param *par)
-{
- const struct xt_connmark_tginfo1 *info = par->targinfo;
- enum ip_conntrack_info ctinfo;
- struct nf_conn *ct;
- u_int32_t newmark;
-
- ct = nf_ct_get(skb, &ctinfo);
- if (ct == NULL)
- return XT_CONTINUE;
-
- switch (info->mode) {
- case XT_CONNMARK_SET:
- newmark = (ct->mark & ~info->ctmask) ^ info->ctmark;
- if (ct->mark != newmark) {
- ct->mark = newmark;
- nf_conntrack_event_cache(IPCT_MARK, ct);
- }
- break;
- case XT_CONNMARK_SAVE:
- newmark = (ct->mark & ~info->ctmask) ^
- (skb->mark & info->nfmask);
- if (ct->mark != newmark) {
- ct->mark = newmark;
- nf_conntrack_event_cache(IPCT_MARK, ct);
- }
- break;
- case XT_CONNMARK_RESTORE:
- newmark = (skb->mark & ~info->nfmask) ^
- (ct->mark & info->ctmask);
- skb->mark = newmark;
- break;
- }
-
- return XT_CONTINUE;
-}
-
-static bool connmark_tg_check(const struct xt_tgchk_param *par)
-{
- if (nf_ct_l3proto_try_module_get(par->family) < 0) {
- printk(KERN_WARNING "cannot load conntrack support for "
- "proto=%u\n", par->family);
- return false;
- }
- return true;
-}
-
-static void connmark_tg_destroy(const struct xt_tgdtor_param *par)
-{
- nf_ct_l3proto_module_put(par->family);
-}
-
-static struct xt_target connmark_tg_reg __read_mostly = {
- .name = "CONNMARK",
- .revision = 1,
- .family = NFPROTO_UNSPEC,
- .checkentry = connmark_tg_check,
- .target = connmark_tg,
- .targetsize = sizeof(struct xt_connmark_tginfo1),
- .destroy = connmark_tg_destroy,
- .me = THIS_MODULE,
-};
-
-static int __init connmark_tg_init(void)
-{
- return xt_register_target(&connmark_tg_reg);
-}
-
-static void __exit connmark_tg_exit(void)
-{
- xt_unregister_target(&connmark_tg_reg);
-}
-
-module_init(connmark_tg_init);
-module_exit(connmark_tg_exit);
diff --git a/net/netfilter/xt_CONNSECMARK.c b/net/netfilter/xt_CONNSECMARK.c
index b54c3756fdc3..e953e302141d 100644
--- a/net/netfilter/xt_CONNSECMARK.c
+++ b/net/netfilter/xt_CONNSECMARK.c
@@ -15,6 +15,7 @@
* published by the Free Software Foundation.
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>
@@ -22,8 +23,6 @@
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_ecache.h>
-#define PFX "CONNSECMARK: "
-
MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Morris <jmorris@redhat.com>");
MODULE_DESCRIPTION("Xtables: target for copying between connection and security mark");
@@ -85,15 +84,16 @@ connsecmark_tg(struct sk_buff *skb, const struct xt_target_param *par)
return XT_CONTINUE;
}
-static bool connsecmark_tg_check(const struct xt_tgchk_param *par)
+static int connsecmark_tg_check(const struct xt_tgchk_param *par)
{
const struct xt_connsecmark_target_info *info = par->targinfo;
+ int ret;
if (strcmp(par->table, "mangle") != 0 &&
strcmp(par->table, "security") != 0) {
- printk(KERN_INFO PFX "target only valid in the \'mangle\' "
- "or \'security\' tables, not \'%s\'.\n", par->table);
- return false;
+ pr_info("target only valid in the \'mangle\' "
+ "or \'security\' tables, not \'%s\'.\n", par->table);
+ return -EINVAL;
}
switch (info->mode) {
@@ -102,16 +102,15 @@ static bool connsecmark_tg_check(const struct xt_tgchk_param *par)
break;
default:
- printk(KERN_INFO PFX "invalid mode: %hu\n", info->mode);
- return false;
+ pr_info("invalid mode: %hu\n", info->mode);
+ return -EINVAL;
}
- if (nf_ct_l3proto_try_module_get(par->family) < 0) {
- printk(KERN_WARNING "can't load conntrack support for "
- "proto=%u\n", par->family);
- return false;
- }
- return true;
+ ret = nf_ct_l3proto_try_module_get(par->family);
+ if (ret < 0)
+ pr_info("cannot load conntrack support for proto=%u\n",
+ par->family);
+ return ret;
}
static void connsecmark_tg_destroy(const struct xt_tgdtor_param *par)
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index ee18b231b950..c8f547829bad 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -38,13 +38,13 @@ static unsigned int xt_ct_target(struct sk_buff *skb,
static u8 xt_ct_find_proto(const struct xt_tgchk_param *par)
{
- if (par->family == AF_INET) {
+ if (par->family == NFPROTO_IPV4) {
const struct ipt_entry *e = par->entryinfo;
if (e->ip.invflags & IPT_INV_PROTO)
return 0;
return e->ip.proto;
- } else if (par->family == AF_INET6) {
+ } else if (par->family == NFPROTO_IPV6) {
const struct ip6t_entry *e = par->entryinfo;
if (e->ipv6.invflags & IP6T_INV_PROTO)
@@ -54,16 +54,17 @@ static u8 xt_ct_find_proto(const struct xt_tgchk_param *par)
return 0;
}
-static bool xt_ct_tg_check(const struct xt_tgchk_param *par)
+static int xt_ct_tg_check(const struct xt_tgchk_param *par)
{
struct xt_ct_target_info *info = par->targinfo;
struct nf_conntrack_tuple t;
struct nf_conn_help *help;
struct nf_conn *ct;
+ int ret = 0;
u8 proto;
if (info->flags & ~XT_CT_NOTRACK)
- return false;
+ return -EINVAL;
if (info->flags & XT_CT_NOTRACK) {
ct = &nf_conntrack_untracked;
@@ -76,28 +77,34 @@ static bool xt_ct_tg_check(const struct xt_tgchk_param *par)
goto err1;
#endif
- if (nf_ct_l3proto_try_module_get(par->family) < 0)
+ ret = nf_ct_l3proto_try_module_get(par->family);
+ if (ret < 0)
goto err1;
memset(&t, 0, sizeof(t));
ct = nf_conntrack_alloc(par->net, info->zone, &t, &t, GFP_KERNEL);
+ ret = PTR_ERR(ct);
if (IS_ERR(ct))
goto err2;
+ ret = 0;
if ((info->ct_events || info->exp_events) &&
!nf_ct_ecache_ext_add(ct, info->ct_events, info->exp_events,
GFP_KERNEL))
goto err3;
if (info->helper[0]) {
+ ret = -ENOENT;
proto = xt_ct_find_proto(par);
if (!proto)
goto err3;
+ ret = -ENOMEM;
help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
if (help == NULL)
goto err3;
+ ret = -ENOENT;
help->helper = nf_conntrack_helper_try_module_get(info->helper,
par->family,
proto);
@@ -109,14 +116,14 @@ static bool xt_ct_tg_check(const struct xt_tgchk_param *par)
__set_bit(IPS_CONFIRMED_BIT, &ct->status);
out:
info->ct = ct;
- return true;
+ return 0;
err3:
nf_conntrack_free(ct);
err2:
nf_ct_l3proto_module_put(par->family);
err1:
- return false;
+ return ret;
}
static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par)
@@ -138,7 +145,7 @@ static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par)
static struct xt_target xt_ct_tg __read_mostly = {
.name = "CT",
.family = NFPROTO_UNSPEC,
- .targetsize = XT_ALIGN(sizeof(struct xt_ct_target_info)),
+ .targetsize = sizeof(struct xt_ct_target_info),
.checkentry = xt_ct_tg_check,
.destroy = xt_ct_tg_destroy,
.target = xt_ct_target,
diff --git a/net/netfilter/xt_DSCP.c b/net/netfilter/xt_DSCP.c
index 74ce89260056..969634f293e5 100644
--- a/net/netfilter/xt_DSCP.c
+++ b/net/netfilter/xt_DSCP.c
@@ -9,7 +9,7 @@
*
* See RFC2474 for a description of the DSCP field within the IP Header.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
@@ -60,15 +60,15 @@ dscp_tg6(struct sk_buff *skb, const struct xt_target_param *par)
return XT_CONTINUE;
}
-static bool dscp_tg_check(const struct xt_tgchk_param *par)
+static int dscp_tg_check(const struct xt_tgchk_param *par)
{
const struct xt_DSCP_info *info = par->targinfo;
if (info->dscp > XT_DSCP_MAX) {
- printk(KERN_WARNING "DSCP: dscp %x out of range\n", info->dscp);
- return false;
+ pr_info("dscp %x out of range\n", info->dscp);
+ return -EDOM;
}
- return true;
+ return 0;
}
static unsigned int
diff --git a/net/netfilter/xt_HL.c b/net/netfilter/xt_HL.c
index 10e789e2d12a..77b99f732711 100644
--- a/net/netfilter/xt_HL.c
+++ b/net/netfilter/xt_HL.c
@@ -9,7 +9,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
@@ -101,35 +101,33 @@ hl_tg6(struct sk_buff *skb, const struct xt_target_param *par)
return XT_CONTINUE;
}
-static bool ttl_tg_check(const struct xt_tgchk_param *par)
+static int ttl_tg_check(const struct xt_tgchk_param *par)
{
const struct ipt_TTL_info *info = par->targinfo;
if (info->mode > IPT_TTL_MAXMODE) {
- printk(KERN_WARNING "ipt_TTL: invalid or unknown Mode %u\n",
- info->mode);
- return false;
+ pr_info("TTL: invalid or unknown mode %u\n", info->mode);
+ return -EINVAL;
}
if (info->mode != IPT_TTL_SET && info->ttl == 0)
- return false;
- return true;
+ return -EINVAL;
+ return 0;
}
-static bool hl_tg6_check(const struct xt_tgchk_param *par)
+static int hl_tg6_check(const struct xt_tgchk_param *par)
{
const struct ip6t_HL_info *info = par->targinfo;
if (info->mode > IP6T_HL_MAXMODE) {
- printk(KERN_WARNING "ip6t_HL: invalid or unknown Mode %u\n",
- info->mode);
- return false;
+ pr_info("invalid or unknown mode %u\n", info->mode);
+ return -EINVAL;
}
if (info->mode != IP6T_HL_SET && info->hop_limit == 0) {
- printk(KERN_WARNING "ip6t_HL: increment/decrement doesn't "
+ pr_info("increment/decrement does not "
"make sense with value 0\n");
- return false;
+ return -EINVAL;
}
- return true;
+ return 0;
}
static struct xt_target hl_tg_reg[] __read_mostly = {
diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c
index 3271c8e52153..ab6f8ff9c9a7 100644
--- a/net/netfilter/xt_LED.c
+++ b/net/netfilter/xt_LED.c
@@ -18,7 +18,7 @@
* 02110-1301 USA.
*
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>
@@ -32,12 +32,18 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Adam Nielsen <a.nielsen@shikadi.net>");
MODULE_DESCRIPTION("Xtables: trigger LED devices on packet match");
+static LIST_HEAD(xt_led_triggers);
+static DEFINE_MUTEX(xt_led_mutex);
+
/*
* This is declared in here (the kernel module) only, to avoid having these
* dependencies in userspace code. This is what xt_led_info.internal_data
* points to.
*/
struct xt_led_info_internal {
+ struct list_head list;
+ int refcnt;
+ char *trigger_id;
struct led_trigger netfilter_led_trigger;
struct timer_list timer;
};
@@ -54,7 +60,7 @@ led_tg(struct sk_buff *skb, const struct xt_target_param *par)
*/
if ((ledinfo->delay > 0) && ledinfo->always_blink &&
timer_pending(&ledinternal->timer))
- led_trigger_event(&ledinternal->netfilter_led_trigger,LED_OFF);
+ led_trigger_event(&ledinternal->netfilter_led_trigger, LED_OFF);
led_trigger_event(&ledinternal->netfilter_led_trigger, LED_FULL);
@@ -75,54 +81,86 @@ led_tg(struct sk_buff *skb, const struct xt_target_param *par)
static void led_timeout_callback(unsigned long data)
{
- struct xt_led_info *ledinfo = (struct xt_led_info *)data;
- struct xt_led_info_internal *ledinternal = ledinfo->internal_data;
+ struct xt_led_info_internal *ledinternal = (struct xt_led_info_internal *)data;
led_trigger_event(&ledinternal->netfilter_led_trigger, LED_OFF);
}
-static bool led_tg_check(const struct xt_tgchk_param *par)
+static struct xt_led_info_internal *led_trigger_lookup(const char *name)
+{
+ struct xt_led_info_internal *ledinternal;
+
+ list_for_each_entry(ledinternal, &xt_led_triggers, list) {
+ if (!strcmp(name, ledinternal->netfilter_led_trigger.name)) {
+ return ledinternal;
+ }
+ }
+ return NULL;
+}
+
+static int led_tg_check(const struct xt_tgchk_param *par)
{
struct xt_led_info *ledinfo = par->targinfo;
struct xt_led_info_internal *ledinternal;
int err;
if (ledinfo->id[0] == '\0') {
- printk(KERN_ERR KBUILD_MODNAME ": No 'id' parameter given.\n");
- return false;
+ pr_info("No 'id' parameter given.\n");
+ return -EINVAL;
}
- ledinternal = kzalloc(sizeof(struct xt_led_info_internal), GFP_KERNEL);
- if (!ledinternal) {
- printk(KERN_CRIT KBUILD_MODNAME ": out of memory\n");
- return false;
+ mutex_lock(&xt_led_mutex);
+
+ ledinternal = led_trigger_lookup(ledinfo->id);
+ if (ledinternal) {
+ ledinternal->refcnt++;
+ goto out;
}
- ledinternal->netfilter_led_trigger.name = ledinfo->id;
+ err = -ENOMEM;
+ ledinternal = kzalloc(sizeof(struct xt_led_info_internal), GFP_KERNEL);
+ if (!ledinternal)
+ goto exit_mutex_only;
+
+ ledinternal->trigger_id = kstrdup(ledinfo->id, GFP_KERNEL);
+ if (!ledinternal->trigger_id)
+ goto exit_internal_alloc;
+
+ ledinternal->refcnt = 1;
+ ledinternal->netfilter_led_trigger.name = ledinternal->trigger_id;
err = led_trigger_register(&ledinternal->netfilter_led_trigger);
if (err) {
- printk(KERN_CRIT KBUILD_MODNAME
- ": led_trigger_register() failed\n");
+ pr_warning("led_trigger_register() failed\n");
if (err == -EEXIST)
- printk(KERN_ERR KBUILD_MODNAME
- ": Trigger name is already in use.\n");
+ pr_warning("Trigger name is already in use.\n");
goto exit_alloc;
}
/* See if we need to set up a timer */
if (ledinfo->delay > 0)
setup_timer(&ledinternal->timer, led_timeout_callback,
- (unsigned long)ledinfo);
+ (unsigned long)ledinternal);
+
+ list_add_tail(&ledinternal->list, &xt_led_triggers);
+
+out:
+ mutex_unlock(&xt_led_mutex);
ledinfo->internal_data = ledinternal;
- return true;
+ return 0;
exit_alloc:
+ kfree(ledinternal->trigger_id);
+
+exit_internal_alloc:
kfree(ledinternal);
- return false;
+exit_mutex_only:
+ mutex_unlock(&xt_led_mutex);
+
+ return err;
}
static void led_tg_destroy(const struct xt_tgdtor_param *par)
@@ -130,10 +168,23 @@ static void led_tg_destroy(const struct xt_tgdtor_param *par)
const struct xt_led_info *ledinfo = par->targinfo;
struct xt_led_info_internal *ledinternal = ledinfo->internal_data;
+ mutex_lock(&xt_led_mutex);
+
+ if (--ledinternal->refcnt) {
+ mutex_unlock(&xt_led_mutex);
+ return;
+ }
+
+ list_del(&ledinternal->list);
+
if (ledinfo->delay > 0)
del_timer_sync(&ledinternal->timer);
led_trigger_unregister(&ledinternal->netfilter_led_trigger);
+
+ mutex_unlock(&xt_led_mutex);
+
+ kfree(ledinternal->trigger_id);
kfree(ledinternal);
}
@@ -142,7 +193,7 @@ static struct xt_target led_tg_reg __read_mostly = {
.revision = 0,
.family = NFPROTO_UNSPEC,
.target = led_tg,
- .targetsize = XT_ALIGN(sizeof(struct xt_led_info)),
+ .targetsize = sizeof(struct xt_led_info),
.checkentry = led_tg_check,
.destroy = led_tg_destroy,
.me = THIS_MODULE,
diff --git a/net/netfilter/xt_MARK.c b/net/netfilter/xt_MARK.c
deleted file mode 100644
index 225f8d11e173..000000000000
--- a/net/netfilter/xt_MARK.c
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * xt_MARK - Netfilter module to modify the NFMARK field of an skb
- *
- * (C) 1999-2001 Marc Boucher <marc@mbsi.ca>
- * Copyright © CC Computer Consultants GmbH, 2007 - 2008
- * Jan Engelhardt <jengelh@computergmbh.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/ip.h>
-#include <net/checksum.h>
-
-#include <linux/netfilter/x_tables.h>
-#include <linux/netfilter/xt_MARK.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
-MODULE_DESCRIPTION("Xtables: packet mark modification");
-MODULE_ALIAS("ipt_MARK");
-MODULE_ALIAS("ip6t_MARK");
-
-static unsigned int
-mark_tg(struct sk_buff *skb, const struct xt_target_param *par)
-{
- const struct xt_mark_tginfo2 *info = par->targinfo;
-
- skb->mark = (skb->mark & ~info->mask) ^ info->mark;
- return XT_CONTINUE;
-}
-
-static struct xt_target mark_tg_reg __read_mostly = {
- .name = "MARK",
- .revision = 2,
- .family = NFPROTO_UNSPEC,
- .target = mark_tg,
- .targetsize = sizeof(struct xt_mark_tginfo2),
- .me = THIS_MODULE,
-};
-
-static int __init mark_tg_init(void)
-{
- return xt_register_target(&mark_tg_reg);
-}
-
-static void __exit mark_tg_exit(void)
-{
- xt_unregister_target(&mark_tg_reg);
-}
-
-module_init(mark_tg_init);
-module_exit(mark_tg_exit);
diff --git a/net/netfilter/xt_NFLOG.c b/net/netfilter/xt_NFLOG.c
index a57c5cf018ec..42dd8747b421 100644
--- a/net/netfilter/xt_NFLOG.c
+++ b/net/netfilter/xt_NFLOG.c
@@ -37,15 +37,15 @@ nflog_tg(struct sk_buff *skb, const struct xt_target_param *par)
return XT_CONTINUE;
}
-static bool nflog_tg_check(const struct xt_tgchk_param *par)
+static int nflog_tg_check(const struct xt_tgchk_param *par)
{
const struct xt_nflog_info *info = par->targinfo;
if (info->flags & ~XT_NFLOG_MASK)
- return false;
+ return -EINVAL;
if (info->prefix[sizeof(info->prefix) - 1] != '\0')
- return false;
- return true;
+ return -EINVAL;
+ return 0;
}
static struct xt_target nflog_tg_reg __read_mostly = {
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c
index 12dcd7007c3e..f9217cb56fe3 100644
--- a/net/netfilter/xt_NFQUEUE.c
+++ b/net/netfilter/xt_NFQUEUE.c
@@ -49,17 +49,6 @@ static u32 hash_v4(const struct sk_buff *skb)
return jhash_2words((__force u32)ipaddr, iph->protocol, jhash_initval);
}
-static unsigned int
-nfqueue_tg4_v1(struct sk_buff *skb, const struct xt_target_param *par)
-{
- const struct xt_NFQ_info_v1 *info = par->targinfo;
- u32 queue = info->queuenum;
-
- if (info->queues_total > 1)
- queue = hash_v4(skb) % info->queues_total + queue;
- return NF_QUEUE_NR(queue);
-}
-
#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
static u32 hash_v6(const struct sk_buff *skb)
{
@@ -73,20 +62,26 @@ static u32 hash_v6(const struct sk_buff *skb)
return jhash2((__force u32 *)addr, ARRAY_SIZE(addr), jhash_initval);
}
+#endif
static unsigned int
-nfqueue_tg6_v1(struct sk_buff *skb, const struct xt_target_param *par)
+nfqueue_tg_v1(struct sk_buff *skb, const struct xt_target_param *par)
{
const struct xt_NFQ_info_v1 *info = par->targinfo;
u32 queue = info->queuenum;
- if (info->queues_total > 1)
- queue = hash_v6(skb) % info->queues_total + queue;
+ if (info->queues_total > 1) {
+ if (par->family == NFPROTO_IPV4)
+ queue = hash_v4(skb) % info->queues_total + queue;
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+ else if (par->family == NFPROTO_IPV6)
+ queue = hash_v6(skb) % info->queues_total + queue;
+#endif
+ }
return NF_QUEUE_NR(queue);
}
-#endif
-static bool nfqueue_tg_v1_check(const struct xt_tgchk_param *par)
+static int nfqueue_tg_v1_check(const struct xt_tgchk_param *par)
{
const struct xt_NFQ_info_v1 *info = par->targinfo;
u32 maxid;
@@ -97,15 +92,15 @@ static bool nfqueue_tg_v1_check(const struct xt_tgchk_param *par)
}
if (info->queues_total == 0) {
pr_err("NFQUEUE: number of total queues is 0\n");
- return false;
+ return -EINVAL;
}
maxid = info->queues_total - 1 + info->queuenum;
if (maxid > 0xffff) {
pr_err("NFQUEUE: number of queues (%u) out of range (got %u)\n",
info->queues_total, maxid);
- return false;
+ return -ERANGE;
}
- return true;
+ return 0;
}
static struct xt_target nfqueue_tg_reg[] __read_mostly = {
@@ -119,23 +114,12 @@ static struct xt_target nfqueue_tg_reg[] __read_mostly = {
{
.name = "NFQUEUE",
.revision = 1,
- .family = NFPROTO_IPV4,
- .checkentry = nfqueue_tg_v1_check,
- .target = nfqueue_tg4_v1,
- .targetsize = sizeof(struct xt_NFQ_info_v1),
- .me = THIS_MODULE,
- },
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
- {
- .name = "NFQUEUE",
- .revision = 1,
- .family = NFPROTO_IPV6,
+ .family = NFPROTO_UNSPEC,
.checkentry = nfqueue_tg_v1_check,
- .target = nfqueue_tg6_v1,
+ .target = nfqueue_tg_v1,
.targetsize = sizeof(struct xt_NFQ_info_v1),
.me = THIS_MODULE,
},
-#endif
};
static int __init nfqueue_tg_init(void)
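The unified nfqueue_tg_v1() above spreads flows over info->queues_total queues starting at info->queuenum, choosing hash_v4() or hash_v6() based on par->family. The mapping itself is plain modular arithmetic; a sketch with assumed numbers:

	/* Sketch of the queue-balancing arithmetic from nfqueue_tg_v1()
	 * above, with made-up values: base queue 10 and 4 queues total, so
	 * a flow hash lands on one of queues 10..13. */
	static unsigned int example_pick_queue(u32 flow_hash)
	{
		const u32 queuenum = 10;	/* stands in for info->queuenum */
		const u32 queues_total = 4;	/* stands in for info->queues_total */

		return flow_hash % queues_total + queuenum;
	}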
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index d16d55df4f61..a02193f06e39 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -86,7 +86,7 @@ xt_rateest_tg(struct sk_buff *skb, const struct xt_target_param *par)
return XT_CONTINUE;
}
-static bool xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
+static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
{
struct xt_rateest_target_info *info = par->targinfo;
struct xt_rateest *est;
@@ -94,6 +94,7 @@ static bool xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
struct nlattr opt;
struct gnet_estimator est;
} cfg;
+ int ret;
if (unlikely(!rnd_inited)) {
get_random_bytes(&jhash_rnd, sizeof(jhash_rnd));
@@ -110,12 +111,13 @@ static bool xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
(info->interval != est->params.interval ||
info->ewma_log != est->params.ewma_log)) {
xt_rateest_put(est);
- return false;
+ return -EINVAL;
}
info->est = est;
- return true;
+ return 0;
}
+ ret = -ENOMEM;
est = kzalloc(sizeof(*est), GFP_KERNEL);
if (!est)
goto err1;
@@ -131,19 +133,19 @@ static bool xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
cfg.est.interval = info->interval;
cfg.est.ewma_log = info->ewma_log;
- if (gen_new_estimator(&est->bstats, &est->rstats, &est->lock,
- &cfg.opt) < 0)
+ ret = gen_new_estimator(&est->bstats, &est->rstats,
+ &est->lock, &cfg.opt);
+ if (ret < 0)
goto err2;
info->est = est;
xt_rateest_hash_insert(est);
-
- return true;
+ return 0;
err2:
kfree(est);
err1:
- return false;
+ return ret;
}
static void xt_rateest_tg_destroy(const struct xt_tgdtor_param *par)
diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c
index 7a6f9e6f5dfa..a91d4a7d5a2c 100644
--- a/net/netfilter/xt_SECMARK.c
+++ b/net/netfilter/xt_SECMARK.c
@@ -12,6 +12,7 @@
* published by the Free Software Foundation.
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/selinux.h>
@@ -49,7 +50,7 @@ secmark_tg(struct sk_buff *skb, const struct xt_target_param *par)
return XT_CONTINUE;
}
-static bool checkentry_selinux(struct xt_secmark_target_info *info)
+static int checkentry_selinux(struct xt_secmark_target_info *info)
{
int err;
struct xt_secmark_target_selinux_info *sel = &info->u.sel;
@@ -59,58 +60,59 @@ static bool checkentry_selinux(struct xt_secmark_target_info *info)
err = selinux_string_to_sid(sel->selctx, &sel->selsid);
if (err) {
if (err == -EINVAL)
- printk(KERN_INFO PFX "invalid SELinux context \'%s\'\n",
- sel->selctx);
- return false;
+ pr_info("invalid SELinux context \'%s\'\n",
+ sel->selctx);
+ return err;
}
if (!sel->selsid) {
- printk(KERN_INFO PFX "unable to map SELinux context \'%s\'\n",
- sel->selctx);
- return false;
+ pr_info("unable to map SELinux context \'%s\'\n", sel->selctx);
+ return -ENOENT;
}
err = selinux_secmark_relabel_packet_permission(sel->selsid);
if (err) {
- printk(KERN_INFO PFX "unable to obtain relabeling permission\n");
- return false;
+ pr_info("unable to obtain relabeling permission\n");
+ return err;
}
selinux_secmark_refcount_inc();
- return true;
+ return 0;
}
-static bool secmark_tg_check(const struct xt_tgchk_param *par)
+static int secmark_tg_check(const struct xt_tgchk_param *par)
{
struct xt_secmark_target_info *info = par->targinfo;
+ int err;
if (strcmp(par->table, "mangle") != 0 &&
strcmp(par->table, "security") != 0) {
- printk(KERN_INFO PFX "target only valid in the \'mangle\' "
- "or \'security\' tables, not \'%s\'.\n", par->table);
- return false;
+ pr_info("target only valid in the \'mangle\' "
+ "or \'security\' tables, not \'%s\'.\n", par->table);
+ return -EINVAL;
}
if (mode && mode != info->mode) {
- printk(KERN_INFO PFX "mode already set to %hu cannot mix with "
- "rules for mode %hu\n", mode, info->mode);
- return false;
+ pr_info("mode already set to %hu cannot mix with "
+ "rules for mode %hu\n", mode, info->mode);
+ return -EINVAL;
}
switch (info->mode) {
case SECMARK_MODE_SEL:
- if (!checkentry_selinux(info))
- return false;
+ err = checkentry_selinux(info);
+ if (err < 0)
+ return err;
break;
default:
- printk(KERN_INFO PFX "invalid mode: %hu\n", info->mode);
- return false;
+ pr_info("invalid mode: %hu\n", info->mode);
+ return -EINVAL;
}
if (!mode)
mode = info->mode;
- return true;
+ return 0;
}
static void secmark_tg_destroy(const struct xt_tgdtor_param *par)
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index c5f4b9919e9a..d04606459c9d 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -7,7 +7,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
@@ -68,15 +68,14 @@ tcpmss_mangle_packet(struct sk_buff *skb,
if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
if (dst_mtu(skb_dst(skb)) <= minlen) {
if (net_ratelimit())
- printk(KERN_ERR "xt_TCPMSS: "
- "unknown or invalid path-MTU (%u)\n",
+ pr_err("unknown or invalid path-MTU (%u)\n",
dst_mtu(skb_dst(skb)));
return -1;
}
if (in_mtu <= minlen) {
if (net_ratelimit())
- printk(KERN_ERR "xt_TCPMSS: unknown or "
- "invalid path-MTU (%u)\n", in_mtu);
+ pr_err("unknown or invalid path-MTU (%u)\n",
+ in_mtu);
return -1;
}
newmss = min(dst_mtu(skb_dst(skb)), in_mtu) - minlen;
@@ -236,7 +235,7 @@ static inline bool find_syn_match(const struct xt_entry_match *m)
return false;
}
-static bool tcpmss_tg4_check(const struct xt_tgchk_param *par)
+static int tcpmss_tg4_check(const struct xt_tgchk_param *par)
{
const struct xt_tcpmss_info *info = par->targinfo;
const struct ipt_entry *e = par->entryinfo;
@@ -246,19 +245,19 @@ static bool tcpmss_tg4_check(const struct xt_tgchk_param *par)
(par->hook_mask & ~((1 << NF_INET_FORWARD) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_POST_ROUTING))) != 0) {
- printk("xt_TCPMSS: path-MTU clamping only supported in "
- "FORWARD, OUTPUT and POSTROUTING hooks\n");
- return false;
+ pr_info("path-MTU clamping only supported in "
+ "FORWARD, OUTPUT and POSTROUTING hooks\n");
+ return -EINVAL;
}
xt_ematch_foreach(ematch, e)
if (find_syn_match(ematch))
- return true;
- printk("xt_TCPMSS: Only works on TCP SYN packets\n");
- return false;
+ return 0;
+ pr_info("Only works on TCP SYN packets\n");
+ return -EINVAL;
}
#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
-static bool tcpmss_tg6_check(const struct xt_tgchk_param *par)
+static int tcpmss_tg6_check(const struct xt_tgchk_param *par)
{
const struct xt_tcpmss_info *info = par->targinfo;
const struct ip6t_entry *e = par->entryinfo;
@@ -268,15 +267,15 @@ static bool tcpmss_tg6_check(const struct xt_tgchk_param *par)
(par->hook_mask & ~((1 << NF_INET_FORWARD) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_POST_ROUTING))) != 0) {
- printk("xt_TCPMSS: path-MTU clamping only supported in "
- "FORWARD, OUTPUT and POSTROUTING hooks\n");
- return false;
+ pr_info("path-MTU clamping only supported in "
+ "FORWARD, OUTPUT and POSTROUTING hooks\n");
+ return -EINVAL;
}
xt_ematch_foreach(ematch, e)
if (find_syn_match(ematch))
- return true;
- printk("xt_TCPMSS: Only works on TCP SYN packets\n");
- return false;
+ return 0;
+ pr_info("Only works on TCP SYN packets\n");
+ return -EINVAL;
}
#endif
diff --git a/net/netfilter/xt_TCPOPTSTRIP.c b/net/netfilter/xt_TCPOPTSTRIP.c
index 9dd8c8ef63eb..e8b57609ddc0 100644
--- a/net/netfilter/xt_TCPOPTSTRIP.c
+++ b/net/netfilter/xt_TCPOPTSTRIP.c
@@ -3,7 +3,6 @@
*
* Copyright (C) 2007 Sven Schnelle <svens@bitebene.org>
* Copyright © CC Computer Consultants GmbH, 2007
- * Contact: Jan Engelhardt <jengelh@computergmbh.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -136,7 +135,7 @@ static void __exit tcpoptstrip_tg_exit(void)
module_init(tcpoptstrip_tg_init);
module_exit(tcpoptstrip_tg_exit);
-MODULE_AUTHOR("Sven Schnelle <svens@bitebene.org>, Jan Engelhardt <jengelh@computergmbh.de>");
+MODULE_AUTHOR("Sven Schnelle <svens@bitebene.org>, Jan Engelhardt <jengelh@medozas.de>");
MODULE_DESCRIPTION("Xtables: TCP option stripping");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_TCPOPTSTRIP");
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
new file mode 100644
index 000000000000..49da6c05f4e0
--- /dev/null
+++ b/net/netfilter/xt_TEE.c
@@ -0,0 +1,309 @@
+/*
+ * "TEE" target extension for Xtables
+ * Copyright © Sebastian Claßen, 2007
+ * Jan Engelhardt, 2007-2010
+ *
+ * based on ipt_ROUTE.c from Cédric de Launois
+ * <delaunois@info.ucl.be>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 or later, as published by the Free Software Foundation.
+ */
+#include <linux/ip.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/route.h>
+#include <linux/skbuff.h>
+#include <linux/notifier.h>
+#include <net/checksum.h>
+#include <net/icmp.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <net/route.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_TEE.h>
+
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+# define WITH_CONNTRACK 1
+# include <net/netfilter/nf_conntrack.h>
+#endif
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+# define WITH_IPV6 1
+#endif
+
+struct xt_tee_priv {
+ struct notifier_block notifier;
+ struct xt_tee_tginfo *tginfo;
+ int oif;
+};
+
+static const union nf_inet_addr tee_zero_address;
+static DEFINE_PER_CPU(bool, tee_active);
+
+static struct net *pick_net(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_NS
+ const struct dst_entry *dst;
+
+ if (skb->dev != NULL)
+ return dev_net(skb->dev);
+ dst = skb_dst(skb);
+ if (dst != NULL && dst->dev != NULL)
+ return dev_net(dst->dev);
+#endif
+ return &init_net;
+}
+
+static bool
+tee_tg_route4(struct sk_buff *skb, const struct xt_tee_tginfo *info)
+{
+ const struct iphdr *iph = ip_hdr(skb);
+ struct net *net = pick_net(skb);
+ struct rtable *rt;
+ struct flowi fl;
+
+ memset(&fl, 0, sizeof(fl));
+ if (info->priv) {
+ if (info->priv->oif == -1)
+ return false;
+ fl.oif = info->priv->oif;
+ }
+ fl.nl_u.ip4_u.daddr = info->gw.ip;
+ fl.nl_u.ip4_u.tos = RT_TOS(iph->tos);
+ fl.nl_u.ip4_u.scope = RT_SCOPE_UNIVERSE;
+ if (ip_route_output_key(net, &rt, &fl) != 0)
+ return false;
+
+ dst_release(skb_dst(skb));
+ skb_dst_set(skb, &rt->u.dst);
+ skb->dev = rt->u.dst.dev;
+ skb->protocol = htons(ETH_P_IP);
+ return true;
+}
+
+static unsigned int
+tee_tg4(struct sk_buff *skb, const struct xt_target_param *par)
+{
+ const struct xt_tee_tginfo *info = par->targinfo;
+ struct iphdr *iph;
+
+ if (percpu_read(tee_active))
+ return XT_CONTINUE;
+ /*
+ * Copy the skb, and route the copy. Will later return %XT_CONTINUE for
+ * the original skb, which should continue on its way as if nothing has
+ * happened. The copy should be independently delivered to the TEE
+ * --gateway.
+ */
+ skb = pskb_copy(skb, GFP_ATOMIC);
+ if (skb == NULL)
+ return XT_CONTINUE;
+
+#ifdef WITH_CONNTRACK
+ /* Avoid counting cloned packets towards the original connection. */
+ nf_conntrack_put(skb->nfct);
+ skb->nfct = &nf_conntrack_untracked.ct_general;
+ skb->nfctinfo = IP_CT_NEW;
+ nf_conntrack_get(skb->nfct);
+#endif
+ /*
+ * If we are in PREROUTING/INPUT, the checksum must be recalculated
+ * since the length could have changed as a result of defragmentation.
+ *
+ * We also decrease the TTL to mitigate potential TEE loops
+ * between two hosts.
+ *
+ * Set %IP_DF so that the original source is notified of a potentially
+ * decreased MTU on the clone route. IPv6 does this too.
+ */
+ iph = ip_hdr(skb);
+ iph->frag_off |= htons(IP_DF);
+ if (par->hooknum == NF_INET_PRE_ROUTING ||
+ par->hooknum == NF_INET_LOCAL_IN)
+ --iph->ttl;
+ ip_send_check(iph);
+
+ if (tee_tg_route4(skb, info)) {
+ percpu_write(tee_active, true);
+ ip_local_out(skb);
+ percpu_write(tee_active, false);
+ } else {
+ kfree_skb(skb);
+ }
+ return XT_CONTINUE;
+}
+
+#ifdef WITH_IPV6
+static bool
+tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info)
+{
+ const struct ipv6hdr *iph = ipv6_hdr(skb);
+ struct net *net = pick_net(skb);
+ struct dst_entry *dst;
+ struct flowi fl;
+
+ memset(&fl, 0, sizeof(fl));
+ if (info->priv) {
+ if (info->priv->oif == -1)
+ return false;
+ fl.oif = info->priv->oif;
+ }
+ fl.nl_u.ip6_u.daddr = info->gw.in6;
+ fl.nl_u.ip6_u.flowlabel = ((iph->flow_lbl[0] & 0xF) << 16) |
+ (iph->flow_lbl[1] << 8) | iph->flow_lbl[2];
+ dst = ip6_route_output(net, NULL, &fl);
+ if (dst == NULL)
+ return false;
+
+ dst_release(skb_dst(skb));
+ skb_dst_set(skb, dst);
+ skb->dev = dst->dev;
+ skb->protocol = htons(ETH_P_IPV6);
+ return true;
+}
+
+static unsigned int
+tee_tg6(struct sk_buff *skb, const struct xt_target_param *par)
+{
+ const struct xt_tee_tginfo *info = par->targinfo;
+
+ if (percpu_read(tee_active))
+ return XT_CONTINUE;
+ skb = pskb_copy(skb, GFP_ATOMIC);
+ if (skb == NULL)
+ return XT_CONTINUE;
+
+#ifdef WITH_CONNTRACK
+ nf_conntrack_put(skb->nfct);
+ skb->nfct = &nf_conntrack_untracked.ct_general;
+ skb->nfctinfo = IP_CT_NEW;
+ nf_conntrack_get(skb->nfct);
+#endif
+ if (par->hooknum == NF_INET_PRE_ROUTING ||
+ par->hooknum == NF_INET_LOCAL_IN) {
+ struct ipv6hdr *iph = ipv6_hdr(skb);
+ --iph->hop_limit;
+ }
+ if (tee_tg_route6(skb, info)) {
+ percpu_write(tee_active, true);
+ ip6_local_out(skb);
+ percpu_write(tee_active, false);
+ } else {
+ kfree_skb(skb);
+ }
+ return XT_CONTINUE;
+}
+#endif /* WITH_IPV6 */
+
+static int tee_netdev_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ struct net_device *dev = ptr;
+ struct xt_tee_priv *priv;
+
+ priv = container_of(this, struct xt_tee_priv, notifier);
+ switch (event) {
+ case NETDEV_REGISTER:
+ if (!strcmp(dev->name, priv->tginfo->oif))
+ priv->oif = dev->ifindex;
+ break;
+ case NETDEV_UNREGISTER:
+ if (dev->ifindex == priv->oif)
+ priv->oif = -1;
+ break;
+ case NETDEV_CHANGENAME:
+ if (!strcmp(dev->name, priv->tginfo->oif))
+ priv->oif = dev->ifindex;
+ else if (dev->ifindex == priv->oif)
+ priv->oif = -1;
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int tee_tg_check(const struct xt_tgchk_param *par)
+{
+ struct xt_tee_tginfo *info = par->targinfo;
+ struct xt_tee_priv *priv;
+
+ /* 0.0.0.0 and :: not allowed */
+ if (memcmp(&info->gw, &tee_zero_address,
+ sizeof(tee_zero_address)) == 0)
+ return -EINVAL;
+
+ if (info->oif[0]) {
+ if (info->oif[sizeof(info->oif)-1] != '\0')
+ return -EINVAL;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (priv == NULL)
+ return -ENOMEM;
+
+ priv->tginfo = info;
+ priv->oif = -1;
+ priv->notifier.notifier_call = tee_netdev_event;
+ info->priv = priv;
+
+ register_netdevice_notifier(&priv->notifier);
+ } else
+ info->priv = NULL;
+
+ return 0;
+}
+
+static void tee_tg_destroy(const struct xt_tgdtor_param *par)
+{
+ struct xt_tee_tginfo *info = par->targinfo;
+
+ if (info->priv) {
+ unregister_netdevice_notifier(&info->priv->notifier);
+ kfree(info->priv);
+ }
+}
+
+static struct xt_target tee_tg_reg[] __read_mostly = {
+ {
+ .name = "TEE",
+ .revision = 1,
+ .family = NFPROTO_IPV4,
+ .target = tee_tg4,
+ .targetsize = sizeof(struct xt_tee_tginfo),
+ .checkentry = tee_tg_check,
+ .destroy = tee_tg_destroy,
+ .me = THIS_MODULE,
+ },
+#ifdef WITH_IPV6
+ {
+ .name = "TEE",
+ .revision = 1,
+ .family = NFPROTO_IPV6,
+ .target = tee_tg6,
+ .targetsize = sizeof(struct xt_tee_tginfo),
+ .checkentry = tee_tg_check,
+ .destroy = tee_tg_destroy,
+ .me = THIS_MODULE,
+ },
+#endif
+};
+
+static int __init tee_tg_init(void)
+{
+ return xt_register_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg));
+}
+
+static void __exit tee_tg_exit(void)
+{
+ xt_unregister_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg));
+}
+
+module_init(tee_tg_init);
+module_exit(tee_tg_exit);
+MODULE_AUTHOR("Sebastian Claßen <sebastian.classen@freenet.ag>");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
+MODULE_DESCRIPTION("Xtables: Reroute packet copy");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_TEE");
+MODULE_ALIAS("ip6t_TEE");
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index 1340c2fa3621..4f246ddc5c48 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -9,7 +9,7 @@
* published by the Free Software Foundation.
*
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
@@ -59,17 +59,17 @@ tproxy_tg(struct sk_buff *skb, const struct xt_target_param *par)
return NF_DROP;
}
-static bool tproxy_tg_check(const struct xt_tgchk_param *par)
+static int tproxy_tg_check(const struct xt_tgchk_param *par)
{
const struct ipt_ip *i = par->entryinfo;
if ((i->proto == IPPROTO_TCP || i->proto == IPPROTO_UDP)
&& !(i->invflags & IPT_INV_PROTO))
- return true;
+ return 0;
- pr_info("xt_TPROXY: Can be used only in combination with "
+ pr_info("Can be used only in combination with "
"either -p tcp or -p udp\n");
- return false;
+ return -EINVAL;
}
static struct xt_target tproxy_tg_reg __read_mostly = {
diff --git a/net/netfilter/xt_cluster.c b/net/netfilter/xt_cluster.c
index 225ee3ecd69d..6c941e1c6b9e 100644
--- a/net/netfilter/xt_cluster.c
+++ b/net/netfilter/xt_cluster.c
@@ -5,6 +5,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
@@ -131,22 +132,22 @@ xt_cluster_mt(const struct sk_buff *skb, const struct xt_match_param *par)
!!(info->flags & XT_CLUSTER_F_INV);
}
-static bool xt_cluster_mt_checkentry(const struct xt_mtchk_param *par)
+static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par)
{
struct xt_cluster_match_info *info = par->matchinfo;
if (info->total_nodes > XT_CLUSTER_NODES_MAX) {
- printk(KERN_ERR "xt_cluster: you have exceeded the maximum "
- "number of cluster nodes (%u > %u)\n",
- info->total_nodes, XT_CLUSTER_NODES_MAX);
- return false;
+ pr_info("you have exceeded the maximum "
+ "number of cluster nodes (%u > %u)\n",
+ info->total_nodes, XT_CLUSTER_NODES_MAX);
+ return -EINVAL;
}
if (info->node_mask >= (1ULL << info->total_nodes)) {
- printk(KERN_ERR "xt_cluster: this node mask cannot be "
- "higher than the total number of nodes\n");
- return false;
+ pr_info("this node mask cannot be "
+ "higher than the total number of nodes\n");
+ return -EDOM;
}
- return true;
+ return 0;
}
static struct xt_match xt_cluster_match __read_mostly = {
diff --git a/net/netfilter/xt_connbytes.c b/net/netfilter/xt_connbytes.c
index 955e6598a7f0..ff738a5f963a 100644
--- a/net/netfilter/xt_connbytes.c
+++ b/net/netfilter/xt_connbytes.c
@@ -1,6 +1,7 @@
/* Kernel module to match connection tracking byte counter.
* GPL (C) 2002 Martin Devera (devik@cdi.cz).
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/skbuff.h>
@@ -92,27 +93,26 @@ connbytes_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return what >= sinfo->count.from;
}
-static bool connbytes_mt_check(const struct xt_mtchk_param *par)
+static int connbytes_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_connbytes_info *sinfo = par->matchinfo;
+ int ret;
if (sinfo->what != XT_CONNBYTES_PKTS &&
sinfo->what != XT_CONNBYTES_BYTES &&
sinfo->what != XT_CONNBYTES_AVGPKT)
- return false;
+ return -EINVAL;
if (sinfo->direction != XT_CONNBYTES_DIR_ORIGINAL &&
sinfo->direction != XT_CONNBYTES_DIR_REPLY &&
sinfo->direction != XT_CONNBYTES_DIR_BOTH)
- return false;
-
- if (nf_ct_l3proto_try_module_get(par->family) < 0) {
- printk(KERN_WARNING "can't load conntrack support for "
- "proto=%u\n", par->family);
- return false;
- }
+ return -EINVAL;
- return true;
+ ret = nf_ct_l3proto_try_module_get(par->family);
+ if (ret < 0)
+ pr_info("cannot load conntrack support for proto=%u\n",
+ par->family);
+ return ret;
}
static void connbytes_mt_destroy(const struct xt_mtdtor_param *par)
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index 388ca4596098..326bc1b81681 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -5,13 +5,13 @@
* Nov 2002: Martin Bene <martin.bene@icomedias.com>:
* only ignore TIME_WAIT or gone connections
* (C) CC Computer Consultants GmbH, 2007
- * Contact: <jengelh@computergmbh.de>
*
* based on ...
*
* Kernel module to match connection tracking information.
* GPL (C) 1999 Rusty Russell (rusty@rustcorp.com.au).
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
@@ -217,33 +217,35 @@ connlimit_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return false;
}
-static bool connlimit_mt_check(const struct xt_mtchk_param *par)
+static int connlimit_mt_check(const struct xt_mtchk_param *par)
{
struct xt_connlimit_info *info = par->matchinfo;
unsigned int i;
+ int ret;
if (unlikely(!connlimit_rnd_inited)) {
get_random_bytes(&connlimit_rnd, sizeof(connlimit_rnd));
connlimit_rnd_inited = true;
}
- if (nf_ct_l3proto_try_module_get(par->family) < 0) {
- printk(KERN_WARNING "cannot load conntrack support for "
- "address family %u\n", par->family);
- return false;
+ ret = nf_ct_l3proto_try_module_get(par->family);
+ if (ret < 0) {
+ pr_info("cannot load conntrack support for "
+ "address family %u\n", par->family);
+ return ret;
}
/* init private data */
info->data = kmalloc(sizeof(struct xt_connlimit_data), GFP_KERNEL);
if (info->data == NULL) {
nf_ct_l3proto_module_put(par->family);
- return false;
+ return -ENOMEM;
}
spin_lock_init(&info->data->lock);
for (i = 0; i < ARRAY_SIZE(info->data->iphash); ++i)
INIT_LIST_HEAD(&info->data->iphash[i]);
- return true;
+ return 0;
}
static void connlimit_mt_destroy(const struct xt_mtdtor_param *par)
diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c
index 122aa8b0147b..ae1015484ae2 100644
--- a/net/netfilter/xt_connmark.c
+++ b/net/netfilter/xt_connmark.c
@@ -1,10 +1,10 @@
/*
- * xt_connmark - Netfilter module to match connection mark values
+ * xt_connmark - Netfilter module to operate on connection marks
*
* Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com>
* by Henrik Nordstrom <hno@marasystems.com>
* Copyright © CC Computer Consultants GmbH, 2007 - 2008
- * Jan Engelhardt <jengelh@computergmbh.de>
+ * Jan Engelhardt <jengelh@medozas.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -24,15 +24,72 @@
#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_connmark.h>
MODULE_AUTHOR("Henrik Nordstrom <hno@marasystems.com>");
-MODULE_DESCRIPTION("Xtables: connection mark match");
+MODULE_DESCRIPTION("Xtables: connection mark operations");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_CONNMARK");
+MODULE_ALIAS("ip6t_CONNMARK");
MODULE_ALIAS("ipt_connmark");
MODULE_ALIAS("ip6t_connmark");
+static unsigned int
+connmark_tg(struct sk_buff *skb, const struct xt_target_param *par)
+{
+ const struct xt_connmark_tginfo1 *info = par->targinfo;
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct;
+ u_int32_t newmark;
+
+ ct = nf_ct_get(skb, &ctinfo);
+ if (ct == NULL)
+ return XT_CONTINUE;
+
+ switch (info->mode) {
+ case XT_CONNMARK_SET:
+ newmark = (ct->mark & ~info->ctmask) ^ info->ctmark;
+ if (ct->mark != newmark) {
+ ct->mark = newmark;
+ nf_conntrack_event_cache(IPCT_MARK, ct);
+ }
+ break;
+ case XT_CONNMARK_SAVE:
+ newmark = (ct->mark & ~info->ctmask) ^
+ (skb->mark & info->nfmask);
+ if (ct->mark != newmark) {
+ ct->mark = newmark;
+ nf_conntrack_event_cache(IPCT_MARK, ct);
+ }
+ break;
+ case XT_CONNMARK_RESTORE:
+ newmark = (skb->mark & ~info->nfmask) ^
+ (ct->mark & info->ctmask);
+ skb->mark = newmark;
+ break;
+ }
+
+ return XT_CONTINUE;
+}
+
+static int connmark_tg_check(const struct xt_tgchk_param *par)
+{
+ int ret;
+
+ ret = nf_ct_l3proto_try_module_get(par->family);
+ if (ret < 0)
+ pr_info("cannot load conntrack support for proto=%u\n",
+ par->family);
+ return ret;
+}
+
+static void connmark_tg_destroy(const struct xt_tgdtor_param *par)
+{
+ nf_ct_l3proto_module_put(par->family);
+}
+
static bool
connmark_mt(const struct sk_buff *skb, const struct xt_match_param *par)
{
@@ -47,14 +104,15 @@ connmark_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return ((ct->mark & info->mask) == info->mark) ^ info->invert;
}
-static bool connmark_mt_check(const struct xt_mtchk_param *par)
+static int connmark_mt_check(const struct xt_mtchk_param *par)
{
- if (nf_ct_l3proto_try_module_get(par->family) < 0) {
- printk(KERN_WARNING "cannot load conntrack support for "
- "proto=%u\n", par->family);
- return false;
- }
- return true;
+ int ret;
+
+ ret = nf_ct_l3proto_try_module_get(par->family);
+ if (ret < 0)
+ pr_info("cannot load conntrack support for proto=%u\n",
+ par->family);
+ return ret;
}
static void connmark_mt_destroy(const struct xt_mtdtor_param *par)
@@ -62,6 +120,17 @@ static void connmark_mt_destroy(const struct xt_mtdtor_param *par)
nf_ct_l3proto_module_put(par->family);
}
+static struct xt_target connmark_tg_reg __read_mostly = {
+ .name = "CONNMARK",
+ .revision = 1,
+ .family = NFPROTO_UNSPEC,
+ .checkentry = connmark_tg_check,
+ .target = connmark_tg,
+ .targetsize = sizeof(struct xt_connmark_tginfo1),
+ .destroy = connmark_tg_destroy,
+ .me = THIS_MODULE,
+};
+
static struct xt_match connmark_mt_reg __read_mostly = {
.name = "connmark",
.revision = 1,
@@ -75,12 +144,23 @@ static struct xt_match connmark_mt_reg __read_mostly = {
static int __init connmark_mt_init(void)
{
- return xt_register_match(&connmark_mt_reg);
+ int ret;
+
+ ret = xt_register_target(&connmark_tg_reg);
+ if (ret < 0)
+ return ret;
+ ret = xt_register_match(&connmark_mt_reg);
+ if (ret < 0) {
+ xt_unregister_target(&connmark_tg_reg);
+ return ret;
+ }
+ return 0;
}
static void __exit connmark_mt_exit(void)
{
xt_unregister_match(&connmark_mt_reg);
+ xt_unregister_target(&connmark_tg_reg);
}
module_init(connmark_mt_init);
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c
index ae66305f0fe5..3348706ce56d 100644
--- a/net/netfilter/xt_conntrack.c
+++ b/net/netfilter/xt_conntrack.c
@@ -9,7 +9,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/ipv6.h>
@@ -206,14 +206,15 @@ conntrack_mt_v2(const struct sk_buff *skb, const struct xt_match_param *par)
return conntrack_mt(skb, par, info->state_mask, info->status_mask);
}
-static bool conntrack_mt_check(const struct xt_mtchk_param *par)
+static int conntrack_mt_check(const struct xt_mtchk_param *par)
{
- if (nf_ct_l3proto_try_module_get(par->family) < 0) {
- printk(KERN_WARNING "can't load conntrack support for "
- "proto=%u\n", par->family);
- return false;
- }
- return true;
+ int ret;
+
+ ret = nf_ct_l3proto_try_module_get(par->family);
+ if (ret < 0)
+ pr_info("cannot load conntrack support for proto=%u\n",
+ par->family);
+ return ret;
}
static void conntrack_mt_destroy(const struct xt_mtdtor_param *par)
diff --git a/net/netfilter/xt_dccp.c b/net/netfilter/xt_dccp.c
index 395af5943ffd..0d260aec487f 100644
--- a/net/netfilter/xt_dccp.c
+++ b/net/netfilter/xt_dccp.c
@@ -124,13 +124,17 @@ dccp_mt(const struct sk_buff *skb, const struct xt_match_param *par)
XT_DCCP_OPTION, info->flags, info->invflags);
}
-static bool dccp_mt_check(const struct xt_mtchk_param *par)
+static int dccp_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_dccp_info *info = par->matchinfo;
- return !(info->flags & ~XT_DCCP_VALID_FLAGS)
- && !(info->invflags & ~XT_DCCP_VALID_FLAGS)
- && !(info->invflags & ~info->flags);
+ if (info->flags & ~XT_DCCP_VALID_FLAGS)
+ return -EINVAL;
+ if (info->invflags & ~XT_DCCP_VALID_FLAGS)
+ return -EINVAL;
+ if (info->invflags & ~info->flags)
+ return -EINVAL;
+ return 0;
}
static struct xt_match dccp_mt_reg[] __read_mostly = {
diff --git a/net/netfilter/xt_dscp.c b/net/netfilter/xt_dscp.c
index 0280d3a8c161..9db51fddbdb8 100644
--- a/net/netfilter/xt_dscp.c
+++ b/net/netfilter/xt_dscp.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
@@ -42,23 +42,23 @@ dscp_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
return (dscp == info->dscp) ^ !!info->invert;
}
-static bool dscp_mt_check(const struct xt_mtchk_param *par)
+static int dscp_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_dscp_info *info = par->matchinfo;
if (info->dscp > XT_DSCP_MAX) {
- printk(KERN_ERR "xt_dscp: dscp %x out of range\n", info->dscp);
- return false;
+ pr_info("dscp %x out of range\n", info->dscp);
+ return -EDOM;
}
- return true;
+ return 0;
}
static bool tos_mt(const struct sk_buff *skb, const struct xt_match_param *par)
{
const struct xt_tos_match_info *info = par->matchinfo;
- if (par->match->family == NFPROTO_IPV4)
+ if (par->family == NFPROTO_IPV4)
return ((ip_hdr(skb)->tos & info->tos_mask) ==
info->tos_value) ^ !!info->invert;
else
diff --git a/net/netfilter/xt_esp.c b/net/netfilter/xt_esp.c
index 609439967c2c..143bfdc8e38f 100644
--- a/net/netfilter/xt_esp.c
+++ b/net/netfilter/xt_esp.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/in.h>
@@ -24,21 +24,15 @@ MODULE_DESCRIPTION("Xtables: IPsec-ESP packet match");
MODULE_ALIAS("ipt_esp");
MODULE_ALIAS("ip6t_esp");
-#if 0
-#define duprintf(format, args...) printk(format , ## args)
-#else
-#define duprintf(format, args...)
-#endif
-
/* Returns 1 if the spi is matched by the range, 0 otherwise */
static inline bool
spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, bool invert)
{
bool r;
- duprintf("esp spi_match:%c 0x%x <= 0x%x <= 0x%x", invert ? '!' : ' ',
- min, spi, max);
+ pr_debug("spi_match:%c 0x%x <= 0x%x <= 0x%x\n",
+ invert ? '!' : ' ', min, spi, max);
r = (spi >= min && spi <= max) ^ invert;
- duprintf(" result %s\n", r ? "PASS" : "FAILED");
+ pr_debug(" result %s\n", r ? "PASS" : "FAILED");
return r;
}
@@ -57,7 +51,7 @@ static bool esp_mt(const struct sk_buff *skb, const struct xt_match_param *par)
/* We've been asked to examine this packet, and we
* can't. Hence, no choice but to drop.
*/
- duprintf("Dropping evil ESP tinygram.\n");
+ pr_debug("Dropping evil ESP tinygram.\n");
*par->hotdrop = true;
return false;
}
@@ -66,16 +60,16 @@ static bool esp_mt(const struct sk_buff *skb, const struct xt_match_param *par)
!!(espinfo->invflags & XT_ESP_INV_SPI));
}
-static bool esp_mt_check(const struct xt_mtchk_param *par)
+static int esp_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_esp *espinfo = par->matchinfo;
if (espinfo->invflags & ~XT_ESP_INV_MASK) {
- duprintf("xt_esp: unknown flags %X\n", espinfo->invflags);
- return false;
+ pr_debug("unknown flags %X\n", espinfo->invflags);
+ return -EINVAL;
}
- return true;
+ return 0;
}
static struct xt_match esp_mt_reg[] __read_mostly = {
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 215a64835de8..0c366d387c8c 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -7,6 +7,7 @@
*
* Development of this code was funded by Astaro AG, http://www.astaro.com/
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/random.h>
@@ -36,7 +37,7 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
-MODULE_AUTHOR("Jan Engelhardt <jengelh@computergmbh.de>");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
MODULE_DESCRIPTION("Xtables: per hash-bucket rate-limit match");
MODULE_ALIAS("ipt_hashlimit");
MODULE_ALIAS("ip6t_hashlimit");
@@ -80,12 +81,14 @@ struct dsthash_ent {
struct dsthash_dst dst;
/* modified structure members in the end */
+ spinlock_t lock;
unsigned long expires; /* precalculated expiry time */
struct {
unsigned long prev; /* last modification */
u_int32_t credit;
u_int32_t credit_cap, cost;
} rateinfo;
+ struct rcu_head rcu;
};
struct xt_hashlimit_htable {
@@ -142,9 +145,11 @@ dsthash_find(const struct xt_hashlimit_htable *ht,
u_int32_t hash = hash_dst(ht, dst);
if (!hlist_empty(&ht->hash[hash])) {
- hlist_for_each_entry(ent, pos, &ht->hash[hash], node)
- if (dst_cmp(ent, dst))
+ hlist_for_each_entry_rcu(ent, pos, &ht->hash[hash], node)
+ if (dst_cmp(ent, dst)) {
+ spin_lock(&ent->lock);
return ent;
+ }
}
return NULL;
}
@@ -156,9 +161,10 @@ dsthash_alloc_init(struct xt_hashlimit_htable *ht,
{
struct dsthash_ent *ent;
+ spin_lock(&ht->lock);
/* initialize hash with random val at the time we allocate
* the first hashtable entry */
- if (!ht->rnd_initialized) {
+ if (unlikely(!ht->rnd_initialized)) {
get_random_bytes(&ht->rnd, sizeof(ht->rnd));
ht->rnd_initialized = true;
}
@@ -166,106 +172,40 @@ dsthash_alloc_init(struct xt_hashlimit_htable *ht,
if (ht->cfg.max && ht->count >= ht->cfg.max) {
/* FIXME: do something. question is what.. */
if (net_ratelimit())
- printk(KERN_WARNING
- "xt_hashlimit: max count of %u reached\n",
- ht->cfg.max);
- return NULL;
- }
-
- ent = kmem_cache_alloc(hashlimit_cachep, GFP_ATOMIC);
+ pr_err("max count of %u reached\n", ht->cfg.max);
+ ent = NULL;
+ } else
+ ent = kmem_cache_alloc(hashlimit_cachep, GFP_ATOMIC);
if (!ent) {
if (net_ratelimit())
- printk(KERN_ERR
- "xt_hashlimit: can't allocate dsthash_ent\n");
- return NULL;
- }
- memcpy(&ent->dst, dst, sizeof(ent->dst));
+ pr_err("cannot allocate dsthash_ent\n");
+ } else {
+ memcpy(&ent->dst, dst, sizeof(ent->dst));
+ spin_lock_init(&ent->lock);
- hlist_add_head(&ent->node, &ht->hash[hash_dst(ht, dst)]);
- ht->count++;
+ spin_lock(&ent->lock);
+ hlist_add_head_rcu(&ent->node, &ht->hash[hash_dst(ht, dst)]);
+ ht->count++;
+ }
+ spin_unlock(&ht->lock);
return ent;
}
-static inline void
-dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent)
+static void dsthash_free_rcu(struct rcu_head *head)
{
- hlist_del(&ent->node);
+ struct dsthash_ent *ent = container_of(head, struct dsthash_ent, rcu);
+
kmem_cache_free(hashlimit_cachep, ent);
- ht->count--;
}
-static void htable_gc(unsigned long htlong);
-static int htable_create_v0(struct net *net, struct xt_hashlimit_info *minfo, u_int8_t family)
+static inline void
+dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent)
{
- struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
- struct xt_hashlimit_htable *hinfo;
- unsigned int size;
- unsigned int i;
-
- if (minfo->cfg.size)
- size = minfo->cfg.size;
- else {
- size = ((totalram_pages << PAGE_SHIFT) / 16384) /
- sizeof(struct list_head);
- if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
- size = 8192;
- if (size < 16)
- size = 16;
- }
- /* FIXME: don't use vmalloc() here or anywhere else -HW */
- hinfo = vmalloc(sizeof(struct xt_hashlimit_htable) +
- sizeof(struct list_head) * size);
- if (!hinfo) {
- printk(KERN_ERR "xt_hashlimit: unable to create hashtable\n");
- return -1;
- }
- minfo->hinfo = hinfo;
-
- /* copy match config into hashtable config */
- hinfo->cfg.mode = minfo->cfg.mode;
- hinfo->cfg.avg = minfo->cfg.avg;
- hinfo->cfg.burst = minfo->cfg.burst;
- hinfo->cfg.max = minfo->cfg.max;
- hinfo->cfg.gc_interval = minfo->cfg.gc_interval;
- hinfo->cfg.expire = minfo->cfg.expire;
-
- if (family == NFPROTO_IPV4)
- hinfo->cfg.srcmask = hinfo->cfg.dstmask = 32;
- else
- hinfo->cfg.srcmask = hinfo->cfg.dstmask = 128;
-
- hinfo->cfg.size = size;
- if (!hinfo->cfg.max)
- hinfo->cfg.max = 8 * hinfo->cfg.size;
- else if (hinfo->cfg.max < hinfo->cfg.size)
- hinfo->cfg.max = hinfo->cfg.size;
-
- for (i = 0; i < hinfo->cfg.size; i++)
- INIT_HLIST_HEAD(&hinfo->hash[i]);
-
- hinfo->use = 1;
- hinfo->count = 0;
- hinfo->family = family;
- hinfo->rnd_initialized = false;
- spin_lock_init(&hinfo->lock);
- hinfo->pde = proc_create_data(minfo->name, 0,
- (family == NFPROTO_IPV4) ?
- hashlimit_net->ipt_hashlimit : hashlimit_net->ip6t_hashlimit,
- &dl_file_ops, hinfo);
- if (!hinfo->pde) {
- vfree(hinfo);
- return -1;
- }
- hinfo->net = net;
-
- setup_timer(&hinfo->timer, htable_gc, (unsigned long )hinfo);
- hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval);
- add_timer(&hinfo->timer);
-
- hlist_add_head(&hinfo->node, &hashlimit_net->htables);
-
- return 0;
+ hlist_del_rcu(&ent->node);
+ call_rcu_bh(&ent->rcu, dsthash_free_rcu);
+ ht->count--;
}
+static void htable_gc(unsigned long htlong);
static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo,
u_int8_t family)
@@ -288,10 +228,8 @@ static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo,
/* FIXME: don't use vmalloc() here or anywhere else -HW */
hinfo = vmalloc(sizeof(struct xt_hashlimit_htable) +
sizeof(struct list_head) * size);
- if (hinfo == NULL) {
- printk(KERN_ERR "xt_hashlimit: unable to create hashtable\n");
- return -1;
- }
+ if (hinfo == NULL)
+ return -ENOMEM;
minfo->hinfo = hinfo;
/* copy match config into hashtable config */
@@ -317,7 +255,7 @@ static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo,
&dl_file_ops, hinfo);
if (hinfo->pde == NULL) {
vfree(hinfo);
- return -1;
+ return -ENOMEM;
}
hinfo->net = net;
@@ -578,57 +516,6 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
}
static bool
-hashlimit_mt_v0(const struct sk_buff *skb, const struct xt_match_param *par)
-{
- const struct xt_hashlimit_info *r = par->matchinfo;
- struct xt_hashlimit_htable *hinfo = r->hinfo;
- unsigned long now = jiffies;
- struct dsthash_ent *dh;
- struct dsthash_dst dst;
-
- if (hashlimit_init_dst(hinfo, &dst, skb, par->thoff) < 0)
- goto hotdrop;
-
- spin_lock_bh(&hinfo->lock);
- dh = dsthash_find(hinfo, &dst);
- if (!dh) {
- dh = dsthash_alloc_init(hinfo, &dst);
- if (!dh) {
- spin_unlock_bh(&hinfo->lock);
- goto hotdrop;
- }
-
- dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire);
- dh->rateinfo.prev = jiffies;
- dh->rateinfo.credit = user2credits(hinfo->cfg.avg *
- hinfo->cfg.burst);
- dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg *
- hinfo->cfg.burst);
- dh->rateinfo.cost = user2credits(hinfo->cfg.avg);
- } else {
- /* update expiration timeout */
- dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
- rateinfo_recalc(dh, now);
- }
-
- if (dh->rateinfo.credit >= dh->rateinfo.cost) {
- /* We're underlimit. */
- dh->rateinfo.credit -= dh->rateinfo.cost;
- spin_unlock_bh(&hinfo->lock);
- return true;
- }
-
- spin_unlock_bh(&hinfo->lock);
-
- /* default case: we're overlimit, thus don't match */
- return false;
-
-hotdrop:
- *par->hotdrop = true;
- return false;
-}
-
-static bool
hashlimit_mt(const struct sk_buff *skb, const struct xt_match_param *par)
{
const struct xt_hashlimit_mtinfo1 *info = par->matchinfo;
@@ -640,15 +527,14 @@ hashlimit_mt(const struct sk_buff *skb, const struct xt_match_param *par)
if (hashlimit_init_dst(hinfo, &dst, skb, par->thoff) < 0)
goto hotdrop;
- spin_lock_bh(&hinfo->lock);
+ rcu_read_lock_bh();
dh = dsthash_find(hinfo, &dst);
if (dh == NULL) {
dh = dsthash_alloc_init(hinfo, &dst);
if (dh == NULL) {
- spin_unlock_bh(&hinfo->lock);
+ rcu_read_unlock_bh();
goto hotdrop;
}
-
dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire);
dh->rateinfo.prev = jiffies;
dh->rateinfo.credit = user2credits(hinfo->cfg.avg *
@@ -665,11 +551,13 @@ hashlimit_mt(const struct sk_buff *skb, const struct xt_match_param *par)
if (dh->rateinfo.credit >= dh->rateinfo.cost) {
/* below the limit */
dh->rateinfo.credit -= dh->rateinfo.cost;
- spin_unlock_bh(&hinfo->lock);
+ spin_unlock(&dh->lock);
+ rcu_read_unlock_bh();
return !(info->cfg.mode & XT_HASHLIMIT_INVERT);
}
- spin_unlock_bh(&hinfo->lock);
+ spin_unlock(&dh->lock);
+ rcu_read_unlock_bh();
/* default match is underlimit - so over the limit, we need to invert */
return info->cfg.mode & XT_HASHLIMIT_INVERT;
@@ -678,83 +566,43 @@ hashlimit_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return false;
}
-static bool hashlimit_mt_check_v0(const struct xt_mtchk_param *par)
-{
- struct net *net = par->net;
- struct xt_hashlimit_info *r = par->matchinfo;
-
- /* Check for overflow. */
- if (r->cfg.burst == 0 ||
- user2credits(r->cfg.avg * r->cfg.burst) < user2credits(r->cfg.avg)) {
- printk(KERN_ERR "xt_hashlimit: overflow, try lower: %u/%u\n",
- r->cfg.avg, r->cfg.burst);
- return false;
- }
- if (r->cfg.mode == 0 ||
- r->cfg.mode > (XT_HASHLIMIT_HASH_DPT |
- XT_HASHLIMIT_HASH_DIP |
- XT_HASHLIMIT_HASH_SIP |
- XT_HASHLIMIT_HASH_SPT))
- return false;
- if (!r->cfg.gc_interval)
- return false;
- if (!r->cfg.expire)
- return false;
- if (r->name[sizeof(r->name) - 1] != '\0')
- return false;
-
- mutex_lock(&hashlimit_mutex);
- r->hinfo = htable_find_get(net, r->name, par->match->family);
- if (!r->hinfo && htable_create_v0(net, r, par->match->family) != 0) {
- mutex_unlock(&hashlimit_mutex);
- return false;
- }
- mutex_unlock(&hashlimit_mutex);
-
- return true;
-}
-
-static bool hashlimit_mt_check(const struct xt_mtchk_param *par)
+static int hashlimit_mt_check(const struct xt_mtchk_param *par)
{
struct net *net = par->net;
struct xt_hashlimit_mtinfo1 *info = par->matchinfo;
+ int ret;
/* Check for overflow. */
if (info->cfg.burst == 0 ||
user2credits(info->cfg.avg * info->cfg.burst) <
user2credits(info->cfg.avg)) {
- printk(KERN_ERR "xt_hashlimit: overflow, try lower: %u/%u\n",
- info->cfg.avg, info->cfg.burst);
- return false;
+ pr_info("overflow, try lower: %u/%u\n",
+ info->cfg.avg, info->cfg.burst);
+ return -ERANGE;
}
if (info->cfg.gc_interval == 0 || info->cfg.expire == 0)
- return false;
+ return -EINVAL;
if (info->name[sizeof(info->name)-1] != '\0')
- return false;
- if (par->match->family == NFPROTO_IPV4) {
+ return -EINVAL;
+ if (par->family == NFPROTO_IPV4) {
if (info->cfg.srcmask > 32 || info->cfg.dstmask > 32)
- return false;
+ return -EINVAL;
} else {
if (info->cfg.srcmask > 128 || info->cfg.dstmask > 128)
- return false;
+ return -EINVAL;
}
mutex_lock(&hashlimit_mutex);
- info->hinfo = htable_find_get(net, info->name, par->match->family);
- if (!info->hinfo && htable_create(net, info, par->match->family) != 0) {
- mutex_unlock(&hashlimit_mutex);
- return false;
+ info->hinfo = htable_find_get(net, info->name, par->family);
+ if (info->hinfo == NULL) {
+ ret = htable_create(net, info, par->family);
+ if (ret < 0) {
+ mutex_unlock(&hashlimit_mutex);
+ return ret;
+ }
}
mutex_unlock(&hashlimit_mutex);
- return true;
-}
-
-static void
-hashlimit_mt_destroy_v0(const struct xt_mtdtor_param *par)
-{
- const struct xt_hashlimit_info *r = par->matchinfo;
-
- htable_put(r->hinfo);
+ return 0;
}
static void hashlimit_mt_destroy(const struct xt_mtdtor_param *par)
@@ -764,47 +612,8 @@ static void hashlimit_mt_destroy(const struct xt_mtdtor_param *par)
htable_put(info->hinfo);
}
-#ifdef CONFIG_COMPAT
-struct compat_xt_hashlimit_info {
- char name[IFNAMSIZ];
- struct hashlimit_cfg cfg;
- compat_uptr_t hinfo;
- compat_uptr_t master;
-};
-
-static void hashlimit_mt_compat_from_user(void *dst, const void *src)
-{
- int off = offsetof(struct compat_xt_hashlimit_info, hinfo);
-
- memcpy(dst, src, off);
- memset(dst + off, 0, sizeof(struct compat_xt_hashlimit_info) - off);
-}
-
-static int hashlimit_mt_compat_to_user(void __user *dst, const void *src)
-{
- int off = offsetof(struct compat_xt_hashlimit_info, hinfo);
-
- return copy_to_user(dst, src, off) ? -EFAULT : 0;
-}
-#endif
-
static struct xt_match hashlimit_mt_reg[] __read_mostly = {
{
- .name = "hashlimit",
- .revision = 0,
- .family = NFPROTO_IPV4,
- .match = hashlimit_mt_v0,
- .matchsize = sizeof(struct xt_hashlimit_info),
-#ifdef CONFIG_COMPAT
- .compatsize = sizeof(struct compat_xt_hashlimit_info),
- .compat_from_user = hashlimit_mt_compat_from_user,
- .compat_to_user = hashlimit_mt_compat_to_user,
-#endif
- .checkentry = hashlimit_mt_check_v0,
- .destroy = hashlimit_mt_destroy_v0,
- .me = THIS_MODULE
- },
- {
.name = "hashlimit",
.revision = 1,
.family = NFPROTO_IPV4,
@@ -816,20 +625,6 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = {
},
#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
{
- .name = "hashlimit",
- .family = NFPROTO_IPV6,
- .match = hashlimit_mt_v0,
- .matchsize = sizeof(struct xt_hashlimit_info),
-#ifdef CONFIG_COMPAT
- .compatsize = sizeof(struct compat_xt_hashlimit_info),
- .compat_from_user = hashlimit_mt_compat_from_user,
- .compat_to_user = hashlimit_mt_compat_to_user,
-#endif
- .checkentry = hashlimit_mt_check_v0,
- .destroy = hashlimit_mt_destroy_v0,
- .me = THIS_MODULE
- },
- {
.name = "hashlimit",
.revision = 1,
.family = NFPROTO_IPV6,
@@ -888,12 +683,15 @@ static void dl_seq_stop(struct seq_file *s, void *v)
static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
struct seq_file *s)
{
+ int res;
+
+ spin_lock(&ent->lock);
/* recalculate to show accurate numbers */
rateinfo_recalc(ent, jiffies);
switch (family) {
case NFPROTO_IPV4:
- return seq_printf(s, "%ld %pI4:%u->%pI4:%u %u %u %u\n",
+ res = seq_printf(s, "%ld %pI4:%u->%pI4:%u %u %u %u\n",
(long)(ent->expires - jiffies)/HZ,
&ent->dst.ip.src,
ntohs(ent->dst.src_port),
@@ -901,9 +699,10 @@ static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
ntohs(ent->dst.dst_port),
ent->rateinfo.credit, ent->rateinfo.credit_cap,
ent->rateinfo.cost);
+ break;
#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
case NFPROTO_IPV6:
- return seq_printf(s, "%ld %pI6:%u->%pI6:%u %u %u %u\n",
+ res = seq_printf(s, "%ld %pI6:%u->%pI6:%u %u %u %u\n",
(long)(ent->expires - jiffies)/HZ,
&ent->dst.ip6.src,
ntohs(ent->dst.src_port),
@@ -911,11 +710,14 @@ static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
ntohs(ent->dst.dst_port),
ent->rateinfo.credit, ent->rateinfo.credit_cap,
ent->rateinfo.cost);
+ break;
#endif
default:
BUG();
- return 0;
+ res = 0;
}
+ spin_unlock(&ent->lock);
+ return res;
}
static int dl_seq_show(struct seq_file *s, void *v)
@@ -1024,7 +826,7 @@ static int __init hashlimit_mt_init(void)
sizeof(struct dsthash_ent), 0, 0,
NULL);
if (!hashlimit_cachep) {
- printk(KERN_ERR "xt_hashlimit: unable to create slab cache\n");
+ pr_warning("unable to create slab cache\n");
goto err2;
}
return 0;
@@ -1039,9 +841,11 @@ err1:
static void __exit hashlimit_mt_exit(void)
{
- kmem_cache_destroy(hashlimit_cachep);
xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg));
unregister_pernet_subsys(&hashlimit_net_ops);
+
+ rcu_barrier_bh();
+ kmem_cache_destroy(hashlimit_cachep);
}
module_init(hashlimit_mt_init);
diff --git a/net/netfilter/xt_helper.c b/net/netfilter/xt_helper.c
index 64fc7f277221..b8b3e13dc71e 100644
--- a/net/netfilter/xt_helper.c
+++ b/net/netfilter/xt_helper.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
@@ -54,17 +54,19 @@ helper_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return ret;
}
-static bool helper_mt_check(const struct xt_mtchk_param *par)
+static int helper_mt_check(const struct xt_mtchk_param *par)
{
struct xt_helper_info *info = par->matchinfo;
+ int ret;
- if (nf_ct_l3proto_try_module_get(par->family) < 0) {
- printk(KERN_WARNING "can't load conntrack support for "
- "proto=%u\n", par->family);
- return false;
+ ret = nf_ct_l3proto_try_module_get(par->family);
+ if (ret < 0) {
+ pr_info("cannot load conntrack support for proto=%u\n",
+ par->family);
+ return ret;
}
info->name[29] = '\0';
- return true;
+ return 0;
}
static void helper_mt_destroy(const struct xt_mtdtor_param *par)
diff --git a/net/netfilter/xt_hl.c b/net/netfilter/xt_hl.c
index 7726154c87b2..be53f7299623 100644
--- a/net/netfilter/xt_hl.c
+++ b/net/netfilter/xt_hl.c
@@ -39,10 +39,6 @@ static bool ttl_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return ttl < info->ttl;
case IPT_TTL_GT:
return ttl > info->ttl;
- default:
- printk(KERN_WARNING "ipt_ttl: unknown mode %d\n",
- info->mode);
- return false;
}
return false;
@@ -56,20 +52,12 @@ static bool hl_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
switch (info->mode) {
case IP6T_HL_EQ:
return ip6h->hop_limit == info->hop_limit;
- break;
case IP6T_HL_NE:
return ip6h->hop_limit != info->hop_limit;
- break;
case IP6T_HL_LT:
return ip6h->hop_limit < info->hop_limit;
- break;
case IP6T_HL_GT:
return ip6h->hop_limit > info->hop_limit;
- break;
- default:
- printk(KERN_WARNING "ip6t_hl: unknown mode %d\n",
- info->mode);
- return false;
}
return false;
diff --git a/net/netfilter/xt_iprange.c b/net/netfilter/xt_iprange.c
index ffc96387d556..8471d9715bde 100644
--- a/net/netfilter/xt_iprange.c
+++ b/net/netfilter/xt_iprange.c
@@ -8,6 +8,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c
index e5d7e1ffb1a4..88215dca19cb 100644
--- a/net/netfilter/xt_limit.c
+++ b/net/netfilter/xt_limit.c
@@ -5,6 +5,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
#include <linux/module.h>
@@ -98,7 +99,7 @@ user2credits(u_int32_t user)
return (user * HZ * CREDITS_PER_JIFFY) / XT_LIMIT_SCALE;
}
-static bool limit_mt_check(const struct xt_mtchk_param *par)
+static int limit_mt_check(const struct xt_mtchk_param *par)
{
struct xt_rateinfo *r = par->matchinfo;
struct xt_limit_priv *priv;
@@ -106,14 +107,14 @@ static bool limit_mt_check(const struct xt_mtchk_param *par)
/* Check for overflow. */
if (r->burst == 0
|| user2credits(r->avg * r->burst) < user2credits(r->avg)) {
- printk("Overflow in xt_limit, try lower: %u/%u\n",
- r->avg, r->burst);
- return false;
+ pr_info("Overflow, try lower: %u/%u\n",
+ r->avg, r->burst);
+ return -ERANGE;
}
priv = kmalloc(sizeof(*priv), GFP_KERNEL);
if (priv == NULL)
- return false;
+ return -ENOMEM;
/* For SMP, we only want to use one set of state. */
r->master = priv;
@@ -125,7 +126,7 @@ static bool limit_mt_check(const struct xt_mtchk_param *par)
r->credit_cap = user2credits(r->avg * r->burst); /* Credits full. */
r->cost = user2credits(r->avg);
}
- return true;
+ return 0;
}
static void limit_mt_destroy(const struct xt_mtdtor_param *par)
diff --git a/net/netfilter/xt_mac.c b/net/netfilter/xt_mac.c
index c2007116ce5b..b971ce93773e 100644
--- a/net/netfilter/xt_mac.c
+++ b/net/netfilter/xt_mac.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/skbuff.h>
+#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
@@ -26,14 +27,18 @@ MODULE_ALIAS("ip6t_mac");
static bool mac_mt(const struct sk_buff *skb, const struct xt_match_param *par)
{
- const struct xt_mac_info *info = par->matchinfo;
-
- /* Is mac pointer valid? */
- return skb_mac_header(skb) >= skb->head &&
- skb_mac_header(skb) + ETH_HLEN <= skb->data
- /* If so, compare... */
- && ((!compare_ether_addr(eth_hdr(skb)->h_source, info->srcaddr))
- ^ info->invert);
+ const struct xt_mac_info *info = par->matchinfo;
+ bool ret;
+
+ if (skb->dev == NULL || skb->dev->type != ARPHRD_ETHER)
+ return false;
+ if (skb_mac_header(skb) < skb->head)
+ return false;
+ if (skb_mac_header(skb) + ETH_HLEN > skb->data)
+ return false;
+ ret = compare_ether_addr(eth_hdr(skb)->h_source, info->srcaddr) == 0;
+ ret ^= info->invert;
+ return ret;
}
static struct xt_match mac_mt_reg __read_mostly = {
diff --git a/net/netfilter/xt_mark.c b/net/netfilter/xt_mark.c
index 1db07d8125f8..035c468a0040 100644
--- a/net/netfilter/xt_mark.c
+++ b/net/netfilter/xt_mark.c
@@ -18,9 +18,20 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
-MODULE_DESCRIPTION("Xtables: packet mark match");
+MODULE_DESCRIPTION("Xtables: packet mark operations");
MODULE_ALIAS("ipt_mark");
MODULE_ALIAS("ip6t_mark");
+MODULE_ALIAS("ipt_MARK");
+MODULE_ALIAS("ip6t_MARK");
+
+static unsigned int
+mark_tg(struct sk_buff *skb, const struct xt_target_param *par)
+{
+ const struct xt_mark_tginfo2 *info = par->targinfo;
+
+ skb->mark = (skb->mark & ~info->mask) ^ info->mark;
+ return XT_CONTINUE;
+}
static bool
mark_mt(const struct sk_buff *skb, const struct xt_match_param *par)
@@ -30,6 +41,15 @@ mark_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return ((skb->mark & info->mask) == info->mark) ^ info->invert;
}
+static struct xt_target mark_tg_reg __read_mostly = {
+ .name = "MARK",
+ .revision = 2,
+ .family = NFPROTO_UNSPEC,
+ .target = mark_tg,
+ .targetsize = sizeof(struct xt_mark_tginfo2),
+ .me = THIS_MODULE,
+};
+
static struct xt_match mark_mt_reg __read_mostly = {
.name = "mark",
.revision = 1,
@@ -41,12 +61,23 @@ static struct xt_match mark_mt_reg __read_mostly = {
static int __init mark_mt_init(void)
{
- return xt_register_match(&mark_mt_reg);
+ int ret;
+
+ ret = xt_register_target(&mark_tg_reg);
+ if (ret < 0)
+ return ret;
+ ret = xt_register_match(&mark_mt_reg);
+ if (ret < 0) {
+ xt_unregister_target(&mark_tg_reg);
+ return ret;
+ }
+ return 0;
}
static void __exit mark_mt_exit(void)
{
xt_unregister_match(&mark_mt_reg);
+ xt_unregister_target(&mark_tg_reg);
}
module_init(mark_mt_init);
diff --git a/net/netfilter/xt_multiport.c b/net/netfilter/xt_multiport.c
index d06bb2dd3900..83b77ceb264f 100644
--- a/net/netfilter/xt_multiport.c
+++ b/net/netfilter/xt_multiport.c
@@ -8,7 +8,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/udp.h>
@@ -26,29 +26,6 @@ MODULE_DESCRIPTION("Xtables: multiple port matching for TCP, UDP, UDP-Lite, SCTP
MODULE_ALIAS("ipt_multiport");
MODULE_ALIAS("ip6t_multiport");
-#if 0
-#define duprintf(format, args...) printk(format , ## args)
-#else
-#define duprintf(format, args...)
-#endif
-
-/* Returns 1 if the port is matched by the test, 0 otherwise. */
-static inline bool
-ports_match_v0(const u_int16_t *portlist, enum xt_multiport_flags flags,
- u_int8_t count, u_int16_t src, u_int16_t dst)
-{
- unsigned int i;
- for (i = 0; i < count; i++) {
- if (flags != XT_MULTIPORT_DESTINATION && portlist[i] == src)
- return true;
-
- if (flags != XT_MULTIPORT_SOURCE && portlist[i] == dst)
- return true;
- }
-
- return false;
-}
-
/* Returns 1 if the port is matched by the test, 0 otherwise. */
static inline bool
ports_match_v1(const struct xt_multiport_v1 *minfo,
@@ -63,7 +40,7 @@ ports_match_v1(const struct xt_multiport_v1 *minfo,
if (minfo->pflags[i]) {
/* range port matching */
e = minfo->ports[++i];
- duprintf("src or dst matches with %d-%d?\n", s, e);
+ pr_debug("src or dst matches with %d-%d?\n", s, e);
if (minfo->flags == XT_MULTIPORT_SOURCE
&& src >= s && src <= e)
@@ -77,7 +54,7 @@ ports_match_v1(const struct xt_multiport_v1 *minfo,
return true ^ minfo->invert;
} else {
/* exact port matching */
- duprintf("src or dst matches with %d?\n", s);
+ pr_debug("src or dst matches with %d?\n", s);
if (minfo->flags == XT_MULTIPORT_SOURCE
&& src == s)
@@ -95,30 +72,6 @@ ports_match_v1(const struct xt_multiport_v1 *minfo,
}
static bool
-multiport_mt_v0(const struct sk_buff *skb, const struct xt_match_param *par)
-{
- const __be16 *pptr;
- __be16 _ports[2];
- const struct xt_multiport *multiinfo = par->matchinfo;
-
- if (par->fragoff != 0)
- return false;
-
- pptr = skb_header_pointer(skb, par->thoff, sizeof(_ports), _ports);
- if (pptr == NULL) {
- /* We've been asked to examine this packet, and we
- * can't. Hence, no choice but to drop.
- */
- duprintf("xt_multiport: Dropping evil offset=0 tinygram.\n");
- *par->hotdrop = true;
- return false;
- }
-
- return ports_match_v0(multiinfo->ports, multiinfo->flags,
- multiinfo->count, ntohs(pptr[0]), ntohs(pptr[1]));
-}
-
-static bool
multiport_mt(const struct sk_buff *skb, const struct xt_match_param *par)
{
const __be16 *pptr;
@@ -133,7 +86,7 @@ multiport_mt(const struct sk_buff *skb, const struct xt_match_param *par)
/* We've been asked to examine this packet, and we
* can't. Hence, no choice but to drop.
*/
- duprintf("xt_multiport: Dropping evil offset=0 tinygram.\n");
+ pr_debug("Dropping evil offset=0 tinygram.\n");
*par->hotdrop = true;
return false;
}
@@ -158,16 +111,7 @@ check(u_int16_t proto,
&& count <= XT_MULTI_PORTS;
}
-static bool multiport_mt_check_v0(const struct xt_mtchk_param *par)
-{
- const struct ipt_ip *ip = par->entryinfo;
- const struct xt_multiport *multiinfo = par->matchinfo;
-
- return check(ip->proto, ip->invflags, multiinfo->flags,
- multiinfo->count);
-}
-
-static bool multiport_mt_check(const struct xt_mtchk_param *par)
+static int multiport_mt_check(const struct xt_mtchk_param *par)
{
const struct ipt_ip *ip = par->entryinfo;
const struct xt_multiport_v1 *multiinfo = par->matchinfo;
@@ -176,16 +120,7 @@ static bool multiport_mt_check(const struct xt_mtchk_param *par)
multiinfo->count);
}
-static bool multiport_mt6_check_v0(const struct xt_mtchk_param *par)
-{
- const struct ip6t_ip6 *ip = par->entryinfo;
- const struct xt_multiport *multiinfo = par->matchinfo;
-
- return check(ip->proto, ip->invflags, multiinfo->flags,
- multiinfo->count);
-}
-
-static bool multiport_mt6_check(const struct xt_mtchk_param *par)
+static int multiport_mt6_check(const struct xt_mtchk_param *par)
{
const struct ip6t_ip6 *ip = par->entryinfo;
const struct xt_multiport_v1 *multiinfo = par->matchinfo;
@@ -198,15 +133,6 @@ static struct xt_match multiport_mt_reg[] __read_mostly = {
{
.name = "multiport",
.family = NFPROTO_IPV4,
- .revision = 0,
- .checkentry = multiport_mt_check_v0,
- .match = multiport_mt_v0,
- .matchsize = sizeof(struct xt_multiport),
- .me = THIS_MODULE,
- },
- {
- .name = "multiport",
- .family = NFPROTO_IPV4,
.revision = 1,
.checkentry = multiport_mt_check,
.match = multiport_mt,
@@ -216,15 +142,6 @@ static struct xt_match multiport_mt_reg[] __read_mostly = {
{
.name = "multiport",
.family = NFPROTO_IPV6,
- .revision = 0,
- .checkentry = multiport_mt6_check_v0,
- .match = multiport_mt_v0,
- .matchsize = sizeof(struct xt_multiport),
- .me = THIS_MODULE,
- },
- {
- .name = "multiport",
- .family = NFPROTO_IPV6,
.revision = 1,
.checkentry = multiport_mt6_check,
.match = multiport_mt,
diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c
index 4169e200588d..8dcde13a0781 100644
--- a/net/netfilter/xt_osf.c
+++ b/net/netfilter/xt_osf.c
@@ -16,7 +16,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
@@ -382,14 +382,14 @@ static int __init xt_osf_init(void)
err = nfnetlink_subsys_register(&xt_osf_nfnetlink);
if (err < 0) {
- printk(KERN_ERR "Failed (%d) to register OSF nsfnetlink helper.\n", err);
+ pr_err("Failed to register OSF nsfnetlink helper (%d)\n", err);
goto err_out_exit;
}
err = xt_register_match(&xt_osf_match);
if (err) {
- printk(KERN_ERR "Failed (%d) to register OS fingerprint "
- "matching module.\n", err);
+ pr_err("Failed to register OS fingerprint "
+ "matching module (%d)\n", err);
goto err_out_remove;
}
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
index 8d28ca5848bc..d0bdf3dd4d25 100644
--- a/net/netfilter/xt_physdev.c
+++ b/net/netfilter/xt_physdev.c
@@ -7,7 +7,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter_bridge.h>
@@ -83,25 +83,25 @@ match_outdev:
return (!!ret ^ !(info->invert & XT_PHYSDEV_OP_OUT));
}
-static bool physdev_mt_check(const struct xt_mtchk_param *par)
+static int physdev_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_physdev_info *info = par->matchinfo;
if (!(info->bitmask & XT_PHYSDEV_OP_MASK) ||
info->bitmask & ~XT_PHYSDEV_OP_MASK)
- return false;
+ return -EINVAL;
if (info->bitmask & XT_PHYSDEV_OP_OUT &&
(!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) ||
info->invert & XT_PHYSDEV_OP_BRIDGED) &&
par->hook_mask & ((1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_FORWARD) | (1 << NF_INET_POST_ROUTING))) {
- printk(KERN_WARNING "physdev match: using --physdev-out in the "
- "OUTPUT, FORWARD and POSTROUTING chains for non-bridged "
- "traffic is not supported anymore.\n");
+ pr_info("using --physdev-out in the OUTPUT, FORWARD and "
+ "POSTROUTING chains for non-bridged traffic is not "
+ "supported anymore.\n");
if (par->hook_mask & (1 << NF_INET_LOCAL_OUT))
- return false;
+ return -EINVAL;
}
- return true;
+ return 0;
}
static struct xt_match physdev_mt_reg __read_mostly = {
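xt_physdev, and most of the files below it, also gain a pr_fmt() definition ahead of the first #include, so the pr_err/pr_info/pr_debug calls that replace raw printk()s are automatically prefixed with the module name. In sketch form, showing only the rough effect of the generic printk wrapper macros rather than this file's own code:

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>

static void example(void)
{
	/* with pr_fmt defined above, this call ... */
	pr_info("--physdev-out is not supported here anymore\n");
	/* ... behaves like
	 * printk(KERN_INFO KBUILD_MODNAME ": --physdev-out is not supported here anymore\n");
	 */
}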
diff --git a/net/netfilter/xt_policy.c b/net/netfilter/xt_policy.c
index 4cbfebda8fa1..1fa239c1fb93 100644
--- a/net/netfilter/xt_policy.c
+++ b/net/netfilter/xt_policy.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
@@ -116,9 +116,9 @@ policy_mt(const struct sk_buff *skb, const struct xt_match_param *par)
int ret;
if (info->flags & XT_POLICY_MATCH_IN)
- ret = match_policy_in(skb, info, par->match->family);
+ ret = match_policy_in(skb, info, par->family);
else
- ret = match_policy_out(skb, info, par->match->family);
+ ret = match_policy_out(skb, info, par->family);
if (ret < 0)
ret = info->flags & XT_POLICY_MATCH_NONE ? true : false;
@@ -128,32 +128,29 @@ policy_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return ret;
}
-static bool policy_mt_check(const struct xt_mtchk_param *par)
+static int policy_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_policy_info *info = par->matchinfo;
if (!(info->flags & (XT_POLICY_MATCH_IN|XT_POLICY_MATCH_OUT))) {
- printk(KERN_ERR "xt_policy: neither incoming nor "
- "outgoing policy selected\n");
- return false;
+ pr_info("neither incoming nor outgoing policy selected\n");
+ return -EINVAL;
}
if (par->hook_mask & ((1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_IN)) && info->flags & XT_POLICY_MATCH_OUT) {
- printk(KERN_ERR "xt_policy: output policy not valid in "
- "PRE_ROUTING and INPUT\n");
- return false;
+ pr_info("output policy not valid in PREROUTING and INPUT\n");
+ return -EINVAL;
}
if (par->hook_mask & ((1 << NF_INET_POST_ROUTING) |
(1 << NF_INET_LOCAL_OUT)) && info->flags & XT_POLICY_MATCH_IN) {
- printk(KERN_ERR "xt_policy: input policy not valid in "
- "POST_ROUTING and OUTPUT\n");
- return false;
+ pr_info("input policy not valid in POSTROUTING and OUTPUT\n");
+ return -EINVAL;
}
if (info->len > XT_POLICY_MAX_ELEM) {
- printk(KERN_ERR "xt_policy: too many policy elements\n");
- return false;
+ pr_info("too many policy elements\n");
+ return -EINVAL;
}
- return true;
+ return 0;
}
static struct xt_match policy_mt_reg[] __read_mostly = {
diff --git a/net/netfilter/xt_quota.c b/net/netfilter/xt_quota.c
index 2d5562498c43..7c95d69f6f06 100644
--- a/net/netfilter/xt_quota.c
+++ b/net/netfilter/xt_quota.c
@@ -44,19 +44,19 @@ quota_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return ret;
}
-static bool quota_mt_check(const struct xt_mtchk_param *par)
+static int quota_mt_check(const struct xt_mtchk_param *par)
{
struct xt_quota_info *q = par->matchinfo;
if (q->flags & ~XT_QUOTA_MASK)
- return false;
+ return -EINVAL;
q->master = kmalloc(sizeof(*q->master), GFP_KERNEL);
if (q->master == NULL)
- return false;
+ return -ENOMEM;
q->master->quota = q->quota;
- return true;
+ return 0;
}
static void quota_mt_destroy(const struct xt_mtdtor_param *par)
diff --git a/net/netfilter/xt_rateest.c b/net/netfilter/xt_rateest.c
index 4fc6a917f6de..23805f8a444b 100644
--- a/net/netfilter/xt_rateest.c
+++ b/net/netfilter/xt_rateest.c
@@ -74,10 +74,11 @@ xt_rateest_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return ret;
}
-static bool xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)
+static int xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)
{
struct xt_rateest_match_info *info = par->matchinfo;
struct xt_rateest *est1, *est2;
+ int ret = false;
if (hweight32(info->flags & (XT_RATEEST_MATCH_ABS |
XT_RATEEST_MATCH_REL)) != 1)
@@ -95,6 +96,7 @@ static bool xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)
goto err1;
}
+ ret = -ENOENT;
est1 = xt_rateest_lookup(info->name1);
if (!est1)
goto err1;
@@ -109,12 +111,12 @@ static bool xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)
info->est1 = est1;
info->est2 = est2;
- return true;
+ return 0;
err2:
xt_rateest_put(est1);
err1:
- return false;
+ return -EINVAL;
}
static void xt_rateest_mt_destroy(const struct xt_mtdtor_param *par)
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index 834b736857cb..b88d63b9c76a 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -12,6 +12,7 @@
* Author: Stephen Frost <sfrost@snowman.net>
* Copyright 2002-2003, Stephen Frost, 2.5.x port by laforge@netfilter.org
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
@@ -35,8 +36,8 @@
#include <linux/netfilter/xt_recent.h>
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
-MODULE_AUTHOR("Jan Engelhardt <jengelh@computergmbh.de>");
-MODULE_DESCRIPTION("Xtables: \"recently-seen\" host matching for IPv4");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
+MODULE_DESCRIPTION("Xtables: \"recently-seen\" host matching");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_recent");
MODULE_ALIAS("ip6t_recent");
@@ -51,14 +52,14 @@ module_param(ip_list_tot, uint, 0400);
module_param(ip_pkt_list_tot, uint, 0400);
module_param(ip_list_hash_size, uint, 0400);
module_param(ip_list_perms, uint, 0400);
-module_param(ip_list_uid, uint, 0400);
-module_param(ip_list_gid, uint, 0400);
+module_param(ip_list_uid, uint, S_IRUGO | S_IWUSR);
+module_param(ip_list_gid, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ip_list_tot, "number of IPs to remember per list");
MODULE_PARM_DESC(ip_pkt_list_tot, "number of packets per IP address to remember (max. 255)");
MODULE_PARM_DESC(ip_list_hash_size, "size of hash table used to look up IPs");
MODULE_PARM_DESC(ip_list_perms, "permissions on /proc/net/xt_recent/* files");
-MODULE_PARM_DESC(ip_list_uid,"owner of /proc/net/xt_recent/* files");
-MODULE_PARM_DESC(ip_list_gid,"owning group of /proc/net/xt_recent/* files");
+MODULE_PARM_DESC(ip_list_uid, "default owner of /proc/net/xt_recent/* files");
+MODULE_PARM_DESC(ip_list_gid, "default owning group of /proc/net/xt_recent/* files");
struct recent_entry {
struct list_head list;
@@ -84,9 +85,6 @@ struct recent_net {
struct list_head tables;
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *xt_recent;
-#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
- struct proc_dir_entry *ipt_recent;
-#endif
#endif
};
@@ -147,6 +145,25 @@ static void recent_entry_remove(struct recent_table *t, struct recent_entry *e)
t->entries--;
}
+/*
+ * Drop entries with timestamps older than 'time'.
+ */
+static void recent_entry_reap(struct recent_table *t, unsigned long time)
+{
+ struct recent_entry *e;
+
+ /*
+ * The head of the LRU list is always the oldest entry.
+ */
+ e = list_entry(t->lru_list.next, struct recent_entry, lru_list);
+
+ /*
+ * The last time stamp is the most recent.
+ */
+ if (time_after(time, e->stamps[e->index-1]))
+ recent_entry_remove(t, e);
+}
+
static struct recent_entry *
recent_entry_init(struct recent_table *t, const union nf_inet_addr *addr,
u_int16_t family, u_int8_t ttl)
@@ -218,7 +235,7 @@ recent_mt(const struct sk_buff *skb, const struct xt_match_param *par)
u_int8_t ttl;
bool ret = info->invert;
- if (par->match->family == NFPROTO_IPV4) {
+ if (par->family == NFPROTO_IPV4) {
const struct iphdr *iph = ip_hdr(skb);
if (info->side == XT_RECENT_DEST)
@@ -244,12 +261,12 @@ recent_mt(const struct sk_buff *skb, const struct xt_match_param *par)
spin_lock_bh(&recent_lock);
t = recent_table_lookup(recent_net, info->name);
- e = recent_entry_lookup(t, &addr, par->match->family,
+ e = recent_entry_lookup(t, &addr, par->family,
(info->check_set & XT_RECENT_TTL) ? ttl : 0);
if (e == NULL) {
if (!(info->check_set & XT_RECENT_SET))
goto out;
- e = recent_entry_init(t, &addr, par->match->family, ttl);
+ e = recent_entry_init(t, &addr, par->family, ttl);
if (e == NULL)
*par->hotdrop = true;
ret = !ret;
@@ -273,6 +290,10 @@ recent_mt(const struct sk_buff *skb, const struct xt_match_param *par)
break;
}
}
+
+ /* info->seconds must be non-zero */
+ if (info->check_set & XT_RECENT_REAP)
+ recent_entry_reap(t, time);
}
if (info->check_set & XT_RECENT_SET ||
@@ -285,7 +306,7 @@ out:
return ret;
}
-static bool recent_mt_check(const struct xt_mtchk_param *par)
+static int recent_mt_check(const struct xt_mtchk_param *par)
{
struct recent_net *recent_net = recent_pernet(par->net);
const struct xt_recent_mtinfo *info = par->matchinfo;
@@ -294,41 +315,51 @@ static bool recent_mt_check(const struct xt_mtchk_param *par)
struct proc_dir_entry *pde;
#endif
unsigned i;
- bool ret = false;
+ int ret = -EINVAL;
if (unlikely(!hash_rnd_inited)) {
get_random_bytes(&hash_rnd, sizeof(hash_rnd));
hash_rnd_inited = true;
}
+ if (info->check_set & ~XT_RECENT_VALID_FLAGS) {
+ pr_info("Unsupported user space flags (%08x)\n",
+ info->check_set);
+ return -EINVAL;
+ }
if (hweight8(info->check_set &
(XT_RECENT_SET | XT_RECENT_REMOVE |
XT_RECENT_CHECK | XT_RECENT_UPDATE)) != 1)
- return false;
+ return -EINVAL;
if ((info->check_set & (XT_RECENT_SET | XT_RECENT_REMOVE)) &&
- (info->seconds || info->hit_count))
- return false;
+ (info->seconds || info->hit_count ||
+ (info->check_set & XT_RECENT_MODIFIERS)))
+ return -EINVAL;
+ if ((info->check_set & XT_RECENT_REAP) && !info->seconds)
+ return -EINVAL;
if (info->hit_count > ip_pkt_list_tot) {
- pr_info(KBUILD_MODNAME ": hitcount (%u) is larger than "
+ pr_info("hitcount (%u) is larger than "
"packets to be remembered (%u)\n",
info->hit_count, ip_pkt_list_tot);
- return false;
+ return -EINVAL;
}
if (info->name[0] == '\0' ||
strnlen(info->name, XT_RECENT_NAME_LEN) == XT_RECENT_NAME_LEN)
- return false;
+ return -EINVAL;
mutex_lock(&recent_mutex);
t = recent_table_lookup(recent_net, info->name);
if (t != NULL) {
t->refcnt++;
- ret = true;
+ ret = 0;
goto out;
}
t = kzalloc(sizeof(*t) + sizeof(t->iphash[0]) * ip_list_hash_size,
GFP_KERNEL);
- if (t == NULL)
+ if (t == NULL) {
+ ret = -ENOMEM;
goto out;
+ }
t->refcnt = 1;
strcpy(t->name, info->name);
INIT_LIST_HEAD(&t->lru_list);
@@ -339,26 +370,16 @@ static bool recent_mt_check(const struct xt_mtchk_param *par)
&recent_mt_fops, t);
if (pde == NULL) {
kfree(t);
- goto out;
- }
- pde->uid = ip_list_uid;
- pde->gid = ip_list_gid;
-#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
- pde = proc_create_data(t->name, ip_list_perms, recent_net->ipt_recent,
- &recent_old_fops, t);
- if (pde == NULL) {
- remove_proc_entry(t->name, recent_net->xt_recent);
- kfree(t);
+ ret = -ENOMEM;
goto out;
}
pde->uid = ip_list_uid;
pde->gid = ip_list_gid;
#endif
-#endif
spin_lock_bh(&recent_lock);
list_add_tail(&t->list, &recent_net->tables);
spin_unlock_bh(&recent_lock);
- ret = true;
+ ret = 0;
out:
mutex_unlock(&recent_mutex);
return ret;
@@ -377,9 +398,6 @@ static void recent_mt_destroy(const struct xt_mtdtor_param *par)
list_del(&t->list);
spin_unlock_bh(&recent_lock);
#ifdef CONFIG_PROC_FS
-#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
- remove_proc_entry(t->name, recent_net->ipt_recent);
-#endif
remove_proc_entry(t->name, recent_net->xt_recent);
#endif
recent_table_flush(t);
@@ -471,84 +489,6 @@ static int recent_seq_open(struct inode *inode, struct file *file)
return 0;
}
-#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
-static int recent_old_seq_open(struct inode *inode, struct file *filp)
-{
- static bool warned_of_old;
-
- if (unlikely(!warned_of_old)) {
- printk(KERN_INFO KBUILD_MODNAME ": Use of /proc/net/ipt_recent"
- " is deprecated; use /proc/net/xt_recent.\n");
- warned_of_old = true;
- }
- return recent_seq_open(inode, filp);
-}
-
-static ssize_t recent_old_proc_write(struct file *file,
- const char __user *input,
- size_t size, loff_t *loff)
-{
- const struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
- struct recent_table *t = pde->data;
- struct recent_entry *e;
- char buf[sizeof("+255.255.255.255")], *c = buf;
- union nf_inet_addr addr = {};
- int add;
-
- if (size > sizeof(buf))
- size = sizeof(buf);
- if (copy_from_user(buf, input, size))
- return -EFAULT;
-
- c = skip_spaces(c);
-
- if (size - (c - buf) < 5)
- return c - buf;
- if (!strncmp(c, "clear", 5)) {
- c += 5;
- spin_lock_bh(&recent_lock);
- recent_table_flush(t);
- spin_unlock_bh(&recent_lock);
- return c - buf;
- }
-
- switch (*c) {
- case '-':
- add = 0;
- c++;
- break;
- case '+':
- c++;
- default:
- add = 1;
- break;
- }
- addr.ip = in_aton(c);
-
- spin_lock_bh(&recent_lock);
- e = recent_entry_lookup(t, &addr, NFPROTO_IPV4, 0);
- if (e == NULL) {
- if (add)
- recent_entry_init(t, &addr, NFPROTO_IPV4, 0);
- } else {
- if (add)
- recent_entry_update(t, e);
- else
- recent_entry_remove(t, e);
- }
- spin_unlock_bh(&recent_lock);
- return size;
-}
-
-static const struct file_operations recent_old_fops = {
- .open = recent_old_seq_open,
- .read = seq_read,
- .write = recent_old_proc_write,
- .release = seq_release_private,
- .owner = THIS_MODULE,
-};
-#endif
-
static ssize_t
recent_mt_proc_write(struct file *file, const char __user *input,
size_t size, loff_t *loff)
@@ -585,7 +525,7 @@ recent_mt_proc_write(struct file *file, const char __user *input,
add = true;
break;
default:
- printk(KERN_INFO KBUILD_MODNAME ": Need +ip, -ip or /\n");
+ pr_info("Need \"+ip\", \"-ip\" or \"/\"\n");
return -EINVAL;
}
@@ -600,8 +540,7 @@ recent_mt_proc_write(struct file *file, const char __user *input,
}
if (!succ) {
- printk(KERN_INFO KBUILD_MODNAME ": illegal address written "
- "to procfs\n");
+ pr_info("illegal address written to procfs\n");
return -EINVAL;
}
@@ -637,21 +576,11 @@ static int __net_init recent_proc_net_init(struct net *net)
recent_net->xt_recent = proc_mkdir("xt_recent", net->proc_net);
if (!recent_net->xt_recent)
return -ENOMEM;
-#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
- recent_net->ipt_recent = proc_mkdir("ipt_recent", net->proc_net);
- if (!recent_net->ipt_recent) {
- proc_net_remove(net, "xt_recent");
- return -ENOMEM;
- }
-#endif
return 0;
}
static void __net_exit recent_proc_net_exit(struct net *net)
{
-#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
- proc_net_remove(net, "ipt_recent");
-#endif
proc_net_remove(net, "xt_recent");
}
#else
diff --git a/net/netfilter/xt_sctp.c b/net/netfilter/xt_sctp.c
index a189ada9128f..c3694df54672 100644
--- a/net/netfilter/xt_sctp.c
+++ b/net/netfilter/xt_sctp.c
@@ -1,3 +1,4 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/ip.h>
@@ -15,12 +16,6 @@ MODULE_DESCRIPTION("Xtables: SCTP protocol packet match");
MODULE_ALIAS("ipt_sctp");
MODULE_ALIAS("ip6t_sctp");
-#ifdef DEBUG_SCTP
-#define duprintf(format, args...) printk(format , ## args)
-#else
-#define duprintf(format, args...)
-#endif
-
#define SCCHECK(cond, option, flag, invflag) (!((flag) & (option)) \
|| (!!((invflag) & (option)) ^ (cond)))
@@ -52,7 +47,7 @@ match_packet(const struct sk_buff *skb,
const struct xt_sctp_flag_info *flag_info = info->flag_info;
int flag_count = info->flag_count;
-#ifdef DEBUG_SCTP
+#ifdef DEBUG
int i = 0;
#endif
@@ -62,17 +57,19 @@ match_packet(const struct sk_buff *skb,
do {
sch = skb_header_pointer(skb, offset, sizeof(_sch), &_sch);
if (sch == NULL || sch->length == 0) {
- duprintf("Dropping invalid SCTP packet.\n");
+ pr_debug("Dropping invalid SCTP packet.\n");
*hotdrop = true;
return false;
}
-
- duprintf("Chunk num: %d\toffset: %d\ttype: %d\tlength: %d\tflags: %x\n",
- ++i, offset, sch->type, htons(sch->length), sch->flags);
-
+#ifdef DEBUG
+ pr_debug("Chunk num: %d\toffset: %d\ttype: %d\tlength: %d"
+ "\tflags: %x\n",
+ ++i, offset, sch->type, htons(sch->length),
+ sch->flags);
+#endif
offset += (ntohs(sch->length) + 3) & ~3;
- duprintf("skb->len: %d\toffset: %d\n", skb->len, offset);
+ pr_debug("skb->len: %d\toffset: %d\n", skb->len, offset);
if (SCTP_CHUNKMAP_IS_SET(info->chunkmap, sch->type)) {
switch (chunk_match_type) {
@@ -124,17 +121,17 @@ sctp_mt(const struct sk_buff *skb, const struct xt_match_param *par)
sctp_sctphdr_t _sh;
if (par->fragoff != 0) {
- duprintf("Dropping non-first fragment.. FIXME\n");
+ pr_debug("Dropping non-first fragment.. FIXME\n");
return false;
}
sh = skb_header_pointer(skb, par->thoff, sizeof(_sh), &_sh);
if (sh == NULL) {
- duprintf("Dropping evil TCP offset=0 tinygram.\n");
+ pr_debug("Dropping evil TCP offset=0 tinygram.\n");
*par->hotdrop = true;
return false;
}
- duprintf("spt: %d\tdpt: %d\n", ntohs(sh->source), ntohs(sh->dest));
+ pr_debug("spt: %d\tdpt: %d\n", ntohs(sh->source), ntohs(sh->dest));
return SCCHECK(ntohs(sh->source) >= info->spts[0]
&& ntohs(sh->source) <= info->spts[1],
@@ -147,18 +144,22 @@ sctp_mt(const struct sk_buff *skb, const struct xt_match_param *par)
XT_SCTP_CHUNK_TYPES, info->flags, info->invflags);
}
-static bool sctp_mt_check(const struct xt_mtchk_param *par)
+static int sctp_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_sctp_info *info = par->matchinfo;
- return !(info->flags & ~XT_SCTP_VALID_FLAGS)
- && !(info->invflags & ~XT_SCTP_VALID_FLAGS)
- && !(info->invflags & ~info->flags)
- && ((!(info->flags & XT_SCTP_CHUNK_TYPES)) ||
- (info->chunk_match_type &
- (SCTP_CHUNK_MATCH_ALL
- | SCTP_CHUNK_MATCH_ANY
- | SCTP_CHUNK_MATCH_ONLY)));
+ if (info->flags & ~XT_SCTP_VALID_FLAGS)
+ return -EINVAL;
+ if (info->invflags & ~XT_SCTP_VALID_FLAGS)
+ return -EINVAL;
+ if (info->invflags & ~info->flags)
+ return -EINVAL;
+ if (!(info->flags & XT_SCTP_CHUNK_TYPES))
+ return 0;
+ if (info->chunk_match_type & (SCTP_CHUNK_MATCH_ALL |
+ SCTP_CHUNK_MATCH_ANY | SCTP_CHUNK_MATCH_ONLY))
+ return 0;
+ return -EINVAL;
}
static struct xt_match sctp_mt_reg[] __read_mostly = {
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 6a902564d24f..a9b16867e1f7 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -9,7 +9,7 @@
* published by the Free Software Foundation.
*
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>
@@ -165,8 +165,7 @@ socket_match(const struct sk_buff *skb, const struct xt_match_param *par,
sk = NULL;
}
- pr_debug("socket match: proto %u %08x:%u -> %08x:%u "
- "(orig %08x:%u) sock %p\n",
+ pr_debug("proto %u %08x:%u -> %08x:%u (orig %08x:%u) sock %p\n",
protocol, ntohl(saddr), ntohs(sport),
ntohl(daddr), ntohs(dport),
ntohl(iph->daddr), hp ? ntohs(hp->dest) : 0, sk);
diff --git a/net/netfilter/xt_state.c b/net/netfilter/xt_state.c
index 4c946cbd731f..bb1271852d50 100644
--- a/net/netfilter/xt_state.c
+++ b/net/netfilter/xt_state.c
@@ -37,50 +37,40 @@ state_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return (sinfo->statemask & statebit);
}
-static bool state_mt_check(const struct xt_mtchk_param *par)
+static int state_mt_check(const struct xt_mtchk_param *par)
{
- if (nf_ct_l3proto_try_module_get(par->match->family) < 0) {
- printk(KERN_WARNING "can't load conntrack support for "
- "proto=%u\n", par->match->family);
- return false;
- }
- return true;
+ int ret;
+
+ ret = nf_ct_l3proto_try_module_get(par->family);
+ if (ret < 0)
+ pr_info("cannot load conntrack support for proto=%u\n",
+ par->family);
+ return ret;
}
static void state_mt_destroy(const struct xt_mtdtor_param *par)
{
- nf_ct_l3proto_module_put(par->match->family);
+ nf_ct_l3proto_module_put(par->family);
}
-static struct xt_match state_mt_reg[] __read_mostly = {
- {
- .name = "state",
- .family = NFPROTO_IPV4,
- .checkentry = state_mt_check,
- .match = state_mt,
- .destroy = state_mt_destroy,
- .matchsize = sizeof(struct xt_state_info),
- .me = THIS_MODULE,
- },
- {
- .name = "state",
- .family = NFPROTO_IPV6,
- .checkentry = state_mt_check,
- .match = state_mt,
- .destroy = state_mt_destroy,
- .matchsize = sizeof(struct xt_state_info),
- .me = THIS_MODULE,
- },
+static struct xt_match state_mt_reg __read_mostly = {
+ .name = "state",
+ .family = NFPROTO_UNSPEC,
+ .checkentry = state_mt_check,
+ .match = state_mt,
+ .destroy = state_mt_destroy,
+ .matchsize = sizeof(struct xt_state_info),
+ .me = THIS_MODULE,
};
static int __init state_mt_init(void)
{
- return xt_register_matches(state_mt_reg, ARRAY_SIZE(state_mt_reg));
+ return xt_register_match(&state_mt_reg);
}
static void __exit state_mt_exit(void)
{
- xt_unregister_matches(state_mt_reg, ARRAY_SIZE(state_mt_reg));
+ xt_unregister_match(&state_mt_reg);
}
module_init(state_mt_init);
diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
index 937ce0633e99..5aeca1d023d8 100644
--- a/net/netfilter/xt_statistic.c
+++ b/net/netfilter/xt_statistic.c
@@ -53,22 +53,20 @@ statistic_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return ret;
}
-static bool statistic_mt_check(const struct xt_mtchk_param *par)
+static int statistic_mt_check(const struct xt_mtchk_param *par)
{
struct xt_statistic_info *info = par->matchinfo;
if (info->mode > XT_STATISTIC_MODE_MAX ||
info->flags & ~XT_STATISTIC_MASK)
- return false;
+ return -EINVAL;
info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
- if (info->master == NULL) {
- printk(KERN_ERR KBUILD_MODNAME ": Out of memory\n");
- return false;
- }
+ if (info->master == NULL)
+ return -ENOMEM;
info->master->count = info->u.nth.count;
- return true;
+ return 0;
}
static void statistic_mt_destroy(const struct xt_mtdtor_param *par)
diff --git a/net/netfilter/xt_string.c b/net/netfilter/xt_string.c
index 96801ffd8af8..f6d5112175e6 100644
--- a/net/netfilter/xt_string.c
+++ b/net/netfilter/xt_string.c
@@ -27,12 +27,10 @@ string_mt(const struct sk_buff *skb, const struct xt_match_param *par)
{
const struct xt_string_info *conf = par->matchinfo;
struct ts_state state;
- int invert;
+ bool invert;
memset(&state, 0, sizeof(struct ts_state));
-
- invert = (par->match->revision == 0 ? conf->u.v0.invert :
- conf->u.v1.flags & XT_STRING_FLAG_INVERT);
+ invert = conf->u.v1.flags & XT_STRING_FLAG_INVERT;
return (skb_find_text((struct sk_buff *)skb, conf->from_offset,
conf->to_offset, conf->config, &state)
@@ -41,7 +39,7 @@ string_mt(const struct sk_buff *skb, const struct xt_match_param *par)
#define STRING_TEXT_PRIV(m) ((struct xt_string_info *)(m))
-static bool string_mt_check(const struct xt_mtchk_param *par)
+static int string_mt_check(const struct xt_mtchk_param *par)
{
struct xt_string_info *conf = par->matchinfo;
struct ts_config *ts_conf;
@@ -49,26 +47,23 @@ static bool string_mt_check(const struct xt_mtchk_param *par)
/* Damn, can't handle this case properly with iptables... */
if (conf->from_offset > conf->to_offset)
- return false;
+ return -EINVAL;
if (conf->algo[XT_STRING_MAX_ALGO_NAME_SIZE - 1] != '\0')
- return false;
+ return -EINVAL;
if (conf->patlen > XT_STRING_MAX_PATTERN_SIZE)
- return false;
- if (par->match->revision == 1) {
- if (conf->u.v1.flags &
- ~(XT_STRING_FLAG_IGNORECASE | XT_STRING_FLAG_INVERT))
- return false;
- if (conf->u.v1.flags & XT_STRING_FLAG_IGNORECASE)
- flags |= TS_IGNORECASE;
- }
+ return -EINVAL;
+ if (conf->u.v1.flags &
+ ~(XT_STRING_FLAG_IGNORECASE | XT_STRING_FLAG_INVERT))
+ return -EINVAL;
+ if (conf->u.v1.flags & XT_STRING_FLAG_IGNORECASE)
+ flags |= TS_IGNORECASE;
ts_conf = textsearch_prepare(conf->algo, conf->pattern, conf->patlen,
GFP_KERNEL, flags);
if (IS_ERR(ts_conf))
- return false;
+ return PTR_ERR(ts_conf);
conf->config = ts_conf;
-
- return true;
+ return 0;
}
static void string_mt_destroy(const struct xt_mtdtor_param *par)
@@ -76,38 +71,25 @@ static void string_mt_destroy(const struct xt_mtdtor_param *par)
textsearch_destroy(STRING_TEXT_PRIV(par->matchinfo)->config);
}
-static struct xt_match xt_string_mt_reg[] __read_mostly = {
- {
- .name = "string",
- .revision = 0,
- .family = NFPROTO_UNSPEC,
- .checkentry = string_mt_check,
- .match = string_mt,
- .destroy = string_mt_destroy,
- .matchsize = sizeof(struct xt_string_info),
- .me = THIS_MODULE
- },
- {
- .name = "string",
- .revision = 1,
- .family = NFPROTO_UNSPEC,
- .checkentry = string_mt_check,
- .match = string_mt,
- .destroy = string_mt_destroy,
- .matchsize = sizeof(struct xt_string_info),
- .me = THIS_MODULE
- },
+static struct xt_match xt_string_mt_reg __read_mostly = {
+ .name = "string",
+ .revision = 1,
+ .family = NFPROTO_UNSPEC,
+ .checkentry = string_mt_check,
+ .match = string_mt,
+ .destroy = string_mt_destroy,
+ .matchsize = sizeof(struct xt_string_info),
+ .me = THIS_MODULE,
};
static int __init string_mt_init(void)
{
- return xt_register_matches(xt_string_mt_reg,
- ARRAY_SIZE(xt_string_mt_reg));
+ return xt_register_match(&xt_string_mt_reg);
}
static void __exit string_mt_exit(void)
{
- xt_unregister_matches(xt_string_mt_reg, ARRAY_SIZE(xt_string_mt_reg));
+ xt_unregister_match(&xt_string_mt_reg);
}
module_init(string_mt_init);
diff --git a/net/netfilter/xt_tcpudp.c b/net/netfilter/xt_tcpudp.c
index 1ebdc4934eed..efa2ede24ae6 100644
--- a/net/netfilter/xt_tcpudp.c
+++ b/net/netfilter/xt_tcpudp.c
@@ -1,3 +1,4 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/module.h>
#include <net/ip.h>
@@ -19,13 +20,6 @@ MODULE_ALIAS("ipt_tcp");
MODULE_ALIAS("ip6t_udp");
MODULE_ALIAS("ip6t_tcp");
-#ifdef DEBUG_IP_FIREWALL_USER
-#define duprintf(format, args...) printk(format , ## args)
-#else
-#define duprintf(format, args...)
-#endif
-
-
/* Returns 1 if the port is matched by the range, 0 otherwise */
static inline bool
port_match(u_int16_t min, u_int16_t max, u_int16_t port, bool invert)
@@ -46,7 +40,7 @@ tcp_find_option(u_int8_t option,
u_int8_t _opt[60 - sizeof(struct tcphdr)];
unsigned int i;
- duprintf("tcp_match: finding option\n");
+ pr_debug("finding option\n");
if (!optlen)
return invert;
@@ -82,7 +76,7 @@ static bool tcp_mt(const struct sk_buff *skb, const struct xt_match_param *par)
flag overwrite to pass the direction checks.
*/
if (par->fragoff == 1) {
- duprintf("Dropping evil TCP offset=1 frag.\n");
+ pr_debug("Dropping evil TCP offset=1 frag.\n");
*par->hotdrop = true;
}
/* Must not be a fragment. */
@@ -95,7 +89,7 @@ static bool tcp_mt(const struct sk_buff *skb, const struct xt_match_param *par)
if (th == NULL) {
/* We've been asked to examine this packet, and we
can't. Hence, no choice but to drop. */
- duprintf("Dropping evil TCP offset=0 tinygram.\n");
+ pr_debug("Dropping evil TCP offset=0 tinygram.\n");
*par->hotdrop = true;
return false;
}
@@ -126,12 +120,12 @@ static bool tcp_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return true;
}
-static bool tcp_mt_check(const struct xt_mtchk_param *par)
+static int tcp_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_tcp *tcpinfo = par->matchinfo;
/* Must specify no unknown invflags */
- return !(tcpinfo->invflags & ~XT_TCP_INV_MASK);
+ return (tcpinfo->invflags & ~XT_TCP_INV_MASK) ? -EINVAL : 0;
}
static bool udp_mt(const struct sk_buff *skb, const struct xt_match_param *par)
@@ -148,7 +142,7 @@ static bool udp_mt(const struct sk_buff *skb, const struct xt_match_param *par)
if (uh == NULL) {
/* We've been asked to examine this packet, and we
can't. Hence, no choice but to drop. */
- duprintf("Dropping evil UDP tinygram.\n");
+ pr_debug("Dropping evil UDP tinygram.\n");
*par->hotdrop = true;
return false;
}
@@ -161,12 +155,12 @@ static bool udp_mt(const struct sk_buff *skb, const struct xt_match_param *par)
!!(udpinfo->invflags & XT_UDP_INV_DSTPT));
}
-static bool udp_mt_check(const struct xt_mtchk_param *par)
+static int udp_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_udp *udpinfo = par->matchinfo;
/* Must specify no unknown invflags */
- return !(udpinfo->invflags & ~XT_UDP_INV_MASK);
+ return (udpinfo->invflags & ~XT_UDP_INV_MASK) ? -EINVAL : 0;
}
static struct xt_match tcpudp_mt_reg[] __read_mostly = {
diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c
index 93acaa59d108..d8556fdda440 100644
--- a/net/netfilter/xt_time.c
+++ b/net/netfilter/xt_time.c
@@ -1,7 +1,6 @@
/*
* xt_time
* Copyright © CC Computer Consultants GmbH, 2007
- * Contact: <jengelh@computergmbh.de>
*
* based on ipt_time by Fabrice MARIE <fabrice@netfilter.org>
* This is a module which is used for time matching
@@ -218,18 +217,18 @@ time_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return true;
}
-static bool time_mt_check(const struct xt_mtchk_param *par)
+static int time_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_time_info *info = par->matchinfo;
if (info->daytime_start > XT_TIME_MAX_DAYTIME ||
info->daytime_stop > XT_TIME_MAX_DAYTIME) {
- printk(KERN_WARNING "xt_time: invalid argument - start or "
- "stop time greater than 23:59:59\n");
- return false;
+ pr_info("invalid argument - start or "
+ "stop time greater than 23:59:59\n");
+ return -EDOM;
}
- return true;
+ return 0;
}
static struct xt_match xt_time_mt_reg __read_mostly = {
@@ -264,7 +263,7 @@ static void __exit time_mt_exit(void)
module_init(time_mt_init);
module_exit(time_mt_exit);
-MODULE_AUTHOR("Jan Engelhardt <jengelh@computergmbh.de>");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
MODULE_DESCRIPTION("Xtables: time-based matching");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_time");
diff --git a/net/netfilter/xt_u32.c b/net/netfilter/xt_u32.c
index 24a527624500..d7c05f03a7e7 100644
--- a/net/netfilter/xt_u32.c
+++ b/net/netfilter/xt_u32.c
@@ -3,7 +3,6 @@
*
* Original author: Don Cohen <don@isis.cs3-inc.com>
* (C) CC Computer Consultants GmbH, 2007
- * Contact: <jengelh@computergmbh.de>
*/
#include <linux/module.h>
@@ -117,7 +116,7 @@ static void __exit u32_mt_exit(void)
module_init(u32_mt_init);
module_exit(u32_mt_exit);
-MODULE_AUTHOR("Jan Engelhardt <jengelh@computergmbh.de>");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
MODULE_DESCRIPTION("Xtables: arbitrary byte matching");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_u32");
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 795424396aff..6464a1972a69 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -545,7 +545,7 @@ static int netlink_autobind(struct socket *sock)
struct hlist_head *head;
struct sock *osk;
struct hlist_node *node;
- s32 pid = current->tgid;
+ s32 pid = task_tgid_vnr(current);
int err;
static s32 rover = -4097;
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 06438fa2b1e5..aa4308afcc7f 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -21,15 +21,17 @@
static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */
-static inline void genl_lock(void)
+void genl_lock(void)
{
mutex_lock(&genl_mutex);
}
+EXPORT_SYMBOL(genl_lock);
-static inline void genl_unlock(void)
+void genl_unlock(void)
{
mutex_unlock(&genl_mutex);
}
+EXPORT_SYMBOL(genl_unlock);
#define GENL_FAM_TAB_SIZE 16
#define GENL_FAM_TAB_MASK (GENL_FAM_TAB_SIZE - 1)
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index fa07f044b599..06cb02796a0e 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -739,7 +739,7 @@ static int nr_connect(struct socket *sock, struct sockaddr *uaddr,
DEFINE_WAIT(wait);
for (;;) {
- prepare_to_wait(sk->sk_sleep, &wait,
+ prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
if (sk->sk_state != TCP_SYN_SENT)
break;
@@ -752,7 +752,7 @@ static int nr_connect(struct socket *sock, struct sockaddr *uaddr,
err = -ERESTARTSYS;
break;
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
if (err)
goto out_release;
}
@@ -798,7 +798,7 @@ static int nr_accept(struct socket *sock, struct socket *newsock, int flags)
* hooked into the SABM we saved
*/
for (;;) {
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
skb = skb_dequeue(&sk->sk_receive_queue);
if (skb)
break;
@@ -816,7 +816,7 @@ static int nr_accept(struct socket *sock, struct socket *newsock, int flags)
err = -ERESTARTSYS;
break;
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
if (err)
goto out_release;
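The af_netrom hunks above, and the phonet, rds, rose and rxrpc hunks below, replace direct sk->sk_sleep dereferences with the sk_sleep(sk) accessor (the rxrpc hunk further down shows the underlying RCU-managed socket_wq the accessor hides). The blocking connect/accept loops they touch all follow roughly this pattern, with ready_condition() standing in as a hypothetical name for the protocol-specific test:

#include <linux/wait.h>
#include <linux/sched.h>
#include <net/sock.h>

static int example_wait(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);
	int err = 0;

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (ready_condition(sk))	/* hypothetical helper */
			break;
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}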
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 243946d4809d..2078a277e06b 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -82,6 +82,7 @@
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
+#include <linux/errqueue.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
@@ -315,6 +316,8 @@ static inline struct packet_sock *pkt_sk(struct sock *sk)
static void packet_sock_destruct(struct sock *sk)
{
+ skb_queue_purge(&sk->sk_error_queue);
+
WARN_ON(atomic_read(&sk->sk_rmem_alloc));
WARN_ON(atomic_read(&sk->sk_wmem_alloc));
@@ -483,6 +486,9 @@ retry:
skb->dev = dev;
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
+ err = sock_tx_timestamp(msg, sk, skb_tx(skb));
+ if (err < 0)
+ goto out_unlock;
dev_queue_xmit(skb);
rcu_read_unlock();
@@ -1188,6 +1194,9 @@ static int packet_snd(struct socket *sock,
err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
if (err)
goto out_free;
+ err = sock_tx_timestamp(msg, sk, skb_tx(skb));
+ if (err < 0)
+ goto out_free;
skb->protocol = proto;
skb->dev = dev;
@@ -1487,6 +1496,51 @@ out:
return err;
}
+static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
+{
+ struct sock_exterr_skb *serr;
+ struct sk_buff *skb, *skb2;
+ int copied, err;
+
+ err = -EAGAIN;
+ skb = skb_dequeue(&sk->sk_error_queue);
+ if (skb == NULL)
+ goto out;
+
+ copied = skb->len;
+ if (copied > len) {
+ msg->msg_flags |= MSG_TRUNC;
+ copied = len;
+ }
+ err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ if (err)
+ goto out_free_skb;
+
+ sock_recv_timestamp(msg, sk, skb);
+
+ serr = SKB_EXT_ERR(skb);
+ put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
+ sizeof(serr->ee), &serr->ee);
+
+ msg->msg_flags |= MSG_ERRQUEUE;
+ err = copied;
+
+ /* Reset and regenerate socket error */
+ spin_lock_bh(&sk->sk_error_queue.lock);
+ sk->sk_err = 0;
+ if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
+ sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
+ spin_unlock_bh(&sk->sk_error_queue.lock);
+ sk->sk_error_report(sk);
+ } else
+ spin_unlock_bh(&sk->sk_error_queue.lock);
+
+out_free_skb:
+ kfree_skb(skb);
+out:
+ return err;
+}
+
/*
* Pull a packet from our receive queue and hand it to the user.
* If necessary we block.
@@ -1502,7 +1556,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
int vnet_hdr_len = 0;
err = -EINVAL;
- if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT))
+ if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
goto out;
#if 0
@@ -1511,6 +1565,11 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
return -ENODEV;
#endif
+ if (flags & MSG_ERRQUEUE) {
+ err = packet_recv_error(sk, msg, len);
+ goto out;
+ }
+
/*
* Call the generic datagram receiver. This handles all sorts
* of horrible races and re-entrancy so we can forget about it
@@ -1692,9 +1751,9 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
if (i->alen != dev->addr_len)
return -EINVAL;
if (what > 0)
- return dev_mc_add(dev, i->addr, i->alen, 0);
+ return dev_mc_add(dev, i->addr);
else
- return dev_mc_delete(dev, i->addr, i->alen, 0);
+ return dev_mc_del(dev, i->addr);
break;
case PACKET_MR_PROMISC:
return dev_set_promiscuity(dev, what);
@@ -1706,9 +1765,9 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
if (i->alen != dev->addr_len)
return -EINVAL;
if (what > 0)
- return dev_unicast_add(dev, i->addr);
+ return dev_uc_add(dev, i->addr);
else
- return dev_unicast_delete(dev, i->addr);
+ return dev_uc_del(dev, i->addr);
break;
default:
break;
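The af_packet changes wire packet sockets into the TX timestamping machinery: sock_tx_timestamp() is called on both transmit paths, completions are queued on sk_error_queue, and recvmsg() with MSG_ERRQUEUE drains them through the new packet_recv_error(), which attaches a PACKET_TX_TIMESTAMP control message. A rough userspace sketch of consuming this, assuming the standard SO_TIMESTAMPING request flags and with error handling trimmed:

#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/net_tstamp.h>
#include <linux/errqueue.h>

static void drain_tx_timestamps(int fd)
{
	int val = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE;
	char data[2048], ctrl[512];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
	};
	struct cmsghdr *cm;

	/* request software transmit timestamps (normally done before sending) */
	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));

	/* completed timestamps show up on the error queue */
	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
		return;
	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		/* SOL_SOCKET/SCM_TIMESTAMPING carries the timestamps,
		 * SOL_PACKET/PACKET_TX_TIMESTAMP the extended error record */
	}
}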
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index e2a95762abd3..af4d38bc3b22 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -664,12 +664,12 @@ static int pep_wait_connreq(struct sock *sk, int noblock)
if (signal_pending(tsk))
return sock_intr_errno(timeo);
- prepare_to_wait_exclusive(&sk->sk_socket->wait, &wait,
+ prepare_to_wait_exclusive(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
- finish_wait(&sk->sk_socket->wait, &wait);
+ finish_wait(sk_sleep(sk), &wait);
}
return 0;
@@ -910,10 +910,10 @@ disabled:
goto out;
}
- prepare_to_wait(&sk->sk_socket->wait, &wait,
+ prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
done = sk_wait_event(sk, &timeo, atomic_read(&pn->tx_credits));
- finish_wait(&sk->sk_socket->wait, &wait);
+ finish_wait(sk_sleep(sk), &wait);
if (sk->sk_state != TCP_ESTABLISHED)
goto disabled;
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index 9b4ced6e0968..c33da6576942 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -46,9 +46,16 @@ struct phonet_net {
int phonet_net_id __read_mostly;
+static struct phonet_net *phonet_pernet(struct net *net)
+{
+ BUG_ON(!net);
+
+ return net_generic(net, phonet_net_id);
+}
+
struct phonet_device_list *phonet_device_list(struct net *net)
{
- struct phonet_net *pnn = net_generic(net, phonet_net_id);
+ struct phonet_net *pnn = phonet_pernet(net);
return &pnn->pndevs;
}
@@ -261,7 +268,7 @@ static int phonet_device_autoconf(struct net_device *dev)
static void phonet_route_autodel(struct net_device *dev)
{
- struct phonet_net *pnn = net_generic(dev_net(dev), phonet_net_id);
+ struct phonet_net *pnn = phonet_pernet(dev_net(dev));
unsigned i;
DECLARE_BITMAP(deleted, 64);
@@ -313,7 +320,7 @@ static struct notifier_block phonet_device_notifier = {
/* Per-namespace Phonet devices handling */
static int __net_init phonet_init_net(struct net *net)
{
- struct phonet_net *pnn = net_generic(net, phonet_net_id);
+ struct phonet_net *pnn = phonet_pernet(net);
if (!proc_net_fops_create(net, "phonet", 0, &pn_sock_seq_fops))
return -ENOMEM;
@@ -326,7 +333,7 @@ static int __net_init phonet_init_net(struct net *net)
static void __net_exit phonet_exit_net(struct net *net)
{
- struct phonet_net *pnn = net_generic(net, phonet_net_id);
+ struct phonet_net *pnn = phonet_pernet(net);
struct net_device *dev;
unsigned i;
@@ -376,7 +383,7 @@ void phonet_device_exit(void)
int phonet_route_add(struct net_device *dev, u8 daddr)
{
- struct phonet_net *pnn = net_generic(dev_net(dev), phonet_net_id);
+ struct phonet_net *pnn = phonet_pernet(dev_net(dev));
struct phonet_routes *routes = &pnn->routes;
int err = -EEXIST;
@@ -393,7 +400,7 @@ int phonet_route_add(struct net_device *dev, u8 daddr)
int phonet_route_del(struct net_device *dev, u8 daddr)
{
- struct phonet_net *pnn = net_generic(dev_net(dev), phonet_net_id);
+ struct phonet_net *pnn = phonet_pernet(dev_net(dev));
struct phonet_routes *routes = &pnn->routes;
daddr = daddr >> 2;
@@ -413,7 +420,7 @@ int phonet_route_del(struct net_device *dev, u8 daddr)
struct net_device *phonet_route_get(struct net *net, u8 daddr)
{
- struct phonet_net *pnn = net_generic(net, phonet_net_id);
+ struct phonet_net *pnn = phonet_pernet(net);
struct phonet_routes *routes = &pnn->routes;
struct net_device *dev;
@@ -428,7 +435,7 @@ struct net_device *phonet_route_get(struct net *net, u8 daddr)
struct net_device *phonet_route_output(struct net *net, u8 daddr)
{
- struct phonet_net *pnn = net_generic(net, phonet_net_id);
+ struct phonet_net *pnn = phonet_pernet(net);
struct phonet_routes *routes = &pnn->routes;
struct net_device *dev;
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index c785bfd0744f..6e9848bf0370 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -265,7 +265,7 @@ static unsigned int pn_socket_poll(struct file *file, struct socket *sock,
struct pep_sock *pn = pep_sk(sk);
unsigned int mask = 0;
- poll_wait(file, &sock->wait, wait);
+ poll_wait(file, sk_sleep(sk), wait);
switch (sk->sk_state) {
case TCP_LISTEN:
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index f81862baf4d0..aebfecbdb841 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -158,9 +158,10 @@ static unsigned int rds_poll(struct file *file, struct socket *sock,
unsigned int mask = 0;
unsigned long flags;
- poll_wait(file, sk->sk_sleep, wait);
+ poll_wait(file, sk_sleep(sk), wait);
- poll_wait(file, &rds_poll_waitq, wait);
+ if (rs->rs_seen_congestion)
+ poll_wait(file, &rds_poll_waitq, wait);
read_lock_irqsave(&rs->rs_recv_lock, flags);
if (!rs->rs_cong_monitor) {
@@ -182,6 +183,10 @@ static unsigned int rds_poll(struct file *file, struct socket *sock,
mask |= (POLLOUT | POLLWRNORM);
read_unlock_irqrestore(&rs->rs_recv_lock, flags);
+ /* clear state any time we wake a seen-congested socket */
+ if (mask)
+ rs->rs_seen_congestion = 0;
+
return mask;
}
@@ -447,7 +452,6 @@ static void rds_sock_inc_info(struct socket *sock, unsigned int len,
struct rds_info_lengths *lens)
{
struct rds_sock *rs;
- struct sock *sk;
struct rds_incoming *inc;
unsigned long flags;
unsigned int total = 0;
@@ -457,7 +461,6 @@ static void rds_sock_inc_info(struct socket *sock, unsigned int len,
spin_lock_irqsave(&rds_sock_lock, flags);
list_for_each_entry(rs, &rds_sock_list, rs_item) {
- sk = rds_rs_to_sk(rs);
read_lock(&rs->rs_recv_lock);
/* XXX too lazy to maintain counts.. */
diff --git a/net/rds/cong.c b/net/rds/cong.c
index f1da27ceb064..0871a29f0780 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -219,8 +219,6 @@ void rds_cong_queue_updates(struct rds_cong_map *map)
spin_lock_irqsave(&rds_cong_lock, flags);
list_for_each_entry(conn, &map->m_conn_list, c_map_item) {
- if (conn->c_loopback)
- continue;
if (!test_and_set_bit(0, &conn->c_map_queued)) {
rds_stats_inc(s_cong_update_queued);
queue_delayed_work(rds_wq, &conn->c_send_w, 0);
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 88d0856cb797..10ed0d55f759 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -204,9 +204,10 @@ static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST);
break;
default:
- rds_ib_conn_error(conn, "RDS/IB: Fatal QP Event %u "
+ rdsdebug("Fatal QP Event %u "
"- connection %pI4->%pI4, reconnecting\n",
event->event, &conn->c_laddr, &conn->c_faddr);
+ rds_conn_drop(conn);
break;
}
}
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 059989fdb7d7..a54cd63f9e35 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -235,8 +235,8 @@ void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
flush_workqueue(rds_wq);
rds_ib_flush_mr_pool(pool, 1);
- BUG_ON(atomic_read(&pool->item_count));
- BUG_ON(atomic_read(&pool->free_pinned));
+ WARN_ON(atomic_read(&pool->item_count));
+ WARN_ON(atomic_read(&pool->free_pinned));
kfree(pool);
}
@@ -441,6 +441,7 @@ static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
/* FIXME we need a way to tell a r/w MR
* from a r/o MR */
+ BUG_ON(in_interrupt());
set_page_dirty(page);
put_page(page);
}
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index c7dd11b835f0..c74e9904a6b2 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -469,8 +469,8 @@ static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credi
set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
rds_ib_stats_inc(s_ib_ack_send_failure);
- /* Need to finesse this later. */
- BUG();
+
+ rds_ib_conn_error(ic->conn, "sending ack failed\n");
} else
rds_ib_stats_inc(s_ib_ack_sent);
}
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index a10fab6886d1..17fa80803ab0 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -243,8 +243,12 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
struct rds_message *rm;
rm = rds_send_get_message(conn, send->s_op);
- if (rm)
+ if (rm) {
+ if (rm->m_rdma_op)
+ rds_ib_send_unmap_rdma(ic, rm->m_rdma_op);
rds_ib_send_rdma_complete(rm, wc.status);
+ rds_message_put(rm);
+ }
}
oldest = (oldest + 1) % ic->i_send_ring.w_nr;
@@ -482,6 +486,13 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
BUG_ON(off % RDS_FRAG_SIZE);
BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));
+ /* Do not send cong updates to IB loopback */
+ if (conn->c_loopback
+ && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
+ rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
+ return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
+ }
+
/* FIXME we may overallocate here */
if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
i = 1;
@@ -574,8 +585,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
adv_credits += posted;
BUG_ON(adv_credits > 255);
- } else if (ic->i_rm != rm)
- BUG();
+ }
send = &ic->i_sends[pos];
first = send;
@@ -714,8 +724,8 @@ add_header:
ic->i_rm = prev->s_rm;
prev->s_rm = NULL;
}
- /* Finesse this later */
- BUG();
+
+ rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
goto out;
}
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index 3e9460f935d8..a9d951b4fbae 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -157,9 +157,11 @@ static void rds_iw_qp_event_handler(struct ib_event *event, void *data)
case IB_EVENT_QP_REQ_ERR:
case IB_EVENT_QP_FATAL:
default:
- rds_iw_conn_error(conn, "RDS/IW: Fatal QP Event %u - connection %pI4->%pI4...reconnecting\n",
+ rdsdebug("Fatal QP Event %u "
+ "- connection %pI4->%pI4, reconnecting\n",
event->event, &conn->c_laddr,
&conn->c_faddr);
+ rds_conn_drop(conn);
break;
}
}
diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
index da43ee840ca3..3d479067d54d 100644
--- a/net/rds/iw_recv.c
+++ b/net/rds/iw_recv.c
@@ -469,8 +469,8 @@ static void rds_iw_send_ack(struct rds_iw_connection *ic, unsigned int adv_credi
set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
rds_iw_stats_inc(s_iw_ack_send_failure);
- /* Need to finesse this later. */
- BUG();
+
+ rds_iw_conn_error(ic->conn, "sending ack failed\n");
} else
rds_iw_stats_inc(s_iw_ack_sent);
}
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c
index 1379e9d66a78..52182ff7519e 100644
--- a/net/rds/iw_send.c
+++ b/net/rds/iw_send.c
@@ -616,8 +616,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
rds_iw_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
adv_credits += posted;
BUG_ON(adv_credits > 255);
- } else if (ic->i_rm != rm)
- BUG();
+ }
send = &ic->i_sends[pos];
first = send;
diff --git a/net/rds/loop.c b/net/rds/loop.c
index 0d7a159158b8..dd9879379457 100644
--- a/net/rds/loop.c
+++ b/net/rds/loop.c
@@ -81,16 +81,9 @@ static int rds_loop_xmit_cong_map(struct rds_connection *conn,
struct rds_cong_map *map,
unsigned long offset)
{
- unsigned long i;
-
BUG_ON(offset);
BUG_ON(map != conn->c_lcong);
- for (i = 0; i < RDS_CONG_MAP_PAGES; i++) {
- memcpy((void *)conn->c_fcong->m_page_addrs[i],
- (void *)map->m_page_addrs[i], PAGE_SIZE);
- }
-
rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 5ce9437cad67..75fd13bb631b 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -439,8 +439,10 @@ void rds_rdma_free_op(struct rds_rdma_op *ro)
/* Mark page dirty if it was possibly modified, which
* is the case for a RDMA_READ which copies from remote
* to local memory */
- if (!ro->r_write)
+ if (!ro->r_write) {
+ BUG_ON(in_interrupt());
set_page_dirty(page);
+ }
put_page(page);
}
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index 7b155081b4dc..e599ba2f950d 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -101,7 +101,7 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
break;
case RDMA_CM_EVENT_DISCONNECTED:
- printk(KERN_WARNING "RDS/RDMA: DISCONNECT event - dropping connection "
+ rdsdebug("DISCONNECT event - dropping connection "
"%pI4->%pI4\n", &conn->c_laddr,
&conn->c_faddr);
rds_conn_drop(conn);
@@ -109,8 +109,7 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
default:
/* things like device disconnect? */
- printk(KERN_ERR "unknown event %u\n", event->event);
- BUG();
+ printk(KERN_ERR "RDS: unknown event %u!\n", event->event);
break;
}
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 85d6f897ecc7..c224b5bb3ba9 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -388,6 +388,8 @@ struct rds_sock {
/* flag indicating we were congested or not */
int rs_congested;
+ /* seen congestion (ENOBUFS) when sending? */
+ int rs_seen_congestion;
/* rs_lock protects all these adjacent members before the newline */
spinlock_t rs_lock;
@@ -490,7 +492,7 @@ void rds_sock_put(struct rds_sock *rs);
void rds_wake_sk_sleep(struct rds_sock *rs);
static inline void __rds_wake_sk_sleep(struct sock *sk)
{
- wait_queue_head_t *waitq = sk->sk_sleep;
+ wait_queue_head_t *waitq = sk_sleep(sk);
if (!sock_flag(sk, SOCK_DEAD) && waitq)
wake_up(waitq);
diff --git a/net/rds/recv.c b/net/rds/recv.c
index e2a2b9344f7b..795a00b7f2cb 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -432,7 +432,7 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
break;
}
- timeo = wait_event_interruptible_timeout(*sk->sk_sleep,
+ timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
(!list_empty(&rs->rs_notify_queue) ||
rs->rs_cong_notify ||
rds_next_incoming(rs, &inc)), timeo);
diff --git a/net/rds/send.c b/net/rds/send.c
index f04b929ded92..9c1c6bcaa6c9 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -508,12 +508,13 @@ EXPORT_SYMBOL_GPL(rds_send_get_message);
*/
void rds_send_remove_from_sock(struct list_head *messages, int status)
{
- unsigned long flags = 0; /* silence gcc :P */
+ unsigned long flags;
struct rds_sock *rs = NULL;
struct rds_message *rm;
- local_irq_save(flags);
while (!list_empty(messages)) {
+ int was_on_sock = 0;
+
rm = list_entry(messages->next, struct rds_message,
m_conn_item);
list_del_init(&rm->m_conn_item);
@@ -528,20 +529,19 @@ void rds_send_remove_from_sock(struct list_head *messages, int status)
* while we're messing with it. It does not prevent the
* message from being removed from the socket, though.
*/
- spin_lock(&rm->m_rs_lock);
+ spin_lock_irqsave(&rm->m_rs_lock, flags);
if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
goto unlock_and_drop;
if (rs != rm->m_rs) {
if (rs) {
- spin_unlock(&rs->rs_lock);
rds_wake_sk_sleep(rs);
sock_put(rds_rs_to_sk(rs));
}
rs = rm->m_rs;
- spin_lock(&rs->rs_lock);
sock_hold(rds_rs_to_sk(rs));
}
+ spin_lock(&rs->rs_lock);
if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
struct rds_rdma_op *ro = rm->m_rdma_op;
@@ -558,21 +558,22 @@ void rds_send_remove_from_sock(struct list_head *messages, int status)
notifier->n_status = status;
rm->m_rdma_op->r_notifier = NULL;
}
- rds_message_put(rm);
+ was_on_sock = 1;
rm->m_rs = NULL;
}
+ spin_unlock(&rs->rs_lock);
unlock_and_drop:
- spin_unlock(&rm->m_rs_lock);
+ spin_unlock_irqrestore(&rm->m_rs_lock, flags);
rds_message_put(rm);
+ if (was_on_sock)
+ rds_message_put(rm);
}
if (rs) {
- spin_unlock(&rs->rs_lock);
rds_wake_sk_sleep(rs);
sock_put(rds_rs_to_sk(rs));
}
- local_irq_restore(flags);
}
/*
@@ -634,9 +635,6 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
list_move(&rm->m_sock_item, &list);
rds_send_sndbuf_remove(rs, rm);
clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
-
- /* If this is a RDMA operation, notify the app. */
- __rds_rdma_send_complete(rs, rm, RDS_RDMA_CANCELED);
}
/* order flag updates with the rs lock */
@@ -645,9 +643,6 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
spin_unlock_irqrestore(&rs->rs_lock, flags);
- if (wake)
- rds_wake_sk_sleep(rs);
-
conn = NULL;
/* now remove the messages from the conn list as needed */
@@ -655,6 +650,10 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
/* We do this here rather than in the loop above, so that
* we don't have to nest m_rs_lock under rs->rs_lock */
spin_lock_irqsave(&rm->m_rs_lock, flags2);
+ /* If this is a RDMA operation, notify the app. */
+ spin_lock(&rs->rs_lock);
+ __rds_rdma_send_complete(rs, rm, RDS_RDMA_CANCELED);
+ spin_unlock(&rs->rs_lock);
rm->m_rs = NULL;
spin_unlock_irqrestore(&rm->m_rs_lock, flags2);
@@ -683,6 +682,9 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
if (conn)
spin_unlock_irqrestore(&conn->c_lock, flags);
+ if (wake)
+ rds_wake_sk_sleep(rs);
+
while (!list_empty(&list)) {
rm = list_entry(list.next, struct rds_message, m_sock_item);
list_del_init(&rm->m_sock_item);
@@ -816,7 +818,7 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
int ret = 0;
int queued = 0, allocated_mr = 0;
int nonblock = msg->msg_flags & MSG_DONTWAIT;
- long timeo = sock_rcvtimeo(sk, nonblock);
+ long timeo = sock_sndtimeo(sk, nonblock);
/* Mirror Linux UDP mirror of BSD error message compatibility */
/* XXX: Perhaps MSG_MORE someday */
@@ -895,8 +897,10 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
- if (ret)
+ if (ret) {
+ rs->rs_seen_congestion = 1;
goto out;
+ }
while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
dport, &queued)) {
@@ -911,7 +915,7 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
goto out;
}
- timeo = wait_event_interruptible_timeout(*sk->sk_sleep,
+ timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
rds_send_queue_rm(rs, conn, rm,
rs->rs_bound_port,
dport,
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index e08ec912d8b0..1aba6878fa5d 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -98,6 +98,7 @@ int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
goto out;
}
+ rds_stats_add(s_copy_to_user, to_copy);
size -= to_copy;
ret += to_copy;
skb_off += to_copy;
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index 34fdcc059e54..a28b895ff0d1 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -240,7 +240,9 @@ void rds_tcp_write_space(struct sock *sk)
tc->t_last_seen_una = rds_tcp_snd_una(tc);
rds_send_drop_acked(conn, rds_tcp_snd_una(tc), rds_tcp_is_acked);
- queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+ if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
+ queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+
out:
read_unlock(&sk->sk_callback_lock);
diff --git a/net/rds/threads.c b/net/rds/threads.c
index 00fa10e59af8..786c20eaaf5e 100644
--- a/net/rds/threads.c
+++ b/net/rds/threads.c
@@ -259,7 +259,7 @@ void rds_threads_exit(void)
int __init rds_threads_init(void)
{
- rds_wq = create_singlethread_workqueue("krdsd");
+ rds_wq = create_workqueue("krdsd");
if (rds_wq == NULL)
return -ENOMEM;
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index a9fa86f65983..51875a0c5d48 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -629,6 +629,49 @@ static ssize_t rfkill_persistent_show(struct device *dev,
return sprintf(buf, "%d\n", rfkill->persistent);
}
+static ssize_t rfkill_hard_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct rfkill *rfkill = to_rfkill(dev);
+
+ return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_HW) ? 1 : 0 );
+}
+
+static ssize_t rfkill_soft_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct rfkill *rfkill = to_rfkill(dev);
+
+ return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0 );
+}
+
+static ssize_t rfkill_soft_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rfkill *rfkill = to_rfkill(dev);
+ unsigned long state;
+ int err;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ err = strict_strtoul(buf, 0, &state);
+ if (err)
+ return err;
+
+ if (state > 1 )
+ return -EINVAL;
+
+ mutex_lock(&rfkill_global_mutex);
+ rfkill_set_block(rfkill, state);
+ mutex_unlock(&rfkill_global_mutex);
+
+ return err ?: count;
+}
+
static u8 user_state_from_blocked(unsigned long state)
{
if (state & RFKILL_BLOCK_HW)
@@ -644,14 +687,8 @@ static ssize_t rfkill_state_show(struct device *dev,
char *buf)
{
struct rfkill *rfkill = to_rfkill(dev);
- unsigned long flags;
- u32 state;
-
- spin_lock_irqsave(&rfkill->lock, flags);
- state = rfkill->state;
- spin_unlock_irqrestore(&rfkill->lock, flags);
- return sprintf(buf, "%d\n", user_state_from_blocked(state));
+ return sprintf(buf, "%d\n", user_state_from_blocked(rfkill->state));
}
static ssize_t rfkill_state_store(struct device *dev,
@@ -701,6 +738,8 @@ static struct device_attribute rfkill_dev_attrs[] = {
__ATTR(persistent, S_IRUGO, rfkill_persistent_show, NULL),
__ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store),
__ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store),
+ __ATTR(soft, S_IRUGO|S_IWUSR, rfkill_soft_show, rfkill_soft_store),
+ __ATTR(hard, S_IRUGO, rfkill_hard_show, NULL),
__ATTR_NULL
};
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 4fb711a035f4..8e45e76a95f5 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -845,7 +845,7 @@ rose_try_next_neigh:
DEFINE_WAIT(wait);
for (;;) {
- prepare_to_wait(sk->sk_sleep, &wait,
+ prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
if (sk->sk_state != TCP_SYN_SENT)
break;
@@ -858,7 +858,7 @@ rose_try_next_neigh:
err = -ERESTARTSYS;
break;
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
if (err)
goto out_release;
@@ -911,7 +911,7 @@ static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
* hooked into the SABM we saved
*/
for (;;) {
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
skb = skb_dequeue(&sk->sk_receive_queue);
if (skb)
@@ -930,7 +930,7 @@ static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
err = -ERESTARTSYS;
break;
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
if (err)
goto out_release;
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index c060095b27ce..0b9bb2085ce4 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -62,13 +62,15 @@ static inline int rxrpc_writable(struct sock *sk)
static void rxrpc_write_space(struct sock *sk)
{
_enter("%p", sk);
- read_lock(&sk->sk_callback_lock);
+ rcu_read_lock();
if (rxrpc_writable(sk)) {
- if (sk_has_sleeper(sk))
- wake_up_interruptible(sk->sk_sleep);
+ struct socket_wq *wq = rcu_dereference(sk->sk_wq);
+
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible(&wq->wait);
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
}
- read_unlock(&sk->sk_callback_lock);
+ rcu_read_unlock();
}
/*
@@ -589,7 +591,7 @@ static unsigned int rxrpc_poll(struct file *file, struct socket *sock,
unsigned int mask;
struct sock *sk = sock->sk;
- sock_poll_wait(file, sk->sk_sleep, wait);
+ sock_poll_wait(file, sk_sleep(sk), wait);
mask = 0;
/* the socket is readable if there are any messages waiting on the Rx
diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c
index 60c2b94e6b54..0c65013e3bfe 100644
--- a/net/rxrpc/ar-recvmsg.c
+++ b/net/rxrpc/ar-recvmsg.c
@@ -91,7 +91,7 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
/* wait for a message to turn up */
release_sock(&rx->sk);
- prepare_to_wait_exclusive(rx->sk.sk_sleep, &wait,
+ prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
TASK_INTERRUPTIBLE);
ret = sock_error(&rx->sk);
if (ret)
@@ -102,7 +102,7 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
goto wait_interrupted;
timeo = schedule_timeout(timeo);
}
- finish_wait(rx->sk.sk_sleep, &wait);
+ finish_wait(sk_sleep(&rx->sk), &wait);
lock_sock(&rx->sk);
continue;
}
@@ -356,7 +356,7 @@ csum_copy_error:
wait_interrupted:
ret = sock_intr_errno(timeo);
wait_error:
- finish_wait(rx->sk.sk_sleep, &wait);
+ finish_wait(sk_sleep(&rx->sk), &wait);
if (continue_call)
rxrpc_put_call(continue_call);
if (copied)
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index d8e0171d9a4b..019045174fc3 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -668,7 +668,8 @@ nlmsg_failure:
}
static int
-act_get_notify(u32 pid, struct nlmsghdr *n, struct tc_action *a, int event)
+act_get_notify(struct net *net, u32 pid, struct nlmsghdr *n,
+ struct tc_action *a, int event)
{
struct sk_buff *skb;
@@ -680,7 +681,7 @@ act_get_notify(u32 pid, struct nlmsghdr *n, struct tc_action *a, int event)
return -EINVAL;
}
- return rtnl_unicast(skb, &init_net, pid);
+ return rtnl_unicast(skb, net, pid);
}
static struct tc_action *
@@ -750,7 +751,8 @@ static struct tc_action *create_a(int i)
return act;
}
-static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
+static int tca_action_flush(struct net *net, struct nlattr *nla,
+ struct nlmsghdr *n, u32 pid)
{
struct sk_buff *skb;
unsigned char *b;
@@ -809,7 +811,7 @@ static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
nlh->nlmsg_flags |= NLM_F_ROOT;
module_put(a->ops->owner);
kfree(a);
- err = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
+ err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
if (err > 0)
return 0;
@@ -826,7 +828,8 @@ noflush_out:
}
static int
-tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event)
+tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
+ u32 pid, int event)
{
int i, ret;
struct nlattr *tb[TCA_ACT_MAX_PRIO+1];
@@ -838,7 +841,7 @@ tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event)
if (event == RTM_DELACTION && n->nlmsg_flags&NLM_F_ROOT) {
if (tb[1] != NULL)
- return tca_action_flush(tb[1], n, pid);
+ return tca_action_flush(net, tb[1], n, pid);
else
return -EINVAL;
}
@@ -859,7 +862,7 @@ tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event)
}
if (event == RTM_GETACTION)
- ret = act_get_notify(pid, n, head, event);
+ ret = act_get_notify(net, pid, n, head, event);
else { /* delete */
struct sk_buff *skb;
@@ -878,7 +881,7 @@ tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event)
/* now do the delete */
tcf_action_destroy(head, 0);
- ret = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC,
+ ret = rtnetlink_send(skb, net, pid, RTNLGRP_TC,
n->nlmsg_flags&NLM_F_ECHO);
if (ret > 0)
return 0;
@@ -889,8 +892,8 @@ err:
return ret;
}
-static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event,
- u16 flags)
+static int tcf_add_notify(struct net *net, struct tc_action *a,
+ u32 pid, u32 seq, int event, u16 flags)
{
struct tcamsg *t;
struct nlmsghdr *nlh;
@@ -923,7 +926,7 @@ static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event,
nlh->nlmsg_len = skb_tail_pointer(skb) - b;
NETLINK_CB(skb).dst_group = RTNLGRP_TC;
- err = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, flags&NLM_F_ECHO);
+ err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, flags&NLM_F_ECHO);
if (err > 0)
err = 0;
return err;
@@ -936,7 +939,8 @@ nlmsg_failure:
static int
-tcf_action_add(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int ovr)
+tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
+ u32 pid, int ovr)
{
int ret = 0;
struct tc_action *act;
@@ -954,7 +958,7 @@ tcf_action_add(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int ovr)
/* dump then free all the actions after update; inserted policy
* stays intact
* */
- ret = tcf_add_notify(act, pid, seq, RTM_NEWACTION, n->nlmsg_flags);
+ ret = tcf_add_notify(net, act, pid, seq, RTM_NEWACTION, n->nlmsg_flags);
for (a = act; a; a = act) {
act = a->next;
kfree(a);
@@ -970,9 +974,6 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
u32 pid = skb ? NETLINK_CB(skb).pid : 0;
int ret = 0, ovr = 0;
- if (!net_eq(net, &init_net))
- return -EINVAL;
-
ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL);
if (ret < 0)
return ret;
@@ -995,15 +996,17 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
if (n->nlmsg_flags&NLM_F_REPLACE)
ovr = 1;
replay:
- ret = tcf_action_add(tca[TCA_ACT_TAB], n, pid, ovr);
+ ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, pid, ovr);
if (ret == -EAGAIN)
goto replay;
break;
case RTM_DELACTION:
- ret = tca_action_gd(tca[TCA_ACT_TAB], n, pid, RTM_DELACTION);
+ ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
+ pid, RTM_DELACTION);
break;
case RTM_GETACTION:
- ret = tca_action_gd(tca[TCA_ACT_TAB], n, pid, RTM_GETACTION);
+ ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
+ pid, RTM_GETACTION);
break;
default:
BUG();
@@ -1043,7 +1046,6 @@ find_dump_kind(const struct nlmsghdr *n)
static int
tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
{
- struct net *net = sock_net(skb->sk);
struct nlmsghdr *nlh;
unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
@@ -1053,9 +1055,6 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
struct tcamsg *t = (struct tcamsg *) NLMSG_DATA(cb->nlh);
struct nlattr *kind = find_dump_kind(cb->nlh);
- if (!net_eq(net, &init_net))
- return 0;
-
if (kind == NULL) {
printk("tc_dump_action: action bad kind\n");
return 0;
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index da27a170b6b7..03f80a0fa167 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -47,8 +47,8 @@ static int ipt_init_target(struct ipt_entry_target *t, char *table, unsigned int
target = xt_request_find_target(AF_INET, t->u.user.name,
t->u.user.revision);
- if (!target)
- return -ENOENT;
+ if (IS_ERR(target))
+ return PTR_ERR(target);
t->u.kernel.target = target;
par.table = table;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index f082b27ff46d..5fd0c28ef79a 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -99,8 +99,9 @@ out:
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);
-static int tfilter_notify(struct sk_buff *oskb, struct nlmsghdr *n,
- struct tcf_proto *tp, unsigned long fh, int event);
+static int tfilter_notify(struct net *net, struct sk_buff *oskb,
+ struct nlmsghdr *n, struct tcf_proto *tp,
+ unsigned long fh, int event);
/* Select new prio value from the range, managed by kernel. */
@@ -138,9 +139,6 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
int err;
int tp_created = 0;
- if (!net_eq(net, &init_net))
- return -EINVAL;
-
replay:
t = NLMSG_DATA(n);
protocol = TC_H_MIN(t->tcm_info);
@@ -159,7 +157,7 @@ replay:
/* Find head of filter chain. */
/* Find link */
- dev = __dev_get_by_index(&init_net, t->tcm_ifindex);
+ dev = __dev_get_by_index(net, t->tcm_ifindex);
if (dev == NULL)
return -ENODEV;
@@ -283,7 +281,7 @@ replay:
*back = tp->next;
spin_unlock_bh(root_lock);
- tfilter_notify(skb, n, tp, fh, RTM_DELTFILTER);
+ tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER);
tcf_destroy(tp);
err = 0;
goto errout;
@@ -306,10 +304,10 @@ replay:
case RTM_DELTFILTER:
err = tp->ops->delete(tp, fh);
if (err == 0)
- tfilter_notify(skb, n, tp, fh, RTM_DELTFILTER);
+ tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER);
goto errout;
case RTM_GETTFILTER:
- err = tfilter_notify(skb, n, tp, fh, RTM_NEWTFILTER);
+ err = tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER);
goto errout;
default:
err = -EINVAL;
@@ -325,7 +323,7 @@ replay:
*back = tp;
spin_unlock_bh(root_lock);
}
- tfilter_notify(skb, n, tp, fh, RTM_NEWTFILTER);
+ tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER);
} else {
if (tp_created)
tcf_destroy(tp);
@@ -371,8 +369,9 @@ nla_put_failure:
return -1;
}
-static int tfilter_notify(struct sk_buff *oskb, struct nlmsghdr *n,
- struct tcf_proto *tp, unsigned long fh, int event)
+static int tfilter_notify(struct net *net, struct sk_buff *oskb,
+ struct nlmsghdr *n, struct tcf_proto *tp,
+ unsigned long fh, int event)
{
struct sk_buff *skb;
u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;
@@ -386,7 +385,7 @@ static int tfilter_notify(struct sk_buff *oskb, struct nlmsghdr *n,
return -EINVAL;
}
- return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC,
+ return rtnetlink_send(skb, net, pid, RTNLGRP_TC,
n->nlmsg_flags & NLM_F_ECHO);
}
@@ -419,12 +418,9 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
const struct Qdisc_class_ops *cops;
struct tcf_dump_args arg;
- if (!net_eq(net, &init_net))
- return 0;
-
if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
return skb->len;
- if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
+ if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
return skb->len;
if (!tcm->tcm_parent)
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 17c5dfc67320..593eac056e8d 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -773,10 +773,10 @@ static int __init init_u32(void)
printk(" Performance counters on\n");
#endif
#ifdef CONFIG_NET_CLS_IND
- printk(" input device check on \n");
+ printk(" input device check on\n");
#endif
#ifdef CONFIG_NET_CLS_ACT
- printk(" Actions configured \n");
+ printk(" Actions configured\n");
#endif
return register_tcf_proto_ops(&cls_u32_ops);
}
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 145268ca57cf..9839b26674f4 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -35,10 +35,12 @@
#include <net/netlink.h>
#include <net/pkt_sched.h>
-static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n, u32 clid,
+static int qdisc_notify(struct net *net, struct sk_buff *oskb,
+ struct nlmsghdr *n, u32 clid,
struct Qdisc *old, struct Qdisc *new);
-static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
- struct Qdisc *q, unsigned long cl, int event);
+static int tclass_notify(struct net *net, struct sk_buff *oskb,
+ struct nlmsghdr *n, struct Qdisc *q,
+ unsigned long cl, int event);
/*
@@ -639,11 +641,12 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
}
EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
-static void notify_and_destroy(struct sk_buff *skb, struct nlmsghdr *n, u32 clid,
+static void notify_and_destroy(struct net *net, struct sk_buff *skb,
+ struct nlmsghdr *n, u32 clid,
struct Qdisc *old, struct Qdisc *new)
{
if (new || old)
- qdisc_notify(skb, n, clid, old, new);
+ qdisc_notify(net, skb, n, clid, old, new);
if (old)
qdisc_destroy(old);
@@ -663,6 +666,7 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
struct Qdisc *new, struct Qdisc *old)
{
struct Qdisc *q = old;
+ struct net *net = dev_net(dev);
int err = 0;
if (parent == NULL) {
@@ -699,12 +703,13 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
}
if (!ingress) {
- notify_and_destroy(skb, n, classid, dev->qdisc, new);
+ notify_and_destroy(net, skb, n, classid,
+ dev->qdisc, new);
if (new && !new->ops->attach)
atomic_inc(&new->refcnt);
dev->qdisc = new ? : &noop_qdisc;
} else {
- notify_and_destroy(skb, n, classid, old, new);
+ notify_and_destroy(net, skb, n, classid, old, new);
}
if (dev->flags & IFF_UP)
@@ -722,7 +727,7 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
err = -ENOENT;
}
if (!err)
- notify_and_destroy(skb, n, classid, old, new);
+ notify_and_destroy(net, skb, n, classid, old, new);
}
return err;
}
@@ -948,10 +953,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
struct Qdisc *p = NULL;
int err;
- if (!net_eq(net, &init_net))
- return -EINVAL;
-
- if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
+ if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
return -ENODEV;
err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
@@ -991,7 +993,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
if ((err = qdisc_graft(dev, p, skb, n, clid, NULL, q)) != 0)
return err;
} else {
- qdisc_notify(skb, n, clid, NULL, q);
+ qdisc_notify(net, skb, n, clid, NULL, q);
}
return 0;
}
@@ -1010,16 +1012,13 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
struct Qdisc *q, *p;
int err;
- if (!net_eq(net, &init_net))
- return -EINVAL;
-
replay:
/* Reinit, just in case something touches this. */
tcm = NLMSG_DATA(n);
clid = tcm->tcm_parent;
q = p = NULL;
- if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
+ if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
return -ENODEV;
err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
@@ -1106,7 +1105,7 @@ replay:
return -EINVAL;
err = qdisc_change(q, tca);
if (err == 0)
- qdisc_notify(skb, n, clid, NULL, q);
+ qdisc_notify(net, skb, n, clid, NULL, q);
return err;
create_n_graft:
@@ -1196,8 +1195,9 @@ nla_put_failure:
return -1;
}
-static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n,
- u32 clid, struct Qdisc *old, struct Qdisc *new)
+static int qdisc_notify(struct net *net, struct sk_buff *oskb,
+ struct nlmsghdr *n, u32 clid,
+ struct Qdisc *old, struct Qdisc *new)
{
struct sk_buff *skb;
u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;
@@ -1216,7 +1216,7 @@ static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n,
}
if (skb->len)
- return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
+ return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
err_out:
kfree_skb(skb);
@@ -1275,15 +1275,12 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
int s_idx, s_q_idx;
struct net_device *dev;
- if (!net_eq(net, &init_net))
- return 0;
-
s_idx = cb->args[0];
s_q_idx = q_idx = cb->args[1];
rcu_read_lock();
idx = 0;
- for_each_netdev_rcu(&init_net, dev) {
+ for_each_netdev_rcu(net, dev) {
struct netdev_queue *dev_queue;
if (idx < s_idx)
@@ -1335,10 +1332,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
u32 qid = TC_H_MAJ(clid);
int err;
- if (!net_eq(net, &init_net))
- return -EINVAL;
-
- if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
+ if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
return -ENODEV;
err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
@@ -1419,10 +1413,10 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
if (cops->delete)
err = cops->delete(q, cl);
if (err == 0)
- tclass_notify(skb, n, q, cl, RTM_DELTCLASS);
+ tclass_notify(net, skb, n, q, cl, RTM_DELTCLASS);
goto out;
case RTM_GETTCLASS:
- err = tclass_notify(skb, n, q, cl, RTM_NEWTCLASS);
+ err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
goto out;
default:
err = -EINVAL;
@@ -1435,7 +1429,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
if (cops->change)
err = cops->change(q, clid, pid, tca, &new_cl);
if (err == 0)
- tclass_notify(skb, n, q, new_cl, RTM_NEWTCLASS);
+ tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
out:
if (cl)
@@ -1487,8 +1481,9 @@ nla_put_failure:
return -1;
}
-static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
- struct Qdisc *q, unsigned long cl, int event)
+static int tclass_notify(struct net *net, struct sk_buff *oskb,
+ struct nlmsghdr *n, struct Qdisc *q,
+ unsigned long cl, int event)
{
struct sk_buff *skb;
u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;
@@ -1502,7 +1497,7 @@ static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
return -EINVAL;
}
- return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
+ return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
}
struct qdisc_dump_args
@@ -1577,12 +1572,9 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
struct net_device *dev;
int t, s_t;
- if (!net_eq(net, &init_net))
- return 0;
-
if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
return 0;
- if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
+ if ((dev = dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
return 0;
s_t = cb->args[0];
@@ -1692,7 +1684,7 @@ static int psched_show(struct seq_file *seq, void *v)
static int psched_open(struct inode *inode, struct file *file)
{
- return single_open(file, psched_show, PDE(inode)->data);
+ return single_open(file, psched_show, NULL);
}
static const struct file_operations psched_fops = {
@@ -1702,15 +1694,53 @@ static const struct file_operations psched_fops = {
.llseek = seq_lseek,
.release = single_release,
};
+
+static int __net_init psched_net_init(struct net *net)
+{
+ struct proc_dir_entry *e;
+
+ e = proc_net_fops_create(net, "psched", 0, &psched_fops);
+ if (e == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void __net_exit psched_net_exit(struct net *net)
+{
+ proc_net_remove(net, "psched");
+}
+#else
+static int __net_init psched_net_init(struct net *net)
+{
+ return 0;
+}
+
+static void __net_exit psched_net_exit(struct net *net)
+{
+}
#endif
+static struct pernet_operations psched_net_ops = {
+ .init = psched_net_init,
+ .exit = psched_net_exit,
+};
+
static int __init pktsched_init(void)
{
+ int err;
+
+ err = register_pernet_subsys(&psched_net_ops);
+ if (err) {
+ printk(KERN_ERR "pktsched_init: "
+ "cannot initialize per netns operations\n");
+ return err;
+ }
+
register_qdisc(&pfifo_qdisc_ops);
register_qdisc(&bfifo_qdisc_ops);
register_qdisc(&pfifo_head_drop_qdisc_ops);
register_qdisc(&mq_qdisc_ops);
- proc_net_fops_create(&init_net, "psched", 0, &psched_fops);
rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL);
rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL);
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index ff4dd53eeff0..a969b111bd76 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -94,7 +94,7 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
* Another cpu is holding lock, requeue & delay xmits for
* some time.
*/
- __get_cpu_var(netdev_rx_stat).cpu_collision++;
+ __get_cpu_var(softnet_data).cpu_collision++;
ret = dev_requeue_skb(skb, q);
}
@@ -529,7 +529,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
unsigned int size;
int err = -ENOBUFS;
- /* ensure that the Qdisc and the private data are 32-byte aligned */
+ /* ensure that the Qdisc and the private data are 64-byte aligned */
size = QDISC_ALIGN(sizeof(*sch));
size += ops->priv_size + (QDISC_ALIGNTO - 1);
@@ -591,6 +591,13 @@ void qdisc_reset(struct Qdisc *qdisc)
}
EXPORT_SYMBOL(qdisc_reset);
+static void qdisc_rcu_free(struct rcu_head *head)
+{
+ struct Qdisc *qdisc = container_of(head, struct Qdisc, rcu_head);
+
+ kfree((char *) qdisc - qdisc->padded);
+}
+
void qdisc_destroy(struct Qdisc *qdisc)
{
const struct Qdisc_ops *ops = qdisc->ops;
@@ -614,7 +621,11 @@ void qdisc_destroy(struct Qdisc *qdisc)
dev_put(qdisc_dev(qdisc));
kfree_skb(qdisc->gso_skb);
- kfree((char *) qdisc - qdisc->padded);
+ /*
+ * gen_estimator est_timer() might access qdisc->q.lock,
+ * wait a RCU grace period before freeing qdisc.
+ */
+ call_rcu(&qdisc->rcu_head, qdisc_rcu_free);
}
EXPORT_SYMBOL(qdisc_destroy);
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index c5a9ac566007..c65762823f5e 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -123,8 +123,8 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
case htons(ETH_P_IP):
{
const struct iphdr *iph = ip_hdr(skb);
- h = iph->daddr;
- h2 = iph->saddr ^ iph->protocol;
+ h = (__force u32)iph->daddr;
+ h2 = (__force u32)iph->saddr ^ iph->protocol;
if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
(iph->protocol == IPPROTO_TCP ||
iph->protocol == IPPROTO_UDP ||
@@ -138,8 +138,8 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
case htons(ETH_P_IPV6):
{
struct ipv6hdr *iph = ipv6_hdr(skb);
- h = iph->daddr.s6_addr32[3];
- h2 = iph->saddr.s6_addr32[3] ^ iph->nexthdr;
+ h = (__force u32)iph->daddr.s6_addr32[3];
+ h2 = (__force u32)iph->saddr.s6_addr32[3] ^ iph->nexthdr;
if (iph->nexthdr == IPPROTO_TCP ||
iph->nexthdr == IPPROTO_UDP ||
iph->nexthdr == IPPROTO_UDPLITE ||
@@ -150,7 +150,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
break;
}
default:
- h = (unsigned long)skb_dst(skb) ^ skb->protocol;
+ h = (unsigned long)skb_dst(skb) ^ (__force u32)skb->protocol;
h2 = (unsigned long)skb->sk;
}
diff --git a/net/sctp/Kconfig b/net/sctp/Kconfig
index 58b3e882a187..126b014eb79b 100644
--- a/net/sctp/Kconfig
+++ b/net/sctp/Kconfig
@@ -37,6 +37,18 @@ menuconfig IP_SCTP
if IP_SCTP
+config NET_SCTPPROBE
+ tristate "SCTP: Association probing"
+ depends on PROC_FS && KPROBES
+ ---help---
+ This module allows for capturing the changes to SCTP association
+ state in response to incoming packets. It is used for debugging
+ SCTP congestion control algorithms. If you don't understand
+ what was just said, you don't need it: say N.
+
+ To compile this code as a module, choose M here: the
+ module will be called sctp_probe.
+
config SCTP_DBG_MSG
bool "SCTP: Debug messages"
help
diff --git a/net/sctp/Makefile b/net/sctp/Makefile
index 6b794734380a..5c30b7a873df 100644
--- a/net/sctp/Makefile
+++ b/net/sctp/Makefile
@@ -3,6 +3,7 @@
#
obj-$(CONFIG_IP_SCTP) += sctp.o
+obj-$(CONFIG_NET_SCTPPROBE) += sctp_probe.o
sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \
protocol.o endpointola.o associola.o \
@@ -11,6 +12,8 @@ sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \
tsnmap.o bind_addr.o socket.o primitive.o \
output.o input.o debug.o ssnmap.o auth.o
+sctp_probe-y := probe.o
+
sctp-$(CONFIG_SCTP_DBG_OBJCNT) += objcnt.o
sctp-$(CONFIG_PROC_FS) += proc.o
sctp-$(CONFIG_SYSCTL) += sysctl.o
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 99c93ee98ad9..3912420cedcc 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -87,9 +87,6 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
/* Retrieve the SCTP per socket area. */
sp = sctp_sk((struct sock *)sk);
- /* Init all variables to a known value. */
- memset(asoc, 0, sizeof(struct sctp_association));
-
/* Discarding const is appropriate here. */
asoc->ep = (struct sctp_endpoint *)ep;
sctp_endpoint_hold(asoc->ep);
@@ -762,7 +759,8 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
asoc->peer.retran_path = peer;
}
- if (asoc->peer.active_path == asoc->peer.retran_path) {
+ if (asoc->peer.active_path == asoc->peer.retran_path &&
+ peer->state != SCTP_UNCONFIRMED) {
asoc->peer.retran_path = peer;
}
@@ -1320,12 +1318,13 @@ void sctp_assoc_update_retran_path(struct sctp_association *asoc)
/* Keep track of the next transport in case
* we don't find any active transport.
*/
- if (!next)
+ if (t->state != SCTP_UNCONFIRMED && !next)
next = t;
}
}
- asoc->peer.retran_path = t;
+ if (t)
+ asoc->peer.retran_path = t;
SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_update_retran_path:association"
" %p addr: ",
@@ -1485,7 +1484,7 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned len)
if (asoc->rwnd >= len) {
asoc->rwnd -= len;
if (over) {
- asoc->rwnd_press = asoc->rwnd;
+ asoc->rwnd_press += asoc->rwnd;
asoc->rwnd = 0;
}
} else {
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index 3eab6db59a37..476caaf100ed 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -58,9 +58,9 @@ static void sctp_datamsg_init(struct sctp_datamsg *msg)
msg->send_failed = 0;
msg->send_error = 0;
msg->can_abandon = 0;
+ msg->can_delay = 1;
msg->expires_at = 0;
INIT_LIST_HEAD(&msg->chunks);
- msg->msg_size = 0;
}
/* Allocate and initialize datamsg. */
@@ -157,7 +157,6 @@ static void sctp_datamsg_assign(struct sctp_datamsg *msg, struct sctp_chunk *chu
{
sctp_datamsg_hold(msg);
chunk->msg = msg;
- msg->msg_size += chunk->skb->len;
}
@@ -247,6 +246,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
if (msg_len >= first_len) {
msg_len -= first_len;
whole = 1;
+ msg->can_delay = 0;
}
/* How many full sized? How many bytes leftover? */
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 7ec09ba03a1c..e10acc01c75f 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -70,8 +70,6 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
struct sctp_shared_key *null_key;
int err;
- memset(ep, 0, sizeof(struct sctp_endpoint));
-
ep->digest = kzalloc(SCTP_SIGNATURE_SIZE, gfp);
if (!ep->digest)
return NULL;
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 2a570184e5a9..ea2192444ce6 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -440,11 +440,25 @@ void sctp_icmp_proto_unreachable(struct sock *sk,
{
SCTP_DEBUG_PRINTK("%s\n", __func__);
- sctp_do_sm(SCTP_EVENT_T_OTHER,
- SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
- asoc->state, asoc->ep, asoc, t,
- GFP_ATOMIC);
+ if (sock_owned_by_user(sk)) {
+ if (timer_pending(&t->proto_unreach_timer))
+ return;
+ else {
+ if (!mod_timer(&t->proto_unreach_timer,
+ jiffies + (HZ/20)))
+ sctp_association_hold(asoc);
+ }
+
+ } else {
+ if (timer_pending(&t->proto_unreach_timer) &&
+ del_timer(&t->proto_unreach_timer))
+ sctp_association_put(asoc);
+ sctp_do_sm(SCTP_EVENT_T_OTHER,
+ SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
+ asoc->state, asoc->ep, asoc, t,
+ GFP_ATOMIC);
+ }
}
/* Common lookup code for icmp/icmpv6 error handler. */
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 9fb5d37c37ad..732689140fb8 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -232,7 +232,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
if (!(transport->param_flags & SPP_PMTUD_ENABLE))
skb->local_df = 1;
- return ip6_xmit(sk, skb, &fl, np->opt, 0);
+ return ip6_xmit(sk, skb, &fl, np->opt);
}
/* Returns the dst cache entry for the given source and destination ip
@@ -277,20 +277,7 @@ static struct dst_entry *sctp_v6_get_dst(struct sctp_association *asoc,
static inline int sctp_v6_addr_match_len(union sctp_addr *s1,
union sctp_addr *s2)
{
- struct in6_addr *a1 = &s1->v6.sin6_addr;
- struct in6_addr *a2 = &s2->v6.sin6_addr;
- int i, j;
-
- for (i = 0; i < 4 ; i++) {
- __be32 a1xora2;
-
- a1xora2 = a1->s6_addr32[i] ^ a2->s6_addr32[i];
-
- if ((j = fls(ntohl(a1xora2))))
- return (i * 32 + 32 - j);
- }
-
- return (i*32);
+ return ipv6_addr_diff(&s1->v6.sin6_addr, &s2->v6.sin6_addr);
}
/* Fills in the source address(saddr) based on the destination address(daddr)
@@ -372,13 +359,13 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
}
read_lock_bh(&in6_dev->lock);
- for (ifp = in6_dev->addr_list; ifp; ifp = ifp->if_next) {
+ list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
/* Add the address to the local list. */
addr = t_new(struct sctp_sockaddr_entry, GFP_ATOMIC);
if (addr) {
addr->a.v6.sin6_family = AF_INET6;
addr->a.v6.sin6_port = 0;
- addr->a.v6.sin6_addr = ifp->addr;
+ ipv6_addr_copy(&addr->a.v6.sin6_addr, &ifp->addr);
addr->a.v6.sin6_scope_id = dev->ifindex;
addr->valid = 1;
INIT_LIST_HEAD(&addr->list);
@@ -419,7 +406,7 @@ static void sctp_v6_from_sk(union sctp_addr *addr, struct sock *sk)
{
addr->v6.sin6_family = AF_INET6;
addr->v6.sin6_port = 0;
- addr->v6.sin6_addr = inet6_sk(sk)->rcv_saddr;
+ ipv6_addr_copy(&addr->v6.sin6_addr, &inet6_sk(sk)->rcv_saddr);
}
/* Initialize sk->sk_rcv_saddr from sctp_addr. */
@@ -432,7 +419,7 @@ static void sctp_v6_to_sk_saddr(union sctp_addr *addr, struct sock *sk)
inet6_sk(sk)->rcv_saddr.s6_addr32[3] =
addr->v4.sin_addr.s_addr;
} else {
- inet6_sk(sk)->rcv_saddr = addr->v6.sin6_addr;
+ ipv6_addr_copy(&inet6_sk(sk)->rcv_saddr, &addr->v6.sin6_addr);
}
}
@@ -445,7 +432,7 @@ static void sctp_v6_to_sk_daddr(union sctp_addr *addr, struct sock *sk)
inet6_sk(sk)->daddr.s6_addr32[2] = htonl(0x0000ffff);
inet6_sk(sk)->daddr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
} else {
- inet6_sk(sk)->daddr = addr->v6.sin6_addr;
+ ipv6_addr_copy(&inet6_sk(sk)->daddr, &addr->v6.sin6_addr);
}
}
diff --git a/net/sctp/output.c b/net/sctp/output.c
index fad261d41ec2..a646681f5acd 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -429,24 +429,17 @@ int sctp_packet_transmit(struct sctp_packet *packet)
list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
list_del_init(&chunk->list);
if (sctp_chunk_is_data(chunk)) {
+ /* 6.3.1 C4) When data is in flight and when allowed
+ * by rule C5, a new RTT measurement MUST be made each
+ * round trip. Furthermore, new RTT measurements
+ * SHOULD be made no more than once per round-trip
+ * for a given destination transport address.
+ */
- if (!chunk->resent) {
-
- /* 6.3.1 C4) When data is in flight and when allowed
- * by rule C5, a new RTT measurement MUST be made each
- * round trip. Furthermore, new RTT measurements
- * SHOULD be made no more than once per round-trip
- * for a given destination transport address.
- */
-
- if (!tp->rto_pending) {
- chunk->rtt_in_progress = 1;
- tp->rto_pending = 1;
- }
+ if (!tp->rto_pending) {
+ chunk->rtt_in_progress = 1;
+ tp->rto_pending = 1;
}
-
- chunk->resent = 1;
-
has_data = 1;
}
@@ -681,7 +674,7 @@ static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
* Don't delay large message writes that may have been
 * fragmented into small pieces.
*/
- if ((len < max) && (chunk->msg->msg_size < max)) {
+ if ((len < max) && chunk->msg->can_delay) {
retval = SCTP_XMIT_NAGLE_DELAY;
goto finish;
}
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index abfc0b8dee74..5d057178ce0c 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -62,7 +62,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
struct list_head *transmitted_queue,
struct sctp_transport *transport,
struct sctp_sackhdr *sack,
- __u32 highest_new_tsn);
+ __u32 *highest_new_tsn);
static void sctp_mark_missing(struct sctp_outq *q,
struct list_head *transmitted_queue,
@@ -308,7 +308,7 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
/* If it is data, queue it up, otherwise, send it
* immediately.
*/
- if (SCTP_CID_DATA == chunk->chunk_hdr->type) {
+ if (sctp_chunk_is_data(chunk)) {
/* Is it OK to queue data chunks? */
/* From 9. Termination of Association
*
@@ -598,11 +598,23 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
if (fast_rtx && !chunk->fast_retransmit)
continue;
+redo:
/* Attempt to append this chunk to the packet. */
status = sctp_packet_append_chunk(pkt, chunk);
switch (status) {
case SCTP_XMIT_PMTU_FULL:
+ if (!pkt->has_data && !pkt->has_cookie_echo) {
+ /* If this packet did not contain DATA then
+ * retransmission did not happen, so do it
+ * again. We'll ignore the error here since
+ * control chunks are already freed so there
+ * is nothing we can do.
+ */
+ sctp_packet_transmit(pkt);
+ goto redo;
+ }
+
/* Send this packet. */
error = sctp_packet_transmit(pkt);
@@ -647,14 +659,6 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
if (chunk->fast_retransmit == SCTP_NEED_FRTX)
chunk->fast_retransmit = SCTP_DONT_FRTX;
- /* Force start T3-rtx timer when fast retransmitting
- * the earliest outstanding TSN
- */
- if (!timer && fast_rtx &&
- ntohl(chunk->subh.data_hdr->tsn) ==
- asoc->ctsn_ack_point + 1)
- timer = 2;
-
q->empty = 0;
break;
}
@@ -854,6 +858,12 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
if (status != SCTP_XMIT_OK) {
/* put the chunk back */
list_add(&chunk->list, &q->control_chunk_list);
+ } else if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) {
+ /* PR-SCTP C5) If a FORWARD TSN is sent, the
+ * sender MUST assure that at least one T3-rtx
+ * timer is running.
+ */
+ sctp_transport_reset_timers(transport);
}
break;
@@ -906,8 +916,7 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
rtx_timeout, &start_timer);
if (start_timer)
- sctp_transport_reset_timers(transport,
- start_timer-1);
+ sctp_transport_reset_timers(transport);
/* This can happen on COOKIE-ECHO resend. Only
* one chunk can get bundled with a COOKIE-ECHO.
@@ -1040,7 +1049,7 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
list_add_tail(&chunk->transmitted_list,
&transport->transmitted);
- sctp_transport_reset_timers(transport, 0);
+ sctp_transport_reset_timers(transport);
q->empty = 0;
@@ -1100,32 +1109,6 @@ static void sctp_sack_update_unack_data(struct sctp_association *assoc,
assoc->unack_data = unack_data;
}
-/* Return the highest new tsn that is acknowledged by the given SACK chunk. */
-static __u32 sctp_highest_new_tsn(struct sctp_sackhdr *sack,
- struct sctp_association *asoc)
-{
- struct sctp_transport *transport;
- struct sctp_chunk *chunk;
- __u32 highest_new_tsn, tsn;
- struct list_head *transport_list = &asoc->peer.transport_addr_list;
-
- highest_new_tsn = ntohl(sack->cum_tsn_ack);
-
- list_for_each_entry(transport, transport_list, transports) {
- list_for_each_entry(chunk, &transport->transmitted,
- transmitted_list) {
- tsn = ntohl(chunk->subh.data_hdr->tsn);
-
- if (!chunk->tsn_gap_acked &&
- TSN_lt(highest_new_tsn, tsn) &&
- sctp_acked(sack, tsn))
- highest_new_tsn = tsn;
- }
- }
-
- return highest_new_tsn;
-}
-
/* This is where we REALLY process a SACK.
*
* Process the SACK against the outqueue. Mostly, this just frees
@@ -1145,6 +1128,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
struct sctp_transport *primary = asoc->peer.primary_path;
int count_of_newacks = 0;
int gap_ack_blocks;
+ u8 accum_moved = 0;
/* Grab the association's destination address list. */
transport_list = &asoc->peer.transport_addr_list;
@@ -1193,18 +1177,15 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
if (gap_ack_blocks)
highest_tsn += ntohs(frags[gap_ack_blocks - 1].gab.end);
- if (TSN_lt(asoc->highest_sacked, highest_tsn)) {
- highest_new_tsn = highest_tsn;
+ if (TSN_lt(asoc->highest_sacked, highest_tsn))
asoc->highest_sacked = highest_tsn;
- } else {
- highest_new_tsn = sctp_highest_new_tsn(sack, asoc);
- }
+ highest_new_tsn = sack_ctsn;
/* Run through the retransmit queue. Credit bytes received
* and free those chunks that we can.
*/
- sctp_check_transmitted(q, &q->retransmit, NULL, sack, highest_new_tsn);
+ sctp_check_transmitted(q, &q->retransmit, NULL, sack, &highest_new_tsn);
/* Run through the transmitted queue.
* Credit bytes received and free those chunks which we can.
@@ -1213,7 +1194,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
*/
list_for_each_entry(transport, transport_list, transports) {
sctp_check_transmitted(q, &transport->transmitted,
- transport, sack, highest_new_tsn);
+ transport, sack, &highest_new_tsn);
/*
* SFR-CACC algorithm:
* C) Let count_of_newacks be the number of
@@ -1223,16 +1204,22 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
count_of_newacks ++;
}
+ /* Move the Cumulative TSN Ack Point if appropriate. */
+ if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn)) {
+ asoc->ctsn_ack_point = sack_ctsn;
+ accum_moved = 1;
+ }
+
if (gap_ack_blocks) {
+
+ if (asoc->fast_recovery && accum_moved)
+ highest_new_tsn = highest_tsn;
+
list_for_each_entry(transport, transport_list, transports)
sctp_mark_missing(q, &transport->transmitted, transport,
highest_new_tsn, count_of_newacks);
}
- /* Move the Cumulative TSN Ack Point if appropriate. */
- if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn))
- asoc->ctsn_ack_point = sack_ctsn;
-
/* Update unack_data field in the assoc. */
sctp_sack_update_unack_data(asoc, sack);
@@ -1315,7 +1302,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
struct list_head *transmitted_queue,
struct sctp_transport *transport,
struct sctp_sackhdr *sack,
- __u32 highest_new_tsn_in_sack)
+ __u32 *highest_new_tsn_in_sack)
{
struct list_head *lchunk;
struct sctp_chunk *tchunk;
@@ -1387,7 +1374,6 @@ static void sctp_check_transmitted(struct sctp_outq *q,
* instance).
*/
if (!tchunk->tsn_gap_acked &&
- !tchunk->resent &&
tchunk->rtt_in_progress) {
tchunk->rtt_in_progress = 0;
rtt = jiffies - tchunk->sent_at;
@@ -1404,6 +1390,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
*/
if (!tchunk->tsn_gap_acked) {
tchunk->tsn_gap_acked = 1;
+ *highest_new_tsn_in_sack = tsn;
bytes_acked += sctp_data_size(tchunk);
if (!tchunk->transport)
migrate_bytes += sctp_data_size(tchunk);
@@ -1677,7 +1664,8 @@ static void sctp_mark_missing(struct sctp_outq *q,
struct sctp_chunk *chunk;
__u32 tsn;
char do_fast_retransmit = 0;
- struct sctp_transport *primary = q->asoc->peer.primary_path;
+ struct sctp_association *asoc = q->asoc;
+ struct sctp_transport *primary = asoc->peer.primary_path;
list_for_each_entry(chunk, transmitted_queue, transmitted_list) {
diff --git a/net/sctp/probe.c b/net/sctp/probe.c
new file mode 100644
index 000000000000..db3a42b8b349
--- /dev/null
+++ b/net/sctp/probe.c
@@ -0,0 +1,214 @@
+/*
+ * sctp_probe - Observe the SCTP flow with kprobes.
+ *
+ * The idea for this came from Werner Almesberger's umlsim
+ * Copyright (C) 2004, Stephen Hemminger <shemminger@osdl.org>
+ *
+ * Modified for SCTP from Stephen Hemminger's code
+ * Copyright (C) 2010, Wei Yongjun <yjwei@cn.fujitsu.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+#include <linux/socket.h>
+#include <linux/sctp.h>
+#include <linux/proc_fs.h>
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+#include <linux/kfifo.h>
+#include <linux/time.h>
+#include <net/net_namespace.h>
+
+#include <net/sctp/sctp.h>
+#include <net/sctp/sm.h>
+
+MODULE_AUTHOR("Wei Yongjun <yjwei@cn.fujitsu.com>");
+MODULE_DESCRIPTION("SCTP snooper");
+MODULE_LICENSE("GPL");
+
+static int port __read_mostly = 0;
+MODULE_PARM_DESC(port, "Port to match (0=all)");
+module_param(port, int, 0);
+
+static int bufsize __read_mostly = 64 * 1024;
+MODULE_PARM_DESC(bufsize, "Log buffer size (default 64k)");
+module_param(bufsize, int, 0);
+
+static int full __read_mostly = 1;
+MODULE_PARM_DESC(full, "Full log (1=every ack packet received, 0=only cwnd changes)");
+module_param(full, int, 0);
+
+static const char procname[] = "sctpprobe";
+
+static struct {
+ struct kfifo fifo;
+ spinlock_t lock;
+ wait_queue_head_t wait;
+ struct timespec tstart;
+} sctpw;
+
+static void printl(const char *fmt, ...)
+{
+ va_list args;
+ int len;
+ char tbuf[256];
+
+ va_start(args, fmt);
+ len = vscnprintf(tbuf, sizeof(tbuf), fmt, args);
+ va_end(args);
+
+ kfifo_in_locked(&sctpw.fifo, tbuf, len, &sctpw.lock);
+ wake_up(&sctpw.wait);
+}
+
+static int sctpprobe_open(struct inode *inode, struct file *file)
+{
+ kfifo_reset(&sctpw.fifo);
+ getnstimeofday(&sctpw.tstart);
+
+ return 0;
+}
+
+static ssize_t sctpprobe_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ int error = 0, cnt = 0;
+ unsigned char *tbuf;
+
+ if (!buf)
+ return -EINVAL;
+
+ if (len == 0)
+ return 0;
+
+ tbuf = vmalloc(len);
+ if (!tbuf)
+ return -ENOMEM;
+
+ error = wait_event_interruptible(sctpw.wait,
+ kfifo_len(&sctpw.fifo) != 0);
+ if (error)
+ goto out_free;
+
+ cnt = kfifo_out_locked(&sctpw.fifo, tbuf, len, &sctpw.lock);
+ error = copy_to_user(buf, tbuf, cnt) ? -EFAULT : 0;
+
+out_free:
+ vfree(tbuf);
+
+ return error ? error : cnt;
+}
+
+static const struct file_operations sctpprobe_fops = {
+ .owner = THIS_MODULE,
+ .open = sctpprobe_open,
+ .read = sctpprobe_read,
+};
+
+sctp_disposition_t jsctp_sf_eat_sack(const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const sctp_subtype_t type,
+ void *arg,
+ sctp_cmd_seq_t *commands)
+{
+ struct sctp_transport *sp;
+ static __u32 lcwnd = 0;
+ struct timespec now;
+
+ sp = asoc->peer.primary_path;
+
+ if ((full || sp->cwnd != lcwnd) &&
+ (!port || asoc->peer.port == port ||
+ ep->base.bind_addr.port == port)) {
+ lcwnd = sp->cwnd;
+
+ getnstimeofday(&now);
+ now = timespec_sub(now, sctpw.tstart);
+
+ printl("%lu.%06lu ", (unsigned long) now.tv_sec,
+ (unsigned long) now.tv_nsec / NSEC_PER_USEC);
+
+ printl("%p %5d %5d %5d %8d %5d ", asoc,
+ ep->base.bind_addr.port, asoc->peer.port,
+ asoc->pathmtu, asoc->peer.rwnd, asoc->unack_data);
+
+ list_for_each_entry(sp, &asoc->peer.transport_addr_list,
+ transports) {
+ if (sp == asoc->peer.primary_path)
+ printl("*");
+
+ if (sp->ipaddr.sa.sa_family == AF_INET)
+ printl("%pI4 ", &sp->ipaddr.v4.sin_addr);
+ else
+ printl("%pI6 ", &sp->ipaddr.v6.sin6_addr);
+
+ printl("%2u %8u %8u %8u %8u %8u ",
+ sp->state, sp->cwnd, sp->ssthresh,
+ sp->flight_size, sp->partial_bytes_acked,
+ sp->pathmtu);
+ }
+ printl("\n");
+ }
+
+ jprobe_return();
+ return 0;
+}
+
+static struct jprobe sctp_recv_probe = {
+ .kp = {
+ .symbol_name = "sctp_sf_eat_sack_6_2",
+ },
+ .entry = jsctp_sf_eat_sack,
+};
+
+static __init int sctpprobe_init(void)
+{
+ int ret = -ENOMEM;
+
+ init_waitqueue_head(&sctpw.wait);
+ spin_lock_init(&sctpw.lock);
+ if (kfifo_alloc(&sctpw.fifo, bufsize, GFP_KERNEL))
+ return ret;
+
+ if (!proc_net_fops_create(&init_net, procname, S_IRUSR,
+ &sctpprobe_fops))
+ goto free_kfifo;
+
+ ret = register_jprobe(&sctp_recv_probe);
+ if (ret)
+ goto remove_proc;
+
+ pr_info("SCTP probe registered (port=%d)\n", port);
+
+ return 0;
+
+remove_proc:
+ proc_net_remove(&init_net, procname);
+free_kfifo:
+ kfifo_free(&sctpw.fifo);
+ return ret;
+}
+
+static __exit void sctpprobe_exit(void)
+{
+ kfifo_free(&sctpw.fifo);
+ proc_net_remove(&init_net, procname);
+ unregister_jprobe(&sctp_recv_probe);
+}
+
+module_init(sctpprobe_init);
+module_exit(sctpprobe_exit);
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index a56f98e82f92..182749867c72 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -474,13 +474,17 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
memset(&fl, 0x0, sizeof(struct flowi));
fl.fl4_dst = daddr->v4.sin_addr.s_addr;
+ fl.fl_ip_dport = daddr->v4.sin_port;
fl.proto = IPPROTO_SCTP;
if (asoc) {
fl.fl4_tos = RT_CONN_FLAGS(asoc->base.sk);
fl.oif = asoc->base.sk->sk_bound_dev_if;
+ fl.fl_ip_sport = htons(asoc->base.bind_addr.port);
}
- if (saddr)
+ if (saddr) {
fl.fl4_src = saddr->v4.sin_addr.s_addr;
+ fl.fl_ip_sport = saddr->v4.sin_port;
+ }
SCTP_DEBUG_PRINTK("%s: DST:%pI4, SRC:%pI4 - ",
__func__, &fl.fl4_dst, &fl.fl4_src);
@@ -528,6 +532,7 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
if ((laddr->state == SCTP_ADDR_SRC) &&
(AF_INET == laddr->a.sa.sa_family)) {
fl.fl4_src = laddr->a.v4.sin_addr.s_addr;
+ fl.fl_ip_sport = laddr->a.v4.sin_port;
if (!ip_route_output_key(&init_net, &rt, &fl)) {
dst = &rt->u.dst;
goto out_unlock;
@@ -854,7 +859,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb,
IP_PMTUDISC_DO : IP_PMTUDISC_DONT;
SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS);
- return ip_queue_xmit(skb, 0);
+ return ip_queue_xmit(skb);
}
static struct sctp_af sctp_af_inet;
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 30c1767186b8..d8261f3d7715 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -445,10 +445,17 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
if (!retval)
goto nomem_chunk;
- /* Per the advice in RFC 2960 6.4, send this reply to
- * the source of the INIT packet.
+ /* RFC 2960 6.4 Multi-homed SCTP Endpoints
+ *
+ * An endpoint SHOULD transmit reply chunks (e.g., SACK,
+ * HEARTBEAT ACK, * etc.) to the same destination transport
+ * address from which it received the DATA or control chunk
+ * to which it is replying.
+ *
+ * [INIT ACK back to where the INIT came from.]
*/
retval->transport = chunk->transport;
+
retval->subh.init_hdr =
sctp_addto_chunk(retval, sizeof(initack), &initack);
retval->param_hdr.v = sctp_addto_chunk(retval, addrs_len, addrs.v);
@@ -487,18 +494,6 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
/* We need to remove the const qualifier at this point. */
retval->asoc = (struct sctp_association *) asoc;
- /* RFC 2960 6.4 Multi-homed SCTP Endpoints
- *
- * An endpoint SHOULD transmit reply chunks (e.g., SACK,
- * HEARTBEAT ACK, * etc.) to the same destination transport
- * address from which it received the DATA or control chunk
- * to which it is replying.
- *
- * [INIT ACK back to where the INIT came from.]
- */
- if (chunk)
- retval->transport = chunk->transport;
-
nomem_chunk:
kfree(cookie);
nomem_cookie:
@@ -1254,7 +1249,6 @@ struct sctp_chunk *sctp_chunkify(struct sk_buff *skb,
INIT_LIST_HEAD(&retval->list);
retval->skb = skb;
retval->asoc = (struct sctp_association *)asoc;
- retval->resent = 0;
retval->has_tsn = 0;
retval->has_ssn = 0;
retval->rtt_in_progress = 0;
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index d5ae450b6f02..22e670200449 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -397,6 +397,41 @@ out_unlock:
sctp_transport_put(transport);
}
+/* Handle the timeout of the ICMP protocol unreachable timer. Trigger
+ * the correct state machine transition that will close the association.
+ */
+void sctp_generate_proto_unreach_event(unsigned long data)
+{
+ struct sctp_transport *transport = (struct sctp_transport *) data;
+ struct sctp_association *asoc = transport->asoc;
+
+ sctp_bh_lock_sock(asoc->base.sk);
+ if (sock_owned_by_user(asoc->base.sk)) {
+ SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __func__);
+
+ /* Try again later. */
+ if (!mod_timer(&transport->proto_unreach_timer,
+ jiffies + (HZ/20)))
+ sctp_association_hold(asoc);
+ goto out_unlock;
+ }
+
+ /* Is this structure just waiting around for us to actually
+ * get destroyed?
+ */
+ if (asoc->base.dead)
+ goto out_unlock;
+
+ sctp_do_sm(SCTP_EVENT_T_OTHER,
+ SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
+ asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);
+
+out_unlock:
+ sctp_bh_unlock_sock(asoc->base.sk);
+ sctp_association_put(asoc);
+}
+
+
/* Inject a SACK Timeout event into the state machine. */
static void sctp_generate_sack_event(unsigned long data)
{
@@ -697,11 +732,15 @@ static void sctp_cmd_setup_t2(sctp_cmd_seq_t *cmds,
{
struct sctp_transport *t;
- t = sctp_assoc_choose_alter_transport(asoc,
+ if (chunk->transport)
+ t = chunk->transport;
+ else {
+ t = sctp_assoc_choose_alter_transport(asoc,
asoc->shutdown_last_sent_to);
+ chunk->transport = t;
+ }
asoc->shutdown_last_sent_to = t;
asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto;
- chunk->transport = t;
}
/* Helper function to change the state of an association. */
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 44a1ab03a3f0..ba1add0b13c3 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3720,9 +3720,6 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
SCTP_DBG_OBJCNT_INC(sock);
- /* Set socket backlog limit. */
- sk->sk_backlog.limit = sysctl_sctp_rmem[1];
-
local_bh_disable();
percpu_counter_inc(&sctp_sockets_allocated);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
@@ -4387,7 +4384,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
transports) {
memcpy(&temp, &from->ipaddr, sizeof(temp));
sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
- addrlen = sctp_get_af_specific(sk->sk_family)->sockaddr_len;
+ addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
if (space_left < addrlen)
return -ENOMEM;
if (copy_to_user(to, &temp, addrlen))
@@ -5482,7 +5479,6 @@ pp_found:
*/
int reuse = sk->sk_reuse;
struct sock *sk2;
- struct hlist_node *node;
SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n");
if (pp->fastreuse && sk->sk_reuse &&
@@ -5703,7 +5699,7 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
struct sctp_sock *sp = sctp_sk(sk);
unsigned int mask;
- poll_wait(file, sk->sk_sleep, wait);
+ poll_wait(file, sk_sleep(sk), wait);
/* A TCP-style listening socket becomes readable when the accept queue
* is not empty.
@@ -5944,7 +5940,7 @@ static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p)
int error;
DEFINE_WAIT(wait);
- prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
/* Socket errors? */
error = sock_error(sk);
@@ -5981,14 +5977,14 @@ static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p)
sctp_lock_sock(sk);
ready:
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
return 0;
interrupted:
error = sock_intr_errno(*timeo_p);
out:
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
*err = error;
return error;
}
@@ -6062,14 +6058,14 @@ static void __sctp_write_space(struct sctp_association *asoc)
wake_up_interruptible(&asoc->wait);
if (sctp_writeable(sk)) {
- if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible(sk->sk_sleep);
+ if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
+ wake_up_interruptible(sk_sleep(sk));
/* Note that we try to include the Async I/O support
* here by modeling from the current TCP/UDP code.
* We have not tested with it yet.
*/
- if (sock->fasync_list &&
+ if (sock->wq->fasync_list &&
!(sk->sk_shutdown & SEND_SHUTDOWN))
sock_wake_async(sock,
SOCK_WAKE_SPACE, POLL_OUT);
@@ -6191,12 +6187,15 @@ do_nonblock:
void sctp_data_ready(struct sock *sk, int len)
{
- read_lock_bh(&sk->sk_callback_lock);
- if (sk_has_sleeper(sk))
- wake_up_interruptible_sync_poll(sk->sk_sleep, POLLIN |
+ struct socket_wq *wq;
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
POLLRDNORM | POLLRDBAND);
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
- read_unlock_bh(&sk->sk_callback_lock);
+ rcu_read_unlock();
}
/* If socket sndbuf has changed, wake up all per association waiters. */
@@ -6307,7 +6306,7 @@ static int sctp_wait_for_accept(struct sock *sk, long timeo)
for (;;) {
- prepare_to_wait_exclusive(sk->sk_sleep, &wait,
+ prepare_to_wait_exclusive(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
if (list_empty(&ep->asocs)) {
@@ -6333,7 +6332,7 @@ static int sctp_wait_for_accept(struct sock *sk, long timeo)
break;
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
return err;
}
@@ -6343,7 +6342,7 @@ static void sctp_wait_for_close(struct sock *sk, long timeout)
DEFINE_WAIT(wait);
do {
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
if (list_empty(&sctp_sk(sk)->ep->asocs))
break;
sctp_release_sock(sk);
@@ -6351,7 +6350,7 @@ static void sctp_wait_for_close(struct sock *sk, long timeout)
sctp_lock_sock(sk);
} while (!signal_pending(current) && timeout);
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
}
static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk)
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index be4d63d5a5cc..d67501f92ca3 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -64,9 +64,6 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
/* Copy in the address. */
peer->ipaddr = *addr;
peer->af_specific = sctp_get_af_specific(addr->sa.sa_family);
- peer->asoc = NULL;
-
- peer->dst = NULL;
memset(&peer->saddr, 0, sizeof(union sctp_addr));
/* From 6.3.1 RTO Calculation:
@@ -76,52 +73,32 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
* parameter 'RTO.Initial'.
*/
peer->rto = msecs_to_jiffies(sctp_rto_initial);
- peer->rtt = 0;
- peer->rttvar = 0;
- peer->srtt = 0;
- peer->rto_pending = 0;
- peer->hb_sent = 0;
- peer->fast_recovery = 0;
peer->last_time_heard = jiffies;
peer->last_time_ecne_reduced = jiffies;
- peer->init_sent_count = 0;
-
peer->param_flags = SPP_HB_DISABLE |
SPP_PMTUD_ENABLE |
SPP_SACKDELAY_ENABLE;
- peer->hbinterval = 0;
/* Initialize the default path max_retrans. */
peer->pathmaxrxt = sctp_max_retrans_path;
- peer->error_count = 0;
INIT_LIST_HEAD(&peer->transmitted);
INIT_LIST_HEAD(&peer->send_ready);
INIT_LIST_HEAD(&peer->transports);
- peer->T3_rtx_timer.expires = 0;
- peer->hb_timer.expires = 0;
-
setup_timer(&peer->T3_rtx_timer, sctp_generate_t3_rtx_event,
(unsigned long)peer);
setup_timer(&peer->hb_timer, sctp_generate_heartbeat_event,
(unsigned long)peer);
+ setup_timer(&peer->proto_unreach_timer,
+ sctp_generate_proto_unreach_event, (unsigned long)peer);
/* Initialize the 64-bit random nonce sent with heartbeat. */
get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce));
atomic_set(&peer->refcnt, 1);
- peer->dead = 0;
-
- peer->malloced = 0;
-
- /* Initialize the state information for SFR-CACC */
- peer->cacc.changeover_active = 0;
- peer->cacc.cycling_changeover = 0;
- peer->cacc.next_tsn_at_change = 0;
- peer->cacc.cacc_saw_newack = 0;
return peer;
}
@@ -195,7 +172,7 @@ static void sctp_transport_destroy(struct sctp_transport *transport)
/* Start T3_rtx timer if it is not already running and update the heartbeat
* timer. This routine is called every time a DATA chunk is sent.
*/
-void sctp_transport_reset_timers(struct sctp_transport *transport, int force)
+void sctp_transport_reset_timers(struct sctp_transport *transport)
{
/* RFC 2960 6.3.2 Retransmission Timer Rules
*
@@ -205,7 +182,7 @@ void sctp_transport_reset_timers(struct sctp_transport *transport, int force)
* address.
*/
- if (force || !timer_pending(&transport->T3_rtx_timer))
+ if (!timer_pending(&transport->T3_rtx_timer))
if (!mod_timer(&transport->T3_rtx_timer,
jiffies + transport->rto))
sctp_transport_hold(transport);
@@ -403,15 +380,16 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
void sctp_transport_raise_cwnd(struct sctp_transport *transport,
__u32 sack_ctsn, __u32 bytes_acked)
{
+ struct sctp_association *asoc = transport->asoc;
__u32 cwnd, ssthresh, flight_size, pba, pmtu;
cwnd = transport->cwnd;
flight_size = transport->flight_size;
/* See if we need to exit Fast Recovery first */
- if (transport->fast_recovery &&
- TSN_lte(transport->fast_recovery_exit, sack_ctsn))
- transport->fast_recovery = 0;
+ if (asoc->fast_recovery &&
+ TSN_lte(asoc->fast_recovery_exit, sack_ctsn))
+ asoc->fast_recovery = 0;
/* The appropriate cwnd increase algorithm is performed if, and only
* if the cumulative TSN would advance and the congestion window is
@@ -440,7 +418,7 @@ void sctp_transport_raise_cwnd(struct sctp_transport *transport,
* 2) the destination's path MTU. This upper bound protects
* against the ACK-Splitting attack outlined in [SAVAGE99].
*/
- if (transport->fast_recovery)
+ if (asoc->fast_recovery)
return;
if (bytes_acked > pmtu)
@@ -491,6 +469,8 @@ void sctp_transport_raise_cwnd(struct sctp_transport *transport,
void sctp_transport_lower_cwnd(struct sctp_transport *transport,
sctp_lower_cwnd_t reason)
{
+ struct sctp_association *asoc = transport->asoc;
+
switch (reason) {
case SCTP_LOWER_CWND_T3_RTX:
/* RFC 2960 Section 7.2.3, sctpimpguide
@@ -501,11 +481,11 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
* partial_bytes_acked = 0
*/
transport->ssthresh = max(transport->cwnd/2,
- 4*transport->asoc->pathmtu);
- transport->cwnd = transport->asoc->pathmtu;
+ 4*asoc->pathmtu);
+ transport->cwnd = asoc->pathmtu;
- /* T3-rtx also clears fast recovery on the transport */
- transport->fast_recovery = 0;
+ /* T3-rtx also clears fast recovery */
+ asoc->fast_recovery = 0;
break;
case SCTP_LOWER_CWND_FAST_RTX:
@@ -521,15 +501,15 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
* cwnd = ssthresh
* partial_bytes_acked = 0
*/
- if (transport->fast_recovery)
+ if (asoc->fast_recovery)
return;
/* Mark Fast recovery */
- transport->fast_recovery = 1;
- transport->fast_recovery_exit = transport->asoc->next_tsn - 1;
+ asoc->fast_recovery = 1;
+ asoc->fast_recovery_exit = asoc->next_tsn - 1;
transport->ssthresh = max(transport->cwnd/2,
- 4*transport->asoc->pathmtu);
+ 4*asoc->pathmtu);
transport->cwnd = transport->ssthresh;
break;
@@ -549,7 +529,7 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
if (time_after(jiffies, transport->last_time_ecne_reduced +
transport->rtt)) {
transport->ssthresh = max(transport->cwnd/2,
- 4*transport->asoc->pathmtu);
+ 4*asoc->pathmtu);
transport->cwnd = transport->ssthresh;
transport->last_time_ecne_reduced = jiffies;
}
@@ -565,7 +545,7 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
* interval.
*/
transport->cwnd = max(transport->cwnd/2,
- 4*transport->asoc->pathmtu);
+ 4*asoc->pathmtu);
break;
}
@@ -650,7 +630,6 @@ void sctp_transport_reset(struct sctp_transport *t)
t->error_count = 0;
t->rto_pending = 0;
t->hb_sent = 0;
- t->fast_recovery = 0;
/* Initialize the state information for SFR-CACC */
t->cacc.changeover_active = 0;
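For reference, the ssthresh/cwnd arithmetic that the asoc-based hunks above keep applying is the RFC 2960, sec. 7.2.3 rule. A worked sketch with illustrative numbers (the helper below is not part of the patch):

	/* Illustrative only: with pathmtu = 1500 and cwnd = 4380, a T3-rtx
	 * expiry gives ssthresh = max(4380 / 2, 4 * 1500) = 6000 and the
	 * congestion window collapses to one MTU. */
	static void example_lower_cwnd_t3_rtx(__u32 *cwnd, __u32 *ssthresh, __u32 pathmtu)
	{
		*ssthresh = max(*cwnd / 2, 4 * pathmtu);
		*cwnd = pathmtu;
	}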
diff --git a/net/socket.c b/net/socket.c
index 5e8d0af3c0e7..dae8c6b84a09 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -252,9 +252,14 @@ static struct inode *sock_alloc_inode(struct super_block *sb)
ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL);
if (!ei)
return NULL;
- init_waitqueue_head(&ei->socket.wait);
+ ei->socket.wq = kmalloc(sizeof(struct socket_wq), GFP_KERNEL);
+ if (!ei->socket.wq) {
+ kmem_cache_free(sock_inode_cachep, ei);
+ return NULL;
+ }
+ init_waitqueue_head(&ei->socket.wq->wait);
+ ei->socket.wq->fasync_list = NULL;
- ei->socket.fasync_list = NULL;
ei->socket.state = SS_UNCONNECTED;
ei->socket.flags = 0;
ei->socket.ops = NULL;
@@ -264,10 +269,21 @@ static struct inode *sock_alloc_inode(struct super_block *sb)
return &ei->vfs_inode;
}
+
+static void wq_free_rcu(struct rcu_head *head)
+{
+ struct socket_wq *wq = container_of(head, struct socket_wq, rcu);
+
+ kfree(wq);
+}
+
static void sock_destroy_inode(struct inode *inode)
{
- kmem_cache_free(sock_inode_cachep,
- container_of(inode, struct socket_alloc, vfs_inode));
+ struct socket_alloc *ei;
+
+ ei = container_of(inode, struct socket_alloc, vfs_inode);
+ call_rcu(&ei->socket.wq->rcu, wq_free_rcu);
+ kmem_cache_free(sock_inode_cachep, ei);
}
static void init_once(void *foo)
@@ -513,7 +529,7 @@ void sock_release(struct socket *sock)
module_put(owner);
}
- if (sock->fasync_list)
+ if (sock->wq->fasync_list)
printk(KERN_ERR "sock_release: fasync list not empty!\n");
percpu_sub(sockets_in_use, 1);
@@ -620,10 +636,9 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP,
sizeof(tv), &tv);
} else {
- struct timespec ts;
- skb_get_timestampns(skb, &ts);
+ skb_get_timestampns(skb, &ts[0]);
put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPNS,
- sizeof(ts), &ts);
+ sizeof(ts[0]), &ts[0]);
}
}
@@ -656,13 +671,13 @@ inline void sock_recv_drops(struct msghdr *msg, struct sock *sk, struct sk_buff
sizeof(__u32), &skb->dropcount);
}
-void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
+void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb)
{
sock_recv_timestamp(msg, sk, skb);
sock_recv_drops(msg, sk, skb);
}
-EXPORT_SYMBOL_GPL(sock_recv_ts_and_drops);
+EXPORT_SYMBOL_GPL(__sock_recv_ts_and_drops);
static inline int __sock_recvmsg_nosec(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
@@ -1068,87 +1083,44 @@ static int sock_close(struct inode *inode, struct file *filp)
* 1. fasync_list is modified only under process context socket lock
* i.e. under semaphore.
* 2. fasync_list is used under read_lock(&sk->sk_callback_lock)
- * or under socket lock.
- * 3. fasync_list can be used from softirq context, so that
- * modification under socket lock have to be enhanced with
- * write_lock_bh(&sk->sk_callback_lock).
- * --ANK (990710)
+ * or under socket lock
*/
static int sock_fasync(int fd, struct file *filp, int on)
{
- struct fasync_struct *fa, *fna = NULL, **prev;
- struct socket *sock;
- struct sock *sk;
-
- if (on) {
- fna = kmalloc(sizeof(struct fasync_struct), GFP_KERNEL);
- if (fna == NULL)
- return -ENOMEM;
- }
-
- sock = filp->private_data;
+ struct socket *sock = filp->private_data;
+ struct sock *sk = sock->sk;
- sk = sock->sk;
- if (sk == NULL) {
- kfree(fna);
+ if (sk == NULL)
return -EINVAL;
- }
lock_sock(sk);
- spin_lock(&filp->f_lock);
- if (on)
- filp->f_flags |= FASYNC;
- else
- filp->f_flags &= ~FASYNC;
- spin_unlock(&filp->f_lock);
-
- prev = &(sock->fasync_list);
-
- for (fa = *prev; fa != NULL; prev = &fa->fa_next, fa = *prev)
- if (fa->fa_file == filp)
- break;
-
- if (on) {
- if (fa != NULL) {
- write_lock_bh(&sk->sk_callback_lock);
- fa->fa_fd = fd;
- write_unlock_bh(&sk->sk_callback_lock);
+ fasync_helper(fd, filp, on, &sock->wq->fasync_list);
- kfree(fna);
- goto out;
- }
- fna->fa_file = filp;
- fna->fa_fd = fd;
- fna->magic = FASYNC_MAGIC;
- fna->fa_next = sock->fasync_list;
- write_lock_bh(&sk->sk_callback_lock);
- sock->fasync_list = fna;
+ if (!sock->wq->fasync_list)
+ sock_reset_flag(sk, SOCK_FASYNC);
+ else
sock_set_flag(sk, SOCK_FASYNC);
- write_unlock_bh(&sk->sk_callback_lock);
- } else {
- if (fa != NULL) {
- write_lock_bh(&sk->sk_callback_lock);
- *prev = fa->fa_next;
- if (!sock->fasync_list)
- sock_reset_flag(sk, SOCK_FASYNC);
- write_unlock_bh(&sk->sk_callback_lock);
- kfree(fa);
- }
- }
-out:
- release_sock(sock->sk);
+ release_sock(sk);
return 0;
}
-/* This function may be called only under socket lock or callback_lock */
+/* This function may be called only under socket lock or callback_lock or rcu_lock */
int sock_wake_async(struct socket *sock, int how, int band)
{
- if (!sock || !sock->fasync_list)
+ struct socket_wq *wq;
+
+ if (!sock)
return -1;
+ rcu_read_lock();
+ wq = rcu_dereference(sock->wq);
+ if (!wq || !wq->fasync_list) {
+ rcu_read_unlock();
+ return -1;
+ }
switch (how) {
case SOCK_WAKE_WAITD:
if (test_bit(SOCK_ASYNC_WAITDATA, &sock->flags))
@@ -1160,11 +1132,12 @@ int sock_wake_async(struct socket *sock, int how, int band)
/* fall through */
case SOCK_WAKE_IO:
call_kill:
- __kill_fasync(sock->fasync_list, SIGIO, band);
+ kill_fasync(&wq->fasync_list, SIGIO, band);
break;
case SOCK_WAKE_URG:
- __kill_fasync(sock->fasync_list, SIGURG, band);
+ kill_fasync(&wq->fasync_list, SIGURG, band);
}
+ rcu_read_unlock();
return 0;
}
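The rewritten sock_wake_async() above is the lockless reader that motivates freeing socket_wq via call_rcu(): it can run from softirq context with no socket lock held, so the wq and its fasync list must remain valid across the rcu_read_lock() section. A stripped-down sketch of that reader pattern (the wake-up calls are illustrative; only socket->wq, wq->wait and wq->fasync_list come from the hunks above):

	/* Illustrative only: lockless notification of poll/fasync readers. */
	static void example_notify_readers(struct socket *sock)
	{
		struct socket_wq *wq;

		rcu_read_lock();
		wq = rcu_dereference(sock->wq);
		if (wq) {
			if (waitqueue_active(&wq->wait))
				wake_up_interruptible(&wq->wait);
			kill_fasync(&wq->fasync_list, SIGIO, POLL_IN);
		}
		rcu_read_unlock();
	}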
diff --git a/net/sunrpc/auth_gss/Makefile b/net/sunrpc/auth_gss/Makefile
index 4de8bcf26fa7..74a231735f67 100644
--- a/net/sunrpc/auth_gss/Makefile
+++ b/net/sunrpc/auth_gss/Makefile
@@ -10,7 +10,7 @@ auth_rpcgss-objs := auth_gss.o gss_generic_token.o \
obj-$(CONFIG_RPCSEC_GSS_KRB5) += rpcsec_gss_krb5.o
rpcsec_gss_krb5-objs := gss_krb5_mech.o gss_krb5_seal.o gss_krb5_unseal.o \
- gss_krb5_seqnum.o gss_krb5_wrap.o gss_krb5_crypto.o
+ gss_krb5_seqnum.o gss_krb5_wrap.o gss_krb5_crypto.o gss_krb5_keys.o
obj-$(CONFIG_RPCSEC_GSS_SPKM3) += rpcsec_gss_spkm3.o
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index c389ccf6437d..6654c8534d32 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -61,7 +61,7 @@ static const struct rpc_credops gss_nullops;
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif
-#define GSS_CRED_SLACK 1024
+#define GSS_CRED_SLACK (RPC_MAX_AUTH_SIZE * 2)
/* length of a krb5 verifier (48), plus data added before arguments when
* using integrity (two 4-byte integers): */
#define GSS_VERF_SLACK 100
@@ -377,11 +377,12 @@ static void gss_encode_v0_msg(struct gss_upcall_msg *gss_msg)
static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
struct rpc_clnt *clnt, int machine_cred)
{
+ struct gss_api_mech *mech = gss_msg->auth->mech;
char *p = gss_msg->databuf;
int len = 0;
gss_msg->msg.len = sprintf(gss_msg->databuf, "mech=%s uid=%d ",
- gss_msg->auth->mech->gm_name,
+ mech->gm_name,
gss_msg->uid);
p += gss_msg->msg.len;
if (clnt->cl_principal) {
@@ -398,6 +399,11 @@ static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
p += len;
gss_msg->msg.len += len;
}
+ if (mech->gm_upcall_enctypes) {
+ len = sprintf(p, mech->gm_upcall_enctypes);
+ p += len;
+ gss_msg->msg.len += len;
+ }
len = sprintf(p, "\n");
gss_msg->msg.len += len;
@@ -1316,15 +1322,21 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
inpages = snd_buf->pages + first;
snd_buf->pages = rqstp->rq_enc_pages;
snd_buf->page_base -= first << PAGE_CACHE_SHIFT;
- /* Give the tail its own page, in case we need extra space in the
- * head when wrapping: */
+ /*
+ * Give the tail its own page, in case we need extra space in the
+ * head when wrapping:
+ *
+ * call_allocate() allocates twice the slack space required
+ * by the authentication flavor to rq_callsize.
+ * For GSS, slack is GSS_CRED_SLACK.
+ */
if (snd_buf->page_len || snd_buf->tail[0].iov_len) {
tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]);
memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len);
snd_buf->tail[0].iov_base = tmp;
}
maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
- /* RPC_SLACK_SPACE should prevent this ever happening: */
+ /* slack space should prevent this ever happening: */
BUG_ON(snd_buf->len > snd_buf->buflen);
status = -EIO;
/* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
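The new comment above relies on the GSS_CRED_SLACK change earlier in this file: call_allocate() reserves twice the flavor's slack, so the head can grow at wrap time while the verifier still fits. A tiny sizing sketch (the 400-byte figure is an assumption used only for illustration, not a value taken from this diff):

	/* Illustrative sizing only. */
	#define EX_RPC_MAX_AUTH_SIZE	400
	#define EX_GSS_CRED_SLACK	(EX_RPC_MAX_AUTH_SIZE * 2)	/* 800 bytes of slack */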
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index e9b636176687..75ee993ea057 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -1,7 +1,7 @@
/*
* linux/net/sunrpc/gss_krb5_crypto.c
*
- * Copyright (c) 2000 The Regents of the University of Michigan.
+ * Copyright (c) 2000-2008 The Regents of the University of Michigan.
* All rights reserved.
*
* Andy Adamson <andros@umich.edu>
@@ -41,6 +41,7 @@
#include <linux/crypto.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
+#include <linux/random.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/sunrpc/xdr.h>
@@ -58,13 +59,13 @@ krb5_encrypt(
{
u32 ret = -EINVAL;
struct scatterlist sg[1];
- u8 local_iv[16] = {0};
+ u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
if (length % crypto_blkcipher_blocksize(tfm) != 0)
goto out;
- if (crypto_blkcipher_ivsize(tfm) > 16) {
+ if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
dprintk("RPC: gss_k5encrypt: tfm iv size too large %d\n",
crypto_blkcipher_ivsize(tfm));
goto out;
@@ -92,13 +93,13 @@ krb5_decrypt(
{
u32 ret = -EINVAL;
struct scatterlist sg[1];
- u8 local_iv[16] = {0};
+ u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
if (length % crypto_blkcipher_blocksize(tfm) != 0)
goto out;
- if (crypto_blkcipher_ivsize(tfm) > 16) {
+ if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
dprintk("RPC: gss_k5decrypt: tfm iv size too large %d\n",
crypto_blkcipher_ivsize(tfm));
goto out;
@@ -123,21 +124,155 @@ checksummer(struct scatterlist *sg, void *data)
return crypto_hash_update(desc, sg, sg->length);
}
-/* checksum the plaintext data and hdrlen bytes of the token header */
-s32
-make_checksum(char *cksumname, char *header, int hdrlen, struct xdr_buf *body,
- int body_offset, struct xdr_netobj *cksum)
+static int
+arcfour_hmac_md5_usage_to_salt(unsigned int usage, u8 salt[4])
+{
+ unsigned int ms_usage;
+
+ switch (usage) {
+ case KG_USAGE_SIGN:
+ ms_usage = 15;
+ break;
+ case KG_USAGE_SEAL:
+ ms_usage = 13;
+ break;
+ default:
+ return EINVAL;
+ }
+ salt[0] = (ms_usage >> 0) & 0xff;
+ salt[1] = (ms_usage >> 8) & 0xff;
+ salt[2] = (ms_usage >> 16) & 0xff;
+ salt[3] = (ms_usage >> 24) & 0xff;
+
+ return 0;
+}
+
+static u32
+make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
+ struct xdr_buf *body, int body_offset, u8 *cksumkey,
+ unsigned int usage, struct xdr_netobj *cksumout)
{
- struct hash_desc desc; /* XXX add to ctx? */
+ struct hash_desc desc;
struct scatterlist sg[1];
int err;
+ u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
+ u8 rc4salt[4];
+ struct crypto_hash *md5;
+ struct crypto_hash *hmac_md5;
+
+ if (cksumkey == NULL)
+ return GSS_S_FAILURE;
+
+ if (cksumout->len < kctx->gk5e->cksumlength) {
+ dprintk("%s: checksum buffer length, %u, too small for %s\n",
+ __func__, cksumout->len, kctx->gk5e->name);
+ return GSS_S_FAILURE;
+ }
+
+ if (arcfour_hmac_md5_usage_to_salt(usage, rc4salt)) {
+ dprintk("%s: invalid usage value %u\n", __func__, usage);
+ return GSS_S_FAILURE;
+ }
+
+ md5 = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(md5))
+ return GSS_S_FAILURE;
+
+ hmac_md5 = crypto_alloc_hash(kctx->gk5e->cksum_name, 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(hmac_md5)) {
+ crypto_free_hash(md5);
+ return GSS_S_FAILURE;
+ }
+
+ desc.tfm = md5;
+ desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ err = crypto_hash_init(&desc);
+ if (err)
+ goto out;
+ sg_init_one(sg, rc4salt, 4);
+ err = crypto_hash_update(&desc, sg, 4);
+ if (err)
+ goto out;
+
+ sg_init_one(sg, header, hdrlen);
+ err = crypto_hash_update(&desc, sg, hdrlen);
+ if (err)
+ goto out;
+ err = xdr_process_buf(body, body_offset, body->len - body_offset,
+ checksummer, &desc);
+ if (err)
+ goto out;
+ err = crypto_hash_final(&desc, checksumdata);
+ if (err)
+ goto out;
+
+ desc.tfm = hmac_md5;
+ desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ err = crypto_hash_init(&desc);
+ if (err)
+ goto out;
+ err = crypto_hash_setkey(hmac_md5, cksumkey, kctx->gk5e->keylength);
+ if (err)
+ goto out;
+
+ sg_init_one(sg, checksumdata, crypto_hash_digestsize(md5));
+ err = crypto_hash_digest(&desc, sg, crypto_hash_digestsize(md5),
+ checksumdata);
+ if (err)
+ goto out;
+
+ memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
+ cksumout->len = kctx->gk5e->cksumlength;
+out:
+ crypto_free_hash(md5);
+ crypto_free_hash(hmac_md5);
+ return err ? GSS_S_FAILURE : 0;
+}
+
+/*
+ * checksum the plaintext data and hdrlen bytes of the token header
+ * The checksum is performed over the first 8 bytes of the
+ * gss token header and then over the data body
+ */
+u32
+make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
+ struct xdr_buf *body, int body_offset, u8 *cksumkey,
+ unsigned int usage, struct xdr_netobj *cksumout)
+{
+ struct hash_desc desc;
+ struct scatterlist sg[1];
+ int err;
+ u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
+ unsigned int checksumlen;
+
+ if (kctx->gk5e->ctype == CKSUMTYPE_HMAC_MD5_ARCFOUR)
+ return make_checksum_hmac_md5(kctx, header, hdrlen,
+ body, body_offset,
+ cksumkey, usage, cksumout);
+
+ if (cksumout->len < kctx->gk5e->cksumlength) {
+ dprintk("%s: checksum buffer length, %u, too small for %s\n",
+ __func__, cksumout->len, kctx->gk5e->name);
+ return GSS_S_FAILURE;
+ }
- desc.tfm = crypto_alloc_hash(cksumname, 0, CRYPTO_ALG_ASYNC);
+ desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(desc.tfm))
return GSS_S_FAILURE;
- cksum->len = crypto_hash_digestsize(desc.tfm);
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ checksumlen = crypto_hash_digestsize(desc.tfm);
+
+ if (cksumkey != NULL) {
+ err = crypto_hash_setkey(desc.tfm, cksumkey,
+ kctx->gk5e->keylength);
+ if (err)
+ goto out;
+ }
+
err = crypto_hash_init(&desc);
if (err)
goto out;
@@ -149,15 +284,109 @@ make_checksum(char *cksumname, char *header, int hdrlen, struct xdr_buf *body,
checksummer, &desc);
if (err)
goto out;
- err = crypto_hash_final(&desc, cksum->data);
+ err = crypto_hash_final(&desc, checksumdata);
+ if (err)
+ goto out;
+ switch (kctx->gk5e->ctype) {
+ case CKSUMTYPE_RSA_MD5:
+ err = kctx->gk5e->encrypt(kctx->seq, NULL, checksumdata,
+ checksumdata, checksumlen);
+ if (err)
+ goto out;
+ memcpy(cksumout->data,
+ checksumdata + checksumlen - kctx->gk5e->cksumlength,
+ kctx->gk5e->cksumlength);
+ break;
+ case CKSUMTYPE_HMAC_SHA1_DES3:
+ memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
+ break;
+ default:
+ BUG();
+ break;
+ }
+ cksumout->len = kctx->gk5e->cksumlength;
+out:
+ crypto_free_hash(desc.tfm);
+ return err ? GSS_S_FAILURE : 0;
+}
+
+/*
+ * checksum the plaintext data and hdrlen bytes of the token header
+ * Per rfc4121, sec. 4.2.4, the checksum is performed over the data
+ * body then over the first 16 octets of the MIC token
+ * Inclusion of the header data in the calculation of the
+ * checksum is optional.
+ */
+u32
+make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
+ struct xdr_buf *body, int body_offset, u8 *cksumkey,
+ unsigned int usage, struct xdr_netobj *cksumout)
+{
+ struct hash_desc desc;
+ struct scatterlist sg[1];
+ int err;
+ u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
+ unsigned int checksumlen;
+
+ if (kctx->gk5e->keyed_cksum == 0) {
+ dprintk("%s: expected keyed hash for %s\n",
+ __func__, kctx->gk5e->name);
+ return GSS_S_FAILURE;
+ }
+ if (cksumkey == NULL) {
+ dprintk("%s: no key supplied for %s\n",
+ __func__, kctx->gk5e->name);
+ return GSS_S_FAILURE;
+ }
+
+ desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(desc.tfm))
+ return GSS_S_FAILURE;
+ checksumlen = crypto_hash_digestsize(desc.tfm);
+ desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ err = crypto_hash_setkey(desc.tfm, cksumkey, kctx->gk5e->keylength);
+ if (err)
+ goto out;
+
+ err = crypto_hash_init(&desc);
+ if (err)
+ goto out;
+ err = xdr_process_buf(body, body_offset, body->len - body_offset,
+ checksummer, &desc);
+ if (err)
+ goto out;
+ if (header != NULL) {
+ sg_init_one(sg, header, hdrlen);
+ err = crypto_hash_update(&desc, sg, hdrlen);
+ if (err)
+ goto out;
+ }
+ err = crypto_hash_final(&desc, checksumdata);
+ if (err)
+ goto out;
+
+ cksumout->len = kctx->gk5e->cksumlength;
+
+ switch (kctx->gk5e->ctype) {
+ case CKSUMTYPE_HMAC_SHA1_96_AES128:
+ case CKSUMTYPE_HMAC_SHA1_96_AES256:
+ /* note that this truncates the hash */
+ memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
+ break;
+ default:
+ BUG();
+ break;
+ }
out:
crypto_free_hash(desc.tfm);
return err ? GSS_S_FAILURE : 0;
}
struct encryptor_desc {
- u8 iv[8]; /* XXX hard-coded blocksize */
+ u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
struct blkcipher_desc desc;
int pos;
struct xdr_buf *outbuf;
@@ -198,7 +427,7 @@ encryptor(struct scatterlist *sg, void *data)
desc->fraglen += sg->length;
desc->pos += sg->length;
- fraglen = thislen & 7; /* XXX hardcoded blocksize */
+ fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1);
thislen -= fraglen;
if (thislen == 0)
@@ -256,7 +485,7 @@ gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
}
struct decryptor_desc {
- u8 iv[8]; /* XXX hard-coded blocksize */
+ u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
struct blkcipher_desc desc;
struct scatterlist frags[4];
int fragno;
@@ -278,7 +507,7 @@ decryptor(struct scatterlist *sg, void *data)
desc->fragno++;
desc->fraglen += sg->length;
- fraglen = thislen & 7; /* XXX hardcoded blocksize */
+ fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1);
thislen -= fraglen;
if (thislen == 0)
@@ -325,3 +554,437 @@ gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
}
+
+/*
+ * This function makes the assumption that it was ultimately called
+ * from gss_wrap().
+ *
+ * The client auth_gss code moves any existing tail data into a
+ * separate page before calling gss_wrap.
+ * The server svcauth_gss code ensures that both the head and the
+ * tail have slack space of RPC_MAX_AUTH_SIZE before calling gss_wrap.
+ *
+ * Even with that guarantee, this function may be called more than
+ * once in the processing of gss_wrap(). The best we can do is
+ * verify at compile-time (see GSS_KRB5_SLACK_CHECK) that the
+ * largest expected shift will fit within RPC_MAX_AUTH_SIZE.
+ * At run-time we can verify that a single invocation of this
+ * function doesn't attempt to use more than RPC_MAX_AUTH_SIZE.
+ */
+
+int
+xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
+{
+ u8 *p;
+
+ if (shiftlen == 0)
+ return 0;
+
+ BUILD_BUG_ON(GSS_KRB5_MAX_SLACK_NEEDED > RPC_MAX_AUTH_SIZE);
+ BUG_ON(shiftlen > RPC_MAX_AUTH_SIZE);
+
+ p = buf->head[0].iov_base + base;
+
+ memmove(p + shiftlen, p, buf->head[0].iov_len - base);
+
+ buf->head[0].iov_len += shiftlen;
+ buf->len += shiftlen;
+
+ return 0;
+}
+
+static u32
+gss_krb5_cts_crypt(struct crypto_blkcipher *cipher, struct xdr_buf *buf,
+ u32 offset, u8 *iv, struct page **pages, int encrypt)
+{
+ u32 ret;
+ struct scatterlist sg[1];
+ struct blkcipher_desc desc = { .tfm = cipher, .info = iv };
+ u8 data[crypto_blkcipher_blocksize(cipher) * 2];
+ struct page **save_pages;
+ u32 len = buf->len - offset;
+
+ BUG_ON(len > crypto_blkcipher_blocksize(cipher) * 2);
+
+ /*
+ * For encryption, we want to read from the cleartext
+ * page cache pages, and write the encrypted data to
+ * the supplied xdr_buf pages.
+ */
+ save_pages = buf->pages;
+ if (encrypt)
+ buf->pages = pages;
+
+ ret = read_bytes_from_xdr_buf(buf, offset, data, len);
+ buf->pages = save_pages;
+ if (ret)
+ goto out;
+
+ sg_init_one(sg, data, len);
+
+ if (encrypt)
+ ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, len);
+ else
+ ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, len);
+
+ if (ret)
+ goto out;
+
+ ret = write_bytes_to_xdr_buf(buf, offset, data, len);
+
+out:
+ return ret;
+}
+
+u32
+gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
+ struct xdr_buf *buf, int ec, struct page **pages)
+{
+ u32 err;
+ struct xdr_netobj hmac;
+ u8 *cksumkey;
+ u8 *ecptr;
+ struct crypto_blkcipher *cipher, *aux_cipher;
+ int blocksize;
+ struct page **save_pages;
+ int nblocks, nbytes;
+ struct encryptor_desc desc;
+ u32 cbcbytes;
+ unsigned int usage;
+
+ if (kctx->initiate) {
+ cipher = kctx->initiator_enc;
+ aux_cipher = kctx->initiator_enc_aux;
+ cksumkey = kctx->initiator_integ;
+ usage = KG_USAGE_INITIATOR_SEAL;
+ } else {
+ cipher = kctx->acceptor_enc;
+ aux_cipher = kctx->acceptor_enc_aux;
+ cksumkey = kctx->acceptor_integ;
+ usage = KG_USAGE_ACCEPTOR_SEAL;
+ }
+ blocksize = crypto_blkcipher_blocksize(cipher);
+
+ /* hide the gss token header and insert the confounder */
+ offset += GSS_KRB5_TOK_HDR_LEN;
+ if (xdr_extend_head(buf, offset, kctx->gk5e->conflen))
+ return GSS_S_FAILURE;
+ gss_krb5_make_confounder(buf->head[0].iov_base + offset, kctx->gk5e->conflen);
+ offset -= GSS_KRB5_TOK_HDR_LEN;
+
+ if (buf->tail[0].iov_base != NULL) {
+ ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len;
+ } else {
+ buf->tail[0].iov_base = buf->head[0].iov_base
+ + buf->head[0].iov_len;
+ buf->tail[0].iov_len = 0;
+ ecptr = buf->tail[0].iov_base;
+ }
+
+ memset(ecptr, 'X', ec);
+ buf->tail[0].iov_len += ec;
+ buf->len += ec;
+
+ /* copy plaintext gss token header after filler (if any) */
+ memcpy(ecptr + ec, buf->head[0].iov_base + offset,
+ GSS_KRB5_TOK_HDR_LEN);
+ buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN;
+ buf->len += GSS_KRB5_TOK_HDR_LEN;
+
+ /* Do the HMAC */
+ hmac.len = GSS_KRB5_MAX_CKSUM_LEN;
+ hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len;
+
+ /*
+ * When we are called, pages points to the real page cache
+ * data -- which we can't go and encrypt! buf->pages points
+ * to scratch pages which we are going to send off to the
+ * client/server. Swap in the plaintext pages to calculate
+ * the hmac.
+ */
+ save_pages = buf->pages;
+ buf->pages = pages;
+
+ err = make_checksum_v2(kctx, NULL, 0, buf,
+ offset + GSS_KRB5_TOK_HDR_LEN,
+ cksumkey, usage, &hmac);
+ buf->pages = save_pages;
+ if (err)
+ return GSS_S_FAILURE;
+
+ nbytes = buf->len - offset - GSS_KRB5_TOK_HDR_LEN;
+ nblocks = (nbytes + blocksize - 1) / blocksize;
+ cbcbytes = 0;
+ if (nblocks > 2)
+ cbcbytes = (nblocks - 2) * blocksize;
+
+ memset(desc.iv, 0, sizeof(desc.iv));
+
+ if (cbcbytes) {
+ desc.pos = offset + GSS_KRB5_TOK_HDR_LEN;
+ desc.fragno = 0;
+ desc.fraglen = 0;
+ desc.pages = pages;
+ desc.outbuf = buf;
+ desc.desc.info = desc.iv;
+ desc.desc.flags = 0;
+ desc.desc.tfm = aux_cipher;
+
+ sg_init_table(desc.infrags, 4);
+ sg_init_table(desc.outfrags, 4);
+
+ err = xdr_process_buf(buf, offset + GSS_KRB5_TOK_HDR_LEN,
+ cbcbytes, encryptor, &desc);
+ if (err)
+ goto out_err;
+ }
+
+ /* Make sure IV carries forward from any CBC results. */
+ err = gss_krb5_cts_crypt(cipher, buf,
+ offset + GSS_KRB5_TOK_HDR_LEN + cbcbytes,
+ desc.iv, pages, 1);
+ if (err) {
+ err = GSS_S_FAILURE;
+ goto out_err;
+ }
+
+ /* Now update buf to account for HMAC */
+ buf->tail[0].iov_len += kctx->gk5e->cksumlength;
+ buf->len += kctx->gk5e->cksumlength;
+
+out_err:
+ if (err)
+ err = GSS_S_FAILURE;
+ return err;
+}
+
+u32
+gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
+ u32 *headskip, u32 *tailskip)
+{
+ struct xdr_buf subbuf;
+ u32 ret = 0;
+ u8 *cksum_key;
+ struct crypto_blkcipher *cipher, *aux_cipher;
+ struct xdr_netobj our_hmac_obj;
+ u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN];
+ u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN];
+ int nblocks, blocksize, cbcbytes;
+ struct decryptor_desc desc;
+ unsigned int usage;
+
+ if (kctx->initiate) {
+ cipher = kctx->acceptor_enc;
+ aux_cipher = kctx->acceptor_enc_aux;
+ cksum_key = kctx->acceptor_integ;
+ usage = KG_USAGE_ACCEPTOR_SEAL;
+ } else {
+ cipher = kctx->initiator_enc;
+ aux_cipher = kctx->initiator_enc_aux;
+ cksum_key = kctx->initiator_integ;
+ usage = KG_USAGE_INITIATOR_SEAL;
+ }
+ blocksize = crypto_blkcipher_blocksize(cipher);
+
+
+ /* create a segment skipping the header and leaving out the checksum */
+ xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
+ (buf->len - offset - GSS_KRB5_TOK_HDR_LEN -
+ kctx->gk5e->cksumlength));
+
+ nblocks = (subbuf.len + blocksize - 1) / blocksize;
+
+ cbcbytes = 0;
+ if (nblocks > 2)
+ cbcbytes = (nblocks - 2) * blocksize;
+
+ memset(desc.iv, 0, sizeof(desc.iv));
+
+ if (cbcbytes) {
+ desc.fragno = 0;
+ desc.fraglen = 0;
+ desc.desc.info = desc.iv;
+ desc.desc.flags = 0;
+ desc.desc.tfm = aux_cipher;
+
+ sg_init_table(desc.frags, 4);
+
+ ret = xdr_process_buf(&subbuf, 0, cbcbytes, decryptor, &desc);
+ if (ret)
+ goto out_err;
+ }
+
+ /* Make sure IV carries forward from any CBC results. */
+ ret = gss_krb5_cts_crypt(cipher, &subbuf, cbcbytes, desc.iv, NULL, 0);
+ if (ret)
+ goto out_err;
+
+
+ /* Calculate our hmac over the plaintext data */
+ our_hmac_obj.len = sizeof(our_hmac);
+ our_hmac_obj.data = our_hmac;
+
+ ret = make_checksum_v2(kctx, NULL, 0, &subbuf, 0,
+ cksum_key, usage, &our_hmac_obj);
+ if (ret)
+ goto out_err;
+
+ /* Get the packet's hmac value */
+ ret = read_bytes_from_xdr_buf(buf, buf->len - kctx->gk5e->cksumlength,
+ pkt_hmac, kctx->gk5e->cksumlength);
+ if (ret)
+ goto out_err;
+
+ if (memcmp(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) {
+ ret = GSS_S_BAD_SIG;
+ goto out_err;
+ }
+ *headskip = kctx->gk5e->conflen;
+ *tailskip = kctx->gk5e->cksumlength;
+out_err:
+ if (ret && ret != GSS_S_BAD_SIG)
+ ret = GSS_S_FAILURE;
+ return ret;
+}
+
+/*
+ * Compute Kseq given the initial session key and the checksum.
+ * Set the key of the given cipher.
+ */
+int
+krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher,
+ unsigned char *cksum)
+{
+ struct crypto_hash *hmac;
+ struct hash_desc desc;
+ struct scatterlist sg[1];
+ u8 Kseq[GSS_KRB5_MAX_KEYLEN];
+ u32 zeroconstant = 0;
+ int err;
+
+ dprintk("%s: entered\n", __func__);
+
+ hmac = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(hmac)) {
+ dprintk("%s: error %ld, allocating hash '%s'\n",
+ __func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
+ return PTR_ERR(hmac);
+ }
+
+ desc.tfm = hmac;
+ desc.flags = 0;
+
+ err = crypto_hash_init(&desc);
+ if (err)
+ goto out_err;
+
+ /* Compute intermediate Kseq from session key */
+ err = crypto_hash_setkey(hmac, kctx->Ksess, kctx->gk5e->keylength);
+ if (err)
+ goto out_err;
+
+ sg_init_table(sg, 1);
+ sg_set_buf(sg, &zeroconstant, 4);
+
+ err = crypto_hash_digest(&desc, sg, 4, Kseq);
+ if (err)
+ goto out_err;
+
+ /* Compute final Kseq from the checksum and intermediate Kseq */
+ err = crypto_hash_setkey(hmac, Kseq, kctx->gk5e->keylength);
+ if (err)
+ goto out_err;
+
+ sg_set_buf(sg, cksum, 8);
+
+ err = crypto_hash_digest(&desc, sg, 8, Kseq);
+ if (err)
+ goto out_err;
+
+ err = crypto_blkcipher_setkey(cipher, Kseq, kctx->gk5e->keylength);
+ if (err)
+ goto out_err;
+
+ err = 0;
+
+out_err:
+ crypto_free_hash(hmac);
+ dprintk("%s: returning %d\n", __func__, err);
+ return err;
+}
+
+/*
+ * Compute Kcrypt given the initial session key and the plaintext seqnum.
+ * Set the key of cipher kctx->enc.
+ */
+int
+krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher,
+ s32 seqnum)
+{
+ struct crypto_hash *hmac;
+ struct hash_desc desc;
+ struct scatterlist sg[1];
+ u8 Kcrypt[GSS_KRB5_MAX_KEYLEN];
+ u8 zeroconstant[4] = {0};
+ u8 seqnumarray[4];
+ int err, i;
+
+ dprintk("%s: entered, seqnum %u\n", __func__, seqnum);
+
+ hmac = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(hmac)) {
+ dprintk("%s: error %ld, allocating hash '%s'\n",
+ __func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
+ return PTR_ERR(hmac);
+ }
+
+ desc.tfm = hmac;
+ desc.flags = 0;
+
+ err = crypto_hash_init(&desc);
+ if (err)
+ goto out_err;
+
+ /* Compute intermediate Kcrypt from session key */
+ for (i = 0; i < kctx->gk5e->keylength; i++)
+ Kcrypt[i] = kctx->Ksess[i] ^ 0xf0;
+
+ err = crypto_hash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
+ if (err)
+ goto out_err;
+
+ sg_init_table(sg, 1);
+ sg_set_buf(sg, zeroconstant, 4);
+
+ err = crypto_hash_digest(&desc, sg, 4, Kcrypt);
+ if (err)
+ goto out_err;
+
+ /* Compute final Kcrypt from the seqnum and intermediate Kcrypt */
+ err = crypto_hash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
+ if (err)
+ goto out_err;
+
+ seqnumarray[0] = (unsigned char) ((seqnum >> 24) & 0xff);
+ seqnumarray[1] = (unsigned char) ((seqnum >> 16) & 0xff);
+ seqnumarray[2] = (unsigned char) ((seqnum >> 8) & 0xff);
+ seqnumarray[3] = (unsigned char) ((seqnum >> 0) & 0xff);
+
+ sg_set_buf(sg, seqnumarray, 4);
+
+ err = crypto_hash_digest(&desc, sg, 4, Kcrypt);
+ if (err)
+ goto out_err;
+
+ err = crypto_blkcipher_setkey(cipher, Kcrypt, kctx->gk5e->keylength);
+ if (err)
+ goto out_err;
+
+ err = 0;
+
+out_err:
+ crypto_free_hash(hmac);
+ dprintk("%s: returning %d\n", __func__, err);
+ return err;
+}
+
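Both AES paths above split the payload the same way: everything except the last two blocks goes through the auxiliary CBC cipher, and the remainder is handed to gss_krb5_cts_crypt() for ciphertext stealing. A worked sketch of that split (numbers and the helper name are illustrative):

	/* Illustrative only: with blocksize = 16 and nbytes = 100,
	 * nblocks = 7, so cbcbytes = (7 - 2) * 16 = 80 bytes go through
	 * plain CBC and the final 20 bytes are ciphertext-stolen. */
	static u32 example_cbcbytes(u32 nbytes, u32 blocksize)
	{
		u32 nblocks = (nbytes + blocksize - 1) / blocksize;

		return nblocks > 2 ? (nblocks - 2) * blocksize : 0;
	}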
diff --git a/net/sunrpc/auth_gss/gss_krb5_keys.c b/net/sunrpc/auth_gss/gss_krb5_keys.c
new file mode 100644
index 000000000000..33b87f04b30b
--- /dev/null
+++ b/net/sunrpc/auth_gss/gss_krb5_keys.c
@@ -0,0 +1,335 @@
+/*
+ * COPYRIGHT (c) 2008
+ * The Regents of the University of Michigan
+ * ALL RIGHTS RESERVED
+ *
+ * Permission is granted to use, copy, create derivative works
+ * and redistribute this software and such derivative works
+ * for any purpose, so long as the name of The University of
+ * Michigan is not used in any advertising or publicity
+ * pertaining to the use or distribution of this software
+ * without specific, written prior authorization. If the
+ * above copyright notice or any other identification of the
+ * University of Michigan is included in any copy of any
+ * portion of this software, then the disclaimer below must
+ * also be included.
+ *
+ * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION
+ * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY
+ * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF
+ * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
+ * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
+ * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE
+ * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR
+ * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING
+ * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
+ * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGES.
+ */
+
+/*
+ * Copyright (C) 1998 by the FundsXpress, INC.
+ *
+ * All rights reserved.
+ *
+ * Export of this software from the United States of America may require
+ * a specific license from the United States Government. It is the
+ * responsibility of any person or organization contemplating export to
+ * obtain such a license before exporting.
+ *
+ * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
+ * distribute this software and its documentation for any purpose and
+ * without fee is hereby granted, provided that the above copyright
+ * notice appear in all copies and that both that copyright notice and
+ * this permission notice appear in supporting documentation, and that
+ * the name of FundsXpress. not be used in advertising or publicity pertaining
+ * to distribution of the software without specific, written prior
+ * permission. FundsXpress makes no representations about the suitability of
+ * this software for any purpose. It is provided "as is" without express
+ * or implied warranty.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <linux/sunrpc/gss_krb5.h>
+#include <linux/sunrpc/xdr.h>
+
+#ifdef RPC_DEBUG
+# define RPCDBG_FACILITY RPCDBG_AUTH
+#endif
+
+/*
+ * This is the n-fold function as described in rfc3961, sec 5.1
+ * Taken from MIT Kerberos and modified.
+ */
+
+static void krb5_nfold(u32 inbits, const u8 *in,
+ u32 outbits, u8 *out)
+{
+ int a, b, c, lcm;
+ int byte, i, msbit;
+
+ /* the code below is more readable if I make these bytes
+ instead of bits */
+
+ inbits >>= 3;
+ outbits >>= 3;
+
+ /* first compute lcm(n,k) */
+
+ a = outbits;
+ b = inbits;
+
+ while (b != 0) {
+ c = b;
+ b = a%b;
+ a = c;
+ }
+
+ lcm = outbits*inbits/a;
+
+ /* now do the real work */
+
+ memset(out, 0, outbits);
+ byte = 0;
+
+ /* this will end up cycling through k lcm(k,n)/k times, which
+ is correct */
+ for (i = lcm-1; i >= 0; i--) {
+ /* compute the msbit in k which gets added into this byte */
+ msbit = (
+ /* first, start with the msbit in the first,
+ * unrotated byte */
+ ((inbits << 3) - 1)
+ /* then, for each byte, shift to the right
+ * for each repetition */
+ + (((inbits << 3) + 13) * (i/inbits))
+ /* last, pick out the correct byte within
+ * that shifted repetition */
+ + ((inbits - (i % inbits)) << 3)
+ ) % (inbits << 3);
+
+ /* pull out the byte value itself */
+ byte += (((in[((inbits - 1) - (msbit >> 3)) % inbits] << 8)|
+ (in[((inbits) - (msbit >> 3)) % inbits]))
+ >> ((msbit & 7) + 1)) & 0xff;
+
+ /* do the addition */
+ byte += out[i % outbits];
+ out[i % outbits] = byte & 0xff;
+
+ /* keep around the carry bit, if any */
+ byte >>= 8;
+
+ }
+
+ /* if there's a carry bit left over, add it back in */
+ if (byte) {
+ for (i = outbits - 1; i >= 0; i--) {
+ /* do the addition */
+ byte += out[i];
+ out[i] = byte & 0xff;
+
+ /* keep around the carry bit, if any */
+ byte >>= 8;
+ }
+ }
+}
+
+/*
+ * This is the DK (derive_key) function as described in rfc3961, sec 5.1
+ * Taken from MIT Kerberos and modified.
+ */
+
+u32 krb5_derive_key(const struct gss_krb5_enctype *gk5e,
+ const struct xdr_netobj *inkey,
+ struct xdr_netobj *outkey,
+ const struct xdr_netobj *in_constant)
+{
+ size_t blocksize, keybytes, keylength, n;
+ unsigned char *inblockdata, *outblockdata, *rawkey;
+ struct xdr_netobj inblock, outblock;
+ struct crypto_blkcipher *cipher;
+ u32 ret = EINVAL;
+
+ blocksize = gk5e->blocksize;
+ keybytes = gk5e->keybytes;
+ keylength = gk5e->keylength;
+
+ if ((inkey->len != keylength) || (outkey->len != keylength))
+ goto err_return;
+
+ cipher = crypto_alloc_blkcipher(gk5e->encrypt_name, 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(cipher))
+ goto err_return;
+ if (crypto_blkcipher_setkey(cipher, inkey->data, inkey->len))
+ goto err_return;
+
+ /* allocate and set up buffers */
+
+ ret = ENOMEM;
+ inblockdata = kmalloc(blocksize, GFP_KERNEL);
+ if (inblockdata == NULL)
+ goto err_free_cipher;
+
+ outblockdata = kmalloc(blocksize, GFP_KERNEL);
+ if (outblockdata == NULL)
+ goto err_free_in;
+
+ rawkey = kmalloc(keybytes, GFP_KERNEL);
+ if (rawkey == NULL)
+ goto err_free_out;
+
+ inblock.data = (char *) inblockdata;
+ inblock.len = blocksize;
+
+ outblock.data = (char *) outblockdata;
+ outblock.len = blocksize;
+
+ /* initialize the input block */
+
+ if (in_constant->len == inblock.len) {
+ memcpy(inblock.data, in_constant->data, inblock.len);
+ } else {
+ krb5_nfold(in_constant->len * 8, in_constant->data,
+ inblock.len * 8, inblock.data);
+ }
+
+ /* loop encrypting the blocks until enough key bytes are generated */
+
+ n = 0;
+ while (n < keybytes) {
+ (*(gk5e->encrypt))(cipher, NULL, inblock.data,
+ outblock.data, inblock.len);
+
+ if ((keybytes - n) <= outblock.len) {
+ memcpy(rawkey + n, outblock.data, (keybytes - n));
+ break;
+ }
+
+ memcpy(rawkey + n, outblock.data, outblock.len);
+ memcpy(inblock.data, outblock.data, outblock.len);
+ n += outblock.len;
+ }
+
+ /* postprocess the key */
+
+ inblock.data = (char *) rawkey;
+ inblock.len = keybytes;
+
+ BUG_ON(gk5e->mk_key == NULL);
+ ret = (*(gk5e->mk_key))(gk5e, &inblock, outkey);
+ if (ret) {
+ dprintk("%s: got %d from mk_key function for '%s'\n",
+ __func__, ret, gk5e->encrypt_name);
+ goto err_free_raw;
+ }
+
+ /* clean memory, free resources and exit */
+
+ ret = 0;
+
+err_free_raw:
+ memset(rawkey, 0, keybytes);
+ kfree(rawkey);
+err_free_out:
+ memset(outblockdata, 0, blocksize);
+ kfree(outblockdata);
+err_free_in:
+ memset(inblockdata, 0, blocksize);
+ kfree(inblockdata);
+err_free_cipher:
+ crypto_free_blkcipher(cipher);
+err_return:
+ return ret;
+}
+
+#define smask(step) ((1<<step)-1)
+#define pstep(x, step) (((x)&smask(step))^(((x)>>step)&smask(step)))
+#define parity_char(x) pstep(pstep(pstep((x), 4), 2), 1)
+
+static void mit_des_fixup_key_parity(u8 key[8])
+{
+ int i;
+ for (i = 0; i < 8; i++) {
+ key[i] &= 0xfe;
+ key[i] |= 1^parity_char(key[i]);
+ }
+}
+
+/*
+ * This is the des3 key derivation postprocess function
+ */
+u32 gss_krb5_des3_make_key(const struct gss_krb5_enctype *gk5e,
+ struct xdr_netobj *randombits,
+ struct xdr_netobj *key)
+{
+ int i;
+ u32 ret = EINVAL;
+
+ if (key->len != 24) {
+ dprintk("%s: key->len is %d\n", __func__, key->len);
+ goto err_out;
+ }
+ if (randombits->len != 21) {
+ dprintk("%s: randombits->len is %d\n",
+ __func__, randombits->len);
+ goto err_out;
+ }
+
+ /* take the seven bytes, move them around into the top 7 bits of the
+ 8 key bytes, then compute the parity bits. Do this three times. */
+
+ for (i = 0; i < 3; i++) {
+ memcpy(key->data + i*8, randombits->data + i*7, 7);
+ key->data[i*8+7] = (((key->data[i*8]&1)<<1) |
+ ((key->data[i*8+1]&1)<<2) |
+ ((key->data[i*8+2]&1)<<3) |
+ ((key->data[i*8+3]&1)<<4) |
+ ((key->data[i*8+4]&1)<<5) |
+ ((key->data[i*8+5]&1)<<6) |
+ ((key->data[i*8+6]&1)<<7));
+
+ mit_des_fixup_key_parity(key->data + i*8);
+ }
+ ret = 0;
+err_out:
+ return ret;
+}
+
+/*
+ * This is the aes key derivation postprocess function
+ */
+u32 gss_krb5_aes_make_key(const struct gss_krb5_enctype *gk5e,
+ struct xdr_netobj *randombits,
+ struct xdr_netobj *key)
+{
+ u32 ret = EINVAL;
+
+ if (key->len != 16 && key->len != 32) {
+ dprintk("%s: key->len is %d\n", __func__, key->len);
+ goto err_out;
+ }
+ if (randombits->len != 16 && randombits->len != 32) {
+ dprintk("%s: randombits->len is %d\n",
+ __func__, randombits->len);
+ goto err_out;
+ }
+ if (randombits->len != key->len) {
+ dprintk("%s: randombits->len is %d, key->len is %d\n",
+ __func__, randombits->len, key->len);
+ goto err_out;
+ }
+ memcpy(key->data, randombits->data, key->len);
+ ret = 0;
+err_out:
+ return ret;
+}
+
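krb5_derive_key() above expects the RFC 3961 "well-known constant" as an xdr_netobj; the callers added later in this patch build it as a 4-byte usage number followed by a one-byte seed. A minimal caller sketch (the wrapper name and buffer handling are illustrative assumptions; only the krb5_derive_key() signature comes from the new file above):

	/* Illustrative only: derive one key from the session key. */
	static u32 example_derive_key(const struct gss_krb5_enctype *gk5e,
				      u8 *session_key, u8 *derived_key,
				      u32 usage, u8 seed)
	{
		u8 cdata[5];
		struct xdr_netobj c, keyin, keyout;

		cdata[0] = (usage >> 24) & 0xff;
		cdata[1] = (usage >> 16) & 0xff;
		cdata[2] = (usage >> 8) & 0xff;
		cdata[3] = usage & 0xff;
		cdata[4] = seed;

		c.len = sizeof(cdata);
		c.data = cdata;
		keyin.len = keyout.len = gk5e->keylength;
		keyin.data = session_key;
		keyout.data = derived_key;

		return krb5_derive_key(gk5e, &keyin, &keyout, &c);
	}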
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 2deb0ed72ff4..7c249a3f9a03 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -1,7 +1,7 @@
/*
* linux/net/sunrpc/gss_krb5_mech.c
*
- * Copyright (c) 2001 The Regents of the University of Michigan.
+ * Copyright (c) 2001-2008 The Regents of the University of Michigan.
* All rights reserved.
*
* Andy Adamson <andros@umich.edu>
@@ -48,6 +48,143 @@
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif
+static struct gss_api_mech gss_kerberos_mech; /* forward declaration */
+
+static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = {
+ /*
+ * DES (All DES enctypes are mapped to the same gss functionality)
+ */
+ {
+ .etype = ENCTYPE_DES_CBC_RAW,
+ .ctype = CKSUMTYPE_RSA_MD5,
+ .name = "des-cbc-crc",
+ .encrypt_name = "cbc(des)",
+ .cksum_name = "md5",
+ .encrypt = krb5_encrypt,
+ .decrypt = krb5_decrypt,
+ .mk_key = NULL,
+ .signalg = SGN_ALG_DES_MAC_MD5,
+ .sealalg = SEAL_ALG_DES,
+ .keybytes = 7,
+ .keylength = 8,
+ .blocksize = 8,
+ .conflen = 8,
+ .cksumlength = 8,
+ .keyed_cksum = 0,
+ },
+ /*
+ * RC4-HMAC
+ */
+ {
+ .etype = ENCTYPE_ARCFOUR_HMAC,
+ .ctype = CKSUMTYPE_HMAC_MD5_ARCFOUR,
+ .name = "rc4-hmac",
+ .encrypt_name = "ecb(arc4)",
+ .cksum_name = "hmac(md5)",
+ .encrypt = krb5_encrypt,
+ .decrypt = krb5_decrypt,
+ .mk_key = NULL,
+ .signalg = SGN_ALG_HMAC_MD5,
+ .sealalg = SEAL_ALG_MICROSOFT_RC4,
+ .keybytes = 16,
+ .keylength = 16,
+ .blocksize = 1,
+ .conflen = 8,
+ .cksumlength = 8,
+ .keyed_cksum = 1,
+ },
+ /*
+ * 3DES
+ */
+ {
+ .etype = ENCTYPE_DES3_CBC_RAW,
+ .ctype = CKSUMTYPE_HMAC_SHA1_DES3,
+ .name = "des3-hmac-sha1",
+ .encrypt_name = "cbc(des3_ede)",
+ .cksum_name = "hmac(sha1)",
+ .encrypt = krb5_encrypt,
+ .decrypt = krb5_decrypt,
+ .mk_key = gss_krb5_des3_make_key,
+ .signalg = SGN_ALG_HMAC_SHA1_DES3_KD,
+ .sealalg = SEAL_ALG_DES3KD,
+ .keybytes = 21,
+ .keylength = 24,
+ .blocksize = 8,
+ .conflen = 8,
+ .cksumlength = 20,
+ .keyed_cksum = 1,
+ },
+ /*
+ * AES128
+ */
+ {
+ .etype = ENCTYPE_AES128_CTS_HMAC_SHA1_96,
+ .ctype = CKSUMTYPE_HMAC_SHA1_96_AES128,
+ .name = "aes128-cts",
+ .encrypt_name = "cts(cbc(aes))",
+ .cksum_name = "hmac(sha1)",
+ .encrypt = krb5_encrypt,
+ .decrypt = krb5_decrypt,
+ .mk_key = gss_krb5_aes_make_key,
+ .encrypt_v2 = gss_krb5_aes_encrypt,
+ .decrypt_v2 = gss_krb5_aes_decrypt,
+ .signalg = -1,
+ .sealalg = -1,
+ .keybytes = 16,
+ .keylength = 16,
+ .blocksize = 16,
+ .conflen = 16,
+ .cksumlength = 12,
+ .keyed_cksum = 1,
+ },
+ /*
+ * AES256
+ */
+ {
+ .etype = ENCTYPE_AES256_CTS_HMAC_SHA1_96,
+ .ctype = CKSUMTYPE_HMAC_SHA1_96_AES256,
+ .name = "aes256-cts",
+ .encrypt_name = "cts(cbc(aes))",
+ .cksum_name = "hmac(sha1)",
+ .encrypt = krb5_encrypt,
+ .decrypt = krb5_decrypt,
+ .mk_key = gss_krb5_aes_make_key,
+ .encrypt_v2 = gss_krb5_aes_encrypt,
+ .decrypt_v2 = gss_krb5_aes_decrypt,
+ .signalg = -1,
+ .sealalg = -1,
+ .keybytes = 32,
+ .keylength = 32,
+ .blocksize = 16,
+ .conflen = 16,
+ .cksumlength = 12,
+ .keyed_cksum = 1,
+ },
+};
+
+static const int num_supported_enctypes =
+ ARRAY_SIZE(supported_gss_krb5_enctypes);
+
+static int
+supported_gss_krb5_enctype(int etype)
+{
+ int i;
+ for (i = 0; i < num_supported_enctypes; i++)
+ if (supported_gss_krb5_enctypes[i].etype == etype)
+ return 1;
+ return 0;
+}
+
+static const struct gss_krb5_enctype *
+get_gss_krb5_enctype(int etype)
+{
+ int i;
+ for (i = 0; i < num_supported_enctypes; i++)
+ if (supported_gss_krb5_enctypes[i].etype == etype)
+ return &supported_gss_krb5_enctypes[i];
+ return NULL;
+}
+
static const void *
simple_get_bytes(const void *p, const void *end, void *res, int len)
{
@@ -78,35 +215,45 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res)
}
static inline const void *
-get_key(const void *p, const void *end, struct crypto_blkcipher **res)
+get_key(const void *p, const void *end,
+ struct krb5_ctx *ctx, struct crypto_blkcipher **res)
{
struct xdr_netobj key;
int alg;
- char *alg_name;
p = simple_get_bytes(p, end, &alg, sizeof(alg));
if (IS_ERR(p))
goto out_err;
+
+ switch (alg) {
+ case ENCTYPE_DES_CBC_CRC:
+ case ENCTYPE_DES_CBC_MD4:
+ case ENCTYPE_DES_CBC_MD5:
+ /* Map all these key types to ENCTYPE_DES_CBC_RAW */
+ alg = ENCTYPE_DES_CBC_RAW;
+ break;
+ }
+
+ if (!supported_gss_krb5_enctype(alg)) {
+ printk(KERN_WARNING "gss_kerberos_mech: unsupported "
+ "encryption key algorithm %d\n", alg);
+ goto out_err;
+ }
p = simple_get_netobj(p, end, &key);
if (IS_ERR(p))
goto out_err;
- switch (alg) {
- case ENCTYPE_DES_CBC_RAW:
- alg_name = "cbc(des)";
- break;
- default:
- printk("gss_kerberos_mech: unsupported algorithm %d\n", alg);
- goto out_err_free_key;
- }
- *res = crypto_alloc_blkcipher(alg_name, 0, CRYPTO_ALG_ASYNC);
+ *res = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0,
+ CRYPTO_ALG_ASYNC);
if (IS_ERR(*res)) {
- printk("gss_kerberos_mech: unable to initialize crypto algorithm %s\n", alg_name);
+ printk(KERN_WARNING "gss_kerberos_mech: unable to initialize "
+ "crypto algorithm %s\n", ctx->gk5e->encrypt_name);
*res = NULL;
goto out_err_free_key;
}
if (crypto_blkcipher_setkey(*res, key.data, key.len)) {
- printk("gss_kerberos_mech: error setting key for crypto algorithm %s\n", alg_name);
+ printk(KERN_WARNING "gss_kerberos_mech: error setting key for "
+ "crypto algorithm %s\n", ctx->gk5e->encrypt_name);
goto out_err_free_tfm;
}
@@ -123,56 +270,55 @@ out_err:
}
static int
-gss_import_sec_context_kerberos(const void *p,
- size_t len,
- struct gss_ctx *ctx_id)
+gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx)
{
- const void *end = (const void *)((const char *)p + len);
- struct krb5_ctx *ctx;
int tmp;
- if (!(ctx = kzalloc(sizeof(*ctx), GFP_NOFS))) {
- p = ERR_PTR(-ENOMEM);
- goto out_err;
- }
-
p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate));
if (IS_ERR(p))
- goto out_err_free_ctx;
+ goto out_err;
+
+ /* Old format supports only DES! Any other enctype uses new format */
+ ctx->enctype = ENCTYPE_DES_CBC_RAW;
+
+ ctx->gk5e = get_gss_krb5_enctype(ctx->enctype);
+ if (ctx->gk5e == NULL)
+ goto out_err;
+
/* The downcall format was designed before we completely understood
* the uses of the context fields; so it includes some stuff we
* just give some minimal sanity-checking, and some we ignore
* completely (like the next twenty bytes): */
if (unlikely(p + 20 > end || p + 20 < p))
- goto out_err_free_ctx;
+ goto out_err;
p += 20;
p = simple_get_bytes(p, end, &tmp, sizeof(tmp));
if (IS_ERR(p))
- goto out_err_free_ctx;
+ goto out_err;
if (tmp != SGN_ALG_DES_MAC_MD5) {
p = ERR_PTR(-ENOSYS);
- goto out_err_free_ctx;
+ goto out_err;
}
p = simple_get_bytes(p, end, &tmp, sizeof(tmp));
if (IS_ERR(p))
- goto out_err_free_ctx;
+ goto out_err;
if (tmp != SEAL_ALG_DES) {
p = ERR_PTR(-ENOSYS);
- goto out_err_free_ctx;
+ goto out_err;
}
p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime));
if (IS_ERR(p))
- goto out_err_free_ctx;
+ goto out_err;
p = simple_get_bytes(p, end, &ctx->seq_send, sizeof(ctx->seq_send));
if (IS_ERR(p))
- goto out_err_free_ctx;
+ goto out_err;
p = simple_get_netobj(p, end, &ctx->mech_used);
if (IS_ERR(p))
- goto out_err_free_ctx;
- p = get_key(p, end, &ctx->enc);
+ goto out_err;
+ p = get_key(p, end, ctx, &ctx->enc);
if (IS_ERR(p))
goto out_err_free_mech;
- p = get_key(p, end, &ctx->seq);
+ p = get_key(p, end, ctx, &ctx->seq);
if (IS_ERR(p))
goto out_err_free_key1;
if (p != end) {
@@ -180,9 +326,6 @@ gss_import_sec_context_kerberos(const void *p,
goto out_err_free_key2;
}
- ctx_id->internal_ctx_id = ctx;
-
- dprintk("RPC: Successfully imported new context.\n");
return 0;
out_err_free_key2:
@@ -191,18 +334,376 @@ out_err_free_key1:
crypto_free_blkcipher(ctx->enc);
out_err_free_mech:
kfree(ctx->mech_used.data);
-out_err_free_ctx:
- kfree(ctx);
out_err:
return PTR_ERR(p);
}
+struct crypto_blkcipher *
+context_v2_alloc_cipher(struct krb5_ctx *ctx, const char *cname, u8 *key)
+{
+ struct crypto_blkcipher *cp;
+
+ cp = crypto_alloc_blkcipher(cname, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(cp)) {
+ dprintk("gss_kerberos_mech: unable to initialize "
+ "crypto algorithm %s\n", cname);
+ return NULL;
+ }
+ if (crypto_blkcipher_setkey(cp, key, ctx->gk5e->keylength)) {
+ dprintk("gss_kerberos_mech: error setting key for "
+ "crypto algorithm %s\n", cname);
+ crypto_free_blkcipher(cp);
+ return NULL;
+ }
+ return cp;
+}
+
+static inline void
+set_cdata(u8 cdata[GSS_KRB5_K5CLENGTH], u32 usage, u8 seed)
+{
+ cdata[0] = (usage>>24)&0xff;
+ cdata[1] = (usage>>16)&0xff;
+ cdata[2] = (usage>>8)&0xff;
+ cdata[3] = usage&0xff;
+ cdata[4] = seed;
+}
+
+static int
+context_derive_keys_des3(struct krb5_ctx *ctx)
+{
+ struct xdr_netobj c, keyin, keyout;
+ u8 cdata[GSS_KRB5_K5CLENGTH];
+ u32 err;
+
+ c.len = GSS_KRB5_K5CLENGTH;
+ c.data = cdata;
+
+ keyin.data = ctx->Ksess;
+ keyin.len = ctx->gk5e->keylength;
+ keyout.len = ctx->gk5e->keylength;
+
+ /* seq uses the raw key */
+ ctx->seq = context_v2_alloc_cipher(ctx, ctx->gk5e->encrypt_name,
+ ctx->Ksess);
+ if (ctx->seq == NULL)
+ goto out_err;
+
+ ctx->enc = context_v2_alloc_cipher(ctx, ctx->gk5e->encrypt_name,
+ ctx->Ksess);
+ if (ctx->enc == NULL)
+ goto out_free_seq;
+
+ /* derive cksum */
+ set_cdata(cdata, KG_USAGE_SIGN, KEY_USAGE_SEED_CHECKSUM);
+ keyout.data = ctx->cksum;
+ err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c);
+ if (err) {
+ dprintk("%s: Error %d deriving cksum key\n",
+ __func__, err);
+ goto out_free_enc;
+ }
+
+ return 0;
+
+out_free_enc:
+ crypto_free_blkcipher(ctx->enc);
+out_free_seq:
+ crypto_free_blkcipher(ctx->seq);
+out_err:
+ return -EINVAL;
+}
+
+/*
+ * Note that RC4 depends on deriving keys using the sequence
+ * number or the checksum of a token. Therefore, the final keys
+ * cannot be calculated until the token is being constructed!
+ */
+static int
+context_derive_keys_rc4(struct krb5_ctx *ctx)
+{
+ struct crypto_hash *hmac;
+ char sigkeyconstant[] = "signaturekey";
+ int slen = strlen(sigkeyconstant) + 1; /* include null terminator */
+ struct hash_desc desc;
+ struct scatterlist sg[1];
+ int err;
+
+ dprintk("RPC: %s: entered\n", __func__);
+ /*
+ * derive cksum (aka Ksign) key
+ */
+ hmac = crypto_alloc_hash(ctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(hmac)) {
+ dprintk("%s: error %ld allocating hash '%s'\n",
+ __func__, PTR_ERR(hmac), ctx->gk5e->cksum_name);
+ err = PTR_ERR(hmac);
+ goto out_err;
+ }
+
+ err = crypto_hash_setkey(hmac, ctx->Ksess, ctx->gk5e->keylength);
+ if (err)
+ goto out_err_free_hmac;
+
+ sg_init_table(sg, 1);
+ sg_set_buf(sg, sigkeyconstant, slen);
+
+ desc.tfm = hmac;
+ desc.flags = 0;
+
+ err = crypto_hash_init(&desc);
+ if (err)
+ goto out_err_free_hmac;
+
+ err = crypto_hash_digest(&desc, sg, slen, ctx->cksum);
+ if (err)
+ goto out_err_free_hmac;
+ /*
+ * allocate hash, and blkciphers for data and seqnum encryption
+ */
+ ctx->enc = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(ctx->enc)) {
+ err = PTR_ERR(ctx->enc);
+ goto out_err_free_hmac;
+ }
+
+ ctx->seq = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(ctx->seq)) {
+ crypto_free_blkcipher(ctx->enc);
+ err = PTR_ERR(ctx->seq);
+ goto out_err_free_hmac;
+ }
+
+ dprintk("RPC: %s: returning success\n", __func__);
+
+ err = 0;
+
+out_err_free_hmac:
+ crypto_free_hash(hmac);
+out_err:
+ dprintk("RPC: %s: returning %d\n", __func__, err);
+ return err;
+}
+
+static int
+context_derive_keys_new(struct krb5_ctx *ctx)
+{
+ struct xdr_netobj c, keyin, keyout;
+ u8 cdata[GSS_KRB5_K5CLENGTH];
+ u32 err;
+
+ c.len = GSS_KRB5_K5CLENGTH;
+ c.data = cdata;
+
+ keyin.data = ctx->Ksess;
+ keyin.len = ctx->gk5e->keylength;
+ keyout.len = ctx->gk5e->keylength;
+
+ /* initiator seal encryption */
+ set_cdata(cdata, KG_USAGE_INITIATOR_SEAL, KEY_USAGE_SEED_ENCRYPTION);
+ keyout.data = ctx->initiator_seal;
+ err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c);
+ if (err) {
+ dprintk("%s: Error %d deriving initiator_seal key\n",
+ __func__, err);
+ goto out_err;
+ }
+ ctx->initiator_enc = context_v2_alloc_cipher(ctx,
+ ctx->gk5e->encrypt_name,
+ ctx->initiator_seal);
+ if (ctx->initiator_enc == NULL)
+ goto out_err;
+
+ /* acceptor seal encryption */
+ set_cdata(cdata, KG_USAGE_ACCEPTOR_SEAL, KEY_USAGE_SEED_ENCRYPTION);
+ keyout.data = ctx->acceptor_seal;
+ err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c);
+ if (err) {
+ dprintk("%s: Error %d deriving acceptor_seal key\n",
+ __func__, err);
+ goto out_free_initiator_enc;
+ }
+ ctx->acceptor_enc = context_v2_alloc_cipher(ctx,
+ ctx->gk5e->encrypt_name,
+ ctx->acceptor_seal);
+ if (ctx->acceptor_enc == NULL)
+ goto out_free_initiator_enc;
+
+ /* initiator sign checksum */
+ set_cdata(cdata, KG_USAGE_INITIATOR_SIGN, KEY_USAGE_SEED_CHECKSUM);
+ keyout.data = ctx->initiator_sign;
+ err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c);
+ if (err) {
+ dprintk("%s: Error %d deriving initiator_sign key\n",
+ __func__, err);
+ goto out_free_acceptor_enc;
+ }
+
+ /* acceptor sign checksum */
+ set_cdata(cdata, KG_USAGE_ACCEPTOR_SIGN, KEY_USAGE_SEED_CHECKSUM);
+ keyout.data = ctx->acceptor_sign;
+ err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c);
+ if (err) {
+ dprintk("%s: Error %d deriving acceptor_sign key\n",
+ __func__, err);
+ goto out_free_acceptor_enc;
+ }
+
+ /* initiator seal integrity */
+ set_cdata(cdata, KG_USAGE_INITIATOR_SEAL, KEY_USAGE_SEED_INTEGRITY);
+ keyout.data = ctx->initiator_integ;
+ err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c);
+ if (err) {
+ dprintk("%s: Error %d deriving initiator_integ key\n",
+ __func__, err);
+ goto out_free_acceptor_enc;
+ }
+
+ /* acceptor seal integrity */
+ set_cdata(cdata, KG_USAGE_ACCEPTOR_SEAL, KEY_USAGE_SEED_INTEGRITY);
+ keyout.data = ctx->acceptor_integ;
+ err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c);
+ if (err) {
+ dprintk("%s: Error %d deriving acceptor_integ key\n",
+ __func__, err);
+ goto out_free_acceptor_enc;
+ }
+
+ switch (ctx->enctype) {
+ case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
+ case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
+ ctx->initiator_enc_aux =
+ context_v2_alloc_cipher(ctx, "cbc(aes)",
+ ctx->initiator_seal);
+ if (ctx->initiator_enc_aux == NULL)
+ goto out_free_acceptor_enc;
+ ctx->acceptor_enc_aux =
+ context_v2_alloc_cipher(ctx, "cbc(aes)",
+ ctx->acceptor_seal);
+ if (ctx->acceptor_enc_aux == NULL) {
+ crypto_free_blkcipher(ctx->initiator_enc_aux);
+ goto out_free_acceptor_enc;
+ }
+ }
+
+ return 0;
+
+out_free_acceptor_enc:
+ crypto_free_blkcipher(ctx->acceptor_enc);
+out_free_initiator_enc:
+ crypto_free_blkcipher(ctx->initiator_enc);
+out_err:
+ return -EINVAL;
+}
+
+static int
+gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx)
+{
+ int keylen;
+
+ p = simple_get_bytes(p, end, &ctx->flags, sizeof(ctx->flags));
+ if (IS_ERR(p))
+ goto out_err;
+ ctx->initiate = ctx->flags & KRB5_CTX_FLAG_INITIATOR;
+
+ p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime));
+ if (IS_ERR(p))
+ goto out_err;
+ p = simple_get_bytes(p, end, &ctx->seq_send64, sizeof(ctx->seq_send64));
+ if (IS_ERR(p))
+ goto out_err;
+ /* set seq_send for use by "older" enctypes */
+ ctx->seq_send = ctx->seq_send64;
+ if (ctx->seq_send64 != ctx->seq_send) {
+ dprintk("%s: seq_send64 %lx, seq_send %x overflow?\n", __func__,
+ (long unsigned)ctx->seq_send64, ctx->seq_send);
+ goto out_err;
+ }
+ p = simple_get_bytes(p, end, &ctx->enctype, sizeof(ctx->enctype));
+ if (IS_ERR(p))
+ goto out_err;
+ /* Map ENCTYPE_DES3_CBC_SHA1 to ENCTYPE_DES3_CBC_RAW */
+ if (ctx->enctype == ENCTYPE_DES3_CBC_SHA1)
+ ctx->enctype = ENCTYPE_DES3_CBC_RAW;
+ ctx->gk5e = get_gss_krb5_enctype(ctx->enctype);
+ if (ctx->gk5e == NULL) {
+ dprintk("gss_kerberos_mech: unsupported krb5 enctype %u\n",
+ ctx->enctype);
+ p = ERR_PTR(-EINVAL);
+ goto out_err;
+ }
+ keylen = ctx->gk5e->keylength;
+
+ p = simple_get_bytes(p, end, ctx->Ksess, keylen);
+ if (IS_ERR(p))
+ goto out_err;
+
+ if (p != end) {
+ p = ERR_PTR(-EINVAL);
+ goto out_err;
+ }
+
+ ctx->mech_used.data = kmemdup(gss_kerberos_mech.gm_oid.data,
+ gss_kerberos_mech.gm_oid.len, GFP_KERNEL);
+ if (unlikely(ctx->mech_used.data == NULL)) {
+ p = ERR_PTR(-ENOMEM);
+ goto out_err;
+ }
+ ctx->mech_used.len = gss_kerberos_mech.gm_oid.len;
+
+ switch (ctx->enctype) {
+ case ENCTYPE_DES3_CBC_RAW:
+ return context_derive_keys_des3(ctx);
+ case ENCTYPE_ARCFOUR_HMAC:
+ return context_derive_keys_rc4(ctx);
+ case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
+ case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
+ return context_derive_keys_new(ctx);
+ default:
+ return -EINVAL;
+ }
+
+out_err:
+ return PTR_ERR(p);
+}
+
+static int
+gss_import_sec_context_kerberos(const void *p, size_t len,
+ struct gss_ctx *ctx_id)
+{
+ const void *end = (const void *)((const char *)p + len);
+ struct krb5_ctx *ctx;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (ctx == NULL)
+ return -ENOMEM;
+
+ if (len == 85)
+ ret = gss_import_v1_context(p, end, ctx);
+ else
+ ret = gss_import_v2_context(p, end, ctx);
+
+ if (ret == 0)
+ ctx_id->internal_ctx_id = ctx;
+ else
+ kfree(ctx);
+
+ dprintk("RPC: %s: returning %d\n", __func__, ret);
+ return ret;
+}
+
static void
gss_delete_sec_context_kerberos(void *internal_ctx) {
struct krb5_ctx *kctx = internal_ctx;
crypto_free_blkcipher(kctx->seq);
crypto_free_blkcipher(kctx->enc);
+ crypto_free_blkcipher(kctx->acceptor_enc);
+ crypto_free_blkcipher(kctx->initiator_enc);
+ crypto_free_blkcipher(kctx->acceptor_enc_aux);
+ crypto_free_blkcipher(kctx->initiator_enc_aux);
kfree(kctx->mech_used.data);
kfree(kctx);
}
@@ -241,6 +742,7 @@ static struct gss_api_mech gss_kerberos_mech = {
.gm_ops = &gss_kerberos_ops,
.gm_pf_num = ARRAY_SIZE(gss_kerberos_pfs),
.gm_pfs = gss_kerberos_pfs,
+ .gm_upcall_enctypes = "enctypes=18,17,16,23,3,1,2 ",
};
static int __init init_kerberos_module(void)
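The context_derive_keys_new() hunk above derives a separate key for each direction and purpose by handing krb5_derive_key() a small constant that set_cdata() builds from a usage number and a seed type. Below is a minimal userspace sketch of that constant layout, assuming the RFC 3961/RFC 4121 convention of a big-endian 32-bit usage value followed by one key-type octet (0xAA for encryption, 0x99 for checksum, 0x55 for integrity); the numbers are illustrative and not read from the kernel headers.

/* Userspace sketch of the 5-byte derivation constant handed to
 * krb5_derive_key(): big-endian usage number plus one seed-type octet.
 * Values follow the RFC 3961/4121 conventions and are assumptions here. */
#include <stdio.h>
#include <stdint.h>

static void set_cdata_sketch(uint8_t cdata[5], uint32_t usage, uint8_t seed)
{
	cdata[0] = (usage >> 24) & 0xff;
	cdata[1] = (usage >> 16) & 0xff;
	cdata[2] = (usage >> 8) & 0xff;
	cdata[3] = usage & 0xff;
	cdata[4] = seed;
}

int main(void)
{
	uint8_t c[5];

	/* usage 24 is the initiator-seal usage, 0xAA asks for an encryption key */
	set_cdata_sketch(c, 24, 0xAA);
	printf("initiator seal constant: %02x %02x %02x %02x %02x\n",
	       c[0], c[1], c[2], c[3], c[4]);
	return 0;
}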
diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c
index 88fe6e75ed7e..d7941eab7796 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seal.c
@@ -3,7 +3,7 @@
*
* Adapted from MIT Kerberos 5-1.2.1 lib/gssapi/krb5/k5seal.c
*
- * Copyright (c) 2000 The Regents of the University of Michigan.
+ * Copyright (c) 2000-2008 The Regents of the University of Michigan.
* All rights reserved.
*
* Andy Adamson <andros@umich.edu>
@@ -70,53 +70,154 @@
DEFINE_SPINLOCK(krb5_seq_lock);
-u32
-gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
+static char *
+setup_token(struct krb5_ctx *ctx, struct xdr_netobj *token)
+{
+ __be16 *ptr, *krb5_hdr;
+ int body_size = GSS_KRB5_TOK_HDR_LEN + ctx->gk5e->cksumlength;
+
+ token->len = g_token_size(&ctx->mech_used, body_size);
+
+ ptr = (__be16 *)token->data;
+ g_make_token_header(&ctx->mech_used, body_size, (unsigned char **)&ptr);
+
+ /* ptr now at start of header described in rfc 1964, section 1.2.1: */
+ krb5_hdr = ptr;
+ *ptr++ = KG_TOK_MIC_MSG;
+ *ptr++ = cpu_to_le16(ctx->gk5e->signalg);
+ *ptr++ = SEAL_ALG_NONE;
+ *ptr++ = 0xffff;
+
+ return (char *)krb5_hdr;
+}
+
+static void *
+setup_token_v2(struct krb5_ctx *ctx, struct xdr_netobj *token)
+{
+ __be16 *ptr, *krb5_hdr;
+ u8 *p, flags = 0x00;
+
+ if ((ctx->flags & KRB5_CTX_FLAG_INITIATOR) == 0)
+ flags |= 0x01;
+ if (ctx->flags & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY)
+ flags |= 0x04;
+
+ /* Per rfc 4121, sec 4.2.6.1, there is no header,
+ * just start the token */
+ krb5_hdr = ptr = (__be16 *)token->data;
+
+ *ptr++ = KG2_TOK_MIC;
+ p = (u8 *)ptr;
+ *p++ = flags;
+ *p++ = 0xff;
+ ptr = (__be16 *)p;
+ *ptr++ = 0xffff;
+ *ptr++ = 0xffff;
+
+ token->len = GSS_KRB5_TOK_HDR_LEN + ctx->gk5e->cksumlength;
+ return krb5_hdr;
+}
+
+static u32
+gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text,
struct xdr_netobj *token)
{
- struct krb5_ctx *ctx = gss_ctx->internal_ctx_id;
- char cksumdata[16];
- struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
- unsigned char *ptr, *msg_start;
+ char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
+ struct xdr_netobj md5cksum = {.len = sizeof(cksumdata),
+ .data = cksumdata};
+ void *ptr;
s32 now;
u32 seq_send;
+ u8 *cksumkey;
- dprintk("RPC: gss_krb5_seal\n");
+ dprintk("RPC: %s\n", __func__);
BUG_ON(ctx == NULL);
now = get_seconds();
- token->len = g_token_size(&ctx->mech_used, GSS_KRB5_TOK_HDR_LEN + 8);
+ ptr = setup_token(ctx, token);
- ptr = token->data;
- g_make_token_header(&ctx->mech_used, GSS_KRB5_TOK_HDR_LEN + 8, &ptr);
+ if (ctx->gk5e->keyed_cksum)
+ cksumkey = ctx->cksum;
+ else
+ cksumkey = NULL;
- /* ptr now at header described in rfc 1964, section 1.2.1: */
- ptr[0] = (unsigned char) ((KG_TOK_MIC_MSG >> 8) & 0xff);
- ptr[1] = (unsigned char) (KG_TOK_MIC_MSG & 0xff);
+ if (make_checksum(ctx, ptr, 8, text, 0, cksumkey,
+ KG_USAGE_SIGN, &md5cksum))
+ return GSS_S_FAILURE;
- msg_start = ptr + GSS_KRB5_TOK_HDR_LEN + 8;
+ memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);
- *(__be16 *)(ptr + 2) = htons(SGN_ALG_DES_MAC_MD5);
- memset(ptr + 4, 0xff, 4);
+ spin_lock(&krb5_seq_lock);
+ seq_send = ctx->seq_send++;
+ spin_unlock(&krb5_seq_lock);
- if (make_checksum("md5", ptr, 8, text, 0, &md5cksum))
+ if (krb5_make_seq_num(ctx, ctx->seq, ctx->initiate ? 0 : 0xff,
+ seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8))
return GSS_S_FAILURE;
- if (krb5_encrypt(ctx->seq, NULL, md5cksum.data,
- md5cksum.data, md5cksum.len))
- return GSS_S_FAILURE;
+ return (ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
+}
+
+static u32
+gss_get_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *text,
+ struct xdr_netobj *token)
+{
+ char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
+ struct xdr_netobj cksumobj = { .len = sizeof(cksumdata),
+ .data = cksumdata};
+ void *krb5_hdr;
+ s32 now;
+ u64 seq_send;
+ u8 *cksumkey;
+ unsigned int cksum_usage;
- memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data + md5cksum.len - 8, 8);
+ dprintk("RPC: %s\n", __func__);
+ krb5_hdr = setup_token_v2(ctx, token);
+
+ /* Set up the sequence number. Now 64-bits in clear
+ * text and w/o direction indicator */
spin_lock(&krb5_seq_lock);
- seq_send = ctx->seq_send++;
+ seq_send = ctx->seq_send64++;
spin_unlock(&krb5_seq_lock);
-
- if (krb5_make_seq_num(ctx->seq, ctx->initiate ? 0 : 0xff,
- seq_send, ptr + GSS_KRB5_TOK_HDR_LEN,
- ptr + 8))
+ *((u64 *)(krb5_hdr + 8)) = cpu_to_be64(seq_send);
+
+ if (ctx->initiate) {
+ cksumkey = ctx->initiator_sign;
+ cksum_usage = KG_USAGE_INITIATOR_SIGN;
+ } else {
+ cksumkey = ctx->acceptor_sign;
+ cksum_usage = KG_USAGE_ACCEPTOR_SIGN;
+ }
+
+ if (make_checksum_v2(ctx, krb5_hdr, GSS_KRB5_TOK_HDR_LEN,
+ text, 0, cksumkey, cksum_usage, &cksumobj))
return GSS_S_FAILURE;
+ memcpy(krb5_hdr + GSS_KRB5_TOK_HDR_LEN, cksumobj.data, cksumobj.len);
+
+ now = get_seconds();
+
return (ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}
+
+u32
+gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
+ struct xdr_netobj *token)
+{
+ struct krb5_ctx *ctx = gss_ctx->internal_ctx_id;
+
+ switch (ctx->enctype) {
+ default:
+ BUG();
+ case ENCTYPE_DES_CBC_RAW:
+ case ENCTYPE_DES3_CBC_RAW:
+ case ENCTYPE_ARCFOUR_HMAC:
+ return gss_get_mic_v1(ctx, text, token);
+ case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
+ case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
+ return gss_get_mic_v2(ctx, text, token);
+ }
+}
+
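setup_token_v2() and gss_get_mic_v2() above emit the 16-byte RFC 4121 MIC token header: a two-octet token id, one flags octet, five filler octets of 0xff, and the 64-bit sequence number stored big-endian at offset 8, with the checksum appended right after. A standalone sketch of that layout follows; the 0x0404 token id and the 0x01/0x04 flag bits are the RFC 4121 values and are assumptions here rather than values taken from the kernel tree.

/* Sketch of the 16-byte RFC 4121 MIC token header built by setup_token_v2().
 * Token id and flag bits are the RFC 4121 values, assumed for illustration. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static void build_mic_hdr(uint8_t hdr[16], int sent_by_acceptor,
			  int acceptor_subkey, uint64_t seq)
{
	int i;

	hdr[0] = 0x04;			/* TOK_ID, first octet */
	hdr[1] = 0x04;			/* TOK_ID, second octet */
	hdr[2] = (sent_by_acceptor ? 0x01 : 0) |
		 (acceptor_subkey ? 0x04 : 0);
	memset(hdr + 3, 0xff, 5);	/* filler octets */
	for (i = 0; i < 8; i++)		/* SND_SEQ, big-endian at offset 8 */
		hdr[8 + i] = (uint8_t)(seq >> (56 - 8 * i));
}

int main(void)
{
	uint8_t hdr[16];
	int i;

	build_mic_hdr(hdr, 0, 1, 42);
	for (i = 0; i < 16; i++)
		printf("%02x%c", hdr[i], i == 15 ? '\n' : ' ');
	return 0;
}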
diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
index 6331cd6866ec..415c013ba382 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
@@ -39,14 +39,51 @@
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif
+static s32
+krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
+ unsigned char *cksum, unsigned char *buf)
+{
+ struct crypto_blkcipher *cipher;
+ unsigned char plain[8];
+ s32 code;
+
+ dprintk("RPC: %s:\n", __func__);
+ cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(cipher))
+ return PTR_ERR(cipher);
+
+ plain[0] = (unsigned char) ((seqnum >> 24) & 0xff);
+ plain[1] = (unsigned char) ((seqnum >> 16) & 0xff);
+ plain[2] = (unsigned char) ((seqnum >> 8) & 0xff);
+ plain[3] = (unsigned char) ((seqnum >> 0) & 0xff);
+ plain[4] = direction;
+ plain[5] = direction;
+ plain[6] = direction;
+ plain[7] = direction;
+
+ code = krb5_rc4_setup_seq_key(kctx, cipher, cksum);
+ if (code)
+ goto out;
+
+ code = krb5_encrypt(cipher, cksum, plain, buf, 8);
+out:
+ crypto_free_blkcipher(cipher);
+ return code;
+}
s32
-krb5_make_seq_num(struct crypto_blkcipher *key,
+krb5_make_seq_num(struct krb5_ctx *kctx,
+ struct crypto_blkcipher *key,
int direction,
u32 seqnum,
unsigned char *cksum, unsigned char *buf)
{
unsigned char plain[8];
+ if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC)
+ return krb5_make_rc4_seq_num(kctx, direction, seqnum,
+ cksum, buf);
+
plain[0] = (unsigned char) (seqnum & 0xff);
plain[1] = (unsigned char) ((seqnum >> 8) & 0xff);
plain[2] = (unsigned char) ((seqnum >> 16) & 0xff);
@@ -60,17 +97,59 @@ krb5_make_seq_num(struct crypto_blkcipher *key,
return krb5_encrypt(key, cksum, plain, buf, 8);
}
+static s32
+krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum,
+ unsigned char *buf, int *direction, s32 *seqnum)
+{
+ struct crypto_blkcipher *cipher;
+ unsigned char plain[8];
+ s32 code;
+
+ dprintk("RPC: %s:\n", __func__);
+ cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(cipher))
+ return PTR_ERR(cipher);
+
+ code = krb5_rc4_setup_seq_key(kctx, cipher, cksum);
+ if (code)
+ goto out;
+
+ code = krb5_decrypt(cipher, cksum, buf, plain, 8);
+ if (code)
+ goto out;
+
+ if ((plain[4] != plain[5]) || (plain[4] != plain[6])
+ || (plain[4] != plain[7])) {
+ code = (s32)KG_BAD_SEQ;
+ goto out;
+ }
+
+ *direction = plain[4];
+
+ *seqnum = ((plain[0] << 24) | (plain[1] << 16) |
+ (plain[2] << 8) | (plain[3]));
+out:
+ crypto_free_blkcipher(cipher);
+ return code;
+}
+
s32
-krb5_get_seq_num(struct crypto_blkcipher *key,
+krb5_get_seq_num(struct krb5_ctx *kctx,
unsigned char *cksum,
unsigned char *buf,
int *direction, u32 *seqnum)
{
s32 code;
unsigned char plain[8];
+ struct crypto_blkcipher *key = kctx->seq;
dprintk("RPC: krb5_get_seq_num:\n");
+ if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC)
+ return krb5_get_rc4_seq_num(kctx, cksum, buf,
+ direction, seqnum);
+
if ((code = krb5_decrypt(key, cksum, buf, plain, 8)))
return code;
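krb5_make_seq_num() and the new RC4 helpers above pack the sequence number and direction into an 8-byte plaintext before encrypting it with the checksum as key material: the classic enctypes store the 32-bit value least-significant octet first, arcfour-hmac stores it most-significant octet first, and both repeat the direction octet four times so the receiver can sanity-check it. A small userspace sketch of the two encodings and the direction check:

/* Sketch of the 8-byte sequence-number plaintext used by the v1 token code:
 * seqnum in octets 0-3 (LSB-first classic, MSB-first arcfour-hmac),
 * the direction repeated in octets 4-7. */
#include <stdio.h>
#include <stdint.h>

static void encode_seq(uint8_t plain[8], uint32_t seq, uint8_t dir, int rc4)
{
	int i;

	for (i = 0; i < 4; i++)
		plain[i] = rc4 ? (uint8_t)(seq >> (24 - 8 * i))
			       : (uint8_t)(seq >> (8 * i));
	plain[4] = plain[5] = plain[6] = plain[7] = dir;
}

static int direction_ok(const uint8_t plain[8])
{
	/* all four direction octets must agree before the value is trusted */
	return plain[4] == plain[5] && plain[4] == plain[6] &&
	       plain[4] == plain[7];
}

int main(void)
{
	uint8_t p[8];

	encode_seq(p, 0x01020304, 0xff, 0);
	printf("classic: %02x %02x %02x %02x dir-ok=%d\n",
	       p[0], p[1], p[2], p[3], direction_ok(p));
	encode_seq(p, 0x01020304, 0x00, 1);
	printf("rc4:     %02x %02x %02x %02x dir-ok=%d\n",
	       p[0], p[1], p[2], p[3], direction_ok(p));
	return 0;
}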
diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c
index ce6c247edad0..6cd930f3678f 100644
--- a/net/sunrpc/auth_gss/gss_krb5_unseal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c
@@ -3,7 +3,7 @@
*
* Adapted from MIT Kerberos 5-1.2.1 lib/gssapi/krb5/k5unseal.c
*
- * Copyright (c) 2000 The Regents of the University of Michigan.
+ * Copyright (c) 2000-2008 The Regents of the University of Michigan.
* All rights reserved.
*
* Andy Adamson <andros@umich.edu>
@@ -70,20 +70,21 @@
/* read_token is a mic token, and message_buffer is the data that the mic was
* supposedly taken over. */
-u32
-gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
+static u32
+gss_verify_mic_v1(struct krb5_ctx *ctx,
struct xdr_buf *message_buffer, struct xdr_netobj *read_token)
{
- struct krb5_ctx *ctx = gss_ctx->internal_ctx_id;
int signalg;
int sealalg;
- char cksumdata[16];
- struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
+ char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
+ struct xdr_netobj md5cksum = {.len = sizeof(cksumdata),
+ .data = cksumdata};
s32 now;
int direction;
u32 seqnum;
unsigned char *ptr = (unsigned char *)read_token->data;
int bodysize;
+ u8 *cksumkey;
dprintk("RPC: krb5_read_token\n");
@@ -98,7 +99,7 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
/* XXX sanity-check bodysize?? */
signalg = ptr[2] + (ptr[3] << 8);
- if (signalg != SGN_ALG_DES_MAC_MD5)
+ if (signalg != ctx->gk5e->signalg)
return GSS_S_DEFECTIVE_TOKEN;
sealalg = ptr[4] + (ptr[5] << 8);
@@ -108,13 +109,17 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
if ((ptr[6] != 0xff) || (ptr[7] != 0xff))
return GSS_S_DEFECTIVE_TOKEN;
- if (make_checksum("md5", ptr, 8, message_buffer, 0, &md5cksum))
- return GSS_S_FAILURE;
+ if (ctx->gk5e->keyed_cksum)
+ cksumkey = ctx->cksum;
+ else
+ cksumkey = NULL;
- if (krb5_encrypt(ctx->seq, NULL, md5cksum.data, md5cksum.data, 16))
+ if (make_checksum(ctx, ptr, 8, message_buffer, 0,
+ cksumkey, KG_USAGE_SIGN, &md5cksum))
return GSS_S_FAILURE;
- if (memcmp(md5cksum.data + 8, ptr + GSS_KRB5_TOK_HDR_LEN, 8))
+ if (memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN,
+ ctx->gk5e->cksumlength))
return GSS_S_BAD_SIG;
/* it got through unscathed. Make sure the context is unexpired */
@@ -126,7 +131,8 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
/* do sequencing checks */
- if (krb5_get_seq_num(ctx->seq, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8, &direction, &seqnum))
+ if (krb5_get_seq_num(ctx, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8,
+ &direction, &seqnum))
return GSS_S_FAILURE;
if ((ctx->initiate && direction != 0xff) ||
@@ -135,3 +141,86 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
return GSS_S_COMPLETE;
}
+
+static u32
+gss_verify_mic_v2(struct krb5_ctx *ctx,
+ struct xdr_buf *message_buffer, struct xdr_netobj *read_token)
+{
+ char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
+ struct xdr_netobj cksumobj = {.len = sizeof(cksumdata),
+ .data = cksumdata};
+ s32 now;
+ u64 seqnum;
+ u8 *ptr = read_token->data;
+ u8 *cksumkey;
+ u8 flags;
+ int i;
+ unsigned int cksum_usage;
+
+ dprintk("RPC: %s\n", __func__);
+
+ if (be16_to_cpu(*((__be16 *)ptr)) != KG2_TOK_MIC)
+ return GSS_S_DEFECTIVE_TOKEN;
+
+ flags = ptr[2];
+ if ((!ctx->initiate && (flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)) ||
+ (ctx->initiate && !(flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)))
+ return GSS_S_BAD_SIG;
+
+ if (flags & KG2_TOKEN_FLAG_SEALED) {
+ dprintk("%s: token has unexpected sealed flag\n", __func__);
+ return GSS_S_FAILURE;
+ }
+
+ for (i = 3; i < 8; i++)
+ if (ptr[i] != 0xff)
+ return GSS_S_DEFECTIVE_TOKEN;
+
+ if (ctx->initiate) {
+ cksumkey = ctx->acceptor_sign;
+ cksum_usage = KG_USAGE_ACCEPTOR_SIGN;
+ } else {
+ cksumkey = ctx->initiator_sign;
+ cksum_usage = KG_USAGE_INITIATOR_SIGN;
+ }
+
+ if (make_checksum_v2(ctx, ptr, GSS_KRB5_TOK_HDR_LEN, message_buffer, 0,
+ cksumkey, cksum_usage, &cksumobj))
+ return GSS_S_FAILURE;
+
+ if (memcmp(cksumobj.data, ptr + GSS_KRB5_TOK_HDR_LEN,
+ ctx->gk5e->cksumlength))
+ return GSS_S_BAD_SIG;
+
+ /* it got through unscathed. Make sure the context is unexpired */
+ now = get_seconds();
+ if (now > ctx->endtime)
+ return GSS_S_CONTEXT_EXPIRED;
+
+ /* do sequencing checks */
+
+ seqnum = be64_to_cpup((__be64 *)(ptr + 8));
+
+ return GSS_S_COMPLETE;
+}
+
+u32
+gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
+ struct xdr_buf *message_buffer,
+ struct xdr_netobj *read_token)
+{
+ struct krb5_ctx *ctx = gss_ctx->internal_ctx_id;
+
+ switch (ctx->enctype) {
+ default:
+ BUG();
+ case ENCTYPE_DES_CBC_RAW:
+ case ENCTYPE_DES3_CBC_RAW:
+ case ENCTYPE_ARCFOUR_HMAC:
+ return gss_verify_mic_v1(ctx, message_buffer, read_token);
+ case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
+ case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
+ return gss_verify_mic_v2(ctx, message_buffer, read_token);
+ }
+}
+
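gss_verify_mic_v2() above rejects tokens whose sent-by-acceptor flag disagrees with our side of the context and refuses MIC tokens that claim to be sealed. The sketch below mirrors just that flag validation; the 0x01 and 0x02 flag values follow RFC 4121 and are assumptions, not constants copied from the kernel.

/* Sketch of the v2 MIC flag checks: the sender role must be the peer's,
 * and a MIC token must never carry the sealed flag. */
#include <stdio.h>

#define FLAG_SENT_BY_ACCEPTOR	0x01
#define FLAG_SEALED		0x02

static int mic_flags_valid(int we_are_initiator, unsigned char flags)
{
	int from_acceptor = flags & FLAG_SENT_BY_ACCEPTOR;

	if (we_are_initiator && !from_acceptor)
		return 0;	/* initiator expects tokens from the acceptor */
	if (!we_are_initiator && from_acceptor)
		return 0;	/* acceptor expects tokens from the initiator */
	if (flags & FLAG_SEALED)
		return 0;	/* MIC tokens are never sealed */
	return 1;
}

int main(void)
{
	printf("%d %d %d\n",
	       mic_flags_valid(1, FLAG_SENT_BY_ACCEPTOR),	/* 1 */
	       mic_flags_valid(1, 0),				/* 0 */
	       mic_flags_valid(0, FLAG_SEALED));		/* 0 */
	return 0;
}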
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index a6e905637e03..2763e3e48db4 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -1,3 +1,33 @@
+/*
+ * COPYRIGHT (c) 2008
+ * The Regents of the University of Michigan
+ * ALL RIGHTS RESERVED
+ *
+ * Permission is granted to use, copy, create derivative works
+ * and redistribute this software and such derivative works
+ * for any purpose, so long as the name of The University of
+ * Michigan is not used in any advertising or publicity
+ * pertaining to the use of distribution of this software
+ * without specific, written prior authorization. If the
+ * above copyright notice or any other identification of the
+ * University of Michigan is included in any copy of any
+ * portion of this software, then the disclaimer below must
+ * also be included.
+ *
+ * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION
+ * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY
+ * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF
+ * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
+ * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
+ * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE
+ * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR
+ * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING
+ * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
+ * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGES.
+ */
+
#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/sunrpc/gss_krb5.h>
@@ -12,10 +42,7 @@
static inline int
gss_krb5_padding(int blocksize, int length)
{
- /* Most of the code is block-size independent but currently we
- * use only 8: */
- BUG_ON(blocksize != 8);
- return 8 - (length & 7);
+ return blocksize - (length % blocksize);
}
static inline void
@@ -86,8 +113,8 @@ out:
return 0;
}
-static void
-make_confounder(char *p, u32 conflen)
+void
+gss_krb5_make_confounder(char *p, u32 conflen)
{
static u64 i = 0;
u64 *q = (u64 *)p;
@@ -127,69 +154,73 @@ make_confounder(char *p, u32 conflen)
/* XXX factor out common code with seal/unseal. */
-u32
-gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
+static u32
+gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
struct xdr_buf *buf, struct page **pages)
{
- struct krb5_ctx *kctx = ctx->internal_ctx_id;
- char cksumdata[16];
- struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
+ char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
+ struct xdr_netobj md5cksum = {.len = sizeof(cksumdata),
+ .data = cksumdata};
int blocksize = 0, plainlen;
unsigned char *ptr, *msg_start;
s32 now;
int headlen;
struct page **tmp_pages;
u32 seq_send;
+ u8 *cksumkey;
+ u32 conflen = kctx->gk5e->conflen;
- dprintk("RPC: gss_wrap_kerberos\n");
+ dprintk("RPC: %s\n", __func__);
now = get_seconds();
blocksize = crypto_blkcipher_blocksize(kctx->enc);
gss_krb5_add_padding(buf, offset, blocksize);
BUG_ON((buf->len - offset) % blocksize);
- plainlen = blocksize + buf->len - offset;
+ plainlen = conflen + buf->len - offset;
- headlen = g_token_size(&kctx->mech_used, 24 + plainlen) -
- (buf->len - offset);
+ headlen = g_token_size(&kctx->mech_used,
+ GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength + plainlen) -
+ (buf->len - offset);
ptr = buf->head[0].iov_base + offset;
/* shift data to make room for header. */
+ xdr_extend_head(buf, offset, headlen);
+
/* XXX Would be cleverer to encrypt while copying. */
- /* XXX bounds checking, slack, etc. */
- memmove(ptr + headlen, ptr, buf->head[0].iov_len - offset);
- buf->head[0].iov_len += headlen;
- buf->len += headlen;
BUG_ON((buf->len - offset - headlen) % blocksize);
g_make_token_header(&kctx->mech_used,
- GSS_KRB5_TOK_HDR_LEN + 8 + plainlen, &ptr);
+ GSS_KRB5_TOK_HDR_LEN +
+ kctx->gk5e->cksumlength + plainlen, &ptr);
/* ptr now at header described in rfc 1964, section 1.2.1: */
ptr[0] = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff);
ptr[1] = (unsigned char) (KG_TOK_WRAP_MSG & 0xff);
- msg_start = ptr + 24;
+ msg_start = ptr + GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength;
- *(__be16 *)(ptr + 2) = htons(SGN_ALG_DES_MAC_MD5);
+ *(__be16 *)(ptr + 2) = cpu_to_le16(kctx->gk5e->signalg);
memset(ptr + 4, 0xff, 4);
- *(__be16 *)(ptr + 4) = htons(SEAL_ALG_DES);
+ *(__be16 *)(ptr + 4) = cpu_to_le16(kctx->gk5e->sealalg);
- make_confounder(msg_start, blocksize);
+ gss_krb5_make_confounder(msg_start, conflen);
+
+ if (kctx->gk5e->keyed_cksum)
+ cksumkey = kctx->cksum;
+ else
+ cksumkey = NULL;
/* XXXJBF: UGH!: */
tmp_pages = buf->pages;
buf->pages = pages;
- if (make_checksum("md5", ptr, 8, buf,
- offset + headlen - blocksize, &md5cksum))
+ if (make_checksum(kctx, ptr, 8, buf, offset + headlen - conflen,
+ cksumkey, KG_USAGE_SEAL, &md5cksum))
return GSS_S_FAILURE;
buf->pages = tmp_pages;
- if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
- md5cksum.data, md5cksum.len))
- return GSS_S_FAILURE;
- memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data + md5cksum.len - 8, 8);
+ memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);
spin_lock(&krb5_seq_lock);
seq_send = kctx->seq_send++;
@@ -197,25 +228,42 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
/* XXX would probably be more efficient to compute checksum
* and encrypt at the same time: */
- if ((krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff,
+ if ((krb5_make_seq_num(kctx, kctx->seq, kctx->initiate ? 0 : 0xff,
seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8)))
return GSS_S_FAILURE;
- if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize,
- pages))
- return GSS_S_FAILURE;
+ if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
+ struct crypto_blkcipher *cipher;
+ int err;
+ cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(cipher))
+ return GSS_S_FAILURE;
+
+ krb5_rc4_setup_enc_key(kctx, cipher, seq_send);
+
+ err = gss_encrypt_xdr_buf(cipher, buf,
+ offset + headlen - conflen, pages);
+ crypto_free_blkcipher(cipher);
+ if (err)
+ return GSS_S_FAILURE;
+ } else {
+ if (gss_encrypt_xdr_buf(kctx->enc, buf,
+ offset + headlen - conflen, pages))
+ return GSS_S_FAILURE;
+ }
return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}
-u32
-gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
+static u32
+gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
{
- struct krb5_ctx *kctx = ctx->internal_ctx_id;
int signalg;
int sealalg;
- char cksumdata[16];
- struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
+ char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
+ struct xdr_netobj md5cksum = {.len = sizeof(cksumdata),
+ .data = cksumdata};
s32 now;
int direction;
s32 seqnum;
@@ -224,6 +272,9 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
void *data_start, *orig_start;
int data_len;
int blocksize;
+ u32 conflen = kctx->gk5e->conflen;
+ int crypt_offset;
+ u8 *cksumkey;
dprintk("RPC: gss_unwrap_kerberos\n");
@@ -241,29 +292,65 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
/* get the sign and seal algorithms */
signalg = ptr[2] + (ptr[3] << 8);
- if (signalg != SGN_ALG_DES_MAC_MD5)
+ if (signalg != kctx->gk5e->signalg)
return GSS_S_DEFECTIVE_TOKEN;
sealalg = ptr[4] + (ptr[5] << 8);
- if (sealalg != SEAL_ALG_DES)
+ if (sealalg != kctx->gk5e->sealalg)
return GSS_S_DEFECTIVE_TOKEN;
if ((ptr[6] != 0xff) || (ptr[7] != 0xff))
return GSS_S_DEFECTIVE_TOKEN;
- if (gss_decrypt_xdr_buf(kctx->enc, buf,
- ptr + GSS_KRB5_TOK_HDR_LEN + 8 - (unsigned char *)buf->head[0].iov_base))
- return GSS_S_DEFECTIVE_TOKEN;
+ /*
+ * Data starts after token header and checksum. ptr points
+ * to the beginning of the token header
+ */
+ crypt_offset = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) -
+ (unsigned char *)buf->head[0].iov_base;
+
+ /*
+ * Need plaintext seqnum to derive encryption key for arcfour-hmac
+ */
+ if (krb5_get_seq_num(kctx, ptr + GSS_KRB5_TOK_HDR_LEN,
+ ptr + 8, &direction, &seqnum))
+ return GSS_S_BAD_SIG;
- if (make_checksum("md5", ptr, 8, buf,
- ptr + GSS_KRB5_TOK_HDR_LEN + 8 - (unsigned char *)buf->head[0].iov_base, &md5cksum))
- return GSS_S_FAILURE;
+ if ((kctx->initiate && direction != 0xff) ||
+ (!kctx->initiate && direction != 0))
+ return GSS_S_BAD_SIG;
+
+ if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
+ struct crypto_blkcipher *cipher;
+ int err;
+
+ cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(cipher))
+ return GSS_S_FAILURE;
+
+ krb5_rc4_setup_enc_key(kctx, cipher, seqnum);
- if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
- md5cksum.data, md5cksum.len))
+ err = gss_decrypt_xdr_buf(cipher, buf, crypt_offset);
+ crypto_free_blkcipher(cipher);
+ if (err)
+ return GSS_S_DEFECTIVE_TOKEN;
+ } else {
+ if (gss_decrypt_xdr_buf(kctx->enc, buf, crypt_offset))
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+
+ if (kctx->gk5e->keyed_cksum)
+ cksumkey = kctx->cksum;
+ else
+ cksumkey = NULL;
+
+ if (make_checksum(kctx, ptr, 8, buf, crypt_offset,
+ cksumkey, KG_USAGE_SEAL, &md5cksum))
return GSS_S_FAILURE;
- if (memcmp(md5cksum.data + 8, ptr + GSS_KRB5_TOK_HDR_LEN, 8))
+ if (memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN,
+ kctx->gk5e->cksumlength))
return GSS_S_BAD_SIG;
/* it got through unscathed. Make sure the context is unexpired */
@@ -275,19 +362,12 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
/* do sequencing checks */
- if (krb5_get_seq_num(kctx->seq, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8,
- &direction, &seqnum))
- return GSS_S_BAD_SIG;
-
- if ((kctx->initiate && direction != 0xff) ||
- (!kctx->initiate && direction != 0))
- return GSS_S_BAD_SIG;
-
/* Copy the data back to the right position. XXX: Would probably be
* better to copy and encrypt at the same time. */
blocksize = crypto_blkcipher_blocksize(kctx->enc);
- data_start = ptr + GSS_KRB5_TOK_HDR_LEN + 8 + blocksize;
+ data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) +
+ conflen;
orig_start = buf->head[0].iov_base + offset;
data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
memmove(orig_start, data_start, data_len);
@@ -299,3 +379,209 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
return GSS_S_COMPLETE;
}
+
+/*
+ * We cannot currently handle tokens with rotated data. We need a
+ * generalized routine to rotate the data in place. It is anticipated
+ * that we won't encounter rotated data in the general case.
+ */
+static u32
+rotate_left(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, u16 rrc)
+{
+ unsigned int realrrc = rrc % (buf->len - offset - GSS_KRB5_TOK_HDR_LEN);
+
+ if (realrrc == 0)
+ return 0;
+
+ dprintk("%s: cannot process token with rotated data: "
+ "rrc %u, realrrc %u\n", __func__, rrc, realrrc);
+ return 1;
+}
+
+static u32
+gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
+ struct xdr_buf *buf, struct page **pages)
+{
+ int blocksize;
+ u8 *ptr, *plainhdr;
+ s32 now;
+ u8 flags = 0x00;
+ __be16 *be16ptr, ec = 0;
+ __be64 *be64ptr;
+ u32 err;
+
+ dprintk("RPC: %s\n", __func__);
+
+ if (kctx->gk5e->encrypt_v2 == NULL)
+ return GSS_S_FAILURE;
+
+ /* make room for gss token header */
+ if (xdr_extend_head(buf, offset, GSS_KRB5_TOK_HDR_LEN))
+ return GSS_S_FAILURE;
+
+ /* construct gss token header */
+ ptr = plainhdr = buf->head[0].iov_base + offset;
+ *ptr++ = (unsigned char) ((KG2_TOK_WRAP>>8) & 0xff);
+ *ptr++ = (unsigned char) (KG2_TOK_WRAP & 0xff);
+
+ if ((kctx->flags & KRB5_CTX_FLAG_INITIATOR) == 0)
+ flags |= KG2_TOKEN_FLAG_SENTBYACCEPTOR;
+ if ((kctx->flags & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY) != 0)
+ flags |= KG2_TOKEN_FLAG_ACCEPTORSUBKEY;
+ /* We always do confidentiality in wrap tokens */
+ flags |= KG2_TOKEN_FLAG_SEALED;
+
+ *ptr++ = flags;
+ *ptr++ = 0xff;
+ be16ptr = (__be16 *)ptr;
+
+ blocksize = crypto_blkcipher_blocksize(kctx->acceptor_enc);
+ *be16ptr++ = cpu_to_be16(ec);
+ /* "inner" token header always uses 0 for RRC */
+ *be16ptr++ = cpu_to_be16(0);
+
+ be64ptr = (__be64 *)be16ptr;
+ spin_lock(&krb5_seq_lock);
+ *be64ptr = cpu_to_be64(kctx->seq_send64++);
+ spin_unlock(&krb5_seq_lock);
+
+ err = (*kctx->gk5e->encrypt_v2)(kctx, offset, buf, ec, pages);
+ if (err)
+ return err;
+
+ now = get_seconds();
+ return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
+}
+
+static u32
+gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
+{
+ s32 now;
+ u64 seqnum;
+ u8 *ptr;
+ u8 flags = 0x00;
+ u16 ec, rrc;
+ int err;
+ u32 headskip, tailskip;
+ u8 decrypted_hdr[GSS_KRB5_TOK_HDR_LEN];
+ unsigned int movelen;
+
+
+ dprintk("RPC: %s\n", __func__);
+
+ if (kctx->gk5e->decrypt_v2 == NULL)
+ return GSS_S_FAILURE;
+
+ ptr = buf->head[0].iov_base + offset;
+
+ if (be16_to_cpu(*((__be16 *)ptr)) != KG2_TOK_WRAP)
+ return GSS_S_DEFECTIVE_TOKEN;
+
+ flags = ptr[2];
+ if ((!kctx->initiate && (flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)) ||
+ (kctx->initiate && !(flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)))
+ return GSS_S_BAD_SIG;
+
+ if ((flags & KG2_TOKEN_FLAG_SEALED) == 0) {
+ dprintk("%s: token missing expected sealed flag\n", __func__);
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+
+ if (ptr[3] != 0xff)
+ return GSS_S_DEFECTIVE_TOKEN;
+
+ ec = be16_to_cpup((__be16 *)(ptr + 4));
+ rrc = be16_to_cpup((__be16 *)(ptr + 6));
+
+ seqnum = be64_to_cpup((__be64 *)(ptr + 8));
+
+ if (rrc != 0) {
+ err = rotate_left(kctx, offset, buf, rrc);
+ if (err)
+ return GSS_S_FAILURE;
+ }
+
+ err = (*kctx->gk5e->decrypt_v2)(kctx, offset, buf,
+ &headskip, &tailskip);
+ if (err)
+ return GSS_S_FAILURE;
+
+ /*
+ * Retrieve the decrypted gss token header and verify
+ * it against the original
+ */
+ err = read_bytes_from_xdr_buf(buf,
+ buf->len - GSS_KRB5_TOK_HDR_LEN - tailskip,
+ decrypted_hdr, GSS_KRB5_TOK_HDR_LEN);
+ if (err) {
+ dprintk("%s: error %u getting decrypted_hdr\n", __func__, err);
+ return GSS_S_FAILURE;
+ }
+ if (memcmp(ptr, decrypted_hdr, 6)
+ || memcmp(ptr + 8, decrypted_hdr + 8, 8)) {
+ dprintk("%s: token hdr, plaintext hdr mismatch!\n", __func__);
+ return GSS_S_FAILURE;
+ }
+
+ /* do sequencing checks */
+
+ /* it got through unscathed. Make sure the context is unexpired */
+ now = get_seconds();
+ if (now > kctx->endtime)
+ return GSS_S_CONTEXT_EXPIRED;
+
+ /*
+ * Move the head data back to the right position in xdr_buf.
+ * We ignore any "ec" data since it might be in the head or
+ * the tail, and we really don't need to deal with it.
+ * Note that buf->head[0].iov_len may indicate the available
+ * head buffer space rather than that actually occupied.
+ */
+ movelen = min_t(unsigned int, buf->head[0].iov_len, buf->len);
+ movelen -= offset + GSS_KRB5_TOK_HDR_LEN + headskip;
+ BUG_ON(offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
+ buf->head[0].iov_len);
+ memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen);
+ buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
+ buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip;
+
+ return GSS_S_COMPLETE;
+}
+
+u32
+gss_wrap_kerberos(struct gss_ctx *gctx, int offset,
+ struct xdr_buf *buf, struct page **pages)
+{
+ struct krb5_ctx *kctx = gctx->internal_ctx_id;
+
+ switch (kctx->enctype) {
+ default:
+ BUG();
+ case ENCTYPE_DES_CBC_RAW:
+ case ENCTYPE_DES3_CBC_RAW:
+ case ENCTYPE_ARCFOUR_HMAC:
+ return gss_wrap_kerberos_v1(kctx, offset, buf, pages);
+ case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
+ case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
+ return gss_wrap_kerberos_v2(kctx, offset, buf, pages);
+ }
+}
+
+u32
+gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf)
+{
+ struct krb5_ctx *kctx = gctx->internal_ctx_id;
+
+ switch (kctx->enctype) {
+ default:
+ BUG();
+ case ENCTYPE_DES_CBC_RAW:
+ case ENCTYPE_DES3_CBC_RAW:
+ case ENCTYPE_ARCFOUR_HMAC:
+ return gss_unwrap_kerberos_v1(kctx, offset, buf);
+ case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
+ case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
+ return gss_unwrap_kerberos_v2(kctx, offset, buf);
+ }
+}
+
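The reworked gss_krb5_padding() above simply computes blocksize - (length % blocksize), so a message that is already block-aligned still receives one full block of padding, which keeps the pad length recoverable on unwrap. A tiny sketch of that arithmetic:

/* Sketch of the generalized padding rule from the wrap path: a length
 * that is already block-aligned still gets a full padding block. */
#include <stdio.h>

static int gss_krb5_padding(int blocksize, int length)
{
	return blocksize - (length % blocksize);
}

int main(void)
{
	int len;

	for (len = 13; len <= 16; len++)
		printf("len %2d, blocksize 8 -> pad %d\n",
		       len, gss_krb5_padding(8, len));
	/* 13 -> 3, 14 -> 2, 15 -> 1, 16 -> 8 */
	return 0;
}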
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index 76e4c6f4ac3c..28a84ef41d13 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -285,6 +285,20 @@ gss_verify_mic(struct gss_ctx *context_handle,
mic_token);
}
+/*
+ * This function is called from both the client and server code.
+ * Each makes guarantees about how much "slack" space is available
+ * for the underlying function in "buf"'s head and tail while
+ * performing the wrap.
+ *
+ * The client and server code allocate RPC_MAX_AUTH_SIZE extra
+ * space in both the head and tail which is available for use by
+ * the wrap function.
+ *
+ * Underlying functions should verify they do not use more than
+ * RPC_MAX_AUTH_SIZE of extra space in either the head or tail
+ * when performing the wrap.
+ */
u32
gss_wrap(struct gss_ctx *ctx_id,
int offset,
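The comment added to gss_wrap() above states the slack contract: callers reserve RPC_MAX_AUTH_SIZE of spare space in both the head and the tail, and wrap implementations must not consume more. Below is a hedged sketch of how a caller-side check of that budget might look; the 400-byte value mirrors the usual SUNRPC definition but is an assumption of this sketch.

/* Sketch of a slack-budget check matching the gss_wrap() comment above:
 * record the head/tail sizes before wrapping, then verify the growth
 * stayed within RPC_MAX_AUTH_SIZE (assumed to be 400 bytes here). */
#include <assert.h>
#include <stdio.h>
#include <stddef.h>

#define RPC_MAX_AUTH_SIZE 400

struct buf_sizes {
	size_t head_len;
	size_t tail_len;
};

static void check_slack(const struct buf_sizes *before,
			const struct buf_sizes *after)
{
	assert(after->head_len - before->head_len <= RPC_MAX_AUTH_SIZE);
	assert(after->tail_len - before->tail_len <= RPC_MAX_AUTH_SIZE);
}

int main(void)
{
	struct buf_sizes before = { 120, 0 };
	struct buf_sizes after  = { 120 + 28, 12 };	/* e.g. token hdr + MIC */

	check_slack(&before, &after);
	printf("wrap stayed within the advertised slack\n");
	return 0;
}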
diff --git a/net/sunrpc/auth_gss/gss_spkm3_token.c b/net/sunrpc/auth_gss/gss_spkm3_token.c
index 3308157436d2..a99825d7caa0 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_token.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_token.c
@@ -223,7 +223,7 @@ spkm3_verify_mic_token(unsigned char **tokp, int *mic_hdrlen, unsigned char **ck
/* only support SPKM_MIC_TOK */
if((ptr[6] != 0x01) || (ptr[7] != 0x01)) {
- dprintk("RPC: ERROR unsupported SPKM3 token \n");
+ dprintk("RPC: ERROR unsupported SPKM3 token\n");
goto out;
}
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index b81e790ef9f4..1d9ac4ac818a 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1315,6 +1315,14 @@ svcauth_gss_wrap_resp_priv(struct svc_rqst *rqstp)
inpages = resbuf->pages;
/* XXX: Would be better to write some xdr helper functions for
* nfs{2,3,4}xdr.c that place the data right, instead of copying: */
+
+ /*
+ * If there is currently tail data, make sure there is
+ * room for the head, tail, and 2 * RPC_MAX_AUTH_SIZE in
+ * the page, and move the current tail data such that
+ * there is RPC_MAX_AUTH_SIZE slack space available in
+ * both the head and tail.
+ */
if (resbuf->tail[0].iov_base) {
BUG_ON(resbuf->tail[0].iov_base >= resbuf->head[0].iov_base
+ PAGE_SIZE);
@@ -1327,6 +1335,13 @@ svcauth_gss_wrap_resp_priv(struct svc_rqst *rqstp)
resbuf->tail[0].iov_len);
resbuf->tail[0].iov_base += RPC_MAX_AUTH_SIZE;
}
+ /*
+ * If there is no current tail data, make sure there is
+ * room for the head data, and 2 * RPC_MAX_AUTH_SIZE in the
+ * allotted page, and set up tail information such that there
+ * is RPC_MAX_AUTH_SIZE slack space available in both the
+ * head and tail.
+ */
if (resbuf->tail[0].iov_base == NULL) {
if (resbuf->head[0].iov_len + 2*RPC_MAX_AUTH_SIZE > PAGE_SIZE)
return -ENOMEM;
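The comments added to svcauth_gss_wrap_resp_priv() above describe the page layout the server sets up before wrapping: the existing tail, if any, is shifted up by RPC_MAX_AUTH_SIZE so that both the head and the tail end up with that much slack. The simplified arithmetic sketch below illustrates the fits-in-one-page test; the 4096-byte page and 400-byte slack figures are assumptions used purely for illustration.

/* Simplified sketch of the page-space reasoning behind the comments above:
 * head data, the shifted tail, and two slack regions must share one page. */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE_ASSUMED	4096
#define RPC_MAX_AUTH_SIZE	400

static int resp_fits(size_t head_len, size_t tail_len)
{
	return head_len + tail_len + 2 * RPC_MAX_AUTH_SIZE
		<= PAGE_SIZE_ASSUMED;
}

int main(void)
{
	printf("head 1000, tail 200 -> %s\n",
	       resp_fits(1000, 200) ? "fits" : "no room");
	printf("head 3600, tail 200 -> %s\n",
	       resp_fits(3600, 200) ? "fits" : "no room");
	return 0;
}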
diff --git a/net/sunrpc/bc_svc.c b/net/sunrpc/bc_svc.c
index f0c05d3311c1..7dcfe0cc3500 100644
--- a/net/sunrpc/bc_svc.c
+++ b/net/sunrpc/bc_svc.c
@@ -60,7 +60,7 @@ int bc_send(struct rpc_rqst *req)
rpc_put_task(task);
}
return ret;
- dprintk("RPC: bc_send ret= %d \n", ret);
+ dprintk("RPC: bc_send ret= %d\n", ret);
}
#endif /* CONFIG_NFS_V4_1 */
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 39bddba53ba1..a3f340c8b79a 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -49,11 +49,17 @@ static void cache_init(struct cache_head *h)
h->last_refresh = now;
}
+static inline int cache_is_expired(struct cache_detail *detail, struct cache_head *h)
+{
+ return (h->expiry_time < get_seconds()) ||
+ (detail->flush_time > h->last_refresh);
+}
+
struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
struct cache_head *key, int hash)
{
struct cache_head **head, **hp;
- struct cache_head *new = NULL;
+ struct cache_head *new = NULL, *freeme = NULL;
head = &detail->hash_table[hash];
@@ -62,6 +68,9 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
for (hp=head; *hp != NULL ; hp = &(*hp)->next) {
struct cache_head *tmp = *hp;
if (detail->match(tmp, key)) {
+ if (cache_is_expired(detail, tmp))
+ /* This entry is expired, we will discard it. */
+ break;
cache_get(tmp);
read_unlock(&detail->hash_lock);
return tmp;
@@ -86,6 +95,13 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
for (hp=head; *hp != NULL ; hp = &(*hp)->next) {
struct cache_head *tmp = *hp;
if (detail->match(tmp, key)) {
+ if (cache_is_expired(detail, tmp)) {
+ *hp = tmp->next;
+ tmp->next = NULL;
+ detail->entries--;
+ freeme = tmp;
+ break;
+ }
cache_get(tmp);
write_unlock(&detail->hash_lock);
cache_put(new, detail);
@@ -98,6 +114,8 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
cache_get(new);
write_unlock(&detail->hash_lock);
+ if (freeme)
+ cache_put(freeme, detail);
return new;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);
@@ -183,10 +201,7 @@ static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
static inline int cache_is_valid(struct cache_detail *detail, struct cache_head *h)
{
- if (!test_bit(CACHE_VALID, &h->flags) ||
- h->expiry_time < get_seconds())
- return -EAGAIN;
- else if (detail->flush_time > h->last_refresh)
+ if (!test_bit(CACHE_VALID, &h->flags))
return -EAGAIN;
else {
/* entry is valid */
@@ -397,31 +412,27 @@ static int cache_clean(void)
/* Ok, now to clean this strand */
cp = & current_detail->hash_table[current_index];
- ch = *cp;
- for (; ch; cp= & ch->next, ch= *cp) {
+ for (ch = *cp ; ch ; cp = & ch->next, ch = *cp) {
if (current_detail->nextcheck > ch->expiry_time)
current_detail->nextcheck = ch->expiry_time+1;
- if (ch->expiry_time >= get_seconds() &&
- ch->last_refresh >= current_detail->flush_time)
+ if (!cache_is_expired(current_detail, ch))
continue;
- if (test_and_clear_bit(CACHE_PENDING, &ch->flags))
- cache_dequeue(current_detail, ch);
- if (atomic_read(&ch->ref.refcount) == 1)
- break;
- }
- if (ch) {
*cp = ch->next;
ch->next = NULL;
current_detail->entries--;
rv = 1;
+ break;
}
+
write_unlock(&current_detail->hash_lock);
d = current_detail;
if (!ch)
current_index ++;
spin_unlock(&cache_list_lock);
if (ch) {
+ if (test_and_clear_bit(CACHE_PENDING, &ch->flags))
+ cache_dequeue(current_detail, ch);
cache_revisit_request(ch);
cache_put(ch, d);
}
@@ -1233,8 +1244,10 @@ static int content_open(struct inode *inode, struct file *file,
if (!cd || !try_module_get(cd->owner))
return -EACCES;
han = __seq_open_private(file, &cache_content_op, sizeof(*han));
- if (han == NULL)
+ if (han == NULL) {
+ module_put(cd->owner);
return -ENOMEM;
+ }
han->cd = cd;
return 0;
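The new cache_is_expired() helper above declares an entry dead either when its expiry time has passed or when the cache-wide flush time has moved past the entry's last refresh, and sunrpc_cache_lookup() now discards such entries instead of handing them back. A userspace sketch of that predicate:

/* Sketch of the cache_is_expired() predicate: stale once the expiry time
 * has passed, or once a cache-wide flush postdates the last refresh. */
#include <stdio.h>
#include <time.h>

struct cache_entry {
	time_t expiry_time;
	time_t last_refresh;
};

static int entry_is_expired(time_t flush_time, const struct cache_entry *h,
			    time_t now)
{
	return h->expiry_time < now || flush_time > h->last_refresh;
}

int main(void)
{
	time_t now = time(NULL);
	struct cache_entry fresh = { now + 60, now };
	struct cache_entry flushed = { now + 60, now - 120 };

	printf("fresh: %d, flushed-out: %d\n",
	       entry_is_expired(now - 300, &fresh, now),
	       entry_is_expired(now - 60, &flushed, now));
	return 0;
}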
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 19c9983d5360..8c7b5433022a 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -556,26 +556,16 @@ static const struct rpc_call_ops rpc_default_ops = {
*/
struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
{
- struct rpc_task *task, *ret;
+ struct rpc_task *task;
task = rpc_new_task(task_setup_data);
- if (task == NULL) {
- rpc_release_calldata(task_setup_data->callback_ops,
- task_setup_data->callback_data);
- ret = ERR_PTR(-ENOMEM);
+ if (IS_ERR(task))
goto out;
- }
- if (task->tk_status != 0) {
- ret = ERR_PTR(task->tk_status);
- rpc_put_task(task);
- goto out;
- }
atomic_inc(&task->tk_count);
rpc_execute(task);
- ret = task;
out:
- return ret;
+ return task;
}
EXPORT_SYMBOL_GPL(rpc_run_task);
@@ -657,9 +647,8 @@ struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
* Create an rpc_task to send the data
*/
task = rpc_new_task(&task_setup_data);
- if (!task) {
+ if (IS_ERR(task)) {
xprt_free_bc_request(req);
- task = ERR_PTR(-ENOMEM);
goto out;
}
task->tk_rqstp = req;
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index aae6907fd546..c8979ce5d88a 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -856,16 +856,23 @@ struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
if (task == NULL) {
task = rpc_alloc_task();
- if (task == NULL)
- goto out;
+ if (task == NULL) {
+ rpc_release_calldata(setup_data->callback_ops,
+ setup_data->callback_data);
+ return ERR_PTR(-ENOMEM);
+ }
flags = RPC_TASK_DYNAMIC;
}
rpc_init_task(task, setup_data);
+ if (task->tk_status < 0) {
+ int err = task->tk_status;
+ rpc_put_task(task);
+ return ERR_PTR(err);
+ }
task->tk_flags |= flags;
dprintk("RPC: allocated task %p\n", task);
-out:
return task;
}
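rpc_new_task() above now reports failure with an ERR_PTR-encoded errno instead of NULL, which is what lets rpc_run_task() and rpc_run_bc_task() drop their duplicated cleanup paths. The sketch below re-implements the ERR_PTR/IS_ERR/PTR_ERR convention in userspace purely to show how the errno travels inside the returned pointer; it is an illustration, not the kernel's macros.

/* Userspace sketch of the ERR_PTR convention: small negative errno values
 * live at the very top of the address space, so one pointer return can
 * carry either an object or an error code. */
#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static void *err_ptr(long err)		{ return (void *)err; }
static long ptr_err(const void *p)	{ return (long)p; }
static int is_err(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static void *new_task(int fail)
{
	static int the_task = 42;

	if (fail)
		return err_ptr(-ENOMEM);	/* the error rides in the pointer */
	return &the_task;
}

int main(void)
{
	void *t = new_task(1);

	if (is_err(t))
		printf("task allocation failed: %ld\n", ptr_err(t));
	return 0;
}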
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 061b2e0f9118..cbc084939dd8 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -744,8 +744,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
if (rqstp->rq_deferred) {
svc_xprt_received(xprt);
len = svc_deferred_recv(rqstp);
- } else
+ } else {
len = xprt->xpt_ops->xpo_recvfrom(rqstp);
+ svc_xprt_received(xprt);
+ }
dprintk("svc: got len=%d\n", len);
}
@@ -893,12 +895,12 @@ void svc_delete_xprt(struct svc_xprt *xprt)
*/
if (test_bit(XPT_TEMP, &xprt->xpt_flags))
serv->sv_tmpcnt--;
+ spin_unlock_bh(&serv->sv_lock);
while ((dr = svc_deferred_dequeue(xprt)) != NULL)
kfree(dr);
svc_xprt_put(xprt);
- spin_unlock_bh(&serv->sv_lock);
}
void svc_close_xprt(struct svc_xprt *xprt)
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index a29f259204e6..2fb12c2bec66 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -419,8 +419,8 @@ static void svc_udp_data_ready(struct sock *sk, int count)
set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
svc_xprt_enqueue(&svsk->sk_xprt);
}
- if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible(sk->sk_sleep);
+ if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
+ wake_up_interruptible(sk_sleep(sk));
}
/*
@@ -436,10 +436,10 @@ static void svc_write_space(struct sock *sk)
svc_xprt_enqueue(&svsk->sk_xprt);
}
- if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) {
+ if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk))) {
dprintk("RPC svc_write_space: someone sleeping on %p\n",
svsk);
- wake_up_interruptible(sk->sk_sleep);
+ wake_up_interruptible(sk_sleep(sk));
}
}
@@ -547,7 +547,6 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
dprintk("svc: recvfrom returned error %d\n", -err);
set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
}
- svc_xprt_received(&svsk->sk_xprt);
return -EAGAIN;
}
len = svc_addr_len(svc_addr(rqstp));
@@ -562,11 +561,6 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
svsk->sk_sk->sk_stamp = skb->tstamp;
set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */
- /*
- * Maybe more packets - kick another thread ASAP.
- */
- svc_xprt_received(&svsk->sk_xprt);
-
len = skb->len - sizeof(struct udphdr);
rqstp->rq_arg.len = len;
@@ -757,8 +751,8 @@ static void svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
printk("svc: socket %p: no user data\n", sk);
}
- if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible_all(sk->sk_sleep);
+ if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
+ wake_up_interruptible_all(sk_sleep(sk));
}
/*
@@ -777,8 +771,8 @@ static void svc_tcp_state_change(struct sock *sk)
set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
svc_xprt_enqueue(&svsk->sk_xprt);
}
- if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible_all(sk->sk_sleep);
+ if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
+ wake_up_interruptible_all(sk_sleep(sk));
}
static void svc_tcp_data_ready(struct sock *sk, int count)
@@ -791,8 +785,8 @@ static void svc_tcp_data_ready(struct sock *sk, int count)
set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
svc_xprt_enqueue(&svsk->sk_xprt);
}
- if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible(sk->sk_sleep);
+ if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
+ wake_up_interruptible(sk_sleep(sk));
}
/*
@@ -917,7 +911,6 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
if (len < want) {
dprintk("svc: short recvfrom while reading record "
"length (%d of %d)\n", len, want);
- svc_xprt_received(&svsk->sk_xprt);
goto err_again; /* record header not complete */
}
@@ -953,7 +946,6 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
if (len < svsk->sk_reclen) {
dprintk("svc: incomplete TCP record (%d of %d)\n",
len, svsk->sk_reclen);
- svc_xprt_received(&svsk->sk_xprt);
goto err_again; /* record not complete */
}
len = svsk->sk_reclen;
@@ -961,14 +953,11 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
return len;
error:
- if (len == -EAGAIN) {
+ if (len == -EAGAIN)
dprintk("RPC: TCP recv_record got EAGAIN\n");
- svc_xprt_received(&svsk->sk_xprt);
- }
return len;
err_delete:
set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
- svc_xprt_received(&svsk->sk_xprt);
err_again:
return -EAGAIN;
}
@@ -1110,7 +1099,6 @@ out:
svsk->sk_tcplen = 0;
svc_xprt_copy_addrs(rqstp, &svsk->sk_xprt);
- svc_xprt_received(&svsk->sk_xprt);
if (serv->sv_stats)
serv->sv_stats->nettcpcnt++;
@@ -1119,7 +1107,6 @@ out:
err_again:
if (len == -EAGAIN) {
dprintk("RPC: TCP recvfrom got EAGAIN\n");
- svc_xprt_received(&svsk->sk_xprt);
return len;
}
error:
@@ -1494,8 +1481,8 @@ static void svc_sock_detach(struct svc_xprt *xprt)
sk->sk_data_ready = svsk->sk_odata;
sk->sk_write_space = svsk->sk_owspace;
- if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible(sk->sk_sleep);
+ if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
+ wake_up_interruptible(sk_sleep(sk));
}
/*
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 2763fde88499..a1f82a87d34d 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -762,6 +762,7 @@ int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, un
__write_bytes_to_xdr_buf(&subbuf, obj, len);
return 0;
}
+EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);
int
xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 42f09ade0044..9feccefdac34 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -62,7 +62,6 @@
* Local functions
*/
static void xprt_request_init(struct rpc_task *, struct rpc_xprt *);
-static inline void do_xprt_reserve(struct rpc_task *);
static void xprt_connect_status(struct rpc_task *task);
static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
@@ -711,12 +710,16 @@ void xprt_connect(struct rpc_task *task)
if (task->tk_rqstp)
task->tk_rqstp->rq_bytes_sent = 0;
- task->tk_timeout = xprt->connect_timeout;
+ task->tk_timeout = task->tk_rqstp->rq_timeout;
rpc_sleep_on(&xprt->pending, task, xprt_connect_status);
+
+ if (test_bit(XPRT_CLOSING, &xprt->state))
+ return;
+ if (xprt_test_and_set_connecting(xprt))
+ return;
xprt->stat.connect_start = jiffies;
xprt->ops->connect(task);
}
- return;
}
static void xprt_connect_status(struct rpc_task *task)
@@ -935,7 +938,7 @@ void xprt_transmit(struct rpc_task *task)
spin_unlock_bh(&xprt->transport_lock);
}
-static inline void do_xprt_reserve(struct rpc_task *task)
+static void xprt_alloc_slot(struct rpc_task *task)
{
struct rpc_xprt *xprt = task->tk_xprt;
@@ -955,6 +958,16 @@ static inline void do_xprt_reserve(struct rpc_task *task)
rpc_sleep_on(&xprt->backlog, task, NULL);
}
+static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
+{
+ memset(req, 0, sizeof(*req)); /* mark unused */
+
+ spin_lock(&xprt->reserve_lock);
+ list_add(&req->rq_list, &xprt->free);
+ rpc_wake_up_next(&xprt->backlog);
+ spin_unlock(&xprt->reserve_lock);
+}
+
/**
* xprt_reserve - allocate an RPC request slot
* @task: RPC task requesting a slot allocation
@@ -968,13 +981,13 @@ void xprt_reserve(struct rpc_task *task)
task->tk_status = -EIO;
spin_lock(&xprt->reserve_lock);
- do_xprt_reserve(task);
+ xprt_alloc_slot(task);
spin_unlock(&xprt->reserve_lock);
}
static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
{
- return xprt->xid++;
+ return (__force __be32)xprt->xid++;
}
static inline void xprt_init_xid(struct rpc_xprt *xprt)
@@ -1006,14 +1019,10 @@ void xprt_release(struct rpc_task *task)
{
struct rpc_xprt *xprt;
struct rpc_rqst *req;
- int is_bc_request;
if (!(req = task->tk_rqstp))
return;
- /* Preallocated backchannel request? */
- is_bc_request = bc_prealloc(req);
-
xprt = req->rq_xprt;
rpc_count_iostats(task);
spin_lock_bh(&xprt->transport_lock);
@@ -1027,21 +1036,16 @@ void xprt_release(struct rpc_task *task)
mod_timer(&xprt->timer,
xprt->last_used + xprt->idle_timeout);
spin_unlock_bh(&xprt->transport_lock);
- if (!bc_prealloc(req))
+ if (req->rq_buffer)
xprt->ops->buf_free(req->rq_buffer);
task->tk_rqstp = NULL;
if (req->rq_release_snd_buf)
req->rq_release_snd_buf(req);
dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
- if (likely(!is_bc_request)) {
- memset(req, 0, sizeof(*req)); /* mark unused */
-
- spin_lock(&xprt->reserve_lock);
- list_add(&req->rq_list, &xprt->free);
- rpc_wake_up_next(&xprt->backlog);
- spin_unlock(&xprt->reserve_lock);
- } else
+ if (likely(!bc_prealloc(req)))
+ xprt_free_slot(xprt, req);
+ else
xprt_free_bc_request(req);
}
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index f92e37eb413c..0194de814933 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -566,7 +566,6 @@ static int rdma_read_complete(struct svc_rqst *rqstp,
ret, rqstp->rq_arg.len, rqstp->rq_arg.head[0].iov_base,
rqstp->rq_arg.head[0].iov_len);
- svc_xprt_received(rqstp->rq_xprt);
return ret;
}
@@ -665,7 +664,6 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
rqstp->rq_arg.head[0].iov_len);
rqstp->rq_prot = IPPROTO_MAX;
svc_xprt_copy_addrs(rqstp, xprt);
- svc_xprt_received(xprt);
return ret;
close_out:
@@ -678,6 +676,5 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
*/
set_bit(XPT_CLOSE, &xprt->xpt_flags);
defer:
- svc_xprt_received(xprt);
return 0;
}
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 187257b1d880..3f3b38c5642f 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -305,7 +305,6 @@ xprt_setup_rdma(struct xprt_create *args)
/* 60 second timeout, no retries */
xprt->timeout = &xprt_rdma_default_timeout;
xprt->bind_timeout = (60U * HZ);
- xprt->connect_timeout = (60U * HZ);
xprt->reestablish_timeout = (5U * HZ);
xprt->idle_timeout = (5U * 60 * HZ);
@@ -449,21 +448,19 @@ xprt_rdma_connect(struct rpc_task *task)
struct rpc_xprt *xprt = (struct rpc_xprt *)task->tk_xprt;
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
- if (!xprt_test_and_set_connecting(xprt)) {
- if (r_xprt->rx_ep.rep_connected != 0) {
- /* Reconnect */
- schedule_delayed_work(&r_xprt->rdma_connect,
- xprt->reestablish_timeout);
- xprt->reestablish_timeout <<= 1;
- if (xprt->reestablish_timeout > (30 * HZ))
- xprt->reestablish_timeout = (30 * HZ);
- else if (xprt->reestablish_timeout < (5 * HZ))
- xprt->reestablish_timeout = (5 * HZ);
- } else {
- schedule_delayed_work(&r_xprt->rdma_connect, 0);
- if (!RPC_IS_ASYNC(task))
- flush_scheduled_work();
- }
+ if (r_xprt->rx_ep.rep_connected != 0) {
+ /* Reconnect */
+ schedule_delayed_work(&r_xprt->rdma_connect,
+ xprt->reestablish_timeout);
+ xprt->reestablish_timeout <<= 1;
+ if (xprt->reestablish_timeout > (30 * HZ))
+ xprt->reestablish_timeout = (30 * HZ);
+ else if (xprt->reestablish_timeout < (5 * HZ))
+ xprt->reestablish_timeout = (5 * HZ);
+ } else {
+ schedule_delayed_work(&r_xprt->rdma_connect, 0);
+ if (!RPC_IS_ASYNC(task))
+ flush_scheduled_work();
}
}
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 9847c30b5001..b9867d3dfc5b 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -138,20 +138,6 @@ static ctl_table sunrpc_table[] = {
#endif
/*
- * Time out for an RPC UDP socket connect. UDP socket connects are
- * synchronous, but we set a timeout anyway in case of resource
- * exhaustion on the local host.
- */
-#define XS_UDP_CONN_TO (5U * HZ)
-
-/*
- * Wait duration for an RPC TCP connection to be established. Solaris
- * NFS over TCP uses 60 seconds, for example, which is in line with how
- * long a server takes to reboot.
- */
-#define XS_TCP_CONN_TO (60U * HZ)
-
-/*
* Wait duration for a reply from the RPC portmapper.
*/
#define XS_BIND_TO (60U * HZ)
@@ -2016,9 +2002,6 @@ static void xs_connect(struct rpc_task *task)
struct rpc_xprt *xprt = task->tk_xprt;
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
- if (xprt_test_and_set_connecting(xprt))
- return;
-
if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) {
dprintk("RPC: xs_connect delayed xprt %p for %lu "
"seconds\n",
@@ -2038,16 +2021,6 @@ static void xs_connect(struct rpc_task *task)
}
}
-static void xs_tcp_connect(struct rpc_task *task)
-{
- struct rpc_xprt *xprt = task->tk_xprt;
-
- /* Exit if we need to wait for socket shutdown to complete */
- if (test_bit(XPRT_CLOSING, &xprt->state))
- return;
- xs_connect(task);
-}
-
/**
* xs_udp_print_stats - display UDP socket-specifc stats
* @xprt: rpc_xprt struct containing statistics
@@ -2246,7 +2219,7 @@ static struct rpc_xprt_ops xs_tcp_ops = {
.release_xprt = xs_tcp_release_xprt,
.rpcbind = rpcb_getport_async,
.set_port = xs_set_port,
- .connect = xs_tcp_connect,
+ .connect = xs_connect,
.buf_alloc = rpc_malloc,
.buf_free = rpc_free,
.send_request = xs_tcp_send_request,
@@ -2337,7 +2310,6 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
xprt->bind_timeout = XS_BIND_TO;
- xprt->connect_timeout = XS_UDP_CONN_TO;
xprt->reestablish_timeout = XS_UDP_REEST_TO;
xprt->idle_timeout = XS_IDLE_DISC_TO;
@@ -2412,7 +2384,6 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
xprt->bind_timeout = XS_BIND_TO;
- xprt->connect_timeout = XS_TCP_CONN_TO;
xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
xprt->idle_timeout = XS_IDLE_DISC_TO;
@@ -2488,7 +2459,6 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
/* backchannel */
xprt_set_bound(xprt);
xprt->bind_timeout = 0;
- xprt->connect_timeout = 0;
xprt->reestablish_timeout = 0;
xprt->idle_timeout = 0;
@@ -2602,7 +2572,8 @@ void cleanup_socket_xprt(void)
xprt_unregister_transport(&xs_bc_tcp_transport);
}
-static int param_set_uint_minmax(const char *val, struct kernel_param *kp,
+static int param_set_uint_minmax(const char *val,
+ const struct kernel_param *kp,
unsigned int min, unsigned int max)
{
unsigned long num;
@@ -2617,34 +2588,37 @@ static int param_set_uint_minmax(const char *val, struct kernel_param *kp,
return 0;
}
-static int param_set_portnr(const char *val, struct kernel_param *kp)
+static int param_set_portnr(const char *val, const struct kernel_param *kp)
{
return param_set_uint_minmax(val, kp,
RPC_MIN_RESVPORT,
RPC_MAX_RESVPORT);
}
-static int param_get_portnr(char *buffer, struct kernel_param *kp)
-{
- return param_get_uint(buffer, kp);
-}
+static struct kernel_param_ops param_ops_portnr = {
+ .set = param_set_portnr,
+ .get = param_get_uint,
+};
+
#define param_check_portnr(name, p) \
__param_check(name, p, unsigned int);
module_param_named(min_resvport, xprt_min_resvport, portnr, 0644);
module_param_named(max_resvport, xprt_max_resvport, portnr, 0644);
-static int param_set_slot_table_size(const char *val, struct kernel_param *kp)
+static int param_set_slot_table_size(const char *val,
+ const struct kernel_param *kp)
{
return param_set_uint_minmax(val, kp,
RPC_MIN_SLOT_TABLE,
RPC_MAX_SLOT_TABLE);
}
-static int param_get_slot_table_size(char *buffer, struct kernel_param *kp)
-{
- return param_get_uint(buffer, kp);
-}
+static struct kernel_param_ops param_ops_slot_table_size = {
+ .set = param_set_slot_table_size,
+ .get = param_get_uint,
+};
+
#define param_check_slot_table_size(name, p) \
__param_check(name, p, unsigned int);
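The xprtsock.c hunk above moves the bounded module parameters over to const kernel_param pointers and kernel_param_ops; the core of it is still param_set_uint_minmax(), which parses the string and rejects out-of-range values. A runnable userspace sketch of that parse-and-bound-check logic follows; the 665 and 1023 reserved-port bounds are illustrative assumptions.

/* Sketch of bounded-parameter parsing in the style of param_set_portnr():
 * parse an unsigned value and accept it only inside [min, max]. */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static int set_uint_minmax(const char *val, unsigned int *out,
			   unsigned int min, unsigned int max)
{
	char *end;
	unsigned long num;

	if (!val)
		return -EINVAL;
	errno = 0;
	num = strtoul(val, &end, 0);
	if (errno || *end || num < min || num > max)
		return -EINVAL;
	*out = (unsigned int)num;
	return 0;
}

int main(void)
{
	unsigned int port = 0;
	int ret = set_uint_minmax("665", &port, 665, 1023);

	printf("665  -> %d (port=%u)\n", ret, port);
	printf("2049 -> %d\n", set_uint_minmax("2049", &port, 665, 1023));
	return 0;
}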
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index a3bfd4064912..90a051912c03 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -558,10 +558,7 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
struct tipc_bearer *unused1,
struct tipc_media_addr *unused2)
{
- static int send_count = 0;
-
int bp_index;
- int swap_time;
/* Prepare buffer for broadcasting (if first time trying to send it) */
@@ -575,11 +572,6 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
msg_set_mc_netid(msg, tipc_net_id);
}
- /* Determine if bearer pairs should be swapped following this attempt */
-
- if ((swap_time = (++send_count >= 10)))
- send_count = 0;
-
/* Send buffer over bearers until all targets reached */
bcbearer->remains = tipc_cltr_bcast_nodes;
@@ -595,21 +587,22 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
if (bcbearer->remains_new.count == bcbearer->remains.count)
continue; /* bearer pair doesn't add anything */
- if (!p->publ.blocked &&
- !p->media->send_msg(buf, &p->publ, &p->media->bcast_addr)) {
- if (swap_time && s && !s->publ.blocked)
- goto swap;
- else
- goto update;
+ if (p->publ.blocked ||
+ p->media->send_msg(buf, &p->publ, &p->media->bcast_addr)) {
+ /* unable to send on primary bearer */
+ if (!s || s->publ.blocked ||
+ s->media->send_msg(buf, &s->publ,
+ &s->media->bcast_addr)) {
+ /* unable to send on either bearer */
+ continue;
+ }
+ }
+
+ if (s) {
+ bcbearer->bpairs[bp_index].primary = s;
+ bcbearer->bpairs[bp_index].secondary = p;
}
- if (!s || s->publ.blocked ||
- s->media->send_msg(buf, &s->publ, &s->media->bcast_addr))
- continue; /* unable to send using bearer pair */
-swap:
- bcbearer->bpairs[bp_index].primary = s;
- bcbearer->bpairs[bp_index].secondary = p;
-update:
if (bcbearer->remains_new.count == 0)
return 0;
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 52c571fedbe0..4e84c8431f32 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -49,7 +49,7 @@
#include "config.h"
-#define TIPC_MOD_VER "1.6.4"
+#define TIPC_MOD_VER "2.0.0"
#ifndef CONFIG_TIPC_ZONES
#define CONFIG_TIPC_ZONES 3
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 1a7e4665af80..c76e82e5f982 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -877,7 +877,7 @@ static void link_state_event(struct link *l_ptr, unsigned event)
case TIMEOUT_EVT:
dbg_link("TIM ");
if (l_ptr->next_in_no != l_ptr->checkpoint) {
- dbg_link("-> WW \n");
+ dbg_link("-> WW\n");
l_ptr->state = WORKING_WORKING;
l_ptr->fsm_msg_cnt = 0;
l_ptr->checkpoint = l_ptr->next_in_no;
@@ -934,7 +934,7 @@ static void link_state_event(struct link *l_ptr, unsigned event)
link_set_timer(l_ptr, cont_intv);
break;
case RESET_MSG:
- dbg_link("RES \n");
+ dbg_link("RES\n");
dbg_link(" -> RR\n");
l_ptr->state = RESET_RESET;
l_ptr->fsm_msg_cnt = 0;
@@ -947,7 +947,7 @@ static void link_state_event(struct link *l_ptr, unsigned event)
l_ptr->started = 1;
/* fall through */
case TIMEOUT_EVT:
- dbg_link("TIM \n");
+ dbg_link("TIM\n");
tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv);
@@ -1553,7 +1553,7 @@ u32 tipc_link_push_packet(struct link *l_ptr)
/* Continue retransmission now, if there is anything: */
- if (r_q_size && buf && !skb_cloned(buf)) {
+ if (r_q_size && buf) {
msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
@@ -1722,15 +1722,16 @@ void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
dbg("Retransmitting %u in link %x\n", retransmits, l_ptr);
if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
- if (!skb_cloned(buf)) {
+ if (l_ptr->retransm_queue_size == 0) {
msg_dbg(msg, ">NO_RETR->BCONG>");
dbg_print_link(l_ptr, " ");
l_ptr->retransm_queue_head = msg_seqno(msg);
l_ptr->retransm_queue_size = retransmits;
- return;
} else {
- /* Don't retransmit if driver already has the buffer */
+ err("Unexpected retransmit on link %s (qsize=%d)\n",
+ l_ptr->name, l_ptr->retransm_queue_size);
}
+ return;
} else {
/* Detect repeated retransmit failures on uncongested bearer */
@@ -1745,7 +1746,7 @@ void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
}
}
- while (retransmits && (buf != l_ptr->next_out) && buf && !skb_cloned(buf)) {
+ while (retransmits && (buf != l_ptr->next_out) && buf) {
msg = buf_msg(buf);
msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
@@ -3294,7 +3295,7 @@ static void link_dump_rec_queue(struct link *l_ptr)
info("buffer %x invalid\n", crs);
return;
}
- msg_dbg(buf_msg(crs), "In rec queue: \n");
+ msg_dbg(buf_msg(crs), "In rec queue:\n");
crs = crs->next;
}
}
diff --git a/net/tipc/net.c b/net/tipc/net.c
index f25b1cdb64eb..d7cd1e064a80 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -116,7 +116,7 @@
*/
DEFINE_RWLOCK(tipc_net_lock);
-struct _zone *tipc_zones[256] = { NULL, };
+static struct _zone *tipc_zones[256] = { NULL, };
struct network tipc_net = { tipc_zones };
struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref)
@@ -291,6 +291,6 @@ void tipc_net_stop(void)
tipc_bclink_stop();
net_stop();
write_unlock_bh(&tipc_net_lock);
- info("Left network mode \n");
+ info("Left network mode\n");
}
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 2c24e7d6d950..17cc394f424f 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -278,7 +278,7 @@ struct tipc_node *tipc_node_attach_link(struct link *l_ptr)
n_ptr->link_cnt++;
return n_ptr;
}
- err("Attempt to establish second link on <%s> to %s \n",
+ err("Attempt to establish second link on <%s> to %s\n",
l_ptr->b_ptr->publ.name,
addr_string_fill(addr_string, l_ptr->addr));
}
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index cfb20b80b3a1..66e889ba48fd 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -446,7 +446,7 @@ static unsigned int poll(struct file *file, struct socket *sock,
struct sock *sk = sock->sk;
u32 mask;
- poll_wait(file, sk->sk_sleep, wait);
+ poll_wait(file, sk_sleep(sk), wait);
if (!skb_queue_empty(&sk->sk_receive_queue) ||
(sock->state == SS_UNCONNECTED) ||
@@ -591,7 +591,7 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
break;
}
release_sock(sk);
- res = wait_event_interruptible(*sk->sk_sleep,
+ res = wait_event_interruptible(*sk_sleep(sk),
!tport->congested);
lock_sock(sk);
if (res)
@@ -650,7 +650,7 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
break;
}
release_sock(sk);
- res = wait_event_interruptible(*sk->sk_sleep,
+ res = wait_event_interruptible(*sk_sleep(sk),
(!tport->congested || !tport->connected));
lock_sock(sk);
if (res)
@@ -931,7 +931,7 @@ restart:
goto exit;
}
release_sock(sk);
- res = wait_event_interruptible(*sk->sk_sleep,
+ res = wait_event_interruptible(*sk_sleep(sk),
(!skb_queue_empty(&sk->sk_receive_queue) ||
(sock->state == SS_DISCONNECTING)));
lock_sock(sk);
@@ -1064,7 +1064,7 @@ restart:
goto exit;
}
release_sock(sk);
- res = wait_event_interruptible(*sk->sk_sleep,
+ res = wait_event_interruptible(*sk_sleep(sk),
(!skb_queue_empty(&sk->sk_receive_queue) ||
(sock->state == SS_DISCONNECTING)));
lock_sock(sk);
@@ -1271,8 +1271,8 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
tipc_disconnect_port(tipc_sk_port(sk));
}
- if (waitqueue_active(sk->sk_sleep))
- wake_up_interruptible(sk->sk_sleep);
+ if (waitqueue_active(sk_sleep(sk)))
+ wake_up_interruptible(sk_sleep(sk));
return TIPC_OK;
}
@@ -1343,8 +1343,8 @@ static void wakeupdispatch(struct tipc_port *tport)
{
struct sock *sk = (struct sock *)tport->usr_handle;
- if (waitqueue_active(sk->sk_sleep))
- wake_up_interruptible(sk->sk_sleep);
+ if (waitqueue_active(sk_sleep(sk)))
+ wake_up_interruptible(sk_sleep(sk));
}
/**
@@ -1426,7 +1426,7 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
release_sock(sk);
- res = wait_event_interruptible_timeout(*sk->sk_sleep,
+ res = wait_event_interruptible_timeout(*sk_sleep(sk),
(!skb_queue_empty(&sk->sk_receive_queue) ||
(sock->state != SS_CONNECTING)),
sk->sk_rcvtimeo);
@@ -1521,7 +1521,7 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
goto exit;
}
release_sock(sk);
- res = wait_event_interruptible(*sk->sk_sleep,
+ res = wait_event_interruptible(*sk_sleep(sk),
(!skb_queue_empty(&sk->sk_receive_queue)));
lock_sock(sk);
if (res)
@@ -1632,8 +1632,8 @@ restart:
/* Discard any unreceived messages; wake up sleeping tasks */
discard_rx_queue(sk);
- if (waitqueue_active(sk->sk_sleep))
- wake_up_interruptible(sk->sk_sleep);
+ if (waitqueue_active(sk_sleep(sk)))
+ wake_up_interruptible(sk_sleep(sk));
res = 0;
break;
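Editor's note: the socket.c conversion above (and the af_unix and x25 hunks later in this diff) replaces direct sk->sk_sleep dereferences with the sk_sleep() accessor, so callers no longer care where the wait queue actually lives. A rough sketch of the accessor assumed by these hunks:

/* Approximate shape only: after the socket_wq conversion seen in the
 * af_unix.c hunk below, the wait queue head is reached through sk->sk_wq
 * rather than a dedicated sk_sleep pointer. */
static inline wait_queue_head_t *sk_sleep(struct sock *sk)
{
	return &sk->sk_wq->wait;
}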
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index ff123e56114a..ab6eab4c45e2 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -274,7 +274,7 @@ static void subscr_cancel(struct tipc_subscr *s,
{
struct subscription *sub;
struct subscription *sub_temp;
- __u32 type, lower, upper;
+ __u32 type, lower, upper, timeout, filter;
int found = 0;
/* Find first matching subscription, exit if not found */
@@ -282,12 +282,18 @@ static void subscr_cancel(struct tipc_subscr *s,
type = ntohl(s->seq.type);
lower = ntohl(s->seq.lower);
upper = ntohl(s->seq.upper);
+ timeout = ntohl(s->timeout);
+ filter = ntohl(s->filter) & ~TIPC_SUB_CANCEL;
list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
subscription_list) {
if ((type == sub->seq.type) &&
(lower == sub->seq.lower) &&
- (upper == sub->seq.upper)) {
+ (upper == sub->seq.upper) &&
+ (timeout == sub->timeout) &&
+ (filter == sub->filter) &&
+ !memcmp(s->usr_handle,sub->evt.s.usr_handle,
+ sizeof(s->usr_handle)) ){
found = 1;
break;
}
@@ -304,7 +310,7 @@ static void subscr_cancel(struct tipc_subscr *s,
k_term_timer(&sub->timer);
spin_lock_bh(subscriber->lock);
}
- dbg("Cancel: removing sub %u,%u,%u from subscriber %x list\n",
+ dbg("Cancel: removing sub %u,%u,%u from subscriber %p list\n",
sub->seq.type, sub->seq.lower, sub->seq.upper, subscriber);
subscr_del(sub);
}
@@ -352,8 +358,7 @@ static struct subscription *subscr_subscribe(struct tipc_subscr *s,
sub->seq.upper = ntohl(s->seq.upper);
sub->timeout = ntohl(s->timeout);
sub->filter = ntohl(s->filter);
- if ((!(sub->filter & TIPC_SUB_PORTS) ==
- !(sub->filter & TIPC_SUB_SERVICE)) ||
+ if ((sub->filter && (sub->filter != TIPC_SUB_PORTS)) ||
(sub->seq.lower > sub->seq.upper)) {
warn("Subscription rejected, illegal request\n");
kfree(sub);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 3d9122e78f41..fef2cc5e9d2b 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -313,13 +313,16 @@ static inline int unix_writable(struct sock *sk)
static void unix_write_space(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
+ struct socket_wq *wq;
+
+ rcu_read_lock();
if (unix_writable(sk)) {
- if (sk_has_sleeper(sk))
- wake_up_interruptible_sync(sk->sk_sleep);
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_sync(&wq->wait);
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
}
- read_unlock(&sk->sk_callback_lock);
+ rcu_read_unlock();
}
/* When dgram socket disconnects (or changes its peer), we clear its receive
@@ -406,9 +409,7 @@ static int unix_release_sock(struct sock *sk, int embrion)
skpair->sk_err = ECONNRESET;
unix_state_unlock(skpair);
skpair->sk_state_change(skpair);
- read_lock(&skpair->sk_callback_lock);
sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
- read_unlock(&skpair->sk_callback_lock);
}
sock_put(skpair); /* It may now die */
unix_peer(sk) = NULL;
@@ -1142,7 +1143,7 @@ restart:
newsk->sk_peercred.pid = task_tgid_vnr(current);
current_euid_egid(&newsk->sk_peercred.uid, &newsk->sk_peercred.gid);
newu = unix_sk(newsk);
- newsk->sk_sleep = &newu->peer_wait;
+ newsk->sk_wq = &newu->peer_wq;
otheru = unix_sk(other);
/* copy address information from listening to new sock*/
@@ -1736,7 +1737,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo)
unix_state_lock(sk);
for (;;) {
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
if (!skb_queue_empty(&sk->sk_receive_queue) ||
sk->sk_err ||
@@ -1752,7 +1753,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo)
clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
unix_state_unlock(sk);
return timeo;
}
@@ -1931,12 +1932,10 @@ static int unix_shutdown(struct socket *sock, int mode)
other->sk_shutdown |= peer_mode;
unix_state_unlock(other);
other->sk_state_change(other);
- read_lock(&other->sk_callback_lock);
if (peer_mode == SHUTDOWN_MASK)
sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
else if (peer_mode & RCV_SHUTDOWN)
sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
- read_unlock(&other->sk_callback_lock);
}
if (other)
sock_put(other);
@@ -1991,7 +1990,7 @@ static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table
struct sock *sk = sock->sk;
unsigned int mask;
- sock_poll_wait(file, sk->sk_sleep, wait);
+ sock_poll_wait(file, sk_sleep(sk), wait);
mask = 0;
/* exceptional events? */
@@ -2028,7 +2027,7 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
struct sock *sk = sock->sk, *other;
unsigned int mask, writable;
- sock_poll_wait(file, sk->sk_sleep, wait);
+ sock_poll_wait(file, sk_sleep(sk), wait);
mask = 0;
/* exceptional events? */
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 14c22c3768da..c8df6fda0b1f 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -153,15 +153,6 @@ void unix_notinflight(struct file *fp)
}
}
-static inline struct sk_buff *sock_queue_head(struct sock *sk)
-{
- return (struct sk_buff *)&sk->sk_receive_queue;
-}
-
-#define receive_queue_for_each_skb(sk, next, skb) \
- for (skb = sock_queue_head(sk)->next, next = skb->next; \
- skb != sock_queue_head(sk); skb = next, next = skb->next)
-
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
struct sk_buff_head *hitlist)
{
@@ -169,7 +160,7 @@ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
struct sk_buff *next;
spin_lock(&x->sk_receive_queue.lock);
- receive_queue_for_each_skb(x, next, skb) {
+ skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
/*
* Do we have file descriptors ?
*/
@@ -225,7 +216,7 @@ static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
* and perform a scan on them as well.
*/
spin_lock(&x->sk_receive_queue.lock);
- receive_queue_for_each_skb(x, next, skb) {
+ skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
u = unix_sk(skb->sk);
/*
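Editor's note: the garbage.c hunk drops the local receive_queue_for_each_skb() helper in favour of the generic skb_queue_walk_safe() iterator. A small, self-contained sketch of that iterator (the queue and the drop condition are illustrative, not taken from this patch):

#include <linux/skbuff.h>

/* Walk a queue under its lock, using the _safe variant so the current
 * entry may be unlinked and freed without breaking the iteration. */
static void drop_marked(struct sk_buff_head *queue)
{
	struct sk_buff *skb, *next;

	spin_lock(&queue->lock);
	skb_queue_walk_safe(queue, skb, next) {
		if (skb->mark) {		/* illustrative condition */
			__skb_unlink(skb, queue);
			kfree_skb(skb);
		}
	}
	spin_unlock(&queue->lock);
}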
diff --git a/net/wimax/op-reset.c b/net/wimax/op-reset.c
index 4dc82a54ba30..68bedf3e5443 100644
--- a/net/wimax/op-reset.c
+++ b/net/wimax/op-reset.c
@@ -110,7 +110,6 @@ int wimax_gnl_doit_reset(struct sk_buff *skb, struct genl_info *info)
{
int result, ifindex;
struct wimax_dev *wimax_dev;
- struct device *dev;
d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
result = -ENODEV;
@@ -123,7 +122,6 @@ int wimax_gnl_doit_reset(struct sk_buff *skb, struct genl_info *info)
wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
if (wimax_dev == NULL)
goto error_no_wimax_dev;
- dev = wimax_dev_to_dev(wimax_dev);
/* Execute the operation and send the result back to user space */
result = wimax_reset(wimax_dev);
dev_put(wimax_dev->net_dev);
diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c
index e978c7136c97..2609e445fe7d 100644
--- a/net/wimax/op-rfkill.c
+++ b/net/wimax/op-rfkill.c
@@ -43,7 +43,7 @@
* wimax_rfkill() Kernel calling wimax_rfkill()
* __wimax_rf_toggle_radio()
*
- * wimax_rfkill_set_radio_block() RF-Kill subsytem calling
+ * wimax_rfkill_set_radio_block() RF-Kill subsystem calling
* __wimax_rf_toggle_radio()
*
* __wimax_rf_toggle_radio()
diff --git a/net/wimax/op-state-get.c b/net/wimax/op-state-get.c
index 11ad3356eb56..aff8776e2d41 100644
--- a/net/wimax/op-state-get.c
+++ b/net/wimax/op-state-get.c
@@ -53,7 +53,6 @@ int wimax_gnl_doit_state_get(struct sk_buff *skb, struct genl_info *info)
{
int result, ifindex;
struct wimax_dev *wimax_dev;
- struct device *dev;
d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
result = -ENODEV;
@@ -66,7 +65,6 @@ int wimax_gnl_doit_state_get(struct sk_buff *skb, struct genl_info *info)
wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
if (wimax_dev == NULL)
goto error_no_wimax_dev;
- dev = wimax_dev_to_dev(wimax_dev);
/* Execute the operation and send the result back to user space */
result = wimax_state_get(wimax_dev);
dev_put(wimax_dev->net_dev);
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index bf1737fc9a7e..d92d088026bf 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -10,38 +10,6 @@
#include "core.h"
struct ieee80211_channel *
-rdev_fixed_channel(struct cfg80211_registered_device *rdev,
- struct wireless_dev *for_wdev)
-{
- struct wireless_dev *wdev;
- struct ieee80211_channel *result = NULL;
-
- WARN_ON(!mutex_is_locked(&rdev->devlist_mtx));
-
- list_for_each_entry(wdev, &rdev->netdev_list, list) {
- if (wdev == for_wdev)
- continue;
-
- /*
- * Lock manually to tell lockdep about allowed
- * nesting here if for_wdev->mtx is held already.
- * This is ok as it's all under the rdev devlist
- * mutex and as such can only be done once at any
- * given time.
- */
- mutex_lock_nested(&wdev->mtx, SINGLE_DEPTH_NESTING);
- if (wdev->current_bss)
- result = wdev->current_bss->pub.channel;
- wdev_unlock(wdev);
-
- if (result)
- break;
- }
-
- return result;
-}
-
-struct ieee80211_channel *
rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
int freq, enum nl80211_channel_type channel_type)
{
@@ -75,15 +43,22 @@ rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
return chan;
}
-int rdev_set_freq(struct cfg80211_registered_device *rdev,
- struct wireless_dev *for_wdev,
- int freq, enum nl80211_channel_type channel_type)
+int cfg80211_set_freq(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev, int freq,
+ enum nl80211_channel_type channel_type)
{
struct ieee80211_channel *chan;
int result;
- if (rdev_fixed_channel(rdev, for_wdev))
- return -EBUSY;
+ if (wdev->iftype == NL80211_IFTYPE_MONITOR)
+ wdev = NULL;
+
+ if (wdev) {
+ ASSERT_WDEV_LOCK(wdev);
+
+ if (!netif_running(wdev->netdev))
+ return -ENETDOWN;
+ }
if (!rdev->ops->set_channel)
return -EOPNOTSUPP;
@@ -92,11 +67,14 @@ int rdev_set_freq(struct cfg80211_registered_device *rdev,
if (!chan)
return -EINVAL;
- result = rdev->ops->set_channel(&rdev->wiphy, chan, channel_type);
+ result = rdev->ops->set_channel(&rdev->wiphy,
+ wdev ? wdev->netdev : NULL,
+ chan, channel_type);
if (result)
return result;
- rdev->channel = chan;
+ if (wdev)
+ wdev->channel = chan;
return 0;
}
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 6ac70c101523..37d0e0ab4432 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -705,7 +705,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
wdev->ps = true;
else
wdev->ps = false;
- wdev->ps_timeout = 100;
+ /* allow mac80211 to determine the timeout */
+ wdev->ps_timeout = -1;
if (rdev->ops->set_power_mgmt)
if (rdev->ops->set_power_mgmt(wdev->wiphy, dev,
wdev->ps,
diff --git a/net/wireless/core.h b/net/wireless/core.h
index d52da913145a..ae930acf75e9 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -70,9 +70,6 @@ struct cfg80211_registered_device {
struct work_struct conn_work;
struct work_struct event_work;
- /* current channel */
- struct ieee80211_channel *channel;
-
/* must be last because of the way we do wiphy_priv(),
* and it should at least be aligned to NETDEV_ALIGN */
struct wiphy wiphy __attribute__((__aligned__(NETDEV_ALIGN)));
@@ -293,13 +290,15 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
const u8 *bssid,
const u8 *ssid, int ssid_len,
const u8 *ie, int ie_len,
- const u8 *key, int key_len, int key_idx);
+ const u8 *key, int key_len, int key_idx,
+ bool local_state_change);
int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
struct net_device *dev, struct ieee80211_channel *chan,
enum nl80211_auth_type auth_type, const u8 *bssid,
const u8 *ssid, int ssid_len,
const u8 *ie, int ie_len,
- const u8 *key, int key_len, int key_idx);
+ const u8 *key, int key_len, int key_idx,
+ bool local_state_change);
int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
struct net_device *dev,
struct ieee80211_channel *chan,
@@ -315,13 +314,16 @@ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
struct cfg80211_crypto_settings *crypt);
int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
struct net_device *dev, const u8 *bssid,
- const u8 *ie, int ie_len, u16 reason);
+ const u8 *ie, int ie_len, u16 reason,
+ bool local_state_change);
int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
struct net_device *dev, const u8 *bssid,
- const u8 *ie, int ie_len, u16 reason);
+ const u8 *ie, int ie_len, u16 reason,
+ bool local_state_change);
int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
struct net_device *dev, const u8 *bssid,
- const u8 *ie, int ie_len, u16 reason);
+ const u8 *ie, int ie_len, u16 reason,
+ bool local_state_change);
void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
struct net_device *dev);
void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
@@ -383,14 +385,11 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
struct ieee80211_channel *
-rdev_fixed_channel(struct cfg80211_registered_device *rdev,
- struct wireless_dev *for_wdev);
-struct ieee80211_channel *
rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
int freq, enum nl80211_channel_type channel_type);
-int rdev_set_freq(struct cfg80211_registered_device *rdev,
- struct wireless_dev *for_wdev,
- int freq, enum nl80211_channel_type channel_type);
+int cfg80211_set_freq(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev, int freq,
+ enum nl80211_channel_type channel_type);
u16 cfg80211_calculate_bitrate(struct rate_info *rate);
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 6a5acf750174..adcabba02e20 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -81,15 +81,10 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
struct cfg80211_cached_keys *connkeys)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
- struct ieee80211_channel *chan;
int err;
ASSERT_WDEV_LOCK(wdev);
- chan = rdev_fixed_channel(rdev, wdev);
- if (chan && chan != params->channel)
- return -EBUSY;
-
if (wdev->ssid_len)
return -EALREADY;
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 22139fa46115..48ead6f0426d 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -378,7 +378,8 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
const u8 *bssid,
const u8 *ssid, int ssid_len,
const u8 *ie, int ie_len,
- const u8 *key, int key_len, int key_idx)
+ const u8 *key, int key_len, int key_idx,
+ bool local_state_change)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_auth_request req;
@@ -408,6 +409,7 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
memset(&req, 0, sizeof(req));
+ req.local_state_change = local_state_change;
req.ie = ie;
req.ie_len = ie_len;
req.auth_type = auth_type;
@@ -434,12 +436,18 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
goto out;
}
- wdev->authtry_bsses[slot] = bss;
+ if (local_state_change)
+ wdev->auth_bsses[slot] = bss;
+ else
+ wdev->authtry_bsses[slot] = bss;
cfg80211_hold_bss(bss);
err = rdev->ops->auth(&rdev->wiphy, dev, &req);
if (err) {
- wdev->authtry_bsses[slot] = NULL;
+ if (local_state_change)
+ wdev->auth_bsses[slot] = NULL;
+ else
+ wdev->authtry_bsses[slot] = NULL;
cfg80211_unhold_bss(bss);
}
@@ -454,14 +462,15 @@ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
enum nl80211_auth_type auth_type, const u8 *bssid,
const u8 *ssid, int ssid_len,
const u8 *ie, int ie_len,
- const u8 *key, int key_len, int key_idx)
+ const u8 *key, int key_len, int key_idx,
+ bool local_state_change)
{
int err;
wdev_lock(dev->ieee80211_ptr);
err = __cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid,
ssid, ssid_len, ie, ie_len,
- key, key_len, key_idx);
+ key, key_len, key_idx, local_state_change);
wdev_unlock(dev->ieee80211_ptr);
return err;
@@ -555,7 +564,8 @@ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
struct net_device *dev, const u8 *bssid,
- const u8 *ie, int ie_len, u16 reason)
+ const u8 *ie, int ie_len, u16 reason,
+ bool local_state_change)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_deauth_request req;
@@ -565,6 +575,7 @@ int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
memset(&req, 0, sizeof(req));
req.reason_code = reason;
+ req.local_state_change = local_state_change;
req.ie = ie;
req.ie_len = ie_len;
if (wdev->current_bss &&
@@ -591,13 +602,15 @@ int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
struct net_device *dev, const u8 *bssid,
- const u8 *ie, int ie_len, u16 reason)
+ const u8 *ie, int ie_len, u16 reason,
+ bool local_state_change)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
int err;
wdev_lock(wdev);
- err = __cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason);
+ err = __cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason,
+ local_state_change);
wdev_unlock(wdev);
return err;
@@ -605,7 +618,8 @@ int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
static int __cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
struct net_device *dev, const u8 *bssid,
- const u8 *ie, int ie_len, u16 reason)
+ const u8 *ie, int ie_len, u16 reason,
+ bool local_state_change)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_disassoc_request req;
@@ -620,6 +634,7 @@ static int __cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
memset(&req, 0, sizeof(req));
req.reason_code = reason;
+ req.local_state_change = local_state_change;
req.ie = ie;
req.ie_len = ie_len;
if (memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0)
@@ -632,13 +647,15 @@ static int __cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
struct net_device *dev, const u8 *bssid,
- const u8 *ie, int ie_len, u16 reason)
+ const u8 *ie, int ie_len, u16 reason,
+ bool local_state_change)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
int err;
wdev_lock(wdev);
- err = __cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason);
+ err = __cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason,
+ local_state_change);
wdev_unlock(wdev);
return err;
@@ -895,3 +912,16 @@ void cfg80211_action_tx_status(struct net_device *dev, u64 cookie,
nl80211_send_action_tx_status(rdev, dev, cookie, buf, len, ack, gfp);
}
EXPORT_SYMBOL(cfg80211_action_tx_status);
+
+void cfg80211_cqm_rssi_notify(struct net_device *dev,
+ enum nl80211_cqm_rssi_threshold_event rssi_event,
+ gfp_t gfp)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct wiphy *wiphy = wdev->wiphy;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+
+ /* Indicate roaming trigger event to user space */
+ nl80211_send_cqm_rssi_notify(rdev, dev, rssi_event, gfp);
+}
+EXPORT_SYMBOL(cfg80211_cqm_rssi_notify);
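Editor's note: cfg80211_cqm_rssi_notify() added above is the driver-facing half of the CQM feature; the nl80211 hunk below adds the set_cqm_rssi_config configuration path. A hedged sketch of how a driver might report a threshold crossing, assuming the usual _LOW/_HIGH event values from the same series and illustrative trigger variables:

/* Illustrative only: report an RSSI threshold crossing to user space. */
static void report_rssi_crossing(struct net_device *dev, int rssi_dbm,
				 int threshold_dbm)
{
	enum nl80211_cqm_rssi_threshold_event ev;

	ev = (rssi_dbm < threshold_dbm) ?
		NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW :
		NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;

	cfg80211_cqm_rssi_notify(dev, ev, GFP_KERNEL);
}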
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 030cf153bea2..aaa1aad566cd 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -150,6 +150,9 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
.len = IEEE80211_MAX_DATA_LEN },
[NL80211_ATTR_FRAME_MATCH] = { .type = NLA_BINARY, },
[NL80211_ATTR_PS_STATE] = { .type = NLA_U32 },
+ [NL80211_ATTR_CQM] = { .type = NLA_NESTED, },
+ [NL80211_ATTR_LOCAL_STATE_CHANGE] = { .type = NLA_FLAG },
+ [NL80211_ATTR_AP_ISOLATE] = { .type = NLA_U8 },
};
/* policy for the attributes */
@@ -586,6 +589,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
i++;
NLA_PUT_U32(msg, i, NL80211_CMD_SET_WIPHY_NETNS);
}
+ CMD(set_channel, SET_CHANNEL);
#undef CMD
@@ -686,10 +690,90 @@ static int parse_txq_params(struct nlattr *tb[],
return 0;
}
+static bool nl80211_can_set_dev_channel(struct wireless_dev *wdev)
+{
+ /*
+ * You can only set the channel explicitly for AP, mesh
+ * and WDS type interfaces; all others have their channel
+ * managed via their respective "establish a connection"
+ * command (connect, join, ...)
+ *
+ * Monitors are special as they are normally slaved to
+ * whatever else is going on, so they behave as though
+ * you tried setting the wiphy channel itself.
+ */
+ return !wdev ||
+ wdev->iftype == NL80211_IFTYPE_AP ||
+ wdev->iftype == NL80211_IFTYPE_WDS ||
+ wdev->iftype == NL80211_IFTYPE_MESH_POINT ||
+ wdev->iftype == NL80211_IFTYPE_MONITOR;
+}
+
+static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev,
+ struct genl_info *info)
+{
+ enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
+ u32 freq;
+ int result;
+
+ if (!info->attrs[NL80211_ATTR_WIPHY_FREQ])
+ return -EINVAL;
+
+ if (!nl80211_can_set_dev_channel(wdev))
+ return -EOPNOTSUPP;
+
+ if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
+ channel_type = nla_get_u32(info->attrs[
+ NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
+ if (channel_type != NL80211_CHAN_NO_HT &&
+ channel_type != NL80211_CHAN_HT20 &&
+ channel_type != NL80211_CHAN_HT40PLUS &&
+ channel_type != NL80211_CHAN_HT40MINUS)
+ return -EINVAL;
+ }
+
+ freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
+
+ mutex_lock(&rdev->devlist_mtx);
+ if (wdev) {
+ wdev_lock(wdev);
+ result = cfg80211_set_freq(rdev, wdev, freq, channel_type);
+ wdev_unlock(wdev);
+ } else {
+ result = cfg80211_set_freq(rdev, NULL, freq, channel_type);
+ }
+ mutex_unlock(&rdev->devlist_mtx);
+
+ return result;
+}
+
+static int nl80211_set_channel(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev;
+ struct net_device *netdev;
+ int result;
+
+ rtnl_lock();
+
+ result = get_rdev_dev_by_info_ifindex(info, &rdev, &netdev);
+ if (result)
+ goto unlock;
+
+ result = __nl80211_set_channel(rdev, netdev->ieee80211_ptr, info);
+
+ unlock:
+ rtnl_unlock();
+
+ return result;
+}
+
static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev;
- int result = 0, rem_txq_params = 0;
+ struct net_device *netdev = NULL;
+ struct wireless_dev *wdev;
+ int result, rem_txq_params = 0;
struct nlattr *nl_txq_params;
u32 changed;
u8 retry_short = 0, retry_long = 0;
@@ -698,16 +782,50 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
rtnl_lock();
+ /*
+ * Try to find the wiphy and netdev. Normally this
+ * function shouldn't need the netdev, but this is
+ * done for backward compatibility -- previously
+ * setting the channel was done per wiphy, but now
+ * it is per netdev. Previous userland like hostapd
+ * also passed a netdev to set_wiphy, so that it is
+ * possible to let that go to the right netdev!
+ */
mutex_lock(&cfg80211_mutex);
- rdev = __cfg80211_rdev_from_info(info);
- if (IS_ERR(rdev)) {
- mutex_unlock(&cfg80211_mutex);
- result = PTR_ERR(rdev);
- goto unlock;
+ if (info->attrs[NL80211_ATTR_IFINDEX]) {
+ int ifindex = nla_get_u32(info->attrs[NL80211_ATTR_IFINDEX]);
+
+ netdev = dev_get_by_index(genl_info_net(info), ifindex);
+ if (netdev && netdev->ieee80211_ptr) {
+ rdev = wiphy_to_dev(netdev->ieee80211_ptr->wiphy);
+ mutex_lock(&rdev->mtx);
+ } else
+ netdev = NULL;
}
- mutex_lock(&rdev->mtx);
+ if (!netdev) {
+ rdev = __cfg80211_rdev_from_info(info);
+ if (IS_ERR(rdev)) {
+ mutex_unlock(&cfg80211_mutex);
+ result = PTR_ERR(rdev);
+ goto unlock;
+ }
+ wdev = NULL;
+ netdev = NULL;
+ result = 0;
+
+ mutex_lock(&rdev->mtx);
+ } else if (netif_running(netdev) &&
+ nl80211_can_set_dev_channel(netdev->ieee80211_ptr))
+ wdev = netdev->ieee80211_ptr;
+ else
+ wdev = NULL;
+
+ /*
+ * end workaround code, by now the rdev is available
+ * and locked, and wdev may or may not be NULL.
+ */
if (info->attrs[NL80211_ATTR_WIPHY_NAME])
result = cfg80211_dev_rename(
@@ -746,26 +864,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
}
if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
- enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
- u32 freq;
-
- result = -EINVAL;
-
- if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
- channel_type = nla_get_u32(info->attrs[
- NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
- if (channel_type != NL80211_CHAN_NO_HT &&
- channel_type != NL80211_CHAN_HT20 &&
- channel_type != NL80211_CHAN_HT40PLUS &&
- channel_type != NL80211_CHAN_HT40MINUS)
- goto bad_res;
- }
-
- freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
-
- mutex_lock(&rdev->devlist_mtx);
- result = rdev_set_freq(rdev, NULL, freq, channel_type);
- mutex_unlock(&rdev->devlist_mtx);
+ result = __nl80211_set_channel(rdev, wdev, info);
if (result)
goto bad_res;
}
@@ -862,6 +961,8 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
bad_res:
mutex_unlock(&rdev->mtx);
+ if (netdev)
+ dev_put(netdev);
unlock:
rtnl_unlock();
return result;
@@ -2096,7 +2197,8 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info)
goto out_rtnl;
if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) {
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) {
err = -EINVAL;
goto out;
}
@@ -2439,6 +2541,7 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
params.use_cts_prot = -1;
params.use_short_preamble = -1;
params.use_short_slot_time = -1;
+ params.ap_isolate = -1;
if (info->attrs[NL80211_ATTR_BSS_CTS_PROT])
params.use_cts_prot =
@@ -2455,6 +2558,8 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
params.basic_rates_len =
nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]);
}
+ if (info->attrs[NL80211_ATTR_AP_ISOLATE])
+ params.ap_isolate = !!nla_get_u8(info->attrs[NL80211_ATTR_AP_ISOLATE]);
rtnl_lock();
@@ -3392,6 +3497,7 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
int err, ssid_len, ie_len = 0;
enum nl80211_auth_type auth_type;
struct key_parse key;
+ bool local_state_change;
if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
return -EINVAL;
@@ -3470,9 +3576,12 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
goto out;
}
+ local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE];
+
err = cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid,
ssid, ssid_len, ie, ie_len,
- key.p.key, key.p.key_len, key.idx);
+ key.p.key, key.p.key_len, key.idx,
+ local_state_change);
out:
cfg80211_unlock_rdev(rdev);
@@ -3551,9 +3660,8 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev;
struct net_device *dev;
- struct wireless_dev *wdev;
struct cfg80211_crypto_settings crypto;
- struct ieee80211_channel *chan, *fixedchan;
+ struct ieee80211_channel *chan;
const u8 *bssid, *ssid, *ie = NULL, *prev_bssid = NULL;
int err, ssid_len, ie_len = 0;
bool use_mfp = false;
@@ -3596,16 +3704,6 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- mutex_lock(&rdev->devlist_mtx);
- wdev = dev->ieee80211_ptr;
- fixedchan = rdev_fixed_channel(rdev, wdev);
- if (fixedchan && chan != fixedchan) {
- err = -EBUSY;
- mutex_unlock(&rdev->devlist_mtx);
- goto out;
- }
- mutex_unlock(&rdev->devlist_mtx);
-
ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
@@ -3649,6 +3747,7 @@ static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info)
const u8 *ie = NULL, *bssid;
int err, ie_len = 0;
u16 reason_code;
+ bool local_state_change;
if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
return -EINVAL;
@@ -3694,7 +3793,10 @@ static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info)
ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
}
- err = cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason_code);
+ local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE];
+
+ err = cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason_code,
+ local_state_change);
out:
cfg80211_unlock_rdev(rdev);
@@ -3711,6 +3813,7 @@ static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info)
const u8 *ie = NULL, *bssid;
int err, ie_len = 0;
u16 reason_code;
+ bool local_state_change;
if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
return -EINVAL;
@@ -3756,7 +3859,10 @@ static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info)
ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
}
- err = cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason_code);
+ local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE];
+
+ err = cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason_code,
+ local_state_change);
out:
cfg80211_unlock_rdev(rdev);
@@ -4779,6 +4885,84 @@ unlock_rtnl:
return err;
}
+static struct nla_policy
+nl80211_attr_cqm_policy[NL80211_ATTR_CQM_MAX + 1] __read_mostly = {
+ [NL80211_ATTR_CQM_RSSI_THOLD] = { .type = NLA_U32 },
+ [NL80211_ATTR_CQM_RSSI_HYST] = { .type = NLA_U32 },
+ [NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT] = { .type = NLA_U32 },
+};
+
+static int nl80211_set_cqm_rssi(struct genl_info *info,
+ s32 threshold, u32 hysteresis)
+{
+ struct cfg80211_registered_device *rdev;
+ struct wireless_dev *wdev;
+ struct net_device *dev;
+ int err;
+
+ if (threshold > 0)
+ return -EINVAL;
+
+ rtnl_lock();
+
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
+ if (err)
+ goto unlock_rdev;
+
+ wdev = dev->ieee80211_ptr;
+
+ if (!rdev->ops->set_cqm_rssi_config) {
+ err = -EOPNOTSUPP;
+ goto unlock_rdev;
+ }
+
+ if (wdev->iftype != NL80211_IFTYPE_STATION) {
+ err = -EOPNOTSUPP;
+ goto unlock_rdev;
+ }
+
+ err = rdev->ops->set_cqm_rssi_config(wdev->wiphy, dev,
+ threshold, hysteresis);
+
+unlock_rdev:
+ cfg80211_unlock_rdev(rdev);
+ dev_put(dev);
+ rtnl_unlock();
+
+ return err;
+}
+
+static int nl80211_set_cqm(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attrs[NL80211_ATTR_CQM_MAX + 1];
+ struct nlattr *cqm;
+ int err;
+
+ cqm = info->attrs[NL80211_ATTR_CQM];
+ if (!cqm) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = nla_parse_nested(attrs, NL80211_ATTR_CQM_MAX, cqm,
+ nl80211_attr_cqm_policy);
+ if (err)
+ goto out;
+
+ if (attrs[NL80211_ATTR_CQM_RSSI_THOLD] &&
+ attrs[NL80211_ATTR_CQM_RSSI_HYST]) {
+ s32 threshold;
+ u32 hysteresis;
+ threshold = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_THOLD]);
+ hysteresis = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_HYST]);
+ err = nl80211_set_cqm_rssi(info, threshold, hysteresis);
+ } else
+ err = -EINVAL;
+
+out:
+ return err;
+}
+
static struct genl_ops nl80211_ops[] = {
{
.cmd = NL80211_CMD_GET_WIPHY,
@@ -5083,6 +5267,18 @@ static struct genl_ops nl80211_ops[] = {
.policy = nl80211_policy,
/* can be retrieved by unprivileged users */
},
+ {
+ .cmd = NL80211_CMD_SET_CQM,
+ .doit = nl80211_set_cqm,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = NL80211_CMD_SET_CHANNEL,
+ .doit = nl80211_set_channel,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
};
static struct genl_multicast_group nl80211_mlme_mcgrp = {
@@ -5833,6 +6029,52 @@ void nl80211_send_action_tx_status(struct cfg80211_registered_device *rdev,
nlmsg_free(msg);
}
+void
+nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev,
+ enum nl80211_cqm_rssi_threshold_event rssi_event,
+ gfp_t gfp)
+{
+ struct sk_buff *msg;
+ struct nlattr *pinfoattr;
+ void *hdr;
+
+ msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+ if (!msg)
+ return;
+
+ hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NOTIFY_CQM);
+ if (!hdr) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
+ NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
+
+ pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM);
+ if (!pinfoattr)
+ goto nla_put_failure;
+
+ NLA_PUT_U32(msg, NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT,
+ rssi_event);
+
+ nla_nest_end(msg, pinfoattr);
+
+ if (genlmsg_end(msg, hdr) < 0) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+ nl80211_mlme_mcgrp.id, gfp);
+ return;
+
+ nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ nlmsg_free(msg);
+}
+
static int nl80211_netlink_notify(struct notifier_block * nb,
unsigned long state,
void *_notify)
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 4ca511102c6c..2ad7fbc7d9f1 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -82,4 +82,10 @@ void nl80211_send_action_tx_status(struct cfg80211_registered_device *rdev,
const u8 *buf, size_t len, bool ack,
gfp_t gfp);
+void
+nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev,
+ enum nl80211_cqm_rssi_threshold_event rssi_event,
+ gfp_t gfp);
+
#endif /* __NET_WIRELESS_NL80211_H */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 422da20d1e5b..8f0d97dd3109 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2356,10 +2356,10 @@ static void print_regdomain(const struct ieee80211_regdomain *rd)
rdev->country_ie_alpha2[1]);
} else
printk(KERN_INFO "cfg80211: Current regulatory "
- "domain intersected: \n");
+ "domain intersected:\n");
} else
- printk(KERN_INFO "cfg80211: Current regulatory "
- "domain intersected: \n");
+ printk(KERN_INFO "cfg80211: Current regulatory "
+ "domain intersected:\n");
} else if (is_world_regdom(rd->alpha2))
printk(KERN_INFO "cfg80211: World regulatory "
"domain updated:\n");
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index f4dfd5f5f2ea..72222f0074db 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -171,7 +171,7 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
params->ssid, params->ssid_len,
NULL, 0,
params->key, params->key_len,
- params->key_idx);
+ params->key_idx, false);
case CFG80211_CONN_ASSOCIATE_NEXT:
BUG_ON(!rdev->ops->assoc);
wdev->conn->state = CFG80211_CONN_ASSOCIATING;
@@ -186,12 +186,13 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
if (err)
__cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid,
NULL, 0,
- WLAN_REASON_DEAUTH_LEAVING);
+ WLAN_REASON_DEAUTH_LEAVING,
+ false);
return err;
case CFG80211_CONN_DEAUTH_ASSOC_FAIL:
__cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid,
NULL, 0,
- WLAN_REASON_DEAUTH_LEAVING);
+ WLAN_REASON_DEAUTH_LEAVING, false);
/* return an error so that we call __cfg80211_connect_result() */
return -EINVAL;
default:
@@ -517,12 +518,16 @@ void cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
ev->type = EVENT_CONNECT_RESULT;
if (bssid)
memcpy(ev->cr.bssid, bssid, ETH_ALEN);
- ev->cr.req_ie = ((u8 *)ev) + sizeof(*ev);
- ev->cr.req_ie_len = req_ie_len;
- memcpy((void *)ev->cr.req_ie, req_ie, req_ie_len);
- ev->cr.resp_ie = ((u8 *)ev) + sizeof(*ev) + req_ie_len;
- ev->cr.resp_ie_len = resp_ie_len;
- memcpy((void *)ev->cr.resp_ie, resp_ie, resp_ie_len);
+ if (req_ie_len) {
+ ev->cr.req_ie = ((u8 *)ev) + sizeof(*ev);
+ ev->cr.req_ie_len = req_ie_len;
+ memcpy((void *)ev->cr.req_ie, req_ie, req_ie_len);
+ }
+ if (resp_ie_len) {
+ ev->cr.resp_ie = ((u8 *)ev) + sizeof(*ev) + req_ie_len;
+ ev->cr.resp_ie_len = resp_ie_len;
+ memcpy((void *)ev->cr.resp_ie, resp_ie, resp_ie_len);
+ }
ev->cr.status = status;
spin_lock_irqsave(&wdev->event_lock, flags);
@@ -676,7 +681,8 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
continue;
bssid = wdev->auth_bsses[i]->pub.bssid;
ret = __cfg80211_mlme_deauth(rdev, dev, bssid, NULL, 0,
- WLAN_REASON_DEAUTH_LEAVING);
+ WLAN_REASON_DEAUTH_LEAVING,
+ false);
WARN(ret, "deauth failed: %d\n", ret);
}
}
@@ -735,7 +741,6 @@ int __cfg80211_connect(struct cfg80211_registered_device *rdev,
const u8 *prev_bssid)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
- struct ieee80211_channel *chan;
struct cfg80211_bss *bss = NULL;
int err;
@@ -744,10 +749,6 @@ int __cfg80211_connect(struct cfg80211_registered_device *rdev,
if (wdev->sme_state != CFG80211_SME_IDLE)
return -EALREADY;
- chan = rdev_fixed_channel(rdev, wdev);
- if (chan && chan != connect->channel)
- return -EBUSY;
-
if (WARN_ON(wdev->connect_keys)) {
kfree(wdev->connect_keys);
wdev->connect_keys = NULL;
@@ -935,7 +936,7 @@ int __cfg80211_disconnect(struct cfg80211_registered_device *rdev,
/* wdev->conn->params.bssid must be set if > SCANNING */
err = __cfg80211_mlme_deauth(rdev, dev,
wdev->conn->params.bssid,
- NULL, 0, reason);
+ NULL, 0, reason, false);
if (err)
return err;
} else {
@@ -991,7 +992,8 @@ void cfg80211_sme_disassoc(struct net_device *dev, int idx)
memcpy(bssid, wdev->auth_bsses[idx]->pub.bssid, ETH_ALEN);
if (__cfg80211_mlme_deauth(rdev, dev, bssid,
- NULL, 0, WLAN_REASON_DEAUTH_LEAVING)) {
+ NULL, 0, WLAN_REASON_DEAUTH_LEAVING,
+ false)) {
/* whatever -- assume gone anyway */
cfg80211_unhold_bss(wdev->auth_bsses[idx]);
cfg80211_put_bss(&wdev->auth_bsses[idx]->pub);
diff --git a/net/wireless/util.c b/net/wireless/util.c
index d3574a4eb3ba..3416373a9c0c 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -331,11 +331,18 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
if (iftype == NL80211_IFTYPE_MESH_POINT) {
struct ieee80211s_hdr *meshdr =
(struct ieee80211s_hdr *) (skb->data + hdrlen);
- hdrlen += ieee80211_get_mesh_hdrlen(meshdr);
+ /* make sure meshdr->flags is on the linear part */
+ if (!pskb_may_pull(skb, hdrlen + 1))
+ return -1;
if (meshdr->flags & MESH_FLAGS_AE_A5_A6) {
- memcpy(dst, meshdr->eaddr1, ETH_ALEN);
- memcpy(src, meshdr->eaddr2, ETH_ALEN);
+ skb_copy_bits(skb, hdrlen +
+ offsetof(struct ieee80211s_hdr, eaddr1),
+ dst, ETH_ALEN);
+ skb_copy_bits(skb, hdrlen +
+ offsetof(struct ieee80211s_hdr, eaddr2),
+ src, ETH_ALEN);
}
+ hdrlen += ieee80211_get_mesh_hdrlen(meshdr);
}
break;
case cpu_to_le16(IEEE80211_FCTL_FROMDS):
@@ -347,9 +354,14 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
if (iftype == NL80211_IFTYPE_MESH_POINT) {
struct ieee80211s_hdr *meshdr =
(struct ieee80211s_hdr *) (skb->data + hdrlen);
- hdrlen += ieee80211_get_mesh_hdrlen(meshdr);
+ /* make sure meshdr->flags is on the linear part */
+ if (!pskb_may_pull(skb, hdrlen + 1))
+ return -1;
if (meshdr->flags & MESH_FLAGS_AE_A4)
- memcpy(src, meshdr->eaddr1, ETH_ALEN);
+ skb_copy_bits(skb, hdrlen +
+ offsetof(struct ieee80211s_hdr, eaddr1),
+ src, ETH_ALEN);
+ hdrlen += ieee80211_get_mesh_hdrlen(meshdr);
}
break;
case cpu_to_le16(0):
@@ -358,7 +370,7 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
break;
}
- if (unlikely(skb->len - hdrlen < 8))
+ if (!pskb_may_pull(skb, hdrlen + 8))
return -1;
payload = skb->data + hdrlen;
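Editor's note: the util.c change above is a defensive-access pattern — verify that the bytes about to be dereferenced are in the skb's linear head with pskb_may_pull(), and fetch fields that may still sit in paged fragments with skb_copy_bits(). A tiny illustrative helper in the same spirit (names and offsets are assumptions, not from the patch):

/* Illustrative helper: safely read the mesh flags byte at 'hdrlen'. */
static int mesh_flags_at(struct sk_buff *skb, unsigned int hdrlen, u8 *flags)
{
	if (!pskb_may_pull(skb, hdrlen + 1))
		return -1;
	*flags = skb->data[hdrlen];
	return 0;
}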
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index a60a2773b497..96342993cf93 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -782,16 +782,22 @@ int cfg80211_wext_siwfreq(struct net_device *dev,
return cfg80211_mgd_wext_siwfreq(dev, info, wextfreq, extra);
case NL80211_IFTYPE_ADHOC:
return cfg80211_ibss_wext_siwfreq(dev, info, wextfreq, extra);
- default:
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_WDS:
+ case NL80211_IFTYPE_MESH_POINT:
freq = cfg80211_wext_freq(wdev->wiphy, wextfreq);
if (freq < 0)
return freq;
if (freq == 0)
return -EINVAL;
+ wdev_lock(wdev);
mutex_lock(&rdev->devlist_mtx);
- err = rdev_set_freq(rdev, NULL, freq, NL80211_CHAN_NO_HT);
+ err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT);
mutex_unlock(&rdev->devlist_mtx);
+ wdev_unlock(wdev);
return err;
+ default:
+ return -EOPNOTSUPP;
}
}
EXPORT_SYMBOL_GPL(cfg80211_wext_siwfreq);
@@ -801,7 +807,6 @@ int cfg80211_wext_giwfreq(struct net_device *dev,
struct iw_freq *freq, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
- struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
@@ -809,9 +814,9 @@ int cfg80211_wext_giwfreq(struct net_device *dev,
case NL80211_IFTYPE_ADHOC:
return cfg80211_ibss_wext_giwfreq(dev, info, freq, extra);
default:
- if (!rdev->channel)
+ if (!wdev->channel)
return -EINVAL;
- freq->m = rdev->channel->center_freq;
+ freq->m = wdev->channel->center_freq;
freq->e = 6;
return 0;
}
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index 4f5a47091fde..0ef17bc42bac 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -29,226 +29,226 @@ typedef int (*wext_ioctl_func)(struct net_device *, struct iwreq *,
* know about.
*/
static const struct iw_ioctl_description standard_ioctl[] = {
- [SIOCSIWCOMMIT - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWCOMMIT)] = {
.header_type = IW_HEADER_TYPE_NULL,
},
- [SIOCGIWNAME - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWNAME)] = {
.header_type = IW_HEADER_TYPE_CHAR,
.flags = IW_DESCR_FLAG_DUMP,
},
- [SIOCSIWNWID - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWNWID)] = {
.header_type = IW_HEADER_TYPE_PARAM,
.flags = IW_DESCR_FLAG_EVENT,
},
- [SIOCGIWNWID - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWNWID)] = {
.header_type = IW_HEADER_TYPE_PARAM,
.flags = IW_DESCR_FLAG_DUMP,
},
- [SIOCSIWFREQ - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWFREQ)] = {
.header_type = IW_HEADER_TYPE_FREQ,
.flags = IW_DESCR_FLAG_EVENT,
},
- [SIOCGIWFREQ - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWFREQ)] = {
.header_type = IW_HEADER_TYPE_FREQ,
.flags = IW_DESCR_FLAG_DUMP,
},
- [SIOCSIWMODE - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWMODE)] = {
.header_type = IW_HEADER_TYPE_UINT,
.flags = IW_DESCR_FLAG_EVENT,
},
- [SIOCGIWMODE - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWMODE)] = {
.header_type = IW_HEADER_TYPE_UINT,
.flags = IW_DESCR_FLAG_DUMP,
},
- [SIOCSIWSENS - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWSENS)] = {
.header_type = IW_HEADER_TYPE_PARAM,
},
- [SIOCGIWSENS - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWSENS)] = {
.header_type = IW_HEADER_TYPE_PARAM,
},
- [SIOCSIWRANGE - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWRANGE)] = {
.header_type = IW_HEADER_TYPE_NULL,
},
- [SIOCGIWRANGE - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWRANGE)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.max_tokens = sizeof(struct iw_range),
.flags = IW_DESCR_FLAG_DUMP,
},
- [SIOCSIWPRIV - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWPRIV)] = {
.header_type = IW_HEADER_TYPE_NULL,
},
- [SIOCGIWPRIV - SIOCIWFIRST] = { /* (handled directly by us) */
+ [IW_IOCTL_IDX(SIOCGIWPRIV)] = { /* (handled directly by us) */
.header_type = IW_HEADER_TYPE_POINT,
.token_size = sizeof(struct iw_priv_args),
.max_tokens = 16,
.flags = IW_DESCR_FLAG_NOMAX,
},
- [SIOCSIWSTATS - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWSTATS)] = {
.header_type = IW_HEADER_TYPE_NULL,
},
- [SIOCGIWSTATS - SIOCIWFIRST] = { /* (handled directly by us) */
+ [IW_IOCTL_IDX(SIOCGIWSTATS)] = { /* (handled directly by us) */
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.max_tokens = sizeof(struct iw_statistics),
.flags = IW_DESCR_FLAG_DUMP,
},
- [SIOCSIWSPY - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWSPY)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = sizeof(struct sockaddr),
.max_tokens = IW_MAX_SPY,
},
- [SIOCGIWSPY - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWSPY)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = sizeof(struct sockaddr) +
sizeof(struct iw_quality),
.max_tokens = IW_MAX_SPY,
},
- [SIOCSIWTHRSPY - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWTHRSPY)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = sizeof(struct iw_thrspy),
.min_tokens = 1,
.max_tokens = 1,
},
- [SIOCGIWTHRSPY - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWTHRSPY)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = sizeof(struct iw_thrspy),
.min_tokens = 1,
.max_tokens = 1,
},
- [SIOCSIWAP - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWAP)] = {
.header_type = IW_HEADER_TYPE_ADDR,
},
- [SIOCGIWAP - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWAP)] = {
.header_type = IW_HEADER_TYPE_ADDR,
.flags = IW_DESCR_FLAG_DUMP,
},
- [SIOCSIWMLME - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWMLME)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.min_tokens = sizeof(struct iw_mlme),
.max_tokens = sizeof(struct iw_mlme),
},
- [SIOCGIWAPLIST - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWAPLIST)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = sizeof(struct sockaddr) +
sizeof(struct iw_quality),
.max_tokens = IW_MAX_AP,
.flags = IW_DESCR_FLAG_NOMAX,
},
- [SIOCSIWSCAN - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWSCAN)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.min_tokens = 0,
.max_tokens = sizeof(struct iw_scan_req),
},
- [SIOCGIWSCAN - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWSCAN)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.max_tokens = IW_SCAN_MAX_DATA,
.flags = IW_DESCR_FLAG_NOMAX,
},
- [SIOCSIWESSID - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWESSID)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.max_tokens = IW_ESSID_MAX_SIZE,
.flags = IW_DESCR_FLAG_EVENT,
},
- [SIOCGIWESSID - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWESSID)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.max_tokens = IW_ESSID_MAX_SIZE,
.flags = IW_DESCR_FLAG_DUMP,
},
- [SIOCSIWNICKN - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWNICKN)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.max_tokens = IW_ESSID_MAX_SIZE,
},
- [SIOCGIWNICKN - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWNICKN)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.max_tokens = IW_ESSID_MAX_SIZE,
},
- [SIOCSIWRATE - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWRATE)] = {
.header_type = IW_HEADER_TYPE_PARAM,
},
- [SIOCGIWRATE - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWRATE)] = {
.header_type = IW_HEADER_TYPE_PARAM,
},
- [SIOCSIWRTS - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWRTS)] = {
.header_type = IW_HEADER_TYPE_PARAM,
},
- [SIOCGIWRTS - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWRTS)] = {
.header_type = IW_HEADER_TYPE_PARAM,
},
- [SIOCSIWFRAG - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWFRAG)] = {
.header_type = IW_HEADER_TYPE_PARAM,
},
- [SIOCGIWFRAG - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWFRAG)] = {
.header_type = IW_HEADER_TYPE_PARAM,
},
- [SIOCSIWTXPOW - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWTXPOW)] = {
.header_type = IW_HEADER_TYPE_PARAM,
},
- [SIOCGIWTXPOW - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWTXPOW)] = {
.header_type = IW_HEADER_TYPE_PARAM,
},
- [SIOCSIWRETRY - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWRETRY)] = {
.header_type = IW_HEADER_TYPE_PARAM,
},
- [SIOCGIWRETRY - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWRETRY)] = {
.header_type = IW_HEADER_TYPE_PARAM,
},
- [SIOCSIWENCODE - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWENCODE)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.max_tokens = IW_ENCODING_TOKEN_MAX,
.flags = IW_DESCR_FLAG_EVENT | IW_DESCR_FLAG_RESTRICT,
},
- [SIOCGIWENCODE - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWENCODE)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.max_tokens = IW_ENCODING_TOKEN_MAX,
.flags = IW_DESCR_FLAG_DUMP | IW_DESCR_FLAG_RESTRICT,
},
- [SIOCSIWPOWER - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWPOWER)] = {
.header_type = IW_HEADER_TYPE_PARAM,
},
- [SIOCGIWPOWER - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWPOWER)] = {
.header_type = IW_HEADER_TYPE_PARAM,
},
- [SIOCSIWGENIE - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWGENIE)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.max_tokens = IW_GENERIC_IE_MAX,
},
- [SIOCGIWGENIE - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWGENIE)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.max_tokens = IW_GENERIC_IE_MAX,
},
- [SIOCSIWAUTH - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWAUTH)] = {
.header_type = IW_HEADER_TYPE_PARAM,
},
- [SIOCGIWAUTH - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWAUTH)] = {
.header_type = IW_HEADER_TYPE_PARAM,
},
- [SIOCSIWENCODEEXT - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWENCODEEXT)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.min_tokens = sizeof(struct iw_encode_ext),
.max_tokens = sizeof(struct iw_encode_ext) +
IW_ENCODING_TOKEN_MAX,
},
- [SIOCGIWENCODEEXT - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCGIWENCODEEXT)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.min_tokens = sizeof(struct iw_encode_ext),
.max_tokens = sizeof(struct iw_encode_ext) +
IW_ENCODING_TOKEN_MAX,
},
- [SIOCSIWPMKSA - SIOCIWFIRST] = {
+ [IW_IOCTL_IDX(SIOCSIWPMKSA)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.min_tokens = sizeof(struct iw_pmksa),
@@ -262,44 +262,44 @@ static const unsigned standard_ioctl_num = ARRAY_SIZE(standard_ioctl);
* we know about.
*/
static const struct iw_ioctl_description standard_event[] = {
- [IWEVTXDROP - IWEVFIRST] = {
+ [IW_EVENT_IDX(IWEVTXDROP)] = {
.header_type = IW_HEADER_TYPE_ADDR,
},
- [IWEVQUAL - IWEVFIRST] = {
+ [IW_EVENT_IDX(IWEVQUAL)] = {
.header_type = IW_HEADER_TYPE_QUAL,
},
- [IWEVCUSTOM - IWEVFIRST] = {
+ [IW_EVENT_IDX(IWEVCUSTOM)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.max_tokens = IW_CUSTOM_MAX,
},
- [IWEVREGISTERED - IWEVFIRST] = {
+ [IW_EVENT_IDX(IWEVREGISTERED)] = {
.header_type = IW_HEADER_TYPE_ADDR,
},
- [IWEVEXPIRED - IWEVFIRST] = {
+ [IW_EVENT_IDX(IWEVEXPIRED)] = {
.header_type = IW_HEADER_TYPE_ADDR,
},
- [IWEVGENIE - IWEVFIRST] = {
+ [IW_EVENT_IDX(IWEVGENIE)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.max_tokens = IW_GENERIC_IE_MAX,
},
- [IWEVMICHAELMICFAILURE - IWEVFIRST] = {
+ [IW_EVENT_IDX(IWEVMICHAELMICFAILURE)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.max_tokens = sizeof(struct iw_michaelmicfailure),
},
- [IWEVASSOCREQIE - IWEVFIRST] = {
+ [IW_EVENT_IDX(IWEVASSOCREQIE)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.max_tokens = IW_GENERIC_IE_MAX,
},
- [IWEVASSOCRESPIE - IWEVFIRST] = {
+ [IW_EVENT_IDX(IWEVASSOCRESPIE)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.max_tokens = IW_GENERIC_IE_MAX,
},
- [IWEVPMKIDCAND - IWEVFIRST] = {
+ [IW_EVENT_IDX(IWEVPMKIDCAND)] = {
.header_type = IW_HEADER_TYPE_POINT,
.token_size = 1,
.max_tokens = sizeof(struct iw_pmkid_cand),
@@ -450,11 +450,11 @@ void wireless_send_event(struct net_device * dev,
/* Get the description of the Event */
if (cmd <= SIOCIWLAST) {
- cmd_index = cmd - SIOCIWFIRST;
+ cmd_index = IW_IOCTL_IDX(cmd);
if (cmd_index < standard_ioctl_num)
descr = &(standard_ioctl[cmd_index]);
} else {
- cmd_index = cmd - IWEVFIRST;
+ cmd_index = IW_EVENT_IDX(cmd);
if (cmd_index < standard_event_num)
descr = &(standard_event[cmd_index]);
}
@@ -663,7 +663,7 @@ static iw_handler get_handler(struct net_device *dev, unsigned int cmd)
return NULL;
/* Try as a standard command */
- index = cmd - SIOCIWFIRST;
+ index = IW_IOCTL_IDX(cmd);
if (index < handlers->num_standard)
return handlers->standard[index];
@@ -955,9 +955,9 @@ static int ioctl_standard_call(struct net_device * dev,
int ret = -EINVAL;
/* Get the description of the IOCTL */
- if ((cmd - SIOCIWFIRST) >= standard_ioctl_num)
+ if (IW_IOCTL_IDX(cmd) >= standard_ioctl_num)
return -EOPNOTSUPP;
- descr = &(standard_ioctl[cmd - SIOCIWFIRST]);
+ descr = &(standard_ioctl[IW_IOCTL_IDX(cmd)]);
/* Check if we have a pointer to user space data or not */
if (descr->header_type != IW_HEADER_TYPE_POINT) {
@@ -1013,7 +1013,7 @@ static int compat_standard_call(struct net_device *dev,
struct iw_point iwp;
int err;
- descr = standard_ioctl + (cmd - SIOCIWFIRST);
+ descr = standard_ioctl + IW_IOCTL_IDX(cmd);
if (descr->header_type != IW_HEADER_TYPE_POINT)
return ioctl_standard_call(dev, iwr, cmd, info, handler);
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index d5c6140f4cb8..9818198add8a 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -108,7 +108,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev,
/* SSID is not set, we just want to switch channel */
if (chan && !wdev->wext.connect.ssid_len) {
- err = rdev_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT);
+ err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT);
goto out;
}
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 36e84e13c6aa..296e65e01064 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -719,7 +719,7 @@ static int x25_wait_for_connection_establishment(struct sock *sk)
DECLARE_WAITQUEUE(wait, current);
int rc;
- add_wait_queue_exclusive(sk->sk_sleep, &wait);
+ add_wait_queue_exclusive(sk_sleep(sk), &wait);
for (;;) {
__set_current_state(TASK_INTERRUPTIBLE);
rc = -ERESTARTSYS;
@@ -739,7 +739,7 @@ static int x25_wait_for_connection_establishment(struct sock *sk)
break;
}
__set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sk_sleep, &wait);
+ remove_wait_queue(sk_sleep(sk), &wait);
return rc;
}
@@ -839,7 +839,7 @@ static int x25_wait_for_data(struct sock *sk, long timeout)
DECLARE_WAITQUEUE(wait, current);
int rc = 0;
- add_wait_queue_exclusive(sk->sk_sleep, &wait);
+ add_wait_queue_exclusive(sk_sleep(sk), &wait);
for (;;) {
__set_current_state(TASK_INTERRUPTIBLE);
if (sk->sk_shutdown & RCV_SHUTDOWN)
@@ -859,7 +859,7 @@ static int x25_wait_for_data(struct sock *sk, long timeout)
break;
}
__set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sk_sleep, &wait);
+ remove_wait_queue(sk_sleep(sk), &wait);
return rc;
}
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index b9ef682230a0..9005f6daeab5 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -24,6 +24,7 @@
#include <net/sock.h>
#include <linux/if_arp.h>
#include <net/x25.h>
+#include <net/x25device.h>
static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
{
@@ -115,19 +116,22 @@ int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev,
}
switch (skb->data[0]) {
- case 0x00:
- skb_pull(skb, 1);
- if (x25_receive_data(skb, nb)) {
- x25_neigh_put(nb);
- goto out;
- }
- break;
- case 0x01:
- x25_link_established(nb);
- break;
- case 0x02:
- x25_link_terminated(nb);
- break;
+
+ case X25_IFACE_DATA:
+ skb_pull(skb, 1);
+ if (x25_receive_data(skb, nb)) {
+ x25_neigh_put(nb);
+ goto out;
+ }
+ break;
+
+ case X25_IFACE_CONNECT:
+ x25_link_established(nb);
+ break;
+
+ case X25_IFACE_DISCONNECT:
+ x25_link_terminated(nb);
+ break;
}
x25_neigh_put(nb);
drop:
@@ -148,7 +152,7 @@ void x25_establish_link(struct x25_neigh *nb)
return;
}
ptr = skb_put(skb, 1);
- *ptr = 0x01;
+ *ptr = X25_IFACE_CONNECT;
break;
#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
@@ -184,7 +188,7 @@ void x25_terminate_link(struct x25_neigh *nb)
}
ptr = skb_put(skb, 1);
- *ptr = 0x02;
+ *ptr = X25_IFACE_DISCONNECT;
skb->protocol = htons(ETH_P_X25);
skb->dev = nb->dev;
@@ -200,7 +204,7 @@ void x25_send_frame(struct sk_buff *skb, struct x25_neigh *nb)
switch (nb->dev->type) {
case ARPHRD_X25:
dptr = skb_push(skb, 1);
- *dptr = 0x00;
+ *dptr = X25_IFACE_DATA;
break;
#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
diff --git a/net/xfrm/xfrm_hash.h b/net/xfrm/xfrm_hash.h
index e5195c99f71e..1396572d2ade 100644
--- a/net/xfrm/xfrm_hash.h
+++ b/net/xfrm/xfrm_hash.h
@@ -16,7 +16,8 @@ static inline unsigned int __xfrm6_addr_hash(xfrm_address_t *addr)
static inline unsigned int __xfrm4_daddr_saddr_hash(xfrm_address_t *daddr, xfrm_address_t *saddr)
{
- return ntohl(daddr->a4 + saddr->a4);
+ u32 sum = (__force u32)daddr->a4 + (__force u32)saddr->a4;
+ return ntohl((__force __be32)sum);
}
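A minimal user-space sketch of the rewritten hash, assuming __force and __be32 stand in for the kernel's sparse annotations (no-ops in a normal build): the addition happens on plain u32 values and only the sum is cast back to __be32 for ntohl(), so sparse's endianness checking stays quiet while the computed value is unchanged.

/* Sketch only: __force/__be32 mimic the sparse annotations. */
#include <stdint.h>
#include <arpa/inet.h>	/* ntohl() */

#define __force		/* no-op outside a sparse run */
typedef uint32_t u32;
typedef uint32_t __be32;

static unsigned int daddr_saddr_hash(__be32 daddr, __be32 saddr)
{
	u32 sum = (__force u32)daddr + (__force u32)saddr;

	return ntohl((__force __be32)sum);
}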
static inline unsigned int __xfrm6_daddr_saddr_hash(xfrm_address_t *daddr, xfrm_address_t *saddr)
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 843e066649cb..31f4ba43b48f 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -37,6 +37,8 @@
DEFINE_MUTEX(xfrm_cfg_mutex);
EXPORT_SYMBOL(xfrm_cfg_mutex);
+static DEFINE_SPINLOCK(xfrm_policy_sk_bundle_lock);
+static struct dst_entry *xfrm_policy_sk_bundles;
static DEFINE_RWLOCK(xfrm_policy_lock);
static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
@@ -44,12 +46,10 @@ static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];
static struct kmem_cache *xfrm_dst_cache __read_mostly;
-static HLIST_HEAD(xfrm_policy_gc_list);
-static DEFINE_SPINLOCK(xfrm_policy_gc_lock);
-
static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
static void xfrm_init_pmtu(struct dst_entry *dst);
+static int stale_bundle(struct dst_entry *dst);
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
int dir);
@@ -156,7 +156,7 @@ static void xfrm_policy_timer(unsigned long data)
read_lock(&xp->lock);
- if (xp->walk.dead)
+ if (unlikely(xp->walk.dead))
goto out;
dir = xfrm_policy_id2dir(xp->index);
@@ -216,6 +216,35 @@ expired:
xfrm_pol_put(xp);
}
+static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo)
+{
+ struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);
+
+ if (unlikely(pol->walk.dead))
+ flo = NULL;
+ else
+ xfrm_pol_hold(pol);
+
+ return flo;
+}
+
+static int xfrm_policy_flo_check(struct flow_cache_object *flo)
+{
+ struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);
+
+ return !pol->walk.dead;
+}
+
+static void xfrm_policy_flo_delete(struct flow_cache_object *flo)
+{
+ xfrm_pol_put(container_of(flo, struct xfrm_policy, flo));
+}
+
+static const struct flow_cache_ops xfrm_policy_fc_ops = {
+ .get = xfrm_policy_flo_get,
+ .check = xfrm_policy_flo_check,
+ .delete = xfrm_policy_flo_delete,
+};
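A hedged sketch of the reference contract these ops appear to implement; the flow cache itself is not part of this hunk, so cache_slot_lookup() and resolve() below are illustrative stand-ins rather than the real flow cache API. ->get() returns the object with a reference taken, or NULL when the cached object is dead so the caller re-resolves; ->check() reports whether a cached object is still valid without taking a reference; ->delete() drops the cache's own reference.

struct flow_cache_object;

struct flow_cache_ops {
	struct flow_cache_object *(*get)(struct flow_cache_object *);
	int (*check)(struct flow_cache_object *);
	void (*delete)(struct flow_cache_object *);
};

/* Illustrative consumer, not the kernel's flow cache. */
static struct flow_cache_object *
cache_slot_lookup(struct flow_cache_object **slot,
		  const struct flow_cache_ops *ops,
		  struct flow_cache_object *(*resolve)(void *ctx), void *ctx)
{
	struct flow_cache_object *obj = *slot;

	if (obj && ops->get(obj))	/* reference taken, entry still alive */
		return obj;

	if (obj)
		ops->delete(obj);	/* dead entry: drop the cache's ref */

	obj = resolve(ctx);		/* e.g. __xfrm_policy_lookup() */
	*slot = obj;
	return obj;
}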
/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
* SPD calls.
@@ -236,6 +265,7 @@ struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
atomic_set(&policy->refcnt, 1);
setup_timer(&policy->timer, xfrm_policy_timer,
(unsigned long)policy);
+ policy->flo.ops = &xfrm_policy_fc_ops;
}
return policy;
}
@@ -247,8 +277,6 @@ void xfrm_policy_destroy(struct xfrm_policy *policy)
{
BUG_ON(!policy->walk.dead);
- BUG_ON(policy->bundles);
-
if (del_timer(&policy->timer))
BUG();
@@ -257,63 +285,20 @@ void xfrm_policy_destroy(struct xfrm_policy *policy)
}
EXPORT_SYMBOL(xfrm_policy_destroy);
-static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
-{
- struct dst_entry *dst;
-
- while ((dst = policy->bundles) != NULL) {
- policy->bundles = dst->next;
- dst_free(dst);
- }
-
- if (del_timer(&policy->timer))
- atomic_dec(&policy->refcnt);
-
- if (atomic_read(&policy->refcnt) > 1)
- flow_cache_flush();
-
- xfrm_pol_put(policy);
-}
-
-static void xfrm_policy_gc_task(struct work_struct *work)
-{
- struct xfrm_policy *policy;
- struct hlist_node *entry, *tmp;
- struct hlist_head gc_list;
-
- spin_lock_bh(&xfrm_policy_gc_lock);
- gc_list.first = xfrm_policy_gc_list.first;
- INIT_HLIST_HEAD(&xfrm_policy_gc_list);
- spin_unlock_bh(&xfrm_policy_gc_lock);
-
- hlist_for_each_entry_safe(policy, entry, tmp, &gc_list, bydst)
- xfrm_policy_gc_kill(policy);
-}
-static DECLARE_WORK(xfrm_policy_gc_work, xfrm_policy_gc_task);
-
/* Rule must be locked. Release descendant resources, announce
* the entry dead. The rule must already be unlinked from the lists.
*/
static void xfrm_policy_kill(struct xfrm_policy *policy)
{
- int dead;
-
- write_lock_bh(&policy->lock);
- dead = policy->walk.dead;
policy->walk.dead = 1;
- write_unlock_bh(&policy->lock);
- if (unlikely(dead)) {
- WARN_ON(1);
- return;
- }
+ atomic_inc(&policy->genid);
- spin_lock_bh(&xfrm_policy_gc_lock);
- hlist_add_head(&policy->bydst, &xfrm_policy_gc_list);
- spin_unlock_bh(&xfrm_policy_gc_lock);
+ if (del_timer(&policy->timer))
+ xfrm_pol_put(policy);
- schedule_work(&xfrm_policy_gc_work);
+ xfrm_pol_put(policy);
}
static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
@@ -555,7 +540,6 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
struct xfrm_policy *delpol;
struct hlist_head *chain;
struct hlist_node *entry, *newpos;
- struct dst_entry *gc_list;
u32 mark = policy->mark.v & policy->mark.m;
write_lock_bh(&xfrm_policy_lock);
@@ -605,34 +589,6 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
else if (xfrm_bydst_should_resize(net, dir, NULL))
schedule_work(&net->xfrm.policy_hash_work);
- read_lock_bh(&xfrm_policy_lock);
- gc_list = NULL;
- entry = &policy->bydst;
- hlist_for_each_entry_continue(policy, entry, bydst) {
- struct dst_entry *dst;
-
- write_lock(&policy->lock);
- dst = policy->bundles;
- if (dst) {
- struct dst_entry *tail = dst;
- while (tail->next)
- tail = tail->next;
- tail->next = gc_list;
- gc_list = dst;
-
- policy->bundles = NULL;
- }
- write_unlock(&policy->lock);
- }
- read_unlock_bh(&xfrm_policy_lock);
-
- while (gc_list) {
- struct dst_entry *dst = gc_list;
-
- gc_list = dst->next;
- dst_free(dst);
- }
-
return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);
@@ -671,10 +627,8 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
}
write_unlock_bh(&xfrm_policy_lock);
- if (ret && delete) {
- atomic_inc(&flow_cache_genid);
+ if (ret && delete)
xfrm_policy_kill(ret);
- }
return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
@@ -713,10 +667,8 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
}
write_unlock_bh(&xfrm_policy_lock);
- if (ret && delete) {
- atomic_inc(&flow_cache_genid);
+ if (ret && delete)
xfrm_policy_kill(ret);
- }
return ret;
}
EXPORT_SYMBOL(xfrm_policy_byid);
@@ -776,7 +728,6 @@ xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audi
int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
int dir, err = 0, cnt = 0;
- struct xfrm_policy *dp;
write_lock_bh(&xfrm_policy_lock);
@@ -794,10 +745,9 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
&net->xfrm.policy_inexact[dir], bydst) {
if (pol->type != type)
continue;
- dp = __xfrm_policy_unlink(pol, dir);
+ __xfrm_policy_unlink(pol, dir);
write_unlock_bh(&xfrm_policy_lock);
- if (dp)
- cnt++;
+ cnt++;
xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
audit_info->sessionid,
@@ -816,10 +766,9 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
bydst) {
if (pol->type != type)
continue;
- dp = __xfrm_policy_unlink(pol, dir);
+ __xfrm_policy_unlink(pol, dir);
write_unlock_bh(&xfrm_policy_lock);
- if (dp)
- cnt++;
+ cnt++;
xfrm_audit_policy_delete(pol, 1,
audit_info->loginuid,
@@ -835,7 +784,6 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
}
if (!cnt)
err = -ESRCH;
- atomic_inc(&flow_cache_genid);
out:
write_unlock_bh(&xfrm_policy_lock);
return err;
@@ -989,32 +937,37 @@ fail:
return ret;
}
-static int xfrm_policy_lookup(struct net *net, struct flowi *fl, u16 family,
- u8 dir, void **objp, atomic_t **obj_refp)
+static struct xfrm_policy *
+__xfrm_policy_lookup(struct net *net, struct flowi *fl, u16 family, u8 dir)
{
+#ifdef CONFIG_XFRM_SUB_POLICY
struct xfrm_policy *pol;
- int err = 0;
-#ifdef CONFIG_XFRM_SUB_POLICY
pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
- if (IS_ERR(pol)) {
- err = PTR_ERR(pol);
- pol = NULL;
- }
- if (pol || err)
- goto end;
-#endif
- pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
- if (IS_ERR(pol)) {
- err = PTR_ERR(pol);
- pol = NULL;
- }
-#ifdef CONFIG_XFRM_SUB_POLICY
-end:
+ if (pol != NULL)
+ return pol;
#endif
- if ((*objp = (void *) pol) != NULL)
- *obj_refp = &pol->refcnt;
- return err;
+ return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
+}
+
+static struct flow_cache_object *
+xfrm_policy_lookup(struct net *net, struct flowi *fl, u16 family,
+ u8 dir, struct flow_cache_object *old_obj, void *ctx)
+{
+ struct xfrm_policy *pol;
+
+ if (old_obj)
+ xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo));
+
+ pol = __xfrm_policy_lookup(net, fl, family, dir);
+ if (IS_ERR_OR_NULL(pol))
+ return ERR_CAST(pol);
+
+ /* Resolver returns two references:
+ * one for cache and one for caller of flow_cache_lookup() */
+ xfrm_pol_hold(pol);
+
+ return &pol->flo;
}
static inline int policy_to_flow_dir(int dir)
@@ -1104,8 +1057,6 @@ int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
pol = __xfrm_policy_unlink(pol, dir);
write_unlock_bh(&xfrm_policy_lock);
if (pol) {
- if (dir < XFRM_POLICY_MAX)
- atomic_inc(&flow_cache_genid);
xfrm_policy_kill(pol);
return 0;
}
@@ -1132,6 +1083,9 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
__xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
}
if (old_pol)
+ /* Unlinking always succeeds. This is the only function
+ * allowed to delete or replace a socket policy.
+ */
__xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
write_unlock_bh(&xfrm_policy_lock);
@@ -1300,18 +1254,6 @@ xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, struct flowi *fl,
* still valid.
*/
-static struct dst_entry *
-xfrm_find_bundle(struct flowi *fl, struct xfrm_policy *policy, unsigned short family)
-{
- struct dst_entry *x;
- struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
- if (unlikely(afinfo == NULL))
- return ERR_PTR(-EINVAL);
- x = afinfo->find_bundle(fl, policy);
- xfrm_policy_put_afinfo(afinfo);
- return x;
-}
-
static inline int xfrm_get_tos(struct flowi *fl, int family)
{
struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
@@ -1327,6 +1269,54 @@ static inline int xfrm_get_tos(struct flowi *fl, int family)
return tos;
}
+static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo)
+{
+ struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
+ struct dst_entry *dst = &xdst->u.dst;
+
+ if (xdst->route == NULL) {
+ /* Dummy bundle - if it has xfrms, we were not
+ * able to build a bundle because template resolution failed.
+ * It means we need to retry resolving. */
+ if (xdst->num_xfrms > 0)
+ return NULL;
+ } else {
+ /* Real bundle */
+ if (stale_bundle(dst))
+ return NULL;
+ }
+
+ dst_hold(dst);
+ return flo;
+}
+
+static int xfrm_bundle_flo_check(struct flow_cache_object *flo)
+{
+ struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
+ struct dst_entry *dst = &xdst->u.dst;
+
+ if (!xdst->route)
+ return 0;
+ if (stale_bundle(dst))
+ return 0;
+
+ return 1;
+}
+
+static void xfrm_bundle_flo_delete(struct flow_cache_object *flo)
+{
+ struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
+ struct dst_entry *dst = &xdst->u.dst;
+
+ dst_free(dst);
+}
+
+static const struct flow_cache_ops xfrm_bundle_fc_ops = {
+ .get = xfrm_bundle_flo_get,
+ .check = xfrm_bundle_flo_check,
+ .delete = xfrm_bundle_flo_delete,
+};
+
static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
{
struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
@@ -1349,9 +1339,10 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
BUG();
}
xdst = dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS);
-
xfrm_policy_put_afinfo(afinfo);
+ xdst->flo.ops = &xfrm_bundle_fc_ops;
+
return xdst;
}
@@ -1389,6 +1380,7 @@ static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
return err;
}
+
/* Allocate chain of dst_entry's, attach known xfrm's, calculate
* all the metrics... Shortly, bundle a bundle.
*/
@@ -1452,7 +1444,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
dst_hold(dst);
dst1->xfrm = xfrm[i];
- xdst->genid = xfrm[i]->genid;
+ xdst->xfrm_genid = xfrm[i]->genid;
dst1->obsolete = -1;
dst1->flags |= DST_HOST;
@@ -1545,7 +1537,186 @@ xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
#endif
}
-static int stale_bundle(struct dst_entry *dst);
+static int xfrm_expand_policies(struct flowi *fl, u16 family,
+ struct xfrm_policy **pols,
+ int *num_pols, int *num_xfrms)
+{
+ int i;
+
+ if (*num_pols == 0 || !pols[0]) {
+ *num_pols = 0;
+ *num_xfrms = 0;
+ return 0;
+ }
+ if (IS_ERR(pols[0]))
+ return PTR_ERR(pols[0]);
+
+ *num_xfrms = pols[0]->xfrm_nr;
+
+#ifdef CONFIG_XFRM_SUB_POLICY
+ if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
+ pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
+ pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
+ XFRM_POLICY_TYPE_MAIN,
+ fl, family,
+ XFRM_POLICY_OUT);
+ if (pols[1]) {
+ if (IS_ERR(pols[1])) {
+ xfrm_pols_put(pols, *num_pols);
+ return PTR_ERR(pols[1]);
+ }
+ (*num_pols) ++;
+ (*num_xfrms) += pols[1]->xfrm_nr;
+ }
+ }
+#endif
+ for (i = 0; i < *num_pols; i++) {
+ if (pols[i]->action != XFRM_POLICY_ALLOW) {
+ *num_xfrms = -1;
+ break;
+ }
+ }
+
+ return 0;
+
+}
+
+static struct xfrm_dst *
+xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
+ struct flowi *fl, u16 family,
+ struct dst_entry *dst_orig)
+{
+ struct net *net = xp_net(pols[0]);
+ struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
+ struct dst_entry *dst;
+ struct xfrm_dst *xdst;
+ int err;
+
+ /* Try to instantiate a bundle */
+ err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
+ if (err < 0) {
+ if (err != -EAGAIN)
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
+ return ERR_PTR(err);
+ }
+
+ dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
+ if (IS_ERR(dst)) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
+ return ERR_CAST(dst);
+ }
+
+ xdst = (struct xfrm_dst *)dst;
+ xdst->num_xfrms = err;
+ if (num_pols > 1)
+ err = xfrm_dst_update_parent(dst, &pols[1]->selector);
+ else
+ err = xfrm_dst_update_origin(dst, fl);
+ if (unlikely(err)) {
+ dst_free(dst);
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
+ return ERR_PTR(err);
+ }
+
+ xdst->num_pols = num_pols;
+ memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
+ xdst->policy_genid = atomic_read(&pols[0]->genid);
+
+ return xdst;
+}
+
+static struct flow_cache_object *
+xfrm_bundle_lookup(struct net *net, struct flowi *fl, u16 family, u8 dir,
+ struct flow_cache_object *oldflo, void *ctx)
+{
+ struct dst_entry *dst_orig = (struct dst_entry *)ctx;
+ struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
+ struct xfrm_dst *xdst, *new_xdst;
+ int num_pols = 0, num_xfrms = 0, i, err, pol_dead;
+
+ /* Check if the policies from old bundle are usable */
+ xdst = NULL;
+ if (oldflo) {
+ xdst = container_of(oldflo, struct xfrm_dst, flo);
+ num_pols = xdst->num_pols;
+ num_xfrms = xdst->num_xfrms;
+ pol_dead = 0;
+ for (i = 0; i < num_pols; i++) {
+ pols[i] = xdst->pols[i];
+ pol_dead |= pols[i]->walk.dead;
+ }
+ if (pol_dead) {
+ dst_free(&xdst->u.dst);
+ xdst = NULL;
+ num_pols = 0;
+ num_xfrms = 0;
+ oldflo = NULL;
+ }
+ }
+
+ /* Resolve the policies to use if we couldn't get them from
+ * the previous cache entry */
+ if (xdst == NULL) {
+ num_pols = 1;
+ pols[0] = __xfrm_policy_lookup(net, fl, family, dir);
+ err = xfrm_expand_policies(fl, family, pols,
+ &num_pols, &num_xfrms);
+ if (err < 0)
+ goto inc_error;
+ if (num_pols == 0)
+ return NULL;
+ if (num_xfrms <= 0)
+ goto make_dummy_bundle;
+ }
+
+ new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig);
+ if (IS_ERR(new_xdst)) {
+ err = PTR_ERR(new_xdst);
+ if (err != -EAGAIN)
+ goto error;
+ if (oldflo == NULL)
+ goto make_dummy_bundle;
+ dst_hold(&xdst->u.dst);
+ return oldflo;
+ }
+
+ /* Kill the previous bundle */
+ if (xdst) {
+ /* The policies were stolen for newly generated bundle */
+ xdst->num_pols = 0;
+ dst_free(&xdst->u.dst);
+ }
+
+ /* The flow cache does not hold a reference, it dst_free()'s,
+ * but we do need to return one reference for the original caller */
+ dst_hold(&new_xdst->u.dst);
+ return &new_xdst->flo;
+
+make_dummy_bundle:
+ /* We found policies, but there are no bundles to instantiate:
+ * either the policy blocks, has no transformations, or
+ * we could not build a template (no xfrm_states). */
+ xdst = xfrm_alloc_dst(net, family);
+ if (IS_ERR(xdst)) {
+ xfrm_pols_put(pols, num_pols);
+ return ERR_CAST(xdst);
+ }
+ xdst->num_pols = num_pols;
+ xdst->num_xfrms = num_xfrms;
+ memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
+
+ dst_hold(&xdst->u.dst);
+ return &xdst->flo;
+
+inc_error:
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
+error:
+ if (xdst != NULL)
+ dst_free(&xdst->u.dst);
+ else
+ xfrm_pols_put(pols, num_pols);
+ return ERR_PTR(err);
+}
/* Main function: finds/creates a bundle for given flow.
*
@@ -1555,245 +1726,152 @@ static int stale_bundle(struct dst_entry *dst);
int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl,
struct sock *sk, int flags)
{
- struct xfrm_policy *policy;
struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
- int npols;
- int pol_dead;
- int xfrm_nr;
- int pi;
- struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
- struct dst_entry *dst, *dst_orig = *dst_p;
- int nx = 0;
- int err;
- u32 genid;
- u16 family;
+ struct flow_cache_object *flo;
+ struct xfrm_dst *xdst;
+ struct dst_entry *dst, *dst_orig = *dst_p, *route;
+ u16 family = dst_orig->ops->family;
u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
+ int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
restart:
- genid = atomic_read(&flow_cache_genid);
- policy = NULL;
- for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
- pols[pi] = NULL;
- npols = 0;
- pol_dead = 0;
- xfrm_nr = 0;
+ dst = NULL;
+ xdst = NULL;
+ route = NULL;
if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
- policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
- err = PTR_ERR(policy);
- if (IS_ERR(policy)) {
- XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
+ num_pols = 1;
+ pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
+ err = xfrm_expand_policies(fl, family, pols,
+ &num_pols, &num_xfrms);
+ if (err < 0)
goto dropdst;
+
+ if (num_pols) {
+ if (num_xfrms <= 0) {
+ drop_pols = num_pols;
+ goto no_transform;
+ }
+
+ xdst = xfrm_resolve_and_create_bundle(
+ pols, num_pols, fl,
+ family, dst_orig);
+ if (IS_ERR(xdst)) {
+ xfrm_pols_put(pols, num_pols);
+ err = PTR_ERR(xdst);
+ goto dropdst;
+ }
+
+ spin_lock_bh(&xfrm_policy_sk_bundle_lock);
+ xdst->u.dst.next = xfrm_policy_sk_bundles;
+ xfrm_policy_sk_bundles = &xdst->u.dst;
+ spin_unlock_bh(&xfrm_policy_sk_bundle_lock);
+
+ route = xdst->route;
}
}
- if (!policy) {
+ if (xdst == NULL) {
/* To accelerate a bit... */
if ((dst_orig->flags & DST_NOXFRM) ||
!net->xfrm.policy_count[XFRM_POLICY_OUT])
goto nopol;
- policy = flow_cache_lookup(net, fl, dst_orig->ops->family,
- dir, xfrm_policy_lookup);
- err = PTR_ERR(policy);
- if (IS_ERR(policy)) {
- XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
+ flo = flow_cache_lookup(net, fl, family, dir,
+ xfrm_bundle_lookup, dst_orig);
+ if (flo == NULL)
+ goto nopol;
+ if (IS_ERR(flo)) {
+ err = PTR_ERR(flo);
goto dropdst;
}
+ xdst = container_of(flo, struct xfrm_dst, flo);
+
+ num_pols = xdst->num_pols;
+ num_xfrms = xdst->num_xfrms;
+ memcpy(pols, xdst->pols, sizeof(struct xfrm_policy*) * num_pols);
+ route = xdst->route;
+ }
+
+ dst = &xdst->u.dst;
+ if (route == NULL && num_xfrms > 0) {
+ /* The only case in which xfrm_bundle_lookup() returns a
+ * bundle with a null route is when the template could
+ * not be resolved. It means the policies are there, but
+ * the bundle could not be created, since we don't yet
+ * have the xfrm_states. We need to wait for the KM to
+ * negotiate new SAs or bail out with an error. */
+ if (net->xfrm.sysctl_larval_drop) {
+ /* EREMOTE tells the caller to generate
+ * a one-shot blackhole route. */
+ dst_release(dst);
+ xfrm_pols_put(pols, num_pols);
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
+ return -EREMOTE;
+ }
+ if (flags & XFRM_LOOKUP_WAIT) {
+ DECLARE_WAITQUEUE(wait, current);
+
+ add_wait_queue(&net->xfrm.km_waitq, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&net->xfrm.km_waitq, &wait);
+
+ if (!signal_pending(current)) {
+ dst_release(dst);
+ goto restart;
+ }
+
+ err = -ERESTART;
+ } else
+ err = -EAGAIN;
+
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
+ goto error;
}
- if (!policy)
+no_transform:
+ if (num_pols == 0)
goto nopol;
- family = dst_orig->ops->family;
- pols[0] = policy;
- npols ++;
- xfrm_nr += pols[0]->xfrm_nr;
-
- err = -ENOENT;
- if ((flags & XFRM_LOOKUP_ICMP) && !(policy->flags & XFRM_POLICY_ICMP))
+ if ((flags & XFRM_LOOKUP_ICMP) &&
+ !(pols[0]->flags & XFRM_POLICY_ICMP)) {
+ err = -ENOENT;
goto error;
+ }
- policy->curlft.use_time = get_seconds();
+ for (i = 0; i < num_pols; i++)
+ pols[i]->curlft.use_time = get_seconds();
- switch (policy->action) {
- default:
- case XFRM_POLICY_BLOCK:
+ if (num_xfrms < 0) {
/* Prohibit the flow */
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
err = -EPERM;
goto error;
-
- case XFRM_POLICY_ALLOW:
-#ifndef CONFIG_XFRM_SUB_POLICY
- if (policy->xfrm_nr == 0) {
- /* Flow passes not transformed. */
- xfrm_pol_put(policy);
- return 0;
- }
-#endif
-
- /* Try to find matching bundle.
- *
- * LATER: help from flow cache. It is optional, this
- * is required only for output policy.
- */
- dst = xfrm_find_bundle(fl, policy, family);
- if (IS_ERR(dst)) {
- XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
- err = PTR_ERR(dst);
- goto error;
- }
-
- if (dst)
- break;
-
-#ifdef CONFIG_XFRM_SUB_POLICY
- if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
- pols[1] = xfrm_policy_lookup_bytype(net,
- XFRM_POLICY_TYPE_MAIN,
- fl, family,
- XFRM_POLICY_OUT);
- if (pols[1]) {
- if (IS_ERR(pols[1])) {
- XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
- err = PTR_ERR(pols[1]);
- goto error;
- }
- if (pols[1]->action == XFRM_POLICY_BLOCK) {
- XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
- err = -EPERM;
- goto error;
- }
- npols ++;
- xfrm_nr += pols[1]->xfrm_nr;
- }
- }
-
- /*
- * Because neither flowi nor bundle information knows about
- * transformation template size. On more than one policy usage
- * we can realize whether all of them is bypass or not after
- * they are searched. See above not-transformed bypass
- * is surrounded by non-sub policy configuration, too.
- */
- if (xfrm_nr == 0) {
- /* Flow passes not transformed. */
- xfrm_pols_put(pols, npols);
- return 0;
- }
-
-#endif
- nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);
-
- if (unlikely(nx<0)) {
- err = nx;
- if (err == -EAGAIN && net->xfrm.sysctl_larval_drop) {
- /* EREMOTE tells the caller to generate
- * a one-shot blackhole route.
- */
- XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
- xfrm_pol_put(policy);
- return -EREMOTE;
- }
- if (err == -EAGAIN && (flags & XFRM_LOOKUP_WAIT)) {
- DECLARE_WAITQUEUE(wait, current);
-
- add_wait_queue(&net->xfrm.km_waitq, &wait);
- set_current_state(TASK_INTERRUPTIBLE);
- schedule();
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&net->xfrm.km_waitq, &wait);
-
- nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);
-
- if (nx == -EAGAIN && signal_pending(current)) {
- XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
- err = -ERESTART;
- goto error;
- }
- if (nx == -EAGAIN ||
- genid != atomic_read(&flow_cache_genid)) {
- xfrm_pols_put(pols, npols);
- goto restart;
- }
- err = nx;
- }
- if (err < 0) {
- XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
- goto error;
- }
- }
- if (nx == 0) {
- /* Flow passes not transformed. */
- xfrm_pols_put(pols, npols);
- return 0;
- }
-
- dst = xfrm_bundle_create(policy, xfrm, nx, fl, dst_orig);
- err = PTR_ERR(dst);
- if (IS_ERR(dst)) {
- XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
- goto error;
- }
-
- for (pi = 0; pi < npols; pi++) {
- read_lock_bh(&pols[pi]->lock);
- pol_dead |= pols[pi]->walk.dead;
- read_unlock_bh(&pols[pi]->lock);
- }
-
- write_lock_bh(&policy->lock);
- if (unlikely(pol_dead || stale_bundle(dst))) {
- /* Wow! While we worked on resolving, this
- * policy has gone. Retry. It is not paranoia,
- * we just cannot enlist new bundle to dead object.
- * We can't enlist stable bundles either.
- */
- write_unlock_bh(&policy->lock);
- dst_free(dst);
-
- if (pol_dead)
- XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLDEAD);
- else
- XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
- err = -EHOSTUNREACH;
- goto error;
- }
-
- if (npols > 1)
- err = xfrm_dst_update_parent(dst, &pols[1]->selector);
- else
- err = xfrm_dst_update_origin(dst, fl);
- if (unlikely(err)) {
- write_unlock_bh(&policy->lock);
- dst_free(dst);
- XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
- goto error;
- }
-
- dst->next = policy->bundles;
- policy->bundles = dst;
- dst_hold(dst);
- write_unlock_bh(&policy->lock);
+ } else if (num_xfrms > 0) {
+ /* Flow transformed */
+ *dst_p = dst;
+ dst_release(dst_orig);
+ } else {
+ /* Flow passes untransformed */
+ dst_release(dst);
}
- *dst_p = dst;
- dst_release(dst_orig);
- xfrm_pols_put(pols, npols);
+ok:
+ xfrm_pols_put(pols, drop_pols);
return 0;
+nopol:
+ if (!(flags & XFRM_LOOKUP_ICMP))
+ goto ok;
+ err = -ENOENT;
error:
- xfrm_pols_put(pols, npols);
+ dst_release(dst);
dropdst:
dst_release(dst_orig);
*dst_p = NULL;
+ xfrm_pols_put(pols, drop_pols);
return err;
-
-nopol:
- err = -ENOENT;
- if (flags & XFRM_LOOKUP_ICMP)
- goto dropdst;
- return 0;
}
EXPORT_SYMBOL(__xfrm_lookup);
@@ -1952,9 +2030,16 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
}
}
- if (!pol)
- pol = flow_cache_lookup(net, &fl, family, fl_dir,
- xfrm_policy_lookup);
+ if (!pol) {
+ struct flow_cache_object *flo;
+
+ flo = flow_cache_lookup(net, &fl, family, fl_dir,
+ xfrm_policy_lookup, NULL);
+ if (IS_ERR_OR_NULL(flo))
+ pol = ERR_CAST(flo);
+ else
+ pol = container_of(flo, struct xfrm_policy, flo);
+ }
if (IS_ERR(pol)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
@@ -2138,71 +2223,24 @@ static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
return dst;
}
-static void prune_one_bundle(struct xfrm_policy *pol, int (*func)(struct dst_entry *), struct dst_entry **gc_list_p)
-{
- struct dst_entry *dst, **dstp;
-
- write_lock(&pol->lock);
- dstp = &pol->bundles;
- while ((dst=*dstp) != NULL) {
- if (func(dst)) {
- *dstp = dst->next;
- dst->next = *gc_list_p;
- *gc_list_p = dst;
- } else {
- dstp = &dst->next;
- }
- }
- write_unlock(&pol->lock);
-}
-
-static void xfrm_prune_bundles(struct net *net, int (*func)(struct dst_entry *))
+static void __xfrm_garbage_collect(struct net *net)
{
- struct dst_entry *gc_list = NULL;
- int dir;
+ struct dst_entry *head, *next;
- read_lock_bh(&xfrm_policy_lock);
- for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
- struct xfrm_policy *pol;
- struct hlist_node *entry;
- struct hlist_head *table;
- int i;
+ flow_cache_flush();
- hlist_for_each_entry(pol, entry,
- &net->xfrm.policy_inexact[dir], bydst)
- prune_one_bundle(pol, func, &gc_list);
+ spin_lock_bh(&xfrm_policy_sk_bundle_lock);
+ head = xfrm_policy_sk_bundles;
+ xfrm_policy_sk_bundles = NULL;
+ spin_unlock_bh(&xfrm_policy_sk_bundle_lock);
- table = net->xfrm.policy_bydst[dir].table;
- for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
- hlist_for_each_entry(pol, entry, table + i, bydst)
- prune_one_bundle(pol, func, &gc_list);
- }
- }
- read_unlock_bh(&xfrm_policy_lock);
-
- while (gc_list) {
- struct dst_entry *dst = gc_list;
- gc_list = dst->next;
- dst_free(dst);
+ while (head) {
+ next = head->next;
+ dst_free(head);
+ head = next;
}
}
-static int unused_bundle(struct dst_entry *dst)
-{
- return !atomic_read(&dst->__refcnt);
-}
-
-static void __xfrm_garbage_collect(struct net *net)
-{
- xfrm_prune_bundles(net, unused_bundle);
-}
-
-static int xfrm_flush_bundles(struct net *net)
-{
- xfrm_prune_bundles(net, stale_bundle);
- return 0;
-}
-
static void xfrm_init_pmtu(struct dst_entry *dst)
{
do {
@@ -2260,7 +2298,9 @@ int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
return 0;
if (dst->xfrm->km.state != XFRM_STATE_VALID)
return 0;
- if (xdst->genid != dst->xfrm->genid)
+ if (xdst->xfrm_genid != dst->xfrm->genid)
+ return 0;
+ if (xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
return 0;
if (strict && fl &&
@@ -2425,7 +2465,7 @@ static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void
switch (event) {
case NETDEV_DOWN:
- xfrm_flush_bundles(dev_net(dev));
+ __xfrm_garbage_collect(dev_net(dev));
}
return NOTIFY_DONE;
}
@@ -2531,7 +2571,6 @@ static void xfrm_policy_fini(struct net *net)
audit_info.sessionid = -1;
audit_info.secid = 0;
xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);
- flush_work(&xfrm_policy_gc_work);
WARN_ON(!list_empty(&net->xfrm.policy_all));
@@ -2757,7 +2796,6 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
struct xfrm_migrate *m, int num_migrate)
{
struct xfrm_migrate *mp;
- struct dst_entry *dst;
int i, j, n = 0;
write_lock_bh(&pol->lock);
@@ -2782,10 +2820,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
sizeof(pol->xfrm_vec[i].saddr));
pol->xfrm_vec[i].encap_family = mp->new_family;
/* flush bundles */
- while ((dst = pol->bundles) != NULL) {
- pol->bundles = dst->next;
- dst_free(dst);
- }
+ atomic_inc(&pol->genid);
}
}
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index add77ecb8ac4..5208b12fbfb4 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -38,7 +38,6 @@
static DEFINE_SPINLOCK(xfrm_state_lock);
static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
-static unsigned int xfrm_state_genid;
static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
@@ -924,8 +923,6 @@ static void __xfrm_state_insert(struct xfrm_state *x)
struct net *net = xs_net(x);
unsigned int h;
- x->genid = ++xfrm_state_genid;
-
list_add(&x->km.all, &net->xfrm.state_all);
h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr,
@@ -971,7 +968,7 @@ static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
(mark & x->mark.m) == x->mark.v &&
!xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
!xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
- x->genid = xfrm_state_genid;
+ x->genid++;
}
}
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 6106b72826d3..a267fbdda525 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1741,6 +1741,10 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
if (err)
return err;
+ err = verify_policy_dir(p->dir);
+ if (err)
+ return err;
+
if (p->index)
xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, 0, &err);
else {
@@ -1766,13 +1770,9 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
if (xp == NULL)
return -ENOENT;
- read_lock(&xp->lock);
- if (xp->walk.dead) {
- read_unlock(&xp->lock);
+ if (unlikely(xp->walk.dead))
goto out;
- }
- read_unlock(&xp->lock);
err = 0;
if (up->hard) {
uid_t loginuid = NETLINK_CB(skb).loginuid;
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 0b94d2fa3a88..e4deb73e9a84 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -82,7 +82,7 @@ ifneq ($(strip $(lib-y) $(lib-m) $(lib-n) $(lib-)),)
lib-target := $(obj)/lib.a
endif
-ifneq ($(strip $(obj-y) $(obj-m) $(obj-n) $(obj-) $(lib-target)),)
+ifneq ($(strip $(obj-y) $(obj-m) $(obj-n) $(obj-) $(subdir-m) $(lib-target)),)
builtin-target := $(obj)/built-in.o
endif
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index f9bdf264473d..54fd1b700131 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -241,7 +241,11 @@ cmd_lzma = (cat $(filter-out FORCE,$^) | \
lzma -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \
(rm -f $@ ; false)
-quiet_cmd_lzo = LZO $@
+quiet_cmd_lzo = LZO $@
cmd_lzo = (cat $(filter-out FORCE,$^) | \
lzop -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \
(rm -f $@ ; false)
+
+# misc stuff
+# ---------------------------------------------------------------------------
+quote:="
diff --git a/scripts/checkincludes.pl b/scripts/checkincludes.pl
index 676ddc07d6fa..97b2c6143fe4 100755
--- a/scripts/checkincludes.pl
+++ b/scripts/checkincludes.pl
@@ -11,6 +11,8 @@
# you do have real dups and do not have them under #ifdef's. You
# could also just review the results.
+use strict;
+
sub usage {
print "Usage: checkincludes.pl [-r]\n";
print "By default we just warn of duplicates\n";
@@ -35,23 +37,24 @@ if ($#ARGV >= 1) {
}
}
-foreach $file (@ARGV) {
- open(FILE, $file) or die "Cannot open $file: $!.\n";
+foreach my $file (@ARGV) {
+ open(my $f, '<', $file)
+ or die "Cannot open $file: $!.\n";
my %includedfiles = ();
my @file_lines = ();
- while (<FILE>) {
+ while (<$f>) {
if (m/^\s*#\s*include\s*[<"](\S*)[>"]/o) {
++$includedfiles{$1};
}
push(@file_lines, $_);
}
- close(FILE);
+ close($f);
if (!$remove) {
- foreach $filename (keys %includedfiles) {
+ foreach my $filename (keys %includedfiles) {
if ($includedfiles{$filename} > 1) {
print "$file: $filename is included more than once.\n";
}
@@ -59,27 +62,28 @@ foreach $file (@ARGV) {
next;
}
- open(FILE,">$file") || die("Cannot write to $file: $!");
+ open($f, '>', $file)
+ or die("Cannot write to $file: $!");
my $dups = 0;
foreach (@file_lines) {
if (m/^\s*#\s*include\s*[<"](\S*)[>"]/o) {
- foreach $filename (keys %includedfiles) {
+ foreach my $filename (keys %includedfiles) {
if ($1 eq $filename) {
if ($includedfiles{$filename} > 1) {
$includedfiles{$filename}--;
$dups++;
} else {
- print FILE $_;
+ print {$f} $_;
}
}
}
} else {
- print FILE $_;
+ print {$f} $_;
}
}
if ($dups > 0) {
print "$file: removed $dups duplicate includes\n";
}
- close(FILE);
+ close($f);
}
diff --git a/scripts/checkstack.pl b/scripts/checkstack.pl
index 14ee68e991dd..1afff6658a7d 100755
--- a/scripts/checkstack.pl
+++ b/scripts/checkstack.pl
@@ -21,6 +21,8 @@
#
# TODO : Port to all architectures (one regex per arch)
+use strict;
+
# check for arch
#
# $re is used for two matches:
@@ -104,19 +106,11 @@ my (@stack, $re, $dre, $x, $xs);
}
}
-sub bysize($) {
- my ($asize, $bsize);
- ($asize = $a) =~ s/.*: *(.*)$/$1/;
- ($bsize = $b) =~ s/.*: *(.*)$/$1/;
- $bsize <=> $asize
-}
-
#
# main()
#
my $funcre = qr/^$x* <(.*)>:$/;
-my $func;
-my $file, $lastslash;
+my ($func, $file, $lastslash);
while (my $line = <STDIN>) {
if ($line =~ m/$funcre/) {
@@ -173,4 +167,6 @@ while (my $line = <STDIN>) {
}
}
-print sort bysize @stack;
+# Sort output by size (last field)
+print sort { ($b =~ /:\t*(\d+)$/)[0] <=> ($a =~ /:\t*(\d+)$/)[0] } @stack;
+
diff --git a/scripts/checkversion.pl b/scripts/checkversion.pl
index ec7d21161bdc..b444e89a0095 100755
--- a/scripts/checkversion.pl
+++ b/scripts/checkversion.pl
@@ -5,23 +5,22 @@
# including <linux/version.h> that don't need it.
# Copyright (C) 2003, Randy Dunlap <rdunlap@xenotime.net>
+use strict;
+
$| = 1;
-my $debugging = 0;
+my $debugging;
-foreach $file (@ARGV)
-{
+foreach my $file (@ARGV) {
# Open this file.
- open(FILE, $file) || die "Can't open $file: $!\n";
+ open( my $f, '<', $file )
+ or die "Can't open $file: $!\n";
# Initialize variables.
- my $fInComment = 0;
- my $fInString = 0;
- my $fUseVersion = 0;
+ my ($fInComment, $fInString, $fUseVersion);
my $iLinuxVersion = 0;
- LINE: while ( <FILE> )
- {
+ while (<$f>) {
# Strip comments.
$fInComment && (s+^.*?\*/+ +o ? ($fInComment = 0) : next);
m+/\*+o && (s+/\*.*?\*/+ +go, (s+/\*.*$+ +o && ($fInComment = 1)));
@@ -43,8 +42,8 @@ foreach $file (@ARGV)
# Look for uses: LINUX_VERSION_CODE, KERNEL_VERSION, UTS_RELEASE
if (($_ =~ /LINUX_VERSION_CODE/) || ($_ =~ /\WKERNEL_VERSION/)) {
$fUseVersion = 1;
- last LINE if $iLinuxVersion;
- }
+ last if $iLinuxVersion;
+ }
}
# Report used version IDs without include?
@@ -67,5 +66,5 @@ foreach $file (@ARGV)
}
}
- close(FILE);
+ close($f);
}
diff --git a/scripts/decodecode b/scripts/decodecode
index 4b00647814bc..8b30cc36744f 100755
--- a/scripts/decodecode
+++ b/scripts/decodecode
@@ -7,7 +7,7 @@
# AFLAGS=--32 decodecode < 386.oops
cleanup() {
- rm -f $T $T.s $T.o $T.oo $T.aa $T.aaa
+ rm -f $T $T.s $T.o $T.oo $T.aa $T.dis
exit 1
}
@@ -39,6 +39,29 @@ fi
echo $code
code=`echo $code | sed -e 's/.*Code: //'`
+width=`expr index "$code" ' '`
+width=$[($width-1)/2]
+case $width in
+1) type=byte ;;
+2) type=2byte ;;
+4) type=4byte ;;
+esac
+
+disas() {
+ ${CROSS_COMPILE}as $AFLAGS -o $1.o $1.s &> /dev/null
+
+ if [ "$ARCH" == "arm" ]; then
+ if [ $width == 2 ]; then
+ OBJDUMPFLAGS="-M force-thumb"
+ fi
+
+ ${CROSS_COMPILE}strip $1.o
+ fi
+
+ ${CROSS_COMPILE}objdump $OBJDUMPFLAGS -S $1.o | \
+ grep -v "/tmp\|Disassembly\|\.text\|^$" &> $1.dis
+}
+
marker=`expr index "$code" "\<"`
if [ $marker -eq 0 ]; then
marker=`expr index "$code" "\("`
@@ -49,26 +72,25 @@ if [ $marker -ne 0 ]; then
echo All code >> $T.oo
echo ======== >> $T.oo
beforemark=`echo "$code"`
- echo -n " .byte 0x" > $T.s
- echo $beforemark | sed -e 's/ /,0x/g' | sed -e 's/<//g' | sed -e 's/>//g' >> $T.s
- as $AFLAGS -o $T.o $T.s &> /dev/null
- objdump -S $T.o | grep -v "/tmp" | grep -v "Disassembly" | grep -v "\.text" | grep -v "^$" &> $T.ooo
- cat $T.ooo >> $T.oo
- rm -f $T.o $T.s $T.ooo
+ echo -n " .$type 0x" > $T.s
+ echo $beforemark | sed -e 's/ /,0x/g; s/[<>()]//g' >> $T.s
+ disas $T
+ cat $T.dis >> $T.oo
+ rm -f $T.o $T.s $T.dis
# and fix code at-and-after marker
code=`echo "$code" | cut -c$((${marker} + 1))-`
fi
echo Code starting with the faulting instruction > $T.aa
echo =========================================== >> $T.aa
-code=`echo $code | sed -e 's/ [<(]/ /;s/[>)] / /;s/ /,0x/g'`
-echo -n " .byte 0x" > $T.s
+code=`echo $code | sed -e 's/ [<(]/ /;s/[>)] / /;s/ /,0x/g; s/[>)]$//'`
+echo -n " .$type 0x" > $T.s
echo $code >> $T.s
-as $AFLAGS -o $T.o $T.s &> /dev/null
-objdump -S $T.o | grep -v "Disassembly" | grep -v "/tmp" | grep -v "\.text" | grep -v "^$" &> $T.aaa
-cat $T.aaa >> $T.aa
+disas $T
+cat $T.dis >> $T.aa
-faultline=`cat $T.aaa | head -1 | cut -d":" -f2`
+faultline=`cat $T.dis | head -1 | cut -d":" -f2`
+faultline=`echo "$faultline" | sed -e 's/\[/\\\[/g; s/\]/\\\]/g'`
cat $T.oo | sed -e "s/\($faultline\)/\*\1 <-- trapping instruction/g"
echo
diff --git a/scripts/export_report.pl b/scripts/export_report.pl
index 705b5ba7c152..04dce7c15f83 100644
--- a/scripts/export_report.pl
+++ b/scripts/export_report.pl
@@ -49,10 +49,10 @@ sub usage {
}
sub collectcfiles {
- my @file = `cat .tmp_versions/*.mod | grep '.*\.ko\$'`;
- @file = grep {s/\.ko/.mod.c/} @file;
- chomp @file;
- return @file;
+ my @file
+ = `cat .tmp_versions/*.mod | grep '.*\.ko\$' | sed s/\.ko$/.mod.c/`;
+ chomp @file;
+ return @file;
}
my (%SYMBOL, %MODULE, %opt, @allcfiles);
@@ -71,37 +71,40 @@ if (not defined $opt{'k'}) {
$opt{'k'} = "Module.symvers";
}
-unless (open(MODULE_SYMVERS, $opt{'k'})) {
- die "Sorry, cannot open $opt{'k'}: $!\n";
-}
+open (my $module_symvers, '<', $opt{'k'})
+ or die "Sorry, cannot open $opt{'k'}: $!\n";
if (defined $opt{'o'}) {
- unless (open(OUTPUT_HANDLE, ">$opt{'o'}")) {
- die "Sorry, cannot open $opt{'o'} $!\n";
- }
- select OUTPUT_HANDLE;
+ open (my $out, '>', $opt{'o'})
+ or die "Sorry, cannot open $opt{'o'} $!\n";
+
+ select $out;
}
+
#
# collect all the symbols and their attributes from the
# Module.symvers file
#
-while ( <MODULE_SYMVERS> ) {
+while ( <$module_symvers> ) {
chomp;
my (undef, $symbol, $module, $gpl) = split;
$SYMBOL { $symbol } = [ $module , "0" , $symbol, $gpl];
}
-close(MODULE_SYMVERS);
+close($module_symvers);
#
# collect the usage count of each symbol.
#
foreach my $thismod (@allcfiles) {
- unless (open(MODULE_MODULE, $thismod)) {
- print "Sorry, cannot open $thismod: $!\n";
+ my $module;
+
+ unless (open ($module, '<', $thismod)) {
+ warn "Sorry, cannot open $thismod: $!\n";
next;
}
+
my $state=0;
- while ( <MODULE_MODULE> ) {
+ while ( <$module> ) {
chomp;
if ($state == 0) {
$state = 1 if ($_ =~ /static const struct modversion_info/);
@@ -124,7 +127,7 @@ foreach my $thismod (@allcfiles) {
if ($state != 2) {
print "WARNING:$thismod is not built with CONFIG_MODVERSION enabled\n";
}
- close(MODULE_MODULE);
+ close($module);
}
print "\tThis file reports the exported symbols usage patterns by in-tree\n",
diff --git a/scripts/gen_initramfs_list.sh b/scripts/gen_initramfs_list.sh
index 76af5f9623e3..f3b5c05ced40 100644
--- a/scripts/gen_initramfs_list.sh
+++ b/scripts/gen_initramfs_list.sh
@@ -202,6 +202,7 @@ input_file() {
print_mtime "$1" >> ${output}
cat "$1" >> ${output}
else
+ echo "$1 \\"
cat "$1" | while read type dir file perm ; do
if [ "$type" == "file" ]; then
echo "$file \\";
@@ -231,7 +232,7 @@ arg="$1"
case "$arg" in
"-l") # files included in initramfs - used by kbuild
dep_list="list_"
- echo "deps_initramfs := \\"
+ echo "deps_initramfs := $0 \\"
shift
;;
"-o") # generate compressed cpio image named $1
diff --git a/scripts/genksyms/genksyms.c b/scripts/genksyms/genksyms.c
index af6b8363a2d5..f99115ebe925 100644
--- a/scripts/genksyms/genksyms.c
+++ b/scripts/genksyms/genksyms.c
@@ -758,8 +758,10 @@ int main(int argc, char **argv)
/* setlinebuf(debugfile); */
}
- if (flag_reference)
+ if (flag_reference) {
read_reference(ref_file);
+ fclose(ref_file);
+ }
yyparse();
diff --git a/scripts/headerdep.pl b/scripts/headerdep.pl
index b7f6c560e24d..8dd019bc5a73 100755
--- a/scripts/headerdep.pl
+++ b/scripts/headerdep.pl
@@ -80,8 +80,7 @@ sub search {
my $path = "$i/$filename";
return $path if -f $path;
}
-
- return undef;
+ return;
}
sub parse_all {
diff --git a/scripts/headers_check.pl b/scripts/headers_check.pl
index db1dd7a549f2..50d6cfd1fa77 100644
--- a/scripts/headers_check.pl
+++ b/scripts/headers_check.pl
@@ -28,11 +28,12 @@ my $lineno = 0;
my $filename;
foreach my $file (@files) {
- local *FH;
$filename = $file;
- open(FH, "<$filename") or die "$filename: $!\n";
+
+ open(my $fh, '<', $filename)
+ or die "$filename: $!\n";
$lineno = 0;
- while ($line = <FH>) {
+ while ($line = <$fh>) {
$lineno++;
&check_include();
&check_asm_types();
@@ -40,7 +41,7 @@ foreach my $file (@files) {
&check_declarations();
# Dropped for now. Too much noise &check_config();
}
- close FH;
+ close $fh;
}
exit $ret;
@@ -78,7 +79,7 @@ sub check_config
}
my $linux_asm_types;
-sub check_asm_types()
+sub check_asm_types
{
if ($filename =~ /types.h|int-l64.h|int-ll64.h/o) {
return;
diff --git a/scripts/headers_install.pl b/scripts/headers_install.pl
index b89ca2c58fdb..4ca3be3b2e50 100644
--- a/scripts/headers_install.pl
+++ b/scripts/headers_install.pl
@@ -23,13 +23,13 @@ my ($readdir, $installdir, $arch, @files) = @ARGV;
my $unifdef = "scripts/unifdef -U__KERNEL__ -D__EXPORTED_HEADERS__";
foreach my $file (@files) {
- local *INFILE;
- local *OUTFILE;
my $tmpfile = "$installdir/$file.tmp";
- open(INFILE, "<$readdir/$file")
- or die "$readdir/$file: $!\n";
- open(OUTFILE, ">$tmpfile") or die "$tmpfile: $!\n";
- while (my $line = <INFILE>) {
+
+ open(my $in, '<', "$readdir/$file")
+ or die "$readdir/$file: $!\n";
+ open(my $out, '>', $tmpfile)
+ or die "$tmpfile: $!\n";
+ while (my $line = <$in>) {
$line =~ s/([\s(])__user\s/$1/g;
$line =~ s/([\s(])__force\s/$1/g;
$line =~ s/([\s(])__iomem\s/$1/g;
@@ -39,10 +39,11 @@ foreach my $file (@files) {
$line =~ s/(^|\s)(inline)\b/$1__$2__/g;
$line =~ s/(^|\s)(asm)\b(\s|[(]|$)/$1__$2__$3/g;
$line =~ s/(^|\s|[(])(volatile)\b(\s|[(]|$)/$1__$2__$3/g;
- printf OUTFILE "%s", $line;
+ printf {$out} "%s", $line;
}
- close OUTFILE;
- close INFILE;
+ close $out;
+ close $in;
+
system $unifdef . " $tmpfile > $installdir/$file";
unlink $tmpfile;
}
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 86c3896a1e01..e3902fb39afd 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -108,8 +108,10 @@ static int read_symbol(FILE *in, struct sym_entry *s)
rc = fscanf(in, "%llx %c %499s\n", &s->addr, &stype, str);
if (rc != 3) {
if (rc != EOF) {
- /* skip line */
- fgets(str, 500, in);
+ /* Skip the line. sym is used as a dummy to
+ * shut off the "warn_unused_result" warning.
+ */
+ sym = fgets(str, 500, in);
}
return -1;
}
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index 186c46604d06..7cdae39b8f09 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -23,6 +23,9 @@ menuconfig: $(obj)/mconf
config: $(obj)/conf
$< $(Kconfig)
+nconfig: $(obj)/nconf
+ $< $(Kconfig)
+
oldconfig: $(obj)/conf
$< -o $(Kconfig)
@@ -120,6 +123,7 @@ endif
# Help text used by make help
help:
@echo ' config - Update current config utilising a line-oriented program'
+ @echo ' nconfig - Update current config utilising an ncurses menu based program'
@echo ' menuconfig - Update current config utilising a menu based program'
@echo ' xconfig - Update current config utilising a QT based front-end'
@echo ' gconfig - Update current config utilising a GTK based front-end'
@@ -147,6 +151,8 @@ HOST_EXTRACFLAGS += -DLOCALE
# ===========================================================================
# Shared Makefile for the various kconfig executables:
# conf: Used for defconfig, oldconfig and related targets
+# nconf: Used for the nconfig target.
+# Utilizes ncurses
# mconf: Used for the menuconfig target
# Utilizes the lxdialog package
# qconf: Used for the xconfig target
@@ -159,11 +165,16 @@ lxdialog := lxdialog/checklist.o lxdialog/util.o lxdialog/inputbox.o
lxdialog += lxdialog/textbox.o lxdialog/yesno.o lxdialog/menubox.o
conf-objs := conf.o zconf.tab.o
-mconf-objs := mconf.o zconf.tab.o $(lxdialog)
+mconf-objs := mconf.o zconf.tab.o $(lxdialog)
+nconf-objs := nconf.o zconf.tab.o nconf.gui.o
kxgettext-objs := kxgettext.o zconf.tab.o
hostprogs-y := conf qconf gconf kxgettext
+ifeq ($(MAKECMDGOALS),nconfig)
+ hostprogs-y += nconf
+endif
+
ifeq ($(MAKECMDGOALS),menuconfig)
hostprogs-y += mconf
endif
@@ -187,7 +198,7 @@ endif
clean-files := lkc_defs.h qconf.moc .tmp_qtcheck \
.tmp_gtkcheck zconf.tab.c lex.zconf.c zconf.hash.c gconf.glade.h
-clean-files += mconf qconf gconf
+clean-files += mconf qconf gconf nconf
clean-files += config.pot linux.pot
# Check that we have the required ncurses stuff installed for lxdialog (menuconfig)
@@ -212,6 +223,7 @@ HOSTLOADLIBES_gconf = `pkg-config --libs gtk+-2.0 gmodule-2.0 libglade-2.0`
HOSTCFLAGS_gconf.o = `pkg-config --cflags gtk+-2.0 gmodule-2.0 libglade-2.0` \
-D LKC_DIRECT_LINK
+HOSTLOADLIBES_nconf = -lmenu -lpanel -lncurses
$(obj)/qconf.o: $(obj)/.tmp_qtcheck
ifeq ($(qconf-target),1)
diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c
index edd3f39a080a..d83f2322893a 100644
--- a/scripts/kconfig/expr.c
+++ b/scripts/kconfig/expr.c
@@ -1097,9 +1097,32 @@ void expr_fprint(struct expr *e, FILE *out)
static void expr_print_gstr_helper(void *data, struct symbol *sym, const char *str)
{
- str_append((struct gstr*)data, str);
+ struct gstr *gs = (struct gstr*)data;
+ const char *sym_str = NULL;
+
+ if (sym)
+ sym_str = sym_get_string_value(sym);
+
+ if (gs->max_width) {
+ unsigned extra_length = strlen(str);
+ const char *last_cr = strrchr(gs->s, '\n');
+ unsigned last_line_length;
+
+ if (sym_str)
+ extra_length += 4 + strlen(sym_str);
+
+ if (!last_cr)
+ last_cr = gs->s;
+
+ last_line_length = strlen(gs->s) - (last_cr - gs->s);
+
+ if ((last_line_length + extra_length) > gs->max_width)
+ str_append(gs, "\\\n");
+ }
+
+ str_append(gs, str);
if (sym)
- str_printf((struct gstr*)data, " [=%s]", sym_get_string_value(sym));
+ str_printf(gs, " [=%s]", sym_str);
}
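A stand-alone sketch of the wrapping rule added above, with the gstr type mocked away so only the length arithmetic remains; the 4 extra characters mirror the " [=" ... "]" decoration appended when a symbol value follows.

#include <stdio.h>
#include <string.h>

static int needs_wrap(const char *buf, const char *tok,
		      const char *sym_val, unsigned int max_width)
{
	const char *last_cr = strrchr(buf, '\n');
	unsigned int extra = strlen(tok);
	unsigned int last_line_len;

	if (sym_val)
		extra += 4 + strlen(sym_val);	/* " [=" value "]" */

	if (!last_cr)
		last_cr = buf;
	last_line_len = strlen(buf) - (last_cr - buf);

	return (last_line_len + extra) > max_width;
}

int main(void)
{
	/* the pending "FOO [=y]" (8 chars) overflows a width of 20 here */
	printf("%d\n", needs_wrap("config FRAGMENT\nDEPENDS: A && B", "FOO", "y", 20));
	return 0;
}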
void expr_gstr_print(struct expr *e, struct gstr *gs)
diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h
index 6408fefae083..891cd9ce9ba2 100644
--- a/scripts/kconfig/expr.h
+++ b/scripts/kconfig/expr.h
@@ -86,7 +86,7 @@ struct symbol {
struct expr_value rev_dep;
};
-#define for_all_symbols(i, sym) for (i = 0; i < 257; i++) for (sym = symbol_hash[i]; sym; sym = sym->next) if (sym->type != S_OTHER)
+#define for_all_symbols(i, sym) for (i = 0; i < SYMBOL_HASHSIZE; i++) for (sym = symbol_hash[i]; sym; sym = sym->next) if (sym->type != S_OTHER)
#define SYMBOL_CONST 0x0001 /* symbol is const */
#define SYMBOL_CHECK 0x0008 /* used during dependency checking */
@@ -108,8 +108,7 @@ struct symbol {
#define SYMBOL_DEF4 0x80000 /* symbol.def[S_DEF_4] is valid */
#define SYMBOL_MAXLENGTH 256
-#define SYMBOL_HASHSIZE 257
-#define SYMBOL_HASHMASK 0xff
+#define SYMBOL_HASHSIZE 9973
/* A property represent the config options that can be associated
* with a config "symbol".
diff --git a/scripts/kconfig/gconf.c b/scripts/kconfig/gconf.c
index 65464366fe38..bef10411837f 100644
--- a/scripts/kconfig/gconf.c
+++ b/scripts/kconfig/gconf.c
@@ -30,13 +30,16 @@ enum {
SINGLE_VIEW, SPLIT_VIEW, FULL_VIEW
};
+enum {
+ OPT_NORMAL, OPT_ALL, OPT_PROMPT
+};
+
static gint view_mode = FULL_VIEW;
static gboolean show_name = TRUE;
static gboolean show_range = TRUE;
static gboolean show_value = TRUE;
-static gboolean show_all = FALSE;
-static gboolean show_debug = FALSE;
static gboolean resizeable = FALSE;
+static int opt_mode = OPT_NORMAL;
GtkWidget *main_wnd = NULL;
GtkWidget *tree1_w = NULL; // left frame
@@ -76,36 +79,7 @@ static void conf_changed(void);
/* Helping/Debugging Functions */
-
-const char *dbg_print_stype(int val)
-{
- static char buf[256];
-
- bzero(buf, 256);
-
- if (val == S_UNKNOWN)
- strcpy(buf, "unknown");
- if (val == S_BOOLEAN)
- strcpy(buf, "boolean");
- if (val == S_TRISTATE)
- strcpy(buf, "tristate");
- if (val == S_INT)
- strcpy(buf, "int");
- if (val == S_HEX)
- strcpy(buf, "hex");
- if (val == S_STRING)
- strcpy(buf, "string");
- if (val == S_OTHER)
- strcpy(buf, "other");
-
-#ifdef DEBUG
- printf("%s", buf);
-#endif
-
- return buf;
-}
-
-const char *dbg_print_flags(int val)
+const char *dbg_sym_flags(int val)
{
static char buf[256];
@@ -131,40 +105,10 @@ const char *dbg_print_flags(int val)
strcat(buf, "auto/");
buf[strlen(buf) - 1] = '\0';
-#ifdef DEBUG
- printf("%s", buf);
-#endif
-
- return buf;
-}
-
-const char *dbg_print_ptype(int val)
-{
- static char buf[256];
-
- bzero(buf, 256);
-
- if (val == P_UNKNOWN)
- strcpy(buf, "unknown");
- if (val == P_PROMPT)
- strcpy(buf, "prompt");
- if (val == P_COMMENT)
- strcpy(buf, "comment");
- if (val == P_MENU)
- strcpy(buf, "menu");
- if (val == P_DEFAULT)
- strcpy(buf, "default");
- if (val == P_CHOICE)
- strcpy(buf, "choice");
-
-#ifdef DEBUG
- printf("%s", buf);
-#endif
return buf;
}
-
void replace_button_icon(GladeXML * xml, GdkDrawable * window,
GtkStyle * style, gchar * btn_name, gchar ** xpm)
{
@@ -697,20 +641,29 @@ void on_show_data1_activate(GtkMenuItem * menuitem, gpointer user_data)
void
-on_show_all_options1_activate(GtkMenuItem * menuitem, gpointer user_data)
+on_set_option_mode1_activate(GtkMenuItem *menuitem, gpointer user_data)
{
- show_all = GTK_CHECK_MENU_ITEM(menuitem)->active;
+ opt_mode = OPT_NORMAL;
+ gtk_tree_store_clear(tree2);
+ display_tree(&rootmenu); /* instead of update_tree to speed-up */
+}
+
+void
+on_set_option_mode2_activate(GtkMenuItem *menuitem, gpointer user_data)
+{
+ opt_mode = OPT_ALL;
gtk_tree_store_clear(tree2);
- display_tree(&rootmenu); // instead of update_tree to speed-up
+ display_tree(&rootmenu); /* instead of update_tree to speed-up */
}
void
-on_show_debug_info1_activate(GtkMenuItem * menuitem, gpointer user_data)
+on_set_option_mode3_activate(GtkMenuItem *menuitem, gpointer user_data)
{
- show_debug = GTK_CHECK_MENU_ITEM(menuitem)->active;
- update_tree(&rootmenu, NULL);
+ opt_mode = OPT_PROMPT;
+ gtk_tree_store_clear(tree2);
+ display_tree(&rootmenu); /* instead of update_tree to speed-up */
}
@@ -1163,7 +1116,10 @@ static gchar **fill_row(struct menu *menu)
g_strdup_printf("%s %s", _(menu_get_prompt(menu)),
sym && sym_has_value(sym) ? "(NEW)" : "");
- if (show_all && !menu_is_visible(menu))
+ if (opt_mode == OPT_ALL && !menu_is_visible(menu))
+ row[COL_COLOR] = g_strdup("DarkGray");
+ else if (opt_mode == OPT_PROMPT &&
+ menu_has_prompt(menu) && !menu_is_visible(menu))
row[COL_COLOR] = g_strdup("DarkGray");
else
row[COL_COLOR] = g_strdup("Black");
@@ -1386,16 +1342,19 @@ static void update_tree(struct menu *src, GtkTreeIter * dst)
menu2 ? menu_get_prompt(menu2) : "nil");
#endif
- if (!menu_is_visible(child1) && !show_all) { // remove node
+ if ((opt_mode == OPT_NORMAL && !menu_is_visible(child1)) ||
+ (opt_mode == OPT_PROMPT && !menu_has_prompt(child1))) {
+
+ /* remove node */
if (gtktree_iter_find_node(dst, menu1) != NULL) {
memcpy(&tmp, child2, sizeof(GtkTreeIter));
valid = gtk_tree_model_iter_next(model2,
child2);
gtk_tree_store_remove(tree2, &tmp);
if (!valid)
- return; // next parent
+ return; /* next parent */
else
- goto reparse; // next child
+ goto reparse; /* next child */
} else
continue;
}
@@ -1464,17 +1423,19 @@ static void display_tree(struct menu *menu)
&& (tree == tree2))
continue;
- if (menu_is_visible(child) || show_all)
+ if ((opt_mode == OPT_NORMAL && menu_is_visible(child)) ||
+ (opt_mode == OPT_PROMPT && menu_has_prompt(child)) ||
+ (opt_mode == OPT_ALL))
place_node(child, fill_row(child));
#ifdef DEBUG
printf("%*c%s: ", indent, ' ', menu_get_prompt(child));
printf("%s", child->flags & MENU_ROOT ? "rootmenu | " : "");
- dbg_print_ptype(ptype);
+ printf("%s", prop_get_type_name(ptype));
printf(" | ");
if (sym) {
- dbg_print_stype(sym->type);
+ printf("%s", sym_type_name(sym->type));
printf(" | ");
- dbg_print_flags(sym->flags);
+ printf("%s", dbg_sym_flags(sym->flags));
printf("\n");
} else
printf("\n");
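The gconf.c hunks above replace the old show_all/show_debug booleans with a single three-way opt_mode filter. As an illustrative sketch (not taken from the patch; gconf.c keeps the checks inline in update_tree() and display_tree()), the filtering both paths now apply boils down to one predicate:

/* Sketch: how opt_mode decides whether a menu entry is listed. */
static gboolean menu_is_shown(struct menu *menu)
{
	switch (opt_mode) {
	case OPT_NORMAL:		/* classic behaviour: only visible entries */
		return menu_is_visible(menu);
	case OPT_PROMPT:		/* every entry that has a prompt, visible or not */
		return menu_has_prompt(menu);
	case OPT_ALL:			/* everything */
	default:
		return TRUE;
	}
}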
diff --git a/scripts/kconfig/gconf.glade b/scripts/kconfig/gconf.glade
index b1c86c19292c..d52b0a75d824 100644
--- a/scripts/kconfig/gconf.glade
+++ b/scripts/kconfig/gconf.glade
@@ -190,26 +190,40 @@
</child>
<child>
- <widget class="GtkCheckMenuItem" id="show_all_options1">
+ <widget class="GtkRadioMenuItem" id="set_option_mode1">
+ <property name="visible">True</property>
+ <property name="tooltip" translatable="yes">Show normal options</property>
+ <property name="label" translatable="yes">Show normal options</property>
+ <property name="use_underline">True</property>
+ <property name="active">True</property>
+ <signal name="activate" handler="on_set_option_mode1_activate"/>
+ </widget>
+ </child>
+
+ <child>
+ <widget class="GtkRadioMenuItem" id="set_option_mode2">
<property name="visible">True</property>
<property name="tooltip" translatable="yes">Show all options</property>
<property name="label" translatable="yes">Show all _options</property>
<property name="use_underline">True</property>
<property name="active">False</property>
- <signal name="activate" handler="on_show_all_options1_activate"/>
+ <property name="group">set_option_mode1</property>
+ <signal name="activate" handler="on_set_option_mode2_activate"/>
</widget>
</child>
<child>
- <widget class="GtkCheckMenuItem" id="show_debug_info1">
+ <widget class="GtkRadioMenuItem" id="set_option_mode3">
<property name="visible">True</property>
- <property name="tooltip" translatable="yes">Show masked options</property>
- <property name="label" translatable="yes">Show _debug info</property>
+ <property name="tooltip" translatable="yes">Show all options with prompts</property>
+ <property name="label" translatable="yes">Show all prompt options</property>
<property name="use_underline">True</property>
<property name="active">False</property>
- <signal name="activate" handler="on_show_debug_info1_activate"/>
+ <property name="group">set_option_mode1</property>
+ <signal name="activate" handler="on_set_option_mode3_activate"/>
</widget>
</child>
+
</widget>
</child>
</widget>
diff --git a/scripts/kconfig/lkc.h b/scripts/kconfig/lkc.h
index f379b0bf8c9e..ce6549cdaccf 100644
--- a/scripts/kconfig/lkc.h
+++ b/scripts/kconfig/lkc.h
@@ -84,7 +84,7 @@ void conf_set_all_new_symbols(enum conf_def_mode mode);
void kconfig_load(void);
/* menu.c */
-void menu_init(void);
+void _menu_init(void);
void menu_warn(struct menu *menu, const char *fmt, ...);
struct menu *menu_add_menu(void);
void menu_end_menu(void);
@@ -106,6 +106,11 @@ int file_write_dep(const char *name);
struct gstr {
size_t len;
char *s;
+ /*
+ * If max_width is not zero, long lines in string s (if any) are
+ * wrapped so that they do not exceed max_width.
+ */
+ int max_width;
};
struct gstr str_new(void);
struct gstr str_assign(const char *s);
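The new max_width field is opt-in: leaving it at 0 keeps the old unwrapped behaviour, while a non-zero value asks the gstr helpers (in util.c, not shown in this hunk) to wrap appended text. A short usage sketch, mirroring the show_help() change to mconf.c further down (getmaxx() is the standard ncurses call):

/* Sketch: requesting wrapped help text inside a show_help()-style function. */
struct gstr help = str_new();
help.max_width = getmaxx(stdscr) - 10;	/* wrap to the screen width, minus a margin */
menu_get_ext_help(menu, &help);		/* appended lines longer than max_width get wrapped */
show_helptext(_(menu_get_prompt(menu)), str_get(&help));
str_free(&help);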
diff --git a/scripts/kconfig/lkc_proto.h b/scripts/kconfig/lkc_proto.h
index ffeb532b2cff..7cadcad8233b 100644
--- a/scripts/kconfig/lkc_proto.h
+++ b/scripts/kconfig/lkc_proto.h
@@ -11,13 +11,15 @@ P(conf_set_changed_callback, void,(void (*fn)(void)));
/* menu.c */
P(rootmenu,struct menu,);
-P(menu_is_visible,bool,(struct menu *menu));
+P(menu_is_visible, bool, (struct menu *menu));
+P(menu_has_prompt, bool, (struct menu *menu));
P(menu_get_prompt,const char *,(struct menu *menu));
P(menu_get_root_menu,struct menu *,(struct menu *menu));
P(menu_get_parent_menu,struct menu *,(struct menu *menu));
P(menu_has_help,bool,(struct menu *menu));
P(menu_get_help,const char *,(struct menu *menu));
-P(get_symbol_str,void,(struct gstr *r, struct symbol *sym));
+P(get_symbol_str, void, (struct gstr *r, struct symbol *sym));
+P(get_relations_str, struct gstr, (struct symbol **sym_arr));
P(menu_get_ext_help,void,(struct menu *menu, struct gstr *help));
/* symbol.c */
diff --git a/scripts/kconfig/lxdialog/inputbox.c b/scripts/kconfig/lxdialog/inputbox.c
index 616c60138183..dd8e587c50e2 100644
--- a/scripts/kconfig/lxdialog/inputbox.c
+++ b/scripts/kconfig/lxdialog/inputbox.c
@@ -180,7 +180,7 @@ do_resize:
case KEY_LEFT:
switch (button) {
case -1:
- button = 1; /* Indicates "Cancel" button is selected */
+ button = 1; /* Indicates "Help" button is selected */
print_buttons(dialog, height, width, 1);
break;
case 0:
@@ -204,7 +204,7 @@ do_resize:
print_buttons(dialog, height, width, 0);
break;
case 0:
- button = 1; /* Indicates "Cancel" button is selected */
+ button = 1; /* Indicates "Help" button is selected */
print_buttons(dialog, height, width, 1);
break;
case 1:
diff --git a/scripts/kconfig/lxdialog/menubox.c b/scripts/kconfig/lxdialog/menubox.c
index fa9d633f293c..1d604738fa13 100644
--- a/scripts/kconfig/lxdialog/menubox.c
+++ b/scripts/kconfig/lxdialog/menubox.c
@@ -383,6 +383,10 @@ do_resize:
case 'n':
case 'm':
case '/':
+ case 'h':
+ case '?':
+ case 'z':
+ case '\n':
/* save scroll info */
*s_scroll = scroll;
delwin(menu);
@@ -390,8 +394,10 @@ do_resize:
item_set(scroll + choice);
item_set_selected(1);
switch (key) {
+ case 'h':
+ case '?':
+ return 2;
case 's':
- return 3;
case 'y':
return 3;
case 'n':
@@ -402,18 +408,12 @@ do_resize:
return 6;
case '/':
return 7;
+ case 'z':
+ return 8;
+ case '\n':
+ return button;
}
return 0;
- case 'h':
- case '?':
- button = 2;
- case '\n':
- *s_scroll = scroll;
- delwin(menu);
- delwin(dialog);
- item_set(scroll + choice);
- item_set_selected(1);
- return button;
case 'e':
case 'x':
key = KEY_ESC;
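Taken together with the mconf.c hunk that follows, this menubox.c change gives the new keys their own return codes from dialog_menu() instead of falling through to the button logic. A condensed sketch of how conf() in mconf.c reacts to them (variable names are illustrative; case 8 is the code added here):

switch (res) {		/* res = dialog_menu(...) */
case 2:			/* 'h' or '?': help for the highlighted entry */
	show_help(submenu);
	break;
case 8:			/* 'z': toggle display of hidden options */
	show_all_options = !show_all_options;
	break;
}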
diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c
index 8413cf38ed27..2c83d3234d30 100644
--- a/scripts/kconfig/mconf.c
+++ b/scripts/kconfig/mconf.c
@@ -67,13 +67,15 @@ static const char mconf_readme[] = N_(
" there is a delayed response which you may find annoying.\n"
"\n"
" Also, the <TAB> and cursor keys will cycle between <Select>,\n"
-" <Exit> and <Help>\n"
+" <Exit> and <Help>.\n"
"\n"
"o To get help with an item, use the cursor keys to highlight <Help>\n"
-" and Press <ENTER>.\n"
+" and press <ENTER>.\n"
"\n"
" Shortcut: Press <H> or <?>.\n"
"\n"
+"o To show hidden options, press <Z>.\n"
+"\n"
"\n"
"Radiolists (Choice lists)\n"
"-----------\n"
@@ -272,6 +274,7 @@ static int indent;
static struct menu *current_menu;
static int child_count;
static int single_menu_mode;
+static int show_all_options;
static void conf(struct menu *menu);
static void conf_choice(struct menu *menu);
@@ -282,19 +285,6 @@ static void show_textbox(const char *title, const char *text, int r, int c);
static void show_helptext(const char *title, const char *text);
static void show_help(struct menu *menu);
-static struct gstr get_relations_str(struct symbol **sym_arr)
-{
- struct symbol *sym;
- struct gstr res = str_new();
- int i;
-
- for (i = 0; sym_arr && (sym = sym_arr[i]); i++)
- get_symbol_str(&res, sym);
- if (!i)
- str_append(&res, _("No matches found.\n"));
- return res;
-}
-
static char filename[PATH_MAX+1];
static void set_config_filename(const char *config_filename)
{
@@ -359,8 +349,16 @@ static void build_conf(struct menu *menu)
int type, tmp, doint = 2;
tristate val;
char ch;
-
- if (!menu_is_visible(menu))
+ bool visible;
+
+ /*
+ * Note: menu_is_visible() has the side effect of recalculating
+ * the value of the symbol.
+ */
+ visible = menu_is_visible(menu);
+ if (show_all_options && !menu_has_prompt(menu))
+ return;
+ else if (!show_all_options && !visible)
return;
sym = menu->sym;
@@ -619,6 +617,9 @@ static void conf(struct menu *menu)
case 7:
search_conf();
break;
+ case 8:
+ show_all_options = !show_all_options;
+ break;
}
}
}
@@ -638,6 +639,7 @@ static void show_help(struct menu *menu)
{
struct gstr help = str_new();
+ help.max_width = getmaxx(stdscr) - 10;
menu_get_ext_help(menu, &help);
show_helptext(_(menu_get_prompt(menu)), str_get(&help));
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index 059a2465c574..203632cc30bd 100644
--- a/scripts/kconfig/menu.c
+++ b/scripts/kconfig/menu.c
@@ -38,7 +38,7 @@ static void prop_warn(struct property *prop, const char *fmt, ...)
va_end(ap);
}
-void menu_init(void)
+void _menu_init(void)
{
current_entry = current_menu = &rootmenu;
last_entry_ptr = &rootmenu.list;
@@ -197,7 +197,7 @@ static void sym_check_prop(struct symbol *sym)
if ((sym->type == S_STRING || sym->type == S_INT || sym->type == S_HEX) &&
prop->expr->type != E_SYMBOL)
prop_warn(prop,
- "default for config symbol '%'"
+ "default for config symbol '%s'"
" must be a single symbol", sym->name);
break;
case P_SELECT:
@@ -390,6 +390,13 @@ void menu_finalize(struct menu *parent)
}
}
+bool menu_has_prompt(struct menu *menu)
+{
+ if (!menu->prompt)
+ return false;
+ return true;
+}
+
bool menu_is_visible(struct menu *menu)
{
struct menu *child;
@@ -398,6 +405,7 @@ bool menu_is_visible(struct menu *menu)
if (!menu->prompt)
return false;
+
sym = menu->sym;
if (sym) {
sym_calc_value(sym);
@@ -407,12 +415,14 @@ bool menu_is_visible(struct menu *menu)
if (visible != no)
return true;
+
if (!sym || sym_get_tristate_value(menu->sym) == no)
return false;
for (child = menu->list; child; child = child->next)
if (menu_is_visible(child))
return true;
+
return false;
}
@@ -515,6 +525,20 @@ void get_symbol_str(struct gstr *r, struct symbol *sym)
str_append(r, "\n\n");
}
+struct gstr get_relations_str(struct symbol **sym_arr)
+{
+ struct symbol *sym;
+ struct gstr res = str_new();
+ int i;
+
+ for (i = 0; sym_arr && (sym = sym_arr[i]); i++)
+ get_symbol_str(&res, sym);
+ if (!i)
+ str_append(&res, _("No matches found.\n"));
+ return res;
+}
+
void menu_get_ext_help(struct menu *menu, struct gstr *help)
{
struct symbol *sym = menu->sym;
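With get_relations_str() exported from menu.c, both front ends render search results the same way. A condensed sketch of the flow (it mirrors search_conf() in mconf.c and in the new nconf.c below; the pattern variable is illustrative):

/* Sketch: regex search over CONFIG_ symbols rendered via get_relations_str(). */
struct symbol **sym_arr = sym_re_search(pattern);	/* symbols matching the user's regex */
struct gstr res = get_relations_str(sym_arr);		/* one get_symbol_str() block per match */
free(sym_arr);
show_textbox(_("Search Results"), str_get(&res), 0, 0);
str_free(&res);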
diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
new file mode 100644
index 000000000000..762caf80ce37
--- /dev/null
+++ b/scripts/kconfig/nconf.c
@@ -0,0 +1,1568 @@
+/*
+ * Copyright (C) 2008 Nir Tzachar <nir.tzachar@gmail.com>
+ * Released under the terms of the GNU GPL v2.0.
+ *
+ * Derived from menuconfig.
+ *
+ */
+#define LKC_DIRECT_LINK
+#include "lkc.h"
+#include "nconf.h"
+
+static const char nconf_readme[] = N_(
+"Overview\n"
+"--------\n"
+"Some kernel features may be built directly into the kernel.\n"
+"Some may be made into loadable runtime modules. Some features\n"
+"may be completely removed altogether. There are also certain\n"
+"kernel parameters which are not really features, but must be\n"
+"entered in as decimal or hexadecimal numbers or possibly text.\n"
+"\n"
+"Menu items beginning with following braces represent features that\n"
+" [ ] can be built in or removed\n"
+" < > can be built in, modularized or removed\n"
+" { } can be built in or modularized (selected by other feature)\n"
+" - - are selected by other feature,\n"
+" XXX cannot be selected. use Symbol Info to find out why,\n"
+"while *, M or whitespace inside braces means to build in, build as\n"
+"a module or to exclude the feature respectively.\n"
+"\n"
+"To change any of these features, highlight it with the cursor\n"
+"keys and press <Y> to build it in, <M> to make it a module or\n"
+"<N> to removed it. You may also press the <Space Bar> to cycle\n"
+"through the available options (ie. Y->N->M->Y).\n"
+"\n"
+"Some additional keyboard hints:\n"
+"\n"
+"Menus\n"
+"----------\n"
+"o Use the Up/Down arrow keys (cursor keys) to highlight the item\n"
+" you wish to change use <Enter> or <Space>. Goto submenu by \n"
+" pressing <Enter> of <right-arrow>. Use <Esc> or <left-arrow> to go back.\n"
+" Submenus are designated by \"--->\".\n"
+"\n"
+" Shortcut: Press the option's highlighted letter (hotkey).\n"
+" Pressing a hotkey more than once will sequence\n"
+" through all visible items which use that hotkey.\n"
+"\n"
+" You may also use the <PAGE UP> and <PAGE DOWN> keys to scroll\n"
+" unseen options into view.\n"
+"\n"
+"o To exit a menu use the just press <ESC> <F5> <F8> or <left-arrow>.\n"
+"\n"
+"o To get help with an item, press <F1>\n"
+" Shortcut: Press <h> or <?>.\n"
+"\n"
+"\n"
+"Radiolists (Choice lists)\n"
+"-----------\n"
+"o Use the cursor keys to select the option you wish to set and press\n"
+" <S> or the <SPACE BAR>.\n"
+"\n"
+" Shortcut: Press the first letter of the option you wish to set then\n"
+" press <S> or <SPACE BAR>.\n"
+"\n"
+"o To see available help for the item, press <F1>\n"
+" Shortcut: Press <H> or <?>.\n"
+"\n"
+"\n"
+"Data Entry\n"
+"-----------\n"
+"o Enter the requested information and press <ENTER>\n"
+" If you are entering hexadecimal values, it is not necessary to\n"
+" add the '0x' prefix to the entry.\n"
+"\n"
+"o For help, press <F1>.\n"
+"\n"
+"\n"
+"Text Box (Help Window)\n"
+"--------\n"
+"o Use the cursor keys to scroll up/down/left/right. The VI editor\n"
+" keys h,j,k,l function here as do <SPACE BAR> for those\n"
+" who are familiar with less and lynx.\n"
+"\n"
+"o Press <Enter>, <F1>, <F5>, <F7> or <Esc> to exit.\n"
+"\n"
+"\n"
+"Alternate Configuration Files\n"
+"-----------------------------\n"
+"nconfig supports the use of alternate configuration files for\n"
+"those who, for various reasons, find it necessary to switch\n"
+"between different kernel configurations.\n"
+"\n"
+"At the end of the main menu you will find two options. One is\n"
+"for saving the current configuration to a file of your choosing.\n"
+"The other option is for loading a previously saved alternate\n"
+"configuration.\n"
+"\n"
+"Even if you don't use alternate configuration files, but you\n"
+"find during a nconfig session that you have completely messed\n"
+"up your settings, you may use the \"Load Alternate...\" option to\n"
+"restore your previously saved settings from \".config\" without\n"
+"restarting nconfig.\n"
+"\n"
+"Other information\n"
+"-----------------\n"
+"If you use nconfig in an XTERM window make sure you have your\n"
+"$TERM variable set to point to a xterm definition which supports color.\n"
+"Otherwise, nconfig will look rather bad. nconfig will not\n"
+"display correctly in a RXVT window because rxvt displays only one\n"
+"intensity of color, bright.\n"
+"\n"
+"nconfig will display larger menus on screens or xterms which are\n"
+"set to display more than the standard 25 row by 80 column geometry.\n"
+"In order for this to work, the \"stty size\" command must be able to\n"
+"display the screen's current row and column geometry. I STRONGLY\n"
+"RECOMMEND that you make sure you do NOT have the shell variables\n"
+"LINES and COLUMNS exported into your environment. Some distributions\n"
+"export those variables via /etc/profile. Some ncurses programs can\n"
+"become confused when those variables (LINES & COLUMNS) don't reflect\n"
+"the true screen size.\n"
+"\n"
+"Optional personality available\n"
+"------------------------------\n"
+"If you prefer to have all of the kernel options listed in a single\n"
+"menu, rather than the default multimenu hierarchy, run the nconfig\n"
+"with NCONFIG_MODE environment variable set to single_menu. Example:\n"
+"\n"
+"make NCONFIG_MODE=single_menu nconfig\n"
+"\n"
+"<Enter> will then unroll the appropriate category, or enfold it if it\n"
+"is already unrolled.\n"
+"\n"
+"Note that this mode can eventually be a little more CPU expensive\n"
+"(especially with a larger number of unrolled categories) than the\n"
+"default mode.\n"
+"\n"),
+menu_no_f_instructions[] = N_(
+" You do not have function keys support. Please follow the\n"
+" following instructions:\n"
+" Arrow keys navigate the menu.\n"
+" <Enter> or <right-arrow> selects submenus --->.\n"
+" Capital Letters are hotkeys.\n"
+" Pressing <Y> includes, <N> excludes, <M> modularizes features.\n"
+" Pressing SpaceBar toggles between the above options\n"
+" Press <Esc> or <left-arrow> to go back one menu, \n"
+" <?> or <h> for Help, </> for Search.\n"
+" <1> is interchangable with <F1>, <2> with <F2>, etc.\n"
+" Legend: [*] built-in [ ] excluded <M> module < > module capable.\n"
+" <Esc> always leaves the current window\n"),
+menu_instructions[] = N_(
+" Arrow keys navigate the menu.\n"
+" <Enter> or <right-arrow> selects submenus --->.\n"
+" Capital Letters are hotkeys.\n"
+" Pressing <Y> includes, <N> excludes, <M> modularizes features.\n"
+" Pressing SpaceBar toggles between the above options\n"
+" Press <Esc>, <F3> or <left-arrow> to go back one menu, \n"
+" <?>, <F1> or <h> for Help, </> for Search.\n"
+" <1> is interchangable with <F1>, <2> with <F2>, etc.\n"
+" Legend: [*] built-in [ ] excluded <M> module < > module capable.\n"
+" <Esc> always leaves the current window\n"),
+radiolist_instructions[] = N_(
+" Use the arrow keys to navigate this window or\n"
+" press the hotkey of the item you wish to select\n"
+" followed by the <SPACE BAR>.\n"
+" Press <?>, <F1> or <h> for additional information about this option.\n"),
+inputbox_instructions_int[] = N_(
+"Please enter a decimal value.\n"
+"Fractions will not be accepted.\n"
+"Press <RETURN> to accept, <ESC> to cancel."),
+inputbox_instructions_hex[] = N_(
+"Please enter a hexadecimal value.\n"
+"Press <RETURN> to accept, <ESC> to cancel."),
+inputbox_instructions_string[] = N_(
+"Please enter a string value.\n"
+"Press <RETURN> to accept, <ESC> to cancel."),
+setmod_text[] = N_(
+"This feature depends on another which\n"
+"has been configured as a module.\n"
+"As a result, this feature will be built as a module."),
+nohelp_text[] = N_(
+"There is no help available for this kernel option.\n"),
+load_config_text[] = N_(
+"Enter the name of the configuration file you wish to load.\n"
+"Accept the name shown to restore the configuration you\n"
+"last retrieved. Leave blank to abort."),
+load_config_help[] = N_(
+"\n"
+"For various reasons, one may wish to keep several different kernel\n"
+"configurations available on a single machine.\n"
+"\n"
+"If you have saved a previous configuration in a file other than the\n"
+"kernel's default, entering the name of the file here will allow you\n"
+"to modify that configuration.\n"
+"\n"
+"If you are uncertain, then you have probably never used alternate\n"
+"configuration files. You should therefor leave this blank to abort.\n"),
+save_config_text[] = N_(
+"Enter a filename to which this configuration should be saved\n"
+"as an alternate. Leave blank to abort."),
+save_config_help[] = N_(
+"\n"
+"For various reasons, one may wish to keep different kernel\n"
+"configurations available on a single machine.\n"
+"\n"
+"Entering a file name here will allow you to later retrieve, modify\n"
+"and use the current configuration as an alternate to whatever\n"
+"configuration options you have selected at that time.\n"
+"\n"
+"If you are uncertain what all this means then you should probably\n"
+"leave this blank.\n"),
+search_help[] = N_(
+"\n"
+"Search for CONFIG_ symbols and display their relations.\n"
+"Regular expressions are allowed.\n"
+"Example: search for \"^FOO\"\n"
+"Result:\n"
+"-----------------------------------------------------------------\n"
+"Symbol: FOO [ = m]\n"
+"Prompt: Foo bus is used to drive the bar HW\n"
+"Defined at drivers/pci/Kconfig:47\n"
+"Depends on: X86_LOCAL_APIC && X86_IO_APIC || IA64\n"
+"Location:\n"
+" -> Bus options (PCI, PCMCIA, EISA, MCA, ISA)\n"
+" -> PCI support (PCI [ = y])\n"
+" -> PCI access mode (<choice> [ = y])\n"
+"Selects: LIBCRC32\n"
+"Selected by: BAR\n"
+"-----------------------------------------------------------------\n"
+"o The line 'Prompt:' shows the text used in the menu structure for\n"
+" this CONFIG_ symbol\n"
+"o The 'Defined at' line tell at what file / line number the symbol\n"
+" is defined\n"
+"o The 'Depends on:' line tell what symbols needs to be defined for\n"
+" this symbol to be visible in the menu (selectable)\n"
+"o The 'Location:' lines tell where in the menu structure this symbol\n"
+" is located\n"
+" A location followed by a [ = y] indicate that this is a selectable\n"
+" menu item - and current value is displayed inside brackets.\n"
+"o The 'Selects:' line tell what symbol will be automatically\n"
+" selected if this symbol is selected (y or m)\n"
+"o The 'Selected by' line tell what symbol has selected this symbol\n"
+"\n"
+"Only relevant lines are shown.\n"
+"\n\n"
+"Search examples:\n"
+"Examples: USB = > find all CONFIG_ symbols containing USB\n"
+" ^USB => find all CONFIG_ symbols starting with USB\n"
+" USB$ => find all CONFIG_ symbols ending with USB\n"
+"\n");
+
+struct mitem {
+ char str[256];
+ char tag;
+ void *usrptr;
+ int is_hot;
+ int is_visible;
+};
+
+#define MAX_MENU_ITEMS 4096
+static int show_all_items;
+static int indent;
+static struct menu *current_menu;
+static int child_count;
+static int single_menu_mode;
+/* the window in which all information appears */
+static WINDOW *main_window;
+/* the largest size of the menu window */
+static int mwin_max_lines;
+static int mwin_max_cols;
+/* the menu holding the configuration entries */
+static MENU *curses_menu;
+static ITEM *curses_menu_items[MAX_MENU_ITEMS];
+static struct mitem k_menu_items[MAX_MENU_ITEMS];
+static int items_num;
+static int global_exit;
+/* the instruction text shown when the user presses F3 */
+const char *current_instructions = menu_instructions;
+/* this array is used to implement hot keys. It is updated in item_make and
+ * reset in clean_items. It would be better to use a hash, but let's keep it
+ * simple... */
+#define MAX_SAME_KEY MAX_MENU_ITEMS
+struct {
+ int count;
+ int ptrs[MAX_MENU_ITEMS];
+} hotkeys[1<<(sizeof(char)*8)];
+
+static void conf(struct menu *menu);
+static void conf_choice(struct menu *menu);
+static void conf_string(struct menu *menu);
+static void conf_load(void);
+static void conf_save(void);
+static void show_help(struct menu *menu);
+static int do_exit(void);
+static void setup_windows(void);
+
+typedef void (*function_key_handler_t)(int *key, struct menu *menu);
+static void handle_f1(int *key, struct menu *current_item);
+static void handle_f2(int *key, struct menu *current_item);
+static void handle_f3(int *key, struct menu *current_item);
+static void handle_f4(int *key, struct menu *current_item);
+static void handle_f5(int *key, struct menu *current_item);
+static void handle_f6(int *key, struct menu *current_item);
+static void handle_f7(int *key, struct menu *current_item);
+static void handle_f8(int *key, struct menu *current_item);
+
+struct function_keys {
+ const char *key_str;
+ const char *func;
+ function_key key;
+ function_key_handler_t handler;
+};
+
+static const int function_keys_num = 8;
+struct function_keys function_keys[] = {
+ {
+ .key_str = "F1",
+ .func = "Help",
+ .key = F_HELP,
+ .handler = handle_f1,
+ },
+ {
+ .key_str = "F2",
+ .func = "Symbol Info",
+ .key = F_SYMBOL,
+ .handler = handle_f2,
+ },
+ {
+ .key_str = "F3",
+ .func = "Instructions",
+ .key = F_INSTS,
+ .handler = handle_f3,
+ },
+ {
+ .key_str = "F4",
+ .func = "Config",
+ .key = F_CONF,
+ .handler = handle_f4,
+ },
+ {
+ .key_str = "F5",
+ .func = "Back",
+ .key = F_BACK,
+ .handler = handle_f5,
+ },
+ {
+ .key_str = "F6",
+ .func = "Save",
+ .key = F_SAVE,
+ .handler = handle_f6,
+ },
+ {
+ .key_str = "F7",
+ .func = "Load",
+ .key = F_LOAD,
+ .handler = handle_f7,
+ },
+ {
+ .key_str = "F8",
+ .func = "Exit",
+ .key = F_EXIT,
+ .handler = handle_f8,
+ },
+};
+
+static void print_function_line(void)
+{
+ int i;
+ int offset = 1;
+ const int skip = 1;
+
+ for (i = 0; i < function_keys_num; i++) {
+ wattrset(main_window, attributes[FUNCTION_HIGHLIGHT]);
+ mvwprintw(main_window, LINES-3, offset,
+ "%s",
+ function_keys[i].key_str);
+ wattrset(main_window, attributes[FUNCTION_TEXT]);
+ offset += strlen(function_keys[i].key_str);
+ mvwprintw(main_window, LINES-3,
+ offset, "%s",
+ function_keys[i].func);
+ offset += strlen(function_keys[i].func) + skip;
+ }
+ wattrset(main_window, attributes[NORMAL]);
+}
+
+/* help */
+static void handle_f1(int *key, struct menu *current_item)
+{
+ show_scroll_win(main_window,
+ _("README"), _(nconf_readme));
+ return;
+}
+
+/* symbol help */
+static void handle_f2(int *key, struct menu *current_item)
+{
+ show_help(current_item);
+ return;
+}
+
+/* instructions */
+static void handle_f3(int *key, struct menu *current_item)
+{
+ show_scroll_win(main_window,
+ _("Instructions"),
+ _(current_instructions));
+ return;
+}
+
+/* config */
+static void handle_f4(int *key, struct menu *current_item)
+{
+ int res = btn_dialog(main_window,
+ _("Show all symbols?"),
+ 2,
+ " <Show All> ",
+ "<Don't show all>");
+ if (res == 0)
+ show_all_items = 1;
+ else if (res == 1)
+ show_all_items = 0;
+
+ return;
+}
+
+/* back */
+static void handle_f5(int *key, struct menu *current_item)
+{
+ *key = KEY_LEFT;
+ return;
+}
+
+/* save */
+static void handle_f6(int *key, struct menu *current_item)
+{
+ conf_save();
+ return;
+}
+
+/* load */
+static void handle_f7(int *key, struct menu *current_item)
+{
+ conf_load();
+ return;
+}
+
+/* exit */
+static void handle_f8(int *key, struct menu *current_item)
+{
+ do_exit();
+ return;
+}
+
+/* return != 0 to indicate the key was handled */
+static int process_special_keys(int *key, struct menu *menu)
+{
+ int i;
+
+ if (*key == KEY_RESIZE) {
+ setup_windows();
+ return 1;
+ }
+
+ for (i = 0; i < function_keys_num; i++) {
+ if (*key == KEY_F(function_keys[i].key) ||
+ *key == '0' + function_keys[i].key){
+ function_keys[i].handler(key, menu);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static void clean_items(void)
+{
+ int i;
+ for (i = 0; curses_menu_items[i]; i++)
+ free_item(curses_menu_items[i]);
+ bzero(curses_menu_items, sizeof(curses_menu_items));
+ bzero(k_menu_items, sizeof(k_menu_items));
+ bzero(hotkeys, sizeof(hotkeys));
+ items_num = 0;
+}
+
+/* return the index of the next hot item, or -1 if no such item exists */
+static int get_next_hot(int c)
+{
+ static int hot_index;
+ static int hot_char;
+
+ if (c < 0 || c > 255 || hotkeys[c].count <= 0)
+ return -1;
+
+ if (hot_char == c) {
+ hot_index = (hot_index+1)%hotkeys[c].count;
+ return hotkeys[c].ptrs[hot_index];
+ } else {
+ hot_char = c;
+ hot_index = 0;
+ return hotkeys[c].ptrs[0];
+ }
+}
+
+/* can the char c be a hot key? Not if c is a common shortcut used elsewhere */
+static int canbhot(char c)
+{
+ c = tolower(c);
+ return isalnum(c) && c != 'y' && c != 'm' && c != 'h' &&
+ c != 'n' && c != '?';
+}
+
+/* check if the item at index already has a hot key. */
+static int is_hot(int index)
+{
+ return k_menu_items[index].is_hot;
+}
+
+/* find the first possible hot key, and mark it.
+ * index is the index of the item in the menu
+ * return 0 on success */
+static int make_hot(char *dest, int len, const char *org, int index)
+{
+ int position = -1;
+ int i;
+ int tmp;
+ int c;
+ int org_len;
+
+ if (org == NULL || is_hot(index))
+ return 1;
+ org_len = strlen(org);
+
+ /* make sure not to make hot keys out of markers.
+ * find where to start looking for a hot key
+ */
+ i = 0;
+ /* skip white space */
+ while (i < org_len && org[i] == ' ')
+ i++;
+ if (i == org_len)
+ return -1;
+ /* if encountering '(' or '<' or '[', find the match and look from there
+ **/
+ if (org[i] == '[' || org[i] == '<' || org[i] == '(') {
+ i++;
+ for (; i < org_len; i++)
+ if (org[i] == ']' || org[i] == '>' || org[i] == ')')
+ break;
+ }
+ if (i == org_len)
+ return -1;
+ for (; i < org_len; i++) {
+ if (canbhot(org[i]) && org[i-1] != '<' && org[i-1] != '(') {
+ position = i;
+ break;
+ }
+ }
+ if (position == -1)
+ return 1;
+
+ /* ok, char at org[position] should be a hot key to this item */
+ c = tolower(org[position]);
+ tmp = hotkeys[c].count;
+ hotkeys[c].ptrs[tmp] = index;
+ hotkeys[c].count++;
+ /*
+ snprintf(dest, len, "%.*s(%c)%s", position, org, org[position],
+ &org[position+1]);
+ */
+ /* make org[position] uppercase, and all preceding letters lowercase */
+ strncpy(dest, org, len);
+ for (i = 0; i < position; i++)
+ dest[i] = tolower(dest[i]);
+ dest[position] = toupper(dest[position]);
+ k_menu_items[index].is_hot = 1;
+ return 0;
+}
+
+/* Make a new item. Add a hotkey mark in the first possible letter.
+ * As ncurses does not allow any attributes inside menu items, we mark the
+ * hot key as the first capitalized letter in the string */
+static void item_make(struct menu *menu, char tag, const char *fmt, ...)
+{
+ va_list ap;
+ char tmp_str[256];
+
+ if (items_num > MAX_MENU_ITEMS-1)
+ return;
+
+ bzero(&k_menu_items[items_num], sizeof(k_menu_items[0]));
+ k_menu_items[items_num].tag = tag;
+ k_menu_items[items_num].usrptr = menu;
+ if (menu != NULL)
+ k_menu_items[items_num].is_visible =
+ menu_is_visible(menu);
+ else
+ k_menu_items[items_num].is_visible = 1;
+
+ va_start(ap, fmt);
+ vsnprintf(tmp_str, sizeof(tmp_str), fmt, ap);
+ if (!k_menu_items[items_num].is_visible)
+ memcpy(tmp_str, "XXX", 3);
+ va_end(ap);
+ if (make_hot(
+ k_menu_items[items_num].str,
+ sizeof(k_menu_items[items_num].str), tmp_str, items_num) != 0)
+ strncpy(k_menu_items[items_num].str,
+ tmp_str,
+ sizeof(k_menu_items[items_num].str));
+
+ curses_menu_items[items_num] = new_item(
+ k_menu_items[items_num].str,
+ k_menu_items[items_num].str);
+ set_item_userptr(curses_menu_items[items_num],
+ &k_menu_items[items_num]);
+ /*
+ if (!k_menu_items[items_num].is_visible)
+ item_opts_off(curses_menu_items[items_num], O_SELECTABLE);
+ */
+
+ items_num++;
+ curses_menu_items[items_num] = NULL;
+}
+
+/* very hackish. adds a string to the last item added */
+static void item_add_str(const char *fmt, ...)
+{
+ va_list ap;
+ int index = items_num-1;
+ char new_str[256];
+ char tmp_str[256];
+
+ if (index < 0)
+ return;
+
+ va_start(ap, fmt);
+ vsnprintf(new_str, sizeof(new_str), fmt, ap);
+ va_end(ap);
+ snprintf(tmp_str, sizeof(tmp_str), "%s%s",
+ k_menu_items[index].str, new_str);
+ if (make_hot(k_menu_items[index].str,
+ sizeof(k_menu_items[index].str), tmp_str, index) != 0)
+ strncpy(k_menu_items[index].str,
+ tmp_str,
+ sizeof(k_menu_items[index].str));
+
+ free_item(curses_menu_items[index]);
+ curses_menu_items[index] = new_item(
+ k_menu_items[index].str,
+ k_menu_items[index].str);
+ set_item_userptr(curses_menu_items[index],
+ &k_menu_items[index]);
+}
+
+/* get the tag of the currently selected item */
+static char item_tag(void)
+{
+ ITEM *cur;
+ struct mitem *mcur;
+
+ cur = current_item(curses_menu);
+ if (cur == NULL)
+ return 0;
+ mcur = (struct mitem *) item_userptr(cur);
+ return mcur->tag;
+}
+
+static int curses_item_index(void)
+{
+ return item_index(current_item(curses_menu));
+}
+
+static void *item_data(void)
+{
+ ITEM *cur;
+ struct mitem *mcur;
+
+ cur = current_item(curses_menu);
+ mcur = (struct mitem *) item_userptr(cur);
+ return mcur->usrptr;
+
+}
+
+static int item_is_tag(char tag)
+{
+ return item_tag() == tag;
+}
+
+static char filename[PATH_MAX+1];
+static char menu_backtitle[PATH_MAX+128];
+static const char *set_config_filename(const char *config_filename)
+{
+ int size;
+ struct symbol *sym;
+
+ sym = sym_lookup("KERNELVERSION", 0);
+ sym_calc_value(sym);
+ size = snprintf(menu_backtitle, sizeof(menu_backtitle),
+ _("%s - Linux Kernel v%s Configuration"),
+ config_filename, sym_get_string_value(sym));
+ if (size >= sizeof(menu_backtitle))
+ menu_backtitle[sizeof(menu_backtitle)-1] = '\0';
+
+ size = snprintf(filename, sizeof(filename), "%s", config_filename);
+ if (size >= sizeof(filename))
+ filename[sizeof(filename)-1] = '\0';
+ return menu_backtitle;
+}
+
+/* command = 0 is suppress, 1 is restore */
+static void supress_stdout(int command)
+{
+ static FILE *org_stdout;
+ static FILE *org_stderr;
+
+ if (command == 0) {
+ org_stdout = stdout;
+ org_stderr = stderr;
+ stdout = fopen("/dev/null", "a");
+ stderr = fopen("/dev/null", "a");
+ } else {
+ fclose(stdout);
+ fclose(stderr);
+ stdout = org_stdout;
+ stderr = org_stderr;
+ }
+}
+
+/* return = 0 means we are successful.
+ * -1 means go on doing what you were doing
+ */
+static int do_exit(void)
+{
+ int res;
+ if (!conf_get_changed()) {
+ global_exit = 1;
+ return 0;
+ }
+ res = btn_dialog(main_window,
+ _("Do you wish to save your "
+ "new kernel configuration?\n"
+ "<ESC> to cancel and resume nconfig."),
+ 2,
+ " <save> ",
+ "<don't save>");
+ if (res == KEY_EXIT) {
+ global_exit = 0;
+ return -1;
+ }
+
+ /* if we got here, the user really wants to exit */
+ switch (res) {
+ case 0:
+ supress_stdout(0);
+ res = conf_write(filename);
+ supress_stdout(1);
+ if (res)
+ btn_dialog(
+ main_window,
+ _("Error during writing of the kernel "
+ "configuration.\n"
+ "Your kernel configuration "
+ "changes were NOT saved."),
+ 1,
+ "<OK>");
+ else {
+ char buf[1024];
+ snprintf(buf, 1024,
+ _("Configuration written to %s\n"
+ "End of Linux kernel configuration.\n"
+ "Execute 'make' to build the kernel or try"
+ " 'make help'."), filename);
+ btn_dialog(
+ main_window,
+ buf,
+ 1,
+ "<OK>");
+ }
+ break;
+ default:
+ btn_dialog(
+ main_window,
+ _("Your kernel configuration changes were NOT saved."),
+ 1,
+ "<OK>");
+ break;
+ }
+ global_exit = 1;
+ return 0;
+}
+
+
+static void search_conf(void)
+{
+ struct symbol **sym_arr;
+ struct gstr res;
+ char dialog_input_result[100];
+ char *dialog_input;
+ int dres;
+again:
+ dres = dialog_inputbox(main_window,
+ _("Search Configuration Parameter"),
+ _("Enter CONFIG_ (sub)string to search for "
+ "(with or without \"CONFIG\")"),
+ "", dialog_input_result, 99);
+ switch (dres) {
+ case 0:
+ break;
+ case 1:
+ show_scroll_win(main_window,
+ _("Search Configuration"), search_help);
+ goto again;
+ default:
+ return;
+ }
+
+ /* strip CONFIG_ if necessary */
+ dialog_input = dialog_input_result;
+ if (strncasecmp(dialog_input_result, "CONFIG_", 7) == 0)
+ dialog_input += 7;
+
+ sym_arr = sym_re_search(dialog_input);
+ res = get_relations_str(sym_arr);
+ free(sym_arr);
+ show_scroll_win(main_window,
+ _("Search Results"), str_get(&res));
+ str_free(&res);
+}
+
+
+static void build_conf(struct menu *menu)
+{
+ struct symbol *sym;
+ struct property *prop;
+ struct menu *child;
+ int type, tmp, doint = 2;
+ tristate val;
+ char ch;
+
+ if (!menu || (!show_all_items && !menu_is_visible(menu)))
+ return;
+
+ sym = menu->sym;
+ prop = menu->prompt;
+ if (!sym) {
+ if (prop && menu != current_menu) {
+ const char *prompt = menu_get_prompt(menu);
+ enum prop_type ptype;
+ ptype = menu->prompt ? menu->prompt->type : P_UNKNOWN;
+ switch (ptype) {
+ case P_MENU:
+ child_count++;
+ prompt = _(prompt);
+ if (single_menu_mode) {
+ item_make(menu, 'm',
+ "%s%*c%s",
+ menu->data ? "-->" : "++>",
+ indent + 1, ' ', prompt);
+ } else
+ item_make(menu, 'm',
+ " %*c%s --->",
+ indent + 1,
+ ' ', prompt);
+
+ if (single_menu_mode && menu->data)
+ goto conf_childs;
+ return;
+ case P_COMMENT:
+ if (prompt) {
+ child_count++;
+ item_make(menu, ':',
+ " %*c*** %s ***",
+ indent + 1, ' ',
+ _(prompt));
+ }
+ break;
+ default:
+ if (prompt) {
+ child_count++;
+ item_make(menu, ':', "---%*c%s",
+ indent + 1, ' ',
+ _(prompt));
+ }
+ }
+ } else
+ doint = 0;
+ goto conf_childs;
+ }
+
+ type = sym_get_type(sym);
+ if (sym_is_choice(sym)) {
+ struct symbol *def_sym = sym_get_choice_value(sym);
+ struct menu *def_menu = NULL;
+
+ child_count++;
+ for (child = menu->list; child; child = child->next) {
+ if (menu_is_visible(child) && child->sym == def_sym)
+ def_menu = child;
+ }
+
+ val = sym_get_tristate_value(sym);
+ if (sym_is_changable(sym)) {
+ switch (type) {
+ case S_BOOLEAN:
+ item_make(menu, 't', "[%c]",
+ val == no ? ' ' : '*');
+ break;
+ case S_TRISTATE:
+ switch (val) {
+ case yes:
+ ch = '*';
+ break;
+ case mod:
+ ch = 'M';
+ break;
+ default:
+ ch = ' ';
+ break;
+ }
+ item_make(menu, 't', "<%c>", ch);
+ break;
+ }
+ } else {
+ item_make(menu, def_menu ? 't' : ':', " ");
+ }
+
+ item_add_str("%*c%s", indent + 1,
+ ' ', _(menu_get_prompt(menu)));
+ if (val == yes) {
+ if (def_menu) {
+ item_add_str(" (%s)",
+ _(menu_get_prompt(def_menu)));
+ item_add_str(" --->");
+ if (def_menu->list) {
+ indent += 2;
+ build_conf(def_menu);
+ indent -= 2;
+ }
+ }
+ return;
+ }
+ } else {
+ if (menu == current_menu) {
+ item_make(menu, ':',
+ "---%*c%s", indent + 1,
+ ' ', _(menu_get_prompt(menu)));
+ goto conf_childs;
+ }
+ child_count++;
+ val = sym_get_tristate_value(sym);
+ if (sym_is_choice_value(sym) && val == yes) {
+ item_make(menu, ':', " ");
+ } else {
+ switch (type) {
+ case S_BOOLEAN:
+ if (sym_is_changable(sym))
+ item_make(menu, 't', "[%c]",
+ val == no ? ' ' : '*');
+ else
+ item_make(menu, 't', "-%c-",
+ val == no ? ' ' : '*');
+ break;
+ case S_TRISTATE:
+ switch (val) {
+ case yes:
+ ch = '*';
+ break;
+ case mod:
+ ch = 'M';
+ break;
+ default:
+ ch = ' ';
+ break;
+ }
+ if (sym_is_changable(sym)) {
+ if (sym->rev_dep.tri == mod)
+ item_make(menu,
+ 't', "{%c}", ch);
+ else
+ item_make(menu,
+ 't', "<%c>", ch);
+ } else
+ item_make(menu, 't', "-%c-", ch);
+ break;
+ default:
+ tmp = 2 + strlen(sym_get_string_value(sym));
+ item_make(menu, 's', " (%s)",
+ sym_get_string_value(sym));
+ tmp = indent - tmp + 4;
+ if (tmp < 0)
+ tmp = 0;
+ item_add_str("%*c%s%s", tmp, ' ',
+ _(menu_get_prompt(menu)),
+ (sym_has_value(sym) ||
+ !sym_is_changable(sym)) ? "" :
+ _(" (NEW)"));
+ goto conf_childs;
+ }
+ }
+ item_add_str("%*c%s%s", indent + 1, ' ',
+ _(menu_get_prompt(menu)),
+ (sym_has_value(sym) || !sym_is_changable(sym)) ?
+ "" : _(" (NEW)"));
+ if (menu->prompt && menu->prompt->type == P_MENU) {
+ item_add_str(" --->");
+ return;
+ }
+ }
+
+conf_childs:
+ indent += doint;
+ for (child = menu->list; child; child = child->next)
+ build_conf(child);
+ indent -= doint;
+}
+
+static void reset_menu(void)
+{
+ unpost_menu(curses_menu);
+ clean_items();
+}
+
+/* adjust the menu to show this item.
+ * prefer not to scroll the menu if possible*/
+static void center_item(int selected_index, int *last_top_row)
+{
+ int toprow;
+ int maxy, maxx;
+
+ scale_menu(curses_menu, &maxy, &maxx);
+ set_top_row(curses_menu, *last_top_row);
+ toprow = top_row(curses_menu);
+ if (selected_index >= toprow && selected_index < toprow+maxy) {
+ /* we can only move the selected item. no need to scroll */
+ set_current_item(curses_menu,
+ curses_menu_items[selected_index]);
+ } else {
+ toprow = max(selected_index-maxy/2, 0);
+ if (toprow >= item_count(curses_menu)-maxy)
+ toprow = item_count(curses_menu)-mwin_max_lines;
+ set_top_row(curses_menu, toprow);
+ set_current_item(curses_menu,
+ curses_menu_items[selected_index]);
+ }
+ *last_top_row = toprow;
+ post_menu(curses_menu);
+ refresh_all_windows(main_window);
+}
+
+/* this function assumes reset_menu has been called before */
+static void show_menu(const char *prompt, const char *instructions,
+ int selected_index, int *last_top_row)
+{
+ int maxx, maxy;
+ WINDOW *menu_window;
+
+ current_instructions = instructions;
+
+ clear();
+ wattrset(main_window, attributes[NORMAL]);
+ print_in_middle(stdscr, 1, 0, COLS,
+ menu_backtitle,
+ attributes[MAIN_HEADING]);
+
+ wattrset(main_window, attributes[MAIN_MENU_BOX]);
+ box(main_window, 0, 0);
+ wattrset(main_window, attributes[MAIN_MENU_HEADING]);
+ mvwprintw(main_window, 0, 3, " %s ", prompt);
+ wattrset(main_window, attributes[NORMAL]);
+
+ set_menu_items(curses_menu, curses_menu_items);
+
+ /* position the menu at the middle of the screen */
+ scale_menu(curses_menu, &maxy, &maxx);
+ maxx = min(maxx, mwin_max_cols-2);
+ maxy = mwin_max_lines-2;
+ menu_window = derwin(main_window,
+ maxy,
+ maxx,
+ 2,
+ (mwin_max_cols-maxx)/2);
+ keypad(menu_window, TRUE);
+ set_menu_win(curses_menu, menu_window);
+ set_menu_sub(curses_menu, menu_window);
+
+ /* must reassert this after changing items, otherwise returns to a
+ * default of 16
+ */
+ set_menu_format(curses_menu, maxy, 1);
+ center_item(selected_index, last_top_row);
+ set_menu_format(curses_menu, maxy, 1);
+
+ print_function_line();
+
+ /* Post the menu */
+ post_menu(curses_menu);
+ refresh_all_windows(main_window);
+}
+
+
+static void conf(struct menu *menu)
+{
+ char pattern[256];
+ struct menu *submenu = 0;
+ const char *prompt = menu_get_prompt(menu);
+ struct symbol *sym;
+ struct menu *active_menu = NULL;
+ int res;
+ int current_index = 0;
+ int last_top_row = 0;
+
+ bzero(pattern, sizeof(pattern));
+
+ while (!global_exit) {
+ reset_menu();
+ current_menu = menu;
+ build_conf(menu);
+ if (!child_count)
+ break;
+
+ show_menu(prompt ? _(prompt) : _("Main Menu"),
+ _(menu_instructions),
+ current_index, &last_top_row);
+ keypad((menu_win(curses_menu)), TRUE);
+ while (!global_exit && (res = wgetch(menu_win(curses_menu)))) {
+ if (process_special_keys(&res,
+ (struct menu *) item_data()))
+ break;
+ switch (res) {
+ case KEY_DOWN:
+ menu_driver(curses_menu, REQ_DOWN_ITEM);
+ break;
+ case KEY_UP:
+ menu_driver(curses_menu, REQ_UP_ITEM);
+ break;
+ case KEY_NPAGE:
+ menu_driver(curses_menu, REQ_SCR_DPAGE);
+ break;
+ case KEY_PPAGE:
+ menu_driver(curses_menu, REQ_SCR_UPAGE);
+ break;
+ case KEY_HOME:
+ menu_driver(curses_menu, REQ_FIRST_ITEM);
+ break;
+ case KEY_END:
+ menu_driver(curses_menu, REQ_LAST_ITEM);
+ break;
+ case 'h':
+ case '?':
+ show_help((struct menu *) item_data());
+ break;
+ }
+ if (res == 10 || res == 27 ||
+ res == 32 || res == 'n' || res == 'y' ||
+ res == KEY_LEFT || res == KEY_RIGHT ||
+ res == 'm' || res == '/')
+ break;
+ else if (canbhot(res)) {
+ /* check for hot keys: */
+ int tmp = get_next_hot(res);
+ if (tmp != -1)
+ center_item(tmp, &last_top_row);
+ }
+ refresh_all_windows(main_window);
+ }
+
+ refresh_all_windows(main_window);
+ /* if ESC or left*/
+ if (res == 27 || (menu != &rootmenu && res == KEY_LEFT))
+ break;
+
+ /* remember location in the menu */
+ last_top_row = top_row(curses_menu);
+ current_index = curses_item_index();
+
+ if (!item_tag())
+ continue;
+
+ submenu = (struct menu *) item_data();
+ active_menu = (struct menu *)item_data();
+ if (!submenu || !menu_is_visible(submenu))
+ continue;
+ if (submenu)
+ sym = submenu->sym;
+ else
+ sym = NULL;
+
+ switch (res) {
+ case ' ':
+ if (item_is_tag('t'))
+ sym_toggle_tristate_value(sym);
+ else if (item_is_tag('m'))
+ conf(submenu);
+ break;
+ case KEY_RIGHT:
+ case 10: /* ENTER WAS PRESSED */
+ switch (item_tag()) {
+ case 'm':
+ if (single_menu_mode)
+ submenu->data =
+ (void *) (long) !submenu->data;
+ else
+ conf(submenu);
+ break;
+ case 't':
+ if (sym_is_choice(sym) &&
+ sym_get_tristate_value(sym) == yes)
+ conf_choice(submenu);
+ else if (submenu->prompt &&
+ submenu->prompt->type == P_MENU)
+ conf(submenu);
+ else if (res == 10)
+ sym_toggle_tristate_value(sym);
+ break;
+ case 's':
+ conf_string(submenu);
+ break;
+ }
+ break;
+ case 'y':
+ if (item_is_tag('t')) {
+ if (sym_set_tristate_value(sym, yes))
+ break;
+ if (sym_set_tristate_value(sym, mod))
+ btn_dialog(main_window, setmod_text, 0);
+ }
+ break;
+ case 'n':
+ if (item_is_tag('t'))
+ sym_set_tristate_value(sym, no);
+ break;
+ case 'm':
+ if (item_is_tag('t'))
+ sym_set_tristate_value(sym, mod);
+ break;
+ case '/':
+ search_conf();
+ break;
+ }
+ }
+}
+
+static void show_help(struct menu *menu)
+{
+ struct gstr help = str_new();
+
+ if (menu && menu->sym && menu_has_help(menu)) {
+ if (menu->sym->name) {
+ str_printf(&help, "CONFIG_%s:\n\n", menu->sym->name);
+ str_append(&help, _(menu_get_help(menu)));
+ str_append(&help, "\n");
+ get_symbol_str(&help, menu->sym);
+ }
+ } else {
+ str_append(&help, nohelp_text);
+ }
+ show_scroll_win(main_window, _(menu_get_prompt(menu)), str_get(&help));
+ str_free(&help);
+}
+
+static void conf_choice(struct menu *menu)
+{
+ const char *prompt = _(menu_get_prompt(menu));
+ struct menu *child = 0;
+ struct symbol *active;
+ int selected_index = 0;
+ int last_top_row = 0;
+ int res, i = 0;
+
+ active = sym_get_choice_value(menu->sym);
+ /* this is mostly duplicated from the conf() function. */
+ while (!global_exit) {
+ reset_menu();
+
+ for (i = 0, child = menu->list; child; child = child->next) {
+ if (!show_all_items && !menu_is_visible(child))
+ continue;
+
+ if (child->sym == sym_get_choice_value(menu->sym))
+ item_make(child, ':', "<X> %s",
+ _(menu_get_prompt(child)));
+ else
+ item_make(child, ':', " %s",
+ _(menu_get_prompt(child)));
+ if (child->sym == active){
+ last_top_row = top_row(curses_menu);
+ selected_index = i;
+ }
+ i++;
+ }
+ show_menu(prompt ? _(prompt) : _("Choice Menu"),
+ _(radiolist_instructions),
+ selected_index,
+ &last_top_row);
+ while (!global_exit && (res = wgetch(menu_win(curses_menu)))) {
+ if (process_special_keys(
+ &res,
+ (struct menu *) item_data()))
+ break;
+ switch (res) {
+ case KEY_DOWN:
+ menu_driver(curses_menu, REQ_DOWN_ITEM);
+ break;
+ case KEY_UP:
+ menu_driver(curses_menu, REQ_UP_ITEM);
+ break;
+ case KEY_NPAGE:
+ menu_driver(curses_menu, REQ_SCR_DPAGE);
+ break;
+ case KEY_PPAGE:
+ menu_driver(curses_menu, REQ_SCR_UPAGE);
+ break;
+ case KEY_HOME:
+ menu_driver(curses_menu, REQ_FIRST_ITEM);
+ break;
+ case KEY_END:
+ menu_driver(curses_menu, REQ_LAST_ITEM);
+ break;
+ case 'h':
+ case '?':
+ show_help((struct menu *) item_data());
+ break;
+ }
+ if (res == 10 || res == 27 || res == ' ' ||
+ res == KEY_LEFT)
+ break;
+ else if (canbhot(res)) {
+ /* check for hot keys: */
+ int tmp = get_next_hot(res);
+ if (tmp != -1)
+ center_item(tmp, &last_top_row);
+ }
+ refresh_all_windows(main_window);
+ }
+ /* if ESC or left */
+ if (res == 27 || res == KEY_LEFT)
+ break;
+
+ child = item_data();
+ if (!child || !menu_is_visible(child))
+ continue;
+ switch (res) {
+ case ' ':
+ case 10:
+ case KEY_RIGHT:
+ sym_set_tristate_value(child->sym, yes);
+ return;
+ case 'h':
+ case '?':
+ show_help(child);
+ active = child->sym;
+ break;
+ case KEY_EXIT:
+ return;
+ }
+ }
+}
+
+static void conf_string(struct menu *menu)
+{
+ const char *prompt = menu_get_prompt(menu);
+ char dialog_input_result[256];
+
+ while (1) {
+ int res;
+ const char *heading;
+
+ switch (sym_get_type(menu->sym)) {
+ case S_INT:
+ heading = _(inputbox_instructions_int);
+ break;
+ case S_HEX:
+ heading = _(inputbox_instructions_hex);
+ break;
+ case S_STRING:
+ heading = _(inputbox_instructions_string);
+ break;
+ default:
+ heading = _("Internal nconf error!");
+ }
+ res = dialog_inputbox(main_window,
+ prompt ? _(prompt) : _("Main Menu"),
+ heading,
+ sym_get_string_value(menu->sym),
+ dialog_input_result,
+ sizeof(dialog_input_result));
+ switch (res) {
+ case 0:
+ if (sym_set_string_value(menu->sym,
+ dialog_input_result))
+ return;
+ btn_dialog(main_window,
+ _("You have made an invalid entry."), 0);
+ break;
+ case 1:
+ show_help(menu);
+ break;
+ case KEY_EXIT:
+ return;
+ }
+ }
+}
+
+static void conf_load(void)
+{
+ char dialog_input_result[256];
+ while (1) {
+ int res;
+ res = dialog_inputbox(main_window,
+ NULL, load_config_text,
+ filename,
+ dialog_input_result,
+ sizeof(dialog_input_result));
+ switch (res) {
+ case 0:
+ if (!dialog_input_result[0])
+ return;
+ if (!conf_read(dialog_input_result)) {
+ set_config_filename(dialog_input_result);
+ sym_set_change_count(1);
+ return;
+ }
+ btn_dialog(main_window, _("File does not exist!"), 0);
+ break;
+ case 1:
+ show_scroll_win(main_window,
+ _("Load Alternate Configuration"),
+ load_config_help);
+ break;
+ case KEY_EXIT:
+ return;
+ }
+ }
+}
+
+static void conf_save(void)
+{
+ char dialog_input_result[256];
+ while (1) {
+ int res;
+ res = dialog_inputbox(main_window,
+ NULL, save_config_text,
+ filename,
+ dialog_input_result,
+ sizeof(dialog_input_result));
+ switch (res) {
+ case 0:
+ if (!dialog_input_result[0])
+ return;
+ supress_stdout(0);
+ res = conf_write(dialog_input_result);
+ supress_stdout(1);
+ if (!res) {
+ char buf[1024];
+ sprintf(buf, "%s %s",
+ _("configuration file saved to: "),
+ dialog_input_result);
+ btn_dialog(main_window,
+ buf, 1, "<OK>");
+ set_config_filename(dialog_input_result);
+ return;
+ }
+ btn_dialog(main_window, _("Can't create file! "
+ "Probably a nonexistent directory."),
+ 1, "<OK>");
+ break;
+ case 1:
+ show_scroll_win(main_window,
+ _("Save Alternate Configuration"),
+ save_config_help);
+ break;
+ case KEY_EXIT:
+ return;
+ }
+ }
+}
+
+void setup_windows(void)
+{
+ if (main_window != NULL)
+ delwin(main_window);
+
+ /* set up the menu and menu window */
+ main_window = newwin(LINES-2, COLS-2, 2, 1);
+ keypad(main_window, TRUE);
+ mwin_max_lines = LINES-6;
+ mwin_max_cols = COLS-6;
+
+ /* panels order is from bottom to top */
+ new_panel(main_window);
+}
+
+int main(int ac, char **av)
+{
+ char *mode;
+
+ setlocale(LC_ALL, "");
+ bindtextdomain(PACKAGE, LOCALEDIR);
+ textdomain(PACKAGE);
+
+ conf_parse(av[1]);
+ conf_read(NULL);
+
+ mode = getenv("NCONFIG_MODE");
+ if (mode) {
+ if (!strcasecmp(mode, "single_menu"))
+ single_menu_mode = 1;
+ }
+
+ /* Initialize curses */
+ initscr();
+ /* set color theme */
+ set_colors();
+
+ cbreak();
+ noecho();
+ keypad(stdscr, TRUE);
+ curs_set(0);
+
+ if (COLS < 75 || LINES < 20) {
+ endwin();
+ printf("Your terminal should have at "
+ "least 20 lines and 75 columns\n");
+ return 1;
+ }
+
+ notimeout(stdscr, FALSE);
+ ESCDELAY = 1;
+
+ /* set btns menu */
+ curses_menu = new_menu(curses_menu_items);
+ menu_opts_off(curses_menu, O_SHOWDESC);
+ menu_opts_off(curses_menu, O_SHOWMATCH);
+ menu_opts_on(curses_menu, O_ONEVALUE);
+ menu_opts_on(curses_menu, O_NONCYCLIC);
+ set_menu_mark(curses_menu, " ");
+ set_menu_fore(curses_menu, attributes[MAIN_MENU_FORE]);
+ set_menu_back(curses_menu, attributes[MAIN_MENU_BACK]);
+ set_menu_grey(curses_menu, attributes[MAIN_MENU_GREY]);
+
+ set_config_filename(conf_get_configname());
+ setup_windows();
+
+ /* check for KEY_F(1) */
+ if (has_key(KEY_F(1)) == FALSE) {
+ show_scroll_win(main_window,
+ _("Instructions"),
+ _(menu_no_f_instructions));
+ }
+
+
+
+ /* do the work */
+ while (!global_exit) {
+ conf(&rootmenu);
+ if (!global_exit && do_exit() == 0)
+ break;
+ }
+ /* ok, we are done */
+ unpost_menu(curses_menu);
+ free_menu(curses_menu);
+ delwin(main_window);
+ clear();
+ refresh();
+ endwin();
+ return 0;
+}
+
diff --git a/scripts/kconfig/nconf.gui.c b/scripts/kconfig/nconf.gui.c
new file mode 100644
index 000000000000..115edb437fb1
--- /dev/null
+++ b/scripts/kconfig/nconf.gui.c
@@ -0,0 +1,617 @@
+/*
+ * Copyright (C) 2008 Nir Tzachar <nir.tzachar@gmail.com>
+ * Released under the terms of the GNU GPL v2.0.
+ *
+ * Derived from menuconfig.
+ *
+ */
+#include "nconf.h"
+
+/* a list of all the different widgets we use */
+attributes_t attributes[ATTR_MAX+1] = {0};
+
+/* available colors:
+ COLOR_BLACK 0
+ COLOR_RED 1
+ COLOR_GREEN 2
+ COLOR_YELLOW 3
+ COLOR_BLUE 4
+ COLOR_MAGENTA 5
+ COLOR_CYAN 6
+ COLOR_WHITE 7
+ */
+static void set_normal_colors(void)
+{
+ init_pair(NORMAL, -1, -1);
+ init_pair(MAIN_HEADING, COLOR_MAGENTA, -1);
+
+ /* FORE is for the selected item */
+ init_pair(MAIN_MENU_FORE, -1, -1);
+ /* BACK for all the rest */
+ init_pair(MAIN_MENU_BACK, -1, -1);
+ init_pair(MAIN_MENU_GREY, -1, -1);
+ init_pair(MAIN_MENU_HEADING, COLOR_GREEN, -1);
+ init_pair(MAIN_MENU_BOX, COLOR_YELLOW, -1);
+
+ init_pair(SCROLLWIN_TEXT, -1, -1);
+ init_pair(SCROLLWIN_HEADING, COLOR_GREEN, -1);
+ init_pair(SCROLLWIN_BOX, COLOR_YELLOW, -1);
+
+ init_pair(DIALOG_TEXT, -1, -1);
+ init_pair(DIALOG_BOX, COLOR_YELLOW, -1);
+ init_pair(DIALOG_MENU_BACK, COLOR_YELLOW, -1);
+ init_pair(DIALOG_MENU_FORE, COLOR_RED, -1);
+
+ init_pair(INPUT_BOX, COLOR_YELLOW, -1);
+ init_pair(INPUT_HEADING, COLOR_GREEN, -1);
+ init_pair(INPUT_TEXT, -1, -1);
+ init_pair(INPUT_FIELD, -1, -1);
+
+ init_pair(FUNCTION_HIGHLIGHT, -1, -1);
+ init_pair(FUNCTION_TEXT, COLOR_BLUE, -1);
+}
+
+/* available attributes:
+ A_NORMAL Normal display (no highlight)
+ A_STANDOUT Best highlighting mode of the terminal.
+ A_UNDERLINE Underlining
+ A_REVERSE Reverse video
+ A_BLINK Blinking
+ A_DIM Half bright
+ A_BOLD Extra bright or bold
+ A_PROTECT Protected mode
+ A_INVIS Invisible or blank mode
+ A_ALTCHARSET Alternate character set
+ A_CHARTEXT Bit-mask to extract a character
+ COLOR_PAIR(n) Color-pair number n
+ */
+static void normal_color_theme(void)
+{
+ /* automatically add color... */
+#define mkattr(name, attr) do { \
+attributes[name] = attr | COLOR_PAIR(name); } while (0)
+ mkattr(NORMAL, NORMAL);
+ mkattr(MAIN_HEADING, A_BOLD | A_UNDERLINE);
+
+ mkattr(MAIN_MENU_FORE, A_REVERSE);
+ mkattr(MAIN_MENU_BACK, A_NORMAL);
+ mkattr(MAIN_MENU_GREY, A_NORMAL);
+ mkattr(MAIN_MENU_HEADING, A_BOLD);
+ mkattr(MAIN_MENU_BOX, A_NORMAL);
+
+ mkattr(SCROLLWIN_TEXT, A_NORMAL);
+ mkattr(SCROLLWIN_HEADING, A_BOLD);
+ mkattr(SCROLLWIN_BOX, A_BOLD);
+
+ mkattr(DIALOG_TEXT, A_BOLD);
+ mkattr(DIALOG_BOX, A_BOLD);
+ mkattr(DIALOG_MENU_FORE, A_STANDOUT);
+ mkattr(DIALOG_MENU_BACK, A_NORMAL);
+
+ mkattr(INPUT_BOX, A_NORMAL);
+ mkattr(INPUT_HEADING, A_BOLD);
+ mkattr(INPUT_TEXT, A_NORMAL);
+ mkattr(INPUT_FIELD, A_UNDERLINE);
+
+ mkattr(FUNCTION_HIGHLIGHT, A_BOLD);
+ mkattr(FUNCTION_TEXT, A_REVERSE);
+}
+
+static void no_colors_theme(void)
+{
+ /* automatically add highlight, no color */
+#define mkattrn(name, attr) { attributes[name] = attr; }
+
+ mkattrn(NORMAL, NORMAL);
+ mkattrn(MAIN_HEADING, A_BOLD | A_UNDERLINE);
+
+ mkattrn(MAIN_MENU_FORE, A_STANDOUT);
+ mkattrn(MAIN_MENU_BACK, A_NORMAL);
+ mkattrn(MAIN_MENU_GREY, A_NORMAL);
+ mkattrn(MAIN_MENU_HEADING, A_BOLD);
+ mkattrn(MAIN_MENU_BOX, A_NORMAL);
+
+ mkattrn(SCROLLWIN_TEXT, A_NORMAL);
+ mkattrn(SCROLLWIN_HEADING, A_BOLD);
+ mkattrn(SCROLLWIN_BOX, A_BOLD);
+
+ mkattrn(DIALOG_TEXT, A_NORMAL);
+ mkattrn(DIALOG_BOX, A_BOLD);
+ mkattrn(DIALOG_MENU_FORE, A_STANDOUT);
+ mkattrn(DIALOG_MENU_BACK, A_NORMAL);
+
+ mkattrn(INPUT_BOX, A_BOLD);
+ mkattrn(INPUT_HEADING, A_BOLD);
+ mkattrn(INPUT_TEXT, A_NORMAL);
+ mkattrn(INPUT_FIELD, A_UNDERLINE);
+
+ mkattrn(FUNCTION_HIGHLIGHT, A_BOLD);
+ mkattrn(FUNCTION_TEXT, A_REVERSE);
+}
+
+void set_colors()
+{
+ start_color();
+ use_default_colors();
+ set_normal_colors();
+ if (has_colors()) {
+ normal_color_theme();
+ } else {
+ /* give defaults */
+ no_colors_theme();
+ }
+}
+
+
+/* this changes the window's attributes !!! */
+void print_in_middle(WINDOW *win,
+ int starty,
+ int startx,
+ int width,
+ const char *string,
+ chtype color)
+{
+ int length, x, y;
+ float temp;
+
+
+ if (win == NULL)
+ win = stdscr;
+ getyx(win, y, x);
+ if (startx != 0)
+ x = startx;
+ if (starty != 0)
+ y = starty;
+ if (width == 0)
+ width = 80;
+
+ length = strlen(string);
+ temp = (width - length) / 2;
+ x = startx + (int)temp;
+ wattrset(win, color);
+ mvwprintw(win, y, x, "%s", string);
+ refresh();
+}
+
+int get_line_no(const char *text)
+{
+ int i;
+ int total = 1;
+
+ if (!text)
+ return 0;
+
+ for (i = 0; text[i] != '\0'; i++)
+ if (text[i] == '\n')
+ total++;
+ return total;
+}
+
+const char *get_line(const char *text, int line_no)
+{
+ int i;
+ int lines = 0;
+
+ if (!text)
+ return 0;
+
+ for (i = 0; text[i] != '\0' && lines < line_no; i++)
+ if (text[i] == '\n')
+ lines++;
+ return text+i;
+}
+
+int get_line_length(const char *line)
+{
+ int res = 0;
+ while (*line != '\0' && *line != '\n') {
+ line++;
+ res++;
+ }
+ return res;
+}
+
+/* print all lines to the window. */
+void fill_window(WINDOW *win, const char *text)
+{
+ int x, y;
+ int total_lines = get_line_no(text);
+ int i;
+
+ getmaxyx(win, y, x);
+ /* do not print past the bottom of the window */
+ total_lines = min(total_lines, y);
+ for (i = 0; i < total_lines; i++) {
+ char tmp[x+10];
+ const char *line = get_line(text, i);
+ int len = min(get_line_length(line), x);
+
+ strncpy(tmp, line, len);
+ tmp[len] = '\0';
+ mvwprintw(win, i, 0, "%s", tmp);
+ }
+}
+
+/* get the message, and buttons.
+ * each button must be a char*
+ * return the selected button
+ *
+ * this dialog is used for 2 different things:
+ * 1) show a text box, no buttons.
+ * 2) show a dialog, with horizontal buttons
+ */
+int btn_dialog(WINDOW *main_window, const char *msg, int btn_num, ...)
+{
+ va_list ap;
+ char *btn;
+ int btns_width = 0;
+ int msg_lines = 0;
+ int msg_width = 0;
+ int total_width;
+ int win_rows = 0;
+ WINDOW *win;
+ WINDOW *msg_win;
+ WINDOW *menu_win;
+ MENU *menu;
+ ITEM *btns[btn_num+1];
+ int i, x, y;
+ int res = -1;
+
+
+ va_start(ap, btn_num);
+ for (i = 0; i < btn_num; i++) {
+ btn = va_arg(ap, char *);
+ btns[i] = new_item(btn, "");
+ btns_width += strlen(btn)+1;
+ }
+ va_end(ap);
+ btns[btn_num] = NULL;
+
+ /* find the widest line of msg: */
+ msg_lines = get_line_no(msg);
+ for (i = 0; i < msg_lines; i++) {
+ const char *line = get_line(msg, i);
+ int len = get_line_length(line);
+ if (msg_width < len)
+ msg_width = len;
+ }
+
+ total_width = max(msg_width, btns_width);
+ /* place dialog in middle of screen */
+ y = (LINES-(msg_lines+4))/2;
+ x = (COLS-(total_width+4))/2;
+
+
+ /* create the windows */
+ if (btn_num > 0)
+ win_rows = msg_lines+4;
+ else
+ win_rows = msg_lines+2;
+
+ win = newwin(win_rows, total_width+4, y, x);
+ keypad(win, TRUE);
+ menu_win = derwin(win, 1, btns_width, win_rows-2,
+ 1+(total_width+2-btns_width)/2);
+ menu = new_menu(btns);
+ msg_win = derwin(win, win_rows-2, msg_width, 1,
+ 1+(total_width+2-msg_width)/2);
+
+ set_menu_fore(menu, attributes[DIALOG_MENU_FORE]);
+ set_menu_back(menu, attributes[DIALOG_MENU_BACK]);
+
+ wattrset(win, attributes[DIALOG_BOX]);
+ box(win, 0, 0);
+
+ /* print message */
+ wattrset(msg_win, attributes[DIALOG_TEXT]);
+ fill_window(msg_win, msg);
+
+ set_menu_win(menu, win);
+ set_menu_sub(menu, menu_win);
+ set_menu_format(menu, 1, btn_num);
+ menu_opts_off(menu, O_SHOWDESC);
+ menu_opts_off(menu, O_SHOWMATCH);
+ menu_opts_on(menu, O_ONEVALUE);
+ menu_opts_on(menu, O_NONCYCLIC);
+ set_menu_mark(menu, "");
+ post_menu(menu);
+
+
+ touchwin(win);
+ refresh_all_windows(main_window);
+ while ((res = wgetch(win))) {
+ switch (res) {
+ case KEY_LEFT:
+ menu_driver(menu, REQ_LEFT_ITEM);
+ break;
+ case KEY_RIGHT:
+ menu_driver(menu, REQ_RIGHT_ITEM);
+ break;
+ case 10: /* ENTER */
+ case 27: /* ESCAPE */
+ case ' ':
+ case KEY_F(F_BACK):
+ case KEY_F(F_EXIT):
+ break;
+ }
+ touchwin(win);
+ refresh_all_windows(main_window);
+
+ if (res == 10 || res == ' ') {
+ res = item_index(current_item(menu));
+ break;
+ } else if (res == 27 || res == KEY_F(F_BACK) ||
+ res == KEY_F(F_EXIT)) {
+ res = KEY_EXIT;
+ break;
+ }
+ }
+
+ unpost_menu(menu);
+ free_menu(menu);
+ for (i = 0; i < btn_num; i++)
+ free_item(btns[i]);
+
+ delwin(win);
+ return res;
+}
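
A minimal usage sketch (not part of the patch), assuming nconf.h is included and main_window has already been initialised; the message text and button labels are placeholders:

	const char *msg = "Do you wish to save your new configuration?";
	int res = btn_dialog(main_window, msg, 2, " <Yes> ", " <No> ");

	if (res == 0)
		;	/* first button (" <Yes> ") selected */
	else if (res == KEY_EXIT)
		;	/* dialog dismissed with ESC or a function key */
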
+
+int dialog_inputbox(WINDOW *main_window,
+ const char *title, const char *prompt,
+ const char *init, char *result, int result_len)
+{
+ int prompt_lines = 0;
+ int prompt_width = 0;
+ WINDOW *win;
+ WINDOW *prompt_win;
+ WINDOW *form_win;
+ PANEL *panel;
+ int i, x, y;
+ int res = -1;
+ int cursor_position = strlen(init);
+
+
+ /* find the widest line of msg: */
+ prompt_lines = get_line_no(prompt);
+ for (i = 0; i < prompt_lines; i++) {
+ const char *line = get_line(prompt, i);
+ int len = get_line_length(line);
+ prompt_width = max(prompt_width, len);
+ }
+
+ if (title)
+ prompt_width = max(prompt_width, strlen(title));
+
+ /* place dialog in middle of screen */
+ y = (LINES-(prompt_lines+4))/2;
+ x = (COLS-(prompt_width+4))/2;
+
+ strncpy(result, init, result_len);
+
+ /* create the windows */
+ win = newwin(prompt_lines+6, prompt_width+7, y, x);
+ prompt_win = derwin(win, prompt_lines+1, prompt_width, 2, 2);
+ form_win = derwin(win, 1, prompt_width, prompt_lines+3, 2);
+ keypad(form_win, TRUE);
+
+ wattrset(form_win, attributes[INPUT_FIELD]);
+
+ wattrset(win, attributes[INPUT_BOX]);
+ box(win, 0, 0);
+ wattrset(win, attributes[INPUT_HEADING]);
+ if (title)
+ mvwprintw(win, 0, 3, "%s", title);
+
+ /* print message */
+ wattrset(prompt_win, attributes[INPUT_TEXT]);
+ fill_window(prompt_win, prompt);
+
+ mvwprintw(form_win, 0, 0, "%*s", prompt_width, " ");
+ mvwprintw(form_win, 0, 0, "%s", result);
+
+ /* create panels */
+ panel = new_panel(win);
+
+ /* show the cursor */
+ curs_set(1);
+
+ touchwin(win);
+ refresh_all_windows(main_window);
+ while ((res = wgetch(form_win))) {
+ int len = strlen(result);
+ switch (res) {
+ case 10: /* ENTER */
+ case 27: /* ESCAPE */
+ case KEY_F(F_HELP):
+ case KEY_F(F_EXIT):
+ case KEY_F(F_BACK):
+ break;
+ case 127:
+ case KEY_BACKSPACE:
+ if (cursor_position > 0) {
+ memmove(&result[cursor_position-1],
+ &result[cursor_position],
+ len-cursor_position+1);
+ cursor_position--;
+ }
+ break;
+ case KEY_DC:
+ if (cursor_position >= 0 && cursor_position < len) {
+ memmove(&result[cursor_position],
+ &result[cursor_position+1],
+ len-cursor_position+1);
+ }
+ break;
+ case KEY_UP:
+ case KEY_RIGHT:
+ if (cursor_position < len &&
+ cursor_position < min(result_len, prompt_width))
+ cursor_position++;
+ break;
+ case KEY_DOWN:
+ case KEY_LEFT:
+ if (cursor_position > 0)
+ cursor_position--;
+ break;
+ default:
+ if ((isgraph(res) || isspace(res)) &&
+ len-2 < result_len) {
+ /* insert the char at the proper position */
+ memmove(&result[cursor_position+1],
+ &result[cursor_position],
+ len+1);
+ result[cursor_position] = res;
+ cursor_position++;
+ } else {
+ mvprintw(0, 0, "unknown key: %d\n", res);
+ }
+ break;
+ }
+ wmove(form_win, 0, 0);
+ wclrtoeol(form_win);
+ mvwprintw(form_win, 0, 0, "%*s", prompt_width, " ");
+ mvwprintw(form_win, 0, 0, "%s", result);
+ wmove(form_win, 0, cursor_position);
+ touchwin(win);
+ refresh_all_windows(main_window);
+
+ if (res == 10) {
+ res = 0;
+ break;
+ } else if (res == 27 || res == KEY_F(F_BACK) ||
+ res == KEY_F(F_EXIT)) {
+ res = KEY_EXIT;
+ break;
+ } else if (res == KEY_F(F_HELP)) {
+ res = 1;
+ break;
+ }
+ }
+
+ /* hide the cursor */
+ curs_set(0);
+ del_panel(panel);
+ delwin(prompt_win);
+ delwin(form_win);
+ delwin(win);
+ return res;
+}
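
A similar hedged sketch for dialog_inputbox(); the title, prompt and buffer size here are placeholders:

	char input[256] = "";
	int res = dialog_inputbox(main_window, "Search", "Enter a symbol name:",
				  "", input, sizeof(input));

	if (res == 0)
		;	/* ENTER pressed: 'input' now holds the typed string */
	else if (res == 1)
		;	/* help requested */
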
+
+/* refresh all windows in the correct order */
+void refresh_all_windows(WINDOW *main_window)
+{
+ update_panels();
+ touchwin(main_window);
+ refresh();
+}
+
+/* layman's scrollable window... */
+void show_scroll_win(WINDOW *main_window,
+ const char *title,
+ const char *text)
+{
+ int res;
+ int total_lines = get_line_no(text);
+ int x, y;
+ int start_x = 0, start_y = 0;
+ int text_lines = 0, text_cols = 0;
+ int total_cols = 0;
+ int win_cols = 0;
+ int win_lines = 0;
+ int i = 0;
+ WINDOW *win;
+ WINDOW *pad;
+ PANEL *panel;
+
+ /* find the widest line of msg: */
+ total_lines = get_line_no(text);
+ for (i = 0; i < total_lines; i++) {
+ const char *line = get_line(text, i);
+ int len = get_line_length(line);
+ total_cols = max(total_cols, len+2);
+ }
+
+ /* create the pad */
+ pad = newpad(total_lines+10, total_cols+10);
+ wattrset(pad, attributes[SCROLLWIN_TEXT]);
+ fill_window(pad, text);
+
+ win_lines = min(total_lines+4, LINES-2);
+ win_cols = min(total_cols+2, COLS-2);
+ text_lines = max(win_lines-4, 0);
+ text_cols = max(win_cols-2, 0);
+
+ /* place window in middle of screen */
+ y = (LINES-win_lines)/2;
+ x = (COLS-win_cols)/2;
+
+ win = newwin(win_lines, win_cols, y, x);
+ keypad(win, TRUE);
+ /* show the help in the help window, and show the help panel */
+ wattrset(win, attributes[SCROLLWIN_BOX]);
+ box(win, 0, 0);
+ wattrset(win, attributes[SCROLLWIN_HEADING]);
+ mvwprintw(win, 0, 3, " %s ", title);
+ panel = new_panel(win);
+
+ /* handle scrolling */
+ do {
+
+ copywin(pad, win, start_y, start_x, 2, 2, text_lines,
+ text_cols, 0);
+ print_in_middle(win,
+ text_lines+2,
+ 0,
+ text_cols,
+ "<OK>",
+ attributes[DIALOG_MENU_FORE]);
+ wrefresh(win);
+
+ res = wgetch(win);
+ switch (res) {
+ case KEY_NPAGE:
+ case ' ':
+ start_y += text_lines-2;
+ break;
+ case KEY_PPAGE:
+ start_y -= text_lines+2;
+ break;
+ case KEY_HOME:
+ start_y = 0;
+ break;
+ case KEY_END:
+ start_y = total_lines-text_lines;
+ break;
+ case KEY_DOWN:
+ case 'j':
+ start_y++;
+ break;
+ case KEY_UP:
+ case 'k':
+ start_y--;
+ break;
+ case KEY_LEFT:
+ case 'h':
+ start_x--;
+ break;
+ case KEY_RIGHT:
+ case 'l':
+ start_x++;
+ break;
+ }
+ if (res == 10 || res == 27 || res == 'q'
+ || res == KEY_F(F_BACK) || res == KEY_F(F_EXIT)) {
+ break;
+ }
+ if (start_y < 0)
+ start_y = 0;
+ if (start_y >= total_lines-text_lines)
+ start_y = total_lines-text_lines;
+ if (start_x < 0)
+ start_x = 0;
+ if (start_x >= total_cols-text_cols)
+ start_x = total_cols-text_cols;
+ } while (res);
+
+ del_panel(panel);
+ delwin(win);
+ refresh_all_windows(main_window);
+}
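
And a short sketch of the scroll window; the title and text are illustrative only:

	show_scroll_win(main_window, "Help",
			"This is a long, possibly multi-line help text...\n"
			"Use the arrow keys or PgUp/PgDn to scroll.");
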
diff --git a/scripts/kconfig/nconf.h b/scripts/kconfig/nconf.h
new file mode 100644
index 000000000000..fb4296666004
--- /dev/null
+++ b/scripts/kconfig/nconf.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2008 Nir Tzachar <nir.tzachar@gmail.com>
+ * Released under the terms of the GNU GPL v2.0.
+ *
+ * Derived from menuconfig.
+ *
+ */
+
+#include <ctype.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <locale.h>
+#include <curses.h>
+#include <menu.h>
+#include <panel.h>
+#include <form.h>
+
+#include <stdio.h>
+#include <time.h>
+#include <sys/time.h>
+
+#include "ncurses.h"
+
+#define max(a, b) ({\
+ typeof(a) _a = a;\
+ typeof(b) _b = b;\
+ _a > _b ? _a : _b; })
+
+#define min(a, b) ({\
+ typeof(a) _a = a;\
+ typeof(b) _b = b;\
+ _a < _b ? _a : _b; })
+
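
These statement-expression macros evaluate each argument exactly once. A small stand-alone sketch of the difference against a naive macro (GCC typeof/({ }) extensions assumed):

#include <stdio.h>

#define naive_max(a, b) ((a) > (b) ? (a) : (b))
#define max(a, b) ({\
	typeof(a) _a = a;\
	typeof(b) _b = b;\
	_a > _b ? _a : _b; })	/* same form as above */

int main(void)
{
	int i = 1, j = 1;
	int m = max(i++, 0);		/* i++ evaluated once:  m == 1, i == 2 */
	int n = naive_max(j++, 0);	/* j++ evaluated twice: n == 2, j == 3 */

	printf("%d %d %d %d\n", m, i, n, j);	/* prints: 1 2 2 3 */
	return 0;
}
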
+typedef enum {
+ NORMAL = 1,
+ MAIN_HEADING,
+ MAIN_MENU_BOX,
+ MAIN_MENU_FORE,
+ MAIN_MENU_BACK,
+ MAIN_MENU_GREY,
+ MAIN_MENU_HEADING,
+ SCROLLWIN_TEXT,
+ SCROLLWIN_HEADING,
+ SCROLLWIN_BOX,
+ DIALOG_TEXT,
+ DIALOG_MENU_FORE,
+ DIALOG_MENU_BACK,
+ DIALOG_BOX,
+ INPUT_BOX,
+ INPUT_HEADING,
+ INPUT_TEXT,
+ INPUT_FIELD,
+ FUNCTION_TEXT,
+ FUNCTION_HIGHLIGHT,
+ ATTR_MAX
+} attributes_t;
+extern attributes_t attributes[];
+
+typedef enum {
+ F_HELP = 1,
+ F_SYMBOL = 2,
+ F_INSTS = 3,
+ F_CONF = 4,
+ F_BACK = 5,
+ F_SAVE = 6,
+ F_LOAD = 7,
+ F_EXIT = 8
+} function_key;
+
+void set_colors(void);
+
+/* this changes the window's attributes !!! */
+void print_in_middle(WINDOW *win,
+ int starty,
+ int startx,
+ int width,
+ const char *string,
+ chtype color);
+int get_line_length(const char *line);
+int get_line_no(const char *text);
+const char *get_line(const char *text, int line_no);
+void fill_window(WINDOW *win, const char *text);
+int btn_dialog(WINDOW *main_window, const char *msg, int btn_num, ...);
+int dialog_inputbox(WINDOW *main_window,
+ const char *title, const char *prompt,
+ const char *init, char *result, int result_len);
+void refresh_all_windows(WINDOW *main_window);
+void show_scroll_win(WINDOW *main_window,
+ const char *title,
+ const char *text);
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index 6c8fbbb66ebc..2e7a048e0cfc 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -651,12 +651,20 @@ bool sym_is_changable(struct symbol *sym)
return sym->visible > sym->rev_dep.tri;
}
+static unsigned strhash(const char *s)
+{
+ /* fnv32 hash */
+ unsigned hash = 2166136261U;
+ for (; *s; s++)
+ hash = (hash ^ *s) * 0x01000193;
+ return hash;
+}
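
For reference, a stand-alone sketch of how this FNV-1a style hash spreads symbol names over the table; the SYMBOL_HASHSIZE value below is a placeholder, the real constant is defined elsewhere in kconfig:

#include <stdio.h>

#define SYMBOL_HASHSIZE 9973	/* placeholder bucket count for the sketch */

static unsigned strhash(const char *s)
{
	/* xor the byte in, then multiply by the FNV prime */
	unsigned hash = 2166136261U;
	for (; *s; s++)
		hash = (hash ^ *s) * 0x01000193;
	return hash;
}

int main(void)
{
	const char *names[] = { "EXT3_FS", "USB_SUPPORT", "NETDEVICES" };
	int i;

	for (i = 0; i < 3; i++)
		printf("%-12s -> bucket %u\n", names[i],
		       strhash(names[i]) % SYMBOL_HASHSIZE);
	return 0;
}
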
+
struct symbol *sym_lookup(const char *name, int flags)
{
struct symbol *symbol;
- const char *ptr;
char *new_name;
- int hash = 0;
+ int hash;
if (name) {
if (name[0] && !name[1]) {
@@ -666,12 +674,11 @@ struct symbol *sym_lookup(const char *name, int flags)
case 'n': return &symbol_no;
}
}
- for (ptr = name; *ptr; ptr++)
- hash += *ptr;
- hash &= 0xff;
+ hash = strhash(name) % SYMBOL_HASHSIZE;
for (symbol = symbol_hash[hash]; symbol; symbol = symbol->next) {
- if (!strcmp(symbol->name, name) &&
+ if (symbol->name &&
+ !strcmp(symbol->name, name) &&
(flags ? symbol->flags & flags
: !(symbol->flags & (SYMBOL_CONST|SYMBOL_CHOICE))))
return symbol;
@@ -679,7 +686,7 @@ struct symbol *sym_lookup(const char *name, int flags)
new_name = strdup(name);
} else {
new_name = NULL;
- hash = 256;
+ hash = 0;
}
symbol = malloc(sizeof(*symbol));
@@ -697,7 +704,6 @@ struct symbol *sym_lookup(const char *name, int flags)
struct symbol *sym_find(const char *name)
{
struct symbol *symbol = NULL;
- const char *ptr;
int hash = 0;
if (!name)
@@ -710,12 +716,11 @@ struct symbol *sym_find(const char *name)
case 'n': return &symbol_no;
}
}
- for (ptr = name; *ptr; ptr++)
- hash += *ptr;
- hash &= 0xff;
+ hash = strhash(name) % SYMBOL_HASHSIZE;
for (symbol = symbol_hash[hash]; symbol; symbol = symbol->next) {
- if (!strcmp(symbol->name, name) &&
+ if (symbol->name &&
+ !strcmp(symbol->name, name) &&
!(symbol->flags & SYMBOL_CONST))
break;
}
@@ -750,6 +755,7 @@ struct symbol **sym_re_search(const char *pattern)
return NULL;
}
}
+ sym_calc_value(sym);
sym_arr[cnt++] = sym;
}
if (sym_arr)
diff --git a/scripts/kconfig/util.c b/scripts/kconfig/util.c
index b6b2a46af14c..78b5c04e736b 100644
--- a/scripts/kconfig/util.c
+++ b/scripts/kconfig/util.c
@@ -72,12 +72,13 @@ int file_write_dep(const char *name)
}
-/* Allocate initial growable sting */
+/* Allocate initial growable string */
struct gstr str_new(void)
{
struct gstr gs;
gs.s = malloc(sizeof(char) * 64);
gs.len = 64;
+ gs.max_width = 0;
strcpy(gs.s, "\0");
return gs;
}
@@ -88,6 +89,7 @@ struct gstr str_assign(const char *s)
struct gstr gs;
gs.s = strdup(s);
gs.len = strlen(s) + 1;
+ gs.max_width = 0;
return gs;
}
diff --git a/scripts/kconfig/zconf.tab.c_shipped b/scripts/kconfig/zconf.tab.c_shipped
index 6e9dcd59aa87..32a9eefd842c 100644
--- a/scripts/kconfig/zconf.tab.c_shipped
+++ b/scripts/kconfig/zconf.tab.c_shipped
@@ -104,7 +104,7 @@ static void zconf_error(const char *err, ...);
static void zconferror(const char *err);
static bool zconf_endtoken(struct kconf_id *id, int starttoken, int endtoken);
-struct symbol *symbol_hash[257];
+struct symbol *symbol_hash[SYMBOL_HASHSIZE];
static struct menu *current_menu, *current_entry;
@@ -2220,7 +2220,7 @@ void conf_parse(const char *name)
zconf_initscan(name);
sym_init();
- menu_init();
+ _menu_init();
modules_sym = sym_lookup(NULL, 0);
modules_sym->type = S_BOOLEAN;
modules_sym->flags |= SYMBOL_AUTO;
@@ -2336,9 +2336,9 @@ static void print_symbol(FILE *out, struct menu *menu)
struct property *prop;
if (sym_is_choice(sym))
- fprintf(out, "choice\n");
+ fprintf(out, "\nchoice\n");
else
- fprintf(out, "config %s\n", sym->name);
+ fprintf(out, "\nconfig %s\n", sym->name);
switch (sym->type) {
case S_BOOLEAN:
fputs(" boolean\n", out);
@@ -2384,6 +2384,21 @@ static void print_symbol(FILE *out, struct menu *menu)
case P_CHOICE:
fputs(" #choice value\n", out);
break;
+ case P_SELECT:
+ fputs( " select ", out);
+ expr_fprint(prop->expr, out);
+ fputc('\n', out);
+ break;
+ case P_RANGE:
+ fputs( " range ", out);
+ expr_fprint(prop->expr, out);
+ fputc('\n', out);
+ break;
+ case P_MENU:
+ fputs( " menu ", out);
+ print_quoted_string(out, prop->text);
+ fputc('\n', out);
+ break;
default:
fprintf(out, " unknown prop %d!\n", prop->type);
break;
@@ -2395,7 +2410,6 @@ static void print_symbol(FILE *out, struct menu *menu)
menu->help[len] = 0;
fprintf(out, " help\n%s\n", menu->help);
}
- fputc('\n', out);
}
void zconfdump(FILE *out)
@@ -2428,7 +2442,6 @@ void zconfdump(FILE *out)
expr_fprint(prop->visible.expr, out);
fputc('\n', out);
}
- fputs("\n", out);
}
if (menu->list)
diff --git a/scripts/kconfig/zconf.y b/scripts/kconfig/zconf.y
index 8c43491f8cc9..23dfd3baa7a1 100644
--- a/scripts/kconfig/zconf.y
+++ b/scripts/kconfig/zconf.y
@@ -27,7 +27,7 @@ static void zconf_error(const char *err, ...);
static void zconferror(const char *err);
static bool zconf_endtoken(struct kconf_id *id, int starttoken, int endtoken);
-struct symbol *symbol_hash[257];
+struct symbol *symbol_hash[SYMBOL_HASHSIZE];
static struct menu *current_menu, *current_entry;
@@ -475,7 +475,7 @@ void conf_parse(const char *name)
zconf_initscan(name);
sym_init();
- menu_init();
+ _menu_init();
modules_sym = sym_lookup(NULL, 0);
modules_sym->type = S_BOOLEAN;
modules_sym->flags |= SYMBOL_AUTO;
@@ -591,9 +591,9 @@ static void print_symbol(FILE *out, struct menu *menu)
struct property *prop;
if (sym_is_choice(sym))
- fprintf(out, "choice\n");
+ fprintf(out, "\nchoice\n");
else
- fprintf(out, "config %s\n", sym->name);
+ fprintf(out, "\nconfig %s\n", sym->name);
switch (sym->type) {
case S_BOOLEAN:
fputs(" boolean\n", out);
@@ -639,6 +639,21 @@ static void print_symbol(FILE *out, struct menu *menu)
case P_CHOICE:
fputs(" #choice value\n", out);
break;
+ case P_SELECT:
+ fputs( " select ", out);
+ expr_fprint(prop->expr, out);
+ fputc('\n', out);
+ break;
+ case P_RANGE:
+ fputs( " range ", out);
+ expr_fprint(prop->expr, out);
+ fputc('\n', out);
+ break;
+ case P_MENU:
+ fputs( " menu ", out);
+ print_quoted_string(out, prop->text);
+ fputc('\n', out);
+ break;
default:
fprintf(out, " unknown prop %d!\n", prop->type);
break;
@@ -650,7 +665,6 @@ static void print_symbol(FILE *out, struct menu *menu)
menu->help[len] = 0;
fprintf(out, " help\n%s\n", menu->help);
}
- fputc('\n', out);
}
void zconfdump(FILE *out)
@@ -683,7 +697,6 @@ void zconfdump(FILE *out)
expr_fprint(prop->visible.expr, out);
fputc('\n', out);
}
- fputs("\n", out);
}
if (menu->list)
diff --git a/scripts/markup_oops.pl b/scripts/markup_oops.pl
index e950f9cde019..827896f56501 100644
--- a/scripts/markup_oops.pl
+++ b/scripts/markup_oops.pl
@@ -2,6 +2,7 @@
use File::Basename;
use Math::BigInt;
+use Getopt::Long;
# Copyright 2008, Intel Corporation
#
@@ -15,6 +16,16 @@ use Math::BigInt;
# Arjan van de Ven <arjan@linux.intel.com>
+my $cross_compile = "";
+my $vmlinux_name = "";
+my $modulefile = "";
+
+# Get options
+Getopt::Long::GetOptions(
+ 'cross-compile|c=s' => \$cross_compile,
+ 'module|m=s' => \$modulefile,
+ 'help|h' => \&usage,
+) || usage ();
my $vmlinux_name = $ARGV[0];
if (!defined($vmlinux_name)) {
my $kerver = `uname -r`;
@@ -23,9 +34,8 @@ if (!defined($vmlinux_name)) {
print "No vmlinux specified, assuming $vmlinux_name\n";
}
my $filename = $vmlinux_name;
-#
-# Step 1: Parse the oops to find the EIP value
-#
+
+# Parse the oops to find the EIP value
my $target = "0";
my $function;
@@ -177,26 +187,26 @@ my $decodestart = Math::BigInt->from_hex("0x$target") - Math::BigInt->from_hex("
my $decodestop = Math::BigInt->from_hex("0x$target") + 8192;
if ($target eq "0") {
print "No oops found!\n";
- print "Usage: \n";
- print " dmesg | perl scripts/markup_oops.pl vmlinux\n";
- exit;
+ usage();
}
# if it's a module, we need to find the .ko file and calculate a load offset
if ($module ne "") {
- my $modulefile = `modinfo $module | grep '^filename:' | awk '{ print \$2 }'`;
- chomp($modulefile);
+ if ($modulefile eq "") {
+ $modulefile = `modinfo -F filename $module`;
+ chomp($modulefile);
+ }
$filename = $modulefile;
if ($filename eq "") {
print "Module .ko file for $module not found. Aborting\n";
exit;
}
# ok so we found the module, now we need to calculate the vma offset
- open(FILE, "objdump -dS $filename |") || die "Cannot start objdump";
+ open(FILE, $cross_compile."objdump -dS $filename |") || die "Cannot start objdump";
while (<FILE>) {
if ($_ =~ /^([0-9a-f]+) \<$function\>\:/) {
my $fu = $1;
- $vmaoffset = hex($target) - hex($fu) - hex($func_offset);
+ $vmaoffset = Math::BigInt->from_hex("0x$target") - Math::BigInt->from_hex("0x$fu") - Math::BigInt->from_hex("0x$func_offset");
}
}
close(FILE);
@@ -204,7 +214,7 @@ if ($module ne "") {
my $counter = 0;
my $state = 0;
-my $center = 0;
+my $center = -1;
my @lines;
my @reglines;
@@ -212,7 +222,7 @@ sub InRange {
my ($address, $target) = @_;
my $ad = "0x".$address;
my $ta = "0x".$target;
- my $delta = hex($ad) - hex($ta);
+ my $delta = Math::BigInt->from_hex($ad) - Math::BigInt->from_hex($ta);
if (($delta > -4096) && ($delta < 4096)) {
return 1;
@@ -225,7 +235,7 @@ sub InRange {
# first, parse the input into the lines array, but to keep size down,
# we only do this for 4Kb around the sweet spot
-open(FILE, "objdump -dS --adjust-vma=$vmaoffset --start-address=$decodestart --stop-address=$decodestop $filename |") || die "Cannot start objdump";
+open(FILE, $cross_compile."objdump -dS --adjust-vma=$vmaoffset --start-address=$decodestart --stop-address=$decodestop $filename |") || die "Cannot start objdump";
while (<FILE>) {
my $line = $_;
@@ -236,7 +246,8 @@ while (<FILE>) {
$state = 1;
}
}
- } else {
+ }
+ if ($state == 1) {
if ($line =~ /^([a-f0-9][a-f0-9][a-f0-9][a-f0-9][a-f0-9][a-f0-9]+)\:/) {
my $val = $1;
if (!InRange($val, $target)) {
@@ -259,7 +270,7 @@ if ($counter == 0) {
exit;
}
-if ($center == 0) {
+if ($center == -1) {
print "No matching code found \n";
exit;
}
@@ -344,3 +355,16 @@ while ($i < $finish) {
$i = $i +1;
}
+sub usage {
+ print <<EOT;
+Usage:
+ dmesg | perl $0 [OPTION] [VMLINUX]
+
+OPTION:
+ -c, --cross-compile CROSS_COMPILE Specify the prefix used for the toolchain.
+ -m, --module MODULE_FILENAME Specify the module file name.
+ -h, --help Help.
+EOT
+ exit;
+}
+
diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h
index 23dbad80cce9..50ad317a4bf9 100755
--- a/scripts/mkcompile_h
+++ b/scripts/mkcompile_h
@@ -67,9 +67,8 @@ UTS_TRUNCATE="cut -b -$UTS_LEN"
echo \#define LINUX_COMPILE_BY \"`whoami`\"
echo \#define LINUX_COMPILE_HOST \"`hostname | $UTS_TRUNCATE`\"
- if [ -x /bin/dnsdomainname ]; then
- domain=`dnsdomainname 2> /dev/null`
- elif [ -x /bin/domainname ]; then
+ domain=`dnsdomainname 2> /dev/null`
+ if [ -z "$domain" ]; then
domain=`domainname 2> /dev/null`
fi
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 220213e603db..003ccd72601d 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -796,6 +796,41 @@ static int do_platform_entry(const char *filename,
return 1;
}
+static int do_mdio_entry(const char *filename,
+ struct mdio_device_id *id, char *alias)
+{
+ int i;
+
+ alias += sprintf(alias, MDIO_MODULE_PREFIX);
+
+ for (i = 0; i < 32; i++) {
+ if (!((id->phy_id_mask >> (31-i)) & 1))
+ *(alias++) = '?';
+ else if ((id->phy_id >> (31-i)) & 1)
+ *(alias++) = '1';
+ else
+ *(alias++) = '0';
+ }
+
+ /* Terminate the string */
+ *alias = 0;
+
+ return 1;
+}
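
A hedged, stand-alone illustration of the alias string built above; the PHY ID, mask and the "mdio:" prefix below are example values standing in for the real module device table data and MDIO_MODULE_PREFIX:

#include <stdio.h>

int main(void)
{
	unsigned int phy_id = 0x01410cc2;	/* example value only */
	unsigned int phy_id_mask = 0xfffffff0;	/* low 4 bits are "don't care" */
	char alias[40], *p = alias;
	int i;

	p += sprintf(p, "mdio:");	/* stands in for MDIO_MODULE_PREFIX */
	for (i = 0; i < 32; i++) {
		if (!((phy_id_mask >> (31 - i)) & 1))
			*p++ = '?';
		else if ((phy_id >> (31 - i)) & 1)
			*p++ = '1';
		else
			*p++ = '0';
	}
	*p = '\0';
	printf("%s\n", alias);	/* 28 fixed bits followed by "????" */
	return 0;
}
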
+
+/* looks like: "pnp:dD" */
+static int do_isapnp_entry(const char *filename,
+ struct isapnp_device_id *id, char *alias)
+{
+ sprintf(alias, "pnp:d%c%c%c%x%x%x%x*",
+ 'A' + ((id->vendor >> 2) & 0x3f) - 1,
+ 'A' + (((id->vendor & 3) << 3) | ((id->vendor >> 13) & 7)) - 1,
+ 'A' + ((id->vendor >> 8) & 0x1f) - 1,
+ (id->function >> 4) & 0x0f, id->function & 0x0f,
+ (id->function >> 12) & 0x0f, (id->function >> 8) & 0x0f);
+ return 1;
+}
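
The bit-twiddling above unpacks the compressed three-letter EISA vendor ID. A stand-alone round-trip sketch; the encoding helper below is inferred from the decode and is not a kernel API:

#include <stdio.h>

/* inverse of the unpacking done in do_isapnp_entry() above */
static unsigned short encode_vendor(char a, char b, char c)
{
	unsigned v1 = a - 'A' + 1, v2 = b - 'A' + 1, v3 = c - 'A' + 1;

	return ((v1 & 0x3f) << 2) | ((v2 >> 3) & 3) |
	       ((v2 & 7) << 13) | ((v3 & 0x1f) << 8);
}

int main(void)
{
	unsigned short vendor = encode_vendor('A', 'B', 'C');

	printf("%c%c%c\n",
	       'A' + ((vendor >> 2) & 0x3f) - 1,
	       'A' + (((vendor & 3) << 3) | ((vendor >> 13) & 7)) - 1,
	       'A' + ((vendor >> 8) & 0x1f) - 1);	/* prints "ABC" */
	return 0;
}
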
+
/* Ignore any prefix, eg. some architectures prepend _ */
static inline int sym_is(const char *symbol, const char *name)
{
@@ -943,6 +978,14 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
do_table(symval, sym->st_size,
sizeof(struct platform_device_id), "platform",
do_platform_entry, mod);
+ else if (sym_is(symname, "__mod_mdio_device_table"))
+ do_table(symval, sym->st_size,
+ sizeof(struct mdio_device_id), "mdio",
+ do_mdio_entry, mod);
+ else if (sym_is(symname, "__mod_isapnp_device_table"))
+ do_table(symval, sym->st_size,
+ sizeof(struct isapnp_device_id), "isa",
+ do_isapnp_entry, mod);
free(zeros);
}
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 20923613467c..a5d1457cf239 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -781,10 +781,13 @@ static void check_section(const char *modname, struct elf_info *elf,
#define ALL_EXIT_TEXT_SECTIONS \
".exit.text$", ".devexit.text$", ".cpuexit.text$", ".memexit.text$"
-#define ALL_INIT_SECTIONS INIT_SECTIONS, DEV_INIT_SECTIONS, \
- CPU_INIT_SECTIONS, MEM_INIT_SECTIONS
-#define ALL_EXIT_SECTIONS EXIT_SECTIONS, DEV_EXIT_SECTIONS, \
- CPU_EXIT_SECTIONS, MEM_EXIT_SECTIONS
+#define ALL_XXXINIT_SECTIONS DEV_INIT_SECTIONS, CPU_INIT_SECTIONS, \
+ MEM_INIT_SECTIONS
+#define ALL_XXXEXIT_SECTIONS DEV_EXIT_SECTIONS, CPU_EXIT_SECTIONS, \
+ MEM_EXIT_SECTIONS
+
+#define ALL_INIT_SECTIONS INIT_SECTIONS, ALL_XXXINIT_SECTIONS
+#define ALL_EXIT_SECTIONS EXIT_SECTIONS, ALL_XXXEXIT_SECTIONS
#define DATA_SECTIONS ".data$", ".data.rel$"
#define TEXT_SECTIONS ".text$"
@@ -814,33 +817,29 @@ static const char *data_sections[] = { DATA_SECTIONS, NULL };
/* symbols in .data that may refer to init/exit sections */
-static const char *symbol_white_list[] =
-{
- "*driver",
- "*_template", /* scsi uses *_template a lot */
- "*_timer", /* arm uses ops structures named _timer a lot */
- "*_sht", /* scsi also used *_sht to some extent */
- "*_ops",
- "*_probe",
- "*_probe_one",
- "*_console",
- NULL
-};
+#define DEFAULT_SYMBOL_WHITE_LIST \
+ "*driver", \
+ "*_template", /* scsi uses *_template a lot */ \
+ "*_timer", /* arm uses ops structures named _timer a lot */ \
+ "*_sht", /* scsi also used *_sht to some extent */ \
+ "*_ops", \
+ "*_probe", \
+ "*_probe_one", \
+ "*_console"
static const char *head_sections[] = { ".head.text*", NULL };
static const char *linker_symbols[] =
{ "__init_begin", "_sinittext", "_einittext", NULL };
enum mismatch {
- NO_MISMATCH,
- TEXT_TO_INIT,
- DATA_TO_INIT,
- TEXT_TO_EXIT,
- DATA_TO_EXIT,
- XXXINIT_TO_INIT,
- XXXEXIT_TO_EXIT,
- INIT_TO_EXIT,
- EXIT_TO_INIT,
+ TEXT_TO_ANY_INIT,
+ DATA_TO_ANY_INIT,
+ TEXT_TO_ANY_EXIT,
+ DATA_TO_ANY_EXIT,
+ XXXINIT_TO_SOME_INIT,
+ XXXEXIT_TO_SOME_EXIT,
+ ANY_INIT_TO_ANY_EXIT,
+ ANY_EXIT_TO_ANY_INIT,
EXPORT_TO_INIT_EXIT,
};
@@ -848,6 +847,7 @@ struct sectioncheck {
const char *fromsec[20];
const char *tosec[20];
enum mismatch mismatch;
+ const char *symbol_white_list[20];
};
const struct sectioncheck sectioncheck[] = {
@@ -857,80 +857,103 @@ const struct sectioncheck sectioncheck[] = {
{
.fromsec = { TEXT_SECTIONS, NULL },
.tosec = { ALL_INIT_SECTIONS, NULL },
- .mismatch = TEXT_TO_INIT,
+ .mismatch = TEXT_TO_ANY_INIT,
+ .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
},
{
.fromsec = { DATA_SECTIONS, NULL },
- .tosec = { ALL_INIT_SECTIONS, NULL },
- .mismatch = DATA_TO_INIT,
+ .tosec = { ALL_XXXINIT_SECTIONS, NULL },
+ .mismatch = DATA_TO_ANY_INIT,
+ .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
+},
+{
+ .fromsec = { DATA_SECTIONS, NULL },
+ .tosec = { INIT_SECTIONS, NULL },
+ .mismatch = DATA_TO_ANY_INIT,
+ .symbol_white_list = {
+ "*_template", "*_timer", "*_sht", "*_ops",
+ "*_probe", "*_probe_one", "*_console", NULL
+ },
},
{
.fromsec = { TEXT_SECTIONS, NULL },
.tosec = { ALL_EXIT_SECTIONS, NULL },
- .mismatch = TEXT_TO_EXIT,
+ .mismatch = TEXT_TO_ANY_EXIT,
+ .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
},
{
.fromsec = { DATA_SECTIONS, NULL },
.tosec = { ALL_EXIT_SECTIONS, NULL },
- .mismatch = DATA_TO_EXIT,
+ .mismatch = DATA_TO_ANY_EXIT,
+ .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
},
/* Do not reference init code/data from devinit/cpuinit/meminit code/data */
{
- .fromsec = { DEV_INIT_SECTIONS, CPU_INIT_SECTIONS, MEM_INIT_SECTIONS, NULL },
+ .fromsec = { ALL_XXXINIT_SECTIONS, NULL },
.tosec = { INIT_SECTIONS, NULL },
- .mismatch = XXXINIT_TO_INIT,
+ .mismatch = XXXINIT_TO_SOME_INIT,
+ .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
},
/* Do not reference cpuinit code/data from meminit code/data */
{
.fromsec = { MEM_INIT_SECTIONS, NULL },
.tosec = { CPU_INIT_SECTIONS, NULL },
- .mismatch = XXXINIT_TO_INIT,
+ .mismatch = XXXINIT_TO_SOME_INIT,
+ .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
},
/* Do not reference meminit code/data from cpuinit code/data */
{
.fromsec = { CPU_INIT_SECTIONS, NULL },
.tosec = { MEM_INIT_SECTIONS, NULL },
- .mismatch = XXXINIT_TO_INIT,
+ .mismatch = XXXINIT_TO_SOME_INIT,
+ .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
},
/* Do not reference exit code/data from devexit/cpuexit/memexit code/data */
{
- .fromsec = { DEV_EXIT_SECTIONS, CPU_EXIT_SECTIONS, MEM_EXIT_SECTIONS, NULL },
+ .fromsec = { ALL_XXXEXIT_SECTIONS, NULL },
.tosec = { EXIT_SECTIONS, NULL },
- .mismatch = XXXEXIT_TO_EXIT,
+ .mismatch = XXXEXIT_TO_SOME_EXIT,
+ .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
},
/* Do not reference cpuexit code/data from memexit code/data */
{
.fromsec = { MEM_EXIT_SECTIONS, NULL },
.tosec = { CPU_EXIT_SECTIONS, NULL },
- .mismatch = XXXEXIT_TO_EXIT,
+ .mismatch = XXXEXIT_TO_SOME_EXIT,
+ .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
},
/* Do not reference memexit code/data from cpuexit code/data */
{
.fromsec = { CPU_EXIT_SECTIONS, NULL },
.tosec = { MEM_EXIT_SECTIONS, NULL },
- .mismatch = XXXEXIT_TO_EXIT,
+ .mismatch = XXXEXIT_TO_SOME_EXIT,
+ .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
},
/* Do not use exit code/data from init code */
{
.fromsec = { ALL_INIT_SECTIONS, NULL },
.tosec = { ALL_EXIT_SECTIONS, NULL },
- .mismatch = INIT_TO_EXIT,
+ .mismatch = ANY_INIT_TO_ANY_EXIT,
+ .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
},
/* Do not use init code/data from exit code */
{
.fromsec = { ALL_EXIT_SECTIONS, NULL },
.tosec = { ALL_INIT_SECTIONS, NULL },
- .mismatch = EXIT_TO_INIT,
+ .mismatch = ANY_EXIT_TO_ANY_INIT,
+ .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
},
/* Do not export init/exit functions or data */
{
.fromsec = { "__ksymtab*", NULL },
.tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
- .mismatch = EXPORT_TO_INIT_EXIT
+ .mismatch = EXPORT_TO_INIT_EXIT,
+ .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
}
};
-static int section_mismatch(const char *fromsec, const char *tosec)
+static const struct sectioncheck *section_mismatch(
+ const char *fromsec, const char *tosec)
{
int i;
int elems = sizeof(sectioncheck) / sizeof(struct sectioncheck);
@@ -939,10 +962,10 @@ static int section_mismatch(const char *fromsec, const char *tosec)
for (i = 0; i < elems; i++) {
if (match(fromsec, check->fromsec) &&
match(tosec, check->tosec))
- return check->mismatch;
+ return check;
check++;
}
- return NO_MISMATCH;
+ return NULL;
}
/**
@@ -958,10 +981,17 @@ static int section_mismatch(const char *fromsec, const char *tosec)
* fromsec = .data*
* atsym =__param*
*
+ * Pattern 1a:
+ * module_param_call() ops can refer to __init set function if permissions=0
+ * The pattern is identified by:
+ * tosec = .init.text
+ * fromsec = .data*
+ * atsym = __param_ops_*
+ *
* Pattern 2:
* Many drivers utilise a *driver container with references to
* add, remove, probe functions etc.
- * These functions may often be marked __init and we do not want to
+ * These functions may often be marked __devinit and we do not want to
* warn here.
* the pattern is identified by:
* tosec = init or exit section
@@ -982,7 +1012,8 @@ static int section_mismatch(const char *fromsec, const char *tosec)
* refsymname = __init_begin, _sinittext, _einittext
*
**/
-static int secref_whitelist(const char *fromsec, const char *fromsym,
+static int secref_whitelist(const struct sectioncheck *mismatch,
+ const char *fromsec, const char *fromsym,
const char *tosec, const char *tosym)
{
/* Check for pattern 1 */
@@ -991,10 +1022,16 @@ static int secref_whitelist(const char *fromsec, const char *fromsym,
(strncmp(fromsym, "__param", strlen("__param")) == 0))
return 0;
+ /* Check for pattern 1a */
+ if (strcmp(tosec, ".init.text") == 0 &&
+ match(fromsec, data_sections) &&
+ (strncmp(fromsym, "__param_ops_", strlen("__param_ops_")) == 0))
+ return 0;
+
/* Check for pattern 2 */
if (match(tosec, init_exit_sections) &&
match(fromsec, data_sections) &&
- match(fromsym, symbol_white_list))
+ match(fromsym, mismatch->symbol_white_list))
return 0;
/* Check for pattern 3 */
@@ -1155,7 +1192,8 @@ static int is_function(Elf_Sym *sym)
* Try to find symbols near it so user can find it.
* Check whitelist before warning - it may be a false positive.
*/
-static void report_sec_mismatch(const char *modname, enum mismatch mismatch,
+static void report_sec_mismatch(const char *modname,
+ const struct sectioncheck *mismatch,
const char *fromsec,
unsigned long long fromaddr,
const char *fromsym,
@@ -1186,8 +1224,8 @@ static void report_sec_mismatch(const char *modname, enum mismatch mismatch,
modname, fromsec, fromaddr, from, fromsym, from_p, to, tosec,
tosym, to_p);
- switch (mismatch) {
- case TEXT_TO_INIT:
+ switch (mismatch->mismatch) {
+ case TEXT_TO_ANY_INIT:
fprintf(stderr,
"The function %s%s() references\n"
"the %s %s%s%s.\n"
@@ -1197,8 +1235,8 @@ static void report_sec_mismatch(const char *modname, enum mismatch mismatch,
to, sec2annotation(tosec), tosym, to_p,
fromsym, sec2annotation(tosec), tosym);
break;
- case DATA_TO_INIT: {
- const char **s = symbol_white_list;
+ case DATA_TO_ANY_INIT: {
+ const char *const *s = mismatch->symbol_white_list;
fprintf(stderr,
"The variable %s references\n"
"the %s %s%s%s\n"
@@ -1211,15 +1249,15 @@ static void report_sec_mismatch(const char *modname, enum mismatch mismatch,
fprintf(stderr, "\n");
break;
}
- case TEXT_TO_EXIT:
+ case TEXT_TO_ANY_EXIT:
fprintf(stderr,
"The function %s() references a %s in an exit section.\n"
"Often the %s %s%s has valid usage outside the exit section\n"
"and the fix is to remove the %sannotation of %s.\n",
fromsym, to, to, tosym, to_p, sec2annotation(tosec), tosym);
break;
- case DATA_TO_EXIT: {
- const char **s = symbol_white_list;
+ case DATA_TO_ANY_EXIT: {
+ const char *const *s = mismatch->symbol_white_list;
fprintf(stderr,
"The variable %s references\n"
"the %s %s%s%s\n"
@@ -1232,8 +1270,8 @@ static void report_sec_mismatch(const char *modname, enum mismatch mismatch,
fprintf(stderr, "\n");
break;
}
- case XXXINIT_TO_INIT:
- case XXXEXIT_TO_EXIT:
+ case XXXINIT_TO_SOME_INIT:
+ case XXXEXIT_TO_SOME_EXIT:
fprintf(stderr,
"The %s %s%s%s references\n"
"a %s %s%s%s.\n"
@@ -1243,7 +1281,7 @@ static void report_sec_mismatch(const char *modname, enum mismatch mismatch,
to, sec2annotation(tosec), tosym, to_p,
tosym, fromsym, tosym);
break;
- case INIT_TO_EXIT:
+ case ANY_INIT_TO_ANY_EXIT:
fprintf(stderr,
"The %s %s%s%s references\n"
"a %s %s%s%s.\n"
@@ -1256,7 +1294,7 @@ static void report_sec_mismatch(const char *modname, enum mismatch mismatch,
to, sec2annotation(tosec), tosym, to_p,
sec2annotation(tosec), tosym, to_p);
break;
- case EXIT_TO_INIT:
+ case ANY_EXIT_TO_ANY_INIT:
fprintf(stderr,
"The %s %s%s%s references\n"
"a %s %s%s%s.\n"
@@ -1275,8 +1313,6 @@ static void report_sec_mismatch(const char *modname, enum mismatch mismatch,
"Fix this by removing the %sannotation of %s "
"or drop the export.\n",
tosym, sec2annotation(tosec), sec2annotation(tosec), tosym);
- case NO_MISMATCH:
- /* To get warnings on missing members */
break;
}
fprintf(stderr, "\n");
@@ -1286,11 +1322,11 @@ static void check_section_mismatch(const char *modname, struct elf_info *elf,
Elf_Rela *r, Elf_Sym *sym, const char *fromsec)
{
const char *tosec;
- enum mismatch mismatch;
+ const struct sectioncheck *mismatch;
tosec = sec_name(elf, sym->st_shndx);
mismatch = section_mismatch(fromsec, tosec);
- if (mismatch != NO_MISMATCH) {
+ if (mismatch) {
Elf_Sym *to;
Elf_Sym *from;
const char *tosym;
@@ -1302,7 +1338,8 @@ static void check_section_mismatch(const char *modname, struct elf_info *elf,
tosym = sym_name(elf, to);
/* check whitelist - we may ignore it */
- if (secref_whitelist(fromsec, fromsym, tosec, tosym)) {
+ if (secref_whitelist(mismatch,
+ fromsec, fromsym, tosec, tosym)) {
report_sec_mismatch(modname, mismatch,
fromsec, r->r_offset, fromsym,
is_function(from), tosec, tosym,
diff --git a/scripts/namespace.pl b/scripts/namespace.pl
index c6e88c652c2f..361d0f71184b 100755
--- a/scripts/namespace.pl
+++ b/scripts/namespace.pl
@@ -175,12 +175,11 @@ sub do_nm
}
if (! -e "$source.c" && ! -e "$source.S") {
# No obvious source, exclude the object if it is conglomerate
- if (! open(OBJDUMPDATA, "$objdump $basename|")) {
- printf STDERR "$objdump $fullname failed $!\n";
- return;
- }
+ open(my $objdumpdata, "$objdump $basename|")
+ or die "$objdump $fullname failed $!\n";
+
my $comment;
- while (<OBJDUMPDATA>) {
+ while (<$objdumpdata>) {
chomp();
if (/^In archive/) {
# Archives are always conglomerate
@@ -190,18 +189,18 @@ sub do_nm
next if (! /^[ 0-9a-f]{5,} /);
$comment .= substr($_, 43);
}
- close(OBJDUMPDATA);
+ close($objdumpdata);
+
if (!defined($comment) || $comment !~ /GCC\:.*GCC\:/m) {
printf STDERR "No source file found for $fullname\n";
}
return;
}
- if (! open(NMDATA, "$nm $basename|")) {
- printf STDERR "$nm $fullname failed $!\n";
- return;
- }
+ open (my $nmdata, "$nm $basename|")
+ or die "$nm $fullname failed $!\n";
+
my @nmdata;
- while (<NMDATA>) {
+ while (<$nmdata>) {
chop;
($type, $name) = (split(/ +/, $_, 3))[1..2];
# Expected types
@@ -268,7 +267,8 @@ sub do_nm
}
}
}
- close(NMDATA);
+ close($nmdata);
+
if ($#nmdata < 0) {
if (
$fullname ne "lib/brlock.o"
@@ -316,8 +316,7 @@ sub drop_def
sub list_multiply_defined
{
- my ($name, $module);
- foreach $name (keys(%def)) {
+ foreach my $name (keys(%def)) {
if ($#{$def{$name}} > 0) {
# Special case for cond_syscall
if ($#{$def{$name}} == 1 && $name =~ /^sys_/ &&
@@ -333,8 +332,9 @@ sub list_multiply_defined
&drop_def("arch/x86/kernel/vsyscall-sysenter_32.o", $name);
next;
}
+
printf "$name is multiply defined in :-\n";
- foreach $module (@{$def{$name}}) {
+ foreach my $module (@{$def{$name}}) {
printf "\t$module\n";
}
}
@@ -343,12 +343,13 @@ sub list_multiply_defined
sub resolve_external_references
{
- my ($object, $type, $name, $i, $j, $kstrtab, $ksymtab, $export);
+ my ($kstrtab, $ksymtab, $export);
+
printf "\n";
- foreach $object (keys(%nmdata)) {
+ foreach my $object (keys(%nmdata)) {
my $nmdata = $nmdata{$object};
- for ($i = 0; $i <= $#{$nmdata}; ++$i) {
- ($type, $name) = split(' ', $nmdata->[$i], 2);
+ for (my $i = 0; $i <= $#{$nmdata}; ++$i) {
+ my ($type, $name) = split(' ', $nmdata->[$i], 2);
if ($type eq "U" || $type eq "w") {
if (exists($def{$name}) || exists($ksymtab{$name})) {
# add the owning object to the nmdata
@@ -357,7 +358,7 @@ sub resolve_external_references
$kstrtab = "R __kstrtab_$name";
$ksymtab = "R __ksymtab_$name";
$export = 0;
- for ($j = 0; $j <= $#{$nmdata}; ++$j) {
+ for (my $j = 0; $j <= $#{$nmdata}; ++$j) {
if ($nmdata->[$j] eq $kstrtab ||
$nmdata->[$j] eq $ksymtab) {
$export = 1;
@@ -424,11 +425,11 @@ sub resolve_external_references
sub list_extra_externals
{
my %noref = ();
- my ($name, @module, $module, $export);
- foreach $name (keys(%def)) {
+
+ foreach my $name (keys(%def)) {
if (! exists($ref{$name})) {
- @module = @{$def{$name}};
- foreach $module (@module) {
+ my @module = @{$def{$name}};
+ foreach my $module (@module) {
if (! exists($noref{$module})) {
$noref{$module} = [];
}
@@ -438,16 +439,16 @@ sub list_extra_externals
}
if (%noref) {
printf "\nExternally defined symbols with no external references\n";
- foreach $module (sort(keys(%noref))) {
+ foreach my $module (sort(keys(%noref))) {
printf " $module\n";
foreach (sort(@{$noref{$module}})) {
- if (exists($export{$_})) {
- $export = " (export only)";
- }
- else {
- $export = "";
- }
- printf " $_$export\n";
+ my $export;
+ if (exists($export{$_})) {
+ $export = " (export only)";
+ } else {
+ $export = "";
+ }
+ printf " $_$export\n";
}
}
}
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index 8b357b0bd250..07f2fbde2abf 100644
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -18,6 +18,8 @@ create_package() {
cp debian/copyright "$pdir/usr/share/doc/$pname/"
cp debian/changelog "$pdir/usr/share/doc/$pname/changelog.Debian"
gzip -9 "$pdir/usr/share/doc/$pname/changelog.Debian"
+ sh -c "cd '$pdir'; find . -type f ! -path './DEBIAN/*' -printf '%P\0' \
+ | xargs -r0 md5sum > DEBIAN/md5sums"
# Fix ownership and permissions
chown -R root:root "$pdir"
diff --git a/scripts/package/mkspec b/scripts/package/mkspec
index 47bdd2f99b78..15440f55aef6 100755
--- a/scripts/package/mkspec
+++ b/scripts/package/mkspec
@@ -1,6 +1,6 @@
#!/bin/sh
#
-# Output a simple RPM spec file that uses no fancy features requring
+# Output a simple RPM spec file that uses no fancy features requiring
# RPM v4. This is intended to work with any RPM distro.
#
# The only gothic bit here is redefining install_post to avoid
@@ -39,7 +39,7 @@ if ! $PREBUILT; then
echo "Source: kernel-$__KERNELRELEASE.tar.gz"
fi
-echo "BuildRoot: /var/tmp/%{name}-%{PACKAGE_VERSION}-root"
+echo "BuildRoot: %{_tmppath}/%{name}-%{PACKAGE_VERSION}-root"
echo "Provides: $PROVIDES"
echo "%define __spec_install_post /usr/lib/rpm/brp-compress || :"
echo "%define debug_package %{nil}"
diff --git a/scripts/profile2linkerlist.pl b/scripts/profile2linkerlist.pl
index cb4260ebdb91..6943fa7cc95b 100644
--- a/scripts/profile2linkerlist.pl
+++ b/scripts/profile2linkerlist.pl
@@ -7,15 +7,13 @@
# usage:
# readprofile | sort -rn | perl profile2linkerlist.pl > functionlist
#
+use strict;
while (<>) {
my $line = $_;
$_ =~ /\W*[0-9]+\W*([a-zA-Z\_0-9]+)\W*[0-9]+/;
- if ( ($line =~ /unknown/) || ($line =~ /total/)) {
-
- } else {
- print "*(.text.$1)\n";
- }
+ print "*(.text.$1)\n"
+ unless ($line =~ /unknown/) || ($line =~ /total/);
}
diff --git a/scripts/rt-tester/rt-tester.py b/scripts/rt-tester/rt-tester.py
index 4c79660793cf..44423b4dcb82 100644
--- a/scripts/rt-tester/rt-tester.py
+++ b/scripts/rt-tester/rt-tester.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/python
#
# rt-mutex tester
#
diff --git a/scripts/selinux/genheaders/genheaders.c b/scripts/selinux/genheaders/genheaders.c
index 24626968055d..58a12c278706 100644
--- a/scripts/selinux/genheaders/genheaders.c
+++ b/scripts/selinux/genheaders/genheaders.c
@@ -81,7 +81,7 @@ int main(int argc, char *argv[])
fprintf(fout, "\n");
for (i = 1; i < isids_len; i++) {
- char *s = initial_sid_to_string[i];
+ const char *s = initial_sid_to_string[i];
fprintf(fout, "#define SECINITSID_%s", s);
for (j = 0; j < max(1, 40 - strlen(s)); j++)
fprintf(fout, " ");
diff --git a/scripts/show_delta b/scripts/show_delta
index 48a706ab3d0c..17df3051747a 100755
--- a/scripts/show_delta
+++ b/scripts/show_delta
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/python
#
# show_deltas: Read list of printk messages instrumented with
# time data, and format with time deltas.
diff --git a/scripts/tags.sh b/scripts/tags.sh
index 1a0c44d7c4a7..8509bb512935 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -5,7 +5,7 @@
# mode may be any of: tags, TAGS, cscope
#
# Uses the following environment variables:
-# ARCH, SUBARCH, srctree, src, obj
+# ARCH, SUBARCH, SRCARCH, srctree, src, obj
if [ "$KBUILD_VERBOSE" = "1" ]; then
set -x
@@ -17,28 +17,48 @@ ignore="( -name SCCS -o -name BitKeeper -o -name .svn -o \
-name .git ) \
-prune -o"
-# Do not use full path is we do not use O=.. builds
+# Do not use full path if we do not use O=.. builds
+# Use make O=. {tags|cscope}
+# to force full paths for a non-O= build
if [ "${KBUILD_SRC}" = "" ]; then
tree=
else
tree=${srctree}/
fi
+# Find all available archs
+find_all_archs()
+{
+ ALLSOURCE_ARCHS=""
+ for arch in `ls ${tree}arch`; do
+ ALLSOURCE_ARCHS="${ALLSOURCE_ARCHS} "${arch##\/}
+ done
+}
+
# Detect if ALLSOURCE_ARCHS is set. If not, we assume SRCARCH
if [ "${ALLSOURCE_ARCHS}" = "" ]; then
ALLSOURCE_ARCHS=${SRCARCH}
+elif [ "${ALLSOURCE_ARCHS}" = "all" ]; then
+ find_all_archs
fi
# find sources in arch/$ARCH
find_arch_sources()
{
- find ${tree}arch/$1 $ignore -name "$2" -print;
+ for i in $archincludedir; do
+ prune="$prune -wholename $i -prune -o"
+ done
+ find ${tree}arch/$1 $ignore $prune -name "$2" -print;
}
# find sources in arch/$1/include
find_arch_include_sources()
{
- find ${tree}arch/$1/include $ignore -name "$2" -print;
+ include=$(find ${tree}arch/$1/ -name include -type d);
+ if [ -n "$include" ]; then
+ archincludedir="$archincludedir $include"
+ find $include $ignore -name "$2" -print;
+ fi
}
# find sources in include/
@@ -63,14 +83,15 @@ find_sources()
all_sources()
{
- for arch in $ALLSOURCE_ARCHS
- do
- find_sources $arch '*.[chS]'
- done
+ find_arch_include_sources ${SRCARCH} '*.[chS]'
if [ ! -z "$archinclude" ]; then
find_arch_include_sources $archinclude '*.[chS]'
fi
find_include_sources '*.[chS]'
+ for arch in $ALLSOURCE_ARCHS
+ do
+ find_sources $arch '*.[chS]'
+ done
find_other_sources '*.[chS]'
}
@@ -89,13 +110,7 @@ all_defconfigs()
docscope()
{
- # always use absolute paths for cscope, as recommended by cscope
- # upstream
- case "$tree" in
- /*) ;;
- *) tree=$PWD/$tree ;;
- esac
- (cd /; echo \-k; echo \-q; all_sources) > cscope.files
+ (echo \-k; echo \-q; all_sources) > cscope.files
cscope -b -f cscope.out
}
diff --git a/security/capability.c b/security/capability.c
index 4875142b858d..7f093d573ede 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -12,11 +12,6 @@
#include <linux/security.h>
-static int cap_acct(struct file *file)
-{
- return 0;
-}
-
static int cap_sysctl(ctl_table *table, int op)
{
return 0;
@@ -80,42 +75,16 @@ static int cap_sb_mount(char *dev_name, struct path *path, char *type,
return 0;
}
-static int cap_sb_check_sb(struct vfsmount *mnt, struct path *path)
-{
- return 0;
-}
-
static int cap_sb_umount(struct vfsmount *mnt, int flags)
{
return 0;
}
-static void cap_sb_umount_close(struct vfsmount *mnt)
-{
-}
-
-static void cap_sb_umount_busy(struct vfsmount *mnt)
-{
-}
-
-static void cap_sb_post_remount(struct vfsmount *mnt, unsigned long flags,
- void *data)
-{
-}
-
-static void cap_sb_post_addmount(struct vfsmount *mnt, struct path *path)
-{
-}
-
static int cap_sb_pivotroot(struct path *old_path, struct path *new_path)
{
return 0;
}
-static void cap_sb_post_pivotroot(struct path *old_path, struct path *new_path)
-{
-}
-
static int cap_sb_set_mnt_opts(struct super_block *sb,
struct security_mnt_opts *opts)
{
@@ -221,10 +190,6 @@ static int cap_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
return 0;
}
-static void cap_inode_delete(struct inode *ino)
-{
-}
-
static void cap_inode_post_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
@@ -403,10 +368,6 @@ static int cap_cred_prepare(struct cred *new, const struct cred *old, gfp_t gfp)
return 0;
}
-static void cap_cred_commit(struct cred *new, const struct cred *old)
-{
-}
-
static void cap_cred_transfer(struct cred *new, const struct cred *old)
{
}
@@ -426,16 +387,6 @@ static int cap_kernel_module_request(char *kmod_name)
return 0;
}
-static int cap_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags)
-{
- return 0;
-}
-
-static int cap_task_setgid(gid_t id0, gid_t id1, gid_t id2, int flags)
-{
- return 0;
-}
-
static int cap_task_setpgid(struct task_struct *p, pid_t pgid)
{
return 0;
@@ -456,11 +407,6 @@ static void cap_task_getsecid(struct task_struct *p, u32 *secid)
*secid = 0;
}
-static int cap_task_setgroups(struct group_info *group_info)
-{
- return 0;
-}
-
static int cap_task_getioprio(struct task_struct *p)
{
return 0;
@@ -875,13 +821,6 @@ static int cap_key_getsecurity(struct key *key, char **_buffer)
return 0;
}
-static int cap_key_session_to_parent(const struct cred *cred,
- const struct cred *parent_cred,
- struct key *key)
-{
- return 0;
-}
-
#endif /* CONFIG_KEYS */
#ifdef CONFIG_AUDIT
@@ -921,7 +860,6 @@ void security_fixup_ops(struct security_operations *ops)
set_to_cap_if_null(ops, ptrace_traceme);
set_to_cap_if_null(ops, capget);
set_to_cap_if_null(ops, capset);
- set_to_cap_if_null(ops, acct);
set_to_cap_if_null(ops, capable);
set_to_cap_if_null(ops, quotactl);
set_to_cap_if_null(ops, quota_on);
@@ -941,14 +879,8 @@ void security_fixup_ops(struct security_operations *ops)
set_to_cap_if_null(ops, sb_show_options);
set_to_cap_if_null(ops, sb_statfs);
set_to_cap_if_null(ops, sb_mount);
- set_to_cap_if_null(ops, sb_check_sb);
set_to_cap_if_null(ops, sb_umount);
- set_to_cap_if_null(ops, sb_umount_close);
- set_to_cap_if_null(ops, sb_umount_busy);
- set_to_cap_if_null(ops, sb_post_remount);
- set_to_cap_if_null(ops, sb_post_addmount);
set_to_cap_if_null(ops, sb_pivotroot);
- set_to_cap_if_null(ops, sb_post_pivotroot);
set_to_cap_if_null(ops, sb_set_mnt_opts);
set_to_cap_if_null(ops, sb_clone_mnt_opts);
set_to_cap_if_null(ops, sb_parse_opts_str);
@@ -968,7 +900,6 @@ void security_fixup_ops(struct security_operations *ops)
set_to_cap_if_null(ops, inode_permission);
set_to_cap_if_null(ops, inode_setattr);
set_to_cap_if_null(ops, inode_getattr);
- set_to_cap_if_null(ops, inode_delete);
set_to_cap_if_null(ops, inode_setxattr);
set_to_cap_if_null(ops, inode_post_setxattr);
set_to_cap_if_null(ops, inode_getxattr);
@@ -1009,19 +940,15 @@ void security_fixup_ops(struct security_operations *ops)
set_to_cap_if_null(ops, cred_alloc_blank);
set_to_cap_if_null(ops, cred_free);
set_to_cap_if_null(ops, cred_prepare);
- set_to_cap_if_null(ops, cred_commit);
set_to_cap_if_null(ops, cred_transfer);
set_to_cap_if_null(ops, kernel_act_as);
set_to_cap_if_null(ops, kernel_create_files_as);
set_to_cap_if_null(ops, kernel_module_request);
- set_to_cap_if_null(ops, task_setuid);
set_to_cap_if_null(ops, task_fix_setuid);
- set_to_cap_if_null(ops, task_setgid);
set_to_cap_if_null(ops, task_setpgid);
set_to_cap_if_null(ops, task_getpgid);
set_to_cap_if_null(ops, task_getsid);
set_to_cap_if_null(ops, task_getsecid);
- set_to_cap_if_null(ops, task_setgroups);
set_to_cap_if_null(ops, task_setnice);
set_to_cap_if_null(ops, task_setioprio);
set_to_cap_if_null(ops, task_getioprio);
@@ -1113,7 +1040,6 @@ void security_fixup_ops(struct security_operations *ops)
set_to_cap_if_null(ops, key_free);
set_to_cap_if_null(ops, key_permission);
set_to_cap_if_null(ops, key_getsecurity);
- set_to_cap_if_null(ops, key_session_to_parent);
#endif /* CONFIG_KEYS */
#ifdef CONFIG_AUDIT
set_to_cap_if_null(ops, audit_rule_init);
diff --git a/security/commoncap.c b/security/commoncap.c
index 61669730da98..4e015996dd4d 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -570,7 +570,7 @@ int cap_inode_setxattr(struct dentry *dentry, const char *name,
}
if (!strncmp(name, XATTR_SECURITY_PREFIX,
- sizeof(XATTR_SECURITY_PREFIX) - 1) &&
+ sizeof(XATTR_SECURITY_PREFIX) - 1) &&
!capable(CAP_SYS_ADMIN))
return -EPERM;
return 0;
@@ -596,7 +596,7 @@ int cap_inode_removexattr(struct dentry *dentry, const char *name)
}
if (!strncmp(name, XATTR_SECURITY_PREFIX,
- sizeof(XATTR_SECURITY_PREFIX) - 1) &&
+ sizeof(XATTR_SECURITY_PREFIX) - 1) &&
!capable(CAP_SYS_ADMIN))
return -EPERM;
return 0;
@@ -931,7 +931,7 @@ int cap_vm_enough_memory(struct mm_struct *mm, long pages)
* @addr: address attempting to be mapped
* @addr_only: unused
*
- * If the process is attempting to map memory below mmap_min_addr they need
+ * If the process is attempting to map memory below dac_mmap_min_addr they need
* CAP_SYS_RAWIO. The other parameters to this function are unused by the
* capability security module. Returns 0 if this mapping should be allowed
* -EPERM if not.
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index f77c60423992..8d9c48f13774 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -470,7 +470,7 @@ struct cgroup_subsys devices_subsys = {
.name = "devices",
.can_attach = devcgroup_can_attach,
.create = devcgroup_create,
- .destroy = devcgroup_destroy,
+ .destroy = devcgroup_destroy,
.populate = devcgroup_populate,
.subsys_id = devices_subsys_id,
};
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 47fb65d1fcbd..16d100d3fc38 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -135,7 +135,7 @@ enum ima_hooks { FILE_CHECK = 1, FILE_MMAP, BPRM_CHECK };
int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask);
void ima_init_policy(void);
void ima_update_policy(void);
-int ima_parse_add_rule(char *);
+ssize_t ima_parse_add_rule(char *);
void ima_delete_rules(void);
/* LSM based policy rules require audit */
diff --git a/security/integrity/ima/ima_audit.c b/security/integrity/ima/ima_audit.c
index 5af76340470c..c5c5a72c30be 100644
--- a/security/integrity/ima/ima_audit.c
+++ b/security/integrity/ima/ima_audit.c
@@ -41,7 +41,7 @@ void integrity_audit_msg(int audit_msgno, struct inode *inode,
return;
ab = audit_log_start(current->audit_context, GFP_KERNEL, audit_msgno);
- audit_log_format(ab, "integrity: pid=%d uid=%u auid=%u ses=%u",
+ audit_log_format(ab, "pid=%d uid=%u auid=%u ses=%u",
current->pid, current_cred()->uid,
audit_get_loginuid(current),
audit_get_sessionid(current));
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index 952e51373f58..9b3ade7468b2 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -27,7 +27,7 @@ static int init_desc(struct hash_desc *desc)
desc->tfm = crypto_alloc_hash(ima_hash, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(desc->tfm)) {
- pr_info("failed to load %s transform: %ld\n",
+ pr_info("IMA: failed to load %s transform: %ld\n",
ima_hash, PTR_ERR(desc->tfm));
rc = PTR_ERR(desc->tfm);
return rc;
@@ -112,7 +112,7 @@ static void __init ima_pcrread(int idx, u8 *pcr)
return;
if (tpm_pcr_read(TPM_ANY_NUM, idx, pcr) != 0)
- pr_err("Error Communicating to TPM chip\n");
+ pr_err("IMA: Error Communicating to TPM chip\n");
}
/*
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index 07cb9c338cc4..8fe736aabe71 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -244,32 +244,34 @@ static const struct file_operations ima_ascii_measurements_ops = {
static ssize_t ima_write_policy(struct file *file, const char __user *buf,
size_t datalen, loff_t *ppos)
{
- char *data;
- int rc;
+ char *data = NULL;
+ ssize_t result;
if (datalen >= PAGE_SIZE)
- return -ENOMEM;
- if (*ppos != 0) {
- /* No partial writes. */
- return -EINVAL;
- }
+ datalen = PAGE_SIZE - 1;
+
+ /* No partial writes. */
+ result = -EINVAL;
+ if (*ppos != 0)
+ goto out;
+
+ result = -ENOMEM;
data = kmalloc(datalen + 1, GFP_KERNEL);
if (!data)
- return -ENOMEM;
+ goto out;
- if (copy_from_user(data, buf, datalen)) {
- kfree(data);
- return -EFAULT;
- }
*(data + datalen) = '\0';
- rc = ima_parse_add_rule(data);
- if (rc < 0) {
- datalen = -EINVAL;
- valid_policy = 0;
- }
+ result = -EFAULT;
+ if (copy_from_user(data, buf, datalen))
+ goto out;
+
+ result = ima_parse_add_rule(data);
+out:
+ if (result < 0)
+ valid_policy = 0;
kfree(data);
- return datalen;
+ return result;
}
static struct dentry *ima_dir;
diff --git a/security/integrity/ima/ima_iint.c b/security/integrity/ima/ima_iint.c
index 2c744d488014..2dc2d6594145 100644
--- a/security/integrity/ima/ima_iint.c
+++ b/security/integrity/ima/ima_iint.c
@@ -80,17 +80,17 @@ void iint_free(struct kref *kref)
iint->version = 0;
iint->flags = 0UL;
if (iint->readcount != 0) {
- printk(KERN_INFO "%s: readcount: %ld\n", __FUNCTION__,
+ printk(KERN_INFO "%s: readcount: %ld\n", __func__,
iint->readcount);
iint->readcount = 0;
}
if (iint->writecount != 0) {
- printk(KERN_INFO "%s: writecount: %ld\n", __FUNCTION__,
+ printk(KERN_INFO "%s: writecount: %ld\n", __func__,
iint->writecount);
iint->writecount = 0;
}
if (iint->opencount != 0) {
- printk(KERN_INFO "%s: opencount: %ld\n", __FUNCTION__,
+ printk(KERN_INFO "%s: opencount: %ld\n", __func__,
iint->opencount);
iint->opencount = 0;
}
diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c
index b1bcb702a27c..17f1f060306f 100644
--- a/security/integrity/ima/ima_init.c
+++ b/security/integrity/ima/ima_init.c
@@ -83,7 +83,7 @@ int __init ima_init(void)
ima_used_chip = 1;
if (!ima_used_chip)
- pr_info("No TPM chip found, activating TPM-bypass!\n");
+ pr_info("IMA: No TPM chip found, activating TPM-bypass!\n");
ima_add_boot_aggregate(); /* boot aggregate must be first entry */
ima_init_policy();
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index b2c89d9de2a4..f93641382e9f 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -195,7 +195,7 @@ static void ima_dec_counts(struct ima_iint_cache *iint, struct inode *inode,
(iint->writecount < 0)) &&
!ima_limit_imbalance(file)) {
printk(KERN_INFO "%s: open/free imbalance (r:%ld w:%ld o:%ld)\n",
- __FUNCTION__, iint->readcount, iint->writecount,
+ __func__, iint->readcount, iint->writecount,
iint->opencount);
dump_stack();
}
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index 8643a93c5963..aef8c0a923ab 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -246,6 +246,9 @@ static int ima_lsm_rule_init(struct ima_measure_rule_entry *entry,
{
int result;
+ if (entry->lsm[lsm_rule].rule)
+ return -EINVAL;
+
entry->lsm[lsm_rule].type = audit_type;
result = security_filter_rule_init(entry->lsm[lsm_rule].type,
Audit_equal, args,
@@ -253,6 +256,13 @@ static int ima_lsm_rule_init(struct ima_measure_rule_entry *entry,
return result;
}
+static void ima_log_string(struct audit_buffer *ab, char *key, char *value)
+{
+ audit_log_format(ab, "%s=", key);
+ audit_log_untrustedstring(ab, value);
+ audit_log_format(ab, " ");
+}
+
static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry)
{
struct audit_buffer *ab;
@@ -261,28 +271,41 @@ static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry)
ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_INTEGRITY_RULE);
- entry->action = -1;
- while ((p = strsep(&rule, " \n")) != NULL) {
+ entry->uid = -1;
+ entry->action = UNKNOWN;
+ while ((p = strsep(&rule, " \t")) != NULL) {
substring_t args[MAX_OPT_ARGS];
int token;
unsigned long lnum;
if (result < 0)
break;
- if (!*p)
+ if ((*p == '\0') || (*p == ' ') || (*p == '\t'))
continue;
token = match_token(p, policy_tokens, args);
switch (token) {
case Opt_measure:
- audit_log_format(ab, "%s ", "measure");
+ ima_log_string(ab, "action", "measure");
+
+ if (entry->action != UNKNOWN)
+ result = -EINVAL;
+
entry->action = MEASURE;
break;
case Opt_dont_measure:
- audit_log_format(ab, "%s ", "dont_measure");
+ ima_log_string(ab, "action", "dont_measure");
+
+ if (entry->action != UNKNOWN)
+ result = -EINVAL;
+
entry->action = DONT_MEASURE;
break;
case Opt_func:
- audit_log_format(ab, "func=%s ", args[0].from);
+ ima_log_string(ab, "func", args[0].from);
+
+ if (entry->func)
+ result = -EINVAL;
+
if (strcmp(args[0].from, "FILE_CHECK") == 0)
entry->func = FILE_CHECK;
/* PATH_CHECK is for backwards compat */
@@ -298,7 +321,11 @@ static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry)
entry->flags |= IMA_FUNC;
break;
case Opt_mask:
- audit_log_format(ab, "mask=%s ", args[0].from);
+ ima_log_string(ab, "mask", args[0].from);
+
+ if (entry->mask)
+ result = -EINVAL;
+
if ((strcmp(args[0].from, "MAY_EXEC")) == 0)
entry->mask = MAY_EXEC;
else if (strcmp(args[0].from, "MAY_WRITE") == 0)
@@ -313,14 +340,26 @@ static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry)
entry->flags |= IMA_MASK;
break;
case Opt_fsmagic:
- audit_log_format(ab, "fsmagic=%s ", args[0].from);
+ ima_log_string(ab, "fsmagic", args[0].from);
+
+ if (entry->fsmagic) {
+ result = -EINVAL;
+ break;
+ }
+
result = strict_strtoul(args[0].from, 16,
&entry->fsmagic);
if (!result)
entry->flags |= IMA_FSMAGIC;
break;
case Opt_uid:
- audit_log_format(ab, "uid=%s ", args[0].from);
+ ima_log_string(ab, "uid", args[0].from);
+
+ if (entry->uid != -1) {
+ result = -EINVAL;
+ break;
+ }
+
result = strict_strtoul(args[0].from, 10, &lnum);
if (!result) {
entry->uid = (uid_t) lnum;
@@ -331,50 +370,51 @@ static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry)
}
break;
case Opt_obj_user:
- audit_log_format(ab, "obj_user=%s ", args[0].from);
+ ima_log_string(ab, "obj_user", args[0].from);
result = ima_lsm_rule_init(entry, args[0].from,
LSM_OBJ_USER,
AUDIT_OBJ_USER);
break;
case Opt_obj_role:
- audit_log_format(ab, "obj_role=%s ", args[0].from);
+ ima_log_string(ab, "obj_role", args[0].from);
result = ima_lsm_rule_init(entry, args[0].from,
LSM_OBJ_ROLE,
AUDIT_OBJ_ROLE);
break;
case Opt_obj_type:
- audit_log_format(ab, "obj_type=%s ", args[0].from);
+ ima_log_string(ab, "obj_type", args[0].from);
result = ima_lsm_rule_init(entry, args[0].from,
LSM_OBJ_TYPE,
AUDIT_OBJ_TYPE);
break;
case Opt_subj_user:
- audit_log_format(ab, "subj_user=%s ", args[0].from);
+ ima_log_string(ab, "subj_user", args[0].from);
result = ima_lsm_rule_init(entry, args[0].from,
LSM_SUBJ_USER,
AUDIT_SUBJ_USER);
break;
case Opt_subj_role:
- audit_log_format(ab, "subj_role=%s ", args[0].from);
+ ima_log_string(ab, "subj_role", args[0].from);
result = ima_lsm_rule_init(entry, args[0].from,
LSM_SUBJ_ROLE,
AUDIT_SUBJ_ROLE);
break;
case Opt_subj_type:
- audit_log_format(ab, "subj_type=%s ", args[0].from);
+ ima_log_string(ab, "subj_type", args[0].from);
result = ima_lsm_rule_init(entry, args[0].from,
LSM_SUBJ_TYPE,
AUDIT_SUBJ_TYPE);
break;
case Opt_err:
- audit_log_format(ab, "UNKNOWN=%s ", p);
+ ima_log_string(ab, "UNKNOWN", p);
+ result = -EINVAL;
break;
}
}
- if (entry->action == UNKNOWN)
+ if (!result && (entry->action == UNKNOWN))
result = -EINVAL;
- audit_log_format(ab, "res=%d", !result ? 0 : 1);
+ audit_log_format(ab, "res=%d", !!result);
audit_log_end(ab);
return result;
}
@@ -384,13 +424,14 @@ static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry)
* @rule - ima measurement policy rule
*
* Uses a mutex to protect the policy list from multiple concurrent writers.
- * Returns 0 on success, an error code on failure.
+ * Returns the length of the rule parsed, or an error code on failure.
*/
-int ima_parse_add_rule(char *rule)
+ssize_t ima_parse_add_rule(char *rule)
{
const char *op = "update_policy";
+ char *p;
struct ima_measure_rule_entry *entry;
- int result = 0;
+ ssize_t result, len;
int audit_info = 0;
/* Prevent installed policy from changing */
@@ -410,18 +451,28 @@ int ima_parse_add_rule(char *rule)
INIT_LIST_HEAD(&entry->list);
- result = ima_parse_rule(rule, entry);
- if (!result) {
- mutex_lock(&ima_measure_mutex);
- list_add_tail(&entry->list, &measure_policy_rules);
- mutex_unlock(&ima_measure_mutex);
- } else {
+ p = strsep(&rule, "\n");
+ len = strlen(p) + 1;
+
+ if (*p == '#') {
+ kfree(entry);
+ return len;
+ }
+
+ result = ima_parse_rule(p, entry);
+ if (result) {
kfree(entry);
integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL,
NULL, op, "invalid policy", result,
audit_info);
+ return result;
}
- return result;
+
+ mutex_lock(&ima_measure_mutex);
+ list_add_tail(&entry->list, &measure_policy_rules);
+ mutex_unlock(&ima_measure_mutex);
+
+ return len;
}
/* ima_delete_rules called to cleanup invalid policy */
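For illustration only: with ima_parse_add_rule() now returning the number of bytes it consumed rather than 0, a caller can walk a buffer containing several newline-terminated rules (for example "measure func=FILE_CHECK mask=MAY_READ uid=0" or a '#' comment line) and advance by the returned length. The feed_policy() helper below is a hypothetical sketch, not part of the patch; only ima_parse_add_rule() itself comes from the code above.
static ssize_t feed_policy(char *data, ssize_t datalen)
{
	char *p = data;
	ssize_t rc;
	while (datalen > 0) {
		/* parses one '\n'-terminated rule (or skips a '#' comment)
		 * and returns the number of bytes consumed */
		rc = ima_parse_add_rule(p);
		if (rc < 0)
			return rc;	/* invalid rule: propagate the error */
		p += rc;
		datalen -= rc;
	}
	return 0;
}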
diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
index 46ba62b1adf5..8e28f04a5e2e 100644
--- a/security/integrity/ima/ima_queue.c
+++ b/security/integrity/ima/ima_queue.c
@@ -71,7 +71,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
qe = kmalloc(sizeof(*qe), GFP_KERNEL);
if (qe == NULL) {
- pr_err("OUT OF MEMORY ERROR creating queue entry.\n");
+ pr_err("IMA: OUT OF MEMORY ERROR creating queue entry.\n");
return -ENOMEM;
}
qe->entry = entry;
@@ -94,7 +94,7 @@ static int ima_pcr_extend(const u8 *hash)
result = tpm_pcr_extend(TPM_ANY_NUM, CONFIG_IMA_MEASURE_PCR_IDX, hash);
if (result != 0)
- pr_err("Error Communicating to TPM chip\n");
+ pr_err("IMA: Error Communicating to TPM chip\n");
return result;
}
diff --git a/security/keys/internal.h b/security/keys/internal.h
index 24ba0307b7ad..5d4402a1161a 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -87,7 +87,16 @@ extern wait_queue_head_t request_key_conswq;
extern struct key_type *key_type_lookup(const char *type);
extern void key_type_put(struct key_type *ktype);
-extern int __key_link(struct key *keyring, struct key *key);
+extern int __key_link_begin(struct key *keyring,
+ const struct key_type *type,
+ const char *description,
+ struct keyring_list **_prealloc);
+extern int __key_link_check_live_key(struct key *keyring, struct key *key);
+extern void __key_link(struct key *keyring, struct key *key,
+ struct keyring_list **_prealloc);
+extern void __key_link_end(struct key *keyring,
+ struct key_type *type,
+ struct keyring_list *prealloc);
extern key_ref_t __keyring_search_one(key_ref_t keyring_ref,
const struct key_type *type,
diff --git a/security/keys/key.c b/security/keys/key.c
index e50d264c9ad1..c1eac8084ade 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -355,7 +355,7 @@ EXPORT_SYMBOL(key_alloc);
*/
int key_payload_reserve(struct key *key, size_t datalen)
{
- int delta = (int) datalen - key->datalen;
+ int delta = (int)datalen - key->datalen;
int ret = 0;
key_check(key);
@@ -398,7 +398,8 @@ static int __key_instantiate_and_link(struct key *key,
const void *data,
size_t datalen,
struct key *keyring,
- struct key *authkey)
+ struct key *authkey,
+ struct keyring_list **_prealloc)
{
int ret, awaken;
@@ -425,7 +426,7 @@ static int __key_instantiate_and_link(struct key *key,
/* and link it into the destination keyring */
if (keyring)
- ret = __key_link(keyring, key);
+ __key_link(keyring, key, _prealloc);
/* disable the authorisation key */
if (authkey)
@@ -453,15 +454,21 @@ int key_instantiate_and_link(struct key *key,
struct key *keyring,
struct key *authkey)
{
+ struct keyring_list *prealloc;
int ret;
- if (keyring)
- down_write(&keyring->sem);
+ if (keyring) {
+ ret = __key_link_begin(keyring, key->type, key->description,
+ &prealloc);
+ if (ret < 0)
+ return ret;
+ }
- ret = __key_instantiate_and_link(key, data, datalen, keyring, authkey);
+ ret = __key_instantiate_and_link(key, data, datalen, keyring, authkey,
+ &prealloc);
if (keyring)
- up_write(&keyring->sem);
+ __key_link_end(keyring, key->type, prealloc);
return ret;
@@ -478,8 +485,9 @@ int key_negate_and_link(struct key *key,
struct key *keyring,
struct key *authkey)
{
+ struct keyring_list *prealloc;
struct timespec now;
- int ret, awaken;
+ int ret, awaken, link_ret = 0;
key_check(key);
key_check(keyring);
@@ -488,7 +496,8 @@ int key_negate_and_link(struct key *key,
ret = -EBUSY;
if (keyring)
- down_write(&keyring->sem);
+ link_ret = __key_link_begin(keyring, key->type,
+ key->description, &prealloc);
mutex_lock(&key_construction_mutex);
@@ -508,8 +517,8 @@ int key_negate_and_link(struct key *key,
ret = 0;
/* and link it into the destination keyring */
- if (keyring)
- ret = __key_link(keyring, key);
+ if (keyring && link_ret == 0)
+ __key_link(keyring, key, &prealloc);
/* disable the authorisation key */
if (authkey)
@@ -519,13 +528,13 @@ int key_negate_and_link(struct key *key,
mutex_unlock(&key_construction_mutex);
if (keyring)
- up_write(&keyring->sem);
+ __key_link_end(keyring, key->type, prealloc);
/* wake up anyone waiting for a key to be constructed */
if (awaken)
wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);
- return ret;
+ return ret == 0 ? link_ret : ret;
} /* end key_negate_and_link() */
@@ -749,6 +758,7 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
key_perm_t perm,
unsigned long flags)
{
+ struct keyring_list *prealloc;
const struct cred *cred = current_cred();
struct key_type *ktype;
struct key *keyring, *key = NULL;
@@ -775,7 +785,9 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
if (keyring->type != &key_type_keyring)
goto error_2;
- down_write(&keyring->sem);
+ ret = __key_link_begin(keyring, ktype, description, &prealloc);
+ if (ret < 0)
+ goto error_2;
/* if we're going to allocate a new key, we're going to have
* to modify the keyring */
@@ -817,7 +829,8 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
}
/* instantiate it and link it into the target keyring */
- ret = __key_instantiate_and_link(key, payload, plen, keyring, NULL);
+ ret = __key_instantiate_and_link(key, payload, plen, keyring, NULL,
+ &prealloc);
if (ret < 0) {
key_put(key);
key_ref = ERR_PTR(ret);
@@ -827,7 +840,7 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
key_ref = make_key_ref(key, is_key_possessed(keyring_ref));
error_3:
- up_write(&keyring->sem);
+ __key_link_end(keyring, ktype, prealloc);
error_2:
key_type_put(ktype);
error:
@@ -837,7 +850,7 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
/* we found a matching key, so we're going to try to update it
* - we can drop the locks first as we have the key pinned
*/
- up_write(&keyring->sem);
+ __key_link_end(keyring, ktype, prealloc);
key_type_put(ktype);
key_ref = __key_update(key_ref, payload, plen);
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index e9c2e7c584d9..8f4dce1987c4 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -212,15 +212,15 @@ SYSCALL_DEFINE4(request_key, const char __user *, _type,
ret = key->serial;
key_put(key);
- error5:
+error5:
key_type_put(ktype);
- error4:
+error4:
key_ref_put(dest_ref);
- error3:
+error3:
kfree(callout_info);
- error2:
+error2:
kfree(description);
- error:
+error:
return ret;
} /* end sys_request_key() */
@@ -246,7 +246,7 @@ long keyctl_get_keyring_ID(key_serial_t id, int create)
ret = key_ref_to_ptr(key_ref)->serial;
key_ref_put(key_ref);
- error:
+error:
return ret;
} /* end keyctl_get_keyring_ID() */
@@ -275,7 +275,7 @@ long keyctl_join_session_keyring(const char __user *_name)
ret = join_session_keyring(name);
kfree(name);
- error:
+error:
return ret;
} /* end keyctl_join_session_keyring() */
@@ -322,9 +322,9 @@ long keyctl_update_key(key_serial_t id,
ret = key_update(key_ref, payload, plen);
key_ref_put(key_ref);
- error2:
+error2:
kfree(payload);
- error:
+error:
return ret;
} /* end keyctl_update_key() */
@@ -356,7 +356,7 @@ long keyctl_revoke_key(key_serial_t id)
ret = 0;
key_ref_put(key_ref);
- error:
+error:
return ret;
} /* end keyctl_revoke_key() */
@@ -381,7 +381,7 @@ long keyctl_keyring_clear(key_serial_t ringid)
ret = keyring_clear(key_ref_to_ptr(keyring_ref));
key_ref_put(keyring_ref);
- error:
+error:
return ret;
} /* end keyctl_keyring_clear() */
@@ -413,9 +413,9 @@ long keyctl_keyring_link(key_serial_t id, key_serial_t ringid)
ret = key_link(key_ref_to_ptr(keyring_ref), key_ref_to_ptr(key_ref));
key_ref_put(key_ref);
- error2:
+error2:
key_ref_put(keyring_ref);
- error:
+error:
return ret;
} /* end keyctl_keyring_link() */
@@ -447,9 +447,9 @@ long keyctl_keyring_unlink(key_serial_t id, key_serial_t ringid)
ret = key_unlink(key_ref_to_ptr(keyring_ref), key_ref_to_ptr(key_ref));
key_ref_put(key_ref);
- error2:
+error2:
key_ref_put(keyring_ref);
- error:
+error:
return ret;
} /* end keyctl_keyring_unlink() */
@@ -529,9 +529,9 @@ okay:
}
kfree(tmpbuf);
- error2:
+error2:
key_ref_put(key_ref);
- error:
+error:
return ret;
} /* end keyctl_describe_key() */
@@ -616,17 +616,17 @@ long keyctl_keyring_search(key_serial_t ringid,
ret = key_ref_to_ptr(key_ref)->serial;
- error6:
+error6:
key_ref_put(key_ref);
- error5:
+error5:
key_type_put(ktype);
- error4:
+error4:
key_ref_put(dest_ref);
- error3:
+error3:
key_ref_put(keyring_ref);
- error2:
+error2:
kfree(description);
- error:
+error:
return ret;
} /* end keyctl_keyring_search() */
@@ -673,7 +673,7 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
}
/* the key is probably readable - now try to read it */
- can_read_key:
+can_read_key:
ret = key_validate(key);
if (ret == 0) {
ret = -EOPNOTSUPP;
@@ -686,9 +686,9 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
}
}
- error2:
+error2:
key_put(key);
- error:
+error:
return ret;
} /* end keyctl_read_key() */
@@ -1282,26 +1282,19 @@ long keyctl_session_to_parent(void)
/* the parent must have the same effective ownership and mustn't be
* SUID/SGID */
- if (pcred-> uid != mycred->euid ||
+ if (pcred->uid != mycred->euid ||
pcred->euid != mycred->euid ||
pcred->suid != mycred->euid ||
- pcred-> gid != mycred->egid ||
+ pcred->gid != mycred->egid ||
pcred->egid != mycred->egid ||
pcred->sgid != mycred->egid)
goto not_permitted;
/* the keyrings must have the same UID */
- if (pcred ->tgcred->session_keyring->uid != mycred->euid ||
+ if (pcred->tgcred->session_keyring->uid != mycred->euid ||
mycred->tgcred->session_keyring->uid != mycred->euid)
goto not_permitted;
- /* the LSM must permit the replacement of the parent's keyring with the
- * keyring from this process */
- ret = security_key_session_to_parent(mycred, pcred,
- key_ref_to_ptr(keyring_r));
- if (ret < 0)
- goto not_permitted;
-
/* if there's an already pending keyring replacement, then we replace
* that */
oldcred = parent->replacement_session_keyring;
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 1e4b0037935c..ef03a82a0135 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -17,7 +17,7 @@
#include <linux/seq_file.h>
#include <linux/err.h>
#include <keys/keyring-type.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "internal.h"
#define rcu_dereference_locked_keyring(keyring) \
@@ -44,7 +44,7 @@ static inline unsigned keyring_hash(const char *desc)
unsigned bucket = 0;
for (; *desc; desc++)
- bucket += (unsigned char) *desc;
+ bucket += (unsigned char)*desc;
return bucket & (KEYRING_NAME_HASH_SIZE - 1);
}
@@ -175,12 +175,10 @@ static void keyring_describe(const struct key *keyring, struct seq_file *m)
{
struct keyring_list *klist;
- if (keyring->description) {
+ if (keyring->description)
seq_puts(m, keyring->description);
- }
- else {
+ else
seq_puts(m, "[anon]");
- }
rcu_read_lock();
klist = rcu_dereference(keyring->payload.subscriptions);
@@ -241,7 +239,7 @@ static long keyring_read(const struct key *keyring,
ret = qty;
}
- error:
+error:
return ret;
} /* end keyring_read() */
@@ -310,7 +308,7 @@ key_ref_t keyring_search_aux(key_ref_t keyring_ref,
key_check(keyring);
/* top keyring must have search permission to begin the search */
- err = key_task_permission(keyring_ref, cred, KEY_SEARCH);
+ err = key_task_permission(keyring_ref, cred, KEY_SEARCH);
if (err < 0) {
key_ref = ERR_PTR(err);
goto error;
@@ -512,7 +510,7 @@ key_ref_t __keyring_search_one(key_ref_t keyring_ref,
rcu_read_unlock();
return ERR_PTR(-ENOKEY);
- found:
+found:
atomic_inc(&key->usage);
rcu_read_unlock();
return make_key_ref(key, possessed);
@@ -602,7 +600,7 @@ static int keyring_detect_cycle(struct key *A, struct key *B)
sp = 0;
/* start processing a new keyring */
- descend:
+descend:
if (test_bit(KEY_FLAG_REVOKED, &subtree->flags))
goto not_this_keyring;
@@ -611,7 +609,7 @@ static int keyring_detect_cycle(struct key *A, struct key *B)
goto not_this_keyring;
kix = 0;
- ascend:
+ascend:
/* iterate through the remaining keys in this keyring */
for (; kix < keylist->nkeys; kix++) {
key = keylist->keys[kix];
@@ -637,7 +635,7 @@ static int keyring_detect_cycle(struct key *A, struct key *B)
/* the keyring we're looking at was disqualified or didn't contain a
* matching key */
- not_this_keyring:
+not_this_keyring:
if (sp > 0) {
/* resume the checking of a keyring higher up in the tree */
sp--;
@@ -648,34 +646,20 @@ static int keyring_detect_cycle(struct key *A, struct key *B)
ret = 0; /* no cycles detected */
- error:
+error:
rcu_read_unlock();
return ret;
- too_deep:
+too_deep:
ret = -ELOOP;
goto error;
- cycle_detected:
+cycle_detected:
ret = -EDEADLK;
goto error;
} /* end keyring_detect_cycle() */
-/*****************************************************************************/
-/*
- * dispose of a keyring list after the RCU grace period
- */
-static void keyring_link_rcu_disposal(struct rcu_head *rcu)
-{
- struct keyring_list *klist =
- container_of(rcu, struct keyring_list, rcu);
-
- kfree(klist);
-
-} /* end keyring_link_rcu_disposal() */
-
-/*****************************************************************************/
/*
* dispose of a keyring list after the RCU grace period, freeing the unlinked
* key
@@ -685,55 +669,51 @@ static void keyring_unlink_rcu_disposal(struct rcu_head *rcu)
struct keyring_list *klist =
container_of(rcu, struct keyring_list, rcu);
- key_put(klist->keys[klist->delkey]);
+ if (klist->delkey != USHORT_MAX)
+ key_put(klist->keys[klist->delkey]);
kfree(klist);
+}
-} /* end keyring_unlink_rcu_disposal() */
-
-/*****************************************************************************/
/*
- * link a key into to a keyring
- * - must be called with the keyring's semaphore write-locked
- * - discard already extant link to matching key if there is one
+ * preallocate memory so that a key can be linked into a keyring
*/
-int __key_link(struct key *keyring, struct key *key)
+int __key_link_begin(struct key *keyring, const struct key_type *type,
+ const char *description,
+ struct keyring_list **_prealloc)
+ __acquires(&keyring->sem)
{
struct keyring_list *klist, *nklist;
unsigned max;
size_t size;
int loop, ret;
- ret = -EKEYREVOKED;
- if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
- goto error;
+ kenter("%d,%s,%s,", key_serial(keyring), type->name, description);
- ret = -ENOTDIR;
if (keyring->type != &key_type_keyring)
- goto error;
+ return -ENOTDIR;
- /* serialise link/link calls to prevent parallel calls causing a
- * cycle when applied to two keyring in opposite orders */
- down_write(&keyring_serialise_link_sem);
+ down_write(&keyring->sem);
- /* check that we aren't going to create a cycle adding one keyring to
- * another */
- if (key->type == &key_type_keyring) {
- ret = keyring_detect_cycle(keyring, key);
- if (ret < 0)
- goto error2;
- }
+ ret = -EKEYREVOKED;
+ if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
+ goto error_krsem;
+
+ /* serialise link/link calls to prevent parallel calls causing a cycle
+ * when linking two keyrings in opposite orders */
+ if (type == &key_type_keyring)
+ down_write(&keyring_serialise_link_sem);
- /* see if there's a matching key we can displace */
klist = rcu_dereference_locked_keyring(keyring);
- if (klist && klist->nkeys > 0) {
- struct key_type *type = key->type;
+ /* see if there's a matching key we can displace */
+ if (klist && klist->nkeys > 0) {
for (loop = klist->nkeys - 1; loop >= 0; loop--) {
if (klist->keys[loop]->type == type &&
strcmp(klist->keys[loop]->description,
- key->description) == 0
+ description) == 0
) {
- /* found a match - replace with new key */
+ /* found a match - we'll replace this one with
+ * the new key */
size = sizeof(struct key *) * klist->maxkeys;
size += sizeof(*klist);
BUG_ON(size > PAGE_SIZE);
@@ -741,22 +721,10 @@ int __key_link(struct key *keyring, struct key *key)
ret = -ENOMEM;
nklist = kmemdup(klist, size, GFP_KERNEL);
if (!nklist)
- goto error2;
-
- /* replace matched key */
- atomic_inc(&key->usage);
- nklist->keys[loop] = key;
-
- rcu_assign_pointer(
- keyring->payload.subscriptions,
- nklist);
-
- /* dispose of the old keyring list and the
- * displaced key */
- klist->delkey = loop;
- call_rcu(&klist->rcu,
- keyring_unlink_rcu_disposal);
+ goto error_sem;
+ /* note replacement slot */
+ klist->delkey = nklist->delkey = loop;
goto done;
}
}
@@ -766,88 +734,167 @@ int __key_link(struct key *keyring, struct key *key)
ret = key_payload_reserve(keyring,
keyring->datalen + KEYQUOTA_LINK_BYTES);
if (ret < 0)
- goto error2;
+ goto error_sem;
if (klist && klist->nkeys < klist->maxkeys) {
- /* there's sufficient slack space to add directly */
- atomic_inc(&key->usage);
-
- klist->keys[klist->nkeys] = key;
- smp_wmb();
- klist->nkeys++;
- smp_wmb();
- }
- else {
+ /* there's sufficient slack space to append directly */
+ nklist = NULL;
+ } else {
/* grow the key list */
max = 4;
if (klist)
max += klist->maxkeys;
ret = -ENFILE;
- if (max > 65535)
- goto error3;
+ if (max > USHORT_MAX - 1)
+ goto error_quota;
size = sizeof(*klist) + sizeof(struct key *) * max;
if (size > PAGE_SIZE)
- goto error3;
+ goto error_quota;
ret = -ENOMEM;
nklist = kmalloc(size, GFP_KERNEL);
if (!nklist)
- goto error3;
- nklist->maxkeys = max;
- nklist->nkeys = 0;
+ goto error_quota;
+ nklist->maxkeys = max;
if (klist) {
- nklist->nkeys = klist->nkeys;
- memcpy(nklist->keys,
- klist->keys,
+ memcpy(nklist->keys, klist->keys,
sizeof(struct key *) * klist->nkeys);
+ nklist->delkey = klist->nkeys;
+ nklist->nkeys = klist->nkeys + 1;
+ klist->delkey = USHORT_MAX;
+ } else {
+ nklist->nkeys = 1;
+ nklist->delkey = 0;
}
/* add the key into the new space */
- atomic_inc(&key->usage);
- nklist->keys[nklist->nkeys++] = key;
-
- rcu_assign_pointer(keyring->payload.subscriptions, nklist);
-
- /* dispose of the old keyring list */
- if (klist)
- call_rcu(&klist->rcu, keyring_link_rcu_disposal);
+ nklist->keys[nklist->delkey] = NULL;
}
done:
- ret = 0;
-error2:
- up_write(&keyring_serialise_link_sem);
-error:
- return ret;
+ *_prealloc = nklist;
+ kleave(" = 0");
+ return 0;
-error3:
+error_quota:
/* undo the quota changes */
key_payload_reserve(keyring,
keyring->datalen - KEYQUOTA_LINK_BYTES);
- goto error2;
+error_sem:
+ if (type == &key_type_keyring)
+ up_write(&keyring_serialise_link_sem);
+error_krsem:
+ up_write(&keyring->sem);
+ kleave(" = %d", ret);
+ return ret;
+}
-} /* end __key_link() */
+/*
+ * check already instantiated keys aren't going to be a problem
+ * - the caller must have called __key_link_begin()
+ * - don't need to call this for keys that were created since __key_link_begin()
+ * was called
+ */
+int __key_link_check_live_key(struct key *keyring, struct key *key)
+{
+ if (key->type == &key_type_keyring)
+ /* check that we aren't going to create a cycle by linking one
+ * keyring to another */
+ return keyring_detect_cycle(keyring, key);
+ return 0;
+}
+
+/*
+ * link a key into a keyring
+ * - must be called with __key_link_begin() having been called
+ * - discard already extant link to matching key if there is one
+ */
+void __key_link(struct key *keyring, struct key *key,
+ struct keyring_list **_prealloc)
+{
+ struct keyring_list *klist, *nklist;
+
+ nklist = *_prealloc;
+ *_prealloc = NULL;
+
+ kenter("%d,%d,%p", keyring->serial, key->serial, nklist);
+
+ klist = rcu_dereference_protected(keyring->payload.subscriptions,
+ rwsem_is_locked(&keyring->sem));
+
+ atomic_inc(&key->usage);
+
+ /* there's a matching key we can displace or an empty slot in a newly
+ * allocated list we can fill */
+ if (nklist) {
+ kdebug("replace %hu/%hu/%hu",
+ nklist->delkey, nklist->nkeys, nklist->maxkeys);
+
+ nklist->keys[nklist->delkey] = key;
+
+ rcu_assign_pointer(keyring->payload.subscriptions, nklist);
+
+ /* dispose of the old keyring list and, if there was one, the
+ * displaced key */
+ if (klist) {
+ kdebug("dispose %hu/%hu/%hu",
+ klist->delkey, klist->nkeys, klist->maxkeys);
+ call_rcu(&klist->rcu, keyring_unlink_rcu_disposal);
+ }
+ } else {
+ /* there's sufficient slack space to append directly */
+ klist->keys[klist->nkeys] = key;
+ smp_wmb();
+ klist->nkeys++;
+ }
+}
+
+/*
+ * finish linking a key into a keyring
+ * - must be called with __key_link_begin() having been called
+ */
+void __key_link_end(struct key *keyring, struct key_type *type,
+ struct keyring_list *prealloc)
+ __releases(&keyring->sem)
+{
+ BUG_ON(type == NULL);
+ BUG_ON(type->name == NULL);
+ kenter("%d,%s,%p", keyring->serial, type->name, prealloc);
+
+ if (type == &key_type_keyring)
+ up_write(&keyring_serialise_link_sem);
+
+ if (prealloc) {
+ kfree(prealloc);
+ key_payload_reserve(keyring,
+ keyring->datalen - KEYQUOTA_LINK_BYTES);
+ }
+ up_write(&keyring->sem);
+}
-/*****************************************************************************/
/*
* link a key to a keyring
*/
int key_link(struct key *keyring, struct key *key)
{
+ struct keyring_list *prealloc;
int ret;
key_check(keyring);
key_check(key);
- down_write(&keyring->sem);
- ret = __key_link(keyring, key);
- up_write(&keyring->sem);
+ ret = __key_link_begin(keyring, key->type, key->description, &prealloc);
+ if (ret == 0) {
+ ret = __key_link_check_live_key(keyring, key);
+ if (ret == 0)
+ __key_link(keyring, key, &prealloc);
+ __key_link_end(keyring, key->type, prealloc);
+ }
return ret;
-
-} /* end key_link() */
+}
EXPORT_SYMBOL(key_link);
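For reference, the calling sequence the new three-phase API expects from any link site is sketched below; it simply mirrors the key_link() implementation above rather than adding anything beyond the patch, and example_link() is a hypothetical name, assuming the keyring and key are already pinned by the caller.
static int example_link(struct key *keyring, struct key *key)
{
	struct keyring_list *prealloc;
	int ret;
	/* lock the keyring and preallocate any list space needed */
	ret = __key_link_begin(keyring, key->type, key->description, &prealloc);
	if (ret < 0)
		return ret;
	/* for pre-existing keys only: refuse keyring-to-keyring cycles */
	ret = __key_link_check_live_key(keyring, key);
	if (ret == 0)
		__key_link(keyring, key, &prealloc);
	/* always balances _begin(): unlocks and frees unused preallocation */
	__key_link_end(keyring, key->type, prealloc);
	return ret;
}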
diff --git a/security/keys/permission.c b/security/keys/permission.c
index 0ed802c9e698..28645502cd0d 100644
--- a/security/keys/permission.c
+++ b/security/keys/permission.c
@@ -109,7 +109,7 @@ int key_validate(struct key *key)
}
}
- error:
+error:
return ret;
} /* end key_validate() */
diff --git a/security/keys/proc.c b/security/keys/proc.c
index 706d63f4f185..068b66ea2f1b 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -306,7 +306,7 @@ static void *proc_key_users_start(struct seq_file *p, loff_t *_pos)
static void *proc_key_users_next(struct seq_file *p, void *v, loff_t *_pos)
{
(*_pos)++;
- return key_user_next((struct rb_node *) v);
+ return key_user_next((struct rb_node *)v);
}
static void proc_key_users_stop(struct seq_file *p, void *v)
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index d8c1a6a0fb08..f656e9c069e3 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -299,12 +299,15 @@ static int construct_alloc_key(struct key_type *type,
struct key_user *user,
struct key **_key)
{
+ struct keyring_list *prealloc;
const struct cred *cred = current_cred();
struct key *key;
key_ref_t key_ref;
+ int ret;
kenter("%s,%s,,,", type->name, description);
+ *_key = NULL;
mutex_lock(&user->cons_lock);
key = key_alloc(type, description, cred->fsuid, cred->fsgid, cred,
@@ -314,8 +317,12 @@ static int construct_alloc_key(struct key_type *type,
set_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags);
- if (dest_keyring)
- down_write(&dest_keyring->sem);
+ if (dest_keyring) {
+ ret = __key_link_begin(dest_keyring, type, description,
+ &prealloc);
+ if (ret < 0)
+ goto link_prealloc_failed;
+ }
/* attach the key to the destination keyring under lock, but we do need
* to do another check just in case someone beat us to it whilst we
@@ -327,31 +334,49 @@ static int construct_alloc_key(struct key_type *type,
goto key_already_present;
if (dest_keyring)
- __key_link(dest_keyring, key);
+ __key_link(dest_keyring, key, &prealloc);
mutex_unlock(&key_construction_mutex);
if (dest_keyring)
- up_write(&dest_keyring->sem);
+ __key_link_end(dest_keyring, type, prealloc);
mutex_unlock(&user->cons_lock);
*_key = key;
kleave(" = 0 [%d]", key_serial(key));
return 0;
+ /* the key is now present - we tell the caller that we found it by
+ * returning -EINPROGRESS */
key_already_present:
+ key_put(key);
mutex_unlock(&key_construction_mutex);
+ key = key_ref_to_ptr(key_ref);
if (dest_keyring) {
- __key_link(dest_keyring, key_ref_to_ptr(key_ref));
- up_write(&dest_keyring->sem);
+ ret = __key_link_check_live_key(dest_keyring, key);
+ if (ret == 0)
+ __key_link(dest_keyring, key, &prealloc);
+ __key_link_end(dest_keyring, type, prealloc);
+ if (ret < 0)
+ goto link_check_failed;
}
mutex_unlock(&user->cons_lock);
- key_put(key);
- *_key = key = key_ref_to_ptr(key_ref);
+ *_key = key;
kleave(" = -EINPROGRESS [%d]", key_serial(key));
return -EINPROGRESS;
+link_check_failed:
+ mutex_unlock(&user->cons_lock);
+ key_put(key);
+ kleave(" = %d [linkcheck]", ret);
+ return ret;
+
+link_prealloc_failed:
+ up_write(&dest_keyring->sem);
+ mutex_unlock(&user->cons_lock);
+ kleave(" = %d [prelink]", ret);
+ return ret;
+
alloc_failed:
mutex_unlock(&user->cons_lock);
- *_key = NULL;
kleave(" = %ld", PTR_ERR(key));
return PTR_ERR(key);
}
@@ -390,6 +415,10 @@ static struct key *construct_key_and_link(struct key_type *type,
kdebug("cons failed");
goto construction_failed;
}
+ } else if (ret == -EINPROGRESS) {
+ ret = 0;
+ } else {
+ key = ERR_PTR(ret);
}
key_put(dest_keyring);
@@ -422,6 +451,7 @@ struct key *request_key_and_link(struct key_type *type,
const struct cred *cred = current_cred();
struct key *key;
key_ref_t key_ref;
+ int ret;
kenter("%s,%s,%p,%zu,%p,%p,%lx",
type->name, description, callout_info, callout_len, aux,
@@ -435,8 +465,13 @@ struct key *request_key_and_link(struct key_type *type,
key = key_ref_to_ptr(key_ref);
if (dest_keyring) {
construct_get_dest_keyring(&dest_keyring);
- key_link(dest_keyring, key);
+ ret = key_link(dest_keyring, key);
key_put(dest_keyring);
+ if (ret < 0) {
+ key_put(key);
+ key = ERR_PTR(ret);
+ goto error;
+ }
}
} else if (PTR_ERR(key_ref) != -EAGAIN) {
key = ERR_CAST(key_ref);
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index 893365b79a29..908aa712816a 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -221,7 +221,7 @@ static void dump_common_audit_data(struct audit_buffer *ab,
}
switch (a->type) {
- case LSM_AUDIT_NO_AUDIT:
+ case LSM_AUDIT_DATA_NONE:
return;
case LSM_AUDIT_DATA_IPC:
audit_log_format(ab, " key=%d ", a->u.ipc_id);
diff --git a/security/min_addr.c b/security/min_addr.c
index e86f297522bf..f728728f193b 100644
--- a/security/min_addr.c
+++ b/security/min_addr.c
@@ -33,7 +33,7 @@ int mmap_min_addr_handler(struct ctl_table *table, int write,
{
int ret;
- if (!capable(CAP_SYS_RAWIO))
+ if (write && !capable(CAP_SYS_RAWIO))
return -EPERM;
ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
diff --git a/security/security.c b/security/security.c
index 687c6fd14bb6..a525e0128abe 100644
--- a/security/security.c
+++ b/security/security.c
@@ -117,7 +117,7 @@ int __init security_module_enable(struct security_operations *ops)
* If there is already a security module registered with the kernel,
* an error will be returned. Otherwise %0 is returned on success.
*/
-int register_security(struct security_operations *ops)
+int __init register_security(struct security_operations *ops)
{
if (verify(ops)) {
printk(KERN_DEBUG "%s could not verify "
@@ -190,11 +190,6 @@ int security_real_capable_noaudit(struct task_struct *tsk, int cap)
return ret;
}
-int security_acct(struct file *file)
-{
- return security_ops->acct(file);
-}
-
int security_sysctl(struct ctl_table *table, int op)
{
return security_ops->sysctl(table, op);
@@ -306,46 +301,16 @@ int security_sb_mount(char *dev_name, struct path *path,
return security_ops->sb_mount(dev_name, path, type, flags, data);
}
-int security_sb_check_sb(struct vfsmount *mnt, struct path *path)
-{
- return security_ops->sb_check_sb(mnt, path);
-}
-
int security_sb_umount(struct vfsmount *mnt, int flags)
{
return security_ops->sb_umount(mnt, flags);
}
-void security_sb_umount_close(struct vfsmount *mnt)
-{
- security_ops->sb_umount_close(mnt);
-}
-
-void security_sb_umount_busy(struct vfsmount *mnt)
-{
- security_ops->sb_umount_busy(mnt);
-}
-
-void security_sb_post_remount(struct vfsmount *mnt, unsigned long flags, void *data)
-{
- security_ops->sb_post_remount(mnt, flags, data);
-}
-
-void security_sb_post_addmount(struct vfsmount *mnt, struct path *mountpoint)
-{
- security_ops->sb_post_addmount(mnt, mountpoint);
-}
-
int security_sb_pivotroot(struct path *old_path, struct path *new_path)
{
return security_ops->sb_pivotroot(old_path, new_path);
}
-void security_sb_post_pivotroot(struct path *old_path, struct path *new_path)
-{
- security_ops->sb_post_pivotroot(old_path, new_path);
-}
-
int security_sb_set_mnt_opts(struct super_block *sb,
struct security_mnt_opts *opts)
{
@@ -580,13 +545,6 @@ int security_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
return security_ops->inode_getattr(mnt, dentry);
}
-void security_inode_delete(struct inode *inode)
-{
- if (unlikely(IS_PRIVATE(inode)))
- return;
- security_ops->inode_delete(inode);
-}
-
int security_inode_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
@@ -662,7 +620,13 @@ void security_inode_getsecid(const struct inode *inode, u32 *secid)
int security_file_permission(struct file *file, int mask)
{
- return security_ops->file_permission(file, mask);
+ int ret;
+
+ ret = security_ops->file_permission(file, mask);
+ if (ret)
+ return ret;
+
+ return fsnotify_perm(file, mask);
}
int security_file_alloc(struct file *file)
@@ -726,7 +690,13 @@ int security_file_receive(struct file *file)
int security_dentry_open(struct file *file, const struct cred *cred)
{
- return security_ops->dentry_open(file, cred);
+ int ret;
+
+ ret = security_ops->dentry_open(file, cred);
+ if (ret)
+ return ret;
+
+ return fsnotify_perm(file, MAY_OPEN);
}
int security_task_create(unsigned long clone_flags)
@@ -749,11 +719,6 @@ int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp)
return security_ops->cred_prepare(new, old, gfp);
}
-void security_commit_creds(struct cred *new, const struct cred *old)
-{
- security_ops->cred_commit(new, old);
-}
-
void security_transfer_creds(struct cred *new, const struct cred *old)
{
security_ops->cred_transfer(new, old);
@@ -774,22 +739,12 @@ int security_kernel_module_request(char *kmod_name)
return security_ops->kernel_module_request(kmod_name);
}
-int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags)
-{
- return security_ops->task_setuid(id0, id1, id2, flags);
-}
-
int security_task_fix_setuid(struct cred *new, const struct cred *old,
int flags)
{
return security_ops->task_fix_setuid(new, old, flags);
}
-int security_task_setgid(gid_t id0, gid_t id1, gid_t id2, int flags)
-{
- return security_ops->task_setgid(id0, id1, id2, flags);
-}
-
int security_task_setpgid(struct task_struct *p, pid_t pgid)
{
return security_ops->task_setpgid(p, pgid);
@@ -811,11 +766,6 @@ void security_task_getsecid(struct task_struct *p, u32 *secid)
}
EXPORT_SYMBOL(security_task_getsecid);
-int security_task_setgroups(struct group_info *group_info)
-{
- return security_ops->task_setgroups(group_info);
-}
-
int security_task_setnice(struct task_struct *p, int nice)
{
return security_ops->task_setnice(p, nice);
@@ -1319,13 +1269,6 @@ int security_key_getsecurity(struct key *key, char **_buffer)
return security_ops->key_getsecurity(key, _buffer);
}
-int security_key_session_to_parent(const struct cred *cred,
- const struct cred *parent_cred,
- struct key *key)
-{
- return security_ops->key_session_to_parent(cred, parent_cred, key);
-}
-
#endif /* CONFIG_KEYS */
#ifdef CONFIG_AUDIT
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 989fef82563a..7f1a304712a9 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -499,8 +499,7 @@ void avc_audit(u32 ssid, u32 tsid,
return;
if (!a) {
a = &stack_data;
- memset(a, 0, sizeof(*a));
- a->type = LSM_AUDIT_NO_AUDIT;
+ COMMON_AUDIT_DATA_INIT(a, NONE);
}
a->selinux_audit_data.tclass = tclass;
a->selinux_audit_data.requested = requested;
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 5feecb41009d..a03fd74602b4 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -293,28 +293,28 @@ static void superblock_free_security(struct super_block *sb)
static int sk_alloc_security(struct sock *sk, int family, gfp_t priority)
{
- struct sk_security_struct *ssec;
+ struct sk_security_struct *sksec;
- ssec = kzalloc(sizeof(*ssec), priority);
- if (!ssec)
+ sksec = kzalloc(sizeof(*sksec), priority);
+ if (!sksec)
return -ENOMEM;
- ssec->peer_sid = SECINITSID_UNLABELED;
- ssec->sid = SECINITSID_UNLABELED;
- sk->sk_security = ssec;
+ sksec->peer_sid = SECINITSID_UNLABELED;
+ sksec->sid = SECINITSID_UNLABELED;
+ sk->sk_security = sksec;
- selinux_netlbl_sk_security_reset(ssec);
+ selinux_netlbl_sk_security_reset(sksec);
return 0;
}
static void sk_free_security(struct sock *sk)
{
- struct sk_security_struct *ssec = sk->sk_security;
+ struct sk_security_struct *sksec = sk->sk_security;
sk->sk_security = NULL;
- selinux_netlbl_sk_security_free(ssec);
- kfree(ssec);
+ selinux_netlbl_sk_security_free(sksec);
+ kfree(sksec);
}
/* The security server must be initialized before
@@ -323,7 +323,7 @@ extern int ss_initialized;
/* The file system's label must be initialized prior to use. */
-static char *labeling_behaviors[6] = {
+static const char *labeling_behaviors[6] = {
"uses xattr",
"uses transition SIDs",
"uses task SIDs",
@@ -2999,13 +2999,15 @@ static int selinux_file_ioctl(struct file *file, unsigned int cmd,
return file_has_perm(cred, file, av);
}
+static int default_noexec;
+
static int file_map_prot_check(struct file *file, unsigned long prot, int shared)
{
const struct cred *cred = current_cred();
int rc = 0;
-#ifndef CONFIG_PPC32
- if ((prot & PROT_EXEC) && (!file || (!shared && (prot & PROT_WRITE)))) {
+ if (default_noexec &&
+ (prot & PROT_EXEC) && (!file || (!shared && (prot & PROT_WRITE)))) {
/*
* We are making executable an anonymous mapping or a
* private file mapping that will also be writable.
@@ -3015,7 +3017,6 @@ static int file_map_prot_check(struct file *file, unsigned long prot, int shared
if (rc)
goto error;
}
-#endif
if (file) {
/* read access is always possible with a mapping */
@@ -3076,8 +3077,8 @@ static int selinux_file_mprotect(struct vm_area_struct *vma,
if (selinux_checkreqprot)
prot = reqprot;
-#ifndef CONFIG_PPC32
- if ((prot & PROT_EXEC) && !(vma->vm_flags & VM_EXEC)) {
+ if (default_noexec &&
+ (prot & PROT_EXEC) && !(vma->vm_flags & VM_EXEC)) {
int rc = 0;
if (vma->vm_start >= vma->vm_mm->start_brk &&
vma->vm_end <= vma->vm_mm->brk) {
@@ -3099,7 +3100,6 @@ static int selinux_file_mprotect(struct vm_area_struct *vma,
if (rc)
return rc;
}
-#endif
return file_map_prot_check(vma->vm_file, prot, vma->vm_flags&VM_SHARED);
}
@@ -4002,7 +4002,7 @@ static int selinux_socket_unix_stream_connect(struct socket *sock,
struct socket *other,
struct sock *newsk)
{
- struct sk_security_struct *ssec;
+ struct sk_security_struct *sksec;
struct inode_security_struct *isec;
struct inode_security_struct *other_isec;
struct common_audit_data ad;
@@ -4021,13 +4021,13 @@ static int selinux_socket_unix_stream_connect(struct socket *sock,
return err;
/* connecting socket */
- ssec = sock->sk->sk_security;
- ssec->peer_sid = other_isec->sid;
+ sksec = sock->sk->sk_security;
+ sksec->peer_sid = other_isec->sid;
/* server child socket */
- ssec = newsk->sk_security;
- ssec->peer_sid = isec->sid;
- err = security_sid_mls_copy(other_isec->sid, ssec->peer_sid, &ssec->sid);
+ sksec = newsk->sk_security;
+ sksec->peer_sid = isec->sid;
+ err = security_sid_mls_copy(other_isec->sid, sksec->peer_sid, &sksec->sid);
return err;
}
@@ -4190,7 +4190,7 @@ static int selinux_socket_getpeersec_stream(struct socket *sock, char __user *op
int err = 0;
char *scontext;
u32 scontext_len;
- struct sk_security_struct *ssec;
+ struct sk_security_struct *sksec;
struct inode_security_struct *isec;
u32 peer_sid = SECSID_NULL;
@@ -4198,8 +4198,8 @@ static int selinux_socket_getpeersec_stream(struct socket *sock, char __user *op
if (isec->sclass == SECCLASS_UNIX_STREAM_SOCKET ||
isec->sclass == SECCLASS_TCP_SOCKET) {
- ssec = sock->sk->sk_security;
- peer_sid = ssec->peer_sid;
+ sksec = sock->sk->sk_security;
+ peer_sid = sksec->peer_sid;
}
if (peer_sid == SECSID_NULL) {
err = -ENOPROTOOPT;
@@ -4266,14 +4266,14 @@ static void selinux_sk_free_security(struct sock *sk)
static void selinux_sk_clone_security(const struct sock *sk, struct sock *newsk)
{
- struct sk_security_struct *ssec = sk->sk_security;
- struct sk_security_struct *newssec = newsk->sk_security;
+ struct sk_security_struct *sksec = sk->sk_security;
+ struct sk_security_struct *newsksec = newsk->sk_security;
- newssec->sid = ssec->sid;
- newssec->peer_sid = ssec->peer_sid;
- newssec->sclass = ssec->sclass;
+ newsksec->sid = sksec->sid;
+ newsksec->peer_sid = sksec->peer_sid;
+ newsksec->sclass = sksec->sclass;
- selinux_netlbl_sk_security_reset(newssec);
+ selinux_netlbl_sk_security_reset(newsksec);
}
static void selinux_sk_getsecid(struct sock *sk, u32 *secid)
@@ -5662,6 +5662,8 @@ static __init int selinux_init(void)
/* Set the security state for the initial task. */
cred_init_security();
+ default_noexec = !(VM_DATA_DEFAULT_FLAGS & VM_EXEC);
+
sel_inode_cache = kmem_cache_create("selinux_inode_security",
sizeof(struct inode_security_struct),
0, SLAB_PANIC, NULL);
diff --git a/security/selinux/include/initial_sid_to_string.h b/security/selinux/include/initial_sid_to_string.h
index d4fac82793ae..a59b64e3fd02 100644
--- a/security/selinux/include/initial_sid_to_string.h
+++ b/security/selinux/include/initial_sid_to_string.h
@@ -1,5 +1,5 @@
/* This file is automatically generated. Do not edit. */
-static char *initial_sid_to_string[] =
+static const char *initial_sid_to_string[] =
{
"null",
"kernel",
diff --git a/security/selinux/include/netlabel.h b/security/selinux/include/netlabel.h
index 8d7384280a7a..cf2f628e6e28 100644
--- a/security/selinux/include/netlabel.h
+++ b/security/selinux/include/netlabel.h
@@ -42,8 +42,8 @@ void selinux_netlbl_cache_invalidate(void);
void selinux_netlbl_err(struct sk_buff *skb, int error, int gateway);
-void selinux_netlbl_sk_security_free(struct sk_security_struct *ssec);
-void selinux_netlbl_sk_security_reset(struct sk_security_struct *ssec);
+void selinux_netlbl_sk_security_free(struct sk_security_struct *sksec);
+void selinux_netlbl_sk_security_reset(struct sk_security_struct *sksec);
int selinux_netlbl_skbuff_getsid(struct sk_buff *skb,
u16 family,
@@ -79,13 +79,13 @@ static inline void selinux_netlbl_err(struct sk_buff *skb,
}
static inline void selinux_netlbl_sk_security_free(
- struct sk_security_struct *ssec)
+ struct sk_security_struct *sksec)
{
return;
}
static inline void selinux_netlbl_sk_security_reset(
- struct sk_security_struct *ssec)
+ struct sk_security_struct *sksec)
{
return;
}
diff --git a/security/selinux/netlabel.c b/security/selinux/netlabel.c
index 628da72ee763..1c2fc46544bf 100644
--- a/security/selinux/netlabel.c
+++ b/security/selinux/netlabel.c
@@ -132,21 +132,21 @@ void selinux_netlbl_err(struct sk_buff *skb, int error, int gateway)
/**
* selinux_netlbl_sk_security_free - Free the NetLabel fields
- * @sssec: the sk_security_struct
+ * @sksec: the sk_security_struct
*
* Description:
* Free all of the memory in the NetLabel fields of a sk_security_struct.
*
*/
-void selinux_netlbl_sk_security_free(struct sk_security_struct *ssec)
+void selinux_netlbl_sk_security_free(struct sk_security_struct *sksec)
{
- if (ssec->nlbl_secattr != NULL)
- netlbl_secattr_free(ssec->nlbl_secattr);
+ if (sksec->nlbl_secattr != NULL)
+ netlbl_secattr_free(sksec->nlbl_secattr);
}
/**
* selinux_netlbl_sk_security_reset - Reset the NetLabel fields
- * @ssec: the sk_security_struct
+ * @sksec: the sk_security_struct
* @family: the socket family
*
* Description:
@@ -154,9 +154,9 @@ void selinux_netlbl_sk_security_free(struct sk_security_struct *ssec)
* The caller is responsibile for all the NetLabel sk_security_struct locking.
*
*/
-void selinux_netlbl_sk_security_reset(struct sk_security_struct *ssec)
+void selinux_netlbl_sk_security_reset(struct sk_security_struct *sksec)
{
- ssec->nlbl_state = NLBL_UNSET;
+ sksec->nlbl_state = NLBL_UNSET;
}
/**
diff --git a/security/selinux/netlink.c b/security/selinux/netlink.c
index 0e147b6914ad..36ac257cec9a 100644
--- a/security/selinux/netlink.c
+++ b/security/selinux/netlink.c
@@ -14,7 +14,6 @@
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
-#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/selinux_netlink.h>
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index dd7cc6de77f9..75ec0c6ebacd 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -11,7 +11,6 @@
*/
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if.h>
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index cd191bbec03c..0293843f7eda 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -503,11 +503,11 @@ static ssize_t sel_write_access(struct file *file, char *buf, size_t size)
return length;
length = -ENOMEM;
- scon = kzalloc(size+1, GFP_KERNEL);
+ scon = kzalloc(size + 1, GFP_KERNEL);
if (!scon)
return length;
- tcon = kzalloc(size+1, GFP_KERNEL);
+ tcon = kzalloc(size + 1, GFP_KERNEL);
if (!tcon)
goto out;
@@ -515,10 +515,10 @@ static ssize_t sel_write_access(struct file *file, char *buf, size_t size)
if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
goto out2;
- length = security_context_to_sid(scon, strlen(scon)+1, &ssid);
+ length = security_context_to_sid(scon, strlen(scon) + 1, &ssid);
if (length < 0)
goto out2;
- length = security_context_to_sid(tcon, strlen(tcon)+1, &tsid);
+ length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid);
if (length < 0)
goto out2;
@@ -550,11 +550,11 @@ static ssize_t sel_write_create(struct file *file, char *buf, size_t size)
return length;
length = -ENOMEM;
- scon = kzalloc(size+1, GFP_KERNEL);
+ scon = kzalloc(size + 1, GFP_KERNEL);
if (!scon)
return length;
- tcon = kzalloc(size+1, GFP_KERNEL);
+ tcon = kzalloc(size + 1, GFP_KERNEL);
if (!tcon)
goto out;
@@ -562,10 +562,10 @@ static ssize_t sel_write_create(struct file *file, char *buf, size_t size)
if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
goto out2;
- length = security_context_to_sid(scon, strlen(scon)+1, &ssid);
+ length = security_context_to_sid(scon, strlen(scon) + 1, &ssid);
if (length < 0)
goto out2;
- length = security_context_to_sid(tcon, strlen(tcon)+1, &tsid);
+ length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid);
if (length < 0)
goto out2;
@@ -609,11 +609,11 @@ static ssize_t sel_write_relabel(struct file *file, char *buf, size_t size)
return length;
length = -ENOMEM;
- scon = kzalloc(size+1, GFP_KERNEL);
+ scon = kzalloc(size + 1, GFP_KERNEL);
if (!scon)
return length;
- tcon = kzalloc(size+1, GFP_KERNEL);
+ tcon = kzalloc(size + 1, GFP_KERNEL);
if (!tcon)
goto out;
@@ -621,10 +621,10 @@ static ssize_t sel_write_relabel(struct file *file, char *buf, size_t size)
if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
goto out2;
- length = security_context_to_sid(scon, strlen(scon)+1, &ssid);
+ length = security_context_to_sid(scon, strlen(scon) + 1, &ssid);
if (length < 0)
goto out2;
- length = security_context_to_sid(tcon, strlen(tcon)+1, &tsid);
+ length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid);
if (length < 0)
goto out2;
@@ -666,11 +666,11 @@ static ssize_t sel_write_user(struct file *file, char *buf, size_t size)
return length;
length = -ENOMEM;
- con = kzalloc(size+1, GFP_KERNEL);
+ con = kzalloc(size + 1, GFP_KERNEL);
if (!con)
return length;
- user = kzalloc(size+1, GFP_KERNEL);
+ user = kzalloc(size + 1, GFP_KERNEL);
if (!user)
goto out;
@@ -678,7 +678,7 @@ static ssize_t sel_write_user(struct file *file, char *buf, size_t size)
if (sscanf(buf, "%s %s", con, user) != 2)
goto out2;
- length = security_context_to_sid(con, strlen(con)+1, &sid);
+ length = security_context_to_sid(con, strlen(con) + 1, &sid);
if (length < 0)
goto out2;
@@ -727,11 +727,11 @@ static ssize_t sel_write_member(struct file *file, char *buf, size_t size)
return length;
length = -ENOMEM;
- scon = kzalloc(size+1, GFP_KERNEL);
+ scon = kzalloc(size + 1, GFP_KERNEL);
if (!scon)
return length;
- tcon = kzalloc(size+1, GFP_KERNEL);
+ tcon = kzalloc(size + 1, GFP_KERNEL);
if (!tcon)
goto out;
@@ -739,10 +739,10 @@ static ssize_t sel_write_member(struct file *file, char *buf, size_t size)
if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
goto out2;
- length = security_context_to_sid(scon, strlen(scon)+1, &ssid);
+ length = security_context_to_sid(scon, strlen(scon) + 1, &ssid);
if (length < 0)
goto out2;
- length = security_context_to_sid(tcon, strlen(tcon)+1, &tsid);
+ length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid);
if (length < 0)
goto out2;
@@ -1401,7 +1401,7 @@ static int sel_make_perm_files(char *objclass, int classvalue,
}
inode->i_fop = &sel_perm_ops;
/* i+1 since perm values are 1-indexed */
- inode->i_ino = sel_perm_to_ino(classvalue, i+1);
+ inode->i_ino = sel_perm_to_ino(classvalue, i + 1);
d_add(dentry, inode);
}
@@ -1489,7 +1489,7 @@ static int sel_make_classes(void)
goto out;
/* +2 since classes are 1-indexed */
- last_class_ino = sel_class_to_ino(nclasses+2);
+ last_class_ino = sel_class_to_ino(nclasses + 2);
for (i = 0; i < nclasses; i++) {
struct dentry *class_name_dir;
@@ -1506,7 +1506,7 @@ static int sel_make_classes(void)
goto out1;
/* i+1 since class values are 1-indexed */
- rc = sel_make_class_dir_entries(classes[i], i+1,
+ rc = sel_make_class_dir_entries(classes[i], i + 1,
class_name_dir);
if (rc)
goto out1;
diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c
index 372b773f8210..b4eff7a60c50 100644
--- a/security/selinux/ss/mls.c
+++ b/security/selinux/ss/mls.c
@@ -255,7 +255,7 @@ int mls_context_to_sid(struct policydb *pol,
if (!pol->mls_enabled) {
if (def_sid != SECSID_NULL && oldc)
- *scontext += strlen(*scontext)+1;
+ *scontext += strlen(*scontext) + 1;
return 0;
}
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 23c6e53c102c..4f584fb71ef9 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -40,7 +40,7 @@
#define _DEBUG_HASHES
#ifdef DEBUG_HASHES
-static char *symtab_name[SYM_NUM] = {
+static const char *symtab_name[SYM_NUM] = {
"common prefixes",
"classes",
"roles",
@@ -156,7 +156,7 @@ static int roles_init(struct policydb *p)
rc = -EINVAL;
goto out_free_role;
}
- key = kmalloc(strlen(OBJECT_R)+1, GFP_KERNEL);
+ key = kmalloc(strlen(OBJECT_R) + 1, GFP_KERNEL);
if (!key) {
rc = -ENOMEM;
goto out_free_role;
@@ -2195,7 +2195,7 @@ int policydb_read(struct policydb *p, void *fp)
rangetr_hash_eval(p->range_tr);
}
- p->type_attr_map = kmalloc(p->p_types.nprim*sizeof(struct ebitmap), GFP_KERNEL);
+ p->type_attr_map = kmalloc(p->p_types.nprim * sizeof(struct ebitmap), GFP_KERNEL);
if (!p->type_attr_map)
goto bad;
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index cf27b3ee1a95..1de60ce90d9a 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -274,15 +274,15 @@ static int constraint_expr_eval(struct context *scontext,
case CEXPR_AND:
BUG_ON(sp < 1);
sp--;
- s[sp] &= s[sp+1];
+ s[sp] &= s[sp + 1];
break;
case CEXPR_OR:
BUG_ON(sp < 1);
sp--;
- s[sp] |= s[sp+1];
+ s[sp] |= s[sp + 1];
break;
case CEXPR_ATTR:
- if (sp == (CEXPR_MAXDEPTH-1))
+ if (sp == (CEXPR_MAXDEPTH - 1))
return 0;
switch (e->attr) {
case CEXPR_USER:
@@ -1216,7 +1216,7 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
*sid = SECSID_NULL;
/* Copy the string so that we can modify the copy as we parse it. */
- scontext2 = kmalloc(scontext_len+1, gfp_flags);
+ scontext2 = kmalloc(scontext_len + 1, gfp_flags);
if (!scontext2)
return -ENOMEM;
memcpy(scontext2, scontext, scontext_len);
@@ -1760,22 +1760,28 @@ int security_load_policy(void *data, size_t len)
if (!ss_initialized) {
avtab_cache_init();
- if (policydb_read(&policydb, fp)) {
+ rc = policydb_read(&policydb, fp);
+ if (rc) {
avtab_cache_destroy();
- return -EINVAL;
+ return rc;
}
- if (selinux_set_mapping(&policydb, secclass_map,
- &current_mapping,
- &current_mapping_size)) {
+
+ rc = selinux_set_mapping(&policydb, secclass_map,
+ &current_mapping,
+ &current_mapping_size);
+ if (rc) {
policydb_destroy(&policydb);
avtab_cache_destroy();
- return -EINVAL;
+ return rc;
}
- if (policydb_load_isids(&policydb, &sidtab)) {
+
+ rc = policydb_load_isids(&policydb, &sidtab);
+ if (rc) {
policydb_destroy(&policydb);
avtab_cache_destroy();
- return -EINVAL;
+ return rc;
}
+
security_load_policycaps();
ss_initialized = 1;
seqno = ++latest_granting;
@@ -1791,8 +1797,9 @@ int security_load_policy(void *data, size_t len)
sidtab_hash_eval(&sidtab, "sids");
#endif
- if (policydb_read(&newpolicydb, fp))
- return -EINVAL;
+ rc = policydb_read(&newpolicydb, fp);
+ if (rc)
+ return rc;
/* If switching between different policy types, log MLS status */
if (policydb.mls_enabled && !newpolicydb.mls_enabled)
@@ -1807,8 +1814,8 @@ int security_load_policy(void *data, size_t len)
return rc;
}
- if (selinux_set_mapping(&newpolicydb, secclass_map,
- &map, &map_size))
+ rc = selinux_set_mapping(&newpolicydb, secclass_map, &map, &map_size);
+ if (rc)
goto err;
rc = security_preserve_bools(&newpolicydb);
@@ -1819,10 +1826,10 @@ int security_load_policy(void *data, size_t len)
/* Clone the SID table. */
sidtab_shutdown(&sidtab);
- if (sidtab_map(&sidtab, clone_sid, &newsidtab)) {
- rc = -ENOMEM;
+
+ rc = sidtab_map(&sidtab, clone_sid, &newsidtab);
+ if (rc)
goto err;
- }
/*
* Convert the internal representations of contexts
@@ -2101,9 +2108,9 @@ int security_get_user_sids(u32 fromsid,
ebitmap_for_each_positive_bit(&user->roles, rnode, i) {
role = policydb.role_val_to_struct[i];
- usercon.role = i+1;
+ usercon.role = i + 1;
ebitmap_for_each_positive_bit(&role->types, tnode, j) {
- usercon.type = j+1;
+ usercon.type = j + 1;
if (mls_setup_user_range(fromcon, user, &usercon))
continue;
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index fdfeaa2f28ec..0f2fc480fc61 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -19,7 +19,6 @@
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/stat.h>
-#include <linux/ext2_fs.h>
#include <linux/kd.h>
#include <asm/ioctls.h>
#include <linux/ip.h>
@@ -1119,15 +1118,6 @@ static int smack_cred_prepare(struct cred *new, const struct cred *old,
}
/**
- * smack_cred_commit - commit new credentials
- * @new: the new credentials
- * @old: the original credentials
- */
-static void smack_cred_commit(struct cred *new, const struct cred *old)
-{
-}
-
-/**
* smack_cred_transfer - Transfer the old credentials to the new credentials
* @new: the new credentials
* @old: the original credentials
@@ -3121,7 +3111,6 @@ struct security_operations smack_ops = {
.cred_alloc_blank = smack_cred_alloc_blank,
.cred_free = smack_cred_free,
.cred_prepare = smack_cred_prepare,
- .cred_commit = smack_cred_commit,
.cred_transfer = smack_cred_transfer,
.kernel_act_as = smack_kernel_act_as,
.kernel_create_files_as = smack_kernel_create_files_as,
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index 975c45d88baa..62e089c50ae8 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -874,17 +874,17 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_domain_info * const domain)
static struct tomoyo_profile *tomoyo_find_or_assign_new_profile(const unsigned
int profile)
{
- static DEFINE_MUTEX(lock);
struct tomoyo_profile *ptr = NULL;
int i;
if (profile >= TOMOYO_MAX_PROFILES)
return NULL;
- mutex_lock(&lock);
+ if (mutex_lock_interruptible(&tomoyo_policy_lock))
+ return NULL;
ptr = tomoyo_profile_ptr[profile];
if (ptr)
goto ok;
- ptr = kmalloc(sizeof(*ptr), GFP_KERNEL);
+ ptr = kmalloc(sizeof(*ptr), GFP_NOFS);
if (!tomoyo_memory_ok(ptr)) {
kfree(ptr);
ptr = NULL;
@@ -895,7 +895,7 @@ static struct tomoyo_profile *tomoyo_find_or_assign_new_profile(const unsigned
mb(); /* Avoid out-of-order execution. */
tomoyo_profile_ptr[profile] = ptr;
ok:
- mutex_unlock(&lock);
+ mutex_unlock(&tomoyo_policy_lock);
return ptr;
}
@@ -1071,44 +1071,42 @@ LIST_HEAD(tomoyo_policy_manager_list);
static int tomoyo_update_manager_entry(const char *manager,
const bool is_delete)
{
- struct tomoyo_policy_manager_entry *entry = NULL;
struct tomoyo_policy_manager_entry *ptr;
- const struct tomoyo_path_info *saved_manager;
+ struct tomoyo_policy_manager_entry e = { };
int error = is_delete ? -ENOENT : -ENOMEM;
- bool is_domain = false;
if (tomoyo_is_domain_def(manager)) {
if (!tomoyo_is_correct_domain(manager))
return -EINVAL;
- is_domain = true;
+ e.is_domain = true;
} else {
if (!tomoyo_is_correct_path(manager, 1, -1, -1))
return -EINVAL;
}
- saved_manager = tomoyo_get_name(manager);
- if (!saved_manager)
+ e.manager = tomoyo_get_name(manager);
+ if (!e.manager)
return -ENOMEM;
- if (!is_delete)
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- mutex_lock(&tomoyo_policy_lock);
+ if (mutex_lock_interruptible(&tomoyo_policy_lock))
+ goto out;
list_for_each_entry_rcu(ptr, &tomoyo_policy_manager_list, list) {
- if (ptr->manager != saved_manager)
+ if (ptr->manager != e.manager)
continue;
ptr->is_deleted = is_delete;
error = 0;
break;
}
- if (!is_delete && error && tomoyo_memory_ok(entry)) {
- entry->manager = saved_manager;
- saved_manager = NULL;
- entry->is_domain = is_domain;
- list_add_tail_rcu(&entry->list, &tomoyo_policy_manager_list);
- entry = NULL;
- error = 0;
+ if (!is_delete && error) {
+ struct tomoyo_policy_manager_entry *entry =
+ tomoyo_commit_ok(&e, sizeof(e));
+ if (entry) {
+ list_add_tail_rcu(&entry->list,
+ &tomoyo_policy_manager_list);
+ error = 0;
+ }
}
mutex_unlock(&tomoyo_policy_lock);
- tomoyo_put_name(saved_manager);
- kfree(entry);
+ out:
+ tomoyo_put_name(e.manager);
return error;
}
@@ -1287,7 +1285,8 @@ static int tomoyo_delete_domain(char *domainname)
name.name = domainname;
tomoyo_fill_path_info(&name);
- mutex_lock(&tomoyo_policy_lock);
+ if (mutex_lock_interruptible(&tomoyo_policy_lock))
+ return 0;
/* Is there an active domain? */
list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
/* Never delete tomoyo_kernel_domain */
@@ -1369,7 +1368,6 @@ static bool tomoyo_print_path_acl(struct tomoyo_io_buffer *head,
{
int pos;
u8 bit;
- const char *atmark = "";
const char *filename;
const u32 perm = ptr->perm | (((u32) ptr->perm_high) << 16);
@@ -1384,8 +1382,7 @@ static bool tomoyo_print_path_acl(struct tomoyo_io_buffer *head,
continue;
msg = tomoyo_path2keyword(bit);
pos = head->read_avail;
- if (!tomoyo_io_printf(head, "allow_%s %s%s\n", msg,
- atmark, filename))
+ if (!tomoyo_io_printf(head, "allow_%s %s\n", msg, filename))
goto out;
}
head->read_bit = 0;
@@ -1408,8 +1405,6 @@ static bool tomoyo_print_path2_acl(struct tomoyo_io_buffer *head,
struct tomoyo_path2_acl *ptr)
{
int pos;
- const char *atmark1 = "";
- const char *atmark2 = "";
const char *filename1;
const char *filename2;
const u8 perm = ptr->perm;
@@ -1423,8 +1418,8 @@ static bool tomoyo_print_path2_acl(struct tomoyo_io_buffer *head,
continue;
msg = tomoyo_path22keyword(bit);
pos = head->read_avail;
- if (!tomoyo_io_printf(head, "allow_%s %s%s %s%s\n", msg,
- atmark1, filename1, atmark2, filename2))
+ if (!tomoyo_io_printf(head, "allow_%s %s %s\n", msg,
+ filename1, filename2))
goto out;
}
head->read_bit = 0;
@@ -1886,7 +1881,7 @@ static int tomoyo_read_self_domain(struct tomoyo_io_buffer *head)
*/
static int tomoyo_open_control(const u8 type, struct file *file)
{
- struct tomoyo_io_buffer *head = kzalloc(sizeof(*head), GFP_KERNEL);
+ struct tomoyo_io_buffer *head = kzalloc(sizeof(*head), GFP_NOFS);
if (!head)
return -ENOMEM;
@@ -1947,7 +1942,7 @@ static int tomoyo_open_control(const u8 type, struct file *file)
} else {
if (!head->readbuf_size)
head->readbuf_size = 4096 * 2;
- head->read_buf = kzalloc(head->readbuf_size, GFP_KERNEL);
+ head->read_buf = kzalloc(head->readbuf_size, GFP_NOFS);
if (!head->read_buf) {
kfree(head);
return -ENOMEM;
@@ -1961,7 +1956,7 @@ static int tomoyo_open_control(const u8 type, struct file *file)
head->write = NULL;
} else if (head->write) {
head->writebuf_size = 4096 * 2;
- head->write_buf = kzalloc(head->writebuf_size, GFP_KERNEL);
+ head->write_buf = kzalloc(head->writebuf_size, GFP_NOFS);
if (!head->write_buf) {
kfree(head->read_buf);
kfree(head);
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h
index 67bd22dd3e68..c95f48609461 100644
--- a/security/tomoyo/common.h
+++ b/security/tomoyo/common.h
@@ -616,6 +616,7 @@ char *tomoyo_realpath_from_path(struct path *path);
/* Check memory quota. */
bool tomoyo_memory_ok(void *ptr);
+void *tomoyo_commit_ok(void *data, const unsigned int size);
/*
* Keep the given name on the RAM.
@@ -662,7 +663,6 @@ extern struct list_head tomoyo_pattern_list;
extern struct list_head tomoyo_no_rewrite_list;
extern struct list_head tomoyo_policy_manager_list;
extern struct list_head tomoyo_name_list[TOMOYO_MAX_HASH];
-extern struct mutex tomoyo_name_list_lock;
/* Lock for protecting policy. */
extern struct mutex tomoyo_policy_lock;
@@ -736,6 +736,31 @@ static inline struct tomoyo_domain_info *tomoyo_real_domain(struct task_struct
return task_cred_xxx(task, security);
}
+static inline bool tomoyo_is_same_domain_initializer_entry
+(const struct tomoyo_domain_initializer_entry *p1,
+ const struct tomoyo_domain_initializer_entry *p2)
+{
+ return p1->is_not == p2->is_not && p1->is_last_name == p2->is_last_name
+ && p1->domainname == p2->domainname
+ && p1->program == p2->program;
+}
+
+static inline bool tomoyo_is_same_domain_keeper_entry
+(const struct tomoyo_domain_keeper_entry *p1,
+ const struct tomoyo_domain_keeper_entry *p2)
+{
+ return p1->is_not == p2->is_not && p1->is_last_name == p2->is_last_name
+ && p1->domainname == p2->domainname
+ && p1->program == p2->program;
+}
+
+static inline bool tomoyo_is_same_alias_entry
+(const struct tomoyo_alias_entry *p1, const struct tomoyo_alias_entry *p2)
+{
+ return p1->original_name == p2->original_name &&
+ p1->aliased_name == p2->aliased_name;
+}
+
/**
* list_for_each_cookie - iterate over a list with cookie.
* @pos: the &struct list_head to use as a loop cursor.
diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c
index acb8c397d5cf..cd8ba4446763 100644
--- a/security/tomoyo/domain.c
+++ b/security/tomoyo/domain.c
@@ -130,57 +130,47 @@ static int tomoyo_update_domain_initializer_entry(const char *domainname,
const bool is_not,
const bool is_delete)
{
- struct tomoyo_domain_initializer_entry *entry = NULL;
struct tomoyo_domain_initializer_entry *ptr;
- const struct tomoyo_path_info *saved_program = NULL;
- const struct tomoyo_path_info *saved_domainname = NULL;
+ struct tomoyo_domain_initializer_entry e = { .is_not = is_not };
int error = is_delete ? -ENOENT : -ENOMEM;
- bool is_last_name = false;
if (!tomoyo_is_correct_path(program, 1, -1, -1))
return -EINVAL; /* No patterns allowed. */
if (domainname) {
if (!tomoyo_is_domain_def(domainname) &&
tomoyo_is_correct_path(domainname, 1, -1, -1))
- is_last_name = true;
+ e.is_last_name = true;
else if (!tomoyo_is_correct_domain(domainname))
return -EINVAL;
- saved_domainname = tomoyo_get_name(domainname);
- if (!saved_domainname)
+ e.domainname = tomoyo_get_name(domainname);
+ if (!e.domainname)
goto out;
}
- saved_program = tomoyo_get_name(program);
- if (!saved_program)
+ e.program = tomoyo_get_name(program);
+ if (!e.program)
+ goto out;
+ if (mutex_lock_interruptible(&tomoyo_policy_lock))
goto out;
- if (!is_delete)
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- mutex_lock(&tomoyo_policy_lock);
list_for_each_entry_rcu(ptr, &tomoyo_domain_initializer_list, list) {
- if (ptr->is_not != is_not ||
- ptr->domainname != saved_domainname ||
- ptr->program != saved_program)
+ if (!tomoyo_is_same_domain_initializer_entry(ptr, &e))
continue;
ptr->is_deleted = is_delete;
error = 0;
break;
}
- if (!is_delete && error && tomoyo_memory_ok(entry)) {
- entry->domainname = saved_domainname;
- saved_domainname = NULL;
- entry->program = saved_program;
- saved_program = NULL;
- entry->is_not = is_not;
- entry->is_last_name = is_last_name;
- list_add_tail_rcu(&entry->list,
- &tomoyo_domain_initializer_list);
- entry = NULL;
- error = 0;
+ if (!is_delete && error) {
+ struct tomoyo_domain_initializer_entry *entry =
+ tomoyo_commit_ok(&e, sizeof(e));
+ if (entry) {
+ list_add_tail_rcu(&entry->list,
+ &tomoyo_domain_initializer_list);
+ error = 0;
+ }
}
mutex_unlock(&tomoyo_policy_lock);
out:
- tomoyo_put_name(saved_domainname);
- tomoyo_put_name(saved_program);
- kfree(entry);
+ tomoyo_put_name(e.domainname);
+ tomoyo_put_name(e.program);
return error;
}
@@ -350,56 +340,47 @@ static int tomoyo_update_domain_keeper_entry(const char *domainname,
const bool is_not,
const bool is_delete)
{
- struct tomoyo_domain_keeper_entry *entry = NULL;
struct tomoyo_domain_keeper_entry *ptr;
- const struct tomoyo_path_info *saved_domainname = NULL;
- const struct tomoyo_path_info *saved_program = NULL;
+ struct tomoyo_domain_keeper_entry e = { .is_not = is_not };
int error = is_delete ? -ENOENT : -ENOMEM;
- bool is_last_name = false;
if (!tomoyo_is_domain_def(domainname) &&
tomoyo_is_correct_path(domainname, 1, -1, -1))
- is_last_name = true;
+ e.is_last_name = true;
else if (!tomoyo_is_correct_domain(domainname))
return -EINVAL;
if (program) {
if (!tomoyo_is_correct_path(program, 1, -1, -1))
return -EINVAL;
- saved_program = tomoyo_get_name(program);
- if (!saved_program)
+ e.program = tomoyo_get_name(program);
+ if (!e.program)
goto out;
}
- saved_domainname = tomoyo_get_name(domainname);
- if (!saved_domainname)
+ e.domainname = tomoyo_get_name(domainname);
+ if (!e.domainname)
+ goto out;
+ if (mutex_lock_interruptible(&tomoyo_policy_lock))
goto out;
- if (!is_delete)
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- mutex_lock(&tomoyo_policy_lock);
list_for_each_entry_rcu(ptr, &tomoyo_domain_keeper_list, list) {
- if (ptr->is_not != is_not ||
- ptr->domainname != saved_domainname ||
- ptr->program != saved_program)
+ if (!tomoyo_is_same_domain_keeper_entry(ptr, &e))
continue;
ptr->is_deleted = is_delete;
error = 0;
break;
}
- if (!is_delete && error && tomoyo_memory_ok(entry)) {
- entry->domainname = saved_domainname;
- saved_domainname = NULL;
- entry->program = saved_program;
- saved_program = NULL;
- entry->is_not = is_not;
- entry->is_last_name = is_last_name;
- list_add_tail_rcu(&entry->list, &tomoyo_domain_keeper_list);
- entry = NULL;
- error = 0;
+ if (!is_delete && error) {
+ struct tomoyo_domain_keeper_entry *entry =
+ tomoyo_commit_ok(&e, sizeof(e));
+ if (entry) {
+ list_add_tail_rcu(&entry->list,
+ &tomoyo_domain_keeper_list);
+ error = 0;
+ }
}
mutex_unlock(&tomoyo_policy_lock);
out:
- tomoyo_put_name(saved_domainname);
- tomoyo_put_name(saved_program);
- kfree(entry);
+ tomoyo_put_name(e.domainname);
+ tomoyo_put_name(e.program);
return error;
}
@@ -551,44 +532,38 @@ static int tomoyo_update_alias_entry(const char *original_name,
const char *aliased_name,
const bool is_delete)
{
- struct tomoyo_alias_entry *entry = NULL;
struct tomoyo_alias_entry *ptr;
- const struct tomoyo_path_info *saved_original_name;
- const struct tomoyo_path_info *saved_aliased_name;
+ struct tomoyo_alias_entry e = { };
int error = is_delete ? -ENOENT : -ENOMEM;
if (!tomoyo_is_correct_path(original_name, 1, -1, -1) ||
!tomoyo_is_correct_path(aliased_name, 1, -1, -1))
return -EINVAL; /* No patterns allowed. */
- saved_original_name = tomoyo_get_name(original_name);
- saved_aliased_name = tomoyo_get_name(aliased_name);
- if (!saved_original_name || !saved_aliased_name)
+ e.original_name = tomoyo_get_name(original_name);
+ e.aliased_name = tomoyo_get_name(aliased_name);
+ if (!e.original_name || !e.aliased_name)
+ goto out;
+ if (mutex_lock_interruptible(&tomoyo_policy_lock))
goto out;
- if (!is_delete)
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- mutex_lock(&tomoyo_policy_lock);
list_for_each_entry_rcu(ptr, &tomoyo_alias_list, list) {
- if (ptr->original_name != saved_original_name ||
- ptr->aliased_name != saved_aliased_name)
+ if (!tomoyo_is_same_alias_entry(ptr, &e))
continue;
ptr->is_deleted = is_delete;
error = 0;
break;
}
- if (!is_delete && error && tomoyo_memory_ok(entry)) {
- entry->original_name = saved_original_name;
- saved_original_name = NULL;
- entry->aliased_name = saved_aliased_name;
- saved_aliased_name = NULL;
- list_add_tail_rcu(&entry->list, &tomoyo_alias_list);
- entry = NULL;
- error = 0;
+ if (!is_delete && error) {
+ struct tomoyo_alias_entry *entry =
+ tomoyo_commit_ok(&e, sizeof(e));
+ if (entry) {
+ list_add_tail_rcu(&entry->list, &tomoyo_alias_list);
+ error = 0;
+ }
}
mutex_unlock(&tomoyo_policy_lock);
out:
- tomoyo_put_name(saved_original_name);
- tomoyo_put_name(saved_aliased_name);
- kfree(entry);
+ tomoyo_put_name(e.original_name);
+ tomoyo_put_name(e.aliased_name);
return error;
}
@@ -656,7 +631,7 @@ struct tomoyo_domain_info *tomoyo_find_or_assign_new_domain(const char *
const u8 profile)
{
struct tomoyo_domain_info *entry;
- struct tomoyo_domain_info *domain;
+ struct tomoyo_domain_info *domain = NULL;
const struct tomoyo_path_info *saved_domainname;
bool found = false;
@@ -665,8 +640,9 @@ struct tomoyo_domain_info *tomoyo_find_or_assign_new_domain(const char *
saved_domainname = tomoyo_get_name(domainname);
if (!saved_domainname)
return NULL;
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- mutex_lock(&tomoyo_policy_lock);
+ entry = kzalloc(sizeof(*entry), GFP_NOFS);
+ if (mutex_lock_interruptible(&tomoyo_policy_lock))
+ goto out;
list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
if (domain->is_deleted ||
tomoyo_pathcmp(saved_domainname, domain->domainname))
@@ -685,6 +661,7 @@ struct tomoyo_domain_info *tomoyo_find_or_assign_new_domain(const char *
found = true;
}
mutex_unlock(&tomoyo_policy_lock);
+ out:
tomoyo_put_name(saved_domainname);
kfree(entry);
return found ? domain : NULL;
@@ -705,7 +682,7 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm)
* This function assumes that the size of buffer returned by
* tomoyo_realpath() = TOMOYO_MAX_PATHNAME_LEN.
*/
- struct tomoyo_page_buffer *tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ struct tomoyo_page_buffer *tmp = kzalloc(sizeof(*tmp), GFP_NOFS);
struct tomoyo_domain_info *old_domain = tomoyo_domain();
struct tomoyo_domain_info *domain = NULL;
const char *old_domain_name = old_domain->domainname->name;
diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c
index 6f3fe76a1fde..6651cac87625 100644
--- a/security/tomoyo/file.c
+++ b/security/tomoyo/file.c
@@ -100,7 +100,7 @@ static struct tomoyo_path_info *tomoyo_get_path(struct path *path)
{
int error;
struct tomoyo_path_info_with_data *buf = kzalloc(sizeof(*buf),
- GFP_KERNEL);
+ GFP_NOFS);
if (!buf)
return NULL;
@@ -164,36 +164,36 @@ LIST_HEAD(tomoyo_globally_readable_list);
static int tomoyo_update_globally_readable_entry(const char *filename,
const bool is_delete)
{
- struct tomoyo_globally_readable_file_entry *entry = NULL;
struct tomoyo_globally_readable_file_entry *ptr;
- const struct tomoyo_path_info *saved_filename;
+ struct tomoyo_globally_readable_file_entry e = { };
int error = is_delete ? -ENOENT : -ENOMEM;
if (!tomoyo_is_correct_path(filename, 1, 0, -1))
return -EINVAL;
- saved_filename = tomoyo_get_name(filename);
- if (!saved_filename)
+ e.filename = tomoyo_get_name(filename);
+ if (!e.filename)
return -ENOMEM;
- if (!is_delete)
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- mutex_lock(&tomoyo_policy_lock);
+ if (mutex_lock_interruptible(&tomoyo_policy_lock))
+ goto out;
list_for_each_entry_rcu(ptr, &tomoyo_globally_readable_list, list) {
- if (ptr->filename != saved_filename)
+ if (ptr->filename != e.filename)
continue;
ptr->is_deleted = is_delete;
error = 0;
break;
}
- if (!is_delete && error && tomoyo_memory_ok(entry)) {
- entry->filename = saved_filename;
- saved_filename = NULL;
- list_add_tail_rcu(&entry->list, &tomoyo_globally_readable_list);
- entry = NULL;
- error = 0;
+ if (!is_delete && error) {
+ struct tomoyo_globally_readable_file_entry *entry =
+ tomoyo_commit_ok(&e, sizeof(e));
+ if (entry) {
+ list_add_tail_rcu(&entry->list,
+ &tomoyo_globally_readable_list);
+ error = 0;
+ }
}
mutex_unlock(&tomoyo_policy_lock);
- tomoyo_put_name(saved_filename);
- kfree(entry);
+ out:
+ tomoyo_put_name(e.filename);
return error;
}
@@ -311,37 +311,34 @@ LIST_HEAD(tomoyo_pattern_list);
static int tomoyo_update_file_pattern_entry(const char *pattern,
const bool is_delete)
{
- struct tomoyo_pattern_entry *entry = NULL;
struct tomoyo_pattern_entry *ptr;
- const struct tomoyo_path_info *saved_pattern;
+ struct tomoyo_pattern_entry e = { .pattern = tomoyo_get_name(pattern) };
int error = is_delete ? -ENOENT : -ENOMEM;
- saved_pattern = tomoyo_get_name(pattern);
- if (!saved_pattern)
+ if (!e.pattern)
return error;
- if (!saved_pattern->is_patterned)
+ if (!e.pattern->is_patterned)
+ goto out;
+ if (mutex_lock_interruptible(&tomoyo_policy_lock))
goto out;
- if (!is_delete)
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- mutex_lock(&tomoyo_policy_lock);
list_for_each_entry_rcu(ptr, &tomoyo_pattern_list, list) {
- if (saved_pattern != ptr->pattern)
+ if (e.pattern != ptr->pattern)
continue;
ptr->is_deleted = is_delete;
error = 0;
break;
}
- if (!is_delete && error && tomoyo_memory_ok(entry)) {
- entry->pattern = saved_pattern;
- saved_pattern = NULL;
- list_add_tail_rcu(&entry->list, &tomoyo_pattern_list);
- entry = NULL;
- error = 0;
+ if (!is_delete && error) {
+ struct tomoyo_pattern_entry *entry =
+ tomoyo_commit_ok(&e, sizeof(e));
+ if (entry) {
+ list_add_tail_rcu(&entry->list, &tomoyo_pattern_list);
+ error = 0;
+ }
}
mutex_unlock(&tomoyo_policy_lock);
out:
- kfree(entry);
- tomoyo_put_name(saved_pattern);
+ tomoyo_put_name(e.pattern);
return error;
}
@@ -464,36 +461,36 @@ LIST_HEAD(tomoyo_no_rewrite_list);
static int tomoyo_update_no_rewrite_entry(const char *pattern,
const bool is_delete)
{
- struct tomoyo_no_rewrite_entry *entry = NULL;
struct tomoyo_no_rewrite_entry *ptr;
- const struct tomoyo_path_info *saved_pattern;
+ struct tomoyo_no_rewrite_entry e = { };
int error = is_delete ? -ENOENT : -ENOMEM;
if (!tomoyo_is_correct_path(pattern, 0, 0, 0))
return -EINVAL;
- saved_pattern = tomoyo_get_name(pattern);
- if (!saved_pattern)
+ e.pattern = tomoyo_get_name(pattern);
+ if (!e.pattern)
return error;
- if (!is_delete)
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- mutex_lock(&tomoyo_policy_lock);
+ if (mutex_lock_interruptible(&tomoyo_policy_lock))
+ goto out;
list_for_each_entry_rcu(ptr, &tomoyo_no_rewrite_list, list) {
- if (ptr->pattern != saved_pattern)
+ if (ptr->pattern != e.pattern)
continue;
ptr->is_deleted = is_delete;
error = 0;
break;
}
- if (!is_delete && error && tomoyo_memory_ok(entry)) {
- entry->pattern = saved_pattern;
- saved_pattern = NULL;
- list_add_tail_rcu(&entry->list, &tomoyo_no_rewrite_list);
- entry = NULL;
- error = 0;
+ if (!is_delete && error) {
+ struct tomoyo_no_rewrite_entry *entry =
+ tomoyo_commit_ok(&e, sizeof(e));
+ if (entry) {
+ list_add_tail_rcu(&entry->list,
+ &tomoyo_no_rewrite_list);
+ error = 0;
+ }
}
mutex_unlock(&tomoyo_policy_lock);
- tomoyo_put_name(saved_pattern);
- kfree(entry);
+ out:
+ tomoyo_put_name(e.pattern);
return error;
}
@@ -805,70 +802,69 @@ static int tomoyo_update_path_acl(const u8 type, const char *filename,
struct tomoyo_domain_info *const domain,
const bool is_delete)
{
- static const u32 rw_mask =
+ static const u32 tomoyo_rw_mask =
(1 << TOMOYO_TYPE_READ) | (1 << TOMOYO_TYPE_WRITE);
- const struct tomoyo_path_info *saved_filename;
+ const u32 perm = 1 << type;
struct tomoyo_acl_info *ptr;
- struct tomoyo_path_acl *entry = NULL;
+ struct tomoyo_path_acl e = {
+ .head.type = TOMOYO_TYPE_PATH_ACL,
+ .perm_high = perm >> 16,
+ .perm = perm
+ };
int error = is_delete ? -ENOENT : -ENOMEM;
- const u32 perm = 1 << type;
+ if (type == TOMOYO_TYPE_READ_WRITE)
+ e.perm |= tomoyo_rw_mask;
if (!domain)
return -EINVAL;
if (!tomoyo_is_correct_path(filename, 0, 0, 0))
return -EINVAL;
- saved_filename = tomoyo_get_name(filename);
- if (!saved_filename)
+ e.filename = tomoyo_get_name(filename);
+ if (!e.filename)
return -ENOMEM;
- if (!is_delete)
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- mutex_lock(&tomoyo_policy_lock);
+ if (mutex_lock_interruptible(&tomoyo_policy_lock))
+ goto out;
list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) {
struct tomoyo_path_acl *acl =
container_of(ptr, struct tomoyo_path_acl, head);
if (ptr->type != TOMOYO_TYPE_PATH_ACL)
continue;
- if (acl->filename != saved_filename)
+ if (acl->filename != e.filename)
continue;
if (is_delete) {
if (perm <= 0xFFFF)
acl->perm &= ~perm;
else
acl->perm_high &= ~(perm >> 16);
- if ((acl->perm & rw_mask) != rw_mask)
+ if ((acl->perm & tomoyo_rw_mask) != tomoyo_rw_mask)
acl->perm &= ~(1 << TOMOYO_TYPE_READ_WRITE);
else if (!(acl->perm & (1 << TOMOYO_TYPE_READ_WRITE)))
- acl->perm &= ~rw_mask;
+ acl->perm &= ~tomoyo_rw_mask;
} else {
if (perm <= 0xFFFF)
acl->perm |= perm;
else
acl->perm_high |= (perm >> 16);
- if ((acl->perm & rw_mask) == rw_mask)
+ if ((acl->perm & tomoyo_rw_mask) == tomoyo_rw_mask)
acl->perm |= 1 << TOMOYO_TYPE_READ_WRITE;
else if (acl->perm & (1 << TOMOYO_TYPE_READ_WRITE))
- acl->perm |= rw_mask;
+ acl->perm |= tomoyo_rw_mask;
}
error = 0;
break;
}
- if (!is_delete && error && tomoyo_memory_ok(entry)) {
- entry->head.type = TOMOYO_TYPE_PATH_ACL;
- if (perm <= 0xFFFF)
- entry->perm = perm;
- else
- entry->perm_high = (perm >> 16);
- if (perm == (1 << TOMOYO_TYPE_READ_WRITE))
- entry->perm |= rw_mask;
- entry->filename = saved_filename;
- saved_filename = NULL;
- list_add_tail_rcu(&entry->head.list, &domain->acl_info_list);
- entry = NULL;
- error = 0;
+ if (!is_delete && error) {
+ struct tomoyo_path_acl *entry =
+ tomoyo_commit_ok(&e, sizeof(e));
+ if (entry) {
+ list_add_tail_rcu(&entry->head.list,
+ &domain->acl_info_list);
+ error = 0;
+ }
}
mutex_unlock(&tomoyo_policy_lock);
- kfree(entry);
- tomoyo_put_name(saved_filename);
+ out:
+ tomoyo_put_name(e.filename);
return error;
}
@@ -890,32 +886,32 @@ static int tomoyo_update_path2_acl(const u8 type, const char *filename1,
struct tomoyo_domain_info *const domain,
const bool is_delete)
{
- const struct tomoyo_path_info *saved_filename1;
- const struct tomoyo_path_info *saved_filename2;
+ const u8 perm = 1 << type;
+ struct tomoyo_path2_acl e = {
+ .head.type = TOMOYO_TYPE_PATH2_ACL,
+ .perm = perm
+ };
struct tomoyo_acl_info *ptr;
- struct tomoyo_path2_acl *entry = NULL;
int error = is_delete ? -ENOENT : -ENOMEM;
- const u8 perm = 1 << type;
if (!domain)
return -EINVAL;
if (!tomoyo_is_correct_path(filename1, 0, 0, 0) ||
!tomoyo_is_correct_path(filename2, 0, 0, 0))
return -EINVAL;
- saved_filename1 = tomoyo_get_name(filename1);
- saved_filename2 = tomoyo_get_name(filename2);
- if (!saved_filename1 || !saved_filename2)
+ e.filename1 = tomoyo_get_name(filename1);
+ e.filename2 = tomoyo_get_name(filename2);
+ if (!e.filename1 || !e.filename2)
+ goto out;
+ if (mutex_lock_interruptible(&tomoyo_policy_lock))
goto out;
- if (!is_delete)
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- mutex_lock(&tomoyo_policy_lock);
list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) {
struct tomoyo_path2_acl *acl =
container_of(ptr, struct tomoyo_path2_acl, head);
if (ptr->type != TOMOYO_TYPE_PATH2_ACL)
continue;
- if (acl->filename1 != saved_filename1 ||
- acl->filename2 != saved_filename2)
+ if (acl->filename1 != e.filename1 ||
+ acl->filename2 != e.filename2)
continue;
if (is_delete)
acl->perm &= ~perm;
@@ -924,22 +920,19 @@ static int tomoyo_update_path2_acl(const u8 type, const char *filename1,
error = 0;
break;
}
- if (!is_delete && error && tomoyo_memory_ok(entry)) {
- entry->head.type = TOMOYO_TYPE_PATH2_ACL;
- entry->perm = perm;
- entry->filename1 = saved_filename1;
- saved_filename1 = NULL;
- entry->filename2 = saved_filename2;
- saved_filename2 = NULL;
- list_add_tail_rcu(&entry->head.list, &domain->acl_info_list);
- entry = NULL;
- error = 0;
+ if (!is_delete && error) {
+ struct tomoyo_path2_acl *entry =
+ tomoyo_commit_ok(&e, sizeof(e));
+ if (entry) {
+ list_add_tail_rcu(&entry->head.list,
+ &domain->acl_info_list);
+ error = 0;
+ }
}
mutex_unlock(&tomoyo_policy_lock);
out:
- tomoyo_put_name(saved_filename1);
- tomoyo_put_name(saved_filename2);
- kfree(entry);
+ tomoyo_put_name(e.filename1);
+ tomoyo_put_name(e.filename2);
return error;
}
diff --git a/security/tomoyo/gc.c b/security/tomoyo/gc.c
index d9ad35bc7fa8..245bf422e3a5 100644
--- a/security/tomoyo/gc.c
+++ b/security/tomoyo/gc.c
@@ -151,7 +151,8 @@ static void tomoyo_del_name(const struct tomoyo_name_entry *ptr)
static void tomoyo_collect_entry(void)
{
- mutex_lock(&tomoyo_policy_lock);
+ if (mutex_lock_interruptible(&tomoyo_policy_lock))
+ return;
{
struct tomoyo_globally_readable_file_entry *ptr;
list_for_each_entry_rcu(ptr, &tomoyo_globally_readable_list,
@@ -275,8 +276,6 @@ static void tomoyo_collect_entry(void)
break;
}
}
- mutex_unlock(&tomoyo_policy_lock);
- mutex_lock(&tomoyo_name_list_lock);
{
int i;
for (i = 0; i < TOMOYO_MAX_HASH; i++) {
@@ -294,7 +293,7 @@ static void tomoyo_collect_entry(void)
}
}
}
- mutex_unlock(&tomoyo_name_list_lock);
+ mutex_unlock(&tomoyo_policy_lock);
}
static void tomoyo_kfree_entry(void)
diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c
index c225c65ce426..d1b96f019621 100644
--- a/security/tomoyo/realpath.c
+++ b/security/tomoyo/realpath.c
@@ -139,7 +139,7 @@ int tomoyo_realpath_from_path2(struct path *path, char *newname,
*/
char *tomoyo_realpath_from_path(struct path *path)
{
- char *buf = kzalloc(sizeof(struct tomoyo_page_buffer), GFP_KERNEL);
+ char *buf = kzalloc(sizeof(struct tomoyo_page_buffer), GFP_NOFS);
BUILD_BUG_ON(sizeof(struct tomoyo_page_buffer)
<= TOMOYO_MAX_PATHNAME_LEN - 1);
@@ -223,6 +223,25 @@ bool tomoyo_memory_ok(void *ptr)
}
/**
+ * tomoyo_commit_ok - Check memory quota and duplicate policy data.
+ *
+ * @data: Data to copy from.
+ * @size: Size in bytes.
+ *
+ * Returns pointer to allocated memory on success, NULL otherwise.
+ */
+void *tomoyo_commit_ok(void *data, const unsigned int size)
+{
+ void *ptr = kzalloc(size, GFP_NOFS);
+ if (tomoyo_memory_ok(ptr)) {
+ memmove(ptr, data, size);
+ memset(data, 0, size);
+ return ptr;
+ }
+ return NULL;
+}
+
+/**
* tomoyo_memory_free - Free memory for elements.
*
* @ptr: Pointer to allocated memory.
@@ -240,8 +259,6 @@ void tomoyo_memory_free(void *ptr)
* "const struct tomoyo_path_info *".
*/
struct list_head tomoyo_name_list[TOMOYO_MAX_HASH];
-/* Lock for protecting tomoyo_name_list . */
-DEFINE_MUTEX(tomoyo_name_list_lock);
/**
* tomoyo_get_name - Allocate permanent memory for string data.
@@ -263,14 +280,15 @@ const struct tomoyo_path_info *tomoyo_get_name(const char *name)
len = strlen(name) + 1;
hash = full_name_hash((const unsigned char *) name, len - 1);
head = &tomoyo_name_list[hash_long(hash, TOMOYO_HASH_BITS)];
- mutex_lock(&tomoyo_name_list_lock);
+ if (mutex_lock_interruptible(&tomoyo_policy_lock))
+ return NULL;
list_for_each_entry(ptr, head, list) {
if (hash != ptr->entry.hash || strcmp(name, ptr->entry.name))
continue;
atomic_inc(&ptr->users);
goto out;
}
- ptr = kzalloc(sizeof(*ptr) + len, GFP_KERNEL);
+ ptr = kzalloc(sizeof(*ptr) + len, GFP_NOFS);
allocated_len = ptr ? ksize(ptr) : 0;
if (!ptr || (tomoyo_quota_for_policy &&
atomic_read(&tomoyo_policy_memory_size) + allocated_len
@@ -290,7 +308,7 @@ const struct tomoyo_path_info *tomoyo_get_name(const char *name)
tomoyo_fill_path_info(&ptr->entry);
list_add_tail(&ptr->list, head);
out:
- mutex_unlock(&tomoyo_name_list_lock);
+ mutex_unlock(&tomoyo_policy_lock);
return ptr ? &ptr->entry : NULL;
}
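
The tomoyo_update_*() rewrites above all follow the same template: build the candidate entry on the stack, take tomoyo_policy_lock interruptibly, toggle a matching entry if one already exists, and otherwise let tomoyo_commit_ok() duplicate the stack copy within the memory quota before linking it into the RCU-protected list. A minimal sketch of that template follows; tomoyo_example_entry and tomoyo_example_list are hypothetical names used only for illustration, while tomoyo_get_name(), tomoyo_put_name(), tomoyo_commit_ok() and tomoyo_policy_lock are the real helpers touched by this patch.

/*
 * Illustrative-only sketch of the update template used above.
 * "tomoyo_example_entry" and "tomoyo_example_list" are hypothetical;
 * the helper functions are the ones introduced/used in this patch.
 */
static int tomoyo_update_example_entry(const char *name, const bool is_delete)
{
	struct tomoyo_example_entry e = { };
	struct tomoyo_example_entry *ptr;
	int error = is_delete ? -ENOENT : -ENOMEM;

	e.name = tomoyo_get_name(name);	/* refcounted, permanent string */
	if (!e.name)
		return -ENOMEM;
	if (mutex_lock_interruptible(&tomoyo_policy_lock))
		goto out;
	list_for_each_entry_rcu(ptr, &tomoyo_example_list, list) {
		if (ptr->name != e.name)
			continue;
		ptr->is_deleted = is_delete;	/* reuse the existing entry */
		error = 0;
		break;
	}
	if (!is_delete && error) {
		/* duplicate the stack copy within the memory quota */
		struct tomoyo_example_entry *entry =
			tomoyo_commit_ok(&e, sizeof(e));
		if (entry) {
			list_add_tail_rcu(&entry->list, &tomoyo_example_list);
			error = 0;
		}
	}
	mutex_unlock(&tomoyo_policy_lock);
 out:
	tomoyo_put_name(e.name);
	return error;
}

Because tomoyo_commit_ok() clears the source buffer on success, the name reference is effectively transferred to the committed entry and the trailing tomoyo_put_name() only drops it when nothing was committed.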
diff --git a/sound/aoa/core/gpio-pmf.c b/sound/aoa/core/gpio-pmf.c
index 6776d1c12b63..7e267c9379bc 100644
--- a/sound/aoa/core/gpio-pmf.c
+++ b/sound/aoa/core/gpio-pmf.c
@@ -116,12 +116,9 @@ static void pmf_gpio_exit(struct gpio_runtime *rt)
mutex_destroy(&rt->line_in_notify.mutex);
mutex_destroy(&rt->line_out_notify.mutex);
- if (rt->headphone_notify.gpio_private)
- kfree(rt->headphone_notify.gpio_private);
- if (rt->line_in_notify.gpio_private)
- kfree(rt->line_in_notify.gpio_private);
- if (rt->line_out_notify.gpio_private)
- kfree(rt->line_out_notify.gpio_private);
+ kfree(rt->headphone_notify.gpio_private);
+ kfree(rt->line_in_notify.gpio_private);
+ kfree(rt->line_out_notify.gpio_private);
}
static void pmf_handle_notify_irq(void *data)
diff --git a/sound/atmel/Kconfig b/sound/atmel/Kconfig
index 6c228a91940d..94de43a096f1 100644
--- a/sound/atmel/Kconfig
+++ b/sound/atmel/Kconfig
@@ -12,7 +12,7 @@ config SND_ATMEL_AC97C
tristate "Atmel AC97 Controller (AC97C) driver"
select SND_PCM
select SND_AC97_CODEC
- depends on DW_DMAC && AVR32
+ depends on (DW_DMAC && AVR32) || ARCH_AT91
help
ALSA sound driver for the Atmel AC97 controller.
diff --git a/sound/atmel/ac97c.c b/sound/atmel/ac97c.c
index 0c0f8771656a..428121a7e705 100644
--- a/sound/atmel/ac97c.c
+++ b/sound/atmel/ac97c.c
@@ -13,6 +13,7 @@
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
+#include <linux/atmel_pdc.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
@@ -31,6 +32,10 @@
#include <linux/dw_dmac.h>
+#include <mach/cpu.h>
+#include <mach/hardware.h>
+#include <mach/gpio.h>
+
#include "ac97c.h"
enum {
@@ -63,6 +68,7 @@ struct atmel_ac97c {
u64 cur_format;
unsigned int cur_rate;
unsigned long flags;
+ int playback_period, capture_period;
/* Serialize access to opened variable */
spinlock_t lock;
void __iomem *regs;
@@ -242,10 +248,12 @@ static int atmel_ac97c_playback_hw_params(struct snd_pcm_substream *substream,
if (retval < 0)
return retval;
/* snd_pcm_lib_malloc_pages returns 1 if buffer is changed. */
- if (retval == 1)
- if (test_and_clear_bit(DMA_TX_READY, &chip->flags))
- dw_dma_cyclic_free(chip->dma.tx_chan);
-
+ if (cpu_is_at32ap7000()) {
+ /* snd_pcm_lib_malloc_pages returns 1 if buffer is changed. */
+ if (retval == 1)
+ if (test_and_clear_bit(DMA_TX_READY, &chip->flags))
+ dw_dma_cyclic_free(chip->dma.tx_chan);
+ }
/* Set restrictions to params. */
mutex_lock(&opened_mutex);
chip->cur_rate = params_rate(hw_params);
@@ -266,9 +274,14 @@ static int atmel_ac97c_capture_hw_params(struct snd_pcm_substream *substream,
if (retval < 0)
return retval;
/* snd_pcm_lib_malloc_pages returns 1 if buffer is changed. */
- if (retval == 1)
- if (test_and_clear_bit(DMA_RX_READY, &chip->flags))
- dw_dma_cyclic_free(chip->dma.rx_chan);
+ if (cpu_is_at32ap7000()) {
+ if (retval < 0)
+ return retval;
+ /* snd_pcm_lib_malloc_pages returns 1 if buffer is changed. */
+ if (retval == 1)
+ if (test_and_clear_bit(DMA_RX_READY, &chip->flags))
+ dw_dma_cyclic_free(chip->dma.rx_chan);
+ }
/* Set restrictions to params. */
mutex_lock(&opened_mutex);
@@ -282,16 +295,20 @@ static int atmel_ac97c_capture_hw_params(struct snd_pcm_substream *substream,
static int atmel_ac97c_playback_hw_free(struct snd_pcm_substream *substream)
{
struct atmel_ac97c *chip = snd_pcm_substream_chip(substream);
- if (test_and_clear_bit(DMA_TX_READY, &chip->flags))
- dw_dma_cyclic_free(chip->dma.tx_chan);
+ if (cpu_is_at32ap7000()) {
+ if (test_and_clear_bit(DMA_TX_READY, &chip->flags))
+ dw_dma_cyclic_free(chip->dma.tx_chan);
+ }
return snd_pcm_lib_free_pages(substream);
}
static int atmel_ac97c_capture_hw_free(struct snd_pcm_substream *substream)
{
struct atmel_ac97c *chip = snd_pcm_substream_chip(substream);
- if (test_and_clear_bit(DMA_RX_READY, &chip->flags))
- dw_dma_cyclic_free(chip->dma.rx_chan);
+ if (cpu_is_at32ap7000()) {
+ if (test_and_clear_bit(DMA_RX_READY, &chip->flags))
+ dw_dma_cyclic_free(chip->dma.rx_chan);
+ }
return snd_pcm_lib_free_pages(substream);
}
@@ -299,9 +316,11 @@ static int atmel_ac97c_playback_prepare(struct snd_pcm_substream *substream)
{
struct atmel_ac97c *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
+ int block_size = frames_to_bytes(runtime, runtime->period_size);
unsigned long word = ac97c_readl(chip, OCA);
int retval;
+ chip->playback_period = 0;
word &= ~(AC97C_CH_MASK(PCM_LEFT) | AC97C_CH_MASK(PCM_RIGHT));
/* assign channels to AC97C channel A */
@@ -320,11 +339,16 @@ static int atmel_ac97c_playback_prepare(struct snd_pcm_substream *substream)
ac97c_writel(chip, OCA, word);
/* configure sample format and size */
- word = AC97C_CMR_DMAEN | AC97C_CMR_SIZE_16;
+ word = ac97c_readl(chip, CAMR);
+ if (chip->opened <= 1)
+ word = AC97C_CMR_DMAEN | AC97C_CMR_SIZE_16;
+ else
+ word |= AC97C_CMR_DMAEN | AC97C_CMR_SIZE_16;
switch (runtime->format) {
case SNDRV_PCM_FORMAT_S16_LE:
- word |= AC97C_CMR_CEM_LITTLE;
+ if (cpu_is_at32ap7000())
+ word |= AC97C_CMR_CEM_LITTLE;
break;
case SNDRV_PCM_FORMAT_S16_BE: /* fall through */
word &= ~(AC97C_CMR_CEM_LITTLE);
@@ -363,9 +387,18 @@ static int atmel_ac97c_playback_prepare(struct snd_pcm_substream *substream)
dev_dbg(&chip->pdev->dev, "could not set rate %d Hz\n",
runtime->rate);
- if (!test_bit(DMA_TX_READY, &chip->flags))
- retval = atmel_ac97c_prepare_dma(chip, substream,
- DMA_TO_DEVICE);
+ if (cpu_is_at32ap7000()) {
+ if (!test_bit(DMA_TX_READY, &chip->flags))
+ retval = atmel_ac97c_prepare_dma(chip, substream,
+ DMA_TO_DEVICE);
+ } else {
+ /* Initialize and start the PDC */
+ writel(runtime->dma_addr, chip->regs + ATMEL_PDC_TPR);
+ writel(block_size / 2, chip->regs + ATMEL_PDC_TCR);
+ writel(runtime->dma_addr + block_size,
+ chip->regs + ATMEL_PDC_TNPR);
+ writel(block_size / 2, chip->regs + ATMEL_PDC_TNCR);
+ }
return retval;
}
@@ -374,9 +407,11 @@ static int atmel_ac97c_capture_prepare(struct snd_pcm_substream *substream)
{
struct atmel_ac97c *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
+ int block_size = frames_to_bytes(runtime, runtime->period_size);
unsigned long word = ac97c_readl(chip, ICA);
int retval;
+ chip->capture_period = 0;
word &= ~(AC97C_CH_MASK(PCM_LEFT) | AC97C_CH_MASK(PCM_RIGHT));
/* assign channels to AC97C channel A */
@@ -395,11 +430,16 @@ static int atmel_ac97c_capture_prepare(struct snd_pcm_substream *substream)
ac97c_writel(chip, ICA, word);
/* configure sample format and size */
- word = AC97C_CMR_DMAEN | AC97C_CMR_SIZE_16;
+ word = ac97c_readl(chip, CAMR);
+ if (chip->opened <= 1)
+ word = AC97C_CMR_DMAEN | AC97C_CMR_SIZE_16;
+ else
+ word |= AC97C_CMR_DMAEN | AC97C_CMR_SIZE_16;
switch (runtime->format) {
case SNDRV_PCM_FORMAT_S16_LE:
- word |= AC97C_CMR_CEM_LITTLE;
+ if (cpu_is_at32ap7000())
+ word |= AC97C_CMR_CEM_LITTLE;
break;
case SNDRV_PCM_FORMAT_S16_BE: /* fall through */
word &= ~(AC97C_CMR_CEM_LITTLE);
@@ -438,9 +478,18 @@ static int atmel_ac97c_capture_prepare(struct snd_pcm_substream *substream)
dev_dbg(&chip->pdev->dev, "could not set rate %d Hz\n",
runtime->rate);
- if (!test_bit(DMA_RX_READY, &chip->flags))
- retval = atmel_ac97c_prepare_dma(chip, substream,
- DMA_FROM_DEVICE);
+ if (cpu_is_at32ap7000()) {
+ if (!test_bit(DMA_RX_READY, &chip->flags))
+ retval = atmel_ac97c_prepare_dma(chip, substream,
+ DMA_FROM_DEVICE);
+ } else {
+ /* Initialize and start the PDC */
+ writel(runtime->dma_addr, chip->regs + ATMEL_PDC_RPR);
+ writel(block_size / 2, chip->regs + ATMEL_PDC_RCR);
+ writel(runtime->dma_addr + block_size,
+ chip->regs + ATMEL_PDC_RNPR);
+ writel(block_size / 2, chip->regs + ATMEL_PDC_RNCR);
+ }
return retval;
}
@@ -449,7 +498,7 @@ static int
atmel_ac97c_playback_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct atmel_ac97c *chip = snd_pcm_substream_chip(substream);
- unsigned long camr;
+ unsigned long camr, ptcr = 0;
int retval = 0;
camr = ac97c_readl(chip, CAMR);
@@ -458,15 +507,22 @@ atmel_ac97c_playback_trigger(struct snd_pcm_substream *substream, int cmd)
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: /* fall through */
case SNDRV_PCM_TRIGGER_RESUME: /* fall through */
case SNDRV_PCM_TRIGGER_START:
- retval = dw_dma_cyclic_start(chip->dma.tx_chan);
- if (retval)
- goto out;
- camr |= AC97C_CMR_CENA;
+ if (cpu_is_at32ap7000()) {
+ retval = dw_dma_cyclic_start(chip->dma.tx_chan);
+ if (retval)
+ goto out;
+ } else {
+ ptcr = ATMEL_PDC_TXTEN;
+ }
+ camr |= AC97C_CMR_CENA | AC97C_CSR_ENDTX;
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH: /* fall through */
case SNDRV_PCM_TRIGGER_SUSPEND: /* fall through */
case SNDRV_PCM_TRIGGER_STOP:
- dw_dma_cyclic_stop(chip->dma.tx_chan);
+ if (cpu_is_at32ap7000())
+ dw_dma_cyclic_stop(chip->dma.tx_chan);
+ else
+ ptcr |= ATMEL_PDC_TXTDIS;
if (chip->opened <= 1)
camr &= ~AC97C_CMR_CENA;
break;
@@ -476,6 +532,8 @@ atmel_ac97c_playback_trigger(struct snd_pcm_substream *substream, int cmd)
}
ac97c_writel(chip, CAMR, camr);
+ if (!cpu_is_at32ap7000())
+ writel(ptcr, chip->regs + ATMEL_PDC_PTCR);
out:
return retval;
}
@@ -484,24 +542,32 @@ static int
atmel_ac97c_capture_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct atmel_ac97c *chip = snd_pcm_substream_chip(substream);
- unsigned long camr;
+ unsigned long camr, ptcr = 0;
int retval = 0;
camr = ac97c_readl(chip, CAMR);
+ ptcr = readl(chip->regs + ATMEL_PDC_PTSR);
switch (cmd) {
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: /* fall through */
case SNDRV_PCM_TRIGGER_RESUME: /* fall through */
case SNDRV_PCM_TRIGGER_START:
- retval = dw_dma_cyclic_start(chip->dma.rx_chan);
- if (retval)
- goto out;
- camr |= AC97C_CMR_CENA;
+ if (cpu_is_at32ap7000()) {
+ retval = dw_dma_cyclic_start(chip->dma.rx_chan);
+ if (retval)
+ goto out;
+ } else {
+ ptcr = ATMEL_PDC_RXTEN;
+ }
+ camr |= AC97C_CMR_CENA | AC97C_CSR_ENDRX;
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH: /* fall through */
case SNDRV_PCM_TRIGGER_SUSPEND: /* fall through */
case SNDRV_PCM_TRIGGER_STOP:
- dw_dma_cyclic_stop(chip->dma.rx_chan);
+ if (cpu_is_at32ap7000())
+ dw_dma_cyclic_stop(chip->dma.rx_chan);
+ else
+ ptcr |= (ATMEL_PDC_RXTDIS);
if (chip->opened <= 1)
camr &= ~AC97C_CMR_CENA;
break;
@@ -511,6 +577,8 @@ atmel_ac97c_capture_trigger(struct snd_pcm_substream *substream, int cmd)
}
ac97c_writel(chip, CAMR, camr);
+ if (!cpu_is_at32ap7000())
+ writel(ptcr, chip->regs + ATMEL_PDC_PTCR);
out:
return retval;
}
@@ -523,7 +591,10 @@ atmel_ac97c_playback_pointer(struct snd_pcm_substream *substream)
snd_pcm_uframes_t frames;
unsigned long bytes;
- bytes = dw_dma_get_src_addr(chip->dma.tx_chan);
+ if (cpu_is_at32ap7000())
+ bytes = dw_dma_get_src_addr(chip->dma.tx_chan);
+ else
+ bytes = readl(chip->regs + ATMEL_PDC_TPR);
bytes -= runtime->dma_addr;
frames = bytes_to_frames(runtime, bytes);
@@ -540,7 +611,10 @@ atmel_ac97c_capture_pointer(struct snd_pcm_substream *substream)
snd_pcm_uframes_t frames;
unsigned long bytes;
- bytes = dw_dma_get_dst_addr(chip->dma.rx_chan);
+ if (cpu_is_at32ap7000())
+ bytes = dw_dma_get_dst_addr(chip->dma.rx_chan);
+ else
+ bytes = readl(chip->regs + ATMEL_PDC_RPR);
bytes -= runtime->dma_addr;
frames = bytes_to_frames(runtime, bytes);
@@ -578,8 +652,11 @@ static irqreturn_t atmel_ac97c_interrupt(int irq, void *dev)
u32 sr = ac97c_readl(chip, SR);
u32 casr = ac97c_readl(chip, CASR);
u32 cosr = ac97c_readl(chip, COSR);
+ u32 camr = ac97c_readl(chip, CAMR);
if (sr & AC97C_SR_CAEVT) {
+ struct snd_pcm_runtime *runtime;
+ int offset, next_period, block_size;
dev_info(&chip->pdev->dev, "channel A event%s%s%s%s%s%s\n",
casr & AC97C_CSR_OVRUN ? " OVRUN" : "",
casr & AC97C_CSR_RXRDY ? " RXRDY" : "",
@@ -587,6 +664,50 @@ static irqreturn_t atmel_ac97c_interrupt(int irq, void *dev)
casr & AC97C_CSR_TXEMPTY ? " TXEMPTY" : "",
casr & AC97C_CSR_TXRDY ? " TXRDY" : "",
!casr ? " NONE" : "");
+ if (!cpu_is_at32ap7000()) {
+ if ((casr & camr) & AC97C_CSR_ENDTX) {
+ runtime = chip->playback_substream->runtime;
+ block_size = frames_to_bytes(runtime,
+ runtime->period_size);
+ chip->playback_period++;
+
+ if (chip->playback_period == runtime->periods)
+ chip->playback_period = 0;
+ next_period = chip->playback_period + 1;
+ if (next_period == runtime->periods)
+ next_period = 0;
+
+ offset = block_size * next_period;
+
+ writel(runtime->dma_addr + offset,
+ chip->regs + ATMEL_PDC_TNPR);
+ writel(block_size / 2,
+ chip->regs + ATMEL_PDC_TNCR);
+
+ snd_pcm_period_elapsed(
+ chip->playback_substream);
+ }
+ if ((casr & camr) & AC97C_CSR_ENDRX) {
+ runtime = chip->capture_substream->runtime;
+ block_size = frames_to_bytes(runtime,
+ runtime->period_size);
+ chip->capture_period++;
+
+ if (chip->capture_period == runtime->periods)
+ chip->capture_period = 0;
+ next_period = chip->capture_period + 1;
+ if (next_period == runtime->periods)
+ next_period = 0;
+
+ offset = block_size * next_period;
+
+ writel(runtime->dma_addr + offset,
+ chip->regs + ATMEL_PDC_RNPR);
+ writel(block_size / 2,
+ chip->regs + ATMEL_PDC_RNCR);
+ snd_pcm_period_elapsed(chip->capture_substream);
+ }
+ }
retval = IRQ_HANDLED;
}
@@ -608,15 +729,50 @@ static irqreturn_t atmel_ac97c_interrupt(int irq, void *dev)
return retval;
}
+static struct ac97_pcm at91_ac97_pcm_defs[] __devinitdata = {
+ /* Playback */
+ {
+ .exclusive = 1,
+ .r = { {
+ .slots = ((1 << AC97_SLOT_PCM_LEFT)
+ | (1 << AC97_SLOT_PCM_RIGHT)),
+ } },
+ },
+ /* PCM in */
+ {
+ .stream = 1,
+ .exclusive = 1,
+ .r = { {
+ .slots = ((1 << AC97_SLOT_PCM_LEFT)
+ | (1 << AC97_SLOT_PCM_RIGHT)),
+ } }
+ },
+ /* Mic in */
+ {
+ .stream = 1,
+ .exclusive = 1,
+ .r = { {
+ .slots = (1<<AC97_SLOT_MIC),
+ } }
+ },
+};
+
static int __devinit atmel_ac97c_pcm_new(struct atmel_ac97c *chip)
{
struct snd_pcm *pcm;
struct snd_pcm_hardware hw = atmel_ac97c_hw;
- int capture, playback, retval;
+ int capture, playback, retval, err;
capture = test_bit(DMA_RX_CHAN_PRESENT, &chip->flags);
playback = test_bit(DMA_TX_CHAN_PRESENT, &chip->flags);
+ if (!cpu_is_at32ap7000()) {
+ err = snd_ac97_pcm_assign(chip->ac97_bus,
+ ARRAY_SIZE(at91_ac97_pcm_defs),
+ at91_ac97_pcm_defs);
+ if (err)
+ return err;
+ }
retval = snd_pcm_new(chip->card, chip->card->shortname,
chip->pdev->id, playback, capture, &pcm);
if (retval)
@@ -775,7 +931,12 @@ static int __devinit atmel_ac97c_probe(struct platform_device *pdev)
return -ENXIO;
}
- pclk = clk_get(&pdev->dev, "pclk");
+ if (cpu_is_at32ap7000()) {
+ pclk = clk_get(&pdev->dev, "pclk");
+ } else {
+ pclk = clk_get(&pdev->dev, "ac97_clk");
+ }
+
if (IS_ERR(pclk)) {
dev_dbg(&pdev->dev, "no peripheral clock\n");
return PTR_ERR(pclk);
@@ -844,43 +1005,52 @@ static int __devinit atmel_ac97c_probe(struct platform_device *pdev)
goto err_ac97_bus;
}
- if (pdata->rx_dws.dma_dev) {
- struct dw_dma_slave *dws = &pdata->rx_dws;
- dma_cap_mask_t mask;
+ if (cpu_is_at32ap7000()) {
+ if (pdata->rx_dws.dma_dev) {
+ struct dw_dma_slave *dws = &pdata->rx_dws;
+ dma_cap_mask_t mask;
- dws->rx_reg = regs->start + AC97C_CARHR + 2;
+ dws->rx_reg = regs->start + AC97C_CARHR + 2;
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
- chip->dma.rx_chan = dma_request_channel(mask, filter, dws);
+ chip->dma.rx_chan = dma_request_channel(mask, filter,
+ dws);
- dev_info(&chip->pdev->dev, "using %s for DMA RX\n",
+ dev_info(&chip->pdev->dev, "using %s for DMA RX\n",
dev_name(&chip->dma.rx_chan->dev->device));
- set_bit(DMA_RX_CHAN_PRESENT, &chip->flags);
- }
+ set_bit(DMA_RX_CHAN_PRESENT, &chip->flags);
+ }
- if (pdata->tx_dws.dma_dev) {
- struct dw_dma_slave *dws = &pdata->tx_dws;
- dma_cap_mask_t mask;
+ if (pdata->tx_dws.dma_dev) {
+ struct dw_dma_slave *dws = &pdata->tx_dws;
+ dma_cap_mask_t mask;
- dws->tx_reg = regs->start + AC97C_CATHR + 2;
+ dws->tx_reg = regs->start + AC97C_CATHR + 2;
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
- chip->dma.tx_chan = dma_request_channel(mask, filter, dws);
+ chip->dma.tx_chan = dma_request_channel(mask, filter,
+ dws);
- dev_info(&chip->pdev->dev, "using %s for DMA TX\n",
+ dev_info(&chip->pdev->dev, "using %s for DMA TX\n",
dev_name(&chip->dma.tx_chan->dev->device));
- set_bit(DMA_TX_CHAN_PRESENT, &chip->flags);
- }
+ set_bit(DMA_TX_CHAN_PRESENT, &chip->flags);
+ }
- if (!test_bit(DMA_RX_CHAN_PRESENT, &chip->flags) &&
- !test_bit(DMA_TX_CHAN_PRESENT, &chip->flags)) {
- dev_dbg(&pdev->dev, "DMA not available\n");
- retval = -ENODEV;
- goto err_dma;
+ if (!test_bit(DMA_RX_CHAN_PRESENT, &chip->flags) &&
+ !test_bit(DMA_TX_CHAN_PRESENT, &chip->flags)) {
+ dev_dbg(&pdev->dev, "DMA not available\n");
+ retval = -ENODEV;
+ goto err_dma;
+ }
+ } else {
+ /* Just pretend that we have a DMA channel (for at91 it is actually
+  * the PDC) */
+ set_bit(DMA_RX_CHAN_PRESENT, &chip->flags);
+ set_bit(DMA_TX_CHAN_PRESENT, &chip->flags);
}
retval = atmel_ac97c_pcm_new(chip);
@@ -897,20 +1067,22 @@ static int __devinit atmel_ac97c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, card);
- dev_info(&pdev->dev, "Atmel AC97 controller at 0x%p\n",
- chip->regs);
+ dev_info(&pdev->dev, "Atmel AC97 controller at 0x%p, irq = %d\n",
+ chip->regs, irq);
return 0;
err_dma:
- if (test_bit(DMA_RX_CHAN_PRESENT, &chip->flags))
- dma_release_channel(chip->dma.rx_chan);
- if (test_bit(DMA_TX_CHAN_PRESENT, &chip->flags))
- dma_release_channel(chip->dma.tx_chan);
- clear_bit(DMA_RX_CHAN_PRESENT, &chip->flags);
- clear_bit(DMA_TX_CHAN_PRESENT, &chip->flags);
- chip->dma.rx_chan = NULL;
- chip->dma.tx_chan = NULL;
+ if (cpu_is_at32ap7000()) {
+ if (test_bit(DMA_RX_CHAN_PRESENT, &chip->flags))
+ dma_release_channel(chip->dma.rx_chan);
+ if (test_bit(DMA_TX_CHAN_PRESENT, &chip->flags))
+ dma_release_channel(chip->dma.tx_chan);
+ clear_bit(DMA_RX_CHAN_PRESENT, &chip->flags);
+ clear_bit(DMA_TX_CHAN_PRESENT, &chip->flags);
+ chip->dma.rx_chan = NULL;
+ chip->dma.tx_chan = NULL;
+ }
err_ac97_bus:
snd_card_set_dev(card, NULL);
@@ -934,10 +1106,12 @@ static int atmel_ac97c_suspend(struct platform_device *pdev, pm_message_t msg)
struct snd_card *card = platform_get_drvdata(pdev);
struct atmel_ac97c *chip = card->private_data;
- if (test_bit(DMA_RX_READY, &chip->flags))
- dw_dma_cyclic_stop(chip->dma.rx_chan);
- if (test_bit(DMA_TX_READY, &chip->flags))
- dw_dma_cyclic_stop(chip->dma.tx_chan);
+ if (cpu_is_at32ap7000()) {
+ if (test_bit(DMA_RX_READY, &chip->flags))
+ dw_dma_cyclic_stop(chip->dma.rx_chan);
+ if (test_bit(DMA_TX_READY, &chip->flags))
+ dw_dma_cyclic_stop(chip->dma.tx_chan);
+ }
clk_disable(chip->pclk);
return 0;
@@ -949,11 +1123,12 @@ static int atmel_ac97c_resume(struct platform_device *pdev)
struct atmel_ac97c *chip = card->private_data;
clk_enable(chip->pclk);
- if (test_bit(DMA_RX_READY, &chip->flags))
- dw_dma_cyclic_start(chip->dma.rx_chan);
- if (test_bit(DMA_TX_READY, &chip->flags))
- dw_dma_cyclic_start(chip->dma.tx_chan);
-
+ if (cpu_is_at32ap7000()) {
+ if (test_bit(DMA_RX_READY, &chip->flags))
+ dw_dma_cyclic_start(chip->dma.rx_chan);
+ if (test_bit(DMA_TX_READY, &chip->flags))
+ dw_dma_cyclic_start(chip->dma.tx_chan);
+ }
return 0;
}
#else
@@ -978,14 +1153,16 @@ static int __devexit atmel_ac97c_remove(struct platform_device *pdev)
iounmap(chip->regs);
free_irq(chip->irq, chip);
- if (test_bit(DMA_RX_CHAN_PRESENT, &chip->flags))
- dma_release_channel(chip->dma.rx_chan);
- if (test_bit(DMA_TX_CHAN_PRESENT, &chip->flags))
- dma_release_channel(chip->dma.tx_chan);
- clear_bit(DMA_RX_CHAN_PRESENT, &chip->flags);
- clear_bit(DMA_TX_CHAN_PRESENT, &chip->flags);
- chip->dma.rx_chan = NULL;
- chip->dma.tx_chan = NULL;
+ if (cpu_is_at32ap7000()) {
+ if (test_bit(DMA_RX_CHAN_PRESENT, &chip->flags))
+ dma_release_channel(chip->dma.rx_chan);
+ if (test_bit(DMA_TX_CHAN_PRESENT, &chip->flags))
+ dma_release_channel(chip->dma.tx_chan);
+ clear_bit(DMA_RX_CHAN_PRESENT, &chip->flags);
+ clear_bit(DMA_TX_CHAN_PRESENT, &chip->flags);
+ chip->dma.rx_chan = NULL;
+ chip->dma.tx_chan = NULL;
+ }
snd_card_set_dev(card, NULL);
snd_card_free(card);
diff --git a/sound/core/control.c b/sound/core/control.c
index 439ce64f9d82..070aab490191 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -50,6 +50,10 @@ static int snd_ctl_open(struct inode *inode, struct file *file)
struct snd_ctl_file *ctl;
int err;
+ err = nonseekable_open(inode, file);
+ if (err < 0)
+ return err;
+
card = snd_lookup_minor_data(iminor(inode), SNDRV_DEVICE_TYPE_CONTROL);
if (!card) {
err = -ENODEV;
@@ -1388,6 +1392,7 @@ static const struct file_operations snd_ctl_f_ops =
.read = snd_ctl_read,
.open = snd_ctl_open,
.release = snd_ctl_release,
+ .llseek = no_llseek,
.poll = snd_ctl_poll,
.unlocked_ioctl = snd_ctl_ioctl,
.compat_ioctl = snd_ctl_ioctl_compat,
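
The snd_ctl_open()/snd_ctl_f_ops change above is the first of several identical conversions in this series (the OSS mixer, OSS PCM and native PCM opens below get the same treatment): the open handler calls nonseekable_open() and the file_operations gains .llseek = no_llseek, so seek attempts fail with -ESPIPE instead of silently succeeding. A minimal sketch of that pairing, with snd_example_open and snd_example_f_ops as hypothetical names; only nonseekable_open() and no_llseek are real VFS symbols here.

#include <linux/fs.h>
#include <linux/module.h>

/* Hypothetical device open; mirrors the pattern applied in this patch. */
static int snd_example_open(struct inode *inode, struct file *file)
{
	int err = nonseekable_open(inode, file); /* clears lseek/pread/pwrite modes */
	if (err < 0)
		return err;
	/* ... normal minor lookup and file->private_data setup would go here ... */
	return 0;
}

static const struct file_operations snd_example_f_ops = {
	.owner	= THIS_MODULE,
	.open	= snd_example_open,
	.llseek	= no_llseek,	/* lseek() on this file now returns -ESPIPE */
};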
diff --git a/sound/core/info.c b/sound/core/info.c
index cc4a53d4b7f8..b70564ed8b37 100644
--- a/sound/core/info.c
+++ b/sound/core/info.c
@@ -164,40 +164,44 @@ static loff_t snd_info_entry_llseek(struct file *file, loff_t offset, int orig)
{
struct snd_info_private_data *data;
struct snd_info_entry *entry;
- loff_t ret;
+ loff_t ret = -EINVAL, size;
data = file->private_data;
entry = data->entry;
- lock_kernel();
- switch (entry->content) {
- case SNDRV_INFO_CONTENT_TEXT:
- switch (orig) {
- case SEEK_SET:
- file->f_pos = offset;
- ret = file->f_pos;
- goto out;
- case SEEK_CUR:
- file->f_pos += offset;
- ret = file->f_pos;
- goto out;
- case SEEK_END:
- default:
- ret = -EINVAL;
- goto out;
- }
+ mutex_lock(&entry->access);
+ if (entry->content == SNDRV_INFO_CONTENT_DATA &&
+ entry->c.ops->llseek) {
+ offset = entry->c.ops->llseek(entry,
+ data->file_private_data,
+ file, offset, orig);
+ goto out;
+ }
+ if (entry->content == SNDRV_INFO_CONTENT_DATA)
+ size = entry->size;
+ else
+ size = 0;
+ switch (orig) {
+ case SEEK_SET:
break;
- case SNDRV_INFO_CONTENT_DATA:
- if (entry->c.ops->llseek) {
- ret = entry->c.ops->llseek(entry,
- data->file_private_data,
- file, offset, orig);
+ case SEEK_CUR:
+ offset += file->f_pos;
+ break;
+ case SEEK_END:
+ if (!size)
goto out;
- }
+ offset += size;
break;
- }
- ret = -ENXIO;
-out:
- unlock_kernel();
+ default:
+ goto out;
+ }
+ if (offset < 0)
+ goto out;
+ if (size && offset > size)
+ offset = size;
+ file->f_pos = offset;
+ ret = offset;
+ out:
+ mutex_unlock(&entry->access);
return ret;
}
@@ -232,10 +236,15 @@ static ssize_t snd_info_entry_read(struct file *file, char __user *buffer,
return -EFAULT;
break;
case SNDRV_INFO_CONTENT_DATA:
- if (entry->c.ops->read)
+ if (pos >= entry->size)
+ return 0;
+ if (entry->c.ops->read) {
+ size = entry->size - pos;
+ size = min(count, size);
size = entry->c.ops->read(entry,
data->file_private_data,
- file, buffer, count, pos);
+ file, buffer, size, pos);
+ }
break;
}
if ((ssize_t) size > 0)
@@ -282,10 +291,13 @@ static ssize_t snd_info_entry_write(struct file *file, const char __user *buffer
size = count;
break;
case SNDRV_INFO_CONTENT_DATA:
- if (entry->c.ops->write)
+ if (entry->c.ops->write && count > 0) {
+ size_t maxsize = entry->size - pos;
+ count = min(count, maxsize);
size = entry->c.ops->write(entry,
data->file_private_data,
file, buffer, count, pos);
+ }
break;
}
if ((ssize_t) size > 0)
diff --git a/sound/core/jack.c b/sound/core/jack.c
index 14b8a4ee690d..4902ae568730 100644
--- a/sound/core/jack.c
+++ b/sound/core/jack.c
@@ -24,7 +24,7 @@
#include <sound/jack.h>
#include <sound/core.h>
-static int jack_types[] = {
+static int jack_switch_types[] = {
SW_HEADPHONE_INSERT,
SW_MICROPHONE_INSERT,
SW_LINEOUT_INSERT,
@@ -56,7 +56,7 @@ static int snd_jack_dev_register(struct snd_device *device)
{
struct snd_jack *jack = device->device_data;
struct snd_card *card = device->card;
- int err;
+ int err, i;
snprintf(jack->name, sizeof(jack->name), "%s %s",
card->shortname, jack->id);
@@ -66,6 +66,19 @@ static int snd_jack_dev_register(struct snd_device *device)
if (!jack->input_dev->dev.parent)
jack->input_dev->dev.parent = snd_card_get_device_link(card);
+ /* Add capabilities for any keys that are enabled */
+ for (i = 0; i < ARRAY_SIZE(jack->key); i++) {
+ int testbit = SND_JACK_BTN_0 >> i;
+
+ if (!(jack->type & testbit))
+ continue;
+
+ if (!jack->key[i])
+ jack->key[i] = BTN_0 + i;
+
+ input_set_capability(jack->input_dev, EV_KEY, jack->key[i]);
+ }
+
err = input_register_device(jack->input_dev);
if (err == 0)
jack->registered = 1;
@@ -113,10 +126,10 @@ int snd_jack_new(struct snd_card *card, const char *id, int type,
jack->type = type;
- for (i = 0; i < ARRAY_SIZE(jack_types); i++)
+ for (i = 0; i < ARRAY_SIZE(jack_switch_types); i++)
if (type & (1 << i))
input_set_capability(jack->input_dev, EV_SW,
- jack_types[i]);
+ jack_switch_types[i]);
err = snd_device_new(card, SNDRV_DEV_JACK, jack, &ops);
if (err < 0)
@@ -152,6 +165,43 @@ void snd_jack_set_parent(struct snd_jack *jack, struct device *parent)
EXPORT_SYMBOL(snd_jack_set_parent);
/**
+ * snd_jack_set_key - Set a key mapping on a jack
+ *
+ * @jack: The jack to configure
+ * @type: Jack report type for this key
+ * @keytype: Input layer key type to be reported
+ *
+ * Map a SND_JACK_BTN_ button type to an input layer key, allowing
+ * reporting of keys on accessories via the jack abstraction. If no
+ * mapping is provided but keys are enabled in the jack type then
+ * BTN_n numeric buttons will be reported.
+ *
+ * Note that this is intended to be used by simple devices with small
+ * numbers of keys that can be reported. It is also possible to
+ * access the input device directly - devices with complex input
+ * capabilities on accessories should consider doing this rather than
+ * using this abstraction.
+ *
+ * This function may only be called prior to registration of the jack.
+ */
+int snd_jack_set_key(struct snd_jack *jack, enum snd_jack_types type,
+ int keytype)
+{
+ int key = fls(SND_JACK_BTN_0) - fls(type);
+
+ WARN_ON(jack->registered);
+
+ if (!keytype || key >= ARRAY_SIZE(jack->key))
+ return -EINVAL;
+
+ jack->type |= type;
+ jack->key[key] = keytype;
+
+ return 0;
+}
+EXPORT_SYMBOL(snd_jack_set_key);
+
+/**
* snd_jack_report - Report the current status of a jack
*
* @jack: The jack to report status for
@@ -164,10 +214,19 @@ void snd_jack_report(struct snd_jack *jack, int status)
if (!jack)
return;
- for (i = 0; i < ARRAY_SIZE(jack_types); i++) {
+ for (i = 0; i < ARRAY_SIZE(jack->key); i++) {
+ int testbit = SND_JACK_BTN_0 >> i;
+
+ if (jack->type & testbit)
+ input_report_key(jack->input_dev, jack->key[i],
+ status & testbit);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(jack_switch_types); i++) {
int testbit = 1 << i;
if (jack->type & testbit)
- input_report_switch(jack->input_dev, jack_types[i],
+ input_report_switch(jack->input_dev,
+ jack_switch_types[i],
status & testbit);
}
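
The newly exported snd_jack_set_key() above is meant to be called by a driver between snd_jack_new() and card registration, so the key capability is in place when the input device is registered. A hedged example of the intended flow, assuming this kernel generation's snd_jack_new(card, id, type, &jack) signature; the card, jack pointer and detect path are placeholders.

/*
 * Illustration only: map the headset button to KEY_MEDIA instead of the
 * default BTN_0.  Error handling and surrounding driver code are elided.
 */
static int example_register_headset_jack(struct snd_card *card,
					 struct snd_jack **jack)
{
	int err;

	err = snd_jack_new(card, "Headset Jack",
			   SND_JACK_HEADSET | SND_JACK_BTN_0, jack);
	if (err < 0)
		return err;

	/* must happen before snd_card_register() registers the input device */
	return snd_jack_set_key(*jack, SND_JACK_BTN_0, KEY_MEDIA);
}

/* later, from the jack detection handler: */
/*   snd_jack_report(jack, SND_JACK_HEADSET | SND_JACK_BTN_0);  button pressed  */
/*   snd_jack_report(jack, SND_JACK_HEADSET);                   button released */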
diff --git a/sound/core/oss/mixer_oss.c b/sound/core/oss/mixer_oss.c
index 54e2eb56e4c2..f50ebf20df96 100644
--- a/sound/core/oss/mixer_oss.c
+++ b/sound/core/oss/mixer_oss.c
@@ -43,6 +43,10 @@ static int snd_mixer_oss_open(struct inode *inode, struct file *file)
struct snd_mixer_oss_file *fmixer;
int err;
+ err = nonseekable_open(inode, file);
+ if (err < 0)
+ return err;
+
card = snd_lookup_oss_minor_data(iminor(inode),
SNDRV_OSS_DEVICE_TYPE_MIXER);
if (card == NULL)
@@ -397,6 +401,7 @@ static const struct file_operations snd_mixer_oss_f_ops =
.owner = THIS_MODULE,
.open = snd_mixer_oss_open,
.release = snd_mixer_oss_release,
+ .llseek = no_llseek,
.unlocked_ioctl = snd_mixer_oss_ioctl,
.compat_ioctl = snd_mixer_oss_ioctl_compat,
};
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index 82d4e3329b3d..5c8c7dff8ede 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -2379,6 +2379,10 @@ static int snd_pcm_oss_open(struct inode *inode, struct file *file)
int nonblock;
wait_queue_t wait;
+ err = nonseekable_open(inode, file);
+ if (err < 0)
+ return err;
+
pcm = snd_lookup_oss_minor_data(iminor(inode),
SNDRV_OSS_DEVICE_TYPE_PCM);
if (pcm == NULL) {
@@ -2977,6 +2981,7 @@ static const struct file_operations snd_pcm_oss_f_reg =
.write = snd_pcm_oss_write,
.open = snd_pcm_oss_open,
.release = snd_pcm_oss_release,
+ .llseek = no_llseek,
.poll = snd_pcm_oss_poll,
.unlocked_ioctl = snd_pcm_oss_ioctl,
.compat_ioctl = snd_pcm_oss_ioctl_compat,
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index 0d428d0896db..cbe815dfbdc8 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -648,9 +648,6 @@ int snd_pcm_new_stream(struct snd_pcm *pcm, int stream, int substream_count)
substream->number = idx;
substream->stream = stream;
sprintf(substream->name, "subdevice #%i", idx);
- snprintf(substream->latency_id, sizeof(substream->latency_id),
- "ALSA-PCM%d-%d%c%d", pcm->card->number, pcm->device,
- (stream ? 'c' : 'p'), idx);
substream->buffer_bytes_max = UINT_MAX;
if (prev == NULL)
pstr->substream = substream;
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 872887624030..1bb0e23df35c 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -481,11 +481,13 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
snd_pcm_timer_resolution_change(substream);
runtime->status->state = SNDRV_PCM_STATE_SETUP;
- pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY,
- substream->latency_id);
+ if (substream->latency_pm_qos_req) {
+ pm_qos_remove_request(substream->latency_pm_qos_req);
+ substream->latency_pm_qos_req = NULL;
+ }
if ((usecs = period_to_usecs(runtime)) >= 0)
- pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY,
- substream->latency_id, usecs);
+ substream->latency_pm_qos_req = pm_qos_add_request(
+ PM_QOS_CPU_DMA_LATENCY, usecs);
return 0;
_error:
/* hardware might be unuseable from this time,
@@ -540,8 +542,8 @@ static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
if (substream->ops->hw_free)
result = substream->ops->hw_free(substream);
runtime->status->state = SNDRV_PCM_STATE_OPEN;
- pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY,
- substream->latency_id);
+ pm_qos_remove_request(substream->latency_pm_qos_req);
+ substream->latency_pm_qos_req = NULL;
return result;
}
@@ -2107,7 +2109,9 @@ static int snd_pcm_open_file(struct file *file,
static int snd_pcm_playback_open(struct inode *inode, struct file *file)
{
struct snd_pcm *pcm;
-
+ int err = nonseekable_open(inode, file);
+ if (err < 0)
+ return err;
pcm = snd_lookup_minor_data(iminor(inode),
SNDRV_DEVICE_TYPE_PCM_PLAYBACK);
return snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_PLAYBACK);
@@ -2116,7 +2120,9 @@ static int snd_pcm_playback_open(struct inode *inode, struct file *file)
static int snd_pcm_capture_open(struct inode *inode, struct file *file)
{
struct snd_pcm *pcm;
-
+ int err = nonseekable_open(inode, file);
+ if (err < 0)
+ return err;
pcm = snd_lookup_minor_data(iminor(inode),
SNDRV_DEVICE_TYPE_PCM_CAPTURE);
return snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_CAPTURE);
@@ -3303,18 +3309,13 @@ static int snd_pcm_fasync(int fd, struct file * file, int on)
struct snd_pcm_file * pcm_file;
struct snd_pcm_substream *substream;
struct snd_pcm_runtime *runtime;
- int err = -ENXIO;
- lock_kernel();
pcm_file = file->private_data;
substream = pcm_file->substream;
if (PCM_RUNTIME_CHECK(substream))
- goto out;
+ return -ENXIO;
runtime = substream->runtime;
- err = fasync_helper(fd, file, on, &runtime->fasync);
-out:
- unlock_kernel();
- return err;
+ return fasync_helper(fd, file, on, &runtime->fasync);
}
/*
@@ -3434,14 +3435,28 @@ out:
#endif /* CONFIG_SND_SUPPORT_OLD_API */
#ifndef CONFIG_MMU
-unsigned long dummy_get_unmapped_area(struct file *file, unsigned long addr,
- unsigned long len, unsigned long pgoff,
- unsigned long flags)
-{
- return 0;
+static unsigned long snd_pcm_get_unmapped_area(struct file *file,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+ unsigned long flags)
+{
+ struct snd_pcm_file *pcm_file = file->private_data;
+ struct snd_pcm_substream *substream = pcm_file->substream;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ unsigned long offset = pgoff << PAGE_SHIFT;
+
+ switch (offset) {
+ case SNDRV_PCM_MMAP_OFFSET_STATUS:
+ return (unsigned long)runtime->status;
+ case SNDRV_PCM_MMAP_OFFSET_CONTROL:
+ return (unsigned long)runtime->control;
+ default:
+ return (unsigned long)runtime->dma_area + offset;
+ }
}
#else
-# define dummy_get_unmapped_area NULL
+# define snd_pcm_get_unmapped_area NULL
#endif
/*
@@ -3455,12 +3470,13 @@ const struct file_operations snd_pcm_f_ops[2] = {
.aio_write = snd_pcm_aio_write,
.open = snd_pcm_playback_open,
.release = snd_pcm_release,
+ .llseek = no_llseek,
.poll = snd_pcm_playback_poll,
.unlocked_ioctl = snd_pcm_playback_ioctl,
.compat_ioctl = snd_pcm_ioctl_compat,
.mmap = snd_pcm_mmap,
.fasync = snd_pcm_fasync,
- .get_unmapped_area = dummy_get_unmapped_area,
+ .get_unmapped_area = snd_pcm_get_unmapped_area,
},
{
.owner = THIS_MODULE,
@@ -3468,11 +3484,12 @@ const struct file_operations snd_pcm_f_ops[2] = {
.aio_read = snd_pcm_aio_read,
.open = snd_pcm_capture_open,
.release = snd_pcm_release,
+ .llseek = no_llseek,
.poll = snd_pcm_capture_poll,
.unlocked_ioctl = snd_pcm_capture_ioctl,
.compat_ioctl = snd_pcm_ioctl_compat,
.mmap = snd_pcm_mmap,
.fasync = snd_pcm_fasync,
- .get_unmapped_area = dummy_get_unmapped_area,
+ .get_unmapped_area = snd_pcm_get_unmapped_area,
}
};
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index 0f5a194695d9..eb68326c37d4 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -376,6 +376,10 @@ static int snd_rawmidi_open(struct inode *inode, struct file *file)
if ((file->f_flags & O_APPEND) && !(file->f_flags & O_NONBLOCK))
return -EINVAL; /* invalid combination */
+ err = nonseekable_open(inode, file);
+ if (err < 0)
+ return err;
+
if (maj == snd_major) {
rmidi = snd_lookup_minor_data(iminor(inode),
SNDRV_DEVICE_TYPE_RAWMIDI);
@@ -1391,6 +1395,7 @@ static const struct file_operations snd_rawmidi_f_ops =
.write = snd_rawmidi_write,
.open = snd_rawmidi_open,
.release = snd_rawmidi_release,
+ .llseek = no_llseek,
.poll = snd_rawmidi_poll,
.unlocked_ioctl = snd_rawmidi_ioctl,
.compat_ioctl = snd_rawmidi_ioctl_compat,
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 48eca9ff9ee7..99a485f13648 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -318,6 +318,11 @@ static int snd_seq_open(struct inode *inode, struct file *file)
int c, mode; /* client id */
struct snd_seq_client *client;
struct snd_seq_user_client *user;
+ int err;
+
+ err = nonseekable_open(inode, file);
+ if (err < 0)
+ return err;
if (mutex_lock_interruptible(&register_mutex))
return -ERESTARTSYS;
@@ -2550,6 +2555,7 @@ static const struct file_operations snd_seq_f_ops =
.write = snd_seq_write,
.open = snd_seq_open,
.release = snd_seq_release,
+ .llseek = no_llseek,
.poll = snd_seq_poll,
.unlocked_ioctl = snd_seq_ioctl,
.compat_ioctl = snd_seq_ioctl_compat,
diff --git a/sound/core/sound.c b/sound/core/sound.c
index 563d1967a0ad..ac42af42b787 100644
--- a/sound/core/sound.c
+++ b/sound/core/sound.c
@@ -120,7 +120,29 @@ void *snd_lookup_minor_data(unsigned int minor, int type)
EXPORT_SYMBOL(snd_lookup_minor_data);
-static int __snd_open(struct inode *inode, struct file *file)
+#ifdef CONFIG_MODULES
+static struct snd_minor *autoload_device(unsigned int minor)
+{
+ int dev;
+ mutex_unlock(&sound_mutex); /* release lock temporarily */
+ dev = SNDRV_MINOR_DEVICE(minor);
+ if (dev == SNDRV_MINOR_CONTROL) {
+ /* /dev/aloadC? */
+ int card = SNDRV_MINOR_CARD(minor);
+ if (snd_cards[card] == NULL)
+ snd_request_card(card);
+ } else if (dev == SNDRV_MINOR_GLOBAL) {
+ /* /dev/aloadSEQ */
+ snd_request_other(minor);
+ }
+ mutex_lock(&sound_mutex); /* reacquire lock */
+ return snd_minors[minor];
+}
+#else /* !CONFIG_MODULES */
+#define autoload_device(minor) NULL
+#endif /* CONFIG_MODULES */
+
+static int snd_open(struct inode *inode, struct file *file)
{
unsigned int minor = iminor(inode);
struct snd_minor *mptr = NULL;
@@ -129,55 +151,36 @@ static int __snd_open(struct inode *inode, struct file *file)
if (minor >= ARRAY_SIZE(snd_minors))
return -ENODEV;
+ mutex_lock(&sound_mutex);
mptr = snd_minors[minor];
if (mptr == NULL) {
-#ifdef CONFIG_MODULES
- int dev = SNDRV_MINOR_DEVICE(minor);
- if (dev == SNDRV_MINOR_CONTROL) {
- /* /dev/aloadC? */
- int card = SNDRV_MINOR_CARD(minor);
- if (snd_cards[card] == NULL)
- snd_request_card(card);
- } else if (dev == SNDRV_MINOR_GLOBAL) {
- /* /dev/aloadSEQ */
- snd_request_other(minor);
- }
-#ifndef CONFIG_SND_DYNAMIC_MINORS
- /* /dev/snd/{controlC?,seq} */
- mptr = snd_minors[minor];
- if (mptr == NULL)
-#endif
-#endif
+ mptr = autoload_device(minor);
+ if (!mptr) {
+ mutex_unlock(&sound_mutex);
return -ENODEV;
+ }
}
old_fops = file->f_op;
file->f_op = fops_get(mptr->f_ops);
if (file->f_op == NULL) {
file->f_op = old_fops;
- return -ENODEV;
+ err = -ENODEV;
}
- if (file->f_op->open)
+ mutex_unlock(&sound_mutex);
+ if (err < 0)
+ return err;
+
+ if (file->f_op->open) {
err = file->f_op->open(inode, file);
- if (err) {
- fops_put(file->f_op);
- file->f_op = fops_get(old_fops);
+ if (err) {
+ fops_put(file->f_op);
+ file->f_op = fops_get(old_fops);
+ }
}
fops_put(old_fops);
return err;
}
-
-/* BKL pushdown: nasty #ifdef avoidance wrapper */
-static int snd_open(struct inode *inode, struct file *file)
-{
- int ret;
-
- lock_kernel();
- ret = __snd_open(inode, file);
- unlock_kernel();
- return ret;
-}
-
static const struct file_operations snd_fops =
{
.owner = THIS_MODULE,
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 5040c7b862fe..13afb60999b9 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -1238,6 +1238,11 @@ static void snd_timer_user_tinterrupt(struct snd_timer_instance *timeri,
static int snd_timer_user_open(struct inode *inode, struct file *file)
{
struct snd_timer_user *tu;
+ int err;
+
+ err = nonseekable_open(inode, file);
+ if (err < 0)
+ return err;
tu = kzalloc(sizeof(*tu), GFP_KERNEL);
if (tu == NULL)
@@ -1922,6 +1927,7 @@ static const struct file_operations snd_timer_f_ops =
.read = snd_timer_user_read,
.open = snd_timer_user_open,
.release = snd_timer_user_release,
+ .llseek = no_llseek,
.poll = snd_timer_user_poll,
.unlocked_ioctl = snd_timer_user_ioctl,
.compat_ioctl = snd_timer_user_ioctl_compat,
diff --git a/sound/drivers/opl4/opl4_proc.c b/sound/drivers/opl4/opl4_proc.c
index 1679300b7583..df850b8830a5 100644
--- a/sound/drivers/opl4/opl4_proc.c
+++ b/sound/drivers/opl4/opl4_proc.c
@@ -49,77 +49,45 @@ static int snd_opl4_mem_proc_release(struct snd_info_entry *entry,
return 0;
}
-static long snd_opl4_mem_proc_read(struct snd_info_entry *entry, void *file_private_data,
- struct file *file, char __user *_buf,
- unsigned long count, unsigned long pos)
+static ssize_t snd_opl4_mem_proc_read(struct snd_info_entry *entry,
+ void *file_private_data,
+ struct file *file, char __user *_buf,
+ size_t count, loff_t pos)
{
struct snd_opl4 *opl4 = entry->private_data;
- long size;
char* buf;
- size = count;
- if (pos + size > entry->size)
- size = entry->size - pos;
- if (size > 0) {
- buf = vmalloc(size);
- if (!buf)
- return -ENOMEM;
- snd_opl4_read_memory(opl4, buf, pos, size);
- if (copy_to_user(_buf, buf, size)) {
- vfree(buf);
- return -EFAULT;
- }
+ buf = vmalloc(count);
+ if (!buf)
+ return -ENOMEM;
+ snd_opl4_read_memory(opl4, buf, pos, count);
+ if (copy_to_user(_buf, buf, count)) {
vfree(buf);
- return size;
+ return -EFAULT;
}
- return 0;
+ vfree(buf);
+ return count;
}
-static long snd_opl4_mem_proc_write(struct snd_info_entry *entry, void *file_private_data,
- struct file *file, const char __user *_buf,
- unsigned long count, unsigned long pos)
+static ssize_t snd_opl4_mem_proc_write(struct snd_info_entry *entry,
+ void *file_private_data,
+ struct file *file,
+ const char __user *_buf,
+ size_t count, loff_t pos)
{
struct snd_opl4 *opl4 = entry->private_data;
- long size;
char *buf;
- size = count;
- if (pos + size > entry->size)
- size = entry->size - pos;
- if (size > 0) {
- buf = vmalloc(size);
- if (!buf)
- return -ENOMEM;
- if (copy_from_user(buf, _buf, size)) {
- vfree(buf);
- return -EFAULT;
- }
- snd_opl4_write_memory(opl4, buf, pos, size);
+ buf = vmalloc(count);
+ if (!buf)
+ return -ENOMEM;
+ if (copy_from_user(buf, _buf, count)) {
vfree(buf);
- return size;
- }
- return 0;
-}
-
-static long long snd_opl4_mem_proc_llseek(struct snd_info_entry *entry, void *file_private_data,
- struct file *file, long long offset, int orig)
-{
- switch (orig) {
- case SEEK_SET:
- file->f_pos = offset;
- break;
- case SEEK_CUR:
- file->f_pos += offset;
- break;
- case SEEK_END: /* offset is negative */
- file->f_pos = entry->size + offset;
- break;
- default:
- return -EINVAL;
+ return -EFAULT;
}
- if (file->f_pos > entry->size)
- file->f_pos = entry->size;
- return file->f_pos;
+ snd_opl4_write_memory(opl4, buf, pos, count);
+ vfree(buf);
+ return count;
}
static struct snd_info_entry_ops snd_opl4_mem_proc_ops = {
@@ -127,7 +95,6 @@ static struct snd_info_entry_ops snd_opl4_mem_proc_ops = {
.release = snd_opl4_mem_proc_release,
.read = snd_opl4_mem_proc_read,
.write = snd_opl4_mem_proc_write,
- .llseek = snd_opl4_mem_proc_llseek,
};
int snd_opl4_create_proc(struct snd_opl4 *opl4)
diff --git a/sound/drivers/pcsp/pcsp.h b/sound/drivers/pcsp/pcsp.h
index 1e123077923d..4ff6c8cc5077 100644
--- a/sound/drivers/pcsp/pcsp.h
+++ b/sound/drivers/pcsp/pcsp.h
@@ -16,7 +16,7 @@
#include <asm/i8253.h>
#else
#include <asm/8253pit.h>
-static DEFINE_SPINLOCK(i8253_lock);
+static DEFINE_RAW_SPINLOCK(i8253_lock);
#endif
#define PCSP_SOUND_VERSION 0x400 /* read 4.00 */
diff --git a/sound/drivers/pcsp/pcsp_input.c b/sound/drivers/pcsp/pcsp_input.c
index 0444cdeb4bec..b5e2b54c2604 100644
--- a/sound/drivers/pcsp/pcsp_input.c
+++ b/sound/drivers/pcsp/pcsp_input.c
@@ -21,7 +21,7 @@ static void pcspkr_do_sound(unsigned int count)
{
unsigned long flags;
- spin_lock_irqsave(&i8253_lock, flags);
+ raw_spin_lock_irqsave(&i8253_lock, flags);
if (count) {
/* set command for counter 2, 2 byte write */
@@ -36,7 +36,7 @@ static void pcspkr_do_sound(unsigned int count)
outb(inb_p(0x61) & 0xFC, 0x61);
}
- spin_unlock_irqrestore(&i8253_lock, flags);
+ raw_spin_unlock_irqrestore(&i8253_lock, flags);
}
void pcspkr_stop_sound(void)
diff --git a/sound/drivers/pcsp/pcsp_lib.c b/sound/drivers/pcsp/pcsp_lib.c
index d77ffa9a9387..ce9e7d170c0d 100644
--- a/sound/drivers/pcsp/pcsp_lib.c
+++ b/sound/drivers/pcsp/pcsp_lib.c
@@ -66,7 +66,7 @@ static u64 pcsp_timer_update(struct snd_pcsp *chip)
timer_cnt = val * CUR_DIV() / 256;
if (timer_cnt && chip->enable) {
- spin_lock_irqsave(&i8253_lock, flags);
+ raw_spin_lock_irqsave(&i8253_lock, flags);
if (!nforce_wa) {
outb_p(chip->val61, 0x61);
outb_p(timer_cnt, 0x42);
@@ -75,7 +75,7 @@ static u64 pcsp_timer_update(struct snd_pcsp *chip)
outb(chip->val61 ^ 2, 0x61);
chip->thalf = 1;
}
- spin_unlock_irqrestore(&i8253_lock, flags);
+ raw_spin_unlock_irqrestore(&i8253_lock, flags);
}
chip->ns_rem = PCSP_PERIOD_NS();
@@ -159,10 +159,10 @@ static int pcsp_start_playing(struct snd_pcsp *chip)
return -EIO;
}
- spin_lock(&i8253_lock);
+ raw_spin_lock(&i8253_lock);
chip->val61 = inb(0x61) | 0x03;
outb_p(0x92, 0x43); /* binary, mode 1, LSB only, ch 2 */
- spin_unlock(&i8253_lock);
+ raw_spin_unlock(&i8253_lock);
atomic_set(&chip->timer_active, 1);
chip->thalf = 0;
@@ -179,11 +179,11 @@ static void pcsp_stop_playing(struct snd_pcsp *chip)
return;
atomic_set(&chip->timer_active, 0);
- spin_lock(&i8253_lock);
+ raw_spin_lock(&i8253_lock);
/* restore the timer */
outb_p(0xb6, 0x43); /* binary, mode 3, LSB/MSB, ch 2 */
outb(chip->val61 & 0xFC, 0x61);
- spin_unlock(&i8253_lock);
+ raw_spin_unlock(&i8253_lock);
}
/*
diff --git a/sound/i2c/i2c.c b/sound/i2c/i2c.c
index 5c0c77dd01c3..eb7c7d05a7c1 100644
--- a/sound/i2c/i2c.c
+++ b/sound/i2c/i2c.c
@@ -98,7 +98,8 @@ int snd_i2c_bus_create(struct snd_card *card, const char *name,
bus->master = master;
}
strlcpy(bus->name, name, sizeof(bus->name));
- if ((err = snd_device_new(card, SNDRV_DEV_BUS, bus, &ops)) < 0) {
+ err = snd_device_new(card, SNDRV_DEV_BUS, bus, &ops);
+ if (err < 0) {
snd_i2c_bus_free(bus);
return err;
}
@@ -246,7 +247,8 @@ static int snd_i2c_bit_sendbyte(struct snd_i2c_bus *bus, unsigned char data)
for (i = 7; i >= 0; i--)
snd_i2c_bit_send(bus, !!(data & (1 << i)));
- if ((err = snd_i2c_bit_ack(bus)) < 0)
+ err = snd_i2c_bit_ack(bus);
+ if (err < 0)
return err;
return 0;
}
@@ -278,12 +280,14 @@ static int snd_i2c_bit_sendbytes(struct snd_i2c_device *device,
if (device->flags & SND_I2C_DEVICE_ADDRTEN)
return -EIO; /* not yet implemented */
snd_i2c_bit_start(bus);
- if ((err = snd_i2c_bit_sendbyte(bus, device->addr << 1)) < 0) {
+ err = snd_i2c_bit_sendbyte(bus, device->addr << 1);
+ if (err < 0) {
snd_i2c_bit_hw_stop(bus);
return err;
}
while (count-- > 0) {
- if ((err = snd_i2c_bit_sendbyte(bus, *bytes++)) < 0) {
+ err = snd_i2c_bit_sendbyte(bus, *bytes++);
+ if (err < 0) {
snd_i2c_bit_hw_stop(bus);
return err;
}
@@ -302,12 +306,14 @@ static int snd_i2c_bit_readbytes(struct snd_i2c_device *device,
if (device->flags & SND_I2C_DEVICE_ADDRTEN)
return -EIO; /* not yet implemented */
snd_i2c_bit_start(bus);
- if ((err = snd_i2c_bit_sendbyte(bus, (device->addr << 1) | 1)) < 0) {
+ err = snd_i2c_bit_sendbyte(bus, (device->addr << 1) | 1);
+ if (err < 0) {
snd_i2c_bit_hw_stop(bus);
return err;
}
while (count-- > 0) {
- if ((err = snd_i2c_bit_readbyte(bus, count == 0)) < 0) {
+ err = snd_i2c_bit_readbyte(bus, count == 0);
+ if (err < 0) {
snd_i2c_bit_hw_stop(bus);
return err;
}
diff --git a/sound/isa/Kconfig b/sound/isa/Kconfig
index 755a0a5f0e3f..c6990c680796 100644
--- a/sound/isa/Kconfig
+++ b/sound/isa/Kconfig
@@ -128,26 +128,14 @@ config SND_CS4236
To compile this driver as a module, choose M here: the module
will be called snd-cs4236.
-config SND_ES968
- tristate "Generic ESS ES968 driver"
- depends on PNP
- select ISAPNP
- select SND_MPU401_UART
- select SND_SB8_DSP
- help
- Say Y here to include support for ESS AudioDrive ES968 chips.
-
- To compile this driver as a module, choose M here: the module
- will be called snd-es968.
-
config SND_ES1688
- tristate "Generic ESS ES688/ES1688 driver"
+ tristate "Generic ESS ES688/ES1688 and ES968 PnP driver"
select SND_OPL3_LIB
select SND_MPU401_UART
select SND_PCM
help
Say Y here to include support for ESS AudioDrive ES688 or
- ES1688 chips.
+ ES1688 chips. Also, this module supports cards with the ES968 PnP chip.
To compile this driver as a module, choose M here: the module
will be called snd-es1688.
diff --git a/sound/isa/es1688/es1688.c b/sound/isa/es1688/es1688.c
index 07df201ed8fa..fdcce311f80a 100644
--- a/sound/isa/es1688/es1688.c
+++ b/sound/isa/es1688/es1688.c
@@ -22,6 +22,7 @@
#include <linux/init.h>
#include <linux/err.h>
#include <linux/isa.h>
+#include <linux/isapnp.h>
#include <linux/time.h>
#include <linux/wait.h>
#include <linux/moduleparam.h>
@@ -45,8 +46,13 @@ MODULE_SUPPORTED_DEVICE("{{ESS,ES688 PnP AudioDrive,pnp:ESS0100},"
"{ESS,ES688 AudioDrive,pnp:ESS6881},"
"{ESS,ES1688 AudioDrive,pnp:ESS1681}}");
+MODULE_ALIAS("snd_es968");
+
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
+#ifdef CONFIG_PNP
+static int isapnp[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_ISAPNP;
+#endif
static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE; /* Enable this card */
static long port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* 0x220,0x240,0x260 */
static long fm_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* Usually 0x388 */
@@ -60,6 +66,10 @@ MODULE_PARM_DESC(index, "Index value for " CRD_NAME " soundcard.");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for " CRD_NAME " soundcard.");
module_param_array(enable, bool, NULL, 0444);
+#ifdef CONFIG_PNP
+module_param_array(isapnp, bool, NULL, 0444);
+MODULE_PARM_DESC(isapnp, "PnP detection for specified soundcard.");
+#endif
MODULE_PARM_DESC(enable, "Enable " CRD_NAME " soundcard.");
module_param_array(port, long, NULL, 0444);
MODULE_PARM_DESC(port, "Port # for " CRD_NAME " driver.");
@@ -74,14 +84,21 @@ MODULE_PARM_DESC(mpu_irq, "MPU-401 IRQ # for " CRD_NAME " driver.");
module_param_array(dma8, int, NULL, 0444);
MODULE_PARM_DESC(dma8, "8-bit DMA # for " CRD_NAME " driver.");
+#ifdef CONFIG_PNP
+#define is_isapnp_selected(dev) isapnp[dev]
+#else
+#define is_isapnp_selected(dev) 0
+#endif
+
static int __devinit snd_es1688_match(struct device *dev, unsigned int n)
{
- return enable[n];
+ return enable[n] && !is_isapnp_selected(n);
}
-static int __devinit snd_es1688_legacy_create(struct snd_card *card,
- struct device *dev, unsigned int n, struct snd_es1688 **rchip)
+static int __devinit snd_es1688_legacy_create(struct snd_card *card,
+ struct device *dev, unsigned int n)
{
+ struct snd_es1688 *chip = card->private_data;
static long possible_ports[] = {0x220, 0x240, 0x260};
static int possible_irqs[] = {5, 9, 10, 7, -1};
static int possible_dmas[] = {1, 3, 0, -1};
@@ -104,42 +121,33 @@ static int __devinit snd_es1688_legacy_create(struct snd_card *card,
}
if (port[n] != SNDRV_AUTO_PORT)
- return snd_es1688_create(card, port[n], mpu_port[n], irq[n],
- mpu_irq[n], dma8[n], ES1688_HW_AUTO, rchip);
+ return snd_es1688_create(card, chip, port[n], mpu_port[n],
+ irq[n], mpu_irq[n], dma8[n], ES1688_HW_AUTO);
i = 0;
do {
port[n] = possible_ports[i];
- error = snd_es1688_create(card, port[n], mpu_port[n], irq[n],
- mpu_irq[n], dma8[n], ES1688_HW_AUTO, rchip);
+ error = snd_es1688_create(card, chip, port[n], mpu_port[n],
+ irq[n], mpu_irq[n], dma8[n], ES1688_HW_AUTO);
} while (error < 0 && ++i < ARRAY_SIZE(possible_ports));
return error;
}
-static int __devinit snd_es1688_probe(struct device *dev, unsigned int n)
+static int __devinit snd_es1688_probe(struct snd_card *card, unsigned int n)
{
- struct snd_card *card;
- struct snd_es1688 *chip;
+ struct snd_es1688 *chip = card->private_data;
struct snd_opl3 *opl3;
struct snd_pcm *pcm;
int error;
- error = snd_card_create(index[n], id[n], THIS_MODULE, 0, &card);
+ error = snd_es1688_pcm(card, chip, 0, &pcm);
if (error < 0)
return error;
- error = snd_es1688_legacy_create(card, dev, n, &chip);
- if (error < 0)
- goto out;
-
- error = snd_es1688_pcm(chip, 0, &pcm);
+ error = snd_es1688_mixer(card, chip);
if (error < 0)
- goto out;
-
- error = snd_es1688_mixer(chip);
- if (error < 0)
- goto out;
+ return error;
strcpy(card->driver, "ES1688");
strcpy(card->shortname, pcm->name);
@@ -152,12 +160,12 @@ static int __devinit snd_es1688_probe(struct device *dev, unsigned int n)
if (fm_port[n] > 0) {
if (snd_opl3_create(card, fm_port[n], fm_port[n] + 2,
OPL3_HW_OPL3, 0, &opl3) < 0)
- dev_warn(dev,
+ dev_warn(card->dev,
"opl3 not detected at 0x%lx\n", fm_port[n]);
else {
error = snd_opl3_hwdep_new(opl3, 0, 1, NULL);
if (error < 0)
- goto out;
+ return error;
}
}
@@ -167,23 +175,41 @@ static int __devinit snd_es1688_probe(struct device *dev, unsigned int n)
chip->mpu_port, 0,
mpu_irq[n], IRQF_DISABLED, NULL);
if (error < 0)
- goto out;
+ return error;
}
+ return snd_card_register(card);
+}
+
+static int __devinit snd_es1688_isa_probe(struct device *dev, unsigned int n)
+{
+ struct snd_card *card;
+ int error;
+
+ error = snd_card_create(index[n], id[n], THIS_MODULE,
+ sizeof(struct snd_es1688), &card);
+ if (error < 0)
+ return error;
+
+ error = snd_es1688_legacy_create(card, dev, n);
+ if (error < 0)
+ goto out;
+
snd_card_set_dev(card, dev);
- error = snd_card_register(card);
+ error = snd_es1688_probe(card, n);
if (error < 0)
goto out;
dev_set_drvdata(dev, card);
- return 0;
-out: snd_card_free(card);
+ return 0;
+out:
+ snd_card_free(card);
return error;
}
-static int __devexit snd_es1688_remove(struct device *dev, unsigned int n)
+static int __devexit snd_es1688_isa_remove(struct device *dev, unsigned int n)
{
snd_card_free(dev_get_drvdata(dev));
dev_set_drvdata(dev, NULL);
@@ -192,8 +218,8 @@ static int __devexit snd_es1688_remove(struct device *dev, unsigned int n)
static struct isa_driver snd_es1688_driver = {
.match = snd_es1688_match,
- .probe = snd_es1688_probe,
- .remove = __devexit_p(snd_es1688_remove),
+ .probe = snd_es1688_isa_probe,
+ .remove = __devexit_p(snd_es1688_isa_remove),
#if 0 /* FIXME */
.suspend = snd_es1688_suspend,
.resume = snd_es1688_resume,
@@ -203,14 +229,140 @@ static struct isa_driver snd_es1688_driver = {
}
};
+static int snd_es968_pnp_is_probed;
+
+#ifdef CONFIG_PNP
+static int __devinit snd_card_es968_pnp(struct snd_card *card, unsigned int n,
+ struct pnp_card_link *pcard,
+ const struct pnp_card_device_id *pid)
+{
+ struct snd_es1688 *chip = card->private_data;
+ struct pnp_dev *pdev;
+ int error;
+
+ pdev = pnp_request_card_device(pcard, pid->devs[0].id, NULL);
+ if (pdev == NULL)
+ return -ENODEV;
+
+ error = pnp_activate_dev(pdev);
+ if (error < 0) {
+ snd_printk(KERN_ERR "ES968 pnp configure failure\n");
+ return error;
+ }
+ port[n] = pnp_port_start(pdev, 0);
+ dma8[n] = pnp_dma(pdev, 0);
+ irq[n] = pnp_irq(pdev, 0);
+
+ return snd_es1688_create(card, chip, port[n], mpu_port[n], irq[n],
+ mpu_irq[n], dma8[n], ES1688_HW_AUTO);
+}
+
+static int __devinit snd_es968_pnp_detect(struct pnp_card_link *pcard,
+ const struct pnp_card_device_id *pid)
+{
+ struct snd_card *card;
+ static unsigned int dev;
+ int error;
+ struct snd_es1688 *chip;
+
+ if (snd_es968_pnp_is_probed)
+ return -EBUSY;
+ for ( ; dev < SNDRV_CARDS; dev++) {
+ if (enable[dev] && isapnp[dev])
+ break;
+ }
+
+ error = snd_card_create(index[dev], id[dev], THIS_MODULE,
+ sizeof(struct snd_es1688), &card);
+ if (error < 0)
+ return error;
+ chip = card->private_data;
+
+ error = snd_card_es968_pnp(card, dev, pcard, pid);
+ if (error < 0) {
+ snd_card_free(card);
+ return error;
+ }
+ snd_card_set_dev(card, &pcard->card->dev);
+ error = snd_es1688_probe(card, dev);
+ if (error < 0)
+ return error;
+ pnp_set_card_drvdata(pcard, card);
+ snd_es968_pnp_is_probed = 1;
+ return 0;
+}
+
+static void __devexit snd_es968_pnp_remove(struct pnp_card_link * pcard)
+{
+ snd_card_free(pnp_get_card_drvdata(pcard));
+ pnp_set_card_drvdata(pcard, NULL);
+ snd_es968_pnp_is_probed = 0;
+}
+
+#ifdef CONFIG_PM
+static int snd_es968_pnp_suspend(struct pnp_card_link *pcard,
+ pm_message_t state)
+{
+ struct snd_card *card = pnp_get_card_drvdata(pcard);
+ struct snd_es1688 *chip = card->private_data;
+
+ snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
+ snd_pcm_suspend_all(chip->pcm);
+ return 0;
+}
+
+static int snd_es968_pnp_resume(struct pnp_card_link *pcard)
+{
+ struct snd_card *card = pnp_get_card_drvdata(pcard);
+ struct snd_es1688 *chip = card->private_data;
+
+ snd_es1688_reset(chip);
+ snd_power_change_state(card, SNDRV_CTL_POWER_D0);
+ return 0;
+}
+#endif
+
+static struct pnp_card_device_id snd_es968_pnpids[] = {
+ { .id = "ESS0968", .devs = { { "@@@0968" }, } },
+ { .id = "ESS0968", .devs = { { "ESS0968" }, } },
+ { .id = "", } /* end */
+};
+
+MODULE_DEVICE_TABLE(pnp_card, snd_es968_pnpids);
+
+static struct pnp_card_driver es968_pnpc_driver = {
+ .flags = PNP_DRIVER_RES_DISABLE,
+ .name = DEV_NAME " PnP",
+ .id_table = snd_es968_pnpids,
+ .probe = snd_es968_pnp_detect,
+ .remove = __devexit_p(snd_es968_pnp_remove),
+#ifdef CONFIG_PM
+ .suspend = snd_es968_pnp_suspend,
+ .resume = snd_es968_pnp_resume,
+#endif
+};
+#endif
+
static int __init alsa_card_es1688_init(void)
{
+#ifdef CONFIG_PNP
+ pnp_register_card_driver(&es968_pnpc_driver);
+ if (snd_es968_pnp_is_probed)
+ return 0;
+ pnp_unregister_card_driver(&es968_pnpc_driver);
+#endif
return isa_register_driver(&snd_es1688_driver, SNDRV_CARDS);
}
static void __exit alsa_card_es1688_exit(void)
{
- isa_unregister_driver(&snd_es1688_driver);
+ if (!snd_es968_pnp_is_probed) {
+ isa_unregister_driver(&snd_es1688_driver);
+ return;
+ }
+#ifdef CONFIG_PNP
+ pnp_unregister_card_driver(&es968_pnpc_driver);
+#endif
}
module_init(alsa_card_es1688_init);
diff --git a/sound/isa/es1688/es1688_lib.c b/sound/isa/es1688/es1688_lib.c
index c76bb00c9d15..07676200496a 100644
--- a/sound/isa/es1688/es1688_lib.c
+++ b/sound/isa/es1688/es1688_lib.c
@@ -99,7 +99,7 @@ static unsigned char snd_es1688_mixer_read(struct snd_es1688 *chip, unsigned cha
return result;
}
-static int snd_es1688_reset(struct snd_es1688 *chip)
+int snd_es1688_reset(struct snd_es1688 *chip)
{
int i;
@@ -115,6 +115,7 @@ static int snd_es1688_reset(struct snd_es1688 *chip)
snd_es1688_dsp_command(chip, 0xc6); /* enable extended mode */
return 0;
}
+EXPORT_SYMBOL(snd_es1688_reset);
static int snd_es1688_probe(struct snd_es1688 *chip)
{
@@ -620,7 +621,6 @@ static int snd_es1688_free(struct snd_es1688 *chip)
disable_dma(chip->dma8);
free_dma(chip->dma8);
}
- kfree(chip);
return 0;
}
@@ -638,23 +638,20 @@ static const char *snd_es1688_chip_id(struct snd_es1688 *chip)
}
int snd_es1688_create(struct snd_card *card,
+ struct snd_es1688 *chip,
unsigned long port,
unsigned long mpu_port,
int irq,
int mpu_irq,
int dma8,
- unsigned short hardware,
- struct snd_es1688 **rchip)
+ unsigned short hardware)
{
static struct snd_device_ops ops = {
.dev_free = snd_es1688_dev_free,
};
- struct snd_es1688 *chip;
int err;
- *rchip = NULL;
- chip = kzalloc(sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
return -ENOMEM;
chip->irq = -1;
@@ -662,25 +659,21 @@ int snd_es1688_create(struct snd_card *card,
if ((chip->res_port = request_region(port + 4, 12, "ES1688")) == NULL) {
snd_printk(KERN_ERR "es1688: can't grab port 0x%lx\n", port + 4);
- snd_es1688_free(chip);
return -EBUSY;
}
if (request_irq(irq, snd_es1688_interrupt, IRQF_DISABLED, "ES1688", (void *) chip)) {
snd_printk(KERN_ERR "es1688: can't grab IRQ %d\n", irq);
- snd_es1688_free(chip);
return -EBUSY;
}
chip->irq = irq;
if (request_dma(dma8, "ES1688")) {
snd_printk(KERN_ERR "es1688: can't grab DMA8 %d\n", dma8);
- snd_es1688_free(chip);
return -EBUSY;
}
chip->dma8 = dma8;
spin_lock_init(&chip->reg_lock);
spin_lock_init(&chip->mixer_lock);
- chip->card = card;
chip->port = port;
mpu_port &= ~0x000f;
if (mpu_port < 0x300 || mpu_port > 0x330)
@@ -689,23 +682,16 @@ int snd_es1688_create(struct snd_card *card,
chip->mpu_irq = mpu_irq;
chip->hardware = hardware;
- if ((err = snd_es1688_probe(chip)) < 0) {
- snd_es1688_free(chip);
+ err = snd_es1688_probe(chip);
+ if (err < 0)
return err;
- }
- if ((err = snd_es1688_init(chip, 1)) < 0) {
- snd_es1688_free(chip);
- return err;
- }
- /* Register device */
- if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
- snd_es1688_free(chip);
+ err = snd_es1688_init(chip, 1);
+ if (err < 0)
return err;
- }
- *rchip = chip;
- return 0;
+ /* Register device */
+ return snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
}
static struct snd_pcm_ops snd_es1688_playback_ops = {
@@ -730,12 +716,14 @@ static struct snd_pcm_ops snd_es1688_capture_ops = {
.pointer = snd_es1688_capture_pointer,
};
-int snd_es1688_pcm(struct snd_es1688 * chip, int device, struct snd_pcm ** rpcm)
+int snd_es1688_pcm(struct snd_card *card, struct snd_es1688 *chip,
+ int device, struct snd_pcm **rpcm)
{
struct snd_pcm *pcm;
int err;
- if ((err = snd_pcm_new(chip->card, "ESx688", device, 1, 1, &pcm)) < 0)
+ err = snd_pcm_new(card, "ESx688", device, 1, 1, &pcm);
+ if (err < 0)
return err;
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_es1688_playback_ops);
@@ -1009,18 +997,15 @@ static unsigned char snd_es1688_init_table[][2] = {
{ ES1688_REC_DEV, 0x17 }
};
-int snd_es1688_mixer(struct snd_es1688 *chip)
+int snd_es1688_mixer(struct snd_card *card, struct snd_es1688 *chip)
{
- struct snd_card *card;
unsigned int idx;
int err;
unsigned char reg, val;
- if (snd_BUG_ON(!chip || !chip->card))
+ if (snd_BUG_ON(!chip || !card))
return -EINVAL;
- card = chip->card;
-
strcpy(card->mixername, snd_es1688_chip_id(chip));
for (idx = 0; idx < ARRAY_SIZE(snd_es1688_controls); idx++) {
diff --git a/sound/isa/gus/gus_mem_proc.c b/sound/isa/gus/gus_mem_proc.c
index 2803e227aec9..2ccb3fadd7be 100644
--- a/sound/isa/gus/gus_mem_proc.c
+++ b/sound/isa/gus/gus_mem_proc.c
@@ -31,52 +31,21 @@ struct gus_proc_private {
struct snd_gus_card * gus;
};
-static long snd_gf1_mem_proc_dump(struct snd_info_entry *entry, void *file_private_data,
- struct file *file, char __user *buf,
- unsigned long count, unsigned long pos)
+static ssize_t snd_gf1_mem_proc_dump(struct snd_info_entry *entry,
+ void *file_private_data,
+ struct file *file, char __user *buf,
+ size_t count, loff_t pos)
{
- long size;
struct gus_proc_private *priv = entry->private_data;
struct snd_gus_card *gus = priv->gus;
int err;
- size = count;
- if (pos + size > priv->size)
- size = (long)priv->size - pos;
- if (size > 0) {
- if ((err = snd_gus_dram_read(gus, buf, pos, size, priv->rom)) < 0)
- return err;
- return size;
- }
- return 0;
+ err = snd_gus_dram_read(gus, buf, pos, count, priv->rom);
+ if (err < 0)
+ return err;
+ return count;
}
-static long long snd_gf1_mem_proc_llseek(struct snd_info_entry *entry,
- void *private_file_data,
- struct file *file,
- long long offset,
- int orig)
-{
- struct gus_proc_private *priv = entry->private_data;
-
- switch (orig) {
- case SEEK_SET:
- file->f_pos = offset;
- break;
- case SEEK_CUR:
- file->f_pos += offset;
- break;
- case SEEK_END: /* offset is negative */
- file->f_pos = priv->size + offset;
- break;
- default:
- return -EINVAL;
- }
- if (file->f_pos > priv->size)
- file->f_pos = priv->size;
- return file->f_pos;
-}
-
static void snd_gf1_mem_proc_free(struct snd_info_entry *entry)
{
struct gus_proc_private *priv = entry->private_data;
@@ -85,7 +54,6 @@ static void snd_gf1_mem_proc_free(struct snd_info_entry *entry)
static struct snd_info_entry_ops snd_gf1_mem_proc_ops = {
.read = snd_gf1_mem_proc_dump,
- .llseek = snd_gf1_mem_proc_llseek,
};
int snd_gf1_mem_proc_init(struct snd_gus_card * gus)
diff --git a/sound/isa/gus/gusextreme.c b/sound/isa/gus/gusextreme.c
index 65e4b18581a6..008e8e5bfa37 100644
--- a/sound/isa/gus/gusextreme.c
+++ b/sound/isa/gus/gusextreme.c
@@ -95,7 +95,7 @@ static int __devinit snd_gusextreme_match(struct device *dev, unsigned int n)
}
static int __devinit snd_gusextreme_es1688_create(struct snd_card *card,
- struct device *dev, unsigned int n, struct snd_es1688 **rchip)
+ struct snd_es1688 *chip, struct device *dev, unsigned int n)
{
static long possible_ports[] = {0x220, 0x240, 0x260};
static int possible_irqs[] = {5, 9, 10, 7, -1};
@@ -119,14 +119,14 @@ static int __devinit snd_gusextreme_es1688_create(struct snd_card *card,
}
if (port[n] != SNDRV_AUTO_PORT)
- return snd_es1688_create(card, port[n], mpu_port[n], irq[n],
- mpu_irq[n], dma8[n], ES1688_HW_1688, rchip);
+ return snd_es1688_create(card, chip, port[n], mpu_port[n],
+ irq[n], mpu_irq[n], dma8[n], ES1688_HW_1688);
i = 0;
do {
port[n] = possible_ports[i];
- error = snd_es1688_create(card, port[n], mpu_port[n], irq[n],
- mpu_irq[n], dma8[n], ES1688_HW_1688, rchip);
+ error = snd_es1688_create(card, chip, port[n], mpu_port[n],
+ irq[n], mpu_irq[n], dma8[n], ES1688_HW_1688);
} while (error < 0 && ++i < ARRAY_SIZE(possible_ports));
return error;
@@ -206,9 +206,8 @@ static int __devinit snd_gusextreme_detect(struct snd_gus_card *gus,
return 0;
}
-static int __devinit snd_gusextreme_mixer(struct snd_es1688 *chip)
+static int __devinit snd_gusextreme_mixer(struct snd_card *card)
{
- struct snd_card *card = chip->card;
struct snd_ctl_elem_id id1, id2;
int error;
@@ -241,17 +240,20 @@ static int __devinit snd_gusextreme_probe(struct device *dev, unsigned int n)
struct snd_opl3 *opl3;
int error;
- error = snd_card_create(index[n], id[n], THIS_MODULE, 0, &card);
+ error = snd_card_create(index[n], id[n], THIS_MODULE,
+ sizeof(struct snd_es1688), &card);
if (error < 0)
return error;
+ es1688 = card->private_data;
+
if (mpu_port[n] == SNDRV_AUTO_PORT)
mpu_port[n] = 0;
if (mpu_irq[n] > 15)
mpu_irq[n] = -1;
- error = snd_gusextreme_es1688_create(card, dev, n, &es1688);
+ error = snd_gusextreme_es1688_create(card, es1688, dev, n);
if (error < 0)
goto out;
@@ -280,11 +282,11 @@ static int __devinit snd_gusextreme_probe(struct device *dev, unsigned int n)
}
gus->codec_flag = 1;
- error = snd_es1688_pcm(es1688, 0, NULL);
+ error = snd_es1688_pcm(card, es1688, 0, NULL);
if (error < 0)
goto out;
- error = snd_es1688_mixer(es1688);
+ error = snd_es1688_mixer(card, es1688);
if (error < 0)
goto out;
@@ -300,7 +302,7 @@ static int __devinit snd_gusextreme_probe(struct device *dev, unsigned int n)
if (error < 0)
goto out;
- error = snd_gusextreme_mixer(es1688);
+ error = snd_gusextreme_mixer(card);
if (error < 0)
goto out;
diff --git a/sound/isa/sb/Makefile b/sound/isa/sb/Makefile
index af3669681788..08b9fb974658 100644
--- a/sound/isa/sb/Makefile
+++ b/sound/isa/sb/Makefile
@@ -11,7 +11,6 @@ snd-sb8-objs := sb8.o
snd-sb16-objs := sb16.o
snd-sbawe-objs := sbawe.o emu8000.o
snd-emu8000-synth-objs := emu8000_synth.o emu8000_callback.o emu8000_patch.o emu8000_pcm.o
-snd-es968-objs := es968.o
snd-jazz16-objs := jazz16.o
# Toplevel Module Dependency
@@ -21,7 +20,6 @@ obj-$(CONFIG_SND_SB8_DSP) += snd-sb8-dsp.o
obj-$(CONFIG_SND_SB8) += snd-sb8.o
obj-$(CONFIG_SND_SB16) += snd-sb16.o
obj-$(CONFIG_SND_SBAWE) += snd-sbawe.o
-obj-$(CONFIG_SND_ES968) += snd-es968.o
obj-$(CONFIG_SND_JAZZ16) += snd-jazz16.o
ifeq ($(CONFIG_SND_SB16_CSP),y)
obj-$(CONFIG_SND_SB16) += snd-sb16-csp.o
diff --git a/sound/isa/sb/es968.c b/sound/isa/sb/es968.c
deleted file mode 100644
index ff18286fef9d..000000000000
--- a/sound/isa/sb/es968.c
+++ /dev/null
@@ -1,248 +0,0 @@
-
-/*
- card-es968.c - driver for ESS AudioDrive ES968 based soundcards.
- Copyright (C) 1999 by Massimo Piccioni <dafastidio@libero.it>
-
- Thanks to Pierfrancesco 'qM2' Passerini.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-
-#include <linux/init.h>
-#include <linux/time.h>
-#include <linux/pnp.h>
-#include <linux/moduleparam.h>
-#include <sound/core.h>
-#include <sound/initval.h>
-#include <sound/sb.h>
-
-#define PFX "es968: "
-
-MODULE_AUTHOR("Massimo Piccioni <dafastidio@libero.it>");
-MODULE_DESCRIPTION("ESS AudioDrive ES968");
-MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{ESS,AudioDrive ES968}}");
-
-static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
-static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_ISAPNP; /* Enable this card */
-static long port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */
-static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* Pnp setup */
-static int dma8[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* PnP setup */
-
-module_param_array(index, int, NULL, 0444);
-MODULE_PARM_DESC(index, "Index value for es968 based soundcard.");
-module_param_array(id, charp, NULL, 0444);
-MODULE_PARM_DESC(id, "ID string for es968 based soundcard.");
-module_param_array(enable, bool, NULL, 0444);
-MODULE_PARM_DESC(enable, "Enable es968 based soundcard.");
-
-struct snd_card_es968 {
- struct pnp_dev *dev;
- struct snd_sb *chip;
-};
-
-static struct pnp_card_device_id snd_es968_pnpids[] = {
- { .id = "ESS0968", .devs = { { "@@@0968" }, } },
- { .id = "", } /* end */
-};
-
-MODULE_DEVICE_TABLE(pnp_card, snd_es968_pnpids);
-
-#define DRIVER_NAME "snd-card-es968"
-
-static irqreturn_t snd_card_es968_interrupt(int irq, void *dev_id)
-{
- struct snd_sb *chip = dev_id;
-
- if (chip->open & SB_OPEN_PCM) {
- return snd_sb8dsp_interrupt(chip);
- } else {
- return snd_sb8dsp_midi_interrupt(chip);
- }
-}
-
-static int __devinit snd_card_es968_pnp(int dev, struct snd_card_es968 *acard,
- struct pnp_card_link *card,
- const struct pnp_card_device_id *id)
-{
- struct pnp_dev *pdev;
- int err;
-
- acard->dev = pnp_request_card_device(card, id->devs[0].id, NULL);
- if (acard->dev == NULL)
- return -ENODEV;
-
- pdev = acard->dev;
-
- err = pnp_activate_dev(pdev);
- if (err < 0) {
- snd_printk(KERN_ERR PFX "AUDIO pnp configure failure\n");
- return err;
- }
- port[dev] = pnp_port_start(pdev, 0);
- dma8[dev] = pnp_dma(pdev, 0);
- irq[dev] = pnp_irq(pdev, 0);
-
- return 0;
-}
-
-static int __devinit snd_card_es968_probe(int dev,
- struct pnp_card_link *pcard,
- const struct pnp_card_device_id *pid)
-{
- int error;
- struct snd_sb *chip;
- struct snd_card *card;
- struct snd_card_es968 *acard;
-
- error = snd_card_create(index[dev], id[dev], THIS_MODULE,
- sizeof(struct snd_card_es968), &card);
- if (error < 0)
- return error;
- acard = card->private_data;
- if ((error = snd_card_es968_pnp(dev, acard, pcard, pid))) {
- snd_card_free(card);
- return error;
- }
- snd_card_set_dev(card, &pcard->card->dev);
-
- if ((error = snd_sbdsp_create(card, port[dev],
- irq[dev],
- snd_card_es968_interrupt,
- dma8[dev],
- -1,
- SB_HW_AUTO, &chip)) < 0) {
- snd_card_free(card);
- return error;
- }
- acard->chip = chip;
-
- if ((error = snd_sb8dsp_pcm(chip, 0, NULL)) < 0) {
- snd_card_free(card);
- return error;
- }
-
- if ((error = snd_sbmixer_new(chip)) < 0) {
- snd_card_free(card);
- return error;
- }
-
- if ((error = snd_sb8dsp_midi(chip, 0, NULL)) < 0) {
- snd_card_free(card);
- return error;
- }
-
- strcpy(card->driver, "ES968");
- strcpy(card->shortname, "ESS ES968");
- sprintf(card->longname, "%s soundcard, %s at 0x%lx, irq %d, dma %d",
- card->shortname, chip->name, chip->port, irq[dev], dma8[dev]);
-
- if ((error = snd_card_register(card)) < 0) {
- snd_card_free(card);
- return error;
- }
- pnp_set_card_drvdata(pcard, card);
- return 0;
-}
-
-static unsigned int __devinitdata es968_devices;
-
-static int __devinit snd_es968_pnp_detect(struct pnp_card_link *card,
- const struct pnp_card_device_id *id)
-{
- static int dev;
- int res;
-
- for ( ; dev < SNDRV_CARDS; dev++) {
- if (!enable[dev])
- continue;
- res = snd_card_es968_probe(dev, card, id);
- if (res < 0)
- return res;
- dev++;
- es968_devices++;
- return 0;
- }
- return -ENODEV;
-}
-
-static void __devexit snd_es968_pnp_remove(struct pnp_card_link * pcard)
-{
- snd_card_free(pnp_get_card_drvdata(pcard));
- pnp_set_card_drvdata(pcard, NULL);
-}
-
-#ifdef CONFIG_PM
-static int snd_es968_pnp_suspend(struct pnp_card_link *pcard, pm_message_t state)
-{
- struct snd_card *card = pnp_get_card_drvdata(pcard);
- struct snd_card_es968 *acard = card->private_data;
- struct snd_sb *chip = acard->chip;
-
- snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- snd_pcm_suspend_all(chip->pcm);
- snd_sbmixer_suspend(chip);
- return 0;
-}
-
-static int snd_es968_pnp_resume(struct pnp_card_link *pcard)
-{
- struct snd_card *card = pnp_get_card_drvdata(pcard);
- struct snd_card_es968 *acard = card->private_data;
- struct snd_sb *chip = acard->chip;
-
- snd_sbdsp_reset(chip);
- snd_sbmixer_resume(chip);
- snd_power_change_state(card, SNDRV_CTL_POWER_D0);
- return 0;
-}
-#endif
-
-static struct pnp_card_driver es968_pnpc_driver = {
- .flags = PNP_DRIVER_RES_DISABLE,
- .name = "es968",
- .id_table = snd_es968_pnpids,
- .probe = snd_es968_pnp_detect,
- .remove = __devexit_p(snd_es968_pnp_remove),
-#ifdef CONFIG_PM
- .suspend = snd_es968_pnp_suspend,
- .resume = snd_es968_pnp_resume,
-#endif
-};
-
-static int __init alsa_card_es968_init(void)
-{
- int err = pnp_register_card_driver(&es968_pnpc_driver);
- if (err)
- return err;
-
- if (!es968_devices) {
- pnp_unregister_card_driver(&es968_pnpc_driver);
-#ifdef MODULE
- snd_printk(KERN_ERR "no ES968 based soundcards found\n");
-#endif
- return -ENODEV;
- }
- return 0;
-}
-
-static void __exit alsa_card_es968_exit(void)
-{
- pnp_unregister_card_driver(&es968_pnpc_driver);
-}
-
-module_init(alsa_card_es968_init)
-module_exit(alsa_card_es968_exit)
diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig
index 1298c68d6bf0..e7a8cd058efb 100644
--- a/sound/pci/Kconfig
+++ b/sound/pci/Kconfig
@@ -58,6 +58,18 @@ config SND_ALI5451
To compile this driver as a module, choose M here: the module
will be called snd-ali5451.
+config SND_ASIHPI
+ tristate "AudioScience ASIxxxx"
+ depends on X86
+ select FW_LOADER
+ select SND_PCM
+ select SND_HWDEP
+ help
+ Say Y here to include support for AudioScience ASI sound cards.
+
+ To compile this driver as a module, choose M here: the module
+ will be called snd-asihpi.
+
config SND_ATIIXP
tristate "ATI IXP AC97 Controller"
select SND_AC97_CODEC
@@ -501,6 +513,16 @@ config SND_ES1968
To compile this driver as a module, choose M here: the module
will be called snd-es1968.
+config SND_ES1968_INPUT
+ bool "Enable input device for es1968 volume buttons"
+ depends on SND_ES1968
+ depends on INPUT=y || INPUT=SND_ES1968
+ help
+ If you say Y here, you will get an input device which reports
+ keypresses for the volume buttons connected to the es1968 chip.
+ If you say N the buttons will directly control the master volume.
+ It is recommended to say Y.
+
config SND_FM801
tristate "ForteMedia FM801"
select SND_OPL3_LIB
@@ -655,6 +677,16 @@ config SND_MAESTRO3
To compile this driver as a module, choose M here: the module
will be called snd-maestro3.
+config SND_MAESTRO3_INPUT
+ bool "Enable input device for maestro3 volume buttons"
+ depends on SND_MAESTRO3
+ depends on INPUT=y || INPUT=SND_MAESTRO3
+ help
+ If you say Y here, you will get an input device which reports
+ keypresses for the volume buttons connected to the maestro3 chip.
+ If you say N the buttons will directly control the master volume.
+ It is recommended to say Y.
+
config SND_MIXART
tristate "Digigram miXart"
select SND_HWDEP
diff --git a/sound/pci/Makefile b/sound/pci/Makefile
index ecfc609d2b9f..9cf4348ec137 100644
--- a/sound/pci/Makefile
+++ b/sound/pci/Makefile
@@ -57,6 +57,7 @@ obj-$(CONFIG_SND_VIA82XX_MODEM) += snd-via82xx-modem.o
obj-$(CONFIG_SND) += \
ac97/ \
ali5451/ \
+ asihpi/ \
au88x0/ \
aw2/ \
ctxfi/ \
diff --git a/sound/pci/asihpi/Makefile b/sound/pci/asihpi/Makefile
new file mode 100644
index 000000000000..391830a4556c
--- /dev/null
+++ b/sound/pci/asihpi/Makefile
@@ -0,0 +1,5 @@
+snd-asihpi-objs := asihpi.o hpioctl.o hpimsginit.o\
+ hpicmn.o hpifunc.o hpidebug.o hpidspcd.o\
+ hpios.o hpi6000.o hpi6205.o hpimsgx.o
+
+obj-$(CONFIG_SND_ASIHPI) += snd-asihpi.o
diff --git a/sound/pci/asihpi/asihpi.c b/sound/pci/asihpi/asihpi.c
new file mode 100644
index 000000000000..f74c7372b3d1
--- /dev/null
+++ b/sound/pci/asihpi/asihpi.c
@@ -0,0 +1,3002 @@
+/*
+ * Asihpi soundcard
+ * Copyright (c) by AudioScience Inc <alsa@audioscience.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation;
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ *
+ * The following is not a condition of use, merely a request:
+ * If you modify this program, particularly if you fix errors, AudioScience Inc
+ * would appreciate it if you grant us the right to use those modifications
+ * for any purpose including commercial applications.
+ */
+/* >0: print Hw params, timer vars. >1: print stream write/copy sizes */
+#define REALLY_VERBOSE_LOGGING 0
+
+#if REALLY_VERBOSE_LOGGING
+#define VPRINTK1 snd_printd
+#else
+#define VPRINTK1(...)
+#endif
+
+#if REALLY_VERBOSE_LOGGING > 1
+#define VPRINTK2 snd_printd
+#else
+#define VPRINTK2(...)
+#endif
+
+#ifndef ASI_STYLE_NAMES
+/* not sure how ALSA style name should look */
+#define ASI_STYLE_NAMES 1
+#endif
+
+#include "hpi_internal.h"
+#include "hpimsginit.h"
+#include "hpioctl.h"
+
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/wait.h>
+#include <sound/core.h>
+#include <sound/control.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/info.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+#include <sound/hwdep.h>
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("AudioScience inc. <support@audioscience.com>");
+MODULE_DESCRIPTION("AudioScience ALSA ASI5000 ASI6000 ASI87xx ASI89xx");
+
+static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* index 0-MAX */
+static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
+static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
+static int enable_hpi_hwdep = 1;
+
+module_param_array(index, int, NULL, S_IRUGO);
+MODULE_PARM_DESC(index, "ALSA index value for AudioScience soundcard.");
+
+module_param_array(id, charp, NULL, S_IRUGO);
+MODULE_PARM_DESC(id, "ALSA ID string for AudioScience soundcard.");
+
+module_param_array(enable, bool, NULL, S_IRUGO);
+MODULE_PARM_DESC(enable, "ALSA enable AudioScience soundcard.");
+
+module_param(enable_hpi_hwdep, bool, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(enable_hpi_hwdep,
+ "ALSA enable HPI hwdep for AudioScience soundcard ");
+
+/* identify driver */
+#ifdef KERNEL_ALSA_BUILD
+static char *build_info = "built using headers from kernel source";
+module_param(build_info, charp, S_IRUGO);
+MODULE_PARM_DESC(build_info, "built using headers from kernel source");
+#else
+static char *build_info = "built within ALSA source";
+module_param(build_info, charp, S_IRUGO);
+MODULE_PARM_DESC(build_info, "built within ALSA source");
+#endif
+
+/* set to 1 to dump every control from adapter to log */
+static const int mixer_dump;
+
+#define DEFAULT_SAMPLERATE 44100
+static int adapter_fs = DEFAULT_SAMPLERATE;
+
+static struct hpi_hsubsys *ss; /* handle to HPI audio subsystem */
+
+/* defaults */
+#define PERIODS_MIN 2
+#define PERIOD_BYTES_MIN 2304
+#define BUFFER_BYTES_MAX (512 * 1024)
+
+/*#define TIMER_MILLISECONDS 20
+#define FORCE_TIMER_JIFFIES ((TIMER_MILLISECONDS * HZ + 999)/1000)
+*/
+
+#define MAX_CLOCKSOURCES (HPI_SAMPLECLOCK_SOURCE_LAST + 1 + 7)
+
+struct clk_source {
+ int source;
+ int index;
+ char *name;
+};
+
+struct clk_cache {
+ int count;
+ int has_local;
+ struct clk_source s[MAX_CLOCKSOURCES];
+};
+
+/* Per card data */
+struct snd_card_asihpi {
+ struct snd_card *card;
+ struct pci_dev *pci;
+ u16 adapter_index;
+ u32 serial_number;
+ u16 type;
+ u16 version;
+ u16 num_outstreams;
+ u16 num_instreams;
+
+ u32 h_mixer;
+ struct clk_cache cc;
+
+ u16 support_mmap;
+ u16 support_grouping;
+ u16 support_mrx;
+ u16 update_interval_frames;
+ u16 in_max_chans;
+ u16 out_max_chans;
+};
+
+/* Per stream data */
+struct snd_card_asihpi_pcm {
+ struct timer_list timer;
+ unsigned int respawn_timer;
+ unsigned int hpi_buffer_attached;
+ unsigned int pcm_size;
+ unsigned int pcm_count;
+ unsigned int bytes_per_sec;
+ unsigned int pcm_irq_pos; /* IRQ position */
+ unsigned int pcm_buf_pos; /* position in buffer */
+ struct snd_pcm_substream *substream;
+ u32 h_stream;
+ struct hpi_format format;
+};
+
+/* universal stream verbs work with out or in stream handles */
+
+/* Functions to allow driver to give a buffer to HPI for busmastering */
+
+static u16 hpi_stream_host_buffer_attach(
+ struct hpi_hsubsys *hS,
+ u32 h_stream, /* handle to outstream. */
+ u32 size_in_bytes, /* size in bytes of bus mastering buffer */
+ u32 pci_address
+)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ unsigned int obj = hpi_handle_object(h_stream);
+
+ if (!h_stream)
+ return HPI_ERROR_INVALID_OBJ;
+ hpi_init_message_response(&hm, &hr, obj,
+ obj == HPI_OBJ_OSTREAM ?
+ HPI_OSTREAM_HOSTBUFFER_ALLOC :
+ HPI_ISTREAM_HOSTBUFFER_ALLOC);
+
+ hpi_handle_to_indexes(h_stream, &hm.adapter_index,
+ &hm.obj_index);
+
+ hm.u.d.u.buffer.buffer_size = size_in_bytes;
+ hm.u.d.u.buffer.pci_address = pci_address;
+ hm.u.d.u.buffer.command = HPI_BUFFER_CMD_INTERNAL_GRANTADAPTER;
+ hpi_send_recv(&hm, &hr);
+ return hr.error;
+}
+
+static u16 hpi_stream_host_buffer_detach(
+ struct hpi_hsubsys *hS,
+ u32 h_stream
+)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ unsigned int obj = hpi_handle_object(h_stream);
+
+ if (!h_stream)
+ return HPI_ERROR_INVALID_OBJ;
+
+ hpi_init_message_response(&hm, &hr, obj,
+ obj == HPI_OBJ_OSTREAM ?
+ HPI_OSTREAM_HOSTBUFFER_FREE :
+ HPI_ISTREAM_HOSTBUFFER_FREE);
+
+ hpi_handle_to_indexes(h_stream, &hm.adapter_index,
+ &hm.obj_index);
+ hm.u.d.u.buffer.command = HPI_BUFFER_CMD_INTERNAL_REVOKEADAPTER;
+ hpi_send_recv(&hm, &hr);
+ return hr.error;
+}
+
+static inline u16 hpi_stream_start(struct hpi_hsubsys *hS, u32 h_stream)
+{
+ if (hpi_handle_object(h_stream) == HPI_OBJ_OSTREAM)
+ return hpi_outstream_start(hS, h_stream);
+ else
+ return hpi_instream_start(hS, h_stream);
+}
+
+static inline u16 hpi_stream_stop(struct hpi_hsubsys *hS, u32 h_stream)
+{
+ if (hpi_handle_object(h_stream) == HPI_OBJ_OSTREAM)
+ return hpi_outstream_stop(hS, h_stream);
+ else
+ return hpi_instream_stop(hS, h_stream);
+}
+
+static inline u16 hpi_stream_get_info_ex(
+ struct hpi_hsubsys *hS,
+ u32 h_stream,
+ u16 *pw_state,
+ u32 *pbuffer_size,
+ u32 *pdata_in_buffer,
+ u32 *psample_count,
+ u32 *pauxiliary_data
+)
+{
+ if (hpi_handle_object(h_stream) == HPI_OBJ_OSTREAM)
+ return hpi_outstream_get_info_ex(hS, h_stream, pw_state,
+ pbuffer_size, pdata_in_buffer,
+ psample_count, pauxiliary_data);
+ else
+ return hpi_instream_get_info_ex(hS, h_stream, pw_state,
+ pbuffer_size, pdata_in_buffer,
+ psample_count, pauxiliary_data);
+}
+
+static inline u16 hpi_stream_group_add(struct hpi_hsubsys *hS,
+ u32 h_master,
+ u32 h_stream)
+{
+ if (hpi_handle_object(h_master) == HPI_OBJ_OSTREAM)
+ return hpi_outstream_group_add(hS, h_master, h_stream);
+ else
+ return hpi_instream_group_add(hS, h_master, h_stream);
+}
+
+static inline u16 hpi_stream_group_reset(struct hpi_hsubsys *hS,
+ u32 h_stream)
+{
+ if (hpi_handle_object(h_stream) == HPI_OBJ_OSTREAM)
+ return hpi_outstream_group_reset(hS, h_stream);
+ else
+ return hpi_instream_group_reset(hS, h_stream);
+}
+
+static inline u16 hpi_stream_group_get_map(struct hpi_hsubsys *hS,
+ u32 h_stream, u32 *mo, u32 *mi)
+{
+ if (hpi_handle_object(h_stream) == HPI_OBJ_OSTREAM)
+ return hpi_outstream_group_get_map(hS, h_stream, mo, mi);
+ else
+ return hpi_instream_group_get_map(hS, h_stream, mo, mi);
+}
+
+static u16 handle_error(u16 err, int line, char *filename)
+{
+ if (err)
+ printk(KERN_WARNING
+ "in file %s, line %d: HPI error %d\n",
+ filename, line, err);
+ return err;
+}
+
+#define hpi_handle_error(x) handle_error(x, __LINE__, __FILE__)
+
+/***************************** GENERAL PCM ****************/
+#if REALLY_VERBOSE_LOGGING
+static void print_hwparams(struct snd_pcm_hw_params *p)
+{
+ snd_printd("HWPARAMS \n");
+ snd_printd("samplerate %d \n", params_rate(p));
+ snd_printd("channels %d \n", params_channels(p));
+ snd_printd("format %d \n", params_format(p));
+ snd_printd("subformat %d \n", params_subformat(p));
+ snd_printd("buffer bytes %d \n", params_buffer_bytes(p));
+ snd_printd("period bytes %d \n", params_period_bytes(p));
+ snd_printd("access %d \n", params_access(p));
+ snd_printd("period_size %d \n", params_period_size(p));
+ snd_printd("periods %d \n", params_periods(p));
+ snd_printd("buffer_size %d \n", params_buffer_size(p));
+}
+#else
+#define print_hwparams(x)
+#endif
+
+static snd_pcm_format_t hpi_to_alsa_formats[] = {
+ -1, /* INVALID */
+ SNDRV_PCM_FORMAT_U8, /* HPI_FORMAT_PCM8_UNSIGNED 1 */
+ SNDRV_PCM_FORMAT_S16, /* HPI_FORMAT_PCM16_SIGNED 2 */
+ -1, /* HPI_FORMAT_MPEG_L1 3 */
+ SNDRV_PCM_FORMAT_MPEG, /* HPI_FORMAT_MPEG_L2 4 */
+ SNDRV_PCM_FORMAT_MPEG, /* HPI_FORMAT_MPEG_L3 5 */
+ -1, /* HPI_FORMAT_DOLBY_AC2 6 */
+ -1, /* HPI_FORMAT_DOLBY_AC3 7 */
+ SNDRV_PCM_FORMAT_S16_BE,/* HPI_FORMAT_PCM16_BIGENDIAN 8 */
+ -1, /* HPI_FORMAT_AA_TAGIT1_HITS 9 */
+ -1, /* HPI_FORMAT_AA_TAGIT1_INSERTS 10 */
+ SNDRV_PCM_FORMAT_S32, /* HPI_FORMAT_PCM32_SIGNED 11 */
+ -1, /* HPI_FORMAT_RAW_BITSTREAM 12 */
+ -1, /* HPI_FORMAT_AA_TAGIT1_HITS_EX1 13 */
+ SNDRV_PCM_FORMAT_FLOAT, /* HPI_FORMAT_PCM32_FLOAT 14 */
+#if 1
+ /* ALSA can't handle 3 byte sample size together with power-of-2
+ * constraint on buffer_bytes, so disable this format
+ */
+ -1
+#else
+ /* SNDRV_PCM_FORMAT_S24_3LE */ /* { HPI_FORMAT_PCM24_SIGNED 15 */
+#endif
+};
+
+
+static int snd_card_asihpi_format_alsa2hpi(snd_pcm_format_t alsa_format,
+ u16 *hpi_format)
+{
+ u16 format;
+
+ for (format = HPI_FORMAT_PCM8_UNSIGNED;
+ format <= HPI_FORMAT_PCM24_SIGNED; format++) {
+ if (hpi_to_alsa_formats[format] == alsa_format) {
+ *hpi_format = format;
+ return 0;
+ }
+ }
+
+ snd_printd(KERN_WARNING "failed match for alsa format %d\n",
+ alsa_format);
+ *hpi_format = 0;
+ return -EINVAL;
+}
+
+static void snd_card_asihpi_pcm_samplerates(struct snd_card_asihpi *asihpi,
+ struct snd_pcm_hardware *pcmhw)
+{
+ u16 err;
+ u32 h_control;
+ u32 sample_rate;
+ int idx;
+ unsigned int rate_min = 200000;
+ unsigned int rate_max = 0;
+ unsigned int rates = 0;
+
+ if (asihpi->support_mrx) {
+ rates |= SNDRV_PCM_RATE_CONTINUOUS;
+ rates |= SNDRV_PCM_RATE_8000_96000;
+ rate_min = 8000;
+ rate_max = 100000;
+ } else {
+ /* on cards without SRC,
+ valid rates are determined by sampleclock */
+ err = hpi_mixer_get_control(ss, asihpi->h_mixer,
+ HPI_SOURCENODE_CLOCK_SOURCE, 0, 0, 0,
+ HPI_CONTROL_SAMPLECLOCK, &h_control);
+ if (err) {
+ snd_printk(KERN_ERR
+ "no local sampleclock, err %d\n", err);
+ }
+
+ for (idx = 0; idx < 100; idx++) {
+ if (hpi_sample_clock_query_local_rate(ss,
+ h_control, idx, &sample_rate)) {
+ if (!idx)
+ snd_printk(KERN_ERR
+ "local rate query failed\n");
+
+ break;
+ }
+
+ rate_min = min(rate_min, sample_rate);
+ rate_max = max(rate_max, sample_rate);
+
+ switch (sample_rate) {
+ case 5512:
+ rates |= SNDRV_PCM_RATE_5512;
+ break;
+ case 8000:
+ rates |= SNDRV_PCM_RATE_8000;
+ break;
+ case 11025:
+ rates |= SNDRV_PCM_RATE_11025;
+ break;
+ case 16000:
+ rates |= SNDRV_PCM_RATE_16000;
+ break;
+ case 22050:
+ rates |= SNDRV_PCM_RATE_22050;
+ break;
+ case 32000:
+ rates |= SNDRV_PCM_RATE_32000;
+ break;
+ case 44100:
+ rates |= SNDRV_PCM_RATE_44100;
+ break;
+ case 48000:
+ rates |= SNDRV_PCM_RATE_48000;
+ break;
+ case 64000:
+ rates |= SNDRV_PCM_RATE_64000;
+ break;
+ case 88200:
+ rates |= SNDRV_PCM_RATE_88200;
+ break;
+ case 96000:
+ rates |= SNDRV_PCM_RATE_96000;
+ break;
+ case 176400:
+ rates |= SNDRV_PCM_RATE_176400;
+ break;
+ case 192000:
+ rates |= SNDRV_PCM_RATE_192000;
+ break;
+ default: /* some other rate */
+ rates |= SNDRV_PCM_RATE_KNOT;
+ }
+ }
+ }
+
+ /* printk(KERN_INFO "Supported rates %X %d %d\n",
+ rates, rate_min, rate_max); */
+ pcmhw->rates = rates;
+ pcmhw->rate_min = rate_min;
+ pcmhw->rate_max = rate_max;
+}
+
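+/* hw_params: allocate the ALSA buffer, translate the ALSA format to an
+ * HPI format, attach the host buffer on mmap-capable cards, and cache
+ * the byte rate and buffer/period sizes used by the timer callback.
+ */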
+static int snd_card_asihpi_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_card_asihpi_pcm *dpcm = runtime->private_data;
+ struct snd_card_asihpi *card = snd_pcm_substream_chip(substream);
+ int err;
+ u16 format;
+ unsigned int bytes_per_sec;
+
+ print_hwparams(params);
+ err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
+ if (err < 0)
+ return err;
+ err = snd_card_asihpi_format_alsa2hpi(params_format(params), &format);
+ if (err)
+ return err;
+
+ VPRINTK1(KERN_INFO "format %d, %d chans, %d_hz\n",
+ format, params_channels(params),
+ params_rate(params));
+
+ hpi_handle_error(hpi_format_create(&dpcm->format,
+ params_channels(params),
+ format, params_rate(params), 0, 0));
+
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+ if (hpi_instream_reset(ss, dpcm->h_stream) != 0)
+ return -EINVAL;
+
+ if (hpi_instream_set_format(ss,
+ dpcm->h_stream, &dpcm->format) != 0)
+ return -EINVAL;
+ }
+
+ dpcm->hpi_buffer_attached = 0;
+ if (card->support_mmap) {
+
+ err = hpi_stream_host_buffer_attach(ss, dpcm->h_stream,
+ params_buffer_bytes(params), runtime->dma_addr);
+ if (err == 0) {
+ snd_printd(KERN_INFO
+ "stream_host_buffer_attach succeeded %u %lu\n",
+ params_buffer_bytes(params),
+ (unsigned long)runtime->dma_addr);
+ } else {
+ snd_printd(KERN_INFO
+ "stream_host_buffer_attach error %d\n",
+ err);
+ return -ENOMEM;
+ }
+
+ err = hpi_stream_get_info_ex(ss, dpcm->h_stream, NULL,
+ &dpcm->hpi_buffer_attached,
+ NULL, NULL, NULL);
+
+ snd_printd(KERN_INFO "stream_host_buffer_attach status 0x%x\n",
+ dpcm->hpi_buffer_attached);
+ }
+ bytes_per_sec = params_rate(params) * params_channels(params);
+ bytes_per_sec *= snd_pcm_format_width(params_format(params));
+ bytes_per_sec /= 8;
+ if (bytes_per_sec <= 0)
+ return -EINVAL;
+
+ dpcm->bytes_per_sec = bytes_per_sec;
+ dpcm->pcm_size = params_buffer_bytes(params);
+ dpcm->pcm_count = params_period_bytes(params);
+ snd_printd(KERN_INFO "pcm_size=%d, pcm_count=%d, bps=%d\n",
+ dpcm->pcm_size, dpcm->pcm_count, bytes_per_sec);
+
+ dpcm->pcm_irq_pos = 0;
+ dpcm->pcm_buf_pos = 0;
+ return 0;
+}
+
+static void snd_card_asihpi_pcm_timer_start(struct snd_pcm_substream *
+ substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_card_asihpi_pcm *dpcm = runtime->private_data;
+ int expiry;
+
+ expiry = (dpcm->pcm_count * HZ / dpcm->bytes_per_sec);
+ /* wait longer the first time, for samples to propagate */
+ expiry = max(expiry, 20);
+ dpcm->timer.expires = jiffies + expiry;
+ dpcm->respawn_timer = 1;
+ add_timer(&dpcm->timer);
+}
+
+static void snd_card_asihpi_pcm_timer_stop(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_card_asihpi_pcm *dpcm = runtime->private_data;
+
+ dpcm->respawn_timer = 0;
+ del_timer(&dpcm->timer);
+}
+
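+/* Trigger callback. On START, playback substreams are preloaded (mmap
+ * case), linked substreams are added to an HPI stream group, and the
+ * master stream plus the polling timer are started; STOP and PAUSE
+ * reverse this.
+ */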
+static int snd_card_asihpi_trigger(struct snd_pcm_substream *substream,
+ int cmd)
+{
+ struct snd_card_asihpi_pcm *dpcm = substream->runtime->private_data;
+ struct snd_card_asihpi *card = snd_pcm_substream_chip(substream);
+ struct snd_pcm_substream *s;
+ u16 e;
+
+ snd_printd("trigger %dstream %d\n",
+ substream->stream, substream->number);
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ snd_pcm_group_for_each_entry(s, substream) {
+ struct snd_card_asihpi_pcm *ds;
+ ds = s->runtime->private_data;
+
+ if (snd_pcm_substream_chip(s) != card)
+ continue;
+
+ if ((s->stream == SNDRV_PCM_STREAM_PLAYBACK) &&
+ (card->support_mmap)) {
+				/* We don't know how much valid data is
+				 * present in the buffer; guess two periods,
+				 * though a bigger buffer may contain even
+				 * more data.
+				 */
+ unsigned int preload = ds->pcm_count * 2;
+ VPRINTK2("preload %d\n", preload);
+ hpi_handle_error(hpi_outstream_write_buf(
+ ss, ds->h_stream,
+ &s->runtime->dma_area[0],
+ preload,
+ &ds->format));
+ }
+
+ if (card->support_grouping) {
+ VPRINTK1("\t_group %dstream %d\n", s->stream,
+ s->number);
+ e = hpi_stream_group_add(ss,
+ dpcm->h_stream,
+ ds->h_stream);
+ if (!e) {
+ snd_pcm_trigger_done(s, substream);
+ } else {
+ hpi_handle_error(e);
+ break;
+ }
+ } else
+ break;
+ }
+ snd_printd("start\n");
+ /* start the master stream */
+ snd_card_asihpi_pcm_timer_start(substream);
+ hpi_handle_error(hpi_stream_start(ss, dpcm->h_stream));
+ break;
+
+ case SNDRV_PCM_TRIGGER_STOP:
+ snd_card_asihpi_pcm_timer_stop(substream);
+ snd_pcm_group_for_each_entry(s, substream) {
+ if (snd_pcm_substream_chip(s) != card)
+ continue;
+
+			/*? workaround: linked streams don't
+			transition to SETUP, 20070706 */
+ s->runtime->status->state = SNDRV_PCM_STATE_SETUP;
+
+ if (card->support_grouping) {
+ VPRINTK1("\t_group %dstream %d\n", s->stream,
+ s->number);
+ snd_pcm_trigger_done(s, substream);
+ } else
+ break;
+ }
+ snd_printd("stop\n");
+
+ /* _prepare and _hwparams reset the stream */
+ hpi_handle_error(hpi_stream_stop(ss, dpcm->h_stream));
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ hpi_handle_error(
+ hpi_outstream_reset(ss, dpcm->h_stream));
+
+ if (card->support_grouping)
+ hpi_handle_error(hpi_stream_group_reset(ss,
+ dpcm->h_stream));
+ break;
+
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ snd_printd("pause release\n");
+ hpi_handle_error(hpi_stream_start(ss, dpcm->h_stream));
+ snd_card_asihpi_pcm_timer_start(substream);
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ snd_printd("pause\n");
+ snd_card_asihpi_pcm_timer_stop(substream);
+ hpi_handle_error(hpi_stream_stop(ss, dpcm->h_stream));
+ break;
+ default:
+ snd_printd("\tINVALID\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+snd_card_asihpi_hw_free(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_card_asihpi_pcm *dpcm = runtime->private_data;
+ if (dpcm->hpi_buffer_attached)
+ hpi_stream_host_buffer_detach(ss, dpcm->h_stream);
+
+ snd_pcm_lib_free_pages(substream);
+ return 0;
+}
+
+static void snd_card_asihpi_runtime_free(struct snd_pcm_runtime *runtime)
+{
+ struct snd_card_asihpi_pcm *dpcm = runtime->private_data;
+ kfree(dpcm);
+}
+
+/* Algorithm outline:
+ Without linking this degenerates to getting a single stream position etc.
+ Without mmap the 2nd loop degenerates to snd_pcm_period_elapsed().
+*/
+/*
+buf_pos=get_buf_pos(s);
+for_each_linked_stream(s) {
+ buf_pos=get_buf_pos(s);
+ min_buf_pos = modulo_min(min_buf_pos, buf_pos, pcm_size)
+	new_data = min(new_data, calc_new_data(buf_pos, irq_pos))
+}
+timer.expires = jiffies + predict_next_period_ready(min_buf_pos);
+for_each_linked_stream(s) {
+ s->buf_pos = min_buf_pos;
+ if (new_data > pcm_count) {
+ if (mmap) {
+ irq_pos = (irq_pos + pcm_count) % pcm_size;
+ if (playback) {
+ write(pcm_count);
+ } else {
+ read(pcm_count);
+ }
+ }
+ snd_pcm_period_elapsed(s);
+ }
+}
+*/
+
+/** Minimum of 2 modulo values. Works correctly when the difference between
+* the values is less than half the modulus
+*/
+static inline unsigned int modulo_min(unsigned int a, unsigned int b,
+ unsigned long int modulus)
+{
+ unsigned int result;
+ if (((a-b) % modulus) < (modulus/2))
+ result = b;
+ else
+ result = a;
+
+ return result;
+}
+
+/** Timer function, equivalent to an interrupt service routine for these
+ * cards; stream progress is polled from a kernel timer rather than an IRQ.
+ */
+static void snd_card_asihpi_timer_function(unsigned long data)
+{
+ struct snd_card_asihpi_pcm *dpcm = (struct snd_card_asihpi_pcm *)data;
+ struct snd_card_asihpi *card = snd_pcm_substream_chip(dpcm->substream);
+ struct snd_pcm_runtime *runtime;
+ struct snd_pcm_substream *s;
+ unsigned int newdata = 0;
+ unsigned int buf_pos, min_buf_pos = 0;
+ unsigned int remdata, xfercount, next_jiffies;
+ int first = 1;
+ u16 state;
+ u32 buffer_size, data_avail, samples_played, aux;
+
+ /* find minimum newdata and buffer pos in group */
+ snd_pcm_group_for_each_entry(s, dpcm->substream) {
+ struct snd_card_asihpi_pcm *ds = s->runtime->private_data;
+ runtime = s->runtime;
+
+ if (snd_pcm_substream_chip(s) != card)
+ continue;
+
+ hpi_handle_error(hpi_stream_get_info_ex(ss,
+ ds->h_stream, &state,
+ &buffer_size, &data_avail,
+ &samples_played, &aux));
+
+ /* number of bytes in on-card buffer */
+ runtime->delay = aux;
+
+ if (state == HPI_STATE_DRAINED) {
+ snd_printd(KERN_WARNING "outstream %d drained\n",
+ s->number);
+ snd_pcm_stop(s, SNDRV_PCM_STATE_XRUN);
+ return;
+ }
+
+ if (s->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ buf_pos = frames_to_bytes(runtime, samples_played);
+ } else {
+ buf_pos = data_avail + ds->pcm_irq_pos;
+ }
+
+ if (first) {
+ /* can't statically init min when wrap is involved */
+ min_buf_pos = buf_pos;
+ newdata = (buf_pos - ds->pcm_irq_pos) % ds->pcm_size;
+ first = 0;
+ } else {
+ min_buf_pos =
+ modulo_min(min_buf_pos, buf_pos, UINT_MAX+1L);
+ newdata = min(
+ (buf_pos - ds->pcm_irq_pos) % ds->pcm_size,
+ newdata);
+ }
+
+ VPRINTK1("PB timer hw_ptr x%04lX, appl_ptr x%04lX\n",
+ (unsigned long)frames_to_bytes(runtime,
+ runtime->status->hw_ptr),
+ (unsigned long)frames_to_bytes(runtime,
+ runtime->control->appl_ptr));
+ VPRINTK1("%d S=%d, irq=%04X, pos=x%04X, left=x%04X,"
+ " aux=x%04X space=x%04X\n", s->number,
+ state, ds->pcm_irq_pos, buf_pos, (int)data_avail,
+ (int)aux, buffer_size-data_avail);
+ }
+
+ remdata = newdata % dpcm->pcm_count;
+ xfercount = newdata - remdata; /* a multiple of pcm_count */
+ next_jiffies = ((dpcm->pcm_count-remdata) * HZ / dpcm->bytes_per_sec)+1;
+ next_jiffies = max(next_jiffies, 2U * HZ / 1000U);
+ dpcm->timer.expires = jiffies + next_jiffies;
+ VPRINTK1("jif %d buf pos x%04X newdata x%04X xc x%04X\n",
+ next_jiffies, min_buf_pos, newdata, xfercount);
+
+ snd_pcm_group_for_each_entry(s, dpcm->substream) {
+ struct snd_card_asihpi_pcm *ds = s->runtime->private_data;
+ ds->pcm_buf_pos = min_buf_pos;
+
+ if (xfercount) {
+ if (card->support_mmap) {
+ ds->pcm_irq_pos = ds->pcm_irq_pos + xfercount;
+ if (s->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ VPRINTK2("write OS%d x%04x\n",
+ s->number,
+ ds->pcm_count);
+ hpi_handle_error(
+ hpi_outstream_write_buf(
+ ss, ds->h_stream,
+ &s->runtime->
+ dma_area[0],
+ xfercount,
+ &ds->format));
+ } else {
+ VPRINTK2("read IS%d x%04x\n",
+ s->number,
+ dpcm->pcm_count);
+ hpi_handle_error(
+ hpi_instream_read_buf(
+ ss, ds->h_stream,
+ NULL, xfercount));
+ }
+ } /* else R/W will be handled by read/write callbacks */
+ snd_pcm_period_elapsed(s);
+ }
+ }
+
+ if (dpcm->respawn_timer)
+ add_timer(&dpcm->timer);
+}
+
+/***************************** PLAYBACK OPS ****************/
+static int snd_card_asihpi_playback_ioctl(struct snd_pcm_substream *substream,
+ unsigned int cmd, void *arg)
+{
+ /* snd_printd(KERN_INFO "Playback ioctl %d\n", cmd); */
+ return snd_pcm_lib_ioctl(substream, cmd, arg);
+}
+
+static int snd_card_asihpi_playback_prepare(struct snd_pcm_substream *
+ substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_card_asihpi_pcm *dpcm = runtime->private_data;
+
+ snd_printd(KERN_INFO "playback prepare %d\n", substream->number);
+
+ hpi_handle_error(hpi_outstream_reset(ss, dpcm->h_stream));
+ dpcm->pcm_irq_pos = 0;
+ dpcm->pcm_buf_pos = 0;
+
+ return 0;
+}
+
+static snd_pcm_uframes_t
+snd_card_asihpi_playback_pointer(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_card_asihpi_pcm *dpcm = runtime->private_data;
+ snd_pcm_uframes_t ptr;
+
+ u32 samples_played;
+ u16 err;
+
+ if (!snd_pcm_stream_linked(substream)) {
+		/* NOTE: samples_played can be used for the playback position
+		 * here and in the timer function because it lags the actual
+		 * read pointer and better represents the actual playout
+		 * position.
+		 */
+ err = hpi_outstream_get_info_ex(ss, dpcm->h_stream, NULL,
+ NULL, NULL,
+ &samples_played, NULL);
+ hpi_handle_error(err);
+
+ dpcm->pcm_buf_pos = frames_to_bytes(runtime, samples_played);
+ }
+ /* else must return most conservative value found in timer func
+ * by looping over all streams
+ */
+
+ ptr = bytes_to_frames(runtime, dpcm->pcm_buf_pos % dpcm->pcm_size);
+ VPRINTK2("playback_pointer=%04ld\n", (unsigned long)ptr);
+ return ptr;
+}
+
+static void snd_card_asihpi_playback_format(struct snd_card_asihpi *asihpi,
+ u32 h_stream,
+ struct snd_pcm_hardware *pcmhw)
+{
+ struct hpi_format hpi_format;
+ u16 format;
+ u16 err;
+ u32 h_control;
+ u32 sample_rate = 48000;
+
+	/* on cards without SRC, formats must be queried at a valid rate,
+	 * which may be set by external sync
+	 */
+ err = hpi_mixer_get_control(ss, asihpi->h_mixer,
+ HPI_SOURCENODE_CLOCK_SOURCE, 0, 0, 0,
+ HPI_CONTROL_SAMPLECLOCK, &h_control);
+
+ if (!err)
+ err = hpi_sample_clock_get_sample_rate(ss, h_control,
+ &sample_rate);
+
+ for (format = HPI_FORMAT_PCM8_UNSIGNED;
+ format <= HPI_FORMAT_PCM24_SIGNED; format++) {
+ err = hpi_format_create(&hpi_format,
+ 2, format, sample_rate, 128000, 0);
+ if (!err)
+ err = hpi_outstream_query_format(ss, h_stream,
+ &hpi_format);
+ if (!err && (hpi_to_alsa_formats[format] != -1))
+ pcmhw->formats |=
+ (1ULL << hpi_to_alsa_formats[format]);
+ }
+}
+
+static struct snd_pcm_hardware snd_card_asihpi_playback = {
+ .channels_min = 1,
+ .channels_max = 2,
+ .buffer_bytes_max = BUFFER_BYTES_MAX,
+ .period_bytes_min = PERIOD_BYTES_MIN,
+ .period_bytes_max = BUFFER_BYTES_MAX / PERIODS_MIN,
+ .periods_min = PERIODS_MIN,
+ .periods_max = BUFFER_BYTES_MAX / PERIOD_BYTES_MIN,
+ .fifo_size = 0,
+};
+
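+/* Playback open: the hardware description is built per card (channel
+ * count, formats, rates, mmap/grouping flags) before being copied to the
+ * runtime, then period-size constraints are applied.
+ */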
+static int snd_card_asihpi_playback_open(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_card_asihpi_pcm *dpcm;
+ struct snd_card_asihpi *card = snd_pcm_substream_chip(substream);
+ int err;
+
+ dpcm = kzalloc(sizeof(*dpcm), GFP_KERNEL);
+ if (dpcm == NULL)
+ return -ENOMEM;
+
+ err =
+ hpi_outstream_open(ss, card->adapter_index,
+ substream->number, &dpcm->h_stream);
+ hpi_handle_error(err);
+ if (err)
+ kfree(dpcm);
+ if (err == HPI_ERROR_OBJ_ALREADY_OPEN)
+ return -EBUSY;
+ if (err)
+ return -EIO;
+
+	/*? also check ASI5000 samplerate source:
+		if external, only support the external rate;
+		if internal and another stream is playing, we can't switch.
+	*/
+
+ init_timer(&dpcm->timer);
+ dpcm->timer.data = (unsigned long) dpcm;
+ dpcm->timer.function = snd_card_asihpi_timer_function;
+ dpcm->substream = substream;
+ runtime->private_data = dpcm;
+ runtime->private_free = snd_card_asihpi_runtime_free;
+
+ snd_card_asihpi_playback.channels_max = card->out_max_chans;
+ /*?snd_card_asihpi_playback.period_bytes_min =
+ card->out_max_chans * 4096; */
+
+ snd_card_asihpi_playback_format(card, dpcm->h_stream,
+ &snd_card_asihpi_playback);
+
+ snd_card_asihpi_pcm_samplerates(card, &snd_card_asihpi_playback);
+
+ snd_card_asihpi_playback.info = SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_DOUBLE |
+ SNDRV_PCM_INFO_BATCH |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_PAUSE;
+
+ if (card->support_mmap)
+ snd_card_asihpi_playback.info |= SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID;
+
+ if (card->support_grouping)
+ snd_card_asihpi_playback.info |= SNDRV_PCM_INFO_SYNC_START;
+
+ /* struct is copied, so can create initializer dynamically */
+ runtime->hw = snd_card_asihpi_playback;
+
+ if (card->support_mmap)
+ err = snd_pcm_hw_constraint_pow2(runtime, 0,
+ SNDRV_PCM_HW_PARAM_BUFFER_BYTES);
+ if (err < 0)
+ return err;
+
+ snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+ card->update_interval_frames);
+ snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+ card->update_interval_frames * 4, UINT_MAX);
+
+ snd_pcm_set_sync(substream);
+
+ snd_printd(KERN_INFO "playback open\n");
+
+ return 0;
+}
+
+static int snd_card_asihpi_playback_close(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_card_asihpi_pcm *dpcm = runtime->private_data;
+
+ hpi_handle_error(hpi_outstream_close(ss, dpcm->h_stream));
+ snd_printd(KERN_INFO "playback close\n");
+
+ return 0;
+}
+
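+/* Copy callback for the non-mmap case: stage user data in the
+ * intermediate buffer, then write it to the HPI outstream.
+ */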
+static int snd_card_asihpi_playback_copy(struct snd_pcm_substream *substream,
+ int channel,
+ snd_pcm_uframes_t pos,
+ void __user *src,
+ snd_pcm_uframes_t count)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_card_asihpi_pcm *dpcm = runtime->private_data;
+ unsigned int len;
+
+ len = frames_to_bytes(runtime, count);
+
+ if (copy_from_user(runtime->dma_area, src, len))
+ return -EFAULT;
+
+ VPRINTK2(KERN_DEBUG "playback copy%d %u bytes\n",
+ substream->number, len);
+
+ hpi_handle_error(hpi_outstream_write_buf(ss, dpcm->h_stream,
+ runtime->dma_area, len, &dpcm->format));
+
+ return 0;
+}
+
+static int snd_card_asihpi_playback_silence(struct snd_pcm_substream *
+ substream, int channel,
+ snd_pcm_uframes_t pos,
+ snd_pcm_uframes_t count)
+{
+ unsigned int len;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_card_asihpi_pcm *dpcm = runtime->private_data;
+
+ len = frames_to_bytes(runtime, count);
+ snd_printd(KERN_INFO "playback silence %u bytes\n", len);
+
+ memset(runtime->dma_area, 0, len);
+ hpi_handle_error(hpi_outstream_write_buf(ss, dpcm->h_stream,
+ runtime->dma_area, len, &dpcm->format));
+ return 0;
+}
+
+static struct snd_pcm_ops snd_card_asihpi_playback_ops = {
+ .open = snd_card_asihpi_playback_open,
+ .close = snd_card_asihpi_playback_close,
+ .ioctl = snd_card_asihpi_playback_ioctl,
+ .hw_params = snd_card_asihpi_pcm_hw_params,
+ .hw_free = snd_card_asihpi_hw_free,
+ .prepare = snd_card_asihpi_playback_prepare,
+ .trigger = snd_card_asihpi_trigger,
+ .pointer = snd_card_asihpi_playback_pointer,
+ .copy = snd_card_asihpi_playback_copy,
+ .silence = snd_card_asihpi_playback_silence,
+};
+
+static struct snd_pcm_ops snd_card_asihpi_playback_mmap_ops = {
+ .open = snd_card_asihpi_playback_open,
+ .close = snd_card_asihpi_playback_close,
+ .ioctl = snd_card_asihpi_playback_ioctl,
+ .hw_params = snd_card_asihpi_pcm_hw_params,
+ .hw_free = snd_card_asihpi_hw_free,
+ .prepare = snd_card_asihpi_playback_prepare,
+ .trigger = snd_card_asihpi_trigger,
+ .pointer = snd_card_asihpi_playback_pointer,
+};
+
+/***************************** CAPTURE OPS ****************/
+static snd_pcm_uframes_t
+snd_card_asihpi_capture_pointer(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_card_asihpi_pcm *dpcm = runtime->private_data;
+
+ VPRINTK2("capture pointer %d=%d\n",
+ substream->number, dpcm->pcm_buf_pos);
+	/* NOTE: unlike playback, the actual dwSamplesPlayed can't be used
+	for the capture position, because those samples aren't yet in
+	the local buffer available for reading.
+	*/
+ return bytes_to_frames(runtime, dpcm->pcm_buf_pos % dpcm->pcm_size);
+}
+
+static int snd_card_asihpi_capture_ioctl(struct snd_pcm_substream *substream,
+ unsigned int cmd, void *arg)
+{
+ return snd_pcm_lib_ioctl(substream, cmd, arg);
+}
+
+static int snd_card_asihpi_capture_prepare(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_card_asihpi_pcm *dpcm = runtime->private_data;
+
+ hpi_handle_error(hpi_instream_reset(ss, dpcm->h_stream));
+ dpcm->pcm_irq_pos = 0;
+ dpcm->pcm_buf_pos = 0;
+
+ snd_printd("capture prepare %d\n", substream->number);
+ return 0;
+}
+
+
+
+static void snd_card_asihpi_capture_format(struct snd_card_asihpi *asihpi,
+ u32 h_stream,
+ struct snd_pcm_hardware *pcmhw)
+{
+ struct hpi_format hpi_format;
+ u16 format;
+ u16 err;
+ u32 h_control;
+ u32 sample_rate = 48000;
+
+	/* on cards without SRC, formats must be queried at a valid rate,
+	   which may be set by external sync */
+ err = hpi_mixer_get_control(ss, asihpi->h_mixer,
+ HPI_SOURCENODE_CLOCK_SOURCE, 0, 0, 0,
+ HPI_CONTROL_SAMPLECLOCK, &h_control);
+
+ if (!err)
+ err = hpi_sample_clock_get_sample_rate(ss, h_control,
+ &sample_rate);
+
+ for (format = HPI_FORMAT_PCM8_UNSIGNED;
+ format <= HPI_FORMAT_PCM24_SIGNED; format++) {
+
+ err = hpi_format_create(&hpi_format, 2, format,
+ sample_rate, 128000, 0);
+ if (!err)
+ err = hpi_instream_query_format(ss, h_stream,
+ &hpi_format);
+ if (!err)
+ pcmhw->formats |=
+ (1ULL << hpi_to_alsa_formats[format]);
+ }
+}
+
+
+static struct snd_pcm_hardware snd_card_asihpi_capture = {
+ .channels_min = 1,
+ .channels_max = 2,
+ .buffer_bytes_max = BUFFER_BYTES_MAX,
+ .period_bytes_min = PERIOD_BYTES_MIN,
+ .period_bytes_max = BUFFER_BYTES_MAX / PERIODS_MIN,
+ .periods_min = PERIODS_MIN,
+ .periods_max = BUFFER_BYTES_MAX / PERIOD_BYTES_MIN,
+ .fifo_size = 0,
+};
+
+static int snd_card_asihpi_capture_open(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_card_asihpi *card = snd_pcm_substream_chip(substream);
+ struct snd_card_asihpi_pcm *dpcm;
+ int err;
+
+ dpcm = kzalloc(sizeof(*dpcm), GFP_KERNEL);
+ if (dpcm == NULL)
+ return -ENOMEM;
+
+ snd_printd("hpi_instream_open adapter %d stream %d\n",
+ card->adapter_index, substream->number);
+
+ err = hpi_handle_error(
+ hpi_instream_open(ss, card->adapter_index,
+ substream->number, &dpcm->h_stream));
+ if (err)
+ kfree(dpcm);
+ if (err == HPI_ERROR_OBJ_ALREADY_OPEN)
+ return -EBUSY;
+ if (err)
+ return -EIO;
+
+
+ init_timer(&dpcm->timer);
+ dpcm->timer.data = (unsigned long) dpcm;
+ dpcm->timer.function = snd_card_asihpi_timer_function;
+ dpcm->substream = substream;
+ runtime->private_data = dpcm;
+ runtime->private_free = snd_card_asihpi_runtime_free;
+
+ snd_card_asihpi_capture.channels_max = card->in_max_chans;
+ snd_card_asihpi_capture_format(card, dpcm->h_stream,
+ &snd_card_asihpi_capture);
+ snd_card_asihpi_pcm_samplerates(card, &snd_card_asihpi_capture);
+ snd_card_asihpi_capture.info = SNDRV_PCM_INFO_INTERLEAVED;
+
+ if (card->support_mmap)
+ snd_card_asihpi_capture.info |= SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID;
+
+ runtime->hw = snd_card_asihpi_capture;
+
+ if (card->support_mmap)
+ err = snd_pcm_hw_constraint_pow2(runtime, 0,
+ SNDRV_PCM_HW_PARAM_BUFFER_BYTES);
+ if (err < 0)
+ return err;
+
+ snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+ card->update_interval_frames);
+ snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+ card->update_interval_frames * 2, UINT_MAX);
+
+ snd_pcm_set_sync(substream);
+
+ return 0;
+}
+
+static int snd_card_asihpi_capture_close(struct snd_pcm_substream *substream)
+{
+ struct snd_card_asihpi_pcm *dpcm = substream->runtime->private_data;
+
+ hpi_handle_error(hpi_instream_close(ss, dpcm->h_stream));
+ return 0;
+}
+
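+/* Copy callback for the non-mmap case: read from the HPI instream into
+ * the intermediate buffer, advance pcm_irq_pos, then copy to user space.
+ */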
+static int snd_card_asihpi_capture_copy(struct snd_pcm_substream *substream,
+ int channel, snd_pcm_uframes_t pos,
+ void __user *dst, snd_pcm_uframes_t count)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_card_asihpi_pcm *dpcm = runtime->private_data;
+ u32 data_size;
+
+ data_size = frames_to_bytes(runtime, count);
+
+ VPRINTK2("capture copy%d %d bytes\n", substream->number, data_size);
+ hpi_handle_error(hpi_instream_read_buf(ss, dpcm->h_stream,
+ runtime->dma_area, data_size));
+
+ /* Used by capture_pointer */
+ dpcm->pcm_irq_pos = dpcm->pcm_irq_pos + data_size;
+
+ if (copy_to_user(dst, runtime->dma_area, data_size))
+ return -EFAULT;
+
+ return 0;
+}
+
+static struct snd_pcm_ops snd_card_asihpi_capture_mmap_ops = {
+ .open = snd_card_asihpi_capture_open,
+ .close = snd_card_asihpi_capture_close,
+ .ioctl = snd_card_asihpi_capture_ioctl,
+ .hw_params = snd_card_asihpi_pcm_hw_params,
+ .hw_free = snd_card_asihpi_hw_free,
+ .prepare = snd_card_asihpi_capture_prepare,
+ .trigger = snd_card_asihpi_trigger,
+ .pointer = snd_card_asihpi_capture_pointer,
+};
+
+static struct snd_pcm_ops snd_card_asihpi_capture_ops = {
+ .open = snd_card_asihpi_capture_open,
+ .close = snd_card_asihpi_capture_close,
+ .ioctl = snd_card_asihpi_capture_ioctl,
+ .hw_params = snd_card_asihpi_pcm_hw_params,
+ .hw_free = snd_card_asihpi_hw_free,
+ .prepare = snd_card_asihpi_capture_prepare,
+ .trigger = snd_card_asihpi_trigger,
+ .pointer = snd_card_asihpi_capture_pointer,
+ .copy = snd_card_asihpi_capture_copy
+};
+
+static int __devinit snd_card_asihpi_pcm_new(struct snd_card_asihpi *asihpi,
+ int device, int substreams)
+{
+ struct snd_pcm *pcm;
+ int err;
+
+ err = snd_pcm_new(asihpi->card, "asihpi PCM", device,
+ asihpi->num_outstreams, asihpi->num_instreams,
+ &pcm);
+ if (err < 0)
+ return err;
+	/* pointer to ops struct is stored, don't change ops afterwards! */
+ if (asihpi->support_mmap) {
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
+ &snd_card_asihpi_playback_mmap_ops);
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
+ &snd_card_asihpi_capture_mmap_ops);
+ } else {
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
+ &snd_card_asihpi_playback_ops);
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
+ &snd_card_asihpi_capture_ops);
+ }
+
+ pcm->private_data = asihpi;
+ pcm->info_flags = 0;
+ strcpy(pcm->name, "asihpi PCM");
+
+	/*? do we want to emulate MMAP for non-BBM cards?
+		Jack doesn't work with ALSA's MMAP emulation - why not? */
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+ snd_dma_pci_data(asihpi->pci),
+ 64*1024, BUFFER_BYTES_MAX);
+
+ return 0;
+}
+
+/***************************** MIXER CONTROLS ****************/
+struct hpi_control {
+ u32 h_control;
+ u16 control_type;
+ u16 src_node_type;
+ u16 src_node_index;
+ u16 dst_node_type;
+ u16 dst_node_index;
+ u16 band;
+ char name[44]; /* copied to snd_ctl_elem_id.name[44]; */
+};
+
+static char *asihpi_tuner_band_names[] =
+{
+ "invalid",
+ "AM",
+ "FM mono",
+ "TV NTSC-M",
+ "FM stereo",
+ "AUX",
+ "TV PAL BG",
+ "TV PAL I",
+ "TV PAL DK",
+ "TV SECAM",
+};
+
+compile_time_assert(
+ (ARRAY_SIZE(asihpi_tuner_band_names) ==
+ (HPI_TUNER_BAND_LAST+1)),
+ assert_tuner_band_names_size);
+
+#if ASI_STYLE_NAMES
+static char *asihpi_src_names[] =
+{
+ "no source",
+ "outstream",
+ "line_in",
+ "aes_in",
+ "tuner",
+ "RF",
+ "clock",
+ "bitstr",
+ "mic",
+ "cobranet",
+ "analog_in",
+ "adapter",
+};
+#else
+static char *asihpi_src_names[] =
+{
+ "no source",
+ "PCM playback",
+ "line in",
+ "digital in",
+ "tuner",
+ "RF",
+ "clock",
+ "bitstream",
+ "mic",
+ "cobranet in",
+ "analog in",
+ "adapter",
+};
+#endif
+
+compile_time_assert(
+ (ARRAY_SIZE(asihpi_src_names) ==
+ (HPI_SOURCENODE_LAST_INDEX-HPI_SOURCENODE_BASE+1)),
+ assert_src_names_size);
+
+#if ASI_STYLE_NAMES
+static char *asihpi_dst_names[] =
+{
+ "no destination",
+ "instream",
+ "line_out",
+ "aes_out",
+ "RF",
+ "speaker" ,
+ "cobranet",
+ "analog_out",
+};
+#else
+static char *asihpi_dst_names[] =
+{
+ "no destination",
+ "PCM capture",
+ "line out",
+ "digital out",
+ "RF",
+ "speaker",
+ "cobranet out",
+ "analog out"
+};
+#endif
+
+compile_time_assert(
+ (ARRAY_SIZE(asihpi_dst_names) ==
+ (HPI_DESTNODE_LAST_INDEX-HPI_DESTNODE_BASE+1)),
+ assert_dst_names_size);
+
+static inline int ctl_add(struct snd_card *card, struct snd_kcontrol_new *ctl,
+ struct snd_card_asihpi *asihpi)
+{
+ int err;
+
+ err = snd_ctl_add(card, snd_ctl_new1(ctl, asihpi));
+ if (err < 0)
+ return err;
+ else if (mixer_dump)
+ snd_printk(KERN_INFO "added %s(%d)\n", ctl->name, ctl->index);
+
+ return 0;
+}
+
+/* Convert HPI control name and location into ALSA control name */
+static void asihpi_ctl_init(struct snd_kcontrol_new *snd_control,
+ struct hpi_control *hpi_ctl,
+ char *name)
+{
+ memset(snd_control, 0, sizeof(*snd_control));
+ snd_control->name = hpi_ctl->name;
+ snd_control->private_value = hpi_ctl->h_control;
+ snd_control->iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+ snd_control->index = 0;
+
+ if (hpi_ctl->src_node_type && hpi_ctl->dst_node_type)
+ sprintf(hpi_ctl->name, "%s%d to %s%d %s",
+ asihpi_src_names[hpi_ctl->src_node_type],
+ hpi_ctl->src_node_index,
+ asihpi_dst_names[hpi_ctl->dst_node_type],
+ hpi_ctl->dst_node_index,
+ name);
+ else if (hpi_ctl->dst_node_type) {
+ sprintf(hpi_ctl->name, "%s%d %s",
+ asihpi_dst_names[hpi_ctl->dst_node_type],
+ hpi_ctl->dst_node_index,
+ name);
+ } else {
+ sprintf(hpi_ctl->name, "%s%d %s",
+ asihpi_src_names[hpi_ctl->src_node_type],
+ hpi_ctl->src_node_index,
+ name);
+ }
+}
+
+/*------------------------------------------------------------
+ Volume controls
+ ------------------------------------------------------------*/
+#define VOL_STEP_mB 1
+static int snd_asihpi_volume_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ u32 h_control = kcontrol->private_value;
+ u16 err;
+ /* native gains are in millibels */
+ short min_gain_mB;
+ short max_gain_mB;
+ short step_gain_mB;
+
+ err = hpi_volume_query_range(ss, h_control,
+ &min_gain_mB, &max_gain_mB, &step_gain_mB);
+ if (err) {
+ max_gain_mB = 0;
+ min_gain_mB = -10000;
+ step_gain_mB = VOL_STEP_mB;
+ }
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 2;
+ uinfo->value.integer.min = min_gain_mB / VOL_STEP_mB;
+ uinfo->value.integer.max = max_gain_mB / VOL_STEP_mB;
+ uinfo->value.integer.step = step_gain_mB / VOL_STEP_mB;
+ return 0;
+}
+
+static int snd_asihpi_volume_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u32 h_control = kcontrol->private_value;
+ short an_gain_mB[HPI_MAX_CHANNELS];
+
+ hpi_handle_error(hpi_volume_get_gain(ss, h_control, an_gain_mB));
+ ucontrol->value.integer.value[0] = an_gain_mB[0] / VOL_STEP_mB;
+ ucontrol->value.integer.value[1] = an_gain_mB[1] / VOL_STEP_mB;
+
+ return 0;
+}
+
+static int snd_asihpi_volume_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int change;
+ u32 h_control = kcontrol->private_value;
+ short an_gain_mB[HPI_MAX_CHANNELS];
+
+ an_gain_mB[0] =
+ (ucontrol->value.integer.value[0]) * VOL_STEP_mB;
+ an_gain_mB[1] =
+ (ucontrol->value.integer.value[1]) * VOL_STEP_mB;
+ /* change = asihpi->mixer_volume[addr][0] != left ||
+ asihpi->mixer_volume[addr][1] != right;
+ */
+ change = 1;
+ hpi_handle_error(hpi_volume_set_gain(ss, h_control, an_gain_mB));
+ return change;
+}
+
+static const DECLARE_TLV_DB_SCALE(db_scale_100, -10000, VOL_STEP_mB, 0);
+
+static int __devinit snd_asihpi_volume_add(struct snd_card_asihpi *asihpi,
+ struct hpi_control *hpi_ctl)
+{
+ struct snd_card *card = asihpi->card;
+ struct snd_kcontrol_new snd_control;
+
+ asihpi_ctl_init(&snd_control, hpi_ctl, "volume");
+ snd_control.access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ;
+ snd_control.info = snd_asihpi_volume_info;
+ snd_control.get = snd_asihpi_volume_get;
+ snd_control.put = snd_asihpi_volume_put;
+ snd_control.tlv.p = db_scale_100;
+
+ return ctl_add(card, &snd_control, asihpi);
+}
+
+/*------------------------------------------------------------
+ Level controls
+ ------------------------------------------------------------*/
+static int snd_asihpi_level_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ u32 h_control = kcontrol->private_value;
+ u16 err;
+ short min_gain_mB;
+ short max_gain_mB;
+ short step_gain_mB;
+
+ err =
+ hpi_level_query_range(ss, h_control, &min_gain_mB,
+ &max_gain_mB, &step_gain_mB);
+ if (err) {
+ max_gain_mB = 2400;
+ min_gain_mB = -1000;
+ step_gain_mB = 100;
+ }
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 2;
+ uinfo->value.integer.min = min_gain_mB / HPI_UNITS_PER_dB;
+ uinfo->value.integer.max = max_gain_mB / HPI_UNITS_PER_dB;
+ uinfo->value.integer.step = step_gain_mB / HPI_UNITS_PER_dB;
+ return 0;
+}
+
+static int snd_asihpi_level_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u32 h_control = kcontrol->private_value;
+ short an_gain_mB[HPI_MAX_CHANNELS];
+
+ hpi_handle_error(hpi_level_get_gain(ss, h_control, an_gain_mB));
+ ucontrol->value.integer.value[0] =
+ an_gain_mB[0] / HPI_UNITS_PER_dB;
+ ucontrol->value.integer.value[1] =
+ an_gain_mB[1] / HPI_UNITS_PER_dB;
+
+ return 0;
+}
+
+static int snd_asihpi_level_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int change;
+ u32 h_control = kcontrol->private_value;
+ short an_gain_mB[HPI_MAX_CHANNELS];
+
+ an_gain_mB[0] =
+ (ucontrol->value.integer.value[0]) * HPI_UNITS_PER_dB;
+ an_gain_mB[1] =
+ (ucontrol->value.integer.value[1]) * HPI_UNITS_PER_dB;
+ /* change = asihpi->mixer_level[addr][0] != left ||
+ asihpi->mixer_level[addr][1] != right;
+ */
+ change = 1;
+ hpi_handle_error(hpi_level_set_gain(ss, h_control, an_gain_mB));
+ return change;
+}
+
+static const DECLARE_TLV_DB_SCALE(db_scale_level, -1000, 100, 0);
+
+static int __devinit snd_asihpi_level_add(struct snd_card_asihpi *asihpi,
+ struct hpi_control *hpi_ctl)
+{
+ struct snd_card *card = asihpi->card;
+ struct snd_kcontrol_new snd_control;
+
+	/* can't use 'volume' because some nodes have volume as well */
+ asihpi_ctl_init(&snd_control, hpi_ctl, "level");
+ snd_control.access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ;
+ snd_control.info = snd_asihpi_level_info;
+ snd_control.get = snd_asihpi_level_get;
+ snd_control.put = snd_asihpi_level_put;
+ snd_control.tlv.p = db_scale_level;
+
+ return ctl_add(card, &snd_control, asihpi);
+}
+
+/*------------------------------------------------------------
+ AESEBU controls
+ ------------------------------------------------------------*/
+
+/* AESEBU format */
+static char *asihpi_aesebu_format_names[] =
+{
+ "N/A",
+ "S/PDIF",
+ "AES/EBU",
+};
+
+static int snd_asihpi_aesebu_format_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->count = 1;
+ uinfo->value.enumerated.items = 3;
+
+ if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
+ uinfo->value.enumerated.item =
+ uinfo->value.enumerated.items - 1;
+
+ strcpy(uinfo->value.enumerated.name,
+ asihpi_aesebu_format_names[uinfo->value.enumerated.item]);
+
+ return 0;
+}
+
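+/* Generic AES/EBU format get/put helpers, shared by the receiver and
+ * transmitter controls; the HPI accessor is passed in as 'func'.
+ */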
+static int snd_asihpi_aesebu_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol,
+ u16 (*func)(const struct hpi_hsubsys *, u32, u16 *))
+{
+ u32 h_control = kcontrol->private_value;
+ u16 source, err;
+
+ err = func(ss, h_control, &source);
+
+ /* default to N/A */
+ ucontrol->value.enumerated.item[0] = 0;
+ /* return success but set the control to N/A */
+ if (err)
+ return 0;
+ if (source == HPI_AESEBU_FORMAT_SPDIF)
+ ucontrol->value.enumerated.item[0] = 1;
+ if (source == HPI_AESEBU_FORMAT_AESEBU)
+ ucontrol->value.enumerated.item[0] = 2;
+
+ return 0;
+}
+
+static int snd_asihpi_aesebu_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol,
+ u16 (*func)(const struct hpi_hsubsys *, u32, u16))
+{
+ u32 h_control = kcontrol->private_value;
+
+ /* default to S/PDIF */
+ u16 source = HPI_AESEBU_FORMAT_SPDIF;
+
+ if (ucontrol->value.enumerated.item[0] == 1)
+ source = HPI_AESEBU_FORMAT_SPDIF;
+ if (ucontrol->value.enumerated.item[0] == 2)
+ source = HPI_AESEBU_FORMAT_AESEBU;
+
+ if (func(ss, h_control, source) != 0)
+ return -EINVAL;
+
+ return 1;
+}
+
+static int snd_asihpi_aesebu_rx_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol) {
+ return snd_asihpi_aesebu_format_get(kcontrol, ucontrol,
+ HPI_AESEBU__receiver_get_format);
+}
+
+static int snd_asihpi_aesebu_rx_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol) {
+ return snd_asihpi_aesebu_format_put(kcontrol, ucontrol,
+ HPI_AESEBU__receiver_set_format);
+}
+
+static int snd_asihpi_aesebu_rxstatus_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
+
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = 0X1F;
+ uinfo->value.integer.step = 1;
+
+ return 0;
+}
+
+static int snd_asihpi_aesebu_rxstatus_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol) {
+
+ u32 h_control = kcontrol->private_value;
+ u16 status;
+
+ hpi_handle_error(HPI_AESEBU__receiver_get_error_status(
+ ss, h_control, &status));
+ ucontrol->value.integer.value[0] = status;
+ return 0;
+}
+
+static int __devinit snd_asihpi_aesebu_rx_add(struct snd_card_asihpi *asihpi,
+ struct hpi_control *hpi_ctl)
+{
+ struct snd_card *card = asihpi->card;
+ struct snd_kcontrol_new snd_control;
+
+ asihpi_ctl_init(&snd_control, hpi_ctl, "format");
+ snd_control.access = SNDRV_CTL_ELEM_ACCESS_READWRITE;
+ snd_control.info = snd_asihpi_aesebu_format_info;
+ snd_control.get = snd_asihpi_aesebu_rx_format_get;
+ snd_control.put = snd_asihpi_aesebu_rx_format_put;
+
+
+ if (ctl_add(card, &snd_control, asihpi) < 0)
+ return -EINVAL;
+
+ asihpi_ctl_init(&snd_control, hpi_ctl, "status");
+ snd_control.access =
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE | SNDRV_CTL_ELEM_ACCESS_READ;
+ snd_control.info = snd_asihpi_aesebu_rxstatus_info;
+ snd_control.get = snd_asihpi_aesebu_rxstatus_get;
+
+ return ctl_add(card, &snd_control, asihpi);
+}
+
+static int snd_asihpi_aesebu_tx_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol) {
+ return snd_asihpi_aesebu_format_get(kcontrol, ucontrol,
+ HPI_AESEBU__transmitter_get_format);
+}
+
+static int snd_asihpi_aesebu_tx_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol) {
+ return snd_asihpi_aesebu_format_put(kcontrol, ucontrol,
+ HPI_AESEBU__transmitter_set_format);
+}
+
+
+static int __devinit snd_asihpi_aesebu_tx_add(struct snd_card_asihpi *asihpi,
+ struct hpi_control *hpi_ctl)
+{
+ struct snd_card *card = asihpi->card;
+ struct snd_kcontrol_new snd_control;
+
+ asihpi_ctl_init(&snd_control, hpi_ctl, "format");
+ snd_control.access = SNDRV_CTL_ELEM_ACCESS_READWRITE;
+ snd_control.info = snd_asihpi_aesebu_format_info;
+ snd_control.get = snd_asihpi_aesebu_tx_format_get;
+ snd_control.put = snd_asihpi_aesebu_tx_format_put;
+
+ return ctl_add(card, &snd_control, asihpi);
+}
+
+/*------------------------------------------------------------
+ Tuner controls
+ ------------------------------------------------------------*/
+
+/* Gain */
+
+static int snd_asihpi_tuner_gain_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ u32 h_control = kcontrol->private_value;
+ u16 err;
+ short idx;
+ u16 gain_range[3];
+
+ for (idx = 0; idx < 3; idx++) {
+ err = hpi_tuner_query_gain(ss, h_control,
+ idx, &gain_range[idx]);
+ if (err != 0)
+ return err;
+ }
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
+ uinfo->value.integer.min = ((int)gain_range[0]) / HPI_UNITS_PER_dB;
+ uinfo->value.integer.max = ((int)gain_range[1]) / HPI_UNITS_PER_dB;
+ uinfo->value.integer.step = ((int) gain_range[2]) / HPI_UNITS_PER_dB;
+ return 0;
+}
+
+static int snd_asihpi_tuner_gain_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ /*
+ struct snd_card_asihpi *asihpi = snd_kcontrol_chip(kcontrol);
+ */
+ u32 h_control = kcontrol->private_value;
+ short gain;
+
+ hpi_handle_error(hpi_tuner_get_gain(ss, h_control, &gain));
+ ucontrol->value.integer.value[0] = gain / HPI_UNITS_PER_dB;
+
+ return 0;
+}
+
+static int snd_asihpi_tuner_gain_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ /*
+ struct snd_card_asihpi *asihpi = snd_kcontrol_chip(kcontrol);
+ */
+ u32 h_control = kcontrol->private_value;
+ short gain;
+
+ gain = (ucontrol->value.integer.value[0]) * HPI_UNITS_PER_dB;
+ hpi_handle_error(hpi_tuner_set_gain(ss, h_control, gain));
+
+ return 1;
+}
+
+/* Band */
+
+static int asihpi_tuner_band_query(struct snd_kcontrol *kcontrol,
+ u16 *band_list, u32 len) {
+ u32 h_control = kcontrol->private_value;
+ u16 err = 0;
+ u32 i;
+
+ for (i = 0; i < len; i++) {
+ err = hpi_tuner_query_band(ss,
+ h_control, i, &band_list[i]);
+ if (err != 0)
+ break;
+ }
+
+ if (err && (err != HPI_ERROR_INVALID_OBJ_INDEX))
+ return -EIO;
+
+ return i;
+}
+
+static int snd_asihpi_tuner_band_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ u16 tuner_bands[HPI_TUNER_BAND_LAST];
+ int num_bands = 0;
+
+ num_bands = asihpi_tuner_band_query(kcontrol, tuner_bands,
+ HPI_TUNER_BAND_LAST);
+
+ if (num_bands < 0)
+ return num_bands;
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->count = 1;
+ uinfo->value.enumerated.items = num_bands;
+
+ if (num_bands > 0) {
+ if (uinfo->value.enumerated.item >=
+ uinfo->value.enumerated.items)
+ uinfo->value.enumerated.item =
+ uinfo->value.enumerated.items - 1;
+
+ strcpy(uinfo->value.enumerated.name,
+ asihpi_tuner_band_names[
+ tuner_bands[uinfo->value.enumerated.item]]);
+
+ }
+ return 0;
+}
+
+static int snd_asihpi_tuner_band_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u32 h_control = kcontrol->private_value;
+ /*
+ struct snd_card_asihpi *asihpi = snd_kcontrol_chip(kcontrol);
+ */
+ u16 band, idx;
+ u16 tuner_bands[HPI_TUNER_BAND_LAST];
+ u32 num_bands = 0;
+
+ num_bands = asihpi_tuner_band_query(kcontrol, tuner_bands,
+ HPI_TUNER_BAND_LAST);
+
+ hpi_handle_error(hpi_tuner_get_band(ss, h_control, &band));
+
+ ucontrol->value.enumerated.item[0] = -1;
+ for (idx = 0; idx < HPI_TUNER_BAND_LAST; idx++)
+ if (tuner_bands[idx] == band) {
+ ucontrol->value.enumerated.item[0] = idx;
+ break;
+ }
+
+ return 0;
+}
+
+static int snd_asihpi_tuner_band_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ /*
+ struct snd_card_asihpi *asihpi = snd_kcontrol_chip(kcontrol);
+ */
+ u32 h_control = kcontrol->private_value;
+ u16 band;
+ u16 tuner_bands[HPI_TUNER_BAND_LAST];
+ u32 num_bands = 0;
+
+ num_bands = asihpi_tuner_band_query(kcontrol, tuner_bands,
+ HPI_TUNER_BAND_LAST);
+
+ band = tuner_bands[ucontrol->value.enumerated.item[0]];
+ hpi_handle_error(hpi_tuner_set_band(ss, h_control, band));
+
+ return 1;
+}
+
+/* Freq */
+
+static int snd_asihpi_tuner_freq_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ u32 h_control = kcontrol->private_value;
+ u16 err;
+ u16 tuner_bands[HPI_TUNER_BAND_LAST];
+ u16 num_bands = 0, band_iter, idx;
+ u32 freq_range[3], temp_freq_range[3];
+
+ num_bands = asihpi_tuner_band_query(kcontrol, tuner_bands,
+ HPI_TUNER_BAND_LAST);
+
+ freq_range[0] = INT_MAX;
+ freq_range[1] = 0;
+ freq_range[2] = INT_MAX;
+
+ for (band_iter = 0; band_iter < num_bands; band_iter++) {
+ for (idx = 0; idx < 3; idx++) {
+ err = hpi_tuner_query_frequency(ss, h_control,
+ idx, tuner_bands[band_iter],
+ &temp_freq_range[idx]);
+ if (err != 0)
+ return err;
+ }
+
+ /* skip band with bogus stepping */
+ if (temp_freq_range[2] <= 0)
+ continue;
+
+ if (temp_freq_range[0] < freq_range[0])
+ freq_range[0] = temp_freq_range[0];
+ if (temp_freq_range[1] > freq_range[1])
+ freq_range[1] = temp_freq_range[1];
+ if (temp_freq_range[2] < freq_range[2])
+ freq_range[2] = temp_freq_range[2];
+ }
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
+ uinfo->value.integer.min = ((int)freq_range[0]);
+ uinfo->value.integer.max = ((int)freq_range[1]);
+ uinfo->value.integer.step = ((int)freq_range[2]);
+ return 0;
+}
+
+static int snd_asihpi_tuner_freq_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u32 h_control = kcontrol->private_value;
+ u32 freq;
+
+ hpi_handle_error(hpi_tuner_get_frequency(ss, h_control, &freq));
+ ucontrol->value.integer.value[0] = freq;
+
+ return 0;
+}
+
+static int snd_asihpi_tuner_freq_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u32 h_control = kcontrol->private_value;
+ u32 freq;
+
+ freq = ucontrol->value.integer.value[0];
+ hpi_handle_error(hpi_tuner_set_frequency(ss, h_control, freq));
+
+ return 1;
+}
+
+/* Tuner control group initializer */
+static int __devinit snd_asihpi_tuner_add(struct snd_card_asihpi *asihpi,
+ struct hpi_control *hpi_ctl)
+{
+ struct snd_card *card = asihpi->card;
+ struct snd_kcontrol_new snd_control;
+
+ snd_control.private_value = hpi_ctl->h_control;
+ snd_control.access = SNDRV_CTL_ELEM_ACCESS_READWRITE;
+
+ if (!hpi_tuner_get_gain(ss, hpi_ctl->h_control, NULL)) {
+ asihpi_ctl_init(&snd_control, hpi_ctl, "gain");
+ snd_control.info = snd_asihpi_tuner_gain_info;
+ snd_control.get = snd_asihpi_tuner_gain_get;
+ snd_control.put = snd_asihpi_tuner_gain_put;
+
+ if (ctl_add(card, &snd_control, asihpi) < 0)
+ return -EINVAL;
+ }
+
+ asihpi_ctl_init(&snd_control, hpi_ctl, "band");
+ snd_control.info = snd_asihpi_tuner_band_info;
+ snd_control.get = snd_asihpi_tuner_band_get;
+ snd_control.put = snd_asihpi_tuner_band_put;
+
+ if (ctl_add(card, &snd_control, asihpi) < 0)
+ return -EINVAL;
+
+ asihpi_ctl_init(&snd_control, hpi_ctl, "freq");
+ snd_control.info = snd_asihpi_tuner_freq_info;
+ snd_control.get = snd_asihpi_tuner_freq_get;
+ snd_control.put = snd_asihpi_tuner_freq_put;
+
+ return ctl_add(card, &snd_control, asihpi);
+}
+
+/*------------------------------------------------------------
+ Meter controls
+ ------------------------------------------------------------*/
+static int snd_asihpi_meter_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = HPI_MAX_CHANNELS;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = 0x7FFFFFFF;
+ return 0;
+}
+
+/* linear values for 10dB steps */
+static int log2lin[] = {
+ 0x7FFFFFFF, /* 0dB */
+ 679093956,
+ 214748365,
+ 67909396,
+ 21474837,
+ 6790940,
+ 2147484, /* -60dB */
+ 679094,
+ 214748, /* -80 */
+ 67909,
+ 21475, /* -100 */
+ 6791,
+ 2147,
+ 679,
+ 214,
+ 68,
+ 21,
+ 7,
+ 2
+};
+
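+/* Read peak meters: non-negative millibel readings are scaled to a
+ * pseudo-linear value (mB << 16); negative readings are mapped through
+ * the log2lin table above in 10 dB steps.
+ */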
+static int snd_asihpi_meter_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u32 h_control = kcontrol->private_value;
+ short an_gain_mB[HPI_MAX_CHANNELS], i;
+ u16 err;
+
+ err = hpi_meter_get_peak(ss, h_control, an_gain_mB);
+
+ for (i = 0; i < HPI_MAX_CHANNELS; i++) {
+ if (err) {
+ ucontrol->value.integer.value[i] = 0;
+ } else if (an_gain_mB[i] >= 0) {
+ ucontrol->value.integer.value[i] =
+ an_gain_mB[i] << 16;
+ } else {
+			/* Negative values are log values in millibels
+			 * (< -60dB); convert to (roughly!) linear.
+			 */
+ ucontrol->value.integer.value[i] =
+ log2lin[an_gain_mB[i] / -1000];
+ }
+ }
+ return 0;
+}
+
+static int __devinit snd_asihpi_meter_add(struct snd_card_asihpi *asihpi,
+ struct hpi_control *hpi_ctl, int subidx)
+{
+ struct snd_card *card = asihpi->card;
+ struct snd_kcontrol_new snd_control;
+
+ asihpi_ctl_init(&snd_control, hpi_ctl, "meter");
+ snd_control.access =
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE | SNDRV_CTL_ELEM_ACCESS_READ;
+ snd_control.info = snd_asihpi_meter_info;
+ snd_control.get = snd_asihpi_meter_get;
+
+ snd_control.index = subidx;
+
+ return ctl_add(card, &snd_control, asihpi);
+}
+
+/*------------------------------------------------------------
+ Multiplexer controls
+ ------------------------------------------------------------*/
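+/* Count multiplexer sources by querying successive source indices until
+ * the HPI call fails.
+ */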
+static int snd_card_asihpi_mux_count_sources(struct snd_kcontrol *snd_control)
+{
+ u32 h_control = snd_control->private_value;
+ struct hpi_control hpi_ctl;
+ int s, err;
+ for (s = 0; s < 32; s++) {
+ err = hpi_multiplexer_query_source(ss, h_control, s,
+ &hpi_ctl.
+ src_node_type,
+ &hpi_ctl.
+ src_node_index);
+ if (err)
+ break;
+ }
+ return s;
+}
+
+static int snd_asihpi_mux_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ int err;
+ u16 src_node_type, src_node_index;
+ u32 h_control = kcontrol->private_value;
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->count = 1;
+ uinfo->value.enumerated.items =
+ snd_card_asihpi_mux_count_sources(kcontrol);
+
+ if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
+ uinfo->value.enumerated.item =
+ uinfo->value.enumerated.items - 1;
+
+ err =
+ hpi_multiplexer_query_source(ss, h_control,
+ uinfo->value.enumerated.item,
+ &src_node_type, &src_node_index);
+
+ sprintf(uinfo->value.enumerated.name, "%s %d",
+ asihpi_src_names[src_node_type - HPI_SOURCENODE_BASE],
+ src_node_index);
+ return 0;
+}
+
+static int snd_asihpi_mux_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u32 h_control = kcontrol->private_value;
+ u16 source_type, source_index;
+ u16 src_node_type, src_node_index;
+ int s;
+
+ hpi_handle_error(hpi_multiplexer_get_source(ss, h_control,
+ &source_type, &source_index));
+ /* Should cache this search result! */
+ for (s = 0; s < 256; s++) {
+ if (hpi_multiplexer_query_source(ss, h_control, s,
+ &src_node_type, &src_node_index))
+ break;
+
+ if ((source_type == src_node_type)
+ && (source_index == src_node_index)) {
+ ucontrol->value.enumerated.item[0] = s;
+ return 0;
+ }
+ }
+ snd_printd(KERN_WARNING
+ "control %x failed to match mux source %hu %hu\n",
+ h_control, source_type, source_index);
+ ucontrol->value.enumerated.item[0] = 0;
+ return 0;
+}
+
+static int snd_asihpi_mux_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int change;
+ u32 h_control = kcontrol->private_value;
+ u16 source_type, source_index;
+ u16 e;
+
+ change = 1;
+
+ e = hpi_multiplexer_query_source(ss, h_control,
+ ucontrol->value.enumerated.item[0],
+ &source_type, &source_index);
+ if (!e)
+ hpi_handle_error(
+ hpi_multiplexer_set_source(ss, h_control,
+ source_type, source_index));
+ return change;
+}
+
+
+static int __devinit snd_asihpi_mux_add(struct snd_card_asihpi *asihpi,
+ struct hpi_control *hpi_ctl)
+{
+ struct snd_card *card = asihpi->card;
+ struct snd_kcontrol_new snd_control;
+
+#if ASI_STYLE_NAMES
+ asihpi_ctl_init(&snd_control, hpi_ctl, "multiplexer");
+#else
+ asihpi_ctl_init(&snd_control, hpi_ctl, "route");
+#endif
+ snd_control.access = SNDRV_CTL_ELEM_ACCESS_READWRITE;
+ snd_control.info = snd_asihpi_mux_info;
+ snd_control.get = snd_asihpi_mux_get;
+ snd_control.put = snd_asihpi_mux_put;
+
+ return ctl_add(card, &snd_control, asihpi);
+
+}
+
+/*------------------------------------------------------------
+ Channel mode controls
+ ------------------------------------------------------------*/
+static int snd_asihpi_cmode_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ static char *mode_names[HPI_CHANNEL_MODE_LAST] = {
+ "normal", "swap",
+ "from_left", "from_right",
+ "to_left", "to_right"
+ };
+
+ u32 h_control = kcontrol->private_value;
+ u16 mode;
+ int i;
+
+ /* HPI channel mode values can be from 1 to 6
+ Some adapters only support a contiguous subset
+ */
+ for (i = 0; i < HPI_CHANNEL_MODE_LAST; i++)
+ if (hpi_channel_mode_query_mode(
+ ss, h_control, i, &mode))
+ break;
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->count = 1;
+ uinfo->value.enumerated.items = i;
+
+ if (uinfo->value.enumerated.item >= i)
+ uinfo->value.enumerated.item = i - 1;
+
+ strcpy(uinfo->value.enumerated.name,
+ mode_names[uinfo->value.enumerated.item]);
+
+ return 0;
+}
+
+static int snd_asihpi_cmode_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u32 h_control = kcontrol->private_value;
+ u16 mode;
+
+ if (hpi_channel_mode_get(ss, h_control, &mode))
+ mode = 1;
+
+ ucontrol->value.enumerated.item[0] = mode - 1;
+
+ return 0;
+}
+
+static int snd_asihpi_cmode_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int change;
+ u32 h_control = kcontrol->private_value;
+
+ change = 1;
+
+ hpi_handle_error(hpi_channel_mode_set(ss, h_control,
+ ucontrol->value.enumerated.item[0] + 1));
+ return change;
+}
+
+
+static int __devinit snd_asihpi_cmode_add(struct snd_card_asihpi *asihpi,
+ struct hpi_control *hpi_ctl)
+{
+ struct snd_card *card = asihpi->card;
+ struct snd_kcontrol_new snd_control;
+
+ asihpi_ctl_init(&snd_control, hpi_ctl, "channel mode");
+ snd_control.access = SNDRV_CTL_ELEM_ACCESS_READWRITE;
+ snd_control.info = snd_asihpi_cmode_info;
+ snd_control.get = snd_asihpi_cmode_get;
+ snd_control.put = snd_asihpi_cmode_put;
+
+ return ctl_add(card, &snd_control, asihpi);
+}
+
+/*------------------------------------------------------------
+ Sampleclock source controls
+ ------------------------------------------------------------*/
+
+static char *sampleclock_sources[MAX_CLOCKSOURCES] =
+ { "N/A", "local PLL", "AES/EBU sync", "word external", "word header",
+ "SMPTE", "AES/EBU in1", "auto", "network", "invalid",
+ "prev module",
+ "AES/EBU in2", "AES/EBU in3", "AES/EBU in4", "AES/EBU in5",
+ "AES/EBU in6", "AES/EBU in7", "AES/EBU in8"};
+
+
+
+static int snd_asihpi_clksrc_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ struct snd_card_asihpi *asihpi =
+ (struct snd_card_asihpi *)(kcontrol->private_data);
+ struct clk_cache *clkcache = &asihpi->cc;
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->count = 1;
+ uinfo->value.enumerated.items = clkcache->count;
+
+ if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
+ uinfo->value.enumerated.item =
+ uinfo->value.enumerated.items - 1;
+
+ strcpy(uinfo->value.enumerated.name,
+ clkcache->s[uinfo->value.enumerated.item].name);
+ return 0;
+}
+
+static int snd_asihpi_clksrc_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_card_asihpi *asihpi =
+ (struct snd_card_asihpi *)(kcontrol->private_data);
+ struct clk_cache *clkcache = &asihpi->cc;
+ u32 h_control = kcontrol->private_value;
+ u16 source, srcindex = 0;
+ int i;
+
+ ucontrol->value.enumerated.item[0] = 0;
+ if (hpi_sample_clock_get_source(ss, h_control, &source))
+ source = 0;
+
+ if (source == HPI_SAMPLECLOCK_SOURCE_AESEBU_INPUT)
+ if (hpi_sample_clock_get_source_index(ss, h_control, &srcindex))
+ srcindex = 0;
+
+ for (i = 0; i < clkcache->count; i++)
+ if ((clkcache->s[i].source == source) &&
+ (clkcache->s[i].index == srcindex))
+ break;
+
+ ucontrol->value.enumerated.item[0] = i;
+
+ return 0;
+}
+
+static int snd_asihpi_clksrc_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_card_asihpi *asihpi =
+ (struct snd_card_asihpi *)(kcontrol->private_data);
+ struct clk_cache *clkcache = &asihpi->cc;
+ int change, item;
+ u32 h_control = kcontrol->private_value;
+
+ change = 1;
+ item = ucontrol->value.enumerated.item[0];
+ if (item >= clkcache->count)
+ item = clkcache->count-1;
+
+ hpi_handle_error(hpi_sample_clock_set_source(ss,
+ h_control, clkcache->s[item].source));
+
+ if (clkcache->s[item].source == HPI_SAMPLECLOCK_SOURCE_AESEBU_INPUT)
+ hpi_handle_error(hpi_sample_clock_set_source_index(ss,
+ h_control, clkcache->s[item].index));
+ return change;
+}
+
+/*------------------------------------------------------------
+ Clkrate controls
+ ------------------------------------------------------------*/
+/* Need to change this to enumerated control with list of rates */
+static int snd_asihpi_clklocal_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
+ uinfo->value.integer.min = 8000;
+ uinfo->value.integer.max = 192000;
+ uinfo->value.integer.step = 100;
+
+ return 0;
+}
+
+static int snd_asihpi_clklocal_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u32 h_control = kcontrol->private_value;
+ u32 rate;
+ u16 e;
+
+ e = hpi_sample_clock_get_local_rate(ss, h_control, &rate);
+ if (!e)
+ ucontrol->value.integer.value[0] = rate;
+ else
+ ucontrol->value.integer.value[0] = 0;
+ return 0;
+}
+
+static int snd_asihpi_clklocal_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int change;
+ u32 h_control = kcontrol->private_value;
+
+ /* change = asihpi->mixer_clkrate[addr][0] != left ||
+ asihpi->mixer_clkrate[addr][1] != right;
+ */
+ change = 1;
+ hpi_handle_error(hpi_sample_clock_set_local_rate(ss, h_control,
+ ucontrol->value.integer.value[0]));
+ return change;
+}
+
+static int snd_asihpi_clkrate_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
+ uinfo->value.integer.min = 8000;
+ uinfo->value.integer.max = 192000;
+ uinfo->value.integer.step = 100;
+
+ return 0;
+}
+
+static int snd_asihpi_clkrate_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u32 h_control = kcontrol->private_value;
+ u32 rate;
+ u16 e;
+
+ e = hpi_sample_clock_get_sample_rate(ss, h_control, &rate);
+ if (!e)
+ ucontrol->value.integer.value[0] = rate;
+ else
+ ucontrol->value.integer.value[0] = 0;
+ return 0;
+}
+
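+/* Sample clock control group: enumerate the available clock sources
+ * (including individual AES/EBU inputs) into the clk_cache, then register
+ * "source", optional "local_rate", and read-only "rate" controls.
+ */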
+static int __devinit snd_asihpi_sampleclock_add(struct snd_card_asihpi *asihpi,
+ struct hpi_control *hpi_ctl)
+{
+ struct snd_card *card = asihpi->card;
+ struct snd_kcontrol_new snd_control;
+
+ struct clk_cache *clkcache = &asihpi->cc;
+ u32 hSC = hpi_ctl->h_control;
+ int has_aes_in = 0;
+ int i, j;
+ u16 source;
+
+ snd_control.private_value = hpi_ctl->h_control;
+
+ clkcache->has_local = 0;
+
+ for (i = 0; i <= HPI_SAMPLECLOCK_SOURCE_LAST; i++) {
+ if (hpi_sample_clock_query_source(ss, hSC,
+ i, &source))
+ break;
+ clkcache->s[i].source = source;
+ clkcache->s[i].index = 0;
+ clkcache->s[i].name = sampleclock_sources[source];
+ if (source == HPI_SAMPLECLOCK_SOURCE_AESEBU_INPUT)
+ has_aes_in = 1;
+ if (source == HPI_SAMPLECLOCK_SOURCE_LOCAL)
+ clkcache->has_local = 1;
+ }
+ if (has_aes_in)
+ /* already will have picked up index 0 above */
+ for (j = 1; j < 8; j++) {
+ if (hpi_sample_clock_query_source_index(ss, hSC,
+ j, HPI_SAMPLECLOCK_SOURCE_AESEBU_INPUT,
+ &source))
+ break;
+ clkcache->s[i].source =
+ HPI_SAMPLECLOCK_SOURCE_AESEBU_INPUT;
+ clkcache->s[i].index = j;
+ clkcache->s[i].name = sampleclock_sources[
+ j+HPI_SAMPLECLOCK_SOURCE_LAST];
+ i++;
+ }
+ clkcache->count = i;
+
+ asihpi_ctl_init(&snd_control, hpi_ctl, "source");
+ snd_control.access = SNDRV_CTL_ELEM_ACCESS_READWRITE;
+ snd_control.info = snd_asihpi_clksrc_info;
+ snd_control.get = snd_asihpi_clksrc_get;
+ snd_control.put = snd_asihpi_clksrc_put;
+ if (ctl_add(card, &snd_control, asihpi) < 0)
+ return -EINVAL;
+
+
+ if (clkcache->has_local) {
+ asihpi_ctl_init(&snd_control, hpi_ctl, "local_rate");
+ snd_control.access = SNDRV_CTL_ELEM_ACCESS_READWRITE;
+ snd_control.info = snd_asihpi_clklocal_info;
+ snd_control.get = snd_asihpi_clklocal_get;
+ snd_control.put = snd_asihpi_clklocal_put;
+
+
+ if (ctl_add(card, &snd_control, asihpi) < 0)
+ return -EINVAL;
+ }
+
+ asihpi_ctl_init(&snd_control, hpi_ctl, "rate");
+ snd_control.access =
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE | SNDRV_CTL_ELEM_ACCESS_READ;
+ snd_control.info = snd_asihpi_clkrate_info;
+ snd_control.get = snd_asihpi_clkrate_get;
+
+ return ctl_add(card, &snd_control, asihpi);
+}
+/*------------------------------------------------------------
+ Mixer
+ ------------------------------------------------------------*/
+
+static int __devinit snd_card_asihpi_mixer_new(struct snd_card_asihpi *asihpi)
+{
+ struct snd_card *card = asihpi->card;
+ unsigned int idx = 0;
+ unsigned int subindex = 0;
+ int err;
+ struct hpi_control hpi_ctl, prev_ctl;
+
+ if (snd_BUG_ON(!asihpi))
+ return -EINVAL;
+ strcpy(card->mixername, "asihpi mixer");
+ /* prev_ctl is compared against below before it is first assigned;
+ clear it so the first control always starts at subindex 0 */
+ memset(&prev_ctl, 0, sizeof(prev_ctl));
+
+ err =
+ hpi_mixer_open(ss, asihpi->adapter_index,
+ &asihpi->h_mixer);
+ hpi_handle_error(err);
+ if (err)
+ return -err;
+
+ for (idx = 0; idx < 2000; idx++) {
+ err = hpi_mixer_get_control_by_index(
+ ss, asihpi->h_mixer,
+ idx,
+ &hpi_ctl.src_node_type,
+ &hpi_ctl.src_node_index,
+ &hpi_ctl.dst_node_type,
+ &hpi_ctl.dst_node_index,
+ &hpi_ctl.control_type,
+ &hpi_ctl.h_control);
+ if (err) {
+ if (err == HPI_ERROR_CONTROL_DISABLED) {
+ if (mixer_dump)
+ snd_printk(KERN_INFO
+ "disabled HPI control(%d)\n",
+ idx);
+ continue;
+ } else
+ break;
+
+ }
+
+ hpi_ctl.src_node_type -= HPI_SOURCENODE_BASE;
+ hpi_ctl.dst_node_type -= HPI_DESTNODE_BASE;
+
+ /* ASI50xx in SSX mode has multiple meters on the same node.
+ Use subindex to create distinct ALSA controls
+ for any duplicated controls.
+ */
+ if ((hpi_ctl.control_type == prev_ctl.control_type) &&
+ (hpi_ctl.src_node_type == prev_ctl.src_node_type) &&
+ (hpi_ctl.src_node_index == prev_ctl.src_node_index) &&
+ (hpi_ctl.dst_node_type == prev_ctl.dst_node_type) &&
+ (hpi_ctl.dst_node_index == prev_ctl.dst_node_index))
+ subindex++;
+ else
+ subindex = 0;
+
+ prev_ctl = hpi_ctl;
+
+ switch (hpi_ctl.control_type) {
+ case HPI_CONTROL_VOLUME:
+ err = snd_asihpi_volume_add(asihpi, &hpi_ctl);
+ break;
+ case HPI_CONTROL_LEVEL:
+ err = snd_asihpi_level_add(asihpi, &hpi_ctl);
+ break;
+ case HPI_CONTROL_MULTIPLEXER:
+ err = snd_asihpi_mux_add(asihpi, &hpi_ctl);
+ break;
+ case HPI_CONTROL_CHANNEL_MODE:
+ err = snd_asihpi_cmode_add(asihpi, &hpi_ctl);
+ break;
+ case HPI_CONTROL_METER:
+ err = snd_asihpi_meter_add(asihpi, &hpi_ctl, subindex);
+ break;
+ case HPI_CONTROL_SAMPLECLOCK:
+ err = snd_asihpi_sampleclock_add(
+ asihpi, &hpi_ctl);
+ break;
+ case HPI_CONTROL_CONNECTION: /* ignore these */
+ continue;
+ case HPI_CONTROL_TUNER:
+ err = snd_asihpi_tuner_add(asihpi, &hpi_ctl);
+ break;
+ case HPI_CONTROL_AESEBU_TRANSMITTER:
+ err = snd_asihpi_aesebu_tx_add(asihpi, &hpi_ctl);
+ break;
+ case HPI_CONTROL_AESEBU_RECEIVER:
+ err = snd_asihpi_aesebu_rx_add(asihpi, &hpi_ctl);
+ break;
+ case HPI_CONTROL_VOX:
+ case HPI_CONTROL_BITSTREAM:
+ case HPI_CONTROL_MICROPHONE:
+ case HPI_CONTROL_PARAMETRIC_EQ:
+ case HPI_CONTROL_COMPANDER:
+ default:
+ if (mixer_dump)
+ snd_printk(KERN_INFO
+ "untranslated HPI control"
+ "(%d) %d %d %d %d %d\n",
+ idx,
+ hpi_ctl.control_type,
+ hpi_ctl.src_node_type,
+ hpi_ctl.src_node_index,
+ hpi_ctl.dst_node_type,
+ hpi_ctl.dst_node_index);
+ continue;
+ };
+ if (err < 0)
+ return err;
+ }
+ if (HPI_ERROR_INVALID_OBJ_INDEX != err)
+ hpi_handle_error(err);
+
+ snd_printk(KERN_INFO "%d mixer controls found\n", idx);
+
+ return 0;
+}
+
+/*------------------------------------------------------------
+ /proc interface
+ ------------------------------------------------------------*/
+
+static void
+snd_asihpi_proc_read(struct snd_info_entry *entry,
+ struct snd_info_buffer *buffer)
+{
+ struct snd_card_asihpi *asihpi = entry->private_data;
+ u16 version;
+ u32 h_control;
+ u32 rate = 0;
+ u16 source = 0;
+ int err;
+
+ snd_iprintf(buffer, "ASIHPI driver proc file\n");
+ snd_iprintf(buffer,
+ "adapter ID=%4X\n_index=%d\n"
+ "num_outstreams=%d\n_num_instreams=%d\n",
+ asihpi->type, asihpi->adapter_index,
+ asihpi->num_outstreams, asihpi->num_instreams);
+
+ version = asihpi->version;
+ snd_iprintf(buffer,
+ "serial#=%d\n_hw version %c%d\nDSP code version %03d\n",
+ asihpi->serial_number, ((version >> 3) & 0xf) + 'A',
+ version & 0x7,
+ ((version >> 13) * 100) + ((version >> 7) & 0x3f));
+
+ err = hpi_mixer_get_control(ss, asihpi->h_mixer,
+ HPI_SOURCENODE_CLOCK_SOURCE, 0, 0, 0,
+ HPI_CONTROL_SAMPLECLOCK, &h_control);
+
+ if (!err) {
+ err = hpi_sample_clock_get_sample_rate(ss,
+ h_control, &rate);
+ err += hpi_sample_clock_get_source(ss, h_control, &source);
+
+ if (!err)
+ snd_iprintf(buffer, "sample_clock=%d_hz, source %s\n",
+ rate, sampleclock_sources[source]);
+ }
+
+}
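+
+/* Worked example (illustrative, not part of the driver): the 16-bit adapter
+version word decoded above keeps the hardware revision letter in bits 6..3,
+the hardware revision number in bits 2..0 and the DSP code version split
+across bits 15..13 (hundreds) and 12..7 (remainder). So version 0x2195
+would print as hw version C5 with DSP code version 103:
+ ((0x2195 >> 3) & 0xf) + 'A' == 'C'
+ 0x2195 & 0x7 == 5
+ ((0x2195 >> 13) * 100) + ((0x2195 >> 7) & 0x3f) == 103
+*/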
+
+
+static void __devinit snd_asihpi_proc_init(struct snd_card_asihpi *asihpi)
+{
+ struct snd_info_entry *entry;
+
+ if (!snd_card_proc_new(asihpi->card, "info", &entry))
+ snd_info_set_text_ops(entry, asihpi, snd_asihpi_proc_read);
+}
+
+/*------------------------------------------------------------
+ HWDEP
+ ------------------------------------------------------------*/
+
+static int snd_asihpi_hpi_open(struct snd_hwdep *hw, struct file *file)
+{
+ if (enable_hpi_hwdep)
+ return 0;
+ else
+ return -ENODEV;
+
+}
+
+static int snd_asihpi_hpi_release(struct snd_hwdep *hw, struct file *file)
+{
+ if (enable_hpi_hwdep)
+ return asihpi_hpi_release(file);
+ else
+ return -ENODEV;
+}
+
+static int snd_asihpi_hpi_ioctl(struct snd_hwdep *hw, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ if (enable_hpi_hwdep)
+ return asihpi_hpi_ioctl(file, cmd, arg);
+ else
+ return -ENODEV;
+}
+
+
+/* results in /dev/snd/hwC#D0 file for each card with index #
+ also /proc/asound/hwdep will contain '#-00: asihpi (HPI) for each card'
+*/
+static int __devinit snd_asihpi_hpi_new(struct snd_card_asihpi *asihpi,
+ int device, struct snd_hwdep **rhwdep)
+{
+ struct snd_hwdep *hw;
+ int err;
+
+ if (rhwdep)
+ *rhwdep = NULL;
+ err = snd_hwdep_new(asihpi->card, "HPI", device, &hw);
+ if (err < 0)
+ return err;
+ strcpy(hw->name, "asihpi (HPI)");
+ hw->iface = SNDRV_HWDEP_IFACE_LAST;
+ hw->ops.open = snd_asihpi_hpi_open;
+ hw->ops.ioctl = snd_asihpi_hpi_ioctl;
+ hw->ops.release = snd_asihpi_hpi_release;
+ hw->private_data = asihpi;
+ if (rhwdep)
+ *rhwdep = hw;
+ return 0;
+}
+
+/*------------------------------------------------------------
+ CARD
+ ------------------------------------------------------------*/
+static int __devinit snd_asihpi_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_id)
+{
+ int err;
+
+ u16 version;
+ int pcm_substreams;
+
+ struct hpi_adapter *hpi_card;
+ struct snd_card *card;
+ struct snd_card_asihpi *asihpi;
+
+ u32 h_control;
+ u32 h_stream;
+
+ static int dev;
+ if (dev >= SNDRV_CARDS)
+ return -ENODEV;
+
+ /* Should this be enable[hpi_card->index] ? */
+ if (!enable[dev]) {
+ dev++;
+ return -ENOENT;
+ }
+
+ err = asihpi_adapter_probe(pci_dev, pci_id);
+ if (err < 0)
+ return err;
+
+ hpi_card = pci_get_drvdata(pci_dev);
+ /* first try to give the card the same index as its hardware index */
+ err = snd_card_create(hpi_card->index,
+ id[hpi_card->index], THIS_MODULE,
+ sizeof(struct snd_card_asihpi),
+ &card);
+ if (err < 0) {
+ /* if that fails, try the default index==next available */
+ err =
+ snd_card_create(index[dev], id[dev],
+ THIS_MODULE,
+ sizeof(struct snd_card_asihpi),
+ &card);
+ if (err < 0)
+ return err;
+ snd_printk(KERN_WARNING
+ "**** WARNING **** adapter index %d->ALSA index %d\n",
+ hpi_card->index, card->number);
+ }
+
+ asihpi = (struct snd_card_asihpi *) card->private_data;
+ asihpi->card = card;
+ asihpi->pci = hpi_card->pci;
+ asihpi->adapter_index = hpi_card->index;
+ hpi_handle_error(hpi_adapter_get_info(ss,
+ asihpi->adapter_index,
+ &asihpi->num_outstreams,
+ &asihpi->num_instreams,
+ &asihpi->version,
+ &asihpi->serial_number, &asihpi->type));
+
+ version = asihpi->version;
+ snd_printk(KERN_INFO "adapter ID=%4X index=%d num_outstreams=%d "
+ "num_instreams=%d S/N=%d\n"
+ "hw version %c%d DSP code version %03d\n",
+ asihpi->type, asihpi->adapter_index,
+ asihpi->num_outstreams,
+ asihpi->num_instreams, asihpi->serial_number,
+ ((version >> 3) & 0xf) + 'A',
+ version & 0x7,
+ ((version >> 13) * 100) + ((version >> 7) & 0x3f));
+
+ pcm_substreams = asihpi->num_outstreams;
+ if (pcm_substreams < asihpi->num_instreams)
+ pcm_substreams = asihpi->num_instreams;
+
+ err = hpi_adapter_get_property(ss, asihpi->adapter_index,
+ HPI_ADAPTER_PROPERTY_CAPS1,
+ NULL, &asihpi->support_grouping);
+ if (err)
+ asihpi->support_grouping = 0;
+
+ err = hpi_adapter_get_property(ss, asihpi->adapter_index,
+ HPI_ADAPTER_PROPERTY_CAPS2,
+ &asihpi->support_mrx, NULL);
+ if (err)
+ asihpi->support_mrx = 0;
+
+ err = hpi_adapter_get_property(ss, asihpi->adapter_index,
+ HPI_ADAPTER_PROPERTY_INTERVAL,
+ NULL, &asihpi->update_interval_frames);
+ if (err)
+ asihpi->update_interval_frames = 512;
+
+ hpi_handle_error(hpi_instream_open(ss, asihpi->adapter_index,
+ 0, &h_stream));
+
+ err = hpi_instream_host_buffer_free(ss, h_stream);
+ asihpi->support_mmap = (!err);
+
+ hpi_handle_error(hpi_instream_close(ss, h_stream));
+
+ err = hpi_adapter_get_property(ss, asihpi->adapter_index,
+ HPI_ADAPTER_PROPERTY_CURCHANNELS,
+ &asihpi->in_max_chans, &asihpi->out_max_chans);
+ if (err) {
+ asihpi->in_max_chans = 2;
+ asihpi->out_max_chans = 2;
+ }
+
+ snd_printk(KERN_INFO "supports mmap:%d grouping:%d mrx:%d\n",
+ asihpi->support_mmap,
+ asihpi->support_grouping,
+ asihpi->support_mrx
+ );
+
+
+ err = snd_card_asihpi_pcm_new(asihpi, 0, pcm_substreams);
+ if (err < 0) {
+ snd_printk(KERN_ERR "pcm_new failed\n");
+ goto __nodev;
+ }
+ err = snd_card_asihpi_mixer_new(asihpi);
+ if (err < 0) {
+ snd_printk(KERN_ERR "mixer_new failed\n");
+ goto __nodev;
+ }
+
+ err = hpi_mixer_get_control(ss, asihpi->h_mixer,
+ HPI_SOURCENODE_CLOCK_SOURCE, 0, 0, 0,
+ HPI_CONTROL_SAMPLECLOCK, &h_control);
+
+ if (!err)
+ err = hpi_sample_clock_set_local_rate(
+ ss, h_control, adapter_fs);
+
+ snd_asihpi_proc_init(asihpi);
+
+ /* always create, can be enabled or disabled dynamically
+ by enable_hwdep module param*/
+ snd_asihpi_hpi_new(asihpi, 0, NULL);
+
+ if (asihpi->support_mmap)
+ strcpy(card->driver, "ASIHPI-MMAP");
+ else
+ strcpy(card->driver, "ASIHPI");
+
+ sprintf(card->shortname, "AudioScience ASI%4X", asihpi->type);
+ sprintf(card->longname, "%s %i",
+ card->shortname, asihpi->adapter_index);
+ err = snd_card_register(card);
+ if (!err) {
+ hpi_card->snd_card_asihpi = card;
+ dev++;
+ return 0;
+ }
+__nodev:
+ snd_card_free(card);
+ snd_printk(KERN_ERR "snd_asihpi_probe error %d\n", err);
+ return err;
+
+}
+
+static void __devexit snd_asihpi_remove(struct pci_dev *pci_dev)
+{
+ struct hpi_adapter *hpi_card = pci_get_drvdata(pci_dev);
+
+ snd_card_free(hpi_card->snd_card_asihpi);
+ hpi_card->snd_card_asihpi = NULL;
+ asihpi_adapter_remove(pci_dev);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(asihpi_pci_tbl) = {
+ {HPI_PCI_VENDOR_ID_TI, HPI_PCI_DEV_ID_DSP6205,
+ HPI_PCI_VENDOR_ID_AUDIOSCIENCE, PCI_ANY_ID, 0, 0,
+ (kernel_ulong_t)HPI_6205},
+ {HPI_PCI_VENDOR_ID_TI, HPI_PCI_DEV_ID_PCI2040,
+ HPI_PCI_VENDOR_ID_AUDIOSCIENCE, PCI_ANY_ID, 0, 0,
+ (kernel_ulong_t)HPI_6000},
+ {0,}
+};
+MODULE_DEVICE_TABLE(pci, asihpi_pci_tbl);
+
+static struct pci_driver driver = {
+ .name = "asihpi",
+ .id_table = asihpi_pci_tbl,
+ .probe = snd_asihpi_probe,
+ .remove = __devexit_p(snd_asihpi_remove),
+#ifdef CONFIG_PM
+/* .suspend = snd_asihpi_suspend,
+ .resume = snd_asihpi_resume, */
+#endif
+};
+
+static int __init snd_asihpi_init(void)
+{
+ asihpi_init();
+ return pci_register_driver(&driver);
+}
+
+static void __exit snd_asihpi_exit(void)
+{
+
+ pci_unregister_driver(&driver);
+ asihpi_exit();
+}
+
+module_init(snd_asihpi_init)
+module_exit(snd_asihpi_exit)
+
diff --git a/sound/pci/asihpi/hpi.h b/sound/pci/asihpi/hpi.h
new file mode 100644
index 000000000000..99400de6c075
--- /dev/null
+++ b/sound/pci/asihpi/hpi.h
@@ -0,0 +1,2001 @@
+/******************************************************************************
+
+ AudioScience HPI driver
+ Copyright (C) 1997-2010 AudioScience Inc. <support@audioscience.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation;
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+*/
+/** \file hpi.h
+
+ AudioScience Hardware Programming Interface (HPI)
+ public API definition.
+
+ The HPI is a low-level hardware abstraction layer to all
+ AudioScience digital audio adapters
+*/
+/*
+ You must define one operating system that the HPI is to be compiled under
+ HPI_OS_WIN32_USER 32bit Windows
+ HPI_OS_DSP_C6000 DSP TI C6000 (automatically set)
+ HPI_OS_WDM Windows WDM kernel driver
+ HPI_OS_LINUX Linux userspace
+ HPI_OS_LINUX_KERNEL Linux kernel (automatically set)
+
+(C) Copyright AudioScience Inc. 1998-2010
+******************************************************************************/
+#ifndef _HPI_H_
+#define _HPI_H_
+/* HPI Version
+If HPI_VER_MINOR is odd then it is a development release not intended for the
+public. If HPI_VER_MINOR is even then it is a release version,
+e.g. 3.05.02 is a development version.
+*/
+#define HPI_VERSION_CONSTRUCTOR(maj, min, rel) \
+ ((maj << 16) + (min << 8) + rel)
+
+#define HPI_VER_MAJOR(v) ((int)(v >> 16))
+#define HPI_VER_MINOR(v) ((int)((v >> 8) & 0xFF))
+#define HPI_VER_RELEASE(v) ((int)(v & 0xFF))
+
+/* Use single digits for versions less than 10 to avoid octal. */
+#define HPI_VER HPI_VERSION_CONSTRUCTOR(4L, 3, 18)
+
+/* Library version as documented in hpi-api-versions.txt */
+#define HPI_LIB_VER HPI_VERSION_CONSTRUCTOR(9, 0, 0)
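+
+/* Illustrative example (not part of the API): the constructor packs
+major.minor.release into a single word and the accessors unpack it again,
+so for HPI_VER defined above:
+ HPI_VER_MAJOR(HPI_VER) == 4
+ HPI_VER_MINOR(HPI_VER) == 3
+ HPI_VER_RELEASE(HPI_VER) == 18
+*/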
+
+#include <linux/types.h>
+#define HPI_EXCLUDE_DEPRECATED
+
+/******************************************************************************/
+/******************************************************************************/
+/******** HPI API DEFINITIONS *****/
+/******************************************************************************/
+/******************************************************************************/
+/*******************************************/
+/** Audio format types
+\ingroup stream
+*/
+enum HPI_FORMATS {
+/** Used internally on adapter. */
+ HPI_FORMAT_MIXER_NATIVE = 0,
+/** 8-bit unsigned PCM. Windows equivalent is WAVE_FORMAT_PCM. */
+ HPI_FORMAT_PCM8_UNSIGNED = 1,
+/** 16-bit signed PCM. Windows equivalent is WAVE_FORMAT_PCM. */
+ HPI_FORMAT_PCM16_SIGNED = 2,
+/** MPEG-1 Layer-1. */
+ HPI_FORMAT_MPEG_L1 = 3,
+/** MPEG-1 Layer-2.
+
+Windows equivalent is WAVE_FORMAT_MPEG.
+
+The following table shows what combinations of mode and bitrate are possible:
+
+<table border=1 cellspacing=0 cellpadding=5>
+<tr>
+<td><p><b>Bitrate (kbs)</b></p>
+<td><p><b>Mono</b></p>
+<td><p><b>Stereo,<br>Joint Stereo or<br>Dual Channel</b></p>
+
+<tr><td>32<td>X<td>_
+<tr><td>40<td>_<td>_
+<tr><td>48<td>X<td>_
+<tr><td>56<td>X<td>_
+<tr><td>64<td>X<td>X
+<tr><td>80<td>X<td>_
+<tr><td>96<td>X<td>X
+<tr><td>112<td>X<td>X
+<tr><td>128<td>X<td>X
+<tr><td>160<td>X<td>X
+<tr><td>192<td>X<td>X
+<tr><td>224<td>_<td>X
+<tr><td>256<td>-<td>X
+<tr><td>320<td>-<td>X
+<tr><td>384<td>_<td>X
+</table>
+*/
+ HPI_FORMAT_MPEG_L2 = 4,
+/** MPEG-1 Layer-3.
+Windows equivalent is WAVE_FORMAT_MPEG.
+
+The following table shows what combinations of mode and bitrate are possible:
+
+<table border=1 cellspacing=0 cellpadding=5>
+<tr>
+<td><p><b>Bitrate (kbs)</b></p>
+<td><p><b>Mono<br>Stereo @ 8,<br>11.025 and<br>12kHz*</b></p>
+<td><p><b>Mono<br>Stereo @ 16,<br>22.050 and<br>24kHz*</b></p>
+<td><p><b>Mono<br>Stereo @ 32,<br>44.1 and<br>48kHz</b></p>
+
+<tr><td>16<td>X<td>X<td>_
+<tr><td>24<td>X<td>X<td>_
+<tr><td>32<td>X<td>X<td>X
+<tr><td>40<td>X<td>X<td>X
+<tr><td>48<td>X<td>X<td>X
+<tr><td>56<td>X<td>X<td>X
+<tr><td>64<td>X<td>X<td>X
+<tr><td>80<td>_<td>X<td>X
+<tr><td>96<td>_<td>X<td>X
+<tr><td>112<td>_<td>X<td>X
+<tr><td>128<td>_<td>X<td>X
+<tr><td>144<td>_<td>X<td>_
+<tr><td>160<td>_<td>X<td>X
+<tr><td>192<td>_<td>_<td>X
+<tr><td>224<td>_<td>_<td>X
+<tr><td>256<td>-<td>_<td>X
+<tr><td>320<td>-<td>_<td>X
+</table>
+\b * Available on the ASI6000 series only
+*/
+ HPI_FORMAT_MPEG_L3 = 5,
+/** Dolby AC-2. */
+ HPI_FORMAT_DOLBY_AC2 = 6,
+/** Dolby AC-3. */
+ HPI_FORMAT_DOLBY_AC3 = 7,
+/** 16-bit PCM big-endian. */
+ HPI_FORMAT_PCM16_BIGENDIAN = 8,
+/** TAGIT-1 algorithm - hits. */
+ HPI_FORMAT_AA_TAGIT1_HITS = 9,
+/** TAGIT-1 algorithm - inserts. */
+ HPI_FORMAT_AA_TAGIT1_INSERTS = 10,
+/** 32-bit signed PCM. Windows equivalent is WAVE_FORMAT_PCM.
+Each sample is a 32bit word. The most significant 24 bits contain a 24-bit
+sample and the least significant 8 bits are set to 0.
+*/
+ HPI_FORMAT_PCM32_SIGNED = 11,
+/** Raw bitstream - unknown format. */
+ HPI_FORMAT_RAW_BITSTREAM = 12,
+/** TAGIT-1 algorithm hits - extended. */
+ HPI_FORMAT_AA_TAGIT1_HITS_EX1 = 13,
+/** 32-bit PCM as an IEEE float. Windows equivalent is WAVE_FORMAT_IEEE_FLOAT.
+Each sample is a 32bit word in IEEE754 floating point format.
+The range is +1.0 to -1.0, which corresponds to digital fullscale.
+*/
+ HPI_FORMAT_PCM32_FLOAT = 14,
+/** 24-bit PCM signed. Windows equivalent is WAVE_FORMAT_PCM. */
+ HPI_FORMAT_PCM24_SIGNED = 15,
+/** OEM format 1 - private. */
+ HPI_FORMAT_OEM1 = 16,
+/** OEM format 2 - private. */
+ HPI_FORMAT_OEM2 = 17,
+/** Undefined format. */
+ HPI_FORMAT_UNDEFINED = 0xffff
+};
+
+/******************************************* in/out Stream states */
+/*******************************************/
+/** Stream States
+\ingroup stream
+*/
+enum HPI_STREAM_STATES {
+ /** State stopped - stream is stopped. */
+ HPI_STATE_STOPPED = 1,
+ /** State playing - stream is playing audio. */
+ HPI_STATE_PLAYING = 2,
+ /** State recording - stream is recording. */
+ HPI_STATE_RECORDING = 3,
+ /** State drained - playing stream ran out of data to play. */
+ HPI_STATE_DRAINED = 4,
+ /** State generate sine - to be implemented. */
+ HPI_STATE_SINEGEN = 5,
+ /** State wait - used for inter-card sync to mean waiting for all
+ cards to be ready. */
+ HPI_STATE_WAIT = 6
+};
+/******************************************* mixer source node types */
+/** Source node types
+\ingroup mixer
+*/
+enum HPI_SOURCENODES {
+ /** This define can be used instead of 0 to indicate
+ that there is no valid source node. A control that
+ exists on a destination node can be searched for using a source
+ node value of either 0, or HPI_SOURCENODE_NONE */
+ HPI_SOURCENODE_NONE = 100,
+ /** \deprecated Use HPI_SOURCENODE_NONE instead. */
+ HPI_SOURCENODE_BASE = 100,
+ /** Out Stream (Play) node. */
+ HPI_SOURCENODE_OSTREAM = 101,
+ /** Line in node - could be analog, AES/EBU or network. */
+ HPI_SOURCENODE_LINEIN = 102,
+ HPI_SOURCENODE_AESEBU_IN = 103, /**< AES/EBU input node. */
+ HPI_SOURCENODE_TUNER = 104, /**< tuner node. */
+ HPI_SOURCENODE_RF = 105, /**< RF input node. */
+ HPI_SOURCENODE_CLOCK_SOURCE = 106, /**< clock source node. */
+ HPI_SOURCENODE_RAW_BITSTREAM = 107, /**< raw bitstream node. */
+ HPI_SOURCENODE_MICROPHONE = 108, /**< microphone node. */
+ /** Cobranet input node -
+ Audio samples come from the Cobranet network and into the device. */
+ HPI_SOURCENODE_COBRANET = 109,
+ HPI_SOURCENODE_ANALOG = 110, /**< analog input node. */
+ HPI_SOURCENODE_ADAPTER = 111, /**< adapter node. */
+ /* !!!Update this AND hpidebug.h if you add a new sourcenode type!!! */
+ HPI_SOURCENODE_LAST_INDEX = 111 /**< largest ID */
+ /* AX6 max sourcenode types = 15 */
+};
+
+/******************************************* mixer dest node types */
+/** Destination node types
+\ingroup mixer
+*/
+enum HPI_DESTNODES {
+ /** This define can be used instead of 0 to indicate
+ that there is no valid destination node. A control that
+ exists on a source node can be searched for using a destination
+ node value of either 0, or HPI_DESTNODE_NONE */
+ HPI_DESTNODE_NONE = 200,
+ /** \deprecated Use HPI_DESTNODE_NONE instead. */
+ HPI_DESTNODE_BASE = 200,
+ /** In Stream (Record) node. */
+ HPI_DESTNODE_ISTREAM = 201,
+ HPI_DESTNODE_LINEOUT = 202, /**< line out node. */
+ HPI_DESTNODE_AESEBU_OUT = 203, /**< AES/EBU output node. */
+ HPI_DESTNODE_RF = 204, /**< RF output node. */
+ HPI_DESTNODE_SPEAKER = 205, /**< speaker output node. */
+ /** Cobranet output node -
+ Audio samples from the device are sent out on the Cobranet network.*/
+ HPI_DESTNODE_COBRANET = 206,
+ HPI_DESTNODE_ANALOG = 207, /**< analog output node. */
+
+ /* !!!Update this AND hpidebug.h if you add a new destnode type!!! */
+ HPI_DESTNODE_LAST_INDEX = 207 /**< largest ID */
+ /* AX6 max destnode types = 15 */
+};
+
+/*******************************************/
+/** Mixer control types
+\ingroup mixer
+*/
+enum HPI_CONTROLS {
+ HPI_CONTROL_GENERIC = 0, /**< generic control. */
+ HPI_CONTROL_CONNECTION = 1, /**< A connection between nodes. */
+ HPI_CONTROL_VOLUME = 2, /**< volume control - works in dB_fs. */
+ HPI_CONTROL_METER = 3, /**< peak meter control. */
+ HPI_CONTROL_MUTE = 4, /*mute control - not used at present. */
+ HPI_CONTROL_MULTIPLEXER = 5, /**< multiplexer control. */
+
+ HPI_CONTROL_AESEBU_TRANSMITTER = 6, /**< AES/EBU transmitter control. */
+ HPI_CONTROL_AESEBUTX = HPI_CONTROL_AESEBU_TRANSMITTER,
+
+ HPI_CONTROL_AESEBU_RECEIVER = 7, /**< AES/EBU receiver control. */
+ HPI_CONTROL_AESEBURX = HPI_CONTROL_AESEBU_RECEIVER,
+
+ HPI_CONTROL_LEVEL = 8, /**< level/trim control - works in d_bu. */
+ HPI_CONTROL_TUNER = 9, /**< tuner control. */
+/* HPI_CONTROL_ONOFFSWITCH = 10 */
+ HPI_CONTROL_VOX = 11, /**< vox control. */
+/* HPI_CONTROL_AES18_TRANSMITTER = 12 */
+/* HPI_CONTROL_AES18_RECEIVER = 13 */
+/* HPI_CONTROL_AES18_BLOCKGENERATOR = 14 */
+ HPI_CONTROL_CHANNEL_MODE = 15, /**< channel mode control. */
+
+ HPI_CONTROL_BITSTREAM = 16, /**< bitstream control. */
+ HPI_CONTROL_SAMPLECLOCK = 17, /**< sample clock control. */
+ HPI_CONTROL_MICROPHONE = 18, /**< microphone control. */
+ HPI_CONTROL_PARAMETRIC_EQ = 19, /**< parametric EQ control. */
+ HPI_CONTROL_EQUALIZER = HPI_CONTROL_PARAMETRIC_EQ,
+
+ HPI_CONTROL_COMPANDER = 20, /**< compander control. */
+ HPI_CONTROL_COBRANET = 21, /**< cobranet control. */
+ HPI_CONTROL_TONEDETECTOR = 22, /**< tone detector control. */
+ HPI_CONTROL_SILENCEDETECTOR = 23, /**< silence detector control. */
+ HPI_CONTROL_PAD = 24, /**< tuner PAD control. */
+ HPI_CONTROL_SRC = 25, /**< samplerate converter control. */
+ HPI_CONTROL_UNIVERSAL = 26, /**< universal control. */
+
+/* !!! Update this AND hpidebug.h if you add a new control type!!!*/
+ HPI_CONTROL_LAST_INDEX = 26 /**<highest control type ID */
+/* WARNING types 256 or greater impact bit packing in all AX6 DSP code */
+};
+
+/* Shorthand names that match attribute names */
+
+/******************************************* ADAPTER ATTRIBUTES ****/
+
+/** Adapter properties
+These are used in HPI_AdapterSetProperty() and HPI_AdapterGetProperty()
+\ingroup adapter
+*/
+enum HPI_ADAPTER_PROPERTIES {
+/** \internal Used in dwProperty field of HPI_AdapterSetProperty() and
+HPI_AdapterGetProperty(). This errata applies to all ASI6000 cards with both
+analog and digital outputs. The CS4224 A/D+D/A has a one sample delay between
+left and right channels on both its input (ADC) and output (DAC).
+More details are available in Cirrus Logic errata ER284B2.
+PDF available from www.cirrus.com, released by Cirrus in 2001.
+*/
+ HPI_ADAPTER_PROPERTY_ERRATA_1 = 1,
+
+/** Adapter grouping property
+Indicates whether the adapter supports the grouping API (for ASIO and SSX2)
+*/
+ HPI_ADAPTER_PROPERTY_GROUPING = 2,
+
+/** Driver SSX2 property
+Tells the kernel driver to turn on SSX2 stream mapping.
+This feature is not used by the DSP. In fact the call is completely processed
+by the driver and is not passed on to the DSP at all.
+*/
+ HPI_ADAPTER_PROPERTY_ENABLE_SSX2 = 3,
+
+/** Adapter SSX2 property
+Indicates the state of the adapter's SSX2 setting. This setting is stored in
+non-volatile memory on the adapter. A typical call sequence would be to use
+HPI_ADAPTER_PROPERTY_SSX2_SETTING to set SSX2 on the adapter and then to reload
+the driver. The driver would query HPI_ADAPTER_PROPERTY_SSX2_SETTING during startup
+and if SSX2 is set, it would then call HPI_ADAPTER_PROPERTY_ENABLE_SSX2 to enable
+SSX2 stream mapping within the kernel level of the driver.
+*/
+ HPI_ADAPTER_PROPERTY_SSX2_SETTING = 4,
+
+/** Base number for readonly properties */
+ HPI_ADAPTER_PROPERTY_READONLYBASE = 256,
+
+/** Readonly adapter latency property.
+This property returns the input and output latency in samples.
+Property 1 is the estimated input latency
+in samples, while Property 2 is the estimated output latency in samples.
+*/
+ HPI_ADAPTER_PROPERTY_LATENCY = 256,
+
+/** Readonly adapter granularity property.
+The granularity is the smallest chunk of stereo samples that is processed by
+the adapter.
+This property returns the record granularity in samples in Property 1.
+Property 2 returns the play granularity.
+*/
+ HPI_ADAPTER_PROPERTY_GRANULARITY = 257,
+
+/** Readonly adapter number of current channels property.
+Property 1 is the number of record channels per record device.
+Property 2 is the number of play channels per playback device.*/
+ HPI_ADAPTER_PROPERTY_CURCHANNELS = 258,
+
+/** Readonly adapter software version.
+The SOFTWARE_VERSION property returns the version of the software running
+on the adapter as Major.Minor.Release.
+Property 1 contains Major in bits 15..8 and Minor in bits 7..0.
+Property 2 contains Release in bits 7..0. */
+ HPI_ADAPTER_PROPERTY_SOFTWARE_VERSION = 259,
+
+/** Readonly adapter MAC address MSBs.
+The MAC_ADDRESS_MSB property returns
+the most significant 32 bits of the MAC address.
+Property 1 contains bits 47..32 of the MAC address.
+Property 2 contains bits 31..16 of the MAC address. */
+ HPI_ADAPTER_PROPERTY_MAC_ADDRESS_MSB = 260,
+
+/** Readonly adapter MAC address LSBs
+The MAC_ADDRESS_LSB property returns
+the least significant 16 bits of the MAC address.
+Property 1 contains bits 15..0 of the MAC address. */
+ HPI_ADAPTER_PROPERTY_MAC_ADDRESS_LSB = 261,
+
+/** Readonly extended adapter type number
+The EXTENDED_ADAPTER_TYPE property returns the 4 digits of an extended
+adapter type, e.g. for ASI8920-0022, 0022 is the extended type.
+The digits are returned as ASCII characters rather than the hex digits that
+are returned for the main type.
+Property 1 returns the 1st two (left most) digits, i.e. "00"
+in the example above, the upper byte being the left most digit.
+Property 2 returns the 2nd two digits, i.e. "22" in the example above. */
+ HPI_ADAPTER_PROPERTY_EXTENDED_ADAPTER_TYPE = 262,
+
+/** Readonly debug log buffer information */
+ HPI_ADAPTER_PROPERTY_LOGTABLEN = 263,
+ HPI_ADAPTER_PROPERTY_LOGTABBEG = 264,
+
+/** Readonly adapter IP address
+For example, for 192.168.1.101
+Property 1 returns the 1st two (left most) octets, i.e. 192*256 + 168,
+the upper byte being the left most octet.
+Property 2 returns the 2nd two octets, i.e. 1*256 + 101 in the example above. */
+ HPI_ADAPTER_PROPERTY_IP_ADDRESS = 265,
+
+/** Readonly adapter buffer processed count. Returns a buffer processed count
+that is incremented every time all buffers for all streams are updated. This
+is useful for checking completion of all stream operations across the adapter
+when using grouped streams.
+*/
+ HPI_ADAPTER_PROPERTY_BUFFER_UPDATE_COUNT = 266,
+
+/** Readonly mixer and stream intervals
+
+These intervals are measured in mixer frames.
+To convert to time, divide by the adapter samplerate.
+
+The mixer interval is the number of frames processed in one mixer iteration.
+The stream update interval is the interval at which streams check for and
+process data, and BBM host buffer counters are updated.
+
+Property 1 is the mixer interval in mixer frames.
+Property 2 is the stream update interval in mixer frames.
+*/
+ HPI_ADAPTER_PROPERTY_INTERVAL = 267,
+/** Adapter capabilities 1
+Property 1 - adapter can do multichannel (SSX1)
+Property 2 - adapter can do stream grouping (supports SSX2)
+*/
+ HPI_ADAPTER_PROPERTY_CAPS1 = 268,
+/** Adapter capabilities 2
+Property 1 - adapter can do samplerate conversion (MRX)
+Property 2 - adapter can do timestretch (TSX)
+*/
+ HPI_ADAPTER_PROPERTY_CAPS2 = 269
+};
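+
+/* Illustrative sketch (not part of the driver, compiled out): decoding the
+two 16-bit property words returned for some of the read-only properties
+above. Each block below assumes p1/p2 were read back for the property
+named in its comment. As a worked example for HPI_ADAPTER_PROPERTY_INTERVAL,
+a stream update interval of 48 mixer frames at a 48 kHz sample rate is 1 ms.
+*/
+#if 0
+static void hpi_adapter_property_examples(u16 p1, u16 p2)
+{
+ /* HPI_ADAPTER_PROPERTY_SOFTWARE_VERSION: Major.Minor.Release */
+ unsigned int sw_major = (p1 >> 8) & 0xff;
+ unsigned int sw_minor = p1 & 0xff;
+ unsigned int sw_release = p2 & 0xff;
+
+ /* HPI_ADAPTER_PROPERTY_IP_ADDRESS: 192.168.1.101 is returned as
+ p1 == 192 * 256 + 168 and p2 == 1 * 256 + 101 */
+ unsigned int ip_first = p1 >> 8;
+ unsigned int ip_second = p1 & 0xff;
+ unsigned int ip_third = p2 >> 8;
+ unsigned int ip_fourth = p2 & 0xff;
+}
+#endif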
+
+/** Adapter mode commands
+
+Used in wQueryOrSet field of HPI_AdapterSetModeEx().
+\ingroup adapter
+*/
+enum HPI_ADAPTER_MODE_CMDS {
+ HPI_ADAPTER_MODE_SET = 0,
+ HPI_ADAPTER_MODE_QUERY = 1
+};
+
+/** Adapter Modes
+ These are used by HPI_AdapterSetModeEx()
+
+\warning - more than 16 possible modes breaks
+a bitmask in the Windows WAVE DLL
+\ingroup adapter
+*/
+enum HPI_ADAPTER_MODES {
+/** 4 outstream mode.
+- ASI6114: 1 instream
+- ASI6044: 4 instreams
+- ASI6012: 1 instream
+- ASI6102: no instreams
+- ASI6022, ASI6122: 2 instreams
+- ASI5111, ASI5101: 2 instreams
+- ASI652x, ASI662x: 2 instreams
+- ASI654x, ASI664x: 4 instreams
+*/
+ HPI_ADAPTER_MODE_4OSTREAM = 1,
+
+/** 6 outstream mode.
+- ASI6012: 1 instream,
+- ASI6022, ASI6122: 2 instreams
+- ASI652x, ASI662x: 4 instreams
+*/
+ HPI_ADAPTER_MODE_6OSTREAM = 2,
+
+/** 8 outstream mode.
+- ASI6114: 8 instreams
+- ASI6118: 8 instreams
+- ASI6585: 8 instreams
+*/
+ HPI_ADAPTER_MODE_8OSTREAM = 3,
+
+/** 16 outstream mode.
+- ASI6416 16 instreams
+- ASI6518, ASI6618 16 instreams
+- ASI6118 16 mono out and in streams
+*/
+ HPI_ADAPTER_MODE_16OSTREAM = 4,
+
+/** one outstream mode.
+- ASI5111 1 outstream, 1 instream
+*/
+ HPI_ADAPTER_MODE_1OSTREAM = 5,
+
+/** ASI504X mode 1. 12 outstream, 4 instream 0 to 48kHz sample rates
+ (see ASI504X datasheet for more info).
+*/
+ HPI_ADAPTER_MODE_1 = 6,
+
+/** ASI504X mode 2. 4 outstreams, 4 instreams at 0 to 192kHz sample rates
+ (see ASI504X datasheet for more info).
+*/
+ HPI_ADAPTER_MODE_2 = 7,
+
+/** ASI504X mode 3. 4 outstreams, 4 instreams at 0 to 192kHz sample rates
+ (see ASI504X datasheet for more info).
+*/
+ HPI_ADAPTER_MODE_3 = 8,
+
+/** ASI504X multichannel mode.
+ 2 outstreams -> 4 line outs = 1 to 8 channel streams),
+ 4 lineins -> 1 instream (1 to 8 channel streams) at 0-48kHz.
+ For more info see the SSX Specification.
+*/
+ HPI_ADAPTER_MODE_MULTICHANNEL = 9,
+
+/** 12 outstream mode.
+- ASI6514, ASI6614: 2 instreams
+- ASI6540,ASI6544: 8 instreams
+- ASI6640,ASI6644: 8 instreams
+*/
+ HPI_ADAPTER_MODE_12OSTREAM = 10,
+
+/** 9 outstream mode.
+- ASI6044: 8 instreams
+*/
+ HPI_ADAPTER_MODE_9OSTREAM = 11,
+
+/** mono mode.
+- ASI6416: 16 outstreams/instreams
+- ASI5402: 2 outstreams/instreams
+*/
+ HPI_ADAPTER_MODE_MONO = 12,
+
+/** Low latency mode.
+- ASI6416/ASI6316: 1 16 channel outstream and instream
+*/
+ HPI_ADAPTER_MODE_LOW_LATENCY = 13
+};
+
+/* Note, adapters can have more than one capability -
+encoding as bitfield is recommended. */
+#define HPI_CAPABILITY_NONE (0)
+#define HPI_CAPABILITY_MPEG_LAYER3 (1)
+
+/* Set this equal to maximum capability index,
+Must not be greater than 32 - see axnvdef.h */
+#define HPI_CAPABILITY_MAX 1
+/* #define HPI_CAPABILITY_AAC 2 */
+
+/******************************************* STREAM ATTRIBUTES ****/
+
+/** MPEG Ancillary Data modes
+
+The mode for the ancillary data insertion or extraction to operate in.
+\ingroup stream
+*/
+enum HPI_MPEG_ANC_MODES {
+ /** the MPEG frames have energy information stored in them (5 bytes per stereo frame, 3 per mono) */
+ HPI_MPEG_ANC_HASENERGY = 0,
+ /** the entire ancillary data field is taken up by data from the Anc data buffer
+ On encode, the encoder will insert the energy bytes before filling the remainder
+ of the ancillary data space with data from the ancillary data buffer.
+ */
+ HPI_MPEG_ANC_RAW = 1
+};
+
+/** Ancillary Data Alignment
+\ingroup instream
+*/
+enum HPI_ISTREAM_MPEG_ANC_ALIGNS {
+ /** data is packed against the end of data, then padded to the end of frame */
+ HPI_MPEG_ANC_ALIGN_LEFT = 0,
+ /** data is packed against the end of the frame */
+ HPI_MPEG_ANC_ALIGN_RIGHT = 1
+};
+
+/** MPEG modes
+MPEG modes - can be used optionally for HPI_FormatCreate()
+parameter dwAttributes.
+
+Using any mode setting other than HPI_MPEG_MODE_DEFAULT
+with single channel format will return an error.
+\ingroup stream
+*/
+enum HPI_MPEG_MODES {
+/** Causes the MPEG-1 Layer II bitstream to be recorded
+in single_channel mode when the number of channels is 1 and in stereo when the
+number of channels is 2. */
+ HPI_MPEG_MODE_DEFAULT = 0,
+ /** Standard stereo without joint-stereo compression */
+ HPI_MPEG_MODE_STEREO = 1,
+ /** Joint stereo */
+ HPI_MPEG_MODE_JOINTSTEREO = 2,
+ /** Left and Right channels are completely independent */
+ HPI_MPEG_MODE_DUALCHANNEL = 3
+};
+/******************************************* MIXER ATTRIBUTES ****/
+
+/* \defgroup mixer_flags Mixer flags for HPI_MIXER_GET_CONTROL_MULTIPLE_VALUES
+{
+*/
+#define HPI_MIXER_GET_CONTROL_MULTIPLE_CHANGED (0)
+#define HPI_MIXER_GET_CONTROL_MULTIPLE_RESET (1)
+/*}*/
+
+/** Commands used by HPI_MixerStore()
+\ingroup mixer
+*/
+enum HPI_MIXER_STORE_COMMAND {
+/** Save all mixer control settings. */
+ HPI_MIXER_STORE_SAVE = 1,
+/** Restore all controls from saved. */
+ HPI_MIXER_STORE_RESTORE = 2,
+/** Delete saved control settings. */
+ HPI_MIXER_STORE_DELETE = 3,
+/** Enable auto storage of some control settings. */
+ HPI_MIXER_STORE_ENABLE = 4,
+/** Disable auto storage of some control settings. */
+ HPI_MIXER_STORE_DISABLE = 5,
+/** Save the attributes of a single control. */
+ HPI_MIXER_STORE_SAVE_SINGLE = 6
+};
+
+/************************************* CONTROL ATTRIBUTE VALUES ****/
+/** Used by mixer plugin enable functions
+
+E.g. HPI_ParametricEQ_SetState()
+\ingroup mixer
+*/
+enum HPI_SWITCH_STATES {
+ HPI_SWITCH_OFF = 0, /**< turn the mixer plugin off. */
+ HPI_SWITCH_ON = 1 /**< turn the mixer plugin on. */
+};
+
+/* Volume control special gain values */
+/** volumes units are 100ths of a dB
+\ingroup volume
+*/
+#define HPI_UNITS_PER_dB 100
+/** turns volume control OFF or MUTE
+\ingroup volume
+*/
+#define HPI_GAIN_OFF (-100 * HPI_UNITS_PER_dB)
+
+/** value returned for no signal
+\ingroup meter
+*/
+#define HPI_METER_MINIMUM (-150 * HPI_UNITS_PER_dB)
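+
+/* Illustrative example: gains are passed as integer hundredths of a dB, so
+-6.5 dB is written as -650 (-6.5 * HPI_UNITS_PER_dB), HPI_GAIN_OFF (-10000)
+corresponds to -100 dB (mute), and HPI_METER_MINIMUM (-15000) is the value a
+meter reports when no signal is present.
+*/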
+
+/** autofade profiles
+\ingroup volume
+*/
+enum HPI_VOLUME_AUTOFADES {
+/** log fade - dB attenuation changes linearly over time */
+ HPI_VOLUME_AUTOFADE_LOG = 2,
+/** linear fade - amplitude changes linearly */
+ HPI_VOLUME_AUTOFADE_LINEAR = 3
+};
+
+/** The physical encoding format of the AESEBU I/O.
+
+Used in HPI_AESEBU_Transmitter_SetFormat(), HPI_AESEBU_Receiver_SetFormat()
+along with related Get and Query functions
+\ingroup aestx
+*/
+enum HPI_AESEBU_FORMATS {
+/** AES/EBU physical format - AES/EBU balanced "professional" */
+ HPI_AESEBU_FORMAT_AESEBU = 1,
+/** AES/EBU physical format - S/PDIF unbalanced "consumer" */
+ HPI_AESEBU_FORMAT_SPDIF = 2
+};
+
+/** AES/EBU error status bits
+
+Returned by HPI_AESEBU_Receiver_GetErrorStatus()
+\ingroup aesrx
+*/
+enum HPI_AESEBU_ERRORS {
+/** bit0: 1 when PLL is not locked */
+ HPI_AESEBU_ERROR_NOT_LOCKED = 0x01,
+/** bit1: 1 when signal quality is poor */
+ HPI_AESEBU_ERROR_POOR_QUALITY = 0x02,
+/** bit2: 1 when there is a parity error */
+ HPI_AESEBU_ERROR_PARITY_ERROR = 0x04,
+/** bit3: 1 when there is a bi-phase coding violation */
+ HPI_AESEBU_ERROR_BIPHASE_VIOLATION = 0x08,
+/** bit4: 1 when the validity bit is high */
+ HPI_AESEBU_ERROR_VALIDITY = 0x10,
+/** bit5: 1 when the CRC error bit is high */
+ HPI_AESEBU_ERROR_CRC = 0x20
+};
+
+/** \addtogroup pad
+\{
+*/
+/** The text string containing the station/channel combination. */
+#define HPI_PAD_CHANNEL_NAME_LEN 16
+/** The text string containing the artist. */
+#define HPI_PAD_ARTIST_LEN 64
+/** The text string containing the title. */
+#define HPI_PAD_TITLE_LEN 64
+/** The text string containing the comment. */
+#define HPI_PAD_COMMENT_LEN 256
+/** The PTY when the tuner has not received any PTY. */
+#define HPI_PAD_PROGRAM_TYPE_INVALID 0xffff
+/** \} */
+
+/** Data types for PTY string translation.
+\ingroup rds
+*/
+enum eHPI_RDS_type {
+ HPI_RDS_DATATYPE_RDS = 0, /**< RDS bitstream.*/
+ HPI_RDS_DATATYPE_RBDS = 1 /**< RBDS bitstream.*/
+};
+
+/** Tuner bands
+
+Used for HPI_Tuner_SetBand(),HPI_Tuner_GetBand()
+\ingroup tuner
+*/
+enum HPI_TUNER_BAND {
+ HPI_TUNER_BAND_AM = 1, /**< AM band */
+ HPI_TUNER_BAND_FM = 2, /**< FM band (mono) */
+ HPI_TUNER_BAND_TV_NTSC_M = 3, /**< NTSC-M TV band*/
+ HPI_TUNER_BAND_TV = 3, /* use TV_NTSC_M */
+ HPI_TUNER_BAND_FM_STEREO = 4, /**< FM band (stereo) */
+ HPI_TUNER_BAND_AUX = 5, /**< auxiliary input */
+ HPI_TUNER_BAND_TV_PAL_BG = 6, /**< PAL-B/G TV band*/
+ HPI_TUNER_BAND_TV_PAL_I = 7, /**< PAL-I TV band*/
+ HPI_TUNER_BAND_TV_PAL_DK = 8, /**< PAL-D/K TV band*/
+ HPI_TUNER_BAND_TV_SECAM_L = 9, /**< SECAM-L TV band*/
+ HPI_TUNER_BAND_LAST = 9 /**< the index of the last tuner band. */
+};
+
+/** Tuner mode attributes
+
+Used by HPI_Tuner_SetMode(), HPI_Tuner_GetMode()
+\ingroup tuner
+
+*/
+enum HPI_TUNER_MODES {
+ HPI_TUNER_MODE_RSS = 1, /**< control RSS */
+ HPI_TUNER_MODE_RDS = 2 /**< control RBDS/RDS */
+};
+
+/** Tuner mode attribute values
+
+Used by HPI_Tuner_SetMode(), HPI_Tuner_GetMode()
+\ingroup tuner
+*/
+enum HPI_TUNER_MODE_VALUES {
+/* RSS attribute values */
+ HPI_TUNER_MODE_RSS_DISABLE = 0, /**< RSS disable */
+ HPI_TUNER_MODE_RSS_ENABLE = 1, /**< RSS enable */
+
+/* RDS mode attributes */
+ HPI_TUNER_MODE_RDS_DISABLE = 0, /**< RDS - disabled */
+ HPI_TUNER_MODE_RDS_RDS = 1, /**< RDS - RDS mode */
+ HPI_TUNER_MODE_RDS_RBDS = 2 /**< RDS - RBDS mode */
+};
+
+/** Tuner Level settings
+\ingroup tuner
+*/
+enum HPI_TUNER_LEVEL {
+ HPI_TUNER_LEVEL_AVERAGE = 0,
+ HPI_TUNER_LEVEL_RAW = 1
+};
+
+/** Tuner Status Bits
+
+These bitfield values are returned by a call to HPI_Tuner_GetStatus().
+Multiple fields are returned from a single call.
+\ingroup tuner
+*/
+enum HPI_TUNER_STATUS_BITS {
+ HPI_TUNER_VIDEO_COLOR_PRESENT = 0x0001, /**< video color is present. */
+ HPI_TUNER_VIDEO_IS_60HZ = 0x0020, /**< 60 hz video detected. */
+ HPI_TUNER_VIDEO_HORZ_SYNC_MISSING = 0x0040, /**< video HSYNC is missing. */
+ HPI_TUNER_VIDEO_STATUS_VALID = 0x0100, /**< video status is valid. */
+ HPI_TUNER_PLL_LOCKED = 0x1000, /**< the tuner's PLL is locked. */
+ HPI_TUNER_FM_STEREO = 0x2000, /**< tuner reports back FM stereo. */
+ HPI_TUNER_DIGITAL = 0x0200, /**< tuner reports digital programming. */
+ HPI_TUNER_MULTIPROGRAM = 0x0400 /**< tuner reports multiple programs. */
+};
+
+/** Channel Modes
+Used for HPI_ChannelModeSet/Get()
+\ingroup channelmode
+*/
+enum HPI_CHANNEL_MODES {
+/** Left channel out = left channel in, Right channel out = right channel in. */
+ HPI_CHANNEL_MODE_NORMAL = 1,
+/** Left channel out = right channel in, Right channel out = left channel in. */
+ HPI_CHANNEL_MODE_SWAP = 2,
+/** Left channel out = left channel in, Right channel out = left channel in. */
+ HPI_CHANNEL_MODE_LEFT_TO_STEREO = 3,
+/** Left channel out = right channel in, Right channel out = right channel in.*/
+ HPI_CHANNEL_MODE_RIGHT_TO_STEREO = 4,
+/** Left channel out = (left channel in + right channel in)/2,
+ Right channel out = mute. */
+ HPI_CHANNEL_MODE_STEREO_TO_LEFT = 5,
+/** Left channel out = mute,
+ Right channel out = (right channel in + left channel in)/2. */
+ HPI_CHANNEL_MODE_STEREO_TO_RIGHT = 6,
+ HPI_CHANNEL_MODE_LAST = 6
+};
+
+/** SampleClock source values
+\ingroup sampleclock
+*/
+enum HPI_SAMPLECLOCK_SOURCES {
+/** The sampleclock output is derived from its local samplerate generator.
+ The local samplerate may be set using HPI_SampleClock_SetLocalRate(). */
+ HPI_SAMPLECLOCK_SOURCE_LOCAL = 1,
+/** \deprecated Use HPI_SAMPLECLOCK_SOURCE_LOCAL instead */
+ HPI_SAMPLECLOCK_SOURCE_ADAPTER = 1,
+/** The adapter is clocked from a dedicated AES/EBU SampleClock input.*/
+ HPI_SAMPLECLOCK_SOURCE_AESEBU_SYNC = 2,
+/** From external wordclock connector */
+ HPI_SAMPLECLOCK_SOURCE_WORD = 3,
+/** Board-to-board header */
+ HPI_SAMPLECLOCK_SOURCE_WORD_HEADER = 4,
+/** FUTURE - SMPTE clock. */
+ HPI_SAMPLECLOCK_SOURCE_SMPTE = 5,
+/** One of the aesebu inputs */
+ HPI_SAMPLECLOCK_SOURCE_AESEBU_INPUT = 6,
+/** \deprecated The first aesebu input with a valid signal
+Superseded by separate Auto enable flag
+*/
+ HPI_SAMPLECLOCK_SOURCE_AESEBU_AUTO = 7,
+/** From a network interface e.g. Cobranet or Livewire at either 48 or 96kHz */
+ HPI_SAMPLECLOCK_SOURCE_NETWORK = 8,
+/** From previous adjacent module (ASI2416 only)*/
+ HPI_SAMPLECLOCK_SOURCE_PREV_MODULE = 10,
+/*! Update this if you add a new clock source.*/
+ HPI_SAMPLECLOCK_SOURCE_LAST = 10
+};
+
+/** Equalizer filter types. Used by HPI_ParametricEQ_SetBand()
+\ingroup parmeq
+*/
+enum HPI_FILTER_TYPE {
+ HPI_FILTER_TYPE_BYPASS = 0, /**< filter is turned off */
+
+ HPI_FILTER_TYPE_LOWSHELF = 1, /**< EQ low shelf */
+ HPI_FILTER_TYPE_HIGHSHELF = 2, /**< EQ high shelf */
+ HPI_FILTER_TYPE_EQ_BAND = 3, /**< EQ gain */
+
+ HPI_FILTER_TYPE_LOWPASS = 4, /**< standard low pass */
+ HPI_FILTER_TYPE_HIGHPASS = 5, /**< standard high pass */
+ HPI_FILTER_TYPE_BANDPASS = 6, /**< standard band pass */
+ HPI_FILTER_TYPE_BANDSTOP = 7 /**< standard band stop/notch */
+};
+
+/** Async Event sources
+\ingroup async
+*/
+enum ASYNC_EVENT_SOURCES {
+ HPI_ASYNC_EVENT_GPIO = 1, /**< GPIO event. */
+ HPI_ASYNC_EVENT_SILENCE = 2, /**< silence event detected. */
+ HPI_ASYNC_EVENT_TONE = 3 /**< tone event detected. */
+};
+/*******************************************/
+/** HPI Error codes
+
+Almost all HPI functions return an error code
+A return value of zero means there was no error.
+Otherwise one of these error codes is returned.
+Error codes can be converted to a descriptive string using HPI_GetErrorText()
+
+\note When a new error code is added HPI_GetErrorText() MUST be updated.
+\note Codes 1-100 are reserved for driver use
+\ingroup utility
+*/
+enum HPI_ERROR_CODES {
+ /** Message type does not exist. */
+ HPI_ERROR_INVALID_TYPE = 100,
+ /** Object type does not exist. */
+ HPI_ERROR_INVALID_OBJ = 101,
+ /** Function does not exist. */
+ HPI_ERROR_INVALID_FUNC = 102,
+ /** The specified object (adapter/Stream) does not exist. */
+ HPI_ERROR_INVALID_OBJ_INDEX = 103,
+ /** Trying to access an object that has not been opened yet. */
+ HPI_ERROR_OBJ_NOT_OPEN = 104,
+ /** Trying to open an already open object. */
+ HPI_ERROR_OBJ_ALREADY_OPEN = 105,
+ /** PCI, ISA resource not valid. */
+ HPI_ERROR_INVALID_RESOURCE = 106,
+ /** GetInfo call from SubSysFindAdapters failed. */
+ HPI_ERROR_SUBSYSFINDADAPTERS_GETINFO = 107,
+ /** Default response was never updated with actual error code. */
+ HPI_ERROR_INVALID_RESPONSE = 108,
+ /** wSize field of response was not updated,
+ indicating that the message was not processed. */
+ HPI_ERROR_PROCESSING_MESSAGE = 109,
+ /** The network did not respond in a timely manner. */
+ HPI_ERROR_NETWORK_TIMEOUT = 110,
+ /** An HPI handle is invalid (uninitialised?). */
+ HPI_ERROR_INVALID_HANDLE = 111,
+ /** A function or attribute has not been implemented yet. */
+ HPI_ERROR_UNIMPLEMENTED = 112,
+ /** There are too many clients attempting to access a network resource. */
+ HPI_ERROR_NETWORK_TOO_MANY_CLIENTS = 113,
+ /** Response buffer passed to HPI_Message was smaller than returned response */
+ HPI_ERROR_RESPONSE_BUFFER_TOO_SMALL = 114,
+ /** The returned response did not match the sent message */
+ HPI_ERROR_RESPONSE_MISMATCH = 115,
+
+ /** Too many adapters.*/
+ HPI_ERROR_TOO_MANY_ADAPTERS = 200,
+ /** Bad adapter. */
+ HPI_ERROR_BAD_ADAPTER = 201,
+ /** Adapter number out of range or not set properly. */
+ HPI_ERROR_BAD_ADAPTER_NUMBER = 202,
+ /** 2 adapters with the same adapter number. */
+ HPI_DUPLICATE_ADAPTER_NUMBER = 203,
+ /** DSP code failed to bootload. */
+ HPI_ERROR_DSP_BOOTLOAD = 204,
+ /** Adapter failed DSP code self test. */
+ HPI_ERROR_DSP_SELFTEST = 205,
+ /** Couldn't find or open the DSP code file. */
+ HPI_ERROR_DSP_FILE_NOT_FOUND = 206,
+ /** Internal DSP hardware error. */
+ HPI_ERROR_DSP_HARDWARE = 207,
+ /** Could not allocate memory in DOS. */
+ HPI_ERROR_DOS_MEMORY_ALLOC = 208,
+ /** Could not allocate memory */
+ HPI_ERROR_MEMORY_ALLOC = 208,
+ /** Failed to correctly load/config PLD .*/
+ HPI_ERROR_PLD_LOAD = 209,
+ /** Unexpected end of file, block length too big etc. */
+ HPI_ERROR_DSP_FILE_FORMAT = 210,
+
+ /** Found but could not open DSP code file. */
+ HPI_ERROR_DSP_FILE_ACCESS_DENIED = 211,
+ /** First DSP code section header not found in DSP file. */
+ HPI_ERROR_DSP_FILE_NO_HEADER = 212,
+ /** File read operation on DSP code file failed. */
+ HPI_ERROR_DSP_FILE_READ_ERROR = 213,
+ /** DSP code for adapter family not found. */
+ HPI_ERROR_DSP_SECTION_NOT_FOUND = 214,
+ /** Other OS specific error opening DSP file. */
+ HPI_ERROR_DSP_FILE_OTHER_ERROR = 215,
+ /** Sharing violation opening DSP code file. */
+ HPI_ERROR_DSP_FILE_SHARING_VIOLATION = 216,
+ /** DSP code section header had size == 0. */
+ HPI_ERROR_DSP_FILE_NULL_HEADER = 217,
+
+ /** Base number for flash errors. */
+ HPI_ERROR_FLASH = 220,
+
+ /** Flash has bad checksum */
+ HPI_ERROR_BAD_CHECKSUM = (HPI_ERROR_FLASH + 1),
+ HPI_ERROR_BAD_SEQUENCE = (HPI_ERROR_FLASH + 2),
+ HPI_ERROR_FLASH_ERASE = (HPI_ERROR_FLASH + 3),
+ HPI_ERROR_FLASH_PROGRAM = (HPI_ERROR_FLASH + 4),
+ HPI_ERROR_FLASH_VERIFY = (HPI_ERROR_FLASH + 5),
+ HPI_ERROR_FLASH_TYPE = (HPI_ERROR_FLASH + 6),
+ HPI_ERROR_FLASH_START = (HPI_ERROR_FLASH + 7),
+
+ /** Reserved for OEMs. */
+ HPI_ERROR_RESERVED_1 = 290,
+
+ /** Stream does not exist. */
+ HPI_ERROR_INVALID_STREAM = 300,
+ /** Invalid compression format. */
+ HPI_ERROR_INVALID_FORMAT = 301,
+ /** Invalid format samplerate */
+ HPI_ERROR_INVALID_SAMPLERATE = 302,
+ /** Invalid format number of channels. */
+ HPI_ERROR_INVALID_CHANNELS = 303,
+ /** Invalid format bitrate. */
+ HPI_ERROR_INVALID_BITRATE = 304,
+ /** Invalid datasize used for stream read/write. */
+ HPI_ERROR_INVALID_DATASIZE = 305,
+ /** Stream buffer is full during stream write. */
+ HPI_ERROR_BUFFER_FULL = 306,
+ /** Stream buffer is empty during stream read. */
+ HPI_ERROR_BUFFER_EMPTY = 307,
+ /** Invalid datasize used for stream read/write. */
+ HPI_ERROR_INVALID_DATA_TRANSFER = 308,
+ /** Packet ordering error for stream read/write. */
+ HPI_ERROR_INVALID_PACKET_ORDER = 309,
+
+ /** Object can't do requested operation in its current
+ state, eg set format, change rec mux state while recording.*/
+ HPI_ERROR_INVALID_OPERATION = 310,
+
+ /** Where an SRG is shared amongst streams, an incompatible samplerate is one
+ that is different to any currently playing or recording stream. */
+ HPI_ERROR_INCOMPATIBLE_SAMPLERATE = 311,
+ /** Adapter mode is illegal.*/
+ HPI_ERROR_BAD_ADAPTER_MODE = 312,
+
+ /** There have been too many attempts to set the adapter's
+ capabilities (using bad keys), the card should be returned
+ to ASI if further capabilities updates are required */
+ HPI_ERROR_TOO_MANY_CAPABILITY_CHANGE_ATTEMPTS = 313,
+ /** Streams on different adapters cannot be grouped. */
+ HPI_ERROR_NO_INTERADAPTER_GROUPS = 314,
+ /** Streams on different DSPs cannot be grouped. */
+ HPI_ERROR_NO_INTERDSP_GROUPS = 315,
+
+ /** Invalid mixer node for this adapter. */
+ HPI_ERROR_INVALID_NODE = 400,
+ /** Invalid control. */
+ HPI_ERROR_INVALID_CONTROL = 401,
+ /** Invalid control value was passed. */
+ HPI_ERROR_INVALID_CONTROL_VALUE = 402,
+ /** Control attribute not supported by this control. */
+ HPI_ERROR_INVALID_CONTROL_ATTRIBUTE = 403,
+ /** Control is disabled. */
+ HPI_ERROR_CONTROL_DISABLED = 404,
+ /** I2C transaction failed due to a missing ACK. */
+ HPI_ERROR_CONTROL_I2C_MISSING_ACK = 405,
+ /** Control attribute is valid, but not supported by this hardware. */
+ HPI_ERROR_UNSUPPORTED_CONTROL_ATTRIBUTE = 406,
+ /** Control is busy, or coming out of
+ reset and cannot be accessed at this time. */
+ HPI_ERROR_CONTROL_NOT_READY = 407,
+
+ /** Non volatile memory */
+ HPI_ERROR_NVMEM_BUSY = 450,
+ HPI_ERROR_NVMEM_FULL = 451,
+ HPI_ERROR_NVMEM_FAIL = 452,
+
+ /** I2C */
+ HPI_ERROR_I2C_MISSING_ACK = HPI_ERROR_CONTROL_I2C_MISSING_ACK,
+ HPI_ERROR_I2C_BAD_ADR = 460,
+
+ /** Entity errors */
+ HPI_ERROR_ENTITY_TYPE_MISMATCH = 470,
+ HPI_ERROR_ENTITY_ITEM_COUNT = 471,
+ HPI_ERROR_ENTITY_TYPE_INVALID = 472,
+ HPI_ERROR_ENTITY_ROLE_INVALID = 473,
+
+ /* AES18 specific errors were 500..507 */
+
+ /** custom error to use for debugging */
+ HPI_ERROR_CUSTOM = 600,
+
+ /** hpioct32.c can't obtain mutex */
+ HPI_ERROR_MUTEX_TIMEOUT = 700,
+
+ /** errors from HPI backends have values >= this */
+ HPI_ERROR_BACKEND_BASE = 900,
+
+ /** indicates a cached u16 value is invalid. */
+ HPI_ERROR_ILLEGAL_CACHE_VALUE = 0xffff
+};
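+
+/* Illustrative sketch (not part of the driver, compiled out): the usual
+pattern for handling these codes, mirroring the mixer enumeration loop in
+asihpi.c - zero means success, HPI_ERROR_INVALID_OBJ_INDEX marks the end of
+an index-based enumeration, anything else is a real error.
+*/
+#if 0
+static int hpi_error_example(u16 err)
+{
+ if (!err)
+ return 0; /* success */
+ if (err == HPI_ERROR_INVALID_OBJ_INDEX)
+ return 1; /* no more objects at this index */
+ return -1; /* genuine failure - log or propagate it */
+}
+#endif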
+
+/** \defgroup maximums HPI maximum values
+\{
+*/
+/** Maximum number of adapters per HPI sub-system
+ WARNING: modifying this value changes the response structure size.*/
+#define HPI_MAX_ADAPTERS 20
+/** Maximum number of in or out streams per adapter */
+#define HPI_MAX_STREAMS 16
+#define HPI_MAX_CHANNELS 2 /* per stream */
+#define HPI_MAX_NODES 8 /* per mixer ? */
+#define HPI_MAX_CONTROLS 4 /* per node ? */
+/** maximum number of ancillary bytes per MPEG frame */
+#define HPI_MAX_ANC_BYTES_PER_FRAME (64)
+#define HPI_STRING_LEN 16
+
+/** Velocity units */
+#define HPI_OSTREAM_VELOCITY_UNITS 4096
+/** OutStream timescale units */
+#define HPI_OSTREAM_TIMESCALE_UNITS 10000
+/** OutStream timescale passthrough - turns timescaling on in passthrough mode */
+#define HPI_OSTREAM_TIMESCALE_PASSTHROUGH 99999
+
+/**\}*/
+
+/* ////////////////////////////////////////////////////////////////////// */
+/* STRUCTURES */
+#ifndef DISABLE_PRAGMA_PACK1
+#pragma pack(push, 1)
+#endif
+
+/** Structure containing sample format information.
+ See also HPI_FormatCreate().
+ */
+struct hpi_format {
+ u32 sample_rate;
+ /**< 11025, 32000, 44100 ... */
+ u32 bit_rate; /**< for MPEG */
+ u32 attributes;
+ /**< Stereo/JointStereo/Mono */
+ u16 mode_legacy;
+ /**< Legacy ancillary mode or idle bit */
+ u16 unused; /**< unused */
+ u16 channels; /**< 1,2..., (or ancillary mode or idle bit */
+ u16 format; /**< HPI_FORMAT_PCM16, _MPEG etc. see #HPI_FORMATS. */
+};
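+
+/* Illustrative sketch (not part of the driver, compiled out): a 44.1 kHz
+stereo 16-bit PCM format expressed with the structure above. Real code
+would normally build this via the HPI_FormatCreate() helper referred to in
+the comment.
+*/
+#if 0
+static struct hpi_format example_format = {
+ .sample_rate = 44100,
+ .bit_rate = 0, /* only meaningful for MPEG formats */
+ .attributes = 0, /* e.g. an HPI_MPEG_MODES value for MPEG */
+ .mode_legacy = 0,
+ .unused = 0,
+ .channels = 2,
+ .format = HPI_FORMAT_PCM16_SIGNED,
+};
+#endif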
+
+struct hpi_anc_frame {
+ u32 valid_bits_in_this_frame;
+ u8 b_data[HPI_MAX_ANC_BYTES_PER_FRAME];
+};
+
+/** An object for containing a single async event.
+*/
+struct hpi_async_event {
+ u16 event_type; /**< type of event. \sa async_event */
+ u16 sequence; /**< sequence number, allows lost event detection */
+ u32 state; /**< new state */
+ u32 h_object; /**< handle to the object returning the event. */
+ union {
+ struct {
+ u16 index; /**< GPIO bit index. */
+ } gpio;
+ struct {
+ u16 node_index; /**< what node is the control on ? */
+ u16 node_type; /**< what type of node is the control on ? */
+ } control;
+ } u;
+};
+
+/*/////////////////////////////////////////////////////////////////////////// */
+/* Public HPI Entity related definitions */
+
+struct hpi_entity;
+
+enum e_entity_type {
+ entity_type_null,
+ entity_type_sequence, /* sequence of potentially heterogeneous TLV entities */
+
+ entity_type_reference, /* refers to a TLV entity or NULL */
+
+ entity_type_int, /* 32 bit */
+ entity_type_float, /* ieee754 binary 32 bit encoding */
+ entity_type_double,
+
+ entity_type_cstring,
+ entity_type_octet,
+ entity_type_ip4_address,
+ entity_type_ip6_address,
+ entity_type_mac_address,
+
+ LAST_ENTITY_TYPE
+};
+
+enum e_entity_role {
+ entity_role_null,
+ entity_role_value,
+ entity_role_classname,
+
+ entity_role_units,
+ entity_role_flags,
+ entity_role_range,
+
+ entity_role_mapping,
+ entity_role_enum,
+
+ entity_role_instance_of,
+ entity_role_depends_on,
+ entity_role_member_of_group,
+ entity_role_value_constraint,
+ entity_role_parameter_port,
+
+ entity_role_block,
+ entity_role_node_group,
+ entity_role_audio_port,
+ entity_role_clock_port,
+ LAST_ENTITY_ROLE
+};
+
+/* skip host side function declarations for
+ DSP compile and documentation extraction */
+
+struct hpi_hsubsys {
+ int not_really_used;
+};
+
+#ifndef DISABLE_PRAGMA_PACK1
+#pragma pack(pop)
+#endif
+
+/*////////////////////////////////////////////////////////////////////////// */
+/* HPI FUNCTIONS */
+
+/*/////////////////////////// */
+/* DATA and FORMAT and STREAM */
+
+u16 hpi_stream_estimate_buffer_size(struct hpi_format *pF,
+ u32 host_polling_rate_in_milli_seconds, u32 *recommended_buffer_size);
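+
+/* Illustrative usage (not part of the driver, compiled out): ask HPI for a
+recommended host buffer size for a given format when the host services the
+stream at a known polling interval (here assumed to be 10 ms).
+*/
+#if 0
+static u32 example_recommended_buffer_bytes(struct hpi_format *fmt)
+{
+ u32 bytes = 0;
+
+ if (hpi_stream_estimate_buffer_size(fmt, 10, &bytes))
+ return 0; /* non-zero return is an HPI error code */
+ return bytes;
+}
+#endif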
+
+/*/////////// */
+/* SUB SYSTEM */
+struct hpi_hsubsys *hpi_subsys_create(void
+ );
+
+void hpi_subsys_free(const struct hpi_hsubsys *ph_subsys);
+
+u16 hpi_subsys_get_version(const struct hpi_hsubsys *ph_subsys,
+ u32 *pversion);
+
+u16 hpi_subsys_get_version_ex(const struct hpi_hsubsys *ph_subsys,
+ u32 *pversion_ex);
+
+u16 hpi_subsys_get_info(const struct hpi_hsubsys *ph_subsys, u32 *pversion,
+ u16 *pw_num_adapters, u16 aw_adapter_list[], u16 list_length);
+
+u16 hpi_subsys_find_adapters(const struct hpi_hsubsys *ph_subsys,
+ u16 *pw_num_adapters, u16 aw_adapter_list[], u16 list_length);
+
+u16 hpi_subsys_get_num_adapters(const struct hpi_hsubsys *ph_subsys,
+ int *pn_num_adapters);
+
+u16 hpi_subsys_get_adapter(const struct hpi_hsubsys *ph_subsys, int iterator,
+ u32 *padapter_index, u16 *pw_adapter_type);
+
+u16 hpi_subsys_ssx2_bypass(const struct hpi_hsubsys *ph_subsys, u16 bypass);
+
+u16 hpi_subsys_set_host_network_interface(const struct hpi_hsubsys *ph_subsys,
+ const char *sz_interface);
+
+/*///////// */
+/* ADAPTER */
+
+u16 hpi_adapter_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index);
+
+u16 hpi_adapter_close(const struct hpi_hsubsys *ph_subsys, u16 adapter_index);
+
+u16 hpi_adapter_get_info(const struct hpi_hsubsys *ph_subsys,
+ u16 adapter_index, u16 *pw_num_outstreams, u16 *pw_num_instreams,
+ u16 *pw_version, u32 *pserial_number, u16 *pw_adapter_type);
+
+u16 hpi_adapter_get_module_by_index(const struct hpi_hsubsys *ph_subsys,
+ u16 adapter_index, u16 module_index, u16 *pw_num_outputs,
+ u16 *pw_num_inputs, u16 *pw_version, u32 *pserial_number,
+ u16 *pw_module_type, u32 *ph_module);
+
+u16 hpi_adapter_set_mode(const struct hpi_hsubsys *ph_subsys,
+ u16 adapter_index, u32 adapter_mode);
+
+u16 hpi_adapter_set_mode_ex(const struct hpi_hsubsys *ph_subsys,
+ u16 adapter_index, u32 adapter_mode, u16 query_or_set);
+
+u16 hpi_adapter_get_mode(const struct hpi_hsubsys *ph_subsys,
+ u16 adapter_index, u32 *padapter_mode);
+
+u16 hpi_adapter_get_assert(const struct hpi_hsubsys *ph_subsys,
+ u16 adapter_index, u16 *assert_present, char *psz_assert,
+ u16 *pw_line_number);
+
+u16 hpi_adapter_get_assert_ex(const struct hpi_hsubsys *ph_subsys,
+ u16 adapter_index, u16 *assert_present, char *psz_assert,
+ u32 *pline_number, u16 *pw_assert_on_dsp);
+
+u16 hpi_adapter_test_assert(const struct hpi_hsubsys *ph_subsys,
+ u16 adapter_index, u16 assert_id);
+
+u16 hpi_adapter_enable_capability(const struct hpi_hsubsys *ph_subsys,
+ u16 adapter_index, u16 capability, u32 key);
+
+u16 hpi_adapter_self_test(const struct hpi_hsubsys *ph_subsys,
+ u16 adapter_index);
+
+u16 hpi_adapter_debug_read(const struct hpi_hsubsys *ph_subsys,
+ u16 adapter_index, u32 dsp_address, char *p_bytes, int *count_bytes);
+
+u16 hpi_adapter_set_property(const struct hpi_hsubsys *ph_subsys,
+ u16 adapter_index, u16 property, u16 paramter1, u16 paramter2);
+
+u16 hpi_adapter_get_property(const struct hpi_hsubsys *ph_subsys,
+ u16 adapter_index, u16 property, u16 *pw_paramter1,
+ u16 *pw_paramter2);
+
+u16 hpi_adapter_enumerate_property(const struct hpi_hsubsys *ph_subsys,
+ u16 adapter_index, u16 index, u16 what_to_enumerate,
+ u16 property_index, u32 *psetting);
+
+/*////////////// */
+/* NonVol Memory */
+u16 hpi_nv_memory_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index,
+ u32 *ph_nv_memory, u16 *pw_size_in_bytes);
+
+u16 hpi_nv_memory_read_byte(const struct hpi_hsubsys *ph_subsys,
+ u32 h_nv_memory, u16 index, u16 *pw_data);
+
+u16 hpi_nv_memory_write_byte(const struct hpi_hsubsys *ph_subsys,
+ u32 h_nv_memory, u16 index, u16 data);
+
+/*////////////// */
+/* Digital I/O */
+u16 hpi_gpio_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index,
+ u32 *ph_gpio, u16 *pw_number_input_bits, u16 *pw_number_output_bits);
+
+u16 hpi_gpio_read_bit(const struct hpi_hsubsys *ph_subsys, u32 h_gpio,
+ u16 bit_index, u16 *pw_bit_data);
+
+u16 hpi_gpio_read_all_bits(const struct hpi_hsubsys *ph_subsys, u32 h_gpio,
+ u16 aw_all_bit_data[4]
+ );
+
+u16 hpi_gpio_write_bit(const struct hpi_hsubsys *ph_subsys, u32 h_gpio,
+ u16 bit_index, u16 bit_data);
+
+u16 hpi_gpio_write_status(const struct hpi_hsubsys *ph_subsys, u32 h_gpio,
+ u16 aw_all_bit_data[4]
+ );
+
+/**********************/
+/* Async Event Object */
+/**********************/
+u16 hpi_async_event_open(const struct hpi_hsubsys *ph_subsys,
+ u16 adapter_index, u32 *ph_async);
+
+u16 hpi_async_event_close(const struct hpi_hsubsys *ph_subsys, u32 h_async);
+
+u16 hpi_async_event_wait(const struct hpi_hsubsys *ph_subsys, u32 h_async,
+ u16 maximum_events, struct hpi_async_event *p_events,
+ u16 *pw_number_returned);
+
+u16 hpi_async_event_get_count(const struct hpi_hsubsys *ph_subsys,
+ u32 h_async, u16 *pw_count);
+
+u16 hpi_async_event_get(const struct hpi_hsubsys *ph_subsys, u32 h_async,
+ u16 maximum_events, struct hpi_async_event *p_events,
+ u16 *pw_number_returned);
+
+/*/////////// */
+/* WATCH-DOG */
+u16 hpi_watchdog_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index,
+ u32 *ph_watchdog);
+
+u16 hpi_watchdog_set_time(const struct hpi_hsubsys *ph_subsys, u32 h_watchdog,
+ u32 time_millisec);
+
+u16 hpi_watchdog_ping(const struct hpi_hsubsys *ph_subsys, u32 h_watchdog);
+
+/**************/
+/* OUT STREAM */
+/**************/
+u16 hpi_outstream_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index,
+ u16 outstream_index, u32 *ph_outstream);
+
+u16 hpi_outstream_close(const struct hpi_hsubsys *ph_subsys, u32 h_outstream);
+
+u16 hpi_outstream_get_info_ex(const struct hpi_hsubsys *ph_subsys,
+ u32 h_outstream, u16 *pw_state, u32 *pbuffer_size, u32 *pdata_to_play,
+ u32 *psamples_played, u32 *pauxiliary_data_to_play);
+
+u16 hpi_outstream_write_buf(const struct hpi_hsubsys *ph_subsys,
+ u32 h_outstream, const u8 *pb_write_buf, u32 bytes_to_write,
+ const struct hpi_format *p_format);
+
+u16 hpi_outstream_start(const struct hpi_hsubsys *ph_subsys, u32 h_outstream);
+
+u16 hpi_outstream_wait_start(const struct hpi_hsubsys *ph_subsys,
+ u32 h_outstream);
+
+u16 hpi_outstream_stop(const struct hpi_hsubsys *ph_subsys, u32 h_outstream);
+
+u16 hpi_outstream_sinegen(const struct hpi_hsubsys *ph_subsys,
+ u32 h_outstream);
+
+u16 hpi_outstream_reset(const struct hpi_hsubsys *ph_subsys, u32 h_outstream);
+
+u16 hpi_outstream_query_format(const struct hpi_hsubsys *ph_subsys,
+ u32 h_outstream, struct hpi_format *p_format);
+
+u16 hpi_outstream_set_format(const struct hpi_hsubsys *ph_subsys,
+ u32 h_outstream, struct hpi_format *p_format);
+
+u16 hpi_outstream_set_punch_in_out(const struct hpi_hsubsys *ph_subsys,
+ u32 h_outstream, u32 punch_in_sample, u32 punch_out_sample);
+
+u16 hpi_outstream_set_velocity(const struct hpi_hsubsys *ph_subsys,
+ u32 h_outstream, short velocity);
+
+u16 hpi_outstream_ancillary_reset(const struct hpi_hsubsys *ph_subsys,
+ u32 h_outstream, u16 mode);
+
+u16 hpi_outstream_ancillary_get_info(const struct hpi_hsubsys *ph_subsys,
+ u32 h_outstream, u32 *pframes_available);
+
+u16 hpi_outstream_ancillary_read(const struct hpi_hsubsys *ph_subsys,
+ u32 h_outstream, struct hpi_anc_frame *p_anc_frame_buffer,
+ u32 anc_frame_buffer_size_in_bytes,
+ u32 number_of_ancillary_frames_to_read);
+
+u16 hpi_outstream_set_time_scale(const struct hpi_hsubsys *ph_subsys,
+ u32 h_outstream, u32 time_scaleX10000);
+
+u16 hpi_outstream_host_buffer_allocate(const struct hpi_hsubsys *ph_subsys,
+ u32 h_outstream, u32 size_in_bytes);
+
+u16 hpi_outstream_host_buffer_free(const struct hpi_hsubsys *ph_subsys,
+ u32 h_outstream);
+
+u16 hpi_outstream_group_add(const struct hpi_hsubsys *ph_subsys,
+ u32 h_outstream, u32 h_stream);
+
+u16 hpi_outstream_group_get_map(const struct hpi_hsubsys *ph_subsys,
+ u32 h_outstream, u32 *poutstream_map, u32 *pinstream_map);
+
+u16 hpi_outstream_group_reset(const struct hpi_hsubsys *ph_subsys,
+ u32 h_outstream);
+
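+/* Illustrative playback sketch (not part of this header; "ss", "buf",
+ * "bytes", "fmt" and "adapter_index" are assumed caller-side variables,
+ * error handling elided). Only prototypes from this section are used:
+ *
+ *   u32 h_out;
+ *   hpi_outstream_open(ss, adapter_index, 0, &h_out);
+ *   hpi_outstream_write_buf(ss, h_out, buf, bytes, &fmt);
+ *   hpi_outstream_start(ss, h_out);
+ *   ...
+ *   hpi_outstream_stop(ss, h_out);
+ *   hpi_outstream_close(ss, h_out);
+ */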
+/*////////// */
+/* IN_STREAM */
+u16 hpi_instream_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index,
+ u16 instream_index, u32 *ph_instream);
+
+u16 hpi_instream_close(const struct hpi_hsubsys *ph_subsys, u32 h_instream);
+
+u16 hpi_instream_query_format(const struct hpi_hsubsys *ph_subsys,
+ u32 h_instream, const struct hpi_format *p_format);
+
+u16 hpi_instream_set_format(const struct hpi_hsubsys *ph_subsys,
+ u32 h_instream, const struct hpi_format *p_format);
+
+u16 hpi_instream_read_buf(const struct hpi_hsubsys *ph_subsys, u32 h_instream,
+ u8 *pb_read_buf, u32 bytes_to_read);
+
+u16 hpi_instream_start(const struct hpi_hsubsys *ph_subsys, u32 h_instream);
+
+u16 hpi_instream_wait_start(const struct hpi_hsubsys *ph_subsys,
+ u32 h_instream);
+
+u16 hpi_instream_stop(const struct hpi_hsubsys *ph_subsys, u32 h_instream);
+
+u16 hpi_instream_reset(const struct hpi_hsubsys *ph_subsys, u32 h_instream);
+
+u16 hpi_instream_get_info_ex(const struct hpi_hsubsys *ph_subsys,
+ u32 h_instream, u16 *pw_state, u32 *pbuffer_size, u32 *pdata_recorded,
+ u32 *psamples_recorded, u32 *pauxiliary_data_recorded);
+
+u16 hpi_instream_ancillary_reset(const struct hpi_hsubsys *ph_subsys,
+ u32 h_instream, u16 bytes_per_frame, u16 mode, u16 alignment,
+ u16 idle_bit);
+
+u16 hpi_instream_ancillary_get_info(const struct hpi_hsubsys *ph_subsys,
+ u32 h_instream, u32 *pframe_space);
+
+u16 hpi_instream_ancillary_write(const struct hpi_hsubsys *ph_subsys,
+ u32 h_instream, const struct hpi_anc_frame *p_anc_frame_buffer,
+ u32 anc_frame_buffer_size_in_bytes,
+ u32 number_of_ancillary_frames_to_write);
+
+u16 hpi_instream_host_buffer_allocate(const struct hpi_hsubsys *ph_subsys,
+ u32 h_instream, u32 size_in_bytes);
+
+u16 hpi_instream_host_buffer_free(const struct hpi_hsubsys *ph_subsys,
+ u32 h_instream);
+
+u16 hpi_instream_group_add(const struct hpi_hsubsys *ph_subsys,
+ u32 h_instream, u32 h_stream);
+
+u16 hpi_instream_group_get_map(const struct hpi_hsubsys *ph_subsys,
+ u32 h_instream, u32 *poutstream_map, u32 *pinstream_map);
+
+u16 hpi_instream_group_reset(const struct hpi_hsubsys *ph_subsys,
+ u32 h_instream);
+
+/*********/
+/* MIXER */
+/*********/
+u16 hpi_mixer_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index,
+ u32 *ph_mixer);
+
+u16 hpi_mixer_close(const struct hpi_hsubsys *ph_subsys, u32 h_mixer);
+
+u16 hpi_mixer_get_control(const struct hpi_hsubsys *ph_subsys, u32 h_mixer,
+ u16 src_node_type, u16 src_node_type_index, u16 dst_node_type,
+ u16 dst_node_type_index, u16 control_type, u32 *ph_control);
+
+u16 hpi_mixer_get_control_by_index(const struct hpi_hsubsys *ph_subsys,
+ u32 h_mixer, u16 control_index, u16 *pw_src_node_type,
+ u16 *pw_src_node_index, u16 *pw_dst_node_type, u16 *pw_dst_node_index,
+ u16 *pw_control_type, u32 *ph_control);
+
+u16 hpi_mixer_store(const struct hpi_hsubsys *ph_subsys, u32 h_mixer,
+ enum HPI_MIXER_STORE_COMMAND command, u16 index);
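+/* Illustrative sketch (not part of this header): looking up a control via
+ * the mixer. The node and control type constants shown
+ * (HPI_SOURCENODE_OSTREAM, HPI_DESTNODE_LINEOUT, HPI_CONTROL_VOLUME) are
+ * assumed to be defined elsewhere in the HPI headers; "ss" and
+ * "adapter_index" are caller-side variables.
+ *
+ *   u32 h_mixer, h_vol;
+ *   hpi_mixer_open(ss, adapter_index, &h_mixer);
+ *   hpi_mixer_get_control(ss, h_mixer,
+ *       HPI_SOURCENODE_OSTREAM, 0, HPI_DESTNODE_LINEOUT, 0,
+ *       HPI_CONTROL_VOLUME, &h_vol);
+ */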
+/*************************/
+/* mixer CONTROLS */
+/*************************/
+/*************************/
+/* volume control */
+/*************************/
+u16 hpi_volume_set_gain(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ short an_gain0_01dB[HPI_MAX_CHANNELS]
+ );
+
+u16 hpi_volume_get_gain(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ short an_gain0_01dB_out[HPI_MAX_CHANNELS]
+ );
+
+#define hpi_volume_get_range hpi_volume_query_range
+u16 hpi_volume_query_range(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ short *min_gain_01dB, short *max_gain_01dB, short *step_gain_01dB);
+
+u16 hpi_volume_query_channels(const struct hpi_hsubsys *ph_subsys,
+ const u32 h_volume, u32 *p_channels);
+
+u16 hpi_volume_auto_fade(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ short an_stop_gain0_01dB[HPI_MAX_CHANNELS], u32 duration_ms);
+
+u16 hpi_volume_auto_fade_profile(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, short an_stop_gain0_01dB[HPI_MAX_CHANNELS],
+ u32 duration_ms, u16 profile);
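+/* Gains carrying the "0_01dB" suffix are in hundredths of a dB, so a
+ * setting of -10.00 dB is passed as -1000. A minimal sketch, assuming a
+ * control handle "h_vol" obtained from the mixer:
+ *
+ *   short gain[HPI_MAX_CHANNELS] = { -1000, -1000 };
+ *   hpi_volume_set_gain(ss, h_vol, gain);
+ */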
+
+/*************************/
+/* level control */
+/*************************/
+u16 hpi_level_query_range(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ short *min_gain_01dB, short *max_gain_01dB, short *step_gain_01dB);
+
+u16 hpi_level_set_gain(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ short an_gain0_01dB[HPI_MAX_CHANNELS]
+ );
+
+u16 hpi_level_get_gain(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ short an_gain0_01dB_out[HPI_MAX_CHANNELS]
+ );
+
+/*************************/
+/* meter control */
+/*************************/
+u16 hpi_meter_query_channels(const struct hpi_hsubsys *ph_subsys,
+ const u32 h_meter, u32 *p_channels);
+
+u16 hpi_meter_get_peak(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ short an_peak0_01dB_out[HPI_MAX_CHANNELS]
+ );
+
+u16 hpi_meter_get_rms(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ short an_peak0_01dB_out[HPI_MAX_CHANNELS]
+ );
+
+u16 hpi_meter_set_peak_ballistics(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 attack, u16 decay);
+
+u16 hpi_meter_set_rms_ballistics(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 attack, u16 decay);
+
+u16 hpi_meter_get_peak_ballistics(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 *attack, u16 *decay);
+
+u16 hpi_meter_get_rms_ballistics(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 *attack, u16 *decay);
+
+/*************************/
+/* channel mode control */
+/*************************/
+u16 hpi_channel_mode_query_mode(const struct hpi_hsubsys *ph_subsys,
+ const u32 h_mode, const u32 index, u16 *pw_mode);
+
+u16 hpi_channel_mode_set(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ u16 mode);
+
+u16 hpi_channel_mode_get(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ u16 *mode);
+
+/*************************/
+/* Tuner control */
+/*************************/
+u16 hpi_tuner_query_band(const struct hpi_hsubsys *ph_subsys,
+ const u32 h_tuner, const u32 index, u16 *pw_band);
+
+u16 hpi_tuner_set_band(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ u16 band);
+
+u16 hpi_tuner_get_band(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ u16 *pw_band);
+
+u16 hpi_tuner_query_frequency(const struct hpi_hsubsys *ph_subsys,
+ const u32 h_tuner, const u32 index, const u16 band, u32 *pfreq);
+
+u16 hpi_tuner_set_frequency(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 freq_ink_hz);
+
+u16 hpi_tuner_get_frequency(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 *pw_freq_ink_hz);
+
+u16 hpi_tuner_getRF_level(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ short *pw_level);
+
+u16 hpi_tuner_get_rawRF_level(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, short *pw_level);
+
+u16 hpi_tuner_query_gain(const struct hpi_hsubsys *ph_subsys,
+ const u32 h_tuner, const u32 index, u16 *pw_gain);
+
+u16 hpi_tuner_set_gain(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ short gain);
+
+u16 hpi_tuner_get_gain(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ short *pn_gain);
+
+u16 hpi_tuner_get_status(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ u16 *pw_status_mask, u16 *pw_status);
+
+u16 hpi_tuner_set_mode(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ u32 mode, u32 value);
+
+u16 hpi_tuner_get_mode(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ u32 mode, u32 *pn_value);
+
+u16 hpi_tuner_getRDS(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ char *p_rds_data);
+
+u16 hpi_tuner_query_deemphasis(const struct hpi_hsubsys *ph_subsys,
+ const u32 h_tuner, const u32 index, const u16 band, u32 *pdeemphasis);
+
+u16 hpi_tuner_set_deemphasis(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 deemphasis);
+u16 hpi_tuner_get_deemphasis(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 *pdeemphasis);
+
+u16 hpi_tuner_query_program(const struct hpi_hsubsys *ph_subsys,
+ const u32 h_tuner, u32 *pbitmap_program);
+
+u16 hpi_tuner_set_program(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ u32 program);
+
+u16 hpi_tuner_get_program(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ u32 *pprogram);
+
+u16 hpi_tuner_get_hd_radio_dsp_version(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, char *psz_dsp_version, const u32 string_size);
+
+u16 hpi_tuner_get_hd_radio_sdk_version(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, char *psz_sdk_version, const u32 string_size);
+
+u16 hpi_tuner_get_hd_radio_signal_quality(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 *pquality);
+
+/****************************/
+/* PADs control */
+/****************************/
+
+u16 HPI_PAD__get_channel_name(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, char *psz_string, const u32 string_length);
+
+u16 HPI_PAD__get_artist(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ char *psz_string, const u32 string_length);
+
+u16 HPI_PAD__get_title(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ char *psz_string, const u32 string_length);
+
+u16 HPI_PAD__get_comment(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ char *psz_string, const u32 string_length);
+
+u16 HPI_PAD__get_program_type(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 *ppTY);
+
+u16 HPI_PAD__get_rdsPI(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ u32 *ppI);
+
+u16 HPI_PAD__get_program_type_string(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, const u32 data_type, const u32 pTY, char *psz_string,
+ const u32 string_length);
+
+/****************************/
+/* AES/EBU Receiver control */
+/****************************/
+u16 HPI_AESEBU__receiver_query_format(const struct hpi_hsubsys *ph_subsys,
+ const u32 h_aes_rx, const u32 index, u16 *pw_format);
+
+u16 HPI_AESEBU__receiver_set_format(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 source);
+
+u16 HPI_AESEBU__receiver_get_format(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 *pw_source);
+
+u16 HPI_AESEBU__receiver_get_sample_rate(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 *psample_rate);
+
+u16 HPI_AESEBU__receiver_get_user_data(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 index, u16 *pw_data);
+
+u16 HPI_AESEBU__receiver_get_channel_status(const struct hpi_hsubsys
+ *ph_subsys, u32 h_control, u16 index, u16 *pw_data);
+
+u16 HPI_AESEBU__receiver_get_error_status(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 *pw_error_data);
+
+/*******************************/
+/* AES/EBU Transmitter control */
+/*******************************/
+u16 HPI_AESEBU__transmitter_set_sample_rate(const struct hpi_hsubsys
+ *ph_subsys, u32 h_control, u32 sample_rate);
+
+u16 HPI_AESEBU__transmitter_set_user_data(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 index, u16 data);
+
+u16 HPI_AESEBU__transmitter_set_channel_status(const struct hpi_hsubsys
+ *ph_subsys, u32 h_control, u16 index, u16 data);
+
+u16 HPI_AESEBU__transmitter_get_channel_status(const struct hpi_hsubsys
+ *ph_subsys, u32 h_control, u16 index, u16 *pw_data);
+
+u16 HPI_AESEBU__transmitter_query_format(const struct hpi_hsubsys *ph_subsys,
+ const u32 h_aes_tx, const u32 index, u16 *pw_format);
+
+u16 HPI_AESEBU__transmitter_set_format(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 output_format);
+
+u16 HPI_AESEBU__transmitter_get_format(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 *pw_output_format);
+
+/***********************/
+/* multiplexer control */
+/***********************/
+u16 hpi_multiplexer_set_source(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 source_node_type, u16 source_node_index);
+
+u16 hpi_multiplexer_get_source(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 *source_node_type, u16 *source_node_index);
+
+u16 hpi_multiplexer_query_source(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 index, u16 *source_node_type,
+ u16 *source_node_index);
+
+/***************/
+/* VOX control */
+/***************/
+u16 hpi_vox_set_threshold(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ short an_gain0_01dB);
+
+u16 hpi_vox_get_threshold(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ short *an_gain0_01dB);
+
+/*********************/
+/* Bitstream control */
+/*********************/
+u16 hpi_bitstream_set_clock_edge(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 edge_type);
+
+u16 hpi_bitstream_set_data_polarity(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 polarity);
+
+u16 hpi_bitstream_get_activity(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 *pw_clk_activity, u16 *pw_data_activity);
+
+/***********************/
+/* SampleClock control */
+/***********************/
+
+u16 hpi_sample_clock_query_source(const struct hpi_hsubsys *ph_subsys,
+ const u32 h_clock, const u32 index, u16 *pw_source);
+
+u16 hpi_sample_clock_set_source(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 source);
+
+u16 hpi_sample_clock_get_source(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 *pw_source);
+
+u16 hpi_sample_clock_query_source_index(const struct hpi_hsubsys *ph_subsys,
+ const u32 h_clock, const u32 index, const u32 source,
+ u16 *pw_source_index);
+
+u16 hpi_sample_clock_set_source_index(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 source_index);
+
+u16 hpi_sample_clock_get_source_index(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 *pw_source_index);
+
+u16 hpi_sample_clock_get_sample_rate(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 *psample_rate);
+
+u16 hpi_sample_clock_query_local_rate(const struct hpi_hsubsys *ph_subsys,
+ const u32 h_clock, const u32 index, u32 *psource);
+
+u16 hpi_sample_clock_set_local_rate(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 sample_rate);
+
+u16 hpi_sample_clock_get_local_rate(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 *psample_rate);
+
+u16 hpi_sample_clock_set_auto(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 enable);
+
+u16 hpi_sample_clock_get_auto(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 *penable);
+
+u16 hpi_sample_clock_set_local_rate_lock(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 lock);
+
+u16 hpi_sample_clock_get_local_rate_lock(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 *plock);
+
+/***********************/
+/* Microphone control */
+/***********************/
+u16 hpi_microphone_set_phantom_power(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 on_off);
+
+u16 hpi_microphone_get_phantom_power(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 *pw_on_off);
+
+/*******************************
+ Parametric Equalizer control
+*******************************/
+u16 hpi_parametricEQ__get_info(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 *pw_number_of_bands, u16 *pw_enabled);
+
+u16 hpi_parametricEQ__set_state(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 on_off);
+
+u16 hpi_parametricEQ__set_band(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 index, u16 type, u32 frequency_hz, short q100,
+ short gain0_01dB);
+
+u16 hpi_parametricEQ__get_band(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 index, u16 *pn_type, u32 *pfrequency_hz,
+ short *pnQ100, short *pn_gain0_01dB);
+
+u16 hpi_parametricEQ__get_coeffs(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 index, short coeffs[5]
+ );
+
+/*******************************
+ Compressor Expander control
+*******************************/
+
+u16 hpi_compander_set(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ u16 attack, u16 decay, short ratio100, short threshold0_01dB,
+ short makeup_gain0_01dB);
+
+u16 hpi_compander_get(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ u16 *pw_attack, u16 *pw_decay, short *pw_ratio100,
+ short *pn_threshold0_01dB, short *pn_makeup_gain0_01dB);
+
+/*******************************
+ Cobranet HMI control
+*******************************/
+u16 hpi_cobranet_hmi_write(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ u32 hmi_address, u32 byte_count, u8 *pb_data);
+
+u16 hpi_cobranet_hmi_read(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ u32 hmi_address, u32 max_byte_count, u32 *pbyte_count, u8 *pb_data);
+
+u16 hpi_cobranet_hmi_get_status(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 *pstatus, u32 *preadable_size,
+ u32 *pwriteable_size);
+
+/* Read the current IP address
+*/
+u16 hpi_cobranet_getI_paddress(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 *pi_paddress);
+
+/* Write the current IP address
+*/
+u16 hpi_cobranet_setI_paddress(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 i_paddress);
+
+/* Read the static IP address
+*/
+u16 hpi_cobranet_get_staticI_paddress(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 *pi_paddress);
+
+/* Write the static IP address
+*/
+u16 hpi_cobranet_set_staticI_paddress(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 i_paddress);
+
+/* Read the MAC address
+*/
+u16 hpi_cobranet_getMA_caddress(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 *pmAC_MS_bs, u32 *pmAC_LS_bs);
+
+/*******************************
+ Tone Detector control
+*******************************/
+u16 hpi_tone_detector_get_state(const struct hpi_hsubsys *ph_subsys, u32 hC,
+ u32 *state);
+
+u16 hpi_tone_detector_set_enable(const struct hpi_hsubsys *ph_subsys, u32 hC,
+ u32 enable);
+
+u16 hpi_tone_detector_get_enable(const struct hpi_hsubsys *ph_subsys, u32 hC,
+ u32 *enable);
+
+u16 hpi_tone_detector_set_event_enable(const struct hpi_hsubsys *ph_subsys,
+ u32 hC, u32 event_enable);
+
+u16 hpi_tone_detector_get_event_enable(const struct hpi_hsubsys *ph_subsys,
+ u32 hC, u32 *event_enable);
+
+u16 hpi_tone_detector_set_threshold(const struct hpi_hsubsys *ph_subsys,
+ u32 hC, int threshold);
+
+u16 hpi_tone_detector_get_threshold(const struct hpi_hsubsys *ph_subsys,
+ u32 hC, int *threshold);
+
+u16 hpi_tone_detector_get_frequency(const struct hpi_hsubsys *ph_subsys,
+ u32 hC, u32 index, u32 *frequency);
+
+/*******************************
+ Silence Detector control
+*******************************/
+u16 hpi_silence_detector_get_state(const struct hpi_hsubsys *ph_subsys,
+ u32 hC, u32 *state);
+
+u16 hpi_silence_detector_set_enable(const struct hpi_hsubsys *ph_subsys,
+ u32 hC, u32 enable);
+
+u16 hpi_silence_detector_get_enable(const struct hpi_hsubsys *ph_subsys,
+ u32 hC, u32 *enable);
+
+u16 hpi_silence_detector_set_event_enable(const struct hpi_hsubsys *ph_subsys,
+ u32 hC, u32 event_enable);
+
+u16 hpi_silence_detector_get_event_enable(const struct hpi_hsubsys *ph_subsys,
+ u32 hC, u32 *event_enable);
+
+u16 hpi_silence_detector_set_delay(const struct hpi_hsubsys *ph_subsys,
+ u32 hC, u32 delay);
+
+u16 hpi_silence_detector_get_delay(const struct hpi_hsubsys *ph_subsys,
+ u32 hC, u32 *delay);
+
+u16 hpi_silence_detector_set_threshold(const struct hpi_hsubsys *ph_subsys,
+ u32 hC, int threshold);
+
+u16 hpi_silence_detector_get_threshold(const struct hpi_hsubsys *ph_subsys,
+ u32 hC, int *threshold);
+
+/*******************************
+ Universal control
+*******************************/
+u16 hpi_entity_find_next(struct hpi_entity *container_entity,
+ enum e_entity_type type, enum e_entity_role role, int recursive_flag,
+ struct hpi_entity **current_match);
+
+u16 hpi_entity_copy_value_from(struct hpi_entity *entity,
+ enum e_entity_type type, size_t item_count, void *value_dst_p);
+
+u16 hpi_entity_unpack(struct hpi_entity *entity, enum e_entity_type *type,
+ size_t *items, enum e_entity_role *role, void **value);
+
+u16 hpi_entity_alloc_and_pack(const enum e_entity_type type,
+ const size_t item_count, const enum e_entity_role role, void *value,
+ struct hpi_entity **entity);
+
+void hpi_entity_free(struct hpi_entity *entity);
+
+u16 hpi_universal_info(const struct hpi_hsubsys *ph_subsys, u32 hC,
+ struct hpi_entity **info);
+
+u16 hpi_universal_get(const struct hpi_hsubsys *ph_subsys, u32 hC,
+ struct hpi_entity **value);
+
+u16 hpi_universal_set(const struct hpi_hsubsys *ph_subsys, u32 hC,
+ struct hpi_entity *value);
+
+/*/////////// */
+/* DSP CLOCK */
+/*/////////// */
+u16 hpi_clock_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index,
+ u32 *ph_dsp_clock);
+
+u16 hpi_clock_set_time(const struct hpi_hsubsys *ph_subsys, u32 h_clock,
+ u16 hour, u16 minute, u16 second, u16 milli_second);
+
+u16 hpi_clock_get_time(const struct hpi_hsubsys *ph_subsys, u32 h_clock,
+ u16 *pw_hour, u16 *pw_minute, u16 *pw_second, u16 *pw_milli_second);
+
+/*/////////// */
+/* PROFILE */
+/*/////////// */
+u16 hpi_profile_open_all(const struct hpi_hsubsys *ph_subsys,
+ u16 adapter_index, u16 profile_index, u32 *ph_profile,
+ u16 *pw_max_profiles);
+
+u16 hpi_profile_get(const struct hpi_hsubsys *ph_subsys, u32 h_profile,
+ u16 index, u16 *pw_seconds, u32 *pmicro_seconds, u32 *pcall_count,
+ u32 *pmax_micro_seconds, u32 *pmin_micro_seconds);
+
+u16 hpi_profile_start_all(const struct hpi_hsubsys *ph_subsys, u32 h_profile);
+
+u16 hpi_profile_stop_all(const struct hpi_hsubsys *ph_subsys, u32 h_profile);
+
+u16 hpi_profile_get_name(const struct hpi_hsubsys *ph_subsys, u32 h_profile,
+ u16 index, char *sz_profile_name, u16 profile_name_length);
+
+u16 hpi_profile_get_utilization(const struct hpi_hsubsys *ph_subsys,
+ u32 h_profile, u32 *putilization);
+
+/*//////////////////// */
+/* UTILITY functions */
+
+u16 hpi_format_create(struct hpi_format *p_format, u16 channels, u16 format,
+ u32 sample_rate, u32 bit_rate, u32 attributes);
+
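+/* Illustrative sketch (not part of this header): building a format and
+ * sizing a host buffer for a 10 ms polling interval. HPI_FORMAT_PCM16_SIGNED
+ * is assumed to be one of the format codes defined elsewhere in the HPI
+ * headers.
+ *
+ *   struct hpi_format fmt;
+ *   u32 size;
+ *   hpi_format_create(&fmt, 2, HPI_FORMAT_PCM16_SIGNED, 48000, 0, 0);
+ *   hpi_stream_estimate_buffer_size(&fmt, 10, &size);
+ */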
+/* Until it's verified, this function is for Windows OSs only */
+
+#endif /*_H_HPI_ */
+/*
+///////////////////////////////////////////////////////////////////////////////
+// See CVS for history. Last complete set in rev 1.146
+////////////////////////////////////////////////////////////////////////////////
+*/
diff --git a/sound/pci/asihpi/hpi6000.c b/sound/pci/asihpi/hpi6000.c
new file mode 100644
index 000000000000..9c50931731a5
--- /dev/null
+++ b/sound/pci/asihpi/hpi6000.c
@@ -0,0 +1,1841 @@
+/******************************************************************************
+
+ AudioScience HPI driver
+ Copyright (C) 1997-2010 AudioScience Inc. <support@audioscience.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation;
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ Hardware Programming Interface (HPI) for AudioScience ASI6200 series adapters.
+ These PCI bus adapters are based on the TI C6711 DSP.
+
+ Exported functions:
+ void HPI_6000(struct hpi_message *phm, struct hpi_response *phr)
+
+ #defines
+ HIDE_PCI_ASSERTS to hide the PCI asserts
+ PROFILE_DSP2 get profile data from DSP2 if present (instead of DSP 1)
+
+(C) Copyright AudioScience Inc. 1998-2003
+*******************************************************************************/
+#define SOURCEFILE_NAME "hpi6000.c"
+
+#include "hpi_internal.h"
+#include "hpimsginit.h"
+#include "hpidebug.h"
+#include "hpi6000.h"
+#include "hpidspcd.h"
+#include "hpicmn.h"
+
+#define HPI_HIF_BASE (0x00000200) /* start of C67xx internal RAM */
+#define HPI_HIF_ADDR(member) \
+ (HPI_HIF_BASE + offsetof(struct hpi_hif_6000, member))
+#define HPI_HIF_ERROR_MASK 0x4000
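+/* Worked example (illustration only): HPI_HIF_ADDR() maps a member of
+ * struct hpi_hif_6000 to its address in the DSP's internal RAM, e.g.
+ * HPI_HIF_ADDR(host_cmd) expands to
+ * HPI_HIF_BASE + offsetof(struct hpi_hif_6000, host_cmd),
+ * which is the address later passed to hpi_read_word()/hpi_write_word()
+ * when polling the host mailbox.
+ */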
+
+/* HPI6000 specific error codes */
+
+#define HPI6000_ERROR_BASE 900
+#define HPI6000_ERROR_MSG_RESP_IDLE_TIMEOUT 901
+#define HPI6000_ERROR_MSG_RESP_SEND_MSG_ACK 902
+#define HPI6000_ERROR_MSG_RESP_GET_RESP_ACK 903
+#define HPI6000_ERROR_MSG_GET_ADR 904
+#define HPI6000_ERROR_RESP_GET_ADR 905
+#define HPI6000_ERROR_MSG_RESP_BLOCKWRITE32 906
+#define HPI6000_ERROR_MSG_RESP_BLOCKREAD32 907
+#define HPI6000_ERROR_MSG_INVALID_DSP_INDEX 908
+#define HPI6000_ERROR_CONTROL_CACHE_PARAMS 909
+
+#define HPI6000_ERROR_SEND_DATA_IDLE_TIMEOUT 911
+#define HPI6000_ERROR_SEND_DATA_ACK 912
+#define HPI6000_ERROR_SEND_DATA_ADR 913
+#define HPI6000_ERROR_SEND_DATA_TIMEOUT 914
+#define HPI6000_ERROR_SEND_DATA_CMD 915
+#define HPI6000_ERROR_SEND_DATA_WRITE 916
+#define HPI6000_ERROR_SEND_DATA_IDLECMD 917
+#define HPI6000_ERROR_SEND_DATA_VERIFY 918
+
+#define HPI6000_ERROR_GET_DATA_IDLE_TIMEOUT 921
+#define HPI6000_ERROR_GET_DATA_ACK 922
+#define HPI6000_ERROR_GET_DATA_CMD 923
+#define HPI6000_ERROR_GET_DATA_READ 924
+#define HPI6000_ERROR_GET_DATA_IDLECMD 925
+
+#define HPI6000_ERROR_CONTROL_CACHE_ADDRLEN 951
+#define HPI6000_ERROR_CONTROL_CACHE_READ 952
+#define HPI6000_ERROR_CONTROL_CACHE_FLUSH 953
+
+#define HPI6000_ERROR_MSG_RESP_GETRESPCMD 961
+#define HPI6000_ERROR_MSG_RESP_IDLECMD 962
+#define HPI6000_ERROR_MSG_RESP_BLOCKVERIFY32 963
+
+/* adapter init errors */
+#define HPI6000_ERROR_UNHANDLED_SUBSYS_ID 930
+
+/* can't access PCI2040 */
+#define HPI6000_ERROR_INIT_PCI2040 931
+/* can't access DSP HPI i/f */
+#define HPI6000_ERROR_INIT_DSPHPI 932
+/* can't access internal DSP memory */
+#define HPI6000_ERROR_INIT_DSPINTMEM 933
+/* can't access SDRAM - test#1 */
+#define HPI6000_ERROR_INIT_SDRAM1 934
+/* can't access SDRAM - test#2 */
+#define HPI6000_ERROR_INIT_SDRAM2 935
+
+#define HPI6000_ERROR_INIT_VERIFY 938
+
+#define HPI6000_ERROR_INIT_NOACK 939
+
+#define HPI6000_ERROR_INIT_PLDTEST1 941
+#define HPI6000_ERROR_INIT_PLDTEST2 942
+
+/* local defines */
+
+#define HIDE_PCI_ASSERTS
+#define PROFILE_DSP2
+
+/* for PCI2040 i/f chip */
+/* HPI CSR registers */
+/* word offsets from CSR base */
+/* use when I/O addresses are defined as u32 * */
+
+#define INTERRUPT_EVENT_SET 0
+#define INTERRUPT_EVENT_CLEAR 1
+#define INTERRUPT_MASK_SET 2
+#define INTERRUPT_MASK_CLEAR 3
+#define HPI_ERROR_REPORT 4
+#define HPI_RESET 5
+#define HPI_DATA_WIDTH 6
+
+#define MAX_DSPS 2
+/* HPI registers, spaced 8K bytes = 2K words apart */
+#define DSP_SPACING 0x800
+
+#define CONTROL 0x0000
+#define ADDRESS 0x0200
+#define DATA_AUTOINC 0x0400
+#define DATA 0x0600
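+/* Worked example (illustration only): treating the BAR1 mapping as a u32
+ * pointer, the ADDRESS register of DSP 1 is at
+ * dw2040_HPIDSP + ADDRESS + DSP_SPACING * 1 = dw2040_HPIDSP + 0xA00 words;
+ * create_adapter_obj() below builds the per-DSP register pointers exactly
+ * this way.
+ */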
+
+#define TIMEOUT 500000
+
+struct dsp_obj {
+ __iomem u32 *prHPI_control;
+ __iomem u32 *prHPI_address;
+ __iomem u32 *prHPI_data;
+ __iomem u32 *prHPI_data_auto_inc;
+ char c_dsp_rev; /*A, B */
+ u32 control_cache_address_on_dsp;
+ u32 control_cache_length_on_dsp;
+ struct hpi_adapter_obj *pa_parent_adapter;
+};
+
+struct hpi_hw_obj {
+ __iomem u32 *dw2040_HPICSR;
+ __iomem u32 *dw2040_HPIDSP;
+
+ u16 num_dsp;
+ struct dsp_obj ado[MAX_DSPS];
+
+ u32 message_buffer_address_on_dsp;
+ u32 response_buffer_address_on_dsp;
+ u32 pCI2040HPI_error_count;
+
+ struct hpi_control_cache_single control_cache[HPI_NMIXER_CONTROLS];
+ struct hpi_control_cache *p_cache;
+};
+
+static u16 hpi6000_dsp_block_write32(struct hpi_adapter_obj *pao,
+ u16 dsp_index, u32 hpi_address, u32 *source, u32 count);
+static u16 hpi6000_dsp_block_read32(struct hpi_adapter_obj *pao,
+ u16 dsp_index, u32 hpi_address, u32 *dest, u32 count);
+
+static short hpi6000_adapter_boot_load_dsp(struct hpi_adapter_obj *pao,
+ u32 *pos_error_code);
+static short hpi6000_check_PCI2040_error_flag(struct hpi_adapter_obj *pao,
+ u16 read_or_write);
+#define H6READ 1
+#define H6WRITE 0
+
+static short hpi6000_update_control_cache(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm);
+static short hpi6000_message_response_sequence(struct hpi_adapter_obj *pao,
+ u16 dsp_index, struct hpi_message *phm, struct hpi_response *phr);
+
+static void hw_message(struct hpi_adapter_obj *pao, struct hpi_message *phm,
+ struct hpi_response *phr);
+
+static short hpi6000_wait_dsp_ack(struct hpi_adapter_obj *pao, u16 dsp_index,
+ u32 ack_value);
+
+static short hpi6000_send_host_command(struct hpi_adapter_obj *pao,
+ u16 dsp_index, u32 host_cmd);
+
+static void hpi6000_send_dsp_interrupt(struct dsp_obj *pdo);
+
+static short hpi6000_send_data(struct hpi_adapter_obj *pao, u16 dsp_index,
+ struct hpi_message *phm, struct hpi_response *phr);
+
+static short hpi6000_get_data(struct hpi_adapter_obj *pao, u16 dsp_index,
+ struct hpi_message *phm, struct hpi_response *phr);
+
+static void hpi_write_word(struct dsp_obj *pdo, u32 address, u32 data);
+
+static u32 hpi_read_word(struct dsp_obj *pdo, u32 address);
+
+static void hpi_write_block(struct dsp_obj *pdo, u32 address, u32 *pdata,
+ u32 length);
+
+static void hpi_read_block(struct dsp_obj *pdo, u32 address, u32 *pdata,
+ u32 length);
+
+static void subsys_create_adapter(struct hpi_message *phm,
+ struct hpi_response *phr);
+
+static void subsys_delete_adapter(struct hpi_message *phm,
+ struct hpi_response *phr);
+
+static void adapter_get_asserts(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr);
+
+static short create_adapter_obj(struct hpi_adapter_obj *pao,
+ u32 *pos_error_code);
+
+/* local globals */
+
+static u16 gw_pci_read_asserts; /* used to count PCI2040 errors */
+static u16 gw_pci_write_asserts; /* used to count PCI2040 errors */
+
+static void subsys_message(struct hpi_message *phm, struct hpi_response *phr)
+{
+
+ switch (phm->function) {
+ case HPI_SUBSYS_OPEN:
+ case HPI_SUBSYS_CLOSE:
+ case HPI_SUBSYS_GET_INFO:
+ case HPI_SUBSYS_DRIVER_UNLOAD:
+ case HPI_SUBSYS_DRIVER_LOAD:
+ case HPI_SUBSYS_FIND_ADAPTERS:
+ /* messages that should not get here */
+ phr->error = HPI_ERROR_UNIMPLEMENTED;
+ break;
+ case HPI_SUBSYS_CREATE_ADAPTER:
+ subsys_create_adapter(phm, phr);
+ break;
+ case HPI_SUBSYS_DELETE_ADAPTER:
+ subsys_delete_adapter(phm, phr);
+ break;
+ default:
+ phr->error = HPI_ERROR_INVALID_FUNC;
+ break;
+ }
+}
+
+static void control_message(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr)
+{
+
+ switch (phm->function) {
+ case HPI_CONTROL_GET_STATE:
+ if (pao->has_control_cache) {
+ u16 err;
+ err = hpi6000_update_control_cache(pao, phm);
+
+ if (err) {
+ phr->error = err;
+ break;
+ }
+
+ if (hpi_check_control_cache(((struct hpi_hw_obj *)
+ pao->priv)->p_cache, phm,
+ phr))
+ break;
+ }
+ hw_message(pao, phm, phr);
+ break;
+ case HPI_CONTROL_GET_INFO:
+ hw_message(pao, phm, phr);
+ break;
+ case HPI_CONTROL_SET_STATE:
+ hw_message(pao, phm, phr);
+ hpi_sync_control_cache(((struct hpi_hw_obj *)pao->priv)->
+ p_cache, phm, phr);
+ break;
+ default:
+ phr->error = HPI_ERROR_INVALID_FUNC;
+ break;
+ }
+}
+
+static void adapter_message(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr)
+{
+ switch (phm->function) {
+ case HPI_ADAPTER_GET_INFO:
+ hw_message(pao, phm, phr);
+ break;
+ case HPI_ADAPTER_GET_ASSERT:
+ adapter_get_asserts(pao, phm, phr);
+ break;
+ case HPI_ADAPTER_OPEN:
+ case HPI_ADAPTER_CLOSE:
+ case HPI_ADAPTER_TEST_ASSERT:
+ case HPI_ADAPTER_SELFTEST:
+ case HPI_ADAPTER_GET_MODE:
+ case HPI_ADAPTER_SET_MODE:
+ case HPI_ADAPTER_FIND_OBJECT:
+ case HPI_ADAPTER_GET_PROPERTY:
+ case HPI_ADAPTER_SET_PROPERTY:
+ case HPI_ADAPTER_ENUM_PROPERTY:
+ hw_message(pao, phm, phr);
+ break;
+ default:
+ phr->error = HPI_ERROR_INVALID_FUNC;
+ break;
+ }
+}
+
+static void outstream_message(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr)
+{
+ switch (phm->function) {
+ case HPI_OSTREAM_HOSTBUFFER_ALLOC:
+ case HPI_OSTREAM_HOSTBUFFER_FREE:
+ /* Don't let these messages go to the HW function because
+ * they're called without allocating the spinlock.
+ * For the HPI6000 adapters the HW would return
+ * HPI_ERROR_INVALID_FUNC anyway.
+ */
+ phr->error = HPI_ERROR_INVALID_FUNC;
+ break;
+ default:
+ hw_message(pao, phm, phr);
+ return;
+ }
+}
+
+static void instream_message(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr)
+{
+
+ switch (phm->function) {
+ case HPI_ISTREAM_HOSTBUFFER_ALLOC:
+ case HPI_ISTREAM_HOSTBUFFER_FREE:
+ /* Don't let these messages go to the HW function because
+ * they're called without allocating the spinlock.
+ * For the HPI6000 adapters the HW would return
+ * HPI_ERROR_INVALID_FUNC anyway.
+ */
+ phr->error = HPI_ERROR_INVALID_FUNC;
+ break;
+ default:
+ hw_message(pao, phm, phr);
+ return;
+ }
+}
+
+/************************************************************************/
+/** HPI_6000()
+ * Entry point from HPIMAN
+ * All calls to the HPI start here
+ */
+void HPI_6000(struct hpi_message *phm, struct hpi_response *phr)
+{
+ struct hpi_adapter_obj *pao = NULL;
+
+ /* subsystem messages get executed by every HPI. */
+ /* All other messages are ignored unless the adapter index matches */
+ /* an adapter in the HPI */
+ HPI_DEBUG_LOG(DEBUG, "O %d,F %x\n", phm->object, phm->function);
+
+ /* if Dsp has crashed then do not communicate with it any more */
+ if (phm->object != HPI_OBJ_SUBSYSTEM) {
+ pao = hpi_find_adapter(phm->adapter_index);
+ if (!pao) {
+ HPI_DEBUG_LOG(DEBUG,
+ " %d,%d refused, for another HPI?\n",
+ phm->object, phm->function);
+ return;
+ }
+
+ if (pao->dsp_crashed >= 10) {
+ hpi_init_response(phr, phm->object, phm->function,
+ HPI_ERROR_DSP_HARDWARE);
+ HPI_DEBUG_LOG(DEBUG, " %d,%d dsp crashed.\n",
+ phm->object, phm->function);
+ return;
+ }
+ }
+ /* Init default response including the size field */
+ if (phm->function != HPI_SUBSYS_CREATE_ADAPTER)
+ hpi_init_response(phr, phm->object, phm->function,
+ HPI_ERROR_PROCESSING_MESSAGE);
+
+ switch (phm->type) {
+ case HPI_TYPE_MESSAGE:
+ switch (phm->object) {
+ case HPI_OBJ_SUBSYSTEM:
+ subsys_message(phm, phr);
+ break;
+
+ case HPI_OBJ_ADAPTER:
+ phr->size =
+ sizeof(struct hpi_response_header) +
+ sizeof(struct hpi_adapter_res);
+ adapter_message(pao, phm, phr);
+ break;
+
+ case HPI_OBJ_CONTROL:
+ control_message(pao, phm, phr);
+ break;
+
+ case HPI_OBJ_OSTREAM:
+ outstream_message(pao, phm, phr);
+ break;
+
+ case HPI_OBJ_ISTREAM:
+ instream_message(pao, phm, phr);
+ break;
+
+ default:
+ hw_message(pao, phm, phr);
+ break;
+ }
+ break;
+
+ default:
+ phr->error = HPI_ERROR_INVALID_TYPE;
+ break;
+ }
+}
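+/* Illustrative sketch (not part of the driver): a caller builds a message
+ * and response pair and hands them to HPI_6000(), in the same way
+ * create_adapter_obj() below queries the adapter:
+ *
+ *   struct hpi_message hm;
+ *   struct hpi_response hr;
+ *   memset(&hm, 0, sizeof(hm));
+ *   memset(&hr, 0, sizeof(hr));
+ *   hm.type = HPI_TYPE_MESSAGE;
+ *   hm.size = sizeof(hm);
+ *   hm.object = HPI_OBJ_ADAPTER;
+ *   hm.function = HPI_ADAPTER_GET_INFO;
+ *   hm.adapter_index = 0;
+ *   hr.size = sizeof(hr);
+ *   HPI_6000(&hm, &hr);
+ *   if (hr.error)
+ *           ...
+ */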
+
+/************************************************************************/
+/* SUBSYSTEM */
+
+/* create an adapter object and initialise it based on resource information
+ * passed in the message
+ * NOTE - you cannot use this function AND the FindAdapters function at the
+ * same time, the application must use only one of them to get the adapters
+ */
+static void subsys_create_adapter(struct hpi_message *phm,
+ struct hpi_response *phr)
+{
+ /* create temp adapter obj, because we don't know what index yet */
+ struct hpi_adapter_obj ao;
+ struct hpi_adapter_obj *pao;
+ u32 os_error_code;
+ short error = 0;
+ u32 dsp_index = 0;
+
+ HPI_DEBUG_LOG(VERBOSE, "subsys_create_adapter\n");
+
+ memset(&ao, 0, sizeof(ao));
+
+ /* this HPI only creates adapters for TI/PCI2040 based devices */
+ if (phm->u.s.resource.bus_type != HPI_BUS_PCI)
+ return;
+ if (phm->u.s.resource.r.pci->vendor_id != HPI_PCI_VENDOR_ID_TI)
+ return;
+ if (phm->u.s.resource.r.pci->device_id != HPI_PCI_DEV_ID_PCI2040)
+ return;
+
+ ao.priv = kmalloc(sizeof(struct hpi_hw_obj), GFP_KERNEL);
+ if (!ao.priv) {
+ HPI_DEBUG_LOG(ERROR, "cant get mem for adapter object\n");
+ phr->error = HPI_ERROR_MEMORY_ALLOC;
+ return;
+ }
+
+ memset(ao.priv, 0, sizeof(struct hpi_hw_obj));
+ /* create the adapter object based on the resource information */
+ /*? memcpy(&ao.Pci,&phm->u.s.Resource.r.Pci,sizeof(ao.Pci)); */
+ ao.pci = *phm->u.s.resource.r.pci;
+
+ error = create_adapter_obj(&ao, &os_error_code);
+ if (!error)
+ error = hpi_add_adapter(&ao);
+ if (error) {
+ phr->u.s.data = os_error_code;
+ kfree(ao.priv);
+ phr->error = error;
+ return;
+ }
+ /* need to update paParentAdapter */
+ pao = hpi_find_adapter(ao.index);
+ if (!pao) {
+ /* We just added this adapter, why can't we find it!? */
+ HPI_DEBUG_LOG(ERROR, "lost adapter after boot\n");
+ phr->error = 950;
+ return;
+ }
+
+ for (dsp_index = 0; dsp_index < MAX_DSPS; dsp_index++) {
+ struct hpi_hw_obj *phw = (struct hpi_hw_obj *)pao->priv;
+ phw->ado[dsp_index].pa_parent_adapter = pao;
+ }
+
+ phr->u.s.aw_adapter_list[ao.index] = ao.adapter_type;
+ phr->u.s.adapter_index = ao.index;
+ phr->u.s.num_adapters++;
+ phr->error = 0;
+}
+
+static void subsys_delete_adapter(struct hpi_message *phm,
+ struct hpi_response *phr)
+{
+ struct hpi_adapter_obj *pao = NULL;
+ struct hpi_hw_obj *phw;
+
+ pao = hpi_find_adapter(phm->adapter_index);
+ if (!pao)
+ return;
+
+ phw = (struct hpi_hw_obj *)pao->priv;
+
+ if (pao->has_control_cache)
+ hpi_free_control_cache(phw->p_cache);
+
+ hpi_delete_adapter(pao);
+ kfree(phw);
+
+ phr->error = 0;
+}
+
+/* this routine is called from SubSysFindAdapter and SubSysCreateAdapter */
+static short create_adapter_obj(struct hpi_adapter_obj *pao,
+ u32 *pos_error_code)
+{
+ short boot_error = 0;
+ u32 dsp_index = 0;
+ u32 control_cache_size = 0;
+ u32 control_cache_count = 0;
+ struct hpi_hw_obj *phw = (struct hpi_hw_obj *)pao->priv;
+
+ /* init error reporting */
+ pao->dsp_crashed = 0;
+
+ /* The PCI2040 has the following address map */
+ /* BAR0 - 4K = HPI control and status registers on PCI2040 (HPI CSR) */
+ /* BAR1 - 32K = HPI registers on DSP */
+ phw->dw2040_HPICSR = pao->pci.ap_mem_base[0];
+ phw->dw2040_HPIDSP = pao->pci.ap_mem_base[1];
+ HPI_DEBUG_LOG(VERBOSE, "csr %p, dsp %p\n", phw->dw2040_HPICSR,
+ phw->dw2040_HPIDSP);
+
+ /* set addresses for the possible DSP HPI interfaces */
+ for (dsp_index = 0; dsp_index < MAX_DSPS; dsp_index++) {
+ phw->ado[dsp_index].prHPI_control =
+ phw->dw2040_HPIDSP + (CONTROL +
+ DSP_SPACING * dsp_index);
+
+ phw->ado[dsp_index].prHPI_address =
+ phw->dw2040_HPIDSP + (ADDRESS +
+ DSP_SPACING * dsp_index);
+ phw->ado[dsp_index].prHPI_data =
+ phw->dw2040_HPIDSP + (DATA + DSP_SPACING * dsp_index);
+
+ phw->ado[dsp_index].prHPI_data_auto_inc =
+ phw->dw2040_HPIDSP + (DATA_AUTOINC +
+ DSP_SPACING * dsp_index);
+
+ HPI_DEBUG_LOG(VERBOSE, "ctl %p, adr %p, dat %p, dat++ %p\n",
+ phw->ado[dsp_index].prHPI_control,
+ phw->ado[dsp_index].prHPI_address,
+ phw->ado[dsp_index].prHPI_data,
+ phw->ado[dsp_index].prHPI_data_auto_inc);
+
+ phw->ado[dsp_index].pa_parent_adapter = pao;
+ }
+
+ phw->pCI2040HPI_error_count = 0;
+ pao->has_control_cache = 0;
+
+ /* Set the default number of DSPs on this card */
+ /* This is (conditionally) adjusted after bootloading */
+ /* of the first DSP in the bootload section. */
+ phw->num_dsp = 1;
+
+ boot_error = hpi6000_adapter_boot_load_dsp(pao, pos_error_code);
+ if (boot_error)
+ return boot_error;
+
+ HPI_DEBUG_LOG(INFO, "bootload DSP OK\n");
+
+ phw->message_buffer_address_on_dsp = 0L;
+ phw->response_buffer_address_on_dsp = 0L;
+
+ /* get info about the adapter by asking the adapter */
+ /* send a HPI_ADAPTER_GET_INFO message */
+ {
+ struct hpi_message hM;
+ struct hpi_response hR0; /* response from DSP 0 */
+ struct hpi_response hR1; /* response from DSP 1 */
+ u16 error = 0;
+
+ HPI_DEBUG_LOG(VERBOSE, "send ADAPTER_GET_INFO\n");
+ memset(&hM, 0, sizeof(hM));
+ hM.type = HPI_TYPE_MESSAGE;
+ hM.size = sizeof(struct hpi_message);
+ hM.object = HPI_OBJ_ADAPTER;
+ hM.function = HPI_ADAPTER_GET_INFO;
+ hM.adapter_index = 0;
+ memset(&hR0, 0, sizeof(hR0));
+ memset(&hR1, 0, sizeof(hR1));
+ hR0.size = sizeof(hR0);
+ hR1.size = sizeof(hR1);
+
+ error = hpi6000_message_response_sequence(pao, 0, &hM, &hR0);
+ if (hR0.error) {
+ HPI_DEBUG_LOG(DEBUG, "message error %d\n", hR0.error);
+ return hR0.error;
+ }
+ if (phw->num_dsp == 2) {
+ error = hpi6000_message_response_sequence(pao, 1, &hM,
+ &hR1);
+ if (error)
+ return error;
+ }
+ pao->adapter_type = hR0.u.a.adapter_type;
+ pao->index = hR0.u.a.adapter_index;
+ }
+
+ memset(&phw->control_cache[0], 0,
+ sizeof(struct hpi_control_cache_single) *
+ HPI_NMIXER_CONTROLS);
+ /* Read the control cache length to figure out if it is turned on */
+ control_cache_size =
+ hpi_read_word(&phw->ado[0],
+ HPI_HIF_ADDR(control_cache_size_in_bytes));
+ if (control_cache_size) {
+ control_cache_count =
+ hpi_read_word(&phw->ado[0],
+ HPI_HIF_ADDR(control_cache_count));
+ pao->has_control_cache = 1;
+
+ phw->p_cache =
+ hpi_alloc_control_cache(control_cache_count,
+ control_cache_size, (struct hpi_control_cache_info *)
+ &phw->control_cache[0]
+ );
+ } else
+ pao->has_control_cache = 0;
+
+ HPI_DEBUG_LOG(DEBUG, "get adapter info ASI%04X index %d\n",
+ pao->adapter_type, pao->index);
+ pao->open = 0; /* upon creation the adapter is closed */
+ return 0;
+}
+
+/************************************************************************/
+/* ADAPTER */
+
+static void adapter_get_asserts(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr)
+{
+#ifndef HIDE_PCI_ASSERTS
+ /* if we have PCI2040 asserts then collect them */
+ if ((gw_pci_read_asserts > 0) || (gw_pci_write_asserts > 0)) {
+ phr->u.a.serial_number =
+ gw_pci_read_asserts * 100 + gw_pci_write_asserts;
+ phr->u.a.adapter_index = 1; /* assert count */
+ phr->u.a.adapter_type = -1; /* "dsp index" */
+ strcpy(phr->u.a.sz_adapter_assert, "PCI2040 error");
+ gw_pci_read_asserts = 0;
+ gw_pci_write_asserts = 0;
+ phr->error = 0;
+ } else
+#endif
+ hw_message(pao, phm, phr); /*get DSP asserts */
+
+ return;
+}
+
+/************************************************************************/
+/* LOW-LEVEL */
+
+static short hpi6000_adapter_boot_load_dsp(struct hpi_adapter_obj *pao,
+ u32 *pos_error_code)
+{
+ struct hpi_hw_obj *phw = (struct hpi_hw_obj *)pao->priv;
+ short error;
+ u32 timeout;
+ u32 read = 0;
+ u32 i = 0;
+ u32 data = 0;
+ u32 j = 0;
+ u32 test_addr = 0x80000000;
+ u32 test_data = 0x00000001;
+ u32 dw2040_reset = 0;
+ u32 dsp_index = 0;
+ u32 endian = 0;
+ u32 adapter_info = 0;
+ u32 delay = 0;
+
+ struct dsp_code dsp_code;
+ u16 boot_load_family = 0;
+
+ /* NOTE don't use wAdapterType in this routine. It is not set up yet */
+
+ switch (pao->pci.subsys_device_id) {
+ case 0x5100:
+ case 0x5110: /* ASI5100 revB or higher with C6711D */
+ case 0x6100:
+ case 0x6200:
+ boot_load_family = HPI_ADAPTER_FAMILY_ASI(0x6200);
+ break;
+ case 0x8800:
+ boot_load_family = HPI_ADAPTER_FAMILY_ASI(0x8800);
+ break;
+ default:
+ return HPI6000_ERROR_UNHANDLED_SUBSYS_ID;
+ }
+
+ /* reset all DSPs, indicate two DSPs are present
+ * set RST3-=1 to disconnect HAD8 to set DSP in little endian mode
+ */
+ endian = 0;
+ dw2040_reset = 0x0003000F;
+ iowrite32(dw2040_reset, phw->dw2040_HPICSR + HPI_RESET);
+
+ /* read back register to make sure PCI2040 chip is functioning
+ * note that bits 4..15 are read-only and so should always return zero,
+ * even though we wrote 1 to them
+ */
+ for (i = 0; i < 1000; i++)
+ delay = ioread32(phw->dw2040_HPICSR + HPI_RESET);
+ if (delay != dw2040_reset) {
+ HPI_DEBUG_LOG(ERROR, "INIT_PCI2040 %x %x\n", dw2040_reset,
+ delay);
+ return HPI6000_ERROR_INIT_PCI2040;
+ }
+
+ /* Indicate that DSP#0,1 is a C6X */
+ iowrite32(0x00000003, phw->dw2040_HPICSR + HPI_DATA_WIDTH);
+ /* set Bit30 and 29 - which will prevent Target aborts from being
+ * issued upon HPI or GP error
+ */
+ iowrite32(0x60000000, phw->dw2040_HPICSR + INTERRUPT_MASK_SET);
+
+ /* isolate DSP HAD8 line from PCI2040 so that
+ * Little endian can be set by pullup
+ */
+ dw2040_reset = dw2040_reset & (~(endian << 3));
+ iowrite32(dw2040_reset, phw->dw2040_HPICSR + HPI_RESET);
+
+ phw->ado[0].c_dsp_rev = 'B'; /* revB */
+ phw->ado[1].c_dsp_rev = 'B'; /* revB */
+
+ /*Take both DSPs out of reset, setting HAD8 to the correct Endian */
+ dw2040_reset = dw2040_reset & (~0x00000001); /* start DSP 0 */
+ iowrite32(dw2040_reset, phw->dw2040_HPICSR + HPI_RESET);
+ dw2040_reset = dw2040_reset & (~0x00000002); /* start DSP 1 */
+ iowrite32(dw2040_reset, phw->dw2040_HPICSR + HPI_RESET);
+
+ /* set HAD8 back to PCI2040, now that DSP set to little endian mode */
+ dw2040_reset = dw2040_reset & (~0x00000008);
+ iowrite32(dw2040_reset, phw->dw2040_HPICSR + HPI_RESET);
+ /*delay to allow DSP to get going */
+ for (i = 0; i < 100; i++)
+ delay = ioread32(phw->dw2040_HPICSR + HPI_RESET);
+
+ /* loop through all DSPs, downloading DSP code */
+ for (dsp_index = 0; dsp_index < phw->num_dsp; dsp_index++) {
+ struct dsp_obj *pdo = &phw->ado[dsp_index];
+
+ /* configure DSP so that we download code into the SRAM */
+ /* set control reg for little endian, HWOB=1 */
+ iowrite32(0x00010001, pdo->prHPI_control);
+
+ /* test access to the HPI address register (HPIA) */
+ test_data = 0x00000001;
+ for (j = 0; j < 32; j++) {
+ iowrite32(test_data, pdo->prHPI_address);
+ data = ioread32(pdo->prHPI_address);
+ if (data != test_data) {
+ HPI_DEBUG_LOG(ERROR, "INIT_DSPHPI %x %x %x\n",
+ test_data, data, dsp_index);
+ return HPI6000_ERROR_INIT_DSPHPI;
+ }
+ test_data = test_data << 1;
+ }
+
+/* if C6713 then set up the PLL to generate 225MHz from 25MHz.
+* Since the PLLDIV1 read is sometimes wrong, even on a C6713,
+* we're going to do this unconditionally
+*/
+/* PLLDIV1 should have a value of 8000 after reset */
+/*
+ if (HpiReadWord(pdo,0x01B7C118) == 0x8000)
+*/
+ {
+ /* C6713 datasheet says we cannot program PLL from HPI,
+ * and indeed if we try to set the PLL multiply from the
+ * HPI, the PLL does not seem to lock,
+ * so we enable the PLL and use the default of x 7
+ */
+ /* bypass PLL */
+ hpi_write_word(pdo, 0x01B7C100, 0x0000);
+ for (i = 0; i < 100; i++)
+ delay = ioread32(phw->dw2040_HPICSR +
+ HPI_RESET);
+
+ /* ** use default of PLL x7 ** */
+ /* EMIF = 225/3=75MHz */
+ hpi_write_word(pdo, 0x01B7C120, 0x8002);
+ /* peri = 225/2 */
+ hpi_write_word(pdo, 0x01B7C11C, 0x8001);
+ /* cpu = 225/1 */
+ hpi_write_word(pdo, 0x01B7C118, 0x8000);
+ /* ~200us delay */
+ for (i = 0; i < 2000; i++)
+ delay = ioread32(phw->dw2040_HPICSR +
+ HPI_RESET);
+ /* PLL not bypassed */
+ hpi_write_word(pdo, 0x01B7C100, 0x0001);
+ /* ~200us delay */
+ for (i = 0; i < 2000; i++)
+ delay = ioread32(phw->dw2040_HPICSR +
+ HPI_RESET);
+ }
+
+ /* test r/w to internal DSP memory
+ * C6711 has L2 cache mapped to 0x0 when reset
+ *
+ * revB - because of bug 3.0.1 last HPI read
+ * (before HPI address issued) must be non-autoinc
+ */
+ /* test each bit in the 32bit word */
+ for (i = 0; i < 100; i++) {
+ test_addr = 0x00000000;
+ test_data = 0x00000001;
+ for (j = 0; j < 32; j++) {
+ hpi_write_word(pdo, test_addr + i, test_data);
+ data = hpi_read_word(pdo, test_addr + i);
+ if (data != test_data) {
+ HPI_DEBUG_LOG(ERROR,
+ "DSP mem %x %x %x %x\n",
+ test_addr + i, test_data,
+ data, dsp_index);
+
+ return HPI6000_ERROR_INIT_DSPINTMEM;
+ }
+ test_data = test_data << 1;
+ }
+ }
+
+ /* memory map of ASI6200
+ 00000000-0000FFFF 16Kx32 internal program
+ 01800000-019FFFFF Internal peripheral
+ 80000000-807FFFFF CE0 2Mx32 SDRAM running @ 100MHz
+ 90000000-9000FFFF CE1 Async peripherals:
+
+ EMIF config
+ ------------
+ Global EMIF control
+ 0 -
+ 1 -
+ 2 -
+ 3 CLK2EN = 1 CLKOUT2 enabled
+ 4 CLK1EN = 0 CLKOUT1 disabled
+ 5 EKEN = 1 <--!! C6713 specific, enables ECLKOUT
+ 6 -
+ 7 NOHOLD = 1 external HOLD disabled
+ 8 HOLDA = 0 HOLDA output is low
+ 9 HOLD = 0 HOLD input is low
+ 10 ARDY = 1 ARDY input is high
+ 11 BUSREQ = 0 BUSREQ output is low
+ 12,13 Reserved = 1
+ */
+ hpi_write_word(pdo, 0x01800000, 0x34A8);
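+ /* Worked decomposition of 0x34A8 against the table above
+ * (illustration only): bits 13,12 (reserved) + bit 10 (ARDY)
+ * = 0x3400; bit 7 (NOHOLD) + bit 5 (EKEN) + bit 3 (CLK2EN)
+ * = 0x00A8; total 0x34A8.
+ */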
+
+ /* EMIF CE0 setup - 2Mx32 Sync DRAM
+ 31..28 Wr setup
+ 27..22 Wr strobe
+ 21..20 Wr hold
+ 19..16 Rd setup
+ 15..14 -
+ 13..8 Rd strobe
+ 7..4 MTYPE 0011 Sync DRAM 32bits
+ 3 Wr hold MSB
+ 2..0 Rd hold
+ */
+ hpi_write_word(pdo, 0x01800008, 0x00000030);
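+ /* 0x00000030 decoded against the table above (illustration only):
+ * MTYPE (bits 7..4) = 0011 = Sync DRAM 32 bits; all other
+ * setup/strobe/hold fields are zero.
+ */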
+
+ /* EMIF SDRAM Extension
+ 31-21 0
+ 20 WR2RD = 0
+ 19-18 WR2DEAC = 1
+ 17 WR2WR = 0
+ 16-15 R2WDQM = 2
+ 14-12 RD2WR = 4
+ 11-10 RD2DEAC = 1
+ 9 RD2RD = 1
+ 8-7 THZP = 10b
+ 6-5 TWR = 2-1 = 01b (tWR = 10ns)
+ 4 TRRD = 0b = 2 ECLK (tRRD = 14ns)
+ 3-1 TRAS = 5-1 = 100b (Tras=42ns = 5 ECLK)
+ 1 CAS latency = 3 ECLK
+ (for Micron 2M32-7 operating at 100MHz)
+ */
+
+ /* need to use this else DSP code crashes */
+ hpi_write_word(pdo, 0x01800020, 0x001BDF29);
+
+ /* EMIF SDRAM control - set up for a 2Mx32 SDRAM (512x32x4 bank)
+ 31 - -
+ 30 SDBSZ 1 4 bank
+ 29..28 SDRSZ 00 11 row address pins
+ 27..26 SDCSZ 01 8 column address pins
+ 25 RFEN 1 refresh enabled
+ 24 INIT 1 init SDRAM
+ 23..20 TRCD 0001
+ 19..16 TRP 0001
+ 15..12 TRC 0110
+ 11..0 - -
+ */
+ /* need to use this else DSP code crashes */
+ hpi_write_word(pdo, 0x01800018, 0x47117000);
+
+ /* EMIF SDRAM Refresh Timing */
+ hpi_write_word(pdo, 0x0180001C, 0x00000410);
+
+ /* EMIF CE1 setup - Async peripherals
+ @100MHz bus speed, each cycle is 10ns,
+ 31..28 Wr setup = 1
+ 27..22 Wr strobe = 3 30ns
+ 21..20 Wr hold = 1
+ 19..16 Rd setup =1
+ 15..14 Ta = 2
+ 13..8 Rd strobe = 3 30ns
+ 7..4 MTYPE 0010 Async 32bits
+ 3 Wr hold MSB =0
+ 2..0 Rd hold = 1
+ */
+ {
+ u32 cE1 =
+ (1L << 28) | (3L << 22) | (1L << 20) | (1L <<
+ 16) | (2L << 14) | (3L << 8) | (2L << 4) | 1L;
+ hpi_write_word(pdo, 0x01800004, cE1);
+ }
+
+ /* delay a little to allow SDRAM and DSP to "get going" */
+
+ for (i = 0; i < 1000; i++)
+ delay = ioread32(phw->dw2040_HPICSR + HPI_RESET);
+
+ /* test access to SDRAM */
+ {
+ test_addr = 0x80000000;
+ test_data = 0x00000001;
+ /* test each bit in the 32bit word */
+ for (j = 0; j < 32; j++) {
+ hpi_write_word(pdo, test_addr, test_data);
+ data = hpi_read_word(pdo, test_addr);
+ if (data != test_data) {
+ HPI_DEBUG_LOG(ERROR,
+ "DSP dram %x %x %x %x\n",
+ test_addr, test_data, data,
+ dsp_index);
+
+ return HPI6000_ERROR_INIT_SDRAM1;
+ }
+ test_data = test_data << 1;
+ }
+ /* test every Nth address in the DRAM */
+#define DRAM_SIZE_WORDS 0x200000 /*2_mx32 */
+#define DRAM_INC 1024
+ test_addr = 0x80000000;
+ test_data = 0x0;
+ for (i = 0; i < DRAM_SIZE_WORDS; i = i + DRAM_INC) {
+ hpi_write_word(pdo, test_addr + i, test_data);
+ test_data++;
+ }
+ test_addr = 0x80000000;
+ test_data = 0x0;
+ for (i = 0; i < DRAM_SIZE_WORDS; i = i + DRAM_INC) {
+ data = hpi_read_word(pdo, test_addr + i);
+ if (data != test_data) {
+ HPI_DEBUG_LOG(ERROR,
+ "DSP dram %x %x %x %x\n",
+ test_addr + i, test_data,
+ data, dsp_index);
+ return HPI6000_ERROR_INIT_SDRAM2;
+ }
+ test_data++;
+ }
+
+ }
+
+ /* write the DSP code down into the DSPs memory */
+ /*HpiDspCode_Open(nBootLoadFamily,&DspCode,pdwOsErrorCode); */
+ dsp_code.ps_dev = pao->pci.p_os_data;
+
+ error = hpi_dsp_code_open(boot_load_family, &dsp_code,
+ pos_error_code);
+
+ if (error)
+ return error;
+
+ while (1) {
+ u32 length;
+ u32 address;
+ u32 type;
+ u32 *pcode;
+
+ error = hpi_dsp_code_read_word(&dsp_code, &length);
+ if (error)
+ break;
+ if (length == 0xFFFFFFFF)
+ break; /* end of code */
+
+ error = hpi_dsp_code_read_word(&dsp_code, &address);
+ if (error)
+ break;
+ error = hpi_dsp_code_read_word(&dsp_code, &type);
+ if (error)
+ break;
+ error = hpi_dsp_code_read_block(length, &dsp_code,
+ &pcode);
+ if (error)
+ break;
+ error = hpi6000_dsp_block_write32(pao, (u16)dsp_index,
+ address, pcode, length);
+ if (error)
+ break;
+ }
+
+ if (error) {
+ hpi_dsp_code_close(&dsp_code);
+ return error;
+ }
+ /* verify that code was written correctly */
+ /* this time through, assume no errors in DSP code file/array */
+ hpi_dsp_code_rewind(&dsp_code);
+ while (1) {
+ u32 length;
+ u32 address;
+ u32 type;
+ u32 *pcode;
+
+ hpi_dsp_code_read_word(&dsp_code, &length);
+ if (length == 0xFFFFFFFF)
+ break; /* end of code */
+
+ hpi_dsp_code_read_word(&dsp_code, &address);
+ hpi_dsp_code_read_word(&dsp_code, &type);
+ hpi_dsp_code_read_block(length, &dsp_code, &pcode);
+
+ for (i = 0; i < length; i++) {
+ data = hpi_read_word(pdo, address);
+ if (data != *pcode) {
+ error = HPI6000_ERROR_INIT_VERIFY;
+ HPI_DEBUG_LOG(ERROR,
+ "DSP verify %x %x %x %x\n",
+ address, *pcode, data,
+ dsp_index);
+ break;
+ }
+ pcode++;
+ address += 4;
+ }
+ if (error)
+ break;
+ }
+ hpi_dsp_code_close(&dsp_code);
+ if (error)
+ return error;
+
+ /* zero out the hostmailbox */
+ {
+ u32 address = HPI_HIF_ADDR(host_cmd);
+ for (i = 0; i < 4; i++) {
+ hpi_write_word(pdo, address, 0);
+ address += 4;
+ }
+ }
+ /* write the DSP number into the hostmailbox */
+ /* structure before starting the DSP */
+ hpi_write_word(pdo, HPI_HIF_ADDR(dsp_number), dsp_index);
+
+ /* write the DSP adapter Info into the */
+ /* hostmailbox before starting the DSP */
+ if (dsp_index > 0)
+ hpi_write_word(pdo, HPI_HIF_ADDR(adapter_info),
+ adapter_info);
+
+ /* step 3. Start code by sending interrupt */
+ iowrite32(0x00030003, pdo->prHPI_control);
+ for (i = 0; i < 10000; i++)
+ delay = ioread32(phw->dw2040_HPICSR + HPI_RESET);
+
+ /* wait for a non-zero value in hostcmd -
+ * indicating initialization is complete
+ *
+ * Init could take a while if DSP checks SDRAM memory
+ * Was 200000. Increased to 2000000 for ASI8801 so we
+ * don't get 938 errors.
+ */
+ timeout = 2000000;
+ while (timeout) {
+ do {
+ read = hpi_read_word(pdo,
+ HPI_HIF_ADDR(host_cmd));
+ } while (--timeout
+ && hpi6000_check_PCI2040_error_flag(pao,
+ H6READ));
+
+ if (read)
+ break;
+ /* The following is a workaround for bug #94:
+ * Bluescreen on install and subsequent boots on a
+ * DELL PowerEdge 600SC PC with 1.8GHz P4 and
+ * ServerWorks chipset. Without this delay the system
+ * locks up with a bluescreen (NOT GPF or pagefault).
+ */
+ else
+ hpios_delay_micro_seconds(1000);
+ }
+ if (timeout == 0)
+ return HPI6000_ERROR_INIT_NOACK;
+
+ /* read the DSP adapter Info from the */
+ /* hostmailbox structure after starting the DSP */
+ if (dsp_index == 0) {
+ /*u32 dwTestData=0; */
+ u32 mask = 0;
+
+ adapter_info =
+ hpi_read_word(pdo,
+ HPI_HIF_ADDR(adapter_info));
+ if (HPI_ADAPTER_FAMILY_ASI
+ (HPI_HIF_ADAPTER_INFO_EXTRACT_ADAPTER
+ (adapter_info)) ==
+ HPI_ADAPTER_FAMILY_ASI(0x6200))
+ /* all 6200 cards have this many DSPs */
+ phw->num_dsp = 2;
+
+ /* test that the PLD is programmed */
+ /* and we can read/write 24bits */
+#define PLD_BASE_ADDRESS 0x90000000L /*for ASI6100/6200/8800 */
+
+ switch (boot_load_family) {
+ case HPI_ADAPTER_FAMILY_ASI(0x6200):
+ /* ASI6100/6200 has 24bit path to FPGA */
+ mask = 0xFFFFFF00L;
+ /* ASI5100 uses AX6 code, */
+ /* but has no PLD r/w register to test */
+ if (HPI_ADAPTER_FAMILY_ASI(pao->pci.
+ subsys_device_id) ==
+ HPI_ADAPTER_FAMILY_ASI(0x5100))
+ mask = 0x00000000L;
+ break;
+ case HPI_ADAPTER_FAMILY_ASI(0x8800):
+ /* ASI8800 has 16bit path to FPGA */
+ mask = 0xFFFF0000L;
+ break;
+ }
+ test_data = 0xAAAAAA00L & mask;
+ /* write to 24 bit Debug register (D31-D8) */
+ hpi_write_word(pdo, PLD_BASE_ADDRESS + 4L, test_data);
+ read = hpi_read_word(pdo,
+ PLD_BASE_ADDRESS + 4L) & mask;
+ if (read != test_data) {
+ HPI_DEBUG_LOG(ERROR, "PLD %x %x\n", test_data,
+ read);
+ return HPI6000_ERROR_INIT_PLDTEST1;
+ }
+ test_data = 0x55555500L & mask;
+ hpi_write_word(pdo, PLD_BASE_ADDRESS + 4L, test_data);
+ read = hpi_read_word(pdo,
+ PLD_BASE_ADDRESS + 4L) & mask;
+ if (read != test_data) {
+ HPI_DEBUG_LOG(ERROR, "PLD %x %x\n", test_data,
+ read);
+ return HPI6000_ERROR_INIT_PLDTEST2;
+ }
+ }
+ } /* for numDSP */
+ return 0;
+}
+
+#define PCI_TIMEOUT 100
+
+static int hpi_set_address(struct dsp_obj *pdo, u32 address)
+{
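+	/* Keep writing the address register until the PCI2040 stops flagging
+	 * an HPI write error, or until PCI_TIMEOUT attempts have been made.
+	 * Returns 0 on success, 1 on timeout.
+	 */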
+ u32 timeout = PCI_TIMEOUT;
+
+ do {
+ iowrite32(address, pdo->prHPI_address);
+ } while (hpi6000_check_PCI2040_error_flag(pdo->pa_parent_adapter,
+ H6WRITE)
+ && --timeout);
+
+ if (timeout)
+ return 0;
+
+ return 1;
+}
+
+/* write one word to the HPI port */
+static void hpi_write_word(struct dsp_obj *pdo, u32 address, u32 data)
+{
+ if (hpi_set_address(pdo, address))
+ return;
+ iowrite32(data, pdo->prHPI_data);
+}
+
+/* read one word from the HPI port */
+static u32 hpi_read_word(struct dsp_obj *pdo, u32 address)
+{
+ u32 data = 0;
+
+ if (hpi_set_address(pdo, address))
+ return 0; /*? no way to return error */
+
+ /* take care of errata in revB DSP (2.0.1) */
+ data = ioread32(pdo->prHPI_data);
+ return data;
+}
+
+/* write a block of 32bit words to the DSP HPI port using auto-inc mode */
+static void hpi_write_block(struct dsp_obj *pdo, u32 address, u32 *pdata,
+ u32 length)
+{
+ u16 length16 = length - 1;
+
+ if (length == 0)
+ return;
+
+ if (hpi_set_address(pdo, address))
+ return;
+
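+	/* write all but the last word through the auto-increment data register */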
+ iowrite32_rep(pdo->prHPI_data_auto_inc, pdata, length16);
+
+ /* take care of errata in revB DSP (2.0.1) */
+ /* must end with non auto-inc */
+ iowrite32(*(pdata + length - 1), pdo->prHPI_data);
+}
+
+/** read a block of 32bit words from the DSP HPI port using auto-inc mode
+ */
+static void hpi_read_block(struct dsp_obj *pdo, u32 address, u32 *pdata,
+ u32 length)
+{
+ u16 length16 = length - 1;
+
+ if (length == 0)
+ return;
+
+ if (hpi_set_address(pdo, address))
+ return;
+
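+	/* read all but the last word through the auto-increment data register */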
+ ioread32_rep(pdo->prHPI_data_auto_inc, pdata, length16);
+
+ /* take care of errata in revB DSP (2.0.1) */
+ /* must end with non auto-inc */
+ *(pdata + length - 1) = ioread32(pdo->prHPI_data);
+}
+
+static u16 hpi6000_dsp_block_write32(struct hpi_adapter_obj *pao,
+ u16 dsp_index, u32 hpi_address, u32 *source, u32 count)
+{
+ struct dsp_obj *pdo =
+ &(*(struct hpi_hw_obj *)pao->priv).ado[dsp_index];
+ u32 time_out = PCI_TIMEOUT;
+ int c6711_burst_size = 128;
+ u32 local_hpi_address = hpi_address;
+ int local_count = count;
+ int xfer_size;
+ u32 *pdata = source;
+
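+	/* transfer in bursts of at most c6711_burst_size words, retrying each
+	 * burst while the PCI2040 reports an HPI write error
+	 */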
+ while (local_count) {
+ if (local_count > c6711_burst_size)
+ xfer_size = c6711_burst_size;
+ else
+ xfer_size = local_count;
+
+ time_out = PCI_TIMEOUT;
+ do {
+ hpi_write_block(pdo, local_hpi_address, pdata,
+ xfer_size);
+ } while (hpi6000_check_PCI2040_error_flag(pao, H6WRITE)
+ && --time_out);
+
+ if (!time_out)
+ break;
+ pdata += xfer_size;
+ local_hpi_address += sizeof(u32) * xfer_size;
+ local_count -= xfer_size;
+ }
+
+ if (time_out)
+ return 0;
+ else
+ return 1;
+}
+
+static u16 hpi6000_dsp_block_read32(struct hpi_adapter_obj *pao,
+ u16 dsp_index, u32 hpi_address, u32 *dest, u32 count)
+{
+ struct dsp_obj *pdo =
+ &(*(struct hpi_hw_obj *)pao->priv).ado[dsp_index];
+ u32 time_out = PCI_TIMEOUT;
+ int c6711_burst_size = 16;
+ u32 local_hpi_address = hpi_address;
+ int local_count = count;
+ int xfer_size;
+ u32 *pdata = dest;
+ u32 loop_count = 0;
+
+ while (local_count) {
+ if (local_count > c6711_burst_size)
+ xfer_size = c6711_burst_size;
+ else
+ xfer_size = local_count;
+
+ time_out = PCI_TIMEOUT;
+ do {
+ hpi_read_block(pdo, local_hpi_address, pdata,
+ xfer_size);
+ } while (hpi6000_check_PCI2040_error_flag(pao, H6READ)
+ && --time_out);
+ if (!time_out)
+ break;
+
+ pdata += xfer_size;
+ local_hpi_address += sizeof(u32) * xfer_size;
+ local_count -= xfer_size;
+ loop_count++;
+ }
+
+ if (time_out)
+ return 0;
+ else
+ return 1;
+}
+
+static short hpi6000_message_response_sequence(struct hpi_adapter_obj *pao,
+ u16 dsp_index, struct hpi_message *phm, struct hpi_response *phr)
+{
+ struct hpi_hw_obj *phw = (struct hpi_hw_obj *)pao->priv;
+ struct dsp_obj *pdo = &phw->ado[dsp_index];
+ u32 timeout;
+ u16 ack;
+ u32 address;
+ u32 length;
+ u32 *p_data;
+ u16 error = 0;
+
+ /* does the DSP we are referencing exist? */
+ if (dsp_index >= phw->num_dsp)
+ return HPI6000_ERROR_MSG_INVALID_DSP_INDEX;
+
+ ack = hpi6000_wait_dsp_ack(pao, dsp_index, HPI_HIF_IDLE);
+ if (ack & HPI_HIF_ERROR_MASK) {
+ pao->dsp_crashed++;
+ return HPI6000_ERROR_MSG_RESP_IDLE_TIMEOUT;
+ }
+ pao->dsp_crashed = 0;
+
+ /* send the message */
+
+ /* get the address and size */
+ if (phw->message_buffer_address_on_dsp == 0) {
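+		/* first message: read the message buffer address from the DSP
+		 * and cache it for subsequent messages
+		 */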
+ timeout = TIMEOUT;
+ do {
+ address =
+ hpi_read_word(pdo,
+ HPI_HIF_ADDR(message_buffer_address));
+ phw->message_buffer_address_on_dsp = address;
+ } while (hpi6000_check_PCI2040_error_flag(pao, H6READ)
+ && --timeout);
+ if (!timeout)
+ return HPI6000_ERROR_MSG_GET_ADR;
+ } else
+ address = phw->message_buffer_address_on_dsp;
+
+ /* dwLength = sizeof(struct hpi_message); */
+ length = phm->size;
+
+ /* send it */
+ p_data = (u32 *)phm;
+ if (hpi6000_dsp_block_write32(pao, dsp_index, address, p_data,
+ (u16)length / 4))
+ return HPI6000_ERROR_MSG_RESP_BLOCKWRITE32;
+
+ if (hpi6000_send_host_command(pao, dsp_index, HPI_HIF_GET_RESP))
+ return HPI6000_ERROR_MSG_RESP_GETRESPCMD;
+ hpi6000_send_dsp_interrupt(pdo);
+
+ ack = hpi6000_wait_dsp_ack(pao, dsp_index, HPI_HIF_GET_RESP);
+ if (ack & HPI_HIF_ERROR_MASK)
+ return HPI6000_ERROR_MSG_RESP_GET_RESP_ACK;
+
+ /* get the address and size */
+ if (phw->response_buffer_address_on_dsp == 0) {
+ timeout = TIMEOUT;
+ do {
+ address =
+ hpi_read_word(pdo,
+ HPI_HIF_ADDR(response_buffer_address));
+ } while (hpi6000_check_PCI2040_error_flag(pao, H6READ)
+ && --timeout);
+ phw->response_buffer_address_on_dsp = address;
+
+ if (!timeout)
+ return HPI6000_ERROR_RESP_GET_ADR;
+ } else
+ address = phw->response_buffer_address_on_dsp;
+
+ /* read the length of the response back from the DSP */
+ timeout = TIMEOUT;
+ do {
+ length = hpi_read_word(pdo, HPI_HIF_ADDR(length));
+ } while (hpi6000_check_PCI2040_error_flag(pao, H6READ) && --timeout);
+ if (!timeout)
+ length = sizeof(struct hpi_response);
+
+ /* get it */
+ p_data = (u32 *)phr;
+ if (hpi6000_dsp_block_read32(pao, dsp_index, address, p_data,
+ (u16)length / 4))
+ return HPI6000_ERROR_MSG_RESP_BLOCKREAD32;
+
+ /* set i/f back to idle */
+ if (hpi6000_send_host_command(pao, dsp_index, HPI_HIF_IDLE))
+ return HPI6000_ERROR_MSG_RESP_IDLECMD;
+ hpi6000_send_dsp_interrupt(pdo);
+
+ error = hpi_validate_response(phm, phr);
+ return error;
+}
+
+/* the defines below must be kept in sync with the DSP MAP file */
+
+#define MSG_ADDRESS (HPI_HIF_BASE+0x18)
+#define MSG_LENGTH 11
+#define RESP_ADDRESS (HPI_HIF_BASE+0x44)
+#define RESP_LENGTH 16
+#define QUEUE_START (HPI_HIF_BASE+0x88)
+#define QUEUE_SIZE 0x8000
+
+static short hpi6000_send_data_check_adr(u32 address, u32 length_in_dwords)
+{
+/*#define CHECKING // comment this line in to enable checking */
+#ifdef CHECKING
+ if (address < (u32)MSG_ADDRESS)
+ return 0;
+ if (address > (u32)(QUEUE_START + QUEUE_SIZE))
+ return 0;
+ if ((address + (length_in_dwords << 2)) >
+ (u32)(QUEUE_START + QUEUE_SIZE))
+ return 0;
+#else
+	(void)address;
+	(void)length_in_dwords;
+#endif
+	return 1;
+}
+
+static short hpi6000_send_data(struct hpi_adapter_obj *pao, u16 dsp_index,
+ struct hpi_message *phm, struct hpi_response *phr)
+{
+ struct dsp_obj *pdo =
+ &(*(struct hpi_hw_obj *)pao->priv).ado[dsp_index];
+ u32 data_sent = 0;
+ u16 ack;
+ u32 length, address;
+ u32 *p_data = (u32 *)phm->u.d.u.data.pb_data;
+ u16 time_out = 8;
+
+ (void)phr;
+
+ /* round dwDataSize down to nearest 4 bytes */
+ while ((data_sent < (phm->u.d.u.data.data_size & ~3L))
+ && --time_out) {
+ ack = hpi6000_wait_dsp_ack(pao, dsp_index, HPI_HIF_IDLE);
+ if (ack & HPI_HIF_ERROR_MASK)
+ return HPI6000_ERROR_SEND_DATA_IDLE_TIMEOUT;
+
+ if (hpi6000_send_host_command(pao, dsp_index,
+ HPI_HIF_SEND_DATA))
+ return HPI6000_ERROR_SEND_DATA_CMD;
+
+ hpi6000_send_dsp_interrupt(pdo);
+
+ ack = hpi6000_wait_dsp_ack(pao, dsp_index, HPI_HIF_SEND_DATA);
+
+ if (ack & HPI_HIF_ERROR_MASK)
+ return HPI6000_ERROR_SEND_DATA_ACK;
+
+ do {
+ /* get the address and size */
+ address = hpi_read_word(pdo, HPI_HIF_ADDR(address));
+ /* DSP returns number of DWORDS */
+ length = hpi_read_word(pdo, HPI_HIF_ADDR(length));
+ } while (hpi6000_check_PCI2040_error_flag(pao, H6READ));
+
+ if (!hpi6000_send_data_check_adr(address, length))
+ return HPI6000_ERROR_SEND_DATA_ADR;
+
+ /* send the data. break data into 512 DWORD blocks (2K bytes)
+ * and send using block write. 2Kbytes is the max as this is the
+ * memory window given to the HPI data register by the PCI2040
+ */
+
+ {
+ u32 len = length;
+ u32 blk_len = 512;
+ while (len) {
+ if (len < blk_len)
+ blk_len = len;
+ if (hpi6000_dsp_block_write32(pao, dsp_index,
+ address, p_data, blk_len))
+ return HPI6000_ERROR_SEND_DATA_WRITE;
+ address += blk_len * 4;
+ p_data += blk_len;
+ len -= blk_len;
+ }
+ }
+
+ if (hpi6000_send_host_command(pao, dsp_index, HPI_HIF_IDLE))
+ return HPI6000_ERROR_SEND_DATA_IDLECMD;
+
+ hpi6000_send_dsp_interrupt(pdo);
+
+ data_sent += length * 4;
+ }
+ if (!time_out)
+ return HPI6000_ERROR_SEND_DATA_TIMEOUT;
+ return 0;
+}
+
+static short hpi6000_get_data(struct hpi_adapter_obj *pao, u16 dsp_index,
+ struct hpi_message *phm, struct hpi_response *phr)
+{
+ struct dsp_obj *pdo =
+ &(*(struct hpi_hw_obj *)pao->priv).ado[dsp_index];
+ u32 data_got = 0;
+ u16 ack;
+ u32 length, address;
+ u32 *p_data = (u32 *)phm->u.d.u.data.pb_data;
+
+ (void)phr; /* this parameter not used! */
+
+ /* round dwDataSize down to nearest 4 bytes */
+ while (data_got < (phm->u.d.u.data.data_size & ~3L)) {
+ ack = hpi6000_wait_dsp_ack(pao, dsp_index, HPI_HIF_IDLE);
+ if (ack & HPI_HIF_ERROR_MASK)
+ return HPI6000_ERROR_GET_DATA_IDLE_TIMEOUT;
+
+ if (hpi6000_send_host_command(pao, dsp_index,
+ HPI_HIF_GET_DATA))
+ return HPI6000_ERROR_GET_DATA_CMD;
+ hpi6000_send_dsp_interrupt(pdo);
+
+ ack = hpi6000_wait_dsp_ack(pao, dsp_index, HPI_HIF_GET_DATA);
+
+ if (ack & HPI_HIF_ERROR_MASK)
+ return HPI6000_ERROR_GET_DATA_ACK;
+
+ /* get the address and size */
+ do {
+ address = hpi_read_word(pdo, HPI_HIF_ADDR(address));
+ length = hpi_read_word(pdo, HPI_HIF_ADDR(length));
+ } while (hpi6000_check_PCI2040_error_flag(pao, H6READ));
+
+ /* read the data */
+ {
+ u32 len = length;
+ u32 blk_len = 512;
+ while (len) {
+ if (len < blk_len)
+ blk_len = len;
+ if (hpi6000_dsp_block_read32(pao, dsp_index,
+ address, p_data, blk_len))
+ return HPI6000_ERROR_GET_DATA_READ;
+ address += blk_len * 4;
+ p_data += blk_len;
+ len -= blk_len;
+ }
+ }
+
+ if (hpi6000_send_host_command(pao, dsp_index, HPI_HIF_IDLE))
+ return HPI6000_ERROR_GET_DATA_IDLECMD;
+ hpi6000_send_dsp_interrupt(pdo);
+
+ data_got += length * 4;
+ }
+ return 0;
+}
+
+static void hpi6000_send_dsp_interrupt(struct dsp_obj *pdo)
+{
+ iowrite32(0x00030003, pdo->prHPI_control); /* DSPINT */
+}
+
+static short hpi6000_send_host_command(struct hpi_adapter_obj *pao,
+ u16 dsp_index, u32 host_cmd)
+{
+ struct dsp_obj *pdo =
+ &(*(struct hpi_hw_obj *)pao->priv).ado[dsp_index];
+ u32 timeout = TIMEOUT;
+
+ /* set command */
+ do {
+ hpi_write_word(pdo, HPI_HIF_ADDR(host_cmd), host_cmd);
+ /* flush the FIFO */
+ hpi_set_address(pdo, HPI_HIF_ADDR(host_cmd));
+ } while (hpi6000_check_PCI2040_error_flag(pao, H6WRITE) && --timeout);
+
+ /* reset the interrupt bit */
+ iowrite32(0x00040004, pdo->prHPI_control);
+
+ if (timeout)
+ return 0;
+ else
+ return 1;
+}
+
+/* if the PCI2040 has recorded an HPI timeout, reset the error and return 1 */
+static short hpi6000_check_PCI2040_error_flag(struct hpi_adapter_obj *pao,
+ u16 read_or_write)
+{
+ u32 hPI_error;
+
+ struct hpi_hw_obj *phw = (struct hpi_hw_obj *)pao->priv;
+
+ /* read the error bits from the PCI2040 */
+ hPI_error = ioread32(phw->dw2040_HPICSR + HPI_ERROR_REPORT);
+ if (hPI_error) {
+ /* reset the error flag */
+ iowrite32(0L, phw->dw2040_HPICSR + HPI_ERROR_REPORT);
+ phw->pCI2040HPI_error_count++;
+ if (read_or_write == 1)
+			gw_pci_read_asserts++;	/* increment global counter */
+ else
+ gw_pci_write_asserts++;
+ return 1;
+ } else
+ return 0;
+}
+
+static short hpi6000_wait_dsp_ack(struct hpi_adapter_obj *pao, u16 dsp_index,
+ u32 ack_value)
+{
+ struct dsp_obj *pdo =
+ &(*(struct hpi_hw_obj *)pao->priv).ado[dsp_index];
+ u32 ack = 0L;
+ u32 timeout;
+ u32 hPIC = 0L;
+
+ /* wait for host interrupt to signal ack is ready */
+ timeout = TIMEOUT;
+ while (--timeout) {
+ hPIC = ioread32(pdo->prHPI_control);
+ if (hPIC & 0x04) /* 0x04 = HINT from DSP */
+ break;
+ }
+ if (timeout == 0)
+ return HPI_HIF_ERROR_MASK;
+
+ /* wait for dwAckValue */
+ timeout = TIMEOUT;
+ while (--timeout) {
+ /* read the ack mailbox */
+ ack = hpi_read_word(pdo, HPI_HIF_ADDR(dsp_ack));
+ if (ack == ack_value)
+ break;
+ if ((ack & HPI_HIF_ERROR_MASK)
+ && !hpi6000_check_PCI2040_error_flag(pao, H6READ))
+ break;
+ /*for (i=0;i<1000;i++) */
+ /* dwPause=i+1; */
+ }
+ if (ack & HPI_HIF_ERROR_MASK)
+ /* indicates bad read from DSP -
+ typically 0xffffff is read for some reason */
+ ack = HPI_HIF_ERROR_MASK;
+
+ if (timeout == 0)
+ ack = HPI_HIF_ERROR_MASK;
+ return (short)ack;
+}
+
+static short hpi6000_update_control_cache(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm)
+{
+ const u16 dsp_index = 0;
+ struct hpi_hw_obj *phw = (struct hpi_hw_obj *)pao->priv;
+ struct dsp_obj *pdo = &phw->ado[dsp_index];
+ u32 timeout;
+ u32 cache_dirty_flag;
+ u16 err;
+
+ hpios_dsplock_lock(pao);
+
+ timeout = TIMEOUT;
+ do {
+ cache_dirty_flag =
+ hpi_read_word((struct dsp_obj *)pdo,
+ HPI_HIF_ADDR(control_cache_is_dirty));
+ } while (hpi6000_check_PCI2040_error_flag(pao, H6READ) && --timeout);
+ if (!timeout) {
+ err = HPI6000_ERROR_CONTROL_CACHE_PARAMS;
+ goto unlock;
+ }
+
+ if (cache_dirty_flag) {
+ /* read the cached controls */
+ u32 address;
+ u32 length;
+
+ timeout = TIMEOUT;
+ if (pdo->control_cache_address_on_dsp == 0) {
+ do {
+ address =
+ hpi_read_word((struct dsp_obj *)pdo,
+ HPI_HIF_ADDR(control_cache_address));
+
+ length = hpi_read_word((struct dsp_obj *)pdo,
+ HPI_HIF_ADDR
+ (control_cache_size_in_bytes));
+ } while (hpi6000_check_PCI2040_error_flag(pao, H6READ)
+ && --timeout);
+ if (!timeout) {
+ err = HPI6000_ERROR_CONTROL_CACHE_ADDRLEN;
+ goto unlock;
+ }
+ pdo->control_cache_address_on_dsp = address;
+ pdo->control_cache_length_on_dsp = length;
+ } else {
+ address = pdo->control_cache_address_on_dsp;
+ length = pdo->control_cache_length_on_dsp;
+ }
+
+ if (hpi6000_dsp_block_read32(pao, dsp_index, address,
+ (u32 *)&phw->control_cache[0],
+ length / sizeof(u32))) {
+ err = HPI6000_ERROR_CONTROL_CACHE_READ;
+ goto unlock;
+ }
+ do {
+ hpi_write_word((struct dsp_obj *)pdo,
+ HPI_HIF_ADDR(control_cache_is_dirty), 0);
+ /* flush the FIFO */
+ hpi_set_address(pdo, HPI_HIF_ADDR(host_cmd));
+ } while (hpi6000_check_PCI2040_error_flag(pao, H6WRITE)
+ && --timeout);
+ if (!timeout) {
+ err = HPI6000_ERROR_CONTROL_CACHE_FLUSH;
+ goto unlock;
+ }
+
+ }
+ err = 0;
+
+unlock:
+ hpios_dsplock_unlock(pao);
+ return err;
+}
+
+/** Get dsp index for multi DSP adapters only */
+static u16 get_dsp_index(struct hpi_adapter_obj *pao, struct hpi_message *phm)
+{
+ u16 ret = 0;
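+	/* on two-DSP adapters, instreams 0 and 1 are handled by DSP 1;
+	 * profile objects map directly onto the DSP of the same index
+	 */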
+ switch (phm->object) {
+ case HPI_OBJ_ISTREAM:
+ if (phm->obj_index < 2)
+ ret = 1;
+ break;
+ case HPI_OBJ_PROFILE:
+ ret = phm->obj_index;
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+/** Complete transaction with DSP
+
+Send message, get response, send or get stream data if any.
+*/
+static void hw_message(struct hpi_adapter_obj *pao, struct hpi_message *phm,
+ struct hpi_response *phr)
+{
+ u16 error = 0;
+ u16 dsp_index = 0;
+ u16 num_dsp = ((struct hpi_hw_obj *)pao->priv)->num_dsp;
+ hpios_dsplock_lock(pao);
+
+ if (num_dsp < 2)
+ dsp_index = 0;
+ else {
+ dsp_index = get_dsp_index(pao, phm);
+
+ /* is this checked on the DSP anyway? */
+ if ((phm->function == HPI_ISTREAM_GROUP_ADD)
+ || (phm->function == HPI_OSTREAM_GROUP_ADD)) {
+ struct hpi_message hm;
+ u16 add_index;
+ hm.obj_index = phm->u.d.u.stream.stream_index;
+ hm.object = phm->u.d.u.stream.object_type;
+ add_index = get_dsp_index(pao, &hm);
+ if (add_index != dsp_index) {
+ phr->error = HPI_ERROR_NO_INTERDSP_GROUPS;
+				goto err;
+ }
+ }
+ }
+ error = hpi6000_message_response_sequence(pao, dsp_index, phm, phr);
+
+ /* maybe an error response */
+ if (error) {
+ /* something failed in the HPI/DSP interface */
+ phr->error = error;
+ /* just the header of the response is valid */
+ phr->size = sizeof(struct hpi_response_header);
+ goto err;
+ }
+
+ if (phr->error != 0) /* something failed in the DSP */
+ goto err;
+
+ switch (phm->function) {
+ case HPI_OSTREAM_WRITE:
+ case HPI_ISTREAM_ANC_WRITE:
+ error = hpi6000_send_data(pao, dsp_index, phm, phr);
+ break;
+ case HPI_ISTREAM_READ:
+ case HPI_OSTREAM_ANC_READ:
+ error = hpi6000_get_data(pao, dsp_index, phm, phr);
+ break;
+ case HPI_ADAPTER_GET_ASSERT:
+ phr->u.a.adapter_index = 0; /* dsp 0 default */
+ if (num_dsp == 2) {
+ if (!phr->u.a.adapter_type) {
+ /* no assert from dsp 0, check dsp 1 */
+ error = hpi6000_message_response_sequence(pao,
+ 1, phm, phr);
+ phr->u.a.adapter_index = 1;
+ }
+ }
+ }
+
+ if (error)
+ phr->error = error;
+
+err:
+ hpios_dsplock_unlock(pao);
+ return;
+}
diff --git a/sound/pci/asihpi/hpi6000.h b/sound/pci/asihpi/hpi6000.h
new file mode 100644
index 000000000000..4c7d507c0ecd
--- /dev/null
+++ b/sound/pci/asihpi/hpi6000.h
@@ -0,0 +1,70 @@
+/*****************************************************************************
+
+ AudioScience HPI driver
+ Copyright (C) 1997-2010 AudioScience Inc. <support@audioscience.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation;
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+Public declarations for DSP Programming Interface to TI C6701
+
+Shared between hpi6000.c and DSP code
+
+(C) Copyright AudioScience Inc. 1998-2003
+******************************************************************************/
+
+#ifndef _HPI6000_H_
+#define _HPI6000_H_
+
+#define HPI_NMIXER_CONTROLS 200
+
+/*
+ * Control caching is always supported in the HPI code.
+ * The DSP should make sure that dwControlCacheSizeInBytes is initialized to 0
+ * during boot to make it inactive.
+ */
+struct hpi_hif_6000 {
+ u32 host_cmd;
+ u32 dsp_ack;
+ u32 address;
+ u32 length;
+ u32 message_buffer_address;
+ u32 response_buffer_address;
+ u32 dsp_number;
+ u32 adapter_info;
+ u32 control_cache_is_dirty;
+ u32 control_cache_address;
+ u32 control_cache_size_in_bytes;
+ u32 control_cache_count;
+};
+
+#define HPI_HIF_PACK_ADAPTER_INFO(adapter, version_major, version_minor) \
+ ((adapter << 16) | (version_major << 8) | version_minor)
+#define HPI_HIF_ADAPTER_INFO_EXTRACT_ADAPTER(adapterinfo) \
+ ((adapterinfo >> 16) & 0xffff)
+#define HPI_HIF_ADAPTER_INFO_EXTRACT_HWVERSION_MAJOR(adapterinfo) \
+ ((adapterinfo >> 8) & 0xff)
+#define HPI_HIF_ADAPTER_INFO_EXTRACT_HWVERSION_MINOR(adapterinfo) \
+ (adapterinfo & 0xff)
+
+/* Command/status exchanged between host and DSP */
+#define HPI_HIF_IDLE 0
+#define HPI_HIF_SEND_MSG 1
+#define HPI_HIF_GET_RESP 2
+#define HPI_HIF_DATA_MASK 0x10
+#define HPI_HIF_SEND_DATA 0x13
+#define HPI_HIF_GET_DATA 0x14
+#define HPI_HIF_SEND_DONE 5
+#define HPI_HIF_RESET 9
+
+#endif /* _HPI6000_H_ */
diff --git a/sound/pci/asihpi/hpi6205.c b/sound/pci/asihpi/hpi6205.c
new file mode 100644
index 000000000000..8df2ff76d944
--- /dev/null
+++ b/sound/pci/asihpi/hpi6205.c
@@ -0,0 +1,2332 @@
+/******************************************************************************
+
+ AudioScience HPI driver
+ Copyright (C) 1997-2010 AudioScience Inc. <support@audioscience.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation;
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ Hardware Programming Interface (HPI) for AudioScience
+ ASI50xx, ASI51xx, ASI6xxx, ASI87xx and ASI89xx series adapters.
+ These PCI and PCIe bus adapters are based on a
+ TMS320C6205 PCI bus mastering DSP,
+ and (except ASI50xx) TI TMS320C6xxx floating point DSP
+
+ Exported function:
+ void HPI_6205(struct hpi_message *phm, struct hpi_response *phr)
+
+(C) Copyright AudioScience Inc. 1998-2010
+*******************************************************************************/
+#define SOURCEFILE_NAME "hpi6205.c"
+
+#include "hpi_internal.h"
+#include "hpimsginit.h"
+#include "hpidebug.h"
+#include "hpi6205.h"
+#include "hpidspcd.h"
+#include "hpicmn.h"
+
+/*****************************************************************************/
+/* HPI6205 specific error codes */
+#define HPI6205_ERROR_BASE 1000
+/*#define HPI6205_ERROR_MEM_ALLOC 1001 */
+#define HPI6205_ERROR_6205_NO_IRQ 1002
+#define HPI6205_ERROR_6205_INIT_FAILED 1003
+/*#define HPI6205_ERROR_MISSING_DSPCODE 1004 */
+#define HPI6205_ERROR_UNKNOWN_PCI_DEVICE 1005
+#define HPI6205_ERROR_6205_REG 1006
+#define HPI6205_ERROR_6205_DSPPAGE 1007
+#define HPI6205_ERROR_BAD_DSPINDEX 1008
+#define HPI6205_ERROR_C6713_HPIC 1009
+#define HPI6205_ERROR_C6713_HPIA 1010
+#define HPI6205_ERROR_C6713_PLL 1011
+#define HPI6205_ERROR_DSP_INTMEM 1012
+#define HPI6205_ERROR_DSP_EXTMEM 1013
+#define HPI6205_ERROR_DSP_PLD 1014
+#define HPI6205_ERROR_MSG_RESP_IDLE_TIMEOUT 1015
+#define HPI6205_ERROR_MSG_RESP_TIMEOUT 1016
+#define HPI6205_ERROR_6205_EEPROM 1017
+#define HPI6205_ERROR_DSP_EMIF 1018
+
+#define hpi6205_error(dsp_index, err) (err)
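+/* the dsp_index argument is currently unused; the error code is returned as-is */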
+/*****************************************************************************/
+/* for C6205 PCI i/f */
+/* Host Status Register (HSR) bitfields */
+#define C6205_HSR_INTSRC 0x01
+#define C6205_HSR_INTAVAL 0x02
+#define C6205_HSR_INTAM 0x04
+#define C6205_HSR_CFGERR 0x08
+#define C6205_HSR_EEREAD 0x10
+/* Host-to-DSP Control Register (HDCR) bitfields */
+#define C6205_HDCR_WARMRESET 0x01
+#define C6205_HDCR_DSPINT 0x02
+#define C6205_HDCR_PCIBOOT 0x04
+/* DSP Page Register (DSPP) bitfields, */
+/* defines 4 Mbyte page that BAR0 points to */
+#define C6205_DSPP_MAP1 0x400
+
+/* BAR0 maps to prefetchable 4 Mbyte memory block set by DSPP.
+ * BAR1 maps to non-prefetchable 8 Mbyte memory block
+ * of DSP memory mapped registers (starting at 0x01800000).
+ * 0x01800000 is hardcoded in the PCI i/f, so that only the offset from this
+ * needs to be added to the BAR1 base address set in the PCI config reg
+ */
+#define C6205_BAR1_PCI_IO_OFFSET (0x027FFF0L)
+#define C6205_BAR1_HSR (C6205_BAR1_PCI_IO_OFFSET)
+#define C6205_BAR1_HDCR (C6205_BAR1_PCI_IO_OFFSET+4)
+#define C6205_BAR1_DSPP (C6205_BAR1_PCI_IO_OFFSET+8)
+
+/* used to control LED (revA) and reset C6713 (revB) */
+#define C6205_BAR0_TIMER1_CTL (0x01980000L)
+
+/* For first 6713 in CE1 space, using DA17,16,2 */
+#define HPICL_ADDR 0x01400000L
+#define HPICH_ADDR 0x01400004L
+#define HPIAL_ADDR 0x01410000L
+#define HPIAH_ADDR 0x01410004L
+#define HPIDIL_ADDR 0x01420000L
+#define HPIDIH_ADDR 0x01420004L
+#define HPIDL_ADDR 0x01430000L
+#define HPIDH_ADDR 0x01430004L
+
+#define C6713_EMIF_GCTL 0x01800000
+#define C6713_EMIF_CE1 0x01800004
+#define C6713_EMIF_CE0 0x01800008
+#define C6713_EMIF_CE2 0x01800010
+#define C6713_EMIF_CE3 0x01800014
+#define C6713_EMIF_SDRAMCTL 0x01800018
+#define C6713_EMIF_SDRAMTIMING 0x0180001C
+#define C6713_EMIF_SDRAMEXT 0x01800020
+
+struct hpi_hw_obj {
+ /* PCI registers */
+ __iomem u32 *prHSR;
+ __iomem u32 *prHDCR;
+ __iomem u32 *prDSPP;
+
+ u32 dsp_page;
+
+ struct consistent_dma_area h_locked_mem;
+ struct bus_master_interface *p_interface_buffer;
+
+ u16 flag_outstream_just_reset[HPI_MAX_STREAMS];
+ /* a non-NULL handle means there is an HPI allocated buffer */
+ struct consistent_dma_area instream_host_buffers[HPI_MAX_STREAMS];
+ struct consistent_dma_area outstream_host_buffers[HPI_MAX_STREAMS];
+ /* non-zero size means a buffer exists, may be external */
+ u32 instream_host_buffer_size[HPI_MAX_STREAMS];
+ u32 outstream_host_buffer_size[HPI_MAX_STREAMS];
+
+ struct consistent_dma_area h_control_cache;
+ struct consistent_dma_area h_async_event_buffer;
+/* struct hpi_control_cache_single *pControlCache; */
+ struct hpi_async_event *p_async_event_buffer;
+ struct hpi_control_cache *p_cache;
+};
+
+/*****************************************************************************/
+/* local prototypes */
+
+#define check_before_bbm_copy(status, p_bbm_data, l_first_write, l_second_write)
+
+static int wait_dsp_ack(struct hpi_hw_obj *phw, int state, int timeout_us);
+
+static void send_dsp_command(struct hpi_hw_obj *phw, int cmd);
+
+static u16 adapter_boot_load_dsp(struct hpi_adapter_obj *pao,
+ u32 *pos_error_code);
+
+static u16 message_response_sequence(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr);
+
+static void hw_message(struct hpi_adapter_obj *pao, struct hpi_message *phm,
+ struct hpi_response *phr);
+
+#define HPI6205_TIMEOUT 1000000
+
+static void subsys_create_adapter(struct hpi_message *phm,
+ struct hpi_response *phr);
+static void subsys_delete_adapter(struct hpi_message *phm,
+ struct hpi_response *phr);
+
+static u16 create_adapter_obj(struct hpi_adapter_obj *pao,
+ u32 *pos_error_code);
+
+static void delete_adapter_obj(struct hpi_adapter_obj *pao);
+
+static void outstream_host_buffer_allocate(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr);
+
+static void outstream_host_buffer_get_info(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr);
+
+static void outstream_host_buffer_free(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr);
+static void outstream_write(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr);
+
+static void outstream_get_info(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr);
+
+static void outstream_start(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr);
+
+static void outstream_open(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr);
+
+static void outstream_reset(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr);
+
+static void instream_host_buffer_allocate(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr);
+
+static void instream_host_buffer_get_info(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr);
+
+static void instream_host_buffer_free(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr);
+
+static void instream_read(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr);
+
+static void instream_get_info(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr);
+
+static void instream_start(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr);
+
+static u32 boot_loader_read_mem32(struct hpi_adapter_obj *pao, int dsp_index,
+ u32 address);
+
+static u16 boot_loader_write_mem32(struct hpi_adapter_obj *pao, int dsp_index,
+ u32 address, u32 data);
+
+static u16 boot_loader_config_emif(struct hpi_adapter_obj *pao,
+ int dsp_index);
+
+static u16 boot_loader_test_memory(struct hpi_adapter_obj *pao, int dsp_index,
+ u32 address, u32 length);
+
+static u16 boot_loader_test_internal_memory(struct hpi_adapter_obj *pao,
+ int dsp_index);
+
+static u16 boot_loader_test_external_memory(struct hpi_adapter_obj *pao,
+ int dsp_index);
+
+static u16 boot_loader_test_pld(struct hpi_adapter_obj *pao, int dsp_index);
+
+/*****************************************************************************/
+
+static void subsys_message(struct hpi_message *phm, struct hpi_response *phr)
+{
+
+ switch (phm->function) {
+ case HPI_SUBSYS_OPEN:
+ case HPI_SUBSYS_CLOSE:
+ case HPI_SUBSYS_GET_INFO:
+ case HPI_SUBSYS_DRIVER_UNLOAD:
+ case HPI_SUBSYS_DRIVER_LOAD:
+ case HPI_SUBSYS_FIND_ADAPTERS:
+ /* messages that should not get here */
+ phr->error = HPI_ERROR_UNIMPLEMENTED;
+ break;
+ case HPI_SUBSYS_CREATE_ADAPTER:
+ subsys_create_adapter(phm, phr);
+ break;
+ case HPI_SUBSYS_DELETE_ADAPTER:
+ subsys_delete_adapter(phm, phr);
+ break;
+ default:
+ phr->error = HPI_ERROR_INVALID_FUNC;
+ break;
+ }
+}
+
+static void control_message(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr)
+{
+
+ struct hpi_hw_obj *phw = pao->priv;
+
+ switch (phm->function) {
+ case HPI_CONTROL_GET_STATE:
+ if (pao->has_control_cache) {
+			rmb();	/* make sure we see updates DMAed from the DSP */
+ if (hpi_check_control_cache(phw->p_cache, phm, phr))
+ break;
+ }
+ hw_message(pao, phm, phr);
+ break;
+ case HPI_CONTROL_GET_INFO:
+ hw_message(pao, phm, phr);
+ break;
+ case HPI_CONTROL_SET_STATE:
+ hw_message(pao, phm, phr);
+ if (pao->has_control_cache)
+ hpi_sync_control_cache(phw->p_cache, phm, phr);
+ break;
+ default:
+ phr->error = HPI_ERROR_INVALID_FUNC;
+ break;
+ }
+}
+
+static void adapter_message(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr)
+{
+ switch (phm->function) {
+ default:
+ hw_message(pao, phm, phr);
+ break;
+ }
+}
+
+static void outstream_message(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr)
+{
+
+ if (phm->obj_index >= HPI_MAX_STREAMS) {
+ phr->error = HPI_ERROR_INVALID_STREAM;
+ HPI_DEBUG_LOG(WARNING,
+ "message referencing invalid stream %d "
+ "on adapter index %d\n", phm->obj_index,
+ phm->adapter_index);
+ return;
+ }
+
+ switch (phm->function) {
+ case HPI_OSTREAM_WRITE:
+ outstream_write(pao, phm, phr);
+ break;
+ case HPI_OSTREAM_GET_INFO:
+ outstream_get_info(pao, phm, phr);
+ break;
+ case HPI_OSTREAM_HOSTBUFFER_ALLOC:
+ outstream_host_buffer_allocate(pao, phm, phr);
+ break;
+ case HPI_OSTREAM_HOSTBUFFER_GET_INFO:
+ outstream_host_buffer_get_info(pao, phm, phr);
+ break;
+ case HPI_OSTREAM_HOSTBUFFER_FREE:
+ outstream_host_buffer_free(pao, phm, phr);
+ break;
+ case HPI_OSTREAM_START:
+ outstream_start(pao, phm, phr);
+ break;
+ case HPI_OSTREAM_OPEN:
+ outstream_open(pao, phm, phr);
+ break;
+ case HPI_OSTREAM_RESET:
+ outstream_reset(pao, phm, phr);
+ break;
+ default:
+ hw_message(pao, phm, phr);
+ break;
+ }
+}
+
+static void instream_message(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr)
+{
+
+ if (phm->obj_index >= HPI_MAX_STREAMS) {
+ phr->error = HPI_ERROR_INVALID_STREAM;
+ HPI_DEBUG_LOG(WARNING,
+ "message referencing invalid stream %d "
+ "on adapter index %d\n", phm->obj_index,
+ phm->adapter_index);
+ return;
+ }
+
+ switch (phm->function) {
+ case HPI_ISTREAM_READ:
+ instream_read(pao, phm, phr);
+ break;
+ case HPI_ISTREAM_GET_INFO:
+ instream_get_info(pao, phm, phr);
+ break;
+ case HPI_ISTREAM_HOSTBUFFER_ALLOC:
+ instream_host_buffer_allocate(pao, phm, phr);
+ break;
+ case HPI_ISTREAM_HOSTBUFFER_GET_INFO:
+ instream_host_buffer_get_info(pao, phm, phr);
+ break;
+ case HPI_ISTREAM_HOSTBUFFER_FREE:
+ instream_host_buffer_free(pao, phm, phr);
+ break;
+ case HPI_ISTREAM_START:
+ instream_start(pao, phm, phr);
+ break;
+ default:
+ hw_message(pao, phm, phr);
+ break;
+ }
+}
+
+/*****************************************************************************/
+/** Entry point to this HPI backend
+ * All calls to the HPI start here
+ */
+void HPI_6205(struct hpi_message *phm, struct hpi_response *phr)
+{
+ struct hpi_adapter_obj *pao = NULL;
+
+	/* subsystem messages are processed by every HPI.
+ * All other messages are ignored unless the adapter index matches
+ * an adapter in the HPI
+ */
+ HPI_DEBUG_LOG(DEBUG, "HPI obj=%d, func=%d\n", phm->object,
+ phm->function);
+
+ /* if Dsp has crashed then do not communicate with it any more */
+ if (phm->object != HPI_OBJ_SUBSYSTEM) {
+ pao = hpi_find_adapter(phm->adapter_index);
+ if (!pao) {
+ HPI_DEBUG_LOG(DEBUG,
+ " %d,%d refused, for another HPI?\n",
+ phm->object, phm->function);
+ return;
+ }
+
+ if ((pao->dsp_crashed >= 10)
+ && (phm->function != HPI_ADAPTER_DEBUG_READ)) {
+ /* allow last resort debug read even after crash */
+ hpi_init_response(phr, phm->object, phm->function,
+ HPI_ERROR_DSP_HARDWARE);
+ HPI_DEBUG_LOG(WARNING, " %d,%d dsp crashed.\n",
+ phm->object, phm->function);
+ return;
+ }
+ }
+
+ /* Init default response */
+ if (phm->function != HPI_SUBSYS_CREATE_ADAPTER)
+ hpi_init_response(phr, phm->object, phm->function,
+ HPI_ERROR_PROCESSING_MESSAGE);
+
+ HPI_DEBUG_LOG(VERBOSE, "start of switch\n");
+ switch (phm->type) {
+ case HPI_TYPE_MESSAGE:
+ switch (phm->object) {
+ case HPI_OBJ_SUBSYSTEM:
+ subsys_message(phm, phr);
+ break;
+
+ case HPI_OBJ_ADAPTER:
+ phr->size =
+ sizeof(struct hpi_response_header) +
+ sizeof(struct hpi_adapter_res);
+ adapter_message(pao, phm, phr);
+ break;
+
+ case HPI_OBJ_CONTROLEX:
+ case HPI_OBJ_CONTROL:
+ control_message(pao, phm, phr);
+ break;
+
+ case HPI_OBJ_OSTREAM:
+ outstream_message(pao, phm, phr);
+ break;
+
+ case HPI_OBJ_ISTREAM:
+ instream_message(pao, phm, phr);
+ break;
+
+ default:
+ hw_message(pao, phm, phr);
+ break;
+ }
+ break;
+
+ default:
+ phr->error = HPI_ERROR_INVALID_TYPE;
+ break;
+ }
+}
+
+/*****************************************************************************/
+/* SUBSYSTEM */
+
+/** Create an adapter object and initialise it based on resource information
+ * passed in the message.
+ * *** NOTE - you cannot use this function AND the FindAdapters function at the
+ * same time; the application must use only one of them to get the adapters ***
+ */
+static void subsys_create_adapter(struct hpi_message *phm,
+ struct hpi_response *phr)
+{
+ /* create temp adapter obj, because we don't know what index yet */
+ struct hpi_adapter_obj ao;
+ u32 os_error_code;
+ u16 err;
+
+ HPI_DEBUG_LOG(DEBUG, " subsys_create_adapter\n");
+
+ memset(&ao, 0, sizeof(ao));
+
+ /* this HPI only creates adapters for TI/PCI devices */
+ if (phm->u.s.resource.bus_type != HPI_BUS_PCI)
+ return;
+ if (phm->u.s.resource.r.pci->vendor_id != HPI_PCI_VENDOR_ID_TI)
+ return;
+ if (phm->u.s.resource.r.pci->device_id != HPI_PCI_DEV_ID_DSP6205)
+ return;
+
+ ao.priv = kmalloc(sizeof(struct hpi_hw_obj), GFP_KERNEL);
+ if (!ao.priv) {
+ HPI_DEBUG_LOG(ERROR, "cant get mem for adapter object\n");
+ phr->error = HPI_ERROR_MEMORY_ALLOC;
+ return;
+ }
+ memset(ao.priv, 0, sizeof(struct hpi_hw_obj));
+
+ ao.pci = *phm->u.s.resource.r.pci;
+ err = create_adapter_obj(&ao, &os_error_code);
+ if (!err)
+ err = hpi_add_adapter(&ao);
+ if (err) {
+ phr->u.s.data = os_error_code;
+ delete_adapter_obj(&ao);
+ phr->error = err;
+ return;
+ }
+
+ phr->u.s.aw_adapter_list[ao.index] = ao.adapter_type;
+ phr->u.s.adapter_index = ao.index;
+ phr->u.s.num_adapters++;
+ phr->error = 0;
+}
+
+/** delete an adapter - required by WDM driver */
+static void subsys_delete_adapter(struct hpi_message *phm,
+ struct hpi_response *phr)
+{
+ struct hpi_adapter_obj *pao;
+ struct hpi_hw_obj *phw;
+
+ pao = hpi_find_adapter(phm->adapter_index);
+ if (!pao) {
+ phr->error = HPI_ERROR_INVALID_OBJ_INDEX;
+ return;
+ }
+ phw = (struct hpi_hw_obj *)pao->priv;
+ /* reset adapter h/w */
+ /* Reset C6713 #1 */
+ boot_loader_write_mem32(pao, 0, C6205_BAR0_TIMER1_CTL, 0);
+ /* reset C6205 */
+ iowrite32(C6205_HDCR_WARMRESET, phw->prHDCR);
+
+ delete_adapter_obj(pao);
+ phr->error = 0;
+}
+
+/** Create adapter object
+ allocate buffers, bootload DSPs, initialise control cache
+*/
+static u16 create_adapter_obj(struct hpi_adapter_obj *pao,
+ u32 *pos_error_code)
+{
+ struct hpi_hw_obj *phw = pao->priv;
+ struct bus_master_interface *interface;
+ u32 phys_addr;
+#ifndef HPI6205_NO_HSR_POLL
+ u32 time_out = HPI6205_TIMEOUT;
+ u32 temp1;
+#endif
+ int i;
+ u16 err;
+
+ /* init error reporting */
+ pao->dsp_crashed = 0;
+
+ for (i = 0; i < HPI_MAX_STREAMS; i++)
+ phw->flag_outstream_just_reset[i] = 1;
+
+ /* The C6205 memory area 1 is 8Mbyte window into DSP registers */
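+	/* ap_mem_base[1] is a pointer to u32, so the byte offsets below are
+	 * scaled down to word offsets
+	 */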
+ phw->prHSR =
+ pao->pci.ap_mem_base[1] +
+ C6205_BAR1_HSR / sizeof(*pao->pci.ap_mem_base[1]);
+ phw->prHDCR =
+ pao->pci.ap_mem_base[1] +
+ C6205_BAR1_HDCR / sizeof(*pao->pci.ap_mem_base[1]);
+ phw->prDSPP =
+ pao->pci.ap_mem_base[1] +
+ C6205_BAR1_DSPP / sizeof(*pao->pci.ap_mem_base[1]);
+
+ pao->has_control_cache = 0;
+
+ if (hpios_locked_mem_alloc(&phw->h_locked_mem,
+ sizeof(struct bus_master_interface),
+ pao->pci.p_os_data))
+ phw->p_interface_buffer = NULL;
+ else if (hpios_locked_mem_get_virt_addr(&phw->h_locked_mem,
+ (void *)&phw->p_interface_buffer))
+ phw->p_interface_buffer = NULL;
+
+ HPI_DEBUG_LOG(DEBUG, "interface buffer address %p\n",
+ phw->p_interface_buffer);
+
+ if (phw->p_interface_buffer) {
+ memset((void *)phw->p_interface_buffer, 0,
+ sizeof(struct bus_master_interface));
+ phw->p_interface_buffer->dsp_ack = H620_HIF_UNKNOWN;
+ }
+
+ err = adapter_boot_load_dsp(pao, pos_error_code);
+ if (err)
+ /* no need to clean up as SubSysCreateAdapter */
+ /* calls DeleteAdapter on error. */
+ return err;
+
+ HPI_DEBUG_LOG(INFO, "load DSP code OK\n");
+
+	/* allow boot load even if mem alloc won't work */
+ if (!phw->p_interface_buffer)
+ return hpi6205_error(0, HPI_ERROR_MEMORY_ALLOC);
+
+ interface = phw->p_interface_buffer;
+
+#ifndef HPI6205_NO_HSR_POLL
+ /* wait for first interrupt indicating the DSP init is done */
+ time_out = HPI6205_TIMEOUT * 10;
+ temp1 = 0;
+ while (((temp1 & C6205_HSR_INTSRC) == 0) && --time_out)
+ temp1 = ioread32(phw->prHSR);
+
+ if (temp1 & C6205_HSR_INTSRC)
+ HPI_DEBUG_LOG(INFO,
+ "interrupt confirming DSP code running OK\n");
+ else {
+ HPI_DEBUG_LOG(ERROR,
+ "timed out waiting for interrupt "
+ "confirming DSP code running\n");
+ return hpi6205_error(0, HPI6205_ERROR_6205_NO_IRQ);
+ }
+
+ /* reset the interrupt */
+ iowrite32(C6205_HSR_INTSRC, phw->prHSR);
+#endif
+
+ /* make sure the DSP has started ok */
+ if (!wait_dsp_ack(phw, H620_HIF_RESET, HPI6205_TIMEOUT * 10)) {
+ HPI_DEBUG_LOG(ERROR, "timed out waiting reset state \n");
+ return hpi6205_error(0, HPI6205_ERROR_6205_INIT_FAILED);
+ }
+ /* Note that *pao, *phw are zeroed after allocation,
+ * so pointers and flags are NULL by default.
+ * Allocate bus mastering control cache buffer and tell the DSP about it
+ */
+ if (interface->control_cache.number_of_controls) {
+ void *p_control_cache_virtual;
+
+ err = hpios_locked_mem_alloc(&phw->h_control_cache,
+ interface->control_cache.size_in_bytes,
+ pao->pci.p_os_data);
+ if (!err)
+ err = hpios_locked_mem_get_virt_addr(&phw->
+ h_control_cache, &p_control_cache_virtual);
+ if (!err) {
+ memset(p_control_cache_virtual, 0,
+ interface->control_cache.size_in_bytes);
+
+ phw->p_cache =
+ hpi_alloc_control_cache(interface->
+ control_cache.number_of_controls,
+ interface->control_cache.size_in_bytes,
+ (struct hpi_control_cache_info *)
+ p_control_cache_virtual);
+ }
+ if (!err) {
+ err = hpios_locked_mem_get_phys_addr(&phw->
+ h_control_cache, &phys_addr);
+ interface->control_cache.physical_address32 =
+ phys_addr;
+ }
+
+ if (!err)
+ pao->has_control_cache = 1;
+ else {
+ if (hpios_locked_mem_valid(&phw->h_control_cache))
+ hpios_locked_mem_free(&phw->h_control_cache);
+ pao->has_control_cache = 0;
+ }
+ }
+ /* allocate bus mastering async buffer and tell the DSP about it */
+ if (interface->async_buffer.b.size) {
+ err = hpios_locked_mem_alloc(&phw->h_async_event_buffer,
+ interface->async_buffer.b.size *
+ sizeof(struct hpi_async_event), pao->pci.p_os_data);
+ if (!err)
+ err = hpios_locked_mem_get_virt_addr
+ (&phw->h_async_event_buffer, (void *)
+ &phw->p_async_event_buffer);
+ if (!err)
+ memset((void *)phw->p_async_event_buffer, 0,
+ interface->async_buffer.b.size *
+ sizeof(struct hpi_async_event));
+ if (!err) {
+ err = hpios_locked_mem_get_phys_addr
+ (&phw->h_async_event_buffer, &phys_addr);
+ interface->async_buffer.physical_address32 =
+ phys_addr;
+ }
+ if (err) {
+ if (hpios_locked_mem_valid(&phw->
+ h_async_event_buffer)) {
+ hpios_locked_mem_free
+ (&phw->h_async_event_buffer);
+ phw->p_async_event_buffer = NULL;
+ }
+ }
+ }
+ send_dsp_command(phw, H620_HIF_IDLE);
+
+ {
+ struct hpi_message hM;
+ struct hpi_response hR;
+ u32 max_streams;
+
+ HPI_DEBUG_LOG(VERBOSE, "init ADAPTER_GET_INFO\n");
+ memset(&hM, 0, sizeof(hM));
+ hM.type = HPI_TYPE_MESSAGE;
+ hM.size = sizeof(hM);
+ hM.object = HPI_OBJ_ADAPTER;
+ hM.function = HPI_ADAPTER_GET_INFO;
+ hM.adapter_index = 0;
+ memset(&hR, 0, sizeof(hR));
+ hR.size = sizeof(hR);
+
+ err = message_response_sequence(pao, &hM, &hR);
+ if (err) {
+ HPI_DEBUG_LOG(ERROR, "message transport error %d\n",
+ err);
+ return err;
+ }
+ if (hR.error)
+ return hR.error;
+
+ pao->adapter_type = hR.u.a.adapter_type;
+ pao->index = hR.u.a.adapter_index;
+
+ max_streams = hR.u.a.num_outstreams + hR.u.a.num_instreams;
+
+ hpios_locked_mem_prepare((max_streams * 6) / 10, max_streams,
+ 65536, pao->pci.p_os_data);
+
+ HPI_DEBUG_LOG(VERBOSE,
+ "got adapter info type %x index %d serial %d\n",
+ hR.u.a.adapter_type, hR.u.a.adapter_index,
+ hR.u.a.serial_number);
+ }
+
+ pao->open = 0; /* upon creation the adapter is closed */
+
+ HPI_DEBUG_LOG(INFO, "bootload DSP OK\n");
+ return 0;
+}
+
+/** Free memory areas allocated by adapter
+ * This routine is called from SubSysDeleteAdapter,
+ * and from SubSysCreateAdapter if there is a duplicate index.
+*/
+static void delete_adapter_obj(struct hpi_adapter_obj *pao)
+{
+ struct hpi_hw_obj *phw;
+ int i;
+
+ phw = pao->priv;
+
+ if (hpios_locked_mem_valid(&phw->h_async_event_buffer)) {
+ hpios_locked_mem_free(&phw->h_async_event_buffer);
+ phw->p_async_event_buffer = NULL;
+ }
+
+ if (hpios_locked_mem_valid(&phw->h_control_cache)) {
+ hpios_locked_mem_free(&phw->h_control_cache);
+ hpi_free_control_cache(phw->p_cache);
+ }
+
+ if (hpios_locked_mem_valid(&phw->h_locked_mem)) {
+ hpios_locked_mem_free(&phw->h_locked_mem);
+ phw->p_interface_buffer = NULL;
+ }
+
+ for (i = 0; i < HPI_MAX_STREAMS; i++)
+ if (hpios_locked_mem_valid(&phw->instream_host_buffers[i])) {
+ hpios_locked_mem_free(&phw->instream_host_buffers[i]);
+ /*?phw->InStreamHostBuffers[i] = NULL; */
+ phw->instream_host_buffer_size[i] = 0;
+ }
+
+ for (i = 0; i < HPI_MAX_STREAMS; i++)
+ if (hpios_locked_mem_valid(&phw->outstream_host_buffers[i])) {
+ hpios_locked_mem_free(&phw->outstream_host_buffers
+ [i]);
+ phw->outstream_host_buffer_size[i] = 0;
+ }
+
+ hpios_locked_mem_unprepare(pao->pci.p_os_data);
+
+ hpi_delete_adapter(pao);
+ kfree(phw);
+}
+
+/*****************************************************************************/
+/* OutStream Host buffer functions */
+
+/** Allocate or attach buffer for busmastering
+*/
+static void outstream_host_buffer_allocate(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr)
+{
+ u16 err = 0;
+ u32 command = phm->u.d.u.buffer.command;
+ struct hpi_hw_obj *phw = pao->priv;
+ struct bus_master_interface *interface = phw->p_interface_buffer;
+
+ hpi_init_response(phr, phm->object, phm->function, 0);
+
+ if (command == HPI_BUFFER_CMD_EXTERNAL
+ || command == HPI_BUFFER_CMD_INTERNAL_ALLOC) {
+ /* ALLOC phase, allocate a buffer with power of 2 size,
+ get its bus address for PCI bus mastering
+ */
+ phm->u.d.u.buffer.buffer_size =
+ roundup_pow_of_two(phm->u.d.u.buffer.buffer_size);
+ /* return old size and allocated size,
+ so caller can detect change */
+ phr->u.d.u.stream_info.data_available =
+ phw->outstream_host_buffer_size[phm->obj_index];
+ phr->u.d.u.stream_info.buffer_size =
+ phm->u.d.u.buffer.buffer_size;
+
+ if (phw->outstream_host_buffer_size[phm->obj_index] ==
+ phm->u.d.u.buffer.buffer_size) {
+ /* Same size, no action required */
+ return;
+ }
+
+ if (hpios_locked_mem_valid(&phw->outstream_host_buffers[phm->
+ obj_index]))
+ hpios_locked_mem_free(&phw->outstream_host_buffers
+ [phm->obj_index]);
+
+ err = hpios_locked_mem_alloc(&phw->outstream_host_buffers
+ [phm->obj_index], phm->u.d.u.buffer.buffer_size,
+ pao->pci.p_os_data);
+
+ if (err) {
+ phr->error = HPI_ERROR_INVALID_DATASIZE;
+ phw->outstream_host_buffer_size[phm->obj_index] = 0;
+ return;
+ }
+
+ err = hpios_locked_mem_get_phys_addr
+ (&phw->outstream_host_buffers[phm->obj_index],
+ &phm->u.d.u.buffer.pci_address);
+		/* Get the physical address into the message for a single-call
+		 * alloc; the caller needs to do this for a split alloc (or use
+		 * the same message). Also return the physical address in the
+		 * response for a split alloc.
+		 */
+ phr->u.d.u.stream_info.auxiliary_data_available =
+ phm->u.d.u.buffer.pci_address;
+
+ if (err) {
+ hpios_locked_mem_free(&phw->outstream_host_buffers
+ [phm->obj_index]);
+ phw->outstream_host_buffer_size[phm->obj_index] = 0;
+ phr->error = HPI_ERROR_MEMORY_ALLOC;
+ return;
+ }
+ }
+
+ if (command == HPI_BUFFER_CMD_EXTERNAL
+ || command == HPI_BUFFER_CMD_INTERNAL_GRANTADAPTER) {
+ /* GRANT phase. Set up the BBM status, tell the DSP about
+ the buffer so it can start using BBM.
+ */
+ struct hpi_hostbuffer_status *status;
+
+ if (phm->u.d.u.buffer.buffer_size & (phm->u.d.u.buffer.
+ buffer_size - 1)) {
+ HPI_DEBUG_LOG(ERROR,
+ "buffer size must be 2^N not %d\n",
+ phm->u.d.u.buffer.buffer_size);
+ phr->error = HPI_ERROR_INVALID_DATASIZE;
+ return;
+ }
+ phw->outstream_host_buffer_size[phm->obj_index] =
+ phm->u.d.u.buffer.buffer_size;
+ status = &interface->outstream_host_buffer_status[phm->
+ obj_index];
+ status->samples_processed = 0;
+ status->stream_state = HPI_STATE_STOPPED;
+ status->dSP_index = 0;
+ status->host_index = status->dSP_index;
+ status->size_in_bytes = phm->u.d.u.buffer.buffer_size;
+
+ hw_message(pao, phm, phr);
+
+ if (phr->error
+ && hpios_locked_mem_valid(&phw->
+ outstream_host_buffers[phm->obj_index])) {
+ hpios_locked_mem_free(&phw->outstream_host_buffers
+ [phm->obj_index]);
+ phw->outstream_host_buffer_size[phm->obj_index] = 0;
+ }
+ }
+}
+
+static void outstream_host_buffer_get_info(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr)
+{
+ struct hpi_hw_obj *phw = pao->priv;
+ struct bus_master_interface *interface = phw->p_interface_buffer;
+ struct hpi_hostbuffer_status *status;
+ u8 *p_bbm_data;
+
+ if (hpios_locked_mem_valid(&phw->outstream_host_buffers[phm->
+ obj_index])) {
+ if (hpios_locked_mem_get_virt_addr(&phw->
+ outstream_host_buffers[phm->obj_index],
+ (void *)&p_bbm_data)) {
+ phr->error = HPI_ERROR_INVALID_OPERATION;
+ return;
+ }
+ status = &interface->outstream_host_buffer_status[phm->
+ obj_index];
+ hpi_init_response(phr, HPI_OBJ_OSTREAM,
+ HPI_OSTREAM_HOSTBUFFER_GET_INFO, 0);
+ phr->u.d.u.hostbuffer_info.p_buffer = p_bbm_data;
+ phr->u.d.u.hostbuffer_info.p_status = status;
+ } else {
+ hpi_init_response(phr, HPI_OBJ_OSTREAM,
+ HPI_OSTREAM_HOSTBUFFER_GET_INFO,
+ HPI_ERROR_INVALID_OPERATION);
+ }
+}
+
+static void outstream_host_buffer_free(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr)
+{
+ struct hpi_hw_obj *phw = pao->priv;
+ u32 command = phm->u.d.u.buffer.command;
+
+ if (phw->outstream_host_buffer_size[phm->obj_index]) {
+ if (command == HPI_BUFFER_CMD_EXTERNAL
+ || command == HPI_BUFFER_CMD_INTERNAL_REVOKEADAPTER) {
+ phw->outstream_host_buffer_size[phm->obj_index] = 0;
+ hw_message(pao, phm, phr);
+ /* Tell adapter to stop using the host buffer. */
+ }
+ if (command == HPI_BUFFER_CMD_EXTERNAL
+ || command == HPI_BUFFER_CMD_INTERNAL_FREE)
+ hpios_locked_mem_free(&phw->outstream_host_buffers
+ [phm->obj_index]);
+ }
+ /* Should HPI_ERROR_INVALID_OPERATION be returned
+ if no host buffer is allocated? */
+ else
+ hpi_init_response(phr, HPI_OBJ_OSTREAM,
+ HPI_OSTREAM_HOSTBUFFER_FREE, 0);
+
+}
+
+static long outstream_get_space_available(struct hpi_hostbuffer_status
+ *status)
+{
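+	/* host_index and dSP_index are free-running byte counters; their
+	 * difference is the amount of data queued in the host buffer
+	 */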
+ return status->size_in_bytes - ((long)(status->host_index) -
+ (long)(status->dSP_index));
+}
+
+static void outstream_write(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr)
+{
+ struct hpi_hw_obj *phw = pao->priv;
+ struct bus_master_interface *interface = phw->p_interface_buffer;
+ struct hpi_hostbuffer_status *status;
+ long space_available;
+
+ if (!phw->outstream_host_buffer_size[phm->obj_index]) {
+ /* there is no BBM buffer, write via message */
+ hw_message(pao, phm, phr);
+ return;
+ }
+
+ hpi_init_response(phr, phm->object, phm->function, 0);
+ status = &interface->outstream_host_buffer_status[phm->obj_index];
+
+ if (phw->flag_outstream_just_reset[phm->obj_index]) {
+ /* Format can only change after reset. Must tell DSP. */
+ u16 function = phm->function;
+ phw->flag_outstream_just_reset[phm->obj_index] = 0;
+ phm->function = HPI_OSTREAM_SET_FORMAT;
+ hw_message(pao, phm, phr); /* send the format to the DSP */
+ phm->function = function;
+ if (phr->error)
+ return;
+ }
+#if 1
+ if (phw->flag_outstream_just_reset[phm->obj_index]) {
+		/* First OutStreamWrite() call following reset will write data to the
+ adapter's buffers, reducing delay before stream can start
+ */
+ int partial_write = 0;
+ unsigned int original_size = 0;
+
+ /* Send the first buffer to the DSP the old way. */
+ /* Limit size of first transfer - */
+ /* expect that this will not usually be triggered. */
+ if (phm->u.d.u.data.data_size > HPI6205_SIZEOF_DATA) {
+ partial_write = 1;
+ original_size = phm->u.d.u.data.data_size;
+ phm->u.d.u.data.data_size = HPI6205_SIZEOF_DATA;
+ }
+ /* write it */
+ phm->function = HPI_OSTREAM_WRITE;
+ hw_message(pao, phm, phr);
+ /* update status information that the DSP would typically
+ * update (and will update next time the DSP
+ * buffer update task reads data from the host BBM buffer)
+ */
+ status->auxiliary_data_available = phm->u.d.u.data.data_size;
+ status->host_index += phm->u.d.u.data.data_size;
+ status->dSP_index += phm->u.d.u.data.data_size;
+
+ /* if we did a full write, we can return from here. */
+ if (!partial_write)
+ return;
+
+ /* tweak buffer parameters and let the rest of the */
+ /* buffer land in internal BBM buffer */
+ phm->u.d.u.data.data_size =
+ original_size - HPI6205_SIZEOF_DATA;
+ phm->u.d.u.data.pb_data += HPI6205_SIZEOF_DATA;
+ }
+#endif
+
+ space_available = outstream_get_space_available(status);
+ if (space_available < (long)phm->u.d.u.data.data_size) {
+ phr->error = HPI_ERROR_INVALID_DATASIZE;
+ return;
+ }
+
+	/* a valid HostBuffers entry indicates the host buffer was allocated
+	   internally; otherwise it is assumed to be external and the data has
+	   already been written to it externally */
+ if (phm->u.d.u.data.pb_data
+ && hpios_locked_mem_valid(&phw->outstream_host_buffers[phm->
+ obj_index])) {
+ u8 *p_bbm_data;
+ long l_first_write;
+ u8 *p_app_data = (u8 *)phm->u.d.u.data.pb_data;
+
+ if (hpios_locked_mem_get_virt_addr(&phw->
+ outstream_host_buffers[phm->obj_index],
+ (void *)&p_bbm_data)) {
+ phr->error = HPI_ERROR_INVALID_OPERATION;
+ return;
+ }
+
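+		/* the BBM buffer size is a power of two, so indices are wrapped
+		 * by masking with (size_in_bytes - 1); a write that reaches the
+		 * end of the buffer is split into two copies
+		 */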
+ /* either all data,
+ or enough to fit from current to end of BBM buffer */
+ l_first_write =
+ min(phm->u.d.u.data.data_size,
+ status->size_in_bytes -
+ (status->host_index & (status->size_in_bytes - 1)));
+
+ memcpy(p_bbm_data +
+ (status->host_index & (status->size_in_bytes - 1)),
+ p_app_data, l_first_write);
+ /* remaining data if any */
+ memcpy(p_bbm_data, p_app_data + l_first_write,
+ phm->u.d.u.data.data_size - l_first_write);
+ }
+ status->host_index += phm->u.d.u.data.data_size;
+}
+
+static void outstream_get_info(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr)
+{
+ struct hpi_hw_obj *phw = pao->priv;
+ struct bus_master_interface *interface = phw->p_interface_buffer;
+ struct hpi_hostbuffer_status *status;
+
+ if (!phw->outstream_host_buffer_size[phm->obj_index]) {
+ hw_message(pao, phm, phr);
+ return;
+ }
+
+ hpi_init_response(phr, phm->object, phm->function, 0);
+
+ status = &interface->outstream_host_buffer_status[phm->obj_index];
+
+ phr->u.d.u.stream_info.state = (u16)status->stream_state;
+ phr->u.d.u.stream_info.samples_transferred =
+ status->samples_processed;
+ phr->u.d.u.stream_info.buffer_size = status->size_in_bytes;
+ phr->u.d.u.stream_info.data_available =
+ status->size_in_bytes - outstream_get_space_available(status);
+ phr->u.d.u.stream_info.auxiliary_data_available =
+ status->auxiliary_data_available;
+}
+
+static void outstream_start(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr)
+{
+ hw_message(pao, phm, phr);
+}
+
+static void outstream_reset(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr)
+{
+ struct hpi_hw_obj *phw = pao->priv;
+ phw->flag_outstream_just_reset[phm->obj_index] = 1;
+ hw_message(pao, phm, phr);
+}
+
+static void outstream_open(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr)
+{
+ outstream_reset(pao, phm, phr);
+}
+
+/*****************************************************************************/
+/* InStream Host buffer functions */
+
+static void instream_host_buffer_allocate(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr)
+{
+ u16 err = 0;
+ u32 command = phm->u.d.u.buffer.command;
+ struct hpi_hw_obj *phw = pao->priv;
+ struct bus_master_interface *interface = phw->p_interface_buffer;
+
+ hpi_init_response(phr, phm->object, phm->function, 0);
+
+ if (command == HPI_BUFFER_CMD_EXTERNAL
+ || command == HPI_BUFFER_CMD_INTERNAL_ALLOC) {
+
+ phm->u.d.u.buffer.buffer_size =
+ roundup_pow_of_two(phm->u.d.u.buffer.buffer_size);
+ phr->u.d.u.stream_info.data_available =
+ phw->instream_host_buffer_size[phm->obj_index];
+ phr->u.d.u.stream_info.buffer_size =
+ phm->u.d.u.buffer.buffer_size;
+
+ if (phw->instream_host_buffer_size[phm->obj_index] ==
+ phm->u.d.u.buffer.buffer_size) {
+ /* Same size, no action required */
+ return;
+ }
+
+ if (hpios_locked_mem_valid(&phw->instream_host_buffers[phm->
+ obj_index]))
+ hpios_locked_mem_free(&phw->instream_host_buffers
+ [phm->obj_index]);
+
+ err = hpios_locked_mem_alloc(&phw->instream_host_buffers[phm->
+ obj_index], phm->u.d.u.buffer.buffer_size,
+ pao->pci.p_os_data);
+
+ if (err) {
+ phr->error = HPI_ERROR_INVALID_DATASIZE;
+ phw->instream_host_buffer_size[phm->obj_index] = 0;
+ return;
+ }
+
+ err = hpios_locked_mem_get_phys_addr
+ (&phw->instream_host_buffers[phm->obj_index],
+ &phm->u.d.u.buffer.pci_address);
+		/* Get the physical address into the message for a single-call
+		   alloc. The caller must do this itself for a split alloc, so
+		   also return the physical address in the response. */
+ phr->u.d.u.stream_info.auxiliary_data_available =
+ phm->u.d.u.buffer.pci_address;
+ if (err) {
+ hpios_locked_mem_free(&phw->instream_host_buffers
+ [phm->obj_index]);
+ phw->instream_host_buffer_size[phm->obj_index] = 0;
+ phr->error = HPI_ERROR_MEMORY_ALLOC;
+ return;
+ }
+ }
+
+ if (command == HPI_BUFFER_CMD_EXTERNAL
+ || command == HPI_BUFFER_CMD_INTERNAL_GRANTADAPTER) {
+ struct hpi_hostbuffer_status *status;
+
+ if (phm->u.d.u.buffer.buffer_size & (phm->u.d.u.buffer.
+ buffer_size - 1)) {
+ HPI_DEBUG_LOG(ERROR,
+ "buffer size must be 2^N not %d\n",
+ phm->u.d.u.buffer.buffer_size);
+ phr->error = HPI_ERROR_INVALID_DATASIZE;
+ return;
+ }
+
+ phw->instream_host_buffer_size[phm->obj_index] =
+ phm->u.d.u.buffer.buffer_size;
+ status = &interface->instream_host_buffer_status[phm->
+ obj_index];
+ status->samples_processed = 0;
+ status->stream_state = HPI_STATE_STOPPED;
+ status->dSP_index = 0;
+ status->host_index = status->dSP_index;
+ status->size_in_bytes = phm->u.d.u.buffer.buffer_size;
+
+ hw_message(pao, phm, phr);
+ if (phr->error
+ && hpios_locked_mem_valid(&phw->
+ instream_host_buffers[phm->obj_index])) {
+ hpios_locked_mem_free(&phw->instream_host_buffers
+ [phm->obj_index]);
+ phw->instream_host_buffer_size[phm->obj_index] = 0;
+ }
+ }
+}
+
+static void instream_host_buffer_get_info(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr)
+{
+ struct hpi_hw_obj *phw = pao->priv;
+ struct bus_master_interface *interface = phw->p_interface_buffer;
+ struct hpi_hostbuffer_status *status;
+ u8 *p_bbm_data;
+
+ if (hpios_locked_mem_valid(&phw->instream_host_buffers[phm->
+ obj_index])) {
+ if (hpios_locked_mem_get_virt_addr(&phw->
+ instream_host_buffers[phm->obj_index],
+ (void *)&p_bbm_data)) {
+ phr->error = HPI_ERROR_INVALID_OPERATION;
+ return;
+ }
+ status = &interface->instream_host_buffer_status[phm->
+ obj_index];
+ hpi_init_response(phr, HPI_OBJ_ISTREAM,
+ HPI_ISTREAM_HOSTBUFFER_GET_INFO, 0);
+ phr->u.d.u.hostbuffer_info.p_buffer = p_bbm_data;
+ phr->u.d.u.hostbuffer_info.p_status = status;
+ } else {
+ hpi_init_response(phr, HPI_OBJ_ISTREAM,
+ HPI_ISTREAM_HOSTBUFFER_GET_INFO,
+ HPI_ERROR_INVALID_OPERATION);
+ }
+}
+
+static void instream_host_buffer_free(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr)
+{
+ struct hpi_hw_obj *phw = pao->priv;
+ u32 command = phm->u.d.u.buffer.command;
+
+ if (phw->instream_host_buffer_size[phm->obj_index]) {
+ if (command == HPI_BUFFER_CMD_EXTERNAL
+ || command == HPI_BUFFER_CMD_INTERNAL_REVOKEADAPTER) {
+ phw->instream_host_buffer_size[phm->obj_index] = 0;
+ hw_message(pao, phm, phr);
+ }
+
+ if (command == HPI_BUFFER_CMD_EXTERNAL
+ || command == HPI_BUFFER_CMD_INTERNAL_FREE)
+ hpios_locked_mem_free(&phw->instream_host_buffers
+ [phm->obj_index]);
+
+ } else {
+ /* Should HPI_ERROR_INVALID_OPERATION be returned
+ if no host buffer is allocated? */
+ hpi_init_response(phr, HPI_OBJ_ISTREAM,
+ HPI_ISTREAM_HOSTBUFFER_FREE, 0);
+
+ }
+
+}
+
+static void instream_start(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr)
+{
+ hw_message(pao, phm, phr);
+}
+
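+/* Bytes the DSP has written into the host buffer that the host has not yet
+   read out. */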
+static long instream_get_bytes_available(struct hpi_hostbuffer_status *status)
+{
+ return (long)(status->dSP_index) - (long)(status->host_index);
+}
+
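+/* Copy captured data from the BBM host buffer into the caller's buffer,
+   handling wrap-around at the end of the power-of-two sized ring. If no host
+   buffer is in use, the read is passed through to the DSP. */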
+static void instream_read(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr)
+{
+ struct hpi_hw_obj *phw = pao->priv;
+ struct bus_master_interface *interface = phw->p_interface_buffer;
+ struct hpi_hostbuffer_status *status;
+ long data_available;
+ u8 *p_bbm_data;
+ long l_first_read;
+ u8 *p_app_data = (u8 *)phm->u.d.u.data.pb_data;
+
+ if (!phw->instream_host_buffer_size[phm->obj_index]) {
+ hw_message(pao, phm, phr);
+ return;
+ }
+ hpi_init_response(phr, phm->object, phm->function, 0);
+
+ status = &interface->instream_host_buffer_status[phm->obj_index];
+ data_available = instream_get_bytes_available(status);
+ if (data_available < (long)phm->u.d.u.data.data_size) {
+ phr->error = HPI_ERROR_INVALID_DATASIZE;
+ return;
+ }
+
+ if (hpios_locked_mem_valid(&phw->instream_host_buffers[phm->
+ obj_index])) {
+ if (hpios_locked_mem_get_virt_addr(&phw->
+ instream_host_buffers[phm->obj_index],
+ (void *)&p_bbm_data)) {
+ phr->error = HPI_ERROR_INVALID_OPERATION;
+ return;
+ }
+
+ /* either all data,
+ or enough to fit from current to end of BBM buffer */
+ l_first_read =
+ min(phm->u.d.u.data.data_size,
+ status->size_in_bytes -
+ (status->host_index & (status->size_in_bytes - 1)));
+
+ memcpy(p_app_data,
+ p_bbm_data +
+ (status->host_index & (status->size_in_bytes - 1)),
+ l_first_read);
+ /* remaining data if any */
+ memcpy(p_app_data + l_first_read, p_bbm_data,
+ phm->u.d.u.data.data_size - l_first_read);
+ }
+ status->host_index += phm->u.d.u.data.data_size;
+}
+
+static void instream_get_info(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr)
+{
+ struct hpi_hw_obj *phw = pao->priv;
+ struct bus_master_interface *interface = phw->p_interface_buffer;
+ struct hpi_hostbuffer_status *status;
+ if (!phw->instream_host_buffer_size[phm->obj_index]) {
+ hw_message(pao, phm, phr);
+ return;
+ }
+
+ status = &interface->instream_host_buffer_status[phm->obj_index];
+
+ hpi_init_response(phr, phm->object, phm->function, 0);
+
+ phr->u.d.u.stream_info.state = (u16)status->stream_state;
+ phr->u.d.u.stream_info.samples_transferred =
+ status->samples_processed;
+ phr->u.d.u.stream_info.buffer_size = status->size_in_bytes;
+ phr->u.d.u.stream_info.data_available =
+ instream_get_bytes_available(status);
+ phr->u.d.u.stream_info.auxiliary_data_available =
+ status->auxiliary_data_available;
+}
+
+/*****************************************************************************/
+/* LOW-LEVEL */
+#define HPI6205_MAX_FILES_TO_LOAD 2
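+/* Up to two DSP code files may be loaded per adapter; boot_code_id[] holds
+   the family ID of the image for each slot and a zero entry means there is
+   no file to load for that DSP. */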
+
+static u16 adapter_boot_load_dsp(struct hpi_adapter_obj *pao,
+ u32 *pos_error_code)
+{
+ struct hpi_hw_obj *phw = pao->priv;
+ struct dsp_code dsp_code;
+ u16 boot_code_id[HPI6205_MAX_FILES_TO_LOAD];
+ u16 firmware_id = pao->pci.subsys_device_id;
+ u32 temp;
+ int dsp = 0, i = 0;
+ u16 err = 0;
+
+ boot_code_id[0] = HPI_ADAPTER_ASI(0x6205);
+
+ /* special cases where firmware_id != subsys ID */
+ switch (firmware_id) {
+ case HPI_ADAPTER_FAMILY_ASI(0x5000):
+ boot_code_id[0] = firmware_id;
+ firmware_id = 0;
+ break;
+ case HPI_ADAPTER_FAMILY_ASI(0x5300):
+ case HPI_ADAPTER_FAMILY_ASI(0x5400):
+ case HPI_ADAPTER_FAMILY_ASI(0x6300):
+ firmware_id = HPI_ADAPTER_FAMILY_ASI(0x6400);
+ break;
+ case HPI_ADAPTER_FAMILY_ASI(0x5600):
+ case HPI_ADAPTER_FAMILY_ASI(0x6500):
+ firmware_id = HPI_ADAPTER_FAMILY_ASI(0x6600);
+ break;
+ }
+ boot_code_id[1] = firmware_id;
+
+ /* reset DSP by writing a 1 to the WARMRESET bit */
+ temp = C6205_HDCR_WARMRESET;
+ iowrite32(temp, phw->prHDCR);
+ hpios_delay_micro_seconds(1000);
+
+ /* check that PCI i/f was configured by EEPROM */
+ temp = ioread32(phw->prHSR);
+ if ((temp & (C6205_HSR_CFGERR | C6205_HSR_EEREAD)) !=
+ C6205_HSR_EEREAD)
+ return hpi6205_error(0, HPI6205_ERROR_6205_EEPROM);
+ temp |= 0x04;
+ /* disable PINTA interrupt */
+ iowrite32(temp, phw->prHSR);
+
+ /* check control register reports PCI boot mode */
+ temp = ioread32(phw->prHDCR);
+ if (!(temp & C6205_HDCR_PCIBOOT))
+ return hpi6205_error(0, HPI6205_ERROR_6205_REG);
+
+ /* try writing a couple of numbers to the DSP page register */
+ /* and reading them back. */
+ temp = 1;
+ iowrite32(temp, phw->prDSPP);
+ if ((temp | C6205_DSPP_MAP1) != ioread32(phw->prDSPP))
+ return hpi6205_error(0, HPI6205_ERROR_6205_DSPPAGE);
+ temp = 2;
+ iowrite32(temp, phw->prDSPP);
+ if ((temp | C6205_DSPP_MAP1) != ioread32(phw->prDSPP))
+ return hpi6205_error(0, HPI6205_ERROR_6205_DSPPAGE);
+ temp = 3;
+ iowrite32(temp, phw->prDSPP);
+ if ((temp | C6205_DSPP_MAP1) != ioread32(phw->prDSPP))
+ return hpi6205_error(0, HPI6205_ERROR_6205_DSPPAGE);
+ /* reset DSP page to the correct number */
+ temp = 0;
+ iowrite32(temp, phw->prDSPP);
+ if ((temp | C6205_DSPP_MAP1) != ioread32(phw->prDSPP))
+ return hpi6205_error(0, HPI6205_ERROR_6205_DSPPAGE);
+ phw->dsp_page = 0;
+
+ /* release 6713 from reset before 6205 is bootloaded.
+ This ensures that the EMIF is inactive,
+ and the 6713 HPI gets the correct bootmode etc
+ */
+ if (boot_code_id[1] != 0) {
+ /* DSP 1 is a C6713 */
+ /* CLKX0 <- '1' release the C6205 bootmode pulldowns */
+ boot_loader_write_mem32(pao, 0, (0x018C0024L), 0x00002202);
+ hpios_delay_micro_seconds(100);
+ /* Reset the 6713 #1 - revB */
+ boot_loader_write_mem32(pao, 0, C6205_BAR0_TIMER1_CTL, 0);
+
+ /* dummy read every 4 words for 6205 advisory 1.4.4 */
+ boot_loader_read_mem32(pao, 0, 0);
+
+ hpios_delay_micro_seconds(100);
+ /* Release C6713 from reset - revB */
+ boot_loader_write_mem32(pao, 0, C6205_BAR0_TIMER1_CTL, 4);
+ hpios_delay_micro_seconds(100);
+ }
+
+ for (dsp = 0; dsp < HPI6205_MAX_FILES_TO_LOAD; dsp++) {
+ /* is there a DSP to load? */
+ if (boot_code_id[dsp] == 0)
+ continue;
+
+ err = boot_loader_config_emif(pao, dsp);
+ if (err)
+ return err;
+
+ err = boot_loader_test_internal_memory(pao, dsp);
+ if (err)
+ return err;
+
+ err = boot_loader_test_external_memory(pao, dsp);
+ if (err)
+ return err;
+
+ err = boot_loader_test_pld(pao, dsp);
+ if (err)
+ return err;
+
+ /* write the DSP code down into the DSPs memory */
+ dsp_code.ps_dev = pao->pci.p_os_data;
+ err = hpi_dsp_code_open(boot_code_id[dsp], &dsp_code,
+ pos_error_code);
+ if (err)
+ return err;
+
+ while (1) {
+ u32 length;
+ u32 address;
+ u32 type;
+ u32 *pcode;
+
+ err = hpi_dsp_code_read_word(&dsp_code, &length);
+ if (err)
+ break;
+ if (length == 0xFFFFFFFF)
+ break; /* end of code */
+
+ err = hpi_dsp_code_read_word(&dsp_code, &address);
+ if (err)
+ break;
+ err = hpi_dsp_code_read_word(&dsp_code, &type);
+ if (err)
+ break;
+ err = hpi_dsp_code_read_block(length, &dsp_code,
+ &pcode);
+ if (err)
+ break;
+ for (i = 0; i < (int)length; i++) {
+ err = boot_loader_write_mem32(pao, dsp,
+ address, *pcode);
+ if (err)
+ break;
+ /* dummy read every 4 words */
+ /* for 6205 advisory 1.4.4 */
+ if (i % 4 == 0)
+ boot_loader_read_mem32(pao, dsp,
+ address);
+ pcode++;
+ address += 4;
+ }
+
+ }
+ if (err) {
+ hpi_dsp_code_close(&dsp_code);
+ return err;
+ }
+
+ /* verify code */
+ hpi_dsp_code_rewind(&dsp_code);
+ while (1) {
+ u32 length = 0;
+ u32 address = 0;
+ u32 type = 0;
+ u32 *pcode = NULL;
+ u32 data = 0;
+
+ hpi_dsp_code_read_word(&dsp_code, &length);
+ if (length == 0xFFFFFFFF)
+ break; /* end of code */
+
+ hpi_dsp_code_read_word(&dsp_code, &address);
+ hpi_dsp_code_read_word(&dsp_code, &type);
+ hpi_dsp_code_read_block(length, &dsp_code, &pcode);
+
+ for (i = 0; i < (int)length; i++) {
+ data = boot_loader_read_mem32(pao, dsp,
+ address);
+ if (data != *pcode) {
+ err = 0;
+ break;
+ }
+ pcode++;
+ address += 4;
+ }
+ if (err)
+ break;
+ }
+ hpi_dsp_code_close(&dsp_code);
+ if (err)
+ return err;
+ }
+
+ /* After bootloading all DSPs, start DSP0 running
+ * The DSP0 code will handle starting and synchronizing with its slaves
+ */
+ if (phw->p_interface_buffer) {
+ /* we need to tell the card the physical PCI address */
+ u32 physicalPC_iaddress;
+ struct bus_master_interface *interface =
+ phw->p_interface_buffer;
+ u32 host_mailbox_address_on_dsp;
+ u32 physicalPC_iaddress_verify = 0;
+ int time_out = 10;
+ /* set ack so we know when DSP is ready to go */
+		/* (dsp_ack will be changed to H620_HIF_RESET) */
+ interface->dsp_ack = H620_HIF_UNKNOWN;
+ wmb(); /* ensure ack is written before dsp writes back */
+
+ err = hpios_locked_mem_get_phys_addr(&phw->h_locked_mem,
+ &physicalPC_iaddress);
+
+ /* locate the host mailbox on the DSP. */
+ host_mailbox_address_on_dsp = 0x80000000;
+ while ((physicalPC_iaddress != physicalPC_iaddress_verify)
+ && time_out--) {
+ err = boot_loader_write_mem32(pao, 0,
+ host_mailbox_address_on_dsp,
+ physicalPC_iaddress);
+ physicalPC_iaddress_verify =
+ boot_loader_read_mem32(pao, 0,
+ host_mailbox_address_on_dsp);
+ }
+ }
+	HPI_DEBUG_LOG(DEBUG, "starting DSPs running\n");
+ /* enable interrupts */
+ temp = ioread32(phw->prHSR);
+ temp &= ~(u32)C6205_HSR_INTAM;
+ iowrite32(temp, phw->prHSR);
+
+ /* start code running... */
+ temp = ioread32(phw->prHDCR);
+ temp |= (u32)C6205_HDCR_DSPINT;
+ iowrite32(temp, phw->prHDCR);
+
+ /* give the DSP 10ms to start up */
+ hpios_delay_micro_seconds(10000);
+ return err;
+
+}
+
+/*****************************************************************************/
+/* Bootloader utility functions */
+
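+/* Read one 32 bit word from DSP memory. For the C6205 (dsp_index 0),
+   addresses 0x01800000-0x01FFFFFF go through the BAR1 register window and
+   all other addresses through the 4 Mbyte paged BAR0 window (updating the
+   prDSPP page register as needed). For the C6713 (dsp_index 1) the access is
+   made indirectly via the 6713 HPI address/data registers. */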
+static u32 boot_loader_read_mem32(struct hpi_adapter_obj *pao, int dsp_index,
+ u32 address)
+{
+ struct hpi_hw_obj *phw = pao->priv;
+ u32 data = 0;
+ __iomem u32 *p_data;
+
+ if (dsp_index == 0) {
+ /* DSP 0 is always C6205 */
+		if ((address >= 0x01800000) && (address < 0x02000000)) {
+ /* BAR1 register access */
+ p_data = pao->pci.ap_mem_base[1] +
+ (address & 0x007fffff) /
+ sizeof(*pao->pci.ap_mem_base[1]);
+ /* HPI_DEBUG_LOG(WARNING,
+ "BAR1 access %08x\n", dwAddress); */
+ } else {
+ u32 dw4M_page = address >> 22L;
+ if (dw4M_page != phw->dsp_page) {
+ phw->dsp_page = dw4M_page;
+				/* *INDENT-OFF* */
+ iowrite32(phw->dsp_page, phw->prDSPP);
+ /* *INDENT-ON* */
+ }
+ address &= 0x3fffff; /* address within 4M page */
+ /* BAR0 memory access */
+ p_data = pao->pci.ap_mem_base[0] +
+ address / sizeof(u32);
+ }
+ data = ioread32(p_data);
+ } else if (dsp_index == 1) {
+ /* DSP 1 is a C6713 */
+ u32 lsb;
+ boot_loader_write_mem32(pao, 0, HPIAL_ADDR, address);
+ boot_loader_write_mem32(pao, 0, HPIAH_ADDR, address >> 16);
+ lsb = boot_loader_read_mem32(pao, 0, HPIDL_ADDR);
+ data = boot_loader_read_mem32(pao, 0, HPIDH_ADDR);
+ data = (data << 16) | (lsb & 0xFFFF);
+ }
+ return data;
+}
+
+static u16 boot_loader_write_mem32(struct hpi_adapter_obj *pao, int dsp_index,
+ u32 address, u32 data)
+{
+ struct hpi_hw_obj *phw = pao->priv;
+ u16 err = 0;
+ __iomem u32 *p_data;
+ /* u32 dwVerifyData=0; */
+
+ if (dsp_index == 0) {
+ /* DSP 0 is always C6205 */
+		if ((address >= 0x01800000) && (address < 0x02000000)) {
+ /* BAR1 - DSP register access using */
+ /* Non-prefetchable PCI access */
+ p_data = pao->pci.ap_mem_base[1] +
+ (address & 0x007fffff) /
+ sizeof(*pao->pci.ap_mem_base[1]);
+ } else {
+ /* BAR0 access - all of DSP memory using */
+ /* pre-fetchable PCI access */
+ u32 dw4M_page = address >> 22L;
+ if (dw4M_page != phw->dsp_page) {
+ phw->dsp_page = dw4M_page;
+ /* *INDENT-OFF* */
+ iowrite32(phw->dsp_page, phw->prDSPP);
+ /* *INDENT-ON* */
+ }
+ address &= 0x3fffff; /* address within 4M page */
+ p_data = pao->pci.ap_mem_base[0] +
+ address / sizeof(u32);
+ }
+ iowrite32(data, p_data);
+ } else if (dsp_index == 1) {
+ /* DSP 1 is a C6713 */
+ boot_loader_write_mem32(pao, 0, HPIAL_ADDR, address);
+ boot_loader_write_mem32(pao, 0, HPIAH_ADDR, address >> 16);
+
+ /* dummy read every 4 words for 6205 advisory 1.4.4 */
+ boot_loader_read_mem32(pao, 0, 0);
+
+ boot_loader_write_mem32(pao, 0, HPIDL_ADDR, data);
+ boot_loader_write_mem32(pao, 0, HPIDH_ADDR, data >> 16);
+
+ /* dummy read every 4 words for 6205 advisory 1.4.4 */
+ boot_loader_read_mem32(pao, 0, 0);
+ } else
+ err = hpi6205_error(dsp_index, HPI6205_ERROR_BAD_DSPINDEX);
+ return err;
+}
+
+static u16 boot_loader_config_emif(struct hpi_adapter_obj *pao, int dsp_index)
+{
+ u16 err = 0;
+
+ if (dsp_index == 0) {
+ u32 setting;
+
+ /* DSP 0 is always C6205 */
+
+ /* Set the EMIF */
+ /* memory map of C6205 */
+ /* 00000000-0000FFFF 16Kx32 internal program */
+ /* 00400000-00BFFFFF CE0 2Mx32 SDRAM running @ 100MHz */
+
+ /* EMIF config */
+ /*------------ */
+ /* Global EMIF control */
+ boot_loader_write_mem32(pao, dsp_index, 0x01800000, 0x3779);
+#define WS_OFS 28
+#define WST_OFS 22
+#define WH_OFS 20
+#define RS_OFS 16
+#define RST_OFS 8
+#define MTYPE_OFS 4
+#define RH_OFS 0
+
+ /* EMIF CE0 setup - 2Mx32 Sync DRAM on ASI5000 cards only */
+ setting = 0x00000030;
+ boot_loader_write_mem32(pao, dsp_index, 0x01800008, setting);
+ if (setting != boot_loader_read_mem32(pao, dsp_index,
+ 0x01800008))
+ return hpi6205_error(dsp_index,
+ HPI6205_ERROR_DSP_EMIF);
+
+ /* EMIF CE1 setup - 32 bit async. This is 6713 #1 HPI, */
+ /* which occupies D15..0. 6713 starts at 27MHz, so need */
+ /* plenty of wait states. See dsn8701.rtf, and 6713 errata. */
+ /* WST should be 71, but 63 is max possible */
+ setting =
+ (1L << WS_OFS) | (63L << WST_OFS) | (1L << WH_OFS) |
+ (1L << RS_OFS) | (63L << RST_OFS) | (1L << RH_OFS) |
+ (2L << MTYPE_OFS);
+ boot_loader_write_mem32(pao, dsp_index, 0x01800004, setting);
+ if (setting != boot_loader_read_mem32(pao, dsp_index,
+ 0x01800004))
+ return hpi6205_error(dsp_index,
+ HPI6205_ERROR_DSP_EMIF);
+
+ /* EMIF CE2 setup - 32 bit async. This is 6713 #2 HPI, */
+ /* which occupies D15..0. 6713 starts at 27MHz, so need */
+ /* plenty of wait states */
+ setting =
+ (1L << WS_OFS) | (28L << WST_OFS) | (1L << WH_OFS) |
+ (1L << RS_OFS) | (63L << RST_OFS) | (1L << RH_OFS) |
+ (2L << MTYPE_OFS);
+ boot_loader_write_mem32(pao, dsp_index, 0x01800010, setting);
+ if (setting != boot_loader_read_mem32(pao, dsp_index,
+ 0x01800010))
+ return hpi6205_error(dsp_index,
+ HPI6205_ERROR_DSP_EMIF);
+
+ /* EMIF CE3 setup - 32 bit async. */
+ /* This is the PLD on the ASI5000 cards only */
+ setting =
+ (1L << WS_OFS) | (10L << WST_OFS) | (1L << WH_OFS) |
+ (1L << RS_OFS) | (10L << RST_OFS) | (1L << RH_OFS) |
+ (2L << MTYPE_OFS);
+ boot_loader_write_mem32(pao, dsp_index, 0x01800014, setting);
+ if (setting != boot_loader_read_mem32(pao, dsp_index,
+ 0x01800014))
+ return hpi6205_error(dsp_index,
+ HPI6205_ERROR_DSP_EMIF);
+
+ /* set EMIF SDRAM control for 2Mx32 SDRAM (512x32x4 bank) */
+ /* need to use this else DSP code crashes? */
+ boot_loader_write_mem32(pao, dsp_index, 0x01800018,
+ 0x07117000);
+
+ /* EMIF SDRAM Refresh Timing */
+ /* EMIF SDRAM timing (orig = 0x410, emulator = 0x61a) */
+ boot_loader_write_mem32(pao, dsp_index, 0x0180001C,
+ 0x00000410);
+
+ } else if (dsp_index == 1) {
+ /* test access to the C6713s HPI registers */
+ u32 write_data = 0, read_data = 0, i = 0;
+
+		/* Set up HPIC for little endian, by setting HPIC:HWOB=1 */
+ write_data = 1;
+ boot_loader_write_mem32(pao, 0, HPICL_ADDR, write_data);
+ boot_loader_write_mem32(pao, 0, HPICH_ADDR, write_data);
+ /* C67 HPI is on lower 16bits of 32bit EMIF */
+ read_data =
+ 0xFFF7 & boot_loader_read_mem32(pao, 0, HPICL_ADDR);
+ if (write_data != read_data) {
+ err = hpi6205_error(dsp_index,
+ HPI6205_ERROR_C6713_HPIC);
+ HPI_DEBUG_LOG(ERROR, "HPICL %x %x\n", write_data,
+ read_data);
+
+ return err;
+ }
+ /* HPIA - walking ones test */
+ write_data = 1;
+ for (i = 0; i < 32; i++) {
+ boot_loader_write_mem32(pao, 0, HPIAL_ADDR,
+ write_data);
+ boot_loader_write_mem32(pao, 0, HPIAH_ADDR,
+ (write_data >> 16));
+ read_data =
+ 0xFFFF & boot_loader_read_mem32(pao, 0,
+ HPIAL_ADDR);
+ read_data =
+ read_data | ((0xFFFF &
+ boot_loader_read_mem32(pao, 0,
+ HPIAH_ADDR))
+ << 16);
+ if (read_data != write_data) {
+ err = hpi6205_error(dsp_index,
+ HPI6205_ERROR_C6713_HPIA);
+ HPI_DEBUG_LOG(ERROR, "HPIA %x %x\n",
+ write_data, read_data);
+ return err;
+ }
+ write_data = write_data << 1;
+ }
+
+		/* Set up the C67x PLL.
+		 * The C6713 datasheet says the PLL cannot be programmed from
+		 * the HPI, and indeed if we try to set the PLL multiplier from
+		 * the HPI the PLL does not seem to lock, so we enable the PLL
+		 * and use the default multiplier of x7, which for a 27MHz
+		 * clock gives a DSP speed of 189MHz.
+		 */
+ /* bypass PLL */
+ boot_loader_write_mem32(pao, dsp_index, 0x01B7C100, 0x0000);
+ hpios_delay_micro_seconds(1000);
+ /* EMIF = 189/3=63MHz */
+ boot_loader_write_mem32(pao, dsp_index, 0x01B7C120, 0x8002);
+ /* peri = 189/2 */
+ boot_loader_write_mem32(pao, dsp_index, 0x01B7C11C, 0x8001);
+ /* cpu = 189/1 */
+ boot_loader_write_mem32(pao, dsp_index, 0x01B7C118, 0x8000);
+ hpios_delay_micro_seconds(1000);
+ /* ** SGT test to take GPO3 high when we start the PLL */
+ /* and low when the delay is completed */
+ /* FSX0 <- '1' (GPO3) */
+ boot_loader_write_mem32(pao, 0, (0x018C0024L), 0x00002A0A);
+ /* PLL not bypassed */
+ boot_loader_write_mem32(pao, dsp_index, 0x01B7C100, 0x0001);
+ hpios_delay_micro_seconds(1000);
+ /* FSX0 <- '0' (GPO3) */
+ boot_loader_write_mem32(pao, 0, (0x018C0024L), 0x00002A02);
+
+ /* 6205 EMIF CE1 resetup - 32 bit async. */
+ /* Now 6713 #1 is running at 189MHz can reduce waitstates */
+ boot_loader_write_mem32(pao, 0, 0x01800004, /* CE1 */
+ (1L << WS_OFS) | (8L << WST_OFS) | (1L << WH_OFS) |
+ (1L << RS_OFS) | (12L << RST_OFS) | (1L << RH_OFS) |
+ (2L << MTYPE_OFS));
+
+ hpios_delay_micro_seconds(1000);
+
+ /* check that we can read one of the PLL registers */
+ /* PLL should not be bypassed! */
+ if ((boot_loader_read_mem32(pao, dsp_index, 0x01B7C100) & 0xF)
+ != 0x0001) {
+ err = hpi6205_error(dsp_index,
+ HPI6205_ERROR_C6713_PLL);
+ return err;
+ }
+ /* setup C67x EMIF (note this is the only use of
+ BAR1 via BootLoader_WriteMem32) */
+ boot_loader_write_mem32(pao, dsp_index, C6713_EMIF_GCTL,
+ 0x000034A8);
+ boot_loader_write_mem32(pao, dsp_index, C6713_EMIF_CE0,
+ 0x00000030);
+ boot_loader_write_mem32(pao, dsp_index, C6713_EMIF_SDRAMEXT,
+ 0x001BDF29);
+ boot_loader_write_mem32(pao, dsp_index, C6713_EMIF_SDRAMCTL,
+ 0x47117000);
+ boot_loader_write_mem32(pao, dsp_index,
+ C6713_EMIF_SDRAMTIMING, 0x00000410);
+
+ hpios_delay_micro_seconds(1000);
+ } else if (dsp_index == 2) {
+ /* DSP 2 is a C6713 */
+
+ } else
+ err = hpi6205_error(dsp_index, HPI6205_ERROR_BAD_DSPINDEX);
+ return err;
+}
+
+static u16 boot_loader_test_memory(struct hpi_adapter_obj *pao, int dsp_index,
+ u32 start_address, u32 length)
+{
+ u32 i = 0, j = 0;
+ u32 test_addr = 0;
+ u32 test_data = 0, data = 0;
+
+ length = 1000;
+
+ /* for 1st word, test each bit in the 32bit word, */
+ /* dwLength specifies number of 32bit words to test */
+ /*for(i=0; i<dwLength; i++) */
+ i = 0;
+ {
+ test_addr = start_address + i * 4;
+ test_data = 0x00000001;
+ for (j = 0; j < 32; j++) {
+ boot_loader_write_mem32(pao, dsp_index, test_addr,
+ test_data);
+ data = boot_loader_read_mem32(pao, dsp_index,
+ test_addr);
+ if (data != test_data) {
+ HPI_DEBUG_LOG(VERBOSE,
+ "memtest error details "
+ "%08x %08x %08x %i\n", test_addr,
+ test_data, data, dsp_index);
+ return 1; /* error */
+ }
+ test_data = test_data << 1;
+ } /* for(j) */
+ } /* for(i) */
+
+ /* for the next 100 locations test each location, leaving it as zero */
+ /* write a zero to the next word in memory before we read */
+ /* the previous write to make sure every memory location is unique */
+ for (i = 0; i < 100; i++) {
+ test_addr = start_address + i * 4;
+ test_data = 0xA5A55A5A;
+ boot_loader_write_mem32(pao, dsp_index, test_addr, test_data);
+ boot_loader_write_mem32(pao, dsp_index, test_addr + 4, 0);
+ data = boot_loader_read_mem32(pao, dsp_index, test_addr);
+ if (data != test_data) {
+ HPI_DEBUG_LOG(VERBOSE,
+ "memtest error details "
+ "%08x %08x %08x %i\n", test_addr, test_data,
+ data, dsp_index);
+ return 1; /* error */
+ }
+ /* leave location as zero */
+ boot_loader_write_mem32(pao, dsp_index, test_addr, 0x0);
+ }
+
+ /* zero out entire memory block */
+ for (i = 0; i < length; i++) {
+ test_addr = start_address + i * 4;
+ boot_loader_write_mem32(pao, dsp_index, test_addr, 0x0);
+ }
+ return 0;
+}
+
+static u16 boot_loader_test_internal_memory(struct hpi_adapter_obj *pao,
+ int dsp_index)
+{
+ int err = 0;
+ if (dsp_index == 0) {
+ /* DSP 0 is a C6205 */
+ /* 64K prog mem */
+ err = boot_loader_test_memory(pao, dsp_index, 0x00000000,
+ 0x10000);
+ if (!err)
+ /* 64K data mem */
+ err = boot_loader_test_memory(pao, dsp_index,
+ 0x80000000, 0x10000);
+ } else if ((dsp_index == 1) || (dsp_index == 2)) {
+ /* DSP 1&2 are a C6713 */
+ /* 192K internal mem */
+ err = boot_loader_test_memory(pao, dsp_index, 0x00000000,
+ 0x30000);
+ if (!err)
+ /* 64K internal mem / L2 cache */
+ err = boot_loader_test_memory(pao, dsp_index,
+ 0x00030000, 0x10000);
+ } else
+ return hpi6205_error(dsp_index, HPI6205_ERROR_BAD_DSPINDEX);
+
+ if (err)
+ return hpi6205_error(dsp_index, HPI6205_ERROR_DSP_INTMEM);
+ else
+ return 0;
+}
+
+static u16 boot_loader_test_external_memory(struct hpi_adapter_obj *pao,
+ int dsp_index)
+{
+ u32 dRAM_start_address = 0;
+ u32 dRAM_size = 0;
+
+ if (dsp_index == 0) {
+ /* only test for SDRAM if an ASI5000 card */
+ if (pao->pci.subsys_device_id == 0x5000) {
+ /* DSP 0 is always C6205 */
+ dRAM_start_address = 0x00400000;
+ dRAM_size = 0x200000;
+ /*dwDRAMinc=1024; */
+ } else
+ return 0;
+ } else if ((dsp_index == 1) || (dsp_index == 2)) {
+ /* DSP 1 is a C6713 */
+ dRAM_start_address = 0x80000000;
+ dRAM_size = 0x200000;
+ /*dwDRAMinc=1024; */
+ } else
+ return hpi6205_error(dsp_index, HPI6205_ERROR_BAD_DSPINDEX);
+
+ if (boot_loader_test_memory(pao, dsp_index, dRAM_start_address,
+ dRAM_size))
+ return hpi6205_error(dsp_index, HPI6205_ERROR_DSP_EXTMEM);
+ return 0;
+}
+
+static u16 boot_loader_test_pld(struct hpi_adapter_obj *pao, int dsp_index)
+{
+ u32 data = 0;
+ if (dsp_index == 0) {
+ /* only test for DSP0 PLD on ASI5000 card */
+ if (pao->pci.subsys_device_id == 0x5000) {
+ /* PLD is located at CE3=0x03000000 */
+ data = boot_loader_read_mem32(pao, dsp_index,
+ 0x03000008);
+ if ((data & 0xF) != 0x5)
+ return hpi6205_error(dsp_index,
+ HPI6205_ERROR_DSP_PLD);
+ data = boot_loader_read_mem32(pao, dsp_index,
+ 0x0300000C);
+ if ((data & 0xF) != 0xA)
+ return hpi6205_error(dsp_index,
+ HPI6205_ERROR_DSP_PLD);
+ }
+ } else if (dsp_index == 1) {
+ /* DSP 1 is a C6713 */
+ if (pao->pci.subsys_device_id == 0x8700) {
+ /* PLD is located at CE1=0x90000000 */
+ data = boot_loader_read_mem32(pao, dsp_index,
+ 0x90000010);
+ if ((data & 0xFF) != 0xAA)
+ return hpi6205_error(dsp_index,
+ HPI6205_ERROR_DSP_PLD);
+ /* 8713 - LED on */
+ boot_loader_write_mem32(pao, dsp_index, 0x90000000,
+ 0x02);
+ }
+ }
+ return 0;
+}
+
+/** Transfer data to or from the DSP.
+ operation is H620_HIF_SEND_DATA or H620_HIF_GET_DATA.
+ Data is moved through the bus-master interface buffer in chunks of at
+ most HPI6205_SIZEOF_DATA bytes.
+*/
+static short hpi6205_transfer_data(struct hpi_adapter_obj *pao, u8 *p_data,
+ u32 data_size, int operation)
+{
+ struct hpi_hw_obj *phw = pao->priv;
+ u32 data_transferred = 0;
+ u16 err = 0;
+#ifndef HPI6205_NO_HSR_POLL
+ u32 time_out;
+#endif
+ u32 temp2;
+ struct bus_master_interface *interface = phw->p_interface_buffer;
+
+ if (!p_data)
+ return HPI_ERROR_INVALID_DATA_TRANSFER;
+
+ data_size &= ~3L; /* round data_size down to nearest 4 bytes */
+
+ /* make sure state is IDLE */
+ if (!wait_dsp_ack(phw, H620_HIF_IDLE, HPI6205_TIMEOUT))
+ return HPI_ERROR_DSP_HARDWARE;
+
+ while (data_transferred < data_size) {
+ u32 this_copy = data_size - data_transferred;
+
+ if (this_copy > HPI6205_SIZEOF_DATA)
+ this_copy = HPI6205_SIZEOF_DATA;
+
+ if (operation == H620_HIF_SEND_DATA)
+ memcpy((void *)&interface->u.b_data[0],
+ &p_data[data_transferred], this_copy);
+
+ interface->transfer_size_in_bytes = this_copy;
+
+#ifdef HPI6205_NO_HSR_POLL
+ /* DSP must change this back to nOperation */
+ interface->dsp_ack = H620_HIF_IDLE;
+#endif
+
+ send_dsp_command(phw, operation);
+
+#ifdef HPI6205_NO_HSR_POLL
+ temp2 = wait_dsp_ack(phw, operation, HPI6205_TIMEOUT);
+ HPI_DEBUG_LOG(DEBUG, "spun %d times for data xfer of %d\n",
+ HPI6205_TIMEOUT - temp2, this_copy);
+
+ if (!temp2) {
+ /* timed out */
+ HPI_DEBUG_LOG(ERROR,
+ "timed out waiting for " "state %d got %d\n",
+ operation, interface->dsp_ack);
+
+ break;
+ }
+#else
+ /* spin waiting on the result */
+ time_out = HPI6205_TIMEOUT;
+ temp2 = 0;
+ while ((temp2 == 0) && time_out--) {
+ /* give 16k bus mastering transfer time to happen */
+ /*(16k / 132Mbytes/s = 122usec) */
+ hpios_delay_micro_seconds(20);
+ temp2 = ioread32(phw->prHSR);
+ temp2 &= C6205_HSR_INTSRC;
+ }
+ HPI_DEBUG_LOG(DEBUG, "spun %d times for data xfer of %d\n",
+ HPI6205_TIMEOUT - time_out, this_copy);
+ if (temp2 == C6205_HSR_INTSRC) {
+ HPI_DEBUG_LOG(VERBOSE,
+ "interrupt from HIF <data> OK\n");
+ /*
+ if(interface->dwDspAck != nOperation) {
+ HPI_DEBUG_LOG(DEBUG("interface->dwDspAck=%d,
+ expected %d \n",
+ interface->dwDspAck,nOperation);
+ }
+ */
+ }
+/* need to handle this differently... */
+ else {
+ HPI_DEBUG_LOG(ERROR,
+ "interrupt from HIF <data> BAD\n");
+ err = HPI_ERROR_DSP_HARDWARE;
+ }
+
+ /* reset the interrupt from the DSP */
+ iowrite32(C6205_HSR_INTSRC, phw->prHSR);
+#endif
+ if (operation == H620_HIF_GET_DATA)
+ memcpy(&p_data[data_transferred],
+ (void *)&interface->u.b_data[0], this_copy);
+
+ data_transferred += this_copy;
+ }
+ if (interface->dsp_ack != operation)
+ HPI_DEBUG_LOG(DEBUG, "interface->dsp_ack=%d, expected %d\n",
+ interface->dsp_ack, operation);
+ /* err=HPI_ERROR_DSP_HARDWARE; */
+
+ send_dsp_command(phw, H620_HIF_IDLE);
+
+ return err;
+}
+
+/* Wait for up to timeout_us microseconds for the DSP
+ to signal state by DMA into dsp_ack.
+ Returns roughly the time remaining in microseconds, or 0 on timeout.
+*/
+static int wait_dsp_ack(struct hpi_hw_obj *phw, int state, int timeout_us)
+{
+ struct bus_master_interface *interface = phw->p_interface_buffer;
+ int t = timeout_us / 4;
+
+ rmb(); /* ensure interface->dsp_ack is up to date */
+ while ((interface->dsp_ack != state) && --t) {
+ hpios_delay_micro_seconds(4);
+ rmb(); /* DSP changes dsp_ack by DMA */
+ }
+
+ /*HPI_DEBUG_LOG(VERBOSE, "Spun %d for %d\n", timeout_us/4-t, state); */
+ return t * 4;
+}
+
+/* set the busmaster interface to cmd, then interrupt the DSP */
+static void send_dsp_command(struct hpi_hw_obj *phw, int cmd)
+{
+ struct bus_master_interface *interface = phw->p_interface_buffer;
+
+ u32 r;
+
+ interface->host_cmd = cmd;
+ wmb(); /* DSP gets state by DMA, make sure it is written to memory */
+ /* before we interrupt the DSP */
+ r = ioread32(phw->prHDCR);
+ r |= (u32)C6205_HDCR_DSPINT;
+ iowrite32(r, phw->prHDCR);
+ r &= ~(u32)C6205_HDCR_DSPINT;
+ iowrite32(r, phw->prHDCR);
+}
+
+static unsigned int message_count;
+
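+/* Send one message to the DSP and collect its response: copy the message
+   into the shared bus-master interface buffer, command H620_HIF_GET_RESP,
+   wait for the DSP to acknowledge (and, when HSR polling is compiled in, for
+   the HIF interrupt flag), then copy the response back out and return the
+   interface to idle. */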
+static u16 message_response_sequence(struct hpi_adapter_obj *pao,
+ struct hpi_message *phm, struct hpi_response *phr)
+{
+#ifndef HPI6205_NO_HSR_POLL
+ u32 temp2;
+#endif
+ u32 time_out, time_out2;
+ struct hpi_hw_obj *phw = pao->priv;
+ struct bus_master_interface *interface = phw->p_interface_buffer;
+ u16 err = 0;
+
+ message_count++;
+ /* Assume buffer of type struct bus_master_interface
+ is allocated "noncacheable" */
+
+ if (!wait_dsp_ack(phw, H620_HIF_IDLE, HPI6205_TIMEOUT)) {
+ HPI_DEBUG_LOG(DEBUG, "timeout waiting for idle\n");
+ return hpi6205_error(0, HPI6205_ERROR_MSG_RESP_IDLE_TIMEOUT);
+ }
+ interface->u.message_buffer = *phm;
+ /* signal we want a response */
+ send_dsp_command(phw, H620_HIF_GET_RESP);
+
+ time_out2 = wait_dsp_ack(phw, H620_HIF_GET_RESP, HPI6205_TIMEOUT);
+
+ if (time_out2 == 0) {
+ HPI_DEBUG_LOG(ERROR,
+ "(%u) timed out waiting for " "GET_RESP state [%x]\n",
+ message_count, interface->dsp_ack);
+ } else {
+ HPI_DEBUG_LOG(VERBOSE,
+ "(%u) transition to GET_RESP after %u\n",
+ message_count, HPI6205_TIMEOUT - time_out2);
+ }
+ /* spin waiting on HIF interrupt flag (end of msg process) */
+ time_out = HPI6205_TIMEOUT;
+
+#ifndef HPI6205_NO_HSR_POLL
+ temp2 = 0;
+ while ((temp2 == 0) && --time_out) {
+ temp2 = ioread32(phw->prHSR);
+ temp2 &= C6205_HSR_INTSRC;
+ hpios_delay_micro_seconds(1);
+ }
+ if (temp2 == C6205_HSR_INTSRC) {
+ rmb(); /* ensure we see latest value for dsp_ack */
+ if ((interface->dsp_ack != H620_HIF_GET_RESP)) {
+ HPI_DEBUG_LOG(DEBUG,
+ "(%u)interface->dsp_ack(0x%x) != "
+ "H620_HIF_GET_RESP, t=%u\n", message_count,
+ interface->dsp_ack,
+ HPI6205_TIMEOUT - time_out);
+ } else {
+ HPI_DEBUG_LOG(VERBOSE,
+ "(%u)int with GET_RESP after %u\n",
+ message_count, HPI6205_TIMEOUT - time_out);
+ }
+
+ } else {
+ /* can we do anything else in response to the error ? */
+ HPI_DEBUG_LOG(ERROR,
+ "interrupt from HIF module BAD (function %x)\n",
+ phm->function);
+ }
+
+ /* reset the interrupt from the DSP */
+ iowrite32(C6205_HSR_INTSRC, phw->prHSR);
+#endif
+
+ /* read the result */
+ if (time_out != 0)
+ *phr = interface->u.response_buffer;
+
+ /* set interface back to idle */
+ send_dsp_command(phw, H620_HIF_IDLE);
+
+ if ((time_out == 0) || (time_out2 == 0)) {
+ HPI_DEBUG_LOG(DEBUG, "something timed out!\n");
+ return hpi6205_error(0, HPI6205_ERROR_MSG_RESP_TIMEOUT);
+ }
+ /* special case for adapter close - */
+ /* wait for the DSP to indicate it is idle */
+ if (phm->function == HPI_ADAPTER_CLOSE) {
+ if (!wait_dsp_ack(phw, H620_HIF_IDLE, HPI6205_TIMEOUT)) {
+ HPI_DEBUG_LOG(DEBUG,
+ "timeout waiting for idle "
+ "(on adapter_close)\n");
+ return hpi6205_error(0,
+ HPI6205_ERROR_MSG_RESP_IDLE_TIMEOUT);
+ }
+ }
+ err = hpi_validate_response(phm, phr);
+ return err;
+}
+
+static void hw_message(struct hpi_adapter_obj *pao, struct hpi_message *phm,
+ struct hpi_response *phr)
+{
+
+ u16 err = 0;
+
+ hpios_dsplock_lock(pao);
+
+ err = message_response_sequence(pao, phm, phr);
+
+ /* maybe an error response */
+ if (err) {
+ /* something failed in the HPI/DSP interface */
+ phr->error = err;
+ pao->dsp_crashed++;
+
+ /* just the header of the response is valid */
+ phr->size = sizeof(struct hpi_response_header);
+ goto err;
+ } else
+ pao->dsp_crashed = 0;
+
+ if (phr->error != 0) /* something failed in the DSP */
+ goto err;
+
+ switch (phm->function) {
+ case HPI_OSTREAM_WRITE:
+ case HPI_ISTREAM_ANC_WRITE:
+ err = hpi6205_transfer_data(pao, phm->u.d.u.data.pb_data,
+ phm->u.d.u.data.data_size, H620_HIF_SEND_DATA);
+ break;
+
+ case HPI_ISTREAM_READ:
+ case HPI_OSTREAM_ANC_READ:
+ err = hpi6205_transfer_data(pao, phm->u.d.u.data.pb_data,
+ phm->u.d.u.data.data_size, H620_HIF_GET_DATA);
+ break;
+
+ case HPI_CONTROL_SET_STATE:
+ if (phm->object == HPI_OBJ_CONTROLEX
+ && phm->u.cx.attribute == HPI_COBRANET_SET_DATA)
+ err = hpi6205_transfer_data(pao,
+ phm->u.cx.u.cobranet_bigdata.pb_data,
+ phm->u.cx.u.cobranet_bigdata.byte_count,
+ H620_HIF_SEND_DATA);
+ break;
+
+ case HPI_CONTROL_GET_STATE:
+ if (phm->object == HPI_OBJ_CONTROLEX
+ && phm->u.cx.attribute == HPI_COBRANET_GET_DATA)
+ err = hpi6205_transfer_data(pao,
+ phm->u.cx.u.cobranet_bigdata.pb_data,
+ phr->u.cx.u.cobranet_data.byte_count,
+ H620_HIF_GET_DATA);
+ break;
+ }
+ phr->error = err;
+
+err:
+ hpios_dsplock_unlock(pao);
+
+ return;
+}
diff --git a/sound/pci/asihpi/hpi6205.h b/sound/pci/asihpi/hpi6205.h
new file mode 100644
index 000000000000..1adae0857cda
--- /dev/null
+++ b/sound/pci/asihpi/hpi6205.h
@@ -0,0 +1,93 @@
+/*****************************************************************************
+
+ AudioScience HPI driver
+ Copyright (C) 1997-2010 AudioScience Inc. <support@audioscience.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation;
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+Host Interface module for an ASI6205 based
+bus mastering PCI adapter.
+
+Copyright AudioScience, Inc., 2003
+******************************************************************************/
+
+#ifndef _HPI6205_H_
+#define _HPI6205_H_
+
+/* transitional conditional compile shared between host and DSP */
+/* #define HPI6205_NO_HSR_POLL */
+
+#include "hpi_internal.h"
+
+/***********************************************************
+ Defines used for basic messaging
+************************************************************/
+#define H620_HIF_RESET 0
+#define H620_HIF_IDLE 1
+#define H620_HIF_GET_RESP 2
+#define H620_HIF_DATA_DONE 3
+#define H620_HIF_DATA_MASK 0x10
+#define H620_HIF_SEND_DATA 0x14
+#define H620_HIF_GET_DATA 0x15
+#define H620_HIF_UNKNOWN 0x0000ffff
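+
+/* Handshake values exchanged through host_cmd and dsp_ack in
+   struct bus_master_interface (defined below). */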
+
+/***********************************************************
+ Types used for mixer control caching
+************************************************************/
+
+#define H620_MAX_ISTREAMS 32
+#define H620_MAX_OSTREAMS 32
+#define HPI_NMIXER_CONTROLS 2048
+
+/*********************************************************************
+This is used for dynamic control cache allocation
+**********************************************************************/
+struct controlcache_6205 {
+ u32 number_of_controls;
+ u32 physical_address32;
+ u32 size_in_bytes;
+};
+
+/*********************************************************************
+This is used for dynamic allocation of async event array
+**********************************************************************/
+struct async_event_buffer_6205 {
+ u32 physical_address32;
+ u32 spare;
+ struct hpi_fifo_buffer b;
+};
+
+/***********************************************************
+The Host located memory buffer that the 6205 will bus master
+in and out of.
+************************************************************/
+#define HPI6205_SIZEOF_DATA (16*1024)
+struct bus_master_interface {
+ u32 host_cmd;
+ u32 dsp_ack;
+ u32 transfer_size_in_bytes;
+ union {
+ struct hpi_message message_buffer;
+ struct hpi_response response_buffer;
+ u8 b_data[HPI6205_SIZEOF_DATA];
+ } u;
+ struct controlcache_6205 control_cache;
+ struct async_event_buffer_6205 async_buffer;
+ struct hpi_hostbuffer_status
+ instream_host_buffer_status[H620_MAX_ISTREAMS];
+ struct hpi_hostbuffer_status
+ outstream_host_buffer_status[H620_MAX_OSTREAMS];
+};
+
+#endif
diff --git a/sound/pci/asihpi/hpi_internal.h b/sound/pci/asihpi/hpi_internal.h
new file mode 100644
index 000000000000..f1cd6f1a0d44
--- /dev/null
+++ b/sound/pci/asihpi/hpi_internal.h
@@ -0,0 +1,1641 @@
+/******************************************************************************
+
+ AudioScience HPI driver
+ Copyright (C) 1997-2010 AudioScience Inc. <support@audioscience.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation;
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+HPI internal definitions
+
+(C) Copyright AudioScience Inc. 1996-2009
+******************************************************************************/
+
+#ifndef _HPI_INTERNAL_H_
+#define _HPI_INTERNAL_H_
+
+#include "hpi.h"
+/** maximum number of memory regions mapped to an adapter */
+#define HPI_MAX_ADAPTER_MEM_SPACES (2)
+
+/* Each OS needs its own hpios.h, or specific define as above */
+#include "hpios.h"
+
+/* physical memory allocation */
+void hpios_locked_mem_init(void);
+void hpios_locked_mem_free_all(void);
+#define hpios_locked_mem_prepare(a, b, c, d);
+#define hpios_locked_mem_unprepare(a)
+
+/** Allocate and map an area of locked memory for bus master DMA operations.
+
+On success, *p_locked_mem_handle is a valid handle and 0 is returned.
+On error, *p_locked_mem_handle is marked invalid and non-zero is returned.
+
+If this function succeeds, then hpios_locked_mem_get_virt_addr() and
+hpios_locked_mem_get_phys_addr() will always succeed on the returned handle.
+*/
+u16 hpios_locked_mem_alloc(struct consistent_dma_area *p_locked_mem_handle,
+ /**< memory handle */
+ u32 size, /**< size in bytes to allocate */
+ struct pci_dev *p_os_reference
+ /**< OS specific data required for memory allocation */
+ );
+
+/** Free mapping and memory represented by LockedMemHandle
+
+Frees any resources, then invalidates the handle.
+Returns 0 on success, 1 if handle is invalid.
+
+*/
+u16 hpios_locked_mem_free(struct consistent_dma_area *locked_mem_handle);
+
+/** Get the physical PCI address of memory represented by LockedMemHandle.
+
+If the handle is invalid, *p_physical_addr is set to zero and 1 is returned.
+*/
+u16 hpios_locked_mem_get_phys_addr(struct consistent_dma_area
+ *locked_mem_handle, u32 *p_physical_addr);
+
+/** Get the CPU (virtual) address of memory represented by LockedMemHandle.
+
+If the handle is NULL, *ppv_virtual_addr is set to NULL and 1 is returned.
+*/
+u16 hpios_locked_mem_get_virt_addr(struct consistent_dma_area
+ *locked_mem_handle, void **ppv_virtual_addr);
+
+/** Check that handle is valid
+i.e. it represents a valid memory area.
+*/
+u16 hpios_locked_mem_valid(struct consistent_dma_area *locked_mem_handle);
+
+/* timing/delay */
+void hpios_delay_micro_seconds(u32 num_micro_sec);
+
+struct hpi_message;
+struct hpi_response;
+
+typedef void hpi_handler_func(struct hpi_message *, struct hpi_response *);
+
+/* If the assert fails, compiler complains
+ something like size of array `msg' is negative.
+ Unlike linux BUILD_BUG_ON, this works outside function scope.
+*/
+#define compile_time_assert(cond, msg) \
+ typedef char ASSERT_##msg[(cond) ? 1 : -1]
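+
+/* For example:
+   compile_time_assert(sizeof(u32) == 4, u32_is_4_bytes);
+   declares a one-element char array type when the condition holds, and a
+   negative-sized array (a compile error) when it does not. */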
+
+/*/////////////////////////////////////////////////////////////////////////// */
+/* Private HPI Entity related definitions */
+
+#define STR_SIZE_FIELD_MAX 65535U
+#define STR_TYPE_FIELD_MAX 255U
+#define STR_ROLE_FIELD_MAX 255U
+
+struct hpi_entity_str {
+ uint16_t size;
+ uint8_t type;
+ uint8_t role;
+};
+
+#if defined(_MSC_VER)
+#pragma warning(push)
+#pragma warning(disable : 4200)
+#endif
+
+struct hpi_entity {
+ struct hpi_entity_str header;
+#if ! defined(HPI_OS_DSP_C6000) || (defined(HPI_OS_DSP_C6000) && (__TI_COMPILER_VERSION__ > 6000008))
+ /* DSP C6000 compiler v6.0.8 and lower
+ do not support flexible array member */
+ uint8_t value[];
+#else
+ /* NOTE! Using sizeof(struct hpi_entity) will give erroneous results */
+#define HPI_INTERNAL_WARN_ABOUT_ENTITY_VALUE
+ uint8_t value[1];
+#endif
+};
+
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif
+
+/******************************************* bus types */
+enum HPI_BUSES {
+ HPI_BUS_ISAPNP = 1,
+ HPI_BUS_PCI = 2,
+ HPI_BUS_USB = 3,
+ HPI_BUS_NET = 4
+};
+
+/******************************************* CONTROL ATTRIBUTES ****/
+/* (in order of control type ID) */
+
+ /* This allows for 255 control types, 256 unique attributes each */
+#define HPI_CTL_ATTR(ctl, ai) (HPI_CONTROL_##ctl * 0x100 + ai)
+
+/* Get the sub-index of the attribute for a control type */
+#define HPI_CTL_ATTR_INDEX(i) (i&0xff)
+
+/* Generic control attributes. */
+
+/** Enable a control.
+0=disable, 1=enable
+\note generic to all mixer plugins?
+*/
+#define HPI_GENERIC_ENABLE HPI_CTL_ATTR(GENERIC, 1)
+
+/** Enable event generation for a control.
+0=disable, 1=enable
+\note generic to all controls that can generate events
+*/
+#define HPI_GENERIC_EVENT_ENABLE HPI_CTL_ATTR(GENERIC, 2)
+
+/* Volume Control attributes */
+#define HPI_VOLUME_GAIN HPI_CTL_ATTR(VOLUME, 1)
+#define HPI_VOLUME_AUTOFADE HPI_CTL_ATTR(VOLUME, 2)
+
+/** For HPI_ControlQuery() to get the number of channels of a volume control*/
+#define HPI_VOLUME_NUM_CHANNELS HPI_CTL_ATTR(VOLUME, 6)
+#define HPI_VOLUME_RANGE HPI_CTL_ATTR(VOLUME, 10)
+
+/** Level Control attributes */
+#define HPI_LEVEL_GAIN HPI_CTL_ATTR(LEVEL, 1)
+#define HPI_LEVEL_RANGE HPI_CTL_ATTR(LEVEL, 10)
+
+/* Meter Control attributes */
+/** return RMS signal level */
+#define HPI_METER_RMS HPI_CTL_ATTR(METER, 1)
+/** return peak signal level */
+#define HPI_METER_PEAK HPI_CTL_ATTR(METER, 2)
+/** ballistics for ALL rms meters on adapter */
+#define HPI_METER_RMS_BALLISTICS HPI_CTL_ATTR(METER, 3)
+/** ballistics for ALL peak meters on adapter */
+#define HPI_METER_PEAK_BALLISTICS HPI_CTL_ATTR(METER, 4)
+
+/** For HPI_ControlQuery() to get the number of channels of a meter control*/
+#define HPI_METER_NUM_CHANNELS HPI_CTL_ATTR(METER, 5)
+
+/* Multiplexer control attributes */
+#define HPI_MULTIPLEXER_SOURCE HPI_CTL_ATTR(MULTIPLEXER, 1)
+#define HPI_MULTIPLEXER_QUERYSOURCE HPI_CTL_ATTR(MULTIPLEXER, 2)
+
+/** AES/EBU transmitter control attributes */
+/** AESEBU or SPDIF */
+#define HPI_AESEBUTX_FORMAT HPI_CTL_ATTR(AESEBUTX, 1)
+#define HPI_AESEBUTX_SAMPLERATE HPI_CTL_ATTR(AESEBUTX, 3)
+#define HPI_AESEBUTX_CHANNELSTATUS HPI_CTL_ATTR(AESEBUTX, 4)
+#define HPI_AESEBUTX_USERDATA HPI_CTL_ATTR(AESEBUTX, 5)
+
+/** AES/EBU receiver control attributes */
+#define HPI_AESEBURX_FORMAT HPI_CTL_ATTR(AESEBURX, 1)
+#define HPI_AESEBURX_ERRORSTATUS HPI_CTL_ATTR(AESEBURX, 2)
+#define HPI_AESEBURX_SAMPLERATE HPI_CTL_ATTR(AESEBURX, 3)
+#define HPI_AESEBURX_CHANNELSTATUS HPI_CTL_ATTR(AESEBURX, 4)
+#define HPI_AESEBURX_USERDATA HPI_CTL_ATTR(AESEBURX, 5)
+
+/** \defgroup tuner_defs Tuners
+\{
+*/
+/** \defgroup tuner_attrs Tuner control attributes
+\{
+*/
+#define HPI_TUNER_BAND HPI_CTL_ATTR(TUNER, 1)
+#define HPI_TUNER_FREQ HPI_CTL_ATTR(TUNER, 2)
+#define HPI_TUNER_LEVEL HPI_CTL_ATTR(TUNER, 3)
+#define HPI_TUNER_AUDIOMUTE HPI_CTL_ATTR(TUNER, 4)
+/* use TUNER_STATUS instead */
+#define HPI_TUNER_VIDEO_STATUS HPI_CTL_ATTR(TUNER, 5)
+#define HPI_TUNER_GAIN HPI_CTL_ATTR(TUNER, 6)
+#define HPI_TUNER_STATUS HPI_CTL_ATTR(TUNER, 7)
+#define HPI_TUNER_MODE HPI_CTL_ATTR(TUNER, 8)
+/** RDS data. */
+#define HPI_TUNER_RDS HPI_CTL_ATTR(TUNER, 9)
+/** Audio pre-emphasis. */
+#define HPI_TUNER_DEEMPHASIS HPI_CTL_ATTR(TUNER, 10)
+/** HD Radio tuner program control. */
+#define HPI_TUNER_PROGRAM HPI_CTL_ATTR(TUNER, 11)
+/** HD Radio tuner digital signal quality. */
+#define HPI_TUNER_HDRADIO_SIGNAL_QUALITY HPI_CTL_ATTR(TUNER, 12)
+/** HD Radio SDK firmware version. */
+#define HPI_TUNER_HDRADIO_SDK_VERSION HPI_CTL_ATTR(TUNER, 13)
+/** HD Radio DSP firmware version. */
+#define HPI_TUNER_HDRADIO_DSP_VERSION HPI_CTL_ATTR(TUNER, 14)
+
+/** \} */
+
+/** \defgroup pads_attrs Tuner PADs control attributes
+\{
+*/
+/** The text string containing the station/channel combination. */
+#define HPI_PAD_CHANNEL_NAME HPI_CTL_ATTR(PAD, 1)
+/** The text string containing the artist. */
+#define HPI_PAD_ARTIST HPI_CTL_ATTR(PAD, 2)
+/** The text string containing the title. */
+#define HPI_PAD_TITLE HPI_CTL_ATTR(PAD, 3)
+/** The text string containing the comment. */
+#define HPI_PAD_COMMENT HPI_CTL_ATTR(PAD, 4)
+/** The integer containing the PTY code. */
+#define HPI_PAD_PROGRAM_TYPE HPI_CTL_ATTR(PAD, 5)
+/** The integer containing the program identification. */
+#define HPI_PAD_PROGRAM_ID HPI_CTL_ATTR(PAD, 6)
+/** The integer containing whether traffic information is supported.
+Contains either 1 or 0. */
+#define HPI_PAD_TA_SUPPORT HPI_CTL_ATTR(PAD, 7)
+/** The integer containing whether traffic announcement is in progress.
+Contains either 1 or 0. */
+#define HPI_PAD_TA_ACTIVE HPI_CTL_ATTR(PAD, 8)
+/** \} */
+/** \} */
+
+/* VOX control attributes */
+#define HPI_VOX_THRESHOLD HPI_CTL_ATTR(VOX, 1)
+
+/*?? channel mode used hpi_multiplexer_source attribute == 1 */
+#define HPI_CHANNEL_MODE_MODE HPI_CTL_ATTR(CHANNEL_MODE, 1)
+
+/** \defgroup channel_modes Channel Modes
+Used for HPI_ChannelModeSet/Get()
+\{
+*/
+/** Left channel out = left channel in, Right channel out = right channel in. */
+#define HPI_CHANNEL_MODE_NORMAL 1
+/** Left channel out = right channel in, Right channel out = left channel in. */
+#define HPI_CHANNEL_MODE_SWAP 2
+/** Left channel out = left channel in, Right channel out = left channel in. */
+#define HPI_CHANNEL_MODE_LEFT_TO_STEREO 3
+/** Left channel out = right channel in, Right channel out = right channel in.*/
+#define HPI_CHANNEL_MODE_RIGHT_TO_STEREO 4
+/** Left channel out = (left channel in + right channel in)/2,
+ Right channel out = mute. */
+#define HPI_CHANNEL_MODE_STEREO_TO_LEFT 5
+/** Left channel out = mute,
+ Right channel out = (right channel in + left channel in)/2. */
+#define HPI_CHANNEL_MODE_STEREO_TO_RIGHT 6
+#define HPI_CHANNEL_MODE_LAST 6
+/** \} */
+
+/* Bitstream control set attributes */
+#define HPI_BITSTREAM_DATA_POLARITY HPI_CTL_ATTR(BITSTREAM, 1)
+#define HPI_BITSTREAM_CLOCK_EDGE HPI_CTL_ATTR(BITSTREAM, 2)
+#define HPI_BITSTREAM_CLOCK_SOURCE HPI_CTL_ATTR(BITSTREAM, 3)
+
+#define HPI_POLARITY_POSITIVE 0
+#define HPI_POLARITY_NEGATIVE 1
+
+/* Bitstream control get attributes */
+#define HPI_BITSTREAM_ACTIVITY 1
+
+/* SampleClock control attributes */
+#define HPI_SAMPLECLOCK_SOURCE HPI_CTL_ATTR(SAMPLECLOCK, 1)
+#define HPI_SAMPLECLOCK_SAMPLERATE HPI_CTL_ATTR(SAMPLECLOCK, 2)
+#define HPI_SAMPLECLOCK_SOURCE_INDEX HPI_CTL_ATTR(SAMPLECLOCK, 3)
+#define HPI_SAMPLECLOCK_LOCAL_SAMPLERATE\
+ HPI_CTL_ATTR(SAMPLECLOCK, 4)
+#define HPI_SAMPLECLOCK_AUTO HPI_CTL_ATTR(SAMPLECLOCK, 5)
+#define HPI_SAMPLECLOCK_LOCAL_LOCK HPI_CTL_ATTR(SAMPLECLOCK, 6)
+
+/* Microphone control attributes */
+#define HPI_MICROPHONE_PHANTOM_POWER HPI_CTL_ATTR(MICROPHONE, 1)
+
+/** Equalizer control attributes
+*/
+/** Used to get number of filters in an EQ. (Can't set) */
+#define HPI_EQUALIZER_NUM_FILTERS HPI_CTL_ATTR(EQUALIZER, 1)
+/** Set/get the filter by type, freq, Q, gain */
+#define HPI_EQUALIZER_FILTER HPI_CTL_ATTR(EQUALIZER, 2)
+/** Get the biquad coefficients */
+#define HPI_EQUALIZER_COEFFICIENTS HPI_CTL_ATTR(EQUALIZER, 3)
+
+#define HPI_COMPANDER_PARAMS HPI_CTL_ATTR(COMPANDER, 1)
+
+/* Cobranet control attributes.
+ MUST be distinct from all other control attributes.
+ This is so that host side processing can easily identify a Cobranet control
+ and apply additional host side operations (like copying data) as required.
+*/
+#define HPI_COBRANET_SET HPI_CTL_ATTR(COBRANET, 1)
+#define HPI_COBRANET_GET HPI_CTL_ATTR(COBRANET, 2)
+#define HPI_COBRANET_SET_DATA HPI_CTL_ATTR(COBRANET, 3)
+#define HPI_COBRANET_GET_DATA HPI_CTL_ATTR(COBRANET, 4)
+#define HPI_COBRANET_GET_STATUS HPI_CTL_ATTR(COBRANET, 5)
+#define HPI_COBRANET_SEND_PACKET HPI_CTL_ATTR(COBRANET, 6)
+#define HPI_COBRANET_GET_PACKET HPI_CTL_ATTR(COBRANET, 7)
+
+/*------------------------------------------------------------
+ Cobranet Chip Bridge - copied from HMI.H
+------------------------------------------------------------*/
+#define HPI_COBRANET_HMI_cobra_bridge 0x20000
+#define HPI_COBRANET_HMI_cobra_bridge_tx_pkt_buf \
+ (HPI_COBRANET_HMI_cobra_bridge + 0x1000)
+#define HPI_COBRANET_HMI_cobra_bridge_rx_pkt_buf \
+ (HPI_COBRANET_HMI_cobra_bridge + 0x2000)
+#define HPI_COBRANET_HMI_cobra_if_table1 0x110000
+#define HPI_COBRANET_HMI_cobra_if_phy_address \
+ (HPI_COBRANET_HMI_cobra_if_table1 + 0xd)
+#define HPI_COBRANET_HMI_cobra_protocolIP 0x72000
+#define HPI_COBRANET_HMI_cobra_ip_mon_currentIP \
+ (HPI_COBRANET_HMI_cobra_protocolIP + 0x0)
+#define HPI_COBRANET_HMI_cobra_ip_mon_staticIP \
+ (HPI_COBRANET_HMI_cobra_protocolIP + 0x2)
+#define HPI_COBRANET_HMI_cobra_sys 0x100000
+#define HPI_COBRANET_HMI_cobra_sys_desc \
+ (HPI_COBRANET_HMI_cobra_sys + 0x0)
+#define HPI_COBRANET_HMI_cobra_sys_objectID \
+ (HPI_COBRANET_HMI_cobra_sys + 0x100)
+#define HPI_COBRANET_HMI_cobra_sys_contact \
+ (HPI_COBRANET_HMI_cobra_sys + 0x200)
+#define HPI_COBRANET_HMI_cobra_sys_name \
+ (HPI_COBRANET_HMI_cobra_sys + 0x300)
+#define HPI_COBRANET_HMI_cobra_sys_location \
+ (HPI_COBRANET_HMI_cobra_sys + 0x400)
+
+/*------------------------------------------------------------
+ Cobranet Chip Status bits
+------------------------------------------------------------*/
+#define HPI_COBRANET_HMI_STATUS_RXPACKET 2
+#define HPI_COBRANET_HMI_STATUS_TXPACKET 3
+
+/*------------------------------------------------------------
+ Ethernet header size
+------------------------------------------------------------*/
+#define HPI_ETHERNET_HEADER_SIZE (16)
+
+/* These defines are used to fill in protocol information for an Ethernet packet
+ sent using HMI on CS18102 */
+/** ID supplied by Cirrus for ASI packets. */
+#define HPI_ETHERNET_PACKET_ID 0x85
+/** Simple packet - no special routing required */
+#define HPI_ETHERNET_PACKET_V1 0x01
+/** This packet must make its way to the host across the HPI interface */
+#define HPI_ETHERNET_PACKET_HOSTED_VIA_HMI 0x20
+/** This packet must make its way to the host across the HPI interface */
+#define HPI_ETHERNET_PACKET_HOSTED_VIA_HMI_V1 0x21
+/** This packet must make its way to the host across the HPI interface */
+#define HPI_ETHERNET_PACKET_HOSTED_VIA_HPI 0x40
+/** This packet must make its way to the host across the HPI interface */
+#define HPI_ETHERNET_PACKET_HOSTED_VIA_HPI_V1 0x41
+
+#define HPI_ETHERNET_UDP_PORT (44600) /*!< UDP messaging port */
+
+/** Base network timeout is set to 100 milliseconds. */
+#define HPI_ETHERNET_TIMEOUT_MS (100)
+
+/** \defgroup tonedet_attr Tonedetector attributes
+\{
+Used by HPI_ToneDetector_Set() and HPI_ToneDetector_Get()
+*/
+
+/** Set the threshold level of a tonedetector.
+The threshold is a negative number in units of dB/100.
+*/
+#define HPI_TONEDETECTOR_THRESHOLD HPI_CTL_ATTR(TONEDETECTOR, 1)
+
+/** Get the current state of tone detection.
+The result is a bitmap of detected tones. Pairs of bits represent the left
+and right channels, with the left channel in the LSB.
+The lowest frequency detector's state is in the least significant bits.
+*/
+#define HPI_TONEDETECTOR_STATE HPI_CTL_ATTR(TONEDETECTOR, 2)
+
+/** Get the frequency of a tonedetector band.
+*/
+#define HPI_TONEDETECTOR_FREQUENCY HPI_CTL_ATTR(TONEDETECTOR, 3)
+
+/**\}*/
+
+/** \defgroup silencedet_attr SilenceDetector attributes
+\{
+*/
+
+/** Get the current state of silence detection.
+The result is a bitmap with 1s for silent channels. The left channel is in
+the LSB.
+*/
+#define HPI_SILENCEDETECTOR_STATE \
+ HPI_CTL_ATTR(SILENCEDETECTOR, 2)
+
+/** Set the threshold level of a SilenceDetector.
+The threshold is a negative number in units of dB/100.
+*/
+#define HPI_SILENCEDETECTOR_THRESHOLD \
+ HPI_CTL_ATTR(SILENCEDETECTOR, 1)
+
+/** get/set the silence time before the detector triggers
+*/
+#define HPI_SILENCEDETECTOR_DELAY \
+ HPI_CTL_ATTR(SILENCEDETECTOR, 3)
+
+/**\}*/
+
+/* Locked memory buffer alloc/free phases */
+/** use one message to allocate or free physical memory */
+#define HPI_BUFFER_CMD_EXTERNAL 0
+/** alloc physical memory */
+#define HPI_BUFFER_CMD_INTERNAL_ALLOC 1
+/** send physical memory address to adapter */
+#define HPI_BUFFER_CMD_INTERNAL_GRANTADAPTER 2
+/** notify adapter to stop using physical buffer */
+#define HPI_BUFFER_CMD_INTERNAL_REVOKEADAPTER 3
+/** free physical buffer */
+#define HPI_BUFFER_CMD_INTERNAL_FREE 4
+
+/******************************************* CONTROLX ATTRIBUTES ****/
+/* NOTE: All controlx attributes must be unique, unlike control attributes */
+
+/*****************************************************************************/
+/*****************************************************************************/
+/******** HPI LOW LEVEL MESSAGES *******/
+/*****************************************************************************/
+/*****************************************************************************/
+/** Pnp ids */
+/** "ASI" - actual is "ASX" - need to change */
+#define HPI_ID_ISAPNP_AUDIOSCIENCE 0x0669
+/** PCI vendor ID that AudioScience uses */
+#define HPI_PCI_VENDOR_ID_AUDIOSCIENCE 0x175C
+/** PCI vendor ID that the DSP56301 has */
+#define HPI_PCI_VENDOR_ID_MOTOROLA 0x1057
+/** PCI vendor ID that TI uses */
+#define HPI_PCI_VENDOR_ID_TI 0x104C
+
+#define HPI_PCI_DEV_ID_PCI2040 0xAC60
+/** TI's C6205 PCI interface has this ID */
+#define HPI_PCI_DEV_ID_DSP6205 0xA106
+
+#define HPI_USB_VENDOR_ID_AUDIOSCIENCE 0x1257
+#define HPI_USB_W2K_TAG 0x57495341 /* "ASIW" */
+#define HPI_USB_LINUX_TAG 0x4C495341 /* "ASIL" */
+
+/** First 2 hex digits define the adapter family */
+#define HPI_ADAPTER_FAMILY_MASK 0xff00
+
+#define HPI_ADAPTER_FAMILY_ASI(f) (f & HPI_ADAPTER_FAMILY_MASK)
+#define HPI_ADAPTER_ASI(f) (f)
+
+/******************************************* message types */
+#define HPI_TYPE_MESSAGE 1
+#define HPI_TYPE_RESPONSE 2
+#define HPI_TYPE_DATA 3
+#define HPI_TYPE_SSX2BYPASS_MESSAGE 4
+
+/******************************************* object types */
+#define HPI_OBJ_SUBSYSTEM 1
+#define HPI_OBJ_ADAPTER 2
+#define HPI_OBJ_OSTREAM 3
+#define HPI_OBJ_ISTREAM 4
+#define HPI_OBJ_MIXER 5
+#define HPI_OBJ_NODE 6
+#define HPI_OBJ_CONTROL 7
+#define HPI_OBJ_NVMEMORY 8
+#define HPI_OBJ_GPIO 9
+#define HPI_OBJ_WATCHDOG 10
+#define HPI_OBJ_CLOCK 11
+#define HPI_OBJ_PROFILE 12
+#define HPI_OBJ_CONTROLEX 13
+#define HPI_OBJ_ASYNCEVENT 14
+
+#define HPI_OBJ_MAXINDEX 14
+
+/******************************************* methods/functions */
+
+#define HPI_OBJ_FUNCTION_SPACING 0x100
+#define HPI_MAKE_INDEX(obj, index) (obj * HPI_OBJ_FUNCTION_SPACING + index)
+#define HPI_EXTRACT_INDEX(fn) (fn & 0xff)
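+/* Illustrative expansion (not part of this header): with
+   HPI_OBJ_FUNCTION_SPACING == 0x100,
+
+	HPI_MAKE_INDEX(HPI_OBJ_MIXER, 5) == 5 * 0x100 + 5 == 0x505
+	HPI_EXTRACT_INDEX(0x505) == 0x05
+
+   so the high byte identifies the object and the low byte the function. */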
+
+/* SUB-SYSTEM */
+#define HPI_SUBSYS_OPEN HPI_MAKE_INDEX(HPI_OBJ_SUBSYSTEM, 1)
+#define HPI_SUBSYS_GET_VERSION HPI_MAKE_INDEX(HPI_OBJ_SUBSYSTEM, 2)
+#define HPI_SUBSYS_GET_INFO HPI_MAKE_INDEX(HPI_OBJ_SUBSYSTEM, 3)
+#define HPI_SUBSYS_FIND_ADAPTERS HPI_MAKE_INDEX(HPI_OBJ_SUBSYSTEM, 4)
+#define HPI_SUBSYS_CREATE_ADAPTER HPI_MAKE_INDEX(HPI_OBJ_SUBSYSTEM, 5)
+#define HPI_SUBSYS_CLOSE HPI_MAKE_INDEX(HPI_OBJ_SUBSYSTEM, 6)
+#define HPI_SUBSYS_DELETE_ADAPTER HPI_MAKE_INDEX(HPI_OBJ_SUBSYSTEM, 7)
+#define HPI_SUBSYS_DRIVER_LOAD HPI_MAKE_INDEX(HPI_OBJ_SUBSYSTEM, 8)
+#define HPI_SUBSYS_DRIVER_UNLOAD HPI_MAKE_INDEX(HPI_OBJ_SUBSYSTEM, 9)
+#define HPI_SUBSYS_READ_PORT_8 HPI_MAKE_INDEX(HPI_OBJ_SUBSYSTEM, 10)
+#define HPI_SUBSYS_WRITE_PORT_8 HPI_MAKE_INDEX(HPI_OBJ_SUBSYSTEM, 11)
+#define HPI_SUBSYS_GET_NUM_ADAPTERS HPI_MAKE_INDEX(HPI_OBJ_SUBSYSTEM, 12)
+#define HPI_SUBSYS_GET_ADAPTER HPI_MAKE_INDEX(HPI_OBJ_SUBSYSTEM, 13)
+#define HPI_SUBSYS_SET_NETWORK_INTERFACE HPI_MAKE_INDEX(HPI_OBJ_SUBSYSTEM, 14)
+#define HPI_SUBSYS_FUNCTION_COUNT 14
+/* ADAPTER */
+#define HPI_ADAPTER_OPEN HPI_MAKE_INDEX(HPI_OBJ_ADAPTER, 1)
+#define HPI_ADAPTER_CLOSE HPI_MAKE_INDEX(HPI_OBJ_ADAPTER, 2)
+#define HPI_ADAPTER_GET_INFO HPI_MAKE_INDEX(HPI_OBJ_ADAPTER, 3)
+#define HPI_ADAPTER_GET_ASSERT HPI_MAKE_INDEX(HPI_OBJ_ADAPTER, 4)
+#define HPI_ADAPTER_TEST_ASSERT HPI_MAKE_INDEX(HPI_OBJ_ADAPTER, 5)
+#define HPI_ADAPTER_SET_MODE HPI_MAKE_INDEX(HPI_OBJ_ADAPTER, 6)
+#define HPI_ADAPTER_GET_MODE HPI_MAKE_INDEX(HPI_OBJ_ADAPTER, 7)
+#define HPI_ADAPTER_ENABLE_CAPABILITY HPI_MAKE_INDEX(HPI_OBJ_ADAPTER, 8)
+#define HPI_ADAPTER_SELFTEST HPI_MAKE_INDEX(HPI_OBJ_ADAPTER, 9)
+#define HPI_ADAPTER_FIND_OBJECT HPI_MAKE_INDEX(HPI_OBJ_ADAPTER, 10)
+#define HPI_ADAPTER_QUERY_FLASH HPI_MAKE_INDEX(HPI_OBJ_ADAPTER, 11)
+#define HPI_ADAPTER_START_FLASH HPI_MAKE_INDEX(HPI_OBJ_ADAPTER, 12)
+#define HPI_ADAPTER_PROGRAM_FLASH HPI_MAKE_INDEX(HPI_OBJ_ADAPTER, 13)
+#define HPI_ADAPTER_SET_PROPERTY HPI_MAKE_INDEX(HPI_OBJ_ADAPTER, 14)
+#define HPI_ADAPTER_GET_PROPERTY HPI_MAKE_INDEX(HPI_OBJ_ADAPTER, 15)
+#define HPI_ADAPTER_ENUM_PROPERTY HPI_MAKE_INDEX(HPI_OBJ_ADAPTER, 16)
+#define HPI_ADAPTER_MODULE_INFO HPI_MAKE_INDEX(HPI_OBJ_ADAPTER, 17)
+#define HPI_ADAPTER_DEBUG_READ HPI_MAKE_INDEX(HPI_OBJ_ADAPTER, 18)
+#define HPI_ADAPTER_FUNCTION_COUNT 18
+/* OUTPUT STREAM */
+#define HPI_OSTREAM_OPEN HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 1)
+#define HPI_OSTREAM_CLOSE HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 2)
+#define HPI_OSTREAM_WRITE HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 3)
+#define HPI_OSTREAM_START HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 4)
+#define HPI_OSTREAM_STOP HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 5)
+#define HPI_OSTREAM_RESET HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 6)
+#define HPI_OSTREAM_GET_INFO HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 7)
+#define HPI_OSTREAM_QUERY_FORMAT HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 8)
+#define HPI_OSTREAM_DATA HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 9)
+#define HPI_OSTREAM_SET_VELOCITY HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 10)
+#define HPI_OSTREAM_SET_PUNCHINOUT HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 11)
+#define HPI_OSTREAM_SINEGEN HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 12)
+#define HPI_OSTREAM_ANC_RESET HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 13)
+#define HPI_OSTREAM_ANC_GET_INFO HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 14)
+#define HPI_OSTREAM_ANC_READ HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 15)
+#define HPI_OSTREAM_SET_TIMESCALE HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 16)
+#define HPI_OSTREAM_SET_FORMAT HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 17)
+#define HPI_OSTREAM_HOSTBUFFER_ALLOC HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 18)
+#define HPI_OSTREAM_HOSTBUFFER_FREE HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 19)
+#define HPI_OSTREAM_GROUP_ADD HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 20)
+#define HPI_OSTREAM_GROUP_GETMAP HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 21)
+#define HPI_OSTREAM_GROUP_RESET HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 22)
+#define HPI_OSTREAM_HOSTBUFFER_GET_INFO HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 23)
+#define HPI_OSTREAM_WAIT_START HPI_MAKE_INDEX(HPI_OBJ_OSTREAM, 24)
+#define HPI_OSTREAM_FUNCTION_COUNT 24
+/* INPUT STREAM */
+#define HPI_ISTREAM_OPEN HPI_MAKE_INDEX(HPI_OBJ_ISTREAM, 1)
+#define HPI_ISTREAM_CLOSE HPI_MAKE_INDEX(HPI_OBJ_ISTREAM, 2)
+#define HPI_ISTREAM_SET_FORMAT HPI_MAKE_INDEX(HPI_OBJ_ISTREAM, 3)
+#define HPI_ISTREAM_READ HPI_MAKE_INDEX(HPI_OBJ_ISTREAM, 4)
+#define HPI_ISTREAM_START HPI_MAKE_INDEX(HPI_OBJ_ISTREAM, 5)
+#define HPI_ISTREAM_STOP HPI_MAKE_INDEX(HPI_OBJ_ISTREAM, 6)
+#define HPI_ISTREAM_RESET HPI_MAKE_INDEX(HPI_OBJ_ISTREAM, 7)
+#define HPI_ISTREAM_GET_INFO HPI_MAKE_INDEX(HPI_OBJ_ISTREAM, 8)
+#define HPI_ISTREAM_QUERY_FORMAT HPI_MAKE_INDEX(HPI_OBJ_ISTREAM, 9)
+#define HPI_ISTREAM_ANC_RESET HPI_MAKE_INDEX(HPI_OBJ_ISTREAM, 10)
+#define HPI_ISTREAM_ANC_GET_INFO HPI_MAKE_INDEX(HPI_OBJ_ISTREAM, 11)
+#define HPI_ISTREAM_ANC_WRITE HPI_MAKE_INDEX(HPI_OBJ_ISTREAM, 12)
+#define HPI_ISTREAM_HOSTBUFFER_ALLOC HPI_MAKE_INDEX(HPI_OBJ_ISTREAM, 13)
+#define HPI_ISTREAM_HOSTBUFFER_FREE HPI_MAKE_INDEX(HPI_OBJ_ISTREAM, 14)
+#define HPI_ISTREAM_GROUP_ADD HPI_MAKE_INDEX(HPI_OBJ_ISTREAM, 15)
+#define HPI_ISTREAM_GROUP_GETMAP HPI_MAKE_INDEX(HPI_OBJ_ISTREAM, 16)
+#define HPI_ISTREAM_GROUP_RESET HPI_MAKE_INDEX(HPI_OBJ_ISTREAM, 17)
+#define HPI_ISTREAM_HOSTBUFFER_GET_INFO HPI_MAKE_INDEX(HPI_OBJ_ISTREAM, 18)
+#define HPI_ISTREAM_WAIT_START HPI_MAKE_INDEX(HPI_OBJ_ISTREAM, 19)
+#define HPI_ISTREAM_FUNCTION_COUNT 19
+/* MIXER */
+/* NOTE:
+ GET_NODE_INFO, SET_CONNECTION, GET_CONNECTIONS are not currently used */
+#define HPI_MIXER_OPEN HPI_MAKE_INDEX(HPI_OBJ_MIXER, 1)
+#define HPI_MIXER_CLOSE HPI_MAKE_INDEX(HPI_OBJ_MIXER, 2)
+#define HPI_MIXER_GET_INFO HPI_MAKE_INDEX(HPI_OBJ_MIXER, 3)
+#define HPI_MIXER_GET_NODE_INFO HPI_MAKE_INDEX(HPI_OBJ_MIXER, 4)
+#define HPI_MIXER_GET_CONTROL HPI_MAKE_INDEX(HPI_OBJ_MIXER, 5)
+#define HPI_MIXER_SET_CONNECTION HPI_MAKE_INDEX(HPI_OBJ_MIXER, 6)
+#define HPI_MIXER_GET_CONNECTIONS HPI_MAKE_INDEX(HPI_OBJ_MIXER, 7)
+#define HPI_MIXER_GET_CONTROL_BY_INDEX HPI_MAKE_INDEX(HPI_OBJ_MIXER, 8)
+#define HPI_MIXER_GET_CONTROL_ARRAY_BY_INDEX HPI_MAKE_INDEX(HPI_OBJ_MIXER, 9)
+#define HPI_MIXER_GET_CONTROL_MULTIPLE_VALUES HPI_MAKE_INDEX(HPI_OBJ_MIXER, 10)
+#define HPI_MIXER_STORE HPI_MAKE_INDEX(HPI_OBJ_MIXER, 11)
+#define HPI_MIXER_FUNCTION_COUNT 11
+/* MIXER CONTROLS */
+#define HPI_CONTROL_GET_INFO HPI_MAKE_INDEX(HPI_OBJ_CONTROL, 1)
+#define HPI_CONTROL_GET_STATE HPI_MAKE_INDEX(HPI_OBJ_CONTROL, 2)
+#define HPI_CONTROL_SET_STATE HPI_MAKE_INDEX(HPI_OBJ_CONTROL, 3)
+#define HPI_CONTROL_FUNCTION_COUNT 3
+/* NONVOL MEMORY */
+#define HPI_NVMEMORY_OPEN HPI_MAKE_INDEX(HPI_OBJ_NVMEMORY, 1)
+#define HPI_NVMEMORY_READ_BYTE HPI_MAKE_INDEX(HPI_OBJ_NVMEMORY, 2)
+#define HPI_NVMEMORY_WRITE_BYTE HPI_MAKE_INDEX(HPI_OBJ_NVMEMORY, 3)
+#define HPI_NVMEMORY_FUNCTION_COUNT 3
+/* GPIO */
+#define HPI_GPIO_OPEN HPI_MAKE_INDEX(HPI_OBJ_GPIO, 1)
+#define HPI_GPIO_READ_BIT HPI_MAKE_INDEX(HPI_OBJ_GPIO, 2)
+#define HPI_GPIO_WRITE_BIT HPI_MAKE_INDEX(HPI_OBJ_GPIO, 3)
+#define HPI_GPIO_READ_ALL HPI_MAKE_INDEX(HPI_OBJ_GPIO, 4)
+#define HPI_GPIO_WRITE_STATUS HPI_MAKE_INDEX(HPI_OBJ_GPIO, 5)
+#define HPI_GPIO_FUNCTION_COUNT 5
+/* ASYNC EVENT */
+#define HPI_ASYNCEVENT_OPEN HPI_MAKE_INDEX(HPI_OBJ_ASYNCEVENT, 1)
+#define HPI_ASYNCEVENT_CLOSE HPI_MAKE_INDEX(HPI_OBJ_ASYNCEVENT, 2)
+#define HPI_ASYNCEVENT_WAIT HPI_MAKE_INDEX(HPI_OBJ_ASYNCEVENT, 3)
+#define HPI_ASYNCEVENT_GETCOUNT HPI_MAKE_INDEX(HPI_OBJ_ASYNCEVENT, 4)
+#define HPI_ASYNCEVENT_GET HPI_MAKE_INDEX(HPI_OBJ_ASYNCEVENT, 5)
+#define HPI_ASYNCEVENT_SENDEVENTS HPI_MAKE_INDEX(HPI_OBJ_ASYNCEVENT, 6)
+#define HPI_ASYNCEVENT_FUNCTION_COUNT 6
+/* WATCH-DOG */
+#define HPI_WATCHDOG_OPEN HPI_MAKE_INDEX(HPI_OBJ_WATCHDOG, 1)
+#define HPI_WATCHDOG_SET_TIME HPI_MAKE_INDEX(HPI_OBJ_WATCHDOG, 2)
+#define HPI_WATCHDOG_PING HPI_MAKE_INDEX(HPI_OBJ_WATCHDOG, 3)
+/* CLOCK */
+#define HPI_CLOCK_OPEN HPI_MAKE_INDEX(HPI_OBJ_CLOCK, 1)
+#define HPI_CLOCK_SET_TIME HPI_MAKE_INDEX(HPI_OBJ_CLOCK, 2)
+#define HPI_CLOCK_GET_TIME HPI_MAKE_INDEX(HPI_OBJ_CLOCK, 3)
+/* PROFILE */
+#define HPI_PROFILE_OPEN_ALL HPI_MAKE_INDEX(HPI_OBJ_PROFILE, 1)
+#define HPI_PROFILE_START_ALL HPI_MAKE_INDEX(HPI_OBJ_PROFILE, 2)
+#define HPI_PROFILE_STOP_ALL HPI_MAKE_INDEX(HPI_OBJ_PROFILE, 3)
+#define HPI_PROFILE_GET HPI_MAKE_INDEX(HPI_OBJ_PROFILE, 4)
+#define HPI_PROFILE_GET_IDLECOUNT HPI_MAKE_INDEX(HPI_OBJ_PROFILE, 5)
+#define HPI_PROFILE_GET_NAME HPI_MAKE_INDEX(HPI_OBJ_PROFILE, 6)
+#define HPI_PROFILE_GET_UTILIZATION HPI_MAKE_INDEX(HPI_OBJ_PROFILE, 7)
+#define HPI_PROFILE_FUNCTION_COUNT 7
+/* ////////////////////////////////////////////////////////////////////// */
+/* PRIVATE ATTRIBUTES */
+
+/* ////////////////////////////////////////////////////////////////////// */
+/* STRUCTURES */
+#ifndef DISABLE_PRAGMA_PACK1
+#pragma pack(push, 1)
+#endif
+
+/** PCI bus resource */
+struct hpi_pci {
+ u32 __iomem *ap_mem_base[HPI_MAX_ADAPTER_MEM_SPACES];
+ struct pci_dev *p_os_data;
+
+#ifndef HPI64BIT /* keep structure size constant */
+ u32 padding[HPI_MAX_ADAPTER_MEM_SPACES + 1];
+#endif
+ u16 vendor_id;
+ u16 device_id;
+ u16 subsys_vendor_id;
+ u16 subsys_device_id;
+ u16 bus_number;
+ u16 device_number;
+ u32 interrupt;
+};
+
+struct hpi_resource {
+ union {
+ const struct hpi_pci *pci;
+ const char *net_if;
+ } r;
+#ifndef HPI64BIT /* keep structure size constant */
+ u32 pad_to64;
+#endif
+ u16 bus_type; /* HPI_BUS_PNPISA, _PCI, _USB etc */
+ u16 padding;
+
+};
+
+/** Format info used inside struct hpi_message
+ Not the same as public API struct hpi_format */
+struct hpi_msg_format {
+ u32 sample_rate;
+ /**< 11025, 32000, 44100 ... */
+ u32 bit_rate; /**< for MPEG */
+ u32 attributes;
+ /**< Stereo/JointStereo/Mono */
+ u16 channels; /**< 1, 2, ... (or ancillary mode or idle bit) */
+ u16 format; /**< HPI_FORMAT_PCM16, _MPEG etc. see \ref HPI_FORMATS. */
+};
+
+/** Buffer+format structure.
+ Must be kept 7 * 32 bits to match public struct hpi_datastruct */
+struct hpi_msg_data {
+ struct hpi_msg_format format;
+ u8 *pb_data;
+#ifndef HPI64BIT
+ u32 padding;
+#endif
+ u32 data_size;
+};
+
+/** struct hpi_data structure as used up to the 3.04 driver */
+struct hpi_data_legacy32 {
+ struct hpi_format format;
+ u32 pb_data;
+ u32 data_size;
+};
+
+#ifdef HPI64BIT
+/* Compatibility version of struct hpi_data*/
+struct hpi_data_compat32 {
+ struct hpi_msg_format format;
+ u32 pb_data;
+ u32 padding;
+ u32 data_size;
+};
+#endif
+
+struct hpi_buffer {
+ /** placeholder for backward compatibility (see dwBufferSize) */
+ struct hpi_msg_format reserved;
+ u32 command; /**< HPI_BUFFER_CMD_xxx*/
+ u32 pci_address; /**< PCI physical address of buffer for DSP DMA */
+ u32 buffer_size; /**< must line up with data_size of HPI_DATA*/
+};
+
+/*/////////////////////////////////////////////////////////////////////////// */
+/* This is used for background buffer bus mastering stream buffers. */
+struct hpi_hostbuffer_status {
+ u32 samples_processed;
+ u32 auxiliary_data_available;
+ u32 stream_state;
+ /* DSP index into the host bus master buffer. */
+ u32 dSP_index;
+ /* Host index into the host bus master buffer. */
+ u32 host_index;
+ u32 size_in_bytes;
+};
+
+struct hpi_streamid {
+ u16 object_type;
+ /**< Type of object, HPI_OBJ_OSTREAM or HPI_OBJ_ISTREAM. */
+ u16 stream_index; /**< outstream or instream index. */
+};
+
+struct hpi_punchinout {
+ u32 punch_in_sample;
+ u32 punch_out_sample;
+};
+
+struct hpi_subsys_msg {
+ struct hpi_resource resource;
+};
+
+struct hpi_subsys_res {
+ u32 version;
+ u32 data; /* used to return extended version */
+ u16 num_adapters; /* number of adapters */
+ u16 adapter_index;
+ u16 aw_adapter_list[HPI_MAX_ADAPTERS];
+};
+
+struct hpi_adapter_msg {
+ u32 adapter_mode; /* adapter mode */
+ u16 assert_id; /* assert number for "test assert" call
+ object_index for find object call
+ query_or_set for hpi_adapter_set_mode_ex() */
+ u16 object_type; /* for adapter find object call */
+};
+
+union hpi_adapterx_msg {
+ struct hpi_adapter_msg adapter;
+ struct {
+ u32 offset;
+ } query_flash;
+ struct {
+ u32 offset;
+ u32 length;
+ u32 key;
+ } start_flash;
+ struct {
+ u32 checksum;
+ u16 sequence;
+ u16 length;
+ u16 offset; /**< offset from start of msg to data */
+ u16 unused;
+ } program_flash;
+ struct {
+ u16 property;
+ u16 parameter1;
+ u16 parameter2;
+ } property_set;
+ struct {
+ u16 index;
+ u16 what;
+ u16 property_index;
+ } property_enum;
+ struct {
+ u16 index;
+ } module_info;
+ struct {
+ u32 dsp_address;
+ u32 count_bytes;
+ } debug_read;
+};
+
+struct hpi_adapter_res {
+ u32 serial_number;
+ u16 adapter_type;
+ u16 adapter_index; /* is this needed? also used for dsp_index */
+ u16 num_instreams;
+ u16 num_outstreams;
+ u16 num_mixers;
+ u16 version;
+ u8 sz_adapter_assert[HPI_STRING_LEN];
+};
+
+union hpi_adapterx_res {
+ struct hpi_adapter_res adapter;
+ struct {
+ u32 checksum;
+ u32 length;
+ u32 version;
+ } query_flash;
+ struct {
+ u16 sequence;
+ } program_flash;
+ struct {
+ u16 parameter1;
+ u16 parameter2;
+ } property_get;
+};
+
+struct hpi_stream_msg {
+ union {
+ struct hpi_msg_data data;
+ struct hpi_data_legacy32 data32;
+ u16 velocity;
+ struct hpi_punchinout pio;
+ u32 time_scale;
+ struct hpi_buffer buffer;
+ struct hpi_streamid stream;
+ } u;
+};
+
+struct hpi_stream_res {
+ union {
+ struct {
+ /* size of hardware buffer */
+ u32 buffer_size;
+ /* OutStream - data to play,
+ InStream - data recorded */
+ u32 data_available;
+ /* OutStream - samples played,
+ InStream - samples recorded */
+ u32 samples_transferred;
+ /* Adapter - OutStream - data to play,
+ InStream - data recorded */
+ u32 auxiliary_data_available;
+ u16 state; /* HPI_STATE_PLAYING, _STATE_STOPPED */
+ u16 padding;
+ } stream_info;
+ struct {
+ u32 buffer_size;
+ u32 data_available;
+ u32 samples_transfered;
+ u16 state;
+ u16 outstream_index;
+ u16 instream_index;
+ u16 padding;
+ u32 auxiliary_data_available;
+ } legacy_stream_info;
+ struct {
+ /* bitmap of grouped OutStreams */
+ u32 outstream_group_map;
+ /* bitmap of grouped InStreams */
+ u32 instream_group_map;
+ } group_info;
+ struct {
+ /* pointer to the buffer */
+ u8 *p_buffer;
+ /* pointer to the hostbuffer status */
+ struct hpi_hostbuffer_status *p_status;
+ } hostbuffer_info;
+ } u;
+};
+
+struct hpi_mixer_msg {
+ u16 control_index;
+ u16 control_type; /* = HPI_CONTROL_METER _VOLUME etc */
+ u16 padding1; /* maintain alignment of subsequent fields */
+ u16 node_type1; /* = HPI_SOURCENODE_LINEIN etc */
+ u16 node_index1; /* = 0..N */
+ u16 node_type2;
+ u16 node_index2;
+ u16 padding2; /* round to 4 bytes */
+};
+
+struct hpi_mixer_res {
+ u16 src_node_type; /* = HPI_SOURCENODE_LINEIN etc */
+ u16 src_node_index; /* = 0..N */
+ u16 dst_node_type;
+ u16 dst_node_index;
+ /* Also controlType for MixerGetControlByIndex */
+ u16 control_index;
+ /* may indicate which DSP the control is located on */
+ u16 dsp_index;
+};
+
+union hpi_mixerx_msg {
+ struct {
+ u16 starting_index;
+ u16 flags;
+ u32 length_in_bytes; /* length in bytes of p_data */
+ u32 p_data; /* pointer to a data array */
+ } gcabi;
+ struct {
+ u16 command;
+ u16 index;
+ } store; /* for HPI_MIXER_STORE message */
+};
+
+union hpi_mixerx_res {
+ struct {
+ u32 bytes_returned; /* size of items returned */
+ u32 p_data; /* pointer to data array */
+ u16 more_to_do; /* indicates if there is more to do */
+ } gcabi;
+};
+
+struct hpi_control_msg {
+ u16 attribute; /* control attribute or property */
+ u16 saved_index;
+ u32 param1; /* generic parameter 1 */
+ u32 param2; /* generic parameter 2 */
+ short an_log_value[HPI_MAX_CHANNELS];
+};
+
+struct hpi_control_union_msg {
+ u16 attribute; /* control attribute or property */
+ u16 saved_index; /* only used in ctrl save/restore */
+ union {
+ struct {
+ u32 param1; /* generic parameter 1 */
+ u32 param2; /* generic parameter 2 */
+ short an_log_value[HPI_MAX_CHANNELS];
+ } old;
+ union {
+ u32 frequency;
+ u32 gain;
+ u32 band;
+ u32 deemphasis;
+ u32 program;
+ struct {
+ u32 mode;
+ u32 value;
+ } mode;
+ } tuner;
+ } u;
+};
+
+struct hpi_control_res {
+ /* Could make union. dwParam, anLogValue never used in same response */
+ u32 param1;
+ u32 param2;
+ short an_log_value[HPI_MAX_CHANNELS];
+};
+
+union hpi_control_union_res {
+ struct {
+ u32 param1;
+ u32 param2;
+ short an_log_value[HPI_MAX_CHANNELS];
+ } old;
+ union {
+ u32 band;
+ u32 frequency;
+ u32 gain;
+ u32 level;
+ u32 deemphasis;
+ struct {
+ u32 data[2];
+ u32 bLER;
+ } rds;
+ } tuner;
+ struct {
+ char sz_data[8];
+ u32 remaining_chars;
+ } chars8;
+ char c_data12[12];
+};
+
+/* HPI_CONTROLX_STRUCTURES */
+
+/* Message */
+
+/** Used for all HMI variables where max length <= 8 bytes
+*/
+struct hpi_controlx_msg_cobranet_data {
+ u32 hmi_address;
+ u32 byte_count;
+ u32 data[2];
+};
+
+/** Used for string data, and for packet bridge
+*/
+struct hpi_controlx_msg_cobranet_bigdata {
+ u32 hmi_address;
+ u32 byte_count;
+ u8 *pb_data;
+#ifndef HPI64BIT
+ u32 padding;
+#endif
+};
+
+/** Used for PADS control reading of string fields.
+*/
+struct hpi_controlx_msg_pad_data {
+ u32 field;
+ u32 byte_count;
+ u8 *pb_data;
+#ifndef HPI64BIT
+ u32 padding;
+#endif
+};
+
+/** Used for generic data
+*/
+
+struct hpi_controlx_msg_generic {
+ u32 param1;
+ u32 param2;
+};
+
+struct hpi_controlx_msg {
+ u16 attribute; /* control attribute or property */
+ u16 saved_index;
+ union {
+ struct hpi_controlx_msg_cobranet_data cobranet_data;
+ struct hpi_controlx_msg_cobranet_bigdata cobranet_bigdata;
+ struct hpi_controlx_msg_generic generic;
+ struct hpi_controlx_msg_pad_data pad_data;
+ /*struct param_value universal_value; */
+ /* nothing extra to send for status read */
+ } u;
+};
+
+/* Response */
+/** Response data for HMI variables where max length <= 8 bytes
+*/
+struct hpi_controlx_res_cobranet_data {
+ u32 byte_count;
+ u32 data[2];
+};
+
+struct hpi_controlx_res_cobranet_bigdata {
+ u32 byte_count;
+};
+
+struct hpi_controlx_res_cobranet_status {
+ u32 status;
+ u32 readable_size;
+ u32 writeable_size;
+};
+
+struct hpi_controlx_res_generic {
+ u32 param1;
+ u32 param2;
+};
+
+struct hpi_controlx_res {
+ union {
+ struct hpi_controlx_res_cobranet_bigdata cobranet_bigdata;
+ struct hpi_controlx_res_cobranet_data cobranet_data;
+ struct hpi_controlx_res_cobranet_status cobranet_status;
+ struct hpi_controlx_res_generic generic;
+ /*struct param_info universal_info; */
+ /*struct param_value universal_value; */
+ } u;
+};
+
+struct hpi_nvmemory_msg {
+ u16 address;
+ u16 data;
+};
+
+struct hpi_nvmemory_res {
+ u16 size_in_bytes;
+ u16 data;
+};
+
+struct hpi_gpio_msg {
+ u16 bit_index;
+ u16 bit_data;
+};
+
+struct hpi_gpio_res {
+ u16 number_input_bits;
+ u16 number_output_bits;
+ u16 bit_data[4];
+};
+
+struct hpi_async_msg {
+ u32 events;
+ u16 maximum_events;
+ u16 padding;
+};
+
+struct hpi_async_res {
+ union {
+ struct {
+ u16 count;
+ } count;
+ struct {
+ u32 events;
+ u16 number_returned;
+ u16 padding;
+ } get;
+ struct hpi_async_event event;
+ } u;
+};
+
+struct hpi_watchdog_msg {
+ u32 time_ms;
+};
+
+struct hpi_watchdog_res {
+ u32 time_ms;
+};
+
+struct hpi_clock_msg {
+ u16 hours;
+ u16 minutes;
+ u16 seconds;
+ u16 milli_seconds;
+};
+
+struct hpi_clock_res {
+ u16 size_in_bytes;
+ u16 hours;
+ u16 minutes;
+ u16 seconds;
+ u16 milli_seconds;
+ u16 padding;
+};
+
+struct hpi_profile_msg {
+ u16 bin_index;
+ u16 padding;
+};
+
+struct hpi_profile_res_open {
+ u16 max_profiles;
+};
+
+struct hpi_profile_res_time {
+ u32 micro_seconds;
+ u32 call_count;
+ u32 max_micro_seconds;
+ u32 min_micro_seconds;
+ u16 seconds;
+};
+
+struct hpi_profile_res_name {
+ u8 sz_name[32];
+};
+
+struct hpi_profile_res {
+ union {
+ struct hpi_profile_res_open o;
+ struct hpi_profile_res_time t;
+ struct hpi_profile_res_name n;
+ } u;
+};
+
+struct hpi_message_header {
+ u16 size; /* total size in bytes */
+ u8 type; /* HPI_TYPE_MESSAGE */
+ u8 version; /* message version */
+ u16 object; /* HPI_OBJ_* */
+ u16 function; /* HPI_SUBSYS_xxx, HPI_ADAPTER_xxx */
+ u16 adapter_index; /* the adapter index */
+ u16 obj_index; /* object index */
+};
+
+struct hpi_message {
+ /* following fields must match HPI_MESSAGE_HEADER */
+ u16 size; /* total size in bytes */
+ u8 type; /* HPI_TYPE_MESSAGE */
+ u8 version; /* message version */
+ u16 object; /* HPI_OBJ_* */
+ u16 function; /* HPI_SUBSYS_xxx, HPI_ADAPTER_xxx */
+ u16 adapter_index; /* the adapter index */
+ u16 obj_index; /* object index */
+ union {
+ struct hpi_subsys_msg s;
+ struct hpi_adapter_msg a;
+ union hpi_adapterx_msg ax;
+ struct hpi_stream_msg d;
+ struct hpi_mixer_msg m;
+ union hpi_mixerx_msg mx; /* extended mixer; */
+ struct hpi_control_msg c; /* mixer control; */
+ /* identical to struct hpi_control_msg,
+ but field naming is improved */
+ struct hpi_control_union_msg cu;
+ struct hpi_controlx_msg cx; /* extended mixer control; */
+ struct hpi_nvmemory_msg n;
+ struct hpi_gpio_msg l; /* digital i/o */
+ struct hpi_watchdog_msg w;
+ struct hpi_clock_msg t; /* dsp time */
+ struct hpi_profile_msg p;
+ struct hpi_async_msg as;
+ char fixed_size[32];
+ } u;
+};
+
+#define HPI_MESSAGE_SIZE_BY_OBJECT { \
+ sizeof(struct hpi_message_header) , /* default, no object type 0 */ \
+ sizeof(struct hpi_message_header) + sizeof(struct hpi_subsys_msg),\
+ sizeof(struct hpi_message_header) + sizeof(union hpi_adapterx_msg),\
+ sizeof(struct hpi_message_header) + sizeof(struct hpi_stream_msg),\
+ sizeof(struct hpi_message_header) + sizeof(struct hpi_stream_msg),\
+ sizeof(struct hpi_message_header) + sizeof(struct hpi_mixer_msg),\
+ sizeof(struct hpi_message_header) , /* no node message */ \
+ sizeof(struct hpi_message_header) + sizeof(struct hpi_control_msg),\
+ sizeof(struct hpi_message_header) + sizeof(struct hpi_nvmemory_msg),\
+ sizeof(struct hpi_message_header) + sizeof(struct hpi_gpio_msg),\
+ sizeof(struct hpi_message_header) + sizeof(struct hpi_watchdog_msg),\
+ sizeof(struct hpi_message_header) + sizeof(struct hpi_clock_msg),\
+ sizeof(struct hpi_message_header) + sizeof(struct hpi_profile_msg),\
+ sizeof(struct hpi_message_header) + sizeof(struct hpi_controlx_msg),\
+ sizeof(struct hpi_message_header) + sizeof(struct hpi_async_msg) \
+}
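+/* Usage sketch (an assumption about how this initializer is consumed, not
+   code from this header): a lookup table indexed by HPI_OBJ_* giving the
+   number of bytes needed for each message type.
+
+	static const u16 msg_size[HPI_OBJ_MAXINDEX + 1] =
+		HPI_MESSAGE_SIZE_BY_OBJECT;
+	u16 mixer_msg_bytes = msg_size[HPI_OBJ_MIXER];
+*/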
+
+struct hpi_response_header {
+ u16 size;
+ u8 type; /* HPI_TYPE_RESPONSE */
+ u8 version; /* response version */
+ u16 object; /* HPI_OBJ_* */
+ u16 function; /* HPI_SUBSYS_xxx, HPI_ADAPTER_xxx */
+ u16 error; /* HPI_ERROR_xxx */
+ u16 specific_error; /* adapter specific error */
+};
+
+struct hpi_response {
+/* following fields must match HPI_RESPONSE_HEADER */
+ u16 size;
+ u8 type; /* HPI_TYPE_RESPONSE */
+ u8 version; /* response version */
+ u16 object; /* HPI_OBJ_* */
+ u16 function; /* HPI_SUBSYS_xxx, HPI_ADAPTER_xxx */
+ u16 error; /* HPI_ERROR_xxx */
+ u16 specific_error; /* adapter specific error */
+ union {
+ struct hpi_subsys_res s;
+ struct hpi_adapter_res a;
+ union hpi_adapterx_res ax;
+ struct hpi_stream_res d;
+ struct hpi_mixer_res m;
+ union hpi_mixerx_res mx; /* extended mixer; */
+ struct hpi_control_res c; /* mixer control; */
+ /* identical to hpi_control_res, but field naming is improved */
+ union hpi_control_union_res cu;
+ struct hpi_controlx_res cx; /* extended mixer control; */
+ struct hpi_nvmemory_res n;
+ struct hpi_gpio_res l; /* digital i/o */
+ struct hpi_watchdog_res w;
+ struct hpi_clock_res t; /* dsp time */
+ struct hpi_profile_res p;
+ struct hpi_async_res as;
+ u8 bytes[52];
+ } u;
+};
+
+#define HPI_RESPONSE_SIZE_BY_OBJECT { \
+ sizeof(struct hpi_response_header) ,/* default, no object type 0 */ \
+ sizeof(struct hpi_response_header) + sizeof(struct hpi_subsys_res),\
+ sizeof(struct hpi_response_header) + sizeof(union hpi_adapterx_res),\
+ sizeof(struct hpi_response_header) + sizeof(struct hpi_stream_res),\
+ sizeof(struct hpi_response_header) + sizeof(struct hpi_stream_res),\
+ sizeof(struct hpi_response_header) + sizeof(struct hpi_mixer_res),\
+ sizeof(struct hpi_response_header) , /* no node response */ \
+ sizeof(struct hpi_response_header) + sizeof(struct hpi_control_res),\
+ sizeof(struct hpi_response_header) + sizeof(struct hpi_nvmemory_res),\
+ sizeof(struct hpi_response_header) + sizeof(struct hpi_gpio_res),\
+ sizeof(struct hpi_response_header) + sizeof(struct hpi_watchdog_res),\
+ sizeof(struct hpi_response_header) + sizeof(struct hpi_clock_res),\
+ sizeof(struct hpi_response_header) + sizeof(struct hpi_profile_res),\
+ sizeof(struct hpi_response_header) + sizeof(struct hpi_controlx_res),\
+ sizeof(struct hpi_response_header) + sizeof(struct hpi_async_res) \
+}
+
+/*********************** version 1 message/response *****************************/
+#define HPINET_ETHERNET_DATA_SIZE (1500)
+#define HPINET_IP_HDR_SIZE (20)
+#define HPINET_IP_DATA_SIZE (HPINET_ETHERNET_DATA_SIZE - HPINET_IP_HDR_SIZE)
+#define HPINET_UDP_HDR_SIZE (8)
+#define HPINET_UDP_DATA_SIZE (HPINET_IP_DATA_SIZE - HPINET_UDP_HDR_SIZE)
+#define HPINET_ASI_HDR_SIZE (2)
+#define HPINET_ASI_DATA_SIZE (HPINET_UDP_DATA_SIZE - HPINET_ASI_HDR_SIZE)
+
+#define HPI_MAX_PAYLOAD_SIZE (HPINET_ASI_DATA_SIZE - 2)
+
+/* New style message/response, but still V0 compatible */
+struct hpi_msg_adapter_get_info {
+ struct hpi_message_header h;
+};
+
+struct hpi_res_adapter_get_info {
+ struct hpi_response_header h; /*v0 */
+ struct hpi_adapter_res p;
+};
+
+/* padding is so these are the same size as a v0 hpi_message */
+struct hpi_msg_adapter_query_flash {
+ struct hpi_message_header h;
+ u32 offset;
+ u8 pad_to_version0_size[sizeof(struct hpi_message) - /* V0 msg */
+ sizeof(struct hpi_message_header) - 1 * sizeof(u32)];
+};
+
+/* padding is so these are the same size as a v0 hpi_response */
+struct hpi_res_adapter_query_flash {
+ struct hpi_response_header h;
+ u32 checksum;
+ u32 length;
+ u32 version;
+ u8 pad_to_version0_size[sizeof(struct hpi_response) - /* V0 res */
+ sizeof(struct hpi_response_header) - 3 * sizeof(u32)];
+};
+
+struct hpi_msg_adapter_start_flash {
+ struct hpi_message_header h;
+ u32 offset;
+ u32 length;
+ u32 key;
+ u8 pad_to_version0_size[sizeof(struct hpi_message) - /* V0 msg */
+ sizeof(struct hpi_message_header) - 3 * sizeof(u32)];
+};
+
+struct hpi_res_adapter_start_flash {
+ struct hpi_response_header h;
+ u8 pad_to_version0_size[sizeof(struct hpi_response) - /* V0 res */
+ sizeof(struct hpi_response_header)];
+};
+
+struct hpi_msg_adapter_program_flash_payload {
+ u32 checksum;
+ u16 sequence;
+ u16 length;
+ u16 offset; /**< offset from start of msg to data */
+ u16 unused;
+ /* ensure sizeof(header + payload) == sizeof(hpi_message_V0)
+ because old firmware expects data after message of this size */
+ u8 pad_to_version0_size[sizeof(struct hpi_message) - /* V0 message */
+ sizeof(struct hpi_message_header) - sizeof(u32) -
+ 4 * sizeof(u16)];
+};
+
+struct hpi_msg_adapter_program_flash {
+ struct hpi_message_header h;
+ struct hpi_msg_adapter_program_flash_payload p;
+ u32 data[256];
+};
+
+struct hpi_res_adapter_program_flash {
+ struct hpi_response_header h;
+ u16 sequence;
+ u8 pad_to_version0_size[sizeof(struct hpi_response) - /* V0 res */
+ sizeof(struct hpi_response_header) - sizeof(u16)];
+};
+
+#if 1
+#define hpi_message_header_v1 hpi_message_header
+#define hpi_response_header_v1 hpi_response_header
+#else
+/* V1 headers in addition to the v0 headers */
+struct hpi_message_header_v1 {
+ struct hpi_message_header h0;
+/* struct {
+} h1; */
+};
+
+struct hpi_response_header_v1 {
+ struct hpi_response_header h0;
+ struct {
+ u16 adapter_index; /* the adapter index */
+ u16 obj_index; /* object index */
+ } h1;
+};
+#endif
+
+/* STRV HPI Packet */
+struct hpi_msg_strv {
+ struct hpi_message_header h;
+ struct hpi_entity strv;
+};
+
+struct hpi_res_strv {
+ struct hpi_response_header h;
+ struct hpi_entity strv;
+};
+#define MIN_STRV_PACKET_SIZE sizeof(struct hpi_res_strv)
+
+struct hpi_msg_payload_v0 {
+ struct hpi_message_header h;
+ union {
+ struct hpi_subsys_msg s;
+ struct hpi_adapter_msg a;
+ union hpi_adapterx_msg ax;
+ struct hpi_stream_msg d;
+ struct hpi_mixer_msg m;
+ union hpi_mixerx_msg mx;
+ struct hpi_control_msg c;
+ struct hpi_control_union_msg cu;
+ struct hpi_controlx_msg cx;
+ struct hpi_nvmemory_msg n;
+ struct hpi_gpio_msg l;
+ struct hpi_watchdog_msg w;
+ struct hpi_clock_msg t;
+ struct hpi_profile_msg p;
+ struct hpi_async_msg as;
+ } u;
+};
+
+struct hpi_res_payload_v0 {
+ struct hpi_response_header h;
+ union {
+ struct hpi_subsys_res s;
+ struct hpi_adapter_res a;
+ union hpi_adapterx_res ax;
+ struct hpi_stream_res d;
+ struct hpi_mixer_res m;
+ union hpi_mixerx_res mx;
+ struct hpi_control_res c;
+ union hpi_control_union_res cu;
+ struct hpi_controlx_res cx;
+ struct hpi_nvmemory_res n;
+ struct hpi_gpio_res l;
+ struct hpi_watchdog_res w;
+ struct hpi_clock_res t;
+ struct hpi_profile_res p;
+ struct hpi_async_res as;
+ } u;
+};
+
+union hpi_message_buffer_v1 {
+ struct hpi_message m0; /* version 0 */
+ struct hpi_message_header_v1 h;
+ unsigned char buf[HPI_MAX_PAYLOAD_SIZE];
+};
+
+union hpi_response_buffer_v1 {
+ struct hpi_response r0; /* version 0 */
+ struct hpi_response_header_v1 h;
+ unsigned char buf[HPI_MAX_PAYLOAD_SIZE];
+};
+
+compile_time_assert((sizeof(union hpi_message_buffer_v1) <=
+ HPI_MAX_PAYLOAD_SIZE), message_buffer_ok);
+compile_time_assert((sizeof(union hpi_response_buffer_v1) <=
+ HPI_MAX_PAYLOAD_SIZE), response_buffer_ok);
+
+/*////////////////////////////////////////////////////////////////////////// */
+/* declarations for compact control calls */
+struct hpi_control_defn {
+ u8 type;
+ u8 channels;
+ u8 src_node_type;
+ u8 src_node_index;
+ u8 dest_node_type;
+ u8 dest_node_index;
+};
+
+/*////////////////////////////////////////////////////////////////////////// */
+/* declarations for control caching (internal to HPI<->DSP interaction) */
+
+/** A compact representation of (part of) a control's state.
+Used for efficient transfer of the control state
+between DSP and host, or across a network.
+*/
+struct hpi_control_cache_info {
+ /** one of HPI_CONTROL_* */
+ u8 control_type;
+ /** The total size of cached information in 32-bit words. */
+ u8 size_in32bit_words;
+ /** The original index of the control on the DSP */
+ u16 control_index;
+};
+
+struct hpi_control_cache_single {
+ struct hpi_control_cache_info i;
+ union {
+ struct { /* volume */
+ u16 an_log[2];
+ } v;
+ struct { /* peak meter */
+ u16 an_log_peak[2];
+ u16 an_logRMS[2];
+ } p;
+ struct { /* channel mode */
+ u16 mode;
+ } m;
+ struct { /* multiplexer */
+ u16 source_node_type;
+ u16 source_node_index;
+ } x;
+ struct { /* level/trim */
+ u16 an_log[2];
+ } l;
+ struct { /* tuner - partial caching.
+ some attributes go to the DSP. */
+ u32 freq_ink_hz;
+ u16 band;
+ u16 level;
+ } t;
+ struct { /* AESEBU rx status */
+ u32 error_status;
+ u32 source;
+ } aes3rx;
+ struct { /* AESEBU tx */
+ u32 format;
+ } aes3tx;
+ struct { /* tone detector */
+ u16 state;
+ } tone;
+ struct { /* silence detector */
+ u32 state;
+ u32 count;
+ } silence;
+ struct { /* sample clock */
+ u16 source;
+ u16 source_index;
+ u32 sample_rate;
+ } clk;
+ struct { /* microphone control */
+ u16 state;
+ } phantom_power;
+ struct { /* generic control */
+ u32 dw1;
+ u32 dw2;
+ } g;
+ } u;
+};
+
+struct hpi_control_cache_pad {
+ struct hpi_control_cache_info i;
+ u32 field_valid_flags;
+ u8 c_channel[8];
+ u8 c_artist[40];
+ u8 c_title[40];
+ u8 c_comment[200];
+ u32 pTY;
+ u32 pI;
+ u32 traffic_supported;
+ u32 traffic_anouncement;
+};
+
+/*/////////////////////////////////////////////////////////////////////////// */
+/* declarations for 2^N sized FIFO buffer (internal to HPI<->DSP interaction) */
+struct hpi_fifo_buffer {
+ u32 size;
+ u32 dSP_index;
+ u32 host_index;
+};
+
+#ifndef DISABLE_PRAGMA_PACK1
+#pragma pack(pop)
+#endif
+
+/* skip host side function declarations for DSP
+ compile and documentation extraction */
+
+char hpi_handle_object(const u32 handle);
+
+void hpi_handle_to_indexes(const u32 handle, u16 *pw_adapter_index,
+ u16 *pw_object_index);
+
+u32 hpi_indexes_to_handle(const char c_object, const u16 adapter_index,
+ const u16 object_index);
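+/* Illustrative round trip (a sketch only; the handle bit layout itself is
+   implemented elsewhere in the driver):
+
+	u32 h = hpi_indexes_to_handle(HPI_OBJ_OSTREAM, 0, 2);
+	u16 adapter, object;
+	hpi_handle_to_indexes(h, &adapter, &object);	/* expect 0 and 2 */
+	char type = hpi_handle_object(h);	/* expect HPI_OBJ_OSTREAM */
+*/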
+
+/*////////////////////////////////////////////////////////////////////////// */
+
+/* main HPI entry point */
+hpi_handler_func hpi_send_recv;
+
+/* UDP message */
+void hpi_send_recvUDP(struct hpi_message *phm, struct hpi_response *phr,
+ const unsigned int timeout);
+
+/* used in PnP OS/driver */
+u16 hpi_subsys_create_adapter(const struct hpi_hsubsys *ph_subsys,
+ const struct hpi_resource *p_resource, u16 *pw_adapter_index);
+
+u16 hpi_subsys_delete_adapter(const struct hpi_hsubsys *ph_subsys,
+ u16 adapter_index);
+
+u16 hpi_outstream_host_buffer_get_info(const struct hpi_hsubsys *ph_subsys,
+ u32 h_outstream, u8 **pp_buffer,
+ struct hpi_hostbuffer_status **pp_status);
+
+u16 hpi_instream_host_buffer_get_info(const struct hpi_hsubsys *ph_subsys,
+ u32 h_instream, u8 **pp_buffer,
+ struct hpi_hostbuffer_status **pp_status);
+
+u16 hpi_adapter_restart(u16 adapter_index);
+
+/*
+The following functions were last declared in header files for
+driver 3.10. HPI_ControlQuery() used to be the recommended way
+of getting a volume range. Declared here for binary asihpi32.dll
+compatibility.
+*/
+
+void hpi_format_to_msg(struct hpi_msg_format *pMF,
+ const struct hpi_format *pF);
+void hpi_stream_response_to_legacy(struct hpi_stream_res *pSR);
+
+/*////////////////////////////////////////////////////////////////////////// */
+/* declarations for individual HPI entry points */
+hpi_handler_func HPI_1000;
+hpi_handler_func HPI_6000;
+hpi_handler_func HPI_6205;
+hpi_handler_func HPI_COMMON;
+
+#endif /* _HPI_INTERNAL_H_ */
diff --git a/sound/pci/asihpi/hpicmn.c b/sound/pci/asihpi/hpicmn.c
new file mode 100644
index 000000000000..565102cae4f8
--- /dev/null
+++ b/sound/pci/asihpi/hpicmn.c
@@ -0,0 +1,643 @@
+/******************************************************************************
+
+ AudioScience HPI driver
+ Copyright (C) 1997-2010 AudioScience Inc. <support@audioscience.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation;
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+\file hpicmn.c
+
+ Common functions used by hpixxxx.c modules
+
+(C) Copyright AudioScience Inc. 1998-2003
+*******************************************************************************/
+#define SOURCEFILE_NAME "hpicmn.c"
+
+#include "hpi_internal.h"
+#include "hpidebug.h"
+#include "hpicmn.h"
+
+struct hpi_adapters_list {
+ struct hpios_spinlock list_lock;
+ struct hpi_adapter_obj adapter[HPI_MAX_ADAPTERS];
+ u16 gw_num_adapters;
+};
+
+static struct hpi_adapters_list adapters;
+
+/**
+* Given an HPI Message that was sent out and a response that was received,
+* validate that the response has the correct fields filled in,
+* i.e. ObjectType, Function etc.
+**/
+u16 hpi_validate_response(struct hpi_message *phm, struct hpi_response *phr)
+{
+ u16 error = 0;
+
+ if ((phr->type != HPI_TYPE_RESPONSE)
+ || (phr->object != phm->object)
+ || (phr->function != phm->function))
+ error = HPI_ERROR_INVALID_RESPONSE;
+
+ return error;
+}
+
+u16 hpi_add_adapter(struct hpi_adapter_obj *pao)
+{
+ u16 retval = 0;
+ /*HPI_ASSERT(pao->wAdapterType); */
+
+ hpios_alistlock_lock(&adapters);
+
+ if (pao->index >= HPI_MAX_ADAPTERS) {
+ retval = HPI_ERROR_BAD_ADAPTER_NUMBER;
+ goto unlock;
+ }
+
+ if (adapters.adapter[pao->index].adapter_type) {
+ retval = HPI_DUPLICATE_ADAPTER_NUMBER;
+ goto unlock;
+ }
+ adapters.adapter[pao->index] = *pao;
+ hpios_dsplock_init(&adapters.adapter[pao->index]);
+ adapters.gw_num_adapters++;
+
+unlock:
+ hpios_alistlock_un_lock(&adapters);
+ return retval;
+}
+
+void hpi_delete_adapter(struct hpi_adapter_obj *pao)
+{
+ memset(pao, 0, sizeof(struct hpi_adapter_obj));
+
+ hpios_alistlock_lock(&adapters);
+ adapters.gw_num_adapters--; /* dec the number of adapters */
+ hpios_alistlock_un_lock(&adapters);
+}
+
+/**
+* hpi_find_adapter returns a pointer to the struct hpi_adapter_obj with
+* the given adapter_index in the HPI_ADAPTERS_LIST structure.
+*
+*/
+struct hpi_adapter_obj *hpi_find_adapter(u16 adapter_index)
+{
+ struct hpi_adapter_obj *pao = NULL;
+
+ if (adapter_index >= HPI_MAX_ADAPTERS) {
+ HPI_DEBUG_LOG(VERBOSE, "find_adapter invalid index %d ",
+ adapter_index);
+ return NULL;
+ }
+
+ pao = &adapters.adapter[adapter_index];
+ if (pao->adapter_type != 0) {
+ /*
+ HPI_DEBUG_LOG(VERBOSE, "Found adapter index %d\n",
+ wAdapterIndex);
+ */
+ return pao;
+ } else {
+ /*
+ HPI_DEBUG_LOG(VERBOSE, "No adapter index %d\n",
+ wAdapterIndex);
+ */
+ return NULL;
+ }
+}
+
+/**
+* Wipe the HPI_ADAPTERS_LIST structure.
+**/
+static void wipe_adapter_list(void)
+{
+ memset(&adapters, 0, sizeof(adapters));
+}
+
+/**
+* subsys_get_adapters fills aw_adapter_list in a struct hpi_response structure
+* with all adapters in the given HPI_ADAPTERS_LIST.
+*
+*/
+static void subsys_get_adapters(struct hpi_response *phr)
+{
+ /* fill in the response adapter array with the position */
+ /* identified by the adapter number/index of the adapters in */
+ /* this HPI */
+ /* i.e. if we have an A120 with its jumper set to */
+ /* Adapter Number 2 then put an Adapter type A120 in the */
+ /* array in position 1 */
+ /* NOTE: AdapterNumber is 1..N, Index is 0..N-1 */
+
+ /* input: NONE */
+ /* output: wNumAdapters */
+ /* awAdapter[] */
+ /* */
+
+ short i;
+ struct hpi_adapter_obj *pao = NULL;
+
+ HPI_DEBUG_LOG(VERBOSE, "subsys_get_adapters\n");
+
+ /* for each adapter, place its type in the position of the array */
+ /* corresponding to its adapter number */
+ for (i = 0; i < adapters.gw_num_adapters; i++) {
+ pao = &adapters.adapter[i];
+ if (phr->u.s.aw_adapter_list[pao->index] != 0) {
+ phr->error = HPI_DUPLICATE_ADAPTER_NUMBER;
+ phr->specific_error = pao->index;
+ return;
+ }
+ phr->u.s.aw_adapter_list[pao->index] = pao->adapter_type;
+ }
+
+ phr->u.s.num_adapters = adapters.gw_num_adapters;
+ phr->error = 0; /* the function completed OK */
+}
+
+static unsigned int control_cache_alloc_check(struct hpi_control_cache *pC)
+{
+ unsigned int i;
+ int cached = 0;
+ if (!pC)
+ return 0;
+ if ((!pC->init) && (pC->p_cache != NULL) && (pC->control_count)
+ && (pC->cache_size_in_bytes)
+ ) {
+ u32 *p_master_cache;
+ pC->init = 1;
+
+ p_master_cache = (u32 *)pC->p_cache;
+ HPI_DEBUG_LOG(VERBOSE, "check %d controls\n",
+ pC->control_count);
+ for (i = 0; i < pC->control_count; i++) {
+ struct hpi_control_cache_info *info =
+ (struct hpi_control_cache_info *)
+ p_master_cache;
+
+ if (info->control_type) {
+ pC->p_info[i] = info;
+ cached++;
+ } else
+ pC->p_info[i] = NULL;
+
+ if (info->size_in32bit_words)
+ p_master_cache += info->size_in32bit_words;
+ else
+ p_master_cache +=
+ sizeof(struct
+ hpi_control_cache_single) /
+ sizeof(u32);
+
+ HPI_DEBUG_LOG(VERBOSE,
+ "cached %d, pinfo %p index %d type %d\n",
+ cached, pC->p_info[i], info->control_index,
+ info->control_type);
+ }
+ /*
+ We didn't find anything to cache, so try again later!
+ */
+ if (!cached)
+ pC->init = 0;
+ }
+ return pC->init;
+}
+
+/** Find a control.
+*/
+static short find_control(struct hpi_message *phm,
+ struct hpi_control_cache *p_cache, struct hpi_control_cache_info **pI,
+ u16 *pw_control_index)
+{
+ *pw_control_index = phm->obj_index;
+
+ if (!control_cache_alloc_check(p_cache)) {
+ HPI_DEBUG_LOG(VERBOSE,
+ "control_cache_alloc_check() failed. adap%d ci%d\n",
+ phm->adapter_index, *pw_control_index);
+ return 0;
+ }
+
+ *pI = p_cache->p_info[*pw_control_index];
+ if (!*pI) {
+ HPI_DEBUG_LOG(VERBOSE, "uncached adap %d, control %d\n",
+ phm->adapter_index, *pw_control_index);
+ return 0;
+ } else {
+ HPI_DEBUG_LOG(VERBOSE, "find_control() type %d\n",
+ (*pI)->control_type);
+ }
+ return 1;
+}
+
+/** Used by the kernel driver to figure out if a buffer needs mapping.
+ */
+short hpi_check_buffer_mapping(struct hpi_control_cache *p_cache,
+ struct hpi_message *phm, void **p, unsigned int *pN)
+{
+ *pN = 0;
+ *p = NULL;
+ if ((phm->function == HPI_CONTROL_GET_STATE)
+ && (phm->object == HPI_OBJ_CONTROLEX)
+ ) {
+ u16 control_index;
+ struct hpi_control_cache_info *pI;
+
+ if (!find_control(phm, p_cache, &pI, &control_index))
+ return 0;
+ }
+ return 0;
+}
+
+/* allow unified treatment of several string fields within struct */
+#define HPICMN_PAD_OFS_AND_SIZE(m) {\
+ offsetof(struct hpi_control_cache_pad, m), \
+ sizeof(((struct hpi_control_cache_pad *)(NULL))->m) }
+
+struct pad_ofs_size {
+ unsigned int offset;
+ unsigned int field_size;
+};
+
+static struct pad_ofs_size pad_desc[] = {
+ HPICMN_PAD_OFS_AND_SIZE(c_channel), /* HPI_PAD_CHANNEL_NAME */
+ HPICMN_PAD_OFS_AND_SIZE(c_artist), /* HPI_PAD_ARTIST */
+ HPICMN_PAD_OFS_AND_SIZE(c_title), /* HPI_PAD_TITLE */
+ HPICMN_PAD_OFS_AND_SIZE(c_comment), /* HPI_PAD_COMMENT */
+};
+
+/** CheckControlCache checks the cache and fills the struct hpi_response
+ * accordingly. It returns one if a cache hit occurred, zero otherwise.
+ */
+short hpi_check_control_cache(struct hpi_control_cache *p_cache,
+ struct hpi_message *phm, struct hpi_response *phr)
+{
+ short found = 1;
+ u16 control_index;
+ struct hpi_control_cache_info *pI;
+ struct hpi_control_cache_single *pC;
+ struct hpi_control_cache_pad *p_pad;
+
+ if (!find_control(phm, p_cache, &pI, &control_index))
+ return 0;
+
+ phr->error = 0;
+
+ /* pC is the default cached control structure. May be cast to
+ something else in the following switch statement.
+ */
+ pC = (struct hpi_control_cache_single *)pI;
+ p_pad = (struct hpi_control_cache_pad *)pI;
+
+ switch (pI->control_type) {
+
+ case HPI_CONTROL_METER:
+ if (phm->u.c.attribute == HPI_METER_PEAK) {
+ phr->u.c.an_log_value[0] = pC->u.p.an_log_peak[0];
+ phr->u.c.an_log_value[1] = pC->u.p.an_log_peak[1];
+ } else if (phm->u.c.attribute == HPI_METER_RMS) {
+ phr->u.c.an_log_value[0] = pC->u.p.an_logRMS[0];
+ phr->u.c.an_log_value[1] = pC->u.p.an_logRMS[1];
+ } else
+ found = 0;
+ break;
+ case HPI_CONTROL_VOLUME:
+ if (phm->u.c.attribute == HPI_VOLUME_GAIN) {
+ phr->u.c.an_log_value[0] = pC->u.v.an_log[0];
+ phr->u.c.an_log_value[1] = pC->u.v.an_log[1];
+ } else
+ found = 0;
+ break;
+ case HPI_CONTROL_MULTIPLEXER:
+ if (phm->u.c.attribute == HPI_MULTIPLEXER_SOURCE) {
+ phr->u.c.param1 = pC->u.x.source_node_type;
+ phr->u.c.param2 = pC->u.x.source_node_index;
+ } else {
+ found = 0;
+ }
+ break;
+ case HPI_CONTROL_CHANNEL_MODE:
+ if (phm->u.c.attribute == HPI_CHANNEL_MODE_MODE)
+ phr->u.c.param1 = pC->u.m.mode;
+ else
+ found = 0;
+ break;
+ case HPI_CONTROL_LEVEL:
+ if (phm->u.c.attribute == HPI_LEVEL_GAIN) {
+ phr->u.c.an_log_value[0] = pC->u.l.an_log[0];
+ phr->u.c.an_log_value[1] = pC->u.l.an_log[1];
+ } else
+ found = 0;
+ break;
+ case HPI_CONTROL_TUNER:
+ {
+ struct hpi_control_cache_single *pCT =
+ (struct hpi_control_cache_single *)pI;
+ if (phm->u.c.attribute == HPI_TUNER_FREQ)
+ phr->u.c.param1 = pCT->u.t.freq_ink_hz;
+ else if (phm->u.c.attribute == HPI_TUNER_BAND)
+ phr->u.c.param1 = pCT->u.t.band;
+ else if ((phm->u.c.attribute == HPI_TUNER_LEVEL)
+ && (phm->u.c.param1 ==
+ HPI_TUNER_LEVEL_AVERAGE))
+ phr->u.c.param1 = pCT->u.t.level;
+ else
+ found = 0;
+ }
+ break;
+ case HPI_CONTROL_AESEBU_RECEIVER:
+ if (phm->u.c.attribute == HPI_AESEBURX_ERRORSTATUS)
+ phr->u.c.param1 = pC->u.aes3rx.error_status;
+ else if (phm->u.c.attribute == HPI_AESEBURX_FORMAT)
+ phr->u.c.param1 = pC->u.aes3rx.source;
+ else
+ found = 0;
+ break;
+ case HPI_CONTROL_AESEBU_TRANSMITTER:
+ if (phm->u.c.attribute == HPI_AESEBUTX_FORMAT)
+ phr->u.c.param1 = pC->u.aes3tx.format;
+ else
+ found = 0;
+ break;
+ case HPI_CONTROL_TONEDETECTOR:
+ if (phm->u.c.attribute == HPI_TONEDETECTOR_STATE)
+ phr->u.c.param1 = pC->u.tone.state;
+ else
+ found = 0;
+ break;
+ case HPI_CONTROL_SILENCEDETECTOR:
+ if (phm->u.c.attribute == HPI_SILENCEDETECTOR_STATE) {
+ phr->u.c.param1 = pC->u.silence.state;
+ phr->u.c.param2 = pC->u.silence.count;
+ } else
+ found = 0;
+ break;
+ case HPI_CONTROL_MICROPHONE:
+ if (phm->u.c.attribute == HPI_MICROPHONE_PHANTOM_POWER)
+ phr->u.c.param1 = pC->u.phantom_power.state;
+ else
+ found = 0;
+ break;
+ case HPI_CONTROL_SAMPLECLOCK:
+ if (phm->u.c.attribute == HPI_SAMPLECLOCK_SOURCE)
+ phr->u.c.param1 = pC->u.clk.source;
+ else if (phm->u.c.attribute == HPI_SAMPLECLOCK_SOURCE_INDEX) {
+ if (pC->u.clk.source_index ==
+ HPI_ERROR_ILLEGAL_CACHE_VALUE) {
+ phr->u.c.param1 = 0;
+ phr->error = HPI_ERROR_INVALID_OPERATION;
+ } else
+ phr->u.c.param1 = pC->u.clk.source_index;
+ } else if (phm->u.c.attribute == HPI_SAMPLECLOCK_SAMPLERATE)
+ phr->u.c.param1 = pC->u.clk.sample_rate;
+ else
+ found = 0;
+ break;
+ case HPI_CONTROL_PAD:
+
+ if (!(p_pad->field_valid_flags & (1 <<
+ HPI_CTL_ATTR_INDEX(phm->u.c.
+ attribute)))) {
+ phr->error = HPI_ERROR_INVALID_CONTROL_ATTRIBUTE;
+ break;
+ }
+
+ if (phm->u.c.attribute == HPI_PAD_PROGRAM_ID)
+ phr->u.c.param1 = p_pad->pI;
+ else if (phm->u.c.attribute == HPI_PAD_PROGRAM_TYPE)
+ phr->u.c.param1 = p_pad->pTY;
+ else {
+ unsigned int index =
+ HPI_CTL_ATTR_INDEX(phm->u.c.attribute) - 1;
+ unsigned int offset = phm->u.c.param1;
+ unsigned int pad_string_len, field_size;
+ char *pad_string;
+ unsigned int tocopy;
+
+ HPI_DEBUG_LOG(VERBOSE, "PADS HPI_PADS_ %d\n",
+ phm->u.c.attribute);
+
+ if (index > ARRAY_SIZE(pad_desc) - 1) {
+ phr->error =
+ HPI_ERROR_INVALID_CONTROL_ATTRIBUTE;
+ break;
+ }
+
+ pad_string = ((char *)p_pad) + pad_desc[index].offset;
+ field_size = pad_desc[index].field_size;
+ /* Ensure null terminator */
+ pad_string[field_size - 1] = 0;
+
+ pad_string_len = strlen(pad_string) + 1;
+
+ if (offset > pad_string_len) {
+ phr->error = HPI_ERROR_INVALID_CONTROL_VALUE;
+ break;
+ }
+
+ tocopy = pad_string_len - offset;
+ if (tocopy > sizeof(phr->u.cu.chars8.sz_data))
+ tocopy = sizeof(phr->u.cu.chars8.sz_data);
+
+ HPI_DEBUG_LOG(VERBOSE,
+ "PADS memcpy(%d), offset %d \n", tocopy,
+ offset);
+ memcpy(phr->u.cu.chars8.sz_data, &pad_string[offset],
+ tocopy);
+
+ phr->u.cu.chars8.remaining_chars =
+ pad_string_len - offset - tocopy;
+ }
+ break;
+ default:
+ found = 0;
+ break;
+ }
+
+ if (found)
+ HPI_DEBUG_LOG(VERBOSE,
+ "cached adap %d, ctl %d, type %d, attr %d\n",
+ phm->adapter_index, pI->control_index,
+ pI->control_type, phm->u.c.attribute);
+ else
+ HPI_DEBUG_LOG(VERBOSE,
+ "uncached adap %d, ctl %d, ctl type %d\n",
+ phm->adapter_index, pI->control_index,
+ pI->control_type);
+
+ if (found)
+ phr->size =
+ sizeof(struct hpi_response_header) +
+ sizeof(struct hpi_control_res);
+
+ return found;
+}
+
+/** Updates the cache with Set values.
+
+Only update if no error.
+Volume and Level return the limited values in the response, so use these.
+Multiplexer does not, so use the values sent in the message.
+*/
+void hpi_sync_control_cache(struct hpi_control_cache *p_cache,
+ struct hpi_message *phm, struct hpi_response *phr)
+{
+ u16 control_index;
+ struct hpi_control_cache_single *pC;
+ struct hpi_control_cache_info *pI;
+
+ if (!find_control(phm, p_cache, &pI, &control_index))
+ return;
+
+ /* pC is the default cached control structure.
+ May be cast to something else in the following switch statement.
+ */
+ pC = (struct hpi_control_cache_single *)pI;
+
+ switch (pI->control_type) {
+ case HPI_CONTROL_VOLUME:
+ if (phm->u.c.attribute == HPI_VOLUME_GAIN) {
+ pC->u.v.an_log[0] = phr->u.c.an_log_value[0];
+ pC->u.v.an_log[1] = phr->u.c.an_log_value[1];
+ }
+ break;
+ case HPI_CONTROL_MULTIPLEXER:
+ /* mux does not return its setting on Set command. */
+ if (phr->error)
+ return;
+ if (phm->u.c.attribute == HPI_MULTIPLEXER_SOURCE) {
+ pC->u.x.source_node_type = (u16)phm->u.c.param1;
+ pC->u.x.source_node_index = (u16)phm->u.c.param2;
+ }
+ break;
+ case HPI_CONTROL_CHANNEL_MODE:
+ /* mode does not return its setting on Set command. */
+ if (phr->error)
+ return;
+ if (phm->u.c.attribute == HPI_CHANNEL_MODE_MODE)
+ pC->u.m.mode = (u16)phm->u.c.param1;
+ break;
+ case HPI_CONTROL_LEVEL:
+ if (phm->u.c.attribute == HPI_LEVEL_GAIN) {
+ pC->u.v.an_log[0] = phr->u.c.an_log_value[0];
+ pC->u.v.an_log[1] = phr->u.c.an_log_value[1];
+ }
+ break;
+ case HPI_CONTROL_MICROPHONE:
+ if (phm->u.c.attribute == HPI_MICROPHONE_PHANTOM_POWER)
+ pC->u.phantom_power.state = (u16)phm->u.c.param1;
+ break;
+ case HPI_CONTROL_AESEBU_TRANSMITTER:
+ if (phr->error)
+ return;
+ if (phm->u.c.attribute == HPI_AESEBUTX_FORMAT)
+ pC->u.aes3tx.format = phm->u.c.param1;
+ break;
+ case HPI_CONTROL_AESEBU_RECEIVER:
+ if (phr->error)
+ return;
+ if (phm->u.c.attribute == HPI_AESEBURX_FORMAT)
+ pC->u.aes3rx.source = phm->u.c.param1;
+ break;
+ case HPI_CONTROL_SAMPLECLOCK:
+ if (phr->error)
+ return;
+ if (phm->u.c.attribute == HPI_SAMPLECLOCK_SOURCE)
+ pC->u.clk.source = (u16)phm->u.c.param1;
+ else if (phm->u.c.attribute == HPI_SAMPLECLOCK_SOURCE_INDEX)
+ pC->u.clk.source_index = (u16)phm->u.c.param1;
+ else if (phm->u.c.attribute == HPI_SAMPLECLOCK_SAMPLERATE)
+ pC->u.clk.sample_rate = phm->u.c.param1;
+ break;
+ default:
+ break;
+ }
+}
+
+struct hpi_control_cache *hpi_alloc_control_cache(const u32
+ number_of_controls, const u32 size_in_bytes,
+ struct hpi_control_cache_info *pDSP_control_buffer)
+{
+ struct hpi_control_cache *p_cache =
+ kmalloc(sizeof(*p_cache), GFP_KERNEL);
+
+ if (!p_cache)
+ return NULL;
+
+ p_cache->cache_size_in_bytes = size_in_bytes;
+ p_cache->control_count = number_of_controls;
+ p_cache->p_cache =
+ (struct hpi_control_cache_single *)pDSP_control_buffer;
+ p_cache->init = 0;
+ p_cache->p_info =
+ kmalloc(sizeof(*p_cache->p_info) * p_cache->control_count,
+ GFP_KERNEL);
+ if (!p_cache->p_info) {
+ /* propagate allocation failure instead of returning a
+ half-initialized cache */
+ kfree(p_cache);
+ return NULL;
+ }
+ return p_cache;
+}
+
+void hpi_free_control_cache(struct hpi_control_cache *p_cache)
+{
+ if ((p_cache->init) && (p_cache->p_info)) {
+ kfree(p_cache->p_info);
+ p_cache->p_info = NULL;
+ p_cache->init = 0;
+ kfree(p_cache);
+ }
+}
+
+static void subsys_message(struct hpi_message *phm, struct hpi_response *phr)
+{
+
+ switch (phm->function) {
+ case HPI_SUBSYS_OPEN:
+ case HPI_SUBSYS_CLOSE:
+ case HPI_SUBSYS_DRIVER_UNLOAD:
+ phr->error = 0;
+ break;
+ case HPI_SUBSYS_DRIVER_LOAD:
+ wipe_adapter_list();
+ hpios_alistlock_init(&adapters);
+ phr->error = 0;
+ break;
+ case HPI_SUBSYS_GET_INFO:
+ subsys_get_adapters(phr);
+ break;
+ case HPI_SUBSYS_CREATE_ADAPTER:
+ case HPI_SUBSYS_DELETE_ADAPTER:
+ phr->error = 0;
+ break;
+ default:
+ phr->error = HPI_ERROR_INVALID_FUNC;
+ break;
+ }
+}
+
+void HPI_COMMON(struct hpi_message *phm, struct hpi_response *phr)
+{
+ switch (phm->type) {
+ case HPI_TYPE_MESSAGE:
+ switch (phm->object) {
+ case HPI_OBJ_SUBSYSTEM:
+ subsys_message(phm, phr);
+ break;
+ }
+ break;
+
+ default:
+ phr->error = HPI_ERROR_INVALID_TYPE;
+ break;
+ }
+}
diff --git a/sound/pci/asihpi/hpicmn.h b/sound/pci/asihpi/hpicmn.h
new file mode 100644
index 000000000000..6229022f56cb
--- /dev/null
+++ b/sound/pci/asihpi/hpicmn.h
@@ -0,0 +1,64 @@
+/**
+
+ AudioScience HPI driver
+ Copyright (C) 1997-2010 AudioScience Inc. <support@audioscience.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation;
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+*/
+
+struct hpi_adapter_obj {
+ struct hpi_pci pci; /* PCI info - bus#,dev#,address etc */
+ u16 adapter_type; /* ASI6701 etc */
+ u16 index; /* adapter index */
+ u16 open; /* =1 when adapter open */
+ u16 mixer_open;
+
+ struct hpios_spinlock dsp_lock;
+
+ u16 dsp_crashed;
+ u16 has_control_cache;
+ void *priv;
+};
+
+struct hpi_control_cache {
+ u32 init; /**< indicates whether the
+ structures are initialized */
+ u32 control_count;
+ u32 cache_size_in_bytes;
+ struct hpi_control_cache_info
+ **p_info; /**< pointer to allocated memory of
+ lookup pointers. */
+ struct hpi_control_cache_single
+ *p_cache; /**< pointer to DSP's control cache. */
+};
+
+struct hpi_adapter_obj *hpi_find_adapter(u16 adapter_index);
+u16 hpi_add_adapter(struct hpi_adapter_obj *pao);
+
+void hpi_delete_adapter(struct hpi_adapter_obj *pao);
+
+short hpi_check_control_cache(struct hpi_control_cache *pC,
+ struct hpi_message *phm, struct hpi_response *phr);
+struct hpi_control_cache *hpi_alloc_control_cache(const u32
+ number_of_controls, const u32 size_in_bytes,
+ struct hpi_control_cache_info
+ *pDSP_control_buffer);
+void hpi_free_control_cache(struct hpi_control_cache *p_cache);
+
+void hpi_sync_control_cache(struct hpi_control_cache *pC,
+ struct hpi_message *phm, struct hpi_response *phr);
+u16 hpi_validate_response(struct hpi_message *phm, struct hpi_response *phr);
+short hpi_check_buffer_mapping(struct hpi_control_cache *p_cache,
+ struct hpi_message *phm, void **p, unsigned int *pN);
diff --git a/sound/pci/asihpi/hpidebug.c b/sound/pci/asihpi/hpidebug.c
new file mode 100644
index 000000000000..4cd85a401b34
--- /dev/null
+++ b/sound/pci/asihpi/hpidebug.c
@@ -0,0 +1,225 @@
+/************************************************************************
+
+ AudioScience HPI driver
+ Copyright (C) 1997-2010 AudioScience Inc. <support@audioscience.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation;
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+Debug macro translation.
+
+************************************************************************/
+
+#include "hpi_internal.h"
+#include "hpidebug.h"
+
+/* Debug level: 0 quiet, 1 informative, 2 debug, 3 verbose debug. */
+int hpi_debug_level = HPI_DEBUG_LEVEL_DEFAULT;
+
+void hpi_debug_init(void)
+{
+ printk(KERN_INFO "debug start\n");
+}
+
+int hpi_debug_level_set(int level)
+{
+ int old_level;
+
+ old_level = hpi_debug_level;
+ hpi_debug_level = level;
+ return old_level;
+}
+
+int hpi_debug_level_get(void)
+{
+ return hpi_debug_level;
+}
+
+#ifdef HPIOS_DEBUG_PRINT
+/* implies OS has no printf-like function */
+#include <stdarg.h>
+
+void hpi_debug_printf(char *fmt, ...)
+{
+ va_list arglist;
+ char buffer[128];
+
+ va_start(arglist, fmt);
+ /* format into the local buffer (this step was missing, leaving the
+ buffer uninitialized); assumes a C99 vsnprintf is available */
+ vsnprintf(buffer, sizeof(buffer), fmt, arglist);
+
+ if (buffer[0])
+ HPIOS_DEBUG_PRINT(buffer);
+ va_end(arglist);
+}
+#endif
+
+struct treenode {
+ void *array;
+ unsigned int num_elements;
+};
+
+#define make_treenode_from_array(nodename, array) \
+static void *tmp_strarray_##nodename[] = array; \
+static struct treenode nodename = { \
+ &tmp_strarray_##nodename, \
+ ARRAY_SIZE(tmp_strarray_##nodename) \
+};
+
+#define get_treenode_elem(node_ptr, idx, type) \
+ (&(*((type *)(node_ptr)->array)[idx]))
+
+make_treenode_from_array(hpi_control_type_strings, HPI_CONTROL_TYPE_STRINGS)
+
+ make_treenode_from_array(hpi_subsys_strings, HPI_SUBSYS_STRINGS)
+ make_treenode_from_array(hpi_adapter_strings, HPI_ADAPTER_STRINGS)
+ make_treenode_from_array(hpi_istream_strings, HPI_ISTREAM_STRINGS)
+ make_treenode_from_array(hpi_ostream_strings, HPI_OSTREAM_STRINGS)
+ make_treenode_from_array(hpi_mixer_strings, HPI_MIXER_STRINGS)
+ make_treenode_from_array(hpi_node_strings,
+ {
+ "NODE is invalid object"})
+
+ make_treenode_from_array(hpi_control_strings, HPI_CONTROL_STRINGS)
+ make_treenode_from_array(hpi_nvmemory_strings, HPI_OBJ_STRINGS)
+ make_treenode_from_array(hpi_digitalio_strings, HPI_DIGITALIO_STRINGS)
+ make_treenode_from_array(hpi_watchdog_strings, HPI_WATCHDOG_STRINGS)
+ make_treenode_from_array(hpi_clock_strings, HPI_CLOCK_STRINGS)
+ make_treenode_from_array(hpi_profile_strings, HPI_PROFILE_STRINGS)
+ make_treenode_from_array(hpi_asyncevent_strings, HPI_ASYNCEVENT_STRINGS)
+#define HPI_FUNCTION_STRINGS \
+{ \
+ &hpi_subsys_strings,\
+ &hpi_adapter_strings,\
+ &hpi_ostream_strings,\
+ &hpi_istream_strings,\
+ &hpi_mixer_strings,\
+ &hpi_node_strings,\
+ &hpi_control_strings,\
+ &hpi_nvmemory_strings,\
+ &hpi_digitalio_strings,\
+ &hpi_watchdog_strings,\
+ &hpi_clock_strings,\
+ &hpi_profile_strings,\
+ &hpi_control_strings, \
+ &hpi_asyncevent_strings \
+};
+ make_treenode_from_array(hpi_function_strings, HPI_FUNCTION_STRINGS)
+
+ compile_time_assert(HPI_OBJ_MAXINDEX == 14, obj_list_doesnt_match);
+
+static char *hpi_function_string(unsigned int function)
+{
+ unsigned int object;
+ struct treenode *tmp;
+
+ object = function / HPI_OBJ_FUNCTION_SPACING;
+ function = function - object * HPI_OBJ_FUNCTION_SPACING;
+
+ if (object == 0 || object == HPI_OBJ_NODE
+ || object > hpi_function_strings.num_elements)
+ return "invalid object";
+
+ tmp = get_treenode_elem(&hpi_function_strings, object - 1,
+ struct treenode *);
+
+ if (function == 0 || function > tmp->num_elements)
+ return "invalid function";
+
+ return get_treenode_elem(tmp, function - 1, char *);
+}
+
+void hpi_debug_message(struct hpi_message *phm, char *sz_fileline)
+{
+ if (phm) {
+ if ((phm->object <= HPI_OBJ_MAXINDEX) && phm->object) {
+ u16 index = 0;
+ u16 attrib = 0;
+ int is_control = 0;
+
+ index = phm->obj_index;
+ switch (phm->object) {
+ case HPI_OBJ_ADAPTER:
+ case HPI_OBJ_PROFILE:
+ break;
+ case HPI_OBJ_MIXER:
+ if (phm->function ==
+ HPI_MIXER_GET_CONTROL_BY_INDEX)
+ index = phm->u.m.control_index;
+ break;
+ case HPI_OBJ_OSTREAM:
+ case HPI_OBJ_ISTREAM:
+ break;
+
+ case HPI_OBJ_CONTROLEX:
+ case HPI_OBJ_CONTROL:
+ if (phm->version == 1)
+ attrib = HPI_CTL_ATTR(UNIVERSAL, 1);
+ else
+ attrib = phm->u.c.attribute;
+ is_control = 1;
+ break;
+ default:
+ break;
+ }
+
+ if (is_control && (attrib & 0xFF00)) {
+ int control_type = (attrib & 0xFF00) >> 8;
+ int attr_index = HPI_CTL_ATTR_INDEX(attrib);
+				/* note the KERN facility level
+				   is in sz_fileline already */
+ printk("%s adapter %d %s "
+ "ctrl_index x%04x %s %d\n",
+ sz_fileline, phm->adapter_index,
+ hpi_function_string(phm->function),
+ index,
+ get_treenode_elem
+ (&hpi_control_type_strings,
+ control_type, char *),
+ attr_index);
+
+ } else
+ printk("%s adapter %d %s "
+				"idx x%04x attr x%04x\n",
+ sz_fileline, phm->adapter_index,
+ hpi_function_string(phm->function),
+ index, attrib);
+ } else {
+ printk("adap=%d, invalid obj=%d, func=0x%x\n",
+ phm->adapter_index, phm->object,
+ phm->function);
+ }
+ } else
+ printk(KERN_ERR
+ "NULL message pointer to hpi_debug_message!\n");
+}
+
+void hpi_debug_data(u16 *pdata, u32 len)
+{
+ u32 i;
+ int j;
+ int k;
+ int lines;
+ int cols = 8;
+
+ lines = (len + cols - 1) / cols;
+ if (lines > 8)
+ lines = 8;
+
+ for (i = 0, j = 0; j < lines; j++) {
+ printk(KERN_DEBUG "%p:", (pdata + i));
+
+ for (k = 0; k < cols && i < len; i++, k++)
+ printk("%s%04x", k == 0 ? "" : " ", pdata[i]);
+
+ printk("\n");
+ }
+}
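+
+/*
+ * For reference, hpi_debug_data() prints at most 8 rows of 8 words,
+ * each row prefixed with the buffer address; the output looks like
+ * (addresses and values illustrative):
+ *
+ *	ffff880012345678: 0001 0002 0003 0004 0005 0006 0007 0008
+ *	ffff880012345688: 0009 000a 000b 000c 000d 000e 000f 0010
+ */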
diff --git a/sound/pci/asihpi/hpidebug.h b/sound/pci/asihpi/hpidebug.h
new file mode 100644
index 000000000000..44dccadcc25b
--- /dev/null
+++ b/sound/pci/asihpi/hpidebug.h
@@ -0,0 +1,385 @@
+/*****************************************************************************
+
+ AudioScience HPI driver
+ Copyright (C) 1997-2010 AudioScience Inc. <support@audioscience.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation;
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+Debug macros.
+
+*****************************************************************************/
+
+#ifndef _HPIDEBUG_H
+#define _HPIDEBUG_H
+
+#include "hpi_internal.h"
+
+/* Define debugging levels. */
+enum { HPI_DEBUG_LEVEL_ERROR = 0, /* always log errors */
+ HPI_DEBUG_LEVEL_WARNING = 1,
+ HPI_DEBUG_LEVEL_NOTICE = 2,
+ HPI_DEBUG_LEVEL_INFO = 3,
+ HPI_DEBUG_LEVEL_DEBUG = 4,
+ HPI_DEBUG_LEVEL_VERBOSE = 5 /* same printk level as DEBUG */
+};
+
+#define HPI_DEBUG_LEVEL_DEFAULT HPI_DEBUG_LEVEL_NOTICE
+
+/* an OS can define an extra flag string that is appended to
+ the start of each message, eg see hpios_linux.h */
+
+#ifdef SOURCEFILE_NAME
+#define FILE_LINE SOURCEFILE_NAME ":" __stringify(__LINE__) " "
+#else
+#define FILE_LINE __FILE__ ":" __stringify(__LINE__) " "
+#endif
+
+#if defined(HPI_DEBUG) && defined(_WINDOWS)
+#define HPI_DEBUGBREAK() debug_break()
+#else
+#define HPI_DEBUGBREAK()
+#endif
+
+#define HPI_DEBUG_ASSERT(expression) \
+ do { \
+ if (!(expression)) {\
+ printk(KERN_ERR FILE_LINE\
+ "ASSERT " __stringify(expression));\
+ HPI_DEBUGBREAK();\
+ } \
+ } while (0)
+
+#define HPI_DEBUG_LOG(level, ...) \
+ do { \
+ if (hpi_debug_level >= HPI_DEBUG_LEVEL_##level) { \
+ printk(HPI_DEBUG_FLAG_##level \
+ FILE_LINE __VA_ARGS__); \
+ } \
+ } while (0)
+
+void hpi_debug_init(void);
+int hpi_debug_level_set(int level);
+int hpi_debug_level_get(void);
+/* needed by Linux driver for dynamic debug level changes */
+extern int hpi_debug_level;
+
+void hpi_debug_message(struct hpi_message *phm, char *sz_fileline);
+
+void hpi_debug_data(u16 *pdata, u32 len);
+
+#define HPI_DEBUG_DATA(pdata, len) \
+ do { \
+ if (hpi_debug_level >= HPI_DEBUG_LEVEL_VERBOSE) \
+ hpi_debug_data(pdata, len); \
+ } while (0)
+
+#define HPI_DEBUG_MESSAGE(level, phm) \
+ do { \
+ if (hpi_debug_level >= HPI_DEBUG_LEVEL_##level) { \
+			hpi_debug_message(phm, HPI_DEBUG_FLAG_##level \
+ FILE_LINE __stringify(level));\
+ } \
+ } while (0)
+
+#define HPI_DEBUG_RESPONSE(phr) \
+ do { \
+ if ((hpi_debug_level >= HPI_DEBUG_LEVEL_DEBUG) && (phr->error))\
+ HPI_DEBUG_LOG(ERROR, \
+ "HPI response - error# %d\n", \
+ phr->error); \
+ else if (hpi_debug_level >= HPI_DEBUG_LEVEL_VERBOSE) \
+ HPI_DEBUG_LOG(VERBOSE, "HPI response OK\n");\
+ } while (0)
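+
+/*
+ * Typical use of the logging macros (a sketch; the message text is
+ * illustrative). FILE_LINE and the per-level flag are prepended by the
+ * macro itself, so callers supply only the printf-style payload:
+ *
+ *	HPI_DEBUG_LOG(INFO, "adapter %d opened\n", adapter_index);
+ *	HPI_DEBUG_ASSERT(phm != NULL);
+ *	HPI_DEBUG_MESSAGE(DEBUG, phm);
+ *	HPI_DEBUG_RESPONSE(phr);
+ */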
+
+#ifndef compile_time_assert
+#define compile_time_assert(cond, msg) \
+ typedef char msg[(cond) ? 1 : -1]
+#endif
+
+ /* check that size is exactly some number */
+#define function_count_check(sym, size) \
+ compile_time_assert((sym##_FUNCTION_COUNT) == (size),\
+ strings_match_defs_##sym)
+
+/* These strings should be generated using a macro which defines
+ the corresponding symbol values. */
+#define HPI_OBJ_STRINGS \
+{ \
+ "HPI_OBJ_SUBSYSTEM", \
+ "HPI_OBJ_ADAPTER", \
+ "HPI_OBJ_OSTREAM", \
+ "HPI_OBJ_ISTREAM", \
+ "HPI_OBJ_MIXER", \
+ "HPI_OBJ_NODE", \
+ "HPI_OBJ_CONTROL", \
+ "HPI_OBJ_NVMEMORY", \
+ "HPI_OBJ_DIGITALIO", \
+ "HPI_OBJ_WATCHDOG", \
+ "HPI_OBJ_CLOCK", \
+ "HPI_OBJ_PROFILE", \
+ "HPI_OBJ_CONTROLEX" \
+}
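+
+/*
+ * One possible generation scheme, as suggested above (a sketch with
+ * hypothetical names, not used by this driver): a single X-macro list
+ * expands to both the symbols and the matching strings, so the two
+ * cannot drift apart.
+ *
+ *	#define EXAMPLE_LIST(X) X(EXAMPLE_FIRST) X(EXAMPLE_SECOND)
+ *	#define AS_ENUM(sym)	sym,
+ *	#define AS_STRING(sym)	#sym,
+ *
+ *	enum example_sym { EXAMPLE_LIST(AS_ENUM) };
+ *	static const char *example_names[] = { EXAMPLE_LIST(AS_STRING) };
+ */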
+
+#define HPI_SUBSYS_STRINGS \
+{ \
+ "HPI_SUBSYS_OPEN", \
+ "HPI_SUBSYS_GET_VERSION", \
+ "HPI_SUBSYS_GET_INFO", \
+ "HPI_SUBSYS_FIND_ADAPTERS", \
+ "HPI_SUBSYS_CREATE_ADAPTER",\
+ "HPI_SUBSYS_CLOSE", \
+ "HPI_SUBSYS_DELETE_ADAPTER", \
+ "HPI_SUBSYS_DRIVER_LOAD", \
+ "HPI_SUBSYS_DRIVER_UNLOAD", \
+ "HPI_SUBSYS_READ_PORT_8", \
+ "HPI_SUBSYS_WRITE_PORT_8", \
+ "HPI_SUBSYS_GET_NUM_ADAPTERS",\
+ "HPI_SUBSYS_GET_ADAPTER", \
+ "HPI_SUBSYS_SET_NETWORK_INTERFACE"\
+}
+function_count_check(HPI_SUBSYS, 14);
+
+#define HPI_ADAPTER_STRINGS \
+{ \
+ "HPI_ADAPTER_OPEN", \
+ "HPI_ADAPTER_CLOSE", \
+ "HPI_ADAPTER_GET_INFO", \
+ "HPI_ADAPTER_GET_ASSERT", \
+ "HPI_ADAPTER_TEST_ASSERT", \
+ "HPI_ADAPTER_SET_MODE", \
+ "HPI_ADAPTER_GET_MODE", \
+ "HPI_ADAPTER_ENABLE_CAPABILITY",\
+ "HPI_ADAPTER_SELFTEST", \
+ "HPI_ADAPTER_FIND_OBJECT", \
+ "HPI_ADAPTER_QUERY_FLASH", \
+ "HPI_ADAPTER_START_FLASH", \
+ "HPI_ADAPTER_PROGRAM_FLASH", \
+ "HPI_ADAPTER_SET_PROPERTY", \
+ "HPI_ADAPTER_GET_PROPERTY", \
+ "HPI_ADAPTER_ENUM_PROPERTY", \
+ "HPI_ADAPTER_MODULE_INFO", \
+ "HPI_ADAPTER_DEBUG_READ" \
+}
+
+function_count_check(HPI_ADAPTER, 18);
+
+#define HPI_OSTREAM_STRINGS \
+{ \
+ "HPI_OSTREAM_OPEN", \
+ "HPI_OSTREAM_CLOSE", \
+ "HPI_OSTREAM_WRITE", \
+ "HPI_OSTREAM_START", \
+ "HPI_OSTREAM_STOP", \
+ "HPI_OSTREAM_RESET", \
+ "HPI_OSTREAM_GET_INFO", \
+ "HPI_OSTREAM_QUERY_FORMAT", \
+ "HPI_OSTREAM_DATA", \
+ "HPI_OSTREAM_SET_VELOCITY", \
+ "HPI_OSTREAM_SET_PUNCHINOUT", \
+ "HPI_OSTREAM_SINEGEN", \
+ "HPI_OSTREAM_ANC_RESET", \
+ "HPI_OSTREAM_ANC_GET_INFO", \
+ "HPI_OSTREAM_ANC_READ", \
+ "HPI_OSTREAM_SET_TIMESCALE",\
+ "HPI_OSTREAM_SET_FORMAT", \
+ "HPI_OSTREAM_HOSTBUFFER_ALLOC", \
+ "HPI_OSTREAM_HOSTBUFFER_FREE", \
+ "HPI_OSTREAM_GROUP_ADD",\
+ "HPI_OSTREAM_GROUP_GETMAP", \
+ "HPI_OSTREAM_GROUP_RESET", \
+ "HPI_OSTREAM_HOSTBUFFER_GET_INFO", \
+ "HPI_OSTREAM_WAIT_START", \
+}
+function_count_check(HPI_OSTREAM, 24);
+
+#define HPI_ISTREAM_STRINGS \
+{ \
+ "HPI_ISTREAM_OPEN", \
+ "HPI_ISTREAM_CLOSE", \
+ "HPI_ISTREAM_SET_FORMAT", \
+ "HPI_ISTREAM_READ", \
+ "HPI_ISTREAM_START", \
+ "HPI_ISTREAM_STOP", \
+ "HPI_ISTREAM_RESET", \
+ "HPI_ISTREAM_GET_INFO", \
+ "HPI_ISTREAM_QUERY_FORMAT", \
+ "HPI_ISTREAM_ANC_RESET", \
+ "HPI_ISTREAM_ANC_GET_INFO", \
+ "HPI_ISTREAM_ANC_WRITE", \
+ "HPI_ISTREAM_HOSTBUFFER_ALLOC",\
+ "HPI_ISTREAM_HOSTBUFFER_FREE", \
+ "HPI_ISTREAM_GROUP_ADD", \
+ "HPI_ISTREAM_GROUP_GETMAP", \
+ "HPI_ISTREAM_GROUP_RESET", \
+ "HPI_ISTREAM_HOSTBUFFER_GET_INFO", \
+ "HPI_ISTREAM_WAIT_START", \
+}
+function_count_check(HPI_ISTREAM, 19);
+
+#define HPI_MIXER_STRINGS \
+{ \
+ "HPI_MIXER_OPEN", \
+ "HPI_MIXER_CLOSE", \
+ "HPI_MIXER_GET_INFO", \
+ "HPI_MIXER_GET_NODE_INFO", \
+ "HPI_MIXER_GET_CONTROL", \
+ "HPI_MIXER_SET_CONNECTION", \
+ "HPI_MIXER_GET_CONNECTIONS", \
+ "HPI_MIXER_GET_CONTROL_BY_INDEX", \
+ "HPI_MIXER_GET_CONTROL_ARRAY_BY_INDEX", \
+ "HPI_MIXER_GET_CONTROL_MULTIPLE_VALUES", \
+ "HPI_MIXER_STORE", \
+}
+function_count_check(HPI_MIXER, 11);
+
+#define HPI_CONTROL_STRINGS \
+{ \
+ "HPI_CONTROL_GET_INFO", \
+ "HPI_CONTROL_GET_STATE", \
+ "HPI_CONTROL_SET_STATE" \
+}
+function_count_check(HPI_CONTROL, 3);
+
+#define HPI_NVMEMORY_STRINGS \
+{ \
+ "HPI_NVMEMORY_OPEN", \
+ "HPI_NVMEMORY_READ_BYTE", \
+ "HPI_NVMEMORY_WRITE_BYTE" \
+}
+function_count_check(HPI_NVMEMORY, 3);
+
+#define HPI_DIGITALIO_STRINGS \
+{ \
+ "HPI_GPIO_OPEN", \
+ "HPI_GPIO_READ_BIT", \
+ "HPI_GPIO_WRITE_BIT", \
+ "HPI_GPIO_READ_ALL", \
+ "HPI_GPIO_WRITE_STATUS"\
+}
+function_count_check(HPI_GPIO, 5);
+
+#define HPI_WATCHDOG_STRINGS \
+{ \
+ "HPI_WATCHDOG_OPEN", \
+ "HPI_WATCHDOG_SET_TIME", \
+ "HPI_WATCHDOG_PING" \
+}
+
+#define HPI_CLOCK_STRINGS \
+{ \
+ "HPI_CLOCK_OPEN", \
+ "HPI_CLOCK_SET_TIME", \
+ "HPI_CLOCK_GET_TIME" \
+}
+
+#define HPI_PROFILE_STRINGS \
+{ \
+ "HPI_PROFILE_OPEN_ALL", \
+ "HPI_PROFILE_START_ALL", \
+ "HPI_PROFILE_STOP_ALL", \
+ "HPI_PROFILE_GET", \
+ "HPI_PROFILE_GET_IDLECOUNT", \
+ "HPI_PROFILE_GET_NAME", \
+ "HPI_PROFILE_GET_UTILIZATION" \
+}
+function_count_check(HPI_PROFILE, 7);
+
+#define HPI_ASYNCEVENT_STRINGS \
+{ \
+ "HPI_ASYNCEVENT_OPEN",\
+	"HPI_ASYNCEVENT_CLOSE",\
+ "HPI_ASYNCEVENT_WAIT",\
+ "HPI_ASYNCEVENT_GETCOUNT",\
+ "HPI_ASYNCEVENT_GET",\
+ "HPI_ASYNCEVENT_SENDEVENTS"\
+}
+function_count_check(HPI_ASYNCEVENT, 6);
+
+#define HPI_CONTROL_TYPE_STRINGS \
+{ \
+ "null control", \
+ "HPI_CONTROL_CONNECTION", \
+ "HPI_CONTROL_VOLUME", \
+ "HPI_CONTROL_METER", \
+ "HPI_CONTROL_MUTE", \
+ "HPI_CONTROL_MULTIPLEXER", \
+ "HPI_CONTROL_AESEBU_TRANSMITTER", \
+ "HPI_CONTROL_AESEBU_RECEIVER", \
+ "HPI_CONTROL_LEVEL", \
+ "HPI_CONTROL_TUNER", \
+ "HPI_CONTROL_ONOFFSWITCH", \
+ "HPI_CONTROL_VOX", \
+ "HPI_CONTROL_AES18_TRANSMITTER", \
+ "HPI_CONTROL_AES18_RECEIVER", \
+ "HPI_CONTROL_AES18_BLOCKGENERATOR", \
+ "HPI_CONTROL_CHANNEL_MODE", \
+ "HPI_CONTROL_BITSTREAM", \
+ "HPI_CONTROL_SAMPLECLOCK", \
+ "HPI_CONTROL_MICROPHONE", \
+ "HPI_CONTROL_PARAMETRIC_EQ", \
+ "HPI_CONTROL_COMPANDER", \
+ "HPI_CONTROL_COBRANET", \
+ "HPI_CONTROL_TONE_DETECT", \
+ "HPI_CONTROL_SILENCE_DETECT", \
+ "HPI_CONTROL_PAD", \
+ "HPI_CONTROL_SRC" ,\
+ "HPI_CONTROL_UNIVERSAL" \
+}
+
+compile_time_assert((HPI_CONTROL_LAST_INDEX + 1 == 27),
+ controltype_strings_match_defs);
+
+#define HPI_SOURCENODE_STRINGS \
+{ \
+ "no source", \
+ "HPI_SOURCENODE_OSTREAM", \
+ "HPI_SOURCENODE_LINEIN", \
+ "HPI_SOURCENODE_AESEBU_IN", \
+ "HPI_SOURCENODE_TUNER", \
+ "HPI_SOURCENODE_RF", \
+ "HPI_SOURCENODE_CLOCK_SOURCE", \
+ "HPI_SOURCENODE_RAW_BITSTREAM", \
+ "HPI_SOURCENODE_MICROPHONE", \
+ "HPI_SOURCENODE_COBRANET", \
+ "HPI_SOURCENODE_ANALOG", \
+ "HPI_SOURCENODE_ADAPTER" \
+}
+
+compile_time_assert((HPI_SOURCENODE_LAST_INDEX - HPI_SOURCENODE_BASE + 1) ==
+ (12), sourcenode_strings_match_defs);
+
+#define HPI_DESTNODE_STRINGS \
+{ \
+ "no destination", \
+ "HPI_DESTNODE_ISTREAM", \
+ "HPI_DESTNODE_LINEOUT", \
+ "HPI_DESTNODE_AESEBU_OUT", \
+ "HPI_DESTNODE_RF", \
+ "HPI_DESTNODE_SPEAKER", \
+ "HPI_DESTNODE_COBRANET", \
+ "HPI_DESTNODE_ANALOG" \
+}
+compile_time_assert((HPI_DESTNODE_LAST_INDEX - HPI_DESTNODE_BASE + 1) == (8),
+ destnode_strings_match_defs);
+
+#define HPI_CONTROL_CHANNEL_MODE_STRINGS \
+{ \
+ "XXX HPI_CHANNEL_MODE_ERROR XXX", \
+ "HPI_CHANNEL_MODE_NORMAL", \
+ "HPI_CHANNEL_MODE_SWAP", \
+ "HPI_CHANNEL_MODE_LEFT_ONLY", \
+ "HPI_CHANNEL_MODE_RIGHT_ONLY" \
+}
+
+#endif /* _HPIDEBUG_H */
diff --git a/sound/pci/asihpi/hpidspcd.c b/sound/pci/asihpi/hpidspcd.c
new file mode 100644
index 000000000000..9b10d9a5c255
--- /dev/null
+++ b/sound/pci/asihpi/hpidspcd.c
@@ -0,0 +1,172 @@
+/***********************************************************************/
+/*!
+
+ AudioScience HPI driver
+ Copyright (C) 1997-2010 AudioScience Inc. <support@audioscience.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation;
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+\file
+Functions for reading DSP code to load into DSP
+
+(Linux only:) If DSPCODE_FIRMWARE_LOADER is defined, code is read using
+the hotplug firmware loader from individual dsp code files.
+
+Otherwise (DSPCODE_ARRAY is defined) code is read from linked arrays;
+in that case HPI_INCLUDE_**** must be defined and the appropriate
+hzz?????.c or hex?????.c linked in.
+
+ */
+/***********************************************************************/
+#define SOURCEFILE_NAME "hpidspcd.c"
+#include "hpidspcd.h"
+#include "hpidebug.h"
+
+/**
+ Header structure for binary dsp code file (see asidsp.doc)
+ This structure must match that used in s2bin.c for generation of asidsp.bin
+ */
+
+#ifndef DISABLE_PRAGMA_PACK1
+#pragma pack(push, 1)
+#endif
+
+struct code_header {
+ u32 size;
+ char type[4];
+ u32 adapter;
+ u32 version;
+ u32 crc;
+};
+
+#ifndef DISABLE_PRAGMA_PACK1
+#pragma pack(pop)
+#endif
+
+#define HPI_VER_DECIMAL ((int)(HPI_VER_MAJOR(HPI_VER) * 10000 + \
+ HPI_VER_MINOR(HPI_VER) * 100 + HPI_VER_RELEASE(HPI_VER)))
+
+/***********************************************************************/
+#include "linux/pci.h"
+/*-------------------------------------------------------------------*/
+short hpi_dsp_code_open(u32 adapter, struct dsp_code *ps_dsp_code,
+ u32 *pos_error_code)
+{
+ const struct firmware *ps_firmware = ps_dsp_code->ps_firmware;
+ struct code_header header;
+ char fw_name[20];
+ int err;
+
+ sprintf(fw_name, "asihpi/dsp%04x.bin", adapter);
+ HPI_DEBUG_LOG(INFO, "requesting firmware for %s\n", fw_name);
+
+ err = request_firmware(&ps_firmware, fw_name,
+ &ps_dsp_code->ps_dev->dev);
+ if (err != 0) {
+ HPI_DEBUG_LOG(ERROR, "%d, request_firmware failed for %s\n",
+ err, fw_name);
+ goto error1;
+ }
+ if (ps_firmware->size < sizeof(header)) {
+ HPI_DEBUG_LOG(ERROR, "header size too small %s\n", fw_name);
+ goto error2;
+ }
+ memcpy(&header, ps_firmware->data, sizeof(header));
+ if (header.adapter != adapter) {
+ HPI_DEBUG_LOG(ERROR, "adapter type incorrect %4x != %4x\n",
+ header.adapter, adapter);
+ goto error2;
+ }
+ if (header.size != ps_firmware->size) {
+		HPI_DEBUG_LOG(ERROR, "code size wrong %u != %lu\n",
+			header.size, (unsigned long)ps_firmware->size);
+ goto error2;
+ }
+
+ if (header.version / 10000 != HPI_VER_DECIMAL / 10000) {
+ HPI_DEBUG_LOG(ERROR,
+ "firmware major version mismatch "
+ "DSP image %d != driver %d\n", header.version,
+ HPI_VER_DECIMAL);
+ goto error2;
+ }
+
+ if (header.version != HPI_VER_DECIMAL) {
+ HPI_DEBUG_LOG(WARNING,
+ "version mismatch DSP image %d != driver %d\n",
+ header.version, HPI_VER_DECIMAL);
+ /* goto error2; still allow driver to load */
+ }
+
+ HPI_DEBUG_LOG(INFO, "dsp code %s opened\n", fw_name);
+ ps_dsp_code->ps_firmware = ps_firmware;
+ ps_dsp_code->block_length = header.size / sizeof(u32);
+ ps_dsp_code->word_count = sizeof(header) / sizeof(u32);
+ ps_dsp_code->version = header.version;
+ ps_dsp_code->crc = header.crc;
+ return 0;
+
+error2:
+ release_firmware(ps_firmware);
+error1:
+ ps_dsp_code->ps_firmware = NULL;
+ ps_dsp_code->block_length = 0;
+ return HPI_ERROR_DSP_FILE_NOT_FOUND;
+}
+
+/*-------------------------------------------------------------------*/
+void hpi_dsp_code_close(struct dsp_code *ps_dsp_code)
+{
+ if (ps_dsp_code->ps_firmware != NULL) {
+ HPI_DEBUG_LOG(DEBUG, "dsp code closed\n");
+ release_firmware(ps_dsp_code->ps_firmware);
+ ps_dsp_code->ps_firmware = NULL;
+ }
+}
+
+/*-------------------------------------------------------------------*/
+void hpi_dsp_code_rewind(struct dsp_code *ps_dsp_code)
+{
+ /* Go back to start of data, after header */
+ ps_dsp_code->word_count = sizeof(struct code_header) / sizeof(u32);
+}
+
+/*-------------------------------------------------------------------*/
+short hpi_dsp_code_read_word(struct dsp_code *ps_dsp_code, u32 *pword)
+{
+ if (ps_dsp_code->word_count + 1 > ps_dsp_code->block_length)
+ return (HPI_ERROR_DSP_FILE_FORMAT);
+
+ *pword = ((u32 *)(ps_dsp_code->ps_firmware->data))[ps_dsp_code->
+ word_count];
+ ps_dsp_code->word_count++;
+ return 0;
+}
+
+/*-------------------------------------------------------------------*/
+short hpi_dsp_code_read_block(size_t words_requested,
+ struct dsp_code *ps_dsp_code, u32 **ppblock)
+{
+ if (ps_dsp_code->word_count + words_requested >
+ ps_dsp_code->block_length)
+ return HPI_ERROR_DSP_FILE_FORMAT;
+
+ *ppblock =
+ ((u32 *)(ps_dsp_code->ps_firmware->data)) +
+ ps_dsp_code->word_count;
+ ps_dsp_code->word_count += words_requested;
+ return 0;
+}
diff --git a/sound/pci/asihpi/hpidspcd.h b/sound/pci/asihpi/hpidspcd.h
new file mode 100644
index 000000000000..d7c240398225
--- /dev/null
+++ b/sound/pci/asihpi/hpidspcd.h
@@ -0,0 +1,104 @@
+/***********************************************************************/
+/**
+
+ AudioScience HPI driver
+ Copyright (C) 1997-2010 AudioScience Inc. <support@audioscience.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation;
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+\file
+Functions for reading DSP code to load into DSP
+
+hpi_dspcode_defines - HPI DSP code loading method:
+Define exactly one of these to select how the DSP code is supplied to
+the adapter.
+
+End users writing applications that use the HPI interface do not have to
+use any of the below defines; they are only necessary for building drivers
+
+HPI_DSPCODE_FILE:
+DSP code is supplied as a file that is opened and read from by the driver.
+
+HPI_DSPCODE_FIRMWARE:
+DSP code is read using the hotplug firmware loader module.
+ Only valid when compiling the HPI kernel driver under Linux.
+*/
+/***********************************************************************/
+#ifndef _HPIDSPCD_H_
+#define _HPIDSPCD_H_
+
+#include "hpi_internal.h"
+
+#ifndef DISABLE_PRAGMA_PACK1
+#pragma pack(push, 1)
+#endif
+
+/** Descriptor for dspcode from firmware loader */
+struct dsp_code {
+ /** Firmware descriptor */
+ const struct firmware *ps_firmware;
+ struct pci_dev *ps_dev;
+	/** Expected number of words in the whole dsp code, including header */
+ long int block_length;
+ /** Number of words read so far */
+ long int word_count;
+ /** Version read from dsp code file */
+ u32 version;
+ /** CRC read from dsp code file */
+ u32 crc;
+};
+
+#ifndef DISABLE_PRAGMA_PACK1
+#pragma pack(pop)
+#endif
+
+/** Prepare *ps_dsp_code to refer to the requested adapter.
+ Searches the file, or selects the appropriate linked array.
+
+\return 0 for success, or error code if requested code is not available
+*/
+short hpi_dsp_code_open(
+ /** Code identifier, usually adapter family */
+ u32 adapter,
+ /** Pointer to DSP code control structure */
+ struct dsp_code *ps_dsp_code,
+ /** Pointer to dword to receive OS specific error code */
+ u32 *pos_error_code);
+
+/** Close the DSP code file */
+void hpi_dsp_code_close(struct dsp_code *ps_dsp_code);
+
+/** Rewind to the beginning of the DSP code file (for verify) */
+void hpi_dsp_code_rewind(struct dsp_code *ps_dsp_code);
+
+/** Read one word from the dsp code file
+ \return 0 for success, or error code if eof, or block length exceeded
+*/
+short hpi_dsp_code_read_word(struct dsp_code *ps_dsp_code,
+ /**< DSP code descriptor */
+ u32 *pword /**< where to store the read word */
+ );
+
+/** Get a block of dsp code into an internal buffer, and provide a pointer to
+that buffer. (If dsp code is already an array in memory, it is referenced,
+not copied.)
+
+\return Error if requested number of words are not available
+*/
+short hpi_dsp_code_read_block(size_t words_requested,
+ struct dsp_code *ps_dsp_code,
+ /* Pointer to store (Pointer to code buffer) */
+ u32 **ppblock);
+
+#endif
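+
+/*
+ * A minimal usage sketch (illustrative only; error handling trimmed,
+ * the adapter id 0x6244 and load_word_to_dsp() are hypothetical, and
+ * pdev is the adapter's struct pci_dev pointer):
+ *
+ *	struct dsp_code code = { .ps_dev = pdev };
+ *	u32 os_err, word;
+ *
+ *	if (hpi_dsp_code_open(0x6244, &code, &os_err))
+ *		return -ENODEV;
+ *	while (hpi_dsp_code_read_word(&code, &word) == 0)
+ *		load_word_to_dsp(word);
+ *	hpi_dsp_code_rewind(&code);	// e.g. for a verify pass
+ *	hpi_dsp_code_close(&code);
+ */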
diff --git a/sound/pci/asihpi/hpifunc.c b/sound/pci/asihpi/hpifunc.c
new file mode 100644
index 000000000000..254c580db639
--- /dev/null
+++ b/sound/pci/asihpi/hpifunc.c
@@ -0,0 +1,3864 @@
+
+#include "hpi_internal.h"
+#include "hpimsginit.h"
+
+#include "hpidebug.h"
+
+struct hpi_handle {
+ unsigned int obj_index:12;
+ unsigned int obj_type:4;
+ unsigned int adapter_index:14;
+ unsigned int spare:1;
+ unsigned int read_only:1;
+};
+
+union handle_word {
+ struct hpi_handle h;
+ u32 w;
+};
+
+u32 hpi_indexes_to_handle(const char c_object, const u16 adapter_index,
+ const u16 object_index)
+{
+ union handle_word handle;
+
+ handle.h.adapter_index = adapter_index;
+ handle.h.spare = 0;
+ handle.h.read_only = 0;
+ handle.h.obj_type = c_object;
+ handle.h.obj_index = object_index;
+ return handle.w;
+}
+
+void hpi_handle_to_indexes(const u32 handle, u16 *pw_adapter_index,
+ u16 *pw_object_index)
+{
+ union handle_word uhandle;
+ uhandle.w = handle;
+
+ if (pw_adapter_index)
+ *pw_adapter_index = (u16)uhandle.h.adapter_index;
+ if (pw_object_index)
+ *pw_object_index = (u16)uhandle.h.obj_index;
+}
+
+char hpi_handle_object(const u32 handle)
+{
+ union handle_word uhandle;
+ uhandle.w = handle;
+ return (char)uhandle.h.obj_type;
+}
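+
+/*
+ * Worked example (assuming the usual little-endian GCC bitfield layout):
+ * hpi_indexes_to_handle(HPI_OBJ_OSTREAM, 2, 5) packs obj_index 5 into the
+ * low 12 bits, the object type into the next 4 bits and adapter_index 2
+ * into the following 14 bits; hpi_handle_to_indexes() and
+ * hpi_handle_object() recover 2, 5 and HPI_OBJ_OSTREAM from that word.
+ */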
+
+#define u32TOINDEX(h, i1) \
+do {\
+ if (h == 0) \
+ return HPI_ERROR_INVALID_OBJ; \
+ else \
+ hpi_handle_to_indexes(h, i1, NULL); \
+} while (0)
+
+#define u32TOINDEXES(h, i1, i2) \
+do {\
+ if (h == 0) \
+ return HPI_ERROR_INVALID_OBJ; \
+ else \
+ hpi_handle_to_indexes(h, i1, i2);\
+} while (0)
+
+void hpi_format_to_msg(struct hpi_msg_format *pMF,
+ const struct hpi_format *pF)
+{
+ pMF->sample_rate = pF->sample_rate;
+ pMF->bit_rate = pF->bit_rate;
+ pMF->attributes = pF->attributes;
+ pMF->channels = pF->channels;
+ pMF->format = pF->format;
+}
+
+static void hpi_msg_to_format(struct hpi_format *pF,
+ struct hpi_msg_format *pMF)
+{
+ pF->sample_rate = pMF->sample_rate;
+ pF->bit_rate = pMF->bit_rate;
+ pF->attributes = pMF->attributes;
+ pF->channels = pMF->channels;
+ pF->format = pMF->format;
+ pF->mode_legacy = 0;
+ pF->unused = 0;
+}
+
+void hpi_stream_response_to_legacy(struct hpi_stream_res *pSR)
+{
+ pSR->u.legacy_stream_info.auxiliary_data_available =
+ pSR->u.stream_info.auxiliary_data_available;
+ pSR->u.legacy_stream_info.state = pSR->u.stream_info.state;
+}
+
+static struct hpi_hsubsys gh_subsys;
+
+struct hpi_hsubsys *hpi_subsys_create(void)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ memset(&gh_subsys, 0, sizeof(struct hpi_hsubsys));
+
+ {
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
+ HPI_SUBSYS_OPEN);
+ hpi_send_recv(&hm, &hr);
+
+ if (hr.error == 0)
+ return &gh_subsys;
+
+ }
+ return NULL;
+}
+
+void hpi_subsys_free(const struct hpi_hsubsys *ph_subsys)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
+ HPI_SUBSYS_CLOSE);
+ hpi_send_recv(&hm, &hr);
+
+}
+
+u16 hpi_subsys_get_version(const struct hpi_hsubsys *ph_subsys, u32 *pversion)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
+ HPI_SUBSYS_GET_VERSION);
+ hpi_send_recv(&hm, &hr);
+ *pversion = hr.u.s.version;
+ return hr.error;
+}
+
+u16 hpi_subsys_get_version_ex(const struct hpi_hsubsys *ph_subsys,
+ u32 *pversion_ex)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
+ HPI_SUBSYS_GET_VERSION);
+ hpi_send_recv(&hm, &hr);
+ *pversion_ex = hr.u.s.data;
+ return hr.error;
+}
+
+u16 hpi_subsys_get_info(const struct hpi_hsubsys *ph_subsys, u32 *pversion,
+ u16 *pw_num_adapters, u16 aw_adapter_list[], u16 list_length)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
+ HPI_SUBSYS_GET_INFO);
+
+ hpi_send_recv(&hm, &hr);
+
+ *pversion = hr.u.s.version;
+	/* list_length counts u16 entries, as in hpi_subsys_find_adapters() */
+	if (list_length > HPI_MAX_ADAPTERS)
+		memcpy(aw_adapter_list, &hr.u.s.aw_adapter_list,
+			HPI_MAX_ADAPTERS * sizeof(u16));
+	else
+		memcpy(aw_adapter_list, &hr.u.s.aw_adapter_list,
+			list_length * sizeof(u16));
+ *pw_num_adapters = hr.u.s.num_adapters;
+ return hr.error;
+}
+
+u16 hpi_subsys_find_adapters(const struct hpi_hsubsys *ph_subsys,
+ u16 *pw_num_adapters, u16 aw_adapter_list[], u16 list_length)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
+ HPI_SUBSYS_FIND_ADAPTERS);
+
+ hpi_send_recv(&hm, &hr);
+
+ if (list_length > HPI_MAX_ADAPTERS) {
+ memcpy(aw_adapter_list, &hr.u.s.aw_adapter_list,
+ HPI_MAX_ADAPTERS * sizeof(u16));
+ memset(&aw_adapter_list[HPI_MAX_ADAPTERS], 0,
+ (list_length - HPI_MAX_ADAPTERS) * sizeof(u16));
+ } else
+ memcpy(aw_adapter_list, &hr.u.s.aw_adapter_list,
+ list_length * sizeof(u16));
+ *pw_num_adapters = hr.u.s.num_adapters;
+
+ return hr.error;
+}
+
+u16 hpi_subsys_create_adapter(const struct hpi_hsubsys *ph_subsys,
+ const struct hpi_resource *p_resource, u16 *pw_adapter_index)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
+ HPI_SUBSYS_CREATE_ADAPTER);
+ hm.u.s.resource = *p_resource;
+
+ hpi_send_recv(&hm, &hr);
+
+ *pw_adapter_index = hr.u.s.adapter_index;
+ return hr.error;
+}
+
+u16 hpi_subsys_delete_adapter(const struct hpi_hsubsys *ph_subsys,
+ u16 adapter_index)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
+ HPI_SUBSYS_DELETE_ADAPTER);
+ hm.adapter_index = adapter_index;
+ hpi_send_recv(&hm, &hr);
+ return hr.error;
+}
+
+u16 hpi_subsys_get_num_adapters(const struct hpi_hsubsys *ph_subsys,
+ int *pn_num_adapters)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
+ HPI_SUBSYS_GET_NUM_ADAPTERS);
+ hpi_send_recv(&hm, &hr);
+ *pn_num_adapters = (int)hr.u.s.num_adapters;
+ return hr.error;
+}
+
+u16 hpi_subsys_get_adapter(const struct hpi_hsubsys *ph_subsys, int iterator,
+ u32 *padapter_index, u16 *pw_adapter_type)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
+ HPI_SUBSYS_GET_ADAPTER);
+ hm.adapter_index = (u16)iterator;
+ hpi_send_recv(&hm, &hr);
+ *padapter_index = (int)hr.u.s.adapter_index;
+ *pw_adapter_type = hr.u.s.aw_adapter_list[0];
+ return hr.error;
+}
+
+u16 hpi_subsys_set_host_network_interface(const struct hpi_hsubsys *ph_subsys,
+ const char *sz_interface)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
+ HPI_SUBSYS_SET_NETWORK_INTERFACE);
+ if (sz_interface == NULL)
+ return HPI_ERROR_INVALID_RESOURCE;
+ hm.u.s.resource.r.net_if = sz_interface;
+ hpi_send_recv(&hm, &hr);
+ return hr.error;
+}
+
+u16 hpi_adapter_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
+ HPI_ADAPTER_OPEN);
+ hm.adapter_index = adapter_index;
+
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+
+}
+
+u16 hpi_adapter_close(const struct hpi_hsubsys *ph_subsys, u16 adapter_index)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
+ HPI_ADAPTER_CLOSE);
+ hm.adapter_index = adapter_index;
+
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+}
+
+u16 hpi_adapter_set_mode(const struct hpi_hsubsys *ph_subsys,
+ u16 adapter_index, u32 adapter_mode)
+{
+ return hpi_adapter_set_mode_ex(ph_subsys, adapter_index, adapter_mode,
+ HPI_ADAPTER_MODE_SET);
+}
+
+u16 hpi_adapter_set_mode_ex(const struct hpi_hsubsys *ph_subsys,
+ u16 adapter_index, u32 adapter_mode, u16 query_or_set)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
+ HPI_ADAPTER_SET_MODE);
+ hm.adapter_index = adapter_index;
+ hm.u.a.adapter_mode = adapter_mode;
+ hm.u.a.assert_id = query_or_set;
+ hpi_send_recv(&hm, &hr);
+ return hr.error;
+}
+
+u16 hpi_adapter_get_mode(const struct hpi_hsubsys *ph_subsys,
+ u16 adapter_index, u32 *padapter_mode)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
+ HPI_ADAPTER_GET_MODE);
+ hm.adapter_index = adapter_index;
+ hpi_send_recv(&hm, &hr);
+ if (padapter_mode)
+ *padapter_mode = hr.u.a.serial_number;
+ return hr.error;
+}
+
+u16 hpi_adapter_get_info(const struct hpi_hsubsys *ph_subsys,
+ u16 adapter_index, u16 *pw_num_outstreams, u16 *pw_num_instreams,
+ u16 *pw_version, u32 *pserial_number, u16 *pw_adapter_type)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
+ HPI_ADAPTER_GET_INFO);
+ hm.adapter_index = adapter_index;
+
+ hpi_send_recv(&hm, &hr);
+
+ *pw_adapter_type = hr.u.a.adapter_type;
+ *pw_num_outstreams = hr.u.a.num_outstreams;
+ *pw_num_instreams = hr.u.a.num_instreams;
+ *pw_version = hr.u.a.version;
+ *pserial_number = hr.u.a.serial_number;
+ return hr.error;
+}
+
+u16 hpi_adapter_get_module_by_index(const struct hpi_hsubsys *ph_subsys,
+ u16 adapter_index, u16 module_index, u16 *pw_num_outputs,
+ u16 *pw_num_inputs, u16 *pw_version, u32 *pserial_number,
+ u16 *pw_module_type, u32 *ph_module)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
+ HPI_ADAPTER_MODULE_INFO);
+ hm.adapter_index = adapter_index;
+ hm.u.ax.module_info.index = module_index;
+
+ hpi_send_recv(&hm, &hr);
+
+ *pw_module_type = hr.u.a.adapter_type;
+ *pw_num_outputs = hr.u.a.num_outstreams;
+ *pw_num_inputs = hr.u.a.num_instreams;
+ *pw_version = hr.u.a.version;
+ *pserial_number = hr.u.a.serial_number;
+ *ph_module = 0;
+
+ return hr.error;
+}
+
+u16 hpi_adapter_get_assert(const struct hpi_hsubsys *ph_subsys,
+ u16 adapter_index, u16 *assert_present, char *psz_assert,
+ u16 *pw_line_number)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
+ HPI_ADAPTER_GET_ASSERT);
+ hm.adapter_index = adapter_index;
+ hpi_send_recv(&hm, &hr);
+
+ *assert_present = 0;
+
+ if (!hr.error) {
+
+ *pw_line_number = (u16)hr.u.a.serial_number;
+ if (*pw_line_number) {
+
+ int i;
+ char *src = (char *)hr.u.a.sz_adapter_assert;
+ char *dst = psz_assert;
+
+ *assert_present = 1;
+
+ for (i = 0; i < HPI_STRING_LEN; i++) {
+ char c;
+ c = *src++;
+ *dst++ = c;
+ if (c == 0)
+ break;
+ }
+
+ }
+ }
+ return hr.error;
+}
+
+u16 hpi_adapter_get_assert_ex(const struct hpi_hsubsys *ph_subsys,
+ u16 adapter_index, u16 *assert_present, char *psz_assert,
+ u32 *pline_number, u16 *pw_assert_on_dsp)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
+ HPI_ADAPTER_GET_ASSERT);
+ hm.adapter_index = adapter_index;
+
+ hpi_send_recv(&hm, &hr);
+
+ *assert_present = 0;
+
+ if (!hr.error) {
+
+ *pline_number = hr.u.a.serial_number;
+
+ *assert_present = hr.u.a.adapter_type;
+
+ *pw_assert_on_dsp = hr.u.a.adapter_index;
+
+ if (!*assert_present && *pline_number)
+
+ *assert_present = 1;
+
+ if (*assert_present) {
+
+ int i;
+ char *src = (char *)hr.u.a.sz_adapter_assert;
+ char *dst = psz_assert;
+
+ for (i = 0; i < HPI_STRING_LEN; i++) {
+ char c;
+ c = *src++;
+ *dst++ = c;
+ if (c == 0)
+ break;
+ }
+
+ } else {
+ *psz_assert = 0;
+ }
+ }
+ return hr.error;
+}
+
+u16 hpi_adapter_test_assert(const struct hpi_hsubsys *ph_subsys,
+ u16 adapter_index, u16 assert_id)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
+ HPI_ADAPTER_TEST_ASSERT);
+ hm.adapter_index = adapter_index;
+ hm.u.a.assert_id = assert_id;
+
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+}
+
+u16 hpi_adapter_enable_capability(const struct hpi_hsubsys *ph_subsys,
+ u16 adapter_index, u16 capability, u32 key)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
+ HPI_ADAPTER_ENABLE_CAPABILITY);
+ hm.adapter_index = adapter_index;
+ hm.u.a.assert_id = capability;
+ hm.u.a.adapter_mode = key;
+
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+}
+
+u16 hpi_adapter_self_test(const struct hpi_hsubsys *ph_subsys,
+ u16 adapter_index)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
+ HPI_ADAPTER_SELFTEST);
+ hm.adapter_index = adapter_index;
+ hpi_send_recv(&hm, &hr);
+ return hr.error;
+}
+
+u16 hpi_adapter_debug_read(const struct hpi_hsubsys *ph_subsys,
+ u16 adapter_index, u32 dsp_address, char *p_buffer, int *count_bytes)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
+ HPI_ADAPTER_DEBUG_READ);
+
+ hr.size = sizeof(hr);
+
+ hm.adapter_index = adapter_index;
+ hm.u.ax.debug_read.dsp_address = dsp_address;
+
+ if (*count_bytes > sizeof(hr.u.bytes))
+ *count_bytes = sizeof(hr.u.bytes);
+
+ hm.u.ax.debug_read.count_bytes = *count_bytes;
+
+ hpi_send_recv(&hm, &hr);
+
+ if (!hr.error) {
+ *count_bytes = hr.size - 12;
+ memcpy(p_buffer, &hr.u.bytes, *count_bytes);
+ } else
+ *count_bytes = 0;
+ return hr.error;
+}
+
+u16 hpi_adapter_set_property(const struct hpi_hsubsys *ph_subsys,
+ u16 adapter_index, u16 property, u16 parameter1, u16 parameter2)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
+ HPI_ADAPTER_SET_PROPERTY);
+ hm.adapter_index = adapter_index;
+ hm.u.ax.property_set.property = property;
+ hm.u.ax.property_set.parameter1 = parameter1;
+ hm.u.ax.property_set.parameter2 = parameter2;
+
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+}
+
+u16 hpi_adapter_get_property(const struct hpi_hsubsys *ph_subsys,
+ u16 adapter_index, u16 property, u16 *pw_parameter1,
+ u16 *pw_parameter2)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
+ HPI_ADAPTER_GET_PROPERTY);
+ hm.adapter_index = adapter_index;
+ hm.u.ax.property_set.property = property;
+
+ hpi_send_recv(&hm, &hr);
+ if (!hr.error) {
+ if (pw_parameter1)
+ *pw_parameter1 = hr.u.ax.property_get.parameter1;
+ if (pw_parameter2)
+ *pw_parameter2 = hr.u.ax.property_get.parameter2;
+ }
+
+ return hr.error;
+}
+
+u16 hpi_adapter_enumerate_property(const struct hpi_hsubsys *ph_subsys,
+ u16 adapter_index, u16 index, u16 what_to_enumerate,
+ u16 property_index, u32 *psetting)
+{
+ return 0;
+}
+
+u16 hpi_format_create(struct hpi_format *p_format, u16 channels, u16 format,
+ u32 sample_rate, u32 bit_rate, u32 attributes)
+{
+ u16 error = 0;
+ struct hpi_msg_format fmt;
+
+ switch (channels) {
+ case 1:
+ case 2:
+ case 4:
+ case 6:
+ case 8:
+ case 16:
+ break;
+ default:
+ error = HPI_ERROR_INVALID_CHANNELS;
+ return error;
+ }
+ fmt.channels = channels;
+
+ switch (format) {
+ case HPI_FORMAT_PCM16_SIGNED:
+ case HPI_FORMAT_PCM24_SIGNED:
+ case HPI_FORMAT_PCM32_SIGNED:
+ case HPI_FORMAT_PCM32_FLOAT:
+ case HPI_FORMAT_PCM16_BIGENDIAN:
+ case HPI_FORMAT_PCM8_UNSIGNED:
+ case HPI_FORMAT_MPEG_L1:
+ case HPI_FORMAT_MPEG_L2:
+ case HPI_FORMAT_MPEG_L3:
+ case HPI_FORMAT_DOLBY_AC2:
+ case HPI_FORMAT_AA_TAGIT1_HITS:
+ case HPI_FORMAT_AA_TAGIT1_INSERTS:
+ case HPI_FORMAT_RAW_BITSTREAM:
+ case HPI_FORMAT_AA_TAGIT1_HITS_EX1:
+ case HPI_FORMAT_OEM1:
+ case HPI_FORMAT_OEM2:
+ break;
+ default:
+ error = HPI_ERROR_INVALID_FORMAT;
+ return error;
+ }
+ fmt.format = format;
+
+ if (sample_rate < 8000L) {
+ error = HPI_ERROR_INCOMPATIBLE_SAMPLERATE;
+ sample_rate = 8000L;
+ }
+ if (sample_rate > 200000L) {
+ error = HPI_ERROR_INCOMPATIBLE_SAMPLERATE;
+ sample_rate = 200000L;
+ }
+ fmt.sample_rate = sample_rate;
+
+ switch (format) {
+ case HPI_FORMAT_MPEG_L1:
+ case HPI_FORMAT_MPEG_L2:
+ case HPI_FORMAT_MPEG_L3:
+ fmt.bit_rate = bit_rate;
+ break;
+ case HPI_FORMAT_PCM16_SIGNED:
+ case HPI_FORMAT_PCM16_BIGENDIAN:
+ fmt.bit_rate = channels * sample_rate * 2;
+ break;
+ case HPI_FORMAT_PCM32_SIGNED:
+ case HPI_FORMAT_PCM32_FLOAT:
+ fmt.bit_rate = channels * sample_rate * 4;
+ break;
+ case HPI_FORMAT_PCM8_UNSIGNED:
+ fmt.bit_rate = channels * sample_rate;
+ break;
+ default:
+ fmt.bit_rate = 0;
+ }
+
+ switch (format) {
+ case HPI_FORMAT_MPEG_L2:
+ if ((channels == 1)
+ && (attributes != HPI_MPEG_MODE_DEFAULT)) {
+ attributes = HPI_MPEG_MODE_DEFAULT;
+ error = HPI_ERROR_INVALID_FORMAT;
+ } else if (attributes > HPI_MPEG_MODE_DUALCHANNEL) {
+ attributes = HPI_MPEG_MODE_DEFAULT;
+ error = HPI_ERROR_INVALID_FORMAT;
+ }
+ fmt.attributes = attributes;
+ break;
+ default:
+ fmt.attributes = attributes;
+ }
+
+ hpi_msg_to_format(p_format, &fmt);
+ return error;
+}
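+
+/*
+ * Example (illustrative): creating a stereo 48 kHz 16-bit PCM format.
+ * For PCM16 the function derives bit_rate = 2 * 48000 * 2 = 192000
+ * internally; the attributes argument is simply passed through:
+ *
+ *	struct hpi_format fmt;
+ *	u16 err = hpi_format_create(&fmt, 2, HPI_FORMAT_PCM16_SIGNED,
+ *		48000, 0, 0);
+ */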
+
+u16 hpi_stream_estimate_buffer_size(struct hpi_format *p_format,
+ u32 host_polling_rate_in_milli_seconds, u32 *recommended_buffer_size)
+{
+
+ u32 bytes_per_second;
+ u32 size;
+ u16 channels;
+ struct hpi_format *pF = p_format;
+
+ channels = pF->channels;
+
+ switch (pF->format) {
+ case HPI_FORMAT_PCM16_BIGENDIAN:
+ case HPI_FORMAT_PCM16_SIGNED:
+ bytes_per_second = pF->sample_rate * 2L * channels;
+ break;
+ case HPI_FORMAT_PCM24_SIGNED:
+ bytes_per_second = pF->sample_rate * 3L * channels;
+ break;
+ case HPI_FORMAT_PCM32_SIGNED:
+ case HPI_FORMAT_PCM32_FLOAT:
+ bytes_per_second = pF->sample_rate * 4L * channels;
+ break;
+ case HPI_FORMAT_PCM8_UNSIGNED:
+ bytes_per_second = pF->sample_rate * 1L * channels;
+ break;
+ case HPI_FORMAT_MPEG_L1:
+ case HPI_FORMAT_MPEG_L2:
+ case HPI_FORMAT_MPEG_L3:
+ bytes_per_second = pF->bit_rate / 8L;
+ break;
+ case HPI_FORMAT_DOLBY_AC2:
+
+ bytes_per_second = 256000L / 8L;
+ break;
+ default:
+ return HPI_ERROR_INVALID_FORMAT;
+ }
+ size = (bytes_per_second * host_polling_rate_in_milli_seconds * 2) /
+ 1000L;
+
+ *recommended_buffer_size =
+ roundup_pow_of_two(((size + 4095L) & ~4095L));
+ return 0;
+}
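+
+/*
+ * Worked example (illustrative numbers): stereo 16-bit PCM at 48 kHz
+ * polled every 10 ms gives bytes_per_second = 48000 * 2 * 2 = 192000,
+ * so size = 192000 * 10 * 2 / 1000 = 3840; rounding up to a 4096-byte
+ * boundary and then to a power of two recommends a 4096 byte buffer.
+ */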
+
+u16 hpi_outstream_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index,
+ u16 outstream_index, u32 *ph_outstream)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
+ HPI_OSTREAM_OPEN);
+ hm.adapter_index = adapter_index;
+ hm.obj_index = outstream_index;
+
+ hpi_send_recv(&hm, &hr);
+
+ if (hr.error == 0)
+ *ph_outstream =
+ hpi_indexes_to_handle(HPI_OBJ_OSTREAM, adapter_index,
+ outstream_index);
+ else
+ *ph_outstream = 0;
+ return hr.error;
+}
+
+u16 hpi_outstream_close(const struct hpi_hsubsys *ph_subsys, u32 h_outstream)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
+ HPI_OSTREAM_HOSTBUFFER_FREE);
+ u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+ hpi_send_recv(&hm, &hr);
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
+ HPI_OSTREAM_GROUP_RESET);
+ u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+ hpi_send_recv(&hm, &hr);
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
+ HPI_OSTREAM_CLOSE);
+ u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+}
+
+u16 hpi_outstream_get_info_ex(const struct hpi_hsubsys *ph_subsys,
+ u32 h_outstream, u16 *pw_state, u32 *pbuffer_size, u32 *pdata_to_play,
+ u32 *psamples_played, u32 *pauxiliary_data_to_play)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
+ HPI_OSTREAM_GET_INFO);
+ u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+
+ hpi_send_recv(&hm, &hr);
+
+ if (pw_state)
+ *pw_state = hr.u.d.u.stream_info.state;
+ if (pbuffer_size)
+ *pbuffer_size = hr.u.d.u.stream_info.buffer_size;
+ if (pdata_to_play)
+ *pdata_to_play = hr.u.d.u.stream_info.data_available;
+ if (psamples_played)
+ *psamples_played = hr.u.d.u.stream_info.samples_transferred;
+ if (pauxiliary_data_to_play)
+ *pauxiliary_data_to_play =
+ hr.u.d.u.stream_info.auxiliary_data_available;
+ return hr.error;
+}
+
+u16 hpi_outstream_write_buf(const struct hpi_hsubsys *ph_subsys,
+ u32 h_outstream, const u8 *pb_data, u32 bytes_to_write,
+ const struct hpi_format *p_format)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
+ HPI_OSTREAM_WRITE);
+ u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+ hm.u.d.u.data.pb_data = (u8 *)pb_data;
+ hm.u.d.u.data.data_size = bytes_to_write;
+
+ hpi_format_to_msg(&hm.u.d.u.data.format, p_format);
+
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+}
+
+u16 hpi_outstream_start(const struct hpi_hsubsys *ph_subsys, u32 h_outstream)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
+ HPI_OSTREAM_START);
+ u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+}
+
+u16 hpi_outstream_wait_start(const struct hpi_hsubsys *ph_subsys,
+ u32 h_outstream)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
+ HPI_OSTREAM_WAIT_START);
+ u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+}
+
+u16 hpi_outstream_stop(const struct hpi_hsubsys *ph_subsys, u32 h_outstream)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
+ HPI_OSTREAM_STOP);
+ u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+}
+
+u16 hpi_outstream_sinegen(const struct hpi_hsubsys *ph_subsys,
+ u32 h_outstream)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
+ HPI_OSTREAM_SINEGEN);
+ u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+}
+
+u16 hpi_outstream_reset(const struct hpi_hsubsys *ph_subsys, u32 h_outstream)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
+ HPI_OSTREAM_RESET);
+ u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+}
+
+u16 hpi_outstream_query_format(const struct hpi_hsubsys *ph_subsys,
+ u32 h_outstream, struct hpi_format *p_format)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
+ HPI_OSTREAM_QUERY_FORMAT);
+ u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+
+ hpi_format_to_msg(&hm.u.d.u.data.format, p_format);
+
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+}
+
+u16 hpi_outstream_set_format(const struct hpi_hsubsys *ph_subsys,
+ u32 h_outstream, struct hpi_format *p_format)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
+ HPI_OSTREAM_SET_FORMAT);
+ u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+
+ hpi_format_to_msg(&hm.u.d.u.data.format, p_format);
+
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+}
+
+u16 hpi_outstream_set_velocity(const struct hpi_hsubsys *ph_subsys,
+ u32 h_outstream, short velocity)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
+ HPI_OSTREAM_SET_VELOCITY);
+ u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+ hm.u.d.u.velocity = velocity;
+
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+}
+
+u16 hpi_outstream_set_punch_in_out(const struct hpi_hsubsys *ph_subsys,
+ u32 h_outstream, u32 punch_in_sample, u32 punch_out_sample)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
+ HPI_OSTREAM_SET_PUNCHINOUT);
+ u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+
+ hm.u.d.u.pio.punch_in_sample = punch_in_sample;
+ hm.u.d.u.pio.punch_out_sample = punch_out_sample;
+
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+}
+
+u16 hpi_outstream_ancillary_reset(const struct hpi_hsubsys *ph_subsys,
+ u32 h_outstream, u16 mode)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
+ HPI_OSTREAM_ANC_RESET);
+ u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+ hm.u.d.u.data.format.channels = mode;
+ hpi_send_recv(&hm, &hr);
+ return hr.error;
+}
+
+u16 hpi_outstream_ancillary_get_info(const struct hpi_hsubsys *ph_subsys,
+ u32 h_outstream, u32 *pframes_available)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
+ HPI_OSTREAM_ANC_GET_INFO);
+ u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+ hpi_send_recv(&hm, &hr);
+ if (hr.error == 0) {
+ if (pframes_available)
+ *pframes_available =
+ hr.u.d.u.stream_info.data_available /
+ sizeof(struct hpi_anc_frame);
+ }
+ return hr.error;
+}
+
+u16 hpi_outstream_ancillary_read(const struct hpi_hsubsys *ph_subsys,
+ u32 h_outstream, struct hpi_anc_frame *p_anc_frame_buffer,
+ u32 anc_frame_buffer_size_in_bytes,
+ u32 number_of_ancillary_frames_to_read)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
+ HPI_OSTREAM_ANC_READ);
+ u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+ hm.u.d.u.data.pb_data = (u8 *)p_anc_frame_buffer;
+ hm.u.d.u.data.data_size =
+ number_of_ancillary_frames_to_read *
+ sizeof(struct hpi_anc_frame);
+ if (hm.u.d.u.data.data_size <= anc_frame_buffer_size_in_bytes)
+ hpi_send_recv(&hm, &hr);
+ else
+ hr.error = HPI_ERROR_INVALID_DATA_TRANSFER;
+ return hr.error;
+}
+
+u16 hpi_outstream_set_time_scale(const struct hpi_hsubsys *ph_subsys,
+ u32 h_outstream, u32 time_scale)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
+ HPI_OSTREAM_SET_TIMESCALE);
+ u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+
+ hm.u.d.u.time_scale = time_scale;
+
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+}
+
+u16 hpi_outstream_host_buffer_allocate(const struct hpi_hsubsys *ph_subsys,
+ u32 h_outstream, u32 size_in_bytes)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
+ HPI_OSTREAM_HOSTBUFFER_ALLOC);
+ u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+ hm.u.d.u.data.data_size = size_in_bytes;
+ hpi_send_recv(&hm, &hr);
+ return hr.error;
+}
+
+u16 hpi_outstream_host_buffer_get_info(const struct hpi_hsubsys *ph_subsys,
+ u32 h_outstream, u8 **pp_buffer,
+ struct hpi_hostbuffer_status **pp_status)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
+ HPI_OSTREAM_HOSTBUFFER_GET_INFO);
+ u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+ hpi_send_recv(&hm, &hr);
+
+ if (hr.error == 0) {
+ if (pp_buffer)
+ *pp_buffer = hr.u.d.u.hostbuffer_info.p_buffer;
+ if (pp_status)
+ *pp_status = hr.u.d.u.hostbuffer_info.p_status;
+ }
+ return hr.error;
+}
+
+u16 hpi_outstream_host_buffer_free(const struct hpi_hsubsys *ph_subsys,
+ u32 h_outstream)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
+ HPI_OSTREAM_HOSTBUFFER_FREE);
+ u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+ hpi_send_recv(&hm, &hr);
+ return hr.error;
+}
+
+u16 hpi_outstream_group_add(const struct hpi_hsubsys *ph_subsys,
+ u32 h_outstream, u32 h_stream)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ u16 adapter;
+ char c_obj_type;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
+ HPI_OSTREAM_GROUP_ADD);
+ hr.error = 0;
+ u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+ c_obj_type = hpi_handle_object(h_stream);
+ switch (c_obj_type) {
+ case HPI_OBJ_OSTREAM:
+ hm.u.d.u.stream.object_type = HPI_OBJ_OSTREAM;
+ u32TOINDEXES(h_stream, &adapter,
+ &hm.u.d.u.stream.stream_index);
+ break;
+ case HPI_OBJ_ISTREAM:
+ hm.u.d.u.stream.object_type = HPI_OBJ_ISTREAM;
+ u32TOINDEXES(h_stream, &adapter,
+ &hm.u.d.u.stream.stream_index);
+ break;
+ default:
+ return HPI_ERROR_INVALID_STREAM;
+ }
+ if (adapter != hm.adapter_index)
+ return HPI_ERROR_NO_INTERADAPTER_GROUPS;
+
+ hpi_send_recv(&hm, &hr);
+ return hr.error;
+}
+
+u16 hpi_outstream_group_get_map(const struct hpi_hsubsys *ph_subsys,
+ u32 h_outstream, u32 *poutstream_map, u32 *pinstream_map)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
+ HPI_OSTREAM_GROUP_GETMAP);
+ u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+ hpi_send_recv(&hm, &hr);
+
+ if (poutstream_map)
+ *poutstream_map = hr.u.d.u.group_info.outstream_group_map;
+ if (pinstream_map)
+ *pinstream_map = hr.u.d.u.group_info.instream_group_map;
+
+ return hr.error;
+}
+
+u16 hpi_outstream_group_reset(const struct hpi_hsubsys *ph_subsys,
+ u32 h_outstream)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
+ HPI_OSTREAM_GROUP_RESET);
+ u32TOINDEXES(h_outstream, &hm.adapter_index, &hm.obj_index);
+ hpi_send_recv(&hm, &hr);
+ return hr.error;
+}
+
+u16 hpi_instream_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index,
+ u16 instream_index, u32 *ph_instream)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
+ HPI_ISTREAM_OPEN);
+ hm.adapter_index = adapter_index;
+ hm.obj_index = instream_index;
+
+ hpi_send_recv(&hm, &hr);
+
+ if (hr.error == 0)
+ *ph_instream =
+ hpi_indexes_to_handle(HPI_OBJ_ISTREAM, adapter_index,
+ instream_index);
+ else
+ *ph_instream = 0;
+
+ return hr.error;
+}
+
+u16 hpi_instream_close(const struct hpi_hsubsys *ph_subsys, u32 h_instream)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
+ HPI_ISTREAM_HOSTBUFFER_FREE);
+ u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+ hpi_send_recv(&hm, &hr);
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
+ HPI_ISTREAM_GROUP_RESET);
+ u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+ hpi_send_recv(&hm, &hr);
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
+ HPI_ISTREAM_CLOSE);
+ u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+}
+
+u16 hpi_instream_query_format(const struct hpi_hsubsys *ph_subsys,
+ u32 h_instream, const struct hpi_format *p_format)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
+ HPI_ISTREAM_QUERY_FORMAT);
+ u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+ hpi_format_to_msg(&hm.u.d.u.data.format, p_format);
+
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+}
+
+u16 hpi_instream_set_format(const struct hpi_hsubsys *ph_subsys,
+ u32 h_instream, const struct hpi_format *p_format)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
+ HPI_ISTREAM_SET_FORMAT);
+ u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+ hpi_format_to_msg(&hm.u.d.u.data.format, p_format);
+
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+}
+
+u16 hpi_instream_read_buf(const struct hpi_hsubsys *ph_subsys, u32 h_instream,
+ u8 *pb_data, u32 bytes_to_read)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
+ HPI_ISTREAM_READ);
+ u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+ hm.u.d.u.data.data_size = bytes_to_read;
+ hm.u.d.u.data.pb_data = pb_data;
+
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+}
+
+u16 hpi_instream_start(const struct hpi_hsubsys *ph_subsys, u32 h_instream)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
+ HPI_ISTREAM_START);
+ u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+}
+
+u16 hpi_instream_wait_start(const struct hpi_hsubsys *ph_subsys,
+ u32 h_instream)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
+ HPI_ISTREAM_WAIT_START);
+ u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+}
+
+u16 hpi_instream_stop(const struct hpi_hsubsys *ph_subsys, u32 h_instream)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
+ HPI_ISTREAM_STOP);
+ u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+}
+
+u16 hpi_instream_reset(const struct hpi_hsubsys *ph_subsys, u32 h_instream)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
+ HPI_ISTREAM_RESET);
+ u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+}
+
+u16 hpi_instream_get_info_ex(const struct hpi_hsubsys *ph_subsys,
+ u32 h_instream, u16 *pw_state, u32 *pbuffer_size, u32 *pdata_recorded,
+ u32 *psamples_recorded, u32 *pauxiliary_data_recorded)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
+ HPI_ISTREAM_GET_INFO);
+ u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+
+ hpi_send_recv(&hm, &hr);
+
+ if (pw_state)
+ *pw_state = hr.u.d.u.stream_info.state;
+ if (pbuffer_size)
+ *pbuffer_size = hr.u.d.u.stream_info.buffer_size;
+ if (pdata_recorded)
+ *pdata_recorded = hr.u.d.u.stream_info.data_available;
+ if (psamples_recorded)
+ *psamples_recorded = hr.u.d.u.stream_info.samples_transferred;
+ if (pauxiliary_data_recorded)
+ *pauxiliary_data_recorded =
+ hr.u.d.u.stream_info.auxiliary_data_available;
+ return hr.error;
+}
+
+u16 hpi_instream_ancillary_reset(const struct hpi_hsubsys *ph_subsys,
+ u32 h_instream, u16 bytes_per_frame, u16 mode, u16 alignment,
+ u16 idle_bit)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
+ HPI_ISTREAM_ANC_RESET);
+ u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+ hm.u.d.u.data.format.attributes = bytes_per_frame;
+ hm.u.d.u.data.format.format = (mode << 8) | (alignment & 0xff);
+ hm.u.d.u.data.format.channels = idle_bit;
+ hpi_send_recv(&hm, &hr);
+ return hr.error;
+}
+
+u16 hpi_instream_ancillary_get_info(const struct hpi_hsubsys *ph_subsys,
+ u32 h_instream, u32 *pframe_space)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
+ HPI_ISTREAM_ANC_GET_INFO);
+ u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+ hpi_send_recv(&hm, &hr);
+ if (pframe_space)
+ *pframe_space =
+ (hr.u.d.u.stream_info.buffer_size -
+ hr.u.d.u.stream_info.data_available) /
+ sizeof(struct hpi_anc_frame);
+ return hr.error;
+}
+
+u16 hpi_instream_ancillary_write(const struct hpi_hsubsys *ph_subsys,
+ u32 h_instream, const struct hpi_anc_frame *p_anc_frame_buffer,
+ u32 anc_frame_buffer_size_in_bytes,
+ u32 number_of_ancillary_frames_to_write)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
+ HPI_ISTREAM_ANC_WRITE);
+ u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+ hm.u.d.u.data.pb_data = (u8 *)p_anc_frame_buffer;
+ hm.u.d.u.data.data_size =
+ number_of_ancillary_frames_to_write *
+ sizeof(struct hpi_anc_frame);
+ if (hm.u.d.u.data.data_size <= anc_frame_buffer_size_in_bytes)
+ hpi_send_recv(&hm, &hr);
+ else
+ hr.error = HPI_ERROR_INVALID_DATA_TRANSFER;
+ return hr.error;
+}
+
+u16 hpi_instream_host_buffer_allocate(const struct hpi_hsubsys *ph_subsys,
+ u32 h_instream, u32 size_in_bytes)
+{
+
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
+ HPI_ISTREAM_HOSTBUFFER_ALLOC);
+ u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+ hm.u.d.u.data.data_size = size_in_bytes;
+ hpi_send_recv(&hm, &hr);
+ return hr.error;
+}
+
+u16 hpi_instream_host_buffer_get_info(const struct hpi_hsubsys *ph_subsys,
+ u32 h_instream, u8 **pp_buffer,
+ struct hpi_hostbuffer_status **pp_status)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
+ HPI_ISTREAM_HOSTBUFFER_GET_INFO);
+ u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+ hpi_send_recv(&hm, &hr);
+
+ if (hr.error == 0) {
+ if (pp_buffer)
+ *pp_buffer = hr.u.d.u.hostbuffer_info.p_buffer;
+ if (pp_status)
+ *pp_status = hr.u.d.u.hostbuffer_info.p_status;
+ }
+ return hr.error;
+}
+
+u16 hpi_instream_host_buffer_free(const struct hpi_hsubsys *ph_subsys,
+ u32 h_instream)
+{
+
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
+ HPI_ISTREAM_HOSTBUFFER_FREE);
+ u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+ hpi_send_recv(&hm, &hr);
+ return hr.error;
+}
+
+u16 hpi_instream_group_add(const struct hpi_hsubsys *ph_subsys,
+ u32 h_instream, u32 h_stream)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ u16 adapter;
+ char c_obj_type;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
+ HPI_ISTREAM_GROUP_ADD);
+ hr.error = 0;
+ u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+ c_obj_type = hpi_handle_object(h_stream);
+
+ switch (c_obj_type) {
+ case HPI_OBJ_OSTREAM:
+ hm.u.d.u.stream.object_type = HPI_OBJ_OSTREAM;
+ u32TOINDEXES(h_stream, &adapter,
+ &hm.u.d.u.stream.stream_index);
+ break;
+ case HPI_OBJ_ISTREAM:
+ hm.u.d.u.stream.object_type = HPI_OBJ_ISTREAM;
+ u32TOINDEXES(h_stream, &adapter,
+ &hm.u.d.u.stream.stream_index);
+ break;
+ default:
+ return HPI_ERROR_INVALID_STREAM;
+ }
+
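+ /* Streams may only be grouped with streams on the same adapter. */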
+ if (adapter != hm.adapter_index)
+ return HPI_ERROR_NO_INTERADAPTER_GROUPS;
+
+ hpi_send_recv(&hm, &hr);
+ return hr.error;
+}
+
+u16 hpi_instream_group_get_map(const struct hpi_hsubsys *ph_subsys,
+ u32 h_instream, u32 *poutstream_map, u32 *pinstream_map)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
+ HPI_ISTREAM_GROUP_GETMAP);
+ u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+ hpi_send_recv(&hm, &hr);
+
+ if (poutstream_map)
+ *poutstream_map = hr.u.d.u.group_info.outstream_group_map;
+ if (pinstream_map)
+ *pinstream_map = hr.u.d.u.group_info.instream_group_map;
+
+ return hr.error;
+}
+
+u16 hpi_instream_group_reset(const struct hpi_hsubsys *ph_subsys,
+ u32 h_instream)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
+ HPI_ISTREAM_GROUP_RESET);
+ u32TOINDEXES(h_instream, &hm.adapter_index, &hm.obj_index);
+ hpi_send_recv(&hm, &hr);
+ return hr.error;
+}
+
+u16 hpi_mixer_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index,
+ u32 *ph_mixer)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN);
+ hm.adapter_index = adapter_index;
+
+ hpi_send_recv(&hm, &hr);
+
+ if (hr.error == 0)
+ *ph_mixer =
+ hpi_indexes_to_handle(HPI_OBJ_MIXER, adapter_index,
+ 0);
+ else
+ *ph_mixer = 0;
+ return hr.error;
+}
+
+u16 hpi_mixer_close(const struct hpi_hsubsys *ph_subsys, u32 h_mixer)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER, HPI_MIXER_CLOSE);
+ u32TOINDEX(h_mixer, &hm.adapter_index);
+ hpi_send_recv(&hm, &hr);
+ return hr.error;
+}
+
+u16 hpi_mixer_get_control(const struct hpi_hsubsys *ph_subsys, u32 h_mixer,
+ u16 src_node_type, u16 src_node_type_index, u16 dst_node_type,
+ u16 dst_node_type_index, u16 control_type, u32 *ph_control)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER,
+ HPI_MIXER_GET_CONTROL);
+ u32TOINDEX(h_mixer, &hm.adapter_index);
+ hm.u.m.node_type1 = src_node_type;
+ hm.u.m.node_index1 = src_node_type_index;
+ hm.u.m.node_type2 = dst_node_type;
+ hm.u.m.node_index2 = dst_node_type_index;
+ hm.u.m.control_type = control_type;
+
+ hpi_send_recv(&hm, &hr);
+
+ if (hr.error == 0)
+ *ph_control =
+ hpi_indexes_to_handle(HPI_OBJ_CONTROL,
+ hm.adapter_index, hr.u.m.control_index);
+ else
+ *ph_control = 0;
+ return hr.error;
+}
+
+u16 hpi_mixer_get_control_by_index(const struct hpi_hsubsys *ph_subsys,
+ u32 h_mixer, u16 control_index, u16 *pw_src_node_type,
+ u16 *pw_src_node_index, u16 *pw_dst_node_type, u16 *pw_dst_node_index,
+ u16 *pw_control_type, u32 *ph_control)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER,
+ HPI_MIXER_GET_CONTROL_BY_INDEX);
+ u32TOINDEX(h_mixer, &hm.adapter_index);
+ hm.u.m.control_index = control_index;
+ hpi_send_recv(&hm, &hr);
+
+ if (pw_src_node_type) {
+ *pw_src_node_type =
+ hr.u.m.src_node_type + HPI_SOURCENODE_NONE;
+ *pw_src_node_index = hr.u.m.src_node_index;
+ *pw_dst_node_type = hr.u.m.dst_node_type + HPI_DESTNODE_NONE;
+ *pw_dst_node_index = hr.u.m.dst_node_index;
+ }
+ if (pw_control_type)
+ *pw_control_type = hr.u.m.control_index;
+
+ if (ph_control) {
+ if (hr.error == 0)
+ *ph_control =
+ hpi_indexes_to_handle(HPI_OBJ_CONTROL,
+ hm.adapter_index, control_index);
+ else
+ *ph_control = 0;
+ }
+ return hr.error;
+}
+
+u16 hpi_mixer_store(const struct hpi_hsubsys *ph_subsys, u32 h_mixer,
+ enum HPI_MIXER_STORE_COMMAND command, u16 index)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER, HPI_MIXER_STORE);
+ u32TOINDEX(h_mixer, &hm.adapter_index);
+ hm.u.mx.store.command = command;
+ hm.u.mx.store.index = index;
+ hpi_send_recv(&hm, &hr);
+ return hr.error;
+}
+
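+ /* Generic helpers: wrap HPI_CONTROL_SET_STATE / HPI_CONTROL_GET_STATE
+ * messages carrying a single attribute and up to two u32 parameters.
+ * Most of the control accessors below are built on these. */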
+static
+u16 hpi_control_param_set(const struct hpi_hsubsys *ph_subsys,
+ const u32 h_control, const u16 attrib, const u32 param1,
+ const u32 param2)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
+ HPI_CONTROL_SET_STATE);
+ u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ hm.u.c.attribute = attrib;
+ hm.u.c.param1 = param1;
+ hm.u.c.param2 = param2;
+ hpi_send_recv(&hm, &hr);
+ return hr.error;
+}
+
+static
+u16 hpi_control_param_get(const struct hpi_hsubsys *ph_subsys,
+ const u32 h_control, const u16 attrib, u32 param1, u32 param2,
+ u32 *pparam1, u32 *pparam2)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
+ HPI_CONTROL_GET_STATE);
+ u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ hm.u.c.attribute = attrib;
+ hm.u.c.param1 = param1;
+ hm.u.c.param2 = param2;
+ hpi_send_recv(&hm, &hr);
+ if (pparam1)
+ *pparam1 = hr.u.c.param1;
+ if (pparam2)
+ *pparam2 = hr.u.c.param2;
+
+ return hr.error;
+}
+
+#define hpi_control_param1_get(s, h, a, p1) \
+ hpi_control_param_get(s, h, a, 0, 0, p1, NULL)
+#define hpi_control_param2_get(s, h, a, p1, p2) \
+ hpi_control_param_get(s, h, a, 0, 0, p1, p2)
+#define hpi_control_ex_param1_get(s, h, a, p1) \
+ hpi_control_ex_param_get(s, h, a, 0, 0, p1, NULL)
+#define hpi_control_ex_param2_get(s, h, a, p1, p2) \
+ hpi_control_ex_param_get(s, h, a, 0, 0, p1, p2)
+
+static
+u16 hpi_control_query(const struct hpi_hsubsys *ph_subsys,
+ const u32 h_control, const u16 attrib, const u32 index,
+ const u32 param, u32 *psetting)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
+ HPI_CONTROL_GET_INFO);
+ u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+
+ hm.u.c.attribute = attrib;
+ hm.u.c.param1 = index;
+ hm.u.c.param2 = param;
+
+ hpi_send_recv(&hm, &hr);
+ *psetting = hr.u.c.param1;
+
+ return hr.error;
+}
+
+static u16 hpi_control_get_string(const struct hpi_hsubsys *ph_subsys,
+ const u32 h_control, const u16 attribute, char *psz_string,
+ const u32 string_length)
+{
+ unsigned int sub_string_index = 0, j = 0;
+ char c = 0;
+ unsigned int n = 0;
+ u16 hE = 0;
+
+ if ((string_length < 1) || (string_length > 256))
+ return HPI_ERROR_INVALID_CONTROL_VALUE;
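+ /* The string is fetched 8 characters per message; each response carries
+ * up to 8 bytes in chars8.sz_data plus a remaining_chars count. */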
+ for (sub_string_index = 0; sub_string_index < string_length;
+ sub_string_index += 8) {
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
+ HPI_CONTROL_GET_STATE);
+ u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ hm.u.c.attribute = attribute;
+ hm.u.c.param1 = sub_string_index;
+ hm.u.c.param2 = 0;
+ hpi_send_recv(&hm, &hr);
+
+ if (sub_string_index == 0
+ && (hr.u.cu.chars8.remaining_chars + 8) >
+ string_length)
+ return HPI_ERROR_INVALID_CONTROL_VALUE;
+
+ if (hr.error) {
+ hE = hr.error;
+ break;
+ }
+ for (j = 0; j < 8; j++) {
+ c = hr.u.cu.chars8.sz_data[j];
+ psz_string[sub_string_index + j] = c;
+ n++;
+ if (n >= string_length) {
+ psz_string[string_length - 1] = 0;
+ hE = HPI_ERROR_INVALID_CONTROL_VALUE;
+ break;
+ }
+ if (c == 0)
+ break;
+ }
+
+ if ((hr.u.cu.chars8.remaining_chars == 0)
+ && ((sub_string_index + j) < string_length)
+ && (c != 0)) {
+ c = 0;
+ psz_string[sub_string_index + j] = c;
+ }
+ if (c == 0)
+ break;
+ }
+ return hE;
+}
+
+u16 HPI_AESEBU__receiver_query_format(const struct hpi_hsubsys *ph_subsys,
+ const u32 h_aes_rx, const u32 index, u16 *pw_format)
+{
+ u32 qr;
+ u16 err;
+
+ err = hpi_control_query(ph_subsys, h_aes_rx, HPI_AESEBURX_FORMAT,
+ index, 0, &qr);
+ *pw_format = (u16)qr;
+ return err;
+}
+
+u16 HPI_AESEBU__receiver_set_format(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 format)
+{
+ return hpi_control_param_set(ph_subsys, h_control,
+ HPI_AESEBURX_FORMAT, format, 0);
+}
+
+u16 HPI_AESEBU__receiver_get_format(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 *pw_format)
+{
+ u16 err;
+ u32 param;
+
+ err = hpi_control_param1_get(ph_subsys, h_control,
+ HPI_AESEBURX_FORMAT, &param);
+ if (!err && pw_format)
+ *pw_format = (u16)param;
+
+ return err;
+}
+
+u16 HPI_AESEBU__receiver_get_sample_rate(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 *psample_rate)
+{
+ return hpi_control_param1_get(ph_subsys, h_control,
+ HPI_AESEBURX_SAMPLERATE, psample_rate);
+}
+
+u16 HPI_AESEBU__receiver_get_user_data(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 index, u16 *pw_data)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
+ HPI_CONTROL_GET_STATE);
+ u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ hm.u.c.attribute = HPI_AESEBURX_USERDATA;
+ hm.u.c.param1 = index;
+
+ hpi_send_recv(&hm, &hr);
+
+ if (pw_data)
+ *pw_data = (u16)hr.u.c.param2;
+ return hr.error;
+}
+
+u16 HPI_AESEBU__receiver_get_channel_status(const struct hpi_hsubsys
+ *ph_subsys, u32 h_control, u16 index, u16 *pw_data)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
+ HPI_CONTROL_GET_STATE);
+ u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ hm.u.c.attribute = HPI_AESEBURX_CHANNELSTATUS;
+ hm.u.c.param1 = index;
+
+ hpi_send_recv(&hm, &hr);
+
+ if (pw_data)
+ *pw_data = (u16)hr.u.c.param2;
+ return hr.error;
+}
+
+u16 HPI_AESEBU__receiver_get_error_status(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 *pw_error_data)
+{
+ u32 error_data = 0;
+ u16 error = 0;
+
+ error = hpi_control_param1_get(ph_subsys, h_control,
+ HPI_AESEBURX_ERRORSTATUS, &error_data);
+ if (pw_error_data)
+ *pw_error_data = (u16)error_data;
+ return error;
+}
+
+u16 HPI_AESEBU__transmitter_set_sample_rate(const struct hpi_hsubsys
+ *ph_subsys, u32 h_control, u32 sample_rate)
+{
+ return hpi_control_param_set(ph_subsys, h_control,
+ HPI_AESEBUTX_SAMPLERATE, sample_rate, 0);
+}
+
+u16 HPI_AESEBU__transmitter_set_user_data(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 index, u16 data)
+{
+ return hpi_control_param_set(ph_subsys, h_control,
+ HPI_AESEBUTX_USERDATA, index, data);
+}
+
+u16 HPI_AESEBU__transmitter_set_channel_status(const struct hpi_hsubsys
+ *ph_subsys, u32 h_control, u16 index, u16 data)
+{
+ return hpi_control_param_set(ph_subsys, h_control,
+ HPI_AESEBUTX_CHANNELSTATUS, index, data);
+}
+
+u16 HPI_AESEBU__transmitter_get_channel_status(const struct hpi_hsubsys
+ *ph_subsys, u32 h_control, u16 index, u16 *pw_data)
+{
+ return HPI_ERROR_INVALID_OPERATION;
+}
+
+u16 HPI_AESEBU__transmitter_query_format(const struct hpi_hsubsys *ph_subsys,
+ const u32 h_aes_tx, const u32 index, u16 *pw_format)
+{
+ u32 qr;
+ u16 err;
+
+ err = hpi_control_query(ph_subsys, h_aes_tx, HPI_AESEBUTX_FORMAT,
+ index, 0, &qr);
+ *pw_format = (u16)qr;
+ return err;
+}
+
+u16 HPI_AESEBU__transmitter_set_format(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 output_format)
+{
+ return hpi_control_param_set(ph_subsys, h_control,
+ HPI_AESEBUTX_FORMAT, output_format, 0);
+}
+
+u16 HPI_AESEBU__transmitter_get_format(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 *pw_output_format)
+{
+ u16 err;
+ u32 param;
+
+ err = hpi_control_param1_get(ph_subsys, h_control,
+ HPI_AESEBUTX_FORMAT, &param);
+ if (!err && pw_output_format)
+ *pw_output_format = (u16)param;
+
+ return err;
+}
+
+u16 hpi_bitstream_set_clock_edge(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 edge_type)
+{
+ return hpi_control_param_set(ph_subsys, h_control,
+ HPI_BITSTREAM_CLOCK_EDGE, edge_type, 0);
+}
+
+u16 hpi_bitstream_set_data_polarity(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 polarity)
+{
+ return hpi_control_param_set(ph_subsys, h_control,
+ HPI_BITSTREAM_DATA_POLARITY, polarity, 0);
+}
+
+u16 hpi_bitstream_get_activity(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 *pw_clk_activity, u16 *pw_data_activity)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
+ HPI_CONTROL_GET_STATE);
+ u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ hm.u.c.attribute = HPI_BITSTREAM_ACTIVITY;
+ hpi_send_recv(&hm, &hr);
+ if (pw_clk_activity)
+ *pw_clk_activity = (u16)hr.u.c.param1;
+ if (pw_data_activity)
+ *pw_data_activity = (u16)hr.u.c.param2;
+ return hr.error;
+}
+
+u16 hpi_channel_mode_query_mode(const struct hpi_hsubsys *ph_subsys,
+ const u32 h_mode, const u32 index, u16 *pw_mode)
+{
+ u32 qr;
+ u16 err;
+
+ err = hpi_control_query(ph_subsys, h_mode, HPI_CHANNEL_MODE_MODE,
+ index, 0, &qr);
+ *pw_mode = (u16)qr;
+ return err;
+}
+
+u16 hpi_channel_mode_set(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ u16 mode)
+{
+ return hpi_control_param_set(ph_subsys, h_control,
+ HPI_CHANNEL_MODE_MODE, mode, 0);
+}
+
+u16 hpi_channel_mode_get(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ u16 *mode)
+{
+ u32 mode32 = 0;
+ u16 error = hpi_control_param1_get(ph_subsys, h_control,
+ HPI_CHANNEL_MODE_MODE, &mode32);
+ if (mode)
+ *mode = (u16)mode32;
+ return error;
+}
+
+u16 hpi_cobranet_hmi_write(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ u32 hmi_address, u32 byte_count, u8 *pb_data)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROLEX,
+ HPI_CONTROL_SET_STATE);
+ u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+
+ hm.u.cx.u.cobranet_data.byte_count = byte_count;
+ hm.u.cx.u.cobranet_data.hmi_address = hmi_address;
+
+ if (byte_count <= 8) {
+ memcpy(hm.u.cx.u.cobranet_data.data, pb_data, byte_count);
+ hm.u.cx.attribute = HPI_COBRANET_SET;
+ } else {
+ hm.u.cx.u.cobranet_bigdata.pb_data = pb_data;
+ hm.u.cx.attribute = HPI_COBRANET_SET_DATA;
+ }
+
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+}
+
+u16 hpi_cobranet_hmi_read(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ u32 hmi_address, u32 max_byte_count, u32 *pbyte_count, u8 *pb_data)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROLEX,
+ HPI_CONTROL_GET_STATE);
+ u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+
+ hm.u.cx.u.cobranet_data.byte_count = max_byte_count;
+ hm.u.cx.u.cobranet_data.hmi_address = hmi_address;
+
+ if (max_byte_count <= 8) {
+ hm.u.cx.attribute = HPI_COBRANET_GET;
+ } else {
+ hm.u.cx.u.cobranet_bigdata.pb_data = pb_data;
+ hm.u.cx.attribute = HPI_COBRANET_GET_DATA;
+ }
+
+ hpi_send_recv(&hm, &hr);
+ if (!hr.error && pb_data) {
+
+ *pbyte_count = hr.u.cx.u.cobranet_data.byte_count;
+
+ if (*pbyte_count < max_byte_count)
+ max_byte_count = *pbyte_count;
+
+ if (hm.u.cx.attribute == HPI_COBRANET_GET)
+ memcpy(pb_data, hr.u.cx.u.cobranet_data.data,
+ max_byte_count);
+ /* For HPI_COBRANET_GET_DATA the data was transferred directly
+ * into pb_data via cobranet_bigdata, so there is nothing left
+ * to copy here. */
+
+ }
+ return hr.error;
+}
+
+u16 hpi_cobranet_hmi_get_status(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 *pstatus, u32 *preadable_size,
+ u32 *pwriteable_size)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROLEX,
+ HPI_CONTROL_GET_STATE);
+ u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+
+ hm.u.cx.attribute = HPI_COBRANET_GET_STATUS;
+
+ hpi_send_recv(&hm, &hr);
+ if (!hr.error) {
+ if (pstatus)
+ *pstatus = hr.u.cx.u.cobranet_status.status;
+ if (preadable_size)
+ *preadable_size =
+ hr.u.cx.u.cobranet_status.readable_size;
+ if (pwriteable_size)
+ *pwriteable_size =
+ hr.u.cx.u.cobranet_status.writeable_size;
+ }
+ return hr.error;
+}
+
+u16 hpi_cobranet_getI_paddress(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 *pi_paddress)
+{
+ u32 byte_count;
+ u32 iP;
+ u16 error;
+ error = hpi_cobranet_hmi_read(ph_subsys, h_control,
+ HPI_COBRANET_HMI_cobra_ip_mon_currentIP, 4, &byte_count,
+ (u8 *)&iP);
+
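+ /* Reorder the value read from the HMI: swap the two bytes within each
+ * 16-bit half to form the IP address returned to the caller. */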
+ *pi_paddress =
+ ((iP & 0xff000000) >> 8) | ((iP & 0x00ff0000) << 8) | ((iP &
+ 0x0000ff00) >> 8) | ((iP & 0x000000ff) << 8);
+
+ if (error)
+ *pi_paddress = 0;
+
+ return error;
+
+}
+
+u16 hpi_cobranet_setI_paddress(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 i_paddress)
+{
+ u32 iP;
+ u16 error;
+
+ iP = ((i_paddress & 0xff000000) >> 8) | ((i_paddress & 0x00ff0000) <<
+ 8) | ((i_paddress & 0x0000ff00) >> 8) | ((i_paddress &
+ 0x000000ff) << 8);
+
+ error = hpi_cobranet_hmi_write(ph_subsys, h_control,
+ HPI_COBRANET_HMI_cobra_ip_mon_currentIP, 4, (u8 *)&iP);
+
+ return error;
+
+}
+
+u16 hpi_cobranet_get_staticI_paddress(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 *pi_paddress)
+{
+ u32 byte_count;
+ u32 iP;
+ u16 error;
+ error = hpi_cobranet_hmi_read(ph_subsys, h_control,
+ HPI_COBRANET_HMI_cobra_ip_mon_staticIP, 4, &byte_count,
+ (u8 *)&iP);
+
+ *pi_paddress =
+ ((iP & 0xff000000) >> 8) | ((iP & 0x00ff0000) << 8) | ((iP &
+ 0x0000ff00) >> 8) | ((iP & 0x000000ff) << 8);
+
+ if (error)
+ *pi_paddress = 0;
+
+ return error;
+
+}
+
+u16 hpi_cobranet_set_staticI_paddress(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 i_paddress)
+{
+ u32 iP;
+ u16 error;
+
+ iP = ((i_paddress & 0xff000000) >> 8) | ((i_paddress & 0x00ff0000) <<
+ 8) | ((i_paddress & 0x0000ff00) >> 8) | ((i_paddress &
+ 0x000000ff) << 8);
+
+ error = hpi_cobranet_hmi_write(ph_subsys, h_control,
+ HPI_COBRANET_HMI_cobra_ip_mon_staticIP, 4, (u8 *)&iP);
+
+ return error;
+
+}
+
+u16 hpi_cobranet_getMA_caddress(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 *pmAC_MS_bs, u32 *pmAC_LS_bs)
+{
+ u32 byte_count;
+ u16 error;
+ u32 mAC;
+ error = hpi_cobranet_hmi_read(ph_subsys, h_control,
+ HPI_COBRANET_HMI_cobra_if_phy_address, 4, &byte_count,
+ (u8 *)&mAC);
+ *pmAC_MS_bs =
+ ((mAC & 0xff000000) >> 8) | ((mAC & 0x00ff0000) << 8) | ((mAC
+ & 0x0000ff00) >> 8) | ((mAC & 0x000000ff) << 8);
+ error += hpi_cobranet_hmi_read(ph_subsys, h_control,
+ HPI_COBRANET_HMI_cobra_if_phy_address + 1, 4, &byte_count,
+ (u8 *)&mAC);
+ *pmAC_LS_bs =
+ ((mAC & 0xff000000) >> 8) | ((mAC & 0x00ff0000) << 8) | ((mAC
+ & 0x0000ff00) >> 8) | ((mAC & 0x000000ff) << 8);
+
+ if (error) {
+ *pmAC_MS_bs = 0;
+ *pmAC_LS_bs = 0;
+ }
+
+ return error;
+}
+
+u16 hpi_compander_set(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ u16 attack, u16 decay, short ratio100, short threshold0_01dB,
+ short makeup_gain0_01dB)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
+ HPI_CONTROL_SET_STATE);
+ u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+
+ hm.u.c.param1 = attack + ((u32)ratio100 << 16);
+ hm.u.c.param2 = (decay & 0xFFFFL);
+ hm.u.c.an_log_value[0] = threshold0_01dB;
+ hm.u.c.an_log_value[1] = makeup_gain0_01dB;
+ hm.u.c.attribute = HPI_COMPANDER_PARAMS;
+
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+}
+
+u16 hpi_compander_get(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ u16 *pw_attack, u16 *pw_decay, short *pw_ratio100,
+ short *pn_threshold0_01dB, short *pn_makeup_gain0_01dB)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
+ HPI_CONTROL_GET_STATE);
+ u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ hm.u.c.attribute = HPI_COMPANDER_PARAMS;
+
+ hpi_send_recv(&hm, &hr);
+
+ if (pw_attack)
+ *pw_attack = (short)(hr.u.c.param1 & 0xFFFF);
+ if (pw_decay)
+ *pw_decay = (short)(hr.u.c.param2 & 0xFFFF);
+ if (pw_ratio100)
+ *pw_ratio100 = (short)(hr.u.c.param1 >> 16);
+
+ if (pn_threshold0_01dB)
+ *pn_threshold0_01dB = hr.u.c.an_log_value[0];
+ if (pn_makeup_gain0_01dB)
+ *pn_makeup_gain0_01dB = hr.u.c.an_log_value[1];
+
+ return hr.error;
+}
+
+u16 hpi_level_query_range(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ short *min_gain_01dB, short *max_gain_01dB, short *step_gain_01dB)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
+ HPI_CONTROL_GET_STATE);
+ u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ hm.u.c.attribute = HPI_LEVEL_RANGE;
+
+ hpi_send_recv(&hm, &hr);
+ if (hr.error) {
+ hr.u.c.an_log_value[0] = 0;
+ hr.u.c.an_log_value[1] = 0;
+ hr.u.c.param1 = 0;
+ }
+ if (min_gain_01dB)
+ *min_gain_01dB = hr.u.c.an_log_value[0];
+ if (max_gain_01dB)
+ *max_gain_01dB = hr.u.c.an_log_value[1];
+ if (step_gain_01dB)
+ *step_gain_01dB = (short)hr.u.c.param1;
+ return hr.error;
+}
+
+u16 hpi_level_set_gain(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ short an_gain0_01dB[HPI_MAX_CHANNELS]
+ )
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
+ HPI_CONTROL_SET_STATE);
+ u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ memcpy(hm.u.c.an_log_value, an_gain0_01dB,
+ sizeof(short) * HPI_MAX_CHANNELS);
+ hm.u.c.attribute = HPI_LEVEL_GAIN;
+
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+}
+
+u16 hpi_level_get_gain(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ short an_gain0_01dB[HPI_MAX_CHANNELS]
+ )
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
+ HPI_CONTROL_GET_STATE);
+ u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ hm.u.c.attribute = HPI_LEVEL_GAIN;
+
+ hpi_send_recv(&hm, &hr);
+
+ memcpy(an_gain0_01dB, hr.u.c.an_log_value,
+ sizeof(short) * HPI_MAX_CHANNELS);
+ return hr.error;
+}
+
+u16 hpi_meter_query_channels(const struct hpi_hsubsys *ph_subsys,
+ const u32 h_meter, u32 *p_channels)
+{
+ return hpi_control_query(ph_subsys, h_meter, HPI_METER_NUM_CHANNELS,
+ 0, 0, p_channels);
+}
+
+u16 hpi_meter_get_peak(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ short an_peakdB[HPI_MAX_CHANNELS]
+ )
+{
+ short i = 0;
+
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
+ HPI_CONTROL_GET_STATE);
+ u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ hm.u.c.attribute = HPI_METER_PEAK;
+
+ hpi_send_recv(&hm, &hr);
+
+ if (!hr.error)
+ memcpy(an_peakdB, hr.u.c.an_log_value,
+ sizeof(short) * HPI_MAX_CHANNELS);
+ else
+ for (i = 0; i < HPI_MAX_CHANNELS; i++)
+ an_peakdB[i] = HPI_METER_MINIMUM;
+ return hr.error;
+}
+
+u16 hpi_meter_get_rms(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ short an_rmsdB[HPI_MAX_CHANNELS]
+ )
+{
+ short i = 0;
+
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
+ HPI_CONTROL_GET_STATE);
+ u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ hm.u.c.attribute = HPI_METER_RMS;
+
+ hpi_send_recv(&hm, &hr);
+
+ if (!hr.error)
+ memcpy(an_rmsdB, hr.u.c.an_log_value,
+ sizeof(short) * HPI_MAX_CHANNELS);
+ else
+ for (i = 0; i < HPI_MAX_CHANNELS; i++)
+ an_rmsdB[i] = HPI_METER_MINIMUM;
+
+ return hr.error;
+}
+
+u16 hpi_meter_set_rms_ballistics(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 attack, u16 decay)
+{
+ return hpi_control_param_set(ph_subsys, h_control,
+ HPI_METER_RMS_BALLISTICS, attack, decay);
+}
+
+u16 hpi_meter_get_rms_ballistics(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 *pn_attack, u16 *pn_decay)
+{
+ u32 attack;
+ u32 decay;
+ u16 error;
+
+ error = hpi_control_param2_get(ph_subsys, h_control,
+ HPI_METER_RMS_BALLISTICS, &attack, &decay);
+
+ if (pn_attack)
+ *pn_attack = (unsigned short)attack;
+ if (pn_decay)
+ *pn_decay = (unsigned short)decay;
+
+ return error;
+}
+
+u16 hpi_meter_set_peak_ballistics(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 attack, u16 decay)
+{
+ return hpi_control_param_set(ph_subsys, h_control,
+ HPI_METER_PEAK_BALLISTICS, attack, decay);
+}
+
+u16 hpi_meter_get_peak_ballistics(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 *pn_attack, u16 *pn_decay)
+{
+ u32 attack;
+ u32 decay;
+ u16 error;
+
+ error = hpi_control_param2_get(ph_subsys, h_control,
+ HPI_METER_PEAK_BALLISTICS, &attack, &decay);
+
+ if (pn_attack)
+ *pn_attack = (short)attack;
+ if (pn_decay)
+ *pn_decay = (short)decay;
+
+ return error;
+}
+
+u16 hpi_microphone_set_phantom_power(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 on_off)
+{
+ return hpi_control_param_set(ph_subsys, h_control,
+ HPI_MICROPHONE_PHANTOM_POWER, (u32)on_off, 0);
+}
+
+u16 hpi_microphone_get_phantom_power(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 *pw_on_off)
+{
+ u16 error = 0;
+ u32 on_off = 0;
+ error = hpi_control_param1_get(ph_subsys, h_control,
+ HPI_MICROPHONE_PHANTOM_POWER, &on_off);
+ if (pw_on_off)
+ *pw_on_off = (u16)on_off;
+ return error;
+}
+
+u16 hpi_multiplexer_set_source(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 source_node_type, u16 source_node_index)
+{
+ return hpi_control_param_set(ph_subsys, h_control,
+ HPI_MULTIPLEXER_SOURCE, source_node_type, source_node_index);
+}
+
+u16 hpi_multiplexer_get_source(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 *source_node_type, u16 *source_node_index)
+{
+ u32 node, index;
+ u16 error = hpi_control_param2_get(ph_subsys, h_control,
+ HPI_MULTIPLEXER_SOURCE, &node,
+ &index);
+ if (source_node_type)
+ *source_node_type = (u16)node;
+ if (source_node_index)
+ *source_node_index = (u16)index;
+ return error;
+}
+
+u16 hpi_multiplexer_query_source(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 index, u16 *source_node_type,
+ u16 *source_node_index)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
+ HPI_CONTROL_GET_STATE);
+ u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ hm.u.c.attribute = HPI_MULTIPLEXER_QUERYSOURCE;
+ hm.u.c.param1 = index;
+
+ hpi_send_recv(&hm, &hr);
+
+ if (source_node_type)
+ *source_node_type = (u16)hr.u.c.param1;
+ if (source_node_index)
+ *source_node_index = (u16)hr.u.c.param2;
+ return hr.error;
+}
+
+u16 hpi_parametricEQ__get_info(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 *pw_number_of_bands, u16 *pw_on_off)
+{
+ u32 oB = 0;
+ u32 oO = 0;
+ u16 error = 0;
+
+ error = hpi_control_param2_get(ph_subsys, h_control,
+ HPI_EQUALIZER_NUM_FILTERS, &oO, &oB);
+ if (pw_number_of_bands)
+ *pw_number_of_bands = (u16)oB;
+ if (pw_on_off)
+ *pw_on_off = (u16)oO;
+ return error;
+}
+
+u16 hpi_parametricEQ__set_state(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 on_off)
+{
+ return hpi_control_param_set(ph_subsys, h_control,
+ HPI_EQUALIZER_NUM_FILTERS, on_off, 0);
+}
+
+u16 hpi_parametricEQ__get_band(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 index, u16 *pn_type, u32 *pfrequency_hz,
+ short *pnQ100, short *pn_gain0_01dB)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
+ HPI_CONTROL_GET_STATE);
+ u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ hm.u.c.attribute = HPI_EQUALIZER_FILTER;
+ hm.u.c.param2 = index;
+
+ hpi_send_recv(&hm, &hr);
+
+ if (pfrequency_hz)
+ *pfrequency_hz = hr.u.c.param1;
+ if (pn_type)
+ *pn_type = (u16)(hr.u.c.param2 >> 16);
+ if (pnQ100)
+ *pnQ100 = hr.u.c.an_log_value[1];
+ if (pn_gain0_01dB)
+ *pn_gain0_01dB = hr.u.c.an_log_value[0];
+
+ return hr.error;
+}
+
+u16 hpi_parametricEQ__set_band(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 index, u16 type, u32 frequency_hz, short q100,
+ short gain0_01dB)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
+ HPI_CONTROL_SET_STATE);
+ u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+
+ hm.u.c.param1 = frequency_hz;
+ hm.u.c.param2 = (index & 0xFFFFL) + ((u32)type << 16);
+ hm.u.c.an_log_value[0] = gain0_01dB;
+ hm.u.c.an_log_value[1] = q100;
+ hm.u.c.attribute = HPI_EQUALIZER_FILTER;
+
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+}
+
+u16 hpi_parametricEQ__get_coeffs(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 index, short coeffs[5]
+ )
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
+ HPI_CONTROL_GET_STATE);
+ u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ hm.u.c.attribute = HPI_EQUALIZER_COEFFICIENTS;
+ hm.u.c.param2 = index;
+
+ hpi_send_recv(&hm, &hr);
+
+ coeffs[0] = (short)hr.u.c.an_log_value[0];
+ coeffs[1] = (short)hr.u.c.an_log_value[1];
+ coeffs[2] = (short)hr.u.c.param1;
+ coeffs[3] = (short)(hr.u.c.param1 >> 16);
+ coeffs[4] = (short)hr.u.c.param2;
+
+ return hr.error;
+}
+
+u16 hpi_sample_clock_query_source(const struct hpi_hsubsys *ph_subsys,
+ const u32 h_clock, const u32 index, u16 *pw_source)
+{
+ u32 qr;
+ u16 err;
+
+ err = hpi_control_query(ph_subsys, h_clock, HPI_SAMPLECLOCK_SOURCE,
+ index, 0, &qr);
+ *pw_source = (u16)qr;
+ return err;
+}
+
+u16 hpi_sample_clock_set_source(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 source)
+{
+ return hpi_control_param_set(ph_subsys, h_control,
+ HPI_SAMPLECLOCK_SOURCE, source, 0);
+}
+
+u16 hpi_sample_clock_get_source(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 *pw_source)
+{
+ u16 error = 0;
+ u32 source = 0;
+ error = hpi_control_param1_get(ph_subsys, h_control,
+ HPI_SAMPLECLOCK_SOURCE, &source);
+ if (!error)
+ if (pw_source)
+ *pw_source = (u16)source;
+ return error;
+}
+
+u16 hpi_sample_clock_query_source_index(const struct hpi_hsubsys *ph_subsys,
+ const u32 h_clock, const u32 index, const u32 source,
+ u16 *pw_source_index)
+{
+ u32 qr;
+ u16 err;
+
+ err = hpi_control_query(ph_subsys, h_clock,
+ HPI_SAMPLECLOCK_SOURCE_INDEX, index, source, &qr);
+ *pw_source_index = (u16)qr;
+ return err;
+}
+
+u16 hpi_sample_clock_set_source_index(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 source_index)
+{
+ return hpi_control_param_set(ph_subsys, h_control,
+ HPI_SAMPLECLOCK_SOURCE_INDEX, source_index, 0);
+}
+
+u16 hpi_sample_clock_get_source_index(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u16 *pw_source_index)
+{
+ u16 error = 0;
+ u32 source_index = 0;
+ error = hpi_control_param1_get(ph_subsys, h_control,
+ HPI_SAMPLECLOCK_SOURCE_INDEX, &source_index);
+ if (!error)
+ if (pw_source_index)
+ *pw_source_index = (u16)source_index;
+ return error;
+}
+
+u16 hpi_sample_clock_query_local_rate(const struct hpi_hsubsys *ph_subsys,
+ const u32 h_clock, const u32 index, u32 *prate)
+{
+ u16 err;
+ err = hpi_control_query(ph_subsys, h_clock,
+ HPI_SAMPLECLOCK_LOCAL_SAMPLERATE, index, 0, prate);
+
+ return err;
+}
+
+u16 hpi_sample_clock_set_local_rate(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 sample_rate)
+{
+ return hpi_control_param_set(ph_subsys, h_control,
+ HPI_SAMPLECLOCK_LOCAL_SAMPLERATE, sample_rate, 0);
+}
+
+u16 hpi_sample_clock_get_local_rate(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 *psample_rate)
+{
+ u16 error = 0;
+ u32 sample_rate = 0;
+ error = hpi_control_param1_get(ph_subsys, h_control,
+ HPI_SAMPLECLOCK_LOCAL_SAMPLERATE, &sample_rate);
+ if (!error)
+ if (psample_rate)
+ *psample_rate = sample_rate;
+ return error;
+}
+
+u16 hpi_sample_clock_get_sample_rate(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 *psample_rate)
+{
+ u16 error = 0;
+ u32 sample_rate = 0;
+ error = hpi_control_param1_get(ph_subsys, h_control,
+ HPI_SAMPLECLOCK_SAMPLERATE, &sample_rate);
+ if (!error)
+ if (psample_rate)
+ *psample_rate = sample_rate;
+ return error;
+}
+
+u16 hpi_sample_clock_set_auto(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 enable)
+{
+ return hpi_control_param_set(ph_subsys, h_control,
+ HPI_SAMPLECLOCK_AUTO, enable, 0);
+}
+
+u16 hpi_sample_clock_get_auto(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 *penable)
+{
+ return hpi_control_param1_get(ph_subsys, h_control,
+ HPI_SAMPLECLOCK_AUTO, penable);
+}
+
+u16 hpi_sample_clock_set_local_rate_lock(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 lock)
+{
+ return hpi_control_param_set(ph_subsys, h_control,
+ HPI_SAMPLECLOCK_LOCAL_LOCK, lock, 0);
+}
+
+u16 hpi_sample_clock_get_local_rate_lock(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 *plock)
+{
+ return hpi_control_param1_get(ph_subsys, h_control,
+ HPI_SAMPLECLOCK_LOCAL_LOCK, plock);
+}
+
+u16 hpi_tone_detector_get_frequency(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 index, u32 *frequency)
+{
+ return hpi_control_param_get(ph_subsys, h_control,
+ HPI_TONEDETECTOR_FREQUENCY, index, 0, frequency, NULL);
+}
+
+u16 hpi_tone_detector_get_state(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 *state)
+{
+ return hpi_control_param_get(ph_subsys, h_control,
+ HPI_TONEDETECTOR_STATE, 0, 0, (u32 *)state, NULL);
+}
+
+u16 hpi_tone_detector_set_enable(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 enable)
+{
+ return hpi_control_param_set(ph_subsys, h_control, HPI_GENERIC_ENABLE,
+ (u32)enable, 0);
+}
+
+u16 hpi_tone_detector_get_enable(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 *enable)
+{
+ return hpi_control_param_get(ph_subsys, h_control, HPI_GENERIC_ENABLE,
+ 0, 0, (u32 *)enable, NULL);
+}
+
+u16 hpi_tone_detector_set_event_enable(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 event_enable)
+{
+ return hpi_control_param_set(ph_subsys, h_control,
+ HPI_GENERIC_EVENT_ENABLE, (u32)event_enable, 0);
+}
+
+u16 hpi_tone_detector_get_event_enable(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 *event_enable)
+{
+ return hpi_control_param_get(ph_subsys, h_control,
+ HPI_GENERIC_EVENT_ENABLE, 0, 0, (u32 *)event_enable, NULL);
+}
+
+u16 hpi_tone_detector_set_threshold(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, int threshold)
+{
+ return hpi_control_param_set(ph_subsys, h_control,
+ HPI_TONEDETECTOR_THRESHOLD, (u32)threshold, 0);
+}
+
+u16 hpi_tone_detector_get_threshold(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, int *threshold)
+{
+ return hpi_control_param_get(ph_subsys, h_control,
+ HPI_TONEDETECTOR_THRESHOLD, 0, 0, (u32 *)threshold, NULL);
+}
+
+u16 hpi_silence_detector_get_state(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 *state)
+{
+ return hpi_control_param_get(ph_subsys, h_control,
+ HPI_SILENCEDETECTOR_STATE, 0, 0, (u32 *)state, NULL);
+}
+
+u16 hpi_silence_detector_set_enable(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 enable)
+{
+ return hpi_control_param_set(ph_subsys, h_control, HPI_GENERIC_ENABLE,
+ (u32)enable, 0);
+}
+
+u16 hpi_silence_detector_get_enable(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 *enable)
+{
+ return hpi_control_param_get(ph_subsys, h_control, HPI_GENERIC_ENABLE,
+ 0, 0, (u32 *)enable, NULL);
+}
+
+u16 hpi_silence_detector_set_event_enable(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 event_enable)
+{
+ return hpi_control_param_set(ph_subsys, h_control,
+ HPI_GENERIC_EVENT_ENABLE, (u32)event_enable, 0);
+}
+
+u16 hpi_silence_detector_get_event_enable(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 *event_enable)
+{
+ return hpi_control_param_get(ph_subsys, h_control,
+ HPI_GENERIC_EVENT_ENABLE, 0, 0, (u32 *)event_enable, NULL);
+}
+
+u16 hpi_silence_detector_set_delay(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 delay)
+{
+ return hpi_control_param_set(ph_subsys, h_control,
+ HPI_SILENCEDETECTOR_DELAY, (u32)delay, 0);
+}
+
+u16 hpi_silence_detector_get_delay(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 *delay)
+{
+ return hpi_control_param_get(ph_subsys, h_control,
+ HPI_SILENCEDETECTOR_DELAY, 0, 0, (u32 *)delay, NULL);
+}
+
+u16 hpi_silence_detector_set_threshold(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, int threshold)
+{
+ return hpi_control_param_set(ph_subsys, h_control,
+ HPI_SILENCEDETECTOR_THRESHOLD, (u32)threshold, 0);
+}
+
+u16 hpi_silence_detector_get_threshold(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, int *threshold)
+{
+ return hpi_control_param_get(ph_subsys, h_control,
+ HPI_SILENCEDETECTOR_THRESHOLD, 0, 0, (u32 *)threshold, NULL);
+}
+
+u16 hpi_tuner_query_band(const struct hpi_hsubsys *ph_subsys,
+ const u32 h_tuner, const u32 index, u16 *pw_band)
+{
+ u32 qr;
+ u16 err;
+
+ err = hpi_control_query(ph_subsys, h_tuner, HPI_TUNER_BAND, index, 0,
+ &qr);
+ *pw_band = (u16)qr;
+ return err;
+}
+
+u16 hpi_tuner_set_band(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ u16 band)
+{
+ return hpi_control_param_set(ph_subsys, h_control, HPI_TUNER_BAND,
+ band, 0);
+}
+
+u16 hpi_tuner_get_band(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ u16 *pw_band)
+{
+ u32 band = 0;
+ u16 error = 0;
+
+ error = hpi_control_param1_get(ph_subsys, h_control, HPI_TUNER_BAND,
+ &band);
+ if (pw_band)
+ *pw_band = (u16)band;
+ return error;
+}
+
+u16 hpi_tuner_query_frequency(const struct hpi_hsubsys *ph_subsys,
+ const u32 h_tuner, const u32 index, const u16 band, u32 *pfreq)
+{
+ return hpi_control_query(ph_subsys, h_tuner, HPI_TUNER_FREQ, index,
+ band, pfreq);
+}
+
+u16 hpi_tuner_set_frequency(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 freq_ink_hz)
+{
+ return hpi_control_param_set(ph_subsys, h_control, HPI_TUNER_FREQ,
+ freq_ink_hz, 0);
+}
+
+u16 hpi_tuner_get_frequency(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 *pw_freq_ink_hz)
+{
+ return hpi_control_param1_get(ph_subsys, h_control, HPI_TUNER_FREQ,
+ pw_freq_ink_hz);
+}
+
+u16 hpi_tuner_query_gain(const struct hpi_hsubsys *ph_subsys,
+ const u32 h_tuner, const u32 index, u16 *pw_gain)
+{
+ u32 qr;
+ u16 err;
+
+ err = hpi_control_query(ph_subsys, h_tuner, HPI_TUNER_GAIN, index, 0,
+ &qr);
+ *pw_gain = (u16)qr;
+ return err;
+}
+
+u16 hpi_tuner_set_gain(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ short gain)
+{
+ return hpi_control_param_set(ph_subsys, h_control, HPI_TUNER_GAIN,
+ gain, 0);
+}
+
+u16 hpi_tuner_get_gain(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ short *pn_gain)
+{
+ u32 gain = 0;
+ u16 error = 0;
+
+ error = hpi_control_param1_get(ph_subsys, h_control, HPI_TUNER_GAIN,
+ &gain);
+ if (pn_gain)
+ *pn_gain = (u16)gain;
+ return error;
+}
+
+u16 hpi_tuner_getRF_level(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ short *pw_level)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
+ HPI_CONTROL_GET_STATE);
+ u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ hm.u.c.attribute = HPI_TUNER_LEVEL;
+ hm.u.c.param1 = HPI_TUNER_LEVEL_AVERAGE;
+ hpi_send_recv(&hm, &hr);
+ if (pw_level)
+ *pw_level = (short)hr.u.c.param1;
+ return hr.error;
+}
+
+u16 hpi_tuner_get_rawRF_level(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, short *pw_level)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
+ HPI_CONTROL_GET_STATE);
+ u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ hm.u.c.attribute = HPI_TUNER_LEVEL;
+ hm.u.c.param1 = HPI_TUNER_LEVEL_RAW;
+ hpi_send_recv(&hm, &hr);
+ if (pw_level)
+ *pw_level = (short)hr.u.c.param1;
+ return hr.error;
+}
+
+u16 hpi_tuner_query_deemphasis(const struct hpi_hsubsys *ph_subsys,
+ const u32 h_tuner, const u32 index, const u16 band, u32 *pdeemphasis)
+{
+ return hpi_control_query(ph_subsys, h_tuner, HPI_TUNER_DEEMPHASIS,
+ index, band, pdeemphasis);
+}
+
+u16 hpi_tuner_set_deemphasis(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 deemphasis)
+{
+ return hpi_control_param_set(ph_subsys, h_control,
+ HPI_TUNER_DEEMPHASIS, deemphasis, 0);
+}
+
+u16 hpi_tuner_get_deemphasis(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 *pdeemphasis)
+{
+ return hpi_control_param1_get(ph_subsys, h_control,
+ HPI_TUNER_DEEMPHASIS, pdeemphasis);
+}
+
+u16 hpi_tuner_query_program(const struct hpi_hsubsys *ph_subsys,
+ const u32 h_tuner, u32 *pbitmap_program)
+{
+ return hpi_control_query(ph_subsys, h_tuner, HPI_TUNER_PROGRAM, 0, 0,
+ pbitmap_program);
+}
+
+u16 hpi_tuner_set_program(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ u32 program)
+{
+ return hpi_control_param_set(ph_subsys, h_control, HPI_TUNER_PROGRAM,
+ program, 0);
+}
+
+u16 hpi_tuner_get_program(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ u32 *pprogram)
+{
+ return hpi_control_param1_get(ph_subsys, h_control, HPI_TUNER_PROGRAM,
+ pprogram);
+}
+
+u16 hpi_tuner_get_hd_radio_dsp_version(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, char *psz_dsp_version, const u32 string_size)
+{
+ return hpi_control_get_string(ph_subsys, h_control,
+ HPI_TUNER_HDRADIO_DSP_VERSION, psz_dsp_version, string_size);
+}
+
+u16 hpi_tuner_get_hd_radio_sdk_version(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, char *psz_sdk_version, const u32 string_size)
+{
+ return hpi_control_get_string(ph_subsys, h_control,
+ HPI_TUNER_HDRADIO_SDK_VERSION, psz_sdk_version, string_size);
+}
+
+u16 hpi_tuner_get_status(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ u16 *pw_status_mask, u16 *pw_status)
+{
+ u32 status = 0;
+ u16 error = 0;
+
+ error = hpi_control_param1_get(ph_subsys, h_control, HPI_TUNER_STATUS,
+ &status);
+ if (pw_status) {
+ if (!error) {
+ *pw_status_mask = (u16)(status >> 16);
+ *pw_status = (u16)(status & 0xFFFF);
+ } else {
+ *pw_status_mask = 0;
+ *pw_status = 0;
+ }
+ }
+ return error;
+}
+
+u16 hpi_tuner_set_mode(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ u32 mode, u32 value)
+{
+ return hpi_control_param_set(ph_subsys, h_control, HPI_TUNER_MODE,
+ mode, value);
+}
+
+u16 hpi_tuner_get_mode(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ u32 mode, u32 *pn_value)
+{
+ return hpi_control_param_get(ph_subsys, h_control, HPI_TUNER_MODE,
+ mode, 0, pn_value, NULL);
+}
+
+u16 hpi_tuner_get_hd_radio_signal_quality(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 *pquality)
+{
+ return hpi_control_param_get(ph_subsys, h_control,
+ HPI_TUNER_HDRADIO_SIGNAL_QUALITY, 0, 0, pquality, NULL);
+}
+
+u16 hpi_tuner_getRDS(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ char *p_data)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
+ HPI_CONTROL_GET_STATE);
+ u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ hm.u.c.attribute = HPI_TUNER_RDS;
+ hpi_send_recv(&hm, &hr);
+ if (p_data) {
+ *(u32 *)&p_data[0] = hr.u.cu.tuner.rds.data[0];
+ *(u32 *)&p_data[4] = hr.u.cu.tuner.rds.data[1];
+ *(u32 *)&p_data[8] = hr.u.cu.tuner.rds.bLER;
+ }
+ return hr.error;
+}
+
+u16 HPI_PAD__get_channel_name(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, char *psz_string, const u32 data_length)
+{
+ return hpi_control_get_string(ph_subsys, h_control,
+ HPI_PAD_CHANNEL_NAME, psz_string, data_length);
+}
+
+u16 HPI_PAD__get_artist(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ char *psz_string, const u32 data_length)
+{
+ return hpi_control_get_string(ph_subsys, h_control, HPI_PAD_ARTIST,
+ psz_string, data_length);
+}
+
+u16 HPI_PAD__get_title(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ char *psz_string, const u32 data_length)
+{
+ return hpi_control_get_string(ph_subsys, h_control, HPI_PAD_TITLE,
+ psz_string, data_length);
+}
+
+u16 HPI_PAD__get_comment(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ char *psz_string, const u32 data_length)
+{
+ return hpi_control_get_string(ph_subsys, h_control, HPI_PAD_COMMENT,
+ psz_string, data_length);
+}
+
+u16 HPI_PAD__get_program_type(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, u32 *ppTY)
+{
+ return hpi_control_param_get(ph_subsys, h_control,
+ HPI_PAD_PROGRAM_TYPE, 0, 0, ppTY, NULL);
+}
+
+u16 HPI_PAD__get_rdsPI(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ u32 *ppI)
+{
+ return hpi_control_param_get(ph_subsys, h_control, HPI_PAD_PROGRAM_ID,
+ 0, 0, ppI, NULL);
+}
+
+u16 hpi_volume_query_channels(const struct hpi_hsubsys *ph_subsys,
+ const u32 h_volume, u32 *p_channels)
+{
+ return hpi_control_query(ph_subsys, h_volume, HPI_VOLUME_NUM_CHANNELS,
+ 0, 0, p_channels);
+}
+
+u16 hpi_volume_set_gain(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ short an_log_gain[HPI_MAX_CHANNELS]
+ )
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
+ HPI_CONTROL_SET_STATE);
+ u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ memcpy(hm.u.c.an_log_value, an_log_gain,
+ sizeof(short) * HPI_MAX_CHANNELS);
+ hm.u.c.attribute = HPI_VOLUME_GAIN;
+
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+}
+
+u16 hpi_volume_get_gain(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ short an_log_gain[HPI_MAX_CHANNELS]
+ )
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
+ HPI_CONTROL_GET_STATE);
+ u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ hm.u.c.attribute = HPI_VOLUME_GAIN;
+
+ hpi_send_recv(&hm, &hr);
+
+ memcpy(an_log_gain, hr.u.c.an_log_value,
+ sizeof(short) * HPI_MAX_CHANNELS);
+ return hr.error;
+}
+
+u16 hpi_volume_query_range(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ short *min_gain_01dB, short *max_gain_01dB, short *step_gain_01dB)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
+ HPI_CONTROL_GET_STATE);
+ u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ hm.u.c.attribute = HPI_VOLUME_RANGE;
+
+ hpi_send_recv(&hm, &hr);
+ if (hr.error) {
+ hr.u.c.an_log_value[0] = 0;
+ hr.u.c.an_log_value[1] = 0;
+ hr.u.c.param1 = 0;
+ }
+ if (min_gain_01dB)
+ *min_gain_01dB = hr.u.c.an_log_value[0];
+ if (max_gain_01dB)
+ *max_gain_01dB = hr.u.c.an_log_value[1];
+ if (step_gain_01dB)
+ *step_gain_01dB = (short)hr.u.c.param1;
+ return hr.error;
+}
+
+u16 hpi_volume_auto_fade_profile(const struct hpi_hsubsys *ph_subsys,
+ u32 h_control, short an_stop_gain0_01dB[HPI_MAX_CHANNELS],
+ u32 duration_ms, u16 profile)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
+ HPI_CONTROL_SET_STATE);
+ u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+
+ memcpy(hm.u.c.an_log_value, an_stop_gain0_01dB,
+ sizeof(short) * HPI_MAX_CHANNELS);
+
+ hm.u.c.attribute = HPI_VOLUME_AUTOFADE;
+ hm.u.c.param1 = duration_ms;
+ hm.u.c.param2 = profile;
+
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+}
+
+u16 hpi_volume_auto_fade(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ short an_stop_gain0_01dB[HPI_MAX_CHANNELS], u32 duration_ms)
+{
+ return hpi_volume_auto_fade_profile(ph_subsys, h_control,
+ an_stop_gain0_01dB, duration_ms, HPI_VOLUME_AUTOFADE_LOG);
+}
+
+u16 hpi_vox_set_threshold(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ short an_gain0_01dB)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
+ HPI_CONTROL_SET_STATE);
+ u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ hm.u.c.attribute = HPI_VOX_THRESHOLD;
+
+ hm.u.c.an_log_value[0] = an_gain0_01dB;
+
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+}
+
+u16 hpi_vox_get_threshold(const struct hpi_hsubsys *ph_subsys, u32 h_control,
+ short *an_gain0_01dB)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
+ HPI_CONTROL_GET_STATE);
+ u32TOINDEXES(h_control, &hm.adapter_index, &hm.obj_index);
+ hm.u.c.attribute = HPI_VOX_THRESHOLD;
+
+ hpi_send_recv(&hm, &hr);
+
+ *an_gain0_01dB = hr.u.c.an_log_value[0];
+
+ return hr.error;
+}
+
+static size_t strv_packet_size = MIN_STRV_PACKET_SIZE;
+
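+ /* Size in bytes of one element of each entity type, indexed by
+ * enum e_entity_type; used by hpi_entity_item_count(). */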
+static size_t entity_type_to_size[LAST_ENTITY_TYPE] = {
+ 0,
+ sizeof(struct hpi_entity),
+ sizeof(void *),
+
+ sizeof(int),
+ sizeof(float),
+ sizeof(double),
+
+ sizeof(char),
+ sizeof(char),
+
+ 4 * sizeof(char),
+ 16 * sizeof(char),
+ 6 * sizeof(char),
+};
+
+inline size_t hpi_entity_size(struct hpi_entity *entity_ptr)
+{
+ return entity_ptr->header.size;
+}
+
+inline size_t hpi_entity_header_size(struct hpi_entity *entity_ptr)
+{
+ return sizeof(entity_ptr->header);
+}
+
+inline size_t hpi_entity_value_size(struct hpi_entity *entity_ptr)
+{
+ return hpi_entity_size(entity_ptr) -
+ hpi_entity_header_size(entity_ptr);
+}
+
+inline size_t hpi_entity_item_count(struct hpi_entity *entity_ptr)
+{
+ return hpi_entity_value_size(entity_ptr) /
+ entity_type_to_size[entity_ptr->header.type];
+}
+
+inline struct hpi_entity *hpi_entity_ptr_to_next(struct hpi_entity
+ *entity_ptr)
+{
+ return (void *)(((uint8_t *) entity_ptr) +
+ hpi_entity_size(entity_ptr));
+}
+
+inline u16 hpi_entity_check_type(const enum e_entity_type t)
+{
+ if (t >= 0 && t < STR_TYPE_FIELD_MAX)
+ return 0;
+ return HPI_ERROR_ENTITY_TYPE_INVALID;
+}
+
+inline u16 hpi_entity_check_role(const enum e_entity_role r)
+{
+ if (r >= 0 && r < STR_ROLE_FIELD_MAX)
+ return 0;
+ return HPI_ERROR_ENTITY_ROLE_INVALID;
+}
+
+static u16 hpi_entity_get_next(struct hpi_entity *entity, int recursive_flag,
+ void *guard_p, struct hpi_entity **next)
+{
+ HPI_DEBUG_ASSERT(entity != NULL);
+ HPI_DEBUG_ASSERT(next != NULL);
+ HPI_DEBUG_ASSERT(hpi_entity_size(entity) != 0);
+
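+ /* guard_p points just past the containing entity; any candidate at or
+ * beyond it means the walk is finished. */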
+ if (guard_p <= (void *)entity) {
+ *next = NULL;
+ return 0;
+ }
+
+ if (recursive_flag && entity->header.type == entity_type_sequence)
+ *next = (struct hpi_entity *)entity->value;
+ else
+ *next = (struct hpi_entity *)hpi_entity_ptr_to_next(entity);
+
+ if (guard_p <= (void *)*next) {
+ *next = NULL;
+ return 0;
+ }
+
+ HPI_DEBUG_ASSERT(guard_p >= (void *)hpi_entity_ptr_to_next(*next));
+ return 0;
+}
+
+u16 hpi_entity_find_next(struct hpi_entity *container_entity,
+ enum e_entity_type type, enum e_entity_role role, int recursive_flag,
+ struct hpi_entity **current_match)
+{
+ struct hpi_entity *tmp = NULL;
+ void *guard_p = NULL;
+
+ HPI_DEBUG_ASSERT(container_entity != NULL);
+ guard_p = hpi_entity_ptr_to_next(container_entity);
+
+ if (*current_match != NULL)
+ hpi_entity_get_next(*current_match, recursive_flag, guard_p,
+ &tmp);
+ else
+ hpi_entity_get_next(container_entity, 1, guard_p, &tmp);
+
+ while (tmp) {
+ u16 err;
+
+ HPI_DEBUG_ASSERT((void *)tmp >= (void *)container_entity);
+
+ if ((!type || tmp->header.type == type) && (!role
+ || tmp->header.role == role)) {
+ *current_match = tmp;
+ return 0;
+ }
+
+ err = hpi_entity_get_next(tmp, recursive_flag, guard_p,
+ current_match);
+ if (err)
+ return err;
+
+ tmp = *current_match;
+ }
+
+ *current_match = NULL;
+ return 0;
+}
+
+void hpi_entity_free(struct hpi_entity *entity)
+{
+ if (entity != NULL)
+ kfree(entity);
+}
+
+static u16 hpi_entity_alloc_and_copy(struct hpi_entity *src,
+ struct hpi_entity **dst)
+{
+ size_t buf_size;
+ HPI_DEBUG_ASSERT(dst != NULL);
+ HPI_DEBUG_ASSERT(src != NULL);
+
+ buf_size = hpi_entity_size(src);
+ *dst = kmalloc(buf_size, GFP_KERNEL);
+ if (*dst == NULL)
+ return HPI_ERROR_MEMORY_ALLOC;
+ memcpy(*dst, src, buf_size);
+ return 0;
+}
+
+u16 hpi_universal_info(const struct hpi_hsubsys *ph_subsys, u32 hC,
+ struct hpi_entity **info)
+{
+ struct hpi_msg_strv hm;
+ struct hpi_res_strv *phr;
+ u16 hpi_err;
+ int remaining_attempts = 2;
+ size_t resp_packet_size = 1024;
+
+ *info = NULL;
+
+ while (remaining_attempts--) {
+ phr = kmalloc(resp_packet_size, GFP_KERNEL);
+ HPI_DEBUG_ASSERT(phr != NULL);
+
+ hpi_init_message_responseV1(&hm.h, (u16)sizeof(hm), &phr->h,
+ (u16)resp_packet_size, HPI_OBJ_CONTROL,
+ HPI_CONTROL_GET_INFO);
+ u32TOINDEXES(hC, &hm.h.adapter_index, &hm.h.obj_index);
+
+ hm.strv.header.size = sizeof(hm.strv);
+ phr->strv.header.size = resp_packet_size - sizeof(phr->h);
+
+ hpi_send_recv((struct hpi_message *)&hm.h,
+ (struct hpi_response *)&phr->h);
+ if (phr->h.error == HPI_ERROR_RESPONSE_BUFFER_TOO_SMALL) {
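+ /* The required response size is returned in specific_error;
+ * retry once with a larger buffer. */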
+
+ HPI_DEBUG_ASSERT(phr->h.specific_error >
+ MIN_STRV_PACKET_SIZE
+ && phr->h.specific_error < 1500);
+ resp_packet_size = phr->h.specific_error;
+ } else {
+ remaining_attempts = 0;
+ if (!phr->h.error)
+ hpi_entity_alloc_and_copy(&phr->strv, info);
+ }
+
+ hpi_err = phr->h.error;
+ kfree(phr);
+ }
+
+ return hpi_err;
+}
+
+u16 hpi_universal_get(const struct hpi_hsubsys *ph_subsys, u32 hC,
+ struct hpi_entity **value)
+{
+ struct hpi_msg_strv hm;
+ struct hpi_res_strv *phr;
+ u16 hpi_err;
+ int remaining_attempts = 2;
+
+ *value = NULL;
+
+ while (remaining_attempts--) {
+ phr = kmalloc(strv_packet_size, GFP_KERNEL);
+ if (!phr)
+ return HPI_ERROR_MEMORY_ALLOC;
+
+ hpi_init_message_responseV1(&hm.h, (u16)sizeof(hm), &phr->h,
+ (u16)strv_packet_size, HPI_OBJ_CONTROL,
+ HPI_CONTROL_GET_STATE);
+ u32TOINDEXES(hC, &hm.h.adapter_index, &hm.h.obj_index);
+
+ hm.strv.header.size = sizeof(hm.strv);
+ phr->strv.header.size = strv_packet_size - sizeof(phr->h);
+
+ hpi_send_recv((struct hpi_message *)&hm.h,
+ (struct hpi_response *)&phr->h);
+ if (phr->h.error == HPI_ERROR_RESPONSE_BUFFER_TOO_SMALL) {
+
+ HPI_DEBUG_ASSERT(phr->h.specific_error >
+ MIN_STRV_PACKET_SIZE
+ && phr->h.specific_error < 1000);
+ strv_packet_size = phr->h.specific_error;
+ } else {
+ remaining_attempts = 0;
+ if (!phr->h.error)
+ hpi_entity_alloc_and_copy(&phr->strv, value);
+ }
+
+ hpi_err = phr->h.error;
+ kfree(phr);
+ }
+
+ return hpi_err;
+}
+
+u16 hpi_universal_set(const struct hpi_hsubsys *ph_subsys, u32 hC,
+ struct hpi_entity *value)
+{
+ struct hpi_msg_strv *phm;
+ struct hpi_res_strv hr;
+
+ phm = kmalloc(sizeof(phm->h) + value->header.size, GFP_KERNEL);
+ HPI_DEBUG_ASSERT(phm != NULL);
+
+ hpi_init_message_responseV1(&phm->h,
+ sizeof(phm->h) + value->header.size, &hr.h, sizeof(hr),
+ HPI_OBJ_CONTROL, HPI_CONTROL_SET_STATE);
+ u32TOINDEXES(hC, &phm->h.adapter_index, &phm->h.obj_index);
+ hr.strv.header.size = sizeof(hr.strv);
+
+ memcpy(&phm->strv, value, value->header.size);
+ hpi_send_recv((struct hpi_message *)&phm->h,
+ (struct hpi_response *)&hr.h);
+
+ return hr.h.error;
+}
+
+u16 hpi_entity_alloc_and_pack(const enum e_entity_type type,
+ const size_t item_count, const enum e_entity_role role, void *value,
+ struct hpi_entity **entity)
+{
+ size_t bytes_to_copy, total_size;
+ u16 hE = 0;
+ *entity = NULL;
+
+ hE = hpi_entity_check_type(type);
+ if (hE)
+ return hE;
+
+ HPI_DEBUG_ASSERT(role > entity_role_null && role < LAST_ENTITY_ROLE);
+
+ bytes_to_copy = entity_type_to_size[type] * item_count;
+ total_size = hpi_entity_header_size(*entity) + bytes_to_copy;
+
+ HPI_DEBUG_ASSERT(total_size >= hpi_entity_header_size(*entity)
+ && total_size < STR_SIZE_FIELD_MAX);
+
+ *entity = kmalloc(total_size, GFP_KERNEL);
+ if (*entity == NULL)
+ return HPI_ERROR_MEMORY_ALLOC;
+ memcpy((*entity)->value, value, bytes_to_copy);
+ (*entity)->header.size =
+ hpi_entity_header_size(*entity) + bytes_to_copy;
+ (*entity)->header.type = type;
+ (*entity)->header.role = role;
+ return 0;
+}
+
+u16 hpi_entity_copy_value_from(struct hpi_entity *entity,
+ enum e_entity_type type, size_t item_count, void *value_dst_p)
+{
+ size_t bytes_to_copy;
+
+ if (entity->header.type != type)
+ return HPI_ERROR_ENTITY_TYPE_MISMATCH;
+
+ if (hpi_entity_item_count(entity) != item_count)
+ return HPI_ERROR_ENTITY_ITEM_COUNT;
+
+ bytes_to_copy = entity_type_to_size[type] * item_count;
+ memcpy(value_dst_p, entity->value, bytes_to_copy);
+ return 0;
+}
+
+u16 hpi_entity_unpack(struct hpi_entity *entity, enum e_entity_type *type,
+ size_t *item_count, enum e_entity_role *role, void **value)
+{
+ u16 err = 0;
+ HPI_DEBUG_ASSERT(entity != NULL);
+
+ if (type)
+ *type = entity->header.type;
+
+ if (role)
+ *role = entity->header.role;
+
+ if (value)
+ *value = entity->value;
+
+ if (item_count != NULL) {
+ if (entity->header.type == entity_type_sequence) {
+ void *guard_p = hpi_entity_ptr_to_next(entity);
+ struct hpi_entity *next = NULL;
+ void *contents = entity->value;
+
+ *item_count = 0;
+ while (contents < guard_p) {
+ (*item_count)++;
+ err = hpi_entity_get_next(contents, 0,
+ guard_p, &next);
+ if (next == NULL || err)
+ break;
+ contents = next;
+ }
+ } else {
+ *item_count = hpi_entity_item_count(entity);
+ }
+ }
+ return err;
+}
+
+u16 hpi_gpio_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index,
+ u32 *ph_gpio, u16 *pw_number_input_bits, u16 *pw_number_output_bits)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_GPIO, HPI_GPIO_OPEN);
+ hm.adapter_index = adapter_index;
+
+ hpi_send_recv(&hm, &hr);
+
+ if (hr.error == 0) {
+ *ph_gpio =
+ hpi_indexes_to_handle(HPI_OBJ_GPIO, adapter_index, 0);
+ if (pw_number_input_bits)
+ *pw_number_input_bits = hr.u.l.number_input_bits;
+ if (pw_number_output_bits)
+ *pw_number_output_bits = hr.u.l.number_output_bits;
+ } else
+ *ph_gpio = 0;
+ return hr.error;
+}
+
+u16 hpi_gpio_read_bit(const struct hpi_hsubsys *ph_subsys, u32 h_gpio,
+ u16 bit_index, u16 *pw_bit_data)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_GPIO, HPI_GPIO_READ_BIT);
+ u32TOINDEX(h_gpio, &hm.adapter_index);
+ hm.u.l.bit_index = bit_index;
+
+ hpi_send_recv(&hm, &hr);
+
+ *pw_bit_data = hr.u.l.bit_data[0];
+ return hr.error;
+}
+
+u16 hpi_gpio_read_all_bits(const struct hpi_hsubsys *ph_subsys, u32 h_gpio,
+ u16 aw_all_bit_data[4]
+ )
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_GPIO, HPI_GPIO_READ_ALL);
+ u32TOINDEX(h_gpio, &hm.adapter_index);
+
+ hpi_send_recv(&hm, &hr);
+
+ if (aw_all_bit_data) {
+ aw_all_bit_data[0] = hr.u.l.bit_data[0];
+ aw_all_bit_data[1] = hr.u.l.bit_data[1];
+ aw_all_bit_data[2] = hr.u.l.bit_data[2];
+ aw_all_bit_data[3] = hr.u.l.bit_data[3];
+ }
+ return hr.error;
+}
+
+u16 hpi_gpio_write_bit(const struct hpi_hsubsys *ph_subsys, u32 h_gpio,
+ u16 bit_index, u16 bit_data)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_GPIO, HPI_GPIO_WRITE_BIT);
+ u32TOINDEX(h_gpio, &hm.adapter_index);
+ hm.u.l.bit_index = bit_index;
+ hm.u.l.bit_data = bit_data;
+
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+}
+
+u16 hpi_gpio_write_status(const struct hpi_hsubsys *ph_subsys, u32 h_gpio,
+ u16 aw_all_bit_data[4]
+ )
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_GPIO,
+ HPI_GPIO_WRITE_STATUS);
+ u32TOINDEX(h_gpio, &hm.adapter_index);
+
+ hpi_send_recv(&hm, &hr);
+
+ if (aw_all_bit_data) {
+ aw_all_bit_data[0] = hr.u.l.bit_data[0];
+ aw_all_bit_data[1] = hr.u.l.bit_data[1];
+ aw_all_bit_data[2] = hr.u.l.bit_data[2];
+ aw_all_bit_data[3] = hr.u.l.bit_data[3];
+ }
+ return hr.error;
+}
+
+u16 hpi_async_event_open(const struct hpi_hsubsys *ph_subsys,
+ u16 adapter_index, u32 *ph_async)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ASYNCEVENT,
+ HPI_ASYNCEVENT_OPEN);
+ hm.adapter_index = adapter_index;
+
+ hpi_send_recv(&hm, &hr);
+
+ if (hr.error == 0)
+ *ph_async =
+ hpi_indexes_to_handle(HPI_OBJ_ASYNCEVENT,
+ adapter_index, 0);
+ else
+ *ph_async = 0;
+ return hr.error;
+
+}
+
+u16 hpi_async_event_close(const struct hpi_hsubsys *ph_subsys, u32 h_async)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ASYNCEVENT,
+ HPI_ASYNCEVENT_OPEN);
+ u32TOINDEX(h_async, &hm.adapter_index);
+
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+}
+
+u16 hpi_async_event_wait(const struct hpi_hsubsys *ph_subsys, u32 h_async,
+ u16 maximum_events, struct hpi_async_event *p_events,
+ u16 *pw_number_returned)
+{
+ return 0;
+}
+
+u16 hpi_async_event_get_count(const struct hpi_hsubsys *ph_subsys,
+ u32 h_async, u16 *pw_count)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ASYNCEVENT,
+ HPI_ASYNCEVENT_GETCOUNT);
+ u32TOINDEX(h_async, &hm.adapter_index);
+
+ hpi_send_recv(&hm, &hr);
+
+ if (hr.error == 0)
+ if (pw_count)
+ *pw_count = hr.u.as.u.count.count;
+
+ return hr.error;
+}
+
+u16 hpi_async_event_get(const struct hpi_hsubsys *ph_subsys, u32 h_async,
+ u16 maximum_events, struct hpi_async_event *p_events,
+ u16 *pw_number_returned)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ASYNCEVENT,
+ HPI_ASYNCEVENT_GET);
+ u32TOINDEX(h_async, &hm.adapter_index);
+
+ hpi_send_recv(&hm, &hr);
+ if (!hr.error) {
+ memcpy(p_events, &hr.u.as.u.event,
+ sizeof(struct hpi_async_event));
+ *pw_number_returned = 1;
+ }
+
+ return hr.error;
+}
+
+u16 hpi_nv_memory_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index,
+ u32 *ph_nv_memory, u16 *pw_size_in_bytes)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_NVMEMORY,
+ HPI_NVMEMORY_OPEN);
+ hm.adapter_index = adapter_index;
+
+ hpi_send_recv(&hm, &hr);
+
+ if (hr.error == 0) {
+ *ph_nv_memory =
+ hpi_indexes_to_handle(HPI_OBJ_NVMEMORY, adapter_index,
+ 0);
+ if (pw_size_in_bytes)
+ *pw_size_in_bytes = hr.u.n.size_in_bytes;
+ } else
+ *ph_nv_memory = 0;
+ return hr.error;
+}
+
+u16 hpi_nv_memory_read_byte(const struct hpi_hsubsys *ph_subsys,
+ u32 h_nv_memory, u16 index, u16 *pw_data)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_NVMEMORY,
+ HPI_NVMEMORY_READ_BYTE);
+ u32TOINDEX(h_nv_memory, &hm.adapter_index);
+ hm.u.n.address = index;
+
+ hpi_send_recv(&hm, &hr);
+
+ *pw_data = hr.u.n.data;
+ return hr.error;
+}
+
+u16 hpi_nv_memory_write_byte(const struct hpi_hsubsys *ph_subsys,
+ u32 h_nv_memory, u16 index, u16 data)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_NVMEMORY,
+ HPI_NVMEMORY_WRITE_BYTE);
+ u32TOINDEX(h_nv_memory, &hm.adapter_index);
+ hm.u.n.address = index;
+ hm.u.n.data = data;
+
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+}
+
+u16 hpi_profile_open_all(const struct hpi_hsubsys *ph_subsys,
+ u16 adapter_index, u16 profile_index, u32 *ph_profile,
+ u16 *pw_max_profiles)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_PROFILE,
+ HPI_PROFILE_OPEN_ALL);
+ hm.adapter_index = adapter_index;
+ hm.obj_index = profile_index;
+ hpi_send_recv(&hm, &hr);
+
+ *pw_max_profiles = hr.u.p.u.o.max_profiles;
+ if (hr.error == 0)
+ *ph_profile =
+ hpi_indexes_to_handle(HPI_OBJ_PROFILE, adapter_index,
+ profile_index);
+ else
+ *ph_profile = 0;
+ return hr.error;
+}
+
+u16 hpi_profile_get(const struct hpi_hsubsys *ph_subsys, u32 h_profile,
+ u16 bin_index, u16 *pw_seconds, u32 *pmicro_seconds, u32 *pcall_count,
+ u32 *pmax_micro_seconds, u32 *pmin_micro_seconds)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_PROFILE, HPI_PROFILE_GET);
+ u32TOINDEXES(h_profile, &hm.adapter_index, &hm.obj_index);
+ hm.u.p.bin_index = bin_index;
+ hpi_send_recv(&hm, &hr);
+ if (pw_seconds)
+ *pw_seconds = hr.u.p.u.t.seconds;
+ if (pmicro_seconds)
+ *pmicro_seconds = hr.u.p.u.t.micro_seconds;
+ if (pcall_count)
+ *pcall_count = hr.u.p.u.t.call_count;
+ if (pmax_micro_seconds)
+ *pmax_micro_seconds = hr.u.p.u.t.max_micro_seconds;
+ if (pmin_micro_seconds)
+ *pmin_micro_seconds = hr.u.p.u.t.min_micro_seconds;
+ return hr.error;
+}
+
+u16 hpi_profile_get_utilization(const struct hpi_hsubsys *ph_subsys,
+ u32 h_profile, u32 *putilization)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_PROFILE,
+ HPI_PROFILE_GET_UTILIZATION);
+ u32TOINDEXES(h_profile, &hm.adapter_index, &hm.obj_index);
+ hpi_send_recv(&hm, &hr);
+ if (hr.error) {
+ if (putilization)
+ *putilization = 0;
+ } else {
+ if (putilization)
+ *putilization = hr.u.p.u.t.call_count;
+ }
+ return hr.error;
+}
+
+u16 hpi_profile_get_name(const struct hpi_hsubsys *ph_subsys, u32 h_profile,
+ u16 bin_index, char *sz_name, u16 name_length)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_PROFILE,
+ HPI_PROFILE_GET_NAME);
+ u32TOINDEXES(h_profile, &hm.adapter_index, &hm.obj_index);
+ hm.u.p.bin_index = bin_index;
+ hpi_send_recv(&hm, &hr);
+ if (hr.error) {
+ if (sz_name)
+ strcpy(sz_name, "??");
+ } else {
+ if (sz_name)
+ memcpy(sz_name, (char *)hr.u.p.u.n.sz_name,
+ name_length);
+ }
+ return hr.error;
+}
+
+u16 hpi_profile_start_all(const struct hpi_hsubsys *ph_subsys, u32 h_profile)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_PROFILE,
+ HPI_PROFILE_START_ALL);
+ u32TOINDEXES(h_profile, &hm.adapter_index, &hm.obj_index);
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+}
+
+u16 hpi_profile_stop_all(const struct hpi_hsubsys *ph_subsys, u32 h_profile)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_PROFILE,
+ HPI_PROFILE_STOP_ALL);
+ u32TOINDEXES(h_profile, &hm.adapter_index, &hm.obj_index);
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+}
+
+u16 hpi_watchdog_open(const struct hpi_hsubsys *ph_subsys, u16 adapter_index,
+ u32 *ph_watchdog)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_WATCHDOG,
+ HPI_WATCHDOG_OPEN);
+ hm.adapter_index = adapter_index;
+
+ hpi_send_recv(&hm, &hr);
+
+ if (hr.error == 0)
+ *ph_watchdog =
+ hpi_indexes_to_handle(HPI_OBJ_WATCHDOG, adapter_index,
+ 0);
+ else
+ *ph_watchdog = 0;
+ return hr.error;
+}
+
+u16 hpi_watchdog_set_time(const struct hpi_hsubsys *ph_subsys, u32 h_watchdog,
+ u32 time_millisec)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_WATCHDOG,
+ HPI_WATCHDOG_SET_TIME);
+ u32TOINDEX(h_watchdog, &hm.adapter_index);
+ hm.u.w.time_ms = time_millisec;
+
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+}
+
+u16 hpi_watchdog_ping(const struct hpi_hsubsys *ph_subsys, u32 h_watchdog)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_WATCHDOG,
+ HPI_WATCHDOG_PING);
+ u32TOINDEX(h_watchdog, &hm.adapter_index);
+
+ hpi_send_recv(&hm, &hr);
+
+ return hr.error;
+}
diff --git a/sound/pci/asihpi/hpimsginit.c b/sound/pci/asihpi/hpimsginit.c
new file mode 100644
index 000000000000..8e1d099ed7e4
--- /dev/null
+++ b/sound/pci/asihpi/hpimsginit.c
@@ -0,0 +1,130 @@
+/******************************************************************************
+
+ AudioScience HPI driver
+ Copyright (C) 1997-2010 AudioScience Inc. <support@audioscience.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation;
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ Hardware Programming Interface (HPI) Utility functions.
+
+ (C) Copyright AudioScience Inc. 2007
+*******************************************************************************/
+
+#include "hpi_internal.h"
+#include "hpimsginit.h"
+
+/* The actual message size for each object type */
+static u16 msg_size[HPI_OBJ_MAXINDEX + 1] = HPI_MESSAGE_SIZE_BY_OBJECT;
+/* The actual response size for each object type */
+static u16 res_size[HPI_OBJ_MAXINDEX + 1] = HPI_RESPONSE_SIZE_BY_OBJECT;
+/* Flag to enable alternate message type for SSX2 bypass. */
+static u16 gwSSX2_bypass;
+
+/** \internal
+ * Used by ASIO driver to disable SSX2 for a single process
+ * \param phSubSys Pointer to HPI subsystem handle.
+ * \param wBypass New bypass setting 0 = off, nonzero = on
+ * \return Previous bypass setting.
+ */
+u16 hpi_subsys_ssx2_bypass(const struct hpi_hsubsys *ph_subsys, u16 bypass)
+{
+ u16 old_value = gwSSX2_bypass;
+
+ gwSSX2_bypass = bypass;
+
+ return old_value;
+}
+
+/** \internal
+ * initialize the HPI message structure
+ */
+static void hpi_init_message(struct hpi_message *phm, u16 object,
+ u16 function)
+{
+ memset(phm, 0, sizeof(*phm));
+ if ((object > 0) && (object <= HPI_OBJ_MAXINDEX))
+ phm->size = msg_size[object];
+ else
+ phm->size = sizeof(*phm);
+
+ if (gwSSX2_bypass)
+ phm->type = HPI_TYPE_SSX2BYPASS_MESSAGE;
+ else
+ phm->type = HPI_TYPE_MESSAGE;
+ phm->object = object;
+ phm->function = function;
+ phm->version = 0;
+ /* Expect adapter index to be set by caller */
+}
+
+/** \internal
+ * initialize the HPI response structure
+ */
+void hpi_init_response(struct hpi_response *phr, u16 object, u16 function,
+ u16 error)
+{
+ memset(phr, 0, sizeof(*phr));
+ phr->type = HPI_TYPE_RESPONSE;
+ if ((object > 0) && (object <= HPI_OBJ_MAXINDEX))
+ phr->size = res_size[object];
+ else
+ phr->size = sizeof(*phr);
+ phr->object = object;
+ phr->function = function;
+ phr->error = error;
+ phr->specific_error = 0;
+ phr->version = 0;
+}
+
+void hpi_init_message_response(struct hpi_message *phm,
+ struct hpi_response *phr, u16 object, u16 function)
+{
+ hpi_init_message(phm, object, function);
+ /* default error return if the response is
+ not filled in by the callee */
+ hpi_init_response(phr, object, function,
+ HPI_ERROR_PROCESSING_MESSAGE);
+}
+
+static void hpi_init_messageV1(struct hpi_message_header *phm, u16 size,
+ u16 object, u16 function)
+{
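+ /* Unlike hpi_init_message(), an out-of-range object leaves the
+ header zeroed rather than defaulting the size. */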
+ memset(phm, 0, sizeof(*phm));
+ if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) {
+ phm->size = size;
+ phm->type = HPI_TYPE_MESSAGE;
+ phm->object = object;
+ phm->function = function;
+ phm->version = 1;
+ /* Expect adapter index to be set by caller */
+ }
+}
+
+void hpi_init_responseV1(struct hpi_response_header *phr, u16 size,
+ u16 object, u16 function)
+{
+ memset(phr, 0, sizeof(*phr));
+ phr->size = size;
+ phr->version = 1;
+ phr->type = HPI_TYPE_RESPONSE;
+ phr->error = HPI_ERROR_PROCESSING_MESSAGE;
+}
+
+void hpi_init_message_responseV1(struct hpi_message_header *phm, u16 msg_size,
+ struct hpi_response_header *phr, u16 res_size, u16 object,
+ u16 function)
+{
+ hpi_init_messageV1(phm, msg_size, object, function);
+ hpi_init_responseV1(phr, res_size, object, function);
+}
diff --git a/sound/pci/asihpi/hpimsginit.h b/sound/pci/asihpi/hpimsginit.h
new file mode 100644
index 000000000000..864ad020c9b3
--- /dev/null
+++ b/sound/pci/asihpi/hpimsginit.h
@@ -0,0 +1,40 @@
+/******************************************************************************
+
+ AudioScience HPI driver
+ Copyright (C) 1997-2010 AudioScience Inc. <support@audioscience.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation;
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ Hardware Programming Interface (HPI) Utility functions
+
+ (C) Copyright AudioScience Inc. 2007
+*******************************************************************************/
+/* Initialise response headers, or message/response pairs.
+Note that it is valid to init just a response, e.g. when a lower level is preparing
+a response to a message.
+However, when sending a message, a matching response buffer must always be prepared.
+*/
+
+void hpi_init_response(struct hpi_response *phr, u16 object, u16 function,
+ u16 error);
+
+void hpi_init_message_response(struct hpi_message *phm,
+ struct hpi_response *phr, u16 object, u16 function);
+
+void hpi_init_responseV1(struct hpi_response_header *phr, u16 size,
+ u16 object, u16 function);
+
+void hpi_init_message_responseV1(struct hpi_message_header *phm, u16 msg_size,
+ struct hpi_response_header *phr, u16 res_size, u16 object,
+ u16 function);
diff --git a/sound/pci/asihpi/hpimsgx.c b/sound/pci/asihpi/hpimsgx.c
new file mode 100644
index 000000000000..2ee90dc3d897
--- /dev/null
+++ b/sound/pci/asihpi/hpimsgx.c
@@ -0,0 +1,907 @@
+/******************************************************************************
+
+ AudioScience HPI driver
+ Copyright (C) 1997-2010 AudioScience Inc. <support@audioscience.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation;
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+Extended Message Function With Response Caching
+
+(C) Copyright AudioScience Inc. 2002
+*****************************************************************************/
+#define SOURCEFILE_NAME "hpimsgx.c"
+#include "hpi_internal.h"
+#include "hpimsginit.h"
+#include "hpimsgx.h"
+#include "hpidebug.h"
+
+static struct pci_device_id asihpi_pci_tbl[] = {
+#include "hpipcida.h"
+};
+
+static struct hpios_spinlock msgx_lock;
+
+static hpi_handler_func *hpi_entry_points[HPI_MAX_ADAPTERS];
+
+static hpi_handler_func *hpi_lookup_entry_point_function(const struct hpi_pci
+ *pci_info)
+{
+
+ int i;
+
+ for (i = 0; asihpi_pci_tbl[i].vendor != 0; i++) {
+ if (asihpi_pci_tbl[i].vendor != PCI_ANY_ID
+ && asihpi_pci_tbl[i].vendor != pci_info->vendor_id)
+ continue;
+ if (asihpi_pci_tbl[i].device != PCI_ANY_ID
+ && asihpi_pci_tbl[i].device != pci_info->device_id)
+ continue;
+ if (asihpi_pci_tbl[i].subvendor != PCI_ANY_ID
+ && asihpi_pci_tbl[i].subvendor !=
+ pci_info->subsys_vendor_id)
+ continue;
+ if (asihpi_pci_tbl[i].subdevice != PCI_ANY_ID
+ && asihpi_pci_tbl[i].subdevice !=
+ pci_info->subsys_device_id)
+ continue;
+
+ HPI_DEBUG_LOG(DEBUG, " %x,%lu\n", i,
+ asihpi_pci_tbl[i].driver_data);
+ return (hpi_handler_func *) asihpi_pci_tbl[i].driver_data;
+ }
+
+ return NULL;
+}
+
+static inline void hw_entry_point(struct hpi_message *phm,
+ struct hpi_response *phr)
+{
+
+ hpi_handler_func *ep;
+
+ if (phm->adapter_index < HPI_MAX_ADAPTERS) {
+ ep = (hpi_handler_func *) hpi_entry_points[phm->
+ adapter_index];
+ if (ep) {
+ HPI_DEBUG_MESSAGE(DEBUG, phm);
+ ep(phm, phr);
+ HPI_DEBUG_RESPONSE(phr);
+ return;
+ }
+ }
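+ /* No entry point registered for this adapter index: return a
+ default "processing message" error response. */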
+ hpi_init_response(phr, phm->object, phm->function,
+ HPI_ERROR_PROCESSING_MESSAGE);
+}
+
+static void adapter_open(struct hpi_message *phm, struct hpi_response *phr);
+static void adapter_close(struct hpi_message *phm, struct hpi_response *phr);
+
+static void mixer_open(struct hpi_message *phm, struct hpi_response *phr);
+static void mixer_close(struct hpi_message *phm, struct hpi_response *phr);
+
+static void outstream_open(struct hpi_message *phm, struct hpi_response *phr,
+ void *h_owner);
+static void outstream_close(struct hpi_message *phm, struct hpi_response *phr,
+ void *h_owner);
+static void instream_open(struct hpi_message *phm, struct hpi_response *phr,
+ void *h_owner);
+static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
+ void *h_owner);
+
+static void HPIMSGX__reset(u16 adapter_index);
+static u16 HPIMSGX__init(struct hpi_message *phm, struct hpi_response *phr);
+static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner);
+
+#ifndef DISABLE_PRAGMA_PACK1
+#pragma pack(push, 1)
+#endif
+
+struct hpi_subsys_response {
+ struct hpi_response_header h;
+ struct hpi_subsys_res s;
+};
+
+struct hpi_adapter_response {
+ struct hpi_response_header h;
+ struct hpi_adapter_res a;
+};
+
+struct hpi_mixer_response {
+ struct hpi_response_header h;
+ struct hpi_mixer_res m;
+};
+
+struct hpi_stream_response {
+ struct hpi_response_header h;
+ struct hpi_stream_res d;
+};
+
+struct adapter_info {
+ u16 type;
+ u16 num_instreams;
+ u16 num_outstreams;
+};
+
+struct asi_open_state {
+ int open_flag;
+ void *h_owner;
+};
+
+#ifndef DISABLE_PRAGMA_PACK1
+#pragma pack(pop)
+#endif
+
+/* Globals */
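+/* Cached responses to the various "open" messages, filled in by
+ adapter_prepare() and returned directly by the open handlers below. */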
+static struct hpi_adapter_response rESP_HPI_ADAPTER_OPEN[HPI_MAX_ADAPTERS];
+
+static struct hpi_stream_response
+ rESP_HPI_OSTREAM_OPEN[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];
+
+static struct hpi_stream_response
+ rESP_HPI_ISTREAM_OPEN[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];
+
+static struct hpi_mixer_response rESP_HPI_MIXER_OPEN[HPI_MAX_ADAPTERS];
+
+static struct hpi_subsys_response gRESP_HPI_SUBSYS_FIND_ADAPTERS;
+
+static struct adapter_info aDAPTER_INFO[HPI_MAX_ADAPTERS];
+
+/* use these to keep track of opens from user mode apps/DLLs */
+static struct asi_open_state
+ outstream_user_open[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];
+
+static struct asi_open_state
+ instream_user_open[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];
+
+static void subsys_message(struct hpi_message *phm, struct hpi_response *phr,
+ void *h_owner)
+{
+ switch (phm->function) {
+ case HPI_SUBSYS_GET_VERSION:
+ hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
+ HPI_SUBSYS_GET_VERSION, 0);
+ phr->u.s.version = HPI_VER >> 8; /* return major.minor */
+ phr->u.s.data = HPI_VER; /* return major.minor.release */
+ break;
+ case HPI_SUBSYS_OPEN:
+ /*do not propagate the message down the chain */
+ hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_OPEN, 0);
+ break;
+ case HPI_SUBSYS_CLOSE:
+ /*do not propagate the message down the chain */
+ hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_CLOSE,
+ 0);
+ HPIMSGX__cleanup(HPIMSGX_ALLADAPTERS, h_owner);
+ break;
+ case HPI_SUBSYS_DRIVER_LOAD:
+ /* Initialize this module's internal state */
+ hpios_msgxlock_init(&msgx_lock);
+ memset(&hpi_entry_points, 0, sizeof(hpi_entry_points));
+ hpios_locked_mem_init();
+ /* Init subsys_findadapters response to no-adapters */
+ HPIMSGX__reset(HPIMSGX_ALLADAPTERS);
+ hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
+ HPI_SUBSYS_DRIVER_LOAD, 0);
+ /* individual HPIs don't implement driver load */
+ HPI_COMMON(phm, phr);
+ break;
+ case HPI_SUBSYS_DRIVER_UNLOAD:
+ HPI_COMMON(phm, phr);
+ HPIMSGX__cleanup(HPIMSGX_ALLADAPTERS, h_owner);
+ hpios_locked_mem_free_all();
+ hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
+ HPI_SUBSYS_DRIVER_UNLOAD, 0);
+ return;
+
+ case HPI_SUBSYS_GET_INFO:
+ HPI_COMMON(phm, phr);
+ break;
+
+ case HPI_SUBSYS_FIND_ADAPTERS:
+ memcpy(phr, &gRESP_HPI_SUBSYS_FIND_ADAPTERS,
+ sizeof(gRESP_HPI_SUBSYS_FIND_ADAPTERS));
+ break;
+ case HPI_SUBSYS_GET_NUM_ADAPTERS:
+ memcpy(phr, &gRESP_HPI_SUBSYS_FIND_ADAPTERS,
+ sizeof(gRESP_HPI_SUBSYS_FIND_ADAPTERS));
+ phr->function = HPI_SUBSYS_GET_NUM_ADAPTERS;
+ break;
+ case HPI_SUBSYS_GET_ADAPTER:
+ {
+ int count = phm->adapter_index;
+ int index = 0;
+ hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
+ HPI_SUBSYS_GET_ADAPTER, 0);
+
+ /* This is complicated by the fact that we want to
+ * "skip" 0's in the adapter list.
+ * First, make sure we are pointing to a
+ * non-zero adapter type.
+ */
+ while (gRESP_HPI_SUBSYS_FIND_ADAPTERS.
+ s.aw_adapter_list[index] == 0) {
+ index++;
+ if (index >= HPI_MAX_ADAPTERS)
+ break;
+ }
+ while (count) {
+ /* move on to the next adapter */
+ index++;
+ if (index >= HPI_MAX_ADAPTERS)
+ break;
+ while (gRESP_HPI_SUBSYS_FIND_ADAPTERS.
+ s.aw_adapter_list[index] == 0) {
+ index++;
+ if (index >= HPI_MAX_ADAPTERS)
+ break;
+ }
+ count--;
+ }
+
+ if (index < HPI_MAX_ADAPTERS) {
+ phr->u.s.adapter_index = (u16)index;
+ phr->u.s.aw_adapter_list[0] =
+ gRESP_HPI_SUBSYS_FIND_ADAPTERS.
+ s.aw_adapter_list[index];
+ } else {
+ phr->u.s.adapter_index = 0;
+ phr->u.s.aw_adapter_list[0] = 0;
+ phr->error = HPI_ERROR_BAD_ADAPTER_NUMBER;
+ }
+ break;
+ }
+ case HPI_SUBSYS_CREATE_ADAPTER:
+ HPIMSGX__init(phm, phr);
+ break;
+ case HPI_SUBSYS_DELETE_ADAPTER:
+ HPIMSGX__cleanup(phm->adapter_index, h_owner);
+ {
+ struct hpi_message hm;
+ struct hpi_response hr;
+ /* call to HPI_ADAPTER_CLOSE */
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
+ HPI_ADAPTER_CLOSE);
+ hm.adapter_index = phm->adapter_index;
+ hw_entry_point(&hm, &hr);
+ }
+ hw_entry_point(phm, phr);
+ gRESP_HPI_SUBSYS_FIND_ADAPTERS.s.
+ aw_adapter_list[phm->adapter_index]
+ = 0;
+ hpi_entry_points[phm->adapter_index] = NULL;
+ break;
+ default:
+ hw_entry_point(phm, phr);
+ break;
+ }
+}
+
+static void adapter_message(struct hpi_message *phm, struct hpi_response *phr,
+ void *h_owner)
+{
+ switch (phm->function) {
+ case HPI_ADAPTER_OPEN:
+ adapter_open(phm, phr);
+ break;
+ case HPI_ADAPTER_CLOSE:
+ adapter_close(phm, phr);
+ break;
+ default:
+ hw_entry_point(phm, phr);
+ break;
+ }
+}
+
+static void mixer_message(struct hpi_message *phm, struct hpi_response *phr)
+{
+ switch (phm->function) {
+ case HPI_MIXER_OPEN:
+ mixer_open(phm, phr);
+ break;
+ case HPI_MIXER_CLOSE:
+ mixer_close(phm, phr);
+ break;
+ default:
+ hw_entry_point(phm, phr);
+ break;
+ }
+}
+
+static void outstream_message(struct hpi_message *phm,
+ struct hpi_response *phr, void *h_owner)
+{
+ if (phm->obj_index >= aDAPTER_INFO[phm->adapter_index].num_outstreams) {
+ hpi_init_response(phr, HPI_OBJ_OSTREAM, phm->function,
+ HPI_ERROR_INVALID_OBJ_INDEX);
+ return;
+ }
+
+ switch (phm->function) {
+ case HPI_OSTREAM_OPEN:
+ outstream_open(phm, phr, h_owner);
+ break;
+ case HPI_OSTREAM_CLOSE:
+ outstream_close(phm, phr, h_owner);
+ break;
+ default:
+ hw_entry_point(phm, phr);
+ break;
+ }
+}
+
+static void instream_message(struct hpi_message *phm,
+ struct hpi_response *phr, void *h_owner)
+{
+ if (phm->obj_index >= aDAPTER_INFO[phm->adapter_index].num_instreams) {
+ hpi_init_response(phr, HPI_OBJ_ISTREAM, phm->function,
+ HPI_ERROR_INVALID_OBJ_INDEX);
+ return;
+ }
+
+ switch (phm->function) {
+ case HPI_ISTREAM_OPEN:
+ instream_open(phm, phr, h_owner);
+ break;
+ case HPI_ISTREAM_CLOSE:
+ instream_close(phm, phr, h_owner);
+ break;
+ default:
+ hw_entry_point(phm, phr);
+ break;
+ }
+}
+
+/* NOTE: HPI_Message() must be defined in the driver as a wrapper for
+ * HPI_MessageEx so that functions in hpifunc.c compile.
+ */
+void hpi_send_recv_ex(struct hpi_message *phm, struct hpi_response *phr,
+ void *h_owner)
+{
+ HPI_DEBUG_MESSAGE(DEBUG, phm);
+
+ if (phm->type != HPI_TYPE_MESSAGE) {
+ hpi_init_response(phr, phm->object, phm->function,
+ HPI_ERROR_INVALID_TYPE);
+ return;
+ }
+
+ if (phm->adapter_index >= HPI_MAX_ADAPTERS
+ && phm->adapter_index != HPIMSGX_ALLADAPTERS) {
+ hpi_init_response(phr, phm->object, phm->function,
+ HPI_ERROR_BAD_ADAPTER_NUMBER);
+ return;
+ }
+
+ switch (phm->object) {
+ case HPI_OBJ_SUBSYSTEM:
+ subsys_message(phm, phr, h_owner);
+ break;
+
+ case HPI_OBJ_ADAPTER:
+ adapter_message(phm, phr, h_owner);
+ break;
+
+ case HPI_OBJ_MIXER:
+ mixer_message(phm, phr);
+ break;
+
+ case HPI_OBJ_OSTREAM:
+ outstream_message(phm, phr, h_owner);
+ break;
+
+ case HPI_OBJ_ISTREAM:
+ instream_message(phm, phr, h_owner);
+ break;
+
+ default:
+ hw_entry_point(phm, phr);
+ break;
+ }
+ HPI_DEBUG_RESPONSE(phr);
+#if 1
+ if (phr->error >= HPI_ERROR_BACKEND_BASE) {
+ void *ep = NULL;
+ char *ep_name;
+
+ HPI_DEBUG_MESSAGE(ERROR, phm);
+
+ if (phm->adapter_index < HPI_MAX_ADAPTERS)
+ ep = hpi_entry_points[phm->adapter_index];
+
+ /* Possibly redundant: the adapter index is already in the debug
+ info, and the index->backend mapping is known at driver load time */
+ if (ep == HPI_6000)
+ ep_name = "HPI_6000";
+ else if (ep == HPI_6205)
+ ep_name = "HPI_6205";
+ else
+ ep_name = "unknown";
+
+ HPI_DEBUG_LOG(ERROR, "HPI %s response - error# %d\n", ep_name,
+ phr->error);
+
+ if (hpi_debug_level >= HPI_DEBUG_LEVEL_VERBOSE)
+ hpi_debug_data((u16 *)phm,
+ sizeof(*phm) / sizeof(u16));
+ }
+#endif
+}
+
+static void adapter_open(struct hpi_message *phm, struct hpi_response *phr)
+{
+ HPI_DEBUG_LOG(VERBOSE, "adapter_open\n");
+ memcpy(phr, &rESP_HPI_ADAPTER_OPEN[phm->adapter_index],
+ sizeof(rESP_HPI_ADAPTER_OPEN[0]));
+}
+
+static void adapter_close(struct hpi_message *phm, struct hpi_response *phr)
+{
+ HPI_DEBUG_LOG(VERBOSE, "adapter_close\n");
+ hpi_init_response(phr, HPI_OBJ_ADAPTER, HPI_ADAPTER_CLOSE, 0);
+}
+
+static void mixer_open(struct hpi_message *phm, struct hpi_response *phr)
+{
+ memcpy(phr, &rESP_HPI_MIXER_OPEN[phm->adapter_index],
+ sizeof(rESP_HPI_MIXER_OPEN[0]));
+}
+
+static void mixer_close(struct hpi_message *phm, struct hpi_response *phr)
+{
+ hpi_init_response(phr, HPI_OBJ_MIXER, HPI_MIXER_CLOSE, 0);
+}
+
+static void instream_open(struct hpi_message *phm, struct hpi_response *phr,
+ void *h_owner)
+{
+
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_response(phr, HPI_OBJ_ISTREAM, HPI_ISTREAM_OPEN, 0);
+
+ hpios_msgxlock_lock(&msgx_lock);
+
+ if (instream_user_open[phm->adapter_index][phm->obj_index].open_flag)
+ phr->error = HPI_ERROR_OBJ_ALREADY_OPEN;
+ else if (rESP_HPI_ISTREAM_OPEN[phm->adapter_index]
+ [phm->obj_index].h.error)
+ memcpy(phr,
+ &rESP_HPI_ISTREAM_OPEN[phm->adapter_index][phm->
+ obj_index],
+ sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
+ else {
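+ /* Mark the stream provisionally open, then drop the lock
+ while the reset goes to the hardware; roll back the
+ flag if the reset fails. */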
+ instream_user_open[phm->adapter_index][phm->
+ obj_index].open_flag = 1;
+ hpios_msgxlock_un_lock(&msgx_lock);
+
+ /* issue a reset */
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
+ HPI_ISTREAM_RESET);
+ hm.adapter_index = phm->adapter_index;
+ hm.obj_index = phm->obj_index;
+ hw_entry_point(&hm, &hr);
+
+ hpios_msgxlock_lock(&msgx_lock);
+ if (hr.error) {
+ instream_user_open[phm->adapter_index][phm->
+ obj_index].open_flag = 0;
+ phr->error = hr.error;
+ } else {
+ instream_user_open[phm->adapter_index][phm->
+ obj_index].open_flag = 1;
+ instream_user_open[phm->adapter_index][phm->
+ obj_index].h_owner = h_owner;
+ memcpy(phr,
+ &rESP_HPI_ISTREAM_OPEN[phm->adapter_index]
+ [phm->obj_index],
+ sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
+ }
+ }
+ hpios_msgxlock_un_lock(&msgx_lock);
+}
+
+static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
+ void *h_owner)
+{
+
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_response(phr, HPI_OBJ_ISTREAM, HPI_ISTREAM_CLOSE, 0);
+
+ hpios_msgxlock_lock(&msgx_lock);
+ if (h_owner ==
+ instream_user_open[phm->adapter_index][phm->
+ obj_index].h_owner) {
+ /* HPI_DEBUG_LOG(INFO,"closing adapter %d "
+ "instream %d owned by %p\n",
+ phm->wAdapterIndex, phm->wObjIndex, hOwner); */
+ instream_user_open[phm->adapter_index][phm->
+ obj_index].h_owner = NULL;
+ hpios_msgxlock_un_lock(&msgx_lock);
+ /* issue a reset */
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
+ HPI_ISTREAM_RESET);
+ hm.adapter_index = phm->adapter_index;
+ hm.obj_index = phm->obj_index;
+ hw_entry_point(&hm, &hr);
+ hpios_msgxlock_lock(&msgx_lock);
+ if (hr.error) {
+ instream_user_open[phm->adapter_index][phm->
+ obj_index].h_owner = h_owner;
+ phr->error = hr.error;
+ } else {
+ instream_user_open[phm->adapter_index][phm->
+ obj_index].open_flag = 0;
+ instream_user_open[phm->adapter_index][phm->
+ obj_index].h_owner = NULL;
+ }
+ } else {
+ HPI_DEBUG_LOG(WARNING,
+ "%p trying to close %d instream %d owned by %p\n",
+ h_owner, phm->adapter_index, phm->obj_index,
+ instream_user_open[phm->adapter_index][phm->
+ obj_index].h_owner);
+ phr->error = HPI_ERROR_OBJ_NOT_OPEN;
+ }
+ hpios_msgxlock_un_lock(&msgx_lock);
+}
+
+static void outstream_open(struct hpi_message *phm, struct hpi_response *phr,
+ void *h_owner)
+{
+
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_response(phr, HPI_OBJ_OSTREAM, HPI_OSTREAM_OPEN, 0);
+
+ hpios_msgxlock_lock(&msgx_lock);
+
+ if (outstream_user_open[phm->adapter_index][phm->obj_index].open_flag)
+ phr->error = HPI_ERROR_OBJ_ALREADY_OPEN;
+ else if (rESP_HPI_OSTREAM_OPEN[phm->adapter_index]
+ [phm->obj_index].h.error)
+ memcpy(phr,
+ &rESP_HPI_OSTREAM_OPEN[phm->adapter_index][phm->
+ obj_index],
+ sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
+ else {
+ outstream_user_open[phm->adapter_index][phm->
+ obj_index].open_flag = 1;
+ hpios_msgxlock_un_lock(&msgx_lock);
+
+ /* issue a reset */
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
+ HPI_OSTREAM_RESET);
+ hm.adapter_index = phm->adapter_index;
+ hm.obj_index = phm->obj_index;
+ hw_entry_point(&hm, &hr);
+
+ hpios_msgxlock_lock(&msgx_lock);
+ if (hr.error) {
+ outstream_user_open[phm->adapter_index][phm->
+ obj_index].open_flag = 0;
+ phr->error = hr.error;
+ } else {
+ outstream_user_open[phm->adapter_index][phm->
+ obj_index].open_flag = 1;
+ outstream_user_open[phm->adapter_index][phm->
+ obj_index].h_owner = h_owner;
+ memcpy(phr,
+ &rESP_HPI_OSTREAM_OPEN[phm->adapter_index]
+ [phm->obj_index],
+ sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
+ }
+ }
+ hpios_msgxlock_un_lock(&msgx_lock);
+}
+
+static void outstream_close(struct hpi_message *phm, struct hpi_response *phr,
+ void *h_owner)
+{
+
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_response(phr, HPI_OBJ_OSTREAM, HPI_OSTREAM_CLOSE, 0);
+
+ hpios_msgxlock_lock(&msgx_lock);
+
+ if (h_owner ==
+ outstream_user_open[phm->adapter_index][phm->
+ obj_index].h_owner) {
+ /* HPI_DEBUG_LOG(INFO,"closing adapter %d "
+ "outstream %d owned by %p\n",
+ phm->wAdapterIndex, phm->wObjIndex, hOwner); */
+ outstream_user_open[phm->adapter_index][phm->
+ obj_index].h_owner = NULL;
+ hpios_msgxlock_un_lock(&msgx_lock);
+ /* issue a reset */
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
+ HPI_OSTREAM_RESET);
+ hm.adapter_index = phm->adapter_index;
+ hm.obj_index = phm->obj_index;
+ hw_entry_point(&hm, &hr);
+ hpios_msgxlock_lock(&msgx_lock);
+ if (hr.error) {
+ outstream_user_open[phm->adapter_index][phm->
+ obj_index].h_owner = h_owner;
+ phr->error = hr.error;
+ } else {
+ outstream_user_open[phm->adapter_index][phm->
+ obj_index].open_flag = 0;
+ outstream_user_open[phm->adapter_index][phm->
+ obj_index].h_owner = NULL;
+ }
+ } else {
+ HPI_DEBUG_LOG(WARNING,
+ "%p trying to close %d outstream %d owned by %p\n",
+ h_owner, phm->adapter_index, phm->obj_index,
+ outstream_user_open[phm->adapter_index][phm->
+ obj_index].h_owner);
+ phr->error = HPI_ERROR_OBJ_NOT_OPEN;
+ }
+ hpios_msgxlock_un_lock(&msgx_lock);
+}
+
+static u16 adapter_prepare(u16 adapter)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ /* Open the adapter and streams */
+ u16 i;
+
+ /* call to HPI_ADAPTER_OPEN */
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
+ HPI_ADAPTER_OPEN);
+ hm.adapter_index = adapter;
+ hw_entry_point(&hm, &hr);
+ memcpy(&rESP_HPI_ADAPTER_OPEN[adapter], &hr,
+ sizeof(rESP_HPI_ADAPTER_OPEN[0]));
+ if (hr.error)
+ return hr.error;
+
+ /* call to HPI_ADAPTER_GET_INFO */
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
+ HPI_ADAPTER_GET_INFO);
+ hm.adapter_index = adapter;
+ hw_entry_point(&hm, &hr);
+ if (hr.error)
+ return hr.error;
+
+ aDAPTER_INFO[adapter].num_outstreams = hr.u.a.num_outstreams;
+ aDAPTER_INFO[adapter].num_instreams = hr.u.a.num_instreams;
+ aDAPTER_INFO[adapter].type = hr.u.a.adapter_type;
+
+ gRESP_HPI_SUBSYS_FIND_ADAPTERS.s.aw_adapter_list[adapter] =
+ hr.u.a.adapter_type;
+ gRESP_HPI_SUBSYS_FIND_ADAPTERS.s.num_adapters++;
+ if (gRESP_HPI_SUBSYS_FIND_ADAPTERS.s.num_adapters > HPI_MAX_ADAPTERS)
+ gRESP_HPI_SUBSYS_FIND_ADAPTERS.s.num_adapters =
+ HPI_MAX_ADAPTERS;
+
+ /* call to HPI_OSTREAM_OPEN */
+ for (i = 0; i < aDAPTER_INFO[adapter].num_outstreams; i++) {
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
+ HPI_OSTREAM_OPEN);
+ hm.adapter_index = adapter;
+ hm.obj_index = i;
+ hw_entry_point(&hm, &hr);
+ memcpy(&rESP_HPI_OSTREAM_OPEN[adapter][i], &hr,
+ sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
+ outstream_user_open[adapter][i].open_flag = 0;
+ outstream_user_open[adapter][i].h_owner = NULL;
+ }
+
+ /* call to HPI_ISTREAM_OPEN */
+ for (i = 0; i < aDAPTER_INFO[adapter].num_instreams; i++) {
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
+ HPI_ISTREAM_OPEN);
+ hm.adapter_index = adapter;
+ hm.obj_index = i;
+ hw_entry_point(&hm, &hr);
+ memcpy(&rESP_HPI_ISTREAM_OPEN[adapter][i], &hr,
+ sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
+ instream_user_open[adapter][i].open_flag = 0;
+ instream_user_open[adapter][i].h_owner = NULL;
+ }
+
+ /* call to HPI_MIXER_OPEN */
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN);
+ hm.adapter_index = adapter;
+ hw_entry_point(&hm, &hr);
+ memcpy(&rESP_HPI_MIXER_OPEN[adapter], &hr,
+ sizeof(rESP_HPI_MIXER_OPEN[0]));
+
+ return gRESP_HPI_SUBSYS_FIND_ADAPTERS.h.error;
+}
+
+static void HPIMSGX__reset(u16 adapter_index)
+{
+ int i;
+ u16 adapter;
+ struct hpi_response hr;
+
+ if (adapter_index == HPIMSGX_ALLADAPTERS) {
+ /* reset all responses to contain errors */
+ hpi_init_response(&hr, HPI_OBJ_SUBSYSTEM,
+ HPI_SUBSYS_FIND_ADAPTERS, 0);
+ memcpy(&gRESP_HPI_SUBSYS_FIND_ADAPTERS, &hr,
+ sizeof(gRESP_HPI_SUBSYS_FIND_ADAPTERS));
+
+ for (adapter = 0; adapter < HPI_MAX_ADAPTERS; adapter++) {
+
+ hpi_init_response(&hr, HPI_OBJ_ADAPTER,
+ HPI_ADAPTER_OPEN, HPI_ERROR_BAD_ADAPTER);
+ memcpy(&rESP_HPI_ADAPTER_OPEN[adapter], &hr,
+ sizeof(rESP_HPI_ADAPTER_OPEN[adapter]));
+
+ hpi_init_response(&hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN,
+ HPI_ERROR_INVALID_OBJ);
+ memcpy(&rESP_HPI_MIXER_OPEN[adapter], &hr,
+ sizeof(rESP_HPI_MIXER_OPEN[adapter]));
+
+ for (i = 0; i < HPI_MAX_STREAMS; i++) {
+ hpi_init_response(&hr, HPI_OBJ_OSTREAM,
+ HPI_OSTREAM_OPEN,
+ HPI_ERROR_INVALID_OBJ);
+ memcpy(&rESP_HPI_OSTREAM_OPEN[adapter][i],
+ &hr,
+ sizeof(rESP_HPI_OSTREAM_OPEN[adapter]
+ [i]));
+ hpi_init_response(&hr, HPI_OBJ_ISTREAM,
+ HPI_ISTREAM_OPEN,
+ HPI_ERROR_INVALID_OBJ);
+ memcpy(&rESP_HPI_ISTREAM_OPEN[adapter][i],
+ &hr,
+ sizeof(rESP_HPI_ISTREAM_OPEN[adapter]
+ [i]));
+ }
+ }
+ } else if (adapter_index < HPI_MAX_ADAPTERS) {
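+ /* Single adapter: invalidate its cached open responses and
+ drop it from the cached adapter list. */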
+ rESP_HPI_ADAPTER_OPEN[adapter_index].h.error =
+ HPI_ERROR_BAD_ADAPTER;
+ rESP_HPI_MIXER_OPEN[adapter_index].h.error =
+ HPI_ERROR_INVALID_OBJ;
+ for (i = 0; i < HPI_MAX_STREAMS; i++) {
+ rESP_HPI_OSTREAM_OPEN[adapter_index][i].h.error =
+ HPI_ERROR_INVALID_OBJ;
+ rESP_HPI_ISTREAM_OPEN[adapter_index][i].h.error =
+ HPI_ERROR_INVALID_OBJ;
+ }
+ if (gRESP_HPI_SUBSYS_FIND_ADAPTERS.
+ s.aw_adapter_list[adapter_index]) {
+ gRESP_HPI_SUBSYS_FIND_ADAPTERS.
+ s.aw_adapter_list[adapter_index] = 0;
+ gRESP_HPI_SUBSYS_FIND_ADAPTERS.s.num_adapters--;
+ }
+ }
+}
+
+static u16 HPIMSGX__init(struct hpi_message *phm,
+ /* HPI_SUBSYS_CREATE_ADAPTER structure with */
+ /* resource list or NULL=find all */
+ struct hpi_response *phr
+ /* response from HPI_ADAPTER_GET_INFO */
+ )
+{
+ hpi_handler_func *entry_point_func;
+ struct hpi_response hr;
+
+ if (gRESP_HPI_SUBSYS_FIND_ADAPTERS.s.num_adapters >= HPI_MAX_ADAPTERS)
+ return HPI_ERROR_BAD_ADAPTER_NUMBER;
+
+ /* Init response here so we can pass in previous adapter list */
+ hpi_init_response(&hr, phm->object, phm->function,
+ HPI_ERROR_INVALID_OBJ);
+ memcpy(hr.u.s.aw_adapter_list,
+ gRESP_HPI_SUBSYS_FIND_ADAPTERS.s.aw_adapter_list,
+ sizeof(gRESP_HPI_SUBSYS_FIND_ADAPTERS.s.aw_adapter_list));
+
+ entry_point_func =
+ hpi_lookup_entry_point_function(phm->u.s.resource.r.pci);
+
+ if (entry_point_func) {
+ HPI_DEBUG_MESSAGE(DEBUG, phm);
+ entry_point_func(phm, &hr);
+ } else {
+ phr->error = HPI_ERROR_PROCESSING_MESSAGE;
+ return phr->error;
+ }
+ if (hr.error == 0) {
+ /* the adapter was created successfully;
+ save the mapping for future use */
+ hpi_entry_points[hr.u.s.adapter_index] = entry_point_func;
+ /* prepare adapter (pre-open streams etc.) */
+ HPI_DEBUG_LOG(DEBUG,
+ "HPI_SUBSYS_CREATE_ADAPTER successful,"
+ " preparing adapter\n");
+ adapter_prepare(hr.u.s.adapter_index);
+ }
+ memcpy(phr, &hr, hr.size);
+ return phr->error;
+}
+
+static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner)
+{
+ int i, adapter, adapter_limit;
+
+ if (!h_owner)
+ return;
+
+ if (adapter_index == HPIMSGX_ALLADAPTERS) {
+ adapter = 0;
+ adapter_limit = HPI_MAX_ADAPTERS;
+ } else {
+ adapter = adapter_index;
+ adapter_limit = adapter + 1;
+ }
+
+ for (; adapter < adapter_limit; adapter++) {
+ /* printk(KERN_INFO "Cleanup adapter #%d\n",wAdapter); */
+ for (i = 0; i < HPI_MAX_STREAMS; i++) {
+ if (h_owner ==
+ outstream_user_open[adapter][i].h_owner) {
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ HPI_DEBUG_LOG(DEBUG,
+ "close adapter %d ostream %d\n",
+ adapter, i);
+
+ hpi_init_message_response(&hm, &hr,
+ HPI_OBJ_OSTREAM, HPI_OSTREAM_RESET);
+ hm.adapter_index = (u16)adapter;
+ hm.obj_index = (u16)i;
+ hw_entry_point(&hm, &hr);
+
+ hm.function = HPI_OSTREAM_HOSTBUFFER_FREE;
+ hw_entry_point(&hm, &hr);
+
+ hm.function = HPI_OSTREAM_GROUP_RESET;
+ hw_entry_point(&hm, &hr);
+
+ outstream_user_open[adapter][i].open_flag = 0;
+ outstream_user_open[adapter][i].h_owner =
+ NULL;
+ }
+ if (h_owner == instream_user_open[adapter][i].h_owner) {
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ HPI_DEBUG_LOG(DEBUG,
+ "close adapter %d istream %d\n",
+ adapter, i);
+
+ hpi_init_message_response(&hm, &hr,
+ HPI_OBJ_ISTREAM, HPI_ISTREAM_RESET);
+ hm.adapter_index = (u16)adapter;
+ hm.obj_index = (u16)i;
+ hw_entry_point(&hm, &hr);
+
+ hm.function = HPI_ISTREAM_HOSTBUFFER_FREE;
+ hw_entry_point(&hm, &hr);
+
+ hm.function = HPI_ISTREAM_GROUP_RESET;
+ hw_entry_point(&hm, &hr);
+
+ instream_user_open[adapter][i].open_flag = 0;
+ instream_user_open[adapter][i].h_owner = NULL;
+ }
+ }
+ }
+}
diff --git a/sound/pci/asihpi/hpimsgx.h b/sound/pci/asihpi/hpimsgx.h
new file mode 100644
index 000000000000..fd49e7542a88
--- /dev/null
+++ b/sound/pci/asihpi/hpimsgx.h
@@ -0,0 +1,36 @@
+/******************************************************************************
+
+ AudioScience HPI driver
+ Copyright (C) 1997-2010 AudioScience Inc. <support@audioscience.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation;
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ HPI Extended Message Handler Functions
+
+(C) Copyright AudioScience Inc. 1997-2003
+******************************************************************************/
+
+#ifndef _HPIMSGX_H_
+#define _HPIMSGX_H_
+
+#include "hpi_internal.h"
+
+#define HPIMSGX_ALLADAPTERS (0xFFFF)
+
+void hpi_send_recv_ex(struct hpi_message *phm, struct hpi_response *phr,
+ void *h_owner);
+
+#define HPI_MESSAGE_LOWER_LAYER hpi_send_recv_ex
+
+#endif /* _HPIMSGX_H_ */
diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c
new file mode 100644
index 000000000000..7396ac54e99f
--- /dev/null
+++ b/sound/pci/asihpi/hpioctl.c
@@ -0,0 +1,484 @@
+/*******************************************************************************
+
+ AudioScience HPI driver
+ Copyright (C) 1997-2010 AudioScience Inc. <support@audioscience.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation;
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+Common Linux HPI ioctl and module probe/remove functions
+*******************************************************************************/
+#define SOURCEFILE_NAME "hpioctl.c"
+
+#include "hpi_internal.h"
+#include "hpimsginit.h"
+#include "hpidebug.h"
+#include "hpimsgx.h"
+#include "hpioctl.h"
+
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/moduleparam.h>
+#include <asm/uaccess.h>
+#include <linux/stringify.h>
+
+#ifdef MODULE_FIRMWARE
+MODULE_FIRMWARE("asihpi/dsp5000.bin");
+MODULE_FIRMWARE("asihpi/dsp6200.bin");
+MODULE_FIRMWARE("asihpi/dsp6205.bin");
+MODULE_FIRMWARE("asihpi/dsp6400.bin");
+MODULE_FIRMWARE("asihpi/dsp6600.bin");
+MODULE_FIRMWARE("asihpi/dsp8700.bin");
+MODULE_FIRMWARE("asihpi/dsp8900.bin");
+#endif
+
+static int prealloc_stream_buf;
+module_param(prealloc_stream_buf, int, S_IRUGO);
+MODULE_PARM_DESC(prealloc_stream_buf,
+ "preallocate size for per-adapter stream buffer");
+
+/* Allow the debug level to be changed after module load.
+ E.g. echo 2 > /sys/module/asihpi/parameters/hpi_debug_level
+*/
+module_param(hpi_debug_level, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(hpi_debug_level, "debug verbosity 0..5");
+
+/* List of adapters found */
+static struct hpi_adapter adapters[HPI_MAX_ADAPTERS];
+
+/* Wrapper function to HPI_Message to enable dumping of the
+ message and response types.
+*/
+static void hpi_send_recv_f(struct hpi_message *phm, struct hpi_response *phr,
+ struct file *file)
+{
+ int adapter = phm->adapter_index;
+
+ if ((adapter >= HPI_MAX_ADAPTERS || adapter < 0)
+ && (phm->object != HPI_OBJ_SUBSYSTEM))
+ phr->error = HPI_ERROR_INVALID_OBJ_INDEX;
+ else
+ hpi_send_recv_ex(phm, phr, file);
+}
+
+/* This is called from hpifunc.c functions, called by ALSA
+ * (or other kernel process) In this case there is no file descriptor
+ * available for the message cache code
+ */
+void hpi_send_recv(struct hpi_message *phm, struct hpi_response *phr)
+{
+ hpi_send_recv_f(phm, phr, HOWNER_KERNEL);
+}
+
+EXPORT_SYMBOL(hpi_send_recv);
+/* for radio-asihpi */
+
+int asihpi_hpi_release(struct file *file)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+/* HPI_DEBUG_LOG(INFO,"hpi_release file %p, pid %d\n", file, current->pid); */
+ /* close the subsystem just in case the application forgot to. */
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
+ HPI_SUBSYS_CLOSE);
+ hpi_send_recv_ex(&hm, &hr, file);
+ return 0;
+}
+
+long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct hpi_ioctl_linux __user *phpi_ioctl_data;
+ void __user *puhm;
+ void __user *puhr;
+ union hpi_message_buffer_v1 *hm;
+ union hpi_response_buffer_v1 *hr;
+ u16 res_max_size;
+ u32 uncopied_bytes;
+ struct hpi_adapter *pa = NULL;
+ int err = 0;
+
+ if (cmd != HPI_IOCTL_LINUX)
+ return -EINVAL;
+
+ hm = kmalloc(sizeof(*hm), GFP_KERNEL);
+ hr = kmalloc(sizeof(*hr), GFP_KERNEL);
+ if (!hm || !hr) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ phpi_ioctl_data = (struct hpi_ioctl_linux __user *)arg;
+
+ /* Read the message and response pointers from user space. */
+ get_user(puhm, &phpi_ioctl_data->phm);
+ get_user(puhr, &phpi_ioctl_data->phr);
+
+ /* Now read the message size and data from user space. */
+ get_user(hm->h.size, (u16 __user *)puhm);
+ if (hm->h.size > sizeof(*hm))
+ hm->h.size = sizeof(*hm);
+
+ /*printk(KERN_INFO "message size %d\n", hm->h.wSize); */
+
+ uncopied_bytes = copy_from_user(hm, puhm, hm->h.size);
+ if (uncopied_bytes) {
+ HPI_DEBUG_LOG(ERROR, "uncopied bytes %d\n", uncopied_bytes);
+ err = -EFAULT;
+ goto out;
+ }
+
+ get_user(res_max_size, (u16 __user *)puhr);
+ /* printk(KERN_INFO "user response size %d\n", res_max_size); */
+ if (res_max_size < sizeof(struct hpi_response_header)) {
+ HPI_DEBUG_LOG(WARNING, "small res size %d\n", res_max_size);
+ err = -EFAULT;
+ goto out;
+ }
+
+ pa = &adapters[hm->h.adapter_index];
+ hr->h.size = 0;
+ if (hm->h.object == HPI_OBJ_SUBSYSTEM) {
+ switch (hm->h.function) {
+ case HPI_SUBSYS_CREATE_ADAPTER:
+ case HPI_SUBSYS_DELETE_ADAPTER:
+ /* Application must not use these functions! */
+ hr->h.size = sizeof(hr->h);
+ hr->h.error = HPI_ERROR_INVALID_OPERATION;
+ hr->h.function = hm->h.function;
+ uncopied_bytes = copy_to_user(puhr, hr, hr->h.size);
+ if (uncopied_bytes)
+ err = -EFAULT;
+ else
+ err = 0;
+ goto out;
+
+ default:
+ hpi_send_recv_f(&hm->m0, &hr->r0, file);
+ }
+ } else {
+ u16 __user *ptr = NULL;
+ u32 size = 0;
+
+ /* -1=no data 0=read from user mem, 1=write to user mem */
+ int wrflag = -1;
+ u32 adapter = hm->h.adapter_index;
+
+ if ((hm->h.adapter_index >= HPI_MAX_ADAPTERS) || (!pa->type)) {
+ hpi_init_response(&hr->r0, HPI_OBJ_ADAPTER,
+ HPI_ADAPTER_OPEN,
+ HPI_ERROR_BAD_ADAPTER_NUMBER);
+
+ uncopied_bytes =
+ copy_to_user(puhr, hr, sizeof(hr->h));
+ if (uncopied_bytes)
+ err = -EFAULT;
+ else
+ err = 0;
+ goto out;
+ }
+
+ if (mutex_lock_interruptible(&adapters[adapter].mutex)) {
+ err = -EINTR;
+ goto out;
+ }
+
+ /* Dig out any pointers embedded in the message. */
+ switch (hm->h.function) {
+ case HPI_OSTREAM_WRITE:
+ case HPI_ISTREAM_READ:{
+ /* Yes, sparse, this is correct. */
+ ptr = (u16 __user *)hm->m0.u.d.u.data.pb_data;
+ size = hm->m0.u.d.u.data.data_size;
+
+ /* Allocate buffer according to application request.
+ Would it be better to alloc/free for the duration
+ of the transaction?
+ */
+ if (pa->buffer_size < size) {
+ HPI_DEBUG_LOG(DEBUG,
+ "realloc adapter %d stream "
+ "buffer from %zd to %d\n",
+ hm->h.adapter_index,
+ pa->buffer_size, size);
+ if (pa->p_buffer) {
+ pa->buffer_size = 0;
+ vfree(pa->p_buffer);
+ }
+ pa->p_buffer = vmalloc(size);
+ if (pa->p_buffer)
+ pa->buffer_size = size;
+ else {
+ HPI_DEBUG_LOG(ERROR,
+ "HPI could not allocate "
+ "stream buffer size %d\n",
+ size);
+
+ mutex_unlock(&adapters
+ [adapter].mutex);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ hm->m0.u.d.u.data.pb_data = pa->p_buffer;
+ if (hm->h.function == HPI_ISTREAM_READ)
+ /* from card, WRITE to user mem */
+ wrflag = 1;
+ else
+ wrflag = 0;
+ break;
+ }
+
+ default:
+ size = 0;
+ break;
+ }
+
+ if (size && (wrflag == 0)) {
+ uncopied_bytes =
+ copy_from_user(pa->p_buffer, ptr, size);
+ if (uncopied_bytes)
+ HPI_DEBUG_LOG(WARNING,
+ "missed %d of %d "
+ "bytes from user\n", uncopied_bytes,
+ size);
+ }
+
+ hpi_send_recv_f(&hm->m0, &hr->r0, file);
+
+ if (size && (wrflag == 1)) {
+ uncopied_bytes =
+ copy_to_user(ptr, pa->p_buffer, size);
+ if (uncopied_bytes)
+ HPI_DEBUG_LOG(WARNING,
+ "missed %d of %d " "bytes to user\n",
+ uncopied_bytes, size);
+ }
+
+ mutex_unlock(&adapters[adapter].mutex);
+ }
+
+ /* on return response size must be set */
+ /*printk(KERN_INFO "response size %d\n", hr->h.wSize); */
+
+ if (!hr->h.size) {
+ HPI_DEBUG_LOG(ERROR, "response zero size\n");
+ err = -EFAULT;
+ goto out;
+ }
+
+ if (hr->h.size > res_max_size) {
+ HPI_DEBUG_LOG(ERROR, "response too big %d %d\n", hr->h.size,
+ res_max_size);
+ /*HPI_DEBUG_MESSAGE(ERROR, hm); */
+ err = -EFAULT;
+ goto out;
+ }
+
+ uncopied_bytes = copy_to_user(puhr, hr, hr->h.size);
+ if (uncopied_bytes) {
+ HPI_DEBUG_LOG(ERROR, "uncopied bytes %d\n", uncopied_bytes);
+ err = -EFAULT;
+ goto out;
+ }
+
+out:
+ kfree(hm);
+ kfree(hr);
+ return err;
+}
+
+int __devinit asihpi_adapter_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_id)
+{
+ int err, idx, nm;
+ unsigned int memlen;
+ struct hpi_message hm;
+ struct hpi_response hr;
+ struct hpi_adapter adapter;
+ struct hpi_pci pci;
+
+ memset(&adapter, 0, sizeof(adapter));
+
+ printk(KERN_DEBUG "probe PCI device (%04x:%04x,%04x:%04x,%04x)\n",
+ pci_dev->vendor, pci_dev->device, pci_dev->subsystem_vendor,
+ pci_dev->subsystem_device, pci_dev->devfn);
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
+ HPI_SUBSYS_CREATE_ADAPTER);
+ hpi_init_response(&hr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_CREATE_ADAPTER,
+ HPI_ERROR_PROCESSING_MESSAGE);
+
+ hm.adapter_index = -1; /* an invalid index */
+
+ /* fill in HPI_PCI information from kernel provided information */
+ adapter.pci = pci_dev;
+
+ nm = HPI_MAX_ADAPTER_MEM_SPACES;
+
+ for (idx = 0; idx < nm; idx++) {
+ HPI_DEBUG_LOG(INFO, "resource %d %s %08llx-%08llx %04llx\n",
+ idx, pci_dev->resource[idx].name,
+ (unsigned long long)pci_resource_start(pci_dev, idx),
+ (unsigned long long)pci_resource_end(pci_dev, idx),
+ (unsigned long long)pci_resource_flags(pci_dev, idx));
+
+ if (pci_resource_flags(pci_dev, idx) & IORESOURCE_MEM) {
+ memlen = pci_resource_len(pci_dev, idx);
+ adapter.ap_remapped_mem_base[idx] =
+ ioremap(pci_resource_start(pci_dev, idx),
+ memlen);
+ if (!adapter.ap_remapped_mem_base[idx]) {
+ HPI_DEBUG_LOG(ERROR,
+ "ioremap failed, aborting\n");
+ /* unmap previously mapped pci mem space */
+ goto err;
+ }
+ }
+
+ pci.ap_mem_base[idx] = adapter.ap_remapped_mem_base[idx];
+ }
+
+ /* could replace Pci with direct pointer to pci_dev for linux
+ Instead wrap accessor functions for IDs etc.
+ Would it work for windows?
+ */
+ pci.bus_number = pci_dev->bus->number;
+ pci.vendor_id = (u16)pci_dev->vendor;
+ pci.device_id = (u16)pci_dev->device;
+ pci.subsys_vendor_id = (u16)(pci_dev->subsystem_vendor & 0xffff);
+ pci.subsys_device_id = (u16)(pci_dev->subsystem_device & 0xffff);
+ pci.device_number = pci_dev->devfn;
+ pci.interrupt = pci_dev->irq;
+ pci.p_os_data = pci_dev;
+
+ hm.u.s.resource.bus_type = HPI_BUS_PCI;
+ hm.u.s.resource.r.pci = &pci;
+
+ /* call CreateAdapterObject on the relevant hpi module */
+ hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
+ if (hr.error)
+ goto err;
+
+ if (prealloc_stream_buf) {
+ adapter.p_buffer = vmalloc(prealloc_stream_buf);
+ if (!adapter.p_buffer) {
+ HPI_DEBUG_LOG(ERROR,
+ "HPI could not allocate "
+ "kernel buffer size %d\n",
+ prealloc_stream_buf);
+ goto err;
+ }
+ }
+
+ adapter.index = hr.u.s.adapter_index;
+ adapter.type = hr.u.s.aw_adapter_list[adapter.index];
+ hm.adapter_index = adapter.index;
+
+ err = hpi_adapter_open(NULL, adapter.index);
+ if (err)
+ goto err;
+
+ adapter.snd_card_asihpi = NULL;
+ /* WARNING: the mutex must not be initialised in the local 'adapter'
+ * and then copied into adapters[]; initialise it in place instead.
+ */
+ adapters[hr.u.s.adapter_index] = adapter;
+ mutex_init(&adapters[adapter.index].mutex);
+ pci_set_drvdata(pci_dev, &adapters[adapter.index]);
+
+ printk(KERN_INFO "probe found adapter ASI%04X HPI index #%d.\n",
+ adapter.type, adapter.index);
+
+ return 0;
+
+err:
+ for (idx = 0; idx < HPI_MAX_ADAPTER_MEM_SPACES; idx++) {
+ if (adapter.ap_remapped_mem_base[idx]) {
+ iounmap(adapter.ap_remapped_mem_base[idx]);
+ adapter.ap_remapped_mem_base[idx] = NULL;
+ }
+ }
+
+ if (adapter.p_buffer) {
+ adapter.buffer_size = 0;
+ vfree(adapter.p_buffer);
+ }
+
+ HPI_DEBUG_LOG(ERROR, "adapter_probe failed\n");
+ return -ENODEV;
+}
+
+void __devexit asihpi_adapter_remove(struct pci_dev *pci_dev)
+{
+ int idx;
+ struct hpi_message hm;
+ struct hpi_response hr;
+ struct hpi_adapter *pa;
+ pa = (struct hpi_adapter *)pci_get_drvdata(pci_dev);
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
+ HPI_SUBSYS_DELETE_ADAPTER);
+ hm.adapter_index = pa->index;
+ hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
+
+ /* unmap PCI memory space, mapped during device init. */
+ for (idx = 0; idx < HPI_MAX_ADAPTER_MEM_SPACES; idx++) {
+ if (pa->ap_remapped_mem_base[idx]) {
+ iounmap(pa->ap_remapped_mem_base[idx]);
+ pa->ap_remapped_mem_base[idx] = NULL;
+ }
+ }
+
+ if (pa->p_buffer) {
+ pa->buffer_size = 0;
+ vfree(pa->p_buffer);
+ }
+
+ pci_set_drvdata(pci_dev, NULL);
+ /*
+ printk(KERN_INFO "PCI device (%04x:%04x,%04x:%04x,%04x),"
+ " HPI index # %d, removed.\n",
+ pci_dev->vendor, pci_dev->device,
+ pci_dev->subsystem_vendor,
+ pci_dev->subsystem_device, pci_dev->devfn,
+ pa->index);
+ */
+}
+
+void __init asihpi_init(void)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ memset(adapters, 0, sizeof(adapters));
+
+ printk(KERN_INFO "ASIHPI driver %d.%02d.%02d\n",
+ HPI_VER_MAJOR(HPI_VER), HPI_VER_MINOR(HPI_VER),
+ HPI_VER_RELEASE(HPI_VER));
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
+ HPI_SUBSYS_DRIVER_LOAD);
+ hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
+}
+
+void asihpi_exit(void)
+{
+ struct hpi_message hm;
+ struct hpi_response hr;
+
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
+ HPI_SUBSYS_DRIVER_UNLOAD);
+ hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
+}
diff --git a/sound/pci/asihpi/hpioctl.h b/sound/pci/asihpi/hpioctl.h
new file mode 100644
index 000000000000..847f72f03fe1
--- /dev/null
+++ b/sound/pci/asihpi/hpioctl.h
@@ -0,0 +1,38 @@
+/*******************************************************************************
+
+ AudioScience HPI driver
+ Copyright (C) 1997-2010 AudioScience Inc. <support@audioscience.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation;
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+Linux HPI ioctl, and shared module init functions
+*******************************************************************************/
+
+int __devinit asihpi_adapter_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_id);
+void __devexit asihpi_adapter_remove(struct pci_dev *pci_dev);
+void __init asihpi_init(void);
+void __exit asihpi_exit(void);
+
+int asihpi_hpi_release(struct file *file);
+
+long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+
+/* This is called from hpifunc.c functions, which are called by ALSA
+ * (or another kernel process). In this case there is no file descriptor
+ * available for the message cache code.
+ */
+void hpi_send_recv(struct hpi_message *phm, struct hpi_response *phr);
+
+#define HOWNER_KERNEL ((void *)-1)
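/* Illustrative sketch only, not part of the patch: how an in-kernel caller
 * with no file descriptor would use hpi_send_recv(), mirroring the pattern
 * asihpi_init() uses via hpi_send_recv_ex(). All names are taken from the
 * code added in this series.
 */
static void example_kernel_hpi_call(void)
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
		HPI_SUBSYS_DRIVER_LOAD);
	hpi_send_recv(&hm, &hr);	/* owner is implicitly the kernel */
	if (hr.error)
		printk(KERN_WARNING "example HPI call failed: %d\n", hr.error);
}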
diff --git a/sound/pci/asihpi/hpios.c b/sound/pci/asihpi/hpios.c
new file mode 100644
index 000000000000..de615cfdb950
--- /dev/null
+++ b/sound/pci/asihpi/hpios.c
@@ -0,0 +1,114 @@
+/******************************************************************************
+
+ AudioScience HPI driver
+ Copyright (C) 1997-2010 AudioScience Inc. <support@audioscience.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation;
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+HPI Operating System function implementation for Linux
+
+(C) Copyright AudioScience Inc. 1997-2003
+******************************************************************************/
+#define SOURCEFILE_NAME "hpios.c"
+#include "hpi_internal.h"
+#include "hpidebug.h"
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+void hpios_delay_micro_seconds(u32 num_micro_sec)
+{
+ if ((usecs_to_jiffies(num_micro_sec) > 1) && !in_interrupt()) {
+ /* MUST NOT SCHEDULE IN INTERRUPT CONTEXT! */
+ schedule_timeout_uninterruptible(usecs_to_jiffies
+ (num_micro_sec));
+ } else if (num_micro_sec <= 2000)
+ udelay(num_micro_sec);
+ else
+ mdelay(num_micro_sec / 1000);
+
+}
+
+void hpios_locked_mem_init(void)
+{
+}
+
+/** Allocate an area of locked memory for bus-master DMA operations.
+
+On error, return -ENOMEM and set p_mem_area->size to 0.
+*/
+u16 hpios_locked_mem_alloc(struct consistent_dma_area *p_mem_area, u32 size,
+ struct pci_dev *pdev)
+{
+ /*?? any benefit in using managed dmam_alloc_coherent? */
+ p_mem_area->vaddr =
+ dma_alloc_coherent(&pdev->dev, size, &p_mem_area->dma_handle,
+ GFP_DMA32 | GFP_KERNEL);
+
+ if (p_mem_area->vaddr) {
+ HPI_DEBUG_LOG(DEBUG, "allocated %d bytes, dma 0x%x vma %p\n",
+ size, (unsigned int)p_mem_area->dma_handle,
+ p_mem_area->vaddr);
+ p_mem_area->pdev = &pdev->dev;
+ p_mem_area->size = size;
+ return 0;
+ } else {
+ HPI_DEBUG_LOG(WARNING,
+ "failed to allocate %d bytes locked memory\n", size);
+ p_mem_area->size = 0;
+ return -ENOMEM;
+ }
+}
+
+u16 hpios_locked_mem_free(struct consistent_dma_area *p_mem_area)
+{
+ if (p_mem_area->size) {
+ dma_free_coherent(p_mem_area->pdev, p_mem_area->size,
+ p_mem_area->vaddr, p_mem_area->dma_handle);
+ HPI_DEBUG_LOG(DEBUG, "freed %lu bytes, dma 0x%x vma %p\n",
+ (unsigned long)p_mem_area->size,
+ (unsigned int)p_mem_area->dma_handle,
+ p_mem_area->vaddr);
+ p_mem_area->size = 0;
+ return 0;
+ } else {
+ return 1;
+ }
+}
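/* Illustrative sketch only, not part of the patch: the intended alloc/free
 * pairing for a bus-master DMA buffer, using only the helpers defined above.
 * The 4096-byte size is arbitrary.
 */
static u16 example_dma_buffer_roundtrip(struct pci_dev *pdev)
{
	struct consistent_dma_area area;
	u16 err;

	err = hpios_locked_mem_alloc(&area, 4096, pdev);
	if (err)
		return err;	/* area.size is left at 0 on failure */

	/* ... hand area.vaddr / area.dma_handle to the DSP here ... */

	return hpios_locked_mem_free(&area);	/* 0 on success */
}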
+
+void hpios_locked_mem_free_all(void)
+{
+}
+
+void __iomem *hpios_map_io(struct pci_dev *pci_dev, int idx,
+ unsigned int length)
+{
+ HPI_DEBUG_LOG(DEBUG, "mapping %d %s %08llx-%08llx %04llx len 0x%x\n",
+ idx, pci_dev->resource[idx].name,
+ (unsigned long long)pci_resource_start(pci_dev, idx),
+ (unsigned long long)pci_resource_end(pci_dev, idx),
+ (unsigned long long)pci_resource_flags(pci_dev, idx), length);
+
+ if (!(pci_resource_flags(pci_dev, idx) & IORESOURCE_MEM)) {
+ HPI_DEBUG_LOG(ERROR, "not an io memory resource\n");
+ return NULL;
+ }
+
+ if (length > pci_resource_len(pci_dev, idx)) {
+		HPI_DEBUG_LOG(ERROR, "resource too small for requested %d\n",
+ length);
+ return NULL;
+ }
+
+ return ioremap(pci_resource_start(pci_dev, idx), length);
+}
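/* Illustrative sketch only, not part of the patch: mapping and unmapping a
 * single BAR with the helpers above, as the probe code in hpioctl.c does for
 * each adapter memory space. BAR 0 is chosen arbitrarily here.
 */
static int example_map_bar0(struct pci_dev *pci_dev)
{
	void __iomem *base;
	unsigned int len = pci_resource_len(pci_dev, 0);

	base = hpios_map_io(pci_dev, 0, len);
	if (!base)
		return -ENODEV;

	/* ... register access via readl(base + offset) ... */

	hpios_unmap_io(base, len);
	return 0;
}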
diff --git a/sound/pci/asihpi/hpios.h b/sound/pci/asihpi/hpios.h
new file mode 100644
index 000000000000..a62c3f1e5f09
--- /dev/null
+++ b/sound/pci/asihpi/hpios.h
@@ -0,0 +1,178 @@
+/******************************************************************************
+
+ AudioScience HPI driver
+ Copyright (C) 1997-2010 AudioScience Inc. <support@audioscience.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation;
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+HPI Operating System Specific macros for Linux Kernel driver
+
+(C) Copyright AudioScience Inc. 1997-2003
+******************************************************************************/
+#ifndef _HPIOS_H_
+#define _HPIOS_H_
+
+#undef HPI_OS_LINUX_KERNEL
+#define HPI_OS_LINUX_KERNEL
+
+#define HPI_OS_DEFINED
+#define HPI_KERNEL_MODE
+
+#define HPI_REASSIGN_DUPLICATE_ADAPTER_IDX
+
+#include <linux/io.h>
+#include <asm/system.h>
+#include <linux/ioctl.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+
+#define HPI_NO_OS_FILE_OPS
+
+#ifdef CONFIG_64BIT
+#define HPI64BIT
+#endif
+
+/** Details of a memory area allocated with dma_alloc_coherent.
+Holds all the info needed as parameters for dma_free_coherent.
+*/
+struct consistent_dma_area {
+ struct device *pdev;
+ /* looks like dma-mapping dma_devres ?! */
+ size_t size;
+ void *vaddr;
+ dma_addr_t dma_handle;
+};
+
+static inline u16 hpios_locked_mem_get_phys_addr(struct consistent_dma_area
+ *locked_mem_handle, u32 *p_physical_addr)
+{
+ *p_physical_addr = locked_mem_handle->dma_handle;
+ return 0;
+}
+
+static inline u16 hpios_locked_mem_get_virt_addr(struct consistent_dma_area
+ *locked_mem_handle, void **pp_virtual_addr)
+{
+ *pp_virtual_addr = locked_mem_handle->vaddr;
+ return 0;
+}
+
+static inline u16 hpios_locked_mem_valid(struct consistent_dma_area
+ *locked_mem_handle)
+{
+ return locked_mem_handle->size != 0;
+}
+
+struct hpi_ioctl_linux {
+ void __user *phm;
+ void __user *phr;
+};
+
+/* Conflict?: 'H' is already used by a number of drivers (hid, bluetooth hci,
+ and some sound drivers: sb16, hdsp, emu10k). AFAIK 0xFC is an unused command.
+*/
+#define HPI_IOCTL_LINUX _IOWR('H', 0xFC, struct hpi_ioctl_linux)
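/* Illustrative sketch only, not part of the patch: a user-space HPI library
 * call is funnelled through this ioctl roughly as below. The device node
 * name is an assumption; the struct layout matches hpi_ioctl_linux above.
 */
#if 0	/* user-space example, not kernel code */
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>

static int example_send(void *msg, void *resp)
{
	struct hpi_ioctl_linux io = { .phm = msg, .phr = resp };
	int fd = open("/dev/asihpi", O_RDWR);	/* node name assumed */
	int err;

	if (fd < 0)
		return -1;
	err = ioctl(fd, HPI_IOCTL_LINUX, &io);
	close(fd);
	return err;
}
#endif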
+
+#define HPI_DEBUG_FLAG_ERROR KERN_ERR
+#define HPI_DEBUG_FLAG_WARNING KERN_WARNING
+#define HPI_DEBUG_FLAG_NOTICE KERN_NOTICE
+#define HPI_DEBUG_FLAG_INFO KERN_INFO
+#define HPI_DEBUG_FLAG_DEBUG KERN_DEBUG
+#define HPI_DEBUG_FLAG_VERBOSE KERN_DEBUG /* kernel has no verbose */
+
+#include <linux/spinlock.h>
+
+#define HPI_LOCKING
+
+struct hpios_spinlock {
+ spinlock_t lock; /* SEE hpios_spinlock */
+ int lock_context;
+};
+
+/* The reason for all this evilness is that ALSA calls some of a driver's
+ * operators in atomic context, and some not. But all our functions channel
+ * through the HPI_Message conduit, so we can't handle the different contexts
+ * per function.
+ */
+#define IN_LOCK_BH 1
+#define IN_LOCK_IRQ 0
+static inline void cond_lock(struct hpios_spinlock *l)
+{
+ if (irqs_disabled()) {
+ /* NO bh or isr can execute on this processor,
+ so ordinary lock will do
+ */
+ spin_lock(&((l)->lock));
+ l->lock_context = IN_LOCK_IRQ;
+ } else {
+ spin_lock_bh(&((l)->lock));
+ l->lock_context = IN_LOCK_BH;
+ }
+}
+
+static inline void cond_unlock(struct hpios_spinlock *l)
+{
+ if (l->lock_context == IN_LOCK_BH)
+ spin_unlock_bh(&((l)->lock));
+ else
+ spin_unlock(&((l)->lock));
+}
+
+#define hpios_msgxlock_init(obj) spin_lock_init(&(obj)->lock)
+#define hpios_msgxlock_lock(obj) cond_lock(obj)
+#define hpios_msgxlock_un_lock(obj) cond_unlock(obj)
+
+#define hpios_dsplock_init(obj) spin_lock_init(&(obj)->dsp_lock.lock)
+#define hpios_dsplock_lock(obj) cond_lock(&(obj)->dsp_lock)
+#define hpios_dsplock_unlock(obj) cond_unlock(&(obj)->dsp_lock)
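/* Illustrative sketch only, not part of the patch: how the message layer is
 * expected to use the conditional lock above, so one code path is safe
 * whether ALSA called it in atomic context or not.
 */
static struct hpios_spinlock example_msgx_lock;

static void example_locked_transaction(void)
{
	hpios_msgxlock_init(&example_msgx_lock);	/* once, at module init */

	hpios_msgxlock_lock(&example_msgx_lock);
	/* ... exchange one HPI message with the adapter ... */
	hpios_msgxlock_un_lock(&example_msgx_lock);
}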
+
+#ifdef CONFIG_SND_DEBUG
+#define HPI_DEBUG
+#endif
+
+#define HPI_ALIST_LOCKING
+#define hpios_alistlock_init(obj) spin_lock_init(&((obj)->list_lock.lock))
+#define hpios_alistlock_lock(obj) spin_lock(&((obj)->list_lock.lock))
+#define hpios_alistlock_un_lock(obj) spin_unlock(&((obj)->list_lock.lock))
+
+struct hpi_adapter {
+ /* mutex prevents contention for one card
+ between multiple user programs (via ioctl) */
+ struct mutex mutex;
+ u16 index;
+ u16 type;
+
+ /* ALSA card structure */
+ void *snd_card_asihpi;
+
+ char *p_buffer;
+ size_t buffer_size;
+ struct pci_dev *pci;
+ void __iomem *ap_remapped_mem_base[HPI_MAX_ADAPTER_MEM_SPACES];
+};
+
+static inline void hpios_unmap_io(void __iomem *addr,
+ unsigned long size)
+{
+ iounmap(addr);
+}
+
+void __iomem *hpios_map_io(struct pci_dev *pci_dev, int idx,
+ unsigned int length);
+
+#endif
diff --git a/sound/pci/asihpi/hpipcida.h b/sound/pci/asihpi/hpipcida.h
new file mode 100644
index 000000000000..bb30868ce1a3
--- /dev/null
+++ b/sound/pci/asihpi/hpipcida.h
@@ -0,0 +1,37 @@
+/******************************************************************************
+
+ AudioScience HPI driver
+ Copyright (C) 1997-2010 AudioScience Inc. <support@audioscience.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation;
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ Array initializer for PCI card IDs
+
+(C) Copyright AudioScience Inc. 1998-2003
+*******************************************************************************/
+
+/*NOTE: when adding new lines to this header file
+ they MUST be grouped by HPI entry point.
+*/
+
+{
+HPI_PCI_VENDOR_ID_TI, HPI_PCI_DEV_ID_DSP6205,
+ HPI_PCI_VENDOR_ID_AUDIOSCIENCE, PCI_ANY_ID, 0, 0,
+ (kernel_ulong_t) HPI_6205}
+, {
+HPI_PCI_VENDOR_ID_TI, HPI_PCI_DEV_ID_PCI2040,
+ HPI_PCI_VENDOR_ID_AUDIOSCIENCE, PCI_ANY_ID, 0, 0,
+ (kernel_ulong_t) HPI_6000}
+, {
+0}
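/* Illustrative sketch only, not part of the patch: this header is meant to be
 * textually #included inside a struct pci_device_id initializer, roughly as
 * below. The table name and its placement are assumptions.
 */
static const struct pci_device_id example_asihpi_pci_tbl[] = {
#include "hpipcida.h"
};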
diff --git a/sound/pci/cs4281.c b/sound/pci/cs4281.c
index 9edc65059e3e..6772070ed492 100644
--- a/sound/pci/cs4281.c
+++ b/sound/pci/cs4281.c
@@ -1139,40 +1139,28 @@ static void snd_cs4281_proc_read(struct snd_info_entry *entry,
snd_iprintf(buffer, "Spurious end IRQs : %u\n", chip->spurious_dtc_irq);
}
-static long snd_cs4281_BA0_read(struct snd_info_entry *entry,
- void *file_private_data,
- struct file *file, char __user *buf,
- unsigned long count, unsigned long pos)
+static ssize_t snd_cs4281_BA0_read(struct snd_info_entry *entry,
+ void *file_private_data,
+ struct file *file, char __user *buf,
+ size_t count, loff_t pos)
{
- long size;
struct cs4281 *chip = entry->private_data;
- size = count;
- if (pos + size > CS4281_BA0_SIZE)
- size = (long)CS4281_BA0_SIZE - pos;
- if (size > 0) {
- if (copy_to_user_fromio(buf, chip->ba0 + pos, size))
- return -EFAULT;
- }
- return size;
+ if (copy_to_user_fromio(buf, chip->ba0 + pos, count))
+ return -EFAULT;
+ return count;
}
-static long snd_cs4281_BA1_read(struct snd_info_entry *entry,
- void *file_private_data,
- struct file *file, char __user *buf,
- unsigned long count, unsigned long pos)
+static ssize_t snd_cs4281_BA1_read(struct snd_info_entry *entry,
+ void *file_private_data,
+ struct file *file, char __user *buf,
+ size_t count, loff_t pos)
{
- long size;
struct cs4281 *chip = entry->private_data;
- size = count;
- if (pos + size > CS4281_BA1_SIZE)
- size = (long)CS4281_BA1_SIZE - pos;
- if (size > 0) {
- if (copy_to_user_fromio(buf, chip->ba1 + pos, size))
- return -EFAULT;
- }
- return size;
+ if (copy_to_user_fromio(buf, chip->ba1 + pos, count))
+ return -EFAULT;
+ return count;
}
static struct snd_info_entry_ops snd_cs4281_proc_ops_BA0 = {
diff --git a/sound/pci/cs46xx/cs46xx_lib.c b/sound/pci/cs46xx/cs46xx_lib.c
index 3f99a5e8528c..aad37082cb6e 100644
--- a/sound/pci/cs46xx/cs46xx_lib.c
+++ b/sound/pci/cs46xx/cs46xx_lib.c
@@ -2657,21 +2657,16 @@ static inline void snd_cs46xx_remove_gameport(struct snd_cs46xx *chip) { }
* proc interface
*/
-static long snd_cs46xx_io_read(struct snd_info_entry *entry, void *file_private_data,
- struct file *file, char __user *buf,
- unsigned long count, unsigned long pos)
+static ssize_t snd_cs46xx_io_read(struct snd_info_entry *entry,
+ void *file_private_data,
+ struct file *file, char __user *buf,
+ size_t count, loff_t pos)
{
- long size;
struct snd_cs46xx_region *region = entry->private_data;
- size = count;
- if (pos + (size_t)size > region->size)
- size = region->size - pos;
- if (size > 0) {
- if (copy_to_user_fromio(buf, region->remap_addr + pos, size))
- return -EFAULT;
- }
- return size;
+ if (copy_to_user_fromio(buf, region->remap_addr + pos, count))
+ return -EFAULT;
+ return count;
}
static struct snd_info_entry_ops snd_cs46xx_proc_io_ops = {
diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
index f18bd6207c50..66c7fb3ced3e 100644
--- a/sound/pci/emu10k1/emu10k1_main.c
+++ b/sound/pci/emu10k1/emu10k1_main.c
@@ -1787,7 +1787,7 @@ int __devinit snd_emu10k1_create(struct snd_card *card,
else if (subsystem)
snd_printdd("Sound card name = %s, "
"vendor = 0x%x, device = 0x%x, subsystem = 0x%x. "
- "Forced to subsytem = 0x%x\n", c->name,
+ "Forced to subsystem = 0x%x\n", c->name,
pci->vendor, pci->device, emu->serial, c->subsystem);
else
snd_printdd("Sound card name = %s, "
diff --git a/sound/pci/emu10k1/emuproc.c b/sound/pci/emu10k1/emuproc.c
index baa7cd508cd8..bc38dd4d071f 100644
--- a/sound/pci/emu10k1/emuproc.c
+++ b/sound/pci/emu10k1/emuproc.c
@@ -341,15 +341,17 @@ static void snd_emu10k1_proc_acode_read(struct snd_info_entry *entry,
#define TOTAL_SIZE_CODE (0x200*8)
#define A_TOTAL_SIZE_CODE (0x400*8)
-static long snd_emu10k1_fx8010_read(struct snd_info_entry *entry,
- void *file_private_data,
- struct file *file, char __user *buf,
- unsigned long count, unsigned long pos)
+static ssize_t snd_emu10k1_fx8010_read(struct snd_info_entry *entry,
+ void *file_private_data,
+ struct file *file, char __user *buf,
+ size_t count, loff_t pos)
{
- long size;
struct snd_emu10k1 *emu = entry->private_data;
unsigned int offset;
int tram_addr = 0;
+ unsigned int *tmp;
+ long res;
+ unsigned int idx;
if (!strcmp(entry->name, "fx8010_tram_addr")) {
offset = TANKMEMADDRREGBASE;
@@ -361,30 +363,25 @@ static long snd_emu10k1_fx8010_read(struct snd_info_entry *entry,
} else {
offset = emu->audigy ? A_FXGPREGBASE : FXGPREGBASE;
}
- size = count;
- if (pos + size > entry->size)
- size = (long)entry->size - pos;
- if (size > 0) {
- unsigned int *tmp;
- long res;
- unsigned int idx;
- if ((tmp = kmalloc(size + 8, GFP_KERNEL)) == NULL)
- return -ENOMEM;
- for (idx = 0; idx < ((pos & 3) + size + 3) >> 2; idx++)
- if (tram_addr && emu->audigy) {
- tmp[idx] = snd_emu10k1_ptr_read(emu, offset + idx + (pos >> 2), 0) >> 11;
- tmp[idx] |= snd_emu10k1_ptr_read(emu, 0x100 + idx + (pos >> 2), 0) << 20;
- } else
- tmp[idx] = snd_emu10k1_ptr_read(emu, offset + idx + (pos >> 2), 0);
- if (copy_to_user(buf, ((char *)tmp) + (pos & 3), size))
- res = -EFAULT;
- else {
- res = size;
+
+ tmp = kmalloc(count + 8, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+ for (idx = 0; idx < ((pos & 3) + count + 3) >> 2; idx++) {
+ unsigned int val;
+ val = snd_emu10k1_ptr_read(emu, offset + idx + (pos >> 2), 0);
+ if (tram_addr && emu->audigy) {
+ val >>= 11;
+ val |= snd_emu10k1_ptr_read(emu, 0x100 + idx + (pos >> 2), 0) << 20;
}
- kfree(tmp);
- return res;
+ tmp[idx] = val;
}
- return 0;
+ if (copy_to_user(buf, ((char *)tmp) + (pos & 3), count))
+ res = -EFAULT;
+ else
+ res = count;
+ kfree(tmp);
+ return res;
}
static void snd_emu10k1_proc_voices_read(struct snd_info_entry *entry,
diff --git a/sound/pci/es1968.c b/sound/pci/es1968.c
index ecaea9fb48ec..23a58f0d6cb9 100644
--- a/sound/pci/es1968.c
+++ b/sound/pci/es1968.c
@@ -104,6 +104,7 @@
#include <linux/gameport.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
+#include <linux/input.h>
#include <sound/core.h>
#include <sound/pcm.h>
@@ -517,14 +518,9 @@ struct es1968 {
/* ALSA Stuff */
struct snd_ac97 *ac97;
- struct snd_kcontrol *master_switch; /* for h/w volume control */
- struct snd_kcontrol *master_volume;
-
struct snd_rawmidi *rmidi;
spinlock_t reg_lock;
- spinlock_t ac97_lock;
- struct tasklet_struct hwvol_tq;
unsigned int in_suspend;
/* Maestro Stuff */
@@ -547,6 +543,16 @@ struct es1968 {
#ifdef SUPPORT_JOYSTICK
struct gameport *gameport;
#endif
+
+#ifdef CONFIG_SND_ES1968_INPUT
+ struct input_dev *input_dev;
+ char phys[64]; /* physical device path */
+#else
+ struct snd_kcontrol *master_switch; /* for h/w volume control */
+ struct snd_kcontrol *master_volume;
+ spinlock_t ac97_lock;
+ struct tasklet_struct hwvol_tq;
+#endif
};
static irqreturn_t snd_es1968_interrupt(int irq, void *dev_id);
@@ -632,28 +638,38 @@ static int snd_es1968_ac97_wait_poll(struct es1968 *chip)
static void snd_es1968_ac97_write(struct snd_ac97 *ac97, unsigned short reg, unsigned short val)
{
struct es1968 *chip = ac97->private_data;
+#ifndef CONFIG_SND_ES1968_INPUT
unsigned long flags;
+#endif
snd_es1968_ac97_wait(chip);
/* Write the bus */
+#ifndef CONFIG_SND_ES1968_INPUT
spin_lock_irqsave(&chip->ac97_lock, flags);
+#endif
outw(val, chip->io_port + ESM_AC97_DATA);
/*msleep(1);*/
outb(reg, chip->io_port + ESM_AC97_INDEX);
/*msleep(1);*/
+#ifndef CONFIG_SND_ES1968_INPUT
spin_unlock_irqrestore(&chip->ac97_lock, flags);
+#endif
}
static unsigned short snd_es1968_ac97_read(struct snd_ac97 *ac97, unsigned short reg)
{
u16 data = 0;
struct es1968 *chip = ac97->private_data;
+#ifndef CONFIG_SND_ES1968_INPUT
unsigned long flags;
+#endif
snd_es1968_ac97_wait(chip);
+#ifndef CONFIG_SND_ES1968_INPUT
spin_lock_irqsave(&chip->ac97_lock, flags);
+#endif
outb(reg | 0x80, chip->io_port + ESM_AC97_INDEX);
/*msleep(1);*/
@@ -661,7 +677,9 @@ static unsigned short snd_es1968_ac97_read(struct snd_ac97 *ac97, unsigned short
data = inw(chip->io_port + ESM_AC97_DATA);
/*msleep(1);*/
}
+#ifndef CONFIG_SND_ES1968_INPUT
spin_unlock_irqrestore(&chip->ac97_lock, flags);
+#endif
return data;
}
@@ -1874,13 +1892,17 @@ static void snd_es1968_update_pcm(struct es1968 *chip, struct esschan *es)
}
}
-/*
- */
+/* The hardware volume works by incrementing / decrementing 2 counters
+ (without wrap around) in response to volume button presses and then
+ generating an interrupt. The pair of counters is stored in bits 1-3 and 5-7
+ of a byte wide register. The meaning of bits 0 and 4 is unknown. */
static void es1968_update_hw_volume(unsigned long private_data)
{
struct es1968 *chip = (struct es1968 *) private_data;
int x, val;
+#ifndef CONFIG_SND_ES1968_INPUT
unsigned long flags;
+#endif
/* Figure out which volume control button was pushed,
based on differences from the default register
@@ -1895,6 +1917,7 @@ static void es1968_update_hw_volume(unsigned long private_data)
if (chip->in_suspend)
return;
+#ifndef CONFIG_SND_ES1968_INPUT
if (! chip->master_switch || ! chip->master_volume)
return;
@@ -1937,6 +1960,35 @@ static void es1968_update_hw_volume(unsigned long private_data)
break;
}
spin_unlock_irqrestore(&chip->ac97_lock, flags);
+#else
+ if (!chip->input_dev)
+ return;
+
+ val = 0;
+ switch (x) {
+ case 0x88:
+		/* The counters have not changed, yet we've received an HV
+ interrupt. According to tests run by various people this
+ happens when pressing the mute button. */
+ val = KEY_MUTE;
+ break;
+ case 0xaa:
+ /* counters increased by 1 -> volume up */
+ val = KEY_VOLUMEUP;
+ break;
+ case 0x66:
+ /* counters decreased by 1 -> volume down */
+ val = KEY_VOLUMEDOWN;
+ break;
+ }
+
+ if (val) {
+ input_report_key(chip->input_dev, val, 1);
+ input_sync(chip->input_dev);
+ input_report_key(chip->input_dev, val, 0);
+ input_sync(chip->input_dev);
+ }
+#endif
}
/*
@@ -1953,7 +2005,11 @@ static irqreturn_t snd_es1968_interrupt(int irq, void *dev_id)
outw(inw(chip->io_port + 4) & 1, chip->io_port + 4);
if (event & ESM_HWVOL_IRQ)
+#ifdef CONFIG_SND_ES1968_INPUT
+ es1968_update_hw_volume((unsigned long)chip);
+#else
tasklet_schedule(&chip->hwvol_tq); /* we'll do this later */
+#endif
/* else ack 'em all, i imagine */
outb(0xFF, chip->io_port + 0x1A);
@@ -1993,7 +2049,9 @@ snd_es1968_mixer(struct es1968 *chip)
{
struct snd_ac97_bus *pbus;
struct snd_ac97_template ac97;
+#ifndef CONFIG_SND_ES1968_INPUT
struct snd_ctl_elem_id elem_id;
+#endif
int err;
static struct snd_ac97_bus_ops ops = {
.write = snd_es1968_ac97_write,
@@ -2009,6 +2067,7 @@ snd_es1968_mixer(struct es1968 *chip)
if ((err = snd_ac97_mixer(pbus, &ac97, &chip->ac97)) < 0)
return err;
+#ifndef CONFIG_SND_ES1968_INPUT
/* attach master switch / volumes for h/w volume control */
memset(&elem_id, 0, sizeof(elem_id));
elem_id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
@@ -2018,6 +2077,7 @@ snd_es1968_mixer(struct es1968 *chip)
elem_id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
strcpy(elem_id.name, "Master Playback Volume");
chip->master_volume = snd_ctl_find_id(chip->card, &elem_id);
+#endif
return 0;
}
@@ -2341,6 +2401,7 @@ static void snd_es1968_start_irq(struct es1968 *chip)
w = ESM_HIRQ_DSIE | ESM_HIRQ_HW_VOLUME;
if (chip->rmidi)
w |= ESM_HIRQ_MPU401;
+ outb(w, chip->io_port + 0x1A);
outw(w, chip->io_port + ESM_PORT_HOST_IRQ);
}
@@ -2474,8 +2535,49 @@ static inline int snd_es1968_create_gameport(struct es1968 *chip, int dev) { ret
static inline void snd_es1968_free_gameport(struct es1968 *chip) { }
#endif
+#ifdef CONFIG_SND_ES1968_INPUT
+static int __devinit snd_es1968_input_register(struct es1968 *chip)
+{
+ struct input_dev *input_dev;
+ int err;
+
+ input_dev = input_allocate_device();
+ if (!input_dev)
+ return -ENOMEM;
+
+ snprintf(chip->phys, sizeof(chip->phys), "pci-%s/input0",
+ pci_name(chip->pci));
+
+ input_dev->name = chip->card->driver;
+ input_dev->phys = chip->phys;
+ input_dev->id.bustype = BUS_PCI;
+ input_dev->id.vendor = chip->pci->vendor;
+ input_dev->id.product = chip->pci->device;
+ input_dev->dev.parent = &chip->pci->dev;
+
+ __set_bit(EV_KEY, input_dev->evbit);
+ __set_bit(KEY_MUTE, input_dev->keybit);
+ __set_bit(KEY_VOLUMEDOWN, input_dev->keybit);
+ __set_bit(KEY_VOLUMEUP, input_dev->keybit);
+
+ err = input_register_device(input_dev);
+ if (err) {
+ input_free_device(input_dev);
+ return err;
+ }
+
+ chip->input_dev = input_dev;
+ return 0;
+}
+#endif /* CONFIG_SND_ES1968_INPUT */
+
static int snd_es1968_free(struct es1968 *chip)
{
+#ifdef CONFIG_SND_ES1968_INPUT
+ if (chip->input_dev)
+ input_unregister_device(chip->input_dev);
+#endif
+
if (chip->io_port) {
if (chip->irq >= 0)
synchronize_irq(chip->irq);
@@ -2486,8 +2588,6 @@ static int snd_es1968_free(struct es1968 *chip)
if (chip->irq >= 0)
free_irq(chip->irq, chip);
snd_es1968_free_gameport(chip);
- chip->master_switch = NULL;
- chip->master_volume = NULL;
pci_release_regions(chip->pci);
pci_disable_device(chip->pci);
kfree(chip);
@@ -2558,9 +2658,11 @@ static int __devinit snd_es1968_create(struct snd_card *card,
spin_lock_init(&chip->substream_lock);
INIT_LIST_HEAD(&chip->buf_list);
INIT_LIST_HEAD(&chip->substream_list);
- spin_lock_init(&chip->ac97_lock);
mutex_init(&chip->memory_mutex);
+#ifndef CONFIG_SND_ES1968_INPUT
+ spin_lock_init(&chip->ac97_lock);
tasklet_init(&chip->hwvol_tq, es1968_update_hw_volume, (unsigned long)chip);
+#endif
chip->card = card;
chip->pci = pci;
chip->irq = -1;
@@ -2713,6 +2815,13 @@ static int __devinit snd_es1968_probe(struct pci_dev *pci,
snd_es1968_create_gameport(chip, dev);
+#ifdef CONFIG_SND_ES1968_INPUT
+ err = snd_es1968_input_register(chip);
+ if (err)
+ snd_printk(KERN_WARNING "Input device registration "
+ "failed with error %i", err);
+#endif
+
snd_es1968_start_irq(chip);
chip->clock = clock[dev];
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 0e76ac2b2ace..a3d638c8c1fd 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -1209,8 +1209,7 @@ static void free_hda_cache(struct hda_cache_rec *cache)
}
/* query the hash. allocate an entry if not found. */
-static struct hda_cache_head *get_alloc_hash(struct hda_cache_rec *cache,
- u32 key)
+static struct hda_cache_head *get_hash(struct hda_cache_rec *cache, u32 key)
{
u16 idx = key % (u16)ARRAY_SIZE(cache->hash);
u16 cur = cache->hash[idx];
@@ -1222,17 +1221,27 @@ static struct hda_cache_head *get_alloc_hash(struct hda_cache_rec *cache,
return info;
cur = info->next;
}
+ return NULL;
+}
- /* add a new hash entry */
- info = snd_array_new(&cache->buf);
- if (!info)
- return NULL;
- cur = snd_array_index(&cache->buf, info);
- info->key = key;
- info->val = 0;
- info->next = cache->hash[idx];
- cache->hash[idx] = cur;
-
+/* query the hash. allocate an entry if not found. */
+static struct hda_cache_head *get_alloc_hash(struct hda_cache_rec *cache,
+ u32 key)
+{
+ struct hda_cache_head *info = get_hash(cache, key);
+ if (!info) {
+ u16 idx, cur;
+ /* add a new hash entry */
+ info = snd_array_new(&cache->buf);
+ if (!info)
+ return NULL;
+ cur = snd_array_index(&cache->buf, info);
+ info->key = key;
+ info->val = 0;
+ idx = key % (u16)ARRAY_SIZE(cache->hash);
+ info->next = cache->hash[idx];
+ cache->hash[idx] = cur;
+ }
return info;
}
@@ -1461,6 +1470,8 @@ int snd_hda_codec_amp_update(struct hda_codec *codec, hda_nid_t nid, int ch,
info = get_alloc_amp_hash(codec, HDA_HASH_KEY(nid, direction, idx));
if (!info)
return 0;
+ if (snd_BUG_ON(mask & ~0xff))
+ mask &= 0xff;
val &= mask;
val |= get_vol_mute(codec, info, nid, ch, direction, idx) & ~mask;
if (info->vol[ch] == val)
@@ -1486,6 +1497,9 @@ int snd_hda_codec_amp_stereo(struct hda_codec *codec, hda_nid_t nid,
int direction, int idx, int mask, int val)
{
int ch, ret = 0;
+
+ if (snd_BUG_ON(mask & ~0xff))
+ mask &= 0xff;
for (ch = 0; ch < 2; ch++)
ret |= snd_hda_codec_amp_update(codec, nid, ch, direction,
idx, mask, val);
@@ -2717,6 +2731,41 @@ int snd_hda_codec_write_cache(struct hda_codec *codec, hda_nid_t nid,
EXPORT_SYMBOL_HDA(snd_hda_codec_write_cache);
/**
+ * snd_hda_codec_update_cache - check cache and write the cmd only when needed
+ * @codec: the HDA codec
+ * @nid: NID to send the command
+ * @direct: direct flag
+ * @verb: the verb to send
+ * @parm: the parameter for the verb
+ *
+ * This function works like snd_hda_codec_write_cache(), but it doesn't send
+ * command if the parameter is already identical with the cached value.
+ * If not, it sends the command and refreshes the cache.
+ *
+ * Returns 0 if successful, or a negative error code.
+ */
+int snd_hda_codec_update_cache(struct hda_codec *codec, hda_nid_t nid,
+ int direct, unsigned int verb, unsigned int parm)
+{
+ struct hda_cache_head *c;
+ u32 key;
+
+ /* parm may contain the verb stuff for get/set amp */
+ verb = verb | (parm >> 8);
+ parm &= 0xff;
+ key = build_cmd_cache_key(nid, verb);
+ mutex_lock(&codec->bus->cmd_mutex);
+ c = get_hash(&codec->cmd_cache, key);
+ if (c && c->val == parm) {
+ mutex_unlock(&codec->bus->cmd_mutex);
+ return 0;
+ }
+ mutex_unlock(&codec->bus->cmd_mutex);
+ return snd_hda_codec_write_cache(codec, nid, direct, verb, parm);
+}
+EXPORT_SYMBOL_HDA(snd_hda_codec_update_cache);
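/* Illustrative sketch only, not part of the patch: a caller that re-applies
 * pin state (e.g. on jack events) could use the new helper so the verb is
 * only sent when the cached value actually differs.
 */
static void example_set_pin_out(struct hda_codec *codec, hda_nid_t nid)
{
	snd_hda_codec_update_cache(codec, nid, 0,
				   AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
}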
+
+/**
* snd_hda_codec_resume_cache - Resume the all commands from the cache
* @codec: HD-audio codec
*
@@ -4218,7 +4267,8 @@ int snd_hda_parse_pin_def_config(struct hda_codec *codec,
break;
case AC_JACK_MIC_IN: {
int preferred, alt;
- if (loc == AC_JACK_LOC_FRONT) {
+ if (loc == AC_JACK_LOC_FRONT ||
+ (loc & 0x30) == AC_JACK_LOC_INTERNAL) {
preferred = AUTO_PIN_FRONT_MIC;
alt = AUTO_PIN_MIC;
} else {
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index b75da47571e6..49e939e7e5cd 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -885,9 +885,12 @@ int snd_hda_codec_write_cache(struct hda_codec *codec, hda_nid_t nid,
int direct, unsigned int verb, unsigned int parm);
void snd_hda_sequence_write_cache(struct hda_codec *codec,
const struct hda_verb *seq);
+int snd_hda_codec_update_cache(struct hda_codec *codec, hda_nid_t nid,
+ int direct, unsigned int verb, unsigned int parm);
void snd_hda_codec_resume_cache(struct hda_codec *codec);
#else
#define snd_hda_codec_write_cache snd_hda_codec_write
+#define snd_hda_codec_update_cache snd_hda_codec_write
#define snd_hda_sequence_write_cache snd_hda_sequence_write
#endif
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index cec68152dcb1..170610e1d7da 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -84,7 +84,7 @@ module_param_array(bdl_pos_adj, int, NULL, 0644);
MODULE_PARM_DESC(bdl_pos_adj, "BDL position adjustment offset.");
module_param_array(probe_mask, int, NULL, 0444);
MODULE_PARM_DESC(probe_mask, "Bitmask to probe codecs (default = -1).");
-module_param_array(probe_only, bool, NULL, 0444);
+module_param_array(probe_only, int, NULL, 0444);
MODULE_PARM_DESC(probe_only, "Only probing and no codec initialization.");
module_param(single_cmd, bool, 0444);
MODULE_PARM_DESC(single_cmd, "Use single command to communicate with codecs "
@@ -174,7 +174,7 @@ MODULE_DESCRIPTION("Intel HDA driver");
#define ICH6_GSTS_FSTS (1 << 1) /* flush status */
#define ICH6_REG_INTCTL 0x20
#define ICH6_REG_INTSTS 0x24
-#define ICH6_REG_WALCLK 0x30
+#define ICH6_REG_WALLCLK 0x30 /* 24 MHz source */
#define ICH6_REG_SYNC 0x34
#define ICH6_REG_CORBLBASE 0x40
#define ICH6_REG_CORBUBASE 0x44
@@ -340,8 +340,8 @@ struct azx_dev {
unsigned int period_bytes; /* size of the period in bytes */
unsigned int frags; /* number for period in the play buffer */
unsigned int fifo_size; /* FIFO size */
- unsigned long start_jiffies; /* start + minimum jiffies */
- unsigned long min_jiffies; /* minimum jiffies before position is valid */
+ unsigned long start_wallclk; /* start + minimum wallclk */
+ unsigned long period_wallclk; /* wallclk for period */
void __iomem *sd_addr; /* stream descriptor pointer */
@@ -361,7 +361,6 @@ struct azx_dev {
unsigned int opened :1;
unsigned int running :1;
unsigned int irq_pending :1;
- unsigned int start_flag: 1; /* stream full start flag */
/*
* For VIA:
* A flag to ensure DMA position is 0
@@ -425,7 +424,7 @@ struct azx {
struct snd_dma_buffer posbuf;
/* flags */
- int position_fix;
+ int position_fix[2]; /* for both playback/capture streams */
int poll_count;
unsigned int running :1;
unsigned int initialized :1;
@@ -858,10 +857,13 @@ static void azx_power_notify(struct hda_bus *bus);
#endif
/* reset codec link */
-static int azx_reset(struct azx *chip)
+static int azx_reset(struct azx *chip, int full_reset)
{
int count;
+ if (!full_reset)
+ goto __skip;
+
/* clear STATESTS */
azx_writeb(chip, STATESTS, STATESTS_INT_MASK);
@@ -887,6 +889,7 @@ static int azx_reset(struct azx *chip)
/* Brent Chartrand said to wait >= 540us for codecs to initialize */
msleep(1);
+ __skip:
/* check to see if controller is ready */
if (!azx_readb(chip, GCTL)) {
snd_printd(SFX "azx_reset: controller not ready!\n");
@@ -998,13 +1001,13 @@ static void azx_stream_stop(struct azx *chip, struct azx_dev *azx_dev)
/*
* reset and start the controller registers
*/
-static void azx_init_chip(struct azx *chip)
+static void azx_init_chip(struct azx *chip, int full_reset)
{
if (chip->initialized)
return;
/* reset controller */
- azx_reset(chip);
+ azx_reset(chip, full_reset);
/* initialize interrupts */
azx_int_clear(chip);
@@ -1302,8 +1305,10 @@ static int azx_setup_controller(struct azx *chip, struct azx_dev *azx_dev)
azx_sd_writel(azx_dev, SD_BDLPU, upper_32_bits(azx_dev->bdl.addr));
/* enable the position buffer */
- if (chip->position_fix == POS_FIX_POSBUF ||
- chip->position_fix == POS_FIX_AUTO ||
+ if (chip->position_fix[0] == POS_FIX_POSBUF ||
+ chip->position_fix[0] == POS_FIX_AUTO ||
+ chip->position_fix[1] == POS_FIX_POSBUF ||
+ chip->position_fix[1] == POS_FIX_AUTO ||
chip->via_dmapos_patch) {
if (!(azx_readl(chip, DPLBASE) & ICH6_DPLBASE_ENABLE))
azx_writel(chip, DPLBASE,
@@ -1348,7 +1353,7 @@ static void azx_bus_reset(struct hda_bus *bus)
bus->in_reset = 1;
azx_stop_chip(chip);
- azx_init_chip(chip);
+ azx_init_chip(chip, 1);
#ifdef CONFIG_PM
if (chip->initialized) {
int i;
@@ -1422,7 +1427,7 @@ static int __devinit azx_codec_create(struct azx *chip, const char *model)
* get back to the sanity state.
*/
azx_stop_chip(chip);
- azx_init_chip(chip);
+ azx_init_chip(chip, 1);
}
}
}
@@ -1670,8 +1675,9 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream)
return err;
}
- azx_dev->min_jiffies = (runtime->period_size * HZ) /
- (runtime->rate * 2);
+	/* wallclk has 24 MHz clock source */
+ azx_dev->period_wallclk = (((runtime->period_size * 24000) /
+ runtime->rate) * 1000);
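	/* Illustrative arithmetic only, not part of the patch: with a
	 * 1024-frame period at 48000 Hz this yields
	 * ((1024 * 24000) / 48000) * 1000 = 512000 ticks of the 24 MHz
	 * wall clock, i.e. about 21.3 ms, exactly one period.
	 */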
azx_setup_controller(chip, azx_dev);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
azx_dev->fifo_size = azx_sd_readw(azx_dev, SD_FIFOSIZE) + 1;
@@ -1725,14 +1731,15 @@ static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
if (s->pcm->card != substream->pcm->card)
continue;
azx_dev = get_azx_dev(s);
- if (rstart) {
- azx_dev->start_flag = 1;
- azx_dev->start_jiffies = jiffies + azx_dev->min_jiffies;
- }
- if (start)
+ if (start) {
+ azx_dev->start_wallclk = azx_readl(chip, WALLCLK);
+ if (!rstart)
+ azx_dev->start_wallclk -=
+ azx_dev->period_wallclk;
azx_stream_start(chip, azx_dev);
- else
+ } else {
azx_stream_stop(chip, azx_dev);
+ }
azx_dev->running = start;
}
spin_unlock(&chip->reg_lock);
@@ -1843,13 +1850,16 @@ static unsigned int azx_get_position(struct azx *chip,
if (chip->via_dmapos_patch)
pos = azx_via_get_position(chip, azx_dev);
- else if (chip->position_fix == POS_FIX_POSBUF ||
- chip->position_fix == POS_FIX_AUTO) {
- /* use the position buffer */
- pos = le32_to_cpu(*azx_dev->posbuf);
- } else {
- /* read LPIB */
- pos = azx_sd_readl(azx_dev, SD_LPIB);
+ else {
+ int stream = azx_dev->substream->stream;
+ if (chip->position_fix[stream] == POS_FIX_POSBUF ||
+ chip->position_fix[stream] == POS_FIX_AUTO) {
+ /* use the position buffer */
+ pos = le32_to_cpu(*azx_dev->posbuf);
+ } else {
+ /* read LPIB */
+ pos = azx_sd_readl(azx_dev, SD_LPIB);
+ }
}
if (pos >= azx_dev->bufsize)
pos = 0;
@@ -1876,32 +1886,35 @@ static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
*/
static int azx_position_ok(struct azx *chip, struct azx_dev *azx_dev)
{
+ u32 wallclk;
unsigned int pos;
+ int stream;
- if (azx_dev->start_flag &&
- time_before_eq(jiffies, azx_dev->start_jiffies))
+ wallclk = azx_readl(chip, WALLCLK) - azx_dev->start_wallclk;
+ if (wallclk < (azx_dev->period_wallclk * 2) / 3)
return -1; /* bogus (too early) interrupt */
- azx_dev->start_flag = 0;
+ stream = azx_dev->substream->stream;
pos = azx_get_position(chip, azx_dev);
- if (chip->position_fix == POS_FIX_AUTO) {
+ if (chip->position_fix[stream] == POS_FIX_AUTO) {
if (!pos) {
printk(KERN_WARNING
"hda-intel: Invalid position buffer, "
"using LPIB read method instead.\n");
- chip->position_fix = POS_FIX_LPIB;
+ chip->position_fix[stream] = POS_FIX_LPIB;
pos = azx_get_position(chip, azx_dev);
} else
- chip->position_fix = POS_FIX_POSBUF;
+ chip->position_fix[stream] = POS_FIX_POSBUF;
}
- if (!bdl_pos_adj[chip->dev_index])
- return 1; /* no delayed ack */
if (WARN_ONCE(!azx_dev->period_bytes,
"hda-intel: zero azx_dev->period_bytes"))
- return 0; /* this shouldn't happen! */
- if (pos % azx_dev->period_bytes > azx_dev->period_bytes / 2)
- return 0; /* NG - it's below the period boundary */
+ return -1; /* this shouldn't happen! */
+ if (wallclk <= azx_dev->period_wallclk &&
+ pos % azx_dev->period_bytes > azx_dev->period_bytes / 2)
+ /* NG - it's below the first next period boundary */
+ return bdl_pos_adj[chip->dev_index] ? 0 : -1;
+ azx_dev->start_wallclk = wallclk;
return 1; /* OK, it's fine */
}
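/* Illustrative arithmetic only, not part of the patch: continuing the
 * 512000-tick period example from azx_pcm_prepare(), an interrupt seen
 * fewer than (512000 * 2) / 3 = 341333 wall-clock ticks (about 14.2 ms)
 * after the recorded start is rejected as a bogus, too-early IRQ.
 */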
@@ -1911,7 +1924,7 @@ static int azx_position_ok(struct azx *chip, struct azx_dev *azx_dev)
static void azx_irq_pending_work(struct work_struct *work)
{
struct azx *chip = container_of(work, struct azx, irq_pending_work);
- int i, pending;
+ int i, pending, ok;
if (!chip->irq_pending_warned) {
printk(KERN_WARNING
@@ -1930,11 +1943,14 @@ static void azx_irq_pending_work(struct work_struct *work)
!azx_dev->substream ||
!azx_dev->running)
continue;
- if (azx_position_ok(chip, azx_dev)) {
+ ok = azx_position_ok(chip, azx_dev);
+ if (ok > 0) {
azx_dev->irq_pending = 0;
spin_unlock(&chip->reg_lock);
snd_pcm_period_elapsed(azx_dev->substream);
spin_lock(&chip->reg_lock);
+ } else if (ok < 0) {
+ pending = 0; /* too early */
} else
pending++;
}
@@ -2112,7 +2128,7 @@ static void azx_power_notify(struct hda_bus *bus)
}
}
if (power_on)
- azx_init_chip(chip);
+ azx_init_chip(chip, 1);
else if (chip->running && power_save_controller &&
!bus->power_keep_link_on)
azx_stop_chip(chip);
@@ -2182,7 +2198,7 @@ static int azx_resume(struct pci_dev *pci)
azx_init_pci(chip);
if (snd_hda_codecs_inuse(chip->bus))
- azx_init_chip(chip);
+ azx_init_chip(chip, 1);
snd_hda_resume(chip->bus);
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
@@ -2431,7 +2447,8 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
chip->dev_index = dev;
INIT_WORK(&chip->irq_pending_work, azx_irq_pending_work);
- chip->position_fix = check_position_fix(chip, position_fix[dev]);
+ chip->position_fix[0] = chip->position_fix[1] =
+ check_position_fix(chip, position_fix[dev]);
check_probe_mask(chip, dev);
chip->single_cmd = single_cmd;
@@ -2577,7 +2594,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
/* initialize chip */
azx_init_pci(chip);
- azx_init_chip(chip);
+ azx_init_chip(chip, (probe_only[dev] & 2) == 0);
/* codec detection */
if (!chip->codec_mask) {
@@ -2666,7 +2683,7 @@ static int __devinit azx_probe(struct pci_dev *pci,
goto out_free;
}
#endif
- if (!probe_only[dev]) {
+ if ((probe_only[dev] & 1) == 0) {
err = azx_codec_configure(chip);
if (err < 0)
goto out_free;
diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
index 7cee364976ff..7a97f126f6f7 100644
--- a/sound/pci/hda/hda_local.h
+++ b/sound/pci/hda/hda_local.h
@@ -361,7 +361,7 @@ struct hda_bus_unsolicited {
};
/*
- * Helper for automatic ping configuration
+ * Helper for automatic pin configuration
*/
enum {
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index e9fdfc4b1c57..afbe314a5bf3 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -71,9 +71,10 @@ struct ad198x_spec {
struct hda_input_mux private_imux;
hda_nid_t private_dac_nids[AUTO_CFG_MAX_OUTS];
- unsigned int jack_present :1;
- unsigned int inv_jack_detect:1; /* inverted jack-detection */
- unsigned int inv_eapd:1; /* inverted EAPD implementation */
+ unsigned int jack_present: 1;
+ unsigned int inv_jack_detect: 1;/* inverted jack-detection */
+ unsigned int inv_eapd: 1; /* inverted EAPD implementation */
+ unsigned int analog_beep: 1; /* analog beep input present */
#ifdef CONFIG_SND_HDA_POWER_SAVE
struct hda_loopback_check loopback;
@@ -165,6 +166,12 @@ static struct snd_kcontrol_new ad_beep_mixer[] = {
{ } /* end */
};
+static struct snd_kcontrol_new ad_beep2_mixer[] = {
+ HDA_CODEC_VOLUME("Digital Beep Playback Volume", 0, 0, HDA_OUTPUT),
+ HDA_CODEC_MUTE_BEEP("Digital Beep Playback Switch", 0, 0, HDA_OUTPUT),
+ { } /* end */
+};
+
#define set_beep_amp(spec, nid, idx, dir) \
((spec)->beep_amp = HDA_COMPOSE_AMP_VAL(nid, 1, idx, dir)) /* mono */
#else
@@ -203,7 +210,8 @@ static int ad198x_build_controls(struct hda_codec *codec)
#ifdef CONFIG_SND_HDA_INPUT_BEEP
if (spec->beep_amp) {
struct snd_kcontrol_new *knew;
- for (knew = ad_beep_mixer; knew->name; knew++) {
+ knew = spec->analog_beep ? ad_beep2_mixer : ad_beep_mixer;
+ for ( ; knew->name; knew++) {
struct snd_kcontrol *kctl;
kctl = snd_ctl_new1(knew, codec);
if (!kctl)
@@ -3481,6 +3489,8 @@ static struct snd_kcontrol_new ad1984_thinkpad_mixers[] = {
HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x00, HDA_INPUT),
HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x20, 0x01, HDA_INPUT),
HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x20, 0x01, HDA_INPUT),
+ HDA_CODEC_VOLUME("Beep Playback Volume", 0x20, 0x03, HDA_INPUT),
+ HDA_CODEC_MUTE("Beep Playback Switch", 0x20, 0x03, HDA_INPUT),
HDA_CODEC_VOLUME("Docking Mic Playback Volume", 0x20, 0x04, HDA_INPUT),
HDA_CODEC_MUTE("Docking Mic Playback Switch", 0x20, 0x04, HDA_INPUT),
HDA_CODEC_VOLUME("Mic Boost", 0x14, 0x0, HDA_INPUT),
@@ -3522,6 +3532,8 @@ static struct hda_verb ad1984_thinkpad_init_verbs[] = {
{0x1c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
/* docking mic boost */
{0x25, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
+ /* Analog PC Beeper - allow firmware/ACPI beeps */
+ {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(3) | 0x1a},
/* Analog mixer - docking mic; mute as default */
{0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
/* enable EAPD bit */
@@ -3654,6 +3666,7 @@ static int patch_ad1984(struct hda_codec *codec)
spec->input_mux = &ad1984_thinkpad_capture_source;
spec->mixers[0] = ad1984_thinkpad_mixers;
spec->init_verbs[spec->num_init_verbs++] = ad1984_thinkpad_init_verbs;
+ spec->analog_beep = 1;
break;
case AD1984_DELL_DESKTOP:
spec->multiout.dig_out_nid = 0;
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 56e52071c769..e863649d31f5 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -115,6 +115,7 @@ struct conexant_spec {
unsigned int port_d_mode;
unsigned int dell_vostro:1;
unsigned int ideapad:1;
+ unsigned int thinkpad:1;
unsigned int ext_mic_present;
unsigned int recording;
@@ -1197,9 +1198,10 @@ static int patch_cxt5045(struct hda_codec *codec)
case 0x103c:
case 0x1631:
case 0x1734:
- /* HP, Packard Bell, & Fujitsu-Siemens laptops have really bad
- * sound over 0dB on NID 0x17. Fix max PCM level to 0 dB
- * (originally it has 0x2b steps with 0dB offset 0x14)
+ case 0x17aa:
+ /* HP, Packard Bell, Fujitsu-Siemens & Lenovo laptops have
+ * really bad sound over 0dB on NID 0x17. Fix max PCM level to
+ * 0 dB (originally it has 0x2b steps with 0dB offset 0x14)
*/
snd_hda_override_amp_caps(codec, 0x17, HDA_INPUT,
(0x14 << AC_AMPCAP_OFFSET_SHIFT) |
@@ -1783,6 +1785,7 @@ static struct hda_verb cxt5051_init_verbs[] = {
{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1) | 0x44},
{0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0) | 0x44},
/* SPDIF route: PCM */
+ {0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
{0x1c, AC_VERB_SET_CONNECT_SEL, 0x0},
/* EAPD */
{0x1a, AC_VERB_SET_EAPD_BTLENABLE, 0x2}, /* default on */
@@ -1839,6 +1842,7 @@ static struct hda_verb cxt5051_lenovo_x200_init_verbs[] = {
{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1) | 0x44},
{0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0) | 0x44},
/* SPDIF route: PCM */
+ {0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT}, /* needed for W500 Advanced Mini Dock 250410 */
{0x1c, AC_VERB_SET_CONNECT_SEL, 0x0},
/* EAPD */
{0x1a, AC_VERB_SET_EAPD_BTLENABLE, 0x2}, /* default on */
@@ -1910,7 +1914,7 @@ enum {
CXT5051_LAPTOP, /* Laptops w/ EAPD support */
CXT5051_HP, /* no docking */
CXT5051_HP_DV6736, /* HP without mic switch */
- CXT5051_LENOVO_X200, /* Lenovo X200 laptop */
+ CXT5051_LENOVO_X200, /* Lenovo X200 laptop, also used for Advanced Mini Dock 250410 */
CXT5051_F700, /* HP Compaq Presario F700 */
CXT5051_TOSHIBA, /* Toshiba M300 & co */
CXT5051_MODELS
@@ -2032,6 +2036,9 @@ static void cxt5066_update_speaker(struct hda_codec *codec)
/* Port D (HP/LO) */
pinctl = ((spec->hp_present & 2) && spec->cur_eapd)
? spec->port_d_mode : 0;
+ /* Mute if Port A is connected on Thinkpad */
+ if (spec->thinkpad && (spec->hp_present & 1))
+ pinctl = 0;
snd_hda_codec_write(codec, 0x1c, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
pinctl);
@@ -2212,6 +2219,50 @@ static void cxt5066_ideapad_automic(struct hda_codec *codec)
}
}
+/* toggle input of built-in digital mic and mic jack appropriately
+   order is: external mic -> dock mic -> internal mic */
+static void cxt5066_thinkpad_automic(struct hda_codec *codec)
+{
+ unsigned int ext_present, dock_present;
+
+ static struct hda_verb ext_mic_present[] = {
+ {0x14, AC_VERB_SET_CONNECT_SEL, 0},
+ {0x17, AC_VERB_SET_CONNECT_SEL, 1},
+ {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
+ {0x23, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
+ {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
+ {}
+ };
+ static struct hda_verb dock_mic_present[] = {
+ {0x14, AC_VERB_SET_CONNECT_SEL, 0},
+ {0x17, AC_VERB_SET_CONNECT_SEL, 0},
+ {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
+ {0x23, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
+ {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
+ {}
+ };
+ static struct hda_verb ext_mic_absent[] = {
+ {0x14, AC_VERB_SET_CONNECT_SEL, 2},
+ {0x23, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
+ {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
+ {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
+ {}
+ };
+
+ ext_present = snd_hda_jack_detect(codec, 0x1b);
+ dock_present = snd_hda_jack_detect(codec, 0x1a);
+ if (ext_present) {
+ snd_printdd("CXT5066: external microphone detected\n");
+ snd_hda_sequence_write(codec, ext_mic_present);
+ } else if (dock_present) {
+ snd_printdd("CXT5066: dock microphone detected\n");
+ snd_hda_sequence_write(codec, dock_mic_present);
+ } else {
+ snd_printdd("CXT5066: external microphone absent\n");
+ snd_hda_sequence_write(codec, ext_mic_absent);
+ }
+}
+
/* mute internal speaker if HP is plugged */
static void cxt5066_hp_automute(struct hda_codec *codec)
{
@@ -2224,7 +2275,8 @@ static void cxt5066_hp_automute(struct hda_codec *codec)
/* Port D */
portD = snd_hda_jack_detect(codec, 0x1c);
- spec->hp_present = !!(portA | portD);
+ spec->hp_present = !!(portA);
+ spec->hp_present |= portD ? 2 : 0;
snd_printdd("CXT5066: hp automute portA=%x portD=%x present=%d\n",
portA, portD, spec->hp_present);
cxt5066_update_speaker(codec);
@@ -2275,6 +2327,20 @@ static void cxt5066_ideapad_event(struct hda_codec *codec, unsigned int res)
}
}
+/* unsolicited event for jack sensing */
+static void cxt5066_thinkpad_event(struct hda_codec *codec, unsigned int res)
+{
+ snd_printdd("CXT5066_thinkpad: unsol event %x (%x)\n", res, res >> 26);
+ switch (res >> 26) {
+ case CONEXANT_HP_EVENT:
+ cxt5066_hp_automute(codec);
+ break;
+ case CONEXANT_MIC_EVENT:
+ cxt5066_thinkpad_automic(codec);
+ break;
+ }
+}
+
static const struct hda_input_mux cxt5066_analog_mic_boost = {
.num_items = 5,
.items = {
@@ -2293,7 +2359,7 @@ static void cxt5066_set_mic_boost(struct hda_codec *codec)
AC_VERB_SET_AMP_GAIN_MUTE,
AC_AMP_SET_RIGHT | AC_AMP_SET_LEFT | AC_AMP_SET_OUTPUT |
cxt5066_analog_mic_boost.items[spec->mic_boost].index);
- if (spec->ideapad) {
+ if (spec->ideapad || spec->thinkpad) {
/* adjust the internal mic as well...it is not through 0x17 */
snd_hda_codec_write_cache(codec, 0x23, 0,
AC_VERB_SET_AMP_GAIN_MUTE,
@@ -2781,6 +2847,64 @@ static struct hda_verb cxt5066_init_verbs_ideapad[] = {
{ } /* end */
};
+static struct hda_verb cxt5066_init_verbs_thinkpad[] = {
+ {0x1e, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN}, /* Port F */
+ {0x1d, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN}, /* Port E */
+
+ /* Port G: internal speakers */
+ {0x1f, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
+ {0x1f, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC1 */
+
+ /* Port A: HP, Amp */
+ {0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
+ {0x19, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC1 */
+
+ /* Port B: Mic Dock */
+ {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
+
+ /* Port C: Mic */
+ {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
+
+ /* Port D: HP Dock, Amp */
+ {0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
+ {0x1c, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC1 */
+
+ /* DAC1 */
+ {0x10, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+
+ /* Node 14 connections: 0x17 0x18 0x23 0x24 0x27 */
+ {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0) | 0x50},
+ {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
+ {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(2) | 0x50},
+ {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
+ {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
+ {0x14, AC_VERB_SET_CONNECT_SEL, 2}, /* default to internal mic */
+
+ /* Audio input selector */
+ {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE | 0x2},
+ {0x17, AC_VERB_SET_CONNECT_SEL, 1}, /* route ext mic */
+
+ /* SPDIF route: PCM */
+ {0x20, AC_VERB_SET_CONNECT_SEL, 0x0},
+ {0x22, AC_VERB_SET_CONNECT_SEL, 0x0},
+
+ {0x20, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
+ {0x22, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
+
+ /* internal microphone */
+ {0x23, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN}, /* enable int mic */
+
+ /* EAPD */
+ {0x1d, AC_VERB_SET_EAPD_BTLENABLE, 0x2}, /* default on */
+
+ /* enable unsolicited events for Port A, B, C and D */
+ {0x19, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_HP_EVENT},
+ {0x1c, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_HP_EVENT},
+ {0x1a, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_MIC_EVENT},
+ {0x1b, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_MIC_EVENT},
+ { } /* end */
+};
+
static struct hda_verb cxt5066_init_verbs_portd_lo[] = {
{0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
{ } /* end */
@@ -2799,6 +2923,8 @@ static int cxt5066_init(struct hda_codec *codec)
cxt5066_vostro_automic(codec);
else if (spec->ideapad)
cxt5066_ideapad_automic(codec);
+ else if (spec->thinkpad)
+ cxt5066_thinkpad_automic(codec);
}
cxt5066_set_mic_boost(codec);
return 0;
@@ -2820,20 +2946,22 @@ static int cxt5066_olpc_init(struct hda_codec *codec)
}
enum {
- CXT5066_LAPTOP, /* Laptops w/ EAPD support */
+ CXT5066_LAPTOP, /* Laptops w/ EAPD support */
CXT5066_DELL_LAPTOP, /* Dell Laptop */
CXT5066_OLPC_XO_1_5, /* OLPC XO 1.5 */
CXT5066_DELL_VOSTO, /* Dell Vostro 1015i */
CXT5066_IDEAPAD, /* Lenovo IdeaPad U150 */
+ CXT5066_THINKPAD, /* Lenovo ThinkPad T410s, others? */
CXT5066_MODELS
};
static const char *cxt5066_models[CXT5066_MODELS] = {
- [CXT5066_LAPTOP] = "laptop",
+ [CXT5066_LAPTOP] = "laptop",
[CXT5066_DELL_LAPTOP] = "dell-laptop",
[CXT5066_OLPC_XO_1_5] = "olpc-xo-1_5",
[CXT5066_DELL_VOSTO] = "dell-vostro",
[CXT5066_IDEAPAD] = "ideapad",
+ [CXT5066_THINKPAD] = "thinkpad",
};
static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
@@ -2846,7 +2974,9 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5),
SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5),
+ SND_PCI_QUIRK(0x17aa, 0x21b2, "Thinkpad X100e", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x17aa, 0x3a0d, "ideapad", CXT5066_IDEAPAD),
+ SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD),
{}
};
@@ -2954,6 +3084,22 @@ static int patch_cxt5066(struct hda_codec *codec)
/* input source automatically selected */
spec->input_mux = NULL;
break;
+ case CXT5066_THINKPAD:
+ codec->patch_ops.init = cxt5066_init;
+ codec->patch_ops.unsol_event = cxt5066_thinkpad_event;
+ spec->mixers[spec->num_mixers++] = cxt5066_mixer_master;
+ spec->mixers[spec->num_mixers++] = cxt5066_mixers;
+ spec->init_verbs[0] = cxt5066_init_verbs_thinkpad;
+ spec->thinkpad = 1;
+ spec->port_d_mode = PIN_OUT;
+ spec->mic_boost = 2; /* default 20dB gain */
+
+ /* no S/PDIF out */
+ spec->multiout.dig_out_nid = 0;
+
+ /* input source automatically selected */
+ spec->input_mux = NULL;
+ break;
}
return 0;
@@ -2973,6 +3119,8 @@ static struct hda_codec_preset snd_hda_preset_conexant[] = {
.patch = patch_cxt5066 },
{ .id = 0x14f15067, .name = "CX20583 (Pebble HSF)",
.patch = patch_cxt5066 },
+ { .id = 0x14f15069, .name = "CX20585",
+ .patch = patch_cxt5066 },
{} /* terminator */
};
@@ -2981,6 +3129,7 @@ MODULE_ALIAS("snd-hda-codec-id:14f15047");
MODULE_ALIAS("snd-hda-codec-id:14f15051");
MODULE_ALIAS("snd-hda-codec-id:14f15066");
MODULE_ALIAS("snd-hda-codec-id:14f15067");
+MODULE_ALIAS("snd-hda-codec-id:14f15069");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Conexant HD-audio codec");
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 7404dba16f83..80a183f99eb3 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -276,6 +276,18 @@ struct alc_mic_route {
#define MUX_IDX_UNDEF ((unsigned char)-1)
+struct alc_customize_define {
+ unsigned int sku_cfg;
+ unsigned char port_connectivity;
+ unsigned char check_sum;
+ unsigned char customization;
+ unsigned char external_amp;
+ unsigned int enable_pcbeep:1;
+ unsigned int platform_type:1;
+ unsigned int swap:1;
+ unsigned int override:1;
+};
+
struct alc_spec {
/* codec parameterization */
struct snd_kcontrol_new *mixers[5]; /* mixer arrays */
@@ -333,6 +345,7 @@ struct alc_spec {
/* dynamic controls, init_verbs and input_mux */
struct auto_pin_cfg autocfg;
+ struct alc_customize_define cdefine;
struct snd_array kctls;
struct hda_input_mux private_imux[3];
hda_nid_t private_dac_nids[AUTO_CFG_MAX_OUTS];
@@ -1248,6 +1261,62 @@ static void alc_init_auto_mic(struct hda_codec *codec)
spec->unsol_event = alc_sku_unsol_event;
}
+static int alc_auto_parse_customize_define(struct hda_codec *codec)
+{
+ unsigned int ass, tmp, i;
+ unsigned nid = 0;
+ struct alc_spec *spec = codec->spec;
+
+ ass = codec->subsystem_id & 0xffff;
+ if (ass != codec->bus->pci->subsystem_device && (ass & 1))
+ goto do_sku;
+
+ nid = 0x1d;
+ if (codec->vendor_id == 0x10ec0260)
+ nid = 0x17;
+ ass = snd_hda_codec_get_pincfg(codec, nid);
+
+ if (!(ass & 1)) {
+ printk(KERN_INFO "hda_codec: %s: SKU not ready 0x%08x\n",
+ codec->chip_name, ass);
+ return -1;
+ }
+
+ /* check sum */
+ tmp = 0;
+ for (i = 1; i < 16; i++) {
+ if ((ass >> i) & 1)
+ tmp++;
+ }
+ if (((ass >> 16) & 0xf) != tmp)
+ return -1;
+
+ spec->cdefine.port_connectivity = ass >> 30;
+ spec->cdefine.enable_pcbeep = (ass & 0x100000) >> 20;
+ spec->cdefine.check_sum = (ass >> 16) & 0xf;
+ spec->cdefine.customization = ass >> 8;
+do_sku:
+ spec->cdefine.sku_cfg = ass;
+ spec->cdefine.external_amp = (ass & 0x38) >> 3;
+ spec->cdefine.platform_type = (ass & 0x4) >> 2;
+ spec->cdefine.swap = (ass & 0x2) >> 1;
+ spec->cdefine.override = ass & 0x1;
+
+ snd_printd("SKU: Nid=0x%x sku_cfg=0x%08x\n",
+ nid, spec->cdefine.sku_cfg);
+ snd_printd("SKU: port_connectivity=0x%x\n",
+ spec->cdefine.port_connectivity);
+ snd_printd("SKU: enable_pcbeep=0x%x\n", spec->cdefine.enable_pcbeep);
+ snd_printd("SKU: check_sum=0x%08x\n", spec->cdefine.check_sum);
+ snd_printd("SKU: customization=0x%08x\n", spec->cdefine.customization);
+ snd_printd("SKU: external_amp=0x%x\n", spec->cdefine.external_amp);
+ snd_printd("SKU: platform_type=0x%x\n", spec->cdefine.platform_type);
+ snd_printd("SKU: swap=0x%x\n", spec->cdefine.swap);
+ snd_printd("SKU: override=0x%x\n", spec->cdefine.override);
+
+ return 0;
+}
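/* Illustrative arithmetic only, not part of the patch: for an assumed pin
 * default of 0x00031007 the valid bit (bit 0) is set, bits 1..15 contain
 * three set bits (bits 1, 2 and 12), and the checksum nibble
 * (ass >> 16) & 0xf is also 3, so the SKU passes the check and is decoded.
 */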
+
/* check subsystem ID and set up device-specific initialization;
* return 1 if initialized, 0 if invalid SSID
*/
@@ -3415,6 +3484,10 @@ static int alc_init(struct hda_codec *codec)
if (spec->init_hook)
spec->init_hook(codec);
+#ifdef CONFIG_SND_HDA_POWER_SAVE
+ if (codec->patch_ops.check_power_status)
+ codec->patch_ops.check_power_status(codec, 0x01);
+#endif
return 0;
}
@@ -3775,6 +3848,10 @@ static int alc_resume(struct hda_codec *codec)
codec->patch_ops.init(codec);
snd_hda_codec_resume_amp(codec);
snd_hda_codec_resume_cache(codec);
+#ifdef CONFIG_SND_HDA_POWER_SAVE
+ if (codec->patch_ops.check_power_status)
+ codec->patch_ops.check_power_status(codec, 0x01);
+#endif
return 0;
}
#endif
@@ -3797,6 +3874,17 @@ static struct hda_codec_ops alc_patch_ops = {
.reboot_notify = alc_shutup,
};
+/* replace the codec chip_name with the given string */
+static int alc_codec_rename(struct hda_codec *codec, const char *name)
+{
+ kfree(codec->chip_name);
+ codec->chip_name = kstrdup(name, GFP_KERNEL);
+ if (!codec->chip_name) {
+ alc_free(codec);
+ return -ENOMEM;
+ }
+ return 0;
+}
/*
* Test configuration for debugging
@@ -10189,21 +10277,20 @@ static int alc882_auto_create_input_ctls(struct hda_codec *codec,
static void alc882_auto_set_output_and_unmute(struct hda_codec *codec,
hda_nid_t nid, int pin_type,
- int dac_idx)
+ hda_nid_t dac)
{
- /* set as output */
- struct alc_spec *spec = codec->spec;
int idx;
+ /* set as output */
alc_set_pin_output(codec, nid, pin_type);
- if (dac_idx >= spec->multiout.num_dacs)
- return;
- if (spec->multiout.dac_nids[dac_idx] == 0x25)
+
+ if (dac == 0x25)
idx = 4;
+ else if (dac >= 0x02 && dac <= 0x05)
+ idx = dac - 2;
else
- idx = spec->multiout.dac_nids[dac_idx] - 2;
+ return;
snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CONNECT_SEL, idx);
-
}
static void alc882_auto_init_multi_out(struct hda_codec *codec)
@@ -10216,22 +10303,29 @@ static void alc882_auto_init_multi_out(struct hda_codec *codec)
int pin_type = get_pin_type(spec->autocfg.line_out_type);
if (nid)
alc882_auto_set_output_and_unmute(codec, nid, pin_type,
- i);
+ spec->multiout.dac_nids[i]);
}
}
static void alc882_auto_init_hp_out(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- hda_nid_t pin;
+ hda_nid_t pin, dac;
pin = spec->autocfg.hp_pins[0];
- if (pin) /* connect to front */
- /* use dac 0 */
- alc882_auto_set_output_and_unmute(codec, pin, PIN_HP, 0);
+ if (pin) {
+ dac = spec->multiout.hp_nid;
+ if (!dac)
+ dac = spec->multiout.dac_nids[0]; /* to front */
+ alc882_auto_set_output_and_unmute(codec, pin, PIN_HP, dac);
+ }
pin = spec->autocfg.speaker_pins[0];
- if (pin)
- alc882_auto_set_output_and_unmute(codec, pin, PIN_OUT, 0);
+ if (pin) {
+ dac = spec->multiout.extra_out_nid[0];
+ if (!dac)
+ dac = spec->multiout.dac_nids[0]; /* to front */
+ alc882_auto_set_output_and_unmute(codec, pin, PIN_OUT, dac);
+ }
}
static void alc882_auto_init_analog_input(struct hda_codec *codec)
@@ -10347,15 +10441,15 @@ static int alc882_parse_auto_config(struct hda_codec *codec)
err = alc880_auto_create_multi_out_ctls(spec, &spec->autocfg);
if (err < 0)
return err;
+ err = alc880_auto_create_extra_out(spec, spec->autocfg.hp_pins[0],
+ "Headphone");
+ if (err < 0)
+ return err;
err = alc880_auto_create_extra_out(spec,
spec->autocfg.speaker_pins[0],
"Speaker");
if (err < 0)
return err;
- err = alc880_auto_create_extra_out(spec, spec->autocfg.hp_pins[0],
- "Headphone");
- if (err < 0)
- return err;
err = alc882_auto_create_input_ctls(codec, &spec->autocfg);
if (err < 0)
return err;
@@ -10425,6 +10519,8 @@ static int patch_alc882(struct hda_codec *codec)
codec->spec = spec;
+ alc_auto_parse_customize_define(codec);
+
switch (codec->vendor_id) {
case 0x10ec0882:
case 0x10ec0885:
@@ -10484,9 +10580,6 @@ static int patch_alc882(struct hda_codec *codec)
spec->stream_digital_playback = &alc882_pcm_digital_playback;
spec->stream_digital_capture = &alc882_pcm_digital_capture;
- if (codec->vendor_id == 0x10ec0888)
- spec->init_amp = ALC_INIT_DEFAULT; /* always initialize */
-
if (!spec->adc_nids && spec->input_mux) {
int i, j;
spec->num_adc_nids = 0;
@@ -10521,7 +10614,9 @@ static int patch_alc882(struct hda_codec *codec)
}
set_capture_mixer(codec);
- set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
+
+ if (spec->cdefine.enable_pcbeep)
+ set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
if (board_config == ALC882_AUTO)
alc_pick_fixup(codec, alc882_fixup_tbl, alc882_fixups, 0);
@@ -12308,6 +12403,7 @@ static int patch_alc262(struct hda_codec *codec)
snd_hda_codec_write(codec, 0x1a, 0, AC_VERB_SET_PROC_COEF, tmp | 0x80);
}
#endif
+ alc_auto_parse_customize_define(codec);
alc_fix_pll_init(codec, 0x20, 0x0a, 10);
@@ -12386,7 +12482,7 @@ static int patch_alc262(struct hda_codec *codec)
}
if (!spec->cap_mixer && !spec->no_analog)
set_capture_mixer(codec);
- if (!spec->no_analog)
+ if (!spec->no_analog && spec->cdefine.enable_pcbeep)
set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
spec->vmaster_nid = 0x0c;
@@ -14005,6 +14101,35 @@ static struct hda_pcm_stream alc269_44k_pcm_analog_capture = {
/* NID is set in alc_build_pcms */
};
+#ifdef CONFIG_SND_HDA_POWER_SAVE
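+/* return 1 if the mic2 vref pin is used to control the mute LED on this subsystem */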
+static int alc269_mic2_for_mute_led(struct hda_codec *codec)
+{
+ switch (codec->subsystem_id) {
+ case 0x103c1586:
+ return 1;
+ }
+ return 0;
+}
+
+static int alc269_mic2_mute_check_ps(struct hda_codec *codec, hda_nid_t nid)
+{
+ /* update mute-LED according to the speaker mute state */
+ if (nid == 0x01 || nid == 0x14) {
+ int pinval;
+ if (snd_hda_codec_amp_read(codec, 0x14, 0, HDA_OUTPUT, 0) &
+ HDA_AMP_MUTE)
+ pinval = 0x24;
+ else
+ pinval = 0x20;
+ /* mic2 vref pin is used for mute LED control */
+ snd_hda_codec_update_cache(codec, 0x19, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL,
+ pinval);
+ }
+ return alc_check_power_status(codec, nid);
+}
+#endif /* CONFIG_SND_HDA_POWER_SAVE */
+
/*
* BIOS auto configuration
*/
@@ -14290,17 +14415,17 @@ static int patch_alc269(struct hda_codec *codec)
codec->spec = spec;
- alc_fix_pll_init(codec, 0x20, 0x04, 15);
+ alc_auto_parse_customize_define(codec);
if ((alc_read_coef_idx(codec, 0) & 0x00f0) == 0x0010){
- kfree(codec->chip_name);
- codec->chip_name = kstrdup("ALC259", GFP_KERNEL);
- if (!codec->chip_name) {
- alc_free(codec);
- return -ENOMEM;
- }
+ if (codec->bus->pci->subsystem_vendor == 0x1025 &&
+ spec->cdefine.platform_type == 1)
+ alc_codec_rename(codec, "ALC271X");
+ else
+ alc_codec_rename(codec, "ALC259");
is_alc269vb = 1;
- }
+ } else
+ alc_fix_pll_init(codec, 0x20, 0x04, 15);
board_config = snd_hda_check_board_config(codec, ALC269_MODEL_LAST,
alc269_models,
@@ -14365,7 +14490,8 @@ static int patch_alc269(struct hda_codec *codec)
if (!spec->cap_mixer)
set_capture_mixer(codec);
- set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT);
+ if (spec->cdefine.enable_pcbeep)
+ set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT);
if (board_config == ALC269_AUTO)
alc_pick_fixup(codec, alc269_fixup_tbl, alc269_fixups, 0);
@@ -14378,6 +14504,8 @@ static int patch_alc269(struct hda_codec *codec)
#ifdef CONFIG_SND_HDA_POWER_SAVE
if (!spec->loopback.amplist)
spec->loopback.amplist = alc269_loopbacks;
+ if (alc269_mic2_for_mute_led(codec))
+ codec->patch_ops.check_power_status = alc269_mic2_mute_check_ps;
#endif
return 0;
@@ -17871,7 +17999,6 @@ static struct snd_pci_quirk alc662_cfg_tbl[] = {
ALC662_3ST_6ch_DIG),
SND_PCI_QUIRK_MASK(0x1854, 0xf000, 0x2000, "ASUS H13-200x",
ALC663_ASUS_H13),
- SND_PCI_QUIRK(0x8086, 0xd604, "Intel mobo", ALC662_3ST_2ch_DIG),
{}
};
@@ -18526,16 +18653,16 @@ static int patch_alc662(struct hda_codec *codec)
codec->spec = spec;
+ alc_auto_parse_customize_define(codec);
+
alc_fix_pll_init(codec, 0x20, 0x04, 15);
- if (alc_read_coef_idx(codec, 0)==0x8020){
- kfree(codec->chip_name);
- codec->chip_name = kstrdup("ALC661", GFP_KERNEL);
- if (!codec->chip_name) {
- alc_free(codec);
- return -ENOMEM;
- }
- }
+ if (alc_read_coef_idx(codec, 0) == 0x8020)
+ alc_codec_rename(codec, "ALC661");
+ else if ((alc_read_coef_idx(codec, 0) & (1 << 14)) &&
+ codec->bus->pci->subsystem_vendor == 0x1025 &&
+ spec->cdefine.platform_type == 1)
+ alc_codec_rename(codec, "ALC272X");
board_config = snd_hda_check_board_config(codec, ALC662_MODEL_LAST,
alc662_models,
@@ -18585,18 +18712,20 @@ static int patch_alc662(struct hda_codec *codec)
if (!spec->cap_mixer)
set_capture_mixer(codec);
- switch (codec->vendor_id) {
- case 0x10ec0662:
- set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
- break;
- case 0x10ec0272:
- case 0x10ec0663:
- case 0x10ec0665:
- set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT);
- break;
- case 0x10ec0273:
- set_beep_amp(spec, 0x0b, 0x03, HDA_INPUT);
- break;
+ if (spec->cdefine.enable_pcbeep) {
+ switch (codec->vendor_id) {
+ case 0x10ec0662:
+ set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
+ break;
+ case 0x10ec0272:
+ case 0x10ec0663:
+ case 0x10ec0665:
+ set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT);
+ break;
+ case 0x10ec0273:
+ set_beep_amp(spec, 0x0b, 0x03, HDA_INPUT);
+ break;
+ }
}
spec->vmaster_nid = 0x02;
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 7fb7d017a347..eb4ea3df5d84 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -1544,11 +1544,9 @@ static unsigned int alienware_m17x_pin_configs[13] = {
0x904601b0,
};
-static unsigned int intel_dg45id_pin_configs[14] = {
+static unsigned int intel_dg45id_pin_configs[13] = {
0x02214230, 0x02A19240, 0x01013214, 0x01014210,
- 0x01A19250, 0x01011212, 0x01016211, 0x40f000f0,
- 0x40f000f0, 0x40f000f0, 0x40f000f0, 0x014510A0,
- 0x074510B0, 0x40f000f0
+ 0x01A19250, 0x01011212, 0x01016211
};
static unsigned int *stac92hd73xx_brd_tbl[STAC_92HD73XX_MODELS] = {
@@ -4768,6 +4766,9 @@ static void set_hp_led_gpio(struct hda_codec *codec)
struct sigmatel_spec *spec = codec->spec;
unsigned int gpio;
+ if (spec->gpio_led)
+ return;
+
gpio = snd_hda_param_read(codec, codec->afg, AC_PAR_GPIO_CAP);
gpio &= AC_GPIO_IO_COUNT;
if (gpio > 3)
@@ -5685,11 +5686,13 @@ again:
* detection.
*/
spec->hp_detect = 1;
+ spec->gpio_led = 0x01;
break;
case STAC_HP_HDX:
spec->num_dmics = 1;
spec->num_dmuxes = 1;
spec->num_smuxes = 1;
+ spec->gpio_led = 0x08;
break;
}
diff --git a/sound/pci/ice1712/aureon.c b/sound/pci/ice1712/aureon.c
index 9e66f6d306f8..2f6252266a02 100644
--- a/sound/pci/ice1712/aureon.c
+++ b/sound/pci/ice1712/aureon.c
@@ -1956,11 +1956,10 @@ static int __devinit aureon_add_controls(struct snd_ice1712 *ice)
return 0;
}
-
/*
- * initialize the chip
+ * reset the chip
*/
-static int __devinit aureon_init(struct snd_ice1712 *ice)
+static int aureon_reset(struct snd_ice1712 *ice)
{
static const unsigned short wm_inits_aureon[] = {
/* These come first to reduce init pop noise */
@@ -2047,30 +2046,10 @@ static int __devinit aureon_init(struct snd_ice1712 *ice)
0x0605, /* slave, 24bit, MSB on second OSCLK, SDOUT for right channel when OLRCK is high */
(unsigned short)-1
};
- struct aureon_spec *spec;
unsigned int tmp;
const unsigned short *p;
- int err, i;
-
- spec = kzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec)
- return -ENOMEM;
- ice->spec = spec;
-
- if (ice->eeprom.subvendor == VT1724_SUBDEVICE_AUREON51_SKY) {
- ice->num_total_dacs = 6;
- ice->num_total_adcs = 2;
- } else {
- /* aureon 7.1 and prodigy 7.1 */
- ice->num_total_dacs = 8;
- ice->num_total_adcs = 2;
- }
-
- /* to remeber the register values of CS8415 */
- ice->akm = kzalloc(sizeof(struct snd_akm4xxx), GFP_KERNEL);
- if (!ice->akm)
- return -ENOMEM;
- ice->akm_codecs = 1;
+ int err;
+ struct aureon_spec *spec = ice->spec;
err = aureon_ac97_init(ice);
if (err != 0)
@@ -2118,6 +2097,61 @@ static int __devinit aureon_init(struct snd_ice1712 *ice)
/* initialize PCA9554 pin directions & set default input */
aureon_pca9554_write(ice, PCA9554_DIR, 0x00);
aureon_pca9554_write(ice, PCA9554_OUT, 0x00); /* internal AUX */
+ return 0;
+}
+
+/*
+ * suspend/resume
+ */
+#ifdef CONFIG_PM
+static int aureon_resume(struct snd_ice1712 *ice)
+{
+ struct aureon_spec *spec = ice->spec;
+ int err, i;
+
+ err = aureon_reset(ice);
+ if (err != 0)
+ return err;
+
+ /* workaround for poking volume with alsamixer after resume:
+ * just set stored volume again */
+ for (i = 0; i < ice->num_total_dacs; i++)
+ wm_set_vol(ice, i, spec->vol[i], spec->master[i % 2]);
+ return 0;
+}
+#endif
+
+/*
+ * initialize the chip
+ */
+static int __devinit aureon_init(struct snd_ice1712 *ice)
+{
+ struct aureon_spec *spec;
+ int i, err;
+
+ spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+ ice->spec = spec;
+
+ if (ice->eeprom.subvendor == VT1724_SUBDEVICE_AUREON51_SKY) {
+ ice->num_total_dacs = 6;
+ ice->num_total_adcs = 2;
+ } else {
+ /* aureon 7.1 and prodigy 7.1 */
+ ice->num_total_dacs = 8;
+ ice->num_total_adcs = 2;
+ }
+
+ /* to remember the register values of CS8415 */
+ ice->akm = kzalloc(sizeof(struct snd_akm4xxx), GFP_KERNEL);
+ if (!ice->akm)
+ return -ENOMEM;
+ ice->akm_codecs = 1;
+
+ err = aureon_reset(ice);
+ if (err != 0)
+ return err;
spec->master[0] = WM_VOL_MUTE;
spec->master[1] = WM_VOL_MUTE;
@@ -2126,6 +2160,11 @@ static int __devinit aureon_init(struct snd_ice1712 *ice)
wm_set_vol(ice, i, spec->vol[i], spec->master[i % 2]);
}
+#ifdef CONFIG_PM
+ ice->pm_resume = aureon_resume;
+ ice->pm_suspend_enabled = 1;
+#endif
+
return 0;
}
diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c
index b56e33676780..3c40d726b46e 100644
--- a/sound/pci/maestro3.c
+++ b/sound/pci/maestro3.c
@@ -41,6 +41,7 @@
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/firmware.h>
+#include <linux/input.h>
#include <sound/core.h>
#include <sound/info.h>
#include <sound/control.h>
@@ -844,11 +845,17 @@ struct snd_m3 {
struct m3_dma *substreams;
spinlock_t reg_lock;
- spinlock_t ac97_lock;
+#ifdef CONFIG_SND_MAESTRO3_INPUT
+ struct input_dev *input_dev;
+ char phys[64]; /* physical device path */
+#else
+ spinlock_t ac97_lock;
struct snd_kcontrol *master_switch;
struct snd_kcontrol *master_volume;
struct tasklet_struct hwvol_tq;
+#endif
+
unsigned int in_suspend;
#ifdef CONFIG_PM
@@ -1598,18 +1605,32 @@ static void snd_m3_update_ptr(struct snd_m3 *chip, struct m3_dma *s)
}
}
+/* The m3's hardware volume works by incrementing / decrementing 2 counters
+ (without wrap around) in response to volume button presses and then
+ generating an interrupt. The pair of counters is stored in bits 1-3 and 5-7
+ of a byte wide register. The meaning of bits 0 and 4 is unknown. */
static void snd_m3_update_hw_volume(unsigned long private_data)
{
struct snd_m3 *chip = (struct snd_m3 *) private_data;
int x, val;
+#ifndef CONFIG_SND_MAESTRO3_INPUT
unsigned long flags;
+#endif
/* Figure out which volume control button was pushed,
based on differences from the default register
values. */
x = inb(chip->iobase + SHADOW_MIX_REG_VOICE) & 0xee;
- /* Reset the volume control registers. */
+ /* Reset the volume counters to 4. Tests on the Allegro integrated
+ into a Compaq N600C laptop have revealed that:
+ 1) Writing any value will result in the 2 counters being reset to
+ 4 so writing 0x88 is not strictly necessary
+ 2) Writing to any of the 4 involved registers will reset all 4
+ of them (and reading them always returns the same value for all
+ of them)
+ It could be that a maestro deviates from this, so leave the code
+ as is. */
outb(0x88, chip->iobase + SHADOW_MIX_REG_VOICE);
outb(0x88, chip->iobase + HW_VOL_COUNTER_VOICE);
outb(0x88, chip->iobase + SHADOW_MIX_REG_MASTER);
@@ -1620,6 +1641,7 @@ static void snd_m3_update_hw_volume(unsigned long private_data)
if (chip->in_suspend)
return;
+#ifndef CONFIG_SND_MAESTRO3_INPUT
if (!chip->master_switch || !chip->master_volume)
return;
@@ -1629,7 +1651,9 @@ static void snd_m3_update_hw_volume(unsigned long private_data)
val = chip->ac97->regs[AC97_MASTER_VOL];
switch (x) {
case 0x88:
- /* mute */
+ /* The counters have not changed, yet we've received an HV
+ interrupt. According to tests run by various people this
+ happens when pressing the mute button. */
val ^= 0x8000;
chip->ac97->regs[AC97_MASTER_VOL] = val;
outw(val, chip->iobase + CODEC_DATA);
@@ -1638,7 +1662,7 @@ static void snd_m3_update_hw_volume(unsigned long private_data)
&chip->master_switch->id);
break;
case 0xaa:
- /* volume up */
+ /* counters increased by 1 -> volume up */
if ((val & 0x7f) > 0)
val--;
if ((val & 0x7f00) > 0)
@@ -1650,7 +1674,7 @@ static void snd_m3_update_hw_volume(unsigned long private_data)
&chip->master_volume->id);
break;
case 0x66:
- /* volume down */
+ /* counters decreased by 1 -> volume down */
if ((val & 0x7f) < 0x1f)
val++;
if ((val & 0x7f00) < 0x1f00)
@@ -1663,6 +1687,35 @@ static void snd_m3_update_hw_volume(unsigned long private_data)
break;
}
spin_unlock_irqrestore(&chip->ac97_lock, flags);
+#else
+ if (!chip->input_dev)
+ return;
+
+ val = 0;
+ switch (x) {
+ case 0x88:
+ /* The counters have not changed, yet we've received an HV
+ interrupt. According to tests run by various people this
+ happens when pressing the mute button. */
+ val = KEY_MUTE;
+ break;
+ case 0xaa:
+ /* counters increased by 1 -> volume up */
+ val = KEY_VOLUMEUP;
+ break;
+ case 0x66:
+ /* counters decreased by 1 -> volume down */
+ val = KEY_VOLUMEDOWN;
+ break;
+ }
+
+ if (val) {
+ input_report_key(chip->input_dev, val, 1);
+ input_sync(chip->input_dev);
+ input_report_key(chip->input_dev, val, 0);
+ input_sync(chip->input_dev);
+ }
+#endif
}
static irqreturn_t snd_m3_interrupt(int irq, void *dev_id)
@@ -1677,7 +1730,11 @@ static irqreturn_t snd_m3_interrupt(int irq, void *dev_id)
return IRQ_NONE;
if (status & HV_INT_PENDING)
+#ifdef CONFIG_SND_MAESTRO3_INPUT
+ snd_m3_update_hw_volume((unsigned long)chip);
+#else
tasklet_schedule(&chip->hwvol_tq);
+#endif
/*
* ack an assp int if its running
@@ -1943,18 +2000,24 @@ static unsigned short
snd_m3_ac97_read(struct snd_ac97 *ac97, unsigned short reg)
{
struct snd_m3 *chip = ac97->private_data;
+#ifndef CONFIG_SND_MAESTRO3_INPUT
unsigned long flags;
+#endif
unsigned short data = 0xffff;
if (snd_m3_ac97_wait(chip))
goto fail;
+#ifndef CONFIG_SND_MAESTRO3_INPUT
spin_lock_irqsave(&chip->ac97_lock, flags);
+#endif
snd_m3_outb(chip, 0x80 | (reg & 0x7f), CODEC_COMMAND);
if (snd_m3_ac97_wait(chip))
goto fail_unlock;
data = snd_m3_inw(chip, CODEC_DATA);
fail_unlock:
+#ifndef CONFIG_SND_MAESTRO3_INPUT
spin_unlock_irqrestore(&chip->ac97_lock, flags);
+#endif
fail:
return data;
}
@@ -1963,14 +2026,20 @@ static void
snd_m3_ac97_write(struct snd_ac97 *ac97, unsigned short reg, unsigned short val)
{
struct snd_m3 *chip = ac97->private_data;
+#ifndef CONFIG_SND_MAESTRO3_INPUT
unsigned long flags;
+#endif
if (snd_m3_ac97_wait(chip))
return;
+#ifndef CONFIG_SND_MAESTRO3_INPUT
spin_lock_irqsave(&chip->ac97_lock, flags);
+#endif
snd_m3_outw(chip, val, CODEC_DATA);
snd_m3_outb(chip, reg & 0x7f, CODEC_COMMAND);
+#ifndef CONFIG_SND_MAESTRO3_INPUT
spin_unlock_irqrestore(&chip->ac97_lock, flags);
+#endif
}
@@ -2077,7 +2146,9 @@ static int __devinit snd_m3_mixer(struct snd_m3 *chip)
{
struct snd_ac97_bus *pbus;
struct snd_ac97_template ac97;
+#ifndef CONFIG_SND_MAESTRO3_INPUT
struct snd_ctl_elem_id elem_id;
+#endif
int err;
static struct snd_ac97_bus_ops ops = {
.write = snd_m3_ac97_write,
@@ -2097,6 +2168,7 @@ static int __devinit snd_m3_mixer(struct snd_m3 *chip)
schedule_timeout_uninterruptible(msecs_to_jiffies(100));
snd_ac97_write(chip->ac97, AC97_PCM, 0);
+#ifndef CONFIG_SND_MAESTRO3_INPUT
memset(&elem_id, 0, sizeof(elem_id));
elem_id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
strcpy(elem_id.name, "Master Playback Switch");
@@ -2105,6 +2177,7 @@ static int __devinit snd_m3_mixer(struct snd_m3 *chip)
elem_id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
strcpy(elem_id.name, "Master Playback Volume");
chip->master_volume = snd_ctl_find_id(chip->card, &elem_id);
+#endif
return 0;
}
@@ -2370,6 +2443,7 @@ snd_m3_enable_ints(struct snd_m3 *chip)
val = ASSP_INT_ENABLE /*| MPU401_INT_ENABLE*/;
if (chip->hv_config & HV_CTRL_ENABLE)
val |= HV_INT_ENABLE;
+ outb(val, chip->iobase + HOST_INT_STATUS);
outw(val, io + HOST_INT_CTRL);
outb(inb(io + ASSP_CONTROL_C) | ASSP_HOST_INT_ENABLE,
io + ASSP_CONTROL_C);
@@ -2384,6 +2458,11 @@ static int snd_m3_free(struct snd_m3 *chip)
struct m3_dma *s;
int i;
+#ifdef CONFIG_SND_MAESTRO3_INPUT
+ if (chip->input_dev)
+ input_unregister_device(chip->input_dev);
+#endif
+
if (chip->substreams) {
spin_lock_irq(&chip->reg_lock);
for (i = 0; i < chip->num_substreams; i++) {
@@ -2510,6 +2589,41 @@ static int m3_resume(struct pci_dev *pci)
}
#endif /* CONFIG_PM */
+#ifdef CONFIG_SND_MAESTRO3_INPUT
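+/* register an input device so that the hardware volume buttons are reported
+ * as key events */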
+static int __devinit snd_m3_input_register(struct snd_m3 *chip)
+{
+ struct input_dev *input_dev;
+ int err;
+
+ input_dev = input_allocate_device();
+ if (!input_dev)
+ return -ENOMEM;
+
+ snprintf(chip->phys, sizeof(chip->phys), "pci-%s/input0",
+ pci_name(chip->pci));
+
+ input_dev->name = chip->card->driver;
+ input_dev->phys = chip->phys;
+ input_dev->id.bustype = BUS_PCI;
+ input_dev->id.vendor = chip->pci->vendor;
+ input_dev->id.product = chip->pci->device;
+ input_dev->dev.parent = &chip->pci->dev;
+
+ __set_bit(EV_KEY, input_dev->evbit);
+ __set_bit(KEY_MUTE, input_dev->keybit);
+ __set_bit(KEY_VOLUMEDOWN, input_dev->keybit);
+ __set_bit(KEY_VOLUMEUP, input_dev->keybit);
+
+ err = input_register_device(input_dev);
+ if (err) {
+ input_free_device(input_dev);
+ return err;
+ }
+
+ chip->input_dev = input_dev;
+ return 0;
+}
+#endif /* CONFIG_SND_MAESTRO3_INPUT */
/*
*/
@@ -2553,7 +2667,9 @@ snd_m3_create(struct snd_card *card, struct pci_dev *pci,
}
spin_lock_init(&chip->reg_lock);
+#ifndef CONFIG_SND_MAESTRO3_INPUT
spin_lock_init(&chip->ac97_lock);
+#endif
switch (pci->device) {
case PCI_DEVICE_ID_ESS_ALLEGRO:
@@ -2636,7 +2752,9 @@ snd_m3_create(struct snd_card *card, struct pci_dev *pci,
snd_m3_hv_init(chip);
+#ifndef CONFIG_SND_MAESTRO3_INPUT
tasklet_init(&chip->hwvol_tq, snd_m3_update_hw_volume, (unsigned long)chip);
+#endif
if (request_irq(pci->irq, snd_m3_interrupt, IRQF_SHARED,
card->driver, chip)) {
@@ -2668,7 +2786,16 @@ snd_m3_create(struct snd_card *card, struct pci_dev *pci,
if ((err = snd_m3_pcm(chip, 0)) < 0)
return err;
-
+
+#ifdef CONFIG_SND_MAESTRO3_INPUT
+ if (chip->hv_config & HV_CTRL_ENABLE) {
+ err = snd_m3_input_register(chip);
+ if (err)
+ snd_printk(KERN_WARNING "Input device registration "
+ "failed with error %i", err);
+ }
+#endif
+
snd_m3_enable_ints(chip);
snd_m3_assp_continue(chip);
diff --git a/sound/pci/mixart/mixart.c b/sound/pci/mixart/mixart.c
index 3be8f97c8bc0..6c3fd4d1c49d 100644
--- a/sound/pci/mixart/mixart.c
+++ b/sound/pci/mixart/mixart.c
@@ -1102,73 +1102,17 @@ static int snd_mixart_free(struct mixart_mgr *mgr)
/*
* proc interface
*/
-static long long snd_mixart_BA0_llseek(struct snd_info_entry *entry,
- void *private_file_data,
- struct file *file,
- long long offset,
- int orig)
-{
- offset = offset & ~3; /* 4 bytes aligned */
-
- switch(orig) {
- case SEEK_SET:
- file->f_pos = offset;
- break;
- case SEEK_CUR:
- file->f_pos += offset;
- break;
- case SEEK_END: /* offset is negative */
- file->f_pos = MIXART_BA0_SIZE + offset;
- break;
- default:
- return -EINVAL;
- }
- if(file->f_pos > MIXART_BA0_SIZE)
- file->f_pos = MIXART_BA0_SIZE;
- return file->f_pos;
-}
-
-static long long snd_mixart_BA1_llseek(struct snd_info_entry *entry,
- void *private_file_data,
- struct file *file,
- long long offset,
- int orig)
-{
- offset = offset & ~3; /* 4 bytes aligned */
-
- switch(orig) {
- case SEEK_SET:
- file->f_pos = offset;
- break;
- case SEEK_CUR:
- file->f_pos += offset;
- break;
- case SEEK_END: /* offset is negative */
- file->f_pos = MIXART_BA1_SIZE + offset;
- break;
- default:
- return -EINVAL;
- }
- if(file->f_pos > MIXART_BA1_SIZE)
- file->f_pos = MIXART_BA1_SIZE;
- return file->f_pos;
-}
/*
mixart_BA0 proc interface for BAR 0 - read callback
*/
-static long snd_mixart_BA0_read(struct snd_info_entry *entry, void *file_private_data,
- struct file *file, char __user *buf,
- unsigned long count, unsigned long pos)
+static ssize_t snd_mixart_BA0_read(struct snd_info_entry *entry,
+ void *file_private_data,
+ struct file *file, char __user *buf,
+ size_t count, loff_t pos)
{
struct mixart_mgr *mgr = entry->private_data;
- unsigned long maxsize;
- if (pos >= MIXART_BA0_SIZE)
- return 0;
- maxsize = MIXART_BA0_SIZE - pos;
- if (count > maxsize)
- count = maxsize;
count = count & ~3; /* make sure the read size is a multiple of 4 bytes */
if (copy_to_user_fromio(buf, MIXART_MEM(mgr, pos), count))
return -EFAULT;
@@ -1178,18 +1122,13 @@ static long snd_mixart_BA0_read(struct snd_info_entry *entry, void *file_private
/*
mixart_BA1 proc interface for BAR 1 - read callback
*/
-static long snd_mixart_BA1_read(struct snd_info_entry *entry, void *file_private_data,
- struct file *file, char __user *buf,
- unsigned long count, unsigned long pos)
+static ssize_t snd_mixart_BA1_read(struct snd_info_entry *entry,
+ void *file_private_data,
+ struct file *file, char __user *buf,
+ size_t count, loff_t pos)
{
struct mixart_mgr *mgr = entry->private_data;
- unsigned long maxsize;
- if (pos > MIXART_BA1_SIZE)
- return 0;
- maxsize = MIXART_BA1_SIZE - pos;
- if (count > maxsize)
- count = maxsize;
count = count & ~3; /* make sure the read size is a multiple of 4 bytes */
if (copy_to_user_fromio(buf, MIXART_REG(mgr, pos), count))
return -EFAULT;
@@ -1198,12 +1137,10 @@ static long snd_mixart_BA1_read(struct snd_info_entry *entry, void *file_private
static struct snd_info_entry_ops snd_mixart_proc_ops_BA0 = {
.read = snd_mixart_BA0_read,
- .llseek = snd_mixart_BA0_llseek
};
static struct snd_info_entry_ops snd_mixart_proc_ops_BA1 = {
.read = snd_mixart_BA1_read,
- .llseek = snd_mixart_BA1_llseek
};
diff --git a/sound/pcmcia/pdaudiocf/pdaudiocf.c b/sound/pcmcia/pdaudiocf/pdaudiocf.c
index edaa729126bb..df110df52a8b 100644
--- a/sound/pcmcia/pdaudiocf/pdaudiocf.c
+++ b/sound/pcmcia/pdaudiocf/pdaudiocf.c
@@ -142,12 +142,7 @@ static int snd_pdacf_probe(struct pcmcia_device *link)
link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
link->io.NumPorts1 = 16;
- link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_FORCED_PULSE;
- /* FIXME: This driver should be updated to allow for dynamic IRQ sharing */
- /* link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_FORCED_PULSE; */
-
- link->irq.Handler = pdacf_interrupt;
- link->conf.Attributes = CONF_ENABLE_IRQ;
+ link->conf.Attributes = CONF_ENABLE_IRQ | CONF_ENABLE_PULSE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.ConfigIndex = 1;
link->conf.Present = PRESENT_OPTION;
@@ -228,7 +223,7 @@ static int pdacf_config(struct pcmcia_device *link)
if (ret)
goto failed;
- ret = pcmcia_request_irq(link, &link->irq);
+ ret = pcmcia_request_exclusive_irq(link, pdacf_interrupt);
if (ret)
goto failed;
@@ -236,10 +231,9 @@ static int pdacf_config(struct pcmcia_device *link)
if (ret)
goto failed;
- if (snd_pdacf_assign_resources(pdacf, link->io.BasePort1, link->irq.AssignedIRQ) < 0)
+ if (snd_pdacf_assign_resources(pdacf, link->io.BasePort1, link->irq) < 0)
goto failed;
- link->dev_node = &pdacf->node;
return 0;
failed:
diff --git a/sound/pcmcia/pdaudiocf/pdaudiocf.h b/sound/pcmcia/pdaudiocf/pdaudiocf.h
index b0601838112d..a0a7ec64222a 100644
--- a/sound/pcmcia/pdaudiocf/pdaudiocf.h
+++ b/sound/pcmcia/pdaudiocf/pdaudiocf.h
@@ -117,7 +117,6 @@ struct snd_pdacf {
/* pcmcia stuff */
struct pcmcia_device *p_dev;
- dev_node_t node;
};
static inline void pdacf_reg_write(struct snd_pdacf *chip, unsigned char reg, unsigned short val)
diff --git a/sound/pcmcia/vx/vxpocket.c b/sound/pcmcia/vx/vxpocket.c
index cfd1438bcc64..624b47a85f0a 100644
--- a/sound/pcmcia/vx/vxpocket.c
+++ b/sound/pcmcia/vx/vxpocket.c
@@ -162,10 +162,6 @@ static int snd_vxpocket_new(struct snd_card *card, int ibl,
link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
link->io.NumPorts1 = 16;
- link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
-
- link->irq.Handler = &snd_vx_irq_handler;
-
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.ConfigIndex = 1;
@@ -215,7 +211,6 @@ static int snd_vxpocket_assign_resources(struct vx_core *chip, int port, int irq
static int vxpocket_config(struct pcmcia_device *link)
{
struct vx_core *chip = link->priv;
- struct snd_vxpocket *vxp = (struct snd_vxpocket *)chip;
int ret;
snd_printdd(KERN_DEBUG "vxpocket_config called\n");
@@ -235,7 +230,7 @@ static int vxpocket_config(struct pcmcia_device *link)
if (ret)
goto failed;
- ret = pcmcia_request_irq(link, &link->irq);
+ ret = pcmcia_request_exclusive_irq(link, snd_vx_irq_handler);
if (ret)
goto failed;
@@ -246,10 +241,9 @@ static int vxpocket_config(struct pcmcia_device *link)
chip->dev = &link->dev;
snd_card_set_dev(chip->card, chip->dev);
- if (snd_vxpocket_assign_resources(chip, link->io.BasePort1, link->irq.AssignedIRQ) < 0)
+ if (snd_vxpocket_assign_resources(chip, link->io.BasePort1, link->irq) < 0)
goto failed;
- link->dev_node = &vxp->node;
return 0;
failed:
diff --git a/sound/pcmcia/vx/vxpocket.h b/sound/pcmcia/vx/vxpocket.h
index 27ea002294c0..ea4df16a28ef 100644
--- a/sound/pcmcia/vx/vxpocket.h
+++ b/sound/pcmcia/vx/vxpocket.h
@@ -43,7 +43,6 @@ struct snd_vxpocket {
/* pcmcia stuff */
struct pcmcia_device *p_dev;
- dev_node_t node;
};
extern struct snd_vx_ops snd_vxpocket_ops;
diff --git a/sound/ppc/tumbler.c b/sound/ppc/tumbler.c
index 789f44f4ac78..20afdf9772ee 100644
--- a/sound/ppc/tumbler.c
+++ b/sound/ppc/tumbler.c
@@ -30,6 +30,7 @@
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
+#include <linux/string.h>
#include <sound/core.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -46,6 +47,8 @@
#define DBG(fmt...)
#endif
+#define IS_G4DA (of_machine_is_compatible("PowerMac3,4"))
+
/* i2c address for tumbler */
#define TAS_I2C_ADDR 0x34
@@ -243,6 +246,7 @@ static int tumbler_set_master_volume(struct pmac_tumbler *mix)
snd_printk(KERN_ERR "failed to set volume \n");
return -EINVAL;
}
+ DBG("(I) succeeded to set volume (%u, %u)\n", left_vol, right_vol);
return 0;
}
@@ -353,6 +357,7 @@ static int tumbler_set_drc(struct pmac_tumbler *mix)
snd_printk(KERN_ERR "failed to set DRC\n");
return -EINVAL;
}
+ DBG("(I) succeeded to set DRC (%u, %u)\n", val[0], val[1]);
return 0;
}
@@ -389,6 +394,7 @@ static int snapper_set_drc(struct pmac_tumbler *mix)
snd_printk(KERN_ERR "failed to set DRC\n");
return -EINVAL;
}
+ DBG("(I) succeeded to set DRC (%u, %u)\n", val[0], val[1]);
return 0;
}
@@ -1134,7 +1140,8 @@ static long tumbler_find_device(const char *device, const char *platform,
gp->inactive_val = (*base) ? 0x4 : 0x5;
} else {
const u32 *prop = NULL;
- gp->active_state = 0;
+ gp->active_state = IS_G4DA
+ && !strncmp(device, "keywest-gpio1", 13);
gp->active_val = 0x4;
gp->inactive_val = 0x5;
/* Here are some crude hacks to extract the GPIO polarity and
@@ -1312,6 +1319,9 @@ static int __devinit tumbler_init(struct snd_pmac *chip)
if (irq <= NO_IRQ)
irq = tumbler_find_device("line-output-detect",
NULL, &mix->line_detect, 1);
+ if (IS_G4DA && irq <= NO_IRQ)
+ irq = tumbler_find_device("keywest-gpio16",
+ NULL, &mix->line_detect, 1);
mix->lineout_irq = irq;
tumbler_reset_audio(chip);
diff --git a/sound/soc/atmel/atmel-pcm.c b/sound/soc/atmel/atmel-pcm.c
index 3e6628c8e665..f6b3cc04b34b 100644
--- a/sound/soc/atmel/atmel-pcm.c
+++ b/sound/soc/atmel/atmel-pcm.c
@@ -415,9 +415,12 @@ static void atmel_pcm_free_dma_buffers(struct snd_pcm *pcm)
}
#ifdef CONFIG_PM
-static int atmel_pcm_suspend(struct snd_soc_dai *dai)
+static int atmel_pcm_suspend(struct snd_soc_dai_link *dai_link)
{
- struct snd_pcm_runtime *runtime = dai->runtime;
+ struct snd_pcm *pcm = dai_link->pcm;
+ struct snd_pcm_str *stream = &pcm->streams[0];
+ struct snd_pcm_substream *substream = stream->substream;
+ struct snd_pcm_runtime *runtime = substream->runtime;
struct atmel_runtime_data *prtd;
struct atmel_pcm_dma_params *params;
@@ -439,9 +442,12 @@ static int atmel_pcm_suspend(struct snd_soc_dai *dai)
return 0;
}
-static int atmel_pcm_resume(struct snd_soc_dai *dai)
+static int atmel_pcm_resume(struct snd_soc_dai_link *dai_link)
{
- struct snd_pcm_runtime *runtime = dai->runtime;
+ struct snd_pcm *pcm = dai_link->pcm;
+ struct snd_pcm_str *stream = &pcm->streams[0];
+ struct snd_pcm_substream *substream = stream->substream;
+ struct snd_pcm_runtime *runtime = substream->runtime;
struct atmel_runtime_data *prtd;
struct atmel_pcm_dma_params *params;
diff --git a/sound/soc/blackfin/Kconfig b/sound/soc/blackfin/Kconfig
index 97f1a251e446..8ef25025f3dc 100644
--- a/sound/soc/blackfin/Kconfig
+++ b/sound/soc/blackfin/Kconfig
@@ -49,13 +49,14 @@ config SND_BF5XX_SOC_AD1836
help
Say Y if you want to add support for SoC audio on BF5xx STAMP/EZKIT.
-config SND_BF5XX_SOC_AD1938
- tristate "SoC AD1938 Audio support for Blackfin"
+config SND_BF5XX_SOC_AD193X
+ tristate "SoC AD193X Audio support for Blackfin"
depends on SND_BF5XX_TDM
select SND_BF5XX_SOC_TDM
- select SND_SOC_AD1938
+ select SND_SOC_AD193X
help
- Say Y if you want to add support for AD1938 codec on Blackfin.
+ Say Y if you want to add support for AD193X codec on Blackfin.
+ This driver supports AD1936, AD1937, AD1938 and AD1939.
config SND_BF5XX_AC97
tristate "SoC AC97 Audio for the ADI BF5xx chip"
diff --git a/sound/soc/blackfin/Makefile b/sound/soc/blackfin/Makefile
index 87e30423912f..49af3f32aec8 100644
--- a/sound/soc/blackfin/Makefile
+++ b/sound/soc/blackfin/Makefile
@@ -20,10 +20,10 @@ snd-ad1836-objs := bf5xx-ad1836.o
snd-ad1980-objs := bf5xx-ad1980.o
snd-ssm2602-objs := bf5xx-ssm2602.o
snd-ad73311-objs := bf5xx-ad73311.o
-snd-ad1938-objs := bf5xx-ad1938.o
+snd-ad193x-objs := bf5xx-ad193x.o
obj-$(CONFIG_SND_BF5XX_SOC_AD1836) += snd-ad1836.o
obj-$(CONFIG_SND_BF5XX_SOC_AD1980) += snd-ad1980.o
obj-$(CONFIG_SND_BF5XX_SOC_SSM2602) += snd-ssm2602.o
obj-$(CONFIG_SND_BF5XX_SOC_AD73311) += snd-ad73311.o
-obj-$(CONFIG_SND_BF5XX_SOC_AD1938) += snd-ad1938.o
+obj-$(CONFIG_SND_BF5XX_SOC_AD193X) += snd-ad193x.o
diff --git a/sound/soc/blackfin/bf5xx-ad1938.c b/sound/soc/blackfin/bf5xx-ad193x.c
index 2ef1e5013b8c..b8c9060cfd8e 100644
--- a/sound/soc/blackfin/bf5xx-ad1938.c
+++ b/sound/soc/blackfin/bf5xx-ad193x.c
@@ -1,9 +1,9 @@
/*
- * File: sound/soc/blackfin/bf5xx-ad1938.c
+ * File: sound/soc/blackfin/bf5xx-ad193x.c
* Author: Barry Song <Barry.Song@analog.com>
*
* Created: Thur June 4 2009
- * Description: Board driver for ad1938 sound chip
+ * Description: Board driver for ad193x sound chip
*
* Bugs: Enter bugs at http://blackfin.uclinux.org/
*
@@ -38,15 +38,15 @@
#include <asm/dma.h>
#include <asm/portmux.h>
-#include "../codecs/ad1938.h"
+#include "../codecs/ad193x.h"
#include "bf5xx-sport.h"
#include "bf5xx-tdm-pcm.h"
#include "bf5xx-tdm.h"
-static struct snd_soc_card bf5xx_ad1938;
+static struct snd_soc_card bf5xx_ad193x;
-static int bf5xx_ad1938_startup(struct snd_pcm_substream *substream)
+static int bf5xx_ad193x_startup(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
@@ -55,7 +55,7 @@ static int bf5xx_ad1938_startup(struct snd_pcm_substream *substream)
return 0;
}
-static int bf5xx_ad1938_hw_params(struct snd_pcm_substream *substream,
+static int bf5xx_ad193x_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
@@ -89,61 +89,61 @@ static int bf5xx_ad1938_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static struct snd_soc_ops bf5xx_ad1938_ops = {
- .startup = bf5xx_ad1938_startup,
- .hw_params = bf5xx_ad1938_hw_params,
+static struct snd_soc_ops bf5xx_ad193x_ops = {
+ .startup = bf5xx_ad193x_startup,
+ .hw_params = bf5xx_ad193x_hw_params,
};
-static struct snd_soc_dai_link bf5xx_ad1938_dai = {
- .name = "ad1938",
- .stream_name = "AD1938",
+static struct snd_soc_dai_link bf5xx_ad193x_dai = {
+ .name = "ad193x",
+ .stream_name = "AD193X",
.cpu_dai = &bf5xx_tdm_dai,
- .codec_dai = &ad1938_dai,
- .ops = &bf5xx_ad1938_ops,
+ .codec_dai = &ad193x_dai,
+ .ops = &bf5xx_ad193x_ops,
};
-static struct snd_soc_card bf5xx_ad1938 = {
- .name = "bf5xx_ad1938",
+static struct snd_soc_card bf5xx_ad193x = {
+ .name = "bf5xx_ad193x",
.platform = &bf5xx_tdm_soc_platform,
- .dai_link = &bf5xx_ad1938_dai,
+ .dai_link = &bf5xx_ad193x_dai,
.num_links = 1,
};
-static struct snd_soc_device bf5xx_ad1938_snd_devdata = {
- .card = &bf5xx_ad1938,
- .codec_dev = &soc_codec_dev_ad1938,
+static struct snd_soc_device bf5xx_ad193x_snd_devdata = {
+ .card = &bf5xx_ad193x,
+ .codec_dev = &soc_codec_dev_ad193x,
};
-static struct platform_device *bfxx_ad1938_snd_device;
+static struct platform_device *bfxx_ad193x_snd_device;
-static int __init bf5xx_ad1938_init(void)
+static int __init bf5xx_ad193x_init(void)
{
int ret;
- bfxx_ad1938_snd_device = platform_device_alloc("soc-audio", -1);
- if (!bfxx_ad1938_snd_device)
+ bfxx_ad193x_snd_device = platform_device_alloc("soc-audio", -1);
+ if (!bfxx_ad193x_snd_device)
return -ENOMEM;
- platform_set_drvdata(bfxx_ad1938_snd_device, &bf5xx_ad1938_snd_devdata);
- bf5xx_ad1938_snd_devdata.dev = &bfxx_ad1938_snd_device->dev;
- ret = platform_device_add(bfxx_ad1938_snd_device);
+ platform_set_drvdata(bfxx_ad193x_snd_device, &bf5xx_ad193x_snd_devdata);
+ bf5xx_ad193x_snd_devdata.dev = &bfxx_ad193x_snd_device->dev;
+ ret = platform_device_add(bfxx_ad193x_snd_device);
if (ret)
- platform_device_put(bfxx_ad1938_snd_device);
+ platform_device_put(bfxx_ad193x_snd_device);
return ret;
}
-static void __exit bf5xx_ad1938_exit(void)
+static void __exit bf5xx_ad193x_exit(void)
{
- platform_device_unregister(bfxx_ad1938_snd_device);
+ platform_device_unregister(bfxx_ad193x_snd_device);
}
-module_init(bf5xx_ad1938_init);
-module_exit(bf5xx_ad1938_exit);
+module_init(bf5xx_ad193x_init);
+module_exit(bf5xx_ad193x_exit);
/* Module information */
MODULE_AUTHOR("Barry Song");
-MODULE_DESCRIPTION("ALSA SoC AD1938 board driver");
+MODULE_DESCRIPTION("ALSA SoC AD193X board driver");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/blackfin/bf5xx-sport.h b/sound/soc/blackfin/bf5xx-sport.h
index 2e63dea73e9c..a86e8cc0b2d3 100644
--- a/sound/soc/blackfin/bf5xx-sport.h
+++ b/sound/soc/blackfin/bf5xx-sport.h
@@ -34,33 +34,7 @@
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <asm/dma.h>
-
-struct sport_register {
- u16 tcr1; u16 reserved0;
- u16 tcr2; u16 reserved1;
- u16 tclkdiv; u16 reserved2;
- u16 tfsdiv; u16 reserved3;
- u32 tx;
- u32 reserved_l0;
- u32 rx;
- u32 reserved_l1;
- u16 rcr1; u16 reserved4;
- u16 rcr2; u16 reserved5;
- u16 rclkdiv; u16 reserved6;
- u16 rfsdiv; u16 reserved7;
- u16 stat; u16 reserved8;
- u16 chnl; u16 reserved9;
- u16 mcmc1; u16 reserved10;
- u16 mcmc2; u16 reserved11;
- u32 mtcs0;
- u32 mtcs1;
- u32 mtcs2;
- u32 mtcs3;
- u32 mrcs0;
- u32 mrcs1;
- u32 mrcs2;
- u32 mrcs3;
-};
+#include <asm/bfin_sport.h>
#define DESC_ELEMENT_COUNT 9
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 1743d565e996..31ac5538fe7e 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -13,7 +13,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_L3
select SND_SOC_AC97_CODEC if SND_SOC_AC97_BUS
select SND_SOC_AD1836 if SPI_MASTER
- select SND_SOC_AD1938 if SPI_MASTER
+ select SND_SOC_AD193X if SND_SOC_I2C_AND_SPI
select SND_SOC_AD1980 if SND_SOC_AC97_BUS
select SND_SOC_ADS117X
select SND_SOC_AD73311 if I2C
@@ -21,6 +21,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_AK4535 if I2C
select SND_SOC_AK4642 if I2C
select SND_SOC_AK4671 if I2C
+ select SND_SOC_CQ0093VC if MFD_DAVINCI_VOICECODEC
select SND_SOC_CS4270 if I2C
select SND_SOC_MAX9877 if I2C
select SND_SOC_DA7210 if I2C
@@ -34,6 +35,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_TPA6130A2 if I2C
select SND_SOC_TLV320DAC33 if I2C
select SND_SOC_TWL4030 if TWL4030_CORE
+ select SND_SOC_TWL6040 if TWL4030_CORE
select SND_SOC_UDA134X
select SND_SOC_UDA1380 if I2C
select SND_SOC_WM2000 if I2C
@@ -64,6 +66,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_WM8993 if I2C
select SND_SOC_WM8994 if MFD_WM8994
select SND_SOC_WM9081 if I2C
+ select SND_SOC_WM9090 if I2C
select SND_SOC_WM9705 if SND_SOC_AC97_BUS
select SND_SOC_WM9712 if SND_SOC_AC97_BUS
select SND_SOC_WM9713 if SND_SOC_AC97_BUS
@@ -90,7 +93,7 @@ config SND_SOC_AC97_CODEC
config SND_SOC_AD1836
tristate
-config SND_SOC_AD1938
+config SND_SOC_AD193X
tristate
config SND_SOC_AD1980
@@ -114,6 +117,9 @@ config SND_SOC_AK4642
config SND_SOC_AK4671
tristate
+config SND_SOC_CQ0093VC
+ tristate
+
# Cirrus Logic CS4270 Codec
config SND_SOC_CS4270
tristate
@@ -164,6 +170,9 @@ config SND_SOC_TWL4030
select TWL4030_CODEC
tristate
+config SND_SOC_TWL6040
+ tristate
+
config SND_SOC_UDA134X
tristate
@@ -269,3 +278,6 @@ config SND_SOC_TPA6130A2
config SND_SOC_WM2000
tristate
+
+config SND_SOC_WM9090
+ tristate
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index dd5ce6df6292..91429eab0707 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -1,6 +1,6 @@
snd-soc-ac97-objs := ac97.o
snd-soc-ad1836-objs := ad1836.o
-snd-soc-ad1938-objs := ad1938.o
+snd-soc-ad193x-objs := ad193x.o
snd-soc-ad1980-objs := ad1980.o
snd-soc-ad73311-objs := ad73311.o
snd-soc-ads117x-objs := ads117x.o
@@ -8,6 +8,7 @@ snd-soc-ak4104-objs := ak4104.o
snd-soc-ak4535-objs := ak4535.o
snd-soc-ak4642-objs := ak4642.o
snd-soc-ak4671-objs := ak4671.o
+snd-soc-cq93vc-objs := cq93vc.o
snd-soc-cs4270-objs := cs4270.o
snd-soc-cx20442-objs := cx20442.o
snd-soc-da7210-objs := da7210.o
@@ -21,6 +22,7 @@ snd-soc-tlv320aic26-objs := tlv320aic26.o
snd-soc-tlv320aic3x-objs := tlv320aic3x.o
snd-soc-tlv320dac33-objs := tlv320dac33.o
snd-soc-twl4030-objs := twl4030.o
+snd-soc-twl6040-objs := twl6040.o
snd-soc-uda134x-objs := uda134x.o
snd-soc-uda1380-objs := uda1380.o
snd-soc-wm8350-objs := wm8350.o
@@ -59,10 +61,11 @@ snd-soc-wm-hubs-objs := wm_hubs.o
snd-soc-max9877-objs := max9877.o
snd-soc-tpa6130a2-objs := tpa6130a2.o
snd-soc-wm2000-objs := wm2000.o
+snd-soc-wm9090-objs := wm9090.o
obj-$(CONFIG_SND_SOC_AC97_CODEC) += snd-soc-ac97.o
obj-$(CONFIG_SND_SOC_AD1836) += snd-soc-ad1836.o
-obj-$(CONFIG_SND_SOC_AD1938) += snd-soc-ad1938.o
+obj-$(CONFIG_SND_SOC_AD193X) += snd-soc-ad193x.o
obj-$(CONFIG_SND_SOC_AD1980) += snd-soc-ad1980.o
obj-$(CONFIG_SND_SOC_AD73311) += snd-soc-ad73311.o
obj-$(CONFIG_SND_SOC_ADS117X) += snd-soc-ads117x.o
@@ -70,6 +73,7 @@ obj-$(CONFIG_SND_SOC_AK4104) += snd-soc-ak4104.o
obj-$(CONFIG_SND_SOC_AK4535) += snd-soc-ak4535.o
obj-$(CONFIG_SND_SOC_AK4642) += snd-soc-ak4642.o
obj-$(CONFIG_SND_SOC_AK4671) += snd-soc-ak4671.o
+obj-$(CONFIG_SND_SOC_CQ0093VC) += snd-soc-cq93vc.o
obj-$(CONFIG_SND_SOC_CS4270) += snd-soc-cs4270.o
obj-$(CONFIG_SND_SOC_CX20442) += snd-soc-cx20442.o
obj-$(CONFIG_SND_SOC_DA7210) += snd-soc-da7210.o
@@ -83,6 +87,7 @@ obj-$(CONFIG_SND_SOC_TLV320AIC26) += snd-soc-tlv320aic26.o
obj-$(CONFIG_SND_SOC_TLV320AIC3X) += snd-soc-tlv320aic3x.o
obj-$(CONFIG_SND_SOC_TLV320DAC33) += snd-soc-tlv320dac33.o
obj-$(CONFIG_SND_SOC_TWL4030) += snd-soc-twl4030.o
+obj-$(CONFIG_SND_SOC_TWL6040) += snd-soc-twl6040.o
obj-$(CONFIG_SND_SOC_UDA134X) += snd-soc-uda134x.o
obj-$(CONFIG_SND_SOC_UDA1380) += snd-soc-uda1380.o
obj-$(CONFIG_SND_SOC_WM8350) += snd-soc-wm8350.o
@@ -121,3 +126,4 @@ obj-$(CONFIG_SND_SOC_WM_HUBS) += snd-soc-wm-hubs.o
obj-$(CONFIG_SND_SOC_MAX9877) += snd-soc-max9877.o
obj-$(CONFIG_SND_SOC_TPA6130A2) += snd-soc-tpa6130a2.o
obj-$(CONFIG_SND_SOC_WM2000) += snd-soc-wm2000.o
+obj-$(CONFIG_SND_SOC_WM9090) += snd-soc-wm9090.o
diff --git a/sound/soc/codecs/ad1836.c b/sound/soc/codecs/ad1836.c
index 11b62dee842c..217538423225 100644
--- a/sound/soc/codecs/ad1836.c
+++ b/sound/soc/codecs/ad1836.c
@@ -278,7 +278,7 @@ static int ad1836_register(struct ad1836_priv *ad1836)
mutex_init(&codec->mutex);
INIT_LIST_HEAD(&codec->dapm_widgets);
INIT_LIST_HEAD(&codec->dapm_paths);
- codec->private_data = ad1836;
+ snd_soc_codec_set_drvdata(codec, ad1836);
codec->reg_cache = ad1836->reg_cache;
codec->reg_cache_size = AD1836_NUM_REGS;
codec->name = "AD1836";
diff --git a/sound/soc/codecs/ad1938.c b/sound/soc/codecs/ad1938.c
deleted file mode 100644
index 240cd155b313..000000000000
--- a/sound/soc/codecs/ad1938.c
+++ /dev/null
@@ -1,522 +0,0 @@
-/*
- * File: sound/soc/codecs/ad1938.c
- * Author: Barry Song <Barry.Song@analog.com>
- *
- * Created: June 04 2009
- * Description: Driver for AD1938 sound chip
- *
- * Modified:
- * Copyright 2009 Analog Devices Inc.
- *
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/initval.h>
-#include <sound/soc.h>
-#include <sound/tlv.h>
-#include <sound/soc-dapm.h>
-#include <linux/spi/spi.h>
-#include "ad1938.h"
-
-/* codec private data */
-struct ad1938_priv {
- struct snd_soc_codec codec;
- u8 reg_cache[AD1938_NUM_REGS];
-};
-
-/* ad1938 register cache & default register settings */
-static const u8 ad1938_reg[AD1938_NUM_REGS] = {
- 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0,
-};
-
-static struct snd_soc_codec *ad1938_codec;
-struct snd_soc_codec_device soc_codec_dev_ad1938;
-static int ad1938_register(struct ad1938_priv *ad1938);
-static void ad1938_unregister(struct ad1938_priv *ad1938);
-
-/*
- * AD1938 volume/mute/de-emphasis etc. controls
- */
-static const char *ad1938_deemp[] = {"None", "48kHz", "44.1kHz", "32kHz"};
-
-static const struct soc_enum ad1938_deemp_enum =
- SOC_ENUM_SINGLE(AD1938_DAC_CTRL2, 1, 4, ad1938_deemp);
-
-static const struct snd_kcontrol_new ad1938_snd_controls[] = {
- /* DAC volume control */
- SOC_DOUBLE_R("DAC1 Volume", AD1938_DAC_L1_VOL,
- AD1938_DAC_R1_VOL, 0, 0xFF, 1),
- SOC_DOUBLE_R("DAC2 Volume", AD1938_DAC_L2_VOL,
- AD1938_DAC_R2_VOL, 0, 0xFF, 1),
- SOC_DOUBLE_R("DAC3 Volume", AD1938_DAC_L3_VOL,
- AD1938_DAC_R3_VOL, 0, 0xFF, 1),
- SOC_DOUBLE_R("DAC4 Volume", AD1938_DAC_L4_VOL,
- AD1938_DAC_R4_VOL, 0, 0xFF, 1),
-
- /* ADC switch control */
- SOC_DOUBLE("ADC1 Switch", AD1938_ADC_CTRL0, AD1938_ADCL1_MUTE,
- AD1938_ADCR1_MUTE, 1, 1),
- SOC_DOUBLE("ADC2 Switch", AD1938_ADC_CTRL0, AD1938_ADCL2_MUTE,
- AD1938_ADCR2_MUTE, 1, 1),
-
- /* DAC switch control */
- SOC_DOUBLE("DAC1 Switch", AD1938_DAC_CHNL_MUTE, AD1938_DACL1_MUTE,
- AD1938_DACR1_MUTE, 1, 1),
- SOC_DOUBLE("DAC2 Switch", AD1938_DAC_CHNL_MUTE, AD1938_DACL2_MUTE,
- AD1938_DACR2_MUTE, 1, 1),
- SOC_DOUBLE("DAC3 Switch", AD1938_DAC_CHNL_MUTE, AD1938_DACL3_MUTE,
- AD1938_DACR3_MUTE, 1, 1),
- SOC_DOUBLE("DAC4 Switch", AD1938_DAC_CHNL_MUTE, AD1938_DACL4_MUTE,
- AD1938_DACR4_MUTE, 1, 1),
-
- /* ADC high-pass filter */
- SOC_SINGLE("ADC High Pass Filter Switch", AD1938_ADC_CTRL0,
- AD1938_ADC_HIGHPASS_FILTER, 1, 0),
-
- /* DAC de-emphasis */
- SOC_ENUM("Playback Deemphasis", ad1938_deemp_enum),
-};
-
-static const struct snd_soc_dapm_widget ad1938_dapm_widgets[] = {
- SND_SOC_DAPM_DAC("DAC", "Playback", AD1938_DAC_CTRL0, 0, 1),
- SND_SOC_DAPM_ADC("ADC", "Capture", SND_SOC_NOPM, 0, 0),
- SND_SOC_DAPM_SUPPLY("PLL_PWR", AD1938_PLL_CLK_CTRL0, 0, 1, NULL, 0),
- SND_SOC_DAPM_SUPPLY("ADC_PWR", AD1938_ADC_CTRL0, 0, 1, NULL, 0),
- SND_SOC_DAPM_OUTPUT("DAC1OUT"),
- SND_SOC_DAPM_OUTPUT("DAC2OUT"),
- SND_SOC_DAPM_OUTPUT("DAC3OUT"),
- SND_SOC_DAPM_OUTPUT("DAC4OUT"),
- SND_SOC_DAPM_INPUT("ADC1IN"),
- SND_SOC_DAPM_INPUT("ADC2IN"),
-};
-
-static const struct snd_soc_dapm_route audio_paths[] = {
- { "DAC", NULL, "PLL_PWR" },
- { "ADC", NULL, "PLL_PWR" },
- { "DAC", NULL, "ADC_PWR" },
- { "ADC", NULL, "ADC_PWR" },
- { "DAC1OUT", "DAC1 Switch", "DAC" },
- { "DAC2OUT", "DAC2 Switch", "DAC" },
- { "DAC3OUT", "DAC3 Switch", "DAC" },
- { "DAC4OUT", "DAC4 Switch", "DAC" },
- { "ADC", "ADC1 Switch", "ADC1IN" },
- { "ADC", "ADC2 Switch", "ADC2IN" },
-};
-
-/*
- * DAI ops entries
- */
-
-static int ad1938_mute(struct snd_soc_dai *dai, int mute)
-{
- struct snd_soc_codec *codec = dai->codec;
- int reg;
-
- reg = snd_soc_read(codec, AD1938_DAC_CTRL2);
- reg = (mute > 0) ? reg | AD1938_DAC_MASTER_MUTE : reg &
- (~AD1938_DAC_MASTER_MUTE);
- snd_soc_write(codec, AD1938_DAC_CTRL2, reg);
-
- return 0;
-}
-
-static int ad1938_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
- unsigned int rx_mask, int slots, int width)
-{
- struct snd_soc_codec *codec = dai->codec;
- int dac_reg = snd_soc_read(codec, AD1938_DAC_CTRL1);
- int adc_reg = snd_soc_read(codec, AD1938_ADC_CTRL2);
-
- dac_reg &= ~AD1938_DAC_CHAN_MASK;
- adc_reg &= ~AD1938_ADC_CHAN_MASK;
-
- switch (slots) {
- case 2:
- dac_reg |= AD1938_DAC_2_CHANNELS << AD1938_DAC_CHAN_SHFT;
- adc_reg |= AD1938_ADC_2_CHANNELS << AD1938_ADC_CHAN_SHFT;
- break;
- case 4:
- dac_reg |= AD1938_DAC_4_CHANNELS << AD1938_DAC_CHAN_SHFT;
- adc_reg |= AD1938_ADC_4_CHANNELS << AD1938_ADC_CHAN_SHFT;
- break;
- case 8:
- dac_reg |= AD1938_DAC_8_CHANNELS << AD1938_DAC_CHAN_SHFT;
- adc_reg |= AD1938_ADC_8_CHANNELS << AD1938_ADC_CHAN_SHFT;
- break;
- case 16:
- dac_reg |= AD1938_DAC_16_CHANNELS << AD1938_DAC_CHAN_SHFT;
- adc_reg |= AD1938_ADC_16_CHANNELS << AD1938_ADC_CHAN_SHFT;
- break;
- default:
- return -EINVAL;
- }
-
- snd_soc_write(codec, AD1938_DAC_CTRL1, dac_reg);
- snd_soc_write(codec, AD1938_ADC_CTRL2, adc_reg);
-
- return 0;
-}
-
-static int ad1938_set_dai_fmt(struct snd_soc_dai *codec_dai,
- unsigned int fmt)
-{
- struct snd_soc_codec *codec = codec_dai->codec;
- int adc_reg, dac_reg;
-
- adc_reg = snd_soc_read(codec, AD1938_ADC_CTRL2);
- dac_reg = snd_soc_read(codec, AD1938_DAC_CTRL1);
-
- /* At present, the driver only support AUX ADC mode(SND_SOC_DAIFMT_I2S
- * with TDM) and ADC&DAC TDM mode(SND_SOC_DAIFMT_DSP_A)
- */
- switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
- case SND_SOC_DAIFMT_I2S:
- adc_reg &= ~AD1938_ADC_SERFMT_MASK;
- adc_reg |= AD1938_ADC_SERFMT_TDM;
- break;
- case SND_SOC_DAIFMT_DSP_A:
- adc_reg &= ~AD1938_ADC_SERFMT_MASK;
- adc_reg |= AD1938_ADC_SERFMT_AUX;
- break;
- default:
- return -EINVAL;
- }
-
- switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
- case SND_SOC_DAIFMT_NB_NF: /* normal bit clock + frame */
- adc_reg &= ~AD1938_ADC_LEFT_HIGH;
- adc_reg &= ~AD1938_ADC_BCLK_INV;
- dac_reg &= ~AD1938_DAC_LEFT_HIGH;
- dac_reg &= ~AD1938_DAC_BCLK_INV;
- break;
- case SND_SOC_DAIFMT_NB_IF: /* normal bclk + invert frm */
- adc_reg |= AD1938_ADC_LEFT_HIGH;
- adc_reg &= ~AD1938_ADC_BCLK_INV;
- dac_reg |= AD1938_DAC_LEFT_HIGH;
- dac_reg &= ~AD1938_DAC_BCLK_INV;
- break;
- case SND_SOC_DAIFMT_IB_NF: /* invert bclk + normal frm */
- adc_reg &= ~AD1938_ADC_LEFT_HIGH;
- adc_reg |= AD1938_ADC_BCLK_INV;
- dac_reg &= ~AD1938_DAC_LEFT_HIGH;
- dac_reg |= AD1938_DAC_BCLK_INV;
- break;
-
- case SND_SOC_DAIFMT_IB_IF: /* invert bclk + frm */
- adc_reg |= AD1938_ADC_LEFT_HIGH;
- adc_reg |= AD1938_ADC_BCLK_INV;
- dac_reg |= AD1938_DAC_LEFT_HIGH;
- dac_reg |= AD1938_DAC_BCLK_INV;
- break;
- default:
- return -EINVAL;
- }
-
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBM_CFM: /* codec clk & frm master */
- adc_reg |= AD1938_ADC_LCR_MASTER;
- adc_reg |= AD1938_ADC_BCLK_MASTER;
- dac_reg |= AD1938_DAC_LCR_MASTER;
- dac_reg |= AD1938_DAC_BCLK_MASTER;
- break;
- case SND_SOC_DAIFMT_CBS_CFM: /* codec clk slave & frm master */
- adc_reg |= AD1938_ADC_LCR_MASTER;
- adc_reg &= ~AD1938_ADC_BCLK_MASTER;
- dac_reg |= AD1938_DAC_LCR_MASTER;
- dac_reg &= ~AD1938_DAC_BCLK_MASTER;
- break;
- case SND_SOC_DAIFMT_CBM_CFS: /* codec clk master & frame slave */
- adc_reg &= ~AD1938_ADC_LCR_MASTER;
- adc_reg |= AD1938_ADC_BCLK_MASTER;
- dac_reg &= ~AD1938_DAC_LCR_MASTER;
- dac_reg |= AD1938_DAC_BCLK_MASTER;
- break;
- case SND_SOC_DAIFMT_CBS_CFS: /* codec clk & frm slave */
- adc_reg &= ~AD1938_ADC_LCR_MASTER;
- adc_reg &= ~AD1938_ADC_BCLK_MASTER;
- dac_reg &= ~AD1938_DAC_LCR_MASTER;
- dac_reg &= ~AD1938_DAC_BCLK_MASTER;
- break;
- default:
- return -EINVAL;
- }
-
- snd_soc_write(codec, AD1938_ADC_CTRL2, adc_reg);
- snd_soc_write(codec, AD1938_DAC_CTRL1, dac_reg);
-
- return 0;
-}
-
-static int ad1938_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params,
- struct snd_soc_dai *dai)
-{
- int word_len = 0, reg = 0;
-
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_device *socdev = rtd->socdev;
- struct snd_soc_codec *codec = socdev->card->codec;
-
- /* bit size */
- switch (params_format(params)) {
- case SNDRV_PCM_FORMAT_S16_LE:
- word_len = 3;
- break;
- case SNDRV_PCM_FORMAT_S20_3LE:
- word_len = 1;
- break;
- case SNDRV_PCM_FORMAT_S24_LE:
- case SNDRV_PCM_FORMAT_S32_LE:
- word_len = 0;
- break;
- }
-
- reg = snd_soc_read(codec, AD1938_DAC_CTRL2);
- reg = (reg & (~AD1938_DAC_WORD_LEN_MASK)) | word_len;
- snd_soc_write(codec, AD1938_DAC_CTRL2, reg);
-
- reg = snd_soc_read(codec, AD1938_ADC_CTRL1);
- reg = (reg & (~AD1938_ADC_WORD_LEN_MASK)) | word_len;
- snd_soc_write(codec, AD1938_ADC_CTRL1, reg);
-
- return 0;
-}
-
-static int __devinit ad1938_spi_probe(struct spi_device *spi)
-{
- struct snd_soc_codec *codec;
- struct ad1938_priv *ad1938;
-
- ad1938 = kzalloc(sizeof(struct ad1938_priv), GFP_KERNEL);
- if (ad1938 == NULL)
- return -ENOMEM;
-
- codec = &ad1938->codec;
- codec->control_data = spi;
- codec->dev = &spi->dev;
-
- dev_set_drvdata(&spi->dev, ad1938);
-
- return ad1938_register(ad1938);
-}
-
-static int __devexit ad1938_spi_remove(struct spi_device *spi)
-{
- struct ad1938_priv *ad1938 = dev_get_drvdata(&spi->dev);
-
- ad1938_unregister(ad1938);
- return 0;
-}
-
-static struct spi_driver ad1938_spi_driver = {
- .driver = {
- .name = "ad1938",
- .owner = THIS_MODULE,
- },
- .probe = ad1938_spi_probe,
- .remove = __devexit_p(ad1938_spi_remove),
-};
-
-static struct snd_soc_dai_ops ad1938_dai_ops = {
- .hw_params = ad1938_hw_params,
- .digital_mute = ad1938_mute,
- .set_tdm_slot = ad1938_set_tdm_slot,
- .set_fmt = ad1938_set_dai_fmt,
-};
-
-/* codec DAI instance */
-struct snd_soc_dai ad1938_dai = {
- .name = "AD1938",
- .playback = {
- .stream_name = "Playback",
- .channels_min = 2,
- .channels_max = 8,
- .rates = SNDRV_PCM_RATE_48000,
- .formats = SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE,
- },
- .capture = {
- .stream_name = "Capture",
- .channels_min = 2,
- .channels_max = 4,
- .rates = SNDRV_PCM_RATE_48000,
- .formats = SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE,
- },
- .ops = &ad1938_dai_ops,
-};
-EXPORT_SYMBOL_GPL(ad1938_dai);
-
-static int ad1938_register(struct ad1938_priv *ad1938)
-{
- int ret;
- struct snd_soc_codec *codec = &ad1938->codec;
-
- if (ad1938_codec) {
- dev_err(codec->dev, "Another ad1938 is registered\n");
- return -EINVAL;
- }
-
- mutex_init(&codec->mutex);
- INIT_LIST_HEAD(&codec->dapm_widgets);
- INIT_LIST_HEAD(&codec->dapm_paths);
- codec->private_data = ad1938;
- codec->reg_cache = ad1938->reg_cache;
- codec->reg_cache_size = AD1938_NUM_REGS;
- codec->name = "AD1938";
- codec->owner = THIS_MODULE;
- codec->dai = &ad1938_dai;
- codec->num_dai = 1;
- INIT_LIST_HEAD(&codec->dapm_widgets);
- INIT_LIST_HEAD(&codec->dapm_paths);
-
- ad1938_dai.dev = codec->dev;
- ad1938_codec = codec;
-
- memcpy(codec->reg_cache, ad1938_reg, AD1938_NUM_REGS);
-
- ret = snd_soc_codec_set_cache_io(codec, 16, 8, SND_SOC_SPI);
- if (ret < 0) {
- dev_err(codec->dev, "failed to set cache I/O: %d\n",
- ret);
- kfree(ad1938);
- return ret;
- }
-
- /* default setting for ad1938 */
-
- /* unmute dac channels */
- snd_soc_write(codec, AD1938_DAC_CHNL_MUTE, 0x0);
- /* de-emphasis: 48kHz, powedown dac */
- snd_soc_write(codec, AD1938_DAC_CTRL2, 0x1A);
- /* powerdown dac, dac in tdm mode */
- snd_soc_write(codec, AD1938_DAC_CTRL0, 0x41);
- /* high-pass filter enable */
- snd_soc_write(codec, AD1938_ADC_CTRL0, 0x3);
- /* sata delay=1, adc aux mode */
- snd_soc_write(codec, AD1938_ADC_CTRL1, 0x43);
- /* pll input: mclki/xi */
- snd_soc_write(codec, AD1938_PLL_CLK_CTRL0, 0x9D);
- snd_soc_write(codec, AD1938_PLL_CLK_CTRL1, 0x04);
-
- ret = snd_soc_register_codec(codec);
- if (ret != 0) {
- dev_err(codec->dev, "Failed to register codec: %d\n", ret);
- kfree(ad1938);
- return ret;
- }
-
- ret = snd_soc_register_dai(&ad1938_dai);
- if (ret != 0) {
- dev_err(codec->dev, "Failed to register DAI: %d\n", ret);
- snd_soc_unregister_codec(codec);
- kfree(ad1938);
- return ret;
- }
-
- return 0;
-}
-
-static void ad1938_unregister(struct ad1938_priv *ad1938)
-{
- snd_soc_unregister_dai(&ad1938_dai);
- snd_soc_unregister_codec(&ad1938->codec);
- kfree(ad1938);
- ad1938_codec = NULL;
-}
-
-static int ad1938_probe(struct platform_device *pdev)
-{
- struct snd_soc_device *socdev = platform_get_drvdata(pdev);
- struct snd_soc_codec *codec;
- int ret = 0;
-
- if (ad1938_codec == NULL) {
- dev_err(&pdev->dev, "Codec device not registered\n");
- return -ENODEV;
- }
-
- socdev->card->codec = ad1938_codec;
- codec = ad1938_codec;
-
- /* register pcms */
- ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
- if (ret < 0) {
- dev_err(codec->dev, "failed to create pcms: %d\n", ret);
- goto pcm_err;
- }
-
- snd_soc_add_controls(codec, ad1938_snd_controls,
- ARRAY_SIZE(ad1938_snd_controls));
- snd_soc_dapm_new_controls(codec, ad1938_dapm_widgets,
- ARRAY_SIZE(ad1938_dapm_widgets));
- snd_soc_dapm_add_routes(codec, audio_paths, ARRAY_SIZE(audio_paths));
-
-
-pcm_err:
- return ret;
-}
-
-/* power down chip */
-static int ad1938_remove(struct platform_device *pdev)
-{
- struct snd_soc_device *socdev = platform_get_drvdata(pdev);
-
- snd_soc_free_pcms(socdev);
- snd_soc_dapm_free(socdev);
-
- return 0;
-}
-
-struct snd_soc_codec_device soc_codec_dev_ad1938 = {
- .probe = ad1938_probe,
- .remove = ad1938_remove,
-};
-EXPORT_SYMBOL_GPL(soc_codec_dev_ad1938);
-
-static int __init ad1938_init(void)
-{
- int ret;
-
- ret = spi_register_driver(&ad1938_spi_driver);
- if (ret != 0) {
- printk(KERN_ERR "Failed to register ad1938 SPI driver: %d\n",
- ret);
- }
-
- return ret;
-}
-module_init(ad1938_init);
-
-static void __exit ad1938_exit(void)
-{
- spi_unregister_driver(&ad1938_spi_driver);
-}
-module_exit(ad1938_exit);
-
-MODULE_DESCRIPTION("ASoC ad1938 driver");
-MODULE_AUTHOR("Barry Song ");
-MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/ad1938.h b/sound/soc/codecs/ad1938.h
deleted file mode 100644
index fe3c48cd2d5b..000000000000
--- a/sound/soc/codecs/ad1938.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * File: sound/soc/codecs/ad1836.h
- * Based on:
- * Author: Barry Song <Barry.Song@analog.com>
- *
- * Created: May 25, 2009
- * Description: definitions for AD1938 registers
- *
- * Modified:
- *
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef __AD1938_H__
-#define __AD1938_H__
-
-#define AD1938_PLL_CLK_CTRL0 0
-#define AD1938_PLL_POWERDOWN 0x01
-#define AD1938_PLL_CLK_CTRL1 1
-#define AD1938_DAC_CTRL0 2
-#define AD1938_DAC_POWERDOWN 0x01
-#define AD1938_DAC_SERFMT_MASK 0xC0
-#define AD1938_DAC_SERFMT_STEREO (0 << 6)
-#define AD1938_DAC_SERFMT_TDM (1 << 6)
-#define AD1938_DAC_CTRL1 3
-#define AD1938_DAC_2_CHANNELS 0
-#define AD1938_DAC_4_CHANNELS 1
-#define AD1938_DAC_8_CHANNELS 2
-#define AD1938_DAC_16_CHANNELS 3
-#define AD1938_DAC_CHAN_SHFT 1
-#define AD1938_DAC_CHAN_MASK (3 << AD1938_DAC_CHAN_SHFT)
-#define AD1938_DAC_LCR_MASTER (1 << 4)
-#define AD1938_DAC_BCLK_MASTER (1 << 5)
-#define AD1938_DAC_LEFT_HIGH (1 << 3)
-#define AD1938_DAC_BCLK_INV (1 << 7)
-#define AD1938_DAC_CTRL2 4
-#define AD1938_DAC_WORD_LEN_MASK 0xC
-#define AD1938_DAC_MASTER_MUTE 1
-#define AD1938_DAC_CHNL_MUTE 5
-#define AD1938_DACL1_MUTE 0
-#define AD1938_DACR1_MUTE 1
-#define AD1938_DACL2_MUTE 2
-#define AD1938_DACR2_MUTE 3
-#define AD1938_DACL3_MUTE 4
-#define AD1938_DACR3_MUTE 5
-#define AD1938_DACL4_MUTE 6
-#define AD1938_DACR4_MUTE 7
-#define AD1938_DAC_L1_VOL 6
-#define AD1938_DAC_R1_VOL 7
-#define AD1938_DAC_L2_VOL 8
-#define AD1938_DAC_R2_VOL 9
-#define AD1938_DAC_L3_VOL 10
-#define AD1938_DAC_R3_VOL 11
-#define AD1938_DAC_L4_VOL 12
-#define AD1938_DAC_R4_VOL 13
-#define AD1938_ADC_CTRL0 14
-#define AD1938_ADC_POWERDOWN 0x01
-#define AD1938_ADC_HIGHPASS_FILTER 1
-#define AD1938_ADCL1_MUTE 2
-#define AD1938_ADCR1_MUTE 3
-#define AD1938_ADCL2_MUTE 4
-#define AD1938_ADCR2_MUTE 5
-#define AD1938_ADC_CTRL1 15
-#define AD1938_ADC_SERFMT_MASK 0x60
-#define AD1938_ADC_SERFMT_STEREO (0 << 5)
-#define AD1938_ADC_SERFMT_TDM (1 << 2)
-#define AD1938_ADC_SERFMT_AUX (2 << 5)
-#define AD1938_ADC_WORD_LEN_MASK 0x3
-#define AD1938_ADC_CTRL2 16
-#define AD1938_ADC_2_CHANNELS 0
-#define AD1938_ADC_4_CHANNELS 1
-#define AD1938_ADC_8_CHANNELS 2
-#define AD1938_ADC_16_CHANNELS 3
-#define AD1938_ADC_CHAN_SHFT 4
-#define AD1938_ADC_CHAN_MASK (3 << AD1938_ADC_CHAN_SHFT)
-#define AD1938_ADC_LCR_MASTER (1 << 3)
-#define AD1938_ADC_BCLK_MASTER (1 << 6)
-#define AD1938_ADC_LEFT_HIGH (1 << 2)
-#define AD1938_ADC_BCLK_INV (1 << 1)
-
-#define AD1938_NUM_REGS 17
-
-extern struct snd_soc_dai ad1938_dai;
-extern struct snd_soc_codec_device soc_codec_dev_ad1938;
-#endif
diff --git a/sound/soc/codecs/ad193x.c b/sound/soc/codecs/ad193x.c
new file mode 100644
index 000000000000..c8ca1142b2f4
--- /dev/null
+++ b/sound/soc/codecs/ad193x.c
@@ -0,0 +1,547 @@
+/*
+ * AD193X Audio Codec driver supporting AD1936/7/8/9
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/initval.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+#include <sound/soc-dapm.h>
+#include "ad193x.h"
+
+/* codec private data */
+struct ad193x_priv {
+ struct snd_soc_codec codec;
+ u8 reg_cache[AD193X_NUM_REGS];
+};
+
+/* ad193x register cache & default register settings */
+static const u8 ad193x_reg[AD193X_NUM_REGS] = {
+ 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0,
+};
+
+static struct snd_soc_codec *ad193x_codec;
+struct snd_soc_codec_device soc_codec_dev_ad193x;
+
+/*
+ * AD193X volume/mute/de-emphasis etc. controls
+ */
+static const char *ad193x_deemp[] = {"None", "48kHz", "44.1kHz", "32kHz"};
+
+static const struct soc_enum ad193x_deemp_enum =
+ SOC_ENUM_SINGLE(AD193X_DAC_CTRL2, 1, 4, ad193x_deemp);
+
+static const struct snd_kcontrol_new ad193x_snd_controls[] = {
+ /* DAC volume control */
+ SOC_DOUBLE_R("DAC1 Volume", AD193X_DAC_L1_VOL,
+ AD193X_DAC_R1_VOL, 0, 0xFF, 1),
+ SOC_DOUBLE_R("DAC2 Volume", AD193X_DAC_L2_VOL,
+ AD193X_DAC_R2_VOL, 0, 0xFF, 1),
+ SOC_DOUBLE_R("DAC3 Volume", AD193X_DAC_L3_VOL,
+ AD193X_DAC_R3_VOL, 0, 0xFF, 1),
+ SOC_DOUBLE_R("DAC4 Volume", AD193X_DAC_L4_VOL,
+ AD193X_DAC_R4_VOL, 0, 0xFF, 1),
+
+ /* ADC switch control */
+ SOC_DOUBLE("ADC1 Switch", AD193X_ADC_CTRL0, AD193X_ADCL1_MUTE,
+ AD193X_ADCR1_MUTE, 1, 1),
+ SOC_DOUBLE("ADC2 Switch", AD193X_ADC_CTRL0, AD193X_ADCL2_MUTE,
+ AD193X_ADCR2_MUTE, 1, 1),
+
+ /* DAC switch control */
+ SOC_DOUBLE("DAC1 Switch", AD193X_DAC_CHNL_MUTE, AD193X_DACL1_MUTE,
+ AD193X_DACR1_MUTE, 1, 1),
+ SOC_DOUBLE("DAC2 Switch", AD193X_DAC_CHNL_MUTE, AD193X_DACL2_MUTE,
+ AD193X_DACR2_MUTE, 1, 1),
+ SOC_DOUBLE("DAC3 Switch", AD193X_DAC_CHNL_MUTE, AD193X_DACL3_MUTE,
+ AD193X_DACR3_MUTE, 1, 1),
+ SOC_DOUBLE("DAC4 Switch", AD193X_DAC_CHNL_MUTE, AD193X_DACL4_MUTE,
+ AD193X_DACR4_MUTE, 1, 1),
+
+ /* ADC high-pass filter */
+ SOC_SINGLE("ADC High Pass Filter Switch", AD193X_ADC_CTRL0,
+ AD193X_ADC_HIGHPASS_FILTER, 1, 0),
+
+ /* DAC de-emphasis */
+ SOC_ENUM("Playback Deemphasis", ad193x_deemp_enum),
+};
+
+static const struct snd_soc_dapm_widget ad193x_dapm_widgets[] = {
+ SND_SOC_DAPM_DAC("DAC", "Playback", AD193X_DAC_CTRL0, 0, 1),
+ SND_SOC_DAPM_ADC("ADC", "Capture", SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_SUPPLY("PLL_PWR", AD193X_PLL_CLK_CTRL0, 0, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("ADC_PWR", AD193X_ADC_CTRL0, 0, 1, NULL, 0),
+ SND_SOC_DAPM_OUTPUT("DAC1OUT"),
+ SND_SOC_DAPM_OUTPUT("DAC2OUT"),
+ SND_SOC_DAPM_OUTPUT("DAC3OUT"),
+ SND_SOC_DAPM_OUTPUT("DAC4OUT"),
+ SND_SOC_DAPM_INPUT("ADC1IN"),
+ SND_SOC_DAPM_INPUT("ADC2IN"),
+};
+
+static const struct snd_soc_dapm_route audio_paths[] = {
+ { "DAC", NULL, "PLL_PWR" },
+ { "ADC", NULL, "PLL_PWR" },
+ { "DAC", NULL, "ADC_PWR" },
+ { "ADC", NULL, "ADC_PWR" },
+ { "DAC1OUT", "DAC1 Switch", "DAC" },
+ { "DAC2OUT", "DAC2 Switch", "DAC" },
+ { "DAC3OUT", "DAC3 Switch", "DAC" },
+ { "DAC4OUT", "DAC4 Switch", "DAC" },
+ { "ADC", "ADC1 Switch", "ADC1IN" },
+ { "ADC", "ADC2 Switch", "ADC2IN" },
+};
+
+/*
+ * DAI ops entries
+ */
+
+static int ad193x_mute(struct snd_soc_dai *dai, int mute)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ int reg;
+
+ reg = snd_soc_read(codec, AD193X_DAC_CTRL2);
+ reg = (mute > 0) ? reg | AD193X_DAC_MASTER_MUTE : reg &
+ (~AD193X_DAC_MASTER_MUTE);
+ snd_soc_write(codec, AD193X_DAC_CTRL2, reg);
+
+ return 0;
+}
+
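+/*
+ * Program the DAC and ADC channel-count fields to match the number of
+ * TDM slots requested by the machine driver (2/4/8/16 slots).
+ */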
+static int ad193x_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
+ unsigned int rx_mask, int slots, int width)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ int dac_reg = snd_soc_read(codec, AD193X_DAC_CTRL1);
+ int adc_reg = snd_soc_read(codec, AD193X_ADC_CTRL2);
+
+ dac_reg &= ~AD193X_DAC_CHAN_MASK;
+ adc_reg &= ~AD193X_ADC_CHAN_MASK;
+
+ switch (slots) {
+ case 2:
+ dac_reg |= AD193X_DAC_2_CHANNELS << AD193X_DAC_CHAN_SHFT;
+ adc_reg |= AD193X_ADC_2_CHANNELS << AD193X_ADC_CHAN_SHFT;
+ break;
+ case 4:
+ dac_reg |= AD193X_DAC_4_CHANNELS << AD193X_DAC_CHAN_SHFT;
+ adc_reg |= AD193X_ADC_4_CHANNELS << AD193X_ADC_CHAN_SHFT;
+ break;
+ case 8:
+ dac_reg |= AD193X_DAC_8_CHANNELS << AD193X_DAC_CHAN_SHFT;
+ adc_reg |= AD193X_ADC_8_CHANNELS << AD193X_ADC_CHAN_SHFT;
+ break;
+ case 16:
+ dac_reg |= AD193X_DAC_16_CHANNELS << AD193X_DAC_CHAN_SHFT;
+ adc_reg |= AD193X_ADC_16_CHANNELS << AD193X_ADC_CHAN_SHFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ snd_soc_write(codec, AD193X_DAC_CTRL1, dac_reg);
+ snd_soc_write(codec, AD193X_ADC_CTRL2, adc_reg);
+
+ return 0;
+}
+
+static int ad193x_set_dai_fmt(struct snd_soc_dai *codec_dai,
+ unsigned int fmt)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ int adc_reg1, adc_reg2, dac_reg;
+
+ adc_reg1 = snd_soc_read(codec, AD193X_ADC_CTRL1);
+ adc_reg2 = snd_soc_read(codec, AD193X_ADC_CTRL2);
+ dac_reg = snd_soc_read(codec, AD193X_DAC_CTRL1);
+
+ /* At present, the driver only supports AUX ADC mode (SND_SOC_DAIFMT_I2S
+ * with TDM) and ADC & DAC TDM mode (SND_SOC_DAIFMT_DSP_A).
+ */
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ adc_reg1 &= ~AD193X_ADC_SERFMT_MASK;
+ adc_reg1 |= AD193X_ADC_SERFMT_TDM;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ adc_reg1 &= ~AD193X_ADC_SERFMT_MASK;
+ adc_reg1 |= AD193X_ADC_SERFMT_AUX;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF: /* normal bit clock + frame */
+ adc_reg2 &= ~AD193X_ADC_LEFT_HIGH;
+ adc_reg2 &= ~AD193X_ADC_BCLK_INV;
+ dac_reg &= ~AD193X_DAC_LEFT_HIGH;
+ dac_reg &= ~AD193X_DAC_BCLK_INV;
+ break;
+ case SND_SOC_DAIFMT_NB_IF: /* normal bclk + invert frm */
+ adc_reg2 |= AD193X_ADC_LEFT_HIGH;
+ adc_reg2 &= ~AD193X_ADC_BCLK_INV;
+ dac_reg |= AD193X_DAC_LEFT_HIGH;
+ dac_reg &= ~AD193X_DAC_BCLK_INV;
+ break;
+ case SND_SOC_DAIFMT_IB_NF: /* invert bclk + normal frm */
+ adc_reg2 &= ~AD193X_ADC_LEFT_HIGH;
+ adc_reg2 |= AD193X_ADC_BCLK_INV;
+ dac_reg &= ~AD193X_DAC_LEFT_HIGH;
+ dac_reg |= AD193X_DAC_BCLK_INV;
+ break;
+
+ case SND_SOC_DAIFMT_IB_IF: /* invert bclk + frm */
+ adc_reg2 |= AD193X_ADC_LEFT_HIGH;
+ adc_reg2 |= AD193X_ADC_BCLK_INV;
+ dac_reg |= AD193X_DAC_LEFT_HIGH;
+ dac_reg |= AD193X_DAC_BCLK_INV;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM: /* codec clk & frm master */
+ adc_reg2 |= AD193X_ADC_LCR_MASTER;
+ adc_reg2 |= AD193X_ADC_BCLK_MASTER;
+ dac_reg |= AD193X_DAC_LCR_MASTER;
+ dac_reg |= AD193X_DAC_BCLK_MASTER;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFM: /* codec clk slave & frm master */
+ adc_reg2 |= AD193X_ADC_LCR_MASTER;
+ adc_reg2 &= ~AD193X_ADC_BCLK_MASTER;
+ dac_reg |= AD193X_DAC_LCR_MASTER;
+ dac_reg &= ~AD193X_DAC_BCLK_MASTER;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFS: /* codec clk master & frame slave */
+ adc_reg2 &= ~AD193X_ADC_LCR_MASTER;
+ adc_reg2 |= AD193X_ADC_BCLK_MASTER;
+ dac_reg &= ~AD193X_DAC_LCR_MASTER;
+ dac_reg |= AD193X_DAC_BCLK_MASTER;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS: /* codec clk & frm slave */
+ adc_reg2 &= ~AD193X_ADC_LCR_MASTER;
+ adc_reg2 &= ~AD193X_ADC_BCLK_MASTER;
+ dac_reg &= ~AD193X_DAC_LCR_MASTER;
+ dac_reg &= ~AD193X_DAC_BCLK_MASTER;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ snd_soc_write(codec, AD193X_ADC_CTRL1, adc_reg1);
+ snd_soc_write(codec, AD193X_ADC_CTRL2, adc_reg2);
+ snd_soc_write(codec, AD193X_DAC_CTRL1, dac_reg);
+
+ return 0;
+}
+
+static int ad193x_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ int word_len = 0, reg = 0;
+
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_device *socdev = rtd->socdev;
+ struct snd_soc_codec *codec = socdev->card->codec;
+
+ /* bit size */
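+ /* word length field used below: 0 = 24/32-bit, 1 = 20-bit, 3 = 16-bit */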
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ word_len = 3;
+ break;
+ case SNDRV_PCM_FORMAT_S20_3LE:
+ word_len = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ case SNDRV_PCM_FORMAT_S32_LE:
+ word_len = 0;
+ break;
+ }
+
+ reg = snd_soc_read(codec, AD193X_DAC_CTRL2);
+ reg = (reg & (~AD193X_DAC_WORD_LEN_MASK)) | word_len;
+ snd_soc_write(codec, AD193X_DAC_CTRL2, reg);
+
+ reg = snd_soc_read(codec, AD193X_ADC_CTRL1);
+ reg = (reg & (~AD193X_ADC_WORD_LEN_MASK)) | word_len;
+ snd_soc_write(codec, AD193X_ADC_CTRL1, reg);
+
+ return 0;
+}
+
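+/*
+ * Common probe path shared by the SPI and I2C bus glue further down:
+ * allocate the private data, set up register-cache I/O for the given
+ * control bus, write the default register settings and register the
+ * codec and DAI.
+ */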
+static int ad193x_bus_probe(struct device *dev, void *ctrl_data, int bus_type)
+{
+ struct snd_soc_codec *codec;
+ struct ad193x_priv *ad193x;
+ int ret;
+
+ if (ad193x_codec) {
+ dev_err(dev, "Another ad193x is registered\n");
+ return -EINVAL;
+ }
+
+ ad193x = kzalloc(sizeof(struct ad193x_priv), GFP_KERNEL);
+ if (ad193x == NULL)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, ad193x);
+
+ codec = &ad193x->codec;
+ mutex_init(&codec->mutex);
+ codec->control_data = ctrl_data;
+ codec->dev = dev;
+ snd_soc_codec_set_drvdata(codec, ad193x);
+ codec->reg_cache = ad193x->reg_cache;
+ codec->reg_cache_size = AD193X_NUM_REGS;
+ codec->name = "AD193X";
+ codec->owner = THIS_MODULE;
+ codec->dai = &ad193x_dai;
+ codec->num_dai = 1;
+ INIT_LIST_HEAD(&codec->dapm_widgets);
+ INIT_LIST_HEAD(&codec->dapm_paths);
+
+ ad193x_dai.dev = codec->dev;
+ ad193x_codec = codec;
+
+ memcpy(codec->reg_cache, ad193x_reg, AD193X_NUM_REGS);
+
+ if (bus_type == SND_SOC_I2C)
+ ret = snd_soc_codec_set_cache_io(codec, 8, 8, bus_type);
+ else
+ ret = snd_soc_codec_set_cache_io(codec, 16, 8, bus_type);
+ if (ret < 0) {
+ dev_err(codec->dev, "failed to set cache I/O: %d\n",
+ ret);
+ kfree(ad193x);
+ return ret;
+ }
+
+ /* default setting for ad193x */
+
+ /* unmute dac channels */
+ snd_soc_write(codec, AD193X_DAC_CHNL_MUTE, 0x0);
+ /* de-emphasis: 48kHz, powerdown dac */
+ snd_soc_write(codec, AD193X_DAC_CTRL2, 0x1A);
+ /* powerdown dac, dac in tdm mode */
+ snd_soc_write(codec, AD193X_DAC_CTRL0, 0x41);
+ /* high-pass filter enable */
+ snd_soc_write(codec, AD193X_ADC_CTRL0, 0x3);
+ /* sdata delay=1, adc aux mode */
+ snd_soc_write(codec, AD193X_ADC_CTRL1, 0x43);
+ /* pll input: mclki/xi */
+ snd_soc_write(codec, AD193X_PLL_CLK_CTRL0, 0x99); /* mclk=24.576MHz: 0x9D; mclk=12.288MHz: 0x99 */
+ snd_soc_write(codec, AD193X_PLL_CLK_CTRL1, 0x04);
+
+ ret = snd_soc_register_codec(codec);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to register codec: %d\n", ret);
+ kfree(ad193x);
+ return ret;
+ }
+
+ ret = snd_soc_register_dai(&ad193x_dai);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to register DAI: %d\n", ret);
+ snd_soc_unregister_codec(codec);
+ kfree(ad193x);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ad193x_bus_remove(struct device *dev)
+{
+ struct ad193x_priv *ad193x = dev_get_drvdata(dev);
+
+ snd_soc_unregister_dai(&ad193x_dai);
+ snd_soc_unregister_codec(&ad193x->codec);
+ kfree(ad193x);
+ ad193x_codec = NULL;
+
+ return 0;
+}
+
+static struct snd_soc_dai_ops ad193x_dai_ops = {
+ .hw_params = ad193x_hw_params,
+ .digital_mute = ad193x_mute,
+ .set_tdm_slot = ad193x_set_tdm_slot,
+ .set_fmt = ad193x_set_dai_fmt,
+};
+
+/* codec DAI instance */
+struct snd_soc_dai ad193x_dai = {
+ .name = "AD193X",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 2,
+ .channels_max = 8,
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE,
+ },
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 2,
+ .channels_max = 4,
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE,
+ },
+ .ops = &ad193x_dai_ops,
+};
+EXPORT_SYMBOL_GPL(ad193x_dai);
+
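+/*
+ * soc_codec_dev_ad193x probe: attach the already-registered codec to the
+ * ASoC card, create the PCMs and add the mixer controls and DAPM routes.
+ */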
+static int ad193x_probe(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec;
+ int ret = 0;
+
+ if (ad193x_codec == NULL) {
+ dev_err(&pdev->dev, "Codec device not registered\n");
+ return -ENODEV;
+ }
+
+ socdev->card->codec = ad193x_codec;
+ codec = ad193x_codec;
+
+ /* register pcms */
+ ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
+ if (ret < 0) {
+ dev_err(codec->dev, "failed to create pcms: %d\n", ret);
+ goto pcm_err;
+ }
+
+ snd_soc_add_controls(codec, ad193x_snd_controls,
+ ARRAY_SIZE(ad193x_snd_controls));
+ snd_soc_dapm_new_controls(codec, ad193x_dapm_widgets,
+ ARRAY_SIZE(ad193x_dapm_widgets));
+ snd_soc_dapm_add_routes(codec, audio_paths, ARRAY_SIZE(audio_paths));
+
+pcm_err:
+ return ret;
+}
+
+/* power down chip */
+static int ad193x_remove(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+
+ snd_soc_free_pcms(socdev);
+ snd_soc_dapm_free(socdev);
+
+ return 0;
+}
+
+struct snd_soc_codec_device soc_codec_dev_ad193x = {
+ .probe = ad193x_probe,
+ .remove = ad193x_remove,
+};
+EXPORT_SYMBOL_GPL(soc_codec_dev_ad193x);
+
+#if defined(CONFIG_SPI_MASTER)
+static int __devinit ad193x_spi_probe(struct spi_device *spi)
+{
+ return ad193x_bus_probe(&spi->dev, spi, SND_SOC_SPI);
+}
+
+static int __devexit ad193x_spi_remove(struct spi_device *spi)
+{
+ return ad193x_bus_remove(&spi->dev);
+}
+
+static struct spi_driver ad193x_spi_driver = {
+ .driver = {
+ .name = "ad193x",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad193x_spi_probe,
+ .remove = __devexit_p(ad193x_spi_remove),
+};
+#endif
+
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+static const struct i2c_device_id ad193x_id[] = {
+ { "ad1936", 0 },
+ { "ad1937", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ad193x_id);
+
+static int __devinit ad193x_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ return ad193x_bus_probe(&client->dev, client, SND_SOC_I2C);
+}
+
+static int __devexit ad193x_i2c_remove(struct i2c_client *client)
+{
+ return ad193x_bus_remove(&client->dev);
+}
+
+static struct i2c_driver ad193x_i2c_driver = {
+ .driver = {
+ .name = "ad193x",
+ },
+ .probe = ad193x_i2c_probe,
+ .remove = __devexit_p(ad193x_i2c_remove),
+ .id_table = ad193x_id,
+};
+#endif
+
+static int __init ad193x_modinit(void)
+{
+ int ret;
+
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ ret = i2c_add_driver(&ad193x_i2c_driver);
+ if (ret != 0) {
+ printk(KERN_ERR "Failed to register AD193X I2C driver: %d\n",
+ ret);
+ }
+#endif
+
+#if defined(CONFIG_SPI_MASTER)
+ ret = spi_register_driver(&ad193x_spi_driver);
+ if (ret != 0) {
+ printk(KERN_ERR "Failed to register AD193X SPI driver: %d\n",
+ ret);
+ }
+#endif
+ return ret;
+}
+module_init(ad193x_modinit);
+
+static void __exit ad193x_modexit(void)
+{
+#if defined(CONFIG_SPI_MASTER)
+ spi_unregister_driver(&ad193x_spi_driver);
+#endif
+
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ i2c_del_driver(&ad193x_i2c_driver);
+#endif
+}
+module_exit(ad193x_modexit);
+
+MODULE_DESCRIPTION("ASoC ad193x driver");
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/ad193x.h b/sound/soc/codecs/ad193x.h
new file mode 100644
index 000000000000..a03c880d52f9
--- /dev/null
+++ b/sound/soc/codecs/ad193x.h
@@ -0,0 +1,81 @@
+/*
+ * AD193X Audio Codec driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __AD193X_H__
+#define __AD193X_H__
+
+#define AD193X_PLL_CLK_CTRL0 0x800
+#define AD193X_PLL_POWERDOWN 0x01
+#define AD193X_PLL_CLK_CTRL1 0x801
+#define AD193X_DAC_CTRL0 0x802
+#define AD193X_DAC_POWERDOWN 0x01
+#define AD193X_DAC_SERFMT_MASK 0xC0
+#define AD193X_DAC_SERFMT_STEREO (0 << 6)
+#define AD193X_DAC_SERFMT_TDM (1 << 6)
+#define AD193X_DAC_CTRL1 0x803
+#define AD193X_DAC_2_CHANNELS 0
+#define AD193X_DAC_4_CHANNELS 1
+#define AD193X_DAC_8_CHANNELS 2
+#define AD193X_DAC_16_CHANNELS 3
+#define AD193X_DAC_CHAN_SHFT 1
+#define AD193X_DAC_CHAN_MASK (3 << AD193X_DAC_CHAN_SHFT)
+#define AD193X_DAC_LCR_MASTER (1 << 4)
+#define AD193X_DAC_BCLK_MASTER (1 << 5)
+#define AD193X_DAC_LEFT_HIGH (1 << 3)
+#define AD193X_DAC_BCLK_INV (1 << 7)
+#define AD193X_DAC_CTRL2 0x804
+#define AD193X_DAC_WORD_LEN_MASK 0xC
+#define AD193X_DAC_MASTER_MUTE 1
+#define AD193X_DAC_CHNL_MUTE 0x805
+#define AD193X_DACL1_MUTE 0
+#define AD193X_DACR1_MUTE 1
+#define AD193X_DACL2_MUTE 2
+#define AD193X_DACR2_MUTE 3
+#define AD193X_DACL3_MUTE 4
+#define AD193X_DACR3_MUTE 5
+#define AD193X_DACL4_MUTE 6
+#define AD193X_DACR4_MUTE 7
+#define AD193X_DAC_L1_VOL 0x806
+#define AD193X_DAC_R1_VOL 0x807
+#define AD193X_DAC_L2_VOL 0x808
+#define AD193X_DAC_R2_VOL 0x809
+#define AD193X_DAC_L3_VOL 0x80a
+#define AD193X_DAC_R3_VOL 0x80b
+#define AD193X_DAC_L4_VOL 0x80c
+#define AD193X_DAC_R4_VOL 0x80d
+#define AD193X_ADC_CTRL0 0x80e
+#define AD193X_ADC_POWERDOWN 0x01
+#define AD193X_ADC_HIGHPASS_FILTER 1
+#define AD193X_ADCL1_MUTE 2
+#define AD193X_ADCR1_MUTE 3
+#define AD193X_ADCL2_MUTE 4
+#define AD193X_ADCR2_MUTE 5
+#define AD193X_ADC_CTRL1 0x80f
+#define AD193X_ADC_SERFMT_MASK 0x60
+#define AD193X_ADC_SERFMT_STEREO (0 << 5)
+#define AD193X_ADC_SERFMT_TDM (1 << 2)
+#define AD193X_ADC_SERFMT_AUX (2 << 5)
+#define AD193X_ADC_WORD_LEN_MASK 0x3
+#define AD193X_ADC_CTRL2 0x810
+#define AD193X_ADC_2_CHANNELS 0
+#define AD193X_ADC_4_CHANNELS 1
+#define AD193X_ADC_8_CHANNELS 2
+#define AD193X_ADC_16_CHANNELS 3
+#define AD193X_ADC_CHAN_SHFT 4
+#define AD193X_ADC_CHAN_MASK (3 << AD193X_ADC_CHAN_SHFT)
+#define AD193X_ADC_LCR_MASTER (1 << 3)
+#define AD193X_ADC_BCLK_MASTER (1 << 6)
+#define AD193X_ADC_LEFT_HIGH (1 << 2)
+#define AD193X_ADC_BCLK_INV (1 << 1)
+
+#define AD193X_NUM_REGS 17
+
+extern struct snd_soc_dai ad193x_dai;
+extern struct snd_soc_codec_device soc_codec_dev_ad193x;
+
+#endif
diff --git a/sound/soc/codecs/ak4104.c b/sound/soc/codecs/ak4104.c
index bdeb10dfd887..192aebda3029 100644
--- a/sound/soc/codecs/ak4104.c
+++ b/sound/soc/codecs/ak4104.c
@@ -222,7 +222,7 @@ static int ak4104_spi_probe(struct spi_device *spi)
codec->owner = THIS_MODULE;
codec->dai = &ak4104_dai;
codec->num_dai = 1;
- codec->private_data = ak4104;
+ snd_soc_codec_set_drvdata(codec, ak4104);
codec->control_data = spi;
codec->reg_cache = ak4104->reg_cache;
codec->reg_cache_size = AK4104_NUM_REGS;
diff --git a/sound/soc/codecs/ak4535.c b/sound/soc/codecs/ak4535.c
index 352d1d08dbd9..d4253675b2d3 100644
--- a/sound/soc/codecs/ak4535.c
+++ b/sound/soc/codecs/ak4535.c
@@ -302,7 +302,7 @@ static int ak4535_set_dai_sysclk(struct snd_soc_dai *codec_dai,
int clk_id, unsigned int freq, int dir)
{
struct snd_soc_codec *codec = codec_dai->codec;
- struct ak4535_priv *ak4535 = codec->private_data;
+ struct ak4535_priv *ak4535 = snd_soc_codec_get_drvdata(codec);
ak4535->sysclk = freq;
return 0;
@@ -315,7 +315,7 @@ static int ak4535_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
- struct ak4535_priv *ak4535 = codec->private_data;
+ struct ak4535_priv *ak4535 = snd_soc_codec_get_drvdata(codec);
u8 mode2 = ak4535_read_reg_cache(codec, AK4535_MODE2) & ~(0x3 << 5);
int rate = params_rate(params), fs = 256;
@@ -446,7 +446,6 @@ static int ak4535_resume(struct platform_device *pdev)
struct snd_soc_codec *codec = socdev->card->codec;
ak4535_sync(codec);
ak4535_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- ak4535_set_bias_level(codec, codec->suspend_bias_level);
return 0;
}
@@ -600,7 +599,7 @@ static int ak4535_probe(struct platform_device *pdev)
return -ENOMEM;
}
- codec->private_data = ak4535;
+ snd_soc_codec_set_drvdata(codec, ak4535);
socdev->card->codec = codec;
mutex_init(&codec->mutex);
INIT_LIST_HEAD(&codec->dapm_widgets);
@@ -617,7 +616,7 @@ static int ak4535_probe(struct platform_device *pdev)
#endif
if (ret != 0) {
- kfree(codec->private_data);
+ kfree(snd_soc_codec_get_drvdata(codec));
kfree(codec);
}
return ret;
@@ -639,7 +638,7 @@ static int ak4535_remove(struct platform_device *pdev)
i2c_unregister_device(codec->control_data);
i2c_del_driver(&ak4535_i2c_driver);
#endif
- kfree(codec->private_data);
+ kfree(snd_soc_codec_get_drvdata(codec));
kfree(codec);
return 0;
diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c
index 729859cf6ca8..7528a54102b5 100644
--- a/sound/soc/codecs/ak4642.c
+++ b/sound/soc/codecs/ak4642.c
@@ -81,12 +81,39 @@
#define AK4642_CACHEREGNUM 0x25
+/* PW_MGMT2 */
+#define HPMTN (1 << 6)
+#define PMHPL (1 << 5)
+#define PMHPR (1 << 4)
+#define MS (1 << 3) /* master/slave select */
+#define MCKO (1 << 1)
+#define PMPLL (1 << 0)
+
+#define PMHP_MASK (PMHPL | PMHPR)
+#define PMHP PMHP_MASK
+
+/* MD_CTL1 */
+#define PLL3 (1 << 7)
+#define PLL2 (1 << 6)
+#define PLL1 (1 << 5)
+#define PLL0 (1 << 4)
+#define PLL_MASK (PLL3 | PLL2 | PLL1 | PLL0)
+
+#define BCKO_MASK (1 << 3)
+#define BCKO_64 BCKO_MASK
+
+/* MD_CTL2 */
+#define FS0 (1 << 0)
+#define FS1 (1 << 1)
+#define FS2 (1 << 2)
+#define FS3 (1 << 5)
+#define FS_MASK (FS0 | FS1 | FS2 | FS3)
+
struct snd_soc_codec_device soc_codec_dev_ak4642;
/* codec private data */
struct ak4642_priv {
struct snd_soc_codec codec;
- unsigned int sysclk;
};
static struct snd_soc_codec *ak4642_codec;
@@ -177,17 +204,12 @@ static int ak4642_dai_startup(struct snd_pcm_substream *substream,
*
* PLL, Master Mode
* Audio I/F Format :MSB justified (ADC & DAC)
- * Sampling Frequency: 44.1kHz
- * Digital Volume: −8dB
+ * Digital Volume: -8dB
* Bass Boost Level : Middle
*
* This operation came from example code of
* "ASAHI KASEI AK4642" (japanese) manual p97.
- *
- * Example code use 0x39, 0x79 value for 0x01 address,
- * But we need MCKO (0x02) bit now
*/
- ak4642_write(codec, 0x05, 0x27);
ak4642_write(codec, 0x0f, 0x09);
ak4642_write(codec, 0x0e, 0x19);
ak4642_write(codec, 0x09, 0x91);
@@ -195,15 +217,14 @@ static int ak4642_dai_startup(struct snd_pcm_substream *substream,
ak4642_write(codec, 0x0a, 0x28);
ak4642_write(codec, 0x0d, 0x28);
ak4642_write(codec, 0x00, 0x64);
- ak4642_write(codec, 0x01, 0x3b); /* + MCKO bit */
- ak4642_write(codec, 0x01, 0x7b); /* + MCKO bit */
+ snd_soc_update_bits(codec, PW_MGMT2, PMHP_MASK, PMHP);
+ snd_soc_update_bits(codec, PW_MGMT2, HPMTN, HPMTN);
} else {
/*
* start stereo input
*
* PLL Master Mode
* Audio I/F Format:MSB justified (ADC & DAC)
- * Sampling Frequency:44.1kHz
* Pre MIC AMP:+20dB
* MIC Power On
* ALC setting:Refer to Table 35
@@ -212,7 +233,6 @@ static int ak4642_dai_startup(struct snd_pcm_substream *substream,
* This operation came from example code of
* "ASAHI KASEI AK4642" (japanese) manual p94.
*/
- ak4642_write(codec, 0x05, 0x27);
ak4642_write(codec, 0x02, 0x05);
ak4642_write(codec, 0x06, 0x3c);
ak4642_write(codec, 0x08, 0xe1);
@@ -233,8 +253,8 @@ static void ak4642_dai_shutdown(struct snd_pcm_substream *substream,
if (is_play) {
/* stop headphone output */
- ak4642_write(codec, 0x01, 0x3b);
- ak4642_write(codec, 0x01, 0x0b);
+ snd_soc_update_bits(codec, PW_MGMT2, HPMTN, 0);
+ snd_soc_update_bits(codec, PW_MGMT2, PMHP_MASK, 0);
ak4642_write(codec, 0x00, 0x40);
ak4642_write(codec, 0x0e, 0x11);
ak4642_write(codec, 0x0f, 0x08);
@@ -250,9 +270,111 @@ static int ak4642_dai_set_sysclk(struct snd_soc_dai *codec_dai,
int clk_id, unsigned int freq, int dir)
{
struct snd_soc_codec *codec = codec_dai->codec;
- struct ak4642_priv *ak4642 = codec->private_data;
+ u8 pll;
+
+ switch (freq) {
+ case 11289600:
+ pll = PLL2;
+ break;
+ case 12288000:
+ pll = PLL2 | PLL0;
+ break;
+ case 12000000:
+ pll = PLL2 | PLL1;
+ break;
+ case 24000000:
+ pll = PLL2 | PLL1 | PLL0;
+ break;
+ case 13500000:
+ pll = PLL3 | PLL2;
+ break;
+ case 27000000:
+ pll = PLL3 | PLL2 | PLL0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ snd_soc_update_bits(codec, MD_CTL1, PLL_MASK, pll);
+
+ return 0;
+}
+
+static int ak4642_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ u8 data;
+ u8 bcko;
+
+ data = MCKO | PMPLL; /* use MCKO */
+ bcko = 0;
+
+ /* set master/slave audio interface */
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ data |= MS;
+ bcko = BCKO_64;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ break;
+ default:
+ return -EINVAL;
+ }
+ snd_soc_update_bits(codec, PW_MGMT2, MS, data);
+ snd_soc_update_bits(codec, MD_CTL1, BCKO_MASK, bcko);
+
+ return 0;
+}
+
+static int ak4642_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ u8 rate;
+
+ switch (params_rate(params)) {
+ case 7350:
+ rate = FS2;
+ break;
+ case 8000:
+ rate = 0;
+ break;
+ case 11025:
+ rate = FS2 | FS0;
+ break;
+ case 12000:
+ rate = FS0;
+ break;
+ case 14700:
+ rate = FS2 | FS1;
+ break;
+ case 16000:
+ rate = FS1;
+ break;
+ case 22050:
+ rate = FS2 | FS1 | FS0;
+ break;
+ case 24000:
+ rate = FS1 | FS0;
+ break;
+ case 29400:
+ rate = FS3 | FS2 | FS1;
+ break;
+ case 32000:
+ rate = FS3 | FS1;
+ break;
+ case 44100:
+ rate = FS3 | FS2 | FS1 | FS0;
+ break;
+ case 48000:
+ rate = FS3 | FS1 | FS0;
+ break;
+ default:
+ return -EINVAL;
+ break;
+ }
+ snd_soc_update_bits(codec, MD_CTL2, FS_MASK, rate);
- ak4642->sysclk = freq;
return 0;
}
@@ -260,6 +382,8 @@ static struct snd_soc_dai_ops ak4642_dai_ops = {
.startup = ak4642_dai_startup,
.shutdown = ak4642_dai_shutdown,
.set_sysclk = ak4642_dai_set_sysclk,
+ .set_fmt = ak4642_dai_set_fmt,
+ .hw_params = ak4642_dai_hw_params,
};
struct snd_soc_dai ak4642_dai = {
@@ -277,6 +401,7 @@ struct snd_soc_dai ak4642_dai = {
.rates = SNDRV_PCM_RATE_8000_48000,
.formats = SNDRV_PCM_FMTBIT_S16_LE },
.ops = &ak4642_dai_ops,
+ .symmetric_rates = 1,
};
EXPORT_SYMBOL_GPL(ak4642_dai);
@@ -307,7 +432,7 @@ static int ak4642_init(struct ak4642_priv *ak4642)
INIT_LIST_HEAD(&codec->dapm_widgets);
INIT_LIST_HEAD(&codec->dapm_paths);
- codec->private_data = ak4642;
+ snd_soc_codec_set_drvdata(codec, ak4642);
codec->name = "AK4642";
codec->owner = THIS_MODULE;
codec->read = ak4642_read_reg_cache;
@@ -338,26 +463,6 @@ static int ak4642_init(struct ak4642_priv *ak4642)
goto reg_cache_err;
}
- /*
- * clock setting
- *
- * Audio I/F Format: MSB justified (ADC & DAC)
- * BICK frequency at Master Mode: 64fs
- * Input Master Clock Select at PLL Mode: 11.2896MHz
- * MCKO: Enable
- * Sampling Frequency: 44.1kHz
- *
- * This operation came from example code of
- * "ASAHI KASEI AK4642" (japanese) manual p89.
- *
- * please fix-me
- */
- ak4642_write(codec, 0x01, 0x08);
- ak4642_write(codec, 0x04, 0x4a);
- ak4642_write(codec, 0x05, 0x27);
- ak4642_write(codec, 0x00, 0x40);
- ak4642_write(codec, 0x01, 0x0b);
-
return ret;
reg_cache_err:
diff --git a/sound/soc/codecs/ak4671.c b/sound/soc/codecs/ak4671.c
index 926797a014c7..87566932a3b1 100644
--- a/sound/soc/codecs/ak4671.c
+++ b/sound/soc/codecs/ak4671.c
@@ -702,7 +702,7 @@ static int ak4671_register(struct ak4671_priv *ak4671,
INIT_LIST_HEAD(&codec->dapm_widgets);
INIT_LIST_HEAD(&codec->dapm_paths);
- codec->private_data = ak4671;
+ snd_soc_codec_set_drvdata(codec, ak4671);
codec->name = "AK4671";
codec->owner = THIS_MODULE;
codec->bias_level = SND_SOC_BIAS_OFF;
diff --git a/sound/soc/codecs/cq93vc.c b/sound/soc/codecs/cq93vc.c
new file mode 100644
index 000000000000..a320fb5a0e26
--- /dev/null
+++ b/sound/soc/codecs/cq93vc.c
@@ -0,0 +1,299 @@
+/*
+ * ALSA SoC CQ0093 Voice Codec Driver for DaVinci platforms
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc
+ *
+ * Author: Miguel Aguilar <miguel.aguilar@ridgerun.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/mfd/davinci_voicecodec.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+
+#include <mach/dm365.h>
+
+#include "cq93vc.h"
+
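+/*
+ * The voice codec registers are memory mapped through the DaVinci voice
+ * codec MFD, so register I/O is plain readl()/writel() on its base address.
+ */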
+static inline unsigned int cq93vc_read(struct snd_soc_codec *codec,
+ unsigned int reg)
+{
+ struct davinci_vc *davinci_vc = codec->control_data;
+
+ return readl(davinci_vc->base + reg);
+}
+
+static inline int cq93vc_write(struct snd_soc_codec *codec, unsigned int reg,
+ unsigned int value)
+{
+ struct davinci_vc *davinci_vc = codec->control_data;
+
+ writel(value, davinci_vc->base + reg);
+
+ return 0;
+}
+
+static const struct snd_kcontrol_new cq93vc_snd_controls[] = {
+ SOC_SINGLE("PGA Capture Volume", DAVINCI_VC_REG05, 0, 0x03, 0),
+ SOC_SINGLE("Mono DAC Playback Volume", DAVINCI_VC_REG09, 0, 0x3f, 0),
+};
+
+static int cq93vc_mute(struct snd_soc_dai *dai, int mute)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ u8 reg = cq93vc_read(codec, DAVINCI_VC_REG09) & ~DAVINCI_VC_REG09_MUTE;
+
+ if (mute)
+ cq93vc_write(codec, DAVINCI_VC_REG09,
+ reg | DAVINCI_VC_REG09_MUTE);
+ else
+ cq93vc_write(codec, DAVINCI_VC_REG09, reg);
+
+ return 0;
+}
+
+static int cq93vc_set_dai_sysclk(struct snd_soc_dai *codec_dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct davinci_vc *davinci_vc = codec->control_data;
+
+ switch (freq) {
+ case 22579200:
+ case 27000000:
+ case 33868800:
+ davinci_vc->cq93vc.sysclk = freq;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int cq93vc_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ cq93vc_write(codec, DAVINCI_VC_REG12,
+ DAVINCI_VC_REG12_POWER_ALL_ON);
+ break;
+ case SND_SOC_BIAS_PREPARE:
+ break;
+ case SND_SOC_BIAS_STANDBY:
+ cq93vc_write(codec, DAVINCI_VC_REG12,
+ DAVINCI_VC_REG12_POWER_ALL_OFF);
+ break;
+ case SND_SOC_BIAS_OFF:
+ /* force all power off */
+ cq93vc_write(codec, DAVINCI_VC_REG12,
+ DAVINCI_VC_REG12_POWER_ALL_OFF);
+ break;
+ }
+ codec->bias_level = level;
+
+ return 0;
+}
+
+#define CQ93VC_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000)
+#define CQ93VC_FORMATS (SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE)
+
+static struct snd_soc_dai_ops cq93vc_dai_ops = {
+ .digital_mute = cq93vc_mute,
+ .set_sysclk = cq93vc_set_dai_sysclk,
+};
+
+struct snd_soc_dai cq93vc_dai = {
+ .name = "CQ93VC",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = CQ93VC_RATES,
+ .formats = CQ93VC_FORMATS,},
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = CQ93VC_RATES,
+ .formats = CQ93VC_FORMATS,},
+ .ops = &cq93vc_dai_ops,
+};
+EXPORT_SYMBOL_GPL(cq93vc_dai);
+
+static int cq93vc_resume(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec = socdev->card->codec;
+
+ cq93vc_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+ return 0;
+}
+
+static struct snd_soc_codec *cq93vc_codec;
+
+static int cq93vc_probe(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ struct snd_soc_codec *codec;
+ int ret;
+
+ socdev->card->codec = cq93vc_codec;
+ codec = socdev->card->codec;
+
+ /* Register pcms */
+ ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
+ if (ret < 0) {
+ dev_err(dev, "%s: failed to create pcms\n", pdev->name);
+ return ret;
+ }
+
+ /* Set controls */
+ snd_soc_add_controls(codec, cq93vc_snd_controls,
+ ARRAY_SIZE(cq93vc_snd_controls));
+
+ /* Off, with power on */
+ cq93vc_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+ return 0;
+}
+
+static int cq93vc_remove(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+
+ snd_soc_free_pcms(socdev);
+ snd_soc_dapm_free(socdev);
+
+ return 0;
+}
+
+struct snd_soc_codec_device soc_codec_dev_cq93vc = {
+ .probe = cq93vc_probe,
+ .remove = cq93vc_remove,
+ .resume = cq93vc_resume,
+};
+EXPORT_SYMBOL_GPL(soc_codec_dev_cq93vc);
+
+static __init int cq93vc_codec_probe(struct platform_device *pdev)
+{
+ struct davinci_vc *davinci_vc = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec;
+ int ret;
+
+ codec = kzalloc(sizeof(struct snd_soc_codec), GFP_KERNEL);
+ if (codec == NULL) {
+ dev_dbg(davinci_vc->dev,
+ "could not allocate memory for codec data\n");
+ return -ENOMEM;
+ }
+
+ davinci_vc->cq93vc.codec = codec;
+
+ cq93vc_dai.dev = &pdev->dev;
+
+ mutex_init(&codec->mutex);
+ INIT_LIST_HEAD(&codec->dapm_widgets);
+ INIT_LIST_HEAD(&codec->dapm_paths);
+ codec->dev = &pdev->dev;
+ codec->name = "CQ93VC";
+ codec->owner = THIS_MODULE;
+ codec->read = cq93vc_read;
+ codec->write = cq93vc_write;
+ codec->set_bias_level = cq93vc_set_bias_level;
+ codec->dai = &cq93vc_dai;
+ codec->num_dai = 1;
+ codec->control_data = davinci_vc;
+
+ cq93vc_codec = codec;
+
+ ret = snd_soc_register_codec(codec);
+ if (ret) {
+ dev_err(davinci_vc->dev, "failed to register codec\n");
+ goto fail1;
+ }
+
+ ret = snd_soc_register_dai(&cq93vc_dai);
+ if (ret) {
+ dev_err(davinci_vc->dev, "could register dai\n");
+ goto fail2;
+ }
+ return 0;
+
+fail2:
+ snd_soc_unregister_codec(codec);
+
+fail1:
+ kfree(codec);
+ cq93vc_codec = NULL;
+
+ return ret;
+}
+
+static int __devexit cq93vc_codec_remove(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec = socdev->card->codec;
+
+ snd_soc_unregister_dai(&cq93vc_dai);
+ snd_soc_unregister_codec(codec);
+
+ kfree(codec);
+ cq93vc_codec = NULL;
+
+ return 0;
+}
+
+static struct platform_driver cq93vc_codec_driver = {
+ .driver = {
+ .name = "cq93vc",
+ .owner = THIS_MODULE,
+ },
+ .probe = cq93vc_codec_probe,
+ .remove = __devexit_p(cq93vc_codec_remove),
+};
+
+static __init int cq93vc_init(void)
+{
+ return platform_driver_probe(&cq93vc_codec_driver, cq93vc_codec_probe);
+}
+module_init(cq93vc_init);
+
+static __exit void cq93vc_exit(void)
+{
+ platform_driver_unregister(&cq93vc_codec_driver);
+}
+module_exit(cq93vc_exit);
+
+MODULE_DESCRIPTION("Texas Instruments DaVinci ASoC CQ0093 Voice Codec Driver");
+MODULE_AUTHOR("Miguel Aguilar");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/cq93vc.h b/sound/soc/codecs/cq93vc.h
new file mode 100644
index 000000000000..845b1968ef9c
--- /dev/null
+++ b/sound/soc/codecs/cq93vc.h
@@ -0,0 +1,29 @@
+/*
+ * ALSA SoC CQ0093 Voice Codec Driver for DaVinci platforms
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc
+ *
+ * Author: Miguel Aguilar <miguel.aguilar@ridgerun.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _CQ93VC_H
+#define _CQ93VC_H
+
+extern struct snd_soc_dai cq93vc_dai;
+extern struct snd_soc_codec_device soc_codec_dev_cq93vc;
+
+#endif
diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c
index 81a62d198b70..30d949239def 100644
--- a/sound/soc/codecs/cs4270.c
+++ b/sound/soc/codecs/cs4270.c
@@ -211,7 +211,7 @@ static int cs4270_set_dai_sysclk(struct snd_soc_dai *codec_dai,
int clk_id, unsigned int freq, int dir)
{
struct snd_soc_codec *codec = codec_dai->codec;
- struct cs4270_private *cs4270 = codec->private_data;
+ struct cs4270_private *cs4270 = snd_soc_codec_get_drvdata(codec);
unsigned int rates = 0;
unsigned int rate_min = -1;
unsigned int rate_max = 0;
@@ -270,7 +270,7 @@ static int cs4270_set_dai_fmt(struct snd_soc_dai *codec_dai,
unsigned int format)
{
struct snd_soc_codec *codec = codec_dai->codec;
- struct cs4270_private *cs4270 = codec->private_data;
+ struct cs4270_private *cs4270 = snd_soc_codec_get_drvdata(codec);
int ret = 0;
/* set DAI format */
@@ -412,7 +412,7 @@ static int cs4270_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
- struct cs4270_private *cs4270 = codec->private_data;
+ struct cs4270_private *cs4270 = snd_soc_codec_get_drvdata(codec);
int ret;
unsigned int i;
unsigned int rate;
@@ -491,7 +491,7 @@ static int cs4270_hw_params(struct snd_pcm_substream *substream,
static int cs4270_dai_mute(struct snd_soc_dai *dai, int mute)
{
struct snd_soc_codec *codec = dai->codec;
- struct cs4270_private *cs4270 = codec->private_data;
+ struct cs4270_private *cs4270 = snd_soc_codec_get_drvdata(codec);
int reg6;
reg6 = snd_soc_read(codec, CS4270_MUTE);
@@ -524,7 +524,7 @@ static int cs4270_soc_put_mute(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
- struct cs4270_private *cs4270 = codec->private_data;
+ struct cs4270_private *cs4270 = snd_soc_codec_get_drvdata(codec);
int left = !ucontrol->value.integer.value[0];
int right = !ucontrol->value.integer.value[1];
@@ -600,7 +600,7 @@ static int cs4270_probe(struct platform_device *pdev)
{
struct snd_soc_device *socdev = platform_get_drvdata(pdev);
struct snd_soc_codec *codec = cs4270_codec;
- struct cs4270_private *cs4270 = codec->private_data;
+ struct cs4270_private *cs4270 = snd_soc_codec_get_drvdata(codec);
int i, ret;
/* Connect the codec to the socdev. snd_soc_new_pcms() needs this. */
@@ -657,7 +657,7 @@ static int cs4270_remove(struct platform_device *pdev)
{
struct snd_soc_device *socdev = platform_get_drvdata(pdev);
struct snd_soc_codec *codec = cs4270_codec;
- struct cs4270_private *cs4270 = codec->private_data;
+ struct cs4270_private *cs4270 = snd_soc_codec_get_drvdata(codec);
snd_soc_free_pcms(socdev);
regulator_bulk_disable(ARRAY_SIZE(cs4270->supplies), cs4270->supplies);
@@ -730,7 +730,7 @@ static int cs4270_i2c_probe(struct i2c_client *i2c_client,
codec->owner = THIS_MODULE;
codec->dai = &cs4270_dai;
codec->num_dai = 1;
- codec->private_data = cs4270;
+ snd_soc_codec_set_drvdata(codec, cs4270);
codec->control_data = i2c_client;
codec->read = cs4270_read_reg_cache;
codec->write = cs4270_i2c_write;
@@ -843,7 +843,7 @@ MODULE_DEVICE_TABLE(i2c, cs4270_id);
static int cs4270_soc_suspend(struct platform_device *pdev, pm_message_t mesg)
{
struct snd_soc_codec *codec = cs4270_codec;
- struct cs4270_private *cs4270 = codec->private_data;
+ struct cs4270_private *cs4270 = snd_soc_codec_get_drvdata(codec);
int reg, ret;
reg = snd_soc_read(codec, CS4270_PWRCTL) | CS4270_PWRCTL_PDN_ALL;
@@ -863,7 +863,7 @@ static int cs4270_soc_suspend(struct platform_device *pdev, pm_message_t mesg)
static int cs4270_soc_resume(struct platform_device *pdev)
{
struct snd_soc_codec *codec = cs4270_codec;
- struct cs4270_private *cs4270 = codec->private_data;
+ struct cs4270_private *cs4270 = snd_soc_codec_get_drvdata(codec);
struct i2c_client *i2c_client = codec->control_data;
int reg;
diff --git a/sound/soc/codecs/cx20442.c b/sound/soc/codecs/cx20442.c
index 9f169c477108..f07a415c753f 100644
--- a/sound/soc/codecs/cx20442.c
+++ b/sound/soc/codecs/cx20442.c
@@ -387,7 +387,7 @@ static int cx20442_register(struct cx20442_priv *cx20442)
codec->name = "CX20442";
codec->owner = THIS_MODULE;
- codec->private_data = cx20442;
+ snd_soc_codec_set_drvdata(codec, cx20442);
codec->dai = &cx20442_dai;
codec->num_dai = 1;
diff --git a/sound/soc/codecs/da7210.c b/sound/soc/codecs/da7210.c
index 366daf1d044e..75af2d6e0e78 100644
--- a/sound/soc/codecs/da7210.c
+++ b/sound/soc/codecs/da7210.c
@@ -56,8 +56,14 @@
#define DA7210_DAI_SRC_SEL 0x25
#define DA7210_DAI_CFG1 0x26
#define DA7210_DAI_CFG3 0x28
+#define DA7210_PLL_DIV1 0x29
+#define DA7210_PLL_DIV2 0x2A
#define DA7210_PLL_DIV3 0x2B
#define DA7210_PLL 0x2C
+#define DA7210_A_HID_UNLOCK 0x8A
+#define DA7210_A_TEST_UNLOCK 0x8B
+#define DA7210_A_PLL1 0x90
+#define DA7210_A_CP_MODE 0xA7
/* STARTUP1 bit fields */
#define DA7210_SC_MST_EN (1 << 0)
@@ -75,15 +81,14 @@
/* INMIX_R bit fields */
#define DA7210_IN_R_EN (1 << 7)
-/* ADC_HPF bit fields */
-#define DA7210_ADC_VOICE_EN (1 << 7)
-
/* ADC bit fields */
#define DA7210_ADC_L_EN (1 << 3)
#define DA7210_ADC_R_EN (1 << 7)
-/* DAC_HPF fields */
-#define DA7210_DAC_VOICE_EN (1 << 7)
+/* DAC/ADC HPF fields */
+#define DA7210_VOICE_F0_MASK (0x7 << 4)
+#define DA7210_VOICE_F0_25 (1 << 4)
+#define DA7210_VOICE_EN (1 << 7)
/* DAC_SEL bit fields */
#define DA7210_DAC_L_SRC_DAI_L (4 << 0)
@@ -124,7 +129,19 @@
#define DA7210_PLL_BYP (1 << 6)
/* PLL bit fields */
-#define DA7210_PLL_FS_48000 (11 << 0)
+#define DA7210_PLL_FS_MASK (0xF << 0)
+#define DA7210_PLL_FS_8000 (0x1 << 0)
+#define DA7210_PLL_FS_11025 (0x2 << 0)
+#define DA7210_PLL_FS_12000 (0x3 << 0)
+#define DA7210_PLL_FS_16000 (0x5 << 0)
+#define DA7210_PLL_FS_22050 (0x6 << 0)
+#define DA7210_PLL_FS_24000 (0x7 << 0)
+#define DA7210_PLL_FS_32000 (0x9 << 0)
+#define DA7210_PLL_FS_44100 (0xA << 0)
+#define DA7210_PLL_FS_48000 (0xB << 0)
+#define DA7210_PLL_FS_88200 (0xE << 0)
+#define DA7210_PLL_FS_96000 (0xF << 0)
+#define DA7210_PLL_EN (0x1 << 7)
#define DA7210_VERSION "0.0.1"
@@ -165,7 +182,7 @@ static const u8 da7210_reg[] = {
static inline u32 da7210_read_reg_cache(struct snd_soc_codec *codec, u32 reg)
{
u8 *cache = codec->reg_cache;
- BUG_ON(reg > ARRAY_SIZE(da7210_reg));
+ BUG_ON(reg >= ARRAY_SIZE(da7210_reg));
return cache[reg];
}
@@ -242,7 +259,8 @@ static int da7210_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
u32 dai_cfg1;
- u32 reg, mask;
+ u32 hpf_reg, hpf_mask, hpf_value;
+ u32 fs, bypass;
/* set DAI source to Left and Right ADC */
da7210_write(codec, DA7210_DAI_SRC_SEL,
@@ -266,25 +284,84 @@ static int da7210_hw_params(struct snd_pcm_substream *substream,
da7210_write(codec, DA7210_DAI_CFG1, dai_cfg1);
- /* FIXME
- *
- * It support 48K only now
- */
+ hpf_reg = (SNDRV_PCM_STREAM_PLAYBACK == substream->stream) ?
+ DA7210_DAC_HPF : DA7210_ADC_HPF;
+
switch (params_rate(params)) {
+ case 8000:
+ fs = DA7210_PLL_FS_8000;
+ hpf_mask = DA7210_VOICE_F0_MASK | DA7210_VOICE_EN;
+ hpf_value = DA7210_VOICE_F0_25 | DA7210_VOICE_EN;
+ bypass = DA7210_PLL_BYP;
+ break;
+ case 11025:
+ fs = DA7210_PLL_FS_11025;
+ hpf_mask = DA7210_VOICE_F0_MASK | DA7210_VOICE_EN;
+ hpf_value = DA7210_VOICE_F0_25 | DA7210_VOICE_EN;
+ bypass = 0;
+ break;
+ case 12000:
+ fs = DA7210_PLL_FS_12000;
+ hpf_mask = DA7210_VOICE_F0_MASK | DA7210_VOICE_EN;
+ hpf_value = DA7210_VOICE_F0_25 | DA7210_VOICE_EN;
+ bypass = DA7210_PLL_BYP;
+ break;
+ case 16000:
+ fs = DA7210_PLL_FS_16000;
+ hpf_mask = DA7210_VOICE_F0_MASK | DA7210_VOICE_EN;
+ hpf_value = DA7210_VOICE_F0_25 | DA7210_VOICE_EN;
+ bypass = DA7210_PLL_BYP;
+ break;
+ case 22050:
+ fs = DA7210_PLL_FS_22050;
+ hpf_mask = DA7210_VOICE_EN;
+ hpf_value = 0;
+ bypass = 0;
+ break;
+ case 32000:
+ fs = DA7210_PLL_FS_32000;
+ hpf_mask = DA7210_VOICE_EN;
+ hpf_value = 0;
+ bypass = DA7210_PLL_BYP;
+ break;
+ case 44100:
+ fs = DA7210_PLL_FS_44100;
+ hpf_mask = DA7210_VOICE_EN;
+ hpf_value = 0;
+ bypass = 0;
+ break;
case 48000:
- if (SNDRV_PCM_STREAM_PLAYBACK == substream->stream) {
- reg = DA7210_DAC_HPF;
- mask = DA7210_DAC_VOICE_EN;
- } else {
- reg = DA7210_ADC_HPF;
- mask = DA7210_ADC_VOICE_EN;
- }
+ fs = DA7210_PLL_FS_48000;
+ hpf_mask = DA7210_VOICE_EN;
+ hpf_value = 0;
+ bypass = DA7210_PLL_BYP;
+ break;
+ case 88200:
+ fs = DA7210_PLL_FS_88200;
+ hpf_mask = DA7210_VOICE_EN;
+ hpf_value = 0;
+ bypass = 0;
+ break;
+ case 96000:
+ fs = DA7210_PLL_FS_96000;
+ hpf_mask = DA7210_VOICE_EN;
+ hpf_value = 0;
+ bypass = DA7210_PLL_BYP;
break;
default:
return -EINVAL;
}
- snd_soc_update_bits(codec, reg, mask, 0);
+ /* Disable active mode */
+ snd_soc_update_bits(codec, DA7210_STARTUP1, DA7210_SC_MST_EN, 0);
+
+ snd_soc_update_bits(codec, hpf_reg, hpf_mask, hpf_value);
+ snd_soc_update_bits(codec, DA7210_PLL, DA7210_PLL_FS_MASK, fs);
+ snd_soc_update_bits(codec, DA7210_PLL_DIV3, DA7210_PLL_BYP, bypass);
+
+ /* Enable active mode */
+ snd_soc_update_bits(codec, DA7210_STARTUP1,
+ DA7210_SC_MST_EN, DA7210_SC_MST_EN);
return 0;
}
@@ -362,6 +439,7 @@ struct snd_soc_dai da7210_dai = {
.formats = DA7210_FORMATS,
},
.ops = &da7210_dai_ops,
+ .symmetric_rates = 1,
};
EXPORT_SYMBOL_GPL(da7210_dai);
@@ -383,7 +461,7 @@ static int da7210_init(struct da7210_priv *da7210)
INIT_LIST_HEAD(&codec->dapm_widgets);
INIT_LIST_HEAD(&codec->dapm_paths);
- codec->private_data = da7210;
+ snd_soc_codec_set_drvdata(codec, da7210);
codec->name = "DA7210";
codec->owner = THIS_MODULE;
codec->read = da7210_read;
@@ -416,9 +494,23 @@ static int da7210_init(struct da7210_priv *da7210)
/* FIXME
*
* This driver use fixed value here
+ * The settings below expect MCLK = 12.288MHz
+ *
+ * When you select different MCLK, please check...
+ * DA7210_PLL_DIV1 val
+ * DA7210_PLL_DIV2 val
+ * DA7210_PLL_DIV3 val
+ * DA7210_PLL_DIV3 :: DA7210_MCLK_RANGExxx
*/
/*
+ * make sure that the DA7210 uses bypass mode before start-up
+ */
+ da7210_write(codec, DA7210_STARTUP1, 0);
+ da7210_write(codec, DA7210_PLL_DIV3,
+ DA7210_MCLK_RANGE_10_20_MHZ | DA7210_PLL_BYP);
+
+ /*
* ADC settings
*/
@@ -454,9 +546,28 @@ static int da7210_init(struct da7210_priv *da7210)
/* Diable PLL and bypass it */
da7210_write(codec, DA7210_PLL, DA7210_PLL_FS_48000);
- /* Bypass PLL and set MCLK freq rang to 10-20MHz */
- da7210_write(codec, DA7210_PLL_DIV3,
+ /*
+ * Rates in the 48kHz family use bypass mode,
+ * while rates in the 44.1kHz family use the PLL.
+ *
+ * This driver keeps the PLL enabled at all times
+ * and selects bypass/PLL mode by toggling the
+ * DA7210_PLL_DIV3 :: DA7210_PLL_BYP bit; see
+ * da7210_hw_params().
+ */
+ da7210_write(codec, DA7210_PLL_DIV1, 0xE5); /* MCLK = 12.288MHz */
+ da7210_write(codec, DA7210_PLL_DIV2, 0x99);
+ da7210_write(codec, DA7210_PLL_DIV3, 0x0A |
DA7210_MCLK_RANGE_10_20_MHZ | DA7210_PLL_BYP);
+ snd_soc_update_bits(codec, DA7210_PLL, DA7210_PLL_EN, DA7210_PLL_EN);
+
+ /* As suggested by Dialog */
+ da7210_write(codec, DA7210_A_HID_UNLOCK, 0x8B); /* unlock */
+ da7210_write(codec, DA7210_A_TEST_UNLOCK, 0xB4);
+ da7210_write(codec, DA7210_A_PLL1, 0x01);
+ da7210_write(codec, DA7210_A_CP_MODE, 0x7C);
+ da7210_write(codec, DA7210_A_HID_UNLOCK, 0x00); /* re-lock */
+ da7210_write(codec, DA7210_A_TEST_UNLOCK, 0x00);
/* Activate all enabled subsystem */
da7210_write(codec, DA7210_STARTUP1, DA7210_SC_MST_EN);
diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c
index 29d0906a924a..b47ed4f6ab20 100644
--- a/sound/soc/codecs/ssm2602.c
+++ b/sound/soc/codecs/ssm2602.c
@@ -140,6 +140,7 @@ SOC_DOUBLE_R("Capture Volume", SSM2602_LINVOL, SSM2602_RINVOL, 0, 31, 0),
SOC_DOUBLE_R("Capture Switch", SSM2602_LINVOL, SSM2602_RINVOL, 7, 1, 1),
SOC_SINGLE("Mic Boost (+20dB)", SSM2602_APANA, 0, 1, 0),
+SOC_SINGLE("Mic Boost2 (+20dB)", SSM2602_APANA, 7, 1, 0),
SOC_SINGLE("Mic Switch", SSM2602_APANA, 1, 1, 1),
SOC_SINGLE("Sidetone Playback Volume", SSM2602_APANA, 6, 3, 1),
@@ -277,7 +278,7 @@ static int ssm2602_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
- struct ssm2602_priv *ssm2602 = codec->private_data;
+ struct ssm2602_priv *ssm2602 = snd_soc_codec_get_drvdata(codec);
struct i2c_client *i2c = codec->control_data;
u16 iface = ssm2602_read_reg_cache(codec, SSM2602_IFACE) & 0xfff3;
int i = get_coeff(ssm2602->sysclk, params_rate(params));
@@ -322,7 +323,7 @@ static int ssm2602_startup(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
- struct ssm2602_priv *ssm2602 = codec->private_data;
+ struct ssm2602_priv *ssm2602 = snd_soc_codec_get_drvdata(codec);
struct i2c_client *i2c = codec->control_data;
struct snd_pcm_runtime *master_runtime;
@@ -373,7 +374,7 @@ static void ssm2602_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
- struct ssm2602_priv *ssm2602 = codec->private_data;
+ struct ssm2602_priv *ssm2602 = snd_soc_codec_get_drvdata(codec);
/* deactivate */
if (!codec->active)
@@ -401,7 +402,7 @@ static int ssm2602_set_dai_sysclk(struct snd_soc_dai *codec_dai,
int clk_id, unsigned int freq, int dir)
{
struct snd_soc_codec *codec = codec_dai->codec;
- struct ssm2602_priv *ssm2602 = codec->private_data;
+ struct ssm2602_priv *ssm2602 = snd_soc_codec_get_drvdata(codec);
switch (freq) {
case 11289600:
case 12000000:
@@ -559,7 +560,6 @@ static int ssm2602_resume(struct platform_device *pdev)
codec->hw_write(codec->control_data, data, 2);
}
ssm2602_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- ssm2602_set_bias_level(codec, codec->suspend_bias_level);
return 0;
}
@@ -605,8 +605,7 @@ static int ssm2602_init(struct snd_soc_device *socdev)
reg = ssm2602_read_reg_cache(codec, SSM2602_ROUT1V);
ssm2602_write(codec, SSM2602_ROUT1V, reg | ROUT1V_RLHP_BOTH);
/*select Line in as default input*/
- ssm2602_write(codec, SSM2602_APANA,
- APANA_ENABLE_MIC_BOOST2 | APANA_SELECT_DAC |
+ ssm2602_write(codec, SSM2602_APANA, APANA_SELECT_DAC |
APANA_ENABLE_MIC_BOOST);
ssm2602_write(codec, SSM2602_PWR, 0);
@@ -727,7 +726,7 @@ static int ssm2602_probe(struct platform_device *pdev)
return -ENOMEM;
}
- codec->private_data = ssm2602;
+ snd_soc_codec_set_drvdata(codec, ssm2602);
socdev->card->codec = codec;
mutex_init(&codec->mutex);
INIT_LIST_HEAD(&codec->dapm_widgets);
@@ -760,7 +759,7 @@ static int ssm2602_remove(struct platform_device *pdev)
i2c_unregister_device(codec->control_data);
i2c_del_driver(&ssm2602_i2c_driver);
#endif
- kfree(codec->private_data);
+ kfree(snd_soc_codec_get_drvdata(codec));
kfree(codec);
return 0;
diff --git a/sound/soc/codecs/stac9766.c b/sound/soc/codecs/stac9766.c
index 3293629dcb3b..ee86568545c2 100644
--- a/sound/soc/codecs/stac9766.c
+++ b/sound/soc/codecs/stac9766.c
@@ -289,9 +289,6 @@ reset:
}
stac9766_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- if (codec->suspend_bias_level == SND_SOC_BIAS_ON)
- stac9766_set_bias_level(codec, SND_SOC_BIAS_ON);
-
return 0;
}
@@ -410,7 +407,7 @@ reset_err:
pcm_err:
snd_soc_free_ac97_codec(codec);
codec_err:
- kfree(codec->private_data);
+ kfree(snd_soc_codec_get_drvdata(codec));
cache_err:
kfree(socdev->card->codec);
socdev->card->codec = NULL;
diff --git a/sound/soc/codecs/tlv320aic23.c b/sound/soc/codecs/tlv320aic23.c
index 776b79cde904..b0bae3508b29 100644
--- a/sound/soc/codecs/tlv320aic23.c
+++ b/sound/soc/codecs/tlv320aic23.c
@@ -634,7 +634,6 @@ static int tlv320aic23_resume(struct platform_device *pdev)
}
tlv320aic23_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- tlv320aic23_set_bias_level(codec, codec->suspend_bias_level);
return 0;
}
diff --git a/sound/soc/codecs/tlv320aic26.c b/sound/soc/codecs/tlv320aic26.c
index b5b7d6a03844..f0e00fd4b435 100644
--- a/sound/soc/codecs/tlv320aic26.c
+++ b/sound/soc/codecs/tlv320aic26.c
@@ -49,7 +49,7 @@ struct aic26 {
static unsigned int aic26_reg_read(struct snd_soc_codec *codec,
unsigned int reg)
{
- struct aic26 *aic26 = codec->private_data;
+ struct aic26 *aic26 = snd_soc_codec_get_drvdata(codec);
u16 *cache = codec->reg_cache;
u16 cmd, value;
u8 buffer[2];
@@ -93,7 +93,7 @@ static unsigned int aic26_reg_read_cache(struct snd_soc_codec *codec,
static int aic26_reg_write(struct snd_soc_codec *codec, unsigned int reg,
unsigned int value)
{
- struct aic26 *aic26 = codec->private_data;
+ struct aic26 *aic26 = snd_soc_codec_get_drvdata(codec);
u16 *cache = codec->reg_cache;
u16 cmd;
u8 buffer[4];
@@ -132,7 +132,7 @@ static int aic26_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
- struct aic26 *aic26 = codec->private_data;
+ struct aic26 *aic26 = snd_soc_codec_get_drvdata(codec);
int fsref, divisor, wlen, pval, jval, dval, qval;
u16 reg;
@@ -199,7 +199,7 @@ static int aic26_hw_params(struct snd_pcm_substream *substream,
static int aic26_mute(struct snd_soc_dai *dai, int mute)
{
struct snd_soc_codec *codec = dai->codec;
- struct aic26 *aic26 = codec->private_data;
+ struct aic26 *aic26 = snd_soc_codec_get_drvdata(codec);
u16 reg = aic26_reg_read_cache(codec, AIC26_REG_DAC_GAIN);
dev_dbg(&aic26->spi->dev, "aic26_mute(dai=%p, mute=%i)\n",
@@ -218,7 +218,7 @@ static int aic26_set_sysclk(struct snd_soc_dai *codec_dai,
int clk_id, unsigned int freq, int dir)
{
struct snd_soc_codec *codec = codec_dai->codec;
- struct aic26 *aic26 = codec->private_data;
+ struct aic26 *aic26 = snd_soc_codec_get_drvdata(codec);
dev_dbg(&aic26->spi->dev, "aic26_set_sysclk(dai=%p, clk_id==%i,"
" freq=%i, dir=%i)\n",
@@ -235,7 +235,7 @@ static int aic26_set_sysclk(struct snd_soc_dai *codec_dai,
static int aic26_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
{
struct snd_soc_codec *codec = codec_dai->codec;
- struct aic26 *aic26 = codec->private_data;
+ struct aic26 *aic26 = snd_soc_codec_get_drvdata(codec);
dev_dbg(&aic26->spi->dev, "aic26_set_fmt(dai=%p, fmt==%i)\n",
codec_dai, fmt);
@@ -431,7 +431,7 @@ static int aic26_spi_probe(struct spi_device *spi)
/* Setup what we can in the codec structure so that the register
* access functions will work as expected. More will be filled
* out when it is probed by the SoC CODEC part of this driver */
- aic26->codec.private_data = aic26;
+ snd_soc_codec_set_drvdata(&aic26->codec, aic26);
aic26->codec.name = "aic26";
aic26->codec.owner = THIS_MODULE;
aic26->codec.dai = &aic26_dai;
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index 4a6d56c3fed9..71a69908ccf6 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -38,6 +38,8 @@
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <sound/core.h>
@@ -47,16 +49,25 @@
#include <sound/soc-dapm.h>
#include <sound/initval.h>
#include <sound/tlv.h>
+#include <sound/tlv320aic3x.h>
#include "tlv320aic3x.h"
-#define AIC3X_VERSION "0.2"
+#define AIC3X_NUM_SUPPLIES 4
+static const char *aic3x_supply_names[AIC3X_NUM_SUPPLIES] = {
+ "IOVDD", /* I/O Voltage */
+ "DVDD", /* Digital Core Voltage */
+ "AVDD", /* Analog DAC Voltage */
+ "DRVDD", /* ADC Analog and Output Driver Voltage */
+};
/* codec private data */
struct aic3x_priv {
struct snd_soc_codec codec;
+ struct regulator_bulk_data supplies[AIC3X_NUM_SUPPLIES];
unsigned int sysclk;
int master;
+ int gpio_reset;
};
/*
@@ -764,7 +775,7 @@ static int aic3x_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
- struct aic3x_priv *aic3x = codec->private_data;
+ struct aic3x_priv *aic3x = snd_soc_codec_get_drvdata(codec);
int codec_clk = 0, bypass_pll = 0, fsref, last_clk = 0;
u8 data, j, r, p, pll_q, pll_p = 1, pll_r = 1, pll_j = 1;
u16 d, pll_d = 1;
@@ -931,7 +942,7 @@ static int aic3x_set_dai_sysclk(struct snd_soc_dai *codec_dai,
int clk_id, unsigned int freq, int dir)
{
struct snd_soc_codec *codec = codec_dai->codec;
- struct aic3x_priv *aic3x = codec->private_data;
+ struct aic3x_priv *aic3x = snd_soc_codec_get_drvdata(codec);
aic3x->sysclk = freq;
return 0;
@@ -941,7 +952,7 @@ static int aic3x_set_dai_fmt(struct snd_soc_dai *codec_dai,
unsigned int fmt)
{
struct snd_soc_codec *codec = codec_dai->codec;
- struct aic3x_priv *aic3x = codec->private_data;
+ struct aic3x_priv *aic3x = snd_soc_codec_get_drvdata(codec);
u8 iface_areg, iface_breg;
int delay = 0;
@@ -995,12 +1006,13 @@ static int aic3x_set_dai_fmt(struct snd_soc_dai *codec_dai,
static int aic3x_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
- struct aic3x_priv *aic3x = codec->private_data;
+ struct aic3x_priv *aic3x = snd_soc_codec_get_drvdata(codec);
u8 reg;
switch (level) {
case SND_SOC_BIAS_ON:
- /* all power is driven by DAPM system */
+ break;
+ case SND_SOC_BIAS_PREPARE:
if (aic3x->master) {
/* enable pll */
reg = aic3x_read_reg_cache(codec, AIC3X_PLL_PROGA_REG);
@@ -1008,48 +1020,9 @@ static int aic3x_set_bias_level(struct snd_soc_codec *codec,
reg | PLL_ENABLE);
}
break;
- case SND_SOC_BIAS_PREPARE:
- break;
case SND_SOC_BIAS_STANDBY:
- /*
- * all power is driven by DAPM system,
- * so output power is safe if bypass was set
- */
- if (aic3x->master) {
- /* disable pll */
- reg = aic3x_read_reg_cache(codec, AIC3X_PLL_PROGA_REG);
- aic3x_write(codec, AIC3X_PLL_PROGA_REG,
- reg & ~PLL_ENABLE);
- }
- break;
+ /* fall through and disable pll */
case SND_SOC_BIAS_OFF:
- /* force all power off */
- reg = aic3x_read_reg_cache(codec, LINE1L_2_LADC_CTRL);
- aic3x_write(codec, LINE1L_2_LADC_CTRL, reg & ~LADC_PWR_ON);
- reg = aic3x_read_reg_cache(codec, LINE1R_2_RADC_CTRL);
- aic3x_write(codec, LINE1R_2_RADC_CTRL, reg & ~RADC_PWR_ON);
-
- reg = aic3x_read_reg_cache(codec, DAC_PWR);
- aic3x_write(codec, DAC_PWR, reg & ~(LDAC_PWR_ON | RDAC_PWR_ON));
-
- reg = aic3x_read_reg_cache(codec, HPLOUT_CTRL);
- aic3x_write(codec, HPLOUT_CTRL, reg & ~HPLOUT_PWR_ON);
- reg = aic3x_read_reg_cache(codec, HPROUT_CTRL);
- aic3x_write(codec, HPROUT_CTRL, reg & ~HPROUT_PWR_ON);
-
- reg = aic3x_read_reg_cache(codec, HPLCOM_CTRL);
- aic3x_write(codec, HPLCOM_CTRL, reg & ~HPLCOM_PWR_ON);
- reg = aic3x_read_reg_cache(codec, HPRCOM_CTRL);
- aic3x_write(codec, HPRCOM_CTRL, reg & ~HPRCOM_PWR_ON);
-
- reg = aic3x_read_reg_cache(codec, MONOLOPM_CTRL);
- aic3x_write(codec, MONOLOPM_CTRL, reg & ~MONOLOPM_PWR_ON);
-
- reg = aic3x_read_reg_cache(codec, LLOPM_CTRL);
- aic3x_write(codec, LLOPM_CTRL, reg & ~LLOPM_PWR_ON);
- reg = aic3x_read_reg_cache(codec, RLOPM_CTRL);
- aic3x_write(codec, RLOPM_CTRL, reg & ~RLOPM_PWR_ON);
-
if (aic3x->master) {
/* disable pll */
reg = aic3x_read_reg_cache(codec, AIC3X_PLL_PROGA_REG);
@@ -1171,7 +1144,7 @@ static int aic3x_resume(struct platform_device *pdev)
codec->hw_write(codec->control_data, data, 2);
}
- aic3x_set_bias_level(codec, codec->suspend_bias_level);
+ aic3x_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
return 0;
}
@@ -1309,6 +1282,13 @@ static int aic3x_unregister(struct aic3x_priv *aic3x)
snd_soc_unregister_dai(&aic3x_dai);
snd_soc_unregister_codec(&aic3x->codec);
+ if (aic3x->gpio_reset >= 0) {
+ gpio_set_value(aic3x->gpio_reset, 0);
+ gpio_free(aic3x->gpio_reset);
+ }
+ regulator_bulk_disable(ARRAY_SIZE(aic3x->supplies), aic3x->supplies);
+ regulator_bulk_free(ARRAY_SIZE(aic3x->supplies), aic3x->supplies);
+
kfree(aic3x);
aic3x_codec = NULL;
@@ -1330,6 +1310,8 @@ static int aic3x_i2c_probe(struct i2c_client *i2c,
{
struct snd_soc_codec *codec;
struct aic3x_priv *aic3x;
+ struct aic3x_pdata *pdata = i2c->dev.platform_data;
+ int ret, i;
aic3x = kzalloc(sizeof(struct aic3x_priv), GFP_KERNEL);
if (aic3x == NULL) {
@@ -1339,13 +1321,53 @@ static int aic3x_i2c_probe(struct i2c_client *i2c,
codec = &aic3x->codec;
codec->dev = &i2c->dev;
- codec->private_data = aic3x;
+ snd_soc_codec_set_drvdata(codec, aic3x);
codec->control_data = i2c;
codec->hw_write = (hw_write_t) i2c_master_send;
i2c_set_clientdata(i2c, aic3x);
+ aic3x->gpio_reset = -1;
+ if (pdata && pdata->gpio_reset >= 0) {
+ ret = gpio_request(pdata->gpio_reset, "tlv320aic3x reset");
+ if (ret != 0)
+ goto err_gpio;
+ aic3x->gpio_reset = pdata->gpio_reset;
+ gpio_direction_output(aic3x->gpio_reset, 0);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(aic3x->supplies); i++)
+ aic3x->supplies[i].supply = aic3x_supply_names[i];
+
+ ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(aic3x->supplies),
+ aic3x->supplies);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to request supplies: %d\n", ret);
+ goto err_get;
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(aic3x->supplies),
+ aic3x->supplies);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to enable supplies: %d\n", ret);
+ goto err_enable;
+ }
+
+ if (aic3x->gpio_reset >= 0) {
+ udelay(1);
+ gpio_set_value(aic3x->gpio_reset, 1);
+ }
+
return aic3x_register(codec);
+
+err_enable:
+ regulator_bulk_free(ARRAY_SIZE(aic3x->supplies), aic3x->supplies);
+err_get:
+ if (aic3x->gpio_reset >= 0)
+ gpio_free(aic3x->gpio_reset);
+err_gpio:
+ kfree(aic3x);
+ return ret;
}
static int aic3x_i2c_remove(struct i2c_client *client)
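The tlv320aic3x changes above add reset-GPIO handling and switch the supplies to the regulator_bulk_*() helpers: the supply names are filled in once, the regulators are requested and enabled as a group, and the error path unwinds in reverse order. A self-contained sketch of that idiom, using a hypothetical two-supply consumer rather than the aic3x itself:

#include <linux/device.h>
#include <linux/regulator/consumer.h>

/* Hypothetical consumer with two supplies, mirroring the bulk pattern above. */
static const char *demo_supply_names[] = { "DVDD", "AVDD" };

static int demo_power_up(struct device *dev,
			 struct regulator_bulk_data *supplies, int num)
{
	int i, ret;

	for (i = 0; i < num; i++)
		supplies[i].supply = demo_supply_names[i];

	ret = regulator_bulk_get(dev, num, supplies);
	if (ret != 0)
		return ret;				/* nothing acquired yet */

	ret = regulator_bulk_enable(num, supplies);
	if (ret != 0)
		regulator_bulk_free(num, supplies);	/* unwind the get */
	return ret;
}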
diff --git a/sound/soc/codecs/tlv320dac33.c b/sound/soc/codecs/tlv320dac33.c
index d1e0e81ef30c..0d1d1a12cc59 100644
--- a/sound/soc/codecs/tlv320dac33.c
+++ b/sound/soc/codecs/tlv320dac33.c
@@ -51,6 +51,20 @@
#define LATENCY_TIME_MS 20
+#define MODE7_LTHR 10
+#define MODE7_UTHR (DAC33_BUFFER_SIZE_SAMPLES - 10)
+
+#define BURST_BASEFREQ_HZ 49152000
+
+#define SAMPLES_TO_US(rate, samples) \
+ (1000000000 / ((rate * 1000) / samples))
+
+#define US_TO_SAMPLES(rate, us) \
+ (rate / (1000000 / us))
+
+static void dac33_calculate_times(struct snd_pcm_substream *substream);
+static int dac33_prepare_chip(struct snd_pcm_substream *substream);
+
static struct snd_soc_codec *tlv320dac33_codec;
enum dac33_state {
@@ -80,6 +94,7 @@ struct tlv320dac33_priv {
struct work_struct work;
struct snd_soc_codec codec;
struct regulator_bulk_data supplies[DAC33_NUM_SUPPLIES];
+ struct snd_pcm_substream *substream;
int power_gpio;
int chip_power;
int irq;
@@ -93,6 +108,17 @@ struct tlv320dac33_priv {
enum dac33_fifo_modes fifo_mode;/* FIFO mode selection */
unsigned int nsample; /* burst read amount from host */
u8 burst_bclkdiv; /* BCLK divider value in burst mode */
+ unsigned int burst_rate; /* Interface speed in Burst modes */
+
+ int keep_bclk; /* Keep the BCLK continuously running
+ * in FIFO modes */
+ spinlock_t lock;
+ unsigned long long t_stamp1; /* Time stamp for FIFO modes to */
+ unsigned long long t_stamp2; /* calculate the FIFO caused delay */
+
+ unsigned int mode1_us_burst; /* Time to burst read n number of
+ * samples */
+ unsigned int mode7_us_to_lthr; /* Time to reach lthr from uthr */
enum dac33_state state;
};
@@ -166,7 +192,7 @@ static inline void dac33_write_reg_cache(struct snd_soc_codec *codec,
static int dac33_read(struct snd_soc_codec *codec, unsigned int reg,
u8 *value)
{
- struct tlv320dac33_priv *dac33 = codec->private_data;
+ struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
int val;
*value = reg & 0xff;
@@ -191,7 +217,7 @@ static int dac33_read(struct snd_soc_codec *codec, unsigned int reg,
static int dac33_write(struct snd_soc_codec *codec, unsigned int reg,
unsigned int value)
{
- struct tlv320dac33_priv *dac33 = codec->private_data;
+ struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
u8 data[2];
int ret = 0;
@@ -218,7 +244,7 @@ static int dac33_write(struct snd_soc_codec *codec, unsigned int reg,
static int dac33_write_locked(struct snd_soc_codec *codec, unsigned int reg,
unsigned int value)
{
- struct tlv320dac33_priv *dac33 = codec->private_data;
+ struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
int ret;
mutex_lock(&dac33->mutex);
@@ -232,7 +258,7 @@ static int dac33_write_locked(struct snd_soc_codec *codec, unsigned int reg,
static int dac33_write16(struct snd_soc_codec *codec, unsigned int reg,
unsigned int value)
{
- struct tlv320dac33_priv *dac33 = codec->private_data;
+ struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
u8 data[3];
int ret = 0;
@@ -262,45 +288,47 @@ static int dac33_write16(struct snd_soc_codec *codec, unsigned int reg,
return ret;
}
-static void dac33_restore_regs(struct snd_soc_codec *codec)
+static void dac33_init_chip(struct snd_soc_codec *codec)
{
- struct tlv320dac33_priv *dac33 = codec->private_data;
- u8 *cache = codec->reg_cache;
- u8 data[2];
- int i, ret;
+ struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
- if (!dac33->chip_power)
+ if (unlikely(!dac33->chip_power))
return;
- for (i = DAC33_PWR_CTRL; i <= DAC33_INTP_CTRL_B; i++) {
- data[0] = i;
- data[1] = cache[i];
- /* Skip the read only registers */
- if ((i >= DAC33_INT_OSC_STATUS &&
- i <= DAC33_INT_OSC_FREQ_RAT_READ_B) ||
- (i >= DAC33_FIFO_WPTR_MSB && i <= DAC33_FIFO_IRQ_FLAG) ||
- i == DAC33_DAC_STATUS_FLAGS ||
- i == DAC33_SRC_EST_REF_CLK_RATIO_A ||
- i == DAC33_SRC_EST_REF_CLK_RATIO_B)
- continue;
- ret = codec->hw_write(codec->control_data, data, 2);
- if (ret != 2)
- dev_err(codec->dev, "Write failed (%d)\n", ret);
- }
- for (i = DAC33_LDAC_PWR_CTRL; i <= DAC33_LINEL_TO_LLO_VOL; i++) {
- data[0] = i;
- data[1] = cache[i];
- ret = codec->hw_write(codec->control_data, data, 2);
- if (ret != 2)
- dev_err(codec->dev, "Write failed (%d)\n", ret);
- }
- for (i = DAC33_LINER_TO_RLO_VOL; i <= DAC33_OSC_TRIM; i++) {
- data[0] = i;
- data[1] = cache[i];
- ret = codec->hw_write(codec->control_data, data, 2);
- if (ret != 2)
- dev_err(codec->dev, "Write failed (%d)\n", ret);
- }
+ /* 44-46: DAC Control Registers */
+ /* A : DAC sample rate Fsref/1.5 */
+ dac33_write(codec, DAC33_DAC_CTRL_A, DAC33_DACRATE(0));
+ /* B : DAC src=normal, not muted */
+ dac33_write(codec, DAC33_DAC_CTRL_B, DAC33_DACSRCR_RIGHT |
+ DAC33_DACSRCL_LEFT);
+ /* C : (defaults) */
+ dac33_write(codec, DAC33_DAC_CTRL_C, 0x00);
+
+ /* 73 : volume soft stepping control,
+ clock source = internal osc (?) */
+ dac33_write(codec, DAC33_ANA_VOL_SOFT_STEP_CTRL, DAC33_VOLCLKEN);
+
+ dac33_write(codec, DAC33_PWR_CTRL, DAC33_PDNALLB);
+
+ /* Restore only selected registers (gains mostly) */
+ dac33_write(codec, DAC33_LDAC_DIG_VOL_CTRL,
+ dac33_read_reg_cache(codec, DAC33_LDAC_DIG_VOL_CTRL));
+ dac33_write(codec, DAC33_RDAC_DIG_VOL_CTRL,
+ dac33_read_reg_cache(codec, DAC33_RDAC_DIG_VOL_CTRL));
+
+ dac33_write(codec, DAC33_LINEL_TO_LLO_VOL,
+ dac33_read_reg_cache(codec, DAC33_LINEL_TO_LLO_VOL));
+ dac33_write(codec, DAC33_LINER_TO_RLO_VOL,
+ dac33_read_reg_cache(codec, DAC33_LINER_TO_RLO_VOL));
+}
+
+static inline void dac33_read_id(struct snd_soc_codec *codec)
+{
+ u8 reg;
+
+ dac33_read(codec, DAC33_DEVICE_ID_MSB, &reg);
+ dac33_read(codec, DAC33_DEVICE_ID_LSB, &reg);
+ dac33_read(codec, DAC33_DEVICE_REV_ID, &reg);
}
static inline void dac33_soft_power(struct snd_soc_codec *codec, int power)
@@ -311,16 +339,25 @@ static inline void dac33_soft_power(struct snd_soc_codec *codec, int power)
if (power)
reg |= DAC33_PDNALLB;
else
- reg &= ~DAC33_PDNALLB;
+ reg &= ~(DAC33_PDNALLB | DAC33_OSCPDNB |
+ DAC33_DACRPDNB | DAC33_DACLPDNB);
dac33_write(codec, DAC33_PWR_CTRL, reg);
}
static int dac33_hard_power(struct snd_soc_codec *codec, int power)
{
- struct tlv320dac33_priv *dac33 = codec->private_data;
- int ret;
+ struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
+ int ret = 0;
mutex_lock(&dac33->mutex);
+
+ /* Safety check */
+ if (unlikely(power == dac33->chip_power)) {
+ dev_warn(codec->dev, "Trying to set the same power state: %s\n",
+ power ? "ON" : "OFF");
+ goto exit;
+ }
+
if (power) {
ret = regulator_bulk_enable(ARRAY_SIZE(dac33->supplies),
dac33->supplies);
@@ -334,11 +371,6 @@ static int dac33_hard_power(struct snd_soc_codec *codec, int power)
gpio_set_value(dac33->power_gpio, 1);
dac33->chip_power = 1;
-
- /* Restore registers */
- dac33_restore_regs(codec);
-
- dac33_soft_power(codec, 1);
} else {
dac33_soft_power(codec, 0);
if (dac33->power_gpio >= 0)
@@ -360,11 +392,27 @@ exit:
return ret;
}
+static int playback_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(w->codec);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ if (likely(dac33->substream)) {
+ dac33_calculate_times(dac33->substream);
+ dac33_prepare_chip(dac33->substream);
+ }
+ break;
+ }
+ return 0;
+}
+
static int dac33_get_nsample(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
- struct tlv320dac33_priv *dac33 = codec->private_data;
+ struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
ucontrol->value.integer.value[0] = dac33->nsample;
@@ -375,17 +423,21 @@ static int dac33_set_nsample(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
- struct tlv320dac33_priv *dac33 = codec->private_data;
+ struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
int ret = 0;
if (dac33->nsample == ucontrol->value.integer.value[0])
return 0;
if (ucontrol->value.integer.value[0] < dac33->nsample_min ||
- ucontrol->value.integer.value[0] > dac33->nsample_max)
+ ucontrol->value.integer.value[0] > dac33->nsample_max) {
ret = -EINVAL;
- else
+ } else {
dac33->nsample = ucontrol->value.integer.value[0];
+ /* Recalculate the burst time */
+ dac33->mode1_us_burst = SAMPLES_TO_US(dac33->burst_rate,
+ dac33->nsample);
+ }
return ret;
}
@@ -394,7 +446,7 @@ static int dac33_get_fifo_mode(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
- struct tlv320dac33_priv *dac33 = codec->private_data;
+ struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
ucontrol->value.integer.value[0] = dac33->fifo_mode;
@@ -405,7 +457,7 @@ static int dac33_set_fifo_mode(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
- struct tlv320dac33_priv *dac33 = codec->private_data;
+ struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
int ret = 0;
if (dac33->fifo_mode == ucontrol->value.integer.value[0])
@@ -485,6 +537,8 @@ static const struct snd_soc_dapm_widget dac33_dapm_widgets[] = {
DAC33_OUT_AMP_PWR_CTRL, 6, 3, 3, 0),
SND_SOC_DAPM_REG(snd_soc_dapm_mixer, "Output Right Amp Power",
DAC33_OUT_AMP_PWR_CTRL, 4, 3, 3, 0),
+
+ SND_SOC_DAPM_PRE("Prepare Playback", playback_event),
};
static const struct snd_soc_dapm_route audio_map[] = {
@@ -527,18 +581,18 @@ static int dac33_set_bias_level(struct snd_soc_codec *codec,
break;
case SND_SOC_BIAS_STANDBY:
if (codec->bias_level == SND_SOC_BIAS_OFF) {
+ /* Coming from OFF, switch on the codec */
ret = dac33_hard_power(codec, 1);
if (ret != 0)
return ret;
- }
- dac33_soft_power(codec, 0);
+ dac33_init_chip(codec);
+ }
break;
case SND_SOC_BIAS_OFF:
ret = dac33_hard_power(codec, 0);
if (ret != 0)
return ret;
-
break;
}
codec->bias_level = level;
@@ -555,13 +609,34 @@ static inline void dac33_prefill_handler(struct tlv320dac33_priv *dac33)
switch (dac33->fifo_mode) {
case DAC33_FIFO_MODE1:
dac33_write16(codec, DAC33_NSAMPLE_MSB,
- DAC33_THRREG(dac33->nsample));
+ DAC33_THRREG(dac33->nsample + dac33->alarm_threshold));
+
+ /* Take the timestamps */
+ spin_lock_irq(&dac33->lock);
+ dac33->t_stamp2 = ktime_to_us(ktime_get());
+ dac33->t_stamp1 = dac33->t_stamp2;
+ spin_unlock_irq(&dac33->lock);
+
dac33_write16(codec, DAC33_PREFILL_MSB,
DAC33_THRREG(dac33->alarm_threshold));
+ /* Enable Alarm Threshold IRQ with a delay */
+ udelay(SAMPLES_TO_US(dac33->burst_rate,
+ dac33->alarm_threshold));
+ dac33_write(codec, DAC33_FIFO_IRQ_MASK, DAC33_MAT);
break;
case DAC33_FIFO_MODE7:
+ /* Take the timestamp */
+ spin_lock_irq(&dac33->lock);
+ dac33->t_stamp1 = ktime_to_us(ktime_get());
+ /* Move back the timestamp with drain time */
+ dac33->t_stamp1 -= dac33->mode7_us_to_lthr;
+ spin_unlock_irq(&dac33->lock);
+
dac33_write16(codec, DAC33_PREFILL_MSB,
- DAC33_THRREG(10));
+ DAC33_THRREG(MODE7_LTHR));
+
+ /* Enable Upper Threshold IRQ */
+ dac33_write(codec, DAC33_FIFO_IRQ_MASK, DAC33_MUT);
break;
default:
dev_warn(codec->dev, "Unhandled FIFO mode: %d\n",
@@ -578,6 +653,11 @@ static inline void dac33_playback_handler(struct tlv320dac33_priv *dac33)
switch (dac33->fifo_mode) {
case DAC33_FIFO_MODE1:
+ /* Take the timestamp */
+ spin_lock_irq(&dac33->lock);
+ dac33->t_stamp2 = ktime_to_us(ktime_get());
+ spin_unlock_irq(&dac33->lock);
+
dac33_write16(codec, DAC33_NSAMPLE_MSB,
DAC33_THRREG(dac33->nsample));
break;
@@ -628,31 +708,17 @@ static void dac33_work(struct work_struct *work)
static irqreturn_t dac33_interrupt_handler(int irq, void *dev)
{
struct snd_soc_codec *codec = dev;
- struct tlv320dac33_priv *dac33 = codec->private_data;
-
- queue_work(dac33->dac33_wq, &dac33->work);
+ struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
- return IRQ_HANDLED;
-}
+ spin_lock(&dac33->lock);
+ dac33->t_stamp1 = ktime_to_us(ktime_get());
+ spin_unlock(&dac33->lock);
-static void dac33_shutdown(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_device *socdev = rtd->socdev;
- struct snd_soc_codec *codec = socdev->card->codec;
- struct tlv320dac33_priv *dac33 = codec->private_data;
- unsigned int pwr_ctrl;
+ /* Do not schedule the workqueue in Mode7 */
+ if (dac33->fifo_mode != DAC33_FIFO_MODE7)
+ queue_work(dac33->dac33_wq, &dac33->work);
- /* Stop pending workqueue */
- if (dac33->fifo_mode)
- cancel_work_sync(&dac33->work);
-
- mutex_lock(&dac33->mutex);
- pwr_ctrl = dac33_read_reg_cache(codec, DAC33_PWR_CTRL);
- pwr_ctrl &= ~(DAC33_OSCPDNB | DAC33_DACRPDNB | DAC33_DACLPDNB);
- dac33_write(codec, DAC33_PWR_CTRL, pwr_ctrl);
- mutex_unlock(&dac33->mutex);
+ return IRQ_HANDLED;
}
static void dac33_oscwait(struct snd_soc_codec *codec)
@@ -669,6 +735,31 @@ static void dac33_oscwait(struct snd_soc_codec *codec)
"internal oscillator calibration failed\n");
}
+static int dac33_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_device *socdev = rtd->socdev;
+ struct snd_soc_codec *codec = socdev->card->codec;
+ struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
+
+ /* Stream started, save the substream pointer */
+ dac33->substream = substream;
+
+ return 0;
+}
+
+static void dac33_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_device *socdev = rtd->socdev;
+ struct snd_soc_codec *codec = socdev->card->codec;
+ struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
+
+ dac33->substream = NULL;
+}
+
static int dac33_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
@@ -715,7 +806,7 @@ static int dac33_prepare_chip(struct snd_pcm_substream *substream)
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
- struct tlv320dac33_priv *dac33 = codec->private_data;
+ struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
unsigned int oscset, ratioset, pwr_ctrl, reg_tmp;
u8 aictrl_a, aictrl_b, fifoctrl_a;
@@ -752,6 +843,17 @@ static int dac33_prepare_chip(struct snd_pcm_substream *substream)
}
mutex_lock(&dac33->mutex);
+
+ if (!dac33->chip_power) {
+ /*
+ * Chip is not powered yet.
+ * Do the init in the dac33_set_bias_level later.
+ */
+ mutex_unlock(&dac33->mutex);
+ return 0;
+ }
+
+ dac33_soft_power(codec, 0);
dac33_soft_power(codec, 1);
reg_tmp = dac33_read_reg_cache(codec, DAC33_INT_OSC_CTRL);
@@ -799,11 +901,10 @@ static int dac33_prepare_chip(struct snd_pcm_substream *substream)
case DAC33_FIFO_MODE1:
dac33_write(codec, DAC33_FIFO_IRQ_MODE_B,
DAC33_ATM(DAC33_FIFO_IRQ_MODE_LEVEL));
- dac33_write(codec, DAC33_FIFO_IRQ_MASK, DAC33_MAT);
break;
case DAC33_FIFO_MODE7:
- /* Disable all interrupts */
- dac33_write(codec, DAC33_FIFO_IRQ_MASK, 0);
+ dac33_write(codec, DAC33_FIFO_IRQ_MODE_A,
+ DAC33_UTM(DAC33_FIFO_IRQ_MODE_LEVEL));
break;
default:
/* in FIFO bypass mode, the interrupts are not used */
@@ -822,7 +923,10 @@ static int dac33_prepare_chip(struct snd_pcm_substream *substream)
*/
fifoctrl_a &= ~DAC33_FBYPAS;
fifoctrl_a &= ~DAC33_FAUTO;
- aictrl_b &= ~DAC33_BCLKON;
+ if (dac33->keep_bclk)
+ aictrl_b |= DAC33_BCLKON;
+ else
+ aictrl_b &= ~DAC33_BCLKON;
break;
case DAC33_FIFO_MODE7:
/*
@@ -833,7 +937,10 @@ static int dac33_prepare_chip(struct snd_pcm_substream *substream)
*/
fifoctrl_a &= ~DAC33_FBYPAS;
fifoctrl_a |= DAC33_FAUTO;
- aictrl_b &= ~DAC33_BCLKON;
+ if (dac33->keep_bclk)
+ aictrl_b |= DAC33_BCLKON;
+ else
+ aictrl_b &= ~DAC33_BCLKON;
break;
default:
/*
@@ -875,10 +982,8 @@ static int dac33_prepare_chip(struct snd_pcm_substream *substream)
* Configure the threshold levels, and leave 10 sample space
* at the bottom, and also at the top of the FIFO
*/
- dac33_write16(codec, DAC33_UTHR_MSB,
- DAC33_THRREG(DAC33_BUFFER_SIZE_SAMPLES - 10));
- dac33_write16(codec, DAC33_LTHR_MSB,
- DAC33_THRREG(10));
+ dac33_write16(codec, DAC33_UTHR_MSB, DAC33_THRREG(MODE7_UTHR));
+ dac33_write16(codec, DAC33_LTHR_MSB, DAC33_THRREG(MODE7_LTHR));
break;
default:
break;
@@ -894,9 +999,13 @@ static void dac33_calculate_times(struct snd_pcm_substream *substream)
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
- struct tlv320dac33_priv *dac33 = codec->private_data;
+ struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
unsigned int nsample_limit;
+ /* In bypass mode we don't need to calculate */
+ if (!dac33->fifo_mode)
+ return;
+
/* Number of samples (16bit, stereo) in one period */
dac33->nsample_min = snd_pcm_lib_period_bytes(substream) / 4;
@@ -930,15 +1039,24 @@ static void dac33_calculate_times(struct snd_pcm_substream *substream)
if (dac33->nsample > dac33->nsample_max)
dac33->nsample = dac33->nsample_max;
-}
-static int dac33_pcm_prepare(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- dac33_calculate_times(substream);
- dac33_prepare_chip(substream);
+ switch (dac33->fifo_mode) {
+ case DAC33_FIFO_MODE1:
+ dac33->mode1_us_burst = SAMPLES_TO_US(dac33->burst_rate,
+ dac33->nsample);
+ dac33->t_stamp1 = 0;
+ dac33->t_stamp2 = 0;
+ break;
+ case DAC33_FIFO_MODE7:
+ dac33->mode7_us_to_lthr =
+ SAMPLES_TO_US(substream->runtime->rate,
+ MODE7_UTHR - MODE7_LTHR + 1);
+ dac33->t_stamp1 = 0;
+ break;
+ default:
+ break;
+ }
- return 0;
}
static int dac33_pcm_trigger(struct snd_pcm_substream *substream, int cmd,
@@ -947,7 +1065,7 @@ static int dac33_pcm_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
- struct tlv320dac33_priv *dac33 = codec->private_data;
+ struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
int ret = 0;
switch (cmd) {
@@ -974,11 +1092,156 @@ static int dac33_pcm_trigger(struct snd_pcm_substream *substream, int cmd,
return ret;
}
+static snd_pcm_sframes_t dac33_dai_delay(
+ struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_device *socdev = rtd->socdev;
+ struct snd_soc_codec *codec = socdev->card->codec;
+ struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
+ unsigned long long t0, t1, t_now;
+ unsigned int time_delta;
+ int samples_out, samples_in, samples;
+ snd_pcm_sframes_t delay = 0;
+
+ switch (dac33->fifo_mode) {
+ case DAC33_FIFO_BYPASS:
+ break;
+ case DAC33_FIFO_MODE1:
+ spin_lock(&dac33->lock);
+ t0 = dac33->t_stamp1;
+ t1 = dac33->t_stamp2;
+ spin_unlock(&dac33->lock);
+ t_now = ktime_to_us(ktime_get());
+
+ /* We have not started to fill the FIFO yet, delay is 0 */
+ if (!t1)
+ goto out;
+
+ if (t0 > t1) {
+ /*
+ * Phase 1:
+ * After Alarm threshold, and before nSample write
+ */
+ time_delta = t_now - t0;
+ samples_out = time_delta ? US_TO_SAMPLES(
+ substream->runtime->rate,
+ time_delta) : 0;
+
+ if (likely(dac33->alarm_threshold > samples_out))
+ delay = dac33->alarm_threshold - samples_out;
+ else
+ delay = 0;
+ } else if ((t_now - t1) <= dac33->mode1_us_burst) {
+ /*
+ * Phase 2:
+ * After nSample write (during burst operation)
+ */
+ time_delta = t_now - t0;
+ samples_out = time_delta ? US_TO_SAMPLES(
+ substream->runtime->rate,
+ time_delta) : 0;
+
+ time_delta = t_now - t1;
+ samples_in = time_delta ? US_TO_SAMPLES(
+ dac33->burst_rate,
+ time_delta) : 0;
+
+ samples = dac33->alarm_threshold;
+ samples += (samples_in - samples_out);
+
+ if (likely(samples > 0))
+ delay = samples;
+ else
+ delay = 0;
+ } else {
+ /*
+ * Phase 3:
+ * After burst operation, before next alarm threshold
+ */
+ time_delta = t_now - t0;
+ samples_out = time_delta ? US_TO_SAMPLES(
+ substream->runtime->rate,
+ time_delta) : 0;
+
+ samples_in = dac33->nsample;
+ samples = dac33->alarm_threshold;
+ samples += (samples_in - samples_out);
+
+ if (likely(samples > 0))
+ delay = samples > DAC33_BUFFER_SIZE_SAMPLES ?
+ DAC33_BUFFER_SIZE_SAMPLES : samples;
+ else
+ delay = 0;
+ }
+ break;
+ case DAC33_FIFO_MODE7:
+ spin_lock(&dac33->lock);
+ t0 = dac33->t_stamp1;
+ spin_unlock(&dac33->lock);
+ t_now = ktime_to_us(ktime_get());
+
+ /* We have not started to fill the FIFO yet, delay is 0 */
+ if (!t0)
+ goto out;
+
+ if (t_now <= t0) {
+ /*
+ * Either the timestamps are messed up or equal. Report
+ * maximum delay
+ */
+ delay = MODE7_UTHR;
+ goto out;
+ }
+
+ time_delta = t_now - t0;
+ if (time_delta <= dac33->mode7_us_to_lthr) {
+ /*
+ * Phase 1:
+ * After burst (draining phase)
+ */
+ samples_out = US_TO_SAMPLES(
+ substream->runtime->rate,
+ time_delta);
+
+ if (likely(MODE7_UTHR > samples_out))
+ delay = MODE7_UTHR - samples_out;
+ else
+ delay = 0;
+ } else {
+ /*
+ * Phase 2:
+ * During burst operation
+ */
+ time_delta = time_delta - dac33->mode7_us_to_lthr;
+
+ samples_out = US_TO_SAMPLES(
+ substream->runtime->rate,
+ time_delta);
+ samples_in = US_TO_SAMPLES(
+ dac33->burst_rate,
+ time_delta);
+ delay = MODE7_LTHR + samples_in - samples_out;
+
+ if (unlikely(delay > MODE7_UTHR))
+ delay = MODE7_UTHR;
+ }
+ break;
+ default:
+ dev_warn(codec->dev, "Unhandled FIFO mode: %d\n",
+ dac33->fifo_mode);
+ break;
+ }
+out:
+ return delay;
+}
+
static int dac33_set_dai_sysclk(struct snd_soc_dai *codec_dai,
int clk_id, unsigned int freq, int dir)
{
struct snd_soc_codec *codec = codec_dai->codec;
- struct tlv320dac33_priv *dac33 = codec->private_data;
+ struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
u8 ioc_reg, asrcb_reg;
ioc_reg = dac33_read_reg_cache(codec, DAC33_INT_OSC_CTRL);
@@ -1008,7 +1271,7 @@ static int dac33_set_dai_fmt(struct snd_soc_dai *codec_dai,
unsigned int fmt)
{
struct snd_soc_codec *codec = codec_dai->codec;
- struct tlv320dac33_priv *dac33 = codec->private_data;
+ struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
u8 aictrl_a, aictrl_b;
aictrl_a = dac33_read_reg_cache(codec, DAC33_SER_AUDIOIF_CTRL_A);
@@ -1059,35 +1322,6 @@ static int dac33_set_dai_fmt(struct snd_soc_dai *codec_dai,
return 0;
}
-static void dac33_init_chip(struct snd_soc_codec *codec)
-{
- /* 44-46: DAC Control Registers */
- /* A : DAC sample rate Fsref/1.5 */
- dac33_write(codec, DAC33_DAC_CTRL_A, DAC33_DACRATE(0));
- /* B : DAC src=normal, not muted */
- dac33_write(codec, DAC33_DAC_CTRL_B, DAC33_DACSRCR_RIGHT |
- DAC33_DACSRCL_LEFT);
- /* C : (defaults) */
- dac33_write(codec, DAC33_DAC_CTRL_C, 0x00);
-
- /* 64-65 : L&R DAC power control
- Line In -> OUT 1V/V Gain, DAC -> OUT 4V/V Gain*/
- dac33_write(codec, DAC33_LDAC_PWR_CTRL, DAC33_LROUT_GAIN(2));
- dac33_write(codec, DAC33_RDAC_PWR_CTRL, DAC33_LROUT_GAIN(2));
-
- /* 73 : volume soft stepping control,
- clock source = internal osc (?) */
- dac33_write(codec, DAC33_ANA_VOL_SOFT_STEP_CTRL, DAC33_VOLCLKEN);
-
- /* 66 : LOP/LOM Modes */
- dac33_write(codec, DAC33_OUT_AMP_CM_CTRL, 0xff);
-
- /* 68 : LOM inverted from LOP */
- dac33_write(codec, DAC33_OUT_AMP_CTRL, (3<<2));
-
- dac33_write(codec, DAC33_PWR_CTRL, DAC33_PDNALLB);
-}
-
static int dac33_soc_probe(struct platform_device *pdev)
{
struct snd_soc_device *socdev = platform_get_drvdata(pdev);
@@ -1099,12 +1333,7 @@ static int dac33_soc_probe(struct platform_device *pdev)
codec = tlv320dac33_codec;
socdev->card->codec = codec;
- dac33 = codec->private_data;
-
- /* Power up the codec */
- dac33_hard_power(codec, 1);
- /* Set default configuration */
- dac33_init_chip(codec);
+ dac33 = snd_soc_codec_get_drvdata(codec);
/* register pcms */
ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
@@ -1122,12 +1351,6 @@ static int dac33_soc_probe(struct platform_device *pdev)
dac33_add_widgets(codec);
- /* power on device */
- dac33_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- /* Bias level configuration has enabled regulator an extra time */
- regulator_bulk_disable(ARRAY_SIZE(dac33->supplies), dac33->supplies);
-
return 0;
pcm_err:
@@ -1164,7 +1387,6 @@ static int dac33_soc_resume(struct platform_device *pdev)
struct snd_soc_codec *codec = socdev->card->codec;
dac33_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- dac33_set_bias_level(codec, codec->suspend_bias_level);
return 0;
}
@@ -1182,10 +1404,11 @@ EXPORT_SYMBOL_GPL(soc_codec_dev_tlv320dac33);
#define DAC33_FORMATS SNDRV_PCM_FMTBIT_S16_LE
static struct snd_soc_dai_ops dac33_dai_ops = {
+ .startup = dac33_startup,
.shutdown = dac33_shutdown,
.hw_params = dac33_hw_params,
- .prepare = dac33_pcm_prepare,
.trigger = dac33_pcm_trigger,
+ .delay = dac33_dai_delay,
.set_sysclk = dac33_set_dai_sysclk,
.set_fmt = dac33_set_dai_fmt,
};
@@ -1221,11 +1444,12 @@ static int __devinit dac33_i2c_probe(struct i2c_client *client,
return -ENOMEM;
codec = &dac33->codec;
- codec->private_data = dac33;
+ snd_soc_codec_set_drvdata(codec, dac33);
codec->control_data = client;
mutex_init(&codec->mutex);
mutex_init(&dac33->mutex);
+ spin_lock_init(&dac33->lock);
INIT_LIST_HEAD(&codec->dapm_widgets);
INIT_LIST_HEAD(&codec->dapm_paths);
@@ -1236,6 +1460,7 @@ static int __devinit dac33_i2c_probe(struct i2c_client *client,
codec->hw_write = (hw_write_t) i2c_master_send;
codec->bias_level = SND_SOC_BIAS_OFF;
codec->set_bias_level = dac33_set_bias_level;
+ codec->idle_bias_off = 1;
codec->dai = &dac33_dai;
codec->num_dai = 1;
codec->reg_cache_size = ARRAY_SIZE(dac33_reg);
@@ -1250,8 +1475,12 @@ static int __devinit dac33_i2c_probe(struct i2c_client *client,
dac33->power_gpio = pdata->power_gpio;
dac33->burst_bclkdiv = pdata->burst_bclkdiv;
+ /* Precalculate the burst rate */
+ dac33->burst_rate = BURST_BASEFREQ_HZ / dac33->burst_bclkdiv / 32;
+ dac33->keep_bclk = pdata->keep_bclk;
dac33->irq = client->irq;
dac33->nsample = NSAMPLE_MAX;
+ dac33->nsample_max = NSAMPLE_MAX;
/* Disable FIFO use by default */
dac33->fifo_mode = DAC33_FIFO_BYPASS;
@@ -1272,8 +1501,6 @@ static int __devinit dac33_i2c_probe(struct i2c_client *client,
goto error_gpio;
}
gpio_direction_output(dac33->power_gpio, 0);
- } else {
- dac33->chip_power = 1;
}
/* Check if the IRQ number is valid and request it */
@@ -1311,12 +1538,14 @@ static int __devinit dac33_i2c_probe(struct i2c_client *client,
goto err_get;
}
- ret = regulator_bulk_enable(ARRAY_SIZE(dac33->supplies),
- dac33->supplies);
+ /* Read the tlv320dac33 ID registers */
+ ret = dac33_hard_power(codec, 1);
if (ret != 0) {
- dev_err(codec->dev, "Failed to enable supplies: %d\n", ret);
- goto err_enable;
+ dev_err(codec->dev, "Failed to power up codec: %d\n", ret);
+ goto error_codec;
}
+ dac33_read_id(codec);
+ dac33_hard_power(codec, 0);
ret = snd_soc_register_codec(codec);
if (ret != 0) {
@@ -1331,14 +1560,9 @@ static int __devinit dac33_i2c_probe(struct i2c_client *client,
goto error_codec;
}
- /* Shut down the codec for now */
- dac33_hard_power(codec, 0);
-
return ret;
error_codec:
- regulator_bulk_disable(ARRAY_SIZE(dac33->supplies), dac33->supplies);
-err_enable:
regulator_bulk_free(ARRAY_SIZE(dac33->supplies), dac33->supplies);
err_get:
if (dac33->irq >= 0) {
@@ -1362,7 +1586,9 @@ static int __devexit dac33_i2c_remove(struct i2c_client *client)
struct tlv320dac33_priv *dac33;
dac33 = i2c_get_clientdata(client);
- dac33_hard_power(&dac33->codec, 0);
+
+ if (unlikely(dac33->chip_power))
+ dac33_hard_power(&dac33->codec, 0);
if (dac33->power_gpio >= 0)
gpio_free(dac33->power_gpio);
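The tlv320dac33 changes above add DAI delay reporting for the FIFO modes: timestamps taken around the prefill and burst phases are converted between microseconds and sample counts with the SAMPLES_TO_US()/US_TO_SAMPLES() macros, and the burst interface rate is derived from the 49.152 MHz base clock and the platform BCLK divider. A small check of that integer arithmetic (user-space C, not part of the patch; the divider value 8 is only an example):

#include <stdio.h>

#define BURST_BASEFREQ_HZ 49152000

/* Same integer formulas as the macros introduced in the hunk above. */
#define SAMPLES_TO_US(rate, samples) \
	(1000000000 / ((rate * 1000) / samples))
#define US_TO_SAMPLES(rate, us) \
	(rate / (1000000 / us))

int main(void)
{
	unsigned int rate = 44100, burst_bclkdiv = 8;

	/* 441 samples at 44.1 kHz last 10 ms */
	printf("%u us\n", SAMPLES_TO_US(rate, 441));		/* 10000 */
	/* the reverse conversion recovers the sample count */
	printf("%u samples\n", US_TO_SAMPLES(rate, 10000));	/* 441 */
	/* burst-mode sample rate for a BCLK divider of 8 */
	printf("%u Hz\n", BURST_BASEFREQ_HZ / burst_bclkdiv / 32); /* 192000 */
	return 0;
}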
diff --git a/sound/soc/codecs/tpa6130a2.c b/sound/soc/codecs/tpa6130a2.c
index 569ad8758a84..221bb9090504 100644
--- a/sound/soc/codecs/tpa6130a2.c
+++ b/sound/soc/codecs/tpa6130a2.c
@@ -54,6 +54,7 @@ struct tpa6130a2_data {
struct regulator_bulk_data supplies[TPA6130A2_NUM_SUPPLIES];
int power_gpio;
unsigned char power_state;
+ enum tpa_model id;
};
static int tpa6130a2_i2c_read(int reg)
@@ -176,7 +177,7 @@ exit:
return ret;
}
-static int tpa6130a2_get_reg(struct snd_kcontrol *kcontrol,
+static int tpa6130a2_get_volsw(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct soc_mixer_control *mc =
@@ -184,7 +185,8 @@ static int tpa6130a2_get_reg(struct snd_kcontrol *kcontrol,
struct tpa6130a2_data *data;
unsigned int reg = mc->reg;
unsigned int shift = mc->shift;
- unsigned int mask = mc->max;
+ int max = mc->max;
+ unsigned int mask = (1 << fls(max)) - 1;
unsigned int invert = mc->invert;
BUG_ON(tpa6130a2_client == NULL);
@@ -197,13 +199,13 @@ static int tpa6130a2_get_reg(struct snd_kcontrol *kcontrol,
if (invert)
ucontrol->value.integer.value[0] =
- mask - ucontrol->value.integer.value[0];
+ max - ucontrol->value.integer.value[0];
mutex_unlock(&data->mutex);
return 0;
}
-static int tpa6130a2_set_reg(struct snd_kcontrol *kcontrol,
+static int tpa6130a2_put_volsw(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct soc_mixer_control *mc =
@@ -211,7 +213,8 @@ static int tpa6130a2_set_reg(struct snd_kcontrol *kcontrol,
struct tpa6130a2_data *data;
unsigned int reg = mc->reg;
unsigned int shift = mc->shift;
- unsigned int mask = mc->max;
+ int max = mc->max;
+ unsigned int mask = (1 << fls(max)) - 1;
unsigned int invert = mc->invert;
unsigned int val = (ucontrol->value.integer.value[0] & mask);
unsigned int val_reg;
@@ -220,7 +223,7 @@ static int tpa6130a2_set_reg(struct snd_kcontrol *kcontrol,
data = i2c_get_clientdata(tpa6130a2_client);
if (invert)
- val = mask - val;
+ val = max - val;
mutex_lock(&data->mutex);
@@ -260,10 +263,24 @@ static const unsigned int tpa6130_tlv[] = {
static const struct snd_kcontrol_new tpa6130a2_controls[] = {
SOC_SINGLE_EXT_TLV("TPA6130A2 Headphone Playback Volume",
TPA6130A2_REG_VOL_MUTE, 0, 0x3f, 0,
- tpa6130a2_get_reg, tpa6130a2_set_reg,
+ tpa6130a2_get_volsw, tpa6130a2_put_volsw,
tpa6130_tlv),
};
+static const unsigned int tpa6140_tlv[] = {
+ TLV_DB_RANGE_HEAD(3),
+ 0, 8, TLV_DB_SCALE_ITEM(-5900, 400, 0),
+ 9, 16, TLV_DB_SCALE_ITEM(-2500, 200, 0),
+ 17, 31, TLV_DB_SCALE_ITEM(-1000, 100, 0),
+};
+
+static const struct snd_kcontrol_new tpa6140a2_controls[] = {
+ SOC_SINGLE_EXT_TLV("TPA6140A2 Headphone Playback Volume",
+ TPA6130A2_REG_VOL_MUTE, 1, 0x1f, 0,
+ tpa6130a2_get_volsw, tpa6130a2_put_volsw,
+ tpa6140_tlv),
+};
+
/*
* Enable or disable channel (left or right)
* The bit number for mute and amplifier are the same per channel:
@@ -369,13 +386,22 @@ static const struct snd_soc_dapm_route audio_map[] = {
int tpa6130a2_add_controls(struct snd_soc_codec *codec)
{
+ struct tpa6130a2_data *data;
+
+ BUG_ON(tpa6130a2_client == NULL);
+ data = i2c_get_clientdata(tpa6130a2_client);
+
snd_soc_dapm_new_controls(codec, tpa6130a2_dapm_widgets,
ARRAY_SIZE(tpa6130a2_dapm_widgets));
snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
- return snd_soc_add_controls(codec, tpa6130a2_controls,
- ARRAY_SIZE(tpa6130a2_controls));
+ if (data->id == TPA6140A2)
+ return snd_soc_add_controls(codec, tpa6140a2_controls,
+ ARRAY_SIZE(tpa6140a2_controls));
+ else
+ return snd_soc_add_controls(codec, tpa6130a2_controls,
+ ARRAY_SIZE(tpa6130a2_controls));
}
EXPORT_SYMBOL_GPL(tpa6130a2_add_controls);
@@ -408,6 +434,7 @@ static int __devinit tpa6130a2_probe(struct i2c_client *client,
pdata = client->dev.platform_data;
data->power_gpio = pdata->power_gpio;
+ data->id = pdata->id;
mutex_init(&data->mutex);
@@ -426,7 +453,7 @@ static int __devinit tpa6130a2_probe(struct i2c_client *client,
gpio_direction_output(data->power_gpio, 0);
}
- switch (pdata->id) {
+ switch (data->id) {
case TPA6130A2:
for (i = 0; i < ARRAY_SIZE(data->supplies); i++)
data->supplies[i].supply = tpa6130a2_supply_names[i];
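The tpa6130a2 hunks above stop treating the control's maximum value as its register mask: the mask is now derived with (1 << fls(max)) - 1 and inversion is applied against max itself. For the 0x3f and 0x1f controls here the two values coincide, but they differ whenever max is not of the form 2^n - 1. A sketch of the read side, for a made-up control with max 5 at bits [3:1]:

#include <linux/bitops.h>

/* Hypothetical helper; the name and the max == 5 control are illustrative. */
static unsigned int demo_read_volsw(unsigned int reg_val, unsigned int shift,
				    int max, unsigned int invert)
{
	unsigned int mask = (1 << fls(max)) - 1;	/* 0x7 when max == 5 */
	unsigned int val = (reg_val >> shift) & mask;

	/* Invert against the maximum, not the mask: a raw 0 must map to 5,
	 * never to 7, or the reported value would exceed the control range. */
	if (invert)
		val = max - val;
	return val;
}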
diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c
index 520ffd6536c3..aaa8cd47b8c0 100644
--- a/sound/soc/codecs/twl4030.c
+++ b/sound/soc/codecs/twl4030.c
@@ -124,6 +124,8 @@ struct twl4030_priv {
struct snd_soc_codec codec;
unsigned int codec_powered;
+
+ /* reference counts of AIF/APLL users */
unsigned int apll_enabled;
struct snd_pcm_substream *master_substream;
@@ -136,9 +138,11 @@ struct twl4030_priv {
unsigned int sysclk;
- /* Headset output state handling */
- unsigned int hsl_enabled;
- unsigned int hsr_enabled;
+ /* Output (with associated amp) states */
+ u8 hsl_enabled, hsr_enabled;
+ u8 earpiece_enabled;
+ u8 predrivel_enabled, predriver_enabled;
+ u8 carkitl_enabled, carkitr_enabled;
};
/*
@@ -174,17 +178,52 @@ static inline void twl4030_write_reg_cache(struct snd_soc_codec *codec,
static int twl4030_write(struct snd_soc_codec *codec,
unsigned int reg, unsigned int value)
{
+ struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(codec);
+ int write_to_reg = 0;
+
twl4030_write_reg_cache(codec, reg, value);
- if (likely(reg < TWL4030_REG_SW_SHADOW))
- return twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, value,
- reg);
- else
- return 0;
+ if (likely(reg < TWL4030_REG_SW_SHADOW)) {
+ /* Decide if the given register can be written */
+ switch (reg) {
+ case TWL4030_REG_EAR_CTL:
+ if (twl4030->earpiece_enabled)
+ write_to_reg = 1;
+ break;
+ case TWL4030_REG_PREDL_CTL:
+ if (twl4030->predrivel_enabled)
+ write_to_reg = 1;
+ break;
+ case TWL4030_REG_PREDR_CTL:
+ if (twl4030->predriver_enabled)
+ write_to_reg = 1;
+ break;
+ case TWL4030_REG_PRECKL_CTL:
+ if (twl4030->carkitl_enabled)
+ write_to_reg = 1;
+ break;
+ case TWL4030_REG_PRECKR_CTL:
+ if (twl4030->carkitr_enabled)
+ write_to_reg = 1;
+ break;
+ case TWL4030_REG_HS_GAIN_SET:
+ if (twl4030->hsl_enabled || twl4030->hsr_enabled)
+ write_to_reg = 1;
+ break;
+ default:
+ /* All other registers can be written */
+ write_to_reg = 1;
+ break;
+ }
+ if (write_to_reg)
+ return twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
+ value, reg);
+ }
+ return 0;
}
static void twl4030_codec_enable(struct snd_soc_codec *codec, int enable)
{
- struct twl4030_priv *twl4030 = codec->private_data;
+ struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(codec);
int mode;
if (enable == twl4030->codec_powered)
@@ -222,28 +261,28 @@ static void twl4030_init_chip(struct snd_soc_codec *codec)
static void twl4030_apll_enable(struct snd_soc_codec *codec, int enable)
{
- struct twl4030_priv *twl4030 = codec->private_data;
- int status;
-
- if (enable == twl4030->apll_enabled)
- return;
-
- if (enable)
- /* Enable PLL */
- status = twl4030_codec_enable_resource(TWL4030_CODEC_RES_APLL);
- else
- /* Disable PLL */
- status = twl4030_codec_disable_resource(TWL4030_CODEC_RES_APLL);
+ struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(codec);
+ int status = -1;
+
+ if (enable) {
+ twl4030->apll_enabled++;
+ if (twl4030->apll_enabled == 1)
+ status = twl4030_codec_enable_resource(
+ TWL4030_CODEC_RES_APLL);
+ } else {
+ twl4030->apll_enabled--;
+ if (!twl4030->apll_enabled)
+ status = twl4030_codec_disable_resource(
+ TWL4030_CODEC_RES_APLL);
+ }
if (status >= 0)
twl4030_write_reg_cache(codec, TWL4030_REG_APLL_CTL, status);
-
- twl4030->apll_enabled = enable;
}
static void twl4030_power_up(struct snd_soc_codec *codec)
{
- struct twl4030_priv *twl4030 = codec->private_data;
+ struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(codec);
u8 anamicl, regmisc1, byte;
int i = 0;
@@ -526,26 +565,26 @@ static int micpath_event(struct snd_soc_dapm_widget *w,
* Output PGA builder:
* Handle the muting and unmuting of the given output (turning off the
* amplifier associated with the output pin)
- * On mute bypass the reg_cache and mute the volume
- * On unmute: restore the register content
+ * On mute bypass the reg_cache and write 0 to the register
+ * On unmute: restore the register content from the reg_cache
* Outputs handled in this way: Earpiece, PreDrivL/R, CarkitL/R
*/
#define TWL4030_OUTPUT_PGA(pin_name, reg, mask) \
static int pin_name##pga_event(struct snd_soc_dapm_widget *w, \
struct snd_kcontrol *kcontrol, int event) \
{ \
- u8 reg_val; \
+ struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(w->codec); \
\
switch (event) { \
case SND_SOC_DAPM_POST_PMU: \
+ twl4030->pin_name##_enabled = 1; \
twl4030_write(w->codec, reg, \
twl4030_read_reg_cache(w->codec, reg)); \
break; \
case SND_SOC_DAPM_POST_PMD: \
- reg_val = twl4030_read_reg_cache(w->codec, reg); \
- twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, \
- reg_val & (~mask), \
- reg); \
+ twl4030->pin_name##_enabled = 0; \
+ twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, \
+ 0, reg); \
break; \
} \
return 0; \
@@ -636,13 +675,38 @@ static int apll_event(struct snd_soc_dapm_widget *w,
return 0;
}
+static int aif_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ u8 audio_if;
+
+ audio_if = twl4030_read_reg_cache(w->codec, TWL4030_REG_AUDIO_IF);
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ /* Enable AIF */
+ /* enable the PLL before we use it to clock the DAI */
+ twl4030_apll_enable(w->codec, 1);
+
+ twl4030_write(w->codec, TWL4030_REG_AUDIO_IF,
+ audio_if | TWL4030_AIF_EN);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ /* disable the DAI before we stop its source PLL */
+ twl4030_write(w->codec, TWL4030_REG_AUDIO_IF,
+ audio_if & ~TWL4030_AIF_EN);
+ twl4030_apll_enable(w->codec, 0);
+ break;
+ }
+ return 0;
+}
+
static void headset_ramp(struct snd_soc_codec *codec, int ramp)
{
struct snd_soc_device *socdev = codec->socdev;
struct twl4030_setup_data *setup = socdev->codec_data;
unsigned char hs_gain, hs_pop;
- struct twl4030_priv *twl4030 = codec->private_data;
+ struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(codec);
/* Base values for ramp delay calculation: 2^19 - 2^26 */
unsigned int ramp_base[] = {524288, 1048576, 2097152, 4194304,
8388608, 16777216, 33554432, 67108864};
@@ -665,7 +729,10 @@ static void headset_ramp(struct snd_soc_codec *codec, int ramp)
/* Headset ramp-up according to the TRM */
hs_pop |= TWL4030_VMID_EN;
twl4030_write(codec, TWL4030_REG_HS_POPN_SET, hs_pop);
- twl4030_write(codec, TWL4030_REG_HS_GAIN_SET, hs_gain);
+ /* Actually write to the register */
+ twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
+ hs_gain,
+ TWL4030_REG_HS_GAIN_SET);
hs_pop |= TWL4030_RAMP_EN;
twl4030_write(codec, TWL4030_REG_HS_POPN_SET, hs_pop);
/* Wait ramp delay time + 1, so the VMID can settle */
@@ -702,7 +769,7 @@ static void headset_ramp(struct snd_soc_codec *codec, int ramp)
static int headsetlpga_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct twl4030_priv *twl4030 = w->codec->private_data;
+ struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(w->codec);
switch (event) {
case SND_SOC_DAPM_POST_PMU:
@@ -726,7 +793,7 @@ static int headsetlpga_event(struct snd_soc_dapm_widget *w,
static int headsetrpga_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct twl4030_priv *twl4030 = w->codec->private_data;
+ struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(w->codec);
switch (event) {
case SND_SOC_DAPM_POST_PMU:
@@ -918,7 +985,7 @@ static int snd_soc_put_twl4030_opmode_enum_double(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
- struct twl4030_priv *twl4030 = codec->private_data;
+ struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(codec);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
unsigned short val;
unsigned short mask, bitmask;
@@ -1128,8 +1195,6 @@ static const struct snd_soc_dapm_widget twl4030_dapm_widgets[] = {
SND_SOC_DAPM_INPUT("DIGIMIC1"),
/* Outputs */
- SND_SOC_DAPM_OUTPUT("OUTL"),
- SND_SOC_DAPM_OUTPUT("OUTR"),
SND_SOC_DAPM_OUTPUT("EARPIECE"),
SND_SOC_DAPM_OUTPUT("PREDRIVEL"),
SND_SOC_DAPM_OUTPUT("PREDRIVER"),
@@ -1141,6 +1206,11 @@ static const struct snd_soc_dapm_widget twl4030_dapm_widgets[] = {
SND_SOC_DAPM_OUTPUT("HFR"),
SND_SOC_DAPM_OUTPUT("VIBRA"),
+ /* AIF and APLL clocks for running DAIs (including loopback) */
+ SND_SOC_DAPM_OUTPUT("Virtual HiFi OUT"),
+ SND_SOC_DAPM_INPUT("Virtual HiFi IN"),
+ SND_SOC_DAPM_OUTPUT("Virtual Voice OUT"),
+
/* DACs */
SND_SOC_DAPM_DAC("DAC Right1", "Right Front HiFi Playback",
SND_SOC_NOPM, 0, 0),
@@ -1204,7 +1274,8 @@ static const struct snd_soc_dapm_widget twl4030_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("APLL Enable", SND_SOC_NOPM, 0, 0, apll_event,
SND_SOC_DAPM_PRE_PMU|SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_SUPPLY("AIF Enable", TWL4030_REG_AUDIO_IF, 0, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("AIF Enable", SND_SOC_NOPM, 0, 0, aif_event,
+ SND_SOC_DAPM_PRE_PMU|SND_SOC_DAPM_POST_PMD),
/* Output MIXER controls */
/* Earpiece */
@@ -1334,10 +1405,6 @@ static const struct snd_soc_dapm_route intercon[] = {
{"Digital Voice Playback Mixer", NULL, "DAC Voice"},
/* Supply for the digital part (APLL) */
- {"Digital R1 Playback Mixer", NULL, "APLL Enable"},
- {"Digital L1 Playback Mixer", NULL, "APLL Enable"},
- {"Digital R2 Playback Mixer", NULL, "APLL Enable"},
- {"Digital L2 Playback Mixer", NULL, "APLL Enable"},
{"Digital Voice Playback Mixer", NULL, "APLL Enable"},
{"Digital R1 Playback Mixer", NULL, "AIF Enable"},
@@ -1411,8 +1478,14 @@ static const struct snd_soc_dapm_route intercon[] = {
{"Vibra Mux", "AudioR2", "DAC Right2"},
/* outputs */
- {"OUTL", NULL, "Analog L2 Playback Mixer"},
- {"OUTR", NULL, "Analog R2 Playback Mixer"},
+ /* Must be always connected (for AIF and APLL) */
+ {"Virtual HiFi OUT", NULL, "Digital L1 Playback Mixer"},
+ {"Virtual HiFi OUT", NULL, "Digital R1 Playback Mixer"},
+ {"Virtual HiFi OUT", NULL, "Digital L2 Playback Mixer"},
+ {"Virtual HiFi OUT", NULL, "Digital R2 Playback Mixer"},
+ /* Must be always connected (for APLL) */
+ {"Virtual Voice OUT", NULL, "Digital Voice Playback Mixer"},
+ /* Physical outputs */
{"EARPIECE", NULL, "Earpiece PGA"},
{"PREDRIVEL", NULL, "PredriveL PGA"},
{"PREDRIVER", NULL, "PredriveR PGA"},
@@ -1426,6 +1499,12 @@ static const struct snd_soc_dapm_route intercon[] = {
{"VIBRA", NULL, "Vibra Route"},
/* Capture path */
+ /* Must be always connected (for AIF and APLL) */
+ {"ADC Virtual Left1", NULL, "Virtual HiFi IN"},
+ {"ADC Virtual Right1", NULL, "Virtual HiFi IN"},
+ {"ADC Virtual Left2", NULL, "Virtual HiFi IN"},
+ {"ADC Virtual Right2", NULL, "Virtual HiFi IN"},
+ /* Physical inputs */
{"Analog Left", "Main Mic Capture Switch", "MAINMIC"},
{"Analog Left", "Headset Mic Capture Switch", "HSMIC"},
{"Analog Left", "AUXL Capture Switch", "AUXL"},
@@ -1458,11 +1537,6 @@ static const struct snd_soc_dapm_route intercon[] = {
{"ADC Virtual Left2", NULL, "TX2 Capture Route"},
{"ADC Virtual Right2", NULL, "TX2 Capture Route"},
- {"ADC Virtual Left1", NULL, "APLL Enable"},
- {"ADC Virtual Right1", NULL, "APLL Enable"},
- {"ADC Virtual Left2", NULL, "APLL Enable"},
- {"ADC Virtual Right2", NULL, "APLL Enable"},
-
{"ADC Virtual Left1", NULL, "AIF Enable"},
{"ADC Virtual Right1", NULL, "AIF Enable"},
{"ADC Virtual Left2", NULL, "AIF Enable"},
@@ -1588,7 +1662,7 @@ static int twl4030_startup(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
- struct twl4030_priv *twl4030 = codec->private_data;
+ struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(codec);
if (twl4030->master_substream) {
twl4030->slave_substream = substream;
@@ -1619,7 +1693,7 @@ static void twl4030_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
- struct twl4030_priv *twl4030 = codec->private_data;
+ struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(codec);
if (twl4030->master_substream == substream)
twl4030->master_substream = twl4030->slave_substream;
@@ -1645,7 +1719,7 @@ static int twl4030_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
- struct twl4030_priv *twl4030 = codec->private_data;
+ struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(codec);
u8 mode, old_mode, format, old_format;
/* If the substream has 4 channel, do the necessary setup */
@@ -1765,7 +1839,7 @@ static int twl4030_set_dai_sysclk(struct snd_soc_dai *codec_dai,
int clk_id, unsigned int freq, int dir)
{
struct snd_soc_codec *codec = codec_dai->codec;
- struct twl4030_priv *twl4030 = codec->private_data;
+ struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(codec);
switch (freq) {
case 19200000:
@@ -1880,7 +1954,7 @@ static int twl4030_voice_startup(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
- struct twl4030_priv *twl4030 = codec->private_data;
+ struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(codec);
u8 mode;
/* If the system master clock is not 26MHz, the voice PCM interface is
@@ -1962,7 +2036,7 @@ static int twl4030_voice_set_dai_sysclk(struct snd_soc_dai *codec_dai,
int clk_id, unsigned int freq, int dir)
{
struct snd_soc_codec *codec = codec_dai->codec;
- struct twl4030_priv *twl4030 = codec->private_data;
+ struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(codec);
if (freq != 26000000) {
dev_err(codec->dev, "Unsupported APLL mclk: %u, the Voice"
@@ -2108,7 +2182,6 @@ static int twl4030_soc_resume(struct platform_device *pdev)
struct snd_soc_codec *codec = socdev->card->codec;
twl4030_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- twl4030_set_bias_level(codec, codec->suspend_bias_level);
return 0;
}
@@ -2125,7 +2198,7 @@ static int twl4030_soc_probe(struct platform_device *pdev)
BUG_ON(!twl4030_codec);
codec = twl4030_codec;
- twl4030 = codec->private_data;
+ twl4030 = snd_soc_codec_get_drvdata(codec);
socdev->card->codec = codec;
/* Configuration for headset ramp delay from setup data */
@@ -2188,7 +2261,7 @@ static int __devinit twl4030_codec_probe(struct platform_device *pdev)
}
codec = &twl4030->codec;
- codec->private_data = twl4030;
+ snd_soc_codec_set_drvdata(codec, twl4030);
codec->dev = &pdev->dev;
twl4030_dai[0].dev = &pdev->dev;
twl4030_dai[1].dev = &pdev->dev;
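The twl4030 changes above turn the APLL enable into a reference count shared by the AIF and APLL supply widgets, so the first active DAI path powers the PLL and the last one to go away releases it. They also gate writes to the output-amplifier registers on whether the corresponding output is actually enabled. The counting idiom, reduced to a standalone sketch with placeholder names:

/* Placeholder resource; demo_res_* is not TWL4030 API. */
struct demo_res {
	unsigned int users;
	int enabled;
};

static void demo_res_get(struct demo_res *res)
{
	/* only the first user really switches the resource on */
	if (res->users++ == 0)
		res->enabled = 1;
}

static void demo_res_put(struct demo_res *res)
{
	/* only the last user switches it off again */
	if (--res->users == 0)
		res->enabled = 0;
}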
diff --git a/sound/soc/codecs/twl6040.c b/sound/soc/codecs/twl6040.c
new file mode 100644
index 000000000000..2ae442edeb9a
--- /dev/null
+++ b/sound/soc/codecs/twl6040.c
@@ -0,0 +1,1227 @@
+/*
+ * ALSA SoC TWL6040 codec driver
+ *
+ * Author: Misael Lopez Cruz <x0052729@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/i2c/twl.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include "twl6040.h"
+
+#define TWL6040_RATES (SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
+#define TWL6040_FORMATS (SNDRV_PCM_FMTBIT_S32_LE)
+
+/* codec private data */
+struct twl6040_data {
+ struct snd_soc_codec codec;
+ int audpwron;
+ int naudint;
+ int codec_powered;
+ int pll;
+ int non_lp;
+ unsigned int sysclk;
+ struct snd_pcm_hw_constraint_list *sysclk_constraints;
+ struct completion ready;
+};
+
+/*
+ * twl6040 register cache & default register settings
+ */
+static const u8 twl6040_reg[TWL6040_CACHEREGNUM] = {
+ 0x00, /* not used 0x00 */
+ 0x4B, /* TWL6040_ASICID (ro) 0x01 */
+ 0x00, /* TWL6040_ASICREV (ro) 0x02 */
+ 0x00, /* TWL6040_INTID 0x03 */
+ 0x00, /* TWL6040_INTMR 0x04 */
+ 0x00, /* TWL6040_NCPCTRL 0x05 */
+ 0x00, /* TWL6040_LDOCTL 0x06 */
+ 0x60, /* TWL6040_HPPLLCTL 0x07 */
+ 0x00, /* TWL6040_LPPLLCTL 0x08 */
+ 0x4A, /* TWL6040_LPPLLDIV 0x09 */
+ 0x00, /* TWL6040_AMICBCTL 0x0A */
+ 0x00, /* TWL6040_DMICBCTL 0x0B */
+ 0x18, /* TWL6040_MICLCTL 0x0C - No input selected on Left Mic */
+ 0x18, /* TWL6040_MICRCTL 0x0D - No input selected on Right Mic */
+ 0x00, /* TWL6040_MICGAIN 0x0E */
+ 0x1B, /* TWL6040_LINEGAIN 0x0F */
+ 0x00, /* TWL6040_HSLCTL 0x10 */
+ 0x00, /* TWL6040_HSRCTL 0x11 */
+ 0x00, /* TWL6040_HSGAIN 0x12 */
+ 0x00, /* TWL6040_EARCTL 0x13 */
+ 0x00, /* TWL6040_HFLCTL 0x14 */
+ 0x00, /* TWL6040_HFLGAIN 0x15 */
+ 0x00, /* TWL6040_HFRCTL 0x16 */
+ 0x00, /* TWL6040_HFRGAIN 0x17 */
+ 0x00, /* TWL6040_VIBCTLL 0x18 */
+ 0x00, /* TWL6040_VIBDATL 0x19 */
+ 0x00, /* TWL6040_VIBCTLR 0x1A */
+ 0x00, /* TWL6040_VIBDATR 0x1B */
+ 0x00, /* TWL6040_HKCTL1 0x1C */
+ 0x00, /* TWL6040_HKCTL2 0x1D */
+ 0x00, /* TWL6040_GPOCTL 0x1E */
+ 0x00, /* TWL6040_ALB 0x1F */
+ 0x00, /* TWL6040_DLB 0x20 */
+ 0x00, /* not used 0x21 */
+ 0x00, /* not used 0x22 */
+ 0x00, /* not used 0x23 */
+ 0x00, /* not used 0x24 */
+ 0x00, /* not used 0x25 */
+ 0x00, /* not used 0x26 */
+ 0x00, /* not used 0x27 */
+ 0x00, /* TWL6040_TRIM1 0x28 */
+ 0x00, /* TWL6040_TRIM2 0x29 */
+ 0x00, /* TWL6040_TRIM3 0x2A */
+ 0x00, /* TWL6040_HSOTRIM 0x2B */
+ 0x00, /* TWL6040_HFOTRIM 0x2C */
+ 0x09, /* TWL6040_ACCCTL 0x2D */
+ 0x00, /* TWL6040_STATUS (ro) 0x2E */
+};
+
+/*
+ * twl6040 vio/gnd registers:
+ * registers under vio/gnd supply can be accessed
+ * before the power-up sequence, after NRESPWRON goes high
+ */
+static const int twl6040_vio_reg[TWL6040_VIOREGNUM] = {
+ TWL6040_REG_ASICID,
+ TWL6040_REG_ASICREV,
+ TWL6040_REG_INTID,
+ TWL6040_REG_INTMR,
+ TWL6040_REG_NCPCTL,
+ TWL6040_REG_LDOCTL,
+ TWL6040_REG_AMICBCTL,
+ TWL6040_REG_DMICBCTL,
+ TWL6040_REG_HKCTL1,
+ TWL6040_REG_HKCTL2,
+ TWL6040_REG_GPOCTL,
+ TWL6040_REG_TRIM1,
+ TWL6040_REG_TRIM2,
+ TWL6040_REG_TRIM3,
+ TWL6040_REG_HSOTRIM,
+ TWL6040_REG_HFOTRIM,
+ TWL6040_REG_ACCCTL,
+ TWL6040_REG_STATUS,
+};
+
+/*
+ * twl6040 vdd/vss registers:
+ * registers under vdd/vss supplies can only be accessed
+ * after the power-up sequence
+ */
+static const int twl6040_vdd_reg[TWL6040_VDDREGNUM] = {
+ TWL6040_REG_HPPLLCTL,
+ TWL6040_REG_LPPLLCTL,
+ TWL6040_REG_LPPLLDIV,
+ TWL6040_REG_MICLCTL,
+ TWL6040_REG_MICRCTL,
+ TWL6040_REG_MICGAIN,
+ TWL6040_REG_LINEGAIN,
+ TWL6040_REG_HSLCTL,
+ TWL6040_REG_HSRCTL,
+ TWL6040_REG_HSGAIN,
+ TWL6040_REG_EARCTL,
+ TWL6040_REG_HFLCTL,
+ TWL6040_REG_HFLGAIN,
+ TWL6040_REG_HFRCTL,
+ TWL6040_REG_HFRGAIN,
+ TWL6040_REG_VIBCTLL,
+ TWL6040_REG_VIBDATL,
+ TWL6040_REG_VIBCTLR,
+ TWL6040_REG_VIBDATR,
+ TWL6040_REG_ALB,
+ TWL6040_REG_DLB,
+};
+
+/*
+ * read twl6040 register cache
+ */
+static inline unsigned int twl6040_read_reg_cache(struct snd_soc_codec *codec,
+ unsigned int reg)
+{
+ u8 *cache = codec->reg_cache;
+
+ if (reg >= TWL6040_CACHEREGNUM)
+ return -EIO;
+
+ return cache[reg];
+}
+
+/*
+ * write twl6040 register cache
+ */
+static inline void twl6040_write_reg_cache(struct snd_soc_codec *codec,
+ u8 reg, u8 value)
+{
+ u8 *cache = codec->reg_cache;
+
+ if (reg >= TWL6040_CACHEREGNUM)
+ return;
+ cache[reg] = value;
+}
+
+/*
+ * read from twl6040 hardware register
+ */
+static int twl6040_read_reg_volatile(struct snd_soc_codec *codec,
+ unsigned int reg)
+{
+ u8 value;
+
+ if (reg >= TWL6040_CACHEREGNUM)
+ return -EIO;
+
+ twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &value, reg);
+ twl6040_write_reg_cache(codec, reg, value);
+
+ return value;
+}
+
+/*
+ * write to the twl6040 register space
+ */
+static int twl6040_write(struct snd_soc_codec *codec,
+ unsigned int reg, unsigned int value)
+{
+ if (reg >= TWL6040_CACHEREGNUM)
+ return -EIO;
+
+ twl6040_write_reg_cache(codec, reg, value);
+ return twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, value, reg);
+}
+
+static void twl6040_init_vio_regs(struct snd_soc_codec *codec)
+{
+ u8 *cache = codec->reg_cache;
+ int reg, i;
+
+ /* allow registers to be accessed by i2c */
+ twl6040_write(codec, TWL6040_REG_ACCCTL, cache[TWL6040_REG_ACCCTL]);
+
+ for (i = 0; i < TWL6040_VIOREGNUM; i++) {
+ reg = twl6040_vio_reg[i];
+ /* skip read-only registers (ASICID, ASICREV, STATUS) */
+ switch (reg) {
+ case TWL6040_REG_ASICID:
+ case TWL6040_REG_ASICREV:
+ case TWL6040_REG_STATUS:
+ continue;
+ default:
+ break;
+ }
+ twl6040_write(codec, reg, cache[reg]);
+ }
+}
+
+static void twl6040_init_vdd_regs(struct snd_soc_codec *codec)
+{
+ u8 *cache = codec->reg_cache;
+ int reg, i;
+
+ for (i = 0; i < TWL6040_VDDREGNUM; i++) {
+ reg = twl6040_vdd_reg[i];
+ twl6040_write(codec, reg, cache[reg]);
+ }
+}
+
+/* twl6040 codec manual power-up sequence */
+static void twl6040_power_up(struct snd_soc_codec *codec)
+{
+ u8 ncpctl, ldoctl, lppllctl, accctl;
+
+ ncpctl = twl6040_read_reg_cache(codec, TWL6040_REG_NCPCTL);
+ ldoctl = twl6040_read_reg_cache(codec, TWL6040_REG_LDOCTL);
+ lppllctl = twl6040_read_reg_cache(codec, TWL6040_REG_LPPLLCTL);
+ accctl = twl6040_read_reg_cache(codec, TWL6040_REG_ACCCTL);
+
+ /* enable reference system */
+ ldoctl |= TWL6040_REFENA;
+ twl6040_write(codec, TWL6040_REG_LDOCTL, ldoctl);
+ msleep(10);
+ /* enable internal oscillator */
+ ldoctl |= TWL6040_OSCENA;
+ twl6040_write(codec, TWL6040_REG_LDOCTL, ldoctl);
+ udelay(10);
+ /* enable high-side ldo */
+ ldoctl |= TWL6040_HSLDOENA;
+ twl6040_write(codec, TWL6040_REG_LDOCTL, ldoctl);
+ udelay(244);
+ /* enable negative charge pump */
+ ncpctl |= TWL6040_NCPENA | TWL6040_NCPOPEN;
+ twl6040_write(codec, TWL6040_REG_NCPCTL, ncpctl);
+ udelay(488);
+ /* enable low-side ldo */
+ ldoctl |= TWL6040_LSLDOENA;
+ twl6040_write(codec, TWL6040_REG_LDOCTL, ldoctl);
+ udelay(244);
+ /* enable low-power pll */
+ lppllctl |= TWL6040_LPLLENA;
+ twl6040_write(codec, TWL6040_REG_LPPLLCTL, lppllctl);
+ /* reset state machine */
+ accctl |= TWL6040_RESETSPLIT;
+ twl6040_write(codec, TWL6040_REG_ACCCTL, accctl);
+ mdelay(5);
+ accctl &= ~TWL6040_RESETSPLIT;
+ twl6040_write(codec, TWL6040_REG_ACCCTL, accctl);
+ /* disable internal oscillator */
+ ldoctl &= ~TWL6040_OSCENA;
+ twl6040_write(codec, TWL6040_REG_LDOCTL, ldoctl);
+}
+
+/* twl6040 codec manual power-down sequence */
+static void twl6040_power_down(struct snd_soc_codec *codec)
+{
+ u8 ncpctl, ldoctl, lppllctl, accctl;
+
+ ncpctl = twl6040_read_reg_cache(codec, TWL6040_REG_NCPCTL);
+ ldoctl = twl6040_read_reg_cache(codec, TWL6040_REG_LDOCTL);
+ lppllctl = twl6040_read_reg_cache(codec, TWL6040_REG_LPPLLCTL);
+ accctl = twl6040_read_reg_cache(codec, TWL6040_REG_ACCCTL);
+
+ /* enable internal oscillator */
+ ldoctl |= TWL6040_OSCENA;
+ twl6040_write(codec, TWL6040_REG_LDOCTL, ldoctl);
+ udelay(10);
+ /* disable low-power pll */
+ lppllctl &= ~TWL6040_LPLLENA;
+ twl6040_write(codec, TWL6040_REG_LPPLLCTL, lppllctl);
+ /* disable low-side ldo */
+ ldoctl &= ~TWL6040_LSLDOENA;
+ twl6040_write(codec, TWL6040_REG_LDOCTL, ldoctl);
+ udelay(244);
+ /* disable negative charge pump */
+ ncpctl &= ~(TWL6040_NCPENA | TWL6040_NCPOPEN);
+ twl6040_write(codec, TWL6040_REG_NCPCTL, ncpctl);
+ udelay(488);
+ /* disable high-side ldo */
+ ldoctl &= ~TWL6040_HSLDOENA;
+ twl6040_write(codec, TWL6040_REG_LDOCTL, ldoctl);
+ udelay(244);
+ /* disable internal oscillator */
+ ldoctl &= ~TWL6040_OSCENA;
+ twl6040_write(codec, TWL6040_REG_LDOCTL, ldoctl);
+ /* disable reference system */
+ ldoctl &= ~TWL6040_REFENA;
+ twl6040_write(codec, TWL6040_REG_LDOCTL, ldoctl);
+ msleep(10);
+}
+
+/* set headset dac and driver power mode */
+static int headset_power_mode(struct snd_soc_codec *codec, int high_perf)
+{
+ int hslctl, hsrctl;
+ int mask = TWL6040_HSDRVMODEL | TWL6040_HSDACMODEL;
+
+ hslctl = twl6040_read_reg_cache(codec, TWL6040_REG_HSLCTL);
+ hsrctl = twl6040_read_reg_cache(codec, TWL6040_REG_HSRCTL);
+
+ if (high_perf) {
+ hslctl &= ~mask;
+ hsrctl &= ~mask;
+ } else {
+ hslctl |= mask;
+ hsrctl |= mask;
+ }
+
+ twl6040_write(codec, TWL6040_REG_HSLCTL, hslctl);
+ twl6040_write(codec, TWL6040_REG_HSRCTL, hsrctl);
+
+ return 0;
+}
+
+static int twl6040_power_mode_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
+
+ if (SND_SOC_DAPM_EVENT_ON(event))
+ priv->non_lp++;
+ else
+ priv->non_lp--;
+
+ return 0;
+}
+
+/* audio interrupt handler */
+static irqreturn_t twl6040_naudint_handler(int irq, void *data)
+{
+ struct snd_soc_codec *codec = data;
+ struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
+ u8 intid;
+
+ twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &intid, TWL6040_REG_INTID);
+
+ switch (intid) {
+ case TWL6040_THINT:
+ dev_alert(codec->dev, "die temp over-limit detection\n");
+ break;
+ case TWL6040_PLUGINT:
+ case TWL6040_UNPLUGINT:
+ case TWL6040_HOOKINT:
+ break;
+ case TWL6040_HFINT:
+ dev_alert(codec->dev, "hf drivers over current detection\n");
+ break;
+ case TWL6040_VIBINT:
+ dev_alert(codec->dev, "vib drivers over current detection\n");
+ break;
+ case TWL6040_READYINT:
+ complete(&priv->ready);
+ break;
+ default:
+ dev_err(codec->dev, "unknown audio interrupt %d\n", intid);
+ break;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * MICATT volume control:
+ * from -6 to 0 dB in 6 dB steps
+ */
+static DECLARE_TLV_DB_SCALE(mic_preamp_tlv, -600, 600, 0);
+
+/*
+ * MICGAIN volume control:
+ * from 6 to 30 dB in 6 dB steps
+ */
+static DECLARE_TLV_DB_SCALE(mic_amp_tlv, 600, 600, 0);
+
+/*
+ * HSGAIN volume control:
+ * from -30 to 0 dB in 2 dB steps
+ */
+static DECLARE_TLV_DB_SCALE(hs_tlv, -3000, 200, 0);
+
+/*
+ * HFGAIN volume control:
+ * from -52 to 6 dB in 2 dB steps
+ */
+static DECLARE_TLV_DB_SCALE(hf_tlv, -5200, 200, 0);
+
+/* Left analog microphone selection */
+static const char *twl6040_amicl_texts[] =
+ {"Headset Mic", "Main Mic", "Aux/FM Left", "Off"};
+
+/* Right analog microphone selection */
+static const char *twl6040_amicr_texts[] =
+ {"Headset Mic", "Sub Mic", "Aux/FM Right", "Off"};
+
+static const struct soc_enum twl6040_enum[] = {
+ SOC_ENUM_SINGLE(TWL6040_REG_MICLCTL, 3, 3, twl6040_amicl_texts),
+ SOC_ENUM_SINGLE(TWL6040_REG_MICRCTL, 3, 3, twl6040_amicr_texts),
+};
+
+static const struct snd_kcontrol_new amicl_control =
+ SOC_DAPM_ENUM("Route", twl6040_enum[0]);
+
+static const struct snd_kcontrol_new amicr_control =
+ SOC_DAPM_ENUM("Route", twl6040_enum[1]);
+
+/* Headset DAC playback switches */
+static const struct snd_kcontrol_new hsdacl_switch_controls =
+ SOC_DAPM_SINGLE("Switch", TWL6040_REG_HSLCTL, 5, 1, 0);
+
+static const struct snd_kcontrol_new hsdacr_switch_controls =
+ SOC_DAPM_SINGLE("Switch", TWL6040_REG_HSRCTL, 5, 1, 0);
+
+/* Handsfree DAC playback switches */
+static const struct snd_kcontrol_new hfdacl_switch_controls =
+ SOC_DAPM_SINGLE("Switch", TWL6040_REG_HFLCTL, 2, 1, 0);
+
+static const struct snd_kcontrol_new hfdacr_switch_controls =
+ SOC_DAPM_SINGLE("Switch", TWL6040_REG_HFRCTL, 2, 1, 0);
+
+/* Headset driver switches */
+static const struct snd_kcontrol_new hsl_driver_switch_controls =
+ SOC_DAPM_SINGLE("Switch", TWL6040_REG_HSLCTL, 2, 1, 0);
+
+static const struct snd_kcontrol_new hsr_driver_switch_controls =
+ SOC_DAPM_SINGLE("Switch", TWL6040_REG_HSRCTL, 2, 1, 0);
+
+/* Handsfree driver switches */
+static const struct snd_kcontrol_new hfl_driver_switch_controls =
+ SOC_DAPM_SINGLE("Switch", TWL6040_REG_HFLCTL, 4, 1, 0);
+
+static const struct snd_kcontrol_new hfr_driver_switch_controls =
+ SOC_DAPM_SINGLE("Switch", TWL6040_REG_HFRCTL, 4, 1, 0);
+
+static const struct snd_kcontrol_new twl6040_snd_controls[] = {
+ /* Capture gains */
+ SOC_DOUBLE_TLV("Capture Preamplifier Volume",
+ TWL6040_REG_MICGAIN, 6, 7, 1, 1, mic_preamp_tlv),
+ SOC_DOUBLE_TLV("Capture Volume",
+ TWL6040_REG_MICGAIN, 0, 3, 4, 0, mic_amp_tlv),
+
+ /* Playback gains */
+ SOC_DOUBLE_TLV("Headset Playback Volume",
+ TWL6040_REG_HSGAIN, 0, 4, 0xF, 1, hs_tlv),
+ SOC_DOUBLE_R_TLV("Handsfree Playback Volume",
+ TWL6040_REG_HFLGAIN, TWL6040_REG_HFRGAIN, 0, 0x1D, 1, hf_tlv),
+
+};
+
+static const struct snd_soc_dapm_widget twl6040_dapm_widgets[] = {
+ /* Inputs */
+ SND_SOC_DAPM_INPUT("MAINMIC"),
+ SND_SOC_DAPM_INPUT("HSMIC"),
+ SND_SOC_DAPM_INPUT("SUBMIC"),
+ SND_SOC_DAPM_INPUT("AFML"),
+ SND_SOC_DAPM_INPUT("AFMR"),
+
+ /* Outputs */
+ SND_SOC_DAPM_OUTPUT("HSOL"),
+ SND_SOC_DAPM_OUTPUT("HSOR"),
+ SND_SOC_DAPM_OUTPUT("HFL"),
+ SND_SOC_DAPM_OUTPUT("HFR"),
+
+ /* Analog input muxes for the capture amplifiers */
+ SND_SOC_DAPM_MUX("Analog Left Capture Route",
+ SND_SOC_NOPM, 0, 0, &amicl_control),
+ SND_SOC_DAPM_MUX("Analog Right Capture Route",
+ SND_SOC_NOPM, 0, 0, &amicr_control),
+
+ /* Analog capture PGAs */
+ SND_SOC_DAPM_PGA("MicAmpL",
+ TWL6040_REG_MICLCTL, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("MicAmpR",
+ TWL6040_REG_MICRCTL, 0, 0, NULL, 0),
+
+ /* ADCs */
+ SND_SOC_DAPM_ADC("ADC Left", "Left Front Capture",
+ TWL6040_REG_MICLCTL, 2, 0),
+ SND_SOC_DAPM_ADC("ADC Right", "Right Front Capture",
+ TWL6040_REG_MICRCTL, 2, 0),
+
+ /* Microphone bias */
+ SND_SOC_DAPM_MICBIAS("Headset Mic Bias",
+ TWL6040_REG_AMICBCTL, 0, 0),
+ SND_SOC_DAPM_MICBIAS("Main Mic Bias",
+ TWL6040_REG_AMICBCTL, 4, 0),
+ SND_SOC_DAPM_MICBIAS("Digital Mic1 Bias",
+ TWL6040_REG_DMICBCTL, 0, 0),
+ SND_SOC_DAPM_MICBIAS("Digital Mic2 Bias",
+ TWL6040_REG_DMICBCTL, 4, 0),
+
+ /* DACs */
+ SND_SOC_DAPM_DAC("HSDAC Left", "Headset Playback",
+ TWL6040_REG_HSLCTL, 0, 0),
+ SND_SOC_DAPM_DAC("HSDAC Right", "Headset Playback",
+ TWL6040_REG_HSRCTL, 0, 0),
+ SND_SOC_DAPM_DAC_E("HFDAC Left", "Handsfree Playback",
+ TWL6040_REG_HFLCTL, 0, 0,
+ twl6040_power_mode_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_DAC_E("HFDAC Right", "Handsfree Playback",
+ TWL6040_REG_HFRCTL, 0, 0,
+ twl6040_power_mode_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ /* Analog playback switches */
+ SND_SOC_DAPM_SWITCH("HSDAC Left Playback",
+ SND_SOC_NOPM, 0, 0, &hsdacl_switch_controls),
+ SND_SOC_DAPM_SWITCH("HSDAC Right Playback",
+ SND_SOC_NOPM, 0, 0, &hsdacr_switch_controls),
+ SND_SOC_DAPM_SWITCH("HFDAC Left Playback",
+ SND_SOC_NOPM, 0, 0, &hfdacl_switch_controls),
+ SND_SOC_DAPM_SWITCH("HFDAC Right Playback",
+ SND_SOC_NOPM, 0, 0, &hfdacr_switch_controls),
+
+ SND_SOC_DAPM_SWITCH("Headset Left Driver",
+ SND_SOC_NOPM, 0, 0, &hsl_driver_switch_controls),
+ SND_SOC_DAPM_SWITCH("Headset Right Driver",
+ SND_SOC_NOPM, 0, 0, &hsr_driver_switch_controls),
+ SND_SOC_DAPM_SWITCH_E("Handsfree Left Driver",
+ SND_SOC_NOPM, 0, 0, &hfl_driver_switch_controls,
+ twl6040_power_mode_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SWITCH_E("Handsfree Right Driver",
+ SND_SOC_NOPM, 0, 0, &hfr_driver_switch_controls,
+ twl6040_power_mode_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ /* Analog playback PGAs */
+ SND_SOC_DAPM_PGA("HFDAC Left PGA",
+ TWL6040_REG_HFLCTL, 1, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("HFDAC Right PGA",
+ TWL6040_REG_HFRCTL, 1, 0, NULL, 0),
+
+};
+
+static const struct snd_soc_dapm_route intercon[] = {
+ /* Capture path */
+ {"Analog Left Capture Route", "Headset Mic", "HSMIC"},
+ {"Analog Left Capture Route", "Main Mic", "MAINMIC"},
+ {"Analog Left Capture Route", "Aux/FM Left", "AFML"},
+
+ {"Analog Right Capture Route", "Headset Mic", "HSMIC"},
+ {"Analog Right Capture Route", "Sub Mic", "SUBMIC"},
+ {"Analog Right Capture Route", "Aux/FM Right", "AFMR"},
+
+ {"MicAmpL", NULL, "Analog Left Capture Route"},
+ {"MicAmpR", NULL, "Analog Right Capture Route"},
+
+ {"ADC Left", NULL, "MicAmpL"},
+ {"ADC Right", NULL, "MicAmpR"},
+
+ /* Headset playback path */
+ {"HSDAC Left Playback", "Switch", "HSDAC Left"},
+ {"HSDAC Right Playback", "Switch", "HSDAC Right"},
+
+ {"Headset Left Driver", "Switch", "HSDAC Left Playback"},
+ {"Headset Right Driver", "Switch", "HSDAC Right Playback"},
+
+ {"HSOL", NULL, "Headset Left Driver"},
+ {"HSOR", NULL, "Headset Right Driver"},
+
+ /* Handsfree playback path */
+ {"HFDAC Left Playback", "Switch", "HFDAC Left"},
+ {"HFDAC Right Playback", "Switch", "HFDAC Right"},
+
+ {"HFDAC Left PGA", NULL, "HFDAC Left Playback"},
+ {"HFDAC Right PGA", NULL, "HFDAC Right Playback"},
+
+ {"Handsfree Left Driver", "Switch", "HFDAC Left PGA"},
+ {"Handsfree Right Driver", "Switch", "HFDAC Right PGA"},
+
+ {"HFL", NULL, "Handsfree Left Driver"},
+ {"HFR", NULL, "Handsfree Right Driver"},
+};
+
+static int twl6040_add_widgets(struct snd_soc_codec *codec)
+{
+ snd_soc_dapm_new_controls(codec, twl6040_dapm_widgets,
+ ARRAY_SIZE(twl6040_dapm_widgets));
+
+ snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+
+ snd_soc_dapm_new_widgets(codec);
+
+ return 0;
+}
+
+static int twl6040_power_up_completion(struct snd_soc_codec *codec,
+ int naudint)
+{
+ struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
+ int time_left;
+ u8 intid;
+
+ time_left = wait_for_completion_timeout(&priv->ready,
+ msecs_to_jiffies(48));
+
+ if (!time_left) {
+ twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &intid,
+ TWL6040_REG_INTID);
+ if (!(intid & TWL6040_READYINT)) {
+ dev_err(codec->dev, "timeout waiting for READYINT\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ priv->codec_powered = 1;
+
+ return 0;
+}
+
+static int twl6040_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
+ int audpwron = priv->audpwron;
+ int naudint = priv->naudint;
+ int ret;
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ break;
+ case SND_SOC_BIAS_PREPARE:
+ break;
+ case SND_SOC_BIAS_STANDBY:
+ if (priv->codec_powered)
+ break;
+
+ if (gpio_is_valid(audpwron)) {
+ /* use AUDPWRON line */
+ gpio_set_value(audpwron, 1);
+
+ /* wait for power-up completion */
+ ret = twl6040_power_up_completion(codec, naudint);
+ if (ret)
+ return ret;
+
+ /* sync registers updated during power-up sequence */
+ twl6040_read_reg_volatile(codec, TWL6040_REG_NCPCTL);
+ twl6040_read_reg_volatile(codec, TWL6040_REG_LDOCTL);
+ twl6040_read_reg_volatile(codec, TWL6040_REG_LPPLLCTL);
+ } else {
+ /* use manual power-up sequence */
+ twl6040_power_up(codec);
+ priv->codec_powered = 1;
+ }
+
+ /* initialize vdd/vss registers with reg_cache */
+ twl6040_init_vdd_regs(codec);
+ break;
+ case SND_SOC_BIAS_OFF:
+ if (!priv->codec_powered)
+ break;
+
+ if (gpio_is_valid(audpwron)) {
+ /* use AUDPWRON line */
+ gpio_set_value(audpwron, 0);
+
+ /* power-down sequence latency */
+ udelay(500);
+
+ /* sync registers updated during power-down sequence */
+ twl6040_read_reg_volatile(codec, TWL6040_REG_NCPCTL);
+ twl6040_read_reg_volatile(codec, TWL6040_REG_LDOCTL);
+ twl6040_write_reg_cache(codec, TWL6040_REG_LPPLLCTL,
+ 0x00);
+ } else {
+ /* use manual power-down sequence */
+ twl6040_power_down(codec);
+ }
+
+ priv->codec_powered = 0;
+ break;
+ }
+
+ codec->bias_level = level;
+
+ return 0;
+}
+
+/* set of rates for each pll: low-power and high-performance */
+
+static unsigned int lp_rates[] = {
+ 88200,
+ 96000,
+};
+
+static struct snd_pcm_hw_constraint_list lp_constraints = {
+ .count = ARRAY_SIZE(lp_rates),
+ .list = lp_rates,
+};
+
+static unsigned int hp_rates[] = {
+ 96000,
+};
+
+static struct snd_pcm_hw_constraint_list hp_constraints = {
+ .count = ARRAY_SIZE(hp_rates),
+ .list = hp_rates,
+};
+
+static int twl6040_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_device *socdev = rtd->socdev;
+ struct snd_soc_codec *codec = socdev->card->codec;
+ struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
+
+ if (!priv->sysclk) {
+ dev_err(codec->dev,
+ "no mclk configured, call set_sysclk() on init\n");
+ return -EINVAL;
+ }
+
+ /*
+ * capture is not supported at 17.64 MHz; that rate is
+ * reserved for the headset low-power playback scenario
+ */
+ if ((priv->sysclk == 17640000) && substream->stream) {
+ dev_err(codec->dev,
+ "capture mode is not supported at %dHz\n",
+ priv->sysclk);
+ return -EINVAL;
+ }
+
+ snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ priv->sysclk_constraints);
+
+ return 0;
+}
+
+static int twl6040_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_device *socdev = rtd->socdev;
+ struct snd_soc_codec *codec = socdev->card->codec;
+ struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
+ u8 lppllctl;
+ int rate;
+
+ /* nothing to do for high-perf pll, it supports only 96 kHz */
+ if (priv->pll == TWL6040_HPPLL_ID)
+ return 0;
+
+ lppllctl = twl6040_read_reg_cache(codec, TWL6040_REG_LPPLLCTL);
+
+ rate = params_rate(params);
+ switch (rate) {
+ case 88200:
+ lppllctl |= TWL6040_LPLLFIN;
+ priv->sysclk = 17640000;
+ break;
+ case 96000:
+ lppllctl &= ~TWL6040_LPLLFIN;
+ priv->sysclk = 19200000;
+ break;
+ default:
+ dev_err(codec->dev, "unsupported rate %d\n", rate);
+ return -EINVAL;
+ }
+
+ twl6040_write(codec, TWL6040_REG_LPPLLCTL, lppllctl);
+
+ return 0;
+}
+
+static int twl6040_trigger(struct snd_pcm_substream *substream,
+ int cmd, struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_device *socdev = rtd->socdev;
+ struct snd_soc_codec *codec = socdev->card->codec;
+ struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ /*
+ * low-power playback mode is restricted
+ * to the headset path only
+ */
+ if ((priv->sysclk == 17640000) && priv->non_lp) {
+ dev_err(codec->dev,
+ "some enabled paths aren't supported at %dHz\n",
+ priv->sysclk);
+ return -EPERM;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int twl6040_set_dai_sysclk(struct snd_soc_dai *codec_dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
+ u8 hppllctl, lppllctl;
+
+ hppllctl = twl6040_read_reg_cache(codec, TWL6040_REG_HPPLLCTL);
+ lppllctl = twl6040_read_reg_cache(codec, TWL6040_REG_LPPLLCTL);
+
+ switch (clk_id) {
+ case TWL6040_SYSCLK_SEL_LPPLL:
+ switch (freq) {
+ case 32768:
+ /* headset dac and driver must be in low-power mode */
+ headset_power_mode(codec, 0);
+
+ /* clk32k input requires low-power pll */
+ lppllctl |= TWL6040_LPLLENA;
+ twl6040_write(codec, TWL6040_REG_LPPLLCTL, lppllctl);
+ mdelay(5);
+ lppllctl &= ~TWL6040_HPLLSEL;
+ twl6040_write(codec, TWL6040_REG_LPPLLCTL, lppllctl);
+ hppllctl &= ~TWL6040_HPLLENA;
+ twl6040_write(codec, TWL6040_REG_HPPLLCTL, hppllctl);
+ break;
+ default:
+ dev_err(codec->dev, "unknown mclk freq %d\n", freq);
+ return -EINVAL;
+ }
+
+ /* lppll divider */
+ switch (priv->sysclk) {
+ case 17640000:
+ lppllctl |= TWL6040_LPLLFIN;
+ break;
+ case 19200000:
+ lppllctl &= ~TWL6040_LPLLFIN;
+ break;
+ default:
+ /* sysclk not yet configured */
+ lppllctl &= ~TWL6040_LPLLFIN;
+ priv->sysclk = 19200000;
+ break;
+ }
+
+ twl6040_write(codec, TWL6040_REG_LPPLLCTL, lppllctl);
+
+ priv->pll = TWL6040_LPPLL_ID;
+ priv->sysclk_constraints = &lp_constraints;
+ break;
+ case TWL6040_SYSCLK_SEL_HPPLL:
+ hppllctl &= ~TWL6040_MCLK_MSK;
+
+ switch (freq) {
+ case 12000000:
+ /* mclk input, pll enabled */
+ hppllctl |= TWL6040_MCLK_12000KHZ |
+ TWL6040_HPLLSQRBP |
+ TWL6040_HPLLENA;
+ break;
+ case 19200000:
+ /* mclk input, pll disabled */
+ hppllctl |= TWL6040_MCLK_19200KHZ |
+ TWL6040_HPLLSQRBP |
+ TWL6040_HPLLBP;
+ break;
+ case 26000000:
+ /* mclk input, pll enabled */
+ hppllctl |= TWL6040_MCLK_26000KHZ |
+ TWL6040_HPLLSQRBP |
+ TWL6040_HPLLENA;
+ break;
+ case 38400000:
+ /* clk slicer, pll disabled */
+ hppllctl |= TWL6040_MCLK_38400KHZ |
+ TWL6040_HPLLSQRENA |
+ TWL6040_HPLLBP;
+ break;
+ default:
+ dev_err(codec->dev, "unknown mclk freq %d\n", freq);
+ return -EINVAL;
+ }
+
+ /* headset dac and driver must be in high-performance mode */
+ headset_power_mode(codec, 1);
+
+ twl6040_write(codec, TWL6040_REG_HPPLLCTL, hppllctl);
+ udelay(500);
+ lppllctl |= TWL6040_HPLLSEL;
+ twl6040_write(codec, TWL6040_REG_LPPLLCTL, lppllctl);
+ lppllctl &= ~TWL6040_LPLLENA;
+ twl6040_write(codec, TWL6040_REG_LPPLLCTL, lppllctl);
+
+ /* high-performance pll can provide only 19.2 MHz */
+ priv->pll = TWL6040_HPPLL_ID;
+ priv->sysclk = 19200000;
+ priv->sysclk_constraints = &hp_constraints;
+ break;
+ default:
+ dev_err(codec->dev, "unknown clk_id %d\n", clk_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct snd_soc_dai_ops twl6040_dai_ops = {
+ .startup = twl6040_startup,
+ .hw_params = twl6040_hw_params,
+ .trigger = twl6040_trigger,
+ .set_sysclk = twl6040_set_dai_sysclk,
+};
+
+struct snd_soc_dai twl6040_dai = {
+ .name = "twl6040",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 1,
+ .channels_max = 4,
+ .rates = TWL6040_RATES,
+ .formats = TWL6040_FORMATS,
+ },
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = TWL6040_RATES,
+ .formats = TWL6040_FORMATS,
+ },
+ .ops = &twl6040_dai_ops,
+};
+EXPORT_SYMBOL_GPL(twl6040_dai);
+
+#ifdef CONFIG_PM
+static int twl6040_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec = socdev->card->codec;
+
+ twl6040_set_bias_level(codec, SND_SOC_BIAS_OFF);
+
+ return 0;
+}
+
+static int twl6040_resume(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec = socdev->card->codec;
+
+ twl6040_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+ return 0;
+}
+#else
+#define twl6040_suspend NULL
+#define twl6040_resume NULL
+#endif
+
+static struct snd_soc_codec *twl6040_codec;
+
+static int twl6040_probe(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec;
+ int ret = 0;
+
+ BUG_ON(!twl6040_codec);
+
+ codec = twl6040_codec;
+ socdev->card->codec = codec;
+
+ /* register pcms */
+ ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to create pcms\n");
+ return ret;
+ }
+
+ ret = snd_soc_add_controls(codec, twl6040_snd_controls,
+ ARRAY_SIZE(twl6040_snd_controls));
+ twl6040_add_widgets(codec);
+
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register card\n");
+ goto card_err;
+ }
+
+ return ret;
+
+card_err:
+ snd_soc_free_pcms(socdev);
+ snd_soc_dapm_free(socdev);
+ return ret;
+}
+
+static int twl6040_remove(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec = socdev->card->codec;
+
+ twl6040_set_bias_level(codec, SND_SOC_BIAS_OFF);
+ snd_soc_free_pcms(socdev);
+ snd_soc_dapm_free(socdev);
+ kfree(codec);
+
+ return 0;
+}
+
+struct snd_soc_codec_device soc_codec_dev_twl6040 = {
+ .probe = twl6040_probe,
+ .remove = twl6040_remove,
+ .suspend = twl6040_suspend,
+ .resume = twl6040_resume,
+};
+EXPORT_SYMBOL_GPL(soc_codec_dev_twl6040);
+
+static int __devinit twl6040_codec_probe(struct platform_device *pdev)
+{
+ struct twl4030_codec_data *twl_codec = pdev->dev.platform_data;
+ struct snd_soc_codec *codec;
+ struct twl6040_data *priv;
+ int audpwron, naudint;
+ int ret = 0;
+
+ priv = kzalloc(sizeof(struct twl6040_data), GFP_KERNEL);
+ if (priv == NULL)
+ return -ENOMEM;
+
+ if (twl_codec) {
+ audpwron = twl_codec->audpwron_gpio;
+ naudint = twl_codec->naudint_irq;
+ } else {
+ audpwron = -EINVAL;
+ naudint = 0;
+ }
+
+ priv->audpwron = audpwron;
+ priv->naudint = naudint;
+
+ codec = &priv->codec;
+ codec->dev = &pdev->dev;
+ twl6040_dai.dev = &pdev->dev;
+
+ codec->name = "twl6040";
+ codec->owner = THIS_MODULE;
+ codec->read = twl6040_read_reg_cache;
+ codec->write = twl6040_write;
+ codec->set_bias_level = twl6040_set_bias_level;
+ snd_soc_codec_set_drvdata(codec, priv);
+ codec->dai = &twl6040_dai;
+ codec->num_dai = 1;
+ codec->reg_cache_size = ARRAY_SIZE(twl6040_reg);
+ codec->reg_cache = kmemdup(twl6040_reg, sizeof(twl6040_reg),
+ GFP_KERNEL);
+ if (codec->reg_cache == NULL) {
+ ret = -ENOMEM;
+ goto cache_err;
+ }
+
+ mutex_init(&codec->mutex);
+ INIT_LIST_HEAD(&codec->dapm_widgets);
+ INIT_LIST_HEAD(&codec->dapm_paths);
+ init_completion(&priv->ready);
+
+ if (gpio_is_valid(audpwron)) {
+ ret = gpio_request(audpwron, "audpwron");
+ if (ret)
+ goto gpio1_err;
+
+ ret = gpio_direction_output(audpwron, 0);
+ if (ret)
+ goto gpio2_err;
+
+ priv->codec_powered = 0;
+ }
+
+ if (naudint) {
+ /* audio interrupt */
+ ret = request_threaded_irq(naudint, NULL,
+ twl6040_naudint_handler,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "twl6040_codec", codec);
+ if (ret)
+ goto gpio2_err;
+ } else {
+ if (gpio_is_valid(audpwron)) {
+ /* enable only codec ready interrupt */
+ twl6040_write_reg_cache(codec, TWL6040_REG_INTMR,
+ ~TWL6040_READYMSK & TWL6040_ALLINT_MSK);
+ } else {
+ /* no interrupts at all */
+ twl6040_write_reg_cache(codec, TWL6040_REG_INTMR,
+ TWL6040_ALLINT_MSK);
+ }
+ }
+
+ /* init vio registers */
+ twl6040_init_vio_regs(codec);
+
+ /* power on device */
+ ret = twl6040_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+ if (ret)
+ goto irq_err;
+
+ ret = snd_soc_register_codec(codec);
+ if (ret)
+ goto reg_err;
+
+ twl6040_codec = codec;
+
+ ret = snd_soc_register_dai(&twl6040_dai);
+ if (ret)
+ goto dai_err;
+
+ return 0;
+
+dai_err:
+ snd_soc_unregister_codec(codec);
+ twl6040_codec = NULL;
+reg_err:
+ twl6040_set_bias_level(codec, SND_SOC_BIAS_OFF);
+irq_err:
+ if (naudint)
+ free_irq(naudint, codec);
+gpio2_err:
+ if (gpio_is_valid(audpwron))
+ gpio_free(audpwron);
+gpio1_err:
+ kfree(codec->reg_cache);
+cache_err:
+ kfree(priv);
+ return ret;
+}
+
+static int __devexit twl6040_codec_remove(struct platform_device *pdev)
+{
+ struct twl6040_data *priv = snd_soc_codec_get_drvdata(twl6040_codec);
+ int audpwron = priv->audpwron;
+ int naudint = priv->naudint;
+
+ if (gpio_is_valid(audpwron))
+ gpio_free(audpwron);
+
+ if (naudint)
+ free_irq(naudint, twl6040_codec);
+
+ snd_soc_unregister_dai(&twl6040_dai);
+ snd_soc_unregister_codec(twl6040_codec);
+
+ kfree(twl6040_codec);
+ twl6040_codec = NULL;
+
+ return 0;
+}
+
+static struct platform_driver twl6040_codec_driver = {
+ .driver = {
+ .name = "twl6040_codec",
+ .owner = THIS_MODULE,
+ },
+ .probe = twl6040_codec_probe,
+ .remove = __devexit_p(twl6040_codec_remove),
+};
+
+static int __init twl6040_codec_init(void)
+{
+ return platform_driver_register(&twl6040_codec_driver);
+}
+module_init(twl6040_codec_init);
+
+static void __exit twl6040_codec_exit(void)
+{
+ platform_driver_unregister(&twl6040_codec_driver);
+}
+module_exit(twl6040_codec_exit);
+
+MODULE_DESCRIPTION("ASoC TWL6040 codec driver");
+MODULE_AUTHOR("Misael Lopez Cruz");
+MODULE_LICENSE("GPL");
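
[Editor's note, not part of the patch] A hedged sketch of how a machine driver might drive the two clocking paths exposed by twl6040_set_dai_sysclk() above; the dai_link plumbing and the 19.2 MHz MCLK rate are assumptions, only the clk_id values and the snd_soc_dai_set_sysclk() call come from the code above:

	#include <sound/pcm_params.h>
	#include <sound/soc.h>
	#include "../codecs/twl6040.h"

	static int example_mach_hw_params(struct snd_pcm_substream *substream,
					  struct snd_pcm_hw_params *params)
	{
		struct snd_soc_pcm_runtime *rtd = substream->private_data;
		struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
		int clk_id;
		unsigned int freq;

		if (params_rate(params) == 88200) {
			/* headset low-power playback: clk32k input, low-power PLL */
			clk_id = TWL6040_SYSCLK_SEL_LPPLL;
			freq = 32768;
		} else {
			/* high-performance PLL fed from a 19.2 MHz MCLK (assumed) */
			clk_id = TWL6040_SYSCLK_SEL_HPPLL;
			freq = 19200000;
		}

		return snd_soc_dai_set_sysclk(codec_dai, clk_id, freq,
					      SND_SOC_CLOCK_IN);
	}
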
diff --git a/sound/soc/codecs/twl6040.h b/sound/soc/codecs/twl6040.h
new file mode 100644
index 000000000000..c472070a1da2
--- /dev/null
+++ b/sound/soc/codecs/twl6040.h
@@ -0,0 +1,141 @@
+/*
+ * ALSA SoC TWL6040 codec driver
+ *
+ * Author: Misael Lopez Cruz <x0052729@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __TWL6040_H__
+#define __TWL6040_H__
+
+#define TWL6040_REG_ASICID 0x01
+#define TWL6040_REG_ASICREV 0x02
+#define TWL6040_REG_INTID 0x03
+#define TWL6040_REG_INTMR 0x04
+#define TWL6040_REG_NCPCTL 0x05
+#define TWL6040_REG_LDOCTL 0x06
+#define TWL6040_REG_HPPLLCTL 0x07
+#define TWL6040_REG_LPPLLCTL 0x08
+#define TWL6040_REG_LPPLLDIV 0x09
+#define TWL6040_REG_AMICBCTL 0x0A
+#define TWL6040_REG_DMICBCTL 0x0B
+#define TWL6040_REG_MICLCTL 0x0C
+#define TWL6040_REG_MICRCTL 0x0D
+#define TWL6040_REG_MICGAIN 0x0E
+#define TWL6040_REG_LINEGAIN 0x0F
+#define TWL6040_REG_HSLCTL 0x10
+#define TWL6040_REG_HSRCTL 0x11
+#define TWL6040_REG_HSGAIN 0x12
+#define TWL6040_REG_EARCTL 0x13
+#define TWL6040_REG_HFLCTL 0x14
+#define TWL6040_REG_HFLGAIN 0x15
+#define TWL6040_REG_HFRCTL 0x16
+#define TWL6040_REG_HFRGAIN 0x17
+#define TWL6040_REG_VIBCTLL 0x18
+#define TWL6040_REG_VIBDATL 0x19
+#define TWL6040_REG_VIBCTLR 0x1A
+#define TWL6040_REG_VIBDATR 0x1B
+#define TWL6040_REG_HKCTL1 0x1C
+#define TWL6040_REG_HKCTL2 0x1D
+#define TWL6040_REG_GPOCTL 0x1E
+#define TWL6040_REG_ALB 0x1F
+#define TWL6040_REG_DLB 0x20
+#define TWL6040_REG_TRIM1 0x28
+#define TWL6040_REG_TRIM2 0x29
+#define TWL6040_REG_TRIM3 0x2A
+#define TWL6040_REG_HSOTRIM 0x2B
+#define TWL6040_REG_HFOTRIM 0x2C
+#define TWL6040_REG_ACCCTL 0x2D
+#define TWL6040_REG_STATUS 0x2E
+
+#define TWL6040_CACHEREGNUM (TWL6040_REG_STATUS + 1)
+
+#define TWL6040_VIOREGNUM 18
+#define TWL6040_VDDREGNUM 21
+
+/* INTID (0x03) fields */
+
+#define TWL6040_THINT 0x01
+#define TWL6040_PLUGINT 0x02
+#define TWL6040_UNPLUGINT 0x04
+#define TWL6040_HOOKINT 0x08
+#define TWL6040_HFINT 0x10
+#define TWL6040_VIBINT 0x20
+#define TWL6040_READYINT 0x40
+
+/* INTMR (0x04) fields */
+
+#define TWL6040_READYMSK 0x40
+#define TWL6040_ALLINT_MSK 0x7B
+
+/* NCPCTL (0x05) fields */
+
+#define TWL6040_NCPENA 0x01
+#define TWL6040_NCPOPEN 0x40
+
+/* LDOCTL (0x06) fields */
+
+#define TWL6040_LSLDOENA 0x01
+#define TWL6040_HSLDOENA 0x04
+#define TWL6040_REFENA 0x40
+#define TWL6040_OSCENA 0x80
+
+/* HPPLLCTL (0x07) fields */
+
+#define TWL6040_HPLLENA 0x01
+#define TWL6040_HPLLRST 0x02
+#define TWL6040_HPLLBP 0x04
+#define TWL6040_HPLLSQRENA 0x08
+#define TWL6040_HPLLSQRBP 0x10
+#define TWL6040_MCLK_12000KHZ (0 << 5)
+#define TWL6040_MCLK_19200KHZ (1 << 5)
+#define TWL6040_MCLK_26000KHZ (2 << 5)
+#define TWL6040_MCLK_38400KHZ (3 << 5)
+#define TWL6040_MCLK_MSK 0x60
+
+/* LPPLLCTL (0x08) fields */
+
+#define TWL6040_LPLLENA 0x01
+#define TWL6040_LPLLRST 0x02
+#define TWL6040_LPLLSEL 0x04
+#define TWL6040_LPLLFIN 0x08
+#define TWL6040_HPLLSEL 0x10
+
+/* HSLCTL (0x10) fields */
+
+#define TWL6040_HSDACMODEL 0x02
+#define TWL6040_HSDRVMODEL 0x08
+
+/* HSRCTL (0x11) fields */
+
+#define TWL6040_HSDACMODER 0x02
+#define TWL6040_HSDRVMODER 0x08
+
+/* ACCCTL (0x2D) fields */
+
+#define TWL6040_RESETSPLIT 0x04
+
+#define TWL6040_SYSCLK_SEL_LPPLL 1
+#define TWL6040_SYSCLK_SEL_HPPLL 2
+
+#define TWL6040_HPPLL_ID 1
+#define TWL6040_LPPLL_ID 2
+
+extern struct snd_soc_dai twl6040_dai;
+extern struct snd_soc_codec_device soc_codec_dev_twl6040;
+
+#endif /* End of __TWL6040_H__ */
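
[Editor's note, not part of the patch] The codec-level probe in twl6040.c above expects platform data of type struct twl4030_codec_data carrying audpwron_gpio and naudint_irq. A hedged sketch of how such data could be supplied; the GPIO number is hypothetical, and on real boards the "twl6040_codec" device may well be instantiated by the twl MFD core rather than by board code:

	#include <linux/platform_device.h>
	#include <linux/i2c/twl.h>

	static struct twl4030_codec_data example_twl6040_pdata = {
		.audpwron_gpio	= 127,	/* hypothetical AUDPWRON GPIO */
		.naudint_irq	= 0,	/* no audio IRQ: driver polls READYINT */
	};

	static struct platform_device example_twl6040_codec_dev = {
		.name	= "twl6040_codec",
		.id	= -1,
		.dev	= {
			.platform_data = &example_twl6040_pdata,
		},
	};
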
diff --git a/sound/soc/codecs/uda134x.c b/sound/soc/codecs/uda134x.c
index a8dcd5a5bbcb..28aac53c97bb 100644
--- a/sound/soc/codecs/uda134x.c
+++ b/sound/soc/codecs/uda134x.c
@@ -175,7 +175,7 @@ static int uda134x_startup(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
- struct uda134x_priv *uda134x = codec->private_data;
+ struct uda134x_priv *uda134x = snd_soc_codec_get_drvdata(codec);
struct snd_pcm_runtime *master_runtime;
if (uda134x->master_substream) {
@@ -208,7 +208,7 @@ static void uda134x_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
- struct uda134x_priv *uda134x = codec->private_data;
+ struct uda134x_priv *uda134x = snd_soc_codec_get_drvdata(codec);
if (uda134x->master_substream == substream)
uda134x->master_substream = uda134x->slave_substream;
@@ -223,7 +223,7 @@ static int uda134x_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
- struct uda134x_priv *uda134x = codec->private_data;
+ struct uda134x_priv *uda134x = snd_soc_codec_get_drvdata(codec);
u8 hw_params;
if (substream == uda134x->slave_substream) {
@@ -295,7 +295,7 @@ static int uda134x_set_dai_sysclk(struct snd_soc_dai *codec_dai,
int clk_id, unsigned int freq, int dir)
{
struct snd_soc_codec *codec = codec_dai->codec;
- struct uda134x_priv *uda134x = codec->private_data;
+ struct uda134x_priv *uda134x = snd_soc_codec_get_drvdata(codec);
pr_debug("%s clk_id: %d, freq: %u, dir: %d\n", __func__,
clk_id, freq, dir);
@@ -317,7 +317,7 @@ static int uda134x_set_dai_fmt(struct snd_soc_dai *codec_dai,
unsigned int fmt)
{
struct snd_soc_codec *codec = codec_dai->codec;
- struct uda134x_priv *uda134x = codec->private_data;
+ struct uda134x_priv *uda134x = snd_soc_codec_get_drvdata(codec);
pr_debug("%s fmt: %08X\n", __func__, fmt);
@@ -432,6 +432,14 @@ SOC_ENUM("PCM Playback De-emphasis", uda134x_mixer_enum[1]),
SOC_SINGLE("DC Filter Enable Switch", UDA134X_STATUS0, 0, 1, 0),
};
+static const struct snd_kcontrol_new uda1345_snd_controls[] = {
+SOC_SINGLE("Master Playback Volume", UDA134X_DATA000, 0, 0x3F, 1),
+
+SOC_ENUM("PCM Playback De-emphasis", uda134x_mixer_enum[1]),
+
+SOC_SINGLE("DC Filter Enable Switch", UDA134X_STATUS0, 0, 1, 0),
+};
+
static struct snd_soc_dai_ops uda134x_dai_ops = {
.startup = uda134x_startup,
.shutdown = uda134x_shutdown,
@@ -487,6 +495,7 @@ static int uda134x_soc_probe(struct platform_device *pdev)
case UDA134X_UDA1340:
case UDA134X_UDA1341:
case UDA134X_UDA1344:
+ case UDA134X_UDA1345:
break;
default:
printk(KERN_ERR "UDA134X SoC codec: "
@@ -504,7 +513,7 @@ static int uda134x_soc_probe(struct platform_device *pdev)
uda134x = kzalloc(sizeof(struct uda134x_priv), GFP_KERNEL);
if (uda134x == NULL)
goto priv_err;
- codec->private_data = uda134x;
+ snd_soc_codec_set_drvdata(codec, uda134x);
codec->reg_cache = kmemdup(uda134x_reg, sizeof(uda134x_reg),
GFP_KERNEL);
@@ -552,6 +561,10 @@ static int uda134x_soc_probe(struct platform_device *pdev)
ret = snd_soc_add_controls(codec, uda1341_snd_controls,
ARRAY_SIZE(uda1341_snd_controls));
break;
+ case UDA134X_UDA1345:
+ ret = snd_soc_add_controls(codec, uda1345_snd_controls,
+ ARRAY_SIZE(uda1345_snd_controls));
+ break;
default:
printk(KERN_ERR "%s unknown codec type: %d",
__func__, pd->model);
@@ -568,7 +581,7 @@ static int uda134x_soc_probe(struct platform_device *pdev)
pcm_err:
kfree(codec->reg_cache);
reg_err:
- kfree(codec->private_data);
+ kfree(snd_soc_codec_get_drvdata(codec));
priv_err:
kfree(codec);
return ret;
@@ -586,7 +599,7 @@ static int uda134x_soc_remove(struct platform_device *pdev)
snd_soc_free_pcms(socdev);
snd_soc_dapm_free(socdev);
- kfree(codec->private_data);
+ kfree(snd_soc_codec_get_drvdata(codec));
kfree(codec->reg_cache);
kfree(codec);
diff --git a/sound/soc/codecs/uda1380.c b/sound/soc/codecs/uda1380.c
index 9cd0a66b7663..2f925a27dcde 100644
--- a/sound/soc/codecs/uda1380.c
+++ b/sound/soc/codecs/uda1380.c
@@ -476,7 +476,7 @@ static int uda1380_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
- struct uda1380_priv *uda1380 = codec->private_data;
+ struct uda1380_priv *uda1380 = snd_soc_codec_get_drvdata(codec);
int mixer = uda1380_read_reg_cache(codec, UDA1380_MIXER);
switch (cmd) {
@@ -670,7 +670,6 @@ static int uda1380_resume(struct platform_device *pdev)
codec->hw_write(codec->control_data, data, 2);
}
uda1380_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- uda1380_set_bias_level(codec, codec->suspend_bias_level);
return 0;
}
@@ -774,7 +773,7 @@ static int uda1380_register(struct uda1380_priv *uda1380)
INIT_LIST_HEAD(&codec->dapm_widgets);
INIT_LIST_HEAD(&codec->dapm_paths);
- codec->private_data = uda1380;
+ snd_soc_codec_set_drvdata(codec, uda1380);
codec->name = "UDA1380";
codec->owner = THIS_MODULE;
codec->read = uda1380_read_reg_cache;
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
index 2e0772f9c456..8ae20208e7be 100644
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -55,6 +55,7 @@ struct wm8350_output {
struct wm8350_jack_data {
struct snd_soc_jack *jack;
int report;
+ int short_report;
};
struct wm8350_data {
@@ -63,6 +64,7 @@ struct wm8350_data {
struct wm8350_output out2;
struct wm8350_jack_data hpl;
struct wm8350_jack_data hpr;
+ struct wm8350_jack_data mic;
struct regulator_bulk_data supplies[ARRAY_SIZE(supply_names)];
int fll_freq_out;
int fll_freq_in;
@@ -94,7 +96,7 @@ static int wm8350_codec_write(struct snd_soc_codec *codec, unsigned int reg,
*/
static inline int wm8350_out1_ramp_step(struct snd_soc_codec *codec)
{
- struct wm8350_data *wm8350_data = codec->private_data;
+ struct wm8350_data *wm8350_data = snd_soc_codec_get_drvdata(codec);
struct wm8350_output *out1 = &wm8350_data->out1;
struct wm8350 *wm8350 = codec->control_data;
int left_complete = 0, right_complete = 0;
@@ -160,7 +162,7 @@ static inline int wm8350_out1_ramp_step(struct snd_soc_codec *codec)
*/
static inline int wm8350_out2_ramp_step(struct snd_soc_codec *codec)
{
- struct wm8350_data *wm8350_data = codec->private_data;
+ struct wm8350_data *wm8350_data = snd_soc_codec_get_drvdata(codec);
struct wm8350_output *out2 = &wm8350_data->out2;
struct wm8350 *wm8350 = codec->control_data;
int left_complete = 0, right_complete = 0;
@@ -230,7 +232,7 @@ static void wm8350_pga_work(struct work_struct *work)
{
struct snd_soc_codec *codec =
container_of(work, struct snd_soc_codec, delayed_work.work);
- struct wm8350_data *wm8350_data = codec->private_data;
+ struct wm8350_data *wm8350_data = snd_soc_codec_get_drvdata(codec);
struct wm8350_output *out1 = &wm8350_data->out1,
*out2 = &wm8350_data->out2;
int i, out1_complete, out2_complete;
@@ -277,7 +279,7 @@ static int pga_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_codec *codec = w->codec;
- struct wm8350_data *wm8350_data = codec->private_data;
+ struct wm8350_data *wm8350_data = snd_soc_codec_get_drvdata(codec);
struct wm8350_output *out;
switch (w->shift) {
@@ -322,7 +324,7 @@ static int wm8350_put_volsw_2r_vu(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
- struct wm8350_data *wm8350_priv = codec->private_data;
+ struct wm8350_data *wm8350_priv = snd_soc_codec_get_drvdata(codec);
struct wm8350_output *out = NULL;
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
@@ -365,7 +367,7 @@ static int wm8350_get_volsw_2r(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
- struct wm8350_data *wm8350_priv = codec->private_data;
+ struct wm8350_data *wm8350_priv = snd_soc_codec_get_drvdata(codec);
struct wm8350_output *out1 = &wm8350_priv->out1;
struct wm8350_output *out2 = &wm8350_priv->out2;
struct soc_mixer_control *mc =
@@ -1107,7 +1109,7 @@ static int wm8350_set_fll(struct snd_soc_dai *codec_dai,
{
struct snd_soc_codec *codec = codec_dai->codec;
struct wm8350 *wm8350 = codec->control_data;
- struct wm8350_data *priv = codec->private_data;
+ struct wm8350_data *priv = snd_soc_codec_get_drvdata(codec);
struct _fll_div fll_div;
int ret = 0;
u16 fll_1, fll_4;
@@ -1159,7 +1161,7 @@ static int wm8350_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
struct wm8350 *wm8350 = codec->control_data;
- struct wm8350_data *priv = codec->private_data;
+ struct wm8350_data *priv = snd_soc_codec_get_drvdata(codec);
struct wm8350_audio_platform_data *platform =
wm8350->codec.platform_data;
u16 pm1;
@@ -1335,9 +1337,6 @@ static int wm8350_resume(struct platform_device *pdev)
wm8350_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- if (codec->suspend_bias_level == SND_SOC_BIAS_ON)
- wm8350_set_bias_level(codec, SND_SOC_BIAS_ON);
-
return 0;
}
@@ -1392,12 +1391,13 @@ static irqreturn_t wm8350_hp_jack_handler(int irq, void *data)
* @jack: jack to report detection events on
* @report: value to report
*
- * Enables the headphone jack detection of the WM8350.
+ * Enables the headphone jack detection of the WM8350. If no report
+ * is specified then detection is disabled.
*/
int wm8350_hp_jack_detect(struct snd_soc_codec *codec, enum wm8350_jack which,
struct snd_soc_jack *jack, int report)
{
- struct wm8350_data *priv = codec->private_data;
+ struct wm8350_data *priv = snd_soc_codec_get_drvdata(codec);
struct wm8350 *wm8350 = codec->control_data;
int irq;
int ena;
@@ -1421,8 +1421,12 @@ int wm8350_hp_jack_detect(struct snd_soc_codec *codec, enum wm8350_jack which,
return -EINVAL;
}
- wm8350_set_bits(wm8350, WM8350_POWER_MGMT_4, WM8350_TOCLK_ENA);
- wm8350_set_bits(wm8350, WM8350_JACK_DETECT, ena);
+ if (report) {
+ wm8350_set_bits(wm8350, WM8350_POWER_MGMT_4, WM8350_TOCLK_ENA);
+ wm8350_set_bits(wm8350, WM8350_JACK_DETECT, ena);
+ } else {
+ wm8350_clear_bits(wm8350, WM8350_JACK_DETECT, ena);
+ }
/* Sync status */
wm8350_hp_jack_handler(irq + wm8350->irq_base, priv);
@@ -1431,6 +1435,60 @@ int wm8350_hp_jack_detect(struct snd_soc_codec *codec, enum wm8350_jack which,
}
EXPORT_SYMBOL_GPL(wm8350_hp_jack_detect);
+static irqreturn_t wm8350_mic_handler(int irq, void *data)
+{
+ struct wm8350_data *priv = data;
+ struct wm8350 *wm8350 = priv->codec.control_data;
+ u16 reg;
+ int report = 0;
+
+ reg = wm8350_reg_read(wm8350, WM8350_JACK_PIN_STATUS);
+ if (reg & WM8350_JACK_MICSCD_LVL)
+ report |= priv->mic.short_report;
+ if (reg & WM8350_JACK_MICSD_LVL)
+ report |= priv->mic.report;
+
+ snd_soc_jack_report(priv->mic.jack, report,
+ priv->mic.report | priv->mic.short_report);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * wm8350_mic_jack_detect - Enable microphone jack detection.
+ *
+ * @codec: WM8350 codec
+ * @jack: jack to report detection events on
+ * @detect_report: value to report when presence detected
+ * @short_report: value to report when microphone short detected
+ *
+ * Enables the microphone jack detection of the WM8350. If both reports
+ * are specified as zero then detection is disabled.
+ */
+int wm8350_mic_jack_detect(struct snd_soc_codec *codec,
+ struct snd_soc_jack *jack,
+ int detect_report, int short_report)
+{
+ struct wm8350_data *priv = snd_soc_codec_get_drvdata(codec);
+ struct wm8350 *wm8350 = codec->control_data;
+
+ priv->mic.jack = jack;
+ priv->mic.report = detect_report;
+ priv->mic.short_report = short_report;
+
+ if (detect_report || short_report) {
+ wm8350_set_bits(wm8350, WM8350_POWER_MGMT_4, WM8350_TOCLK_ENA);
+ wm8350_set_bits(wm8350, WM8350_POWER_MGMT_1,
+ WM8350_MIC_DET_ENA);
+ } else {
+ wm8350_clear_bits(wm8350, WM8350_POWER_MGMT_1,
+ WM8350_MIC_DET_ENA);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wm8350_mic_jack_detect);
+
static struct snd_soc_codec *wm8350_codec;
static int wm8350_probe(struct platform_device *pdev)
@@ -1448,7 +1506,7 @@ static int wm8350_probe(struct platform_device *pdev)
socdev->card->codec = wm8350_codec;
codec = socdev->card->codec;
wm8350 = codec->control_data;
- priv = codec->private_data;
+ priv = snd_soc_codec_get_drvdata(codec);
/* Enable the codec */
wm8350_set_bits(wm8350, WM8350_POWER_MGMT_5, WM8350_CODEC_ENA);
@@ -1494,6 +1552,10 @@ static int wm8350_probe(struct platform_device *pdev)
wm8350_register_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_R,
wm8350_hp_jack_handler, 0, "Right jack detect",
priv);
+ wm8350_register_irq(wm8350, WM8350_IRQ_CODEC_MICSCD,
+ wm8350_mic_handler, 0, "Microphone short", priv);
+ wm8350_register_irq(wm8350, WM8350_IRQ_CODEC_MICD,
+ wm8350_mic_handler, 0, "Microphone detect", priv);
ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
if (ret < 0) {
@@ -1515,18 +1577,21 @@ static int wm8350_remove(struct platform_device *pdev)
struct snd_soc_device *socdev = platform_get_drvdata(pdev);
struct snd_soc_codec *codec = socdev->card->codec;
struct wm8350 *wm8350 = codec->control_data;
- struct wm8350_data *priv = codec->private_data;
+ struct wm8350_data *priv = snd_soc_codec_get_drvdata(codec);
int ret;
wm8350_clear_bits(wm8350, WM8350_JACK_DETECT,
WM8350_JDL_ENA | WM8350_JDR_ENA);
wm8350_clear_bits(wm8350, WM8350_POWER_MGMT_4, WM8350_TOCLK_ENA);
+ wm8350_free_irq(wm8350, WM8350_IRQ_CODEC_MICD, priv);
+ wm8350_free_irq(wm8350, WM8350_IRQ_CODEC_MICSCD, priv);
wm8350_free_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_L, priv);
wm8350_free_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_R, priv);
priv->hpl.jack = NULL;
priv->hpr.jack = NULL;
+ priv->mic.jack = NULL;
/* cancel any work waiting to be queued. */
ret = cancel_delayed_work(&codec->delayed_work);
@@ -1631,7 +1696,7 @@ static __devinit int wm8350_codec_probe(struct platform_device *pdev)
codec->dai = &wm8350_dai;
codec->num_dai = 1;
codec->reg_cache_size = WM8350_MAX_REGISTER;
- codec->private_data = priv;
+ snd_soc_codec_set_drvdata(codec, priv);
codec->control_data = wm8350;
/* Put the codec into reset if it wasn't already */
@@ -1663,7 +1728,7 @@ static int __devexit wm8350_codec_remove(struct platform_device *pdev)
{
struct wm8350 *wm8350 = platform_get_drvdata(pdev);
struct snd_soc_codec *codec = wm8350->codec.codec;
- struct wm8350_data *priv = codec->private_data;
+ struct wm8350_data *priv = snd_soc_codec_get_drvdata(codec);
snd_soc_unregister_dai(&wm8350_dai);
snd_soc_unregister_codec(codec);
diff --git a/sound/soc/codecs/wm8350.h b/sound/soc/codecs/wm8350.h
index d088eb4b88bb..9ed0467c71db 100644
--- a/sound/soc/codecs/wm8350.h
+++ b/sound/soc/codecs/wm8350.h
@@ -25,5 +25,8 @@ enum wm8350_jack {
int wm8350_hp_jack_detect(struct snd_soc_codec *codec, enum wm8350_jack which,
struct snd_soc_jack *jack, int report);
+int wm8350_mic_jack_detect(struct snd_soc_codec *codec,
+ struct snd_soc_jack *jack,
+ int detect_report, int short_report);
#endif
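
[Editor's note, not part of the patch] A hedged sketch of how a machine driver could hook the new microphone detection next to the existing headphone detection exported here. The jack is assumed to have been created with snd_soc_jack_new() elsewhere, WM8350_JDL is assumed to be one of the enum wm8350_jack selectors, and the report values are only illustrative:

	#include <sound/jack.h>
	#include <sound/soc.h>
	#include "../codecs/wm8350.h"

	static struct snd_soc_jack example_hs_jack;	/* hypothetical, set up at init */

	static int example_wm8350_jacks_init(struct snd_soc_codec *codec)
	{
		int ret;

		/* left headphone jack input reports headphone presence */
		ret = wm8350_hp_jack_detect(codec, WM8350_JDL, &example_hs_jack,
					    SND_JACK_HEADPHONE);
		if (ret)
			return ret;

		/* microphone presence and a mic short reported on the same jack */
		return wm8350_mic_jack_detect(codec, &example_hs_jack,
					      SND_JACK_MICROPHONE, SND_JACK_BTN_0);
	}
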
diff --git a/sound/soc/codecs/wm8400.c b/sound/soc/codecs/wm8400.c
index 6acc885cf9b7..7f5d080536a0 100644
--- a/sound/soc/codecs/wm8400.c
+++ b/sound/soc/codecs/wm8400.c
@@ -77,7 +77,7 @@ struct wm8400_priv {
static inline unsigned int wm8400_read(struct snd_soc_codec *codec,
unsigned int reg)
{
- struct wm8400_priv *wm8400 = codec->private_data;
+ struct wm8400_priv *wm8400 = snd_soc_codec_get_drvdata(codec);
if (reg == WM8400_INTDRIVBITS)
return wm8400->fake_register;
@@ -91,7 +91,7 @@ static inline unsigned int wm8400_read(struct snd_soc_codec *codec,
static int wm8400_write(struct snd_soc_codec *codec, unsigned int reg,
unsigned int value)
{
- struct wm8400_priv *wm8400 = codec->private_data;
+ struct wm8400_priv *wm8400 = snd_soc_codec_get_drvdata(codec);
if (reg == WM8400_INTDRIVBITS) {
wm8400->fake_register = value;
@@ -102,7 +102,7 @@ static int wm8400_write(struct snd_soc_codec *codec, unsigned int reg,
static void wm8400_codec_reset(struct snd_soc_codec *codec)
{
- struct wm8400_priv *wm8400 = codec->private_data;
+ struct wm8400_priv *wm8400 = snd_soc_codec_get_drvdata(codec);
wm8400_reset_codec_reg_cache(wm8400->wm8400);
}
@@ -926,7 +926,7 @@ static int wm8400_set_dai_sysclk(struct snd_soc_dai *codec_dai,
int clk_id, unsigned int freq, int dir)
{
struct snd_soc_codec *codec = codec_dai->codec;
- struct wm8400_priv *wm8400 = codec->private_data;
+ struct wm8400_priv *wm8400 = snd_soc_codec_get_drvdata(codec);
wm8400->sysclk = freq;
return 0;
@@ -1015,7 +1015,7 @@ static int wm8400_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
unsigned int freq_out)
{
struct snd_soc_codec *codec = codec_dai->codec;
- struct wm8400_priv *wm8400 = codec->private_data;
+ struct wm8400_priv *wm8400 = snd_soc_codec_get_drvdata(codec);
struct fll_factors factors;
int ret;
u16 reg;
@@ -1204,7 +1204,7 @@ static int wm8400_mute(struct snd_soc_dai *dai, int mute)
static int wm8400_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
- struct wm8400_priv *wm8400 = codec->private_data;
+ struct wm8400_priv *wm8400 = snd_soc_codec_get_drvdata(codec);
u16 val;
int ret;
@@ -1467,7 +1467,7 @@ static int wm8400_codec_probe(struct platform_device *dev)
return -ENOMEM;
codec = &priv->codec;
- codec->private_data = priv;
+ snd_soc_codec_set_drvdata(codec, priv);
codec->control_data = dev_get_drvdata(&dev->dev);
priv->wm8400 = dev_get_drvdata(&dev->dev);
@@ -1530,7 +1530,7 @@ err:
static int __exit wm8400_codec_remove(struct platform_device *dev)
{
- struct wm8400_priv *priv = wm8400_codec->private_data;
+ struct wm8400_priv *priv = snd_soc_codec_get_drvdata(wm8400_codec);
u16 reg;
snd_soc_unregister_dai(&wm8400_dai);
diff --git a/sound/soc/codecs/wm8510.c b/sound/soc/codecs/wm8510.c
index 9000b1d19afb..0f7bcb61071a 100644
--- a/sound/soc/codecs/wm8510.c
+++ b/sound/soc/codecs/wm8510.c
@@ -557,7 +557,7 @@ static int wm8510_resume(struct platform_device *pdev)
codec->hw_write(codec->control_data, data, 2);
}
wm8510_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- wm8510_set_bias_level(codec, codec->suspend_bias_level);
+
return 0;
}
diff --git a/sound/soc/codecs/wm8523.c b/sound/soc/codecs/wm8523.c
index 19cd47293424..37242a7d3077 100644
--- a/sound/soc/codecs/wm8523.c
+++ b/sound/soc/codecs/wm8523.c
@@ -138,7 +138,7 @@ static int wm8523_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
- struct wm8523_priv *wm8523 = codec->private_data;
+ struct wm8523_priv *wm8523 = snd_soc_codec_get_drvdata(codec);
/* The set of sample rates that can be supported depends on the
* MCLK supplied to the CODEC - enforce this.
@@ -164,7 +164,7 @@ static int wm8523_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
- struct wm8523_priv *wm8523 = codec->private_data;
+ struct wm8523_priv *wm8523 = snd_soc_codec_get_drvdata(codec);
int i;
u16 aifctrl1 = snd_soc_read(codec, WM8523_AIF_CTRL1);
u16 aifctrl2 = snd_soc_read(codec, WM8523_AIF_CTRL2);
@@ -211,7 +211,7 @@ static int wm8523_set_dai_sysclk(struct snd_soc_dai *codec_dai,
int clk_id, unsigned int freq, int dir)
{
struct snd_soc_codec *codec = codec_dai->codec;
- struct wm8523_priv *wm8523 = codec->private_data;
+ struct wm8523_priv *wm8523 = snd_soc_codec_get_drvdata(codec);
unsigned int val;
int i;
@@ -318,7 +318,7 @@ static int wm8523_set_dai_fmt(struct snd_soc_dai *codec_dai,
static int wm8523_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
- struct wm8523_priv *wm8523 = codec->private_data;
+ struct wm8523_priv *wm8523 = snd_soc_codec_get_drvdata(codec);
int ret, i;
switch (level) {
@@ -489,7 +489,7 @@ static int wm8523_register(struct wm8523_priv *wm8523,
INIT_LIST_HEAD(&codec->dapm_widgets);
INIT_LIST_HEAD(&codec->dapm_paths);
- codec->private_data = wm8523;
+ snd_soc_codec_set_drvdata(codec, wm8523);
codec->name = "WM8523";
codec->owner = THIS_MODULE;
codec->bias_level = SND_SOC_BIAS_OFF;
diff --git a/sound/soc/codecs/wm8580.c b/sound/soc/codecs/wm8580.c
index 8cc9042965eb..c3571ee5c11b 100644
--- a/sound/soc/codecs/wm8580.c
+++ b/sound/soc/codecs/wm8580.c
@@ -412,7 +412,7 @@ static int wm8580_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
{
int offset;
struct snd_soc_codec *codec = codec_dai->codec;
- struct wm8580_priv *wm8580 = codec->private_data;
+ struct wm8580_priv *wm8580 = snd_soc_codec_get_drvdata(codec);
struct pll_state *state;
struct _pll_div pll_div;
unsigned int reg;
@@ -840,7 +840,7 @@ static int wm8580_register(struct wm8580_priv *wm8580,
INIT_LIST_HEAD(&codec->dapm_widgets);
INIT_LIST_HEAD(&codec->dapm_paths);
- codec->private_data = wm8580;
+ snd_soc_codec_set_drvdata(codec, wm8580);
codec->name = "WM8580";
codec->owner = THIS_MODULE;
codec->bias_level = SND_SOC_BIAS_OFF;
diff --git a/sound/soc/codecs/wm8711.c b/sound/soc/codecs/wm8711.c
index 8ca3812f2f2f..effb14eee7d4 100644
--- a/sound/soc/codecs/wm8711.c
+++ b/sound/soc/codecs/wm8711.c
@@ -163,7 +163,7 @@ static int wm8711_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
- struct wm8711_priv *wm8711 = codec->private_data;
+ struct wm8711_priv *wm8711 = snd_soc_codec_get_drvdata(codec);
u16 iface = snd_soc_read(codec, WM8711_IFACE) & 0xfffc;
int i = get_coeff(wm8711->sysclk, params_rate(params));
u16 srate = (coeff_div[i].sr << 2) |
@@ -227,7 +227,7 @@ static int wm8711_set_dai_sysclk(struct snd_soc_dai *codec_dai,
int clk_id, unsigned int freq, int dir)
{
struct snd_soc_codec *codec = codec_dai->codec;
- struct wm8711_priv *wm8711 = codec->private_data;
+ struct wm8711_priv *wm8711 = snd_soc_codec_get_drvdata(codec);
switch (freq) {
case 11289600:
@@ -376,7 +376,7 @@ static int wm8711_resume(struct platform_device *pdev)
codec->hw_write(codec->control_data, data, 2);
}
wm8711_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- wm8711_set_bias_level(codec, codec->suspend_bias_level);
+
return 0;
}
@@ -446,7 +446,7 @@ static int wm8711_register(struct wm8711_priv *wm8711,
INIT_LIST_HEAD(&codec->dapm_widgets);
INIT_LIST_HEAD(&codec->dapm_paths);
- codec->private_data = wm8711;
+ snd_soc_codec_set_drvdata(codec, wm8711);
codec->name = "WM8711";
codec->owner = THIS_MODULE;
codec->bias_level = SND_SOC_BIAS_OFF;
diff --git a/sound/soc/codecs/wm8728.c b/sound/soc/codecs/wm8728.c
index 07adc375a706..34be2d2b69ef 100644
--- a/sound/soc/codecs/wm8728.c
+++ b/sound/soc/codecs/wm8728.c
@@ -238,7 +238,7 @@ static int wm8728_resume(struct platform_device *pdev)
struct snd_soc_device *socdev = platform_get_drvdata(pdev);
struct snd_soc_codec *codec = socdev->card->codec;
- wm8728_set_bias_level(codec, codec->suspend_bias_level);
+ wm8728_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
return 0;
}
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index e7c6bf163185..0ab9b6355297 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -225,7 +225,7 @@ static int wm8731_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
- struct wm8731_priv *wm8731 = codec->private_data;
+ struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);
u16 iface = snd_soc_read(codec, WM8731_IFACE) & 0xfff3;
int i = get_coeff(wm8731->sysclk, params_rate(params));
u16 srate = (coeff_div[i].sr << 2) |
@@ -292,7 +292,7 @@ static int wm8731_set_dai_sysclk(struct snd_soc_dai *codec_dai,
int clk_id, unsigned int freq, int dir)
{
struct snd_soc_codec *codec = codec_dai->codec;
- struct wm8731_priv *wm8731 = codec->private_data;
+ struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);
switch (freq) {
case 11289600:
@@ -369,6 +369,10 @@ static int wm8731_set_dai_fmt(struct snd_soc_dai *codec_dai,
static int wm8731_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
+ struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);
+ int i, ret;
+ u8 data[2];
+ u16 *cache = codec->reg_cache;
u16 reg;
switch (level) {
@@ -377,6 +381,24 @@ static int wm8731_set_bias_level(struct snd_soc_codec *codec,
case SND_SOC_BIAS_PREPARE:
break;
case SND_SOC_BIAS_STANDBY:
+ if (codec->bias_level == SND_SOC_BIAS_OFF) {
+ ret = regulator_bulk_enable(ARRAY_SIZE(wm8731->supplies),
+ wm8731->supplies);
+ if (ret != 0)
+ return ret;
+
+ /* Sync reg_cache with the hardware */
+ for (i = 0; i < ARRAY_SIZE(wm8731_reg); i++) {
+ if (cache[i] == wm8731_reg[i])
+ continue;
+
+ data[0] = (i << 1) | ((cache[i] >> 8)
+ & 0x0001);
+ data[1] = cache[i] & 0x00ff;
+ codec->hw_write(codec->control_data, data, 2);
+ }
+ }
+
/* Clear PWROFF, gate CLKOUT, everything else as-is */
reg = snd_soc_read(codec, WM8731_PWR) & 0xff7f;
snd_soc_write(codec, WM8731_PWR, reg | 0x0040);
@@ -384,17 +406,15 @@ static int wm8731_set_bias_level(struct snd_soc_codec *codec,
case SND_SOC_BIAS_OFF:
snd_soc_write(codec, WM8731_ACTIVE, 0x0);
snd_soc_write(codec, WM8731_PWR, 0xffff);
+ regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies),
+ wm8731->supplies);
break;
}
codec->bias_level = level;
return 0;
}
-#define WM8731_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |\
- SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 |\
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |\
- SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 |\
- SNDRV_PCM_RATE_96000)
+#define WM8731_RATES SNDRV_PCM_RATE_8000_96000
#define WM8731_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
SNDRV_PCM_FMTBIT_S24_LE)
@@ -432,12 +452,9 @@ static int wm8731_suspend(struct platform_device *pdev, pm_message_t state)
{
struct snd_soc_device *socdev = platform_get_drvdata(pdev);
struct snd_soc_codec *codec = socdev->card->codec;
- struct wm8731_priv *wm8731 = codec->private_data;
- snd_soc_write(codec, WM8731_ACTIVE, 0x0);
wm8731_set_bias_level(codec, SND_SOC_BIAS_OFF);
- regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies),
- wm8731->supplies);
+
return 0;
}
@@ -445,27 +462,8 @@ static int wm8731_resume(struct platform_device *pdev)
{
struct snd_soc_device *socdev = platform_get_drvdata(pdev);
struct snd_soc_codec *codec = socdev->card->codec;
- struct wm8731_priv *wm8731 = codec->private_data;
- int i, ret;
- u8 data[2];
- u16 *cache = codec->reg_cache;
- ret = regulator_bulk_enable(ARRAY_SIZE(wm8731->supplies),
- wm8731->supplies);
- if (ret != 0)
- return ret;
-
- /* Sync reg_cache with the hardware */
- for (i = 0; i < ARRAY_SIZE(wm8731_reg); i++) {
- if (cache[i] == wm8731_reg[i])
- continue;
-
- data[0] = (i << 1) | ((cache[i] >> 8) & 0x0001);
- data[1] = cache[i] & 0x00ff;
- codec->hw_write(codec->control_data, data, 2);
- }
wm8731_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- wm8731_set_bias_level(codec, codec->suspend_bias_level);
return 0;
}
@@ -540,7 +538,7 @@ static int wm8731_register(struct wm8731_priv *wm8731,
INIT_LIST_HEAD(&codec->dapm_widgets);
INIT_LIST_HEAD(&codec->dapm_paths);
- codec->private_data = wm8731;
+ snd_soc_codec_set_drvdata(codec, wm8731);
codec->name = "WM8731";
codec->owner = THIS_MODULE;
codec->bias_level = SND_SOC_BIAS_OFF;
@@ -609,6 +607,9 @@ static int wm8731_register(struct wm8731_priv *wm8731,
goto err_codec;
}
+ /* Regulators will have been enabled by bias management */
+ regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies), wm8731->supplies);
+
return 0;
err_codec:
@@ -627,7 +628,6 @@ static void wm8731_unregister(struct wm8731_priv *wm8731)
wm8731_set_bias_level(&wm8731->codec, SND_SOC_BIAS_OFF);
snd_soc_unregister_dai(&wm8731_dai);
snd_soc_unregister_codec(&wm8731->codec);
- regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies), wm8731->supplies);
regulator_bulk_free(ARRAY_SIZE(wm8731->supplies), wm8731->supplies);
kfree(wm8731);
wm8731_codec = NULL;
@@ -708,7 +708,7 @@ MODULE_DEVICE_TABLE(i2c, wm8731_i2c_id);
static struct i2c_driver wm8731_i2c_driver = {
.driver = {
- .name = "WM8731 I2C Codec",
+ .name = "wm8731",
.owner = THIS_MODULE,
},
.probe = wm8731_i2c_probe,
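The wm8731 hunks above fold the regulator handling into wm8731_set_bias_level(): supplies are switched off at SND_SOC_BIAS_OFF and re-enabled (with a register cache write-back) on the OFF-to-STANDBY transition, so suspend and resume only have to request a bias level. A rough sketch of the regulator consumer calls that pattern relies on; the supply names and device used here are placeholders rather than values taken from the driver:

#include <linux/device.h>
#include <linux/regulator/consumer.h>

#define NUM_SUPPLIES 2

/* Placeholder supply names - a real driver requests the names its
 * datasheet defines. */
static const char *example_supply_names[NUM_SUPPLIES] = {
	"AVDD",
	"DBVDD",
};

static struct regulator_bulk_data example_supplies[NUM_SUPPLIES];

static int example_supplies_init(struct device *dev)
{
	int i, ret;

	for (i = 0; i < NUM_SUPPLIES; i++)
		example_supplies[i].supply = example_supply_names[i];

	/* Look all the supplies up in one go... */
	ret = regulator_bulk_get(dev, NUM_SUPPLIES, example_supplies);
	if (ret != 0)
		return ret;

	/* ...and power them on while the device is in use */
	return regulator_bulk_enable(NUM_SUPPLIES, example_supplies);
}

static void example_supplies_exit(void)
{
	/* Drop and release the supplies again on the way down */
	regulator_bulk_disable(NUM_SUPPLIES, example_supplies);
	regulator_bulk_free(NUM_SUPPLIES, example_supplies);
}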
diff --git a/sound/soc/codecs/wm8750.c b/sound/soc/codecs/wm8750.c
index 2916ed4d3844..9407e193fcc3 100644
--- a/sound/soc/codecs/wm8750.c
+++ b/sound/soc/codecs/wm8750.c
@@ -30,13 +30,6 @@
#include "wm8750.h"
-#define WM8750_VERSION "0.12"
-
-/* codec private data */
-struct wm8750_priv {
- unsigned int sysclk;
-};
-
/*
* wm8750 register cache
* We can't read the WM8750 register space when we
@@ -56,6 +49,13 @@ static const u16 wm8750_reg[] = {
0x0079, 0x0079, 0x0079, /* 40 */
};
+/* codec private data */
+struct wm8750_priv {
+ unsigned int sysclk;
+ struct snd_soc_codec codec;
+ u16 reg_cache[ARRAY_SIZE(wm8750_reg)];
+};
+
#define wm8750_reset(c) snd_soc_write(c, WM8750_RESET, 0)
/*
@@ -483,7 +483,7 @@ static int wm8750_set_dai_sysclk(struct snd_soc_dai *codec_dai,
int clk_id, unsigned int freq, int dir)
{
struct snd_soc_codec *codec = codec_dai->codec;
- struct wm8750_priv *wm8750 = codec->private_data;
+ struct wm8750_priv *wm8750 = snd_soc_codec_get_drvdata(codec);
switch (freq) {
case 11289600:
@@ -562,7 +562,7 @@ static int wm8750_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
- struct wm8750_priv *wm8750 = codec->private_data;
+ struct wm8750_priv *wm8750 = snd_soc_codec_get_drvdata(codec);
u16 iface = snd_soc_read(codec, WM8750_IFACE) & 0x1f3;
u16 srate = snd_soc_read(codec, WM8750_SRATE) & 0x1c0;
int coeff = get_coeff(wm8750->sysclk, params_rate(params));
@@ -614,10 +614,16 @@ static int wm8750_set_bias_level(struct snd_soc_codec *codec,
snd_soc_write(codec, WM8750_PWR1, pwr_reg | 0x00c0);
break;
case SND_SOC_BIAS_PREPARE:
- /* set vmid to 5k for quick power up */
- snd_soc_write(codec, WM8750_PWR1, pwr_reg | 0x01c1);
break;
case SND_SOC_BIAS_STANDBY:
+ if (codec->bias_level == SND_SOC_BIAS_OFF) {
+ /* Set VMID to 5k */
+ snd_soc_write(codec, WM8750_PWR1, pwr_reg | 0x01c1);
+
+ /* ...and ramp */
+ msleep(1000);
+ }
+
/* mute dac and set vmid to 500k, enable VREF */
snd_soc_write(codec, WM8750_PWR1, pwr_reg | 0x0141);
break;
@@ -661,13 +667,6 @@ struct snd_soc_dai wm8750_dai = {
};
EXPORT_SYMBOL_GPL(wm8750_dai);
-static void wm8750_work(struct work_struct *work)
-{
- struct snd_soc_codec *codec =
- container_of(work, struct snd_soc_codec, delayed_work.work);
- wm8750_set_bias_level(codec, codec->bias_level);
-}
-
static int wm8750_suspend(struct platform_device *pdev, pm_message_t state)
{
struct snd_soc_device *socdev = platform_get_drvdata(pdev);
@@ -696,36 +695,92 @@ static int wm8750_resume(struct platform_device *pdev)
wm8750_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- /* charge wm8750 caps */
- if (codec->suspend_bias_level == SND_SOC_BIAS_ON) {
- wm8750_set_bias_level(codec, SND_SOC_BIAS_PREPARE);
- codec->bias_level = SND_SOC_BIAS_ON;
- schedule_delayed_work(&codec->delayed_work,
- msecs_to_jiffies(1000));
+ return 0;
+}
+
+static struct snd_soc_codec *wm8750_codec;
+
+static int wm8750_probe(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec;
+ int ret = 0;
+
+ if (!wm8750_codec) {
+ dev_err(&pdev->dev, "WM8750 codec not yet registered\n");
+ return -EINVAL;
+ }
+
+ socdev->card->codec = wm8750_codec;
+ codec = wm8750_codec;
+
+ /* register pcms */
+ ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
+ if (ret < 0) {
+ printk(KERN_ERR "wm8750: failed to create pcms\n");
+ goto err;
}
+ snd_soc_add_controls(codec, wm8750_snd_controls,
+ ARRAY_SIZE(wm8750_snd_controls));
+ wm8750_add_widgets(codec);
+
+ return 0;
+
+err:
+ return ret;
+}
+
+/* power down chip */
+static int wm8750_remove(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+
+ snd_soc_free_pcms(socdev);
+ snd_soc_dapm_free(socdev);
+
return 0;
}
+struct snd_soc_codec_device soc_codec_dev_wm8750 = {
+ .probe = wm8750_probe,
+ .remove = wm8750_remove,
+ .suspend = wm8750_suspend,
+ .resume = wm8750_resume,
+};
+EXPORT_SYMBOL_GPL(soc_codec_dev_wm8750);
+
/*
* initialise the WM8750 driver
* register the mixer and dsp interfaces with the kernel
*/
-static int wm8750_init(struct snd_soc_device *socdev,
- enum snd_soc_control_type control)
+static int wm8750_register(struct wm8750_priv *wm8750,
+ enum snd_soc_control_type control)
{
- struct snd_soc_codec *codec = socdev->card->codec;
+ struct snd_soc_codec *codec = &wm8750->codec;
int reg, ret = 0;
+ if (wm8750_codec) {
+ dev_err(codec->dev, "Multiple WM8750 devices not supported\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ mutex_init(&codec->mutex);
+ INIT_LIST_HEAD(&codec->dapm_widgets);
+ INIT_LIST_HEAD(&codec->dapm_paths);
+
codec->name = "WM8750";
codec->owner = THIS_MODULE;
+ codec->bias_level = SND_SOC_BIAS_STANDBY;
codec->set_bias_level = wm8750_set_bias_level;
codec->dai = &wm8750_dai;
codec->num_dai = 1;
- codec->reg_cache_size = ARRAY_SIZE(wm8750_reg);
- codec->reg_cache = kmemdup(wm8750_reg, sizeof(wm8750_reg), GFP_KERNEL);
- if (codec->reg_cache == NULL)
- return -ENOMEM;
+ codec->reg_cache_size = ARRAY_SIZE(wm8750->reg_cache) + 1;
+ codec->reg_cache = &wm8750->reg_cache;
+ snd_soc_codec_set_drvdata(codec, wm8750);
+
+ memcpy(codec->reg_cache, wm8750_reg, sizeof(wm8750->reg_cache));
ret = snd_soc_codec_set_cache_io(codec, 7, 9, control);
if (ret < 0) {
@@ -739,17 +794,8 @@ static int wm8750_init(struct snd_soc_device *socdev,
goto err;
}
- /* register pcms */
- ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
- if (ret < 0) {
- printk(KERN_ERR "wm8750: failed to create pcms\n");
- goto err;
- }
-
/* charge output caps */
- wm8750_set_bias_level(codec, SND_SOC_BIAS_PREPARE);
- codec->bias_level = SND_SOC_BIAS_STANDBY;
- schedule_delayed_work(&codec->delayed_work, msecs_to_jiffies(1000));
+ wm8750_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
/* set the update bits */
reg = snd_soc_read(codec, WM8750_LDAC);
@@ -769,19 +815,37 @@ static int wm8750_init(struct snd_soc_device *socdev,
reg = snd_soc_read(codec, WM8750_RINVOL);
snd_soc_write(codec, WM8750_RINVOL, reg | 0x0100);
- snd_soc_add_controls(codec, wm8750_snd_controls,
- ARRAY_SIZE(wm8750_snd_controls));
- wm8750_add_widgets(codec);
- return ret;
+ wm8750_codec = codec;
+ ret = snd_soc_register_codec(codec);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to register codec: %d\n", ret);
+ goto err;
+ }
+
+ ret = snd_soc_register_dais(&wm8750_dai, 1);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to register DAIs: %d\n", ret);
+ goto err_codec;
+ }
+
+ return 0;
+
+err_codec:
+ snd_soc_unregister_codec(codec);
err:
- kfree(codec->reg_cache);
+ kfree(wm8750);
return ret;
}
-/* If the i2c layer weren't so broken, we could pass this kind of data
- around */
-static struct snd_soc_device *wm8750_socdev;
+static void wm8750_unregister(struct wm8750_priv *wm8750)
+{
+ wm8750_set_bias_level(&wm8750->codec, SND_SOC_BIAS_OFF);
+ snd_soc_unregister_dais(&wm8750_dai, 1);
+ snd_soc_unregister_codec(&wm8750->codec);
+ kfree(wm8750);
+ wm8750_codec = NULL;
+}
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
@@ -795,24 +859,26 @@ static struct snd_soc_device *wm8750_socdev;
static int wm8750_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
- struct snd_soc_device *socdev = wm8750_socdev;
- struct snd_soc_codec *codec = socdev->card->codec;
- int ret;
+ struct snd_soc_codec *codec;
+ struct wm8750_priv *wm8750;
+
+ wm8750 = kzalloc(sizeof(struct wm8750_priv), GFP_KERNEL);
+ if (wm8750 == NULL)
+ return -ENOMEM;
- i2c_set_clientdata(i2c, codec);
+ codec = &wm8750->codec;
codec->control_data = i2c;
+ i2c_set_clientdata(i2c, wm8750);
- ret = wm8750_init(socdev, SND_SOC_I2C);
- if (ret < 0)
- pr_err("failed to initialise WM8750\n");
+ codec->dev = &i2c->dev;
- return ret;
+ return wm8750_register(wm8750, SND_SOC_I2C);
}
static int wm8750_i2c_remove(struct i2c_client *client)
{
- struct snd_soc_codec *codec = i2c_get_clientdata(client);
- kfree(codec->reg_cache);
+ struct wm8750_priv *wm8750 = i2c_get_clientdata(client);
+ wm8750_unregister(wm8750);
return 0;
}
@@ -831,66 +897,31 @@ static struct i2c_driver wm8750_i2c_driver = {
.remove = wm8750_i2c_remove,
.id_table = wm8750_i2c_id,
};
-
-static int wm8750_add_i2c_device(struct platform_device *pdev,
- const struct wm8750_setup_data *setup)
-{
- struct i2c_board_info info;
- struct i2c_adapter *adapter;
- struct i2c_client *client;
- int ret;
-
- ret = i2c_add_driver(&wm8750_i2c_driver);
- if (ret != 0) {
- dev_err(&pdev->dev, "can't add i2c driver\n");
- return ret;
- }
-
- memset(&info, 0, sizeof(struct i2c_board_info));
- info.addr = setup->i2c_address;
- strlcpy(info.type, "wm8750", I2C_NAME_SIZE);
-
- adapter = i2c_get_adapter(setup->i2c_bus);
- if (!adapter) {
- dev_err(&pdev->dev, "can't get i2c adapter %d\n",
- setup->i2c_bus);
- goto err_driver;
- }
-
- client = i2c_new_device(adapter, &info);
- i2c_put_adapter(adapter);
- if (!client) {
- dev_err(&pdev->dev, "can't add i2c device at 0x%x\n",
- (unsigned int)info.addr);
- goto err_driver;
- }
-
- return 0;
-
-err_driver:
- i2c_del_driver(&wm8750_i2c_driver);
- return -ENODEV;
-}
#endif
#if defined(CONFIG_SPI_MASTER)
static int __devinit wm8750_spi_probe(struct spi_device *spi)
{
- struct snd_soc_device *socdev = wm8750_socdev;
- struct snd_soc_codec *codec = socdev->card->codec;
- int ret;
+ struct snd_soc_codec *codec;
+ struct wm8750_priv *wm8750;
+
+ wm8750 = kzalloc(sizeof(struct wm8750_priv), GFP_KERNEL);
+ if (wm8750 == NULL)
+ return -ENOMEM;
+ codec = &wm8750->codec;
codec->control_data = spi;
+ codec->dev = &spi->dev;
- ret = wm8750_init(socdev, SND_SOC_SPI);
- if (ret < 0)
- dev_err(&spi->dev, "failed to initialise WM8750\n");
+ dev_set_drvdata(&spi->dev, wm8750);
- return ret;
+ return wm8750_register(wm8750, SND_SOC_SPI);
}
static int __devexit wm8750_spi_remove(struct spi_device *spi)
{
+ struct wm8750_priv *wm8750 = dev_get_drvdata(&spi->dev);
+ wm8750_unregister(wm8750);
return 0;
}
@@ -905,115 +936,31 @@ static struct spi_driver wm8750_spi_driver = {
};
#endif
-static int wm8750_probe(struct platform_device *pdev)
+static int __init wm8750_modinit(void)
{
- struct snd_soc_device *socdev = platform_get_drvdata(pdev);
- struct wm8750_setup_data *setup = socdev->codec_data;
- struct snd_soc_codec *codec;
- struct wm8750_priv *wm8750;
int ret;
-
- pr_info("WM8750 Audio Codec %s", WM8750_VERSION);
- codec = kzalloc(sizeof(struct snd_soc_codec), GFP_KERNEL);
- if (codec == NULL)
- return -ENOMEM;
-
- wm8750 = kzalloc(sizeof(struct wm8750_priv), GFP_KERNEL);
- if (wm8750 == NULL) {
- kfree(codec);
- return -ENOMEM;
- }
-
- codec->private_data = wm8750;
- socdev->card->codec = codec;
- mutex_init(&codec->mutex);
- INIT_LIST_HEAD(&codec->dapm_widgets);
- INIT_LIST_HEAD(&codec->dapm_paths);
- wm8750_socdev = socdev;
- INIT_DELAYED_WORK(&codec->delayed_work, wm8750_work);
-
- ret = -ENODEV;
-
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
- if (setup->i2c_address) {
- ret = wm8750_add_i2c_device(pdev, setup);
- }
+ ret = i2c_add_driver(&wm8750_i2c_driver);
+ if (ret != 0)
+ pr_err("Failed to register WM8750 I2C driver: %d\n", ret);
#endif
#if defined(CONFIG_SPI_MASTER)
- if (setup->spi) {
- ret = spi_register_driver(&wm8750_spi_driver);
- if (ret != 0)
- printk(KERN_ERR "can't add spi driver");
- }
+ ret = spi_register_driver(&wm8750_spi_driver);
+ if (ret != 0)
+ pr_err("Failed to register WM8750 SPI driver: %d\n", ret);
#endif
-
- if (ret != 0) {
- kfree(codec->private_data);
- kfree(codec);
- }
- return ret;
-}
-
-/*
- * This function forces any delayed work to be queued and run.
- */
-static int run_delayed_work(struct delayed_work *dwork)
-{
- int ret;
-
- /* cancel any work waiting to be queued. */
- ret = cancel_delayed_work(dwork);
-
- /* if there was any work waiting then we run it now and
- * wait for it's completion */
- if (ret) {
- schedule_delayed_work(dwork, 0);
- flush_scheduled_work();
- }
- return ret;
+ return 0;
}
+module_init(wm8750_modinit);
-/* power down chip */
-static int wm8750_remove(struct platform_device *pdev)
+static void __exit wm8750_exit(void)
{
- struct snd_soc_device *socdev = platform_get_drvdata(pdev);
- struct snd_soc_codec *codec = socdev->card->codec;
-
- if (codec->control_data)
- wm8750_set_bias_level(codec, SND_SOC_BIAS_OFF);
- run_delayed_work(&codec->delayed_work);
- snd_soc_free_pcms(socdev);
- snd_soc_dapm_free(socdev);
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
- i2c_unregister_device(codec->control_data);
i2c_del_driver(&wm8750_i2c_driver);
#endif
#if defined(CONFIG_SPI_MASTER)
spi_unregister_driver(&wm8750_spi_driver);
#endif
- kfree(codec->private_data);
- kfree(codec);
-
- return 0;
-}
-
-struct snd_soc_codec_device soc_codec_dev_wm8750 = {
- .probe = wm8750_probe,
- .remove = wm8750_remove,
- .suspend = wm8750_suspend,
- .resume = wm8750_resume,
-};
-EXPORT_SYMBOL_GPL(soc_codec_dev_wm8750);
-
-static int __init wm8750_modinit(void)
-{
- return snd_soc_register_dai(&wm8750_dai);
-}
-module_init(wm8750_modinit);
-
-static void __exit wm8750_exit(void)
-{
- snd_soc_unregister_dai(&wm8750_dai);
}
module_exit(wm8750_exit);
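With wm8750_add_i2c_device() removed, the driver no longer instantiates its own I2C client from wm8750_setup_data; the device is expected to be declared by board code and matched against the "wm8750" id table. A hedged sketch of how a board file might do that - the bus number and address here are assumptions for the example, not values from this patch:

#include <linux/i2c.h>
#include <linux/init.h>

/* Illustrative only: bus 0 and address 0x1a are example values. */
static struct i2c_board_info example_wm8750_info __initdata = {
	I2C_BOARD_INFO("wm8750", 0x1a),
};

static int __init example_board_audio_init(void)
{
	/* Registering the board info (before the adapter probes) lets the
	 * I2C core create the client, which then binds to
	 * wm8750_i2c_driver via its id table. */
	return i2c_register_board_info(0, &example_wm8750_info, 1);
}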
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c
index 613199a0f799..b59f349c5218 100644
--- a/sound/soc/codecs/wm8753.c
+++ b/sound/soc/codecs/wm8753.c
@@ -851,7 +851,7 @@ static int wm8753_set_dai_sysclk(struct snd_soc_dai *codec_dai,
int clk_id, unsigned int freq, int dir)
{
struct snd_soc_codec *codec = codec_dai->codec;
- struct wm8753_priv *wm8753 = codec->private_data;
+ struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec);
switch (freq) {
case 11289600:
@@ -914,7 +914,7 @@ static int wm8753_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
- struct wm8753_priv *wm8753 = codec->private_data;
+ struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec);
u16 voice = wm8753_read_reg_cache(codec, WM8753_PCM) & 0x01f3;
u16 srate = wm8753_read_reg_cache(codec, WM8753_SRATE1) & 0x017f;
@@ -1148,7 +1148,7 @@ static int wm8753_i2s_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
- struct wm8753_priv *wm8753 = codec->private_data;
+ struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec);
u16 srate = wm8753_read_reg_cache(codec, WM8753_SRATE1) & 0x01c0;
u16 hifi = wm8753_read_reg_cache(codec, WM8753_HIFI) & 0x01f3;
int coeff;
@@ -1646,7 +1646,7 @@ static int wm8753_register(struct wm8753_priv *wm8753)
codec->num_dai = 2;
codec->reg_cache_size = ARRAY_SIZE(wm8753->reg_cache) + 1;
codec->reg_cache = &wm8753->reg_cache;
- codec->private_data = wm8753;
+ snd_soc_codec_set_drvdata(codec, wm8753);
memcpy(codec->reg_cache, wm8753_reg, sizeof(wm8753->reg_cache));
INIT_DELAYED_WORK(&codec->delayed_work, wm8753_work);
diff --git a/sound/soc/codecs/wm8776.c b/sound/soc/codecs/wm8776.c
index 60b1b3e1094b..7e4a627b4c7e 100644
--- a/sound/soc/codecs/wm8776.c
+++ b/sound/soc/codecs/wm8776.c
@@ -227,7 +227,7 @@ static int wm8776_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
- struct wm8776_priv *wm8776 = codec->private_data;
+ struct wm8776_priv *wm8776 = snd_soc_codec_get_drvdata(codec);
int iface_reg, iface;
int ratio_shift, master;
int i;
@@ -304,7 +304,7 @@ static int wm8776_set_sysclk(struct snd_soc_dai *dai,
int clk_id, unsigned int freq, int dir)
{
struct snd_soc_codec *codec = dai->codec;
- struct wm8776_priv *wm8776 = codec->private_data;
+ struct wm8776_priv *wm8776 = snd_soc_codec_get_drvdata(codec);
BUG_ON(dai->id >= ARRAY_SIZE(wm8776->sysclk));
@@ -491,7 +491,7 @@ static int wm8776_register(struct wm8776_priv *wm8776,
INIT_LIST_HEAD(&codec->dapm_widgets);
INIT_LIST_HEAD(&codec->dapm_paths);
- codec->private_data = wm8776;
+ snd_soc_codec_set_drvdata(codec, wm8776);
codec->name = "WM8776";
codec->owner = THIS_MODULE;
codec->bias_level = SND_SOC_BIAS_OFF;
diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
index b7fd96adac64..5da17a704e5a 100644
--- a/sound/soc/codecs/wm8900.c
+++ b/sound/soc/codecs/wm8900.c
@@ -745,7 +745,7 @@ static int fll_factors(struct _fll_div *fll_div, unsigned int Fref,
static int wm8900_set_fll(struct snd_soc_codec *codec,
int fll_id, unsigned int freq_in, unsigned int freq_out)
{
- struct wm8900_priv *wm8900 = codec->private_data;
+ struct wm8900_priv *wm8900 = snd_soc_codec_get_drvdata(codec);
struct _fll_div fll_div;
unsigned int reg;
@@ -1132,7 +1132,7 @@ static int wm8900_suspend(struct platform_device *pdev, pm_message_t state)
{
struct snd_soc_device *socdev = platform_get_drvdata(pdev);
struct snd_soc_codec *codec = socdev->card->codec;
- struct wm8900_priv *wm8900 = codec->private_data;
+ struct wm8900_priv *wm8900 = snd_soc_codec_get_drvdata(codec);
int fll_out = wm8900->fll_out;
int fll_in = wm8900->fll_in;
int ret;
@@ -1156,7 +1156,7 @@ static int wm8900_resume(struct platform_device *pdev)
{
struct snd_soc_device *socdev = platform_get_drvdata(pdev);
struct snd_soc_codec *codec = socdev->card->codec;
- struct wm8900_priv *wm8900 = codec->private_data;
+ struct wm8900_priv *wm8900 = snd_soc_codec_get_drvdata(codec);
u16 *cache;
int i, ret;
@@ -1206,7 +1206,7 @@ static __devinit int wm8900_i2c_probe(struct i2c_client *i2c,
return -ENOMEM;
codec = &wm8900->codec;
- codec->private_data = wm8900;
+ snd_soc_codec_set_drvdata(codec, wm8900);
codec->reg_cache = &wm8900->reg_cache[0];
codec->reg_cache_size = WM8900_MAXREG;
@@ -1305,7 +1305,7 @@ static __devexit int wm8900_i2c_remove(struct i2c_client *client)
wm8900_set_bias_level(wm8900_codec, SND_SOC_BIAS_OFF);
wm8900_dai.dev = NULL;
- kfree(wm8900_codec->private_data);
+ kfree(snd_soc_codec_get_drvdata(wm8900_codec));
wm8900_codec = NULL;
return 0;
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index fa5f99fde68b..bf08282d5ee5 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -11,26 +11,27 @@
*
* TODO:
* - TDM mode configuration.
- * - Mic detect.
* - Digital microphone support.
- * - Interrupt support (mic detect and sequencer).
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
+#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <sound/core.h>
+#include <sound/jack.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/tlv.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
#include <sound/initval.h>
+#include <sound/wm8903.h>
#include "wm8903.h"
@@ -222,6 +223,14 @@ struct wm8903_priv {
int playback_active;
int capture_active;
+ struct completion wseq;
+
+ struct snd_soc_jack *mic_jack;
+ int mic_det;
+ int mic_short;
+ int mic_last_report;
+ int mic_delay;
+
struct snd_pcm_substream *master_substream;
struct snd_pcm_substream *slave_substream;
};
@@ -244,13 +253,14 @@ static int wm8903_run_sequence(struct snd_soc_codec *codec, unsigned int start)
{
u16 reg[5];
struct i2c_client *i2c = codec->control_data;
+ struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
BUG_ON(start > 48);
- /* Enable the sequencer */
+ /* Enable the sequencer if it's not already on */
reg[0] = snd_soc_read(codec, WM8903_WRITE_SEQUENCER_0);
- reg[0] |= WM8903_WSEQ_ENA;
- snd_soc_write(codec, WM8903_WRITE_SEQUENCER_0, reg[0]);
+ snd_soc_write(codec, WM8903_WRITE_SEQUENCER_0,
+ reg[0] | WM8903_WSEQ_ENA);
dev_dbg(&i2c->dev, "Starting sequence at %d\n", start);
@@ -258,20 +268,19 @@ static int wm8903_run_sequence(struct snd_soc_codec *codec, unsigned int start)
start | WM8903_WSEQ_START);
/* Wait for it to complete. If we have the interrupt wired up then
- * we could block waiting for an interrupt, though polling may still
- * be desirable for diagnostic purposes.
+ * that will break us out of the poll early.
*/
do {
- msleep(10);
+ wait_for_completion_timeout(&wm8903->wseq,
+ msecs_to_jiffies(10));
reg[4] = snd_soc_read(codec, WM8903_WRITE_SEQUENCER_4);
} while (reg[4] & WM8903_WSEQ_BUSY);
dev_dbg(&i2c->dev, "Sequence complete\n");
- /* Disable the sequencer again */
- snd_soc_write(codec, WM8903_WRITE_SEQUENCER_0,
- reg[0] & ~WM8903_WSEQ_ENA);
+ /* Disable the sequencer again if we enabled it */
+ snd_soc_write(codec, WM8903_WRITE_SEQUENCER_0, reg[0]);
return 0;
}
@@ -412,7 +421,7 @@ static int wm8903_class_w_put(struct snd_kcontrol *kcontrol,
{
struct snd_soc_dapm_widget *widget = snd_kcontrol_chip(kcontrol);
struct snd_soc_codec *codec = widget->codec;
- struct wm8903_priv *wm8903 = codec->private_data;
+ struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
struct i2c_client *i2c = codec->control_data;
u16 reg;
int ret;
@@ -993,7 +1002,7 @@ static int wm8903_set_dai_sysclk(struct snd_soc_dai *codec_dai,
int clk_id, unsigned int freq, int dir)
{
struct snd_soc_codec *codec = codec_dai->codec;
- struct wm8903_priv *wm8903 = codec->private_data;
+ struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
wm8903->sysclk = freq;
@@ -1221,7 +1230,7 @@ static int wm8903_startup(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
- struct wm8903_priv *wm8903 = codec->private_data;
+ struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
struct i2c_client *i2c = codec->control_data;
struct snd_pcm_runtime *master_runtime;
@@ -1257,7 +1266,7 @@ static void wm8903_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
- struct wm8903_priv *wm8903 = codec->private_data;
+ struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
wm8903->playback_active--;
@@ -1277,7 +1286,7 @@ static int wm8903_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
- struct wm8903_priv *wm8903 = codec->private_data;
+ struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
struct i2c_client *i2c = codec->control_data;
int fs = params_rate(params);
int bclk;
@@ -1436,6 +1445,116 @@ static int wm8903_hw_params(struct snd_pcm_substream *substream,
return 0;
}
+/**
+ * wm8903_mic_detect - Enable microphone detection via the WM8903 IRQ
+ *
+ * @codec: WM8903 codec
+ * @jack: jack to report detection events on
+ * @det: value to report for presence detection
+ * @shrt: value to report for short detection
+ *
+ * Enable microphone detection via IRQ on the WM8903. If GPIOs are
+ * being used to bring out signals to the processor then only platform
+ * data configuration is needed for WM8903 and processor GPIOs should
+ * be configured using snd_soc_jack_add_gpios() instead.
+ *
+ * The current thresholds for detection should be configured using
+ * micdet_cfg in the platform data. Using this function will force on
+ * the microphone bias for the device.
+ */
+int wm8903_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack,
+ int det, int shrt)
+{
+ struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
+ int irq_mask = WM8903_MICDET_EINT | WM8903_MICSHRT_EINT;
+
+ dev_dbg(codec->dev, "Enabling microphone detection: %x %x\n",
+ det, shrt);
+
+ /* Store the configuration */
+ wm8903->mic_jack = jack;
+ wm8903->mic_det = det;
+ wm8903->mic_short = shrt;
+
+ /* Enable interrupts we've got a report configured for */
+ if (det)
+ irq_mask &= ~WM8903_MICDET_EINT;
+ if (shrt)
+ irq_mask &= ~WM8903_MICSHRT_EINT;
+
+ snd_soc_update_bits(codec, WM8903_INTERRUPT_STATUS_1_MASK,
+ WM8903_MICDET_EINT | WM8903_MICSHRT_EINT,
+ irq_mask);
+
+ if (det && shrt) {
+ /* Enable mic detection, this may not have been set through
+ * platform data (eg, if the defaults are OK). */
+ snd_soc_update_bits(codec, WM8903_WRITE_SEQUENCER_0,
+ WM8903_WSEQ_ENA, WM8903_WSEQ_ENA);
+ snd_soc_update_bits(codec, WM8903_MIC_BIAS_CONTROL_0,
+ WM8903_MICDET_ENA, WM8903_MICDET_ENA);
+ } else {
+ snd_soc_update_bits(codec, WM8903_MIC_BIAS_CONTROL_0,
+ WM8903_MICDET_ENA, 0);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wm8903_mic_detect);
+
+static irqreturn_t wm8903_irq(int irq, void *data)
+{
+ struct wm8903_priv *wm8903 = data;
+ struct snd_soc_codec *codec = &wm8903->codec;
+ int mic_report;
+ int int_pol;
+ int int_val = 0;
+ int mask = ~snd_soc_read(codec, WM8903_INTERRUPT_STATUS_1_MASK);
+
+ int_val = snd_soc_read(codec, WM8903_INTERRUPT_STATUS_1) & mask;
+
+ if (int_val & WM8903_WSEQ_BUSY_EINT) {
+ dev_dbg(codec->dev, "Write sequencer done\n");
+ complete(&wm8903->wseq);
+ }
+
+ /*
+ * The rest is microphone jack detection. We need to manually
+ * invert the polarity of the interrupt after each event - to
+ * simplify the code we keep track of the last state we reported
+ * and just invert the relevant bits in both the report and
+ * the polarity register.
+ */
+ mic_report = wm8903->mic_last_report;
+ int_pol = snd_soc_read(codec, WM8903_INTERRUPT_POLARITY_1);
+
+ if (int_val & WM8903_MICSHRT_EINT) {
+ dev_dbg(codec->dev, "Microphone short (pol=%x)\n", int_pol);
+
+ mic_report ^= wm8903->mic_short;
+ int_pol ^= WM8903_MICSHRT_INV;
+ }
+
+ if (int_val & WM8903_MICDET_EINT) {
+ dev_dbg(codec->dev, "Microphone detect (pol=%x)\n", int_pol);
+
+ mic_report ^= wm8903->mic_det;
+ int_pol ^= WM8903_MICDET_INV;
+
+ msleep(wm8903->mic_delay);
+ }
+
+ snd_soc_update_bits(codec, WM8903_INTERRUPT_POLARITY_1,
+ WM8903_MICSHRT_INV | WM8903_MICDET_INV, int_pol);
+
+ snd_soc_jack_report(wm8903->mic_jack, mic_report,
+ wm8903->mic_short | wm8903->mic_det);
+
+ wm8903->mic_last_report = mic_report;
+
+ return IRQ_HANDLED;
+}
+
#define WM8903_PLAYBACK_RATES (SNDRV_PCM_RATE_8000 |\
SNDRV_PCM_RATE_11025 | \
SNDRV_PCM_RATE_16000 | \
@@ -1510,7 +1629,6 @@ static int wm8903_resume(struct platform_device *pdev)
/* Bring the codec back up to standby first to minimise pop/clicks */
wm8903_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- wm8903_set_bias_level(codec, codec->suspend_bias_level);
/* Sync back everything else */
if (tmp_cache) {
@@ -1530,9 +1648,11 @@ static struct snd_soc_codec *wm8903_codec;
static __devinit int wm8903_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
+ struct wm8903_platform_data *pdata = dev_get_platdata(&i2c->dev);
struct wm8903_priv *wm8903;
struct snd_soc_codec *codec;
- int ret;
+ int ret, i;
+ int trigger, irq_pol;
u16 val;
wm8903 = kzalloc(sizeof(struct wm8903_priv), GFP_KERNEL);
@@ -1554,8 +1674,9 @@ static __devinit int wm8903_i2c_probe(struct i2c_client *i2c,
codec->num_dai = 1;
codec->reg_cache_size = ARRAY_SIZE(wm8903->reg_cache);
codec->reg_cache = &wm8903->reg_cache[0];
- codec->private_data = wm8903;
+ snd_soc_codec_set_drvdata(codec, wm8903);
codec->volatile_register = wm8903_volatile_register;
+ init_completion(&wm8903->wseq);
i2c_set_clientdata(i2c, codec);
codec->control_data = i2c;
@@ -1579,6 +1700,53 @@ static __devinit int wm8903_i2c_probe(struct i2c_client *i2c,
wm8903_reset(codec);
+ /* Set up GPIOs and microphone detection */
+ if (pdata) {
+ for (i = 0; i < ARRAY_SIZE(pdata->gpio_cfg); i++) {
+ if (!pdata->gpio_cfg[i])
+ continue;
+
+ snd_soc_write(codec, WM8903_GPIO_CONTROL_1 + i,
+ pdata->gpio_cfg[i] & 0xffff);
+ }
+
+ snd_soc_write(codec, WM8903_MIC_BIAS_CONTROL_0,
+ pdata->micdet_cfg);
+
+ /* Microphone detection needs the WSEQ clock */
+ if (pdata->micdet_cfg)
+ snd_soc_update_bits(codec, WM8903_WRITE_SEQUENCER_0,
+ WM8903_WSEQ_ENA, WM8903_WSEQ_ENA);
+
+ wm8903->mic_delay = pdata->micdet_delay;
+ }
+
+ if (i2c->irq) {
+ if (pdata && pdata->irq_active_low) {
+ trigger = IRQF_TRIGGER_LOW;
+ irq_pol = WM8903_IRQ_POL;
+ } else {
+ trigger = IRQF_TRIGGER_HIGH;
+ irq_pol = 0;
+ }
+
+ snd_soc_update_bits(codec, WM8903_INTERRUPT_CONTROL,
+ WM8903_IRQ_POL, irq_pol);
+
+ ret = request_threaded_irq(i2c->irq, NULL, wm8903_irq,
+ trigger | IRQF_ONESHOT,
+ "wm8903", wm8903);
+ if (ret != 0) {
+ dev_err(&i2c->dev, "Failed to request IRQ: %d\n",
+ ret);
+ goto err;
+ }
+
+ /* Enable write sequencer interrupts */
+ snd_soc_update_bits(codec, WM8903_INTERRUPT_STATUS_1_MASK,
+ WM8903_IM_WSEQ_BUSY_EINT, 0);
+ }
+
/* power on device */
wm8903_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
@@ -1619,7 +1787,7 @@ static __devinit int wm8903_i2c_probe(struct i2c_client *i2c,
ret = snd_soc_register_codec(codec);
if (ret != 0) {
dev_err(&i2c->dev, "Failed to register codec: %d\n", ret);
- goto err;
+ goto err_irq;
}
ret = snd_soc_register_dai(&wm8903_dai);
@@ -1632,6 +1800,9 @@ static __devinit int wm8903_i2c_probe(struct i2c_client *i2c,
err_codec:
snd_soc_unregister_codec(codec);
+err_irq:
+ if (i2c->irq)
+ free_irq(i2c->irq, wm8903);
err:
wm8903_codec = NULL;
kfree(wm8903);
@@ -1641,13 +1812,17 @@ err:
static __devexit int wm8903_i2c_remove(struct i2c_client *client)
{
struct snd_soc_codec *codec = i2c_get_clientdata(client);
+ struct wm8903_priv *priv = snd_soc_codec_get_drvdata(codec);
snd_soc_unregister_dai(&wm8903_dai);
snd_soc_unregister_codec(codec);
wm8903_set_bias_level(codec, SND_SOC_BIAS_OFF);
- kfree(codec->private_data);
+ if (client->irq)
+ free_irq(client->irq, priv);
+
+ kfree(priv);
wm8903_codec = NULL;
wm8903_dai.dev = NULL;
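As the kernel-doc for wm8903_mic_detect() above notes, the codec driver only wires up the reporting; the jack itself comes from the machine driver. A rough sketch of how a machine driver hook might connect the two, assuming the ASoC APIs of this kernel generation; the function, jack name and reporting policy are illustrative assumptions, not requirements of the codec driver:

#include <sound/jack.h>
#include <sound/soc.h>
#include "../codecs/wm8903.h"

static struct snd_soc_jack example_mic_jack;

/* Sketch of a machine driver hook; how the card pointer is obtained is
 * machine specific, so it is simply passed in here. */
static int example_mic_jack_init(struct snd_soc_card *card,
				 struct snd_soc_codec *codec)
{
	int ret;

	ret = snd_soc_jack_new(card, "Mic Jack", SND_JACK_MICROPHONE,
			       &example_mic_jack);
	if (ret != 0)
		return ret;

	/* Report SND_JACK_MICROPHONE when a microphone is detected; shorts
	 * are not reported in this example (the mapping is board specific). */
	return wm8903_mic_detect(codec, &example_mic_jack,
				 SND_JACK_MICROPHONE, 0);
}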
diff --git a/sound/soc/codecs/wm8903.h b/sound/soc/codecs/wm8903.h
index 0ea27e2b9963..ce384a2ad820 100644
--- a/sound/soc/codecs/wm8903.h
+++ b/sound/soc/codecs/wm8903.h
@@ -18,6 +18,10 @@
extern struct snd_soc_dai wm8903_dai;
extern struct snd_soc_codec_device soc_codec_dev_wm8903;
+extern int wm8903_mic_detect(struct snd_soc_codec *codec,
+ struct snd_soc_jack *jack,
+ int det, int shrt);
+
#define WM8903_MCLK_DIV_2 1
#define WM8903_CLK_SYS 2
#define WM8903_BCLK 3
@@ -173,28 +177,6 @@ extern struct snd_soc_codec_device soc_codec_dev_wm8903;
#define WM8903_VMID_RES_5K 4
/*
- * R6 (0x06) - Mic Bias Control 0
- */
-#define WM8903_MICDET_HYST_ENA 0x0080 /* MICDET_HYST_ENA */
-#define WM8903_MICDET_HYST_ENA_MASK 0x0080 /* MICDET_HYST_ENA */
-#define WM8903_MICDET_HYST_ENA_SHIFT 7 /* MICDET_HYST_ENA */
-#define WM8903_MICDET_HYST_ENA_WIDTH 1 /* MICDET_HYST_ENA */
-#define WM8903_MICDET_THR_MASK 0x0070 /* MICDET_THR - [6:4] */
-#define WM8903_MICDET_THR_SHIFT 4 /* MICDET_THR - [6:4] */
-#define WM8903_MICDET_THR_WIDTH 3 /* MICDET_THR - [6:4] */
-#define WM8903_MICSHORT_THR_MASK 0x000C /* MICSHORT_THR - [3:2] */
-#define WM8903_MICSHORT_THR_SHIFT 2 /* MICSHORT_THR - [3:2] */
-#define WM8903_MICSHORT_THR_WIDTH 2 /* MICSHORT_THR - [3:2] */
-#define WM8903_MICDET_ENA 0x0002 /* MICDET_ENA */
-#define WM8903_MICDET_ENA_MASK 0x0002 /* MICDET_ENA */
-#define WM8903_MICDET_ENA_SHIFT 1 /* MICDET_ENA */
-#define WM8903_MICDET_ENA_WIDTH 1 /* MICDET_ENA */
-#define WM8903_MICBIAS_ENA 0x0001 /* MICBIAS_ENA */
-#define WM8903_MICBIAS_ENA_MASK 0x0001 /* MICBIAS_ENA */
-#define WM8903_MICBIAS_ENA_SHIFT 0 /* MICBIAS_ENA */
-#define WM8903_MICBIAS_ENA_WIDTH 1 /* MICBIAS_ENA */
-
-/*
* R8 (0x08) - Analogue DAC 0
*/
#define WM8903_DACBIAS_SEL_MASK 0x0018 /* DACBIAS_SEL - [4:3] */
@@ -1135,201 +1117,6 @@ extern struct snd_soc_codec_device soc_codec_dev_wm8903;
#define WM8903_MASK_WRITE_ENA_WIDTH 1 /* MASK_WRITE_ENA */
/*
- * R116 (0x74) - GPIO Control 1
- */
-#define WM8903_GP1_FN_MASK 0x1F00 /* GP1_FN - [12:8] */
-#define WM8903_GP1_FN_SHIFT 8 /* GP1_FN - [12:8] */
-#define WM8903_GP1_FN_WIDTH 5 /* GP1_FN - [12:8] */
-#define WM8903_GP1_DIR 0x0080 /* GP1_DIR */
-#define WM8903_GP1_DIR_MASK 0x0080 /* GP1_DIR */
-#define WM8903_GP1_DIR_SHIFT 7 /* GP1_DIR */
-#define WM8903_GP1_DIR_WIDTH 1 /* GP1_DIR */
-#define WM8903_GP1_OP_CFG 0x0040 /* GP1_OP_CFG */
-#define WM8903_GP1_OP_CFG_MASK 0x0040 /* GP1_OP_CFG */
-#define WM8903_GP1_OP_CFG_SHIFT 6 /* GP1_OP_CFG */
-#define WM8903_GP1_OP_CFG_WIDTH 1 /* GP1_OP_CFG */
-#define WM8903_GP1_IP_CFG 0x0020 /* GP1_IP_CFG */
-#define WM8903_GP1_IP_CFG_MASK 0x0020 /* GP1_IP_CFG */
-#define WM8903_GP1_IP_CFG_SHIFT 5 /* GP1_IP_CFG */
-#define WM8903_GP1_IP_CFG_WIDTH 1 /* GP1_IP_CFG */
-#define WM8903_GP1_LVL 0x0010 /* GP1_LVL */
-#define WM8903_GP1_LVL_MASK 0x0010 /* GP1_LVL */
-#define WM8903_GP1_LVL_SHIFT 4 /* GP1_LVL */
-#define WM8903_GP1_LVL_WIDTH 1 /* GP1_LVL */
-#define WM8903_GP1_PD 0x0008 /* GP1_PD */
-#define WM8903_GP1_PD_MASK 0x0008 /* GP1_PD */
-#define WM8903_GP1_PD_SHIFT 3 /* GP1_PD */
-#define WM8903_GP1_PD_WIDTH 1 /* GP1_PD */
-#define WM8903_GP1_PU 0x0004 /* GP1_PU */
-#define WM8903_GP1_PU_MASK 0x0004 /* GP1_PU */
-#define WM8903_GP1_PU_SHIFT 2 /* GP1_PU */
-#define WM8903_GP1_PU_WIDTH 1 /* GP1_PU */
-#define WM8903_GP1_INTMODE 0x0002 /* GP1_INTMODE */
-#define WM8903_GP1_INTMODE_MASK 0x0002 /* GP1_INTMODE */
-#define WM8903_GP1_INTMODE_SHIFT 1 /* GP1_INTMODE */
-#define WM8903_GP1_INTMODE_WIDTH 1 /* GP1_INTMODE */
-#define WM8903_GP1_DB 0x0001 /* GP1_DB */
-#define WM8903_GP1_DB_MASK 0x0001 /* GP1_DB */
-#define WM8903_GP1_DB_SHIFT 0 /* GP1_DB */
-#define WM8903_GP1_DB_WIDTH 1 /* GP1_DB */
-
-/*
- * R117 (0x75) - GPIO Control 2
- */
-#define WM8903_GP2_FN_MASK 0x1F00 /* GP2_FN - [12:8] */
-#define WM8903_GP2_FN_SHIFT 8 /* GP2_FN - [12:8] */
-#define WM8903_GP2_FN_WIDTH 5 /* GP2_FN - [12:8] */
-#define WM8903_GP2_DIR 0x0080 /* GP2_DIR */
-#define WM8903_GP2_DIR_MASK 0x0080 /* GP2_DIR */
-#define WM8903_GP2_DIR_SHIFT 7 /* GP2_DIR */
-#define WM8903_GP2_DIR_WIDTH 1 /* GP2_DIR */
-#define WM8903_GP2_OP_CFG 0x0040 /* GP2_OP_CFG */
-#define WM8903_GP2_OP_CFG_MASK 0x0040 /* GP2_OP_CFG */
-#define WM8903_GP2_OP_CFG_SHIFT 6 /* GP2_OP_CFG */
-#define WM8903_GP2_OP_CFG_WIDTH 1 /* GP2_OP_CFG */
-#define WM8903_GP2_IP_CFG 0x0020 /* GP2_IP_CFG */
-#define WM8903_GP2_IP_CFG_MASK 0x0020 /* GP2_IP_CFG */
-#define WM8903_GP2_IP_CFG_SHIFT 5 /* GP2_IP_CFG */
-#define WM8903_GP2_IP_CFG_WIDTH 1 /* GP2_IP_CFG */
-#define WM8903_GP2_LVL 0x0010 /* GP2_LVL */
-#define WM8903_GP2_LVL_MASK 0x0010 /* GP2_LVL */
-#define WM8903_GP2_LVL_SHIFT 4 /* GP2_LVL */
-#define WM8903_GP2_LVL_WIDTH 1 /* GP2_LVL */
-#define WM8903_GP2_PD 0x0008 /* GP2_PD */
-#define WM8903_GP2_PD_MASK 0x0008 /* GP2_PD */
-#define WM8903_GP2_PD_SHIFT 3 /* GP2_PD */
-#define WM8903_GP2_PD_WIDTH 1 /* GP2_PD */
-#define WM8903_GP2_PU 0x0004 /* GP2_PU */
-#define WM8903_GP2_PU_MASK 0x0004 /* GP2_PU */
-#define WM8903_GP2_PU_SHIFT 2 /* GP2_PU */
-#define WM8903_GP2_PU_WIDTH 1 /* GP2_PU */
-#define WM8903_GP2_INTMODE 0x0002 /* GP2_INTMODE */
-#define WM8903_GP2_INTMODE_MASK 0x0002 /* GP2_INTMODE */
-#define WM8903_GP2_INTMODE_SHIFT 1 /* GP2_INTMODE */
-#define WM8903_GP2_INTMODE_WIDTH 1 /* GP2_INTMODE */
-#define WM8903_GP2_DB 0x0001 /* GP2_DB */
-#define WM8903_GP2_DB_MASK 0x0001 /* GP2_DB */
-#define WM8903_GP2_DB_SHIFT 0 /* GP2_DB */
-#define WM8903_GP2_DB_WIDTH 1 /* GP2_DB */
-
-/*
- * R118 (0x76) - GPIO Control 3
- */
-#define WM8903_GP3_FN_MASK 0x1F00 /* GP3_FN - [12:8] */
-#define WM8903_GP3_FN_SHIFT 8 /* GP3_FN - [12:8] */
-#define WM8903_GP3_FN_WIDTH 5 /* GP3_FN - [12:8] */
-#define WM8903_GP3_DIR 0x0080 /* GP3_DIR */
-#define WM8903_GP3_DIR_MASK 0x0080 /* GP3_DIR */
-#define WM8903_GP3_DIR_SHIFT 7 /* GP3_DIR */
-#define WM8903_GP3_DIR_WIDTH 1 /* GP3_DIR */
-#define WM8903_GP3_OP_CFG 0x0040 /* GP3_OP_CFG */
-#define WM8903_GP3_OP_CFG_MASK 0x0040 /* GP3_OP_CFG */
-#define WM8903_GP3_OP_CFG_SHIFT 6 /* GP3_OP_CFG */
-#define WM8903_GP3_OP_CFG_WIDTH 1 /* GP3_OP_CFG */
-#define WM8903_GP3_IP_CFG 0x0020 /* GP3_IP_CFG */
-#define WM8903_GP3_IP_CFG_MASK 0x0020 /* GP3_IP_CFG */
-#define WM8903_GP3_IP_CFG_SHIFT 5 /* GP3_IP_CFG */
-#define WM8903_GP3_IP_CFG_WIDTH 1 /* GP3_IP_CFG */
-#define WM8903_GP3_LVL 0x0010 /* GP3_LVL */
-#define WM8903_GP3_LVL_MASK 0x0010 /* GP3_LVL */
-#define WM8903_GP3_LVL_SHIFT 4 /* GP3_LVL */
-#define WM8903_GP3_LVL_WIDTH 1 /* GP3_LVL */
-#define WM8903_GP3_PD 0x0008 /* GP3_PD */
-#define WM8903_GP3_PD_MASK 0x0008 /* GP3_PD */
-#define WM8903_GP3_PD_SHIFT 3 /* GP3_PD */
-#define WM8903_GP3_PD_WIDTH 1 /* GP3_PD */
-#define WM8903_GP3_PU 0x0004 /* GP3_PU */
-#define WM8903_GP3_PU_MASK 0x0004 /* GP3_PU */
-#define WM8903_GP3_PU_SHIFT 2 /* GP3_PU */
-#define WM8903_GP3_PU_WIDTH 1 /* GP3_PU */
-#define WM8903_GP3_INTMODE 0x0002 /* GP3_INTMODE */
-#define WM8903_GP3_INTMODE_MASK 0x0002 /* GP3_INTMODE */
-#define WM8903_GP3_INTMODE_SHIFT 1 /* GP3_INTMODE */
-#define WM8903_GP3_INTMODE_WIDTH 1 /* GP3_INTMODE */
-#define WM8903_GP3_DB 0x0001 /* GP3_DB */
-#define WM8903_GP3_DB_MASK 0x0001 /* GP3_DB */
-#define WM8903_GP3_DB_SHIFT 0 /* GP3_DB */
-#define WM8903_GP3_DB_WIDTH 1 /* GP3_DB */
-
-/*
- * R119 (0x77) - GPIO Control 4
- */
-#define WM8903_GP4_FN_MASK 0x1F00 /* GP4_FN - [12:8] */
-#define WM8903_GP4_FN_SHIFT 8 /* GP4_FN - [12:8] */
-#define WM8903_GP4_FN_WIDTH 5 /* GP4_FN - [12:8] */
-#define WM8903_GP4_DIR 0x0080 /* GP4_DIR */
-#define WM8903_GP4_DIR_MASK 0x0080 /* GP4_DIR */
-#define WM8903_GP4_DIR_SHIFT 7 /* GP4_DIR */
-#define WM8903_GP4_DIR_WIDTH 1 /* GP4_DIR */
-#define WM8903_GP4_OP_CFG 0x0040 /* GP4_OP_CFG */
-#define WM8903_GP4_OP_CFG_MASK 0x0040 /* GP4_OP_CFG */
-#define WM8903_GP4_OP_CFG_SHIFT 6 /* GP4_OP_CFG */
-#define WM8903_GP4_OP_CFG_WIDTH 1 /* GP4_OP_CFG */
-#define WM8903_GP4_IP_CFG 0x0020 /* GP4_IP_CFG */
-#define WM8903_GP4_IP_CFG_MASK 0x0020 /* GP4_IP_CFG */
-#define WM8903_GP4_IP_CFG_SHIFT 5 /* GP4_IP_CFG */
-#define WM8903_GP4_IP_CFG_WIDTH 1 /* GP4_IP_CFG */
-#define WM8903_GP4_LVL 0x0010 /* GP4_LVL */
-#define WM8903_GP4_LVL_MASK 0x0010 /* GP4_LVL */
-#define WM8903_GP4_LVL_SHIFT 4 /* GP4_LVL */
-#define WM8903_GP4_LVL_WIDTH 1 /* GP4_LVL */
-#define WM8903_GP4_PD 0x0008 /* GP4_PD */
-#define WM8903_GP4_PD_MASK 0x0008 /* GP4_PD */
-#define WM8903_GP4_PD_SHIFT 3 /* GP4_PD */
-#define WM8903_GP4_PD_WIDTH 1 /* GP4_PD */
-#define WM8903_GP4_PU 0x0004 /* GP4_PU */
-#define WM8903_GP4_PU_MASK 0x0004 /* GP4_PU */
-#define WM8903_GP4_PU_SHIFT 2 /* GP4_PU */
-#define WM8903_GP4_PU_WIDTH 1 /* GP4_PU */
-#define WM8903_GP4_INTMODE 0x0002 /* GP4_INTMODE */
-#define WM8903_GP4_INTMODE_MASK 0x0002 /* GP4_INTMODE */
-#define WM8903_GP4_INTMODE_SHIFT 1 /* GP4_INTMODE */
-#define WM8903_GP4_INTMODE_WIDTH 1 /* GP4_INTMODE */
-#define WM8903_GP4_DB 0x0001 /* GP4_DB */
-#define WM8903_GP4_DB_MASK 0x0001 /* GP4_DB */
-#define WM8903_GP4_DB_SHIFT 0 /* GP4_DB */
-#define WM8903_GP4_DB_WIDTH 1 /* GP4_DB */
-
-/*
- * R120 (0x78) - GPIO Control 5
- */
-#define WM8903_GP5_FN_MASK 0x1F00 /* GP5_FN - [12:8] */
-#define WM8903_GP5_FN_SHIFT 8 /* GP5_FN - [12:8] */
-#define WM8903_GP5_FN_WIDTH 5 /* GP5_FN - [12:8] */
-#define WM8903_GP5_DIR 0x0080 /* GP5_DIR */
-#define WM8903_GP5_DIR_MASK 0x0080 /* GP5_DIR */
-#define WM8903_GP5_DIR_SHIFT 7 /* GP5_DIR */
-#define WM8903_GP5_DIR_WIDTH 1 /* GP5_DIR */
-#define WM8903_GP5_OP_CFG 0x0040 /* GP5_OP_CFG */
-#define WM8903_GP5_OP_CFG_MASK 0x0040 /* GP5_OP_CFG */
-#define WM8903_GP5_OP_CFG_SHIFT 6 /* GP5_OP_CFG */
-#define WM8903_GP5_OP_CFG_WIDTH 1 /* GP5_OP_CFG */
-#define WM8903_GP5_IP_CFG 0x0020 /* GP5_IP_CFG */
-#define WM8903_GP5_IP_CFG_MASK 0x0020 /* GP5_IP_CFG */
-#define WM8903_GP5_IP_CFG_SHIFT 5 /* GP5_IP_CFG */
-#define WM8903_GP5_IP_CFG_WIDTH 1 /* GP5_IP_CFG */
-#define WM8903_GP5_LVL 0x0010 /* GP5_LVL */
-#define WM8903_GP5_LVL_MASK 0x0010 /* GP5_LVL */
-#define WM8903_GP5_LVL_SHIFT 4 /* GP5_LVL */
-#define WM8903_GP5_LVL_WIDTH 1 /* GP5_LVL */
-#define WM8903_GP5_PD 0x0008 /* GP5_PD */
-#define WM8903_GP5_PD_MASK 0x0008 /* GP5_PD */
-#define WM8903_GP5_PD_SHIFT 3 /* GP5_PD */
-#define WM8903_GP5_PD_WIDTH 1 /* GP5_PD */
-#define WM8903_GP5_PU 0x0004 /* GP5_PU */
-#define WM8903_GP5_PU_MASK 0x0004 /* GP5_PU */
-#define WM8903_GP5_PU_SHIFT 2 /* GP5_PU */
-#define WM8903_GP5_PU_WIDTH 1 /* GP5_PU */
-#define WM8903_GP5_INTMODE 0x0002 /* GP5_INTMODE */
-#define WM8903_GP5_INTMODE_MASK 0x0002 /* GP5_INTMODE */
-#define WM8903_GP5_INTMODE_SHIFT 1 /* GP5_INTMODE */
-#define WM8903_GP5_INTMODE_WIDTH 1 /* GP5_INTMODE */
-#define WM8903_GP5_DB 0x0001 /* GP5_DB */
-#define WM8903_GP5_DB_MASK 0x0001 /* GP5_DB */
-#define WM8903_GP5_DB_SHIFT 0 /* GP5_DB */
-#define WM8903_GP5_DB_WIDTH 1 /* GP5_DB */
-
-/*
* R121 (0x79) - Interrupt Status 1
*/
#define WM8903_MICSHRT_EINT 0x8000 /* MICSHRT_EINT */
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index c6f0abcc5711..87f14f8675fa 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -613,7 +613,7 @@ static int wm8904_reset(struct snd_soc_codec *codec)
static int wm8904_configure_clocking(struct snd_soc_codec *codec)
{
- struct wm8904_priv *wm8904 = codec->private_data;
+ struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
unsigned int clock0, clock2, rate;
/* Gate the clock while we're updating to avoid misclocking */
@@ -669,7 +669,7 @@ static int wm8904_configure_clocking(struct snd_soc_codec *codec)
static void wm8904_set_drc(struct snd_soc_codec *codec)
{
- struct wm8904_priv *wm8904 = codec->private_data;
+ struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
struct wm8904_pdata *pdata = wm8904->pdata;
int save, i;
@@ -689,7 +689,7 @@ static int wm8904_put_drc_enum(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
- struct wm8904_priv *wm8904 = codec->private_data;
+ struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
struct wm8904_pdata *pdata = wm8904->pdata;
int value = ucontrol->value.integer.value[0];
@@ -707,7 +707,7 @@ static int wm8904_get_drc_enum(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
- struct wm8904_priv *wm8904 = codec->private_data;
+ struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
ucontrol->value.enumerated.item[0] = wm8904->drc_cfg;
@@ -716,7 +716,7 @@ static int wm8904_get_drc_enum(struct snd_kcontrol *kcontrol,
static void wm8904_set_retune_mobile(struct snd_soc_codec *codec)
{
- struct wm8904_priv *wm8904 = codec->private_data;
+ struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
struct wm8904_pdata *pdata = wm8904->pdata;
int best, best_val, save, i, cfg;
@@ -760,7 +760,7 @@ static int wm8904_put_retune_mobile_enum(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
- struct wm8904_priv *wm8904 = codec->private_data;
+ struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
struct wm8904_pdata *pdata = wm8904->pdata;
int value = ucontrol->value.integer.value[0];
@@ -778,7 +778,7 @@ static int wm8904_get_retune_mobile_enum(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
- struct wm8904_priv *wm8904 = codec->private_data;
+ struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
ucontrol->value.enumerated.item[0] = wm8904->retune_mobile_cfg;
@@ -789,7 +789,7 @@ static int deemph_settings[] = { 0, 32000, 44100, 48000 };
static int wm8904_set_deemph(struct snd_soc_codec *codec)
{
- struct wm8904_priv *wm8904 = codec->private_data;
+ struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
int val, i, best;
/* If we're using deemphasis select the nearest available sample
@@ -818,7 +818,7 @@ static int wm8904_get_deemph(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
- struct wm8904_priv *wm8904 = codec->private_data;
+ struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
return wm8904->deemph;
}
@@ -827,7 +827,7 @@ static int wm8904_put_deemph(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
- struct wm8904_priv *wm8904 = codec->private_data;
+ struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
int deemph = ucontrol->value.enumerated.item[0];
if (deemph > 1)
@@ -943,7 +943,7 @@ static int sysclk_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_codec *codec = w->codec;
- struct wm8904_priv *wm8904 = codec->private_data;
+ struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
@@ -981,7 +981,7 @@ static int out_pga_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_codec *codec = w->codec;
- struct wm8904_priv *wm8904 = codec->private_data;
+ struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
int reg, val;
int dcs_mask;
int dcs_l, dcs_r;
@@ -1429,7 +1429,7 @@ static const struct snd_soc_dapm_route wm8912_intercon[] = {
static int wm8904_add_widgets(struct snd_soc_codec *codec)
{
- struct wm8904_priv *wm8904 = codec->private_data;
+ struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
snd_soc_dapm_new_controls(codec, wm8904_core_dapm_widgets,
ARRAY_SIZE(wm8904_core_dapm_widgets));
@@ -1543,7 +1543,7 @@ static int wm8904_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
- struct wm8904_priv *wm8904 = codec->private_data;
+ struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
int ret, i, best, best_val, cur_val;
unsigned int aif1 = 0;
unsigned int aif2 = 0;
@@ -1670,7 +1670,7 @@ static int wm8904_set_sysclk(struct snd_soc_dai *dai, int clk_id,
unsigned int freq, int dir)
{
struct snd_soc_codec *codec = dai->codec;
- struct wm8904_priv *priv = codec->private_data;
+ struct wm8904_priv *priv = snd_soc_codec_get_drvdata(codec);
switch (clk_id) {
case WM8904_CLK_MCLK:
@@ -1786,7 +1786,7 @@ static int wm8904_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
unsigned int rx_mask, int slots, int slot_width)
{
struct snd_soc_codec *codec = dai->codec;
- struct wm8904_priv *wm8904 = codec->private_data;
+ struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
int aif1 = 0;
/* Don't need to validate anything if we're turning off TDM */
@@ -1943,7 +1943,7 @@ static int wm8904_set_fll(struct snd_soc_dai *dai, int fll_id, int source,
unsigned int Fref, unsigned int Fout)
{
struct snd_soc_codec *codec = dai->codec;
- struct wm8904_priv *wm8904 = codec->private_data;
+ struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
struct _fll_div fll_div;
int ret, val;
int clock2, fll1;
@@ -2095,7 +2095,7 @@ static int wm8904_digital_mute(struct snd_soc_dai *codec_dai, int mute)
static void wm8904_sync_cache(struct snd_soc_codec *codec)
{
- struct wm8904_priv *wm8904 = codec->private_data;
+ struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
int i;
if (!codec->cache_sync)
@@ -2122,7 +2122,7 @@ static void wm8904_sync_cache(struct snd_soc_codec *codec)
static int wm8904_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
- struct wm8904_priv *wm8904 = codec->private_data;
+ struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
int ret;
switch (level) {
@@ -2395,7 +2395,7 @@ static int wm8904_probe(struct platform_device *pdev)
goto pcm_err;
}
- wm8904_handle_pdata(codec->private_data);
+ wm8904_handle_pdata(snd_soc_codec_get_drvdata(codec));
wm8904_add_widgets(codec);
@@ -2426,6 +2426,7 @@ EXPORT_SYMBOL_GPL(soc_codec_dev_wm8904);
static int wm8904_register(struct wm8904_priv *wm8904,
enum snd_soc_control_type control)
{
+ struct wm8904_pdata *pdata = wm8904->pdata;
int ret;
struct snd_soc_codec *codec = &wm8904->codec;
int i;
@@ -2439,7 +2440,7 @@ static int wm8904_register(struct wm8904_priv *wm8904,
INIT_LIST_HEAD(&codec->dapm_widgets);
INIT_LIST_HEAD(&codec->dapm_paths);
- codec->private_data = wm8904;
+ snd_soc_codec_set_drvdata(codec, wm8904);
codec->name = "WM8904";
codec->owner = THIS_MODULE;
codec->bias_level = SND_SOC_BIAS_OFF;
@@ -2531,6 +2532,22 @@ static int wm8904_register(struct wm8904_priv *wm8904,
WM8904_LINEOUTRZC;
wm8904->reg_cache[WM8904_CLOCK_RATES_0] &= ~WM8904_SR_MODE;
+ /* Apply configuration from the platform data. */
+ if (wm8904->pdata) {
+ for (i = 0; i < WM8904_GPIO_REGS; i++) {
+ if (!pdata->gpio_cfg[i])
+ continue;
+
+ wm8904->reg_cache[WM8904_GPIO_CONTROL_1 + i]
+ = pdata->gpio_cfg[i] & 0xffff;
+ }
+
+ /* Zero is the default value for these anyway */
+ for (i = 0; i < WM8904_MIC_REGS; i++)
+ wm8904->reg_cache[WM8904_MIC_BIAS_CONTROL_0 + i]
+ = pdata->mic_cfg[i];
+ }
+
/* Set Class W by default - this will be managed by the Class
* G widget at runtime where bypass paths are available.
*/
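The new block above copies gpio_cfg[] and mic_cfg[] from the WM8904 platform data into the register cache before the first sync, so only non-zero GPIO entries override the chip defaults. A hedged sketch of what the corresponding board-side definition might look like; the I2C address and register values are illustrative assumptions, not values from this patch:

#include <linux/i2c.h>
#include <linux/init.h>
#include <sound/wm8904.h>

/* Board-level sketch: the register values below are placeholders, not
 * recommendations for any real board. */
static struct wm8904_pdata example_wm8904_pdata = {
	.gpio_cfg = {
		/* Entries left at zero are skipped by the driver and keep
		 * the chip defaults. */
		[0] = 0x0018,	/* hypothetical GPIO1 configuration */
	},
	.mic_cfg = {
		[0] = 0x0003,	/* hypothetical Mic Bias Control 0 value */
	},
};

static struct i2c_board_info example_wm8904_info __initdata = {
	I2C_BOARD_INFO("wm8904", 0x1a),	/* address is an example value */
	.platform_data = &example_wm8904_pdata,
};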
diff --git a/sound/soc/codecs/wm8904.h b/sound/soc/codecs/wm8904.h
index b68886df34e4..abe5059b3004 100644
--- a/sound/soc/codecs/wm8904.h
+++ b/sound/soc/codecs/wm8904.h
@@ -186,39 +186,6 @@ extern struct snd_soc_codec_device soc_codec_dev_wm8904;
#define WM8904_VMID_ENA_WIDTH 1 /* VMID_ENA */
/*
- * R6 (0x06) - Mic Bias Control 0
- */
-#define WM8904_MICDET_THR_MASK 0x0070 /* MICDET_THR - [6:4] */
-#define WM8904_MICDET_THR_SHIFT 4 /* MICDET_THR - [6:4] */
-#define WM8904_MICDET_THR_WIDTH 3 /* MICDET_THR - [6:4] */
-#define WM8904_MICSHORT_THR_MASK 0x000C /* MICSHORT_THR - [3:2] */
-#define WM8904_MICSHORT_THR_SHIFT 2 /* MICSHORT_THR - [3:2] */
-#define WM8904_MICSHORT_THR_WIDTH 2 /* MICSHORT_THR - [3:2] */
-#define WM8904_MICDET_ENA 0x0002 /* MICDET_ENA */
-#define WM8904_MICDET_ENA_MASK 0x0002 /* MICDET_ENA */
-#define WM8904_MICDET_ENA_SHIFT 1 /* MICDET_ENA */
-#define WM8904_MICDET_ENA_WIDTH 1 /* MICDET_ENA */
-#define WM8904_MICBIAS_ENA 0x0001 /* MICBIAS_ENA */
-#define WM8904_MICBIAS_ENA_MASK 0x0001 /* MICBIAS_ENA */
-#define WM8904_MICBIAS_ENA_SHIFT 0 /* MICBIAS_ENA */
-#define WM8904_MICBIAS_ENA_WIDTH 1 /* MICBIAS_ENA */
-
-/*
- * R7 (0x07) - Mic Bias Control 1
- */
-#define WM8904_MIC_DET_FILTER_ENA 0x8000 /* MIC_DET_FILTER_ENA */
-#define WM8904_MIC_DET_FILTER_ENA_MASK 0x8000 /* MIC_DET_FILTER_ENA */
-#define WM8904_MIC_DET_FILTER_ENA_SHIFT 15 /* MIC_DET_FILTER_ENA */
-#define WM8904_MIC_DET_FILTER_ENA_WIDTH 1 /* MIC_DET_FILTER_ENA */
-#define WM8904_MIC_SHORT_FILTER_ENA 0x4000 /* MIC_SHORT_FILTER_ENA */
-#define WM8904_MIC_SHORT_FILTER_ENA_MASK 0x4000 /* MIC_SHORT_FILTER_ENA */
-#define WM8904_MIC_SHORT_FILTER_ENA_SHIFT 14 /* MIC_SHORT_FILTER_ENA */
-#define WM8904_MIC_SHORT_FILTER_ENA_WIDTH 1 /* MIC_SHORT_FILTER_ENA */
-#define WM8904_MICBIAS_SEL_MASK 0x0007 /* MICBIAS_SEL - [2:0] */
-#define WM8904_MICBIAS_SEL_SHIFT 0 /* MICBIAS_SEL - [2:0] */
-#define WM8904_MICBIAS_SEL_WIDTH 3 /* MICBIAS_SEL - [2:0] */
-
-/*
* R8 (0x08) - Analogue DAC 0
*/
#define WM8904_DAC_BIAS_SEL_MASK 0x0018 /* DAC_BIAS_SEL - [4:3] */
@@ -1200,70 +1167,6 @@ extern struct snd_soc_codec_device soc_codec_dev_wm8904;
#define WM8904_FLL_CLK_REF_SRC_WIDTH 2 /* FLL_CLK_REF_SRC - [1:0] */
/*
- * R121 (0x79) - GPIO Control 1
- */
-#define WM8904_GPIO1_PU 0x0020 /* GPIO1_PU */
-#define WM8904_GPIO1_PU_MASK 0x0020 /* GPIO1_PU */
-#define WM8904_GPIO1_PU_SHIFT 5 /* GPIO1_PU */
-#define WM8904_GPIO1_PU_WIDTH 1 /* GPIO1_PU */
-#define WM8904_GPIO1_PD 0x0010 /* GPIO1_PD */
-#define WM8904_GPIO1_PD_MASK 0x0010 /* GPIO1_PD */
-#define WM8904_GPIO1_PD_SHIFT 4 /* GPIO1_PD */
-#define WM8904_GPIO1_PD_WIDTH 1 /* GPIO1_PD */
-#define WM8904_GPIO1_SEL_MASK 0x000F /* GPIO1_SEL - [3:0] */
-#define WM8904_GPIO1_SEL_SHIFT 0 /* GPIO1_SEL - [3:0] */
-#define WM8904_GPIO1_SEL_WIDTH 4 /* GPIO1_SEL - [3:0] */
-
-/*
- * R122 (0x7A) - GPIO Control 2
- */
-#define WM8904_GPIO2_PU 0x0020 /* GPIO2_PU */
-#define WM8904_GPIO2_PU_MASK 0x0020 /* GPIO2_PU */
-#define WM8904_GPIO2_PU_SHIFT 5 /* GPIO2_PU */
-#define WM8904_GPIO2_PU_WIDTH 1 /* GPIO2_PU */
-#define WM8904_GPIO2_PD 0x0010 /* GPIO2_PD */
-#define WM8904_GPIO2_PD_MASK 0x0010 /* GPIO2_PD */
-#define WM8904_GPIO2_PD_SHIFT 4 /* GPIO2_PD */
-#define WM8904_GPIO2_PD_WIDTH 1 /* GPIO2_PD */
-#define WM8904_GPIO2_SEL_MASK 0x000F /* GPIO2_SEL - [3:0] */
-#define WM8904_GPIO2_SEL_SHIFT 0 /* GPIO2_SEL - [3:0] */
-#define WM8904_GPIO2_SEL_WIDTH 4 /* GPIO2_SEL - [3:0] */
-
-/*
- * R123 (0x7B) - GPIO Control 3
- */
-#define WM8904_GPIO3_PU 0x0020 /* GPIO3_PU */
-#define WM8904_GPIO3_PU_MASK 0x0020 /* GPIO3_PU */
-#define WM8904_GPIO3_PU_SHIFT 5 /* GPIO3_PU */
-#define WM8904_GPIO3_PU_WIDTH 1 /* GPIO3_PU */
-#define WM8904_GPIO3_PD 0x0010 /* GPIO3_PD */
-#define WM8904_GPIO3_PD_MASK 0x0010 /* GPIO3_PD */
-#define WM8904_GPIO3_PD_SHIFT 4 /* GPIO3_PD */
-#define WM8904_GPIO3_PD_WIDTH 1 /* GPIO3_PD */
-#define WM8904_GPIO3_SEL_MASK 0x000F /* GPIO3_SEL - [3:0] */
-#define WM8904_GPIO3_SEL_SHIFT 0 /* GPIO3_SEL - [3:0] */
-#define WM8904_GPIO3_SEL_WIDTH 4 /* GPIO3_SEL - [3:0] */
-
-/*
- * R124 (0x7C) - GPIO Control 4
- */
-#define WM8904_GPI7_ENA 0x0200 /* GPI7_ENA */
-#define WM8904_GPI7_ENA_MASK 0x0200 /* GPI7_ENA */
-#define WM8904_GPI7_ENA_SHIFT 9 /* GPI7_ENA */
-#define WM8904_GPI7_ENA_WIDTH 1 /* GPI7_ENA */
-#define WM8904_GPI8_ENA 0x0100 /* GPI8_ENA */
-#define WM8904_GPI8_ENA_MASK 0x0100 /* GPI8_ENA */
-#define WM8904_GPI8_ENA_SHIFT 8 /* GPI8_ENA */
-#define WM8904_GPI8_ENA_WIDTH 1 /* GPI8_ENA */
-#define WM8904_GPIO_BCLK_MODE_ENA 0x0080 /* GPIO_BCLK_MODE_ENA */
-#define WM8904_GPIO_BCLK_MODE_ENA_MASK 0x0080 /* GPIO_BCLK_MODE_ENA */
-#define WM8904_GPIO_BCLK_MODE_ENA_SHIFT 7 /* GPIO_BCLK_MODE_ENA */
-#define WM8904_GPIO_BCLK_MODE_ENA_WIDTH 1 /* GPIO_BCLK_MODE_ENA */
-#define WM8904_GPIO_BCLK_SEL_MASK 0x000F /* GPIO_BCLK_SEL - [3:0] */
-#define WM8904_GPIO_BCLK_SEL_SHIFT 0 /* GPIO_BCLK_SEL - [3:0] */
-#define WM8904_GPIO_BCLK_SEL_WIDTH 4 /* GPIO_BCLK_SEL - [3:0] */
-
-/*
* R126 (0x7E) - Digital Pulls
*/
#define WM8904_MCLK_PU 0x0080 /* MCLK_PU */
diff --git a/sound/soc/codecs/wm8940.c b/sound/soc/codecs/wm8940.c
index 0c04b476487f..e3c4bbfaae27 100644
--- a/sound/soc/codecs/wm8940.c
+++ b/sound/soc/codecs/wm8940.c
@@ -581,7 +581,7 @@ static int wm8940_set_dai_sysclk(struct snd_soc_dai *codec_dai,
int clk_id, unsigned int freq, int dir)
{
struct snd_soc_codec *codec = codec_dai->codec;
- struct wm8940_priv *wm8940 = codec->private_data;
+ struct wm8940_priv *wm8940 = snd_soc_codec_get_drvdata(codec);
switch (freq) {
case 11289600:
@@ -692,7 +692,6 @@ static int wm8940_resume(struct platform_device *pdev)
ret = wm8940_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
if (ret)
goto error_ret;
- ret = wm8940_set_bias_level(codec, codec->suspend_bias_level);
error_ret:
return ret;
@@ -773,7 +772,7 @@ static int wm8940_register(struct wm8940_priv *wm8940,
INIT_LIST_HEAD(&codec->dapm_widgets);
INIT_LIST_HEAD(&codec->dapm_paths);
- codec->private_data = wm8940;
+ snd_soc_codec_set_drvdata(codec, wm8940);
codec->name = "WM8940";
codec->owner = THIS_MODULE;
codec->bias_level = SND_SOC_BIAS_OFF;
diff --git a/sound/soc/codecs/wm8955.c b/sound/soc/codecs/wm8955.c
index c8d7a809af4d..fedb76452f1b 100644
--- a/sound/soc/codecs/wm8955.c
+++ b/sound/soc/codecs/wm8955.c
@@ -235,7 +235,7 @@ static struct {
static int wm8955_configure_clocking(struct snd_soc_codec *codec)
{
- struct wm8955_priv *wm8955 = codec->private_data;
+ struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec);
int i, ret, val;
int clocking = 0;
int srate = 0;
@@ -353,7 +353,7 @@ static int deemph_settings[] = { 0, 32000, 44100, 48000 };
static int wm8955_set_deemph(struct snd_soc_codec *codec)
{
- struct wm8955_priv *wm8955 = codec->private_data;
+ struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec);
int val, i, best;
/* If we're using deemphasis select the nearest available sample
@@ -382,7 +382,7 @@ static int wm8955_get_deemph(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
- struct wm8955_priv *wm8955 = codec->private_data;
+ struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec);
return wm8955->deemph;
}
@@ -391,7 +391,7 @@ static int wm8955_put_deemph(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
- struct wm8955_priv *wm8955 = codec->private_data;
+ struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec);
int deemph = ucontrol->value.enumerated.item[0];
if (deemph > 1)
@@ -598,7 +598,7 @@ static int wm8955_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
- struct wm8955_priv *wm8955 = codec->private_data;
+ struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec);
int ret;
int wl;
@@ -647,7 +647,7 @@ static int wm8955_set_sysclk(struct snd_soc_dai *dai, int clk_id,
unsigned int freq, int dir)
{
struct snd_soc_codec *codec = dai->codec;
- struct wm8955_priv *priv = codec->private_data;
+ struct wm8955_priv *priv = snd_soc_codec_get_drvdata(codec);
int div;
switch (clk_id) {
@@ -770,7 +770,7 @@ static int wm8955_digital_mute(struct snd_soc_dai *codec_dai, int mute)
static int wm8955_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
- struct wm8955_priv *wm8955 = codec->private_data;
+ struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec);
int ret, i;
switch (level) {
@@ -971,7 +971,7 @@ static int wm8955_register(struct wm8955_priv *wm8955,
INIT_LIST_HEAD(&codec->dapm_widgets);
INIT_LIST_HEAD(&codec->dapm_paths);
- codec->private_data = wm8955;
+ snd_soc_codec_set_drvdata(codec, wm8955);
codec->name = "WM8955";
codec->owner = THIS_MODULE;
codec->bias_level = SND_SOC_BIAS_OFF;
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index f1e63e01b04d..7233cc68435a 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -23,6 +23,7 @@
#include <sound/soc-dapm.h>
#include <sound/initval.h>
#include <sound/tlv.h>
+#include <sound/wm8960.h>
#include "wm8960.h"
@@ -31,8 +32,14 @@
struct snd_soc_codec_device soc_codec_dev_wm8960;
/* R25 - Power 1 */
+#define WM8960_VMID_MASK 0x180
#define WM8960_VREF 0x40
+/* R26 - Power 2 */
+#define WM8960_PWR2_LOUT1 0x40
+#define WM8960_PWR2_ROUT1 0x20
+#define WM8960_PWR2_OUT3 0x02
+
/* R28 - Anti-pop 1 */
#define WM8960_POBCTRL 0x80
#define WM8960_BUFDCOPEN 0x10
@@ -42,6 +49,7 @@ struct snd_soc_codec_device soc_codec_dev_wm8960;
/* R29 - Anti-pop 2 */
#define WM8960_DISOP 0x40
+#define WM8960_DRES_MASK 0x30
/*
* wm8960 register cache
@@ -68,6 +76,9 @@ static const u16 wm8960_reg[WM8960_CACHEREGNUM] = {
struct wm8960_priv {
u16 reg_cache[WM8960_CACHEREGNUM];
struct snd_soc_codec codec;
+ struct snd_soc_dapm_widget *lout1;
+ struct snd_soc_dapm_widget *rout1;
+ struct snd_soc_dapm_widget *out3;
};
#define wm8960_reset(c) snd_soc_write(c, WM8960_RESET, 0)
@@ -226,10 +237,6 @@ SND_SOC_DAPM_MIXER("Right Output Mixer", WM8960_POWER3, 2, 0,
&wm8960_routput_mixer[0],
ARRAY_SIZE(wm8960_routput_mixer)),
-SND_SOC_DAPM_MIXER("Mono Output Mixer", WM8960_POWER2, 1, 0,
- &wm8960_mono_out[0],
- ARRAY_SIZE(wm8960_mono_out)),
-
SND_SOC_DAPM_PGA("LOUT1 PGA", WM8960_POWER2, 6, 0, NULL, 0),
SND_SOC_DAPM_PGA("ROUT1 PGA", WM8960_POWER2, 5, 0, NULL, 0),
@@ -248,6 +255,17 @@ SND_SOC_DAPM_OUTPUT("SPK_RN"),
SND_SOC_DAPM_OUTPUT("OUT3"),
};
+static const struct snd_soc_dapm_widget wm8960_dapm_widgets_out3[] = {
+SND_SOC_DAPM_MIXER("Mono Output Mixer", WM8960_POWER2, 1, 0,
+ &wm8960_mono_out[0],
+ ARRAY_SIZE(wm8960_mono_out)),
+};
+
+/* Represent OUT3 as a PGA so that it gets turned on with LOUT1/ROUT1 */
+static const struct snd_soc_dapm_widget wm8960_dapm_widgets_capless[] = {
+SND_SOC_DAPM_PGA("OUT3 VMID", WM8960_POWER2, 1, 0, NULL, 0),
+};
+
static const struct snd_soc_dapm_route audio_paths[] = {
{ "Left Boost Mixer", "LINPUT1 Switch", "LINPUT1" },
{ "Left Boost Mixer", "LINPUT2 Switch", "LINPUT2" },
@@ -278,9 +296,6 @@ static const struct snd_soc_dapm_route audio_paths[] = {
{ "Right Output Mixer", "Boost Bypass Switch", "Right Boost Mixer" } ,
{ "Right Output Mixer", "PCM Playback Switch", "Right DAC" },
- { "Mono Output Mixer", "Left Switch", "Left Output Mixer" },
- { "Mono Output Mixer", "Right Switch", "Right Output Mixer" },
-
{ "LOUT1 PGA", NULL, "Left Output Mixer" },
{ "ROUT1 PGA", NULL, "Right Output Mixer" },
@@ -297,17 +312,65 @@ static const struct snd_soc_dapm_route audio_paths[] = {
{ "SPK_LP", NULL, "Left Speaker Output" },
{ "SPK_RN", NULL, "Right Speaker Output" },
{ "SPK_RP", NULL, "Right Speaker Output" },
+};
+
+static const struct snd_soc_dapm_route audio_paths_out3[] = {
+ { "Mono Output Mixer", "Left Switch", "Left Output Mixer" },
+ { "Mono Output Mixer", "Right Switch", "Right Output Mixer" },
{ "OUT3", NULL, "Mono Output Mixer", }
};
+static const struct snd_soc_dapm_route audio_paths_capless[] = {
+ { "HP_L", NULL, "OUT3 VMID" },
+ { "HP_R", NULL, "OUT3 VMID" },
+
+ { "OUT3 VMID", NULL, "Left Output Mixer" },
+ { "OUT3 VMID", NULL, "Right Output Mixer" },
+};
+
static int wm8960_add_widgets(struct snd_soc_codec *codec)
{
+ struct wm8960_data *pdata = codec->dev->platform_data;
+ struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
+ struct snd_soc_dapm_widget *w;
+
snd_soc_dapm_new_controls(codec, wm8960_dapm_widgets,
ARRAY_SIZE(wm8960_dapm_widgets));
snd_soc_dapm_add_routes(codec, audio_paths, ARRAY_SIZE(audio_paths));
+ /* In capless mode OUT3 is used to provide VMID for the
+ * headphone outputs, otherwise it is used as a mono mixer.
+ */
+ if (pdata && pdata->capless) {
+ snd_soc_dapm_new_controls(codec, wm8960_dapm_widgets_capless,
+ ARRAY_SIZE(wm8960_dapm_widgets_capless));
+
+ snd_soc_dapm_add_routes(codec, audio_paths_capless,
+ ARRAY_SIZE(audio_paths_capless));
+ } else {
+ snd_soc_dapm_new_controls(codec, wm8960_dapm_widgets_out3,
+ ARRAY_SIZE(wm8960_dapm_widgets_out3));
+
+ snd_soc_dapm_add_routes(codec, audio_paths_out3,
+ ARRAY_SIZE(audio_paths_out3));
+ }
+
+	/* We need to power up the headphone output stage out of
+	 * sequence for capless mode. To avoid scanning the widget
+	 * list for the desired power state every time, look the
+	 * widgets up now and save the result.
+	 */
+ list_for_each_entry(w, &codec->dapm_widgets, list) {
+ if (strcmp(w->name, "LOUT1 PGA") == 0)
+ wm8960->lout1 = w;
+ if (strcmp(w->name, "ROUT1 PGA") == 0)
+ wm8960->rout1 = w;
+ if (strcmp(w->name, "OUT3 VMID") == 0)
+ wm8960->out3 = w;
+ }
+
return 0;
}
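
Capless (OUT3-as-VMID) operation is requested purely through platform data: when capless is set the driver installs the dedicated bias-level handler added further below and routes HP_L/HP_R off the "OUT3 VMID" PGA instead of the mono mixer. A hedged board-support sketch, assuming struct wm8960_data now lives in include/sound/wm8960.h with the capless and dres fields used in this patch; the board_wm8960_pdata name is illustrative.

    #include <sound/wm8960.h>	/* struct wm8960_data (assumed location after this patch) */

    /* Hypothetical board support: select capless headphone mode; dres is
     * the headphone discharge resistance index validated by the driver
     * (0 is the default).
     */
    static struct wm8960_data board_wm8960_pdata = {
    	.capless	= 1,
    	.dres		= 0,
    };
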
@@ -408,10 +471,9 @@ static int wm8960_mute(struct snd_soc_dai *dai, int mute)
return 0;
}
-static int wm8960_set_bias_level(struct snd_soc_codec *codec,
- enum snd_soc_bias_level level)
+static int wm8960_set_bias_level_out3(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
{
- struct wm8960_data *pdata = codec->dev->platform_data;
u16 reg;
switch (level) {
@@ -430,18 +492,8 @@ static int wm8960_set_bias_level(struct snd_soc_codec *codec,
if (codec->bias_level == SND_SOC_BIAS_OFF) {
/* Enable anti-pop features */
snd_soc_write(codec, WM8960_APOP1,
- WM8960_POBCTRL | WM8960_SOFT_ST |
- WM8960_BUFDCOPEN | WM8960_BUFIOEN);
-
- /* Discharge HP output */
- reg = WM8960_DISOP;
- if (pdata)
- reg |= pdata->dres << 4;
- snd_soc_write(codec, WM8960_APOP2, reg);
-
- msleep(400);
-
- snd_soc_write(codec, WM8960_APOP2, 0);
+ WM8960_POBCTRL | WM8960_SOFT_ST |
+ WM8960_BUFDCOPEN | WM8960_BUFIOEN);
/* Enable & ramp VMID at 2x50k */
reg = snd_soc_read(codec, WM8960_POWER1);
@@ -472,8 +524,101 @@ static int wm8960_set_bias_level(struct snd_soc_codec *codec,
/* Disable VMID and VREF, let them discharge */
snd_soc_write(codec, WM8960_POWER1, 0);
msleep(600);
+ break;
+ }
+
+ codec->bias_level = level;
+
+ return 0;
+}
+
+static int wm8960_set_bias_level_capless(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
+ int reg;
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ break;
+
+ case SND_SOC_BIAS_PREPARE:
+ switch (codec->bias_level) {
+ case SND_SOC_BIAS_STANDBY:
+			/* Enable anti-pop mode */
+ snd_soc_update_bits(codec, WM8960_APOP1,
+ WM8960_POBCTRL | WM8960_SOFT_ST |
+ WM8960_BUFDCOPEN,
+ WM8960_POBCTRL | WM8960_SOFT_ST |
+ WM8960_BUFDCOPEN);
+
+ /* Enable LOUT1, ROUT1 and OUT3 if they're enabled */
+ reg = 0;
+ if (wm8960->lout1 && wm8960->lout1->power)
+ reg |= WM8960_PWR2_LOUT1;
+ if (wm8960->rout1 && wm8960->rout1->power)
+ reg |= WM8960_PWR2_ROUT1;
+ if (wm8960->out3 && wm8960->out3->power)
+ reg |= WM8960_PWR2_OUT3;
+ snd_soc_update_bits(codec, WM8960_POWER2,
+ WM8960_PWR2_LOUT1 |
+ WM8960_PWR2_ROUT1 |
+ WM8960_PWR2_OUT3, reg);
+
+ /* Enable VMID at 2*50k */
+ snd_soc_update_bits(codec, WM8960_POWER1,
+ WM8960_VMID_MASK, 0x80);
+
+ /* Ramp */
+ msleep(100);
+
+ /* Enable VREF */
+ snd_soc_update_bits(codec, WM8960_POWER1,
+ WM8960_VREF, WM8960_VREF);
+
+ msleep(100);
+ break;
+
+ case SND_SOC_BIAS_ON:
+ /* Enable anti-pop mode */
+ snd_soc_update_bits(codec, WM8960_APOP1,
+ WM8960_POBCTRL | WM8960_SOFT_ST |
+ WM8960_BUFDCOPEN,
+ WM8960_POBCTRL | WM8960_SOFT_ST |
+ WM8960_BUFDCOPEN);
+
+ /* Disable VMID and VREF */
+ snd_soc_update_bits(codec, WM8960_POWER1,
+ WM8960_VREF | WM8960_VMID_MASK, 0);
+ break;
+
+ default:
+ break;
+ }
+ break;
- snd_soc_write(codec, WM8960_APOP1, 0);
+ case SND_SOC_BIAS_STANDBY:
+ switch (codec->bias_level) {
+ case SND_SOC_BIAS_PREPARE:
+ /* Disable HP discharge */
+ snd_soc_update_bits(codec, WM8960_APOP2,
+ WM8960_DISOP | WM8960_DRES_MASK,
+ 0);
+
+ /* Disable anti-pop features */
+ snd_soc_update_bits(codec, WM8960_APOP1,
+ WM8960_POBCTRL | WM8960_SOFT_ST |
+ WM8960_BUFDCOPEN,
+ WM8960_POBCTRL | WM8960_SOFT_ST |
+ WM8960_BUFDCOPEN);
+ break;
+
+ default:
+ break;
+ }
+ break;
+
+ case SND_SOC_BIAS_OFF:
break;
}
@@ -594,10 +739,6 @@ static int wm8960_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
u16 reg;
switch (div_id) {
- case WM8960_SYSCLKSEL:
- reg = snd_soc_read(codec, WM8960_CLOCK1) & 0x1fe;
- snd_soc_write(codec, WM8960_CLOCK1, reg | div);
- break;
case WM8960_SYSCLKDIV:
reg = snd_soc_read(codec, WM8960_CLOCK1) & 0x1f9;
snd_soc_write(codec, WM8960_CLOCK1, reg | div);
@@ -663,7 +804,7 @@ static int wm8960_suspend(struct platform_device *pdev, pm_message_t state)
struct snd_soc_device *socdev = platform_get_drvdata(pdev);
struct snd_soc_codec *codec = socdev->card->codec;
- wm8960_set_bias_level(codec, SND_SOC_BIAS_OFF);
+ codec->set_bias_level(codec, SND_SOC_BIAS_OFF);
return 0;
}
@@ -682,8 +823,8 @@ static int wm8960_resume(struct platform_device *pdev)
codec->hw_write(codec->control_data, data, 2);
}
- wm8960_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- wm8960_set_bias_level(codec, codec->suspend_bias_level);
+ codec->set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
return 0;
}
@@ -753,6 +894,8 @@ static int wm8960_register(struct wm8960_priv *wm8960,
goto err;
}
+ codec->set_bias_level = wm8960_set_bias_level_out3;
+
if (!pdata) {
dev_warn(codec->dev, "No platform data supplied\n");
} else {
@@ -760,17 +903,19 @@ static int wm8960_register(struct wm8960_priv *wm8960,
dev_err(codec->dev, "Invalid DRES: %d\n", pdata->dres);
pdata->dres = 0;
}
+
+ if (pdata->capless)
+ codec->set_bias_level = wm8960_set_bias_level_capless;
}
mutex_init(&codec->mutex);
INIT_LIST_HEAD(&codec->dapm_widgets);
INIT_LIST_HEAD(&codec->dapm_paths);
- codec->private_data = wm8960;
+ snd_soc_codec_set_drvdata(codec, wm8960);
codec->name = "WM8960";
codec->owner = THIS_MODULE;
codec->bias_level = SND_SOC_BIAS_OFF;
- codec->set_bias_level = wm8960_set_bias_level;
codec->dai = &wm8960_dai;
codec->num_dai = 1;
codec->reg_cache_size = WM8960_CACHEREGNUM;
@@ -792,7 +937,7 @@ static int wm8960_register(struct wm8960_priv *wm8960,
wm8960_dai.dev = codec->dev;
- wm8960_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+ codec->set_bias_level(codec, SND_SOC_BIAS_STANDBY);
/* Latch the update bits */
reg = snd_soc_read(codec, WM8960_LINVOL);
@@ -841,7 +986,7 @@ err:
static void wm8960_unregister(struct wm8960_priv *wm8960)
{
- wm8960_set_bias_level(&wm8960->codec, SND_SOC_BIAS_OFF);
+ wm8960->codec.set_bias_level(&wm8960->codec, SND_SOC_BIAS_OFF);
snd_soc_unregister_dai(&wm8960_dai);
snd_soc_unregister_codec(&wm8960->codec);
kfree(wm8960);
@@ -883,7 +1028,7 @@ MODULE_DEVICE_TABLE(i2c, wm8960_i2c_id);
static struct i2c_driver wm8960_i2c_driver = {
.driver = {
- .name = "WM8960 I2C Codec",
+ .name = "wm8960",
.owner = THIS_MODULE,
},
.probe = wm8960_i2c_probe,
diff --git a/sound/soc/codecs/wm8960.h b/sound/soc/codecs/wm8960.h
index c9af56c9d9d4..a5ef65481b86 100644
--- a/sound/soc/codecs/wm8960.h
+++ b/sound/soc/codecs/wm8960.h
@@ -76,7 +76,6 @@
#define WM8960_OPCLKDIV 2
#define WM8960_DCLKDIV 3
#define WM8960_TOCLKSEL 4
-#define WM8960_SYSCLKSEL 5
#define WM8960_SYSCLK_DIV_1 (0 << 1)
#define WM8960_SYSCLK_DIV_2 (2 << 1)
@@ -114,14 +113,4 @@
extern struct snd_soc_dai wm8960_dai;
extern struct snd_soc_codec_device soc_codec_dev_wm8960;
-#define WM8960_DRES_400R 0
-#define WM8960_DRES_200R 1
-#define WM8960_DRES_600R 2
-#define WM8960_DRES_150R 3
-#define WM8960_DRES_MAX 3
-
-struct wm8960_data {
- int dres;
-};
-
#endif
diff --git a/sound/soc/codecs/wm8961.c b/sound/soc/codecs/wm8961.c
index 50634ab76a5c..5b9a756242f1 100644
--- a/sound/soc/codecs/wm8961.c
+++ b/sound/soc/codecs/wm8961.c
@@ -631,7 +631,7 @@ static int wm8961_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
- struct wm8961_priv *wm8961 = codec->private_data;
+ struct wm8961_priv *wm8961 = snd_soc_codec_get_drvdata(codec);
int i, best, target, fs;
u16 reg;
@@ -722,7 +722,7 @@ static int wm8961_set_sysclk(struct snd_soc_dai *dai, int clk_id,
int dir)
{
struct snd_soc_codec *codec = dai->codec;
- struct wm8961_priv *wm8961 = codec->private_data;
+ struct wm8961_priv *wm8961 = snd_soc_codec_get_drvdata(codec);
u16 reg = snd_soc_read(codec, WM8961_CLOCKING1);
if (freq > 33000000) {
@@ -1065,7 +1065,7 @@ static int wm8961_register(struct wm8961_priv *wm8961)
INIT_LIST_HEAD(&codec->dapm_widgets);
INIT_LIST_HEAD(&codec->dapm_paths);
- codec->private_data = wm8961;
+ snd_soc_codec_set_drvdata(codec, wm8961);
codec->name = "WM8961";
codec->owner = THIS_MODULE;
codec->dai = &wm8961_dai;
diff --git a/sound/soc/codecs/wm8971.c b/sound/soc/codecs/wm8971.c
index a65b781af512..a99620f335d2 100644
--- a/sound/soc/codecs/wm8971.c
+++ b/sound/soc/codecs/wm8971.c
@@ -415,7 +415,7 @@ static int wm8971_set_dai_sysclk(struct snd_soc_dai *codec_dai,
int clk_id, unsigned int freq, int dir)
{
struct snd_soc_codec *codec = codec_dai->codec;
- struct wm8971_priv *wm8971 = codec->private_data;
+ struct wm8971_priv *wm8971 = snd_soc_codec_get_drvdata(codec);
switch (freq) {
case 11289600:
@@ -494,7 +494,7 @@ static int wm8971_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
- struct wm8971_priv *wm8971 = codec->private_data;
+ struct wm8971_priv *wm8971 = snd_soc_codec_get_drvdata(codec);
u16 iface = snd_soc_read(codec, WM8971_IFACE) & 0x1f3;
u16 srate = snd_soc_read(codec, WM8971_SRATE) & 0x1c0;
int coeff = get_coeff(wm8971->sysclk, params_rate(params));
@@ -820,7 +820,7 @@ static int wm8971_probe(struct platform_device *pdev)
return -ENOMEM;
}
- codec->private_data = wm8971;
+ snd_soc_codec_set_drvdata(codec, wm8971);
socdev->card->codec = codec;
mutex_init(&codec->mutex);
INIT_LIST_HEAD(&codec->dapm_widgets);
@@ -830,7 +830,7 @@ static int wm8971_probe(struct platform_device *pdev)
INIT_DELAYED_WORK(&codec->delayed_work, wm8971_work);
wm8971_workq = create_workqueue("wm8971");
if (wm8971_workq == NULL) {
- kfree(codec->private_data);
+ kfree(snd_soc_codec_get_drvdata(codec));
kfree(codec);
return -ENOMEM;
}
@@ -844,7 +844,7 @@ static int wm8971_probe(struct platform_device *pdev)
if (ret != 0) {
destroy_workqueue(wm8971_workq);
- kfree(codec->private_data);
+ kfree(snd_soc_codec_get_drvdata(codec));
kfree(codec);
}
@@ -867,7 +867,7 @@ static int wm8971_remove(struct platform_device *pdev)
i2c_unregister_device(codec->control_data);
i2c_del_driver(&wm8971_i2c_driver);
#endif
- kfree(codec->private_data);
+ kfree(snd_soc_codec_get_drvdata(codec));
kfree(codec);
return 0;
diff --git a/sound/soc/codecs/wm8974.c b/sound/soc/codecs/wm8974.c
index 69708c4cc004..a2c4b2f37cca 100644
--- a/sound/soc/codecs/wm8974.c
+++ b/sound/soc/codecs/wm8974.c
@@ -181,7 +181,7 @@ SOC_SINGLE("ADC 128x Oversampling Switch", WM8974_ADC, 8, 1, 0),
static const struct snd_kcontrol_new wm8974_speaker_mixer_controls[] = {
SOC_DAPM_SINGLE("Line Bypass Switch", WM8974_SPKMIX, 1, 1, 0),
SOC_DAPM_SINGLE("Aux Playback Switch", WM8974_SPKMIX, 5, 1, 0),
-SOC_DAPM_SINGLE("PCM Playback Switch", WM8974_SPKMIX, 0, 1, 1),
+SOC_DAPM_SINGLE("PCM Playback Switch", WM8974_SPKMIX, 0, 1, 0),
};
/* Mono Output Mixer */
@@ -609,7 +609,7 @@ static int wm8974_resume(struct platform_device *pdev)
codec->hw_write(codec->control_data, data, 2);
}
wm8974_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- wm8974_set_bias_level(codec, codec->suspend_bias_level);
+
return 0;
}
@@ -677,7 +677,7 @@ static __devinit int wm8974_register(struct wm8974_priv *wm8974)
INIT_LIST_HEAD(&codec->dapm_widgets);
INIT_LIST_HEAD(&codec->dapm_paths);
- codec->private_data = wm8974;
+ snd_soc_codec_set_drvdata(codec, wm8974);
codec->name = "WM8974";
codec->owner = THIS_MODULE;
codec->bias_level = SND_SOC_BIAS_OFF;
diff --git a/sound/soc/codecs/wm8978.c b/sound/soc/codecs/wm8978.c
index 526f56b09066..51d5f433215c 100644
--- a/sound/soc/codecs/wm8978.c
+++ b/sound/soc/codecs/wm8978.c
@@ -439,7 +439,7 @@ static int wm8978_enum_mclk(unsigned int f_out, unsigned int f_mclk,
*/
static int wm8978_configure_pll(struct snd_soc_codec *codec)
{
- struct wm8978_priv *wm8978 = codec->private_data;
+ struct wm8978_priv *wm8978 = snd_soc_codec_get_drvdata(codec);
struct wm8978_pll_div pll_div;
unsigned int f_opclk = wm8978->f_opclk, f_mclk = wm8978->f_mclk,
f_256fs = wm8978->f_256fs;
@@ -535,7 +535,7 @@ static int wm8978_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
int div_id, int div)
{
struct snd_soc_codec *codec = codec_dai->codec;
- struct wm8978_priv *wm8978 = codec->private_data;
+ struct wm8978_priv *wm8978 = snd_soc_codec_get_drvdata(codec);
int ret = 0;
switch (div_id) {
@@ -580,7 +580,7 @@ static int wm8978_set_dai_sysclk(struct snd_soc_dai *codec_dai, int clk_id,
unsigned int freq, int dir)
{
struct snd_soc_codec *codec = codec_dai->codec;
- struct wm8978_priv *wm8978 = codec->private_data;
+ struct wm8978_priv *wm8978 = snd_soc_codec_get_drvdata(codec);
int ret = 0;
dev_dbg(codec->dev, "%s: ID %d, freq %u\n", __func__, clk_id, freq);
@@ -692,7 +692,7 @@ static int wm8978_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
- struct wm8978_priv *wm8978 = codec->private_data;
+ struct wm8978_priv *wm8978 = snd_soc_codec_get_drvdata(codec);
/* Word length mask = 0x60 */
u16 iface_ctl = snd_soc_read(codec, WM8978_AUDIO_INTERFACE) & ~0x60;
/* Sampling rate mask = 0xe (for filters) */
@@ -912,7 +912,7 @@ static int wm8978_resume(struct platform_device *pdev)
{
struct snd_soc_device *socdev = platform_get_drvdata(pdev);
struct snd_soc_codec *codec = socdev->card->codec;
- struct wm8978_priv *wm8978 = codec->private_data;
+ struct wm8978_priv *wm8978 = snd_soc_codec_get_drvdata(codec);
int i;
u16 *cache = codec->reg_cache;
@@ -1020,7 +1020,7 @@ static __devinit int wm8978_register(struct wm8978_priv *wm8978)
INIT_LIST_HEAD(&codec->dapm_widgets);
INIT_LIST_HEAD(&codec->dapm_paths);
- codec->private_data = wm8978;
+ snd_soc_codec_set_drvdata(codec, wm8978);
codec->name = "WM8978";
codec->owner = THIS_MODULE;
codec->bias_level = SND_SOC_BIAS_OFF;
diff --git a/sound/soc/codecs/wm8988.c b/sound/soc/codecs/wm8988.c
index bb18c3ecfeb9..0417dae32e6f 100644
--- a/sound/soc/codecs/wm8988.c
+++ b/sound/soc/codecs/wm8988.c
@@ -495,7 +495,7 @@ static int wm8988_set_dai_sysclk(struct snd_soc_dai *codec_dai,
int clk_id, unsigned int freq, int dir)
{
struct snd_soc_codec *codec = codec_dai->codec;
- struct wm8988_priv *wm8988 = codec->private_data;
+ struct wm8988_priv *wm8988 = snd_soc_codec_get_drvdata(codec);
switch (freq) {
case 11289600:
@@ -585,7 +585,7 @@ static int wm8988_pcm_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
- struct wm8988_priv *wm8988 = codec->private_data;
+ struct wm8988_priv *wm8988 = snd_soc_codec_get_drvdata(codec);
/* The set of sample rates that can be supported depends on the
* MCLK supplied to the CODEC - enforce this.
@@ -610,7 +610,7 @@ static int wm8988_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
- struct wm8988_priv *wm8988 = codec->private_data;
+ struct wm8988_priv *wm8988 = snd_soc_codec_get_drvdata(codec);
u16 iface = snd_soc_read(codec, WM8988_IFACE) & 0x1f3;
u16 srate = snd_soc_read(codec, WM8988_SRATE) & 0x180;
int coeff;
@@ -833,7 +833,7 @@ static int wm8988_register(struct wm8988_priv *wm8988,
INIT_LIST_HEAD(&codec->dapm_widgets);
INIT_LIST_HEAD(&codec->dapm_paths);
- codec->private_data = wm8988;
+ snd_soc_codec_set_drvdata(codec, wm8988);
codec->name = "WM8988";
codec->owner = THIS_MODULE;
codec->dai = &wm8988_dai;
diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c
index 831f4730bfd5..7b536d923ea9 100644
--- a/sound/soc/codecs/wm8990.c
+++ b/sound/soc/codecs/wm8990.c
@@ -1012,7 +1012,7 @@ static int wm8990_set_dai_sysclk(struct snd_soc_dai *codec_dai,
int clk_id, unsigned int freq, int dir)
{
struct snd_soc_codec *codec = codec_dai->codec;
- struct wm8990_priv *wm8990 = codec->private_data;
+ struct wm8990_priv *wm8990 = snd_soc_codec_get_drvdata(codec);
wm8990->sysclk = freq;
return 0;
@@ -1524,7 +1524,7 @@ static int wm8990_probe(struct platform_device *pdev)
return -ENOMEM;
}
- codec->private_data = wm8990;
+ snd_soc_codec_set_drvdata(codec, wm8990);
socdev->card->codec = codec;
mutex_init(&codec->mutex);
INIT_LIST_HEAD(&codec->dapm_widgets);
@@ -1541,7 +1541,7 @@ static int wm8990_probe(struct platform_device *pdev)
#endif
if (ret != 0) {
- kfree(codec->private_data);
+ kfree(snd_soc_codec_get_drvdata(codec));
kfree(codec);
}
return ret;
@@ -1561,7 +1561,7 @@ static int wm8990_remove(struct platform_device *pdev)
i2c_unregister_device(codec->control_data);
i2c_del_driver(&wm8990_i2c_driver);
#endif
- kfree(codec->private_data);
+ kfree(snd_soc_codec_get_drvdata(codec));
kfree(codec);
return 0;
diff --git a/sound/soc/codecs/wm8993.c b/sound/soc/codecs/wm8993.c
index 03e8b1a6a56c..d8d300c6175f 100644
--- a/sound/soc/codecs/wm8993.c
+++ b/sound/soc/codecs/wm8993.c
@@ -371,7 +371,7 @@ static int wm8993_set_fll(struct snd_soc_dai *dai, int fll_id, int source,
unsigned int Fref, unsigned int Fout)
{
struct snd_soc_codec *codec = dai->codec;
- struct wm8993_priv *wm8993 = codec->private_data;
+ struct wm8993_priv *wm8993 = snd_soc_codec_get_drvdata(codec);
u16 reg1, reg4, reg5;
struct _fll_div fll_div;
int ret;
@@ -458,7 +458,7 @@ static int wm8993_set_fll(struct snd_soc_dai *dai, int fll_id, int source,
static int configure_clock(struct snd_soc_codec *codec)
{
- struct wm8993_priv *wm8993 = codec->private_data;
+ struct wm8993_priv *wm8993 = snd_soc_codec_get_drvdata(codec);
unsigned int reg;
/* This should be done on init() for bypass paths */
@@ -717,7 +717,7 @@ static int class_w_put(struct snd_kcontrol *kcontrol,
{
struct snd_soc_dapm_widget *widget = snd_kcontrol_chip(kcontrol);
struct snd_soc_codec *codec = widget->codec;
- struct wm8993_priv *wm8993 = codec->private_data;
+ struct wm8993_priv *wm8993 = snd_soc_codec_get_drvdata(codec);
int ret;
/* Turn it off if we're using the main output mixer */
@@ -949,7 +949,7 @@ static void wm8993_cache_restore(struct snd_soc_codec *codec)
static int wm8993_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
- struct wm8993_priv *wm8993 = codec->private_data;
+ struct wm8993_priv *wm8993 = snd_soc_codec_get_drvdata(codec);
int ret;
switch (level) {
@@ -1047,7 +1047,7 @@ static int wm8993_set_sysclk(struct snd_soc_dai *codec_dai,
int clk_id, unsigned int freq, int dir)
{
struct snd_soc_codec *codec = codec_dai->codec;
- struct wm8993_priv *wm8993 = codec->private_data;
+ struct wm8993_priv *wm8993 = snd_soc_codec_get_drvdata(codec);
switch (clk_id) {
case WM8993_SYSCLK_MCLK:
@@ -1067,7 +1067,7 @@ static int wm8993_set_dai_fmt(struct snd_soc_dai *dai,
unsigned int fmt)
{
struct snd_soc_codec *codec = dai->codec;
- struct wm8993_priv *wm8993 = codec->private_data;
+ struct wm8993_priv *wm8993 = snd_soc_codec_get_drvdata(codec);
unsigned int aif1 = snd_soc_read(codec, WM8993_AUDIO_INTERFACE_1);
unsigned int aif4 = snd_soc_read(codec, WM8993_AUDIO_INTERFACE_4);
@@ -1163,7 +1163,7 @@ static int wm8993_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
- struct wm8993_priv *wm8993 = codec->private_data;
+ struct wm8993_priv *wm8993 = snd_soc_codec_get_drvdata(codec);
int ret, i, best, best_val, cur_val;
unsigned int clocking1, clocking3, aif1, aif4;
@@ -1328,7 +1328,7 @@ static int wm8993_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
unsigned int rx_mask, int slots, int slot_width)
{
struct snd_soc_codec *codec = dai->codec;
- struct wm8993_priv *wm8993 = codec->private_data;
+ struct wm8993_priv *wm8993 = snd_soc_codec_get_drvdata(codec);
int aif1 = 0;
int aif2 = 0;
@@ -1431,7 +1431,7 @@ static int wm8993_probe(struct platform_device *pdev)
socdev->card->codec = wm8993_codec;
codec = wm8993_codec;
- wm8993 = codec->private_data;
+ wm8993 = snd_soc_codec_get_drvdata(codec);
ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
if (ret < 0) {
@@ -1478,7 +1478,7 @@ static int wm8993_suspend(struct platform_device *pdev, pm_message_t state)
{
struct snd_soc_device *socdev = platform_get_drvdata(pdev);
struct snd_soc_codec *codec = socdev->card->codec;
- struct wm8993_priv *wm8993 = codec->private_data;
+ struct wm8993_priv *wm8993 = snd_soc_codec_get_drvdata(codec);
int fll_fout = wm8993->fll_fout;
int fll_fref = wm8993->fll_fref;
int ret;
@@ -1502,7 +1502,7 @@ static int wm8993_resume(struct platform_device *pdev)
{
struct snd_soc_device *socdev = platform_get_drvdata(pdev);
struct snd_soc_codec *codec = socdev->card->codec;
- struct wm8993_priv *wm8993 = codec->private_data;
+ struct wm8993_priv *wm8993 = snd_soc_codec_get_drvdata(codec);
int ret;
wm8993_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
@@ -1571,7 +1571,7 @@ static int wm8993_i2c_probe(struct i2c_client *i2c,
codec->set_bias_level = wm8993_set_bias_level;
codec->dai = &wm8993_dai;
codec->num_dai = 1;
- codec->private_data = wm8993;
+ snd_soc_codec_set_drvdata(codec, wm8993);
wm8993->hubs_data.hp_startup_mode = 1;
wm8993->hubs_data.dcs_codes = -2;
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 9da0724cd47a..e84a1177f350 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -62,6 +62,12 @@ static int wm8994_retune_mobile_base[] = {
#define WM8994_REG_CACHE_SIZE 0x621
+struct wm8994_micdet {
+ struct snd_soc_jack *jack;
+ int det;
+ int shrt;
+};
+
/* codec private data */
struct wm8994_priv {
struct wm_hubs_data hubs;
@@ -87,6 +93,8 @@ struct wm8994_priv {
int retune_mobile_cfg[WM8994_NUM_EQ];
struct soc_enum retune_mobile_enum;
+ struct wm8994_micdet micdet[2];
+
struct wm8994_pdata *pdata;
};
@@ -1696,13 +1704,15 @@ static int wm8994_volatile(unsigned int reg)
static int wm8994_write(struct snd_soc_codec *codec, unsigned int reg,
unsigned int value)
{
- struct wm8994_priv *wm8994 = codec->private_data;
+ struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
BUG_ON(reg > WM8994_MAX_REGISTER);
if (!wm8994_volatile(reg))
wm8994->reg_cache[reg] = value;
+ dev_dbg(codec->dev, "0x%x = 0x%x\n", reg, value);
+
return wm8994_reg_write(codec->control_data, reg, value);
}
@@ -1721,7 +1731,7 @@ static unsigned int wm8994_read(struct snd_soc_codec *codec,
static int configure_aif_clock(struct snd_soc_codec *codec, int aif)
{
- struct wm8994_priv *wm8994 = codec->private_data;
+ struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
int rate;
int reg1 = 0;
int offset;
@@ -1762,6 +1772,11 @@ static int configure_aif_clock(struct snd_soc_codec *codec, int aif)
dev_dbg(codec->dev, "Dividing AIF%d clock to %dHz\n",
aif + 1, rate);
}
+
+ if (rate && rate < 3000000)
+ dev_warn(codec->dev, "AIF%dCLK is %dHz, should be >=3MHz for optimal performance\n",
+ aif + 1, rate);
+
wm8994->aifclk[aif] = rate;
snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1 + offset,
@@ -1773,7 +1788,7 @@ static int configure_aif_clock(struct snd_soc_codec *codec, int aif)
static int configure_clock(struct snd_soc_codec *codec)
{
- struct wm8994_priv *wm8994 = codec->private_data;
+ struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
int old, new;
/* Bring up the AIF clocks first */
@@ -1870,7 +1885,7 @@ static int wm8994_put_drc_sw(struct snd_kcontrol *kcontrol,
static void wm8994_set_drc(struct snd_soc_codec *codec, int drc)
{
- struct wm8994_priv *wm8994 = codec->private_data;
+ struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
struct wm8994_pdata *pdata = wm8994->pdata;
int base = wm8994_drc_base[drc];
int cfg = wm8994->drc_cfg[drc];
@@ -1906,7 +1921,7 @@ static int wm8994_put_drc_enum(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
- struct wm8994_priv *wm8994 = codec->private_data;
+ struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
struct wm8994_pdata *pdata = wm8994->pdata;
int drc = wm8994_get_drc(kcontrol->id.name);
int value = ucontrol->value.integer.value[0];
@@ -1928,7 +1943,7 @@ static int wm8994_get_drc_enum(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
- struct wm8994_priv *wm8994 = codec->private_data;
+ struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
int drc = wm8994_get_drc(kcontrol->id.name);
ucontrol->value.enumerated.item[0] = wm8994->drc_cfg[drc];
@@ -1938,7 +1953,7 @@ static int wm8994_get_drc_enum(struct snd_kcontrol *kcontrol,
static void wm8994_set_retune_mobile(struct snd_soc_codec *codec, int block)
{
- struct wm8994_priv *wm8994 = codec->private_data;
+ struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
struct wm8994_pdata *pdata = wm8994->pdata;
int base = wm8994_retune_mobile_base[block];
int iface, best, best_val, save, i, cfg;
@@ -2009,7 +2024,7 @@ static int wm8994_put_retune_mobile_enum(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
- struct wm8994_priv *wm8994 = codec->private_data;
+ struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
struct wm8994_pdata *pdata = wm8994->pdata;
int block = wm8994_get_retune_mobile_block(kcontrol->id.name);
int value = ucontrol->value.integer.value[0];
@@ -2031,7 +2046,7 @@ static int wm8994_get_retune_mobile_enum(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
- struct wm8994_priv *wm8994 = codec->private_data;
+ struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
int block = wm8994_get_retune_mobile_block(kcontrol->id.name);
ucontrol->value.enumerated.item[0] = wm8994->retune_mobile_cfg[block];
@@ -2182,13 +2197,13 @@ static void wm8994_update_class_w(struct snd_soc_codec *codec)
/* Only support direct DAC->headphone paths */
reg = snd_soc_read(codec, WM8994_OUTPUT_MIXER_1);
if (!(reg & WM8994_DAC1L_TO_HPOUT1L)) {
- dev_dbg(codec->dev, "HPL connected to output mixer\n");
+ dev_vdbg(codec->dev, "HPL connected to output mixer\n");
enable = 0;
}
reg = snd_soc_read(codec, WM8994_OUTPUT_MIXER_2);
if (!(reg & WM8994_DAC1R_TO_HPOUT1R)) {
- dev_dbg(codec->dev, "HPR connected to output mixer\n");
+ dev_vdbg(codec->dev, "HPR connected to output mixer\n");
enable = 0;
}
@@ -2196,26 +2211,26 @@ static void wm8994_update_class_w(struct snd_soc_codec *codec)
reg = snd_soc_read(codec, WM8994_DAC1_LEFT_MIXER_ROUTING);
switch (reg) {
case WM8994_AIF2DACL_TO_DAC1L:
- dev_dbg(codec->dev, "Class W source AIF2DAC\n");
+ dev_vdbg(codec->dev, "Class W source AIF2DAC\n");
source = 2 << WM8994_CP_DYN_SRC_SEL_SHIFT;
break;
case WM8994_AIF1DAC2L_TO_DAC1L:
- dev_dbg(codec->dev, "Class W source AIF1DAC2\n");
+ dev_vdbg(codec->dev, "Class W source AIF1DAC2\n");
source = 1 << WM8994_CP_DYN_SRC_SEL_SHIFT;
break;
case WM8994_AIF1DAC1L_TO_DAC1L:
- dev_dbg(codec->dev, "Class W source AIF1DAC1\n");
+ dev_vdbg(codec->dev, "Class W source AIF1DAC1\n");
source = 0 << WM8994_CP_DYN_SRC_SEL_SHIFT;
break;
default:
- dev_dbg(codec->dev, "DAC mixer setting: %x\n", reg);
+ dev_vdbg(codec->dev, "DAC mixer setting: %x\n", reg);
enable = 0;
break;
}
reg_r = snd_soc_read(codec, WM8994_DAC1_RIGHT_MIXER_ROUTING);
if (reg_r != reg) {
- dev_dbg(codec->dev, "Left and right DAC mixers different\n");
+ dev_vdbg(codec->dev, "Left and right DAC mixers different\n");
enable = 0;
}
@@ -2777,9 +2792,18 @@ static int wm8994_get_fll_config(struct fll_div *fll,
if (freq_in > 1000000) {
fll->fll_fratio = 0;
- } else {
+ } else if (freq_in > 256000) {
+ fll->fll_fratio = 1;
+ freq_in *= 2;
+ } else if (freq_in > 128000) {
+ fll->fll_fratio = 2;
+ freq_in *= 4;
+ } else if (freq_in > 64000) {
fll->fll_fratio = 3;
freq_in *= 8;
+ } else {
+ fll->fll_fratio = 4;
+ freq_in *= 16;
}
pr_debug("FLL_FRATIO=%d, Fref=%dHz\n", fll->fll_fratio, freq_in);
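
Worked example of the new FLL_FRATIO selection: a 32.768 kHz reference misses every threshold above, so the final branch sets FLL_FRATIO to 4 and scales the effective reference to 32768 * 16 = 524288 Hz; before this change any reference at or below 1 MHz was forced onto the single x8 ratio.
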
@@ -2812,7 +2836,7 @@ static int wm8994_set_fll(struct snd_soc_dai *dai, int id, int src,
unsigned int freq_in, unsigned int freq_out)
{
struct snd_soc_codec *codec = dai->codec;
- struct wm8994_priv *wm8994 = codec->private_data;
+ struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
int reg_offset, ret;
struct fll_div fll;
u16 reg, aif1, aif2;
@@ -2836,6 +2860,21 @@ static int wm8994_set_fll(struct snd_soc_dai *dai, int id, int src,
return -EINVAL;
}
+ switch (src) {
+ case 0:
+ /* Allow no source specification when stopping */
+ if (freq_out)
+ return -EINVAL;
+ break;
+ case WM8994_FLL_SRC_MCLK1:
+ case WM8994_FLL_SRC_MCLK2:
+ case WM8994_FLL_SRC_LRCLK:
+ case WM8994_FLL_SRC_BCLK:
+ break;
+ default:
+ return -EINVAL;
+ }
+
/* Are we changing anything? */
if (wm8994->fll[id].src == src &&
wm8994->fll[id].in == freq_in && wm8994->fll[id].out == freq_out)
@@ -2876,8 +2915,10 @@ static int wm8994_set_fll(struct snd_soc_dai *dai, int id, int src,
fll.n << WM8994_FLL1_N_SHIFT);
snd_soc_update_bits(codec, WM8994_FLL1_CONTROL_5 + reg_offset,
- WM8994_FLL1_REFCLK_DIV_MASK,
- fll.clk_ref_div << WM8994_FLL1_REFCLK_DIV_SHIFT);
+ WM8994_FLL1_REFCLK_DIV_MASK |
+ WM8994_FLL1_REFCLK_SRC_MASK,
+ (fll.clk_ref_div << WM8994_FLL1_REFCLK_DIV_SHIFT) |
+ (src - 1));
/* Enable (with fractional mode if required) */
if (freq_out) {
@@ -2892,6 +2933,7 @@ static int wm8994_set_fll(struct snd_soc_dai *dai, int id, int src,
wm8994->fll[id].in = freq_in;
wm8994->fll[id].out = freq_out;
+ wm8994->fll[id].src = src;
/* Enable any gated AIF clocks */
snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
@@ -2908,7 +2950,7 @@ static int wm8994_set_dai_sysclk(struct snd_soc_dai *dai,
int clk_id, unsigned int freq, int dir)
{
struct snd_soc_codec *codec = dai->codec;
- struct wm8994_priv *wm8994 = codec->private_data;
+ struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
switch (dai->id) {
case 1:
@@ -3174,7 +3216,7 @@ static int wm8994_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
- struct wm8994_priv *wm8994 = codec->private_data;
+ struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
int aif1_reg;
int bclk_reg;
int lrclk_reg;
@@ -3338,6 +3380,36 @@ static int wm8994_aif_mute(struct snd_soc_dai *codec_dai, int mute)
return 0;
}
+static int wm8994_set_tristate(struct snd_soc_dai *codec_dai, int tristate)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ int reg, val, mask;
+
+ switch (codec_dai->id) {
+ case 1:
+ reg = WM8994_AIF1_MASTER_SLAVE;
+ mask = WM8994_AIF1_TRI;
+ break;
+ case 2:
+ reg = WM8994_AIF2_MASTER_SLAVE;
+ mask = WM8994_AIF2_TRI;
+ break;
+ case 3:
+ reg = WM8994_POWER_MANAGEMENT_6;
+ mask = WM8994_AIF3_TRI;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (tristate)
+ val = mask;
+ else
+ val = 0;
+
+	return snd_soc_update_bits(codec, reg, mask, val);
+}
+
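
Because the new callback is wired into the DAI ops below, machine drivers reach it through the generic snd_soc_dai_set_tristate() helper. A minimal sketch; the example_park_aif() name and the surrounding bus-sharing scenario are illustrative only.

    #include <sound/soc.h>

    /* Hypothetical machine-driver fragment: park the codec's AIF pins in
     * high impedance while another master drives a shared audio bus.
     */
    static void example_park_aif(struct snd_soc_dai *codec_dai)
    {
    	snd_soc_dai_set_tristate(codec_dai, 1);	/* pins tri-stated */
    	/* ... other bus master active ... */
    	snd_soc_dai_set_tristate(codec_dai, 0);	/* resume driving */
    }
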
#define WM8994_RATES SNDRV_PCM_RATE_8000_96000
#define WM8994_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
@@ -3349,6 +3421,7 @@ static struct snd_soc_dai_ops wm8994_aif1_dai_ops = {
.hw_params = wm8994_hw_params,
.digital_mute = wm8994_aif_mute,
.set_pll = wm8994_set_fll,
+ .set_tristate = wm8994_set_tristate,
};
static struct snd_soc_dai_ops wm8994_aif2_dai_ops = {
@@ -3357,6 +3430,11 @@ static struct snd_soc_dai_ops wm8994_aif2_dai_ops = {
.hw_params = wm8994_hw_params,
.digital_mute = wm8994_aif_mute,
.set_pll = wm8994_set_fll,
+ .set_tristate = wm8994_set_tristate,
+};
+
+static struct snd_soc_dai_ops wm8994_aif3_dai_ops = {
+ .set_tristate = wm8994_set_tristate,
};
struct snd_soc_dai wm8994_dai[] = {
@@ -3400,6 +3478,7 @@ struct snd_soc_dai wm8994_dai[] = {
},
{
.name = "WM8994 AIF3",
+ .id = 3,
.playback = {
.stream_name = "AIF3 Playback",
.channels_min = 2,
@@ -3414,6 +3493,7 @@ struct snd_soc_dai wm8994_dai[] = {
.rates = WM8994_RATES,
.formats = WM8994_FORMATS,
},
+ .ops = &wm8994_aif3_dai_ops,
}
};
EXPORT_SYMBOL_GPL(wm8994_dai);
@@ -3423,7 +3503,7 @@ static int wm8994_suspend(struct platform_device *pdev, pm_message_t state)
{
struct snd_soc_device *socdev = platform_get_drvdata(pdev);
struct snd_soc_codec *codec = socdev->card->codec;
- struct wm8994_priv *wm8994 = codec->private_data;
+ struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
int i, ret;
for (i = 0; i < ARRAY_SIZE(wm8994->fll); i++) {
@@ -3444,7 +3524,7 @@ static int wm8994_resume(struct platform_device *pdev)
{
struct snd_soc_device *socdev = platform_get_drvdata(pdev);
struct snd_soc_codec *codec = socdev->card->codec;
- struct wm8994_priv *wm8994 = codec->private_data;
+ struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
u16 *reg_cache = codec->reg_cache;
int i, ret;
@@ -3469,6 +3549,9 @@ static int wm8994_resume(struct platform_device *pdev)
wm8994_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
for (i = 0; i < ARRAY_SIZE(wm8994->fll); i++) {
+ if (!wm8994->fll_suspend[i].out)
+ continue;
+
ret = wm8994_set_fll(&codec->dai[0], i + 1,
wm8994->fll_suspend[i].src,
wm8994->fll_suspend[i].in,
@@ -3639,7 +3722,7 @@ static int wm8994_probe(struct platform_device *pdev)
return ret;
}
- wm8994_handle_pdata(codec->private_data);
+ wm8994_handle_pdata(snd_soc_codec_get_drvdata(codec));
wm_hubs_add_analogue_controls(codec);
snd_soc_add_controls(codec, wm8994_snd_controls,
@@ -3670,6 +3753,96 @@ struct snd_soc_codec_device soc_codec_dev_wm8994 = {
};
EXPORT_SYMBOL_GPL(soc_codec_dev_wm8994);
+/**
+ * wm8994_mic_detect - Enable microphone detection via the WM8994 IRQ
+ *
+ * @codec: WM8994 codec
+ * @jack: jack to report detection events on
+ * @micbias: microphone bias to detect on
+ * @det: value to report for presence detection
+ * @shrt: value to report for short detection
+ *
+ * Enable microphone detection via IRQ on the WM8994. If GPIOs are
+ * being used to bring out signals to the processor then only platform
+ * data configuration is needed for WM8903 and processor GPIOs should
+ * be configured using snd_soc_jack_add_gpios() instead.
+ *
+ * Configuration of detection levels is available via the micbias1_lvl
+ * and micbias2_lvl platform data members.
+ */
+int wm8994_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack,
+ int micbias, int det, int shrt)
+{
+ struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+ struct wm8994_micdet *micdet;
+ int reg;
+
+ switch (micbias) {
+ case 1:
+ micdet = &wm8994->micdet[0];
+ break;
+ case 2:
+ micdet = &wm8994->micdet[1];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(codec->dev, "Configuring microphone detection on %d: %x %x\n",
+ micbias, det, shrt);
+
+ /* Store the configuration */
+ micdet->jack = jack;
+ micdet->det = det;
+ micdet->shrt = shrt;
+
+ /* If either of the jacks is set up then enable detection */
+ if (wm8994->micdet[0].jack || wm8994->micdet[1].jack)
+ reg = WM8994_MICD_ENA;
+ else
+ reg = 0;
+
+ snd_soc_update_bits(codec, WM8994_MICBIAS, WM8994_MICD_ENA, reg);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wm8994_mic_detect);
+
+static irqreturn_t wm8994_mic_irq(int irq, void *data)
+{
+ struct wm8994_priv *priv = data;
+ struct snd_soc_codec *codec = &priv->codec;
+ int reg;
+ int report;
+
+ reg = snd_soc_read(codec, WM8994_INTERRUPT_RAW_STATUS_2);
+ if (reg < 0) {
+ dev_err(codec->dev, "Failed to read microphone status: %d\n",
+ reg);
+ return IRQ_HANDLED;
+ }
+
+ dev_dbg(codec->dev, "Microphone status: %x\n", reg);
+
+ report = 0;
+ if (reg & WM8994_MIC1_DET_STS)
+ report |= priv->micdet[0].det;
+ if (reg & WM8994_MIC1_SHRT_STS)
+ report |= priv->micdet[0].shrt;
+ snd_soc_jack_report(priv->micdet[0].jack, report,
+ priv->micdet[0].det | priv->micdet[0].shrt);
+
+ report = 0;
+ if (reg & WM8994_MIC2_DET_STS)
+ report |= priv->micdet[1].det;
+ if (reg & WM8994_MIC2_SHRT_STS)
+ report |= priv->micdet[1].shrt;
+ snd_soc_jack_report(priv->micdet[1].jack, report,
+ priv->micdet[1].det | priv->micdet[1].shrt);
+
+ return IRQ_HANDLED;
+}
+
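
A hedged usage sketch for the new detection API: it assumes a struct snd_soc_jack has already been created with the snd_soc_jack API, that MICBIAS1 feeds the headset microphone, and that only presence (not shorts) should be reported; the headset_jack/example_enable_micdet names and the include path are illustrative.

    #include <sound/jack.h>
    #include "../codecs/wm8994.h"	/* path as seen from a machine driver; illustrative */

    /* Hypothetical machine-driver fragment: report microphone presence on
     * MICBIAS1, ignore shorts. 'headset_jack' is assumed to have been set
     * up beforehand with the snd_soc_jack API.
     */
    static struct snd_soc_jack headset_jack;

    static int example_enable_micdet(struct snd_soc_codec *codec)
    {
    	return wm8994_mic_detect(codec, &headset_jack, 1,
    				 SND_JACK_MICROPHONE, 0);
    }
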
static int wm8994_codec_probe(struct platform_device *pdev)
{
int ret;
@@ -3695,7 +3868,7 @@ static int wm8994_codec_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&codec->dapm_widgets);
INIT_LIST_HEAD(&codec->dapm_paths);
- codec->private_data = wm8994;
+ snd_soc_codec_set_drvdata(codec, wm8994);
codec->control_data = dev_get_drvdata(pdev->dev.parent);
codec->name = "WM8994";
codec->owner = THIS_MODULE;
@@ -3743,6 +3916,30 @@ static int wm8994_codec_probe(struct platform_device *pdev)
break;
}
+ ret = wm8994_request_irq(codec->control_data, WM8994_IRQ_MIC1_DET,
+ wm8994_mic_irq, "Mic 1 detect", wm8994);
+ if (ret != 0)
+ dev_warn(&pdev->dev,
+ "Failed to request Mic1 detect IRQ: %d\n", ret);
+
+ ret = wm8994_request_irq(codec->control_data, WM8994_IRQ_MIC1_SHRT,
+ wm8994_mic_irq, "Mic 1 short", wm8994);
+ if (ret != 0)
+ dev_warn(&pdev->dev,
+ "Failed to request Mic1 short IRQ: %d\n", ret);
+
+ ret = wm8994_request_irq(codec->control_data, WM8994_IRQ_MIC2_DET,
+ wm8994_mic_irq, "Mic 2 detect", wm8994);
+ if (ret != 0)
+ dev_warn(&pdev->dev,
+ "Failed to request Mic2 detect IRQ: %d\n", ret);
+
+ ret = wm8994_request_irq(codec->control_data, WM8994_IRQ_MIC2_SHRT,
+ wm8994_mic_irq, "Mic 2 short", wm8994);
+ if (ret != 0)
+ dev_warn(&pdev->dev,
+ "Failed to request Mic2 short IRQ: %d\n", ret);
+
/* Remember if AIFnLRCLK is configured as a GPIO. This should be
* configured on init - if a system wants to do this dynamically
* at runtime we can deal with that then.
@@ -3750,7 +3947,7 @@ static int wm8994_codec_probe(struct platform_device *pdev)
ret = wm8994_reg_read(codec->control_data, WM8994_GPIO_1);
if (ret < 0) {
dev_err(codec->dev, "Failed to read GPIO1 state: %d\n", ret);
- goto err;
+ goto err_irq;
}
if ((ret & WM8994_GPN_FN_MASK) != WM8994_GP_FN_PIN_SPECIFIC) {
wm8994->lrclk_shared[0] = 1;
@@ -3762,7 +3959,7 @@ static int wm8994_codec_probe(struct platform_device *pdev)
ret = wm8994_reg_read(codec->control_data, WM8994_GPIO_6);
if (ret < 0) {
dev_err(codec->dev, "Failed to read GPIO6 state: %d\n", ret);
- goto err;
+ goto err_irq;
}
if ((ret & WM8994_GPN_FN_MASK) != WM8994_GP_FN_PIN_SPECIFIC) {
wm8994->lrclk_shared[1] = 1;
@@ -3812,7 +4009,7 @@ static int wm8994_codec_probe(struct platform_device *pdev)
ret = snd_soc_register_codec(codec);
if (ret != 0) {
dev_err(codec->dev, "Failed to register codec: %d\n", ret);
- goto err;
+ goto err_irq;
}
ret = snd_soc_register_dais(wm8994_dai, ARRAY_SIZE(wm8994_dai));
@@ -3827,6 +4024,11 @@ static int wm8994_codec_probe(struct platform_device *pdev)
err_codec:
snd_soc_unregister_codec(codec);
+err_irq:
+ wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC2_SHRT, wm8994);
+ wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC2_DET, wm8994);
+ wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC1_SHRT, wm8994);
+ wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC1_DET, wm8994);
err:
kfree(wm8994);
return ret;
@@ -3840,6 +4042,10 @@ static int __devexit wm8994_codec_remove(struct platform_device *pdev)
wm8994_set_bias_level(codec, SND_SOC_BIAS_OFF);
snd_soc_unregister_dais(wm8994_dai, ARRAY_SIZE(wm8994_dai));
snd_soc_unregister_codec(&wm8994->codec);
+ wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC2_SHRT, wm8994);
+ wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC2_DET, wm8994);
+ wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC1_SHRT, wm8994);
+ wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC1_DET, wm8994);
kfree(wm8994);
wm8994_codec = NULL;
diff --git a/sound/soc/codecs/wm8994.h b/sound/soc/codecs/wm8994.h
index 0a5e1424dea0..7072dc539354 100644
--- a/sound/soc/codecs/wm8994.h
+++ b/sound/soc/codecs/wm8994.h
@@ -23,4 +23,12 @@ extern struct snd_soc_dai wm8994_dai[];
#define WM8994_FLL1 1
#define WM8994_FLL2 2
+#define WM8994_FLL_SRC_MCLK1 1
+#define WM8994_FLL_SRC_MCLK2 2
+#define WM8994_FLL_SRC_LRCLK 3
+#define WM8994_FLL_SRC_BCLK 4
+
+int wm8994_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack,
+ int micbias, int det, int shrt);
+
#endif
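
With the FLL source defines exported here, a machine driver can start the FLL through the generic PLL hook, which in this kernel passes the source through to wm8994_set_fll(). A minimal sketch, assuming a 32.768 kHz reference on MCLK1 and a 12.288 MHz target output (both board-specific assumptions, as are the function name and include path).

    #include <sound/soc.h>
    #include "../codecs/wm8994.h"	/* illustrative include path */

    /* Hypothetical: lock FLL1 to a 32.768kHz reference on MCLK1 and
     * generate a 12.288MHz system clock.
     */
    static int example_start_fll1(struct snd_soc_dai *codec_dai)
    {
    	return snd_soc_dai_set_pll(codec_dai, WM8994_FLL1,
    				   WM8994_FLL_SRC_MCLK1,
    				   32768, 12288000);
    }
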
diff --git a/sound/soc/codecs/wm9081.c b/sound/soc/codecs/wm9081.c
index 3a184fcb702b..13186fb4dcb4 100644
--- a/sound/soc/codecs/wm9081.c
+++ b/sound/soc/codecs/wm9081.c
@@ -521,7 +521,7 @@ static int fll_factors(struct _fll_div *fll_div, unsigned int Fref,
static int wm9081_set_fll(struct snd_soc_codec *codec, int fll_id,
unsigned int Fref, unsigned int Fout)
{
- struct wm9081_priv *wm9081 = codec->private_data;
+ struct wm9081_priv *wm9081 = snd_soc_codec_get_drvdata(codec);
u16 reg1, reg4, reg5;
struct _fll_div fll_div;
int ret;
@@ -607,7 +607,7 @@ static int wm9081_set_fll(struct snd_soc_codec *codec, int fll_id,
static int configure_clock(struct snd_soc_codec *codec)
{
- struct wm9081_priv *wm9081 = codec->private_data;
+ struct wm9081_priv *wm9081 = snd_soc_codec_get_drvdata(codec);
int new_sysclk, i, target;
unsigned int reg;
int ret = 0;
@@ -702,7 +702,7 @@ static int clk_sys_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_codec *codec = w->codec;
- struct wm9081_priv *wm9081 = codec->private_data;
+ struct wm9081_priv *wm9081 = snd_soc_codec_get_drvdata(codec);
/* This should be done on init() for bypass paths */
switch (wm9081->sysclk_source) {
@@ -873,7 +873,7 @@ static int wm9081_set_dai_fmt(struct snd_soc_dai *dai,
unsigned int fmt)
{
struct snd_soc_codec *codec = dai->codec;
- struct wm9081_priv *wm9081 = codec->private_data;
+ struct wm9081_priv *wm9081 = snd_soc_codec_get_drvdata(codec);
unsigned int aif2 = snd_soc_read(codec, WM9081_AUDIO_INTERFACE_2);
aif2 &= ~(WM9081_AIF_BCLK_INV | WM9081_AIF_LRCLK_INV |
@@ -965,7 +965,7 @@ static int wm9081_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
- struct wm9081_priv *wm9081 = codec->private_data;
+ struct wm9081_priv *wm9081 = snd_soc_codec_get_drvdata(codec);
int ret, i, best, best_val, cur_val;
unsigned int clk_ctrl2, aif1, aif2, aif3, aif4;
@@ -1139,7 +1139,7 @@ static int wm9081_set_sysclk(struct snd_soc_dai *codec_dai,
int clk_id, unsigned int freq, int dir)
{
struct snd_soc_codec *codec = codec_dai->codec;
- struct wm9081_priv *wm9081 = codec->private_data;
+ struct wm9081_priv *wm9081 = snd_soc_codec_get_drvdata(codec);
switch (clk_id) {
case WM9081_SYSCLK_MCLK:
@@ -1159,7 +1159,7 @@ static int wm9081_set_tdm_slot(struct snd_soc_dai *dai,
unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width)
{
struct snd_soc_codec *codec = dai->codec;
- struct wm9081_priv *wm9081 = codec->private_data;
+ struct wm9081_priv *wm9081 = snd_soc_codec_get_drvdata(codec);
unsigned int aif1 = snd_soc_read(codec, WM9081_AUDIO_INTERFACE_1);
aif1 &= ~(WM9081_AIFDAC_TDM_SLOT_MASK | WM9081_AIFDAC_TDM_MODE_MASK);
@@ -1242,7 +1242,7 @@ static int wm9081_probe(struct platform_device *pdev)
socdev->card->codec = wm9081_codec;
codec = wm9081_codec;
- wm9081 = codec->private_data;
+ wm9081 = snd_soc_codec_get_drvdata(codec);
/* register pcms */
ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
@@ -1339,7 +1339,7 @@ static int wm9081_register(struct wm9081_priv *wm9081,
INIT_LIST_HEAD(&codec->dapm_widgets);
INIT_LIST_HEAD(&codec->dapm_paths);
- codec->private_data = wm9081;
+ snd_soc_codec_set_drvdata(codec, wm9081);
codec->name = "WM9081";
codec->owner = THIS_MODULE;
codec->dai = &wm9081_dai;
diff --git a/sound/soc/codecs/wm9090.c b/sound/soc/codecs/wm9090.c
new file mode 100644
index 000000000000..1592250daec0
--- /dev/null
+++ b/sound/soc/codecs/wm9090.c
@@ -0,0 +1,773 @@
+/*
+ * ALSA SoC WM9090 driver
+ *
+ * Copyright 2009, 2010 Wolfson Microelectronics
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <sound/initval.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/tlv.h>
+#include <sound/wm9090.h>
+
+#include "wm9090.h"
+
+static struct snd_soc_codec *wm9090_codec;
+
+static const u16 wm9090_reg_defaults[] = {
+ 0x9093, /* R0 - Software Reset */
+ 0x0006, /* R1 - Power Management (1) */
+ 0x6000, /* R2 - Power Management (2) */
+ 0x0000, /* R3 - Power Management (3) */
+ 0x0000, /* R4 */
+ 0x0000, /* R5 */
+ 0x01C0, /* R6 - Clocking 1 */
+ 0x0000, /* R7 */
+ 0x0000, /* R8 */
+ 0x0000, /* R9 */
+ 0x0000, /* R10 */
+ 0x0000, /* R11 */
+ 0x0000, /* R12 */
+ 0x0000, /* R13 */
+ 0x0000, /* R14 */
+ 0x0000, /* R15 */
+ 0x0000, /* R16 */
+ 0x0000, /* R17 */
+ 0x0000, /* R18 */
+ 0x0000, /* R19 */
+ 0x0000, /* R20 */
+ 0x0000, /* R21 */
+ 0x0003, /* R22 - IN1 Line Control */
+ 0x0003, /* R23 - IN2 Line Control */
+ 0x0083, /* R24 - IN1 Line Input A Volume */
+ 0x0083, /* R25 - IN1 Line Input B Volume */
+ 0x0083, /* R26 - IN2 Line Input A Volume */
+ 0x0083, /* R27 - IN2 Line Input B Volume */
+ 0x002D, /* R28 - Left Output Volume */
+ 0x002D, /* R29 - Right Output Volume */
+ 0x0000, /* R30 */
+ 0x0000, /* R31 */
+ 0x0000, /* R32 */
+ 0x0000, /* R33 */
+ 0x0100, /* R34 - SPKMIXL Attenuation */
+ 0x0000, /* R35 */
+ 0x0010, /* R36 - SPKOUT Mixers */
+ 0x0140, /* R37 - ClassD3 */
+ 0x0039, /* R38 - Speaker Volume Left */
+ 0x0000, /* R39 */
+ 0x0000, /* R40 */
+ 0x0000, /* R41 */
+ 0x0000, /* R42 */
+ 0x0000, /* R43 */
+ 0x0000, /* R44 */
+ 0x0000, /* R45 - Output Mixer1 */
+ 0x0000, /* R46 - Output Mixer2 */
+ 0x0100, /* R47 - Output Mixer3 */
+ 0x0100, /* R48 - Output Mixer4 */
+ 0x0000, /* R49 */
+ 0x0000, /* R50 */
+ 0x0000, /* R51 */
+ 0x0000, /* R52 */
+ 0x0000, /* R53 */
+ 0x0000, /* R54 - Speaker Mixer */
+ 0x0000, /* R55 */
+ 0x0000, /* R56 */
+ 0x000D, /* R57 - AntiPOP2 */
+ 0x0000, /* R58 */
+ 0x0000, /* R59 */
+ 0x0000, /* R60 */
+ 0x0000, /* R61 */
+ 0x0000, /* R62 */
+ 0x0000, /* R63 */
+ 0x0000, /* R64 */
+ 0x0000, /* R65 */
+ 0x0000, /* R66 */
+ 0x0000, /* R67 */
+ 0x0000, /* R68 */
+ 0x0000, /* R69 */
+ 0x0000, /* R70 - Write Sequencer 0 */
+ 0x0000, /* R71 - Write Sequencer 1 */
+ 0x0000, /* R72 - Write Sequencer 2 */
+ 0x0000, /* R73 - Write Sequencer 3 */
+ 0x0000, /* R74 - Write Sequencer 4 */
+ 0x0000, /* R75 - Write Sequencer 5 */
+ 0x1F25, /* R76 - Charge Pump 1 */
+ 0x0000, /* R77 */
+ 0x0000, /* R78 */
+ 0x0000, /* R79 */
+ 0x0000, /* R80 */
+ 0x0000, /* R81 */
+ 0x0000, /* R82 */
+ 0x0000, /* R83 */
+ 0x0000, /* R84 - DC Servo 0 */
+ 0x054A, /* R85 - DC Servo 1 */
+ 0x0000, /* R86 */
+ 0x0000, /* R87 - DC Servo 3 */
+ 0x0000, /* R88 - DC Servo Readback 0 */
+ 0x0000, /* R89 - DC Servo Readback 1 */
+ 0x0000, /* R90 - DC Servo Readback 2 */
+ 0x0000, /* R91 */
+ 0x0000, /* R92 */
+ 0x0000, /* R93 */
+ 0x0000, /* R94 */
+ 0x0000, /* R95 */
+ 0x0100, /* R96 - Analogue HP 0 */
+ 0x0000, /* R97 */
+ 0x8640, /* R98 - AGC Control 0 */
+ 0xC000, /* R99 - AGC Control 1 */
+ 0x0200, /* R100 - AGC Control 2 */
+};
+
+/* Private data for a WM9090 device instance */
+struct wm9090_priv {
+	/* We're not really registering as a CODEC, since the ASoC core
+	 * does not yet support multiple CODECs, but having the CODEC
+	 * structure means we can reuse some of the ASoC core
+	 * features.
+ */
+ struct snd_soc_codec codec;
+ struct mutex mutex;
+ u16 reg_cache[WM9090_MAX_REGISTER + 1];
+ struct wm9090_platform_data pdata;
+};
+
+static int wm9090_volatile(unsigned int reg)
+{
+ switch (reg) {
+ case WM9090_SOFTWARE_RESET:
+ case WM9090_DC_SERVO_0:
+ case WM9090_DC_SERVO_READBACK_0:
+ case WM9090_DC_SERVO_READBACK_1:
+ case WM9090_DC_SERVO_READBACK_2:
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+static void wait_for_dc_servo(struct snd_soc_codec *codec)
+{
+ unsigned int reg;
+ int count = 0;
+
+ dev_dbg(codec->dev, "Waiting for DC servo...\n");
+ do {
+ count++;
+ msleep(1);
+ reg = snd_soc_read(codec, WM9090_DC_SERVO_READBACK_0);
+ dev_dbg(codec->dev, "DC servo status: %x\n", reg);
+ } while ((reg & WM9090_DCS_CAL_COMPLETE_MASK)
+ != WM9090_DCS_CAL_COMPLETE_MASK && count < 1000);
+
+ if ((reg & WM9090_DCS_CAL_COMPLETE_MASK)
+ != WM9090_DCS_CAL_COMPLETE_MASK)
+ dev_err(codec->dev, "Timed out waiting for DC Servo\n");
+}
+
+static const unsigned int in_tlv[] = {
+	TLV_DB_RANGE_HEAD(3),
+ 0, 0, TLV_DB_SCALE_ITEM(-600, 0, 0),
+ 1, 3, TLV_DB_SCALE_ITEM(-350, 350, 0),
+ 4, 6, TLV_DB_SCALE_ITEM(600, 600, 0),
+};
+static const unsigned int mix_tlv[] = {
+	TLV_DB_RANGE_HEAD(2),
+ 0, 2, TLV_DB_SCALE_ITEM(-1200, 300, 0),
+ 3, 3, TLV_DB_SCALE_ITEM(0, 0, 0),
+};
+static const DECLARE_TLV_DB_SCALE(out_tlv, -5700, 100, 0);
+static const unsigned int spkboost_tlv[] = {
+	TLV_DB_RANGE_HEAD(2),
+ 0, 6, TLV_DB_SCALE_ITEM(0, 150, 0),
+ 7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0),
+};
+
+static const struct snd_kcontrol_new wm9090_controls[] = {
+SOC_SINGLE_TLV("IN1A Volume", WM9090_IN1_LINE_INPUT_A_VOLUME, 0, 6, 0,
+ in_tlv),
+SOC_SINGLE("IN1A Switch", WM9090_IN1_LINE_INPUT_A_VOLUME, 7, 1, 1),
+SOC_SINGLE("IN1A ZC Switch", WM9090_IN1_LINE_INPUT_A_VOLUME, 6, 1, 0),
+
+SOC_SINGLE_TLV("IN2A Volume", WM9090_IN2_LINE_INPUT_A_VOLUME, 0, 6, 0,
+ in_tlv),
+SOC_SINGLE("IN2A Switch", WM9090_IN2_LINE_INPUT_A_VOLUME, 7, 1, 1),
+SOC_SINGLE("IN2A ZC Switch", WM9090_IN2_LINE_INPUT_A_VOLUME, 6, 1, 0),
+
+SOC_SINGLE("MIXOUTL Switch", WM9090_OUTPUT_MIXER3, 8, 1, 1),
+SOC_SINGLE_TLV("MIXOUTL IN1A Volume", WM9090_OUTPUT_MIXER3, 6, 3, 1,
+ mix_tlv),
+SOC_SINGLE_TLV("MIXOUTL IN2A Volume", WM9090_OUTPUT_MIXER3, 2, 3, 1,
+ mix_tlv),
+
+SOC_SINGLE("MIXOUTR Switch", WM9090_OUTPUT_MIXER4, 8, 1, 1),
+SOC_SINGLE_TLV("MIXOUTR IN1A Volume", WM9090_OUTPUT_MIXER4, 6, 3, 1,
+ mix_tlv),
+SOC_SINGLE_TLV("MIXOUTR IN2A Volume", WM9090_OUTPUT_MIXER4, 2, 3, 1,
+ mix_tlv),
+
+SOC_SINGLE("SPKMIX Switch", WM9090_SPKMIXL_ATTENUATION, 8, 1, 1),
+SOC_SINGLE_TLV("SPKMIX IN1A Volume", WM9090_SPKMIXL_ATTENUATION, 6, 3, 1,
+ mix_tlv),
+SOC_SINGLE_TLV("SPKMIX IN2A Volume", WM9090_SPKMIXL_ATTENUATION, 2, 3, 1,
+ mix_tlv),
+
+SOC_DOUBLE_R_TLV("Headphone Volume", WM9090_LEFT_OUTPUT_VOLUME,
+ WM9090_RIGHT_OUTPUT_VOLUME, 0, 63, 0, out_tlv),
+SOC_DOUBLE_R("Headphone Switch", WM9090_LEFT_OUTPUT_VOLUME,
+ WM9090_RIGHT_OUTPUT_VOLUME, 6, 1, 1),
+SOC_DOUBLE_R("Headphone ZC Switch", WM9090_LEFT_OUTPUT_VOLUME,
+ WM9090_RIGHT_OUTPUT_VOLUME, 7, 1, 0),
+
+SOC_SINGLE_TLV("Speaker Volume", WM9090_SPEAKER_VOLUME_LEFT, 0, 63, 0,
+ out_tlv),
+SOC_SINGLE("Speaker Switch", WM9090_SPEAKER_VOLUME_LEFT, 6, 1, 1),
+SOC_SINGLE("Speaker ZC Switch", WM9090_SPEAKER_VOLUME_LEFT, 7, 1, 0),
+SOC_SINGLE_TLV("Speaker Boost Volume", WM9090_CLASSD3, 3, 7, 0, spkboost_tlv),
+};
+
+static const struct snd_kcontrol_new wm9090_in1_se_controls[] = {
+SOC_SINGLE_TLV("IN1B Volume", WM9090_IN1_LINE_INPUT_B_VOLUME, 0, 6, 0,
+ in_tlv),
+SOC_SINGLE("IN1B Switch", WM9090_IN1_LINE_INPUT_B_VOLUME, 7, 1, 1),
+SOC_SINGLE("IN1B ZC Switch", WM9090_IN1_LINE_INPUT_B_VOLUME, 6, 1, 0),
+
+SOC_SINGLE_TLV("SPKMIX IN1B Volume", WM9090_SPKMIXL_ATTENUATION, 4, 3, 1,
+ mix_tlv),
+SOC_SINGLE_TLV("MIXOUTL IN1B Volume", WM9090_OUTPUT_MIXER3, 4, 3, 1,
+ mix_tlv),
+SOC_SINGLE_TLV("MIXOUTR IN1B Volume", WM9090_OUTPUT_MIXER4, 4, 3, 1,
+ mix_tlv),
+};
+
+static const struct snd_kcontrol_new wm9090_in2_se_controls[] = {
+SOC_SINGLE_TLV("IN2B Volume", WM9090_IN2_LINE_INPUT_B_VOLUME, 0, 6, 0,
+ in_tlv),
+SOC_SINGLE("IN2B Switch", WM9090_IN2_LINE_INPUT_B_VOLUME, 7, 1, 1),
+SOC_SINGLE("IN2B ZC Switch", WM9090_IN2_LINE_INPUT_B_VOLUME, 6, 1, 0),
+
+SOC_SINGLE_TLV("SPKMIX IN2B Volume", WM9090_SPKMIXL_ATTENUATION, 0, 3, 1,
+ mix_tlv),
+SOC_SINGLE_TLV("MIXOUTL IN2B Volume", WM9090_OUTPUT_MIXER3, 0, 3, 1,
+ mix_tlv),
+SOC_SINGLE_TLV("MIXOUTR IN2B Volume", WM9090_OUTPUT_MIXER4, 0, 3, 1,
+ mix_tlv),
+};
+
+static int hp_ev(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ unsigned int reg = snd_soc_read(codec, WM9090_ANALOGUE_HP_0);
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ snd_soc_update_bits(codec, WM9090_CHARGE_PUMP_1,
+ WM9090_CP_ENA, WM9090_CP_ENA);
+
+ msleep(5);
+
+ snd_soc_update_bits(codec, WM9090_POWER_MANAGEMENT_1,
+ WM9090_HPOUT1L_ENA | WM9090_HPOUT1R_ENA,
+ WM9090_HPOUT1L_ENA | WM9090_HPOUT1R_ENA);
+
+ reg |= WM9090_HPOUT1L_DLY | WM9090_HPOUT1R_DLY;
+ snd_soc_write(codec, WM9090_ANALOGUE_HP_0, reg);
+
+ /* Start the DC servo. We don't currently use the
+ * ability to save the state since we don't have full
+ * control of the analogue paths and they can change
+ * DC offsets; see the WM8904 driver for an example of
+ * doing so.
+ */
+ snd_soc_write(codec, WM9090_DC_SERVO_0,
+ WM9090_DCS_ENA_CHAN_0 |
+ WM9090_DCS_ENA_CHAN_1 |
+ WM9090_DCS_TRIG_STARTUP_1 |
+ WM9090_DCS_TRIG_STARTUP_0);
+ wait_for_dc_servo(codec);
+
+ reg |= WM9090_HPOUT1R_OUTP | WM9090_HPOUT1R_RMV_SHORT |
+ WM9090_HPOUT1L_OUTP | WM9090_HPOUT1L_RMV_SHORT;
+ snd_soc_write(codec, WM9090_ANALOGUE_HP_0, reg);
+ break;
+
+ case SND_SOC_DAPM_PRE_PMD:
+ reg &= ~(WM9090_HPOUT1L_RMV_SHORT |
+ WM9090_HPOUT1L_DLY |
+ WM9090_HPOUT1L_OUTP |
+ WM9090_HPOUT1R_RMV_SHORT |
+ WM9090_HPOUT1R_DLY |
+ WM9090_HPOUT1R_OUTP);
+
+ snd_soc_write(codec, WM9090_ANALOGUE_HP_0, reg);
+
+ snd_soc_write(codec, WM9090_DC_SERVO_0, 0);
+
+ snd_soc_update_bits(codec, WM9090_POWER_MANAGEMENT_1,
+ WM9090_HPOUT1L_ENA | WM9090_HPOUT1R_ENA,
+ 0);
+
+ snd_soc_update_bits(codec, WM9090_CHARGE_PUMP_1,
+ WM9090_CP_ENA, 0);
+ break;
+ }
+
+ return 0;
+}
+
+static const struct snd_kcontrol_new spkmix[] = {
+SOC_DAPM_SINGLE("IN1A Switch", WM9090_SPEAKER_MIXER, 6, 1, 0),
+SOC_DAPM_SINGLE("IN1B Switch", WM9090_SPEAKER_MIXER, 4, 1, 0),
+SOC_DAPM_SINGLE("IN2A Switch", WM9090_SPEAKER_MIXER, 2, 1, 0),
+SOC_DAPM_SINGLE("IN2B Switch", WM9090_SPEAKER_MIXER, 0, 1, 0),
+};
+
+static const struct snd_kcontrol_new spkout[] = {
+SOC_DAPM_SINGLE("Mixer Switch", WM9090_SPKOUT_MIXERS, 4, 1, 0),
+};
+
+static const struct snd_kcontrol_new mixoutl[] = {
+SOC_DAPM_SINGLE("IN1A Switch", WM9090_OUTPUT_MIXER1, 6, 1, 0),
+SOC_DAPM_SINGLE("IN1B Switch", WM9090_OUTPUT_MIXER1, 4, 1, 0),
+SOC_DAPM_SINGLE("IN2A Switch", WM9090_OUTPUT_MIXER1, 2, 1, 0),
+SOC_DAPM_SINGLE("IN2B Switch", WM9090_OUTPUT_MIXER1, 0, 1, 0),
+};
+
+static const struct snd_kcontrol_new mixoutr[] = {
+SOC_DAPM_SINGLE("IN1A Switch", WM9090_OUTPUT_MIXER2, 6, 1, 0),
+SOC_DAPM_SINGLE("IN1B Switch", WM9090_OUTPUT_MIXER2, 4, 1, 0),
+SOC_DAPM_SINGLE("IN2A Switch", WM9090_OUTPUT_MIXER2, 2, 1, 0),
+SOC_DAPM_SINGLE("IN2B Switch", WM9090_OUTPUT_MIXER2, 0, 1, 0),
+};
+
+static const struct snd_soc_dapm_widget wm9090_dapm_widgets[] = {
+SND_SOC_DAPM_INPUT("IN1+"),
+SND_SOC_DAPM_INPUT("IN1-"),
+SND_SOC_DAPM_INPUT("IN2+"),
+SND_SOC_DAPM_INPUT("IN2-"),
+
+SND_SOC_DAPM_SUPPLY("OSC", WM9090_POWER_MANAGEMENT_1, 3, 0, NULL, 0),
+
+SND_SOC_DAPM_PGA("IN1A PGA", WM9090_POWER_MANAGEMENT_2, 7, 0, NULL, 0),
+SND_SOC_DAPM_PGA("IN1B PGA", WM9090_POWER_MANAGEMENT_2, 6, 0, NULL, 0),
+SND_SOC_DAPM_PGA("IN2A PGA", WM9090_POWER_MANAGEMENT_2, 5, 0, NULL, 0),
+SND_SOC_DAPM_PGA("IN2B PGA", WM9090_POWER_MANAGEMENT_2, 4, 0, NULL, 0),
+
+SND_SOC_DAPM_MIXER("SPKMIX", WM9090_POWER_MANAGEMENT_3, 3, 0,
+ spkmix, ARRAY_SIZE(spkmix)),
+SND_SOC_DAPM_MIXER("MIXOUTL", WM9090_POWER_MANAGEMENT_3, 5, 0,
+ mixoutl, ARRAY_SIZE(mixoutl)),
+SND_SOC_DAPM_MIXER("MIXOUTR", WM9090_POWER_MANAGEMENT_3, 4, 0,
+ mixoutr, ARRAY_SIZE(mixoutr)),
+
+SND_SOC_DAPM_PGA_E("HP PGA", SND_SOC_NOPM, 0, 0, NULL, 0,
+ hp_ev, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+
+SND_SOC_DAPM_PGA("SPKPGA", WM9090_POWER_MANAGEMENT_3, 8, 0, NULL, 0),
+SND_SOC_DAPM_MIXER("SPKOUT", WM9090_POWER_MANAGEMENT_1, 12, 0,
+ spkout, ARRAY_SIZE(spkout)),
+
+SND_SOC_DAPM_OUTPUT("HPR"),
+SND_SOC_DAPM_OUTPUT("HPL"),
+SND_SOC_DAPM_OUTPUT("Speaker"),
+};
+
+static const struct snd_soc_dapm_route audio_map[] = {
+ { "IN1A PGA", NULL, "IN1+" },
+ { "IN2A PGA", NULL, "IN2+" },
+
+ { "SPKMIX", "IN1A Switch", "IN1A PGA" },
+ { "SPKMIX", "IN2A Switch", "IN2A PGA" },
+
+ { "MIXOUTL", "IN1A Switch", "IN1A PGA" },
+ { "MIXOUTL", "IN2A Switch", "IN2A PGA" },
+
+ { "MIXOUTR", "IN1A Switch", "IN1A PGA" },
+ { "MIXOUTR", "IN2A Switch", "IN2A PGA" },
+
+ { "HP PGA", NULL, "OSC" },
+ { "HP PGA", NULL, "MIXOUTL" },
+ { "HP PGA", NULL, "MIXOUTR" },
+
+ { "HPL", NULL, "HP PGA" },
+ { "HPR", NULL, "HP PGA" },
+
+ { "SPKPGA", NULL, "OSC" },
+ { "SPKPGA", NULL, "SPKMIX" },
+
+ { "SPKOUT", "Mixer Switch", "SPKPGA" },
+
+ { "Speaker", NULL, "SPKOUT" },
+};
+
+static const struct snd_soc_dapm_route audio_map_in1_se[] = {
+ { "IN1B PGA", NULL, "IN1-" },
+
+ { "SPKMIX", "IN1B Switch", "IN1B PGA" },
+ { "MIXOUTL", "IN1B Switch", "IN1B PGA" },
+ { "MIXOUTR", "IN1B Switch", "IN1B PGA" },
+};
+
+static const struct snd_soc_dapm_route audio_map_in1_diff[] = {
+ { "IN1A PGA", NULL, "IN1-" },
+};
+
+static const struct snd_soc_dapm_route audio_map_in2_se[] = {
+ { "IN2B PGA", NULL, "IN2-" },
+
+ { "SPKMIX", "IN2B Switch", "IN2B PGA" },
+ { "MIXOUTL", "IN2B Switch", "IN2B PGA" },
+ { "MIXOUTR", "IN2B Switch", "IN2B PGA" },
+};
+
+static const struct snd_soc_dapm_route audio_map_in2_diff[] = {
+ { "IN2A PGA", NULL, "IN2-" },
+};
+
+static int wm9090_add_controls(struct snd_soc_codec *codec)
+{
+ struct wm9090_priv *wm9090 = snd_soc_codec_get_drvdata(codec);
+ int i;
+
+ snd_soc_dapm_new_controls(codec, wm9090_dapm_widgets,
+ ARRAY_SIZE(wm9090_dapm_widgets));
+
+ snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+
+ snd_soc_add_controls(codec, wm9090_controls,
+ ARRAY_SIZE(wm9090_controls));
+
+ if (wm9090->pdata.lin1_diff) {
+ snd_soc_dapm_add_routes(codec, audio_map_in1_diff,
+ ARRAY_SIZE(audio_map_in1_diff));
+ } else {
+ snd_soc_dapm_add_routes(codec, audio_map_in1_se,
+ ARRAY_SIZE(audio_map_in1_se));
+ snd_soc_add_controls(codec, wm9090_in1_se_controls,
+ ARRAY_SIZE(wm9090_in1_se_controls));
+ }
+
+ if (wm9090->pdata.lin2_diff) {
+ snd_soc_dapm_add_routes(codec, audio_map_in2_diff,
+ ARRAY_SIZE(audio_map_in2_diff));
+ } else {
+ snd_soc_dapm_add_routes(codec, audio_map_in2_se,
+ ARRAY_SIZE(audio_map_in2_se));
+ snd_soc_add_controls(codec, wm9090_in2_se_controls,
+ ARRAY_SIZE(wm9090_in2_se_controls));
+ }
+
+ if (wm9090->pdata.agc_ena) {
+ for (i = 0; i < ARRAY_SIZE(wm9090->pdata.agc); i++)
+ snd_soc_write(codec, WM9090_AGC_CONTROL_0 + i,
+ wm9090->pdata.agc[i]);
+ snd_soc_update_bits(codec, WM9090_POWER_MANAGEMENT_3,
+ WM9090_AGC_ENA, WM9090_AGC_ENA);
+ } else {
+ snd_soc_update_bits(codec, WM9090_POWER_MANAGEMENT_3,
+ WM9090_AGC_ENA, 0);
+ }
+
+ return 0;
+
+}
+
+/*
+ * The machine driver should call this from its set_bias_level; if there
+ * isn't one then this can be used directly as the set_bias_level function.
+ */
+static int wm9090_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ u16 *reg_cache = codec->reg_cache;
+ int i, ret;
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ break;
+
+ case SND_SOC_BIAS_PREPARE:
+ snd_soc_update_bits(codec, WM9090_ANTIPOP2, WM9090_VMID_ENA,
+ WM9090_VMID_ENA);
+ snd_soc_update_bits(codec, WM9090_POWER_MANAGEMENT_1,
+ WM9090_BIAS_ENA |
+ WM9090_VMID_RES_MASK,
+ WM9090_BIAS_ENA |
+ 1 << WM9090_VMID_RES_SHIFT);
+ msleep(1); /* Probably an overestimate */
+ break;
+
+ case SND_SOC_BIAS_STANDBY:
+ if (codec->bias_level == SND_SOC_BIAS_OFF) {
+ /* Restore the register cache */
+ for (i = 1; i < codec->reg_cache_size; i++) {
+ if (reg_cache[i] == wm9090_reg_defaults[i])
+ continue;
+ if (wm9090_volatile(i))
+ continue;
+
+ ret = snd_soc_write(codec, i, reg_cache[i]);
+ if (ret != 0)
+ dev_warn(codec->dev,
+ "Failed to restore register %d: %d\n",
+ i, ret);
+ }
+ }
+
+		/* We keep VMID off during standby since the combination of
+		 * ground-referenced outputs and the class D speaker means that
+		 * latency is not an issue.
+ */
+ snd_soc_update_bits(codec, WM9090_POWER_MANAGEMENT_1,
+ WM9090_BIAS_ENA | WM9090_VMID_RES_MASK, 0);
+ snd_soc_update_bits(codec, WM9090_ANTIPOP2,
+ WM9090_VMID_ENA, 0);
+ break;
+
+ case SND_SOC_BIAS_OFF:
+ break;
+ }
+
+ codec->bias_level = level;
+
+ return 0;
+}
+
+static int wm9090_probe(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec;
+ int ret = 0;
+
+ if (wm9090_codec == NULL) {
+ dev_err(&pdev->dev, "Codec device not registered\n");
+ return -ENODEV;
+ }
+
+ socdev->card->codec = wm9090_codec;
+ codec = wm9090_codec;
+
+ /* register pcms */
+ ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
+ if (ret < 0) {
+ dev_err(codec->dev, "failed to create pcms: %d\n", ret);
+ goto pcm_err;
+ }
+
+ wm9090_add_controls(codec);
+
+ return 0;
+
+pcm_err:
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int wm9090_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec = socdev->card->codec;
+
+ wm9090_set_bias_level(codec, SND_SOC_BIAS_OFF);
+
+ return 0;
+}
+
+static int wm9090_resume(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec = socdev->card->codec;
+
+ wm9090_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+ return 0;
+}
+#else
+#define wm9090_suspend NULL
+#define wm9090_resume NULL
+#endif
+
+static int wm9090_remove(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+
+ snd_soc_free_pcms(socdev);
+ snd_soc_dapm_free(socdev);
+
+ return 0;
+}
+
+struct snd_soc_codec_device soc_codec_dev_wm9090 = {
+ .probe = wm9090_probe,
+ .remove = wm9090_remove,
+ .suspend = wm9090_suspend,
+ .resume = wm9090_resume,
+};
+EXPORT_SYMBOL_GPL(soc_codec_dev_wm9090);
+
+static int wm9090_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct wm9090_priv *wm9090;
+ struct snd_soc_codec *codec;
+ int ret;
+
+ wm9090 = kzalloc(sizeof(*wm9090), GFP_KERNEL);
+ if (wm9090 == NULL) {
+ dev_err(&i2c->dev, "Can not allocate memory\n");
+ return -ENOMEM;
+ }
+ codec = &wm9090->codec;
+
+ if (i2c->dev.platform_data)
+ memcpy(&wm9090->pdata, i2c->dev.platform_data,
+ sizeof(wm9090->pdata));
+
+ wm9090_codec = codec;
+
+ i2c_set_clientdata(i2c, wm9090);
+
+ mutex_init(&codec->mutex);
+ INIT_LIST_HEAD(&codec->dapm_widgets);
+ INIT_LIST_HEAD(&codec->dapm_paths);
+
+ codec->control_data = i2c;
+ snd_soc_codec_set_drvdata(codec, wm9090);
+ codec->dev = &i2c->dev;
+ codec->name = "WM9090";
+ codec->owner = THIS_MODULE;
+ codec->bias_level = SND_SOC_BIAS_OFF;
+	codec->set_bias_level = wm9090_set_bias_level;
+ codec->reg_cache_size = WM9090_MAX_REGISTER + 1;
+ codec->reg_cache = &wm9090->reg_cache;
+ codec->volatile_register = wm9090_volatile;
+
+ ret = snd_soc_codec_set_cache_io(codec, 8, 16, SND_SOC_I2C);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
+ goto err;
+ }
+
+ memcpy(&wm9090->reg_cache, wm9090_reg_defaults,
+ sizeof(wm9090->reg_cache));
+
+ ret = snd_soc_read(codec, WM9090_SOFTWARE_RESET);
+ if (ret < 0)
+ goto err;
+ if (ret != wm9090_reg_defaults[WM9090_SOFTWARE_RESET]) {
+ dev_err(&i2c->dev, "Device is not a WM9090, ID=%x\n", ret);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = snd_soc_write(codec, WM9090_SOFTWARE_RESET, 0);
+ if (ret < 0)
+ goto err;
+
+ /* Configure some defaults; they will be written out when we
+ * bring the bias up.
+ */
+ wm9090->reg_cache[WM9090_IN1_LINE_INPUT_A_VOLUME] |= WM9090_IN1_VU
+ | WM9090_IN1A_ZC;
+ wm9090->reg_cache[WM9090_IN1_LINE_INPUT_B_VOLUME] |= WM9090_IN1_VU
+ | WM9090_IN1B_ZC;
+ wm9090->reg_cache[WM9090_IN2_LINE_INPUT_A_VOLUME] |= WM9090_IN2_VU
+ | WM9090_IN2A_ZC;
+ wm9090->reg_cache[WM9090_IN2_LINE_INPUT_B_VOLUME] |= WM9090_IN2_VU
+ | WM9090_IN2B_ZC;
+ wm9090->reg_cache[WM9090_SPEAKER_VOLUME_LEFT] |=
+ WM9090_SPKOUT_VU | WM9090_SPKOUTL_ZC;
+ wm9090->reg_cache[WM9090_LEFT_OUTPUT_VOLUME] |=
+ WM9090_HPOUT1_VU | WM9090_HPOUT1L_ZC;
+ wm9090->reg_cache[WM9090_RIGHT_OUTPUT_VOLUME] |=
+ WM9090_HPOUT1_VU | WM9090_HPOUT1R_ZC;
+
+ wm9090->reg_cache[WM9090_CLOCKING_1] |= WM9090_TOCLK_ENA;
+
+ wm9090_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+ ret = snd_soc_register_codec(codec);
+ if (ret != 0) {
+ dev_err(&i2c->dev, "Failed to register CODEC: %d\n", ret);
+ goto err_bias;
+ }
+
+ return 0;
+
+err_bias:
+ wm9090_set_bias_level(codec, SND_SOC_BIAS_OFF);
+err:
+ kfree(wm9090);
+ i2c_set_clientdata(i2c, NULL);
+ wm9090_codec = NULL;
+
+ return ret;
+}
+
+static int wm9090_i2c_remove(struct i2c_client *i2c)
+{
+ struct wm9090_priv *wm9090 = i2c_get_clientdata(i2c);
+ struct snd_soc_codec *codec = &wm9090->codec;
+
+ snd_soc_unregister_codec(codec);
+ wm9090_set_bias_level(codec, SND_SOC_BIAS_OFF);
+ kfree(wm9090);
+ wm9090_codec = NULL;
+
+ return 0;
+}
+
+static const struct i2c_device_id wm9090_id[] = {
+ { "wm9090", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, wm9090_id);
+
+static struct i2c_driver wm9090_i2c_driver = {
+ .driver = {
+ .name = "wm9090",
+ .owner = THIS_MODULE,
+ },
+ .probe = wm9090_i2c_probe,
+ .remove = __devexit_p(wm9090_i2c_remove),
+ .id_table = wm9090_id,
+};
+
+static int __init wm9090_init(void)
+{
+ return i2c_add_driver(&wm9090_i2c_driver);
+}
+module_init(wm9090_init);
+
+static void __exit wm9090_exit(void)
+{
+ i2c_del_driver(&wm9090_i2c_driver);
+}
+module_exit(wm9090_exit);
+
+MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_DESCRIPTION("WM9090 ASoC driver");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wm9090.h b/sound/soc/codecs/wm9090.h
new file mode 100644
index 000000000000..b08eab932a5b
--- /dev/null
+++ b/sound/soc/codecs/wm9090.h
@@ -0,0 +1,715 @@
+/*
+ * ALSA SoC WM9090 driver
+ *
+ * Copyright 2009 Wolfson Microelectronics
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef __WM9090_H
+#define __WM9090_H
+
+extern struct snd_soc_codec_device soc_codec_dev_wm9090;
+
+/*
+ * Register values.
+ */
+#define WM9090_SOFTWARE_RESET 0x00
+#define WM9090_POWER_MANAGEMENT_1 0x01
+#define WM9090_POWER_MANAGEMENT_2 0x02
+#define WM9090_POWER_MANAGEMENT_3 0x03
+#define WM9090_CLOCKING_1 0x06
+#define WM9090_IN1_LINE_CONTROL 0x16
+#define WM9090_IN2_LINE_CONTROL 0x17
+#define WM9090_IN1_LINE_INPUT_A_VOLUME 0x18
+#define WM9090_IN1_LINE_INPUT_B_VOLUME 0x19
+#define WM9090_IN2_LINE_INPUT_A_VOLUME 0x1A
+#define WM9090_IN2_LINE_INPUT_B_VOLUME 0x1B
+#define WM9090_LEFT_OUTPUT_VOLUME 0x1C
+#define WM9090_RIGHT_OUTPUT_VOLUME 0x1D
+#define WM9090_SPKMIXL_ATTENUATION 0x22
+#define WM9090_SPKOUT_MIXERS 0x24
+#define WM9090_CLASSD3 0x25
+#define WM9090_SPEAKER_VOLUME_LEFT 0x26
+#define WM9090_OUTPUT_MIXER1 0x2D
+#define WM9090_OUTPUT_MIXER2 0x2E
+#define WM9090_OUTPUT_MIXER3 0x2F
+#define WM9090_OUTPUT_MIXER4 0x30
+#define WM9090_SPEAKER_MIXER 0x36
+#define WM9090_ANTIPOP2 0x39
+#define WM9090_WRITE_SEQUENCER_0 0x46
+#define WM9090_WRITE_SEQUENCER_1 0x47
+#define WM9090_WRITE_SEQUENCER_2 0x48
+#define WM9090_WRITE_SEQUENCER_3 0x49
+#define WM9090_WRITE_SEQUENCER_4 0x4A
+#define WM9090_WRITE_SEQUENCER_5 0x4B
+#define WM9090_CHARGE_PUMP_1 0x4C
+#define WM9090_DC_SERVO_0 0x54
+#define WM9090_DC_SERVO_1 0x55
+#define WM9090_DC_SERVO_3 0x57
+#define WM9090_DC_SERVO_READBACK_0 0x58
+#define WM9090_DC_SERVO_READBACK_1 0x59
+#define WM9090_DC_SERVO_READBACK_2 0x5A
+#define WM9090_ANALOGUE_HP_0 0x60
+#define WM9090_AGC_CONTROL_0 0x62
+#define WM9090_AGC_CONTROL_1 0x63
+#define WM9090_AGC_CONTROL_2 0x64
+
+#define WM9090_REGISTER_COUNT 40
+#define WM9090_MAX_REGISTER 0x64
+
+/*
+ * Field Definitions.
+ */
+
+/*
+ * R0 (0x00) - Software Reset
+ */
+#define WM9090_SW_RESET_MASK 0xFFFF /* SW_RESET - [15:0] */
+#define WM9090_SW_RESET_SHIFT 0 /* SW_RESET - [15:0] */
+#define WM9090_SW_RESET_WIDTH 16 /* SW_RESET - [15:0] */
+
+/*
+ * R1 (0x01) - Power Management (1)
+ */
+#define WM9090_SPKOUTL_ENA 0x1000 /* SPKOUTL_ENA */
+#define WM9090_SPKOUTL_ENA_MASK 0x1000 /* SPKOUTL_ENA */
+#define WM9090_SPKOUTL_ENA_SHIFT 12 /* SPKOUTL_ENA */
+#define WM9090_SPKOUTL_ENA_WIDTH 1 /* SPKOUTL_ENA */
+#define WM9090_HPOUT1L_ENA 0x0200 /* HPOUT1L_ENA */
+#define WM9090_HPOUT1L_ENA_MASK 0x0200 /* HPOUT1L_ENA */
+#define WM9090_HPOUT1L_ENA_SHIFT 9 /* HPOUT1L_ENA */
+#define WM9090_HPOUT1L_ENA_WIDTH 1 /* HPOUT1L_ENA */
+#define WM9090_HPOUT1R_ENA 0x0100 /* HPOUT1R_ENA */
+#define WM9090_HPOUT1R_ENA_MASK 0x0100 /* HPOUT1R_ENA */
+#define WM9090_HPOUT1R_ENA_SHIFT 8 /* HPOUT1R_ENA */
+#define WM9090_HPOUT1R_ENA_WIDTH 1 /* HPOUT1R_ENA */
+#define WM9090_OSC_ENA 0x0008 /* OSC_ENA */
+#define WM9090_OSC_ENA_MASK 0x0008 /* OSC_ENA */
+#define WM9090_OSC_ENA_SHIFT 3 /* OSC_ENA */
+#define WM9090_OSC_ENA_WIDTH 1 /* OSC_ENA */
+#define WM9090_VMID_RES_MASK 0x0006 /* VMID_RES - [2:1] */
+#define WM9090_VMID_RES_SHIFT 1 /* VMID_RES - [2:1] */
+#define WM9090_VMID_RES_WIDTH 2 /* VMID_RES - [2:1] */
+#define WM9090_BIAS_ENA 0x0001 /* BIAS_ENA */
+#define WM9090_BIAS_ENA_MASK 0x0001 /* BIAS_ENA */
+#define WM9090_BIAS_ENA_SHIFT 0 /* BIAS_ENA */
+#define WM9090_BIAS_ENA_WIDTH 1 /* BIAS_ENA */
+
+/*
+ * R2 (0x02) - Power Management (2)
+ */
+#define WM9090_TSHUT 0x8000 /* TSHUT */
+#define WM9090_TSHUT_MASK 0x8000 /* TSHUT */
+#define WM9090_TSHUT_SHIFT 15 /* TSHUT */
+#define WM9090_TSHUT_WIDTH 1 /* TSHUT */
+#define WM9090_TSHUT_ENA 0x4000 /* TSHUT_ENA */
+#define WM9090_TSHUT_ENA_MASK 0x4000 /* TSHUT_ENA */
+#define WM9090_TSHUT_ENA_SHIFT 14 /* TSHUT_ENA */
+#define WM9090_TSHUT_ENA_WIDTH 1 /* TSHUT_ENA */
+#define WM9090_TSHUT_OPDIS 0x2000 /* TSHUT_OPDIS */
+#define WM9090_TSHUT_OPDIS_MASK 0x2000 /* TSHUT_OPDIS */
+#define WM9090_TSHUT_OPDIS_SHIFT 13 /* TSHUT_OPDIS */
+#define WM9090_TSHUT_OPDIS_WIDTH 1 /* TSHUT_OPDIS */
+#define WM9090_IN1A_ENA 0x0080 /* IN1A_ENA */
+#define WM9090_IN1A_ENA_MASK 0x0080 /* IN1A_ENA */
+#define WM9090_IN1A_ENA_SHIFT 7 /* IN1A_ENA */
+#define WM9090_IN1A_ENA_WIDTH 1 /* IN1A_ENA */
+#define WM9090_IN1B_ENA 0x0040 /* IN1B_ENA */
+#define WM9090_IN1B_ENA_MASK 0x0040 /* IN1B_ENA */
+#define WM9090_IN1B_ENA_SHIFT 6 /* IN1B_ENA */
+#define WM9090_IN1B_ENA_WIDTH 1 /* IN1B_ENA */
+#define WM9090_IN2A_ENA 0x0020 /* IN2A_ENA */
+#define WM9090_IN2A_ENA_MASK 0x0020 /* IN2A_ENA */
+#define WM9090_IN2A_ENA_SHIFT 5 /* IN2A_ENA */
+#define WM9090_IN2A_ENA_WIDTH 1 /* IN2A_ENA */
+#define WM9090_IN2B_ENA 0x0010 /* IN2B_ENA */
+#define WM9090_IN2B_ENA_MASK 0x0010 /* IN2B_ENA */
+#define WM9090_IN2B_ENA_SHIFT 4 /* IN2B_ENA */
+#define WM9090_IN2B_ENA_WIDTH 1 /* IN2B_ENA */
+
+/*
+ * R3 (0x03) - Power Management (3)
+ */
+#define WM9090_AGC_ENA 0x4000 /* AGC_ENA */
+#define WM9090_AGC_ENA_MASK 0x4000 /* AGC_ENA */
+#define WM9090_AGC_ENA_SHIFT 14 /* AGC_ENA */
+#define WM9090_AGC_ENA_WIDTH 1 /* AGC_ENA */
+#define WM9090_SPKLVOL_ENA 0x0100 /* SPKLVOL_ENA */
+#define WM9090_SPKLVOL_ENA_MASK 0x0100 /* SPKLVOL_ENA */
+#define WM9090_SPKLVOL_ENA_SHIFT 8 /* SPKLVOL_ENA */
+#define WM9090_SPKLVOL_ENA_WIDTH 1 /* SPKLVOL_ENA */
+#define WM9090_MIXOUTL_ENA 0x0020 /* MIXOUTL_ENA */
+#define WM9090_MIXOUTL_ENA_MASK 0x0020 /* MIXOUTL_ENA */
+#define WM9090_MIXOUTL_ENA_SHIFT 5 /* MIXOUTL_ENA */
+#define WM9090_MIXOUTL_ENA_WIDTH 1 /* MIXOUTL_ENA */
+#define WM9090_MIXOUTR_ENA 0x0010 /* MIXOUTR_ENA */
+#define WM9090_MIXOUTR_ENA_MASK 0x0010 /* MIXOUTR_ENA */
+#define WM9090_MIXOUTR_ENA_SHIFT 4 /* MIXOUTR_ENA */
+#define WM9090_MIXOUTR_ENA_WIDTH 1 /* MIXOUTR_ENA */
+#define WM9090_SPKMIX_ENA 0x0008 /* SPKMIX_ENA */
+#define WM9090_SPKMIX_ENA_MASK 0x0008 /* SPKMIX_ENA */
+#define WM9090_SPKMIX_ENA_SHIFT 3 /* SPKMIX_ENA */
+#define WM9090_SPKMIX_ENA_WIDTH 1 /* SPKMIX_ENA */
+
+/*
+ * R6 (0x06) - Clocking 1
+ */
+#define WM9090_TOCLK_RATE 0x8000 /* TOCLK_RATE */
+#define WM9090_TOCLK_RATE_MASK 0x8000 /* TOCLK_RATE */
+#define WM9090_TOCLK_RATE_SHIFT 15 /* TOCLK_RATE */
+#define WM9090_TOCLK_RATE_WIDTH 1 /* TOCLK_RATE */
+#define WM9090_TOCLK_ENA 0x4000 /* TOCLK_ENA */
+#define WM9090_TOCLK_ENA_MASK 0x4000 /* TOCLK_ENA */
+#define WM9090_TOCLK_ENA_SHIFT 14 /* TOCLK_ENA */
+#define WM9090_TOCLK_ENA_WIDTH 1 /* TOCLK_ENA */
+
+/*
+ * R22 (0x16) - IN1 Line Control
+ */
+#define WM9090_IN1_DIFF 0x0002 /* IN1_DIFF */
+#define WM9090_IN1_DIFF_MASK 0x0002 /* IN1_DIFF */
+#define WM9090_IN1_DIFF_SHIFT 1 /* IN1_DIFF */
+#define WM9090_IN1_DIFF_WIDTH 1 /* IN1_DIFF */
+#define WM9090_IN1_CLAMP 0x0001 /* IN1_CLAMP */
+#define WM9090_IN1_CLAMP_MASK 0x0001 /* IN1_CLAMP */
+#define WM9090_IN1_CLAMP_SHIFT 0 /* IN1_CLAMP */
+#define WM9090_IN1_CLAMP_WIDTH 1 /* IN1_CLAMP */
+
+/*
+ * R23 (0x17) - IN2 Line Control
+ */
+#define WM9090_IN2_DIFF 0x0002 /* IN2_DIFF */
+#define WM9090_IN2_DIFF_MASK 0x0002 /* IN2_DIFF */
+#define WM9090_IN2_DIFF_SHIFT 1 /* IN2_DIFF */
+#define WM9090_IN2_DIFF_WIDTH 1 /* IN2_DIFF */
+#define WM9090_IN2_CLAMP 0x0001 /* IN2_CLAMP */
+#define WM9090_IN2_CLAMP_MASK 0x0001 /* IN2_CLAMP */
+#define WM9090_IN2_CLAMP_SHIFT 0 /* IN2_CLAMP */
+#define WM9090_IN2_CLAMP_WIDTH 1 /* IN2_CLAMP */
+
+/*
+ * R24 (0x18) - IN1 Line Input A Volume
+ */
+#define WM9090_IN1_VU 0x0100 /* IN1_VU */
+#define WM9090_IN1_VU_MASK 0x0100 /* IN1_VU */
+#define WM9090_IN1_VU_SHIFT 8 /* IN1_VU */
+#define WM9090_IN1_VU_WIDTH 1 /* IN1_VU */
+#define WM9090_IN1A_MUTE 0x0080 /* IN1A_MUTE */
+#define WM9090_IN1A_MUTE_MASK 0x0080 /* IN1A_MUTE */
+#define WM9090_IN1A_MUTE_SHIFT 7 /* IN1A_MUTE */
+#define WM9090_IN1A_MUTE_WIDTH 1 /* IN1A_MUTE */
+#define WM9090_IN1A_ZC 0x0040 /* IN1A_ZC */
+#define WM9090_IN1A_ZC_MASK 0x0040 /* IN1A_ZC */
+#define WM9090_IN1A_ZC_SHIFT 6 /* IN1A_ZC */
+#define WM9090_IN1A_ZC_WIDTH 1 /* IN1A_ZC */
+#define WM9090_IN1A_VOL_MASK 0x0007 /* IN1A_VOL - [2:0] */
+#define WM9090_IN1A_VOL_SHIFT 0 /* IN1A_VOL - [2:0] */
+#define WM9090_IN1A_VOL_WIDTH 3 /* IN1A_VOL - [2:0] */
+
+/*
+ * R25 (0x19) - IN1 Line Input B Volume
+ */
+#define WM9090_IN1_VU 0x0100 /* IN1_VU */
+#define WM9090_IN1_VU_MASK 0x0100 /* IN1_VU */
+#define WM9090_IN1_VU_SHIFT 8 /* IN1_VU */
+#define WM9090_IN1_VU_WIDTH 1 /* IN1_VU */
+#define WM9090_IN1B_MUTE 0x0080 /* IN1B_MUTE */
+#define WM9090_IN1B_MUTE_MASK 0x0080 /* IN1B_MUTE */
+#define WM9090_IN1B_MUTE_SHIFT 7 /* IN1B_MUTE */
+#define WM9090_IN1B_MUTE_WIDTH 1 /* IN1B_MUTE */
+#define WM9090_IN1B_ZC 0x0040 /* IN1B_ZC */
+#define WM9090_IN1B_ZC_MASK 0x0040 /* IN1B_ZC */
+#define WM9090_IN1B_ZC_SHIFT 6 /* IN1B_ZC */
+#define WM9090_IN1B_ZC_WIDTH 1 /* IN1B_ZC */
+#define WM9090_IN1B_VOL_MASK 0x0007 /* IN1B_VOL - [2:0] */
+#define WM9090_IN1B_VOL_SHIFT 0 /* IN1B_VOL - [2:0] */
+#define WM9090_IN1B_VOL_WIDTH 3 /* IN1B_VOL - [2:0] */
+
+/*
+ * R26 (0x1A) - IN2 Line Input A Volume
+ */
+#define WM9090_IN2_VU 0x0100 /* IN2_VU */
+#define WM9090_IN2_VU_MASK 0x0100 /* IN2_VU */
+#define WM9090_IN2_VU_SHIFT 8 /* IN2_VU */
+#define WM9090_IN2_VU_WIDTH 1 /* IN2_VU */
+#define WM9090_IN2A_MUTE 0x0080 /* IN2A_MUTE */
+#define WM9090_IN2A_MUTE_MASK 0x0080 /* IN2A_MUTE */
+#define WM9090_IN2A_MUTE_SHIFT 7 /* IN2A_MUTE */
+#define WM9090_IN2A_MUTE_WIDTH 1 /* IN2A_MUTE */
+#define WM9090_IN2A_ZC 0x0040 /* IN2A_ZC */
+#define WM9090_IN2A_ZC_MASK 0x0040 /* IN2A_ZC */
+#define WM9090_IN2A_ZC_SHIFT 6 /* IN2A_ZC */
+#define WM9090_IN2A_ZC_WIDTH 1 /* IN2A_ZC */
+#define WM9090_IN2A_VOL_MASK 0x0007 /* IN2A_VOL - [2:0] */
+#define WM9090_IN2A_VOL_SHIFT 0 /* IN2A_VOL - [2:0] */
+#define WM9090_IN2A_VOL_WIDTH 3 /* IN2A_VOL - [2:0] */
+
+/*
+ * R27 (0x1B) - IN2 Line Input B Volume
+ */
+#define WM9090_IN2_VU 0x0100 /* IN2_VU */
+#define WM9090_IN2_VU_MASK 0x0100 /* IN2_VU */
+#define WM9090_IN2_VU_SHIFT 8 /* IN2_VU */
+#define WM9090_IN2_VU_WIDTH 1 /* IN2_VU */
+#define WM9090_IN2B_MUTE 0x0080 /* IN2B_MUTE */
+#define WM9090_IN2B_MUTE_MASK 0x0080 /* IN2B_MUTE */
+#define WM9090_IN2B_MUTE_SHIFT 7 /* IN2B_MUTE */
+#define WM9090_IN2B_MUTE_WIDTH 1 /* IN2B_MUTE */
+#define WM9090_IN2B_ZC 0x0040 /* IN2B_ZC */
+#define WM9090_IN2B_ZC_MASK 0x0040 /* IN2B_ZC */
+#define WM9090_IN2B_ZC_SHIFT 6 /* IN2B_ZC */
+#define WM9090_IN2B_ZC_WIDTH 1 /* IN2B_ZC */
+#define WM9090_IN2B_VOL_MASK 0x0007 /* IN2B_VOL - [2:0] */
+#define WM9090_IN2B_VOL_SHIFT 0 /* IN2B_VOL - [2:0] */
+#define WM9090_IN2B_VOL_WIDTH 3 /* IN2B_VOL - [2:0] */
+
+/*
+ * R28 (0x1C) - Left Output Volume
+ */
+#define WM9090_HPOUT1_VU 0x0100 /* HPOUT1_VU */
+#define WM9090_HPOUT1_VU_MASK 0x0100 /* HPOUT1_VU */
+#define WM9090_HPOUT1_VU_SHIFT 8 /* HPOUT1_VU */
+#define WM9090_HPOUT1_VU_WIDTH 1 /* HPOUT1_VU */
+#define WM9090_HPOUT1L_ZC 0x0080 /* HPOUT1L_ZC */
+#define WM9090_HPOUT1L_ZC_MASK 0x0080 /* HPOUT1L_ZC */
+#define WM9090_HPOUT1L_ZC_SHIFT 7 /* HPOUT1L_ZC */
+#define WM9090_HPOUT1L_ZC_WIDTH 1 /* HPOUT1L_ZC */
+#define WM9090_HPOUT1L_MUTE 0x0040 /* HPOUT1L_MUTE */
+#define WM9090_HPOUT1L_MUTE_MASK 0x0040 /* HPOUT1L_MUTE */
+#define WM9090_HPOUT1L_MUTE_SHIFT 6 /* HPOUT1L_MUTE */
+#define WM9090_HPOUT1L_MUTE_WIDTH 1 /* HPOUT1L_MUTE */
+#define WM9090_HPOUT1L_VOL_MASK 0x003F /* HPOUT1L_VOL - [5:0] */
+#define WM9090_HPOUT1L_VOL_SHIFT 0 /* HPOUT1L_VOL - [5:0] */
+#define WM9090_HPOUT1L_VOL_WIDTH 6 /* HPOUT1L_VOL - [5:0] */
+
+/*
+ * R29 (0x1D) - Right Output Volume
+ */
+#define WM9090_HPOUT1_VU 0x0100 /* HPOUT1_VU */
+#define WM9090_HPOUT1_VU_MASK 0x0100 /* HPOUT1_VU */
+#define WM9090_HPOUT1_VU_SHIFT 8 /* HPOUT1_VU */
+#define WM9090_HPOUT1_VU_WIDTH 1 /* HPOUT1_VU */
+#define WM9090_HPOUT1R_ZC 0x0080 /* HPOUT1R_ZC */
+#define WM9090_HPOUT1R_ZC_MASK 0x0080 /* HPOUT1R_ZC */
+#define WM9090_HPOUT1R_ZC_SHIFT 7 /* HPOUT1R_ZC */
+#define WM9090_HPOUT1R_ZC_WIDTH 1 /* HPOUT1R_ZC */
+#define WM9090_HPOUT1R_MUTE 0x0040 /* HPOUT1R_MUTE */
+#define WM9090_HPOUT1R_MUTE_MASK 0x0040 /* HPOUT1R_MUTE */
+#define WM9090_HPOUT1R_MUTE_SHIFT 6 /* HPOUT1R_MUTE */
+#define WM9090_HPOUT1R_MUTE_WIDTH 1 /* HPOUT1R_MUTE */
+#define WM9090_HPOUT1R_VOL_MASK 0x003F /* HPOUT1R_VOL - [5:0] */
+#define WM9090_HPOUT1R_VOL_SHIFT 0 /* HPOUT1R_VOL - [5:0] */
+#define WM9090_HPOUT1R_VOL_WIDTH 6 /* HPOUT1R_VOL - [5:0] */
+
+/*
+ * R34 (0x22) - SPKMIXL Attenuation
+ */
+#define WM9090_SPKMIX_MUTE 0x0100 /* SPKMIX_MUTE */
+#define WM9090_SPKMIX_MUTE_MASK 0x0100 /* SPKMIX_MUTE */
+#define WM9090_SPKMIX_MUTE_SHIFT 8 /* SPKMIX_MUTE */
+#define WM9090_SPKMIX_MUTE_WIDTH 1 /* SPKMIX_MUTE */
+#define WM9090_IN1A_SPKMIX_VOL_MASK 0x00C0 /* IN1A_SPKMIX_VOL - [7:6] */
+#define WM9090_IN1A_SPKMIX_VOL_SHIFT 6 /* IN1A_SPKMIX_VOL - [7:6] */
+#define WM9090_IN1A_SPKMIX_VOL_WIDTH 2 /* IN1A_SPKMIX_VOL - [7:6] */
+#define WM9090_IN1B_SPKMIX_VOL_MASK 0x0030 /* IN1B_SPKMIX_VOL - [5:4] */
+#define WM9090_IN1B_SPKMIX_VOL_SHIFT 4 /* IN1B_SPKMIX_VOL - [5:4] */
+#define WM9090_IN1B_SPKMIX_VOL_WIDTH 2 /* IN1B_SPKMIX_VOL - [5:4] */
+#define WM9090_IN2A_SPKMIX_VOL_MASK 0x000C /* IN2A_SPKMIX_VOL - [3:2] */
+#define WM9090_IN2A_SPKMIX_VOL_SHIFT 2 /* IN2A_SPKMIX_VOL - [3:2] */
+#define WM9090_IN2A_SPKMIX_VOL_WIDTH 2 /* IN2A_SPKMIX_VOL - [3:2] */
+#define WM9090_IN2B_SPKMIX_VOL_MASK 0x0003 /* IN2B_SPKMIX_VOL - [1:0] */
+#define WM9090_IN2B_SPKMIX_VOL_SHIFT 0 /* IN2B_SPKMIX_VOL - [1:0] */
+#define WM9090_IN2B_SPKMIX_VOL_WIDTH 2 /* IN2B_SPKMIX_VOL - [1:0] */
+
+/*
+ * R36 (0x24) - SPKOUT Mixers
+ */
+#define WM9090_SPKMIXL_TO_SPKOUTL 0x0010 /* SPKMIXL_TO_SPKOUTL */
+#define WM9090_SPKMIXL_TO_SPKOUTL_MASK 0x0010 /* SPKMIXL_TO_SPKOUTL */
+#define WM9090_SPKMIXL_TO_SPKOUTL_SHIFT 4 /* SPKMIXL_TO_SPKOUTL */
+#define WM9090_SPKMIXL_TO_SPKOUTL_WIDTH 1 /* SPKMIXL_TO_SPKOUTL */
+
+/*
+ * R37 (0x25) - ClassD3
+ */
+#define WM9090_SPKOUTL_BOOST_MASK 0x0038 /* SPKOUTL_BOOST - [5:3] */
+#define WM9090_SPKOUTL_BOOST_SHIFT 3 /* SPKOUTL_BOOST - [5:3] */
+#define WM9090_SPKOUTL_BOOST_WIDTH 3 /* SPKOUTL_BOOST - [5:3] */
+
+/*
+ * R38 (0x26) - Speaker Volume Left
+ */
+#define WM9090_SPKOUT_VU 0x0100 /* SPKOUT_VU */
+#define WM9090_SPKOUT_VU_MASK 0x0100 /* SPKOUT_VU */
+#define WM9090_SPKOUT_VU_SHIFT 8 /* SPKOUT_VU */
+#define WM9090_SPKOUT_VU_WIDTH 1 /* SPKOUT_VU */
+#define WM9090_SPKOUTL_ZC 0x0080 /* SPKOUTL_ZC */
+#define WM9090_SPKOUTL_ZC_MASK 0x0080 /* SPKOUTL_ZC */
+#define WM9090_SPKOUTL_ZC_SHIFT 7 /* SPKOUTL_ZC */
+#define WM9090_SPKOUTL_ZC_WIDTH 1 /* SPKOUTL_ZC */
+#define WM9090_SPKOUTL_MUTE 0x0040 /* SPKOUTL_MUTE */
+#define WM9090_SPKOUTL_MUTE_MASK 0x0040 /* SPKOUTL_MUTE */
+#define WM9090_SPKOUTL_MUTE_SHIFT 6 /* SPKOUTL_MUTE */
+#define WM9090_SPKOUTL_MUTE_WIDTH 1 /* SPKOUTL_MUTE */
+#define WM9090_SPKOUTL_VOL_MASK 0x003F /* SPKOUTL_VOL - [5:0] */
+#define WM9090_SPKOUTL_VOL_SHIFT 0 /* SPKOUTL_VOL - [5:0] */
+#define WM9090_SPKOUTL_VOL_WIDTH 6 /* SPKOUTL_VOL - [5:0] */
+
+/*
+ * R45 (0x2D) - Output Mixer1
+ */
+#define WM9090_IN1A_TO_MIXOUTL 0x0040 /* IN1A_TO_MIXOUTL */
+#define WM9090_IN1A_TO_MIXOUTL_MASK 0x0040 /* IN1A_TO_MIXOUTL */
+#define WM9090_IN1A_TO_MIXOUTL_SHIFT 6 /* IN1A_TO_MIXOUTL */
+#define WM9090_IN1A_TO_MIXOUTL_WIDTH 1 /* IN1A_TO_MIXOUTL */
+#define WM9090_IN2A_TO_MIXOUTL 0x0004 /* IN2A_TO_MIXOUTL */
+#define WM9090_IN2A_TO_MIXOUTL_MASK 0x0004 /* IN2A_TO_MIXOUTL */
+#define WM9090_IN2A_TO_MIXOUTL_SHIFT 2 /* IN2A_TO_MIXOUTL */
+#define WM9090_IN2A_TO_MIXOUTL_WIDTH 1 /* IN2A_TO_MIXOUTL */
+
+/*
+ * R46 (0x2E) - Output Mixer2
+ */
+#define WM9090_IN1A_TO_MIXOUTR 0x0040 /* IN1A_TO_MIXOUTR */
+#define WM9090_IN1A_TO_MIXOUTR_MASK 0x0040 /* IN1A_TO_MIXOUTR */
+#define WM9090_IN1A_TO_MIXOUTR_SHIFT 6 /* IN1A_TO_MIXOUTR */
+#define WM9090_IN1A_TO_MIXOUTR_WIDTH 1 /* IN1A_TO_MIXOUTR */
+#define WM9090_IN1B_TO_MIXOUTR 0x0010 /* IN1B_TO_MIXOUTR */
+#define WM9090_IN1B_TO_MIXOUTR_MASK 0x0010 /* IN1B_TO_MIXOUTR */
+#define WM9090_IN1B_TO_MIXOUTR_SHIFT 4 /* IN1B_TO_MIXOUTR */
+#define WM9090_IN1B_TO_MIXOUTR_WIDTH 1 /* IN1B_TO_MIXOUTR */
+#define WM9090_IN2A_TO_MIXOUTR 0x0004 /* IN2A_TO_MIXOUTR */
+#define WM9090_IN2A_TO_MIXOUTR_MASK 0x0004 /* IN2A_TO_MIXOUTR */
+#define WM9090_IN2A_TO_MIXOUTR_SHIFT 2 /* IN2A_TO_MIXOUTR */
+#define WM9090_IN2A_TO_MIXOUTR_WIDTH 1 /* IN2A_TO_MIXOUTR */
+#define WM9090_IN2B_TO_MIXOUTR 0x0001 /* IN2B_TO_MIXOUTR */
+#define WM9090_IN2B_TO_MIXOUTR_MASK 0x0001 /* IN2B_TO_MIXOUTR */
+#define WM9090_IN2B_TO_MIXOUTR_SHIFT 0 /* IN2B_TO_MIXOUTR */
+#define WM9090_IN2B_TO_MIXOUTR_WIDTH 1 /* IN2B_TO_MIXOUTR */
+
+/*
+ * R47 (0x2F) - Output Mixer3
+ */
+#define WM9090_MIXOUTL_MUTE 0x0100 /* MIXOUTL_MUTE */
+#define WM9090_MIXOUTL_MUTE_MASK 0x0100 /* MIXOUTL_MUTE */
+#define WM9090_MIXOUTL_MUTE_SHIFT 8 /* MIXOUTL_MUTE */
+#define WM9090_MIXOUTL_MUTE_WIDTH 1 /* MIXOUTL_MUTE */
+#define WM9090_IN1A_MIXOUTL_VOL_MASK 0x00C0 /* IN1A_MIXOUTL_VOL - [7:6] */
+#define WM9090_IN1A_MIXOUTL_VOL_SHIFT 6 /* IN1A_MIXOUTL_VOL - [7:6] */
+#define WM9090_IN1A_MIXOUTL_VOL_WIDTH 2 /* IN1A_MIXOUTL_VOL - [7:6] */
+#define WM9090_IN2A_MIXOUTL_VOL_MASK 0x000C /* IN2A_MIXOUTL_VOL - [3:2] */
+#define WM9090_IN2A_MIXOUTL_VOL_SHIFT 2 /* IN2A_MIXOUTL_VOL - [3:2] */
+#define WM9090_IN2A_MIXOUTL_VOL_WIDTH 2 /* IN2A_MIXOUTL_VOL - [3:2] */
+
+/*
+ * R48 (0x30) - Output Mixer4
+ */
+#define WM9090_MIXOUTR_MUTE 0x0100 /* MIXOUTR_MUTE */
+#define WM9090_MIXOUTR_MUTE_MASK 0x0100 /* MIXOUTR_MUTE */
+#define WM9090_MIXOUTR_MUTE_SHIFT 8 /* MIXOUTR_MUTE */
+#define WM9090_MIXOUTR_MUTE_WIDTH 1 /* MIXOUTR_MUTE */
+#define WM9090_IN1A_MIXOUTR_VOL_MASK 0x00C0 /* IN1A_MIXOUTR_VOL - [7:6] */
+#define WM9090_IN1A_MIXOUTR_VOL_SHIFT 6 /* IN1A_MIXOUTR_VOL - [7:6] */
+#define WM9090_IN1A_MIXOUTR_VOL_WIDTH 2 /* IN1A_MIXOUTR_VOL - [7:6] */
+#define WM9090_IN1B_MIXOUTR_VOL_MASK 0x0030 /* IN1B_MIXOUTR_VOL - [5:4] */
+#define WM9090_IN1B_MIXOUTR_VOL_SHIFT 4 /* IN1B_MIXOUTR_VOL - [5:4] */
+#define WM9090_IN1B_MIXOUTR_VOL_WIDTH 2 /* IN1B_MIXOUTR_VOL - [5:4] */
+#define WM9090_IN2A_MIXOUTR_VOL_MASK 0x000C /* IN2A_MIXOUTR_VOL - [3:2] */
+#define WM9090_IN2A_MIXOUTR_VOL_SHIFT 2 /* IN2A_MIXOUTR_VOL - [3:2] */
+#define WM9090_IN2A_MIXOUTR_VOL_WIDTH 2 /* IN2A_MIXOUTR_VOL - [3:2] */
+#define WM9090_IN2B_MIXOUTR_VOL_MASK 0x0003 /* IN2B_MIXOUTR_VOL - [1:0] */
+#define WM9090_IN2B_MIXOUTR_VOL_SHIFT 0 /* IN2B_MIXOUTR_VOL - [1:0] */
+#define WM9090_IN2B_MIXOUTR_VOL_WIDTH 2 /* IN2B_MIXOUTR_VOL - [1:0] */
+
+/*
+ * R54 (0x36) - Speaker Mixer
+ */
+#define WM9090_IN1A_TO_SPKMIX 0x0040 /* IN1A_TO_SPKMIX */
+#define WM9090_IN1A_TO_SPKMIX_MASK 0x0040 /* IN1A_TO_SPKMIX */
+#define WM9090_IN1A_TO_SPKMIX_SHIFT 6 /* IN1A_TO_SPKMIX */
+#define WM9090_IN1A_TO_SPKMIX_WIDTH 1 /* IN1A_TO_SPKMIX */
+#define WM9090_IN1B_TO_SPKMIX 0x0010 /* IN1B_TO_SPKMIX */
+#define WM9090_IN1B_TO_SPKMIX_MASK 0x0010 /* IN1B_TO_SPKMIX */
+#define WM9090_IN1B_TO_SPKMIX_SHIFT 4 /* IN1B_TO_SPKMIX */
+#define WM9090_IN1B_TO_SPKMIX_WIDTH 1 /* IN1B_TO_SPKMIX */
+#define WM9090_IN2A_TO_SPKMIX 0x0004 /* IN2A_TO_SPKMIX */
+#define WM9090_IN2A_TO_SPKMIX_MASK 0x0004 /* IN2A_TO_SPKMIX */
+#define WM9090_IN2A_TO_SPKMIX_SHIFT 2 /* IN2A_TO_SPKMIX */
+#define WM9090_IN2A_TO_SPKMIX_WIDTH 1 /* IN2A_TO_SPKMIX */
+#define WM9090_IN2B_TO_SPKMIX 0x0001 /* IN2B_TO_SPKMIX */
+#define WM9090_IN2B_TO_SPKMIX_MASK 0x0001 /* IN2B_TO_SPKMIX */
+#define WM9090_IN2B_TO_SPKMIX_SHIFT 0 /* IN2B_TO_SPKMIX */
+#define WM9090_IN2B_TO_SPKMIX_WIDTH 1 /* IN2B_TO_SPKMIX */
+
+/*
+ * R57 (0x39) - AntiPOP2
+ */
+#define WM9090_VMID_BUF_ENA 0x0008 /* VMID_BUF_ENA */
+#define WM9090_VMID_BUF_ENA_MASK 0x0008 /* VMID_BUF_ENA */
+#define WM9090_VMID_BUF_ENA_SHIFT 3 /* VMID_BUF_ENA */
+#define WM9090_VMID_BUF_ENA_WIDTH 1 /* VMID_BUF_ENA */
+#define WM9090_VMID_ENA 0x0001 /* VMID_ENA */
+#define WM9090_VMID_ENA_MASK 0x0001 /* VMID_ENA */
+#define WM9090_VMID_ENA_SHIFT 0 /* VMID_ENA */
+#define WM9090_VMID_ENA_WIDTH 1 /* VMID_ENA */
+
+/*
+ * R70 (0x46) - Write Sequencer 0
+ */
+#define WM9090_WSEQ_ENA 0x0100 /* WSEQ_ENA */
+#define WM9090_WSEQ_ENA_MASK 0x0100 /* WSEQ_ENA */
+#define WM9090_WSEQ_ENA_SHIFT 8 /* WSEQ_ENA */
+#define WM9090_WSEQ_ENA_WIDTH 1 /* WSEQ_ENA */
+#define WM9090_WSEQ_WRITE_INDEX_MASK 0x000F /* WSEQ_WRITE_INDEX - [3:0] */
+#define WM9090_WSEQ_WRITE_INDEX_SHIFT 0 /* WSEQ_WRITE_INDEX - [3:0] */
+#define WM9090_WSEQ_WRITE_INDEX_WIDTH 4 /* WSEQ_WRITE_INDEX - [3:0] */
+
+/*
+ * R71 (0x47) - Write Sequencer 1
+ */
+#define WM9090_WSEQ_DATA_WIDTH_MASK 0x7000 /* WSEQ_DATA_WIDTH - [14:12] */
+#define WM9090_WSEQ_DATA_WIDTH_SHIFT 12 /* WSEQ_DATA_WIDTH - [14:12] */
+#define WM9090_WSEQ_DATA_WIDTH_WIDTH 3 /* WSEQ_DATA_WIDTH - [14:12] */
+#define WM9090_WSEQ_DATA_START_MASK 0x0F00 /* WSEQ_DATA_START - [11:8] */
+#define WM9090_WSEQ_DATA_START_SHIFT 8 /* WSEQ_DATA_START - [11:8] */
+#define WM9090_WSEQ_DATA_START_WIDTH 4 /* WSEQ_DATA_START - [11:8] */
+#define WM9090_WSEQ_ADDR_MASK 0x00FF /* WSEQ_ADDR - [7:0] */
+#define WM9090_WSEQ_ADDR_SHIFT 0 /* WSEQ_ADDR - [7:0] */
+#define WM9090_WSEQ_ADDR_WIDTH 8 /* WSEQ_ADDR - [7:0] */
+
+/*
+ * R72 (0x48) - Write Sequencer 2
+ */
+#define WM9090_WSEQ_EOS 0x4000 /* WSEQ_EOS */
+#define WM9090_WSEQ_EOS_MASK 0x4000 /* WSEQ_EOS */
+#define WM9090_WSEQ_EOS_SHIFT 14 /* WSEQ_EOS */
+#define WM9090_WSEQ_EOS_WIDTH 1 /* WSEQ_EOS */
+#define WM9090_WSEQ_DELAY_MASK 0x0F00 /* WSEQ_DELAY - [11:8] */
+#define WM9090_WSEQ_DELAY_SHIFT 8 /* WSEQ_DELAY - [11:8] */
+#define WM9090_WSEQ_DELAY_WIDTH 4 /* WSEQ_DELAY - [11:8] */
+#define WM9090_WSEQ_DATA_MASK 0x00FF /* WSEQ_DATA - [7:0] */
+#define WM9090_WSEQ_DATA_SHIFT 0 /* WSEQ_DATA - [7:0] */
+#define WM9090_WSEQ_DATA_WIDTH 8 /* WSEQ_DATA - [7:0] */
+
+/*
+ * R73 (0x49) - Write Sequencer 3
+ */
+#define WM9090_WSEQ_ABORT 0x0200 /* WSEQ_ABORT */
+#define WM9090_WSEQ_ABORT_MASK 0x0200 /* WSEQ_ABORT */
+#define WM9090_WSEQ_ABORT_SHIFT 9 /* WSEQ_ABORT */
+#define WM9090_WSEQ_ABORT_WIDTH 1 /* WSEQ_ABORT */
+#define WM9090_WSEQ_START 0x0100 /* WSEQ_START */
+#define WM9090_WSEQ_START_MASK 0x0100 /* WSEQ_START */
+#define WM9090_WSEQ_START_SHIFT 8 /* WSEQ_START */
+#define WM9090_WSEQ_START_WIDTH 1 /* WSEQ_START */
+#define WM9090_WSEQ_START_INDEX_MASK 0x003F /* WSEQ_START_INDEX - [5:0] */
+#define WM9090_WSEQ_START_INDEX_SHIFT 0 /* WSEQ_START_INDEX - [5:0] */
+#define WM9090_WSEQ_START_INDEX_WIDTH 6 /* WSEQ_START_INDEX - [5:0] */
+
+/*
+ * R74 (0x4A) - Write Sequencer 4
+ */
+#define WM9090_WSEQ_BUSY 0x0001 /* WSEQ_BUSY */
+#define WM9090_WSEQ_BUSY_MASK 0x0001 /* WSEQ_BUSY */
+#define WM9090_WSEQ_BUSY_SHIFT 0 /* WSEQ_BUSY */
+#define WM9090_WSEQ_BUSY_WIDTH 1 /* WSEQ_BUSY */
+
+/*
+ * R75 (0x4B) - Write Sequencer 5
+ */
+#define WM9090_WSEQ_CURRENT_INDEX_MASK 0x003F /* WSEQ_CURRENT_INDEX - [5:0] */
+#define WM9090_WSEQ_CURRENT_INDEX_SHIFT 0 /* WSEQ_CURRENT_INDEX - [5:0] */
+#define WM9090_WSEQ_CURRENT_INDEX_WIDTH 6 /* WSEQ_CURRENT_INDEX - [5:0] */
+
+/*
+ * R76 (0x4C) - Charge Pump 1
+ */
+#define WM9090_CP_ENA 0x8000 /* CP_ENA */
+#define WM9090_CP_ENA_MASK 0x8000 /* CP_ENA */
+#define WM9090_CP_ENA_SHIFT 15 /* CP_ENA */
+#define WM9090_CP_ENA_WIDTH 1 /* CP_ENA */
+
+/*
+ * R84 (0x54) - DC Servo 0
+ */
+#define WM9090_DCS_TRIG_SINGLE_1 0x2000 /* DCS_TRIG_SINGLE_1 */
+#define WM9090_DCS_TRIG_SINGLE_1_MASK 0x2000 /* DCS_TRIG_SINGLE_1 */
+#define WM9090_DCS_TRIG_SINGLE_1_SHIFT 13 /* DCS_TRIG_SINGLE_1 */
+#define WM9090_DCS_TRIG_SINGLE_1_WIDTH 1 /* DCS_TRIG_SINGLE_1 */
+#define WM9090_DCS_TRIG_SINGLE_0 0x1000 /* DCS_TRIG_SINGLE_0 */
+#define WM9090_DCS_TRIG_SINGLE_0_MASK 0x1000 /* DCS_TRIG_SINGLE_0 */
+#define WM9090_DCS_TRIG_SINGLE_0_SHIFT 12 /* DCS_TRIG_SINGLE_0 */
+#define WM9090_DCS_TRIG_SINGLE_0_WIDTH 1 /* DCS_TRIG_SINGLE_0 */
+#define WM9090_DCS_TRIG_SERIES_1 0x0200 /* DCS_TRIG_SERIES_1 */
+#define WM9090_DCS_TRIG_SERIES_1_MASK 0x0200 /* DCS_TRIG_SERIES_1 */
+#define WM9090_DCS_TRIG_SERIES_1_SHIFT 9 /* DCS_TRIG_SERIES_1 */
+#define WM9090_DCS_TRIG_SERIES_1_WIDTH 1 /* DCS_TRIG_SERIES_1 */
+#define WM9090_DCS_TRIG_SERIES_0 0x0100 /* DCS_TRIG_SERIES_0 */
+#define WM9090_DCS_TRIG_SERIES_0_MASK 0x0100 /* DCS_TRIG_SERIES_0 */
+#define WM9090_DCS_TRIG_SERIES_0_SHIFT 8 /* DCS_TRIG_SERIES_0 */
+#define WM9090_DCS_TRIG_SERIES_0_WIDTH 1 /* DCS_TRIG_SERIES_0 */
+#define WM9090_DCS_TRIG_STARTUP_1 0x0020 /* DCS_TRIG_STARTUP_1 */
+#define WM9090_DCS_TRIG_STARTUP_1_MASK 0x0020 /* DCS_TRIG_STARTUP_1 */
+#define WM9090_DCS_TRIG_STARTUP_1_SHIFT 5 /* DCS_TRIG_STARTUP_1 */
+#define WM9090_DCS_TRIG_STARTUP_1_WIDTH 1 /* DCS_TRIG_STARTUP_1 */
+#define WM9090_DCS_TRIG_STARTUP_0 0x0010 /* DCS_TRIG_STARTUP_0 */
+#define WM9090_DCS_TRIG_STARTUP_0_MASK 0x0010 /* DCS_TRIG_STARTUP_0 */
+#define WM9090_DCS_TRIG_STARTUP_0_SHIFT 4 /* DCS_TRIG_STARTUP_0 */
+#define WM9090_DCS_TRIG_STARTUP_0_WIDTH 1 /* DCS_TRIG_STARTUP_0 */
+#define WM9090_DCS_TRIG_DAC_WR_1 0x0008 /* DCS_TRIG_DAC_WR_1 */
+#define WM9090_DCS_TRIG_DAC_WR_1_MASK 0x0008 /* DCS_TRIG_DAC_WR_1 */
+#define WM9090_DCS_TRIG_DAC_WR_1_SHIFT 3 /* DCS_TRIG_DAC_WR_1 */
+#define WM9090_DCS_TRIG_DAC_WR_1_WIDTH 1 /* DCS_TRIG_DAC_WR_1 */
+#define WM9090_DCS_TRIG_DAC_WR_0 0x0004 /* DCS_TRIG_DAC_WR_0 */
+#define WM9090_DCS_TRIG_DAC_WR_0_MASK 0x0004 /* DCS_TRIG_DAC_WR_0 */
+#define WM9090_DCS_TRIG_DAC_WR_0_SHIFT 2 /* DCS_TRIG_DAC_WR_0 */
+#define WM9090_DCS_TRIG_DAC_WR_0_WIDTH 1 /* DCS_TRIG_DAC_WR_0 */
+#define WM9090_DCS_ENA_CHAN_1 0x0002 /* DCS_ENA_CHAN_1 */
+#define WM9090_DCS_ENA_CHAN_1_MASK 0x0002 /* DCS_ENA_CHAN_1 */
+#define WM9090_DCS_ENA_CHAN_1_SHIFT 1 /* DCS_ENA_CHAN_1 */
+#define WM9090_DCS_ENA_CHAN_1_WIDTH 1 /* DCS_ENA_CHAN_1 */
+#define WM9090_DCS_ENA_CHAN_0 0x0001 /* DCS_ENA_CHAN_0 */
+#define WM9090_DCS_ENA_CHAN_0_MASK 0x0001 /* DCS_ENA_CHAN_0 */
+#define WM9090_DCS_ENA_CHAN_0_SHIFT 0 /* DCS_ENA_CHAN_0 */
+#define WM9090_DCS_ENA_CHAN_0_WIDTH 1 /* DCS_ENA_CHAN_0 */
+
+/*
+ * R85 (0x55) - DC Servo 1
+ */
+#define WM9090_DCS_SERIES_NO_01_MASK 0x0FE0 /* DCS_SERIES_NO_01 - [11:5] */
+#define WM9090_DCS_SERIES_NO_01_SHIFT 5 /* DCS_SERIES_NO_01 - [11:5] */
+#define WM9090_DCS_SERIES_NO_01_WIDTH 7 /* DCS_SERIES_NO_01 - [11:5] */
+#define WM9090_DCS_TIMER_PERIOD_01_MASK 0x000F /* DCS_TIMER_PERIOD_01 - [3:0] */
+#define WM9090_DCS_TIMER_PERIOD_01_SHIFT 0 /* DCS_TIMER_PERIOD_01 - [3:0] */
+#define WM9090_DCS_TIMER_PERIOD_01_WIDTH 4 /* DCS_TIMER_PERIOD_01 - [3:0] */
+
+/*
+ * R87 (0x57) - DC Servo 3
+ */
+#define WM9090_DCS_DAC_WR_VAL_1_MASK 0xFF00 /* DCS_DAC_WR_VAL_1 - [15:8] */
+#define WM9090_DCS_DAC_WR_VAL_1_SHIFT 8 /* DCS_DAC_WR_VAL_1 - [15:8] */
+#define WM9090_DCS_DAC_WR_VAL_1_WIDTH 8 /* DCS_DAC_WR_VAL_1 - [15:8] */
+#define WM9090_DCS_DAC_WR_VAL_0_MASK 0x00FF /* DCS_DAC_WR_VAL_0 - [7:0] */
+#define WM9090_DCS_DAC_WR_VAL_0_SHIFT 0 /* DCS_DAC_WR_VAL_0 - [7:0] */
+#define WM9090_DCS_DAC_WR_VAL_0_WIDTH 8 /* DCS_DAC_WR_VAL_0 - [7:0] */
+
+/*
+ * R88 (0x58) - DC Servo Readback 0
+ */
+#define WM9090_DCS_CAL_COMPLETE_MASK 0x0300 /* DCS_CAL_COMPLETE - [9:8] */
+#define WM9090_DCS_CAL_COMPLETE_SHIFT 8 /* DCS_CAL_COMPLETE - [9:8] */
+#define WM9090_DCS_CAL_COMPLETE_WIDTH 2 /* DCS_CAL_COMPLETE - [9:8] */
+#define WM9090_DCS_DAC_WR_COMPLETE_MASK 0x0030 /* DCS_DAC_WR_COMPLETE - [5:4] */
+#define WM9090_DCS_DAC_WR_COMPLETE_SHIFT 4 /* DCS_DAC_WR_COMPLETE - [5:4] */
+#define WM9090_DCS_DAC_WR_COMPLETE_WIDTH 2 /* DCS_DAC_WR_COMPLETE - [5:4] */
+#define WM9090_DCS_STARTUP_COMPLETE_MASK 0x0003 /* DCS_STARTUP_COMPLETE - [1:0] */
+#define WM9090_DCS_STARTUP_COMPLETE_SHIFT 0 /* DCS_STARTUP_COMPLETE - [1:0] */
+#define WM9090_DCS_STARTUP_COMPLETE_WIDTH 2 /* DCS_STARTUP_COMPLETE - [1:0] */
+
+/*
+ * R89 (0x59) - DC Servo Readback 1
+ */
+#define WM9090_DCS_DAC_WR_VAL_1_RD_MASK 0x00FF /* DCS_DAC_WR_VAL_1_RD - [7:0] */
+#define WM9090_DCS_DAC_WR_VAL_1_RD_SHIFT 0 /* DCS_DAC_WR_VAL_1_RD - [7:0] */
+#define WM9090_DCS_DAC_WR_VAL_1_RD_WIDTH 8 /* DCS_DAC_WR_VAL_1_RD - [7:0] */
+
+/*
+ * R90 (0x5A) - DC Servo Readback 2
+ */
+#define WM9090_DCS_DAC_WR_VAL_0_RD_MASK 0x00FF /* DCS_DAC_WR_VAL_0_RD - [7:0] */
+#define WM9090_DCS_DAC_WR_VAL_0_RD_SHIFT 0 /* DCS_DAC_WR_VAL_0_RD - [7:0] */
+#define WM9090_DCS_DAC_WR_VAL_0_RD_WIDTH 8 /* DCS_DAC_WR_VAL_0_RD - [7:0] */
+
+/*
+ * R96 (0x60) - Analogue HP 0
+ */
+#define WM9090_HPOUT1L_RMV_SHORT 0x0080 /* HPOUT1L_RMV_SHORT */
+#define WM9090_HPOUT1L_RMV_SHORT_MASK 0x0080 /* HPOUT1L_RMV_SHORT */
+#define WM9090_HPOUT1L_RMV_SHORT_SHIFT 7 /* HPOUT1L_RMV_SHORT */
+#define WM9090_HPOUT1L_RMV_SHORT_WIDTH 1 /* HPOUT1L_RMV_SHORT */
+#define WM9090_HPOUT1L_OUTP 0x0040 /* HPOUT1L_OUTP */
+#define WM9090_HPOUT1L_OUTP_MASK 0x0040 /* HPOUT1L_OUTP */
+#define WM9090_HPOUT1L_OUTP_SHIFT 6 /* HPOUT1L_OUTP */
+#define WM9090_HPOUT1L_OUTP_WIDTH 1 /* HPOUT1L_OUTP */
+#define WM9090_HPOUT1L_DLY 0x0020 /* HPOUT1L_DLY */
+#define WM9090_HPOUT1L_DLY_MASK 0x0020 /* HPOUT1L_DLY */
+#define WM9090_HPOUT1L_DLY_SHIFT 5 /* HPOUT1L_DLY */
+#define WM9090_HPOUT1L_DLY_WIDTH 1 /* HPOUT1L_DLY */
+#define WM9090_HPOUT1R_RMV_SHORT 0x0008 /* HPOUT1R_RMV_SHORT */
+#define WM9090_HPOUT1R_RMV_SHORT_MASK 0x0008 /* HPOUT1R_RMV_SHORT */
+#define WM9090_HPOUT1R_RMV_SHORT_SHIFT 3 /* HPOUT1R_RMV_SHORT */
+#define WM9090_HPOUT1R_RMV_SHORT_WIDTH 1 /* HPOUT1R_RMV_SHORT */
+#define WM9090_HPOUT1R_OUTP 0x0004 /* HPOUT1R_OUTP */
+#define WM9090_HPOUT1R_OUTP_MASK 0x0004 /* HPOUT1R_OUTP */
+#define WM9090_HPOUT1R_OUTP_SHIFT 2 /* HPOUT1R_OUTP */
+#define WM9090_HPOUT1R_OUTP_WIDTH 1 /* HPOUT1R_OUTP */
+#define WM9090_HPOUT1R_DLY 0x0002 /* HPOUT1R_DLY */
+#define WM9090_HPOUT1R_DLY_MASK 0x0002 /* HPOUT1R_DLY */
+#define WM9090_HPOUT1R_DLY_SHIFT 1 /* HPOUT1R_DLY */
+#define WM9090_HPOUT1R_DLY_WIDTH 1 /* HPOUT1R_DLY */
+
+/*
+ * R98 (0x62) - AGC Control 0
+ */
+#define WM9090_AGC_CLIP_ENA 0x8000 /* AGC_CLIP_ENA */
+#define WM9090_AGC_CLIP_ENA_MASK 0x8000 /* AGC_CLIP_ENA */
+#define WM9090_AGC_CLIP_ENA_SHIFT 15 /* AGC_CLIP_ENA */
+#define WM9090_AGC_CLIP_ENA_WIDTH 1 /* AGC_CLIP_ENA */
+#define WM9090_AGC_CLIP_THR_MASK 0x0F00 /* AGC_CLIP_THR - [11:8] */
+#define WM9090_AGC_CLIP_THR_SHIFT 8 /* AGC_CLIP_THR - [11:8] */
+#define WM9090_AGC_CLIP_THR_WIDTH 4 /* AGC_CLIP_THR - [11:8] */
+#define WM9090_AGC_CLIP_ATK_MASK 0x0070 /* AGC_CLIP_ATK - [6:4] */
+#define WM9090_AGC_CLIP_ATK_SHIFT 4 /* AGC_CLIP_ATK - [6:4] */
+#define WM9090_AGC_CLIP_ATK_WIDTH 3 /* AGC_CLIP_ATK - [6:4] */
+#define WM9090_AGC_CLIP_DCY_MASK 0x0007 /* AGC_CLIP_DCY - [2:0] */
+#define WM9090_AGC_CLIP_DCY_SHIFT 0 /* AGC_CLIP_DCY - [2:0] */
+#define WM9090_AGC_CLIP_DCY_WIDTH 3 /* AGC_CLIP_DCY - [2:0] */
+
+/*
+ * R99 (0x63) - AGC Control 1
+ */
+#define WM9090_AGC_PWR_ENA 0x8000 /* AGC_PWR_ENA */
+#define WM9090_AGC_PWR_ENA_MASK 0x8000 /* AGC_PWR_ENA */
+#define WM9090_AGC_PWR_ENA_SHIFT 15 /* AGC_PWR_ENA */
+#define WM9090_AGC_PWR_ENA_WIDTH 1 /* AGC_PWR_ENA */
+#define WM9090_AGC_PWR_AVG 0x1000 /* AGC_PWR_AVG */
+#define WM9090_AGC_PWR_AVG_MASK 0x1000 /* AGC_PWR_AVG */
+#define WM9090_AGC_PWR_AVG_SHIFT 12 /* AGC_PWR_AVG */
+#define WM9090_AGC_PWR_AVG_WIDTH 1 /* AGC_PWR_AVG */
+#define WM9090_AGC_PWR_THR_MASK 0x0F00 /* AGC_PWR_THR - [11:8] */
+#define WM9090_AGC_PWR_THR_SHIFT 8 /* AGC_PWR_THR - [11:8] */
+#define WM9090_AGC_PWR_THR_WIDTH 4 /* AGC_PWR_THR - [11:8] */
+#define WM9090_AGC_PWR_ATK_MASK 0x0070 /* AGC_PWR_ATK - [6:4] */
+#define WM9090_AGC_PWR_ATK_SHIFT 4 /* AGC_PWR_ATK - [6:4] */
+#define WM9090_AGC_PWR_ATK_WIDTH 3 /* AGC_PWR_ATK - [6:4] */
+#define WM9090_AGC_PWR_DCY_MASK 0x0007 /* AGC_PWR_DCY - [2:0] */
+#define WM9090_AGC_PWR_DCY_SHIFT 0 /* AGC_PWR_DCY - [2:0] */
+#define WM9090_AGC_PWR_DCY_WIDTH 3 /* AGC_PWR_DCY - [2:0] */
+
+/*
+ * R100 (0x64) - AGC Control 2
+ */
+#define WM9090_AGC_RAMP 0x0100 /* AGC_RAMP */
+#define WM9090_AGC_RAMP_MASK 0x0100 /* AGC_RAMP */
+#define WM9090_AGC_RAMP_SHIFT 8 /* AGC_RAMP */
+#define WM9090_AGC_RAMP_WIDTH 1 /* AGC_RAMP */
+#define WM9090_AGC_MINGAIN_MASK 0x003F /* AGC_MINGAIN - [5:0] */
+#define WM9090_AGC_MINGAIN_SHIFT 0 /* AGC_MINGAIN - [5:0] */
+#define WM9090_AGC_MINGAIN_WIDTH 6 /* AGC_MINGAIN - [5:0] */
+
+#endif
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
index 2f48a8aae22c..28790a2ffe8d 100644
--- a/sound/soc/codecs/wm9712.c
+++ b/sound/soc/codecs/wm9712.c
@@ -632,9 +632,6 @@ static int wm9712_soc_resume(struct platform_device *pdev)
}
}
- if (codec->suspend_bias_level == SND_SOC_BIAS_ON)
- wm9712_set_bias_level(codec, SND_SOC_BIAS_ON);
-
return ret;
}
diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
index 2fca514fde58..34e0c91092fa 100644
--- a/sound/soc/codecs/wm9713.c
+++ b/sound/soc/codecs/wm9713.c
@@ -764,7 +764,7 @@ static void pll_factors(struct _pll_div *pll_div, unsigned int source)
static int wm9713_set_pll(struct snd_soc_codec *codec,
int pll_id, unsigned int freq_in, unsigned int freq_out)
{
- struct wm9713_priv *wm9713 = codec->private_data;
+ struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
u16 reg, reg2;
struct _pll_div pll_div;
@@ -1175,7 +1175,7 @@ static int wm9713_soc_resume(struct platform_device *pdev)
{
struct snd_soc_device *socdev = platform_get_drvdata(pdev);
struct snd_soc_codec *codec = socdev->card->codec;
- struct wm9713_priv *wm9713 = codec->private_data;
+ struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
int i, ret;
u16 *cache = codec->reg_cache;
@@ -1201,9 +1201,6 @@ static int wm9713_soc_resume(struct platform_device *pdev)
}
}
- if (codec->suspend_bias_level == SND_SOC_BIAS_ON)
- wm9713_set_bias_level(codec, SND_SOC_BIAS_ON);
-
return ret;
}
@@ -1228,8 +1225,9 @@ static int wm9713_soc_probe(struct platform_device *pdev)
codec->reg_cache_size = sizeof(wm9713_reg);
codec->reg_cache_step = 2;
- codec->private_data = kzalloc(sizeof(struct wm9713_priv), GFP_KERNEL);
- if (codec->private_data == NULL) {
+ snd_soc_codec_set_drvdata(codec, kzalloc(sizeof(struct wm9713_priv),
+ GFP_KERNEL));
+ if (snd_soc_codec_get_drvdata(codec) == NULL) {
ret = -ENOMEM;
goto priv_err;
}
@@ -1280,7 +1278,7 @@ pcm_err:
snd_soc_free_ac97_codec(codec);
codec_err:
- kfree(codec->private_data);
+ kfree(snd_soc_codec_get_drvdata(codec));
priv_err:
kfree(codec->reg_cache);
@@ -1302,7 +1300,7 @@ static int wm9713_soc_remove(struct platform_device *pdev)
snd_soc_dapm_free(socdev);
snd_soc_free_pcms(socdev);
snd_soc_free_ac97_codec(codec);
- kfree(codec->private_data);
+ kfree(snd_soc_codec_get_drvdata(codec));
kfree(codec->reg_cache);
kfree(codec);
return 0;
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index e1f225a3ac46..16f1a57da08a 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -91,7 +91,7 @@ static void wait_for_dc_servo(struct snd_soc_codec *codec, unsigned int op)
*/
static void calibrate_dc_servo(struct snd_soc_codec *codec)
{
- struct wm_hubs_data *hubs = codec->private_data;
+ struct wm_hubs_data *hubs = snd_soc_codec_get_drvdata(codec);
u16 reg, reg_l, reg_r, dcs_cfg;
/* Set for 32 series updates */
@@ -127,6 +127,8 @@ static void calibrate_dc_servo(struct snd_soc_codec *codec)
break;
}
+ dev_dbg(codec->dev, "DCS input: %x %x\n", reg_l, reg_r);
+
/* HPOUT1L */
if (reg_l + hubs->dcs_codes > 0 &&
reg_l + hubs->dcs_codes < 0xff)
@@ -139,6 +141,8 @@ static void calibrate_dc_servo(struct snd_soc_codec *codec)
reg_r += hubs->dcs_codes;
dcs_cfg |= reg_r;
+ dev_dbg(codec->dev, "DCS result: %x\n", dcs_cfg);
+
/* Do it */
snd_soc_write(codec, WM8993_DC_SERVO_3, dcs_cfg);
wait_for_dc_servo(codec,
@@ -154,7 +158,7 @@ static int wm8993_put_dc_servo(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
- struct wm_hubs_data *hubs = codec->private_data;
+ struct wm_hubs_data *hubs = snd_soc_codec_get_drvdata(codec);
int ret;
ret = snd_soc_put_volsw_2r(kcontrol, ucontrol);
@@ -327,7 +331,7 @@ static int hp_supply_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_codec *codec = w->codec;
- struct wm_hubs_data *hubs = codec->private_data;
+ struct wm_hubs_data *hubs = snd_soc_codec_get_drvdata(codec);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
@@ -397,14 +401,14 @@ static int hp_event(struct snd_soc_dapm_widget *w,
case SND_SOC_DAPM_PRE_PMD:
snd_soc_update_bits(codec, WM8993_ANALOGUE_HP_0,
- WM8993_HPOUT1L_DLY |
- WM8993_HPOUT1R_DLY |
+ WM8993_HPOUT1L_OUTP |
+ WM8993_HPOUT1R_OUTP |
WM8993_HPOUT1L_RMV_SHORT |
WM8993_HPOUT1R_RMV_SHORT, 0);
snd_soc_update_bits(codec, WM8993_ANALOGUE_HP_0,
- WM8993_HPOUT1L_OUTP |
- WM8993_HPOUT1R_OUTP, 0);
+ WM8993_HPOUT1L_DLY |
+ WM8993_HPOUT1R_DLY, 0);
snd_soc_update_bits(codec, WM8993_POWER_MANAGEMENT_1,
WM8993_HPOUT1L_ENA | WM8993_HPOUT1R_ENA,
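
Note: the hp_event() hunk above only reorders the two power-down writes: the output-stage (OUTP) and short-removal bits are now cleared together first, and the DLY bits are dropped in a second write, presumably mirroring the power-up sequence in reverse. The helpers below are a minimal sketch of the snd_soc_update_bits() read-modify-write semantics this relies on; they are not part of the patch.

	/* Sketch only: snd_soc_update_bits(codec, reg, mask, value) reads the
	 * register, changes just the bits in 'mask' to match 'value', and
	 * writes the result back; bits outside 'mask' are preserved. */
	static void example_clear_bits(struct snd_soc_codec *codec,
				       unsigned short reg, unsigned int bits)
	{
		snd_soc_update_bits(codec, reg, bits, 0);	/* clear 'bits' only */
	}

	static void example_set_bits(struct snd_soc_codec *codec,
				     unsigned short reg, unsigned int bits)
	{
		snd_soc_update_bits(codec, reg, bits, bits);	/* set 'bits' only */
	}
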
diff --git a/sound/soc/davinci/Kconfig b/sound/soc/davinci/Kconfig
index 047ee39418c0..6bbf001f6591 100644
--- a/sound/soc/davinci/Kconfig
+++ b/sound/soc/davinci/Kconfig
@@ -12,15 +12,38 @@ config SND_DAVINCI_SOC_I2S
config SND_DAVINCI_SOC_MCASP
tristate
+config SND_DAVINCI_SOC_VCIF
+ tristate
+
config SND_DAVINCI_SOC_EVM
tristate "SoC Audio support for DaVinci DM6446, DM355 or DM365 EVM"
depends on SND_DAVINCI_SOC
- depends on MACH_DAVINCI_EVM || MACH_DAVINCI_DM355_EVM || MACH_DAVINCI_DM365_EVM
+ depends on MACH_DAVINCI_EVM || MACH_DAVINCI_DM355_EVM || MACH_DAVINCI_DM365_EVM
select SND_DAVINCI_SOC_I2S
select SND_SOC_TLV320AIC3X
help
Say Y if you want to add support for SoC audio on TI
- DaVinci DM6446 or DM355 EVM platforms.
+ DaVinci DM6446, DM355 or DM365 EVM platforms.
+
+choice
+ prompt "DM365 codec select"
+ depends on SND_DAVINCI_SOC_EVM
+ depends on MACH_DAVINCI_DM365_EVM
+ default SND_DM365_EXTERNAL_CODEC
+
+config SND_DM365_AIC3X_CODEC
+ bool "Audio Codec - AIC3101"
+ help
+ Say Y if you want to add support for the AIC3101 audio codec.
+
+config SND_DM365_VOICE_CODEC
+ bool "Voice Codec - CQ93VC"
+ select MFD_DAVINCI_VOICECODEC
+ select SND_DAVINCI_SOC_VCIF
+ select SND_SOC_CQ0093VC
+ help
+ Say Y if you want to add support for the SoC on-chip voice codec.
+endchoice
config SND_DM6467_SOC_EVM
tristate "SoC Audio support for DaVinci DM6467 EVM"
diff --git a/sound/soc/davinci/Makefile b/sound/soc/davinci/Makefile
index a6939d71b988..a93679d618cd 100644
--- a/sound/soc/davinci/Makefile
+++ b/sound/soc/davinci/Makefile
@@ -2,10 +2,12 @@
snd-soc-davinci-objs := davinci-pcm.o
snd-soc-davinci-i2s-objs := davinci-i2s.o
snd-soc-davinci-mcasp-objs:= davinci-mcasp.o
+snd-soc-davinci-vcif-objs:= davinci-vcif.o
obj-$(CONFIG_SND_DAVINCI_SOC) += snd-soc-davinci.o
obj-$(CONFIG_SND_DAVINCI_SOC_I2S) += snd-soc-davinci-i2s.o
obj-$(CONFIG_SND_DAVINCI_SOC_MCASP) += snd-soc-davinci-mcasp.o
+obj-$(CONFIG_SND_DAVINCI_SOC_VCIF) += snd-soc-davinci-vcif.o
# DAVINCI Machine Support
snd-soc-evm-objs := davinci-evm.o
diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/davinci/davinci-evm.c
index 7ccbe6684fc2..97f74d6a33e6 100644
--- a/sound/soc/davinci/davinci-evm.c
+++ b/sound/soc/davinci/davinci-evm.c
@@ -28,10 +28,12 @@
#include <mach/mux.h>
#include "../codecs/tlv320aic3x.h"
+#include "../codecs/cq93vc.h"
#include "../codecs/spdif_transciever.h"
#include "davinci-pcm.h"
#include "davinci-i2s.h"
#include "davinci-mcasp.h"
+#include "davinci-vcif.h"
#define AUDIO_FORMAT (SND_SOC_DAIFMT_DSP_B | \
SND_SOC_DAIFMT_CBM_CFM | SND_SOC_DAIFMT_IB_NF)
@@ -81,10 +83,24 @@ static int evm_hw_params(struct snd_pcm_substream *substream,
return 0;
}
+static int evm_spdif_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
+
+ /* set cpu DAI configuration */
+ return snd_soc_dai_set_fmt(cpu_dai, AUDIO_FORMAT);
+}
+
static struct snd_soc_ops evm_ops = {
.hw_params = evm_hw_params,
};
+static struct snd_soc_ops evm_spdif_ops = {
+ .hw_params = evm_spdif_hw_params,
+};
+
/* davinci-evm machine dapm widgets */
static const struct snd_soc_dapm_widget aic3x_dapm_widgets[] = {
SND_SOC_DAPM_HP("Headphone Jack", NULL),
@@ -151,6 +167,22 @@ static struct snd_soc_dai_link evm_dai = {
.ops = &evm_ops,
};
+static struct snd_soc_dai_link dm365_evm_dai = {
+#ifdef CONFIG_SND_DM365_AIC3X_CODEC
+ .name = "TLV320AIC3X",
+ .stream_name = "AIC3X",
+ .cpu_dai = &davinci_i2s_dai,
+ .codec_dai = &aic3x_dai,
+ .init = evm_aic3x_init,
+ .ops = &evm_ops,
+#elif defined(CONFIG_SND_DM365_VOICE_CODEC)
+ .name = "Voice Codec - CQ93VC",
+ .stream_name = "CQ93",
+ .cpu_dai = &davinci_vcif_dai,
+ .codec_dai = &cq93vc_dai,
+#endif
+};
+
static struct snd_soc_dai_link dm6467_evm_dai[] = {
{
.name = "TLV320AIC3X",
@@ -165,7 +197,7 @@ static struct snd_soc_dai_link dm6467_evm_dai[] = {
.stream_name = "spdif",
.cpu_dai = &davinci_mcasp_dai[DAVINCI_MCASP_DIT_DAI],
.codec_dai = &dit_stub_dai,
- .ops = &evm_ops,
+ .ops = &evm_spdif_ops,
},
};
static struct snd_soc_dai_link da8xx_evm_dai = {
@@ -177,7 +209,7 @@ static struct snd_soc_dai_link da8xx_evm_dai = {
.ops = &evm_ops,
};
-/* davinci dm6446, dm355 or dm365 evm audio machine driver */
+/* davinci dm6446, dm355 evm audio machine driver */
static struct snd_soc_card snd_soc_card_evm = {
.name = "DaVinci EVM",
.platform = &davinci_soc_platform,
@@ -185,6 +217,15 @@ static struct snd_soc_card snd_soc_card_evm = {
.num_links = 1,
};
+/* davinci dm365 evm audio machine driver */
+static struct snd_soc_card dm365_snd_soc_card_evm = {
+ .name = "DaVinci DM365 EVM",
+ .platform = &davinci_soc_platform,
+ .dai_link = &dm365_evm_dai,
+ .num_links = 1,
+};
+
+
/* davinci dm6467 evm audio machine driver */
static struct snd_soc_card dm6467_snd_soc_card_evm = {
.name = "DaVinci DM6467 EVM",
@@ -217,6 +258,17 @@ static struct snd_soc_device evm_snd_devdata = {
};
/* evm audio subsystem */
+static struct snd_soc_device dm365_evm_snd_devdata = {
+ .card = &dm365_snd_soc_card_evm,
+#ifdef CONFIG_SND_DM365_AIC3X_CODEC
+ .codec_dev = &soc_codec_dev_aic3x,
+ .codec_data = &aic3x_setup,
+#elif defined(CONFIG_SND_DM365_VOICE_CODEC)
+ .codec_dev = &soc_codec_dev_cq93vc,
+#endif
+};
+
+/* evm audio subsystem */
static struct snd_soc_device dm6467_evm_snd_devdata = {
.card = &dm6467_snd_soc_card_evm,
.codec_dev = &soc_codec_dev_aic3x,
@@ -244,12 +296,15 @@ static int __init evm_init(void)
int index;
int ret;
- if (machine_is_davinci_evm() || machine_is_davinci_dm365_evm()) {
+ if (machine_is_davinci_evm()) {
evm_snd_dev_data = &evm_snd_devdata;
index = 0;
} else if (machine_is_davinci_dm355_evm()) {
evm_snd_dev_data = &evm_snd_devdata;
index = 1;
+ } else if (machine_is_davinci_dm365_evm()) {
+ evm_snd_dev_data = &dm365_evm_snd_devdata;
+ index = 0;
} else if (machine_is_davinci_dm6467_evm()) {
evm_snd_dev_data = &dm6467_evm_snd_devdata;
index = 0;
diff --git a/sound/soc/davinci/davinci-vcif.c b/sound/soc/davinci/davinci-vcif.c
new file mode 100644
index 000000000000..9aa980d38231
--- /dev/null
+++ b/sound/soc/davinci/davinci-vcif.c
@@ -0,0 +1,274 @@
+/*
+ * ALSA SoC Voice Codec Interface for TI DAVINCI processor
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * Author: Miguel Aguilar <miguel.aguilar@ridgerun.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/mfd/davinci_voicecodec.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/initval.h>
+#include <sound/soc.h>
+
+#include "davinci-pcm.h"
+#include "davinci-i2s.h"
+#include "davinci-vcif.h"
+
+#define MOD_REG_BIT(val, mask, set) do { \
+ if (set) { \
+ val |= mask; \
+ } else { \
+ val &= ~mask; \
+ } \
+} while (0)
+
+struct davinci_vcif_dev {
+ struct davinci_vc *davinci_vc;
+ struct davinci_pcm_dma_params dma_params[2];
+};
+
+static void davinci_vcif_start(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct davinci_vcif_dev *davinci_vcif_dev =
+ rtd->dai->cpu_dai->private_data;
+ struct davinci_vc *davinci_vc = davinci_vcif_dev->davinci_vc;
+ u32 w;
+
+ /* Start the sample generator and enable transmitter/receiver */
+ w = readl(davinci_vc->base + DAVINCI_VC_CTRL);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ MOD_REG_BIT(w, DAVINCI_VC_CTRL_RSTDAC, 1);
+ else
+ MOD_REG_BIT(w, DAVINCI_VC_CTRL_RSTADC, 1);
+
+ writel(w, davinci_vc->base + DAVINCI_VC_CTRL);
+}
+
+static void davinci_vcif_stop(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct davinci_vcif_dev *davinci_vcif_dev =
+ rtd->dai->cpu_dai->private_data;
+ struct davinci_vc *davinci_vc = davinci_vcif_dev->davinci_vc;
+ u32 w;
+
+ /* Reset transmitter/receiver and sample rate/frame sync generators */
+ w = readl(davinci_vc->base + DAVINCI_VC_CTRL);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ MOD_REG_BIT(w, DAVINCI_VC_CTRL_RSTDAC, 0);
+ else
+ MOD_REG_BIT(w, DAVINCI_VC_CTRL_RSTADC, 0);
+
+ writel(w, davinci_vc->base + DAVINCI_VC_CTRL);
+}
+
+static int davinci_vcif_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct davinci_vcif_dev *davinci_vcif_dev = dai->private_data;
+ struct davinci_vc *davinci_vc = davinci_vcif_dev->davinci_vc;
+ struct davinci_pcm_dma_params *dma_params =
+ &davinci_vcif_dev->dma_params[substream->stream];
+ u32 w;
+
+ /* Restart the codec before setup */
+ davinci_vcif_stop(substream);
+ davinci_vcif_start(substream);
+
+ /* General line settings */
+ writel(DAVINCI_VC_CTRL_MASK, davinci_vc->base + DAVINCI_VC_CTRL);
+
+ writel(DAVINCI_VC_INT_MASK, davinci_vc->base + DAVINCI_VC_INTCLR);
+
+ writel(DAVINCI_VC_INT_MASK, davinci_vc->base + DAVINCI_VC_INTEN);
+
+ w = readl(davinci_vc->base + DAVINCI_VC_CTRL);
+
+ /* Determine xfer data type */
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_U8:
+ dma_params->data_type = 0;
+
+ MOD_REG_BIT(w, DAVINCI_VC_CTRL_RD_BITS_8 |
+ DAVINCI_VC_CTRL_RD_UNSIGNED |
+ DAVINCI_VC_CTRL_WD_BITS_8 |
+ DAVINCI_VC_CTRL_WD_UNSIGNED, 1);
+ break;
+ case SNDRV_PCM_FORMAT_S8:
+ dma_params->data_type = 1;
+
+ MOD_REG_BIT(w, DAVINCI_VC_CTRL_RD_BITS_8 |
+ DAVINCI_VC_CTRL_WD_BITS_8, 1);
+
+ MOD_REG_BIT(w, DAVINCI_VC_CTRL_RD_UNSIGNED |
+ DAVINCI_VC_CTRL_WD_UNSIGNED, 0);
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ dma_params->data_type = 2;
+
+ MOD_REG_BIT(w, DAVINCI_VC_CTRL_RD_BITS_8 |
+ DAVINCI_VC_CTRL_RD_UNSIGNED |
+ DAVINCI_VC_CTRL_WD_BITS_8 |
+ DAVINCI_VC_CTRL_WD_UNSIGNED, 0);
+ break;
+ default:
+ printk(KERN_WARNING "davinci-vcif: unsupported PCM format\n");
+ return -EINVAL;
+ }
+
+ dma_params->acnt = dma_params->data_type;
+
+ writel(w, davinci_vc->base + DAVINCI_VC_CTRL);
+
+ return 0;
+}
+
+static int davinci_vcif_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ int ret = 0;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ davinci_vcif_start(substream);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ davinci_vcif_stop(substream);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+#define DAVINCI_VCIF_RATES SNDRV_PCM_RATE_8000_48000
+
+static struct snd_soc_dai_ops davinci_vcif_dai_ops = {
+ .trigger = davinci_vcif_trigger,
+ .hw_params = davinci_vcif_hw_params,
+};
+
+struct snd_soc_dai davinci_vcif_dai = {
+ .name = "davinci-vcif",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = DAVINCI_VCIF_RATES,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,},
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = DAVINCI_VCIF_RATES,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,},
+ .ops = &davinci_vcif_dai_ops,
+
+};
+EXPORT_SYMBOL_GPL(davinci_vcif_dai);
+
+static int davinci_vcif_probe(struct platform_device *pdev)
+{
+ struct davinci_vc *davinci_vc = platform_get_drvdata(pdev);
+ struct davinci_vcif_dev *davinci_vcif_dev;
+ int ret;
+
+ davinci_vcif_dev = kzalloc(sizeof(struct davinci_vcif_dev), GFP_KERNEL);
+ if (!davinci_vcif_dev) {
+ dev_dbg(&pdev->dev,
+ "could not allocate memory for private data\n");
+ return -ENOMEM;
+ }
+
+ /* DMA tx params */
+ davinci_vcif_dev->davinci_vc = davinci_vc;
+ davinci_vcif_dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK].channel =
+ davinci_vc->davinci_vcif.dma_tx_channel;
+ davinci_vcif_dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK].dma_addr =
+ davinci_vc->davinci_vcif.dma_tx_addr;
+
+ /* DMA rx params */
+ davinci_vcif_dev->dma_params[SNDRV_PCM_STREAM_CAPTURE].channel =
+ davinci_vc->davinci_vcif.dma_rx_channel;
+ davinci_vcif_dev->dma_params[SNDRV_PCM_STREAM_CAPTURE].dma_addr =
+ davinci_vc->davinci_vcif.dma_rx_addr;
+
+ davinci_vcif_dai.dev = &pdev->dev;
+ davinci_vcif_dai.capture.dma_data = davinci_vcif_dev->dma_params;
+ davinci_vcif_dai.playback.dma_data = davinci_vcif_dev->dma_params;
+ davinci_vcif_dai.private_data = davinci_vcif_dev;
+
+ ret = snd_soc_register_dai(&davinci_vcif_dai);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "could not register dai\n");
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ kfree(davinci_vcif_dev);
+
+ return ret;
+}
+
+static int davinci_vcif_remove(struct platform_device *pdev)
+{
+ snd_soc_unregister_dai(&davinci_vcif_dai);
+
+ return 0;
+}
+
+static struct platform_driver davinci_vcif_driver = {
+ .probe = davinci_vcif_probe,
+ .remove = davinci_vcif_remove,
+ .driver = {
+ .name = "davinci_vcif",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init davinci_vcif_init(void)
+{
+ return platform_driver_probe(&davinci_vcif_driver, davinci_vcif_probe);
+}
+module_init(davinci_vcif_init);
+
+static void __exit davinci_vcif_exit(void)
+{
+ platform_driver_unregister(&davinci_vcif_driver);
+}
+module_exit(davinci_vcif_exit);
+
+MODULE_AUTHOR("Miguel Aguilar");
+MODULE_DESCRIPTION("Texas Instruments DaVinci ASoC Voice Codec Interface");
+MODULE_LICENSE("GPL");
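
Note: MOD_REG_BIT() in davinci-vcif.c above only edits a cached register value; the caller still has to read the hardware register first and write the result back afterwards. A minimal sketch of the intended usage follows (not part of the patch, and making no claim about what the individual control bits do):

	static void example_mod_ctrl(struct davinci_vc *davinci_vc)
	{
		u32 w = readl(davinci_vc->base + DAVINCI_VC_CTRL);	/* read cached value */

		MOD_REG_BIT(w, DAVINCI_VC_CTRL_RSTDAC, 1);		/* set this bit      */
		MOD_REG_BIT(w, DAVINCI_VC_CTRL_RSTADC, 0);		/* clear this bit    */

		writel(w, davinci_vc->base + DAVINCI_VC_CTRL);		/* write it back     */
	}
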
diff --git a/sound/soc/davinci/davinci-vcif.h b/sound/soc/davinci/davinci-vcif.h
new file mode 100644
index 000000000000..571c9948724f
--- /dev/null
+++ b/sound/soc/davinci/davinci-vcif.h
@@ -0,0 +1,28 @@
+/*
+ * ALSA SoC Voice Codec Interface for TI DAVINCI processor
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * Author: Miguel Aguilar <miguel.aguilar@ridgerun.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _DAVINCI_VCIF_H
+#define _DAVINCI_VCIF_H
+
+extern struct snd_soc_dai davinci_vcif_dai;
+
+#endif
diff --git a/sound/soc/imx/Kconfig b/sound/soc/imx/Kconfig
index 7174b4c710de..eba9b9d257a1 100644
--- a/sound/soc/imx/Kconfig
+++ b/sound/soc/imx/Kconfig
@@ -11,3 +11,11 @@ config SND_IMX_SOC
config SND_MXC_SOC_SSI
tristate
+config SND_MXC_SOC_WM1133_EV1
+ tristate "Audio on the the i.MX31ADS with WM1133-EV1 fitted"
+ depends on SND_IMX_SOC && EXPERIMENTAL
+ select SND_SOC_WM8350
+ select SND_MXC_SOC_SSI
+ help
+ Enable support for audio on the i.MX31ADS with the WM1133-EV1
+ PMIC board with WM835x fitted.
diff --git a/sound/soc/imx/Makefile b/sound/soc/imx/Makefile
index 9f8bb92ddfcc..2d203635ac11 100644
--- a/sound/soc/imx/Makefile
+++ b/sound/soc/imx/Makefile
@@ -9,4 +9,7 @@ obj-$(CONFIG_SND_IMX_SOC) += snd-soc-imx.o
# i.MX Machine Support
snd-soc-phycore-ac97-objs := phycore-ac97.o
+snd-soc-wm1133-ev1-objs := wm1133-ev1.o
+
obj-$(CONFIG_SND_SOC_PHYCORE_AC97) += snd-soc-phycore-ac97.o
+obj-$(CONFIG_SND_MXC_SOC_WM1133_EV1) += snd-soc-wm1133-ev1.o
diff --git a/sound/soc/imx/wm1133-ev1.c b/sound/soc/imx/wm1133-ev1.c
new file mode 100644
index 000000000000..a6e7d9497639
--- /dev/null
+++ b/sound/soc/imx/wm1133-ev1.c
@@ -0,0 +1,308 @@
+/*
+ * wm1133-ev1.c - Audio for WM1133-EV1 on i.MX31ADS
+ *
+ * Copyright (c) 2010 Wolfson Microelectronics plc
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * Based on an earlier driver for the same hardware by Liam Girdwood.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <sound/core.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+
+#include <mach/audmux.h>
+
+#include "imx-ssi.h"
+#include "../codecs/wm8350.h"
+
+/* There is a silicon mic on the board optionally connected via a solder pad
+ * SP1. Define this to enable it.
+ */
+#undef USE_SIMIC
+
+struct _wm8350_audio {
+ unsigned int channels;
+ snd_pcm_format_t format;
+ unsigned int rate;
+ unsigned int sysclk;
+ unsigned int bclkdiv;
+ unsigned int clkdiv;
+ unsigned int lr_rate;
+};
+
+/* in order of power consumption per rate (lowest first) */
+static const struct _wm8350_audio wm8350_audio[] = {
+ /* 16bit mono modes */
+ {1, SNDRV_PCM_FORMAT_S16_LE, 8000, 12288000 >> 1,
+ WM8350_BCLK_DIV_48, WM8350_DACDIV_3, 16,},
+
+ /* 16 bit stereo modes */
+ {2, SNDRV_PCM_FORMAT_S16_LE, 8000, 12288000,
+ WM8350_BCLK_DIV_48, WM8350_DACDIV_6, 32,},
+ {2, SNDRV_PCM_FORMAT_S16_LE, 16000, 12288000,
+ WM8350_BCLK_DIV_24, WM8350_DACDIV_3, 32,},
+ {2, SNDRV_PCM_FORMAT_S16_LE, 32000, 12288000,
+ WM8350_BCLK_DIV_12, WM8350_DACDIV_1_5, 32,},
+ {2, SNDRV_PCM_FORMAT_S16_LE, 48000, 12288000,
+ WM8350_BCLK_DIV_8, WM8350_DACDIV_1, 32,},
+ {2, SNDRV_PCM_FORMAT_S16_LE, 96000, 24576000,
+ WM8350_BCLK_DIV_8, WM8350_DACDIV_1, 32,},
+ {2, SNDRV_PCM_FORMAT_S16_LE, 11025, 11289600,
+ WM8350_BCLK_DIV_32, WM8350_DACDIV_4, 32,},
+ {2, SNDRV_PCM_FORMAT_S16_LE, 22050, 11289600,
+ WM8350_BCLK_DIV_16, WM8350_DACDIV_2, 32,},
+ {2, SNDRV_PCM_FORMAT_S16_LE, 44100, 11289600,
+ WM8350_BCLK_DIV_8, WM8350_DACDIV_1, 32,},
+ {2, SNDRV_PCM_FORMAT_S16_LE, 88200, 22579200,
+ WM8350_BCLK_DIV_8, WM8350_DACDIV_1, 32,},
+
+ /* 24bit stereo modes */
+ {2, SNDRV_PCM_FORMAT_S24_LE, 48000, 12288000,
+ WM8350_BCLK_DIV_4, WM8350_DACDIV_1, 64,},
+ {2, SNDRV_PCM_FORMAT_S24_LE, 96000, 24576000,
+ WM8350_BCLK_DIV_4, WM8350_DACDIV_1, 64,},
+ {2, SNDRV_PCM_FORMAT_S24_LE, 44100, 11289600,
+ WM8350_BCLK_DIV_4, WM8350_DACDIV_1, 64,},
+ {2, SNDRV_PCM_FORMAT_S24_LE, 88200, 22579200,
+ WM8350_BCLK_DIV_4, WM8350_DACDIV_1, 64,},
+};
+
+static int wm1133_ev1_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
+ struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
+ int i, found = 0;
+ snd_pcm_format_t format = params_format(params);
+ unsigned int rate = params_rate(params);
+ unsigned int channels = params_channels(params);
+ u32 dai_format;
+
+ /* find the correct audio parameters */
+ for (i = 0; i < ARRAY_SIZE(wm8350_audio); i++) {
+ if (rate == wm8350_audio[i].rate &&
+ format == wm8350_audio[i].format &&
+ channels == wm8350_audio[i].channels) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found)
+ return -EINVAL;
+
+ /* codec FLL input is 14.75 MHz from MCLK */
+ snd_soc_dai_set_pll(codec_dai, 0, 0, 14750000, wm8350_audio[i].sysclk);
+
+ dai_format = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBM_CFM;
+
+ /* set codec DAI configuration */
+ snd_soc_dai_set_fmt(codec_dai, dai_format);
+
+ /* set cpu DAI configuration */
+ snd_soc_dai_set_fmt(cpu_dai, dai_format);
+
+ /* TODO: The SSI driver should figure this out for us */
+ switch (channels) {
+ case 2:
+ snd_soc_dai_set_tdm_slot(cpu_dai, 0xffffffc, 0xffffffc, 2, 0);
+ break;
+ case 1:
+ snd_soc_dai_set_tdm_slot(cpu_dai, 0xffffffe, 0xffffffe, 1, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set MCLK as the codec system clock for DAC and ADC */
+ snd_soc_dai_set_sysclk(codec_dai, WM8350_MCLK_SEL_PLL_MCLK,
+ wm8350_audio[i].sysclk, SND_SOC_CLOCK_IN);
+
+ /* set codec BCLK division for sample rate */
+ snd_soc_dai_set_clkdiv(codec_dai, WM8350_BCLK_CLKDIV,
+ wm8350_audio[i].bclkdiv);
+
+ /* DAI is synchronous and clocked with DAC LRCLK & ADC LRC */
+ snd_soc_dai_set_clkdiv(codec_dai,
+ WM8350_DACLR_CLKDIV, wm8350_audio[i].lr_rate);
+ snd_soc_dai_set_clkdiv(codec_dai,
+ WM8350_ADCLR_CLKDIV, wm8350_audio[i].lr_rate);
+
+ /* now configure DAC and ADC clocks */
+ snd_soc_dai_set_clkdiv(codec_dai,
+ WM8350_DAC_CLKDIV, wm8350_audio[i].clkdiv);
+
+ snd_soc_dai_set_clkdiv(codec_dai,
+ WM8350_ADC_CLKDIV, wm8350_audio[i].clkdiv);
+
+ return 0;
+}
+
+static struct snd_soc_ops wm1133_ev1_ops = {
+ .hw_params = wm1133_ev1_hw_params,
+};
+
+static const struct snd_soc_dapm_widget wm1133_ev1_widgets[] = {
+#ifdef USE_SIMIC
+ SND_SOC_DAPM_MIC("SiMIC", NULL),
+#endif
+ SND_SOC_DAPM_MIC("Mic1 Jack", NULL),
+ SND_SOC_DAPM_MIC("Mic2 Jack", NULL),
+ SND_SOC_DAPM_LINE("Line In Jack", NULL),
+ SND_SOC_DAPM_LINE("Line Out Jack", NULL),
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+};
+
+/* imx31ads soc_card audio map */
+static const struct snd_soc_dapm_route wm1133_ev1_map[] = {
+
+#ifdef USE_SIMIC
+ /* SiMIC --> IN1LN (with automatic bias) via SP1 */
+ { "IN1LN", NULL, "Mic Bias" },
+ { "Mic Bias", NULL, "SiMIC" },
+#endif
+
+ /* Mic 1 Jack --> IN1LN and IN1LP (with automatic bias) */
+ { "IN1LN", NULL, "Mic Bias" },
+ { "IN1LP", NULL, "Mic1 Jack" },
+ { "Mic Bias", NULL, "Mic1 Jack" },
+
+ /* Mic 2 Jack --> IN1RN and IN1RP (with automatic bias) */
+ { "IN1RN", NULL, "Mic Bias" },
+ { "IN1RP", NULL, "Mic2 Jack" },
+ { "Mic Bias", NULL, "Mic2 Jack" },
+
+ /* Line in Jack --> AUX (L+R) */
+ { "IN3R", NULL, "Line In Jack" },
+ { "IN3L", NULL, "Line In Jack" },
+
+ /* Out1 --> Headphone Jack */
+ { "Headphone Jack", NULL, "OUT1R" },
+ { "Headphone Jack", NULL, "OUT1L" },
+
+ /* Out1 --> Line Out Jack */
+ { "Line Out Jack", NULL, "OUT2R" },
+ { "Line Out Jack", NULL, "OUT2L" },
+};
+
+static struct snd_soc_jack hp_jack;
+
+static struct snd_soc_jack_pin hp_jack_pins[] = {
+ { .pin = "Headphone Jack", .mask = SND_JACK_HEADPHONE },
+};
+
+static struct snd_soc_jack mic_jack;
+
+static struct snd_soc_jack_pin mic_jack_pins[] = {
+ { .pin = "Mic1 Jack", .mask = SND_JACK_MICROPHONE },
+ { .pin = "Mic2 Jack", .mask = SND_JACK_MICROPHONE },
+};
+
+static int wm1133_ev1_init(struct snd_soc_codec *codec)
+{
+ struct snd_soc_card *card = codec->socdev->card;
+
+ snd_soc_dapm_new_controls(codec, wm1133_ev1_widgets,
+ ARRAY_SIZE(wm1133_ev1_widgets));
+
+ snd_soc_dapm_add_routes(codec, wm1133_ev1_map,
+ ARRAY_SIZE(wm1133_ev1_map));
+
+ /* Headphone jack detection */
+ snd_soc_jack_new(card, "Headphone", SND_JACK_HEADPHONE, &hp_jack);
+ snd_soc_jack_add_pins(&hp_jack, ARRAY_SIZE(hp_jack_pins),
+ hp_jack_pins);
+ wm8350_hp_jack_detect(codec, WM8350_JDR, &hp_jack, SND_JACK_HEADPHONE);
+
+ /* Microphone jack detection */
+ snd_soc_jack_new(card, "Microphone",
+ SND_JACK_MICROPHONE | SND_JACK_BTN_0, &mic_jack);
+ snd_soc_jack_add_pins(&mic_jack, ARRAY_SIZE(mic_jack_pins),
+ mic_jack_pins);
+ wm8350_mic_jack_detect(codec, &mic_jack, SND_JACK_MICROPHONE,
+ SND_JACK_BTN_0);
+
+ snd_soc_dapm_force_enable_pin(codec, "Mic Bias");
+
+ return 0;
+}
+
+
+static struct snd_soc_dai_link wm1133_ev1_dai = {
+ .name = "WM1133-EV1",
+ .stream_name = "Audio",
+ .cpu_dai = &imx_ssi_pcm_dai[0],
+ .codec_dai = &wm8350_dai,
+ .init = wm1133_ev1_init,
+ .ops = &wm1133_ev1_ops,
+ .symmetric_rates = 1,
+};
+
+static struct snd_soc_card wm1133_ev1 = {
+ .name = "WM1133-EV1",
+ .platform = &imx_soc_platform,
+ .dai_link = &wm1133_ev1_dai,
+ .num_links = 1,
+};
+
+static struct snd_soc_device wm1133_ev1_snd_devdata = {
+ .card = &wm1133_ev1,
+ .codec_dev = &soc_codec_dev_wm8350,
+};
+
+static struct platform_device *wm1133_ev1_snd_device;
+
+static int __init wm1133_ev1_audio_init(void)
+{
+ int ret;
+ unsigned int ptcr, pdcr;
+
+ /* SSI0 mastered by port 5 */
+ ptcr = MXC_AUDMUX_V2_PTCR_SYN |
+ MXC_AUDMUX_V2_PTCR_TFSDIR |
+ MXC_AUDMUX_V2_PTCR_TFSEL(MX31_AUDMUX_PORT5_SSI_PINS_5) |
+ MXC_AUDMUX_V2_PTCR_TCLKDIR |
+ MXC_AUDMUX_V2_PTCR_TCSEL(MX31_AUDMUX_PORT5_SSI_PINS_5);
+ pdcr = MXC_AUDMUX_V2_PDCR_RXDSEL(MX31_AUDMUX_PORT5_SSI_PINS_5);
+ mxc_audmux_v2_configure_port(MX31_AUDMUX_PORT1_SSI0, ptcr, pdcr);
+
+ ptcr = MXC_AUDMUX_V2_PTCR_SYN;
+ pdcr = MXC_AUDMUX_V2_PDCR_RXDSEL(MX31_AUDMUX_PORT1_SSI0);
+ mxc_audmux_v2_configure_port(MX31_AUDMUX_PORT5_SSI_PINS_5, ptcr, pdcr);
+
+ wm1133_ev1_snd_device = platform_device_alloc("soc-audio", -1);
+ if (!wm1133_ev1_snd_device)
+ return -ENOMEM;
+
+ platform_set_drvdata(wm1133_ev1_snd_device, &wm1133_ev1_snd_devdata);
+ wm1133_ev1_snd_devdata.dev = &wm1133_ev1_snd_device->dev;
+ ret = platform_device_add(wm1133_ev1_snd_device);
+
+ if (ret)
+ platform_device_put(wm1133_ev1_snd_device);
+
+ return ret;
+}
+module_init(wm1133_ev1_audio_init);
+
+static void __exit wm1133_ev1_audio_exit(void)
+{
+ platform_device_unregister(wm1133_ev1_snd_device);
+}
+module_exit(wm1133_ev1_audio_exit);
+
+MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_DESCRIPTION("Audio for WM1133-EV1 on i.MX31ADS");
+MODULE_LICENSE("GPL");
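
Note: wm1133_ev1_hw_params() above rejects any rate/format/channel combination that is not in the wm8350_audio[] table, but nothing advertises that restriction to userspace in advance. A hypothetical startup() hook along the lines below could export the supported rates as a hardware constraint; this is only a sketch under that assumption, not part of the patch (the rate list is transcribed from wm8350_audio[]).

	static unsigned int wm1133_ev1_rates[] = {
		8000, 11025, 16000, 22050, 32000, 44100, 48000, 88200, 96000,
	};

	static struct snd_pcm_hw_constraint_list wm1133_ev1_rate_list = {
		.count = ARRAY_SIZE(wm1133_ev1_rates),
		.list = wm1133_ev1_rates,
	};

	static int wm1133_ev1_startup(struct snd_pcm_substream *substream)
	{
		/* restrict the runtime to rates the table can satisfy */
		return snd_pcm_hw_constraint_list(substream->runtime, 0,
						  SNDRV_PCM_HW_PARAM_RATE,
						  &wm1133_ev1_rate_list);
	}

If used, such a hook would sit in wm1133_ev1_ops alongside the existing .hw_params callback.
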
diff --git a/sound/soc/omap/Kconfig b/sound/soc/omap/Kconfig
index f11963c21873..83be4a76d2bb 100644
--- a/sound/soc/omap/Kconfig
+++ b/sound/soc/omap/Kconfig
@@ -18,6 +18,16 @@ config SND_OMAP_SOC_N810
help
Say Y if you want to add support for SoC audio on Nokia N810.
+config SND_OMAP_SOC_RX51
+ tristate "SoC Audio support for Nokia RX-51"
+ depends on SND_OMAP_SOC && MACH_NOKIA_RX51
+ select OMAP_MCBSP
+ select SND_OMAP_SOC_MCBSP
+ select SND_SOC_TLV320AIC3X
+ help
+ Say Y if you want to add support for SoC audio on Nokia RX-51
+ hardware, also known as the Nokia N900.
+
config SND_OMAP_SOC_AMS_DELTA
tristate "SoC Audio support for Amstrad E3 (Delta) videophone"
depends on SND_OMAP_SOC && MACH_AMS_DELTA
diff --git a/sound/soc/omap/Makefile b/sound/soc/omap/Makefile
index 0bc00ca14b37..3a75755f25e4 100644
--- a/sound/soc/omap/Makefile
+++ b/sound/soc/omap/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_SND_OMAP_SOC_MCPDM) += snd-soc-omap-mcpdm.o
# OMAP Machine Support
snd-soc-n810-objs := n810.o
+snd-soc-rx51-objs := rx51.o
snd-soc-ams-delta-objs := ams-delta.o
snd-soc-osk5912-objs := osk5912.o
snd-soc-overo-objs := overo.o
@@ -22,6 +23,7 @@ snd-soc-zoom2-objs := zoom2.o
snd-soc-igep0020-objs := igep0020.o
obj-$(CONFIG_SND_OMAP_SOC_N810) += snd-soc-n810.o
+obj-$(CONFIG_SND_OMAP_SOC_RX51) += snd-soc-rx51.o
obj-$(CONFIG_SND_OMAP_SOC_AMS_DELTA) += snd-soc-ams-delta.o
obj-$(CONFIG_SND_OMAP_SOC_OSK5912) += snd-soc-osk5912.o
obj-$(CONFIG_SND_OMAP_SOC_OVERO) += snd-soc-overo.o
diff --git a/sound/soc/omap/mcpdm.c b/sound/soc/omap/mcpdm.c
index 1dab4c14874d..90b8bf71c893 100644
--- a/sound/soc/omap/mcpdm.c
+++ b/sound/soc/omap/mcpdm.c
@@ -1,5 +1,5 @@
/*
- * mcpdm.c -- McPDM interface driver
+ * mcpdm.c -- McPDM interface driver
*
* Author: Jorge Eduardo Candelaria <x0107209@ti.com>
* Copyright (C) 2009 - Texas Instruments, Inc.
@@ -39,46 +39,46 @@ static struct omap_mcpdm *mcpdm;
static inline void omap_mcpdm_write(u16 reg, u32 val)
{
- __raw_writel(val, mcpdm->io_base + reg);
+ __raw_writel(val, mcpdm->io_base + reg);
}
static inline int omap_mcpdm_read(u16 reg)
{
- return __raw_readl(mcpdm->io_base + reg);
+ return __raw_readl(mcpdm->io_base + reg);
}
static void omap_mcpdm_reg_dump(void)
{
- dev_dbg(mcpdm->dev, "***********************\n");
- dev_dbg(mcpdm->dev, "IRQSTATUS_RAW: 0x%04x\n",
- omap_mcpdm_read(MCPDM_IRQSTATUS_RAW));
- dev_dbg(mcpdm->dev, "IRQSTATUS: 0x%04x\n",
- omap_mcpdm_read(MCPDM_IRQSTATUS));
- dev_dbg(mcpdm->dev, "IRQENABLE_SET: 0x%04x\n",
- omap_mcpdm_read(MCPDM_IRQENABLE_SET));
- dev_dbg(mcpdm->dev, "IRQENABLE_CLR: 0x%04x\n",
- omap_mcpdm_read(MCPDM_IRQENABLE_CLR));
- dev_dbg(mcpdm->dev, "IRQWAKE_EN: 0x%04x\n",
- omap_mcpdm_read(MCPDM_IRQWAKE_EN));
- dev_dbg(mcpdm->dev, "DMAENABLE_SET: 0x%04x\n",
- omap_mcpdm_read(MCPDM_DMAENABLE_SET));
- dev_dbg(mcpdm->dev, "DMAENABLE_CLR: 0x%04x\n",
- omap_mcpdm_read(MCPDM_DMAENABLE_CLR));
- dev_dbg(mcpdm->dev, "DMAWAKEEN: 0x%04x\n",
- omap_mcpdm_read(MCPDM_DMAWAKEEN));
- dev_dbg(mcpdm->dev, "CTRL: 0x%04x\n",
- omap_mcpdm_read(MCPDM_CTRL));
- dev_dbg(mcpdm->dev, "DN_DATA: 0x%04x\n",
- omap_mcpdm_read(MCPDM_DN_DATA));
- dev_dbg(mcpdm->dev, "UP_DATA: 0x%04x\n",
- omap_mcpdm_read(MCPDM_UP_DATA));
- dev_dbg(mcpdm->dev, "FIFO_CTRL_DN: 0x%04x\n",
- omap_mcpdm_read(MCPDM_FIFO_CTRL_DN));
- dev_dbg(mcpdm->dev, "FIFO_CTRL_UP: 0x%04x\n",
- omap_mcpdm_read(MCPDM_FIFO_CTRL_UP));
- dev_dbg(mcpdm->dev, "DN_OFFSET: 0x%04x\n",
- omap_mcpdm_read(MCPDM_DN_OFFSET));
- dev_dbg(mcpdm->dev, "***********************\n");
+ dev_dbg(mcpdm->dev, "***********************\n");
+ dev_dbg(mcpdm->dev, "IRQSTATUS_RAW: 0x%04x\n",
+ omap_mcpdm_read(MCPDM_IRQSTATUS_RAW));
+ dev_dbg(mcpdm->dev, "IRQSTATUS: 0x%04x\n",
+ omap_mcpdm_read(MCPDM_IRQSTATUS));
+ dev_dbg(mcpdm->dev, "IRQENABLE_SET: 0x%04x\n",
+ omap_mcpdm_read(MCPDM_IRQENABLE_SET));
+ dev_dbg(mcpdm->dev, "IRQENABLE_CLR: 0x%04x\n",
+ omap_mcpdm_read(MCPDM_IRQENABLE_CLR));
+ dev_dbg(mcpdm->dev, "IRQWAKE_EN: 0x%04x\n",
+ omap_mcpdm_read(MCPDM_IRQWAKE_EN));
+ dev_dbg(mcpdm->dev, "DMAENABLE_SET: 0x%04x\n",
+ omap_mcpdm_read(MCPDM_DMAENABLE_SET));
+ dev_dbg(mcpdm->dev, "DMAENABLE_CLR: 0x%04x\n",
+ omap_mcpdm_read(MCPDM_DMAENABLE_CLR));
+ dev_dbg(mcpdm->dev, "DMAWAKEEN: 0x%04x\n",
+ omap_mcpdm_read(MCPDM_DMAWAKEEN));
+ dev_dbg(mcpdm->dev, "CTRL: 0x%04x\n",
+ omap_mcpdm_read(MCPDM_CTRL));
+ dev_dbg(mcpdm->dev, "DN_DATA: 0x%04x\n",
+ omap_mcpdm_read(MCPDM_DN_DATA));
+ dev_dbg(mcpdm->dev, "UP_DATA: 0x%04x\n",
+ omap_mcpdm_read(MCPDM_UP_DATA));
+ dev_dbg(mcpdm->dev, "FIFO_CTRL_DN: 0x%04x\n",
+ omap_mcpdm_read(MCPDM_FIFO_CTRL_DN));
+ dev_dbg(mcpdm->dev, "FIFO_CTRL_UP: 0x%04x\n",
+ omap_mcpdm_read(MCPDM_FIFO_CTRL_UP));
+ dev_dbg(mcpdm->dev, "DN_OFFSET: 0x%04x\n",
+ omap_mcpdm_read(MCPDM_DN_OFFSET));
+ dev_dbg(mcpdm->dev, "***********************\n");
}
/*
@@ -87,26 +87,26 @@ static void omap_mcpdm_reg_dump(void)
*/
static void omap_mcpdm_reset_capture(int reset)
{
- int ctrl = omap_mcpdm_read(MCPDM_CTRL);
+ int ctrl = omap_mcpdm_read(MCPDM_CTRL);
- if (reset)
- ctrl |= SW_UP_RST;
- else
- ctrl &= ~SW_UP_RST;
+ if (reset)
+ ctrl |= SW_UP_RST;
+ else
+ ctrl &= ~SW_UP_RST;
- omap_mcpdm_write(MCPDM_CTRL, ctrl);
+ omap_mcpdm_write(MCPDM_CTRL, ctrl);
}
static void omap_mcpdm_reset_playback(int reset)
{
- int ctrl = omap_mcpdm_read(MCPDM_CTRL);
+ int ctrl = omap_mcpdm_read(MCPDM_CTRL);
- if (reset)
- ctrl |= SW_DN_RST;
- else
- ctrl &= ~SW_DN_RST;
+ if (reset)
+ ctrl |= SW_DN_RST;
+ else
+ ctrl &= ~SW_DN_RST;
- omap_mcpdm_write(MCPDM_CTRL, ctrl);
+ omap_mcpdm_write(MCPDM_CTRL, ctrl);
}
/*
@@ -115,14 +115,14 @@ static void omap_mcpdm_reset_playback(int reset)
*/
void omap_mcpdm_start(int stream)
{
- int ctrl = omap_mcpdm_read(MCPDM_CTRL);
+ int ctrl = omap_mcpdm_read(MCPDM_CTRL);
- if (stream)
- ctrl |= mcpdm->up_channels;
- else
- ctrl |= mcpdm->dn_channels;
+ if (stream)
+ ctrl |= mcpdm->up_channels;
+ else
+ ctrl |= mcpdm->dn_channels;
- omap_mcpdm_write(MCPDM_CTRL, ctrl);
+ omap_mcpdm_write(MCPDM_CTRL, ctrl);
}
/*
@@ -131,14 +131,14 @@ void omap_mcpdm_start(int stream)
*/
void omap_mcpdm_stop(int stream)
{
- int ctrl = omap_mcpdm_read(MCPDM_CTRL);
+ int ctrl = omap_mcpdm_read(MCPDM_CTRL);
- if (stream)
- ctrl &= ~mcpdm->up_channels;
- else
- ctrl &= ~mcpdm->dn_channels;
+ if (stream)
+ ctrl &= ~mcpdm->up_channels;
+ else
+ ctrl &= ~mcpdm->dn_channels;
- omap_mcpdm_write(MCPDM_CTRL, ctrl);
+ omap_mcpdm_write(MCPDM_CTRL, ctrl);
}
/*
@@ -147,38 +147,38 @@ void omap_mcpdm_stop(int stream)
*/
int omap_mcpdm_capture_open(struct omap_mcpdm_link *uplink)
{
- int irq_mask = 0;
- int ctrl;
+ int irq_mask = 0;
+ int ctrl;
- if (!uplink)
- return -EINVAL;
+ if (!uplink)
+ return -EINVAL;
- mcpdm->uplink = uplink;
+ mcpdm->uplink = uplink;
- /* Enable irq request generation */
- irq_mask |= uplink->irq_mask & MCPDM_UPLINK_IRQ_MASK;
- omap_mcpdm_write(MCPDM_IRQENABLE_SET, irq_mask);
+ /* Enable irq request generation */
+ irq_mask |= uplink->irq_mask & MCPDM_UPLINK_IRQ_MASK;
+ omap_mcpdm_write(MCPDM_IRQENABLE_SET, irq_mask);
- /* Configure uplink threshold */
- if (uplink->threshold > UP_THRES_MAX)
- uplink->threshold = UP_THRES_MAX;
+ /* Configure uplink threshold */
+ if (uplink->threshold > UP_THRES_MAX)
+ uplink->threshold = UP_THRES_MAX;
- omap_mcpdm_write(MCPDM_FIFO_CTRL_UP, uplink->threshold);
+ omap_mcpdm_write(MCPDM_FIFO_CTRL_UP, uplink->threshold);
- /* Configure DMA controller */
- omap_mcpdm_write(MCPDM_DMAENABLE_SET, DMA_UP_ENABLE);
+ /* Configure DMA controller */
+ omap_mcpdm_write(MCPDM_DMAENABLE_SET, DMA_UP_ENABLE);
- /* Set pdm out format */
- ctrl = omap_mcpdm_read(MCPDM_CTRL);
- ctrl &= ~PDMOUTFORMAT;
- ctrl |= uplink->format & PDMOUTFORMAT;
+ /* Set pdm out format */
+ ctrl = omap_mcpdm_read(MCPDM_CTRL);
+ ctrl &= ~PDMOUTFORMAT;
+ ctrl |= uplink->format & PDMOUTFORMAT;
- /* Uplink channels */
- mcpdm->up_channels = uplink->channels & (PDM_UP_MASK | PDM_STATUS_MASK);
+ /* Uplink channels */
+ mcpdm->up_channels = uplink->channels & (PDM_UP_MASK | PDM_STATUS_MASK);
- omap_mcpdm_write(MCPDM_CTRL, ctrl);
+ omap_mcpdm_write(MCPDM_CTRL, ctrl);
- return 0;
+ return 0;
}
/*
@@ -187,38 +187,38 @@ int omap_mcpdm_capture_open(struct omap_mcpdm_link *uplink)
*/
int omap_mcpdm_playback_open(struct omap_mcpdm_link *downlink)
{
- int irq_mask = 0;
- int ctrl;
+ int irq_mask = 0;
+ int ctrl;
- if (!downlink)
- return -EINVAL;
+ if (!downlink)
+ return -EINVAL;
- mcpdm->downlink = downlink;
+ mcpdm->downlink = downlink;
- /* Enable irq request generation */
- irq_mask |= downlink->irq_mask & MCPDM_DOWNLINK_IRQ_MASK;
- omap_mcpdm_write(MCPDM_IRQENABLE_SET, irq_mask);
+ /* Enable irq request generation */
+ irq_mask |= downlink->irq_mask & MCPDM_DOWNLINK_IRQ_MASK;
+ omap_mcpdm_write(MCPDM_IRQENABLE_SET, irq_mask);
- /* Configure uplink threshold */
- if (downlink->threshold > DN_THRES_MAX)
- downlink->threshold = DN_THRES_MAX;
+ /* Configure uplink threshold */
+ if (downlink->threshold > DN_THRES_MAX)
+ downlink->threshold = DN_THRES_MAX;
- omap_mcpdm_write(MCPDM_FIFO_CTRL_DN, downlink->threshold);
+ omap_mcpdm_write(MCPDM_FIFO_CTRL_DN, downlink->threshold);
- /* Enable DMA request generation */
- omap_mcpdm_write(MCPDM_DMAENABLE_SET, DMA_DN_ENABLE);
+ /* Enable DMA request generation */
+ omap_mcpdm_write(MCPDM_DMAENABLE_SET, DMA_DN_ENABLE);
- /* Set pdm out format */
- ctrl = omap_mcpdm_read(MCPDM_CTRL);
- ctrl &= ~PDMOUTFORMAT;
- ctrl |= downlink->format & PDMOUTFORMAT;
+ /* Set pdm out format */
+ ctrl = omap_mcpdm_read(MCPDM_CTRL);
+ ctrl &= ~PDMOUTFORMAT;
+ ctrl |= downlink->format & PDMOUTFORMAT;
- /* Downlink channels */
- mcpdm->dn_channels = downlink->channels & (PDM_DN_MASK | PDM_CMD_MASK);
+ /* Downlink channels */
+ mcpdm->dn_channels = downlink->channels & (PDM_DN_MASK | PDM_CMD_MASK);
- omap_mcpdm_write(MCPDM_CTRL, ctrl);
+ omap_mcpdm_write(MCPDM_CTRL, ctrl);
- return 0;
+ return 0;
}
/*
@@ -227,24 +227,24 @@ int omap_mcpdm_playback_open(struct omap_mcpdm_link *downlink)
*/
int omap_mcpdm_capture_close(struct omap_mcpdm_link *uplink)
{
- int irq_mask = 0;
+ int irq_mask = 0;
- if (!uplink)
- return -EINVAL;
+ if (!uplink)
+ return -EINVAL;
- /* Disable irq request generation */
- irq_mask |= uplink->irq_mask & MCPDM_UPLINK_IRQ_MASK;
- omap_mcpdm_write(MCPDM_IRQENABLE_CLR, irq_mask);
+ /* Disable irq request generation */
+ irq_mask |= uplink->irq_mask & MCPDM_UPLINK_IRQ_MASK;
+ omap_mcpdm_write(MCPDM_IRQENABLE_CLR, irq_mask);
- /* Disable DMA request generation */
- omap_mcpdm_write(MCPDM_DMAENABLE_CLR, DMA_UP_ENABLE);
+ /* Disable DMA request generation */
+ omap_mcpdm_write(MCPDM_DMAENABLE_CLR, DMA_UP_ENABLE);
- /* Clear Downlink channels */
- mcpdm->up_channels = 0;
+ /* Clear Downlink channels */
+ mcpdm->up_channels = 0;
- mcpdm->uplink = NULL;
+ mcpdm->uplink = NULL;
- return 0;
+ return 0;
}
/*
@@ -253,124 +253,124 @@ int omap_mcpdm_capture_close(struct omap_mcpdm_link *uplink)
*/
int omap_mcpdm_playback_close(struct omap_mcpdm_link *downlink)
{
- int irq_mask = 0;
+ int irq_mask = 0;
- if (!downlink)
- return -EINVAL;
+ if (!downlink)
+ return -EINVAL;
- /* Disable irq request generation */
- irq_mask |= downlink->irq_mask & MCPDM_DOWNLINK_IRQ_MASK;
- omap_mcpdm_write(MCPDM_IRQENABLE_CLR, irq_mask);
+ /* Disable irq request generation */
+ irq_mask |= downlink->irq_mask & MCPDM_DOWNLINK_IRQ_MASK;
+ omap_mcpdm_write(MCPDM_IRQENABLE_CLR, irq_mask);
- /* Disable DMA request generation */
- omap_mcpdm_write(MCPDM_DMAENABLE_CLR, DMA_DN_ENABLE);
+ /* Disable DMA request generation */
+ omap_mcpdm_write(MCPDM_DMAENABLE_CLR, DMA_DN_ENABLE);
- /* clear Downlink channels */
- mcpdm->dn_channels = 0;
+ /* clear Downlink channels */
+ mcpdm->dn_channels = 0;
- mcpdm->downlink = NULL;
+ mcpdm->downlink = NULL;
- return 0;
+ return 0;
}
static irqreturn_t omap_mcpdm_irq_handler(int irq, void *dev_id)
{
- struct omap_mcpdm *mcpdm_irq = dev_id;
- int irq_status;
-
- irq_status = omap_mcpdm_read(MCPDM_IRQSTATUS);
-
- /* Acknowledge irq event */
- omap_mcpdm_write(MCPDM_IRQSTATUS, irq_status);
-
- if (irq & MCPDM_DN_IRQ_FULL) {
- dev_err(mcpdm_irq->dev, "DN FIFO error %x\n", irq_status);
- omap_mcpdm_reset_playback(1);
- omap_mcpdm_playback_open(mcpdm_irq->downlink);
- omap_mcpdm_reset_playback(0);
- }
-
- if (irq & MCPDM_DN_IRQ_EMPTY) {
- dev_err(mcpdm_irq->dev, "DN FIFO error %x\n", irq_status);
- omap_mcpdm_reset_playback(1);
- omap_mcpdm_playback_open(mcpdm_irq->downlink);
- omap_mcpdm_reset_playback(0);
- }
-
- if (irq & MCPDM_DN_IRQ) {
- dev_dbg(mcpdm_irq->dev, "DN write request\n");
- }
-
- if (irq & MCPDM_UP_IRQ_FULL) {
- dev_err(mcpdm_irq->dev, "UP FIFO error %x\n", irq_status);
- omap_mcpdm_reset_capture(1);
- omap_mcpdm_capture_open(mcpdm_irq->uplink);
- omap_mcpdm_reset_capture(0);
- }
-
- if (irq & MCPDM_UP_IRQ_EMPTY) {
- dev_err(mcpdm_irq->dev, "UP FIFO error %x\n", irq_status);
- omap_mcpdm_reset_capture(1);
- omap_mcpdm_capture_open(mcpdm_irq->uplink);
- omap_mcpdm_reset_capture(0);
- }
-
- if (irq & MCPDM_UP_IRQ) {
- dev_dbg(mcpdm_irq->dev, "UP write request\n");
- }
-
- return IRQ_HANDLED;
+ struct omap_mcpdm *mcpdm_irq = dev_id;
+ int irq_status;
+
+ irq_status = omap_mcpdm_read(MCPDM_IRQSTATUS);
+
+ /* Acknowledge irq event */
+ omap_mcpdm_write(MCPDM_IRQSTATUS, irq_status);
+
+ if (irq & MCPDM_DN_IRQ_FULL) {
+ dev_err(mcpdm_irq->dev, "DN FIFO error %x\n", irq_status);
+ omap_mcpdm_reset_playback(1);
+ omap_mcpdm_playback_open(mcpdm_irq->downlink);
+ omap_mcpdm_reset_playback(0);
+ }
+
+ if (irq & MCPDM_DN_IRQ_EMPTY) {
+ dev_err(mcpdm_irq->dev, "DN FIFO error %x\n", irq_status);
+ omap_mcpdm_reset_playback(1);
+ omap_mcpdm_playback_open(mcpdm_irq->downlink);
+ omap_mcpdm_reset_playback(0);
+ }
+
+ if (irq & MCPDM_DN_IRQ) {
+ dev_dbg(mcpdm_irq->dev, "DN write request\n");
+ }
+
+ if (irq & MCPDM_UP_IRQ_FULL) {
+ dev_err(mcpdm_irq->dev, "UP FIFO error %x\n", irq_status);
+ omap_mcpdm_reset_capture(1);
+ omap_mcpdm_capture_open(mcpdm_irq->uplink);
+ omap_mcpdm_reset_capture(0);
+ }
+
+ if (irq & MCPDM_UP_IRQ_EMPTY) {
+ dev_err(mcpdm_irq->dev, "UP FIFO error %x\n", irq_status);
+ omap_mcpdm_reset_capture(1);
+ omap_mcpdm_capture_open(mcpdm_irq->uplink);
+ omap_mcpdm_reset_capture(0);
+ }
+
+ if (irq & MCPDM_UP_IRQ) {
+ dev_dbg(mcpdm_irq->dev, "UP write request\n");
+ }
+
+ return IRQ_HANDLED;
}
int omap_mcpdm_request(void)
{
- int ret;
+ int ret;
- clk_enable(mcpdm->clk);
+ clk_enable(mcpdm->clk);
- spin_lock(&mcpdm->lock);
+ spin_lock(&mcpdm->lock);
- if (!mcpdm->free) {
- dev_err(mcpdm->dev, "McPDM interface is in use\n");
- spin_unlock(&mcpdm->lock);
- ret = -EBUSY;
- goto err;
- }
- mcpdm->free = 0;
+ if (!mcpdm->free) {
+ dev_err(mcpdm->dev, "McPDM interface is in use\n");
+ spin_unlock(&mcpdm->lock);
+ ret = -EBUSY;
+ goto err;
+ }
+ mcpdm->free = 0;
- spin_unlock(&mcpdm->lock);
+ spin_unlock(&mcpdm->lock);
- /* Disable lines while request is ongoing */
- omap_mcpdm_write(MCPDM_CTRL, 0x00);
+ /* Disable lines while request is ongoing */
+ omap_mcpdm_write(MCPDM_CTRL, 0x00);
- ret = request_irq(mcpdm->irq, omap_mcpdm_irq_handler,
- 0, "McPDM", (void *)mcpdm);
- if (ret) {
- dev_err(mcpdm->dev, "Request for McPDM IRQ failed\n");
- goto err;
- }
+ ret = request_irq(mcpdm->irq, omap_mcpdm_irq_handler,
+ 0, "McPDM", (void *)mcpdm);
+ if (ret) {
+ dev_err(mcpdm->dev, "Request for McPDM IRQ failed\n");
+ goto err;
+ }
- return 0;
+ return 0;
err:
- clk_disable(mcpdm->clk);
- return ret;
+ clk_disable(mcpdm->clk);
+ return ret;
}
void omap_mcpdm_free(void)
{
- spin_lock(&mcpdm->lock);
- if (mcpdm->free) {
- dev_err(mcpdm->dev, "McPDM interface is already free\n");
- spin_unlock(&mcpdm->lock);
- return;
- }
- mcpdm->free = 1;
- spin_unlock(&mcpdm->lock);
-
- clk_disable(mcpdm->clk);
-
- free_irq(mcpdm->irq, (void *)mcpdm);
+ spin_lock(&mcpdm->lock);
+ if (mcpdm->free) {
+ dev_err(mcpdm->dev, "McPDM interface is already free\n");
+ spin_unlock(&mcpdm->lock);
+ return;
+ }
+ mcpdm->free = 1;
+ spin_unlock(&mcpdm->lock);
+
+ clk_disable(mcpdm->clk);
+
+ free_irq(mcpdm->irq, (void *)mcpdm);
}
/* Enable/disable DC offset cancelation for the analog
@@ -378,108 +378,108 @@ void omap_mcpdm_free(void)
*/
int omap_mcpdm_set_offset(int offset1, int offset2)
{
- int offset;
+ int offset;
- if ((offset1 > DN_OFST_MAX) || (offset2 > DN_OFST_MAX))
- return -EINVAL;
+ if ((offset1 > DN_OFST_MAX) || (offset2 > DN_OFST_MAX))
+ return -EINVAL;
- offset = (offset1 << DN_OFST_RX1) | (offset2 << DN_OFST_RX2);
+ offset = (offset1 << DN_OFST_RX1) | (offset2 << DN_OFST_RX2);
- /* offset cancellation for channel 1 */
- if (offset1)
- offset |= DN_OFST_RX1_EN;
- else
- offset &= ~DN_OFST_RX1_EN;
+ /* offset cancellation for channel 1 */
+ if (offset1)
+ offset |= DN_OFST_RX1_EN;
+ else
+ offset &= ~DN_OFST_RX1_EN;
- /* offset cancellation for channel 2 */
- if (offset2)
- offset |= DN_OFST_RX2_EN;
- else
- offset &= ~DN_OFST_RX2_EN;
+ /* offset cancellation for channel 2 */
+ if (offset2)
+ offset |= DN_OFST_RX2_EN;
+ else
+ offset &= ~DN_OFST_RX2_EN;
- omap_mcpdm_write(MCPDM_DN_OFFSET, offset);
+ omap_mcpdm_write(MCPDM_DN_OFFSET, offset);
- return 0;
+ return 0;
}
static int __devinit omap_mcpdm_probe(struct platform_device *pdev)
{
- struct resource *res;
- int ret = 0;
-
- mcpdm = kzalloc(sizeof(struct omap_mcpdm), GFP_KERNEL);
- if (!mcpdm) {
- ret = -ENOMEM;
- goto exit;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "no resource\n");
- goto err_resource;
- }
-
- spin_lock_init(&mcpdm->lock);
- mcpdm->free = 1;
- mcpdm->io_base = ioremap(res->start, resource_size(res));
- if (!mcpdm->io_base) {
- ret = -ENOMEM;
- goto err_resource;
- }
-
- mcpdm->irq = platform_get_irq(pdev, 0);
-
- mcpdm->clk = clk_get(&pdev->dev, "pdm_ck");
- if (IS_ERR(mcpdm->clk)) {
- ret = PTR_ERR(mcpdm->clk);
- dev_err(&pdev->dev, "unable to get pdm_ck: %d\n", ret);
- goto err_clk;
- }
-
- mcpdm->dev = &pdev->dev;
- platform_set_drvdata(pdev, mcpdm);
-
- return 0;
+ struct resource *res;
+ int ret = 0;
+
+ mcpdm = kzalloc(sizeof(struct omap_mcpdm), GFP_KERNEL);
+ if (!mcpdm) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "no resource\n");
+ goto err_resource;
+ }
+
+ spin_lock_init(&mcpdm->lock);
+ mcpdm->free = 1;
+ mcpdm->io_base = ioremap(res->start, resource_size(res));
+ if (!mcpdm->io_base) {
+ ret = -ENOMEM;
+ goto err_resource;
+ }
+
+ mcpdm->irq = platform_get_irq(pdev, 0);
+
+ mcpdm->clk = clk_get(&pdev->dev, "pdm_ck");
+ if (IS_ERR(mcpdm->clk)) {
+ ret = PTR_ERR(mcpdm->clk);
+ dev_err(&pdev->dev, "unable to get pdm_ck: %d\n", ret);
+ goto err_clk;
+ }
+
+ mcpdm->dev = &pdev->dev;
+ platform_set_drvdata(pdev, mcpdm);
+
+ return 0;
err_clk:
- iounmap(mcpdm->io_base);
+ iounmap(mcpdm->io_base);
err_resource:
- kfree(mcpdm);
+ kfree(mcpdm);
exit:
- return ret;
+ return ret;
}
static int __devexit omap_mcpdm_remove(struct platform_device *pdev)
{
- struct omap_mcpdm *mcpdm_ptr = platform_get_drvdata(pdev);
+ struct omap_mcpdm *mcpdm_ptr = platform_get_drvdata(pdev);
- platform_set_drvdata(pdev, NULL);
+ platform_set_drvdata(pdev, NULL);
- clk_put(mcpdm_ptr->clk);
+ clk_put(mcpdm_ptr->clk);
- iounmap(mcpdm_ptr->io_base);
+ iounmap(mcpdm_ptr->io_base);
- mcpdm_ptr->clk = NULL;
- mcpdm_ptr->free = 0;
- mcpdm_ptr->dev = NULL;
+ mcpdm_ptr->clk = NULL;
+ mcpdm_ptr->free = 0;
+ mcpdm_ptr->dev = NULL;
- kfree(mcpdm_ptr);
+ kfree(mcpdm_ptr);
- return 0;
+ return 0;
}
static struct platform_driver omap_mcpdm_driver = {
- .probe = omap_mcpdm_probe,
- .remove = __devexit_p(omap_mcpdm_remove),
- .driver = {
- .name = "omap-mcpdm",
- },
+ .probe = omap_mcpdm_probe,
+ .remove = __devexit_p(omap_mcpdm_remove),
+ .driver = {
+ .name = "omap-mcpdm",
+ },
};
static struct platform_device *omap_mcpdm_device;
static int __init omap_mcpdm_init(void)
{
- return platform_driver_register(&omap_mcpdm_driver);
+ return platform_driver_register(&omap_mcpdm_driver);
}
arch_initcall(omap_mcpdm_init);
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
index 8ad9dc901007..2d33a89f147a 100644
--- a/sound/soc/omap/omap-mcbsp.c
+++ b/sound/soc/omap/omap-mcbsp.c
@@ -256,6 +256,31 @@ static int omap_mcbsp_dai_trigger(struct snd_pcm_substream *substream, int cmd,
return err;
}
+static snd_pcm_sframes_t omap_mcbsp_dai_delay(
+ struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
+ struct omap_mcbsp_data *mcbsp_data = to_mcbsp(cpu_dai->private_data);
+ u16 fifo_use;
+ snd_pcm_sframes_t delay;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ fifo_use = omap_mcbsp_get_tx_delay(mcbsp_data->bus_id);
+ else
+ fifo_use = omap_mcbsp_get_rx_delay(mcbsp_data->bus_id);
+
+ /*
+ * Divide the used locations with the channel count to get the
+ * FIFO usage in samples (don't care about partial samples in the
+ * buffer).
+ */
+ delay = fifo_use / substream->runtime->channels;
+
+ return delay;
+}
+
static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
@@ -308,7 +333,8 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
format = mcbsp_data->fmt & SND_SOC_DAIFMT_FORMAT_MASK;
wpf = channels = params_channels(params);
- if (channels == 2 && format == SND_SOC_DAIFMT_I2S) {
+ if (channels == 2 && (format == SND_SOC_DAIFMT_I2S ||
+ format == SND_SOC_DAIFMT_LEFT_J)) {
/* Use dual-phase frames */
regs->rcr2 |= RPHASE;
regs->xcr2 |= XPHASE;
@@ -353,6 +379,7 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
/* Set FS period and length in terms of bit clock periods */
switch (format) {
case SND_SOC_DAIFMT_I2S:
+ case SND_SOC_DAIFMT_LEFT_J:
regs->srgr2 |= FPER(framesize - 1);
regs->srgr1 |= FWID((framesize >> 1) - 1);
break;
@@ -404,6 +431,14 @@ static int omap_mcbsp_dai_set_dai_fmt(struct snd_soc_dai *cpu_dai,
regs->rcr2 |= RDATDLY(1);
regs->xcr2 |= XDATDLY(1);
break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ /* 0-bit data delay */
+ regs->rcr2 |= RDATDLY(0);
+ regs->xcr2 |= XDATDLY(0);
+ regs->spcr1 |= RJUST(2);
+ /* Invert FS polarity configuration */
+ temp_fmt ^= SND_SOC_DAIFMT_NB_IF;
+ break;
case SND_SOC_DAIFMT_DSP_A:
/* 1-bit data delay */
regs->rcr2 |= RDATDLY(1);
@@ -609,6 +644,7 @@ static struct snd_soc_dai_ops omap_mcbsp_dai_ops = {
.startup = omap_mcbsp_dai_startup,
.shutdown = omap_mcbsp_dai_shutdown,
.trigger = omap_mcbsp_dai_trigger,
+ .delay = omap_mcbsp_dai_delay,
.hw_params = omap_mcbsp_dai_hw_params,
.set_fmt = omap_mcbsp_dai_set_dai_fmt,
.set_clkdiv = omap_mcbsp_dai_set_clkdiv,
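
Note: the new .delay callback above reports how many frames are sitting in the McBSP transmit (or receive) FIFO. As of this series the ASoC core adds that figure to the stream's reported delay, so the value seen through snd_pcm_delay() in userspace covers the FIFO as well as the DMA buffer. A minimal userspace sketch (not part of the patch):

	#include <alsa/asoundlib.h>

	/* Sketch only: with the DAI delay wired up, this now also accounts
	 * for frames parked in the McBSP FIFO. */
	static long example_query_delay(snd_pcm_t *pcm)
	{
		snd_pcm_sframes_t delay = 0;

		if (snd_pcm_delay(pcm, &delay) < 0)
			return -1;

		return (long)delay;	/* frames not yet played out */
	}
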
diff --git a/sound/soc/omap/omap3pandora.c b/sound/soc/omap/omap3pandora.c
index de10f76baded..87ce842fa2e8 100644
--- a/sound/soc/omap/omap3pandora.c
+++ b/sound/soc/omap/omap3pandora.c
@@ -188,8 +188,6 @@ static int omap3pandora_out_init(struct snd_soc_codec *codec)
int ret;
/* All TWL4030 output pins are floating */
- snd_soc_dapm_nc_pin(codec, "OUTL");
- snd_soc_dapm_nc_pin(codec, "OUTR");
snd_soc_dapm_nc_pin(codec, "EARPIECE");
snd_soc_dapm_nc_pin(codec, "PREDRIVEL");
snd_soc_dapm_nc_pin(codec, "PREDRIVER");
diff --git a/sound/soc/omap/rx51.c b/sound/soc/omap/rx51.c
new file mode 100644
index 000000000000..47d831ef2dbb
--- /dev/null
+++ b/sound/soc/omap/rx51.c
@@ -0,0 +1,294 @@
+/*
+ * rx51.c -- SoC audio for Nokia RX-51
+ *
+ * Copyright (C) 2008 - 2009 Nokia Corporation
+ *
+ * Contact: Peter Ujfalusi <peter.ujfalusi@nokia.com>
+ * Eduardo Valentin <eduardo.valentin@nokia.com>
+ * Jarkko Nikula <jhnikula@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+
+#include <asm/mach-types.h>
+
+#include "omap-mcbsp.h"
+#include "omap-pcm.h"
+#include "../codecs/tlv320aic3x.h"
+
+/*
+ * REVISIT: TWL4030 GPIO base in RX-51. Now statically defined to 192. This
+ * gpio is reserved in arch/arm/mach-omap2/board-rx51-peripherals.c
+ */
+#define RX51_SPEAKER_AMP_TWL_GPIO (192 + 7)
+
+static int rx51_spk_func;
+static int rx51_dmic_func;
+
+static void rx51_ext_control(struct snd_soc_codec *codec)
+{
+ if (rx51_spk_func)
+ snd_soc_dapm_enable_pin(codec, "Ext Spk");
+ else
+ snd_soc_dapm_disable_pin(codec, "Ext Spk");
+ if (rx51_dmic_func)
+ snd_soc_dapm_enable_pin(codec, "DMic");
+ else
+ snd_soc_dapm_disable_pin(codec, "DMic");
+
+ snd_soc_dapm_sync(codec);
+}
+
+static int rx51_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_codec *codec = rtd->socdev->card->codec;
+
+ snd_pcm_hw_constraint_minmax(runtime,
+ SNDRV_PCM_HW_PARAM_CHANNELS, 2, 2);
+ rx51_ext_control(codec);
+
+ return 0;
+}
+
+static int rx51_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
+ struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
+ int err;
+
+ /* Set codec DAI configuration */
+ err = snd_soc_dai_set_fmt(codec_dai,
+ SND_SOC_DAIFMT_DSP_A |
+ SND_SOC_DAIFMT_IB_NF |
+ SND_SOC_DAIFMT_CBM_CFM);
+ if (err < 0)
+ return err;
+
+ /* Set cpu DAI configuration */
+ err = snd_soc_dai_set_fmt(cpu_dai,
+ SND_SOC_DAIFMT_DSP_A |
+ SND_SOC_DAIFMT_IB_NF |
+ SND_SOC_DAIFMT_CBM_CFM);
+ if (err < 0)
+ return err;
+
+ /* Set the codec system clock for DAC and ADC */
+ return snd_soc_dai_set_sysclk(codec_dai, 0, 19200000,
+ SND_SOC_CLOCK_IN);
+}
+
+static struct snd_soc_ops rx51_ops = {
+ .startup = rx51_startup,
+ .hw_params = rx51_hw_params,
+};
+
+static int rx51_get_spk(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = rx51_spk_func;
+
+ return 0;
+}
+
+static int rx51_set_spk(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+
+ if (rx51_spk_func == ucontrol->value.integer.value[0])
+ return 0;
+
+ rx51_spk_func = ucontrol->value.integer.value[0];
+ rx51_ext_control(codec);
+
+ return 1;
+}
+
+static int rx51_spk_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ if (SND_SOC_DAPM_EVENT_ON(event))
+ gpio_set_value(RX51_SPEAKER_AMP_TWL_GPIO, 1);
+ else
+ gpio_set_value(RX51_SPEAKER_AMP_TWL_GPIO, 0);
+
+ return 0;
+}
+
+static int rx51_get_input(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = rx51_dmic_func;
+
+ return 0;
+}
+
+static int rx51_set_input(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+
+ if (rx51_dmic_func == ucontrol->value.integer.value[0])
+ return 0;
+
+ rx51_dmic_func = ucontrol->value.integer.value[0];
+ rx51_ext_control(codec);
+
+ return 1;
+}
+
+static const struct snd_soc_dapm_widget aic34_dapm_widgets[] = {
+ SND_SOC_DAPM_SPK("Ext Spk", rx51_spk_event),
+ SND_SOC_DAPM_MIC("DMic", NULL),
+};
+
+static const struct snd_soc_dapm_route audio_map[] = {
+ {"Ext Spk", NULL, "HPLOUT"},
+ {"Ext Spk", NULL, "HPROUT"},
+
+ {"DMic Rate 64", NULL, "Mic Bias 2V"},
+ {"Mic Bias 2V", NULL, "DMic"},
+};
+
+static const char *spk_function[] = {"Off", "On"};
+static const char *input_function[] = {"ADC", "Digital Mic"};
+
+static const struct soc_enum rx51_enum[] = {
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(spk_function), spk_function),
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(input_function), input_function),
+};
+
+static const struct snd_kcontrol_new aic34_rx51_controls[] = {
+ SOC_ENUM_EXT("Speaker Function", rx51_enum[0],
+ rx51_get_spk, rx51_set_spk),
+ SOC_ENUM_EXT("Input Select", rx51_enum[1],
+ rx51_get_input, rx51_set_input),
+};
+
+static int rx51_aic34_init(struct snd_soc_codec *codec)
+{
+ int err;
+
+ /* Set up NC codec pins */
+ snd_soc_dapm_nc_pin(codec, "MIC3L");
+ snd_soc_dapm_nc_pin(codec, "MIC3R");
+ snd_soc_dapm_nc_pin(codec, "LINE1R");
+
+ /* Add RX-51 specific controls */
+ err = snd_soc_add_controls(codec, aic34_rx51_controls,
+ ARRAY_SIZE(aic34_rx51_controls));
+ if (err < 0)
+ return err;
+
+ /* Add RX-51 specific widgets */
+ snd_soc_dapm_new_controls(codec, aic34_dapm_widgets,
+ ARRAY_SIZE(aic34_dapm_widgets));
+
+ /* Set up RX-51 specific audio path audio_map */
+ snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+
+ snd_soc_dapm_sync(codec);
+
+ return 0;
+}
+
+/* Digital audio interface glue - connects codec <--> CPU */
+static struct snd_soc_dai_link rx51_dai[] = {
+ {
+ .name = "TLV320AIC34",
+ .stream_name = "AIC34",
+ .cpu_dai = &omap_mcbsp_dai[0],
+ .codec_dai = &aic3x_dai,
+ .init = rx51_aic34_init,
+ .ops = &rx51_ops,
+ },
+};
+
+/* Audio private data */
+static struct aic3x_setup_data rx51_aic34_setup = {
+ .gpio_func[0] = AIC3X_GPIO1_FUNC_DISABLED,
+ .gpio_func[1] = AIC3X_GPIO2_FUNC_DIGITAL_MIC_INPUT,
+};
+
+/* Audio card */
+static struct snd_soc_card rx51_sound_card = {
+ .name = "RX-51",
+ .dai_link = rx51_dai,
+ .num_links = ARRAY_SIZE(rx51_dai),
+ .platform = &omap_soc_platform,
+};
+
+/* Audio subsystem */
+static struct snd_soc_device rx51_snd_devdata = {
+ .card = &rx51_sound_card,
+ .codec_dev = &soc_codec_dev_aic3x,
+ .codec_data = &rx51_aic34_setup,
+};
+
+static struct platform_device *rx51_snd_device;
+
+static int __init rx51_soc_init(void)
+{
+ int err;
+
+ if (!machine_is_nokia_rx51())
+ return -ENODEV;
+
+ rx51_snd_device = platform_device_alloc("soc-audio", -1);
+ if (!rx51_snd_device) {
+ err = -ENOMEM;
+ goto err1;
+ }
+
+ platform_set_drvdata(rx51_snd_device, &rx51_snd_devdata);
+ rx51_snd_devdata.dev = &rx51_snd_device->dev;
+ *(unsigned int *)rx51_dai[0].cpu_dai->private_data = 1; /* McBSP2 */
+
+ err = platform_device_add(rx51_snd_device);
+ if (err)
+ goto err2;
+
+ return 0;
+err2:
+ platform_device_put(rx51_snd_device);
+err1:
+
+ return err;
+}
+
+static void __exit rx51_soc_exit(void)
+{
+ platform_device_unregister(rx51_snd_device);
+}
+
+module_init(rx51_soc_init);
+module_exit(rx51_soc_exit);
+
+MODULE_AUTHOR("Nokia Corporation");
+MODULE_DESCRIPTION("ALSA SoC Nokia RX-51");
+MODULE_LICENSE("GPL");
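The Speaker Function and Input Select controls above only cache the selected value and then call rx51_ext_control(), which is defined earlier in rx51.c and not visible in this hunk. As a rough sketch of what such a helper typically does (the pin handling below is an assumption for illustration, not the actual RX-51 code), it switches DAPM pins according to the cached state and re-syncs:

/* Hypothetical sketch only; not the real rx51_ext_control() */
static void example_ext_control(struct snd_soc_codec *codec)
{
	if (rx51_spk_func)
		snd_soc_dapm_enable_pin(codec, "Ext Spk");
	else
		snd_soc_dapm_disable_pin(codec, "Ext Spk");

	if (rx51_dmic_func)
		snd_soc_dapm_enable_pin(codec, "DMic");
	else
		snd_soc_dapm_disable_pin(codec, "DMic");

	/* apply the new pin state to the audio paths */
	snd_soc_dapm_sync(codec);
}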
diff --git a/sound/soc/omap/zoom2.c b/sound/soc/omap/zoom2.c
index f90a2ac888cf..50a94ee76ecc 100644
--- a/sound/soc/omap/zoom2.c
+++ b/sound/soc/omap/zoom2.c
@@ -181,9 +181,6 @@ static int zoom2_twl4030_init(struct snd_soc_codec *codec)
snd_soc_dapm_nc_pin(codec, "CARKITMIC");
snd_soc_dapm_nc_pin(codec, "DIGIMIC0");
snd_soc_dapm_nc_pin(codec, "DIGIMIC1");
-
- snd_soc_dapm_nc_pin(codec, "OUTL");
- snd_soc_dapm_nc_pin(codec, "OUTR");
snd_soc_dapm_nc_pin(codec, "EARPIECE");
snd_soc_dapm_nc_pin(codec, "PREDRIVEL");
snd_soc_dapm_nc_pin(codec, "PREDRIVER");
diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
index 376e14a9c273..495a36fba360 100644
--- a/sound/soc/pxa/Kconfig
+++ b/sound/soc/pxa/Kconfig
@@ -42,6 +42,14 @@ config SND_PXA2XX_SOC_SPITZ
Say Y if you want to add support for SoC audio on Sharp
Zaurus SL-Cxx00 models (Spitz, Borzoi and Akita).
+config SND_PXA2XX_SOC_Z2
+ tristate "SoC Audio support for Zipit Z2"
+ depends on SND_PXA2XX_SOC && MACH_ZIPIT2
+ select SND_PXA2XX_SOC_I2S
+ select SND_SOC_WM8750
+ help
+ Say Y if you want to add support for SoC audio on Zipit Z2.
+
config SND_PXA2XX_SOC_POODLE
tristate "SoC Audio support for Poodle"
depends on SND_PXA2XX_SOC && MACH_POODLE
diff --git a/sound/soc/pxa/Makefile b/sound/soc/pxa/Makefile
index f3e08fd40ca2..caa03d8f4789 100644
--- a/sound/soc/pxa/Makefile
+++ b/sound/soc/pxa/Makefile
@@ -22,6 +22,7 @@ snd-soc-palm27x-objs := palm27x.o
snd-soc-zylonite-objs := zylonite.o
snd-soc-magician-objs := magician.o
snd-soc-mioa701-objs := mioa701_wm9713.o
+snd-soc-z2-objs := z2.o
snd-soc-imote2-objs := imote2.o
snd-soc-raumfeld-objs := raumfeld.o
@@ -36,6 +37,7 @@ obj-$(CONFIG_SND_PXA2XX_SOC_EM_X270) += snd-soc-em-x270.o
obj-$(CONFIG_SND_PXA2XX_SOC_PALM27X) += snd-soc-palm27x.o
obj-$(CONFIG_SND_PXA2XX_SOC_MAGICIAN) += snd-soc-magician.o
obj-$(CONFIG_SND_PXA2XX_SOC_MIOA701) += snd-soc-mioa701.o
+obj-$(CONFIG_SND_PXA2XX_SOC_Z2) += snd-soc-z2.o
obj-$(CONFIG_SND_SOC_ZYLONITE) += snd-soc-zylonite.o
obj-$(CONFIG_SND_PXA2XX_SOC_IMOTE2) += snd-soc-imote2.o
obj-$(CONFIG_SND_SOC_RAUMFELD) += snd-soc-raumfeld.o
diff --git a/sound/soc/pxa/spitz.c b/sound/soc/pxa/spitz.c
index c4cd2acaacb4..1941a357e8c4 100644
--- a/sound/soc/pxa/spitz.c
+++ b/sound/soc/pxa/spitz.c
@@ -322,19 +322,44 @@ static struct snd_soc_card snd_soc_spitz = {
.num_links = 1,
};
-/* spitz audio private data */
-static struct wm8750_setup_data spitz_wm8750_setup = {
- .i2c_bus = 0,
- .i2c_address = 0x1b,
-};
-
/* spitz audio subsystem */
static struct snd_soc_device spitz_snd_devdata = {
.card = &snd_soc_spitz,
.codec_dev = &soc_codec_dev_wm8750,
- .codec_data = &spitz_wm8750_setup,
};
+/*
+ * FIXME: This is a temporary bodge to avoid cross-tree merge issues.
+ * New drivers should register the wm8750 I2C device in the machine
+ * setup code (under arch/arm for ARM systems).
+ */
+static int wm8750_i2c_register(void)
+{
+ struct i2c_board_info info;
+ struct i2c_adapter *adapter;
+ struct i2c_client *client;
+
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ info.addr = 0x1b;
+ strlcpy(info.type, "wm8750", I2C_NAME_SIZE);
+
+ adapter = i2c_get_adapter(0);
+ if (!adapter) {
+ printk(KERN_ERR "can't get i2c adapter 0\n");
+ return -ENODEV;
+ }
+
+ client = i2c_new_device(adapter, &info);
+ i2c_put_adapter(adapter);
+ if (!client) {
+ printk(KERN_ERR "can't add i2c device at 0x%x\n",
+ (unsigned int)info.addr);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
static struct platform_device *spitz_snd_device;
static int __init spitz_init(void)
@@ -344,6 +369,10 @@ static int __init spitz_init(void)
if (!(machine_is_spitz() || machine_is_borzoi() || machine_is_akita()))
return -ENODEV;
+ ret = wm8750_i2c_register();
+ if (ret != 0)
+ return ret;
+
spitz_snd_device = platform_device_alloc("soc-audio", -1);
if (!spitz_snd_device)
return -ENOMEM;
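The FIXME above notes that the long-term fix is to declare the WM8750 in the board setup code rather than in the audio machine driver. A minimal sketch of that board-side registration (file placement and bus number are assumptions):

/* Sketch only: would live in the machine setup code, e.g. under arch/arm/mach-pxa/ */
static struct i2c_board_info spitz_i2c_board_info[] __initdata = {
	{
		I2C_BOARD_INFO("wm8750", 0x1b),
	},
};

static void __init example_spitz_i2c_init(void)
{
	/* register on bus 0 before the I2C adapter is probed */
	i2c_register_board_info(0, spitz_i2c_board_info,
				ARRAY_SIZE(spitz_i2c_board_info));
}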
diff --git a/sound/soc/pxa/z2.c b/sound/soc/pxa/z2.c
new file mode 100644
index 000000000000..4e4d2fa8ddc5
--- /dev/null
+++ b/sound/soc/pxa/z2.c
@@ -0,0 +1,246 @@
+/*
+ * linux/sound/soc/pxa/z2.c
+ *
+ * SoC Audio driver for Aeronix Zipit Z2
+ *
+ * Copyright (C) 2009 Ken McGuire <kenm@desertweyr.com>
+ * Copyright (C) 2010 Marek Vasut <marek.vasut@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/jack.h>
+
+#include <asm/mach-types.h>
+#include <mach/hardware.h>
+#include <mach/audio.h>
+#include <mach/z2.h>
+
+#include "../codecs/wm8750.h"
+#include "pxa2xx-pcm.h"
+#include "pxa2xx-i2s.h"
+
+static struct snd_soc_card snd_soc_z2;
+
+static int z2_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
+ struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
+ unsigned int clk = 0;
+ int ret = 0;
+
+ switch (params_rate(params)) {
+ case 8000:
+ case 16000:
+ case 48000:
+ case 96000:
+ clk = 12288000;
+ break;
+ case 11025:
+ case 22050:
+ case 44100:
+ clk = 11289600;
+ break;
+ }
+
+ /* set codec DAI configuration */
+ ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
+ if (ret < 0)
+ return ret;
+
+ /* set cpu DAI configuration */
+ ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
+ if (ret < 0)
+ return ret;
+
+ /* set the codec system clock for DAC and ADC */
+ ret = snd_soc_dai_set_sysclk(codec_dai, WM8750_SYSCLK, clk,
+ SND_SOC_CLOCK_IN);
+ if (ret < 0)
+ return ret;
+
+ /* set the I2S system clock as input (unused) */
+ ret = snd_soc_dai_set_sysclk(cpu_dai, PXA2XX_I2S_SYSCLK, 0,
+ SND_SOC_CLOCK_IN);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static struct snd_soc_jack hs_jack;
+
+/* Headset jack detection DAPM pins */
+static struct snd_soc_jack_pin hs_jack_pins[] = {
+ {
+ .pin = "Mic Jack",
+ .mask = SND_JACK_MICROPHONE,
+ },
+ {
+ .pin = "Headphone Jack",
+ .mask = SND_JACK_HEADPHONE,
+ },
+};
+
+/* Headset jack detection gpios */
+static struct snd_soc_jack_gpio hs_jack_gpios[] = {
+ {
+ .gpio = GPIO37_ZIPITZ2_HEADSET_DETECT,
+ .name = "hsdet-gpio",
+ .report = SND_JACK_HEADSET,
+ .debounce_time = 200,
+ },
+};
+
+/* z2 machine dapm widgets */
+static const struct snd_soc_dapm_widget wm8750_dapm_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+ SND_SOC_DAPM_MIC("Mic Jack", NULL),
+ SND_SOC_DAPM_SPK("Ext Spk", NULL),
+
+ /* headset is a mic and mono headphone */
+ SND_SOC_DAPM_HP("Headset Jack", NULL),
+};
+
+/* Z2 machine audio_map */
+static const struct snd_soc_dapm_route audio_map[] = {
+
+ /* headphone connected to LOUT1, ROUT1 */
+ {"Headphone Jack", NULL, "LOUT1"},
+ {"Headphone Jack", NULL, "ROUT1"},
+
+ /* ext speaker connected to LOUT2, ROUT2 */
+ {"Ext Spk", NULL , "ROUT2"},
+ {"Ext Spk", NULL , "LOUT2"},
+
+ /* mic is connected to R input 2 - with bias */
+ {"RINPUT2", NULL, "Mic Bias"},
+ {"Mic Bias", NULL, "Mic Jack"},
+};
+
+/*
+ * Logic for a wm8750 as connected on a Z2 Device
+ */
+static int z2_wm8750_init(struct snd_soc_codec *codec)
+{
+ int ret;
+
+ /* NC codec pins */
+ snd_soc_dapm_disable_pin(codec, "LINPUT3");
+ snd_soc_dapm_disable_pin(codec, "RINPUT3");
+ snd_soc_dapm_disable_pin(codec, "OUT3");
+ snd_soc_dapm_disable_pin(codec, "MONO");
+
+ /* Add z2 specific widgets */
+ snd_soc_dapm_new_controls(codec, wm8750_dapm_widgets,
+ ARRAY_SIZE(wm8750_dapm_widgets));
+
+ /* Set up z2 specific audio paths */
+ snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+
+ ret = snd_soc_dapm_sync(codec);
+ if (ret)
+ goto err;
+
+ /* Jack detection API stuff */
+ ret = snd_soc_jack_new(&snd_soc_z2, "Headset Jack", SND_JACK_HEADSET,
+ &hs_jack);
+ if (ret)
+ goto err;
+
+ ret = snd_soc_jack_add_pins(&hs_jack, ARRAY_SIZE(hs_jack_pins),
+ hs_jack_pins);
+ if (ret)
+ goto err;
+
+ ret = snd_soc_jack_add_gpios(&hs_jack, ARRAY_SIZE(hs_jack_gpios),
+ hs_jack_gpios);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ return ret;
+}
+
+static struct snd_soc_ops z2_ops = {
+ .hw_params = z2_hw_params,
+};
+
+/* z2 digital audio interface glue - connects codec <--> CPU */
+static struct snd_soc_dai_link z2_dai = {
+ .name = "wm8750",
+ .stream_name = "WM8750",
+ .cpu_dai = &pxa_i2s_dai,
+ .codec_dai = &wm8750_dai,
+ .init = z2_wm8750_init,
+ .ops = &z2_ops,
+};
+
+/* z2 audio machine driver */
+static struct snd_soc_card snd_soc_z2 = {
+ .name = "Z2",
+ .platform = &pxa2xx_soc_platform,
+ .dai_link = &z2_dai,
+ .num_links = 1,
+};
+
+/* z2 audio subsystem */
+static struct snd_soc_device z2_snd_devdata = {
+ .card = &snd_soc_z2,
+ .codec_dev = &soc_codec_dev_wm8750,
+};
+
+static struct platform_device *z2_snd_device;
+
+static int __init z2_init(void)
+{
+ int ret;
+
+ if (!machine_is_zipit2())
+ return -ENODEV;
+
+ z2_snd_device = platform_device_alloc("soc-audio", -1);
+ if (!z2_snd_device)
+ return -ENOMEM;
+
+ platform_set_drvdata(z2_snd_device, &z2_snd_devdata);
+ z2_snd_devdata.dev = &z2_snd_device->dev;
+ ret = platform_device_add(z2_snd_device);
+
+ if (ret)
+ platform_device_put(z2_snd_device);
+
+ return ret;
+}
+
+static void __exit z2_exit(void)
+{
+ platform_device_unregister(z2_snd_device);
+}
+
+module_init(z2_init);
+module_exit(z2_exit);
+
+MODULE_AUTHOR("Ken McGuire <kenm@desertweyr.com>, "
+ "Marek Vasut <marek.vasut@gmail.com>");
+MODULE_DESCRIPTION("ALSA SoC ZipitZ2");
+MODULE_LICENSE("GPL");
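In z2_hw_params() above, the WM8750 system clock is chosen as a 256x multiple of the sample-rate family: 12.288 MHz covers the 8/16/48/96 kHz rates and 11.2896 MHz covers 11.025/22.05/44.1 kHz. A small sketch of the equivalent generic calculation (illustrative only, not part of the driver):

/* Illustrative: pick a 256*fs master clock for the two common rate families */
static unsigned int example_pick_sysclk(unsigned int rate)
{
	if (rate % 11025 == 0)
		return 11289600;	/* 256 * 44100 */
	return 12288000;		/* 256 * 48000 */
}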
diff --git a/sound/soc/s3c24xx/Kconfig b/sound/soc/s3c24xx/Kconfig
index 15fe57e5a232..2a7cc222d098 100644
--- a/sound/soc/s3c24xx/Kconfig
+++ b/sound/soc/s3c24xx/Kconfig
@@ -24,6 +24,11 @@ config SND_S3C64XX_SOC_I2S
select SND_S3C_I2SV2_SOC
select S3C64XX_DMA
+config SND_S3C64XX_SOC_I2S_V4
+ tristate
+ select SND_S3C_I2SV2_SOC
+ select S3C64XX_DMA
+
config SND_S3C_SOC_PCM
tristate
@@ -59,12 +64,11 @@ config SND_S3C24XX_SOC_JIVE_WM8750
config SND_S3C64XX_SOC_WM8580
tristate "SoC I2S Audio support for WM8580 on SMDK64XX"
- depends on SND_S3C24XX_SOC && (MACH_SMDK6400 || MACH_SMDK6410)
- depends on BROKEN
+ depends on SND_S3C24XX_SOC && MACH_SMDK6410
select SND_SOC_WM8580
- select SND_S3C64XX_SOC_I2S
+ select SND_S3C64XX_SOC_I2S_V4
help
- Sat Y if you want to add support for SoC audio on the SMDK64XX.
+ Say Y if you want to add support for SoC audio on the SMDK6410.
config SND_S3C24XX_SOC_SMDK2443_WM9710
tristate "SoC AC97 Audio support for SMDK2443 - WM9710"
diff --git a/sound/soc/s3c24xx/Makefile b/sound/soc/s3c24xx/Makefile
index df071a376fa2..81d8dc503f87 100644
--- a/sound/soc/s3c24xx/Makefile
+++ b/sound/soc/s3c24xx/Makefile
@@ -4,6 +4,7 @@ snd-soc-s3c24xx-i2s-objs := s3c24xx-i2s.o
snd-soc-s3c2412-i2s-objs := s3c2412-i2s.o
snd-soc-s3c64xx-i2s-objs := s3c64xx-i2s.o
snd-soc-s3c-ac97-objs := s3c-ac97.o
+snd-soc-s3c64xx-i2s-v4-objs := s3c64xx-i2s-v4.o
snd-soc-s3c-i2s-v2-objs := s3c-i2s-v2.o
snd-soc-s3c-pcm-objs := s3c-pcm.o
@@ -12,6 +13,7 @@ obj-$(CONFIG_SND_S3C24XX_SOC_I2S) += snd-soc-s3c24xx-i2s.o
obj-$(CONFIG_SND_S3C_SOC_AC97) += snd-soc-s3c-ac97.o
obj-$(CONFIG_SND_S3C2412_SOC_I2S) += snd-soc-s3c2412-i2s.o
obj-$(CONFIG_SND_S3C64XX_SOC_I2S) += snd-soc-s3c64xx-i2s.o
+obj-$(CONFIG_SND_S3C64XX_SOC_I2S_V4) += snd-soc-s3c64xx-i2s-v4.o
obj-$(CONFIG_SND_S3C_I2SV2_SOC) += snd-soc-s3c-i2s-v2.o
obj-$(CONFIG_SND_S3C_SOC_PCM) += snd-soc-s3c-pcm.o
diff --git a/sound/soc/s3c24xx/jive_wm8750.c b/sound/soc/s3c24xx/jive_wm8750.c
index 59dc2c6b56d9..8c108b121c10 100644
--- a/sound/soc/s3c24xx/jive_wm8750.c
+++ b/sound/soc/s3c24xx/jive_wm8750.c
@@ -70,7 +70,7 @@ static int jive_hw_params(struct snd_pcm_substream *substream,
}
s3c_i2sv2_iis_calc_rate(&div, NULL, params_rate(params),
- s3c2412_get_iisclk());
+ s3c_i2sv2_get_clock(cpu_dai));
/* set codec DAI configuration */
ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
@@ -152,15 +152,10 @@ static struct snd_soc_card snd_soc_machine_jive = {
.num_links = 1,
};
-/* jive audio private data */
-static struct wm8750_setup_data jive_wm8750_setup = {
-};
-
/* jive audio subsystem */
static struct snd_soc_device jive_snd_devdata = {
.card = &snd_soc_machine_jive,
.codec_dev = &soc_codec_dev_wm8750,
- .codec_data = &jive_wm8750_setup,
};
static struct platform_device *jive_snd_device;
diff --git a/sound/soc/s3c24xx/neo1973_gta02_wm8753.c b/sound/soc/s3c24xx/neo1973_gta02_wm8753.c
index dea83d30a5c9..209c25994c7e 100644
--- a/sound/soc/s3c24xx/neo1973_gta02_wm8753.c
+++ b/sound/soc/s3c24xx/neo1973_gta02_wm8753.c
@@ -362,6 +362,14 @@ static int neo1973_gta02_wm8753_init(struct snd_soc_codec *codec)
snd_soc_dapm_disable_pin(codec, "Handset Mic");
snd_soc_dapm_disable_pin(codec, "Handset Spk");
+ /* allow audio paths from the GSM modem to run during suspend */
+ snd_soc_dapm_ignore_suspend(codec, "Stereo Out");
+ snd_soc_dapm_ignore_suspend(codec, "GSM Line Out");
+ snd_soc_dapm_ignore_suspend(codec, "GSM Line In");
+ snd_soc_dapm_ignore_suspend(codec, "Headset Mic");
+ snd_soc_dapm_ignore_suspend(codec, "Handset Mic");
+ snd_soc_dapm_ignore_suspend(codec, "Handset Spk");
+
snd_soc_dapm_sync(codec);
return 0;
diff --git a/arch/arm/plat-samsung/include/plat/regs-s3c2412-iis.h b/sound/soc/s3c24xx/regs-i2s-v2.h
index abf2fbc2eb2f..5e5e5680580b 100644
--- a/arch/arm/plat-samsung/include/plat/regs-s3c2412-iis.h
+++ b/sound/soc/s3c24xx/regs-i2s-v2.h
@@ -20,6 +20,24 @@
#define S3C2412_IISTXD (0x10)
#define S3C2412_IISRXD (0x14)
+#define S5PC1XX_IISFICS 0x18
+#define S5PC1XX_IISTXDS 0x1C
+
+#define S5PC1XX_IISCON_SW_RST (1 << 31)
+#define S5PC1XX_IISCON_FRXOFSTATUS (1 << 26)
+#define S5PC1XX_IISCON_FRXORINTEN (1 << 25)
+#define S5PC1XX_IISCON_FTXSURSTAT (1 << 24)
+#define S5PC1XX_IISCON_FTXSURINTEN (1 << 23)
+#define S5PC1XX_IISCON_TXSDMAPAUSE (1 << 20)
+#define S5PC1XX_IISCON_TXSDMACTIVE (1 << 18)
+
+#define S3C64XX_IISCON_FTXURSTATUS (1 << 17)
+#define S3C64XX_IISCON_FTXURINTEN (1 << 16)
+#define S3C64XX_IISCON_TXFIFO2_EMPTY (1 << 15)
+#define S3C64XX_IISCON_TXFIFO1_EMPTY (1 << 14)
+#define S3C64XX_IISCON_TXFIFO2_FULL (1 << 13)
+#define S3C64XX_IISCON_TXFIFO1_FULL (1 << 12)
+
#define S3C2412_IISCON_LRINDEX (1 << 11)
#define S3C2412_IISCON_TXFIFO_EMPTY (1 << 10)
#define S3C2412_IISCON_RXFIFO_EMPTY (1 << 9)
@@ -33,18 +51,30 @@
#define S3C2412_IISCON_RXDMA_ACTIVE (1 << 1)
#define S3C2412_IISCON_IIS_ACTIVE (1 << 0)
+#define S5PC1XX_IISMOD_OPCLK_CDCLK_OUT (0 << 30)
+#define S5PC1XX_IISMOD_OPCLK_CDCLK_IN (1 << 30)
+#define S5PC1XX_IISMOD_OPCLK_BCLK_OUT (2 << 30)
+#define S5PC1XX_IISMOD_OPCLK_PCLK (3 << 30)
+#define S5PC1XX_IISMOD_OPCLK_MASK (3 << 30)
+#define S5PC1XX_IISMOD_TXS_IDMA (1 << 28) /* Sec_TXFIFO use I-DMA */
+#define S5PC1XX_IISMOD_BLCS_MASK 0x3
+#define S5PC1XX_IISMOD_BLCS_SHIFT 26
+#define S5PC1XX_IISMOD_BLCP_MASK 0x3
+#define S5PC1XX_IISMOD_BLCP_SHIFT 24
+
+#define S3C64XX_IISMOD_C2DD_HHALF (1 << 21) /* Discard Higher-half */
+#define S3C64XX_IISMOD_C2DD_LHALF (1 << 20) /* Discard Lower-half */
+#define S3C64XX_IISMOD_C1DD_HHALF (1 << 19)
+#define S3C64XX_IISMOD_C1DD_LHALF (1 << 18)
+#define S3C64XX_IISMOD_DC2_EN (1 << 17)
+#define S3C64XX_IISMOD_DC1_EN (1 << 16)
#define S3C64XX_IISMOD_BLC_16BIT (0 << 13)
#define S3C64XX_IISMOD_BLC_8BIT (1 << 13)
#define S3C64XX_IISMOD_BLC_24BIT (2 << 13)
#define S3C64XX_IISMOD_BLC_MASK (3 << 13)
-#define S3C64XX_IISMOD_IMS_PCLK (0 << 10)
-#define S3C64XX_IISMOD_IMS_SYSMUX (1 << 10)
-
-#define S3C2412_IISMOD_MASTER_INTERNAL (0 << 10)
-#define S3C2412_IISMOD_MASTER_EXTERNAL (1 << 10)
-#define S3C2412_IISMOD_SLAVE (2 << 10)
-#define S3C2412_IISMOD_MASTER_MASK (3 << 10)
+#define S3C2412_IISMOD_IMS_SYSMUX (1 << 10)
+#define S3C2412_IISMOD_SLAVE (1 << 11)
#define S3C2412_IISMOD_MODE_TXONLY (0 << 8)
#define S3C2412_IISMOD_MODE_RXONLY (1 << 8)
#define S3C2412_IISMOD_MODE_TXRX (2 << 8)
@@ -71,12 +101,15 @@
#define S3C2412_IISPSR_PSREN (1 << 15)
+#define S3C64XX_IISFIC_TX2COUNT(x) (((x) >> 24) & 0xf)
+#define S3C64XX_IISFIC_TX1COUNT(x) (((x) >> 16) & 0xf)
+
#define S3C2412_IISFIC_TXFLUSH (1 << 15)
#define S3C2412_IISFIC_RXFLUSH (1 << 7)
#define S3C2412_IISFIC_TXCOUNT(x) (((x) >> 8) & 0xf)
#define S3C2412_IISFIC_RXCOUNT(x) (((x) >> 0) & 0xf)
-
+#define S5PC1XX_IISFICS_TXFLUSH (1 << 15)
+#define S5PC1XX_IISFICS_TXCOUNT(x) (((x) >> 8) & 0x7f)
#endif /* __ASM_ARCH_REGS_S3C2412_IIS_H */
-
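The new TX2COUNT/TX1COUNT macros expose the fill level of the secondary and primary TX FIFOs, and the existing TXCOUNT/RXCOUNT macros are what the new delay callback in s3c-i2s-v2.c (further down in this series) reads. A minimal read sketch, assuming the caller already holds the mapped register base:

/* Sketch: how many words are currently queued in the primary TX FIFO */
static int example_tx_fifo_level(void __iomem *regs)
{
	u32 fic = readl(regs + S3C2412_IISFIC);

	return S3C2412_IISFIC_TXCOUNT(fic);
}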
diff --git a/sound/soc/s3c24xx/s3c-i2s-v2.c b/sound/soc/s3c24xx/s3c-i2s-v2.c
index 88515946b6c0..13311c8cf965 100644
--- a/sound/soc/s3c24xx/s3c-i2s-v2.c
+++ b/sound/soc/s3c24xx/s3c-i2s-v2.c
@@ -16,24 +16,17 @@
* option) any later version.
*/
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/device.h>
#include <linux/delay.h>
#include <linux/clk.h>
-#include <linux/kernel.h>
#include <linux/io.h>
-#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
-#include <sound/initval.h>
#include <sound/soc.h>
-#include <plat/regs-s3c2412-iis.h>
-
#include <mach/dma.h>
+#include "regs-i2s-v2.h"
#include "s3c-i2s-v2.h"
#include "s3c-dma.h"
@@ -272,35 +265,14 @@ static int s3c2412_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
iismod = readl(i2s->regs + S3C2412_IISMOD);
pr_debug("hw_params r: IISMOD: %x \n", iismod);
-#if defined(CONFIG_CPU_S3C2412) || defined(CONFIG_CPU_S3C2413)
-#define IISMOD_MASTER_MASK S3C2412_IISMOD_MASTER_MASK
-#define IISMOD_SLAVE S3C2412_IISMOD_SLAVE
-#define IISMOD_MASTER S3C2412_IISMOD_MASTER_INTERNAL
-#endif
-
-#if defined(CONFIG_PLAT_S3C64XX)
-/* From Rev1.1 datasheet, we have two master and two slave modes:
- * IMS[11:10]:
- * 00 = master mode, fed from PCLK
- * 01 = master mode, fed from CLKAUDIO
- * 10 = slave mode, using PCLK
- * 11 = slave mode, using I2SCLK
- */
-#define IISMOD_MASTER_MASK (1 << 11)
-#define IISMOD_SLAVE (1 << 11)
-#define IISMOD_MASTER (0 << 11)
-#endif
-
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBM_CFM:
i2s->master = 0;
- iismod &= ~IISMOD_MASTER_MASK;
- iismod |= IISMOD_SLAVE;
+ iismod |= S3C2412_IISMOD_SLAVE;
break;
case SND_SOC_DAIFMT_CBS_CFS:
i2s->master = 1;
- iismod &= ~IISMOD_MASTER_MASK;
- iismod |= IISMOD_MASTER;
+ iismod &= ~S3C2412_IISMOD_SLAVE;
break;
default:
pr_err("unknwon master/slave format\n");
@@ -332,7 +304,7 @@ static int s3c2412_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
return 0;
}
-static int s3c2412_i2s_hw_params(struct snd_pcm_substream *substream,
+static int s3c_i2sv2_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *socdai)
{
@@ -355,37 +327,67 @@ static int s3c2412_i2s_hw_params(struct snd_pcm_substream *substream,
iismod = readl(i2s->regs + S3C2412_IISMOD);
pr_debug("%s: r: IISMOD: %x\n", __func__, iismod);
-#if defined(CONFIG_CPU_S3C2412) || defined(CONFIG_CPU_S3C2413)
+ iismod &= ~S3C64XX_IISMOD_BLC_MASK;
+ /* Sample size */
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S8:
- iismod |= S3C2412_IISMOD_8BIT;
+ iismod |= S3C64XX_IISMOD_BLC_8BIT;
break;
case SNDRV_PCM_FORMAT_S16_LE:
- iismod &= ~S3C2412_IISMOD_8BIT;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ iismod |= S3C64XX_IISMOD_BLC_24BIT;
break;
}
-#endif
-#ifdef CONFIG_PLAT_S3C64XX
- iismod &= ~(S3C64XX_IISMOD_BLC_MASK | S3C2412_IISMOD_BCLK_MASK);
- /* Sample size */
- switch (params_format(params)) {
- case SNDRV_PCM_FORMAT_S8:
- /* 8 bit sample, 16fs BCLK */
- iismod |= (S3C64XX_IISMOD_BLC_8BIT | S3C2412_IISMOD_BCLK_16FS);
+ writel(iismod, i2s->regs + S3C2412_IISMOD);
+ pr_debug("%s: w: IISMOD: %x\n", __func__, iismod);
+
+ return 0;
+}
+
+static int s3c_i2sv2_set_sysclk(struct snd_soc_dai *cpu_dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct s3c_i2sv2_info *i2s = to_info(cpu_dai);
+ u32 iismod = readl(i2s->regs + S3C2412_IISMOD);
+
+ pr_debug("Entered %s\n", __func__);
+ pr_debug("%s r: IISMOD: %x\n", __func__, iismod);
+
+ switch (clk_id) {
+ case S3C_I2SV2_CLKSRC_PCLK:
+ iismod &= ~S3C2412_IISMOD_IMS_SYSMUX;
break;
- case SNDRV_PCM_FORMAT_S16_LE:
- /* 16 bit sample, 32fs BCLK */
+
+ case S3C_I2SV2_CLKSRC_AUDIOBUS:
+ iismod |= S3C2412_IISMOD_IMS_SYSMUX;
break;
- case SNDRV_PCM_FORMAT_S24_LE:
- /* 24 bit sample, 48fs BCLK */
- iismod |= (S3C64XX_IISMOD_BLC_24BIT | S3C2412_IISMOD_BCLK_48FS);
+
+ case S3C_I2SV2_CLKSRC_CDCLK:
+ /* Error if controller doesn't have the CDCLKCON bit */
+ if (!(i2s->feature & S3C_FEATURE_CDCLKCON))
+ return -EINVAL;
+
+ switch (dir) {
+ case SND_SOC_CLOCK_IN:
+ iismod |= S3C64XX_IISMOD_CDCLKCON;
+ break;
+ case SND_SOC_CLOCK_OUT:
+ iismod &= ~S3C64XX_IISMOD_CDCLKCON;
+ break;
+ default:
+ return -EINVAL;
+ }
break;
+
+ default:
+ return -EINVAL;
}
-#endif
writel(iismod, i2s->regs + S3C2412_IISMOD);
- pr_debug("%s: w: IISMOD: %x\n", __func__, iismod);
+ pr_debug("%s w: IISMOD: %x\n", __func__, iismod);
+
return 0;
}
@@ -472,29 +474,25 @@ static int s3c2412_i2s_set_clkdiv(struct snd_soc_dai *cpu_dai,
switch (div_id) {
case S3C_I2SV2_DIV_BCLK:
- if (div > 3) {
- /* convert value to bit field */
-
- switch (div) {
- case 16:
- div = S3C2412_IISMOD_BCLK_16FS;
- break;
+ switch (div) {
+ case 16:
+ div = S3C2412_IISMOD_BCLK_16FS;
+ break;
- case 32:
- div = S3C2412_IISMOD_BCLK_32FS;
- break;
+ case 32:
+ div = S3C2412_IISMOD_BCLK_32FS;
+ break;
- case 24:
- div = S3C2412_IISMOD_BCLK_24FS;
- break;
+ case 24:
+ div = S3C2412_IISMOD_BCLK_24FS;
+ break;
- case 48:
- div = S3C2412_IISMOD_BCLK_48FS;
- break;
+ case 48:
+ div = S3C2412_IISMOD_BCLK_48FS;
+ break;
- default:
- return -EINVAL;
- }
+ default:
+ return -EINVAL;
}
reg = readl(i2s->regs + S3C2412_IISMOD);
@@ -505,29 +503,25 @@ static int s3c2412_i2s_set_clkdiv(struct snd_soc_dai *cpu_dai,
break;
case S3C_I2SV2_DIV_RCLK:
- if (div > 3) {
- /* convert value to bit field */
-
- switch (div) {
- case 256:
- div = S3C2412_IISMOD_RCLK_256FS;
- break;
+ switch (div) {
+ case 256:
+ div = S3C2412_IISMOD_RCLK_256FS;
+ break;
- case 384:
- div = S3C2412_IISMOD_RCLK_384FS;
- break;
+ case 384:
+ div = S3C2412_IISMOD_RCLK_384FS;
+ break;
- case 512:
- div = S3C2412_IISMOD_RCLK_512FS;
- break;
+ case 512:
+ div = S3C2412_IISMOD_RCLK_512FS;
+ break;
- case 768:
- div = S3C2412_IISMOD_RCLK_768FS;
- break;
+ case 768:
+ div = S3C2412_IISMOD_RCLK_768FS;
+ break;
- default:
- return -EINVAL;
- }
+ default:
+ return -EINVAL;
}
reg = readl(i2s->regs + S3C2412_IISMOD);
@@ -553,6 +547,33 @@ static int s3c2412_i2s_set_clkdiv(struct snd_soc_dai *cpu_dai,
return 0;
}
+static snd_pcm_sframes_t s3c2412_i2s_delay(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct s3c_i2sv2_info *i2s = to_info(dai);
+ u32 reg = readl(i2s->regs + S3C2412_IISFIC);
+ snd_pcm_sframes_t delay;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ delay = S3C2412_IISFIC_TXCOUNT(reg);
+ else
+ delay = S3C2412_IISFIC_RXCOUNT(reg);
+
+ return delay;
+}
+
+struct clk *s3c_i2sv2_get_clock(struct snd_soc_dai *cpu_dai)
+{
+ struct s3c_i2sv2_info *i2s = to_info(cpu_dai);
+ u32 iismod = readl(i2s->regs + S3C2412_IISMOD);
+
+ if (iismod & S3C2412_IISMOD_IMS_SYSMUX)
+ return i2s->iis_cclk;
+ else
+ return i2s->iis_pclk;
+}
+EXPORT_SYMBOL_GPL(s3c_i2sv2_get_clock);
+
/* default table of all available root fs divisors */
static unsigned int iis_fs_tab[] = { 256, 512, 384, 768 };
@@ -735,9 +756,15 @@ int s3c_i2sv2_register_dai(struct snd_soc_dai *dai)
struct snd_soc_dai_ops *ops = dai->ops;
ops->trigger = s3c2412_i2s_trigger;
- ops->hw_params = s3c2412_i2s_hw_params;
+ if (!ops->hw_params)
+ ops->hw_params = s3c_i2sv2_hw_params;
ops->set_fmt = s3c2412_i2s_set_fmt;
ops->set_clkdiv = s3c2412_i2s_set_clkdiv;
+ ops->set_sysclk = s3c_i2sv2_set_sysclk;
+
+ /* Allow overriding by (for example) IISv4 */
+ if (!ops->delay)
+ ops->delay = s3c2412_i2s_delay;
dai->suspend = s3c2412_i2s_suspend;
dai->resume = s3c2412_i2s_resume;
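s3c_i2sv2_get_clock() replaces the per-SoC s3c2412_get_iisclk()/s3c64xx_i2s_get_clock() helpers, and the jive_wm8750.c hunk earlier in this patch already shows the intended caller. A condensed sketch of how a machine driver could use it in hw_params (error handling omitted, helper name is illustrative):

/* Sketch: derive IIS dividers from whichever root clock the CPU DAI selected */
static int example_set_iis_dividers(struct snd_soc_dai *cpu_dai,
				    struct snd_pcm_hw_params *params)
{
	struct s3c_i2sv2_rate_calc div;

	s3c_i2sv2_iis_calc_rate(&div, NULL, params_rate(params),
				s3c_i2sv2_get_clock(cpu_dai));

	snd_soc_dai_set_clkdiv(cpu_dai, S3C_I2SV2_DIV_RCLK, div.fs_div);
	snd_soc_dai_set_clkdiv(cpu_dai, S3C_I2SV2_DIV_PRESCALER,
			       div.clk_div - 1);

	return 0;
}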
diff --git a/sound/soc/s3c24xx/s3c-i2s-v2.h b/sound/soc/s3c24xx/s3c-i2s-v2.h
index ecf8eaaed1db..766f43a13d8b 100644
--- a/sound/soc/s3c24xx/s3c-i2s-v2.h
+++ b/sound/soc/s3c24xx/s3c-i2s-v2.h
@@ -25,10 +25,20 @@
#define S3C_I2SV2_DIV_RCLK (2)
#define S3C_I2SV2_DIV_PRESCALER (3)
+#define S3C_I2SV2_CLKSRC_PCLK 0
+#define S3C_I2SV2_CLKSRC_AUDIOBUS 1
+#define S3C_I2SV2_CLKSRC_CDCLK 2
+
+/* Set this flag for I2S controllers whose IISMOD[12] bit can bridge/break
+ * the RCLK signal to the external Xi2sCDCLK pin.
+ */
+#define S3C_FEATURE_CDCLKCON (1 << 0)
+
/**
* struct s3c_i2sv2_info - S3C I2S-V2 information
* @dev: The parent device passed to use from the probe.
* @regs: The pointer to the device registe block.
+ * @feature: Set of bit-flags indicating features of the controller.
* @master: True if the I2S core is the I2S bit clock master.
* @dma_playback: DMA information for playback channel.
* @dma_capture: DMA information for capture channel.
@@ -43,9 +53,10 @@ struct s3c_i2sv2_info {
struct device *dev;
void __iomem *regs;
+ u32 feature;
+
struct clk *iis_pclk;
struct clk *iis_cclk;
- struct clk *iis_clk;
unsigned char master;
@@ -57,6 +68,8 @@ struct s3c_i2sv2_info {
u32 suspend_iispsr;
};
+extern struct clk *s3c_i2sv2_get_clock(struct snd_soc_dai *cpu_dai);
+
struct s3c_i2sv2_rate_calc {
unsigned int clk_div; /* for prescaler */
unsigned int fs_div; /* for root frame clock */
diff --git a/sound/soc/s3c24xx/s3c2412-i2s.c b/sound/soc/s3c24xx/s3c2412-i2s.c
index 359e59346ba2..709adef9d043 100644
--- a/sound/soc/s3c24xx/s3c2412-i2s.c
+++ b/sound/soc/s3c24xx/s3c2412-i2s.c
@@ -32,12 +32,11 @@
#include <sound/soc.h>
#include <mach/hardware.h>
-#include <plat/regs-s3c2412-iis.h>
-
#include <mach/regs-gpio.h>
#include <mach/dma.h>
#include "s3c-dma.h"
+#include "regs-i2s-v2.h"
#include "s3c2412-i2s.h"
#define S3C2412_I2S_DEBUG 0
@@ -66,44 +65,11 @@ static struct s3c_dma_params s3c2412_i2s_pcm_stereo_in = {
static struct s3c_i2sv2_info s3c2412_i2s;
-/*
- * Set S3C2412 Clock source
- */
-static int s3c2412_i2s_set_sysclk(struct snd_soc_dai *cpu_dai,
- int clk_id, unsigned int freq, int dir)
+static inline struct s3c_i2sv2_info *to_info(struct snd_soc_dai *cpu_dai)
{
- u32 iismod = readl(s3c2412_i2s.regs + S3C2412_IISMOD);
-
- pr_debug("%s(%p, %d, %u, %d)\n", __func__, cpu_dai, clk_id,
- freq, dir);
-
- switch (clk_id) {
- case S3C2412_CLKSRC_PCLK:
- s3c2412_i2s.master = 1;
- iismod &= ~S3C2412_IISMOD_MASTER_MASK;
- iismod |= S3C2412_IISMOD_MASTER_INTERNAL;
- break;
- case S3C2412_CLKSRC_I2SCLK:
- s3c2412_i2s.master = 0;
- iismod &= ~S3C2412_IISMOD_MASTER_MASK;
- iismod |= S3C2412_IISMOD_MASTER_EXTERNAL;
- break;
- default:
- return -EINVAL;
- }
-
- writel(iismod, s3c2412_i2s.regs + S3C2412_IISMOD);
- return 0;
+ return cpu_dai->private_data;
}
-
-struct clk *s3c2412_get_iisclk(void)
-{
- return s3c2412_i2s.iis_clk;
-}
-EXPORT_SYMBOL_GPL(s3c2412_get_iisclk);
-
-
static int s3c2412_i2s_probe(struct platform_device *pdev,
struct snd_soc_dai *dai)
{
@@ -142,13 +108,48 @@ static int s3c2412_i2s_probe(struct platform_device *pdev,
return 0;
}
+static int s3c2412_i2s_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *cpu_dai)
+{
+ struct s3c_i2sv2_info *i2s = to_info(cpu_dai);
+ struct s3c_dma_params *dma_data;
+ u32 iismod;
+
+ pr_debug("Entered %s\n", __func__);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ dma_data = i2s->dma_playback;
+ else
+ dma_data = i2s->dma_capture;
+
+ snd_soc_dai_set_dma_data(cpu_dai, substream, dma_data);
+
+ iismod = readl(i2s->regs + S3C2412_IISMOD);
+ pr_debug("%s: r: IISMOD: %x\n", __func__, iismod);
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S8:
+ iismod |= S3C2412_IISMOD_8BIT;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ iismod &= ~S3C2412_IISMOD_8BIT;
+ break;
+ }
+
+ writel(iismod, i2s->regs + S3C2412_IISMOD);
+ pr_debug("%s: w: IISMOD: %x\n", __func__, iismod);
+
+ return 0;
+}
+
#define S3C2412_I2S_RATES \
(SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 | SNDRV_PCM_RATE_16000 | \
SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \
SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
static struct snd_soc_dai_ops s3c2412_i2s_dai_ops = {
- .set_sysclk = s3c2412_i2s_set_sysclk,
+ .hw_params = s3c2412_i2s_hw_params,
};
struct snd_soc_dai s3c2412_i2s_dai = {
diff --git a/sound/soc/s3c24xx/s3c2412-i2s.h b/sound/soc/s3c24xx/s3c2412-i2s.h
index 92848e54be16..0b5686b4d5c3 100644
--- a/sound/soc/s3c24xx/s3c2412-i2s.h
+++ b/sound/soc/s3c24xx/s3c2412-i2s.h
@@ -21,10 +21,8 @@
#define S3C2412_DIV_RCLK S3C_I2SV2_DIV_RCLK
#define S3C2412_DIV_PRESCALER S3C_I2SV2_DIV_PRESCALER
-#define S3C2412_CLKSRC_PCLK (0)
-#define S3C2412_CLKSRC_I2SCLK (1)
-
-extern struct clk *s3c2412_get_iisclk(void);
+#define S3C2412_CLKSRC_PCLK S3C_I2SV2_CLKSRC_PCLK
+#define S3C2412_CLKSRC_I2SCLK S3C_I2SV2_CLKSRC_AUDIOBUS
extern struct snd_soc_dai s3c2412_i2s_dai;
diff --git a/sound/soc/s3c24xx/s3c64xx-i2s-v4.c b/sound/soc/s3c24xx/s3c64xx-i2s-v4.c
new file mode 100644
index 000000000000..06db130030a1
--- /dev/null
+++ b/sound/soc/s3c24xx/s3c64xx-i2s-v4.c
@@ -0,0 +1,209 @@
+/* sound/soc/s3c24xx/s3c64xx-i2s-v4.c
+ *
+ * ALSA SoC Audio Layer - S3C64XX I2Sv4 driver
+ * Copyright (c) 2010 Samsung Electronics Co. Ltd
+ * Author: Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+
+#include <sound/soc.h>
+#include <sound/pcm_params.h>
+
+#include <mach/gpio-bank-c.h>
+#include <mach/gpio-bank-h.h>
+#include <plat/gpio-cfg.h>
+
+#include <mach/map.h>
+#include <mach/dma.h>
+
+#include "s3c-dma.h"
+#include "regs-i2s-v2.h"
+#include "s3c64xx-i2s.h"
+
+static struct s3c2410_dma_client s3c64xx_dma_client_out = {
+ .name = "I2Sv4 PCM Stereo out"
+};
+
+static struct s3c2410_dma_client s3c64xx_dma_client_in = {
+ .name = "I2Sv4 PCM Stereo in"
+};
+
+static struct s3c_dma_params s3c64xx_i2sv4_pcm_stereo_out;
+static struct s3c_dma_params s3c64xx_i2sv4_pcm_stereo_in;
+static struct s3c_i2sv2_info s3c64xx_i2sv4;
+
+struct snd_soc_dai s3c64xx_i2s_v4_dai;
+EXPORT_SYMBOL_GPL(s3c64xx_i2s_v4_dai);
+
+static inline struct s3c_i2sv2_info *to_info(struct snd_soc_dai *cpu_dai)
+{
+ return cpu_dai->private_data;
+}
+
+static int s3c64xx_i2sv4_probe(struct platform_device *pdev,
+ struct snd_soc_dai *dai)
+{
+ /* configure GPIO for i2s port */
+ s3c_gpio_cfgpin(S3C64XX_GPC(4), S3C64XX_GPC4_I2S_V40_DO0);
+ s3c_gpio_cfgpin(S3C64XX_GPC(5), S3C64XX_GPC5_I2S_V40_DO1);
+ s3c_gpio_cfgpin(S3C64XX_GPC(7), S3C64XX_GPC7_I2S_V40_DO2);
+ s3c_gpio_cfgpin(S3C64XX_GPH(6), S3C64XX_GPH6_I2S_V40_BCLK);
+ s3c_gpio_cfgpin(S3C64XX_GPH(7), S3C64XX_GPH7_I2S_V40_CDCLK);
+ s3c_gpio_cfgpin(S3C64XX_GPH(8), S3C64XX_GPH8_I2S_V40_LRCLK);
+ s3c_gpio_cfgpin(S3C64XX_GPH(9), S3C64XX_GPH9_I2S_V40_DI);
+
+ return 0;
+}
+
+static int s3c_i2sv4_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *cpu_dai)
+{
+ struct s3c_i2sv2_info *i2s = to_info(cpu_dai);
+ struct s3c_dma_params *dma_data;
+ u32 iismod;
+
+ dev_dbg(cpu_dai->dev, "Entered %s\n", __func__);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ dma_data = i2s->dma_playback;
+ else
+ dma_data = i2s->dma_capture;
+
+ snd_soc_dai_set_dma_data(cpu_dai, substream, dma_data);
+
+ iismod = readl(i2s->regs + S3C2412_IISMOD);
+ dev_dbg(cpu_dai->dev, "%s: r: IISMOD: %x\n", __func__, iismod);
+
+ iismod &= ~S3C64XX_IISMOD_BLC_MASK;
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S8:
+ iismod |= S3C64XX_IISMOD_BLC_8BIT;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ iismod |= S3C64XX_IISMOD_BLC_24BIT;
+ break;
+ }
+
+ writel(iismod, i2s->regs + S3C2412_IISMOD);
+ dev_dbg(cpu_dai->dev, "%s: w: IISMOD: %x\n", __func__, iismod);
+
+ return 0;
+}
+
+static struct snd_soc_dai_ops s3c64xx_i2sv4_dai_ops = {
+ .hw_params = s3c_i2sv4_hw_params,
+};
+
+static __devinit int s3c64xx_i2sv4_dev_probe(struct platform_device *pdev)
+{
+ struct s3c_i2sv2_info *i2s;
+ struct snd_soc_dai *dai;
+ int ret;
+
+ i2s = &s3c64xx_i2sv4;
+ dai = &s3c64xx_i2s_v4_dai;
+
+ if (dai->dev) {
+ dev_dbg(dai->dev, "%s: I2Sv4 instance already registered!\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ dai->dev = &pdev->dev;
+ dai->name = "s3c64xx-i2s-v4";
+ dai->id = 0;
+ dai->symmetric_rates = 1;
+ dai->playback.channels_min = 2;
+ dai->playback.channels_max = 2;
+ dai->playback.rates = S3C64XX_I2S_RATES;
+ dai->playback.formats = S3C64XX_I2S_FMTS;
+ dai->capture.channels_min = 2;
+ dai->capture.channels_max = 2;
+ dai->capture.rates = S3C64XX_I2S_RATES;
+ dai->capture.formats = S3C64XX_I2S_FMTS;
+ dai->probe = s3c64xx_i2sv4_probe;
+ dai->ops = &s3c64xx_i2sv4_dai_ops;
+
+ i2s->feature |= S3C_FEATURE_CDCLKCON;
+
+ i2s->dma_capture = &s3c64xx_i2sv4_pcm_stereo_in;
+ i2s->dma_playback = &s3c64xx_i2sv4_pcm_stereo_out;
+
+ i2s->dma_capture->channel = DMACH_HSI_I2SV40_RX;
+ i2s->dma_capture->dma_addr = S3C64XX_PA_IISV4 + S3C2412_IISRXD;
+ i2s->dma_playback->channel = DMACH_HSI_I2SV40_TX;
+ i2s->dma_playback->dma_addr = S3C64XX_PA_IISV4 + S3C2412_IISTXD;
+
+ i2s->dma_capture->client = &s3c64xx_dma_client_in;
+ i2s->dma_capture->dma_size = 4;
+ i2s->dma_playback->client = &s3c64xx_dma_client_out;
+ i2s->dma_playback->dma_size = 4;
+
+ i2s->iis_cclk = clk_get(&pdev->dev, "audio-bus");
+ if (IS_ERR(i2s->iis_cclk)) {
+ dev_err(&pdev->dev, "failed to get audio-bus\n");
+ ret = PTR_ERR(i2s->iis_cclk);
+ goto err;
+ }
+
+ clk_enable(i2s->iis_cclk);
+
+ ret = s3c_i2sv2_probe(pdev, dai, i2s, 0);
+ if (ret)
+ goto err_clk;
+
+ ret = s3c_i2sv2_register_dai(dai);
+ if (ret != 0)
+ goto err_i2sv2;
+
+ return 0;
+
+err_i2sv2:
+ /* Not implemented for I2Sv2 core yet */
+err_clk:
+ clk_put(i2s->iis_cclk);
+err:
+ return ret;
+}
+
+static __devexit int s3c64xx_i2sv4_dev_remove(struct platform_device *pdev)
+{
+ dev_err(&pdev->dev, "Device removal not yet supported\n");
+ return 0;
+}
+
+static struct platform_driver s3c64xx_i2sv4_driver = {
+ .probe = s3c64xx_i2sv4_dev_probe,
+ .remove = s3c64xx_i2sv4_dev_remove,
+ .driver = {
+ .name = "s3c64xx-iis-v4",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init s3c64xx_i2sv4_init(void)
+{
+ return platform_driver_register(&s3c64xx_i2sv4_driver);
+}
+module_init(s3c64xx_i2sv4_init);
+
+static void __exit s3c64xx_i2sv4_exit(void)
+{
+ platform_driver_unregister(&s3c64xx_i2sv4_driver);
+}
+module_exit(s3c64xx_i2sv4_exit);
+
+/* Module information */
+MODULE_AUTHOR("Jaswinder Singh, <jassi.brar@samsung.com>");
+MODULE_DESCRIPTION("S3C64XX I2Sv4 SoC Interface");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/s3c24xx/s3c64xx-i2s.c b/sound/soc/s3c24xx/s3c64xx-i2s.c
index a72c251401ac..1d85cb85a7d2 100644
--- a/sound/soc/s3c24xx/s3c64xx-i2s.c
+++ b/sound/soc/s3c24xx/s3c64xx-i2s.c
@@ -12,16 +12,12 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/device.h>
#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/io.h>
#include <sound/soc.h>
-#include <plat/regs-s3c2412-iis.h>
#include <mach/gpio-bank-d.h>
#include <mach/gpio-bank-e.h>
#include <plat/gpio-cfg.h>
@@ -30,6 +26,7 @@
#include <mach/dma.h>
#include "s3c-dma.h"
+#include "regs-i2s-v2.h"
#include "s3c64xx-i2s.h"
/* The value should be set to maximum of the total number
@@ -57,55 +54,6 @@ static inline struct s3c_i2sv2_info *to_info(struct snd_soc_dai *cpu_dai)
return cpu_dai->private_data;
}
-static int s3c64xx_i2s_set_sysclk(struct snd_soc_dai *cpu_dai,
- int clk_id, unsigned int freq, int dir)
-{
- struct s3c_i2sv2_info *i2s = to_info(cpu_dai);
- u32 iismod = readl(i2s->regs + S3C2412_IISMOD);
-
- switch (clk_id) {
- case S3C64XX_CLKSRC_PCLK:
- iismod &= ~S3C64XX_IISMOD_IMS_SYSMUX;
- break;
-
- case S3C64XX_CLKSRC_MUX:
- iismod |= S3C64XX_IISMOD_IMS_SYSMUX;
- break;
-
- case S3C64XX_CLKSRC_CDCLK:
- switch (dir) {
- case SND_SOC_CLOCK_IN:
- iismod |= S3C64XX_IISMOD_CDCLKCON;
- break;
- case SND_SOC_CLOCK_OUT:
- iismod &= ~S3C64XX_IISMOD_CDCLKCON;
- break;
- default:
- return -EINVAL;
- }
- break;
-
- default:
- return -EINVAL;
- }
-
- writel(iismod, i2s->regs + S3C2412_IISMOD);
-
- return 0;
-}
-
-struct clk *s3c64xx_i2s_get_clock(struct snd_soc_dai *dai)
-{
- struct s3c_i2sv2_info *i2s = to_info(dai);
- u32 iismod = readl(i2s->regs + S3C2412_IISMOD);
-
- if (iismod & S3C64XX_IISMOD_IMS_SYSMUX)
- return i2s->iis_cclk;
- else
- return i2s->iis_pclk;
-}
-EXPORT_SYMBOL_GPL(s3c64xx_i2s_get_clock);
-
static int s3c64xx_i2s_probe(struct platform_device *pdev,
struct snd_soc_dai *dai)
{
@@ -130,18 +78,7 @@ static int s3c64xx_i2s_probe(struct platform_device *pdev,
}
-#define S3C64XX_I2S_RATES \
- (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 | SNDRV_PCM_RATE_16000 | \
- SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \
- SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
-
-#define S3C64XX_I2S_FMTS \
- (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE |\
- SNDRV_PCM_FMTBIT_S24_LE)
-
-static struct snd_soc_dai_ops s3c64xx_i2s_dai_ops = {
- .set_sysclk = s3c64xx_i2s_set_sysclk,
-};
+static struct snd_soc_dai_ops s3c64xx_i2s_dai_ops;
static __devinit int s3c64xx_iis_dev_probe(struct platform_device *pdev)
{
@@ -171,6 +108,8 @@ static __devinit int s3c64xx_iis_dev_probe(struct platform_device *pdev)
dai->probe = s3c64xx_i2s_probe;
dai->ops = &s3c64xx_i2s_dai_ops;
+ i2s->feature |= S3C_FEATURE_CDCLKCON;
+
i2s->dma_capture = &s3c64xx_i2s_pcm_stereo_in[pdev->id];
i2s->dma_playback = &s3c64xx_i2s_pcm_stereo_out[pdev->id];
diff --git a/sound/soc/s3c24xx/s3c64xx-i2s.h b/sound/soc/s3c24xx/s3c64xx-i2s.h
index abe7253b55fc..7a40f43d1d51 100644
--- a/sound/soc/s3c24xx/s3c64xx-i2s.h
+++ b/sound/soc/s3c24xx/s3c64xx-i2s.h
@@ -23,12 +23,20 @@ struct clk;
#define S3C64XX_DIV_RCLK S3C_I2SV2_DIV_RCLK
#define S3C64XX_DIV_PRESCALER S3C_I2SV2_DIV_PRESCALER
-#define S3C64XX_CLKSRC_PCLK (0)
-#define S3C64XX_CLKSRC_MUX (1)
-#define S3C64XX_CLKSRC_CDCLK (2)
+#define S3C64XX_CLKSRC_PCLK S3C_I2SV2_CLKSRC_PCLK
+#define S3C64XX_CLKSRC_MUX S3C_I2SV2_CLKSRC_AUDIOBUS
+#define S3C64XX_CLKSRC_CDCLK S3C_I2SV2_CLKSRC_CDCLK
-extern struct snd_soc_dai s3c64xx_i2s_dai[];
+#define S3C64XX_I2S_RATES \
+ (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 | SNDRV_PCM_RATE_16000 | \
+ SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
+
+#define S3C64XX_I2S_FMTS \
+ (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE |\
+ SNDRV_PCM_FMTBIT_S24_LE)
-extern struct clk *s3c64xx_i2s_get_clock(struct snd_soc_dai *dai);
+extern struct snd_soc_dai s3c64xx_i2s_dai[];
+extern struct snd_soc_dai s3c64xx_i2s_v4_dai;
#endif /* __SND_SOC_S3C24XX_S3C64XX_I2S_H */
diff --git a/sound/soc/s3c24xx/smdk64xx_wm8580.c b/sound/soc/s3c24xx/smdk64xx_wm8580.c
index efe4901213a3..07e8e51d10d6 100644
--- a/sound/soc/s3c24xx/smdk64xx_wm8580.c
+++ b/sound/soc/s3c24xx/smdk64xx_wm8580.c
@@ -22,8 +22,6 @@
#include "s3c-dma.h"
#include "s3c64xx-i2s.h"
-#define S3C64XX_I2S_V4 2
-
/* SMDK64XX has a 12MHZ crystal attached to WM8580 */
#define SMDK64XX_WM8580_FREQ 12000000
@@ -215,7 +213,7 @@ static struct snd_soc_dai_link smdk64xx_dai[] = {
{ /* Primary Playback i/f */
.name = "WM8580 PAIF RX",
.stream_name = "Playback",
- .cpu_dai = &s3c64xx_i2s_dai[S3C64XX_I2S_V4],
+ .cpu_dai = &s3c64xx_i2s_v4_dai,
.codec_dai = &wm8580_dai[WM8580_DAI_PAIFRX],
.init = smdk64xx_wm8580_init_paifrx,
.ops = &smdk64xx_ops,
@@ -223,7 +221,7 @@ static struct snd_soc_dai_link smdk64xx_dai[] = {
{ /* Primary Capture i/f */
.name = "WM8580 PAIF TX",
.stream_name = "Capture",
- .cpu_dai = &s3c64xx_i2s_dai[S3C64XX_I2S_V4],
+ .cpu_dai = &s3c64xx_i2s_v4_dai,
.codec_dai = &wm8580_dai[WM8580_DAI_PAIFTX],
.init = smdk64xx_wm8580_init_paiftx,
.ops = &smdk64xx_ops,
diff --git a/sound/soc/sh/Kconfig b/sound/soc/sh/Kconfig
index f07f6d8b93e1..a1d14bc3c76f 100644
--- a/sound/soc/sh/Kconfig
+++ b/sound/soc/sh/Kconfig
@@ -1,5 +1,5 @@
menu "SoC Audio support for SuperH"
- depends on SUPERH
+ depends on SUPERH || ARCH_SHMOBILE
config SND_SOC_PCM_SH7760
tristate "SoC Audio support for Renesas SH7760"
@@ -22,7 +22,6 @@ config SND_SOC_SH4_SSI
config SND_SOC_SH4_FSI
tristate "SH4 FSI support"
- depends on CPU_SUBTYPE_SH7724
help
This option enables FSI sound support
diff --git a/sound/soc/sh/fsi-ak4642.c b/sound/soc/sh/fsi-ak4642.c
index 5263ab18f827..be018542314e 100644
--- a/sound/soc/sh/fsi-ak4642.c
+++ b/sound/soc/sh/fsi-ak4642.c
@@ -22,11 +22,25 @@
#include <sound/sh_fsi.h>
#include <../sound/soc/codecs/ak4642.h>
+static int fsi_ak4642_dai_init(struct snd_soc_codec *codec)
+{
+ int ret;
+
+ ret = snd_soc_dai_set_fmt(&ak4642_dai, SND_SOC_DAIFMT_CBM_CFM);
+ if (ret < 0)
+ return ret;
+
+ ret = snd_soc_dai_set_sysclk(&ak4642_dai, 0, 11289600, 0);
+
+ return ret;
+}
+
static struct snd_soc_dai_link fsi_dai_link = {
.name = "AK4642",
.stream_name = "AK4642",
.cpu_dai = &fsi_soc_dai[0], /* fsi */
.codec_dai = &ak4642_dai,
+ .init = fsi_ak4642_dai_init,
.ops = NULL,
};
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index 8dc966f45c36..3396a0db06ba 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -41,14 +41,19 @@
#define MUTE_ST 0x0028
#define REG_END MUTE_ST
+
+#define CPU_INT_ST 0x01F4
+#define CPU_IEMSK 0x01F8
+#define CPU_IMSK 0x01FC
#define INT_ST 0x0200
#define IEMSK 0x0204
#define IMSK 0x0208
#define MUTE 0x020C
#define CLK_RST 0x0210
#define SOFT_RST 0x0214
-#define MREG_START INT_ST
-#define MREG_END SOFT_RST
+#define FIFO_SZ 0x0218
+#define MREG_START CPU_INT_ST
+#define MREG_END FIFO_SZ
/* DO_FMT */
/* DI_FMT */
@@ -80,6 +85,17 @@
#define INT_A_IN (1 << 4)
#define INT_A_OUT (1 << 0)
+/* SOFT_RST */
+#define PBSR (1 << 12) /* Port B Software Reset */
+#define PASR (1 << 8) /* Port A Software Reset */
+#define IR (1 << 4) /* Interrupt Reset */
+#define FSISR (1 << 0) /* Software Reset */
+
+/* FIFO_SZ */
+#define OUT_SZ_MASK 0x7
+#define BO_SZ_SHIFT 8
+#define AO_SZ_SHIFT 0
+
#define FSI_RATES SNDRV_PCM_RATE_8000_96000
#define FSI_FMTS (SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S16_LE)
@@ -105,11 +121,18 @@ struct fsi_priv {
int periods;
};
+struct fsi_regs {
+ u32 int_st;
+ u32 iemsk;
+ u32 imsk;
+};
+
struct fsi_master {
void __iomem *base;
int irq;
struct fsi_priv fsia;
struct fsi_priv fsib;
+ struct fsi_regs *regs;
struct sh_fsi_platform_info *info;
spinlock_t lock;
};
@@ -317,7 +340,7 @@ static int fsi_get_fifo_residue(struct fsi_priv *fsi, int is_play)
/************************************************************************
- ctrl function
+ irq function
************************************************************************/
@@ -326,8 +349,8 @@ static void fsi_irq_enable(struct fsi_priv *fsi, int is_play)
u32 data = fsi_port_ab_io_bit(fsi, is_play);
struct fsi_master *master = fsi_get_master(fsi);
- fsi_master_mask_set(master, IMSK, data, data);
- fsi_master_mask_set(master, IEMSK, data, data);
+ fsi_master_mask_set(master, master->regs->imsk, data, data);
+ fsi_master_mask_set(master, master->regs->iemsk, data, data);
}
static void fsi_irq_disable(struct fsi_priv *fsi, int is_play)
@@ -335,10 +358,39 @@ static void fsi_irq_disable(struct fsi_priv *fsi, int is_play)
u32 data = fsi_port_ab_io_bit(fsi, is_play);
struct fsi_master *master = fsi_get_master(fsi);
- fsi_master_mask_set(master, IMSK, data, 0);
- fsi_master_mask_set(master, IEMSK, data, 0);
+ fsi_master_mask_set(master, master->regs->imsk, data, 0);
+ fsi_master_mask_set(master, master->regs->iemsk, data, 0);
+}
+
+static u32 fsi_irq_get_status(struct fsi_master *master)
+{
+ return fsi_master_read(master, master->regs->int_st);
+}
+
+static void fsi_irq_clear_all_status(struct fsi_master *master)
+{
+ fsi_master_write(master, master->regs->int_st, 0x0000000);
}
+static void fsi_irq_clear_status(struct fsi_priv *fsi)
+{
+ u32 data = 0;
+ struct fsi_master *master = fsi_get_master(fsi);
+
+ data |= fsi_port_ab_io_bit(fsi, 0);
+ data |= fsi_port_ab_io_bit(fsi, 1);
+
+ /* clear interrupt factor */
+ fsi_master_mask_set(master, master->regs->int_st, data, 0);
+}
+
+/************************************************************************
+
+
+ ctrl function
+
+
+************************************************************************/
static void fsi_clk_ctrl(struct fsi_priv *fsi, int enable)
{
u32 val = fsi_is_port_a(fsi) ? (1 << 0) : (1 << 4);
@@ -350,41 +402,61 @@ static void fsi_clk_ctrl(struct fsi_priv *fsi, int enable)
fsi_master_mask_set(master, CLK_RST, val, 0);
}
-static void fsi_irq_init(struct fsi_priv *fsi, int is_play)
+static void fsi_fifo_init(struct fsi_priv *fsi,
+ int is_play,
+ struct snd_soc_dai *dai)
{
- u32 data;
- u32 ctrl;
+ struct fsi_master *master = fsi_get_master(fsi);
+ u32 ctrl, shift, i;
- data = fsi_port_ab_io_bit(fsi, is_play);
- ctrl = is_play ? DOFF_CTL : DIFF_CTL;
+ /* get on-chip RAM capacity */
+ shift = fsi_master_read(master, FIFO_SZ);
+ shift >>= fsi_is_port_a(fsi) ? AO_SZ_SHIFT : BO_SZ_SHIFT;
+ shift &= OUT_SZ_MASK;
+ fsi->fifo_max = 256 << shift;
+ dev_dbg(dai->dev, "fifo = %d words\n", fsi->fifo_max);
- /* set IMSK */
- fsi_irq_disable(fsi, is_play);
+ /*
+ * The maximum number of samples that can be stored varies
+ * with the number of channels selected for the format.
+ *
+ * FIFOs are allocated in 4-channel units for 3-channel mode
+ * and in 8-channel units for 5- to 7-channel mode, so more
+ * FIFO space than strictly necessary is taken from the DPRAM.
+ *
+ * ex) if 256 words of DP-RAM are connected
+ * 1 channel: 256 (256 x 1 = 256)
+ * 2 channels: 128 (128 x 2 = 256)
+ * 3 channels: 64 ( 64 x 3 = 192)
+ * 4 channels: 64 ( 64 x 4 = 256)
+ * 5 channels: 32 ( 32 x 5 = 160)
+ * 6 channels: 32 ( 32 x 6 = 192)
+ * 7 channels: 32 ( 32 x 7 = 224)
+ * 8 channels: 32 ( 32 x 8 = 256)
+ */
+ for (i = 1; i < fsi->chan; i <<= 1)
+ fsi->fifo_max >>= 1;
+ dev_dbg(dai->dev, "%d channel %d store\n", fsi->chan, fsi->fifo_max);
+
+ ctrl = is_play ? DOFF_CTL : DIFF_CTL;
/* set interrupt generation factor */
fsi_reg_write(fsi, ctrl, IRQ_HALF);
/* clear FIFO */
fsi_reg_mask_set(fsi, ctrl, FIFO_CLR, FIFO_CLR);
-
- /* clear interrupt factor */
- fsi_master_mask_set(fsi_get_master(fsi), INT_ST, data, 0);
}
static void fsi_soft_all_reset(struct fsi_master *master)
{
- u32 status = fsi_master_read(master, SOFT_RST);
-
/* port AB reset */
- status &= 0x000000ff;
- fsi_master_write(master, SOFT_RST, status);
+ fsi_master_mask_set(master, SOFT_RST, PASR | PBSR, 0);
mdelay(10);
/* soft reset */
- status &= 0x000000f0;
- fsi_master_write(master, SOFT_RST, status);
- status |= 0x00000001;
- fsi_master_write(master, SOFT_RST, status);
+ fsi_master_mask_set(master, SOFT_RST, FSISR, 0);
+ fsi_master_mask_set(master, SOFT_RST, FSISR, FSISR);
mdelay(10);
}
@@ -559,12 +631,11 @@ static int fsi_data_pop(struct fsi_priv *fsi, int startup)
static irqreturn_t fsi_interrupt(int irq, void *data)
{
struct fsi_master *master = data;
- u32 status = fsi_master_read(master, SOFT_RST) & ~0x00000010;
- u32 int_st = fsi_master_read(master, INT_ST);
+ u32 int_st = fsi_irq_get_status(master);
/* clear irq status */
- fsi_master_write(master, SOFT_RST, status);
- fsi_master_write(master, SOFT_RST, status | 0x00000010);
+ fsi_master_mask_set(master, SOFT_RST, IR, 0);
+ fsi_master_mask_set(master, SOFT_RST, IR, IR);
if (int_st & INT_A_OUT)
fsi_data_push(&master->fsia, 0);
@@ -575,7 +646,7 @@ static irqreturn_t fsi_interrupt(int irq, void *data)
if (int_st & INT_B_IN)
fsi_data_pop(&master->fsib, 0);
- fsi_master_write(master, INT_ST, 0x0000000);
+ fsi_irq_clear_all_status(master);
return IRQ_HANDLED;
}
@@ -669,29 +740,6 @@ static int fsi_dai_startup(struct snd_pcm_substream *substream,
dev_err(dai->dev, "unknown format.\n");
return -EINVAL;
}
-
- switch (fsi->chan) {
- case 1:
- fsi->fifo_max = 256;
- break;
- case 2:
- fsi->fifo_max = 128;
- break;
- case 3:
- case 4:
- fsi->fifo_max = 64;
- break;
- case 5:
- case 6:
- case 7:
- case 8:
- fsi->fifo_max = 32;
- break;
- default:
- dev_err(dai->dev, "channel size error.\n");
- return -EINVAL;
- }
-
fsi_reg_write(fsi, reg, data);
/*
@@ -700,8 +748,12 @@ static int fsi_dai_startup(struct snd_pcm_substream *substream,
if (is_master)
fsi_clk_ctrl(fsi, 1);
- /* irq setting */
- fsi_irq_init(fsi, is_play);
+ /* irq clear */
+ fsi_irq_disable(fsi, is_play);
+ fsi_irq_clear_status(fsi);
+
+ /* fifo init */
+ fsi_fifo_init(fsi, is_play, dai);
return ret;
}
@@ -913,6 +965,7 @@ EXPORT_SYMBOL_GPL(fsi_soc_platform);
static int fsi_probe(struct platform_device *pdev)
{
struct fsi_master *master;
+ const struct platform_device_id *id_entry;
struct resource *res;
unsigned int irq;
int ret;
@@ -922,6 +975,12 @@ static int fsi_probe(struct platform_device *pdev)
return -ENODEV;
}
+ id_entry = pdev->id_entry;
+ if (!id_entry) {
+ dev_err(&pdev->dev, "unknown fsi device\n");
+ return -ENODEV;
+ }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
if (!res || (int)irq <= 0) {
@@ -950,6 +1009,7 @@ static int fsi_probe(struct platform_device *pdev)
master->fsia.master = master;
master->fsib.base = master->base + 0x40;
master->fsib.master = master;
+ master->regs = (struct fsi_regs *)id_entry->driver_data;
spin_lock_init(&master->lock);
pm_runtime_enable(&pdev->dev);
@@ -962,7 +1022,8 @@ static int fsi_probe(struct platform_device *pdev)
fsi_soft_all_reset(master);
- ret = request_irq(irq, &fsi_interrupt, IRQF_DISABLED, "fsi", master);
+ ret = request_irq(irq, &fsi_interrupt, IRQF_DISABLED,
+ id_entry->name, master);
if (ret) {
dev_err(&pdev->dev, "irq request err\n");
goto exit_iounmap;
@@ -1029,6 +1090,23 @@ static struct dev_pm_ops fsi_pm_ops = {
.runtime_resume = fsi_runtime_nop,
};
+static struct fsi_regs fsi_regs = {
+ .int_st = INT_ST,
+ .iemsk = IEMSK,
+ .imsk = IMSK,
+};
+
+static struct fsi_regs fsi2_regs = {
+ .int_st = CPU_INT_ST,
+ .iemsk = CPU_IEMSK,
+ .imsk = CPU_IMSK,
+};
+
+static struct platform_device_id fsi_id_table[] = {
+ { "sh_fsi", (kernel_ulong_t)&fsi_regs },
+ { "sh_fsi2", (kernel_ulong_t)&fsi2_regs },
+};
+
static struct platform_driver fsi_driver = {
.driver = {
.name = "sh_fsi",
@@ -1036,6 +1114,7 @@ static struct platform_driver fsi_driver = {
},
.probe = fsi_probe,
.remove = fsi_remove,
+ .id_table = fsi_id_table,
};
static int __init fsi_mobile_init(void)
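With the new id_table, the same driver binds to either "sh_fsi" or "sh_fsi2" and picks the matching interrupt register layout through driver_data. A board selects the FSI2 layout simply by registering its device under the second name; a minimal sketch (addresses and IRQ number below are placeholders, not real SoC values):

/* Sketch: board code registering an FSI2 instance */
static struct resource example_fsi2_resources[] = {
	[0] = {
		.start	= 0xfe1f0000,		/* placeholder base address */
		.end	= 0xfe1f0000 + 0x400 - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 9,			/* placeholder IRQ */
		.end	= 9,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device example_fsi2_device = {
	.name		= "sh_fsi2",	/* matches the new id_table entry */
	.id		= -1,
	.num_resources	= ARRAY_SIZE(example_fsi2_resources),
	.resource	= example_fsi2_resources,
};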
diff --git a/sound/soc/soc-cache.c b/sound/soc/soc-cache.c
index 5869dc3be781..472af38188c1 100644
--- a/sound/soc/soc-cache.c
+++ b/sound/soc/soc-cache.c
@@ -44,6 +44,8 @@ static int snd_soc_4_12_write(struct snd_soc_codec *codec, unsigned int reg,
return 0;
}
+ dev_dbg(codec->dev, "0x%x = 0x%x\n", reg, value);
+
ret = codec->hw_write(codec->control_data, data, 2);
if (ret == 2)
return 0;
@@ -112,6 +114,8 @@ static int snd_soc_7_9_write(struct snd_soc_codec *codec, unsigned int reg,
return 0;
}
+ dev_dbg(codec->dev, "0x%x = 0x%x\n", reg, value);
+
ret = codec->hw_write(codec->control_data, data, 2);
if (ret == 2)
return 0;
@@ -159,7 +163,8 @@ static int snd_soc_8_8_write(struct snd_soc_codec *codec, unsigned int reg,
BUG_ON(codec->volatile_register);
- data[0] = reg & 0xff;
+ reg &= 0xff;
+ data[0] = reg;
data[1] = value & 0xff;
if (reg < codec->reg_cache_size)
@@ -170,6 +175,8 @@ static int snd_soc_8_8_write(struct snd_soc_codec *codec, unsigned int reg,
return 0;
}
+ dev_dbg(codec->dev, "0x%x = 0x%x\n", reg, value);
+
if (codec->hw_write(codec->control_data, data, 2) == 2)
return 0;
else
@@ -180,6 +187,7 @@ static unsigned int snd_soc_8_8_read(struct snd_soc_codec *codec,
unsigned int reg)
{
u8 *cache = codec->reg_cache;
+ reg &= 0xff;
if (reg >= codec->reg_cache_size)
return -1;
return cache[reg];
@@ -203,6 +211,8 @@ static int snd_soc_8_16_write(struct snd_soc_codec *codec, unsigned int reg,
return 0;
}
+ dev_dbg(codec->dev, "0x%x = 0x%x\n", reg, value);
+
if (codec->hw_write(codec->control_data, data, 3) == 3)
return 0;
else
@@ -226,6 +236,40 @@ static unsigned int snd_soc_8_16_read(struct snd_soc_codec *codec,
}
#if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
+static unsigned int snd_soc_8_8_read_i2c(struct snd_soc_codec *codec,
+ unsigned int r)
+{
+ struct i2c_msg xfer[2];
+ u8 reg = r;
+ u8 data;
+ int ret;
+ struct i2c_client *client = codec->control_data;
+
+ /* Write register */
+ xfer[0].addr = client->addr;
+ xfer[0].flags = 0;
+ xfer[0].len = 1;
+ xfer[0].buf = &reg;
+
+ /* Read data */
+ xfer[1].addr = client->addr;
+ xfer[1].flags = I2C_M_RD;
+ xfer[1].len = 1;
+ xfer[1].buf = &data;
+
+ ret = i2c_transfer(client->adapter, xfer, 2);
+ if (ret != 2) {
+ dev_err(&client->dev, "i2c_transfer() returned %d\n", ret);
+ return 0;
+ }
+
+ return data;
+}
+#else
+#define snd_soc_8_8_read_i2c NULL
+#endif
+
+#if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
static unsigned int snd_soc_8_16_read_i2c(struct snd_soc_codec *codec,
unsigned int r)
{
@@ -326,6 +370,8 @@ static int snd_soc_16_8_write(struct snd_soc_codec *codec, unsigned int reg,
return 0;
}
+ dev_dbg(codec->dev, "0x%x = 0x%x\n", reg, value);
+
ret = codec->hw_write(codec->control_data, data, 3);
if (ret == 3)
return 0;
@@ -366,6 +412,86 @@ static int snd_soc_16_8_spi_write(void *control_data, const char *data,
#define snd_soc_16_8_spi_write NULL
#endif
+#if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
+static unsigned int snd_soc_16_16_read_i2c(struct snd_soc_codec *codec,
+ unsigned int r)
+{
+ struct i2c_msg xfer[2];
+ u16 reg = cpu_to_be16(r);
+ u16 data;
+ int ret;
+ struct i2c_client *client = codec->control_data;
+
+ /* Write register */
+ xfer[0].addr = client->addr;
+ xfer[0].flags = 0;
+ xfer[0].len = 2;
+ xfer[0].buf = (u8 *)&reg;
+
+ /* Read data */
+ xfer[1].addr = client->addr;
+ xfer[1].flags = I2C_M_RD;
+ xfer[1].len = 2;
+ xfer[1].buf = (u8 *)&data;
+
+ ret = i2c_transfer(client->adapter, xfer, 2);
+ if (ret != 2) {
+ dev_err(&client->dev, "i2c_transfer() returned %d\n", ret);
+ return 0;
+ }
+
+ return be16_to_cpu(data);
+}
+#else
+#define snd_soc_16_16_read_i2c NULL
+#endif
+
+static unsigned int snd_soc_16_16_read(struct snd_soc_codec *codec,
+ unsigned int reg)
+{
+ u16 *cache = codec->reg_cache;
+
+ if (reg >= codec->reg_cache_size ||
+ snd_soc_codec_volatile_register(codec, reg)) {
+ if (codec->cache_only)
+ return -EINVAL;
+
+ return codec->hw_read(codec, reg);
+ }
+
+ return cache[reg];
+}
+
+static int snd_soc_16_16_write(struct snd_soc_codec *codec, unsigned int reg,
+ unsigned int value)
+{
+ u16 *cache = codec->reg_cache;
+ u8 data[4];
+ int ret;
+
+ data[0] = (reg >> 8) & 0xff;
+ data[1] = reg & 0xff;
+ data[2] = (value >> 8) & 0xff;
+ data[3] = value & 0xff;
+
+ if (reg < codec->reg_cache_size)
+ cache[reg] = value;
+
+ if (codec->cache_only) {
+ codec->cache_sync = 1;
+ return 0;
+ }
+
+ dev_dbg(codec->dev, "0x%x = 0x%x\n", reg, value);
+
+ ret = codec->hw_write(codec->control_data, data, 4);
+ if (ret == 4)
+ return 0;
+ if (ret < 0)
+ return ret;
+ else
+ return -EIO;
+}
static struct {
int addr_bits;
@@ -388,6 +514,7 @@ static struct {
{
.addr_bits = 8, .data_bits = 8,
.write = snd_soc_8_8_write, .read = snd_soc_8_8_read,
+ .i2c_read = snd_soc_8_8_read_i2c,
},
{
.addr_bits = 8, .data_bits = 16,
@@ -400,6 +527,11 @@ static struct {
.i2c_read = snd_soc_16_8_read_i2c,
.spi_write = snd_soc_16_8_spi_write,
},
+ {
+ .addr_bits = 16, .data_bits = 16,
+ .write = snd_soc_16_16_write, .read = snd_soc_16_16_read,
+ .i2c_read = snd_soc_16_16_read_i2c,
+ },
};
/**
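The new 16-bit address / 16-bit data entry means a codec driver can ask the generic cache layer for that layout instead of open-coding its own register I/O. A sketch of how a codec probe would typically request it, assuming the existing snd_soc_codec_set_cache_io() helper is used to pick an entry from this table:

/* Sketch: in a codec driver probe, select 16-bit register / 16-bit value I2C I/O */
ret = snd_soc_codec_set_cache_io(codec, 16, 16, SND_SOC_I2C);
if (ret < 0) {
	dev_err(codec->dev, "failed to set cache I/O: %d\n", ret);
	return ret;
}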
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index ad7f9528d751..998569d60330 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -316,7 +316,7 @@ static int soc_pcm_apply_symmetry(struct snd_pcm_substream *substream)
if (codec_dai->symmetric_rates || cpu_dai->symmetric_rates ||
machine->symmetric_rates) {
- dev_dbg(card->dev, "Symmetry forces %dHz rate\n",
+ dev_dbg(card->dev, "Symmetry forces %dHz rate\n",
machine->rate);
ret = snd_pcm_hw_constraint_minmax(substream->runtime,
@@ -405,6 +405,12 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
codec_dai->playback.formats & cpu_dai->playback.formats;
runtime->hw.rates =
codec_dai->playback.rates & cpu_dai->playback.rates;
+ if (codec_dai->playback.rates
+ & (SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_CONTINUOUS))
+ runtime->hw.rates |= cpu_dai->playback.rates;
+ if (cpu_dai->playback.rates
+ & (SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_CONTINUOUS))
+ runtime->hw.rates |= codec_dai->playback.rates;
} else {
runtime->hw.rate_min =
max(codec_dai->capture.rate_min,
@@ -422,6 +428,12 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
codec_dai->capture.formats & cpu_dai->capture.formats;
runtime->hw.rates =
codec_dai->capture.rates & cpu_dai->capture.rates;
+ if (codec_dai->capture.rates
+ & (SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_CONTINUOUS))
+ runtime->hw.rates |= cpu_dai->capture.rates;
+ if (cpu_dai->capture.rates
+ & (SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_CONTINUOUS))
+ runtime->hw.rates |= codec_dai->capture.rates;
}
snd_pcm_limit_hw_rates(runtime);
@@ -455,12 +467,15 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
pr_debug("asoc: min rate %d max rate %d\n", runtime->hw.rate_min,
runtime->hw.rate_max);
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- cpu_dai->playback.active = codec_dai->playback.active = 1;
- else
- cpu_dai->capture.active = codec_dai->capture.active = 1;
- cpu_dai->active = codec_dai->active = 1;
- cpu_dai->runtime = runtime;
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ cpu_dai->playback.active++;
+ codec_dai->playback.active++;
+ } else {
+ cpu_dai->capture.active++;
+ codec_dai->capture.active++;
+ }
+ cpu_dai->active++;
+ codec_dai->active++;
card->codec->active++;
mutex_unlock(&pcm_mutex);
return 0;
@@ -536,15 +551,16 @@ static int soc_codec_close(struct snd_pcm_substream *substream)
mutex_lock(&pcm_mutex);
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- cpu_dai->playback.active = codec_dai->playback.active = 0;
- else
- cpu_dai->capture.active = codec_dai->capture.active = 0;
-
- if (codec_dai->playback.active == 0 &&
- codec_dai->capture.active == 0) {
- cpu_dai->active = codec_dai->active = 0;
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ cpu_dai->playback.active--;
+ codec_dai->playback.active--;
+ } else {
+ cpu_dai->capture.active--;
+ codec_dai->capture.active--;
}
+
+ cpu_dai->active--;
+ codec_dai->active--;
codec->active--;
/* Muting the DAC suppresses artifacts caused during digital
@@ -564,7 +580,6 @@ static int soc_codec_close(struct snd_pcm_substream *substream)
if (platform->pcm_ops->close)
platform->pcm_ops->close(substream);
- cpu_dai->runtime = NULL;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
/* start delayed pop wq here for playback streams */
@@ -802,6 +817,41 @@ static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
return 0;
}
+/*
+ * soc level wrapper for pointer callback
+ * If the cpu_dai, codec_dai or platform driver has a delay callback, then
+ * runtime->delay will be updated accordingly.
+ */
+static snd_pcm_uframes_t soc_pcm_pointer(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_device *socdev = rtd->socdev;
+ struct snd_soc_card *card = socdev->card;
+ struct snd_soc_platform *platform = card->platform;
+ struct snd_soc_dai_link *machine = rtd->dai;
+ struct snd_soc_dai *cpu_dai = machine->cpu_dai;
+ struct snd_soc_dai *codec_dai = machine->codec_dai;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ snd_pcm_uframes_t offset = 0;
+ snd_pcm_sframes_t delay = 0;
+
+ if (platform->pcm_ops->pointer)
+ offset = platform->pcm_ops->pointer(substream);
+
+ if (cpu_dai->ops->delay)
+ delay += cpu_dai->ops->delay(substream, cpu_dai);
+
+ if (codec_dai->ops->delay)
+ delay += codec_dai->ops->delay(substream, codec_dai);
+
+ if (platform->delay)
+ delay += platform->delay(substream, codec_dai);
+
+ runtime->delay = delay;
+
+ return offset;
+}
+
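
[Illustrative sketch, not part of this patch.] The wrapper above sums whatever the CPU DAI, codec DAI and platform report through their delay callbacks into runtime->delay. A DAI driver could feed it roughly as follows; example_read_fifo_frames() is a made-up helper standing in for a controller-specific FIFO read.

#include <sound/soc.h>

/* hypothetical DAI delay callback: report the frames still queued in
 * the controller FIFO; soc_pcm_pointer() adds this to runtime->delay */
static snd_pcm_sframes_t example_dai_delay(struct snd_pcm_substream *substream,
                                           struct snd_soc_dai *dai)
{
        return example_read_fifo_frames(dai);   /* made-up helper */
}
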
/* ASoC PCM operations */
static struct snd_pcm_ops soc_pcm_ops = {
.open = soc_pcm_open,
@@ -810,6 +860,7 @@ static struct snd_pcm_ops soc_pcm_ops = {
.hw_free = soc_pcm_hw_free,
.prepare = soc_pcm_prepare,
.trigger = soc_pcm_trigger,
+ .pointer = soc_pcm_pointer,
};
#ifdef CONFIG_PM
@@ -843,23 +894,35 @@ static int soc_suspend(struct device *dev)
/* mute any active DAC's */
for (i = 0; i < card->num_links; i++) {
struct snd_soc_dai *dai = card->dai_link[i].codec_dai;
+
+ if (card->dai_link[i].ignore_suspend)
+ continue;
+
if (dai->ops->digital_mute && dai->playback.active)
dai->ops->digital_mute(dai, 1);
}
/* suspend all pcms */
- for (i = 0; i < card->num_links; i++)
+ for (i = 0; i < card->num_links; i++) {
+ if (card->dai_link[i].ignore_suspend)
+ continue;
+
snd_pcm_suspend_all(card->dai_link[i].pcm);
+ }
if (card->suspend_pre)
card->suspend_pre(pdev, PMSG_SUSPEND);
for (i = 0; i < card->num_links; i++) {
struct snd_soc_dai *cpu_dai = card->dai_link[i].cpu_dai;
+
+ if (card->dai_link[i].ignore_suspend)
+ continue;
+
if (cpu_dai->suspend && !cpu_dai->ac97_control)
cpu_dai->suspend(cpu_dai);
if (platform->suspend)
- platform->suspend(cpu_dai);
+ platform->suspend(&card->dai_link[i]);
}
/* close any waiting streams and save state */
@@ -868,6 +931,10 @@ static int soc_suspend(struct device *dev)
for (i = 0; i < codec->num_dai; i++) {
char *stream = codec->dai[i].playback.stream_name;
+
+ if (card->dai_link[i].ignore_suspend)
+ continue;
+
if (stream != NULL)
snd_soc_dapm_stream_event(codec, stream,
SND_SOC_DAPM_STREAM_SUSPEND);
@@ -877,11 +944,26 @@ static int soc_suspend(struct device *dev)
SND_SOC_DAPM_STREAM_SUSPEND);
}
- if (codec_dev->suspend)
- codec_dev->suspend(pdev, PMSG_SUSPEND);
+ /* If there are paths active then the CODEC will be held with
+ * bias _ON and should not be suspended. */
+ if (codec_dev->suspend) {
+ switch (codec->bias_level) {
+ case SND_SOC_BIAS_STANDBY:
+ case SND_SOC_BIAS_OFF:
+ codec_dev->suspend(pdev, PMSG_SUSPEND);
+ break;
+ default:
+ dev_dbg(socdev->dev, "CODEC is on over suspend\n");
+ break;
+ }
+ }
for (i = 0; i < card->num_links; i++) {
struct snd_soc_dai *cpu_dai = card->dai_link[i].cpu_dai;
+
+ if (card->dai_link[i].ignore_suspend)
+ continue;
+
if (cpu_dai->suspend && cpu_dai->ac97_control)
cpu_dai->suspend(cpu_dai);
}
@@ -913,20 +995,44 @@ static void soc_resume_deferred(struct work_struct *work)
dev_dbg(socdev->dev, "starting resume work\n");
+ /* Bring us up into D2 so that DAPM starts enabling things */
+ snd_power_change_state(codec->card, SNDRV_CTL_POWER_D2);
+
if (card->resume_pre)
card->resume_pre(pdev);
for (i = 0; i < card->num_links; i++) {
struct snd_soc_dai *cpu_dai = card->dai_link[i].cpu_dai;
+
+ if (card->dai_link[i].ignore_suspend)
+ continue;
+
if (cpu_dai->resume && cpu_dai->ac97_control)
cpu_dai->resume(cpu_dai);
}
- if (codec_dev->resume)
- codec_dev->resume(pdev);
+ /* If the CODEC was idle over suspend then it will have been
+ * left with bias OFF or STANDBY and suspended so we must now
+ * resume. Otherwise the suspend was suppressed.
+ */
+ if (codec_dev->resume) {
+ switch (codec->bias_level) {
+ case SND_SOC_BIAS_STANDBY:
+ case SND_SOC_BIAS_OFF:
+ codec_dev->resume(pdev);
+ break;
+ default:
+ dev_dbg(socdev->dev, "CODEC was on over suspend\n");
+ break;
+ }
+ }
for (i = 0; i < codec->num_dai; i++) {
char *stream = codec->dai[i].playback.stream_name;
+
+ if (card->dai_link[i].ignore_suspend)
+ continue;
+
if (stream != NULL)
snd_soc_dapm_stream_event(codec, stream,
SND_SOC_DAPM_STREAM_RESUME);
@@ -939,16 +1045,24 @@ static void soc_resume_deferred(struct work_struct *work)
/* unmute any active DACs */
for (i = 0; i < card->num_links; i++) {
struct snd_soc_dai *dai = card->dai_link[i].codec_dai;
+
+ if (card->dai_link[i].ignore_suspend)
+ continue;
+
if (dai->ops->digital_mute && dai->playback.active)
dai->ops->digital_mute(dai, 0);
}
for (i = 0; i < card->num_links; i++) {
struct snd_soc_dai *cpu_dai = card->dai_link[i].cpu_dai;
+
+ if (card->dai_link[i].ignore_suspend)
+ continue;
+
if (cpu_dai->resume && !cpu_dai->ac97_control)
cpu_dai->resume(cpu_dai);
if (platform->resume)
- platform->resume(cpu_dai);
+ platform->resume(&card->dai_link[i]);
}
if (card->resume_post)
@@ -1233,26 +1347,25 @@ static int soc_remove(struct platform_device *pdev)
struct snd_soc_platform *platform = card->platform;
struct snd_soc_codec_device *codec_dev = socdev->codec_dev;
- if (!card->instantiated)
- return 0;
+ if (card->instantiated) {
+ run_delayed_work(&card->delayed_work);
- run_delayed_work(&card->delayed_work);
+ if (platform->remove)
+ platform->remove(pdev);
- if (platform->remove)
- platform->remove(pdev);
+ if (codec_dev->remove)
+ codec_dev->remove(pdev);
- if (codec_dev->remove)
- codec_dev->remove(pdev);
+ for (i = 0; i < card->num_links; i++) {
+ struct snd_soc_dai *cpu_dai = card->dai_link[i].cpu_dai;
+ if (cpu_dai->remove)
+ cpu_dai->remove(pdev, cpu_dai);
+ }
- for (i = 0; i < card->num_links; i++) {
- struct snd_soc_dai *cpu_dai = card->dai_link[i].cpu_dai;
- if (cpu_dai->remove)
- cpu_dai->remove(pdev, cpu_dai);
+ if (card->remove)
+ card->remove(pdev);
}
- if (card->remove)
- card->remove(pdev);
-
snd_soc_unregister_card(card);
return 0;
@@ -1336,7 +1449,6 @@ static int soc_new_pcm(struct snd_soc_device *socdev,
dai_link->pcm = pcm;
pcm->private_data = rtd;
soc_pcm_ops.mmap = platform->pcm_ops->mmap;
- soc_pcm_ops.pointer = platform->pcm_ops->pointer;
soc_pcm_ops.ioctl = platform->pcm_ops->ioctl;
soc_pcm_ops.copy = platform->pcm_ops->copy;
soc_pcm_ops.silence = platform->pcm_ops->silence;
@@ -1906,18 +2018,22 @@ int snd_soc_info_volsw(struct snd_kcontrol *kcontrol,
{
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
- int max = mc->max;
+ int platform_max;
unsigned int shift = mc->shift;
unsigned int rshift = mc->rshift;
- if (max == 1 && !strstr(kcontrol->id.name, " Volume"))
+ if (!mc->platform_max)
+ mc->platform_max = mc->max;
+ platform_max = mc->platform_max;
+
+ if (platform_max == 1 && !strstr(kcontrol->id.name, " Volume"))
uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
else
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = shift == rshift ? 1 : 2;
uinfo->value.integer.min = 0;
- uinfo->value.integer.max = max;
+ uinfo->value.integer.max = platform_max;
return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_info_volsw);
@@ -2015,16 +2131,20 @@ int snd_soc_info_volsw_2r(struct snd_kcontrol *kcontrol,
{
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
- int max = mc->max;
+ int platform_max;
- if (max == 1 && !strstr(kcontrol->id.name, " Volume"))
+ if (!mc->platform_max)
+ mc->platform_max = mc->max;
+ platform_max = mc->platform_max;
+
+ if (platform_max == 1 && !strstr(kcontrol->id.name, " Volume"))
uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
else
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 2;
uinfo->value.integer.min = 0;
- uinfo->value.integer.max = max;
+ uinfo->value.integer.max = platform_max;
return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_info_volsw_2r);
@@ -2125,13 +2245,17 @@ int snd_soc_info_volsw_s8(struct snd_kcontrol *kcontrol,
{
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
- int max = mc->max;
+ int platform_max;
int min = mc->min;
+ if (!mc->platform_max)
+ mc->platform_max = mc->max;
+ platform_max = mc->platform_max;
+
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 2;
uinfo->value.integer.min = 0;
- uinfo->value.integer.max = max-min;
+ uinfo->value.integer.max = platform_max - min;
return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_info_volsw_s8);
@@ -2190,6 +2314,45 @@ int snd_soc_put_volsw_s8(struct snd_kcontrol *kcontrol,
EXPORT_SYMBOL_GPL(snd_soc_put_volsw_s8);
/**
+ * snd_soc_limit_volume - Set new limit to an existing volume control.
+ *
+ * @codec: where to look for the control
+ * @name: Name of the control
+ * @max: new maximum limit
+ *
+ * Return 0 for success, else error.
+ */
+int snd_soc_limit_volume(struct snd_soc_codec *codec,
+ const char *name, int max)
+{
+ struct snd_card *card = codec->card;
+ struct snd_kcontrol *kctl;
+ struct soc_mixer_control *mc;
+ int found = 0;
+ int ret = -EINVAL;
+
+ /* Sanity check for name and max */
+ if (unlikely(!name || max <= 0))
+ return -EINVAL;
+
+ list_for_each_entry(kctl, &card->controls, list) {
+ if (!strncmp(kctl->id.name, name, sizeof(kctl->id.name))) {
+ found = 1;
+ break;
+ }
+ }
+ if (found) {
+ mc = (struct soc_mixer_control *)kctl->private_value;
+ if (max <= mc->max) {
+ mc->platform_max = max;
+ ret = 0;
+ }
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(snd_soc_limit_volume);
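
[Illustrative sketch, not part of this patch.] A machine driver can now cap an existing codec control once the card is instantiated; the control name "Headphone Volume" and the limit value are assumptions.

#include <sound/soc.h>

/* hypothetical machine-driver init fragment: keep the headphone output
 * below a level that is safe for the external amplifier */
static int example_limit_headphone(struct snd_soc_codec *codec)
{
        return snd_soc_limit_volume(codec, "Headphone Volume", 100);
}
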
+
+/**
* snd_soc_dai_set_sysclk - configure DAI system or master clock.
* @dai: DAI
* @clk_id: DAI specific clock ID
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 7c28f401f436..03cb7c05ebec 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -98,7 +98,6 @@ static void pop_dbg(u32 pop_time, const char *fmt, ...)
if (pop_time) {
vprintk(fmt, args);
- pop_wait(pop_time);
}
va_end(args);
@@ -315,62 +314,14 @@ static int dapm_update_bits(struct snd_soc_dapm_widget *widget)
pop_dbg(codec->pop_time, "pop test %s : %s in %d ms\n",
widget->name, widget->power ? "on" : "off",
codec->pop_time);
- snd_soc_write(codec, widget->reg, new);
pop_wait(codec->pop_time);
+ snd_soc_write(codec, widget->reg, new);
}
pr_debug("reg %x old %x new %x change %d\n", widget->reg,
old, new, change);
return change;
}
-/* ramps the volume up or down to minimise pops before or after a
- * DAPM power event */
-static int dapm_set_pga(struct snd_soc_dapm_widget *widget, int power)
-{
- const struct snd_kcontrol_new *k = widget->kcontrols;
-
- if (widget->muted && !power)
- return 0;
- if (!widget->muted && power)
- return 0;
-
- if (widget->num_kcontrols && k) {
- struct soc_mixer_control *mc =
- (struct soc_mixer_control *)k->private_value;
- unsigned int reg = mc->reg;
- unsigned int shift = mc->shift;
- int max = mc->max;
- unsigned int mask = (1 << fls(max)) - 1;
- unsigned int invert = mc->invert;
-
- if (power) {
- int i;
- /* power up has happended, increase volume to last level */
- if (invert) {
- for (i = max; i > widget->saved_value; i--)
- snd_soc_update_bits(widget->codec, reg, mask, i);
- } else {
- for (i = 0; i < widget->saved_value; i++)
- snd_soc_update_bits(widget->codec, reg, mask, i);
- }
- widget->muted = 0;
- } else {
- /* power down is about to occur, decrease volume to mute */
- int val = snd_soc_read(widget->codec, reg);
- int i = widget->saved_value = (val >> shift) & mask;
- if (invert) {
- for (; i < mask; i++)
- snd_soc_update_bits(widget->codec, reg, mask, i);
- } else {
- for (; i > 0; i--)
- snd_soc_update_bits(widget->codec, reg, mask, i);
- }
- widget->muted = 1;
- }
- }
- return 0;
-}
-
/* create new dapm mixer control */
static int dapm_new_mixer(struct snd_soc_codec *codec,
struct snd_soc_dapm_widget *w)
@@ -465,20 +416,10 @@ err:
static int dapm_new_pga(struct snd_soc_codec *codec,
struct snd_soc_dapm_widget *w)
{
- struct snd_kcontrol *kcontrol;
- int ret = 0;
-
- if (!w->num_kcontrols)
- return -EINVAL;
+ if (w->num_kcontrols)
+ pr_err("asoc: PGA controls not supported: '%s'\n", w->name);
- kcontrol = snd_soc_cnew(&w->kcontrols[0], w, w->name);
- ret = snd_ctl_add(codec->card, kcontrol);
- if (ret < 0) {
- printk(KERN_ERR "asoc: failed to add kcontrol %s\n", w->name);
- return ret;
- }
-
- return ret;
+ return 0;
}
/* reset 'walked' bit for each dapm path */
@@ -490,6 +431,25 @@ static inline void dapm_clear_walk(struct snd_soc_codec *codec)
p->walked = 0;
}
+/* We implement power down on suspend by checking the power state of
+ * the ALSA card - when we are suspending, the ALSA state for the card
+ * is set to D3.
+ */
+static int snd_soc_dapm_suspend_check(struct snd_soc_dapm_widget *widget)
+{
+ struct snd_soc_codec *codec = widget->codec;
+
+ switch (snd_power_get_state(codec->card)) {
+ case SNDRV_CTL_POWER_D3hot:
+ case SNDRV_CTL_POWER_D3cold:
+ if (widget->ignore_suspend)
+ pr_debug("%s ignoring suspend\n", widget->name);
+ return widget->ignore_suspend;
+ default:
+ return 1;
+ }
+}
+
/*
* Recursively check for a completed path to an active or physically connected
* output widget. Returns number of complete paths.
@@ -506,7 +466,7 @@ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget)
case snd_soc_dapm_adc:
case snd_soc_dapm_aif_out:
if (widget->active)
- return 1;
+ return snd_soc_dapm_suspend_check(widget);
default:
break;
}
@@ -514,12 +474,12 @@ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget)
if (widget->connected) {
/* connected pin ? */
if (widget->id == snd_soc_dapm_output && !widget->ext)
- return 1;
+ return snd_soc_dapm_suspend_check(widget);
/* connected jack or spk ? */
if (widget->id == snd_soc_dapm_hp || widget->id == snd_soc_dapm_spk ||
(widget->id == snd_soc_dapm_line && !list_empty(&widget->sources)))
- return 1;
+ return snd_soc_dapm_suspend_check(widget);
}
list_for_each_entry(path, &widget->sinks, list_source) {
@@ -552,7 +512,7 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget)
case snd_soc_dapm_dac:
case snd_soc_dapm_aif_in:
if (widget->active)
- return 1;
+ return snd_soc_dapm_suspend_check(widget);
default:
break;
}
@@ -560,16 +520,16 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget)
if (widget->connected) {
/* connected pin ? */
if (widget->id == snd_soc_dapm_input && !widget->ext)
- return 1;
+ return snd_soc_dapm_suspend_check(widget);
/* connected VMID/Bias for lower pops */
if (widget->id == snd_soc_dapm_vmid)
- return 1;
+ return snd_soc_dapm_suspend_check(widget);
/* connected jack ? */
if (widget->id == snd_soc_dapm_mic ||
(widget->id == snd_soc_dapm_line && !list_empty(&widget->sinks)))
- return 1;
+ return snd_soc_dapm_suspend_check(widget);
}
list_for_each_entry(path, &widget->sources, list_sink) {
@@ -634,16 +594,8 @@ static int dapm_generic_apply_power(struct snd_soc_dapm_widget *w)
return ret;
}
- /* Lower PGA volume to reduce pops */
- if (w->id == snd_soc_dapm_pga && !w->power)
- dapm_set_pga(w, w->power);
-
dapm_update_bits(w);
- /* Raise PGA volume to reduce pops */
- if (w->id == snd_soc_dapm_pga && w->power)
- dapm_set_pga(w, w->power);
-
/* power up post event */
if (w->power && w->event &&
(w->event_flags & SND_SOC_DAPM_POST_PMU)) {
@@ -810,10 +762,6 @@ static void dapm_seq_run_coalesced(struct snd_soc_codec *codec,
pr_err("%s: pre event failed: %d\n",
w->name, ret);
}
-
- /* Lower PGA volume to reduce pops */
- if (w->id == snd_soc_dapm_pga && !w->power)
- dapm_set_pga(w, w->power);
}
if (reg >= 0) {
@@ -825,10 +773,6 @@ static void dapm_seq_run_coalesced(struct snd_soc_codec *codec,
}
list_for_each_entry(w, pending, power_list) {
- /* Raise PGA volume to reduce pops */
- if (w->id == snd_soc_dapm_pga && w->power)
- dapm_set_pga(w, w->power);
-
/* power up post event */
if (w->power && w->event &&
(w->event_flags & SND_SOC_DAPM_POST_PMU)) {
@@ -973,19 +917,12 @@ static int dapm_power_widgets(struct snd_soc_codec *codec, int event)
if (!w->power_check)
continue;
- /* If we're suspending then pull down all the
- * power. */
- switch (event) {
- case SND_SOC_DAPM_STREAM_SUSPEND:
- power = 0;
- break;
-
- default:
+ if (!w->force)
power = w->power_check(w);
- if (power)
- sys_power = 1;
- break;
- }
+ else
+ power = 1;
+ if (power)
+ sys_power = 1;
if (w->power == power)
continue;
@@ -1076,6 +1013,7 @@ static int dapm_power_widgets(struct snd_soc_codec *codec, int event)
pop_dbg(codec->pop_time, "DAPM sequencing finished, waiting %dms\n",
codec->pop_time);
+ pop_wait(codec->pop_time);
return 0;
}
@@ -1338,6 +1276,9 @@ static int snd_soc_dapm_set_pin(struct snd_soc_codec *codec,
if (!strcmp(w->name, pin)) {
pr_debug("dapm: %s: pin %s\n", codec->name, pin);
w->connected = status;
+ /* Allow disabling of forced pins */
+ if (status == 0)
+ w->force = 0;
return 0;
}
}
@@ -1594,12 +1535,6 @@ int snd_soc_dapm_get_volsw(struct snd_kcontrol *kcontrol,
unsigned int invert = mc->invert;
unsigned int mask = (1 << fls(max)) - 1;
- /* return the saved value if we are powered down */
- if (widget->id == snd_soc_dapm_pga && !widget->power) {
- ucontrol->value.integer.value[0] = widget->saved_value;
- return 0;
- }
-
ucontrol->value.integer.value[0] =
(snd_soc_read(widget->codec, reg) >> shift) & mask;
if (shift != rshift)
@@ -1659,13 +1594,6 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
mutex_lock(&widget->codec->mutex);
widget->value = val;
- /* save volume value if the widget is powered down */
- if (widget->id == snd_soc_dapm_pga && !widget->power) {
- widget->saved_value = val;
- mutex_unlock(&widget->codec->mutex);
- return 1;
- }
-
if (snd_soc_test_bits(widget->codec, reg, val_mask, val)) {
if (val)
/* new connection */
@@ -2094,18 +2022,8 @@ int snd_soc_dapm_stream_event(struct snd_soc_codec *codec,
w->active = 0;
break;
case SND_SOC_DAPM_STREAM_SUSPEND:
- if (w->active)
- w->suspend = 1;
- w->active = 0;
- break;
case SND_SOC_DAPM_STREAM_RESUME:
- if (w->suspend) {
- w->active = 1;
- w->suspend = 0;
- }
- break;
case SND_SOC_DAPM_STREAM_PAUSE_PUSH:
- break;
case SND_SOC_DAPM_STREAM_PAUSE_RELEASE:
break;
}
@@ -2135,6 +2053,36 @@ int snd_soc_dapm_enable_pin(struct snd_soc_codec *codec, const char *pin)
EXPORT_SYMBOL_GPL(snd_soc_dapm_enable_pin);
/**
+ * snd_soc_dapm_force_enable_pin - force a pin to be enabled
+ * @codec: SoC codec
+ * @pin: pin name
+ *
+ * Enables input/output pin regardless of any other state. This is
+ * intended for use with microphone bias supplies used in microphone
+ * jack detection.
+ *
+ * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to
+ * do any widget power switching.
+ */
+int snd_soc_dapm_force_enable_pin(struct snd_soc_codec *codec, const char *pin)
+{
+ struct snd_soc_dapm_widget *w;
+
+ list_for_each_entry(w, &codec->dapm_widgets, list) {
+ if (!strcmp(w->name, pin)) {
+ pr_debug("dapm: %s: pin %s\n", codec->name, pin);
+ w->connected = 1;
+ w->force = 1;
+ return 0;
+ }
+ }
+
+ pr_err("dapm: %s: configuring unknown pin %s\n", codec->name, pin);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(snd_soc_dapm_force_enable_pin);
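
[Illustrative sketch, not part of this patch.] Jack-detection code would typically force the bias supply on and then sync DAPM, as the NOTE above requires; the pin name "Mic Bias" is an assumption.

#include <sound/soc.h>
#include <sound/soc-dapm.h>

/* hypothetical helper keeping the microphone bias powered for detection */
static void example_enable_mic_detect(struct snd_soc_codec *codec)
{
        snd_soc_dapm_force_enable_pin(codec, "Mic Bias");
        snd_soc_dapm_sync(codec);       /* apply the forced power state */
}
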
+
+/**
* snd_soc_dapm_disable_pin - disable pin.
* @codec: SoC codec
* @pin: pin name
@@ -2192,6 +2140,33 @@ int snd_soc_dapm_get_pin_status(struct snd_soc_codec *codec, const char *pin)
EXPORT_SYMBOL_GPL(snd_soc_dapm_get_pin_status);
/**
+ * snd_soc_dapm_ignore_suspend - ignore suspend status for DAPM endpoint
+ * @codec: audio codec
+ * @pin: audio signal pin endpoint (or start point)
+ *
+ * Mark the given endpoint or pin as ignoring suspend. When the
+ * system is disabled, a path between two endpoints flagged as ignoring
+ * suspend will not be disabled. The path must already be enabled via
+ * normal means at suspend time; it will not be turned on if it was not
+ * already enabled.
+ */
+int snd_soc_dapm_ignore_suspend(struct snd_soc_codec *codec, const char *pin)
+{
+ struct snd_soc_dapm_widget *w;
+
+ list_for_each_entry(w, &codec->dapm_widgets, list) {
+ if (!strcmp(w->name, pin)) {
+ w->ignore_suspend = 1;
+ return 0;
+ }
+ }
+
+ pr_err("Unknown DAPM pin: %s\n", pin);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(snd_soc_dapm_ignore_suspend);
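
[Illustrative sketch, not part of this patch.] A machine driver that wants a low-power path to survive suspend flags both of its endpoints; the widget names are assumptions, and the path must already be powered when suspend happens.

#include <sound/soc.h>
#include <sound/soc-dapm.h>

/* hypothetical setup marking both ends of an always-on voice path */
static void example_keep_voice_path(struct snd_soc_codec *codec)
{
        snd_soc_dapm_ignore_suspend(codec, "MICBIAS");
        snd_soc_dapm_ignore_suspend(codec, "Earpiece");
}
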
+
+/**
* snd_soc_dapm_free - free dapm resources
* @socdev: SoC device
*
diff --git a/sound/soc/soc-jack.c b/sound/soc/soc-jack.c
index 3c07a94c2e30..29159e1781d0 100644
--- a/sound/soc/soc-jack.c
+++ b/sound/soc/soc-jack.c
@@ -37,6 +37,7 @@ int snd_soc_jack_new(struct snd_soc_card *card, const char *id, int type,
{
jack->card = card;
INIT_LIST_HEAD(&jack->pins);
+ BLOCKING_INIT_NOTIFIER_HEAD(&jack->notifier);
return snd_jack_new(card->codec->card, id, type, &jack->jack);
}
@@ -63,10 +64,9 @@ void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask)
int enable;
int oldstatus;
- if (!jack) {
- WARN_ON_ONCE(!jack);
+ if (!jack)
return;
- }
+
codec = jack->card->codec;
mutex_lock(&codec->mutex);
@@ -93,6 +93,9 @@ void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask)
snd_soc_dapm_disable_pin(codec, pin->pin);
}
+ /* Report before the DAPM sync to help users updating micbias status */
+ blocking_notifier_call_chain(&jack->notifier, status, NULL);
+
snd_soc_dapm_sync(codec);
snd_jack_report(jack->jack, status);
@@ -143,6 +146,40 @@ int snd_soc_jack_add_pins(struct snd_soc_jack *jack, int count,
}
EXPORT_SYMBOL_GPL(snd_soc_jack_add_pins);
+/**
+ * snd_soc_jack_notifier_register - Register a notifier for jack status
+ *
+ * @jack: ASoC jack
+ * @nb: Notifier block to register
+ *
+ * Register for notification of the current status of the jack. Note
+ * that it is not possible to report additional jack events in the
+ * callback from the notifier; this is intended to support
+ * applications such as enabling electrical detection only when a
+ * mechanical detection event has occurred.
+ */
+void snd_soc_jack_notifier_register(struct snd_soc_jack *jack,
+ struct notifier_block *nb)
+{
+ blocking_notifier_chain_register(&jack->notifier, nb);
+}
+EXPORT_SYMBOL_GPL(snd_soc_jack_notifier_register);
+
+/**
+ * snd_soc_jack_notifier_unregister - Unregister a notifier for jack status
+ *
+ * @jack: ASoC jack
+ * @nb: Notifier block to unregister
+ *
+ * Stop notifying for status changes.
+ */
+void snd_soc_jack_notifier_unregister(struct snd_soc_jack *jack,
+ struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&jack->notifier, nb);
+}
+EXPORT_SYMBOL_GPL(snd_soc_jack_notifier_unregister);
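
[Illustrative sketch, not part of this patch.] A consumer of the new notifier interface might look like the following; the function names are assumptions, and SND_JACK_MECHANICAL is used to model the two-stage detection scheme the kerneldoc above describes.

#include <linux/notifier.h>
#include <sound/jack.h>
#include <sound/soc.h>

/* hypothetical notifier: start electrical detection only once a
 * mechanical insertion has been reported */
static int example_jack_notify(struct notifier_block *nb,
                               unsigned long status, void *data)
{
        if (status & SND_JACK_MECHANICAL) {
                /* kick off electrical (micbias) detection here */
        }
        return NOTIFY_OK;
}

static struct notifier_block example_jack_nb = {
        .notifier_call = example_jack_notify,
};

/* after snd_soc_jack_new():
 *      snd_soc_jack_notifier_register(&jack, &example_jack_nb);
 */
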
+
#ifdef CONFIG_GPIOLIB
/* gpio detect */
static void snd_soc_jack_gpio_detect(struct snd_soc_jack_gpio *gpio)
diff --git a/sound/soc/txx9/txx9aclc.c b/sound/soc/txx9/txx9aclc.c
index 49cc7ea9a518..7082cb8045f2 100644
--- a/sound/soc/txx9/txx9aclc.c
+++ b/sound/soc/txx9/txx9aclc.c
@@ -160,7 +160,7 @@ static void txx9aclc_dma_tasklet(unsigned long data)
void __iomem *base = drvdata->base;
spin_unlock_irqrestore(&dmadata->dma_lock, flags);
- chan->device->device_terminate_all(chan);
+ chan->device->device_control(chan, DMA_TERMINATE_ALL);
/* first time */
for (i = 0; i < NR_DMA_CHAIN; i++) {
desc = txx9aclc_dma_submit(dmadata,
@@ -268,7 +268,7 @@ static int txx9aclc_pcm_close(struct snd_pcm_substream *substream)
struct dma_chan *chan = dmadata->dma_chan;
dmadata->frag_count = -1;
- chan->device->device_terminate_all(chan);
+ chan->device->device_control(chan, DMA_TERMINATE_ALL);
return 0;
}
@@ -397,7 +397,7 @@ static int txx9aclc_pcm_remove(struct platform_device *pdev)
struct dma_chan *chan = dmadata->dma_chan;
if (chan) {
dmadata->frag_count = -1;
- chan->device->device_terminate_all(chan);
+ chan->device->device_control(chan, DMA_TERMINATE_ALL);
dma_release_channel(chan);
}
dev->dmadata[i].dma_chan = NULL;
diff --git a/sound/usb/Kconfig b/sound/usb/Kconfig
index c570ae3e6d55..44d6d2ec964f 100644
--- a/sound/usb/Kconfig
+++ b/sound/usb/Kconfig
@@ -22,8 +22,7 @@ config SND_USB_AUDIO
will be called snd-usb-audio.
config SND_USB_UA101
- tristate "Edirol UA-101/UA-1000 driver (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ tristate "Edirol UA-101/UA-1000 driver"
select SND_PCM
select SND_RAWMIDI
help
@@ -65,6 +64,7 @@ config SND_USB_CAIAQ
* Native Instruments Audio 8 DJ
* Native Instruments Guitar Rig Session I/O
* Native Instruments Guitar Rig mobile
+ * Native Instruments Traktor Kontrol X1
To compile this driver as a module, choose M here: the module
will be called snd-usb-caiaq.
diff --git a/sound/usb/Makefile b/sound/usb/Makefile
index 5bf64aef9558..e7ac7f493a8f 100644
--- a/sound/usb/Makefile
+++ b/sound/usb/Makefile
@@ -2,14 +2,24 @@
# Makefile for ALSA
#
-snd-usb-audio-objs := usbaudio.o usbmixer.o
-snd-usb-lib-objs := usbmidi.o
-snd-ua101-objs := ua101.o
+snd-usb-audio-objs := card.o \
+ mixer.o \
+ mixer_quirks.o \
+ proc.o \
+ quirks.o \
+ format.o \
+ endpoint.o \
+ urb.o \
+ pcm.o \
+ helper.o
+
+snd-usbmidi-lib-objs := midi.o
# Toplevel Module Dependency
-obj-$(CONFIG_SND_USB_AUDIO) += snd-usb-audio.o snd-usb-lib.o
-obj-$(CONFIG_SND_USB_UA101) += snd-ua101.o snd-usb-lib.o
-obj-$(CONFIG_SND_USB_USX2Y) += snd-usb-lib.o
-obj-$(CONFIG_SND_USB_US122L) += snd-usb-lib.o
+obj-$(CONFIG_SND_USB_AUDIO) += snd-usb-audio.o snd-usbmidi-lib.o
+
+obj-$(CONFIG_SND_USB_UA101) += snd-usbmidi-lib.o
+obj-$(CONFIG_SND_USB_USX2Y) += snd-usbmidi-lib.o
+obj-$(CONFIG_SND_USB_US122L) += snd-usbmidi-lib.o
-obj-$(CONFIG_SND) += usx2y/ caiaq/
+obj-$(CONFIG_SND) += misc/ usx2y/ caiaq/
diff --git a/sound/usb/caiaq/control.c b/sound/usb/caiaq/control.c
index 537102ba6b9d..36ed703a7416 100644
--- a/sound/usb/caiaq/control.c
+++ b/sound/usb/caiaq/control.c
@@ -35,33 +35,41 @@ static int control_info(struct snd_kcontrol *kcontrol,
struct snd_usb_caiaqdev *dev = caiaqdev(chip->card);
int pos = kcontrol->private_value;
int is_intval = pos & CNT_INTVAL;
- unsigned int id = dev->chip.usb_id;
+ int maxval = 63;
uinfo->count = 1;
pos &= ~CNT_INTVAL;
- if (id == USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AUDIO8DJ)
- && (pos == 0)) {
- /* current input mode of A8DJ */
- uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
- uinfo->value.integer.min = 0;
- uinfo->value.integer.max = 2;
- return 0;
- }
+ switch (dev->chip.usb_id) {
+ case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AUDIO8DJ):
+ if (pos == 0) {
+ /* current input mode of A8DJ */
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = 2;
+ return 0;
+ }
+ break;
- if (id == USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AUDIO4DJ)
- && (pos == 0)) {
- /* current input mode of A4DJ */
- uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
- uinfo->value.integer.min = 0;
- uinfo->value.integer.max = 1;
- return 0;
+ case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AUDIO4DJ):
+ if (pos == 0) {
+ /* current input mode of A4DJ */
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = 1;
+ return 0;
+ }
+ break;
+
+ case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLX1):
+ maxval = 127;
+ break;
}
if (is_intval) {
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->value.integer.min = 0;
- uinfo->value.integer.max = 64;
+ uinfo->value.integer.max = maxval;
} else {
uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
uinfo->value.integer.min = 0;
@@ -102,9 +110,10 @@ static int control_put(struct snd_kcontrol *kcontrol,
struct snd_usb_audio *chip = snd_kcontrol_chip(kcontrol);
struct snd_usb_caiaqdev *dev = caiaqdev(chip->card);
int pos = kcontrol->private_value;
+ unsigned char cmd = EP1_CMD_WRITE_IO;
- if (dev->chip.usb_id ==
- USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AUDIO4DJ)) {
+ switch (dev->chip.usb_id) {
+ case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AUDIO4DJ): {
/* A4DJ has only one control */
/* do not expose hardware input mode 0 */
dev->control_state[0] = ucontrol->value.integer.value[0] + 1;
@@ -113,10 +122,15 @@ static int control_put(struct snd_kcontrol *kcontrol,
return 1;
}
+ case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLX1):
+ cmd = EP1_CMD_DIMM_LEDS;
+ break;
+ }
+
if (pos & CNT_INTVAL) {
dev->control_state[pos & ~CNT_INTVAL]
= ucontrol->value.integer.value[0];
- snd_usb_caiaq_send_command(dev, EP1_CMD_WRITE_IO,
+ snd_usb_caiaq_send_command(dev, cmd,
dev->control_state, sizeof(dev->control_state));
} else {
if (ucontrol->value.integer.value[0])
@@ -124,7 +138,7 @@ static int control_put(struct snd_kcontrol *kcontrol,
else
dev->control_state[pos / 8] &= ~(1 << (pos % 8));
- snd_usb_caiaq_send_command(dev, EP1_CMD_WRITE_IO,
+ snd_usb_caiaq_send_command(dev, cmd,
dev->control_state, sizeof(dev->control_state));
}
@@ -273,6 +287,43 @@ static struct caiaq_controller a4dj_controller[] = {
{ "Current input mode", 0 | CNT_INTVAL }
};
+static struct caiaq_controller kontrolx1_controller[] = {
+ { "LED FX A: ON", 7 | CNT_INTVAL },
+ { "LED FX A: 1", 6 | CNT_INTVAL },
+ { "LED FX A: 2", 5 | CNT_INTVAL },
+ { "LED FX A: 3", 4 | CNT_INTVAL },
+ { "LED FX B: ON", 3 | CNT_INTVAL },
+ { "LED FX B: 1", 2 | CNT_INTVAL },
+ { "LED FX B: 2", 1 | CNT_INTVAL },
+ { "LED FX B: 3", 0 | CNT_INTVAL },
+
+ { "LED Hotcue", 28 | CNT_INTVAL },
+ { "LED Shift (white)", 29 | CNT_INTVAL },
+ { "LED Shift (green)", 30 | CNT_INTVAL },
+
+ { "LED Deck A: FX1", 24 | CNT_INTVAL },
+ { "LED Deck A: FX2", 25 | CNT_INTVAL },
+ { "LED Deck A: IN", 17 | CNT_INTVAL },
+ { "LED Deck A: OUT", 16 | CNT_INTVAL },
+ { "LED Deck A: < BEAT", 19 | CNT_INTVAL },
+ { "LED Deck A: BEAT >", 18 | CNT_INTVAL },
+ { "LED Deck A: CUE/ABS", 21 | CNT_INTVAL },
+ { "LED Deck A: CUP/REL", 20 | CNT_INTVAL },
+ { "LED Deck A: PLAY", 23 | CNT_INTVAL },
+ { "LED Deck A: SYNC", 22 | CNT_INTVAL },
+
+ { "LED Deck B: FX1", 26 | CNT_INTVAL },
+ { "LED Deck B: FX2", 27 | CNT_INTVAL },
+ { "LED Deck B: IN", 15 | CNT_INTVAL },
+ { "LED Deck B: OUT", 14 | CNT_INTVAL },
+ { "LED Deck B: < BEAT", 13 | CNT_INTVAL },
+ { "LED Deck B: BEAT >", 12 | CNT_INTVAL },
+ { "LED Deck B: CUE/ABS", 11 | CNT_INTVAL },
+ { "LED Deck B: CUP/REL", 10 | CNT_INTVAL },
+ { "LED Deck B: PLAY", 9 | CNT_INTVAL },
+ { "LED Deck B: SYNC", 8 | CNT_INTVAL },
+};
+
static int __devinit add_controls(struct caiaq_controller *c, int num,
struct snd_usb_caiaqdev *dev)
{
@@ -321,10 +372,16 @@ int __devinit snd_usb_caiaq_control_init(struct snd_usb_caiaqdev *dev)
ret = add_controls(a8dj_controller,
ARRAY_SIZE(a8dj_controller), dev);
break;
+
case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AUDIO4DJ):
ret = add_controls(a4dj_controller,
ARRAY_SIZE(a4dj_controller), dev);
break;
+
+ case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLX1):
+ ret = add_controls(kontrolx1_controller,
+ ARRAY_SIZE(kontrolx1_controller), dev);
+ break;
}
return ret;
diff --git a/sound/usb/caiaq/device.c b/sound/usb/caiaq/device.c
index afc5aeb68005..805271827675 100644
--- a/sound/usb/caiaq/device.c
+++ b/sound/usb/caiaq/device.c
@@ -47,7 +47,8 @@ MODULE_SUPPORTED_DEVICE("{{Native Instruments, RigKontrol2},"
"{Native Instruments, Audio 4 DJ},"
"{Native Instruments, Audio 8 DJ},"
"{Native Instruments, Session I/O},"
- "{Native Instruments, GuitarRig mobile}");
+ "{Native Instruments, GuitarRig mobile}"
+ "{Native Instruments, Traktor Kontrol X1}");
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-max */
static char* id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* Id for this card */
@@ -128,6 +129,11 @@ static struct usb_device_id snd_usb_id_table[] = {
.idVendor = USB_VID_NATIVEINSTRUMENTS,
.idProduct = USB_PID_AUDIO2DJ
},
+ {
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
+ .idVendor = USB_VID_NATIVEINSTRUMENTS,
+ .idProduct = USB_PID_TRAKTORKONTROLX1
+ },
{ /* terminator */ }
};
diff --git a/sound/usb/caiaq/device.h b/sound/usb/caiaq/device.h
index 44e3edf88bef..f1117ecc84fd 100644
--- a/sound/usb/caiaq/device.h
+++ b/sound/usb/caiaq/device.h
@@ -5,18 +5,20 @@
#define USB_VID_NATIVEINSTRUMENTS 0x17cc
-#define USB_PID_RIGKONTROL2 0x1969
-#define USB_PID_RIGKONTROL3 0x1940
-#define USB_PID_KORECONTROLLER 0x4711
-#define USB_PID_KORECONTROLLER2 0x4712
-#define USB_PID_AK1 0x0815
-#define USB_PID_AUDIO2DJ 0x041c
-#define USB_PID_AUDIO4DJ 0x0839
-#define USB_PID_AUDIO8DJ 0x1978
-#define USB_PID_SESSIONIO 0x1915
-#define USB_PID_GUITARRIGMOBILE 0x0d8d
+#define USB_PID_RIGKONTROL2 0x1969
+#define USB_PID_RIGKONTROL3 0x1940
+#define USB_PID_KORECONTROLLER 0x4711
+#define USB_PID_KORECONTROLLER2 0x4712
+#define USB_PID_AK1 0x0815
+#define USB_PID_AUDIO2DJ 0x041c
+#define USB_PID_AUDIO4DJ 0x0839
+#define USB_PID_AUDIO8DJ 0x1978
+#define USB_PID_SESSIONIO 0x1915
+#define USB_PID_GUITARRIGMOBILE 0x0d8d
+#define USB_PID_TRAKTORKONTROLX1 0x2305
#define EP1_BUFSIZE 64
+#define EP4_BUFSIZE 512
#define CAIAQ_USB_STR_LEN 0xff
#define MAX_STREAMS 32
@@ -104,6 +106,8 @@ struct snd_usb_caiaqdev {
struct input_dev *input_dev;
char phys[64]; /* physical device path */
unsigned short keycode[64];
+ struct urb *ep4_in_urb;
+ unsigned char ep4_in_buf[EP4_BUFSIZE];
#endif
/* ALSA */
diff --git a/sound/usb/caiaq/input.c b/sound/usb/caiaq/input.c
index a48d309bd94c..8bbfbfd4c658 100644
--- a/sound/usb/caiaq/input.c
+++ b/sound/usb/caiaq/input.c
@@ -16,9 +16,11 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/usb.h>
#include <linux/usb/input.h>
+#include <sound/core.h>
#include <sound/pcm.h>
#include "device.h"
@@ -65,6 +67,8 @@ static unsigned short keycode_kore[] = {
KEY_BRL_DOT5
};
+#define KONTROLX1_INPUTS 40
+
#define DEG90 (range / 2)
#define DEG180 (range)
#define DEG270 (DEG90 + DEG180)
@@ -162,6 +166,17 @@ static void snd_caiaq_input_read_analog(struct snd_usb_caiaqdev *dev,
input_report_abs(input_dev, ABS_Z, (buf[4] << 8) | buf[5]);
input_sync(input_dev);
break;
+ case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLX1):
+ input_report_abs(input_dev, ABS_HAT0X, (buf[8] << 8) | buf[9]);
+ input_report_abs(input_dev, ABS_HAT0Y, (buf[4] << 8) | buf[5]);
+ input_report_abs(input_dev, ABS_HAT1X, (buf[12] << 8) | buf[13]);
+ input_report_abs(input_dev, ABS_HAT1Y, (buf[2] << 8) | buf[3]);
+ input_report_abs(input_dev, ABS_HAT2X, (buf[14] << 8) | buf[15]);
+ input_report_abs(input_dev, ABS_HAT2Y, (buf[0] << 8) | buf[1]);
+ input_report_abs(input_dev, ABS_HAT3X, (buf[10] << 8) | buf[11]);
+ input_report_abs(input_dev, ABS_HAT3Y, (buf[6] << 8) | buf[7]);
+ input_sync(input_dev);
+ break;
}
}
@@ -201,7 +216,7 @@ static void snd_caiaq_input_read_erp(struct snd_usb_caiaqdev *dev,
}
static void snd_caiaq_input_read_io(struct snd_usb_caiaqdev *dev,
- char *buf, unsigned int len)
+ unsigned char *buf, unsigned int len)
{
struct input_dev *input_dev = dev->input_dev;
unsigned short *keycode = input_dev->keycode;
@@ -218,15 +233,84 @@ static void snd_caiaq_input_read_io(struct snd_usb_caiaqdev *dev,
input_report_key(input_dev, keycode[i],
buf[i / 8] & (1 << (i % 8)));
- if (dev->chip.usb_id ==
- USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_KORECONTROLLER) ||
- dev->chip.usb_id ==
- USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_KORECONTROLLER2))
+ switch (dev->chip.usb_id) {
+ case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_KORECONTROLLER):
+ case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_KORECONTROLLER2):
input_report_abs(dev->input_dev, ABS_MISC, 255 - buf[4]);
+ break;
+ case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLX1):
+ /* rotary encoders */
+ input_report_abs(dev->input_dev, ABS_X, buf[5] & 0xf);
+ input_report_abs(dev->input_dev, ABS_Y, buf[5] >> 4);
+ input_report_abs(dev->input_dev, ABS_Z, buf[6] & 0xf);
+ input_report_abs(dev->input_dev, ABS_MISC, buf[6] >> 4);
+ break;
+ }
input_sync(input_dev);
}
+static void snd_usb_caiaq_ep4_reply_dispatch(struct urb *urb)
+{
+ struct snd_usb_caiaqdev *dev = urb->context;
+ unsigned char *buf = urb->transfer_buffer;
+ int ret;
+
+ if (urb->status || !dev || urb != dev->ep4_in_urb)
+ return;
+
+ if (urb->actual_length < 24)
+ goto requeue;
+
+ switch (dev->chip.usb_id) {
+ case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLX1):
+ if (buf[0] & 0x3)
+ snd_caiaq_input_read_io(dev, buf + 1, 7);
+
+ if (buf[0] & 0x4)
+ snd_caiaq_input_read_analog(dev, buf + 8, 16);
+
+ break;
+ }
+
+requeue:
+ dev->ep4_in_urb->actual_length = 0;
+ ret = usb_submit_urb(dev->ep4_in_urb, GFP_ATOMIC);
+ if (ret < 0)
+ log("unable to submit urb. OOM!?\n");
+}
+
+static int snd_usb_caiaq_input_open(struct input_dev *idev)
+{
+ struct snd_usb_caiaqdev *dev = input_get_drvdata(idev);
+
+ if (!dev)
+ return -EINVAL;
+
+ switch (dev->chip.usb_id) {
+ case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLX1):
+ if (usb_submit_urb(dev->ep4_in_urb, GFP_KERNEL) != 0)
+ return -EIO;
+ break;
+ }
+
+ return 0;
+}
+
+static void snd_usb_caiaq_input_close(struct input_dev *idev)
+{
+ struct snd_usb_caiaqdev *dev = input_get_drvdata(idev);
+
+ if (!dev)
+ return;
+
+ switch (dev->chip.usb_id) {
+ case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLX1):
+ usb_kill_urb(dev->ep4_in_urb);
+ break;
+ }
+}
+
void snd_usb_caiaq_input_dispatch(struct snd_usb_caiaqdev *dev,
char *buf,
unsigned int len)
@@ -251,7 +335,7 @@ int snd_usb_caiaq_input_init(struct snd_usb_caiaqdev *dev)
{
struct usb_device *usb_dev = dev->chip.dev;
struct input_dev *input;
- int i, ret;
+ int i, ret = 0;
input = input_allocate_device();
if (!input)
@@ -265,7 +349,9 @@ int snd_usb_caiaq_input_init(struct snd_usb_caiaqdev *dev)
usb_to_input_id(usb_dev, &input->id);
input->dev.parent = &usb_dev->dev;
- switch (dev->chip.usb_id) {
+ input_set_drvdata(input, dev);
+
+ switch (dev->chip.usb_id) {
case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_RIGKONTROL2):
input->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
input->absbit[0] = BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) |
@@ -326,25 +412,72 @@ int snd_usb_caiaq_input_init(struct snd_usb_caiaqdev *dev)
input_set_abs_params(input, ABS_MISC, 0, 255, 0, 1);
snd_usb_caiaq_set_auto_msg(dev, 1, 10, 5);
break;
+ case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLX1):
+ input->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+ input->absbit[0] = BIT_MASK(ABS_HAT0X) | BIT_MASK(ABS_HAT0Y) |
+ BIT_MASK(ABS_HAT1X) | BIT_MASK(ABS_HAT1Y) |
+ BIT_MASK(ABS_HAT2X) | BIT_MASK(ABS_HAT2Y) |
+ BIT_MASK(ABS_HAT3X) | BIT_MASK(ABS_HAT3Y) |
+ BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) |
+ BIT_MASK(ABS_Z);
+ input->absbit[BIT_WORD(ABS_MISC)] |= BIT_MASK(ABS_MISC);
+ BUILD_BUG_ON(sizeof(dev->keycode) < KONTROLX1_INPUTS);
+ for (i = 0; i < KONTROLX1_INPUTS; i++)
+ dev->keycode[i] = BTN_MISC + i;
+ input->keycodemax = KONTROLX1_INPUTS;
+
+ /* analog potentiometers */
+ input_set_abs_params(input, ABS_HAT0X, 0, 4096, 0, 10);
+ input_set_abs_params(input, ABS_HAT0Y, 0, 4096, 0, 10);
+ input_set_abs_params(input, ABS_HAT1X, 0, 4096, 0, 10);
+ input_set_abs_params(input, ABS_HAT1Y, 0, 4096, 0, 10);
+ input_set_abs_params(input, ABS_HAT2X, 0, 4096, 0, 10);
+ input_set_abs_params(input, ABS_HAT2Y, 0, 4096, 0, 10);
+ input_set_abs_params(input, ABS_HAT3X, 0, 4096, 0, 10);
+ input_set_abs_params(input, ABS_HAT3Y, 0, 4096, 0, 10);
+
+ /* rotary encoders */
+ input_set_abs_params(input, ABS_X, 0, 0xf, 0, 1);
+ input_set_abs_params(input, ABS_Y, 0, 0xf, 0, 1);
+ input_set_abs_params(input, ABS_Z, 0, 0xf, 0, 1);
+ input_set_abs_params(input, ABS_MISC, 0, 0xf, 0, 1);
+
+ dev->ep4_in_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!dev->ep4_in_urb) {
+ ret = -ENOMEM;
+ goto exit_free_idev;
+ }
+
+ usb_fill_bulk_urb(dev->ep4_in_urb, usb_dev,
+ usb_rcvbulkpipe(usb_dev, 0x4),
+ dev->ep4_in_buf, EP4_BUFSIZE,
+ snd_usb_caiaq_ep4_reply_dispatch, dev);
+
+ snd_usb_caiaq_set_auto_msg(dev, 1, 10, 5);
+
+ break;
default:
/* no input methods supported on this device */
- input_free_device(input);
- return 0;
+ goto exit_free_idev;
}
+ input->open = snd_usb_caiaq_input_open;
+ input->close = snd_usb_caiaq_input_close;
input->keycode = dev->keycode;
input->keycodesize = sizeof(unsigned short);
for (i = 0; i < input->keycodemax; i++)
__set_bit(dev->keycode[i], input->keybit);
ret = input_register_device(input);
- if (ret < 0) {
- input_free_device(input);
- return ret;
- }
+ if (ret < 0)
+ goto exit_free_idev;
dev->input_dev = input;
return 0;
+
+exit_free_idev:
+ input_free_device(input);
+ return ret;
}
void snd_usb_caiaq_input_free(struct snd_usb_caiaqdev *dev)
@@ -352,6 +485,10 @@ void snd_usb_caiaq_input_free(struct snd_usb_caiaqdev *dev)
if (!dev || !dev->input_dev)
return;
+ usb_kill_urb(dev->ep4_in_urb);
+ usb_free_urb(dev->ep4_in_urb);
+ dev->ep4_in_urb = NULL;
+
input_unregister_device(dev->input_dev);
dev->input_dev = NULL;
}
diff --git a/sound/usb/card.c b/sound/usb/card.c
new file mode 100644
index 000000000000..da1346bd4856
--- /dev/null
+++ b/sound/usb/card.c
@@ -0,0 +1,652 @@
+/*
+ * (Tentative) USB Audio Driver for ALSA
+ *
+ * Copyright (c) 2002 by Takashi Iwai <tiwai@suse.de>
+ *
+ * Much code was borrowed from audio.c by
+ * Alan Cox (alan@lxorguk.ukuu.org.uk)
+ * Thomas Sailer (sailer@ife.ee.ethz.ch)
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ *
+ * NOTES:
+ *
+ * - async unlink should be used for avoiding the sleep inside lock.
+ * 2.4.22 usb-uhci seems buggy for async unlinking and results in
+ * oops. in such a case, pass the async_unlink=0 option.
+ * - the linked URBs would be preferred but not used so far because of
+ * the instability of unlinking.
+ * - type II is not supported properly. there is no device which supports
+ * this type *correctly*. SB extigy looks as if it supports it, but it's
+ * indeed an AC3 stream packed in SPDIF frames (i.e. no real AC3 stream).
+ */
+
+
+#include <linux/bitops.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/usb.h>
+#include <linux/moduleparam.h>
+#include <linux/mutex.h>
+#include <linux/usb/audio.h>
+#include <linux/usb/audio-v2.h>
+
+#include <sound/core.h>
+#include <sound/info.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/initval.h>
+
+#include "usbaudio.h"
+#include "card.h"
+#include "midi.h"
+#include "mixer.h"
+#include "proc.h"
+#include "quirks.h"
+#include "endpoint.h"
+#include "helper.h"
+#include "debug.h"
+#include "pcm.h"
+#include "urb.h"
+#include "format.h"
+
+MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");
+MODULE_DESCRIPTION("USB Audio");
+MODULE_LICENSE("GPL");
+MODULE_SUPPORTED_DEVICE("{{Generic,USB Audio}}");
+
+
+static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
+static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
+static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;/* Enable this card */
+/* Vendor/product IDs for this card */
+static int vid[SNDRV_CARDS] = { [0 ... (SNDRV_CARDS-1)] = -1 };
+static int pid[SNDRV_CARDS] = { [0 ... (SNDRV_CARDS-1)] = -1 };
+static int nrpacks = 8; /* max. number of packets per urb */
+static int async_unlink = 1;
+static int device_setup[SNDRV_CARDS]; /* device parameter for this card */
+static int ignore_ctl_error;
+
+module_param_array(index, int, NULL, 0444);
+MODULE_PARM_DESC(index, "Index value for the USB audio adapter.");
+module_param_array(id, charp, NULL, 0444);
+MODULE_PARM_DESC(id, "ID string for the USB audio adapter.");
+module_param_array(enable, bool, NULL, 0444);
+MODULE_PARM_DESC(enable, "Enable USB audio adapter.");
+module_param_array(vid, int, NULL, 0444);
+MODULE_PARM_DESC(vid, "Vendor ID for the USB audio device.");
+module_param_array(pid, int, NULL, 0444);
+MODULE_PARM_DESC(pid, "Product ID for the USB audio device.");
+module_param(nrpacks, int, 0644);
+MODULE_PARM_DESC(nrpacks, "Max. number of packets per URB.");
+module_param(async_unlink, bool, 0444);
+MODULE_PARM_DESC(async_unlink, "Use async unlink mode.");
+module_param_array(device_setup, int, NULL, 0444);
+MODULE_PARM_DESC(device_setup, "Specific device setup (if needed).");
+module_param(ignore_ctl_error, bool, 0444);
+MODULE_PARM_DESC(ignore_ctl_error,
+ "Ignore errors from USB controller for mixer interfaces.");
+
+/*
+ * we keep the snd_usb_audio_t instances by ourselves for merging
+ * all the interfaces on the same card into one sound device.
+ */
+
+static DEFINE_MUTEX(register_mutex);
+static struct snd_usb_audio *usb_chip[SNDRV_CARDS];
+static struct usb_driver usb_audio_driver;
+
+/*
+ * disconnect streams
+ * called from snd_usb_audio_disconnect()
+ */
+static void snd_usb_stream_disconnect(struct list_head *head)
+{
+ int idx;
+ struct snd_usb_stream *as;
+ struct snd_usb_substream *subs;
+
+ as = list_entry(head, struct snd_usb_stream, list);
+ for (idx = 0; idx < 2; idx++) {
+ subs = &as->substream[idx];
+ if (!subs->num_formats)
+ return;
+ snd_usb_release_substream_urbs(subs, 1);
+ subs->interface = -1;
+ }
+}
+
+static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int interface)
+{
+ struct usb_device *dev = chip->dev;
+ struct usb_host_interface *alts;
+ struct usb_interface_descriptor *altsd;
+ struct usb_interface *iface = usb_ifnum_to_if(dev, interface);
+
+ if (!iface) {
+ snd_printk(KERN_ERR "%d:%u:%d : does not exist\n",
+ dev->devnum, ctrlif, interface);
+ return -EINVAL;
+ }
+
+ if (usb_interface_claimed(iface)) {
+ snd_printdd(KERN_INFO "%d:%d:%d: skipping, already claimed\n",
+ dev->devnum, ctrlif, interface);
+ return -EINVAL;
+ }
+
+ alts = &iface->altsetting[0];
+ altsd = get_iface_desc(alts);
+ if ((altsd->bInterfaceClass == USB_CLASS_AUDIO ||
+ altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC) &&
+ altsd->bInterfaceSubClass == USB_SUBCLASS_MIDISTREAMING) {
+ int err = snd_usbmidi_create(chip->card, iface,
+ &chip->midi_list, NULL);
+ if (err < 0) {
+ snd_printk(KERN_ERR "%d:%u:%d: cannot create sequencer device\n",
+ dev->devnum, ctrlif, interface);
+ return -EINVAL;
+ }
+ usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L);
+
+ return 0;
+ }
+
+ if ((altsd->bInterfaceClass != USB_CLASS_AUDIO &&
+ altsd->bInterfaceClass != USB_CLASS_VENDOR_SPEC) ||
+ altsd->bInterfaceSubClass != USB_SUBCLASS_AUDIOSTREAMING) {
+ snd_printdd(KERN_ERR "%d:%u:%d: skipping non-supported interface %d\n",
+ dev->devnum, ctrlif, interface, altsd->bInterfaceClass);
+ /* skip non-supported classes */
+ return -EINVAL;
+ }
+
+ if (snd_usb_get_speed(dev) == USB_SPEED_LOW) {
+ snd_printk(KERN_ERR "low speed audio streaming not supported\n");
+ return -EINVAL;
+ }
+
+ if (! snd_usb_parse_audio_endpoints(chip, interface)) {
+ usb_set_interface(dev, interface, 0); /* reset the current interface */
+ usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * parse audio control descriptor and create pcm/midi streams
+ */
+static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
+{
+ struct usb_device *dev = chip->dev;
+ struct usb_host_interface *host_iface;
+ struct usb_interface_descriptor *altsd;
+ void *control_header;
+ int i, protocol;
+
+ /* find audiocontrol interface */
+ host_iface = &usb_ifnum_to_if(dev, ctrlif)->altsetting[0];
+ control_header = snd_usb_find_csint_desc(host_iface->extra,
+ host_iface->extralen,
+ NULL, UAC_HEADER);
+ altsd = get_iface_desc(host_iface);
+ protocol = altsd->bInterfaceProtocol;
+
+ if (!control_header) {
+ snd_printk(KERN_ERR "cannot find UAC_HEADER\n");
+ return -EINVAL;
+ }
+
+ switch (protocol) {
+ case UAC_VERSION_1: {
+ struct uac_ac_header_descriptor_v1 *h1 = control_header;
+
+ if (!h1->bInCollection) {
+ snd_printk(KERN_INFO "skipping empty audio interface (v1)\n");
+ return -EINVAL;
+ }
+
+ if (h1->bLength < sizeof(*h1) + h1->bInCollection) {
+ snd_printk(KERN_ERR "invalid UAC_HEADER (v1)\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < h1->bInCollection; i++)
+ snd_usb_create_stream(chip, ctrlif, h1->baInterfaceNr[i]);
+
+ break;
+ }
+
+ case UAC_VERSION_2: {
+ struct uac_clock_source_descriptor *cs;
+ struct usb_interface_assoc_descriptor *assoc =
+ usb_ifnum_to_if(dev, ctrlif)->intf_assoc;
+
+ if (!assoc) {
+ snd_printk(KERN_ERR "Audio class v2 interfaces need an interface association\n");
+ return -EINVAL;
+ }
+
+ /* FIXME: for now, we expect there is at least one clock source
+ * descriptor and we always take the first one.
+ * We should properly support devices with multiple clock sources,
+ * clock selectors and sample rate conversion units. */
+
+ cs = snd_usb_find_csint_desc(host_iface->extra, host_iface->extralen,
+ NULL, UAC2_CLOCK_SOURCE);
+
+ if (!cs) {
+ snd_printk(KERN_ERR "CLOCK_SOURCE descriptor not found\n");
+ return -EINVAL;
+ }
+
+ chip->clock_id = cs->bClockID;
+
+ for (i = 0; i < assoc->bInterfaceCount; i++) {
+ int intf = assoc->bFirstInterface + i;
+
+ if (intf != ctrlif)
+ snd_usb_create_stream(chip, ctrlif, intf);
+ }
+
+ break;
+ }
+
+ default:
+ snd_printk(KERN_ERR "unknown protocol version 0x%02x\n", protocol);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * free the chip instance
+ *
+ * here there is not much to do, since the pcm and controls are already freed
+ *
+ */
+
+static int snd_usb_audio_free(struct snd_usb_audio *chip)
+{
+ kfree(chip);
+ return 0;
+}
+
+static int snd_usb_audio_dev_free(struct snd_device *device)
+{
+ struct snd_usb_audio *chip = device->device_data;
+ return snd_usb_audio_free(chip);
+}
+
+
+/*
+ * create a chip instance and set its names.
+ */
+static int snd_usb_audio_create(struct usb_device *dev, int idx,
+ const struct snd_usb_audio_quirk *quirk,
+ struct snd_usb_audio **rchip)
+{
+ struct snd_card *card;
+ struct snd_usb_audio *chip;
+ int err, len;
+ char component[14];
+ static struct snd_device_ops ops = {
+ .dev_free = snd_usb_audio_dev_free,
+ };
+
+ *rchip = NULL;
+
+ if (snd_usb_get_speed(dev) != USB_SPEED_LOW &&
+ snd_usb_get_speed(dev) != USB_SPEED_FULL &&
+ snd_usb_get_speed(dev) != USB_SPEED_HIGH) {
+ snd_printk(KERN_ERR "unknown device speed %d\n", snd_usb_get_speed(dev));
+ return -ENXIO;
+ }
+
+ err = snd_card_create(index[idx], id[idx], THIS_MODULE, 0, &card);
+ if (err < 0) {
+ snd_printk(KERN_ERR "cannot create card instance %d\n", idx);
+ return err;
+ }
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (! chip) {
+ snd_card_free(card);
+ return -ENOMEM;
+ }
+
+ chip->index = idx;
+ chip->dev = dev;
+ chip->card = card;
+ chip->setup = device_setup[idx];
+ chip->nrpacks = nrpacks;
+ chip->async_unlink = async_unlink;
+
+ chip->usb_id = USB_ID(le16_to_cpu(dev->descriptor.idVendor),
+ le16_to_cpu(dev->descriptor.idProduct));
+ INIT_LIST_HEAD(&chip->pcm_list);
+ INIT_LIST_HEAD(&chip->midi_list);
+ INIT_LIST_HEAD(&chip->mixer_list);
+
+ if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
+ snd_usb_audio_free(chip);
+ snd_card_free(card);
+ return err;
+ }
+
+ strcpy(card->driver, "USB-Audio");
+ sprintf(component, "USB%04x:%04x",
+ USB_ID_VENDOR(chip->usb_id), USB_ID_PRODUCT(chip->usb_id));
+ snd_component_add(card, component);
+
+ /* retrieve the device string as shortname */
+ if (quirk && quirk->product_name) {
+ strlcpy(card->shortname, quirk->product_name, sizeof(card->shortname));
+ } else {
+ if (!dev->descriptor.iProduct ||
+ usb_string(dev, dev->descriptor.iProduct,
+ card->shortname, sizeof(card->shortname)) <= 0) {
+ /* no name available from anywhere, so use ID */
+ sprintf(card->shortname, "USB Device %#04x:%#04x",
+ USB_ID_VENDOR(chip->usb_id),
+ USB_ID_PRODUCT(chip->usb_id));
+ }
+ }
+
+ /* retrieve the vendor and device strings as longname */
+ if (quirk && quirk->vendor_name) {
+ len = strlcpy(card->longname, quirk->vendor_name, sizeof(card->longname));
+ } else {
+ if (dev->descriptor.iManufacturer)
+ len = usb_string(dev, dev->descriptor.iManufacturer,
+ card->longname, sizeof(card->longname));
+ else
+ len = 0;
+ /* we don't really care if there isn't any vendor string */
+ }
+ if (len > 0)
+ strlcat(card->longname, " ", sizeof(card->longname));
+
+ strlcat(card->longname, card->shortname, sizeof(card->longname));
+
+ len = strlcat(card->longname, " at ", sizeof(card->longname));
+
+ if (len < sizeof(card->longname))
+ usb_make_path(dev, card->longname + len, sizeof(card->longname) - len);
+
+ strlcat(card->longname,
+ snd_usb_get_speed(dev) == USB_SPEED_LOW ? ", low speed" :
+ snd_usb_get_speed(dev) == USB_SPEED_FULL ? ", full speed" :
+ ", high speed",
+ sizeof(card->longname));
+
+ snd_usb_audio_create_proc(chip);
+
+ *rchip = chip;
+ return 0;
+}
+
+/*
+ * probe the active usb device
+ *
+ * note that this can be called multiple times per device, when it
+ * includes multiple audio control interfaces.
+ *
+ * thus we check the usb device pointer and create the card instance
+ * only on the first call. successive calls of this function will
+ * append the pcm interface to the corresponding card.
+ */
+static void *snd_usb_audio_probe(struct usb_device *dev,
+ struct usb_interface *intf,
+ const struct usb_device_id *usb_id)
+{
+ const struct snd_usb_audio_quirk *quirk = (const struct snd_usb_audio_quirk *)usb_id->driver_info;
+ int i, err;
+ struct snd_usb_audio *chip;
+ struct usb_host_interface *alts;
+ int ifnum;
+ u32 id;
+
+ alts = &intf->altsetting[0];
+ ifnum = get_iface_desc(alts)->bInterfaceNumber;
+ id = USB_ID(le16_to_cpu(dev->descriptor.idVendor),
+ le16_to_cpu(dev->descriptor.idProduct));
+ if (quirk && quirk->ifnum >= 0 && ifnum != quirk->ifnum)
+ goto __err_val;
+
+ if (snd_usb_apply_boot_quirk(dev, intf, quirk) < 0)
+ goto __err_val;
+
+ /*
+ * found a config. now register to ALSA
+ */
+
+ /* check whether it's already registered */
+ chip = NULL;
+ mutex_lock(&register_mutex);
+ for (i = 0; i < SNDRV_CARDS; i++) {
+ if (usb_chip[i] && usb_chip[i]->dev == dev) {
+ if (usb_chip[i]->shutdown) {
+ snd_printk(KERN_ERR "USB device is in the shutdown state, cannot create a card instance\n");
+ goto __error;
+ }
+ chip = usb_chip[i];
+ break;
+ }
+ }
+ if (! chip) {
+ /* it's a fresh one.
+ * now look for an empty slot and create a new card instance
+ */
+ for (i = 0; i < SNDRV_CARDS; i++)
+ if (enable[i] && ! usb_chip[i] &&
+ (vid[i] == -1 || vid[i] == USB_ID_VENDOR(id)) &&
+ (pid[i] == -1 || pid[i] == USB_ID_PRODUCT(id))) {
+ if (snd_usb_audio_create(dev, i, quirk, &chip) < 0) {
+ goto __error;
+ }
+ snd_card_set_dev(chip->card, &intf->dev);
+ break;
+ }
+ if (!chip) {
+ printk(KERN_ERR "no available usb audio device\n");
+ goto __error;
+ }
+ }
+
+ chip->txfr_quirk = 0;
+ err = 1; /* continue */
+ if (quirk && quirk->ifnum != QUIRK_NO_INTERFACE) {
+ /* need some special handling */
+ if ((err = snd_usb_create_quirk(chip, intf, &usb_audio_driver, quirk)) < 0)
+ goto __error;
+ }
+
+ if (err > 0) {
+ /* create normal USB audio interfaces */
+ if (snd_usb_create_streams(chip, ifnum) < 0 ||
+ snd_usb_create_mixer(chip, ifnum, ignore_ctl_error) < 0) {
+ goto __error;
+ }
+ }
+
+ /* we are allowed to call snd_card_register() many times */
+ if (snd_card_register(chip->card) < 0) {
+ goto __error;
+ }
+
+ usb_chip[chip->index] = chip;
+ chip->num_interfaces++;
+ mutex_unlock(&register_mutex);
+ return chip;
+
+ __error:
+ if (chip && !chip->num_interfaces)
+ snd_card_free(chip->card);
+ mutex_unlock(&register_mutex);
+ __err_val:
+ return NULL;
+}
+
+/*
+ * we need to take care of the interface counter, since disconnect can be
+ * called multiple times, just like usb_audio_probe().
+ */
+static void snd_usb_audio_disconnect(struct usb_device *dev, void *ptr)
+{
+ struct snd_usb_audio *chip;
+ struct snd_card *card;
+ struct list_head *p;
+
+ if (ptr == (void *)-1L)
+ return;
+
+ chip = ptr;
+ card = chip->card;
+ mutex_lock(&register_mutex);
+ chip->shutdown = 1;
+ chip->num_interfaces--;
+ if (chip->num_interfaces <= 0) {
+ snd_card_disconnect(card);
+ /* release the pcm resources */
+ list_for_each(p, &chip->pcm_list) {
+ snd_usb_stream_disconnect(p);
+ }
+ /* release the midi resources */
+ list_for_each(p, &chip->midi_list) {
+ snd_usbmidi_disconnect(p);
+ }
+ /* release mixer resources */
+ list_for_each(p, &chip->mixer_list) {
+ snd_usb_mixer_disconnect(p);
+ }
+ usb_chip[chip->index] = NULL;
+ mutex_unlock(&register_mutex);
+ snd_card_free_when_closed(card);
+ } else {
+ mutex_unlock(&register_mutex);
+ }
+}
+
+/*
+ * new 2.5 USB kernel API
+ */
+static int usb_audio_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ void *chip;
+ chip = snd_usb_audio_probe(interface_to_usbdev(intf), intf, id);
+ if (chip) {
+ usb_set_intfdata(intf, chip);
+ return 0;
+ } else
+ return -EIO;
+}
+
+static void usb_audio_disconnect(struct usb_interface *intf)
+{
+ snd_usb_audio_disconnect(interface_to_usbdev(intf),
+ usb_get_intfdata(intf));
+}
+
+#ifdef CONFIG_PM
+static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct snd_usb_audio *chip = usb_get_intfdata(intf);
+ struct list_head *p;
+ struct snd_usb_stream *as;
+
+ if (chip == (void *)-1L)
+ return 0;
+
+ snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot);
+ if (!chip->num_suspended_intf++) {
+ list_for_each(p, &chip->pcm_list) {
+ as = list_entry(p, struct snd_usb_stream, list);
+ snd_pcm_suspend_all(as->pcm);
+ }
+ }
+
+ return 0;
+}
+
+static int usb_audio_resume(struct usb_interface *intf)
+{
+ struct snd_usb_audio *chip = usb_get_intfdata(intf);
+
+ if (chip == (void *)-1L)
+ return 0;
+ if (--chip->num_suspended_intf)
+ return 0;
+ /*
+ * ALSA leaves the actual resume handling to user space;
+ * we just notify the power state change here
+ */
+
+ snd_power_change_state(chip->card, SNDRV_CTL_POWER_D0);
+
+ return 0;
+}
+#else
+#define usb_audio_suspend NULL
+#define usb_audio_resume NULL
+#endif /* CONFIG_PM */
+
+static struct usb_device_id usb_audio_ids [] = {
+#include "quirks-table.h"
+ { .match_flags = (USB_DEVICE_ID_MATCH_INT_CLASS | USB_DEVICE_ID_MATCH_INT_SUBCLASS),
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL },
+ { } /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE (usb, usb_audio_ids);
+
+/*
+ * entry point for linux usb interface
+ */
+
+static struct usb_driver usb_audio_driver = {
+ .name = "snd-usb-audio",
+ .probe = usb_audio_probe,
+ .disconnect = usb_audio_disconnect,
+ .suspend = usb_audio_suspend,
+ .resume = usb_audio_resume,
+ .id_table = usb_audio_ids,
+};
+
+static int __init snd_usb_audio_init(void)
+{
+ if (nrpacks < 1 || nrpacks > MAX_PACKS) {
+ printk(KERN_WARNING "invalid nrpacks value.\n");
+ return -EINVAL;
+ }
+ return usb_register(&usb_audio_driver);
+}
+
+static void __exit snd_usb_audio_cleanup(void)
+{
+ usb_deregister(&usb_audio_driver);
+}
+
+module_init(snd_usb_audio_init);
+module_exit(snd_usb_audio_cleanup);
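The card code above keys almost everything off chip->usb_id, built with USB_ID() and split back out with USB_ID_VENDOR()/USB_ID_PRODUCT(); the macros themselves are presumably defined in usbaudio.h rather than in this file. A minimal sketch of the assumed packing, consistent with the "%04x:%04x" formatting and the vid[]/pid[] matching above (the EXAMPLE_* names are illustrative, not the real macros):

/* assumed layout: 16-bit idVendor in the high half, idProduct in the low half */
#define EXAMPLE_USB_ID(vendor, product)  (((u32)(vendor) << 16) | (u16)(product))
#define EXAMPLE_USB_ID_VENDOR(id)        ((u16)((id) >> 16))
#define EXAMPLE_USB_ID_PRODUCT(id)       ((u16)(id))

With such a packing, the vid[]/pid[] module parameters checked in snd_usb_audio_probe() simply compare against the two 16-bit halves of the descriptor's idVendor/idProduct.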
diff --git a/sound/usb/card.h b/sound/usb/card.h
new file mode 100644
index 000000000000..ed92420c1095
--- /dev/null
+++ b/sound/usb/card.h
@@ -0,0 +1,105 @@
+#ifndef __USBAUDIO_CARD_H
+#define __USBAUDIO_CARD_H
+
+#define MAX_PACKS 20
+#define MAX_PACKS_HS (MAX_PACKS * 8) /* in high speed mode */
+#define MAX_URBS 8
+#define SYNC_URBS 4 /* always four urbs for sync */
+#define MAX_QUEUE 24 /* try not to exceed this queue length, in ms */
+
+struct audioformat {
+ struct list_head list;
+ u64 formats; /* ALSA format bits */
+ unsigned int channels; /* # channels */
+ unsigned int fmt_type; /* USB audio format type (1-3) */
+ unsigned int frame_size; /* samples per frame for non-audio */
+ int iface; /* interface number */
+ unsigned char altsetting; /* corresponding alternate setting */
+ unsigned char altset_idx; /* array index of alternate setting */
+ unsigned char attributes; /* corresponding attributes of cs endpoint */
+ unsigned char endpoint; /* endpoint */
+ unsigned char ep_attr; /* endpoint attributes */
+ unsigned char datainterval; /* log_2 of data packet interval */
+ unsigned int maxpacksize; /* max. packet size */
+ unsigned int rates; /* rate bitmasks */
+ unsigned int rate_min, rate_max; /* min/max rates */
+ unsigned int nr_rates; /* number of rate table entries */
+ unsigned int *rate_table; /* rate table */
+};
+
+struct snd_usb_substream;
+
+struct snd_urb_ctx {
+ struct urb *urb;
+ unsigned int buffer_size; /* size of data buffer, if data URB */
+ struct snd_usb_substream *subs;
+ int index; /* index for urb array */
+ int packets; /* number of packets per urb */
+};
+
+struct snd_urb_ops {
+ int (*prepare)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
+ int (*retire)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
+ int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
+ int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
+};
+
+struct snd_usb_substream {
+ struct snd_usb_stream *stream;
+ struct usb_device *dev;
+ struct snd_pcm_substream *pcm_substream;
+ int direction; /* playback or capture */
+ int interface; /* current interface */
+ int endpoint; /* assigned endpoint */
+ struct audioformat *cur_audiofmt; /* current audioformat pointer (for hw_params callback) */
+ unsigned int cur_rate; /* current rate (for hw_params callback) */
+ unsigned int period_bytes; /* current period bytes (for hw_params callback) */
+ unsigned int altset_idx; /* USB data format: index of alternate setting */
+ unsigned int datapipe; /* the data i/o pipe */
+ unsigned int syncpipe; /* 1 - async out or adaptive in */
+ unsigned int datainterval; /* log_2 of data packet interval */
+ unsigned int syncinterval; /* P for adaptive mode, 0 otherwise */
+ unsigned int freqn; /* nominal sampling rate in fs/fps in Q16.16 format */
+ unsigned int freqm; /* momentary sampling rate in fs/fps in Q16.16 format */
+ unsigned int freqmax; /* maximum sampling rate, used for buffer management */
+ unsigned int phase; /* phase accumulator */
+ unsigned int maxpacksize; /* max packet size in bytes */
+ unsigned int maxframesize; /* max packet size in frames */
+ unsigned int curpacksize; /* current packet size in bytes (for capture) */
+ unsigned int curframesize; /* current packet size in frames (for capture) */
+ unsigned int fill_max: 1; /* fill max packet size always */
+ unsigned int txfr_quirk:1; /* allow sub-frame alignment */
+ unsigned int fmt_type; /* USB audio format type (1-3) */
+
+ unsigned int running: 1; /* running status */
+
+ unsigned int hwptr_done; /* processed byte position in the buffer */
+ unsigned int transfer_done; /* processed frames since last period update */
+ unsigned long active_mask; /* bitmask of active urbs */
+ unsigned long unlink_mask; /* bitmask of unlinked urbs */
+
+ unsigned int nurbs; /* # urbs */
+ struct snd_urb_ctx dataurb[MAX_URBS]; /* data urb table */
+ struct snd_urb_ctx syncurb[SYNC_URBS]; /* sync urb table */
+ char *syncbuf; /* sync buffer for all sync URBs */
+ dma_addr_t sync_dma; /* DMA address of syncbuf */
+
+ u64 formats; /* format bitmasks (all or'ed) */
+ unsigned int num_formats; /* number of supported audio formats (list) */
+ struct list_head fmt_list; /* format list */
+ struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
+ spinlock_t lock;
+
+ struct snd_urb_ops ops; /* callbacks (must be filled at init) */
+};
+
+struct snd_usb_stream {
+ struct snd_usb_audio *chip;
+ struct snd_pcm *pcm;
+ int pcm_index;
+ unsigned int fmt_type; /* USB audio format type (1-3) */
+ struct snd_usb_substream substream[2];
+ struct list_head list;
+};
+
+#endif /* __USBAUDIO_CARD_H */
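The freqn/freqm/phase fields above drive fractional packet sizing: the rate is tracked as samples per USB (micro)frame in Q16.16 fixed point. A minimal sketch of that conversion, assuming 1000 frames/s on full-speed and 8000 microframes/s on high-speed links (example_nominal_freq is illustrative, not a function from this driver):

/* samples per (micro)frame in Q16.16; e.g. 48000 Hz at full speed -> 0x300000 (48.0) */
static unsigned int example_nominal_freq(unsigned int rate_hz, unsigned int fps)
{
	return (unsigned int)(((unsigned long long)rate_hz << 16) / fps);
}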
diff --git a/sound/usb/debug.h b/sound/usb/debug.h
new file mode 100644
index 000000000000..343ec2d9ee66
--- /dev/null
+++ b/sound/usb/debug.h
@@ -0,0 +1,15 @@
+#ifndef __USBAUDIO_DEBUG_H
+#define __USBAUDIO_DEBUG_H
+
+/*
+ * h/w constraints
+ */
+
+#ifdef HW_CONST_DEBUG
+#define hwc_debug(fmt, args...) printk(KERN_DEBUG fmt, ##args)
+#else
+#define hwc_debug(fmt, args...) /**/
+#endif
+
+#endif /* __USBAUDIO_DEBUG_H */
+
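hwc_debug() above compiles away to nothing unless HW_CONST_DEBUG is defined before this header is pulled in. A hypothetical caller in the hw-constraint code would enable it like this (the rate fields come from struct audioformat):

#define HW_CONST_DEBUG
#include "debug.h"

	/* emitted via printk(KERN_DEBUG ...) only when HW_CONST_DEBUG is defined */
	hwc_debug("hw constraint: rate %u..%u\n", fp->rate_min, fp->rate_max);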
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
new file mode 100644
index 000000000000..ef07a6d0dd5f
--- /dev/null
+++ b/sound/usb/endpoint.c
@@ -0,0 +1,362 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/usb/audio.h>
+#include <linux/usb/audio-v2.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+
+#include "usbaudio.h"
+#include "card.h"
+#include "proc.h"
+#include "quirks.h"
+#include "endpoint.h"
+#include "urb.h"
+#include "pcm.h"
+#include "helper.h"
+#include "format.h"
+
+/*
+ * free a substream
+ */
+static void free_substream(struct snd_usb_substream *subs)
+{
+ struct list_head *p, *n;
+
+ if (!subs->num_formats)
+ return; /* not initialized */
+ list_for_each_safe(p, n, &subs->fmt_list) {
+ struct audioformat *fp = list_entry(p, struct audioformat, list);
+ kfree(fp->rate_table);
+ kfree(fp);
+ }
+ kfree(subs->rate_list.list);
+}
+
+
+/*
+ * free a usb stream instance
+ */
+static void snd_usb_audio_stream_free(struct snd_usb_stream *stream)
+{
+ free_substream(&stream->substream[0]);
+ free_substream(&stream->substream[1]);
+ list_del(&stream->list);
+ kfree(stream);
+}
+
+static void snd_usb_audio_pcm_free(struct snd_pcm *pcm)
+{
+ struct snd_usb_stream *stream = pcm->private_data;
+ if (stream) {
+ stream->pcm = NULL;
+ snd_usb_audio_stream_free(stream);
+ }
+}
+
+
+/*
+ * add this endpoint to the chip instance.
+ * if a stream with the same endpoint already exists, append to it.
+ * if not, create a new pcm stream.
+ */
+int snd_usb_add_audio_endpoint(struct snd_usb_audio *chip, int stream, struct audioformat *fp)
+{
+ struct list_head *p;
+ struct snd_usb_stream *as;
+ struct snd_usb_substream *subs;
+ struct snd_pcm *pcm;
+ int err;
+
+ list_for_each(p, &chip->pcm_list) {
+ as = list_entry(p, struct snd_usb_stream, list);
+ if (as->fmt_type != fp->fmt_type)
+ continue;
+ subs = &as->substream[stream];
+ if (!subs->endpoint)
+ continue;
+ if (subs->endpoint == fp->endpoint) {
+ list_add_tail(&fp->list, &subs->fmt_list);
+ subs->num_formats++;
+ subs->formats |= fp->formats;
+ return 0;
+ }
+ }
+ /* look for an empty stream */
+ list_for_each(p, &chip->pcm_list) {
+ as = list_entry(p, struct snd_usb_stream, list);
+ if (as->fmt_type != fp->fmt_type)
+ continue;
+ subs = &as->substream[stream];
+ if (subs->endpoint)
+ continue;
+ err = snd_pcm_new_stream(as->pcm, stream, 1);
+ if (err < 0)
+ return err;
+ snd_usb_init_substream(as, stream, fp);
+ return 0;
+ }
+
+ /* create a new pcm */
+ as = kzalloc(sizeof(*as), GFP_KERNEL);
+ if (!as)
+ return -ENOMEM;
+ as->pcm_index = chip->pcm_devs;
+ as->chip = chip;
+ as->fmt_type = fp->fmt_type;
+ err = snd_pcm_new(chip->card, "USB Audio", chip->pcm_devs,
+ stream == SNDRV_PCM_STREAM_PLAYBACK ? 1 : 0,
+ stream == SNDRV_PCM_STREAM_PLAYBACK ? 0 : 1,
+ &pcm);
+ if (err < 0) {
+ kfree(as);
+ return err;
+ }
+ as->pcm = pcm;
+ pcm->private_data = as;
+ pcm->private_free = snd_usb_audio_pcm_free;
+ pcm->info_flags = 0;
+ if (chip->pcm_devs > 0)
+ sprintf(pcm->name, "USB Audio #%d", chip->pcm_devs);
+ else
+ strcpy(pcm->name, "USB Audio");
+
+ snd_usb_init_substream(as, stream, fp);
+
+ list_add(&as->list, &chip->pcm_list);
+ chip->pcm_devs++;
+
+ snd_usb_proc_pcm_format_add(as);
+
+ return 0;
+}
+
+int snd_usb_parse_audio_endpoints(struct snd_usb_audio *chip, int iface_no)
+{
+ struct usb_device *dev;
+ struct usb_interface *iface;
+ struct usb_host_interface *alts;
+ struct usb_interface_descriptor *altsd;
+ int i, altno, err, stream;
+ int format = 0, num_channels = 0;
+ struct audioformat *fp = NULL;
+ unsigned char *fmt, *csep;
+ int num, protocol;
+
+ dev = chip->dev;
+
+ /* parse the interface's altsettings */
+ iface = usb_ifnum_to_if(dev, iface_no);
+
+ num = iface->num_altsetting;
+
+ /*
+ * Dallas DS4201 workaround: It presents 5 altsettings, but the last
+ * one lacks a sync pipe and does not produce any sound.
+ */
+ if (chip->usb_id == USB_ID(0x04fa, 0x4201))
+ num = 4;
+
+ for (i = 0; i < num; i++) {
+ alts = &iface->altsetting[i];
+ altsd = get_iface_desc(alts);
+ protocol = altsd->bInterfaceProtocol;
+ /* skip invalid one */
+ if ((altsd->bInterfaceClass != USB_CLASS_AUDIO &&
+ altsd->bInterfaceClass != USB_CLASS_VENDOR_SPEC) ||
+ (altsd->bInterfaceSubClass != USB_SUBCLASS_AUDIOSTREAMING &&
+ altsd->bInterfaceSubClass != USB_SUBCLASS_VENDOR_SPEC) ||
+ altsd->bNumEndpoints < 1 ||
+ le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize) == 0)
+ continue;
+ /* must be isochronous */
+ if ((get_endpoint(alts, 0)->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) !=
+ USB_ENDPOINT_XFER_ISOC)
+ continue;
+ /* check direction */
+ stream = (get_endpoint(alts, 0)->bEndpointAddress & USB_DIR_IN) ?
+ SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
+ altno = altsd->bAlternateSetting;
+
+ if (snd_usb_apply_interface_quirk(chip, iface_no, altno))
+ continue;
+
+ /* get audio formats */
+ switch (protocol) {
+ case UAC_VERSION_1: {
+ struct uac_as_header_descriptor_v1 *as =
+ snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, UAC_AS_GENERAL);
+
+ if (!as) {
+ snd_printk(KERN_ERR "%d:%u:%d : UAC_AS_GENERAL descriptor not found\n",
+ dev->devnum, iface_no, altno);
+ continue;
+ }
+
+ if (as->bLength < sizeof(*as)) {
+ snd_printk(KERN_ERR "%d:%u:%d : invalid UAC_AS_GENERAL desc\n",
+ dev->devnum, iface_no, altno);
+ continue;
+ }
+
+ format = le16_to_cpu(as->wFormatTag); /* remember the format value */
+ break;
+ }
+
+ case UAC_VERSION_2: {
+ struct uac_as_header_descriptor_v2 *as =
+ snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, UAC_AS_GENERAL);
+
+ if (!as) {
+ snd_printk(KERN_ERR "%d:%u:%d : UAC_AS_GENERAL descriptor not found\n",
+ dev->devnum, iface_no, altno);
+ continue;
+ }
+
+ if (as->bLength < sizeof(*as)) {
+ snd_printk(KERN_ERR "%d:%u:%d : invalid UAC_AS_GENERAL desc\n",
+ dev->devnum, iface_no, altno);
+ continue;
+ }
+
+ num_channels = as->bNrChannels;
+ format = le32_to_cpu(as->bmFormats);
+
+ break;
+ }
+
+ default:
+ snd_printk(KERN_ERR "%d:%u:%d : unknown interface protocol %04x\n",
+ dev->devnum, iface_no, altno, protocol);
+ continue;
+ }
+
+ /* get format type */
+ fmt = snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, UAC_FORMAT_TYPE);
+ if (!fmt) {
+ snd_printk(KERN_ERR "%d:%u:%d : no UAC_FORMAT_TYPE desc\n",
+ dev->devnum, iface_no, altno);
+ continue;
+ }
+ if (((protocol == UAC_VERSION_1) && (fmt[0] < 8)) ||
+ ((protocol == UAC_VERSION_2) && (fmt[0] != 6))) {
+ snd_printk(KERN_ERR "%d:%u:%d : invalid UAC_FORMAT_TYPE desc\n",
+ dev->devnum, iface_no, altno);
+ continue;
+ }
+
+ /*
+ * Blue Microphones workaround: The last altsetting is identical
+ * to the previous one, except for a larger packet size, but
+ * is actually a mislabeled two-channel setting; ignore it.
+ */
+ if (fmt[4] == 1 && fmt[5] == 2 && altno == 2 && num == 3 &&
+ fp && fp->altsetting == 1 && fp->channels == 1 &&
+ fp->formats == SNDRV_PCM_FMTBIT_S16_LE &&
+ protocol == UAC_VERSION_1 &&
+ le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize) ==
+ fp->maxpacksize * 2)
+ continue;
+
+ csep = snd_usb_find_desc(alts->endpoint[0].extra, alts->endpoint[0].extralen, NULL, USB_DT_CS_ENDPOINT);
+ /* Creamware Noah has this descriptor after the 2nd endpoint */
+ if (!csep && altsd->bNumEndpoints >= 2)
+ csep = snd_usb_find_desc(alts->endpoint[1].extra, alts->endpoint[1].extralen, NULL, USB_DT_CS_ENDPOINT);
+ if (!csep || csep[0] < 7 || csep[2] != UAC_EP_GENERAL) {
+ snd_printk(KERN_WARNING "%d:%u:%d : no or invalid"
+ " class specific endpoint descriptor\n",
+ dev->devnum, iface_no, altno);
+ csep = NULL;
+ }
+
+ fp = kzalloc(sizeof(*fp), GFP_KERNEL);
+ if (! fp) {
+ snd_printk(KERN_ERR "cannot malloc\n");
+ return -ENOMEM;
+ }
+
+ fp->iface = iface_no;
+ fp->altsetting = altno;
+ fp->altset_idx = i;
+ fp->endpoint = get_endpoint(alts, 0)->bEndpointAddress;
+ fp->ep_attr = get_endpoint(alts, 0)->bmAttributes;
+ fp->datainterval = snd_usb_parse_datainterval(chip, alts);
+ fp->maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize);
+ /* num_channels is only set for v2 interfaces */
+ fp->channels = num_channels;
+ if (snd_usb_get_speed(dev) == USB_SPEED_HIGH)
+ fp->maxpacksize = (((fp->maxpacksize >> 11) & 3) + 1)
+ * (fp->maxpacksize & 0x7ff);
+ fp->attributes = csep ? csep[3] : 0;
+
+ /* some quirks for attributes here */
+
+ switch (chip->usb_id) {
+ case USB_ID(0x0a92, 0x0053): /* AudioTrak Optoplay */
+ /* Optoplay sets the sample rate attribute although
+ * it does not actually seem to support it.
+ */
+ fp->attributes &= ~UAC_EP_CS_ATTR_SAMPLE_RATE;
+ break;
+ case USB_ID(0x041e, 0x3020): /* Creative SB Audigy 2 NX */
+ case USB_ID(0x0763, 0x2003): /* M-Audio Audiophile USB */
+ case USB_ID(0x0763, 0x2080): /* M-Audio Fast Track Ultra 8 */
+ case USB_ID(0x0763, 0x2081): /* M-Audio Fast Track Ultra 8R */
+ /* doesn't set the sample rate attribute, but supports it */
+ fp->attributes |= UAC_EP_CS_ATTR_SAMPLE_RATE;
+ break;
+ case USB_ID(0x047f, 0x0ca1): /* plantronics headset */
+ case USB_ID(0x077d, 0x07af): /* Griffin iMic (note that there is
+ an older model 77d:223) */
+ /*
+ * the Plantronics headset and Griffin iMic report adaptive-in sync
+ * although they really are not...
+ */
+ fp->ep_attr &= ~USB_ENDPOINT_SYNCTYPE;
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+ fp->ep_attr |= USB_ENDPOINT_SYNC_ADAPTIVE;
+ else
+ fp->ep_attr |= USB_ENDPOINT_SYNC_SYNC;
+ break;
+ }
+
+ /* ok, let's parse further... */
+ if (snd_usb_parse_audio_format(chip, fp, format, fmt, stream, alts) < 0) {
+ kfree(fp->rate_table);
+ kfree(fp);
+ continue;
+ }
+
+ snd_printdd(KERN_INFO "%d:%u:%d: add audio endpoint %#x\n", dev->devnum, iface_no, altno, fp->endpoint);
+ err = snd_usb_add_audio_endpoint(chip, stream, fp);
+ if (err < 0) {
+ kfree(fp->rate_table);
+ kfree(fp);
+ return err;
+ }
+ /* try to set the interface... */
+ usb_set_interface(chip->dev, iface_no, altno);
+ snd_usb_init_pitch(chip, iface_no, alts, fp);
+ snd_usb_init_sample_rate(chip, iface_no, alts, fp, fp->rate_max);
+ }
+ return 0;
+}
+
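The high-speed branch in snd_usb_parse_audio_endpoints() above decodes USB 2.0's packed wMaxPacketSize: bits 12:11 carry the number of additional transactions per microframe and bits 10:0 the base packet size. A worked example with an assumed raw value of 0x0c00:

	/* raw wMaxPacketSize 0x0c00: bits 12:11 = 1 -> 2 transactions per microframe,
	 * bits 10:0 = 0x400 = 1024 bytes each, so maxpacksize becomes 2048 bytes */
	maxpacksize = (((0x0c00 >> 11) & 3) + 1) * (0x0c00 & 0x7ff);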
diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h
new file mode 100644
index 000000000000..64dd0db023b2
--- /dev/null
+++ b/sound/usb/endpoint.h
@@ -0,0 +1,11 @@
+#ifndef __USBAUDIO_ENDPOINT_H
+#define __USBAUDIO_ENDPOINT_H
+
+int snd_usb_parse_audio_endpoints(struct snd_usb_audio *chip,
+ int iface_no);
+
+int snd_usb_add_audio_endpoint(struct snd_usb_audio *chip,
+ int stream,
+ struct audioformat *fp);
+
+#endif /* __USBAUDIO_ENDPOINT_H */
diff --git a/sound/usb/format.c b/sound/usb/format.c
new file mode 100644
index 000000000000..b87cf87c4e7b
--- /dev/null
+++ b/sound/usb/format.c
@@ -0,0 +1,432 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/usb/audio.h>
+#include <linux/usb/audio-v2.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+
+#include "usbaudio.h"
+#include "card.h"
+#include "quirks.h"
+#include "helper.h"
+#include "debug.h"
+
+/*
+ * parse the audio format type I descriptor
+ * and return the corresponding pcm format
+ *
+ * @dev: usb device
+ * @fp: audioformat record
+ * @format: the format tag (wFormatTag)
+ * @fmt: the format type descriptor
+ */
+static u64 parse_audio_format_i_type(struct snd_usb_audio *chip,
+ struct audioformat *fp,
+ int format, void *_fmt,
+ int protocol)
+{
+ int sample_width, sample_bytes;
+ u64 pcm_formats;
+
+ switch (protocol) {
+ case UAC_VERSION_1: {
+ struct uac_format_type_i_discrete_descriptor *fmt = _fmt;
+ sample_width = fmt->bBitResolution;
+ sample_bytes = fmt->bSubframeSize;
+ format = 1 << format;
+ break;
+ }
+
+ case UAC_VERSION_2: {
+ struct uac_format_type_i_ext_descriptor *fmt = _fmt;
+ sample_width = fmt->bBitResolution;
+ sample_bytes = fmt->bSubslotSize;
+ format <<= 1;
+ break;
+ }
+
+ default:
+ return -EINVAL;
+ }
+
+ pcm_formats = 0;
+
+ if (format == 0 || format == (1 << UAC_FORMAT_TYPE_I_UNDEFINED)) {
+ /* some devices don't define this correctly... */
+ snd_printdd(KERN_INFO "%d:%u:%d : format type 0 is detected, processed as PCM\n",
+ chip->dev->devnum, fp->iface, fp->altsetting);
+ format = 1 << UAC_FORMAT_TYPE_I_PCM;
+ }
+ if (format & (1 << UAC_FORMAT_TYPE_I_PCM)) {
+ if (sample_width > sample_bytes * 8) {
+ snd_printk(KERN_INFO "%d:%u:%d : sample bitwidth %d in over sample bytes %d\n",
+ chip->dev->devnum, fp->iface, fp->altsetting,
+ sample_width, sample_bytes);
+ }
+ /* check the format byte size */
+ switch (sample_bytes) {
+ case 1:
+ pcm_formats |= SNDRV_PCM_FMTBIT_S8;
+ break;
+ case 2:
+ if (snd_usb_is_big_endian_format(chip, fp))
+ pcm_formats |= SNDRV_PCM_FMTBIT_S16_BE; /* grrr, big endian!! */
+ else
+ pcm_formats |= SNDRV_PCM_FMTBIT_S16_LE;
+ break;
+ case 3:
+ if (snd_usb_is_big_endian_format(chip, fp))
+ pcm_formats |= SNDRV_PCM_FMTBIT_S24_3BE; /* grrr, big endian!! */
+ else
+ pcm_formats |= SNDRV_PCM_FMTBIT_S24_3LE;
+ break;
+ case 4:
+ pcm_formats |= SNDRV_PCM_FMTBIT_S32_LE;
+ break;
+ default:
+ snd_printk(KERN_INFO "%d:%u:%d : unsupported sample bitwidth %d in %d bytes\n",
+ chip->dev->devnum, fp->iface, fp->altsetting,
+ sample_width, sample_bytes);
+ break;
+ }
+ }
+ if (format & (1 << UAC_FORMAT_TYPE_I_PCM8)) {
+ /* Dallas DS4201 workaround: it advertises U8 format, but really
+ supports S8. */
+ if (chip->usb_id == USB_ID(0x04fa, 0x4201))
+ pcm_formats |= SNDRV_PCM_FMTBIT_S8;
+ else
+ pcm_formats |= SNDRV_PCM_FMTBIT_U8;
+ }
+ if (format & (1 << UAC_FORMAT_TYPE_I_IEEE_FLOAT)) {
+ pcm_formats |= SNDRV_PCM_FMTBIT_FLOAT_LE;
+ }
+ if (format & (1 << UAC_FORMAT_TYPE_I_ALAW)) {
+ pcm_formats |= SNDRV_PCM_FMTBIT_A_LAW;
+ }
+ if (format & (1 << UAC_FORMAT_TYPE_I_MULAW)) {
+ pcm_formats |= SNDRV_PCM_FMTBIT_MU_LAW;
+ }
+ if (format & ~0x3f) {
+ snd_printk(KERN_INFO "%d:%u:%d : unsupported format bits %#x\n",
+ chip->dev->devnum, fp->iface, fp->altsetting, format);
+ }
+ return pcm_formats;
+}
+
+
+/*
+ * parse the format descriptor and store the possible sample rates
+ * in the audioformat table (audio class v1).
+ *
+ * @dev: usb device
+ * @fp: audioformat record
+ * @fmt: the format descriptor
+ * @offset: the start offset of the descriptor pointing to the rate type
+ * (7 for type I and III, 8 for type II)
+ */
+static int parse_audio_format_rates_v1(struct snd_usb_audio *chip, struct audioformat *fp,
+ unsigned char *fmt, int offset)
+{
+ int nr_rates = fmt[offset];
+
+ if (fmt[0] < offset + 1 + 3 * (nr_rates ? nr_rates : 2)) {
+ snd_printk(KERN_ERR "%d:%u:%d : invalid UAC_FORMAT_TYPE desc\n",
+ chip->dev->devnum, fp->iface, fp->altsetting);
+ return -1;
+ }
+
+ if (nr_rates) {
+ /*
+ * build the rate table and bitmap flags
+ */
+ int r, idx;
+
+ fp->rate_table = kmalloc(sizeof(int) * nr_rates, GFP_KERNEL);
+ if (fp->rate_table == NULL) {
+ snd_printk(KERN_ERR "cannot malloc\n");
+ return -1;
+ }
+
+ fp->nr_rates = 0;
+ fp->rate_min = fp->rate_max = 0;
+ for (r = 0, idx = offset + 1; r < nr_rates; r++, idx += 3) {
+ unsigned int rate = combine_triple(&fmt[idx]);
+ if (!rate)
+ continue;
+ /* C-Media CM6501 mislabels its 96 kHz altsetting */
+ if (rate == 48000 && nr_rates == 1 &&
+ (chip->usb_id == USB_ID(0x0d8c, 0x0201) ||
+ chip->usb_id == USB_ID(0x0d8c, 0x0102)) &&
+ fp->altsetting == 5 && fp->maxpacksize == 392)
+ rate = 96000;
+ /* Creative VF0470 Live Cam reports 16 kHz instead of 8kHz */
+ if (rate == 16000 && chip->usb_id == USB_ID(0x041e, 0x4068))
+ rate = 8000;
+
+ fp->rate_table[fp->nr_rates] = rate;
+ if (!fp->rate_min || rate < fp->rate_min)
+ fp->rate_min = rate;
+ if (!fp->rate_max || rate > fp->rate_max)
+ fp->rate_max = rate;
+ fp->rates |= snd_pcm_rate_to_rate_bit(rate);
+ fp->nr_rates++;
+ }
+ if (!fp->nr_rates) {
+ hwc_debug("All rates were zero. Skipping format!\n");
+ return -1;
+ }
+ } else {
+ /* continuous rates */
+ fp->rates = SNDRV_PCM_RATE_CONTINUOUS;
+ fp->rate_min = combine_triple(&fmt[offset + 1]);
+ fp->rate_max = combine_triple(&fmt[offset + 4]);
+ }
+ return 0;
+}
+
+/*
+ * parse the format descriptor and store the possible sample rates
+ * in the audioformat table (audio class v2).
+ */
+static int parse_audio_format_rates_v2(struct snd_usb_audio *chip,
+ struct audioformat *fp,
+ struct usb_host_interface *iface)
+{
+ struct usb_device *dev = chip->dev;
+ unsigned char tmp[2], *data;
+ int i, nr_rates, data_size, ret = 0;
+
+ /* get the number of sample rates first by only fetching 2 bytes */
+ ret = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_RANGE,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+ UAC2_CS_CONTROL_SAM_FREQ << 8, chip->clock_id << 8,
+ tmp, sizeof(tmp), 1000);
+
+ if (ret < 0) {
+ snd_printk(KERN_ERR "unable to retrieve number of sample rates\n");
+ goto err;
+ }
+
+ nr_rates = (tmp[1] << 8) | tmp[0];
+ data_size = 2 + 12 * nr_rates;
+ data = kzalloc(data_size, GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /* now get the full information */
+ ret = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_RANGE,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+ UAC2_CS_CONTROL_SAM_FREQ << 8, chip->clock_id << 8,
+ data, data_size, 1000);
+
+ if (ret < 0) {
+ snd_printk(KERN_ERR "unable to retrieve sample rate range\n");
+ ret = -EINVAL;
+ goto err_free;
+ }
+
+ fp->rate_table = kmalloc(sizeof(int) * nr_rates, GFP_KERNEL);
+ if (!fp->rate_table) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ fp->nr_rates = 0;
+ fp->rate_min = fp->rate_max = 0;
+
+ for (i = 0; i < nr_rates; i++) {
+ int rate = combine_quad(&data[2 + 12 * i]);
+
+ fp->rate_table[fp->nr_rates] = rate;
+ if (!fp->rate_min || rate < fp->rate_min)
+ fp->rate_min = rate;
+ if (!fp->rate_max || rate > fp->rate_max)
+ fp->rate_max = rate;
+ fp->rates |= snd_pcm_rate_to_rate_bit(rate);
+ fp->nr_rates++;
+ }
+
+err_free:
+ kfree(data);
+err:
+ return ret;
+}
+
+/*
+ * parse the format type I and III descriptors
+ */
+static int parse_audio_format_i(struct snd_usb_audio *chip,
+ struct audioformat *fp,
+ int format, void *_fmt,
+ struct usb_host_interface *iface)
+{
+ struct usb_interface_descriptor *altsd = get_iface_desc(iface);
+ struct uac_format_type_i_discrete_descriptor *fmt = _fmt;
+ int protocol = altsd->bInterfaceProtocol;
+ int pcm_format, ret;
+
+ if (fmt->bFormatType == UAC_FORMAT_TYPE_III) {
+ /* FIXME: the format type is really IECxxx
+ * but we give normal PCM format to get the existing
+ * apps working...
+ */
+ switch (chip->usb_id) {
+
+ case USB_ID(0x0763, 0x2003): /* M-Audio Audiophile USB */
+ if (chip->setup == 0x00 &&
+ fp->altsetting == 6)
+ pcm_format = SNDRV_PCM_FORMAT_S16_BE;
+ else
+ pcm_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ default:
+ pcm_format = SNDRV_PCM_FORMAT_S16_LE;
+ }
+ fp->formats = 1uLL << pcm_format;
+ } else {
+ fp->formats = parse_audio_format_i_type(chip, fp, format,
+ fmt, protocol);
+ if (!fp->formats)
+ return -1;
+ }
+
+ /* gather possible sample rates */
+ /* audio class v1 reports possible sample rates as part of the
+ * proprietary class specific descriptor.
+ * audio class v2 uses class specific EP0 range requests for that.
+ */
+ switch (protocol) {
+ case UAC_VERSION_1:
+ fp->channels = fmt->bNrChannels;
+ ret = parse_audio_format_rates_v1(chip, fp, _fmt, 7);
+ break;
+ case UAC_VERSION_2:
+ /* fp->channels is already set in this case */
+ ret = parse_audio_format_rates_v2(chip, fp, iface);
+ break;
+ }
+
+ if (fp->channels < 1) {
+ snd_printk(KERN_ERR "%d:%u:%d : invalid channels %d\n",
+ chip->dev->devnum, fp->iface, fp->altsetting, fp->channels);
+ return -1;
+ }
+
+ return ret;
+}
+
+/*
+ * parse the format type II descriptor
+ */
+static int parse_audio_format_ii(struct snd_usb_audio *chip,
+ struct audioformat *fp,
+ int format, void *_fmt,
+ struct usb_host_interface *iface)
+{
+ int brate, framesize, ret;
+ struct usb_interface_descriptor *altsd = get_iface_desc(iface);
+ int protocol = altsd->bInterfaceProtocol;
+
+ switch (format) {
+ case UAC_FORMAT_TYPE_II_AC3:
+ /* FIXME: there is no AC3 format defined yet */
+ // fp->formats = SNDRV_PCM_FMTBIT_AC3;
+ fp->formats = SNDRV_PCM_FMTBIT_U8; /* temporary hack to receive byte streams */
+ break;
+ case UAC_FORMAT_TYPE_II_MPEG:
+ fp->formats = SNDRV_PCM_FMTBIT_MPEG;
+ break;
+ default:
+ snd_printd(KERN_INFO "%d:%u:%d : unknown format tag %#x is detected. processed as MPEG.\n",
+ chip->dev->devnum, fp->iface, fp->altsetting, format);
+ fp->formats = SNDRV_PCM_FMTBIT_MPEG;
+ break;
+ }
+
+ fp->channels = 1;
+
+ switch (protocol) {
+ case UAC_VERSION_1: {
+ struct uac_format_type_ii_discrete_descriptor *fmt = _fmt;
+ brate = le16_to_cpu(fmt->wMaxBitRate);
+ framesize = le16_to_cpu(fmt->wSamplesPerFrame);
+ snd_printd(KERN_INFO "found format II with max.bitrate = %d, frame size=%d\n", brate, framesize);
+ fp->frame_size = framesize;
+ ret = parse_audio_format_rates_v1(chip, fp, _fmt, 8); /* fmt[8..] sample rates */
+ break;
+ }
+ case UAC_VERSION_2: {
+ struct uac_format_type_ii_ext_descriptor *fmt = _fmt;
+ brate = le16_to_cpu(fmt->wMaxBitRate);
+ framesize = le16_to_cpu(fmt->wSamplesPerFrame);
+ snd_printd(KERN_INFO "found format II with max.bitrate = %d, frame size=%d\n", brate, framesize);
+ fp->frame_size = framesize;
+ ret = parse_audio_format_rates_v2(chip, fp, iface);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+int snd_usb_parse_audio_format(struct snd_usb_audio *chip, struct audioformat *fp,
+ int format, unsigned char *fmt, int stream,
+ struct usb_host_interface *iface)
+{
+ int err;
+
+ switch (fmt[3]) {
+ case UAC_FORMAT_TYPE_I:
+ case UAC_FORMAT_TYPE_III:
+ err = parse_audio_format_i(chip, fp, format, fmt, iface);
+ break;
+ case UAC_FORMAT_TYPE_II:
+ err = parse_audio_format_ii(chip, fp, format, fmt, iface);
+ break;
+ default:
+ snd_printd(KERN_INFO "%d:%u:%d : format type %d is not supported yet\n",
+ chip->dev->devnum, fp->iface, fp->altsetting, fmt[3]);
+ return -1;
+ }
+ fp->fmt_type = fmt[3];
+ if (err < 0)
+ return err;
+#if 1
+ /* FIXME: temporary hack for extigy/audigy 2 nx/zs */
+ /* extigy apparently supports sample rates other than 48k
+ * but not in the ordinary way, so we enable only 48k for now.
+ */
+ if (chip->usb_id == USB_ID(0x041e, 0x3000) ||
+ chip->usb_id == USB_ID(0x041e, 0x3020) ||
+ chip->usb_id == USB_ID(0x041e, 0x3061)) {
+ if (fmt[3] == UAC_FORMAT_TYPE_I &&
+ fp->rates != SNDRV_PCM_RATE_48000 &&
+ fp->rates != SNDRV_PCM_RATE_96000)
+ return -1;
+ }
+#endif
+ return 0;
+}
+
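parse_audio_format_rates_v1() above reads the UAC v1 discrete/continuous rate layout: fmt[offset] is bSamFreqType (0 means a continuous min..max range, otherwise the number of discrete rates), followed by 3-byte little-endian frequencies. A sketch of an assumed two-rate descriptor tail, as the function would see it starting at fmt[offset]:

	/* bSamFreqType = 2, then 44100 Hz (0x00ac44) and 48000 Hz (0x00bb80),
	 * each stored low byte first */
	static const unsigned char rates_example[] = {
		0x02, 0x44, 0xac, 0x00, 0x80, 0xbb, 0x00
	};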
diff --git a/sound/usb/format.h b/sound/usb/format.h
new file mode 100644
index 000000000000..8298c4e8ddfa
--- /dev/null
+++ b/sound/usb/format.h
@@ -0,0 +1,8 @@
+#ifndef __USBAUDIO_FORMAT_H
+#define __USBAUDIO_FORMAT_H
+
+int snd_usb_parse_audio_format(struct snd_usb_audio *chip, struct audioformat *fp,
+ int format, unsigned char *fmt, int stream,
+ struct usb_host_interface *iface);
+
+#endif /* __USBAUDIO_FORMAT_H */
diff --git a/sound/usb/helper.c b/sound/usb/helper.c
new file mode 100644
index 000000000000..d48d6f8f6ac9
--- /dev/null
+++ b/sound/usb/helper.c
@@ -0,0 +1,113 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+#include "usbaudio.h"
+#include "helper.h"
+
+/*
+ * combine bytes and get an integer value
+ */
+unsigned int snd_usb_combine_bytes(unsigned char *bytes, int size)
+{
+ switch (size) {
+ case 1: return *bytes;
+ case 2: return combine_word(bytes);
+ case 3: return combine_triple(bytes);
+ case 4: return combine_quad(bytes);
+ default: return 0;
+ }
+}
+
+/*
+ * parse the descriptor buffer and return a pointer to the descriptor
+ * of the given type.
+ */
+void *snd_usb_find_desc(void *descstart, int desclen, void *after, u8 dtype)
+{
+ u8 *p, *end, *next;
+
+ p = descstart;
+ end = p + desclen;
+ for (; p < end;) {
+ if (p[0] < 2)
+ return NULL;
+ next = p + p[0];
+ if (next > end)
+ return NULL;
+ if (p[1] == dtype && (!after || (void *)p > after)) {
+ return p;
+ }
+ p = next;
+ }
+ return NULL;
+}
+
+/*
+ * find a class-specific interface descriptor with the given subtype.
+ */
+void *snd_usb_find_csint_desc(void *buffer, int buflen, void *after, u8 dsubtype)
+{
+ unsigned char *p = after;
+
+ while ((p = snd_usb_find_desc(buffer, buflen, p,
+ USB_DT_CS_INTERFACE)) != NULL) {
+ if (p[0] >= 3 && p[2] == dsubtype)
+ return p;
+ }
+ return NULL;
+}
+
+/*
+ * Wrapper for usb_control_msg().
+ * Allocates a temporary buffer to avoid DMA from/to the stack.
+ */
+int snd_usb_ctl_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
+ __u8 requesttype, __u16 value, __u16 index, void *data,
+ __u16 size, int timeout)
+{
+ int err;
+ void *buf = NULL;
+
+ if (size > 0) {
+ buf = kmemdup(data, size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ }
+ err = usb_control_msg(dev, pipe, request, requesttype,
+ value, index, buf, size, timeout);
+ if (size > 0) {
+ memcpy(data, buf, size);
+ kfree(buf);
+ }
+ return err;
+}
+
+unsigned char snd_usb_parse_datainterval(struct snd_usb_audio *chip,
+ struct usb_host_interface *alts)
+{
+ if (snd_usb_get_speed(chip->dev) == USB_SPEED_HIGH &&
+ get_endpoint(alts, 0)->bInterval >= 1 &&
+ get_endpoint(alts, 0)->bInterval <= 4)
+ return get_endpoint(alts, 0)->bInterval - 1;
+ else
+ return 0;
+}
+
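snd_usb_find_csint_desc() above uses the "after" argument as a cursor into the extra-descriptor blob, so feeding the previous result back in enumerates every class-specific interface descriptor of one subtype. A hypothetical loop over all UAC_AS_GENERAL descriptors of an altsetting would look like:

	void *p = NULL;

	while ((p = snd_usb_find_csint_desc(alts->extra, alts->extralen,
					    p, UAC_AS_GENERAL)) != NULL) {
		/* p points at one complete class-specific interface descriptor:
		 * p[0] is bLength, p[2] is the bDescriptorSubtype (UAC_AS_GENERAL) */
	}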
diff --git a/sound/usb/helper.h b/sound/usb/helper.h
new file mode 100644
index 000000000000..a6b0e51b3a9a
--- /dev/null
+++ b/sound/usb/helper.h
@@ -0,0 +1,32 @@
+#ifndef __USBAUDIO_HELPER_H
+#define __USBAUDIO_HELPER_H
+
+unsigned int snd_usb_combine_bytes(unsigned char *bytes, int size);
+
+void *snd_usb_find_desc(void *descstart, int desclen, void *after, u8 dtype);
+void *snd_usb_find_csint_desc(void *descstart, int desclen, void *after, u8 dsubtype);
+
+int snd_usb_ctl_msg(struct usb_device *dev, unsigned int pipe,
+ __u8 request, __u8 requesttype, __u16 value, __u16 index,
+ void *data, __u16 size, int timeout);
+
+unsigned char snd_usb_parse_datainterval(struct snd_usb_audio *chip,
+ struct usb_host_interface *alts);
+
+/*
+ * retrieve usb_interface descriptor from the host interface
+ * (conditional for compatibility with the older API)
+ */
+#ifndef get_iface_desc
+#define get_iface_desc(iface) (&(iface)->desc)
+#define get_endpoint(alt,ep) (&(alt)->endpoint[ep].desc)
+#define get_ep_desc(ep) (&(ep)->desc)
+#define get_cfg_desc(cfg) (&(cfg)->desc)
+#endif
+
+#ifndef snd_usb_get_speed
+#define snd_usb_get_speed(dev) ((dev)->speed)
+#endif
+
+
+#endif /* __USBAUDIO_HELPER_H */
diff --git a/sound/usb/usbmidi.c b/sound/usb/midi.c
index 9e28b20cb2ce..2c1558c327bb 100644
--- a/sound/usb/usbmidi.c
+++ b/sound/usb/midi.c
@@ -53,7 +53,8 @@
#include <sound/rawmidi.h>
#include <sound/asequencer.h>
#include "usbaudio.h"
-
+#include "midi.h"
+#include "helper.h"
/*
* define this to log all USB packets
diff --git a/sound/usb/midi.h b/sound/usb/midi.h
new file mode 100644
index 000000000000..2089ec987c66
--- /dev/null
+++ b/sound/usb/midi.h
@@ -0,0 +1,48 @@
+#ifndef __USBMIDI_H
+#define __USBMIDI_H
+
+/* maximum number of endpoints per interface */
+#define MIDI_MAX_ENDPOINTS 2
+
+/* data for QUIRK_MIDI_FIXED_ENDPOINT */
+struct snd_usb_midi_endpoint_info {
+ int8_t out_ep; /* ep number, 0 autodetect */
+ uint8_t out_interval; /* interval for interrupt endpoints */
+ int8_t in_ep;
+ uint8_t in_interval;
+ uint16_t out_cables; /* bitmask */
+ uint16_t in_cables; /* bitmask */
+};
+
+/* for QUIRK_MIDI_YAMAHA, data is NULL */
+
+/* for QUIRK_MIDI_MIDIMAN, data points to a snd_usb_midi_endpoint_info
+ * structure (out_cables and in_cables only) */
+
+/* for QUIRK_COMPOSITE, data points to an array of snd_usb_audio_quirk
+ * structures, terminated with .ifnum = -1 */
+
+/* for QUIRK_AUDIO_FIXED_ENDPOINT, data points to an audioformat structure */
+
+/* for QUIRK_AUDIO/MIDI_STANDARD_INTERFACE, data is NULL */
+
+/* for QUIRK_AUDIO_EDIROL_UA700_UA25/UA1000, data is NULL */
+
+/* for QUIRK_IGNORE_INTERFACE, data is NULL */
+
+/* for QUIRK_MIDI_NOVATION and _RAW, data is NULL */
+
+/* for QUIRK_MIDI_EMAGIC, data points to a snd_usb_midi_endpoint_info
+ * structure (out_cables and in_cables only) */
+
+/* for QUIRK_MIDI_CME, data is NULL */
+
+int snd_usbmidi_create(struct snd_card *card,
+ struct usb_interface *iface,
+ struct list_head *midi_list,
+ const struct snd_usb_audio_quirk *quirk);
+void snd_usbmidi_input_stop(struct list_head* p);
+void snd_usbmidi_input_start(struct list_head* p);
+void snd_usbmidi_disconnect(struct list_head *p);
+
+#endif /* __USBMIDI_H */
diff --git a/sound/usb/misc/Makefile b/sound/usb/misc/Makefile
new file mode 100644
index 000000000000..ccefd8158936
--- /dev/null
+++ b/sound/usb/misc/Makefile
@@ -0,0 +1,2 @@
+snd-ua101-objs := ua101.o
+obj-$(CONFIG_SND_USB_UA101) += snd-ua101.o
diff --git a/sound/usb/ua101.c b/sound/usb/misc/ua101.c
index 3d458d3b9962..796d8b25ee89 100644
--- a/sound/usb/ua101.c
+++ b/sound/usb/misc/ua101.c
@@ -23,7 +23,8 @@
#include <sound/initval.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
-#include "usbaudio.h"
+#include "../usbaudio.h"
+#include "../midi.h"
MODULE_DESCRIPTION("Edirol UA-101/1000 driver");
MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
diff --git a/sound/usb/usbmixer.c b/sound/usb/mixer.c
index 8e8f871b74ca..97dd17655104 100644
--- a/sound/usb/usbmixer.c
+++ b/sound/usb/mixer.c
@@ -33,6 +33,7 @@
#include <linux/string.h>
#include <linux/usb.h>
#include <linux/usb/audio.h>
+#include <linux/usb/audio-v2.h>
#include <sound/core.h>
#include <sound/control.h>
@@ -41,60 +42,12 @@
#include <sound/tlv.h>
#include "usbaudio.h"
-
-/*
- */
-
-/* ignore error from controls - for debugging */
-/* #define IGNORE_CTL_ERROR */
-
-/*
- * Sound Blaster remote control configuration
- *
- * format of remote control data:
- * Extigy: xx 00
- * Audigy 2 NX: 06 80 xx 00 00 00
- * Live! 24-bit: 06 80 xx yy 22 83
- */
-static const struct rc_config {
- u32 usb_id;
- u8 offset;
- u8 length;
- u8 packet_length;
- u8 min_packet_length; /* minimum accepted length of the URB result */
- u8 mute_mixer_id;
- u32 mute_code;
-} rc_configs[] = {
- { USB_ID(0x041e, 0x3000), 0, 1, 2, 1, 18, 0x0013 }, /* Extigy */
- { USB_ID(0x041e, 0x3020), 2, 1, 6, 6, 18, 0x0013 }, /* Audigy 2 NX */
- { USB_ID(0x041e, 0x3040), 2, 2, 6, 6, 2, 0x6e91 }, /* Live! 24-bit */
- { USB_ID(0x041e, 0x3048), 2, 2, 6, 6, 2, 0x6e91 }, /* Toshiba SB0500 */
-};
+#include "mixer.h"
+#include "helper.h"
+#include "mixer_quirks.h"
#define MAX_ID_ELEMS 256
-struct usb_mixer_interface {
- struct snd_usb_audio *chip;
- unsigned int ctrlif;
- struct list_head list;
- unsigned int ignore_ctl_error;
- struct urb *urb;
- /* array[MAX_ID_ELEMS], indexed by unit id */
- struct usb_mixer_elem_info **id_elems;
-
- /* Sound Blaster remote control stuff */
- const struct rc_config *rc_cfg;
- u32 rc_code;
- wait_queue_head_t rc_waitq;
- struct urb *rc_urb;
- struct usb_ctrlrequest *rc_setup_packet;
- u8 rc_buffer[6];
-
- u8 audigy2nx_leds[3];
- u8 xonar_u1_status;
-};
-
-
struct usb_audio_term {
int id;
int type;
@@ -116,39 +69,6 @@ struct mixer_build {
const struct usbmix_selector_map *selector_map;
};
-#define MAX_CHANNELS 10 /* max logical channels */
-
-struct usb_mixer_elem_info {
- struct usb_mixer_interface *mixer;
- struct usb_mixer_elem_info *next_id_elem; /* list of controls with same id */
- struct snd_ctl_elem_id *elem_id;
- unsigned int id;
- unsigned int control; /* CS or ICN (high byte) */
- unsigned int cmask; /* channel mask bitmap: 0 = master */
- int channels;
- int val_type;
- int min, max, res;
- int dBmin, dBmax;
- int cached;
- int cache_val[MAX_CHANNELS];
- u8 initialized;
-};
-
-
-enum {
- USB_FEATURE_NONE = 0,
- USB_FEATURE_MUTE = 1,
- USB_FEATURE_VOLUME,
- USB_FEATURE_BASS,
- USB_FEATURE_MID,
- USB_FEATURE_TREBLE,
- USB_FEATURE_GEQ,
- USB_FEATURE_AGC,
- USB_FEATURE_DELAY,
- USB_FEATURE_BASSBOOST,
- USB_FEATURE_LOUDNESS
-};
-
enum {
USB_MIXER_BOOLEAN,
USB_MIXER_INV_BOOLEAN,
@@ -213,7 +133,7 @@ enum {
* if the mixer topology is too complicated and the parsed names are
* ambiguous, add the entries in usbmixer_maps.c.
*/
-#include "usbmixer_maps.c"
+#include "mixer_maps.c"
static const struct usbmix_name_map *
find_map(struct mixer_build *state, int unitid, int control)
@@ -278,6 +198,7 @@ static int check_mapped_selector_name(struct mixer_build *state, int unitid,
/*
* find an audio control unit with the given unit id
+ * this doesn't return any clock-related units, so they need to be handled elsewhere
*/
static void *find_audio_control_unit(struct mixer_build *state, unsigned char unit)
{
@@ -286,7 +207,7 @@ static void *find_audio_control_unit(struct mixer_build *state, unsigned char un
p = NULL;
while ((p = snd_usb_find_desc(state->buffer, state->buflen, p,
USB_DT_CS_INTERFACE)) != NULL) {
- if (p[0] >= 4 && p[2] >= UAC_INPUT_TERMINAL && p[2] <= UAC_EXTENSION_UNIT_V1 && p[3] == unit)
+ if (p[0] >= 4 && p[2] >= UAC_INPUT_TERMINAL && p[2] <= UAC2_EXTENSION_UNIT_V2 && p[3] == unit)
return p;
}
return NULL;
@@ -383,7 +304,7 @@ static int get_abs_value(struct usb_mixer_elem_info *cval, int val)
* retrieve a mixer value
*/
-static int get_ctl_value(struct usb_mixer_elem_info *cval, int request, int validx, int *value_ret)
+static int get_ctl_value_v1(struct usb_mixer_elem_info *cval, int request, int validx, int *value_ret)
{
unsigned char buf[2];
int val_len = cval->val_type >= USB_MIXER_S16 ? 2 : 1;
@@ -405,6 +326,58 @@ static int get_ctl_value(struct usb_mixer_elem_info *cval, int request, int vali
return -EINVAL;
}
+static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request, int validx, int *value_ret)
+{
+ unsigned char buf[14]; /* enough space for one range of 4 bytes */
+ unsigned char *val;
+ int ret;
+ __u8 bRequest;
+
+ bRequest = (request == UAC_GET_CUR) ?
+ UAC2_CS_CUR : UAC2_CS_RANGE;
+
+ ret = snd_usb_ctl_msg(cval->mixer->chip->dev,
+ usb_rcvctrlpipe(cval->mixer->chip->dev, 0),
+ bRequest,
+ USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
+ validx, cval->mixer->ctrlif | (cval->id << 8),
+ buf, sizeof(buf), 1000);
+
+ if (ret < 0) {
+ snd_printdd(KERN_ERR "cannot get ctl value: req = %#x, wValue = %#x, wIndex = %#x, type = %d\n",
+ request, validx, cval->mixer->ctrlif | (cval->id << 8), cval->val_type);
+ return ret;
+ }
+
+ switch (request) {
+ case UAC_GET_CUR:
+ val = buf;
+ break;
+ case UAC_GET_MIN:
+ val = buf + sizeof(__u16);
+ break;
+ case UAC_GET_MAX:
+ val = buf + sizeof(__u16) * 2;
+ break;
+ case UAC_GET_RES:
+ val = buf + sizeof(__u16) * 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *value_ret = convert_signed_value(cval, snd_usb_combine_bytes(val, sizeof(__u16)));
+
+ return 0;
+}
+
+static int get_ctl_value(struct usb_mixer_elem_info *cval, int request, int validx, int *value_ret)
+{
+ return (cval->mixer->protocol == UAC_VERSION_1) ?
+ get_ctl_value_v1(cval, request, validx, value_ret) :
+ get_ctl_value_v2(cval, request, validx, value_ret);
+}
+
static int get_cur_ctl_value(struct usb_mixer_elem_info *cval, int validx, int *value)
{
return get_ctl_value(cval, UAC_GET_CUR, validx, value);
@@ -429,8 +402,7 @@ static int get_cur_mix_value(struct usb_mixer_elem_info *cval,
err = get_cur_mix_raw(cval, channel, value);
if (err < 0) {
if (!cval->mixer->ignore_ctl_error)
- snd_printd(KERN_ERR "cannot get current value for "
- "control %d ch %d: err = %d\n",
+ snd_printd(KERN_ERR "cannot get current value for control %d ch %d: err = %d\n",
cval->control, channel, err);
return err;
}
@@ -444,11 +416,26 @@ static int get_cur_mix_value(struct usb_mixer_elem_info *cval,
* set a mixer value
*/
-static int set_ctl_value(struct usb_mixer_elem_info *cval, int request, int validx, int value_set)
+int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval,
+ int request, int validx, int value_set)
{
unsigned char buf[2];
- int val_len = cval->val_type >= USB_MIXER_S16 ? 2 : 1;
- int timeout = 10;
+ int val_len, timeout = 10;
+
+ if (cval->mixer->protocol == UAC_VERSION_1) {
+ val_len = cval->val_type >= USB_MIXER_S16 ? 2 : 1;
+ } else { /* UAC_VERSION_2 */
+ /* audio class v2 controls are always 2 bytes in size */
+ val_len = sizeof(__u16);
+
+ /* FIXME */
+ if (request != UAC_SET_CUR) {
+ snd_printdd(KERN_WARNING "RANGE setting not yet supported\n");
+ return -EINVAL;
+ }
+
+ request = UAC2_CS_CUR;
+ }
value_set = convert_bytes_value(cval, value_set);
buf[0] = value_set & 0xff;
@@ -468,14 +455,14 @@ static int set_ctl_value(struct usb_mixer_elem_info *cval, int request, int vali
static int set_cur_ctl_value(struct usb_mixer_elem_info *cval, int validx, int value)
{
- return set_ctl_value(cval, UAC_SET_CUR, validx, value);
+ return snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR, validx, value);
}
static int set_cur_mix_value(struct usb_mixer_elem_info *cval, int channel,
int index, int value)
{
int err;
- err = set_ctl_value(cval, UAC_SET_CUR, (cval->control << 8) | channel,
+ err = snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR, (cval->control << 8) | channel,
value);
if (err < 0)
return err;
@@ -644,46 +631,65 @@ static int get_term_name(struct mixer_build *state, struct usb_audio_term *iterm
*/
static int check_input_term(struct mixer_build *state, int id, struct usb_audio_term *term)
{
- unsigned char *p1;
+ void *p1;
memset(term, 0, sizeof(*term));
while ((p1 = find_audio_control_unit(state, id)) != NULL) {
+ unsigned char *hdr = p1;
term->id = id;
- switch (p1[2]) {
+ switch (hdr[2]) {
case UAC_INPUT_TERMINAL:
- term->type = combine_word(p1 + 4);
- term->channels = p1[7];
- term->chconfig = combine_word(p1 + 8);
- term->name = p1[11];
+ if (state->mixer->protocol == UAC_VERSION_1) {
+ struct uac_input_terminal_descriptor *d = p1;
+ term->type = le16_to_cpu(d->wTerminalType);
+ term->channels = d->bNrChannels;
+ term->chconfig = le16_to_cpu(d->wChannelConfig);
+ term->name = d->iTerminal;
+ } else { /* UAC_VERSION_2 */
+ struct uac2_input_terminal_descriptor *d = p1;
+ term->type = le16_to_cpu(d->wTerminalType);
+ term->channels = d->bNrChannels;
+ term->chconfig = le32_to_cpu(d->bmChannelConfig);
+ term->name = d->iTerminal;
+ }
return 0;
- case UAC_FEATURE_UNIT:
- id = p1[4];
+ case UAC_FEATURE_UNIT: {
+ /* the header is the same for v1 and v2 */
+ struct uac_feature_unit_descriptor *d = p1;
+ id = d->bSourceID;
break; /* continue to parse */
- case UAC_MIXER_UNIT:
- term->type = p1[2] << 16; /* virtual type */
- term->channels = p1[5 + p1[4]];
- term->chconfig = combine_word(p1 + 6 + p1[4]);
- term->name = p1[p1[0] - 1];
+ }
+ case UAC_MIXER_UNIT: {
+ struct uac_mixer_unit_descriptor *d = p1;
+ term->type = d->bDescriptorSubtype << 16; /* virtual type */
+ term->channels = uac_mixer_unit_bNrChannels(d);
+ term->chconfig = uac_mixer_unit_wChannelConfig(d, state->mixer->protocol);
+ term->name = uac_mixer_unit_iMixer(d);
return 0;
- case UAC_SELECTOR_UNIT:
+ }
+ case UAC_SELECTOR_UNIT: {
+ struct uac_selector_unit_descriptor *d = p1;
/* call recursively to retrieve the channel info */
- if (check_input_term(state, p1[5], term) < 0)
+ if (check_input_term(state, d->baSourceID[0], term) < 0)
return -ENODEV;
- term->type = p1[2] << 16; /* virtual type */
+ term->type = d->bDescriptorSubtype << 16; /* virtual type */
term->id = id;
- term->name = p1[9 + p1[0] - 1];
+ term->name = uac_selector_unit_iSelector(d);
return 0;
+ }
case UAC_PROCESSING_UNIT_V1:
- case UAC_EXTENSION_UNIT_V1:
- if (p1[6] == 1) {
- id = p1[7];
+ case UAC_EXTENSION_UNIT_V1: {
+ struct uac_processing_unit_descriptor *d = p1;
+ if (d->bNrInPins) {
+ id = d->baSourceID[0];
break; /* continue to parse */
}
- term->type = p1[2] << 16; /* virtual type */
- term->channels = p1[7 + p1[6]];
- term->chconfig = combine_word(p1 + 8 + p1[6]);
- term->name = p1[12 + p1[6] + p1[11 + p1[6]]];
+ term->type = d->bDescriptorSubtype << 16; /* virtual type */
+ term->channels = uac_processing_unit_bNrChannels(d);
+ term->chconfig = uac_processing_unit_wChannelConfig(d, state->mixer->protocol);
+ term->name = uac_processing_unit_iProcessing(d, state->mixer->protocol);
return 0;
+ }
default:
return -ENODEV;
}
@@ -764,7 +770,8 @@ static int get_min_max(struct usb_mixer_elem_info *cval, int default_min)
int last_valid_res = cval->res;
while (cval->res > 1) {
- if (set_ctl_value(cval, UAC_SET_RES, (cval->control << 8) | minchn, cval->res / 2) < 0)
+ if (snd_usb_mixer_set_ctl_value(cval, UAC_SET_RES,
+ (cval->control << 8) | minchn, cval->res / 2) < 0)
break;
cval->res /= 2;
}
@@ -929,6 +936,15 @@ static struct snd_kcontrol_new usb_feature_unit_ctl = {
.put = mixer_ctl_feature_put,
};
+/* the read-only variant */
+static struct snd_kcontrol_new usb_feature_unit_ctl_ro = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "", /* will be filled later manually */
+ .info = mixer_ctl_feature_info,
+ .get = mixer_ctl_feature_get,
+ .put = NULL,
+};
+
/*
* build a feature control
@@ -939,20 +955,22 @@ static size_t append_ctl_name(struct snd_kcontrol *kctl, const char *str)
return strlcat(kctl->id.name, str, sizeof(kctl->id.name));
}
-static void build_feature_ctl(struct mixer_build *state, unsigned char *desc,
+static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
unsigned int ctl_mask, int control,
- struct usb_audio_term *iterm, int unitid)
+ struct usb_audio_term *iterm, int unitid,
+ int read_only)
{
+ struct uac_feature_unit_descriptor *desc = raw_desc;
unsigned int len = 0;
int mapped_name = 0;
- int nameid = desc[desc[0] - 1];
+ int nameid = uac_feature_unit_iFeature(desc);
struct snd_kcontrol *kctl;
struct usb_mixer_elem_info *cval;
const struct usbmix_name_map *map;
control++; /* change from zero-based to 1-based value */
- if (control == USB_FEATURE_GEQ) {
+ if (control == UAC_GRAPHIC_EQUALIZER_CONTROL) {
/* FIXME: not supported yet */
return;
}
@@ -984,7 +1002,11 @@ static void build_feature_ctl(struct mixer_build *state, unsigned char *desc,
/* get min/max values */
get_min_max(cval, 0);
- kctl = snd_ctl_new1(&usb_feature_unit_ctl, cval);
+ if (read_only)
+ kctl = snd_ctl_new1(&usb_feature_unit_ctl_ro, cval);
+ else
+ kctl = snd_ctl_new1(&usb_feature_unit_ctl, cval);
+
if (! kctl) {
snd_printk(KERN_ERR "cannot malloc kcontrol\n");
kfree(cval);
@@ -999,8 +1021,8 @@ static void build_feature_ctl(struct mixer_build *state, unsigned char *desc,
kctl->id.name, sizeof(kctl->id.name));
switch (control) {
- case USB_FEATURE_MUTE:
- case USB_FEATURE_VOLUME:
+ case UAC_MUTE_CONTROL:
+ case UAC_VOLUME_CONTROL:
/* determine the control name. the rule is:
* - if a name id is given in descriptor, use it.
* - if the connected input can be determined, then use the name
@@ -1027,9 +1049,9 @@ static void build_feature_ctl(struct mixer_build *state, unsigned char *desc,
len = append_ctl_name(kctl, " Playback");
}
}
- append_ctl_name(kctl, control == USB_FEATURE_MUTE ?
+ append_ctl_name(kctl, control == UAC_MUTE_CONTROL ?
" Switch" : " Volume");
- if (control == USB_FEATURE_VOLUME) {
+ if (control == UAC_VOLUME_CONTROL) {
kctl->tlv.c = mixer_vol_tlv;
kctl->vd[0].access |=
SNDRV_CTL_ELEM_ACCESS_TLV_READ |
@@ -1094,49 +1116,92 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, void
struct usb_audio_term iterm;
unsigned int master_bits, first_ch_bits;
int err, csize;
- struct uac_feature_unit_descriptor *ftr = _ftr;
+ struct uac_feature_unit_descriptor *hdr = _ftr;
+ __u8 *bmaControls;
- if (ftr->bLength < 7 || ! (csize = ftr->bControlSize) || ftr->bLength < 7 + csize) {
+ if (state->mixer->protocol == UAC_VERSION_1) {
+ csize = hdr->bControlSize;
+ channels = (hdr->bLength - 7) / csize - 1;
+ bmaControls = hdr->bmaControls;
+ } else {
+ struct uac2_feature_unit_descriptor *ftr = _ftr;
+ csize = 4;
+ channels = (hdr->bLength - 6) / 4;
+ bmaControls = ftr->bmaControls;
+ }
+
+ if (hdr->bLength < 7 || !csize || hdr->bLength < 7 + csize) {
snd_printk(KERN_ERR "usbaudio: unit %u: invalid UAC_FEATURE_UNIT descriptor\n", unitid);
return -EINVAL;
}
/* parse the source unit */
- if ((err = parse_audio_unit(state, ftr->bSourceID)) < 0)
+ if ((err = parse_audio_unit(state, hdr->bSourceID)) < 0)
return err;
/* determine the input source type and name */
- if (check_input_term(state, ftr->bSourceID, &iterm) < 0)
+ if (check_input_term(state, hdr->bSourceID, &iterm) < 0)
return -EINVAL;
- channels = (ftr->bLength - 7) / csize - 1;
-
- master_bits = snd_usb_combine_bytes(ftr->controls, csize);
+ master_bits = snd_usb_combine_bytes(bmaControls, csize);
/* master configuration quirks */
switch (state->chip->usb_id) {
case USB_ID(0x08bb, 0x2702):
snd_printk(KERN_INFO
"usbmixer: master volume quirk for PCM2702 chip\n");
/* disable non-functional volume control */
- master_bits &= ~(1 << (USB_FEATURE_VOLUME - 1));
+ master_bits &= ~UAC_FU_VOLUME;
break;
}
if (channels > 0)
- first_ch_bits = snd_usb_combine_bytes(ftr->controls + csize, csize);
+ first_ch_bits = snd_usb_combine_bytes(bmaControls + csize, csize);
else
first_ch_bits = 0;
- /* check all control types */
- for (i = 0; i < 10; i++) {
- unsigned int ch_bits = 0;
- for (j = 0; j < channels; j++) {
- unsigned int mask = snd_usb_combine_bytes(ftr->controls + csize * (j+1), csize);
- if (mask & (1 << i))
- ch_bits |= (1 << j);
+
+ if (state->mixer->protocol == UAC_VERSION_1) {
+ /* check all control types */
+ for (i = 0; i < 10; i++) {
+ unsigned int ch_bits = 0;
+ for (j = 0; j < channels; j++) {
+ unsigned int mask = snd_usb_combine_bytes(bmaControls + csize * (j+1), csize);
+ if (mask & (1 << i))
+ ch_bits |= (1 << j);
+ }
+ /* audio class v1 controls are never read-only */
+ if (ch_bits & 1) /* the first channel must be set (for ease of programming) */
+ build_feature_ctl(state, _ftr, ch_bits, i, &iterm, unitid, 0);
+ if (master_bits & (1 << i))
+ build_feature_ctl(state, _ftr, 0, i, &iterm, unitid, 0);
+ }
+ } else { /* UAC_VERSION_2 */
+ for (i = 0; i < 30/2; i++) {
+ /* From the USB Audio spec v2.0:
+ bmaControls() is a (ch+1)-element array of 4-byte bitmaps,
+ each containing a set of bit pairs. If a Control is present,
+ it must be Host readable. If a certain Control is not
+ present then the bit pair must be set to 0b00.
+ If a Control is present but read-only, the bit pair must be
+ set to 0b01. If a Control is also Host programmable, the bit
+ pair must be set to 0b11. The value 0b10 is not allowed. */
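+ A worked example with a hypothetical bitmap value, for illustration
+ only: a per-channel bitmap of 0x0000000d (binary ...1101) decodes to
+ bit pair 0 (Mute) == 0b01, i.e. present but read-only, and bit pair 1
+ (Volume) == 0b11, i.e. present and host programmable; all remaining
+ pairs are 0b00, so no other controls are reported. */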
+ unsigned int ch_bits = 0;
+ unsigned int ch_read_only = 0;
+
+ for (j = 0; j < channels; j++) {
+ unsigned int mask = snd_usb_combine_bytes(bmaControls + csize * (j+1), csize);
+ if (mask & (1 << (i * 2))) {
+ ch_bits |= (1 << j);
+ if (~mask & (1 << ((i * 2) + 1)))
+ ch_read_only |= (1 << j);
+ }
+ }
+
+ /* FIXME: the whole unit is read-only if any of the channels is marked read-only */
+ if (ch_bits & 1) /* the first channel must be set (for ease of programming) */
+ build_feature_ctl(state, _ftr, ch_bits, i, &iterm, unitid, !!ch_read_only);
+ if (master_bits & (1 << i * 2))
+ build_feature_ctl(state, _ftr, 0, i, &iterm, unitid,
+ ~master_bits & (1 << ((i * 2) + 1)));
}
- if (ch_bits & 1) /* the first channel must be set (for ease of programming) */
- build_feature_ctl(state, _ftr, ch_bits, i, &iterm, unitid);
- if (master_bits & (1 << i))
- build_feature_ctl(state, _ftr, 0, i, &iterm, unitid);
}
return 0;
@@ -1154,13 +1219,13 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, void
* input channel number (zero based) is given in control field instead.
*/
-static void build_mixer_unit_ctl(struct mixer_build *state, unsigned char *desc,
+static void build_mixer_unit_ctl(struct mixer_build *state,
+ struct uac_mixer_unit_descriptor *desc,
int in_pin, int in_ch, int unitid,
struct usb_audio_term *iterm)
{
struct usb_mixer_elem_info *cval;
- unsigned int input_pins = desc[4];
- unsigned int num_outs = desc[5 + input_pins];
+ unsigned int num_outs = uac_mixer_unit_bNrChannels(desc);
unsigned int i, len;
struct snd_kcontrol *kctl;
const struct usbmix_name_map *map;
@@ -1178,7 +1243,7 @@ static void build_mixer_unit_ctl(struct mixer_build *state, unsigned char *desc,
cval->control = in_ch + 1; /* based on 1 */
cval->val_type = USB_MIXER_S16;
for (i = 0; i < num_outs; i++) {
- if (check_matrix_bitmap(desc + 9 + input_pins, in_ch, i, num_outs)) {
+ if (check_matrix_bitmap(uac_mixer_unit_bmControls(desc, state->mixer->protocol), in_ch, i, num_outs)) {
cval->cmask |= (1 << i);
cval->channels++;
}
@@ -1211,18 +1276,19 @@ static void build_mixer_unit_ctl(struct mixer_build *state, unsigned char *desc,
/*
* parse a mixer unit
*/
-static int parse_audio_mixer_unit(struct mixer_build *state, int unitid, unsigned char *desc)
+static int parse_audio_mixer_unit(struct mixer_build *state, int unitid, void *raw_desc)
{
+ struct uac_mixer_unit_descriptor *desc = raw_desc;
struct usb_audio_term iterm;
int input_pins, num_ins, num_outs;
int pin, ich, err;
- if (desc[0] < 11 || ! (input_pins = desc[4]) || ! (num_outs = desc[5 + input_pins])) {
+ if (desc->bLength < 11 || ! (input_pins = desc->bNrInPins) || ! (num_outs = uac_mixer_unit_bNrChannels(desc))) {
snd_printk(KERN_ERR "invalid MIXER UNIT descriptor %d\n", unitid);
return -EINVAL;
}
/* no bmControls field (e.g. Maya44) -> ignore */
- if (desc[0] <= 10 + input_pins) {
+ if (desc->bLength <= 10 + input_pins) {
snd_printdd(KERN_INFO "MU %d has no bmControls field\n", unitid);
return 0;
}
@@ -1230,10 +1296,10 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid, unsigne
num_ins = 0;
ich = 0;
for (pin = 0; pin < input_pins; pin++) {
- err = parse_audio_unit(state, desc[5 + pin]);
+ err = parse_audio_unit(state, desc->baSourceID[pin]);
if (err < 0)
return err;
- err = check_input_term(state, desc[5 + pin], &iterm);
+ err = check_input_term(state, desc->baSourceID[pin], &iterm);
if (err < 0)
return err;
num_ins += iterm.channels;
@@ -1241,7 +1307,7 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid, unsigne
int och, ich_has_controls = 0;
for (och = 0; och < num_outs; ++och) {
- if (check_matrix_bitmap(desc + 9 + input_pins,
+ if (check_matrix_bitmap(uac_mixer_unit_bmControls(desc, state->mixer->protocol),
ich, och, num_outs)) {
ich_has_controls = 1;
break;
@@ -1377,8 +1443,8 @@ static struct procunit_info procunits[] = {
* predefined data for extension units
*/
static struct procunit_value_info clock_rate_xu_info[] = {
- { USB_XU_CLOCK_RATE_SELECTOR, "Selector", USB_MIXER_U8, 0 },
- { 0 }
+ { USB_XU_CLOCK_RATE_SELECTOR, "Selector", USB_MIXER_U8, 0 },
+ { 0 }
};
static struct procunit_value_info clock_source_xu_info[] = {
{ USB_XU_CLOCK_SOURCE_SELECTOR, "External", USB_MIXER_BOOLEAN },
@@ -1402,9 +1468,10 @@ static struct procunit_info extunits[] = {
/*
* build a processing/extension unit
*/
-static int build_audio_procunit(struct mixer_build *state, int unitid, unsigned char *dsc, struct procunit_info *list, char *name)
+static int build_audio_procunit(struct mixer_build *state, int unitid, void *raw_desc, struct procunit_info *list, char *name)
{
- int num_ins = dsc[6];
+ struct uac_processing_unit_descriptor *desc = raw_desc;
+ int num_ins = desc->bNrInPins;
struct usb_mixer_elem_info *cval;
struct snd_kcontrol *kctl;
int i, err, nameid, type, len;
@@ -1419,17 +1486,18 @@ static int build_audio_procunit(struct mixer_build *state, int unitid, unsigned
0, NULL, default_value_info
};
- if (dsc[0] < 13 || dsc[0] < 13 + num_ins || dsc[0] < num_ins + dsc[11 + num_ins]) {
+ if (desc->bLength < 13 || desc->bLength < 13 + num_ins ||
+ desc->bLength < num_ins + uac_processing_unit_bControlSize(desc, state->mixer->protocol)) {
snd_printk(KERN_ERR "invalid %s descriptor (id %d)\n", name, unitid);
return -EINVAL;
}
for (i = 0; i < num_ins; i++) {
- if ((err = parse_audio_unit(state, dsc[7 + i])) < 0)
+ if ((err = parse_audio_unit(state, desc->baSourceID[i])) < 0)
return err;
}
- type = combine_word(&dsc[4]);
+ type = le16_to_cpu(desc->wProcessType);
for (info = list; info && info->type; info++)
if (info->type == type)
break;
@@ -1437,8 +1505,9 @@ static int build_audio_procunit(struct mixer_build *state, int unitid, unsigned
info = &default_info;
for (valinfo = info->values; valinfo->control; valinfo++) {
- /* FIXME: bitmap might be longer than 8bit */
- if (! (dsc[12 + num_ins] & (1 << (valinfo->control - 1))))
+ __u8 *controls = uac_processing_unit_bmControls(desc, state->mixer->protocol);
+
+ if (! (controls[valinfo->control / 8] & (1 << ((valinfo->control % 8) - 1))))
continue;
map = find_map(state, unitid, valinfo->control);
if (check_ignored_ctl(map))
@@ -1456,9 +1525,10 @@ static int build_audio_procunit(struct mixer_build *state, int unitid, unsigned
/* get min/max values */
if (type == USB_PROC_UPDOWN && cval->control == USB_PROC_UPDOWN_MODE_SEL) {
+ __u8 *control_spec = uac_processing_unit_specific(desc, state->mixer->protocol);
/* FIXME: hard-coded */
cval->min = 1;
- cval->max = dsc[15];
+ cval->max = control_spec[0];
cval->res = 1;
cval->initialized = 1;
} else {
@@ -1488,7 +1558,7 @@ static int build_audio_procunit(struct mixer_build *state, int unitid, unsigned
else if (info->name)
strlcpy(kctl->id.name, info->name, sizeof(kctl->id.name));
else {
- nameid = dsc[12 + num_ins + dsc[11 + num_ins]];
+ nameid = uac_processing_unit_iProcessing(desc, state->mixer->protocol);
len = 0;
if (nameid)
len = snd_usb_copy_string_desc(state, nameid, kctl->id.name, sizeof(kctl->id.name));
@@ -1507,14 +1577,16 @@ static int build_audio_procunit(struct mixer_build *state, int unitid, unsigned
}
-static int parse_audio_processing_unit(struct mixer_build *state, int unitid, unsigned char *desc)
+static int parse_audio_processing_unit(struct mixer_build *state, int unitid, void *raw_desc)
{
- return build_audio_procunit(state, unitid, desc, procunits, "Processing Unit");
+ return build_audio_procunit(state, unitid, raw_desc, procunits, "Processing Unit");
}
-static int parse_audio_extension_unit(struct mixer_build *state, int unitid, unsigned char *desc)
+static int parse_audio_extension_unit(struct mixer_build *state, int unitid, void *raw_desc)
{
- return build_audio_procunit(state, unitid, desc, extunits, "Extension Unit");
+ /* Note that we parse extension units with processing unit descriptors.
+ * That's ok as the layout is the same */
+ return build_audio_procunit(state, unitid, raw_desc, extunits, "Extension Unit");
}
@@ -1616,9 +1688,9 @@ static void usb_mixer_selector_elem_free(struct snd_kcontrol *kctl)
/*
* parse a selector unit
*/
-static int parse_audio_selector_unit(struct mixer_build *state, int unitid, unsigned char *desc)
+static int parse_audio_selector_unit(struct mixer_build *state, int unitid, void *raw_desc)
{
- unsigned int num_ins = desc[4];
+ struct uac_selector_unit_descriptor *desc = raw_desc;
unsigned int i, nameid, len;
int err;
struct usb_mixer_elem_info *cval;
@@ -1626,17 +1698,17 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid, unsi
const struct usbmix_name_map *map;
char **namelist;
- if (! num_ins || desc[0] < 5 + num_ins) {
+ if (!desc->bNrInPins || desc->bLength < 5 + desc->bNrInPins) {
snd_printk(KERN_ERR "invalid SELECTOR UNIT descriptor %d\n", unitid);
return -EINVAL;
}
- for (i = 0; i < num_ins; i++) {
- if ((err = parse_audio_unit(state, desc[5 + i])) < 0)
+ for (i = 0; i < desc->bNrInPins; i++) {
+ if ((err = parse_audio_unit(state, desc->baSourceID[i])) < 0)
return err;
}
- if (num_ins == 1) /* only one ? nonsense! */
+ if (desc->bNrInPins == 1) /* only one ? nonsense! */
return 0;
map = find_map(state, unitid, 0);
@@ -1653,18 +1725,18 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid, unsi
cval->val_type = USB_MIXER_U8;
cval->channels = 1;
cval->min = 1;
- cval->max = num_ins;
+ cval->max = desc->bNrInPins;
cval->res = 1;
cval->initialized = 1;
- namelist = kmalloc(sizeof(char *) * num_ins, GFP_KERNEL);
+ namelist = kmalloc(sizeof(char *) * desc->bNrInPins, GFP_KERNEL);
if (! namelist) {
snd_printk(KERN_ERR "cannot malloc\n");
kfree(cval);
return -ENOMEM;
}
#define MAX_ITEM_NAME_LEN 64
- for (i = 0; i < num_ins; i++) {
+ for (i = 0; i < desc->bNrInPins; i++) {
struct usb_audio_term iterm;
len = 0;
namelist[i] = kmalloc(MAX_ITEM_NAME_LEN, GFP_KERNEL);
@@ -1678,7 +1750,7 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid, unsi
}
len = check_mapped_selector_name(state, unitid, i, namelist[i],
MAX_ITEM_NAME_LEN);
- if (! len && check_input_term(state, desc[5 + i], &iterm) >= 0)
+ if (! len && check_input_term(state, desc->baSourceID[i], &iterm) >= 0)
len = get_term_name(state, &iterm, namelist[i], MAX_ITEM_NAME_LEN, 0);
if (! len)
sprintf(namelist[i], "Input %d", i);
@@ -1694,7 +1766,7 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid, unsi
kctl->private_value = (unsigned long)namelist;
kctl->private_free = usb_mixer_selector_elem_free;
- nameid = desc[desc[0] - 1];
+ nameid = uac_selector_unit_iSelector(desc);
len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name));
if (len)
;
@@ -1713,7 +1785,7 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid, unsi
}
snd_printdd(KERN_INFO "[%d] SU [%s] items = %d\n",
- cval->id, kctl->id.name, num_ins);
+ cval->id, kctl->id.name, desc->bNrInPins);
if ((err = add_control_to_empty(state, kctl)) < 0)
return err;
@@ -1748,9 +1820,17 @@ static int parse_audio_unit(struct mixer_build *state, int unitid)
case UAC_FEATURE_UNIT:
return parse_audio_feature_unit(state, unitid, p1);
case UAC_PROCESSING_UNIT_V1:
- return parse_audio_processing_unit(state, unitid, p1);
+ /* UAC2_EFFECT_UNIT has the same value */
+ if (state->mixer->protocol == UAC_VERSION_1)
+ return parse_audio_processing_unit(state, unitid, p1);
+ else
+ return 0; /* FIXME - effect units not implemented yet */
case UAC_EXTENSION_UNIT_V1:
- return parse_audio_extension_unit(state, unitid, p1);
+ /* UAC2_PROCESSING_UNIT_V2 has the same value */
+ if (state->mixer->protocol == UAC_VERSION_1)
+ return parse_audio_extension_unit(state, unitid, p1);
+ else /* UAC_VERSION_2 */
+ return parse_audio_processing_unit(state, unitid, p1);
default:
snd_printk(KERN_ERR "usbaudio: unit %u: unexpected type 0x%02x\n", unitid, p1[2]);
return -EINVAL;
@@ -1783,11 +1863,11 @@ static int snd_usb_mixer_dev_free(struct snd_device *device)
*/
static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
{
- struct uac_output_terminal_descriptor_v1 *desc;
struct mixer_build state;
int err;
const struct usbmix_ctl_map *map;
struct usb_host_interface *hostif;
+ void *p;
hostif = &usb_ifnum_to_if(mixer->chip->dev, mixer->ctrlif)->altsetting[0];
memset(&state, 0, sizeof(state));
@@ -1806,23 +1886,39 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
}
}
- desc = NULL;
- while ((desc = snd_usb_find_csint_desc(hostif->extra, hostif->extralen, desc, UAC_OUTPUT_TERMINAL)) != NULL) {
- if (desc->bLength < 9)
- continue; /* invalid descriptor? */
- set_bit(desc->bTerminalID, state.unitbitmap); /* mark terminal ID as visited */
- state.oterm.id = desc->bTerminalID;
- state.oterm.type = le16_to_cpu(desc->wTerminalType);
- state.oterm.name = desc->iTerminal;
- err = parse_audio_unit(&state, desc->bSourceID);
- if (err < 0)
- return err;
+ p = NULL;
+ while ((p = snd_usb_find_csint_desc(hostif->extra, hostif->extralen, p, UAC_OUTPUT_TERMINAL)) != NULL) {
+ if (mixer->protocol == UAC_VERSION_1) {
+ struct uac_output_terminal_descriptor_v1 *desc = p;
+
+ if (desc->bLength < sizeof(*desc))
+ continue; /* invalid descriptor? */
+ set_bit(desc->bTerminalID, state.unitbitmap); /* mark terminal ID as visited */
+ state.oterm.id = desc->bTerminalID;
+ state.oterm.type = le16_to_cpu(desc->wTerminalType);
+ state.oterm.name = desc->iTerminal;
+ err = parse_audio_unit(&state, desc->bSourceID);
+ if (err < 0)
+ return err;
+ } else { /* UAC_VERSION_2 */
+ struct uac2_output_terminal_descriptor *desc = p;
+
+ if (desc->bLength < sizeof(*desc))
+ continue; /* invalid descriptor? */
+ set_bit(desc->bTerminalID, state.unitbitmap); /* mark terminal ID as visited */
+ state.oterm.id = desc->bTerminalID;
+ state.oterm.type = le16_to_cpu(desc->wTerminalType);
+ state.oterm.name = desc->iTerminal;
+ err = parse_audio_unit(&state, desc->bSourceID);
+ if (err < 0)
+ return err;
+ }
}
+
return 0;
}
-static void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer,
- int unitid)
+void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid)
{
struct usb_mixer_elem_info *info;
@@ -1871,54 +1967,98 @@ static void snd_usb_mixer_proc_read(struct snd_info_entry *entry,
}
}
-static void snd_usb_mixer_memory_change(struct usb_mixer_interface *mixer,
- int unitid)
+static void snd_usb_mixer_interrupt_v2(struct usb_mixer_interface *mixer,
+ int attribute, int value, int index)
{
- if (!mixer->rc_cfg)
+ struct usb_mixer_elem_info *info;
+ __u8 unitid = (index >> 8) & 0xff;
+ __u8 control = (value >> 8) & 0xff;
+ __u8 channel = value & 0xff;
+
+ if (channel >= MAX_CHANNELS) {
+ snd_printk(KERN_DEBUG "%s(): bogus channel number %d\n",
+ __func__, channel);
return;
- /* unit ids specific to Extigy/Audigy 2 NX: */
- switch (unitid) {
- case 0: /* remote control */
- mixer->rc_urb->dev = mixer->chip->dev;
- usb_submit_urb(mixer->rc_urb, GFP_ATOMIC);
- break;
- case 4: /* digital in jack */
- case 7: /* line in jacks */
- case 19: /* speaker out jacks */
- case 20: /* headphones out jack */
- break;
- /* live24ext: 4 = line-in jack */
- case 3: /* hp-out jack (may actuate Mute) */
- if (mixer->chip->usb_id == USB_ID(0x041e, 0x3040) ||
- mixer->chip->usb_id == USB_ID(0x041e, 0x3048))
- snd_usb_mixer_notify_id(mixer, mixer->rc_cfg->mute_mixer_id);
- break;
- default:
- snd_printd(KERN_DEBUG "memory change in unknown unit %d\n", unitid);
- break;
+ }
+
+ for (info = mixer->id_elems[unitid]; info; info = info->next_id_elem) {
+ if (info->control != control)
+ continue;
+
+ switch (attribute) {
+ case UAC2_CS_CUR:
+ /* invalidate cache, so the value is read from the device */
+ if (channel)
+ info->cached &= ~(1 << channel);
+ else /* master channel */
+ info->cached = 0;
+
+ snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
+ info->elem_id);
+ break;
+
+ case UAC2_CS_RANGE:
+ /* TODO */
+ break;
+
+ case UAC2_CS_MEM:
+ /* TODO */
+ break;
+
+ default:
+ snd_printk(KERN_DEBUG "unknown attribute %d in interrupt\n",
+ attribute);
+ break;
+ } /* switch */
}
}
-static void snd_usb_mixer_status_complete(struct urb *urb)
+static void snd_usb_mixer_interrupt(struct urb *urb)
{
struct usb_mixer_interface *mixer = urb->context;
+ int len = urb->actual_length;
+
+ if (urb->status != 0)
+ goto requeue;
- if (urb->status == 0) {
- u8 *buf = urb->transfer_buffer;
- int i;
+ if (mixer->protocol == UAC_VERSION_1) {
+ struct uac1_status_word *status;
- for (i = urb->actual_length; i >= 2; buf += 2, i -= 2) {
+ for (status = urb->transfer_buffer;
+ len >= sizeof(*status);
+ len -= sizeof(*status), status++) {
snd_printd(KERN_DEBUG "status interrupt: %02x %02x\n",
- buf[0], buf[1]);
+ status->bStatusType,
+ status->bOriginator);
+
/* ignore any notifications not from the control interface */
- if ((buf[0] & 0x0f) != 0)
+ if ((status->bStatusType & UAC1_STATUS_TYPE_ORIG_MASK) !=
+ UAC1_STATUS_TYPE_ORIG_AUDIO_CONTROL_IF)
continue;
- if (!(buf[0] & 0x40))
- snd_usb_mixer_notify_id(mixer, buf[1]);
+
+ if (status->bStatusType & UAC1_STATUS_TYPE_MEM_CHANGED)
+ snd_usb_mixer_rc_memory_change(mixer, status->bOriginator);
else
- snd_usb_mixer_memory_change(mixer, buf[1]);
+ snd_usb_mixer_notify_id(mixer, status->bOriginator);
+ }
+ } else { /* UAC_VERSION_2 */
+ struct uac2_interrupt_data_msg *msg;
+
+ for (msg = urb->transfer_buffer;
+ len >= sizeof(*msg);
+ len -= sizeof(*msg), msg++) {
+ /* drop vendor specific and endpoint requests */
+ if ((msg->bInfo & UAC2_INTERRUPT_DATA_MSG_VENDOR) ||
+ (msg->bInfo & UAC2_INTERRUPT_DATA_MSG_EP))
+ continue;
+
+ snd_usb_mixer_interrupt_v2(mixer, msg->bAttribute,
+ le16_to_cpu(msg->wValue),
+ le16_to_cpu(msg->wIndex));
}
}
+
+requeue:
if (urb->status != -ENOENT && urb->status != -ECONNRESET) {
urb->dev = mixer->chip->dev;
usb_submit_urb(urb, GFP_ATOMIC);
@@ -1955,301 +2095,11 @@ static int snd_usb_mixer_status_create(struct usb_mixer_interface *mixer)
usb_fill_int_urb(mixer->urb, mixer->chip->dev,
usb_rcvintpipe(mixer->chip->dev, epnum),
transfer_buffer, buffer_length,
- snd_usb_mixer_status_complete, mixer, ep->bInterval);
+ snd_usb_mixer_interrupt, mixer, ep->bInterval);
usb_submit_urb(mixer->urb, GFP_KERNEL);
return 0;
}
-static void snd_usb_soundblaster_remote_complete(struct urb *urb)
-{
- struct usb_mixer_interface *mixer = urb->context;
- const struct rc_config *rc = mixer->rc_cfg;
- u32 code;
-
- if (urb->status < 0 || urb->actual_length < rc->min_packet_length)
- return;
-
- code = mixer->rc_buffer[rc->offset];
- if (rc->length == 2)
- code |= mixer->rc_buffer[rc->offset + 1] << 8;
-
- /* the Mute button actually changes the mixer control */
- if (code == rc->mute_code)
- snd_usb_mixer_notify_id(mixer, rc->mute_mixer_id);
- mixer->rc_code = code;
- wmb();
- wake_up(&mixer->rc_waitq);
-}
-
-static long snd_usb_sbrc_hwdep_read(struct snd_hwdep *hw, char __user *buf,
- long count, loff_t *offset)
-{
- struct usb_mixer_interface *mixer = hw->private_data;
- int err;
- u32 rc_code;
-
- if (count != 1 && count != 4)
- return -EINVAL;
- err = wait_event_interruptible(mixer->rc_waitq,
- (rc_code = xchg(&mixer->rc_code, 0)) != 0);
- if (err == 0) {
- if (count == 1)
- err = put_user(rc_code, buf);
- else
- err = put_user(rc_code, (u32 __user *)buf);
- }
- return err < 0 ? err : count;
-}
-
-static unsigned int snd_usb_sbrc_hwdep_poll(struct snd_hwdep *hw, struct file *file,
- poll_table *wait)
-{
- struct usb_mixer_interface *mixer = hw->private_data;
-
- poll_wait(file, &mixer->rc_waitq, wait);
- return mixer->rc_code ? POLLIN | POLLRDNORM : 0;
-}
-
-static int snd_usb_soundblaster_remote_init(struct usb_mixer_interface *mixer)
-{
- struct snd_hwdep *hwdep;
- int err, len, i;
-
- for (i = 0; i < ARRAY_SIZE(rc_configs); ++i)
- if (rc_configs[i].usb_id == mixer->chip->usb_id)
- break;
- if (i >= ARRAY_SIZE(rc_configs))
- return 0;
- mixer->rc_cfg = &rc_configs[i];
-
- len = mixer->rc_cfg->packet_length;
-
- init_waitqueue_head(&mixer->rc_waitq);
- err = snd_hwdep_new(mixer->chip->card, "SB remote control", 0, &hwdep);
- if (err < 0)
- return err;
- snprintf(hwdep->name, sizeof(hwdep->name),
- "%s remote control", mixer->chip->card->shortname);
- hwdep->iface = SNDRV_HWDEP_IFACE_SB_RC;
- hwdep->private_data = mixer;
- hwdep->ops.read = snd_usb_sbrc_hwdep_read;
- hwdep->ops.poll = snd_usb_sbrc_hwdep_poll;
- hwdep->exclusive = 1;
-
- mixer->rc_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!mixer->rc_urb)
- return -ENOMEM;
- mixer->rc_setup_packet = kmalloc(sizeof(*mixer->rc_setup_packet), GFP_KERNEL);
- if (!mixer->rc_setup_packet) {
- usb_free_urb(mixer->rc_urb);
- mixer->rc_urb = NULL;
- return -ENOMEM;
- }
- mixer->rc_setup_packet->bRequestType =
- USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
- mixer->rc_setup_packet->bRequest = UAC_GET_MEM;
- mixer->rc_setup_packet->wValue = cpu_to_le16(0);
- mixer->rc_setup_packet->wIndex = cpu_to_le16(0);
- mixer->rc_setup_packet->wLength = cpu_to_le16(len);
- usb_fill_control_urb(mixer->rc_urb, mixer->chip->dev,
- usb_rcvctrlpipe(mixer->chip->dev, 0),
- (u8*)mixer->rc_setup_packet, mixer->rc_buffer, len,
- snd_usb_soundblaster_remote_complete, mixer);
- return 0;
-}
-
-#define snd_audigy2nx_led_info snd_ctl_boolean_mono_info
-
-static int snd_audigy2nx_led_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
-{
- struct usb_mixer_interface *mixer = snd_kcontrol_chip(kcontrol);
- int index = kcontrol->private_value;
-
- ucontrol->value.integer.value[0] = mixer->audigy2nx_leds[index];
- return 0;
-}
-
-static int snd_audigy2nx_led_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
-{
- struct usb_mixer_interface *mixer = snd_kcontrol_chip(kcontrol);
- int index = kcontrol->private_value;
- int value = ucontrol->value.integer.value[0];
- int err, changed;
-
- if (value > 1)
- return -EINVAL;
- changed = value != mixer->audigy2nx_leds[index];
- err = snd_usb_ctl_msg(mixer->chip->dev,
- usb_sndctrlpipe(mixer->chip->dev, 0), 0x24,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
- value, index + 2, NULL, 0, 100);
- if (err < 0)
- return err;
- mixer->audigy2nx_leds[index] = value;
- return changed;
-}
-
-static struct snd_kcontrol_new snd_audigy2nx_controls[] = {
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "CMSS LED Switch",
- .info = snd_audigy2nx_led_info,
- .get = snd_audigy2nx_led_get,
- .put = snd_audigy2nx_led_put,
- .private_value = 0,
- },
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Power LED Switch",
- .info = snd_audigy2nx_led_info,
- .get = snd_audigy2nx_led_get,
- .put = snd_audigy2nx_led_put,
- .private_value = 1,
- },
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Dolby Digital LED Switch",
- .info = snd_audigy2nx_led_info,
- .get = snd_audigy2nx_led_get,
- .put = snd_audigy2nx_led_put,
- .private_value = 2,
- },
-};
-
-static int snd_audigy2nx_controls_create(struct usb_mixer_interface *mixer)
-{
- int i, err;
-
- for (i = 0; i < ARRAY_SIZE(snd_audigy2nx_controls); ++i) {
- if (i > 1 && /* Live24ext has 2 LEDs only */
- (mixer->chip->usb_id == USB_ID(0x041e, 0x3040) ||
- mixer->chip->usb_id == USB_ID(0x041e, 0x3048)))
- break;
- err = snd_ctl_add(mixer->chip->card,
- snd_ctl_new1(&snd_audigy2nx_controls[i], mixer));
- if (err < 0)
- return err;
- }
- mixer->audigy2nx_leds[1] = 1; /* Power LED is on by default */
- return 0;
-}
-
-static void snd_audigy2nx_proc_read(struct snd_info_entry *entry,
- struct snd_info_buffer *buffer)
-{
- static const struct sb_jack {
- int unitid;
- const char *name;
- } jacks_audigy2nx[] = {
- {4, "dig in "},
- {7, "line in"},
- {19, "spk out"},
- {20, "hph out"},
- {-1, NULL}
- }, jacks_live24ext[] = {
- {4, "line in"}, /* &1=Line, &2=Mic*/
- {3, "hph out"}, /* headphones */
- {0, "RC "}, /* last command, 6 bytes see rc_config above */
- {-1, NULL}
- };
- const struct sb_jack *jacks;
- struct usb_mixer_interface *mixer = entry->private_data;
- int i, err;
- u8 buf[3];
-
- snd_iprintf(buffer, "%s jacks\n\n", mixer->chip->card->shortname);
- if (mixer->chip->usb_id == USB_ID(0x041e, 0x3020))
- jacks = jacks_audigy2nx;
- else if (mixer->chip->usb_id == USB_ID(0x041e, 0x3040) ||
- mixer->chip->usb_id == USB_ID(0x041e, 0x3048))
- jacks = jacks_live24ext;
- else
- return;
-
- for (i = 0; jacks[i].name; ++i) {
- snd_iprintf(buffer, "%s: ", jacks[i].name);
- err = snd_usb_ctl_msg(mixer->chip->dev,
- usb_rcvctrlpipe(mixer->chip->dev, 0),
- UAC_GET_MEM, USB_DIR_IN | USB_TYPE_CLASS |
- USB_RECIP_INTERFACE, 0,
- jacks[i].unitid << 8, buf, 3, 100);
- if (err == 3 && (buf[0] == 3 || buf[0] == 6))
- snd_iprintf(buffer, "%02x %02x\n", buf[1], buf[2]);
- else
- snd_iprintf(buffer, "?\n");
- }
-}
-
-static int snd_xonar_u1_switch_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct usb_mixer_interface *mixer = snd_kcontrol_chip(kcontrol);
-
- ucontrol->value.integer.value[0] = !!(mixer->xonar_u1_status & 0x02);
- return 0;
-}
-
-static int snd_xonar_u1_switch_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct usb_mixer_interface *mixer = snd_kcontrol_chip(kcontrol);
- u8 old_status, new_status;
- int err, changed;
-
- old_status = mixer->xonar_u1_status;
- if (ucontrol->value.integer.value[0])
- new_status = old_status | 0x02;
- else
- new_status = old_status & ~0x02;
- changed = new_status != old_status;
- err = snd_usb_ctl_msg(mixer->chip->dev,
- usb_sndctrlpipe(mixer->chip->dev, 0), 0x08,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
- 50, 0, &new_status, 1, 100);
- if (err < 0)
- return err;
- mixer->xonar_u1_status = new_status;
- return changed;
-}
-
-static struct snd_kcontrol_new snd_xonar_u1_output_switch = {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Digital Playback Switch",
- .info = snd_ctl_boolean_mono_info,
- .get = snd_xonar_u1_switch_get,
- .put = snd_xonar_u1_switch_put,
-};
-
-static int snd_xonar_u1_controls_create(struct usb_mixer_interface *mixer)
-{
- int err;
-
- err = snd_ctl_add(mixer->chip->card,
- snd_ctl_new1(&snd_xonar_u1_output_switch, mixer));
- if (err < 0)
- return err;
- mixer->xonar_u1_status = 0x05;
- return 0;
-}
-
-void snd_emuusb_set_samplerate(struct snd_usb_audio *chip,
- unsigned char samplerate_id)
-{
- struct usb_mixer_interface *mixer;
- struct usb_mixer_elem_info *cval;
- int unitid = 12; /* SamleRate ExtensionUnit ID */
-
- list_for_each_entry(mixer, &chip->mixer_list, list) {
- cval = mixer->id_elems[unitid];
- if (cval) {
- set_cur_ctl_value(cval, cval->control << 8,
- samplerate_id);
- snd_usb_mixer_notify_id(mixer, unitid);
- }
- break;
- }
-}
-
int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif,
int ignore_error)
{
@@ -2259,7 +2109,7 @@ int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif,
struct usb_mixer_interface *mixer;
struct snd_info_entry *entry;
struct usb_host_interface *host_iface;
- int err, protocol;
+ int err;
strcpy(chip->card->mixername, "USB Mixer");
@@ -2277,38 +2127,13 @@ int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif,
}
host_iface = &usb_ifnum_to_if(chip->dev, ctrlif)->altsetting[0];
- protocol = host_iface->desc.bInterfaceProtocol;
-
- /* FIXME! */
- if (protocol != UAC_VERSION_1) {
- snd_printk(KERN_WARNING "mixer interface protocol 0x%02x not yet supported\n",
- protocol);
- return 0;
- }
+ mixer->protocol = get_iface_desc(host_iface)->bInterfaceProtocol;
if ((err = snd_usb_mixer_controls(mixer)) < 0 ||
(err = snd_usb_mixer_status_create(mixer)) < 0)
goto _error;
- if ((err = snd_usb_soundblaster_remote_init(mixer)) < 0)
- goto _error;
-
- if (mixer->chip->usb_id == USB_ID(0x041e, 0x3020) ||
- mixer->chip->usb_id == USB_ID(0x041e, 0x3040) ||
- mixer->chip->usb_id == USB_ID(0x041e, 0x3048)) {
- if ((err = snd_audigy2nx_controls_create(mixer)) < 0)
- goto _error;
- if (!snd_card_proc_new(chip->card, "audigy2nx", &entry))
- snd_info_set_text_ops(entry, mixer,
- snd_audigy2nx_proc_read);
- }
-
- if (mixer->chip->usb_id == USB_ID(0x0b05, 0x1739) ||
- mixer->chip->usb_id == USB_ID(0x0b05, 0x1743)) {
- err = snd_xonar_u1_controls_create(mixer);
- if (err < 0)
- goto _error;
- }
+ snd_usb_mixer_apply_create_quirk(mixer);
err = snd_device_new(chip->card, SNDRV_DEV_LOWLEVEL, mixer, &dev_ops);
if (err < 0)
@@ -2329,7 +2154,7 @@ _error:
void snd_usb_mixer_disconnect(struct list_head *p)
{
struct usb_mixer_interface *mixer;
-
+
mixer = list_entry(p, struct usb_mixer_interface, list);
usb_kill_urb(mixer->urb);
usb_kill_urb(mixer->rc_urb);
diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
new file mode 100644
index 000000000000..130123854a6c
--- /dev/null
+++ b/sound/usb/mixer.h
@@ -0,0 +1,55 @@
+#ifndef __USBMIXER_H
+#define __USBMIXER_H
+
+struct usb_mixer_interface {
+ struct snd_usb_audio *chip;
+ unsigned int ctrlif;
+ struct list_head list;
+ unsigned int ignore_ctl_error;
+ struct urb *urb;
+ /* array[MAX_ID_ELEMS], indexed by unit id */
+ struct usb_mixer_elem_info **id_elems;
+
+ /* the usb audio specification version this interface complies to */
+ int protocol;
+
+ /* Sound Blaster remote control stuff */
+ const struct rc_config *rc_cfg;
+ u32 rc_code;
+ wait_queue_head_t rc_waitq;
+ struct urb *rc_urb;
+ struct usb_ctrlrequest *rc_setup_packet;
+ u8 rc_buffer[6];
+
+ u8 audigy2nx_leds[3];
+ u8 xonar_u1_status;
+};
+
+#define MAX_CHANNELS 10 /* max logical channels */
+
+struct usb_mixer_elem_info {
+ struct usb_mixer_interface *mixer;
+ struct usb_mixer_elem_info *next_id_elem; /* list of controls with same id */
+ struct snd_ctl_elem_id *elem_id;
+ unsigned int id;
+ unsigned int control; /* CS or ICN (high byte) */
+ unsigned int cmask; /* channel mask bitmap: 0 = master */
+ int channels;
+ int val_type;
+ int min, max, res;
+ int dBmin, dBmax;
+ int cached;
+ int cache_val[MAX_CHANNELS];
+ u8 initialized;
+};
+
+int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif,
+ int ignore_error);
+void snd_usb_mixer_disconnect(struct list_head *p);
+
+void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid);
+
+int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval,
+ int request, int validx, int value_set);
+
+#endif /* __USBMIXER_H */
diff --git a/sound/usb/usbmixer_maps.c b/sound/usb/mixer_maps.c
index 79e903a60862..d93fc89beba8 100644
--- a/sound/usb/usbmixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -85,8 +85,8 @@ static struct usbmix_name_map extigy_map[] = {
/* 16: MU (w/o controls) */
{ 17, NULL, 1 }, /* DISABLED: PU-switch (any effect?) */
{ 17, "Channel Routing", 2 }, /* PU: mode select */
- { 18, "Tone Control - Bass", USB_FEATURE_BASS }, /* FU */
- { 18, "Tone Control - Treble", USB_FEATURE_TREBLE }, /* FU */
+ { 18, "Tone Control - Bass", UAC_BASS_CONTROL }, /* FU */
+ { 18, "Tone Control - Treble", UAC_TREBLE_CONTROL }, /* FU */
{ 18, "Master Playback" }, /* FU; others */
/* 19: OT speaker */
/* 20: OT headphone */
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
new file mode 100644
index 000000000000..e7df1e5e3f2e
--- /dev/null
+++ b/sound/usb/mixer_quirks.c
@@ -0,0 +1,412 @@
+/*
+ * USB Audio Driver for ALSA
+ *
+ * Quirks and vendor-specific extensions for mixer interfaces
+ *
+ * Copyright (c) 2002 by Takashi Iwai <tiwai@suse.de>
+ *
+ * Much code borrowed from audio.c by
+ * Alan Cox (alan@lxorguk.ukuu.org.uk)
+ * Thomas Sailer (sailer@ife.ee.ethz.ch)
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/usb/audio.h>
+
+#include <sound/core.h>
+#include <sound/control.h>
+#include <sound/hwdep.h>
+#include <sound/info.h>
+
+#include "usbaudio.h"
+#include "mixer.h"
+#include "mixer_quirks.h"
+#include "helper.h"
+
+/*
+ * Sound Blaster remote control configuration
+ *
+ * format of remote control data:
+ * Extigy: xx 00
+ * Audigy 2 NX: 06 80 xx 00 00 00
+ * Live! 24-bit: 06 80 xx yy 22 83
+ */
+static const struct rc_config {
+ u32 usb_id;
+ u8 offset;
+ u8 length;
+ u8 packet_length;
+ u8 min_packet_length; /* minimum accepted length of the URB result */
+ u8 mute_mixer_id;
+ u32 mute_code;
+} rc_configs[] = {
+ { USB_ID(0x041e, 0x3000), 0, 1, 2, 1, 18, 0x0013 }, /* Extigy */
+ { USB_ID(0x041e, 0x3020), 2, 1, 6, 6, 18, 0x0013 }, /* Audigy 2 NX */
+ { USB_ID(0x041e, 0x3040), 2, 2, 6, 6, 2, 0x6e91 }, /* Live! 24-bit */
+ { USB_ID(0x041e, 0x3048), 2, 2, 6, 6, 2, 0x6e91 }, /* Toshiba SB0500 */
+};
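+
+/* How the table above maps onto the raw packets (an illustrative reading
+ * of the data, not an additional quirk): for the Audigy 2 NX entry,
+ * offset 2 and length 1 select the single "xx" byte out of
+ * "06 80 xx 00 00 00"; for the Live! 24-bit entry, offset 2 and length 2
+ * combine "xx" and "yy" into code = xx | (yy << 8), so the mute_code
+ * 0x6e91 corresponds to xx = 0x91, yy = 0x6e and triggers a notification
+ * for mixer unit 2 in the completion handler below.
+ */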
+
+static void snd_usb_soundblaster_remote_complete(struct urb *urb)
+{
+ struct usb_mixer_interface *mixer = urb->context;
+ const struct rc_config *rc = mixer->rc_cfg;
+ u32 code;
+
+ if (urb->status < 0 || urb->actual_length < rc->min_packet_length)
+ return;
+
+ code = mixer->rc_buffer[rc->offset];
+ if (rc->length == 2)
+ code |= mixer->rc_buffer[rc->offset + 1] << 8;
+
+ /* the Mute button actually changes the mixer control */
+ if (code == rc->mute_code)
+ snd_usb_mixer_notify_id(mixer, rc->mute_mixer_id);
+ mixer->rc_code = code;
+ wmb();
+ wake_up(&mixer->rc_waitq);
+}
+
+static long snd_usb_sbrc_hwdep_read(struct snd_hwdep *hw, char __user *buf,
+ long count, loff_t *offset)
+{
+ struct usb_mixer_interface *mixer = hw->private_data;
+ int err;
+ u32 rc_code;
+
+ if (count != 1 && count != 4)
+ return -EINVAL;
+ err = wait_event_interruptible(mixer->rc_waitq,
+ (rc_code = xchg(&mixer->rc_code, 0)) != 0);
+ if (err == 0) {
+ if (count == 1)
+ err = put_user(rc_code, buf);
+ else
+ err = put_user(rc_code, (u32 __user *)buf);
+ }
+ return err < 0 ? err : count;
+}
+
+static unsigned int snd_usb_sbrc_hwdep_poll(struct snd_hwdep *hw, struct file *file,
+ poll_table *wait)
+{
+ struct usb_mixer_interface *mixer = hw->private_data;
+
+ poll_wait(file, &mixer->rc_waitq, wait);
+ return mixer->rc_code ? POLLIN | POLLRDNORM : 0;
+}
+
+static int snd_usb_soundblaster_remote_init(struct usb_mixer_interface *mixer)
+{
+ struct snd_hwdep *hwdep;
+ int err, len, i;
+
+ for (i = 0; i < ARRAY_SIZE(rc_configs); ++i)
+ if (rc_configs[i].usb_id == mixer->chip->usb_id)
+ break;
+ if (i >= ARRAY_SIZE(rc_configs))
+ return 0;
+ mixer->rc_cfg = &rc_configs[i];
+
+ len = mixer->rc_cfg->packet_length;
+
+ init_waitqueue_head(&mixer->rc_waitq);
+ err = snd_hwdep_new(mixer->chip->card, "SB remote control", 0, &hwdep);
+ if (err < 0)
+ return err;
+ snprintf(hwdep->name, sizeof(hwdep->name),
+ "%s remote control", mixer->chip->card->shortname);
+ hwdep->iface = SNDRV_HWDEP_IFACE_SB_RC;
+ hwdep->private_data = mixer;
+ hwdep->ops.read = snd_usb_sbrc_hwdep_read;
+ hwdep->ops.poll = snd_usb_sbrc_hwdep_poll;
+ hwdep->exclusive = 1;
+
+ mixer->rc_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!mixer->rc_urb)
+ return -ENOMEM;
+ mixer->rc_setup_packet = kmalloc(sizeof(*mixer->rc_setup_packet), GFP_KERNEL);
+ if (!mixer->rc_setup_packet) {
+ usb_free_urb(mixer->rc_urb);
+ mixer->rc_urb = NULL;
+ return -ENOMEM;
+ }
+ mixer->rc_setup_packet->bRequestType =
+ USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
+ mixer->rc_setup_packet->bRequest = UAC_GET_MEM;
+ mixer->rc_setup_packet->wValue = cpu_to_le16(0);
+ mixer->rc_setup_packet->wIndex = cpu_to_le16(0);
+ mixer->rc_setup_packet->wLength = cpu_to_le16(len);
+ usb_fill_control_urb(mixer->rc_urb, mixer->chip->dev,
+ usb_rcvctrlpipe(mixer->chip->dev, 0),
+ (u8*)mixer->rc_setup_packet, mixer->rc_buffer, len,
+ snd_usb_soundblaster_remote_complete, mixer);
+ return 0;
+}
+
+#define snd_audigy2nx_led_info snd_ctl_boolean_mono_info
+
+static int snd_audigy2nx_led_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_interface *mixer = snd_kcontrol_chip(kcontrol);
+ int index = kcontrol->private_value;
+
+ ucontrol->value.integer.value[0] = mixer->audigy2nx_leds[index];
+ return 0;
+}
+
+static int snd_audigy2nx_led_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_interface *mixer = snd_kcontrol_chip(kcontrol);
+ int index = kcontrol->private_value;
+ int value = ucontrol->value.integer.value[0];
+ int err, changed;
+
+ if (value > 1)
+ return -EINVAL;
+ changed = value != mixer->audigy2nx_leds[index];
+ err = snd_usb_ctl_msg(mixer->chip->dev,
+ usb_sndctrlpipe(mixer->chip->dev, 0), 0x24,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
+ value, index + 2, NULL, 0, 100);
+ if (err < 0)
+ return err;
+ mixer->audigy2nx_leds[index] = value;
+ return changed;
+}
+
+static struct snd_kcontrol_new snd_audigy2nx_controls[] = {
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "CMSS LED Switch",
+ .info = snd_audigy2nx_led_info,
+ .get = snd_audigy2nx_led_get,
+ .put = snd_audigy2nx_led_put,
+ .private_value = 0,
+ },
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Power LED Switch",
+ .info = snd_audigy2nx_led_info,
+ .get = snd_audigy2nx_led_get,
+ .put = snd_audigy2nx_led_put,
+ .private_value = 1,
+ },
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Dolby Digital LED Switch",
+ .info = snd_audigy2nx_led_info,
+ .get = snd_audigy2nx_led_get,
+ .put = snd_audigy2nx_led_put,
+ .private_value = 2,
+ },
+};
+
+static int snd_audigy2nx_controls_create(struct usb_mixer_interface *mixer)
+{
+ int i, err;
+
+ for (i = 0; i < ARRAY_SIZE(snd_audigy2nx_controls); ++i) {
+ if (i > 1 && /* Live24ext has 2 LEDs only */
+ (mixer->chip->usb_id == USB_ID(0x041e, 0x3040) ||
+ mixer->chip->usb_id == USB_ID(0x041e, 0x3048)))
+ break;
+ err = snd_ctl_add(mixer->chip->card,
+ snd_ctl_new1(&snd_audigy2nx_controls[i], mixer));
+ if (err < 0)
+ return err;
+ }
+ mixer->audigy2nx_leds[1] = 1; /* Power LED is on by default */
+ return 0;
+}
+
+static void snd_audigy2nx_proc_read(struct snd_info_entry *entry,
+ struct snd_info_buffer *buffer)
+{
+ static const struct sb_jack {
+ int unitid;
+ const char *name;
+ } jacks_audigy2nx[] = {
+ {4, "dig in "},
+ {7, "line in"},
+ {19, "spk out"},
+ {20, "hph out"},
+ {-1, NULL}
+ }, jacks_live24ext[] = {
+ {4, "line in"}, /* &1=Line, &2=Mic*/
+ {3, "hph out"}, /* headphones */
+ {0, "RC "}, /* last command, 6 bytes see rc_config above */
+ {-1, NULL}
+ };
+ const struct sb_jack *jacks;
+ struct usb_mixer_interface *mixer = entry->private_data;
+ int i, err;
+ u8 buf[3];
+
+ snd_iprintf(buffer, "%s jacks\n\n", mixer->chip->card->shortname);
+ if (mixer->chip->usb_id == USB_ID(0x041e, 0x3020))
+ jacks = jacks_audigy2nx;
+ else if (mixer->chip->usb_id == USB_ID(0x041e, 0x3040) ||
+ mixer->chip->usb_id == USB_ID(0x041e, 0x3048))
+ jacks = jacks_live24ext;
+ else
+ return;
+
+ for (i = 0; jacks[i].name; ++i) {
+ snd_iprintf(buffer, "%s: ", jacks[i].name);
+ err = snd_usb_ctl_msg(mixer->chip->dev,
+ usb_rcvctrlpipe(mixer->chip->dev, 0),
+ UAC_GET_MEM, USB_DIR_IN | USB_TYPE_CLASS |
+ USB_RECIP_INTERFACE, 0,
+ jacks[i].unitid << 8, buf, 3, 100);
+ if (err == 3 && (buf[0] == 3 || buf[0] == 6))
+ snd_iprintf(buffer, "%02x %02x\n", buf[1], buf[2]);
+ else
+ snd_iprintf(buffer, "?\n");
+ }
+}
+
+static int snd_xonar_u1_switch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_interface *mixer = snd_kcontrol_chip(kcontrol);
+
+ ucontrol->value.integer.value[0] = !!(mixer->xonar_u1_status & 0x02);
+ return 0;
+}
+
+static int snd_xonar_u1_switch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_interface *mixer = snd_kcontrol_chip(kcontrol);
+ u8 old_status, new_status;
+ int err, changed;
+
+ old_status = mixer->xonar_u1_status;
+ if (ucontrol->value.integer.value[0])
+ new_status = old_status | 0x02;
+ else
+ new_status = old_status & ~0x02;
+ changed = new_status != old_status;
+ err = snd_usb_ctl_msg(mixer->chip->dev,
+ usb_sndctrlpipe(mixer->chip->dev, 0), 0x08,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
+ 50, 0, &new_status, 1, 100);
+ if (err < 0)
+ return err;
+ mixer->xonar_u1_status = new_status;
+ return changed;
+}
+
+static struct snd_kcontrol_new snd_xonar_u1_output_switch = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Digital Playback Switch",
+ .info = snd_ctl_boolean_mono_info,
+ .get = snd_xonar_u1_switch_get,
+ .put = snd_xonar_u1_switch_put,
+};
+
+static int snd_xonar_u1_controls_create(struct usb_mixer_interface *mixer)
+{
+ int err;
+
+ err = snd_ctl_add(mixer->chip->card,
+ snd_ctl_new1(&snd_xonar_u1_output_switch, mixer));
+ if (err < 0)
+ return err;
+ mixer->xonar_u1_status = 0x05;
+ return 0;
+}
+
+void snd_emuusb_set_samplerate(struct snd_usb_audio *chip,
+ unsigned char samplerate_id)
+{
+ struct usb_mixer_interface *mixer;
+ struct usb_mixer_elem_info *cval;
+ int unitid = 12; /* SampleRate ExtensionUnit ID */
+
+ list_for_each_entry(mixer, &chip->mixer_list, list) {
+ cval = mixer->id_elems[unitid];
+ if (cval) {
+ snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR,
+ cval->control << 8,
+ samplerate_id);
+ snd_usb_mixer_notify_id(mixer, unitid);
+ }
+ break;
+ }
+}
+
+int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
+{
+ int err;
+ struct snd_info_entry *entry;
+
+ if ((err = snd_usb_soundblaster_remote_init(mixer)) < 0)
+ return err;
+
+ if (mixer->chip->usb_id == USB_ID(0x041e, 0x3020) ||
+ mixer->chip->usb_id == USB_ID(0x041e, 0x3040) ||
+ mixer->chip->usb_id == USB_ID(0x041e, 0x3048)) {
+ if ((err = snd_audigy2nx_controls_create(mixer)) < 0)
+ return err;
+ if (!snd_card_proc_new(mixer->chip->card, "audigy2nx", &entry))
+ snd_info_set_text_ops(entry, mixer,
+ snd_audigy2nx_proc_read);
+ }
+
+ if (mixer->chip->usb_id == USB_ID(0x0b05, 0x1739) ||
+ mixer->chip->usb_id == USB_ID(0x0b05, 0x1743)) {
+ err = snd_xonar_u1_controls_create(mixer);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+void snd_usb_mixer_rc_memory_change(struct usb_mixer_interface *mixer,
+ int unitid)
+{
+ if (!mixer->rc_cfg)
+ return;
+ /* unit ids specific to Extigy/Audigy 2 NX: */
+ switch (unitid) {
+ case 0: /* remote control */
+ mixer->rc_urb->dev = mixer->chip->dev;
+ usb_submit_urb(mixer->rc_urb, GFP_ATOMIC);
+ break;
+ case 4: /* digital in jack */
+ case 7: /* line in jacks */
+ case 19: /* speaker out jacks */
+ case 20: /* headphones out jack */
+ break;
+ /* live24ext: 4 = line-in jack */
+ case 3: /* hp-out jack (may actuate Mute) */
+ if (mixer->chip->usb_id == USB_ID(0x041e, 0x3040) ||
+ mixer->chip->usb_id == USB_ID(0x041e, 0x3048))
+ snd_usb_mixer_notify_id(mixer, mixer->rc_cfg->mute_mixer_id);
+ break;
+ default:
+ snd_printd(KERN_DEBUG "memory change in unknown unit %d\n", unitid);
+ break;
+ }
+}
+
diff --git a/sound/usb/mixer_quirks.h b/sound/usb/mixer_quirks.h
new file mode 100644
index 000000000000..bdbfab093816
--- /dev/null
+++ b/sound/usb/mixer_quirks.h
@@ -0,0 +1,13 @@
+#ifndef SND_USB_MIXER_QUIRKS_H
+#define SND_USB_MIXER_QUIRKS_H
+
+int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer);
+
+void snd_emuusb_set_samplerate(struct snd_usb_audio *chip,
+ unsigned char samplerate_id);
+
+void snd_usb_mixer_rc_memory_change(struct usb_mixer_interface *mixer,
+ int unitid);
+
+#endif /* SND_USB_MIXER_QUIRKS_H */
+
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
new file mode 100644
index 000000000000..2bf0d77d1768
--- /dev/null
+++ b/sound/usb/pcm.c
@@ -0,0 +1,935 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/usb/audio.h>
+#include <linux/usb/audio-v2.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+
+#include "usbaudio.h"
+#include "card.h"
+#include "quirks.h"
+#include "debug.h"
+#include "urb.h"
+#include "helper.h"
+#include "pcm.h"
+
+/*
+ * return the current pcm pointer. just based on the hwptr_done value.
+ */
+static snd_pcm_uframes_t snd_usb_pcm_pointer(struct snd_pcm_substream *substream)
+{
+ struct snd_usb_substream *subs;
+ unsigned int hwptr_done;
+
+ subs = (struct snd_usb_substream *)substream->runtime->private_data;
+ spin_lock(&subs->lock);
+ hwptr_done = subs->hwptr_done;
+ spin_unlock(&subs->lock);
+ return hwptr_done / (substream->runtime->frame_bits >> 3);
+}
+
+/*
+ * find a matching audio format
+ */
+static struct audioformat *find_format(struct snd_usb_substream *subs, unsigned int format,
+ unsigned int rate, unsigned int channels)
+{
+ struct list_head *p;
+ struct audioformat *found = NULL;
+ int cur_attr = 0, attr;
+
+ list_for_each(p, &subs->fmt_list) {
+ struct audioformat *fp;
+ fp = list_entry(p, struct audioformat, list);
+ if (!(fp->formats & (1uLL << format)))
+ continue;
+ if (fp->channels != channels)
+ continue;
+ if (rate < fp->rate_min || rate > fp->rate_max)
+ continue;
+ if (! (fp->rates & SNDRV_PCM_RATE_CONTINUOUS)) {
+ unsigned int i;
+ for (i = 0; i < fp->nr_rates; i++)
+ if (fp->rate_table[i] == rate)
+ break;
+ if (i >= fp->nr_rates)
+ continue;
+ }
+ attr = fp->ep_attr & USB_ENDPOINT_SYNCTYPE;
+ if (! found) {
+ found = fp;
+ cur_attr = attr;
+ continue;
+ }
+ /* avoid async out and adaptive in if the other method
+ * supports the same format.
+ * this is a workaround for the case like
+ * M-audio audiophile USB.
+ */
+ if (attr != cur_attr) {
+ if ((attr == USB_ENDPOINT_SYNC_ASYNC &&
+ subs->direction == SNDRV_PCM_STREAM_PLAYBACK) ||
+ (attr == USB_ENDPOINT_SYNC_ADAPTIVE &&
+ subs->direction == SNDRV_PCM_STREAM_CAPTURE))
+ continue;
+ if ((cur_attr == USB_ENDPOINT_SYNC_ASYNC &&
+ subs->direction == SNDRV_PCM_STREAM_PLAYBACK) ||
+ (cur_attr == USB_ENDPOINT_SYNC_ADAPTIVE &&
+ subs->direction == SNDRV_PCM_STREAM_CAPTURE)) {
+ found = fp;
+ cur_attr = attr;
+ continue;
+ }
+ }
+ /* find the format with the largest max. packet size */
+ if (fp->maxpacksize > found->maxpacksize) {
+ found = fp;
+ cur_attr = attr;
+ }
+ }
+ return found;
+}
+
+static int init_pitch_v1(struct snd_usb_audio *chip, int iface,
+ struct usb_host_interface *alts,
+ struct audioformat *fmt)
+{
+ struct usb_device *dev = chip->dev;
+ unsigned int ep;
+ unsigned char data[1];
+ int err;
+
+ ep = get_endpoint(alts, 0)->bEndpointAddress;
+
+ /* if endpoint doesn't have pitch control, bail out */
+ if (!(fmt->attributes & UAC_EP_CS_ATTR_PITCH_CONTROL))
+ return 0;
+
+ data[0] = 1;
+ if ((err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), UAC_SET_CUR,
+ USB_TYPE_CLASS|USB_RECIP_ENDPOINT|USB_DIR_OUT,
+ UAC_EP_CS_ATTR_PITCH_CONTROL << 8, ep,
+ data, sizeof(data), 1000)) < 0) {
+ snd_printk(KERN_ERR "%d:%d:%d: cannot set enable PITCH\n",
+ dev->devnum, iface, ep);
+ return err;
+ }
+
+ return 0;
+}
+
+/*
+ * initialize the pitch control and sample rate
+ */
+int snd_usb_init_pitch(struct snd_usb_audio *chip, int iface,
+ struct usb_host_interface *alts,
+ struct audioformat *fmt)
+{
+ struct usb_interface_descriptor *altsd = get_iface_desc(alts);
+
+ switch (altsd->bInterfaceProtocol) {
+ case UAC_VERSION_1:
+ return init_pitch_v1(chip, iface, alts, fmt);
+
+ case UAC_VERSION_2:
+ /* not implemented yet */
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int set_sample_rate_v1(struct snd_usb_audio *chip, int iface,
+ struct usb_host_interface *alts,
+ struct audioformat *fmt, int rate)
+{
+ struct usb_device *dev = chip->dev;
+ unsigned int ep;
+ unsigned char data[3];
+ int err, crate;
+
+ ep = get_endpoint(alts, 0)->bEndpointAddress;
+ /* if endpoint doesn't have sampling rate control, bail out */
+ if (!(fmt->attributes & UAC_EP_CS_ATTR_SAMPLE_RATE)) {
+ snd_printk(KERN_WARNING "%d:%d:%d: endpoint lacks sample rate attribute bit, cannot set.\n",
+ dev->devnum, iface, fmt->altsetting);
+ return 0;
+ }
+
+ data[0] = rate;
+ data[1] = rate >> 8;
+ data[2] = rate >> 16;
+ if ((err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), UAC_SET_CUR,
+ USB_TYPE_CLASS|USB_RECIP_ENDPOINT|USB_DIR_OUT,
+ UAC_EP_CS_ATTR_SAMPLE_RATE << 8, ep,
+ data, sizeof(data), 1000)) < 0) {
+ snd_printk(KERN_ERR "%d:%d:%d: cannot set freq %d to ep %#x\n",
+ dev->devnum, iface, fmt->altsetting, rate, ep);
+ return err;
+ }
+ if ((err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC_GET_CUR,
+ USB_TYPE_CLASS|USB_RECIP_ENDPOINT|USB_DIR_IN,
+ UAC_EP_CS_ATTR_SAMPLE_RATE << 8, ep,
+ data, sizeof(data), 1000)) < 0) {
+ snd_printk(KERN_WARNING "%d:%d:%d: cannot get freq at ep %#x\n",
+ dev->devnum, iface, fmt->altsetting, ep);
+ return 0; /* some devices don't support reading */
+ }
+ crate = data[0] | (data[1] << 8) | (data[2] << 16);
+ if (crate != rate) {
+ snd_printd(KERN_WARNING "current rate %d is different from the runtime rate %d\n", crate, rate);
+ // runtime->rate = crate;
+ }
+
+ return 0;
+}
+
+static int set_sample_rate_v2(struct snd_usb_audio *chip, int iface,
+ struct usb_host_interface *alts,
+ struct audioformat *fmt, int rate)
+{
+ struct usb_device *dev = chip->dev;
+ unsigned char data[4];
+ int err, crate;
+
+ data[0] = rate;
+ data[1] = rate >> 8;
+ data[2] = rate >> 16;
+ data[3] = rate >> 24;
+ if ((err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), UAC2_CS_CUR,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
+ UAC2_CS_CONTROL_SAM_FREQ << 8, chip->clock_id << 8,
+ data, sizeof(data), 1000)) < 0) {
+ snd_printk(KERN_ERR "%d:%d:%d: cannot set freq %d (v2)\n",
+ dev->devnum, iface, fmt->altsetting, rate);
+ return err;
+ }
+ if ((err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+ UAC2_CS_CONTROL_SAM_FREQ << 8, chip->clock_id << 8,
+ data, sizeof(data), 1000)) < 0) {
+ snd_printk(KERN_WARNING "%d:%d:%d: cannot get freq (v2)\n",
+ dev->devnum, iface, fmt->altsetting);
+ return err;
+ }
+ crate = data[0] | (data[1] << 8) | (data[2] << 16) | (data[3] << 24);
+ if (crate != rate)
+ snd_printd(KERN_WARNING "current rate %d is different from the runtime rate %d\n", crate, rate);
+
+ return 0;
+}
+
+int snd_usb_init_sample_rate(struct snd_usb_audio *chip, int iface,
+ struct usb_host_interface *alts,
+ struct audioformat *fmt, int rate)
+{
+ struct usb_interface_descriptor *altsd = get_iface_desc(alts);
+
+ switch (altsd->bInterfaceProtocol) {
+ case UAC_VERSION_1:
+ return set_sample_rate_v1(chip, iface, alts, fmt, rate);
+
+ case UAC_VERSION_2:
+ return set_sample_rate_v2(chip, iface, alts, fmt, rate);
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * find a matching format and set up the interface
+ */
+static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt)
+{
+ struct usb_device *dev = subs->dev;
+ struct usb_host_interface *alts;
+ struct usb_interface_descriptor *altsd;
+ struct usb_interface *iface;
+ unsigned int ep, attr;
+ int is_playback = subs->direction == SNDRV_PCM_STREAM_PLAYBACK;
+ int err;
+
+ iface = usb_ifnum_to_if(dev, fmt->iface);
+ if (WARN_ON(!iface))
+ return -EINVAL;
+ alts = &iface->altsetting[fmt->altset_idx];
+ altsd = get_iface_desc(alts);
+ if (WARN_ON(altsd->bAlternateSetting != fmt->altsetting))
+ return -EINVAL;
+
+ if (fmt == subs->cur_audiofmt)
+ return 0;
+
+ /* close the old interface */
+ if (subs->interface >= 0 && subs->interface != fmt->iface) {
+ if (usb_set_interface(subs->dev, subs->interface, 0) < 0) {
+ snd_printk(KERN_ERR "%d:%d:%d: return to setting 0 failed\n",
+ dev->devnum, fmt->iface, fmt->altsetting);
+ return -EIO;
+ }
+ subs->interface = -1;
+ subs->altset_idx = 0;
+ }
+
+ /* set interface */
+ if (subs->interface != fmt->iface || subs->altset_idx != fmt->altset_idx) {
+ if (usb_set_interface(dev, fmt->iface, fmt->altsetting) < 0) {
+ snd_printk(KERN_ERR "%d:%d:%d: usb_set_interface failed\n",
+ dev->devnum, fmt->iface, fmt->altsetting);
+ return -EIO;
+ }
+ snd_printdd(KERN_INFO "setting usb interface %d:%d\n", fmt->iface, fmt->altsetting);
+ subs->interface = fmt->iface;
+ subs->altset_idx = fmt->altset_idx;
+ }
+
+ /* create a data pipe */
+ ep = fmt->endpoint & USB_ENDPOINT_NUMBER_MASK;
+ if (is_playback)
+ subs->datapipe = usb_sndisocpipe(dev, ep);
+ else
+ subs->datapipe = usb_rcvisocpipe(dev, ep);
+ subs->datainterval = fmt->datainterval;
+ subs->syncpipe = subs->syncinterval = 0;
+ subs->maxpacksize = fmt->maxpacksize;
+ subs->fill_max = 0;
+
+ /* we need a sync pipe in async OUT or adaptive IN mode */
+ /* check the number of EP, since some devices have broken
+ * descriptors which fool us. if it has only one EP,
+ * assume it is adaptive-out or sync-in.
+ */
+ attr = fmt->ep_attr & USB_ENDPOINT_SYNCTYPE;
+ if (((is_playback && attr == USB_ENDPOINT_SYNC_ASYNC) ||
+ (! is_playback && attr == USB_ENDPOINT_SYNC_ADAPTIVE)) &&
+ altsd->bNumEndpoints >= 2) {
+ /* check sync-pipe endpoint */
+ /* ... and check descriptor size before accessing bSynchAddress
+ because there is a version of the SB Audigy 2 NX firmware lacking
+ the audio fields in the endpoint descriptors */
+ if ((get_endpoint(alts, 1)->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != 0x01 ||
+ (get_endpoint(alts, 1)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
+ get_endpoint(alts, 1)->bSynchAddress != 0)) {
+ snd_printk(KERN_ERR "%d:%d:%d : invalid synch pipe\n",
+ dev->devnum, fmt->iface, fmt->altsetting);
+ return -EINVAL;
+ }
+ ep = get_endpoint(alts, 1)->bEndpointAddress;
+ if (get_endpoint(alts, 0)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
+ (( is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress | USB_DIR_IN)) ||
+ (!is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress & ~USB_DIR_IN)))) {
+ snd_printk(KERN_ERR "%d:%d:%d : invalid synch pipe\n",
+ dev->devnum, fmt->iface, fmt->altsetting);
+ return -EINVAL;
+ }
+ ep &= USB_ENDPOINT_NUMBER_MASK;
+ if (is_playback)
+ subs->syncpipe = usb_rcvisocpipe(dev, ep);
+ else
+ subs->syncpipe = usb_sndisocpipe(dev, ep);
+ if (get_endpoint(alts, 1)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
+ get_endpoint(alts, 1)->bRefresh >= 1 &&
+ get_endpoint(alts, 1)->bRefresh <= 9)
+ subs->syncinterval = get_endpoint(alts, 1)->bRefresh;
+ else if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL)
+ subs->syncinterval = 1;
+ else if (get_endpoint(alts, 1)->bInterval >= 1 &&
+ get_endpoint(alts, 1)->bInterval <= 16)
+ subs->syncinterval = get_endpoint(alts, 1)->bInterval - 1;
+ else
+ subs->syncinterval = 3;
+ }
+
+ /* always fill max packet size */
+ if (fmt->attributes & UAC_EP_CS_ATTR_FILL_MAX)
+ subs->fill_max = 1;
+
+ if ((err = snd_usb_init_pitch(subs->stream->chip, subs->interface, alts, fmt)) < 0)
+ return err;
+
+ subs->cur_audiofmt = fmt;
+
+ snd_usb_set_format_quirk(subs, fmt);
+
+#if 0
+ printk(KERN_DEBUG
+ "setting done: format = %d, rate = %d..%d, channels = %d\n",
+ fmt->format, fmt->rate_min, fmt->rate_max, fmt->channels);
+ printk(KERN_DEBUG
+ " datapipe = 0x%0x, syncpipe = 0x%0x\n",
+ subs->datapipe, subs->syncpipe);
+#endif
+
+ return 0;
+}
+
+/*
+ * hw_params callback
+ *
+ * allocate a buffer and set the given audio format.
+ *
+ * so far we use a physically linear buffer although packetized transfer
+ * doesn't need a continuous area.
+ * if sg buffers are supported in a later version of alsa, we'll follow
+ * that.
+ */
+static int snd_usb_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
+{
+ struct snd_usb_substream *subs = substream->runtime->private_data;
+ struct audioformat *fmt;
+ unsigned int channels, rate, format;
+ int ret, changed;
+
+ ret = snd_pcm_lib_alloc_vmalloc_buffer(substream,
+ params_buffer_bytes(hw_params));
+ if (ret < 0)
+ return ret;
+
+ format = params_format(hw_params);
+ rate = params_rate(hw_params);
+ channels = params_channels(hw_params);
+ fmt = find_format(subs, format, rate, channels);
+ if (!fmt) {
+ snd_printd(KERN_DEBUG "cannot set format: format = %#x, rate = %d, channels = %d\n",
+ format, rate, channels);
+ return -EINVAL;
+ }
+
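+ /* remember whether the format, period size or rate differs from the
+ * current setup; if so, the urbs are re-initialized below
+ */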
+ changed = subs->cur_audiofmt != fmt ||
+ subs->period_bytes != params_period_bytes(hw_params) ||
+ subs->cur_rate != rate;
+ if ((ret = set_format(subs, fmt)) < 0)
+ return ret;
+
+ if (subs->cur_rate != rate) {
+ struct usb_host_interface *alts;
+ struct usb_interface *iface;
+ iface = usb_ifnum_to_if(subs->dev, fmt->iface);
+ alts = &iface->altsetting[fmt->altset_idx];
+ ret = snd_usb_init_sample_rate(subs->stream->chip, subs->interface, alts, fmt, rate);
+ if (ret < 0)
+ return ret;
+ subs->cur_rate = rate;
+ }
+
+ if (changed) {
+ /* format changed */
+ snd_usb_release_substream_urbs(subs, 0);
+ /* influenced: period_bytes, channels, rate, format, */
+ ret = snd_usb_init_substream_urbs(subs, params_period_bytes(hw_params),
+ params_rate(hw_params),
+ snd_pcm_format_physical_width(params_format(hw_params)) *
+ params_channels(hw_params));
+ }
+
+ return ret;
+}
+
+/*
+ * hw_free callback
+ *
+ * reset the audio format and release the buffer
+ */
+static int snd_usb_hw_free(struct snd_pcm_substream *substream)
+{
+ struct snd_usb_substream *subs = substream->runtime->private_data;
+
+ subs->cur_audiofmt = NULL;
+ subs->cur_rate = 0;
+ subs->period_bytes = 0;
+ if (!subs->stream->chip->shutdown)
+ snd_usb_release_substream_urbs(subs, 0);
+ return snd_pcm_lib_free_vmalloc_buffer(substream);
+}
+
+/*
+ * prepare callback
+ *
+ * only a few subtle things...
+ */
+static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_usb_substream *subs = runtime->private_data;
+
+ if (! subs->cur_audiofmt) {
+ snd_printk(KERN_ERR "usbaudio: no format is specified!\n");
+ return -ENXIO;
+ }
+
+ /* some unit conversions in runtime */
+ subs->maxframesize = bytes_to_frames(runtime, subs->maxpacksize);
+ subs->curframesize = bytes_to_frames(runtime, subs->curpacksize);
+
+ /* reset the pointer */
+ subs->hwptr_done = 0;
+ subs->transfer_done = 0;
+ subs->phase = 0;
+ runtime->delay = 0;
+
+ return snd_usb_substream_prepare(subs, runtime);
+}
+
+static struct snd_pcm_hardware snd_usb_hardware =
+{
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_BATCH |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_PAUSE,
+ .buffer_bytes_max = 1024 * 1024,
+ .period_bytes_min = 64,
+ .period_bytes_max = 512 * 1024,
+ .periods_min = 2,
+ .periods_max = 1024,
+};
+
+static int hw_check_valid_format(struct snd_usb_substream *subs,
+ struct snd_pcm_hw_params *params,
+ struct audioformat *fp)
+{
+ struct snd_interval *it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval *ct = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+ struct snd_mask *fmts = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+ struct snd_interval *pt = hw_param_interval(params, SNDRV_PCM_HW_PARAM_PERIOD_TIME);
+ struct snd_mask check_fmts;
+ unsigned int ptime;
+
+ /* check the format */
+ snd_mask_none(&check_fmts);
+ check_fmts.bits[0] = (u32)fp->formats;
+ check_fmts.bits[1] = (u32)(fp->formats >> 32);
+ snd_mask_intersect(&check_fmts, fmts);
+ if (snd_mask_empty(&check_fmts)) {
+ hwc_debug(" > check: no supported format %d\n", fp->format);
+ return 0;
+ }
+ /* check the channels */
+ if (fp->channels < ct->min || fp->channels > ct->max) {
+ hwc_debug(" > check: no valid channels %d (%d/%d)\n", fp->channels, ct->min, ct->max);
+ return 0;
+ }
+ /* check the rate is within the range */
+ if (fp->rate_min > it->max || (fp->rate_min == it->max && it->openmax)) {
+ hwc_debug(" > check: rate_min %d > max %d\n", fp->rate_min, it->max);
+ return 0;
+ }
+ if (fp->rate_max < it->min || (fp->rate_max == it->min && it->openmin)) {
+ hwc_debug(" > check: rate_max %d < min %d\n", fp->rate_max, it->min);
+ return 0;
+ }
+ /* check whether the period time is >= the data packet interval */
+ if (snd_usb_get_speed(subs->dev) == USB_SPEED_HIGH) {
+ ptime = 125 * (1 << fp->datainterval);
+ if (ptime > pt->max || (ptime == pt->max && pt->openmax)) {
+ hwc_debug(" > check: ptime %u > max %u\n", ptime, pt->max);
+ return 0;
+ }
+ }
+ return 1;
+}
+
+static int hw_rule_rate(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ struct snd_usb_substream *subs = rule->private;
+ struct list_head *p;
+ struct snd_interval *it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+ unsigned int rmin, rmax;
+ int changed;
+
+ hwc_debug("hw_rule_rate: (%d,%d)\n", it->min, it->max);
+ changed = 0;
+ rmin = rmax = 0;
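+ /* collect the rate range over all formats that still satisfy
+ * the other already-constrained parameters
+ */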
+ list_for_each(p, &subs->fmt_list) {
+ struct audioformat *fp;
+ fp = list_entry(p, struct audioformat, list);
+ if (!hw_check_valid_format(subs, params, fp))
+ continue;
+ if (changed++) {
+ if (rmin > fp->rate_min)
+ rmin = fp->rate_min;
+ if (rmax < fp->rate_max)
+ rmax = fp->rate_max;
+ } else {
+ rmin = fp->rate_min;
+ rmax = fp->rate_max;
+ }
+ }
+
+ if (!changed) {
+ hwc_debug(" --> get empty\n");
+ it->empty = 1;
+ return -EINVAL;
+ }
+
+ changed = 0;
+ if (it->min < rmin) {
+ it->min = rmin;
+ it->openmin = 0;
+ changed = 1;
+ }
+ if (it->max > rmax) {
+ it->max = rmax;
+ it->openmax = 0;
+ changed = 1;
+ }
+ if (snd_interval_checkempty(it)) {
+ it->empty = 1;
+ return -EINVAL;
+ }
+ hwc_debug(" --> (%d, %d) (changed = %d)\n", it->min, it->max, changed);
+ return changed;
+}
+
+
+static int hw_rule_channels(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ struct snd_usb_substream *subs = rule->private;
+ struct list_head *p;
+ struct snd_interval *it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+ unsigned int rmin, rmax;
+ int changed;
+
+ hwc_debug("hw_rule_channels: (%d,%d)\n", it->min, it->max);
+ changed = 0;
+ rmin = rmax = 0;
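+ /* collect the channel count range over all formats that still satisfy
+ * the other already-constrained parameters
+ */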
+ list_for_each(p, &subs->fmt_list) {
+ struct audioformat *fp;
+ fp = list_entry(p, struct audioformat, list);
+ if (!hw_check_valid_format(subs, params, fp))
+ continue;
+ if (changed++) {
+ if (rmin > fp->channels)
+ rmin = fp->channels;
+ if (rmax < fp->channels)
+ rmax = fp->channels;
+ } else {
+ rmin = fp->channels;
+ rmax = fp->channels;
+ }
+ }
+
+ if (!changed) {
+ hwc_debug(" --> get empty\n");
+ it->empty = 1;
+ return -EINVAL;
+ }
+
+ changed = 0;
+ if (it->min < rmin) {
+ it->min = rmin;
+ it->openmin = 0;
+ changed = 1;
+ }
+ if (it->max > rmax) {
+ it->max = rmax;
+ it->openmax = 0;
+ changed = 1;
+ }
+ if (snd_interval_checkempty(it)) {
+ it->empty = 1;
+ return -EINVAL;
+ }
+ hwc_debug(" --> (%d, %d) (changed = %d)\n", it->min, it->max, changed);
+ return changed;
+}
+
+static int hw_rule_format(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ struct snd_usb_substream *subs = rule->private;
+ struct list_head *p;
+ struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+ u64 fbits;
+ u32 oldbits[2];
+ int changed;
+
+ hwc_debug("hw_rule_format: %x:%x\n", fmt->bits[0], fmt->bits[1]);
+ fbits = 0;
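+ /* OR together the sample format bits of every audio format that still
+ * matches the other parameters, then mask the format parameter with them
+ */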
+ list_for_each(p, &subs->fmt_list) {
+ struct audioformat *fp;
+ fp = list_entry(p, struct audioformat, list);
+ if (!hw_check_valid_format(subs, params, fp))
+ continue;
+ fbits |= fp->formats;
+ }
+
+ oldbits[0] = fmt->bits[0];
+ oldbits[1] = fmt->bits[1];
+ fmt->bits[0] &= (u32)fbits;
+ fmt->bits[1] &= (u32)(fbits >> 32);
+ if (!fmt->bits[0] && !fmt->bits[1]) {
+ hwc_debug(" --> get empty\n");
+ return -EINVAL;
+ }
+ changed = (oldbits[0] != fmt->bits[0] || oldbits[1] != fmt->bits[1]);
+ hwc_debug(" --> %x:%x (changed = %d)\n", fmt->bits[0], fmt->bits[1], changed);
+ return changed;
+}
+
+static int hw_rule_period_time(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ struct snd_usb_substream *subs = rule->private;
+ struct audioformat *fp;
+ struct snd_interval *it;
+ unsigned char min_datainterval;
+ unsigned int pmin;
+ int changed;
+
+ it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_PERIOD_TIME);
+ hwc_debug("hw_rule_period_time: (%u,%u)\n", it->min, it->max);
+ min_datainterval = 0xff;
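+ /* the lower bound of the period time is the smallest data packet
+ * interval among the still-valid formats (125 us * 2^datainterval)
+ */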
+ list_for_each_entry(fp, &subs->fmt_list, list) {
+ if (!hw_check_valid_format(subs, params, fp))
+ continue;
+ min_datainterval = min(min_datainterval, fp->datainterval);
+ }
+ if (min_datainterval == 0xff) {
+ hwc_debug(" --> get emtpy\n");
+ it->empty = 1;
+ return -EINVAL;
+ }
+ pmin = 125 * (1 << min_datainterval);
+ changed = 0;
+ if (it->min < pmin) {
+ it->min = pmin;
+ it->openmin = 0;
+ changed = 1;
+ }
+ if (snd_interval_checkempty(it)) {
+ it->empty = 1;
+ return -EINVAL;
+ }
+ hwc_debug(" --> (%u,%u) (changed = %d)\n", it->min, it->max, changed);
+ return changed;
+}
+
+/*
+ * If the device supports unusual bit rates, does the request meet these?
+ */
+static int snd_usb_pcm_check_knot(struct snd_pcm_runtime *runtime,
+ struct snd_usb_substream *subs)
+{
+ struct audioformat *fp;
+ int count = 0, needs_knot = 0;
+ int err;
+
+ list_for_each_entry(fp, &subs->fmt_list, list) {
+ if (fp->rates & SNDRV_PCM_RATE_CONTINUOUS)
+ return 0;
+ count += fp->nr_rates;
+ if (fp->rates & SNDRV_PCM_RATE_KNOT)
+ needs_knot = 1;
+ }
+ if (!needs_knot)
+ return 0;
+
+ subs->rate_list.count = count;
+ subs->rate_list.list = kmalloc(sizeof(int) * count, GFP_KERNEL);
+ if (!subs->rate_list.list)
+ return -ENOMEM;
+ subs->rate_list.mask = 0;
+ count = 0;
+ list_for_each_entry(fp, &subs->fmt_list, list) {
+ int i;
+ for (i = 0; i < fp->nr_rates; i++)
+ subs->rate_list.list[count++] = fp->rate_table[i];
+ }
+ err = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
+ &subs->rate_list);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+
+/*
+ * set up the runtime hardware information.
+ */
+
+static int setup_hw_info(struct snd_pcm_runtime *runtime, struct snd_usb_substream *subs)
+{
+ struct list_head *p;
+ unsigned int pt, ptmin;
+ int param_period_time_if_needed;
+ int err;
+
+ runtime->hw.formats = subs->formats;
+
+ runtime->hw.rate_min = 0x7fffffff;
+ runtime->hw.rate_max = 0;
+ runtime->hw.channels_min = 256;
+ runtime->hw.channels_max = 0;
+ runtime->hw.rates = 0;
+ ptmin = UINT_MAX;
+ /* check min/max rates and channels */
+ list_for_each(p, &subs->fmt_list) {
+ struct audioformat *fp;
+ fp = list_entry(p, struct audioformat, list);
+ runtime->hw.rates |= fp->rates;
+ if (runtime->hw.rate_min > fp->rate_min)
+ runtime->hw.rate_min = fp->rate_min;
+ if (runtime->hw.rate_max < fp->rate_max)
+ runtime->hw.rate_max = fp->rate_max;
+ if (runtime->hw.channels_min > fp->channels)
+ runtime->hw.channels_min = fp->channels;
+ if (runtime->hw.channels_max < fp->channels)
+ runtime->hw.channels_max = fp->channels;
+ if (fp->fmt_type == UAC_FORMAT_TYPE_II && fp->frame_size > 0) {
+ /* FIXME: there might be more than one audio format... */
+ runtime->hw.period_bytes_min = runtime->hw.period_bytes_max =
+ fp->frame_size;
+ }
+ pt = 125 * (1 << fp->datainterval);
+ ptmin = min(ptmin, pt);
+ }
+
+ param_period_time_if_needed = SNDRV_PCM_HW_PARAM_PERIOD_TIME;
+ if (snd_usb_get_speed(subs->dev) != USB_SPEED_HIGH)
+ /* full speed devices have fixed data packet interval */
+ ptmin = 1000;
+ if (ptmin == 1000)
+ /* if period time doesn't go below 1 ms, no rules needed */
+ param_period_time_if_needed = -1;
+ snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_TIME,
+ ptmin, UINT_MAX);
+
+ if ((err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
+ hw_rule_rate, subs,
+ SNDRV_PCM_HW_PARAM_FORMAT,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ param_period_time_if_needed,
+ -1)) < 0)
+ return err;
+ if ((err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ hw_rule_channels, subs,
+ SNDRV_PCM_HW_PARAM_FORMAT,
+ SNDRV_PCM_HW_PARAM_RATE,
+ param_period_time_if_needed,
+ -1)) < 0)
+ return err;
+ if ((err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
+ hw_rule_format, subs,
+ SNDRV_PCM_HW_PARAM_RATE,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ param_period_time_if_needed,
+ -1)) < 0)
+ return err;
+ if (param_period_time_if_needed >= 0) {
+ err = snd_pcm_hw_rule_add(runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIOD_TIME,
+ hw_rule_period_time, subs,
+ SNDRV_PCM_HW_PARAM_FORMAT,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ SNDRV_PCM_HW_PARAM_RATE,
+ -1);
+ if (err < 0)
+ return err;
+ }
+ if ((err = snd_usb_pcm_check_knot(runtime, subs)) < 0)
+ return err;
+ return 0;
+}
+
+static int snd_usb_pcm_open(struct snd_pcm_substream *substream, int direction)
+{
+ struct snd_usb_stream *as = snd_pcm_substream_chip(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_usb_substream *subs = &as->substream[direction];
+
+ subs->interface = -1;
+ subs->altset_idx = 0;
+ runtime->hw = snd_usb_hardware;
+ runtime->private_data = subs;
+ subs->pcm_substream = substream;
+ return setup_hw_info(runtime, subs);
+}
+
+static int snd_usb_pcm_close(struct snd_pcm_substream *substream, int direction)
+{
+ struct snd_usb_stream *as = snd_pcm_substream_chip(substream);
+ struct snd_usb_substream *subs = &as->substream[direction];
+
+ if (!as->chip->shutdown && subs->interface >= 0) {
+ usb_set_interface(subs->dev, subs->interface, 0);
+ subs->interface = -1;
+ }
+ subs->pcm_substream = NULL;
+ return 0;
+}
+
+static int snd_usb_playback_open(struct snd_pcm_substream *substream)
+{
+ return snd_usb_pcm_open(substream, SNDRV_PCM_STREAM_PLAYBACK);
+}
+
+static int snd_usb_playback_close(struct snd_pcm_substream *substream)
+{
+ return snd_usb_pcm_close(substream, SNDRV_PCM_STREAM_PLAYBACK);
+}
+
+static int snd_usb_capture_open(struct snd_pcm_substream *substream)
+{
+ return snd_usb_pcm_open(substream, SNDRV_PCM_STREAM_CAPTURE);
+}
+
+static int snd_usb_capture_close(struct snd_pcm_substream *substream)
+{
+ return snd_usb_pcm_close(substream, SNDRV_PCM_STREAM_CAPTURE);
+}
+
+static struct snd_pcm_ops snd_usb_playback_ops = {
+ .open = snd_usb_playback_open,
+ .close = snd_usb_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = snd_usb_hw_params,
+ .hw_free = snd_usb_hw_free,
+ .prepare = snd_usb_pcm_prepare,
+ .trigger = snd_usb_substream_playback_trigger,
+ .pointer = snd_usb_pcm_pointer,
+ .page = snd_pcm_lib_get_vmalloc_page,
+ .mmap = snd_pcm_lib_mmap_vmalloc,
+};
+
+static struct snd_pcm_ops snd_usb_capture_ops = {
+ .open = snd_usb_capture_open,
+ .close = snd_usb_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = snd_usb_hw_params,
+ .hw_free = snd_usb_hw_free,
+ .prepare = snd_usb_pcm_prepare,
+ .trigger = snd_usb_substream_capture_trigger,
+ .pointer = snd_usb_pcm_pointer,
+ .page = snd_pcm_lib_get_vmalloc_page,
+ .mmap = snd_pcm_lib_mmap_vmalloc,
+};
+
+void snd_usb_set_pcm_ops(struct snd_pcm *pcm, int stream)
+{
+ snd_pcm_set_ops(pcm, stream,
+ stream == SNDRV_PCM_STREAM_PLAYBACK ?
+ &snd_usb_playback_ops : &snd_usb_capture_ops);
+}
diff --git a/sound/usb/pcm.h b/sound/usb/pcm.h
new file mode 100644
index 000000000000..1c931b68f3b5
--- /dev/null
+++ b/sound/usb/pcm.h
@@ -0,0 +1,14 @@
+#ifndef __USBAUDIO_PCM_H
+#define __USBAUDIO_PCM_H
+
+void snd_usb_set_pcm_ops(struct snd_pcm *pcm, int stream);
+
+int snd_usb_init_pitch(struct snd_usb_audio *chip, int iface,
+ struct usb_host_interface *alts,
+ struct audioformat *fmt);
+
+int snd_usb_init_sample_rate(struct snd_usb_audio *chip, int iface,
+ struct usb_host_interface *alts,
+ struct audioformat *fmt, int rate);
+
+#endif /* __USBAUDIO_PCM_H */
diff --git a/sound/usb/proc.c b/sound/usb/proc.c
new file mode 100644
index 000000000000..f5e3f356b95f
--- /dev/null
+++ b/sound/usb/proc.c
@@ -0,0 +1,168 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/usb.h>
+
+#include <sound/core.h>
+#include <sound/info.h>
+#include <sound/pcm.h>
+
+#include "usbaudio.h"
+#include "helper.h"
+#include "card.h"
+#include "proc.h"
+
+/* convert our full speed USB rate into sampling rate in Hz */
+static inline unsigned get_full_speed_hz(unsigned int usb_rate)
+{
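+ /* usb_rate is fs/1000 in Q16.16, i.e. fs * 8192 / 125;
+ * multiplying by 125 and shifting right by 13 (with rounding) recovers Hz
+ */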
+ return (usb_rate * 125 + (1 << 12)) >> 13;
+}
+
+/* convert our high speed USB rate into sampling rate in Hz */
+static inline unsigned get_high_speed_hz(unsigned int usb_rate)
+{
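+ /* usb_rate is fs/8000 in Q16.16, i.e. fs * 1024 / 125;
+ * multiplying by 125 and shifting right by 10 (with rounding) recovers Hz
+ */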
+ return (usb_rate * 125 + (1 << 9)) >> 10;
+}
+
+/*
+ * common proc files to show the usb device info
+ */
+static void proc_audio_usbbus_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
+{
+ struct snd_usb_audio *chip = entry->private_data;
+ if (!chip->shutdown)
+ snd_iprintf(buffer, "%03d/%03d\n", chip->dev->bus->busnum, chip->dev->devnum);
+}
+
+static void proc_audio_usbid_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
+{
+ struct snd_usb_audio *chip = entry->private_data;
+ if (!chip->shutdown)
+ snd_iprintf(buffer, "%04x:%04x\n",
+ USB_ID_VENDOR(chip->usb_id),
+ USB_ID_PRODUCT(chip->usb_id));
+}
+
+void snd_usb_audio_create_proc(struct snd_usb_audio *chip)
+{
+ struct snd_info_entry *entry;
+ if (!snd_card_proc_new(chip->card, "usbbus", &entry))
+ snd_info_set_text_ops(entry, chip, proc_audio_usbbus_read);
+ if (!snd_card_proc_new(chip->card, "usbid", &entry))
+ snd_info_set_text_ops(entry, chip, proc_audio_usbid_read);
+}
+
+/*
+ * proc interface for listing the supported PCM formats
+ */
+static void proc_dump_substream_formats(struct snd_usb_substream *subs, struct snd_info_buffer *buffer)
+{
+ struct list_head *p;
+ static char *sync_types[4] = {
+ "NONE", "ASYNC", "ADAPTIVE", "SYNC"
+ };
+
+ list_for_each(p, &subs->fmt_list) {
+ struct audioformat *fp;
+ snd_pcm_format_t fmt;
+ fp = list_entry(p, struct audioformat, list);
+ snd_iprintf(buffer, " Interface %d\n", fp->iface);
+ snd_iprintf(buffer, " Altset %d\n", fp->altsetting);
+ snd_iprintf(buffer, " Format:");
+ for (fmt = 0; fmt <= SNDRV_PCM_FORMAT_LAST; ++fmt)
+ if (fp->formats & (1uLL << fmt))
+ snd_iprintf(buffer, " %s",
+ snd_pcm_format_name(fmt));
+ snd_iprintf(buffer, "\n");
+ snd_iprintf(buffer, " Channels: %d\n", fp->channels);
+ snd_iprintf(buffer, " Endpoint: %d %s (%s)\n",
+ fp->endpoint & USB_ENDPOINT_NUMBER_MASK,
+ fp->endpoint & USB_DIR_IN ? "IN" : "OUT",
+ sync_types[(fp->ep_attr & USB_ENDPOINT_SYNCTYPE) >> 2]);
+ if (fp->rates & SNDRV_PCM_RATE_CONTINUOUS) {
+ snd_iprintf(buffer, " Rates: %d - %d (continuous)\n",
+ fp->rate_min, fp->rate_max);
+ } else {
+ unsigned int i;
+ snd_iprintf(buffer, " Rates: ");
+ for (i = 0; i < fp->nr_rates; i++) {
+ if (i > 0)
+ snd_iprintf(buffer, ", ");
+ snd_iprintf(buffer, "%d", fp->rate_table[i]);
+ }
+ snd_iprintf(buffer, "\n");
+ }
+ if (snd_usb_get_speed(subs->dev) == USB_SPEED_HIGH)
+ snd_iprintf(buffer, " Data packet interval: %d us\n",
+ 125 * (1 << fp->datainterval));
+ // snd_iprintf(buffer, " Max Packet Size = %d\n", fp->maxpacksize);
+ // snd_iprintf(buffer, " EP Attribute = %#x\n", fp->attributes);
+ }
+}
+
+static void proc_dump_substream_status(struct snd_usb_substream *subs, struct snd_info_buffer *buffer)
+{
+ if (subs->running) {
+ unsigned int i;
+ snd_iprintf(buffer, " Status: Running\n");
+ snd_iprintf(buffer, " Interface = %d\n", subs->interface);
+ snd_iprintf(buffer, " Altset = %d\n", subs->altset_idx);
+ snd_iprintf(buffer, " URBs = %d [ ", subs->nurbs);
+ for (i = 0; i < subs->nurbs; i++)
+ snd_iprintf(buffer, "%d ", subs->dataurb[i].packets);
+ snd_iprintf(buffer, "]\n");
+ snd_iprintf(buffer, " Packet Size = %d\n", subs->curpacksize);
+ snd_iprintf(buffer, " Momentary freq = %u Hz (%#x.%04x)\n",
+ snd_usb_get_speed(subs->dev) == USB_SPEED_FULL
+ ? get_full_speed_hz(subs->freqm)
+ : get_high_speed_hz(subs->freqm),
+ subs->freqm >> 16, subs->freqm & 0xffff);
+ } else {
+ snd_iprintf(buffer, " Status: Stop\n");
+ }
+}
+
+static void proc_pcm_format_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
+{
+ struct snd_usb_stream *stream = entry->private_data;
+
+ snd_iprintf(buffer, "%s : %s\n", stream->chip->card->longname, stream->pcm->name);
+
+ if (stream->substream[SNDRV_PCM_STREAM_PLAYBACK].num_formats) {
+ snd_iprintf(buffer, "\nPlayback:\n");
+ proc_dump_substream_status(&stream->substream[SNDRV_PCM_STREAM_PLAYBACK], buffer);
+ proc_dump_substream_formats(&stream->substream[SNDRV_PCM_STREAM_PLAYBACK], buffer);
+ }
+ if (stream->substream[SNDRV_PCM_STREAM_CAPTURE].num_formats) {
+ snd_iprintf(buffer, "\nCapture:\n");
+ proc_dump_substream_status(&stream->substream[SNDRV_PCM_STREAM_CAPTURE], buffer);
+ proc_dump_substream_formats(&stream->substream[SNDRV_PCM_STREAM_CAPTURE], buffer);
+ }
+}
+
+void snd_usb_proc_pcm_format_add(struct snd_usb_stream *stream)
+{
+ struct snd_info_entry *entry;
+ char name[32];
+ struct snd_card *card = stream->chip->card;
+
+ sprintf(name, "stream%d", stream->pcm_index);
+ if (!snd_card_proc_new(card, name, &entry))
+ snd_info_set_text_ops(entry, stream, proc_pcm_format_read);
+}
+
diff --git a/sound/usb/proc.h b/sound/usb/proc.h
new file mode 100644
index 000000000000..a45b765e4cf1
--- /dev/null
+++ b/sound/usb/proc.h
@@ -0,0 +1,8 @@
+#ifndef __USBAUDIO_PROC_H
+#define __USBAUDIO_PROC_H
+
+void snd_usb_audio_create_proc(struct snd_usb_audio *chip);
+void snd_usb_proc_pcm_format_add(struct snd_usb_stream *stream);
+
+#endif /* __USBAUDIO_PROC_H */
+
diff --git a/sound/usb/usbquirks.h b/sound/usb/quirks-table.h
index 2b426c1fd0e8..91ddef31bcbd 100644
--- a/sound/usb/usbquirks.h
+++ b/sound/usb/quirks-table.h
@@ -279,7 +279,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
.ifnum = 0,
.type = QUIRK_AUDIO_FIXED_ENDPOINT,
.data = & (const struct audioformat) {
- .format = SNDRV_PCM_FORMAT_S16_LE,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
.channels = 4,
.iface = 0,
.altsetting = 1,
@@ -296,7 +296,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
.ifnum = 1,
.type = QUIRK_AUDIO_FIXED_ENDPOINT,
.data = & (const struct audioformat) {
- .format = SNDRV_PCM_FORMAT_S16_LE,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
.channels = 2,
.iface = 1,
.altsetting = 1,
@@ -580,7 +580,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
.ifnum = 0,
.type = QUIRK_AUDIO_FIXED_ENDPOINT,
.data = & (const struct audioformat) {
- .format = SNDRV_PCM_FORMAT_S24_3LE,
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
.channels = 2,
.iface = 0,
.altsetting = 1,
@@ -597,7 +597,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
.ifnum = 1,
.type = QUIRK_AUDIO_FIXED_ENDPOINT,
.data = & (const struct audioformat) {
- .format = SNDRV_PCM_FORMAT_S24_3LE,
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
.channels = 2,
.iface = 1,
.altsetting = 1,
@@ -793,7 +793,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
.ifnum = 1,
.type = QUIRK_AUDIO_FIXED_ENDPOINT,
.data = & (const struct audioformat) {
- .format = SNDRV_PCM_FORMAT_S24_3LE,
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
.channels = 2,
.iface = 1,
.altsetting = 1,
@@ -810,7 +810,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
.ifnum = 2,
.type = QUIRK_AUDIO_FIXED_ENDPOINT,
.data = & (const struct audioformat) {
- .format = SNDRV_PCM_FORMAT_S24_3LE,
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
.channels = 2,
.iface = 2,
.altsetting = 1,
@@ -1826,6 +1826,60 @@ YAMAHA_DEVICE(0x7010, "UB99"),
}
}
},
+{
+ USB_DEVICE(0x0763, 0x2080),
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+ /* .vendor_name = "M-Audio", */
+ /* .product_name = "Fast Track Ultra 8", */
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = & (const struct snd_usb_audio_quirk[]) {
+ {
+ .ifnum = 0,
+ .type = QUIRK_IGNORE_INTERFACE
+ },
+ {
+ .ifnum = 1,
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
+ },
+ {
+ .ifnum = 2,
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
+ },
+ /* interface 3 (MIDI) is standard compliant */
+ {
+ .ifnum = -1
+ }
+ }
+ }
+},
+{
+ USB_DEVICE(0x0763, 0x2081),
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+ /* .vendor_name = "M-Audio", */
+ /* .product_name = "Fast Track Ultra 8R", */
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = & (const struct snd_usb_audio_quirk[]) {
+ {
+ .ifnum = 0,
+ .type = QUIRK_IGNORE_INTERFACE
+ },
+ {
+ .ifnum = 1,
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
+ },
+ {
+ .ifnum = 2,
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
+ },
+ /* interface 3 (MIDI) is standard compliant */
+ {
+ .ifnum = -1
+ }
+ }
+ }
+},
/* Casio devices */
{
@@ -2203,7 +2257,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
.ifnum = 1,
.type = QUIRK_AUDIO_FIXED_ENDPOINT,
.data = &(const struct audioformat) {
- .format = SNDRV_PCM_FORMAT_S24_3BE,
+ .formats = SNDRV_PCM_FMTBIT_S24_3BE,
.channels = 2,
.iface = 1,
.altsetting = 1,
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
new file mode 100644
index 000000000000..136e5b4cf6de
--- /dev/null
+++ b/sound/usb/quirks.c
@@ -0,0 +1,594 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/usb/audio.h>
+
+#include <sound/core.h>
+#include <sound/info.h>
+#include <sound/pcm.h>
+
+#include "usbaudio.h"
+#include "card.h"
+#include "mixer.h"
+#include "mixer_quirks.h"
+#include "midi.h"
+#include "quirks.h"
+#include "helper.h"
+#include "endpoint.h"
+#include "pcm.h"
+
+/*
+ * handle the quirks for the contained interfaces
+ */
+static int create_composite_quirk(struct snd_usb_audio *chip,
+ struct usb_interface *iface,
+ struct usb_driver *driver,
+ const struct snd_usb_audio_quirk *quirk)
+{
+ int probed_ifnum = get_iface_desc(iface->altsetting)->bInterfaceNumber;
+ int err;
+
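+ /* iterate over the nested quirk array, applying each entry to its
+ * interface and claiming interfaces other than the probed one
+ */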
+ for (quirk = quirk->data; quirk->ifnum >= 0; ++quirk) {
+ iface = usb_ifnum_to_if(chip->dev, quirk->ifnum);
+ if (!iface)
+ continue;
+ if (quirk->ifnum != probed_ifnum &&
+ usb_interface_claimed(iface))
+ continue;
+ err = snd_usb_create_quirk(chip, iface, driver, quirk);
+ if (err < 0)
+ return err;
+ if (quirk->ifnum != probed_ifnum)
+ usb_driver_claim_interface(driver, iface, (void *)-1L);
+ }
+ return 0;
+}
+
+static int ignore_interface_quirk(struct snd_usb_audio *chip,
+ struct usb_interface *iface,
+ struct usb_driver *driver,
+ const struct snd_usb_audio_quirk *quirk)
+{
+ return 0;
+}
+
+
+/*
+ * Allow alignment on audio sub-slot (channel samples) rather than
+ * on audio slots (audio frames)
+ */
+static int create_align_transfer_quirk(struct snd_usb_audio *chip,
+ struct usb_interface *iface,
+ struct usb_driver *driver,
+ const struct snd_usb_audio_quirk *quirk)
+{
+ chip->txfr_quirk = 1;
+ return 1; /* Continue with creating streams and mixer */
+}
+
+static int create_any_midi_quirk(struct snd_usb_audio *chip,
+ struct usb_interface *intf,
+ struct usb_driver *driver,
+ const struct snd_usb_audio_quirk *quirk)
+{
+ return snd_usbmidi_create(chip->card, intf, &chip->midi_list, quirk);
+}
+
+/*
+ * create a stream for an interface with proper descriptors
+ */
+static int create_standard_audio_quirk(struct snd_usb_audio *chip,
+ struct usb_interface *iface,
+ struct usb_driver *driver,
+ const struct snd_usb_audio_quirk *quirk)
+{
+ struct usb_host_interface *alts;
+ struct usb_interface_descriptor *altsd;
+ int err;
+
+ alts = &iface->altsetting[0];
+ altsd = get_iface_desc(alts);
+ err = snd_usb_parse_audio_endpoints(chip, altsd->bInterfaceNumber);
+ if (err < 0) {
+ snd_printk(KERN_ERR "cannot setup if %d: error %d\n",
+ altsd->bInterfaceNumber, err);
+ return err;
+ }
+ /* reset the current interface */
+ usb_set_interface(chip->dev, altsd->bInterfaceNumber, 0);
+ return 0;
+}
+
+/*
+ * create a stream for an endpoint/altsetting without proper descriptors
+ */
+static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
+ struct usb_interface *iface,
+ struct usb_driver *driver,
+ const struct snd_usb_audio_quirk *quirk)
+{
+ struct audioformat *fp;
+ struct usb_host_interface *alts;
+ int stream, err;
+ unsigned *rate_table = NULL;
+
+ fp = kmemdup(quirk->data, sizeof(*fp), GFP_KERNEL);
+ if (! fp) {
+ snd_printk(KERN_ERR "cannot memdup\n");
+ return -ENOMEM;
+ }
+ if (fp->nr_rates > 0) {
+ rate_table = kmalloc(sizeof(int) * fp->nr_rates, GFP_KERNEL);
+ if (!rate_table) {
+ kfree(fp);
+ return -ENOMEM;
+ }
+ memcpy(rate_table, fp->rate_table, sizeof(int) * fp->nr_rates);
+ fp->rate_table = rate_table;
+ }
+
+ stream = (fp->endpoint & USB_DIR_IN)
+ ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
+ err = snd_usb_add_audio_endpoint(chip, stream, fp);
+ if (err < 0) {
+ kfree(fp);
+ kfree(rate_table);
+ return err;
+ }
+ if (fp->iface != get_iface_desc(&iface->altsetting[0])->bInterfaceNumber ||
+ fp->altset_idx >= iface->num_altsetting) {
+ kfree(fp);
+ kfree(rate_table);
+ return -EINVAL;
+ }
+ alts = &iface->altsetting[fp->altset_idx];
+ fp->datainterval = snd_usb_parse_datainterval(chip, alts);
+ fp->maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize);
+ usb_set_interface(chip->dev, fp->iface, 0);
+ snd_usb_init_pitch(chip, fp->iface, alts, fp);
+ snd_usb_init_sample_rate(chip, fp->iface, alts, fp, fp->rate_max);
+ return 0;
+}
+
+/*
+ * Create a stream for an Edirol UA-700/UA-25/UA-4FX interface.
+ * The only way to detect the sample rate is by looking at wMaxPacketSize.
+ */
+static int create_uaxx_quirk(struct snd_usb_audio *chip,
+ struct usb_interface *iface,
+ struct usb_driver *driver,
+ const struct snd_usb_audio_quirk *quirk)
+{
+ static const struct audioformat ua_format = {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 2,
+ .fmt_type = UAC_FORMAT_TYPE_I,
+ .altsetting = 1,
+ .altset_idx = 1,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ };
+ struct usb_host_interface *alts;
+ struct usb_interface_descriptor *altsd;
+ struct audioformat *fp;
+ int stream, err;
+
+ /* both PCM and MIDI interfaces have 2 or more altsettings */
+ if (iface->num_altsetting < 2)
+ return -ENXIO;
+ alts = &iface->altsetting[1];
+ altsd = get_iface_desc(alts);
+
+ if (altsd->bNumEndpoints == 2) {
+ static const struct snd_usb_midi_endpoint_info ua700_ep = {
+ .out_cables = 0x0003,
+ .in_cables = 0x0003
+ };
+ static const struct snd_usb_audio_quirk ua700_quirk = {
+ .type = QUIRK_MIDI_FIXED_ENDPOINT,
+ .data = &ua700_ep
+ };
+ static const struct snd_usb_midi_endpoint_info uaxx_ep = {
+ .out_cables = 0x0001,
+ .in_cables = 0x0001
+ };
+ static const struct snd_usb_audio_quirk uaxx_quirk = {
+ .type = QUIRK_MIDI_FIXED_ENDPOINT,
+ .data = &uaxx_ep
+ };
+ const struct snd_usb_audio_quirk *quirk =
+ chip->usb_id == USB_ID(0x0582, 0x002b)
+ ? &ua700_quirk : &uaxx_quirk;
+ return snd_usbmidi_create(chip->card, iface,
+ &chip->midi_list, quirk);
+ }
+
+ if (altsd->bNumEndpoints != 1)
+ return -ENXIO;
+
+ fp = kmalloc(sizeof(*fp), GFP_KERNEL);
+ if (!fp)
+ return -ENOMEM;
+ memcpy(fp, &ua_format, sizeof(*fp));
+
+ fp->iface = altsd->bInterfaceNumber;
+ fp->endpoint = get_endpoint(alts, 0)->bEndpointAddress;
+ fp->ep_attr = get_endpoint(alts, 0)->bmAttributes;
+ fp->datainterval = 0;
+ fp->maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize);
+
+ switch (fp->maxpacksize) {
+ case 0x120:
+ fp->rate_max = fp->rate_min = 44100;
+ break;
+ case 0x138:
+ case 0x140:
+ fp->rate_max = fp->rate_min = 48000;
+ break;
+ case 0x258:
+ case 0x260:
+ fp->rate_max = fp->rate_min = 96000;
+ break;
+ default:
+ snd_printk(KERN_ERR "unknown sample rate\n");
+ kfree(fp);
+ return -ENXIO;
+ }
+
+ stream = (fp->endpoint & USB_DIR_IN)
+ ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
+ err = snd_usb_add_audio_endpoint(chip, stream, fp);
+ if (err < 0) {
+ kfree(fp);
+ return err;
+ }
+ usb_set_interface(chip->dev, fp->iface, 0);
+ return 0;
+}
+
+/*
+ * audio-interface quirks
+ *
+ * returns zero if no standard audio/MIDI parsing is needed.
+ * returns a positive value if standard audio/midi interfaces are parsed
+ * after this.
+ * returns a negative value at error.
+ */
+int snd_usb_create_quirk(struct snd_usb_audio *chip,
+ struct usb_interface *iface,
+ struct usb_driver *driver,
+ const struct snd_usb_audio_quirk *quirk)
+{
+ typedef int (*quirk_func_t)(struct snd_usb_audio *,
+ struct usb_interface *,
+ struct usb_driver *,
+ const struct snd_usb_audio_quirk *);
+ static const quirk_func_t quirk_funcs[] = {
+ [QUIRK_IGNORE_INTERFACE] = ignore_interface_quirk,
+ [QUIRK_COMPOSITE] = create_composite_quirk,
+ [QUIRK_MIDI_STANDARD_INTERFACE] = create_any_midi_quirk,
+ [QUIRK_MIDI_FIXED_ENDPOINT] = create_any_midi_quirk,
+ [QUIRK_MIDI_YAMAHA] = create_any_midi_quirk,
+ [QUIRK_MIDI_MIDIMAN] = create_any_midi_quirk,
+ [QUIRK_MIDI_NOVATION] = create_any_midi_quirk,
+ [QUIRK_MIDI_FASTLANE] = create_any_midi_quirk,
+ [QUIRK_MIDI_EMAGIC] = create_any_midi_quirk,
+ [QUIRK_MIDI_CME] = create_any_midi_quirk,
+ [QUIRK_AUDIO_STANDARD_INTERFACE] = create_standard_audio_quirk,
+ [QUIRK_AUDIO_FIXED_ENDPOINT] = create_fixed_stream_quirk,
+ [QUIRK_AUDIO_EDIROL_UAXX] = create_uaxx_quirk,
+ [QUIRK_AUDIO_ALIGN_TRANSFER] = create_align_transfer_quirk
+ };
+
+ if (quirk->type < QUIRK_TYPE_COUNT) {
+ return quirk_funcs[quirk->type](chip, iface, driver, quirk);
+ } else {
+ snd_printd(KERN_ERR "invalid quirk type %d\n", quirk->type);
+ return -ENXIO;
+ }
+}
+
+/*
+ * boot quirks
+ */
+
+#define EXTIGY_FIRMWARE_SIZE_OLD 794
+#define EXTIGY_FIRMWARE_SIZE_NEW 483
+
+static int snd_usb_extigy_boot_quirk(struct usb_device *dev, struct usb_interface *intf)
+{
+ struct usb_host_config *config = dev->actconfig;
+ int err;
+
+ if (le16_to_cpu(get_cfg_desc(config)->wTotalLength) == EXTIGY_FIRMWARE_SIZE_OLD ||
+ le16_to_cpu(get_cfg_desc(config)->wTotalLength) == EXTIGY_FIRMWARE_SIZE_NEW) {
+ snd_printdd("sending Extigy boot sequence...\n");
+ /* Send message to force it to reconnect with full interface. */
+ err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev,0),
+ 0x10, 0x43, 0x0001, 0x000a, NULL, 0, 1000);
+ if (err < 0) snd_printdd("error sending boot message: %d\n", err);
+ err = usb_get_descriptor(dev, USB_DT_DEVICE, 0,
+ &dev->descriptor, sizeof(dev->descriptor));
+ config = dev->actconfig;
+ if (err < 0) snd_printdd("error usb_get_descriptor: %d\n", err);
+ err = usb_reset_configuration(dev);
+ if (err < 0) snd_printdd("error usb_reset_configuration: %d\n", err);
+ snd_printdd("extigy_boot: new boot length = %d\n",
+ le16_to_cpu(get_cfg_desc(config)->wTotalLength));
+ return -ENODEV; /* quit this anyway */
+ }
+ return 0;
+}
+
+static int snd_usb_audigy2nx_boot_quirk(struct usb_device *dev)
+{
+ u8 buf = 1;
+
+ snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), 0x2a,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER,
+ 0, 0, &buf, 1, 1000);
+ if (buf == 0) {
+ snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), 0x29,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
+ 1, 2000, NULL, 0, 1000);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+/*
+ * C-Media CM106/CM106+ have four 16-bit internal registers that are nicely
+ * documented in the device's data sheet.
+ */
+static int snd_usb_cm106_write_int_reg(struct usb_device *dev, int reg, u16 value)
+{
+ u8 buf[4];
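+ /* 4-byte payload: 0x20, value LSB, value MSB, register index */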
+ buf[0] = 0x20;
+ buf[1] = value & 0xff;
+ buf[2] = (value >> 8) & 0xff;
+ buf[3] = reg;
+ return snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), USB_REQ_SET_CONFIGURATION,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT,
+ 0, 0, &buf, 4, 1000);
+}
+
+static int snd_usb_cm106_boot_quirk(struct usb_device *dev)
+{
+ /*
+ * Enable line-out driver mode, set headphone source to front
+ * channels, enable stereo mic.
+ */
+ return snd_usb_cm106_write_int_reg(dev, 2, 0x8004);
+}
+
+/*
+ * C-Media CM6206 is based on CM106 with two additional
+ * registers that are not documented in the data sheet.
+ * Values here are chosen based on sniffing USB traffic
+ * under Windows.
+ */
+static int snd_usb_cm6206_boot_quirk(struct usb_device *dev)
+{
+ int err, reg;
+ int val[] = {0x200c, 0x3000, 0xf800, 0x143f, 0x0000, 0x3000};
+
+ for (reg = 0; reg < ARRAY_SIZE(val); reg++) {
+ err = snd_usb_cm106_write_int_reg(dev, reg, val[reg]);
+ if (err < 0)
+ return err;
+ }
+
+ return err;
+}
+
+/*
+ * This call will put the synth in "USB send" mode, i.e. it will send MIDI
+ * messages through USB (this is disabled at startup). The synth will
+ * acknowledge by sending a sysex on endpoint 0x85 and by displaying a USB
+ * sign on its LCD. Values here are chosen based on sniffing USB traffic
+ * under Windows.
+ */
+static int snd_usb_accessmusic_boot_quirk(struct usb_device *dev)
+{
+ int err, actual_length;
+
+ /* "midi send" enable */
+ static const u8 seq[] = { 0x4e, 0x73, 0x52, 0x01 };
+
+ void *buf = kmemdup(seq, ARRAY_SIZE(seq), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ err = usb_interrupt_msg(dev, usb_sndintpipe(dev, 0x05), buf,
+ ARRAY_SIZE(seq), &actual_length, 1000);
+ kfree(buf);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+/*
+ * Setup quirks
+ */
+#define AUDIOPHILE_SET 0x01 /* if set, parse device_setup */
+#define AUDIOPHILE_SET_DTS 0x02 /* if set, enable DTS Digital Output */
+#define AUDIOPHILE_SET_96K 0x04 /* 48-96KHz rate if set, 8-48KHz otherwise */
+#define AUDIOPHILE_SET_24B 0x08 /* 24bits sample if set, 16bits otherwise */
+#define AUDIOPHILE_SET_DI 0x10 /* if set, enable Digital Input */
+#define AUDIOPHILE_SET_MASK 0x1F /* bit mask for setup value */
+#define AUDIOPHILE_SET_24B_48K_DI 0x19 /* value for 24bits+48KHz+Digital Input */
+#define AUDIOPHILE_SET_24B_48K_NOTDI 0x09 /* value for 24bits+48KHz+No Digital Input */
+#define AUDIOPHILE_SET_16B_48K_DI 0x11 /* value for 16bits+48KHz+Digital Input */
+#define AUDIOPHILE_SET_16B_48K_NOTDI 0x01 /* value for 16bits+48KHz+No Digital Input */
+
+static int audiophile_skip_setting_quirk(struct snd_usb_audio *chip,
+ int iface,
+ int altno)
+{
+ /* Reset ALL ifaces to 0 altsetting.
+ * Call it for every possible altsetting of every interface.
+ */
+ usb_set_interface(chip->dev, iface, 0);
+
+ if (chip->setup & AUDIOPHILE_SET) {
+ if ((chip->setup & AUDIOPHILE_SET_DTS)
+ && altno != 6)
+ return 1; /* skip this altsetting */
+ if ((chip->setup & AUDIOPHILE_SET_96K)
+ && altno != 1)
+ return 1; /* skip this altsetting */
+ if ((chip->setup & AUDIOPHILE_SET_MASK) ==
+ AUDIOPHILE_SET_24B_48K_DI && altno != 2)
+ return 1; /* skip this altsetting */
+ if ((chip->setup & AUDIOPHILE_SET_MASK) ==
+ AUDIOPHILE_SET_24B_48K_NOTDI && altno != 3)
+ return 1; /* skip this altsetting */
+ if ((chip->setup & AUDIOPHILE_SET_MASK) ==
+ AUDIOPHILE_SET_16B_48K_DI && altno != 4)
+ return 1; /* skip this altsetting */
+ if ((chip->setup & AUDIOPHILE_SET_MASK) ==
+ AUDIOPHILE_SET_16B_48K_NOTDI && altno != 5)
+ return 1; /* skip this altsetting */
+ }
+
+ return 0; /* keep this altsetting */
+}
+
+int snd_usb_apply_interface_quirk(struct snd_usb_audio *chip,
+ int iface,
+ int altno)
+{
+ /* audiophile usb: skip altsets incompatible with device_setup */
+ if (chip->usb_id == USB_ID(0x0763, 0x2003))
+ return audiophile_skip_setting_quirk(chip, iface, altno);
+
+ return 0;
+}
+
+int snd_usb_apply_boot_quirk(struct usb_device *dev,
+ struct usb_interface *intf,
+ const struct snd_usb_audio_quirk *quirk)
+{
+ u32 id = USB_ID(le16_to_cpu(dev->descriptor.idVendor),
+ le16_to_cpu(dev->descriptor.idProduct));
+
+ /* SB Extigy needs special boot-up sequence */
+ /* if more models come, this will go to the quirk list. */
+ if (id == USB_ID(0x041e, 0x3000))
+ return snd_usb_extigy_boot_quirk(dev, intf);
+
+ /* SB Audigy 2 NX needs its own boot-up magic, too */
+ if (id == USB_ID(0x041e, 0x3020))
+ return snd_usb_audigy2nx_boot_quirk(dev);
+
+ /* C-Media CM106 / Turtle Beach Audio Advantage Roadie */
+ if (id == USB_ID(0x10f5, 0x0200))
+ return snd_usb_cm106_boot_quirk(dev);
+
+ /* C-Media CM6206 / CM106-Like Sound Device */
+ if (id == USB_ID(0x0d8c, 0x0102))
+ return snd_usb_cm6206_boot_quirk(dev);
+
+ /* Access Music VirusTI Desktop */
+ if (id == USB_ID(0x133e, 0x0815))
+ return snd_usb_accessmusic_boot_quirk(dev);
+
+ return 0;
+}
+
+/*
+ * check if the device uses big-endian samples
+ */
+int snd_usb_is_big_endian_format(struct snd_usb_audio *chip, struct audioformat *fp)
+{
+ switch (chip->usb_id) {
+ case USB_ID(0x0763, 0x2001): /* M-Audio Quattro: captured data only */
+ if (fp->endpoint & USB_DIR_IN)
+ return 1;
+ break;
+ case USB_ID(0x0763, 0x2003): /* M-Audio Audiophile USB */
+ if (chip->setup == 0x00 ||
+ fp->altsetting == 1 || fp->altsetting == 2 || fp->altsetting == 3)
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * For E-Mu 0404USB/0202USB/TrackerPre, the sample rate should be set on the
+ * device, not per interface.
+ */
+
+enum {
+ EMU_QUIRK_SR_44100HZ = 0,
+ EMU_QUIRK_SR_48000HZ,
+ EMU_QUIRK_SR_88200HZ,
+ EMU_QUIRK_SR_96000HZ,
+ EMU_QUIRK_SR_176400HZ,
+ EMU_QUIRK_SR_192000HZ
+};
+
+static void set_format_emu_quirk(struct snd_usb_substream *subs,
+ struct audioformat *fmt)
+{
+ unsigned char emu_samplerate_id = 0;
+
+ /* When capture is active, the sample rate shouldn't be
+ * changed by the playback substream.
+ */
+ if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) {
+ if (subs->stream->substream[SNDRV_PCM_STREAM_CAPTURE].interface != -1)
+ return;
+ }
+
+ switch (fmt->rate_min) {
+ case 48000:
+ emu_samplerate_id = EMU_QUIRK_SR_48000HZ;
+ break;
+ case 88200:
+ emu_samplerate_id = EMU_QUIRK_SR_88200HZ;
+ break;
+ case 96000:
+ emu_samplerate_id = EMU_QUIRK_SR_96000HZ;
+ break;
+ case 176400:
+ emu_samplerate_id = EMU_QUIRK_SR_176400HZ;
+ break;
+ case 192000:
+ emu_samplerate_id = EMU_QUIRK_SR_192000HZ;
+ break;
+ default:
+ emu_samplerate_id = EMU_QUIRK_SR_44100HZ;
+ break;
+ }
+ snd_emuusb_set_samplerate(subs->stream->chip, emu_samplerate_id);
+}
+
+void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
+ struct audioformat *fmt)
+{
+ switch (subs->stream->chip->usb_id) {
+ case USB_ID(0x041e, 0x3f02): /* E-Mu 0202 USB */
+ case USB_ID(0x041e, 0x3f04): /* E-Mu 0404 USB */
+ case USB_ID(0x041e, 0x3f0a): /* E-Mu Tracker Pre */
+ set_format_emu_quirk(subs, fmt);
+ break;
+ }
+}
+
diff --git a/sound/usb/quirks.h b/sound/usb/quirks.h
new file mode 100644
index 000000000000..03e5e94098cd
--- /dev/null
+++ b/sound/usb/quirks.h
@@ -0,0 +1,23 @@
+#ifndef __USBAUDIO_QUIRKS_H
+#define __USBAUDIO_QUIRKS_H
+
+int snd_usb_create_quirk(struct snd_usb_audio *chip,
+ struct usb_interface *iface,
+ struct usb_driver *driver,
+ const struct snd_usb_audio_quirk *quirk);
+
+int snd_usb_apply_interface_quirk(struct snd_usb_audio *chip,
+ int iface,
+ int altno);
+
+int snd_usb_apply_boot_quirk(struct usb_device *dev,
+ struct usb_interface *intf,
+ const struct snd_usb_audio_quirk *quirk);
+
+void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
+ struct audioformat *fmt);
+
+int snd_usb_is_big_endian_format(struct snd_usb_audio *chip,
+ struct audioformat *fp);
+
+#endif /* __USBAUDIO_QUIRKS_H */
diff --git a/sound/usb/urb.c b/sound/usb/urb.c
new file mode 100644
index 000000000000..5570a2ba5736
--- /dev/null
+++ b/sound/usb/urb.c
@@ -0,0 +1,995 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/gfp.h>
+#include <linux/init.h>
+#include <linux/usb.h>
+#include <linux/usb/audio.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+
+#include "usbaudio.h"
+#include "helper.h"
+#include "card.h"
+#include "urb.h"
+#include "pcm.h"
+
+/*
+ * convert a sampling rate into our full speed format (fs/1000 in Q16.16)
+ * this will overflow at approx 524 kHz
+ */
+static inline unsigned get_usb_full_speed_rate(unsigned int rate)
+{
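+ /* rate/1000 in Q16.16 is rate * 65536 / 1000 = (rate << 13) / 125;
+ * adding 62 (about half of 125) rounds the division to nearest
+ */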
+ return ((rate << 13) + 62) / 125;
+}
+
+/*
+ * convert a sampling rate into USB high speed format (fs/8000 in Q16.16)
+ * this will overflow at approx 4 MHz
+ */
+static inline unsigned get_usb_high_speed_rate(unsigned int rate)
+{
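+ /* rate/8000 in Q16.16 is rate * 65536 / 8000 = (rate << 10) / 125;
+ * adding 62 (about half of 125) rounds the division to nearest
+ */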
+ return ((rate << 10) + 62) / 125;
+}
+
+/*
+ * unlink active urbs.
+ */
+static int deactivate_urbs(struct snd_usb_substream *subs, int force, int can_sleep)
+{
+ struct snd_usb_audio *chip = subs->stream->chip;
+ unsigned int i;
+ int async;
+
+ subs->running = 0;
+
+ if (!force && subs->stream->chip->shutdown) /* to be sure... */
+ return -EBADFD;
+
+ async = !can_sleep && chip->async_unlink;
+
+ if (!async && in_interrupt())
+ return 0;
+
+ for (i = 0; i < subs->nurbs; i++) {
+ if (test_bit(i, &subs->active_mask)) {
+ if (!test_and_set_bit(i, &subs->unlink_mask)) {
+ struct urb *u = subs->dataurb[i].urb;
+ if (async)
+ usb_unlink_urb(u);
+ else
+ usb_kill_urb(u);
+ }
+ }
+ }
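+ /* sync urbs are tracked in the upper bits (index + 16) of the masks */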
+ if (subs->syncpipe) {
+ for (i = 0; i < SYNC_URBS; i++) {
+ if (test_bit(i+16, &subs->active_mask)) {
+ if (!test_and_set_bit(i+16, &subs->unlink_mask)) {
+ struct urb *u = subs->syncurb[i].urb;
+ if (async)
+ usb_unlink_urb(u);
+ else
+ usb_kill_urb(u);
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+
+/*
+ * release the data of a urb context
+ */
+static void release_urb_ctx(struct snd_urb_ctx *u)
+{
+ if (u->urb) {
+ if (u->buffer_size)
+ usb_buffer_free(u->subs->dev, u->buffer_size,
+ u->urb->transfer_buffer,
+ u->urb->transfer_dma);
+ usb_free_urb(u->urb);
+ u->urb = NULL;
+ }
+}
+
+/*
+ * wait until all urbs are processed.
+ */
+static int wait_clear_urbs(struct snd_usb_substream *subs)
+{
+ unsigned long end_time = jiffies + msecs_to_jiffies(1000);
+ unsigned int i;
+ int alive;
+
+ do {
+ alive = 0;
+ for (i = 0; i < subs->nurbs; i++) {
+ if (test_bit(i, &subs->active_mask))
+ alive++;
+ }
+ if (subs->syncpipe) {
+ for (i = 0; i < SYNC_URBS; i++) {
+ if (test_bit(i + 16, &subs->active_mask))
+ alive++;
+ }
+ }
+ if (! alive)
+ break;
+ schedule_timeout_uninterruptible(1);
+ } while (time_before(jiffies, end_time));
+ if (alive)
+ snd_printk(KERN_ERR "timeout: still %d active urbs..\n", alive);
+ return 0;
+}
+
+/*
+ * release a substream
+ */
+void snd_usb_release_substream_urbs(struct snd_usb_substream *subs, int force)
+{
+ int i;
+
+ /* stop urbs (to be sure) */
+ deactivate_urbs(subs, force, 1);
+ wait_clear_urbs(subs);
+
+ for (i = 0; i < MAX_URBS; i++)
+ release_urb_ctx(&subs->dataurb[i]);
+ for (i = 0; i < SYNC_URBS; i++)
+ release_urb_ctx(&subs->syncurb[i]);
+ usb_buffer_free(subs->dev, SYNC_URBS * 4,
+ subs->syncbuf, subs->sync_dma);
+ subs->syncbuf = NULL;
+ subs->nurbs = 0;
+}
+
+/*
+ * complete callback from data urb
+ */
+static void snd_complete_urb(struct urb *urb)
+{
+ struct snd_urb_ctx *ctx = urb->context;
+ struct snd_usb_substream *subs = ctx->subs;
+ struct snd_pcm_substream *substream = ctx->subs->pcm_substream;
+ int err = 0;
+
+ if ((subs->running && subs->ops.retire(subs, substream->runtime, urb)) ||
+ !subs->running || /* can be stopped during retire callback */
+ (err = subs->ops.prepare(subs, substream->runtime, urb)) < 0 ||
+ (err = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
+ clear_bit(ctx->index, &subs->active_mask);
+ if (err < 0) {
+ snd_printd(KERN_ERR "cannot submit urb (err = %d)\n", err);
+ snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
+ }
+ }
+}
+
+
+/*
+ * complete callback from sync urb
+ */
+static void snd_complete_sync_urb(struct urb *urb)
+{
+ struct snd_urb_ctx *ctx = urb->context;
+ struct snd_usb_substream *subs = ctx->subs;
+ struct snd_pcm_substream *substream = ctx->subs->pcm_substream;
+ int err = 0;
+
+ if ((subs->running && subs->ops.retire_sync(subs, substream->runtime, urb)) ||
+ !subs->running || /* can be stopped during retire callback */
+ (err = subs->ops.prepare_sync(subs, substream->runtime, urb)) < 0 ||
+ (err = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
+ clear_bit(ctx->index + 16, &subs->active_mask);
+ if (err < 0) {
+ snd_printd(KERN_ERR "cannot submit sync urb (err = %d)\n", err);
+ snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
+ }
+ }
+}
+
+
+/*
+ * initialize a substream for playback/capture
+ */
+int snd_usb_init_substream_urbs(struct snd_usb_substream *subs,
+ unsigned int period_bytes,
+ unsigned int rate,
+ unsigned int frame_bits)
+{
+ unsigned int maxsize, i;
+ int is_playback = subs->direction == SNDRV_PCM_STREAM_PLAYBACK;
+ unsigned int urb_packs, total_packs, packs_per_ms;
+ struct snd_usb_audio *chip = subs->stream->chip;
+
+ /* calculate the frequency in 16.16 format */
+ if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL)
+ subs->freqn = get_usb_full_speed_rate(rate);
+ else
+ subs->freqn = get_usb_high_speed_rate(rate);
+ subs->freqm = subs->freqn;
+ /* calculate max. frequency */
+ if (subs->maxpacksize) {
+ /* whatever fits into a max. size packet */
+ maxsize = subs->maxpacksize;
+ subs->freqmax = (maxsize / (frame_bits >> 3))
+ << (16 - subs->datainterval);
+ } else {
+ /* no max. packet size: just take 25% higher than nominal */
+ subs->freqmax = subs->freqn + (subs->freqn >> 2);
+ maxsize = ((subs->freqmax + 0xffff) * (frame_bits >> 3))
+ >> (16 - subs->datainterval);
+ }
+ subs->phase = 0;
+
+ if (subs->fill_max)
+ subs->curpacksize = subs->maxpacksize;
+ else
+ subs->curpacksize = maxsize;
+
+ if (snd_usb_get_speed(subs->dev) == USB_SPEED_HIGH)
+ packs_per_ms = 8 >> subs->datainterval;
+ else
+ packs_per_ms = 1;
+
+ if (is_playback) {
+ urb_packs = max(chip->nrpacks, 1);
+ urb_packs = min(urb_packs, (unsigned int)MAX_PACKS);
+ } else
+ urb_packs = 1;
+ urb_packs *= packs_per_ms;
+ if (subs->syncpipe)
+ urb_packs = min(urb_packs, 1U << subs->syncinterval);
+
+ /* decide how many packets to be used */
+ if (is_playback) {
+ unsigned int minsize, maxpacks;
+ /* determine how small a packet can be */
+ minsize = (subs->freqn >> (16 - subs->datainterval))
+ * (frame_bits >> 3);
+ /* with sync from device, assume it can be 12% lower */
+ if (subs->syncpipe)
+ minsize -= minsize >> 3;
+ minsize = max(minsize, 1u);
+ total_packs = (period_bytes + minsize - 1) / minsize;
+ /* we need at least two URBs for queueing */
+ if (total_packs < 2) {
+ total_packs = 2;
+ } else {
+ /* and we don't want too long a queue either */
+ maxpacks = max(MAX_QUEUE * packs_per_ms, urb_packs * 2);
+ total_packs = min(total_packs, maxpacks);
+ }
+ } else {
+ while (urb_packs > 1 && urb_packs * maxsize >= period_bytes)
+ urb_packs >>= 1;
+ total_packs = MAX_URBS * urb_packs;
+ }
+ subs->nurbs = (total_packs + urb_packs - 1) / urb_packs;
+ if (subs->nurbs > MAX_URBS) {
+ /* too much... */
+ subs->nurbs = MAX_URBS;
+ total_packs = MAX_URBS * urb_packs;
+ } else if (subs->nurbs < 2) {
+ /* too little - we need at least two packets
+ * to ensure contiguous playback/capture
+ */
+ subs->nurbs = 2;
+ }
+
+ /* allocate and initialize data urbs */
+ for (i = 0; i < subs->nurbs; i++) {
+ struct snd_urb_ctx *u = &subs->dataurb[i];
+ u->index = i;
+ u->subs = subs;
+ u->packets = (i + 1) * total_packs / subs->nurbs
+ - i * total_packs / subs->nurbs;
+ u->buffer_size = maxsize * u->packets;
+ if (subs->fmt_type == UAC_FORMAT_TYPE_II)
+ u->packets++; /* for transfer delimiter */
+ u->urb = usb_alloc_urb(u->packets, GFP_KERNEL);
+ if (!u->urb)
+ goto out_of_memory;
+ u->urb->transfer_buffer =
+ usb_buffer_alloc(subs->dev, u->buffer_size, GFP_KERNEL,
+ &u->urb->transfer_dma);
+ if (!u->urb->transfer_buffer)
+ goto out_of_memory;
+ u->urb->pipe = subs->datapipe;
+ u->urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
+ u->urb->interval = 1 << subs->datainterval;
+ u->urb->context = u;
+ u->urb->complete = snd_complete_urb;
+ }
+
+ if (subs->syncpipe) {
+ /* allocate and initialize sync urbs */
+ subs->syncbuf = usb_buffer_alloc(subs->dev, SYNC_URBS * 4,
+ GFP_KERNEL, &subs->sync_dma);
+ if (!subs->syncbuf)
+ goto out_of_memory;
+ for (i = 0; i < SYNC_URBS; i++) {
+ struct snd_urb_ctx *u = &subs->syncurb[i];
+ u->index = i;
+ u->subs = subs;
+ u->packets = 1;
+ u->urb = usb_alloc_urb(1, GFP_KERNEL);
+ if (!u->urb)
+ goto out_of_memory;
+ u->urb->transfer_buffer = subs->syncbuf + i * 4;
+ u->urb->transfer_dma = subs->sync_dma + i * 4;
+ u->urb->transfer_buffer_length = 4;
+ u->urb->pipe = subs->syncpipe;
+ u->urb->transfer_flags = URB_ISO_ASAP |
+ URB_NO_TRANSFER_DMA_MAP;
+ u->urb->number_of_packets = 1;
+ u->urb->interval = 1 << subs->syncinterval;
+ u->urb->context = u;
+ u->urb->complete = snd_complete_sync_urb;
+ }
+ }
+ return 0;
+
+out_of_memory:
+ snd_usb_release_substream_urbs(subs, 0);
+ return -ENOMEM;
+}
+
+/*
+ * prepare urb for full speed capture sync pipe
+ *
+ * fill the length and offset of each urb descriptor.
+ * the fixed 10.14 frequency is passed through the pipe.
+ */
+static int prepare_capture_sync_urb(struct snd_usb_substream *subs,
+ struct snd_pcm_runtime *runtime,
+ struct urb *urb)
+{
+ unsigned char *cp = urb->transfer_buffer;
+ struct snd_urb_ctx *ctx = urb->context;
+
+ urb->dev = ctx->subs->dev; /* we need to set this at each time */
+ urb->iso_frame_desc[0].length = 3;
+ urb->iso_frame_desc[0].offset = 0;
+ cp[0] = subs->freqn >> 2;
+ cp[1] = subs->freqn >> 10;
+ cp[2] = subs->freqn >> 18;
+ return 0;
+}
+
+/*
+ * prepare urb for high speed capture sync pipe
+ *
+ * fill the length and offset of each urb descriptor.
+ * the fixed 12.13 frequency is passed as 16.16 through the pipe.
+ */
+static int prepare_capture_sync_urb_hs(struct snd_usb_substream *subs,
+ struct snd_pcm_runtime *runtime,
+ struct urb *urb)
+{
+ unsigned char *cp = urb->transfer_buffer;
+ struct snd_urb_ctx *ctx = urb->context;
+
+ urb->dev = ctx->subs->dev; /* we need to set this at each time */
+ urb->iso_frame_desc[0].length = 4;
+ urb->iso_frame_desc[0].offset = 0;
+ cp[0] = subs->freqn;
+ cp[1] = subs->freqn >> 8;
+ cp[2] = subs->freqn >> 16;
+ cp[3] = subs->freqn >> 24;
+ return 0;
+}
+
+/*
+ * process after capture sync complete
+ * - nothing to do
+ */
+static int retire_capture_sync_urb(struct snd_usb_substream *subs,
+ struct snd_pcm_runtime *runtime,
+ struct urb *urb)
+{
+ return 0;
+}
+
+/*
+ * prepare urb for capture data pipe
+ *
+ * fill the offset and length of each descriptor.
+ *
+ * we use a temporary buffer to write the captured data.
+ * since the length of written data is determined by host, we cannot
+ * write onto the pcm buffer directly... the data is thus copied
+ * later at complete callback to the global buffer.
+ */
+static int prepare_capture_urb(struct snd_usb_substream *subs,
+ struct snd_pcm_runtime *runtime,
+ struct urb *urb)
+{
+ int i, offs;
+ struct snd_urb_ctx *ctx = urb->context;
+
+ offs = 0;
+ urb->dev = ctx->subs->dev; /* we need to set this at each time */
+ for (i = 0; i < ctx->packets; i++) {
+ urb->iso_frame_desc[i].offset = offs;
+ urb->iso_frame_desc[i].length = subs->curpacksize;
+ offs += subs->curpacksize;
+ }
+ urb->transfer_buffer_length = offs;
+ urb->number_of_packets = ctx->packets;
+ return 0;
+}
+
+/*
+ * process after capture complete
+ *
+ * copy the data from each descriptor to the pcm buffer, and
+ * update the current position.
+ */
+static int retire_capture_urb(struct snd_usb_substream *subs,
+ struct snd_pcm_runtime *runtime,
+ struct urb *urb)
+{
+ unsigned long flags;
+ unsigned char *cp;
+ int i;
+ unsigned int stride, frames, bytes, oldptr;
+ int period_elapsed = 0;
+
+ stride = runtime->frame_bits >> 3;
+
+ for (i = 0; i < urb->number_of_packets; i++) {
+ cp = (unsigned char *)urb->transfer_buffer + urb->iso_frame_desc[i].offset;
+ if (urb->iso_frame_desc[i].status) {
+ snd_printd(KERN_ERR "frame %d active: %d\n", i, urb->iso_frame_desc[i].status);
+ // continue;
+ }
+ bytes = urb->iso_frame_desc[i].actual_length;
+ frames = bytes / stride;
+ if (!subs->txfr_quirk)
+ bytes = frames * stride;
+ if (bytes % (runtime->sample_bits >> 3) != 0) {
+#ifdef CONFIG_SND_DEBUG_VERBOSE
+ int oldbytes = bytes;
+#endif
+ bytes = frames * stride;
+ snd_printdd(KERN_ERR "Corrected urb data len. %d->%d\n",
+ oldbytes, bytes);
+ }
+ /* update the current pointer */
+ spin_lock_irqsave(&subs->lock, flags);
+ oldptr = subs->hwptr_done;
+ subs->hwptr_done += bytes;
+ if (subs->hwptr_done >= runtime->buffer_size * stride)
+ subs->hwptr_done -= runtime->buffer_size * stride;
+ frames = (bytes + (oldptr % stride)) / stride;
+ subs->transfer_done += frames;
+ if (subs->transfer_done >= runtime->period_size) {
+ subs->transfer_done -= runtime->period_size;
+ period_elapsed = 1;
+ }
+ spin_unlock_irqrestore(&subs->lock, flags);
+ /* copy a data chunk */
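+ /* a chunk that crosses the end of the ring buffer is copied in two
+  * parts: first the tail of the buffer, then the head */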
+ if (oldptr + bytes > runtime->buffer_size * stride) {
+ unsigned int bytes1 =
+ runtime->buffer_size * stride - oldptr;
+ memcpy(runtime->dma_area + oldptr, cp, bytes1);
+ memcpy(runtime->dma_area, cp + bytes1, bytes - bytes1);
+ } else {
+ memcpy(runtime->dma_area + oldptr, cp, bytes);
+ }
+ }
+ if (period_elapsed)
+ snd_pcm_period_elapsed(subs->pcm_substream);
+ return 0;
+}
+
+/*
+ * Process after capture complete when paused. Nothing to do.
+ */
+static int retire_paused_capture_urb(struct snd_usb_substream *subs,
+ struct snd_pcm_runtime *runtime,
+ struct urb *urb)
+{
+ return 0;
+}
+
+
+/*
+ * prepare urb for full speed playback sync pipe
+ *
+ * set up the offset and length to receive the current frequency.
+ */
+
+static int prepare_playback_sync_urb(struct snd_usb_substream *subs,
+ struct snd_pcm_runtime *runtime,
+ struct urb *urb)
+{
+ struct snd_urb_ctx *ctx = urb->context;
+
+ urb->dev = ctx->subs->dev; /* we need to set this every time */
+ urb->iso_frame_desc[0].length = 3;
+ urb->iso_frame_desc[0].offset = 0;
+ return 0;
+}
+
+/*
+ * prepare urb for high speed playback sync pipe
+ *
+ * set up the offset and length to receive the current frequency.
+ */
+
+static int prepare_playback_sync_urb_hs(struct snd_usb_substream *subs,
+ struct snd_pcm_runtime *runtime,
+ struct urb *urb)
+{
+ struct snd_urb_ctx *ctx = urb->context;
+
+ urb->dev = ctx->subs->dev; /* we need to set this every time */
+ urb->iso_frame_desc[0].length = 4;
+ urb->iso_frame_desc[0].offset = 0;
+ return 0;
+}
+
+/*
+ * process after full speed playback sync complete
+ *
+ * retrieve the current 10.14 frequency from pipe, and set it.
+ * the value is then used in prepare_playback_urb().
+ */
+static int retire_playback_sync_urb(struct snd_usb_substream *subs,
+ struct snd_pcm_runtime *runtime,
+ struct urb *urb)
+{
+ unsigned int f;
+ unsigned long flags;
+
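+ /*
+  * note: combine_triple() reassembles the 3-byte 10.14 feedback value;
+  * shifting it left by 2 yields the driver's Q16.16 representation.
+  * Values outside roughly [freqn - 12.5%, freqmax] are ignored as
+  * glitches.
+  */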
+ if (urb->iso_frame_desc[0].status == 0 &&
+ urb->iso_frame_desc[0].actual_length == 3) {
+ f = combine_triple((u8*)urb->transfer_buffer) << 2;
+ if (f >= subs->freqn - subs->freqn / 8 && f <= subs->freqmax) {
+ spin_lock_irqsave(&subs->lock, flags);
+ subs->freqm = f;
+ spin_unlock_irqrestore(&subs->lock, flags);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * process after high speed playback sync complete
+ *
+ * retrieve the current 12.13 frequency from pipe, and set it.
+ * the value is then used in prepare_playback_urb().
+ */
+static int retire_playback_sync_urb_hs(struct snd_usb_substream *subs,
+ struct snd_pcm_runtime *runtime,
+ struct urb *urb)
+{
+ unsigned int f;
+ unsigned long flags;
+
+ if (urb->iso_frame_desc[0].status == 0 &&
+ urb->iso_frame_desc[0].actual_length == 4) {
+ f = combine_quad((u8*)urb->transfer_buffer) & 0x0fffffff;
+ if (f >= subs->freqn - subs->freqn / 8 && f <= subs->freqmax) {
+ spin_lock_irqsave(&subs->lock, flags);
+ subs->freqm = f;
+ spin_unlock_irqrestore(&subs->lock, flags);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * process after E-Mu 0202/0404/Tracker Pre high speed playback sync complete
+ *
+ * These devices return the number of samples per packet instead of the number
+ * of samples per microframe.
+ */
+static int retire_playback_sync_urb_hs_emu(struct snd_usb_substream *subs,
+ struct snd_pcm_runtime *runtime,
+ struct urb *urb)
+{
+ unsigned int f;
+ unsigned long flags;
+
+ if (urb->iso_frame_desc[0].status == 0 &&
+ urb->iso_frame_desc[0].actual_length == 4) {
+ f = combine_quad((u8*)urb->transfer_buffer) & 0x0fffffff;
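+ /* convert from samples per packet to samples per microframe */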
+ f >>= subs->datainterval;
+ if (f >= subs->freqn - subs->freqn / 8 && f <= subs->freqmax) {
+ spin_lock_irqsave(&subs->lock, flags);
+ subs->freqm = f;
+ spin_unlock_irqrestore(&subs->lock, flags);
+ }
+ }
+
+ return 0;
+}
+
+/* determine the number of frames in the next packet */
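+/*
+ * freqm is the momentary rate in frames per (micro)frame in Q16.16;
+ * shifting by datainterval scales it to frames per packet interval.
+ * The low 16 bits of ->phase carry the fractional remainder from packet
+ * to packet, so e.g. 44.1 kHz at full speed comes out as a mix of 44-
+ * and 45-frame packets in the right proportion.
+ */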
+static int snd_usb_audio_next_packet_size(struct snd_usb_substream *subs)
+{
+ if (subs->fill_max)
+ return subs->maxframesize;
+ else {
+ subs->phase = (subs->phase & 0xffff)
+ + (subs->freqm << subs->datainterval);
+ return min(subs->phase >> 16, subs->maxframesize);
+ }
+}
+
+/*
+ * Prepare urb for streaming before playback starts or when paused.
+ *
+ * We don't have any data, so we send silence.
+ */
+static int prepare_nodata_playback_urb(struct snd_usb_substream *subs,
+ struct snd_pcm_runtime *runtime,
+ struct urb *urb)
+{
+ unsigned int i, offs, counts;
+ struct snd_urb_ctx *ctx = urb->context;
+ int stride = runtime->frame_bits >> 3;
+
+ offs = 0;
+ urb->dev = ctx->subs->dev;
+ for (i = 0; i < ctx->packets; ++i) {
+ counts = snd_usb_audio_next_packet_size(subs);
+ urb->iso_frame_desc[i].offset = offs * stride;
+ urb->iso_frame_desc[i].length = counts * stride;
+ offs += counts;
+ }
+ urb->number_of_packets = ctx->packets;
+ urb->transfer_buffer_length = offs * stride;
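+ /* fill with the format's silence value: 0x80 for unsigned 8-bit,
+  * 0 otherwise */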
+ memset(urb->transfer_buffer,
+ runtime->format == SNDRV_PCM_FORMAT_U8 ? 0x80 : 0,
+ offs * stride);
+ return 0;
+}
+
+/*
+ * prepare urb for playback data pipe
+ *
+ * Since a URB can handle only a single linear buffer, we must use double
+ * buffering when the data to be transferred overflows the buffer boundary.
+ * To avoid inconsistencies when updating hwptr_done, we use double buffering
+ * for all URBs.
+ */
+static int prepare_playback_urb(struct snd_usb_substream *subs,
+ struct snd_pcm_runtime *runtime,
+ struct urb *urb)
+{
+ int i, stride;
+ unsigned int counts, frames, bytes;
+ unsigned long flags;
+ int period_elapsed = 0;
+ struct snd_urb_ctx *ctx = urb->context;
+
+ stride = runtime->frame_bits >> 3;
+
+ frames = 0;
+ urb->dev = ctx->subs->dev; /* we need to set this every time */
+ urb->number_of_packets = 0;
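+ /*
+  * pack packets until the period boundary is reached; for format type
+  * II (compressed) streams, a zero-length descriptor is appended as a
+  * transfer delimiter when there is room left in the URB.
+  */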
+ spin_lock_irqsave(&subs->lock, flags);
+ for (i = 0; i < ctx->packets; i++) {
+ counts = snd_usb_audio_next_packet_size(subs);
+ /* set up descriptor */
+ urb->iso_frame_desc[i].offset = frames * stride;
+ urb->iso_frame_desc[i].length = counts * stride;
+ frames += counts;
+ urb->number_of_packets++;
+ subs->transfer_done += counts;
+ if (subs->transfer_done >= runtime->period_size) {
+ subs->transfer_done -= runtime->period_size;
+ period_elapsed = 1;
+ if (subs->fmt_type == UAC_FORMAT_TYPE_II) {
+ if (subs->transfer_done > 0) {
+ /* FIXME: fill-max mode is not
+ * supported yet */
+ frames -= subs->transfer_done;
+ counts -= subs->transfer_done;
+ urb->iso_frame_desc[i].length =
+ counts * stride;
+ subs->transfer_done = 0;
+ }
+ i++;
+ if (i < ctx->packets) {
+ /* add a transfer delimiter */
+ urb->iso_frame_desc[i].offset =
+ frames * stride;
+ urb->iso_frame_desc[i].length = 0;
+ urb->number_of_packets++;
+ }
+ break;
+ }
+ }
+ if (period_elapsed) /* finish at the period boundary */
+ break;
+ }
+ bytes = frames * stride;
+ if (subs->hwptr_done + bytes > runtime->buffer_size * stride) {
+ /* err, the transferred area goes over buffer boundary. */
+ unsigned int bytes1 =
+ runtime->buffer_size * stride - subs->hwptr_done;
+ memcpy(urb->transfer_buffer,
+ runtime->dma_area + subs->hwptr_done, bytes1);
+ memcpy(urb->transfer_buffer + bytes1,
+ runtime->dma_area, bytes - bytes1);
+ } else {
+ memcpy(urb->transfer_buffer,
+ runtime->dma_area + subs->hwptr_done, bytes);
+ }
+ subs->hwptr_done += bytes;
+ if (subs->hwptr_done >= runtime->buffer_size * stride)
+ subs->hwptr_done -= runtime->buffer_size * stride;
+ runtime->delay += frames;
+ spin_unlock_irqrestore(&subs->lock, flags);
+ urb->transfer_buffer_length = bytes;
+ if (period_elapsed)
+ snd_pcm_period_elapsed(subs->pcm_substream);
+ return 0;
+}
+
+/*
+ * process after playback data complete
+ * - decrease the delay count again
+ */
+static int retire_playback_urb(struct snd_usb_substream *subs,
+ struct snd_pcm_runtime *runtime,
+ struct urb *urb)
+{
+ unsigned long flags;
+ int stride = runtime->frame_bits >> 3;
+ int processed = urb->transfer_buffer_length / stride;
+
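+ /* runtime->delay was increased by the queued frames in
+  * prepare_playback_urb(); subtract what this urb has now completed,
+  * clamping at zero */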
+ spin_lock_irqsave(&subs->lock, flags);
+ if (processed > runtime->delay)
+ runtime->delay = 0;
+ else
+ runtime->delay -= processed;
+ spin_unlock_irqrestore(&subs->lock, flags);
+ return 0;
+}
+
+static const char *usb_error_string(int err)
+{
+ switch (err) {
+ case -ENODEV:
+ return "no device";
+ case -ENOENT:
+ return "endpoint not enabled";
+ case -EPIPE:
+ return "endpoint stalled";
+ case -ENOSPC:
+ return "not enough bandwidth";
+ case -ESHUTDOWN:
+ return "device disabled";
+ case -EHOSTUNREACH:
+ return "device suspended";
+ case -EINVAL:
+ case -EAGAIN:
+ case -EFBIG:
+ case -EMSGSIZE:
+ return "internal error";
+ default:
+ return "unknown error";
+ }
+}
+
+/*
+ * set up and start data/sync urbs
+ */
+static int start_urbs(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime)
+{
+ unsigned int i;
+ int err;
+
+ if (subs->stream->chip->shutdown)
+ return -EBADFD;
+
+ for (i = 0; i < subs->nurbs; i++) {
+ if (snd_BUG_ON(!subs->dataurb[i].urb))
+ return -EINVAL;
+ if (subs->ops.prepare(subs, runtime, subs->dataurb[i].urb) < 0) {
+ snd_printk(KERN_ERR "cannot prepare datapipe for urb %d\n", i);
+ goto __error;
+ }
+ }
+ if (subs->syncpipe) {
+ for (i = 0; i < SYNC_URBS; i++) {
+ if (snd_BUG_ON(!subs->syncurb[i].urb))
+ return -EINVAL;
+ if (subs->ops.prepare_sync(subs, runtime, subs->syncurb[i].urb) < 0) {
+ snd_printk(KERN_ERR "cannot prepare syncpipe for urb %d\n", i);
+ goto __error;
+ }
+ }
+ }
+
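+ /* data urbs are tracked in bits 0..nurbs-1 of active_mask,
+  * sync urbs at bit offset 16 (see set_bit(i + 16) below) */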
+ subs->active_mask = 0;
+ subs->unlink_mask = 0;
+ subs->running = 1;
+ for (i = 0; i < subs->nurbs; i++) {
+ err = usb_submit_urb(subs->dataurb[i].urb, GFP_ATOMIC);
+ if (err < 0) {
+ snd_printk(KERN_ERR "cannot submit datapipe "
+ "for urb %d, error %d: %s\n",
+ i, err, usb_error_string(err));
+ goto __error;
+ }
+ set_bit(i, &subs->active_mask);
+ }
+ if (subs->syncpipe) {
+ for (i = 0; i < SYNC_URBS; i++) {
+ err = usb_submit_urb(subs->syncurb[i].urb, GFP_ATOMIC);
+ if (err < 0) {
+ snd_printk(KERN_ERR "cannot submit syncpipe "
+ "for urb %d, error %d: %s\n",
+ i, err, usb_error_string(err));
+ goto __error;
+ }
+ set_bit(i + 16, &subs->active_mask);
+ }
+ }
+ return 0;
+
+ __error:
+ // snd_pcm_stop(subs->pcm_substream, SNDRV_PCM_STATE_XRUN);
+ deactivate_urbs(subs, 0, 0);
+ return -EPIPE;
+}
+
+
+/*
+ * URB operation tables, indexed by stream direction
+ * (SNDRV_PCM_STREAM_PLAYBACK = 0, SNDRV_PCM_STREAM_CAPTURE = 1);
+ * the high speed variant below differs only in its sync callbacks
+ */
+static struct snd_urb_ops audio_urb_ops[2] = {
+ {
+ .prepare = prepare_nodata_playback_urb,
+ .retire = retire_playback_urb,
+ .prepare_sync = prepare_playback_sync_urb,
+ .retire_sync = retire_playback_sync_urb,
+ },
+ {
+ .prepare = prepare_capture_urb,
+ .retire = retire_capture_urb,
+ .prepare_sync = prepare_capture_sync_urb,
+ .retire_sync = retire_capture_sync_urb,
+ },
+};
+
+static struct snd_urb_ops audio_urb_ops_high_speed[2] = {
+ {
+ .prepare = prepare_nodata_playback_urb,
+ .retire = retire_playback_urb,
+ .prepare_sync = prepare_playback_sync_urb_hs,
+ .retire_sync = retire_playback_sync_urb_hs,
+ },
+ {
+ .prepare = prepare_capture_urb,
+ .retire = retire_capture_urb,
+ .prepare_sync = prepare_capture_sync_urb_hs,
+ .retire_sync = retire_capture_sync_urb,
+ },
+};
+
+/*
+ * initialize the substream instance.
+ */
+
+void snd_usb_init_substream(struct snd_usb_stream *as,
+ int stream, struct audioformat *fp)
+{
+ struct snd_usb_substream *subs = &as->substream[stream];
+
+ INIT_LIST_HEAD(&subs->fmt_list);
+ spin_lock_init(&subs->lock);
+
+ subs->stream = as;
+ subs->direction = stream;
+ subs->dev = as->chip->dev;
+ subs->txfr_quirk = as->chip->txfr_quirk;
+ if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL) {
+ subs->ops = audio_urb_ops[stream];
+ } else {
+ subs->ops = audio_urb_ops_high_speed[stream];
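+ /* a few devices need non-default sync callbacks */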
+ switch (as->chip->usb_id) {
+ case USB_ID(0x041e, 0x3f02): /* E-Mu 0202 USB */
+ case USB_ID(0x041e, 0x3f04): /* E-Mu 0404 USB */
+ case USB_ID(0x041e, 0x3f0a): /* E-Mu Tracker Pre */
+ subs->ops.retire_sync = retire_playback_sync_urb_hs_emu;
+ break;
+ case USB_ID(0x0763, 0x2080): /* M-Audio Fast Track Ultra 8 */
+ case USB_ID(0x0763, 0x2081): /* M-Audio Fast Track Ultra 8R */
+ subs->ops.prepare_sync = prepare_playback_sync_urb;
+ subs->ops.retire_sync = retire_playback_sync_urb;
+ break;
+ }
+ }
+
+ snd_usb_set_pcm_ops(as->pcm, stream);
+
+ list_add_tail(&fp->list, &subs->fmt_list);
+ subs->formats |= fp->formats;
+ subs->endpoint = fp->endpoint;
+ subs->num_formats++;
+ subs->fmt_type = fp->fmt_type;
+}
+
+int snd_usb_substream_playback_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct snd_usb_substream *subs = substream->runtime->private_data;
+
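+ /*
+  * playback urbs are already running here (they are submitted in
+  * snd_usb_substream_prepare()); start/pause only switch between the
+  * silent and the real prepare callbacks.
+  */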
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ subs->ops.prepare = prepare_playback_urb;
+ return 0;
+ case SNDRV_PCM_TRIGGER_STOP:
+ return deactivate_urbs(subs, 0, 0);
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ subs->ops.prepare = prepare_nodata_playback_urb;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct snd_usb_substream *subs = substream->runtime->private_data;
+
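+ /*
+  * for capture, the urbs are submitted at TRIGGER_START; pausing only
+  * swaps in a retire callback that discards the received data.
+  */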
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ subs->ops.retire = retire_capture_urb;
+ return start_urbs(subs, substream->runtime);
+ case SNDRV_PCM_TRIGGER_STOP:
+ return deactivate_urbs(subs, 0, 0);
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ subs->ops.retire = retire_paused_capture_urb;
+ return 0;
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ subs->ops.retire = retire_capture_urb;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+int snd_usb_substream_prepare(struct snd_usb_substream *subs,
+ struct snd_pcm_runtime *runtime)
+{
+ /* clear urbs (to be sure) */
+ deactivate_urbs(subs, 0, 1);
+ wait_clear_urbs(subs);
+
+ /* for playback, submit the URBs now; otherwise, the first hwptr_done
+ * updates for all URBs would happen at the same time when starting */
+ if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) {
+ subs->ops.prepare = prepare_nodata_playback_urb;
+ return start_urbs(subs, runtime);
+ }
+
+ return 0;
+}
+
diff --git a/sound/usb/urb.h b/sound/usb/urb.h
new file mode 100644
index 000000000000..888da38079cf
--- /dev/null
+++ b/sound/usb/urb.h
@@ -0,0 +1,21 @@
+#ifndef __USBAUDIO_URB_H
+#define __USBAUDIO_URB_H
+
+void snd_usb_init_substream(struct snd_usb_stream *as,
+ int stream,
+ struct audioformat *fp);
+
+int snd_usb_init_substream_urbs(struct snd_usb_substream *subs,
+ unsigned int period_bytes,
+ unsigned int rate,
+ unsigned int frame_bits);
+
+void snd_usb_release_substream_urbs(struct snd_usb_substream *subs, int force);
+
+int snd_usb_substream_prepare(struct snd_usb_substream *subs,
+ struct snd_pcm_runtime *runtime);
+
+int snd_usb_substream_playback_trigger(struct snd_pcm_substream *substream, int cmd);
+int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream, int cmd);
+
+#endif /* __USBAUDIO_URB_H */
diff --git a/sound/usb/usbaudio.c b/sound/usb/usbaudio.c
deleted file mode 100644
index 11b0826b8fe6..000000000000
--- a/sound/usb/usbaudio.c
+++ /dev/null
@@ -1,4050 +0,0 @@
-/*
- * (Tentative) USB Audio Driver for ALSA
- *
- * Main and PCM part
- *
- * Copyright (c) 2002 by Takashi Iwai <tiwai@suse.de>
- *
- * Many codes borrowed from audio.c by
- * Alan Cox (alan@lxorguk.ukuu.org.uk)
- * Thomas Sailer (sailer@ife.ee.ethz.ch)
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- *
- * NOTES:
- *
- * - async unlink should be used for avoiding the sleep inside lock.
- * 2.4.22 usb-uhci seems buggy for async unlinking and results in
- * oops. in such a case, pass the async_unlink=0 option.
- * - the linked URBs would be preferred but not used so far because of
- * the instability of unlinking.
- * - type II is not supported properly. there is no device which supports
- * this type *correctly*. SB extigy looks as if it supports, but it's
- * indeed an AC3 stream packed in SPDIF frames (i.e. no real AC3 stream).
- */
-
-
-#include <linux/bitops.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/usb.h>
-#include <linux/moduleparam.h>
-#include <linux/mutex.h>
-#include <linux/usb/audio.h>
-#include <linux/usb/ch9.h>
-
-#include <sound/core.h>
-#include <sound/info.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/initval.h>
-
-#include "usbaudio.h"
-
-
-MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");
-MODULE_DESCRIPTION("USB Audio");
-MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Generic,USB Audio}}");
-
-
-static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
-static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;/* Enable this card */
-/* Vendor/product IDs for this card */
-static int vid[SNDRV_CARDS] = { [0 ... (SNDRV_CARDS-1)] = -1 };
-static int pid[SNDRV_CARDS] = { [0 ... (SNDRV_CARDS-1)] = -1 };
-static int nrpacks = 8; /* max. number of packets per urb */
-static int async_unlink = 1;
-static int device_setup[SNDRV_CARDS]; /* device parameter for this card*/
-static int ignore_ctl_error;
-
-module_param_array(index, int, NULL, 0444);
-MODULE_PARM_DESC(index, "Index value for the USB audio adapter.");
-module_param_array(id, charp, NULL, 0444);
-MODULE_PARM_DESC(id, "ID string for the USB audio adapter.");
-module_param_array(enable, bool, NULL, 0444);
-MODULE_PARM_DESC(enable, "Enable USB audio adapter.");
-module_param_array(vid, int, NULL, 0444);
-MODULE_PARM_DESC(vid, "Vendor ID for the USB audio device.");
-module_param_array(pid, int, NULL, 0444);
-MODULE_PARM_DESC(pid, "Product ID for the USB audio device.");
-module_param(nrpacks, int, 0644);
-MODULE_PARM_DESC(nrpacks, "Max. number of packets per URB.");
-module_param(async_unlink, bool, 0444);
-MODULE_PARM_DESC(async_unlink, "Use async unlink mode.");
-module_param_array(device_setup, int, NULL, 0444);
-MODULE_PARM_DESC(device_setup, "Specific device setup (if needed).");
-module_param(ignore_ctl_error, bool, 0444);
-MODULE_PARM_DESC(ignore_ctl_error,
- "Ignore errors from USB controller for mixer interfaces.");
-
-/*
- * debug the h/w constraints
- */
-/* #define HW_CONST_DEBUG */
-
-
-/*
- *
- */
-
-#define MAX_PACKS 20
-#define MAX_PACKS_HS (MAX_PACKS * 8) /* in high speed mode */
-#define MAX_URBS 8
-#define SYNC_URBS 4 /* always four urbs for sync */
-#define MAX_QUEUE 24 /* try not to exceed this queue length, in ms */
-
-struct audioformat {
- struct list_head list;
- snd_pcm_format_t format; /* format type */
- unsigned int channels; /* # channels */
- unsigned int fmt_type; /* USB audio format type (1-3) */
- unsigned int frame_size; /* samples per frame for non-audio */
- int iface; /* interface number */
- unsigned char altsetting; /* corresponding alternate setting */
- unsigned char altset_idx; /* array index of alternate setting */
- unsigned char attributes; /* corresponding attributes of cs endpoint */
- unsigned char endpoint; /* endpoint */
- unsigned char ep_attr; /* endpoint attributes */
- unsigned char datainterval; /* log_2 of data packet interval */
- unsigned int maxpacksize; /* max. packet size */
- unsigned int rates; /* rate bitmasks */
- unsigned int rate_min, rate_max; /* min/max rates */
- unsigned int nr_rates; /* number of rate table entries */
- unsigned int *rate_table; /* rate table */
-};
-
-struct snd_usb_substream;
-
-struct snd_urb_ctx {
- struct urb *urb;
- unsigned int buffer_size; /* size of data buffer, if data URB */
- struct snd_usb_substream *subs;
- int index; /* index for urb array */
- int packets; /* number of packets per urb */
-};
-
-struct snd_urb_ops {
- int (*prepare)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
- int (*retire)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
- int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
- int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
-};
-
-struct snd_usb_substream {
- struct snd_usb_stream *stream;
- struct usb_device *dev;
- struct snd_pcm_substream *pcm_substream;
- int direction; /* playback or capture */
- int interface; /* current interface */
- int endpoint; /* assigned endpoint */
- struct audioformat *cur_audiofmt; /* current audioformat pointer (for hw_params callback) */
- unsigned int cur_rate; /* current rate (for hw_params callback) */
- unsigned int period_bytes; /* current period bytes (for hw_params callback) */
- unsigned int format; /* USB data format */
- unsigned int datapipe; /* the data i/o pipe */
- unsigned int syncpipe; /* 1 - async out or adaptive in */
- unsigned int datainterval; /* log_2 of data packet interval */
- unsigned int syncinterval; /* P for adaptive mode, 0 otherwise */
- unsigned int freqn; /* nominal sampling rate in fs/fps in Q16.16 format */
- unsigned int freqm; /* momentary sampling rate in fs/fps in Q16.16 format */
- unsigned int freqmax; /* maximum sampling rate, used for buffer management */
- unsigned int phase; /* phase accumulator */
- unsigned int maxpacksize; /* max packet size in bytes */
- unsigned int maxframesize; /* max packet size in frames */
- unsigned int curpacksize; /* current packet size in bytes (for capture) */
- unsigned int curframesize; /* current packet size in frames (for capture) */
- unsigned int fill_max: 1; /* fill max packet size always */
- unsigned int txfr_quirk:1; /* allow sub-frame alignment */
- unsigned int fmt_type; /* USB audio format type (1-3) */
-
- unsigned int running: 1; /* running status */
-
- unsigned int hwptr_done; /* processed byte position in the buffer */
- unsigned int transfer_done; /* processed frames since last period update */
- unsigned long active_mask; /* bitmask of active urbs */
- unsigned long unlink_mask; /* bitmask of unlinked urbs */
-
- unsigned int nurbs; /* # urbs */
- struct snd_urb_ctx dataurb[MAX_URBS]; /* data urb table */
- struct snd_urb_ctx syncurb[SYNC_URBS]; /* sync urb table */
- char *syncbuf; /* sync buffer for all sync URBs */
- dma_addr_t sync_dma; /* DMA address of syncbuf */
-
- u64 formats; /* format bitmasks (all or'ed) */
- unsigned int num_formats; /* number of supported audio formats (list) */
- struct list_head fmt_list; /* format list */
- struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
- spinlock_t lock;
-
- struct snd_urb_ops ops; /* callbacks (must be filled at init) */
-};
-
-
-struct snd_usb_stream {
- struct snd_usb_audio *chip;
- struct snd_pcm *pcm;
- int pcm_index;
- unsigned int fmt_type; /* USB audio format type (1-3) */
- struct snd_usb_substream substream[2];
- struct list_head list;
-};
-
-
-/*
- * we keep the snd_usb_audio_t instances by ourselves for merging
- * all the interfaces on the same card into one sound device.
- */
-
-static DEFINE_MUTEX(register_mutex);
-static struct snd_usb_audio *usb_chip[SNDRV_CARDS];
-
-
-/*
- * convert a sampling rate into our full speed format (fs/1000 in Q16.16)
- * this will overflow at approx 524 kHz
- */
-static inline unsigned get_usb_full_speed_rate(unsigned int rate)
-{
- return ((rate << 13) + 62) / 125;
-}
-
-/*
- * convert a sampling rate into USB high speed format (fs/8000 in Q16.16)
- * this will overflow at approx 4 MHz
- */
-static inline unsigned get_usb_high_speed_rate(unsigned int rate)
-{
- return ((rate << 10) + 62) / 125;
-}
-
-/* convert our full speed USB rate into sampling rate in Hz */
-static inline unsigned get_full_speed_hz(unsigned int usb_rate)
-{
- return (usb_rate * 125 + (1 << 12)) >> 13;
-}
-
-/* convert our high speed USB rate into sampling rate in Hz */
-static inline unsigned get_high_speed_hz(unsigned int usb_rate)
-{
- return (usb_rate * 125 + (1 << 9)) >> 10;
-}
-
-
-/*
- * prepare urb for full speed capture sync pipe
- *
- * fill the length and offset of each urb descriptor.
- * the fixed 10.14 frequency is passed through the pipe.
- */
-static int prepare_capture_sync_urb(struct snd_usb_substream *subs,
- struct snd_pcm_runtime *runtime,
- struct urb *urb)
-{
- unsigned char *cp = urb->transfer_buffer;
- struct snd_urb_ctx *ctx = urb->context;
-
- urb->dev = ctx->subs->dev; /* we need to set this at each time */
- urb->iso_frame_desc[0].length = 3;
- urb->iso_frame_desc[0].offset = 0;
- cp[0] = subs->freqn >> 2;
- cp[1] = subs->freqn >> 10;
- cp[2] = subs->freqn >> 18;
- return 0;
-}
-
-/*
- * prepare urb for high speed capture sync pipe
- *
- * fill the length and offset of each urb descriptor.
- * the fixed 12.13 frequency is passed as 16.16 through the pipe.
- */
-static int prepare_capture_sync_urb_hs(struct snd_usb_substream *subs,
- struct snd_pcm_runtime *runtime,
- struct urb *urb)
-{
- unsigned char *cp = urb->transfer_buffer;
- struct snd_urb_ctx *ctx = urb->context;
-
- urb->dev = ctx->subs->dev; /* we need to set this at each time */
- urb->iso_frame_desc[0].length = 4;
- urb->iso_frame_desc[0].offset = 0;
- cp[0] = subs->freqn;
- cp[1] = subs->freqn >> 8;
- cp[2] = subs->freqn >> 16;
- cp[3] = subs->freqn >> 24;
- return 0;
-}
-
-/*
- * process after capture sync complete
- * - nothing to do
- */
-static int retire_capture_sync_urb(struct snd_usb_substream *subs,
- struct snd_pcm_runtime *runtime,
- struct urb *urb)
-{
- return 0;
-}
-
-/*
- * prepare urb for capture data pipe
- *
- * fill the offset and length of each descriptor.
- *
- * we use a temporary buffer to write the captured data.
- * since the length of written data is determined by host, we cannot
- * write onto the pcm buffer directly... the data is thus copied
- * later at complete callback to the global buffer.
- */
-static int prepare_capture_urb(struct snd_usb_substream *subs,
- struct snd_pcm_runtime *runtime,
- struct urb *urb)
-{
- int i, offs;
- struct snd_urb_ctx *ctx = urb->context;
-
- offs = 0;
- urb->dev = ctx->subs->dev; /* we need to set this at each time */
- for (i = 0; i < ctx->packets; i++) {
- urb->iso_frame_desc[i].offset = offs;
- urb->iso_frame_desc[i].length = subs->curpacksize;
- offs += subs->curpacksize;
- }
- urb->transfer_buffer_length = offs;
- urb->number_of_packets = ctx->packets;
- return 0;
-}
-
-/*
- * process after capture complete
- *
- * copy the data from each descriptor to the pcm buffer, and
- * update the current position.
- */
-static int retire_capture_urb(struct snd_usb_substream *subs,
- struct snd_pcm_runtime *runtime,
- struct urb *urb)
-{
- unsigned long flags;
- unsigned char *cp;
- int i;
- unsigned int stride, frames, bytes, oldptr;
- int period_elapsed = 0;
-
- stride = runtime->frame_bits >> 3;
-
- for (i = 0; i < urb->number_of_packets; i++) {
- cp = (unsigned char *)urb->transfer_buffer + urb->iso_frame_desc[i].offset;
- if (urb->iso_frame_desc[i].status) {
- snd_printd(KERN_ERR "frame %d active: %d\n", i, urb->iso_frame_desc[i].status);
- // continue;
- }
- bytes = urb->iso_frame_desc[i].actual_length;
- frames = bytes / stride;
- if (!subs->txfr_quirk)
- bytes = frames * stride;
- if (bytes % (runtime->sample_bits >> 3) != 0) {
-#ifdef CONFIG_SND_DEBUG_VERBOSE
- int oldbytes = bytes;
-#endif
- bytes = frames * stride;
- snd_printdd(KERN_ERR "Corrected urb data len. %d->%d\n",
- oldbytes, bytes);
- }
- /* update the current pointer */
- spin_lock_irqsave(&subs->lock, flags);
- oldptr = subs->hwptr_done;
- subs->hwptr_done += bytes;
- if (subs->hwptr_done >= runtime->buffer_size * stride)
- subs->hwptr_done -= runtime->buffer_size * stride;
- frames = (bytes + (oldptr % stride)) / stride;
- subs->transfer_done += frames;
- if (subs->transfer_done >= runtime->period_size) {
- subs->transfer_done -= runtime->period_size;
- period_elapsed = 1;
- }
- spin_unlock_irqrestore(&subs->lock, flags);
- /* copy a data chunk */
- if (oldptr + bytes > runtime->buffer_size * stride) {
- unsigned int bytes1 =
- runtime->buffer_size * stride - oldptr;
- memcpy(runtime->dma_area + oldptr, cp, bytes1);
- memcpy(runtime->dma_area, cp + bytes1, bytes - bytes1);
- } else {
- memcpy(runtime->dma_area + oldptr, cp, bytes);
- }
- }
- if (period_elapsed)
- snd_pcm_period_elapsed(subs->pcm_substream);
- return 0;
-}
-
-/*
- * Process after capture complete when paused. Nothing to do.
- */
-static int retire_paused_capture_urb(struct snd_usb_substream *subs,
- struct snd_pcm_runtime *runtime,
- struct urb *urb)
-{
- return 0;
-}
-
-
-/*
- * prepare urb for full speed playback sync pipe
- *
- * set up the offset and length to receive the current frequency.
- */
-
-static int prepare_playback_sync_urb(struct snd_usb_substream *subs,
- struct snd_pcm_runtime *runtime,
- struct urb *urb)
-{
- struct snd_urb_ctx *ctx = urb->context;
-
- urb->dev = ctx->subs->dev; /* we need to set this at each time */
- urb->iso_frame_desc[0].length = 3;
- urb->iso_frame_desc[0].offset = 0;
- return 0;
-}
-
-/*
- * prepare urb for high speed playback sync pipe
- *
- * set up the offset and length to receive the current frequency.
- */
-
-static int prepare_playback_sync_urb_hs(struct snd_usb_substream *subs,
- struct snd_pcm_runtime *runtime,
- struct urb *urb)
-{
- struct snd_urb_ctx *ctx = urb->context;
-
- urb->dev = ctx->subs->dev; /* we need to set this at each time */
- urb->iso_frame_desc[0].length = 4;
- urb->iso_frame_desc[0].offset = 0;
- return 0;
-}
-
-/*
- * process after full speed playback sync complete
- *
- * retrieve the current 10.14 frequency from pipe, and set it.
- * the value is referred in prepare_playback_urb().
- */
-static int retire_playback_sync_urb(struct snd_usb_substream *subs,
- struct snd_pcm_runtime *runtime,
- struct urb *urb)
-{
- unsigned int f;
- unsigned long flags;
-
- if (urb->iso_frame_desc[0].status == 0 &&
- urb->iso_frame_desc[0].actual_length == 3) {
- f = combine_triple((u8*)urb->transfer_buffer) << 2;
- if (f >= subs->freqn - subs->freqn / 8 && f <= subs->freqmax) {
- spin_lock_irqsave(&subs->lock, flags);
- subs->freqm = f;
- spin_unlock_irqrestore(&subs->lock, flags);
- }
- }
-
- return 0;
-}
-
-/*
- * process after high speed playback sync complete
- *
- * retrieve the current 12.13 frequency from pipe, and set it.
- * the value is referred in prepare_playback_urb().
- */
-static int retire_playback_sync_urb_hs(struct snd_usb_substream *subs,
- struct snd_pcm_runtime *runtime,
- struct urb *urb)
-{
- unsigned int f;
- unsigned long flags;
-
- if (urb->iso_frame_desc[0].status == 0 &&
- urb->iso_frame_desc[0].actual_length == 4) {
- f = combine_quad((u8*)urb->transfer_buffer) & 0x0fffffff;
- if (f >= subs->freqn - subs->freqn / 8 && f <= subs->freqmax) {
- spin_lock_irqsave(&subs->lock, flags);
- subs->freqm = f;
- spin_unlock_irqrestore(&subs->lock, flags);
- }
- }
-
- return 0;
-}
-
-/*
- * process after E-Mu 0202/0404/Tracker Pre high speed playback sync complete
- *
- * These devices return the number of samples per packet instead of the number
- * of samples per microframe.
- */
-static int retire_playback_sync_urb_hs_emu(struct snd_usb_substream *subs,
- struct snd_pcm_runtime *runtime,
- struct urb *urb)
-{
- unsigned int f;
- unsigned long flags;
-
- if (urb->iso_frame_desc[0].status == 0 &&
- urb->iso_frame_desc[0].actual_length == 4) {
- f = combine_quad((u8*)urb->transfer_buffer) & 0x0fffffff;
- f >>= subs->datainterval;
- if (f >= subs->freqn - subs->freqn / 8 && f <= subs->freqmax) {
- spin_lock_irqsave(&subs->lock, flags);
- subs->freqm = f;
- spin_unlock_irqrestore(&subs->lock, flags);
- }
- }
-
- return 0;
-}
-
-/* determine the number of frames in the next packet */
-static int snd_usb_audio_next_packet_size(struct snd_usb_substream *subs)
-{
- if (subs->fill_max)
- return subs->maxframesize;
- else {
- subs->phase = (subs->phase & 0xffff)
- + (subs->freqm << subs->datainterval);
- return min(subs->phase >> 16, subs->maxframesize);
- }
-}
-
-/*
- * Prepare urb for streaming before playback starts or when paused.
- *
- * We don't have any data, so we send silence.
- */
-static int prepare_nodata_playback_urb(struct snd_usb_substream *subs,
- struct snd_pcm_runtime *runtime,
- struct urb *urb)
-{
- unsigned int i, offs, counts;
- struct snd_urb_ctx *ctx = urb->context;
- int stride = runtime->frame_bits >> 3;
-
- offs = 0;
- urb->dev = ctx->subs->dev;
- for (i = 0; i < ctx->packets; ++i) {
- counts = snd_usb_audio_next_packet_size(subs);
- urb->iso_frame_desc[i].offset = offs * stride;
- urb->iso_frame_desc[i].length = counts * stride;
- offs += counts;
- }
- urb->number_of_packets = ctx->packets;
- urb->transfer_buffer_length = offs * stride;
- memset(urb->transfer_buffer,
- subs->cur_audiofmt->format == SNDRV_PCM_FORMAT_U8 ? 0x80 : 0,
- offs * stride);
- return 0;
-}
-
-/*
- * prepare urb for playback data pipe
- *
- * Since a URB can handle only a single linear buffer, we must use double
- * buffering when the data to be transferred overflows the buffer boundary.
- * To avoid inconsistencies when updating hwptr_done, we use double buffering
- * for all URBs.
- */
-static int prepare_playback_urb(struct snd_usb_substream *subs,
- struct snd_pcm_runtime *runtime,
- struct urb *urb)
-{
- int i, stride;
- unsigned int counts, frames, bytes;
- unsigned long flags;
- int period_elapsed = 0;
- struct snd_urb_ctx *ctx = urb->context;
-
- stride = runtime->frame_bits >> 3;
-
- frames = 0;
- urb->dev = ctx->subs->dev; /* we need to set this at each time */
- urb->number_of_packets = 0;
- spin_lock_irqsave(&subs->lock, flags);
- for (i = 0; i < ctx->packets; i++) {
- counts = snd_usb_audio_next_packet_size(subs);
- /* set up descriptor */
- urb->iso_frame_desc[i].offset = frames * stride;
- urb->iso_frame_desc[i].length = counts * stride;
- frames += counts;
- urb->number_of_packets++;
- subs->transfer_done += counts;
- if (subs->transfer_done >= runtime->period_size) {
- subs->transfer_done -= runtime->period_size;
- period_elapsed = 1;
- if (subs->fmt_type == UAC_FORMAT_TYPE_II) {
- if (subs->transfer_done > 0) {
- /* FIXME: fill-max mode is not
- * supported yet */
- frames -= subs->transfer_done;
- counts -= subs->transfer_done;
- urb->iso_frame_desc[i].length =
- counts * stride;
- subs->transfer_done = 0;
- }
- i++;
- if (i < ctx->packets) {
- /* add a transfer delimiter */
- urb->iso_frame_desc[i].offset =
- frames * stride;
- urb->iso_frame_desc[i].length = 0;
- urb->number_of_packets++;
- }
- break;
- }
- }
- if (period_elapsed) /* finish at the period boundary */
- break;
- }
- bytes = frames * stride;
- if (subs->hwptr_done + bytes > runtime->buffer_size * stride) {
- /* err, the transferred area goes over buffer boundary. */
- unsigned int bytes1 =
- runtime->buffer_size * stride - subs->hwptr_done;
- memcpy(urb->transfer_buffer,
- runtime->dma_area + subs->hwptr_done, bytes1);
- memcpy(urb->transfer_buffer + bytes1,
- runtime->dma_area, bytes - bytes1);
- } else {
- memcpy(urb->transfer_buffer,
- runtime->dma_area + subs->hwptr_done, bytes);
- }
- subs->hwptr_done += bytes;
- if (subs->hwptr_done >= runtime->buffer_size * stride)
- subs->hwptr_done -= runtime->buffer_size * stride;
- runtime->delay += frames;
- spin_unlock_irqrestore(&subs->lock, flags);
- urb->transfer_buffer_length = bytes;
- if (period_elapsed)
- snd_pcm_period_elapsed(subs->pcm_substream);
- return 0;
-}
-
-/*
- * process after playback data complete
- * - decrease the delay count again
- */
-static int retire_playback_urb(struct snd_usb_substream *subs,
- struct snd_pcm_runtime *runtime,
- struct urb *urb)
-{
- unsigned long flags;
- int stride = runtime->frame_bits >> 3;
- int processed = urb->transfer_buffer_length / stride;
-
- spin_lock_irqsave(&subs->lock, flags);
- if (processed > runtime->delay)
- runtime->delay = 0;
- else
- runtime->delay -= processed;
- spin_unlock_irqrestore(&subs->lock, flags);
- return 0;
-}
-
-
-/*
- */
-static struct snd_urb_ops audio_urb_ops[2] = {
- {
- .prepare = prepare_nodata_playback_urb,
- .retire = retire_playback_urb,
- .prepare_sync = prepare_playback_sync_urb,
- .retire_sync = retire_playback_sync_urb,
- },
- {
- .prepare = prepare_capture_urb,
- .retire = retire_capture_urb,
- .prepare_sync = prepare_capture_sync_urb,
- .retire_sync = retire_capture_sync_urb,
- },
-};
-
-static struct snd_urb_ops audio_urb_ops_high_speed[2] = {
- {
- .prepare = prepare_nodata_playback_urb,
- .retire = retire_playback_urb,
- .prepare_sync = prepare_playback_sync_urb_hs,
- .retire_sync = retire_playback_sync_urb_hs,
- },
- {
- .prepare = prepare_capture_urb,
- .retire = retire_capture_urb,
- .prepare_sync = prepare_capture_sync_urb_hs,
- .retire_sync = retire_capture_sync_urb,
- },
-};
-
-/*
- * complete callback from data urb
- */
-static void snd_complete_urb(struct urb *urb)
-{
- struct snd_urb_ctx *ctx = urb->context;
- struct snd_usb_substream *subs = ctx->subs;
- struct snd_pcm_substream *substream = ctx->subs->pcm_substream;
- int err = 0;
-
- if ((subs->running && subs->ops.retire(subs, substream->runtime, urb)) ||
- !subs->running || /* can be stopped during retire callback */
- (err = subs->ops.prepare(subs, substream->runtime, urb)) < 0 ||
- (err = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
- clear_bit(ctx->index, &subs->active_mask);
- if (err < 0) {
- snd_printd(KERN_ERR "cannot submit urb (err = %d)\n", err);
- snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
- }
- }
-}
-
-
-/*
- * complete callback from sync urb
- */
-static void snd_complete_sync_urb(struct urb *urb)
-{
- struct snd_urb_ctx *ctx = urb->context;
- struct snd_usb_substream *subs = ctx->subs;
- struct snd_pcm_substream *substream = ctx->subs->pcm_substream;
- int err = 0;
-
- if ((subs->running && subs->ops.retire_sync(subs, substream->runtime, urb)) ||
- !subs->running || /* can be stopped during retire callback */
- (err = subs->ops.prepare_sync(subs, substream->runtime, urb)) < 0 ||
- (err = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
- clear_bit(ctx->index + 16, &subs->active_mask);
- if (err < 0) {
- snd_printd(KERN_ERR "cannot submit sync urb (err = %d)\n", err);
- snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
- }
- }
-}
-
-
-/*
- * unlink active urbs.
- */
-static int deactivate_urbs(struct snd_usb_substream *subs, int force, int can_sleep)
-{
- unsigned int i;
- int async;
-
- subs->running = 0;
-
- if (!force && subs->stream->chip->shutdown) /* to be sure... */
- return -EBADFD;
-
- async = !can_sleep && async_unlink;
-
- if (!async && in_interrupt())
- return 0;
-
- for (i = 0; i < subs->nurbs; i++) {
- if (test_bit(i, &subs->active_mask)) {
- if (!test_and_set_bit(i, &subs->unlink_mask)) {
- struct urb *u = subs->dataurb[i].urb;
- if (async)
- usb_unlink_urb(u);
- else
- usb_kill_urb(u);
- }
- }
- }
- if (subs->syncpipe) {
- for (i = 0; i < SYNC_URBS; i++) {
- if (test_bit(i+16, &subs->active_mask)) {
- if (!test_and_set_bit(i+16, &subs->unlink_mask)) {
- struct urb *u = subs->syncurb[i].urb;
- if (async)
- usb_unlink_urb(u);
- else
- usb_kill_urb(u);
- }
- }
- }
- }
- return 0;
-}
-
-
-static const char *usb_error_string(int err)
-{
- switch (err) {
- case -ENODEV:
- return "no device";
- case -ENOENT:
- return "endpoint not enabled";
- case -EPIPE:
- return "endpoint stalled";
- case -ENOSPC:
- return "not enough bandwidth";
- case -ESHUTDOWN:
- return "device disabled";
- case -EHOSTUNREACH:
- return "device suspended";
- case -EINVAL:
- case -EAGAIN:
- case -EFBIG:
- case -EMSGSIZE:
- return "internal error";
- default:
- return "unknown error";
- }
-}
-
-/*
- * set up and start data/sync urbs
- */
-static int start_urbs(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime)
-{
- unsigned int i;
- int err;
-
- if (subs->stream->chip->shutdown)
- return -EBADFD;
-
- for (i = 0; i < subs->nurbs; i++) {
- if (snd_BUG_ON(!subs->dataurb[i].urb))
- return -EINVAL;
- if (subs->ops.prepare(subs, runtime, subs->dataurb[i].urb) < 0) {
- snd_printk(KERN_ERR "cannot prepare datapipe for urb %d\n", i);
- goto __error;
- }
- }
- if (subs->syncpipe) {
- for (i = 0; i < SYNC_URBS; i++) {
- if (snd_BUG_ON(!subs->syncurb[i].urb))
- return -EINVAL;
- if (subs->ops.prepare_sync(subs, runtime, subs->syncurb[i].urb) < 0) {
- snd_printk(KERN_ERR "cannot prepare syncpipe for urb %d\n", i);
- goto __error;
- }
- }
- }
-
- subs->active_mask = 0;
- subs->unlink_mask = 0;
- subs->running = 1;
- for (i = 0; i < subs->nurbs; i++) {
- err = usb_submit_urb(subs->dataurb[i].urb, GFP_ATOMIC);
- if (err < 0) {
- snd_printk(KERN_ERR "cannot submit datapipe "
- "for urb %d, error %d: %s\n",
- i, err, usb_error_string(err));
- goto __error;
- }
- set_bit(i, &subs->active_mask);
- }
- if (subs->syncpipe) {
- for (i = 0; i < SYNC_URBS; i++) {
- err = usb_submit_urb(subs->syncurb[i].urb, GFP_ATOMIC);
- if (err < 0) {
- snd_printk(KERN_ERR "cannot submit syncpipe "
- "for urb %d, error %d: %s\n",
- i, err, usb_error_string(err));
- goto __error;
- }
- set_bit(i + 16, &subs->active_mask);
- }
- }
- return 0;
-
- __error:
- // snd_pcm_stop(subs->pcm_substream, SNDRV_PCM_STATE_XRUN);
- deactivate_urbs(subs, 0, 0);
- return -EPIPE;
-}
-
-
-/*
- * wait until all urbs are processed.
- */
-static int wait_clear_urbs(struct snd_usb_substream *subs)
-{
- unsigned long end_time = jiffies + msecs_to_jiffies(1000);
- unsigned int i;
- int alive;
-
- do {
- alive = 0;
- for (i = 0; i < subs->nurbs; i++) {
- if (test_bit(i, &subs->active_mask))
- alive++;
- }
- if (subs->syncpipe) {
- for (i = 0; i < SYNC_URBS; i++) {
- if (test_bit(i + 16, &subs->active_mask))
- alive++;
- }
- }
- if (! alive)
- break;
- schedule_timeout_uninterruptible(1);
- } while (time_before(jiffies, end_time));
- if (alive)
- snd_printk(KERN_ERR "timeout: still %d active urbs..\n", alive);
- return 0;
-}
-
-
-/*
- * return the current pcm pointer. just based on the hwptr_done value.
- */
-static snd_pcm_uframes_t snd_usb_pcm_pointer(struct snd_pcm_substream *substream)
-{
- struct snd_usb_substream *subs;
- unsigned int hwptr_done;
-
- subs = (struct snd_usb_substream *)substream->runtime->private_data;
- spin_lock(&subs->lock);
- hwptr_done = subs->hwptr_done;
- spin_unlock(&subs->lock);
- return hwptr_done / (substream->runtime->frame_bits >> 3);
-}
-
-
-/*
- * start/stop playback substream
- */
-static int snd_usb_pcm_playback_trigger(struct snd_pcm_substream *substream,
- int cmd)
-{
- struct snd_usb_substream *subs = substream->runtime->private_data;
-
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- subs->ops.prepare = prepare_playback_urb;
- return 0;
- case SNDRV_PCM_TRIGGER_STOP:
- return deactivate_urbs(subs, 0, 0);
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- subs->ops.prepare = prepare_nodata_playback_urb;
- return 0;
- default:
- return -EINVAL;
- }
-}
-
-/*
- * start/stop capture substream
- */
-static int snd_usb_pcm_capture_trigger(struct snd_pcm_substream *substream,
- int cmd)
-{
- struct snd_usb_substream *subs = substream->runtime->private_data;
-
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- subs->ops.retire = retire_capture_urb;
- return start_urbs(subs, substream->runtime);
- case SNDRV_PCM_TRIGGER_STOP:
- return deactivate_urbs(subs, 0, 0);
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- subs->ops.retire = retire_paused_capture_urb;
- return 0;
- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- subs->ops.retire = retire_capture_urb;
- return 0;
- default:
- return -EINVAL;
- }
-}
-
-
-/*
- * release a urb data
- */
-static void release_urb_ctx(struct snd_urb_ctx *u)
-{
- if (u->urb) {
- if (u->buffer_size)
- usb_buffer_free(u->subs->dev, u->buffer_size,
- u->urb->transfer_buffer,
- u->urb->transfer_dma);
- usb_free_urb(u->urb);
- u->urb = NULL;
- }
-}
-
-/*
- * release a substream
- */
-static void release_substream_urbs(struct snd_usb_substream *subs, int force)
-{
- int i;
-
- /* stop urbs (to be sure) */
- deactivate_urbs(subs, force, 1);
- wait_clear_urbs(subs);
-
- for (i = 0; i < MAX_URBS; i++)
- release_urb_ctx(&subs->dataurb[i]);
- for (i = 0; i < SYNC_URBS; i++)
- release_urb_ctx(&subs->syncurb[i]);
- usb_buffer_free(subs->dev, SYNC_URBS * 4,
- subs->syncbuf, subs->sync_dma);
- subs->syncbuf = NULL;
- subs->nurbs = 0;
-}
-
-/*
- * initialize a substream for playback/capture
- */
-static int init_substream_urbs(struct snd_usb_substream *subs, unsigned int period_bytes,
- unsigned int rate, unsigned int frame_bits)
-{
- unsigned int maxsize, i;
- int is_playback = subs->direction == SNDRV_PCM_STREAM_PLAYBACK;
- unsigned int urb_packs, total_packs, packs_per_ms;
-
- /* calculate the frequency in 16.16 format */
- if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL)
- subs->freqn = get_usb_full_speed_rate(rate);
- else
- subs->freqn = get_usb_high_speed_rate(rate);
- subs->freqm = subs->freqn;
- /* calculate max. frequency */
- if (subs->maxpacksize) {
- /* whatever fits into a max. size packet */
- maxsize = subs->maxpacksize;
- subs->freqmax = (maxsize / (frame_bits >> 3))
- << (16 - subs->datainterval);
- } else {
- /* no max. packet size: just take 25% higher than nominal */
- subs->freqmax = subs->freqn + (subs->freqn >> 2);
- maxsize = ((subs->freqmax + 0xffff) * (frame_bits >> 3))
- >> (16 - subs->datainterval);
- }
- subs->phase = 0;
-
- if (subs->fill_max)
- subs->curpacksize = subs->maxpacksize;
- else
- subs->curpacksize = maxsize;
-
- if (snd_usb_get_speed(subs->dev) == USB_SPEED_HIGH)
- packs_per_ms = 8 >> subs->datainterval;
- else
- packs_per_ms = 1;
-
- if (is_playback) {
- urb_packs = max(nrpacks, 1);
- urb_packs = min(urb_packs, (unsigned int)MAX_PACKS);
- } else
- urb_packs = 1;
- urb_packs *= packs_per_ms;
- if (subs->syncpipe)
- urb_packs = min(urb_packs, 1U << subs->syncinterval);
-
- /* decide how many packets to be used */
- if (is_playback) {
- unsigned int minsize, maxpacks;
- /* determine how small a packet can be */
- minsize = (subs->freqn >> (16 - subs->datainterval))
- * (frame_bits >> 3);
- /* with sync from device, assume it can be 12% lower */
- if (subs->syncpipe)
- minsize -= minsize >> 3;
- minsize = max(minsize, 1u);
- total_packs = (period_bytes + minsize - 1) / minsize;
- /* we need at least two URBs for queueing */
- if (total_packs < 2) {
- total_packs = 2;
- } else {
- /* and we don't want too long a queue either */
- maxpacks = max(MAX_QUEUE * packs_per_ms, urb_packs * 2);
- total_packs = min(total_packs, maxpacks);
- }
- } else {
- while (urb_packs > 1 && urb_packs * maxsize >= period_bytes)
- urb_packs >>= 1;
- total_packs = MAX_URBS * urb_packs;
- }
- subs->nurbs = (total_packs + urb_packs - 1) / urb_packs;
- if (subs->nurbs > MAX_URBS) {
- /* too much... */
- subs->nurbs = MAX_URBS;
- total_packs = MAX_URBS * urb_packs;
- } else if (subs->nurbs < 2) {
- /* too little - we need at least two packets
- * to ensure contiguous playback/capture
- */
- subs->nurbs = 2;
- }
-
- /* allocate and initialize data urbs */
- for (i = 0; i < subs->nurbs; i++) {
- struct snd_urb_ctx *u = &subs->dataurb[i];
- u->index = i;
- u->subs = subs;
- u->packets = (i + 1) * total_packs / subs->nurbs
- - i * total_packs / subs->nurbs;
- u->buffer_size = maxsize * u->packets;
- if (subs->fmt_type == UAC_FORMAT_TYPE_II)
- u->packets++; /* for transfer delimiter */
- u->urb = usb_alloc_urb(u->packets, GFP_KERNEL);
- if (!u->urb)
- goto out_of_memory;
- u->urb->transfer_buffer =
- usb_buffer_alloc(subs->dev, u->buffer_size, GFP_KERNEL,
- &u->urb->transfer_dma);
- if (!u->urb->transfer_buffer)
- goto out_of_memory;
- u->urb->pipe = subs->datapipe;
- u->urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
- u->urb->interval = 1 << subs->datainterval;
- u->urb->context = u;
- u->urb->complete = snd_complete_urb;
- }
-
- if (subs->syncpipe) {
- /* allocate and initialize sync urbs */
- subs->syncbuf = usb_buffer_alloc(subs->dev, SYNC_URBS * 4,
- GFP_KERNEL, &subs->sync_dma);
- if (!subs->syncbuf)
- goto out_of_memory;
- for (i = 0; i < SYNC_URBS; i++) {
- struct snd_urb_ctx *u = &subs->syncurb[i];
- u->index = i;
- u->subs = subs;
- u->packets = 1;
- u->urb = usb_alloc_urb(1, GFP_KERNEL);
- if (!u->urb)
- goto out_of_memory;
- u->urb->transfer_buffer = subs->syncbuf + i * 4;
- u->urb->transfer_dma = subs->sync_dma + i * 4;
- u->urb->transfer_buffer_length = 4;
- u->urb->pipe = subs->syncpipe;
- u->urb->transfer_flags = URB_ISO_ASAP |
- URB_NO_TRANSFER_DMA_MAP;
- u->urb->number_of_packets = 1;
- u->urb->interval = 1 << subs->syncinterval;
- u->urb->context = u;
- u->urb->complete = snd_complete_sync_urb;
- }
- }
- return 0;
-
-out_of_memory:
- release_substream_urbs(subs, 0);
- return -ENOMEM;
-}
-
-
-/*
- * find a matching audio format
- */
-static struct audioformat *find_format(struct snd_usb_substream *subs, unsigned int format,
- unsigned int rate, unsigned int channels)
-{
- struct list_head *p;
- struct audioformat *found = NULL;
- int cur_attr = 0, attr;
-
- list_for_each(p, &subs->fmt_list) {
- struct audioformat *fp;
- fp = list_entry(p, struct audioformat, list);
- if (fp->format != format || fp->channels != channels)
- continue;
- if (rate < fp->rate_min || rate > fp->rate_max)
- continue;
- if (! (fp->rates & SNDRV_PCM_RATE_CONTINUOUS)) {
- unsigned int i;
- for (i = 0; i < fp->nr_rates; i++)
- if (fp->rate_table[i] == rate)
- break;
- if (i >= fp->nr_rates)
- continue;
- }
- attr = fp->ep_attr & USB_ENDPOINT_SYNCTYPE;
- if (! found) {
- found = fp;
- cur_attr = attr;
- continue;
- }
- /* avoid async out and adaptive in if the other method
- * supports the same format.
- * this is a workaround for the case like
- * M-audio audiophile USB.
- */
- if (attr != cur_attr) {
- if ((attr == USB_ENDPOINT_SYNC_ASYNC &&
- subs->direction == SNDRV_PCM_STREAM_PLAYBACK) ||
- (attr == USB_ENDPOINT_SYNC_ADAPTIVE &&
- subs->direction == SNDRV_PCM_STREAM_CAPTURE))
- continue;
- if ((cur_attr == USB_ENDPOINT_SYNC_ASYNC &&
- subs->direction == SNDRV_PCM_STREAM_PLAYBACK) ||
- (cur_attr == USB_ENDPOINT_SYNC_ADAPTIVE &&
- subs->direction == SNDRV_PCM_STREAM_CAPTURE)) {
- found = fp;
- cur_attr = attr;
- continue;
- }
- }
- /* find the format with the largest max. packet size */
- if (fp->maxpacksize > found->maxpacksize) {
- found = fp;
- cur_attr = attr;
- }
- }
- return found;
-}
-
-
-/*
- * initialize the pitch control and sample rate
- */
-static int init_usb_pitch(struct usb_device *dev, int iface,
- struct usb_host_interface *alts,
- struct audioformat *fmt)
-{
- unsigned int ep;
- unsigned char data[1];
- int err;
-
- ep = get_endpoint(alts, 0)->bEndpointAddress;
- /* if endpoint has pitch control, enable it */
- if (fmt->attributes & UAC_EP_CS_ATTR_PITCH_CONTROL) {
- data[0] = 1;
- if ((err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), UAC_SET_CUR,
- USB_TYPE_CLASS|USB_RECIP_ENDPOINT|USB_DIR_OUT,
- UAC_EP_CS_ATTR_PITCH_CONTROL << 8, ep, data, 1, 1000)) < 0) {
- snd_printk(KERN_ERR "%d:%d:%d: cannot set enable PITCH\n",
- dev->devnum, iface, ep);
- return err;
- }
- }
- return 0;
-}
-
-static int init_usb_sample_rate(struct usb_device *dev, int iface,
- struct usb_host_interface *alts,
- struct audioformat *fmt, int rate)
-{
- unsigned int ep;
- unsigned char data[3];
- int err;
-
- ep = get_endpoint(alts, 0)->bEndpointAddress;
- /* if endpoint has sampling rate control, set it */
- if (fmt->attributes & UAC_EP_CS_ATTR_SAMPLE_RATE) {
- int crate;
- data[0] = rate;
- data[1] = rate >> 8;
- data[2] = rate >> 16;
- if ((err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), UAC_SET_CUR,
- USB_TYPE_CLASS|USB_RECIP_ENDPOINT|USB_DIR_OUT,
- UAC_EP_CS_ATTR_SAMPLE_RATE << 8, ep, data, 3, 1000)) < 0) {
- snd_printk(KERN_ERR "%d:%d:%d: cannot set freq %d to ep %#x\n",
- dev->devnum, iface, fmt->altsetting, rate, ep);
- return err;
- }
- if ((err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC_GET_CUR,
- USB_TYPE_CLASS|USB_RECIP_ENDPOINT|USB_DIR_IN,
- UAC_EP_CS_ATTR_SAMPLE_RATE << 8, ep, data, 3, 1000)) < 0) {
- snd_printk(KERN_WARNING "%d:%d:%d: cannot get freq at ep %#x\n",
- dev->devnum, iface, fmt->altsetting, ep);
- return 0; /* some devices don't support reading */
- }
- crate = data[0] | (data[1] << 8) | (data[2] << 16);
- if (crate != rate) {
- snd_printd(KERN_WARNING "current rate %d is different from the runtime rate %d\n", crate, rate);
- // runtime->rate = crate;
- }
- }
- return 0;
-}
-
-/*
- * For the E-Mu 0404USB/0202USB/Tracker Pre, the sample rate should be set
- * for the device, not for the interface.
- */
-static void set_format_emu_quirk(struct snd_usb_substream *subs,
- struct audioformat *fmt)
-{
- unsigned char emu_samplerate_id = 0;
-
- /* When capture is active
- * sample rate shouldn't be changed
- * by playback substream
- */
- if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) {
- if (subs->stream->substream[SNDRV_PCM_STREAM_CAPTURE].interface != -1)
- return;
- }
-
- switch (fmt->rate_min) {
- case 48000:
- emu_samplerate_id = EMU_QUIRK_SR_48000HZ;
- break;
- case 88200:
- emu_samplerate_id = EMU_QUIRK_SR_88200HZ;
- break;
- case 96000:
- emu_samplerate_id = EMU_QUIRK_SR_96000HZ;
- break;
- case 176400:
- emu_samplerate_id = EMU_QUIRK_SR_176400HZ;
- break;
- case 192000:
- emu_samplerate_id = EMU_QUIRK_SR_192000HZ;
- break;
- default:
- emu_samplerate_id = EMU_QUIRK_SR_44100HZ;
- break;
- }
- snd_emuusb_set_samplerate(subs->stream->chip, emu_samplerate_id);
-}
-
-/*
- * find a matching format and set up the interface
- */
-static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt)
-{
- struct usb_device *dev = subs->dev;
- struct usb_host_interface *alts;
- struct usb_interface_descriptor *altsd;
- struct usb_interface *iface;
- unsigned int ep, attr;
- int is_playback = subs->direction == SNDRV_PCM_STREAM_PLAYBACK;
- int err;
-
- iface = usb_ifnum_to_if(dev, fmt->iface);
- if (WARN_ON(!iface))
- return -EINVAL;
- alts = &iface->altsetting[fmt->altset_idx];
- altsd = get_iface_desc(alts);
- if (WARN_ON(altsd->bAlternateSetting != fmt->altsetting))
- return -EINVAL;
-
- if (fmt == subs->cur_audiofmt)
- return 0;
-
- /* close the old interface */
- if (subs->interface >= 0 && subs->interface != fmt->iface) {
- if (usb_set_interface(subs->dev, subs->interface, 0) < 0) {
- snd_printk(KERN_ERR "%d:%d:%d: return to setting 0 failed\n",
- dev->devnum, fmt->iface, fmt->altsetting);
- return -EIO;
- }
- subs->interface = -1;
- subs->format = 0;
- }
-
- /* set interface */
- if (subs->interface != fmt->iface || subs->format != fmt->altset_idx) {
- if (usb_set_interface(dev, fmt->iface, fmt->altsetting) < 0) {
- snd_printk(KERN_ERR "%d:%d:%d: usb_set_interface failed\n",
- dev->devnum, fmt->iface, fmt->altsetting);
- return -EIO;
- }
- snd_printdd(KERN_INFO "setting usb interface %d:%d\n", fmt->iface, fmt->altsetting);
- subs->interface = fmt->iface;
- subs->format = fmt->altset_idx;
- }
-
- /* create a data pipe */
- ep = fmt->endpoint & USB_ENDPOINT_NUMBER_MASK;
- if (is_playback)
- subs->datapipe = usb_sndisocpipe(dev, ep);
- else
- subs->datapipe = usb_rcvisocpipe(dev, ep);
- subs->datainterval = fmt->datainterval;
- subs->syncpipe = subs->syncinterval = 0;
- subs->maxpacksize = fmt->maxpacksize;
- subs->fill_max = 0;
-
- /* we need a sync pipe in async OUT or adaptive IN mode */
- /* check the number of EP, since some devices have broken
- * descriptors which fool us. if it has only one EP,
- * assume it as adaptive-out or sync-in.
- */
- attr = fmt->ep_attr & USB_ENDPOINT_SYNCTYPE;
- if (((is_playback && attr == USB_ENDPOINT_SYNC_ASYNC) ||
- (! is_playback && attr == USB_ENDPOINT_SYNC_ADAPTIVE)) &&
- altsd->bNumEndpoints >= 2) {
- /* check sync-pipe endpoint */
- /* ... and check descriptor size before accessing bSynchAddress
- because there is a version of the SB Audigy 2 NX firmware lacking
- the audio fields in the endpoint descriptors */
- if ((get_endpoint(alts, 1)->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != 0x01 ||
- (get_endpoint(alts, 1)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
- get_endpoint(alts, 1)->bSynchAddress != 0)) {
- snd_printk(KERN_ERR "%d:%d:%d : invalid synch pipe\n",
- dev->devnum, fmt->iface, fmt->altsetting);
- return -EINVAL;
- }
- ep = get_endpoint(alts, 1)->bEndpointAddress;
- if (get_endpoint(alts, 0)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
- (( is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress | USB_DIR_IN)) ||
- (!is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress & ~USB_DIR_IN)))) {
- snd_printk(KERN_ERR "%d:%d:%d : invalid synch pipe\n",
- dev->devnum, fmt->iface, fmt->altsetting);
- return -EINVAL;
- }
- ep &= USB_ENDPOINT_NUMBER_MASK;
- if (is_playback)
- subs->syncpipe = usb_rcvisocpipe(dev, ep);
- else
- subs->syncpipe = usb_sndisocpipe(dev, ep);
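- /* pick the sync interval: use a valid bRefresh (1..9) if present,
- * 1 on full-speed devices, otherwise derive it from bInterval,
- * with 3 as a last-resort fallback */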
- if (get_endpoint(alts, 1)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
- get_endpoint(alts, 1)->bRefresh >= 1 &&
- get_endpoint(alts, 1)->bRefresh <= 9)
- subs->syncinterval = get_endpoint(alts, 1)->bRefresh;
- else if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL)
- subs->syncinterval = 1;
- else if (get_endpoint(alts, 1)->bInterval >= 1 &&
- get_endpoint(alts, 1)->bInterval <= 16)
- subs->syncinterval = get_endpoint(alts, 1)->bInterval - 1;
- else
- subs->syncinterval = 3;
- }
-
- /* always fill max packet size */
- if (fmt->attributes & UAC_EP_CS_ATTR_FILL_MAX)
- subs->fill_max = 1;
-
- if ((err = init_usb_pitch(dev, subs->interface, alts, fmt)) < 0)
- return err;
-
- subs->cur_audiofmt = fmt;
-
- switch (subs->stream->chip->usb_id) {
- case USB_ID(0x041e, 0x3f02): /* E-Mu 0202 USB */
- case USB_ID(0x041e, 0x3f04): /* E-Mu 0404 USB */
- case USB_ID(0x041e, 0x3f0a): /* E-Mu Tracker Pre */
- set_format_emu_quirk(subs, fmt);
- break;
- }
-
-#if 0
- printk(KERN_DEBUG
- "setting done: format = %d, rate = %d..%d, channels = %d\n",
- fmt->format, fmt->rate_min, fmt->rate_max, fmt->channels);
- printk(KERN_DEBUG
- " datapipe = 0x%0x, syncpipe = 0x%0x\n",
- subs->datapipe, subs->syncpipe);
-#endif
-
- return 0;
-}
-
-/*
- * hw_params callback
- *
- * allocate a buffer and set the given audio format.
- *
- * so far we use a physically linear buffer although packetized transfer
- * doesn't need a contiguous area.
- * if sg buffers are supported in a later version of alsa, we'll follow
- * that.
- */
-static int snd_usb_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *hw_params)
-{
- struct snd_usb_substream *subs = substream->runtime->private_data;
- struct audioformat *fmt;
- unsigned int channels, rate, format;
- int ret, changed;
-
- ret = snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
- if (ret < 0)
- return ret;
-
- format = params_format(hw_params);
- rate = params_rate(hw_params);
- channels = params_channels(hw_params);
- fmt = find_format(subs, format, rate, channels);
- if (!fmt) {
- snd_printd(KERN_DEBUG "cannot set format: format = %#x, rate = %d, channels = %d\n",
- format, rate, channels);
- return -EINVAL;
- }
-
- changed = subs->cur_audiofmt != fmt ||
- subs->period_bytes != params_period_bytes(hw_params) ||
- subs->cur_rate != rate;
- if ((ret = set_format(subs, fmt)) < 0)
- return ret;
-
- if (subs->cur_rate != rate) {
- struct usb_host_interface *alts;
- struct usb_interface *iface;
- iface = usb_ifnum_to_if(subs->dev, fmt->iface);
- alts = &iface->altsetting[fmt->altset_idx];
- ret = init_usb_sample_rate(subs->dev, subs->interface, alts, fmt, rate);
- if (ret < 0)
- return ret;
- subs->cur_rate = rate;
- }
-
- if (changed) {
- /* format changed */
- release_substream_urbs(subs, 0);
- /* influenced: period_bytes, channels, rate, format, */
- ret = init_substream_urbs(subs, params_period_bytes(hw_params),
- params_rate(hw_params),
- snd_pcm_format_physical_width(params_format(hw_params)) * params_channels(hw_params));
- }
-
- return ret;
-}
-
-/*
- * hw_free callback
- *
- * reset the audio format and release the buffer
- */
-static int snd_usb_hw_free(struct snd_pcm_substream *substream)
-{
- struct snd_usb_substream *subs = substream->runtime->private_data;
-
- subs->cur_audiofmt = NULL;
- subs->cur_rate = 0;
- subs->period_bytes = 0;
- if (!subs->stream->chip->shutdown)
- release_substream_urbs(subs, 0);
- return snd_pcm_lib_free_vmalloc_buffer(substream);
-}
-
-/*
- * prepare callback
- *
- * only a few subtle things...
- */
-static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
-{
- struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_usb_substream *subs = runtime->private_data;
-
- if (! subs->cur_audiofmt) {
- snd_printk(KERN_ERR "usbaudio: no format is specified!\n");
- return -ENXIO;
- }
-
- /* some unit conversions in runtime */
- subs->maxframesize = bytes_to_frames(runtime, subs->maxpacksize);
- subs->curframesize = bytes_to_frames(runtime, subs->curpacksize);
-
- /* reset the pointer */
- subs->hwptr_done = 0;
- subs->transfer_done = 0;
- subs->phase = 0;
- runtime->delay = 0;
-
- /* clear urbs (to be sure) */
- deactivate_urbs(subs, 0, 1);
- wait_clear_urbs(subs);
-
- /* for playback, submit the URBs now; otherwise, the first hwptr_done
- * updates for all URBs would happen at the same time when starting */
- if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) {
- subs->ops.prepare = prepare_nodata_playback_urb;
- return start_urbs(subs, runtime);
- } else
- return 0;
-}
-
-static struct snd_pcm_hardware snd_usb_hardware =
-{
- .info = SNDRV_PCM_INFO_MMAP |
- SNDRV_PCM_INFO_MMAP_VALID |
- SNDRV_PCM_INFO_BATCH |
- SNDRV_PCM_INFO_INTERLEAVED |
- SNDRV_PCM_INFO_BLOCK_TRANSFER |
- SNDRV_PCM_INFO_PAUSE,
- .buffer_bytes_max = 1024 * 1024,
- .period_bytes_min = 64,
- .period_bytes_max = 512 * 1024,
- .periods_min = 2,
- .periods_max = 1024,
-};
-
-/*
- * h/w constraints
- */
-
-#ifdef HW_CONST_DEBUG
-#define hwc_debug(fmt, args...) printk(KERN_DEBUG fmt, ##args)
-#else
-#define hwc_debug(fmt, args...) /**/
-#endif
-
-static int hw_check_valid_format(struct snd_usb_substream *subs,
- struct snd_pcm_hw_params *params,
- struct audioformat *fp)
-{
- struct snd_interval *it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
- struct snd_interval *ct = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
- struct snd_mask *fmts = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
- struct snd_interval *pt = hw_param_interval(params, SNDRV_PCM_HW_PARAM_PERIOD_TIME);
- unsigned int ptime;
-
- /* check the format */
- if (!snd_mask_test(fmts, fp->format)) {
- hwc_debug(" > check: no supported format %d\n", fp->format);
- return 0;
- }
- /* check the channels */
- if (fp->channels < ct->min || fp->channels > ct->max) {
- hwc_debug(" > check: no valid channels %d (%d/%d)\n", fp->channels, ct->min, ct->max);
- return 0;
- }
- /* check the rate is within the range */
- if (fp->rate_min > it->max || (fp->rate_min == it->max && it->openmax)) {
- hwc_debug(" > check: rate_min %d > max %d\n", fp->rate_min, it->max);
- return 0;
- }
- if (fp->rate_max < it->min || (fp->rate_max == it->min && it->openmin)) {
- hwc_debug(" > check: rate_max %d < min %d\n", fp->rate_max, it->min);
- return 0;
- }
- /* check whether the period time is >= the data packet interval */
- if (snd_usb_get_speed(subs->dev) == USB_SPEED_HIGH) {
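- /* on high speed, the data packet interval is (2^datainterval)
- * microframes of 125 us each */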
- ptime = 125 * (1 << fp->datainterval);
- if (ptime > pt->max || (ptime == pt->max && pt->openmax)) {
- hwc_debug(" > check: ptime %u > max %u\n", ptime, pt->max);
- return 0;
- }
- }
- return 1;
-}
-
-static int hw_rule_rate(struct snd_pcm_hw_params *params,
- struct snd_pcm_hw_rule *rule)
-{
- struct snd_usb_substream *subs = rule->private;
- struct list_head *p;
- struct snd_interval *it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
- unsigned int rmin, rmax;
- int changed;
-
- hwc_debug("hw_rule_rate: (%d,%d)\n", it->min, it->max);
- changed = 0;
- rmin = rmax = 0;
- list_for_each(p, &subs->fmt_list) {
- struct audioformat *fp;
- fp = list_entry(p, struct audioformat, list);
- if (!hw_check_valid_format(subs, params, fp))
- continue;
- if (changed++) {
- if (rmin > fp->rate_min)
- rmin = fp->rate_min;
- if (rmax < fp->rate_max)
- rmax = fp->rate_max;
- } else {
- rmin = fp->rate_min;
- rmax = fp->rate_max;
- }
- }
-
- if (!changed) {
- hwc_debug(" --> get empty\n");
- it->empty = 1;
- return -EINVAL;
- }
-
- changed = 0;
- if (it->min < rmin) {
- it->min = rmin;
- it->openmin = 0;
- changed = 1;
- }
- if (it->max > rmax) {
- it->max = rmax;
- it->openmax = 0;
- changed = 1;
- }
- if (snd_interval_checkempty(it)) {
- it->empty = 1;
- return -EINVAL;
- }
- hwc_debug(" --> (%d, %d) (changed = %d)\n", it->min, it->max, changed);
- return changed;
-}
-
-
-static int hw_rule_channels(struct snd_pcm_hw_params *params,
- struct snd_pcm_hw_rule *rule)
-{
- struct snd_usb_substream *subs = rule->private;
- struct list_head *p;
- struct snd_interval *it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
- unsigned int rmin, rmax;
- int changed;
-
- hwc_debug("hw_rule_channels: (%d,%d)\n", it->min, it->max);
- changed = 0;
- rmin = rmax = 0;
- list_for_each(p, &subs->fmt_list) {
- struct audioformat *fp;
- fp = list_entry(p, struct audioformat, list);
- if (!hw_check_valid_format(subs, params, fp))
- continue;
- if (changed++) {
- if (rmin > fp->channels)
- rmin = fp->channels;
- if (rmax < fp->channels)
- rmax = fp->channels;
- } else {
- rmin = fp->channels;
- rmax = fp->channels;
- }
- }
-
- if (!changed) {
- hwc_debug(" --> get empty\n");
- it->empty = 1;
- return -EINVAL;
- }
-
- changed = 0;
- if (it->min < rmin) {
- it->min = rmin;
- it->openmin = 0;
- changed = 1;
- }
- if (it->max > rmax) {
- it->max = rmax;
- it->openmax = 0;
- changed = 1;
- }
- if (snd_interval_checkempty(it)) {
- it->empty = 1;
- return -EINVAL;
- }
- hwc_debug(" --> (%d, %d) (changed = %d)\n", it->min, it->max, changed);
- return changed;
-}
-
-static int hw_rule_format(struct snd_pcm_hw_params *params,
- struct snd_pcm_hw_rule *rule)
-{
- struct snd_usb_substream *subs = rule->private;
- struct list_head *p;
- struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
- u64 fbits;
- u32 oldbits[2];
- int changed;
-
- hwc_debug("hw_rule_format: %x:%x\n", fmt->bits[0], fmt->bits[1]);
- fbits = 0;
- list_for_each(p, &subs->fmt_list) {
- struct audioformat *fp;
- fp = list_entry(p, struct audioformat, list);
- if (!hw_check_valid_format(subs, params, fp))
- continue;
- fbits |= (1ULL << fp->format);
- }
-
- oldbits[0] = fmt->bits[0];
- oldbits[1] = fmt->bits[1];
- fmt->bits[0] &= (u32)fbits;
- fmt->bits[1] &= (u32)(fbits >> 32);
- if (!fmt->bits[0] && !fmt->bits[1]) {
- hwc_debug(" --> get empty\n");
- return -EINVAL;
- }
- changed = (oldbits[0] != fmt->bits[0] || oldbits[1] != fmt->bits[1]);
- hwc_debug(" --> %x:%x (changed = %d)\n", fmt->bits[0], fmt->bits[1], changed);
- return changed;
-}
-
-static int hw_rule_period_time(struct snd_pcm_hw_params *params,
- struct snd_pcm_hw_rule *rule)
-{
- struct snd_usb_substream *subs = rule->private;
- struct audioformat *fp;
- struct snd_interval *it;
- unsigned char min_datainterval;
- unsigned int pmin;
- int changed;
-
- it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_PERIOD_TIME);
- hwc_debug("hw_rule_period_time: (%u,%u)\n", it->min, it->max);
- min_datainterval = 0xff;
- list_for_each_entry(fp, &subs->fmt_list, list) {
- if (!hw_check_valid_format(subs, params, fp))
- continue;
- min_datainterval = min(min_datainterval, fp->datainterval);
- }
- if (min_datainterval == 0xff) {
- hwc_debug(" --> get emtpy\n");
- it->empty = 1;
- return -EINVAL;
- }
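- /* the period time may not be shorter than the smallest data packet interval */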
- pmin = 125 * (1 << min_datainterval);
- changed = 0;
- if (it->min < pmin) {
- it->min = pmin;
- it->openmin = 0;
- changed = 1;
- }
- if (snd_interval_checkempty(it)) {
- it->empty = 1;
- return -EINVAL;
- }
- hwc_debug(" --> (%u,%u) (changed = %d)\n", it->min, it->max, changed);
- return changed;
-}
-
-/*
- * If the device supports unusual sample rates, does the request meet these?
- */
-static int snd_usb_pcm_check_knot(struct snd_pcm_runtime *runtime,
- struct snd_usb_substream *subs)
-{
- struct audioformat *fp;
- int count = 0, needs_knot = 0;
- int err;
-
- list_for_each_entry(fp, &subs->fmt_list, list) {
- if (fp->rates & SNDRV_PCM_RATE_CONTINUOUS)
- return 0;
- count += fp->nr_rates;
- if (fp->rates & SNDRV_PCM_RATE_KNOT)
- needs_knot = 1;
- }
- if (!needs_knot)
- return 0;
-
- subs->rate_list.count = count;
- subs->rate_list.list = kmalloc(sizeof(int) * count, GFP_KERNEL);
- subs->rate_list.mask = 0;
- count = 0;
- list_for_each_entry(fp, &subs->fmt_list, list) {
- int i;
- for (i = 0; i < fp->nr_rates; i++)
- subs->rate_list.list[count++] = fp->rate_table[i];
- }
- err = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
- &subs->rate_list);
- if (err < 0)
- return err;
-
- return 0;
-}
-
-
-/*
- * set up the runtime hardware information.
- */
-
-static int setup_hw_info(struct snd_pcm_runtime *runtime, struct snd_usb_substream *subs)
-{
- struct list_head *p;
- unsigned int pt, ptmin;
- int param_period_time_if_needed;
- int err;
-
- runtime->hw.formats = subs->formats;
-
- runtime->hw.rate_min = 0x7fffffff;
- runtime->hw.rate_max = 0;
- runtime->hw.channels_min = 256;
- runtime->hw.channels_max = 0;
- runtime->hw.rates = 0;
- ptmin = UINT_MAX;
- /* check min/max rates and channels */
- list_for_each(p, &subs->fmt_list) {
- struct audioformat *fp;
- fp = list_entry(p, struct audioformat, list);
- runtime->hw.rates |= fp->rates;
- if (runtime->hw.rate_min > fp->rate_min)
- runtime->hw.rate_min = fp->rate_min;
- if (runtime->hw.rate_max < fp->rate_max)
- runtime->hw.rate_max = fp->rate_max;
- if (runtime->hw.channels_min > fp->channels)
- runtime->hw.channels_min = fp->channels;
- if (runtime->hw.channels_max < fp->channels)
- runtime->hw.channels_max = fp->channels;
- if (fp->fmt_type == UAC_FORMAT_TYPE_II && fp->frame_size > 0) {
- /* FIXME: there might be more than one audio format... */
- runtime->hw.period_bytes_min = runtime->hw.period_bytes_max =
- fp->frame_size;
- }
- pt = 125 * (1 << fp->datainterval);
- ptmin = min(ptmin, pt);
- }
-
- param_period_time_if_needed = SNDRV_PCM_HW_PARAM_PERIOD_TIME;
- if (snd_usb_get_speed(subs->dev) != USB_SPEED_HIGH)
- /* full speed devices have fixed data packet interval */
- ptmin = 1000;
- if (ptmin == 1000)
- /* if period time doesn't go below 1 ms, no rules needed */
- param_period_time_if_needed = -1;
- snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_TIME,
- ptmin, UINT_MAX);
-
- if ((err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
- hw_rule_rate, subs,
- SNDRV_PCM_HW_PARAM_FORMAT,
- SNDRV_PCM_HW_PARAM_CHANNELS,
- param_period_time_if_needed,
- -1)) < 0)
- return err;
- if ((err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
- hw_rule_channels, subs,
- SNDRV_PCM_HW_PARAM_FORMAT,
- SNDRV_PCM_HW_PARAM_RATE,
- param_period_time_if_needed,
- -1)) < 0)
- return err;
- if ((err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
- hw_rule_format, subs,
- SNDRV_PCM_HW_PARAM_RATE,
- SNDRV_PCM_HW_PARAM_CHANNELS,
- param_period_time_if_needed,
- -1)) < 0)
- return err;
- if (param_period_time_if_needed >= 0) {
- err = snd_pcm_hw_rule_add(runtime, 0,
- SNDRV_PCM_HW_PARAM_PERIOD_TIME,
- hw_rule_period_time, subs,
- SNDRV_PCM_HW_PARAM_FORMAT,
- SNDRV_PCM_HW_PARAM_CHANNELS,
- SNDRV_PCM_HW_PARAM_RATE,
- -1);
- if (err < 0)
- return err;
- }
- if ((err = snd_usb_pcm_check_knot(runtime, subs)) < 0)
- return err;
- return 0;
-}
-
-static int snd_usb_pcm_open(struct snd_pcm_substream *substream, int direction)
-{
- struct snd_usb_stream *as = snd_pcm_substream_chip(substream);
- struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_usb_substream *subs = &as->substream[direction];
-
- subs->interface = -1;
- subs->format = 0;
- runtime->hw = snd_usb_hardware;
- runtime->private_data = subs;
- subs->pcm_substream = substream;
- return setup_hw_info(runtime, subs);
-}
-
-static int snd_usb_pcm_close(struct snd_pcm_substream *substream, int direction)
-{
- struct snd_usb_stream *as = snd_pcm_substream_chip(substream);
- struct snd_usb_substream *subs = &as->substream[direction];
-
- if (!as->chip->shutdown && subs->interface >= 0) {
- usb_set_interface(subs->dev, subs->interface, 0);
- subs->interface = -1;
- }
- subs->pcm_substream = NULL;
- return 0;
-}
-
-static int snd_usb_playback_open(struct snd_pcm_substream *substream)
-{
- return snd_usb_pcm_open(substream, SNDRV_PCM_STREAM_PLAYBACK);
-}
-
-static int snd_usb_playback_close(struct snd_pcm_substream *substream)
-{
- return snd_usb_pcm_close(substream, SNDRV_PCM_STREAM_PLAYBACK);
-}
-
-static int snd_usb_capture_open(struct snd_pcm_substream *substream)
-{
- return snd_usb_pcm_open(substream, SNDRV_PCM_STREAM_CAPTURE);
-}
-
-static int snd_usb_capture_close(struct snd_pcm_substream *substream)
-{
- return snd_usb_pcm_close(substream, SNDRV_PCM_STREAM_CAPTURE);
-}
-
-static struct snd_pcm_ops snd_usb_playback_ops = {
- .open = snd_usb_playback_open,
- .close = snd_usb_playback_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_usb_hw_params,
- .hw_free = snd_usb_hw_free,
- .prepare = snd_usb_pcm_prepare,
- .trigger = snd_usb_pcm_playback_trigger,
- .pointer = snd_usb_pcm_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
- .mmap = snd_pcm_lib_mmap_vmalloc,
-};
-
-static struct snd_pcm_ops snd_usb_capture_ops = {
- .open = snd_usb_capture_open,
- .close = snd_usb_capture_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_usb_hw_params,
- .hw_free = snd_usb_hw_free,
- .prepare = snd_usb_pcm_prepare,
- .trigger = snd_usb_pcm_capture_trigger,
- .pointer = snd_usb_pcm_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
- .mmap = snd_pcm_lib_mmap_vmalloc,
-};
-
-
-
-/*
- * helper functions
- */
-
-/*
- * combine bytes and get an integer value
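- * (the bytes are combined in little-endian order, as used by USB descriptors)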
- */
-unsigned int snd_usb_combine_bytes(unsigned char *bytes, int size)
-{
- switch (size) {
- case 1: return *bytes;
- case 2: return combine_word(bytes);
- case 3: return combine_triple(bytes);
- case 4: return combine_quad(bytes);
- default: return 0;
- }
-}
-
-/*
- * parse the descriptor buffer and return a pointer to the descriptor
- * with the given type.
- */
-void *snd_usb_find_desc(void *descstart, int desclen, void *after, u8 dtype)
-{
- u8 *p, *end, *next;
-
- p = descstart;
- end = p + desclen;
- for (; p < end;) {
- if (p[0] < 2)
- return NULL;
- next = p + p[0];
- if (next > end)
- return NULL;
- if (p[1] == dtype && (!after || (void *)p > after)) {
- return p;
- }
- p = next;
- }
- return NULL;
-}
-
-/*
- * find a class-specific interface descriptor with the given subtype.
- */
-void *snd_usb_find_csint_desc(void *buffer, int buflen, void *after, u8 dsubtype)
-{
- unsigned char *p = after;
-
- while ((p = snd_usb_find_desc(buffer, buflen, p,
- USB_DT_CS_INTERFACE)) != NULL) {
- if (p[0] >= 3 && p[2] == dsubtype)
- return p;
- }
- return NULL;
-}
-
-/*
- * Wrapper for usb_control_msg().
- * Allocates a temp buffer to prevent DMAing from/to the stack.
- */
-int snd_usb_ctl_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
- __u8 requesttype, __u16 value, __u16 index, void *data,
- __u16 size, int timeout)
-{
- int err;
- void *buf = NULL;
-
- if (size > 0) {
- buf = kmemdup(data, size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- }
- err = usb_control_msg(dev, pipe, request, requesttype,
- value, index, buf, size, timeout);
- if (size > 0) {
- memcpy(data, buf, size);
- kfree(buf);
- }
- return err;
-}
-
-
-/*
- * entry point for linux usb interface
- */
-
-static int usb_audio_probe(struct usb_interface *intf,
- const struct usb_device_id *id);
-static void usb_audio_disconnect(struct usb_interface *intf);
-
-#ifdef CONFIG_PM
-static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message);
-static int usb_audio_resume(struct usb_interface *intf);
-#else
-#define usb_audio_suspend NULL
-#define usb_audio_resume NULL
-#endif
-
-static struct usb_device_id usb_audio_ids [] = {
-#include "usbquirks.h"
- { .match_flags = (USB_DEVICE_ID_MATCH_INT_CLASS | USB_DEVICE_ID_MATCH_INT_SUBCLASS),
- .bInterfaceClass = USB_CLASS_AUDIO,
- .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL },
- { } /* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE (usb, usb_audio_ids);
-
-static struct usb_driver usb_audio_driver = {
- .name = "snd-usb-audio",
- .probe = usb_audio_probe,
- .disconnect = usb_audio_disconnect,
- .suspend = usb_audio_suspend,
- .resume = usb_audio_resume,
- .id_table = usb_audio_ids,
-};
-
-
-#if defined(CONFIG_PROC_FS) && defined(CONFIG_SND_VERBOSE_PROCFS)
-
-/*
- * proc interface for listing the supported pcm formats
- */
-static void proc_dump_substream_formats(struct snd_usb_substream *subs, struct snd_info_buffer *buffer)
-{
- struct list_head *p;
- static char *sync_types[4] = {
- "NONE", "ASYNC", "ADAPTIVE", "SYNC"
- };
-
- list_for_each(p, &subs->fmt_list) {
- struct audioformat *fp;
- fp = list_entry(p, struct audioformat, list);
- snd_iprintf(buffer, " Interface %d\n", fp->iface);
- snd_iprintf(buffer, " Altset %d\n", fp->altsetting);
- snd_iprintf(buffer, " Format: %s\n",
- snd_pcm_format_name(fp->format));
- snd_iprintf(buffer, " Channels: %d\n", fp->channels);
- snd_iprintf(buffer, " Endpoint: %d %s (%s)\n",
- fp->endpoint & USB_ENDPOINT_NUMBER_MASK,
- fp->endpoint & USB_DIR_IN ? "IN" : "OUT",
- sync_types[(fp->ep_attr & USB_ENDPOINT_SYNCTYPE) >> 2]);
- if (fp->rates & SNDRV_PCM_RATE_CONTINUOUS) {
- snd_iprintf(buffer, " Rates: %d - %d (continuous)\n",
- fp->rate_min, fp->rate_max);
- } else {
- unsigned int i;
- snd_iprintf(buffer, " Rates: ");
- for (i = 0; i < fp->nr_rates; i++) {
- if (i > 0)
- snd_iprintf(buffer, ", ");
- snd_iprintf(buffer, "%d", fp->rate_table[i]);
- }
- snd_iprintf(buffer, "\n");
- }
- if (snd_usb_get_speed(subs->dev) == USB_SPEED_HIGH)
- snd_iprintf(buffer, " Data packet interval: %d us\n",
- 125 * (1 << fp->datainterval));
- // snd_iprintf(buffer, " Max Packet Size = %d\n", fp->maxpacksize);
- // snd_iprintf(buffer, " EP Attribute = %#x\n", fp->attributes);
- }
-}
-
-static void proc_dump_substream_status(struct snd_usb_substream *subs, struct snd_info_buffer *buffer)
-{
- if (subs->running) {
- unsigned int i;
- snd_iprintf(buffer, " Status: Running\n");
- snd_iprintf(buffer, " Interface = %d\n", subs->interface);
- snd_iprintf(buffer, " Altset = %d\n", subs->format);
- snd_iprintf(buffer, " URBs = %d [ ", subs->nurbs);
- for (i = 0; i < subs->nurbs; i++)
- snd_iprintf(buffer, "%d ", subs->dataurb[i].packets);
- snd_iprintf(buffer, "]\n");
- snd_iprintf(buffer, " Packet Size = %d\n", subs->curpacksize);
- snd_iprintf(buffer, " Momentary freq = %u Hz (%#x.%04x)\n",
- snd_usb_get_speed(subs->dev) == USB_SPEED_FULL
- ? get_full_speed_hz(subs->freqm)
- : get_high_speed_hz(subs->freqm),
- subs->freqm >> 16, subs->freqm & 0xffff);
- } else {
- snd_iprintf(buffer, " Status: Stop\n");
- }
-}
-
-static void proc_pcm_format_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
-{
- struct snd_usb_stream *stream = entry->private_data;
-
- snd_iprintf(buffer, "%s : %s\n", stream->chip->card->longname, stream->pcm->name);
-
- if (stream->substream[SNDRV_PCM_STREAM_PLAYBACK].num_formats) {
- snd_iprintf(buffer, "\nPlayback:\n");
- proc_dump_substream_status(&stream->substream[SNDRV_PCM_STREAM_PLAYBACK], buffer);
- proc_dump_substream_formats(&stream->substream[SNDRV_PCM_STREAM_PLAYBACK], buffer);
- }
- if (stream->substream[SNDRV_PCM_STREAM_CAPTURE].num_formats) {
- snd_iprintf(buffer, "\nCapture:\n");
- proc_dump_substream_status(&stream->substream[SNDRV_PCM_STREAM_CAPTURE], buffer);
- proc_dump_substream_formats(&stream->substream[SNDRV_PCM_STREAM_CAPTURE], buffer);
- }
-}
-
-static void proc_pcm_format_add(struct snd_usb_stream *stream)
-{
- struct snd_info_entry *entry;
- char name[32];
- struct snd_card *card = stream->chip->card;
-
- sprintf(name, "stream%d", stream->pcm_index);
- if (!snd_card_proc_new(card, name, &entry))
- snd_info_set_text_ops(entry, stream, proc_pcm_format_read);
-}
-
-#else
-
-static inline void proc_pcm_format_add(struct snd_usb_stream *stream)
-{
-}
-
-#endif
-
-/*
- * initialize the substream instance.
- */
-
-static void init_substream(struct snd_usb_stream *as, int stream, struct audioformat *fp)
-{
- struct snd_usb_substream *subs = &as->substream[stream];
-
- INIT_LIST_HEAD(&subs->fmt_list);
- spin_lock_init(&subs->lock);
-
- subs->stream = as;
- subs->direction = stream;
- subs->dev = as->chip->dev;
- subs->txfr_quirk = as->chip->txfr_quirk;
- if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL) {
- subs->ops = audio_urb_ops[stream];
- } else {
- subs->ops = audio_urb_ops_high_speed[stream];
- switch (as->chip->usb_id) {
- case USB_ID(0x041e, 0x3f02): /* E-Mu 0202 USB */
- case USB_ID(0x041e, 0x3f04): /* E-Mu 0404 USB */
- case USB_ID(0x041e, 0x3f0a): /* E-Mu Tracker Pre */
- subs->ops.retire_sync = retire_playback_sync_urb_hs_emu;
- break;
- }
- }
- snd_pcm_set_ops(as->pcm, stream,
- stream == SNDRV_PCM_STREAM_PLAYBACK ?
- &snd_usb_playback_ops : &snd_usb_capture_ops);
-
- list_add_tail(&fp->list, &subs->fmt_list);
- subs->formats |= 1ULL << fp->format;
- subs->endpoint = fp->endpoint;
- subs->num_formats++;
- subs->fmt_type = fp->fmt_type;
-}
-
-
-/*
- * free a substream
- */
-static void free_substream(struct snd_usb_substream *subs)
-{
- struct list_head *p, *n;
-
- if (!subs->num_formats)
- return; /* not initialized */
- list_for_each_safe(p, n, &subs->fmt_list) {
- struct audioformat *fp = list_entry(p, struct audioformat, list);
- kfree(fp->rate_table);
- kfree(fp);
- }
- kfree(subs->rate_list.list);
-}
-
-
-/*
- * free a usb stream instance
- */
-static void snd_usb_audio_stream_free(struct snd_usb_stream *stream)
-{
- free_substream(&stream->substream[0]);
- free_substream(&stream->substream[1]);
- list_del(&stream->list);
- kfree(stream);
-}
-
-static void snd_usb_audio_pcm_free(struct snd_pcm *pcm)
-{
- struct snd_usb_stream *stream = pcm->private_data;
- if (stream) {
- stream->pcm = NULL;
- snd_usb_audio_stream_free(stream);
- }
-}
-
-
-/*
- * add this endpoint to the chip instance.
- * if a stream with the same endpoint already exists, append to it.
- * if not, create a new pcm stream.
- */
-static int add_audio_endpoint(struct snd_usb_audio *chip, int stream, struct audioformat *fp)
-{
- struct list_head *p;
- struct snd_usb_stream *as;
- struct snd_usb_substream *subs;
- struct snd_pcm *pcm;
- int err;
-
- list_for_each(p, &chip->pcm_list) {
- as = list_entry(p, struct snd_usb_stream, list);
- if (as->fmt_type != fp->fmt_type)
- continue;
- subs = &as->substream[stream];
- if (!subs->endpoint)
- continue;
- if (subs->endpoint == fp->endpoint) {
- list_add_tail(&fp->list, &subs->fmt_list);
- subs->num_formats++;
- subs->formats |= 1ULL << fp->format;
- return 0;
- }
- }
- /* look for an empty stream */
- list_for_each(p, &chip->pcm_list) {
- as = list_entry(p, struct snd_usb_stream, list);
- if (as->fmt_type != fp->fmt_type)
- continue;
- subs = &as->substream[stream];
- if (subs->endpoint)
- continue;
- err = snd_pcm_new_stream(as->pcm, stream, 1);
- if (err < 0)
- return err;
- init_substream(as, stream, fp);
- return 0;
- }
-
- /* create a new pcm */
- as = kzalloc(sizeof(*as), GFP_KERNEL);
- if (!as)
- return -ENOMEM;
- as->pcm_index = chip->pcm_devs;
- as->chip = chip;
- as->fmt_type = fp->fmt_type;
- err = snd_pcm_new(chip->card, "USB Audio", chip->pcm_devs,
- stream == SNDRV_PCM_STREAM_PLAYBACK ? 1 : 0,
- stream == SNDRV_PCM_STREAM_PLAYBACK ? 0 : 1,
- &pcm);
- if (err < 0) {
- kfree(as);
- return err;
- }
- as->pcm = pcm;
- pcm->private_data = as;
- pcm->private_free = snd_usb_audio_pcm_free;
- pcm->info_flags = 0;
- if (chip->pcm_devs > 0)
- sprintf(pcm->name, "USB Audio #%d", chip->pcm_devs);
- else
- strcpy(pcm->name, "USB Audio");
-
- init_substream(as, stream, fp);
-
- list_add(&as->list, &chip->pcm_list);
- chip->pcm_devs++;
-
- proc_pcm_format_add(as);
-
- return 0;
-}
-
-
-/*
- * check if the device uses big-endian samples
- */
-static int is_big_endian_format(struct snd_usb_audio *chip, struct audioformat *fp)
-{
- switch (chip->usb_id) {
- case USB_ID(0x0763, 0x2001): /* M-Audio Quattro: captured data only */
- if (fp->endpoint & USB_DIR_IN)
- return 1;
- break;
- case USB_ID(0x0763, 0x2003): /* M-Audio Audiophile USB */
- if (device_setup[chip->index] == 0x00 ||
- fp->altsetting==1 || fp->altsetting==2 || fp->altsetting==3)
- return 1;
- }
- return 0;
-}
-
-/*
- * parse the audio format type I descriptor
- * and return the corresponding pcm format
- *
- * @chip: the usb-audio chip instance
- * @fp: audioformat record
- * @format: the format tag (wFormatTag)
- * @fmt: the format type descriptor
- */
-static int parse_audio_format_i_type(struct snd_usb_audio *chip,
- struct audioformat *fp,
- int format, void *_fmt,
- int protocol)
-{
- int pcm_format, i;
- int sample_width, sample_bytes;
-
- switch (protocol) {
- case UAC_VERSION_1: {
- struct uac_format_type_i_discrete_descriptor *fmt = _fmt;
- sample_width = fmt->bBitResolution;
- sample_bytes = fmt->bSubframeSize;
- break;
- }
-
- case UAC_VERSION_2: {
- struct uac_format_type_i_ext_descriptor *fmt = _fmt;
- sample_width = fmt->bBitResolution;
- sample_bytes = fmt->bSubslotSize;
-
- /*
- * FIXME
- * USB audio class v2 devices specify a bitmap of possible
- * audio formats rather than one fixed value. For now, we just
- * pick one of them and report that as the only possible
- * value for this setting.
- * The bit allocation map is in fact compatible with the
- * wFormatTag of the v1 AS streaming descriptors, which is why
- * we can simply map the matrix.
- */
-
- for (i = 0; i < 5; i++)
- if (format & (1UL << i)) {
- format = i + 1;
- break;
- }
-
- break;
- }
-
- default:
- return -EINVAL;
- }
-
- /* FIXME: correct endianness and sign? */
- pcm_format = -1;
-
- switch (format) {
- case UAC_FORMAT_TYPE_I_UNDEFINED: /* some devices don't define this correctly... */
- snd_printdd(KERN_INFO "%d:%u:%d : format type 0 is detected, processed as PCM\n",
- chip->dev->devnum, fp->iface, fp->altsetting);
- /* fall-through */
- case UAC_FORMAT_TYPE_I_PCM:
- if (sample_width > sample_bytes * 8) {
- snd_printk(KERN_INFO "%d:%u:%d : sample bitwidth %d in over sample bytes %d\n",
- chip->dev->devnum, fp->iface, fp->altsetting,
- sample_width, sample_bytes);
- }
- /* check the format byte size */
- switch (sample_bytes) {
- case 1:
- pcm_format = SNDRV_PCM_FORMAT_S8;
- break;
- case 2:
- if (is_big_endian_format(chip, fp))
- pcm_format = SNDRV_PCM_FORMAT_S16_BE; /* grrr, big endian!! */
- else
- pcm_format = SNDRV_PCM_FORMAT_S16_LE;
- break;
- case 3:
- if (is_big_endian_format(chip, fp))
- pcm_format = SNDRV_PCM_FORMAT_S24_3BE; /* grrr, big endian!! */
- else
- pcm_format = SNDRV_PCM_FORMAT_S24_3LE;
- break;
- case 4:
- pcm_format = SNDRV_PCM_FORMAT_S32_LE;
- break;
- default:
- snd_printk(KERN_INFO "%d:%u:%d : unsupported sample bitwidth %d in %d bytes\n",
- chip->dev->devnum, fp->iface, fp->altsetting,
- sample_width, sample_bytes);
- break;
- }
- break;
- case UAC_FORMAT_TYPE_I_PCM8:
- pcm_format = SNDRV_PCM_FORMAT_U8;
-
- /* Dallas DS4201 workaround: it advertises U8 format, but really
- supports S8. */
- if (chip->usb_id == USB_ID(0x04fa, 0x4201))
- pcm_format = SNDRV_PCM_FORMAT_S8;
- break;
- case UAC_FORMAT_TYPE_I_IEEE_FLOAT:
- pcm_format = SNDRV_PCM_FORMAT_FLOAT_LE;
- break;
- case UAC_FORMAT_TYPE_I_ALAW:
- pcm_format = SNDRV_PCM_FORMAT_A_LAW;
- break;
- case UAC_FORMAT_TYPE_I_MULAW:
- pcm_format = SNDRV_PCM_FORMAT_MU_LAW;
- break;
- default:
- snd_printk(KERN_INFO "%d:%u:%d : unsupported format type %d\n",
- chip->dev->devnum, fp->iface, fp->altsetting, format);
- break;
- }
- return pcm_format;
-}
-
-
-/*
- * parse the format descriptor and store the possible sample rates
- * in the audioformat table (audio class v1).
- *
- * @chip: the usb-audio chip instance
- * @fp: audioformat record
- * @fmt: the format descriptor
- * @offset: the start offset of the descriptor field pointing to the rate type
- * (7 for type I and III, 8 for type II)
- */
-static int parse_audio_format_rates_v1(struct snd_usb_audio *chip, struct audioformat *fp,
- unsigned char *fmt, int offset)
-{
- int nr_rates = fmt[offset];
-
- if (fmt[0] < offset + 1 + 3 * (nr_rates ? nr_rates : 2)) {
- snd_printk(KERN_ERR "%d:%u:%d : invalid UAC_FORMAT_TYPE desc\n",
- chip->dev->devnum, fp->iface, fp->altsetting);
- return -1;
- }
-
- if (nr_rates) {
- /*
- * build the rate table and bitmap flags
- */
- int r, idx;
-
- fp->rate_table = kmalloc(sizeof(int) * nr_rates, GFP_KERNEL);
- if (fp->rate_table == NULL) {
- snd_printk(KERN_ERR "cannot malloc\n");
- return -1;
- }
-
- fp->nr_rates = 0;
- fp->rate_min = fp->rate_max = 0;
- for (r = 0, idx = offset + 1; r < nr_rates; r++, idx += 3) {
- unsigned int rate = combine_triple(&fmt[idx]);
- if (!rate)
- continue;
- /* C-Media CM6501 mislabels its 96 kHz altsetting */
- if (rate == 48000 && nr_rates == 1 &&
- (chip->usb_id == USB_ID(0x0d8c, 0x0201) ||
- chip->usb_id == USB_ID(0x0d8c, 0x0102)) &&
- fp->altsetting == 5 && fp->maxpacksize == 392)
- rate = 96000;
- /* Creative VF0470 Live Cam reports 16 kHz instead of 8kHz */
- if (rate == 16000 && chip->usb_id == USB_ID(0x041e, 0x4068))
- rate = 8000;
- fp->rate_table[fp->nr_rates] = rate;
- if (!fp->rate_min || rate < fp->rate_min)
- fp->rate_min = rate;
- if (!fp->rate_max || rate > fp->rate_max)
- fp->rate_max = rate;
- fp->rates |= snd_pcm_rate_to_rate_bit(rate);
- fp->nr_rates++;
- }
- if (!fp->nr_rates) {
- hwc_debug("All rates were zero. Skipping format!\n");
- return -1;
- }
- } else {
- /* continuous rates */
- fp->rates = SNDRV_PCM_RATE_CONTINUOUS;
- fp->rate_min = combine_triple(&fmt[offset + 1]);
- fp->rate_max = combine_triple(&fmt[offset + 4]);
- }
- return 0;
-}
-
-/*
- * parse the format descriptor and store the possible sample rates
- * in the audioformat table (audio class v2).
- */
-static int parse_audio_format_rates_v2(struct snd_usb_audio *chip,
- struct audioformat *fp,
- struct usb_host_interface *iface)
-{
- struct usb_device *dev = chip->dev;
- unsigned char tmp[2], *data;
- int i, nr_rates, data_size, ret = 0;
-
- /* get the number of sample rates first by only fetching 2 bytes */
- ret = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_RANGE,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
- 0x0100, chip->clock_id << 8, tmp, sizeof(tmp), 1000);
-
- if (ret < 0) {
- snd_printk(KERN_ERR "unable to retrieve number of sample rates\n");
- goto err;
- }
-
- nr_rates = (tmp[1] << 8) | tmp[0];
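- /* the returned range block holds a 2-byte count followed by
- * 12 bytes (MIN/MAX/RES, 4 bytes each) per sample rate */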
- data_size = 2 + 12 * nr_rates;
- data = kzalloc(data_size, GFP_KERNEL);
- if (!data) {
- ret = -ENOMEM;
- goto err;
- }
-
- /* now get the full information */
- ret = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_RANGE,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
- 0x0100, chip->clock_id << 8, data, data_size, 1000);
-
- if (ret < 0) {
- snd_printk(KERN_ERR "unable to retrieve sample rate range\n");
- ret = -EINVAL;
- goto err_free;
- }
-
- fp->rate_table = kmalloc(sizeof(int) * nr_rates, GFP_KERNEL);
- if (!fp->rate_table) {
- ret = -ENOMEM;
- goto err_free;
- }
-
- fp->nr_rates = 0;
- fp->rate_min = fp->rate_max = 0;
-
- for (i = 0; i < nr_rates; i++) {
- int rate = combine_quad(&data[2 + 12 * i]);
-
- fp->rate_table[fp->nr_rates] = rate;
- if (!fp->rate_min || rate < fp->rate_min)
- fp->rate_min = rate;
- if (!fp->rate_max || rate > fp->rate_max)
- fp->rate_max = rate;
- fp->rates |= snd_pcm_rate_to_rate_bit(rate);
- fp->nr_rates++;
- }
-
-err_free:
- kfree(data);
-err:
- return ret;
-}
-
-/*
- * parse the format type I and III descriptors
- */
-static int parse_audio_format_i(struct snd_usb_audio *chip,
- struct audioformat *fp,
- int format, void *_fmt,
- struct usb_host_interface *iface)
-{
- struct usb_interface_descriptor *altsd = get_iface_desc(iface);
- struct uac_format_type_i_discrete_descriptor *fmt = _fmt;
- int protocol = altsd->bInterfaceProtocol;
- int pcm_format, ret;
-
- if (fmt->bFormatType == UAC_FORMAT_TYPE_III) {
- /* FIXME: the format type is really IECxxx
- * but we give normal PCM format to get the existing
- * apps working...
- */
- switch (chip->usb_id) {
-
- case USB_ID(0x0763, 0x2003): /* M-Audio Audiophile USB */
- if (device_setup[chip->index] == 0x00 &&
- fp->altsetting == 6)
- pcm_format = SNDRV_PCM_FORMAT_S16_BE;
- else
- pcm_format = SNDRV_PCM_FORMAT_S16_LE;
- break;
- default:
- pcm_format = SNDRV_PCM_FORMAT_S16_LE;
- }
- } else {
- pcm_format = parse_audio_format_i_type(chip, fp, format, fmt, protocol);
- if (pcm_format < 0)
- return -1;
- }
-
- fp->format = pcm_format;
-
- /* gather possible sample rates */
- /* audio class v1 reports possible sample rates as part of the
- * proprietary class specific descriptor.
- * audio class v2 uses class specific EP0 range requests for that.
- */
- switch (protocol) {
- case UAC_VERSION_1:
- fp->channels = fmt->bNrChannels;
- ret = parse_audio_format_rates_v1(chip, fp, _fmt, 7);
- break;
- case UAC_VERSION_2:
- /* fp->channels is already set in this case */
- ret = parse_audio_format_rates_v2(chip, fp, iface);
- break;
- }
-
- if (fp->channels < 1) {
- snd_printk(KERN_ERR "%d:%u:%d : invalid channels %d\n",
- chip->dev->devnum, fp->iface, fp->altsetting, fp->channels);
- return -1;
- }
-
- return ret;
-}
-
-/*
- * parse the format type II descriptor
- */
-static int parse_audio_format_ii(struct snd_usb_audio *chip,
- struct audioformat *fp,
- int format, void *_fmt,
- struct usb_host_interface *iface)
-{
- int brate, framesize, ret;
- struct usb_interface_descriptor *altsd = get_iface_desc(iface);
- int protocol = altsd->bInterfaceProtocol;
-
- switch (format) {
- case UAC_FORMAT_TYPE_II_AC3:
- /* FIXME: there is no AC3 format defined yet */
- // fp->format = SNDRV_PCM_FORMAT_AC3;
- fp->format = SNDRV_PCM_FORMAT_U8; /* temporarily hack to receive byte streams */
- break;
- case UAC_FORMAT_TYPE_II_MPEG:
- fp->format = SNDRV_PCM_FORMAT_MPEG;
- break;
- default:
- snd_printd(KERN_INFO "%d:%u:%d : unknown format tag %#x is detected. processed as MPEG.\n",
- chip->dev->devnum, fp->iface, fp->altsetting, format);
- fp->format = SNDRV_PCM_FORMAT_MPEG;
- break;
- }
-
- fp->channels = 1;
-
- switch (protocol) {
- case UAC_VERSION_1: {
- struct uac_format_type_ii_discrete_descriptor *fmt = _fmt;
- brate = le16_to_cpu(fmt->wMaxBitRate);
- framesize = le16_to_cpu(fmt->wSamplesPerFrame);
- snd_printd(KERN_INFO "found format II with max.bitrate = %d, frame size=%d\n", brate, framesize);
- fp->frame_size = framesize;
- ret = parse_audio_format_rates_v1(chip, fp, _fmt, 8); /* fmt[8..] sample rates */
- break;
- }
- case UAC_VERSION_2: {
- struct uac_format_type_ii_ext_descriptor *fmt = _fmt;
- brate = le16_to_cpu(fmt->wMaxBitRate);
- framesize = le16_to_cpu(fmt->wSamplesPerFrame);
- snd_printd(KERN_INFO "found format II with max.bitrate = %d, frame size=%d\n", brate, framesize);
- fp->frame_size = framesize;
- ret = parse_audio_format_rates_v2(chip, fp, iface);
- break;
- }
- }
-
- return ret;
-}
-
-static int parse_audio_format(struct snd_usb_audio *chip, struct audioformat *fp,
- int format, unsigned char *fmt, int stream,
- struct usb_host_interface *iface)
-{
- int err;
-
- switch (fmt[3]) {
- case UAC_FORMAT_TYPE_I:
- case UAC_FORMAT_TYPE_III:
- err = parse_audio_format_i(chip, fp, format, fmt, iface);
- break;
- case UAC_FORMAT_TYPE_II:
- err = parse_audio_format_ii(chip, fp, format, fmt, iface);
- break;
- default:
- snd_printd(KERN_INFO "%d:%u:%d : format type %d is not supported yet\n",
- chip->dev->devnum, fp->iface, fp->altsetting, fmt[3]);
- return -1;
- }
- fp->fmt_type = fmt[3];
- if (err < 0)
- return err;
-#if 1
- /* FIXME: temporary hack for extigy/audigy 2 nx/zs */
- /* extigy apparently supports sample rates other than 48k
- * but not in an ordinary way, so we enable only 48k for now.
- */
- if (chip->usb_id == USB_ID(0x041e, 0x3000) ||
- chip->usb_id == USB_ID(0x041e, 0x3020) ||
- chip->usb_id == USB_ID(0x041e, 0x3061)) {
- if (fmt[3] == UAC_FORMAT_TYPE_I &&
- fp->rates != SNDRV_PCM_RATE_48000 &&
- fp->rates != SNDRV_PCM_RATE_96000)
- return -1;
- }
-#endif
- return 0;
-}
-
-static unsigned char parse_datainterval(struct snd_usb_audio *chip,
- struct usb_host_interface *alts)
-{
- if (snd_usb_get_speed(chip->dev) == USB_SPEED_HIGH &&
- get_endpoint(alts, 0)->bInterval >= 1 &&
- get_endpoint(alts, 0)->bInterval <= 4)
- return get_endpoint(alts, 0)->bInterval - 1;
- else
- return 0;
-}
-
-static int audiophile_skip_setting_quirk(struct snd_usb_audio *chip,
- int iface, int altno);
-static int parse_audio_endpoints(struct snd_usb_audio *chip, int iface_no)
-{
- struct usb_device *dev;
- struct usb_interface *iface;
- struct usb_host_interface *alts;
- struct usb_interface_descriptor *altsd;
- int i, altno, err, stream;
- int format = 0, num_channels = 0;
- struct audioformat *fp = NULL;
- unsigned char *fmt, *csep;
- int num, protocol;
-
- dev = chip->dev;
-
- /* parse the interface's altsettings */
- iface = usb_ifnum_to_if(dev, iface_no);
-
- num = iface->num_altsetting;
-
- /*
- * Dallas DS4201 workaround: It presents 5 altsettings, but the last
- * one lacks a sync pipe and does not produce any sound.
- */
- if (chip->usb_id == USB_ID(0x04fa, 0x4201))
- num = 4;
-
- for (i = 0; i < num; i++) {
- alts = &iface->altsetting[i];
- altsd = get_iface_desc(alts);
- protocol = altsd->bInterfaceProtocol;
- /* skip invalid one */
- if ((altsd->bInterfaceClass != USB_CLASS_AUDIO &&
- altsd->bInterfaceClass != USB_CLASS_VENDOR_SPEC) ||
- (altsd->bInterfaceSubClass != USB_SUBCLASS_AUDIOSTREAMING &&
- altsd->bInterfaceSubClass != USB_SUBCLASS_VENDOR_SPEC) ||
- altsd->bNumEndpoints < 1 ||
- le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize) == 0)
- continue;
- /* must be isochronous */
- if ((get_endpoint(alts, 0)->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) !=
- USB_ENDPOINT_XFER_ISOC)
- continue;
- /* check direction */
- stream = (get_endpoint(alts, 0)->bEndpointAddress & USB_DIR_IN) ?
- SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
- altno = altsd->bAlternateSetting;
-
- /* audiophile usb: skip altsets incompatible with device_setup
- */
- if (chip->usb_id == USB_ID(0x0763, 0x2003) &&
- audiophile_skip_setting_quirk(chip, iface_no, altno))
- continue;
-
- /* get audio formats */
- switch (protocol) {
- case UAC_VERSION_1: {
- struct uac_as_header_descriptor_v1 *as =
- snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, UAC_AS_GENERAL);
-
- if (!as) {
- snd_printk(KERN_ERR "%d:%u:%d : UAC_AS_GENERAL descriptor not found\n",
- dev->devnum, iface_no, altno);
- continue;
- }
-
- if (as->bLength < sizeof(*as)) {
- snd_printk(KERN_ERR "%d:%u:%d : invalid UAC_AS_GENERAL desc\n",
- dev->devnum, iface_no, altno);
- continue;
- }
-
- format = le16_to_cpu(as->wFormatTag); /* remember the format value */
- break;
- }
-
- case UAC_VERSION_2: {
- struct uac_as_header_descriptor_v2 *as =
- snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, UAC_AS_GENERAL);
-
- if (!as) {
- snd_printk(KERN_ERR "%d:%u:%d : UAC_AS_GENERAL descriptor not found\n",
- dev->devnum, iface_no, altno);
- continue;
- }
-
- if (as->bLength < sizeof(*as)) {
- snd_printk(KERN_ERR "%d:%u:%d : invalid UAC_AS_GENERAL desc\n",
- dev->devnum, iface_no, altno);
- continue;
- }
-
- num_channels = as->bNrChannels;
- format = le32_to_cpu(as->bmFormats);
-
- break;
- }
-
- default:
- snd_printk(KERN_ERR "%d:%u:%d : unknown interface protocol %04x\n",
- dev->devnum, iface_no, altno, protocol);
- continue;
- }
-
- /* get format type */
- fmt = snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, UAC_FORMAT_TYPE);
- if (!fmt) {
- snd_printk(KERN_ERR "%d:%u:%d : no UAC_FORMAT_TYPE desc\n",
- dev->devnum, iface_no, altno);
- continue;
- }
- if (((protocol == UAC_VERSION_1) && (fmt[0] < 8)) ||
- ((protocol == UAC_VERSION_2) && (fmt[0] != 6))) {
- snd_printk(KERN_ERR "%d:%u:%d : invalid UAC_FORMAT_TYPE desc\n",
- dev->devnum, iface_no, altno);
- continue;
- }
-
- /*
- * Blue Microphones workaround: The last altsetting is identical
- * with the previous one, except for a larger packet size, but
- * is actually a mislabeled two-channel setting; ignore it.
- */
- if (fmt[4] == 1 && fmt[5] == 2 && altno == 2 && num == 3 &&
- fp && fp->altsetting == 1 && fp->channels == 1 &&
- fp->format == SNDRV_PCM_FORMAT_S16_LE &&
- protocol == UAC_VERSION_1 &&
- le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize) ==
- fp->maxpacksize * 2)
- continue;
-
- csep = snd_usb_find_desc(alts->endpoint[0].extra, alts->endpoint[0].extralen, NULL, USB_DT_CS_ENDPOINT);
- /* Creamware Noah has this descriptor after the 2nd endpoint */
- if (!csep && altsd->bNumEndpoints >= 2)
- csep = snd_usb_find_desc(alts->endpoint[1].extra, alts->endpoint[1].extralen, NULL, USB_DT_CS_ENDPOINT);
- if (!csep || csep[0] < 7 || csep[2] != UAC_EP_GENERAL) {
- snd_printk(KERN_WARNING "%d:%u:%d : no or invalid"
- " class specific endpoint descriptor\n",
- dev->devnum, iface_no, altno);
- csep = NULL;
- }
-
- fp = kzalloc(sizeof(*fp), GFP_KERNEL);
- if (! fp) {
- snd_printk(KERN_ERR "cannot malloc\n");
- return -ENOMEM;
- }
-
- fp->iface = iface_no;
- fp->altsetting = altno;
- fp->altset_idx = i;
- fp->endpoint = get_endpoint(alts, 0)->bEndpointAddress;
- fp->ep_attr = get_endpoint(alts, 0)->bmAttributes;
- fp->datainterval = parse_datainterval(chip, alts);
- fp->maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize);
- /* num_channels is only set for v2 interfaces */
- fp->channels = num_channels;
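- /* high-bandwidth endpoints encode extra transactions per microframe
- * in bits 12..11 of wMaxPacketSize; fold them into the byte count */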
- if (snd_usb_get_speed(dev) == USB_SPEED_HIGH)
- fp->maxpacksize = (((fp->maxpacksize >> 11) & 3) + 1)
- * (fp->maxpacksize & 0x7ff);
- fp->attributes = csep ? csep[3] : 0;
-
- /* some quirks for attributes here */
-
- switch (chip->usb_id) {
- case USB_ID(0x0a92, 0x0053): /* AudioTrak Optoplay */
- /* Optoplay sets the sample rate attribute although
- * it does not seem to actually support it.
- */
- fp->attributes &= ~UAC_EP_CS_ATTR_SAMPLE_RATE;
- break;
- case USB_ID(0x041e, 0x3020): /* Creative SB Audigy 2 NX */
- case USB_ID(0x0763, 0x2003): /* M-Audio Audiophile USB */
- /* doesn't set the sample rate attribute, but supports it */
- fp->attributes |= UAC_EP_CS_ATTR_SAMPLE_RATE;
- break;
- case USB_ID(0x047f, 0x0ca1): /* plantronics headset */
- case USB_ID(0x077d, 0x07af): /* Griffin iMic (note that there is
- an older model 77d:223) */
- /*
- * plantronics headset and Griffin iMic advertise adaptive-in
- * although it's really not...
- */
- fp->ep_attr &= ~USB_ENDPOINT_SYNCTYPE;
- if (stream == SNDRV_PCM_STREAM_PLAYBACK)
- fp->ep_attr |= USB_ENDPOINT_SYNC_ADAPTIVE;
- else
- fp->ep_attr |= USB_ENDPOINT_SYNC_SYNC;
- break;
- }
-
- /* ok, let's parse further... */
- if (parse_audio_format(chip, fp, format, fmt, stream, alts) < 0) {
- kfree(fp->rate_table);
- kfree(fp);
- continue;
- }
-
- snd_printdd(KERN_INFO "%d:%u:%d: add audio endpoint %#x\n", dev->devnum, iface_no, altno, fp->endpoint);
- err = add_audio_endpoint(chip, stream, fp);
- if (err < 0) {
- kfree(fp->rate_table);
- kfree(fp);
- return err;
- }
- /* try to set the interface... */
- usb_set_interface(chip->dev, iface_no, altno);
- init_usb_pitch(chip->dev, iface_no, alts, fp);
- init_usb_sample_rate(chip->dev, iface_no, alts, fp, fp->rate_max);
- }
- return 0;
-}
-
-
-/*
- * disconnect streams
- * called from snd_usb_audio_disconnect()
- */
-static void snd_usb_stream_disconnect(struct list_head *head)
-{
- int idx;
- struct snd_usb_stream *as;
- struct snd_usb_substream *subs;
-
- as = list_entry(head, struct snd_usb_stream, list);
- for (idx = 0; idx < 2; idx++) {
- subs = &as->substream[idx];
- if (!subs->num_formats)
- return;
- release_substream_urbs(subs, 1);
- subs->interface = -1;
- }
-}
-
-static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int interface)
-{
- struct usb_device *dev = chip->dev;
- struct usb_host_interface *alts;
- struct usb_interface_descriptor *altsd;
- struct usb_interface *iface = usb_ifnum_to_if(dev, interface);
-
- if (!iface) {
- snd_printk(KERN_ERR "%d:%u:%d : does not exist\n",
- dev->devnum, ctrlif, interface);
- return -EINVAL;
- }
-
- if (usb_interface_claimed(iface)) {
- snd_printdd(KERN_INFO "%d:%d:%d: skipping, already claimed\n",
- dev->devnum, ctrlif, interface);
- return -EINVAL;
- }
-
- alts = &iface->altsetting[0];
- altsd = get_iface_desc(alts);
- if ((altsd->bInterfaceClass == USB_CLASS_AUDIO ||
- altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC) &&
- altsd->bInterfaceSubClass == USB_SUBCLASS_MIDISTREAMING) {
- int err = snd_usbmidi_create(chip->card, iface,
- &chip->midi_list, NULL);
- if (err < 0) {
- snd_printk(KERN_ERR "%d:%u:%d: cannot create sequencer device\n",
- dev->devnum, ctrlif, interface);
- return -EINVAL;
- }
- usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L);
-
- return 0;
- }
-
- if ((altsd->bInterfaceClass != USB_CLASS_AUDIO &&
- altsd->bInterfaceClass != USB_CLASS_VENDOR_SPEC) ||
- altsd->bInterfaceSubClass != USB_SUBCLASS_AUDIOSTREAMING) {
- snd_printdd(KERN_ERR "%d:%u:%d: skipping non-supported interface %d\n",
- dev->devnum, ctrlif, interface, altsd->bInterfaceClass);
- /* skip non-supported classes */
- return -EINVAL;
- }
-
- if (snd_usb_get_speed(dev) == USB_SPEED_LOW) {
- snd_printk(KERN_ERR "low speed audio streaming not supported\n");
- return -EINVAL;
- }
-
- if (! parse_audio_endpoints(chip, interface)) {
- usb_set_interface(dev, interface, 0); /* reset the current interface */
- usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L);
- return -EINVAL;
- }
-
- return 0;
-}
-
-/*
- * parse audio control descriptor and create pcm/midi streams
- */
-static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
-{
- struct usb_device *dev = chip->dev;
- struct usb_host_interface *host_iface;
- struct usb_interface_descriptor *altsd;
- void *control_header;
- int i, protocol;
-
- /* find audiocontrol interface */
- host_iface = &usb_ifnum_to_if(dev, ctrlif)->altsetting[0];
- control_header = snd_usb_find_csint_desc(host_iface->extra,
- host_iface->extralen,
- NULL, UAC_HEADER);
- altsd = get_iface_desc(host_iface);
- protocol = altsd->bInterfaceProtocol;
-
- if (!control_header) {
- snd_printk(KERN_ERR "cannot find UAC_HEADER\n");
- return -EINVAL;
- }
-
- switch (protocol) {
- case UAC_VERSION_1: {
- struct uac_ac_header_descriptor_v1 *h1 = control_header;
-
- if (!h1->bInCollection) {
- snd_printk(KERN_INFO "skipping empty audio interface (v1)\n");
- return -EINVAL;
- }
-
- if (h1->bLength < sizeof(*h1) + h1->bInCollection) {
- snd_printk(KERN_ERR "invalid UAC_HEADER (v1)\n");
- return -EINVAL;
- }
-
- for (i = 0; i < h1->bInCollection; i++)
- snd_usb_create_stream(chip, ctrlif, h1->baInterfaceNr[i]);
-
- break;
- }
-
- case UAC_VERSION_2: {
- struct uac_clock_source_descriptor *cs;
- struct usb_interface_assoc_descriptor *assoc =
- usb_ifnum_to_if(dev, ctrlif)->intf_assoc;
-
- if (!assoc) {
- snd_printk(KERN_ERR "Audio class v2 interfaces need an interface association\n");
- return -EINVAL;
- }
-
- /* FIXME: for now, we expect there is at least one clock source
- * descriptor and we always take the first one.
- * We should properly support devices with multiple clock sources,
- * clock selectors and sample rate conversion units. */
-
- cs = snd_usb_find_csint_desc(host_iface->extra, host_iface->extralen,
- NULL, UAC_CLOCK_SOURCE);
-
- if (!cs) {
- snd_printk(KERN_ERR "CLOCK_SOURCE descriptor not found\n");
- return -EINVAL;
- }
-
- chip->clock_id = cs->bClockID;
-
- for (i = 0; i < assoc->bInterfaceCount; i++) {
- int intf = assoc->bFirstInterface + i;
-
- if (intf != ctrlif)
- snd_usb_create_stream(chip, ctrlif, intf);
- }
-
- break;
- }
-
- default:
- snd_printk(KERN_ERR "unknown protocol version 0x%02x\n", protocol);
- return -EINVAL;
- }
-
- return 0;
-}
-
-/*
- * create a stream for an endpoint/altsetting without proper descriptors
- */
-static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
- struct usb_interface *iface,
- const struct snd_usb_audio_quirk *quirk)
-{
- struct audioformat *fp;
- struct usb_host_interface *alts;
- int stream, err;
- unsigned *rate_table = NULL;
-
- fp = kmemdup(quirk->data, sizeof(*fp), GFP_KERNEL);
- if (! fp) {
- snd_printk(KERN_ERR "cannot memdup\n");
- return -ENOMEM;
- }
- if (fp->nr_rates > 0) {
- rate_table = kmalloc(sizeof(int) * fp->nr_rates, GFP_KERNEL);
- if (!rate_table) {
- kfree(fp);
- return -ENOMEM;
- }
- memcpy(rate_table, fp->rate_table, sizeof(int) * fp->nr_rates);
- fp->rate_table = rate_table;
- }
-
- stream = (fp->endpoint & USB_DIR_IN)
- ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
- err = add_audio_endpoint(chip, stream, fp);
- if (err < 0) {
- kfree(fp);
- kfree(rate_table);
- return err;
- }
- if (fp->iface != get_iface_desc(&iface->altsetting[0])->bInterfaceNumber ||
- fp->altset_idx >= iface->num_altsetting) {
- kfree(fp);
- kfree(rate_table);
- return -EINVAL;
- }
- alts = &iface->altsetting[fp->altset_idx];
- fp->datainterval = parse_datainterval(chip, alts);
- fp->maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize);
- usb_set_interface(chip->dev, fp->iface, 0);
- init_usb_pitch(chip->dev, fp->iface, alts, fp);
- init_usb_sample_rate(chip->dev, fp->iface, alts, fp, fp->rate_max);
- return 0;
-}
-
-/*
- * create a stream for an interface with proper descriptors
- */
-static int create_standard_audio_quirk(struct snd_usb_audio *chip,
- struct usb_interface *iface,
- const struct snd_usb_audio_quirk *quirk)
-{
- struct usb_host_interface *alts;
- struct usb_interface_descriptor *altsd;
- int err;
-
- alts = &iface->altsetting[0];
- altsd = get_iface_desc(alts);
- err = parse_audio_endpoints(chip, altsd->bInterfaceNumber);
- if (err < 0) {
- snd_printk(KERN_ERR "cannot setup if %d: error %d\n",
- altsd->bInterfaceNumber, err);
- return err;
- }
- /* reset the current interface */
- usb_set_interface(chip->dev, altsd->bInterfaceNumber, 0);
- return 0;
-}
-
-/*
- * Create a stream for an Edirol UA-700/UA-25/UA-4FX interface.
- * The only way to detect the sample rate is by looking at wMaxPacketSize.
- */
-static int create_uaxx_quirk(struct snd_usb_audio *chip,
- struct usb_interface *iface,
- const struct snd_usb_audio_quirk *quirk)
-{
- static const struct audioformat ua_format = {
- .format = SNDRV_PCM_FORMAT_S24_3LE,
- .channels = 2,
- .fmt_type = UAC_FORMAT_TYPE_I,
- .altsetting = 1,
- .altset_idx = 1,
- .rates = SNDRV_PCM_RATE_CONTINUOUS,
- };
- struct usb_host_interface *alts;
- struct usb_interface_descriptor *altsd;
- struct audioformat *fp;
- int stream, err;
-
- /* both PCM and MIDI interfaces have 2 or more altsettings */
- if (iface->num_altsetting < 2)
- return -ENXIO;
- alts = &iface->altsetting[1];
- altsd = get_iface_desc(alts);
-
- if (altsd->bNumEndpoints == 2) {
- static const struct snd_usb_midi_endpoint_info ua700_ep = {
- .out_cables = 0x0003,
- .in_cables = 0x0003
- };
- static const struct snd_usb_audio_quirk ua700_quirk = {
- .type = QUIRK_MIDI_FIXED_ENDPOINT,
- .data = &ua700_ep
- };
- static const struct snd_usb_midi_endpoint_info uaxx_ep = {
- .out_cables = 0x0001,
- .in_cables = 0x0001
- };
- static const struct snd_usb_audio_quirk uaxx_quirk = {
- .type = QUIRK_MIDI_FIXED_ENDPOINT,
- .data = &uaxx_ep
- };
- const struct snd_usb_audio_quirk *quirk =
- chip->usb_id == USB_ID(0x0582, 0x002b)
- ? &ua700_quirk : &uaxx_quirk;
- return snd_usbmidi_create(chip->card, iface,
- &chip->midi_list, quirk);
- }
-
- if (altsd->bNumEndpoints != 1)
- return -ENXIO;
-
- fp = kmalloc(sizeof(*fp), GFP_KERNEL);
- if (!fp)
- return -ENOMEM;
- memcpy(fp, &ua_format, sizeof(*fp));
-
- fp->iface = altsd->bInterfaceNumber;
- fp->endpoint = get_endpoint(alts, 0)->bEndpointAddress;
- fp->ep_attr = get_endpoint(alts, 0)->bmAttributes;
- fp->datainterval = 0;
- fp->maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize);
-
- switch (fp->maxpacksize) {
- case 0x120:
- fp->rate_max = fp->rate_min = 44100;
- break;
- case 0x138:
- case 0x140:
- fp->rate_max = fp->rate_min = 48000;
- break;
- case 0x258:
- case 0x260:
- fp->rate_max = fp->rate_min = 96000;
- break;
- default:
- snd_printk(KERN_ERR "unknown sample rate\n");
- kfree(fp);
- return -ENXIO;
- }
-
- stream = (fp->endpoint & USB_DIR_IN)
- ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
- err = add_audio_endpoint(chip, stream, fp);
- if (err < 0) {
- kfree(fp);
- return err;
- }
- usb_set_interface(chip->dev, fp->iface, 0);
- return 0;
-}
-
-static int snd_usb_create_quirk(struct snd_usb_audio *chip,
- struct usb_interface *iface,
- const struct snd_usb_audio_quirk *quirk);
-
-/*
- * handle the quirks for the contained interfaces
- */
-static int create_composite_quirk(struct snd_usb_audio *chip,
- struct usb_interface *iface,
- const struct snd_usb_audio_quirk *quirk)
-{
- int probed_ifnum = get_iface_desc(iface->altsetting)->bInterfaceNumber;
- int err;
-
- for (quirk = quirk->data; quirk->ifnum >= 0; ++quirk) {
- iface = usb_ifnum_to_if(chip->dev, quirk->ifnum);
- if (!iface)
- continue;
- if (quirk->ifnum != probed_ifnum &&
- usb_interface_claimed(iface))
- continue;
- err = snd_usb_create_quirk(chip, iface, quirk);
- if (err < 0)
- return err;
- if (quirk->ifnum != probed_ifnum)
- usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L);
- }
- return 0;
-}
-
-static int ignore_interface_quirk(struct snd_usb_audio *chip,
- struct usb_interface *iface,
- const struct snd_usb_audio_quirk *quirk)
-{
- return 0;
-}
-
-/*
- * Allow alignment on audio sub-slot (channel samples) rather than
- * on audio slots (audio frames)
- */
-static int create_align_transfer_quirk(struct snd_usb_audio *chip,
- struct usb_interface *iface,
- const struct snd_usb_audio_quirk *quirk)
-{
- chip->txfr_quirk = 1;
- return 1; /* Continue with creating streams and mixer */
-}
-
-
-/*
- * boot quirks
- */
-
-#define EXTIGY_FIRMWARE_SIZE_OLD 794
-#define EXTIGY_FIRMWARE_SIZE_NEW 483
-
-static int snd_usb_extigy_boot_quirk(struct usb_device *dev, struct usb_interface *intf)
-{
- struct usb_host_config *config = dev->actconfig;
- int err;
-
- if (le16_to_cpu(get_cfg_desc(config)->wTotalLength) == EXTIGY_FIRMWARE_SIZE_OLD ||
- le16_to_cpu(get_cfg_desc(config)->wTotalLength) == EXTIGY_FIRMWARE_SIZE_NEW) {
- snd_printdd("sending Extigy boot sequence...\n");
- /* Send message to force it to reconnect with full interface. */
- err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev,0),
- 0x10, 0x43, 0x0001, 0x000a, NULL, 0, 1000);
- if (err < 0) snd_printdd("error sending boot message: %d\n", err);
- err = usb_get_descriptor(dev, USB_DT_DEVICE, 0,
- &dev->descriptor, sizeof(dev->descriptor));
- config = dev->actconfig;
- if (err < 0) snd_printdd("error usb_get_descriptor: %d\n", err);
- err = usb_reset_configuration(dev);
- if (err < 0) snd_printdd("error usb_reset_configuration: %d\n", err);
- snd_printdd("extigy_boot: new boot length = %d\n",
- le16_to_cpu(get_cfg_desc(config)->wTotalLength));
- return -ENODEV; /* quit this anyway */
- }
- return 0;
-}
-
-static int snd_usb_audigy2nx_boot_quirk(struct usb_device *dev)
-{
- u8 buf = 1;
-
- snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), 0x2a,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER,
- 0, 0, &buf, 1, 1000);
- if (buf == 0) {
- snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), 0x29,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
- 1, 2000, NULL, 0, 1000);
- return -ENODEV;
- }
- return 0;
-}
-
-/*
- * C-Media CM106/CM106+ have four 16-bit internal registers that are nicely
- * documented in the device's data sheet.
- */
-static int snd_usb_cm106_write_int_reg(struct usb_device *dev, int reg, u16 value)
-{
- u8 buf[4];
- buf[0] = 0x20;
- buf[1] = value & 0xff;
- buf[2] = (value >> 8) & 0xff;
- buf[3] = reg;
- return snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), USB_REQ_SET_CONFIGURATION,
- USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT,
- 0, 0, &buf, 4, 1000);
-}
-
-static int snd_usb_cm106_boot_quirk(struct usb_device *dev)
-{
- /*
- * Enable line-out driver mode, set headphone source to front
- * channels, enable stereo mic.
- */
- return snd_usb_cm106_write_int_reg(dev, 2, 0x8004);
-}
-
-/*
- * C-Media CM6206 is based on CM106 with two additional
- * registers that are not documented in the data sheet.
- * Values here are chosen based on sniffing USB traffic
- * under Windows.
- */
-static int snd_usb_cm6206_boot_quirk(struct usb_device *dev)
-{
- int err, reg;
- int val[] = {0x200c, 0x3000, 0xf800, 0x143f, 0x0000, 0x3000};
-
- for (reg = 0; reg < ARRAY_SIZE(val); reg++) {
- err = snd_usb_cm106_write_int_reg(dev, reg, val[reg]);
- if (err < 0)
- return err;
- }
-
- return err;
-}
-
-/*
- * This call will put the synth in "USB send" mode, i.e it will send MIDI
- * messages through USB (this is disabled at startup). The synth will
- * acknowledge by sending a sysex on endpoint 0x85 and by displaying a USB
- * sign on its LCD. Values here are chosen based on sniffing USB traffic
- * under Windows.
- */
-static int snd_usb_accessmusic_boot_quirk(struct usb_device *dev)
-{
- int err, actual_length;
-
- /* "midi send" enable */
- static const u8 seq[] = { 0x4e, 0x73, 0x52, 0x01 };
-
- void *buf = kmemdup(seq, ARRAY_SIZE(seq), GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- err = usb_interrupt_msg(dev, usb_sndintpipe(dev, 0x05), buf,
- ARRAY_SIZE(seq), &actual_length, 1000);
- kfree(buf);
- if (err < 0)
- return err;
-
- return 0;
-}
-
-/*
- * Setup quirks
- */
-#define AUDIOPHILE_SET 0x01 /* if set, parse device_setup */
-#define AUDIOPHILE_SET_DTS 0x02 /* if set, enable DTS Digital Output */
-#define AUDIOPHILE_SET_96K 0x04 /* 48-96KHz rate if set, 8-48KHz otherwise */
-#define AUDIOPHILE_SET_24B 0x08 /* 24bits sample if set, 16bits otherwise */
-#define AUDIOPHILE_SET_DI 0x10 /* if set, enable Digital Input */
-#define AUDIOPHILE_SET_MASK 0x1F /* bit mask for setup value */
-#define AUDIOPHILE_SET_24B_48K_DI 0x19 /* value for 24bits+48KHz+Digital Input */
-#define AUDIOPHILE_SET_24B_48K_NOTDI 0x09 /* value for 24bits+48KHz+No Digital Input */
-#define AUDIOPHILE_SET_16B_48K_DI 0x11 /* value for 16bits+48KHz+Digital Input */
-#define AUDIOPHILE_SET_16B_48K_NOTDI 0x01 /* value for 16bits+48KHz+No Digital Input */
-
-static int audiophile_skip_setting_quirk(struct snd_usb_audio *chip,
- int iface, int altno)
-{
- /* Reset ALL ifaces to 0 altsetting.
- * Call it for every possible altsetting of every interface.
- */
- usb_set_interface(chip->dev, iface, 0);
-
- if (device_setup[chip->index] & AUDIOPHILE_SET) {
- if ((device_setup[chip->index] & AUDIOPHILE_SET_DTS)
- && altno != 6)
- return 1; /* skip this altsetting */
- if ((device_setup[chip->index] & AUDIOPHILE_SET_96K)
- && altno != 1)
- return 1; /* skip this altsetting */
- if ((device_setup[chip->index] & AUDIOPHILE_SET_MASK) ==
- AUDIOPHILE_SET_24B_48K_DI && altno != 2)
- return 1; /* skip this altsetting */
- if ((device_setup[chip->index] & AUDIOPHILE_SET_MASK) ==
- AUDIOPHILE_SET_24B_48K_NOTDI && altno != 3)
- return 1; /* skip this altsetting */
- if ((device_setup[chip->index] & AUDIOPHILE_SET_MASK) ==
- AUDIOPHILE_SET_16B_48K_DI && altno != 4)
- return 1; /* skip this altsetting */
- if ((device_setup[chip->index] & AUDIOPHILE_SET_MASK) ==
- AUDIOPHILE_SET_16B_48K_NOTDI && altno != 5)
- return 1; /* skip this altsetting */
- }
- return 0; /* keep this altsetting */
-}
-
-static int create_any_midi_quirk(struct snd_usb_audio *chip,
- struct usb_interface *intf,
- const struct snd_usb_audio_quirk *quirk)
-{
- return snd_usbmidi_create(chip->card, intf, &chip->midi_list, quirk);
-}
-
-/*
- * audio-interface quirks
- *
- * returns zero if no standard audio/MIDI parsing is needed.
- * returns a postive value if standard audio/midi interfaces are parsed
- * after this.
- * returns a negative value at error.
- */
-static int snd_usb_create_quirk(struct snd_usb_audio *chip,
- struct usb_interface *iface,
- const struct snd_usb_audio_quirk *quirk)
-{
- typedef int (*quirk_func_t)(struct snd_usb_audio *, struct usb_interface *,
- const struct snd_usb_audio_quirk *);
- static const quirk_func_t quirk_funcs[] = {
- [QUIRK_IGNORE_INTERFACE] = ignore_interface_quirk,
- [QUIRK_COMPOSITE] = create_composite_quirk,
- [QUIRK_MIDI_STANDARD_INTERFACE] = create_any_midi_quirk,
- [QUIRK_MIDI_FIXED_ENDPOINT] = create_any_midi_quirk,
- [QUIRK_MIDI_YAMAHA] = create_any_midi_quirk,
- [QUIRK_MIDI_MIDIMAN] = create_any_midi_quirk,
- [QUIRK_MIDI_NOVATION] = create_any_midi_quirk,
- [QUIRK_MIDI_FASTLANE] = create_any_midi_quirk,
- [QUIRK_MIDI_EMAGIC] = create_any_midi_quirk,
- [QUIRK_MIDI_CME] = create_any_midi_quirk,
- [QUIRK_AUDIO_STANDARD_INTERFACE] = create_standard_audio_quirk,
- [QUIRK_AUDIO_FIXED_ENDPOINT] = create_fixed_stream_quirk,
- [QUIRK_AUDIO_EDIROL_UAXX] = create_uaxx_quirk,
- [QUIRK_AUDIO_ALIGN_TRANSFER] = create_align_transfer_quirk
- };
-
- if (quirk->type < QUIRK_TYPE_COUNT) {
- return quirk_funcs[quirk->type](chip, iface, quirk);
- } else {
- snd_printd(KERN_ERR "invalid quirk type %d\n", quirk->type);
- return -ENXIO;
- }
-}
-
-
-/*
- * common proc files to show the usb device info
- */
-static void proc_audio_usbbus_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
-{
- struct snd_usb_audio *chip = entry->private_data;
- if (!chip->shutdown)
- snd_iprintf(buffer, "%03d/%03d\n", chip->dev->bus->busnum, chip->dev->devnum);
-}
-
-static void proc_audio_usbid_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
-{
- struct snd_usb_audio *chip = entry->private_data;
- if (!chip->shutdown)
- snd_iprintf(buffer, "%04x:%04x\n",
- USB_ID_VENDOR(chip->usb_id),
- USB_ID_PRODUCT(chip->usb_id));
-}
-
-static void snd_usb_audio_create_proc(struct snd_usb_audio *chip)
-{
- struct snd_info_entry *entry;
- if (!snd_card_proc_new(chip->card, "usbbus", &entry))
- snd_info_set_text_ops(entry, chip, proc_audio_usbbus_read);
- if (!snd_card_proc_new(chip->card, "usbid", &entry))
- snd_info_set_text_ops(entry, chip, proc_audio_usbid_read);
-}
-
-/*
- * free the chip instance
- *
- * here we have to do not much, since pcm and controls are already freed
- *
- */
-
-static int snd_usb_audio_free(struct snd_usb_audio *chip)
-{
- kfree(chip);
- return 0;
-}
-
-static int snd_usb_audio_dev_free(struct snd_device *device)
-{
- struct snd_usb_audio *chip = device->device_data;
- return snd_usb_audio_free(chip);
-}
-
-
-/*
- * create a chip instance and set its names.
- */
-static int snd_usb_audio_create(struct usb_device *dev, int idx,
- const struct snd_usb_audio_quirk *quirk,
- struct snd_usb_audio **rchip)
-{
- struct snd_card *card;
- struct snd_usb_audio *chip;
- int err, len;
- char component[14];
- static struct snd_device_ops ops = {
- .dev_free = snd_usb_audio_dev_free,
- };
-
- *rchip = NULL;
-
- if (snd_usb_get_speed(dev) != USB_SPEED_LOW &&
- snd_usb_get_speed(dev) != USB_SPEED_FULL &&
- snd_usb_get_speed(dev) != USB_SPEED_HIGH) {
- snd_printk(KERN_ERR "unknown device speed %d\n", snd_usb_get_speed(dev));
- return -ENXIO;
- }
-
- err = snd_card_create(index[idx], id[idx], THIS_MODULE, 0, &card);
- if (err < 0) {
- snd_printk(KERN_ERR "cannot create card instance %d\n", idx);
- return err;
- }
-
- chip = kzalloc(sizeof(*chip), GFP_KERNEL);
- if (! chip) {
- snd_card_free(card);
- return -ENOMEM;
- }
-
- chip->index = idx;
- chip->dev = dev;
- chip->card = card;
- chip->usb_id = USB_ID(le16_to_cpu(dev->descriptor.idVendor),
- le16_to_cpu(dev->descriptor.idProduct));
- INIT_LIST_HEAD(&chip->pcm_list);
- INIT_LIST_HEAD(&chip->midi_list);
- INIT_LIST_HEAD(&chip->mixer_list);
-
- if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
- snd_usb_audio_free(chip);
- snd_card_free(card);
- return err;
- }
-
- strcpy(card->driver, "USB-Audio");
- sprintf(component, "USB%04x:%04x",
- USB_ID_VENDOR(chip->usb_id), USB_ID_PRODUCT(chip->usb_id));
- snd_component_add(card, component);
-
- /* retrieve the device string as shortname */
- if (quirk && quirk->product_name) {
- strlcpy(card->shortname, quirk->product_name, sizeof(card->shortname));
- } else {
- if (!dev->descriptor.iProduct ||
- usb_string(dev, dev->descriptor.iProduct,
- card->shortname, sizeof(card->shortname)) <= 0) {
- /* no name available from anywhere, so use ID */
- sprintf(card->shortname, "USB Device %#04x:%#04x",
- USB_ID_VENDOR(chip->usb_id),
- USB_ID_PRODUCT(chip->usb_id));
- }
- }
-
- /* retrieve the vendor and device strings as longname */
- if (quirk && quirk->vendor_name) {
- len = strlcpy(card->longname, quirk->vendor_name, sizeof(card->longname));
- } else {
- if (dev->descriptor.iManufacturer)
- len = usb_string(dev, dev->descriptor.iManufacturer,
- card->longname, sizeof(card->longname));
- else
- len = 0;
- /* we don't really care if there isn't any vendor string */
- }
- if (len > 0)
- strlcat(card->longname, " ", sizeof(card->longname));
-
- strlcat(card->longname, card->shortname, sizeof(card->longname));
-
- len = strlcat(card->longname, " at ", sizeof(card->longname));
-
- if (len < sizeof(card->longname))
- usb_make_path(dev, card->longname + len, sizeof(card->longname) - len);
-
- strlcat(card->longname,
- snd_usb_get_speed(dev) == USB_SPEED_LOW ? ", low speed" :
- snd_usb_get_speed(dev) == USB_SPEED_FULL ? ", full speed" :
- ", high speed",
- sizeof(card->longname));
-
- snd_usb_audio_create_proc(chip);
-
- *rchip = chip;
- return 0;
-}
-
-
-/*
- * probe the active usb device
- *
- * note that this can be called multiple times per a device, when it
- * includes multiple audio control interfaces.
- *
- * thus we check the usb device pointer and creates the card instance
- * only at the first time. the successive calls of this function will
- * append the pcm interface to the corresponding card.
- */
-static void *snd_usb_audio_probe(struct usb_device *dev,
- struct usb_interface *intf,
- const struct usb_device_id *usb_id)
-{
- const struct snd_usb_audio_quirk *quirk = (const struct snd_usb_audio_quirk *)usb_id->driver_info;
- int i, err;
- struct snd_usb_audio *chip;
- struct usb_host_interface *alts;
- int ifnum;
- u32 id;
-
- alts = &intf->altsetting[0];
- ifnum = get_iface_desc(alts)->bInterfaceNumber;
- id = USB_ID(le16_to_cpu(dev->descriptor.idVendor),
- le16_to_cpu(dev->descriptor.idProduct));
- if (quirk && quirk->ifnum >= 0 && ifnum != quirk->ifnum)
- goto __err_val;
-
- /* SB Extigy needs special boot-up sequence */
- /* if more models come, this will go to the quirk list. */
- if (id == USB_ID(0x041e, 0x3000)) {
- if (snd_usb_extigy_boot_quirk(dev, intf) < 0)
- goto __err_val;
- }
- /* SB Audigy 2 NX needs its own boot-up magic, too */
- if (id == USB_ID(0x041e, 0x3020)) {
- if (snd_usb_audigy2nx_boot_quirk(dev) < 0)
- goto __err_val;
- }
-
- /* C-Media CM106 / Turtle Beach Audio Advantage Roadie */
- if (id == USB_ID(0x10f5, 0x0200)) {
- if (snd_usb_cm106_boot_quirk(dev) < 0)
- goto __err_val;
- }
-
- /* C-Media CM6206 / CM106-Like Sound Device */
- if (id == USB_ID(0x0d8c, 0x0102)) {
- if (snd_usb_cm6206_boot_quirk(dev) < 0)
- goto __err_val;
- }
-
- /* Access Music VirusTI Desktop */
- if (id == USB_ID(0x133e, 0x0815)) {
- if (snd_usb_accessmusic_boot_quirk(dev) < 0)
- goto __err_val;
- }
-
- /*
- * found a config. now register to ALSA
- */
-
- /* check whether it's already registered */
- chip = NULL;
- mutex_lock(&register_mutex);
- for (i = 0; i < SNDRV_CARDS; i++) {
- if (usb_chip[i] && usb_chip[i]->dev == dev) {
- if (usb_chip[i]->shutdown) {
- snd_printk(KERN_ERR "USB device is in the shutdown state, cannot create a card instance\n");
- goto __error;
- }
- chip = usb_chip[i];
- break;
- }
- }
- if (! chip) {
- /* it's a fresh one.
- * now look for an empty slot and create a new card instance
- */
- for (i = 0; i < SNDRV_CARDS; i++)
- if (enable[i] && ! usb_chip[i] &&
- (vid[i] == -1 || vid[i] == USB_ID_VENDOR(id)) &&
- (pid[i] == -1 || pid[i] == USB_ID_PRODUCT(id))) {
- if (snd_usb_audio_create(dev, i, quirk, &chip) < 0) {
- goto __error;
- }
- snd_card_set_dev(chip->card, &intf->dev);
- break;
- }
- if (!chip) {
- printk(KERN_ERR "no available usb audio device\n");
- goto __error;
- }
- }
-
- chip->txfr_quirk = 0;
- err = 1; /* continue */
- if (quirk && quirk->ifnum != QUIRK_NO_INTERFACE) {
- /* need some special handlings */
- if ((err = snd_usb_create_quirk(chip, intf, quirk)) < 0)
- goto __error;
- }
-
- if (err > 0) {
- /* create normal USB audio interfaces */
- if (snd_usb_create_streams(chip, ifnum) < 0 ||
- snd_usb_create_mixer(chip, ifnum, ignore_ctl_error) < 0) {
- goto __error;
- }
- }
-
- /* we are allowed to call snd_card_register() many times */
- if (snd_card_register(chip->card) < 0) {
- goto __error;
- }
-
- usb_chip[chip->index] = chip;
- chip->num_interfaces++;
- mutex_unlock(&register_mutex);
- return chip;
-
- __error:
- if (chip && !chip->num_interfaces)
- snd_card_free(chip->card);
- mutex_unlock(&register_mutex);
- __err_val:
- return NULL;
-}
-
-/*
- * we need to take care of counter, since disconnection can be called also
- * many times as well as usb_audio_probe().
- */
-static void snd_usb_audio_disconnect(struct usb_device *dev, void *ptr)
-{
- struct snd_usb_audio *chip;
- struct snd_card *card;
- struct list_head *p;
-
- if (ptr == (void *)-1L)
- return;
-
- chip = ptr;
- card = chip->card;
- mutex_lock(&register_mutex);
- chip->shutdown = 1;
- chip->num_interfaces--;
- if (chip->num_interfaces <= 0) {
- snd_card_disconnect(card);
- /* release the pcm resources */
- list_for_each(p, &chip->pcm_list) {
- snd_usb_stream_disconnect(p);
- }
- /* release the midi resources */
- list_for_each(p, &chip->midi_list) {
- snd_usbmidi_disconnect(p);
- }
- /* release mixer resources */
- list_for_each(p, &chip->mixer_list) {
- snd_usb_mixer_disconnect(p);
- }
- usb_chip[chip->index] = NULL;
- mutex_unlock(&register_mutex);
- snd_card_free_when_closed(card);
- } else {
- mutex_unlock(&register_mutex);
- }
-}
-
-/*
- * new 2.5 USB kernel API
- */
-static int usb_audio_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- void *chip;
- chip = snd_usb_audio_probe(interface_to_usbdev(intf), intf, id);
- if (chip) {
- usb_set_intfdata(intf, chip);
- return 0;
- } else
- return -EIO;
-}
-
-static void usb_audio_disconnect(struct usb_interface *intf)
-{
- snd_usb_audio_disconnect(interface_to_usbdev(intf),
- usb_get_intfdata(intf));
-}
-
-#ifdef CONFIG_PM
-static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
-{
- struct snd_usb_audio *chip = usb_get_intfdata(intf);
- struct list_head *p;
- struct snd_usb_stream *as;
-
- if (chip == (void *)-1L)
- return 0;
-
- snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot);
- if (!chip->num_suspended_intf++) {
- list_for_each(p, &chip->pcm_list) {
- as = list_entry(p, struct snd_usb_stream, list);
- snd_pcm_suspend_all(as->pcm);
- }
- }
-
- return 0;
-}
-
-static int usb_audio_resume(struct usb_interface *intf)
-{
- struct snd_usb_audio *chip = usb_get_intfdata(intf);
-
- if (chip == (void *)-1L)
- return 0;
- if (--chip->num_suspended_intf)
- return 0;
- /*
- * ALSA leaves material resumption to user space
- * we just notify
- */
-
- snd_power_change_state(chip->card, SNDRV_CTL_POWER_D0);
-
- return 0;
-}
-#endif /* CONFIG_PM */
-
-static int __init snd_usb_audio_init(void)
-{
- if (nrpacks < 1 || nrpacks > MAX_PACKS) {
- printk(KERN_WARNING "invalid nrpacks value.\n");
- return -EINVAL;
- }
- return usb_register(&usb_audio_driver);
-}
-
-
-static void __exit snd_usb_audio_cleanup(void)
-{
- usb_deregister(&usb_audio_driver);
-}
-
-module_init(snd_usb_audio_init);
-module_exit(snd_usb_audio_cleanup);
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index 42c299cbf63a..d679e72a3e5c 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -21,15 +21,13 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-/* maximum number of endpoints per interface */
-#define MIDI_MAX_ENDPOINTS 2
-
/* handling of USB vendor/product ID pairs as 32-bit numbers */
#define USB_ID(vendor, product) (((vendor) << 16) | (product))
#define USB_ID_VENDOR(id) ((id) >> 16)
#define USB_ID_PRODUCT(id) ((u16)(id))
/*
+ *
*/
struct snd_usb_audio {
@@ -51,6 +49,10 @@ struct snd_usb_audio {
struct list_head midi_list; /* list of midi interfaces */
struct list_head mixer_list; /* list of mixer interfaces */
+
+ int setup; /* from the 'device_setup' module param */
+ int nrpacks; /* from the 'nrpacks' module param */
+ int async_unlink; /* from the 'async_unlink' module param */
};
/*
@@ -89,93 +91,8 @@ struct snd_usb_audio_quirk {
const void *data;
};
-/* data for QUIRK_MIDI_FIXED_ENDPOINT */
-struct snd_usb_midi_endpoint_info {
- int8_t out_ep; /* ep number, 0 autodetect */
- uint8_t out_interval; /* interval for interrupt endpoints */
- int8_t in_ep;
- uint8_t in_interval;
- uint16_t out_cables; /* bitmask */
- uint16_t in_cables; /* bitmask */
-};
-
-/* for QUIRK_MIDI_YAMAHA, data is NULL */
-
-/* for QUIRK_MIDI_MIDIMAN, data points to a snd_usb_midi_endpoint_info
- * structure (out_cables and in_cables only) */
-
-/* for QUIRK_COMPOSITE, data points to an array of snd_usb_audio_quirk
- * structures, terminated with .ifnum = -1 */
-
-/* for QUIRK_AUDIO_FIXED_ENDPOINT, data points to an audioformat structure */
-
-/* for QUIRK_AUDIO/MIDI_STANDARD_INTERFACE, data is NULL */
-
-/* for QUIRK_AUDIO_EDIROL_UAXX, data is NULL */
-
-/* for QUIRK_IGNORE_INTERFACE, data is NULL */
-
-/* for QUIRK_MIDI_NOVATION and _RAW, data is NULL */
-
-/* for QUIRK_MIDI_EMAGIC, data points to a snd_usb_midi_endpoint_info
- * structure (out_cables and in_cables only) */
-
-/* for QUIRK_MIDI_CME, data is NULL */
-
-/*
- */
-
-/*E-mu USB samplerate control quirk*/
-enum {
- EMU_QUIRK_SR_44100HZ = 0,
- EMU_QUIRK_SR_48000HZ,
- EMU_QUIRK_SR_88200HZ,
- EMU_QUIRK_SR_96000HZ,
- EMU_QUIRK_SR_176400HZ,
- EMU_QUIRK_SR_192000HZ
-};
-
#define combine_word(s) ((*(s)) | ((unsigned int)(s)[1] << 8))
#define combine_triple(s) (combine_word(s) | ((unsigned int)(s)[2] << 16))
#define combine_quad(s) (combine_triple(s) | ((unsigned int)(s)[3] << 24))
-unsigned int snd_usb_combine_bytes(unsigned char *bytes, int size);
-
-void *snd_usb_find_desc(void *descstart, int desclen, void *after, u8 dtype);
-void *snd_usb_find_csint_desc(void *descstart, int desclen, void *after, u8 dsubtype);
-
-int snd_usb_ctl_msg(struct usb_device *dev, unsigned int pipe,
- __u8 request, __u8 requesttype, __u16 value, __u16 index,
- void *data, __u16 size, int timeout);
-
-int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif,
- int ignore_error);
-void snd_usb_mixer_disconnect(struct list_head *p);
-
-int snd_usbmidi_create(struct snd_card *card,
- struct usb_interface *iface,
- struct list_head *midi_list,
- const struct snd_usb_audio_quirk *quirk);
-void snd_usbmidi_input_stop(struct list_head* p);
-void snd_usbmidi_input_start(struct list_head* p);
-void snd_usbmidi_disconnect(struct list_head *p);
-
-void snd_emuusb_set_samplerate(struct snd_usb_audio *chip,
- unsigned char samplerate_id);
-
-/*
- * retrieve usb_interface descriptor from the host interface
- * (conditional for compatibility with the older API)
- */
-#ifndef get_iface_desc
-#define get_iface_desc(iface) (&(iface)->desc)
-#define get_endpoint(alt,ep) (&(alt)->endpoint[ep].desc)
-#define get_ep_desc(ep) (&(ep)->desc)
-#define get_cfg_desc(cfg) (&(cfg)->desc)
-#endif
-
-#ifndef snd_usb_get_speed
-#define snd_usb_get_speed(dev) ((dev)->speed)
-#endif
-
#endif /* __USBAUDIO_H */
diff --git a/sound/usb/usx2y/us122l.c b/sound/usb/usx2y/us122l.c
index 9ca9a13a78da..6ef68e42138e 100644
--- a/sound/usb/usx2y/us122l.c
+++ b/sound/usb/usx2y/us122l.c
@@ -26,6 +26,7 @@
#define MODNAME "US122L"
#include "usb_stream.c"
#include "../usbaudio.h"
+#include "../midi.h"
#include "us122l.h"
MODULE_AUTHOR("Karsten Wiese <fzu@wemgehoertderstaat.de>");
diff --git a/sound/usb/usx2y/usbusx2y.h b/sound/usb/usx2y/usbusx2y.h
index 1d174cea352b..e43c0a86441a 100644
--- a/sound/usb/usx2y/usbusx2y.h
+++ b/sound/usb/usx2y/usbusx2y.h
@@ -1,6 +1,7 @@
#ifndef USBUSX2Y_H
#define USBUSX2Y_H
#include "../usbaudio.h"
+#include "../midi.h"
#include "usbus428ctldefs.h"
#define NRURBS 2
diff --git a/tools/perf/Documentation/perf-annotate.txt b/tools/perf/Documentation/perf-annotate.txt
index c9dcade06831..5164a655c39f 100644
--- a/tools/perf/Documentation/perf-annotate.txt
+++ b/tools/perf/Documentation/perf-annotate.txt
@@ -1,5 +1,5 @@
perf-annotate(1)
-==============
+================
NAME
----
diff --git a/tools/perf/Documentation/perf-bench.txt b/tools/perf/Documentation/perf-bench.txt
index ae525ac5a2ce..a3dbadb26ef5 100644
--- a/tools/perf/Documentation/perf-bench.txt
+++ b/tools/perf/Documentation/perf-bench.txt
@@ -1,5 +1,5 @@
perf-bench(1)
-============
+=============
NAME
----
@@ -19,12 +19,12 @@ COMMON OPTIONS
-f::
--format=::
Specify format style.
-Current available format styles are,
+Current available format styles are:
'default'::
Default style. This is mainly for human reading.
---------------------
-% perf bench sched pipe # with no style specify
+% perf bench sched pipe # with no style specified
(executing 1000000 pipe operations between two tasks)
Total time:5.855 sec
5.855061 usecs/op
@@ -79,7 +79,7 @@ options (20 sender and receiver processes per group)
Total time:0.308 sec
-% perf bench sched messaging -t -g 20 # be multi-thread,with 20 groups
+% perf bench sched messaging -t -g 20 # be multi-thread, with 20 groups
(20 sender and receiver threads per group)
(20 groups == 800 threads run)
diff --git a/tools/perf/Documentation/perf-buildid-cache.txt b/tools/perf/Documentation/perf-buildid-cache.txt
index 88bc3b519746..5d1a9500277f 100644
--- a/tools/perf/Documentation/perf-buildid-cache.txt
+++ b/tools/perf/Documentation/perf-buildid-cache.txt
@@ -8,7 +8,7 @@ perf-buildid-cache - Manage build-id cache.
SYNOPSIS
--------
[verse]
-'perf buildid-list <options>'
+'perf buildid-cache <options>'
DESCRIPTION
-----------
@@ -30,4 +30,4 @@ OPTIONS
SEE ALSO
--------
-linkperf:perf-record[1], linkperf:perf-report[1]
+linkperf:perf-record[1], linkperf:perf-report[1], linkperf:perf-buildid-list[1]
diff --git a/tools/perf/Documentation/perf-diff.txt b/tools/perf/Documentation/perf-diff.txt
index 8974e208cba6..20d97d84ea1c 100644
--- a/tools/perf/Documentation/perf-diff.txt
+++ b/tools/perf/Documentation/perf-diff.txt
@@ -1,5 +1,5 @@
perf-diff(1)
-==============
+============
NAME
----
diff --git a/tools/perf/Documentation/perf-inject.txt b/tools/perf/Documentation/perf-inject.txt
new file mode 100644
index 000000000000..025630d43cd2
--- /dev/null
+++ b/tools/perf/Documentation/perf-inject.txt
@@ -0,0 +1,35 @@
+perf-inject(1)
+==============
+
+NAME
+----
+perf-inject - Filter to augment the events stream with additional information
+
+SYNOPSIS
+--------
+[verse]
+'perf inject <options>'
+
+DESCRIPTION
+-----------
+perf-inject reads a perf-record event stream and repipes it to stdout. At any
+point the processing code can inject other events into the event stream - in
+this case build-ids (-b option) are read and injected as needed into the event
+stream.
+
+Build-ids are just the first user of perf-inject - potentially anything that
+needs userspace processing to augment the events stream with additional
+information could make use of this facility.
+
+OPTIONS
+-------
+-b::
+--build-ids=::
+ Inject build-ids into the output stream
+-v::
+--verbose::
+ Be more verbose.
+
+SEE ALSO
+--------
+linkperf:perf-record[1], linkperf:perf-report[1], linkperf:perf-archive[1]
diff --git a/tools/perf/Documentation/perf-kmem.txt b/tools/perf/Documentation/perf-kmem.txt
index eac4d852e7cd..a52fcde894c7 100644
--- a/tools/perf/Documentation/perf-kmem.txt
+++ b/tools/perf/Documentation/perf-kmem.txt
@@ -1,5 +1,5 @@
perf-kmem(1)
-==============
+============
NAME
----
diff --git a/tools/perf/Documentation/perf-kvm.txt b/tools/perf/Documentation/perf-kvm.txt
new file mode 100644
index 000000000000..d004e19fe6d6
--- /dev/null
+++ b/tools/perf/Documentation/perf-kvm.txt
@@ -0,0 +1,68 @@
+perf-kvm(1)
+===========
+
+NAME
+----
+perf-kvm - Tool to trace/measure kvm guest os
+
+SYNOPSIS
+--------
+[verse]
+'perf kvm' [--host] [--guest] [--guestmount=<path>
+ [--guestkallsyms=<path> --guestmodules=<path> | --guestvmlinux=<path>]]
+ {top|record|report|diff|buildid-list}
+'perf kvm' [--host] [--guest] [--guestkallsyms=<path> --guestmodules=<path>
+ | --guestvmlinux=<path>] {top|record|report|diff|buildid-list}
+
+DESCRIPTION
+-----------
+There are a couple of variants of perf kvm:
+
+	'perf kvm [options] top <command>' to generate and display
+	a performance counter profile of the guest os in realtime
+	for an arbitrary workload.
+
+	'perf kvm record <command>' to record the performance counter profile
+ of an arbitrary workload and save it into a perf data file. If both
+ --host and --guest are input, the perf data file name is perf.data.kvm.
+ If there is no --host but --guest, the file name is perf.data.guest.
+ If there is no --guest but --host, the file name is perf.data.host.
+
+ 'perf kvm report' to display the performance counter profile information
+ recorded via perf kvm record.
+
+	'perf kvm diff' to display the performance difference between two perf.data
+	files captured via perf record.
+
+ 'perf kvm buildid-list' to display the buildids found in a perf data file,
+ so that other tools can be used to fetch packages with matching symbol tables
+ for use by perf report.
+
+OPTIONS
+-------
+--host=::
+ Collect host side performance profile.
+--guest=::
+ Collect guest side performance profile.
+--guestmount=<path>::
+	Guest os root file system mount directory. Users mount guest os
+	root directories under <path> by a specific filesystem access method,
+	typically sshfs. For example, start 2 guest os: one's pid is 8888
+	and the other's is 9999.
+ #mkdir ~/guestmount; cd ~/guestmount
+ #sshfs -o allow_other,direct_io -p 5551 localhost:/ 8888/
+ #sshfs -o allow_other,direct_io -p 5552 localhost:/ 9999/
+ #perf kvm --host --guest --guestmount=~/guestmount top
+--guestkallsyms=<path>::
+	Guest os /proc/kallsyms file copy. 'perf kvm' reads it to get guest
+ kernel symbols. Users copy it out from guest os.
+--guestmodules=<path>::
+	Guest os /proc/modules file copy. 'perf kvm' reads it to get guest
+ kernel module information. Users copy it out from guest os.
+--guestvmlinux=<path>::
+ Guest os kernel vmlinux.
+
+SEE ALSO
+--------
+linkperf:perf-top[1], linkperf:perf-record[1], linkperf:perf-report[1],
+linkperf:perf-diff[1], linkperf:perf-buildid-list[1]
diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
index 8290b9422668..43e3dd284b90 100644
--- a/tools/perf/Documentation/perf-list.txt
+++ b/tools/perf/Documentation/perf-list.txt
@@ -15,6 +15,35 @@ DESCRIPTION
This command displays the symbolic event types which can be selected in the
various perf commands with the -e option.
+RAW HARDWARE EVENT DESCRIPTOR
+-----------------------------
+Even when an event is not available in a symbolic form within perf right now,
+it can be encoded in a processor-specific way.
+
+For instance, for x86 CPUs, NNN represents the raw register encoding with the
+layout of IA32_PERFEVTSELx MSRs (see [Intel® 64 and IA-32 Architectures Software Developer's Manual Volume 3B: System Programming Guide] Figure 30-1 Layout
+of IA32_PERFEVTSELx MSRs) or AMD's PerfEvtSeln (see [AMD64 Architecture Programmer’s Manual Volume 2: System Programming], Page 344,
+Figure 13-7 Performance Event-Select Register (PerfEvtSeln)).
+
+Example:
+
+If the Intel docs for a QM720 Core i7 describe an event as:
+
+ Event Umask Event Mask
+ Num. Value Mnemonic Description Comment
+
+ A8H 01H LSD.UOPS Counts the number of micro-ops Use cmask=1 and
+ delivered by loop stream detector invert to count
+ cycles
+
+raw encoding of 0x1A8 can be used:
+
+ perf stat -e r1a8 -a sleep 1
+ perf record -e r1a8 ...
+
+You should refer to the processor-specific documentation for these
+details. Some of them are referenced in the SEE ALSO section below.
+
OPTIONS
-------
None
@@ -22,4 +51,6 @@ None
SEE ALSO
--------
linkperf:perf-stat[1], linkperf:perf-top[1],
-linkperf:perf-record[1]
+linkperf:perf-record[1],
+http://www.intel.com/Assets/PDF/manual/253669.pdf[Intel® 64 and IA-32 Architectures Software Developer's Manual Volume 3B: System Programming Guide],
+http://support.amd.com/us/Processor_TechDocs/24593.pdf[AMD64 Architecture Programmer’s Manual Volume 2: System Programming]
diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt
index 34202b1be0bb..94a258c96a44 100644
--- a/tools/perf/Documentation/perf-probe.txt
+++ b/tools/perf/Documentation/perf-probe.txt
@@ -57,6 +57,14 @@ OPTIONS
--force::
Forcibly add events with existing name.
+-n::
+--dry-run::
+	Dry run. With this option, --add and --del don't execute actual
+	adding and removal operations.
+
+--max-probes::
+ Set the maximum number of probe points for an event. Default is 128.
+
PROBE SYNTAX
------------
Probe points are defined by following syntax.
@@ -74,13 +82,22 @@ Probe points are defined by following syntax.
'EVENT' specifies the name of new event, if omitted, it will be set the name of the probed function. Currently, event group name is set as 'probe'.
'FUNC' specifies a probed function name, and it may have one of the following options; '+OFFS' is the offset from function entry address in bytes, ':RLN' is the relative-line number from function entry line, and '%return' means that it probes function return. And ';PTN' means lazy matching pattern (see LAZY MATCHING). Note that ';PTN' must be the end of the probe point definition. In addition, '@SRC' specifies a source file which has that function.
It is also possible to specify a probe point by the source line number or lazy matching by using 'SRC:ALN' or 'SRC;PTN' syntax, where 'SRC' is the source file path, ':ALN' is the line number and ';PTN' is the lazy matching pattern.
-'ARG' specifies the arguments of this probe point. You can use the name of local variable, or kprobe-tracer argument format (e.g. $retval, %ax, etc).
+'ARG' specifies the arguments of this probe point, (see PROBE ARGUMENT).
+
+PROBE ARGUMENT
+--------------
+Each probe argument follows the syntax below.
+
+ [NAME=]LOCALVAR|$retval|%REG|@SYMBOL[:TYPE]
+
+'NAME' specifies the name of this argument (optional). You can use the name of local variable, local data structure member (e.g. var->field, var.field2), or kprobe-tracer argument format (e.g. $retval, %ax, etc). Note that the name of this argument will be set as the last member name if you specify a local data structure member (e.g. field2 for 'var->field1.field2'.)
+'TYPE' casts the type of this argument (optional). If omitted, perf probe automatically sets the type based on debuginfo.
LINE SYNTAX
-----------
Line range is descripted by following syntax.
- "FUNC[:RLN[+NUM|:RLN2]]|SRC:ALN[+NUM|:ALN2]"
+ "FUNC[:RLN[+NUM|-RLN2]]|SRC:ALN[+NUM|-ALN2]"
FUNC specifies the function name of showing lines. 'RLN' is the start line
number from function entry line, and 'RLN2' is the end line number. As same as
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index fc46c0b40f6e..020d871c7934 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -58,7 +58,7 @@ OPTIONS
-f::
--force::
- Overwrite existing data file.
+ Overwrite existing data file. (deprecated)
-c::
--count=::
@@ -101,7 +101,7 @@ OPTIONS
-R::
--raw-samples::
-Collect raw sample records from all opened counters (typically for tracepoint counters).
+Collect raw sample records from all opened counters (default for tracepoint counters).
SEE ALSO
--------
diff --git a/tools/perf/Documentation/perf-sched.txt b/tools/perf/Documentation/perf-sched.txt
index 1ce79198997b..8417644a6166 100644
--- a/tools/perf/Documentation/perf-sched.txt
+++ b/tools/perf/Documentation/perf-sched.txt
@@ -12,7 +12,7 @@ SYNOPSIS
DESCRIPTION
-----------
-There's four variants of perf sched:
+There are four variants of perf sched:
'perf sched record <command>' to record the scheduling events
of an arbitrary workload.
@@ -27,7 +27,7 @@ There's four variants of perf sched:
via perf sched record. (this is done by starting up mockup threads
that mimic the workload based on the events in the trace. These
threads can then replay the timings (CPU runtime and sleep patterns)
- of the workload as it occured when it was recorded - and can repeat
+ of the workload as it occurred when it was recorded - and can repeat
it a number of times, measuring its performance.)
OPTIONS
diff --git a/tools/perf/Documentation/perf-test.txt b/tools/perf/Documentation/perf-test.txt
new file mode 100644
index 000000000000..1c4b5f5b7f71
--- /dev/null
+++ b/tools/perf/Documentation/perf-test.txt
@@ -0,0 +1,22 @@
+perf-test(1)
+============
+
+NAME
+----
+perf-test - Runs sanity tests.
+
+SYNOPSIS
+--------
+[verse]
+'perf test <options>'
+
+DESCRIPTION
+-----------
+This command does assorted sanity tests, initially through linked routines, but
+it will also look for a directory with more tests in the form of scripts.
+
+OPTIONS
+-------
+-v::
+--verbose::
+ Be more verbose.
diff --git a/tools/perf/Documentation/perf-trace-perl.txt b/tools/perf/Documentation/perf-trace-perl.txt
index d729cee8d987..ee6525ee6d69 100644
--- a/tools/perf/Documentation/perf-trace-perl.txt
+++ b/tools/perf/Documentation/perf-trace-perl.txt
@@ -49,12 +49,10 @@ available as calls back into the perf executable (see below).
As an example, the following perf record command can be used to record
all sched_wakeup events in the system:
- # perf record -c 1 -f -a -M -R -e sched:sched_wakeup
+ # perf record -a -e sched:sched_wakeup
Traces meant to be processed using a script should be recorded with
-the above options: -c 1 says to sample every event, -a to enable
-system-wide collection, -M to multiplex the output, and -R to collect
-raw samples.
+the above option: -a to enable system-wide collection.
The format file for the sched_wakep event defines the following fields
(see /sys/kernel/debug/tracing/events/sched/sched_wakeup/format):
diff --git a/tools/perf/Documentation/perf-trace-python.txt b/tools/perf/Documentation/perf-trace-python.txt
index a241aca77184..864aac283a7b 100644
--- a/tools/perf/Documentation/perf-trace-python.txt
+++ b/tools/perf/Documentation/perf-trace-python.txt
@@ -1,5 +1,5 @@
perf-trace-python(1)
-==================
+====================
NAME
----
@@ -93,7 +93,7 @@ don't care how it exited, so we'll use 'perf record' to record only
the sys_enter events:
----
-# perf record -c 1 -f -a -M -R -e raw_syscalls:sys_enter
+# perf record -a -e raw_syscalls:sys_enter
^C[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 56.545 MB perf.data (~2470503 samples) ]
@@ -359,7 +359,7 @@ your script:
# cat kernel-source/tools/perf/scripts/python/bin/syscall-counts-record
#!/bin/bash
-perf record -c 1 -f -a -M -R -e raw_syscalls:sys_enter
+perf record -a -e raw_syscalls:sys_enter
----
The 'report' script is also a shell script with the same base name as
@@ -449,12 +449,10 @@ available as calls back into the perf executable (see below).
As an example, the following perf record command can be used to record
all sched_wakeup events in the system:
- # perf record -c 1 -f -a -M -R -e sched:sched_wakeup
+ # perf record -a -e sched:sched_wakeup
Traces meant to be processed using a script should be recorded with
-the above options: -c 1 says to sample every event, -a to enable
-system-wide collection, -M to multiplex the output, and -R to collect
-raw samples.
+the above option: -a to enable system-wide collection.
The format file for the sched_wakep event defines the following fields
(see /sys/kernel/debug/tracing/events/sched/sched_wakeup/format):
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
index 8879299cd9df..122ec9dc4853 100644
--- a/tools/perf/Documentation/perf-trace.txt
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -1,5 +1,5 @@
perf-trace(1)
-==============
+=============
NAME
----
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index bc0f670a8338..0ef5cfe52f2d 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -1,3 +1,7 @@
+ifeq ("$(origin O)", "command line")
+ OUTPUT := $(O)/
+endif
+
# The default target of this Makefile is...
all::
@@ -150,10 +154,17 @@ all::
# Define LDFLAGS=-static to build a static binary.
#
# Define EXTRA_CFLAGS=-m64 or EXTRA_CFLAGS=-m32 as appropriate for cross-builds.
+#
+# Define NO_DWARF if you do not want debug-info analysis feature at all.
-PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
- @$(SHELL_PATH) util/PERF-VERSION-GEN
--include PERF-VERSION-FILE
+$(shell sh -c 'mkdir -p $(OUTPUT)scripts/python/Perf-Trace-Util/' 2> /dev/null)
+$(shell sh -c 'mkdir -p $(OUTPUT)scripts/perl/Perf-Trace-Util/' 2> /dev/null)
+$(shell sh -c 'mkdir -p $(OUTPUT)util/scripting-engines/' 2> /dev/null)
+$(shell sh -c 'mkdir $(OUTPUT)bench' 2> /dev/null)
+
+$(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
+ @$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT)
+-include $(OUTPUT)PERF-VERSION-FILE
uname_S := $(shell sh -c 'uname -s 2>/dev/null || echo not')
uname_M := $(shell sh -c 'uname -m 2>/dev/null || echo not')
@@ -162,6 +173,22 @@ uname_R := $(shell sh -c 'uname -r 2>/dev/null || echo not')
uname_P := $(shell sh -c 'uname -p 2>/dev/null || echo not')
uname_V := $(shell sh -c 'uname -v 2>/dev/null || echo not')
+ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
+ -e s/arm.*/arm/ -e s/sa110/arm/ \
+ -e s/s390x/s390/ -e s/parisc64/parisc/ \
+ -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
+ -e s/sh[234].*/sh/ )
+
+# Additional ARCH settings for x86
+ifeq ($(ARCH),i386)
+ ARCH := x86
+endif
+ifeq ($(ARCH),x86_64)
+ ARCH := x86
+endif
+
+$(shell sh -c 'mkdir -p $(OUTPUT)arch/$(ARCH)/util/' 2> /dev/null)
+
# CFLAGS and LDFLAGS are for the users to override from the command line.
#
@@ -274,7 +301,7 @@ endif
# Those must not be GNU-specific; they are shared with perl/ which may
# be built by a different compiler. (Note that this is an artifact now
# but it still might be nice to keep that distinction.)
-BASIC_CFLAGS = -Iutil/include
+BASIC_CFLAGS = -Iutil/include -Iarch/$(ARCH)/include
BASIC_LDFLAGS =
# Guard against environment variables
@@ -308,7 +335,7 @@ PROGRAMS += $(EXTRA_PROGRAMS)
#
# Single 'perf' binary right now:
#
-PROGRAMS += perf
+PROGRAMS += $(OUTPUT)perf
# List built-in command $C whose implementation cmd_$C() is not in
# builtin-$C.o but is linked in as part of some other command.
@@ -318,7 +345,7 @@ PROGRAMS += perf
ALL_PROGRAMS = $(PROGRAMS) $(SCRIPTS)
# what 'all' will build but not install in perfexecdir
-OTHER_PROGRAMS = perf$X
+OTHER_PROGRAMS = $(OUTPUT)perf$X
# Set paths to tools early so that they can be used for version tests.
ifndef SHELL_PATH
@@ -330,7 +357,7 @@ endif
export PERL_PATH
-LIB_FILE=libperf.a
+LIB_FILE=$(OUTPUT)libperf.a
LIB_H += ../../include/linux/perf_event.h
LIB_H += ../../include/linux/rbtree.h
@@ -350,12 +377,13 @@ LIB_H += util/include/linux/rbtree.h
LIB_H += util/include/linux/string.h
LIB_H += util/include/linux/types.h
LIB_H += util/include/asm/asm-offsets.h
-LIB_H += util/include/asm/bitops.h
LIB_H += util/include/asm/bug.h
LIB_H += util/include/asm/byteorder.h
+LIB_H += util/include/asm/hweight.h
LIB_H += util/include/asm/swab.h
LIB_H += util/include/asm/system.h
LIB_H += util/include/asm/uaccess.h
+LIB_H += util/include/dwarf-regs.h
LIB_H += perf.h
LIB_H += util/cache.h
LIB_H += util/callchain.h
@@ -375,7 +403,6 @@ LIB_H += util/header.h
LIB_H += util/help.h
LIB_H += util/session.h
LIB_H += util/strbuf.h
-LIB_H += util/string.h
LIB_H += util/strlist.h
LIB_H += util/svghelper.h
LIB_H += util/run-command.h
@@ -391,77 +418,79 @@ LIB_H += util/probe-finder.h
LIB_H += util/probe-event.h
LIB_H += util/cpumap.h
-LIB_OBJS += util/abspath.o
-LIB_OBJS += util/alias.o
-LIB_OBJS += util/build-id.o
-LIB_OBJS += util/config.o
-LIB_OBJS += util/ctype.o
-LIB_OBJS += util/debugfs.o
-LIB_OBJS += util/environment.o
-LIB_OBJS += util/event.o
-LIB_OBJS += util/exec_cmd.o
-LIB_OBJS += util/help.o
-LIB_OBJS += util/levenshtein.o
-LIB_OBJS += util/parse-options.o
-LIB_OBJS += util/parse-events.o
-LIB_OBJS += util/path.o
-LIB_OBJS += util/rbtree.o
-LIB_OBJS += util/bitmap.o
-LIB_OBJS += util/hweight.o
-LIB_OBJS += util/find_next_bit.o
-LIB_OBJS += util/run-command.o
-LIB_OBJS += util/quote.o
-LIB_OBJS += util/strbuf.o
-LIB_OBJS += util/string.o
-LIB_OBJS += util/strlist.o
-LIB_OBJS += util/usage.o
-LIB_OBJS += util/wrapper.o
-LIB_OBJS += util/sigchain.o
-LIB_OBJS += util/symbol.o
-LIB_OBJS += util/color.o
-LIB_OBJS += util/pager.o
-LIB_OBJS += util/header.o
-LIB_OBJS += util/callchain.o
-LIB_OBJS += util/values.o
-LIB_OBJS += util/debug.o
-LIB_OBJS += util/map.o
-LIB_OBJS += util/session.o
-LIB_OBJS += util/thread.o
-LIB_OBJS += util/trace-event-parse.o
-LIB_OBJS += util/trace-event-read.o
-LIB_OBJS += util/trace-event-info.o
-LIB_OBJS += util/trace-event-scripting.o
-LIB_OBJS += util/svghelper.o
-LIB_OBJS += util/sort.o
-LIB_OBJS += util/hist.o
-LIB_OBJS += util/probe-event.o
-LIB_OBJS += util/util.o
-LIB_OBJS += util/cpumap.o
-
-BUILTIN_OBJS += builtin-annotate.o
-
-BUILTIN_OBJS += builtin-bench.o
+LIB_OBJS += $(OUTPUT)util/abspath.o
+LIB_OBJS += $(OUTPUT)util/alias.o
+LIB_OBJS += $(OUTPUT)util/build-id.o
+LIB_OBJS += $(OUTPUT)util/config.o
+LIB_OBJS += $(OUTPUT)util/ctype.o
+LIB_OBJS += $(OUTPUT)util/debugfs.o
+LIB_OBJS += $(OUTPUT)util/environment.o
+LIB_OBJS += $(OUTPUT)util/event.o
+LIB_OBJS += $(OUTPUT)util/exec_cmd.o
+LIB_OBJS += $(OUTPUT)util/help.o
+LIB_OBJS += $(OUTPUT)util/levenshtein.o
+LIB_OBJS += $(OUTPUT)util/parse-options.o
+LIB_OBJS += $(OUTPUT)util/parse-events.o
+LIB_OBJS += $(OUTPUT)util/path.o
+LIB_OBJS += $(OUTPUT)util/rbtree.o
+LIB_OBJS += $(OUTPUT)util/bitmap.o
+LIB_OBJS += $(OUTPUT)util/hweight.o
+LIB_OBJS += $(OUTPUT)util/run-command.o
+LIB_OBJS += $(OUTPUT)util/quote.o
+LIB_OBJS += $(OUTPUT)util/strbuf.o
+LIB_OBJS += $(OUTPUT)util/string.o
+LIB_OBJS += $(OUTPUT)util/strlist.o
+LIB_OBJS += $(OUTPUT)util/usage.o
+LIB_OBJS += $(OUTPUT)util/wrapper.o
+LIB_OBJS += $(OUTPUT)util/sigchain.o
+LIB_OBJS += $(OUTPUT)util/symbol.o
+LIB_OBJS += $(OUTPUT)util/color.o
+LIB_OBJS += $(OUTPUT)util/pager.o
+LIB_OBJS += $(OUTPUT)util/header.o
+LIB_OBJS += $(OUTPUT)util/callchain.o
+LIB_OBJS += $(OUTPUT)util/values.o
+LIB_OBJS += $(OUTPUT)util/debug.o
+LIB_OBJS += $(OUTPUT)util/map.o
+LIB_OBJS += $(OUTPUT)util/session.o
+LIB_OBJS += $(OUTPUT)util/thread.o
+LIB_OBJS += $(OUTPUT)util/trace-event-parse.o
+LIB_OBJS += $(OUTPUT)util/trace-event-read.o
+LIB_OBJS += $(OUTPUT)util/trace-event-info.o
+LIB_OBJS += $(OUTPUT)util/trace-event-scripting.o
+LIB_OBJS += $(OUTPUT)util/svghelper.o
+LIB_OBJS += $(OUTPUT)util/sort.o
+LIB_OBJS += $(OUTPUT)util/hist.o
+LIB_OBJS += $(OUTPUT)util/probe-event.o
+LIB_OBJS += $(OUTPUT)util/util.o
+LIB_OBJS += $(OUTPUT)util/cpumap.o
+
+BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o
+
+BUILTIN_OBJS += $(OUTPUT)builtin-bench.o
# Benchmark modules
-BUILTIN_OBJS += bench/sched-messaging.o
-BUILTIN_OBJS += bench/sched-pipe.o
-BUILTIN_OBJS += bench/mem-memcpy.o
-
-BUILTIN_OBJS += builtin-diff.o
-BUILTIN_OBJS += builtin-help.o
-BUILTIN_OBJS += builtin-sched.o
-BUILTIN_OBJS += builtin-buildid-list.o
-BUILTIN_OBJS += builtin-buildid-cache.o
-BUILTIN_OBJS += builtin-list.o
-BUILTIN_OBJS += builtin-record.o
-BUILTIN_OBJS += builtin-report.o
-BUILTIN_OBJS += builtin-stat.o
-BUILTIN_OBJS += builtin-timechart.o
-BUILTIN_OBJS += builtin-top.o
-BUILTIN_OBJS += builtin-trace.o
-BUILTIN_OBJS += builtin-probe.o
-BUILTIN_OBJS += builtin-kmem.o
-BUILTIN_OBJS += builtin-lock.o
+BUILTIN_OBJS += $(OUTPUT)bench/sched-messaging.o
+BUILTIN_OBJS += $(OUTPUT)bench/sched-pipe.o
+BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy.o
+
+BUILTIN_OBJS += $(OUTPUT)builtin-diff.o
+BUILTIN_OBJS += $(OUTPUT)builtin-help.o
+BUILTIN_OBJS += $(OUTPUT)builtin-sched.o
+BUILTIN_OBJS += $(OUTPUT)builtin-buildid-list.o
+BUILTIN_OBJS += $(OUTPUT)builtin-buildid-cache.o
+BUILTIN_OBJS += $(OUTPUT)builtin-list.o
+BUILTIN_OBJS += $(OUTPUT)builtin-record.o
+BUILTIN_OBJS += $(OUTPUT)builtin-report.o
+BUILTIN_OBJS += $(OUTPUT)builtin-stat.o
+BUILTIN_OBJS += $(OUTPUT)builtin-timechart.o
+BUILTIN_OBJS += $(OUTPUT)builtin-top.o
+BUILTIN_OBJS += $(OUTPUT)builtin-trace.o
+BUILTIN_OBJS += $(OUTPUT)builtin-probe.o
+BUILTIN_OBJS += $(OUTPUT)builtin-kmem.o
+BUILTIN_OBJS += $(OUTPUT)builtin-lock.o
+BUILTIN_OBJS += $(OUTPUT)builtin-kvm.o
+BUILTIN_OBJS += $(OUTPUT)builtin-test.o
+BUILTIN_OBJS += $(OUTPUT)builtin-inject.o
PERFLIBS = $(LIB_FILE)
@@ -476,6 +505,15 @@ PERFLIBS = $(LIB_FILE)
-include config.mak.autogen
-include config.mak
+ifndef NO_DWARF
+ifneq ($(shell sh -c "(echo '\#include <dwarf.h>'; echo '\#include <libdw.h>'; echo 'int main(void) { Dwarf *dbg; dbg = dwarf_begin(0, DWARF_C_READ); return (long)dbg; }') | $(CC) -x c - $(ALL_CFLAGS) -I/usr/include/elfutils -ldw -lelf -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
+ msg := $(warning No libdw.h found or old libdw.h found, disables dwarf support. Please install elfutils-devel/libdw-dev);
+ NO_DWARF := 1
+endif # Dwarf support
+endif # NO_DWARF
+
+-include arch/$(ARCH)/Makefile
+
ifeq ($(uname_S),Darwin)
ifndef NO_FINK
ifeq ($(shell test -d /sw/lib && echo y),y)
@@ -492,6 +530,10 @@ ifeq ($(uname_S),Darwin)
PTHREAD_LIBS =
endif
+ifneq ($(OUTPUT),)
+ BASIC_CFLAGS += -I$(OUTPUT)
+endif
+
ifeq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
ifneq ($(shell sh -c "(echo '\#include <gnu/libc-version.h>'; echo 'int main(void) { const char * version = gnu_get_libc_version(); return (long)version; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]/glibc-static);
@@ -504,13 +546,22 @@ else
msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel and glibc-dev[el]);
endif
-ifneq ($(shell sh -c "(echo '\#include <dwarf.h>'; echo '\#include <libdw.h>'; echo 'int main(void) { Dwarf *dbg; dbg = dwarf_begin(0, DWARF_C_READ); return (long)dbg; }') | $(CC) -x c - $(ALL_CFLAGS) -I/usr/include/elfutils -ldw -lelf -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
- msg := $(warning No libdw.h found or old libdw.h found, disables dwarf support. Please install elfutils-devel/elfutils-dev);
- BASIC_CFLAGS += -DNO_DWARF_SUPPORT
+ifndef NO_DWARF
+ifeq ($(origin PERF_HAVE_DWARF_REGS), undefined)
+ msg := $(warning DWARF register mappings have not been defined for architecture $(ARCH), DWARF support disabled);
else
- BASIC_CFLAGS += -I/usr/include/elfutils
+ BASIC_CFLAGS += -I/usr/include/elfutils -DDWARF_SUPPORT
EXTLIBS += -lelf -ldw
- LIB_OBJS += util/probe-finder.o
+ LIB_OBJS += $(OUTPUT)util/probe-finder.o
+endif # PERF_HAVE_DWARF_REGS
+endif # NO_DWARF
+
+ifneq ($(shell sh -c "(echo '\#include <newt.h>'; echo 'int main(void) { newtInit(); newtCls(); return newtFinished(); }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -lnewt -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
+ msg := $(warning newt not found, disables TUI support. Please install newt-devel or libnewt-dev);
+ BASIC_CFLAGS += -DNO_NEWT_SUPPORT
+else
+ EXTLIBS += -lnewt
+ LIB_OBJS += $(OUTPUT)util/newt.o
endif
ifndef NO_LIBPERL
@@ -522,8 +573,8 @@ ifneq ($(shell sh -c "(echo '\#include <EXTERN.h>'; echo '\#include <perl.h>'; e
BASIC_CFLAGS += -DNO_LIBPERL
else
ALL_LDFLAGS += $(PERL_EMBED_LDOPTS)
- LIB_OBJS += util/scripting-engines/trace-event-perl.o
- LIB_OBJS += scripts/perl/Perf-Trace-Util/Context.o
+ LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-perl.o
+ LIB_OBJS += $(OUTPUT)scripts/perl/Perf-Trace-Util/Context.o
endif
ifndef NO_LIBPYTHON
@@ -531,12 +582,12 @@ PYTHON_EMBED_LDOPTS = `python-config --ldflags 2>/dev/null`
PYTHON_EMBED_CCOPTS = `python-config --cflags 2>/dev/null`
endif
-ifneq ($(shell sh -c "(echo '\#include <Python.h>'; echo 'int main(void) { Py_Initialize(); return 0; }') | $(CC) -x c - $(PYTHON_EMBED_CCOPTS) -o /dev/null $(PYTHON_EMBED_LDOPTS) > /dev/null 2>&1 && echo y"), y)
+ifneq ($(shell sh -c "(echo '\#include <Python.h>'; echo 'int main(void) { Py_Initialize(); return 0; }') | $(CC) -x c - $(PYTHON_EMBED_CCOPTS) -o $(BITBUCKET) $(PYTHON_EMBED_LDOPTS) > /dev/null 2>&1 && echo y"), y)
BASIC_CFLAGS += -DNO_LIBPYTHON
else
ALL_LDFLAGS += $(PYTHON_EMBED_LDOPTS)
- LIB_OBJS += util/scripting-engines/trace-event-python.o
- LIB_OBJS += scripts/python/Perf-Trace-Util/Context.o
+ LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-python.o
+ LIB_OBJS += $(OUTPUT)scripts/python/Perf-Trace-Util/Context.o
endif
ifdef NO_DEMANGLE
@@ -607,53 +658,53 @@ ifdef NO_C99_FORMAT
endif
ifdef SNPRINTF_RETURNS_BOGUS
COMPAT_CFLAGS += -DSNPRINTF_RETURNS_BOGUS
- COMPAT_OBJS += compat/snprintf.o
+ COMPAT_OBJS += $(OUTPUT)compat/snprintf.o
endif
ifdef FREAD_READS_DIRECTORIES
COMPAT_CFLAGS += -DFREAD_READS_DIRECTORIES
- COMPAT_OBJS += compat/fopen.o
+ COMPAT_OBJS += $(OUTPUT)compat/fopen.o
endif
ifdef NO_SYMLINK_HEAD
BASIC_CFLAGS += -DNO_SYMLINK_HEAD
endif
ifdef NO_STRCASESTR
COMPAT_CFLAGS += -DNO_STRCASESTR
- COMPAT_OBJS += compat/strcasestr.o
+ COMPAT_OBJS += $(OUTPUT)compat/strcasestr.o
endif
ifdef NO_STRTOUMAX
COMPAT_CFLAGS += -DNO_STRTOUMAX
- COMPAT_OBJS += compat/strtoumax.o
+ COMPAT_OBJS += $(OUTPUT)compat/strtoumax.o
endif
ifdef NO_STRTOULL
COMPAT_CFLAGS += -DNO_STRTOULL
endif
ifdef NO_SETENV
COMPAT_CFLAGS += -DNO_SETENV
- COMPAT_OBJS += compat/setenv.o
+ COMPAT_OBJS += $(OUTPUT)compat/setenv.o
endif
ifdef NO_MKDTEMP
COMPAT_CFLAGS += -DNO_MKDTEMP
- COMPAT_OBJS += compat/mkdtemp.o
+ COMPAT_OBJS += $(OUTPUT)compat/mkdtemp.o
endif
ifdef NO_UNSETENV
COMPAT_CFLAGS += -DNO_UNSETENV
- COMPAT_OBJS += compat/unsetenv.o
+ COMPAT_OBJS += $(OUTPUT)compat/unsetenv.o
endif
ifdef NO_SYS_SELECT_H
BASIC_CFLAGS += -DNO_SYS_SELECT_H
endif
ifdef NO_MMAP
COMPAT_CFLAGS += -DNO_MMAP
- COMPAT_OBJS += compat/mmap.o
+ COMPAT_OBJS += $(OUTPUT)compat/mmap.o
else
ifdef USE_WIN32_MMAP
COMPAT_CFLAGS += -DUSE_WIN32_MMAP
- COMPAT_OBJS += compat/win32mmap.o
+ COMPAT_OBJS += $(OUTPUT)compat/win32mmap.o
endif
endif
ifdef NO_PREAD
COMPAT_CFLAGS += -DNO_PREAD
- COMPAT_OBJS += compat/pread.o
+ COMPAT_OBJS += $(OUTPUT)compat/pread.o
endif
ifdef NO_FAST_WORKING_DIRECTORY
BASIC_CFLAGS += -DNO_FAST_WORKING_DIRECTORY
@@ -675,10 +726,10 @@ else
endif
endif
ifdef NO_INET_NTOP
- LIB_OBJS += compat/inet_ntop.o
+ LIB_OBJS += $(OUTPUT)compat/inet_ntop.o
endif
ifdef NO_INET_PTON
- LIB_OBJS += compat/inet_pton.o
+ LIB_OBJS += $(OUTPUT)compat/inet_pton.o
endif
ifdef NO_ICONV
@@ -695,15 +746,15 @@ endif
ifdef PPC_SHA1
SHA1_HEADER = "ppc/sha1.h"
- LIB_OBJS += ppc/sha1.o ppc/sha1ppc.o
+ LIB_OBJS += $(OUTPUT)ppc/sha1.o ppc/sha1ppc.o
else
ifdef ARM_SHA1
SHA1_HEADER = "arm/sha1.h"
- LIB_OBJS += arm/sha1.o arm/sha1_arm.o
+ LIB_OBJS += $(OUTPUT)arm/sha1.o $(OUTPUT)arm/sha1_arm.o
else
ifdef MOZILLA_SHA1
SHA1_HEADER = "mozilla-sha1/sha1.h"
- LIB_OBJS += mozilla-sha1/sha1.o
+ LIB_OBJS += $(OUTPUT)mozilla-sha1/sha1.o
else
SHA1_HEADER = <openssl/sha.h>
EXTLIBS += $(LIB_4_CRYPTO)
@@ -715,15 +766,15 @@ ifdef NO_PERL_MAKEMAKER
endif
ifdef NO_HSTRERROR
COMPAT_CFLAGS += -DNO_HSTRERROR
- COMPAT_OBJS += compat/hstrerror.o
+ COMPAT_OBJS += $(OUTPUT)compat/hstrerror.o
endif
ifdef NO_MEMMEM
COMPAT_CFLAGS += -DNO_MEMMEM
- COMPAT_OBJS += compat/memmem.o
+ COMPAT_OBJS += $(OUTPUT)compat/memmem.o
endif
ifdef INTERNAL_QSORT
COMPAT_CFLAGS += -DINTERNAL_QSORT
- COMPAT_OBJS += compat/qsort.o
+ COMPAT_OBJS += $(OUTPUT)compat/qsort.o
endif
ifdef RUNTIME_PREFIX
COMPAT_CFLAGS += -DRUNTIME_PREFIX
@@ -803,7 +854,7 @@ export TAR INSTALL DESTDIR SHELL_PATH
SHELL = $(SHELL_PATH)
-all:: .perf.dev.null shell_compatibility_test $(ALL_PROGRAMS) $(BUILT_INS) $(OTHER_PROGRAMS) PERF-BUILD-OPTIONS
+all:: .perf.dev.null shell_compatibility_test $(ALL_PROGRAMS) $(BUILT_INS) $(OTHER_PROGRAMS) $(OUTPUT)PERF-BUILD-OPTIONS
ifneq (,$X)
$(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_PROGRAMS) $(BUILT_INS) perf$X)), test '$p' -ef '$p$X' || $(RM) '$p';)
endif
@@ -815,39 +866,39 @@ please_set_SHELL_PATH_to_a_more_modern_shell:
shell_compatibility_test: please_set_SHELL_PATH_to_a_more_modern_shell
-strip: $(PROGRAMS) perf$X
- $(STRIP) $(STRIP_OPTS) $(PROGRAMS) perf$X
+strip: $(PROGRAMS) $(OUTPUT)perf$X
+ $(STRIP) $(STRIP_OPTS) $(PROGRAMS) $(OUTPUT)perf$X
-perf.o: perf.c common-cmds.h PERF-CFLAGS
+$(OUTPUT)perf.o: perf.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -DPERF_VERSION='"$(PERF_VERSION)"' \
'-DPERF_HTML_PATH="$(htmldir_SQ)"' \
- $(ALL_CFLAGS) -c $(filter %.c,$^)
+ $(ALL_CFLAGS) -c $(filter %.c,$^) -o $@
-perf$X: perf.o $(BUILTIN_OBJS) $(PERFLIBS)
- $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ perf.o \
+$(OUTPUT)perf$X: $(OUTPUT)perf.o $(BUILTIN_OBJS) $(PERFLIBS)
+ $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(OUTPUT)perf.o \
$(BUILTIN_OBJS) $(ALL_LDFLAGS) $(LIBS)
-builtin-help.o: builtin-help.c common-cmds.h PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) \
+$(OUTPUT)builtin-help.o: builtin-help.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) \
'-DPERF_HTML_PATH="$(htmldir_SQ)"' \
'-DPERF_MAN_PATH="$(mandir_SQ)"' \
'-DPERF_INFO_PATH="$(infodir_SQ)"' $<
-builtin-timechart.o: builtin-timechart.c common-cmds.h PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) \
+$(OUTPUT)builtin-timechart.o: builtin-timechart.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) \
'-DPERF_HTML_PATH="$(htmldir_SQ)"' \
'-DPERF_MAN_PATH="$(mandir_SQ)"' \
'-DPERF_INFO_PATH="$(infodir_SQ)"' $<
-$(BUILT_INS): perf$X
+$(BUILT_INS): $(OUTPUT)perf$X
$(QUIET_BUILT_IN)$(RM) $@ && \
ln perf$X $@ 2>/dev/null || \
ln -s perf$X $@ 2>/dev/null || \
cp perf$X $@
-common-cmds.h: util/generate-cmdlist.sh command-list.txt
+$(OUTPUT)common-cmds.h: util/generate-cmdlist.sh command-list.txt
-common-cmds.h: $(wildcard Documentation/perf-*.txt)
+$(OUTPUT)common-cmds.h: $(wildcard Documentation/perf-*.txt)
$(QUIET_GEN). util/generate-cmdlist.sh > $@+ && mv $@+ $@
$(patsubst %.sh,%,$(SCRIPT_SH)) : % : %.sh
@@ -859,7 +910,7 @@ $(patsubst %.sh,%,$(SCRIPT_SH)) : % : %.sh
-e 's/@@NO_CURL@@/$(NO_CURL)/g' \
$@.sh >$@+ && \
chmod +x $@+ && \
- mv $@+ $@
+ mv $@+ $(OUTPUT)$@
configure: configure.ac
$(QUIET_GEN)$(RM) $@ $<+ && \
@@ -869,60 +920,47 @@ configure: configure.ac
$(RM) $<+
# These can record PERF_VERSION
-perf.o perf.spec \
+$(OUTPUT)perf.o perf.spec \
$(patsubst %.sh,%,$(SCRIPT_SH)) \
$(patsubst %.perl,%,$(SCRIPT_PERL)) \
- : PERF-VERSION-FILE
+ : $(OUTPUT)PERF-VERSION-FILE
-%.o: %.c PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) $<
-%.s: %.c PERF-CFLAGS
+$(OUTPUT)%.o: %.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $<
+$(OUTPUT)%.s: %.c $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -S $(ALL_CFLAGS) $<
-%.o: %.S
- $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) $<
+$(OUTPUT)%.o: %.S
+ $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $<
-util/exec_cmd.o: util/exec_cmd.c PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) \
+$(OUTPUT)util/exec_cmd.o: util/exec_cmd.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) \
'-DPERF_EXEC_PATH="$(perfexecdir_SQ)"' \
'-DBINDIR="$(bindir_relative_SQ)"' \
'-DPREFIX="$(prefix_SQ)"' \
$<
-builtin-init-db.o: builtin-init-db.c PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) -DDEFAULT_PERF_TEMPLATE_DIR='"$(template_dir_SQ)"' $<
-
-util/config.o: util/config.c PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
-
-util/rbtree.o: ../../lib/rbtree.c PERF-CFLAGS
- $(QUIET_CC)$(CC) -o util/rbtree.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
-
-# some perf warning policies can't fit to lib/bitmap.c, eg: it warns about variable shadowing
-# from <string.h> that comes from kernel headers wrapping.
-KBITMAP_FLAGS=`echo $(ALL_CFLAGS) | sed s/-Wshadow// | sed s/-Wswitch-default// | sed s/-Wextra//`
-
-util/bitmap.o: ../../lib/bitmap.c PERF-CFLAGS
- $(QUIET_CC)$(CC) -o util/bitmap.o -c $(KBITMAP_FLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
+$(OUTPUT)builtin-init-db.o: builtin-init-db.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DDEFAULT_PERF_TEMPLATE_DIR='"$(template_dir_SQ)"' $<
-util/hweight.o: ../../lib/hweight.c PERF-CFLAGS
- $(QUIET_CC)$(CC) -o util/hweight.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
+$(OUTPUT)util/config.o: util/config.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
-util/find_next_bit.o: ../../lib/find_next_bit.c PERF-CFLAGS
- $(QUIET_CC)$(CC) -o util/find_next_bit.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
+$(OUTPUT)util/rbtree.o: ../../lib/rbtree.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
-util/scripting-engines/trace-event-perl.o: util/scripting-engines/trace-event-perl.c PERF-CFLAGS
- $(QUIET_CC)$(CC) -o util/scripting-engines/trace-event-perl.o -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $<
+$(OUTPUT)util/scripting-engines/trace-event-perl.o: util/scripting-engines/trace-event-perl.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $<
-scripts/perl/Perf-Trace-Util/Context.o: scripts/perl/Perf-Trace-Util/Context.c PERF-CFLAGS
- $(QUIET_CC)$(CC) -o scripts/perl/Perf-Trace-Util/Context.o -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs $<
+$(OUTPUT)scripts/perl/Perf-Trace-Util/Context.o: scripts/perl/Perf-Trace-Util/Context.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs $<
-util/scripting-engines/trace-event-python.o: util/scripting-engines/trace-event-python.c PERF-CFLAGS
- $(QUIET_CC)$(CC) -o util/scripting-engines/trace-event-python.o -c $(ALL_CFLAGS) $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $<
+$(OUTPUT)util/scripting-engines/trace-event-python.o: util/scripting-engines/trace-event-python.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $<
-scripts/python/Perf-Trace-Util/Context.o: scripts/python/Perf-Trace-Util/Context.c PERF-CFLAGS
- $(QUIET_CC)$(CC) -o scripts/python/Perf-Trace-Util/Context.o -c $(ALL_CFLAGS) $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs $<
+$(OUTPUT)scripts/python/Perf-Trace-Util/Context.o: scripts/python/Perf-Trace-Util/Context.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs $<
-perf-%$X: %.o $(PERFLIBS)
+$(OUTPUT)perf-%$X: %.o $(PERFLIBS)
$(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS)
$(LIB_OBJS) $(BUILTIN_OBJS): $(LIB_H)
@@ -963,17 +1001,17 @@ cscope:
TRACK_CFLAGS = $(subst ','\'',$(ALL_CFLAGS)):\
$(bindir_SQ):$(perfexecdir_SQ):$(template_dir_SQ):$(prefix_SQ)
-PERF-CFLAGS: .FORCE-PERF-CFLAGS
+$(OUTPUT)PERF-CFLAGS: .FORCE-PERF-CFLAGS
@FLAGS='$(TRACK_CFLAGS)'; \
- if test x"$$FLAGS" != x"`cat PERF-CFLAGS 2>/dev/null`" ; then \
+ if test x"$$FLAGS" != x"`cat $(OUTPUT)PERF-CFLAGS 2>/dev/null`" ; then \
echo 1>&2 " * new build flags or prefix"; \
- echo "$$FLAGS" >PERF-CFLAGS; \
+ echo "$$FLAGS" >$(OUTPUT)PERF-CFLAGS; \
fi
# We need to apply sq twice, once to protect from the shell
-# that runs PERF-BUILD-OPTIONS, and then again to protect it
+# that runs $(OUTPUT)PERF-BUILD-OPTIONS, and then again to protect it
# and the first level quoting from the shell that runs "echo".
-PERF-BUILD-OPTIONS: .FORCE-PERF-BUILD-OPTIONS
+$(OUTPUT)PERF-BUILD-OPTIONS: .FORCE-PERF-BUILD-OPTIONS
@echo SHELL_PATH=\''$(subst ','\'',$(SHELL_PATH_SQ))'\' >$@
@echo TAR=\''$(subst ','\'',$(subst ','\'',$(TAR)))'\' >>$@
@echo NO_CURL=\''$(subst ','\'',$(subst ','\'',$(NO_CURL)))'\' >>$@
@@ -994,7 +1032,7 @@ all:: $(TEST_PROGRAMS)
export NO_SVN_TESTS
-check: common-cmds.h
+check: $(OUTPUT)common-cmds.h
if sparse; \
then \
for i in *.c */*.c; \
@@ -1028,10 +1066,10 @@ export perfexec_instdir
install: all
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)'
- $(INSTALL) perf$X '$(DESTDIR_SQ)$(bindir_SQ)'
+ $(INSTALL) $(OUTPUT)perf$X '$(DESTDIR_SQ)$(bindir_SQ)'
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin'
- $(INSTALL) perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
+ $(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
$(INSTALL) scripts/perl/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'
$(INSTALL) scripts/perl/*.pl -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl'
$(INSTALL) scripts/perl/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin'
@@ -1045,7 +1083,7 @@ ifdef BUILT_INS
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
$(INSTALL) $(BUILT_INS) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
ifneq (,$X)
- $(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_PROGRAMS) $(BUILT_INS) perf$X)), $(RM) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/$p';)
+ $(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_PROGRAMS) $(BUILT_INS) $(OUTPUT)perf$X)), $(RM) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/$p';)
endif
endif
@@ -1129,14 +1167,14 @@ clean:
$(RM) *.o */*.o */*/*.o */*/*/*.o $(LIB_FILE)
$(RM) $(ALL_PROGRAMS) $(BUILT_INS) perf$X
$(RM) $(TEST_PROGRAMS)
- $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo common-cmds.h TAGS tags cscope*
+ $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope*
$(RM) -r autom4te.cache
$(RM) config.log config.mak.autogen config.mak.append config.status config.cache
$(RM) -r $(PERF_TARNAME) .doc-tmp-dir
$(RM) $(PERF_TARNAME).tar.gz perf-core_$(PERF_VERSION)-*.tar.gz
$(RM) $(htmldocs).tar.gz $(manpages).tar.gz
$(MAKE) -C Documentation/ clean
- $(RM) PERF-VERSION-FILE PERF-CFLAGS PERF-BUILD-OPTIONS
+ $(RM) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS $(OUTPUT)PERF-BUILD-OPTIONS
.PHONY: all install clean strip
.PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell
diff --git a/tools/perf/arch/powerpc/Makefile b/tools/perf/arch/powerpc/Makefile
new file mode 100644
index 000000000000..15130b50dfe3
--- /dev/null
+++ b/tools/perf/arch/powerpc/Makefile
@@ -0,0 +1,4 @@
+ifndef NO_DWARF
+PERF_HAVE_DWARF_REGS := 1
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o
+endif
diff --git a/tools/perf/arch/powerpc/util/dwarf-regs.c b/tools/perf/arch/powerpc/util/dwarf-regs.c
new file mode 100644
index 000000000000..48ae0c5e3f73
--- /dev/null
+++ b/tools/perf/arch/powerpc/util/dwarf-regs.c
@@ -0,0 +1,88 @@
+/*
+ * Mapping of DWARF debug register numbers into register names.
+ *
+ * Copyright (C) 2010 Ian Munsie, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <libio.h>
+#include <dwarf-regs.h>
+
+
+struct pt_regs_dwarfnum {
+ const char *name;
+ unsigned int dwarfnum;
+};
+
+#define STR(s) #s
+#define REG_DWARFNUM_NAME(r, num) {.name = r, .dwarfnum = num}
+#define GPR_DWARFNUM_NAME(num) \
+ {.name = STR(%gpr##num), .dwarfnum = num}
+#define REG_DWARFNUM_END {.name = NULL, .dwarfnum = 0}
+
+/*
+ * Reference:
+ * http://refspecs.linuxfoundation.org/ELF/ppc64/PPC-elf64abi-1.9.html
+ */
+static const struct pt_regs_dwarfnum regdwarfnum_table[] = {
+ GPR_DWARFNUM_NAME(0),
+ GPR_DWARFNUM_NAME(1),
+ GPR_DWARFNUM_NAME(2),
+ GPR_DWARFNUM_NAME(3),
+ GPR_DWARFNUM_NAME(4),
+ GPR_DWARFNUM_NAME(5),
+ GPR_DWARFNUM_NAME(6),
+ GPR_DWARFNUM_NAME(7),
+ GPR_DWARFNUM_NAME(8),
+ GPR_DWARFNUM_NAME(9),
+ GPR_DWARFNUM_NAME(10),
+ GPR_DWARFNUM_NAME(11),
+ GPR_DWARFNUM_NAME(12),
+ GPR_DWARFNUM_NAME(13),
+ GPR_DWARFNUM_NAME(14),
+ GPR_DWARFNUM_NAME(15),
+ GPR_DWARFNUM_NAME(16),
+ GPR_DWARFNUM_NAME(17),
+ GPR_DWARFNUM_NAME(18),
+ GPR_DWARFNUM_NAME(19),
+ GPR_DWARFNUM_NAME(20),
+ GPR_DWARFNUM_NAME(21),
+ GPR_DWARFNUM_NAME(22),
+ GPR_DWARFNUM_NAME(23),
+ GPR_DWARFNUM_NAME(24),
+ GPR_DWARFNUM_NAME(25),
+ GPR_DWARFNUM_NAME(26),
+ GPR_DWARFNUM_NAME(27),
+ GPR_DWARFNUM_NAME(28),
+ GPR_DWARFNUM_NAME(29),
+ GPR_DWARFNUM_NAME(30),
+ GPR_DWARFNUM_NAME(31),
+ REG_DWARFNUM_NAME("%msr", 66),
+ REG_DWARFNUM_NAME("%ctr", 109),
+ REG_DWARFNUM_NAME("%link", 108),
+ REG_DWARFNUM_NAME("%xer", 101),
+ REG_DWARFNUM_NAME("%dar", 119),
+ REG_DWARFNUM_NAME("%dsisr", 118),
+ REG_DWARFNUM_END,
+};
+
+/**
+ * get_arch_regstr() - lookup register name from its DWARF register number
+ * @n: the DWARF register number
+ *
+ * get_arch_regstr() returns the name of the register in the
+ * regdwarfnum_table from its DWARF register number. If the register is not
+ * found in the table, this returns NULL.
+ */
+const char *get_arch_regstr(unsigned int n)
+{
+ const struct pt_regs_dwarfnum *roff;
+ for (roff = regdwarfnum_table; roff->name != NULL; roff++)
+ if (roff->dwarfnum == n)
+ return roff->name;
+ return NULL;
+}
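The GPR_DWARFNUM_NAME() macro above builds its table entries with token pasting plus stringification. As a stand-alone illustration (not part of the patch; struct entry and GPR_NAME are made-up names used only for this sketch), one entry expands as follows:

#include <stdio.h>

struct entry { const char *name; unsigned int dwarfnum; };

#define STR(s) #s
/* Same trick as GPR_DWARFNUM_NAME(): paste the number onto "gpr",
 * then stringify, so GPR_NAME(3) yields {.name = "%gpr3", .dwarfnum = 3}. */
#define GPR_NAME(num) {.name = STR(%gpr##num), .dwarfnum = num}

int main(void)
{
	struct entry e = GPR_NAME(3);

	printf("%s -> %u\n", e.name, e.dwarfnum);	/* prints "%gpr3 -> 3" */
	return 0;
}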
diff --git a/tools/perf/arch/x86/Makefile b/tools/perf/arch/x86/Makefile
new file mode 100644
index 000000000000..15130b50dfe3
--- /dev/null
+++ b/tools/perf/arch/x86/Makefile
@@ -0,0 +1,4 @@
+ifndef NO_DWARF
+PERF_HAVE_DWARF_REGS := 1
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o
+endif
diff --git a/tools/perf/arch/x86/util/dwarf-regs.c b/tools/perf/arch/x86/util/dwarf-regs.c
new file mode 100644
index 000000000000..a794d3081928
--- /dev/null
+++ b/tools/perf/arch/x86/util/dwarf-regs.c
@@ -0,0 +1,75 @@
+/*
+ * dwarf-regs.c : Mapping of DWARF debug register numbers into register names.
+ * Extracted from probe-finder.c
+ *
+ * Written by Masami Hiramatsu <mhiramat@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+
+#include <libio.h>
+#include <dwarf-regs.h>
+
+/*
+ * Generic dwarf analysis helpers
+ */
+
+#define X86_32_MAX_REGS 8
+const char *x86_32_regs_table[X86_32_MAX_REGS] = {
+ "%ax",
+ "%cx",
+ "%dx",
+ "%bx",
+ "$stack", /* Stack address instead of %sp */
+ "%bp",
+ "%si",
+ "%di",
+};
+
+#define X86_64_MAX_REGS 16
+const char *x86_64_regs_table[X86_64_MAX_REGS] = {
+ "%ax",
+ "%dx",
+ "%cx",
+ "%bx",
+ "%si",
+ "%di",
+ "%bp",
+ "%sp",
+ "%r8",
+ "%r9",
+ "%r10",
+ "%r11",
+ "%r12",
+ "%r13",
+ "%r14",
+ "%r15",
+};
+
+/* TODO: switch on the DWARF address size */
+#ifdef __x86_64__
+#define ARCH_MAX_REGS X86_64_MAX_REGS
+#define arch_regs_table x86_64_regs_table
+#else
+#define ARCH_MAX_REGS X86_32_MAX_REGS
+#define arch_regs_table x86_32_regs_table
+#endif
+
+/* Return architecture dependent register string (for kprobe-tracer) */
+const char *get_arch_regstr(unsigned int n)
+{
+ return (n < ARCH_MAX_REGS) ? arch_regs_table[n] : NULL;
+}
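For context, get_arch_regstr() is meant to be fed a register number taken straight from the DWARF debug info, and the string it returns is what the kprobe tracer understands. A hedged usage sketch (not from the patch; show() is a made-up helper, and the example value assumes the x86-64 table above, where DWARF register 5 maps to "%di"):

#include <stdio.h>

const char *get_arch_regstr(unsigned int n);	/* provided by dwarf-regs.c */

static void show(unsigned int dwarf_regnum)
{
	const char *name = get_arch_regstr(dwarf_regnum);

	if (name)
		printf("DWARF reg %u -> %s\n", dwarf_regnum, name);	/* 5 -> %di */
	else
		printf("DWARF reg %u has no mapping\n", dwarf_regnum);
}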
diff --git a/tools/perf/bench/mem-memcpy.c b/tools/perf/bench/mem-memcpy.c
index 89773178e894..38dae7465142 100644
--- a/tools/perf/bench/mem-memcpy.c
+++ b/tools/perf/bench/mem-memcpy.c
@@ -10,7 +10,6 @@
#include "../perf.h"
#include "../util/util.h"
#include "../util/parse-options.h"
-#include "../util/string.h"
#include "../util/header.h"
#include "bench.h"
@@ -24,7 +23,7 @@
static const char *length_str = "1MB";
static const char *routine = "default";
-static int use_clock = 0;
+static bool use_clock = false;
static int clock_fd;
static const struct option options[] = {
diff --git a/tools/perf/bench/sched-messaging.c b/tools/perf/bench/sched-messaging.c
index 81cee78181fa..da1b2e9f01ff 100644
--- a/tools/perf/bench/sched-messaging.c
+++ b/tools/perf/bench/sched-messaging.c
@@ -31,9 +31,9 @@
#define DATASIZE 100
-static int use_pipes = 0;
+static bool use_pipes = false;
static unsigned int loops = 100;
-static unsigned int thread_mode = 0;
+static bool thread_mode = false;
static unsigned int num_groups = 10;
struct sender_context {
diff --git a/tools/perf/bench/sched-pipe.c b/tools/perf/bench/sched-pipe.c
index 4f77c7c27640..d9ab3ce446ac 100644
--- a/tools/perf/bench/sched-pipe.c
+++ b/tools/perf/bench/sched-pipe.c
@@ -93,7 +93,7 @@ int bench_sched_pipe(int argc, const char **argv,
switch (bench_format) {
case BENCH_FORMAT_DEFAULT:
- printf("# Extecuted %d pipe operations between two tasks\n\n",
+ printf("# Executed %d pipe operations between two tasks\n\n",
loops);
result_usec = diff.tv_sec * 1000000;
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 6ad7148451c5..3940964161b3 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -14,7 +14,6 @@
#include "util/cache.h"
#include <linux/rbtree.h>
#include "util/symbol.h"
-#include "util/string.h"
#include "perf.h"
#include "util/debug.h"
@@ -29,11 +28,11 @@
static char const *input_name = "perf.data";
-static int force;
+static bool force;
-static int full_paths;
+static bool full_paths;
-static int print_line;
+static bool print_line;
struct sym_hist {
u64 sum;
@@ -69,13 +68,11 @@ static int sym__alloc_hist(struct symbol *self)
static int annotate__hist_hit(struct hist_entry *he, u64 ip)
{
unsigned int sym_size, offset;
- struct symbol *sym = he->sym;
+ struct symbol *sym = he->ms.sym;
struct sym_priv *priv;
struct sym_hist *h;
- he->count++;
-
- if (!sym || !he->map)
+ if (!sym || !he->ms.map)
return 0;
priv = symbol__priv(sym);
@@ -85,7 +82,7 @@ static int annotate__hist_hit(struct hist_entry *he, u64 ip)
sym_size = sym->end - sym->start;
offset = ip - sym->start;
- pr_debug3("%s: ip=%#Lx\n", __func__, he->map->unmap_ip(he->map, ip));
+ pr_debug3("%s: ip=%#Lx\n", __func__, he->ms.map->unmap_ip(he->ms.map, ip));
if (offset >= sym_size)
return 0;
@@ -94,15 +91,13 @@ static int annotate__hist_hit(struct hist_entry *he, u64 ip)
h->sum++;
h->ip[offset]++;
- pr_debug3("%#Lx %s: count++ [ip: %#Lx, %#Lx] => %Ld\n", he->sym->start,
- he->sym->name, ip, ip - he->sym->start, h->ip[offset]);
+ pr_debug3("%#Lx %s: count++ [ip: %#Lx, %#Lx] => %Ld\n", he->ms.sym->start,
+ he->ms.sym->name, ip, ip - he->ms.sym->start, h->ip[offset]);
return 0;
}
-static int perf_session__add_hist_entry(struct perf_session *self,
- struct addr_location *al, u64 count)
+static int hists__add_entry(struct hists *self, struct addr_location *al)
{
- bool hit;
struct hist_entry *he;
if (sym_hist_filter != NULL &&
@@ -116,7 +111,7 @@ static int perf_session__add_hist_entry(struct perf_session *self,
return 0;
}
- he = __perf_session__add_hist_entry(&self->hists, al, NULL, count, &hit);
+ he = __hists__add_entry(self, al, NULL, 1);
if (he == NULL)
return -ENOMEM;
@@ -136,7 +131,7 @@ static int process_sample_event(event_t *event, struct perf_session *session)
return -1;
}
- if (!al.filtered && perf_session__add_hist_entry(session, &al, 1)) {
+ if (!al.filtered && hists__add_entry(&session->hists, &al)) {
pr_warning("problem incrementing symbol count, "
"skipping event\n");
return -1;
@@ -187,7 +182,7 @@ static struct objdump_line *objdump__get_next_ip_line(struct list_head *head,
static int parse_line(FILE *file, struct hist_entry *he,
struct list_head *head)
{
- struct symbol *sym = he->sym;
+ struct symbol *sym = he->ms.sym;
struct objdump_line *objdump_line;
char *line = NULL, *tmp, *tmp2;
size_t line_len;
@@ -226,7 +221,7 @@ static int parse_line(FILE *file, struct hist_entry *he,
}
if (line_ip != -1) {
- u64 start = map__rip_2objdump(he->map, sym->start);
+ u64 start = map__rip_2objdump(he->ms.map, sym->start);
offset = line_ip - start;
}
@@ -244,7 +239,7 @@ static int objdump_line__print(struct objdump_line *self,
struct list_head *head,
struct hist_entry *he, u64 len)
{
- struct symbol *sym = he->sym;
+ struct symbol *sym = he->ms.sym;
static const char *prev_line;
static const char *prev_color;
@@ -327,7 +322,7 @@ static void insert_source_line(struct sym_ext *sym_ext)
static void free_source_line(struct hist_entry *he, int len)
{
- struct sym_priv *priv = symbol__priv(he->sym);
+ struct sym_priv *priv = symbol__priv(he->ms.sym);
struct sym_ext *sym_ext = priv->ext;
int i;
@@ -346,7 +341,7 @@ static void free_source_line(struct hist_entry *he, int len)
static void
get_source_line(struct hist_entry *he, int len, const char *filename)
{
- struct symbol *sym = he->sym;
+ struct symbol *sym = he->ms.sym;
u64 start;
int i;
char cmd[PATH_MAX * 2];
@@ -361,7 +356,7 @@ get_source_line(struct hist_entry *he, int len, const char *filename)
if (!priv->ext)
return;
- start = he->map->unmap_ip(he->map, sym->start);
+ start = he->ms.map->unmap_ip(he->ms.map, sym->start);
for (i = 0; i < len; i++) {
char *path = NULL;
@@ -425,7 +420,7 @@ static void print_summary(const char *filename)
static void hist_entry__print_hits(struct hist_entry *self)
{
- struct symbol *sym = self->sym;
+ struct symbol *sym = self->ms.sym;
struct sym_priv *priv = symbol__priv(sym);
struct sym_hist *h = priv->hist;
u64 len = sym->end - sym->start, offset;
@@ -439,9 +434,9 @@ static void hist_entry__print_hits(struct hist_entry *self)
static void annotate_sym(struct hist_entry *he)
{
- struct map *map = he->map;
+ struct map *map = he->ms.map;
struct dso *dso = map->dso;
- struct symbol *sym = he->sym;
+ struct symbol *sym = he->ms.sym;
const char *filename = dso->long_name, *d_filename;
u64 len;
char command[PATH_MAX*2];
@@ -452,6 +447,16 @@ static void annotate_sym(struct hist_entry *he)
if (!filename)
return;
+ if (dso->origin == DSO__ORIG_KERNEL) {
+ if (dso->annotate_warned)
+ return;
+ dso->annotate_warned = 1;
+ pr_err("Can't annotate %s: No vmlinux file was found in the "
+ "path:\n", sym->name);
+ vmlinux_path__fprintf(stderr);
+ return;
+ }
+
pr_debug("%s: filename=%s, sym=%s, start=%#Lx, end=%#Lx\n", __func__,
filename, sym->name, map->unmap_ip(map, sym->start),
map->unmap_ip(map, sym->end));
@@ -508,25 +513,25 @@ static void annotate_sym(struct hist_entry *he)
free_source_line(he, len);
}
-static void perf_session__find_annotations(struct perf_session *self)
+static void hists__find_annotations(struct hists *self)
{
struct rb_node *nd;
- for (nd = rb_first(&self->hists); nd; nd = rb_next(nd)) {
+ for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
struct sym_priv *priv;
- if (he->sym == NULL)
+ if (he->ms.sym == NULL)
continue;
- priv = symbol__priv(he->sym);
+ priv = symbol__priv(he->ms.sym);
if (priv->hist == NULL)
continue;
annotate_sym(he);
/*
* Since we have a hist_entry per IP for the same symbol, free
- * he->sym->hist to signal we already processed this symbol.
+ * he->ms.sym->hist to signal we already processed this symbol.
*/
free(priv->hist);
priv->hist = NULL;
@@ -545,7 +550,7 @@ static int __cmd_annotate(void)
int ret;
struct perf_session *session;
- session = perf_session__new(input_name, O_RDONLY, force);
+ session = perf_session__new(input_name, O_RDONLY, force, false);
if (session == NULL)
return -ENOMEM;
@@ -562,11 +567,11 @@ static int __cmd_annotate(void)
perf_session__fprintf(session, stdout);
if (verbose > 2)
- dsos__fprintf(stdout);
+ perf_session__fprintf_dsos(session, stdout);
- perf_session__collapse_resort(&session->hists);
- perf_session__output_resort(&session->hists, session->event_total[0]);
- perf_session__find_annotations(session);
+ hists__collapse_resort(&session->hists);
+ hists__output_resort(&session->hists);
+ hists__find_annotations(&session->hists);
out_delete:
perf_session__delete(session);
@@ -581,10 +586,12 @@ static const char * const annotate_usage[] = {
static const struct option options[] = {
OPT_STRING('i', "input", &input_name, "file",
"input file name"),
+ OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
+ "only consider symbols in these dsos"),
OPT_STRING('s', "symbol", &sym_hist_filter, "symbol",
"symbol to annotate"),
OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
- OPT_BOOLEAN('v', "verbose", &verbose,
+ OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index 30a05f552c96..f8e3d1852029 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -27,7 +27,7 @@ static const struct option buildid_cache_options[] = {
"file list", "file(s) to add"),
OPT_STRING('r', "remove", &remove_name_list_str, "file list",
"file(s) to remove"),
- OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose"),
+ OPT_INCR('v', "verbose", &verbose, "be more verbose"),
OPT_END()
};
diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c
index d0675c02f81e..44a47e13bd67 100644
--- a/tools/perf/builtin-buildid-list.c
+++ b/tools/perf/builtin-buildid-list.c
@@ -16,7 +16,7 @@
#include "util/symbol.h"
static char const *input_name = "perf.data";
-static int force;
+static bool force;
static bool with_hits;
static const char * const buildid_list_usage[] = {
@@ -29,7 +29,7 @@ static const struct option options[] = {
OPT_STRING('i', "input", &input_name, "file",
"input file name"),
OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
- OPT_BOOLEAN('v', "verbose", &verbose,
+ OPT_INCR('v', "verbose", &verbose,
"be more verbose"),
OPT_END()
};
@@ -39,14 +39,14 @@ static int __cmd_buildid_list(void)
int err = -1;
struct perf_session *session;
- session = perf_session__new(input_name, O_RDONLY, force);
+ session = perf_session__new(input_name, O_RDONLY, force, false);
if (session == NULL)
return -1;
if (with_hits)
perf_session__process_events(session, &build_id__mark_dso_hit_ops);
- dsos__fprintf_buildid(stdout, with_hits);
+ perf_session__fprintf_dsos_buildid(session, stdout, with_hits);
perf_session__delete(session);
return err;
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 1ea15d8aeed1..3a95a0260a5b 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -19,23 +19,15 @@
static char const *input_old = "perf.data.old",
*input_new = "perf.data";
static char diff__default_sort_order[] = "dso,symbol";
-static int force;
+static bool force;
static bool show_displacement;
-static int perf_session__add_hist_entry(struct perf_session *self,
- struct addr_location *al, u64 count)
+static int hists__add_entry(struct hists *self,
+ struct addr_location *al, u64 count)
{
- bool hit;
- struct hist_entry *he = __perf_session__add_hist_entry(&self->hists,
- al, NULL,
- count, &hit);
- if (he == NULL)
- return -ENOMEM;
-
- if (hit)
- he->count += count;
-
- return 0;
+ if (__hists__add_entry(self, al, NULL, count) != NULL)
+ return 0;
+ return -ENOMEM;
}
static int diff__process_sample_event(event_t *event, struct perf_session *session)
@@ -57,12 +49,12 @@ static int diff__process_sample_event(event_t *event, struct perf_session *sessi
event__parse_sample(event, session->sample_type, &data);
- if (perf_session__add_hist_entry(session, &al, data.period)) {
+ if (hists__add_entry(&session->hists, &al, data.period)) {
pr_warning("problem incrementing symbol count, skipping event\n");
return -1;
}
- session->events_stats.total += data.period;
+ session->hists.stats.total += data.period;
return 0;
}
@@ -95,35 +87,34 @@ static void perf_session__insert_hist_entry_by_name(struct rb_root *root,
rb_insert_color(&he->rb_node, root);
}
-static void perf_session__resort_hist_entries(struct perf_session *self)
+static void hists__resort_entries(struct hists *self)
{
unsigned long position = 1;
struct rb_root tmp = RB_ROOT;
- struct rb_node *next = rb_first(&self->hists);
+ struct rb_node *next = rb_first(&self->entries);
while (next != NULL) {
struct hist_entry *n = rb_entry(next, struct hist_entry, rb_node);
next = rb_next(&n->rb_node);
- rb_erase(&n->rb_node, &self->hists);
+ rb_erase(&n->rb_node, &self->entries);
n->position = position++;
perf_session__insert_hist_entry_by_name(&tmp, n);
}
- self->hists = tmp;
+ self->entries = tmp;
}
-static void perf_session__set_hist_entries_positions(struct perf_session *self)
+static void hists__set_positions(struct hists *self)
{
- perf_session__output_resort(&self->hists, self->events_stats.total);
- perf_session__resort_hist_entries(self);
+ hists__output_resort(self);
+ hists__resort_entries(self);
}
-static struct hist_entry *
-perf_session__find_hist_entry(struct perf_session *self,
- struct hist_entry *he)
+static struct hist_entry *hists__find_entry(struct hists *self,
+ struct hist_entry *he)
{
- struct rb_node *n = self->hists.rb_node;
+ struct rb_node *n = self->entries.rb_node;
while (n) {
struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node);
@@ -140,14 +131,13 @@ perf_session__find_hist_entry(struct perf_session *self,
return NULL;
}
-static void perf_session__match_hists(struct perf_session *old_session,
- struct perf_session *new_session)
+static void hists__match(struct hists *older, struct hists *newer)
{
struct rb_node *nd;
- for (nd = rb_first(&new_session->hists); nd; nd = rb_next(nd)) {
+ for (nd = rb_first(&newer->entries); nd; nd = rb_next(nd)) {
struct hist_entry *pos = rb_entry(nd, struct hist_entry, rb_node);
- pos->pair = perf_session__find_hist_entry(old_session, pos);
+ pos->pair = hists__find_entry(older, pos);
}
}
@@ -156,8 +146,8 @@ static int __cmd_diff(void)
int ret, i;
struct perf_session *session[2];
- session[0] = perf_session__new(input_old, O_RDONLY, force);
- session[1] = perf_session__new(input_new, O_RDONLY, force);
+ session[0] = perf_session__new(input_old, O_RDONLY, force, false);
+ session[1] = perf_session__new(input_new, O_RDONLY, force, false);
if (session[0] == NULL || session[1] == NULL)
return -ENOMEM;
@@ -167,15 +157,13 @@ static int __cmd_diff(void)
goto out_delete;
}
- perf_session__output_resort(&session[1]->hists,
- session[1]->events_stats.total);
+ hists__output_resort(&session[1]->hists);
if (show_displacement)
- perf_session__set_hist_entries_positions(session[0]);
+ hists__set_positions(&session[0]->hists);
- perf_session__match_hists(session[0], session[1]);
- perf_session__fprintf_hists(&session[1]->hists, session[0],
- show_displacement, stdout,
- session[1]->events_stats.total);
+ hists__match(&session[0]->hists, &session[1]->hists);
+ hists__fprintf(&session[1]->hists, &session[0]->hists,
+ show_displacement, stdout);
out_delete:
for (i = 0; i < 2; ++i)
perf_session__delete(session[i]);
@@ -188,7 +176,7 @@ static const char * const diff_usage[] = {
};
static const struct option options[] = {
- OPT_BOOLEAN('v', "verbose", &verbose,
+ OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('m', "displacement", &show_displacement,
"Show position displacement relative to baseline"),
@@ -225,6 +213,10 @@ int cmd_diff(int argc, const char **argv, const char *prefix __used)
input_new = argv[1];
} else
input_new = argv[0];
+ } else if (symbol_conf.default_guest_vmlinux_name ||
+ symbol_conf.default_guest_kallsyms) {
+ input_old = "perf.data.host";
+ input_new = "perf.data.guest";
}
symbol_conf.exclude_other = false;
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index 215b584007b1..81e3ecc40fc7 100644
--- a/tools/perf/builtin-help.c
+++ b/tools/perf/builtin-help.c
@@ -29,7 +29,7 @@ enum help_format {
HELP_FORMAT_WEB,
};
-static int show_all = 0;
+static bool show_all = false;
static enum help_format help_format = HELP_FORMAT_MAN;
static struct option builtin_help_options[] = {
OPT_BOOLEAN('a', "all", &show_all, "print all available commands"),
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
new file mode 100644
index 000000000000..8e3e47b064ce
--- /dev/null
+++ b/tools/perf/builtin-inject.c
@@ -0,0 +1,228 @@
+/*
+ * builtin-inject.c
+ *
+ * Builtin inject command: Examine the live mode (stdin) event stream
+ * and repipe it to stdout while optionally injecting additional
+ * events into it.
+ */
+#include "builtin.h"
+
+#include "perf.h"
+#include "util/session.h"
+#include "util/debug.h"
+
+#include "util/parse-options.h"
+
+static char const *input_name = "-";
+static bool inject_build_ids;
+
+static int event__repipe(event_t *event __used,
+ struct perf_session *session __used)
+{
+ uint32_t size;
+ void *buf = event;
+
+ size = event->header.size;
+
+ while (size) {
+ int ret = write(STDOUT_FILENO, buf, size);
+ if (ret < 0)
+ return -errno;
+
+ size -= ret;
+ buf += ret;
+ }
+
+ return 0;
+}
+
+static int event__repipe_mmap(event_t *self, struct perf_session *session)
+{
+ int err;
+
+ err = event__process_mmap(self, session);
+ event__repipe(self, session);
+
+ return err;
+}
+
+static int event__repipe_task(event_t *self, struct perf_session *session)
+{
+ int err;
+
+ err = event__process_task(self, session);
+ event__repipe(self, session);
+
+ return err;
+}
+
+static int event__repipe_tracing_data(event_t *self,
+ struct perf_session *session)
+{
+ int err;
+
+ event__repipe(self, session);
+ err = event__process_tracing_data(self, session);
+
+ return err;
+}
+
+static int dso__read_build_id(struct dso *self)
+{
+ if (self->has_build_id)
+ return 0;
+
+ if (filename__read_build_id(self->long_name, self->build_id,
+ sizeof(self->build_id)) > 0) {
+ self->has_build_id = true;
+ return 0;
+ }
+
+ return -1;
+}
+
+static int dso__inject_build_id(struct dso *self, struct perf_session *session)
+{
+ u16 misc = PERF_RECORD_MISC_USER;
+ struct machine *machine;
+ int err;
+
+ if (dso__read_build_id(self) < 0) {
+ pr_debug("no build_id found for %s\n", self->long_name);
+ return -1;
+ }
+
+ machine = perf_session__find_host_machine(session);
+ if (machine == NULL) {
+ pr_err("Can't find machine for session\n");
+ return -1;
+ }
+
+ if (self->kernel)
+ misc = PERF_RECORD_MISC_KERNEL;
+
+ err = event__synthesize_build_id(self, misc, event__repipe,
+ machine, session);
+ if (err) {
+ pr_err("Can't synthesize build_id event for %s\n", self->long_name);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int event__inject_buildid(event_t *event, struct perf_session *session)
+{
+ struct addr_location al;
+ struct thread *thread;
+ u8 cpumode;
+
+ cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+
+ thread = perf_session__findnew(session, event->ip.pid);
+ if (thread == NULL) {
+ pr_err("problem processing %d event, skipping it.\n",
+ event->header.type);
+ goto repipe;
+ }
+
+ thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
+ event->ip.pid, event->ip.ip, &al);
+
+ if (al.map != NULL) {
+ if (!al.map->dso->hit) {
+ al.map->dso->hit = 1;
+ if (map__load(al.map, NULL) >= 0) {
+ dso__inject_build_id(al.map->dso, session);
+ /*
+ * If this fails, too bad, let the other side
+ * account this as unresolved.
+ */
+ } else
+ pr_warning("no symbols found in %s, maybe "
+ "install a debug package?\n",
+ al.map->dso->long_name);
+ }
+ }
+
+repipe:
+ event__repipe(event, session);
+ return 0;
+}
+
+struct perf_event_ops inject_ops = {
+ .sample = event__repipe,
+ .mmap = event__repipe,
+ .comm = event__repipe,
+ .fork = event__repipe,
+ .exit = event__repipe,
+ .lost = event__repipe,
+ .read = event__repipe,
+ .throttle = event__repipe,
+ .unthrottle = event__repipe,
+ .attr = event__repipe,
+ .event_type = event__repipe,
+ .tracing_data = event__repipe,
+ .build_id = event__repipe,
+};
+
+extern volatile int session_done;
+
+static void sig_handler(int sig __attribute__((__unused__)))
+{
+ session_done = 1;
+}
+
+static int __cmd_inject(void)
+{
+ struct perf_session *session;
+ int ret = -EINVAL;
+
+ signal(SIGINT, sig_handler);
+
+ if (inject_build_ids) {
+ inject_ops.sample = event__inject_buildid;
+ inject_ops.mmap = event__repipe_mmap;
+ inject_ops.fork = event__repipe_task;
+ inject_ops.tracing_data = event__repipe_tracing_data;
+ }
+
+ session = perf_session__new(input_name, O_RDONLY, false, true);
+ if (session == NULL)
+ return -ENOMEM;
+
+ ret = perf_session__process_events(session, &inject_ops);
+
+ perf_session__delete(session);
+
+ return ret;
+}
+
+static const char * const report_usage[] = {
+ "perf inject [<options>]",
+ NULL
+};
+
+static const struct option options[] = {
+ OPT_BOOLEAN('b', "build-ids", &inject_build_ids,
+ "Inject build-ids into the output stream"),
+ OPT_INCR('v', "verbose", &verbose,
+ "be more verbose (show build ids, etc)"),
+ OPT_END()
+};
+
+int cmd_inject(int argc, const char **argv, const char *prefix __used)
+{
+ argc = parse_options(argc, argv, options, report_usage, 0);
+
+ /*
+ * Any (unrecognized) arguments left?
+ */
+ if (argc)
+ usage_with_options(report_usage, options);
+
+ if (symbol__init() < 0)
+ return -1;
+
+ return __cmd_inject();
+}
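One detail worth noting in event__repipe() above: stdout is normally a pipe here, so write(2) may return short counts and the loop has to keep going until the whole event is out. A self-contained sketch of that pattern (illustrative only; write_all() is a made-up name, the patch open-codes the equivalent loop):

#include <errno.h>
#include <unistd.h>

/* Push the whole buffer to fd, retrying after short writes; returns 0
 * on success or a negative errno, mirroring event__repipe()'s loop. */
static int write_all(int fd, const void *buf, size_t size)
{
	const char *p = buf;

	while (size) {
		ssize_t ret = write(fd, p, size);

		if (ret < 0)
			return -errno;
		size -= ret;
		p += ret;
	}
	return 0;
}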
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index 924a9518931a..31f60a2535e0 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -335,8 +335,9 @@ static int process_sample_event(event_t *event, struct perf_session *session)
}
static struct perf_event_ops event_ops = {
- .sample = process_sample_event,
- .comm = event__process_comm,
+ .sample = process_sample_event,
+ .comm = event__process_comm,
+ .ordered_samples = true,
};
static double fragmentation(unsigned long n_req, unsigned long n_alloc)
@@ -351,6 +352,7 @@ static void __print_result(struct rb_root *root, struct perf_session *session,
int n_lines, int is_caller)
{
struct rb_node *next;
+ struct machine *machine;
printf("%.102s\n", graph_dotted_line);
printf(" %-34s |", is_caller ? "Callsite": "Alloc Ptr");
@@ -359,23 +361,29 @@ static void __print_result(struct rb_root *root, struct perf_session *session,
next = rb_first(root);
+ machine = perf_session__find_host_machine(session);
+ if (!machine) {
+ pr_err("__print_result: couldn't find kernel information\n");
+ return;
+ }
while (next && n_lines--) {
struct alloc_stat *data = rb_entry(next, struct alloc_stat,
node);
struct symbol *sym = NULL;
+ struct map *map;
char buf[BUFSIZ];
u64 addr;
if (is_caller) {
addr = data->call_site;
if (!raw_ip)
- sym = map_groups__find_function(&session->kmaps, addr, NULL);
+ sym = machine__find_kernel_function(machine, addr, &map, NULL);
} else
addr = data->ptr;
if (sym != NULL)
snprintf(buf, sizeof(buf), "%s+%Lx", sym->name,
- addr - sym->start);
+ addr - map->unmap_ip(map, sym->start));
else
snprintf(buf, sizeof(buf), "%#Lx", addr);
printf(" %-34s |", buf);
@@ -484,10 +492,13 @@ static void sort_result(void)
static int __cmd_kmem(void)
{
int err = -EINVAL;
- struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0);
+ struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0, false);
if (session == NULL)
return -ENOMEM;
+ if (perf_session__create_kernel_maps(session) < 0)
+ goto out_delete;
+
if (!perf_session__has_traces(session, "kmem record"))
goto out_delete;
@@ -718,7 +729,6 @@ static const char *record_args[] = {
"record",
"-a",
"-R",
- "-M",
"-f",
"-c", "1",
"-e", "kmem:kmalloc",
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
new file mode 100644
index 000000000000..a4c7cae45024
--- /dev/null
+++ b/tools/perf/builtin-kvm.c
@@ -0,0 +1,144 @@
+#include "builtin.h"
+#include "perf.h"
+
+#include "util/util.h"
+#include "util/cache.h"
+#include "util/symbol.h"
+#include "util/thread.h"
+#include "util/header.h"
+#include "util/session.h"
+
+#include "util/parse-options.h"
+#include "util/trace-event.h"
+
+#include "util/debug.h"
+
+#include <sys/prctl.h>
+
+#include <semaphore.h>
+#include <pthread.h>
+#include <math.h>
+
+static char *file_name;
+static char name_buffer[256];
+
+int perf_host = 1;
+int perf_guest;
+
+static const char * const kvm_usage[] = {
+ "perf kvm [<options>] {top|record|report|diff|buildid-list}",
+ NULL
+};
+
+static const struct option kvm_options[] = {
+ OPT_STRING('i', "input", &file_name, "file",
+ "Input file name"),
+ OPT_STRING('o', "output", &file_name, "file",
+ "Output file name"),
+ OPT_BOOLEAN(0, "guest", &perf_guest,
+ "Collect guest os data"),
+ OPT_BOOLEAN(0, "host", &perf_host,
+ "Collect guest os data"),
+ OPT_STRING(0, "guestmount", &symbol_conf.guestmount, "directory",
+ "guest mount directory under which every guest os"
+ " instance has a subdir"),
+ OPT_STRING(0, "guestvmlinux", &symbol_conf.default_guest_vmlinux_name,
+ "file", "file saving guest os vmlinux"),
+ OPT_STRING(0, "guestkallsyms", &symbol_conf.default_guest_kallsyms,
+ "file", "file saving guest os /proc/kallsyms"),
+ OPT_STRING(0, "guestmodules", &symbol_conf.default_guest_modules,
+ "file", "file saving guest os /proc/modules"),
+ OPT_END()
+};
+
+static int __cmd_record(int argc, const char **argv)
+{
+ int rec_argc, i = 0, j;
+ const char **rec_argv;
+
+ rec_argc = argc + 2;
+ rec_argv = calloc(rec_argc + 1, sizeof(char *));
+ rec_argv[i++] = strdup("record");
+ rec_argv[i++] = strdup("-o");
+ rec_argv[i++] = strdup(file_name);
+ for (j = 1; j < argc; j++, i++)
+ rec_argv[i] = argv[j];
+
+ BUG_ON(i != rec_argc);
+
+ return cmd_record(i, rec_argv, NULL);
+}
+
+static int __cmd_report(int argc, const char **argv)
+{
+ int rec_argc, i = 0, j;
+ const char **rec_argv;
+
+ rec_argc = argc + 2;
+ rec_argv = calloc(rec_argc + 1, sizeof(char *));
+ rec_argv[i++] = strdup("report");
+ rec_argv[i++] = strdup("-i");
+ rec_argv[i++] = strdup(file_name);
+ for (j = 1; j < argc; j++, i++)
+ rec_argv[i] = argv[j];
+
+ BUG_ON(i != rec_argc);
+
+ return cmd_report(i, rec_argv, NULL);
+}
+
+static int __cmd_buildid_list(int argc, const char **argv)
+{
+ int rec_argc, i = 0, j;
+ const char **rec_argv;
+
+ rec_argc = argc + 2;
+ rec_argv = calloc(rec_argc + 1, sizeof(char *));
+ rec_argv[i++] = strdup("buildid-list");
+ rec_argv[i++] = strdup("-i");
+ rec_argv[i++] = strdup(file_name);
+ for (j = 1; j < argc; j++, i++)
+ rec_argv[i] = argv[j];
+
+ BUG_ON(i != rec_argc);
+
+ return cmd_buildid_list(i, rec_argv, NULL);
+}
+
+int cmd_kvm(int argc, const char **argv, const char *prefix __used)
+{
+ perf_host = perf_guest = 0;
+
+ argc = parse_options(argc, argv, kvm_options, kvm_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+ if (!argc)
+ usage_with_options(kvm_usage, kvm_options);
+
+ if (!perf_host)
+ perf_guest = 1;
+
+ if (!file_name) {
+ if (perf_host && !perf_guest)
+ sprintf(name_buffer, "perf.data.host");
+ else if (!perf_host && perf_guest)
+ sprintf(name_buffer, "perf.data.guest");
+ else
+ sprintf(name_buffer, "perf.data.kvm");
+ file_name = name_buffer;
+ }
+
+ if (!strncmp(argv[0], "rec", 3))
+ return __cmd_record(argc, argv);
+ else if (!strncmp(argv[0], "rep", 3))
+ return __cmd_report(argc, argv);
+ else if (!strncmp(argv[0], "diff", 4))
+ return cmd_diff(argc, argv, NULL);
+ else if (!strncmp(argv[0], "top", 3))
+ return cmd_top(argc, argv, NULL);
+ else if (!strncmp(argv[0], "buildid-list", 12))
+ return __cmd_buildid_list(argc, argv);
+ else
+ usage_with_options(kvm_usage, kvm_options);
+
+ return 0;
+}
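To make the argv forwarding above concrete: running, say, "perf kvm --host record -a" reaches __cmd_record() with argc == 2 and argv == { "record", "-a" }, so rec_argc == 4 and the vector handed to cmd_record() becomes { "record", "-o", "perf.data.host", "-a", NULL } (perf.data.host being the default chosen when --host is set, --guest is not, and no -i/-o file was given); the BUG_ON() simply checks that i ended up equal to rec_argc.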
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index e12c844df1e2..e18dfdc2948a 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -23,6 +23,8 @@
#include <linux/list.h>
#include <linux/hash.h>
+static struct perf_session *session;
+
/* based on kernel/lockdep.c */
#define LOCKHASH_BITS 12
#define LOCKHASH_SIZE (1UL << LOCKHASH_BITS)
@@ -32,9 +34,6 @@ static struct list_head lockhash_table[LOCKHASH_SIZE];
#define __lockhashfn(key) hash_long((unsigned long)key, LOCKHASH_BITS)
#define lockhashentry(key) (lockhash_table + __lockhashfn((key)))
-#define LOCK_STATE_UNLOCKED 0 /* initial state */
-#define LOCK_STATE_LOCKED 1
-
struct lock_stat {
struct list_head hash_entry;
struct rb_node rb; /* used for sorting */
@@ -47,20 +46,151 @@ struct lock_stat {
void *addr; /* address of lockdep_map, used as ID */
char *name; /* for strcpy(), we cannot use const */
- int state;
- u64 prev_event_time; /* timestamp of previous event */
-
- unsigned int nr_acquired;
unsigned int nr_acquire;
+ unsigned int nr_acquired;
unsigned int nr_contended;
unsigned int nr_release;
+ unsigned int nr_readlock;
+ unsigned int nr_trylock;
/* these times are in nano sec. */
u64 wait_time_total;
u64 wait_time_min;
u64 wait_time_max;
+
+ int discard; /* flag of blacklist */
};
+/*
+ * States of lock_seq_stat
+ *
+ * UNINITIALIZED is required for detecting the first event of acquire.
+ * Given the nature of lock events, there is no guarantee
+ * that the first event seen for a lock is acquire;
+ * it can be acquired, contended or release.
+ */
+#define SEQ_STATE_UNINITIALIZED 0 /* initial state */
+#define SEQ_STATE_RELEASED 1
+#define SEQ_STATE_ACQUIRING 2
+#define SEQ_STATE_ACQUIRED 3
+#define SEQ_STATE_READ_ACQUIRED 4
+#define SEQ_STATE_CONTENDED 5
+
+/*
+ * MAX_LOCK_DEPTH
+ * Imported from include/linux/sched.h.
+ * Should this be synchronized?
+ */
+#define MAX_LOCK_DEPTH 48
+
+/*
+ * struct lock_seq_stat:
+ * Holds the state of one lock sequence:
+ * 1) acquire -> acquired -> release
+ * 2) acquire -> contended -> acquired -> release
+ * 3) acquire (with read or try) -> release
+ * 4) Are there other patterns?
+ */
+struct lock_seq_stat {
+ struct list_head list;
+ int state;
+ u64 prev_event_time;
+ void *addr;
+
+ int read_count;
+};
+
+struct thread_stat {
+ struct rb_node rb;
+
+ u32 tid;
+ struct list_head seq_list;
+};
+
+static struct rb_root thread_stats;
+
+static struct thread_stat *thread_stat_find(u32 tid)
+{
+ struct rb_node *node;
+ struct thread_stat *st;
+
+ node = thread_stats.rb_node;
+ while (node) {
+ st = container_of(node, struct thread_stat, rb);
+ if (st->tid == tid)
+ return st;
+ else if (tid < st->tid)
+ node = node->rb_left;
+ else
+ node = node->rb_right;
+ }
+
+ return NULL;
+}
+
+static void thread_stat_insert(struct thread_stat *new)
+{
+ struct rb_node **rb = &thread_stats.rb_node;
+ struct rb_node *parent = NULL;
+ struct thread_stat *p;
+
+ while (*rb) {
+ p = container_of(*rb, struct thread_stat, rb);
+ parent = *rb;
+
+ if (new->tid < p->tid)
+ rb = &(*rb)->rb_left;
+ else if (new->tid > p->tid)
+ rb = &(*rb)->rb_right;
+ else
+ BUG_ON("inserting invalid thread_stat\n");
+ }
+
+ rb_link_node(&new->rb, parent, rb);
+ rb_insert_color(&new->rb, &thread_stats);
+}
+
+static struct thread_stat *thread_stat_findnew_after_first(u32 tid)
+{
+ struct thread_stat *st;
+
+ st = thread_stat_find(tid);
+ if (st)
+ return st;
+
+ st = zalloc(sizeof(struct thread_stat));
+ if (!st)
+ die("memory allocation failed\n");
+
+ st->tid = tid;
+ INIT_LIST_HEAD(&st->seq_list);
+
+ thread_stat_insert(st);
+
+ return st;
+}
+
+static struct thread_stat *thread_stat_findnew_first(u32 tid);
+static struct thread_stat *(*thread_stat_findnew)(u32 tid) =
+ thread_stat_findnew_first;
+
+static struct thread_stat *thread_stat_findnew_first(u32 tid)
+{
+ struct thread_stat *st;
+
+ st = zalloc(sizeof(struct thread_stat));
+ if (!st)
+ die("memory allocation failed\n");
+ st->tid = tid;
+ INIT_LIST_HEAD(&st->seq_list);
+
+ rb_link_node(&st->rb, NULL, &thread_stats.rb_node);
+ rb_insert_color(&st->rb, &thread_stats);
+
+ thread_stat_findnew = thread_stat_findnew_after_first;
+ return st;
+}
+
/* build simple key function one is bigger than two */
#define SINGLE_KEY(member) \
static int lock_stat_key_ ## member(struct lock_stat *one, \
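The SEQ_STATE_* values above form a small per-lock state machine; the report handlers later in this patch only accept the sequences listed in the lock_seq_stat comment and mark anything else with ->discard. A compact sketch of the legal transitions on the plain (non read/try) path, with made-up names and purely for illustration:

/* Simplified mirror of the SEQ_STATE_* transitions driven by the
 * acquire/acquired/contended/release tracepoints; -1 means the event is
 * not a legal continuation (the real code then either ignores the orphan
 * or discards the whole lock_stat). */
enum seq_state { UNINIT, RELEASED, ACQUIRING, ACQUIRED, CONTENDED };
enum lock_event { EV_ACQUIRE, EV_ACQUIRED, EV_CONTENDED, EV_RELEASE };

static int next_state(enum seq_state s, enum lock_event e)
{
	switch (e) {
	case EV_ACQUIRE:
		return (s == UNINIT || s == RELEASED) ? ACQUIRING : -1;
	case EV_CONTENDED:
		return s == ACQUIRING ? CONTENDED : -1;
	case EV_ACQUIRED:
		return (s == ACQUIRING || s == CONTENDED) ? ACQUIRED : -1;
	case EV_RELEASE:
		return s == ACQUIRED ? RELEASED : -1;
	}
	return -1;
}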
@@ -175,8 +305,6 @@ static struct lock_stat *lock_stat_findnew(void *addr, const char *name)
goto alloc_failed;
strcpy(new->name, name);
- /* LOCK_STATE_UNLOCKED == 0 isn't guaranteed forever */
- new->state = LOCK_STATE_UNLOCKED;
new->wait_time_min = ULLONG_MAX;
list_add(&new->hash_entry, entry);
@@ -188,8 +316,6 @@ alloc_failed:
static char const *input_name = "perf.data";
-static int profile_cpu = -1;
-
struct raw_event_sample {
u32 size;
char data[0];
@@ -198,6 +324,7 @@ struct raw_event_sample {
struct trace_acquire_event {
void *addr;
const char *name;
+ int flag;
};
struct trace_acquired_event {
@@ -241,120 +368,258 @@ struct trace_lock_handler {
struct thread *thread);
};
+static struct lock_seq_stat *get_seq(struct thread_stat *ts, void *addr)
+{
+ struct lock_seq_stat *seq;
+
+ list_for_each_entry(seq, &ts->seq_list, list) {
+ if (seq->addr == addr)
+ return seq;
+ }
+
+ seq = zalloc(sizeof(struct lock_seq_stat));
+ if (!seq)
+ die("Not enough memory\n");
+ seq->state = SEQ_STATE_UNINITIALIZED;
+ seq->addr = addr;
+
+ list_add(&seq->list, &ts->seq_list);
+ return seq;
+}
+
+enum broken_state {
+ BROKEN_ACQUIRE,
+ BROKEN_ACQUIRED,
+ BROKEN_CONTENDED,
+ BROKEN_RELEASE,
+ BROKEN_MAX,
+};
+
+static int bad_hist[BROKEN_MAX];
+
+enum acquire_flags {
+ TRY_LOCK = 1,
+ READ_LOCK = 2,
+};
+
static void
report_lock_acquire_event(struct trace_acquire_event *acquire_event,
struct event *__event __used,
int cpu __used,
- u64 timestamp,
+ u64 timestamp __used,
struct thread *thread __used)
{
- struct lock_stat *st;
+ struct lock_stat *ls;
+ struct thread_stat *ts;
+ struct lock_seq_stat *seq;
+
+ ls = lock_stat_findnew(acquire_event->addr, acquire_event->name);
+ if (ls->discard)
+ return;
- st = lock_stat_findnew(acquire_event->addr, acquire_event->name);
+ ts = thread_stat_findnew(thread->pid);
+ seq = get_seq(ts, acquire_event->addr);
- switch (st->state) {
- case LOCK_STATE_UNLOCKED:
+ switch (seq->state) {
+ case SEQ_STATE_UNINITIALIZED:
+ case SEQ_STATE_RELEASED:
+ if (!acquire_event->flag) {
+ seq->state = SEQ_STATE_ACQUIRING;
+ } else {
+ if (acquire_event->flag & TRY_LOCK)
+ ls->nr_trylock++;
+ if (acquire_event->flag & READ_LOCK)
+ ls->nr_readlock++;
+ seq->state = SEQ_STATE_READ_ACQUIRED;
+ seq->read_count = 1;
+ ls->nr_acquired++;
+ }
+ break;
+ case SEQ_STATE_READ_ACQUIRED:
+ if (acquire_event->flag & READ_LOCK) {
+ seq->read_count++;
+ ls->nr_acquired++;
+ goto end;
+ } else {
+ goto broken;
+ }
break;
- case LOCK_STATE_LOCKED:
+ case SEQ_STATE_ACQUIRED:
+ case SEQ_STATE_ACQUIRING:
+ case SEQ_STATE_CONTENDED:
+broken:
+ /* broken lock sequence, discard it */
+ ls->discard = 1;
+ bad_hist[BROKEN_ACQUIRE]++;
+ list_del(&seq->list);
+ free(seq);
+ goto end;
break;
default:
- BUG_ON(1);
+ BUG_ON("Unknown state of lock sequence found!\n");
break;
}
- st->prev_event_time = timestamp;
+ ls->nr_acquire++;
+ seq->prev_event_time = timestamp;
+end:
+ return;
}
static void
report_lock_acquired_event(struct trace_acquired_event *acquired_event,
struct event *__event __used,
int cpu __used,
- u64 timestamp,
+ u64 timestamp __used,
struct thread *thread __used)
{
- struct lock_stat *st;
+ struct lock_stat *ls;
+ struct thread_stat *ts;
+ struct lock_seq_stat *seq;
+ u64 contended_term;
+
+ ls = lock_stat_findnew(acquired_event->addr, acquired_event->name);
+ if (ls->discard)
+ return;
- st = lock_stat_findnew(acquired_event->addr, acquired_event->name);
+ ts = thread_stat_findnew(thread->pid);
+ seq = get_seq(ts, acquired_event->addr);
- switch (st->state) {
- case LOCK_STATE_UNLOCKED:
- st->state = LOCK_STATE_LOCKED;
- st->nr_acquired++;
+ switch (seq->state) {
+ case SEQ_STATE_UNINITIALIZED:
+ /* orphan event, do nothing */
+ return;
+ case SEQ_STATE_ACQUIRING:
+ break;
+ case SEQ_STATE_CONTENDED:
+ contended_term = timestamp - seq->prev_event_time;
+ ls->wait_time_total += contended_term;
+ if (contended_term < ls->wait_time_min)
+ ls->wait_time_min = contended_term;
+ if (ls->wait_time_max < contended_term)
+ ls->wait_time_max = contended_term;
break;
- case LOCK_STATE_LOCKED:
+ case SEQ_STATE_RELEASED:
+ case SEQ_STATE_ACQUIRED:
+ case SEQ_STATE_READ_ACQUIRED:
+ /* broken lock sequence, discard it */
+ ls->discard = 1;
+ bad_hist[BROKEN_ACQUIRED]++;
+ list_del(&seq->list);
+ free(seq);
+ goto end;
break;
+
default:
- BUG_ON(1);
+ BUG_ON("Unknown state of lock sequence found!\n");
break;
}
- st->prev_event_time = timestamp;
+ seq->state = SEQ_STATE_ACQUIRED;
+ ls->nr_acquired++;
+ seq->prev_event_time = timestamp;
+end:
+ return;
}
static void
report_lock_contended_event(struct trace_contended_event *contended_event,
struct event *__event __used,
int cpu __used,
- u64 timestamp,
+ u64 timestamp __used,
struct thread *thread __used)
{
- struct lock_stat *st;
+ struct lock_stat *ls;
+ struct thread_stat *ts;
+ struct lock_seq_stat *seq;
- st = lock_stat_findnew(contended_event->addr, contended_event->name);
+ ls = lock_stat_findnew(contended_event->addr, contended_event->name);
+ if (ls->discard)
+ return;
- switch (st->state) {
- case LOCK_STATE_UNLOCKED:
+ ts = thread_stat_findnew(thread->pid);
+ seq = get_seq(ts, contended_event->addr);
+
+ switch (seq->state) {
+ case SEQ_STATE_UNINITIALIZED:
+ /* orphan event, do nothing */
+ return;
+ case SEQ_STATE_ACQUIRING:
break;
- case LOCK_STATE_LOCKED:
- st->nr_contended++;
+ case SEQ_STATE_RELEASED:
+ case SEQ_STATE_ACQUIRED:
+ case SEQ_STATE_READ_ACQUIRED:
+ case SEQ_STATE_CONTENDED:
+ /* broken lock sequence, discard it */
+ ls->discard = 1;
+ bad_hist[BROKEN_CONTENDED]++;
+ list_del(&seq->list);
+ free(seq);
+ goto end;
break;
default:
- BUG_ON(1);
+ BUG_ON("Unknown state of lock sequence found!\n");
break;
}
- st->prev_event_time = timestamp;
+ seq->state = SEQ_STATE_CONTENDED;
+ ls->nr_contended++;
+ seq->prev_event_time = timestamp;
+end:
+ return;
}
static void
report_lock_release_event(struct trace_release_event *release_event,
struct event *__event __used,
int cpu __used,
- u64 timestamp,
+ u64 timestamp __used,
struct thread *thread __used)
{
- struct lock_stat *st;
- u64 hold_time;
+ struct lock_stat *ls;
+ struct thread_stat *ts;
+ struct lock_seq_stat *seq;
- st = lock_stat_findnew(release_event->addr, release_event->name);
+ ls = lock_stat_findnew(release_event->addr, release_event->name);
+ if (ls->discard)
+ return;
- switch (st->state) {
- case LOCK_STATE_UNLOCKED:
- break;
- case LOCK_STATE_LOCKED:
- st->state = LOCK_STATE_UNLOCKED;
- hold_time = timestamp - st->prev_event_time;
+ ts = thread_stat_findnew(thread->pid);
+ seq = get_seq(ts, release_event->addr);
- if (timestamp < st->prev_event_time) {
- /* terribly, this can happen... */
+ switch (seq->state) {
+ case SEQ_STATE_UNINITIALIZED:
+ goto end;
+ break;
+ case SEQ_STATE_ACQUIRED:
+ break;
+ case SEQ_STATE_READ_ACQUIRED:
+ seq->read_count--;
+ BUG_ON(seq->read_count < 0);
+ if (!seq->read_count) {
+ ls->nr_release++;
goto end;
}
-
- if (st->wait_time_min > hold_time)
- st->wait_time_min = hold_time;
- if (st->wait_time_max < hold_time)
- st->wait_time_max = hold_time;
- st->wait_time_total += hold_time;
-
- st->nr_release++;
+ break;
+ case SEQ_STATE_ACQUIRING:
+ case SEQ_STATE_CONTENDED:
+ case SEQ_STATE_RELEASED:
+ /* broken lock sequence, discard it */
+ ls->discard = 1;
+ bad_hist[BROKEN_RELEASE]++;
+ goto free_seq;
break;
default:
- BUG_ON(1);
+ BUG_ON("Unknown state of lock sequence found!\n");
break;
}
+ ls->nr_release++;
+free_seq:
+ list_del(&seq->list);
+ free(seq);
end:
- st->prev_event_time = timestamp;
+ return;
}
/* lock oriented handlers */
@@ -381,6 +646,7 @@ process_lock_acquire_event(void *data,
tmp = raw_field_value(event, "lockdep_addr", data);
memcpy(&acquire_event.addr, &tmp, sizeof(void *));
acquire_event.name = (char *)raw_field_ptr(event, "name", data);
+ acquire_event.flag = (int)raw_field_value(event, "flag", data);
if (trace_handler->acquire_event)
trace_handler->acquire_event(&acquire_event, event, cpu, timestamp, thread);
@@ -441,8 +707,7 @@ process_lock_release_event(void *data,
}
static void
-process_raw_event(void *data, int cpu,
- u64 timestamp, struct thread *thread)
+process_raw_event(void *data, int cpu, u64 timestamp, struct thread *thread)
{
struct event *event;
int type;
@@ -460,173 +725,19 @@ process_raw_event(void *data, int cpu,
process_lock_release_event(data, event, cpu, timestamp, thread);
}
-struct raw_event_queue {
- u64 timestamp;
- int cpu;
- void *data;
- struct thread *thread;
- struct list_head list;
-};
-
-static LIST_HEAD(raw_event_head);
-
-#define FLUSH_PERIOD (5 * NSEC_PER_SEC)
-
-static u64 flush_limit = ULLONG_MAX;
-static u64 last_flush = 0;
-struct raw_event_queue *last_inserted;
-
-static void flush_raw_event_queue(u64 limit)
-{
- struct raw_event_queue *tmp, *iter;
-
- list_for_each_entry_safe(iter, tmp, &raw_event_head, list) {
- if (iter->timestamp > limit)
- return;
-
- if (iter == last_inserted)
- last_inserted = NULL;
-
- process_raw_event(iter->data, iter->cpu, iter->timestamp,
- iter->thread);
-
- last_flush = iter->timestamp;
- list_del(&iter->list);
- free(iter->data);
- free(iter);
- }
-}
-
-static void __queue_raw_event_end(struct raw_event_queue *new)
-{
- struct raw_event_queue *iter;
-
- list_for_each_entry_reverse(iter, &raw_event_head, list) {
- if (iter->timestamp < new->timestamp) {
- list_add(&new->list, &iter->list);
- return;
- }
- }
-
- list_add(&new->list, &raw_event_head);
-}
-
-static void __queue_raw_event_before(struct raw_event_queue *new,
- struct raw_event_queue *iter)
+static void print_bad_events(int bad, int total)
{
- list_for_each_entry_continue_reverse(iter, &raw_event_head, list) {
- if (iter->timestamp < new->timestamp) {
- list_add(&new->list, &iter->list);
- return;
- }
- }
-
- list_add(&new->list, &raw_event_head);
-}
-
-static void __queue_raw_event_after(struct raw_event_queue *new,
- struct raw_event_queue *iter)
-{
- list_for_each_entry_continue(iter, &raw_event_head, list) {
- if (iter->timestamp > new->timestamp) {
- list_add_tail(&new->list, &iter->list);
- return;
- }
- }
- list_add_tail(&new->list, &raw_event_head);
-}
-
-/* The queue is ordered by time */
-static void __queue_raw_event(struct raw_event_queue *new)
-{
- if (!last_inserted) {
- __queue_raw_event_end(new);
- return;
- }
-
- /*
- * Most of the time the current event has a timestamp
- * very close to the last event inserted, unless we just switched
- * to another event buffer. Having a sorting based on a list and
- * on the last inserted event that is close to the current one is
- * probably more efficient than an rbtree based sorting.
- */
- if (last_inserted->timestamp >= new->timestamp)
- __queue_raw_event_before(new, last_inserted);
- else
- __queue_raw_event_after(new, last_inserted);
-}
-
-static void queue_raw_event(void *data, int raw_size, int cpu,
- u64 timestamp, struct thread *thread)
-{
- struct raw_event_queue *new;
-
- if (flush_limit == ULLONG_MAX)
- flush_limit = timestamp + FLUSH_PERIOD;
-
- if (timestamp < last_flush) {
- printf("Warning: Timestamp below last timeslice flush\n");
- return;
- }
-
- new = malloc(sizeof(*new));
- if (!new)
- die("Not enough memory\n");
-
- new->timestamp = timestamp;
- new->cpu = cpu;
- new->thread = thread;
-
- new->data = malloc(raw_size);
- if (!new->data)
- die("Not enough memory\n");
-
- memcpy(new->data, data, raw_size);
-
- __queue_raw_event(new);
- last_inserted = new;
-
- /*
- * We want to have a slice of events covering 2 * FLUSH_PERIOD
- * If FLUSH_PERIOD is big enough, it ensures every events that occured
- * in the first half of the timeslice have all been buffered and there
- * are none remaining (we need that because of the weakly ordered
- * event recording we have). Then once we reach the 2 * FLUSH_PERIOD
- * timeslice, we flush the first half to be gentle with the memory
- * (the second half can still get new events in the middle, so wait
- * another period to flush it)
- */
- if (new->timestamp > flush_limit &&
- new->timestamp - flush_limit > FLUSH_PERIOD) {
- flush_limit += FLUSH_PERIOD;
- flush_raw_event_queue(flush_limit);
- }
-}
-
-static int process_sample_event(event_t *event, struct perf_session *session)
-{
- struct thread *thread;
- struct sample_data data;
-
- bzero(&data, sizeof(struct sample_data));
- event__parse_sample(event, session->sample_type, &data);
- thread = perf_session__findnew(session, data.pid);
-
- if (thread == NULL) {
- pr_debug("problem processing %d event, skipping it.\n",
- event->header.type);
- return -1;
- }
-
- dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
-
- if (profile_cpu != -1 && profile_cpu != (int) data.cpu)
- return 0;
-
- queue_raw_event(data.raw_data, data.raw_size, data.cpu, data.time, thread);
-
- return 0;
+ /* Output for debug, this has to be removed */
+ int i;
+ const char *name[4] =
+ { "acquire", "acquired", "contended", "release" };
+
+ pr_info("\n=== output for debug===\n\n");
+ pr_info("bad: %d, total: %d\n", bad, total);
+ pr_info("bad rate: %f %%\n", (double)bad / (double)total * 100);
+ pr_info("histogram of events caused bad sequence\n");
+ for (i = 0; i < BROKEN_MAX; i++)
+ pr_info(" %10s: %d\n", name[i], bad_hist[i]);
}
/* TODO: various way to print, coloring, nano or milli sec */
@@ -634,26 +745,30 @@ static void print_result(void)
{
struct lock_stat *st;
char cut_name[20];
+ int bad, total;
- printf("%18s ", "ID");
- printf("%20s ", "Name");
- printf("%10s ", "acquired");
- printf("%10s ", "contended");
+ pr_info("%20s ", "Name");
+ pr_info("%10s ", "acquired");
+ pr_info("%10s ", "contended");
- printf("%15s ", "total wait (ns)");
- printf("%15s ", "max wait (ns)");
- printf("%15s ", "min wait (ns)");
+ pr_info("%15s ", "total wait (ns)");
+ pr_info("%15s ", "max wait (ns)");
+ pr_info("%15s ", "min wait (ns)");
- printf("\n\n");
+ pr_info("\n\n");
+ bad = total = 0;
while ((st = pop_from_result())) {
+ total++;
+ if (st->discard) {
+ bad++;
+ continue;
+ }
bzero(cut_name, 20);
- printf("%p ", st->addr);
-
if (strlen(st->name) < 16) {
/* output raw name */
- printf("%20s ", st->name);
+ pr_info("%20s ", st->name);
} else {
strncpy(cut_name, st->name, 16);
cut_name[16] = '.';
@@ -661,18 +776,40 @@ static void print_result(void)
cut_name[18] = '.';
cut_name[19] = '\0';
/* cut off name for saving output style */
- printf("%20s ", cut_name);
+ pr_info("%20s ", cut_name);
}
- printf("%10u ", st->nr_acquired);
- printf("%10u ", st->nr_contended);
+ pr_info("%10u ", st->nr_acquired);
+ pr_info("%10u ", st->nr_contended);
- printf("%15llu ", st->wait_time_total);
- printf("%15llu ", st->wait_time_max);
- printf("%15llu ", st->wait_time_min == ULLONG_MAX ?
+ pr_info("%15llu ", st->wait_time_total);
+ pr_info("%15llu ", st->wait_time_max);
+ pr_info("%15llu ", st->wait_time_min == ULLONG_MAX ?
0 : st->wait_time_min);
- printf("\n");
+ pr_info("\n");
}
+
+ print_bad_events(bad, total);
+}
+
+static int info_threads;
+static int info_map;
+
+static void dump_threads(void)
+{
+ struct thread_stat *st;
+ struct rb_node *node;
+ struct thread *t;
+
+ pr_info("%10s: comm\n", "Thread ID");
+
+ node = rb_first(&thread_stats);
+ while (node) {
+ st = container_of(node, struct thread_stat, rb);
+ t = perf_session__findnew(session, st->tid);
+ pr_info("%10d: %s\n", st->tid, t->comm);
+ node = rb_next(node);
+ };
}
static void dump_map(void)
@@ -680,23 +817,53 @@ static void dump_map(void)
unsigned int i;
struct lock_stat *st;
+ pr_info("Address of instance: name of class\n");
for (i = 0; i < LOCKHASH_SIZE; i++) {
list_for_each_entry(st, &lockhash_table[i], hash_entry) {
- printf("%p: %s\n", st->addr, st->name);
+ pr_info(" %p: %s\n", st->addr, st->name);
}
}
}
+static void dump_info(void)
+{
+ if (info_threads)
+ dump_threads();
+ else if (info_map)
+ dump_map();
+ else
+ die("Unknown type of information\n");
+}
+
+static int process_sample_event(event_t *self, struct perf_session *s)
+{
+ struct sample_data data;
+ struct thread *thread;
+
+ bzero(&data, sizeof(data));
+ event__parse_sample(self, s->sample_type, &data);
+
+ thread = perf_session__findnew(s, data.tid);
+ if (thread == NULL) {
+ pr_debug("problem processing %d event, skipping it.\n",
+ self->header.type);
+ return -1;
+ }
+
+ process_raw_event(data.raw_data, data.cpu, data.time, thread);
+
+ return 0;
+}
+
static struct perf_event_ops eops = {
.sample = process_sample_event,
.comm = event__process_comm,
+ .ordered_samples = true,
};
-static struct perf_session *session;
-
static int read_events(void)
{
- session = perf_session__new(input_name, O_RDONLY, 0);
+ session = perf_session__new(input_name, O_RDONLY, 0, false);
if (!session)
die("Initializing perf session failed\n");
@@ -720,7 +887,6 @@ static void __cmd_report(void)
setup_pager();
select_key();
read_events();
- flush_raw_event_queue(ULLONG_MAX);
sort_result();
print_result();
}
@@ -737,6 +903,19 @@ static const struct option report_options[] = {
OPT_END()
};
+static const char * const info_usage[] = {
+ "perf lock info [<options>]",
+ NULL
+};
+
+static const struct option info_options[] = {
+ OPT_BOOLEAN('t', "threads", &info_threads,
+ "dump thread list in perf.data"),
+ OPT_BOOLEAN('m', "map", &info_map,
+ "map of lock instances (name:address table)"),
+ OPT_END()
+};
+
static const char * const lock_usage[] = {
"perf lock [<options>] {record|trace|report}",
NULL
@@ -744,14 +923,13 @@ static const char * const lock_usage[] = {
static const struct option lock_options[] = {
OPT_STRING('i', "input", &input_name, "file", "input file name"),
- OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
+ OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"),
OPT_END()
};
static const char *record_args[] = {
"record",
- "-a",
"-R",
"-f",
"-m", "1024",
@@ -808,12 +986,18 @@ int cmd_lock(int argc, const char **argv, const char *prefix __used)
} else if (!strcmp(argv[0], "trace")) {
/* Aliased to 'perf trace' */
return cmd_trace(argc, argv, prefix);
- } else if (!strcmp(argv[0], "map")) {
+ } else if (!strcmp(argv[0], "info")) {
+ if (argc) {
+ argc = parse_options(argc, argv,
+ info_options, info_usage, 0);
+ if (argc)
+ usage_with_options(info_usage, info_options);
+ }
/* recycling report_lock_ops */
trace_handler = &report_lock_ops;
setup_pager();
read_events();
- dump_map();
+ dump_info();
} else {
usage_with_options(lock_usage, lock_options);
}
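For orientation only (this block is an editor's sketch, not part of the patch): the queueing code deleted above kept raw events sorted by timestamp before passing them to process_raw_event(), scanning from the end of the list because consecutive events are usually close in time; builtin-lock.c now relies on the generic ordered_samples support instead. The struct and function names below are invented for the sketch.

#include <stdio.h>
#include <stdlib.h>

struct ev {
	unsigned long long ts;
	struct ev *prev, *next;
};

static struct ev *head, *tail;

/* events usually arrive almost in order, so scanning back from the tail
 * normally stops after a step or two */
static void insert_ordered(struct ev *new)
{
	struct ev *pos = tail;

	while (pos && pos->ts > new->ts)
		pos = pos->prev;

	new->prev = pos;
	new->next = pos ? pos->next : head;
	if (new->next)
		new->next->prev = new;
	else
		tail = new;
	if (pos)
		pos->next = new;
	else
		head = new;
}

int main(void)
{
	unsigned long long t[] = { 5, 3, 9, 8 };
	unsigned int i;

	for (i = 0; i < 4; i++) {
		struct ev *e = calloc(1, sizeof(*e));
		e->ts = t[i];
		insert_ordered(e);
	}
	for (struct ev *e = head; e; e = e->next)
		printf("%llu ", e->ts);	/* prints: 3 5 8 9 */
	printf("\n");
	return 0;
}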
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 152d6c9b1fa4..61c6d70732c9 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -36,13 +36,10 @@
#include "builtin.h"
#include "util/util.h"
#include "util/strlist.h"
-#include "util/event.h"
+#include "util/symbol.h"
#include "util/debug.h"
#include "util/debugfs.h"
-#include "util/symbol.h"
-#include "util/thread.h"
#include "util/parse-options.h"
-#include "util/parse-events.h" /* For debugfs_path */
#include "util/probe-finder.h"
#include "util/probe-event.h"
@@ -50,103 +47,84 @@
/* Session management structure */
static struct {
- bool need_dwarf;
bool list_events;
bool force_add;
bool show_lines;
- int nr_probe;
- struct probe_point probes[MAX_PROBES];
+ int nevents;
+ struct perf_probe_event events[MAX_PROBES];
struct strlist *dellist;
- struct map_groups kmap_groups;
- struct map *kmaps[MAP__NR_TYPES];
struct line_range line_range;
-} session;
+ int max_probe_points;
+} params;
/* Parse an event definition. Note that any error must die. */
-static void parse_probe_event(const char *str)
+static int parse_probe_event(const char *str)
{
- struct probe_point *pp = &session.probes[session.nr_probe];
+ struct perf_probe_event *pev = &params.events[params.nevents];
+ int ret;
- pr_debug("probe-definition(%d): %s\n", session.nr_probe, str);
- if (++session.nr_probe == MAX_PROBES)
+ pr_debug("probe-definition(%d): %s\n", params.nevents, str);
+ if (++params.nevents == MAX_PROBES)
die("Too many probes (> %d) are specified.", MAX_PROBES);
- /* Parse perf-probe event into probe_point */
- parse_perf_probe_event(str, pp, &session.need_dwarf);
+ /* Parse a perf-probe command into event */
+ ret = parse_perf_probe_command(str, pev);
+ pr_debug("%d arguments\n", pev->nargs);
- pr_debug("%d arguments\n", pp->nr_args);
+ return ret;
}
-static void parse_probe_event_argv(int argc, const char **argv)
+static int parse_probe_event_argv(int argc, const char **argv)
{
- int i, len;
+ int i, len, ret;
char *buf;
/* Bind up rest arguments */
len = 0;
for (i = 0; i < argc; i++)
len += strlen(argv[i]) + 1;
- buf = zalloc(len + 1);
- if (!buf)
- die("Failed to allocate memory for binding arguments.");
+ buf = xzalloc(len + 1);
len = 0;
for (i = 0; i < argc; i++)
len += sprintf(&buf[len], "%s ", argv[i]);
- parse_probe_event(buf);
+ ret = parse_probe_event(buf);
free(buf);
+ return ret;
}
static int opt_add_probe_event(const struct option *opt __used,
const char *str, int unset __used)
{
if (str)
- parse_probe_event(str);
- return 0;
+ return parse_probe_event(str);
+ else
+ return 0;
}
static int opt_del_probe_event(const struct option *opt __used,
const char *str, int unset __used)
{
if (str) {
- if (!session.dellist)
- session.dellist = strlist__new(true, NULL);
- strlist__add(session.dellist, str);
+ if (!params.dellist)
+ params.dellist = strlist__new(true, NULL);
+ strlist__add(params.dellist, str);
}
return 0;
}
-/* Currently just checking function name from symbol map */
-static void evaluate_probe_point(struct probe_point *pp)
-{
- struct symbol *sym;
- sym = map__find_symbol_by_name(session.kmaps[MAP__FUNCTION],
- pp->function, NULL);
- if (!sym)
- die("Kernel symbol \'%s\' not found - probe not added.",
- pp->function);
-}
-
-#ifndef NO_DWARF_SUPPORT
-static int open_vmlinux(void)
-{
- if (map__load(session.kmaps[MAP__FUNCTION], NULL) < 0) {
- pr_debug("Failed to load kernel map.\n");
- return -EINVAL;
- }
- pr_debug("Try to open %s\n",
- session.kmaps[MAP__FUNCTION]->dso->long_name);
- return open(session.kmaps[MAP__FUNCTION]->dso->long_name, O_RDONLY);
-}
-
+#ifdef DWARF_SUPPORT
static int opt_show_lines(const struct option *opt __used,
const char *str, int unset __used)
{
+ int ret = 0;
+
if (str)
- parse_line_range_desc(str, &session.line_range);
- INIT_LIST_HEAD(&session.line_range.line_list);
- session.show_lines = true;
- return 0;
+ ret = parse_line_range_desc(str, &params.line_range);
+ INIT_LIST_HEAD(&params.line_range.line_list);
+ params.show_lines = true;
+
+ return ret;
}
#endif
@@ -155,29 +133,25 @@ static const char * const probe_usage[] = {
"perf probe [<options>] --add 'PROBEDEF' [--add 'PROBEDEF' ...]",
"perf probe [<options>] --del '[GROUP:]EVENT' ...",
"perf probe --list",
-#ifndef NO_DWARF_SUPPORT
+#ifdef DWARF_SUPPORT
"perf probe --line 'LINEDESC'",
#endif
NULL
};
static const struct option options[] = {
- OPT_BOOLEAN('v', "verbose", &verbose,
+ OPT_INCR('v', "verbose", &verbose,
"be more verbose (show parsed arguments, etc)"),
-#ifndef NO_DWARF_SUPPORT
- OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
- "file", "vmlinux pathname"),
-#endif
- OPT_BOOLEAN('l', "list", &session.list_events,
+ OPT_BOOLEAN('l', "list", &params.list_events,
"list up current probe events"),
OPT_CALLBACK('d', "del", NULL, "[GROUP:]EVENT", "delete a probe event.",
opt_del_probe_event),
OPT_CALLBACK('a', "add", NULL,
-#ifdef NO_DWARF_SUPPORT
- "[EVENT=]FUNC[+OFF|%return] [ARG ...]",
-#else
+#ifdef DWARF_SUPPORT
"[EVENT=]FUNC[@SRC][+OFF|%return|:RL|;PT]|SRC:AL|SRC;PT"
- " [ARG ...]",
+ " [[NAME=]ARG ...]",
+#else
+ "[EVENT=]FUNC[+OFF|%return] [[NAME=]ARG ...]",
#endif
"probe point definition, where\n"
"\t\tGROUP:\tGroup name (optional)\n"
@@ -185,51 +159,35 @@ static const struct option options[] = {
"\t\tFUNC:\tFunction name\n"
"\t\tOFF:\tOffset from function entry (in byte)\n"
"\t\t%return:\tPut the probe at function return\n"
-#ifdef NO_DWARF_SUPPORT
- "\t\tARG:\tProbe argument (only \n"
-#else
+#ifdef DWARF_SUPPORT
"\t\tSRC:\tSource code path\n"
"\t\tRL:\tRelative line number from function entry.\n"
"\t\tAL:\tAbsolute line number in file.\n"
"\t\tPT:\tLazy expression of line code.\n"
"\t\tARG:\tProbe argument (local variable name or\n"
-#endif
"\t\t\tkprobe-tracer argument format.)\n",
+#else
+ "\t\tARG:\tProbe argument (kprobe-tracer argument format.)\n",
+#endif
opt_add_probe_event),
- OPT_BOOLEAN('f', "force", &session.force_add, "forcibly add events"
+ OPT_BOOLEAN('f', "force", &params.force_add, "forcibly add events"
" with existing name"),
-#ifndef NO_DWARF_SUPPORT
+#ifdef DWARF_SUPPORT
OPT_CALLBACK('L', "line", NULL,
- "FUNC[:RLN[+NUM|:RLN2]]|SRC:ALN[+NUM|:ALN2]",
+ "FUNC[:RLN[+NUM|-RLN2]]|SRC:ALN[+NUM|-ALN2]",
"Show source code lines.", opt_show_lines),
+ OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
+ "file", "vmlinux pathname"),
#endif
+ OPT__DRY_RUN(&probe_event_dry_run),
+ OPT_INTEGER('\0', "max-probes", &params.max_probe_points,
+ "Set how many probe points can be found for a probe."),
OPT_END()
};
-/* Initialize symbol maps for vmlinux */
-static void init_vmlinux(void)
-{
- symbol_conf.sort_by_name = true;
- if (symbol_conf.vmlinux_name == NULL)
- symbol_conf.try_vmlinux_path = true;
- else
- pr_debug("Use vmlinux: %s\n", symbol_conf.vmlinux_name);
- if (symbol__init() < 0)
- die("Failed to init symbol map.");
-
- map_groups__init(&session.kmap_groups);
- if (map_groups__create_kernel_maps(&session.kmap_groups,
- session.kmaps) < 0)
- die("Failed to create kernel maps.");
-}
-
int cmd_probe(int argc, const char **argv, const char *prefix __used)
{
- int i, ret;
-#ifndef NO_DWARF_SUPPORT
- int fd;
-#endif
- struct probe_point *pp;
+ int ret;
argc = parse_options(argc, argv, options, probe_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
@@ -238,123 +196,69 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
pr_warning(" Error: '-' is not supported.\n");
usage_with_options(probe_usage, options);
}
- parse_probe_event_argv(argc, argv);
+ ret = parse_probe_event_argv(argc, argv);
+ if (ret < 0) {
+ pr_err(" Error: Parse Error. (%d)\n", ret);
+ return ret;
+ }
}
- if ((!session.nr_probe && !session.dellist && !session.list_events &&
- !session.show_lines))
- usage_with_options(probe_usage, options);
+ if (params.max_probe_points == 0)
+ params.max_probe_points = MAX_PROBES;
- if (debugfs_valid_mountpoint(debugfs_path) < 0)
- die("Failed to find debugfs path.");
+ if ((!params.nevents && !params.dellist && !params.list_events &&
+ !params.show_lines))
+ usage_with_options(probe_usage, options);
- if (session.list_events) {
- if (session.nr_probe != 0 || session.dellist) {
- pr_warning(" Error: Don't use --list with"
- " --add/--del.\n");
+ if (params.list_events) {
+ if (params.nevents != 0 || params.dellist) {
+ pr_err(" Error: Don't use --list with --add/--del.\n");
usage_with_options(probe_usage, options);
}
- if (session.show_lines) {
- pr_warning(" Error: Don't use --list with --line.\n");
+ if (params.show_lines) {
+ pr_err(" Error: Don't use --list with --line.\n");
usage_with_options(probe_usage, options);
}
- show_perf_probe_events();
- return 0;
+ ret = show_perf_probe_events();
+ if (ret < 0)
+ pr_err(" Error: Failed to show event list. (%d)\n",
+ ret);
+ return ret;
}
-#ifndef NO_DWARF_SUPPORT
- if (session.show_lines) {
- if (session.nr_probe != 0 || session.dellist) {
+#ifdef DWARF_SUPPORT
+ if (params.show_lines) {
+ if (params.nevents != 0 || params.dellist) {
pr_warning(" Error: Don't use --line with"
" --add/--del.\n");
usage_with_options(probe_usage, options);
}
- init_vmlinux();
- fd = open_vmlinux();
- if (fd < 0)
- die("Could not open debuginfo file.");
- ret = find_line_range(fd, &session.line_range);
- if (ret <= 0)
- die("Source line is not found.\n");
- close(fd);
- show_line_range(&session.line_range);
- return 0;
- }
-#endif
- if (session.dellist) {
- del_trace_kprobe_events(session.dellist);
- strlist__delete(session.dellist);
- if (session.nr_probe == 0)
- return 0;
+ ret = show_line_range(&params.line_range);
+ if (ret < 0)
+ pr_err(" Error: Failed to show lines. (%d)\n", ret);
+ return ret;
}
+#endif
- /* Add probes */
- init_vmlinux();
-
- if (session.need_dwarf)
-#ifdef NO_DWARF_SUPPORT
- die("Debuginfo-analysis is not supported");
-#else /* !NO_DWARF_SUPPORT */
- pr_debug("Some probes require debuginfo.\n");
-
- fd = open_vmlinux();
- if (fd < 0) {
- if (session.need_dwarf)
- die("Could not open debuginfo file.");
-
- pr_debug("Could not open vmlinux/module file."
- " Try to use symbols.\n");
- goto end_dwarf;
- }
-
- /* Searching probe points */
- for (i = 0; i < session.nr_probe; i++) {
- pp = &session.probes[i];
- if (pp->found)
- continue;
-
- lseek(fd, SEEK_SET, 0);
- ret = find_probe_point(fd, pp);
- if (ret > 0)
- continue;
- if (ret == 0) { /* No error but failed to find probe point. */
- synthesize_perf_probe_point(pp);
- die("Probe point '%s' not found. - probe not added.",
- pp->probes[0]);
- }
- /* Error path */
- if (session.need_dwarf) {
- if (ret == -ENOENT)
- pr_warning("No dwarf info found in the vmlinux - please rebuild with CONFIG_DEBUG_INFO=y.\n");
- die("Could not analyze debuginfo.");
+ if (params.dellist) {
+ ret = del_perf_probe_events(params.dellist);
+ strlist__delete(params.dellist);
+ if (ret < 0) {
+ pr_err(" Error: Failed to delete events. (%d)\n", ret);
+ return ret;
}
- pr_debug("An error occurred in debuginfo analysis."
- " Try to use symbols.\n");
- break;
}
- close(fd);
-
-end_dwarf:
-#endif /* !NO_DWARF_SUPPORT */
- /* Synthesize probes without dwarf */
- for (i = 0; i < session.nr_probe; i++) {
- pp = &session.probes[i];
- if (pp->found) /* This probe is already found. */
- continue;
-
- evaluate_probe_point(pp);
- ret = synthesize_trace_kprobe_event(pp);
- if (ret == -E2BIG)
- die("probe point definition becomes too long.");
- else if (ret < 0)
- die("Failed to synthesize a probe point.");
+ if (params.nevents) {
+ ret = add_perf_probe_events(params.events, params.nevents,
+ params.force_add,
+ params.max_probe_points);
+ if (ret < 0) {
+ pr_err(" Error: Failed to add events. (%d)\n", ret);
+ return ret;
+ }
}
-
- /* Settng up probe points */
- add_trace_kprobe_events(session.probes, session.nr_probe,
- session.force_add);
return 0;
}
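As an aside (editor's sketch, not from the patch): the probe helpers above used to die() on any failure; after the rework they hand a negative error code back to cmd_probe(), which prints the message and returns it. parse_thing() below is an invented stand-in for parse_probe_event()/parse_probe_event_argv() to show the shape of that conversion.

#include <stdio.h>

/* new style: report the error code upward and let the caller decide */
static int parse_thing(const char *s)
{
	if (!s || !*s)
		return -22;		/* i.e. -EINVAL */
	/* real parsing would happen here */
	return 0;
}

int main(void)
{
	int ret = parse_thing("");

	if (ret < 0) {
		fprintf(stderr, " Error: Parse Error. (%d)\n", ret);
		return 1;
	}
	return 0;
}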
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 3b8b6387c47c..6b77b285fe10 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -15,7 +15,6 @@
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
-#include "util/string.h"
#include "util/header.h"
#include "util/event.h"
@@ -27,31 +26,41 @@
#include <unistd.h>
#include <sched.h>
-static int fd[MAX_NR_CPUS][MAX_COUNTERS];
+enum write_mode_t {
+ WRITE_FORCE,
+ WRITE_APPEND
+};
+
+static int *fd[MAX_NR_CPUS][MAX_COUNTERS];
+static unsigned int user_interval = UINT_MAX;
static long default_interval = 0;
static int nr_cpus = 0;
static unsigned int page_size;
static unsigned int mmap_pages = 128;
+static unsigned int user_freq = UINT_MAX;
static int freq = 1000;
static int output;
+static int pipe_output = 0;
static const char *output_name = "perf.data";
static int group = 0;
static unsigned int realtime_prio = 0;
-static int raw_samples = 0;
-static int system_wide = 0;
+static bool raw_samples = false;
+static bool system_wide = false;
static int profile_cpu = -1;
static pid_t target_pid = -1;
+static pid_t target_tid = -1;
+static pid_t *all_tids = NULL;
+static int thread_num = 0;
static pid_t child_pid = -1;
-static int inherit = 1;
-static int force = 0;
-static int append_file = 0;
-static int call_graph = 0;
-static int inherit_stat = 0;
-static int no_samples = 0;
-static int sample_address = 0;
-static int multiplex = 0;
+static bool inherit = true;
+static enum write_mode_t write_mode = WRITE_FORCE;
+static bool call_graph = false;
+static bool inherit_stat = false;
+static bool no_samples = false;
+static bool sample_address = false;
+static bool multiplex = false;
static int multiplex_fd = -1;
static long samples = 0;
@@ -60,7 +69,7 @@ static struct timeval this_read;
static u64 bytes_written = 0;
-static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS];
+static struct pollfd *event_array;
static int nr_poll = 0;
static int nr_cpu = 0;
@@ -77,7 +86,7 @@ struct mmap_data {
unsigned int prev;
};
-static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS];
+static struct mmap_data *mmap_array[MAX_NR_CPUS][MAX_COUNTERS];
static unsigned long mmap_read_head(struct mmap_data *md)
{
@@ -101,6 +110,11 @@ static void mmap_write_tail(struct mmap_data *md, unsigned long tail)
pc->data_tail = tail;
}
+static void advance_output(size_t size)
+{
+ bytes_written += size;
+}
+
static void write_output(void *buf, size_t size)
{
while (size) {
@@ -225,12 +239,13 @@ static struct perf_header_attr *get_header_attr(struct perf_event_attr *a, int n
return h_attr;
}
-static void create_counter(int counter, int cpu, pid_t pid)
+static void create_counter(int counter, int cpu)
{
char *filter = filters[counter];
struct perf_event_attr *attr = attrs + counter;
struct perf_header_attr *h_attr;
int track = !counter; /* only the first counter needs these */
+ int thread_index;
int ret;
struct {
u64 count;
@@ -248,10 +263,19 @@ static void create_counter(int counter, int cpu, pid_t pid)
if (nr_counters > 1)
attr->sample_type |= PERF_SAMPLE_ID;
- if (freq) {
- attr->sample_type |= PERF_SAMPLE_PERIOD;
- attr->freq = 1;
- attr->sample_freq = freq;
+ /*
+ * We default some events to a 1 default interval. But keep
+ * it a weak assumption overridable by the user.
+ */
+ if (!attr->sample_period || (user_freq != UINT_MAX &&
+ user_interval != UINT_MAX)) {
+ if (freq) {
+ attr->sample_type |= PERF_SAMPLE_PERIOD;
+ attr->freq = 1;
+ attr->sample_freq = freq;
+ } else {
+ attr->sample_period = default_interval;
+ }
}
if (no_samples)
@@ -275,118 +299,129 @@ static void create_counter(int counter, int cpu, pid_t pid)
attr->mmap = track;
attr->comm = track;
attr->inherit = inherit;
- attr->disabled = 1;
+ if (target_pid == -1 && !system_wide) {
+ attr->disabled = 1;
+ attr->enable_on_exec = 1;
+ }
+ for (thread_index = 0; thread_index < thread_num; thread_index++) {
try_again:
- fd[nr_cpu][counter] = sys_perf_event_open(attr, pid, cpu, group_fd, 0);
-
- if (fd[nr_cpu][counter] < 0) {
- int err = errno;
-
- if (err == EPERM || err == EACCES)
- die("Permission error - are you root?\n");
- else if (err == ENODEV && profile_cpu != -1)
- die("No such device - did you specify an out-of-range profile CPU?\n");
+ fd[nr_cpu][counter][thread_index] = sys_perf_event_open(attr,
+ all_tids[thread_index], cpu, group_fd, 0);
+
+ if (fd[nr_cpu][counter][thread_index] < 0) {
+ int err = errno;
+
+ if (err == EPERM || err == EACCES)
+ die("Permission error - are you root?\n"
+ "\t Consider tweaking"
+ " /proc/sys/kernel/perf_event_paranoid.\n");
+ else if (err == ENODEV && profile_cpu != -1) {
+ die("No such device - did you specify"
+ " an out-of-range profile CPU?\n");
+ }
- /*
- * If it's cycles then fall back to hrtimer
- * based cpu-clock-tick sw counter, which
- * is always available even if no PMU support:
- */
- if (attr->type == PERF_TYPE_HARDWARE
- && attr->config == PERF_COUNT_HW_CPU_CYCLES) {
-
- if (verbose)
- warning(" ... trying to fall back to cpu-clock-ticks\n");
- attr->type = PERF_TYPE_SOFTWARE;
- attr->config = PERF_COUNT_SW_CPU_CLOCK;
- goto try_again;
- }
- printf("\n");
- error("perfcounter syscall returned with %d (%s)\n",
- fd[nr_cpu][counter], strerror(err));
+ /*
+ * If it's cycles then fall back to hrtimer
+ * based cpu-clock-tick sw counter, which
+ * is always available even if no PMU support:
+ */
+ if (attr->type == PERF_TYPE_HARDWARE
+ && attr->config == PERF_COUNT_HW_CPU_CYCLES) {
+
+ if (verbose)
+ warning(" ... trying to fall back to cpu-clock-ticks\n");
+ attr->type = PERF_TYPE_SOFTWARE;
+ attr->config = PERF_COUNT_SW_CPU_CLOCK;
+ goto try_again;
+ }
+ printf("\n");
+ error("perfcounter syscall returned with %d (%s)\n",
+ fd[nr_cpu][counter][thread_index], strerror(err));
#if defined(__i386__) || defined(__x86_64__)
- if (attr->type == PERF_TYPE_HARDWARE && err == EOPNOTSUPP)
- die("No hardware sampling interrupt available. No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.\n");
+ if (attr->type == PERF_TYPE_HARDWARE && err == EOPNOTSUPP)
+ die("No hardware sampling interrupt available."
+ " No APIC? If so then you can boot the kernel"
+ " with the \"lapic\" boot parameter to"
+ " force-enable it.\n");
#endif
- die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
- exit(-1);
- }
+ die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
+ exit(-1);
+ }
- h_attr = get_header_attr(attr, counter);
- if (h_attr == NULL)
- die("nomem\n");
+ h_attr = get_header_attr(attr, counter);
+ if (h_attr == NULL)
+ die("nomem\n");
- if (!file_new) {
- if (memcmp(&h_attr->attr, attr, sizeof(*attr))) {
- fprintf(stderr, "incompatible append\n");
- exit(-1);
+ if (!file_new) {
+ if (memcmp(&h_attr->attr, attr, sizeof(*attr))) {
+ fprintf(stderr, "incompatible append\n");
+ exit(-1);
+ }
}
- }
- if (read(fd[nr_cpu][counter], &read_data, sizeof(read_data)) == -1) {
- perror("Unable to read perf file descriptor\n");
- exit(-1);
- }
+ if (read(fd[nr_cpu][counter][thread_index], &read_data, sizeof(read_data)) == -1) {
+ perror("Unable to read perf file descriptor\n");
+ exit(-1);
+ }
- if (perf_header_attr__add_id(h_attr, read_data.id) < 0) {
- pr_warning("Not enough memory to add id\n");
- exit(-1);
- }
+ if (perf_header_attr__add_id(h_attr, read_data.id) < 0) {
+ pr_warning("Not enough memory to add id\n");
+ exit(-1);
+ }
- assert(fd[nr_cpu][counter] >= 0);
- fcntl(fd[nr_cpu][counter], F_SETFL, O_NONBLOCK);
+ assert(fd[nr_cpu][counter][thread_index] >= 0);
+ fcntl(fd[nr_cpu][counter][thread_index], F_SETFL, O_NONBLOCK);
- /*
- * First counter acts as the group leader:
- */
- if (group && group_fd == -1)
- group_fd = fd[nr_cpu][counter];
- if (multiplex && multiplex_fd == -1)
- multiplex_fd = fd[nr_cpu][counter];
+ /*
+ * First counter acts as the group leader:
+ */
+ if (group && group_fd == -1)
+ group_fd = fd[nr_cpu][counter][thread_index];
+ if (multiplex && multiplex_fd == -1)
+ multiplex_fd = fd[nr_cpu][counter][thread_index];
- if (multiplex && fd[nr_cpu][counter] != multiplex_fd) {
+ if (multiplex && fd[nr_cpu][counter][thread_index] != multiplex_fd) {
- ret = ioctl(fd[nr_cpu][counter], PERF_EVENT_IOC_SET_OUTPUT, multiplex_fd);
- assert(ret != -1);
- } else {
- event_array[nr_poll].fd = fd[nr_cpu][counter];
- event_array[nr_poll].events = POLLIN;
- nr_poll++;
-
- mmap_array[nr_cpu][counter].counter = counter;
- mmap_array[nr_cpu][counter].prev = 0;
- mmap_array[nr_cpu][counter].mask = mmap_pages*page_size - 1;
- mmap_array[nr_cpu][counter].base = mmap(NULL, (mmap_pages+1)*page_size,
- PROT_READ|PROT_WRITE, MAP_SHARED, fd[nr_cpu][counter], 0);
- if (mmap_array[nr_cpu][counter].base == MAP_FAILED) {
- error("failed to mmap with %d (%s)\n", errno, strerror(errno));
- exit(-1);
+ ret = ioctl(fd[nr_cpu][counter][thread_index], PERF_EVENT_IOC_SET_OUTPUT, multiplex_fd);
+ assert(ret != -1);
+ } else {
+ event_array[nr_poll].fd = fd[nr_cpu][counter][thread_index];
+ event_array[nr_poll].events = POLLIN;
+ nr_poll++;
+
+ mmap_array[nr_cpu][counter][thread_index].counter = counter;
+ mmap_array[nr_cpu][counter][thread_index].prev = 0;
+ mmap_array[nr_cpu][counter][thread_index].mask = mmap_pages*page_size - 1;
+ mmap_array[nr_cpu][counter][thread_index].base = mmap(NULL, (mmap_pages+1)*page_size,
+ PROT_READ|PROT_WRITE, MAP_SHARED, fd[nr_cpu][counter][thread_index], 0);
+ if (mmap_array[nr_cpu][counter][thread_index].base == MAP_FAILED) {
+ error("failed to mmap with %d (%s)\n", errno, strerror(errno));
+ exit(-1);
+ }
}
- }
- if (filter != NULL) {
- ret = ioctl(fd[nr_cpu][counter],
- PERF_EVENT_IOC_SET_FILTER, filter);
- if (ret) {
- error("failed to set filter with %d (%s)\n", errno,
- strerror(errno));
- exit(-1);
+ if (filter != NULL) {
+ ret = ioctl(fd[nr_cpu][counter][thread_index],
+ PERF_EVENT_IOC_SET_FILTER, filter);
+ if (ret) {
+ error("failed to set filter with %d (%s)\n", errno,
+ strerror(errno));
+ exit(-1);
+ }
}
}
-
- ioctl(fd[nr_cpu][counter], PERF_EVENT_IOC_ENABLE);
}
-static void open_counters(int cpu, pid_t pid)
+static void open_counters(int cpu)
{
int counter;
group_fd = -1;
for (counter = 0; counter < nr_counters; counter++)
- create_counter(counter, cpu, pid);
+ create_counter(counter, cpu);
nr_cpu++;
}
@@ -406,10 +441,80 @@ static int process_buildids(void)
static void atexit_header(void)
{
- session->header.data_size += bytes_written;
+ if (!pipe_output) {
+ session->header.data_size += bytes_written;
+
+ process_buildids();
+ perf_header__write(&session->header, output, true);
+ }
+}
+
+static void event__synthesize_guest_os(struct machine *machine, void *data)
+{
+ int err;
+ char *guest_kallsyms;
+ char path[PATH_MAX];
+ struct perf_session *psession = data;
+
+ if (machine__is_host(machine))
+ return;
+
+ /*
+ *As for guest kernel when processing subcommand record&report,
+ *we arrange module mmap prior to guest kernel mmap and trigger
+ *a preload dso because default guest module symbols are loaded
+ *from guest kallsyms instead of /lib/modules/XXX/XXX. This
+ *method is used to avoid symbol missing when the first addr is
+ *in module instead of in guest kernel.
+ */
+ err = event__synthesize_modules(process_synthesized_event,
+ psession, machine);
+ if (err < 0)
+ pr_err("Couldn't record guest kernel [%d]'s reference"
+ " relocation symbol.\n", machine->pid);
+
+ if (machine__is_default_guest(machine))
+ guest_kallsyms = (char *) symbol_conf.default_guest_kallsyms;
+ else {
+ sprintf(path, "%s/proc/kallsyms", machine->root_dir);
+ guest_kallsyms = path;
+ }
+
+ /*
+ * We use _stext for guest kernel because guest kernel's /proc/kallsyms
+ * have no _text sometimes.
+ */
+ err = event__synthesize_kernel_mmap(process_synthesized_event,
+ psession, machine, "_text");
+ if (err < 0)
+ err = event__synthesize_kernel_mmap(process_synthesized_event,
+ psession, machine, "_stext");
+ if (err < 0)
+ pr_err("Couldn't record guest kernel [%d]'s reference"
+ " relocation symbol.\n", machine->pid);
+}
+
+static struct perf_event_header finished_round_event = {
+ .size = sizeof(struct perf_event_header),
+ .type = PERF_RECORD_FINISHED_ROUND,
+};
+
+static void mmap_read_all(void)
+{
+ int i, counter, thread;
- process_buildids();
- perf_header__write(&session->header, output, true);
+ for (i = 0; i < nr_cpu; i++) {
+ for (counter = 0; counter < nr_counters; counter++) {
+ for (thread = 0; thread < thread_num; thread++) {
+ if (mmap_array[i][counter][thread].base)
+ mmap_read(&mmap_array[i][counter][thread]);
+ }
+
+ }
+ }
+
+ if (perf_header__has_feat(&session->header, HEADER_TRACE_INFO))
+ write_output(&finished_round_event, sizeof(finished_round_event));
}
static int __cmd_record(int argc, const char **argv)
@@ -421,8 +526,9 @@ static int __cmd_record(int argc, const char **argv)
int err;
unsigned long waking = 0;
int child_ready_pipe[2], go_pipe[2];
- const bool forks = target_pid == -1 && argc > 0;
+ const bool forks = argc > 0;
char buf;
+ struct machine *machine;
page_size = sysconf(_SC_PAGE_SIZE);
@@ -435,70 +541,63 @@ static int __cmd_record(int argc, const char **argv)
exit(-1);
}
- if (!stat(output_name, &st) && st.st_size) {
- if (!force) {
- if (!append_file) {
- pr_err("Error, output file %s exists, use -A "
- "to append or -f to overwrite.\n",
- output_name);
- exit(-1);
- }
- } else {
+ if (!strcmp(output_name, "-"))
+ pipe_output = 1;
+ else if (!stat(output_name, &st) && st.st_size) {
+ if (write_mode == WRITE_FORCE) {
char oldname[PATH_MAX];
snprintf(oldname, sizeof(oldname), "%s.old",
output_name);
unlink(oldname);
rename(output_name, oldname);
}
- } else {
- append_file = 0;
+ } else if (write_mode == WRITE_APPEND) {
+ write_mode = WRITE_FORCE;
}
flags = O_CREAT|O_RDWR;
- if (append_file)
+ if (write_mode == WRITE_APPEND)
file_new = 0;
else
flags |= O_TRUNC;
- output = open(output_name, flags, S_IRUSR|S_IWUSR);
+ if (pipe_output)
+ output = STDOUT_FILENO;
+ else
+ output = open(output_name, flags, S_IRUSR | S_IWUSR);
if (output < 0) {
perror("failed to create output file");
exit(-1);
}
- session = perf_session__new(output_name, O_WRONLY, force);
+ session = perf_session__new(output_name, O_WRONLY,
+ write_mode == WRITE_FORCE, false);
if (session == NULL) {
pr_err("Not enough memory for reading perf file header\n");
return -1;
}
if (!file_new) {
- err = perf_header__read(&session->header, output);
+ err = perf_header__read(session, output);
if (err < 0)
return err;
}
- if (raw_samples) {
+ if (have_tracepoints(attrs, nr_counters))
perf_header__set_feat(&session->header, HEADER_TRACE_INFO);
- } else {
- for (i = 0; i < nr_counters; i++) {
- if (attrs[i].sample_type & PERF_SAMPLE_RAW) {
- perf_header__set_feat(&session->header, HEADER_TRACE_INFO);
- break;
- }
- }
- }
atexit(atexit_header);
if (forks) {
- pid = fork();
+ child_pid = fork();
if (pid < 0) {
perror("failed to fork");
exit(-1);
}
- if (!pid) {
+ if (!child_pid) {
+ if (pipe_output)
+ dup2(2, 1);
close(child_ready_pipe[0]);
close(go_pipe[1]);
fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
@@ -527,10 +626,8 @@ static int __cmd_record(int argc, const char **argv)
exit(-1);
}
- child_pid = pid;
-
- if (!system_wide)
- target_pid = pid;
+ if (!system_wide && target_tid == -1 && target_pid == -1)
+ all_tids[0] = child_pid;
close(child_ready_pipe[1]);
close(go_pipe[0]);
@@ -544,16 +641,19 @@ static int __cmd_record(int argc, const char **argv)
close(child_ready_pipe[0]);
}
-
if ((!system_wide && !inherit) || profile_cpu != -1) {
- open_counters(profile_cpu, target_pid);
+ open_counters(profile_cpu);
} else {
nr_cpus = read_cpu_map();
for (i = 0; i < nr_cpus; i++)
- open_counters(cpumap[i], target_pid);
+ open_counters(cpumap[i]);
}
- if (file_new) {
+ if (pipe_output) {
+ err = perf_header__write_pipe(output);
+ if (err < 0)
+ return err;
+ } else if (file_new) {
err = perf_header__write(&session->header, output, false);
if (err < 0)
return err;
@@ -561,21 +661,70 @@ static int __cmd_record(int argc, const char **argv)
post_processing_offset = lseek(output, 0, SEEK_CUR);
+ if (pipe_output) {
+ err = event__synthesize_attrs(&session->header,
+ process_synthesized_event,
+ session);
+ if (err < 0) {
+ pr_err("Couldn't synthesize attrs.\n");
+ return err;
+ }
+
+ err = event__synthesize_event_types(process_synthesized_event,
+ session);
+ if (err < 0) {
+ pr_err("Couldn't synthesize event_types.\n");
+ return err;
+ }
+
+ if (have_tracepoints(attrs, nr_counters)) {
+ /*
+ * FIXME err <= 0 here actually means that
+ * there were no tracepoints so its not really
+ * an error, just that we don't need to
+ * synthesize anything. We really have to
+ * return this more properly and also
+ * propagate errors that now are calling die()
+ */
+ err = event__synthesize_tracing_data(output, attrs,
+ nr_counters,
+ process_synthesized_event,
+ session);
+ if (err <= 0) {
+ pr_err("Couldn't record tracing data.\n");
+ return err;
+ }
+ advance_output(err);
+ }
+ }
+
+ machine = perf_session__find_host_machine(session);
+ if (!machine) {
+ pr_err("Couldn't find native kernel information.\n");
+ return -1;
+ }
+
err = event__synthesize_kernel_mmap(process_synthesized_event,
- session, "_text");
+ session, machine, "_text");
+ if (err < 0)
+ err = event__synthesize_kernel_mmap(process_synthesized_event,
+ session, machine, "_stext");
if (err < 0) {
pr_err("Couldn't record kernel reference relocation symbol.\n");
return err;
}
- err = event__synthesize_modules(process_synthesized_event, session);
+ err = event__synthesize_modules(process_synthesized_event,
+ session, machine);
if (err < 0) {
pr_err("Couldn't record kernel reference relocation symbol.\n");
return err;
}
+ if (perf_guest)
+ perf_session__process_machines(session, event__synthesize_guest_os);
if (!system_wide && profile_cpu == -1)
- event__synthesize_thread(target_pid, process_synthesized_event,
+ event__synthesize_thread(target_tid, process_synthesized_event,
session);
else
event__synthesize_threads(process_synthesized_event, session);
@@ -598,13 +747,9 @@ static int __cmd_record(int argc, const char **argv)
for (;;) {
int hits = samples;
+ int thread;
- for (i = 0; i < nr_cpu; i++) {
- for (counter = 0; counter < nr_counters; counter++) {
- if (mmap_array[i][counter].base)
- mmap_read(&mmap_array[i][counter]);
- }
- }
+ mmap_read_all();
if (hits == samples) {
if (done)
@@ -615,8 +760,15 @@ static int __cmd_record(int argc, const char **argv)
if (done) {
for (i = 0; i < nr_cpu; i++) {
- for (counter = 0; counter < nr_counters; counter++)
- ioctl(fd[i][counter], PERF_EVENT_IOC_DISABLE);
+ for (counter = 0;
+ counter < nr_counters;
+ counter++) {
+ for (thread = 0;
+ thread < thread_num;
+ thread++)
+ ioctl(fd[i][counter][thread],
+ PERF_EVENT_IOC_DISABLE);
+ }
}
}
}
@@ -641,6 +793,8 @@ static const char * const record_usage[] = {
NULL
};
+static bool force, append_file;
+
static const struct option options[] = {
OPT_CALLBACK('e', "event", NULL, "event",
"event selector. use 'perf list' to list available events",
@@ -648,7 +802,9 @@ static const struct option options[] = {
OPT_CALLBACK(0, "filter", NULL, "filter",
"event filter", parse_filter),
OPT_INTEGER('p', "pid", &target_pid,
- "record events on existing pid"),
+ "record events on existing process id"),
+ OPT_INTEGER('t', "tid", &target_tid,
+ "record events on existing thread id"),
OPT_INTEGER('r', "realtime", &realtime_prio,
"collect data with this RT SCHED_FIFO priority"),
OPT_BOOLEAN('R', "raw-samples", &raw_samples,
@@ -660,20 +816,20 @@ static const struct option options[] = {
OPT_INTEGER('C', "profile_cpu", &profile_cpu,
"CPU to profile on"),
OPT_BOOLEAN('f', "force", &force,
- "overwrite existing data file"),
- OPT_LONG('c', "count", &default_interval,
+ "overwrite existing data file (deprecated)"),
+ OPT_LONG('c', "count", &user_interval,
"event period to sample"),
OPT_STRING('o', "output", &output_name, "file",
"output file name"),
OPT_BOOLEAN('i', "inherit", &inherit,
"child tasks inherit counters"),
- OPT_INTEGER('F', "freq", &freq,
+ OPT_INTEGER('F', "freq", &user_freq,
"profile at this frequency"),
OPT_INTEGER('m', "mmap-pages", &mmap_pages,
"number of mmap data pages"),
OPT_BOOLEAN('g', "call-graph", &call_graph,
"do call-graph (stack chain/backtrace) recording"),
- OPT_BOOLEAN('v', "verbose", &verbose,
+ OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_BOOLEAN('s', "stat", &inherit_stat,
"per thread counts"),
@@ -688,13 +844,24 @@ static const struct option options[] = {
int cmd_record(int argc, const char **argv, const char *prefix __used)
{
- int counter;
+ int i,j;
argc = parse_options(argc, argv, options, record_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
- if (!argc && target_pid == -1 && !system_wide && profile_cpu == -1)
+ if (!argc && target_pid == -1 && target_tid == -1 &&
+ !system_wide && profile_cpu == -1)
usage_with_options(record_usage, options);
+ if (force && append_file) {
+ fprintf(stderr, "Can't overwrite and append at the same time."
+ " You need to choose between -f and -A");
+ usage_with_options(record_usage, options);
+ } else if (append_file) {
+ write_mode = WRITE_APPEND;
+ } else {
+ write_mode = WRITE_FORCE;
+ }
+
symbol__init();
if (!nr_counters) {
@@ -703,6 +870,42 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
attrs[0].config = PERF_COUNT_HW_CPU_CYCLES;
}
+ if (target_pid != -1) {
+ target_tid = target_pid;
+ thread_num = find_all_tid(target_pid, &all_tids);
+ if (thread_num <= 0) {
+ fprintf(stderr, "Can't find all threads of pid %d\n",
+ target_pid);
+ usage_with_options(record_usage, options);
+ }
+ } else {
+ all_tids=malloc(sizeof(pid_t));
+ if (!all_tids)
+ return -ENOMEM;
+
+ all_tids[0] = target_tid;
+ thread_num = 1;
+ }
+
+ for (i = 0; i < MAX_NR_CPUS; i++) {
+ for (j = 0; j < MAX_COUNTERS; j++) {
+ fd[i][j] = malloc(sizeof(int)*thread_num);
+ mmap_array[i][j] = zalloc(
+ sizeof(struct mmap_data)*thread_num);
+ if (!fd[i][j] || !mmap_array[i][j])
+ return -ENOMEM;
+ }
+ }
+ event_array = malloc(
+ sizeof(struct pollfd)*MAX_NR_CPUS*MAX_COUNTERS*thread_num);
+ if (!event_array)
+ return -ENOMEM;
+
+ if (user_interval != UINT_MAX)
+ default_interval = user_interval;
+ if (user_freq != UINT_MAX)
+ freq = user_freq;
+
/*
* User specified count overrides default frequency.
*/
@@ -715,12 +918,5 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
exit(EXIT_FAILURE);
}
- for (counter = 0; counter < nr_counters; counter++) {
- if (attrs[counter].sample_period)
- continue;
-
- attrs[counter].sample_period = default_interval;
- }
-
return __cmd_record(argc, argv);
}
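For illustration only (not part of the patch): the record changes above turn the fixed fd[cpu][counter] array into per-thread arrays sized once the target's threads are known (via find_all_tid() when --pid is used, or a single slot otherwise). A minimal sketch of that allocation pattern, with made-up limits and without the mmap_array/event_array counterparts:

#include <stdlib.h>

#define MAX_NR_CPUS	4	/* illustrative values, not the real limits */
#define MAX_COUNTERS	2

static int *fd[MAX_NR_CPUS][MAX_COUNTERS];

/* one fd slot per monitored thread for every cpu/counter pair */
static int alloc_fd_arrays(int thread_num)
{
	int i, j;

	for (i = 0; i < MAX_NR_CPUS; i++) {
		for (j = 0; j < MAX_COUNTERS; j++) {
			fd[i][j] = calloc(thread_num, sizeof(int));
			if (!fd[i][j])
				return -1;
		}
	}
	return 0;
}

int main(void)
{
	return alloc_fd_arrays(8) ? 1 : 0;	/* e.g. a pid with 8 threads */
}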
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index f815de25d0fc..d7c75291e788 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -14,7 +14,6 @@
#include "util/cache.h"
#include <linux/rbtree.h>
#include "util/symbol.h"
-#include "util/string.h"
#include "util/callchain.h"
#include "util/strlist.h"
#include "util/values.h"
@@ -33,11 +32,11 @@
static char const *input_name = "perf.data";
-static int force;
+static bool force;
static bool hide_unresolved;
static bool dont_use_callchains;
-static int show_threads;
+static bool show_threads;
static struct perf_read_values show_threads_values;
static char default_pretty_printing_style[] = "normal";
@@ -45,16 +44,17 @@ static char *pretty_printing_style = default_pretty_printing_style;
static char callchain_default_opt[] = "fractal,0.5";
-static struct event_stat_id *get_stats(struct perf_session *self,
- u64 event_stream, u32 type, u64 config)
+static struct hists *perf_session__hists_findnew(struct perf_session *self,
+ u64 event_stream, u32 type,
+ u64 config)
{
- struct rb_node **p = &self->stats_by_id.rb_node;
+ struct rb_node **p = &self->hists_tree.rb_node;
struct rb_node *parent = NULL;
- struct event_stat_id *iter, *new;
+ struct hists *iter, *new;
while (*p != NULL) {
parent = *p;
- iter = rb_entry(parent, struct event_stat_id, rb_node);
+ iter = rb_entry(parent, struct hists, rb_node);
if (iter->config == config)
return iter;
@@ -65,15 +65,15 @@ static struct event_stat_id *get_stats(struct perf_session *self,
p = &(*p)->rb_left;
}
- new = malloc(sizeof(struct event_stat_id));
+ new = malloc(sizeof(struct hists));
if (new == NULL)
return NULL;
- memset(new, 0, sizeof(struct event_stat_id));
+ memset(new, 0, sizeof(struct hists));
new->event_stream = event_stream;
new->config = config;
new->type = type;
rb_link_node(&new->rb_node, parent, p);
- rb_insert_color(&new->rb_node, &self->stats_by_id);
+ rb_insert_color(&new->rb_node, &self->hists_tree);
return new;
}
@@ -81,70 +81,55 @@ static int perf_session__add_hist_entry(struct perf_session *self,
struct addr_location *al,
struct sample_data *data)
{
- struct symbol **syms = NULL, *parent = NULL;
- bool hit;
+ struct map_symbol *syms = NULL;
+ struct symbol *parent = NULL;
+ int err = -ENOMEM;
struct hist_entry *he;
- struct event_stat_id *stats;
+ struct hists *hists;
struct perf_event_attr *attr;
- if ((sort__has_parent || symbol_conf.use_callchain) && data->callchain)
+ if ((sort__has_parent || symbol_conf.use_callchain) && data->callchain) {
syms = perf_session__resolve_callchain(self, al->thread,
data->callchain, &parent);
+ if (syms == NULL)
+ return -ENOMEM;
+ }
attr = perf_header__find_attr(data->id, &self->header);
if (attr)
- stats = get_stats(self, data->id, attr->type, attr->config);
+ hists = perf_session__hists_findnew(self, data->id, attr->type, attr->config);
else
- stats = get_stats(self, data->id, 0, 0);
- if (stats == NULL)
- return -ENOMEM;
- he = __perf_session__add_hist_entry(&stats->hists, al, parent,
- data->period, &hit);
+ hists = perf_session__hists_findnew(self, data->id, 0, 0);
+ if (hists == NULL)
+ goto out_free_syms;
+ he = __hists__add_entry(hists, al, parent, data->period);
if (he == NULL)
- return -ENOMEM;
-
- if (hit)
- he->count += data->period;
-
- if (symbol_conf.use_callchain) {
- if (!hit)
- callchain_init(&he->callchain);
- append_chain(&he->callchain, data->callchain, syms);
- free(syms);
- }
-
- return 0;
-}
-
-static int validate_chain(struct ip_callchain *chain, event_t *event)
-{
- unsigned int chain_size;
-
- chain_size = event->header.size;
- chain_size -= (unsigned long)&event->ip.__more_data - (unsigned long)event;
-
- if (chain->nr*sizeof(u64) > chain_size)
- return -1;
-
- return 0;
+ goto out_free_syms;
+ err = 0;
+ if (symbol_conf.use_callchain)
+ err = append_chain(he->callchain, data->callchain, syms);
+out_free_syms:
+ free(syms);
+ return err;
}
static int add_event_total(struct perf_session *session,
struct sample_data *data,
struct perf_event_attr *attr)
{
- struct event_stat_id *stats;
+ struct hists *hists;
if (attr)
- stats = get_stats(session, data->id, attr->type, attr->config);
+ hists = perf_session__hists_findnew(session, data->id,
+ attr->type, attr->config);
else
- stats = get_stats(session, data->id, 0, 0);
+ hists = perf_session__hists_findnew(session, data->id, 0, 0);
- if (!stats)
+ if (!hists)
return -ENOMEM;
- stats->stats.total += data->period;
- session->events_stats.total += data->period;
+ hists->stats.total += data->period;
+ session->hists.stats.total += data->period;
return 0;
}
@@ -164,7 +149,7 @@ static int process_sample_event(event_t *event, struct perf_session *session)
dump_printf("... chain: nr:%Lu\n", data.callchain->nr);
- if (validate_chain(data.callchain, event) < 0) {
+ if (!ip_callchain__valid(data.callchain, event)) {
pr_debug("call-chain problem with event, "
"skipping it.\n");
return 0;
@@ -260,15 +245,29 @@ static struct perf_event_ops event_ops = {
.fork = event__process_task,
.lost = event__process_lost,
.read = process_read_event,
+ .attr = event__process_attr,
+ .event_type = event__process_event_type,
+ .tracing_data = event__process_tracing_data,
+ .build_id = event__process_build_id,
};
+extern volatile int session_done;
+
+static void sig_handler(int sig __attribute__((__unused__)))
+{
+ session_done = 1;
+}
+
static int __cmd_report(void)
{
int ret = -EINVAL;
struct perf_session *session;
struct rb_node *next;
+ const char *help = "For a higher level overview, try: perf report --sort comm,dso";
+
+ signal(SIGINT, sig_handler);
- session = perf_session__new(input_name, O_RDONLY, force);
+ session = perf_session__new(input_name, O_RDONLY, force, false);
if (session == NULL)
return -ENOMEM;
@@ -292,39 +291,47 @@ static int __cmd_report(void)
perf_session__fprintf(session, stdout);
if (verbose > 2)
- dsos__fprintf(stdout);
+ perf_session__fprintf_dsos(session, stdout);
- next = rb_first(&session->stats_by_id);
+ next = rb_first(&session->hists_tree);
while (next) {
- struct event_stat_id *stats;
-
- stats = rb_entry(next, struct event_stat_id, rb_node);
- perf_session__collapse_resort(&stats->hists);
- perf_session__output_resort(&stats->hists, stats->stats.total);
- if (rb_first(&session->stats_by_id) ==
- rb_last(&session->stats_by_id))
- fprintf(stdout, "# Samples: %Ld\n#\n",
- stats->stats.total);
- else
- fprintf(stdout, "# Samples: %Ld %s\n#\n",
- stats->stats.total,
- __event_name(stats->type, stats->config));
-
- perf_session__fprintf_hists(&stats->hists, NULL, false, stdout,
- stats->stats.total);
- fprintf(stdout, "\n\n");
- next = rb_next(&stats->rb_node);
+ struct hists *hists;
+
+ hists = rb_entry(next, struct hists, rb_node);
+ hists__collapse_resort(hists);
+ hists__output_resort(hists);
+ if (use_browser)
+ perf_session__browse_hists(&hists->entries,
+ hists->nr_entries,
+ hists->stats.total, help,
+ input_name);
+ else {
+ if (rb_first(&session->hists.entries) ==
+ rb_last(&session->hists.entries))
+ fprintf(stdout, "# Samples: %Ld\n#\n",
+ hists->stats.total);
+ else
+ fprintf(stdout, "# Samples: %Ld %s\n#\n",
+ hists->stats.total,
+ __event_name(hists->type, hists->config));
+
+ hists__fprintf(hists, NULL, false, stdout);
+ fprintf(stdout, "\n\n");
+ }
+
+ next = rb_next(&hists->rb_node);
}
- if (sort_order == default_sort_order &&
- parent_pattern == default_parent_pattern)
- fprintf(stdout, "#\n# (For a higher level overview, try: perf report --sort comm,dso)\n#\n");
+ if (!use_browser && sort_order == default_sort_order &&
+ parent_pattern == default_parent_pattern) {
+ fprintf(stdout, "#\n# (%s)\n#\n", help);
- if (show_threads) {
- bool raw_printing_style = !strcmp(pretty_printing_style, "raw");
- perf_read_values_display(stdout, &show_threads_values,
- raw_printing_style);
- perf_read_values_destroy(&show_threads_values);
+ if (show_threads) {
+ bool style = !strcmp(pretty_printing_style, "raw");
+ perf_read_values_display(stdout, &show_threads_values,
+ style);
+ perf_read_values_destroy(&show_threads_values);
+ }
}
out_delete:
perf_session__delete(session);
@@ -335,7 +342,7 @@ static int
parse_callchain_opt(const struct option *opt __used, const char *arg,
int unset)
{
- char *tok;
+ char *tok, *tok2;
char *endptr;
/*
@@ -380,10 +387,13 @@ parse_callchain_opt(const struct option *opt __used, const char *arg,
if (!tok)
goto setup;
+ tok2 = strtok(NULL, ",");
callchain_param.min_percent = strtod(tok, &endptr);
if (tok == endptr)
return -1;
+ if (tok2)
+ callchain_param.print_limit = strtod(tok2, &endptr);
setup:
if (register_callchain_param(&callchain_param) < 0) {
fprintf(stderr, "Can't register callchain params\n");
@@ -400,7 +410,7 @@ static const char * const report_usage[] = {
static const struct option options[] = {
OPT_STRING('i', "input", &input_name, "file",
"input file name"),
- OPT_BOOLEAN('v', "verbose", &verbose,
+ OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
@@ -419,12 +429,14 @@ static const struct option options[] = {
"sort by key(s): pid, comm, dso, symbol, parent"),
OPT_BOOLEAN('P', "full-paths", &symbol_conf.full_paths,
"Don't shorten the pathnames taking into account the cwd"),
+ OPT_BOOLEAN(0, "showcpuutilization", &symbol_conf.show_cpu_utilization,
+ "Show sample percentage for different cpu modes"),
OPT_STRING('p', "parent", &parent_pattern, "regex",
"regex filter to identify parent, see: '--sort parent'"),
OPT_BOOLEAN('x', "exclude-other", &symbol_conf.exclude_other,
"Only display entries with parent-match"),
OPT_CALLBACK_DEFAULT('g', "call-graph", NULL, "output_type,min_percent",
- "Display callchains using output_type and min percent threshold. "
+ "Display callchains using output_type (graph, flat, fractal, or none) and min percent threshold. "
"Default: fractal,0.5", &parse_callchain_opt, callchain_default_opt),
OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
"only consider symbols in these dsos"),
@@ -447,7 +459,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
{
argc = parse_options(argc, argv, options, report_usage, 0);
- setup_pager();
+ if (strcmp(input_name, "-") != 0)
+ setup_browser();
if (symbol__init() < 0)
return -1;
@@ -455,7 +468,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
setup_sorting(report_usage, options);
if (parent_pattern != default_parent_pattern) {
- sort_dimension__add("parent");
+ if (sort_dimension__add("parent") < 0)
+ return -1;
sort_parent.elide = 1;
} else
symbol_conf.exclude_other = false;
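A side note (editor's sketch, not from the patch): parse_callchain_opt() above gains an optional third comma-separated field, stored in callchain_param.print_limit, after the output type and minimum percentage. A standalone sketch of that kind of "mode,min_percent[,print_limit]" parsing, with invented names and a fixed local buffer:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_graph_opt(const char *arg, double *min_pcnt, double *limit)
{
	char buf[64], *tok, *tok2, *end;

	snprintf(buf, sizeof(buf), "%s", arg);

	tok = strtok(buf, ",");	/* output type: graph/flat/fractal/none */
	if (!tok)
		return -1;

	tok = strtok(NULL, ",");
	if (!tok)
		return 0;	/* defaults stay in place */

	tok2 = strtok(NULL, ",");
	*min_pcnt = strtod(tok, &end);
	if (tok == end)
		return -1;
	if (tok2)
		*limit = strtod(tok2, &end);
	return 0;
}

int main(void)
{
	double min = 0.5, lim = 0;

	if (parse_graph_opt("fractal,1.0,20", &min, &lim) == 0)
		printf("min %.1f%% limit %.0f\n", min, lim);
	return 0;
}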
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 4f5a03e43444..aef6ed0e119c 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -68,10 +68,10 @@ enum sched_event_type {
struct sched_atom {
enum sched_event_type type;
+ int specific_wait;
u64 timestamp;
u64 duration;
unsigned long nr;
- int specific_wait;
sem_t *wait_sem;
struct task_desc *wakee;
};
@@ -1651,15 +1651,16 @@ static int process_lost_event(event_t *event __used,
}
static struct perf_event_ops event_ops = {
- .sample = process_sample_event,
- .comm = event__process_comm,
- .lost = process_lost_event,
+ .sample = process_sample_event,
+ .comm = event__process_comm,
+ .lost = process_lost_event,
+ .ordered_samples = true,
};
static int read_events(void)
{
int err = -EINVAL;
- struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0);
+ struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0, false);
if (session == NULL)
return -ENOMEM;
@@ -1790,7 +1791,7 @@ static const char * const sched_usage[] = {
static const struct option sched_options[] = {
OPT_STRING('i', "input", &input_name, "file",
"input file name"),
- OPT_BOOLEAN('v', "verbose", &verbose,
+ OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
@@ -1805,7 +1806,7 @@ static const char * const latency_usage[] = {
static const struct option latency_options[] = {
OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
"sort by key(s): runtime, switch, avg, max"),
- OPT_BOOLEAN('v', "verbose", &verbose,
+ OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
OPT_INTEGER('C', "CPU", &profile_cpu,
"CPU to profile on"),
@@ -1822,7 +1823,7 @@ static const char * const replay_usage[] = {
static const struct option replay_options[] = {
OPT_INTEGER('r', "repeat", &replay_repeat,
"repeat the workload replay N times (-1: infinite)"),
- OPT_BOOLEAN('v', "verbose", &verbose,
+ OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
@@ -1850,7 +1851,6 @@ static const char *record_args[] = {
"record",
"-a",
"-R",
- "-M",
"-f",
"-m", "1024",
"-c", "1",
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 95db31cff6fd..e619ac89dff5 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -46,6 +46,7 @@
#include "util/debug.h"
#include "util/header.h"
#include "util/cpumap.h"
+#include "util/thread.h"
#include <sys/prctl.h>
#include <math.h>
@@ -66,18 +67,21 @@ static struct perf_event_attr default_attrs[] = {
};
-static int system_wide = 0;
+static bool system_wide = false;
static unsigned int nr_cpus = 0;
static int run_idx = 0;
static int run_count = 1;
-static int inherit = 1;
-static int scale = 1;
+static bool inherit = true;
+static bool scale = true;
static pid_t target_pid = -1;
+static pid_t target_tid = -1;
+static pid_t *all_tids = NULL;
+static int thread_num = 0;
static pid_t child_pid = -1;
-static int null_run = 0;
+static bool null_run = false;
-static int fd[MAX_NR_CPUS][MAX_COUNTERS];
+static int *fd[MAX_NR_CPUS][MAX_COUNTERS];
static int event_scaled[MAX_COUNTERS];
@@ -140,9 +144,11 @@ struct stats runtime_branches_stats;
#define ERR_PERF_OPEN \
"Error: counter %d, sys_perf_event_open() syscall returned with %d (%s)\n"
-static void create_perf_stat_counter(int counter, int pid)
+static int create_perf_stat_counter(int counter)
{
struct perf_event_attr *attr = attrs + counter;
+ int thread;
+ int ncreated = 0;
if (scale)
attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
@@ -152,21 +158,33 @@ static void create_perf_stat_counter(int counter, int pid)
unsigned int cpu;
for (cpu = 0; cpu < nr_cpus; cpu++) {
- fd[cpu][counter] = sys_perf_event_open(attr, -1, cpumap[cpu], -1, 0);
- if (fd[cpu][counter] < 0 && verbose)
- fprintf(stderr, ERR_PERF_OPEN, counter,
- fd[cpu][counter], strerror(errno));
+ fd[cpu][counter][0] = sys_perf_event_open(attr,
+ -1, cpumap[cpu], -1, 0);
+ if (fd[cpu][counter][0] < 0)
+ pr_debug(ERR_PERF_OPEN, counter,
+ fd[cpu][counter][0], strerror(errno));
+ else
+ ++ncreated;
}
} else {
attr->inherit = inherit;
- attr->disabled = 1;
- attr->enable_on_exec = 1;
-
- fd[0][counter] = sys_perf_event_open(attr, pid, -1, -1, 0);
- if (fd[0][counter] < 0 && verbose)
- fprintf(stderr, ERR_PERF_OPEN, counter,
- fd[0][counter], strerror(errno));
+ if (target_pid == -1) {
+ attr->disabled = 1;
+ attr->enable_on_exec = 1;
+ }
+ for (thread = 0; thread < thread_num; thread++) {
+ fd[0][counter][thread] = sys_perf_event_open(attr,
+ all_tids[thread], -1, -1, 0);
+ if (fd[0][counter][thread] < 0)
+ pr_debug(ERR_PERF_OPEN, counter,
+ fd[0][counter][thread],
+ strerror(errno));
+ else
+ ++ncreated;
+ }
}
+
+ return ncreated;
}
/*
@@ -190,25 +208,28 @@ static void read_counter(int counter)
unsigned int cpu;
size_t res, nv;
int scaled;
- int i;
+ int i, thread;
count[0] = count[1] = count[2] = 0;
nv = scale ? 3 : 1;
for (cpu = 0; cpu < nr_cpus; cpu++) {
- if (fd[cpu][counter] < 0)
- continue;
-
- res = read(fd[cpu][counter], single_count, nv * sizeof(u64));
- assert(res == nv * sizeof(u64));
-
- close(fd[cpu][counter]);
- fd[cpu][counter] = -1;
-
- count[0] += single_count[0];
- if (scale) {
- count[1] += single_count[1];
- count[2] += single_count[2];
+ for (thread = 0; thread < thread_num; thread++) {
+ if (fd[cpu][counter][thread] < 0)
+ continue;
+
+ res = read(fd[cpu][counter][thread],
+ single_count, nv * sizeof(u64));
+ assert(res == nv * sizeof(u64));
+
+ close(fd[cpu][counter][thread]);
+ fd[cpu][counter][thread] = -1;
+
+ count[0] += single_count[0];
+ if (scale) {
+ count[1] += single_count[1];
+ count[2] += single_count[2];
+ }
}
}
@@ -250,10 +271,9 @@ static int run_perf_stat(int argc __used, const char **argv)
{
unsigned long long t0, t1;
int status = 0;
- int counter;
- int pid = target_pid;
+ int counter, ncreated = 0;
int child_ready_pipe[2], go_pipe[2];
- const bool forks = (target_pid == -1 && argc > 0);
+ const bool forks = (argc > 0);
char buf;
if (!system_wide)
@@ -265,10 +285,10 @@ static int run_perf_stat(int argc __used, const char **argv)
}
if (forks) {
- if ((pid = fork()) < 0)
+ if ((child_pid = fork()) < 0)
perror("failed to fork");
- if (!pid) {
+ if (!child_pid) {
close(child_ready_pipe[0]);
close(go_pipe[1]);
fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
@@ -297,7 +317,8 @@ static int run_perf_stat(int argc __used, const char **argv)
exit(-1);
}
- child_pid = pid;
+ if (target_tid == -1 && target_pid == -1 && !system_wide)
+ all_tids[0] = child_pid;
/*
* Wait for the child to be ready to exec.
@@ -310,7 +331,16 @@ static int run_perf_stat(int argc __used, const char **argv)
}
for (counter = 0; counter < nr_counters; counter++)
- create_perf_stat_counter(counter, pid);
+ ncreated += create_perf_stat_counter(counter);
+
+ if (ncreated == 0) {
+ pr_err("No permission to collect %sstats.\n"
+ "Consider tweaking /proc/sys/kernel/perf_event_paranoid.\n",
+ system_wide ? "system-wide " : "");
+ if (child_pid != -1)
+ kill(child_pid, SIGTERM);
+ return -1;
+ }
/*
* Enable counters and exec the command:
@@ -321,7 +351,7 @@ static int run_perf_stat(int argc __used, const char **argv)
close(go_pipe[1]);
wait(&status);
} else {
- while(!done);
+ while(!done) sleep(1);
}
t1 = rdclock();
@@ -429,12 +459,14 @@ static void print_stat(int argc, const char **argv)
fprintf(stderr, "\n");
fprintf(stderr, " Performance counter stats for ");
- if(target_pid == -1) {
+ if(target_pid == -1 && target_tid == -1) {
fprintf(stderr, "\'%s", argv[0]);
for (i = 1; i < argc; i++)
fprintf(stderr, " %s", argv[i]);
- }else
- fprintf(stderr, "task pid \'%d", target_pid);
+ } else if (target_pid != -1)
+ fprintf(stderr, "process id \'%d", target_pid);
+ else
+ fprintf(stderr, "thread id \'%d", target_tid);
fprintf(stderr, "\'");
if (run_count > 1)
@@ -459,7 +491,7 @@ static volatile int signr = -1;
static void skip_signal(int signo)
{
- if(target_pid != -1)
+ if(child_pid == -1)
done = 1;
signr = signo;
@@ -489,12 +521,14 @@ static const struct option options[] = {
OPT_BOOLEAN('i', "inherit", &inherit,
"child tasks inherit counters"),
OPT_INTEGER('p', "pid", &target_pid,
- "stat events on existing pid"),
+ "stat events on existing process id"),
+ OPT_INTEGER('t', "tid", &target_tid,
+ "stat events on existing thread id"),
OPT_BOOLEAN('a', "all-cpus", &system_wide,
"system-wide collection from all CPUs"),
OPT_BOOLEAN('c', "scale", &scale,
"scale/normalize counters"),
- OPT_BOOLEAN('v', "verbose", &verbose,
+ OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_INTEGER('r', "repeat", &run_count,
"repeat command and print average + stddev (max: 100)"),
@@ -506,10 +540,11 @@ static const struct option options[] = {
int cmd_stat(int argc, const char **argv, const char *prefix __used)
{
int status;
+ int i, j;
argc = parse_options(argc, argv, options, stat_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
- if (!argc && target_pid == -1)
+ if (!argc && target_pid == -1 && target_tid == -1)
usage_with_options(stat_usage, options);
if (run_count <= 0)
usage_with_options(stat_usage, options);
@@ -525,6 +560,31 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
else
nr_cpus = 1;
+ if (target_pid != -1) {
+ target_tid = target_pid;
+ thread_num = find_all_tid(target_pid, &all_tids);
+ if (thread_num <= 0) {
+ fprintf(stderr, "Can't find all threads of pid %d\n",
+ target_pid);
+ usage_with_options(stat_usage, options);
+ }
+ } else {
+ all_tids = malloc(sizeof(pid_t));
+ if (!all_tids)
+ return -ENOMEM;
+
+ all_tids[0] = target_tid;
+ thread_num = 1;
+ }
+
+ for (i = 0; i < MAX_NR_CPUS; i++) {
+ for (j = 0; j < MAX_COUNTERS; j++) {
+ fd[i][j] = malloc(sizeof(int)*thread_num);
+ if (!fd[i][j])
+ return -ENOMEM;
+ }
+ }
+
/*
* We dont want to block the signals - that would cause
* child tasks to inherit that and Ctrl-C would not work.
@@ -543,7 +603,8 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
status = run_perf_stat(argc, argv);
}
- print_stat(argc, argv);
+ if (status != -1)
+ print_stat(argc, argv);
return status;
}
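
The OPT_BOOLEAN -> OPT_INCR conversions in the option tables above (and in the builtin-sched.c hunks earlier) make -v cumulative: each occurrence of the flag raises the verbosity level instead of merely setting it, so callers can tell -v from -vv. A minimal sketch of that counting behaviour, assuming nothing more than a plain argv scan (count_verbose_flags is illustrative, not the parse-options implementation):

    /* Count repeated -v flags into an int, the effect OPT_INCR is after. */
    #include <string.h>

    static int verbose;

    static void count_verbose_flags(int argc, const char **argv)
    {
            int i;

            for (i = 1; i < argc; i++)
                    if (strcmp(argv[i], "-v") == 0)
                            verbose++;      /* each separate -v bumps the level */
    }

With a boolean option a second -v is a no-op; with the incrementing form, code can test verbose > 1 for extra-chatty output.
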
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
new file mode 100644
index 000000000000..0339612e7385
--- /dev/null
+++ b/tools/perf/builtin-test.c
@@ -0,0 +1,281 @@
+/*
+ * builtin-test.c
+ *
+ * Builtin regression testing command: ever-growing number of sanity tests
+ */
+#include "builtin.h"
+
+#include "util/cache.h"
+#include "util/debug.h"
+#include "util/parse-options.h"
+#include "util/session.h"
+#include "util/symbol.h"
+#include "util/thread.h"
+
+static long page_size;
+
+static int vmlinux_matches_kallsyms_filter(struct map *map __used, struct symbol *sym)
+{
+ bool *visited = symbol__priv(sym);
+ *visited = true;
+ return 0;
+}
+
+static int test__vmlinux_matches_kallsyms(void)
+{
+ int err = -1;
+ struct rb_node *nd;
+ struct symbol *sym;
+ struct map *kallsyms_map, *vmlinux_map;
+ struct machine kallsyms, vmlinux;
+ enum map_type type = MAP__FUNCTION;
+ struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", };
+
+ /*
+ * Step 1:
+ *
+ * Init the machines that will hold the kernel and the modules obtained
+ * from both the vmlinux + .ko files and from /proc/kallsyms, split by modules.
+ */
+ machine__init(&kallsyms, "", HOST_KERNEL_ID);
+ machine__init(&vmlinux, "", HOST_KERNEL_ID);
+
+ /*
+ * Step 2:
+ *
+ * Create the kernel maps for kallsyms and the DSO where we will then
+ * load /proc/kallsyms. Also create the modules maps from /proc/modules
+ * and find the .ko files that match them in /lib/modules/`uname -r`/.
+ */
+ if (machine__create_kernel_maps(&kallsyms) < 0) {
+ pr_debug("machine__create_kernel_maps ");
+ return -1;
+ }
+
+ /*
+ * Step 3:
+ *
+ * Load and split /proc/kallsyms into multiple maps, one per module.
+ */
+ if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type, NULL) <= 0) {
+ pr_debug("dso__load_kallsyms ");
+ goto out;
+ }
+
+ /*
+ * Step 4:
+ *
+ * kallsyms will be sorted internally, on demand, by name so that we can
+ * find the reference relocation symbol, i.e. the symbol we will use
+ * to see if the running kernel was relocated, by checking whether it has
+ * the same value in the vmlinux file we load.
+ */
+ kallsyms_map = machine__kernel_map(&kallsyms, type);
+
+ sym = map__find_symbol_by_name(kallsyms_map, ref_reloc_sym.name, NULL);
+ if (sym == NULL) {
+ pr_debug("dso__find_symbol_by_name ");
+ goto out;
+ }
+
+ ref_reloc_sym.addr = sym->start;
+
+ /*
+ * Step 5:
+ *
+ * Now repeat step 2, this time for the vmlinux file we'll auto-locate.
+ */
+ if (machine__create_kernel_maps(&vmlinux) < 0) {
+ pr_debug("machine__create_kernel_maps ");
+ goto out;
+ }
+
+ vmlinux_map = machine__kernel_map(&vmlinux, type);
+ map__kmap(vmlinux_map)->ref_reloc_sym = &ref_reloc_sym;
+
+ /*
+ * Step 6:
+ *
+ * Locate a vmlinux file in the vmlinux path that has a buildid matching
+ * that of the running kernel.
+ *
+ * While doing that, check whether we find the ref reloc symbol; if we do,
+ * we'll have its ref_reloc_symbol.unrelocated_addr, and then
+ * maps__reloc_vmlinux will notice and set the proper ->[un]map_ip routines
+ * to fix up the symbols.
+ */
+ if (machine__load_vmlinux_path(&vmlinux, type,
+ vmlinux_matches_kallsyms_filter) <= 0) {
+ pr_debug("machine__load_vmlinux_path ");
+ goto out;
+ }
+
+ err = 0;
+ /*
+ * Step 7:
+ *
+ * Now look at the symbols in the vmlinux DSO and check whether we find all
+ * of them in the kallsyms DSO. For the ones that are in both, check that
+ * their names and end addresses match too.
+ */
+ for (nd = rb_first(&vmlinux_map->dso->symbols[type]); nd; nd = rb_next(nd)) {
+ struct symbol *pair;
+
+ sym = rb_entry(nd, struct symbol, rb_node);
+ pair = machine__find_kernel_symbol(&kallsyms, type, sym->start, NULL, NULL);
+
+ if (pair && pair->start == sym->start) {
+next_pair:
+ if (strcmp(sym->name, pair->name) == 0) {
+ /*
+ * kallsyms doesn't have the symbol end, so we
+ * set it to the next symbol's start - 1. In
+ * some cases this is off by up to a page;
+ * trace_kmalloc, while this code was being
+ * developed, was one such example, 2106 bytes
+ * off the real size. More than that and we
+ * _really_ have a problem.
+ */
+ s64 skew = sym->end - pair->end;
+ if (llabs(skew) < page_size)
+ continue;
+
+ pr_debug("%#Lx: diff end addr for %s v: %#Lx k: %#Lx\n",
+ sym->start, sym->name, sym->end, pair->end);
+ } else {
+ struct rb_node *nnd = rb_prev(&pair->rb_node);
+
+ if (nnd) {
+ struct symbol *next = rb_entry(nnd, struct symbol, rb_node);
+
+ if (next->start == sym->start) {
+ pair = next;
+ goto next_pair;
+ }
+ }
+ pr_debug("%#Lx: diff name v: %s k: %s\n",
+ sym->start, sym->name, pair->name);
+ }
+ } else
+ pr_debug("%#Lx: %s not on kallsyms\n", sym->start, sym->name);
+
+ err = -1;
+ }
+
+ if (!verbose)
+ goto out;
+
+ pr_info("Maps only in vmlinux:\n");
+
+ for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
+ struct map *pos = rb_entry(nd, struct map, rb_node), *pair;
+ /*
+ * If it is the kernel, kallsyms is always "[kernel.kallsyms]", while
+ * for vmlinux the map will have the path of the vmlinux file being used,
+ * so use the short name, less descriptive but the same ("[kernel]") in
+ * both cases.
+ */
+ pair = map_groups__find_by_name(&kallsyms.kmaps, type,
+ (pos->dso->kernel ?
+ pos->dso->short_name :
+ pos->dso->name));
+ if (pair)
+ pair->priv = 1;
+ else
+ map__fprintf(pos, stderr);
+ }
+
+ pr_info("Maps in vmlinux with a different name in kallsyms:\n");
+
+ for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
+ struct map *pos = rb_entry(nd, struct map, rb_node), *pair;
+
+ pair = map_groups__find(&kallsyms.kmaps, type, pos->start);
+ if (pair == NULL || pair->priv)
+ continue;
+
+ if (pair->start == pos->start) {
+ pair->priv = 1;
+ pr_info(" %Lx-%Lx %Lx %s in kallsyms as",
+ pos->start, pos->end, pos->pgoff, pos->dso->name);
+ if (pos->pgoff != pair->pgoff || pos->end != pair->end)
+ pr_info(": \n*%Lx-%Lx %Lx",
+ pair->start, pair->end, pair->pgoff);
+ pr_info(" %s\n", pair->dso->name);
+ pair->priv = 1;
+ }
+ }
+
+ pr_info("Maps only in kallsyms:\n");
+
+ for (nd = rb_first(&kallsyms.kmaps.maps[type]);
+ nd; nd = rb_next(nd)) {
+ struct map *pos = rb_entry(nd, struct map, rb_node);
+
+ if (!pos->priv)
+ map__fprintf(pos, stderr);
+ }
+out:
+ return err;
+}
+
+static struct test {
+ const char *desc;
+ int (*func)(void);
+} tests[] = {
+ {
+ .desc = "vmlinux symtab matches kallsyms",
+ .func = test__vmlinux_matches_kallsyms,
+ },
+ {
+ .func = NULL,
+ },
+};
+
+static int __cmd_test(void)
+{
+ int i = 0;
+
+ page_size = sysconf(_SC_PAGE_SIZE);
+
+ while (tests[i].func) {
+ int err;
+ pr_info("%2d: %s:", i + 1, tests[i].desc);
+ pr_debug("\n--- start ---\n");
+ err = tests[i].func();
+ pr_debug("---- end ----\n%s:", tests[i].desc);
+ pr_info(" %s\n", err ? "FAILED!\n" : "Ok");
+ ++i;
+ }
+
+ return 0;
+}
+
+static const char * const test_usage[] = {
+ "perf test [<options>]",
+ NULL,
+};
+
+static const struct option test_options[] = {
+ OPT_BOOLEAN('v', "verbose", &verbose,
+ "be more verbose (show symbol address, etc)"),
+ OPT_END()
+};
+
+int cmd_test(int argc, const char **argv, const char *prefix __used)
+{
+ argc = parse_options(argc, argv, test_options, test_usage, 0);
+ if (argc)
+ usage_with_options(test_usage, test_options);
+
+ symbol_conf.priv_size = sizeof(int);
+ symbol_conf.sort_by_name = true;
+ symbol_conf.try_vmlinux_path = true;
+
+ if (symbol__init() < 0)
+ return -1;
+
+ setup_pager();
+
+ return __cmd_test();
+}
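
The tests[] table in builtin-test.c above is terminated by an entry whose .func is NULL, and __cmd_test() simply walks the table until it hits that sentinel, printing Ok or FAILED! per entry. Adding a test is therefore a matter of appending an entry before the sentinel; a sketch with a hypothetical extra test (test__example and its description are illustrative only, and struct test is the one defined above):

    static int test__example(void)
    {
            return 0;       /* 0 means pass, -1 means fail, matching the convention above */
    }

    static struct test example_tests[] = {
            {
                    .desc = "vmlinux symtab matches kallsyms",
                    .func = test__vmlinux_matches_kallsyms,
            },
            {
                    .desc = "example sanity check",  /* hypothetical */
                    .func = test__example,
            },
            {
                    .func = NULL,   /* sentinel: __cmd_test() stops here */
            },
    };
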
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 0d4d8ff7914b..5a52ed9fc10b 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -21,7 +21,6 @@
#include "util/cache.h"
#include <linux/rbtree.h>
#include "util/symbol.h"
-#include "util/string.h"
#include "util/callchain.h"
#include "util/strlist.h"
@@ -43,7 +42,7 @@ static u64 turbo_frequency;
static u64 first_time, last_time;
-static int power_only;
+static bool power_only;
struct per_pid;
@@ -78,8 +77,6 @@ struct per_pid {
struct per_pidcomm *all;
struct per_pidcomm *current;
-
- int painted;
};
@@ -146,9 +143,6 @@ struct wake_event {
static struct power_event *power_events;
static struct wake_event *wake_events;
-struct sample_wrapper *all_samples;
-
-
struct process_filter;
struct process_filter {
char *name;
@@ -569,88 +563,6 @@ static void end_sample_processing(void)
}
}
-static u64 sample_time(event_t *event, const struct perf_session *session)
-{
- int cursor;
-
- cursor = 0;
- if (session->sample_type & PERF_SAMPLE_IP)
- cursor++;
- if (session->sample_type & PERF_SAMPLE_TID)
- cursor++;
- if (session->sample_type & PERF_SAMPLE_TIME)
- return event->sample.array[cursor];
- return 0;
-}
-
-
-/*
- * We first queue all events, sorted backwards by insertion.
- * The order will get flipped later.
- */
-static int queue_sample_event(event_t *event, struct perf_session *session)
-{
- struct sample_wrapper *copy, *prev;
- int size;
-
- size = event->sample.header.size + sizeof(struct sample_wrapper) + 8;
-
- copy = malloc(size);
- if (!copy)
- return 1;
-
- memset(copy, 0, size);
-
- copy->next = NULL;
- copy->timestamp = sample_time(event, session);
-
- memcpy(&copy->data, event, event->sample.header.size);
-
- /* insert in the right place in the list */
-
- if (!all_samples) {
- /* first sample ever */
- all_samples = copy;
- return 0;
- }
-
- if (all_samples->timestamp < copy->timestamp) {
- /* insert at the head of the list */
- copy->next = all_samples;
- all_samples = copy;
- return 0;
- }
-
- prev = all_samples;
- while (prev->next) {
- if (prev->next->timestamp < copy->timestamp) {
- copy->next = prev->next;
- prev->next = copy;
- return 0;
- }
- prev = prev->next;
- }
- /* insert at the end of the list */
- prev->next = copy;
-
- return 0;
-}
-
-static void sort_queued_samples(void)
-{
- struct sample_wrapper *cursor, *next;
-
- cursor = all_samples;
- all_samples = NULL;
-
- while (cursor) {
- next = cursor->next;
- cursor->next = all_samples;
- all_samples = cursor;
- cursor = next;
- }
-}
-
/*
* Sort the pid datastructure
*/
@@ -1014,31 +926,17 @@ static void write_svg_file(const char *filename)
svg_close();
}
-static void process_samples(struct perf_session *session)
-{
- struct sample_wrapper *cursor;
- event_t *event;
-
- sort_queued_samples();
-
- cursor = all_samples;
- while (cursor) {
- event = (void *)&cursor->data;
- cursor = cursor->next;
- process_sample_event(event, session);
- }
-}
-
static struct perf_event_ops event_ops = {
- .comm = process_comm_event,
- .fork = process_fork_event,
- .exit = process_exit_event,
- .sample = queue_sample_event,
+ .comm = process_comm_event,
+ .fork = process_fork_event,
+ .exit = process_exit_event,
+ .sample = process_sample_event,
+ .ordered_samples = true,
};
static int __cmd_timechart(void)
{
- struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0);
+ struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0, false);
int ret = -EINVAL;
if (session == NULL)
@@ -1051,8 +949,6 @@ static int __cmd_timechart(void)
if (ret)
goto out_delete;
- process_samples(session);
-
end_sample_processing();
sort_pids();
@@ -1075,7 +971,6 @@ static const char *record_args[] = {
"record",
"-a",
"-R",
- "-M",
"-f",
"-c", "1",
"-e", "power:power_start",
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 1f529321607e..ed9b5b6905fa 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -55,9 +55,9 @@
#include <linux/unistd.h>
#include <linux/types.h>
-static int fd[MAX_NR_CPUS][MAX_COUNTERS];
+static int *fd[MAX_NR_CPUS][MAX_COUNTERS];
-static int system_wide = 0;
+static bool system_wide = false;
static int default_interval = 0;
@@ -65,18 +65,21 @@ static int count_filter = 5;
static int print_entries;
static int target_pid = -1;
-static int inherit = 0;
+static int target_tid = -1;
+static pid_t *all_tids = NULL;
+static int thread_num = 0;
+static bool inherit = false;
static int profile_cpu = -1;
static int nr_cpus = 0;
static unsigned int realtime_prio = 0;
-static int group = 0;
+static bool group = false;
static unsigned int page_size;
static unsigned int mmap_pages = 16;
static int freq = 1000; /* 1 KHz */
static int delay_secs = 2;
-static int zero = 0;
-static int dump_symtab = 0;
+static bool zero = false;
+static bool dump_symtab = false;
static bool hide_kernel_symbols = false;
static bool hide_user_symbols = false;
@@ -133,7 +136,7 @@ static inline struct symbol *sym_entry__symbol(struct sym_entry *self)
return ((void *)self) + symbol_conf.priv_size;
}
-static void get_term_dimensions(struct winsize *ws)
+void get_term_dimensions(struct winsize *ws)
{
char *s = getenv("LINES");
@@ -169,7 +172,7 @@ static void sig_winch_handler(int sig __used)
update_print_entries(&winsize);
}
-static void parse_source(struct sym_entry *syme)
+static int parse_source(struct sym_entry *syme)
{
struct symbol *sym;
struct sym_entry_source *source;
@@ -180,12 +183,21 @@ static void parse_source(struct sym_entry *syme)
u64 len;
if (!syme)
- return;
+ return -1;
+
+ sym = sym_entry__symbol(syme);
+ map = syme->map;
+
+ /*
+ * We can't annotate with just /proc/kallsyms
+ */
+ if (map->dso->origin == DSO__ORIG_KERNEL)
+ return -1;
if (syme->src == NULL) {
syme->src = zalloc(sizeof(*source));
if (syme->src == NULL)
- return;
+ return -1;
pthread_mutex_init(&syme->src->lock, NULL);
}
@@ -195,9 +207,6 @@ static void parse_source(struct sym_entry *syme)
pthread_mutex_lock(&source->lock);
goto out_assign;
}
-
- sym = sym_entry__symbol(syme);
- map = syme->map;
path = map->dso->long_name;
len = sym->end - sym->start;
@@ -209,7 +218,7 @@ static void parse_source(struct sym_entry *syme)
file = popen(command, "r");
if (!file)
- return;
+ return -1;
pthread_mutex_lock(&source->lock);
source->lines_tail = &source->lines;
@@ -245,6 +254,7 @@ static void parse_source(struct sym_entry *syme)
out_assign:
sym_filter_entry = syme;
pthread_mutex_unlock(&source->lock);
+ return 0;
}
static void __zero_source_counters(struct sym_entry *syme)
@@ -410,7 +420,9 @@ static double sym_weight(const struct sym_entry *sym)
}
static long samples;
-static long userspace_samples;
+static long kernel_samples, us_samples;
+static long exact_samples;
+static long guest_us_samples, guest_kernel_samples;
static const char CONSOLE_CLEAR[] = "";
static void __list_insert_active_sym(struct sym_entry *syme)
@@ -450,7 +462,11 @@ static void print_sym_table(void)
int printed = 0, j;
int counter, snap = !display_weighted ? sym_counter : 0;
float samples_per_sec = samples/delay_secs;
- float ksamples_per_sec = (samples-userspace_samples)/delay_secs;
+ float ksamples_per_sec = kernel_samples/delay_secs;
+ float us_samples_per_sec = (us_samples)/delay_secs;
+ float guest_kernel_samples_per_sec = (guest_kernel_samples)/delay_secs;
+ float guest_us_samples_per_sec = (guest_us_samples)/delay_secs;
+ float esamples_percent = (100.0*exact_samples)/samples;
float sum_ksamples = 0.0;
struct sym_entry *syme, *n;
struct rb_root tmp = RB_ROOT;
@@ -458,7 +474,8 @@ static void print_sym_table(void)
int sym_width = 0, dso_width = 0, dso_short_width = 0;
const int win_width = winsize.ws_col - 1;
- samples = userspace_samples = 0;
+ samples = us_samples = kernel_samples = exact_samples = 0;
+ guest_kernel_samples = guest_us_samples = 0;
/* Sort the active symbols */
pthread_mutex_lock(&active_symbols_lock);
@@ -489,9 +506,30 @@ static void print_sym_table(void)
puts(CONSOLE_CLEAR);
printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
- printf( " PerfTop:%8.0f irqs/sec kernel:%4.1f%% [",
- samples_per_sec,
- 100.0 - (100.0*((samples_per_sec-ksamples_per_sec)/samples_per_sec)));
+ if (!perf_guest) {
+ printf(" PerfTop:%8.0f irqs/sec kernel:%4.1f%%"
+ " exact: %4.1f%% [",
+ samples_per_sec,
+ 100.0 - (100.0 * ((samples_per_sec - ksamples_per_sec) /
+ samples_per_sec)),
+ esamples_percent);
+ } else {
+ printf(" PerfTop:%8.0f irqs/sec kernel:%4.1f%% us:%4.1f%%"
+ " guest kernel:%4.1f%% guest us:%4.1f%%"
+ " exact: %4.1f%% [",
+ samples_per_sec,
+ 100.0 - (100.0 * ((samples_per_sec-ksamples_per_sec) /
+ samples_per_sec)),
+ 100.0 - (100.0 * ((samples_per_sec-us_samples_per_sec) /
+ samples_per_sec)),
+ 100.0 - (100.0 * ((samples_per_sec -
+ guest_kernel_samples_per_sec) /
+ samples_per_sec)),
+ 100.0 - (100.0 * ((samples_per_sec -
+ guest_us_samples_per_sec) /
+ samples_per_sec)),
+ esamples_percent);
+ }
if (nr_counters == 1 || !display_weighted) {
printf("%Ld", (u64)attrs[0].sample_period);
@@ -514,13 +552,15 @@ static void print_sym_table(void)
if (target_pid != -1)
printf(" (target_pid: %d", target_pid);
+ else if (target_tid != -1)
+ printf(" (target_tid: %d", target_tid);
else
printf(" (all");
if (profile_cpu != -1)
printf(", cpu: %d)\n", profile_cpu);
else {
- if (target_pid != -1)
+ if (target_tid != -1)
printf(")\n");
else
printf(", %d CPUs)\n", nr_cpus);
@@ -582,7 +622,6 @@ static void print_sym_table(void)
syme = rb_entry(nd, struct sym_entry, rb_node);
sym = sym_entry__symbol(syme);
-
if (++printed > print_entries || (int)syme->snap_count < count_filter)
continue;
@@ -746,7 +785,7 @@ static int key_mapped(int c)
return 0;
}
-static void handle_keypress(int c)
+static void handle_keypress(struct perf_session *session, int c)
{
if (!key_mapped(c)) {
struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
@@ -815,7 +854,7 @@ static void handle_keypress(int c)
case 'Q':
printf("exiting.\n");
if (dump_symtab)
- dsos__fprintf(stderr);
+ perf_session__fprintf_dsos(session, stderr);
exit(0);
case 's':
prompt_symbol(&sym_filter_entry, "Enter details symbol");
@@ -839,7 +878,7 @@ static void handle_keypress(int c)
display_weighted = ~display_weighted;
break;
case 'z':
- zero = ~zero;
+ zero = !zero;
break;
default:
break;
@@ -851,6 +890,7 @@ static void *display_thread(void *arg __used)
struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
struct termios tc, save;
int delay_msecs, c;
+ struct perf_session *session = (struct perf_session *) arg;
tcgetattr(0, &save);
tc = save;
@@ -871,7 +911,7 @@ repeat:
c = getc(stdin);
tcsetattr(0, TCSAFLUSH, &save);
- handle_keypress(c);
+ handle_keypress(session, c);
goto repeat;
return NULL;
@@ -942,24 +982,48 @@ static void event__process_sample(const event_t *self,
u64 ip = self->ip.ip;
struct sym_entry *syme;
struct addr_location al;
+ struct machine *machine;
u8 origin = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
++samples;
switch (origin) {
case PERF_RECORD_MISC_USER:
- ++userspace_samples;
+ ++us_samples;
if (hide_user_symbols)
return;
+ machine = perf_session__find_host_machine(session);
break;
case PERF_RECORD_MISC_KERNEL:
+ ++kernel_samples;
if (hide_kernel_symbols)
return;
+ machine = perf_session__find_host_machine(session);
+ break;
+ case PERF_RECORD_MISC_GUEST_KERNEL:
+ ++guest_kernel_samples;
+ machine = perf_session__find_machine(session, self->ip.pid);
break;
+ case PERF_RECORD_MISC_GUEST_USER:
+ ++guest_us_samples;
+ /*
+ * TODO: we don't process guest user samples from the
+ * host side, except for simple counting.
+ */
+ return;
default:
return;
}
+ if (!machine && perf_guest) {
+ pr_err("Can't find guest [%d]'s kernel information\n",
+ self->ip.pid);
+ return;
+ }
+
+ if (self->header.misc & PERF_RECORD_MISC_EXACT_IP)
+ exact_samples++;
+
if (event__preprocess_sample(self, session, &al, symbol_filter) < 0 ||
al.filtered)
return;
@@ -976,7 +1040,7 @@ static void event__process_sample(const event_t *self,
* --hide-kernel-symbols, even if the user specifies an
* invalid --vmlinux ;-)
*/
- if (al.map == session->vmlinux_maps[MAP__FUNCTION] &&
+ if (al.map == machine->vmlinux_maps[MAP__FUNCTION] &&
RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
pr_err("The %s file can't be used\n",
symbol_conf.vmlinux_name);
@@ -990,7 +1054,17 @@ static void event__process_sample(const event_t *self,
if (sym_filter_entry_sched) {
sym_filter_entry = sym_filter_entry_sched;
sym_filter_entry_sched = NULL;
- parse_source(sym_filter_entry);
+ if (parse_source(sym_filter_entry) < 0) {
+ struct symbol *sym = sym_entry__symbol(sym_filter_entry);
+
+ pr_err("Can't annotate %s", sym->name);
+ if (sym_filter_entry->map->dso->origin == DSO__ORIG_KERNEL) {
+ pr_err(": No vmlinux file was found in the path:\n");
+ vmlinux_path__fprintf(stderr);
+ } else
+ pr_err(".\n");
+ exit(1);
+ }
}
syme = symbol__priv(al.sym);
@@ -1106,16 +1180,21 @@ static void perf_session__mmap_read_counter(struct perf_session *self,
md->prev = old;
}
-static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS];
-static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS];
+static struct pollfd *event_array;
+static struct mmap_data *mmap_array[MAX_NR_CPUS][MAX_COUNTERS];
static void perf_session__mmap_read(struct perf_session *self)
{
- int i, counter;
+ int i, counter, thread_index;
for (i = 0; i < nr_cpus; i++) {
for (counter = 0; counter < nr_counters; counter++)
- perf_session__mmap_read_counter(self, &mmap_array[i][counter]);
+ for (thread_index = 0;
+ thread_index < thread_num;
+ thread_index++) {
+ perf_session__mmap_read_counter(self,
+ &mmap_array[i][counter][thread_index]);
+ }
}
}
@@ -1126,9 +1205,10 @@ static void start_counter(int i, int counter)
{
struct perf_event_attr *attr;
int cpu;
+ int thread_index;
cpu = profile_cpu;
- if (target_pid == -1 && profile_cpu == -1)
+ if (target_tid == -1 && profile_cpu == -1)
cpu = cpumap[i];
attr = attrs + counter;
@@ -1144,55 +1224,58 @@ static void start_counter(int i, int counter)
attr->inherit = (cpu < 0) && inherit;
attr->mmap = 1;
+ for (thread_index = 0; thread_index < thread_num; thread_index++) {
try_again:
- fd[i][counter] = sys_perf_event_open(attr, target_pid, cpu, group_fd, 0);
-
- if (fd[i][counter] < 0) {
- int err = errno;
+ fd[i][counter][thread_index] = sys_perf_event_open(attr,
+ all_tids[thread_index], cpu, group_fd, 0);
+
+ if (fd[i][counter][thread_index] < 0) {
+ int err = errno;
+
+ if (err == EPERM || err == EACCES)
+ die("No permission - are you root?\n");
+ /*
+ * If it's cycles then fall back to hrtimer
+ * based cpu-clock-tick sw counter, which
+ * is always available even if no PMU support:
+ */
+ if (attr->type == PERF_TYPE_HARDWARE
+ && attr->config == PERF_COUNT_HW_CPU_CYCLES) {
+
+ if (verbose)
+ warning(" ... trying to fall back to cpu-clock-ticks\n");
+
+ attr->type = PERF_TYPE_SOFTWARE;
+ attr->config = PERF_COUNT_SW_CPU_CLOCK;
+ goto try_again;
+ }
+ printf("\n");
+ error("perfcounter syscall returned with %d (%s)\n",
+ fd[i][counter][thread_index], strerror(err));
+ die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
+ exit(-1);
+ }
+ assert(fd[i][counter][thread_index] >= 0);
+ fcntl(fd[i][counter][thread_index], F_SETFL, O_NONBLOCK);
- if (err == EPERM || err == EACCES)
- die("No permission - are you root?\n");
/*
- * If it's cycles then fall back to hrtimer
- * based cpu-clock-tick sw counter, which
- * is always available even if no PMU support:
+ * First counter acts as the group leader:
*/
- if (attr->type == PERF_TYPE_HARDWARE
- && attr->config == PERF_COUNT_HW_CPU_CYCLES) {
-
- if (verbose)
- warning(" ... trying to fall back to cpu-clock-ticks\n");
-
- attr->type = PERF_TYPE_SOFTWARE;
- attr->config = PERF_COUNT_SW_CPU_CLOCK;
- goto try_again;
- }
- printf("\n");
- error("perfcounter syscall returned with %d (%s)\n",
- fd[i][counter], strerror(err));
- die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
- exit(-1);
+ if (group && group_fd == -1)
+ group_fd = fd[i][counter][thread_index];
+
+ event_array[nr_poll].fd = fd[i][counter][thread_index];
+ event_array[nr_poll].events = POLLIN;
+ nr_poll++;
+
+ mmap_array[i][counter][thread_index].counter = counter;
+ mmap_array[i][counter][thread_index].prev = 0;
+ mmap_array[i][counter][thread_index].mask = mmap_pages*page_size - 1;
+ mmap_array[i][counter][thread_index].base = mmap(NULL, (mmap_pages+1)*page_size,
+ PROT_READ, MAP_SHARED, fd[i][counter][thread_index], 0);
+ if (mmap_array[i][counter][thread_index].base == MAP_FAILED)
+ die("failed to mmap with %d (%s)\n", errno, strerror(errno));
}
- assert(fd[i][counter] >= 0);
- fcntl(fd[i][counter], F_SETFL, O_NONBLOCK);
-
- /*
- * First counter acts as the group leader:
- */
- if (group && group_fd == -1)
- group_fd = fd[i][counter];
-
- event_array[nr_poll].fd = fd[i][counter];
- event_array[nr_poll].events = POLLIN;
- nr_poll++;
-
- mmap_array[i][counter].counter = counter;
- mmap_array[i][counter].prev = 0;
- mmap_array[i][counter].mask = mmap_pages*page_size - 1;
- mmap_array[i][counter].base = mmap(NULL, (mmap_pages+1)*page_size,
- PROT_READ, MAP_SHARED, fd[i][counter], 0);
- if (mmap_array[i][counter].base == MAP_FAILED)
- die("failed to mmap with %d (%s)\n", errno, strerror(errno));
}
static int __cmd_top(void)
@@ -1204,12 +1287,12 @@ static int __cmd_top(void)
* FIXME: perf_session__new should allow passing a O_MMAP, so that all this
* mmap reading, etc is encapsulated in it. Use O_WRONLY for now.
*/
- struct perf_session *session = perf_session__new(NULL, O_WRONLY, false);
+ struct perf_session *session = perf_session__new(NULL, O_WRONLY, false, false);
if (session == NULL)
return -ENOMEM;
- if (target_pid != -1)
- event__synthesize_thread(target_pid, event__process, session);
+ if (target_tid != -1)
+ event__synthesize_thread(target_tid, event__process, session);
else
event__synthesize_threads(event__process, session);
@@ -1220,11 +1303,11 @@ static int __cmd_top(void)
}
/* Wait for a minimal set of events before starting the snapshot */
- poll(event_array, nr_poll, 100);
+ poll(&event_array[0], nr_poll, 100);
perf_session__mmap_read(session);
- if (pthread_create(&thread, NULL, display_thread, NULL)) {
+ if (pthread_create(&thread, NULL, display_thread, session)) {
printf("Could not create display thread.\n");
exit(-1);
}
@@ -1263,7 +1346,9 @@ static const struct option options[] = {
OPT_INTEGER('c', "count", &default_interval,
"event period to sample"),
OPT_INTEGER('p', "pid", &target_pid,
- "profile events on existing pid"),
+ "profile events on existing process id"),
+ OPT_INTEGER('t', "tid", &target_tid,
+ "profile events on existing thread id"),
OPT_BOOLEAN('a', "all-cpus", &system_wide,
"system-wide collection from all CPUs"),
OPT_INTEGER('C', "CPU", &profile_cpu,
@@ -1296,7 +1381,7 @@ static const struct option options[] = {
"display this many functions"),
OPT_BOOLEAN('U', "hide_user_symbols", &hide_user_symbols,
"hide user symbols"),
- OPT_BOOLEAN('v', "verbose", &verbose,
+ OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_END()
};
@@ -1304,6 +1389,7 @@ static const struct option options[] = {
int cmd_top(int argc, const char **argv, const char *prefix __used)
{
int counter;
+ int i, j;
page_size = sysconf(_SC_PAGE_SIZE);
@@ -1311,8 +1397,39 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
if (argc)
usage_with_options(top_usage, options);
+ if (target_pid != -1) {
+ target_tid = target_pid;
+ thread_num = find_all_tid(target_pid, &all_tids);
+ if (thread_num <= 0) {
+ fprintf(stderr, "Can't find all threads of pid %d\n",
+ target_pid);
+ usage_with_options(top_usage, options);
+ }
+ } else {
+ all_tids = malloc(sizeof(pid_t));
+ if (!all_tids)
+ return -ENOMEM;
+
+ all_tids[0] = target_tid;
+ thread_num = 1;
+ }
+
+ for (i = 0; i < MAX_NR_CPUS; i++) {
+ for (j = 0; j < MAX_COUNTERS; j++) {
+ fd[i][j] = malloc(sizeof(int)*thread_num);
+ mmap_array[i][j] = zalloc(
+ sizeof(struct mmap_data)*thread_num);
+ if (!fd[i][j] || !mmap_array[i][j])
+ return -ENOMEM;
+ }
+ }
+ event_array = malloc(
+ sizeof(struct pollfd)*MAX_NR_CPUS*MAX_COUNTERS*thread_num);
+ if (!event_array)
+ return -ENOMEM;
+
/* CPU and PID are mutually exclusive */
- if (target_pid != -1 && profile_cpu != -1) {
+ if (target_tid > 0 && profile_cpu != -1) {
printf("WARNING: PID switch overriding CPU\n");
sleep(1);
profile_cpu = -1;
@@ -1353,7 +1470,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
attrs[counter].sample_period = default_interval;
}
- if (target_pid != -1 || profile_cpu != -1)
+ if (target_tid != -1 || profile_cpu != -1)
nr_cpus = 1;
else
nr_cpus = read_cpu_map();
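
As in builtin-stat.c, cmd_top() above expands -p <pid> through find_all_tid() and then sizes the per-thread fd, mmap_array and event_array allocations from the thread count it returns. find_all_tid() itself is not part of this hunk; one plausible way to do such an expansion is to list /proc/<pid>/task, roughly as sketched below (list_tids is a hypothetical stand-in, not the util/thread.c code):

    #include <dirent.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/types.h>

    /* Enumerate the thread ids of a process from /proc/<pid>/task. */
    static int list_tids(pid_t pid, pid_t **tids)
    {
            char path[64];
            struct dirent *d;
            DIR *dir;
            pid_t *tmp;
            int n = 0;

            snprintf(path, sizeof(path), "/proc/%d/task", (int)pid);
            dir = opendir(path);
            if (!dir)
                    return -1;

            *tids = NULL;
            while ((d = readdir(dir)) != NULL) {
                    if (d->d_name[0] == '.')        /* skip "." and ".." */
                            continue;
                    tmp = realloc(*tids, (n + 1) * sizeof(pid_t));
                    if (!tmp)
                            break;
                    *tids = tmp;
                    (*tids)[n++] = (pid_t)atoi(d->d_name);
            }
            closedir(dir);
            return n;       /* number of threads found, or -1 if the pid does not exist */
    }

For the ring buffers set up in start_counter(), the sizing with the defaults in this file works out to: data area = mmap_pages * page_size = 16 * 4096 = 65536 bytes, wrap mask = 65535 (0xffff, valid because the size is a power of two), and (mmap_pages + 1) pages are mmapped, the additional page holding the perf control header that precedes the data area (page_size of 4096 assumed here; the tool reads it via sysconf).
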
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 407041d20de0..95fcb0517a9d 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -11,6 +11,8 @@
static char const *script_name;
static char const *generate_script_lang;
+static bool debug_ordering;
+static u64 last_timestamp;
static int default_start_script(const char *script __unused,
int argc __unused,
@@ -51,6 +53,8 @@ static void setup_scripting(void)
static int cleanup_scripting(void)
{
+ pr_debug("\nperf trace script stopped\n");
+
return scripting_ops->stop_script();
}
@@ -87,6 +91,14 @@ static int process_sample_event(event_t *event, struct perf_session *session)
}
if (session->sample_type & PERF_SAMPLE_RAW) {
+ if (debug_ordering) {
+ if (data.time < last_timestamp) {
+ pr_err("Samples misordered, previous: %llu "
+ "this: %llu\n", last_timestamp,
+ data.time);
+ }
+ last_timestamp = data.time;
+ }
/*
* FIXME: better resolve from pid from the struct trace_entry
* field, although it should be the same than this perf
@@ -97,17 +109,31 @@ static int process_sample_event(event_t *event, struct perf_session *session)
data.time, thread->comm);
}
- session->events_stats.total += data.period;
+ session->hists.stats.total += data.period;
return 0;
}
static struct perf_event_ops event_ops = {
.sample = process_sample_event,
.comm = event__process_comm,
+ .attr = event__process_attr,
+ .event_type = event__process_event_type,
+ .tracing_data = event__process_tracing_data,
+ .build_id = event__process_build_id,
+ .ordered_samples = true,
};
+extern volatile int session_done;
+
+static void sig_handler(int sig __unused)
+{
+ session_done = 1;
+}
+
static int __cmd_trace(struct perf_session *session)
{
+ signal(SIGINT, sig_handler);
+
return perf_session__process_events(session, &event_ops);
}
@@ -505,7 +531,7 @@ static const char * const trace_usage[] = {
static const struct option options[] = {
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
- OPT_BOOLEAN('v', "verbose", &verbose,
+ OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('L', "Latency", &latency_format,
"show latency attributes (irqs/preemption disabled, etc)"),
@@ -518,6 +544,8 @@ static const struct option options[] = {
"generate perf-trace.xx script in specified language"),
OPT_STRING('i', "input", &input_name, "file",
"input file name"),
+ OPT_BOOLEAN('d', "debug-ordering", &debug_ordering,
+ "check that samples time ordering is monotonic"),
OPT_END()
};
@@ -548,6 +576,65 @@ int cmd_trace(int argc, const char **argv, const char *prefix __used)
suffix = REPORT_SUFFIX;
}
+ if (!suffix && argc >= 2 && strncmp(argv[1], "-", strlen("-")) != 0) {
+ char *record_script_path, *report_script_path;
+ int live_pipe[2];
+ pid_t pid;
+
+ record_script_path = get_script_path(argv[1], RECORD_SUFFIX);
+ if (!record_script_path) {
+ fprintf(stderr, "record script not found\n");
+ return -1;
+ }
+
+ report_script_path = get_script_path(argv[1], REPORT_SUFFIX);
+ if (!report_script_path) {
+ fprintf(stderr, "report script not found\n");
+ return -1;
+ }
+
+ if (pipe(live_pipe) < 0) {
+ perror("failed to create pipe");
+ exit(-1);
+ }
+
+ pid = fork();
+ if (pid < 0) {
+ perror("failed to fork");
+ exit(-1);
+ }
+
+ if (!pid) {
+ dup2(live_pipe[1], 1);
+ close(live_pipe[0]);
+
+ __argv = malloc(5 * sizeof(const char *));
+ __argv[0] = "/bin/sh";
+ __argv[1] = record_script_path;
+ __argv[2] = "-o";
+ __argv[3] = "-";
+ __argv[4] = NULL;
+
+ execvp("/bin/sh", (char **)__argv);
+ exit(-1);
+ }
+
+ dup2(live_pipe[0], 0);
+ close(live_pipe[1]);
+
+ __argv = malloc((argc + 3) * sizeof(const char *));
+ __argv[0] = "/bin/sh";
+ __argv[1] = report_script_path;
+ for (i = 2; i < argc; i++)
+ __argv[i] = argv[i];
+ __argv[i++] = "-i";
+ __argv[i++] = "-";
+ __argv[i++] = NULL;
+
+ execvp("/bin/sh", (char **)__argv);
+ exit(-1);
+ }
+
if (suffix) {
script_path = get_script_path(argv[2], suffix);
if (!script_path) {
@@ -576,11 +663,12 @@ int cmd_trace(int argc, const char **argv, const char *prefix __used)
if (!script_name)
setup_pager();
- session = perf_session__new(input_name, O_RDONLY, 0);
+ session = perf_session__new(input_name, O_RDONLY, 0, false);
if (session == NULL)
return -ENOMEM;
- if (!perf_session__has_traces(session, "record -R"))
+ if (strcmp(input_name, "-") &&
+ !perf_session__has_traces(session, "record -R"))
return -EINVAL;
if (generate_script_lang) {
@@ -617,6 +705,7 @@ int cmd_trace(int argc, const char **argv, const char *prefix __used)
err = scripting_ops->start_script(script_name, argc, argv);
if (err)
goto out;
+ pr_debug("perf trace started with script %s\n\n", script_name);
}
err = __cmd_trace(session);
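
The new live-mode branch in cmd_trace() above chains the record and report scripts without an intermediate perf.data file: it forks, points the child's stdout at a pipe, execs the record script with -o -, and then points its own stdin at the read end before execing the report script with -i -. The plumbing is the classic pipe/fork/dup2/exec pattern; reduced to a self-contained example with generic commands standing in for the two scripts (ls feeding wc -l), it looks like this:

    #include <stdio.h>
    #include <sys/types.h>
    #include <unistd.h>

    int main(void)
    {
            int live_pipe[2];
            pid_t pid;

            if (pipe(live_pipe) < 0) {
                    perror("pipe");
                    return 1;
            }

            pid = fork();
            if (pid < 0) {
                    perror("fork");
                    return 1;
            }

            if (!pid) {             /* child: the "record" side, writing into the pipe */
                    dup2(live_pipe[1], 1);
                    close(live_pipe[0]);
                    execlp("ls", "ls", (char *)NULL);
                    _exit(127);
            }

            /* parent: the "report" side, reading what the child writes */
            dup2(live_pipe[0], 0);
            close(live_pipe[1]);
            execlp("wc", "wc", "-l", (char *)NULL);
            perror("execlp");
            return 1;
    }
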
diff --git a/tools/perf/builtin.h b/tools/perf/builtin.h
index 10fe49e7048a..921245b28583 100644
--- a/tools/perf/builtin.h
+++ b/tools/perf/builtin.h
@@ -32,5 +32,8 @@ extern int cmd_version(int argc, const char **argv, const char *prefix);
extern int cmd_probe(int argc, const char **argv, const char *prefix);
extern int cmd_kmem(int argc, const char **argv, const char *prefix);
extern int cmd_lock(int argc, const char **argv, const char *prefix);
+extern int cmd_kvm(int argc, const char **argv, const char *prefix);
+extern int cmd_test(int argc, const char **argv, const char *prefix);
+extern int cmd_inject(int argc, const char **argv, const char *prefix);
#endif
diff --git a/tools/perf/command-list.txt b/tools/perf/command-list.txt
index db6ee94d4a8e..949d77fc0b97 100644
--- a/tools/perf/command-list.txt
+++ b/tools/perf/command-list.txt
@@ -8,6 +8,7 @@ perf-bench mainporcelain common
perf-buildid-cache mainporcelain common
perf-buildid-list mainporcelain common
perf-diff mainporcelain common
+perf-inject mainporcelain common
perf-list mainporcelain common
perf-sched mainporcelain common
perf-record mainporcelain common
@@ -19,3 +20,5 @@ perf-trace mainporcelain common
perf-probe mainporcelain common
perf-kmem mainporcelain common
perf-lock mainporcelain common
+perf-kvm mainporcelain common
+perf-test mainporcelain common
diff --git a/tools/perf/perf-archive.sh b/tools/perf/perf-archive.sh
index 910468e6e01c..2e7a4f417e20 100644
--- a/tools/perf/perf-archive.sh
+++ b/tools/perf/perf-archive.sh
@@ -30,4 +30,7 @@ done
tar cfj $PERF_DATA.tar.bz2 -C $DEBUGDIR -T $MANIFEST
rm -f $MANIFEST $BUILDIDS
+echo -e "Now please run:\n"
+echo -e "$ tar xvf $PERF_DATA.tar.bz2 -C ~/.debug\n"
+echo "on the machine where you need to run 'perf report'."
exit 0
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index cd32c200cdb3..08e0e5d2b50e 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -13,9 +13,10 @@
#include "util/quote.h"
#include "util/run-command.h"
#include "util/parse-events.h"
-#include "util/string.h"
#include "util/debugfs.h"
+bool use_browser;
+
const char perf_usage_string[] =
"perf [--version] [--help] COMMAND [ARGS]";
@@ -262,6 +263,8 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
set_debugfs_path();
status = p->fn(argc, argv, prefix);
+ exit_browser(status);
+
if (status)
return status & 0xff;
@@ -304,6 +307,9 @@ static void handle_internal_command(int argc, const char **argv)
{ "probe", cmd_probe, 0 },
{ "kmem", cmd_kmem, 0 },
{ "lock", cmd_lock, 0 },
+ { "kvm", cmd_kvm, 0 },
+ { "test", cmd_test, 0 },
+ { "inject", cmd_inject, 0 },
};
unsigned int i;
static const char ext[] = STRIP_EXTENSION;
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 6fb379bc1d1f..02821febb704 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -1,6 +1,10 @@
#ifndef _PERF_PERF_H
#define _PERF_PERF_H
+struct winsize;
+
+void get_term_dimensions(struct winsize *ws);
+
#if defined(__i386__)
#include "../../arch/x86/include/asm/unistd.h"
#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
@@ -102,8 +106,6 @@ static inline unsigned long long rdclock(void)
#define __user
#define asmlinkage
-#define __used __attribute__((__unused__))
-
#define unlikely(x) __builtin_expect(!!(x), 0)
#define min(x, y) ({ \
typeof(x) _min1 = (x); \
@@ -129,4 +131,6 @@ struct ip_callchain {
u64 ips[0];
};
+extern int perf_host, perf_guest;
+
#endif
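
perf.h now declares get_term_dimensions(), which builtin-top.c (above) defines and which starts by consulting the LINES environment variable, as visible in that hunk. A common fallback for such a helper, when the environment gives nothing usable, is the TIOCGWINSZ ioctl; a minimal standalone version of just that fallback (not the perf implementation, which layers the environment check on top):

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
            struct winsize ws;

            if (ioctl(STDIN_FILENO, TIOCGWINSZ, &ws) == 0)
                    printf("%u rows x %u cols\n",
                           (unsigned int)ws.ws_row, (unsigned int)ws.ws_col);
            else
                    perror("TIOCGWINSZ");
            return 0;
    }
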
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm b/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm
index f869c48dc9b0..d94b40c8ac85 100644
--- a/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm
@@ -15,6 +15,7 @@ our @EXPORT_OK = ( @{ $EXPORT_TAGS{'all'} } );
our @EXPORT = qw(
avg nsecs nsecs_secs nsecs_nsecs nsecs_usecs print_nsecs
+clear_term
);
our $VERSION = '0.01';
@@ -55,6 +56,11 @@ sub nsecs_str {
return $str;
}
+sub clear_term
+{
+ print "\x1b[H\x1b[2J";
+}
+
1;
__END__
=head1 NAME
diff --git a/tools/perf/scripts/perl/bin/check-perf-trace-record b/tools/perf/scripts/perl/bin/check-perf-trace-record
index e6cb1474f8e8..423ad6aed056 100644
--- a/tools/perf/scripts/perl/bin/check-perf-trace-record
+++ b/tools/perf/scripts/perl/bin/check-perf-trace-record
@@ -1,2 +1,2 @@
#!/bin/bash
-perf record -c 1 -f -a -M -R -e kmem:kmalloc -e irq:softirq_entry -e kmem:kfree
+perf record -a -e kmem:kmalloc -e irq:softirq_entry -e kmem:kfree
diff --git a/tools/perf/scripts/perl/bin/failed-syscalls-record b/tools/perf/scripts/perl/bin/failed-syscalls-record
index f8885d389e6f..eb5846bcb565 100644
--- a/tools/perf/scripts/perl/bin/failed-syscalls-record
+++ b/tools/perf/scripts/perl/bin/failed-syscalls-record
@@ -1,2 +1,2 @@
#!/bin/bash
-perf record -c 1 -f -a -M -R -e raw_syscalls:sys_exit
+perf record -a -e raw_syscalls:sys_exit $@
diff --git a/tools/perf/scripts/perl/bin/failed-syscalls-report b/tools/perf/scripts/perl/bin/failed-syscalls-report
index 8bfc660e5056..e3a5e55d54ff 100644
--- a/tools/perf/scripts/perl/bin/failed-syscalls-report
+++ b/tools/perf/scripts/perl/bin/failed-syscalls-report
@@ -1,4 +1,10 @@
#!/bin/bash
# description: system-wide failed syscalls
# args: [comm]
-perf trace -s ~/libexec/perf-core/scripts/perl/failed-syscalls.pl $1
+if [ $# -gt 0 ] ; then
+ if ! expr match "$1" "-" > /dev/null ; then
+ comm=$1
+ shift
+ fi
+fi
+perf trace $@ -s ~/libexec/perf-core/scripts/perl/failed-syscalls.pl $comm
diff --git a/tools/perf/scripts/perl/bin/rw-by-file-record b/tools/perf/scripts/perl/bin/rw-by-file-record
index b25056ebf963..5bfaae5a6cba 100644
--- a/tools/perf/scripts/perl/bin/rw-by-file-record
+++ b/tools/perf/scripts/perl/bin/rw-by-file-record
@@ -1,2 +1,3 @@
#!/bin/bash
-perf record -c 1 -f -a -M -R -e syscalls:sys_enter_read -e syscalls:sys_enter_write
+perf record -a -e syscalls:sys_enter_read -e syscalls:sys_enter_write $@
+
diff --git a/tools/perf/scripts/perl/bin/rw-by-file-report b/tools/perf/scripts/perl/bin/rw-by-file-report
index eddb9ccce6a5..d83070b7eeb5 100644
--- a/tools/perf/scripts/perl/bin/rw-by-file-report
+++ b/tools/perf/scripts/perl/bin/rw-by-file-report
@@ -1,7 +1,13 @@
#!/bin/bash
# description: r/w activity for a program, by file
# args: <comm>
-perf trace -s ~/libexec/perf-core/scripts/perl/rw-by-file.pl $1
+if [ $# -lt 1 ] ; then
+ echo "usage: rw-by-file <comm>"
+ exit
+fi
+comm=$1
+shift
+perf trace $@ -s ~/libexec/perf-core/scripts/perl/rw-by-file.pl $comm
diff --git a/tools/perf/scripts/perl/bin/rw-by-pid-record b/tools/perf/scripts/perl/bin/rw-by-pid-record
index 8903979c5b6c..6e0b2f7755ac 100644
--- a/tools/perf/scripts/perl/bin/rw-by-pid-record
+++ b/tools/perf/scripts/perl/bin/rw-by-pid-record
@@ -1,2 +1,2 @@
#!/bin/bash
-perf record -c 1 -f -a -M -R -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write
+perf record -a -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write $@
diff --git a/tools/perf/scripts/perl/bin/rw-by-pid-report b/tools/perf/scripts/perl/bin/rw-by-pid-report
index 7f44c25cc857..7ef46983f62f 100644
--- a/tools/perf/scripts/perl/bin/rw-by-pid-report
+++ b/tools/perf/scripts/perl/bin/rw-by-pid-report
@@ -1,6 +1,6 @@
#!/bin/bash
# description: system-wide r/w activity
-perf trace -s ~/libexec/perf-core/scripts/perl/rw-by-pid.pl
+perf trace $@ -s ~/libexec/perf-core/scripts/perl/rw-by-pid.pl
diff --git a/tools/perf/scripts/perl/bin/rwtop-record b/tools/perf/scripts/perl/bin/rwtop-record
new file mode 100644
index 000000000000..6e0b2f7755ac
--- /dev/null
+++ b/tools/perf/scripts/perl/bin/rwtop-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -a -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write $@
diff --git a/tools/perf/scripts/perl/bin/rwtop-report b/tools/perf/scripts/perl/bin/rwtop-report
new file mode 100644
index 000000000000..93e698cd3f38
--- /dev/null
+++ b/tools/perf/scripts/perl/bin/rwtop-report
@@ -0,0 +1,23 @@
+#!/bin/bash
+# description: system-wide r/w top
+# args: [interval]
+n_args=0
+for i in "$@"
+do
+ if expr match "$i" "-" > /dev/null ; then
+ break
+ fi
+ n_args=$(( $n_args + 1 ))
+done
+if [ "$n_args" -gt 1 ] ; then
+ echo "usage: rwtop-report [interval]"
+ exit
+fi
+if [ "$n_args" -gt 0 ] ; then
+ interval=$1
+ shift
+fi
+perf trace $@ -s ~/libexec/perf-core/scripts/perl/rwtop.pl $interval
+
+
+
diff --git a/tools/perf/scripts/perl/bin/wakeup-latency-record b/tools/perf/scripts/perl/bin/wakeup-latency-record
index 6abedda911a4..9f2acaaae9f0 100644
--- a/tools/perf/scripts/perl/bin/wakeup-latency-record
+++ b/tools/perf/scripts/perl/bin/wakeup-latency-record
@@ -1,5 +1,5 @@
#!/bin/bash
-perf record -c 1 -f -a -M -R -e sched:sched_switch -e sched:sched_wakeup
+perf record -a -e sched:sched_switch -e sched:sched_wakeup $@
diff --git a/tools/perf/scripts/perl/bin/wakeup-latency-report b/tools/perf/scripts/perl/bin/wakeup-latency-report
index fce3adcb3249..a0d898f9ca1d 100644
--- a/tools/perf/scripts/perl/bin/wakeup-latency-report
+++ b/tools/perf/scripts/perl/bin/wakeup-latency-report
@@ -1,6 +1,6 @@
#!/bin/bash
# description: system-wide min/max/avg wakeup latency
-perf trace -s ~/libexec/perf-core/scripts/perl/wakeup-latency.pl
+perf trace $@ -s ~/libexec/perf-core/scripts/perl/wakeup-latency.pl
diff --git a/tools/perf/scripts/perl/bin/workqueue-stats-record b/tools/perf/scripts/perl/bin/workqueue-stats-record
index fce6637b19ba..85301f2471ff 100644
--- a/tools/perf/scripts/perl/bin/workqueue-stats-record
+++ b/tools/perf/scripts/perl/bin/workqueue-stats-record
@@ -1,2 +1,2 @@
#!/bin/bash
-perf record -c 1 -f -a -M -R -e workqueue:workqueue_creation -e workqueue:workqueue_destruction -e workqueue:workqueue_execution -e workqueue:workqueue_insertion
+perf record -a -e workqueue:workqueue_creation -e workqueue:workqueue_destruction -e workqueue:workqueue_execution -e workqueue:workqueue_insertion $@
diff --git a/tools/perf/scripts/perl/bin/workqueue-stats-report b/tools/perf/scripts/perl/bin/workqueue-stats-report
index 71cfbd182fb9..35081132ef97 100644
--- a/tools/perf/scripts/perl/bin/workqueue-stats-report
+++ b/tools/perf/scripts/perl/bin/workqueue-stats-report
@@ -1,6 +1,6 @@
#!/bin/bash
# description: workqueue stats (ins/exe/create/destroy)
-perf trace -s ~/libexec/perf-core/scripts/perl/workqueue-stats.pl
+perf trace $@ -s ~/libexec/perf-core/scripts/perl/workqueue-stats.pl
diff --git a/tools/perf/scripts/perl/failed-syscalls.pl b/tools/perf/scripts/perl/failed-syscalls.pl
index c18e7e27a84b..94bc25a347eb 100644
--- a/tools/perf/scripts/perl/failed-syscalls.pl
+++ b/tools/perf/scripts/perl/failed-syscalls.pl
@@ -11,6 +11,8 @@ use Perf::Trace::Core;
use Perf::Trace::Context;
use Perf::Trace::Util;
+my $for_comm = shift;
+
my %failed_syscalls;
sub raw_syscalls::sys_exit
@@ -33,6 +35,8 @@ sub trace_end
foreach my $comm (sort {$failed_syscalls{$b} <=> $failed_syscalls{$a}}
keys %failed_syscalls) {
- printf("%-20s %10s\n", $comm, $failed_syscalls{$comm});
+ next if ($for_comm && $comm ne $for_comm);
+
+ printf("%-20s %10s\n", $comm, $failed_syscalls{$comm});
}
}
diff --git a/tools/perf/scripts/perl/rw-by-pid.pl b/tools/perf/scripts/perl/rw-by-pid.pl
index da601fae1a00..9db23c9daf55 100644
--- a/tools/perf/scripts/perl/rw-by-pid.pl
+++ b/tools/perf/scripts/perl/rw-by-pid.pl
@@ -79,12 +79,12 @@ sub trace_end
printf("%6s %-20s %10s %10s %10s\n", "------", "--------------------",
"-----------", "----------", "----------");
- foreach my $pid (sort {$reads{$b}{bytes_read} <=>
- $reads{$a}{bytes_read}} keys %reads) {
- my $comm = $reads{$pid}{comm};
- my $total_reads = $reads{$pid}{total_reads};
- my $bytes_requested = $reads{$pid}{bytes_requested};
- my $bytes_read = $reads{$pid}{bytes_read};
+ foreach my $pid (sort { ($reads{$b}{bytes_read} || 0) <=>
+ ($reads{$a}{bytes_read} || 0) } keys %reads) {
+ my $comm = $reads{$pid}{comm} || "";
+ my $total_reads = $reads{$pid}{total_reads} || 0;
+ my $bytes_requested = $reads{$pid}{bytes_requested} || 0;
+ my $bytes_read = $reads{$pid}{bytes_read} || 0;
printf("%6s %-20s %10s %10s %10s\n", $pid, $comm,
$total_reads, $bytes_requested, $bytes_read);
@@ -96,16 +96,23 @@ sub trace_end
printf("%6s %20s %6s %10s\n", "------", "--------------------",
"------", "----------");
- foreach my $pid (keys %reads) {
- my $comm = $reads{$pid}{comm};
- foreach my $err (sort {$reads{$b}{comm} cmp $reads{$a}{comm}}
- keys %{$reads{$pid}{errors}}) {
- my $errors = $reads{$pid}{errors}{$err};
+ my @errcounts = ();
- printf("%6d %-20s %6d %10s\n", $pid, $comm, $err, $errors);
+ foreach my $pid (keys %reads) {
+ foreach my $error (keys %{$reads{$pid}{errors}}) {
+ my $comm = $reads{$pid}{comm} || "";
+ my $errcount = $reads{$pid}{errors}{$error} || 0;
+ push @errcounts, [$pid, $comm, $error, $errcount];
}
}
+ @errcounts = sort { $b->[3] <=> $a->[3] } @errcounts;
+
+ for my $i (0 .. $#errcounts) {
+ printf("%6d %-20s %6d %10s\n", $errcounts[$i][0],
+ $errcounts[$i][1], $errcounts[$i][2], $errcounts[$i][3]);
+ }
+
printf("\nwrite counts by pid:\n\n");
printf("%6s %20s %10s %10s\n", "pid", "comm",
@@ -113,11 +120,11 @@ sub trace_end
printf("%6s %-20s %10s %10s\n", "------", "--------------------",
"-----------", "----------");
- foreach my $pid (sort {$writes{$b}{bytes_written} <=>
- $writes{$a}{bytes_written}} keys %writes) {
- my $comm = $writes{$pid}{comm};
- my $total_writes = $writes{$pid}{total_writes};
- my $bytes_written = $writes{$pid}{bytes_written};
+ foreach my $pid (sort { ($writes{$b}{bytes_written} || 0) <=>
+ ($writes{$a}{bytes_written} || 0)} keys %writes) {
+ my $comm = $writes{$pid}{comm} || "";
+ my $total_writes = $writes{$pid}{total_writes} || 0;
+ my $bytes_written = $writes{$pid}{bytes_written} || 0;
printf("%6s %-20s %10s %10s\n", $pid, $comm,
$total_writes, $bytes_written);
@@ -129,16 +136,23 @@ sub trace_end
printf("%6s %20s %6s %10s\n", "------", "--------------------",
"------", "----------");
- foreach my $pid (keys %writes) {
- my $comm = $writes{$pid}{comm};
- foreach my $err (sort {$writes{$b}{comm} cmp $writes{$a}{comm}}
- keys %{$writes{$pid}{errors}}) {
- my $errors = $writes{$pid}{errors}{$err};
+ @errcounts = ();
- printf("%6d %-20s %6d %10s\n", $pid, $comm, $err, $errors);
+ foreach my $pid (keys %writes) {
+ foreach my $error (keys %{$writes{$pid}{errors}}) {
+ my $comm = $writes{$pid}{comm} || "";
+ my $errcount = $writes{$pid}{errors}{$error} || 0;
+ push @errcounts, [$pid, $comm, $error, $errcount];
}
}
+ @errcounts = sort { $b->[3] <=> $a->[3] } @errcounts;
+
+ for my $i (0 .. $#errcounts) {
+ printf("%6d %-20s %6d %10s\n", $errcounts[$i][0],
+ $errcounts[$i][1], $errcounts[$i][2], $errcounts[$i][3]);
+ }
+
print_unhandled();
}
diff --git a/tools/perf/scripts/perl/rwtop.pl b/tools/perf/scripts/perl/rwtop.pl
new file mode 100644
index 000000000000..4bb3ecd33472
--- /dev/null
+++ b/tools/perf/scripts/perl/rwtop.pl
@@ -0,0 +1,199 @@
+#!/usr/bin/perl -w
+# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
+# Licensed under the terms of the GNU GPL License version 2
+
+# read/write top
+#
+# Periodically displays system-wide r/w call activity, broken down by
+# pid. If an [interval] arg is specified, the display will be
+# refreshed every [interval] seconds. The default interval is 3
+# seconds.
+
+use 5.010000;
+use strict;
+use warnings;
+
+use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib";
+use lib "./Perf-Trace-Util/lib";
+use Perf::Trace::Core;
+use Perf::Trace::Util;
+
+my $default_interval = 3;
+my $nlines = 20;
+my $print_thread;
+my $print_pending = 0;
+
+my %reads;
+my %writes;
+
+my $interval = shift;
+if (!$interval) {
+ $interval = $default_interval;
+}
+
+sub syscalls::sys_exit_read
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm,
+ $nr, $ret) = @_;
+
+ print_check();
+
+ if ($ret > 0) {
+ $reads{$common_pid}{bytes_read} += $ret;
+ } else {
+ if (!defined ($reads{$common_pid}{bytes_read})) {
+ $reads{$common_pid}{bytes_read} = 0;
+ }
+ $reads{$common_pid}{errors}{$ret}++;
+ }
+}
+
+sub syscalls::sys_enter_read
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm,
+ $nr, $fd, $buf, $count) = @_;
+
+ print_check();
+
+ $reads{$common_pid}{bytes_requested} += $count;
+ $reads{$common_pid}{total_reads}++;
+ $reads{$common_pid}{comm} = $common_comm;
+}
+
+sub syscalls::sys_exit_write
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm,
+ $nr, $ret) = @_;
+
+ print_check();
+
+ if ($ret <= 0) {
+ $writes{$common_pid}{errors}{$ret}++;
+ }
+}
+
+sub syscalls::sys_enter_write
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm,
+ $nr, $fd, $buf, $count) = @_;
+
+ print_check();
+
+ $writes{$common_pid}{bytes_written} += $count;
+ $writes{$common_pid}{total_writes}++;
+ $writes{$common_pid}{comm} = $common_comm;
+}
+
+sub trace_begin
+{
+ $SIG{ALRM} = \&set_print_pending;
+ alarm 1;
+}
+
+sub trace_end
+{
+ print_unhandled();
+ print_totals();
+}
+
+sub print_check()
+{
+ if ($print_pending == 1) {
+ $print_pending = 0;
+ print_totals();
+ }
+}
+
+sub set_print_pending()
+{
+ $print_pending = 1;
+ alarm $interval;
+}
+
+sub print_totals
+{
+ my $count;
+
+ $count = 0;
+
+ clear_term();
+
+ printf("\nread counts by pid:\n\n");
+
+ printf("%6s %20s %10s %10s %10s\n", "pid", "comm",
+ "# reads", "bytes_req", "bytes_read");
+ printf("%6s %-20s %10s %10s %10s\n", "------", "--------------------",
+ "----------", "----------", "----------");
+
+ foreach my $pid (sort { ($reads{$b}{bytes_read} || 0) <=>
+ ($reads{$a}{bytes_read} || 0) } keys %reads) {
+ my $comm = $reads{$pid}{comm} || "";
+ my $total_reads = $reads{$pid}{total_reads} || 0;
+ my $bytes_requested = $reads{$pid}{bytes_requested} || 0;
+ my $bytes_read = $reads{$pid}{bytes_read} || 0;
+
+ printf("%6s %-20s %10s %10s %10s\n", $pid, $comm,
+ $total_reads, $bytes_requested, $bytes_read);
+
+ if (++$count == $nlines) {
+ last;
+ }
+ }
+
+ $count = 0;
+
+ printf("\nwrite counts by pid:\n\n");
+
+ printf("%6s %20s %10s %13s\n", "pid", "comm",
+ "# writes", "bytes_written");
+ printf("%6s %-20s %10s %13s\n", "------", "--------------------",
+ "----------", "-------------");
+
+ foreach my $pid (sort { ($writes{$b}{bytes_written} || 0) <=>
+ ($writes{$a}{bytes_written} || 0)} keys %writes) {
+ my $comm = $writes{$pid}{comm} || "";
+ my $total_writes = $writes{$pid}{total_writes} || 0;
+ my $bytes_written = $writes{$pid}{bytes_written} || 0;
+
+ printf("%6s %-20s %10s %13s\n", $pid, $comm,
+ $total_writes, $bytes_written);
+
+ if (++$count == $nlines) {
+ last;
+ }
+ }
+
+ %reads = ();
+ %writes = ();
+}
+
+my %unhandled;
+
+sub print_unhandled
+{
+ if ((scalar keys %unhandled) == 0) {
+ return;
+ }
+
+ print "\nunhandled events:\n\n";
+
+ printf("%-40s %10s\n", "event", "count");
+ printf("%-40s %10s\n", "----------------------------------------",
+ "-----------");
+
+ foreach my $event_name (keys %unhandled) {
+ printf("%-40s %10d\n", $event_name, $unhandled{$event_name});
+ }
+}
+
+sub trace_unhandled
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm) = @_;
+
+ $unhandled{$event_name}++;
+}
diff --git a/tools/perf/scripts/perl/wakeup-latency.pl b/tools/perf/scripts/perl/wakeup-latency.pl
index ed58ef284e23..d9143dcec6c6 100644
--- a/tools/perf/scripts/perl/wakeup-latency.pl
+++ b/tools/perf/scripts/perl/wakeup-latency.pl
@@ -22,8 +22,8 @@ my %last_wakeup;
my $max_wakeup_latency;
my $min_wakeup_latency;
-my $total_wakeup_latency;
-my $total_wakeups;
+my $total_wakeup_latency = 0;
+my $total_wakeups = 0;
sub sched::sched_switch
{
@@ -67,8 +67,12 @@ sub trace_end
{
printf("wakeup_latency stats:\n\n");
print "total_wakeups: $total_wakeups\n";
- printf("avg_wakeup_latency (ns): %u\n",
- avg($total_wakeup_latency, $total_wakeups));
+ if ($total_wakeups) {
+ printf("avg_wakeup_latency (ns): %u\n",
+ avg($total_wakeup_latency, $total_wakeups));
+ } else {
+ printf("avg_wakeup_latency (ns): N/A\n");
+ }
printf("min_wakeup_latency (ns): %u\n", $min_wakeup_latency);
printf("max_wakeup_latency (ns): %u\n", $max_wakeup_latency);
diff --git a/tools/perf/scripts/perl/workqueue-stats.pl b/tools/perf/scripts/perl/workqueue-stats.pl
index 511302c8a494..b84b12699b70 100644
--- a/tools/perf/scripts/perl/workqueue-stats.pl
+++ b/tools/perf/scripts/perl/workqueue-stats.pl
@@ -71,9 +71,9 @@ sub trace_end
printf("%3s %6s %6s\t%-20s\n", "---", "---", "----", "----");
foreach my $pidhash (@cpus) {
while ((my $pid, my $wqhash) = each %$pidhash) {
- my $ins = $$wqhash{'inserted'};
- my $exe = $$wqhash{'executed'};
- my $comm = $$wqhash{'comm'};
+ my $ins = $$wqhash{'inserted'} || 0;
+ my $exe = $$wqhash{'executed'} || 0;
+ my $comm = $$wqhash{'comm'} || "";
if ($ins || $exe) {
printf("%3u %6u %6u\t%-20s\n", $cpu, $ins, $exe, $comm);
}
@@ -87,9 +87,9 @@ sub trace_end
printf("%3s %6s %6s\t%-20s\n", "---", "-------", "---------", "----");
foreach my $pidhash (@cpus) {
while ((my $pid, my $wqhash) = each %$pidhash) {
- my $created = $$wqhash{'created'};
- my $destroyed = $$wqhash{'destroyed'};
- my $comm = $$wqhash{'comm'};
+ my $created = $$wqhash{'created'} || 0;
+ my $destroyed = $$wqhash{'destroyed'} || 0;
+ my $comm = $$wqhash{'comm'} || "";
if ($created || $destroyed) {
printf("%3u %6u %6u\t%-20s\n", $cpu, $created, $destroyed,
$comm);
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
index 83e91435ed09..9689bc0acd9f 100644
--- a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
+++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
@@ -23,3 +23,6 @@ def nsecs_nsecs(nsecs):
def nsecs_str(nsecs):
str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
return str
+
+def clear_term():
+ print("\x1b[H\x1b[2J")
diff --git a/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record
index f8885d389e6f..eb5846bcb565 100644
--- a/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record
+++ b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record
@@ -1,2 +1,2 @@
#!/bin/bash
-perf record -c 1 -f -a -M -R -e raw_syscalls:sys_exit
+perf record -a -e raw_syscalls:sys_exit $@
diff --git a/tools/perf/scripts/python/bin/failed-syscalls-by-pid-report b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-report
index 1e0c0a860c87..30293545fcc2 100644
--- a/tools/perf/scripts/python/bin/failed-syscalls-by-pid-report
+++ b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-report
@@ -1,4 +1,10 @@
#!/bin/bash
# description: system-wide failed syscalls, by pid
# args: [comm]
-perf trace -s ~/libexec/perf-core/scripts/python/failed-syscalls-by-pid.py $1
+if [ $# -gt 0 ] ; then
+ if ! expr match "$1" "-" > /dev/null ; then
+ comm=$1
+ shift
+ fi
+fi
+perf trace $@ -s ~/libexec/perf-core/scripts/python/failed-syscalls-by-pid.py $comm
diff --git a/tools/perf/scripts/python/bin/sctop-record b/tools/perf/scripts/python/bin/sctop-record
new file mode 100644
index 000000000000..1fc5998b721d
--- /dev/null
+++ b/tools/perf/scripts/python/bin/sctop-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -a -e raw_syscalls:sys_enter $@
diff --git a/tools/perf/scripts/python/bin/sctop-report b/tools/perf/scripts/python/bin/sctop-report
new file mode 100644
index 000000000000..b01c842ae7b4
--- /dev/null
+++ b/tools/perf/scripts/python/bin/sctop-report
@@ -0,0 +1,24 @@
+#!/bin/bash
+# description: syscall top
+# args: [comm] [interval]
+n_args=0
+for i in "$@"
+do
+ if expr match "$i" "-" > /dev/null ; then
+ break
+ fi
+ n_args=$(( $n_args + 1 ))
+done
+if [ "$n_args" -gt 2 ] ; then
+ echo "usage: sctop-report [comm] [interval]"
+ exit
+fi
+if [ "$n_args" -gt 1 ] ; then
+ comm=$1
+ interval=$2
+ shift 2
+elif [ "$n_args" -gt 0 ] ; then
+ interval=$1
+ shift
+fi
+perf trace $@ -s ~/libexec/perf-core/scripts/python/sctop.py $comm $interval
diff --git a/tools/perf/scripts/python/bin/syscall-counts-by-pid-record b/tools/perf/scripts/python/bin/syscall-counts-by-pid-record
index 45a8c50359da..1fc5998b721d 100644
--- a/tools/perf/scripts/python/bin/syscall-counts-by-pid-record
+++ b/tools/perf/scripts/python/bin/syscall-counts-by-pid-record
@@ -1,2 +1,2 @@
#!/bin/bash
-perf record -c 1 -f -a -M -R -e raw_syscalls:sys_enter
+perf record -a -e raw_syscalls:sys_enter $@
diff --git a/tools/perf/scripts/python/bin/syscall-counts-by-pid-report b/tools/perf/scripts/python/bin/syscall-counts-by-pid-report
index f8044d192271..9e9d8ddd72ce 100644
--- a/tools/perf/scripts/python/bin/syscall-counts-by-pid-report
+++ b/tools/perf/scripts/python/bin/syscall-counts-by-pid-report
@@ -1,4 +1,10 @@
#!/bin/bash
# description: system-wide syscall counts, by pid
# args: [comm]
-perf trace -s ~/libexec/perf-core/scripts/python/syscall-counts-by-pid.py $1
+if [ $# -gt 0 ] ; then
+ if ! expr match "$1" "-" > /dev/null ; then
+ comm=$1
+ shift
+ fi
+fi
+perf trace $@ -s ~/libexec/perf-core/scripts/python/syscall-counts-by-pid.py $comm
diff --git a/tools/perf/scripts/python/bin/syscall-counts-record b/tools/perf/scripts/python/bin/syscall-counts-record
index 45a8c50359da..1fc5998b721d 100644
--- a/tools/perf/scripts/python/bin/syscall-counts-record
+++ b/tools/perf/scripts/python/bin/syscall-counts-record
@@ -1,2 +1,2 @@
#!/bin/bash
-perf record -c 1 -f -a -M -R -e raw_syscalls:sys_enter
+perf record -a -e raw_syscalls:sys_enter $@
diff --git a/tools/perf/scripts/python/bin/syscall-counts-report b/tools/perf/scripts/python/bin/syscall-counts-report
index a366aa61612f..dc076b618796 100644
--- a/tools/perf/scripts/python/bin/syscall-counts-report
+++ b/tools/perf/scripts/python/bin/syscall-counts-report
@@ -1,4 +1,10 @@
#!/bin/bash
# description: system-wide syscall counts
# args: [comm]
-perf trace -s ~/libexec/perf-core/scripts/python/syscall-counts.py $1
+if [ $# -gt 0 ] ; then
+ if ! expr match "$1" "-" > /dev/null ; then
+ comm=$1
+ shift
+ fi
+fi
+perf trace $@ -s ~/libexec/perf-core/scripts/python/syscall-counts.py $comm
diff --git a/tools/perf/scripts/python/sctop.py b/tools/perf/scripts/python/sctop.py
new file mode 100644
index 000000000000..6cafad40c296
--- /dev/null
+++ b/tools/perf/scripts/python/sctop.py
@@ -0,0 +1,78 @@
+# system call top
+# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
+# Licensed under the terms of the GNU GPL License version 2
+#
+# Periodically displays system-wide system call totals, broken down by
+# syscall. If a [comm] arg is specified, only syscalls called by
+# [comm] are displayed. If an [interval] arg is specified, the display
+# will be refreshed every [interval] seconds. The default interval is
+# 3 seconds.
+
+import thread
+import time
+import os
+import sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+from Util import *
+
+usage = "perf trace -s syscall-counts.py [comm] [interval]\n";
+
+for_comm = None
+default_interval = 3
+interval = default_interval
+
+if len(sys.argv) > 3:
+ sys.exit(usage)
+
+if len(sys.argv) > 2:
+ for_comm = sys.argv[1]
+ interval = int(sys.argv[2])
+elif len(sys.argv) > 1:
+ try:
+ interval = int(sys.argv[1])
+ except ValueError:
+ for_comm = sys.argv[1]
+ interval = default_interval
+
+syscalls = autodict()
+
+def trace_begin():
+ thread.start_new_thread(print_syscall_totals, (interval,))
+ pass
+
+def raw_syscalls__sys_enter(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ id, args):
+ if for_comm is not None:
+ if common_comm != for_comm:
+ return
+ try:
+ syscalls[id] += 1
+ except TypeError:
+ syscalls[id] = 1
+
+def print_syscall_totals(interval):
+ while 1:
+ clear_term()
+ if for_comm is not None:
+ print "\nsyscall events for %s:\n\n" % (for_comm),
+ else:
+ print "\nsyscall events:\n\n",
+
+ print "%-40s %10s\n" % ("event", "count"),
+ print "%-40s %10s\n" % ("----------------------------------------", \
+ "----------"),
+
+ for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
+ reverse = True):
+ try:
+ print "%-40d %10d\n" % (id, val),
+ except TypeError:
+ pass
+ syscalls.clear()
+ time.sleep(interval)
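sctop.py above relies on the autodict helper from Perf-Trace-Util: reading an unseen key auto-vivifies an empty container, so += 1 on a fresh key raises TypeError and the except branch seeds the count at 1. A simplified stand-in for that idiom (not the real Core.autodict the script imports):

    import collections

    class autodict(collections.defaultdict):
        # Simplified stand-in: missing keys auto-vivify to another autodict.
        def __init__(self):
            collections.defaultdict.__init__(self, autodict)

    syscalls = autodict()
    for sc_id in (0, 1, 0):
        try:
            syscalls[sc_id] += 1      # first access yields an autodict, so this raises TypeError
        except TypeError:
            syscalls[sc_id] = 1

    print(dict(syscalls))             # {0: 2, 1: 1}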
diff --git a/tools/perf/util/PERF-VERSION-GEN b/tools/perf/util/PERF-VERSION-GEN
index 54552a00a117..49ece7921914 100755
--- a/tools/perf/util/PERF-VERSION-GEN
+++ b/tools/perf/util/PERF-VERSION-GEN
@@ -1,6 +1,10 @@
#!/bin/sh
-GVF=PERF-VERSION-FILE
+if [ $# -eq 1 ] ; then
+ OUTPUT=$1
+fi
+
+GVF=${OUTPUT}PERF-VERSION-FILE
DEF_VER=v0.0.2.PERF
LF='
diff --git a/tools/perf/util/bitmap.c b/tools/perf/util/bitmap.c
new file mode 100644
index 000000000000..5e230acae1e9
--- /dev/null
+++ b/tools/perf/util/bitmap.c
@@ -0,0 +1,21 @@
+/*
+ * From lib/bitmap.c
+ * Helper functions for bitmap.h.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+#include <linux/bitmap.h>
+
+int __bitmap_weight(const unsigned long *bitmap, int bits)
+{
+ int k, w = 0, lim = bits/BITS_PER_LONG;
+
+ for (k = 0; k < lim; k++)
+ w += hweight_long(bitmap[k]);
+
+ if (bits % BITS_PER_LONG)
+ w += hweight_long(bitmap[k] & BITMAP_LAST_WORD_MASK(bits));
+
+ return w;
+}
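__bitmap_weight above counts set bits one word at a time and masks the trailing partial word so bits past the requested length are ignored. The same arithmetic in Python, assuming 64-bit longs (a sketch, not the perf helper):

    BITS_PER_LONG = 64

    def bitmap_last_word_mask(bits):
        # Mask covering only the low (bits % 64) bits of the final word.
        return (1 << (bits % BITS_PER_LONG)) - 1

    def bitmap_weight(words, bits):
        full, rem = divmod(bits, BITS_PER_LONG)
        weight = sum(bin(w).count("1") for w in words[:full])
        if rem:
            weight += bin(words[full] & bitmap_last_word_mask(bits)).count("1")
        return weight

    print(bitmap_weight([0xff, 0b1011], 68))   # 8 + 3 = 11 set bits within the first 68 bits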
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index 04904b35ba81..0f60a3906808 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -24,7 +24,7 @@ static int build_id__mark_dso_hit(event_t *event, struct perf_session *session)
}
thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
- event->ip.ip, &al);
+ event->ip.pid, event->ip.ip, &al);
if (al.map != NULL)
al.map->dso->hit = 1;
diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h
index 918eb376abe3..4b9aab7f0405 100644
--- a/tools/perf/util/cache.h
+++ b/tools/perf/util/cache.h
@@ -1,6 +1,7 @@
#ifndef __PERF_CACHE_H
#define __PERF_CACHE_H
+#include <stdbool.h>
#include "util.h"
#include "strbuf.h"
#include "../perf.h"
@@ -69,6 +70,19 @@ extern const char *pager_program;
extern int pager_in_use(void);
extern int pager_use_color;
+extern bool use_browser;
+
+#ifdef NO_NEWT_SUPPORT
+static inline void setup_browser(void)
+{
+ setup_pager();
+}
+static inline void exit_browser(bool wait_for_ok __used) {}
+#else
+void setup_browser(void);
+void exit_browser(bool wait_for_ok);
+#endif
+
extern const char *editor_program;
extern const char *excludes_file;
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index b3b71258272a..21a52e0a4435 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
+ * Copyright (C) 2009-2010, Frederic Weisbecker <fweisbec@gmail.com>
*
* Handle the callchains from the stream in an ad-hoc radix tree and then
* sort them in an rbtree.
@@ -17,6 +17,13 @@
#include "callchain.h"
+bool ip_callchain__valid(struct ip_callchain *chain, event_t *event)
+{
+ unsigned int chain_size = event->header.size;
+ chain_size -= (unsigned long)&event->ip.__more_data - (unsigned long)event;
+ return chain->nr * sizeof(u64) <= chain_size;
+}
+
#define chain_for_each_child(child, parent) \
list_for_each_entry(child, &parent->children, brothers)
@@ -160,7 +167,7 @@ create_child(struct callchain_node *parent, bool inherit_children)
{
struct callchain_node *new;
- new = malloc(sizeof(*new));
+ new = zalloc(sizeof(*new));
if (!new) {
perror("not enough memory to create child for code path tree");
return NULL;
@@ -183,25 +190,36 @@ create_child(struct callchain_node *parent, bool inherit_children)
return new;
}
+
+struct resolved_ip {
+ u64 ip;
+ struct map_symbol ms;
+};
+
+struct resolved_chain {
+ u64 nr;
+ struct resolved_ip ips[0];
+};
+
+
/*
* Fill the node with callchain values
*/
static void
-fill_node(struct callchain_node *node, struct ip_callchain *chain,
- int start, struct symbol **syms)
+fill_node(struct callchain_node *node, struct resolved_chain *chain, int start)
{
unsigned int i;
for (i = start; i < chain->nr; i++) {
struct callchain_list *call;
- call = malloc(sizeof(*call));
+ call = zalloc(sizeof(*call));
if (!call) {
perror("not enough memory for the code path tree");
return;
}
- call->ip = chain->ips[i];
- call->sym = syms[i];
+ call->ip = chain->ips[i].ip;
+ call->ms = chain->ips[i].ms;
list_add_tail(&call->list, &node->val);
}
node->val_nr = chain->nr - start;
@@ -210,13 +228,13 @@ fill_node(struct callchain_node *node, struct ip_callchain *chain,
}
static void
-add_child(struct callchain_node *parent, struct ip_callchain *chain,
- int start, struct symbol **syms)
+add_child(struct callchain_node *parent, struct resolved_chain *chain,
+ int start)
{
struct callchain_node *new;
new = create_child(parent, false);
- fill_node(new, chain, start, syms);
+ fill_node(new, chain, start);
new->children_hit = 0;
new->hit = 1;
@@ -228,9 +246,8 @@ add_child(struct callchain_node *parent, struct ip_callchain *chain,
* Then create another child to host the given callchain of new branch
*/
static void
-split_add_child(struct callchain_node *parent, struct ip_callchain *chain,
- struct callchain_list *to_split, int idx_parents, int idx_local,
- struct symbol **syms)
+split_add_child(struct callchain_node *parent, struct resolved_chain *chain,
+ struct callchain_list *to_split, int idx_parents, int idx_local)
{
struct callchain_node *new;
struct list_head *old_tail;
@@ -257,7 +274,7 @@ split_add_child(struct callchain_node *parent, struct ip_callchain *chain,
/* create a new child for the new branch if any */
if (idx_total < chain->nr) {
parent->hit = 0;
- add_child(parent, chain, idx_total, syms);
+ add_child(parent, chain, idx_total);
parent->children_hit++;
} else {
parent->hit = 1;
@@ -265,32 +282,33 @@ split_add_child(struct callchain_node *parent, struct ip_callchain *chain,
}
static int
-__append_chain(struct callchain_node *root, struct ip_callchain *chain,
- unsigned int start, struct symbol **syms);
+__append_chain(struct callchain_node *root, struct resolved_chain *chain,
+ unsigned int start);
static void
-__append_chain_children(struct callchain_node *root, struct ip_callchain *chain,
- struct symbol **syms, unsigned int start)
+__append_chain_children(struct callchain_node *root,
+ struct resolved_chain *chain,
+ unsigned int start)
{
struct callchain_node *rnode;
/* lookup in children */
chain_for_each_child(rnode, root) {
- unsigned int ret = __append_chain(rnode, chain, start, syms);
+ unsigned int ret = __append_chain(rnode, chain, start);
if (!ret)
goto inc_children_hit;
}
/* nothing in children, add to the current node */
- add_child(root, chain, start, syms);
+ add_child(root, chain, start);
inc_children_hit:
root->children_hit++;
}
static int
-__append_chain(struct callchain_node *root, struct ip_callchain *chain,
- unsigned int start, struct symbol **syms)
+__append_chain(struct callchain_node *root, struct resolved_chain *chain,
+ unsigned int start)
{
struct callchain_list *cnode;
unsigned int i = start;
@@ -302,13 +320,19 @@ __append_chain(struct callchain_node *root, struct ip_callchain *chain,
* anywhere inside a function.
*/
list_for_each_entry(cnode, &root->val, list) {
+ struct symbol *sym;
+
if (i == chain->nr)
break;
- if (cnode->sym && syms[i]) {
- if (cnode->sym->start != syms[i]->start)
+
+ sym = chain->ips[i].ms.sym;
+
+ if (cnode->ms.sym && sym) {
+ if (cnode->ms.sym->start != sym->start)
break;
- } else if (cnode->ip != chain->ips[i])
+ } else if (cnode->ip != chain->ips[i].ip)
break;
+
if (!found)
found = true;
i++;
@@ -320,7 +344,7 @@ __append_chain(struct callchain_node *root, struct ip_callchain *chain,
/* we match only a part of the node. Split it and add the new chain */
if (i - start < root->val_nr) {
- split_add_child(root, chain, cnode, start, i - start, syms);
+ split_add_child(root, chain, cnode, start, i - start);
return 0;
}
@@ -331,15 +355,50 @@ __append_chain(struct callchain_node *root, struct ip_callchain *chain,
}
/* We match the node and still have a part remaining */
- __append_chain_children(root, chain, syms, i);
+ __append_chain_children(root, chain, i);
return 0;
}
-void append_chain(struct callchain_node *root, struct ip_callchain *chain,
- struct symbol **syms)
+static void filter_context(struct ip_callchain *old, struct resolved_chain *new,
+ struct map_symbol *syms)
{
+ int i, j = 0;
+
+ for (i = 0; i < (int)old->nr; i++) {
+ if (old->ips[i] >= PERF_CONTEXT_MAX)
+ continue;
+
+ new->ips[j].ip = old->ips[i];
+ new->ips[j].ms = syms[i];
+ j++;
+ }
+
+ new->nr = j;
+}
+
+
+int append_chain(struct callchain_node *root, struct ip_callchain *chain,
+ struct map_symbol *syms)
+{
+ struct resolved_chain *filtered;
+
if (!chain->nr)
- return;
- __append_chain_children(root, chain, syms, 0);
+ return 0;
+
+ filtered = zalloc(sizeof(*filtered) +
+ chain->nr * sizeof(struct resolved_ip));
+ if (!filtered)
+ return -ENOMEM;
+
+ filter_context(chain, filtered, syms);
+
+ if (!filtered->nr)
+ goto end;
+
+ __append_chain_children(root, filtered, 0);
+end:
+ free(filtered);
+
+ return 0;
}
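filter_context above is the step that turns the raw ip_callchain into the new resolved_chain: each surviving entry pairs an instruction pointer with the map/symbol resolved for it, and the PERF_CONTEXT_* marker words (values at or above PERF_CONTEXT_MAX) are dropped. A compact Python sketch of that filtering, with illustrative addresses and symbol names:

    PERF_CONTEXT_MAX = (1 << 64) - 4095      # (u64)-4095 in the perf_event ABI

    def filter_context(ips, syms):
        # Keep only real addresses, each paired with the symbol resolved at the same index.
        return [(ip, sym) for ip, sym in zip(ips, syms) if ip < PERF_CONTEXT_MAX]

    chain = [0xffffffffffffff80, 0x401000, 0x401200]     # first entry is PERF_CONTEXT_KERNEL
    syms = [None, "main", "helper"]
    print(filter_context(chain, syms))                   # keeps only the two real addresses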
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index ad4626de4c2b..1cba1f5504e7 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -4,6 +4,7 @@
#include "../perf.h"
#include <linux/list.h>
#include <linux/rbtree.h>
+#include "event.h"
#include "util.h"
#include "symbol.h"
@@ -33,13 +34,14 @@ typedef void (*sort_chain_func_t)(struct rb_root *, struct callchain_node *,
struct callchain_param {
enum chain_mode mode;
+ u32 print_limit;
double min_percent;
sort_chain_func_t sort;
};
struct callchain_list {
u64 ip;
- struct symbol *sym;
+ struct map_symbol ms;
struct list_head list;
};
@@ -56,6 +58,8 @@ static inline u64 cumul_hits(struct callchain_node *node)
}
int register_callchain_param(struct callchain_param *param);
-void append_chain(struct callchain_node *root, struct ip_callchain *chain,
- struct symbol **syms);
+int append_chain(struct callchain_node *root, struct ip_callchain *chain,
+ struct map_symbol *syms);
+
+bool ip_callchain__valid(struct ip_callchain *chain, event_t *event);
#endif /* __PERF_CALLCHAIN_H */
diff --git a/tools/perf/util/color.c b/tools/perf/util/color.c
index e88bca55a599..e191eb9a667f 100644
--- a/tools/perf/util/color.c
+++ b/tools/perf/util/color.c
@@ -166,6 +166,31 @@ int perf_color_default_config(const char *var, const char *value, void *cb)
return perf_default_config(var, value, cb);
}
+static int __color_vsnprintf(char *bf, size_t size, const char *color,
+ const char *fmt, va_list args, const char *trail)
+{
+ int r = 0;
+
+ /*
+ * Auto-detect:
+ */
+ if (perf_use_color_default < 0) {
+ if (isatty(1) || pager_in_use())
+ perf_use_color_default = 1;
+ else
+ perf_use_color_default = 0;
+ }
+
+ if (perf_use_color_default && *color)
+ r += snprintf(bf, size, "%s", color);
+ r += vsnprintf(bf + r, size - r, fmt, args);
+ if (perf_use_color_default && *color)
+ r += snprintf(bf + r, size - r, "%s", PERF_COLOR_RESET);
+ if (trail)
+ r += snprintf(bf + r, size - r, "%s", trail);
+ return r;
+}
+
static int __color_vfprintf(FILE *fp, const char *color, const char *fmt,
va_list args, const char *trail)
{
@@ -191,11 +216,28 @@ static int __color_vfprintf(FILE *fp, const char *color, const char *fmt,
return r;
}
+int color_vsnprintf(char *bf, size_t size, const char *color,
+ const char *fmt, va_list args)
+{
+ return __color_vsnprintf(bf, size, color, fmt, args, NULL);
+}
+
int color_vfprintf(FILE *fp, const char *color, const char *fmt, va_list args)
{
return __color_vfprintf(fp, color, fmt, args, NULL);
}
+int color_snprintf(char *bf, size_t size, const char *color,
+ const char *fmt, ...)
+{
+ va_list args;
+ int r;
+
+ va_start(args, fmt);
+ r = color_vsnprintf(bf, size, color, fmt, args);
+ va_end(args);
+ return r;
+}
int color_fprintf(FILE *fp, const char *color, const char *fmt, ...)
{
@@ -274,3 +316,9 @@ int percent_color_fprintf(FILE *fp, const char *fmt, double percent)
return r;
}
+
+int percent_color_snprintf(char *bf, size_t size, const char *fmt, double percent)
+{
+ const char *color = get_percent_color(percent);
+ return color_snprintf(bf, size, color, fmt, percent);
+}
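The new snprintf-style color helpers above follow the same shape as the existing fprintf ones: decide once whether color output is wanted, then emit the color escape, the formatted text, the reset sequence, and an optional trailer into the caller's buffer. A rough Python equivalent of that wrapping (the pager check is omitted and the names are illustrative):

    import sys

    PERF_COLOR_RESET = "\033[m"

    def color_snprintf(color, fmt, *args, trail=None, use_color=None):
        # Auto-detect roughly the way perf does: color only when stdout is a tty.
        if use_color is None:
            use_color = sys.stdout.isatty()
        out = fmt % args
        if use_color and color:
            out = color + out + PERF_COLOR_RESET
        if trail:
            out += trail
        return out

    print(color_snprintf("\033[0;31m", "%6.2f%%", 42.0, trail="\n"), end="")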
diff --git a/tools/perf/util/color.h b/tools/perf/util/color.h
index 24e8809210bb..dea082b79602 100644
--- a/tools/perf/util/color.h
+++ b/tools/perf/util/color.h
@@ -32,10 +32,14 @@ int perf_color_default_config(const char *var, const char *value, void *cb);
int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty);
void color_parse(const char *value, const char *var, char *dst);
void color_parse_mem(const char *value, int len, const char *var, char *dst);
+int color_vsnprintf(char *bf, size_t size, const char *color,
+ const char *fmt, va_list args);
int color_vfprintf(FILE *fp, const char *color, const char *fmt, va_list args);
int color_fprintf(FILE *fp, const char *color, const char *fmt, ...);
+int color_snprintf(char *bf, size_t size, const char *color, const char *fmt, ...);
int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...);
int color_fwrite_lines(FILE *fp, const char *color, size_t count, const char *buf);
+int percent_color_snprintf(char *bf, size_t size, const char *fmt, double percent);
int percent_color_fprintf(FILE *fp, const char *fmt, double percent);
const char *get_percent_color(double percent);
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index 0905600c3851..dd824cf3b628 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -6,13 +6,14 @@
#include <stdarg.h>
#include <stdio.h>
+#include "cache.h"
#include "color.h"
#include "event.h"
#include "debug.h"
#include "util.h"
int verbose = 0;
-int dump_trace = 0;
+bool dump_trace = false;
int eprintf(int level, const char *fmt, ...)
{
@@ -21,7 +22,10 @@ int eprintf(int level, const char *fmt, ...)
if (verbose >= level) {
va_start(args, fmt);
- ret = vfprintf(stderr, fmt, args);
+ if (use_browser)
+ ret = browser__show_help(fmt, args);
+ else
+ ret = vfprintf(stderr, fmt, args);
va_end(args);
}
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h
index c6c24c522dea..047ac3324ebe 100644
--- a/tools/perf/util/debug.h
+++ b/tools/perf/util/debug.h
@@ -2,14 +2,38 @@
#ifndef __PERF_DEBUG_H
#define __PERF_DEBUG_H
+#include <stdbool.h>
#include "event.h"
extern int verbose;
-extern int dump_trace;
+extern bool dump_trace;
-int eprintf(int level,
- const char *fmt, ...) __attribute__((format(printf, 2, 3)));
int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
void trace_event(event_t *event);
+struct ui_progress;
+
+#ifdef NO_NEWT_SUPPORT
+static inline int browser__show_help(const char *format __used, va_list ap __used)
+{
+ return 0;
+}
+
+static inline struct ui_progress *ui_progress__new(const char *title __used,
+ u64 total __used)
+{
+ return (struct ui_progress *)1;
+}
+
+static inline void ui_progress__update(struct ui_progress *self __used,
+ u64 curr __used) {}
+
+static inline void ui_progress__delete(struct ui_progress *self __used) {}
+#else
+int browser__show_help(const char *format, va_list ap);
+struct ui_progress *ui_progress__new(const char *title, u64 total);
+void ui_progress__update(struct ui_progress *self, u64 curr);
+void ui_progress__delete(struct ui_progress *self);
+#endif
+
#endif /* __PERF_DEBUG_H */
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 705ec63548b4..cce006ec8f05 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -112,7 +112,11 @@ static int event__synthesize_mmap_events(pid_t pid, pid_t tgid,
event_t ev = {
.header = {
.type = PERF_RECORD_MMAP,
- .misc = 0, /* Just like the kernel, see kernel/perf_event.c __perf_event_mmap */
+ /*
+ * Just like the kernel, see __perf_event_mmap
+ * in kernel/perf_event.c
+ */
+ .misc = PERF_RECORD_MISC_USER,
},
};
int n;
@@ -130,6 +134,7 @@ static int event__synthesize_mmap_events(pid_t pid, pid_t tgid,
continue;
pbf += n + 3;
if (*pbf == 'x') { /* vm_exec */
+ u64 vm_pgoff;
char *execname = strchr(bf, '/');
/* Catch VDSO */
@@ -139,6 +144,14 @@ static int event__synthesize_mmap_events(pid_t pid, pid_t tgid,
if (execname == NULL)
continue;
+ pbf += 3;
+ n = hex2u64(pbf, &vm_pgoff);
+ /* pgoff is in bytes, not pages */
+ if (n >= 0)
+ ev.mmap.pgoff = vm_pgoff << getpagesize();
+ else
+ ev.mmap.pgoff = 0;
+
size = strlen(execname);
execname[size - 1] = '\0'; /* Remove \n */
memcpy(ev.mmap.filename, execname, size);
@@ -158,11 +171,23 @@ static int event__synthesize_mmap_events(pid_t pid, pid_t tgid,
}
int event__synthesize_modules(event__handler_t process,
- struct perf_session *session)
+ struct perf_session *session,
+ struct machine *machine)
{
struct rb_node *nd;
+ struct map_groups *kmaps = &machine->kmaps;
+ u16 misc;
+
+ /*
+ * kernel uses 0 for user space maps, see kernel/perf_event.c
+ * __perf_event_mmap
+ */
+ if (machine__is_host(machine))
+ misc = PERF_RECORD_MISC_KERNEL;
+ else
+ misc = PERF_RECORD_MISC_GUEST_KERNEL;
- for (nd = rb_first(&session->kmaps.maps[MAP__FUNCTION]);
+ for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
nd; nd = rb_next(nd)) {
event_t ev;
size_t size;
@@ -173,12 +198,13 @@ int event__synthesize_modules(event__handler_t process,
size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
memset(&ev, 0, sizeof(ev));
- ev.mmap.header.misc = 1; /* kernel uses 0 for user space maps, see kernel/perf_event.c __perf_event_mmap */
+ ev.mmap.header.misc = misc;
ev.mmap.header.type = PERF_RECORD_MMAP;
ev.mmap.header.size = (sizeof(ev.mmap) -
(sizeof(ev.mmap.filename) - size));
ev.mmap.start = pos->start;
ev.mmap.len = pos->end - pos->start;
+ ev.mmap.pid = machine->pid;
memcpy(ev.mmap.filename, pos->dso->long_name,
pos->dso->long_name_len + 1);
@@ -241,13 +267,18 @@ static int find_symbol_cb(void *arg, const char *name, char type, u64 start)
int event__synthesize_kernel_mmap(event__handler_t process,
struct perf_session *session,
+ struct machine *machine,
const char *symbol_name)
{
size_t size;
+ const char *filename, *mmap_name;
+ char path[PATH_MAX];
+ char name_buff[PATH_MAX];
+ struct map *map;
+
event_t ev = {
.header = {
.type = PERF_RECORD_MMAP,
- .misc = 1, /* kernel uses 0 for user space maps, see kernel/perf_event.c __perf_event_mmap */
},
};
/*
@@ -257,16 +288,37 @@ int event__synthesize_kernel_mmap(event__handler_t process,
*/
struct process_symbol_args args = { .name = symbol_name, };
- if (kallsyms__parse("/proc/kallsyms", &args, find_symbol_cb) <= 0)
+ mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
+ if (machine__is_host(machine)) {
+ /*
+ * kernel uses PERF_RECORD_MISC_USER for user space maps,
+ * see kernel/perf_event.c __perf_event_mmap
+ */
+ ev.header.misc = PERF_RECORD_MISC_KERNEL;
+ filename = "/proc/kallsyms";
+ } else {
+ ev.header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
+ if (machine__is_default_guest(machine))
+ filename = (char *) symbol_conf.default_guest_kallsyms;
+ else {
+ sprintf(path, "%s/proc/kallsyms", machine->root_dir);
+ filename = path;
+ }
+ }
+
+ if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0)
return -ENOENT;
+ map = machine->vmlinux_maps[MAP__FUNCTION];
size = snprintf(ev.mmap.filename, sizeof(ev.mmap.filename),
- "[kernel.kallsyms.%s]", symbol_name) + 1;
+ "%s%s", mmap_name, symbol_name) + 1;
size = ALIGN(size, sizeof(u64));
- ev.mmap.header.size = (sizeof(ev.mmap) - (sizeof(ev.mmap.filename) - size));
+ ev.mmap.header.size = (sizeof(ev.mmap) -
+ (sizeof(ev.mmap.filename) - size));
ev.mmap.pgoff = args.start;
- ev.mmap.start = session->vmlinux_maps[MAP__FUNCTION]->start;
- ev.mmap.len = session->vmlinux_maps[MAP__FUNCTION]->end - ev.mmap.start ;
+ ev.mmap.start = map->start;
+ ev.mmap.len = map->end - ev.mmap.start;
+ ev.mmap.pid = machine->pid;
return process(&ev, session);
}
@@ -316,26 +368,54 @@ int event__process_comm(event_t *self, struct perf_session *session)
int event__process_lost(event_t *self, struct perf_session *session)
{
dump_printf(": id:%Ld: lost:%Ld\n", self->lost.id, self->lost.lost);
- session->events_stats.lost += self->lost.lost;
+ session->hists.stats.lost += self->lost.lost;
return 0;
}
-int event__process_mmap(event_t *self, struct perf_session *session)
+static void event_set_kernel_mmap_len(struct map **maps, event_t *self)
+{
+ maps[MAP__FUNCTION]->start = self->mmap.start;
+ maps[MAP__FUNCTION]->end = self->mmap.start + self->mmap.len;
+ /*
+ * Be a bit paranoid here, some perf.data file came with
+ * a zero sized synthesized MMAP event for the kernel.
+ */
+ if (maps[MAP__FUNCTION]->end == 0)
+ maps[MAP__FUNCTION]->end = ~0UL;
+}
+
+static int event__process_kernel_mmap(event_t *self,
+ struct perf_session *session)
{
- struct thread *thread;
struct map *map;
+ char kmmap_prefix[PATH_MAX];
+ struct machine *machine;
+ enum dso_kernel_type kernel_type;
+ bool is_kernel_mmap;
+
+ machine = perf_session__findnew_machine(session, self->mmap.pid);
+ if (!machine) {
+ pr_err("Can't find id %d's machine\n", self->mmap.pid);
+ goto out_problem;
+ }
- dump_printf(" %d/%d: [%#Lx(%#Lx) @ %#Lx]: %s\n",
- self->mmap.pid, self->mmap.tid, self->mmap.start,
- self->mmap.len, self->mmap.pgoff, self->mmap.filename);
+ machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
+ if (machine__is_host(machine))
+ kernel_type = DSO_TYPE_KERNEL;
+ else
+ kernel_type = DSO_TYPE_GUEST_KERNEL;
- if (self->mmap.pid == 0) {
- static const char kmmap_prefix[] = "[kernel.kallsyms.";
+ is_kernel_mmap = memcmp(self->mmap.filename,
+ kmmap_prefix,
+ strlen(kmmap_prefix)) == 0;
+ if (self->mmap.filename[0] == '/' ||
+ (!is_kernel_mmap && self->mmap.filename[0] == '[')) {
- if (self->mmap.filename[0] == '/') {
- char short_module_name[1024];
- char *name = strrchr(self->mmap.filename, '/'), *dot;
+ char short_module_name[1024];
+ char *name, *dot;
+ if (self->mmap.filename[0] == '/') {
+ name = strrchr(self->mmap.filename, '/');
if (name == NULL)
goto out_problem;
@@ -343,58 +423,84 @@ int event__process_mmap(event_t *self, struct perf_session *session)
dot = strrchr(name, '.');
if (dot == NULL)
goto out_problem;
-
snprintf(short_module_name, sizeof(short_module_name),
- "[%.*s]", (int)(dot - name), name);
+ "[%.*s]", (int)(dot - name), name);
strxfrchar(short_module_name, '-', '_');
-
- map = perf_session__new_module_map(session,
- self->mmap.start,
- self->mmap.filename);
- if (map == NULL)
- goto out_problem;
-
- name = strdup(short_module_name);
- if (name == NULL)
- goto out_problem;
-
- map->dso->short_name = name;
- map->end = map->start + self->mmap.len;
- } else if (memcmp(self->mmap.filename, kmmap_prefix,
- sizeof(kmmap_prefix) - 1) == 0) {
- const char *symbol_name = (self->mmap.filename +
- sizeof(kmmap_prefix) - 1);
+ } else
+ strcpy(short_module_name, self->mmap.filename);
+
+ map = machine__new_module(machine, self->mmap.start,
+ self->mmap.filename);
+ if (map == NULL)
+ goto out_problem;
+
+ name = strdup(short_module_name);
+ if (name == NULL)
+ goto out_problem;
+
+ map->dso->short_name = name;
+ map->end = map->start + self->mmap.len;
+ } else if (is_kernel_mmap) {
+ const char *symbol_name = (self->mmap.filename +
+ strlen(kmmap_prefix));
+ /*
+ * Should be there already, from the build-id table in
+ * the header.
+ */
+ struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,
+ kmmap_prefix);
+ if (kernel == NULL)
+ goto out_problem;
+
+ kernel->kernel = kernel_type;
+ if (__machine__create_kernel_maps(machine, kernel) < 0)
+ goto out_problem;
+
+ event_set_kernel_mmap_len(machine->vmlinux_maps, self);
+ perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
+ symbol_name,
+ self->mmap.pgoff);
+ if (machine__is_default_guest(machine)) {
/*
- * Should be there already, from the build-id table in
- * the header.
+ * preload dso of guest kernel and modules
*/
- struct dso *kernel = __dsos__findnew(&dsos__kernel,
- "[kernel.kallsyms]");
- if (kernel == NULL)
- goto out_problem;
-
- kernel->kernel = 1;
- if (__perf_session__create_kernel_maps(session, kernel) < 0)
- goto out_problem;
+ dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
+ NULL);
+ }
+ }
+ return 0;
+out_problem:
+ return -1;
+}
- session->vmlinux_maps[MAP__FUNCTION]->start = self->mmap.start;
- session->vmlinux_maps[MAP__FUNCTION]->end = self->mmap.start + self->mmap.len;
- /*
- * Be a bit paranoid here, some perf.data file came with
- * a zero sized synthesized MMAP event for the kernel.
- */
- if (session->vmlinux_maps[MAP__FUNCTION]->end == 0)
- session->vmlinux_maps[MAP__FUNCTION]->end = ~0UL;
+int event__process_mmap(event_t *self, struct perf_session *session)
+{
+ struct machine *machine;
+ struct thread *thread;
+ struct map *map;
+ u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+ int ret = 0;
- perf_session__set_kallsyms_ref_reloc_sym(session, symbol_name,
- self->mmap.pgoff);
- }
+ dump_printf(" %d/%d: [%#Lx(%#Lx) @ %#Lx]: %s\n",
+ self->mmap.pid, self->mmap.tid, self->mmap.start,
+ self->mmap.len, self->mmap.pgoff, self->mmap.filename);
+
+ if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
+ cpumode == PERF_RECORD_MISC_KERNEL) {
+ ret = event__process_kernel_mmap(self, session);
+ if (ret < 0)
+ goto out_problem;
return 0;
}
+ machine = perf_session__find_host_machine(session);
+ if (machine == NULL)
+ goto out_problem;
thread = perf_session__findnew(session, self->mmap.pid);
- map = map__new(&self->mmap, MAP__FUNCTION,
- session->cwd, session->cwdlen);
+ map = map__new(&machine->user_dsos, self->mmap.start,
+ self->mmap.len, self->mmap.pgoff,
+ self->mmap.pid, self->mmap.filename,
+ MAP__FUNCTION, session->cwd, session->cwdlen);
if (thread == NULL || map == NULL)
goto out_problem;
@@ -434,22 +540,56 @@ int event__process_task(event_t *self, struct perf_session *session)
void thread__find_addr_map(struct thread *self,
struct perf_session *session, u8 cpumode,
- enum map_type type, u64 addr,
+ enum map_type type, pid_t pid, u64 addr,
struct addr_location *al)
{
struct map_groups *mg = &self->mg;
+ struct machine *machine = NULL;
al->thread = self;
al->addr = addr;
+ al->cpumode = cpumode;
+ al->filtered = false;
- if (cpumode == PERF_RECORD_MISC_KERNEL) {
+ if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
al->level = 'k';
- mg = &session->kmaps;
- } else if (cpumode == PERF_RECORD_MISC_USER)
+ machine = perf_session__find_host_machine(session);
+ if (machine == NULL) {
+ al->map = NULL;
+ return;
+ }
+ mg = &machine->kmaps;
+ } else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
al->level = '.';
- else {
- al->level = 'H';
+ machine = perf_session__find_host_machine(session);
+ } else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
+ al->level = 'g';
+ machine = perf_session__find_machine(session, pid);
+ if (machine == NULL) {
+ al->map = NULL;
+ return;
+ }
+ mg = &machine->kmaps;
+ } else {
+ /*
+ * 'u' means guest OS user space.
+ * TODO: We don't support guest user space. Might support it later.
+ */
+ if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest)
+ al->level = 'u';
+ else
+ al->level = 'H';
al->map = NULL;
+
+ if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
+ cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
+ !perf_guest)
+ al->filtered = true;
+ if ((cpumode == PERF_RECORD_MISC_USER ||
+ cpumode == PERF_RECORD_MISC_KERNEL) &&
+ !perf_host)
+ al->filtered = true;
+
return;
}
try_again:
@@ -464,8 +604,10 @@ try_again:
* "[vdso]" dso, but for now lets use the old trick of looking
* in the whole kernel symbol list.
*/
- if ((long long)al->addr < 0 && mg != &session->kmaps) {
- mg = &session->kmaps;
+ if ((long long)al->addr < 0 &&
+ cpumode == PERF_RECORD_MISC_KERNEL &&
+ machine && mg != &machine->kmaps) {
+ mg = &machine->kmaps;
goto try_again;
}
} else
@@ -474,11 +616,11 @@ try_again:
void thread__find_addr_location(struct thread *self,
struct perf_session *session, u8 cpumode,
- enum map_type type, u64 addr,
+ enum map_type type, pid_t pid, u64 addr,
struct addr_location *al,
symbol_filter_t filter)
{
- thread__find_addr_map(self, session, cpumode, type, addr, al);
+ thread__find_addr_map(self, session, cpumode, type, pid, addr, al);
if (al->map != NULL)
al->sym = map__find_symbol(al->map, al->addr, filter);
else
@@ -490,8 +632,10 @@ static void dso__calc_col_width(struct dso *self)
if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
(!symbol_conf.dso_list ||
strlist__has_entry(symbol_conf.dso_list, self->name))) {
- unsigned int slen = strlen(self->name);
- if (slen > dsos__col_width)
+ u16 slen = self->short_name_len;
+ if (verbose)
+ slen = self->long_name_len;
+ if (dsos__col_width < slen)
dsos__col_width = slen;
}
@@ -512,31 +656,55 @@ int event__preprocess_sample(const event_t *self, struct perf_session *session,
goto out_filtered;
dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
+ /*
+ * Have we already created the kernel maps for the host machine?
+ *
+ * This should have happened earlier, when we processed the kernel MMAP
+ * events, but for older perf.data files there was no such thing, so do
+ * it now.
+ */
+ if (cpumode == PERF_RECORD_MISC_KERNEL &&
+ session->host_machine.vmlinux_maps[MAP__FUNCTION] == NULL)
+ machine__create_kernel_maps(&session->host_machine);
- thread__find_addr_location(thread, session, cpumode, MAP__FUNCTION,
- self->ip.ip, al, filter);
+ thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
+ self->ip.pid, self->ip.ip, al);
dump_printf(" ...... dso: %s\n",
al->map ? al->map->dso->long_name :
al->level == 'H' ? "[hypervisor]" : "<not found>");
- /*
- * We have to do this here as we may have a dso with no symbol hit that
- * has a name longer than the ones with symbols sampled.
- */
- if (al->map && !sort_dso.elide && !al->map->dso->slen_calculated)
- dso__calc_col_width(al->map->dso);
-
- if (symbol_conf.dso_list &&
- (!al->map || !al->map->dso ||
- !(strlist__has_entry(symbol_conf.dso_list, al->map->dso->short_name) ||
- (al->map->dso->short_name != al->map->dso->long_name &&
- strlist__has_entry(symbol_conf.dso_list, al->map->dso->long_name)))))
- goto out_filtered;
+ al->sym = NULL;
+
+ if (al->map) {
+ if (symbol_conf.dso_list &&
+ (!al->map || !al->map->dso ||
+ !(strlist__has_entry(symbol_conf.dso_list,
+ al->map->dso->short_name) ||
+ (al->map->dso->short_name != al->map->dso->long_name &&
+ strlist__has_entry(symbol_conf.dso_list,
+ al->map->dso->long_name)))))
+ goto out_filtered;
+ /*
+ * We have to do this here as we may have a dso with no symbol
+ * hit that has a name longer than the ones with symbols
+ * sampled.
+ */
+ if (!sort_dso.elide && !al->map->dso->slen_calculated)
+ dso__calc_col_width(al->map->dso);
+
+ al->sym = map__find_symbol(al->map, al->addr, filter);
+ } else {
+ const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
+
+ if (dsos__col_width < unresolved_col_width &&
+ !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
+ !symbol_conf.dso_list)
+ dsos__col_width = unresolved_col_width;
+ }
if (symbol_conf.sym_list && al->sym &&
!strlist__has_entry(symbol_conf.sym_list, al->sym->name))
goto out_filtered;
- al->filtered = false;
return 0;
out_filtered:
@@ -570,6 +738,7 @@ int event__parse_sample(event_t *event, u64 type, struct sample_data *data)
array++;
}
+ data->id = -1ULL;
if (type & PERF_SAMPLE_ID) {
data->id = *array;
array++;
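The reworked thread__find_addr_map earlier in this event.c hunk keys everything off the sample's cpumode and the perf_host/perf_guest flags: host kernel and user samples keep their 'k' and '.' levels, guest kernel samples get 'g' and the guest machine's kmaps, guest user samples get 'u' but no map lookup yet, and anything the current mode does not cover falls back to 'H' and may be marked filtered. A hypothetical Python summary of that decision table (not perf code):

    KERNEL, USER, GUEST_KERNEL, GUEST_USER = "kernel", "user", "guest_kernel", "guest_user"

    def classify(cpumode, perf_host=True, perf_guest=False):
        # Returns (level letter, filtered?) as the patched thread__find_addr_map assigns them.
        if cpumode == KERNEL and perf_host:
            return "k", False
        if cpumode == USER and perf_host:
            return ".", False
        if cpumode == GUEST_KERNEL and perf_guest:
            return "g", False
        if cpumode == GUEST_USER and perf_guest:
            return "u", False           # recognised, but no guest user maps are looked up yet
        filtered = (cpumode in (GUEST_KERNEL, GUEST_USER) and not perf_guest) or \
                   (cpumode in (KERNEL, USER) and not perf_host)
        return "H", filtered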
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index a33b94952e34..48c2cc9dae4f 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -68,21 +68,54 @@ struct sample_data {
u64 addr;
u64 id;
u64 stream_id;
- u32 cpu;
u64 period;
- struct ip_callchain *callchain;
+ u32 cpu;
u32 raw_size;
void *raw_data;
+ struct ip_callchain *callchain;
};
#define BUILD_ID_SIZE 20
struct build_id_event {
struct perf_event_header header;
+ pid_t pid;
u8 build_id[ALIGN(BUILD_ID_SIZE, sizeof(u64))];
char filename[];
};
+enum perf_user_event_type { /* above any possible kernel type */
+ PERF_RECORD_HEADER_ATTR = 64,
+ PERF_RECORD_HEADER_EVENT_TYPE = 65,
+ PERF_RECORD_HEADER_TRACING_DATA = 66,
+ PERF_RECORD_HEADER_BUILD_ID = 67,
+ PERF_RECORD_FINISHED_ROUND = 68,
+ PERF_RECORD_HEADER_MAX
+};
+
+struct attr_event {
+ struct perf_event_header header;
+ struct perf_event_attr attr;
+ u64 id[];
+};
+
+#define MAX_EVENT_NAME 64
+
+struct perf_trace_event_type {
+ u64 event_id;
+ char name[MAX_EVENT_NAME];
+};
+
+struct event_type_event {
+ struct perf_event_header header;
+ struct perf_trace_event_type event_type;
+};
+
+struct tracing_data_event {
+ struct perf_event_header header;
+ u32 size;
+};
+
typedef union event_union {
struct perf_event_header header;
struct ip_event ip;
@@ -92,22 +125,12 @@ typedef union event_union {
struct lost_event lost;
struct read_event read;
struct sample_event sample;
+ struct attr_event attr;
+ struct event_type_event event_type;
+ struct tracing_data_event tracing_data;
+ struct build_id_event build_id;
} event_t;
-struct events_stats {
- u64 total;
- u64 lost;
-};
-
-struct event_stat_id {
- struct rb_node rb_node;
- struct rb_root hists;
- struct events_stats stats;
- u64 config;
- u64 event_stream;
- u32 type;
-};
-
void event__print_totals(void);
struct perf_session;
@@ -119,10 +142,13 @@ int event__synthesize_thread(pid_t pid, event__handler_t process,
void event__synthesize_threads(event__handler_t process,
struct perf_session *session);
int event__synthesize_kernel_mmap(event__handler_t process,
- struct perf_session *session,
- const char *symbol_name);
+ struct perf_session *session,
+ struct machine *machine,
+ const char *symbol_name);
+
int event__synthesize_modules(event__handler_t process,
- struct perf_session *session);
+ struct perf_session *session,
+ struct machine *machine);
int event__process_comm(event_t *self, struct perf_session *session);
int event__process_lost(event_t *self, struct perf_session *session);
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 6c9aa16ee51f..8847bec64c54 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -99,13 +99,6 @@ int perf_header__add_attr(struct perf_header *self,
return 0;
}
-#define MAX_EVENT_NAME 64
-
-struct perf_trace_event_type {
- u64 event_id;
- char name[MAX_EVENT_NAME];
-};
-
static int event_count;
static struct perf_trace_event_type *events;
@@ -197,7 +190,8 @@ static int write_padded(int fd, const void *bf, size_t count,
continue; \
else
-static int __dsos__write_buildid_table(struct list_head *head, u16 misc, int fd)
+static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
+ u16 misc, int fd)
{
struct dso *pos;
@@ -212,6 +206,7 @@ static int __dsos__write_buildid_table(struct list_head *head, u16 misc, int fd)
len = ALIGN(len, NAME_ALIGN);
memset(&b, 0, sizeof(b));
memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id));
+ b.pid = pid;
b.header.misc = misc;
b.header.size = sizeof(b) + len;
err = do_write(fd, &b, sizeof(b));
@@ -226,13 +221,32 @@ static int __dsos__write_buildid_table(struct list_head *head, u16 misc, int fd)
return 0;
}
-static int dsos__write_buildid_table(int fd)
+static int dsos__write_buildid_table(struct perf_header *header, int fd)
{
- int err = __dsos__write_buildid_table(&dsos__kernel,
- PERF_RECORD_MISC_KERNEL, fd);
- if (err == 0)
- err = __dsos__write_buildid_table(&dsos__user,
- PERF_RECORD_MISC_USER, fd);
+ struct perf_session *session = container_of(header,
+ struct perf_session, header);
+ struct rb_node *nd;
+ int err = 0;
+ u16 kmisc, umisc;
+
+ for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
+ struct machine *pos = rb_entry(nd, struct machine, rb_node);
+ if (machine__is_host(pos)) {
+ kmisc = PERF_RECORD_MISC_KERNEL;
+ umisc = PERF_RECORD_MISC_USER;
+ } else {
+ kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
+ umisc = PERF_RECORD_MISC_GUEST_USER;
+ }
+
+ err = __dsos__write_buildid_table(&pos->kernel_dsos, pos->pid,
+ kmisc, fd);
+ if (err == 0)
+ err = __dsos__write_buildid_table(&pos->user_dsos,
+ pos->pid, umisc, fd);
+ if (err)
+ break;
+ }
return err;
}
@@ -349,9 +363,12 @@ static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
return err;
}
-static int dsos__cache_build_ids(void)
+static int dsos__cache_build_ids(struct perf_header *self)
{
- int err_kernel, err_user;
+ struct perf_session *session = container_of(self,
+ struct perf_session, header);
+ struct rb_node *nd;
+ int ret = 0;
char debugdir[PATH_MAX];
snprintf(debugdir, sizeof(debugdir), "%s/%s", getenv("HOME"),
@@ -360,9 +377,28 @@ static int dsos__cache_build_ids(void)
if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
return -1;
- err_kernel = __dsos__cache_build_ids(&dsos__kernel, debugdir);
- err_user = __dsos__cache_build_ids(&dsos__user, debugdir);
- return err_kernel || err_user ? -1 : 0;
+ for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
+ struct machine *pos = rb_entry(nd, struct machine, rb_node);
+ ret |= __dsos__cache_build_ids(&pos->kernel_dsos, debugdir);
+ ret |= __dsos__cache_build_ids(&pos->user_dsos, debugdir);
+ }
+ return ret ? -1 : 0;
+}
+
+static bool dsos__read_build_ids(struct perf_header *self, bool with_hits)
+{
+ bool ret = false;
+ struct perf_session *session = container_of(self,
+ struct perf_session, header);
+ struct rb_node *nd;
+
+ for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
+ struct machine *pos = rb_entry(nd, struct machine, rb_node);
+ ret |= __dsos__read_build_ids(&pos->kernel_dsos, with_hits);
+ ret |= __dsos__read_build_ids(&pos->user_dsos, with_hits);
+ }
+
+ return ret;
}
static int perf_header__adds_write(struct perf_header *self, int fd)
@@ -373,7 +409,7 @@ static int perf_header__adds_write(struct perf_header *self, int fd)
u64 sec_start;
int idx = 0, err;
- if (dsos__read_build_ids(true))
+ if (dsos__read_build_ids(self, true))
perf_header__set_feat(self, HEADER_BUILD_ID);
nr_sections = bitmap_weight(self->adds_features, HEADER_FEAT_BITS);
@@ -400,7 +436,6 @@ static int perf_header__adds_write(struct perf_header *self, int fd)
trace_sec->size = lseek(fd, 0, SEEK_CUR) - trace_sec->offset;
}
-
if (perf_header__has_feat(self, HEADER_BUILD_ID)) {
struct perf_file_section *buildid_sec;
@@ -408,14 +443,14 @@ static int perf_header__adds_write(struct perf_header *self, int fd)
/* Write build-ids */
buildid_sec->offset = lseek(fd, 0, SEEK_CUR);
- err = dsos__write_buildid_table(fd);
+ err = dsos__write_buildid_table(self, fd);
if (err < 0) {
pr_debug("failed to write buildid table\n");
goto out_free;
}
buildid_sec->size = lseek(fd, 0, SEEK_CUR) -
buildid_sec->offset;
- dsos__cache_build_ids();
+ dsos__cache_build_ids(self);
}
lseek(fd, sec_start, SEEK_SET);
@@ -427,6 +462,25 @@ out_free:
return err;
}
+int perf_header__write_pipe(int fd)
+{
+ struct perf_pipe_file_header f_header;
+ int err;
+
+ f_header = (struct perf_pipe_file_header){
+ .magic = PERF_MAGIC,
+ .size = sizeof(f_header),
+ };
+
+ err = do_write(fd, &f_header, sizeof(f_header));
+ if (err < 0) {
+ pr_debug("failed to write perf pipe header\n");
+ return err;
+ }
+
+ return 0;
+}
+
int perf_header__write(struct perf_header *self, int fd, bool at_exit)
{
struct perf_file_header f_header;
@@ -518,25 +572,10 @@ int perf_header__write(struct perf_header *self, int fd, bool at_exit)
return 0;
}
-static int do_read(int fd, void *buf, size_t size)
-{
- while (size) {
- int ret = read(fd, buf, size);
-
- if (ret <= 0)
- return -1;
-
- size -= ret;
- buf += ret;
- }
-
- return 0;
-}
-
static int perf_header__getbuffer64(struct perf_header *self,
int fd, void *buf, size_t size)
{
- if (do_read(fd, buf, size))
+ if (do_read(fd, buf, size) <= 0)
return -1;
if (self->needs_swap)
@@ -592,7 +631,7 @@ int perf_file_header__read(struct perf_file_header *self,
{
lseek(fd, 0, SEEK_SET);
- if (do_read(fd, self, sizeof(*self)) ||
+ if (do_read(fd, self, sizeof(*self)) <= 0 ||
memcmp(&self->magic, __perf_magic, sizeof(self->magic)))
return -1;
@@ -636,6 +675,93 @@ int perf_file_header__read(struct perf_file_header *self,
return 0;
}
+static int __event_process_build_id(struct build_id_event *bev,
+ char *filename,
+ struct perf_session *session)
+{
+ int err = -1;
+ struct list_head *head;
+ struct machine *machine;
+ u16 misc;
+ struct dso *dso;
+ enum dso_kernel_type dso_type;
+
+ machine = perf_session__findnew_machine(session, bev->pid);
+ if (!machine)
+ goto out;
+
+ misc = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+
+ switch (misc) {
+ case PERF_RECORD_MISC_KERNEL:
+ dso_type = DSO_TYPE_KERNEL;
+ head = &machine->kernel_dsos;
+ break;
+ case PERF_RECORD_MISC_GUEST_KERNEL:
+ dso_type = DSO_TYPE_GUEST_KERNEL;
+ head = &machine->kernel_dsos;
+ break;
+ case PERF_RECORD_MISC_USER:
+ case PERF_RECORD_MISC_GUEST_USER:
+ dso_type = DSO_TYPE_USER;
+ head = &machine->user_dsos;
+ break;
+ default:
+ goto out;
+ }
+
+ dso = __dsos__findnew(head, filename);
+ if (dso != NULL) {
+ char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+
+ dso__set_build_id(dso, &bev->build_id);
+
+ if (filename[0] == '[')
+ dso->kernel = dso_type;
+
+ build_id__sprintf(dso->build_id, sizeof(dso->build_id),
+ sbuild_id);
+ pr_debug("build id event received for %s: %s\n",
+ dso->long_name, sbuild_id);
+ }
+
+ err = 0;
+out:
+ return err;
+}
+
+static int perf_header__read_build_ids(struct perf_header *self,
+ int input, u64 offset, u64 size)
+{
+ struct perf_session *session = container_of(self,
+ struct perf_session, header);
+ struct build_id_event bev;
+ char filename[PATH_MAX];
+ u64 limit = offset + size;
+ int err = -1;
+
+ while (offset < limit) {
+ ssize_t len;
+
+ if (read(input, &bev, sizeof(bev)) != sizeof(bev))
+ goto out;
+
+ if (self->needs_swap)
+ perf_event_header__bswap(&bev.header);
+
+ len = bev.header.size - sizeof(bev);
+ if (read(input, filename, len) != len)
+ goto out;
+
+ __event_process_build_id(&bev, filename, session);
+
+ offset += bev.header.size;
+ }
+ err = 0;
+out:
+ return err;
+}
+
static int perf_file_section__process(struct perf_file_section *self,
struct perf_header *ph,
int feat, int fd)
@@ -648,7 +774,7 @@ static int perf_file_section__process(struct perf_file_section *self,
switch (feat) {
case HEADER_TRACE_INFO:
- trace_report(fd);
+ trace_report(fd, false);
break;
case HEADER_BUILD_ID:
@@ -662,13 +788,56 @@ static int perf_file_section__process(struct perf_file_section *self,
return 0;
}
-int perf_header__read(struct perf_header *self, int fd)
+static int perf_file_header__read_pipe(struct perf_pipe_file_header *self,
+ struct perf_header *ph, int fd,
+ bool repipe)
+{
+ if (do_read(fd, self, sizeof(*self)) <= 0 ||
+ memcmp(&self->magic, __perf_magic, sizeof(self->magic)))
+ return -1;
+
+ if (repipe && do_write(STDOUT_FILENO, self, sizeof(*self)) < 0)
+ return -1;
+
+ if (self->size != sizeof(*self)) {
+ u64 size = bswap_64(self->size);
+
+ if (size != sizeof(*self))
+ return -1;
+
+ ph->needs_swap = true;
+ }
+
+ return 0;
+}
+
+static int perf_header__read_pipe(struct perf_session *session, int fd)
{
+ struct perf_header *self = &session->header;
+ struct perf_pipe_file_header f_header;
+
+ if (perf_file_header__read_pipe(&f_header, self, fd,
+ session->repipe) < 0) {
+ pr_debug("incompatible file format\n");
+ return -EINVAL;
+ }
+
+ session->fd = fd;
+
+ return 0;
+}
+
+int perf_header__read(struct perf_session *session, int fd)
+{
+ struct perf_header *self = &session->header;
struct perf_file_header f_header;
struct perf_file_attr f_attr;
u64 f_id;
int nr_attrs, nr_ids, i, j;
+ if (session->fd_pipe)
+ return perf_header__read_pipe(session, fd);
+
if (perf_file_header__read(&f_header, self, fd) < 0) {
pr_debug("incompatible file format\n");
return -EINVAL;
@@ -753,6 +922,14 @@ perf_header__find_attr(u64 id, struct perf_header *header)
{
int i;
+ /*
+ * We set id to -1 if the data file doesn't contain sample
+ * ids. Check for this and avoid walking through the entire
+ * list of ids which may be large.
+ */
+ if (id == -1ULL)
+ return NULL;
+
for (i = 0; i < header->attrs; i++) {
struct perf_header_attr *attr = header->attr[i];
int j;
@@ -765,3 +942,231 @@ perf_header__find_attr(u64 id, struct perf_header *header)
return NULL;
}
+
+int event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
+ event__handler_t process,
+ struct perf_session *session)
+{
+ event_t *ev;
+ size_t size;
+ int err;
+
+ size = sizeof(struct perf_event_attr);
+ size = ALIGN(size, sizeof(u64));
+ size += sizeof(struct perf_event_header);
+ size += ids * sizeof(u64);
+
+ ev = malloc(size);
+
+ ev->attr.attr = *attr;
+ memcpy(ev->attr.id, id, ids * sizeof(u64));
+
+ ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
+ ev->attr.header.size = size;
+
+ err = process(ev, session);
+
+ free(ev);
+
+ return err;
+}
+
+int event__synthesize_attrs(struct perf_header *self,
+ event__handler_t process,
+ struct perf_session *session)
+{
+ struct perf_header_attr *attr;
+ int i, err = 0;
+
+ for (i = 0; i < self->attrs; i++) {
+ attr = self->attr[i];
+
+ err = event__synthesize_attr(&attr->attr, attr->ids, attr->id,
+ process, session);
+ if (err) {
+ pr_debug("failed to create perf header attribute\n");
+ return err;
+ }
+ }
+
+ return err;
+}
+
+int event__process_attr(event_t *self, struct perf_session *session)
+{
+ struct perf_header_attr *attr;
+ unsigned int i, ids, n_ids;
+
+ attr = perf_header_attr__new(&self->attr.attr);
+ if (attr == NULL)
+ return -ENOMEM;
+
+ ids = self->header.size;
+ ids -= (void *)&self->attr.id - (void *)self;
+ n_ids = ids / sizeof(u64);
+
+ for (i = 0; i < n_ids; i++) {
+ if (perf_header_attr__add_id(attr, self->attr.id[i]) < 0) {
+ perf_header_attr__delete(attr);
+ return -ENOMEM;
+ }
+ }
+
+ if (perf_header__add_attr(&session->header, attr) < 0) {
+ perf_header_attr__delete(attr);
+ return -ENOMEM;
+ }
+
+ perf_session__update_sample_type(session);
+
+ return 0;
+}
+
+int event__synthesize_event_type(u64 event_id, char *name,
+ event__handler_t process,
+ struct perf_session *session)
+{
+ event_t ev;
+ size_t size = 0;
+ int err = 0;
+
+ memset(&ev, 0, sizeof(ev));
+
+ ev.event_type.event_type.event_id = event_id;
+ memset(ev.event_type.event_type.name, 0, MAX_EVENT_NAME);
+ strncpy(ev.event_type.event_type.name, name, MAX_EVENT_NAME - 1);
+
+ ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE;
+ size = strlen(name);
+ size = ALIGN(size, sizeof(u64));
+ ev.event_type.header.size = sizeof(ev.event_type) -
+ (sizeof(ev.event_type.event_type.name) - size);
+
+ err = process(&ev, session);
+
+ return err;
+}
+
+int event__synthesize_event_types(event__handler_t process,
+ struct perf_session *session)
+{
+ struct perf_trace_event_type *type;
+ int i, err = 0;
+
+ for (i = 0; i < event_count; i++) {
+ type = &events[i];
+
+ err = event__synthesize_event_type(type->event_id, type->name,
+ process, session);
+ if (err) {
+ pr_debug("failed to create perf header event type\n");
+ return err;
+ }
+ }
+
+ return err;
+}
+
+int event__process_event_type(event_t *self,
+ struct perf_session *session __unused)
+{
+ if (perf_header__push_event(self->event_type.event_type.event_id,
+ self->event_type.event_type.name) < 0)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int event__synthesize_tracing_data(int fd, struct perf_event_attr *pattrs,
+ int nb_events,
+ event__handler_t process,
+ struct perf_session *session __unused)
+{
+ event_t ev;
+ ssize_t size = 0, aligned_size = 0, padding;
+ int err = 0;
+
+ memset(&ev, 0, sizeof(ev));
+
+ ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
+ size = read_tracing_data_size(fd, pattrs, nb_events);
+ if (size <= 0)
+ return size;
+ aligned_size = ALIGN(size, sizeof(u64));
+ padding = aligned_size - size;
+ ev.tracing_data.header.size = sizeof(ev.tracing_data);
+ ev.tracing_data.size = aligned_size;
+
+ process(&ev, session);
+
+ err = read_tracing_data(fd, pattrs, nb_events);
+ write_padded(fd, NULL, 0, padding);
+
+ return aligned_size;
+}
+
+int event__process_tracing_data(event_t *self,
+ struct perf_session *session)
+{
+ ssize_t size_read, padding, size = self->tracing_data.size;
+ off_t offset = lseek(session->fd, 0, SEEK_CUR);
+ char buf[BUFSIZ];
+
+ /* setup for reading amidst mmap */
+ lseek(session->fd, offset + sizeof(struct tracing_data_event),
+ SEEK_SET);
+
+ size_read = trace_report(session->fd, session->repipe);
+
+ padding = ALIGN(size_read, sizeof(u64)) - size_read;
+
+ if (read(session->fd, buf, padding) < 0)
+ die("reading input file");
+ if (session->repipe) {
+ int retw = write(STDOUT_FILENO, buf, padding);
+ if (retw <= 0 || retw != padding)
+ die("repiping tracing data padding");
+ }
+
+ if (size_read + padding != size)
+ die("tracing data size mismatch");
+
+ return size_read + padding;
+}
+
+int event__synthesize_build_id(struct dso *pos, u16 misc,
+ event__handler_t process,
+ struct machine *machine,
+ struct perf_session *session)
+{
+ event_t ev;
+ size_t len;
+ int err = 0;
+
+ if (!pos->hit)
+ return err;
+
+ memset(&ev, 0, sizeof(ev));
+
+ len = pos->long_name_len + 1;
+ len = ALIGN(len, NAME_ALIGN);
+ memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
+ ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
+ ev.build_id.header.misc = misc;
+ ev.build_id.pid = machine->pid;
+ ev.build_id.header.size = sizeof(ev.build_id) + len;
+ memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
+
+ err = process(&ev, session);
+
+ return err;
+}
+
+int event__process_build_id(event_t *self,
+ struct perf_session *session)
+{
+ __event_process_build_id(&self->build_id,
+ self->build_id.filename,
+ session);
+ return 0;
+}
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 82a6af72d4cc..402ac2454cf8 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -39,6 +39,11 @@ struct perf_file_header {
DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
};
+struct perf_pipe_file_header {
+ u64 magic;
+ u64 size;
+};
+
struct perf_header;
int perf_file_header__read(struct perf_file_header *self,
@@ -47,21 +52,22 @@ int perf_file_header__read(struct perf_file_header *self,
struct perf_header {
int frozen;
int attrs, size;
+ bool needs_swap;
struct perf_header_attr **attr;
s64 attr_offset;
u64 data_offset;
u64 data_size;
u64 event_offset;
u64 event_size;
- bool needs_swap;
DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
};
int perf_header__init(struct perf_header *self);
void perf_header__exit(struct perf_header *self);
-int perf_header__read(struct perf_header *self, int fd);
+int perf_header__read(struct perf_session *session, int fd);
int perf_header__write(struct perf_header *self, int fd, bool at_exit);
+int perf_header__write_pipe(int fd);
int perf_header__add_attr(struct perf_header *self,
struct perf_header_attr *attr);
@@ -89,4 +95,33 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
const char *name, bool is_kallsyms);
int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir);
+int event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
+ event__handler_t process,
+ struct perf_session *session);
+int event__synthesize_attrs(struct perf_header *self,
+ event__handler_t process,
+ struct perf_session *session);
+int event__process_attr(event_t *self, struct perf_session *session);
+
+int event__synthesize_event_type(u64 event_id, char *name,
+ event__handler_t process,
+ struct perf_session *session);
+int event__synthesize_event_types(event__handler_t process,
+ struct perf_session *session);
+int event__process_event_type(event_t *self,
+ struct perf_session *session);
+
+int event__synthesize_tracing_data(int fd, struct perf_event_attr *pattrs,
+ int nb_events,
+ event__handler_t process,
+ struct perf_session *session);
+int event__process_tracing_data(event_t *self,
+ struct perf_session *session);
+
+int event__synthesize_build_id(struct dso *pos, u16 misc,
+ event__handler_t process,
+ struct machine *machine,
+ struct perf_session *session);
+int event__process_build_id(event_t *self, struct perf_session *session);
+
#endif /* __PERF_HEADER_H */
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 2be33c7dbf03..e34fd248067d 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -8,22 +8,65 @@ struct callchain_param callchain_param = {
.min_percent = 0.5
};
+static void hist_entry__add_cpumode_count(struct hist_entry *self,
+ unsigned int cpumode, u64 count)
+{
+ switch (cpumode) {
+ case PERF_RECORD_MISC_KERNEL:
+ self->count_sys += count;
+ break;
+ case PERF_RECORD_MISC_USER:
+ self->count_us += count;
+ break;
+ case PERF_RECORD_MISC_GUEST_KERNEL:
+ self->count_guest_sys += count;
+ break;
+ case PERF_RECORD_MISC_GUEST_USER:
+ self->count_guest_us += count;
+ break;
+ default:
+ break;
+ }
+}
+
/*
* histogram, sorted on item, collects counts
*/
-struct hist_entry *__perf_session__add_hist_entry(struct rb_root *hists,
- struct addr_location *al,
- struct symbol *sym_parent,
- u64 count, bool *hit)
+static struct hist_entry *hist_entry__new(struct hist_entry *template)
+{
+ size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_node) : 0;
+ struct hist_entry *self = malloc(sizeof(*self) + callchain_size);
+
+ if (self != NULL) {
+ *self = *template;
+ if (symbol_conf.use_callchain)
+ callchain_init(self->callchain);
+ }
+
+ return self;
+}
+
+static void hists__inc_nr_entries(struct hists *self, struct hist_entry *entry)
+{
+ if (entry->ms.sym && self->max_sym_namelen < entry->ms.sym->namelen)
+ self->max_sym_namelen = entry->ms.sym->namelen;
+ ++self->nr_entries;
+}
+
+struct hist_entry *__hists__add_entry(struct hists *self,
+ struct addr_location *al,
+ struct symbol *sym_parent, u64 count)
{
- struct rb_node **p = &hists->rb_node;
+ struct rb_node **p = &self->entries.rb_node;
struct rb_node *parent = NULL;
struct hist_entry *he;
struct hist_entry entry = {
.thread = al->thread,
- .map = al->map,
- .sym = al->sym,
+ .ms = {
+ .map = al->map,
+ .sym = al->sym,
+ },
.ip = al->addr,
.level = al->level,
.count = count,
@@ -38,8 +81,8 @@ struct hist_entry *__perf_session__add_hist_entry(struct rb_root *hists,
cmp = hist_entry__cmp(&entry, he);
if (!cmp) {
- *hit = true;
- return he;
+ he->count += count;
+ goto out;
}
if (cmp < 0)
@@ -48,13 +91,14 @@ struct hist_entry *__perf_session__add_hist_entry(struct rb_root *hists,
p = &(*p)->rb_right;
}
- he = malloc(sizeof(*he));
+ he = hist_entry__new(&entry);
if (!he)
return NULL;
- *he = entry;
rb_link_node(&he->rb_node, parent, p);
- rb_insert_color(&he->rb_node, hists);
- *hit = false;
+ rb_insert_color(&he->rb_node, &self->entries);
+ hists__inc_nr_entries(self, he);
+out:
+ hist_entry__add_cpumode_count(he, al->cpumode, count);
return he;
}
@@ -65,7 +109,7 @@ hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
int64_t cmp = 0;
list_for_each_entry(se, &hist_entry__sort_list, list) {
- cmp = se->cmp(left, right);
+ cmp = se->se_cmp(left, right);
if (cmp)
break;
}
@@ -82,7 +126,7 @@ hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
list_for_each_entry(se, &hist_entry__sort_list, list) {
int64_t (*f)(struct hist_entry *, struct hist_entry *);
- f = se->collapse ?: se->cmp;
+ f = se->se_collapse ?: se->se_cmp;
cmp = f(left, right);
if (cmp)
@@ -101,7 +145,7 @@ void hist_entry__free(struct hist_entry *he)
* collapse the histogram
*/
-static void collapse__insert_entry(struct rb_root *root, struct hist_entry *he)
+static bool collapse__insert_entry(struct rb_root *root, struct hist_entry *he)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
@@ -117,7 +161,7 @@ static void collapse__insert_entry(struct rb_root *root, struct hist_entry *he)
if (!cmp) {
iter->count += he->count;
hist_entry__free(he);
- return;
+ return false;
}
if (cmp < 0)
@@ -128,9 +172,10 @@ static void collapse__insert_entry(struct rb_root *root, struct hist_entry *he)
rb_link_node(&he->rb_node, parent, p);
rb_insert_color(&he->rb_node, root);
+ return true;
}
-void perf_session__collapse_resort(struct rb_root *hists)
+void hists__collapse_resort(struct hists *self)
{
struct rb_root tmp;
struct rb_node *next;
@@ -140,33 +185,36 @@ void perf_session__collapse_resort(struct rb_root *hists)
return;
tmp = RB_ROOT;
- next = rb_first(hists);
+ next = rb_first(&self->entries);
+ self->nr_entries = 0;
+ self->max_sym_namelen = 0;
while (next) {
n = rb_entry(next, struct hist_entry, rb_node);
next = rb_next(&n->rb_node);
- rb_erase(&n->rb_node, hists);
- collapse__insert_entry(&tmp, n);
+ rb_erase(&n->rb_node, &self->entries);
+ if (collapse__insert_entry(&tmp, n))
+ hists__inc_nr_entries(self, n);
}
- *hists = tmp;
+ self->entries = tmp;
}
/*
* reverse the map, sort on count.
*/
-static void perf_session__insert_output_hist_entry(struct rb_root *root,
- struct hist_entry *he,
- u64 min_callchain_hits)
+static void __hists__insert_output_entry(struct rb_root *entries,
+ struct hist_entry *he,
+ u64 min_callchain_hits)
{
- struct rb_node **p = &root->rb_node;
+ struct rb_node **p = &entries->rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
if (symbol_conf.use_callchain)
- callchain_param.sort(&he->sorted_chain, &he->callchain,
+ callchain_param.sort(&he->sorted_chain, he->callchain,
min_callchain_hits, &callchain_param);
while (*p != NULL) {
@@ -180,32 +228,34 @@ static void perf_session__insert_output_hist_entry(struct rb_root *root,
}
rb_link_node(&he->rb_node, parent, p);
- rb_insert_color(&he->rb_node, root);
+ rb_insert_color(&he->rb_node, entries);
}
-void perf_session__output_resort(struct rb_root *hists, u64 total_samples)
+void hists__output_resort(struct hists *self)
{
struct rb_root tmp;
struct rb_node *next;
struct hist_entry *n;
u64 min_callchain_hits;
- min_callchain_hits =
- total_samples * (callchain_param.min_percent / 100);
+ min_callchain_hits = self->stats.total * (callchain_param.min_percent / 100);
tmp = RB_ROOT;
- next = rb_first(hists);
+ next = rb_first(&self->entries);
+
+ self->nr_entries = 0;
+ self->max_sym_namelen = 0;
while (next) {
n = rb_entry(next, struct hist_entry, rb_node);
next = rb_next(&n->rb_node);
- rb_erase(&n->rb_node, hists);
- perf_session__insert_output_hist_entry(&tmp, n,
- min_callchain_hits);
+ rb_erase(&n->rb_node, &self->entries);
+ __hists__insert_output_entry(&tmp, n, min_callchain_hits);
+ hists__inc_nr_entries(self, n);
}
- *hists = tmp;
+ self->entries = tmp;
}
static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
@@ -258,8 +308,8 @@ static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
} else
ret += fprintf(fp, "%s", " ");
}
- if (chain->sym)
- ret += fprintf(fp, "%s\n", chain->sym->name);
+ if (chain->ms.sym)
+ ret += fprintf(fp, "%s\n", chain->ms.sym->name);
else
ret += fprintf(fp, "%p\n", (void *)(long)chain->ip);
@@ -278,7 +328,7 @@ static void init_rem_hits(void)
}
strcpy(rem_sq_bracket->name, "[...]");
- rem_hits.sym = rem_sq_bracket;
+ rem_hits.ms.sym = rem_sq_bracket;
}
static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
@@ -293,6 +343,7 @@ static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
u64 remaining;
size_t ret = 0;
int i;
+ uint entries_printed = 0;
if (callchain_param.mode == CHAIN_GRAPH_REL)
new_total = self->children_hit;
@@ -328,8 +379,6 @@ static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
left_margin);
i = 0;
list_for_each_entry(chain, &child->val, list) {
- if (chain->ip >= PERF_CONTEXT_MAX)
- continue;
ret += ipchain__fprintf_graph(fp, chain, depth,
new_depth_mask, i++,
new_total,
@@ -341,6 +390,8 @@ static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
new_depth_mask | (1 << depth),
left_margin);
node = next;
+ if (++entries_printed == callchain_param.print_limit)
+ break;
}
if (callchain_param.mode == CHAIN_GRAPH_REL &&
@@ -366,11 +417,9 @@ static size_t callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
bool printed = false;
int i = 0;
int ret = 0;
+ u32 entries_printed = 0;
list_for_each_entry(chain, &self->val, list) {
- if (chain->ip >= PERF_CONTEXT_MAX)
- continue;
-
if (!i++ && sort__first_dimension == SORT_SYM)
continue;
@@ -385,10 +434,13 @@ static size_t callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
} else
ret += callchain__fprintf_left_margin(fp, left_margin);
- if (chain->sym)
- ret += fprintf(fp, " %s\n", chain->sym->name);
+ if (chain->ms.sym)
+ ret += fprintf(fp, " %s\n", chain->ms.sym->name);
else
ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);
+
+ if (++entries_printed == callchain_param.print_limit)
+ break;
}
ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin);
@@ -411,8 +463,8 @@ static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
list_for_each_entry(chain, &self->val, list) {
if (chain->ip >= PERF_CONTEXT_MAX)
continue;
- if (chain->sym)
- ret += fprintf(fp, " %s\n", chain->sym->name);
+ if (chain->ms.sym)
+ ret += fprintf(fp, " %s\n", chain->ms.sym->name);
else
ret += fprintf(fp, " %p\n",
(void *)(long)chain->ip);
@@ -427,6 +479,7 @@ static size_t hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
struct rb_node *rb_node;
struct callchain_node *chain;
size_t ret = 0;
+ u32 entries_printed = 0;
rb_node = rb_first(&self->sorted_chain);
while (rb_node) {
@@ -449,48 +502,81 @@ static size_t hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
break;
}
ret += fprintf(fp, "\n");
+ if (++entries_printed == callchain_param.print_limit)
+ break;
rb_node = rb_next(rb_node);
}
return ret;
}
-static size_t hist_entry__fprintf(struct hist_entry *self,
- struct perf_session *pair_session,
- bool show_displacement,
- long displacement, FILE *fp,
- u64 session_total)
+int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
+ struct hists *pair_hists, bool show_displacement,
+ long displacement, bool color, u64 session_total)
{
struct sort_entry *se;
- u64 count, total;
+ u64 count, total, count_sys, count_us, count_guest_sys, count_guest_us;
const char *sep = symbol_conf.field_sep;
- size_t ret;
+ int ret;
if (symbol_conf.exclude_other && !self->parent)
return 0;
- if (pair_session) {
+ if (pair_hists) {
count = self->pair ? self->pair->count : 0;
- total = pair_session->events_stats.total;
+ total = pair_hists->stats.total;
+ count_sys = self->pair ? self->pair->count_sys : 0;
+ count_us = self->pair ? self->pair->count_us : 0;
+ count_guest_sys = self->pair ? self->pair->count_guest_sys : 0;
+ count_guest_us = self->pair ? self->pair->count_guest_us : 0;
} else {
count = self->count;
total = session_total;
+ count_sys = self->count_sys;
+ count_us = self->count_us;
+ count_guest_sys = self->count_guest_sys;
+ count_guest_us = self->count_guest_us;
}
- if (total)
- ret = percent_color_fprintf(fp, sep ? "%.2f" : " %6.2f%%",
- (count * 100.0) / total);
- else
- ret = fprintf(fp, sep ? "%lld" : "%12lld ", count);
+ if (total) {
+ if (color)
+ ret = percent_color_snprintf(s, size,
+ sep ? "%.2f" : " %6.2f%%",
+ (count * 100.0) / total);
+ else
+ ret = snprintf(s, size, sep ? "%.2f" : " %6.2f%%",
+ (count * 100.0) / total);
+ if (symbol_conf.show_cpu_utilization) {
+ ret += percent_color_snprintf(s + ret, size - ret,
+ sep ? "%.2f" : " %6.2f%%",
+ (count_sys * 100.0) / total);
+ ret += percent_color_snprintf(s + ret, size - ret,
+ sep ? "%.2f" : " %6.2f%%",
+ (count_us * 100.0) / total);
+ if (perf_guest) {
+ ret += percent_color_snprintf(s + ret,
+ size - ret,
+ sep ? "%.2f" : " %6.2f%%",
+ (count_guest_sys * 100.0) /
+ total);
+ ret += percent_color_snprintf(s + ret,
+ size - ret,
+ sep ? "%.2f" : " %6.2f%%",
+ (count_guest_us * 100.0) /
+ total);
+ }
+ }
+ } else
+ ret = snprintf(s, size, sep ? "%lld" : "%12lld ", count);
if (symbol_conf.show_nr_samples) {
if (sep)
- fprintf(fp, "%c%lld", *sep, count);
+ ret += snprintf(s + ret, size - ret, "%c%lld", *sep, count);
else
- fprintf(fp, "%11lld", count);
+ ret += snprintf(s + ret, size - ret, "%11lld", count);
}
- if (pair_session) {
+ if (pair_hists) {
char bf[32];
double old_percent = 0, new_percent = 0, diff;
@@ -507,9 +593,9 @@ static size_t hist_entry__fprintf(struct hist_entry *self,
snprintf(bf, sizeof(bf), " ");
if (sep)
- ret += fprintf(fp, "%c%s", *sep, bf);
+ ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
else
- ret += fprintf(fp, "%11.11s", bf);
+ ret += snprintf(s + ret, size - ret, "%11.11s", bf);
if (show_displacement) {
if (displacement)
@@ -518,9 +604,9 @@ static size_t hist_entry__fprintf(struct hist_entry *self,
snprintf(bf, sizeof(bf), " ");
if (sep)
- fprintf(fp, "%c%s", *sep, bf);
+ ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
else
- fprintf(fp, "%6.6s", bf);
+ ret += snprintf(s + ret, size - ret, "%6.6s", bf);
}
}
@@ -528,33 +614,43 @@ static size_t hist_entry__fprintf(struct hist_entry *self,
if (se->elide)
continue;
- fprintf(fp, "%s", sep ?: " ");
- ret += se->print(fp, self, se->width ? *se->width : 0);
+ ret += snprintf(s + ret, size - ret, "%s", sep ?: " ");
+ ret += se->se_snprintf(self, s + ret, size - ret,
+ se->se_width ? *se->se_width : 0);
}
- ret += fprintf(fp, "\n");
+ return ret;
+}
- if (symbol_conf.use_callchain) {
- int left_margin = 0;
+int hist_entry__fprintf(struct hist_entry *self, struct hists *pair_hists,
+ bool show_displacement, long displacement, FILE *fp,
+ u64 session_total)
+{
+ char bf[512];
+ hist_entry__snprintf(self, bf, sizeof(bf), pair_hists,
+ show_displacement, displacement,
+ true, session_total);
+ return fprintf(fp, "%s\n", bf);
+}
- if (sort__first_dimension == SORT_COMM) {
- se = list_first_entry(&hist_entry__sort_list, typeof(*se),
- list);
- left_margin = se->width ? *se->width : 0;
- left_margin -= thread__comm_len(self->thread);
- }
+static size_t hist_entry__fprintf_callchain(struct hist_entry *self, FILE *fp,
+ u64 session_total)
+{
+ int left_margin = 0;
- hist_entry_callchain__fprintf(fp, self, session_total,
- left_margin);
+ if (sort__first_dimension == SORT_COMM) {
+ struct sort_entry *se = list_first_entry(&hist_entry__sort_list,
+ typeof(*se), list);
+ left_margin = se->se_width ? *se->se_width : 0;
+ left_margin -= thread__comm_len(self->thread);
}
- return ret;
+ return hist_entry_callchain__fprintf(fp, self, session_total,
+ left_margin);
}
-size_t perf_session__fprintf_hists(struct rb_root *hists,
- struct perf_session *pair,
- bool show_displacement, FILE *fp,
- u64 session_total)
+size_t hists__fprintf(struct hists *self, struct hists *pair,
+ bool show_displacement, FILE *fp)
{
struct sort_entry *se;
struct rb_node *nd;
@@ -576,6 +672,24 @@ size_t perf_session__fprintf_hists(struct rb_root *hists,
fputs(" Samples ", fp);
}
+ if (symbol_conf.show_cpu_utilization) {
+ if (sep) {
+ ret += fprintf(fp, "%csys", *sep);
+ ret += fprintf(fp, "%cus", *sep);
+ if (perf_guest) {
+ ret += fprintf(fp, "%cguest sys", *sep);
+ ret += fprintf(fp, "%cguest us", *sep);
+ }
+ } else {
+ ret += fprintf(fp, " sys ");
+ ret += fprintf(fp, " us ");
+ if (perf_guest) {
+ ret += fprintf(fp, " guest sys ");
+ ret += fprintf(fp, " guest us ");
+ }
+ }
+ }
+
if (pair) {
if (sep)
ret += fprintf(fp, "%cDelta", *sep);
@@ -594,22 +708,22 @@ size_t perf_session__fprintf_hists(struct rb_root *hists,
if (se->elide)
continue;
if (sep) {
- fprintf(fp, "%c%s", *sep, se->header);
+ fprintf(fp, "%c%s", *sep, se->se_header);
continue;
}
- width = strlen(se->header);
- if (se->width) {
+ width = strlen(se->se_header);
+ if (se->se_width) {
if (symbol_conf.col_width_list_str) {
if (col_width) {
- *se->width = atoi(col_width);
+ *se->se_width = atoi(col_width);
col_width = strchr(col_width, ',');
if (col_width)
++col_width;
}
}
- width = *se->width = max(*se->width, width);
+ width = *se->se_width = max(*se->se_width, width);
}
- fprintf(fp, " %*s", width, se->header);
+ fprintf(fp, " %*s", width, se->se_header);
}
fprintf(fp, "\n");
@@ -631,10 +745,10 @@ size_t perf_session__fprintf_hists(struct rb_root *hists,
continue;
fprintf(fp, " ");
- if (se->width)
- width = *se->width;
+ if (se->se_width)
+ width = *se->se_width;
else
- width = strlen(se->header);
+ width = strlen(se->se_header);
for (i = 0; i < width; i++)
fprintf(fp, ".");
}
@@ -642,7 +756,7 @@ size_t perf_session__fprintf_hists(struct rb_root *hists,
fprintf(fp, "\n#\n");
print_entries:
- for (nd = rb_first(hists); nd; nd = rb_next(nd)) {
+ for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
if (show_displacement) {
@@ -654,10 +768,14 @@ print_entries:
++position;
}
ret += hist_entry__fprintf(h, pair, show_displacement,
- displacement, fp, session_total);
- if (h->map == NULL && verbose > 1) {
+ displacement, fp, self->stats.total);
+
+ if (symbol_conf.use_callchain)
+ ret += hist_entry__fprintf_callchain(h, fp, self->stats.total);
+
+ if (h->ms.map == NULL && verbose > 1) {
__map_groups__fprintf_maps(&h->thread->mg,
- MAP__FUNCTION, fp);
+ MAP__FUNCTION, verbose, fp);
fprintf(fp, "%.10s end\n", graph_dotted_line);
}
}
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 16f360cce5bf..1b18d04195dc 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -6,24 +6,42 @@
extern struct callchain_param callchain_param;
-struct perf_session;
struct hist_entry;
struct addr_location;
struct symbol;
struct rb_root;
-struct hist_entry *__perf_session__add_hist_entry(struct rb_root *hists,
- struct addr_location *al,
- struct symbol *parent,
- u64 count, bool *hit);
+struct events_stats {
+ u64 total;
+ u64 lost;
+};
+
+struct hists {
+ struct rb_node rb_node;
+ struct rb_root entries;
+ u64 nr_entries;
+ struct events_stats stats;
+ u64 config;
+ u64 event_stream;
+ u32 type;
+ u32 max_sym_namelen;
+};
+
+struct hist_entry *__hists__add_entry(struct hists *self,
+ struct addr_location *al,
+ struct symbol *parent, u64 count);
extern int64_t hist_entry__cmp(struct hist_entry *, struct hist_entry *);
extern int64_t hist_entry__collapse(struct hist_entry *, struct hist_entry *);
+int hist_entry__fprintf(struct hist_entry *self, struct hists *pair_hists,
+ bool show_displacement, long displacement, FILE *fp,
+ u64 total);
+int hist_entry__snprintf(struct hist_entry *self, char *bf, size_t size,
+ struct hists *pair_hists, bool show_displacement,
+ long displacement, bool color, u64 total);
void hist_entry__free(struct hist_entry *);
-void perf_session__output_resort(struct rb_root *hists, u64 total_samples);
-void perf_session__collapse_resort(struct rb_root *hists);
-size_t perf_session__fprintf_hists(struct rb_root *hists,
- struct perf_session *pair,
- bool show_displacement, FILE *fp,
- u64 session_total);
+void hists__output_resort(struct hists *self);
+void hists__collapse_resort(struct hists *self);
+size_t hists__fprintf(struct hists *self, struct hists *pair,
+ bool show_displacement, FILE *fp);
#endif /* __PERF_HIST_H */
diff --git a/tools/perf/util/hweight.c b/tools/perf/util/hweight.c
new file mode 100644
index 000000000000..5c1d0d099f0d
--- /dev/null
+++ b/tools/perf/util/hweight.c
@@ -0,0 +1,31 @@
+#include <linux/bitops.h>
+
+/**
+ * hweightN - returns the Hamming weight of an N-bit word
+ * @x: the word to weigh
+ *
+ * The Hamming Weight of a number is the total number of bits set in it.
+ */
+
+unsigned int hweight32(unsigned int w)
+{
+ unsigned int res = w - ((w >> 1) & 0x55555555);
+ res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
+ res = (res + (res >> 4)) & 0x0F0F0F0F;
+ res = res + (res >> 8);
+ return (res + (res >> 16)) & 0x000000FF;
+}
+
+unsigned long hweight64(__u64 w)
+{
+#if BITS_PER_LONG == 32
+ return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w);
+#elif BITS_PER_LONG == 64
+ __u64 res = w - ((w >> 1) & 0x5555555555555555ul);
+ res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
+ res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
+ res = res + (res >> 8);
+ res = res + (res >> 16);
+ return (res + (res >> 32)) & 0x00000000000000FFul;
+#endif
+}
diff --git a/tools/perf/util/include/asm/bitops.h b/tools/perf/util/include/asm/bitops.h
deleted file mode 100644
index 58e9817ffae0..000000000000
--- a/tools/perf/util/include/asm/bitops.h
+++ /dev/null
@@ -1,18 +0,0 @@
-#ifndef _PERF_ASM_BITOPS_H_
-#define _PERF_ASM_BITOPS_H_
-
-#include <sys/types.h>
-#include "../../types.h"
-#include <linux/compiler.h>
-
-/* CHECKME: Not sure both always match */
-#define BITS_PER_LONG __WORDSIZE
-
-#include "../../../../include/asm-generic/bitops/__fls.h"
-#include "../../../../include/asm-generic/bitops/fls.h"
-#include "../../../../include/asm-generic/bitops/fls64.h"
-#include "../../../../include/asm-generic/bitops/__ffs.h"
-#include "../../../../include/asm-generic/bitops/ffz.h"
-#include "../../../../include/asm-generic/bitops/hweight.h"
-
-#endif
diff --git a/tools/perf/util/include/asm/hweight.h b/tools/perf/util/include/asm/hweight.h
new file mode 100644
index 000000000000..36cf26d434a5
--- /dev/null
+++ b/tools/perf/util/include/asm/hweight.h
@@ -0,0 +1,8 @@
+#ifndef PERF_HWEIGHT_H
+#define PERF_HWEIGHT_H
+
+#include <linux/types.h>
+unsigned int hweight32(unsigned int w);
+unsigned long hweight64(__u64 w);
+
+#endif /* PERF_HWEIGHT_H */
diff --git a/tools/perf/util/include/dwarf-regs.h b/tools/perf/util/include/dwarf-regs.h
new file mode 100644
index 000000000000..cf6727e99c44
--- /dev/null
+++ b/tools/perf/util/include/dwarf-regs.h
@@ -0,0 +1,8 @@
+#ifndef _PERF_DWARF_REGS_H_
+#define _PERF_DWARF_REGS_H_
+
+#ifdef DWARF_SUPPORT
+const char *get_arch_regstr(unsigned int n);
+#endif
+
+#endif
diff --git a/tools/perf/util/include/linux/bitmap.h b/tools/perf/util/include/linux/bitmap.h
index 94507639a8c4..eda4416efa0a 100644
--- a/tools/perf/util/include/linux/bitmap.h
+++ b/tools/perf/util/include/linux/bitmap.h
@@ -1,3 +1,35 @@
-#include "../../../../include/linux/bitmap.h"
-#include "../../../../include/asm-generic/bitops/find.h"
-#include <linux/errno.h>
+#ifndef _PERF_BITOPS_H
+#define _PERF_BITOPS_H
+
+#include <string.h>
+#include <linux/bitops.h>
+
+int __bitmap_weight(const unsigned long *bitmap, int bits);
+
+#define BITMAP_LAST_WORD_MASK(nbits) \
+( \
+ ((nbits) % BITS_PER_LONG) ? \
+ (1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL \
+)
+
+#define small_const_nbits(nbits) \
+ (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
+
+static inline void bitmap_zero(unsigned long *dst, int nbits)
+{
+ if (small_const_nbits(nbits))
+ *dst = 0UL;
+ else {
+ int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ memset(dst, 0, len);
+ }
+}
+
+static inline int bitmap_weight(const unsigned long *src, int nbits)
+{
+ if (small_const_nbits(nbits))
+ return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
+ return __bitmap_weight(src, nbits);
+}
+
+#endif /* _PERF_BITOPS_H */
diff --git a/tools/perf/util/include/linux/bitops.h b/tools/perf/util/include/linux/bitops.h
index 8d63116e9435..bb4ac2e05385 100644
--- a/tools/perf/util/include/linux/bitops.h
+++ b/tools/perf/util/include/linux/bitops.h
@@ -1,13 +1,12 @@
#ifndef _PERF_LINUX_BITOPS_H_
#define _PERF_LINUX_BITOPS_H_
-#define __KERNEL__
+#include <linux/kernel.h>
+#include <asm/hweight.h>
-#define CONFIG_GENERIC_FIND_NEXT_BIT
-#define CONFIG_GENERIC_FIND_FIRST_BIT
-#include "../../../../include/linux/bitops.h"
-
-#undef __KERNEL__
+#define BITS_PER_LONG __WORDSIZE
+#define BITS_PER_BYTE 8
+#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
static inline void set_bit(int nr, unsigned long *addr)
{
@@ -20,10 +19,9 @@ static __always_inline int test_bit(unsigned int nr, const unsigned long *addr)
(((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
}
-unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, unsigned
- long size, unsigned long offset);
-
-unsigned long generic_find_next_le_bit(const unsigned long *addr, unsigned
- long size, unsigned long offset);
+static inline unsigned long hweight_long(unsigned long w)
+{
+ return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
+}
#endif
diff --git a/tools/perf/util/include/linux/compiler.h b/tools/perf/util/include/linux/compiler.h
index dfb0713ed47f..791f9dd27ebf 100644
--- a/tools/perf/util/include/linux/compiler.h
+++ b/tools/perf/util/include/linux/compiler.h
@@ -7,4 +7,6 @@
#define __user
#define __attribute_const__
+#define __used __attribute__((__unused__))
+
#endif
diff --git a/tools/perf/util/include/linux/kernel.h b/tools/perf/util/include/linux/kernel.h
index f2611655ab51..388ab1bfd114 100644
--- a/tools/perf/util/include/linux/kernel.h
+++ b/tools/perf/util/include/linux/kernel.h
@@ -85,16 +85,19 @@ simple_strtoul(const char *nptr, char **endptr, int base)
return strtoul(nptr, endptr, base);
}
+int eprintf(int level,
+ const char *fmt, ...) __attribute__((format(printf, 2, 3)));
+
#ifndef pr_fmt
#define pr_fmt(fmt) fmt
#endif
#define pr_err(fmt, ...) \
- do { fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__); } while (0)
+ eprintf(0, pr_fmt(fmt), ##__VA_ARGS__)
#define pr_warning(fmt, ...) \
- do { fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__); } while (0)
+ eprintf(0, pr_fmt(fmt), ##__VA_ARGS__)
#define pr_info(fmt, ...) \
- do { fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__); } while (0)
+ eprintf(0, pr_fmt(fmt), ##__VA_ARGS__)
#define pr_debug(fmt, ...) \
eprintf(1, pr_fmt(fmt), ##__VA_ARGS__)
#define pr_debugN(n, fmt, ...) \
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index e509cd59c67d..e672f2fef65b 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -1,9 +1,11 @@
-#include "event.h"
#include "symbol.h"
+#include <errno.h>
+#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
-#include "debug.h"
+#include <unistd.h>
+#include "map.h"
const char *map_type__name[MAP__NR_TYPES] = {
[MAP__FUNCTION] = "Functions",
@@ -36,15 +38,16 @@ void map__init(struct map *self, enum map_type type,
self->map_ip = map__map_ip;
self->unmap_ip = map__unmap_ip;
RB_CLEAR_NODE(&self->rb_node);
+ self->groups = NULL;
}
-struct map *map__new(struct mmap_event *event, enum map_type type,
- char *cwd, int cwdlen)
+struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
+ u64 pgoff, u32 pid, char *filename,
+ enum map_type type, char *cwd, int cwdlen)
{
struct map *self = malloc(sizeof(*self));
if (self != NULL) {
- const char *filename = event->filename;
char newfilename[PATH_MAX];
struct dso *dso;
int anon;
@@ -62,16 +65,15 @@ struct map *map__new(struct mmap_event *event, enum map_type type,
anon = is_anon_memory(filename);
if (anon) {
- snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", event->pid);
+ snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", pid);
filename = newfilename;
}
- dso = dsos__findnew(filename);
+ dso = __dsos__findnew(dsos__list, filename);
if (dso == NULL)
goto out_delete;
- map__init(self, type, event->start, event->start + event->len,
- event->pgoff, dso);
+ map__init(self, type, start, start + len, pgoff, dso);
if (anon) {
set_identity:
@@ -235,3 +237,392 @@ u64 map__objdump_2ip(struct map *map, u64 addr)
map->unmap_ip(map, addr); /* RIP -> IP */
return ip;
}
+
+void map_groups__init(struct map_groups *self)
+{
+ int i;
+ for (i = 0; i < MAP__NR_TYPES; ++i) {
+ self->maps[i] = RB_ROOT;
+ INIT_LIST_HEAD(&self->removed_maps[i]);
+ }
+ self->machine = NULL;
+}
+
+void map_groups__flush(struct map_groups *self)
+{
+ int type;
+
+ for (type = 0; type < MAP__NR_TYPES; type++) {
+ struct rb_root *root = &self->maps[type];
+ struct rb_node *next = rb_first(root);
+
+ while (next) {
+ struct map *pos = rb_entry(next, struct map, rb_node);
+ next = rb_next(&pos->rb_node);
+ rb_erase(&pos->rb_node, root);
+ /*
+ * We may have references to this map, for
+ * instance in some hist_entry instances, so
+ * just move them to a separate list.
+ */
+ list_add_tail(&pos->node, &self->removed_maps[pos->type]);
+ }
+ }
+}
+
+struct symbol *map_groups__find_symbol(struct map_groups *self,
+ enum map_type type, u64 addr,
+ struct map **mapp,
+ symbol_filter_t filter)
+{
+ struct map *map = map_groups__find(self, type, addr);
+
+ if (map != NULL) {
+ if (mapp != NULL)
+ *mapp = map;
+ return map__find_symbol(map, map->map_ip(map, addr), filter);
+ }
+
+ return NULL;
+}
+
+struct symbol *map_groups__find_symbol_by_name(struct map_groups *self,
+ enum map_type type,
+ const char *name,
+ struct map **mapp,
+ symbol_filter_t filter)
+{
+ struct rb_node *nd;
+
+ for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) {
+ struct map *pos = rb_entry(nd, struct map, rb_node);
+ struct symbol *sym = map__find_symbol_by_name(pos, name, filter);
+
+ if (sym == NULL)
+ continue;
+ if (mapp != NULL)
+ *mapp = pos;
+ return sym;
+ }
+
+ return NULL;
+}
+
+size_t __map_groups__fprintf_maps(struct map_groups *self,
+ enum map_type type, int verbose, FILE *fp)
+{
+ size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
+ struct rb_node *nd;
+
+ for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) {
+ struct map *pos = rb_entry(nd, struct map, rb_node);
+ printed += fprintf(fp, "Map:");
+ printed += map__fprintf(pos, fp);
+ if (verbose > 2) {
+ printed += dso__fprintf(pos->dso, type, fp);
+ printed += fprintf(fp, "--\n");
+ }
+ }
+
+ return printed;
+}
+
+size_t map_groups__fprintf_maps(struct map_groups *self, int verbose, FILE *fp)
+{
+ size_t printed = 0, i;
+ for (i = 0; i < MAP__NR_TYPES; ++i)
+ printed += __map_groups__fprintf_maps(self, i, verbose, fp);
+ return printed;
+}
+
+static size_t __map_groups__fprintf_removed_maps(struct map_groups *self,
+ enum map_type type,
+ int verbose, FILE *fp)
+{
+ struct map *pos;
+ size_t printed = 0;
+
+ list_for_each_entry(pos, &self->removed_maps[type], node) {
+ printed += fprintf(fp, "Map:");
+ printed += map__fprintf(pos, fp);
+ if (verbose > 1) {
+ printed += dso__fprintf(pos->dso, type, fp);
+ printed += fprintf(fp, "--\n");
+ }
+ }
+ return printed;
+}
+
+static size_t map_groups__fprintf_removed_maps(struct map_groups *self,
+ int verbose, FILE *fp)
+{
+ size_t printed = 0, i;
+ for (i = 0; i < MAP__NR_TYPES; ++i)
+ printed += __map_groups__fprintf_removed_maps(self, i, verbose, fp);
+ return printed;
+}
+
+size_t map_groups__fprintf(struct map_groups *self, int verbose, FILE *fp)
+{
+ size_t printed = map_groups__fprintf_maps(self, verbose, fp);
+ printed += fprintf(fp, "Removed maps:\n");
+ return printed + map_groups__fprintf_removed_maps(self, verbose, fp);
+}
+
+int map_groups__fixup_overlappings(struct map_groups *self, struct map *map,
+ int verbose, FILE *fp)
+{
+ struct rb_root *root = &self->maps[map->type];
+ struct rb_node *next = rb_first(root);
+
+ while (next) {
+ struct map *pos = rb_entry(next, struct map, rb_node);
+ next = rb_next(&pos->rb_node);
+
+ if (!map__overlap(pos, map))
+ continue;
+
+ if (verbose >= 2) {
+ fputs("overlapping maps:\n", fp);
+ map__fprintf(map, fp);
+ map__fprintf(pos, fp);
+ }
+
+ rb_erase(&pos->rb_node, root);
+ /*
+ * We may have references to this map, for instance in some
+ * hist_entry instances, so just move them to a separate
+ * list.
+ */
+ list_add_tail(&pos->node, &self->removed_maps[map->type]);
+ /*
+ * Now check if we need to create new maps for areas not
+ * overlapped by the new map:
+ */
+ if (map->start > pos->start) {
+ struct map *before = map__clone(pos);
+
+ if (before == NULL)
+ return -ENOMEM;
+
+ before->end = map->start - 1;
+ map_groups__insert(self, before);
+ if (verbose >= 2)
+ map__fprintf(before, fp);
+ }
+
+ if (map->end < pos->end) {
+ struct map *after = map__clone(pos);
+
+ if (after == NULL)
+ return -ENOMEM;
+
+ after->start = map->end + 1;
+ map_groups__insert(self, after);
+ if (verbose >= 2)
+ map__fprintf(after, fp);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * XXX This should not really _copy_ the maps, but refcount them.
+ */
+int map_groups__clone(struct map_groups *self,
+ struct map_groups *parent, enum map_type type)
+{
+ struct rb_node *nd;
+ for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) {
+ struct map *map = rb_entry(nd, struct map, rb_node);
+ struct map *new = map__clone(map);
+ if (new == NULL)
+ return -ENOMEM;
+ map_groups__insert(self, new);
+ }
+ return 0;
+}
+
+static u64 map__reloc_map_ip(struct map *map, u64 ip)
+{
+ return ip + (s64)map->pgoff;
+}
+
+static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
+{
+ return ip - (s64)map->pgoff;
+}
+
+void map__reloc_vmlinux(struct map *self)
+{
+ struct kmap *kmap = map__kmap(self);
+ s64 reloc;
+
+ if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr)
+ return;
+
+ reloc = (kmap->ref_reloc_sym->unrelocated_addr -
+ kmap->ref_reloc_sym->addr);
+
+ if (!reloc)
+ return;
+
+ self->map_ip = map__reloc_map_ip;
+ self->unmap_ip = map__reloc_unmap_ip;
+ self->pgoff = reloc;
+}
+
+void maps__insert(struct rb_root *maps, struct map *map)
+{
+ struct rb_node **p = &maps->rb_node;
+ struct rb_node *parent = NULL;
+ const u64 ip = map->start;
+ struct map *m;
+
+ while (*p != NULL) {
+ parent = *p;
+ m = rb_entry(parent, struct map, rb_node);
+ if (ip < m->start)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ rb_link_node(&map->rb_node, parent, p);
+ rb_insert_color(&map->rb_node, maps);
+}
+
+struct map *maps__find(struct rb_root *maps, u64 ip)
+{
+ struct rb_node **p = &maps->rb_node;
+ struct rb_node *parent = NULL;
+ struct map *m;
+
+ while (*p != NULL) {
+ parent = *p;
+ m = rb_entry(parent, struct map, rb_node);
+ if (ip < m->start)
+ p = &(*p)->rb_left;
+ else if (ip > m->end)
+ p = &(*p)->rb_right;
+ else
+ return m;
+ }
+
+ return NULL;
+}
+
+int machine__init(struct machine *self, const char *root_dir, pid_t pid)
+{
+ map_groups__init(&self->kmaps);
+ RB_CLEAR_NODE(&self->rb_node);
+ INIT_LIST_HEAD(&self->user_dsos);
+ INIT_LIST_HEAD(&self->kernel_dsos);
+
+ self->kmaps.machine = self;
+ self->pid = pid;
+ self->root_dir = strdup(root_dir);
+ return self->root_dir == NULL ? -ENOMEM : 0;
+}
+
+struct machine *machines__add(struct rb_root *self, pid_t pid,
+ const char *root_dir)
+{
+ struct rb_node **p = &self->rb_node;
+ struct rb_node *parent = NULL;
+ struct machine *pos, *machine = malloc(sizeof(*machine));
+
+ if (!machine)
+ return NULL;
+
+ if (machine__init(machine, root_dir, pid) != 0) {
+ free(machine);
+ return NULL;
+ }
+
+ while (*p != NULL) {
+ parent = *p;
+ pos = rb_entry(parent, struct machine, rb_node);
+ if (pid < pos->pid)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ rb_link_node(&machine->rb_node, parent, p);
+ rb_insert_color(&machine->rb_node, self);
+
+ return machine;
+}
+
+struct machine *machines__find(struct rb_root *self, pid_t pid)
+{
+ struct rb_node **p = &self->rb_node;
+ struct rb_node *parent = NULL;
+ struct machine *machine;
+ struct machine *default_machine = NULL;
+
+ while (*p != NULL) {
+ parent = *p;
+ machine = rb_entry(parent, struct machine, rb_node);
+ if (pid < machine->pid)
+ p = &(*p)->rb_left;
+ else if (pid > machine->pid)
+ p = &(*p)->rb_right;
+ else
+ return machine;
+ if (!machine->pid)
+ default_machine = machine;
+ }
+
+ return default_machine;
+}
+
+struct machine *machines__findnew(struct rb_root *self, pid_t pid)
+{
+ char path[PATH_MAX];
+ const char *root_dir;
+ struct machine *machine = machines__find(self, pid);
+
+ if (!machine || machine->pid != pid) {
+ if (pid == HOST_KERNEL_ID || pid == DEFAULT_GUEST_KERNEL_ID)
+ root_dir = "";
+ else {
+ if (!symbol_conf.guestmount)
+ goto out;
+ sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
+ if (access(path, R_OK)) {
+ pr_err("Can't access file %s\n", path);
+ goto out;
+ }
+ root_dir = path;
+ }
+ machine = machines__add(self, pid, root_dir);
+ }
+
+out:
+ return machine;
+}
+
+void machines__process(struct rb_root *self, machine__process_t process, void *data)
+{
+ struct rb_node *nd;
+
+ for (nd = rb_first(self); nd; nd = rb_next(nd)) {
+ struct machine *pos = rb_entry(nd, struct machine, rb_node);
+ process(pos, data);
+ }
+}
+
+char *machine__mmap_name(struct machine *self, char *bf, size_t size)
+{
+ if (machine__is_host(self))
+ snprintf(bf, size, "[%s]", "kernel.kallsyms");
+ else if (machine__is_default_guest(self))
+ snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
+ else
+ snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms", self->pid);
+
+ return bf;
+}
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index b756368076c6..f39134512829 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -4,7 +4,9 @@
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/rbtree.h>
-#include <linux/types.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include "types.h"
enum map_type {
MAP__FUNCTION = 0,
@@ -18,6 +20,7 @@ extern const char *map_type__name[MAP__NR_TYPES];
struct dso;
struct ref_reloc_sym;
struct map_groups;
+struct machine;
struct map {
union {
@@ -27,6 +30,7 @@ struct map {
u64 start;
u64 end;
enum map_type type;
+ u32 priv;
u64 pgoff;
/* ip -> dso rip */
@@ -35,6 +39,7 @@ struct map {
u64 (*unmap_ip)(struct map *, u64);
struct dso *dso;
+ struct map_groups *groups;
};
struct kmap {
@@ -42,6 +47,32 @@ struct kmap {
struct map_groups *kmaps;
};
+struct map_groups {
+ struct rb_root maps[MAP__NR_TYPES];
+ struct list_head removed_maps[MAP__NR_TYPES];
+ struct machine *machine;
+};
+
+/* Native host kernel uses -1 as pid index in machine */
+#define HOST_KERNEL_ID (-1)
+#define DEFAULT_GUEST_KERNEL_ID (0)
+
+struct machine {
+ struct rb_node rb_node;
+ pid_t pid;
+ char *root_dir;
+ struct list_head user_dsos;
+ struct list_head kernel_dsos;
+ struct map_groups kmaps;
+ struct map *vmlinux_maps[MAP__NR_TYPES];
+};
+
+static inline
+struct map *machine__kernel_map(struct machine *self, enum map_type type)
+{
+ return self->vmlinux_maps[type];
+}
+
static inline struct kmap *map__kmap(struct map *self)
{
return (struct kmap *)(self + 1);
@@ -68,14 +99,14 @@ u64 map__rip_2objdump(struct map *map, u64 rip);
u64 map__objdump_2ip(struct map *map, u64 addr);
struct symbol;
-struct mmap_event;
typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym);
void map__init(struct map *self, enum map_type type,
u64 start, u64 end, u64 pgoff, struct dso *dso);
-struct map *map__new(struct mmap_event *event, enum map_type,
- char *cwd, int cwdlen);
+struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
+ u64 pgoff, u32 pid, char *filename,
+ enum map_type type, char *cwd, int cwdlen);
void map__delete(struct map *self);
struct map *map__clone(struct map *self);
int map__overlap(struct map *l, struct map *r);
@@ -91,4 +122,96 @@ void map__fixup_end(struct map *self);
void map__reloc_vmlinux(struct map *self);
+size_t __map_groups__fprintf_maps(struct map_groups *self,
+ enum map_type type, int verbose, FILE *fp);
+void maps__insert(struct rb_root *maps, struct map *map);
+struct map *maps__find(struct rb_root *maps, u64 addr);
+void map_groups__init(struct map_groups *self);
+int map_groups__clone(struct map_groups *self,
+ struct map_groups *parent, enum map_type type);
+size_t map_groups__fprintf(struct map_groups *self, int verbose, FILE *fp);
+size_t map_groups__fprintf_maps(struct map_groups *self, int verbose, FILE *fp);
+
+typedef void (*machine__process_t)(struct machine *self, void *data);
+
+void machines__process(struct rb_root *self, machine__process_t process, void *data);
+struct machine *machines__add(struct rb_root *self, pid_t pid,
+ const char *root_dir);
+struct machine *machines__find_host(struct rb_root *self);
+struct machine *machines__find(struct rb_root *self, pid_t pid);
+struct machine *machines__findnew(struct rb_root *self, pid_t pid);
+char *machine__mmap_name(struct machine *self, char *bf, size_t size);
+int machine__init(struct machine *self, const char *root_dir, pid_t pid);
+
+/*
+ * The default guest kernel is defined by the --guestkallsyms
+ * and --guestmodules
+ */
+static inline bool machine__is_default_guest(struct machine *self)
+{
+ return self ? self->pid == DEFAULT_GUEST_KERNEL_ID : false;
+}
+
+static inline bool machine__is_host(struct machine *self)
+{
+ return self ? self->pid == HOST_KERNEL_ID : false;
+}
+
+static inline void map_groups__insert(struct map_groups *self, struct map *map)
+{
+ maps__insert(&self->maps[map->type], map);
+ map->groups = self;
+}
+
+static inline struct map *map_groups__find(struct map_groups *self,
+ enum map_type type, u64 addr)
+{
+ return maps__find(&self->maps[type], addr);
+}
+
+struct symbol *map_groups__find_symbol(struct map_groups *self,
+ enum map_type type, u64 addr,
+ struct map **mapp,
+ symbol_filter_t filter);
+
+struct symbol *map_groups__find_symbol_by_name(struct map_groups *self,
+ enum map_type type,
+ const char *name,
+ struct map **mapp,
+ symbol_filter_t filter);
+
+static inline
+struct symbol *machine__find_kernel_symbol(struct machine *self,
+ enum map_type type, u64 addr,
+ struct map **mapp,
+ symbol_filter_t filter)
+{
+ return map_groups__find_symbol(&self->kmaps, type, addr, mapp, filter);
+}
+
+static inline
+struct symbol *machine__find_kernel_function(struct machine *self, u64 addr,
+ struct map **mapp,
+ symbol_filter_t filter)
+{
+ return machine__find_kernel_symbol(self, MAP__FUNCTION, addr, mapp, filter);
+}
+
+static inline
+struct symbol *map_groups__find_function_by_name(struct map_groups *self,
+ const char *name, struct map **mapp,
+ symbol_filter_t filter)
+{
+ return map_groups__find_symbol_by_name(self, MAP__FUNCTION, name, mapp, filter);
+}
+
+int map_groups__fixup_overlappings(struct map_groups *self, struct map *map,
+ int verbose, FILE *fp);
+
+struct map *map_groups__find_by_name(struct map_groups *self,
+ enum map_type type, const char *name);
+struct map *machine__new_module(struct machine *self, u64 start, const char *filename);
+
+void map_groups__flush(struct map_groups *self);
+
#endif /* __PERF_MAP_H */
diff --git a/tools/perf/util/newt.c b/tools/perf/util/newt.c
new file mode 100644
index 000000000000..e283a6e6b6e0
--- /dev/null
+++ b/tools/perf/util/newt.c
@@ -0,0 +1,725 @@
+#define _GNU_SOURCE
+#include <stdio.h>
+#undef _GNU_SOURCE
+
+#include <stdlib.h>
+#include <newt.h>
+#include <sys/ttydefaults.h>
+
+#include "cache.h"
+#include "hist.h"
+#include "session.h"
+#include "sort.h"
+#include "symbol.h"
+
+struct ui_progress {
+ newtComponent form, scale;
+};
+
+struct ui_progress *ui_progress__new(const char *title, u64 total)
+{
+ struct ui_progress *self = malloc(sizeof(*self));
+
+ if (self != NULL) {
+ int cols;
+ newtGetScreenSize(&cols, NULL);
+ cols -= 4;
+ newtCenteredWindow(cols, 1, title);
+ self->form = newtForm(NULL, NULL, 0);
+ if (self->form == NULL)
+ goto out_free_self;
+ self->scale = newtScale(0, 0, cols, total);
+ if (self->scale == NULL)
+ goto out_free_form;
+ newtFormAddComponent(self->form, self->scale);
+ newtRefresh();
+ }
+
+ return self;
+
+out_free_form:
+ newtFormDestroy(self->form);
+out_free_self:
+ free(self);
+ return NULL;
+}
+
+void ui_progress__update(struct ui_progress *self, u64 curr)
+{
+ newtScaleSet(self->scale, curr);
+ newtRefresh();
+}
+
+void ui_progress__delete(struct ui_progress *self)
+{
+ newtFormDestroy(self->form);
+ newtPopWindow();
+ free(self);
+}
+
+static char browser__last_msg[1024];
+
+int browser__show_help(const char *format, va_list ap)
+{
+ int ret;
+ static int backlog;
+
+ ret = vsnprintf(browser__last_msg + backlog,
+ sizeof(browser__last_msg) - backlog, format, ap);
+ backlog += ret;
+
+ if (browser__last_msg[backlog - 1] == '\n') {
+ newtPopHelpLine();
+ newtPushHelpLine(browser__last_msg);
+ newtRefresh();
+ backlog = 0;
+ }
+
+ return ret;
+}
+
+static void newt_form__set_exit_keys(newtComponent self)
+{
+ newtFormAddHotKey(self, NEWT_KEY_ESCAPE);
+ newtFormAddHotKey(self, 'Q');
+ newtFormAddHotKey(self, 'q');
+ newtFormAddHotKey(self, CTRL('c'));
+}
+
+static newtComponent newt_form__new(void)
+{
+ newtComponent self = newtForm(NULL, NULL, 0);
+ if (self)
+ newt_form__set_exit_keys(self);
+ return self;
+}
+
+static int popup_menu(int argc, char * const argv[])
+{
+ struct newtExitStruct es;
+ int i, rc = -1, max_len = 5;
+ newtComponent listbox, form = newt_form__new();
+
+ if (form == NULL)
+ return -1;
+
+ listbox = newtListbox(0, 0, argc, NEWT_FLAG_RETURNEXIT);
+ if (listbox == NULL)
+ goto out_destroy_form;
+
+ newtFormAddComponent(form, listbox);
+
+ for (i = 0; i < argc; ++i) {
+ int len = strlen(argv[i]);
+ if (len > max_len)
+ max_len = len;
+ if (newtListboxAddEntry(listbox, argv[i], (void *)(long)i))
+ goto out_destroy_form;
+ }
+
+ newtCenteredWindow(max_len, argc, NULL);
+ newtFormRun(form, &es);
+ rc = newtListboxGetCurrent(listbox) - NULL;
+ if (es.reason == NEWT_EXIT_HOTKEY)
+ rc = -1;
+ newtPopWindow();
+out_destroy_form:
+ newtFormDestroy(form);
+ return rc;
+}
+
+static bool dialog_yesno(const char *msg)
+{
+ /* newtWinChoice should really be accepting const char pointers... */
+ char yes[] = "Yes", no[] = "No";
+ return newtWinChoice(NULL, yes, no, (char *)msg) == 1;
+}
+
+/*
+ * When debugging newt problems it was useful to be able to "unroll"
+ * the calls to newtCheckBoxTreeAdd{Array,Item} and generate a source
+ * file with the sequence of calls to these methods, then tweak the
+ * arrays to get the intended results. I'm keeping this code here;
+ * it may be useful again in the future.
+ */
+#undef NEWT_DEBUG
+
+static void newt_checkbox_tree__add(newtComponent tree, const char *str,
+ void *priv, int *indexes)
+{
+#ifdef NEWT_DEBUG
+ /* Print the newtCheckboxTreeAddArray call to tinker with its index arrays */
+ int i = 0, len = 40 - strlen(str);
+
+ fprintf(stderr,
+ "\tnewtCheckboxTreeAddItem(tree, %*.*s\"%s\", (void *)%p, 0, ",
+ len, len, " ", str, priv);
+ while (indexes[i] != NEWT_ARG_LAST) {
+ if (indexes[i] != NEWT_ARG_APPEND)
+ fprintf(stderr, " %d,", indexes[i]);
+ else
+ fprintf(stderr, " %s,", "NEWT_ARG_APPEND");
+ ++i;
+ }
+ fprintf(stderr, " %s", " NEWT_ARG_LAST);\n");
+ fflush(stderr);
+#endif
+ newtCheckboxTreeAddArray(tree, str, priv, 0, indexes);
+}
+
+static char *callchain_list__sym_name(struct callchain_list *self,
+ char *bf, size_t bfsize)
+{
+ if (self->ms.sym)
+ return self->ms.sym->name;
+
+ snprintf(bf, bfsize, "%#Lx", self->ip);
+ return bf;
+}
+
+static void __callchain__append_graph_browser(struct callchain_node *self,
+ newtComponent tree, u64 total,
+ int *indexes, int depth)
+{
+ struct rb_node *node;
+ u64 new_total, remaining;
+ int idx = 0;
+
+ if (callchain_param.mode == CHAIN_GRAPH_REL)
+ new_total = self->children_hit;
+ else
+ new_total = total;
+
+ remaining = new_total;
+ node = rb_first(&self->rb_root);
+ while (node) {
+ struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
+ struct rb_node *next = rb_next(node);
+ u64 cumul = cumul_hits(child);
+ struct callchain_list *chain;
+ int first = true, printed = 0;
+ int chain_idx = -1;
+ remaining -= cumul;
+
+ indexes[depth] = NEWT_ARG_APPEND;
+ indexes[depth + 1] = NEWT_ARG_LAST;
+
+ list_for_each_entry(chain, &child->val, list) {
+ char ipstr[BITS_PER_LONG / 4 + 1],
+ *alloc_str = NULL;
+ const char *str = callchain_list__sym_name(chain, ipstr, sizeof(ipstr));
+
+ if (first) {
+ double percent = cumul * 100.0 / new_total;
+
+ first = false;
+ if (asprintf(&alloc_str, "%2.2f%% %s", percent, str) < 0)
+ str = "Not enough memory!";
+ else
+ str = alloc_str;
+ } else {
+ indexes[depth] = idx;
+ indexes[depth + 1] = NEWT_ARG_APPEND;
+ indexes[depth + 2] = NEWT_ARG_LAST;
+ ++chain_idx;
+ }
+ newt_checkbox_tree__add(tree, str, &chain->ms, indexes);
+ free(alloc_str);
+ ++printed;
+ }
+
+ indexes[depth] = idx;
+ if (chain_idx != -1)
+ indexes[depth + 1] = chain_idx;
+ if (printed != 0)
+ ++idx;
+ __callchain__append_graph_browser(child, tree, new_total, indexes,
+ depth + (chain_idx != -1 ? 2 : 1));
+ node = next;
+ }
+}
+
+static void callchain__append_graph_browser(struct callchain_node *self,
+ newtComponent tree, u64 total,
+ int *indexes, int parent_idx)
+{
+ struct callchain_list *chain;
+ int i = 0;
+
+ indexes[1] = NEWT_ARG_APPEND;
+ indexes[2] = NEWT_ARG_LAST;
+
+ list_for_each_entry(chain, &self->val, list) {
+ char ipstr[BITS_PER_LONG / 4 + 1], *str;
+
+ if (chain->ip >= PERF_CONTEXT_MAX)
+ continue;
+
+ if (!i++ && sort__first_dimension == SORT_SYM)
+ continue;
+
+ str = callchain_list__sym_name(chain, ipstr, sizeof(ipstr));
+ newt_checkbox_tree__add(tree, str, &chain->ms, indexes);
+ }
+
+ indexes[1] = parent_idx;
+ indexes[2] = NEWT_ARG_APPEND;
+ indexes[3] = NEWT_ARG_LAST;
+ __callchain__append_graph_browser(self, tree, total, indexes, 2);
+}
+
+static void hist_entry__append_callchain_browser(struct hist_entry *self,
+ newtComponent tree, u64 total, int parent_idx)
+{
+ struct rb_node *rb_node;
+ int indexes[1024] = { [0] = parent_idx, };
+ int idx = 0;
+ struct callchain_node *chain;
+
+ rb_node = rb_first(&self->sorted_chain);
+ while (rb_node) {
+ chain = rb_entry(rb_node, struct callchain_node, rb_node);
+ switch (callchain_param.mode) {
+ case CHAIN_FLAT:
+ break;
+ case CHAIN_GRAPH_ABS: /* fall through */
+ case CHAIN_GRAPH_REL:
+ callchain__append_graph_browser(chain, tree, total, indexes, idx++);
+ break;
+ case CHAIN_NONE:
+ default:
+ break;
+ }
+ rb_node = rb_next(rb_node);
+ }
+}
+
+static size_t hist_entry__append_browser(struct hist_entry *self,
+ newtComponent tree, u64 total)
+{
+ char s[256];
+ size_t ret;
+
+ if (symbol_conf.exclude_other && !self->parent)
+ return 0;
+
+ ret = hist_entry__snprintf(self, s, sizeof(s), NULL,
+ false, 0, false, total);
+ if (symbol_conf.use_callchain) {
+ int indexes[2];
+
+ indexes[0] = NEWT_ARG_APPEND;
+ indexes[1] = NEWT_ARG_LAST;
+ newt_checkbox_tree__add(tree, s, &self->ms, indexes);
+ } else
+ newtListboxAppendEntry(tree, s, &self->ms);
+
+ return ret;
+}
+
+static void map_symbol__annotate_browser(const struct map_symbol *self,
+ const char *input_name)
+{
+ FILE *fp;
+ int cols, rows;
+ newtComponent form, tree;
+ struct newtExitStruct es;
+ char *str;
+ size_t line_len, max_line_len = 0;
+ size_t max_usable_width;
+ char *line = NULL;
+
+ if (self->sym == NULL)
+ return;
+
+ if (asprintf(&str, "perf annotate -i \"%s\" -d \"%s\" %s 2>&1 | expand",
+ input_name, self->map->dso->name, self->sym->name) < 0)
+ return;
+
+ fp = popen(str, "r");
+ if (fp == NULL)
+ goto out_free_str;
+
+ newtPushHelpLine("Press ESC to exit");
+ newtGetScreenSize(&cols, &rows);
+ tree = newtListbox(0, 0, rows - 5, NEWT_FLAG_SCROLL);
+
+ while (!feof(fp)) {
+ if (getline(&line, &line_len, fp) < 0 || !line_len)
+ break;
+ while (line_len != 0 && isspace(line[line_len - 1]))
+ line[--line_len] = '\0';
+
+ if (line_len > max_line_len)
+ max_line_len = line_len;
+ newtListboxAppendEntry(tree, line, NULL);
+ }
+ pclose(fp); /* stream was opened with popen() */
+ free(line);
+
+ max_usable_width = cols - 22;
+ if (max_line_len > max_usable_width)
+ max_line_len = max_usable_width;
+
+ newtListboxSetWidth(tree, max_line_len);
+
+ newtCenteredWindow(max_line_len + 2, rows - 5, self->sym->name);
+ form = newt_form__new();
+ newtFormAddComponent(form, tree);
+
+ newtFormRun(form, &es);
+ newtFormDestroy(form);
+ newtPopWindow();
+ newtPopHelpLine();
+out_free_str:
+ free(str);
+}
+
+static const void *newt__symbol_tree_get_current(newtComponent self)
+{
+ if (symbol_conf.use_callchain)
+ return newtCheckboxTreeGetCurrent(self);
+ return newtListboxGetCurrent(self);
+}
+
+static void hist_browser__selection(newtComponent self, void *data)
+{
+ const struct map_symbol **symbol_ptr = data;
+ *symbol_ptr = newt__symbol_tree_get_current(self);
+}
+
+struct hist_browser {
+ newtComponent form, tree;
+ const struct map_symbol *selection;
+};
+
+static struct hist_browser *hist_browser__new(void)
+{
+ struct hist_browser *self = malloc(sizeof(*self));
+
+ if (self != NULL)
+ self->form = NULL;
+
+ return self;
+}
+
+static void hist_browser__delete(struct hist_browser *self)
+{
+ newtFormDestroy(self->form);
+ newtPopWindow();
+ free(self);
+}
+
+static int hist_browser__populate(struct hist_browser *self, struct rb_root *hists,
+ u64 nr_hists, u64 session_total, const char *title)
+{
+ int max_len = 0, idx, cols, rows;
+ struct ui_progress *progress;
+ struct rb_node *nd;
+ u64 curr_hist = 0;
+ char seq[] = ".";
+ char str[256];
+
+ if (self->form) {
+ newtFormDestroy(self->form);
+ newtPopWindow();
+ }
+
+ snprintf(str, sizeof(str), "Samples: %Ld ",
+ session_total);
+ newtDrawRootText(0, 0, str);
+
+ newtGetScreenSize(NULL, &rows);
+
+ if (symbol_conf.use_callchain)
+ self->tree = newtCheckboxTreeMulti(0, 0, rows - 5, seq,
+ NEWT_FLAG_SCROLL);
+ else
+ self->tree = newtListbox(0, 0, rows - 5,
+ (NEWT_FLAG_SCROLL |
+ NEWT_FLAG_RETURNEXIT));
+
+ newtComponentAddCallback(self->tree, hist_browser__selection,
+ &self->selection);
+
+ progress = ui_progress__new("Adding entries to the browser...", nr_hists);
+ if (progress == NULL)
+ return -1;
+
+ idx = 0;
+ for (nd = rb_first(hists); nd; nd = rb_next(nd)) {
+ struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+ int len;
+
+ if (h->filtered)
+ continue;
+
+ len = hist_entry__append_browser(h, self->tree, session_total);
+ if (len > max_len)
+ max_len = len;
+ if (symbol_conf.use_callchain)
+ hist_entry__append_callchain_browser(h, self->tree,
+ session_total, idx++);
+ ++curr_hist;
+ if (curr_hist % 5)
+ ui_progress__update(progress, curr_hist);
+ }
+
+ ui_progress__delete(progress);
+
+ newtGetScreenSize(&cols, &rows);
+
+ if (max_len > cols)
+ max_len = cols - 3;
+
+ if (!symbol_conf.use_callchain)
+ newtListboxSetWidth(self->tree, max_len);
+
+ newtCenteredWindow(max_len + (symbol_conf.use_callchain ? 5 : 0),
+ rows - 5, title);
+ self->form = newt_form__new();
+ if (self->form == NULL)
+ return -1;
+
+ newtFormAddHotKey(self->form, 'A');
+ newtFormAddHotKey(self->form, 'a');
+ newtFormAddHotKey(self->form, NEWT_KEY_RIGHT);
+ newtFormAddComponents(self->form, self->tree, NULL);
+ self->selection = newt__symbol_tree_get_current(self->tree);
+
+ return 0;
+}
+
+enum hist_filter {
+ HIST_FILTER__DSO,
+ HIST_FILTER__THREAD,
+};
+
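+/*
+ * Mark entries that do not belong to 'dso' as filtered and return how
+ * many entries remain, updating the session total accordingly.
+ */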
+static u64 hists__filter_by_dso(struct rb_root *hists, const struct dso *dso,
+ u64 *session_total)
+{
+ struct rb_node *nd;
+ u64 nr_hists = 0;
+
+ *session_total = 0;
+
+ for (nd = rb_first(hists); nd; nd = rb_next(nd)) {
+ struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+
+ if (dso != NULL && (h->ms.map == NULL || h->ms.map->dso != dso)) {
+ h->filtered |= (1 << HIST_FILTER__DSO);
+ continue;
+ }
+ h->filtered &= ~(1 << HIST_FILTER__DSO);
+ ++nr_hists;
+ *session_total += h->count;
+ }
+
+ return nr_hists;
+}
+
+static u64 hists__filter_by_thread(struct rb_root *hists, const struct thread *thread,
+ u64 *session_total)
+{
+ struct rb_node *nd;
+ u64 nr_hists = 0;
+
+ *session_total = 0;
+
+ for (nd = rb_first(hists); nd; nd = rb_next(nd)) {
+ struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+
+ if (thread != NULL && h->thread != thread) {
+ h->filtered |= (1 << HIST_FILTER__THREAD);
+ continue;
+ }
+ h->filtered &= ~(1 << HIST_FILTER__THREAD);
+ ++nr_hists;
+ *session_total += h->count;
+ }
+
+ return nr_hists;
+}
+
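+/*
+ * The selection points to the ->ms member of a hist_entry or, with
+ * callchains, possibly to a callchain node; only return the thread for
+ * a top level hist_entry, relying on the thread pointer following ->ms
+ * in struct hist_entry.
+ */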
+static struct thread *hist_browser__selected_thread(struct hist_browser *self)
+{
+ int *indexes;
+
+ if (!symbol_conf.use_callchain)
+ goto out;
+
+ indexes = newtCheckboxTreeFindItem(self->tree, (void *)self->selection);
+ if (indexes) {
+ bool is_hist_entry = indexes[1] == NEWT_ARG_LAST;
+ free(indexes);
+ if (is_hist_entry)
+ goto out;
+ }
+ return NULL;
+out:
+ return *(struct thread **)(self->selection + 1);
+}
+
+static int hist_browser__title(char *bf, size_t size, const char *input_name,
+ const struct dso *dso, const struct thread *thread)
+{
+ int printed = 0;
+
+ if (thread)
+ printed += snprintf(bf + printed, size - printed,
+ "Thread: %s(%d)",
+ (thread->comm_set ? thread->comm : ""),
+ thread->pid);
+ if (dso)
+ printed += snprintf(bf + printed, size - printed,
+ "%sDSO: %s", thread ? " " : "",
+ dso->short_name);
+ return printed ?: snprintf(bf, size, "Report: %s", input_name);
+}
+
+int perf_session__browse_hists(struct rb_root *hists, u64 nr_hists,
+ u64 session_total, const char *helpline,
+ const char *input_name)
+{
+ struct hist_browser *browser = hist_browser__new();
+ const struct thread *thread_filter = NULL;
+ const struct dso *dso_filter = NULL;
+ struct newtExitStruct es;
+ char msg[160];
+ int err = -1;
+
+ if (browser == NULL)
+ return -1;
+
+ newtPushHelpLine(helpline);
+
+ hist_browser__title(msg, sizeof(msg), input_name,
+ dso_filter, thread_filter);
+ if (hist_browser__populate(browser, hists, nr_hists, session_total, msg) < 0)
+ goto out;
+
+ while (1) {
+ const struct thread *thread;
+ const struct dso *dso;
+ char *options[16];
+ int nr_options = 0, choice = 0, i,
+ annotate = -2, zoom_dso = -2, zoom_thread = -2;
+
+ newtFormRun(browser->form, &es);
+ if (es.reason == NEWT_EXIT_HOTKEY) {
+ if (toupper(es.u.key) == 'A')
+ goto do_annotate;
+ if (es.u.key == NEWT_KEY_ESCAPE ||
+ toupper(es.u.key) == 'Q' ||
+ es.u.key == CTRL('c')) {
+ if (dialog_yesno("Do you really want to exit?"))
+ break;
+ else
+ continue;
+ }
+ }
+
+ if (browser->selection->sym != NULL &&
+ asprintf(&options[nr_options], "Annotate %s",
+ browser->selection->sym->name) > 0)
+ annotate = nr_options++;
+
+ thread = hist_browser__selected_thread(browser);
+ if (thread != NULL &&
+ asprintf(&options[nr_options], "Zoom %s %s(%d) thread",
+ (thread_filter ? "out of" : "into"),
+ (thread->comm_set ? thread->comm : ""),
+ thread->pid) > 0)
+ zoom_thread = nr_options++;
+
+ dso = browser->selection->map ? browser->selection->map->dso : NULL;
+ if (dso != NULL &&
+ asprintf(&options[nr_options], "Zoom %s %s DSO",
+ (dso_filter ? "out of" : "into"),
+ (dso->kernel ? "the Kernel" : dso->short_name)) > 0)
+ zoom_dso = nr_options++;
+
+ options[nr_options++] = (char *)"Exit";
+
+ choice = popup_menu(nr_options, options);
+
+ for (i = 0; i < nr_options - 1; ++i)
+ free(options[i]);
+
+ if (choice == nr_options - 1)
+ break;
+
+ if (choice == -1)
+ continue;
+do_annotate:
+ if (choice == annotate) {
+ if (browser->selection->map->dso->origin == DSO__ORIG_KERNEL) {
+ newtPopHelpLine();
+ newtPushHelpLine("No vmlinux file found, can't "
+ "annotate with just a "
+ "kallsyms file");
+ continue;
+ }
+ map_symbol__annotate_browser(browser->selection, input_name);
+ } else if (choice == zoom_dso) {
+ if (dso_filter) {
+ newtPopHelpLine();
+ dso_filter = NULL;
+ } else {
+ snprintf(msg, sizeof(msg),
+ "To zoom out press -> + \"Zoom out of %s DSO\"",
+ dso->kernel ? "the Kernel" : dso->short_name);
+ newtPushHelpLine(msg);
+ dso_filter = dso;
+ }
+ nr_hists = hists__filter_by_dso(hists, dso_filter, &session_total);
+ hist_browser__title(msg, sizeof(msg), input_name,
+ dso_filter, thread_filter);
+ if (hist_browser__populate(browser, hists, nr_hists, session_total, msg) < 0)
+ goto out;
+ } else if (choice == zoom_thread) {
+ if (thread_filter) {
+ newtPopHelpLine();
+ thread_filter = NULL;
+ } else {
+ snprintf(msg, sizeof(msg),
+ "To zoom out press -> + \"Zoom out of %s(%d) thread\"",
+ (thread->comm_set ? thread->comm : ""),
+ thread->pid);
+ newtPushHelpLine(msg);
+ thread_filter = thread;
+ }
+ nr_hists = hists__filter_by_thread(hists, thread_filter, &session_total);
+ hist_browser__title(msg, sizeof(msg), input_name,
+ dso_filter, thread_filter);
+ if (hist_browser__populate(browser, hists, nr_hists, session_total, msg) < 0)
+ goto out;
+ }
+ }
+ err = 0;
+out:
+ hist_browser__delete(browser);
+ return err;
+}
+
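+/* Only switch to the newt TUI when stdout is a tty. */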
+void setup_browser(void)
+{
+ if (!isatty(1))
+ return;
+
+ use_browser = true;
+ newtInit();
+ newtCls();
+ newtPushHelpLine(" ");
+}
+
+void exit_browser(bool wait_for_ok)
+{
+ if (use_browser) {
+ if (wait_for_ok) {
+ char title[] = "Fatal Error", ok[] = "Ok";
+ newtWinMessage(title, ok, browser__last_msg);
+ }
+ newtFinished();
+ }
+}
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 05d0c5c2030c..9bf0f402ca73 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -5,6 +5,7 @@
#include "parse-events.h"
#include "exec_cmd.h"
#include "string.h"
+#include "symbol.h"
#include "cache.h"
#include "header.h"
#include "debugfs.h"
@@ -409,7 +410,6 @@ static enum event_result
parse_single_tracepoint_event(char *sys_name,
const char *evt_name,
unsigned int evt_length,
- char *flags,
struct perf_event_attr *attr,
const char **strp)
{
@@ -418,14 +418,6 @@ parse_single_tracepoint_event(char *sys_name,
u64 id;
int fd;
- if (flags) {
- if (!strncmp(flags, "record", strlen(flags))) {
- attr->sample_type |= PERF_SAMPLE_RAW;
- attr->sample_type |= PERF_SAMPLE_TIME;
- attr->sample_type |= PERF_SAMPLE_CPU;
- }
- }
-
snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path,
sys_name, evt_name);
@@ -444,6 +436,13 @@ parse_single_tracepoint_event(char *sys_name,
attr->type = PERF_TYPE_TRACEPOINT;
*strp = evt_name + evt_length;
+ attr->sample_type |= PERF_SAMPLE_RAW;
+ attr->sample_type |= PERF_SAMPLE_TIME;
+ attr->sample_type |= PERF_SAMPLE_CPU;
+
+ attr->sample_period = 1;
+
return EVT_HANDLED;
}
@@ -532,8 +531,7 @@ static enum event_result parse_tracepoint_event(const char **strp,
flags);
} else
return parse_single_tracepoint_event(sys_name, evt_name,
- evt_length, flags,
- attr, strp);
+ evt_length, attr, strp);
}
static enum event_result
@@ -690,19 +688,29 @@ static enum event_result
parse_event_modifier(const char **strp, struct perf_event_attr *attr)
{
const char *str = *strp;
- int eu = 1, ek = 1, eh = 1;
+ int exclude = 0;
+ int eu = 0, ek = 0, eh = 0, precise = 0;
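+ /*
+ * Without modifiers nothing is excluded; the first u/k/h seen
+ * excludes everything and each listed domain is then re-enabled,
+ * so e.g. ":uk" counts user and kernel but not hv. Each 'p'
+ * bumps the precision level (precise_ip).
+ */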
if (*str++ != ':')
return 0;
while (*str) {
- if (*str == 'u')
+ if (*str == 'u') {
+ if (!exclude)
+ exclude = eu = ek = eh = 1;
eu = 0;
- else if (*str == 'k')
+ } else if (*str == 'k') {
+ if (!exclude)
+ exclude = eu = ek = eh = 1;
ek = 0;
- else if (*str == 'h')
+ } else if (*str == 'h') {
+ if (!exclude)
+ exclude = eu = ek = eh = 1;
eh = 0;
- else
+ } else if (*str == 'p') {
+ precise++;
+ } else
break;
+
++str;
}
if (str >= *strp + 2) {
@@ -710,6 +718,7 @@ parse_event_modifier(const char **strp, struct perf_event_attr *attr)
attr->exclude_user = eu;
attr->exclude_kernel = ek;
attr->exclude_hv = eh;
+ attr->precise_ip = precise;
return 1;
}
return 0;
@@ -934,7 +943,8 @@ void print_events(void)
printf("\n");
printf(" %-42s [%s]\n",
- "rNNN", event_type_descriptors[PERF_TYPE_RAW]);
+ "rNNN (see 'perf list --help' on how to encode it)",
+ event_type_descriptors[PERF_TYPE_RAW]);
printf("\n");
printf(" %-42s [%s]\n",
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index b8c1f64bc935..fc4ab3fe877a 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -13,6 +13,7 @@ struct tracepoint_path {
};
extern struct tracepoint_path *tracepoint_id_to_path(u64 config);
+extern bool have_tracepoints(struct perf_event_attr *pattrs, int nb_events);
extern int nr_counters;
diff --git a/tools/perf/util/parse-options.c b/tools/perf/util/parse-options.c
index efebd5b476b3..ed887642460c 100644
--- a/tools/perf/util/parse-options.c
+++ b/tools/perf/util/parse-options.c
@@ -49,6 +49,7 @@ static int get_value(struct parse_opt_ctx_t *p,
break;
/* FALLTHROUGH */
case OPTION_BOOLEAN:
+ case OPTION_INCR:
case OPTION_BIT:
case OPTION_SET_INT:
case OPTION_SET_PTR:
@@ -73,6 +74,10 @@ static int get_value(struct parse_opt_ctx_t *p,
return 0;
case OPTION_BOOLEAN:
+ *(bool *)opt->value = unset ? false : true;
+ return 0;
+
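+ /* OPTION_INCR counts repetitions, e.g. -v -v yields 2 */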
+ case OPTION_INCR:
*(int *)opt->value = unset ? 0 : *(int *)opt->value + 1;
return 0;
@@ -478,6 +483,7 @@ int usage_with_options_internal(const char * const *usagestr,
case OPTION_GROUP:
case OPTION_BIT:
case OPTION_BOOLEAN:
+ case OPTION_INCR:
case OPTION_SET_INT:
case OPTION_SET_PTR:
case OPTION_LONG:
@@ -500,6 +506,7 @@ int usage_with_options_internal(const char * const *usagestr,
void usage_with_options(const char * const *usagestr,
const struct option *opts)
{
+ exit_browser(false);
usage_with_options_internal(usagestr, opts, 0);
exit(129);
}
diff --git a/tools/perf/util/parse-options.h b/tools/perf/util/parse-options.h
index 948805af43c2..b2da725f102a 100644
--- a/tools/perf/util/parse-options.h
+++ b/tools/perf/util/parse-options.h
@@ -8,7 +8,8 @@ enum parse_opt_type {
OPTION_GROUP,
/* options with no arguments */
OPTION_BIT,
- OPTION_BOOLEAN, /* _INCR would have been a better name */
+ OPTION_BOOLEAN,
+ OPTION_INCR,
OPTION_SET_INT,
OPTION_SET_PTR,
/* options with arguments (usually) */
@@ -95,6 +96,7 @@ struct option {
#define OPT_GROUP(h) { .type = OPTION_GROUP, .help = (h) }
#define OPT_BIT(s, l, v, h, b) { .type = OPTION_BIT, .short_name = (s), .long_name = (l), .value = (v), .help = (h), .defval = (b) }
#define OPT_BOOLEAN(s, l, v, h) { .type = OPTION_BOOLEAN, .short_name = (s), .long_name = (l), .value = (v), .help = (h) }
+#define OPT_INCR(s, l, v, h) { .type = OPTION_INCR, .short_name = (s), .long_name = (l), .value = (v), .help = (h) }
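+/* e.g. OPT_INCR('v', "verbose", &verbose, "be more verbose") */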
#define OPT_SET_INT(s, l, v, h, i) { .type = OPTION_SET_INT, .short_name = (s), .long_name = (l), .value = (v), .help = (h), .defval = (i) }
#define OPT_SET_PTR(s, l, v, h, p) { .type = OPTION_SET_PTR, .short_name = (s), .long_name = (l), .value = (v), .help = (h), .defval = (p) }
#define OPT_INTEGER(s, l, v, h) { .type = OPTION_INTEGER, .short_name = (s), .long_name = (l), .value = (v), .help = (h) }
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 7c004b6ef24f..914c67095d96 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -33,20 +33,27 @@
#include <limits.h>
#undef _GNU_SOURCE
+#include "util.h"
#include "event.h"
#include "string.h"
#include "strlist.h"
#include "debug.h"
#include "cache.h"
#include "color.h"
-#include "parse-events.h" /* For debugfs_path */
+#include "symbol.h"
+#include "thread.h"
+#include "debugfs.h"
+#include "trace-event.h" /* For __unused */
#include "probe-event.h"
+#include "probe-finder.h"
#define MAX_CMDLEN 256
#define MAX_PROBE_ARGS 128
#define PERFPROBE_GROUP "probe"
-#define semantic_error(msg ...) die("Semantic error :" msg)
+bool probe_event_dry_run; /* Dry run flag */
+
+#define semantic_error(msg ...) pr_err("Semantic error :" msg)
/* If there is no space to write, returns -E2BIG. */
static int e_snprintf(char *str, size_t size, const char *format, ...)
@@ -64,7 +71,275 @@ static int e_snprintf(char *str, size_t size, const char *format, ...)
return ret;
}
-void parse_line_range_desc(const char *arg, struct line_range *lr)
+static char *synthesize_perf_probe_point(struct perf_probe_point *pp);
+static struct machine machine;
+
+/* Initialize symbol maps and path of vmlinux */
+static int init_vmlinux(void)
+{
+ struct dso *kernel;
+ int ret;
+
+ symbol_conf.sort_by_name = true;
+ if (symbol_conf.vmlinux_name == NULL)
+ symbol_conf.try_vmlinux_path = true;
+ else
+ pr_debug("Use vmlinux: %s\n", symbol_conf.vmlinux_name);
+ ret = symbol__init();
+ if (ret < 0) {
+ pr_debug("Failed to init symbol map.\n");
+ goto out;
+ }
+
+ ret = machine__init(&machine, "/", 0);
+ if (ret < 0)
+ goto out;
+
+ kernel = dso__new_kernel(symbol_conf.vmlinux_name);
+ if (kernel == NULL)
+ die("Failed to create kernel dso.");
+
+ ret = __machine__create_kernel_maps(&machine, kernel);
+ if (ret < 0)
+ pr_debug("Failed to create kernel maps.\n");
+
+out:
+ if (ret < 0)
+ pr_warning("Failed to init vmlinux path.\n");
+ return ret;
+}
+
+#ifdef DWARF_SUPPORT
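+/* Load the kernel function map and open the vmlinux image read-only */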
+static int open_vmlinux(void)
+{
+ if (map__load(machine.vmlinux_maps[MAP__FUNCTION], NULL) < 0) {
+ pr_debug("Failed to load kernel map.\n");
+ return -EINVAL;
+ }
+ pr_debug("Try to open %s\n", machine.vmlinux_maps[MAP__FUNCTION]->dso->long_name);
+ return open(machine.vmlinux_maps[MAP__FUNCTION]->dso->long_name, O_RDONLY);
+}
+
+/* Convert trace point to probe point with debuginfo */
+static int convert_to_perf_probe_point(struct kprobe_trace_point *tp,
+ struct perf_probe_point *pp)
+{
+ struct symbol *sym;
+ int fd, ret = -ENOENT;
+
+ sym = map__find_symbol_by_name(machine.vmlinux_maps[MAP__FUNCTION],
+ tp->symbol, NULL);
+ if (sym) {
+ fd = open_vmlinux();
+ if (fd >= 0) {
+ ret = find_perf_probe_point(fd,
+ sym->start + tp->offset, pp);
+ close(fd);
+ }
+ }
+ if (ret <= 0) {
+ pr_debug("Failed to find corresponding probes from "
+ "debuginfo. Use kprobe event information.\n");
+ pp->function = strdup(tp->symbol);
+ if (pp->function == NULL)
+ return -ENOMEM;
+ pp->offset = tp->offset;
+ }
+ pp->retprobe = tp->retprobe;
+
+ return 0;
+}
+
+/* Try to find perf_probe_event with debuginfo */
+static int try_to_find_kprobe_trace_events(struct perf_probe_event *pev,
+ struct kprobe_trace_event **tevs,
+ int max_tevs)
+{
+ bool need_dwarf = perf_probe_event_need_dwarf(pev);
+ int fd, ntevs;
+
+ fd = open_vmlinux();
+ if (fd < 0) {
+ if (need_dwarf) {
+ pr_warning("Failed to open debuginfo file.\n");
+ return fd;
+ }
+ pr_debug("Could not open vmlinux. Try to use symbols.\n");
+ return 0;
+ }
+
+ /* Searching trace events corresponding to probe event */
+ ntevs = find_kprobe_trace_events(fd, pev, tevs, max_tevs);
+ close(fd);
+
+ if (ntevs > 0) { /* Found trace events */
+ pr_debug("found %d kprobe_trace_events.\n", ntevs);
+ return ntevs;
+ }
+
+ if (ntevs == 0) { /* No error but failed to find probe point. */
+ pr_warning("Probe point '%s' not found.\n",
+ synthesize_perf_probe_point(&pev->point));
+ return -ENOENT;
+ }
+ /* Error path : ntevs < 0 */
+ pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs);
+ if (ntevs == -EBADF) {
+ pr_warning("Warning: No dwarf info found in the vmlinux - "
+ "please rebuild kernel with CONFIG_DEBUG_INFO=y.\n");
+ if (!need_dwarf) {
+ pr_debug("Trying to use symbols.\nn");
+ return 0;
+ }
+ }
+ return ntevs;
+}
+
+#define LINEBUF_SIZE 256
+#define NR_ADDITIONAL_LINES 2
+
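+/*
+ * Print one source line, numbered or colored, continuing across reads
+ * when the line is longer than LINEBUF_SIZE; 'skip' just consumes it.
+ */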
+static int show_one_line(FILE *fp, int l, bool skip, bool show_num)
+{
+ char buf[LINEBUF_SIZE];
+ const char *color = PERF_COLOR_BLUE;
+
+ if (fgets(buf, LINEBUF_SIZE, fp) == NULL)
+ goto error;
+ if (!skip) {
+ if (show_num)
+ fprintf(stdout, "%7d %s", l, buf);
+ else
+ color_fprintf(stdout, color, " %s", buf);
+ }
+
+ while (strlen(buf) == LINEBUF_SIZE - 1 &&
+ buf[LINEBUF_SIZE - 2] != '\n') {
+ if (fgets(buf, LINEBUF_SIZE, fp) == NULL)
+ goto error;
+ if (!skip) {
+ if (show_num)
+ fprintf(stdout, "%s", buf);
+ else
+ color_fprintf(stdout, color, "%s", buf);
+ }
+ }
+
+ return 0;
+error:
+ if (feof(fp))
+ pr_warning("Source file is shorter than expected.\n");
+ else
+ pr_warning("File read error: %s\n", strerror(errno));
+
+ return -1;
+}
+
+/*
+ * Show line-range always requires debuginfo to find source file and
+ * line number.
+ */
+int show_line_range(struct line_range *lr)
+{
+ int l = 1;
+ struct line_node *ln;
+ FILE *fp;
+ int fd, ret;
+
+ /* Search a line range */
+ ret = init_vmlinux();
+ if (ret < 0)
+ return ret;
+
+ fd = open_vmlinux();
+ if (fd < 0) {
+ pr_warning("Failed to open debuginfo file.\n");
+ return fd;
+ }
+
+ ret = find_line_range(fd, lr);
+ close(fd);
+ if (ret == 0) {
+ pr_warning("Specified source line is not found.\n");
+ return -ENOENT;
+ } else if (ret < 0) {
+ pr_warning("Debuginfo analysis failed. (%d)\n", ret);
+ return ret;
+ }
+
+ setup_pager();
+
+ if (lr->function)
+ fprintf(stdout, "<%s:%d>\n", lr->function,
+ lr->start - lr->offset);
+ else
+ fprintf(stdout, "<%s:%d>\n", lr->file, lr->start);
+
+ fp = fopen(lr->path, "r");
+ if (fp == NULL) {
+ pr_warning("Failed to open %s: %s\n", lr->path,
+ strerror(errno));
+ return -errno;
+ }
+ /* Skip to starting line number */
+ while (l < lr->start && ret >= 0)
+ ret = show_one_line(fp, l++, true, false);
+ if (ret < 0)
+ goto end;
+
+ list_for_each_entry(ln, &lr->line_list, list) {
+ while (ln->line > l && ret >= 0)
+ ret = show_one_line(fp, (l++) - lr->offset,
+ false, false);
+ if (ret >= 0)
+ ret = show_one_line(fp, (l++) - lr->offset,
+ false, true);
+ if (ret < 0)
+ goto end;
+ }
+
+ if (lr->end == INT_MAX)
+ lr->end = l + NR_ADDITIONAL_LINES;
+ while (l <= lr->end && !feof(fp) && ret >= 0)
+ ret = show_one_line(fp, (l++) - lr->offset, false, false);
+end:
+ fclose(fp);
+ return ret;
+}
+
+#else /* !DWARF_SUPPORT */
+
+static int convert_to_perf_probe_point(struct kprobe_trace_point *tp,
+ struct perf_probe_point *pp)
+{
+ pp->function = strdup(tp->symbol);
+ if (pp->function == NULL)
+ return -ENOMEM;
+ pp->offset = tp->offset;
+ pp->retprobe = tp->retprobe;
+
+ return 0;
+}
+
+static int try_to_find_kprobe_trace_events(struct perf_probe_event *pev,
+ struct kprobe_trace_event **tevs __unused,
+ int max_tevs __unused)
+{
+ if (perf_probe_event_need_dwarf(pev)) {
+ pr_warning("Debuginfo-analysis is not supported.\n");
+ return -ENOSYS;
+ }
+ return 0;
+}
+
+int show_line_range(struct line_range *lr __unused)
+{
+ pr_warning("Debuginfo-analysis is not supported.\n");
+ return -ENOSYS;
+}
+
+#endif
+
+int parse_line_range_desc(const char *arg, struct line_range *lr)
{
const char *ptr;
char *tmp;
@@ -75,29 +350,45 @@ void parse_line_range_desc(const char *arg, struct line_range *lr)
*/
ptr = strchr(arg, ':');
if (ptr) {
- lr->start = (unsigned int)strtoul(ptr + 1, &tmp, 0);
- if (*tmp == '+')
- lr->end = lr->start + (unsigned int)strtoul(tmp + 1,
- &tmp, 0);
- else if (*tmp == '-')
- lr->end = (unsigned int)strtoul(tmp + 1, &tmp, 0);
+ lr->start = (int)strtoul(ptr + 1, &tmp, 0);
+ if (*tmp == '+') {
+ lr->end = lr->start + (int)strtoul(tmp + 1, &tmp, 0);
+ lr->end--; /*
+ * Adjust the number of lines here.
+ * If the number of lines == 1,
+ * the end of line should be equal to
+ * the start of line.
+ */
+ } else if (*tmp == '-')
+ lr->end = (int)strtoul(tmp + 1, &tmp, 0);
else
- lr->end = 0;
- pr_debug("Line range is %u to %u\n", lr->start, lr->end);
- if (lr->end && lr->start > lr->end)
+ lr->end = INT_MAX;
+ pr_debug("Line range is %d to %d\n", lr->start, lr->end);
+ if (lr->start > lr->end) {
semantic_error("Start line must be smaller"
- " than end line.");
- if (*tmp != '\0')
- semantic_error("Tailing with invalid character '%d'.",
+ " than end line.\n");
+ return -EINVAL;
+ }
+ if (*tmp != '\0') {
+ semantic_error("Tailing with invalid character '%d'.\n",
*tmp);
+ return -EINVAL;
+ }
tmp = strndup(arg, (ptr - arg));
- } else
+ } else {
tmp = strdup(arg);
+ lr->end = INT_MAX;
+ }
+
+ if (tmp == NULL)
+ return -ENOMEM;
if (strchr(tmp, '.'))
lr->file = tmp;
else
lr->function = tmp;
+
+ return 0;
}
/* Check the name is good for event/group */
@@ -113,8 +404,9 @@ static bool check_event_name(const char *name)
}
/* Parse probepoint definition. */
-static void parse_perf_probe_probepoint(char *arg, struct probe_point *pp)
+static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
{
+ struct perf_probe_point *pp = &pev->point;
char *ptr, *tmp;
char c, nc = 0;
/*
@@ -129,13 +421,19 @@ static void parse_perf_probe_probepoint(char *arg, struct probe_point *pp)
if (ptr && *ptr == '=') { /* Event name */
*ptr = '\0';
tmp = ptr + 1;
- ptr = strchr(arg, ':');
- if (ptr) /* Group name is not supported yet. */
- semantic_error("Group name is not supported yet.");
- if (!check_event_name(arg))
+ if (strchr(arg, ':')) {
+ semantic_error("Group name is not supported yet.\n");
+ return -ENOTSUP;
+ }
+ if (!check_event_name(arg)) {
semantic_error("%s is bad for event name -it must "
- "follow C symbol-naming rule.", arg);
- pp->event = strdup(arg);
+ "follow C symbol-naming rule.\n", arg);
+ return -EINVAL;
+ }
+ pev->event = strdup(arg);
+ if (pev->event == NULL)
+ return -ENOMEM;
+ pev->group = NULL;
arg = tmp;
}
@@ -145,12 +443,15 @@ static void parse_perf_probe_probepoint(char *arg, struct probe_point *pp)
*ptr++ = '\0';
}
+ tmp = strdup(arg);
+ if (tmp == NULL)
+ return -ENOMEM;
+
/* Check arg is function or file and copy it */
- if (strchr(arg, '.')) /* File */
- pp->file = strdup(arg);
+ if (strchr(tmp, '.')) /* File */
+ pp->file = tmp;
else /* Function */
- pp->function = strdup(arg);
- DIE_IF(pp->file == NULL && pp->function == NULL);
+ pp->function = tmp;
/* Parse other options */
while (ptr) {
@@ -158,6 +459,8 @@ static void parse_perf_probe_probepoint(char *arg, struct probe_point *pp)
c = nc;
if (c == ';') { /* Lazy pattern must be the last part */
pp->lazy_line = strdup(arg);
+ if (pp->lazy_line == NULL)
+ return -ENOMEM;
break;
}
ptr = strpbrk(arg, ";:+@%");
@@ -168,266 +471,658 @@ static void parse_perf_probe_probepoint(char *arg, struct probe_point *pp)
switch (c) {
case ':': /* Line number */
pp->line = strtoul(arg, &tmp, 0);
- if (*tmp != '\0')
+ if (*tmp != '\0') {
semantic_error("There is non-digit char"
- " in line number.");
+ " in line number.\n");
+ return -EINVAL;
+ }
break;
case '+': /* Byte offset from a symbol */
pp->offset = strtoul(arg, &tmp, 0);
- if (*tmp != '\0')
+ if (*tmp != '\0') {
semantic_error("There is non-digit character"
- " in offset.");
+ " in offset.\n");
+ return -EINVAL;
+ }
break;
case '@': /* File name */
- if (pp->file)
- semantic_error("SRC@SRC is not allowed.");
+ if (pp->file) {
+ semantic_error("SRC@SRC is not allowed.\n");
+ return -EINVAL;
+ }
pp->file = strdup(arg);
- DIE_IF(pp->file == NULL);
+ if (pp->file == NULL)
+ return -ENOMEM;
break;
case '%': /* Probe places */
if (strcmp(arg, "return") == 0) {
pp->retprobe = 1;
- } else /* Others not supported yet */
- semantic_error("%%%s is not supported.", arg);
+ } else { /* Others not supported yet */
+ semantic_error("%%%s is not supported.\n", arg);
+ return -ENOTSUP;
+ }
break;
- default:
- DIE_IF("Program has a bug.");
+ default: /* Buggy case */
+ pr_err("This program has a bug at %s:%d.\n",
+ __FILE__, __LINE__);
+ return -ENOTSUP;
break;
}
}
/* Exclusion check */
- if (pp->lazy_line && pp->line)
+ if (pp->lazy_line && pp->line) {
semantic_error("Lazy pattern can't be used with line number.");
+ return -EINVAL;
+ }
- if (pp->lazy_line && pp->offset)
+ if (pp->lazy_line && pp->offset) {
semantic_error("Lazy pattern can't be used with offset.");
+ return -EINVAL;
+ }
- if (pp->line && pp->offset)
+ if (pp->line && pp->offset) {
semantic_error("Offset can't be used with line number.");
+ return -EINVAL;
+ }
- if (!pp->line && !pp->lazy_line && pp->file && !pp->function)
+ if (!pp->line && !pp->lazy_line && pp->file && !pp->function) {
semantic_error("File always requires line number or "
"lazy pattern.");
+ return -EINVAL;
+ }
- if (pp->offset && !pp->function)
+ if (pp->offset && !pp->function) {
semantic_error("Offset requires an entry function.");
+ return -EINVAL;
+ }
- if (pp->retprobe && !pp->function)
+ if (pp->retprobe && !pp->function) {
semantic_error("Return probe requires an entry function.");
+ return -EINVAL;
+ }
- if ((pp->offset || pp->line || pp->lazy_line) && pp->retprobe)
+ if ((pp->offset || pp->line || pp->lazy_line) && pp->retprobe) {
semantic_error("Offset/Line/Lazy pattern can't be used with "
"return probe.");
+ return -EINVAL;
+ }
- pr_debug("symbol:%s file:%s line:%d offset:%d return:%d lazy:%s\n",
+ pr_debug("symbol:%s file:%s line:%d offset:%lu return:%d lazy:%s\n",
pp->function, pp->file, pp->line, pp->offset, pp->retprobe,
pp->lazy_line);
+ return 0;
}
-/* Parse perf-probe event definition */
-void parse_perf_probe_event(const char *str, struct probe_point *pp,
- bool *need_dwarf)
+/* Parse perf-probe event argument */
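+/* Accepts e.g. "var", "name=var:type" or "arg->field.member" */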
+static int parse_perf_probe_arg(char *str, struct perf_probe_arg *arg)
{
- char **argv;
- int argc, i;
+ char *tmp;
+ struct perf_probe_arg_field **fieldp;
+
+ pr_debug("parsing arg: %s into ", str);
- *need_dwarf = false;
+ tmp = strchr(str, '=');
+ if (tmp) {
+ arg->name = strndup(str, tmp - str);
+ if (arg->name == NULL)
+ return -ENOMEM;
+ pr_debug("name:%s ", arg->name);
+ str = tmp + 1;
+ }
- argv = argv_split(str, &argc);
- if (!argv)
- die("argv_split failed.");
- if (argc > MAX_PROBE_ARGS + 1)
- semantic_error("Too many arguments");
+ tmp = strchr(str, ':');
+ if (tmp) { /* Type setting */
+ *tmp = '\0';
+ arg->type = strdup(tmp + 1);
+ if (arg->type == NULL)
+ return -ENOMEM;
+ pr_debug("type:%s ", arg->type);
+ }
+ tmp = strpbrk(str, "-.");
+ if (!is_c_varname(str) || !tmp) {
+ /* A variable, register, symbol or special value */
+ arg->var = strdup(str);
+ if (arg->var == NULL)
+ return -ENOMEM;
+ pr_debug("%s\n", arg->var);
+ return 0;
+ }
+
+ /* Structure fields */
+ arg->var = strndup(str, tmp - str);
+ if (arg->var == NULL)
+ return -ENOMEM;
+ pr_debug("%s, ", arg->var);
+ fieldp = &arg->field;
+
+ do {
+ *fieldp = zalloc(sizeof(struct perf_probe_arg_field));
+ if (*fieldp == NULL)
+ return -ENOMEM;
+ if (*tmp == '.') {
+ str = tmp + 1;
+ (*fieldp)->ref = false;
+ } else if (tmp[1] == '>') {
+ str = tmp + 2;
+ (*fieldp)->ref = true;
+ } else {
+ semantic_error("Argument parse error: %s\n", str);
+ return -EINVAL;
+ }
+
+ tmp = strpbrk(str, "-.");
+ if (tmp) {
+ (*fieldp)->name = strndup(str, tmp - str);
+ if ((*fieldp)->name == NULL)
+ return -ENOMEM;
+ pr_debug("%s(%d), ", (*fieldp)->name, (*fieldp)->ref);
+ fieldp = &(*fieldp)->next;
+ }
+ } while (tmp);
+ (*fieldp)->name = strdup(str);
+ if ((*fieldp)->name == NULL)
+ return -ENOMEM;
+ pr_debug("%s(%d)\n", (*fieldp)->name, (*fieldp)->ref);
+
+ /* If no name is specified, set the last field name */
+ if (!arg->name) {
+ arg->name = strdup((*fieldp)->name);
+ if (arg->name == NULL)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/* Parse perf-probe event command */
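+/* e.g. "myopen=do_sys_open+4 dfd filename" or "vfs_read:8 count" */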
+int parse_perf_probe_command(const char *cmd, struct perf_probe_event *pev)
+{
+ char **argv;
+ int argc, i, ret = 0;
+
+ argv = argv_split(cmd, &argc);
+ if (!argv) {
+ pr_debug("Failed to split arguments.\n");
+ return -ENOMEM;
+ }
+ if (argc - 1 > MAX_PROBE_ARGS) {
+ semantic_error("Too many probe arguments (%d).\n", argc - 1);
+ ret = -ERANGE;
+ goto out;
+ }
/* Parse probe point */
- parse_perf_probe_probepoint(argv[0], pp);
- if (pp->file || pp->line || pp->lazy_line)
- *need_dwarf = true;
+ ret = parse_perf_probe_point(argv[0], pev);
+ if (ret < 0)
+ goto out;
/* Copy arguments and ensure return probe has no C argument */
- pp->nr_args = argc - 1;
- pp->args = zalloc(sizeof(char *) * pp->nr_args);
- for (i = 0; i < pp->nr_args; i++) {
- pp->args[i] = strdup(argv[i + 1]);
- if (!pp->args[i])
- die("Failed to copy argument.");
- if (is_c_varname(pp->args[i])) {
- if (pp->retprobe)
- semantic_error("You can't specify local"
- " variable for kretprobe");
- *need_dwarf = true;
+ pev->nargs = argc - 1;
+ pev->args = zalloc(sizeof(struct perf_probe_arg) * pev->nargs);
+ if (pev->args == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ for (i = 0; i < pev->nargs && ret >= 0; i++) {
+ ret = parse_perf_probe_arg(argv[i + 1], &pev->args[i]);
+ if (ret >= 0 &&
+ is_c_varname(pev->args[i].var) && pev->point.retprobe) {
+ semantic_error("You can't specify local variable for"
+ " kretprobe.\n");
+ ret = -EINVAL;
}
}
-
+out:
argv_free(argv);
+
+ return ret;
+}
+
+/* Return true if this perf_probe_event requires debuginfo */
+bool perf_probe_event_need_dwarf(struct perf_probe_event *pev)
+{
+ int i;
+
+ if (pev->point.file || pev->point.line || pev->point.lazy_line)
+ return true;
+
+ for (i = 0; i < pev->nargs; i++)
+ if (is_c_varname(pev->args[i].var))
+ return true;
+
+ return false;
}
/* Parse kprobe_events event into struct probe_point */
-void parse_trace_kprobe_event(const char *str, struct probe_point *pp)
+int parse_kprobe_trace_command(const char *cmd, struct kprobe_trace_event *tev)
{
+ struct kprobe_trace_point *tp = &tev->point;
char pr;
char *p;
int ret, i, argc;
char **argv;
- pr_debug("Parsing kprobe_events: %s\n", str);
- argv = argv_split(str, &argc);
- if (!argv)
- die("argv_split failed.");
- if (argc < 2)
- semantic_error("Too less arguments.");
+ pr_debug("Parsing kprobe_events: %s\n", cmd);
+ argv = argv_split(cmd, &argc);
+ if (!argv) {
+ pr_debug("Failed to split arguments.\n");
+ return -ENOMEM;
+ }
+ if (argc < 2) {
+ semantic_error("Too few probe arguments.\n");
+ ret = -ERANGE;
+ goto out;
+ }
/* Scan event and group name. */
ret = sscanf(argv[0], "%c:%a[^/ \t]/%a[^ \t]",
- &pr, (float *)(void *)&pp->group,
- (float *)(void *)&pp->event);
- if (ret != 3)
- semantic_error("Failed to parse event name: %s", argv[0]);
- pr_debug("Group:%s Event:%s probe:%c\n", pp->group, pp->event, pr);
+ &pr, (float *)(void *)&tev->group,
+ (float *)(void *)&tev->event);
+ if (ret != 3) {
+ semantic_error("Failed to parse event name: %s\n", argv[0]);
+ ret = -EINVAL;
+ goto out;
+ }
+ pr_debug("Group:%s Event:%s probe:%c\n", tev->group, tev->event, pr);
- pp->retprobe = (pr == 'r');
+ tp->retprobe = (pr == 'r');
/* Scan function name and offset */
- ret = sscanf(argv[1], "%a[^+]+%d", (float *)(void *)&pp->function,
- &pp->offset);
+ ret = sscanf(argv[1], "%a[^+]+%lu", (float *)(void *)&tp->symbol,
+ &tp->offset);
if (ret == 1)
- pp->offset = 0;
-
- /* kprobe_events doesn't have this information */
- pp->line = 0;
- pp->file = NULL;
+ tp->offset = 0;
- pp->nr_args = argc - 2;
- pp->args = zalloc(sizeof(char *) * pp->nr_args);
- for (i = 0; i < pp->nr_args; i++) {
+ tev->nargs = argc - 2;
+ tev->args = zalloc(sizeof(struct kprobe_trace_arg) * tev->nargs);
+ if (tev->args == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ for (i = 0; i < tev->nargs; i++) {
p = strchr(argv[i + 2], '=');
if (p) /* We don't need which register is assigned. */
- *p = '\0';
- pp->args[i] = strdup(argv[i + 2]);
- if (!pp->args[i])
- die("Failed to copy argument.");
+ *p++ = '\0';
+ else
+ p = argv[i + 2];
+ tev->args[i].name = strdup(argv[i + 2]);
+ /* TODO: parse regs and offset */
+ tev->args[i].value = strdup(p);
+ if (tev->args[i].name == NULL || tev->args[i].value == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
}
-
+ ret = 0;
+out:
argv_free(argv);
+ return ret;
}
-/* Synthesize only probe point (not argument) */
-int synthesize_perf_probe_point(struct probe_point *pp)
+/* Compose only probe arg */
+int synthesize_perf_probe_arg(struct perf_probe_arg *pa, char *buf, size_t len)
{
- char *buf;
- char offs[64] = "", line[64] = "";
+ struct perf_probe_arg_field *field = pa->field;
int ret;
+ char *tmp = buf;
- pp->probes[0] = buf = zalloc(MAX_CMDLEN);
- pp->found = 1;
- if (!buf)
- die("Failed to allocate memory by zalloc.");
+ if (pa->name && pa->var)
+ ret = e_snprintf(tmp, len, "%s=%s", pa->name, pa->var);
+ else
+ ret = e_snprintf(tmp, len, "%s", pa->name ? pa->name : pa->var);
+ if (ret <= 0)
+ goto error;
+ tmp += ret;
+ len -= ret;
+
+ while (field) {
+ ret = e_snprintf(tmp, len, "%s%s", field->ref ? "->" : ".",
+ field->name);
+ if (ret <= 0)
+ goto error;
+ tmp += ret;
+ len -= ret;
+ field = field->next;
+ }
+
+ if (pa->type) {
+ ret = e_snprintf(tmp, len, ":%s", pa->type);
+ if (ret <= 0)
+ goto error;
+ tmp += ret;
+ len -= ret;
+ }
+
+ return tmp - buf;
+error:
+ pr_debug("Failed to synthesize perf probe argument: %s",
+ strerror(-ret));
+ return ret;
+}
+
+/* Compose only probe point (not argument) */
+static char *synthesize_perf_probe_point(struct perf_probe_point *pp)
+{
+ char *buf, *tmp;
+ char offs[32] = "", line[32] = "", file[32] = "";
+ int ret, len;
+
+ buf = zalloc(MAX_CMDLEN);
+ if (buf == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
if (pp->offset) {
- ret = e_snprintf(offs, 64, "+%d", pp->offset);
+ ret = e_snprintf(offs, 32, "+%lu", pp->offset);
if (ret <= 0)
goto error;
}
if (pp->line) {
- ret = e_snprintf(line, 64, ":%d", pp->line);
+ ret = e_snprintf(line, 32, ":%d", pp->line);
+ if (ret <= 0)
+ goto error;
+ }
+ if (pp->file) {
+ len = strlen(pp->file) - 31;
+ if (len < 0)
+ len = 0;
+ tmp = strchr(pp->file + len, '/');
+ if (!tmp)
+ tmp = pp->file + len;
+ ret = e_snprintf(file, 32, "@%s", tmp + 1);
if (ret <= 0)
goto error;
}
if (pp->function)
- ret = e_snprintf(buf, MAX_CMDLEN, "%s%s%s%s", pp->function,
- offs, pp->retprobe ? "%return" : "", line);
+ ret = e_snprintf(buf, MAX_CMDLEN, "%s%s%s%s%s", pp->function,
+ offs, pp->retprobe ? "%return" : "", line,
+ file);
else
- ret = e_snprintf(buf, MAX_CMDLEN, "%s%s", pp->file, line);
- if (ret <= 0) {
+ ret = e_snprintf(buf, MAX_CMDLEN, "%s%s", file, line);
+ if (ret <= 0)
+ goto error;
+
+ return buf;
error:
- free(pp->probes[0]);
- pp->probes[0] = NULL;
- pp->found = 0;
- }
- return ret;
+ pr_debug("Failed to synthesize perf probe point: %s",
+ strerror(-ret));
+ if (buf)
+ free(buf);
+ return NULL;
}
-int synthesize_perf_probe_event(struct probe_point *pp)
+#if 0
+char *synthesize_perf_probe_command(struct perf_probe_event *pev)
{
char *buf;
int i, len, ret;
- len = synthesize_perf_probe_point(pp);
- if (len < 0)
- return 0;
+ buf = synthesize_perf_probe_point(&pev->point);
+ if (!buf)
+ return NULL;
- buf = pp->probes[0];
- for (i = 0; i < pp->nr_args; i++) {
+ len = strlen(buf);
+ for (i = 0; i < pev->nargs; i++) {
ret = e_snprintf(&buf[len], MAX_CMDLEN - len, " %s",
- pp->args[i]);
- if (ret <= 0)
- goto error;
+ pev->args[i].name);
+ if (ret <= 0) {
+ free(buf);
+ return NULL;
+ }
len += ret;
}
- pp->found = 1;
- return pp->found;
-error:
- free(pp->probes[0]);
- pp->probes[0] = NULL;
+ return buf;
+}
+#endif
+
+static int __synthesize_kprobe_trace_arg_ref(struct kprobe_trace_arg_ref *ref,
+ char **buf, size_t *buflen,
+ int depth)
+{
+ int ret;
+ if (ref->next) {
+ depth = __synthesize_kprobe_trace_arg_ref(ref->next, buf,
+ buflen, depth + 1);
+ if (depth < 0)
+ goto out;
+ }
+
+ ret = e_snprintf(*buf, *buflen, "%+ld(", ref->offset);
+ if (ret < 0)
+ depth = ret;
+ else {
+ *buf += ret;
+ *buflen -= ret;
+ }
+out:
+ return depth;
- return ret;
}
-int synthesize_trace_kprobe_event(struct probe_point *pp)
+static int synthesize_kprobe_trace_arg(struct kprobe_trace_arg *arg,
+ char *buf, size_t buflen)
{
+ int ret, depth = 0;
+ char *tmp = buf;
+
+ /* Argument name or separator */
+ if (arg->name)
+ ret = e_snprintf(buf, buflen, " %s=", arg->name);
+ else
+ ret = e_snprintf(buf, buflen, " ");
+ if (ret < 0)
+ return ret;
+ buf += ret;
+ buflen -= ret;
+
+ /* Dereferencing arguments */
+ if (arg->ref) {
+ depth = __synthesize_kprobe_trace_arg_ref(arg->ref, &buf,
+ &buflen, 1);
+ if (depth < 0)
+ return depth;
+ }
+
+ /* Print argument value */
+ ret = e_snprintf(buf, buflen, "%s", arg->value);
+ if (ret < 0)
+ return ret;
+ buf += ret;
+ buflen -= ret;
+
+ /* Closing */
+ while (depth--) {
+ ret = e_snprintf(buf, buflen, ")");
+ if (ret < 0)
+ return ret;
+ buf += ret;
+ buflen -= ret;
+ }
+ /* Print argument type */
+ if (arg->type) {
+ ret = e_snprintf(buf, buflen, ":%s", arg->type);
+ if (ret <= 0)
+ return ret;
+ buf += ret;
+ }
+
+ return buf - tmp;
+}
+
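+/*
+ * Build the command string written to kprobe_events, e.g.
+ * "p:probe/do_sys_open do_sys_open+0 dfd=%di".
+ */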
+char *synthesize_kprobe_trace_command(struct kprobe_trace_event *tev)
+{
+ struct kprobe_trace_point *tp = &tev->point;
char *buf;
int i, len, ret;
- pp->probes[0] = buf = zalloc(MAX_CMDLEN);
- if (!buf)
- die("Failed to allocate memory by zalloc.");
- ret = e_snprintf(buf, MAX_CMDLEN, "%s+%d", pp->function, pp->offset);
- if (ret <= 0)
+ buf = zalloc(MAX_CMDLEN);
+ if (buf == NULL)
+ return NULL;
+
+ len = e_snprintf(buf, MAX_CMDLEN, "%c:%s/%s %s+%lu",
+ tp->retprobe ? 'r' : 'p',
+ tev->group, tev->event,
+ tp->symbol, tp->offset);
+ if (len <= 0)
goto error;
- len = ret;
- for (i = 0; i < pp->nr_args; i++) {
- ret = e_snprintf(&buf[len], MAX_CMDLEN - len, " %s",
- pp->args[i]);
+ for (i = 0; i < tev->nargs; i++) {
+ ret = synthesize_kprobe_trace_arg(&tev->args[i], buf + len,
+ MAX_CMDLEN - len);
if (ret <= 0)
goto error;
len += ret;
}
- pp->found = 1;
- return pp->found;
+ return buf;
error:
- free(pp->probes[0]);
- pp->probes[0] = NULL;
+ free(buf);
+ return NULL;
+}
+
+int convert_to_perf_probe_event(struct kprobe_trace_event *tev,
+ struct perf_probe_event *pev)
+{
+ char buf[64] = "";
+ int i, ret;
+
+ /* Convert event/group name */
+ pev->event = strdup(tev->event);
+ pev->group = strdup(tev->group);
+ if (pev->event == NULL || pev->group == NULL)
+ return -ENOMEM;
+
+ /* Convert trace_point to probe_point */
+ ret = convert_to_perf_probe_point(&tev->point, &pev->point);
+ if (ret < 0)
+ return ret;
+
+ /* Convert trace_arg to probe_arg */
+ pev->nargs = tev->nargs;
+ pev->args = zalloc(sizeof(struct perf_probe_arg) * pev->nargs);
+ if (pev->args == NULL)
+ return -ENOMEM;
+ for (i = 0; i < tev->nargs && ret >= 0; i++) {
+ if (tev->args[i].name)
+ pev->args[i].name = strdup(tev->args[i].name);
+ else {
+ ret = synthesize_kprobe_trace_arg(&tev->args[i],
+ buf, 64);
+ pev->args[i].name = strdup(buf);
+ }
+ if (pev->args[i].name == NULL && ret >= 0)
+ ret = -ENOMEM;
+ }
+
+ if (ret < 0)
+ clear_perf_probe_event(pev);
return ret;
}
-static int open_kprobe_events(int flags, int mode)
+void clear_perf_probe_event(struct perf_probe_event *pev)
+{
+ struct perf_probe_point *pp = &pev->point;
+ struct perf_probe_arg_field *field, *next;
+ int i;
+
+ if (pev->event)
+ free(pev->event);
+ if (pev->group)
+ free(pev->group);
+ if (pp->file)
+ free(pp->file);
+ if (pp->function)
+ free(pp->function);
+ if (pp->lazy_line)
+ free(pp->lazy_line);
+ for (i = 0; i < pev->nargs; i++) {
+ if (pev->args[i].name)
+ free(pev->args[i].name);
+ if (pev->args[i].var)
+ free(pev->args[i].var);
+ if (pev->args[i].type)
+ free(pev->args[i].type);
+ field = pev->args[i].field;
+ while (field) {
+ next = field->next;
+ if (field->name)
+ free(field->name);
+ free(field);
+ field = next;
+ }
+ }
+ if (pev->args)
+ free(pev->args);
+ memset(pev, 0, sizeof(*pev));
+}
+
+void clear_kprobe_trace_event(struct kprobe_trace_event *tev)
+{
+ struct kprobe_trace_arg_ref *ref, *next;
+ int i;
+
+ if (tev->event)
+ free(tev->event);
+ if (tev->group)
+ free(tev->group);
+ if (tev->point.symbol)
+ free(tev->point.symbol);
+ for (i = 0; i < tev->nargs; i++) {
+ if (tev->args[i].name)
+ free(tev->args[i].name);
+ if (tev->args[i].value)
+ free(tev->args[i].value);
+ if (tev->args[i].type)
+ free(tev->args[i].type);
+ ref = tev->args[i].ref;
+ while (ref) {
+ next = ref->next;
+ free(ref);
+ ref = next;
+ }
+ }
+ if (tev->args)
+ free(tev->args);
+ memset(tev, 0, sizeof(*tev));
+}
+
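+/*
+ * Find debugfs and open <debugfs>/tracing/kprobe_events; a dry run
+ * never opens the file for writing.
+ */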
+static int open_kprobe_events(bool readwrite)
{
char buf[PATH_MAX];
+ const char *__debugfs;
int ret;
- ret = e_snprintf(buf, PATH_MAX, "%s/../kprobe_events", debugfs_path);
- if (ret < 0)
- die("Failed to make kprobe_events path.");
+ __debugfs = debugfs_find_mountpoint();
+ if (__debugfs == NULL) {
+ pr_warning("Debugfs is not mounted.\n");
+ return -ENOENT;
+ }
+
+ ret = e_snprintf(buf, PATH_MAX, "%stracing/kprobe_events", __debugfs);
+ if (ret >= 0) {
+ pr_debug("Opening %s write=%d\n", buf, readwrite);
+ if (readwrite && !probe_event_dry_run)
+ ret = open(buf, O_RDWR, O_APPEND);
+ else
+ ret = open(buf, O_RDONLY, 0);
+ }
- ret = open(buf, flags, mode);
if (ret < 0) {
if (errno == ENOENT)
- die("kprobe_events file does not exist -"
- " please rebuild with CONFIG_KPROBE_EVENT.");
+ pr_warning("kprobe_events file does not exist - please"
+ " rebuild kernel with CONFIG_KPROBE_EVENT.\n");
else
- die("Could not open kprobe_events file: %s",
- strerror(errno));
+ pr_warning("Failed to open kprobe_events file: %s\n",
+ strerror(errno));
}
return ret;
}
/* Get raw string list of current kprobe_events */
-static struct strlist *get_trace_kprobe_event_rawlist(int fd)
+static struct strlist *get_kprobe_trace_command_rawlist(int fd)
{
int ret, idx;
FILE *fp;
@@ -447,271 +1142,486 @@ static struct strlist *get_trace_kprobe_event_rawlist(int fd)
if (p[idx] == '\n')
p[idx] = '\0';
ret = strlist__add(sl, buf);
- if (ret < 0)
- die("strlist__add failed: %s", strerror(-ret));
+ if (ret < 0) {
+ pr_debug("strlist__add failed: %s\n", strerror(-ret));
+ strlist__delete(sl);
+ return NULL;
+ }
}
fclose(fp);
return sl;
}
-/* Free and zero clear probe_point */
-static void clear_probe_point(struct probe_point *pp)
-{
- int i;
-
- if (pp->event)
- free(pp->event);
- if (pp->group)
- free(pp->group);
- if (pp->function)
- free(pp->function);
- if (pp->file)
- free(pp->file);
- if (pp->lazy_line)
- free(pp->lazy_line);
- for (i = 0; i < pp->nr_args; i++)
- free(pp->args[i]);
- if (pp->args)
- free(pp->args);
- for (i = 0; i < pp->found; i++)
- free(pp->probes[i]);
- memset(pp, 0, sizeof(*pp));
-}
-
/* Show an event */
-static void show_perf_probe_event(const char *event, const char *place,
- struct probe_point *pp)
+static int show_perf_probe_event(struct perf_probe_event *pev)
{
int i, ret;
char buf[128];
+ char *place;
+
+ /* Synthesize only event probe point */
+ place = synthesize_perf_probe_point(&pev->point);
+ if (!place)
+ return -EINVAL;
- ret = e_snprintf(buf, 128, "%s:%s", pp->group, event);
+ ret = e_snprintf(buf, 128, "%s:%s", pev->group, pev->event);
if (ret < 0)
- die("Failed to copy event: %s", strerror(-ret));
- printf(" %-40s (on %s", buf, place);
+ return ret;
+
+ printf(" %-20s (on %s", buf, place);
- if (pp->nr_args > 0) {
+ if (pev->nargs > 0) {
printf(" with");
- for (i = 0; i < pp->nr_args; i++)
- printf(" %s", pp->args[i]);
+ for (i = 0; i < pev->nargs; i++) {
+ ret = synthesize_perf_probe_arg(&pev->args[i],
+ buf, 128);
+ if (ret < 0)
+ break;
+ printf(" %s", buf);
+ }
}
printf(")\n");
+ free(place);
+ return ret;
}
/* List up current perf-probe events */
-void show_perf_probe_events(void)
+int show_perf_probe_events(void)
{
- int fd;
- struct probe_point pp;
+ int fd, ret;
+ struct kprobe_trace_event tev;
+ struct perf_probe_event pev;
struct strlist *rawlist;
struct str_node *ent;
setup_pager();
- memset(&pp, 0, sizeof(pp));
+ ret = init_vmlinux();
+ if (ret < 0)
+ return ret;
+
+ memset(&tev, 0, sizeof(tev));
+ memset(&pev, 0, sizeof(pev));
- fd = open_kprobe_events(O_RDONLY, 0);
- rawlist = get_trace_kprobe_event_rawlist(fd);
+ fd = open_kprobe_events(false);
+ if (fd < 0)
+ return fd;
+
+ rawlist = get_kprobe_trace_command_rawlist(fd);
close(fd);
+ if (!rawlist)
+ return -ENOENT;
strlist__for_each(ent, rawlist) {
- parse_trace_kprobe_event(ent->s, &pp);
- /* Synthesize only event probe point */
- synthesize_perf_probe_point(&pp);
- /* Show an event */
- show_perf_probe_event(pp.event, pp.probes[0], &pp);
- clear_probe_point(&pp);
+ ret = parse_kprobe_trace_command(ent->s, &tev);
+ if (ret >= 0) {
+ ret = convert_to_perf_probe_event(&tev, &pev);
+ if (ret >= 0)
+ ret = show_perf_probe_event(&pev);
+ }
+ clear_perf_probe_event(&pev);
+ clear_kprobe_trace_event(&tev);
+ if (ret < 0)
+ break;
}
-
strlist__delete(rawlist);
+
+ return ret;
}
/* Get current perf-probe event names */
-static struct strlist *get_perf_event_names(int fd, bool include_group)
+static struct strlist *get_kprobe_trace_event_names(int fd, bool include_group)
{
char buf[128];
struct strlist *sl, *rawlist;
struct str_node *ent;
- struct probe_point pp;
+ struct kprobe_trace_event tev;
+ int ret = 0;
- memset(&pp, 0, sizeof(pp));
- rawlist = get_trace_kprobe_event_rawlist(fd);
+ memset(&tev, 0, sizeof(tev));
+ rawlist = get_kprobe_trace_command_rawlist(fd);
sl = strlist__new(true, NULL);
strlist__for_each(ent, rawlist) {
- parse_trace_kprobe_event(ent->s, &pp);
+ ret = parse_kprobe_trace_command(ent->s, &tev);
+ if (ret < 0)
+ break;
if (include_group) {
- if (e_snprintf(buf, 128, "%s:%s", pp.group,
- pp.event) < 0)
- die("Failed to copy group:event name.");
- strlist__add(sl, buf);
+ ret = e_snprintf(buf, 128, "%s:%s", tev.group,
+ tev.event);
+ if (ret >= 0)
+ ret = strlist__add(sl, buf);
} else
- strlist__add(sl, pp.event);
- clear_probe_point(&pp);
+ ret = strlist__add(sl, tev.event);
+ clear_kprobe_trace_event(&tev);
+ if (ret < 0)
+ break;
}
-
strlist__delete(rawlist);
+ if (ret < 0) {
+ strlist__delete(sl);
+ return NULL;
+ }
return sl;
}
-static void write_trace_kprobe_event(int fd, const char *buf)
+static int write_kprobe_trace_event(int fd, struct kprobe_trace_event *tev)
{
- int ret;
+ int ret = 0;
+ char *buf = synthesize_kprobe_trace_command(tev);
+
+ if (!buf) {
+ pr_debug("Failed to synthesize kprobe trace event.\n");
+ return -EINVAL;
+ }
pr_debug("Writing event: %s\n", buf);
- ret = write(fd, buf, strlen(buf));
- if (ret <= 0)
- die("Failed to write event: %s", strerror(errno));
+ if (!probe_event_dry_run) {
+ ret = write(fd, buf, strlen(buf));
+ if (ret <= 0)
+ pr_warning("Failed to write event: %s\n",
+ strerror(errno));
+ }
+ free(buf);
+ return ret;
}
-static void get_new_event_name(char *buf, size_t len, const char *base,
- struct strlist *namelist, bool allow_suffix)
+static int get_new_event_name(char *buf, size_t len, const char *base,
+ struct strlist *namelist, bool allow_suffix)
{
int i, ret;
/* Try no suffix */
ret = e_snprintf(buf, len, "%s", base);
- if (ret < 0)
- die("snprintf() failed: %s", strerror(-ret));
+ if (ret < 0) {
+ pr_debug("snprintf() failed: %s\n", strerror(-ret));
+ return ret;
+ }
if (!strlist__has_entry(namelist, buf))
- return;
+ return 0;
if (!allow_suffix) {
pr_warning("Error: event \"%s\" already exists. "
"(Use -f to force duplicates.)\n", base);
- die("Can't add new event.");
+ return -EEXIST;
}
/* Try to add suffix */
for (i = 1; i < MAX_EVENT_INDEX; i++) {
ret = e_snprintf(buf, len, "%s_%d", base, i);
- if (ret < 0)
- die("snprintf() failed: %s", strerror(-ret));
+ if (ret < 0) {
+ pr_debug("snprintf() failed: %s\n", strerror(-ret));
+ return ret;
+ }
if (!strlist__has_entry(namelist, buf))
break;
}
- if (i == MAX_EVENT_INDEX)
- die("Too many events are on the same function.");
+ if (i == MAX_EVENT_INDEX) {
+ pr_warning("Too many events are on the same function.\n");
+ ret = -ERANGE;
+ }
+
+ return ret;
}
-void add_trace_kprobe_events(struct probe_point *probes, int nr_probes,
- bool force_add)
+static int __add_kprobe_trace_events(struct perf_probe_event *pev,
+ struct kprobe_trace_event *tevs,
+ int ntevs, bool allow_suffix)
{
- int i, j, fd;
- struct probe_point *pp;
- char buf[MAX_CMDLEN];
- char event[64];
+ int i, fd, ret;
+ struct kprobe_trace_event *tev = NULL;
+ char buf[64];
+ const char *event, *group;
struct strlist *namelist;
- bool allow_suffix;
- fd = open_kprobe_events(O_RDWR, O_APPEND);
+ fd = open_kprobe_events(true);
+ if (fd < 0)
+ return fd;
/* Get current event names */
- namelist = get_perf_event_names(fd, false);
-
- for (j = 0; j < nr_probes; j++) {
- pp = probes + j;
- if (!pp->event)
- pp->event = strdup(pp->function);
- if (!pp->group)
- pp->group = strdup(PERFPROBE_GROUP);
- DIE_IF(!pp->event || !pp->group);
- /* If force_add is true, suffix search is allowed */
- allow_suffix = force_add;
- for (i = 0; i < pp->found; i++) {
- /* Get an unused new event name */
- get_new_event_name(event, 64, pp->event, namelist,
- allow_suffix);
- snprintf(buf, MAX_CMDLEN, "%c:%s/%s %s\n",
- pp->retprobe ? 'r' : 'p',
- pp->group, event,
- pp->probes[i]);
- write_trace_kprobe_event(fd, buf);
- printf("Added new event:\n");
- /* Get the first parameter (probe-point) */
- sscanf(pp->probes[i], "%s", buf);
- show_perf_probe_event(event, buf, pp);
- /* Add added event name to namelist */
- strlist__add(namelist, event);
- /*
- * Probes after the first probe which comes from same
- * user input are always allowed to add suffix, because
- * there might be several addresses corresponding to
- * one code line.
- */
- allow_suffix = true;
+ namelist = get_kprobe_trace_event_names(fd, false);
+ if (!namelist) {
+ pr_debug("Failed to get current event list.\n");
+ return -EIO;
+ }
+
+ ret = 0;
+ printf("Add new event%s\n", (ntevs > 1) ? "s:" : ":");
+ for (i = 0; i < ntevs; i++) {
+ tev = &tevs[i];
+ if (pev->event)
+ event = pev->event;
+ else
+ if (pev->point.function)
+ event = pev->point.function;
+ else
+ event = tev->point.symbol;
+ if (pev->group)
+ group = pev->group;
+ else
+ group = PERFPROBE_GROUP;
+
+ /* Get an unused new event name */
+ ret = get_new_event_name(buf, 64, event,
+ namelist, allow_suffix);
+ if (ret < 0)
+ break;
+ event = buf;
+
+ tev->event = strdup(event);
+ tev->group = strdup(group);
+ if (tev->event == NULL || tev->group == NULL) {
+ ret = -ENOMEM;
+ break;
}
+ ret = write_kprobe_trace_event(fd, tev);
+ if (ret < 0)
+ break;
+ /* Add added event name to namelist */
+ strlist__add(namelist, event);
+
+ /* Trick here - save current event/group */
+ event = pev->event;
+ group = pev->group;
+ pev->event = tev->event;
+ pev->group = tev->group;
+ show_perf_probe_event(pev);
+ /* Trick here - restore current event/group */
+ pev->event = (char *)event;
+ pev->group = (char *)group;
+
+ /*
+ * Probes after the first probe which comes from same
+ * user input are always allowed to add suffix, because
+ * there might be several addresses corresponding to
+ * one code line.
+ */
+ allow_suffix = true;
+ }
+
+ if (ret >= 0) {
+ /* Show how to use the event. */
+ printf("\nYou can now use it on all perf tools, such as:\n\n");
+ printf("\tperf record -e %s:%s -aR sleep 1\n\n", tev->group,
+ tev->event);
}
- /* Show how to use the event. */
- printf("\nYou can now use it on all perf tools, such as:\n\n");
- printf("\tperf record -e %s:%s -a sleep 1\n\n", PERFPROBE_GROUP, event);
strlist__delete(namelist);
close(fd);
+ return ret;
+}
+
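+/*
+ * Convert a perf_probe_event into kprobe_trace_events; when debuginfo
+ * yields nothing usable, fall back to a plain symbol+offset probe and
+ * just check that the symbol exists in the kernel map.
+ */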
+static int convert_to_kprobe_trace_events(struct perf_probe_event *pev,
+ struct kprobe_trace_event **tevs,
+ int max_tevs)
+{
+ struct symbol *sym;
+ int ret = 0, i;
+ struct kprobe_trace_event *tev;
+
+ /* Convert perf_probe_event with debuginfo */
+ ret = try_to_find_kprobe_trace_events(pev, tevs, max_tevs);
+ if (ret != 0)
+ return ret;
+
+ /* Allocate trace event buffer */
+ tev = *tevs = zalloc(sizeof(struct kprobe_trace_event));
+ if (tev == NULL)
+ return -ENOMEM;
+
+ /* Copy parameters */
+ tev->point.symbol = strdup(pev->point.function);
+ if (tev->point.symbol == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ tev->point.offset = pev->point.offset;
+ tev->nargs = pev->nargs;
+ if (tev->nargs) {
+ tev->args = zalloc(sizeof(struct kprobe_trace_arg)
+ * tev->nargs);
+ if (tev->args == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ for (i = 0; i < tev->nargs; i++) {
+ if (pev->args[i].name) {
+ tev->args[i].name = strdup(pev->args[i].name);
+ if (tev->args[i].name == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ }
+ tev->args[i].value = strdup(pev->args[i].var);
+ if (tev->args[i].value == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ if (pev->args[i].type) {
+ tev->args[i].type = strdup(pev->args[i].type);
+ if (tev->args[i].type == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ }
+ }
+ }
+
+ /* Currently just checking function name from symbol map */
+ sym = map__find_symbol_by_name(machine.vmlinux_maps[MAP__FUNCTION],
+ tev->point.symbol, NULL);
+ if (!sym) {
+ pr_warning("Kernel symbol \'%s\' not found.\n",
+ tev->point.symbol);
+ ret = -ENOENT;
+ goto error;
+ }
+
+ return 1;
+error:
+ clear_kprobe_trace_event(tev);
+ free(tev);
+ *tevs = NULL;
+ return ret;
+}
+
+struct __event_package {
+ struct perf_probe_event *pev;
+ struct kprobe_trace_event *tevs;
+ int ntevs;
+};
+
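+/*
+ * Convert every requested probe first and only then write them all to
+ * kprobe_events, so a failed conversion adds no events at all.
+ */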
+int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
+ bool force_add, int max_tevs)
+{
+ int i, j, ret;
+ struct __event_package *pkgs;
+
+ pkgs = zalloc(sizeof(struct __event_package) * npevs);
+ if (pkgs == NULL)
+ return -ENOMEM;
+
+ /* Init vmlinux path */
+ ret = init_vmlinux();
+ if (ret < 0)
+ return ret;
+
+ /* Loop 1: convert all events */
+ for (i = 0; i < npevs; i++) {
+ pkgs[i].pev = &pevs[i];
+ /* Convert with or without debuginfo */
+ ret = convert_to_kprobe_trace_events(pkgs[i].pev,
+ &pkgs[i].tevs, max_tevs);
+ if (ret < 0)
+ goto end;
+ pkgs[i].ntevs = ret;
+ }
+
+ /* Loop 2: add all events */
+ for (i = 0; i < npevs && ret >= 0; i++)
+ ret = __add_kprobe_trace_events(pkgs[i].pev, pkgs[i].tevs,
+ pkgs[i].ntevs, force_add);
+end:
+ /* Loop 3: cleanup trace events */
+ for (i = 0; i < npevs; i++)
+ for (j = 0; j < pkgs[i].ntevs; j++)
+ clear_kprobe_trace_event(&pkgs[i].tevs[j]);
+
+ return ret;
}
-static void __del_trace_kprobe_event(int fd, struct str_node *ent)
+static int __del_trace_kprobe_event(int fd, struct str_node *ent)
{
char *p;
char buf[128];
+ int ret;
/* Convert from perf-probe event to trace-kprobe event */
- if (e_snprintf(buf, 128, "-:%s", ent->s) < 0)
- die("Failed to copy event.");
+ ret = e_snprintf(buf, 128, "-:%s", ent->s);
+ if (ret < 0)
+ goto error;
+
p = strchr(buf + 2, ':');
- if (!p)
- die("Internal error: %s should have ':' but not.", ent->s);
+ if (!p) {
+ pr_debug("Internal error: %s should have ':' but not.\n",
+ ent->s);
+ ret = -ENOTSUP;
+ goto error;
+ }
*p = '/';
- write_trace_kprobe_event(fd, buf);
+ pr_debug("Writing event: %s\n", buf);
+ ret = write(fd, buf, strlen(buf));
+ if (ret < 0)
+ goto error;
+
printf("Remove event: %s\n", ent->s);
+ return 0;
+error:
+ pr_warning("Failed to delete event: %s\n", strerror(-ret));
+ return ret;
}
-static void del_trace_kprobe_event(int fd, const char *group,
- const char *event, struct strlist *namelist)
+static int del_trace_kprobe_event(int fd, const char *group,
+ const char *event, struct strlist *namelist)
{
char buf[128];
struct str_node *ent, *n;
- int found = 0;
+ int found = 0, ret = 0;
- if (e_snprintf(buf, 128, "%s:%s", group, event) < 0)
- die("Failed to copy event.");
+ ret = e_snprintf(buf, 128, "%s:%s", group, event);
+ if (ret < 0) {
+ pr_err("Failed to copy event.");
+ return ret;
+ }
if (strpbrk(buf, "*?")) { /* Glob-exp */
strlist__for_each_safe(ent, n, namelist)
if (strglobmatch(ent->s, buf)) {
found++;
- __del_trace_kprobe_event(fd, ent);
+ ret = __del_trace_kprobe_event(fd, ent);
+ if (ret < 0)
+ break;
strlist__remove(namelist, ent);
}
} else {
ent = strlist__find(namelist, buf);
if (ent) {
found++;
- __del_trace_kprobe_event(fd, ent);
- strlist__remove(namelist, ent);
+ ret = __del_trace_kprobe_event(fd, ent);
+ if (ret >= 0)
+ strlist__remove(namelist, ent);
}
}
- if (found == 0)
- pr_info("Info: event \"%s\" does not exist, could not remove it.\n", buf);
+ if (found == 0 && ret >= 0)
+ pr_info("Info: Event \"%s\" does not exist.\n", buf);
+
+ return ret;
}
-void del_trace_kprobe_events(struct strlist *dellist)
+int del_perf_probe_events(struct strlist *dellist)
{
- int fd;
+ int fd, ret = 0;
const char *group, *event;
char *p, *str;
struct str_node *ent;
struct strlist *namelist;
- fd = open_kprobe_events(O_RDWR, O_APPEND);
+ fd = open_kprobe_events(true);
+ if (fd < 0)
+ return fd;
+
/* Get current event names */
- namelist = get_perf_event_names(fd, true);
+ namelist = get_kprobe_trace_event_names(fd, true);
+ if (namelist == NULL)
+ return -EINVAL;
strlist__for_each(ent, dellist) {
str = strdup(ent->s);
- if (!str)
- die("Failed to copy event.");
+ if (str == NULL) {
+ ret = -ENOMEM;
+ break;
+ }
pr_debug("Parsing: %s\n", str);
p = strchr(str, ':');
if (p) {
@@ -723,80 +1633,14 @@ void del_trace_kprobe_events(struct strlist *dellist)
event = str;
}
pr_debug("Group: %s, Event: %s\n", group, event);
- del_trace_kprobe_event(fd, group, event, namelist);
+ ret = del_trace_kprobe_event(fd, group, event, namelist);
free(str);
+ if (ret < 0)
+ break;
}
strlist__delete(namelist);
close(fd);
-}
-#define LINEBUF_SIZE 256
-#define NR_ADDITIONAL_LINES 2
-
-static void show_one_line(FILE *fp, unsigned int l, bool skip, bool show_num)
-{
- char buf[LINEBUF_SIZE];
- const char *color = PERF_COLOR_BLUE;
-
- if (fgets(buf, LINEBUF_SIZE, fp) == NULL)
- goto error;
- if (!skip) {
- if (show_num)
- fprintf(stdout, "%7u %s", l, buf);
- else
- color_fprintf(stdout, color, " %s", buf);
- }
-
- while (strlen(buf) == LINEBUF_SIZE - 1 &&
- buf[LINEBUF_SIZE - 2] != '\n') {
- if (fgets(buf, LINEBUF_SIZE, fp) == NULL)
- goto error;
- if (!skip) {
- if (show_num)
- fprintf(stdout, "%s", buf);
- else
- color_fprintf(stdout, color, "%s", buf);
- }
- }
- return;
-error:
- if (feof(fp))
- die("Source file is shorter than expected.");
- else
- die("File read error: %s", strerror(errno));
+ return ret;
}
-void show_line_range(struct line_range *lr)
-{
- unsigned int l = 1;
- struct line_node *ln;
- FILE *fp;
-
- setup_pager();
-
- if (lr->function)
- fprintf(stdout, "<%s:%d>\n", lr->function,
- lr->start - lr->offset);
- else
- fprintf(stdout, "<%s:%d>\n", lr->file, lr->start);
-
- fp = fopen(lr->path, "r");
- if (fp == NULL)
- die("Failed to open %s: %s", lr->path, strerror(errno));
- /* Skip to starting line number */
- while (l < lr->start)
- show_one_line(fp, l++, true, false);
-
- list_for_each_entry(ln, &lr->line_list, list) {
- while (ln->line > l)
- show_one_line(fp, (l++) - lr->offset, false, false);
- show_one_line(fp, (l++) - lr->offset, false, true);
- }
-
- if (lr->end == INT_MAX)
- lr->end = l + NR_ADDITIONAL_LINES;
- while (l < lr->end && !feof(fp))
- show_one_line(fp, (l++) - lr->offset, false, false);
-
- fclose(fp);
-}
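
The conversion above moves probe-event.c from die()-style aborts to negative errno return values, so callers are now responsible for reporting failures and cleaning up. A minimal sketch of the expected calling convention (the variable names are illustrative assumptions, not code from this patch):

	ret = add_perf_probe_events(pevs, npevs, force_add, max_probe_points);
	if (ret < 0) {
		pr_err("Failed to add probe events: %s\n", strerror(-ret));
		return ret;
	}

The same convention applies to del_perf_probe_events() and show_perf_probe_events(), which now also return int instead of void.
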
diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h
index 711287d4baea..e9db1a214ca4 100644
--- a/tools/perf/util/probe-event.h
+++ b/tools/perf/util/probe-event.h
@@ -2,21 +2,125 @@
#define _PROBE_EVENT_H
#include <stdbool.h>
-#include "probe-finder.h"
#include "strlist.h"
-extern void parse_line_range_desc(const char *arg, struct line_range *lr);
-extern void parse_perf_probe_event(const char *str, struct probe_point *pp,
- bool *need_dwarf);
-extern int synthesize_perf_probe_point(struct probe_point *pp);
-extern int synthesize_perf_probe_event(struct probe_point *pp);
-extern void parse_trace_kprobe_event(const char *str, struct probe_point *pp);
-extern int synthesize_trace_kprobe_event(struct probe_point *pp);
-extern void add_trace_kprobe_events(struct probe_point *probes, int nr_probes,
- bool force_add);
-extern void del_trace_kprobe_events(struct strlist *dellist);
-extern void show_perf_probe_events(void);
-extern void show_line_range(struct line_range *lr);
+extern bool probe_event_dry_run;
+
+/* kprobe-tracer tracing point */
+struct kprobe_trace_point {
+ char *symbol; /* Base symbol */
+ unsigned long offset; /* Offset from symbol */
+ bool retprobe; /* Return probe flag */
+};
+
+/* kprobe-tracer tracing argument referencing offset */
+struct kprobe_trace_arg_ref {
+ struct kprobe_trace_arg_ref *next; /* Next reference */
+ long offset; /* Offset value */
+};
+
+/* kprobe-tracer tracing argument */
+struct kprobe_trace_arg {
+ char *name; /* Argument name */
+ char *value; /* Base value */
+ char *type; /* Type name */
+ struct kprobe_trace_arg_ref *ref; /* Referencing offset */
+};
+
+/* kprobe-tracer tracing event (point + arg) */
+struct kprobe_trace_event {
+ char *event; /* Event name */
+ char *group; /* Group name */
+ struct kprobe_trace_point point; /* Trace point */
+ int nargs; /* Number of args */
+ struct kprobe_trace_arg *args; /* Arguments */
+};
+
+/* Perf probe probing point */
+struct perf_probe_point {
+ char *file; /* File path */
+ char *function; /* Function name */
+ int line; /* Line number */
+ bool retprobe; /* Return probe flag */
+ char *lazy_line; /* Lazy matching pattern */
+ unsigned long offset; /* Offset from function entry */
+};
+
+/* Perf probe probing argument field chain */
+struct perf_probe_arg_field {
+ struct perf_probe_arg_field *next; /* Next field */
+ char *name; /* Name of the field */
+ bool ref; /* Referencing flag */
+};
+
+/* Perf probe probing argument */
+struct perf_probe_arg {
+ char *name; /* Argument name */
+ char *var; /* Variable name */
+ char *type; /* Type name */
+ struct perf_probe_arg_field *field; /* Structure fields */
+};
+
+/* Perf probe probing event (point + arg) */
+struct perf_probe_event {
+ char *event; /* Event name */
+ char *group; /* Group name */
+ struct perf_probe_point point; /* Probe point */
+ int nargs; /* Number of arguments */
+ struct perf_probe_arg *args; /* Arguments */
+};
+
+
+/* Line number container */
+struct line_node {
+ struct list_head list;
+ int line;
+};
+
+/* Line range */
+struct line_range {
+ char *file; /* File name */
+ char *function; /* Function name */
+ int start; /* Start line number */
+ int end; /* End line number */
+ int offset; /* Start line offset */
+ char *path; /* Real path name */
+ struct list_head line_list; /* Visible lines */
+};
+
+/* Command string to events */
+extern int parse_perf_probe_command(const char *cmd,
+ struct perf_probe_event *pev);
+extern int parse_kprobe_trace_command(const char *cmd,
+ struct kprobe_trace_event *tev);
+
+/* Events to command string */
+extern char *synthesize_perf_probe_command(struct perf_probe_event *pev);
+extern char *synthesize_kprobe_trace_command(struct kprobe_trace_event *tev);
+extern int synthesize_perf_probe_arg(struct perf_probe_arg *pa, char *buf,
+ size_t len);
+
+/* Check the perf_probe_event needs debuginfo */
+extern bool perf_probe_event_need_dwarf(struct perf_probe_event *pev);
+
+/* Convert from kprobe_trace_event to perf_probe_event */
+extern int convert_to_perf_probe_event(struct kprobe_trace_event *tev,
+ struct perf_probe_event *pev);
+
+/* Release event contents */
+extern void clear_perf_probe_event(struct perf_probe_event *pev);
+extern void clear_kprobe_trace_event(struct kprobe_trace_event *tev);
+
+/* Command string to line-range */
+extern int parse_line_range_desc(const char *cmd, struct line_range *lr);
+
+
+extern int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
+ bool force_add, int max_probe_points);
+extern int del_perf_probe_events(struct strlist *dellist);
+extern int show_perf_probe_events(void);
+extern int show_line_range(struct line_range *lr);
+
/* Maximum index number of event-name postfix */
#define MAX_EVENT_INDEX 1024
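
To make the new split between perf_probe_event and kprobe_trace_event concrete, here is a hypothetical example (not part of the patch; the group name, symbol, and register are illustrative assumptions) of one converted probe with a single register argument:

	struct kprobe_trace_arg arg = {
		.name	= "file",
		.value	= "%di",	/* register string produced by the DWARF converter */
	};
	struct kprobe_trace_event tev = {
		.event	= "myopen",
		.group	= "probe",
		.point	= { .symbol = "do_sys_open", .offset = 0, .retprobe = false },
		.nargs	= 1,
		.args	= &arg,
	};
	char *cmd = synthesize_kprobe_trace_command(&tev);
	/* cmd should read roughly "p:probe/myopen do_sys_open+0 file=%di",
	 * the format written to the kprobe_events control file. */
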
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index c171a243d05b..562b1443e785 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -31,6 +31,7 @@
#include <string.h>
#include <stdarg.h>
#include <ctype.h>
+#include <dwarf-regs.h>
#include "string.h"
#include "event.h"
@@ -38,57 +39,8 @@
#include "util.h"
#include "probe-finder.h"
-
-/*
- * Generic dwarf analysis helpers
- */
-
-#define X86_32_MAX_REGS 8
-const char *x86_32_regs_table[X86_32_MAX_REGS] = {
- "%ax",
- "%cx",
- "%dx",
- "%bx",
- "$stack", /* Stack address instead of %sp */
- "%bp",
- "%si",
- "%di",
-};
-
-#define X86_64_MAX_REGS 16
-const char *x86_64_regs_table[X86_64_MAX_REGS] = {
- "%ax",
- "%dx",
- "%cx",
- "%bx",
- "%si",
- "%di",
- "%bp",
- "%sp",
- "%r8",
- "%r9",
- "%r10",
- "%r11",
- "%r12",
- "%r13",
- "%r14",
- "%r15",
-};
-
-/* TODO: switching by dwarf address size */
-#ifdef __x86_64__
-#define ARCH_MAX_REGS X86_64_MAX_REGS
-#define arch_regs_table x86_64_regs_table
-#else
-#define ARCH_MAX_REGS X86_32_MAX_REGS
-#define arch_regs_table x86_32_regs_table
-#endif
-
-/* Return architecture dependent register string (for kprobe-tracer) */
-static const char *get_arch_regstr(unsigned int n)
-{
- return (n <= ARCH_MAX_REGS) ? arch_regs_table[n] : NULL;
-}
+/* Kprobe tracer basic type is up to u64 */
+#define MAX_BASIC_TYPE_BITS 64
/*
* Compare the tail of two strings.
@@ -108,7 +60,7 @@ static int strtailcmp(const char *s1, const char *s2)
/* Line number list operations */
/* Add a line to line number list */
-static void line_list__add_line(struct list_head *head, unsigned int line)
+static int line_list__add_line(struct list_head *head, int line)
{
struct line_node *ln;
struct list_head *p;
@@ -119,21 +71,23 @@ static void line_list__add_line(struct list_head *head, unsigned int line)
p = &ln->list;
goto found;
} else if (ln->line == line) /* Already exist */
- return ;
+ return 1;
}
/* List is empty, or the smallest entry */
p = head;
found:
pr_debug("line list: add a line %u\n", line);
ln = zalloc(sizeof(struct line_node));
- DIE_IF(ln == NULL);
+ if (ln == NULL)
+ return -ENOMEM;
ln->line = line;
INIT_LIST_HEAD(&ln->list);
list_add(&ln->list, p);
+ return 0;
}
/* Check if the line in line number list */
-static int line_list__has_line(struct list_head *head, unsigned int line)
+static int line_list__has_line(struct list_head *head, int line)
{
struct line_node *ln;
@@ -184,9 +138,129 @@ static const char *cu_find_realpath(Dwarf_Die *cu_die, const char *fname)
if (strtailcmp(src, fname) == 0)
break;
}
+ if (i == nfiles)
+ return NULL;
return src;
}
+/* Compare diename and tname */
+static bool die_compare_name(Dwarf_Die *dw_die, const char *tname)
+{
+ const char *name;
+ name = dwarf_diename(dw_die);
+ return name ? strcmp(tname, name) : -1;
+}
+
+/* Get type die, but skip qualifiers and typedef */
+static Dwarf_Die *die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
+{
+ Dwarf_Attribute attr;
+ int tag;
+
+ do {
+ if (dwarf_attr(vr_die, DW_AT_type, &attr) == NULL ||
+ dwarf_formref_die(&attr, die_mem) == NULL)
+ return NULL;
+
+ tag = dwarf_tag(die_mem);
+ vr_die = die_mem;
+ } while (tag == DW_TAG_const_type ||
+ tag == DW_TAG_restrict_type ||
+ tag == DW_TAG_volatile_type ||
+ tag == DW_TAG_shared_type ||
+ tag == DW_TAG_typedef);
+
+ return die_mem;
+}
+
+static bool die_is_signed_type(Dwarf_Die *tp_die)
+{
+ Dwarf_Attribute attr;
+ Dwarf_Word ret;
+
+ if (dwarf_attr(tp_die, DW_AT_encoding, &attr) == NULL ||
+ dwarf_formudata(&attr, &ret) != 0)
+ return false;
+
+ return (ret == DW_ATE_signed_char || ret == DW_ATE_signed ||
+ ret == DW_ATE_signed_fixed);
+}
+
+static int die_get_byte_size(Dwarf_Die *tp_die)
+{
+ Dwarf_Attribute attr;
+ Dwarf_Word ret;
+
+ if (dwarf_attr(tp_die, DW_AT_byte_size, &attr) == NULL ||
+ dwarf_formudata(&attr, &ret) != 0)
+ return 0;
+
+ return (int)ret;
+}
+
+/* Get data_member_location offset */
+static int die_get_data_member_location(Dwarf_Die *mb_die, Dwarf_Word *offs)
+{
+ Dwarf_Attribute attr;
+ Dwarf_Op *expr;
+ size_t nexpr;
+ int ret;
+
+ if (dwarf_attr(mb_die, DW_AT_data_member_location, &attr) == NULL)
+ return -ENOENT;
+
+ if (dwarf_formudata(&attr, offs) != 0) {
+ /* DW_AT_data_member_location should be DW_OP_plus_uconst */
+ ret = dwarf_getlocation(&attr, &expr, &nexpr);
+ if (ret < 0 || nexpr == 0)
+ return -ENOENT;
+
+ if (expr[0].atom != DW_OP_plus_uconst || nexpr != 1) {
+ pr_debug("Unable to get offset:Unexpected OP %x (%zd)\n",
+ expr[0].atom, nexpr);
+ return -ENOTSUP;
+ }
+ *offs = (Dwarf_Word)expr[0].number;
+ }
+ return 0;
+}
+
+/* Return values for die_find callbacks */
+enum {
+ DIE_FIND_CB_FOUND = 0, /* End of Search */
+ DIE_FIND_CB_CHILD = 1, /* Search only children */
+ DIE_FIND_CB_SIBLING = 2, /* Search only siblings */
+ DIE_FIND_CB_CONTINUE = 3, /* Search children and siblings */
+};
+
+/* Search a child die */
+static Dwarf_Die *die_find_child(Dwarf_Die *rt_die,
+ int (*callback)(Dwarf_Die *, void *),
+ void *data, Dwarf_Die *die_mem)
+{
+ Dwarf_Die child_die;
+ int ret;
+
+ ret = dwarf_child(rt_die, die_mem);
+ if (ret != 0)
+ return NULL;
+
+ do {
+ ret = callback(die_mem, data);
+ if (ret == DIE_FIND_CB_FOUND)
+ return die_mem;
+
+ if ((ret & DIE_FIND_CB_CHILD) &&
+ die_find_child(die_mem, callback, data, &child_die)) {
+ memcpy(die_mem, &child_die, sizeof(Dwarf_Die));
+ return die_mem;
+ }
+ } while ((ret & DIE_FIND_CB_SIBLING) &&
+ dwarf_siblingof(die_mem, die_mem) == 0);
+
+ return NULL;
+}
+
struct __addr_die_search_param {
Dwarf_Addr addr;
Dwarf_Die *die_mem;
@@ -205,8 +279,8 @@ static int __die_search_func_cb(Dwarf_Die *fn_die, void *data)
}
/* Search a real subprogram including this line, */
-static Dwarf_Die *die_get_real_subprogram(Dwarf_Die *cu_die, Dwarf_Addr addr,
- Dwarf_Die *die_mem)
+static Dwarf_Die *die_find_real_subprogram(Dwarf_Die *cu_die, Dwarf_Addr addr,
+ Dwarf_Die *die_mem)
{
struct __addr_die_search_param ad;
ad.addr = addr;
@@ -218,77 +292,64 @@ static Dwarf_Die *die_get_real_subprogram(Dwarf_Die *cu_die, Dwarf_Addr addr,
return die_mem;
}
-/* Similar to dwarf_getfuncs, but returns inlined_subroutine if exists. */
-static Dwarf_Die *die_get_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
- Dwarf_Die *die_mem)
+/* die_find callback for inline function search */
+static int __die_find_inline_cb(Dwarf_Die *die_mem, void *data)
{
- Dwarf_Die child_die;
- int ret;
+ Dwarf_Addr *addr = data;
- ret = dwarf_child(sp_die, die_mem);
- if (ret != 0)
- return NULL;
+ if (dwarf_tag(die_mem) == DW_TAG_inlined_subroutine &&
+ dwarf_haspc(die_mem, *addr))
+ return DIE_FIND_CB_FOUND;
- do {
- if (dwarf_tag(die_mem) == DW_TAG_inlined_subroutine &&
- dwarf_haspc(die_mem, addr))
- return die_mem;
-
- if (die_get_inlinefunc(die_mem, addr, &child_die)) {
- memcpy(die_mem, &child_die, sizeof(Dwarf_Die));
- return die_mem;
- }
- } while (dwarf_siblingof(die_mem, die_mem) == 0);
-
- return NULL;
+ return DIE_FIND_CB_CONTINUE;
}
-/* Compare diename and tname */
-static bool die_compare_name(Dwarf_Die *dw_die, const char *tname)
+/* Similar to dwarf_getfuncs, but returns inlined_subroutine if exists. */
+static Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
+ Dwarf_Die *die_mem)
{
- const char *name;
- name = dwarf_diename(dw_die);
- DIE_IF(name == NULL);
- return strcmp(tname, name);
+ return die_find_child(sp_die, __die_find_inline_cb, &addr, die_mem);
}
-/* Get entry pc(or low pc, 1st entry of ranges) of the die */
-static Dwarf_Addr die_get_entrypc(Dwarf_Die *dw_die)
+static int __die_find_variable_cb(Dwarf_Die *die_mem, void *data)
{
- Dwarf_Addr epc;
- int ret;
+ const char *name = data;
+ int tag;
- ret = dwarf_entrypc(dw_die, &epc);
- DIE_IF(ret == -1);
- return epc;
+ tag = dwarf_tag(die_mem);
+ if ((tag == DW_TAG_formal_parameter ||
+ tag == DW_TAG_variable) &&
+ (die_compare_name(die_mem, name) == 0))
+ return DIE_FIND_CB_FOUND;
+
+ return DIE_FIND_CB_CONTINUE;
}
-/* Get a variable die */
+/* Find a variable called 'name' */
static Dwarf_Die *die_find_variable(Dwarf_Die *sp_die, const char *name,
Dwarf_Die *die_mem)
{
- Dwarf_Die child_die;
- int tag;
- int ret;
+ return die_find_child(sp_die, __die_find_variable_cb, (void *)name,
+ die_mem);
+}
- ret = dwarf_child(sp_die, die_mem);
- if (ret != 0)
- return NULL;
+static int __die_find_member_cb(Dwarf_Die *die_mem, void *data)
+{
+ const char *name = data;
- do {
- tag = dwarf_tag(die_mem);
- if ((tag == DW_TAG_formal_parameter ||
- tag == DW_TAG_variable) &&
- (die_compare_name(die_mem, name) == 0))
- return die_mem;
+ if ((dwarf_tag(die_mem) == DW_TAG_member) &&
+ (die_compare_name(die_mem, name) == 0))
+ return DIE_FIND_CB_FOUND;
- if (die_find_variable(die_mem, name, &child_die)) {
- memcpy(die_mem, &child_die, sizeof(Dwarf_Die));
- return die_mem;
- }
- } while (dwarf_siblingof(die_mem, die_mem) == 0);
+ return DIE_FIND_CB_SIBLING;
+}
- return NULL;
+/* Find a member called 'name' */
+static Dwarf_Die *die_find_member(Dwarf_Die *st_die, const char *name,
+ Dwarf_Die *die_mem)
+{
+ return die_find_child(st_die, __die_find_member_cb, (void *)name,
+ die_mem);
}
/*
@@ -296,19 +357,22 @@ static Dwarf_Die *die_find_variable(Dwarf_Die *sp_die, const char *name,
*/
/* Show a location */
-static void show_location(Dwarf_Op *op, struct probe_finder *pf)
+static int convert_location(Dwarf_Op *op, struct probe_finder *pf)
{
unsigned int regn;
Dwarf_Word offs = 0;
- int deref = 0, ret;
+ bool ref = false;
const char *regs;
+ struct kprobe_trace_arg *tvar = pf->tvar;
- /* TODO: support CFA */
/* If this is based on frame buffer, set the offset */
if (op->atom == DW_OP_fbreg) {
- if (pf->fb_ops == NULL)
- die("The attribute of frame base is not supported.\n");
- deref = 1;
+ if (pf->fb_ops == NULL) {
+ pr_warning("The attribute of frame base is not "
+ "supported.\n");
+ return -ENOTSUP;
+ }
+ ref = true;
offs = op->number;
op = &pf->fb_ops[0];
}
@@ -316,35 +380,164 @@ static void show_location(Dwarf_Op *op, struct probe_finder *pf)
if (op->atom >= DW_OP_breg0 && op->atom <= DW_OP_breg31) {
regn = op->atom - DW_OP_breg0;
offs += op->number;
- deref = 1;
+ ref = true;
} else if (op->atom >= DW_OP_reg0 && op->atom <= DW_OP_reg31) {
regn = op->atom - DW_OP_reg0;
} else if (op->atom == DW_OP_bregx) {
regn = op->number;
offs += op->number2;
- deref = 1;
+ ref = true;
} else if (op->atom == DW_OP_regx) {
regn = op->number;
- } else
- die("DW_OP %d is not supported.", op->atom);
+ } else {
+ pr_warning("DW_OP %x is not supported.\n", op->atom);
+ return -ENOTSUP;
+ }
regs = get_arch_regstr(regn);
- if (!regs)
- die("%u exceeds max register number.", regn);
+ if (!regs) {
+ pr_warning("Mapping for DWARF register number %u missing on this architecture.", regn);
+ return -ERANGE;
+ }
+
+ tvar->value = strdup(regs);
+ if (tvar->value == NULL)
+ return -ENOMEM;
+
+ if (ref) {
+ tvar->ref = zalloc(sizeof(struct kprobe_trace_arg_ref));
+ if (tvar->ref == NULL)
+ return -ENOMEM;
+ tvar->ref->offset = (long)offs;
+ }
+ return 0;
+}
+
+static int convert_variable_type(Dwarf_Die *vr_die,
+ struct kprobe_trace_arg *targ)
+{
+ Dwarf_Die type;
+ char buf[16];
+ int ret;
+
+ if (die_get_real_type(vr_die, &type) == NULL) {
+ pr_warning("Failed to get a type information of %s.\n",
+ dwarf_diename(vr_die));
+ return -ENOENT;
+ }
+
+ ret = die_get_byte_size(&type) * 8;
+ if (ret) {
+ /* Check the bitwidth */
+ if (ret > MAX_BASIC_TYPE_BITS) {
+ pr_info("%s exceeds max-bitwidth."
+ " Cut down to %d bits.\n",
+ dwarf_diename(&type), MAX_BASIC_TYPE_BITS);
+ ret = MAX_BASIC_TYPE_BITS;
+ }
+
+ ret = snprintf(buf, 16, "%c%d",
+ die_is_signed_type(&type) ? 's' : 'u', ret);
+ if (ret < 0 || ret >= 16) {
+ if (ret >= 16)
+ ret = -E2BIG;
+ pr_warning("Failed to convert variable type: %s\n",
+ strerror(-ret));
+ return ret;
+ }
+ targ->type = strdup(buf);
+ if (targ->type == NULL)
+ return -ENOMEM;
+ }
+ return 0;
+}
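
For illustration of the type string synthesized above, assuming typical type sizes on x86_64 (a hedged example, not from the patch):

	/*
	 * A signed 'int' has die_get_byte_size() == 4 and is signed, so the
	 * synthesized type string is "s32"; an 'unsigned long' on a 64-bit
	 * kernel yields "u64". Anything wider than MAX_BASIC_TYPE_BITS (64)
	 * is clamped to 64 bits before the string is built.
	 */
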
+
+static int convert_variable_fields(Dwarf_Die *vr_die, const char *varname,
+ struct perf_probe_arg_field *field,
+ struct kprobe_trace_arg_ref **ref_ptr,
+ Dwarf_Die *die_mem)
+{
+ struct kprobe_trace_arg_ref *ref = *ref_ptr;
+ Dwarf_Die type;
+ Dwarf_Word offs;
+ int ret;
+
+ pr_debug("converting %s in %s\n", field->name, varname);
+ if (die_get_real_type(vr_die, &type) == NULL) {
+ pr_warning("Failed to get the type of %s.\n", varname);
+ return -ENOENT;
+ }
+
+ /* Check the pointer and dereference */
+ if (dwarf_tag(&type) == DW_TAG_pointer_type) {
+ if (!field->ref) {
+ pr_err("Semantic error: %s must be referred by '->'\n",
+ field->name);
+ return -EINVAL;
+ }
+ /* Get the type pointed by this pointer */
+ if (die_get_real_type(&type, &type) == NULL) {
+ pr_warning("Failed to get the type of %s.\n", varname);
+ return -ENOENT;
+ }
+ /* Verify it is a data structure */
+ if (dwarf_tag(&type) != DW_TAG_structure_type) {
+ pr_warning("%s is not a data structure.\n", varname);
+ return -EINVAL;
+ }
+
+ ref = zalloc(sizeof(struct kprobe_trace_arg_ref));
+ if (ref == NULL)
+ return -ENOMEM;
+ if (*ref_ptr)
+ (*ref_ptr)->next = ref;
+ else
+ *ref_ptr = ref;
+ } else {
+ /* Verify it is a data structure */
+ if (dwarf_tag(&type) != DW_TAG_structure_type) {
+ pr_warning("%s is not a data structure.\n", varname);
+ return -EINVAL;
+ }
+ if (field->ref) {
+ pr_err("Semantic error: %s must be referred by '.'\n",
+ field->name);
+ return -EINVAL;
+ }
+ if (!ref) {
+ pr_warning("Structure on a register is not "
+ "supported yet.\n");
+ return -ENOTSUP;
+ }
+ }
+
+ if (die_find_member(&type, field->name, die_mem) == NULL) {
+ pr_warning("%s(tyep:%s) has no member %s.\n", varname,
+ dwarf_diename(&type), field->name);
+ return -EINVAL;
+ }
- if (deref)
- ret = snprintf(pf->buf, pf->len, " %s=%+jd(%s)",
- pf->var, (intmax_t)offs, regs);
+ /* Get the offset of the field */
+ ret = die_get_data_member_location(die_mem, &offs);
+ if (ret < 0) {
+ pr_warning("Failed to get the offset of %s.\n", field->name);
+ return ret;
+ }
+ ref->offset += (long)offs;
+
+ /* Converting next field */
+ if (field->next)
+ return convert_variable_fields(die_mem, field->name,
+ field->next, &ref, die_mem);
else
- ret = snprintf(pf->buf, pf->len, " %s=%s", pf->var, regs);
- DIE_IF(ret < 0);
- DIE_IF(ret >= pf->len);
+ return 0;
}
/* Show a variables in kprobe event format */
-static void show_variable(Dwarf_Die *vr_die, struct probe_finder *pf)
+static int convert_variable(Dwarf_Die *vr_die, struct probe_finder *pf)
{
Dwarf_Attribute attr;
+ Dwarf_Die die_mem;
Dwarf_Op *expr;
size_t nexpr;
int ret;
@@ -356,142 +549,191 @@ static void show_variable(Dwarf_Die *vr_die, struct probe_finder *pf)
if (ret <= 0 || nexpr == 0)
goto error;
- show_location(expr, pf);
+ ret = convert_location(expr, pf);
+ if (ret == 0 && pf->pvar->field) {
+ ret = convert_variable_fields(vr_die, pf->pvar->var,
+ pf->pvar->field, &pf->tvar->ref,
+ &die_mem);
+ vr_die = &die_mem;
+ }
+ if (ret == 0) {
+ if (pf->pvar->type) {
+ pf->tvar->type = strdup(pf->pvar->type);
+ if (pf->tvar->type == NULL)
+ ret = -ENOMEM;
+ } else
+ ret = convert_variable_type(vr_die, pf->tvar);
+ }
/* *expr will be cached in libdw. Don't free it. */
- return ;
+ return ret;
error:
/* TODO: Support const_value */
- die("Failed to find the location of %s at this address.\n"
- " Perhaps, it has been optimized out.", pf->var);
+ pr_err("Failed to find the location of %s at this address.\n"
+ " Perhaps, it has been optimized out.\n", pf->pvar->var);
+ return -ENOENT;
}
/* Find a variable in a subprogram die */
-static void find_variable(Dwarf_Die *sp_die, struct probe_finder *pf)
+static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf)
{
- int ret;
Dwarf_Die vr_die;
+ char buf[32], *ptr;
+ int ret;
- /* TODO: Support struct members and arrays */
- if (!is_c_varname(pf->var)) {
- /* Output raw parameters */
- ret = snprintf(pf->buf, pf->len, " %s", pf->var);
- DIE_IF(ret < 0);
- DIE_IF(ret >= pf->len);
- return ;
+ /* TODO: Support arrays */
+ if (pf->pvar->name)
+ pf->tvar->name = strdup(pf->pvar->name);
+ else {
+ ret = synthesize_perf_probe_arg(pf->pvar, buf, 32);
+ if (ret < 0)
+ return ret;
+ ptr = strchr(buf, ':'); /* Change type separator to _ */
+ if (ptr)
+ *ptr = '_';
+ pf->tvar->name = strdup(buf);
+ }
+ if (pf->tvar->name == NULL)
+ return -ENOMEM;
+
+ if (!is_c_varname(pf->pvar->var)) {
+ /* Copy raw parameters */
+ pf->tvar->value = strdup(pf->pvar->var);
+ if (pf->tvar->value == NULL)
+ return -ENOMEM;
+ else
+ return 0;
}
- pr_debug("Searching '%s' variable in context.\n", pf->var);
+ pr_debug("Searching '%s' variable in context.\n",
+ pf->pvar->var);
/* Search child die for local variables and parameters. */
- if (!die_find_variable(sp_die, pf->var, &vr_die))
- die("Failed to find '%s' in this function.", pf->var);
-
- show_variable(&vr_die, pf);
+ if (!die_find_variable(sp_die, pf->pvar->var, &vr_die)) {
+ pr_warning("Failed to find '%s' in this function.\n",
+ pf->pvar->var);
+ return -ENOENT;
+ }
+ return convert_variable(&vr_die, pf);
}
/* Show a probe point to output buffer */
-static void show_probe_point(Dwarf_Die *sp_die, struct probe_finder *pf)
+static int convert_probe_point(Dwarf_Die *sp_die, struct probe_finder *pf)
{
- struct probe_point *pp = pf->pp;
+ struct kprobe_trace_event *tev;
Dwarf_Addr eaddr;
Dwarf_Die die_mem;
const char *name;
- char tmp[MAX_PROBE_BUFFER];
- int ret, i, len;
+ int ret, i;
Dwarf_Attribute fb_attr;
size_t nops;
+ if (pf->ntevs == pf->max_tevs) {
+ pr_warning("Too many( > %d) probe point found.\n",
+ pf->max_tevs);
+ return -ERANGE;
+ }
+ tev = &pf->tevs[pf->ntevs++];
+
/* If no real subprogram, find a real one */
if (!sp_die || dwarf_tag(sp_die) != DW_TAG_subprogram) {
- sp_die = die_get_real_subprogram(&pf->cu_die,
+ sp_die = die_find_real_subprogram(&pf->cu_die,
pf->addr, &die_mem);
- if (!sp_die)
- die("Probe point is not found in subprograms.");
+ if (!sp_die) {
+ pr_warning("Failed to find probe point in any "
+ "functions.\n");
+ return -ENOENT;
+ }
}
- /* Output name of probe point */
+ /* Copy the name of probe point */
name = dwarf_diename(sp_die);
if (name) {
- dwarf_entrypc(sp_die, &eaddr);
- ret = snprintf(tmp, MAX_PROBE_BUFFER, "%s+%lu", name,
- (unsigned long)(pf->addr - eaddr));
- /* Copy the function name if possible */
- if (!pp->function) {
- pp->function = strdup(name);
- pp->offset = (size_t)(pf->addr - eaddr);
+ if (dwarf_entrypc(sp_die, &eaddr) != 0) {
+ pr_warning("Failed to get entry pc of %s\n",
+ dwarf_diename(sp_die));
+ return -ENOENT;
}
- } else {
+ tev->point.symbol = strdup(name);
+ if (tev->point.symbol == NULL)
+ return -ENOMEM;
+ tev->point.offset = (unsigned long)(pf->addr - eaddr);
+ } else
/* This function has no name. */
- ret = snprintf(tmp, MAX_PROBE_BUFFER, "0x%jx",
- (uintmax_t)pf->addr);
- if (!pp->function) {
- /* TODO: Use _stext */
- pp->function = strdup("");
- pp->offset = (size_t)pf->addr;
- }
- }
- DIE_IF(ret < 0);
- DIE_IF(ret >= MAX_PROBE_BUFFER);
- len = ret;
- pr_debug("Probe point found: %s\n", tmp);
+ tev->point.offset = (unsigned long)pf->addr;
+
+ pr_debug("Probe point found: %s+%lu\n", tev->point.symbol,
+ tev->point.offset);
/* Get the frame base attribute/ops */
dwarf_attr(sp_die, DW_AT_frame_base, &fb_attr);
ret = dwarf_getlocation_addr(&fb_attr, pf->addr, &pf->fb_ops, &nops, 1);
- if (ret <= 0 || nops == 0)
+ if (ret <= 0 || nops == 0) {
pf->fb_ops = NULL;
+ } else if (nops == 1 && pf->fb_ops[0].atom == DW_OP_call_frame_cfa &&
+ pf->cfi != NULL) {
+ Dwarf_Frame *frame;
+ if (dwarf_cfi_addrframe(pf->cfi, pf->addr, &frame) != 0 ||
+ dwarf_frame_cfa(frame, &pf->fb_ops, &nops) != 0) {
+ pr_warning("Failed to get CFA on 0x%jx\n",
+ (uintmax_t)pf->addr);
+ return -ENOENT;
+ }
+ }
/* Find each argument */
- /* TODO: use dwarf_cfi_addrframe */
- for (i = 0; i < pp->nr_args; i++) {
- pf->var = pp->args[i];
- pf->buf = &tmp[len];
- pf->len = MAX_PROBE_BUFFER - len;
- find_variable(sp_die, pf);
- len += strlen(pf->buf);
+ tev->nargs = pf->pev->nargs;
+ tev->args = zalloc(sizeof(struct kprobe_trace_arg) * tev->nargs);
+ if (tev->args == NULL)
+ return -ENOMEM;
+ for (i = 0; i < pf->pev->nargs; i++) {
+ pf->pvar = &pf->pev->args[i];
+ pf->tvar = &tev->args[i];
+ ret = find_variable(sp_die, pf);
+ if (ret != 0)
+ return ret;
}
/* *pf->fb_ops will be cached in libdw. Don't free it. */
pf->fb_ops = NULL;
-
- if (pp->found == MAX_PROBES)
- die("Too many( > %d) probe point found.\n", MAX_PROBES);
-
- pp->probes[pp->found] = strdup(tmp);
- pp->found++;
+ return 0;
}
/* Find probe point from its line number */
-static void find_probe_point_by_line(struct probe_finder *pf)
+static int find_probe_point_by_line(struct probe_finder *pf)
{
Dwarf_Lines *lines;
Dwarf_Line *line;
size_t nlines, i;
Dwarf_Addr addr;
int lineno;
- int ret;
+ int ret = 0;
- ret = dwarf_getsrclines(&pf->cu_die, &lines, &nlines);
- DIE_IF(ret != 0);
+ if (dwarf_getsrclines(&pf->cu_die, &lines, &nlines) != 0) {
+ pr_warning("No source lines found in this CU.\n");
+ return -ENOENT;
+ }
- for (i = 0; i < nlines; i++) {
+ for (i = 0; i < nlines && ret == 0; i++) {
line = dwarf_onesrcline(lines, i);
- dwarf_lineno(line, &lineno);
- if (lineno != pf->lno)
+ if (dwarf_lineno(line, &lineno) != 0 ||
+ lineno != pf->lno)
continue;
/* TODO: Get fileno from line, but how? */
if (strtailcmp(dwarf_linesrc(line, NULL, NULL), pf->fname) != 0)
continue;
- ret = dwarf_lineaddr(line, &addr);
- DIE_IF(ret != 0);
+ if (dwarf_lineaddr(line, &addr) != 0) {
+ pr_warning("Failed to get the address of the line.\n");
+ return -ENOENT;
+ }
pr_debug("Probe line found: line[%d]:%d addr:0x%jx\n",
(int)i, lineno, (uintmax_t)addr);
pf->addr = addr;
- show_probe_point(NULL, pf);
+ ret = convert_probe_point(NULL, pf);
/* Continuing, because target line might be inlined. */
}
+ return ret;
}
/* Find lines which match lazy pattern */
@@ -499,16 +741,27 @@ static int find_lazy_match_lines(struct list_head *head,
const char *fname, const char *pat)
{
char *fbuf, *p1, *p2;
- int fd, line, nlines = 0;
+ int fd, ret, line, nlines = 0;
struct stat st;
fd = open(fname, O_RDONLY);
- if (fd < 0)
- die("failed to open %s", fname);
- DIE_IF(fstat(fd, &st) < 0);
- fbuf = malloc(st.st_size + 2);
- DIE_IF(fbuf == NULL);
- DIE_IF(read(fd, fbuf, st.st_size) < 0);
+ if (fd < 0) {
+ pr_warning("Failed to open %s: %s\n", fname, strerror(-fd));
+ return fd;
+ }
+
+ ret = fstat(fd, &st);
+ if (ret < 0) {
+ pr_warning("Failed to get the size of %s: %s\n",
+ fname, strerror(errno));
+ return ret;
+ }
+ fbuf = xmalloc(st.st_size + 2);
+ ret = read(fd, fbuf, st.st_size);
+ if (ret < 0) {
+ pr_warning("Failed to read %s: %s\n", fname, strerror(errno));
+ return ret;
+ }
close(fd);
fbuf[st.st_size] = '\n'; /* Dummy line */
fbuf[st.st_size + 1] = '\0';
@@ -528,7 +781,7 @@ static int find_lazy_match_lines(struct list_head *head,
}
/* Find probe points from lazy pattern */
-static void find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf)
+static int find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf)
{
Dwarf_Lines *lines;
Dwarf_Line *line;
@@ -536,37 +789,46 @@ static void find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf)
Dwarf_Addr addr;
Dwarf_Die die_mem;
int lineno;
- int ret;
+ int ret = 0;
if (list_empty(&pf->lcache)) {
/* Matching lazy line pattern */
ret = find_lazy_match_lines(&pf->lcache, pf->fname,
- pf->pp->lazy_line);
- if (ret <= 0)
- die("No matched lines found in %s.", pf->fname);
+ pf->pev->point.lazy_line);
+ if (ret == 0) {
+ pr_debug("No matched lines found in %s.\n", pf->fname);
+ return 0;
+ } else if (ret < 0)
+ return ret;
}
- ret = dwarf_getsrclines(&pf->cu_die, &lines, &nlines);
- DIE_IF(ret != 0);
- for (i = 0; i < nlines; i++) {
+ if (dwarf_getsrclines(&pf->cu_die, &lines, &nlines) != 0) {
+ pr_warning("No source lines found in this CU.\n");
+ return -ENOENT;
+ }
+
+ for (i = 0; i < nlines && ret >= 0; i++) {
line = dwarf_onesrcline(lines, i);
- dwarf_lineno(line, &lineno);
- if (!line_list__has_line(&pf->lcache, lineno))
+ if (dwarf_lineno(line, &lineno) != 0 ||
+ !line_list__has_line(&pf->lcache, lineno))
continue;
/* TODO: Get fileno from line, but how? */
if (strtailcmp(dwarf_linesrc(line, NULL, NULL), pf->fname) != 0)
continue;
- ret = dwarf_lineaddr(line, &addr);
- DIE_IF(ret != 0);
+ if (dwarf_lineaddr(line, &addr) != 0) {
+ pr_debug("Failed to get the address of line %d.\n",
+ lineno);
+ continue;
+ }
if (sp_die) {
/* Address filtering 1: does sp_die include addr? */
if (!dwarf_haspc(sp_die, addr))
continue;
/* Address filtering 2: No child include addr? */
- if (die_get_inlinefunc(sp_die, addr, &die_mem))
+ if (die_find_inlinefunc(sp_die, addr, &die_mem))
continue;
}
@@ -574,27 +836,44 @@ static void find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf)
(int)i, lineno, (unsigned long long)addr);
pf->addr = addr;
- show_probe_point(sp_die, pf);
+ ret = convert_probe_point(sp_die, pf);
/* Continuing, because target line might be inlined. */
}
/* TODO: deallocate lines, but how? */
+ return ret;
}
+/* Callback parameter with return value */
+struct dwarf_callback_param {
+ void *data;
+ int retval;
+};
+
static int probe_point_inline_cb(Dwarf_Die *in_die, void *data)
{
- struct probe_finder *pf = (struct probe_finder *)data;
- struct probe_point *pp = pf->pp;
+ struct dwarf_callback_param *param = data;
+ struct probe_finder *pf = param->data;
+ struct perf_probe_point *pp = &pf->pev->point;
+ Dwarf_Addr addr;
if (pp->lazy_line)
- find_probe_point_lazy(in_die, pf);
+ param->retval = find_probe_point_lazy(in_die, pf);
else {
/* Get probe address */
- pf->addr = die_get_entrypc(in_die);
+ if (dwarf_entrypc(in_die, &addr) != 0) {
+ pr_warning("Failed to get entry pc of %s.\n",
+ dwarf_diename(in_die));
+ param->retval = -ENOENT;
+ return DWARF_CB_ABORT;
+ }
+ pf->addr = addr;
pf->addr += pp->offset;
pr_debug("found inline addr: 0x%jx\n",
(uintmax_t)pf->addr);
- show_probe_point(in_die, pf);
+ param->retval = convert_probe_point(in_die, pf);
+ if (param->retval < 0)
+ return DWARF_CB_ABORT;
}
return DWARF_CB_OK;
@@ -603,59 +882,88 @@ static int probe_point_inline_cb(Dwarf_Die *in_die, void *data)
/* Search function from function name */
static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
{
- struct probe_finder *pf = (struct probe_finder *)data;
- struct probe_point *pp = pf->pp;
+ struct dwarf_callback_param *param = data;
+ struct probe_finder *pf = param->data;
+ struct perf_probe_point *pp = &pf->pev->point;
/* Check tag and diename */
if (dwarf_tag(sp_die) != DW_TAG_subprogram ||
die_compare_name(sp_die, pp->function) != 0)
- return 0;
+ return DWARF_CB_OK;
pf->fname = dwarf_decl_file(sp_die);
if (pp->line) { /* Function relative line */
dwarf_decl_line(sp_die, &pf->lno);
pf->lno += pp->line;
- find_probe_point_by_line(pf);
+ param->retval = find_probe_point_by_line(pf);
} else if (!dwarf_func_inline(sp_die)) {
/* Real function */
if (pp->lazy_line)
- find_probe_point_lazy(sp_die, pf);
+ param->retval = find_probe_point_lazy(sp_die, pf);
else {
- pf->addr = die_get_entrypc(sp_die);
+ if (dwarf_entrypc(sp_die, &pf->addr) != 0) {
+ pr_warning("Failed to get entry pc of %s.\n",
+ dwarf_diename(sp_die));
+ param->retval = -ENOENT;
+ return DWARF_CB_ABORT;
+ }
pf->addr += pp->offset;
/* TODO: Check the address in this function */
- show_probe_point(sp_die, pf);
+ param->retval = convert_probe_point(sp_die, pf);
}
- } else
+ } else {
+ struct dwarf_callback_param _param = {.data = (void *)pf,
+ .retval = 0};
/* Inlined function: search instances */
- dwarf_func_inline_instances(sp_die, probe_point_inline_cb, pf);
+ dwarf_func_inline_instances(sp_die, probe_point_inline_cb,
+ &_param);
+ param->retval = _param.retval;
+ }
- return 1; /* Exit; no same symbol in this CU. */
+ return DWARF_CB_ABORT; /* Exit; no same symbol in this CU. */
}
-static void find_probe_point_by_func(struct probe_finder *pf)
+static int find_probe_point_by_func(struct probe_finder *pf)
{
- dwarf_getfuncs(&pf->cu_die, probe_point_search_cb, pf, 0);
+ struct dwarf_callback_param _param = {.data = (void *)pf,
+ .retval = 0};
+ dwarf_getfuncs(&pf->cu_die, probe_point_search_cb, &_param, 0);
+ return _param.retval;
}
-/* Find a probe point */
-int find_probe_point(int fd, struct probe_point *pp)
+/* Find kprobe_trace_events specified by perf_probe_event from debuginfo */
+int find_kprobe_trace_events(int fd, struct perf_probe_event *pev,
+ struct kprobe_trace_event **tevs, int max_tevs)
{
- struct probe_finder pf = {.pp = pp};
+ struct probe_finder pf = {.pev = pev, .max_tevs = max_tevs};
+ struct perf_probe_point *pp = &pev->point;
Dwarf_Off off, noff;
size_t cuhl;
Dwarf_Die *diep;
Dwarf *dbg;
+ int ret = 0;
+
+ pf.tevs = zalloc(sizeof(struct kprobe_trace_event) * max_tevs);
+ if (pf.tevs == NULL)
+ return -ENOMEM;
+ *tevs = pf.tevs;
+ pf.ntevs = 0;
dbg = dwarf_begin(fd, DWARF_C_READ);
- if (!dbg)
- return -ENOENT;
+ if (!dbg) {
+ pr_warning("No dwarf info found in the vmlinux - "
+ "please rebuild with CONFIG_DEBUG_INFO=y.\n");
+ return -EBADF;
+ }
+
+ /* Get the call frame information from this dwarf */
+ pf.cfi = dwarf_getcfi(dbg);
- pp->found = 0;
off = 0;
line_list__init(&pf.lcache);
/* Loop on CUs (Compilation Unit) */
- while (!dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL)) {
+ while (!dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL) &&
+ ret >= 0) {
/* Get the DIE(Debugging Information Entry) of this CU */
diep = dwarf_offdie(dbg, off + cuhl, &pf.cu_die);
if (!diep)
@@ -669,12 +977,12 @@ int find_probe_point(int fd, struct probe_point *pp)
if (!pp->file || pf.fname) {
if (pp->function)
- find_probe_point_by_func(&pf);
+ ret = find_probe_point_by_func(&pf);
else if (pp->lazy_line)
- find_probe_point_lazy(NULL, &pf);
+ ret = find_probe_point_lazy(NULL, &pf);
else {
pf.lno = pp->line;
- find_probe_point_by_line(&pf);
+ ret = find_probe_point_by_line(&pf);
}
}
off = noff;
@@ -682,41 +990,169 @@ int find_probe_point(int fd, struct probe_point *pp)
line_list__free(&pf.lcache);
dwarf_end(dbg);
- return pp->found;
+ return (ret < 0) ? ret : pf.ntevs;
+}
+
+/* Reverse search */
+int find_perf_probe_point(int fd, unsigned long addr,
+ struct perf_probe_point *ppt)
+{
+ Dwarf_Die cudie, spdie, indie;
+ Dwarf *dbg;
+ Dwarf_Line *line;
+ Dwarf_Addr laddr, eaddr;
+ const char *tmp;
+ int lineno, ret = 0;
+ bool found = false;
+
+ dbg = dwarf_begin(fd, DWARF_C_READ);
+ if (!dbg)
+ return -EBADF;
+
+ /* Find cu die */
+ if (!dwarf_addrdie(dbg, (Dwarf_Addr)addr, &cudie)) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ /* Find a corresponding line */
+ line = dwarf_getsrc_die(&cudie, (Dwarf_Addr)addr);
+ if (line) {
+ if (dwarf_lineaddr(line, &laddr) == 0 &&
+ (Dwarf_Addr)addr == laddr &&
+ dwarf_lineno(line, &lineno) == 0) {
+ tmp = dwarf_linesrc(line, NULL, NULL);
+ if (tmp) {
+ ppt->line = lineno;
+ ppt->file = strdup(tmp);
+ if (ppt->file == NULL) {
+ ret = -ENOMEM;
+ goto end;
+ }
+ found = true;
+ }
+ }
+ }
+
+ /* Find a corresponding function */
+ if (die_find_real_subprogram(&cudie, (Dwarf_Addr)addr, &spdie)) {
+ tmp = dwarf_diename(&spdie);
+ if (!tmp || dwarf_entrypc(&spdie, &eaddr) != 0)
+ goto end;
+
+ if (ppt->line) {
+ if (die_find_inlinefunc(&spdie, (Dwarf_Addr)addr,
+ &indie)) {
+ /* addr in an inline function */
+ tmp = dwarf_diename(&indie);
+ if (!tmp)
+ goto end;
+ ret = dwarf_decl_line(&indie, &lineno);
+ } else {
+ if (eaddr == addr) { /* Function entry */
+ lineno = ppt->line;
+ ret = 0;
+ } else
+ ret = dwarf_decl_line(&spdie, &lineno);
+ }
+ if (ret == 0) {
+ /* Make a relative line number */
+ ppt->line -= lineno;
+ goto found;
+ }
+ }
+ /* We don't have a line number, let's use offset */
+ ppt->offset = addr - (unsigned long)eaddr;
+found:
+ ppt->function = strdup(tmp);
+ if (ppt->function == NULL) {
+ ret = -ENOMEM;
+ goto end;
+ }
+ found = true;
+ }
+
+end:
+ dwarf_end(dbg);
+ if (ret >= 0)
+ ret = found ? 1 : 0;
+ return ret;
+}
+
+/* Add a line and store the src path */
+static int line_range_add_line(const char *src, unsigned int lineno,
+ struct line_range *lr)
+{
+ /* Copy real path */
+ if (!lr->path) {
+ lr->path = strdup(src);
+ if (lr->path == NULL)
+ return -ENOMEM;
+ }
+ return line_list__add_line(&lr->line_list, lineno);
+}
+
+/* Search function declaration lines */
+static int line_range_funcdecl_cb(Dwarf_Die *sp_die, void *data)
+{
+ struct dwarf_callback_param *param = data;
+ struct line_finder *lf = param->data;
+ const char *src;
+ int lineno;
+
+ src = dwarf_decl_file(sp_die);
+ if (src && strtailcmp(src, lf->fname) != 0)
+ return DWARF_CB_OK;
+
+ if (dwarf_decl_line(sp_die, &lineno) != 0 ||
+ (lf->lno_s > lineno || lf->lno_e < lineno))
+ return DWARF_CB_OK;
+
+ param->retval = line_range_add_line(src, lineno, lf->lr);
+ if (param->retval < 0)
+ return DWARF_CB_ABORT;
+ return DWARF_CB_OK;
+}
+
+static int find_line_range_func_decl_lines(struct line_finder *lf)
+{
+ struct dwarf_callback_param param = {.data = (void *)lf, .retval = 0};
+ dwarf_getfuncs(&lf->cu_die, line_range_funcdecl_cb, &param, 0);
+ return param.retval;
}
/* Find line range from its line number */
-static void find_line_range_by_line(Dwarf_Die *sp_die, struct line_finder *lf)
+static int find_line_range_by_line(Dwarf_Die *sp_die, struct line_finder *lf)
{
Dwarf_Lines *lines;
Dwarf_Line *line;
size_t nlines, i;
Dwarf_Addr addr;
- int lineno;
- int ret;
+ int lineno, ret = 0;
const char *src;
Dwarf_Die die_mem;
line_list__init(&lf->lr->line_list);
- ret = dwarf_getsrclines(&lf->cu_die, &lines, &nlines);
- DIE_IF(ret != 0);
+ if (dwarf_getsrclines(&lf->cu_die, &lines, &nlines) != 0) {
+ pr_warning("No source lines found in this CU.\n");
+ return -ENOENT;
+ }
+ /* Search probable lines on lines list */
for (i = 0; i < nlines; i++) {
line = dwarf_onesrcline(lines, i);
- ret = dwarf_lineno(line, &lineno);
- DIE_IF(ret != 0);
- if (lf->lno_s > lineno || lf->lno_e < lineno)
+ if (dwarf_lineno(line, &lineno) != 0 ||
+ (lf->lno_s > lineno || lf->lno_e < lineno))
continue;
if (sp_die) {
/* Address filtering 1: does sp_die include addr? */
- ret = dwarf_lineaddr(line, &addr);
- DIE_IF(ret != 0);
- if (!dwarf_haspc(sp_die, addr))
+ if (dwarf_lineaddr(line, &addr) != 0 ||
+ !dwarf_haspc(sp_die, addr))
continue;
/* Address filtering 2: No child include addr? */
- if (die_get_inlinefunc(sp_die, addr, &die_mem))
+ if (die_find_inlinefunc(sp_die, addr, &die_mem))
continue;
}
@@ -725,30 +1161,49 @@ static void find_line_range_by_line(Dwarf_Die *sp_die, struct line_finder *lf)
if (strtailcmp(src, lf->fname) != 0)
continue;
- /* Copy real path */
- if (!lf->lr->path)
- lf->lr->path = strdup(src);
- line_list__add_line(&lf->lr->line_list, (unsigned int)lineno);
+ ret = line_range_add_line(src, lineno, lf->lr);
+ if (ret < 0)
+ return ret;
}
+
+ /*
+ * Dwarf lines doesn't include function declarations. We have to
+ * check functions list or given function.
+ */
+ if (sp_die) {
+ src = dwarf_decl_file(sp_die);
+ if (src && dwarf_decl_line(sp_die, &lineno) == 0 &&
+ (lf->lno_s <= lineno && lf->lno_e >= lineno))
+ ret = line_range_add_line(src, lineno, lf->lr);
+ } else
+ ret = find_line_range_func_decl_lines(lf);
+
/* Update status */
- if (!list_empty(&lf->lr->line_list))
- lf->found = 1;
+ if (ret >= 0)
+ if (!list_empty(&lf->lr->line_list))
+ ret = lf->found = 1;
+ else
+ ret = 0; /* Lines are not found */
else {
free(lf->lr->path);
lf->lr->path = NULL;
}
+ return ret;
}
static int line_range_inline_cb(Dwarf_Die *in_die, void *data)
{
- find_line_range_by_line(in_die, (struct line_finder *)data);
+ struct dwarf_callback_param *param = data;
+
+ param->retval = find_line_range_by_line(in_die, param->data);
return DWARF_CB_ABORT; /* No need to find other instances */
}
/* Search function from function name */
static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
{
- struct line_finder *lf = (struct line_finder *)data;
+ struct dwarf_callback_param *param = data;
+ struct line_finder *lf = param->data;
struct line_range *lr = lf->lr;
if (dwarf_tag(sp_die) == DW_TAG_subprogram &&
@@ -757,44 +1212,55 @@ static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
dwarf_decl_line(sp_die, &lr->offset);
pr_debug("fname: %s, lineno:%d\n", lf->fname, lr->offset);
lf->lno_s = lr->offset + lr->start;
- if (!lr->end)
+ if (lf->lno_s < 0) /* Overflow */
+ lf->lno_s = INT_MAX;
+ lf->lno_e = lr->offset + lr->end;
+ if (lf->lno_e < 0) /* Overflow */
lf->lno_e = INT_MAX;
- else
- lf->lno_e = lr->offset + lr->end;
+ pr_debug("New line range: %d to %d\n", lf->lno_s, lf->lno_e);
lr->start = lf->lno_s;
lr->end = lf->lno_e;
- if (dwarf_func_inline(sp_die))
+ if (dwarf_func_inline(sp_die)) {
+ struct dwarf_callback_param _param;
+ _param.data = (void *)lf;
+ _param.retval = 0;
dwarf_func_inline_instances(sp_die,
- line_range_inline_cb, lf);
- else
- find_line_range_by_line(sp_die, lf);
- return 1;
+ line_range_inline_cb,
+ &_param);
+ param->retval = _param.retval;
+ } else
+ param->retval = find_line_range_by_line(sp_die, lf);
+ return DWARF_CB_ABORT;
}
- return 0;
+ return DWARF_CB_OK;
}
-static void find_line_range_by_func(struct line_finder *lf)
+static int find_line_range_by_func(struct line_finder *lf)
{
- dwarf_getfuncs(&lf->cu_die, line_range_search_cb, lf, 0);
+ struct dwarf_callback_param param = {.data = (void *)lf, .retval = 0};
+ dwarf_getfuncs(&lf->cu_die, line_range_search_cb, &param, 0);
+ return param.retval;
}
int find_line_range(int fd, struct line_range *lr)
{
struct line_finder lf = {.lr = lr, .found = 0};
- int ret;
+ int ret = 0;
Dwarf_Off off = 0, noff;
size_t cuhl;
Dwarf_Die *diep;
Dwarf *dbg;
dbg = dwarf_begin(fd, DWARF_C_READ);
- if (!dbg)
- return -ENOENT;
+ if (!dbg) {
+ pr_warning("No dwarf info found in the vmlinux - "
+ "please rebuild with CONFIG_DEBUG_INFO=y.\n");
+ return -EBADF;
+ }
/* Loop on CUs (Compilation Unit) */
- while (!lf.found) {
- ret = dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL);
- if (ret != 0)
+ while (!lf.found && ret >= 0) {
+ if (dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL) != 0)
break;
/* Get the DIE(Debugging Information Entry) of this CU */
@@ -810,20 +1276,18 @@ int find_line_range(int fd, struct line_range *lr)
if (!lr->file || lf.fname) {
if (lr->function)
- find_line_range_by_func(&lf);
+ ret = find_line_range_by_func(&lf);
else {
lf.lno_s = lr->start;
- if (!lr->end)
- lf.lno_e = INT_MAX;
- else
- lf.lno_e = lr->end;
- find_line_range_by_line(NULL, &lf);
+ lf.lno_e = lr->end;
+ ret = find_line_range_by_line(NULL, &lf);
}
}
off = noff;
}
pr_debug("path: %lx\n", (unsigned long)lr->path);
dwarf_end(dbg);
- return lf.found;
+
+ return (ret < 0) ? ret : lf.found;
}
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index 21f7354397b4..66f1980e3855 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -3,6 +3,7 @@
#include <stdbool.h>
#include "util.h"
+#include "probe-event.h"
#define MAX_PATH_LEN 256
#define MAX_PROBE_BUFFER 1024
@@ -14,67 +15,39 @@ static inline int is_c_varname(const char *name)
return isalpha(name[0]) || name[0] == '_';
}
-struct probe_point {
- char *event; /* Event name */
- char *group; /* Event group */
+#ifdef DWARF_SUPPORT
+/* Find kprobe_trace_events specified by perf_probe_event from debuginfo */
+extern int find_kprobe_trace_events(int fd, struct perf_probe_event *pev,
+ struct kprobe_trace_event **tevs,
+ int max_tevs);
- /* Inputs */
- char *file; /* File name */
- int line; /* Line number */
- char *lazy_line; /* Lazy line pattern */
+/* Find a perf_probe_point from debuginfo */
+extern int find_perf_probe_point(int fd, unsigned long addr,
+ struct perf_probe_point *ppt);
- char *function; /* Function name */
- int offset; /* Offset bytes */
-
- int nr_args; /* Number of arguments */
- char **args; /* Arguments */
-
- int retprobe; /* Return probe */
-
- /* Output */
- int found; /* Number of found probe points */
- char *probes[MAX_PROBES]; /* Output buffers (will be allocated)*/
-};
-
-/* Line number container */
-struct line_node {
- struct list_head list;
- unsigned int line;
-};
-
-/* Line range */
-struct line_range {
- char *file; /* File name */
- char *function; /* Function name */
- unsigned int start; /* Start line number */
- unsigned int end; /* End line number */
- int offset; /* Start line offset */
- char *path; /* Real path name */
- struct list_head line_list; /* Visible lines */
-};
-
-#ifndef NO_DWARF_SUPPORT
-extern int find_probe_point(int fd, struct probe_point *pp);
extern int find_line_range(int fd, struct line_range *lr);
#include <dwarf.h>
#include <libdw.h>
struct probe_finder {
- struct probe_point *pp; /* Target probe point */
+ struct perf_probe_event *pev; /* Target probe event */
+ struct kprobe_trace_event *tevs; /* Result trace events */
+ int ntevs; /* Number of trace events */
+ int max_tevs; /* Max number of trace events */
/* For function searching */
- Dwarf_Addr addr; /* Address */
- const char *fname; /* File name */
int lno; /* Line number */
+ Dwarf_Addr addr; /* Address */
+ const char *fname; /* Real file name */
Dwarf_Die cu_die; /* Current CU */
+ struct list_head lcache; /* Line cache for lazy match */
/* For variable searching */
+ Dwarf_CFI *cfi; /* Call Frame Information */
Dwarf_Op *fb_ops; /* Frame base attribute */
- const char *var; /* Current variable name */
- char *buf; /* Current output buffer */
- int len; /* Length of output buffer */
- struct list_head lcache; /* Line cache for lazy match */
+ struct perf_probe_arg *pvar; /* Current target variable */
+ struct kprobe_trace_arg *tvar; /* Current result variable */
};
struct line_finder {
@@ -87,6 +60,6 @@ struct line_finder {
int found;
};
-#endif /* NO_DWARF_SUPPORT */
+#endif /* DWARF_SUPPORT */
#endif /*_PROBE_FINDER_H */
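
Since find_kprobe_trace_events() and find_perf_probe_point() are only declared under DWARF_SUPPORT, callers are expected to degrade gracefully when perf is built without libdw. A sketch of the assumed call pattern (hypothetical caller, not taken from this patch):

#ifdef DWARF_SUPPORT
	ntevs = find_kprobe_trace_events(fd, pev, &tevs, max_tevs);
	if (ntevs > 0)
		return ntevs;	/* probes resolved via debuginfo */
#endif
	/* otherwise fall back to plain symbol + offset probing */
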
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index 5376378e0cfc..b059dc50cc2d 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -371,7 +371,6 @@ static int perl_start_script(const char *script, int argc, const char **argv)
run_start_sub();
free(command_line);
- fprintf(stderr, "perf trace started with Perl script %s\n\n", script);
return 0;
error:
perl_free(my_perl);
@@ -394,8 +393,6 @@ static int perl_stop_script(void)
perl_destruct(my_perl);
perl_free(my_perl);
- fprintf(stderr, "\nperf trace Perl script stopped\n");
-
return 0;
}
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 6a72f14c5986..81f39cab3aaa 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -374,8 +374,6 @@ static int python_start_script(const char *script, int argc, const char **argv)
}
free(command_line);
- fprintf(stderr, "perf trace started with Python script %s\n\n",
- script);
return err;
error:
@@ -407,8 +405,6 @@ out:
Py_XDECREF(main_module);
Py_Finalize();
- fprintf(stderr, "\nperf trace Python script stopped\n");
-
return err;
}
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index eed1cb889008..72a7f6ae0293 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -14,6 +14,16 @@ static int perf_session__open(struct perf_session *self, bool force)
{
struct stat input_stat;
+ if (!strcmp(self->filename, "-")) {
+ self->fd_pipe = true;
+ self->fd = STDIN_FILENO;
+
+ if (perf_header__read(self, self->fd) < 0)
+ pr_err("incompatible file format");
+
+ return 0;
+ }
+
self->fd = open(self->filename, O_RDONLY);
if (self->fd < 0) {
pr_err("failed to open file: %s", self->filename);
@@ -38,7 +48,7 @@ static int perf_session__open(struct perf_session *self, bool force)
goto out_close;
}
- if (perf_header__read(&self->header, self->fd) < 0) {
+ if (perf_header__read(self, self->fd) < 0) {
pr_err("incompatible file format");
goto out_close;
}
@@ -52,12 +62,21 @@ out_close:
return -1;
}
-static inline int perf_session__create_kernel_maps(struct perf_session *self)
+void perf_session__update_sample_type(struct perf_session *self)
+{
+ self->sample_type = perf_header__sample_type(&self->header);
+}
+
+int perf_session__create_kernel_maps(struct perf_session *self)
{
- return map_groups__create_kernel_maps(&self->kmaps, self->vmlinux_maps);
+ int ret = machine__create_kernel_maps(&self->host_machine);
+
+ if (ret >= 0)
+ ret = machines__create_guest_kernel_maps(&self->machines);
+ return ret;
}
-struct perf_session *perf_session__new(const char *filename, int mode, bool force)
+struct perf_session *perf_session__new(const char *filename, int mode, bool force, bool repipe)
{
size_t len = filename ? strlen(filename) + 1 : 0;
struct perf_session *self = zalloc(sizeof(*self) + len);
@@ -70,13 +89,16 @@ struct perf_session *perf_session__new(const char *filename, int mode, bool forc
memcpy(self->filename, filename, len);
self->threads = RB_ROOT;
- self->stats_by_id = RB_ROOT;
+ self->hists_tree = RB_ROOT;
self->last_match = NULL;
self->mmap_window = 32;
self->cwd = NULL;
self->cwdlen = 0;
self->unknown_events = 0;
- map_groups__init(&self->kmaps);
+ self->machines = RB_ROOT;
+ self->repipe = repipe;
+ INIT_LIST_HEAD(&self->ordered_samples.samples_head);
+ machine__init(&self->host_machine, "", HOST_KERNEL_ID);
if (mode == O_RDONLY) {
if (perf_session__open(self, force) < 0)
@@ -90,7 +112,7 @@ struct perf_session *perf_session__new(const char *filename, int mode, bool forc
goto out_delete;
}
- self->sample_type = perf_header__sample_type(&self->header);
+ perf_session__update_sample_type(self);
out:
return self;
out_free:
@@ -117,22 +139,17 @@ static bool symbol__match_parent_regex(struct symbol *sym)
return 0;
}
-struct symbol **perf_session__resolve_callchain(struct perf_session *self,
- struct thread *thread,
- struct ip_callchain *chain,
- struct symbol **parent)
+struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
+ struct thread *thread,
+ struct ip_callchain *chain,
+ struct symbol **parent)
{
u8 cpumode = PERF_RECORD_MISC_USER;
- struct symbol **syms = NULL;
unsigned int i;
+ struct map_symbol *syms = calloc(chain->nr, sizeof(*syms));
- if (symbol_conf.use_callchain) {
- syms = calloc(chain->nr, sizeof(*syms));
- if (!syms) {
- fprintf(stderr, "Can't allocate memory for symbols\n");
- exit(-1);
- }
- }
+ if (!syms)
+ return NULL;
for (i = 0; i < chain->nr; i++) {
u64 ip = chain->ips[i];
@@ -152,15 +169,17 @@ struct symbol **perf_session__resolve_callchain(struct perf_session *self,
continue;
}
+ al.filtered = false;
thread__find_addr_location(thread, self, cpumode,
- MAP__FUNCTION, ip, &al, NULL);
+ MAP__FUNCTION, thread->pid, ip, &al, NULL);
if (al.sym != NULL) {
if (sort__has_parent && !*parent &&
symbol__match_parent_regex(al.sym))
*parent = al.sym;
if (!symbol_conf.use_callchain)
break;
- syms[i] = al.sym;
+ syms[i].map = al.map;
+ syms[i].sym = al.sym;
}
}
@@ -174,6 +193,18 @@ static int process_event_stub(event_t *event __used,
return 0;
}
+static int process_finished_round_stub(event_t *event __used,
+ struct perf_session *session __used,
+ struct perf_event_ops *ops __used)
+{
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
+static int process_finished_round(event_t *event,
+ struct perf_session *session,
+ struct perf_event_ops *ops);
+
static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
{
if (handler->sample == NULL)
@@ -194,6 +225,20 @@ static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
handler->throttle = process_event_stub;
if (handler->unthrottle == NULL)
handler->unthrottle = process_event_stub;
+ if (handler->attr == NULL)
+ handler->attr = process_event_stub;
+ if (handler->event_type == NULL)
+ handler->event_type = process_event_stub;
+ if (handler->tracing_data == NULL)
+ handler->tracing_data = process_event_stub;
+ if (handler->build_id == NULL)
+ handler->build_id = process_event_stub;
+ if (handler->finished_round == NULL) {
+ if (handler->ordered_samples)
+ handler->finished_round = process_finished_round;
+ else
+ handler->finished_round = process_finished_round_stub;
+ }
}
static const char *event__name[] = {
@@ -207,16 +252,23 @@ static const char *event__name[] = {
[PERF_RECORD_FORK] = "FORK",
[PERF_RECORD_READ] = "READ",
[PERF_RECORD_SAMPLE] = "SAMPLE",
+ [PERF_RECORD_HEADER_ATTR] = "ATTR",
+ [PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE",
+ [PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
+ [PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID",
};
-unsigned long event__total[PERF_RECORD_MAX];
+unsigned long event__total[PERF_RECORD_HEADER_MAX];
void event__print_totals(void)
{
int i;
- for (i = 0; i < PERF_RECORD_MAX; ++i)
+ for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
+ if (!event__name[i])
+ continue;
pr_info("%10s events: %10ld\n",
event__name[i], event__total[i]);
+ }
}
void mem_bswap_64(void *src, int byte_size)
@@ -270,6 +322,37 @@ static void event__read_swap(event_t *self)
self->read.id = bswap_64(self->read.id);
}
+static void event__attr_swap(event_t *self)
+{
+ size_t size;
+
+ self->attr.attr.type = bswap_32(self->attr.attr.type);
+ self->attr.attr.size = bswap_32(self->attr.attr.size);
+ self->attr.attr.config = bswap_64(self->attr.attr.config);
+ self->attr.attr.sample_period = bswap_64(self->attr.attr.sample_period);
+ self->attr.attr.sample_type = bswap_64(self->attr.attr.sample_type);
+ self->attr.attr.read_format = bswap_64(self->attr.attr.read_format);
+ self->attr.attr.wakeup_events = bswap_32(self->attr.attr.wakeup_events);
+ self->attr.attr.bp_type = bswap_32(self->attr.attr.bp_type);
+ self->attr.attr.bp_addr = bswap_64(self->attr.attr.bp_addr);
+ self->attr.attr.bp_len = bswap_64(self->attr.attr.bp_len);
+
+ size = self->header.size;
+ size -= (void *)&self->attr.id - (void *)self;
+ mem_bswap_64(self->attr.id, size);
+}
+
+static void event__event_type_swap(event_t *self)
+{
+ self->event_type.event_type.event_id =
+ bswap_64(self->event_type.event_type.event_id);
+}
+
+static void event__tracing_data_swap(event_t *self)
+{
+ self->tracing_data.size = bswap_32(self->tracing_data.size);
+}
+
typedef void (*event__swap_op)(event_t *self);
static event__swap_op event__swap_ops[] = {
@@ -280,9 +363,212 @@ static event__swap_op event__swap_ops[] = {
[PERF_RECORD_LOST] = event__all64_swap,
[PERF_RECORD_READ] = event__read_swap,
[PERF_RECORD_SAMPLE] = event__all64_swap,
- [PERF_RECORD_MAX] = NULL,
+ [PERF_RECORD_HEADER_ATTR] = event__attr_swap,
+ [PERF_RECORD_HEADER_EVENT_TYPE] = event__event_type_swap,
+ [PERF_RECORD_HEADER_TRACING_DATA] = event__tracing_data_swap,
+ [PERF_RECORD_HEADER_BUILD_ID] = NULL,
+ [PERF_RECORD_HEADER_MAX] = NULL,
+};
+
+struct sample_queue {
+ u64 timestamp;
+ struct sample_event *event;
+ struct list_head list;
};
+static void flush_sample_queue(struct perf_session *s,
+ struct perf_event_ops *ops)
+{
+ struct list_head *head = &s->ordered_samples.samples_head;
+ u64 limit = s->ordered_samples.next_flush;
+ struct sample_queue *tmp, *iter;
+
+ if (!ops->ordered_samples || !limit)
+ return;
+
+ list_for_each_entry_safe(iter, tmp, head, list) {
+ if (iter->timestamp > limit)
+ return;
+
+ if (iter == s->ordered_samples.last_inserted)
+ s->ordered_samples.last_inserted = NULL;
+
+ ops->sample((event_t *)iter->event, s);
+
+ s->ordered_samples.last_flush = iter->timestamp;
+ list_del(&iter->list);
+ free(iter->event);
+ free(iter);
+ }
+}
+
+/*
+ * When perf record finishes a pass on every buffer, it records this pseudo
+ * event.
+ * We record the max timestamp t found in pass n.
+ * Assuming these timestamps are monotonic across cpus, we know that if
+ * a buffer still has events with timestamps below t, they will all be
+ * available and read in pass n + 1.
+ * Hence when we start to read pass n + 2, we can safely flush every
+ * event with a timestamp below t.
+ *
+ * ============ PASS n =================
+ * CPU 0 | CPU 1
+ * |
+ * cnt1 timestamps | cnt2 timestamps
+ * 1 | 2
+ * 2 | 3
+ * - | 4 <--- max recorded
+ *
+ * ============ PASS n + 1 ==============
+ * CPU 0 | CPU 1
+ * |
+ * cnt1 timestamps | cnt2 timestamps
+ * 3 | 5
+ * 4 | 6
+ * 5 | 7 <---- max recorded
+ *
+ * Flush every event below timestamp 4
+ *
+ * ============ PASS n + 2 ==============
+ * CPU 0 | CPU 1
+ * |
+ * cnt1 timestamps | cnt2 timestamps
+ * 6 | 8
+ * 7 | 9
+ * - | 10
+ *
+ * Flush every event below timestamp 7
+ * etc...
+ */
+static int process_finished_round(event_t *event __used,
+ struct perf_session *session,
+ struct perf_event_ops *ops)
+{
+ flush_sample_queue(session, ops);
+ session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;
+
+ return 0;
+}
+
+static void __queue_sample_end(struct sample_queue *new, struct list_head *head)
+{
+ struct sample_queue *iter;
+
+ list_for_each_entry_reverse(iter, head, list) {
+ if (iter->timestamp < new->timestamp) {
+ list_add(&new->list, &iter->list);
+ return;
+ }
+ }
+
+ list_add(&new->list, head);
+}
+
+static void __queue_sample_before(struct sample_queue *new,
+ struct sample_queue *iter,
+ struct list_head *head)
+{
+ list_for_each_entry_continue_reverse(iter, head, list) {
+ if (iter->timestamp < new->timestamp) {
+ list_add(&new->list, &iter->list);
+ return;
+ }
+ }
+
+ list_add(&new->list, head);
+}
+
+static void __queue_sample_after(struct sample_queue *new,
+ struct sample_queue *iter,
+ struct list_head *head)
+{
+ list_for_each_entry_continue(iter, head, list) {
+ if (iter->timestamp > new->timestamp) {
+ list_add_tail(&new->list, &iter->list);
+ return;
+ }
+ }
+ list_add_tail(&new->list, head);
+}
+
+/* The queue is ordered by time */
+static void __queue_sample_event(struct sample_queue *new,
+ struct perf_session *s)
+{
+ struct sample_queue *last_inserted = s->ordered_samples.last_inserted;
+ struct list_head *head = &s->ordered_samples.samples_head;
+
+
+ if (!last_inserted) {
+ __queue_sample_end(new, head);
+ return;
+ }
+
+ /*
+ * Most of the time the current event has a timestamp
+ * very close to the last event inserted, unless we just switched
+ * to another event buffer. Sorting with a list, starting from the
+ * last inserted event (which is usually close to the current one),
+ * is probably more efficient than an rbtree-based sort.
+ */
+ if (last_inserted->timestamp >= new->timestamp)
+ __queue_sample_before(new, last_inserted, head);
+ else
+ __queue_sample_after(new, last_inserted, head);
+}
+
+static int queue_sample_event(event_t *event, struct sample_data *data,
+ struct perf_session *s)
+{
+ u64 timestamp = data->time;
+ struct sample_queue *new;
+
+
+ if (timestamp < s->ordered_samples.last_flush) {
+ printf("Warning: Timestamp below last timeslice flush\n");
+ return -EINVAL;
+ }
+
+ new = malloc(sizeof(*new));
+ if (!new)
+ return -ENOMEM;
+
+ new->timestamp = timestamp;
+
+ new->event = malloc(event->header.size);
+ if (!new->event) {
+ free(new);
+ return -ENOMEM;
+ }
+
+ memcpy(new->event, event, event->header.size);
+
+ __queue_sample_event(new, s);
+ s->ordered_samples.last_inserted = new;
+
+ if (new->timestamp > s->ordered_samples.max_timestamp)
+ s->ordered_samples.max_timestamp = new->timestamp;
+
+ return 0;
+}
+
+static int perf_session__process_sample(event_t *event, struct perf_session *s,
+ struct perf_event_ops *ops)
+{
+ struct sample_data data;
+
+ if (!ops->ordered_samples)
+ return ops->sample(event, s);
+
+ bzero(&data, sizeof(struct sample_data));
+ event__parse_sample(event, s->sample_type, &data);
+
+ queue_sample_event(event, &data, s);
+
+ return 0;
+}
+
static int perf_session__process_event(struct perf_session *self,
event_t *event,
struct perf_event_ops *ops,
@@ -290,7 +576,7 @@ static int perf_session__process_event(struct perf_session *self,
{
trace_event(event);
- if (event->header.type < PERF_RECORD_MAX) {
+ if (event->header.type < PERF_RECORD_HEADER_MAX) {
dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
offset + head, event->header.size,
event__name[event->header.type]);
@@ -303,7 +589,7 @@ static int perf_session__process_event(struct perf_session *self,
switch (event->header.type) {
case PERF_RECORD_SAMPLE:
- return ops->sample(event, self);
+ return perf_session__process_sample(event, self, ops);
case PERF_RECORD_MMAP:
return ops->mmap(event, self);
case PERF_RECORD_COMM:
@@ -320,6 +606,18 @@ static int perf_session__process_event(struct perf_session *self,
return ops->throttle(event, self);
case PERF_RECORD_UNTHROTTLE:
return ops->unthrottle(event, self);
+ case PERF_RECORD_HEADER_ATTR:
+ return ops->attr(event, self);
+ case PERF_RECORD_HEADER_EVENT_TYPE:
+ return ops->event_type(event, self);
+ case PERF_RECORD_HEADER_TRACING_DATA:
+ /* setup for reading amidst mmap */
+ lseek(self->fd, offset + head, SEEK_SET);
+ return ops->tracing_data(event, self);
+ case PERF_RECORD_HEADER_BUILD_ID:
+ return ops->build_id(event, self);
+ case PERF_RECORD_FINISHED_ROUND:
+ return ops->finished_round(event, self, ops);
default:
self->unknown_events++;
return -1;
@@ -333,56 +631,114 @@ void perf_event_header__bswap(struct perf_event_header *self)
self->size = bswap_16(self->size);
}
-int perf_header__read_build_ids(struct perf_header *self,
- int input, u64 offset, u64 size)
+static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
- struct build_id_event bev;
- char filename[PATH_MAX];
- u64 limit = offset + size;
- int err = -1;
-
- while (offset < limit) {
- struct dso *dso;
- ssize_t len;
- struct list_head *head = &dsos__user;
+ struct thread *thread = perf_session__findnew(self, 0);
- if (read(input, &bev, sizeof(bev)) != sizeof(bev))
- goto out;
+ if (thread == NULL || thread__set_comm(thread, "swapper")) {
+ pr_err("problem inserting idle task.\n");
+ thread = NULL;
+ }
- if (self->needs_swap)
- perf_event_header__bswap(&bev.header);
+ return thread;
+}
- len = bev.header.size - sizeof(bev);
- if (read(input, filename, len) != len)
- goto out;
+int do_read(int fd, void *buf, size_t size)
+{
+ void *buf_start = buf;
- if (bev.header.misc & PERF_RECORD_MISC_KERNEL)
- head = &dsos__kernel;
+ while (size) {
+ int ret = read(fd, buf, size);
- dso = __dsos__findnew(head, filename);
- if (dso != NULL) {
- dso__set_build_id(dso, &bev.build_id);
- if (head == &dsos__kernel && filename[0] == '[')
- dso->kernel = 1;
- }
+ if (ret <= 0)
+ return ret;
- offset += bev.header.size;
+ size -= ret;
+ buf += ret;
}
- err = 0;
-out:
- return err;
+
+ return buf - buf_start;
}
-static struct thread *perf_session__register_idle_thread(struct perf_session *self)
+#define session_done() (*(volatile int *)(&session_done))
+volatile int session_done;
+
+static int __perf_session__process_pipe_events(struct perf_session *self,
+ struct perf_event_ops *ops)
{
- struct thread *thread = perf_session__findnew(self, 0);
+ event_t event;
+ uint32_t size;
+ int skip = 0;
+ u64 head;
+ int err;
+ void *p;
- if (thread == NULL || thread__set_comm(thread, "swapper")) {
- pr_err("problem inserting idle task.\n");
- thread = NULL;
+ perf_event_ops__fill_defaults(ops);
+
+ head = 0;
+more:
+ err = do_read(self->fd, &event, sizeof(struct perf_event_header));
+ if (err <= 0) {
+ if (err == 0)
+ goto done;
+
+ pr_err("failed to read event header\n");
+ goto out_err;
}
- return thread;
+ if (self->header.needs_swap)
+ perf_event_header__bswap(&event.header);
+
+ size = event.header.size;
+ if (size == 0)
+ size = 8;
+
+ p = &event;
+ p += sizeof(struct perf_event_header);
+
+ if (size - sizeof(struct perf_event_header)) {
+ err = do_read(self->fd, p,
+ size - sizeof(struct perf_event_header));
+ if (err <= 0) {
+ if (err == 0) {
+ pr_err("unexpected end of event stream\n");
+ goto done;
+ }
+
+ pr_err("failed to read event data\n");
+ goto out_err;
+ }
+ }
+
+ if (size == 0 ||
+ (skip = perf_session__process_event(self, &event, ops,
+ 0, head)) < 0) {
+ dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
+ head, event.header.size, event.header.type);
+ /*
+ * assume we lost track of the stream, check alignment, and
+ * increment a single u64 in the hope of catching on again 'soon'.
+ */
+ if (unlikely(head & 7))
+ head &= ~7ULL;
+
+ size = 8;
+ }
+
+ head += size;
+
+ dump_printf("\n%#Lx [%#x]: event: %d\n",
+ head, event.header.size, event.header.type);
+
+ if (skip > 0)
+ head += skip;
+
+ if (!session_done())
+ goto more;
+done:
+ err = 0;
+out_err:
+ return err;
}
int __perf_session__process_events(struct perf_session *self,
@@ -396,6 +752,10 @@ int __perf_session__process_events(struct perf_session *self,
event_t *event;
uint32_t size;
char *buf;
+ struct ui_progress *progress = ui_progress__new("Processing events...",
+ self->size);
+ if (progress == NULL)
+ return -1;
perf_event_ops__fill_defaults(ops);
@@ -424,6 +784,7 @@ remap:
more:
event = (event_t *)(buf + head);
+ ui_progress__update(progress, offset);
if (self->header.needs_swap)
perf_event_header__bswap(&event->header);
@@ -473,7 +834,11 @@ more:
goto more;
done:
err = 0;
+ /* do the final flush for ordered samples */
+ self->ordered_samples.next_flush = ULLONG_MAX;
+ flush_sample_queue(self, ops);
out_err:
+ ui_progress__delete(progress);
return err;
}
@@ -502,9 +867,13 @@ out_getcwd_err:
self->cwdlen = strlen(self->cwd);
}
- err = __perf_session__process_events(self, self->header.data_offset,
- self->header.data_size,
- self->size, ops);
+ if (!self->fd_pipe)
+ err = __perf_session__process_events(self,
+ self->header.data_offset,
+ self->header.data_size,
+ self->size, ops);
+ else
+ err = __perf_session__process_pipe_events(self, ops);
out_err:
return err;
}
@@ -519,56 +888,41 @@ bool perf_session__has_traces(struct perf_session *self, const char *msg)
return true;
}
-int perf_session__set_kallsyms_ref_reloc_sym(struct perf_session *self,
+int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
const char *symbol_name,
u64 addr)
{
char *bracket;
enum map_type i;
+ struct ref_reloc_sym *ref;
- self->ref_reloc_sym.name = strdup(symbol_name);
- if (self->ref_reloc_sym.name == NULL)
+ ref = zalloc(sizeof(struct ref_reloc_sym));
+ if (ref == NULL)
return -ENOMEM;
- bracket = strchr(self->ref_reloc_sym.name, ']');
+ ref->name = strdup(symbol_name);
+ if (ref->name == NULL) {
+ free(ref);
+ return -ENOMEM;
+ }
+
+ bracket = strchr(ref->name, ']');
if (bracket)
*bracket = '\0';
- self->ref_reloc_sym.addr = addr;
+ ref->addr = addr;
for (i = 0; i < MAP__NR_TYPES; ++i) {
- struct kmap *kmap = map__kmap(self->vmlinux_maps[i]);
- kmap->ref_reloc_sym = &self->ref_reloc_sym;
+ struct kmap *kmap = map__kmap(maps[i]);
+ kmap->ref_reloc_sym = ref;
}
return 0;
}
-static u64 map__reloc_map_ip(struct map *map, u64 ip)
-{
- return ip + (s64)map->pgoff;
-}
-
-static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
-{
- return ip - (s64)map->pgoff;
-}
-
-void map__reloc_vmlinux(struct map *self)
+size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
- struct kmap *kmap = map__kmap(self);
- s64 reloc;
-
- if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr)
- return;
-
- reloc = (kmap->ref_reloc_sym->unrelocated_addr -
- kmap->ref_reloc_sym->addr);
-
- if (!reloc)
- return;
-
- self->map_ip = map__reloc_map_ip;
- self->unmap_ip = map__reloc_unmap_ip;
- self->pgoff = reloc;
+ return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
+ __dsos__fprintf(&self->host_machine.user_dsos, fp) +
+ machines__fprintf_dsos(&self->machines, fp);
}
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 5c33417eebb3..46190f94b547 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -1,6 +1,7 @@
#ifndef __PERF_SESSION_H
#define __PERF_SESSION_H
+#include "hist.h"
#include "event.h"
#include "header.h"
#include "symbol.h"
@@ -8,45 +9,71 @@
#include <linux/rbtree.h>
#include "../../../include/linux/perf_event.h"
+struct sample_queue;
struct ip_callchain;
struct thread;
+struct ordered_samples {
+ u64 last_flush;
+ u64 next_flush;
+ u64 max_timestamp;
+ struct list_head samples_head;
+ struct sample_queue *last_inserted;
+};
+
struct perf_session {
struct perf_header header;
unsigned long size;
unsigned long mmap_window;
- struct map_groups kmaps;
struct rb_root threads;
struct thread *last_match;
- struct map *vmlinux_maps[MAP__NR_TYPES];
- struct events_stats events_stats;
- struct rb_root stats_by_id;
+ struct machine host_machine;
+ struct rb_root machines;
+ struct rb_root hists_tree;
unsigned long event_total[PERF_RECORD_MAX];
unsigned long unknown_events;
- struct rb_root hists;
+ /*
+ * FIXME: should point to the first entry in hists_tree and
+ * be a hists instance. Right now it's only 'report'
+ * that is using ->hists_tree while all the rest use
+ * ->hists.
+ */
+ struct hists hists;
u64 sample_type;
- struct ref_reloc_sym ref_reloc_sym;
int fd;
+ bool fd_pipe;
+ bool repipe;
int cwdlen;
char *cwd;
+ struct ordered_samples ordered_samples;
char filename[0];
};
+struct perf_event_ops;
+
typedef int (*event_op)(event_t *self, struct perf_session *session);
+typedef int (*event_op2)(event_t *self, struct perf_session *session,
+ struct perf_event_ops *ops);
struct perf_event_ops {
- event_op sample,
- mmap,
- comm,
- fork,
- exit,
- lost,
- read,
- throttle,
- unthrottle;
+ event_op sample,
+ mmap,
+ comm,
+ fork,
+ exit,
+ lost,
+ read,
+ throttle,
+ unthrottle,
+ attr,
+ event_type,
+ tracing_data,
+ build_id;
+ event_op2 finished_round;
+ bool ordered_samples;
};
-struct perf_session *perf_session__new(const char *filename, int mode, bool force);
+struct perf_session *perf_session__new(const char *filename, int mode, bool force, bool repipe);
void perf_session__delete(struct perf_session *self);
void perf_event_header__bswap(struct perf_event_header *self);
@@ -57,33 +84,75 @@ int __perf_session__process_events(struct perf_session *self,
int perf_session__process_events(struct perf_session *self,
struct perf_event_ops *event_ops);
-struct symbol **perf_session__resolve_callchain(struct perf_session *self,
- struct thread *thread,
- struct ip_callchain *chain,
- struct symbol **parent);
+struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
+ struct thread *thread,
+ struct ip_callchain *chain,
+ struct symbol **parent);
bool perf_session__has_traces(struct perf_session *self, const char *msg);
-int perf_header__read_build_ids(struct perf_header *self, int input,
- u64 offset, u64 file_size);
-
-int perf_session__set_kallsyms_ref_reloc_sym(struct perf_session *self,
+int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
const char *symbol_name,
u64 addr);
void mem_bswap_64(void *src, int byte_size);
-static inline int __perf_session__create_kernel_maps(struct perf_session *self,
- struct dso *kernel)
+int perf_session__create_kernel_maps(struct perf_session *self);
+
+int do_read(int fd, void *buf, size_t size);
+void perf_session__update_sample_type(struct perf_session *self);
+
+#ifdef NO_NEWT_SUPPORT
+static inline int perf_session__browse_hists(struct rb_root *hists __used,
+ u64 nr_hists __used,
+ u64 session_total __used,
+ const char *helpline __used,
+ const char *input_name __used)
{
- return __map_groups__create_kernel_maps(&self->kmaps,
- self->vmlinux_maps, kernel);
+ return 0;
}
+#else
+int perf_session__browse_hists(struct rb_root *hists, u64 nr_hists,
+ u64 session_total, const char *helpline,
+ const char *input_name);
+#endif
+
+static inline
+struct machine *perf_session__find_host_machine(struct perf_session *self)
+{
+ return &self->host_machine;
+}
+
+static inline
+struct machine *perf_session__find_machine(struct perf_session *self, pid_t pid)
+{
+ if (pid == HOST_KERNEL_ID)
+ return &self->host_machine;
+ return machines__find(&self->machines, pid);
+}
+
+static inline
+struct machine *perf_session__findnew_machine(struct perf_session *self, pid_t pid)
+{
+ if (pid == HOST_KERNEL_ID)
+ return &self->host_machine;
+ return machines__findnew(&self->machines, pid);
+}
+
+static inline
+void perf_session__process_machines(struct perf_session *self,
+ machine__process_t process)
+{
+ process(&self->host_machine, self);
+ return machines__process(&self->machines, process, self);
+}
+
+size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp);
-static inline struct map *
- perf_session__new_module_map(struct perf_session *self,
- u64 start, const char *filename)
+static inline
+size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
+ bool with_hits)
{
- return map_groups__new_module(&self->kmaps, start, filename);
+ return machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
}
#endif /* __PERF_SESSION_H */
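struct perf_event_ops above gains attr, event_type, tracing_data and build_id handlers, an event_op2 for finished_round, and the ordered_samples flag; perf_event_ops__fill_defaults() in session.c replaces every NULL slot with a stub before dispatch. A standalone sketch of that fill-defaults pattern, using illustrative names rather than perf's:

#include <stdio.h>

struct ops {
	int (*sample)(int id);
	int (*comm)(int id);
};

static int stub(int id)
{
	printf("unhandled event %d\n", id);
	return 0;
}

static void ops__fill_defaults(struct ops *o)
{
	if (o->sample == NULL)
		o->sample = stub;
	if (o->comm == NULL)
		o->comm = stub;
}

static int my_sample(int id)
{
	printf("sample %d\n", id);
	return 0;
}

int main(void)
{
	struct ops o = { .sample = my_sample };   /* ->comm left NULL on purpose */

	ops__fill_defaults(&o);
	o.sample(1);
	o.comm(2);                                /* safely hits the stub */
	return 0;
}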
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index cb0f327de9e8..da30b305fba0 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -18,39 +18,50 @@ char * field_sep;
LIST_HEAD(hist_entry__sort_list);
+static int hist_entry__thread_snprintf(struct hist_entry *self, char *bf,
+ size_t size, unsigned int width);
+static int hist_entry__comm_snprintf(struct hist_entry *self, char *bf,
+ size_t size, unsigned int width);
+static int hist_entry__dso_snprintf(struct hist_entry *self, char *bf,
+ size_t size, unsigned int width);
+static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf,
+ size_t size, unsigned int width);
+static int hist_entry__parent_snprintf(struct hist_entry *self, char *bf,
+ size_t size, unsigned int width);
+
struct sort_entry sort_thread = {
- .header = "Command: Pid",
- .cmp = sort__thread_cmp,
- .print = sort__thread_print,
- .width = &threads__col_width,
+ .se_header = "Command: Pid",
+ .se_cmp = sort__thread_cmp,
+ .se_snprintf = hist_entry__thread_snprintf,
+ .se_width = &threads__col_width,
};
struct sort_entry sort_comm = {
- .header = "Command",
- .cmp = sort__comm_cmp,
- .collapse = sort__comm_collapse,
- .print = sort__comm_print,
- .width = &comms__col_width,
+ .se_header = "Command",
+ .se_cmp = sort__comm_cmp,
+ .se_collapse = sort__comm_collapse,
+ .se_snprintf = hist_entry__comm_snprintf,
+ .se_width = &comms__col_width,
};
struct sort_entry sort_dso = {
- .header = "Shared Object",
- .cmp = sort__dso_cmp,
- .print = sort__dso_print,
- .width = &dsos__col_width,
+ .se_header = "Shared Object",
+ .se_cmp = sort__dso_cmp,
+ .se_snprintf = hist_entry__dso_snprintf,
+ .se_width = &dsos__col_width,
};
struct sort_entry sort_sym = {
- .header = "Symbol",
- .cmp = sort__sym_cmp,
- .print = sort__sym_print,
+ .se_header = "Symbol",
+ .se_cmp = sort__sym_cmp,
+ .se_snprintf = hist_entry__sym_snprintf,
};
struct sort_entry sort_parent = {
- .header = "Parent symbol",
- .cmp = sort__parent_cmp,
- .print = sort__parent_print,
- .width = &parent_symbol__col_width,
+ .se_header = "Parent symbol",
+ .se_cmp = sort__parent_cmp,
+ .se_snprintf = hist_entry__parent_snprintf,
+ .se_width = &parent_symbol__col_width,
};
struct sort_dimension {
@@ -85,45 +96,38 @@ sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
return right->thread->pid - left->thread->pid;
}
-int repsep_fprintf(FILE *fp, const char *fmt, ...)
+static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
{
int n;
va_list ap;
va_start(ap, fmt);
- if (!field_sep)
- n = vfprintf(fp, fmt, ap);
- else {
- char *bf = NULL;
- n = vasprintf(&bf, fmt, ap);
- if (n > 0) {
- char *sep = bf;
-
- while (1) {
- sep = strchr(sep, *field_sep);
- if (sep == NULL)
- break;
- *sep = '.';
- }
+ n = vsnprintf(bf, size, fmt, ap);
+ if (field_sep && n > 0) {
+ char *sep = bf;
+
+ while (1) {
+ sep = strchr(sep, *field_sep);
+ if (sep == NULL)
+ break;
+ *sep = '.';
}
- fputs(bf, fp);
- free(bf);
}
va_end(ap);
return n;
}
-size_t
-sort__thread_print(FILE *fp, struct hist_entry *self, unsigned int width)
+static int hist_entry__thread_snprintf(struct hist_entry *self, char *bf,
+ size_t size, unsigned int width)
{
- return repsep_fprintf(fp, "%*s:%5d", width - 6,
+ return repsep_snprintf(bf, size, "%*s:%5d", width,
self->thread->comm ?: "", self->thread->pid);
}
-size_t
-sort__comm_print(FILE *fp, struct hist_entry *self, unsigned int width)
+static int hist_entry__comm_snprintf(struct hist_entry *self, char *bf,
+ size_t size, unsigned int width)
{
- return repsep_fprintf(fp, "%*s", width, self->thread->comm);
+ return repsep_snprintf(bf, size, "%*s", width, self->thread->comm);
}
/* --sort dso */
@@ -131,8 +135,8 @@ sort__comm_print(FILE *fp, struct hist_entry *self, unsigned int width)
int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
- struct dso *dso_l = left->map ? left->map->dso : NULL;
- struct dso *dso_r = right->map ? right->map->dso : NULL;
+ struct dso *dso_l = left->ms.map ? left->ms.map->dso : NULL;
+ struct dso *dso_r = right->ms.map ? right->ms.map->dso : NULL;
const char *dso_name_l, *dso_name_r;
if (!dso_l || !dso_r)
@@ -149,16 +153,16 @@ sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
return strcmp(dso_name_l, dso_name_r);
}
-size_t
-sort__dso_print(FILE *fp, struct hist_entry *self, unsigned int width)
+static int hist_entry__dso_snprintf(struct hist_entry *self, char *bf,
+ size_t size, unsigned int width)
{
- if (self->map && self->map->dso) {
- const char *dso_name = !verbose ? self->map->dso->short_name :
- self->map->dso->long_name;
- return repsep_fprintf(fp, "%-*s", width, dso_name);
+ if (self->ms.map && self->ms.map->dso) {
+ const char *dso_name = !verbose ? self->ms.map->dso->short_name :
+ self->ms.map->dso->long_name;
+ return repsep_snprintf(bf, size, "%-*s", width, dso_name);
}
- return repsep_fprintf(fp, "%*llx", width, (u64)self->ip);
+ return repsep_snprintf(bf, size, "%*Lx", width, self->ip);
}
/* --sort symbol */
@@ -168,31 +172,31 @@ sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
u64 ip_l, ip_r;
- if (left->sym == right->sym)
+ if (left->ms.sym == right->ms.sym)
return 0;
- ip_l = left->sym ? left->sym->start : left->ip;
- ip_r = right->sym ? right->sym->start : right->ip;
+ ip_l = left->ms.sym ? left->ms.sym->start : left->ip;
+ ip_r = right->ms.sym ? right->ms.sym->start : right->ip;
return (int64_t)(ip_r - ip_l);
}
-
-size_t
-sort__sym_print(FILE *fp, struct hist_entry *self, unsigned int width __used)
+static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf,
+ size_t size, unsigned int width __used)
{
size_t ret = 0;
if (verbose) {
- char o = self->map ? dso__symtab_origin(self->map->dso) : '!';
- ret += repsep_fprintf(fp, "%#018llx %c ", (u64)self->ip, o);
+ char o = self->ms.map ? dso__symtab_origin(self->ms.map->dso) : '!';
+ ret += repsep_snprintf(bf, size, "%#018llx %c ", self->ip, o);
}
- ret += repsep_fprintf(fp, "[%c] ", self->level);
- if (self->sym)
- ret += repsep_fprintf(fp, "%s", self->sym->name);
+ ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", self->level);
+ if (self->ms.sym)
+ ret += repsep_snprintf(bf + ret, size - ret, "%s",
+ self->ms.sym->name);
else
- ret += repsep_fprintf(fp, "%#016llx", (u64)self->ip);
+ ret += repsep_snprintf(bf + ret, size - ret, "%#016llx", self->ip);
return ret;
}
@@ -231,10 +235,10 @@ sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
return strcmp(sym_l->name, sym_r->name);
}
-size_t
-sort__parent_print(FILE *fp, struct hist_entry *self, unsigned int width)
+static int hist_entry__parent_snprintf(struct hist_entry *self, char *bf,
+ size_t size, unsigned int width)
{
- return repsep_fprintf(fp, "%-*s", width,
+ return repsep_snprintf(bf, size, "%-*s", width,
self->parent ? self->parent->name : "[other]");
}
@@ -251,7 +255,7 @@ int sort_dimension__add(const char *tok)
if (strncasecmp(tok, sd->name, strlen(tok)))
continue;
- if (sd->entry->collapse)
+ if (sd->entry->se_collapse)
sort__need_collapse = 1;
if (sd->entry == &sort_parent) {
@@ -260,9 +264,8 @@ int sort_dimension__add(const char *tok)
char err[BUFSIZ];
regerror(ret, &parent_regex, err, sizeof(err));
- fprintf(stderr, "Invalid regex: %s\n%s",
- parent_pattern, err);
- exit(-1);
+ pr_err("Invalid regex: %s\n%s", parent_pattern, err);
+ return -EINVAL;
}
sort__has_parent = 1;
}
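The sort.c conversion above replaces the FILE*-based print callbacks with se_snprintf callbacks that format into a caller-supplied buffer, and repsep_snprintf() rewrites any field-separator character the formatted text happens to contain. A standalone sketch of that idea; the separator and the sample comm string are made up:

#include <stdarg.h>
#include <stdio.h>
#include <string.h>

static const char *field_sep = ",";

static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
{
	int n;
	va_list ap;

	va_start(ap, fmt);
	n = vsnprintf(bf, size, fmt, ap);
	va_end(ap);

	if (field_sep && n > 0) {
		char *sep = bf;

		while ((sep = strchr(sep, *field_sep)) != NULL)
			*sep = '.';	/* neutralise the separator character */
	}
	return n;
}

int main(void)
{
	char bf[64];

	repsep_snprintf(bf, sizeof(bf), "%s:%d", "cc1,cpp", 1234);
	puts(bf);	/* prints "cc1.cpp:1234" -- the ',' was replaced */
	return 0;
}

The substitution is what keeps a comm such as "cc1,cpp" from breaking column parsing when a ',' field separator is in use.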
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 753f9ea99fb0..b7c54eeed9c9 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -44,18 +44,28 @@ extern enum sort_type sort__first_dimension;
struct hist_entry {
struct rb_node rb_node;
u64 count;
+ u64 count_sys;
+ u64 count_us;
+ u64 count_guest_sys;
+ u64 count_guest_us;
+
+ /*
+ * XXX WARNING!
+ * thread _has_ to come after ms, see
+ * hist_browser__selected_thread in util/newt.c
+ */
+ struct map_symbol ms;
struct thread *thread;
- struct map *map;
- struct symbol *sym;
u64 ip;
char level;
- struct symbol *parent;
- struct callchain_node callchain;
+ u8 filtered;
+ struct symbol *parent;
union {
unsigned long position;
struct hist_entry *pair;
struct rb_root sorted_chain;
};
+ struct callchain_node callchain[0];
};
enum sort_type {
@@ -73,12 +83,13 @@ enum sort_type {
struct sort_entry {
struct list_head list;
- const char *header;
+ const char *se_header;
- int64_t (*cmp)(struct hist_entry *, struct hist_entry *);
- int64_t (*collapse)(struct hist_entry *, struct hist_entry *);
- size_t (*print)(FILE *fp, struct hist_entry *, unsigned int width);
- unsigned int *width;
+ int64_t (*se_cmp)(struct hist_entry *, struct hist_entry *);
+ int64_t (*se_collapse)(struct hist_entry *, struct hist_entry *);
+ int (*se_snprintf)(struct hist_entry *self, char *bf, size_t size,
+ unsigned int width);
+ unsigned int *se_width;
bool elide;
};
@@ -87,7 +98,6 @@ extern struct list_head hist_entry__sort_list;
void setup_sorting(const char * const usagestr[], const struct option *opts);
-extern int repsep_fprintf(FILE *fp, const char *fmt, ...);
extern size_t sort__thread_print(FILE *, struct hist_entry *, unsigned int);
extern size_t sort__comm_print(FILE *, struct hist_entry *, unsigned int);
extern size_t sort__dso_print(FILE *, struct hist_entry *, unsigned int);
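The hist_entry reshuffle above comes with a layout warning: thread has to stay after ms, because the newt browser recovers the thread from the map_symbol's position. One hedged way to pin such an ordering invariant down at compile time, shown on a cut-down stand-in struct rather than the real hist_entry:

#include <stddef.h>
#include <stdio.h>

struct map_symbol { void *map, *sym; };

struct hist_entry_layout {
	unsigned long long count;
	struct map_symbol ms;	/* must precede thread: the browser walks from ms */
	void *thread;
};

/* Compile-time assertion, pre-C11 style: the array size is negative
 * (and the build fails) if ms does not come before thread. */
typedef char ms_before_thread[
	offsetof(struct hist_entry_layout, ms) <
	offsetof(struct hist_entry_layout, thread) ? 1 : -1];

int main(void)
{
	printf("ms at offset %zu, thread at offset %zu\n",
	       offsetof(struct hist_entry_layout, ms),
	       offsetof(struct hist_entry_layout, thread));
	return 0;
}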
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
index a175949ed216..0409fc7c0058 100644
--- a/tools/perf/util/string.c
+++ b/tools/perf/util/string.c
@@ -1,48 +1,5 @@
-#include "string.h"
#include "util.h"
-
-static int hex(char ch)
-{
- if ((ch >= '0') && (ch <= '9'))
- return ch - '0';
- if ((ch >= 'a') && (ch <= 'f'))
- return ch - 'a' + 10;
- if ((ch >= 'A') && (ch <= 'F'))
- return ch - 'A' + 10;
- return -1;
-}
-
-/*
- * While we find nice hex chars, build a long_val.
- * Return number of chars processed.
- */
-int hex2u64(const char *ptr, u64 *long_val)
-{
- const char *p = ptr;
- *long_val = 0;
-
- while (*p) {
- const int hex_val = hex(*p);
-
- if (hex_val < 0)
- break;
-
- *long_val = (*long_val << 4) | hex_val;
- p++;
- }
-
- return p - ptr;
-}
-
-char *strxfrchar(char *s, char from, char to)
-{
- char *p = s;
-
- while ((p = strchr(p, from)) != NULL)
- *p++ = to;
-
- return s;
-}
+#include "string.h"
#define K 1024LL
/*
diff --git a/tools/perf/util/string.h b/tools/perf/util/string.h
deleted file mode 100644
index 542e44de3719..000000000000
--- a/tools/perf/util/string.h
+++ /dev/null
@@ -1,18 +0,0 @@
-#ifndef __PERF_STRING_H_
-#define __PERF_STRING_H_
-
-#include <stdbool.h>
-#include "types.h"
-
-int hex2u64(const char *ptr, u64 *val);
-char *strxfrchar(char *s, char from, char to);
-s64 perf_atoll(const char *str);
-char **argv_split(const char *str, int *argcp);
-void argv_free(char **argv);
-bool strglobmatch(const char *str, const char *pat);
-bool strlazymatch(const char *str, const char *pat);
-
-#define _STR(x) #x
-#define STR(x) _STR(x)
-
-#endif /* __PERF_STRING_H */
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index c458c4a371d1..ecccc8df128e 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1,13 +1,19 @@
-#include "util.h"
-#include "../perf.h"
-#include "sort.h"
-#include "string.h"
+#define _GNU_SOURCE
+#include <ctype.h>
+#include <dirent.h>
+#include <errno.h>
+#include <libgen.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/param.h>
+#include <fcntl.h>
+#include <unistd.h>
#include "symbol.h"
-#include "thread.h"
+#include "strlist.h"
-#include "debug.h"
-
-#include <asm/bug.h>
#include <libelf.h>
#include <gelf.h>
#include <elf.h>
@@ -18,22 +24,12 @@
#define NT_GNU_BUILD_ID 3
#endif
-enum dso_origin {
- DSO__ORIG_KERNEL = 0,
- DSO__ORIG_JAVA_JIT,
- DSO__ORIG_BUILD_ID_CACHE,
- DSO__ORIG_FEDORA,
- DSO__ORIG_UBUNTU,
- DSO__ORIG_BUILDID,
- DSO__ORIG_DSO,
- DSO__ORIG_KMODULE,
- DSO__ORIG_NOT_FOUND,
-};
-
static void dsos__add(struct list_head *head, struct dso *dso);
static struct map *map__new2(u64 start, struct dso *dso, enum map_type type);
static int dso__load_kernel_sym(struct dso *self, struct map *map,
symbol_filter_t filter);
+static int dso__load_guest_kernel_sym(struct dso *self, struct map *map,
+ symbol_filter_t filter);
static int vmlinux_path__nr_entries;
static char **vmlinux_path;
@@ -126,16 +122,17 @@ static void map_groups__fixup_end(struct map_groups *self)
static struct symbol *symbol__new(u64 start, u64 len, const char *name)
{
size_t namelen = strlen(name) + 1;
- struct symbol *self = zalloc(symbol_conf.priv_size +
- sizeof(*self) + namelen);
+ struct symbol *self = calloc(1, (symbol_conf.priv_size +
+ sizeof(*self) + namelen));
if (self == NULL)
return NULL;
if (symbol_conf.priv_size)
self = ((void *)self) + symbol_conf.priv_size;
- self->start = start;
- self->end = len ? start + len - 1 : start;
+ self->start = start;
+ self->end = len ? start + len - 1 : start;
+ self->namelen = namelen - 1;
pr_debug4("%s: %s %#Lx-%#Lx\n", __func__, name, start, self->end);
@@ -178,7 +175,7 @@ static void dso__set_basename(struct dso *self)
struct dso *dso__new(const char *name)
{
- struct dso *self = zalloc(sizeof(*self) + strlen(name) + 1);
+ struct dso *self = calloc(1, sizeof(*self) + strlen(name) + 1);
if (self != NULL) {
int i;
@@ -192,6 +189,8 @@ struct dso *dso__new(const char *name)
self->loaded = 0;
self->sorted_by_name = 0;
self->has_build_id = 0;
+ self->kernel = DSO_TYPE_USER;
+ INIT_LIST_HEAD(&self->node);
}
return self;
@@ -408,12 +407,9 @@ int kallsyms__parse(const char *filename, void *arg,
char *symbol_name;
line_len = getline(&line, &n, file);
- if (line_len < 0)
+ if (line_len < 0 || !line)
break;
- if (!line)
- goto out_failure;
-
line[--line_len] = '\0'; /* \n */
len = hex2u64(line, &start);
@@ -465,6 +461,7 @@ static int map__process_kallsym_symbol(void *arg, const char *name,
* map__split_kallsyms, when we have split the maps per module
*/
symbols__insert(root, sym);
+
return 0;
}
@@ -489,6 +486,7 @@ static int dso__split_kallsyms(struct dso *self, struct map *map,
symbol_filter_t filter)
{
struct map_groups *kmaps = map__kmap(map)->kmaps;
+ struct machine *machine = kmaps->machine;
struct map *curr_map = map;
struct symbol *pos;
int count = 0;
@@ -510,15 +508,33 @@ static int dso__split_kallsyms(struct dso *self, struct map *map,
*module++ = '\0';
if (strcmp(curr_map->dso->short_name, module)) {
- curr_map = map_groups__find_by_name(kmaps, map->type, module);
+ if (curr_map != map &&
+ self->kernel == DSO_TYPE_GUEST_KERNEL &&
+ machine__is_default_guest(machine)) {
+ /*
+ * We assume all symbols of a module are
+ * continuous in kallsyms, so curr_map
+ * points to a module and all its
+ * symbols are in its kmap. Mark it as
+ * loaded.
+ */
+ dso__set_loaded(curr_map->dso,
+ curr_map->type);
+ }
+
+ curr_map = map_groups__find_by_name(kmaps,
+ map->type, module);
if (curr_map == NULL) {
- pr_debug("/proc/{kallsyms,modules} "
+ pr_err("%s/proc/{kallsyms,modules} "
"inconsistency while looking "
- "for \"%s\" module!\n", module);
- return -1;
+ "for \"%s\" module!\n",
+ machine->root_dir, module);
+ curr_map = map;
+ goto discard_symbol;
}
- if (curr_map->dso->loaded)
+ if (curr_map->dso->loaded &&
+ !machine__is_default_guest(machine))
goto discard_symbol;
}
/*
@@ -531,13 +547,21 @@ static int dso__split_kallsyms(struct dso *self, struct map *map,
char dso_name[PATH_MAX];
struct dso *dso;
- snprintf(dso_name, sizeof(dso_name), "[kernel].%d",
- kernel_range++);
+ if (self->kernel == DSO_TYPE_GUEST_KERNEL)
+ snprintf(dso_name, sizeof(dso_name),
+ "[guest.kernel].%d",
+ kernel_range++);
+ else
+ snprintf(dso_name, sizeof(dso_name),
+ "[kernel].%d",
+ kernel_range++);
dso = dso__new(dso_name);
if (dso == NULL)
return -1;
+ dso->kernel = self->kernel;
+
curr_map = map__new2(pos->start, dso, map->type);
if (curr_map == NULL) {
dso__delete(dso);
@@ -561,6 +585,12 @@ discard_symbol: rb_erase(&pos->rb_node, root);
}
}
+ if (curr_map != map &&
+ self->kernel == DSO_TYPE_GUEST_KERNEL &&
+ machine__is_default_guest(kmaps->machine)) {
+ dso__set_loaded(curr_map->dso, curr_map->type);
+ }
+
return count;
}
@@ -571,7 +601,10 @@ int dso__load_kallsyms(struct dso *self, const char *filename,
return -1;
symbols__fixup_end(&self->symbols[map->type]);
- self->origin = DSO__ORIG_KERNEL;
+ if (self->kernel == DSO_TYPE_GUEST_KERNEL)
+ self->origin = DSO__ORIG_GUEST_KERNEL;
+ else
+ self->origin = DSO__ORIG_KERNEL;
return dso__split_kallsyms(self, map, filter);
}
@@ -870,8 +903,8 @@ out_close:
if (err == 0)
return nr;
out:
- pr_warning("%s: problems reading %s PLT info.\n",
- __func__, self->long_name);
+ pr_debug("%s: problems reading %s PLT info.\n",
+ __func__, self->long_name);
return 0;
}
@@ -958,7 +991,7 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name,
nr_syms = shdr.sh_size / shdr.sh_entsize;
memset(&sym, 0, sizeof(sym));
- if (!self->kernel) {
+ if (self->kernel == DSO_TYPE_USER) {
self->adjust_symbols = (ehdr.e_type == ET_EXEC ||
elf_section_by_name(elf, &ehdr, &shdr,
".gnu.prelink_undo",
@@ -990,7 +1023,7 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name,
section_name = elf_sec__name(&shdr, secstrs);
- if (self->kernel || kmodule) {
+ if (self->kernel != DSO_TYPE_USER || kmodule) {
char dso_name[PATH_MAX];
if (strcmp(section_name,
@@ -1017,6 +1050,7 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name,
curr_dso = dso__new(dso_name);
if (curr_dso == NULL)
goto out_elf_end;
+ curr_dso->kernel = self->kernel;
curr_map = map__new2(start, curr_dso,
map->type);
if (curr_map == NULL) {
@@ -1025,9 +1059,9 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name,
}
curr_map->map_ip = identity__map_ip;
curr_map->unmap_ip = identity__map_ip;
- curr_dso->origin = DSO__ORIG_KERNEL;
+ curr_dso->origin = self->origin;
map_groups__insert(kmap->kmaps, curr_map);
- dsos__add(&dsos__kernel, curr_dso);
+ dsos__add(&self->node, curr_dso);
dso__set_loaded(curr_dso, map->type);
} else
curr_dso = curr_map->dso;
@@ -1089,7 +1123,7 @@ static bool dso__build_id_equal(const struct dso *self, u8 *build_id)
return memcmp(self->build_id, build_id, sizeof(self->build_id)) == 0;
}
-static bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
+bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
{
bool have_build_id = false;
struct dso *pos;
@@ -1107,13 +1141,6 @@ static bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
return have_build_id;
}
-bool dsos__read_build_ids(bool with_hits)
-{
- bool kbuildids = __dsos__read_build_ids(&dsos__kernel, with_hits),
- ubuildids = __dsos__read_build_ids(&dsos__user, with_hits);
- return kbuildids || ubuildids;
-}
-
/*
* Align offset to 4 bytes as needed for note name and descriptor data.
*/
@@ -1248,6 +1275,8 @@ char dso__symtab_origin(const struct dso *self)
[DSO__ORIG_BUILDID] = 'b',
[DSO__ORIG_DSO] = 'd',
[DSO__ORIG_KMODULE] = 'K',
+ [DSO__ORIG_GUEST_KERNEL] = 'g',
+ [DSO__ORIG_GUEST_KMODULE] = 'G',
};
if (self == NULL || self->origin == DSO__ORIG_NOT_FOUND)
@@ -1263,11 +1292,20 @@ int dso__load(struct dso *self, struct map *map, symbol_filter_t filter)
char build_id_hex[BUILD_ID_SIZE * 2 + 1];
int ret = -1;
int fd;
+ struct machine *machine;
+ const char *root_dir;
dso__set_loaded(self, map->type);
- if (self->kernel)
+ if (self->kernel == DSO_TYPE_KERNEL)
return dso__load_kernel_sym(self, map, filter);
+ else if (self->kernel == DSO_TYPE_GUEST_KERNEL)
+ return dso__load_guest_kernel_sym(self, map, filter);
+
+ if (map->groups && map->groups->machine)
+ machine = map->groups->machine;
+ else
+ machine = NULL;
name = malloc(size);
if (!name)
@@ -1321,6 +1359,13 @@ more:
case DSO__ORIG_DSO:
snprintf(name, size, "%s", self->long_name);
break;
+ case DSO__ORIG_GUEST_KMODULE:
+ if (map->groups && map->groups->machine)
+ root_dir = map->groups->machine->root_dir;
+ else
+ root_dir = "";
+ snprintf(name, size, "%s%s", root_dir, self->long_name);
+ break;
default:
goto out;
@@ -1374,7 +1419,8 @@ struct map *map_groups__find_by_name(struct map_groups *self,
return NULL;
}
-static int dso__kernel_module_get_build_id(struct dso *self)
+static int dso__kernel_module_get_build_id(struct dso *self,
+ const char *root_dir)
{
char filename[PATH_MAX];
/*
@@ -1384,8 +1430,8 @@ static int dso__kernel_module_get_build_id(struct dso *self)
const char *name = self->short_name + 1;
snprintf(filename, sizeof(filename),
- "/sys/module/%.*s/notes/.note.gnu.build-id",
- (int)strlen(name - 1), name);
+ "%s/sys/module/%.*s/notes/.note.gnu.build-id",
+ root_dir, (int)strlen(name) - 1, name);
if (sysfs__read_build_id(filename, self->build_id,
sizeof(self->build_id)) == 0)
@@ -1394,26 +1440,33 @@ static int dso__kernel_module_get_build_id(struct dso *self)
return 0;
}
-static int map_groups__set_modules_path_dir(struct map_groups *self, char *dirname)
+static int map_groups__set_modules_path_dir(struct map_groups *self,
+ const char *dir_name)
{
struct dirent *dent;
- DIR *dir = opendir(dirname);
+ DIR *dir = opendir(dir_name);
if (!dir) {
- pr_debug("%s: cannot open %s dir\n", __func__, dirname);
+ pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
return -1;
}
while ((dent = readdir(dir)) != NULL) {
char path[PATH_MAX];
+ struct stat st;
- if (dent->d_type == DT_DIR) {
+ /* sshfs might return bad dent->d_type, so we have to stat */
+ sprintf(path, "%s/%s", dir_name, dent->d_name);
+ if (stat(path, &st))
+ continue;
+
+ if (S_ISDIR(st.st_mode)) {
if (!strcmp(dent->d_name, ".") ||
!strcmp(dent->d_name, ".."))
continue;
snprintf(path, sizeof(path), "%s/%s",
- dirname, dent->d_name);
+ dir_name, dent->d_name);
if (map_groups__set_modules_path_dir(self, path) < 0)
goto failure;
} else {
@@ -1433,13 +1486,13 @@ static int map_groups__set_modules_path_dir(struct map_groups *self, char *dirna
continue;
snprintf(path, sizeof(path), "%s/%s",
- dirname, dent->d_name);
+ dir_name, dent->d_name);
long_name = strdup(path);
if (long_name == NULL)
goto failure;
dso__set_long_name(map->dso, long_name);
- dso__kernel_module_get_build_id(map->dso);
+ dso__kernel_module_get_build_id(map->dso, "");
}
}
@@ -1449,18 +1502,47 @@ failure:
return -1;
}
-static int map_groups__set_modules_path(struct map_groups *self)
+static char *get_kernel_version(const char *root_dir)
{
- struct utsname uts;
+ char version[PATH_MAX];
+ FILE *file;
+ char *name, *tmp;
+ const char *prefix = "Linux version ";
+
+ sprintf(version, "%s/proc/version", root_dir);
+ file = fopen(version, "r");
+ if (!file)
+ return NULL;
+
+ version[0] = '\0';
+ tmp = fgets(version, sizeof(version), file);
+ fclose(file);
+
+ name = strstr(version, prefix);
+ if (!name)
+ return NULL;
+ name += strlen(prefix);
+ tmp = strchr(name, ' ');
+ if (tmp)
+ *tmp = '\0';
+
+ return strdup(name);
+}
+
+static int machine__set_modules_path(struct machine *self)
+{
+ char *version;
char modules_path[PATH_MAX];
- if (uname(&uts) < 0)
+ version = get_kernel_version(self->root_dir);
+ if (!version)
return -1;
- snprintf(modules_path, sizeof(modules_path), "/lib/modules/%s/kernel",
- uts.release);
+ snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel",
+ self->root_dir, version);
+ free(version);
- return map_groups__set_modules_path_dir(self, modules_path);
+ return map_groups__set_modules_path_dir(&self->kmaps, modules_path);
}
/*
@@ -1470,8 +1552,8 @@ static int map_groups__set_modules_path(struct map_groups *self)
*/
static struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
{
- struct map *self = zalloc(sizeof(*self) +
- (dso->kernel ? sizeof(struct kmap) : 0));
+ struct map *self = calloc(1, (sizeof(*self) +
+ (dso->kernel ? sizeof(struct kmap) : 0)));
if (self != NULL) {
/*
* ->end will be filled after we load all the symbols
@@ -1482,11 +1564,11 @@ static struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
return self;
}
-struct map *map_groups__new_module(struct map_groups *self, u64 start,
- const char *filename)
+struct map *machine__new_module(struct machine *self, u64 start,
+ const char *filename)
{
struct map *map;
- struct dso *dso = __dsos__findnew(&dsos__kernel, filename);
+ struct dso *dso = __dsos__findnew(&self->kernel_dsos, filename);
if (dso == NULL)
return NULL;
@@ -1495,18 +1577,31 @@ struct map *map_groups__new_module(struct map_groups *self, u64 start,
if (map == NULL)
return NULL;
- dso->origin = DSO__ORIG_KMODULE;
- map_groups__insert(self, map);
+ if (machine__is_host(self))
+ dso->origin = DSO__ORIG_KMODULE;
+ else
+ dso->origin = DSO__ORIG_GUEST_KMODULE;
+ map_groups__insert(&self->kmaps, map);
return map;
}
-static int map_groups__create_modules(struct map_groups *self)
+static int machine__create_modules(struct machine *self)
{
char *line = NULL;
size_t n;
- FILE *file = fopen("/proc/modules", "r");
+ FILE *file;
struct map *map;
+ const char *modules;
+ char path[PATH_MAX];
+
+ if (machine__is_default_guest(self))
+ modules = symbol_conf.default_guest_modules;
+ else {
+ sprintf(path, "%s/proc/modules", self->root_dir);
+ modules = path;
+ }
+ file = fopen(modules, "r");
if (file == NULL)
return -1;
@@ -1538,16 +1633,16 @@ static int map_groups__create_modules(struct map_groups *self)
*sep = '\0';
snprintf(name, sizeof(name), "[%s]", line);
- map = map_groups__new_module(self, start, name);
+ map = machine__new_module(self, start, name);
if (map == NULL)
goto out_delete_line;
- dso__kernel_module_get_build_id(map->dso);
+ dso__kernel_module_get_build_id(map->dso, self->root_dir);
}
free(line);
fclose(file);
- return map_groups__set_modules_path(self);
+ return machine__set_modules_path(self);
out_delete_line:
free(line);
@@ -1714,8 +1809,56 @@ out_fixup:
return err;
}
-LIST_HEAD(dsos__user);
-LIST_HEAD(dsos__kernel);
+static int dso__load_guest_kernel_sym(struct dso *self, struct map *map,
+ symbol_filter_t filter)
+{
+ int err;
+ const char *kallsyms_filename = NULL;
+ struct machine *machine;
+ char path[PATH_MAX];
+
+ if (!map->groups) {
+ pr_debug("Guest kernel map has no groups pointer\n");
+ return -1;
+ }
+ machine = map->groups->machine;
+
+ if (machine__is_default_guest(machine)) {
+ /*
+ * If the user specified a vmlinux filename, use it and only
+ * it, reporting errors to the user if it cannot be used.
+ * Otherwise use the guest kallsyms file given on the command line.
+ */
+ if (symbol_conf.default_guest_vmlinux_name != NULL) {
+ err = dso__load_vmlinux(self, map,
+ symbol_conf.default_guest_vmlinux_name, filter);
+ goto out_try_fixup;
+ }
+
+ kallsyms_filename = symbol_conf.default_guest_kallsyms;
+ if (!kallsyms_filename)
+ return -1;
+ } else {
+ sprintf(path, "%s/proc/kallsyms", machine->root_dir);
+ kallsyms_filename = path;
+ }
+
+ err = dso__load_kallsyms(self, kallsyms_filename, map, filter);
+ if (err > 0)
+ pr_debug("Using %s for symbols\n", kallsyms_filename);
+
+out_try_fixup:
+ if (err > 0) {
+ if (kallsyms_filename != NULL) {
+ machine__mmap_name(machine, path, sizeof(path));
+ dso__set_long_name(self, strdup(path));
+ }
+ map__fixup_start(map);
+ map__fixup_end(map);
+ }
+
+ return err;
+}
static void dsos__add(struct list_head *head, struct dso *dso)
{
@@ -1747,21 +1890,32 @@ struct dso *__dsos__findnew(struct list_head *head, const char *name)
return dso;
}
-static void __dsos__fprintf(struct list_head *head, FILE *fp)
+size_t __dsos__fprintf(struct list_head *head, FILE *fp)
{
struct dso *pos;
+ size_t ret = 0;
list_for_each_entry(pos, head, node) {
int i;
for (i = 0; i < MAP__NR_TYPES; ++i)
- dso__fprintf(pos, i, fp);
+ ret += dso__fprintf(pos, i, fp);
}
+
+ return ret;
}
-void dsos__fprintf(FILE *fp)
+size_t machines__fprintf_dsos(struct rb_root *self, FILE *fp)
{
- __dsos__fprintf(&dsos__kernel, fp);
- __dsos__fprintf(&dsos__user, fp);
+ struct rb_node *nd;
+ size_t ret = 0;
+
+ for (nd = rb_first(self); nd; nd = rb_next(nd)) {
+ struct machine *pos = rb_entry(nd, struct machine, rb_node);
+ ret += __dsos__fprintf(&pos->kernel_dsos, fp);
+ ret += __dsos__fprintf(&pos->user_dsos, fp);
+ }
+
+ return ret;
}
static size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
@@ -1779,10 +1933,17 @@ static size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
return ret;
}
-size_t dsos__fprintf_buildid(FILE *fp, bool with_hits)
+size_t machines__fprintf_dsos_buildid(struct rb_root *self, FILE *fp, bool with_hits)
{
- return (__dsos__fprintf_buildid(&dsos__kernel, fp, with_hits) +
- __dsos__fprintf_buildid(&dsos__user, fp, with_hits));
+ struct rb_node *nd;
+ size_t ret = 0;
+
+ for (nd = rb_first(self); nd; nd = rb_next(nd)) {
+ struct machine *pos = rb_entry(nd, struct machine, rb_node);
+ ret += __dsos__fprintf_buildid(&pos->kernel_dsos, fp, with_hits);
+ ret += __dsos__fprintf_buildid(&pos->user_dsos, fp, with_hits);
+ }
+ return ret;
}
struct dso *dso__new_kernel(const char *name)
@@ -1791,55 +1952,98 @@ struct dso *dso__new_kernel(const char *name)
if (self != NULL) {
dso__set_short_name(self, "[kernel]");
- self->kernel = 1;
+ self->kernel = DSO_TYPE_KERNEL;
}
return self;
}
-void dso__read_running_kernel_build_id(struct dso *self)
+static struct dso *dso__new_guest_kernel(struct machine *machine,
+ const char *name)
{
- if (sysfs__read_build_id("/sys/kernel/notes", self->build_id,
+ char bf[PATH_MAX];
+ struct dso *self = dso__new(name ?: machine__mmap_name(machine, bf, sizeof(bf)));
+
+ if (self != NULL) {
+ dso__set_short_name(self, "[guest.kernel]");
+ self->kernel = DSO_TYPE_GUEST_KERNEL;
+ }
+
+ return self;
+}
+
+void dso__read_running_kernel_build_id(struct dso *self, struct machine *machine)
+{
+ char path[PATH_MAX];
+
+ if (machine__is_default_guest(machine))
+ return;
+ sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
+ if (sysfs__read_build_id(path, self->build_id,
sizeof(self->build_id)) == 0)
self->has_build_id = true;
}
-static struct dso *dsos__create_kernel(const char *vmlinux)
+static struct dso *machine__create_kernel(struct machine *self)
{
- struct dso *kernel = dso__new_kernel(vmlinux);
+ const char *vmlinux_name = NULL;
+ struct dso *kernel;
- if (kernel != NULL) {
- dso__read_running_kernel_build_id(kernel);
- dsos__add(&dsos__kernel, kernel);
+ if (machine__is_host(self)) {
+ vmlinux_name = symbol_conf.vmlinux_name;
+ kernel = dso__new_kernel(vmlinux_name);
+ } else {
+ if (machine__is_default_guest(self))
+ vmlinux_name = symbol_conf.default_guest_vmlinux_name;
+ kernel = dso__new_guest_kernel(self, vmlinux_name);
}
+ if (kernel != NULL) {
+ dso__read_running_kernel_build_id(kernel, self);
+ dsos__add(&self->kernel_dsos, kernel);
+ }
return kernel;
}
-int __map_groups__create_kernel_maps(struct map_groups *self,
- struct map *vmlinux_maps[MAP__NR_TYPES],
- struct dso *kernel)
+int __machine__create_kernel_maps(struct machine *self, struct dso *kernel)
{
enum map_type type;
for (type = 0; type < MAP__NR_TYPES; ++type) {
struct kmap *kmap;
- vmlinux_maps[type] = map__new2(0, kernel, type);
- if (vmlinux_maps[type] == NULL)
+ self->vmlinux_maps[type] = map__new2(0, kernel, type);
+ if (self->vmlinux_maps[type] == NULL)
return -1;
- vmlinux_maps[type]->map_ip =
- vmlinux_maps[type]->unmap_ip = identity__map_ip;
+ self->vmlinux_maps[type]->map_ip =
+ self->vmlinux_maps[type]->unmap_ip = identity__map_ip;
- kmap = map__kmap(vmlinux_maps[type]);
- kmap->kmaps = self;
- map_groups__insert(self, vmlinux_maps[type]);
+ kmap = map__kmap(self->vmlinux_maps[type]);
+ kmap->kmaps = &self->kmaps;
+ map_groups__insert(&self->kmaps, self->vmlinux_maps[type]);
}
return 0;
}
+int machine__create_kernel_maps(struct machine *self)
+{
+ struct dso *kernel = machine__create_kernel(self);
+
+ if (kernel == NULL ||
+ __machine__create_kernel_maps(self, kernel) < 0)
+ return -1;
+
+ if (symbol_conf.use_modules && machine__create_modules(self) < 0)
+ pr_debug("Problems creating module maps, continuing anyway...\n");
+ /*
+ * Now that we have all the maps created, just set the ->end of them:
+ */
+ map_groups__fixup_end(&self->kmaps);
+ return 0;
+}
+
static void vmlinux_path__exit(void)
{
while (--vmlinux_path__nr_entries >= 0) {
@@ -1895,6 +2099,17 @@ out_fail:
return -1;
}
+size_t vmlinux_path__fprintf(FILE *fp)
+{
+ int i;
+ size_t printed = 0;
+
+ for (i = 0; i < vmlinux_path__nr_entries; ++i)
+ printed += fprintf(fp, "[%d] %s\n", i, vmlinux_path[i]);
+
+ return printed;
+}
+
static int setup_list(struct strlist **list, const char *list_str,
const char *list_name)
{
@@ -1945,22 +2160,129 @@ out_free_comm_list:
return -1;
}
-int map_groups__create_kernel_maps(struct map_groups *self,
- struct map *vmlinux_maps[MAP__NR_TYPES])
+int machines__create_kernel_maps(struct rb_root *self, pid_t pid)
{
- struct dso *kernel = dsos__create_kernel(symbol_conf.vmlinux_name);
+ struct machine *machine = machines__findnew(self, pid);
- if (kernel == NULL)
+ if (machine == NULL)
return -1;
- if (__map_groups__create_kernel_maps(self, vmlinux_maps, kernel) < 0)
- return -1;
+ return machine__create_kernel_maps(machine);
+}
- if (symbol_conf.use_modules && map_groups__create_modules(self) < 0)
- pr_debug("Problems creating module maps, continuing anyway...\n");
- /*
- * Now that we have all the maps created, just set the ->end of them:
- */
- map_groups__fixup_end(self);
- return 0;
+static int hex(char ch)
+{
+ if ((ch >= '0') && (ch <= '9'))
+ return ch - '0';
+ if ((ch >= 'a') && (ch <= 'f'))
+ return ch - 'a' + 10;
+ if ((ch >= 'A') && (ch <= 'F'))
+ return ch - 'A' + 10;
+ return -1;
+}
+
+/*
+ * While we find nice hex chars, build a long_val.
+ * Return number of chars processed.
+ */
+int hex2u64(const char *ptr, u64 *long_val)
+{
+ const char *p = ptr;
+ *long_val = 0;
+
+ while (*p) {
+ const int hex_val = hex(*p);
+
+ if (hex_val < 0)
+ break;
+
+ *long_val = (*long_val << 4) | hex_val;
+ p++;
+ }
+
+ return p - ptr;
+}
+
+char *strxfrchar(char *s, char from, char to)
+{
+ char *p = s;
+
+ while ((p = strchr(p, from)) != NULL)
+ *p++ = to;
+
+ return s;
+}
+
+int machines__create_guest_kernel_maps(struct rb_root *self)
+{
+ int ret = 0;
+ struct dirent **namelist = NULL;
+ int i, items = 0;
+ char path[PATH_MAX];
+ pid_t pid;
+
+ if (symbol_conf.default_guest_vmlinux_name ||
+ symbol_conf.default_guest_modules ||
+ symbol_conf.default_guest_kallsyms) {
+ machines__create_kernel_maps(self, DEFAULT_GUEST_KERNEL_ID);
+ }
+
+ if (symbol_conf.guestmount) {
+ items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
+ if (items <= 0)
+ return -ENOENT;
+ for (i = 0; i < items; i++) {
+ if (!isdigit(namelist[i]->d_name[0])) {
+ /* Filter out . and .. */
+ continue;
+ }
+ pid = atoi(namelist[i]->d_name);
+ sprintf(path, "%s/%s/proc/kallsyms",
+ symbol_conf.guestmount,
+ namelist[i]->d_name);
+ ret = access(path, R_OK);
+ if (ret) {
+ pr_debug("Can't access file %s\n", path);
+ goto failure;
+ }
+ machines__create_kernel_maps(self, pid);
+ }
+failure:
+ free(namelist);
+ }
+
+ return ret;
+}
+
+int machine__load_kallsyms(struct machine *self, const char *filename,
+ enum map_type type, symbol_filter_t filter)
+{
+ struct map *map = self->vmlinux_maps[type];
+ int ret = dso__load_kallsyms(map->dso, filename, map, filter);
+
+ if (ret > 0) {
+ dso__set_loaded(map->dso, type);
+ /*
+ * Since /proc/kallsyms will have multiple sections for the
+ * kernel, with modules between them, fixup the end of all
+ * sections.
+ */
+ __map_groups__fixup_end(&self->kmaps, type);
+ }
+
+ return ret;
+}
+
+int machine__load_vmlinux_path(struct machine *self, enum map_type type,
+ symbol_filter_t filter)
+{
+ struct map *map = self->vmlinux_maps[type];
+ int ret = dso__load_vmlinux_path(map->dso, map, filter);
+
+ if (ret > 0) {
+ dso__set_loaded(map->dso, type);
+ map__reloc_vmlinux(map);
+ }
+
+ return ret;
}
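hex2u64() and strxfrchar() move into symbol.c/symbol.h above. A small standalone usage sketch for hex2u64(); the helper is repeated here only so the example compiles on its own, and the kallsyms line is made up:

#include <stdio.h>

typedef unsigned long long u64;

static int hex(char ch)
{
	if ((ch >= '0') && (ch <= '9'))
		return ch - '0';
	if ((ch >= 'a') && (ch <= 'f'))
		return ch - 'a' + 10;
	if ((ch >= 'A') && (ch <= 'F'))
		return ch - 'A' + 10;
	return -1;
}

static int hex2u64(const char *ptr, u64 *long_val)
{
	const char *p = ptr;

	*long_val = 0;
	while (*p) {
		const int hex_val = hex(*p);

		if (hex_val < 0)
			break;
		*long_val = (*long_val << 4) | hex_val;
		p++;
	}
	return p - ptr;
}

int main(void)
{
	/* A made-up /proc/kallsyms style line: "<addr> <type> <name>" */
	const char line[] = "ffffffff81000000 T _text";
	u64 start;
	int len = hex2u64(line, &start);

	printf("parsed %d hex chars, start = %#llx, rest = \"%s\"\n",
	       len, start, line + len + 1);
	return 0;
}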
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index f30a37428919..6389d1acaf81 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -3,10 +3,11 @@
#include <linux/types.h>
#include <stdbool.h>
-#include "types.h"
+#include <stdint.h>
+#include "map.h"
#include <linux/list.h>
#include <linux/rbtree.h>
-#include "event.h"
+#include <stdio.h>
#define DEBUG_CACHE_DIR ".debug"
@@ -29,6 +30,9 @@ static inline char *bfd_demangle(void __used *v, const char __used *c,
#endif
#endif
+int hex2u64(const char *ptr, u64 *val);
+char *strxfrchar(char *s, char from, char to);
+
/*
* libelf 0.8.x and earlier do not support ELF_C_READ_MMAP;
* for newer versions we can use mmap to reduce memory usage:
@@ -44,10 +48,13 @@ static inline char *bfd_demangle(void __used *v, const char __used *c,
#define DMGL_ANSI (1 << 1) /* Include const, volatile, etc */
#endif
+#define BUILD_ID_SIZE 20
+
struct symbol {
struct rb_node rb_node;
u64 start;
u64 end;
+ u16 namelen;
char name[0];
};
@@ -63,10 +70,15 @@ struct symbol_conf {
show_nr_samples,
use_callchain,
exclude_other,
- full_paths;
+ full_paths,
+ show_cpu_utilization;
const char *vmlinux_name,
*field_sep;
- char *dso_list_str,
+ const char *default_guest_vmlinux_name,
+ *default_guest_kallsyms,
+ *default_guest_modules;
+ const char *guestmount;
+ char *dso_list_str,
*comm_list_str,
*sym_list_str,
*col_width_list_str;
@@ -88,6 +100,11 @@ struct ref_reloc_sym {
u64 unrelocated_addr;
};
+struct map_symbol {
+ struct map *map;
+ struct symbol *sym;
+};
+
struct addr_location {
struct thread *thread;
struct map *map;
@@ -95,6 +112,13 @@ struct addr_location {
u64 addr;
char level;
bool filtered;
+ unsigned int cpumode;
+};
+
+enum dso_kernel_type {
+ DSO_TYPE_USER = 0,
+ DSO_TYPE_KERNEL,
+ DSO_TYPE_GUEST_KERNEL
};
struct dso {
@@ -104,8 +128,9 @@ struct dso {
u8 adjust_symbols:1;
u8 slen_calculated:1;
u8 has_build_id:1;
- u8 kernel:1;
+ enum dso_kernel_type kernel;
u8 hit:1;
+ u8 annotate_warned:1;
unsigned char origin;
u8 sorted_by_name;
u8 loaded;
@@ -131,42 +156,65 @@ static inline void dso__set_loaded(struct dso *self, enum map_type type)
void dso__sort_by_name(struct dso *self, enum map_type type);
-extern struct list_head dsos__user, dsos__kernel;
-
struct dso *__dsos__findnew(struct list_head *head, const char *name);
-static inline struct dso *dsos__findnew(const char *name)
-{
- return __dsos__findnew(&dsos__user, name);
-}
-
int dso__load(struct dso *self, struct map *map, symbol_filter_t filter);
int dso__load_vmlinux_path(struct dso *self, struct map *map,
symbol_filter_t filter);
int dso__load_kallsyms(struct dso *self, const char *filename, struct map *map,
symbol_filter_t filter);
-void dsos__fprintf(FILE *fp);
-size_t dsos__fprintf_buildid(FILE *fp, bool with_hits);
+int machine__load_kallsyms(struct machine *self, const char *filename,
+ enum map_type type, symbol_filter_t filter);
+int machine__load_vmlinux_path(struct machine *self, enum map_type type,
+ symbol_filter_t filter);
+
+size_t __dsos__fprintf(struct list_head *head, FILE *fp);
+
+size_t machines__fprintf_dsos(struct rb_root *self, FILE *fp);
+size_t machines__fprintf_dsos_buildid(struct rb_root *self, FILE *fp, bool with_hits);
size_t dso__fprintf_buildid(struct dso *self, FILE *fp);
size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp);
+
+enum dso_origin {
+ DSO__ORIG_KERNEL = 0,
+ DSO__ORIG_GUEST_KERNEL,
+ DSO__ORIG_JAVA_JIT,
+ DSO__ORIG_BUILD_ID_CACHE,
+ DSO__ORIG_FEDORA,
+ DSO__ORIG_UBUNTU,
+ DSO__ORIG_BUILDID,
+ DSO__ORIG_DSO,
+ DSO__ORIG_GUEST_KMODULE,
+ DSO__ORIG_KMODULE,
+ DSO__ORIG_NOT_FOUND,
+};
+
char dso__symtab_origin(const struct dso *self);
void dso__set_long_name(struct dso *self, char *name);
void dso__set_build_id(struct dso *self, void *build_id);
-void dso__read_running_kernel_build_id(struct dso *self);
+void dso__read_running_kernel_build_id(struct dso *self, struct machine *machine);
struct symbol *dso__find_symbol(struct dso *self, enum map_type type, u64 addr);
struct symbol *dso__find_symbol_by_name(struct dso *self, enum map_type type,
const char *name);
int filename__read_build_id(const char *filename, void *bf, size_t size);
int sysfs__read_build_id(const char *filename, void *bf, size_t size);
-bool dsos__read_build_ids(bool with_hits);
+bool __dsos__read_build_ids(struct list_head *head, bool with_hits);
int build_id__sprintf(const u8 *self, int len, char *bf);
int kallsyms__parse(const char *filename, void *arg,
int (*process_symbol)(void *arg, const char *name,
char type, u64 start));
+int __machine__create_kernel_maps(struct machine *self, struct dso *kernel);
+int machine__create_kernel_maps(struct machine *self);
+
+int machines__create_kernel_maps(struct rb_root *self, pid_t pid);
+int machines__create_guest_kernel_maps(struct rb_root *self);
+
int symbol__init(void);
bool symbol_type__is_a(char symbol_type, enum map_type map_type);
+size_t vmlinux_path__fprintf(FILE *fp);
+
#endif /* __PERF_SYMBOL */
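
The declarations above replace the old global dsos__user/dsos__kernel lists with a per-machine model: an rb_root of struct machine keyed by pid, each with its own kernel maps. Guest machines are instantiated by machines__create_guest_kernel_maps(), which scans symbol_conf.guestmount for numeric pid directories as shown in the symbol.c hunk earlier. A hedged sketch of how a caller might then load symbols for one machine (assumes symbol.h/map.h are included; the kallsyms-then-vmlinux fallback order is illustrative, not perf's actual policy):

    /* Sketch only: load kernel symbols for one already-created machine.
     * MAP__FUNCTION and a NULL symbol_filter_t are the common defaults. */
    static int load_kernel_syms(struct machine *m, const char *kallsyms)
    {
            int nr = machine__load_kallsyms(m, kallsyms, MAP__FUNCTION, NULL);

            if (nr <= 0)            /* illustrative fallback, not perf's policy */
                    nr = machine__load_vmlinux_path(m, MAP__FUNCTION, NULL);
            return nr;              /* > 0: number of symbols resolved */
    }
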
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index fa968312ee7d..1f7ecd47f499 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -7,13 +7,35 @@
#include "util.h"
#include "debug.h"
-void map_groups__init(struct map_groups *self)
+int find_all_tid(int pid, pid_t **all_tid)
{
+ char name[256];
+ int items;
+ struct dirent **namelist = NULL;
+ int ret = 0;
int i;
- for (i = 0; i < MAP__NR_TYPES; ++i) {
- self->maps[i] = RB_ROOT;
- INIT_LIST_HEAD(&self->removed_maps[i]);
+
+ sprintf(name, "/proc/%d/task", pid);
+ items = scandir(name, &namelist, NULL, NULL);
+ if (items <= 0)
+ return -ENOENT;
+ *all_tid = malloc(sizeof(pid_t) * items);
+ if (!*all_tid) {
+ ret = -ENOMEM;
+ goto failure;
}
+
+ for (i = 0; i < items; i++)
+ (*all_tid)[i] = atoi(namelist[i]->d_name);
+
+ ret = items;
+
+failure:
+ for (i = 0; i < items; i++)
+ free(namelist[i]);
+ free(namelist);
+
+ return ret;
}
static struct thread *thread__new(pid_t pid)
@@ -31,28 +53,6 @@ static struct thread *thread__new(pid_t pid)
return self;
}
-static void map_groups__flush(struct map_groups *self)
-{
- int type;
-
- for (type = 0; type < MAP__NR_TYPES; type++) {
- struct rb_root *root = &self->maps[type];
- struct rb_node *next = rb_first(root);
-
- while (next) {
- struct map *pos = rb_entry(next, struct map, rb_node);
- next = rb_next(&pos->rb_node);
- rb_erase(&pos->rb_node, root);
- /*
- * We may have references to this map, for
- * instance in some hist_entry instances, so
- * just move them to a separate list.
- */
- list_add_tail(&pos->node, &self->removed_maps[pos->type]);
- }
- }
-}
-
int thread__set_comm(struct thread *self, const char *comm)
{
int err;
@@ -79,69 +79,10 @@ int thread__comm_len(struct thread *self)
return self->comm_len;
}
-size_t __map_groups__fprintf_maps(struct map_groups *self,
- enum map_type type, FILE *fp)
-{
- size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
- struct rb_node *nd;
-
- for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) {
- struct map *pos = rb_entry(nd, struct map, rb_node);
- printed += fprintf(fp, "Map:");
- printed += map__fprintf(pos, fp);
- if (verbose > 2) {
- printed += dso__fprintf(pos->dso, type, fp);
- printed += fprintf(fp, "--\n");
- }
- }
-
- return printed;
-}
-
-size_t map_groups__fprintf_maps(struct map_groups *self, FILE *fp)
-{
- size_t printed = 0, i;
- for (i = 0; i < MAP__NR_TYPES; ++i)
- printed += __map_groups__fprintf_maps(self, i, fp);
- return printed;
-}
-
-static size_t __map_groups__fprintf_removed_maps(struct map_groups *self,
- enum map_type type, FILE *fp)
-{
- struct map *pos;
- size_t printed = 0;
-
- list_for_each_entry(pos, &self->removed_maps[type], node) {
- printed += fprintf(fp, "Map:");
- printed += map__fprintf(pos, fp);
- if (verbose > 1) {
- printed += dso__fprintf(pos->dso, type, fp);
- printed += fprintf(fp, "--\n");
- }
- }
- return printed;
-}
-
-static size_t map_groups__fprintf_removed_maps(struct map_groups *self, FILE *fp)
-{
- size_t printed = 0, i;
- for (i = 0; i < MAP__NR_TYPES; ++i)
- printed += __map_groups__fprintf_removed_maps(self, i, fp);
- return printed;
-}
-
-static size_t map_groups__fprintf(struct map_groups *self, FILE *fp)
-{
- size_t printed = map_groups__fprintf_maps(self, fp);
- printed += fprintf(fp, "Removed maps:\n");
- return printed + map_groups__fprintf_removed_maps(self, fp);
-}
-
static size_t thread__fprintf(struct thread *self, FILE *fp)
{
return fprintf(fp, "Thread %d %s\n", self->pid, self->comm) +
- map_groups__fprintf(&self->mg, fp);
+ map_groups__fprintf(&self->mg, verbose, fp);
}
struct thread *perf_session__findnew(struct perf_session *self, pid_t pid)
@@ -183,127 +124,12 @@ struct thread *perf_session__findnew(struct perf_session *self, pid_t pid)
return th;
}
-static int map_groups__fixup_overlappings(struct map_groups *self,
- struct map *map)
-{
- struct rb_root *root = &self->maps[map->type];
- struct rb_node *next = rb_first(root);
-
- while (next) {
- struct map *pos = rb_entry(next, struct map, rb_node);
- next = rb_next(&pos->rb_node);
-
- if (!map__overlap(pos, map))
- continue;
-
- if (verbose >= 2) {
- fputs("overlapping maps:\n", stderr);
- map__fprintf(map, stderr);
- map__fprintf(pos, stderr);
- }
-
- rb_erase(&pos->rb_node, root);
- /*
- * We may have references to this map, for instance in some
- * hist_entry instances, so just move them to a separate
- * list.
- */
- list_add_tail(&pos->node, &self->removed_maps[map->type]);
- /*
- * Now check if we need to create new maps for areas not
- * overlapped by the new map:
- */
- if (map->start > pos->start) {
- struct map *before = map__clone(pos);
-
- if (before == NULL)
- return -ENOMEM;
-
- before->end = map->start - 1;
- map_groups__insert(self, before);
- if (verbose >= 2)
- map__fprintf(before, stderr);
- }
-
- if (map->end < pos->end) {
- struct map *after = map__clone(pos);
-
- if (after == NULL)
- return -ENOMEM;
-
- after->start = map->end + 1;
- map_groups__insert(self, after);
- if (verbose >= 2)
- map__fprintf(after, stderr);
- }
- }
-
- return 0;
-}
-
-void maps__insert(struct rb_root *maps, struct map *map)
-{
- struct rb_node **p = &maps->rb_node;
- struct rb_node *parent = NULL;
- const u64 ip = map->start;
- struct map *m;
-
- while (*p != NULL) {
- parent = *p;
- m = rb_entry(parent, struct map, rb_node);
- if (ip < m->start)
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
- }
-
- rb_link_node(&map->rb_node, parent, p);
- rb_insert_color(&map->rb_node, maps);
-}
-
-struct map *maps__find(struct rb_root *maps, u64 ip)
-{
- struct rb_node **p = &maps->rb_node;
- struct rb_node *parent = NULL;
- struct map *m;
-
- while (*p != NULL) {
- parent = *p;
- m = rb_entry(parent, struct map, rb_node);
- if (ip < m->start)
- p = &(*p)->rb_left;
- else if (ip > m->end)
- p = &(*p)->rb_right;
- else
- return m;
- }
-
- return NULL;
-}
-
void thread__insert_map(struct thread *self, struct map *map)
{
- map_groups__fixup_overlappings(&self->mg, map);
+ map_groups__fixup_overlappings(&self->mg, map, verbose, stderr);
map_groups__insert(&self->mg, map);
}
-/*
- * XXX This should not really _copy_ te maps, but refcount them.
- */
-static int map_groups__clone(struct map_groups *self,
- struct map_groups *parent, enum map_type type)
-{
- struct rb_node *nd;
- for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) {
- struct map *map = rb_entry(nd, struct map, rb_node);
- struct map *new = map__clone(map);
- if (new == NULL)
- return -ENOMEM;
- map_groups__insert(self, new);
- }
- return 0;
-}
-
int thread__fork(struct thread *self, struct thread *parent)
{
int i;
@@ -336,15 +162,3 @@ size_t perf_session__fprintf(struct perf_session *self, FILE *fp)
return ret;
}
-
-struct symbol *map_groups__find_symbol(struct map_groups *self,
- enum map_type type, u64 addr,
- symbol_filter_t filter)
-{
- struct map *map = map_groups__find(self, type, addr);
-
- if (map != NULL)
- return map__find_symbol(map, map->map_ip(map, addr), filter);
-
- return NULL;
-}
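
find_all_tid(), added at the top of this file, scans /proc/<pid>/task and hands back a malloc'ed array of thread ids; the return value is the count (or a negative errno) and the caller owns the array. A small usage sketch:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/types.h>

    int find_all_tid(int pid, pid_t **all_tid);     /* prototype from thread.h */

    static void dump_tids(int pid)
    {
            pid_t *tid = NULL;
            int i, nr = find_all_tid(pid, &tid);

            if (nr <= 0) {
                    fprintf(stderr, "no threads for pid %d (%d)\n", pid, nr);
                    return;
            }
            for (i = 0; i < nr; i++)
                    printf("pid %d -> tid %d\n", pid, tid[i]);
            free(tid);              /* array is owned by the caller */
    }
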
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index dcf70303e58e..1dfd9ff8bdcd 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -5,14 +5,6 @@
#include <unistd.h>
#include "symbol.h"
-struct map_groups {
- struct rb_root maps[MAP__NR_TYPES];
- struct list_head removed_maps[MAP__NR_TYPES];
-};
-
-size_t __map_groups__fprintf_maps(struct map_groups *self,
- enum map_type type, FILE *fp);
-
struct thread {
struct rb_node rb_node;
struct map_groups mg;
@@ -23,29 +15,16 @@ struct thread {
int comm_len;
};
-void map_groups__init(struct map_groups *self);
+struct perf_session;
+
+int find_all_tid(int pid, pid_t **all_tid);
int thread__set_comm(struct thread *self, const char *comm);
int thread__comm_len(struct thread *self);
struct thread *perf_session__findnew(struct perf_session *self, pid_t pid);
void thread__insert_map(struct thread *self, struct map *map);
int thread__fork(struct thread *self, struct thread *parent);
-size_t map_groups__fprintf_maps(struct map_groups *self, FILE *fp);
size_t perf_session__fprintf(struct perf_session *self, FILE *fp);
-void maps__insert(struct rb_root *maps, struct map *map);
-struct map *maps__find(struct rb_root *maps, u64 addr);
-
-static inline void map_groups__insert(struct map_groups *self, struct map *map)
-{
- maps__insert(&self->maps[map->type], map);
-}
-
-static inline struct map *map_groups__find(struct map_groups *self,
- enum map_type type, u64 addr)
-{
- return maps__find(&self->maps[type], addr);
-}
-
static inline struct map *thread__find_map(struct thread *self,
enum map_type type, u64 addr)
{
@@ -54,34 +33,12 @@ static inline struct map *thread__find_map(struct thread *self,
void thread__find_addr_map(struct thread *self,
struct perf_session *session, u8 cpumode,
- enum map_type type, u64 addr,
+ enum map_type type, pid_t pid, u64 addr,
struct addr_location *al);
void thread__find_addr_location(struct thread *self,
struct perf_session *session, u8 cpumode,
- enum map_type type, u64 addr,
+ enum map_type type, pid_t pid, u64 addr,
struct addr_location *al,
symbol_filter_t filter);
-struct symbol *map_groups__find_symbol(struct map_groups *self,
- enum map_type type, u64 addr,
- symbol_filter_t filter);
-
-static inline struct symbol *map_groups__find_function(struct map_groups *self,
- u64 addr,
- symbol_filter_t filter)
-{
- return map_groups__find_symbol(self, MAP__FUNCTION, addr, filter);
-}
-
-struct map *map_groups__find_by_name(struct map_groups *self,
- enum map_type type, const char *name);
-
-int __map_groups__create_kernel_maps(struct map_groups *self,
- struct map *vmlinux_maps[MAP__NR_TYPES],
- struct dso *kernel);
-int map_groups__create_kernel_maps(struct map_groups *self,
- struct map *vmlinux_maps[MAP__NR_TYPES]);
-
-struct map *map_groups__new_module(struct map_groups *self, u64 start,
- const char *filename);
#endif /* __PERF_THREAD_H */
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index 5ea8973ad331..b1572601286c 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -154,10 +154,17 @@ static void put_tracing_file(char *file)
free(file);
}
+static ssize_t calc_data_size;
+
static ssize_t write_or_die(const void *buf, size_t len)
{
int ret;
+ if (calc_data_size) {
+ calc_data_size += len;
+ return len;
+ }
+
ret = write(output_fd, buf, len);
if (ret < 0)
die("writing to '%s'", output_file);
@@ -480,6 +487,17 @@ get_tracepoints_path(struct perf_event_attr *pattrs, int nb_events)
return nr_tracepoints > 0 ? path.next : NULL;
}
+bool have_tracepoints(struct perf_event_attr *pattrs, int nb_events)
+{
+ int i;
+
+ for (i = 0; i < nb_events; i++)
+ if (pattrs[i].type == PERF_TYPE_TRACEPOINT)
+ return true;
+
+ return false;
+}
+
int read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events)
{
char buf[BUFSIZ];
@@ -526,3 +544,20 @@ int read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events)
return 0;
}
+
+ssize_t read_tracing_data_size(int fd, struct perf_event_attr *pattrs,
+ int nb_events)
+{
+ ssize_t size;
+ int err = 0;
+
+ calc_data_size = 1;
+ err = read_tracing_data(fd, pattrs, nb_events);
+ size = calc_data_size - 1;
+ calc_data_size = 0;
+
+ if (err < 0)
+ return err;
+
+ return size;
+}
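
read_tracing_data_size() above measures the tracing metadata without emitting it: while calc_data_size is non-zero, write_or_die() only accumulates lengths, and the counter is primed to 1 so the same variable doubles as the "measuring" flag (hence the final "- 1"). The same dry-run pattern in isolation, with illustrative names:

    #include <stddef.h>
    #include <sys/types.h>
    #include <unistd.h>

    static ssize_t measuring;       /* 0: really write; >0: only count bytes */

    static ssize_t emit(int fd, const void *buf, size_t len)
    {
            if (measuring) {
                    measuring += len;       /* pretend the write happened */
                    return len;
            }
            return write(fd, buf, len);
    }

    /* How many bytes would generate() emit?  Run it once in measuring mode. */
    static ssize_t emitted_size(int fd, void (*generate)(int fd))
    {
            ssize_t size;

            measuring = 1;                  /* primed to 1 == "on" */
            generate(fd);
            size = measuring - 1;           /* drop the priming value */
            measuring = 0;
            return size;
    }
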
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 9b3c20f42f98..73a02223c629 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -37,10 +37,12 @@ int header_page_ts_offset;
int header_page_ts_size;
int header_page_size_offset;
int header_page_size_size;
+int header_page_overwrite_offset;
+int header_page_overwrite_size;
int header_page_data_offset;
int header_page_data_size;
-int latency_format;
+bool latency_format;
static char *input_buf;
static unsigned long long input_buf_ptr;
@@ -628,23 +630,32 @@ static int test_type(enum event_type type, enum event_type expect)
return 0;
}
-static int test_type_token(enum event_type type, char *token,
- enum event_type expect, const char *expect_tok)
+static int __test_type_token(enum event_type type, char *token,
+ enum event_type expect, const char *expect_tok,
+ bool warn)
{
if (type != expect) {
- warning("Error: expected type %d but read %d",
- expect, type);
+ if (warn)
+ warning("Error: expected type %d but read %d",
+ expect, type);
return -1;
}
if (strcmp(token, expect_tok) != 0) {
- warning("Error: expected '%s' but read '%s'",
- expect_tok, token);
+ if (warn)
+ warning("Error: expected '%s' but read '%s'",
+ expect_tok, token);
return -1;
}
return 0;
}
+static int test_type_token(enum event_type type, char *token,
+ enum event_type expect, const char *expect_tok)
+{
+ return __test_type_token(type, token, expect, expect_tok, true);
+}
+
static int __read_expect_type(enum event_type expect, char **tok, int newline_ok)
{
enum event_type type;
@@ -661,7 +672,8 @@ static int read_expect_type(enum event_type expect, char **tok)
return __read_expect_type(expect, tok, 1);
}
-static int __read_expected(enum event_type expect, const char *str, int newline_ok)
+static int __read_expected(enum event_type expect, const char *str,
+ int newline_ok, bool warn)
{
enum event_type type;
char *token;
@@ -672,7 +684,7 @@ static int __read_expected(enum event_type expect, const char *str, int newline_
else
type = read_token_item(&token);
- ret = test_type_token(type, token, expect, str);
+ ret = __test_type_token(type, token, expect, str, warn);
free_token(token);
@@ -681,12 +693,12 @@ static int __read_expected(enum event_type expect, const char *str, int newline_
static int read_expected(enum event_type expect, const char *str)
{
- return __read_expected(expect, str, 1);
+ return __read_expected(expect, str, 1, true);
}
static int read_expected_item(enum event_type expect, const char *str)
{
- return __read_expected(expect, str, 0);
+ return __read_expected(expect, str, 0, true);
}
static char *event_read_name(void)
@@ -744,7 +756,7 @@ static int field_is_string(struct format_field *field)
static int field_is_dynamic(struct format_field *field)
{
- if (!strcmp(field->type, "__data_loc"))
+ if (!strncmp(field->type, "__data_loc", 10))
return 1;
return 0;
@@ -1925,7 +1937,7 @@ void *raw_field_ptr(struct event *event, const char *name, void *data)
if (!field)
return NULL;
- if (field->flags & FIELD_IS_STRING) {
+ if (field->flags & FIELD_IS_DYNAMIC) {
int offset;
offset = *(int *)(data + field->offset);
@@ -3087,88 +3099,6 @@ static void print_args(struct print_arg *args)
}
}
-static void parse_header_field(const char *field,
- int *offset, int *size)
-{
- char *token;
- int type;
-
- if (read_expected(EVENT_ITEM, "field") < 0)
- return;
- if (read_expected(EVENT_OP, ":") < 0)
- return;
-
- /* type */
- if (read_expect_type(EVENT_ITEM, &token) < 0)
- goto fail;
- free_token(token);
-
- if (read_expected(EVENT_ITEM, field) < 0)
- return;
- if (read_expected(EVENT_OP, ";") < 0)
- return;
- if (read_expected(EVENT_ITEM, "offset") < 0)
- return;
- if (read_expected(EVENT_OP, ":") < 0)
- return;
- if (read_expect_type(EVENT_ITEM, &token) < 0)
- goto fail;
- *offset = atoi(token);
- free_token(token);
- if (read_expected(EVENT_OP, ";") < 0)
- return;
- if (read_expected(EVENT_ITEM, "size") < 0)
- return;
- if (read_expected(EVENT_OP, ":") < 0)
- return;
- if (read_expect_type(EVENT_ITEM, &token) < 0)
- goto fail;
- *size = atoi(token);
- free_token(token);
- if (read_expected(EVENT_OP, ";") < 0)
- return;
- type = read_token(&token);
- if (type != EVENT_NEWLINE) {
- /* newer versions of the kernel have a "signed" type */
- if (type != EVENT_ITEM)
- goto fail;
-
- if (strcmp(token, "signed") != 0)
- goto fail;
-
- free_token(token);
-
- if (read_expected(EVENT_OP, ":") < 0)
- return;
-
- if (read_expect_type(EVENT_ITEM, &token))
- goto fail;
-
- free_token(token);
- if (read_expected(EVENT_OP, ";") < 0)
- return;
-
- if (read_expect_type(EVENT_NEWLINE, &token))
- goto fail;
- }
- fail:
- free_token(token);
-}
-
-int parse_header_page(char *buf, unsigned long size)
-{
- init_input_buf(buf, size);
-
- parse_header_field("timestamp", &header_page_ts_offset,
- &header_page_ts_size);
- parse_header_field("commit", &header_page_size_offset,
- &header_page_size_size);
- parse_header_field("data", &header_page_data_offset,
- &header_page_data_size);
-
- return 0;
-}
-
int parse_ftrace_file(char *buf, unsigned long size)
{
struct format_field *field;
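
Two of the hunks above tighten dynamic-field handling: field_is_dynamic() now matches any type that starts with "__data_loc" (e.g. "__data_loc char[]"), and raw_field_ptr() resolves FIELD_IS_DYNAMIC fields through the location word stored in the record. In the ftrace format a __data_loc field is a 32-bit word whose low 16 bits give the payload's offset inside the record and whose high 16 bits give its length; a sketch of the decode:

    #include <stdint.h>

    /* Decode an ftrace __data_loc word: low 16 bits = offset of the payload
     * within the record, high 16 bits = payload length in bytes. */
    static const void *data_loc_ptr(const void *record, int field_offset,
                                    unsigned int *lenp)
    {
            uint32_t loc = *(const uint32_t *)((const char *)record + field_offset);

            if (lenp)
                    *lenp = loc >> 16;
            return (const char *)record + (loc & 0xffff);
    }
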
diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
index 7cd1193918c7..cb54cd002f49 100644
--- a/tools/perf/util/trace-event-read.c
+++ b/tools/perf/util/trace-event-read.c
@@ -50,14 +50,51 @@ static int long_size;
static unsigned long page_size;
+static ssize_t calc_data_size;
+static bool repipe;
+
+/* If it fails, the next read will report it */
+static void skip(int size)
+{
+ lseek(input_fd, size, SEEK_CUR);
+}
+
+static int do_read(int fd, void *buf, int size)
+{
+ int rsize = size;
+
+ while (size) {
+ int ret = read(fd, buf, size);
+
+ if (ret <= 0)
+ return -1;
+
+ if (repipe) {
+ int retw = write(STDOUT_FILENO, buf, ret);
+
+ if (retw <= 0 || retw != ret)
+ die("repiping input file");
+ }
+
+ size -= ret;
+ buf += ret;
+ }
+
+ return rsize;
+}
+
static int read_or_die(void *data, int size)
{
int r;
- r = read(input_fd, data, size);
- if (r != size)
+ r = do_read(input_fd, data, size);
+ if (r <= 0)
die("reading input file (size expected=%d received=%d)",
size, r);
+
+ if (calc_data_size)
+ calc_data_size += r;
+
return r;
}
@@ -82,57 +119,36 @@ static char *read_string(void)
char buf[BUFSIZ];
char *str = NULL;
int size = 0;
- int i;
off_t r;
+ char c;
for (;;) {
- r = read(input_fd, buf, BUFSIZ);
+ r = read(input_fd, &c, 1);
if (r < 0)
die("reading input file");
if (!r)
die("no data");
- for (i = 0; i < r; i++) {
- if (!buf[i])
- break;
- }
- if (i < r)
- break;
+ if (repipe) {
+ int retw = write(STDOUT_FILENO, &c, 1);
- if (str) {
- size += BUFSIZ;
- str = realloc(str, size);
- if (!str)
- die("malloc of size %d", size);
- memcpy(str + (size - BUFSIZ), buf, BUFSIZ);
- } else {
- size = BUFSIZ;
- str = malloc_or_die(size);
- memcpy(str, buf, size);
+ if (retw <= 0 || retw != r)
+ die("repiping input file string");
}
- }
- /* trailing \0: */
- i++;
-
- /* move the file descriptor to the end of the string */
- r = lseek(input_fd, -(r - i), SEEK_CUR);
- if (r == (off_t)-1)
- die("lseek");
-
- if (str) {
- size += i;
- str = realloc(str, size);
- if (!str)
- die("malloc of size %d", size);
- memcpy(str + (size - i), buf, i);
- } else {
- size = i;
- str = malloc_or_die(i);
- memcpy(str, buf, i);
+ buf[size++] = c;
+
+ if (!c)
+ break;
}
+ if (calc_data_size)
+ calc_data_size += size;
+
+ str = malloc_or_die(size);
+ memcpy(str, buf, size);
+
return str;
}
@@ -174,7 +190,6 @@ static void read_ftrace_printk(void)
static void read_header_files(void)
{
unsigned long long size;
- char *header_page;
char *header_event;
char buf[BUFSIZ];
@@ -184,10 +199,7 @@ static void read_header_files(void)
die("did not read header page");
size = read8();
- header_page = malloc_or_die(size);
- read_or_die(header_page, size);
- parse_header_page(header_page, size);
- free(header_page);
+ skip(size);
/*
* The size field in the page is of type long,
@@ -459,7 +471,7 @@ struct record *trace_read_data(int cpu)
return data;
}
-void trace_report(int fd)
+ssize_t trace_report(int fd, bool __repipe)
{
char buf[BUFSIZ];
char test[] = { 23, 8, 68 };
@@ -467,6 +479,10 @@ void trace_report(int fd)
int show_version = 0;
int show_funcs = 0;
int show_printk = 0;
+ ssize_t size;
+
+ calc_data_size = 1;
+ repipe = __repipe;
input_fd = fd;
@@ -499,14 +515,18 @@ void trace_report(int fd)
read_proc_kallsyms();
read_ftrace_printk();
+ size = calc_data_size - 1;
+ calc_data_size = 0;
+ repipe = false;
+
if (show_funcs) {
print_funcs();
- return;
+ return size;
}
if (show_printk) {
print_printk();
- return;
+ return size;
}
- return;
+ return size;
}
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index c3269b937db4..406d452956db 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -1,6 +1,7 @@
#ifndef __PERF_TRACE_EVENTS_H
#define __PERF_TRACE_EVENTS_H
+#include <stdbool.h>
#include "parse-events.h"
#define __unused __attribute__((unused))
@@ -162,7 +163,7 @@ struct record *trace_read_data(int cpu);
void parse_set_info(int nr_cpus, int long_sz);
-void trace_report(int fd);
+ssize_t trace_report(int fd, bool repipe);
void *malloc_or_die(unsigned int size);
@@ -241,9 +242,8 @@ extern int header_page_size_size;
extern int header_page_data_offset;
extern int header_page_data_size;
-extern int latency_format;
+extern bool latency_format;
-int parse_header_page(char *buf, unsigned long size);
int trace_parse_common_type(void *data);
int trace_parse_common_pid(void *data);
int parse_common_pc(void *data);
@@ -258,6 +258,8 @@ void *raw_field_ptr(struct event *event, const char *name, void *data);
unsigned long long eval_flag(const char *flag);
int read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events);
+ssize_t read_tracing_data_size(int fd, struct perf_event_attr *pattrs,
+ int nb_events);
/* taken from kernel/trace/trace.h */
enum trace_flag_type {
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 0f5b2a6f1080..fbf45d1b26f7 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -42,12 +42,14 @@
#define _ALL_SOURCE 1
#define _GNU_SOURCE 1
#define _BSD_SOURCE 1
+#define HAS_BOOL
#include <unistd.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/statfs.h>
#include <fcntl.h>
+#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>
#include <stdarg.h>
@@ -78,6 +80,7 @@
#include <pwd.h>
#include <inttypes.h>
#include "../../../include/linux/magic.h"
+#include "types.h"
#ifndef NO_ICONV
@@ -295,6 +298,13 @@ extern void *xmemdupz(const void *data, size_t len);
extern char *xstrndup(const char *str, size_t len);
extern void *xrealloc(void *ptr, size_t size) __attribute__((weak));
+static inline void *xzalloc(size_t size)
+{
+ void *buf = xmalloc(size);
+
+ return memset(buf, 0, size);
+}
+
static inline void *zalloc(size_t size)
{
return calloc(1, size);
@@ -309,6 +319,7 @@ static inline int has_extension(const char *filename, const char *ext)
{
size_t len = strlen(filename);
size_t extlen = strlen(ext);
+
return len > extlen && !memcmp(filename + len - extlen, ext, extlen);
}
@@ -322,6 +333,7 @@ static inline int has_extension(const char *filename, const char *ext)
#undef isalnum
#undef tolower
#undef toupper
+
extern unsigned char sane_ctype[256];
#define GIT_SPACE 0x01
#define GIT_DIGIT 0x02
@@ -406,4 +418,13 @@ void git_qsort(void *base, size_t nmemb, size_t size,
int mkdir_p(char *path, mode_t mode);
int copyfile(const char *from, const char *to);
+s64 perf_atoll(const char *str);
+char **argv_split(const char *str, int *argcp);
+void argv_free(char **argv);
+bool strglobmatch(const char *str, const char *pat);
+bool strlazymatch(const char *str, const char *pat);
+
+#define _STR(x) #x
+#define STR(x) _STR(x)
+
#endif
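
The _STR()/STR() pair added above is the standard two-level stringification idiom: the outer macro expands its argument first, so a macro is stringified by its value rather than by its name. For example (PERF_BUF_SIZE is an illustrative macro, not one from util.h):

    #define PERF_BUF_SIZE 4096      /* illustrative */

    #define _STR(x) #x
    #define STR(x)  _STR(x)

    /* _STR(PERF_BUF_SIZE) -> "PERF_BUF_SIZE"
     * STR(PERF_BUF_SIZE)  -> "4096", because the argument is macro-expanded
     * before the inner # operator stringifies it. */
    static const char bufsz_str[] = STR(PERF_BUF_SIZE);
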
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index 02ff2b19dbe2..4d10b1e047f4 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -316,12 +316,16 @@ static int assigned_device_enable_host_msix(struct kvm *kvm,
kvm_assigned_dev_intr, 0,
"kvm_assigned_msix_device",
(void *)dev);
- /* FIXME: free requested_irq's on failure */
if (r)
- return r;
+ goto err;
}
return 0;
+err:
+ for (i -= 1; i >= 0; i--)
+ free_irq(dev->host_msix_entries[i].vector, (void *)dev);
+ pci_disable_msix(dev->dev);
+ return r;
}
#endif
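
The MSI-X fix above replaces the FIXME with an unwind path: when request_irq() fails partway through the vector loop, only the vectors that were actually requested are freed, in reverse order, before MSI-X is disabled. The same roll-back idiom in a standalone sketch (grab_one()/release_one() are stand-ins, not KVM code):

    #include <stdio.h>

    static int grab_one(int *h, int i)
    {
            *h = i;
            return i == 3 ? -1 : 0;         /* pretend the 4th grab fails */
    }

    static void release_one(int *h)
    {
            printf("released %d\n", *h);
    }

    static int grab_all(int *handles, int n)
    {
            int i, err = 0;

            for (i = 0; i < n; i++) {
                    err = grab_one(&handles[i], i);
                    if (err)
                            goto unwind;
            }
            return 0;

    unwind:                                 /* undo only what succeeded */
            while (--i >= 0)
                    release_one(&handles[i]);
            return err;
    }
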
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 36e258029649..53850177163f 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -120,8 +120,10 @@ int kvm_coalesced_mmio_init(struct kvm *kvm)
return ret;
out_free_dev:
+ kvm->coalesced_mmio_dev = NULL;
kfree(dev);
out_free_page:
+ kvm->coalesced_mmio_ring = NULL;
__free_page(page);
out_err:
return ret;
@@ -139,7 +141,7 @@ int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
if (dev == NULL)
- return -EINVAL;
+ return -ENXIO;
mutex_lock(&kvm->slots_lock);
if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
@@ -162,7 +164,7 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
struct kvm_coalesced_mmio_zone *z;
if (dev == NULL)
- return -EINVAL;
+ return -ENXIO;
mutex_lock(&kvm->slots_lock);
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 03a5eb22da2b..7c79c1d76d0c 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -197,7 +197,7 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
union kvm_ioapic_redirect_entry entry;
int ret = 1;
- mutex_lock(&ioapic->lock);
+ spin_lock(&ioapic->lock);
if (irq >= 0 && irq < IOAPIC_NUM_PINS) {
entry = ioapic->redirtbl[irq];
level ^= entry.fields.polarity;
@@ -214,7 +214,7 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
}
trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
}
- mutex_unlock(&ioapic->lock);
+ spin_unlock(&ioapic->lock);
return ret;
}
@@ -238,9 +238,9 @@ static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int vector,
* is dropped it will be put into irr and will be delivered
* after ack notifier returns.
*/
- mutex_unlock(&ioapic->lock);
+ spin_unlock(&ioapic->lock);
kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i);
- mutex_lock(&ioapic->lock);
+ spin_lock(&ioapic->lock);
if (trigger_mode != IOAPIC_LEVEL_TRIG)
continue;
@@ -259,9 +259,9 @@ void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode)
smp_rmb();
if (!test_bit(vector, ioapic->handled_vectors))
return;
- mutex_lock(&ioapic->lock);
+ spin_lock(&ioapic->lock);
__kvm_ioapic_update_eoi(ioapic, vector, trigger_mode);
- mutex_unlock(&ioapic->lock);
+ spin_unlock(&ioapic->lock);
}
static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev)
@@ -287,7 +287,7 @@ static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
ASSERT(!(addr & 0xf)); /* check alignment */
addr &= 0xff;
- mutex_lock(&ioapic->lock);
+ spin_lock(&ioapic->lock);
switch (addr) {
case IOAPIC_REG_SELECT:
result = ioapic->ioregsel;
@@ -301,7 +301,7 @@ static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
result = 0;
break;
}
- mutex_unlock(&ioapic->lock);
+ spin_unlock(&ioapic->lock);
switch (len) {
case 8:
@@ -338,7 +338,7 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
}
addr &= 0xff;
- mutex_lock(&ioapic->lock);
+ spin_lock(&ioapic->lock);
switch (addr) {
case IOAPIC_REG_SELECT:
ioapic->ioregsel = data;
@@ -356,7 +356,7 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
default:
break;
}
- mutex_unlock(&ioapic->lock);
+ spin_unlock(&ioapic->lock);
return 0;
}
@@ -386,7 +386,7 @@ int kvm_ioapic_init(struct kvm *kvm)
ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL);
if (!ioapic)
return -ENOMEM;
- mutex_init(&ioapic->lock);
+ spin_lock_init(&ioapic->lock);
kvm->arch.vioapic = ioapic;
kvm_ioapic_reset(ioapic);
kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
@@ -419,9 +419,9 @@ int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
if (!ioapic)
return -EINVAL;
- mutex_lock(&ioapic->lock);
+ spin_lock(&ioapic->lock);
memcpy(state, ioapic, sizeof(struct kvm_ioapic_state));
- mutex_unlock(&ioapic->lock);
+ spin_unlock(&ioapic->lock);
return 0;
}
@@ -431,9 +431,9 @@ int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
if (!ioapic)
return -EINVAL;
- mutex_lock(&ioapic->lock);
+ spin_lock(&ioapic->lock);
memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
update_handled_vectors(ioapic);
- mutex_unlock(&ioapic->lock);
+ spin_unlock(&ioapic->lock);
return 0;
}
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
index 8a751b78a430..0b190c34ccc3 100644
--- a/virt/kvm/ioapic.h
+++ b/virt/kvm/ioapic.h
@@ -45,7 +45,7 @@ struct kvm_ioapic {
struct kvm_io_device dev;
struct kvm *kvm;
void (*ack_notifier)(void *opaque, int irq);
- struct mutex lock;
+ spinlock_t lock;
DECLARE_BITMAP(handled_vectors, 256);
};
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index 80fd3ad3b2de..37ca71ebdba8 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -78,7 +78,7 @@ static int kvm_iommu_map_memslots(struct kvm *kvm)
int i, r = 0;
struct kvm_memslots *slots;
- slots = rcu_dereference(kvm->memslots);
+ slots = kvm_memslots(kvm);
for (i = 0; i < slots->nmemslots; i++) {
r = kvm_iommu_map_pages(kvm, &slots->memslots[i]);
@@ -217,7 +217,7 @@ static int kvm_iommu_unmap_memslots(struct kvm *kvm)
int i;
struct kvm_memslots *slots;
- slots = rcu_dereference(kvm->memslots);
+ slots = kvm_memslots(kvm);
for (i = 0; i < slots->nmemslots; i++) {
kvm_iommu_put_pages(kvm, slots->memslots[i].base_gfn,
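
The iommu hunks swap rcu_dereference() for kvm_memslots(): the memslot array is protected by kvm->srcu, not plain RCU, so the accessor has to be the SRCU flavour (the same change appears for kvm->buses in kvm_main.c below, which uses srcu_dereference() directly). A hedged sketch of what such a helper amounts to; the real one lives in include/linux/kvm_host.h and may carry extra lockdep annotations:

    /* Sketch only, not the actual kvm_host.h definition. */
    static inline struct kvm_memslots *kvm_memslots_sketch(struct kvm *kvm)
    {
            return srcu_dereference(kvm->memslots, &kvm->srcu);
    }
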
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index c82ae2492634..f032806a212f 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -422,9 +422,6 @@ static struct kvm *kvm_create_vm(void)
spin_lock(&kvm_lock);
list_add(&kvm->vm_list, &vm_list);
spin_unlock(&kvm_lock);
-#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
- kvm_coalesced_mmio_init(kvm);
-#endif
out:
return kvm;
@@ -560,6 +557,10 @@ int __kvm_set_memory_region(struct kvm *kvm,
base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
npages = mem->memory_size >> PAGE_SHIFT;
+ r = -EINVAL;
+ if (npages > KVM_MEM_MAX_NR_PAGES)
+ goto out;
+
if (!npages)
mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
@@ -833,7 +834,7 @@ EXPORT_SYMBOL_GPL(kvm_is_error_hva);
struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
{
int i;
- struct kvm_memslots *slots = rcu_dereference(kvm->memslots);
+ struct kvm_memslots *slots = kvm_memslots(kvm);
for (i = 0; i < slots->nmemslots; ++i) {
struct kvm_memory_slot *memslot = &slots->memslots[i];
@@ -855,7 +856,7 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
int i;
- struct kvm_memslots *slots = rcu_dereference(kvm->memslots);
+ struct kvm_memslots *slots = kvm_memslots(kvm);
gfn = unalias_gfn_instantiation(kvm, gfn);
for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
@@ -899,7 +900,7 @@ out:
int memslot_id(struct kvm *kvm, gfn_t gfn)
{
int i;
- struct kvm_memslots *slots = rcu_dereference(kvm->memslots);
+ struct kvm_memslots *slots = kvm_memslots(kvm);
struct kvm_memory_slot *memslot = NULL;
gfn = unalias_gfn(kvm, gfn);
@@ -914,6 +915,11 @@ int memslot_id(struct kvm *kvm, gfn_t gfn)
return memslot - slots->memslots;
}
+static unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
+{
+ return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
+}
+
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
struct kvm_memory_slot *slot;
@@ -922,7 +928,7 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
slot = gfn_to_memslot_unaliased(kvm, gfn);
if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
return bad_hva();
- return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
+ return gfn_to_hva_memslot(slot, gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);
@@ -972,11 +978,6 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);
-static unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
-{
- return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
-}
-
pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot, gfn_t gfn)
{
@@ -1190,13 +1191,8 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
memslot = gfn_to_memslot_unaliased(kvm, gfn);
if (memslot && memslot->dirty_bitmap) {
unsigned long rel_gfn = gfn - memslot->base_gfn;
- unsigned long *p = memslot->dirty_bitmap +
- rel_gfn / BITS_PER_LONG;
- int offset = rel_gfn % BITS_PER_LONG;
- /* avoid RMW */
- if (!generic_test_le_bit(offset, p))
- generic___set_le_bit(offset, p);
+ generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
}
}
@@ -1609,7 +1605,6 @@ static long kvm_vm_ioctl(struct file *filp,
r = -EFAULT;
if (copy_from_user(&zone, argp, sizeof zone))
goto out;
- r = -ENXIO;
r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
if (r)
goto out;
@@ -1621,7 +1616,6 @@ static long kvm_vm_ioctl(struct file *filp,
r = -EFAULT;
if (copy_from_user(&zone, argp, sizeof zone))
goto out;
- r = -ENXIO;
r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
if (r)
goto out;
@@ -1755,12 +1749,19 @@ static struct file_operations kvm_vm_fops = {
static int kvm_dev_ioctl_create_vm(void)
{
- int fd;
+ int fd, r;
struct kvm *kvm;
kvm = kvm_create_vm();
if (IS_ERR(kvm))
return PTR_ERR(kvm);
+#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
+ r = kvm_coalesced_mmio_init(kvm);
+ if (r < 0) {
+ kvm_put_kvm(kvm);
+ return r;
+ }
+#endif
fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
if (fd < 0)
kvm_put_kvm(kvm);
@@ -1928,11 +1929,6 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
cpu);
hardware_disable(NULL);
break;
- case CPU_UP_CANCELED:
- printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
- cpu);
- smp_call_function_single(cpu, hardware_disable, NULL, 1);
- break;
case CPU_ONLINE:
printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
cpu);
@@ -1991,7 +1987,9 @@ int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, const void *val)
{
int i;
- struct kvm_io_bus *bus = rcu_dereference(kvm->buses[bus_idx]);
+ struct kvm_io_bus *bus;
+
+ bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
for (i = 0; i < bus->dev_count; i++)
if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
return 0;
@@ -2003,8 +2001,9 @@ int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, void *val)
{
int i;
- struct kvm_io_bus *bus = rcu_dereference(kvm->buses[bus_idx]);
+ struct kvm_io_bus *bus;
+ bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
for (i = 0; i < bus->dev_count; i++)
if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
return 0;
@@ -2179,7 +2178,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
kvm_arch_vcpu_put(vcpu);
}
-int kvm_init(void *opaque, unsigned int vcpu_size,
+int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
struct module *module)
{
int r;
@@ -2229,8 +2228,9 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
goto out_free_4;
/* A kmem cache lets us meet the alignment requirements of fx_save. */
- kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
- __alignof__(struct kvm_vcpu),
+ if (!vcpu_align)
+ vcpu_align = __alignof__(struct kvm_vcpu);
+ kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
0, NULL);
if (!kvm_vcpu_cache) {
r = -ENOMEM;
@@ -2279,7 +2279,6 @@ EXPORT_SYMBOL_GPL(kvm_init);
void kvm_exit(void)
{
- tracepoint_synchronize_unregister();
kvm_exit_debug();
misc_deregister(&kvm_dev);
kmem_cache_destroy(kvm_vcpu_cache);